From 783a898b9dac0f09e609ffbdd47b50109a7171e8 Mon Sep 17 00:00:00 2001 From: Avogar Date: Thu, 12 Mar 2020 20:19:40 +0300 Subject: [PATCH 001/484] Add MsgPackRowOutputFormat. --- dbms/programs/server/metadata/test_n2zcw0.sql | 2 + .../server/metadata/test_n2zcw0/t.sql | 8 + dbms/src/Formats/FormatFactory.cpp | 1 + dbms/src/Formats/FormatFactory.h | 4 +- .../Formats/Impl/MsgPackRowOutputFormat.cpp | 164 ++++++++++++++++++ .../Formats/Impl/MsgPackRowOutputFormat.h | 29 ++++ 6 files changed, 207 insertions(+), 1 deletion(-) create mode 100644 dbms/programs/server/metadata/test_n2zcw0.sql create mode 100644 dbms/programs/server/metadata/test_n2zcw0/t.sql create mode 100644 dbms/src/Processors/Formats/Impl/MsgPackRowOutputFormat.cpp create mode 100644 dbms/src/Processors/Formats/Impl/MsgPackRowOutputFormat.h diff --git a/dbms/programs/server/metadata/test_n2zcw0.sql b/dbms/programs/server/metadata/test_n2zcw0.sql new file mode 100644 index 00000000000..80046cd585c --- /dev/null +++ b/dbms/programs/server/metadata/test_n2zcw0.sql @@ -0,0 +1,2 @@ +ATTACH DATABASE test_n2zcw0 +ENGINE = Ordinary diff --git a/dbms/programs/server/metadata/test_n2zcw0/t.sql b/dbms/programs/server/metadata/test_n2zcw0/t.sql new file mode 100644 index 00000000000..64e3abcfe34 --- /dev/null +++ b/dbms/programs/server/metadata/test_n2zcw0/t.sql @@ -0,0 +1,8 @@ +ATTACH TABLE t +( + `a` Int, + `b` Int +) +ENGINE = MergeTree +ORDER BY (a, b) +SETTINGS index_granularity = 400 diff --git a/dbms/src/Formats/FormatFactory.cpp b/dbms/src/Formats/FormatFactory.cpp index a8e27054704..8b6034dad9d 100644 --- a/dbms/src/Formats/FormatFactory.cpp +++ b/dbms/src/Formats/FormatFactory.cpp @@ -352,6 +352,7 @@ FormatFactory::FormatFactory() registerOutputFormatProcessorAvro(*this); registerInputFormatProcessorTemplate(*this); registerOutputFormatProcessorTemplate(*this); + registerOutputFormatProcessorMsgPack(*this); registerFileSegmentationEngineTabSeparated(*this); registerFileSegmentationEngineCSV(*this); diff --git a/dbms/src/Formats/FormatFactory.h b/dbms/src/Formats/FormatFactory.h index 7c18971e0eb..68ba2155642 100644 --- a/dbms/src/Formats/FormatFactory.h +++ b/dbms/src/Formats/FormatFactory.h @@ -171,7 +171,9 @@ void registerOutputFormatProcessorProtobuf(FormatFactory & factory); void registerInputFormatProcessorAvro(FormatFactory & factory); void registerOutputFormatProcessorAvro(FormatFactory & factory); void registerInputFormatProcessorTemplate(FormatFactory & factory); -void registerOutputFormatProcessorTemplate(FormatFactory &factory); +void registerOutputFormatProcessorTemplate(FormatFactory & factory); +void registerOutputFormatProcessorMsgPack(FormatFactory &factory); + /// File Segmentation Engines for parallel reading diff --git a/dbms/src/Processors/Formats/Impl/MsgPackRowOutputFormat.cpp b/dbms/src/Processors/Formats/Impl/MsgPackRowOutputFormat.cpp new file mode 100644 index 00000000000..061f4228158 --- /dev/null +++ b/dbms/src/Processors/Formats/Impl/MsgPackRowOutputFormat.cpp @@ -0,0 +1,164 @@ +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int ILLEGAL_COLUMN; +} + +MsgPackRowOutputFormat::MsgPackRowOutputFormat(WriteBuffer & out_, const Block & header_, FormatFactory::WriteCallback callback, const FormatSettings & settings_) + : IRowOutputFormat(header_, out_, callback), settings(settings_), packer(out_) {} + +void MsgPackRowOutputFormat::serializeField(const IColumn & column, DataTypePtr 
data_type, size_t row_num) +{ + switch (data_type->getTypeId()) + { + case TypeIndex::UInt8: + { + packer.pack_uint8(assert_cast(column).getElement(row_num)); + return; + } + case TypeIndex::UInt16: + { + packer.pack_uint16(assert_cast(column).getElement(row_num)); + return; + } + case TypeIndex::UInt32: + { + packer.pack_uint32(assert_cast(column).getElement(row_num)); + return; + } + case TypeIndex::UInt64: + { + packer.pack_uint64(assert_cast(column).getElement(row_num)); + return; + } + case TypeIndex::Int8: + { + packer.pack_int8(assert_cast(column).getElement(row_num)); + return; + } + case TypeIndex::Int16: + { + packer.pack_int16(assert_cast(column).getElement(row_num)); + return; + } + case TypeIndex::Int32: + { + packer.pack_int32(assert_cast(column).getElement(row_num)); + return; + } + case TypeIndex::Int64: + { + packer.pack_int64(assert_cast(column).getElement(row_num)); + return; + } + case TypeIndex::Float32: + { + packer.pack_float(assert_cast(column).getElement(row_num)); + return; + } + case TypeIndex::Float64: + { + packer.pack_double(assert_cast(column).getElement(row_num)); + return; + } + case TypeIndex::Date: + { + packer.pack_uint16(assert_cast(column).getElement(row_num)); + return; + } + case TypeIndex::DateTime: + { + UInt32 datetime = assert_cast(column).getElement(row_num); + // Timestamp extension type in MsgPack is -1. + packer.pack_ext(sizeof(datetime), -1); + packer.pack_ext_body(reinterpret_cast(&datetime), sizeof(datetime)); + return; + } + case TypeIndex::String: + { + const StringRef & string = assert_cast(column).getDataAt(row_num); + packer.pack_str(string.size); + packer.pack_str_body(string.data, string.size); + return; + } + case TypeIndex::FixedString: + { + const StringRef & string = assert_cast(column).getDataAt(row_num); + packer.pack_str(string.size); + packer.pack_str_body(string.data, string.size); + return; + } + case TypeIndex::Array: + { + auto nested_type = assert_cast(*data_type).getNestedType(); + const ColumnArray & column_array = assert_cast(column); + const IColumn & nested_column = column_array.getData(); + const ColumnArray::Offsets & offsets = column_array.getOffsets(); + size_t offset = offsets[row_num - 1]; + size_t size = offsets[row_num] - offset; + packer.pack_array(size); + for (size_t i = 0; i < size; ++i) + { + serializeField(nested_column, nested_type, offset + i); + } + return; + } + case TypeIndex::Nullable: + { + auto nested_type = removeNullable(data_type); + const ColumnNullable & column_nullable = assert_cast(column); + if (!column_nullable.isNullAt(row_num)) + serializeField(column_nullable.getNestedColumn(), nested_type, row_num); + else + packer.pack_nil(); + return; + } + case TypeIndex::Nothing: + { + packer.pack_nil(); + return; + } + default: + break; + } + throw Exception("Type " + data_type->getName() + " is not supported for MsgPack output format", ErrorCodes::ILLEGAL_COLUMN); +} + +void MsgPackRowOutputFormat::write(const Columns & columns, size_t row_num) +{ + size_t num_columns = columns.size(); + for (size_t i = 0; i < num_columns; ++i) + { + serializeField(*columns[i], types[i], row_num); + } +} + +void registerOutputFormatProcessorMsgPack(FormatFactory & factory) +{ + factory.registerOutputFormatProcessor("MsgPack", []( + WriteBuffer & buf, + const Block & sample, + FormatFactory::WriteCallback callback, + const FormatSettings & settings) + { + return std::make_shared(buf, sample, callback, settings); + }); +} + +} \ No newline at end of file diff --git 
a/dbms/src/Processors/Formats/Impl/MsgPackRowOutputFormat.h b/dbms/src/Processors/Formats/Impl/MsgPackRowOutputFormat.h new file mode 100644 index 00000000000..20df018b60c --- /dev/null +++ b/dbms/src/Processors/Formats/Impl/MsgPackRowOutputFormat.h @@ -0,0 +1,29 @@ +#pragma once + +#include +#include +#include +#include +#include + + +namespace DB +{ + +class MsgPackRowOutputFormat : public IRowOutputFormat +{ +public: + MsgPackRowOutputFormat(WriteBuffer & out_, const Block & header_, FormatFactory::WriteCallback callback, const FormatSettings & settings_); + + String getName() const override { return "MsgPackRowOutputFormat"; } + + void write(const Columns & columns, size_t row_num) override; + void writeField(const IColumn &, const IDataType &, size_t) override {} + void serializeField(const IColumn & column, DataTypePtr data_type, size_t row_num); + +private: + FormatSettings settings; + msgpack::packer packer; +}; + +} From 8f71c743457b5f86e1fd40bf871ca988e96ef6f7 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 19 Mar 2020 22:54:36 +0300 Subject: [PATCH 002/484] Added a test that checks that read from MergeTree with single thread is performed in order --- .../01201_read_single_thread_in_order.reference | 1 + .../01201_read_single_thread_in_order.sql | 16 ++++++++++++++++ 2 files changed, 17 insertions(+) create mode 100644 dbms/tests/queries/0_stateless/01201_read_single_thread_in_order.reference create mode 100644 dbms/tests/queries/0_stateless/01201_read_single_thread_in_order.sql diff --git a/dbms/tests/queries/0_stateless/01201_read_single_thread_in_order.reference b/dbms/tests/queries/0_stateless/01201_read_single_thread_in_order.reference new file mode 100644 index 00000000000..7660873d103 --- /dev/null +++ b/dbms/tests/queries/0_stateless/01201_read_single_thread_in_order.reference @@ -0,0 +1 @@ +[1] diff --git a/dbms/tests/queries/0_stateless/01201_read_single_thread_in_order.sql b/dbms/tests/queries/0_stateless/01201_read_single_thread_in_order.sql new file mode 100644 index 00000000000..7110131905f --- /dev/null +++ b/dbms/tests/queries/0_stateless/01201_read_single_thread_in_order.sql @@ -0,0 +1,16 @@ +DROP TABLE IF EXISTS t; + +CREATE TABLE t +( + number UInt64 +) +ENGINE = MergeTree +ORDER BY number +SETTINGS index_granularity = 128; + +INSERT INTO t SELECT number FROM numbers(10000000); + +SET max_threads = 1, max_block_size = 12345; +SELECT arrayDistinct(arrayPopFront(arrayDifference(groupArray(number)))) FROM t; + +DROP TABLE t; From 93daa16271220b4385522a6a46b06abe262dc584 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 19 Mar 2020 22:55:16 +0300 Subject: [PATCH 003/484] Better test --- .../queries/0_stateless/01201_read_single_thread_in_order.sql | 1 + 1 file changed, 1 insertion(+) diff --git a/dbms/tests/queries/0_stateless/01201_read_single_thread_in_order.sql b/dbms/tests/queries/0_stateless/01201_read_single_thread_in_order.sql index 7110131905f..bfe03192891 100644 --- a/dbms/tests/queries/0_stateless/01201_read_single_thread_in_order.sql +++ b/dbms/tests/queries/0_stateless/01201_read_single_thread_in_order.sql @@ -8,6 +8,7 @@ ENGINE = MergeTree ORDER BY number SETTINGS index_granularity = 128; +SET min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0; INSERT INTO t SELECT number FROM numbers(10000000); SET max_threads = 1, max_block_size = 12345; From 4159fa0382830aee5e934da2d48a267363807c45 Mon Sep 17 00:00:00 2001 From: alesapin Date: Tue, 24 Mar 2020 20:05:38 +0300 Subject: [PATCH 004/484] Preparation for rename --- 
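Note on the MsgPack format added in PATCH 001/484 above (annotation, not part of any patch): serializeField() drives msgpack-c's streaming packer, and the input format added later in this series (PATCH 009/484) drives the matching unpacker. Below is a minimal self-contained sketch of that round trip. It assumes only that the msgpack-c headers are on the include path and substitutes std::stringstream for ClickHouse's WriteBuffer/ReadBuffer; it illustrates the library API, not ClickHouse code.

#include <msgpack.hpp>
#include <cstring>
#include <iostream>
#include <sstream>

int main()
{
    std::stringstream buffer;
    msgpack::packer<std::stringstream> packer(buffer);

    packer.pack_uint32(42);            /// a numeric column value
    packer.pack_str(5);                /// strings: length header first...
    packer.pack_str_body("hello", 5);  /// ...then the raw bytes
    packer.pack_array(2);              /// arrays: size header, then elements
    packer.pack_int64(-1);
    packer.pack_int64(1);
    packer.pack_nil();                 /// a NULL from a Nullable column

    /// Streaming unpack, the same loop shape MsgPackRowInputFormat::readObject()
    /// uses below: feed raw bytes to the unpacker, then pull complete objects out.
    msgpack::unpacker unpacker;
    const std::string data = buffer.str();
    unpacker.reserve_buffer(data.size());
    std::memcpy(unpacker.buffer(), data.data(), data.size());
    unpacker.buffer_consumed(data.size());

    msgpack::object_handle handle;
    while (unpacker.next(handle))
        std::cout << handle.get() << '\n';  /// prints 42, "hello", [-1, 1], nil
}

PATCH 001 encodes DateTime as the MsgPack timestamp extension type (-1) via pack_ext()/pack_ext_body(); PATCH 009 later simplifies this to a plain integer encoding.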
dbms/src/Access/AccessType.h | 2 ++ .../Interpreters/InterpreterAlterQuery.cpp | 5 ++++ dbms/src/Parsers/ASTAlterQuery.cpp | 14 +++++++++++ dbms/src/Parsers/ASTAlterQuery.h | 5 ++++ dbms/src/Storages/AlterCommands.cpp | 18 ++++++++++++++- dbms/src/Storages/AlterCommands.h | 4 ++++ dbms/src/Storages/ColumnsDescription.cpp | 23 +++++++++++++++++++ dbms/src/Storages/ColumnsDescription.h | 3 +++ dbms/src/Storages/MergeTree/MergeTreeData.cpp | 4 ++++ dbms/src/Storages/MergeTree/MergeTreeData.h | 9 ++++++++ .../MergeTree/ReplicatedMergeTreeQueue.cpp | 15 ++++++++++++ .../MergeTree/ReplicatedMergeTreeQueue.h | 3 +++ dbms/src/Storages/StorageMergeTree.h | 2 ++ .../Storages/StorageReplicatedMergeTree.cpp | 5 ++++ .../src/Storages/StorageReplicatedMergeTree.h | 4 ++++ 15 files changed, 115 insertions(+), 1 deletion(-) diff --git a/dbms/src/Access/AccessType.h b/dbms/src/Access/AccessType.h index 27892076d59..29a289ac235 100644 --- a/dbms/src/Access/AccessType.h +++ b/dbms/src/Access/AccessType.h @@ -28,6 +28,7 @@ enum class AccessType ADD_COLUMN, DROP_COLUMN, MODIFY_COLUMN, + RENAME_COLUMN, COMMENT_COLUMN, CLEAR_COLUMN, ALTER_COLUMN, /// allow to execute ALTER {ADD|DROP|MODIFY...} COLUMN @@ -195,6 +196,7 @@ namespace impl ACCESS_TYPE_TO_KEYWORD_CASE(ADD_COLUMN); ACCESS_TYPE_TO_KEYWORD_CASE(DROP_COLUMN); ACCESS_TYPE_TO_KEYWORD_CASE(MODIFY_COLUMN); + ACCESS_TYPE_TO_KEYWORD_CASE(RENAME_COLUMN); ACCESS_TYPE_TO_KEYWORD_CASE(COMMENT_COLUMN); ACCESS_TYPE_TO_KEYWORD_CASE(CLEAR_COLUMN); ACCESS_TYPE_TO_KEYWORD_CASE(ALTER_COLUMN); diff --git a/dbms/src/Interpreters/InterpreterAlterQuery.cpp b/dbms/src/Interpreters/InterpreterAlterQuery.cpp index 315527765ef..298f3fc4097 100644 --- a/dbms/src/Interpreters/InterpreterAlterQuery.cpp +++ b/dbms/src/Interpreters/InterpreterAlterQuery.cpp @@ -280,6 +280,11 @@ AccessRightsElements InterpreterAlterQuery::getRequiredAccessForCommand(const AS required_access.emplace_back(AccessType::REFRESH_VIEW, database, table); break; } + case ASTAlterCommand::RENAME_COLUMN: + { + required_access.emplace_back(AccessType::RENAME_COLUMN, database, table); + break; + } case ASTAlterCommand::NO_TYPE: break; } diff --git a/dbms/src/Parsers/ASTAlterQuery.cpp b/dbms/src/Parsers/ASTAlterQuery.cpp index 50d751a9c3b..9ec2fad5768 100644 --- a/dbms/src/Parsers/ASTAlterQuery.cpp +++ b/dbms/src/Parsers/ASTAlterQuery.cpp @@ -56,6 +56,11 @@ ASTPtr ASTAlterCommand::clone() const res->values = values->clone(); res->children.push_back(res->values); } + if (rename_to) + { + res->rename_to = rename_to->clone(); + res->children.push_back(res->rename_to); + } return res; } @@ -285,6 +290,15 @@ void ASTAlterCommand::formatImpl( { settings.ostr << (settings.hilite ? hilite_keyword : "") << indent_str << "REFRESH " << (settings.hilite ? hilite_none : ""); } + else if (type == ASTAlterCommand::RENAME_COLUMN) + { + settings.ostr << (settings.hilite ? hilite_keyword : "") << indent_str << "RENAME COLUMN " << (if_exists ? "IF EXISTS " : "") + << (settings.hilite ? hilite_none : ""); + column->formatImpl(settings, state, frame); + + settings.ostr << (settings.hilite ? 
hilite_keyword : "") << " TO "; + rename_to->formatImpl(settings, state, frame); + } else throw Exception("Unexpected type of ALTER", ErrorCodes::UNEXPECTED_AST_STRUCTURE); } diff --git a/dbms/src/Parsers/ASTAlterQuery.h b/dbms/src/Parsers/ASTAlterQuery.h index de36394a9c3..85e9a4d7552 100644 --- a/dbms/src/Parsers/ASTAlterQuery.h +++ b/dbms/src/Parsers/ASTAlterQuery.h @@ -29,6 +29,7 @@ public: DROP_COLUMN, MODIFY_COLUMN, COMMENT_COLUMN, + RENAME_COLUMN, MODIFY_ORDER_BY, MODIFY_TTL, MATERIALIZE_TTL, @@ -69,6 +70,7 @@ public: /** The ADD COLUMN query here optionally stores the name of the column following AFTER * The DROP query stores the column name for deletion here + * Also used for RENAME COLUMN. */ ASTPtr column; @@ -155,6 +157,9 @@ public: String to_database; String to_table; + /// Target column name + ASTPtr rename_to; + String getID(char delim) const override { return "AlterCommand" + (delim + std::to_string(static_cast(type))); } ASTPtr clone() const override; diff --git a/dbms/src/Storages/AlterCommands.cpp b/dbms/src/Storages/AlterCommands.cpp index a02e5b5a879..4af0e3e49c9 100644 --- a/dbms/src/Storages/AlterCommands.cpp +++ b/dbms/src/Storages/AlterCommands.cpp @@ -231,10 +231,20 @@ std::optional AlterCommand::parse(const ASTAlterCommand * command_ else if (command_ast->type == ASTAlterCommand::MODIFY_QUERY) { AlterCommand command; + command.ast = command_ast->clone(); command.type = AlterCommand::MODIFY_QUERY; command.select = command_ast->select; return command; } + else if (command_ast->type == ASTAlterCommand::RENAME_COLUMN) + { + AlterCommand command; + command.ast = command_ast->clone(); + command.type = AlterCommand::RENAME_COLUMN; + command.column_name = command_ast->column->as().name; + command.rename_to = command_ast->rename_to->as().name; + return command; + } else return {}; } @@ -437,6 +447,10 @@ void AlterCommand::apply(StorageInMemoryMetadata & metadata) const settings_from_storage.push_back(change); } } + else if (type == RENAME_COLUMN) + { + metadata.columns.rename(column_name, rename_to); + } else throw Exception("Wrong parameter type in ALTER query", ErrorCodes::LOGICAL_ERROR); } @@ -519,7 +533,7 @@ bool AlterCommand::isRequireMutationStage(const StorageInMemoryMetadata & metada if (ignore) return false; - if (type == DROP_COLUMN || type == DROP_INDEX) + if (type == DROP_COLUMN || type == DROP_INDEX || type == RENAME_COLUMN) return true; if (type != MODIFY_COLUMN || data_type == nullptr) @@ -619,6 +633,8 @@ String alterTypeToString(const AlterCommand::Type type) return "MODIFY SETTING"; case AlterCommand::Type::MODIFY_QUERY: return "MODIFY QUERY"; + case AlterCommand::Type::RENAME_COLUMN: + return "RENAME COLUMN"; } __builtin_unreachable(); } diff --git a/dbms/src/Storages/AlterCommands.h b/dbms/src/Storages/AlterCommands.h index 886c8beaed9..be27ba6ac2b 100644 --- a/dbms/src/Storages/AlterCommands.h +++ b/dbms/src/Storages/AlterCommands.h @@ -35,6 +35,7 @@ struct AlterCommand MODIFY_TTL, MODIFY_SETTING, MODIFY_QUERY, + RENAME_COLUMN, }; Type type; @@ -96,6 +97,9 @@ struct AlterCommand /// For MODIFY_QUERY ASTPtr select = nullptr; + /// Target column name + String rename_to; + static std::optional parse(const ASTAlterCommand * command); void apply(StorageInMemoryMetadata & metadata) const; diff --git a/dbms/src/Storages/ColumnsDescription.cpp b/dbms/src/Storages/ColumnsDescription.cpp index 2b2281c9663..ea8217c5f18 100644 --- a/dbms/src/Storages/ColumnsDescription.cpp +++ b/dbms/src/Storages/ColumnsDescription.cpp @@ -1,4 +1,6 @@ #include + +#include #include 
#include #include @@ -195,6 +197,27 @@ void ColumnsDescription::remove(const String & column_name) list_it = columns.get<0>().erase(list_it); } +void ColumnsDescription::rename(const String & column_from, const String & column_to) +{ + auto range = getNameRange(columns, column_from); + + if (range.first == range.second) + throw Exception("There is no column " + column_from + " in table.", ErrorCodes::NO_SUCH_COLUMN_IN_TABLE); + + std::vector iterators; + for (auto list_it = range.first; list_it != range.second;) + { + iterators.push_back(*list_it); + list_it = columns.get<0>().erase(list_it); + } + + for (auto & col_desc : iterators) + { + boost::replace_all(col_desc.name, column_from, column_to); + add(col_desc); + } +} + void ColumnsDescription::flattenNested() { diff --git a/dbms/src/Storages/ColumnsDescription.h b/dbms/src/Storages/ColumnsDescription.h index f930b333577..b6315bfa6eb 100644 --- a/dbms/src/Storages/ColumnsDescription.h +++ b/dbms/src/Storages/ColumnsDescription.h @@ -57,6 +57,9 @@ public: /// `column_name` can be a Nested column name; void remove(const String & column_name); + /// TODO(alesap) + void rename(const String & column_from, const String & column_to); + void flattenNested(); /// TODO: remove, insert already flattened Nested columns. bool operator==(const ColumnsDescription & other) const { return columns == other.columns; } diff --git a/dbms/src/Storages/MergeTree/MergeTreeData.cpp b/dbms/src/Storages/MergeTree/MergeTreeData.cpp index 2279618c9a0..83442f4dff1 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeData.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeData.cpp @@ -3589,4 +3589,8 @@ bool MergeTreeData::canUsePolymorphicParts(const MergeTreeSettings & settings, S return true; } +MergeTreeData::AlterConversions MergeTreeData::getAlterConversionsForPart(const MergeTreeDataPartPtr /*part*/) const +{ + return AlterConversions{}; +} } diff --git a/dbms/src/Storages/MergeTree/MergeTreeData.h b/dbms/src/Storages/MergeTree/MergeTreeData.h index fbc42de5517..5e4fb5c8430 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeData.h +++ b/dbms/src/Storages/MergeTree/MergeTreeData.h @@ -33,6 +33,7 @@ namespace DB class MergeListEntry; class AlterCommands; class MergeTreePartsMover; +class MutationCommands; class ExpressionActions; using ExpressionActionsPtr = std::shared_ptr; @@ -124,6 +125,11 @@ public: STRONG_TYPEDEF(String, PartitionID) + struct AlterConversions + { + std::unordered_map rename_map; + }; + struct LessDataPart { using is_transparent = void; @@ -647,6 +653,8 @@ public: /// Reserves 0 bytes ReservationPtr makeEmptyReservationOnLargestDisk() { return getStoragePolicy()->makeEmptyReservationOnLargestDisk(); } + AlterConversions getAlterConversionsForPart(const MergeTreeDataPartPtr part) const; + MergeTreeDataFormatVersion format_version; Context & global_context; @@ -908,6 +916,7 @@ protected: /// mechanisms for parts locking virtual bool partIsAssignedToBackgroundOperation(const DataPartPtr & part) const = 0; + virtual MutationCommands getFirtsAlterMutationCommandsForPart(const DataPartPtr & part) const = 0; /// Moves part to specified space, used in ALTER ... MOVE ... 
queries bool movePartsToSpace(const DataPartsVector & parts, SpacePtr space); diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp b/dbms/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp index 73ea2098c71..1f5c5ea9a98 100644 --- a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp +++ b/dbms/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp @@ -1309,6 +1309,21 @@ ReplicatedMergeTreeMergePredicate ReplicatedMergeTreeQueue::getMergePredicate(zk } +MutationCommands ReplicatedMergeTreeQueue::getFirstAlterMutationCommandsForPart(const MergeTreeData::DataPartPtr & part) const +{ + std::lock_guard lock(state_mutex); + auto in_partition = mutations_by_partition.find(part->info.partition_id); + if (in_partition == mutations_by_partition.end()) + return MutationCommands{}; + + Int64 part_version = part->info.getDataVersion(); + for (auto [mutation_version, mutation_status] : in_partition->second) + if (mutation_version > part_version && mutation_status->entry->alter_version != -1) + return mutation_status->entry->commands; + + return MutationCommands{}; +} + MutationCommands ReplicatedMergeTreeQueue::getMutationCommands( const MergeTreeData::DataPartPtr & part, Int64 desired_mutation_version) const { diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h b/dbms/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h index 22d198b9f19..7a3c70023da 100644 --- a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h +++ b/dbms/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h @@ -331,6 +331,9 @@ public: MutationCommands getMutationCommands(const MergeTreeData::DataPartPtr & part, Int64 desired_mutation_version) const; + /// TODO(alesap) + MutationCommands getFirstAlterMutationCommandsForPart(const MergeTreeData::DataPartPtr & part) const; + /// Mark finished mutations as done. If the function needs to be called again at some later time /// (because some mutations are probably done but we are not sure yet), returns true. 
bool tryFinalizeMutations(zkutil::ZooKeeperPtr zookeeper); diff --git a/dbms/src/Storages/StorageMergeTree.h b/dbms/src/Storages/StorageMergeTree.h index 93d7ac89832..bfbf4d6ab7e 100644 --- a/dbms/src/Storages/StorageMergeTree.h +++ b/dbms/src/Storages/StorageMergeTree.h @@ -164,6 +164,8 @@ protected: const MergingParams & merging_params_, std::unique_ptr settings_, bool has_force_restore_data_flag); + + MutationCommands getFirtsAlterMutationCommandsForPart(const DataPartPtr & /* part */) const override { return {}; } }; } diff --git a/dbms/src/Storages/StorageReplicatedMergeTree.cpp b/dbms/src/Storages/StorageReplicatedMergeTree.cpp index 68cc98cb1b9..2e70d9037f0 100644 --- a/dbms/src/Storages/StorageReplicatedMergeTree.cpp +++ b/dbms/src/Storages/StorageReplicatedMergeTree.cpp @@ -5297,4 +5297,9 @@ StorageReplicatedMergeTree::getMetadataFromSharedZookeeper(const String & metada } +MutationCommands StorageReplicatedMergeTree::getFirtsAlterMutationCommandsForPart(const DataPartPtr & part) const +{ + return queue.getFirstAlterMutationCommandsForPart(part); +} + } diff --git a/dbms/src/Storages/StorageReplicatedMergeTree.h b/dbms/src/Storages/StorageReplicatedMergeTree.h index f6483baf353..3ec72d2f7b6 100644 --- a/dbms/src/Storages/StorageReplicatedMergeTree.h +++ b/dbms/src/Storages/StorageReplicatedMergeTree.h @@ -527,6 +527,9 @@ private: StorageInMemoryMetadata getMetadataFromSharedZookeeper(const String & metadata_str, const String & columns_str) const; + + MutationCommands getFirtsAlterMutationCommandsForPart(const DataPartPtr & part) const override; + protected: /** If not 'attach', either creates a new table in ZK, or adds a replica to an existing table. */ @@ -542,6 +545,7 @@ protected: const MergingParams & merging_params_, std::unique_ptr settings_, bool has_force_restore_data_flag); + }; From 4837257bca175f52ad9c2b5530229b095a44c05c Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Wed, 25 Mar 2020 15:10:24 +0300 Subject: [PATCH 005/484] Force read in order of parts using single thread. --- .../Storages/MergeTree/MergeTreeDataSelectExecutor.cpp | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp b/dbms/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp index 0b87b241d85..392708b45f2 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp @@ -61,6 +61,7 @@ namespace std #include #include #include +#include namespace ProfileEvents { @@ -802,6 +803,15 @@ Pipes MergeTreeDataSelectExecutor::spreadMarkRangesAmongStreams( res.emplace_back(std::move(source)); } + + /// Use ConcatProcessor to concat sources together. + /// It is needed to read in parts order (and so in PK order) if single thread is used. + if (res.size() > 1) + { + auto concat = std::make_shared(res.front().getHeader(), res.size()); + Pipe pipe(std::move(res), std::move(concat)); + res = { std::move(pipe) }; + } } return res; From 333ac3f8dbfd27839ebc0c965bd53c302749b087 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Wed, 25 Mar 2020 15:15:51 +0300 Subject: [PATCH 006/484] Fix build. 
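Annotation on PATCH 005/484 above (not part of any patch): wrapping the per-part sources in a single ConcatProcessor makes a single-threaded read drain the parts strictly one after another instead of interleaving blocks from all of them, which is what keeps rows in primary-key order; this is exactly the property the test from PATCH 002/484 asserts with arrayDistinct(arrayPopFront(arrayDifference(groupArray(number)))). A conceptual stand-alone sketch of why concatenation preserves order across individually sorted parts, with plain std::vector standing in for the pipes:

#include <iostream>
#include <vector>

int main()
{
    /// Three "parts", each internally sorted by the primary key.
    const std::vector<std::vector<int>> parts = {{1, 2, 3}, {4, 5, 6}, {7, 8}};

    /// Concatenated single-threaded read: exhaust one part before starting
    /// the next, so the global stream stays sorted.
    for (const auto & part : parts)
        for (int row : part)
            std::cout << row << ' ';
    std::cout << '\n';  /// 1 2 3 4 5 6 7 8

    /// An interleaved read (one block from each part in turn) would emit
    /// 1 4 7 2 5 8 3 6: each part is still sorted, but global order is lost.
}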
--- dbms/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp b/dbms/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp index 392708b45f2..7ff2a454eec 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp @@ -810,7 +810,8 @@ Pipes MergeTreeDataSelectExecutor::spreadMarkRangesAmongStreams( { auto concat = std::make_shared(res.front().getHeader(), res.size()); Pipe pipe(std::move(res), std::move(concat)); - res = { std::move(pipe) }; + res = {}; + res.emplace_back(std::move(pipe)); } } From 68f8343af633592a9fb61d5c4c6a5ceb7857bcfb Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Wed, 25 Mar 2020 15:17:11 +0300 Subject: [PATCH 007/484] Fix build. --- dbms/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp b/dbms/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp index 7ff2a454eec..ff80fa56686 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp @@ -810,7 +810,7 @@ Pipes MergeTreeDataSelectExecutor::spreadMarkRangesAmongStreams( { auto concat = std::make_shared(res.front().getHeader(), res.size()); Pipe pipe(std::move(res), std::move(concat)); - res = {}; + res = Pipes(); res.emplace_back(std::move(pipe)); } } From 41feef19016476885f41544f41317676124c8ede Mon Sep 17 00:00:00 2001 From: alesapin Date: Wed, 25 Mar 2020 21:44:08 +0300 Subject: [PATCH 008/484] Small progress on alter rename --- dbms/src/Access/AccessType.h | 2 -- .../Interpreters/InterpreterAlterQuery.cpp | 1 - dbms/src/Parsers/ParserAlterQuery.cpp | 18 +++++++++++++++ dbms/src/Parsers/ParserAlterQuery.h | 1 + dbms/src/Storages/AlterCommands.cpp | 2 +- dbms/src/Storages/ColumnsDescription.cpp | 21 +++++------------ .../Storages/MergeTree/IMergeTreeReader.cpp | 6 +++++ .../src/Storages/MergeTree/IMergeTreeReader.h | 4 ++++ dbms/src/Storages/MergeTree/MergeTreeData.cpp | 11 +++++++-- dbms/src/Storages/MutationCommands.cpp | 9 ++++++++ dbms/src/Storages/MutationCommands.h | 6 ++++- .../01213_alter_rename_column.reference | 2 ++ .../0_stateless/01213_alter_rename_column.sql | 23 +++++++++++++++++++ 13 files changed, 84 insertions(+), 22 deletions(-) create mode 100644 dbms/tests/queries/0_stateless/01213_alter_rename_column.reference create mode 100644 dbms/tests/queries/0_stateless/01213_alter_rename_column.sql diff --git a/dbms/src/Access/AccessType.h b/dbms/src/Access/AccessType.h index 29a289ac235..27892076d59 100644 --- a/dbms/src/Access/AccessType.h +++ b/dbms/src/Access/AccessType.h @@ -28,7 +28,6 @@ enum class AccessType ADD_COLUMN, DROP_COLUMN, MODIFY_COLUMN, - RENAME_COLUMN, COMMENT_COLUMN, CLEAR_COLUMN, ALTER_COLUMN, /// allow to execute ALTER {ADD|DROP|MODIFY...} COLUMN @@ -196,7 +195,6 @@ namespace impl ACCESS_TYPE_TO_KEYWORD_CASE(ADD_COLUMN); ACCESS_TYPE_TO_KEYWORD_CASE(DROP_COLUMN); ACCESS_TYPE_TO_KEYWORD_CASE(MODIFY_COLUMN); - ACCESS_TYPE_TO_KEYWORD_CASE(RENAME_COLUMN); ACCESS_TYPE_TO_KEYWORD_CASE(COMMENT_COLUMN); ACCESS_TYPE_TO_KEYWORD_CASE(CLEAR_COLUMN); ACCESS_TYPE_TO_KEYWORD_CASE(ALTER_COLUMN); diff --git a/dbms/src/Interpreters/InterpreterAlterQuery.cpp b/dbms/src/Interpreters/InterpreterAlterQuery.cpp index 298f3fc4097..ff2afeab8ef 100644 --- 
a/dbms/src/Interpreters/InterpreterAlterQuery.cpp +++ b/dbms/src/Interpreters/InterpreterAlterQuery.cpp @@ -282,7 +282,6 @@ AccessRightsElements InterpreterAlterQuery::getRequiredAccessForCommand(const AS } case ASTAlterCommand::RENAME_COLUMN: { - required_access.emplace_back(AccessType::RENAME_COLUMN, database, table); break; } case ASTAlterCommand::NO_TYPE: break; diff --git a/dbms/src/Parsers/ParserAlterQuery.cpp b/dbms/src/Parsers/ParserAlterQuery.cpp index 1b647cf067c..3df946ba485 100644 --- a/dbms/src/Parsers/ParserAlterQuery.cpp +++ b/dbms/src/Parsers/ParserAlterQuery.cpp @@ -27,6 +27,7 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected ParserKeyword s_drop_column("DROP COLUMN"); ParserKeyword s_clear_column("CLEAR COLUMN"); ParserKeyword s_modify_column("MODIFY COLUMN"); + ParserKeyword s_rename_column("RENAME COLUMN"); ParserKeyword s_comment_column("COMMENT COLUMN"); ParserKeyword s_modify_order_by("MODIFY ORDER BY"); ParserKeyword s_modify_ttl("MODIFY TTL"); @@ -77,6 +78,7 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected ParserKeyword s_delete_where("DELETE WHERE"); ParserKeyword s_update("UPDATE"); ParserKeyword s_where("WHERE"); + ParserKeyword s_to("TO"); ParserCompoundIdentifier parser_name; ParserStringLiteral parser_string_literal; @@ -121,6 +123,22 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected command->type = ASTAlterCommand::ADD_COLUMN; } + else if (s_rename_column.ignore(pos, expected)) + { + if (s_if_not_exists.ignore(pos, expected)) + command->if_not_exists = true; + + if (!parser_name.parse(pos, command->column, expected)) + return false; + + if (!s_to.ignore(pos, expected)) + return false; + + if (!parser_name.parse(pos, command->rename_to, expected)) + return false; + + command->type = ASTAlterCommand::RENAME_COLUMN; + } else if (s_drop_partition.ignore(pos, expected)) { if (!parser_partition.parse(pos, command->partition, expected)) diff --git a/dbms/src/Parsers/ParserAlterQuery.h b/dbms/src/Parsers/ParserAlterQuery.h index 61a25b9b387..a0981c77ca6 100644 --- a/dbms/src/Parsers/ParserAlterQuery.h +++ b/dbms/src/Parsers/ParserAlterQuery.h @@ -12,6 +12,7 @@ namespace DB * [DROP COLUMN [IF EXISTS] col_to_drop, ...] * [CLEAR COLUMN [IF EXISTS] col_to_clear [IN PARTITION partition],] * [MODIFY COLUMN [IF EXISTS] col_to_modify type, ...] + * [RENAME COLUMN [IF EXISTS] col_name TO col_name] * [MODIFY PRIMARY KEY (a, b, c...)] * [MODIFY SETTING setting_name=setting_value, ...] 
* [COMMENT COLUMN [IF EXISTS] col_name string] diff --git a/dbms/src/Storages/AlterCommands.cpp b/dbms/src/Storages/AlterCommands.cpp index 4af0e3e49c9..f20a3f58382 100644 --- a/dbms/src/Storages/AlterCommands.cpp +++ b/dbms/src/Storages/AlterCommands.cpp @@ -533,7 +533,7 @@ bool AlterCommand::isRequireMutationStage(const StorageInMemoryMetadata & metada if (ignore) return false; - if (type == DROP_COLUMN || type == DROP_INDEX || type == RENAME_COLUMN) + if (type == DROP_COLUMN || type == DROP_INDEX) return true; if (type != MODIFY_COLUMN || data_type == nullptr) diff --git a/dbms/src/Storages/ColumnsDescription.cpp b/dbms/src/Storages/ColumnsDescription.cpp index ea8217c5f18..787fa2b739a 100644 --- a/dbms/src/Storages/ColumnsDescription.cpp +++ b/dbms/src/Storages/ColumnsDescription.cpp @@ -199,23 +199,14 @@ void ColumnsDescription::remove(const String & column_name) void ColumnsDescription::rename(const String & column_from, const String & column_to) { - auto range = getNameRange(columns, column_from); + auto it = columns.get<1>().find(column_from); + if (it == columns.get<1>().end()) + throw Exception("Cannot find column " + column_from + " in ColumnsDescription", ErrorCodes::LOGICAL_ERROR); - if (range.first == range.second) - throw Exception("There is no column " + column_from + " in table.", ErrorCodes::NO_SUCH_COLUMN_IN_TABLE); - - std::vector iterators; - for (auto list_it = range.first; list_it != range.second;) + columns.get<1>().modify_key(it, [&column_to] (String & old_name) { - iterators.push_back(*list_it); - list_it = columns.get<0>().erase(list_it); - } - - for (auto & col_desc : iterators) - { - boost::replace_all(col_desc.name, column_from, column_to); - add(col_desc); - } + old_name = column_to; + }); } diff --git a/dbms/src/Storages/MergeTree/IMergeTreeReader.cpp b/dbms/src/Storages/MergeTree/IMergeTreeReader.cpp index 91d52cfa1fc..7ccbe71938c 100644 --- a/dbms/src/Storages/MergeTree/IMergeTreeReader.cpp +++ b/dbms/src/Storages/MergeTree/IMergeTreeReader.cpp @@ -30,6 +30,7 @@ IMergeTreeReader::IMergeTreeReader(const MergeTreeData::DataPartPtr & data_part_ , columns(columns_), uncompressed_cache(uncompressed_cache_), mark_cache(mark_cache_) , settings(settings_), storage(data_part_->storage) , all_mark_ranges(all_mark_ranges_) + , alter_conversions(storage.getAlterConversionsForPart(data_part)) { } @@ -62,6 +63,11 @@ static bool arrayHasNoElementsRead(const IColumn & column) } +//void IMergeTreeReader::renameColumnsWithAlters(Columns & res_columns) +//{ +// +//} + void IMergeTreeReader::fillMissingColumns(Columns & res_columns, bool & should_evaluate_missing_defaults, size_t num_rows) { try diff --git a/dbms/src/Storages/MergeTree/IMergeTreeReader.h b/dbms/src/Storages/MergeTree/IMergeTreeReader.h index 75fefe235f8..f74530f2d5f 100644 --- a/dbms/src/Storages/MergeTree/IMergeTreeReader.h +++ b/dbms/src/Storages/MergeTree/IMergeTreeReader.h @@ -48,6 +48,9 @@ public: /// try to perform conversions of columns. 
void performRequiredConversions(Columns & res_columns); + /// TODO(alesap) + void renameColumnsWithAlters(Columns & res_columns); + const NamesAndTypesList & getColumns() const { return columns; } size_t numColumnsInResult() const { return columns.size(); } @@ -78,6 +81,7 @@ protected: MarkRanges all_mark_ranges; friend class MergeTreeRangeReader::DelayedStream; + MergeTreeData::AlterConversions alter_conversions; }; } diff --git a/dbms/src/Storages/MergeTree/MergeTreeData.cpp b/dbms/src/Storages/MergeTree/MergeTreeData.cpp index 83442f4dff1..7d07540741e 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeData.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeData.cpp @@ -3589,8 +3589,15 @@ bool MergeTreeData::canUsePolymorphicParts(const MergeTreeSettings & settings, S return true; } -MergeTreeData::AlterConversions MergeTreeData::getAlterConversionsForPart(const MergeTreeDataPartPtr /*part*/) const +MergeTreeData::AlterConversions MergeTreeData::getAlterConversionsForPart(const MergeTreeDataPartPtr part) const { - return AlterConversions{}; + MutationCommands commands = getFirtsAlterMutationCommandsForPart(part); + + AlterConversions result{}; + for (const auto & command : commands) + if (command.type == MutationCommand::Type::RENAME_COLUMN) + result.rename_map[command.column_name] = command.rename_to; + + return result; } } diff --git a/dbms/src/Storages/MutationCommands.cpp b/dbms/src/Storages/MutationCommands.cpp index 8c66646abed..d48f8525162 100644 --- a/dbms/src/Storages/MutationCommands.cpp +++ b/dbms/src/Storages/MutationCommands.cpp @@ -94,6 +94,15 @@ std::optional MutationCommand::parse(ASTAlterCommand * command, res.clear = true; return res; } + else if (parse_alter_commands && command->type == ASTAlterCommand::RENAME_COLUMN) + { + MutationCommand res; + res.ast = command->ptr(); + res.type = MutationCommand::Type::RENAME_COLUMN; + res.column_name = command->column->as().name; + res.rename_to = command->rename_to->as().name; + return res; + } else if (command->type == ASTAlterCommand::MATERIALIZE_TTL) { MutationCommand res; diff --git a/dbms/src/Storages/MutationCommands.h b/dbms/src/Storages/MutationCommands.h index f006575a9b8..6fa4f7fb641 100644 --- a/dbms/src/Storages/MutationCommands.h +++ b/dbms/src/Storages/MutationCommands.h @@ -31,7 +31,8 @@ struct MutationCommand READ_COLUMN, DROP_COLUMN, DROP_INDEX, - MATERIALIZE_TTL + MATERIALIZE_TTL, + RENAME_COLUMN, }; Type type = EMPTY; @@ -53,6 +54,9 @@ struct MutationCommand /// We need just clear column, not drop from metadata. 
bool clear = false; + /// Column rename_to + String rename_to; + /// If parse_alter_commands, than consider more Alter commands as mutation commands static std::optional parse(ASTAlterCommand * command, bool parse_alter_commands = false); }; diff --git a/dbms/tests/queries/0_stateless/01213_alter_rename_column.reference b/dbms/tests/queries/0_stateless/01213_alter_rename_column.reference new file mode 100644 index 00000000000..6ed281c757a --- /dev/null +++ b/dbms/tests/queries/0_stateless/01213_alter_rename_column.reference @@ -0,0 +1,2 @@ +1 +1 diff --git a/dbms/tests/queries/0_stateless/01213_alter_rename_column.sql b/dbms/tests/queries/0_stateless/01213_alter_rename_column.sql new file mode 100644 index 00000000000..efd485ef89b --- /dev/null +++ b/dbms/tests/queries/0_stateless/01213_alter_rename_column.sql @@ -0,0 +1,23 @@ +DROP TABLE IF EXISTS table_for_rename; + +CREATE TABLE table_for_rename +( + date Date, + key UInt64, + value1 String, + value2 String, + value3 String +) +ENGINE = MergeTree() +PARTITION BY date +ORDER BY key; + +INSERT INTO table_for_rename SELECT toDate('2019-10-01') + number % 3, number, toString(number), toString(number), toString(number) from numbers(9); + +SELECT value1 FROM table_for_rename WHERE key = 1; + +ALTER TABLE table_for_rename RENAME COLUMN value1 to renamed_value1; + +SELECT renamed_value1 FROM table_for_rename WHERE key = 1; + +DROP TABLE IF EXISTS table_for_rename; From b02636f9163f163a15e99d12084636d8e5cea9ee Mon Sep 17 00:00:00 2001 From: Avogar Date: Thu, 26 Mar 2020 19:33:00 +0300 Subject: [PATCH 009/484] Add MsgPackRowInputFormat, msgpack-c contrib and tests. --- .gitmodules | 3 + CMakeLists.txt | 1 + cmake/find/msgpack.cmake | 2 + contrib/msgpack-c | 1 + dbms/CMakeLists.txt | 2 + dbms/src/Formats/FormatFactory.cpp | 1 + dbms/src/Formats/FormatFactory.h | 3 +- .../Formats/Impl/MsgPackRowInputFormat.cpp | 178 ++++++++++++++++++ .../Formats/Impl/MsgPackRowInputFormat.h | 28 +++ .../Formats/Impl/MsgPackRowOutputFormat.cpp | 31 +-- .../Formats/Impl/MsgPackRowOutputFormat.h | 3 +- .../01098_msgpack_format.reference | 8 + .../0_stateless/01098_msgpack_format.sh | 31 +++ .../0_stateless/data_msgpack/all_types.msgpk | Bin 0 -> 200 bytes .../data_msgpack/nested_arrays.msgpk | 1 + 15 files changed, 269 insertions(+), 24 deletions(-) create mode 100644 cmake/find/msgpack.cmake create mode 160000 contrib/msgpack-c create mode 100644 dbms/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp create mode 100644 dbms/src/Processors/Formats/Impl/MsgPackRowInputFormat.h create mode 100644 dbms/tests/queries/0_stateless/01098_msgpack_format.reference create mode 100755 dbms/tests/queries/0_stateless/01098_msgpack_format.sh create mode 100644 dbms/tests/queries/0_stateless/data_msgpack/all_types.msgpk create mode 100644 dbms/tests/queries/0_stateless/data_msgpack/nested_arrays.msgpk diff --git a/.gitmodules b/.gitmodules index 29b2ada63ea..c6afed3a5e2 100644 --- a/.gitmodules +++ b/.gitmodules @@ -148,3 +148,6 @@ path = contrib/avro url = https://github.com/ClickHouse-Extras/avro.git ignore = untracked +[submodule "contrib/msgpack-c"] + path = contrib/msgpack-c + url = https://github.com/msgpack/msgpack-c diff --git a/CMakeLists.txt b/CMakeLists.txt index 9513caa8eee..d79cf152e15 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -345,6 +345,7 @@ include (cmake/find/rapidjson.cmake) include (cmake/find/fastops.cmake) include (cmake/find/orc.cmake) include (cmake/find/avro.cmake) +include (cmake/find/msgpack.cmake) find_contrib_lib(cityhash) 
find_contrib_lib(farmhash) diff --git a/cmake/find/msgpack.cmake b/cmake/find/msgpack.cmake new file mode 100644 index 00000000000..a1f18bb1eb0 --- /dev/null +++ b/cmake/find/msgpack.cmake @@ -0,0 +1,2 @@ +set(MSGPACK_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/msgpack-c/include) +message(STATUS "Using msgpack: ${MSGPACK_INCLUDE_DIR}") diff --git a/contrib/msgpack-c b/contrib/msgpack-c new file mode 160000 index 00000000000..46684265d50 --- /dev/null +++ b/contrib/msgpack-c @@ -0,0 +1 @@ +Subproject commit 46684265d50b5d1b062d4c5c428ba08462844b1d diff --git a/dbms/CMakeLists.txt b/dbms/CMakeLists.txt index aa10b0ed2ca..b13958a7b4d 100644 --- a/dbms/CMakeLists.txt +++ b/dbms/CMakeLists.txt @@ -574,6 +574,8 @@ target_include_directories (clickhouse_common_io PUBLIC ${DBMS_INCLUDE_DIR}) target_include_directories (clickhouse_common_io SYSTEM BEFORE PUBLIC ${DOUBLE_CONVERSION_INCLUDE_DIR}) +target_include_directories (clickhouse_common_io SYSTEM BEFORE PUBLIC ${MSGPACK_INCLUDE_DIR}) + add_subdirectory (programs) add_subdirectory (tests) diff --git a/dbms/src/Formats/FormatFactory.cpp b/dbms/src/Formats/FormatFactory.cpp index 8b6034dad9d..55cd4b1f368 100644 --- a/dbms/src/Formats/FormatFactory.cpp +++ b/dbms/src/Formats/FormatFactory.cpp @@ -352,6 +352,7 @@ FormatFactory::FormatFactory() registerOutputFormatProcessorAvro(*this); registerInputFormatProcessorTemplate(*this); registerOutputFormatProcessorTemplate(*this); + registerInputFormatProcessorMsgPack(*this); registerOutputFormatProcessorMsgPack(*this); registerFileSegmentationEngineTabSeparated(*this); diff --git a/dbms/src/Formats/FormatFactory.h b/dbms/src/Formats/FormatFactory.h index 68ba2155642..705bd2039fc 100644 --- a/dbms/src/Formats/FormatFactory.h +++ b/dbms/src/Formats/FormatFactory.h @@ -172,7 +172,8 @@ void registerInputFormatProcessorAvro(FormatFactory & factory); void registerOutputFormatProcessorAvro(FormatFactory & factory); void registerInputFormatProcessorTemplate(FormatFactory & factory); void registerOutputFormatProcessorTemplate(FormatFactory & factory); -void registerOutputFormatProcessorMsgPack(FormatFactory &factory); +void registerInputFormatProcessorMsgPack(FormatFactory & factory); +void registerOutputFormatProcessorMsgPack(FormatFactory & factory); /// File Segmentation Engines for parallel reading diff --git a/dbms/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp b/dbms/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp new file mode 100644 index 00000000000..59a8d176f32 --- /dev/null +++ b/dbms/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp @@ -0,0 +1,178 @@ +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int ILLEGAL_COLUMN; + extern const int INCORRECT_DATA; +} + +MsgPackRowInputFormat::MsgPackRowInputFormat(const Block & header_, ReadBuffer & in_, Params params_) + : IRowInputFormat(header_, in_, std::move(params_)), data_types(header_.getDataTypes()) {} + +bool MsgPackRowInputFormat::readObject() +{ + if (in.eof() && unpacker.nonparsed_size() == 0) + return false; + while (!unpacker.next(object_handle)) + { + if (in.eof()) + throw Exception("Unexpected end of file while parsing MsgPack object.", ErrorCodes::INCORRECT_DATA); + unpacker.reserve_buffer(in.available()); + memcpy(unpacker.buffer(), in.position(), in.available()); + unpacker.buffer_consumed(in.available()); + in.position() += in.available(); + } + return true; +} + +void 
MsgPackRowInputFormat::insertObject(IColumn & column, DataTypePtr data_type, const msgpack::object & object) +{ + switch (data_type->getTypeId()) + { + case TypeIndex::UInt8: + { + assert_cast(column).insertValue(object.as()); + return; + } + case TypeIndex::Date: [[fallthrough]]; + case TypeIndex::UInt16: + { + assert_cast(column).insertValue(object.as()); + return; + } + case TypeIndex::DateTime: [[fallthrough]]; + case TypeIndex::UInt32: + { + assert_cast(column).insertValue(object.as()); + return; + } + case TypeIndex::UInt64: + { + assert_cast(column).insertValue(object.as()); + return; + } + case TypeIndex::Int8: + { + assert_cast(column).insertValue(object.as()); + return; + } + case TypeIndex::Int16: + { + assert_cast(column).insertValue(object.as()); + return; + } + case TypeIndex::Int32: + { + assert_cast(column).insertValue(object.as()); + return; + } + case TypeIndex::Int64: + { + assert_cast(column).insertValue(object.as()); + return; + } + case TypeIndex::Float32: + { + assert_cast(column).insertValue(object.as()); + return; + } + case TypeIndex::Float64: + { + assert_cast(column).insertValue(object.as()); + return; + } + case TypeIndex::DateTime64: + { + assert_cast(column).insertValue(object.as()); + return; + } + case TypeIndex::FixedString: [[fallthrough]]; + case TypeIndex::String: + { + String str = object.as(); + column.insertData(str.data(), str.size()); + return; + } + case TypeIndex::Array: + { + msgpack::object_array object_array = object.via.array; + auto nested_type = assert_cast(*data_type).getNestedType(); + ColumnArray & column_array = assert_cast(column); + ColumnArray::Offsets & offsets = column_array.getOffsets(); + IColumn & nested_column = column_array.getData(); + for (size_t i = 0; i != object_array.size; ++i) + { + insertObject(nested_column, nested_type, object_array.ptr[i]); + } + offsets.push_back(offsets.back() + object_array.size); + return; + } + case TypeIndex::Nullable: + { + auto nested_type = removeNullable(data_type); + ColumnNullable & column_nullable = assert_cast(column); + if (object.type == msgpack::type::NIL) + column_nullable.insertDefault(); + else + insertObject(column_nullable.getNestedColumn(), nested_type, object); + return; + } + case TypeIndex::Nothing: + { + // Nothing to insert, MsgPack object is nil. 
+ return; + } + default: + break; + } + throw Exception("Type " + data_type->getName() + " is not supported for MsgPack input format", ErrorCodes::ILLEGAL_COLUMN); +} + +bool MsgPackRowInputFormat::readRow(MutableColumns & columns, RowReadExtension &) +{ + size_t column_index = 0; + bool has_more_data = true; + for (; column_index != columns.size(); ++column_index) + { + has_more_data = readObject(); + if (!has_more_data) + break; + insertObject(*columns[column_index], data_types[column_index], object_handle.get()); + } + if (!has_more_data) + { + if (column_index != 0) + throw Exception("Not enough values to complete the row.", ErrorCodes::INCORRECT_DATA); + return false; + } + return true; +} + +void registerInputFormatProcessorMsgPack(FormatFactory & factory) { + factory.registerInputFormatProcessor("MsgPack", []( + ReadBuffer &buf, + const Block &sample, + const RowInputFormatParams &params, + const FormatSettings &) { + return std::make_shared(sample, buf, params); + }); +} + +} \ No newline at end of file diff --git a/dbms/src/Processors/Formats/Impl/MsgPackRowInputFormat.h b/dbms/src/Processors/Formats/Impl/MsgPackRowInputFormat.h new file mode 100644 index 00000000000..b2f14fca6f6 --- /dev/null +++ b/dbms/src/Processors/Formats/Impl/MsgPackRowInputFormat.h @@ -0,0 +1,28 @@ +#pragma once + +#include +#include +#include + +namespace DB +{ + +class ReadBuffer; + +class MsgPackRowInputFormat : public IRowInputFormat +{ +public: + MsgPackRowInputFormat(const Block & header_, ReadBuffer & in_, Params params_); + + bool readRow(MutableColumns & columns, RowReadExtension & ext) override; + String getName() const override { return "MsgPackRowInputFormat"; } +private: + bool readObject(); + void insertObject(IColumn & column, DataTypePtr type, const msgpack::object & object); + + DataTypes data_types; + msgpack::unpacker unpacker; + msgpack::object_handle object_handle; +}; + +} \ No newline at end of file diff --git a/dbms/src/Processors/Formats/Impl/MsgPackRowOutputFormat.cpp b/dbms/src/Processors/Formats/Impl/MsgPackRowOutputFormat.cpp index 061f4228158..b4cb7185406 100644 --- a/dbms/src/Processors/Formats/Impl/MsgPackRowOutputFormat.cpp +++ b/dbms/src/Processors/Formats/Impl/MsgPackRowOutputFormat.cpp @@ -4,6 +4,7 @@ #include #include +#include #include #include @@ -20,8 +21,8 @@ namespace ErrorCodes extern const int ILLEGAL_COLUMN; } -MsgPackRowOutputFormat::MsgPackRowOutputFormat(WriteBuffer & out_, const Block & header_, FormatFactory::WriteCallback callback, const FormatSettings & settings_) - : IRowOutputFormat(header_, out_, callback), settings(settings_), packer(out_) {} +MsgPackRowOutputFormat::MsgPackRowOutputFormat(WriteBuffer & out_, const Block & header_, FormatFactory::WriteCallback callback) + : IRowOutputFormat(header_, out_, callback), packer(out_) {} void MsgPackRowOutputFormat::serializeField(const IColumn & column, DataTypePtr data_type, size_t row_num) { @@ -32,11 +33,13 @@ void MsgPackRowOutputFormat::serializeField(const IColumn & column, DataTypePtr packer.pack_uint8(assert_cast(column).getElement(row_num)); return; } + case TypeIndex::Date: [[fallthrough]]; case TypeIndex::UInt16: { packer.pack_uint16(assert_cast(column).getElement(row_num)); return; } + case TypeIndex::DateTime: [[fallthrough]]; case TypeIndex::UInt32: { packer.pack_uint32(assert_cast(column).getElement(row_num)); @@ -77,19 +80,12 @@ void MsgPackRowOutputFormat::serializeField(const IColumn & column, DataTypePtr packer.pack_double(assert_cast(column).getElement(row_num)); return; } - case 
TypeIndex::Date: + case TypeIndex::DateTime64: { - packer.pack_uint16(assert_cast(column).getElement(row_num)); - return; - } - case TypeIndex::DateTime: - { - UInt32 datetime = assert_cast(column).getElement(row_num); - // Timestamp extension type in MsgPack is -1. - packer.pack_ext(sizeof(datetime), -1); - packer.pack_ext_body(reinterpret_cast(&datetime), sizeof(datetime)); + packer.pack_uint64(assert_cast(column).getElement(row_num)); return; } + case TypeIndex::FixedString: [[fallthrough]]; case TypeIndex::String: { const StringRef & string = assert_cast(column).getDataAt(row_num); @@ -97,13 +93,6 @@ void MsgPackRowOutputFormat::serializeField(const IColumn & column, DataTypePtr packer.pack_str_body(string.data, string.size); return; } - case TypeIndex::FixedString: - { - const StringRef & string = assert_cast(column).getDataAt(row_num); - packer.pack_str(string.size); - packer.pack_str_body(string.data, string.size); - return; - } case TypeIndex::Array: { auto nested_type = assert_cast(*data_type).getNestedType(); @@ -155,9 +144,9 @@ void registerOutputFormatProcessorMsgPack(FormatFactory & factory) WriteBuffer & buf, const Block & sample, FormatFactory::WriteCallback callback, - const FormatSettings & settings) + const FormatSettings &) { - return std::make_shared(buf, sample, callback, settings); + return std::make_shared(buf, sample, callback); }); } diff --git a/dbms/src/Processors/Formats/Impl/MsgPackRowOutputFormat.h b/dbms/src/Processors/Formats/Impl/MsgPackRowOutputFormat.h index 20df018b60c..351920eb7c8 100644 --- a/dbms/src/Processors/Formats/Impl/MsgPackRowOutputFormat.h +++ b/dbms/src/Processors/Formats/Impl/MsgPackRowOutputFormat.h @@ -13,7 +13,7 @@ namespace DB class MsgPackRowOutputFormat : public IRowOutputFormat { public: - MsgPackRowOutputFormat(WriteBuffer & out_, const Block & header_, FormatFactory::WriteCallback callback, const FormatSettings & settings_); + MsgPackRowOutputFormat(WriteBuffer & out_, const Block & header_, FormatFactory::WriteCallback callback); String getName() const override { return "MsgPackRowOutputFormat"; } @@ -22,7 +22,6 @@ public: void serializeField(const IColumn & column, DataTypePtr data_type, size_t row_num); private: - FormatSettings settings; msgpack::packer packer; }; diff --git a/dbms/tests/queries/0_stateless/01098_msgpack_format.reference b/dbms/tests/queries/0_stateless/01098_msgpack_format.reference new file mode 100644 index 00000000000..aab048208bc --- /dev/null +++ b/dbms/tests/queries/0_stateless/01098_msgpack_format.reference @@ -0,0 +1,8 @@ +255 65535 4294967295 100000000000 -128 -32768 -2147483648 -100000000000 2.02 10000.0000001 String 2021-12-19 2021-12-19 03:00:00 2021-12-19 03:00:00.000 [1,2,3,4,5] +4 1234 3244467295 500000000000 -1 -256 -14741221 -7000000000 100.1 14321.032141201 Another string 2024-10-04 2028-04-21 01:20:00 2021-12-19 03:14:51.123 [5,4,3,2,1] +42 42 42 42 42 42 42 42 42.42 42.42 42 1970-02-12 1970-01-01 03:00:42 1970-01-01 03:00:00.042 [42] +255 65535 4294967295 100000000000 -128 -32768 -2147483648 -100000000000 2.02 10000.0000001 String 2021-12-19 2021-12-19 03:00:00 2021-12-19 03:00:00.000 [1,2,3,4,5] +4 1234 3244467295 500000000000 -1 -256 -14741221 -7000000000 100.1 14321.032141201 Another string 2024-10-04 2028-04-21 01:20:00 2021-12-19 03:14:51.123 [5,4,3,2,1] +42 42 42 42 42 42 42 42 42.42 42.42 42 1970-02-12 1970-01-01 03:00:42 1970-01-01 03:00:00.042 [42] +[[1,2,3],[1001,2002],[3167]] [[['one'],['two']],[['three']],[['four'],['five']]] +[[1,2,3],[1001,2002],[3167]] 
[[['one'],['two']],[['three']],[['four'],['five']]] diff --git a/dbms/tests/queries/0_stateless/01098_msgpack_format.sh b/dbms/tests/queries/0_stateless/01098_msgpack_format.sh new file mode 100755 index 00000000000..2aaf2dfd527 --- /dev/null +++ b/dbms/tests/queries/0_stateless/01098_msgpack_format.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +. $CURDIR/../shell_config.sh + +$CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS msgpack"; +$CLICKHOUSE_CLIENT --query="CREATE TABLE msgpack (uint8 UInt8, uint16 UInt16, uint32 UInt32, uint64 UInt64, int8 Int8, int16 Int16, int32 Int32, int64 Int64, float Float32, double Float64, string String, date Date, datetime DateTime, datetime64 DateTime64, array Array(UInt32)) ENGINE = Memory"; + + +$CLICKHOUSE_CLIENT --query="INSERT INTO msgpack VALUES (255, 65535, 4294967295, 100000000000, -128, -32768, -2147483648, -100000000000, 2.02, 10000.0000001, 'String', 18980, 1639872000, 1639872000000, [1,2,3,4,5]), (4, 1234, 3244467295, 500000000000, -1, -256, -14741221, -7000000000, 100.1, 14321.032141201, 'Another string', 20000, 1839882000, 1639872891123, [5,4,3,2,1]),(42, 42, 42, 42, 42, 42, 42, 42, 42.42, 42.42, '42', 42, 42, 42, [42])"; + +$CLICKHOUSE_CLIENT --query="SELECT * FROM msgpack FORMAT MsgPack" > $CURDIR/data_msgpack/all_types.msgpk; + +cat $CURDIR/data_msgpack/all_types.msgpk | $CLICKHOUSE_CLIENT --query="INSERT INTO msgpack FORMAT MsgPack"; + +$CLICKHOUSE_CLIENT --query="SELECT * FROM msgpack"; + +$CLICKHOUSE_CLIENT --query="DROP TABLE msgpack"; + +$CLICKHOUSE_CLIENT --query="CREATE TABLE msgpack (array1 Array(Array(UInt32)), array2 Array(Array(Array(String)))) ENGINE = Memory"; + +$CLICKHOUSE_CLIENT --query="INSERT INTO msgpack VALUES ([[1,2,3], [1001, 2002], [3167]], [[['one'], ['two']], [['three']],[['four'], ['five']]])"; + +$CLICKHOUSE_CLIENT --query="SELECT * FROM msgpack FORMAT MsgPack" > $CURDIR/data_msgpack/nested_arrays.msgpk; + +cat $CURDIR/data_msgpack/nested_arrays.msgpk | $CLICKHOUSE_CLIENT --query="INSERT INTO msgpack FORMAT MsgPack"; + +$CLICKHOUSE_CLIENT --query="SELECT * FROM msgpack"; + +$CLICKHOUSE_CLIENT --query="DROP TABLE msgpack"; + diff --git a/dbms/tests/queries/0_stateless/data_msgpack/all_types.msgpk b/dbms/tests/queries/0_stateless/data_msgpack/all_types.msgpk new file mode 100644 index 0000000000000000000000000000000000000000..efefdf32a55f96112d8952e725c2023f9687cde3 GIT binary patch literal 200 zcmX@}|Lp(&=l%o1c?JdsagVYW3>O+MHZWXj0CFz_g$*9;swCwUf}BZ jJe8G&nTb&g3Qjp`t`j-!;A(oPKStx*A`>H^_(UxL8(vuZ literal 0 HcmV?d00001 diff --git a/dbms/tests/queries/0_stateless/data_msgpack/nested_arrays.msgpk b/dbms/tests/queries/0_stateless/data_msgpack/nested_arrays.msgpk new file mode 100644 index 00000000000..761ef1d5b6c --- /dev/null +++ b/dbms/tests/queries/0_stateless/data_msgpack/nested_arrays.msgpk @@ -0,0 +1 @@ +ґ _onetwothreefourfive \ No newline at end of file From d2d6d637acd77153b82c0831b31410220cc48872 Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Thu, 26 Mar 2020 20:14:52 +0300 Subject: [PATCH 010/484] Delete excess file --- dbms/programs/server/metadata/test_n2zcw0/t.sql | 8 -------- 1 file changed, 8 deletions(-) delete mode 100644 dbms/programs/server/metadata/test_n2zcw0/t.sql diff --git a/dbms/programs/server/metadata/test_n2zcw0/t.sql b/dbms/programs/server/metadata/test_n2zcw0/t.sql deleted file mode 100644 index 64e3abcfe34..00000000000 --- a/dbms/programs/server/metadata/test_n2zcw0/t.sql +++ /dev/null @@ -1,8 
+0,0 @@ -ATTACH TABLE t -( - `a` Int, - `b` Int -) -ENGINE = MergeTree -ORDER BY (a, b) -SETTINGS index_granularity = 400 From 2cdb1989a0c29f9c69ad425c0aa9a29a759be39d Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Thu, 26 Mar 2020 20:15:35 +0300 Subject: [PATCH 011/484] Delete test_n2zcw0.sql --- dbms/programs/server/metadata/test_n2zcw0.sql | 2 -- 1 file changed, 2 deletions(-) delete mode 100644 dbms/programs/server/metadata/test_n2zcw0.sql diff --git a/dbms/programs/server/metadata/test_n2zcw0.sql b/dbms/programs/server/metadata/test_n2zcw0.sql deleted file mode 100644 index 80046cd585c..00000000000 --- a/dbms/programs/server/metadata/test_n2zcw0.sql +++ /dev/null @@ -1,2 +0,0 @@ -ATTACH DATABASE test_n2zcw0 -ENGINE = Ordinary From bc9d18a9c4abe7b6ec9548b5bc2944351dac7622 Mon Sep 17 00:00:00 2001 From: Avogar Date: Fri, 27 Mar 2020 00:11:33 +0300 Subject: [PATCH 012/484] Fix style and build errors. --- .../src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp | 8 +++++--- dbms/src/Processors/Formats/Impl/MsgPackRowInputFormat.h | 2 +- .../Processors/Formats/Impl/MsgPackRowOutputFormat.cpp | 2 +- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/dbms/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp b/dbms/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp index 59a8d176f32..0b3fb3d58ed 100644 --- a/dbms/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp +++ b/dbms/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp @@ -165,14 +165,16 @@ bool MsgPackRowInputFormat::readRow(MutableColumns & columns, RowReadExtension & return true; } -void registerInputFormatProcessorMsgPack(FormatFactory & factory) { +void registerInputFormatProcessorMsgPack(FormatFactory & factory) +{ factory.registerInputFormatProcessor("MsgPack", []( ReadBuffer &buf, const Block &sample, const RowInputFormatParams ¶ms, - const FormatSettings &) { + const FormatSettings &) + { return std::make_shared(sample, buf, params); }); } -} \ No newline at end of file +} diff --git a/dbms/src/Processors/Formats/Impl/MsgPackRowInputFormat.h b/dbms/src/Processors/Formats/Impl/MsgPackRowInputFormat.h index b2f14fca6f6..7daac811374 100644 --- a/dbms/src/Processors/Formats/Impl/MsgPackRowInputFormat.h +++ b/dbms/src/Processors/Formats/Impl/MsgPackRowInputFormat.h @@ -25,4 +25,4 @@ private: msgpack::object_handle object_handle; }; -} \ No newline at end of file +} diff --git a/dbms/src/Processors/Formats/Impl/MsgPackRowOutputFormat.cpp b/dbms/src/Processors/Formats/Impl/MsgPackRowOutputFormat.cpp index b4cb7185406..7c5e2c5b522 100644 --- a/dbms/src/Processors/Formats/Impl/MsgPackRowOutputFormat.cpp +++ b/dbms/src/Processors/Formats/Impl/MsgPackRowOutputFormat.cpp @@ -150,4 +150,4 @@ void registerOutputFormatProcessorMsgPack(FormatFactory & factory) }); } -} \ No newline at end of file +} From de0754ef0d98438da5e716fb328326ae2be34a34 Mon Sep 17 00:00:00 2001 From: alesapin Date: Mon, 30 Mar 2020 15:51:05 +0300 Subject: [PATCH 013/484] First working version --- dbms/src/Storages/AlterCommands.cpp | 8 +- .../Storages/MergeTree/IMergeTreeReader.cpp | 2 + dbms/src/Storages/MergeTree/MergeTreeData.cpp | 6 ++ .../MergeTree/MergeTreeDataMergerMutator.cpp | 81 ++++++++++++++++--- .../MergeTree/MergeTreeDataMergerMutator.h | 2 +- .../MergeTree/MergeTreeDataSelectExecutor.cpp | 2 + dbms/src/Storages/StorageMergeTree.cpp | 14 +++- 7 files changed, 99 insertions(+), 16 deletions(-) diff --git a/dbms/src/Storages/AlterCommands.cpp b/dbms/src/Storages/AlterCommands.cpp index 
f20a3f58382..afd30bdb067 100644 --- a/dbms/src/Storages/AlterCommands.cpp +++ b/dbms/src/Storages/AlterCommands.cpp @@ -533,7 +533,7 @@ bool AlterCommand::isRequireMutationStage(const StorageInMemoryMetadata & metada if (ignore) return false; - if (type == DROP_COLUMN || type == DROP_INDEX) + if (type == DROP_COLUMN || type == DROP_INDEX || type == RENAME_COLUMN) return true; if (type != MODIFY_COLUMN || data_type == nullptr) @@ -599,6 +599,12 @@ std::optional AlterCommand::tryConvertToMutationCommand(const S result.predicate = nullptr; } + else if (type == RENAME_COLUMN) + { + result.type = MutationCommand::Type::RENAME_COLUMN; + result.column_name = column_name; + result.rename_to = rename_to; + } result.ast = ast->clone(); return result; diff --git a/dbms/src/Storages/MergeTree/IMergeTreeReader.cpp b/dbms/src/Storages/MergeTree/IMergeTreeReader.cpp index 7ccbe71938c..58479f39bcb 100644 --- a/dbms/src/Storages/MergeTree/IMergeTreeReader.cpp +++ b/dbms/src/Storages/MergeTree/IMergeTreeReader.cpp @@ -32,6 +32,8 @@ IMergeTreeReader::IMergeTreeReader(const MergeTreeData::DataPartPtr & data_part_ , all_mark_ranges(all_mark_ranges_) , alter_conversions(storage.getAlterConversionsForPart(data_part)) { + LOG_DEBUG(&Poco::Logger::get("IMergeTreeReader"), "Columns to read:" << columns_.toString()); + LOG_DEBUG(&Poco::Logger::get("IMergeTreeReader"), "Columns in part:" << data_part_->getColumns().toString()); } IMergeTreeReader::~IMergeTreeReader() = default; diff --git a/dbms/src/Storages/MergeTree/MergeTreeData.cpp b/dbms/src/Storages/MergeTree/MergeTreeData.cpp index 7d07540741e..b2df5f7ed8f 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeData.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeData.cpp @@ -469,6 +469,7 @@ void MergeTreeData::setProperties(const StorageInMemoryMetadata & metadata, bool if (!only_check) { + LOG_DEBUG(log, "SETTING UP COLUMNS:" << metadata.columns.toString()); setColumns(std::move(metadata.columns)); order_by_ast = metadata.order_by_ast; @@ -3595,8 +3596,13 @@ MergeTreeData::AlterConversions MergeTreeData::getAlterConversionsForPart(const AlterConversions result{}; for (const auto & command : commands) + { if (command.type == MutationCommand::Type::RENAME_COLUMN) + { result.rename_map[command.column_name] = command.rename_to; + LOG_DEBUG(log, "Add to rename map:" << command.column_name); + } + } return result; } diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp b/dbms/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp index 0c8c39b074c..00637dc5017 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp @@ -31,6 +31,8 @@ #include +#include + namespace ProfileEvents { extern const Event MergedRows; @@ -988,6 +990,9 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mutatePartToTempor splitMutationCommands(source_part, commands_for_part, for_interpreter, for_file_renames); + + LOG_DEBUG(&Poco::Logger::get("MergerMutator"), "COMMANDS FOR INTERPRETER:" << for_interpreter.size()); + LOG_DEBUG(&Poco::Logger::get("MergerMutator"), "COMMANDS FOR RENAMES:" << for_file_renames.size()); UInt64 watch_prev_elapsed = 0; MergeStageProgress stage_progress(1.0); @@ -1010,6 +1015,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mutatePartToTempor /// It shouldn't be changed by mutation. 
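/// (Unchanged streams are later reused via hardlinks, including their marks files, so the mutated part has to keep the marks format and granularity of the source part.)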
new_data_part->index_granularity_info = source_part->index_granularity_info; new_data_part->setColumns(getColumnsForNewDataPart(source_part, updated_header, all_columns, for_file_renames)); + LOG_DEBUG(&Poco::Logger::get("MergerMutator"), "New data part columns:" << new_data_part->getColumns().toString()); new_data_part->partition.assign(source_part->partition); auto disk = new_data_part->disk; @@ -1056,19 +1062,34 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mutatePartToTempor auto indices_to_recalc = getIndicesToRecalculate(in, storage_from_source_part, updated_header.getNamesAndTypesList(), context); NameSet files_to_skip = collectFilesToSkip(updated_header, indices_to_recalc, mrk_extension); - NameSet files_to_remove = collectFilesToRemove(source_part, for_file_renames, mrk_extension); + NameToNameMap files_to_rename = collectFilesForRenames(source_part, for_file_renames, mrk_extension); + + LOG_DEBUG(&Poco::Logger::get("MergerMutator"), "FILES RENAME MAP:" << files_to_rename.size()); if (need_remove_expired_values) files_to_skip.insert("ttl.txt"); /// Create hardlinks for unchanged files for (auto it = disk->iterateDirectory(source_part->getFullRelativePath()); it->isValid(); it->next()) { - if (files_to_skip.count(it->name()) || files_to_remove.count(it->name())) + if (files_to_skip.count(it->name())) continue; - String destination = new_part_tmp_path + "/" + it->name(); + String destination = new_part_tmp_path + "/"; + auto rename_it = files_to_rename.find(it->name()); + if (rename_it != files_to_rename.end()) + { + LOG_DEBUG(&Poco::Logger::get("MergerMutator"), "RENAME IT FOUND:" << rename_it->first << " to " << rename_it->second); + if (rename_it->second.empty()) + continue; + destination += rename_it->second; + } + else + { + destination += it->name(); + } + LOG_DEBUG(&Poco::Logger::get("MergerMutator"), "HARDLINKING FROM:" << it->path() << " TO " << destination); disk->createHardLink(it->path(), destination); } @@ -1090,9 +1111,19 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mutatePartToTempor need_remove_expired_values); } - for (const String & removed_file : files_to_remove) - if (new_data_part->checksums.files.count(removed_file)) - new_data_part->checksums.files.erase(removed_file); + for (const auto & [rename_from, rename_to] : files_to_rename) + { + if (rename_to.empty() && new_data_part->checksums.files.count(rename_from)) + { + new_data_part->checksums.files.erase(rename_from); + } + else if (new_data_part->checksums.files.count(rename_from)) + { + new_data_part->checksums.files[rename_to] = new_data_part->checksums.files[rename_from]; + + new_data_part->checksums.files.erase(rename_from); + } + } finalizeMutatedPart(source_part, new_data_part, need_remove_expired_values); } @@ -1262,7 +1293,7 @@ void MergeTreeDataMergerMutator::splitMutationCommands( } -NameSet MergeTreeDataMergerMutator::collectFilesToRemove( +NameToNameMap MergeTreeDataMergerMutator::collectFilesForRenames( MergeTreeData::DataPartPtr source_part, const MutationCommands & commands_for_removes, const String & mrk_extension) { /// Collect counts for shared streams of different columns. As an example, Nested columns have shared stream with array sizes. 
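A minimal sketch of the per-stream mapping this function produces, reduced to a plain column with a single substream (illustrative only: the real code walks all substreams via IDataType::enumerateStreams, derives stream names with IDataType::getFileNameForStream, and the marks extension depends on the part, e.g. ".mrk" or ".mrk2"):

    #include <string>
    #include <unordered_map>

    using NameToNameMap = std::unordered_map<std::string, std::string>;

    /// For RENAME COLUMN `from` TO `to` on a simple column, every substream
    /// contributes one data file and one marks file to the rename map.
    /// An empty target name is reserved for removals (DROP COLUMN / DROP INDEX),
    /// which is why renames and removals can share one map.
    NameToNameMap renameMapForSimpleColumn(
        const std::string & from, const std::string & to, const std::string & mrk_extension)
    {
        NameToNameMap rename_map;
        rename_map.emplace(from + ".bin", to + ".bin");
        rename_map.emplace(from + mrk_extension, to + mrk_extension);
        return rename_map;
    }

    /// renameMapForSimpleColumn("value1", "renamed_value1", ".mrk2") yields:
    ///   value1.bin  -> renamed_value1.bin
    ///   value1.mrk2 -> renamed_value1.mrk2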
@@ -1277,14 +1308,14 @@ NameSet MergeTreeDataMergerMutator::collectFilesToRemove( {}); } - NameSet remove_files; + NameToNameMap rename_map; /// Remove old indices for (const auto & command : commands_for_removes) { if (command.type == MutationCommand::Type::DROP_INDEX) { - remove_files.emplace("skp_idx_" + command.column_name + ".idx"); - remove_files.emplace("skp_idx_" + command.column_name + mrk_extension); + rename_map.emplace("skp_idx_" + command.column_name + ".idx", ""); + rename_map.emplace("skp_idx_" + command.column_name + mrk_extension, ""); } else if (command.type == MutationCommand::Type::DROP_COLUMN) { @@ -1294,8 +1325,8 @@ NameSet MergeTreeDataMergerMutator::collectFilesToRemove( /// Delete files if they are no longer shared with another column. if (--stream_counts[stream_name] == 0) { - remove_files.emplace(stream_name + ".bin"); - remove_files.emplace(stream_name + mrk_extension); + rename_map.emplace(stream_name + ".bin", ""); + rename_map.emplace(stream_name + mrk_extension, ""); } }; @@ -1304,9 +1335,25 @@ NameSet MergeTreeDataMergerMutator::collectFilesToRemove( if (column) column->type->enumerateStreams(callback, stream_path); } + else if (command.type == MutationCommand::Type::RENAME_COLUMN) + { + LOG_DEBUG(&Poco::Logger::get("collectFilesForRenames"), "Has mutation command"); + IDataType::StreamCallback callback = [&](const IDataType::SubstreamPath & substream_path) + { + String stream_from = IDataType::getFileNameForStream(command.column_name, substream_path); + + String stream_to = boost::replace_first_copy(stream_from, command.column_name, command.rename_to); + rename_map.emplace(stream_from + ".bin", stream_to + ".bin"); + rename_map.emplace(stream_from + mrk_extension, stream_to + mrk_extension); + }; + IDataType::SubstreamPath stream_path; + auto column = source_part->getColumns().tryGetByName(command.column_name); + if (column) + column->type->enumerateStreams(callback, stream_path); + } } - return remove_files; + return rename_map; } NameSet MergeTreeDataMergerMutator::collectFilesToSkip( @@ -1344,15 +1391,19 @@ NamesAndTypesList MergeTreeDataMergerMutator::getColumnsForNewDataPart( const MutationCommands & commands_for_removes) { NameSet removed_columns; + NameToNameMap renamed_columns; for (const auto & command : commands_for_removes) { if (command.type == MutationCommand::DROP_COLUMN) removed_columns.insert(command.column_name); + if (command.type == MutationCommand::RENAME_COLUMN) + renamed_columns.emplace(command.rename_to, command.column_name); } Names source_column_names = source_part->getColumns().getNames(); NameSet source_columns_name_set(source_column_names.begin(), source_column_names.end()); for (auto it = all_columns.begin(); it != all_columns.end();) { + LOG_DEBUG(&Poco::Logger::get("getColumnsForNewDataPart"), "Looking at column:" << it->name); if (updated_header.has(it->name)) { auto updated_type = updated_header.getByName(it->name).type; @@ -1364,6 +1415,10 @@ NamesAndTypesList MergeTreeDataMergerMutator::getColumnsForNewDataPart( { ++it; } + else if (renamed_columns.count(it->name) && source_columns_name_set.count(renamed_columns[it->name])) + { + ++it; + } else it = all_columns.erase(it); } diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataMergerMutator.h b/dbms/src/Storages/MergeTree/MergeTreeDataMergerMutator.h index 3d41ceee990..b24b56a4780 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeDataMergerMutator.h +++ b/dbms/src/Storages/MergeTree/MergeTreeDataMergerMutator.h @@ -147,7 +147,7 @@ private: /// Apply commands to 
source_part i.e. remove some columns in source_part /// and return set of files, that have to be removed from filesystem and checksums - static NameSet collectFilesToRemove(MergeTreeData::DataPartPtr source_part, const MutationCommands & commands_for_removes, const String & mrk_extension); + static NameToNameMap collectFilesForRenames(MergeTreeData::DataPartPtr source_part, const MutationCommands & commands_for_removes, const String & mrk_extension); /// Files, that we don't need to remove and don't need to hardlink, for example columns.txt and checksums.txt. /// Because we will generate new versions of them after we perform mutation. diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp b/dbms/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp index 629a2b2cc18..9bb7db3ea11 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp @@ -183,6 +183,7 @@ Pipes MergeTreeDataSelectExecutor::readFromParts( for (const String & name : column_names_to_return) { + LOG_DEBUG(log, "Column name to return:" << name); if (name == "_part") { part_column_queried = true; @@ -209,6 +210,7 @@ Pipes MergeTreeDataSelectExecutor::readFromParts( NamesAndTypesList available_real_columns = data.getColumns().getAllPhysical(); + LOG_DEBUG(log, "Available columns:" << available_real_columns.toString()); /// If there are only virtual columns in the query, you must request at least one non-virtual one. if (real_column_names.empty()) real_column_names.push_back(ExpressionActions::getSmallestColumn(available_real_columns)); diff --git a/dbms/src/Storages/StorageMergeTree.cpp b/dbms/src/Storages/StorageMergeTree.cpp index 79079aa3095..d6a7fe376f4 100644 --- a/dbms/src/Storages/StorageMergeTree.cpp +++ b/dbms/src/Storages/StorageMergeTree.cpp @@ -213,11 +213,14 @@ void StorageMergeTree::alter( StorageInMemoryMetadata metadata = getInMemoryMetadata(); auto maybe_mutation_commands = commands.getMutationCommands(metadata); + LOG_DEBUG(log, "Applying commands"); commands.apply(metadata); + LOG_DEBUG(log, "Commands applied"); /// This alter can be performed at metadata level only if (commands.isSettingsAlter()) { + LOG_DEBUG(log, "Settings alter"); lockStructureExclusively(table_lock_holder, context.getCurrentQueryId()); changeSettings(metadata.settings_ast, table_lock_holder); @@ -226,15 +229,18 @@ void StorageMergeTree::alter( } else { + LOG_DEBUG(log, "Not settings alter"); lockStructureExclusively(table_lock_holder, context.getCurrentQueryId()); changeSettings(metadata.settings_ast, table_lock_holder); /// Reinitialize primary key because primary key column types might have changed. 
setProperties(metadata); + LOG_DEBUG(log, "Metadata setup"); setTTLExpressions(metadata.columns.getColumnTTLs(), metadata.ttl_for_table_ast); DatabaseCatalog::instance().getDatabase(table_id.database_name)->alterTable(context, table_id.table_name, metadata); + LOG_DEBUG(log, "Data on disk changed"); /// We release all locks except alter_lock which allows /// to execute alter queries sequentially @@ -683,10 +689,16 @@ bool StorageMergeTree::tryMutatePart() MutationCommands commands_for_size_validation; for (const auto & command : it->second.commands) { - if (command.type != MutationCommand::Type::DROP_COLUMN && command.type != MutationCommand::Type::DROP_INDEX) + if (command.type != MutationCommand::Type::DROP_COLUMN + && command.type != MutationCommand::Type::DROP_INDEX + && command.type != MutationCommand::Type::RENAME_COLUMN) + { commands_for_size_validation.push_back(command); + } else + { commands_size += command.ast->size(); + } } if (!commands_for_size_validation.empty()) From 8f91892d7bd55dc99f3883977fc43fe3bd4afbfb Mon Sep 17 00:00:00 2001 From: alesapin Date: Mon, 30 Mar 2020 16:34:19 +0300 Subject: [PATCH 014/484] Add simple validation for renames --- dbms/src/Storages/AlterCommands.cpp | 24 +++++++++++++++++++ .../01213_alter_rename_column.reference | 2 ++ .../0_stateless/01213_alter_rename_column.sql | 6 +++++ 3 files changed, 32 insertions(+) diff --git a/dbms/src/Storages/AlterCommands.cpp b/dbms/src/Storages/AlterCommands.cpp index afd30bdb067..14ff07daa12 100644 --- a/dbms/src/Storages/AlterCommands.cpp +++ b/dbms/src/Storages/AlterCommands.cpp @@ -702,6 +702,7 @@ void AlterCommands::validate(const StorageInMemoryMetadata & metadata, const Con auto all_columns = metadata.columns; /// Default expression for all added/modified columns ASTPtr default_expr_list = std::make_shared(); + NameToNameMap renames_map; for (size_t i = 0; i < size(); ++i) { auto & command = (*this)[i]; @@ -775,6 +776,29 @@ void AlterCommands::validate(const StorageInMemoryMetadata & metadata, const Con if (metadata.settings_ast == nullptr) throw Exception{"Cannot alter settings, because table engine doesn't support settings changes", ErrorCodes::BAD_ARGUMENTS}; } + else if (command.type == AlterCommand::RENAME_COLUMN) + { + if (!metadata.columns.has(command.column_name)) + { + if (!command.if_exists) + throw Exception{"Wrong column name. 
Cannot find column " + backQuote(command.column_name) + " to rename", + ErrorCodes::NOT_FOUND_COLUMN_IN_BLOCK}; + } + + if (metadata.columns.has(command.rename_to)) + throw Exception{"Cannot rename to " + backQuote(command.rename_to) + ": column with this name already exists", + ErrorCodes::DUPLICATE_COLUMN}; + + + if (renames_map.count(command.column_name)) + throw Exception{"Cannot rename column '" + backQuote(command.column_name) + "' to two different names in a single ALTER query", ErrorCodes::BAD_ARGUMENTS}; + + if (renames_map.count(command.rename_to)) + throw Exception{"Rename loop detected in ALTER query", + ErrorCodes::BAD_ARGUMENTS}; + + renames_map[command.column_name] = command.rename_to; + } /// Collect default expressions for MODIFY and ADD comands if (command.type == AlterCommand::MODIFY_COLUMN || command.type == AlterCommand::ADD_COLUMN) diff --git a/dbms/tests/queries/0_stateless/01213_alter_rename_column.reference b/dbms/tests/queries/0_stateless/01213_alter_rename_column.reference index 6ed281c757a..8595d2f423a 100644 --- a/dbms/tests/queries/0_stateless/01213_alter_rename_column.reference +++ b/dbms/tests/queries/0_stateless/01213_alter_rename_column.reference @@ -1,2 +1,4 @@ 1 1 +date key renamed_value1 value2 value3 +2019-10-02 1 1 1 1 diff --git a/dbms/tests/queries/0_stateless/01213_alter_rename_column.sql b/dbms/tests/queries/0_stateless/01213_alter_rename_column.sql index efd485ef89b..8599360cdeb 100644 --- a/dbms/tests/queries/0_stateless/01213_alter_rename_column.sql +++ b/dbms/tests/queries/0_stateless/01213_alter_rename_column.sql @@ -20,4 +20,10 @@ ALTER TABLE table_for_rename RENAME COLUMN value1 to renamed_value1; SELECT renamed_value1 FROM table_for_rename WHERE key = 1; +SELECT * FROM table_for_rename WHERE key = 1 FORMAT TSVWithNames; + +ALTER TABLE table_for_rename RENAME COLUMN value3 to value2; --{serverError 15} +ALTER TABLE table_for_rename RENAME COLUMN value3 TO r1, RENAME COLUMN value3 TO r2; --{serverError 36} +ALTER TABLE table_for_rename RENAME COLUMN value3 TO r1, RENAME COLUMN r1 TO value1; --{serverError 36} + DROP TABLE IF EXISTS table_for_rename; From a9b675297ed7b8f1c7832f634087abdb74a93c51 Mon Sep 17 00:00:00 2001 From: alesapin Date: Mon, 30 Mar 2020 17:40:43 +0300 Subject: [PATCH 015/484] Better --- .../Storages/MergeTree/MergeTreeDataMergerMutator.cpp | 2 -- .../0_stateless/01213_alter_rename_column.reference | 3 +++ .../queries/0_stateless/01213_alter_rename_column.sql | 9 ++++++++- 3 files changed, 11 insertions(+), 3 deletions(-) diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp b/dbms/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp index 00637dc5017..894f05d6aca 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp @@ -1114,9 +1114,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mutatePartToTempor for (const auto & [rename_from, rename_to] : files_to_rename) { if (rename_to.empty() && new_data_part->checksums.files.count(rename_from)) - { new_data_part->checksums.files.erase(rename_from); - } else if (new_data_part->checksums.files.count(rename_from)) { new_data_part->checksums.files[rename_to] = new_data_part->checksums.files[rename_from]; diff --git a/dbms/tests/queries/0_stateless/01213_alter_rename_column.reference b/dbms/tests/queries/0_stateless/01213_alter_rename_column.reference index 8595d2f423a..a5e642f56ad 100644 --- a/dbms/tests/queries/0_stateless/01213_alter_rename_column.reference +++ 
b/dbms/tests/queries/0_stateless/01213_alter_rename_column.reference @@ -2,3 +2,6 @@ 1 date key renamed_value1 value2 value3 2019-10-02 1 1 1 1 +7 7 +date key renamed_value1 renamed_value2 renamed_value3 +2019-10-02 7 7 7 7 diff --git a/dbms/tests/queries/0_stateless/01213_alter_rename_column.sql b/dbms/tests/queries/0_stateless/01213_alter_rename_column.sql index 8599360cdeb..7c6209ac0b4 100644 --- a/dbms/tests/queries/0_stateless/01213_alter_rename_column.sql +++ b/dbms/tests/queries/0_stateless/01213_alter_rename_column.sql @@ -24,6 +24,13 @@ SELECT * FROM table_for_rename WHERE key = 1 FORMAT TSVWithNames; ALTER TABLE table_for_rename RENAME COLUMN value3 to value2; --{serverError 15} ALTER TABLE table_for_rename RENAME COLUMN value3 TO r1, RENAME COLUMN value3 TO r2; --{serverError 36} -ALTER TABLE table_for_rename RENAME COLUMN value3 TO r1, RENAME COLUMN r1 TO value1; --{serverError 36} +ALTER TABLE table_for_rename RENAME COLUMN value3 TO r1, RENAME COLUMN r1 TO value1; --{serverError 10} + + +ALTER TABLE table_for_rename RENAME COLUMN value2 TO renamed_value2, RENAME COLUMN value3 TO renamed_value3; + +SELECT renamed_value2, renamed_value3 FROM table_for_rename WHERE key = 7; + +SELECT * FROM table_for_rename WHERE key = 7 FORMAT TSVWithNames; DROP TABLE IF EXISTS table_for_rename; From 6261ab0ef1ad6da7cf6b60b29f86559372663fdc Mon Sep 17 00:00:00 2001 From: alesapin Date: Mon, 30 Mar 2020 19:03:08 +0300 Subject: [PATCH 016/484] Alter only metadata works --- dbms/src/Storages/MergeTree/MergeTreeData.cpp | 2 +- .../MergeTree/MergeTreeReaderWide.cpp | 34 ++++++++++++++++--- 2 files changed, 31 insertions(+), 5 deletions(-) diff --git a/dbms/src/Storages/MergeTree/MergeTreeData.cpp b/dbms/src/Storages/MergeTree/MergeTreeData.cpp index b2df5f7ed8f..b4f1b265174 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeData.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeData.cpp @@ -3599,7 +3599,7 @@ MergeTreeData::AlterConversions MergeTreeData::getAlterConversionsForPart(const { if (command.type == MutationCommand::Type::RENAME_COLUMN) { - result.rename_map[command.column_name] = command.rename_to; + result.rename_map[command.rename_to] = command.column_name; LOG_DEBUG(log, "Add to rename map:" << command.column_name); } } diff --git a/dbms/src/Storages/MergeTree/MergeTreeReaderWide.cpp b/dbms/src/Storages/MergeTree/MergeTreeReaderWide.cpp index 1f87f229cc5..daa59eafaab 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeReaderWide.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeReaderWide.cpp @@ -39,19 +39,38 @@ MergeTreeReaderWide::MergeTreeReaderWide( std::move(data_part_), std::move(columns_), uncompressed_cache_, std::move(mark_cache_), std::move(mark_ranges_), std::move(settings_), std::move(avg_value_size_hints_)) { + LOG_DEBUG(&Poco::Logger::get("ReaderWide"), "Alter conversions size:" << alter_conversions.rename_map.size()); + for (const auto & [rename_to, rename_from] : alter_conversions.rename_map) + { + LOG_DEBUG(&Poco::Logger::get("ReaderWide"), "RENAME T:" << rename_to << " F:" << rename_from); + } try { for (const NameAndTypePair & column_from_part : data_part->getColumns()) - { columns_from_part[column_from_part.name] = column_from_part.type; - } for (const NameAndTypePair & column : columns) { if (columns_from_part.count(column.name)) + { + LOG_DEBUG(&Poco::Logger::get("ReaderWide"), "ADDING STREAM:" << column.name); addStreams(column.name, *columns_from_part[column.name], profile_callback_, clock_type_); + } else - addStreams(column.name, *column.type, profile_callback_, 
clock_type_); + { + auto renamed_it = alter_conversions.rename_map.find(column.name); + if (renamed_it != alter_conversions.rename_map.end() + && columns_from_part.count(renamed_it->second)) + { + LOG_DEBUG(&Poco::Logger::get("ReaderWide"), "ADDING RENAMED STREAM:" << renamed_it->second); + addStreams(renamed_it->second, *columns_from_part[renamed_it->second], profile_callback_, clock_type_); + } + else + { + LOG_DEBUG(&Poco::Logger::get("ReaderWide"), "ADDING STREAM:" << column.name); + addStreams(column.name, *column.type, profile_callback_, clock_type_); + } + } } } catch (...) @@ -82,7 +101,14 @@ size_t MergeTreeReaderWide::readRows(size_t from_mark, bool continue_reading, si auto name_and_type = columns.begin(); for (size_t pos = 0; pos < num_columns; ++pos, ++name_and_type) { - String & name = name_and_type->name; + String name = name_and_type->name; + if (alter_conversions.rename_map.count(name)) + { + String original_name = alter_conversions.rename_map[name]; + if (!columns_from_part.count(name) && columns_from_part.count(original_name)) + name = original_name; + } + DataTypePtr type; if (columns_from_part.count(name)) type = columns_from_part[name]; From eec7ae287347053b5170a31af74fca4b05516e4c Mon Sep 17 00:00:00 2001 From: alesapin Date: Mon, 30 Mar 2020 19:28:24 +0300 Subject: [PATCH 017/484] Less debug output --- dbms/src/Storages/MergeTree/IMergeTreeReader.cpp | 2 -- dbms/src/Storages/MergeTree/MergeTreeData.cpp | 1 - .../src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp | 8 -------- dbms/src/Storages/MergeTree/MergeTreeReaderWide.cpp | 5 ----- 4 files changed, 16 deletions(-) diff --git a/dbms/src/Storages/MergeTree/IMergeTreeReader.cpp b/dbms/src/Storages/MergeTree/IMergeTreeReader.cpp index 58479f39bcb..7ccbe71938c 100644 --- a/dbms/src/Storages/MergeTree/IMergeTreeReader.cpp +++ b/dbms/src/Storages/MergeTree/IMergeTreeReader.cpp @@ -32,8 +32,6 @@ IMergeTreeReader::IMergeTreeReader(const MergeTreeData::DataPartPtr & data_part_ , all_mark_ranges(all_mark_ranges_) , alter_conversions(storage.getAlterConversionsForPart(data_part)) { - LOG_DEBUG(&Poco::Logger::get("IMergeTreeReader"), "Columns to read:" << columns_.toString()); - LOG_DEBUG(&Poco::Logger::get("IMergeTreeReader"), "Columns in part:" << data_part_->getColumns().toString()); } IMergeTreeReader::~IMergeTreeReader() = default; diff --git a/dbms/src/Storages/MergeTree/MergeTreeData.cpp b/dbms/src/Storages/MergeTree/MergeTreeData.cpp index b4f1b265174..d299a82efcb 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeData.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeData.cpp @@ -469,7 +469,6 @@ void MergeTreeData::setProperties(const StorageInMemoryMetadata & metadata, bool if (!only_check) { - LOG_DEBUG(log, "SETTING UP COLUMNS:" << metadata.columns.toString()); setColumns(std::move(metadata.columns)); order_by_ast = metadata.order_by_ast; diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp b/dbms/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp index 894f05d6aca..64650165c28 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp @@ -991,8 +991,6 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mutatePartToTempor splitMutationCommands(source_part, commands_for_part, for_interpreter, for_file_renames); - LOG_DEBUG(&Poco::Logger::get("MergerMutator"), "COMMANDS FOR INTERPRETER:" << for_interpreter.size()); - LOG_DEBUG(&Poco::Logger::get("MergerMutator"), "COMMANDS FOR RENAMES:" << 
for_file_renames.size()); UInt64 watch_prev_elapsed = 0; MergeStageProgress stage_progress(1.0); @@ -1015,7 +1013,6 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mutatePartToTempor /// It shouldn't be changed by mutation. new_data_part->index_granularity_info = source_part->index_granularity_info; new_data_part->setColumns(getColumnsForNewDataPart(source_part, updated_header, all_columns, for_file_renames)); - LOG_DEBUG(&Poco::Logger::get("MergerMutator"), "New data part columns:" << new_data_part->getColumns().toString()); new_data_part->partition.assign(source_part->partition); auto disk = new_data_part->disk; @@ -1065,7 +1062,6 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mutatePartToTempor NameToNameMap files_to_rename = collectFilesForRenames(source_part, for_file_renames, mrk_extension); - LOG_DEBUG(&Poco::Logger::get("MergerMutator"), "FILES RENAME MAP:" << files_to_rename.size()); if (need_remove_expired_values) files_to_skip.insert("ttl.txt"); @@ -1079,7 +1075,6 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mutatePartToTempor auto rename_it = files_to_rename.find(it->name()); if (rename_it != files_to_rename.end()) { - LOG_DEBUG(&Poco::Logger::get("MergerMutator"), "RENAME IT FOUND:" << rename_it->first << " to " << rename_it->second); if (rename_it->second.empty()) continue; destination += rename_it->second; @@ -1089,7 +1084,6 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mutatePartToTempor destination += it->name(); } - LOG_DEBUG(&Poco::Logger::get("MergerMutator"), "HARDLINKING FROM:" << it->path() << " TO " << destination); disk->createHardLink(it->path(), destination); } @@ -1335,7 +1329,6 @@ NameToNameMap MergeTreeDataMergerMutator::collectFilesForRenames( } else if (command.type == MutationCommand::Type::RENAME_COLUMN) { - LOG_DEBUG(&Poco::Logger::get("collectFilesForRenames"), "Has mutation command"); IDataType::StreamCallback callback = [&](const IDataType::SubstreamPath & substream_path) { String stream_from = IDataType::getFileNameForStream(command.column_name, substream_path); @@ -1401,7 +1394,6 @@ NamesAndTypesList MergeTreeDataMergerMutator::getColumnsForNewDataPart( NameSet source_columns_name_set(source_column_names.begin(), source_column_names.end()); for (auto it = all_columns.begin(); it != all_columns.end();) { - LOG_DEBUG(&Poco::Logger::get("getColumnsForNewDataPart"), "Looking at column:" << it->name); if (updated_header.has(it->name)) { auto updated_type = updated_header.getByName(it->name).type; diff --git a/dbms/src/Storages/MergeTree/MergeTreeReaderWide.cpp b/dbms/src/Storages/MergeTree/MergeTreeReaderWide.cpp index daa59eafaab..920ff56cdf5 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeReaderWide.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeReaderWide.cpp @@ -39,11 +39,6 @@ MergeTreeReaderWide::MergeTreeReaderWide( std::move(data_part_), std::move(columns_), uncompressed_cache_, std::move(mark_cache_), std::move(mark_ranges_), std::move(settings_), std::move(avg_value_size_hints_)) { - LOG_DEBUG(&Poco::Logger::get("ReaderWide"), "Alter conversions size:" << alter_conversions.rename_map.size()); - for (const auto & [rename_to, rename_from] : alter_conversions.rename_map) - { - LOG_DEBUG(&Poco::Logger::get("ReaderWide"), "RENAME T:" << rename_to << " F:" << rename_from); - } try { for (const NameAndTypePair & column_from_part : data_part->getColumns()) From b67947cbae04093bbbb5584fd347fe4810172010 Mon Sep 17 00:00:00 2001 From: alesapin Date: Mon, 30 Mar 2020 19:35:24 +0300 
Subject: [PATCH 018/484] Test for replicated MT --- ...13_alter_rename_column_zookeeper.reference | 6 +++ .../01213_alter_rename_column_zookeeper.sql | 37 +++++++++++++++++++ 2 files changed, 43 insertions(+) create mode 100644 dbms/tests/queries/0_stateless/01213_alter_rename_column_zookeeper.reference create mode 100644 dbms/tests/queries/0_stateless/01213_alter_rename_column_zookeeper.sql diff --git a/dbms/tests/queries/0_stateless/01213_alter_rename_column_zookeeper.reference b/dbms/tests/queries/0_stateless/01213_alter_rename_column_zookeeper.reference new file mode 100644 index 00000000000..a6c50f985c2 --- /dev/null +++ b/dbms/tests/queries/0_stateless/01213_alter_rename_column_zookeeper.reference @@ -0,0 +1,6 @@ +1 +CREATE TABLE default.table_for_rename_replicated (`date` Date, `key` UInt64, `value1` String, `value2` String, `value3` String) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/table_for_rename_replicaed\', \'1\') PARTITION BY date ORDER BY key SETTINGS index_granularity = 8192 +CREATE TABLE default.table_for_rename_replicated (`date` Date, `key` UInt64, `renamed_value1` String, `value2` String, `value3` String) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/table_for_rename_replicaed\', \'1\') PARTITION BY date ORDER BY key SETTINGS index_granularity = 8192 +1 +date key renamed_value1 value2 value3 +2019-10-02 1 1 1 1 diff --git a/dbms/tests/queries/0_stateless/01213_alter_rename_column_zookeeper.sql b/dbms/tests/queries/0_stateless/01213_alter_rename_column_zookeeper.sql new file mode 100644 index 00000000000..abfab3fa937 --- /dev/null +++ b/dbms/tests/queries/0_stateless/01213_alter_rename_column_zookeeper.sql @@ -0,0 +1,37 @@ +DROP TABLE IF EXISTS table_for_rename_replicated; + + +CREATE TABLE table_for_rename_replicated +( + date Date, + key UInt64, + value1 String, + value2 String, + value3 String +) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/table_for_rename_replicaed', '1') +PARTITION BY date +ORDER BY key; + + +INSERT INTO table_for_rename_replicated SELECT toDate('2019-10-01') + number % 3, number, toString(number), toString(number), toString(number) from numbers(9); + +SELECT value1 FROM table_for_rename_replicated WHERE key = 1; + +SYSTEM STOP MERGES; + +SHOW CREATE TABLE table_for_rename_replicated; + +ALTER TABLE table_for_rename_replicated RENAME COLUMN value1 to renamed_value1 SETTINGS replication_alter_partitions_sync = 0; + +SELECT sleep(2) FORMAT Null; + +SHOW CREATE TABLE table_for_rename_replicated; + +SELECT renamed_value1 FROM table_for_rename_replicated WHERE key = 1; + +SELECT * FROM table_for_rename_replicated WHERE key = 1 FORMAT TSVWithNames; + +SYSTEM START MERGES; + +DROP TABLE IF EXISTS table_for_rename_replicated; From 6808160cf11449e5bcbcab0c2c14821c1db940b0 Mon Sep 17 00:00:00 2001 From: alesapin Date: Mon, 30 Mar 2020 19:43:47 +0300 Subject: [PATCH 019/484] Remove debug logs --- .../src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp | 2 -- dbms/src/Storages/MergeTree/MergeTreeReaderWide.cpp | 7 ------- 2 files changed, 9 deletions(-) diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp b/dbms/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp index 9bb7db3ea11..629a2b2cc18 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp @@ -183,7 +183,6 @@ Pipes MergeTreeDataSelectExecutor::readFromParts( for (const String & name : column_names_to_return) { - LOG_DEBUG(log, "Column name to return:" << name); if (name == 
"_part") { part_column_queried = true; @@ -210,7 +209,6 @@ Pipes MergeTreeDataSelectExecutor::readFromParts( NamesAndTypesList available_real_columns = data.getColumns().getAllPhysical(); - LOG_DEBUG(log, "Available columns:" << available_real_columns.toString()); /// If there are only virtual columns in the query, you must request at least one non-virtual one. if (real_column_names.empty()) real_column_names.push_back(ExpressionActions::getSmallestColumn(available_real_columns)); diff --git a/dbms/src/Storages/MergeTree/MergeTreeReaderWide.cpp b/dbms/src/Storages/MergeTree/MergeTreeReaderWide.cpp index 920ff56cdf5..ae60f8c1733 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeReaderWide.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeReaderWide.cpp @@ -48,7 +48,6 @@ MergeTreeReaderWide::MergeTreeReaderWide( { if (columns_from_part.count(column.name)) { - LOG_DEBUG(&Poco::Logger::get("ReaderWide"), "ADDING STREAM:" << column.name); addStreams(column.name, *columns_from_part[column.name], profile_callback_, clock_type_); } else @@ -56,15 +55,9 @@ MergeTreeReaderWide::MergeTreeReaderWide( auto renamed_it = alter_conversions.rename_map.find(column.name); if (renamed_it != alter_conversions.rename_map.end() && columns_from_part.count(renamed_it->second)) - { - LOG_DEBUG(&Poco::Logger::get("ReaderWide"), "ADDING RENAMED STREAM:" << renamed_it->second); addStreams(renamed_it->second, *columns_from_part[renamed_it->second], profile_callback_, clock_type_); - } else - { - LOG_DEBUG(&Poco::Logger::get("ReaderWide"), "ADDING STREAM:" << column.name); addStreams(column.name, *column.type, profile_callback_, clock_type_); - } } } } From c03d48bd39a7ca253089b9e7dd963b43b2b209df Mon Sep 17 00:00:00 2001 From: alesapin Date: Mon, 30 Mar 2020 20:04:10 +0300 Subject: [PATCH 020/484] Less debug logs --- dbms/src/Storages/StorageMergeTree.cpp | 6 ------ 1 file changed, 6 deletions(-) diff --git a/dbms/src/Storages/StorageMergeTree.cpp b/dbms/src/Storages/StorageMergeTree.cpp index d6a7fe376f4..3785891fcc5 100644 --- a/dbms/src/Storages/StorageMergeTree.cpp +++ b/dbms/src/Storages/StorageMergeTree.cpp @@ -213,14 +213,11 @@ void StorageMergeTree::alter( StorageInMemoryMetadata metadata = getInMemoryMetadata(); auto maybe_mutation_commands = commands.getMutationCommands(metadata); - LOG_DEBUG(log, "Applying commands"); commands.apply(metadata); - LOG_DEBUG(log, "Commands applied"); /// This alter can be performed at metadata level only if (commands.isSettingsAlter()) { - LOG_DEBUG(log, "Settings alter"); lockStructureExclusively(table_lock_holder, context.getCurrentQueryId()); changeSettings(metadata.settings_ast, table_lock_holder); @@ -229,18 +226,15 @@ void StorageMergeTree::alter( } else { - LOG_DEBUG(log, "Not settings alter"); lockStructureExclusively(table_lock_holder, context.getCurrentQueryId()); changeSettings(metadata.settings_ast, table_lock_holder); /// Reinitialize primary key because primary key column types might have changed. 
setProperties(metadata); - LOG_DEBUG(log, "Metadata setup"); setTTLExpressions(metadata.columns.getColumnTTLs(), metadata.ttl_for_table_ast); DatabaseCatalog::instance().getDatabase(table_id.database_name)->alterTable(context, table_id.table_name, metadata); - LOG_DEBUG(log, "Data on disk changed"); /// We release all locks except alter_lock which allows /// to execute alter queries sequentially From abae7dfffec2618cc5acb87371e13635c0eea8f1 Mon Sep 17 00:00:00 2001 From: Vasily Nemkov Date: Tue, 31 Mar 2020 16:14:59 +0300 Subject: [PATCH 021/484] Initial implementation of splitting string into Alpha-Num tokens with SIMD intrinsics. --- .../MergeTree/MergeTreeIndexFullText.cpp | 53 +++++++++++++++++++ .../tests/gtest_SplitTokenExtractor.cpp | 0 2 files changed, 53 insertions(+) create mode 100644 dbms/src/Storages/tests/gtest_SplitTokenExtractor.cpp diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexFullText.cpp b/dbms/src/Storages/MergeTree/MergeTreeIndexFullText.cpp index 4b3bd954496..5e4bf15418c 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeIndexFullText.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeIndexFullText.cpp @@ -606,8 +606,60 @@ bool SplitTokenExtractor::next(const char * data, size_t len, size_t * pos, size { *token_start = *pos; *token_len = 0; + while (*pos < len) { +#if __SSE2__ + // NOTE: we assume that `data` string is padded from the right with 15 zero-bytes. + const __m128i haystack = _mm_loadu_si128(reinterpret_cast<const __m128i *>(data + *pos)); + const size_t haystack_length = 16; + +#if __SSE4_2__ + // With the help of https://www.strchr.com/strcmp_and_strlen_using_sse_4.2 + static const auto alnum_chars_ranges = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 'Z', 'A', 'z', 'a', '9', '0'); + // Every bit represents if `haystack` character is in the ranges (1) or not(0) + const auto result_bitmask = _mm_cvtsi128_si32(_mm_cmpestrm(alnum_chars_ranges, 6, haystack, haystack_length, _SIDD_CMP_RANGES | _SIDD_UBYTE_OPS)); +#else + // NOTE: -1 and +1 required since SSE2 has no `>=` and `<=` instructions on packed 8-bit integers (epi8).
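+ // For example, c >= '0' has to be expressed as c > ('0' - 1) and c <= '9' as c < ('9' + 1), + // since only the strict comparisons _mm_cmpgt_epi8 / _mm_cmplt_epi8 are available.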
+ static const auto number_begin = _mm_set1_epi8('0' - 1); + static const auto number_end = _mm_set1_epi8('9' + 1); + static const auto alpha_lower_begin = _mm_set1_epi8('a' - 1); + static const auto alpha_lower_end = _mm_set1_epi8('z' + 1); + static const auto alpha_upper_begin = _mm_set1_epi8('A' - 1); + static const auto alpha_upper_end = _mm_set1_epi8('Z' + 1); + + // every bit represents if `haystack` character `c` satisfies the condition: + // (c > '0' - 1 && c < '9' + 1) || (c > 'a' - 1 && c < 'z' + 1) || (c > 'A' - 1 && c < 'Z' + 1) + const int result_bitmask = _mm_movemask_epi8(_mm_or_si128(_mm_or_si128( + _mm_and_si128(_mm_cmpgt_epi8(haystack, number_begin), _mm_cmplt_epi8(haystack, number_end)), + _mm_and_si128(_mm_cmpgt_epi8(haystack, alpha_lower_begin), _mm_cmplt_epi8(haystack, alpha_lower_end))), + _mm_and_si128(_mm_cmpgt_epi8(haystack, alpha_upper_begin), _mm_cmplt_epi8(haystack, alpha_upper_end)))); +#endif + // NOTE: __builtin_ctz family explicitly states that the result is UNDEFINED if the argument is 0 + if (result_bitmask == 0) + { + // end of a token that started in the previous haystack + if (*token_len != 0) + return true; + + *pos += haystack_length; + continue; + } + + const auto start = getTrailingZeroBits(result_bitmask); + if (*token_len == 0) + *token_start = *pos + start; + + const auto l = getTrailingZeroBits(~(result_bitmask >> start)); + *token_len += l; + + *pos += start + l; + if (start + l == 16) + // check if there are leftovers in the next `haystack` + continue; + + return true; +#else if (isASCII(data[*pos]) && !isAlphaNumericASCII(data[*pos])) { /// Finish current token if any @@ -621,6 +673,7 @@ bool SplitTokenExtractor::next(const char * data, size_t len, size_t * pos, size ++*pos; ++*token_len; } +#endif } return *token_len > 0; } diff --git a/dbms/src/Storages/tests/gtest_SplitTokenExtractor.cpp b/dbms/src/Storages/tests/gtest_SplitTokenExtractor.cpp new file mode 100644 index 00000000000..e69de29bb2d From 0b3e81aef0e554da74a1d33f436552cf787886e8 Mon Sep 17 00:00:00 2001 From: alesapin Date: Tue, 31 Mar 2020 19:18:18 +0300 Subject: [PATCH 022/484] Add limited support for nested structs --- dbms/src/Storages/AlterCommands.cpp | 26 ++++++++++++- dbms/src/Storages/ColumnsDescription.cpp | 1 + .../MergeTree/MergeTreeDataMergerMutator.cpp | 17 +++++++-- .../01213_alter_rename_nested.reference | 6 +++ .../0_stateless/01213_alter_rename_nested.sql | 34 +++++++++++++++++ .../01213_alter_table_rename_nested.sql | 38 +++++++++++++++++++ 6 files changed, 117 insertions(+), 5 deletions(-) create mode 100644 dbms/tests/queries/0_stateless/01213_alter_rename_nested.reference create mode 100644 dbms/tests/queries/0_stateless/01213_alter_rename_nested.sql create mode 100644 dbms/tests/queries/0_stateless/01213_alter_table_rename_nested.sql diff --git a/dbms/src/Storages/AlterCommands.cpp b/dbms/src/Storages/AlterCommands.cpp index 14ff07daa12..1b135563095 100644 --- a/dbms/src/Storages/AlterCommands.cpp +++ b/dbms/src/Storages/AlterCommands.cpp @@ -39,6 +39,7 @@ namespace ErrorCodes extern const int NOT_FOUND_COLUMN_IN_BLOCK; extern const int LOGICAL_ERROR; extern const int DUPLICATE_COLUMN; + extern const int NOT_IMPLEMENTED; } @@ -778,6 +779,12 @@ void AlterCommands::validate(const StorageInMemoryMetadata & metadata, const Con } else if (command.type == AlterCommand::RENAME_COLUMN) { + /// TODO Implement nested rename + if (metadata.columns.hasNested(command.column_name)) + { + throw Exception{"Cannot rename whole Nested struct", ErrorCodes::NOT_IMPLEMENTED}; + } + + if
(!metadata.columns.has(command.column_name)) { if (!command.if_exists) @@ -797,7 +804,24 @@ void AlterCommands::validate(const StorageInMemoryMetadata & metadata, const Con throw Exception{"Rename loop detected in ALTER query", ErrorCodes::BAD_ARGUMENTS}; - renames_map[command.column_name] = command.rename_to; + String from_nested_table_name = Nested::extractTableName(command.column_name); + String to_nested_table_name = Nested::extractTableName(command.rename_to); + bool from_nested = from_nested_table_name != command.column_name; + bool to_nested = to_nested_table_name != command.rename_to; + + if (from_nested && to_nested) + { + if (from_nested_table_name != to_nested_table_name) + throw Exception{"Cannot rename column from one nested name to another", ErrorCodes::BAD_ARGUMENTS}; + } + else if (!from_nested && !to_nested) + { + renames_map[command.column_name] = command.rename_to; + } + else + { + throw Exception{"Cannot rename column from nested struct to normal column and vice versa", ErrorCodes::BAD_ARGUMENTS}; + } } /// Collect default expressions for MODIFY and ADD comands diff --git a/dbms/src/Storages/ColumnsDescription.cpp b/dbms/src/Storages/ColumnsDescription.cpp index 787fa2b739a..f7d00ea2d54 100644 --- a/dbms/src/Storages/ColumnsDescription.cpp +++ b/dbms/src/Storages/ColumnsDescription.cpp @@ -38,6 +38,7 @@ namespace ErrorCodes extern const int ILLEGAL_COLUMN; extern const int CANNOT_PARSE_TEXT; extern const int THERE_IS_NO_DEFAULT_VALUE; + extern const int LOGICAL_ERROR; } ColumnDescription::ColumnDescription(String name_, DataTypePtr type_, bool is_virtual_) diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp b/dbms/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp index 64650165c28..d1dd2861202 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -1061,7 +1062,6 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mutatePartToTempor NameSet files_to_skip = collectFilesToSkip(updated_header, indices_to_recalc, mrk_extension); NameToNameMap files_to_rename = collectFilesForRenames(source_part, for_file_renames, mrk_extension); - if (need_remove_expired_values) files_to_skip.insert("ttl.txt"); @@ -1108,7 +1108,9 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mutatePartToTempor for (const auto & [rename_from, rename_to] : files_to_rename) { if (rename_to.empty() && new_data_part->checksums.files.count(rename_from)) + { new_data_part->checksums.files.erase(rename_from); + } else if (new_data_part->checksums.files.count(rename_from)) { new_data_part->checksums.files[rename_to] = new_data_part->checksums.files[rename_from]; @@ -1329,13 +1331,20 @@ NameToNameMap MergeTreeDataMergerMutator::collectFilesForRenames( } else if (command.type == MutationCommand::Type::RENAME_COLUMN) { + String escaped_name_from = escapeForFileName(command.column_name); + String escaped_name_to = escapeForFileName(command.rename_to); + IDataType::StreamCallback callback = [&](const IDataType::SubstreamPath & substream_path) { String stream_from = IDataType::getFileNameForStream(command.column_name, substream_path); - String stream_to = boost::replace_first_copy(stream_from, command.column_name, command.rename_to); - rename_map.emplace(stream_from + ".bin", stream_to + ".bin"); - rename_map.emplace(stream_from + mrk_extension, stream_to + mrk_extension); + String stream_to = 
boost::replace_first_copy(stream_from, escaped_name_from, escaped_name_to); + + if (stream_from != stream_to) + { + rename_map.emplace(stream_from + ".bin", stream_to + ".bin"); + rename_map.emplace(stream_from + mrk_extension, stream_to + mrk_extension); + } }; IDataType::SubstreamPath stream_path; auto column = source_part->getColumns().tryGetByName(command.column_name); diff --git a/dbms/tests/queries/0_stateless/01213_alter_rename_nested.reference b/dbms/tests/queries/0_stateless/01213_alter_rename_nested.reference new file mode 100644 index 00000000000..51647dc2e7b --- /dev/null +++ b/dbms/tests/queries/0_stateless/01213_alter_rename_nested.reference @@ -0,0 +1,6 @@ +[8,9,10] +['a','b','c'] +CREATE TABLE default.table_for_rename_nested (`date` Date, `key` UInt64, `n.x` Array(UInt32), `n.y` Array(String), `value1` String) ENGINE = MergeTree() PARTITION BY date ORDER BY key SETTINGS index_granularity = 8192 +CREATE TABLE default.table_for_rename_nested (`date` Date, `key` UInt64, `n.renamed_x` Array(UInt32), `n.renamed_y` Array(String), `value1` String) ENGINE = MergeTree() PARTITION BY date ORDER BY key SETTINGS index_granularity = 8192 +7 [8,9,10] +7 ['a','b','c'] diff --git a/dbms/tests/queries/0_stateless/01213_alter_rename_nested.sql b/dbms/tests/queries/0_stateless/01213_alter_rename_nested.sql new file mode 100644 index 00000000000..8dc221ef388 --- /dev/null +++ b/dbms/tests/queries/0_stateless/01213_alter_rename_nested.sql @@ -0,0 +1,34 @@ +DROP TABLE IF EXISTS table_for_rename_nested; +CREATE TABLE table_for_rename_nested +( + date Date, + key UInt64, + n Nested(x UInt32, y String), + value1 String +) +ENGINE = MergeTree() +PARTITION BY date +ORDER BY key; + +INSERT INTO table_for_rename_nested (date, key, n.x, n.y, value1) SELECT toDate('2019-10-01'), number, [number + 1, number + 2, number + 3], ['a', 'b', 'c'], toString(number) FROM numbers(10); + +SELECT n.x FROM table_for_rename_nested WHERE key = 7; +SELECT n.y FROM table_for_rename_nested WHERE key = 7; + +SHOW CREATE TABLE table_for_rename_nested; + +ALTER TABLE table_for_rename_nested RENAME COLUMN n.x TO n.renamed_x; +ALTER TABLE table_for_rename_nested RENAME COLUMN n.y TO n.renamed_y; + +SHOW CREATE TABLE table_for_rename_nested; + +SELECT key, n.renamed_x FROM table_for_rename_nested WHERE key = 7; +SELECT key, n.renamed_y FROM table_for_rename_nested WHERE key = 7; + +ALTER TABLE table_for_rename_nested RENAME COLUMN n.renamed_x TO not_nested_x; --{serverError 36} + +-- Currently not implemented +ALTER TABLE table_for_rename_nested RENAME COLUMN n TO renamed_n; --{serverError 48} + +DROP TABLE IF EXISTS table_for_rename_nested; + diff --git a/dbms/tests/queries/0_stateless/01213_alter_table_rename_nested.sql b/dbms/tests/queries/0_stateless/01213_alter_table_rename_nested.sql new file mode 100644 index 00000000000..e08e3c0c3b1 --- /dev/null +++ b/dbms/tests/queries/0_stateless/01213_alter_table_rename_nested.sql @@ -0,0 +1,38 @@ +DROP TABLE IF EXISTS table_for_rename_nested; +CREATE TABLE table_for_rename_nested +( + date Date, + key UInt64, + n Nested(x UInt32, y String), + value1 String +) +ENGINE = MergeTree() +PARTITION BY date +ORDER BY key; + +INSERT INTO table_for_rename_nested (date, key, n.x, n.y, value1) SELECT toDate('2019-10-01'), number, [number + 1, number + 2, number + 3], ['a', 'b', 'c'], toString(number) FROM numbers(10); + +SELECT n.x FROM table_for_rename_nested WHERE key = 7; +SELECT n.y FROM table_for_rename_nested WHERE key = 7; + +SHOW CREATE TABLE table_for_rename_nested; + +ALTER TABLE 
table_for_rename_nested RENAME COLUMN n.x TO n.renamed_x; +ALTER TABLE table_for_rename_nested RENAME COLUMN n.y TO n.renamed_y; + +SHOW CREATE TABLE table_for_rename_nested; + +SELECT key, n.renamed_x FROM table_for_rename_nested WHERE key = 7; +SELECT key, n.renamed_y FROM table_for_rename_nested WHERE key = 7; + +ALTER TABLE table_for_rename_nested RENAME COLUMN n.renamed_x TO not_nested_x; --{serverError 36} + +ALTER TABLE table_for_rename_nested RENAME COLUMN n.renamed_x TO q.renamed_x; --{serverError 36} + +ALTER TABLE table_for_rename_nested RENAME COLUMN value1 TO q.renamed_x; --{serverError 36} + +-- Currently not implemented +ALTER TABLE table_for_rename_nested RENAME COLUMN n TO renamed_n; --{serverError 48} + +DROP TABLE IF EXISTS table_for_rename_nested; + From d99dca6e48466c7021c70eae469a7705c5516af7 Mon Sep 17 00:00:00 2001 From: alesapin Date: Tue, 31 Mar 2020 22:14:34 +0300 Subject: [PATCH 023/484] Add missing file --- .../0_stateless/01213_alter_table_rename_nested.reference | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 dbms/tests/queries/0_stateless/01213_alter_table_rename_nested.reference diff --git a/dbms/tests/queries/0_stateless/01213_alter_table_rename_nested.reference b/dbms/tests/queries/0_stateless/01213_alter_table_rename_nested.reference new file mode 100644 index 00000000000..51647dc2e7b --- /dev/null +++ b/dbms/tests/queries/0_stateless/01213_alter_table_rename_nested.reference @@ -0,0 +1,6 @@ +[8,9,10] +['a','b','c'] +CREATE TABLE default.table_for_rename_nested (`date` Date, `key` UInt64, `n.x` Array(UInt32), `n.y` Array(String), `value1` String) ENGINE = MergeTree() PARTITION BY date ORDER BY key SETTINGS index_granularity = 8192 +CREATE TABLE default.table_for_rename_nested (`date` Date, `key` UInt64, `n.renamed_x` Array(UInt32), `n.renamed_y` Array(String), `value1` String) ENGINE = MergeTree() PARTITION BY date ORDER BY key SETTINGS index_granularity = 8192 +7 [8,9,10] +7 ['a','b','c'] From 46322370c004f00662b68dd6eea5d6f2efdff26f Mon Sep 17 00:00:00 2001 From: alesapin Date: Wed, 1 Apr 2020 15:43:09 +0300 Subject: [PATCH 024/484] Revert "Remove useless code around locks" --- .../PushingToViewsBlockOutputStream.cpp | 4 +-- dbms/src/Databases/DatabaseMySQL.cpp | 2 +- dbms/src/Functions/FunctionJoinGet.cpp | 2 +- .../Interpreters/InterpreterAlterQuery.cpp | 6 ++--- .../Interpreters/InterpreterCreateQuery.cpp | 2 +- .../Interpreters/InterpreterDescribeQuery.cpp | 2 +- .../Interpreters/InterpreterInsertQuery.cpp | 2 +- .../Interpreters/InterpreterSelectQuery.cpp | 2 +- dbms/src/Storages/IStorage.cpp | 15 +++++++---- dbms/src/Storages/IStorage.h | 11 +++++--- .../src/Storages/LiveView/StorageLiveView.cpp | 11 +++++--- dbms/src/Storages/LiveView/StorageLiveView.h | 2 +- .../Storages/MergeTree/DataPartsExchange.cpp | 2 +- .../ReplicatedMergeTreeCleanupThread.cpp | 2 +- .../ReplicatedMergeTreePartCheckThread.cpp | 2 +- dbms/src/Storages/StorageBuffer.cpp | 2 +- dbms/src/Storages/StorageMaterializedView.cpp | 4 +-- dbms/src/Storages/StorageMerge.cpp | 4 +-- dbms/src/Storages/StorageMergeTree.cpp | 20 +++++++------- .../Storages/StorageReplicatedMergeTree.cpp | 26 +++++++++---------- .../Storages/System/StorageSystemColumns.cpp | 2 +- .../System/StorageSystemPartsBase.cpp | 2 +- .../Storages/System/StorageSystemTables.cpp | 2 +- dbms/src/Storages/TableStructureLockHolder.h | 9 ++++--- 24 files changed, 77 insertions(+), 61 deletions(-) diff --git a/dbms/src/DataStreams/PushingToViewsBlockOutputStream.cpp
b/dbms/src/DataStreams/PushingToViewsBlockOutputStream.cpp index 5752fbaff96..991d206777a 100644 --- a/dbms/src/DataStreams/PushingToViewsBlockOutputStream.cpp +++ b/dbms/src/DataStreams/PushingToViewsBlockOutputStream.cpp @@ -25,7 +25,7 @@ PushingToViewsBlockOutputStream::PushingToViewsBlockOutputStream( * Although now any insertion into the table is done via PushingToViewsBlockOutputStream, * but it's clear that here is not the best place for this functionality. */ - addTableLock(storage->lockStructureForShare(context.getInitialQueryId())); + addTableLock(storage->lockStructureForShare(true, context.getInitialQueryId())); /// If the "root" table deduplactes blocks, there are no need to make deduplication for children /// Moreover, deduplication for AggregatingMergeTree children could produce false positives due to low size of inserting blocks @@ -54,7 +54,7 @@ PushingToViewsBlockOutputStream::PushingToViewsBlockOutputStream( if (auto * materialized_view = dynamic_cast(dependent_table.get())) { - addTableLock(materialized_view->lockStructureForShare(context.getInitialQueryId())); + addTableLock(materialized_view->lockStructureForShare(true, context.getInitialQueryId())); StoragePtr inner_table = materialized_view->getTargetTable(); auto inner_table_id = inner_table->getStorageID(); diff --git a/dbms/src/Databases/DatabaseMySQL.cpp b/dbms/src/Databases/DatabaseMySQL.cpp index ad40cff9e6b..959121585ea 100644 --- a/dbms/src/Databases/DatabaseMySQL.cpp +++ b/dbms/src/Databases/DatabaseMySQL.cpp @@ -358,7 +358,7 @@ void DatabaseMySQL::cleanOutdatedTables() ++iterator; else { - const auto table_lock = (*iterator)->lockAlterIntention(); + const auto table_lock = (*iterator)->lockAlterIntention(RWLockImpl::NO_QUERY); (*iterator)->shutdown(); (*iterator)->is_dropped = true; diff --git a/dbms/src/Functions/FunctionJoinGet.cpp b/dbms/src/Functions/FunctionJoinGet.cpp index 6a6c0c4a97e..0860deccb14 100644 --- a/dbms/src/Functions/FunctionJoinGet.cpp +++ b/dbms/src/Functions/FunctionJoinGet.cpp @@ -65,7 +65,7 @@ FunctionBaseImplPtr JoinGetOverloadResolver::build(const ColumnsWithTypeAndName auto join = storage_join->getJoin(); DataTypes data_types(arguments.size()); - auto table_lock = storage_join->lockStructureForShare(context.getInitialQueryId()); + auto table_lock = storage_join->lockStructureForShare(false, context.getInitialQueryId()); for (size_t i = 0; i < arguments.size(); ++i) data_types[i] = arguments[i].type; diff --git a/dbms/src/Interpreters/InterpreterAlterQuery.cpp b/dbms/src/Interpreters/InterpreterAlterQuery.cpp index ddf1e27af87..315527765ef 100644 --- a/dbms/src/Interpreters/InterpreterAlterQuery.cpp +++ b/dbms/src/Interpreters/InterpreterAlterQuery.cpp @@ -82,7 +82,7 @@ BlockIO InterpreterAlterQuery::execute() if (!mutation_commands.empty()) { - auto table_lock_holder = table->lockStructureForShare(context.getCurrentQueryId()); + auto table_lock_holder = table->lockStructureForShare(false /* because mutation is executed asyncronously */, context.getCurrentQueryId()); MutationsInterpreter(table, mutation_commands, context, false).validate(table_lock_holder); table->mutate(mutation_commands, context); } @@ -101,7 +101,7 @@ BlockIO InterpreterAlterQuery::execute() switch (command.type) { case LiveViewCommand::REFRESH: - live_view->refresh(); + live_view->refresh(context); break; } } @@ -109,7 +109,7 @@ BlockIO InterpreterAlterQuery::execute() if (!alter_commands.empty()) { - auto table_lock_holder = table->lockAlterIntention(); + auto table_lock_holder = 
table->lockAlterIntention(context.getCurrentQueryId()); StorageInMemoryMetadata metadata = table->getInMemoryMetadata(); alter_commands.validate(metadata, context); alter_commands.prepare(metadata); diff --git a/dbms/src/Interpreters/InterpreterCreateQuery.cpp b/dbms/src/Interpreters/InterpreterCreateQuery.cpp index b57604828e1..f15796688e1 100644 --- a/dbms/src/Interpreters/InterpreterCreateQuery.cpp +++ b/dbms/src/Interpreters/InterpreterCreateQuery.cpp @@ -403,7 +403,7 @@ InterpreterCreateQuery::TableProperties InterpreterCreateQuery::setProperties(AS StoragePtr as_storage = DatabaseCatalog::instance().getTable({as_database_name, create.as_table}); /// as_storage->getColumns() and setEngine(...) must be called under structure lock of other_table for CREATE ... AS other_table. - as_storage_lock = as_storage->lockStructureForShare(context.getCurrentQueryId()); + as_storage_lock = as_storage->lockStructureForShare(false, context.getCurrentQueryId()); properties.columns = as_storage->getColumns(); /// Secondary indices make sense only for MergeTree family of storage engines. diff --git a/dbms/src/Interpreters/InterpreterDescribeQuery.cpp b/dbms/src/Interpreters/InterpreterDescribeQuery.cpp index cf7bb0458e9..1353c01ebf6 100644 --- a/dbms/src/Interpreters/InterpreterDescribeQuery.cpp +++ b/dbms/src/Interpreters/InterpreterDescribeQuery.cpp @@ -89,7 +89,7 @@ BlockInputStreamPtr InterpreterDescribeQuery::executeImpl() table = DatabaseCatalog::instance().getTable(table_id); } - auto table_lock = table->lockStructureForShare(context.getInitialQueryId()); + auto table_lock = table->lockStructureForShare(false, context.getInitialQueryId()); columns = table->getColumns(); } diff --git a/dbms/src/Interpreters/InterpreterInsertQuery.cpp b/dbms/src/Interpreters/InterpreterInsertQuery.cpp index f12ac68cede..b4280ee20e6 100644 --- a/dbms/src/Interpreters/InterpreterInsertQuery.cpp +++ b/dbms/src/Interpreters/InterpreterInsertQuery.cpp @@ -109,7 +109,7 @@ BlockIO InterpreterInsertQuery::execute() BlockIO res; StoragePtr table = getTable(query); - auto table_lock = table->lockStructureForShare(context.getInitialQueryId()); + auto table_lock = table->lockStructureForShare(true, context.getInitialQueryId()); auto query_sample_block = getSampleBlock(query, table); if (!query.table_function) diff --git a/dbms/src/Interpreters/InterpreterSelectQuery.cpp b/dbms/src/Interpreters/InterpreterSelectQuery.cpp index b08e0ce1146..514efb90a00 100644 --- a/dbms/src/Interpreters/InterpreterSelectQuery.cpp +++ b/dbms/src/Interpreters/InterpreterSelectQuery.cpp @@ -255,7 +255,7 @@ InterpreterSelectQuery::InterpreterSelectQuery( if (storage) { - table_lock = storage->lockStructureForShare(context->getInitialQueryId()); + table_lock = storage->lockStructureForShare(false, context->getInitialQueryId()); table_id = storage->getStorageID(); } diff --git a/dbms/src/Storages/IStorage.cpp b/dbms/src/Storages/IStorage.cpp index c36a28b115f..4d916ca1b46 100644 --- a/dbms/src/Storages/IStorage.cpp +++ b/dbms/src/Storages/IStorage.cpp @@ -314,9 +314,11 @@ bool IStorage::isVirtualColumn(const String & column_name) const return getColumns().get(column_name).is_virtual; } -TableStructureReadLockHolder IStorage::lockStructureForShare(const String & query_id) +TableStructureReadLockHolder IStorage::lockStructureForShare(bool will_add_new_data, const String & query_id) { TableStructureReadLockHolder result; + if (will_add_new_data) + result.new_data_structure_lock = new_data_structure_lock->getLock(RWLockImpl::Read, query_id); 
result.structure_lock = structure_lock->getLock(RWLockImpl::Read, query_id); if (is_dropped) @@ -324,10 +326,10 @@ TableStructureReadLockHolder IStorage::lockStructureForShare(const String & quer return result; } -TableStructureWriteLockHolder IStorage::lockAlterIntention() +TableStructureWriteLockHolder IStorage::lockAlterIntention(const String & query_id) { TableStructureWriteLockHolder result; - result.alter_lock = std::unique_lock(alter_lock); + result.alter_intention_lock = alter_intention_lock->getLock(RWLockImpl::Write, query_id); if (is_dropped) throw Exception("Table is dropped", ErrorCodes::TABLE_IS_DROPPED); @@ -336,20 +338,23 @@ TableStructureWriteLockHolder IStorage::lockAlterIntention() void IStorage::lockStructureExclusively(TableStructureWriteLockHolder & lock_holder, const String & query_id) { - if (!lock_holder.alter_lock) + if (!lock_holder.alter_intention_lock) throw Exception("Alter intention lock for table " + getStorageID().getNameForLogs() + " was not taken. This is a bug.", ErrorCodes::LOGICAL_ERROR); + if (!lock_holder.new_data_structure_lock) + lock_holder.new_data_structure_lock = new_data_structure_lock->getLock(RWLockImpl::Write, query_id); lock_holder.structure_lock = structure_lock->getLock(RWLockImpl::Write, query_id); } TableStructureWriteLockHolder IStorage::lockExclusively(const String & query_id) { TableStructureWriteLockHolder result; - result.alter_lock = std::unique_lock(alter_lock); + result.alter_intention_lock = alter_intention_lock->getLock(RWLockImpl::Write, query_id); if (is_dropped) throw Exception("Table is dropped", ErrorCodes::TABLE_IS_DROPPED); + result.new_data_structure_lock = new_data_structure_lock->getLock(RWLockImpl::Write, query_id); result.structure_lock = structure_lock->getLock(RWLockImpl::Write, query_id); return result; diff --git a/dbms/src/Storages/IStorage.h b/dbms/src/Storages/IStorage.h index 469f39d65df..d3cede6e5c8 100644 --- a/dbms/src/Storages/IStorage.h +++ b/dbms/src/Storages/IStorage.h @@ -199,11 +199,11 @@ public: /// Acquire this lock if you need the table structure to remain constant during the execution of /// the query. If will_add_new_data is true, this means that the query will add new data to the table /// (INSERT or a parts merge). - TableStructureReadLockHolder lockStructureForShare(const String & query_id); + TableStructureReadLockHolder lockStructureForShare(bool will_add_new_data, const String & query_id); /// Acquire this lock at the start of ALTER to lock out other ALTERs and make sure that only you /// can modify the table structure. It can later be upgraded to the exclusive lock. - TableStructureWriteLockHolder lockAlterIntention(); + TableStructureWriteLockHolder lockAlterIntention(const String & query_id); /// Upgrade alter intention lock to the full exclusive structure lock. This is done by ALTER queries /// to ensure that no other query uses the table structure and it can be safely changed. @@ -490,7 +490,12 @@ private: /// If you hold this lock exclusively, you can be sure that no other structure modifying queries /// (e.g. ALTER, DROP) are concurrently executing. But queries that only read table structure /// (e.g. SELECT, INSERT) can continue to execute. - mutable std::mutex alter_lock; + mutable RWLock alter_intention_lock = RWLockImpl::create(); + + /// It is taken for share for the entire INSERT query and the entire merge of the parts (for MergeTree). 
+ /// ALTER COLUMN queries acquire an exclusive lock to ensure that no new parts with the old structure + /// are added to the table and thus the set of parts to modify doesn't change. + mutable RWLock new_data_structure_lock = RWLockImpl::create(); /// Lock for the table column structure (names, types, etc.) and data path. /// It is taken in exclusive mode by queries that modify them (e.g. RENAME, ALTER and DROP) diff --git a/dbms/src/Storages/LiveView/StorageLiveView.cpp b/dbms/src/Storages/LiveView/StorageLiveView.cpp index 93d183a594f..049110a3294 100644 --- a/dbms/src/Storages/LiveView/StorageLiveView.cpp +++ b/dbms/src/Storages/LiveView/StorageLiveView.cpp @@ -517,11 +517,14 @@ void StorageLiveView::drop(TableStructureWriteLockHolder &) condition.notify_all(); } -void StorageLiveView::refresh() +void StorageLiveView::refresh(const Context & context) { - std::lock_guard lock(mutex); - if (getNewBlocks()) - condition.notify_all(); + auto alter_lock = lockAlterIntention(context.getCurrentQueryId()); + { + std::lock_guard lock(mutex); + if (getNewBlocks()) + condition.notify_all(); + } } Pipes StorageLiveView::read( diff --git a/dbms/src/Storages/LiveView/StorageLiveView.h b/dbms/src/Storages/LiveView/StorageLiveView.h index b3ed89f8d10..9186132f99d 100644 --- a/dbms/src/Storages/LiveView/StorageLiveView.h +++ b/dbms/src/Storages/LiveView/StorageLiveView.h @@ -123,7 +123,7 @@ public: void startup() override; void shutdown() override; - void refresh(); + void refresh(const Context & context); Pipes read( const Names & column_names, diff --git a/dbms/src/Storages/MergeTree/DataPartsExchange.cpp b/dbms/src/Storages/MergeTree/DataPartsExchange.cpp index 1b216e8bec3..6373c85a15d 100644 --- a/dbms/src/Storages/MergeTree/DataPartsExchange.cpp +++ b/dbms/src/Storages/MergeTree/DataPartsExchange.cpp @@ -85,7 +85,7 @@ void Service::processQuery(const Poco::Net::HTMLForm & params, ReadBuffer & /*bo try { - auto storage_lock = data.lockStructureForShare(RWLockImpl::NO_QUERY); + auto storage_lock = data.lockStructureForShare(false, RWLockImpl::NO_QUERY); MergeTreeData::DataPartPtr part = findPart(part_name); diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp b/dbms/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp index 27ad6871573..77a5bca7a92 100644 --- a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp +++ b/dbms/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp @@ -57,7 +57,7 @@ void ReplicatedMergeTreeCleanupThread::iterate() { /// TODO: Implement tryLockStructureForShare. 
- auto lock = storage.lockStructureForShare(""); + auto lock = storage.lockStructureForShare(false, ""); storage.clearOldTemporaryDirectories(); } diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp b/dbms/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp index 5c8f878503a..17b716d14c2 100644 --- a/dbms/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp +++ b/dbms/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp @@ -203,7 +203,7 @@ CheckResult ReplicatedMergeTreePartCheckThread::checkPart(const String & part_na else if (part->name == part_name) { auto zookeeper = storage.getZooKeeper(); - auto table_lock = storage.lockStructureForShare(RWLockImpl::NO_QUERY); + auto table_lock = storage.lockStructureForShare(false, RWLockImpl::NO_QUERY); auto local_part_header = ReplicatedMergeTreePartHeader::fromColumnsAndChecksums( part->getColumns(), part->checksums); diff --git a/dbms/src/Storages/StorageBuffer.cpp b/dbms/src/Storages/StorageBuffer.cpp index 53fb257d58d..7699f8379d9 100644 --- a/dbms/src/Storages/StorageBuffer.cpp +++ b/dbms/src/Storages/StorageBuffer.cpp @@ -168,7 +168,7 @@ Pipes StorageBuffer::read( if (destination.get() == this) throw Exception("Destination table is myself. Read will cause infinite loop.", ErrorCodes::INFINITE_LOOP); - auto destination_lock = destination->lockStructureForShare(context.getCurrentQueryId()); + auto destination_lock = destination->lockStructureForShare(false, context.getCurrentQueryId()); const bool dst_has_same_structure = std::all_of(column_names.begin(), column_names.end(), [this, destination](const String& column_name) { diff --git a/dbms/src/Storages/StorageMaterializedView.cpp b/dbms/src/Storages/StorageMaterializedView.cpp index 63031572cd6..3fb25bf8275 100644 --- a/dbms/src/Storages/StorageMaterializedView.cpp +++ b/dbms/src/Storages/StorageMaterializedView.cpp @@ -185,7 +185,7 @@ Pipes StorageMaterializedView::read( const unsigned num_streams) { auto storage = getTargetTable(); - auto lock = storage->lockStructureForShare(context.getCurrentQueryId()); + auto lock = storage->lockStructureForShare(false, context.getCurrentQueryId()); if (query_info.order_by_optimizer) query_info.input_sorting_info = query_info.order_by_optimizer->getInputOrder(storage); @@ -200,7 +200,7 @@ Pipes StorageMaterializedView::read( BlockOutputStreamPtr StorageMaterializedView::write(const ASTPtr & query, const Context & context) { auto storage = getTargetTable(); - auto lock = storage->lockStructureForShare(context.getCurrentQueryId()); + auto lock = storage->lockStructureForShare(true, context.getCurrentQueryId()); auto stream = storage->write(query, context); stream->addTableLock(lock); return stream; diff --git a/dbms/src/Storages/StorageMerge.cpp b/dbms/src/Storages/StorageMerge.cpp index f102ee1c6f8..f3322c7dfff 100644 --- a/dbms/src/Storages/StorageMerge.cpp +++ b/dbms/src/Storages/StorageMerge.cpp @@ -364,7 +364,7 @@ StorageMerge::StorageListWithLocks StorageMerge::getSelectedTables(const String { auto & table = iterator->table(); if (table.get() != this) - selected_tables.emplace_back(table, table->lockStructureForShare(query_id), iterator->name()); + selected_tables.emplace_back(table, table->lockStructureForShare(false, query_id), iterator->name()); iterator->next(); } @@ -389,7 +389,7 @@ StorageMerge::StorageListWithLocks StorageMerge::getSelectedTables(const ASTPtr if (storage.get() != this) { - selected_tables.emplace_back(storage, storage->lockStructureForShare(query_id), iterator->name()); + 
selected_tables.emplace_back(storage, storage->lockStructureForShare(false, query_id), iterator->name()); virtual_column->insert(iterator->name()); } diff --git a/dbms/src/Storages/StorageMergeTree.cpp b/dbms/src/Storages/StorageMergeTree.cpp index 64950a47437..2efeff19657 100644 --- a/dbms/src/Storages/StorageMergeTree.cpp +++ b/dbms/src/Storages/StorageMergeTree.cpp @@ -241,7 +241,7 @@ void StorageMergeTree::alter( DatabaseCatalog::instance().getDatabase(table_id.database_name)->alterTable(context, table_id.table_name, metadata); - /// We release all locks except alter_lock which allows + /// We release all locks except alter_intention_lock which allows /// to execute alter queries sequentially table_lock_holder.releaseAllExceptAlterIntention(); @@ -537,7 +537,7 @@ bool StorageMergeTree::merge( bool deduplicate, String * out_disable_reason) { - auto table_lock_holder = lockStructureForShare(RWLockImpl::NO_QUERY); + auto table_lock_holder = lockStructureForShare(true, RWLockImpl::NO_QUERY); FutureMergedMutatedPart future_part; @@ -655,7 +655,7 @@ BackgroundProcessingPoolTaskResult StorageMergeTree::movePartsTask() bool StorageMergeTree::tryMutatePart() { - auto table_lock_holder = lockStructureForShare(RWLockImpl::NO_QUERY); + auto table_lock_holder = lockStructureForShare(true, RWLockImpl::NO_QUERY); size_t max_ast_elements = global_context.getSettingsRef().max_expanded_ast_elements; FutureMergedMutatedPart future_part; @@ -780,7 +780,7 @@ BackgroundProcessingPoolTaskResult StorageMergeTree::mergeMutateTask() { { /// TODO: Implement tryLockStructureForShare. - auto lock_structure = lockStructureForShare(""); + auto lock_structure = lockStructureForShare(false, ""); clearOldPartsFromFilesystem(); clearOldTemporaryDirectories(); } @@ -973,14 +973,14 @@ void StorageMergeTree::alterPartition(const ASTPtr & query, const PartitionComma case PartitionCommand::FREEZE_PARTITION: { - auto lock = lockStructureForShare(context.getCurrentQueryId()); + auto lock = lockStructureForShare(false, context.getCurrentQueryId()); freezePartition(command.partition, command.with_name, context, lock); } break; case PartitionCommand::FREEZE_ALL_PARTITIONS: { - auto lock = lockStructureForShare(context.getCurrentQueryId()); + auto lock = lockStructureForShare(false, context.getCurrentQueryId()); freezeAll(command.with_name, context, lock); } break; @@ -1045,8 +1045,8 @@ void StorageMergeTree::attachPartition(const ASTPtr & partition, bool attach_par void StorageMergeTree::replacePartitionFrom(const StoragePtr & source_table, const ASTPtr & partition, bool replace, const Context & context) { - auto lock1 = lockStructureForShare(context.getCurrentQueryId()); - auto lock2 = source_table->lockStructureForShare(context.getCurrentQueryId()); + auto lock1 = lockStructureForShare(false, context.getCurrentQueryId()); + auto lock2 = source_table->lockStructureForShare(false, context.getCurrentQueryId()); Stopwatch watch; MergeTreeData & src_data = checkStructureAndGetMergeTreeData(source_table); @@ -1116,8 +1116,8 @@ void StorageMergeTree::replacePartitionFrom(const StoragePtr & source_table, con void StorageMergeTree::movePartitionToTable(const StoragePtr & dest_table, const ASTPtr & partition, const Context & context) { - auto lock1 = lockStructureForShare(context.getCurrentQueryId()); - auto lock2 = dest_table->lockStructureForShare(context.getCurrentQueryId()); + auto lock1 = lockStructureForShare(false, context.getCurrentQueryId()); + auto lock2 = dest_table->lockStructureForShare(false, context.getCurrentQueryId()); 
auto dest_table_storage = std::dynamic_pointer_cast(dest_table); if (!dest_table_storage) diff --git a/dbms/src/Storages/StorageReplicatedMergeTree.cpp b/dbms/src/Storages/StorageReplicatedMergeTree.cpp index 336fef069d0..8896151561b 100644 --- a/dbms/src/Storages/StorageReplicatedMergeTree.cpp +++ b/dbms/src/Storages/StorageReplicatedMergeTree.cpp @@ -1025,7 +1025,7 @@ bool StorageReplicatedMergeTree::tryExecuteMerge(const LogEntry & entry) ReservationPtr reserved_space = reserveSpacePreferringTTLRules(estimated_space_for_merge, ttl_infos, time(nullptr), max_volume_index); - auto table_lock = lockStructureForShare(RWLockImpl::NO_QUERY); + auto table_lock = lockStructureForShare(false, RWLockImpl::NO_QUERY); FutureMergedMutatedPart future_merged_part(parts, entry.new_part_type); if (future_merged_part.name != entry.new_part_name) @@ -1160,7 +1160,7 @@ bool StorageReplicatedMergeTree::tryExecutePartMutation(const StorageReplicatedM /// Can throw an exception. ReservationPtr reserved_space = reserveSpace(estimated_space_for_result, source_part->disk); - auto table_lock = lockStructureForShare(RWLockImpl::NO_QUERY); + auto table_lock = lockStructureForShare(false, RWLockImpl::NO_QUERY); MutableDataPartPtr new_part; Transaction transaction(*this); @@ -1514,7 +1514,7 @@ bool StorageReplicatedMergeTree::executeReplaceRange(const LogEntry & entry) PartDescriptions parts_to_add; DataPartsVector parts_to_remove; - auto table_lock_holder_dst_table = lockStructureForShare(RWLockImpl::NO_QUERY); + auto table_lock_holder_dst_table = lockStructureForShare(false, RWLockImpl::NO_QUERY); for (size_t i = 0; i < entry_replace.new_part_names.size(); ++i) { @@ -1576,7 +1576,7 @@ bool StorageReplicatedMergeTree::executeReplaceRange(const LogEntry & entry) return 0; } - table_lock_holder_src_table = source_table->lockStructureForShare(RWLockImpl::NO_QUERY); + table_lock_holder_src_table = source_table->lockStructureForShare(false, RWLockImpl::NO_QUERY); DataPartStates valid_states{MergeTreeDataPartState::PreCommitted, MergeTreeDataPartState::Committed, MergeTreeDataPartState::Outdated}; @@ -2699,7 +2699,7 @@ bool StorageReplicatedMergeTree::fetchPart(const String & part_name, const Strin TableStructureReadLockHolder table_lock_holder; if (!to_detached) - table_lock_holder = lockStructureForShare(RWLockImpl::NO_QUERY); + table_lock_holder = lockStructureForShare(true, RWLockImpl::NO_QUERY); /// Logging Stopwatch stopwatch; @@ -3223,7 +3223,7 @@ void StorageReplicatedMergeTree::alter( alter_entry.emplace(); mutation_znode.reset(); - /// We can safely read structure, because we guarded with alter_lock + /// We can safely read structure, because we guarded with alter_intention_lock if (is_readonly) throw Exception("Can't ALTER readonly table", ErrorCodes::TABLE_IS_READ_ONLY); @@ -3428,14 +3428,14 @@ void StorageReplicatedMergeTree::alterPartition(const ASTPtr & query, const Part case PartitionCommand::FREEZE_PARTITION: { - auto lock = lockStructureForShare(query_context.getCurrentQueryId()); + auto lock = lockStructureForShare(false, query_context.getCurrentQueryId()); freezePartition(command.partition, command.with_name, query_context, lock); } break; case PartitionCommand::FREEZE_ALL_PARTITIONS: { - auto lock = lockStructureForShare(query_context.getCurrentQueryId()); + auto lock = lockStructureForShare(false, query_context.getCurrentQueryId()); freezeAll(command.with_name, query_context, lock); } break; @@ -4443,7 +4443,7 @@ void StorageReplicatedMergeTree::clearOldPartsAndRemoveFromZK() { /// Critical section is 
not required (since grabOldParts() returns unique part set on each call) - auto table_lock = lockStructureForShare(RWLockImpl::NO_QUERY); + auto table_lock = lockStructureForShare(false, RWLockImpl::NO_QUERY); auto zookeeper = getZooKeeper(); DataPartsVector parts = grabOldParts(); @@ -4738,8 +4738,8 @@ void StorageReplicatedMergeTree::replacePartitionFrom(const StoragePtr & source_ const Context & context) { /// First argument is true, because we possibly will add new data to current table. - auto lock1 = lockStructureForShare(context.getCurrentQueryId()); - auto lock2 = source_table->lockStructureForShare(context.getCurrentQueryId()); + auto lock1 = lockStructureForShare(true, context.getCurrentQueryId()); + auto lock2 = source_table->lockStructureForShare(false, context.getCurrentQueryId()); Stopwatch watch; MergeTreeData & src_data = checkStructureAndGetMergeTreeData(source_table); @@ -4917,8 +4917,8 @@ void StorageReplicatedMergeTree::replacePartitionFrom(const StoragePtr & source_ void StorageReplicatedMergeTree::movePartitionToTable(const StoragePtr & dest_table, const ASTPtr & partition, const Context & context) { - auto lock1 = lockStructureForShare(context.getCurrentQueryId()); - auto lock2 = dest_table->lockStructureForShare(context.getCurrentQueryId()); + auto lock1 = lockStructureForShare(false, context.getCurrentQueryId()); + auto lock2 = dest_table->lockStructureForShare(false, context.getCurrentQueryId()); auto dest_table_storage = std::dynamic_pointer_cast(dest_table); if (!dest_table_storage) diff --git a/dbms/src/Storages/System/StorageSystemColumns.cpp b/dbms/src/Storages/System/StorageSystemColumns.cpp index 9af8904ab26..cbf6ada9ed3 100644 --- a/dbms/src/Storages/System/StorageSystemColumns.cpp +++ b/dbms/src/Storages/System/StorageSystemColumns.cpp @@ -103,7 +103,7 @@ protected: try { - table_lock = storage->lockStructureForShare(query_id); + table_lock = storage->lockStructureForShare(false, query_id); } catch (const Exception & e) { diff --git a/dbms/src/Storages/System/StorageSystemPartsBase.cpp b/dbms/src/Storages/System/StorageSystemPartsBase.cpp index c212b30d268..d8f564b0160 100644 --- a/dbms/src/Storages/System/StorageSystemPartsBase.cpp +++ b/dbms/src/Storages/System/StorageSystemPartsBase.cpp @@ -192,7 +192,7 @@ StoragesInfo StoragesInfoStream::next() try { /// For table not to be dropped and set of columns to remain constant. 
- info.table_lock = info.storage->lockStructureForShare(query_id); + info.table_lock = info.storage->lockStructureForShare(false, query_id); } catch (const Exception & e) { diff --git a/dbms/src/Storages/System/StorageSystemTables.cpp b/dbms/src/Storages/System/StorageSystemTables.cpp index 5d0aec921de..a8d5fc2ec57 100644 --- a/dbms/src/Storages/System/StorageSystemTables.cpp +++ b/dbms/src/Storages/System/StorageSystemTables.cpp @@ -244,7 +244,7 @@ protected: if (need_lock_structure) { table = tables_it->table(); - lock = table->lockStructureForShare(context.getCurrentQueryId()); + lock = table->lockStructureForShare(false, context.getCurrentQueryId()); } } catch (const Exception & e) diff --git a/dbms/src/Storages/TableStructureLockHolder.h b/dbms/src/Storages/TableStructureLockHolder.h index 50f196517e3..b5fc0c620ad 100644 --- a/dbms/src/Storages/TableStructureLockHolder.h +++ b/dbms/src/Storages/TableStructureLockHolder.h @@ -12,11 +12,12 @@ struct TableStructureWriteLockHolder { void release() { - *this = {}; + *this = TableStructureWriteLockHolder(); } void releaseAllExceptAlterIntention() { + new_data_structure_lock.reset(); structure_lock.reset(); } @@ -24,7 +25,8 @@ private: friend class IStorage; /// Order is important. - std::unique_lock alter_lock; + RWLockImpl::LockHolder alter_intention_lock; + RWLockImpl::LockHolder new_data_structure_lock; RWLockImpl::LockHolder structure_lock; }; @@ -32,13 +34,14 @@ struct TableStructureReadLockHolder { void release() { - *this = {}; + *this = TableStructureReadLockHolder(); } private: friend class IStorage; /// Order is important. + RWLockImpl::LockHolder new_data_structure_lock; RWLockImpl::LockHolder structure_lock; }; From 0fcfa48758a3b536c2716651e66a632930d3bc34 Mon Sep 17 00:00:00 2001 From: Ivan Blinkov Date: Wed, 1 Apr 2020 17:19:59 +0300 Subject: [PATCH 025/484] remove {: .grey } --- docs/en/index.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/en/index.md b/docs/en/index.md index da2cfdd9cdf..705f5165870 100644 --- a/docs/en/index.md +++ b/docs/en/index.md @@ -13,7 +13,7 @@ In a “normal” row-oriented DBMS, data is stored in this order: In other words, all the values related to a row are physically stored next to each other. -Examples of a row-oriented DBMS are MySQL, Postgres, and MS SQL Server. {: .grey } +Examples of a row-oriented DBMS are MySQL, Postgres, and MS SQL Server. In a column-oriented DBMS, data is stored like this: @@ -27,7 +27,7 @@ In a column-oriented DBMS, data is stored like this: These examples only show the order that data is arranged in. The values from different columns are stored separately, and data from the same column is stored together. -Examples of a column-oriented DBMS: Vertica, Paraccel (Actian Matrix and Amazon Redshift), Sybase IQ, Exasol, Infobright, InfiniDB, MonetDB (VectorWise and Actian Vector), LucidDB, SAP HANA, Google Dremel, Google PowerDrill, Druid, and kdb+. {: .grey } +Examples of a column-oriented DBMS: Vertica, Paraccel (Actian Matrix and Amazon Redshift), Sybase IQ, Exasol, Infobright, InfiniDB, MonetDB (VectorWise and Actian Vector), LucidDB, SAP HANA, Google Dremel, Google PowerDrill, Druid, and kdb+. Different orders for storing data are better suited to different scenarios. 
The data access scenario refers to what queries are made, how often, and in what proportion; how much data is read for each type of query – rows, columns, and bytes; the relationship between reading and updating data; the working size of the data and how locally it is used; whether transactions are used, and how isolated they are; requirements for data replication and logical integrity; requirements for latency and throughput for each type of query, and so on. From 836c6fed80f801deb9d9c38cd4f455bfae611442 Mon Sep 17 00:00:00 2001 From: Artem Zuikov Date: Wed, 1 Apr 2020 17:21:37 +0300 Subject: [PATCH 026/484] Fix columns override in distributed queries (#9972) --- dbms/src/Interpreters/QueryNormalizer.cpp | 13 ------- .../TranslateQualifiedNamesVisitor.cpp | 37 +++++++++++++++++-- .../TranslateQualifiedNamesVisitor.h | 21 ++++++++--- dbms/src/Storages/StorageDistributed.cpp | 22 +++++++---- .../00818_inner_join_bug_3567.reference | 24 +++++++++--- .../0_stateless/00818_inner_join_bug_3567.sql | 22 ++++++----- .../01104_distributed_numbers_test.sql | 2 +- .../01104_distributed_one_test.reference | 1 + .../01104_distributed_one_test.sql | 5 ++- 9 files changed, 100 insertions(+), 47 deletions(-) diff --git a/dbms/src/Interpreters/QueryNormalizer.cpp b/dbms/src/Interpreters/QueryNormalizer.cpp index 568b08b8f5a..86fbd108f51 100644 --- a/dbms/src/Interpreters/QueryNormalizer.cpp +++ b/dbms/src/Interpreters/QueryNormalizer.cpp @@ -76,20 +76,7 @@ void QueryNormalizer::visit(ASTIdentifier & node, ASTPtr & ast, Data & data) if (it_alias != data.aliases.end() && current_alias != node.name) { if (!IdentifierSemantic::canBeAlias(node)) - { - /// This means that column had qualified name, which was translated (so, canBeAlias() returns false). - /// But there is an alias with the same name. So, let's use original name for that column. - /// If alias wasn't set, use original column name as alias. - /// That helps to avoid result set with columns which have same names but different values. 
- if (node.alias.empty()) - { - node.name.swap(node.alias); - node.restoreCompoundName(); - node.name.swap(node.alias); - } - return; - } /// We are alias for other column (node.name), but we are alias by /// ourselves to some other column diff --git a/dbms/src/Interpreters/TranslateQualifiedNamesVisitor.cpp b/dbms/src/Interpreters/TranslateQualifiedNamesVisitor.cpp index a0f411dcc96..17b1bc004f8 100644 --- a/dbms/src/Interpreters/TranslateQualifiedNamesVisitor.cpp +++ b/dbms/src/Interpreters/TranslateQualifiedNamesVisitor.cpp @@ -300,14 +300,45 @@ void TranslateQualifiedNamesMatcher::extractJoinUsingColumns(const ASTPtr ast, D } } -void RestoreQualifiedNamesData::visit(ASTIdentifier & identifier, ASTPtr & ast) + +void RestoreQualifiedNamesMatcher::Data::changeTable(ASTIdentifier & identifier) const +{ + auto match = IdentifierSemantic::canReferColumnToTable(identifier, distributed_table); + switch (match) + { + case IdentifierSemantic::ColumnMatch::AliasedTableName: + case IdentifierSemantic::ColumnMatch::TableName: + case IdentifierSemantic::ColumnMatch::DbAndTable: + IdentifierSemantic::setColumnLongName(identifier, remote_table); + break; + default: + break; + } +} + +bool RestoreQualifiedNamesMatcher::needChildVisit(ASTPtr &, const ASTPtr & child) +{ + /// Do not go into subqueries + if (child->as()) + return false; // NOLINT + return true; +} + +void RestoreQualifiedNamesMatcher::visit(ASTPtr & ast, Data & data) +{ + if (auto * t = ast->as()) + visit(*t, ast, data); +} + +void RestoreQualifiedNamesMatcher::visit(ASTIdentifier & identifier, ASTPtr &, Data & data) { if (IdentifierSemantic::getColumnName(identifier)) { if (IdentifierSemantic::getMembership(identifier)) { - ast = identifier.clone(); - ast->as()->restoreCompoundName(); + identifier.restoreCompoundName(); + if (data.rename) + data.changeTable(identifier); } } } diff --git a/dbms/src/Interpreters/TranslateQualifiedNamesVisitor.h b/dbms/src/Interpreters/TranslateQualifiedNamesVisitor.h index 51c6c2c42f0..e8c320671bf 100644 --- a/dbms/src/Interpreters/TranslateQualifiedNamesVisitor.h +++ b/dbms/src/Interpreters/TranslateQualifiedNamesVisitor.h @@ -59,15 +59,24 @@ private: /// It finds columns and translate their names to the normal form. Expand asterisks and qualified asterisks with column names. using TranslateQualifiedNamesVisitor = TranslateQualifiedNamesMatcher::Visitor; -/// Restore ASTIdentifiers to long form -struct RestoreQualifiedNamesData -{ - using TypeToVisit = ASTIdentifier; - static void visit(ASTIdentifier & identifier, ASTPtr & ast); +/// Restore ASTIdentifiers to long form, change table name in case of distributed. 
+struct RestoreQualifiedNamesMatcher +{ + struct Data + { + DatabaseAndTableWithAlias distributed_table; + DatabaseAndTableWithAlias remote_table; + bool rename = false; + + void changeTable(ASTIdentifier & identifier) const; + }; + + static bool needChildVisit(ASTPtr & node, const ASTPtr & child); + static void visit(ASTPtr & ast, Data & data); + static void visit(ASTIdentifier & identifier, ASTPtr & ast, Data & data); }; -using RestoreQualifiedNamesMatcher = OneTypeMatcher; using RestoreQualifiedNamesVisitor = InDepthNodeVisitor; } diff --git a/dbms/src/Storages/StorageDistributed.cpp b/dbms/src/Storages/StorageDistributed.cpp index 6f98d282e8c..b4375dd5b0a 100644 --- a/dbms/src/Storages/StorageDistributed.cpp +++ b/dbms/src/Storages/StorageDistributed.cpp @@ -40,6 +40,7 @@ #include #include #include +#include #include @@ -89,18 +90,23 @@ ASTPtr rewriteSelectQuery(const ASTPtr & query, const std::string & database, co auto modified_query_ast = query->clone(); ASTSelectQuery & select_query = modified_query_ast->as(); - - /// restore long column names in JOIN ON expressions - if (auto tables = select_query.tables()) - { - RestoreQualifiedNamesVisitor::Data data; - RestoreQualifiedNamesVisitor(data).visit(tables); - } - if (table_function_ptr) select_query.addTableFunction(table_function_ptr); else select_query.replaceDatabaseAndTable(database, table); + + /// Restore long column names (cause our short names are ambiguous). + /// TODO: aliased table functions & CREATE TABLE AS table function cases + if (!table_function_ptr) + { + RestoreQualifiedNamesVisitor::Data data; + data.distributed_table = DatabaseAndTableWithAlias(*getTableExpression(query->as(), 0)); + data.remote_table.database = database; + data.remote_table.table = table; + data.rename = true; + RestoreQualifiedNamesVisitor(data).visit(modified_query_ast); + } + return modified_query_ast; } diff --git a/dbms/tests/queries/0_stateless/00818_inner_join_bug_3567.reference b/dbms/tests/queries/0_stateless/00818_inner_join_bug_3567.reference index 7967cf7837e..c0fe46ee963 100644 --- a/dbms/tests/queries/0_stateless/00818_inner_join_bug_3567.reference +++ b/dbms/tests/queries/0_stateless/00818_inner_join_bug_3567.reference @@ -1,5 +1,19 @@ -a 2018-01-01 00:00:00 0000-00-00 00:00:00 -b 2018-01-01 00:00:00 b b 2018-01-01 00:00:00 -c 2018-01-01 00:00:00 c c 2018-01-01 00:00:00 -b 2018-01-01 00:00:00 b b 2018-01-01 00:00:00 -c 2018-01-01 00:00:00 c c 2018-01-01 00:00:00 +┌─a─┬──────────b─┐ +│ a │ 2018-01-01 │ +│ b │ 2018-01-01 │ +│ c │ 2018-01-01 │ +└───┴────────────┘ +┌─c─┬─a─┬──────────d─┬─a─┬──────────b─┐ +│ B │ b │ 2018-01-01 │ B │ 2018-01-01 │ +│ C │ c │ 2018-01-01 │ C │ 2018-01-01 │ +│ D │ d │ 2018-01-01 │ D │ 2018-01-01 │ +└───┴───┴────────────┴───┴────────────┘ +┌─a─┬──────────b─┬─c─┬──────────d─┐ +│ a │ 2018-01-01 │ │ 0000-00-00 │ +│ b │ 2018-01-01 │ B │ 2018-01-01 │ +│ c │ 2018-01-01 │ C │ 2018-01-01 │ +└───┴────────────┴───┴────────────┘ +┌─a─┬──────────b─┬─c─┬──────────d─┐ +│ b │ 2018-01-01 │ B │ 2018-01-01 │ +│ c │ 2018-01-01 │ C │ 2018-01-01 │ +└───┴────────────┴───┴────────────┘ diff --git a/dbms/tests/queries/0_stateless/00818_inner_join_bug_3567.sql b/dbms/tests/queries/0_stateless/00818_inner_join_bug_3567.sql index b8bd6d3384c..2058d2309e4 100644 --- a/dbms/tests/queries/0_stateless/00818_inner_join_bug_3567.sql +++ b/dbms/tests/queries/0_stateless/00818_inner_join_bug_3567.sql @@ -1,14 +1,16 @@ -DROP TABLE IF EXISTS using1; -DROP TABLE IF EXISTS using2; +DROP TABLE IF EXISTS table1; +DROP TABLE IF EXISTS table2; 
-CREATE TABLE using1(a String, b DateTime) ENGINE=MergeTree order by a; -CREATE TABLE using2(c String, a String, d DateTime) ENGINE=MergeTree order by c; +CREATE TABLE table1(a String, b Date) ENGINE MergeTree order by a; +CREATE TABLE table2(c String, a String, d Date) ENGINE MergeTree order by c; -INSERT INTO using1 VALUES ('a', '2018-01-01 00:00:00') ('b', '2018-01-01 00:00:00') ('c', '2018-01-01 00:00:00'); -INSERT INTO using2 VALUES ('d', 'd', '2018-01-01 00:00:00') ('b', 'b', '2018-01-01 00:00:00') ('c', 'c', '2018-01-01 00:00:00'); +INSERT INTO table1 VALUES ('a', '2018-01-01') ('b', '2018-01-01') ('c', '2018-01-01'); +INSERT INTO table2 VALUES ('D', 'd', '2018-01-01') ('B', 'b', '2018-01-01') ('C', 'c', '2018-01-01'); -SELECT * FROM using1 t1 ALL LEFT JOIN (SELECT *, c as a, d as b FROM using2) t2 USING (a, b) ORDER BY d; -SELECT * FROM using1 t1 ALL INNER JOIN (SELECT *, c as a, d as b FROM using2) t2 USING (a, b) ORDER BY d; +SELECT * FROM table1 t1 FORMAT PrettyCompact; +SELECT *, c as a, d as b FROM table2 FORMAT PrettyCompact; +SELECT * FROM table1 t1 ALL LEFT JOIN (SELECT *, c, d as b FROM table2) t2 USING (a, b) ORDER BY d FORMAT PrettyCompact; +SELECT * FROM table1 t1 ALL INNER JOIN (SELECT *, c, d as b FROM table2) t2 USING (a, b) ORDER BY d FORMAT PrettyCompact; -DROP TABLE using1; -DROP TABLE using2; +DROP TABLE table1; +DROP TABLE table2; diff --git a/dbms/tests/queries/0_stateless/01104_distributed_numbers_test.sql b/dbms/tests/queries/0_stateless/01104_distributed_numbers_test.sql index b301c0ac00f..7f56a4e08fd 100644 --- a/dbms/tests/queries/0_stateless/01104_distributed_numbers_test.sql +++ b/dbms/tests/queries/0_stateless/01104_distributed_numbers_test.sql @@ -1,5 +1,5 @@ DROP TABLE IF EXISTS d_numbers; -CREATE TABLE d_numbers (number UInt32) ENGINE = Distributed(test_cluster_two_shards_localhost, system, numbers, rand()); +CREATE TABLE d_numbers (number UInt32) ENGINE = Distributed(test_cluster_two_shards, system, numbers, rand()); SET experimental_use_processors = 1; diff --git a/dbms/tests/queries/0_stateless/01104_distributed_one_test.reference b/dbms/tests/queries/0_stateless/01104_distributed_one_test.reference index 929dd64ae90..efbf8ed025e 100644 --- a/dbms/tests/queries/0_stateless/01104_distributed_one_test.reference +++ b/dbms/tests/queries/0_stateless/01104_distributed_one_test.reference @@ -4,3 +4,4 @@ distributed_0 2 1 local_0 1 distributed_0 1 1 distributed_0 2 1 +remote_0 1 diff --git a/dbms/tests/queries/0_stateless/01104_distributed_one_test.sql b/dbms/tests/queries/0_stateless/01104_distributed_one_test.sql index 92b4a83ebf3..0ae6a180570 100644 --- a/dbms/tests/queries/0_stateless/01104_distributed_one_test.sql +++ b/dbms/tests/queries/0_stateless/01104_distributed_one_test.sql @@ -1,5 +1,5 @@ DROP TABLE IF EXISTS d_one; -CREATE TABLE d_one (dummy UInt8) ENGINE = Distributed(test_cluster_two_shards_localhost, system, one, rand()); +CREATE TABLE d_one (dummy UInt8) ENGINE = Distributed(test_cluster_two_shards, system, one, rand()); SELECT 'local_0', toUInt8(1) AS dummy FROM system.one AS o WHERE o.dummy = 0; SELECT 'local_1', toUInt8(1) AS dummy FROM system.one AS o WHERE o.dummy = 1; @@ -16,3 +16,6 @@ SELECT 'distributed_0', _shard_num, toUInt8(1) AS dummy FROM d_one AS o WHERE o. 
SELECT 'distributed_1', _shard_num, toUInt8(1) AS dummy FROM d_one AS o WHERE o.dummy = 1 ORDER BY _shard_num;
 
 DROP TABLE d_one;
+
+SELECT 'remote_0', toUInt8(1) AS dummy FROM remote('127.0.0.2', system, one) AS o WHERE o.dummy = 0;
+SELECT 'remote_1', toUInt8(1) AS dummy FROM remote('127.0.0.2', system, one) AS o WHERE o.dummy = 1;

From 90dd36346ecbdd1e1a54c4610b3e6ef24ad408f6 Mon Sep 17 00:00:00 2001
From: Alexander Tokmakov
Date: Wed, 1 Apr 2020 17:52:37 +0300
Subject: [PATCH 027/484] fix test_distributed_respect_user_timeouts

---
 .../test_distributed_respect_user_timeouts/test.py | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/dbms/tests/integration/test_distributed_respect_user_timeouts/test.py b/dbms/tests/integration/test_distributed_respect_user_timeouts/test.py
index 72c3001ee91..ba760e90412 100644
--- a/dbms/tests/integration/test_distributed_respect_user_timeouts/test.py
+++ b/dbms/tests/integration/test_distributed_respect_user_timeouts/test.py
@@ -108,7 +108,16 @@ def started_cluster(request):
 
 def _check_timeout_and_exception(node, user, query_base, query):
     repeats = EXPECTED_BEHAVIOR[user]['times']
-    expected_timeout = EXPECTED_BEHAVIOR[user]['timeout'] * repeats
+
+    extra_repeats = 1
+    # The remote() table function is executed two times:
+    # it tries to get the table structure from the remote shards.
+    # On 'node2' it will first try to get the structure from 'node1' (which is not available),
+    # so there are two extra connection attempts for 'node2' and 'remote'.
+    if node.name == 'node2' and query_base == 'remote':
+        extra_repeats = 3
+
+    expected_timeout = EXPECTED_BEHAVIOR[user]['timeout'] * repeats * extra_repeats
 
     start = timeit.default_timer()
     exception = node.query_and_get_error(query, user=user)

From 770066960c96986c4b0b919e24dcd84bc31b5471 Mon Sep 17 00:00:00 2001
From: Alexander Kuzmenkov
Date: Wed, 1 Apr 2020 19:05:33 +0300
Subject: [PATCH 028/484] Fix some tests for random execution order.

Found in MemorySanitizer CI run, by virtue of it running the tests in
random order.

---
 dbms/tests/config/decimals_dictionary.xml | 10 +++++-----
 dbms/tests/config/ints_dictionary.xml | 12 +++++------
 dbms/tests/config/strings_dictionary.xml | 14 ++++++-------
 .../queries/0_stateless/00950_dict_get.sql | 20 ++++++++++++-------
 4 files changed, 31 insertions(+), 25 deletions(-)

diff --git a/dbms/tests/config/decimals_dictionary.xml b/dbms/tests/config/decimals_dictionary.xml
index ff465b91b85..f728fa774a7 100644
--- a/dbms/tests/config/decimals_dictionary.xml
+++ b/dbms/tests/config/decimals_dictionary.xml
@@ -7,7 +7,7 @@
             9000
             default
-            test_00950
+            system
             decimals
@@ -45,7 +45,7 @@ 9000 default - test_00950 + system decimals
@@ -83,7 +83,7 @@ 9000 default - test_00950 + system decimals
@@ -121,7 +121,7 @@ 9000 default - test_00950 + system decimals
@@ -162,7 +162,7 @@ 9000 default - test_00950 + system decimals
diff --git a/dbms/tests/config/ints_dictionary.xml b/dbms/tests/config/ints_dictionary.xml index 5cf8419ad77..a22dab8933c 100644 --- a/dbms/tests/config/ints_dictionary.xml +++ b/dbms/tests/config/ints_dictionary.xml @@ -7,7 +7,7 @@ 9000 default - test_00950 + system ints
@@ -70,7 +70,7 @@ 9000 default - test_00950 + system ints
@@ -133,7 +133,7 @@ 9000 default - test_00950 + system ints
@@ -196,7 +196,7 @@ 9000 default - test_00950 + system ints
@@ -259,7 +259,7 @@ 9000 default - test_00950 + system ints
@@ -325,7 +325,7 @@ 9000 default - test_00950 + system ints
diff --git a/dbms/tests/config/strings_dictionary.xml b/dbms/tests/config/strings_dictionary.xml index 88fad6ae2d7..c5643eecb68 100644 --- a/dbms/tests/config/strings_dictionary.xml +++ b/dbms/tests/config/strings_dictionary.xml @@ -7,7 +7,7 @@ 9000 default - test_00950 + system strings
@@ -35,7 +35,7 @@ 9000 default - test_00950 + system strings
@@ -63,7 +63,7 @@ 9000 default - test_00950 + system strings
@@ -91,7 +91,7 @@ 9000 default - test_00950 + system strings
@@ -122,7 +122,7 @@ 9000 default - test_00950 + system strings
@@ -153,7 +153,7 @@ 9000 default - test_00950 + system strings
@@ -184,7 +184,7 @@ 9000 default - test_00950 + system strings
diff --git a/dbms/tests/queries/0_stateless/00950_dict_get.sql b/dbms/tests/queries/0_stateless/00950_dict_get.sql index 2483a21c0d3..159f3eb0c4e 100644 --- a/dbms/tests/queries/0_stateless/00950_dict_get.sql +++ b/dbms/tests/queries/0_stateless/00950_dict_get.sql @@ -1,6 +1,5 @@ --- Must use `test_00950` database and these tables - they're configured in dbms/tests/*_dictionary.xml -create database if not exists test_00950; -use test_00950; +-- Must use `system` database and these tables - they're configured in dbms/tests/*_dictionary.xml +use system; drop table if exists ints; drop table if exists strings; drop table if exists decimals; @@ -270,7 +269,14 @@ select 'dictGetOrDefault', 'complex_cache_decimals' as dict_name, tuple(toUInt64 dictGetOrDefault(dict_name, 'd64', k, toDecimal64(42, 6)), dictGetOrDefault(dict_name, 'd128', k, toDecimal128(42, 1)); -drop table ints; -drop table strings; -drop table decimals; -drop database test_00950; +-- +-- Keep the tables, so that the dictionaries can be reloaded correctly and +-- SYSTEM RELOAD DICTIONARIES doesn't break. +-- We could also: +-- * drop the dictionaries -- not possible, they are configured in a .xml; +-- * switch dictionaries to DDL syntax so that they can be dropped -- tedious, +-- because there are a couple dozens of them, and also we need to have some +-- .xml dictionaries in tests so that we test backward compatibility with this +-- format; +-- * unload dictionaries -- no command for that. +-- From f0607a8d30e4dc5f380694933593417b1c32949d Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Wed, 1 Apr 2020 19:49:54 +0300 Subject: [PATCH 029/484] try fix 01108_restart_replicas_rename_deadlock flaps --- .../0_stateless/01108_restart_replicas_rename_deadlock.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/dbms/tests/queries/0_stateless/01108_restart_replicas_rename_deadlock.sh b/dbms/tests/queries/0_stateless/01108_restart_replicas_rename_deadlock.sh index 93327a44388..7fedf321fdb 100755 --- a/dbms/tests/queries/0_stateless/01108_restart_replicas_rename_deadlock.sh +++ b/dbms/tests/queries/0_stateless/01108_restart_replicas_rename_deadlock.sh @@ -63,6 +63,7 @@ timeout $TIMEOUT bash -c restart_thread_2 2> /dev/null & wait $CLICKHOUSE_CLIENT -q "SYSTEM RESTART REPLICAS" +sleep 2 $CLICKHOUSE_CLIENT -q "SELECT sum(n), count(n) FROM merge(currentDatabase(), '^replica_01108_') GROUP BY position(_table, 'tmp')" From 214c465b535e45c7d11759154930bb9a9b0dfbb8 Mon Sep 17 00:00:00 2001 From: TCeason <33082201+TCeason@users.noreply.github.com> Date: Thu, 2 Apr 2020 02:16:13 +0800 Subject: [PATCH 030/484] Add docs for predefined_http (#8161) * Add docs for predefined_http * fix review suggestions Co-authored-by: Ivan Blinkov --- docs/en/interfaces/http.md | 225 ++++++++++++++++++++++++++++++++++++- 1 file changed, 224 insertions(+), 1 deletion(-) diff --git a/docs/en/interfaces/http.md b/docs/en/interfaces/http.md index fbc116c4c97..76f6121f5d0 100644 --- a/docs/en/interfaces/http.md +++ b/docs/en/interfaces/http.md @@ -278,4 +278,227 @@ You can create a query with parameters and pass values for them from the corresp $ curl -sS "
?param_id=2&param_phrase=test" -d "SELECT * FROM table WHERE int_column = {id:UInt8} and string_column = {phrase:String}"
 ```
 
-[Original article](https://clickhouse.tech/docs/en/interfaces/http_interface/)
+## Predefined HTTP Interface {#predefined_http_interface}
+
+ClickHouse supports specific queries through the HTTP interface. For example, you can write data to a table as follows:
+
+```bash
+$ echo '(4),(5),(6)' | curl 'http://localhost:8123/?query=INSERT%20INTO%20t%20VALUES' --data-binary @-
+```
+
+ClickHouse also supports a Predefined HTTP Interface, which makes integration with third-party tools such as the [Prometheus exporter](https://github.com/percona-lab/clickhouse_exporter) easier.
+
+Example:
+
+* First of all, add this section to server configuration file:
+
+``` xml
+<http_handlers>
+    <predefined_query_handler>
+        <url>/metrics</url>
+        <method>GET</method>
+        <queries>
+            <query>SELECT * FROM system.metrics LIMIT 5 FORMAT Template SETTINGS format_template_resultset = 'prometheus_template_output_format_resultset', format_template_row = 'prometheus_template_output_format_row', format_template_rows_between_delimiter = '\n'</query>
+        </queries>
+    </predefined_query_handler>
+</http_handlers>
+```
+
+* You can now request the url directly for data in the Prometheus format:
+
+``` bash
+curl -vvv 'http://localhost:8123/metrics'
+* Trying ::1...
+* Connected to localhost (::1) port 8123 (#0)
+> GET /metrics HTTP/1.1
+> Host: localhost:8123
+> User-Agent: curl/7.47.0
+> Accept: */*
+>
+< HTTP/1.1 200 OK
+< Date: Wed, 27 Nov 2019 08:54:25 GMT
+< Connection: Keep-Alive
+< Content-Type: text/plain; charset=UTF-8
+< X-ClickHouse-Server-Display-Name: i-tl62qd0o
+< Transfer-Encoding: chunked
+< X-ClickHouse-Query-Id: f39235f6-6ed7-488c-ae07-c7ceafb960f6
+< Keep-Alive: timeout=3
+< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0"}
+<
+# HELP "Query" "Number of executing queries"
+# TYPE "Query" counter
+"Query" 1
+
+# HELP "Merge" "Number of executing background merges"
+# TYPE "Merge" counter
+"Merge" 0
+
+# HELP "PartMutation" "Number of mutations (ALTER DELETE/UPDATE)"
+# TYPE "PartMutation" counter
+"PartMutation" 0
+
+# HELP "ReplicatedFetch" "Number of data parts being fetched from replica"
+# TYPE "ReplicatedFetch" counter
+"ReplicatedFetch" 0
+
+# HELP "ReplicatedSend" "Number of data parts being sent to replicas"
+# TYPE "ReplicatedSend" counter
+"ReplicatedSend" 0
+
+* Connection #0 to host localhost left intact
+```
+
+As you can see from the example, if `<http_handlers>` is configured in the config.xml file, ClickHouse will match the received HTTP requests to the predefined types in `<http_handlers>`, and then execute the corresponding predefined query if the match is successful.
+
+Now `<http_handlers>` can configure `<root_handler>`, `<ping_handler>`, `<replicas_status_handler>`, `<predefined_query_handler>` and `<dynamic_query_handler>`.
+
+## root_handler
+
+`<root_handler>` returns the specified content for the root path request. The specific return content is configured by `http_server_default_response` in config.xml. If it is not specified, **Ok.** is returned.
+
+`http_server_default_response` is not defined and an HTTP request is sent to ClickHouse. The result is as follows:
+
+```xml
+<http_handlers>
+    <root_handler/>
+</http_handlers>
+```
+
+```
+$ curl 'http://localhost:8123'
+Ok.
+```
+
+`http_server_default_response` is defined and an HTTP request is sent to ClickHouse. The result is as follows:
+
+```xml
+<http_server_default_response><![CDATA[<html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>]]></http_server_default_response>
+
+<http_handlers>
+    <root_handler/>
+</http_handlers>
+```
+
+```
+$ curl 'http://localhost:8123'
+<html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>%
+```
+
+## ping_handler
+
+`<ping_handler>` can be used to probe the health of the current ClickHouse Server. When the ClickHouse HTTP Server is normal, accessing ClickHouse through `<ping_handler>` will return **Ok.**.
+
+Example:
+
+```xml
+<ping_handler>
+    /ping
+</ping_handler>
+```
+
+```bash
+$ curl 'http://localhost:8123/ping'
+Ok.
+```
+
+## replicas_status_handler
+
+`<replicas_status_handler>` is used to detect the state of the replica node and returns **Ok.** if the replica node has no delay. If there is a delay, it returns the specific delay. The value of `<replicas_status_handler>` supports customization. If you do not specify `<replicas_status_handler>`, the ClickHouse default setting for `<replicas_status_handler>` is **/replicas_status**.
+
+Example:
+
+```xml
+<replicas_status_handler>
+    /replicas_status
+</replicas_status_handler>
+```
+
+No delay case:
+
+```bash
+$ curl 'http://localhost:8123/replicas_status'
+Ok.
+```
+
+Delayed case:
+
+```bash
+$ curl 'http://localhost:8123/replicas_status'
+db.stats: Absolute delay: 22. Relative delay: 22.
+```
+
+## predefined_query_handler
+
+You can configure `<method>`, `<headers>`, `<url>` and `<queries>` in `<predefined_query_handler>`.
+
+`<method>` is responsible for matching the method part of the HTTP request. `<method>` fully conforms to the definition of [method](https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods) in the HTTP protocol. It is an optional configuration. If it is not defined in the configuration file, it does not match the method portion of the HTTP request.
+
+`<url>` is responsible for matching the url part of the HTTP request. It is compatible with [RE2](https://github.com/google/re2)'s regular expressions. It is an optional configuration. If it is not defined in the configuration file, it does not match the url portion of the HTTP request.
+
+`<headers>` is responsible for matching the header part of the HTTP request. It is compatible with RE2's regular expressions. It is an optional configuration. If it is not defined in the configuration file, it does not match the header portion of the HTTP request.
+
+The `<queries>` value is a predefined query of `<predefined_query_handler>`, which is executed by ClickHouse when an HTTP request is matched and the result of the query is returned. It is a required configuration.
+
+`<predefined_query_handler>` supports setting Settings and query_params values.
+
+The following example defines the values of the `max_threads` and `max_alter_threads` settings, then queries the system table to check whether these settings were set successfully.
+
+Example:
+
+```xml
+<http_handlers>
+    <predefined_query_handler>
+        <method>GET</method>
+        <headers>
+            <XXX>TEST_HEADER_VALUE</XXX>
+            <PARAMS_XXX><![CDATA[(?P<name_1>[^/]+)(/(?P<name_2>[^/]+))?]]></PARAMS_XXX>
+        </headers>
+        <url><![CDATA[/query_param_with_url/\w+/(?P<name_1>[^/]+)(/(?P<name_2>[^/]+))?]]></url>
+        <queries>
+            <query>SELECT value FROM system.settings WHERE name = {name_1:String}</query>
+            <query>SELECT name, value FROM system.settings WHERE name = {name_2:String}</query>
+        </queries>
+    </predefined_query_handler>
+</http_handlers>
+```
+
+```bash
+$ curl -H 'XXX:TEST_HEADER_VALUE' -H 'PARAMS_XXX:max_threads' 'http://localhost:8123/query_param_with_url/1/max_threads/max_alter_threads?max_threads=1&max_alter_threads=2'
+1
+max_alter_threads 2
+```
+
+!!! note "Note"
+    In one `<predefined_query_handler>`, one `<queries>` only supports one `<query>` of an insert type.
+
+## dynamic_query_handler
+
+Compared with `<predefined_query_handler>`, `<dynamic_query_handler>` adds `<query_param_name>`.
+
+ClickHouse extracts and executes the value corresponding to the `<query_param_name>` value in the url of the HTTP request.
+The ClickHouse default setting for `<query_param_name>` is `/query`. It is an optional configuration. If there is no definition in the configuration file, the param is not passed in.
+
+To experiment with this functionality, the example defines the values of max_threads and max_alter_threads and queries whether the Settings were set successfully.
+The difference is that in `<predefined_query_handler>`, the query is written in the configuration file. But in `<dynamic_query_handler>`, the query is written in the form of a param of the HTTP request.
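+
+As a rough client-side sketch (illustrative only, not part of the official examples: it simply mirrors the `curl` calls from the predefined example above and the dynamic example below, and assumes the same handler configuration, header values and endpoint names), the two handler types could be driven like this:
+
+```python
+import requests  # any HTTP client works; requests is assumed here for brevity
+
+# predefined_query_handler: the queries live in the server config;
+# settings such as max_threads are passed as URL parameters.
+resp = requests.get(
+    'http://localhost:8123/query_param_with_url/1/max_threads/max_alter_threads',
+    headers={'XXX': 'TEST_HEADER_VALUE', 'PARAMS_XXX': 'max_threads'},
+    params={'max_threads': 1, 'max_alter_threads': 2},
+)
+print(resp.text)  # one query result per configured query
+
+# dynamic_query_handler: the query text itself travels in query_param.
+resp = requests.get(
+    'http://localhost:8123/',
+    headers={'XXX': 'TEST_HEADER_VALUE_DYNAMIC', 'PARAMS_XXX': 'max_threads'},
+    params={
+        'query_param': 'SELECT value FROM system.settings '
+                       'WHERE name = {name_1:String} OR name = {name_2:String}',
+        'max_threads': 1,
+        'max_alter_threads': 2,
+        'param_name_2': 'max_alter_threads',
+    },
+)
+print(resp.text)
+```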
+
+Example:
+
+```xml
+<http_handlers>
+    <dynamic_query_handler>
+        <headers>
+            <XXX>TEST_HEADER_VALUE_DYNAMIC</XXX>
+            <PARAMS_XXX><![CDATA[(?P<param_name_1>[^/]+)(/(?P<param_name_2>[^/]+))?]]></PARAMS_XXX>
+        </headers>
+        <query_param_name>query_param</query_param_name>
+    </dynamic_query_handler>
+</http_handlers>
+```
+
+```bash
+$ curl -H 'XXX:TEST_HEADER_VALUE_DYNAMIC' -H 'PARAMS_XXX:max_threads' 'http://localhost:8123/?query_param=SELECT%20value%20FROM%20system.settings%20where%20name%20=%20%7Bname_1:String%7D%20OR%20name%20=%20%7Bname_2:String%7D&max_threads=1&max_alter_threads=2&param_name_2=max_alter_threads'
+1
+2
+```
+
+[Original article](https://clickhouse.tech/docs/en/interfaces/http_interface/)
\ No newline at end of file

From 1d5a77c11321d212a3475639c922209f27b06bb0 Mon Sep 17 00:00:00 2001
From: alesapin
Date: Wed, 1 Apr 2020 21:21:27 +0300
Subject: [PATCH 031/484] Tried to add ability to rename primary key columns
 but just banned this ability

---
 dbms/src/Interpreters/RenameColumnVisitor.cpp | 12 +++++
 dbms/src/Interpreters/RenameColumnVisitor.h | 21 ++++++++
 dbms/src/Storages/MergeTree/MergeTreeData.cpp | 10 ++++
 dbms/src/Storages/MergeTree/MergeTreeData.h | 8 ++-
 .../Storages/StorageReplicatedMergeTree.cpp | 29 ----------
 .../src/Storages/StorageReplicatedMergeTree.h | 3 --
 .../0_stateless/01213_alter_rename_column.sql | 1 -
 .../01213_alter_rename_column_zookeeper.sql | 2 +-
 ...ter_rename_primary_key_zookeeper.reference | 1 +
 ...213_alter_rename_primary_key_zookeeper.sql | 53 +++++++++++++++++++
 10 files changed, 101 insertions(+), 39 deletions(-)
 create mode 100644 dbms/src/Interpreters/RenameColumnVisitor.cpp
 create mode 100644 dbms/src/Interpreters/RenameColumnVisitor.h
 create mode 100644 dbms/tests/queries/0_stateless/01213_alter_rename_primary_key_zookeeper.reference
 create mode 100644 dbms/tests/queries/0_stateless/01213_alter_rename_primary_key_zookeeper.sql

diff --git a/dbms/src/Interpreters/RenameColumnVisitor.cpp b/dbms/src/Interpreters/RenameColumnVisitor.cpp
new file mode 100644
index 00000000000..a22fa78e2cc
--- /dev/null
+++ b/dbms/src/Interpreters/RenameColumnVisitor.cpp
@@ -0,0 +1,12 @@
+#include <Interpreters/RenameColumnVisitor.h>
+#include <Interpreters/IdentifierSemantic.h>
+
+namespace DB
+{
+void RenameColumnData::visit(ASTIdentifier & identifier, ASTPtr &)
+{
+    std::optional<String> identifier_column_name = IdentifierSemantic::getColumnName(identifier);
+    if (identifier_column_name && identifier_column_name == column_name)
+        identifier.name = rename_to;
+}
+}
diff --git a/dbms/src/Interpreters/RenameColumnVisitor.h b/dbms/src/Interpreters/RenameColumnVisitor.h
new file mode 100644
index 00000000000..e59936e6cd0
--- /dev/null
+++ b/dbms/src/Interpreters/RenameColumnVisitor.h
@@ -0,0 +1,21 @@
+#pragma once
+
+#include <Interpreters/InDepthNodeVisitor.h>
+#include <Parsers/ASTIdentifier.h>
+
+namespace DB
+{
+/// Rename the ASTIdentifiers which refer to the given column
+struct RenameColumnData
+{
+    using TypeToVisit = ASTIdentifier;
+
+    String column_name;
+    String rename_to;
+
+    void visit(ASTIdentifier & identifier, ASTPtr & ast);
+};
+
+using RenameColumnMatcher = OneTypeMatcher<RenameColumnData>;
+using RenameColumnVisitor = InDepthNodeVisitor<RenameColumnMatcher, true>;
+}
diff --git a/dbms/src/Storages/MergeTree/MergeTreeData.cpp b/dbms/src/Storages/MergeTree/MergeTreeData.cpp
index b218066978b..ab99c955701 100644
--- a/dbms/src/Storages/MergeTree/MergeTreeData.cpp
+++ b/dbms/src/Storages/MergeTree/MergeTreeData.cpp
@@ -1457,6 +1457,7 @@ void MergeTreeData::checkAlterIsPossible(const AlterCommands & commands, const S
     for (const auto & column : getColumns().getAllPhysical())
         old_types.emplace(column.name, column.type.get());
 
+
     for (const AlterCommand & command : commands)
     {
         if (command.type == AlterCommand::MODIFY_ORDER_BY && !is_custom_partitioned)
@@ -1471,6 +1472,15 @@ void MergeTreeData::checkAlterIsPossible(const AlterCommands & commands, const S
                 "ALTER ADD
INDEX is not supported for tables with the old syntax", ErrorCodes::BAD_ARGUMENTS); } + if (command.type == AlterCommand::RENAME_COLUMN) + { + if (columns_alter_type_forbidden.count(command.column_name) || columns_alter_type_metadata_only.count(command.column_name)) + { + throw Exception( + "Trying to ALTER RENAME key " + backQuoteIfNeed(command.column_name) + " column which is a part of key expression", + ErrorCodes::ILLEGAL_COLUMN); + } + } else if (command.isModifyingData()) { if (columns_alter_type_forbidden.count(command.column_name)) diff --git a/dbms/src/Storages/MergeTree/MergeTreeData.h b/dbms/src/Storages/MergeTree/MergeTreeData.h index 5e4fb5c8430..5a9a8a61376 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeData.h +++ b/dbms/src/Storages/MergeTree/MergeTreeData.h @@ -541,10 +541,9 @@ public: broken_part_callback(name); } - /** Get the key expression AST as an ASTExpressionList. - * It can be specified in the tuple: (CounterID, Date), - * or as one column: CounterID. - */ + /** Get the key expression AST as an ASTExpressionList. It can be specified + * in the tuple: (CounterID, Date), or as one column: CounterID. + */ static ASTPtr extractKeyExpressionList(const ASTPtr & node); bool hasSortingKey() const { return !sorting_key_columns.empty(); } @@ -864,7 +863,6 @@ protected: std::mutex grab_old_parts_mutex; /// The same for clearOldTemporaryDirectories. std::mutex clear_old_temporary_directories_mutex; - /// Mutex for settings usage void setProperties(const StorageInMemoryMetadata & metadata, bool only_check = false); diff --git a/dbms/src/Storages/StorageReplicatedMergeTree.cpp b/dbms/src/Storages/StorageReplicatedMergeTree.cpp index 43a889c3ea2..651b17f8be7 100644 --- a/dbms/src/Storages/StorageReplicatedMergeTree.cpp +++ b/dbms/src/Storages/StorageReplicatedMergeTree.cpp @@ -447,7 +447,6 @@ void StorageReplicatedMergeTree::checkTableStructure(const String & zookeeper_pr } } - void StorageReplicatedMergeTree::setTableStructure(ColumnsDescription new_columns, const ReplicatedMergeTreeTableMetadata::Diff & metadata_diff) { StorageInMemoryMetadata metadata = getInMemoryMetadata(); @@ -5293,37 +5292,9 @@ bool StorageReplicatedMergeTree::canUseAdaptiveGranularity() const } -StorageInMemoryMetadata -StorageReplicatedMergeTree::getMetadataFromSharedZookeeper(const String & metadata_str, const String & columns_str) const -{ - auto replicated_metadata = ReplicatedMergeTreeTableMetadata::parse(metadata_str); - StorageInMemoryMetadata result = getInMemoryMetadata(); - result.columns = ColumnsDescription::parse(columns_str); - result.constraints = ConstraintsDescription::parse(replicated_metadata.constraints); - result.indices = IndicesDescription::parse(replicated_metadata.skip_indices); - - ParserExpression expression_p; - - /// The only thing, that can be changed is ttl expression - if (replicated_metadata.primary_key.empty()) - throw Exception("Primary key cannot be empty" , ErrorCodes::LOGICAL_ERROR); - - if (!replicated_metadata.sorting_key.empty()) - { - result.order_by_ast = parseQuery(expression_p, "(" + replicated_metadata.sorting_key + ")", 0); - result.primary_key_ast = parseQuery(expression_p, "(" + replicated_metadata.primary_key + ")", 0); - } - else - { - result.order_by_ast = parseQuery(expression_p, "(" + replicated_metadata.primary_key + ")", 0); - } - return result; - -} MutationCommands StorageReplicatedMergeTree::getFirtsAlterMutationCommandsForPart(const DataPartPtr & part) const { return queue.getFirstAlterMutationCommandsForPart(part); } - } diff --git 
a/dbms/src/Storages/StorageReplicatedMergeTree.h b/dbms/src/Storages/StorageReplicatedMergeTree.h index fbd827434e7..01dd32614f9 100644 --- a/dbms/src/Storages/StorageReplicatedMergeTree.h +++ b/dbms/src/Storages/StorageReplicatedMergeTree.h @@ -526,9 +526,6 @@ private: void waitMutationToFinishOnReplicas( const Strings & replicas, const String & mutation_id) const; - StorageInMemoryMetadata getMetadataFromSharedZookeeper(const String & metadata_str, const String & columns_str) const; - - MutationCommands getFirtsAlterMutationCommandsForPart(const DataPartPtr & part) const override; protected: diff --git a/dbms/tests/queries/0_stateless/01213_alter_rename_column.sql b/dbms/tests/queries/0_stateless/01213_alter_rename_column.sql index 7c6209ac0b4..c1831798c47 100644 --- a/dbms/tests/queries/0_stateless/01213_alter_rename_column.sql +++ b/dbms/tests/queries/0_stateless/01213_alter_rename_column.sql @@ -26,7 +26,6 @@ ALTER TABLE table_for_rename RENAME COLUMN value3 to value2; --{serverError 15} ALTER TABLE table_for_rename RENAME COLUMN value3 TO r1, RENAME COLUMN value3 TO r2; --{serverError 36} ALTER TABLE table_for_rename RENAME COLUMN value3 TO r1, RENAME COLUMN r1 TO value1; --{serverError 10} - ALTER TABLE table_for_rename RENAME COLUMN value2 TO renamed_value2, RENAME COLUMN value3 TO renamed_value3; SELECT renamed_value2, renamed_value3 FROM table_for_rename WHERE key = 7; diff --git a/dbms/tests/queries/0_stateless/01213_alter_rename_column_zookeeper.sql b/dbms/tests/queries/0_stateless/01213_alter_rename_column_zookeeper.sql index abfab3fa937..daf25f7ba4b 100644 --- a/dbms/tests/queries/0_stateless/01213_alter_rename_column_zookeeper.sql +++ b/dbms/tests/queries/0_stateless/01213_alter_rename_column_zookeeper.sql @@ -9,7 +9,7 @@ CREATE TABLE table_for_rename_replicated value2 String, value3 String ) -ENGINE = ReplicatedMergeTree('/clickhouse/tables/table_for_rename_replicaed', '1') +ENGINE = ReplicatedMergeTree('/clickhouse/tables/table_for_rename_replicated', '1') PARTITION BY date ORDER BY key; diff --git a/dbms/tests/queries/0_stateless/01213_alter_rename_primary_key_zookeeper.reference b/dbms/tests/queries/0_stateless/01213_alter_rename_primary_key_zookeeper.reference new file mode 100644 index 00000000000..9972842f982 --- /dev/null +++ b/dbms/tests/queries/0_stateless/01213_alter_rename_primary_key_zookeeper.reference @@ -0,0 +1 @@ +1 1 diff --git a/dbms/tests/queries/0_stateless/01213_alter_rename_primary_key_zookeeper.sql b/dbms/tests/queries/0_stateless/01213_alter_rename_primary_key_zookeeper.sql new file mode 100644 index 00000000000..360a2f3745d --- /dev/null +++ b/dbms/tests/queries/0_stateless/01213_alter_rename_primary_key_zookeeper.sql @@ -0,0 +1,53 @@ +DROP TABLE IF EXISTS table_for_rename_pk; + +CREATE TABLE table_for_rename_pk +( + date Date, + key1 UInt64, + key2 UInt64, + key3 UInt64, + value1 String, + value2 String +) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/table_for_rename_pk', '1') +PARTITION BY date +ORDER BY (key1, pow(key2, 2), key3); + +INSERT INTO table_for_rename_pk SELECT toDate('2019-10-01') + number % 3, number, number, number, toString(number), toString(number) from numbers(9); + +SELECT key1, value1 FROM table_for_rename_pk WHERE key1 = 1 AND key2 = 1 AND key3 = 1; + +ALTER TABLE table_for_rename_pk RENAME COLUMN key1 TO renamed_key1; --{serverError 44} + +ALTER TABLE table_for_rename_pk RENAME COLUMN key3 TO renamed_key3; --{serverError 44} + +ALTER TABLE table_for_rename_pk RENAME COLUMN key2 TO renamed_key2; --{serverError 44} + +DROP 
TABLE IF EXISTS table_for_rename_pk; + +DROP TABLE IF EXISTS table_for_rename_with_primary_key; + +CREATE TABLE table_for_rename_with_primary_key +( + date Date, + key1 UInt64, + key2 UInt64, + key3 UInt64, + value1 String, + value2 String +) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/table_for_rename_pk', '1') +PARTITION BY date +ORDER BY (key1, key2, key3) +PRIMARY KEY (key1, key2); + +INSERT INTO table_for_rename_with_primary_key SELECT toDate('2019-10-01') + number % 3, number, number, number, toString(number), toString(number) from numbers(9); + +ALTER TABLE table_for_rename_with_primary_key RENAME COLUMN key1 TO renamed_key1; --{serverError 44} + +ALTER TABLE table_for_rename_with_primary_key RENAME COLUMN key2 TO renamed_key2; --{serverError 44} + +ALTER TABLE table_for_rename_with_primary_key RENAME COLUMN key3 TO renamed_key3; --{serverError 44} + + +DROP TABLE IF EXISTS table_for_rename_with_primary_key; From 6f255c43095e517754b7f909fa96f6434cf4fb04 Mon Sep 17 00:00:00 2001 From: Pavel Kovalenko Date: Wed, 1 Apr 2020 22:56:40 +0300 Subject: [PATCH 032/484] Virtual hosted-style support of S3 URI. --- dbms/src/IO/S3Common.cpp | 66 +++++++++++++----------------- dbms/src/IO/tests/gtest_s3_uri.cpp | 53 ++++++++++++++++++++++++ 2 files changed, 81 insertions(+), 38 deletions(-) create mode 100644 dbms/src/IO/tests/gtest_s3_uri.cpp diff --git a/dbms/src/IO/S3Common.cpp b/dbms/src/IO/S3Common.cpp index 137fe22c872..e1952f5eafd 100644 --- a/dbms/src/IO/S3Common.cpp +++ b/dbms/src/IO/S3Common.cpp @@ -2,15 +2,15 @@ #if USE_AWS_S3 -#include -#include +# include +# include -#include -#include -#include -#include -#include -#include +# include +# include +# include +# include +# include +# include namespace @@ -57,7 +57,6 @@ private: namespace DB { - namespace ErrorCodes { extern const int BAD_ARGUMENTS; @@ -106,46 +105,37 @@ namespace S3 URI::URI(const Poco::URI & uri_) { - static const std::regex bucket_key_pattern("([^/]+)/(.*)"); /// TODO std::regex is discouraged + static const RE2 virtual_hosted_style_pattern("(.+\\.)?s3[.-][a-z0-9-.]+"); + static const RE2 path_style_pattern("([^/]+)/(.*)"); uri = uri_; - // s3://* - if (uri.getScheme() == "s3" || uri.getScheme() == "S3") - { - bucket = uri.getAuthority(); - if (bucket.empty()) - throw Exception ("Invalid S3 URI: no bucket: " + uri.toString(), ErrorCodes::BAD_ARGUMENTS); - - const auto & path = uri.getPath(); - // s3://bucket or s3://bucket/ - if (path.length() <= 1) - throw Exception ("Invalid S3 URI: no key: " + uri.toString(), ErrorCodes::BAD_ARGUMENTS); - - key = path.substr(1); - return; - } - if (uri.getHost().empty()) - throw Exception("Invalid S3 URI: no host: " + uri.toString(), ErrorCodes::BAD_ARGUMENTS); + throw Exception("Invalid S3 URI host: " + uri.toString(), ErrorCodes::BAD_ARGUMENTS); endpoint = uri.getScheme() + "://" + uri.getAuthority(); - // Parse bucket and key from path. 
- std::smatch match; - std::regex_search(uri.getPath(), match, bucket_key_pattern); - if (!match.empty()) + if (re2::RE2::FullMatch(uri.getAuthority(), virtual_hosted_style_pattern, &bucket)) { - bucket = match.str(1); - if (bucket.empty()) - throw Exception ("Invalid S3 URI: no bucket: " + uri.toString(), ErrorCodes::BAD_ARGUMENTS); + if (!bucket.empty()) + bucket = bucket.substr(0, bucket.length() - 1); + if (bucket.length() < 3 || bucket.length() > 63) + throw Exception("Invalid S3 URI bucket: " + uri.toString(), ErrorCodes::BAD_ARGUMENTS); - key = match.str(2); - if (key.empty()) - throw Exception ("Invalid S3 URI: no key: " + uri.toString(), ErrorCodes::BAD_ARGUMENTS); + key = uri.getPath().substr(1); + if (key.empty() || key == "/") + throw Exception("Invalid S3 URI key: " + uri.toString(), ErrorCodes::BAD_ARGUMENTS); + } + else if (re2::RE2::PartialMatch(uri.getPath(), path_style_pattern, &bucket, &key)) + { + if (bucket.length() < 3 || bucket.length() > 63) + throw Exception("Invalid S3 URI bucket: " + uri.toString(), ErrorCodes::BAD_ARGUMENTS); + + if (key.empty() || key == "/") + throw Exception("Invalid S3 URI key: " + uri.toString(), ErrorCodes::BAD_ARGUMENTS); } else - throw Exception("Invalid S3 URI: no bucket or key: " + uri.toString(), ErrorCodes::BAD_ARGUMENTS); + throw Exception("Invalid S3 URI bucket or key: " + uri.toString(), ErrorCodes::BAD_ARGUMENTS); } } diff --git a/dbms/src/IO/tests/gtest_s3_uri.cpp b/dbms/src/IO/tests/gtest_s3_uri.cpp new file mode 100644 index 00000000000..93891b35ba2 --- /dev/null +++ b/dbms/src/IO/tests/gtest_s3_uri.cpp @@ -0,0 +1,53 @@ +#include + +#if USE_AWS_S3 + +#include + +namespace +{ +using namespace DB; + +class S3UriTest : public testing::TestWithParam +{ +}; + +TEST(S3UriTest, validPatterns) +{ + { + S3::URI uri(Poco::URI("https://jokserfn.s3.yandexcloud.net/data")); + ASSERT_EQ("https://jokserfn.s3.yandexcloud.net", uri.endpoint); + ASSERT_EQ("jokserfn", uri.bucket); + ASSERT_EQ("data", uri.key); + } + { + S3::URI uri(Poco::URI("https://storage.yandexcloud.net/jokserfn/data")); + ASSERT_EQ("https://storage.yandexcloud.net", uri.endpoint); + ASSERT_EQ("jokserfn", uri.bucket); + ASSERT_EQ("data", uri.key); + } +} + +TEST_P(S3UriTest, invalidPatterns) +{ + ASSERT_ANY_THROW(S3::URI(Poco::URI(GetParam()))); +} + +INSTANTIATE_TEST_SUITE_P( + S3, + S3UriTest, + testing::Values( + "https:///", + "https://jokserfn.s3.yandexcloud.net/", + "https://.s3.yandexcloud.net/key", + "https://s3.yandexcloud.net/key", + "https://s3.yandexcloud.net/key/", + "https://s3.yandexcloud.net//", + "https://yandexcloud.net/", + "https://yandexcloud.net//", + "https://yandexcloud.net/bucket/", + "https://yandexcloud.net//key")); + +} + +#endif From 710085d6c0c3118c55d7e17160d488b8caf9cd74 Mon Sep 17 00:00:00 2001 From: Pavel Kovalenko Date: Wed, 1 Apr 2020 22:59:44 +0300 Subject: [PATCH 033/484] Virtual hosted-style support of S3 URI. 
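Editor's note on the preceding change: PATCH 032 teaches S3::URI to accept both URI layouts that S3-compatible services expose — virtual hosted-style (bucket in the host name) and path-style (bucket as the first path segment). Below is a minimal stand-alone sketch of how the two RE2 patterns from S3Common.cpp tell them apart; the snippet is illustrative only (the main() wrapper and printed messages are made up for the example, the sample host comes from the patch's own tests) and is not part of the patch itself.

```cpp
#include <re2/re2.h>
#include <iostream>
#include <string>

int main()
{
    // The same patterns as in S3::URI.
    static const RE2 virtual_hosted_style_pattern("(.+\\.)?s3[.-][a-z0-9-.]+");
    static const RE2 path_style_pattern("([^/]+)/(.*)");

    std::string bucket;
    std::string key;

    // Virtual hosted-style: the bucket is a prefix of the authority. The capture
    // keeps its trailing dot, hence the substr(0, length - 1) in the patch.
    if (RE2::FullMatch("jokserfn.s3.yandexcloud.net", virtual_hosted_style_pattern, &bucket))
        std::cout << "hosted style, captured bucket prefix: " << bucket << "\n";

    // Path-style: bucket and key are split out of the path (with the leading
    // '/' already removed by the caller).
    if (RE2::PartialMatch("jokserfn/data", path_style_pattern, &bucket, &key))
        std::cout << "path style, bucket: " << bucket << ", key: " << key << "\n";

    return 0;
}
```

In both branches the patch then applies the same sanity checks: the bucket name must be 3 to 63 characters long, and the key must be non-empty.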
--- dbms/src/IO/tests/gtest_s3_uri.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/dbms/src/IO/tests/gtest_s3_uri.cpp b/dbms/src/IO/tests/gtest_s3_uri.cpp index 93891b35ba2..c1714703615 100644 --- a/dbms/src/IO/tests/gtest_s3_uri.cpp +++ b/dbms/src/IO/tests/gtest_s3_uri.cpp @@ -1,8 +1,9 @@ #include +#include #if USE_AWS_S3 -#include +# include namespace { From bd3d61e09fe364e60ddead08b369760cbd9af4d9 Mon Sep 17 00:00:00 2001 From: Ivan Blinkov Date: Thu, 2 Apr 2020 00:02:52 +0300 Subject: [PATCH 034/484] Incorporate the last illustration into the front page (#9995) * [docs] alternative approach to pdf generation * Incorporate the last illustration into the front page --- website/images/flags/es.svg | 2 +- website/images/index/linearly-scalable.svg | 297 +++++++++++++++++++++ website/images/index/shield.svg | 1 + website/index.html | 2 +- website/templates/index/community.html | 20 +- website/templates/index/efficient.html | 2 +- website/templates/index/features.html | 2 +- website/templates/index/quickstart.html | 2 +- website/templates/index/reliable.html | 27 -- website/templates/index/rich.html | 2 +- website/templates/index/scalable.html | 16 ++ website/templates/index/success.html | 2 +- website/templates/index/use.html | 4 +- website/templates/index/why.html | 22 +- 14 files changed, 344 insertions(+), 57 deletions(-) create mode 100644 website/images/index/linearly-scalable.svg create mode 100644 website/images/index/shield.svg delete mode 100644 website/templates/index/reliable.html create mode 100644 website/templates/index/scalable.html diff --git a/website/images/flags/es.svg b/website/images/flags/es.svg index 04f609b6c1d..d859aa650b2 100644 --- a/website/images/flags/es.svg +++ b/website/images/flags/es.svg @@ -1 +1 @@ - \ No newline at end of file + \ No newline at end of file diff --git a/website/images/index/linearly-scalable.svg b/website/images/index/linearly-scalable.svg new file mode 100644 index 00000000000..40bb67a3c81 --- /dev/null +++ b/website/images/index/linearly-scalable.svg @@ -0,0 +1,297 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/website/images/index/shield.svg b/website/images/index/shield.svg new file mode 100644 index 00000000000..e48b824909f --- /dev/null +++ b/website/images/index/shield.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/website/index.html b/website/index.html index 8770c8a97a9..b249fc31285 100644 --- a/website/index.html +++ b/website/index.html @@ -11,7 +11,7 @@ {% include "templates/index/efficient.html" %} {% include "templates/index/rich.html" %} {% include "templates/index/features.html" %} -{% include "templates/index/reliable.html" %} +{% include "templates/index/scalable.html" %} {% include "templates/index/use.html" %} {% include "templates/index/quickstart.html" %} {% include "templates/index/community.html" %} diff --git a/website/templates/index/community.html b/website/templates/index/community.html index 206dce5d34a..e48edb311b6 100644 --- a/website/templates/index/community.html +++ b/website/templates/index/community.html @@ -1,7 +1,7 @@
-

{{ _('ClickHouse community') }}

+

{{ _('ClickHouse community') }}

@@ -139,9 +139,9 @@
-

{{ _('Like ClickHouse?') }} {{ _('Help to spread the word about it via') }} Facebook, - Twitter{{ _('and') }} - LinkedIn!

+

{{ _('Like ClickHouse?') }}

+

{{ _('Help to spread the word about it via') }} Facebook, + Twitter {{ _('and') }} LinkedIn!

@@ -150,18 +150,18 @@
-
-
-

{{ _('Hosting ClickHouse Meetups') }}

+

{{ _('Hosting ClickHouse Meetups') }}

+
+

{{ _('ClickHouse meetups are essential for strengthening community worldwide, but they wouldn\'t be possible without the help of local organizers. Please, fill in this form if you want to become one or want to meet the ClickHouse core team for any other reason.') }}

- {{ _('ClickHouse Meetup') }} + {{ _('ClickHouse Meetup') }}
-
+
@@ -206,7 +206,7 @@
-
+

{{ _('If you have any more thoughts or questions, feel free to contact Yandex ClickHouse team directly at') }} diff --git a/website/templates/index/efficient.html b/website/templates/index/efficient.html index ae0a7f2c17b..240852696b4 100644 --- a/website/templates/index/efficient.html +++ b/website/templates/index/efficient.html @@ -2,7 +2,7 @@

-

Hardware efficient

+

Hardware efficient

ClickHouse processes typical analytical queries two to three orders of magnitude faster than traditional row-oriented systems with the same available I/O throughput and CPU capacity. Columnar storage format allows fitting more hot data in RAM, which leads to shorter typical response times.
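Editor's note: the columnar claim above is easy to see in miniature — scanning one column of a column-oriented layout touches only that column's bytes, while a row-oriented layout drags every other field of each row through the cache as well. A self-contained sketch (field names are made up for illustration; this is not taken from the ClickHouse sources):

```cpp
#include <cstdint>
#include <iostream>
#include <vector>

// Row-oriented: summing one field still streams whole 64-byte rows.
struct HitRow
{
    uint64_t user_id;
    uint32_t duration_ms;
    uint8_t padding[52]; // stand-in for all the other columns of a wide row
};

// Column-oriented: each column is contiguous, so a scan of duration_ms
// reads only the 4 bytes per row that it actually needs.
struct HitColumns
{
    std::vector<uint64_t> user_id;
    std::vector<uint32_t> duration_ms;
};

int main()
{
    const size_t n = 1000000;
    std::vector<HitRow> rows(n, HitRow{1, 2, {}});
    HitColumns cols{std::vector<uint64_t>(n, 1), std::vector<uint32_t>(n, 2)};

    uint64_t sum_rows = 0;
    for (const auto & row : rows)
        sum_rows += row.duration_ms; // ~64 bytes fetched per 4 useful bytes

    uint64_t sum_cols = 0;
    for (auto d : cols.duration_ms)
        sum_cols += d; // every fetched byte is useful

    std::cout << sum_rows << " " << sum_cols << "\n";
}
```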

diff --git a/website/templates/index/features.html b/website/templates/index/features.html index 30d682843b0..c659e0d9301 100644 --- a/website/templates/index/features.html +++ b/website/templates/index/features.html @@ -2,7 +2,7 @@
-

ClickHouse. Just makes you think faster!

+

ClickHouse. Just makes you think faster!

diff --git a/website/templates/index/quickstart.html b/website/templates/index/quickstart.html index 32d3b21bcc5..0afa40e6030 100644 --- a/website/templates/index/quickstart.html +++ b/website/templates/index/quickstart.html @@ -1,6 +1,6 @@
-

Quick start

+

Quick start

System requirements for pre-built packages: Linux, x86_64 with SSE 4.2.
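Editor's note: a quick way to verify the SSE 4.2 requirement on a given machine. This sketch relies on the __builtin_cpu_supports builtin available in GCC and Clang; on other toolchains, look for the sse4_2 flag in /proc/cpuinfo instead.

```cpp
#include <iostream>

int main()
{
    // __builtin_cpu_supports queries the CPU actually running the program,
    // not the compile-time target.
    if (__builtin_cpu_supports("sse4.2"))
        std::cout << "SSE 4.2 is available: pre-built packages should run.\n";
    else
        std::cout << "No SSE 4.2: pre-built packages will not run; build from source.\n";
    return 0;
}
```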

diff --git a/website/templates/index/reliable.html b/website/templates/index/reliable.html deleted file mode 100644 index 05ba1b00027..00000000000 --- a/website/templates/index/reliable.html +++ /dev/null @@ -1,27 +0,0 @@ -
-
- -

Highly reliable

- -

ClickHouse has been managing petabytes of data serving a number of highload mass audience services of - Yandex, Russia's - leading search provider and one of the largest European IT companies. - Since 2012, ClickHouse has been providing robust database management for the company's web analytics service, comparison - e-commerce platform, public email service, online advertising platform, business intelligence tools - and infrastructure monitoring.

- -

ClickHouse can be configured as a purely distributed system located on independent nodes, - without any single points of failure.

- -

Software and hardware failures or misconfigurations do not result in loss of data. Instead of deleting "broken" - data, ClickHouse saves it or asks you what to do before a startup. All data is checksummed before every - read or write to disk or network. It is virtually impossible to delete data by accident as there are safeguards - even for human errors.

- -

ClickHouse offers flexible limits on query complexity and resource usage, which can be fine-tuned with settings. - It is possible to simultaneously serve both a number of high priority low-latency requests and some - long-running queries with a background priority.

-
-
diff --git a/website/templates/index/rich.html b/website/templates/index/rich.html index 14b2f86e75f..1f2b4957306 100644 --- a/website/templates/index/rich.html +++ b/website/templates/index/rich.html @@ -3,7 +3,7 @@
-

Feature-rich

+

Feature-rich

diff --git a/website/templates/index/scalable.html b/website/templates/index/scalable.html new file mode 100644 index 00000000000..deb63839131 --- /dev/null +++ b/website/templates/index/scalable.html @@ -0,0 +1,16 @@ +
+
+
+
+

Linearly scalable

+

ClickHouse scales well both vertically and horizontally. It adapts easily to run either on a cluster with hundreds or thousands of nodes, or on a single server, or even on a tiny virtual machine. Currently, there are installations with multiple trillion rows or hundreds of terabytes of data per single node.

+

There are many ClickHouse clusters consisting of several hundred nodes, including a few clusters of Yandex Metrica, while the largest known ClickHouse cluster is well over a thousand nodes.

+
+
Linearly scalable
+
+
+
diff --git a/website/templates/index/success.html b/website/templates/index/success.html index 2d34d808e3d..961dc859535 100644 --- a/website/templates/index/success.html +++ b/website/templates/index/success.html @@ -1,6 +1,6 @@
-

Success stories

+

Success stories

diff --git a/website/templates/index/use.html b/website/templates/index/use.html index edf4a28cf67..1f345186d71 100644 --- a/website/templates/index/use.html +++ b/website/templates/index/use.html @@ -2,7 +2,7 @@
-

When to use ClickHouse

+

When to use ClickHouse

For analytics over a stream of clean, well structured and immutable events or logs. It is recommended to put each such stream into a single wide fact table with pre-joined dimensions.

@@ -27,7 +27,7 @@
-

When NOT to use ClickHouse

+

When NOT to use ClickHouse

    diff --git a/website/templates/index/why.html b/website/templates/index/why.html index 131b6757793..53bde640c4f 100644 --- a/website/templates/index/why.html +++ b/website/templates/index/why.html @@ -2,7 +2,7 @@
    -

    Why ClickHouse might be the right choice for you?

    +

    Why ClickHouse might be the right choice?

    @@ -10,7 +10,7 @@ Blazing fast
    -

    Blazing fast

    +

    Blazing fast

ClickHouse uses all available hardware to its full potential to process each query as fast as possible. Peak processing performance for a single query stands at more than 2 terabytes per second (after decompression, counting only the columns actually used). In a distributed setup, reads are automatically balanced among healthy replicas to avoid increasing latency.

    @@ -18,25 +18,25 @@ Fault tolerant
    -

    Fault-tolerant

    +

    Fault-tolerant

ClickHouse supports multi-master asynchronous replication and can be deployed across multiple datacenters. All nodes are equal, which avoids single points of failure. Downtime of a single node or the whole datacenter won't affect the system's availability for both reads and writes.

-
- Linearly scalable -
-
-

Linearly scalable

-

ClickHouse scales well both vertically and horizontally. ClickHouse is easily adaptable to perform either on a cluster with hundreds or thousands of nodes or on a single server or even on a tiny virtual machine. Currently, there are installations with more multiple trillion rows or hundreds of terabytes of data per single node.

-
Easy to use
-

Easy to use

+

Easy to use

ClickHouse is simple and works out-of-the-box. It streamlines all your data processing: ingest all your structured data into the system and it becomes instantly available for building reports. SQL dialect allows expressing the desired result without involving any custom non-standard API that could be found in some DBMS.

+
+ Highly reliable +
+
+

Highly reliable

+

ClickHouse can be configured as a purely distributed system located on independent nodes, without any single points of failure. It also includes a lot of enterprise-grade security features and fail-safe mechanisms against human errors.

+
From 7fbdb7b9b58b00f3a2a7778362f66a7cb739f6b3 Mon Sep 17 00:00:00 2001 From: ImgBotApp Date: Wed, 1 Apr 2020 21:03:55 +0000 Subject: [PATCH 035/484] [ImgBot] Optimize images /website/images/index/linearly-scalable.svg -- 17.72kb -> 15.95kb (9.95%) Signed-off-by: ImgBotApp --- website/images/index/linearly-scalable.svg | 298 +-------------------- 1 file changed, 1 insertion(+), 297 deletions(-) diff --git a/website/images/index/linearly-scalable.svg b/website/images/index/linearly-scalable.svg index 40bb67a3c81..b2cd41338ec 100644 --- a/website/images/index/linearly-scalable.svg +++ b/website/images/index/linearly-scalable.svg @@ -1,297 +1 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + \ No newline at end of file From 9d6c88c78e7fcd8c650b0b7f1461ee5c7649806c Mon Sep 17 00:00:00 2001 From: Vasily Nemkov Date: Thu, 2 Apr 2020 00:27:21 +0300 Subject: [PATCH 036/484] SplitTokenExtractor::next unit-tests --- .../tests/gtest_SplitTokenExtractor.cpp | 168 ++++++++++++++++++ 1 file changed, 168 insertions(+) diff --git a/dbms/src/Storages/tests/gtest_SplitTokenExtractor.cpp b/dbms/src/Storages/tests/gtest_SplitTokenExtractor.cpp index e69de29bb2d..6be6650369e 100644 --- a/dbms/src/Storages/tests/gtest_SplitTokenExtractor.cpp +++ b/dbms/src/Storages/tests/gtest_SplitTokenExtractor.cpp @@ -0,0 +1,168 @@ +#include + +#include +#include + +#include + +#include +#include +#include +#include + +#include +#include +#include + +namespace +{ +using namespace DB; +} + +struct SplitTokenExtractorTestCase +{ + const char * description; + const std::string source; + const std::vector tokens; +}; + +std::ostream & operator<<(std::ostream & ostr, const SplitTokenExtractorTestCase & test_case) +{ + return ostr << test_case.description; +} + +class SplitTokenExtractorTest : public ::testing::TestWithParam +{ +public: + void SetUp() override + { + const auto & param = GetParam(); + const auto & source = param.source; + data = std::make_unique>(source.data(), source.data() + source.size()); + } + + std::unique_ptr> data; +}; + +TEST_P(SplitTokenExtractorTest, next) +{ + const auto & param = GetParam(); + + SplitTokenExtractor token_extractor; + + size_t i = 0; + + size_t pos = 0; + size_t token_start = 0; + size_t token_len = 0; + + for (const auto & expected_token : param.tokens) + { + SCOPED_TRACE(++i); + EXPECT_TRUE(token_extractor.next(data->data(), data->size(), &pos, &token_start, &token_len)); + EXPECT_EQ(expected_token, param.source.substr(token_start, token_len)) + << " token_start:" << token_start << " token_len: " << token_len; + } +} + +#define BINARY_STRING(str) std::string{str, sizeof(str)-1} + +INSTANTIATE_TEST_SUITE_P(ShortSingleToken, + SplitTokenExtractorTest, + ::testing::ValuesIn(std::initializer_list{ + { + "Empty input sequence produces no tokens.", + "", + {} + }, + { + "Short single token", + "foo", + {"foo"} + }, + { + "Short single token surruonded by whitespace", + "\t\vfoo\n\r", + {"foo"} + } + }) +); + +INSTANTIATE_TEST_SUITE_P(UTF8, + SplitTokenExtractorTest, + ::testing::ValuesIn(std::initializer_list{ + { + "Single token with mixed ASCII and UTF-8 chars", + 
BINARY_STRING("abc\u0442" "123\u0447XYZ\u043A"),
+ {"abc\u0442" "123\u0447XYZ\u043A"}
+ },
+ {
+ "Multiple UTF-8 tokens",
+ BINARY_STRING("\u043F\u0440\u0438\u0432\u0435\u0442, \u043C\u0438\u0440!"),
+ {"\u043F\u0440\u0438\u0432\u0435\u0442", "\u043C\u0438\u0440"}
+ },
+ })
+);
+
+INSTANTIATE_TEST_SUITE_P(MultipleTokens,
+ SplitTokenExtractorTest,
+ ::testing::ValuesIn(std::initializer_list<SplitTokenExtractorTestCase>{
+ {
+ "Multiple tokens separated by whitespace",
+ BINARY_STRING("\nabc 123\tXYZ\r"),
+ {
+ "abc", "123", "XYZ"
+ }
+ },
+ {
+ "Multiple tokens separated by non-printable chars",
+ BINARY_STRING("\0abc\1" "123\2XYZ\4"),
+ {
+ "abc", "123", "XYZ"
+ }
+ },
+ {
+ "ASCII table is split into numeric, upper case and lower case letters",
+
+ BINARY_STRING("\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16"
+ "\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f !\"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNO"
+ "PQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\x7f\x80\x81\x82\x83\x84\x85\x86\x87"
+ "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c"
+ "\x9d\x9e\x9f\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf\xb0\xb1"
+ "\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0\xc1\xc2\xc3\xc4\xc5\xc6"
+ "\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb"
+ "\xdc\xdd\xde\xdf\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0"
+ "\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"),
+ {
+ "0123456789", "ABCDEFGHIJKLMNOPQRSTUVWXYZ", "abcdefghijklmnopqrstuvwxyz"
+ }
+ }
+ })
+);
+
+
+INSTANTIATE_TEST_SUITE_P(SIMD_Cases,
+ SplitTokenExtractorTest,
+ ::testing::ValuesIn(std::initializer_list<SplitTokenExtractorTestCase>{
+ {
+ "First 16 bytes are empty, then a short token",
+ " abcdef",
+ {"abcdef"}
+ },
+ {
+ "Token crosses boundary of 16-byte chunk",
+ " abcdef",
+ {"abcdef"}
+ },
+ {
+ "Token ends at the end of 16-byte chunk",
+ " abcdef",
+ {"abcdef"}
+ },
+ {
+ "Token crosses boundaries of multiple 16-byte chunks",
+ "abcdefghijklmnopqrstuvwxyz",
+ {"abcdefghijklmnopqrstuvwxyz"}
+ },
+ })
+);
From 90cb6a25cf1d10d6e45d83112dcdc6fc713415c6 Mon Sep 17 00:00:00 2001
From: Vasily Nemkov
Date: Thu, 2 Apr 2020 00:28:02 +0300
Subject: [PATCH 037/484] Fixed compilation issues and fixed several bugs in
 SplitTokenExtractor::next

* Handling all characters above 0x80 as symbols (fixes UTF8 tokens)
* Properly handling tokens that end exactly on haystack boundary.

---
 .../MergeTree/MergeTreeIndexFullText.cpp      | 31 +++++++++++++------
 1 file changed, 21 insertions(+), 10 deletions(-)

diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexFullText.cpp b/dbms/src/Storages/MergeTree/MergeTreeIndexFullText.cpp
index 5e4bf15418c..68d67a0c787 100644
--- a/dbms/src/Storages/MergeTree/MergeTreeIndexFullText.cpp
+++ b/dbms/src/Storages/MergeTree/MergeTreeIndexFullText.cpp
@@ -19,6 +19,10 @@
 #include

+#include
+#include
+#include
+
 namespace DB
 {

@@ -609,16 +613,17 @@ bool SplitTokenExtractor::next(const char * data, size_t len, size_t * pos, size

 while (*pos < len)
 {
-#if __SSE2__
+#if defined(__SSE2__)
 // NOTE: we assume that `data` string is padded from the right with 15 zero-bytes.
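 // (Editor's note, not part of the original patch: _mm_loadu_si128 below
 // unconditionally reads a full 16 bytes, so without that zero padding the
 // last iteration over a short tail could read past the end of `data`.)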
const __m128i haystack = _mm_loadu_si128(reinterpret_cast<const __m128i *>(data + *pos));
 const size_t haystack_length = 16;

-#if __SSE4_2__
+#if defined(__SSE4_2__)
 // With the help of https://www.strchr.com/strcmp_and_strlen_using_sse_4.2
- static const auto alnum_chars_ranges = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 'Z', 'A', 'z', 'a', '9', '0');
+ static const auto alnum_chars_ranges = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
+ '\xFF', '\x80', 'z', 'a', 'Z', 'A', '9', '0');
 // Every bit represents if `haystack` character is in the ranges (1) or not (0)
- const auto result_bitmask = _mm_cvtsi128_si32(_mm_cmpestrm(alnum_chars_ranges, 6, haystack, haystack_length, _SIDD_CMP_RANGES | _SIDD_UBYTE_OPS));
+ const int result_bitmask = _mm_cvtsi128_si32(_mm_cmpestrm(alnum_chars_ranges, 8, haystack, haystack_length, _SIDD_CMP_RANGES));
#else
 // NOTE: -1 and +1 required since SSE2 has no `>=` and `<=` instructions on packed 8-bit integers (epi8).
 static const auto number_begin = _mm_set1_epi8('0' - 1);
 static const auto number_end = _mm_set1_epi8('9' + 1);
 static const auto alpha_lower_begin = _mm_set1_epi8('a' - 1);
 static const auto alpha_lower_end = _mm_set1_epi8('z' + 1);
 static const auto alpha_upper_begin = _mm_set1_epi8('A' - 1);
 static const auto alpha_upper_end = _mm_set1_epi8('Z' + 1);
+ static const auto zero = _mm_set1_epi8(0);

 // every bit represents if `haystack` character `c` satisfies condition:
- // (c > '0' - 1 && c < '9' + 1) || (c > 'a' - 1 && c < 'z' + 1) || (c > 'A' - 1 && c < 'Z' + 1)
- const int result_bitmask = _mm_movemask_epi8(_mm_or_si128(_mm_or_si128(
- _mm_and_si128(_mm_cmpgt_epi8(haystack, number_begin), _mm_cmplt_epi8(haystack, number_end)),
- _mm_and_si128(_mm_cmpgt_epi8(haystack, alpha_lower_begin), _mm_cmplt_epi8(haystack, alpha_lower_end))),
- _mm_and_si128(_mm_cmpgt_epi8(haystack, alpha_upper_begin), _mm_cmplt_epi8(haystack, alpha_upper_end))));
+ // (c < 0) || (c > '0' - 1 && c < '9' + 1) || (c > 'a' - 1 && c < 'z' + 1) || (c > 'A' - 1 && c < 'Z' + 1)
+ // < 0 since _mm_cmplt_epi8 treats chars as SIGNED, and hence all chars >= 0x80 are negative.
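 // (Editor's note, not part of the original patch: result_bitmask gets one
 // bit per haystack byte, set when the byte is a token character. For a
 // window starting "ab, cd" the low bits of the mask are 0b110011; the code
 // below finds the token start as the count of trailing zeros and the token
 // length as the following run of ones, yielding "ab" first and resuming at
 // the gap before "cd".)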
+ const int result_bitmask = _mm_movemask_epi8(_mm_or_si128(_mm_or_si128(_mm_or_si128( + _mm_cmplt_epi8(haystack, zero), + _mm_and_si128(_mm_cmpgt_epi8(haystack, number_begin), _mm_cmplt_epi8(haystack, number_end))), + _mm_and_si128(_mm_cmpgt_epi8(haystack, alpha_lower_begin), _mm_cmplt_epi8(haystack, alpha_lower_end))), + _mm_and_si128(_mm_cmpgt_epi8(haystack, alpha_upper_begin), _mm_cmplt_epi8(haystack, alpha_upper_end)))); #endif // NOTE: __builtin_ctz family explicitly state that result is UNDEFINED if argument is 0 if (result_bitmask == 0) @@ -649,12 +657,15 @@ bool SplitTokenExtractor::next(const char * data, size_t len, size_t * pos, size const auto start = getTrailingZeroBits(result_bitmask); if (*token_len == 0) *token_start = *pos + start; + else if (start != 0) + // token is not continued in this haystack + return true; const auto l = getTrailingZeroBits(~(result_bitmask >> start)); *token_len += l; *pos += start + l; - if (start + l == 16) + if (start + l == haystack_length) // check if there are leftovers in next `haystack` continue; From f17fd7969ca01550fe1932aaaacb36c6255074c4 Mon Sep 17 00:00:00 2001 From: Vasily Nemkov Date: Thu, 2 Apr 2020 00:32:12 +0300 Subject: [PATCH 038/484] Minor: excluded superflous includes --- dbms/src/Storages/tests/gtest_SplitTokenExtractor.cpp | 4 ---- 1 file changed, 4 deletions(-) diff --git a/dbms/src/Storages/tests/gtest_SplitTokenExtractor.cpp b/dbms/src/Storages/tests/gtest_SplitTokenExtractor.cpp index 6be6650369e..2cd20a70821 100644 --- a/dbms/src/Storages/tests/gtest_SplitTokenExtractor.cpp +++ b/dbms/src/Storages/tests/gtest_SplitTokenExtractor.cpp @@ -10,10 +10,6 @@ #include #include -#include -#include -#include - namespace { using namespace DB; From b3553ba62471a620296432c79d5ffb1974918e9a Mon Sep 17 00:00:00 2001 From: tavplubix Date: Thu, 2 Apr 2020 00:39:59 +0300 Subject: [PATCH 039/484] Update 01108_restart_replicas_rename_deadlock.sh --- .../0_stateless/01108_restart_replicas_rename_deadlock.sh | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/dbms/tests/queries/0_stateless/01108_restart_replicas_rename_deadlock.sh b/dbms/tests/queries/0_stateless/01108_restart_replicas_rename_deadlock.sh index 7fedf321fdb..aac5b637d2b 100755 --- a/dbms/tests/queries/0_stateless/01108_restart_replicas_rename_deadlock.sh +++ b/dbms/tests/queries/0_stateless/01108_restart_replicas_rename_deadlock.sh @@ -61,9 +61,8 @@ timeout $TIMEOUT bash -c restart_thread_1 2> /dev/null & timeout $TIMEOUT bash -c restart_thread_2 2> /dev/null & wait +sleep 3 -$CLICKHOUSE_CLIENT -q "SYSTEM RESTART REPLICAS" -sleep 2 $CLICKHOUSE_CLIENT -q "SELECT sum(n), count(n) FROM merge(currentDatabase(), '^replica_01108_') GROUP BY position(_table, 'tmp')" From 97f2a2213e754ba25dabba4bc8ddf507cd67660c Mon Sep 17 00:00:00 2001 From: Ivan <5627721+abyss7@users.noreply.github.com> Date: Thu, 2 Apr 2020 02:51:21 +0300 Subject: [PATCH 040/484] Move all folders inside /dbms one level up (#9974) * Move some code outside dbms/src folder * Fix paths --- .../ISSUE_TEMPLATE/backward-compatibility.md | 2 +- .github/ISSUE_TEMPLATE/bug-report.md | 2 +- .github/ISSUE_TEMPLATE/performance-issue.md | 2 +- .../ISSUE_TEMPLATE/unexpected-behaviour.md | 2 +- .github/ISSUE_TEMPLATE/usability-issue.md | 2 +- .gitignore | 200 ++--- .gitlab-ci.yml | 6 +- CHANGELOG.md | 4 +- CMakeLists.txt | 7 +- {dbms/benchmark => benchmark}/benchmark.sh | 0 .../clickhouse/benchmark-chyt.sh | 0 .../clickhouse/benchmark-new.sh | 0 .../clickhouse/benchmark-yql.sh | 0 .../clickhouse/queries.sql | 0 
{dbms/benchmark => benchmark}/create_dump.sh | 0 .../benchmark => benchmark}/greenplum/README | 0 .../greenplum/benchmark.sh | 0 .../greenplum/dump_dataset_from_ch.sh | 0 .../greenplum/load_data_set.sql | 0 .../greenplum/queries.sql | 0 .../greenplum/result_parser.py | 0 .../greenplum/schema.sql | 0 {dbms/benchmark => benchmark}/hive/conf.sh | 0 .../hive/define_schema.sql | 0 {dbms/benchmark => benchmark}/hive/expect.tcl | 0 .../hive/log/log_100m_tuned | 0 .../hive/log/log_10m/log_10m_ | 0 .../hive/log/log_10m/log_10m_1 | 0 .../hive/log/log_10m/log_10m_2 | 0 .../hive/log/log_10m/log_10m_3 | 0 .../hive/log/log_10m/log_10m_tuned | 0 .../hive/log/log_10m/log_hits_10m | 0 .../benchmark => benchmark}/hive/queries.sql | 0 .../benchmark => benchmark}/hive/run_hive.sh | 0 .../benchmark => benchmark}/infinidb/conf.sh | 0 .../infinidb/define_schema.sql | 0 .../infinidb/expect.tcl | 0 .../infinidb/log/log_100m | 0 .../infinidb/log/log_100m_tuned | 0 .../infinidb/log/log_10m | 0 .../infinidb/log/log_10m_tuned | 0 .../infinidb/queries.sql | 0 .../infobright/conf.sh | 0 .../infobright/define_schema.sql | 0 .../infobright/expect.tcl | 0 .../infobright/log-community/log_10m | 0 .../infobright/queries.sql | 0 .../memsql/benchmark.sh | 0 .../memsql/instructions.txt | 0 .../memsql/queries.sql | 0 {dbms/benchmark => benchmark}/monetdb/conf.sh | 0 .../monetdb/define_schema.sql | 0 .../monetdb/expect.tcl | 0 .../monetdb/log/log_100m | 0 .../monetdb/log/log_100m_1 | 0 .../monetdb/log/log_100m_corrected | 0 .../monetdb/log/log_100m_corrected_1 | Bin .../monetdb/log/log_100m_corrected_2 | 0 .../monetdb/log/log_10m | 0 .../monetdb/log/log_10m_corrected | 0 .../monetdb/log/log_10m_corrected_1 | 0 .../monetdb/log/log_upload_100m | 0 .../monetdb/log/log_upload_1b | 0 .../monetdb/queries.sql | 0 {dbms/benchmark => benchmark}/vertica/README | 0 .../vertica/benchmark.sh | 0 .../vertica/hits_define_schema.sql | 0 .../vertica/queries.sql | 0 cmake/lib_name.cmake | 2 +- cmake/sanitize.cmake | 2 +- .../{src => }/Access/AccessControlManager.cpp | 0 dbms/{src => }/Access/AccessControlManager.h | 0 dbms/{src => }/Access/AccessFlags.h | 0 dbms/{src => }/Access/AccessRights.cpp | 0 dbms/{src => }/Access/AccessRights.h | 0 dbms/{src => }/Access/AccessRightsElement.cpp | 0 dbms/{src => }/Access/AccessRightsElement.h | 0 dbms/{src => }/Access/AccessType.h | 0 dbms/{src => }/Access/AllowedClientHosts.cpp | 0 dbms/{src => }/Access/AllowedClientHosts.h | 0 dbms/{src => }/Access/Authentication.cpp | 0 dbms/{src => }/Access/Authentication.h | 0 dbms/{src => }/Access/CMakeLists.txt | 0 dbms/{src => }/Access/ContextAccess.cpp | 0 dbms/{src => }/Access/ContextAccess.h | 0 dbms/{src => }/Access/DiskAccessStorage.cpp | 0 dbms/{src => }/Access/DiskAccessStorage.h | 0 dbms/{src => }/Access/EnabledQuota.cpp | 0 dbms/{src => }/Access/EnabledQuota.h | 0 dbms/{src => }/Access/EnabledRoles.cpp | 0 dbms/{src => }/Access/EnabledRoles.h | 0 dbms/{src => }/Access/EnabledRolesInfo.cpp | 0 dbms/{src => }/Access/EnabledRolesInfo.h | 0 dbms/{src => }/Access/EnabledRowPolicies.cpp | 0 dbms/{src => }/Access/EnabledRowPolicies.h | 0 dbms/{src => }/Access/EnabledSettings.cpp | 0 dbms/{src => }/Access/EnabledSettings.h | 0 dbms/{src => }/Access/ExtendedRoleSet.cpp | 0 dbms/{src => }/Access/ExtendedRoleSet.h | 0 dbms/{src => }/Access/IAccessEntity.cpp | 0 dbms/{src => }/Access/IAccessEntity.h | 0 dbms/{src => }/Access/IAccessStorage.cpp | 0 dbms/{src => }/Access/IAccessStorage.h | 0 dbms/{src => }/Access/MemoryAccessStorage.cpp | 0 dbms/{src => 
}/Access/MemoryAccessStorage.h | 0 .../Access/MultipleAccessStorage.cpp | 0 dbms/{src => }/Access/MultipleAccessStorage.h | 0 dbms/{src => }/Access/Quota.cpp | 0 dbms/{src => }/Access/Quota.h | 0 dbms/{src => }/Access/QuotaCache.cpp | 0 dbms/{src => }/Access/QuotaCache.h | 0 dbms/{src => }/Access/QuotaUsageInfo.cpp | 0 dbms/{src => }/Access/QuotaUsageInfo.h | 0 dbms/{src => }/Access/Role.cpp | 0 dbms/{src => }/Access/Role.h | 0 dbms/{src => }/Access/RoleCache.cpp | 0 dbms/{src => }/Access/RoleCache.h | 0 dbms/{src => }/Access/RowPolicy.cpp | 0 dbms/{src => }/Access/RowPolicy.h | 0 dbms/{src => }/Access/RowPolicyCache.cpp | 0 dbms/{src => }/Access/RowPolicyCache.h | 0 dbms/{src => }/Access/SettingsConstraints.cpp | 0 dbms/{src => }/Access/SettingsConstraints.h | 0 dbms/{src => }/Access/SettingsProfile.cpp | 0 dbms/{src => }/Access/SettingsProfile.h | 0 .../Access/SettingsProfileElement.cpp | 0 .../{src => }/Access/SettingsProfileElement.h | 0 .../Access/SettingsProfilesCache.cpp | 0 dbms/{src => }/Access/SettingsProfilesCache.h | 0 dbms/{src => }/Access/User.cpp | 0 dbms/{src => }/Access/User.h | 0 .../Access/UsersConfigAccessStorage.cpp | 0 .../Access/UsersConfigAccessStorage.h | 0 .../AggregateFunctionAggThrow.cpp | 0 .../AggregateFunctionArgMinMax.h | 0 .../AggregateFunctionArray.cpp | 0 .../AggregateFunctionArray.h | 0 .../AggregateFunctionAvg.cpp | 0 .../AggregateFunctions/AggregateFunctionAvg.h | 0 .../AggregateFunctionAvgWeighted.cpp | 0 .../AggregateFunctionAvgWeighted.h | 0 .../AggregateFunctionBitwise.cpp | 0 .../AggregateFunctionBitwise.h | 0 .../AggregateFunctionBoundingRatio.cpp | 0 .../AggregateFunctionBoundingRatio.h | 0 ...ateFunctionCategoricalInformationValue.cpp | 0 ...egateFunctionCategoricalInformationValue.h | 0 .../AggregateFunctionCombinatorFactory.cpp | 0 .../AggregateFunctionCombinatorFactory.h | 0 .../AggregateFunctionCount.cpp | 0 .../AggregateFunctionCount.h | 0 .../AggregateFunctionEntropy.cpp | 0 .../AggregateFunctionEntropy.h | 0 .../AggregateFunctionFactory.cpp | 0 .../AggregateFunctionFactory.h | 0 .../AggregateFunctionForEach.cpp | 0 .../AggregateFunctionForEach.h | 0 .../AggregateFunctionGroupArray.cpp | 0 .../AggregateFunctionGroupArray.h | 0 .../AggregateFunctionGroupArrayInsertAt.cpp | 0 .../AggregateFunctionGroupArrayInsertAt.h | 0 .../AggregateFunctionGroupArrayMoving.cpp | 0 .../AggregateFunctionGroupArrayMoving.h | 0 .../AggregateFunctionGroupBitmap.cpp | 0 .../AggregateFunctionGroupBitmap.h | 0 .../AggregateFunctionGroupBitmapData.h | 0 .../AggregateFunctionGroupUniqArray.cpp | 0 .../AggregateFunctionGroupUniqArray.h | 0 .../AggregateFunctionHistogram.cpp | 0 .../AggregateFunctionHistogram.h | 0 .../AggregateFunctionIf.cpp | 0 .../AggregateFunctions/AggregateFunctionIf.h | 0 .../AggregateFunctionMLMethod.cpp | 0 .../AggregateFunctionMLMethod.h | 0 .../AggregateFunctionMaxIntersections.cpp | 0 .../AggregateFunctionMaxIntersections.h | 0 .../AggregateFunctionMerge.cpp | 0 .../AggregateFunctionMerge.h | 0 .../AggregateFunctionMinMaxAny.cpp | 0 .../AggregateFunctionMinMaxAny.h | 0 .../AggregateFunctionNothing.h | 0 .../AggregateFunctionNull.cpp | 0 .../AggregateFunctionNull.h | 0 .../AggregateFunctionOrFill.cpp | 0 .../AggregateFunctionOrFill.h | 0 .../AggregateFunctionQuantile.cpp | 0 .../AggregateFunctionQuantile.h | 0 .../AggregateFunctionResample.cpp | 0 .../AggregateFunctionResample.h | 0 .../AggregateFunctionRetention.cpp | 0 .../AggregateFunctionRetention.h | 0 .../AggregateFunctionSequenceMatch.cpp | 0 .../AggregateFunctionSequenceMatch.h | 0 
...ggregateFunctionSimpleLinearRegression.cpp | 0 .../AggregateFunctionSimpleLinearRegression.h | 0 .../AggregateFunctionState.cpp | 0 .../AggregateFunctionState.h | 0 .../AggregateFunctionStatistics.cpp | 0 .../AggregateFunctionStatistics.h | 0 .../AggregateFunctionStatisticsSimple.cpp | 0 .../AggregateFunctionStatisticsSimple.h | 0 .../AggregateFunctionSum.cpp | 0 .../AggregateFunctions/AggregateFunctionSum.h | 0 .../AggregateFunctionSumMap.cpp | 0 .../AggregateFunctionSumMap.h | 0 .../AggregateFunctionTimeSeriesGroupSum.cpp | 0 .../AggregateFunctionTimeSeriesGroupSum.h | 0 .../AggregateFunctionTopK.cpp | 0 .../AggregateFunctionTopK.h | 0 .../AggregateFunctionUniq.cpp | 0 .../AggregateFunctionUniq.h | 0 .../AggregateFunctionUniqCombined.cpp | 0 .../AggregateFunctionUniqCombined.h | 0 .../AggregateFunctionUniqUpTo.cpp | 0 .../AggregateFunctionUniqUpTo.h | 0 .../AggregateFunctionWindowFunnel.cpp | 0 .../AggregateFunctionWindowFunnel.h | 0 .../AggregateFunctions/CMakeLists.txt | 0 .../AggregateFunctions/FactoryHelpers.h | 0 dbms/{src => }/AggregateFunctions/Helpers.h | 0 .../AggregateFunctions/HelpersMinMaxAny.h | 0 .../AggregateFunctions/IAggregateFunction.h | 0 .../IAggregateFunctionCombinator.h | 0 .../AggregateFunctions/QuantileExact.h | 0 .../QuantileExactWeighted.h | 0 .../QuantileReservoirSampler.h | 0 .../QuantileReservoirSamplerDeterministic.h | 0 .../AggregateFunctions/QuantileTDigest.h | 0 .../AggregateFunctions/QuantileTiming.h | 0 .../AggregateFunctions/QuantilesCommon.h | 0 .../AggregateFunctions/ReservoirSampler.h | 0 .../ReservoirSamplerDeterministic.h | 0 .../UniqCombinedBiasData.cpp | 0 .../AggregateFunctions/UniqCombinedBiasData.h | 0 .../AggregateFunctions/UniqVariadicHash.cpp | 0 .../AggregateFunctions/UniqVariadicHash.h | 0 .../AggregateFunctions/UniquesHashSet.h | 0 .../parseAggregateFunctionParameters.cpp | 0 .../parseAggregateFunctionParameters.h | 0 .../registerAggregateFunctions.cpp | 0 .../registerAggregateFunctions.h | 0 .../AggregateFunctions/tests/CMakeLists.txt | 0 .../tests/quantile-t-digest.cpp | 0 dbms/CMakeLists.txt | 183 ++--- dbms/{src => }/Client/CMakeLists.txt | 0 dbms/{src => }/Client/Connection.cpp | 0 dbms/{src => }/Client/Connection.h | 0 dbms/{src => }/Client/ConnectionPool.h | 0 .../Client/ConnectionPoolWithFailover.cpp | 0 .../Client/ConnectionPoolWithFailover.h | 0 .../Client/MultiplexedConnections.cpp | 0 .../{src => }/Client/MultiplexedConnections.h | 0 dbms/{src => }/Client/TimeoutSetter.cpp | 0 dbms/{src => }/Client/TimeoutSetter.h | 0 dbms/{src => }/Client/tests/CMakeLists.txt | 0 dbms/{src => }/Client/tests/test_connect.cpp | 0 dbms/{src => }/Columns/CMakeLists.txt | 0 dbms/{src => }/Columns/Collator.cpp | 0 dbms/{src => }/Columns/Collator.h | 0 .../Columns/ColumnAggregateFunction.cpp | 0 .../Columns/ColumnAggregateFunction.h | 0 dbms/{src => }/Columns/ColumnArray.cpp | 0 dbms/{src => }/Columns/ColumnArray.h | 0 dbms/{src => }/Columns/ColumnConst.cpp | 0 dbms/{src => }/Columns/ColumnConst.h | 0 dbms/{src => }/Columns/ColumnDecimal.cpp | 0 dbms/{src => }/Columns/ColumnDecimal.h | 0 dbms/{src => }/Columns/ColumnFixedString.cpp | 0 dbms/{src => }/Columns/ColumnFixedString.h | 0 dbms/{src => }/Columns/ColumnFunction.cpp | 0 dbms/{src => }/Columns/ColumnFunction.h | 0 .../Columns/ColumnLowCardinality.cpp | 0 dbms/{src => }/Columns/ColumnLowCardinality.h | 0 dbms/{src => }/Columns/ColumnNothing.h | 0 dbms/{src => }/Columns/ColumnNullable.cpp | 0 dbms/{src => }/Columns/ColumnNullable.h | 0 dbms/{src => }/Columns/ColumnSet.h | 0 dbms/{src => 
}/Columns/ColumnString.cpp | 0 dbms/{src => }/Columns/ColumnString.h | 0 dbms/{src => }/Columns/ColumnTuple.cpp | 0 dbms/{src => }/Columns/ColumnTuple.h | 0 dbms/{src => }/Columns/ColumnUnique.h | 0 dbms/{src => }/Columns/ColumnVector.cpp | 0 dbms/{src => }/Columns/ColumnVector.h | 0 dbms/{src => }/Columns/ColumnVectorHelper.h | 0 dbms/{src => }/Columns/ColumnsCommon.cpp | 0 dbms/{src => }/Columns/ColumnsCommon.h | 0 dbms/{src => }/Columns/ColumnsNumber.h | 0 dbms/{src => }/Columns/FilterDescription.cpp | 0 dbms/{src => }/Columns/FilterDescription.h | 0 dbms/{src => }/Columns/IColumn.cpp | 0 dbms/{src => }/Columns/IColumn.h | 0 dbms/{src => }/Columns/IColumnDummy.h | 0 dbms/{src => }/Columns/IColumnImpl.h | 0 dbms/{src => }/Columns/IColumnUnique.h | 0 dbms/{src => }/Columns/ReverseIndex.h | 0 .../{src => }/Columns/getLeastSuperColumn.cpp | 0 dbms/{src => }/Columns/getLeastSuperColumn.h | 0 dbms/{src => }/Columns/tests/CMakeLists.txt | 0 .../Columns/tests/gtest_column_unique.cpp | 0 .../Columns/tests/gtest_weak_hash_32.cpp | 0 dbms/{src => }/Common/ActionBlocker.h | 0 dbms/{src => }/Common/ActionLock.cpp | 0 dbms/{src => }/Common/ActionLock.h | 0 dbms/{src => }/Common/AlignedBuffer.cpp | 0 dbms/{src => }/Common/AlignedBuffer.h | 0 dbms/{src => }/Common/Allocator.h | 0 dbms/{src => }/Common/Allocator_fwd.h | 0 dbms/{src => }/Common/Arena.h | 0 dbms/{src => }/Common/ArenaAllocator.h | 0 dbms/{src => }/Common/ArenaWithFreeLists.h | 0 dbms/{src => }/Common/ArrayCache.h | 0 dbms/{src => }/Common/AutoArray.h | 0 dbms/{src => }/Common/BitHelpers.h | 0 dbms/{src => }/Common/CMakeLists.txt | 0 dbms/{src => }/Common/COW.h | 0 dbms/{src => }/Common/ClickHouseRevision.cpp | 0 dbms/{src => }/Common/ClickHouseRevision.h | 0 dbms/{src => }/Common/ColumnsHashing.h | 0 dbms/{src => }/Common/ColumnsHashingImpl.h | 0 .../Common/CombinedCardinalityEstimator.h | 0 dbms/{src => }/Common/CompactArray.h | 0 .../{src => }/Common/ConcurrentBoundedQueue.h | 0 .../AbstractConfigurationComparison.cpp | 0 .../Config/AbstractConfigurationComparison.h | 0 dbms/{src => }/Common/Config/CMakeLists.txt | 0 .../Common/Config/ConfigProcessor.cpp | 0 .../{src => }/Common/Config/ConfigProcessor.h | 0 .../Common/Config/ConfigReloader.cpp | 0 dbms/{src => }/Common/Config/ConfigReloader.h | 0 .../Common/Config/configReadClient.cpp | 0 .../Common/Config/configReadClient.h | 0 dbms/{src => }/Common/CounterInFile.h | 0 dbms/{src => }/Common/CpuId.h | 0 dbms/{src => }/Common/CurrentMetrics.cpp | 0 dbms/{src => }/Common/CurrentMetrics.h | 0 dbms/{src => }/Common/CurrentThread.cpp | 0 dbms/{src => }/Common/CurrentThread.h | 0 dbms/{src => }/Common/DNSResolver.cpp | 0 dbms/{src => }/Common/DNSResolver.h | 0 dbms/{src => }/Common/Dwarf.cpp | 0 dbms/{src => }/Common/Dwarf.h | 0 dbms/{src => }/Common/Elf.cpp | 0 dbms/{src => }/Common/Elf.h | 0 dbms/{src => }/Common/ErrorCodes.cpp | 0 dbms/{src => }/Common/EventCounter.h | 0 dbms/{src => }/Common/Exception.cpp | 0 dbms/{src => }/Common/Exception.h | 0 .../{src => }/Common/ExternalLoaderStatus.cpp | 0 dbms/{src => }/Common/ExternalLoaderStatus.h | 0 dbms/{src => }/Common/FieldVisitors.cpp | 0 dbms/{src => }/Common/FieldVisitors.h | 0 dbms/{src => }/Common/FileChecker.cpp | 0 dbms/{src => }/Common/FileChecker.h | 0 dbms/{src => }/Common/FileUpdatesTracker.h | 0 dbms/{src => }/Common/HTMLForm.h | 0 .../Common/HashTable/ClearableHashMap.h | 0 .../Common/HashTable/ClearableHashSet.h | 0 .../Common/HashTable/FixedClearableHashMap.h | 0 .../Common/HashTable/FixedClearableHashSet.h | 0 .../{src => 
}/Common/HashTable/FixedHashMap.h | 0 .../{src => }/Common/HashTable/FixedHashSet.h | 0 .../Common/HashTable/FixedHashTable.h | 0 dbms/{src => }/Common/HashTable/Hash.h | 0 dbms/{src => }/Common/HashTable/HashMap.h | 0 dbms/{src => }/Common/HashTable/HashSet.h | 0 dbms/{src => }/Common/HashTable/HashTable.h | 0 .../Common/HashTable/HashTableAllocator.h | 0 .../Common/HashTable/HashTableKeyHolder.h | 0 dbms/{src => }/Common/HashTable/SmallTable.h | 0 .../Common/HashTable/StringHashMap.h | 0 .../Common/HashTable/StringHashTable.h | 0 .../Common/HashTable/TwoLevelHashMap.h | 0 .../Common/HashTable/TwoLevelHashTable.h | 0 .../Common/HashTable/TwoLevelStringHashMap.h | 0 .../HashTable/TwoLevelStringHashTable.h | 0 .../Common/HyperLogLogBiasEstimator.h | 0 dbms/{src => }/Common/HyperLogLogCounter.h | 0 .../HyperLogLogWithSmallSetOptimization.h | 0 dbms/{src => }/Common/IFactoryWithAliases.h | 0 dbms/{src => }/Common/IPv6ToBinary.cpp | 0 dbms/{src => }/Common/IPv6ToBinary.h | 0 dbms/{src => }/Common/Increment.h | 0 dbms/{src => }/Common/InterruptListener.h | 0 dbms/{src => }/Common/IntervalKind.cpp | 0 dbms/{src => }/Common/IntervalKind.h | 0 dbms/{src => }/Common/LRUCache.h | 0 dbms/{src => }/Common/Macros.cpp | 0 dbms/{src => }/Common/Macros.h | 0 dbms/{src => }/Common/MemorySanitizer.h | 0 dbms/{src => }/Common/MemoryTracker.cpp | 0 dbms/{src => }/Common/MemoryTracker.h | 0 dbms/{src => }/Common/MultiVersion.h | 0 dbms/{src => }/Common/NaNUtils.h | 0 dbms/{src => }/Common/NamePrompter.h | 0 dbms/{src => }/Common/NetException.h | 0 dbms/{src => }/Common/ObjectPool.h | 0 dbms/{src => }/Common/OpenSSLHelpers.cpp | 0 dbms/{src => }/Common/OpenSSLHelpers.h | 0 .../Common/OptimizedRegularExpression.cpp | 0 .../Common/OptimizedRegularExpression.h | 0 dbms/{src => }/Common/PODArray.cpp | 0 dbms/{src => }/Common/PODArray.h | 0 dbms/{src => }/Common/PODArray_fwd.h | 0 dbms/{src => }/Common/PipeFDs.cpp | 0 dbms/{src => }/Common/PipeFDs.h | 0 dbms/{src => }/Common/PoolBase.h | 0 dbms/{src => }/Common/PoolWithFailoverBase.h | 0 dbms/{src => }/Common/ProfileEvents.cpp | 0 dbms/{src => }/Common/ProfileEvents.h | 0 dbms/{src => }/Common/ProfilingScopedRWLock.h | 0 dbms/{src => }/Common/QueryProfiler.cpp | 0 dbms/{src => }/Common/QueryProfiler.h | 0 dbms/{src => }/Common/RWLock.cpp | 0 dbms/{src => }/Common/RWLock.h | 0 dbms/{src => }/Common/RadixSort.h | 0 dbms/{src => }/Common/RemoteHostFilter.cpp | 0 dbms/{src => }/Common/RemoteHostFilter.h | 0 dbms/{src => }/Common/SensitiveDataMasker.cpp | 0 dbms/{src => }/Common/SensitiveDataMasker.h | 0 dbms/{src => }/Common/SettingsChanges.h | 0 dbms/{src => }/Common/SharedBlockRowRef.h | 0 dbms/{src => }/Common/SharedLibrary.cpp | 0 dbms/{src => }/Common/SharedLibrary.h | 0 dbms/{src => }/Common/ShellCommand.cpp | 0 dbms/{src => }/Common/ShellCommand.h | 0 dbms/{src => }/Common/SimpleActionBlocker.h | 0 dbms/{src => }/Common/SimpleIncrement.h | 0 dbms/{src => }/Common/SipHash.h | 0 dbms/{src => }/Common/SmallObjectPool.h | 0 dbms/{src => }/Common/SpaceSaving.h | 0 dbms/{src => }/Common/StackTrace.cpp | 0 dbms/{src => }/Common/StackTrace.h | 0 dbms/{src => }/Common/StatusFile.cpp | 0 dbms/{src => }/Common/StatusFile.h | 0 dbms/{src => }/Common/StatusInfo.cpp | 0 dbms/{src => }/Common/StatusInfo.h | 0 dbms/{src => }/Common/Stopwatch.cpp | 0 dbms/{src => }/Common/Stopwatch.h | 0 dbms/{src => }/Common/StringSearcher.h | 0 .../Common/StringUtils/CMakeLists.txt | 0 .../Common/StringUtils/StringUtils.cpp | 0 .../Common/StringUtils/StringUtils.h | 0 dbms/{src => 
}/Common/StudentTTest.cpp | 0 dbms/{src => }/Common/StudentTTest.h | 0 dbms/{src => }/Common/SymbolIndex.cpp | 0 dbms/{src => }/Common/SymbolIndex.h | 0 dbms/{src => }/Common/TaskStatsInfoGetter.cpp | 0 dbms/{src => }/Common/TaskStatsInfoGetter.h | 0 dbms/{src => }/Common/TerminalSize.cpp | 0 dbms/{src => }/Common/TerminalSize.h | 0 dbms/{src => }/Common/ThreadFuzzer.cpp | 0 dbms/{src => }/Common/ThreadFuzzer.h | 0 dbms/{src => }/Common/ThreadPool.cpp | 0 dbms/{src => }/Common/ThreadPool.h | 0 dbms/{src => }/Common/ThreadProfileEvents.h | 0 dbms/{src => }/Common/ThreadStatus.cpp | 0 dbms/{src => }/Common/ThreadStatus.h | 0 dbms/{src => }/Common/Throttler.h | 0 dbms/{src => }/Common/TraceCollector.cpp | 0 dbms/{src => }/Common/TraceCollector.h | 0 dbms/{src => }/Common/TypeList.h | 0 dbms/{src => }/Common/TypePromotion.h | 0 dbms/{src => }/Common/UInt128.h | 0 dbms/{src => }/Common/UTF8Helpers.cpp | 0 dbms/{src => }/Common/UTF8Helpers.h | 0 dbms/{src => }/Common/UnicodeBar.h | 0 dbms/{src => }/Common/VariableContext.h | 0 dbms/{src => }/Common/Visitor.h | 0 dbms/{src => }/Common/Volnitsky.h | 0 dbms/{src => }/Common/WeakHash.cpp | 0 dbms/{src => }/Common/WeakHash.h | 0 dbms/{src => }/Common/XDBCBridgeHelper.h | 0 .../{src => }/Common/ZooKeeper/CMakeLists.txt | 0 dbms/{src => }/Common/ZooKeeper/Common.h | 0 dbms/{src => }/Common/ZooKeeper/IKeeper.cpp | 0 dbms/{src => }/Common/ZooKeeper/IKeeper.h | 0 dbms/{src => }/Common/ZooKeeper/Increment.h | 0 .../Common/ZooKeeper/KeeperException.h | 0 .../Common/ZooKeeper/LeaderElection.h | 0 dbms/{src => }/Common/ZooKeeper/Lock.cpp | 0 dbms/{src => }/Common/ZooKeeper/Lock.h | 0 .../{src => }/Common/ZooKeeper/TestKeeper.cpp | 0 dbms/{src => }/Common/ZooKeeper/TestKeeper.h | 0 dbms/{src => }/Common/ZooKeeper/Types.h | 0 dbms/{src => }/Common/ZooKeeper/ZooKeeper.cpp | 0 dbms/{src => }/Common/ZooKeeper/ZooKeeper.h | 0 .../Common/ZooKeeper/ZooKeeperHolder.cpp | 0 .../Common/ZooKeeper/ZooKeeperHolder.h | 0 .../Common/ZooKeeper/ZooKeeperImpl.cpp | 0 .../Common/ZooKeeper/ZooKeeperImpl.h | 0 .../Common/ZooKeeper/ZooKeeperNodeCache.cpp | 0 .../Common/ZooKeeper/ZooKeeperNodeCache.h | 0 .../Common/ZooKeeper/tests/CMakeLists.txt | 0 .../gtest_zkutil_test_multi_exception.cpp | 0 dbms/{src => }/Common/ZooKeeper/tests/nozk.sh | 0 .../{src => }/Common/ZooKeeper/tests/yeszk.sh | 0 .../tests/zk_many_watches_reconnect.cpp | 0 .../tests/zkutil_expiration_test.cpp | 0 .../ZooKeeper/tests/zkutil_test_async.cpp | 0 .../ZooKeeper/tests/zkutil_test_commands.cpp | 0 .../tests/zkutil_test_commands_new_lib.cpp | 0 .../ZooKeeper/tests/zkutil_test_lock.cpp | 0 .../tests/zkutil_zookeeper_holder.cpp | 0 .../Common/ZooKeeper/tests/zookeeper_impl.cpp | 0 dbms/{src => }/Common/assert_cast.h | 0 dbms/{src => }/Common/checkStackSize.cpp | 0 dbms/{src => }/Common/checkStackSize.h | 0 dbms/{src => }/Common/config.h.in | 0 dbms/{src => }/Common/config_version.h.in | 0 dbms/{src => }/Common/createHardLink.cpp | 0 dbms/{src => }/Common/createHardLink.h | 0 dbms/{src => }/Common/escapeForFileName.cpp | 0 dbms/{src => }/Common/escapeForFileName.h | 0 dbms/{src => }/Common/filesystemHelpers.cpp | 0 dbms/{src => }/Common/filesystemHelpers.h | 0 dbms/{src => }/Common/formatIPv6.cpp | 0 dbms/{src => }/Common/formatIPv6.h | 0 dbms/{src => }/Common/formatReadable.cpp | 0 dbms/{src => }/Common/formatReadable.h | 0 dbms/{src => }/Common/getExecutablePath.cpp | 0 dbms/{src => }/Common/getExecutablePath.h | 0 .../Common/getMultipleKeysFromConfig.cpp | 0 .../Common/getMultipleKeysFromConfig.h | 0 
.../Common/getNumberOfPhysicalCPUCores.cpp | 0 .../Common/getNumberOfPhysicalCPUCores.h | 0 dbms/{src => }/Common/hasLinuxCapability.cpp | 0 dbms/{src => }/Common/hasLinuxCapability.h | 0 dbms/{src => }/Common/hex.cpp | 0 dbms/{src => }/Common/hex.h | 0 dbms/{src => }/Common/intExp.h | 0 dbms/{src => }/Common/interpolate.h | 0 dbms/{src => }/Common/isLocalAddress.cpp | 0 dbms/{src => }/Common/isLocalAddress.h | 0 dbms/{src => }/Common/malloc.cpp | 0 dbms/{src => }/Common/memcmpSmall.h | 0 dbms/{src => }/Common/memcpySmall.h | 0 dbms/{src => }/Common/new_delete.cpp | 0 dbms/{src => }/Common/parseAddress.cpp | 0 dbms/{src => }/Common/parseAddress.h | 0 dbms/{src => }/Common/parseGlobs.cpp | 0 dbms/{src => }/Common/parseGlobs.h | 0 .../Common/parseRemoteDescription.cpp | 0 .../{src => }/Common/parseRemoteDescription.h | 0 dbms/{src => }/Common/quoteString.cpp | 0 dbms/{src => }/Common/quoteString.h | 0 dbms/{src => }/Common/randomSeed.cpp | 0 dbms/{src => }/Common/randomSeed.h | 0 dbms/{src => }/Common/setThreadName.cpp | 0 dbms/{src => }/Common/setThreadName.h | 0 dbms/{src => }/Common/tests/CMakeLists.txt | 0 .../Common/tests/arena_with_free_lists.cpp | 0 dbms/{src => }/Common/tests/array_cache.cpp | 0 dbms/{src => }/Common/tests/auto_array.cpp | 0 .../Common/tests/chaos_sanitizer.cpp | 0 dbms/{src => }/Common/tests/compact_array.cpp | 0 dbms/{src => }/Common/tests/cow_columns.cpp | 0 .../Common/tests/cow_compositions.cpp | 0 .../gtest_getMultipleValuesFromConfig.cpp | 0 .../Common/tests/gtest_global_context.h | 0 .../gtest_makeRegexpPatternFromGlobs.cpp | 0 .../Common/tests/gtest_pod_array.cpp | 0 dbms/{src => }/Common/tests/gtest_rw_lock.cpp | 0 .../tests/gtest_sensitive_data_masker.cpp | 0 .../Common/tests/gtest_shell_command.cpp | 0 .../gtest_thread_pool_concurrent_wait.cpp | 0 .../tests/gtest_thread_pool_global_full.cpp | 0 .../Common/tests/gtest_thread_pool_limit.cpp | 0 .../Common/tests/gtest_thread_pool_loop.cpp | 0 .../gtest_thread_pool_schedule_exception.cpp | 0 .../tests/gtest_unescapeForFileName.cpp | 0 dbms/{src => }/Common/tests/hash_table.cpp | 0 dbms/{src => }/Common/tests/hashes_test.cpp | 0 .../Common/tests/int_hashes_perf.cpp | 0 .../tests/integer_hash_tables_and_hashes.cpp | 0 .../Common/tests/parallel_aggregation.cpp | 0 .../Common/tests/parallel_aggregation2.cpp | 0 dbms/{src => }/Common/tests/pod_array.cpp | 0 dbms/{src => }/Common/tests/radix_sort.cpp | 0 dbms/{src => }/Common/tests/simple_cache.cpp | 0 dbms/{src => }/Common/tests/sip_hash_perf.cpp | 0 dbms/{src => }/Common/tests/small_table.cpp | 0 dbms/{src => }/Common/tests/space_saving.cpp | 0 dbms/{src => }/Common/tests/stopwatch.cpp | 0 dbms/{src => }/Common/tests/symbol_index.cpp | 0 .../Common/tests/thread_creation_latency.cpp | 0 dbms/{src => }/Common/thread_local_rng.cpp | 0 dbms/{src => }/Common/thread_local_rng.h | 0 dbms/{src => }/Common/typeid_cast.h | 0 dbms/{src => }/Compression/CMakeLists.txt | 0 .../CachedCompressedReadBuffer.cpp | 0 .../Compression/CachedCompressedReadBuffer.h | 0 .../Compression/CompressedReadBuffer.cpp | 0 .../Compression/CompressedReadBuffer.h | 0 .../Compression/CompressedReadBufferBase.cpp | 0 .../Compression/CompressedReadBufferBase.h | 0 .../CompressedReadBufferFromFile.cpp | 0 .../CompressedReadBufferFromFile.h | 0 .../Compression/CompressedWriteBuffer.cpp | 0 .../Compression/CompressedWriteBuffer.h | 0 .../Compression/CompressionCodecDelta.cpp | 0 .../Compression/CompressionCodecDelta.h | 0 .../CompressionCodecDoubleDelta.cpp | 0 
.../Compression/CompressionCodecDoubleDelta.h | 0 .../Compression/CompressionCodecGorilla.cpp | 0 .../Compression/CompressionCodecGorilla.h | 0 .../Compression/CompressionCodecLZ4.cpp | 0 .../Compression/CompressionCodecLZ4.h | 0 .../Compression/CompressionCodecMultiple.cpp | 0 .../Compression/CompressionCodecMultiple.h | 0 .../Compression/CompressionCodecNone.cpp | 0 .../Compression/CompressionCodecNone.h | 0 .../Compression/CompressionCodecT64.cpp | 0 .../Compression/CompressionCodecT64.h | 0 .../Compression/CompressionCodecZSTD.cpp | 0 .../Compression/CompressionCodecZSTD.h | 0 .../Compression/CompressionFactory.cpp | 0 .../Compression/CompressionFactory.h | 0 dbms/{src => }/Compression/CompressionInfo.h | 0 .../Compression/ICompressionCodec.cpp | 0 .../{src => }/Compression/ICompressionCodec.h | 0 .../Compression/LZ4_decompress_faster.cpp | 0 .../Compression/LZ4_decompress_faster.h | 0 .../Compression/tests/CMakeLists.txt | 0 .../tests/cached_compressed_read_buffer.cpp | 0 .../Compression/tests/compressed_buffer.cpp | 0 .../tests/compressed_buffer_fuzz.cpp | 0 .../tests/gtest_compressionCodec.cpp | 0 dbms/{src => }/Core/AccurateComparison.h | 0 .../{src => }/Core/BackgroundSchedulePool.cpp | 0 dbms/{src => }/Core/BackgroundSchedulePool.h | 0 dbms/{src => }/Core/Block.cpp | 0 dbms/{src => }/Core/Block.h | 0 dbms/{src => }/Core/BlockInfo.cpp | 0 dbms/{src => }/Core/BlockInfo.h | 0 dbms/{src => }/Core/CMakeLists.txt | 0 dbms/{src => }/Core/ColumnNumbers.h | 0 dbms/{src => }/Core/ColumnWithTypeAndName.cpp | 0 dbms/{src => }/Core/ColumnWithTypeAndName.h | 0 dbms/{src => }/Core/ColumnsWithTypeAndName.h | 0 dbms/{src => }/Core/DecimalComparison.h | 0 dbms/{src => }/Core/DecimalFunctions.h | 0 dbms/{src => }/Core/Defines.h | 0 .../Core/ExternalResultDescription.cpp | 0 .../Core/ExternalResultDescription.h | 0 dbms/{src => }/Core/ExternalTable.cpp | 0 dbms/{src => }/Core/ExternalTable.h | 0 dbms/{src => }/Core/Field.cpp | 0 dbms/{src => }/Core/Field.h | 0 dbms/{src => }/Core/MySQLProtocol.cpp | 0 dbms/{src => }/Core/MySQLProtocol.h | 0 dbms/{src => }/Core/Names.h | 0 dbms/{src => }/Core/NamesAndTypes.cpp | 0 dbms/{src => }/Core/NamesAndTypes.h | 0 dbms/{src => }/Core/Protocol.h | 0 dbms/{src => }/Core/QualifiedTableName.h | 0 dbms/{src => }/Core/QueryProcessingStage.h | 0 dbms/{src => }/Core/Row.h | 0 dbms/{src => }/Core/Settings.cpp | 0 dbms/{src => }/Core/Settings.h | 0 dbms/{src => }/Core/SettingsCollection.cpp | 0 dbms/{src => }/Core/SettingsCollection.h | 0 dbms/{src => }/Core/SettingsCollectionImpl.h | 0 dbms/{src => }/Core/SortCursor.h | 0 dbms/{src => }/Core/SortDescription.h | 0 dbms/{src => }/Core/TypeListNumber.h | 0 dbms/{src => }/Core/Types.h | 0 dbms/{src => }/Core/UUID.h | 0 dbms/{src => }/Core/callOnTypeIndex.h | 0 dbms/{src => }/Core/config_core.h.in | 0 .../{src => }/Core/iostream_debug_helpers.cpp | 0 dbms/{src => }/Core/iostream_debug_helpers.h | 0 dbms/{src => }/Core/tests/CMakeLists.txt | 0 dbms/{src => }/Core/tests/field.cpp | 0 .../Core/tests/gtest_DecimalFunctions.cpp | 0 dbms/{src => }/Core/tests/move_field.cpp | 0 dbms/{src => }/Core/tests/string_pool.cpp | 0 dbms/{src => }/Core/tests/string_ref_hash.cpp | 0 .../AddingConstColumnBlockInputStream.h | 0 .../AddingDefaultBlockOutputStream.cpp | 0 .../AddingDefaultBlockOutputStream.h | 0 .../AddingDefaultsBlockInputStream.cpp | 0 .../AddingDefaultsBlockInputStream.h | 0 .../AggregatingBlockInputStream.cpp | 0 .../DataStreams/AggregatingBlockInputStream.h | 0 .../AggregatingSortedBlockInputStream.cpp | 0 
.../AggregatingSortedBlockInputStream.h | 0 .../AsynchronousBlockInputStream.cpp | 0 .../AsynchronousBlockInputStream.h | 0 dbms/{src => }/DataStreams/BlockIO.cpp | 0 dbms/{src => }/DataStreams/BlockIO.h | 0 .../DataStreams/BlockStreamProfileInfo.cpp | 0 .../DataStreams/BlockStreamProfileInfo.h | 0 .../DataStreams/BlocksBlockInputStream.h | 0 .../DataStreams/BlocksListBlockInputStream.h | 0 dbms/{src => }/DataStreams/CMakeLists.txt | 0 .../CheckConstraintsBlockOutputStream.cpp | 0 .../CheckConstraintsBlockOutputStream.h | 0 .../CheckSortedBlockInputStream.cpp | 0 .../DataStreams/CheckSortedBlockInputStream.h | 0 .../CollapsingFinalBlockInputStream.cpp | 0 .../CollapsingFinalBlockInputStream.h | 0 .../CollapsingSortedBlockInputStream.cpp | 0 .../CollapsingSortedBlockInputStream.h | 0 .../DataStreams/ColumnGathererStream.cpp | 0 .../DataStreams/ColumnGathererStream.h | 0 .../DataStreams/ConcatBlockInputStream.h | 0 ...lumnLowCardinalityToFullBlockInputStream.h | 0 .../ConvertingBlockInputStream.cpp | 0 .../DataStreams/ConvertingBlockInputStream.h | 0 .../DataStreams/CountingBlockOutputStream.cpp | 0 .../DataStreams/CountingBlockOutputStream.h | 0 .../CreatingSetsBlockInputStream.cpp | 0 .../CreatingSetsBlockInputStream.h | 0 .../DataStreams/CubeBlockInputStream.cpp | 0 .../DataStreams/CubeBlockInputStream.h | 0 .../DataStreams/DistinctBlockInputStream.cpp | 0 .../DataStreams/DistinctBlockInputStream.h | 0 .../DistinctSortedBlockInputStream.cpp | 0 .../DistinctSortedBlockInputStream.h | 0 .../DataStreams/ExecutionSpeedLimits.cpp | 0 .../DataStreams/ExecutionSpeedLimits.h | 0 .../ExpressionBlockInputStream.cpp | 0 .../DataStreams/ExpressionBlockInputStream.h | 0 .../DataStreams/FillingBlockInputStream.cpp | 0 .../DataStreams/FillingBlockInputStream.h | 0 .../DataStreams/FilterBlockInputStream.cpp | 0 .../DataStreams/FilterBlockInputStream.h | 0 .../FilterColumnsBlockInputStream.cpp | 0 .../FilterColumnsBlockInputStream.h | 0 .../FinishSortingBlockInputStream.cpp | 0 .../FinishSortingBlockInputStream.h | 0 .../GraphiteRollupSortedBlockInputStream.cpp | 0 .../GraphiteRollupSortedBlockInputStream.h | 0 .../DataStreams/IBlockInputStream.cpp | 0 .../{src => }/DataStreams/IBlockInputStream.h | 0 .../DataStreams/IBlockOutputStream.h | 0 dbms/{src => }/DataStreams/IBlockStream_fwd.h | 0 .../InputStreamFromASTInsertQuery.cpp | 0 .../InputStreamFromASTInsertQuery.h | 0 .../InternalTextLogsRowOutputStream.cpp | 0 .../InternalTextLogsRowOutputStream.h | 0 .../DataStreams/LazyBlockInputStream.h | 0 .../DataStreams/LimitBlockInputStream.cpp | 0 .../DataStreams/LimitBlockInputStream.h | 0 .../DataStreams/LimitByBlockInputStream.cpp | 0 .../DataStreams/LimitByBlockInputStream.h | 0 .../DataStreams/MarkInCompressedFile.h | 0 .../MaterializingBlockInputStream.cpp | 0 .../MaterializingBlockInputStream.h | 0 .../MaterializingBlockOutputStream.h | 0 .../MergeSortingBlockInputStream.cpp | 0 .../MergeSortingBlockInputStream.h | 0 .../MergingAggregatedBlockInputStream.cpp | 0 .../MergingAggregatedBlockInputStream.h | 0 ...regatedMemoryEfficientBlockInputStream.cpp | 0 ...ggregatedMemoryEfficientBlockInputStream.h | 0 .../MergingSortedBlockInputStream.cpp | 0 .../MergingSortedBlockInputStream.h | 0 .../DataStreams/NativeBlockInputStream.cpp | 0 .../DataStreams/NativeBlockInputStream.h | 0 .../DataStreams/NativeBlockOutputStream.cpp | 0 .../DataStreams/NativeBlockOutputStream.h | 0 .../NullAndDoCopyBlockInputStream.h | 0 .../DataStreams/NullBlockInputStream.h | 0 .../DataStreams/NullBlockOutputStream.h | 0 
.../DataStreams/OneBlockInputStream.h | 0 .../DataStreams/OwningBlockInputStream.h | 0 .../ParallelAggregatingBlockInputStream.cpp | 0 .../ParallelAggregatingBlockInputStream.h | 0 .../DataStreams/ParallelInputsProcessor.h | 0 .../ParallelParsingBlockInputStream.cpp | 0 .../ParallelParsingBlockInputStream.h | 0 .../PartialSortingBlockInputStream.cpp | 0 .../PartialSortingBlockInputStream.h | 0 .../PushingToViewsBlockOutputStream.cpp | 0 .../PushingToViewsBlockOutputStream.h | 0 .../DataStreams/RemoteBlockInputStream.cpp | 0 .../DataStreams/RemoteBlockInputStream.h | 0 .../DataStreams/RemoteBlockOutputStream.cpp | 0 .../DataStreams/RemoteBlockOutputStream.h | 0 .../ReplacingSortedBlockInputStream.cpp | 0 .../ReplacingSortedBlockInputStream.h | 0 .../DataStreams/ReverseBlockInputStream.cpp | 0 .../DataStreams/ReverseBlockInputStream.h | 0 .../DataStreams/RollupBlockInputStream.cpp | 0 .../DataStreams/RollupBlockInputStream.h | 0 dbms/{src => }/DataStreams/SizeLimits.cpp | 0 dbms/{src => }/DataStreams/SizeLimits.h | 0 .../DataStreams/SquashingBlockInputStream.cpp | 0 .../DataStreams/SquashingBlockInputStream.h | 0 .../SquashingBlockOutputStream.cpp | 0 .../DataStreams/SquashingBlockOutputStream.h | 0 .../DataStreams/SquashingTransform.cpp | 0 .../DataStreams/SquashingTransform.h | 0 .../SummingSortedBlockInputStream.cpp | 0 .../SummingSortedBlockInputStream.h | 0 .../DataStreams/TTLBlockInputStream.cpp | 0 .../DataStreams/TTLBlockInputStream.h | 0 .../DataStreams/TemporaryFileStream.h | 0 .../TotalsHavingBlockInputStream.cpp | 0 .../TotalsHavingBlockInputStream.h | 0 .../DataStreams/UnionBlockInputStream.h | 0 ...sionedCollapsingSortedBlockInputStream.cpp | 0 ...ersionedCollapsingSortedBlockInputStream.h | 0 dbms/{src => }/DataStreams/copyData.cpp | 0 dbms/{src => }/DataStreams/copyData.h | 0 dbms/{src => }/DataStreams/finalizeBlock.cpp | 0 dbms/{src => }/DataStreams/finalizeBlock.h | 0 .../DataStreams/materializeBlock.cpp | 0 dbms/{src => }/DataStreams/materializeBlock.h | 0 .../DataStreams/narrowBlockInputStreams.cpp | 0 .../DataStreams/narrowBlockInputStreams.h | 0 .../DataStreams/processConstants.cpp | 0 dbms/{src => }/DataStreams/processConstants.h | 0 .../DataStreams/tests/CMakeLists.txt | 0 .../tests/collapsing_sorted_stream.cpp | 0 .../DataStreams/tests/expression_stream.cpp | 0 .../DataStreams/tests/filter_stream.cpp | 0 .../tests/finish_sorting_stream.cpp | 0 .../gtest_blocks_size_merging_streams.cpp | 0 .../tests/gtest_check_sorted_stream.cpp | 0 .../DataStreams/tests/union_stream2.cpp | 0 dbms/{src => }/DataTypes/CMakeLists.txt | 0 .../DataTypes/DataTypeAggregateFunction.cpp | 0 .../DataTypes/DataTypeAggregateFunction.h | 0 dbms/{src => }/DataTypes/DataTypeArray.cpp | 0 dbms/{src => }/DataTypes/DataTypeArray.h | 0 dbms/{src => }/DataTypes/DataTypeCustom.h | 0 .../DataTypes/DataTypeCustomIPv4AndIPv6.cpp | 0 .../DataTypeCustomSimpleAggregateFunction.cpp | 0 .../DataTypeCustomSimpleAggregateFunction.h | 0 .../DataTypeCustomSimpleTextSerialization.cpp | 0 .../DataTypeCustomSimpleTextSerialization.h | 0 dbms/{src => }/DataTypes/DataTypeDate.cpp | 0 dbms/{src => }/DataTypes/DataTypeDate.h | 0 dbms/{src => }/DataTypes/DataTypeDateTime.cpp | 0 dbms/{src => }/DataTypes/DataTypeDateTime.h | 0 .../DataTypes/DataTypeDateTime64.cpp | 0 dbms/{src => }/DataTypes/DataTypeDateTime64.h | 0 .../DataTypes/DataTypeDecimalBase.cpp | 0 .../{src => }/DataTypes/DataTypeDecimalBase.h | 0 dbms/{src => }/DataTypes/DataTypeEnum.cpp | 0 dbms/{src => }/DataTypes/DataTypeEnum.h | 0 dbms/{src => 
}/DataTypes/DataTypeFactory.cpp | 0 dbms/{src => }/DataTypes/DataTypeFactory.h | 0 .../DataTypes/DataTypeFixedString.cpp | 0 .../{src => }/DataTypes/DataTypeFixedString.h | 0 dbms/{src => }/DataTypes/DataTypeFunction.cpp | 0 dbms/{src => }/DataTypes/DataTypeFunction.h | 0 dbms/{src => }/DataTypes/DataTypeInterval.cpp | 0 dbms/{src => }/DataTypes/DataTypeInterval.h | 0 .../DataTypes/DataTypeLowCardinality.cpp | 0 .../DataTypes/DataTypeLowCardinality.h | 0 .../DataTypeLowCardinalityHelpers.cpp | 0 dbms/{src => }/DataTypes/DataTypeNothing.cpp | 0 dbms/{src => }/DataTypes/DataTypeNothing.h | 0 dbms/{src => }/DataTypes/DataTypeNullable.cpp | 0 dbms/{src => }/DataTypes/DataTypeNullable.h | 0 .../DataTypes/DataTypeNumberBase.cpp | 0 dbms/{src => }/DataTypes/DataTypeNumberBase.h | 0 dbms/{src => }/DataTypes/DataTypeSet.h | 0 dbms/{src => }/DataTypes/DataTypeString.cpp | 0 dbms/{src => }/DataTypes/DataTypeString.h | 0 dbms/{src => }/DataTypes/DataTypeTuple.cpp | 0 dbms/{src => }/DataTypes/DataTypeTuple.h | 0 dbms/{src => }/DataTypes/DataTypeUUID.cpp | 0 dbms/{src => }/DataTypes/DataTypeUUID.h | 0 .../DataTypeWithSimpleSerialization.h | 0 dbms/{src => }/DataTypes/DataTypesDecimal.cpp | 0 dbms/{src => }/DataTypes/DataTypesDecimal.h | 0 dbms/{src => }/DataTypes/DataTypesNumber.cpp | 0 dbms/{src => }/DataTypes/DataTypesNumber.h | 0 dbms/{src => }/DataTypes/FieldToDataType.cpp | 0 dbms/{src => }/DataTypes/FieldToDataType.h | 0 dbms/{src => }/DataTypes/IDataType.cpp | 0 dbms/{src => }/DataTypes/IDataType.h | 0 dbms/{src => }/DataTypes/IDataTypeDummy.h | 0 dbms/{src => }/DataTypes/Native.h | 0 dbms/{src => }/DataTypes/NestedUtils.cpp | 0 dbms/{src => }/DataTypes/NestedUtils.h | 0 dbms/{src => }/DataTypes/NumberTraits.h | 0 .../DataTypes/convertMySQLDataType.cpp | 0 .../DataTypes/convertMySQLDataType.h | 0 .../{src => }/DataTypes/getLeastSupertype.cpp | 0 dbms/{src => }/DataTypes/getLeastSupertype.h | 0 dbms/{src => }/DataTypes/getMostSubtype.cpp | 0 dbms/{src => }/DataTypes/getMostSubtype.h | 0 dbms/{src => }/DataTypes/tests/CMakeLists.txt | 0 .../DataTypes/tests/data_type_string.cpp | 0 .../tests/data_types_number_fixed.cpp | 0 .../tests/gtest_data_type_get_common_type.cpp | 0 .../Databases/DatabaseDictionary.cpp | 0 dbms/{src => }/Databases/DatabaseDictionary.h | 0 dbms/{src => }/Databases/DatabaseFactory.cpp | 0 dbms/{src => }/Databases/DatabaseFactory.h | 0 dbms/{src => }/Databases/DatabaseLazy.cpp | 0 dbms/{src => }/Databases/DatabaseLazy.h | 0 dbms/{src => }/Databases/DatabaseMemory.cpp | 0 dbms/{src => }/Databases/DatabaseMemory.h | 0 dbms/{src => }/Databases/DatabaseMySQL.cpp | 0 dbms/{src => }/Databases/DatabaseMySQL.h | 0 dbms/{src => }/Databases/DatabaseOnDisk.cpp | 0 dbms/{src => }/Databases/DatabaseOnDisk.h | 0 dbms/{src => }/Databases/DatabaseOrdinary.cpp | 0 dbms/{src => }/Databases/DatabaseOrdinary.h | 0 .../Databases/DatabaseWithDictionaries.cpp | 0 .../Databases/DatabaseWithDictionaries.h | 0 dbms/{src => }/Databases/DatabasesCommon.cpp | 0 dbms/{src => }/Databases/DatabasesCommon.h | 0 dbms/{src => }/Databases/IDatabase.h | 0 dbms/{src => }/Dictionaries/CMakeLists.txt | 0 .../Dictionaries/CacheDictionary.cpp | 0 dbms/{src => }/Dictionaries/CacheDictionary.h | 0 .../Dictionaries/CacheDictionary.inc.h | 0 .../CacheDictionary_generate1.cpp.in | 0 .../CacheDictionary_generate2.cpp.in | 0 .../CacheDictionary_generate3.cpp.in | 0 .../ClickHouseDictionarySource.cpp | 0 .../Dictionaries/ClickHouseDictionarySource.h | 0 .../ComplexKeyCacheDictionary.cpp | 0 
.../Dictionaries/ComplexKeyCacheDictionary.h | 0 ...acheDictionary_createAttributeWithType.cpp | 0 ...ComplexKeyCacheDictionary_generate1.cpp.in | 0 ...ComplexKeyCacheDictionary_generate2.cpp.in | 0 ...ComplexKeyCacheDictionary_generate3.cpp.in | 0 ...exKeyCacheDictionary_setAttributeValue.cpp | 0 ...cheDictionary_setDefaultAttributeValue.cpp | 0 .../ComplexKeyHashedDictionary.cpp | 0 .../Dictionaries/ComplexKeyHashedDictionary.h | 0 .../Dictionaries/DictionaryBlockInputStream.h | 0 .../DictionaryBlockInputStreamBase.cpp | 0 .../DictionaryBlockInputStreamBase.h | 0 .../Dictionaries/DictionaryFactory.cpp | 0 .../Dictionaries/DictionaryFactory.h | 0 .../Dictionaries/DictionarySourceFactory.cpp | 0 .../Dictionaries/DictionarySourceFactory.h | 0 .../Dictionaries/DictionarySourceHelpers.cpp | 0 .../Dictionaries/DictionarySourceHelpers.h | 0 .../Dictionaries/DictionaryStructure.cpp | 0 .../Dictionaries/DictionaryStructure.h | 0 .../Dictionaries/Embedded/CMakeLists.txt | 0 .../Embedded/GeoDictionariesLoader.cpp | 0 .../Embedded/GeoDictionariesLoader.h | 0 .../Embedded/GeodataProviders/Entries.h | 0 .../GeodataProviders/HierarchiesProvider.cpp | 0 .../GeodataProviders/HierarchiesProvider.h | 0 .../HierarchyFormatReader.cpp | 0 .../GeodataProviders/HierarchyFormatReader.h | 0 .../GeodataProviders/IHierarchiesProvider.h | 0 .../GeodataProviders/INamesProvider.h | 0 .../GeodataProviders/NamesFormatReader.cpp | 0 .../GeodataProviders/NamesFormatReader.h | 0 .../GeodataProviders/NamesProvider.cpp | 0 .../Embedded/GeodataProviders/NamesProvider.h | 0 .../Embedded/GeodataProviders/Types.h | 0 .../Embedded/RegionsHierarchies.cpp | 0 .../Embedded/RegionsHierarchies.h | 0 .../Embedded/RegionsHierarchy.cpp | 0 .../Dictionaries/Embedded/RegionsHierarchy.h | 0 .../Dictionaries/Embedded/RegionsNames.cpp | 0 .../Dictionaries/Embedded/RegionsNames.h | 0 .../ExecutableDictionarySource.cpp | 0 .../Dictionaries/ExecutableDictionarySource.h | 0 .../Dictionaries/ExternalQueryBuilder.cpp | 0 .../Dictionaries/ExternalQueryBuilder.h | 0 .../Dictionaries/FileDictionarySource.cpp | 0 .../Dictionaries/FileDictionarySource.h | 0 .../{src => }/Dictionaries/FlatDictionary.cpp | 0 dbms/{src => }/Dictionaries/FlatDictionary.h | 0 .../Dictionaries/HTTPDictionarySource.cpp | 0 .../Dictionaries/HTTPDictionarySource.h | 0 .../Dictionaries/HashedDictionary.cpp | 0 .../{src => }/Dictionaries/HashedDictionary.h | 0 dbms/{src => }/Dictionaries/IDictionary.h | 0 .../Dictionaries/IDictionarySource.h | 0 .../Dictionaries/LibraryDictionarySource.cpp | 0 .../Dictionaries/LibraryDictionarySource.h | 2 +- .../LibraryDictionarySourceExternal.cpp | 0 .../LibraryDictionarySourceExternal.h | 0 .../Dictionaries/MongoDBBlockInputStream.cpp | 0 .../Dictionaries/MongoDBBlockInputStream.h | 0 .../Dictionaries/MongoDBDictionarySource.cpp | 2 +- .../Dictionaries/MongoDBDictionarySource.h | 0 .../Dictionaries/MySQLDictionarySource.cpp | 0 .../Dictionaries/MySQLDictionarySource.h | 0 .../Dictionaries/PolygonDictionary.cpp | 0 .../Dictionaries/PolygonDictionary.h | 0 .../RangeDictionaryBlockInputStream.h | 0 .../Dictionaries/RangeHashedDictionary.cpp | 0 .../Dictionaries/RangeHashedDictionary.h | 0 .../Dictionaries/RedisBlockInputStream.cpp | 0 .../Dictionaries/RedisBlockInputStream.h | 0 .../Dictionaries/RedisDictionarySource.cpp | 0 .../Dictionaries/RedisDictionarySource.h | 0 .../{src => }/Dictionaries/TrieDictionary.cpp | 0 dbms/{src => }/Dictionaries/TrieDictionary.h | 0 .../Dictionaries/XDBCDictionarySource.cpp | 0 
.../Dictionaries/XDBCDictionarySource.h | 0 .../getDictionaryConfigurationFromAST.cpp | 0 .../getDictionaryConfigurationFromAST.h | 0 .../Dictionaries/readInvalidateQuery.cpp | 0 .../Dictionaries/readInvalidateQuery.h | 0 .../Dictionaries/registerDictionaries.cpp | 0 .../Dictionaries/registerDictionaries.h | 0 .../Dictionaries/tests/CMakeLists.txt | 0 .../tests/gtest_dictionary_configuration.cpp | 0 .../Dictionaries/writeParenthesisedString.cpp | 0 .../Dictionaries/writeParenthesisedString.h | 0 dbms/{src => }/Disks/CMakeLists.txt | 0 dbms/{src => }/Disks/DiskFactory.cpp | 0 dbms/{src => }/Disks/DiskFactory.h | 0 dbms/{src => }/Disks/DiskLocal.cpp | 0 dbms/{src => }/Disks/DiskLocal.h | 0 dbms/{src => }/Disks/DiskMemory.cpp | 0 dbms/{src => }/Disks/DiskMemory.h | 0 dbms/{src => }/Disks/DiskS3.cpp | 0 dbms/{src => }/Disks/DiskS3.h | 0 dbms/{src => }/Disks/DiskSpaceMonitor.cpp | 0 dbms/{src => }/Disks/DiskSpaceMonitor.h | 0 dbms/{src => }/Disks/IDisk.cpp | 0 dbms/{src => }/Disks/IDisk.h | 0 dbms/{src => }/Disks/registerDisks.cpp | 0 dbms/{src => }/Disks/registerDisks.h | 0 dbms/{src => }/Disks/tests/CMakeLists.txt | 0 dbms/{src => }/Disks/tests/gtest_disk.cpp | 0 dbms/{src => }/Disks/tests/gtest_disk.h | 0 .../Disks/tests/gtest_path_functions.cpp | 0 dbms/{src => }/Formats/CMakeLists.txt | 0 dbms/{src => }/Formats/FormatFactory.cpp | 0 dbms/{src => }/Formats/FormatFactory.h | 0 dbms/{src => }/Formats/FormatSchemaInfo.cpp | 0 dbms/{src => }/Formats/FormatSchemaInfo.h | 0 dbms/{src => }/Formats/FormatSettings.h | 0 dbms/{src => }/Formats/IRowInputStream.cpp | 0 dbms/{src => }/Formats/IRowInputStream.h | 0 dbms/{src => }/Formats/IRowOutputStream.cpp | 0 dbms/{src => }/Formats/IRowOutputStream.h | 0 .../Formats/MySQLBlockInputStream.cpp | 0 .../{src => }/Formats/MySQLBlockInputStream.h | 0 dbms/{src => }/Formats/NativeFormat.cpp | 0 dbms/{src => }/Formats/NullFormat.cpp | 0 .../Formats/ParsedTemplateFormatString.cpp | 0 .../Formats/ParsedTemplateFormatString.h | 0 .../Formats/ProtobufColumnMatcher.cpp | 0 .../{src => }/Formats/ProtobufColumnMatcher.h | 0 dbms/{src => }/Formats/ProtobufReader.cpp | 0 dbms/{src => }/Formats/ProtobufReader.h | 0 dbms/{src => }/Formats/ProtobufSchemas.cpp | 0 dbms/{src => }/Formats/ProtobufSchemas.h | 0 dbms/{src => }/Formats/ProtobufWriter.cpp | 0 dbms/{src => }/Formats/ProtobufWriter.h | 0 dbms/{src => }/Formats/config_formats.h.in | 0 dbms/{src => }/Formats/tests/CMakeLists.txt | 0 .../Formats/tests/tab_separated_streams.cpp | 0 dbms/{src => }/Formats/verbosePrintString.cpp | 0 dbms/{src => }/Formats/verbosePrintString.h | 0 dbms/{src => }/Functions/CMakeLists.txt | 0 dbms/{src => }/Functions/CRC.cpp | 0 .../Functions/CustomWeekTransforms.h | 0 dbms/{src => }/Functions/DateTimeTransforms.h | 0 dbms/{src => }/Functions/DivisionUtils.h | 0 dbms/{src => }/Functions/DummyJSONParser.h | 0 dbms/{src => }/Functions/EmptyImpl.h | 0 .../Functions/FunctionBase64Conversion.h | 0 .../Functions/FunctionBinaryArithmetic.h | 0 .../{src => }/Functions/FunctionBitTestMany.h | 0 .../Functions/FunctionCustomWeekToSomething.h | 0 .../FunctionDateOrDateTimeAddInterval.h | 0 .../FunctionDateOrDateTimeToSomething.h | 0 dbms/{src => }/Functions/FunctionFQDN.cpp | 0 dbms/{src => }/Functions/FunctionFactory.cpp | 0 dbms/{src => }/Functions/FunctionFactory.h | 0 dbms/{src => }/Functions/FunctionHelpers.cpp | 0 dbms/{src => }/Functions/FunctionHelpers.h | 0 dbms/{src => }/Functions/FunctionIfBase.h | 0 dbms/{src => }/Functions/FunctionJoinGet.cpp | 0 dbms/{src => 
}/Functions/FunctionJoinGet.h | 0 .../Functions/FunctionMathBinaryFloat64.h | 0 .../Functions/FunctionMathConstFloat64.h | 0 dbms/{src => }/Functions/FunctionMathUnary.h | 0 .../Functions/FunctionNumericPredicate.h | 0 .../Functions/FunctionStartsEndsWith.h | 0 .../Functions/FunctionStringOrArrayToT.h | 0 .../Functions/FunctionStringToString.h | 0 .../Functions/FunctionUnaryArithmetic.h | 0 dbms/{src => }/Functions/FunctionsBitmap.cpp | 0 dbms/{src => }/Functions/FunctionsBitmap.h | 0 dbms/{src => }/Functions/FunctionsCoding.cpp | 0 dbms/{src => }/Functions/FunctionsCoding.h | 0 .../{src => }/Functions/FunctionsComparison.h | 0 .../Functions/FunctionsConsistentHashing.h | 0 .../Functions/FunctionsConversion.cpp | 0 .../{src => }/Functions/FunctionsConversion.h | 0 .../FunctionsEmbeddedDictionaries.cpp | 0 .../Functions/FunctionsEmbeddedDictionaries.h | 0 .../FunctionsExternalDictionaries.cpp | 0 .../Functions/FunctionsExternalDictionaries.h | 0 .../Functions/FunctionsExternalModels.cpp | 0 .../Functions/FunctionsExternalModels.h | 0 .../Functions/FunctionsFormatting.cpp | 0 .../{src => }/Functions/FunctionsFormatting.h | 0 dbms/{src => }/Functions/FunctionsHashing.cpp | 0 dbms/{src => }/Functions/FunctionsHashing.h | 0 dbms/{src => }/Functions/FunctionsJSON.cpp | 0 dbms/{src => }/Functions/FunctionsJSON.h | 0 dbms/{src => }/Functions/FunctionsLogical.cpp | 0 dbms/{src => }/Functions/FunctionsLogical.h | 0 .../Functions/FunctionsMiscellaneous.h | 0 .../Functions/FunctionsMultiStringPosition.h | 0 .../Functions/FunctionsMultiStringSearch.h | 0 dbms/{src => }/Functions/FunctionsRandom.cpp | 0 dbms/{src => }/Functions/FunctionsRandom.h | 0 dbms/{src => }/Functions/FunctionsRound.cpp | 0 dbms/{src => }/Functions/FunctionsRound.h | 0 .../Functions/FunctionsStringArray.cpp | 0 .../Functions/FunctionsStringArray.h | 0 .../Functions/FunctionsStringRegex.cpp | 0 .../Functions/FunctionsStringRegex.h | 0 dbms/Functions/FunctionsStringSearch.cpp | 707 ++++++++++++++++++ .../Functions/FunctionsStringSearch.h | 0 .../Functions/FunctionsStringSearchToString.h | 0 .../Functions/FunctionsStringSimilarity.cpp | 0 .../Functions/FunctionsStringSimilarity.h | 0 .../{src => }/Functions/FunctionsVisitParam.h | 0 .../Functions/GatherUtils/Algorithms.h | 0 .../Functions/GatherUtils/ArraySinkVisitor.h | 0 .../GatherUtils/ArraySourceVisitor.h | 0 .../Functions/GatherUtils/CMakeLists.txt | 0 .../Functions/GatherUtils/GatherUtils.h | 0 .../Functions/GatherUtils/IArraySink.h | 0 .../Functions/GatherUtils/IArraySource.h | 0 .../Functions/GatherUtils/IValueSource.h | 0 .../Functions/GatherUtils/Selectors.h | 0 dbms/{src => }/Functions/GatherUtils/Sinks.h | 0 dbms/{src => }/Functions/GatherUtils/Slices.h | 0 .../{src => }/Functions/GatherUtils/Sources.h | 0 .../GatherUtils/ValueSourceVisitor.h | 0 .../Functions/GatherUtils/concat.cpp | 0 .../Functions/GatherUtils/createArraySink.cpp | 0 .../GatherUtils/createArraySource.cpp | 0 .../GatherUtils/createValueSource.cpp | 0 dbms/{src => }/Functions/GatherUtils/has.cpp | 0 dbms/{src => }/Functions/GatherUtils/push.cpp | 0 .../GatherUtils/resizeConstantSize.cpp | 0 .../GatherUtils/resizeDynamicSize.cpp | 0 .../GatherUtils/sliceDynamicOffsetBounded.cpp | 0 .../sliceDynamicOffsetUnbounded.cpp | 0 .../sliceFromLeftConstantOffsetBounded.cpp | 0 .../sliceFromLeftConstantOffsetUnbounded.cpp | 0 .../sliceFromRightConstantOffsetBounded.cpp | 0 .../sliceFromRightConstantOffsetUnbounded.cpp | 0 dbms/{src => }/Functions/GeoHash.cpp | 0 dbms/{src => }/Functions/GeoHash.h | 0 dbms/{src => 
}/Functions/HasTokenImpl.h | 0 dbms/{src => }/Functions/IFunction.cpp | 0 dbms/{src => }/Functions/IFunction.h | 0 dbms/{src => }/Functions/IFunctionAdaptors.h | 0 dbms/{src => }/Functions/IFunctionImpl.h | 0 dbms/{src => }/Functions/LowerUpperImpl.h | 0 dbms/{src => }/Functions/LowerUpperUTF8Impl.h | 0 .../Functions/MultiSearchAllPositionsImpl.h | 0 .../Functions/MultiSearchFirstIndexImpl.h | 0 .../Functions/MultiSearchFirstPositionImpl.h | 0 dbms/{src => }/Functions/MultiSearchImpl.h | 0 dbms/{src => }/Functions/PolygonUtils.h | 0 dbms/{src => }/Functions/PositionImpl.h | 0 dbms/{src => }/Functions/RapidJSONParser.h | 0 dbms/{src => }/Functions/Regexps.h | 0 dbms/{src => }/Functions/SimdJSONParser.h | 0 dbms/{src => }/Functions/URL/CMakeLists.txt | 0 dbms/{src => }/Functions/URL/FunctionsURL.h | 0 dbms/{src => }/Functions/URL/URLHierarchy.cpp | 0 .../Functions/URL/URLPathHierarchy.cpp | 0 dbms/{src => }/Functions/URL/basename.cpp | 0 .../Functions/URL/config_functions_url.h.in | 0 dbms/{src => }/Functions/URL/cutFragment.cpp | 0 .../Functions/URL/cutQueryString.cpp | 0 .../URL/cutQueryStringAndFragment.cpp | 0 .../URL/cutToFirstSignificantSubdomain.cpp | 0 .../Functions/URL/cutURLParameter.cpp | 0 dbms/{src => }/Functions/URL/cutWWW.cpp | 0 .../Functions/URL/decodeURLComponent.cpp | 0 dbms/{src => }/Functions/URL/domain.cpp | 0 dbms/{src => }/Functions/URL/domain.h | 0 .../Functions/URL/domainWithoutWWW.cpp | 0 .../Functions/URL/extractURLParameter.cpp | 0 .../URL/extractURLParameterNames.cpp | 0 .../Functions/URL/extractURLParameters.cpp | 0 .../URL/firstSignificantSubdomain.cpp | 0 .../Functions/URL/firstSignificantSubdomain.h | 0 dbms/{src => }/Functions/URL/fragment.cpp | 0 dbms/{src => }/Functions/URL/fragment.h | 0 dbms/{src => }/Functions/URL/path.cpp | 0 dbms/{src => }/Functions/URL/pathFull.cpp | 0 dbms/{src => }/Functions/URL/protocol.cpp | 0 dbms/{src => }/Functions/URL/protocol.h | 0 dbms/{src => }/Functions/URL/queryString.cpp | 0 dbms/{src => }/Functions/URL/queryString.h | 0 .../Functions/URL/queryStringAndFragment.cpp | 0 .../Functions/URL/queryStringAndFragment.h | 0 .../Functions/URL/registerFunctionsURL.cpp | 0 .../Functions/URL/tldLookup.generated.cpp | 0 dbms/{src => }/Functions/URL/tldLookup.gperf | 0 dbms/{src => }/Functions/URL/tldLookup.h | 0 dbms/{src => }/Functions/URL/tldLookup.sh | 0 .../Functions/URL/topLevelDomain.cpp | 0 dbms/{src => }/Functions/abs.cpp | 0 dbms/{src => }/Functions/acos.cpp | 0 dbms/{src => }/Functions/addDays.cpp | 0 dbms/{src => }/Functions/addHours.cpp | 0 dbms/{src => }/Functions/addMinutes.cpp | 0 dbms/{src => }/Functions/addMonths.cpp | 0 dbms/{src => }/Functions/addQuarters.cpp | 0 dbms/{src => }/Functions/addSeconds.cpp | 0 dbms/{src => }/Functions/addWeeks.cpp | 0 dbms/{src => }/Functions/addYears.cpp | 0 dbms/{src => }/Functions/addressToLine.cpp | 0 dbms/{src => }/Functions/addressToSymbol.cpp | 0 .../Functions/appendTrailingCharIfAbsent.cpp | 0 dbms/{src => }/Functions/array/CMakeLists.txt | 0 .../Functions/array/FunctionArrayMapped.h | 0 dbms/{src => }/Functions/array/array.cpp | 0 dbms/{src => }/Functions/array/arrayAUC.cpp | 0 dbms/{src => }/Functions/array/arrayAll.cpp | 0 .../Functions/array/arrayCompact.cpp | 0 .../{src => }/Functions/array/arrayConcat.cpp | 0 dbms/{src => }/Functions/array/arrayCount.cpp | 0 .../{src => }/Functions/array/arrayCumSum.cpp | 0 .../array/arrayCumSumNonNegative.cpp | 0 .../Functions/array/arrayDifference.cpp | 0 .../Functions/array/arrayDistinct.cpp | 0 .../Functions/array/arrayElement.cpp | 
0 .../Functions/array/arrayEnumerate.cpp | 0 .../Functions/array/arrayEnumerateDense.cpp | 0 .../array/arrayEnumerateDenseRanked.cpp | 0 .../Functions/array/arrayEnumerateExtended.h | 0 .../Functions/array/arrayEnumerateRanked.cpp | 0 .../Functions/array/arrayEnumerateRanked.h | 0 .../Functions/array/arrayEnumerateUniq.cpp | 0 .../array/arrayEnumerateUniqRanked.cpp | 0 .../{src => }/Functions/array/arrayExists.cpp | 0 dbms/{src => }/Functions/array/arrayFill.cpp | 0 .../{src => }/Functions/array/arrayFilter.cpp | 0 dbms/{src => }/Functions/array/arrayFirst.cpp | 0 .../Functions/array/arrayFirstIndex.cpp | 0 .../Functions/array/arrayFlatten.cpp | 0 dbms/{src => }/Functions/array/arrayIndex.h | 0 .../Functions/array/arrayIntersect.cpp | 0 dbms/{src => }/Functions/array/arrayJoin.cpp | 0 dbms/{src => }/Functions/array/arrayMap.cpp | 0 dbms/{src => }/Functions/array/arrayPop.h | 0 .../Functions/array/arrayPopBack.cpp | 0 .../Functions/array/arrayPopFront.cpp | 0 dbms/{src => }/Functions/array/arrayPush.h | 0 .../Functions/array/arrayPushBack.cpp | 0 .../Functions/array/arrayPushFront.cpp | 0 .../{src => }/Functions/array/arrayReduce.cpp | 0 .../Functions/array/arrayReduceInRanges.cpp | 0 .../{src => }/Functions/array/arrayResize.cpp | 0 .../Functions/array/arrayReverse.cpp | 0 .../Functions/array/arrayScalarProduct.h | 0 dbms/{src => }/Functions/array/arraySlice.cpp | 0 dbms/{src => }/Functions/array/arraySort.cpp | 0 dbms/{src => }/Functions/array/arraySplit.cpp | 0 dbms/{src => }/Functions/array/arraySum.cpp | 0 dbms/{src => }/Functions/array/arrayUniq.cpp | 0 .../Functions/array/arrayWithConstant.cpp | 0 dbms/{src => }/Functions/array/arrayZip.cpp | 0 dbms/{src => }/Functions/array/countEqual.cpp | 0 dbms/{src => }/Functions/array/emptyArray.cpp | 0 .../Functions/array/emptyArrayToSingle.cpp | 0 dbms/{src => }/Functions/array/has.cpp | 0 dbms/{src => }/Functions/array/hasAll.cpp | 0 dbms/{src => }/Functions/array/hasAllAny.h | 0 dbms/{src => }/Functions/array/hasAny.cpp | 0 dbms/{src => }/Functions/array/indexOf.cpp | 0 dbms/{src => }/Functions/array/length.cpp | 0 dbms/{src => }/Functions/array/range.cpp | 0 .../array/registerFunctionsArray.cpp | 0 dbms/{src => }/Functions/asin.cpp | 0 dbms/{src => }/Functions/assumeNotNull.cpp | 0 dbms/{src => }/Functions/atan.cpp | 0 dbms/{src => }/Functions/bar.cpp | 0 dbms/{src => }/Functions/base64Decode.cpp | 0 dbms/{src => }/Functions/base64Encode.cpp | 0 dbms/{src => }/Functions/bitAnd.cpp | 0 dbms/{src => }/Functions/bitBoolMaskAnd.cpp | 2 +- dbms/{src => }/Functions/bitBoolMaskOr.cpp | 2 +- dbms/{src => }/Functions/bitCount.cpp | 0 dbms/{src => }/Functions/bitNot.cpp | 0 dbms/{src => }/Functions/bitOr.cpp | 0 dbms/{src => }/Functions/bitRotateLeft.cpp | 0 dbms/{src => }/Functions/bitRotateRight.cpp | 0 dbms/{src => }/Functions/bitShiftLeft.cpp | 0 dbms/{src => }/Functions/bitShiftRight.cpp | 0 dbms/{src => }/Functions/bitSwapLastTwo.cpp | 2 +- dbms/{src => }/Functions/bitTest.cpp | 0 dbms/{src => }/Functions/bitTestAll.cpp | 0 dbms/{src => }/Functions/bitTestAny.cpp | 0 dbms/{src => }/Functions/bitWrapperFunc.cpp | 2 +- dbms/{src => }/Functions/bitXor.cpp | 0 dbms/{src => }/Functions/blockNumber.cpp | 0 .../Functions/blockSerializedSize.cpp | 0 dbms/{src => }/Functions/blockSize.cpp | 0 .../Functions/caseWithExpression.cpp | 0 dbms/{src => }/Functions/castTypeToEither.h | 0 dbms/{src => }/Functions/cbrt.cpp | 0 dbms/{src => }/Functions/coalesce.cpp | 0 dbms/{src => }/Functions/concat.cpp | 0 .../{src => }/Functions/config_functions.h.in | 0 
dbms/{src => }/Functions/convertCharset.cpp | 0 dbms/{src => }/Functions/cos.cpp | 0 dbms/{src => }/Functions/currentDatabase.cpp | 0 dbms/{src => }/Functions/currentQuota.cpp | 0 .../Functions/currentRowPolicies.cpp | 0 dbms/{src => }/Functions/currentUser.cpp | 0 dbms/{src => }/Functions/dateDiff.cpp | 0 .../Functions/defaultValueOfArgumentType.cpp | 0 dbms/{src => }/Functions/demange.cpp | 0 dbms/{src => }/Functions/divide.cpp | 0 .../Functions/dumpColumnStructure.cpp | 0 dbms/{src => }/Functions/e.cpp | 0 dbms/{src => }/Functions/empty.cpp | 0 dbms/{src => }/Functions/endsWith.cpp | 0 dbms/{src => }/Functions/equals.cpp | 0 dbms/{src => }/Functions/erf.cpp | 0 dbms/{src => }/Functions/erfc.cpp | 0 dbms/{src => }/Functions/evalMLMethod.cpp | 0 dbms/{src => }/Functions/exp.cpp | 0 dbms/{src => }/Functions/exp10.cpp | 0 dbms/{src => }/Functions/exp2.cpp | 0 .../extractTimeZoneFromFunctionArguments.cpp | 0 .../extractTimeZoneFromFunctionArguments.h | 0 dbms/{src => }/Functions/filesystem.cpp | 0 .../Functions/finalizeAggregation.cpp | 0 dbms/{src => }/Functions/formatDateTime.cpp | 0 dbms/{src => }/Functions/formatString.cpp | 0 dbms/{src => }/Functions/formatString.h | 0 dbms/{src => }/Functions/gcd.cpp | 0 dbms/{src => }/Functions/generateUUIDv4.cpp | 0 dbms/{src => }/Functions/geoToH3.cpp | 0 dbms/{src => }/Functions/geohashDecode.cpp | 0 dbms/{src => }/Functions/geohashEncode.cpp | 0 dbms/{src => }/Functions/geohashesInBox.cpp | 0 dbms/{src => }/Functions/getMacro.cpp | 0 dbms/{src => }/Functions/getScalar.cpp | 0 .../{src => }/Functions/getSizeOfEnumType.cpp | 0 .../Functions/greatCircleDistance.cpp | 0 dbms/{src => }/Functions/greater.cpp | 0 dbms/{src => }/Functions/greaterOrEquals.cpp | 0 dbms/{src => }/Functions/greatest.cpp | 0 dbms/{src => }/Functions/h3EdgeAngle.cpp | 0 dbms/{src => }/Functions/h3EdgeLengthM.cpp | 0 dbms/{src => }/Functions/h3GetBaseCell.cpp | 0 dbms/{src => }/Functions/h3GetResolution.cpp | 0 dbms/{src => }/Functions/h3HexAreaM2.cpp | 0 .../Functions/h3IndexesAreNeighbors.cpp | 0 dbms/{src => }/Functions/h3IsValid.cpp | 0 dbms/{src => }/Functions/h3ToChildren.cpp | 0 dbms/{src => }/Functions/h3ToParent.cpp | 0 dbms/{src => }/Functions/h3ToString.cpp | 0 dbms/{src => }/Functions/h3kRing.cpp | 0 dbms/{src => }/Functions/hasColumnInTable.cpp | 0 dbms/{src => }/Functions/hasToken.cpp | 0 .../Functions/hasTokenCaseInsensitive.cpp | 0 dbms/{src => }/Functions/hostName.cpp | 0 dbms/{src => }/Functions/identity.cpp | 0 dbms/{src => }/Functions/if.cpp | 0 dbms/{src => }/Functions/ifNotFinite.cpp | 0 dbms/{src => }/Functions/ifNull.cpp | 0 dbms/{src => }/Functions/ignore.cpp | 0 dbms/{src => }/Functions/ignoreExceptNull.cpp | 0 dbms/{src => }/Functions/in.cpp | 0 dbms/{src => }/Functions/intDiv.cpp | 0 dbms/{src => }/Functions/intDivOrZero.cpp | 0 dbms/{src => }/Functions/intExp10.cpp | 0 dbms/{src => }/Functions/intExp2.cpp | 0 dbms/{src => }/Functions/isFinite.cpp | 0 dbms/{src => }/Functions/isInfinite.cpp | 0 dbms/{src => }/Functions/isNaN.cpp | 0 dbms/{src => }/Functions/isNotNull.cpp | 0 dbms/{src => }/Functions/isNull.cpp | 0 dbms/{src => }/Functions/isValidUTF8.cpp | 0 .../Functions/jumpConsistentHash.cpp | 0 dbms/{src => }/Functions/lcm.cpp | 0 dbms/{src => }/Functions/least.cpp | 0 dbms/{src => }/Functions/lengthUTF8.cpp | 0 dbms/{src => }/Functions/less.cpp | 0 dbms/{src => }/Functions/lessOrEquals.cpp | 0 dbms/{src => }/Functions/lgamma.cpp | 0 .../{src => }/Functions/likePatternToRegexp.h | 0 dbms/{src => }/Functions/log.cpp | 0 dbms/{src => 
}/Functions/log10.cpp | 0 dbms/{src => }/Functions/log2.cpp | 0 .../Functions/lowCardinalityIndices.cpp | 0 .../Functions/lowCardinalityKeys.cpp | 0 dbms/{src => }/Functions/lower.cpp | 0 dbms/{src => }/Functions/lowerUTF8.cpp | 0 dbms/{src => }/Functions/materialize.cpp | 0 dbms/{src => }/Functions/minus.cpp | 0 dbms/{src => }/Functions/modulo.cpp | 0 dbms/{src => }/Functions/moduloOrZero.cpp | 0 dbms/{src => }/Functions/multiIf.cpp | 0 .../Functions/multiSearchAllPositions.cpp | 0 ...multiSearchAllPositionsCaseInsensitive.cpp | 0 ...iSearchAllPositionsCaseInsensitiveUTF8.cpp | 0 .../Functions/multiSearchAllPositionsUTF8.cpp | 0 dbms/{src => }/Functions/multiSearchAny.cpp | 0 .../multiSearchAnyCaseInsensitive.cpp | 0 .../multiSearchAnyCaseInsensitiveUTF8.cpp | 0 .../Functions/multiSearchAnyUTF8.cpp | 0 .../Functions/multiSearchFirstIndex.cpp | 0 .../multiSearchFirstIndexCaseInsensitive.cpp | 0 ...ltiSearchFirstIndexCaseInsensitiveUTF8.cpp | 0 .../Functions/multiSearchFirstIndexUTF8.cpp | 0 .../Functions/multiSearchFirstPosition.cpp | 0 ...ultiSearchFirstPositionCaseInsensitive.cpp | 0 ...SearchFirstPositionCaseInsensitiveUTF8.cpp | 0 .../multiSearchFirstPositionUTF8.cpp | 0 dbms/{src => }/Functions/multiply.cpp | 0 dbms/{src => }/Functions/negate.cpp | 0 dbms/{src => }/Functions/neighbor.cpp | 0 dbms/{src => }/Functions/notEmpty.cpp | 0 dbms/{src => }/Functions/notEquals.cpp | 0 dbms/{src => }/Functions/now.cpp | 0 dbms/{src => }/Functions/now64.cpp | 0 dbms/{src => }/Functions/nullIf.cpp | 0 dbms/{src => }/Functions/pi.cpp | 0 dbms/{src => }/Functions/plus.cpp | 0 dbms/{src => }/Functions/pointInEllipses.cpp | 0 dbms/{src => }/Functions/pointInPolygon.cpp | 0 dbms/{src => }/Functions/position.cpp | 0 .../Functions/positionCaseInsensitive.cpp | 0 .../Functions/positionCaseInsensitiveUTF8.cpp | 0 dbms/{src => }/Functions/positionUTF8.cpp | 0 dbms/{src => }/Functions/pow.cpp | 0 dbms/{src => }/Functions/rand.cpp | 0 dbms/{src => }/Functions/rand64.cpp | 0 dbms/{src => }/Functions/randConstant.cpp | 0 .../Functions/randomPrintableASCII.cpp | 0 dbms/{src => }/Functions/regexpQuoteMeta.cpp | 0 .../{src => }/Functions/registerFunctions.cpp | 0 dbms/{src => }/Functions/registerFunctions.h | 0 .../Functions/registerFunctionsArithmetic.cpp | 0 .../Functions/registerFunctionsComparison.cpp | 0 .../registerFunctionsConditional.cpp | 0 .../registerFunctionsConsistentHashing.cpp | 0 .../Functions/registerFunctionsDateTime.cpp | 0 .../Functions/registerFunctionsGeo.cpp | 0 .../registerFunctionsHigherOrder.cpp | 0 .../registerFunctionsIntrospection.cpp | 0 .../Functions/registerFunctionsMath.cpp | 0 .../registerFunctionsMiscellaneous.cpp | 0 .../Functions/registerFunctionsNull.cpp | 0 .../Functions/registerFunctionsRandom.cpp | 0 .../registerFunctionsReinterpret.cpp | 0 .../Functions/registerFunctionsString.cpp | 0 .../registerFunctionsStringSearch.cpp | 0 .../Functions/registerFunctionsTuple.cpp | 0 .../Functions/registerFunctionsVisitParam.cpp | 0 .../Functions/reinterpretAsFixedString.cpp | 0 .../Functions/reinterpretAsString.cpp | 0 .../Functions/reinterpretStringAs.cpp | 0 dbms/{src => }/Functions/repeat.cpp | 0 dbms/{src => }/Functions/replicate.cpp | 0 dbms/{src => }/Functions/reverse.cpp | 0 dbms/{src => }/Functions/reverseUTF8.cpp | 0 dbms/{src => }/Functions/roundAge.cpp | 0 dbms/{src => }/Functions/roundDuration.cpp | 0 dbms/{src => }/Functions/roundToExp2.cpp | 0 .../Functions/rowNumberInAllBlocks.cpp | 0 dbms/{src => }/Functions/rowNumberInBlock.cpp | 0 .../{src => 
}/Functions/runningAccumulate.cpp | 0 .../{src => }/Functions/runningDifference.cpp | 0 dbms/{src => }/Functions/runningDifference.h | 0 ...unningDifferenceStartingWithFirstValue.cpp | 0 dbms/{src => }/Functions/sigmoid.cpp | 0 dbms/{src => }/Functions/sin.cpp | 0 dbms/{src => }/Functions/sleep.cpp | 0 dbms/{src => }/Functions/sleep.h | 0 dbms/{src => }/Functions/sleepEachRow.cpp | 0 dbms/{src => }/Functions/sqrt.cpp | 0 dbms/{src => }/Functions/startsWith.cpp | 0 dbms/{src => }/Functions/stringToH3.cpp | 0 dbms/{src => }/Functions/substring.cpp | 0 dbms/{src => }/Functions/subtractDays.cpp | 0 dbms/{src => }/Functions/subtractHours.cpp | 0 dbms/{src => }/Functions/subtractMinutes.cpp | 0 dbms/{src => }/Functions/subtractMonths.cpp | 0 dbms/{src => }/Functions/subtractQuarters.cpp | 0 dbms/{src => }/Functions/subtractSeconds.cpp | 0 dbms/{src => }/Functions/subtractWeeks.cpp | 0 dbms/{src => }/Functions/subtractYears.cpp | 0 .../Functions/sumburConsistentHash.cpp | 0 dbms/{src => }/Functions/tan.cpp | 0 dbms/{src => }/Functions/tanh.cpp | 0 dbms/{src => }/Functions/tests/CMakeLists.txt | 0 .../Functions/tests/number_traits.cpp | 0 dbms/{src => }/Functions/tgamma.cpp | 0 dbms/{src => }/Functions/throwIf.cpp | 0 dbms/{src => }/Functions/timeSlot.cpp | 0 dbms/{src => }/Functions/timeSlots.cpp | 0 dbms/{src => }/Functions/timezone.cpp | 0 dbms/{src => }/Functions/toColumnTypeName.cpp | 0 dbms/{src => }/Functions/toCustomWeek.cpp | 0 dbms/{src => }/Functions/toDayOfMonth.cpp | 0 dbms/{src => }/Functions/toDayOfWeek.cpp | 0 dbms/{src => }/Functions/toDayOfYear.cpp | 0 dbms/{src => }/Functions/toHour.cpp | 0 dbms/{src => }/Functions/toISOWeek.cpp | 0 dbms/{src => }/Functions/toISOYear.cpp | 0 dbms/{src => }/Functions/toLowCardinality.cpp | 0 dbms/{src => }/Functions/toMinute.cpp | 0 dbms/{src => }/Functions/toMonday.cpp | 0 dbms/{src => }/Functions/toMonth.cpp | 0 dbms/{src => }/Functions/toNullable.cpp | 0 dbms/{src => }/Functions/toQuarter.cpp | 0 dbms/{src => }/Functions/toRelativeDayNum.cpp | 0 .../{src => }/Functions/toRelativeHourNum.cpp | 0 .../Functions/toRelativeMinuteNum.cpp | 0 .../Functions/toRelativeMonthNum.cpp | 0 .../Functions/toRelativeQuarterNum.cpp | 0 .../Functions/toRelativeSecondNum.cpp | 0 .../{src => }/Functions/toRelativeWeekNum.cpp | 0 .../{src => }/Functions/toRelativeYearNum.cpp | 0 dbms/{src => }/Functions/toSecond.cpp | 0 dbms/{src => }/Functions/toStartOfDay.cpp | 0 .../Functions/toStartOfFifteenMinutes.cpp | 0 .../Functions/toStartOfFiveMinute.cpp | 0 dbms/{src => }/Functions/toStartOfHour.cpp | 0 dbms/{src => }/Functions/toStartOfISOYear.cpp | 0 .../{src => }/Functions/toStartOfInterval.cpp | 0 dbms/{src => }/Functions/toStartOfMinute.cpp | 0 dbms/{src => }/Functions/toStartOfMonth.cpp | 0 dbms/{src => }/Functions/toStartOfQuarter.cpp | 0 .../Functions/toStartOfTenMinutes.cpp | 0 dbms/{src => }/Functions/toStartOfYear.cpp | 0 dbms/{src => }/Functions/toTime.cpp | 0 dbms/{src => }/Functions/toTimeZone.cpp | 0 dbms/{src => }/Functions/toTypeName.cpp | 0 dbms/{src => }/Functions/toValidUTF8.cpp | 0 dbms/{src => }/Functions/toYYYYMM.cpp | 0 dbms/{src => }/Functions/toYYYYMMDD.cpp | 0 dbms/{src => }/Functions/toYYYYMMDDhhmmss.cpp | 0 dbms/{src => }/Functions/toYear.cpp | 0 dbms/{src => }/Functions/today.cpp | 0 dbms/{src => }/Functions/transform.cpp | 0 dbms/{src => }/Functions/trap.cpp | 0 dbms/{src => }/Functions/trim.cpp | 0 dbms/{src => }/Functions/tryBase64Decode.cpp | 0 dbms/{src => }/Functions/tuple.cpp | 0 dbms/{src => }/Functions/tupleElement.cpp | 0 dbms/{src 
=> }/Functions/upper.cpp | 0 dbms/{src => }/Functions/upperUTF8.cpp | 0 dbms/{src => }/Functions/uptime.cpp | 0 dbms/{src => }/Functions/version.cpp | 0 dbms/{src => }/Functions/visibleWidth.cpp | 0 .../Functions/visitParamExtractBool.cpp | 0 .../Functions/visitParamExtractFloat.cpp | 0 .../Functions/visitParamExtractInt.cpp | 0 .../Functions/visitParamExtractRaw.cpp | 0 .../Functions/visitParamExtractString.cpp | 0 .../Functions/visitParamExtractUInt.cpp | 0 dbms/{src => }/Functions/visitParamHas.cpp | 0 .../Functions/yandexConsistentHash.cpp | 0 dbms/{src => }/Functions/yesterday.cpp | 0 dbms/{src => }/IO/AIO.cpp | 0 dbms/{src => }/IO/AIO.h | 0 dbms/{src => }/IO/AIOContextPool.cpp | 0 dbms/{src => }/IO/AIOContextPool.h | 0 dbms/{src => }/IO/AsynchronousWriteBuffer.h | 0 dbms/{src => }/IO/BitHelpers.h | 0 dbms/{src => }/IO/BrotliReadBuffer.cpp | 0 dbms/{src => }/IO/BrotliReadBuffer.h | 0 dbms/{src => }/IO/BrotliWriteBuffer.cpp | 0 dbms/{src => }/IO/BrotliWriteBuffer.h | 0 dbms/{src => }/IO/BufferBase.h | 0 dbms/{src => }/IO/BufferWithOwnMemory.h | 0 dbms/{src => }/IO/CMakeLists.txt | 0 dbms/{src => }/IO/CascadeWriteBuffer.cpp | 0 dbms/{src => }/IO/CascadeWriteBuffer.h | 0 dbms/{src => }/IO/CompressionMethod.cpp | 0 dbms/{src => }/IO/CompressionMethod.h | 0 dbms/{src => }/IO/ConcatReadBuffer.h | 0 dbms/{src => }/IO/ConnectionTimeouts.h | 0 dbms/{src => }/IO/DoubleConverter.cpp | 0 dbms/{src => }/IO/DoubleConverter.h | 0 dbms/{src => }/IO/HDFSCommon.cpp | 0 dbms/{src => }/IO/HDFSCommon.h | 0 dbms/{src => }/IO/HTTPCommon.cpp | 0 dbms/{src => }/IO/HTTPCommon.h | 0 dbms/{src => }/IO/HashingReadBuffer.h | 0 dbms/{src => }/IO/HashingWriteBuffer.cpp | 0 dbms/{src => }/IO/HashingWriteBuffer.h | 0 dbms/{src => }/IO/HexWriteBuffer.cpp | 0 dbms/{src => }/IO/HexWriteBuffer.h | 0 dbms/{src => }/IO/IReadableWriteBuffer.h | 0 dbms/{src => }/IO/LimitReadBuffer.cpp | 0 dbms/{src => }/IO/LimitReadBuffer.h | 0 dbms/{src => }/IO/MMapReadBufferFromFile.cpp | 0 dbms/{src => }/IO/MMapReadBufferFromFile.h | 0 .../IO/MMapReadBufferFromFileDescriptor.cpp | 0 .../IO/MMapReadBufferFromFileDescriptor.h | 0 dbms/{src => }/IO/MemoryReadWriteBuffer.cpp | 0 dbms/{src => }/IO/MemoryReadWriteBuffer.h | 0 dbms/{src => }/IO/NullWriteBuffer.cpp | 0 dbms/{src => }/IO/NullWriteBuffer.h | 0 dbms/{src => }/IO/Operators.h | 0 dbms/{src => }/IO/PeekableReadBuffer.cpp | 0 dbms/{src => }/IO/PeekableReadBuffer.h | 0 dbms/{src => }/IO/Progress.cpp | 0 dbms/{src => }/IO/Progress.h | 0 dbms/{src => }/IO/ReadBuffer.h | 0 dbms/{src => }/IO/ReadBufferAIO.cpp | 0 dbms/{src => }/IO/ReadBufferAIO.h | 0 dbms/{src => }/IO/ReadBufferFromFile.cpp | 0 dbms/{src => }/IO/ReadBufferFromFile.h | 0 dbms/{src => }/IO/ReadBufferFromFileBase.cpp | 0 dbms/{src => }/IO/ReadBufferFromFileBase.h | 0 .../IO/ReadBufferFromFileDescriptor.cpp | 0 .../IO/ReadBufferFromFileDescriptor.h | 0 dbms/{src => }/IO/ReadBufferFromHDFS.cpp | 0 dbms/{src => }/IO/ReadBufferFromHDFS.h | 0 dbms/{src => }/IO/ReadBufferFromIStream.cpp | 0 dbms/{src => }/IO/ReadBufferFromIStream.h | 0 dbms/{src => }/IO/ReadBufferFromMemory.cpp | 0 dbms/{src => }/IO/ReadBufferFromMemory.h | 0 .../{src => }/IO/ReadBufferFromPocoSocket.cpp | 0 dbms/{src => }/IO/ReadBufferFromPocoSocket.h | 0 dbms/{src => }/IO/ReadBufferFromS3.cpp | 0 dbms/{src => }/IO/ReadBufferFromS3.h | 0 dbms/{src => }/IO/ReadBufferFromString.h | 0 dbms/{src => }/IO/ReadHelpers.cpp | 0 dbms/{src => }/IO/ReadHelpers.h | 0 dbms/{src => }/IO/ReadWriteBufferFromHTTP.cpp | 0 dbms/{src => }/IO/ReadWriteBufferFromHTTP.h | 0 dbms/{src => 
}/IO/S3Common.cpp | 0 dbms/{src => }/IO/S3Common.h | 0 dbms/{src => }/IO/SeekableReadBuffer.h | 0 dbms/{src => }/IO/UncompressedCache.h | 0 dbms/{src => }/IO/UseSSL.cpp | 0 dbms/{src => }/IO/UseSSL.h | 0 dbms/{src => }/IO/VarInt.h | 0 dbms/{src => }/IO/WriteBuffer.h | 0 dbms/{src => }/IO/WriteBufferAIO.cpp | 0 dbms/{src => }/IO/WriteBufferAIO.h | 0 dbms/{src => }/IO/WriteBufferFromArena.h | 0 dbms/{src => }/IO/WriteBufferFromFile.cpp | 0 dbms/{src => }/IO/WriteBufferFromFile.h | 0 dbms/{src => }/IO/WriteBufferFromFileBase.cpp | 0 dbms/{src => }/IO/WriteBufferFromFileBase.h | 0 .../IO/WriteBufferFromFileDescriptor.cpp | 0 .../IO/WriteBufferFromFileDescriptor.h | 0 ...fferFromFileDescriptorDiscardOnFailure.cpp | 0 ...BufferFromFileDescriptorDiscardOnFailure.h | 0 dbms/{src => }/IO/WriteBufferFromHDFS.cpp | 0 dbms/{src => }/IO/WriteBufferFromHDFS.h | 0 dbms/{src => }/IO/WriteBufferFromHTTP.cpp | 0 dbms/{src => }/IO/WriteBufferFromHTTP.h | 0 .../IO/WriteBufferFromHTTPServerResponse.cpp | 0 .../IO/WriteBufferFromHTTPServerResponse.h | 0 dbms/{src => }/IO/WriteBufferFromOStream.cpp | 0 dbms/{src => }/IO/WriteBufferFromOStream.h | 0 .../IO/WriteBufferFromPocoSocket.cpp | 0 dbms/{src => }/IO/WriteBufferFromPocoSocket.h | 0 dbms/{src => }/IO/WriteBufferFromS3.cpp | 0 dbms/{src => }/IO/WriteBufferFromS3.h | 0 dbms/{src => }/IO/WriteBufferFromString.h | 0 .../IO/WriteBufferFromTemporaryFile.cpp | 0 .../IO/WriteBufferFromTemporaryFile.h | 0 dbms/{src => }/IO/WriteBufferFromVector.h | 0 dbms/{src => }/IO/WriteBufferValidUTF8.cpp | 0 dbms/{src => }/IO/WriteBufferValidUTF8.h | 0 dbms/{src => }/IO/WriteHelpers.cpp | 0 dbms/{src => }/IO/WriteHelpers.h | 0 dbms/{src => }/IO/WriteIntText.h | 0 .../{src => }/IO/ZlibDeflatingWriteBuffer.cpp | 0 dbms/{src => }/IO/ZlibDeflatingWriteBuffer.h | 0 dbms/{src => }/IO/ZlibInflatingReadBuffer.cpp | 0 dbms/{src => }/IO/ZlibInflatingReadBuffer.h | 0 dbms/{src => }/IO/copyData.cpp | 0 dbms/{src => }/IO/copyData.h | 0 .../IO/createReadBufferFromFileBase.cpp | 0 .../IO/createReadBufferFromFileBase.h | 0 .../IO/createWriteBufferFromFileBase.cpp | 0 .../IO/createWriteBufferFromFileBase.h | 0 dbms/{src => }/IO/parseDateTimeBestEffort.cpp | 0 dbms/{src => }/IO/parseDateTimeBestEffort.h | 0 dbms/{src => }/IO/readDecimalText.h | 0 dbms/{src => }/IO/readFloatText.cpp | 0 dbms/{src => }/IO/readFloatText.h | 0 dbms/{src => }/IO/tests/CMakeLists.txt | 0 dbms/{src => }/IO/tests/async_write.cpp | 0 .../gtest_DateTime64_parsing_and_writing.cpp | 0 .../IO/tests/gtest_DateTimeToString.cpp | 0 .../tests/gtest_aio_seek_back_after_eof.cpp | 0 dbms/{src => }/IO/tests/gtest_bit_io.cpp | 0 .../gtest_cascade_and_memory_write_buffer.cpp | 0 .../IO/tests/gtest_peekable_read_buffer.cpp | 0 dbms/{src => }/IO/tests/hashing_buffer.h | 0 .../IO/tests/hashing_read_buffer.cpp | 0 .../IO/tests/hashing_write_buffer.cpp | 0 dbms/{src => }/IO/tests/io_operators.cpp | 0 dbms/{src => }/IO/tests/limit_read_buffer.cpp | 0 .../IO/tests/limit_read_buffer.reference | 0 dbms/{src => }/IO/tests/limit_read_buffer.sh | 0 .../{src => }/IO/tests/limit_read_buffer2.cpp | 0 dbms/{src => }/IO/tests/mempbrk.cpp | 0 .../IO/tests/o_direct_and_dirty_pages.cpp | 0 .../IO/tests/parse_date_time_best_effort.cpp | 0 dbms/{src => }/IO/tests/parse_int_perf.cpp | 0 dbms/{src => }/IO/tests/parse_int_perf2.cpp | 0 dbms/{src => }/IO/tests/read_buffer.cpp | 0 dbms/{src => }/IO/tests/read_buffer_aio.cpp | 0 dbms/{src => }/IO/tests/read_buffer_perf.cpp | 0 .../IO/tests/read_escaped_string.cpp | 0 dbms/{src => }/IO/tests/read_float_perf.cpp | 
0 dbms/{src => }/IO/tests/read_write_int.cpp | 0 dbms/{src => }/IO/tests/ryu_test.cpp | 0 dbms/{src => }/IO/tests/valid_utf8.cpp | 0 dbms/{src => }/IO/tests/valid_utf8_perf.cpp | 0 dbms/{src => }/IO/tests/var_uint.cpp | 0 dbms/{src => }/IO/tests/write_buffer.cpp | 0 dbms/{src => }/IO/tests/write_buffer_aio.cpp | 0 dbms/{src => }/IO/tests/write_buffer_perf.cpp | 0 dbms/{src => }/IO/tests/write_int.cpp | 0 dbms/{src => }/IO/tests/zlib_buffers.cpp | 0 dbms/{src => }/IO/tests/zlib_ng_bug.cpp | 0 .../Interpreters/ActionLocksManager.cpp | 0 .../Interpreters/ActionLocksManager.h | 0 .../{src => }/Interpreters/ActionsVisitor.cpp | 0 dbms/{src => }/Interpreters/ActionsVisitor.h | 0 .../Interpreters/AddDefaultDatabaseVisitor.h | 0 .../Interpreters/AggregateDescription.h | 0 .../Interpreters/AggregationCommon.h | 0 dbms/{src => }/Interpreters/Aggregator.cpp | 0 dbms/{src => }/Interpreters/Aggregator.h | 0 dbms/{src => }/Interpreters/Aliases.h | 0 dbms/{src => }/Interpreters/AnalyzedJoin.cpp | 0 dbms/{src => }/Interpreters/AnalyzedJoin.h | 0 .../Interpreters/ArrayJoinAction.cpp | 0 dbms/{src => }/Interpreters/ArrayJoinAction.h | 0 .../Interpreters/ArrayJoinedColumnsVisitor.h | 0 .../{src => }/Interpreters/AsteriskSemantic.h | 0 .../Interpreters/AsynchronousMetrics.cpp | 0 .../Interpreters/AsynchronousMetrics.h | 0 dbms/{src => }/Interpreters/BloomFilter.cpp | 0 dbms/{src => }/Interpreters/BloomFilter.h | 0 dbms/{src => }/Interpreters/BloomFilterHash.h | 0 dbms/{src => }/Interpreters/CMakeLists.txt | 0 .../{src => }/Interpreters/CancellationCode.h | 0 dbms/{src => }/Interpreters/CatBoostModel.cpp | 0 dbms/{src => }/Interpreters/CatBoostModel.h | 0 dbms/{src => }/Interpreters/ClientInfo.cpp | 0 dbms/{src => }/Interpreters/ClientInfo.h | 0 dbms/{src => }/Interpreters/Cluster.cpp | 0 dbms/{src => }/Interpreters/Cluster.h | 0 .../ClusterProxy/IStreamFactory.h | 0 .../ClusterProxy/SelectStreamFactory.cpp | 0 .../ClusterProxy/SelectStreamFactory.h | 0 .../ClusterProxy/executeQuery.cpp | 0 .../Interpreters/ClusterProxy/executeQuery.h | 0 .../Interpreters/CollectJoinOnKeysVisitor.cpp | 0 .../Interpreters/CollectJoinOnKeysVisitor.h | 0 .../Interpreters/ColumnNamesContext.cpp | 0 .../Interpreters/ColumnNamesContext.h | 0 dbms/{src => }/Interpreters/Context.cpp | 0 dbms/{src => }/Interpreters/Context.h | 0 .../Interpreters/CrossToInnerJoinVisitor.cpp | 0 .../Interpreters/CrossToInnerJoinVisitor.h | 0 dbms/{src => }/Interpreters/DDLWorker.cpp | 0 dbms/{src => }/Interpreters/DDLWorker.h | 0 .../Interpreters/DNSCacheUpdater.cpp | 0 dbms/{src => }/Interpreters/DNSCacheUpdater.h | 0 .../DatabaseAndTableWithAlias.cpp | 0 .../Interpreters/DatabaseAndTableWithAlias.h | 0 .../Interpreters/DatabaseCatalog.cpp | 0 dbms/{src => }/Interpreters/DatabaseCatalog.h | 0 .../Interpreters/EmbeddedDictionaries.cpp | 0 .../Interpreters/EmbeddedDictionaries.h | 0 .../ExecuteScalarSubqueriesVisitor.cpp | 0 .../ExecuteScalarSubqueriesVisitor.h | 0 .../Interpreters/ExpressionActions.cpp | 0 .../Interpreters/ExpressionActions.h | 0 .../Interpreters/ExpressionAnalyzer.cpp | 0 .../Interpreters/ExpressionAnalyzer.h | 0 dbms/{src => }/Interpreters/ExpressionJIT.cpp | 0 dbms/{src => }/Interpreters/ExpressionJIT.h | 0 .../ExternalDictionariesLoader.cpp | 0 .../Interpreters/ExternalDictionariesLoader.h | 0 .../{src => }/Interpreters/ExternalLoader.cpp | 0 dbms/{src => }/Interpreters/ExternalLoader.h | 0 ...ExternalLoaderDatabaseConfigRepository.cpp | 0 .../ExternalLoaderDatabaseConfigRepository.h | 0 .../ExternalLoaderTempConfigRepository.cpp | 0 
 dbms/{src => }/Interpreters/... (all Interpreters sources, headers and tests; renamed unchanged) |   0
 dbms/{src => }/NOTICE                          |   0
 dbms/{src => }/Parsers/... (all Parsers sources, headers and tests; renamed unchanged except the two files below) |   0
 dbms/{src => }/Parsers/CMakeLists.txt          |   2 +-
 dbms/{src => }/Parsers/tests/lexer.cpp         |   2 +-
 dbms/{src => }/Processors/... (all Processors sources, headers and tests, including Formats and Transforms; renamed unchanged) |   0
 dbms/{src => }/Storages/... (all Storages sources, headers and tests; renamed unchanged except the three files below) |   0
 .../MergeTree/MergeTreeIndexGranularityInfo.h.gch |  Bin
 .../Storages/MergeTree/MergeTreeIndexSet.cpp   |   2 +-
 .../MergeTree/registerStorageMergeTree.cpp     |   2 +-
 dbms/{src => }/TableFunctions/... (all TableFunctions sources and headers; renamed unchanged) |   0
 dbms/src/CMakeLists.txt                        |  19 -
 docker/builder/README.md                       |  14 +-
 docker/images.json                             |   2 +-
 docker/packager/binary/build.sh                |  10 +-
 docker/packager/packager                       |   2 +-
 .../test/performance-comparison/entrypoint.sh  |   2 +-
 .../performance_comparison.md                  |   2 +-
 docker/test/test_runner.sh                     |   6 +-
 docker/test/test_runner_docker_compose.yaml    |   4 +-
 docs/en/development/browse_code.md             |   2 +-
 docs/en/development/build.md                   |   2 +-
 docs/en/development/developer_instruction.md   |  16 +-
 docs/en/development/tests.md                   |  23 +-
 .../example_datasets/metrica.md                |   2 +-
 docs/en/getting_started/install.md             |   6 +-
 docs/en/interfaces/tcp.md                      |   2 +-
 docs/en/operations/backup.md                   |   2 +-
 ...sampling_query_profiler_example_result.txt  | 236 +++---
 docs/en/operations/performance_test.md         |  12 +-
 .../operations/settings/query_complexity.md    |   4 +-
 docs/en/operations/system_tables.md            |   2 +-
 docs/en/query_language/alter.md                |   2 +-
 docs/en/query_language/create.md               |   2 +-
 .../query_language/functions/introspection.md  |   8 +-
 docs/en/query_language/operators.md            |   2 +-
 docs/es/changelog/index.md                     |   2 +-
 docs/es/development/browse_code.md             |   2 +-
 docs/es/development/build.md                   |   2 +-
 docs/es/development/developer_instruction.md   |  16 +-
 docs/es/development/tests.md                   |  22 +-
 .../example_datasets/metrica.md                |   2 +-
 docs/es/getting_started/install.md             |   6 +-
 docs/es/interfaces/tcp.md                      |   2 +-
 docs/es/operations/backup.md                   |   2 +-
 ...sampling_query_profiler_example_result.txt  | 236 +++---
 docs/es/operations/performance_test.md         |  12 +-
 .../operations/settings/query_complexity.md    |   4 +-
 docs/es/operations/system_tables.md            |   2 +-
 docs/es/query_language/alter.md                |   2 +-
 docs/es/query_language/create.md               |   2 +-
 .../query_language/functions/introspection.md  |   8 +-
 docs/es/query_language/operators.md            |   2 +-
 .../example_datasets/metrica.md                |   2 +-
 docs/fa/getting_started/install.md             |   8 +-
 docs/fa/interfaces/tcp.md                      |   2 +-
 docs/ru/development/browse_code.md             |   2 +-
 docs/ru/development/developer_instruction.md   |  16 +-
 .../example_datasets/metrica.md                |   2 +-
 docs/ru/getting_started/install.md             |   6 +-
 docs/ru/interfaces/tcp.md                      |   2 +-
 docs/ru/operations/backup.md                   |   2 +-
 .../operations/settings/query_complexity.md    |   4 +-
 docs/ru/operations/system_tables.md            |   2 +-
 docs/ru/query_language/alter.md                |   2 +-
 docs/ru/query_language/create.md               |   2 +-
 .../query_language/functions/introspection.md  |   8 +-
 docs/ru/query_language/operators.md            |   2 +-
 docs/zh/development/build.md                   |   2 +-
 docs/zh/development/developer_instruction.md   |  16 +-
 docs/zh/development/tests.md                   |  22 +-
 docs/zh/getting_started/install.md             |   8 +-
 docs/zh/interfaces/tcp.md                      |   2 +-
 .../operations/settings/query_complexity.md    |   4 +-
 docs/zh/operations/table_engines/mergetree.md  |   2 +-
 docs/zh/query_language/create.md               |   2 +-
 docs/zh/query_language/operators.md            |   2 +-
 {dbms/programs => programs}/... (all program sources, configs and server files; renamed unchanged) |   0
 {dbms/tests => tests}/... (test harness, configs, catboost models, instructions and integration helpers; renamed unchanged except the files below) |   0
 {dbms/tests => tests}/clickhouse-test-server   |   6 +-
 .../instructions/easy_tasks_sorted_ru.md       |   2 +-
 .../instructions/heap-profiler.txt             |   2 +-
 .../instructions/sanitizers.md                 |   2 +-
 .../integration/CMakeLists.txt                 |   8 +-
 {dbms/tests => tests}/integration/README.md    |  14 +-
 .../integration/image/dockerd-entrypoint.sh    |   4 +-
 {dbms/tests => tests}/integration/runner       |   2 +-
.../test_allowed_url_from_config/test.py | 0 .../test_atomic_drop_table/__init__.py | 0 .../config.d/zookeeper_session_timeout.xml | 0 .../configs/remote_servers.xml | 0 .../test_atomic_drop_table/test.py | 0 .../test_authentication/__init__.py | 0 .../integration/test_authentication/test.py | 0 .../test_backup_restore/__init__.py | 0 .../integration/test_backup_restore/test.py | 0 .../test_backward_compatability/__init__.py | 0 .../test_backward_compatability/test.py | 0 .../test_block_structure_mismatch/__init__.py | 0 .../configs/remote_servers.xml | 0 .../test_block_structure_mismatch/test.py | 0 .../integration/test_check_table/__init__.py | 0 .../integration/test_check_table/test.py | 0 .../test_cluster_all_replicas/__init__.py | 0 .../configs/remote_servers.xml | 0 .../test_cluster_all_replicas/test.py | 0 .../test_cluster_copier/__init__.py | 0 .../configs/conf.d/clusters.xml | 0 .../configs/conf.d/ddl.xml | 0 .../configs/conf.d/query_log.xml | 0 .../configs/config-copier.xml | 0 .../test_cluster_copier/configs/users.xml | 0 .../test_cluster_copier/task0_description.xml | 0 .../task_month_to_week_description.xml | 0 .../test_cluster_copier/task_no_arg.xml | 0 .../test_cluster_copier/task_no_index.xml | 0 .../task_test_block_size.xml | 0 .../test_cluster_copier/task_trivial.xml | 0 .../integration/test_cluster_copier/test.py | 0 .../test_cluster_copier/trivial_test.py | 0 .../__init__.py | 0 .../configs/user_restrictions.xml | 0 .../test.py | 0 .../__init__.py | 0 .../configs/config.d/bad.xml | 0 .../configs/config.xml | 0 .../configs/users.xml | 0 .../test_config_corresponding_root/test.py | 0 .../test_config_substitutions/__init__.py | 0 .../configs/config_allow_databases.xml | 0 .../configs/config_env.xml | 0 .../configs/config_incl.xml | 0 .../configs/config_include_from_env.xml | 0 .../configs/config_no_substs.xml | 0 .../configs/config_zk.xml | 0 .../configs/max_query_size.xml | 0 .../test_config_substitutions/test.py | 0 .../__init__.py | 0 .../configs/remote_servers.xml | 0 .../test.py | 0 .../__init__.py | 0 .../configs/remote_servers.xml | 0 .../test.py | 0 .../test_cross_replication/__init__.py | 0 .../configs/remote_servers.xml | 0 .../test_cross_replication/test.py | 0 .../test_delayed_replica_failover/__init__.py | 0 .../configs/remote_servers.xml | 0 .../test_delayed_replica_failover/test.py | 0 .../__init__.py | 0 .../configs/config.xml | 0 .../configs/dictionaries/.gitkeep | 0 .../configs/users.xml | 0 .../dictionary.py | 0 .../external_sources.py | 0 .../fake_cert.pem | 0 .../http_server.py | 0 .../test.py | 0 .../__init__.py | 0 .../configs/config.xml | 0 .../dictionaries/complex_key_cache_string.xml | 0 .../configs/users.xml | 0 .../test.py | 0 .../test_dictionaries_ddl/__init__.py | 0 .../test_dictionaries_ddl/configs/config.xml | 0 .../dictionary_with_conflict_name.xml | 0 .../configs/dictionaries/lazy_load.xml | 0 .../dictionaries/simple_dictionary.xml | 0 .../test_dictionaries_ddl/configs/users.xml | 0 .../integration/test_dictionaries_ddl/test.py | 0 .../__init__.py | 0 .../configs/config.xml | 0 .../configs/dictionaries/dep_x.xml | 0 .../configs/dictionaries/dep_y.xml | 0 .../configs/dictionaries/dep_z.xml | 0 .../configs/users.xml | 0 .../test.py | 0 .../test_dictionaries_mysql/__init__.py | 0 .../configs/config.xml | 0 .../configs/dictionaries/mysql_dict1.xml | 0 .../configs/dictionaries/mysql_dict2.xml | 0 .../configs/remote_servers.xml | 0 .../test_dictionaries_mysql/configs/users.xml | 0 .../test_dictionaries_mysql/test.py | 0 
.../test_dictionaries_null_value/__init__.py | 0 .../configs/config.xml | 0 .../configs/dictionaries/cache.xml | 0 .../configs/users.xml | 0 .../test_dictionaries_null_value/test.py | 0 .../test_dictionaries_select_all/__init__.py | 0 .../configs/config.xml | 0 .../configs/dictionaries/.gitignore | 0 .../configs/dictionaries/source.tsv | 0 .../configs/users.xml | 0 .../generate_dictionaries.py | 0 .../test_dictionaries_select_all/test.py | 0 .../__init__.py | 0 .../configs/config.xml | 0 .../configs/dictionaries/cache_xypairs.xml | 0 .../configs/dictionaries/executable.xml | 0 .../configs/dictionaries/file.txt | 0 .../configs/dictionaries/file.xml | 0 .../configs/dictionaries/slow.xml | 0 .../configs/users.xml | 0 .../test.py | 0 .../__init__.py | 0 .../configs/config.xml | 0 .../dictionaries/cache_ints_dictionary.xml | 0 .../configs/users.xml | 0 .../test_default_reading.py | 0 .../test_dict_get.py | 0 .../test_dict_get_or_default.py | 0 .../__init__.py | 0 .../configs/config.d/clusters.xml | 0 .../configs/config.d/ddl.xml | 0 .../test_dictionary_ddl_on_cluster/test.py | 0 .../test_disk_access_storage/__init__.py | 0 .../configs/access_control_path.xml | 0 .../test_disk_access_storage/test.py | 0 .../test_distributed_ddl/__init__.py | 0 .../test_distributed_ddl/cluster.py | 0 .../configs/config.d/clusters.xml | 0 .../configs/config.d/ddl.xml | 0 .../configs/config.d/macro.xml | 0 .../configs/config.d/query_log.xml | 0 .../config.d/zookeeper_session_timeout.xml | 0 .../configs/users.d/query_log.xml | 0 .../configs/users.d/restricted_user.xml | 0 .../configs_secure/config.d/clusters.xml | 0 .../configs_secure/config.d/ddl.xml | 0 .../configs_secure/config.d/macro.xml | 0 .../configs_secure/config.d/query_log.xml | 0 .../configs_secure/config.d/ssl_conf.xml | 0 .../config.d/zookeeper_session_timeout.xml | 0 .../configs_secure/dhparam.pem | 0 .../configs_secure/server.crt | 0 .../configs_secure/server.key | 0 .../configs_secure/users.d/query_log.xml | 0 .../users.d/restricted_user.xml | 0 .../integration/test_distributed_ddl/test.py | 0 .../test_replicated_alter.py | 0 .../test_distributed_ddl_password/__init__.py | 0 .../configs/config.d/clusters.xml | 0 .../configs/users.d/default_with_password.xml | 0 .../test_distributed_ddl_password/test.py | 0 .../test_distributed_format/__init__.py | 0 .../configs/remote_servers.xml | 0 .../test_distributed_format/test.py | 0 .../__init__.py | 0 .../configs/config.d/remote_servers.xml | 0 .../users.d/set_distributed_defaults.xml | 0 .../config.d/remote_servers.xml | 0 .../configs_secure/config.d/ssl_conf.xml | 0 .../configs_secure/dhparam.pem | 0 .../configs_secure/server.crt | 0 .../configs_secure/server.key | 0 .../users.d/set_distributed_defaults.xml | 0 .../test.py | 0 .../__init__.py | 0 .../config.d/storage_configuration.xml | 0 .../test.py | 0 .../test_distributed_system_query/__init__.py | 0 .../configs/remote_servers.xml | 0 .../test_distributed_system_query/test.py | 0 .../test_extreme_deduplication/__init__.py | 0 .../configs/conf.d/merge_tree.xml | 0 .../configs/conf.d/remote_servers.xml | 0 .../test_extreme_deduplication/test.py | 0 .../test_filesystem_layout/__init__.py | 0 .../test_filesystem_layout/test.py | 0 .../test_force_deduplication/__init__.py | 0 .../test_force_deduplication/test.py | 0 .../test_format_avro_confluent/__init__.py | 0 .../test_format_avro_confluent/test.py | 0 .../test_format_schema_on_server/__init__.py | 0 .../format_schemas/simple.proto | 0 .../test_format_schema_on_server/test.py | 0 
 .../test_globs_in_filepath/__init__.py | 0
 .../test_globs_in_filepath/test.py | 0
 .../test_grant_and_revoke/__init__.py | 0
 .../test_grant_and_revoke/configs/users.xml | 0
 .../integration/test_grant_and_revoke/test.py | 0
 .../test_graphite_merge_tree/__init__.py | 0
 .../configs/graphite_rollup.xml | 0
 .../test_graphite_merge_tree/test.py | 0
 ...test_multiple_paths_and_versions.reference | 0
 .../test_host_ip_change/__init__.py | 0
 .../configs/dns_update_long.xml | 0
 .../configs/dns_update_short.xml | 0
 .../configs/listen_host.xml | 0
 .../configs/remote_servers.xml | 0
 .../integration/test_host_ip_change/test.py | 0
 .../test_http_and_readonly/__init__.py | 0
 .../test_http_and_readonly/test.py | 0
 .../test_https_replication/__init__.py | 0
 .../test_https_replication/configs/config.xml | 0
 .../configs/no_ssl_conf.xml | 0
 .../configs/remote_servers.xml | 0
 .../test_https_replication/configs/server.crt | 0
 .../test_https_replication/configs/server.key | 0
 .../configs/ssl_conf.xml | 0
 .../test_https_replication/test.py | 0
 .../__init__.py | 0
 .../configs/combined_profile.xml | 0
 .../test_inherit_multiple_profiles/test.py | 0
 .../test_insert_into_distributed/__init__.py | 0
 .../enable_distributed_inserts_batching.xml | 0
 .../configs/forbid_background_merges.xml | 0
 .../configs/remote_servers.xml | 0
 .../test_insert_into_distributed/test.py | 0
 .../__init__.py | 0
 .../configs/remote_servers.xml | 0
 .../test.py | 0
 .../__init__.py | 0
 .../enable_distributed_inserts_batching.xml | 0
 .../configs/forbid_background_merges.xml | 0
 .../configs/remote_servers.xml | 0
 .../test.py | 0
 .../test_log_family_s3/__init__.py | 0
 .../configs/config.d/log_conf.xml | 0
 .../test_log_family_s3/configs/config.xml | 0
 .../test_log_family_s3/configs/users.xml | 0
 .../integration/test_log_family_s3/test.py | 0
 .../integration/test_logs_level/__init__.py | 0
 .../configs/config_information.xml | 0
 .../integration/test_logs_level/test.py | 0
 .../__init__.py | 0
 .../configs/config.xml | 0
 .../configs/users.xml | 0
 .../test.py | 0
 .../__init__.py | 0
 .../configs/log_conf.xml | 0
 .../configs/remote_servers.xml | 0
 .../test.py | 0
 .../__init__.py | 0
 .../configs/remote_servers.xml | 0
 .../test_merge_table_over_distributed/test.py | 0
 .../test_merge_tree_s3/__init__.py | 0
 .../config.d/bg_processing_pool_conf.xml | 0
 .../configs/config.d/log_conf.xml | 0
 .../configs/config.d/storage_conf.xml | 0
 .../configs/config.d/users.xml | 0
 .../test_merge_tree_s3/configs/config.xml | 0
 .../integration/test_merge_tree_s3/test.py | 0
 .../test_multiple_disks/__init__.py | 0
 .../configs/config.d/cluster.xml | 0
 .../config.d/storage_configuration.xml | 0
 .../configs/logs_config.xml | 0
 .../integration/test_multiple_disks/test.py | 0
 .../__init__.py | 0
 .../configs/config.xml | 0
 .../configs/users.xml | 0
 .../test_mutations_with_merge_tree/test.py | 0
 .../test_mysql_database_engine/__init__.py | 0
 .../configs/remote_servers.xml | 0
 .../test_mysql_database_engine/test.py | 0
 .../test_mysql_protocol/__init__.py | 0
 .../clients/golang/0.reference | 0
 .../clients/golang/Dockerfile | 0
 .../clients/golang/docker_compose.yml | 0
 .../clients/golang/main.go | 0
 .../clients/mysql/docker_compose.yml | 0
 .../clients/mysqljs/Dockerfile | 0
 .../clients/mysqljs/docker_compose.yml | 0
 .../clients/mysqljs/test.js | 0
 .../clients/php-mysqlnd/Dockerfile | 0
 .../clients/php-mysqlnd/client.crt | 0
 .../clients/php-mysqlnd/client.key | 0
 .../clients/php-mysqlnd/docker_compose.yml | 0
 .../clients/php-mysqlnd/test.php | 0
 .../clients/php-mysqlnd/test_ssl.php | 0
 .../test_mysql_protocol/configs/config.xml | 0
 .../test_mysql_protocol/configs/dhparam.pem | 0
 .../test_mysql_protocol/configs/server.crt | 0
 .../test_mysql_protocol/configs/server.key | 0
 .../test_mysql_protocol/configs/users.xml | 0
 .../integration/test_mysql_protocol/test.py | 0
 .../test_non_default_compression/__init__.py | 0
 .../configs/custom_compression_by_default.xml | 0
 .../configs/enable_uncompressed_cache.xml | 0
 .../configs/lz4hc_compression_by_default.xml | 0
 .../configs/zstd_compression_by_default.xml | 0
 .../test_non_default_compression/test.py | 0
 .../test_odbc_interaction/__init__.py | 0
 .../test_odbc_interaction/configs/config.xml | 0
 .../postgres_odbc_hashed_dictionary.xml | 0
 .../sqlite3_odbc_cached_dictionary.xml | 0
 .../sqlite3_odbc_hashed_dictionary.xml | 0
 .../test_odbc_interaction/configs/users.xml | 0
 .../integration/test_odbc_interaction/test.py | 0
 .../integration/test_old_versions/__init__.py | 0
 .../configs/config.d/test_cluster.xml | 0
 .../integration/test_old_versions/test.py | 0
 .../test_part_log_table/__init__.py | 0
 .../config_with_non_standard_part_log.xml | 0
 .../configs/config_with_standard_part_log.xml | 0
 .../integration/test_part_log_table/test.py | 0
 .../integration/test_partition/__init__.py | 0
 .../integration/test_partition/test.py | 0
 .../test_parts_delete_zookeeper/__init__.py | 0
 .../configs/remote_servers.xml | 0
 .../test_parts_delete_zookeeper/test.py | 0
 .../test_polymorphic_parts/__init__.py | 0
 .../configs/compact_parts.xml | 0
 .../configs/no_leader.xml | 0
 .../configs/users.d/not_optimize_count.xml | 0
 .../test_polymorphic_parts/test.py | 0
 .../test_prometheus_endpoint/__init__.py | 0
 .../configs/prom_conf.xml | 0
 .../test_prometheus_endpoint/test.py | 0
 .../integration/test_quota/__init__.py | 0
 .../test_quota/configs/users.d/quota.xml | 0
 .../integration/test_quota/configs/users.xml | 0
 .../integration/test_quota/no_quotas.xml | 0
 .../integration/test_quota/normal_limits.xml | 0
 .../integration/test_quota/simpliest.xml | 0
 .../integration/test_quota/test.py | 0
 .../integration/test_quota/tiny_limits.xml | 0
 .../integration/test_quota/tracking.xml | 0
 .../integration/test_quota/two_intervals.xml | 0
 .../integration/test_quota/two_quotas.xml | 0
 .../configs/conf.d/merge_tree.xml | 0
 .../configs/conf.d/remote_servers.xml | 0
 .../integration/test_random_inserts/test.py | 0
 .../integration/test_random_inserts/test.sh | 0
 .../__init__.py | 0
 .../test.py | 0
 .../test_recovery_replica/__init__.py | 0
 .../configs/remote_servers.xml | 0
 .../integration/test_recovery_replica/test.py | 0
 .../test_redirect_url_storage/__init__.py | 0
 .../test_redirect_url_storage/test.py | 0
 .../test_relative_filepath/__init__.py | 0
 .../test_relative_filepath/configs/config.xml | 0
 .../test_relative_filepath/test.py | 0
 .../__init__.py | 0
 .../configs/config.xml | 0
 .../configs/users.xml | 0
 .../test.py | 0
 .../__init__.py | 0
 .../configs/config.d/cluster.xml | 0
 .../config.d/storage_configuration.xml | 0
 .../configs/logs_config.xml | 0
 .../test.py | 0
 .../test_remote_prewhere/__init__.py | 0
 .../test_remote_prewhere/configs/log_conf.xml | 0
 .../integration/test_remote_prewhere/test.py | 0
 .../test_replace_partition/__init__.py | 0
 .../configs/remote_servers.xml | 0
 .../test_replace_partition/test.py | 0
 .../__init__.py | 0
 .../configs/notleader.xml | 0
 .../configs/notleaderignorecase.xml | 0
 .../test_replica_can_become_leader/test.py | 0
 .../test_replicated_mutations/__init__.py | 0
 .../configs/merge_tree.xml | 0
 .../configs/merge_tree_max_parts.xml | 0
 .../test_replicated_mutations/test.py | 0
 .../test_replicating_constants/__init__.py | 0
 .../test_replicating_constants/test.py | 0
 .../test_replication_credentials/__init__.py | 0
 .../configs/credentials1.xml | 0
 .../configs/credentials2.xml | 0
 .../configs/no_credentials.xml | 0
 .../configs/remote_servers.xml | 0
 .../test_replication_credentials/test.py | 0
 .../__init__.py | 0
 .../configs/remote_servers.xml | 0
 .../test.py | 0
 .../integration/test_row_policy/__init__.py | 0
 .../integration/test_row_policy/all_rows.xml | 0
 .../configs/config.d/remote_servers.xml | 0
 .../configs/users.d/row_policy.xml | 0
 .../test_row_policy/configs/users.xml | 0
 .../multiple_tags_with_table_names.xml | 0
 .../test_row_policy/no_filters.xml | 0
 .../integration/test_row_policy/no_rows.xml | 0
 .../test_row_policy/normal_filters.xml | 0
 .../test_row_policy/tag_with_table_name.xml | 0
 .../integration/test_row_policy/test.py | 0
 .../__init__.py | 0
 .../configs/remote_servers.xml | 0
 .../configs/user_good_allowed.xml | 0
 .../configs/user_good_restricted.xml | 0
 .../test.py | 0
 .../test_server_initialization/__init__.py | 0
 .../data/default/should_be_restored/data.CSV | 0
 .../default/should_be_dropped.sql.tmp_drop | 0
 .../default/should_be_restored.sql.tmp_drop | 0
 .../default/sophisticated_default.sql | 0
 .../clickhouse_path_fail/metadata/default.sql | 0
 .../test_server_initialization/test.py | 0
 .../test_settings_constraints/__init__.py | 0
 .../configs/users.xml | 0
 .../test_settings_constraints/test.py | 0
 .../__init__.py | 0
 .../configs/remote_servers.xml | 0
 .../test.py | 0
 .../test_settings_profile/__init__.py | 0
 .../integration/test_settings_profile/test.py | 0
 .../integration/test_storage_hdfs/__init__.py | 0
 .../test_storage_hdfs/configs/log_conf.xml | 0
 .../integration/test_storage_hdfs/test.py | 0
 .../test_storage_kafka/__init__.py | 0
 .../format_schemas/kafka.proto | 0
 .../test_storage_kafka/configs/kafka.xml | 0
 .../test_storage_kafka/configs/log_conf.xml | 0
 .../test_storage_kafka/configs/users.xml | 0
 .../test_storage_kafka/kafka_pb2.py | 0
 .../integration/test_storage_kafka/test.py | 0
 .../test_kafka_json.reference | 0
 .../test_kafka_virtual1.reference | 0
 .../test_kafka_virtual2.reference | 0
 .../test_storage_mysql/__init__.py | 0
 .../configs/remote_servers.xml | 0
 .../integration/test_storage_mysql/test.py | 0
 .../integration/test_storage_s3/__init__.py | 0
 .../config_for_test_remote_host_filter.xml | 0
 .../integration/test_storage_s3/test.py | 0
 .../test_system_merges/__init__.py | 0
 .../configs/config.d/cluster.xml | 0
 .../configs/logs_config.xml | 0
 .../integration/test_system_merges/test.py | 0
 .../test_system_queries/__init__.py | 0
 .../configs/config.d/clusters_config.xml | 0
 .../configs/config.d/dictionaries_config.xml | 0
 .../configs/config.d/query_log.xml | 0
 .../dictionary_clickhouse_cache.xml | 0
 .../dictionary_clickhouse_flat.xml | 0
 .../test_system_queries/configs/users.xml | 0
 .../integration/test_system_queries/test.py | 0
 .../test_text_log_level/__init__.py | 0
 .../configs/config.d/text_log.xml | 0
 .../integration/test_text_log_level/test.py | 0
 .../test_timezone_config/__init__.py | 0
 .../test_timezone_config/configs/config.xml | 0
 .../integration/test_timezone_config/test.py | 0
 .../integration/test_tmp_policy/__init__.py | 0
 .../config.d/storage_configuration.xml | 0
 .../integration/test_tmp_policy/test.py | 0
 .../integration/test_ttl_move/__init__.py | 0
 .../configs/config.d/cluster.xml | 0
 .../configs/config.d/instant_moves.xml | 0
 .../config.d/storage_configuration.xml | 0
 .../test_ttl_move/configs/logs_config.xml | 0
 .../integration/test_ttl_move/test.py | 0
 .../test_ttl_replicated/__init__.py | 0
 .../integration/test_ttl_replicated/test.py | 0
 .../integration/test_union_header/__init__.py | 0
 .../configs/remote_servers.xml | 0
 .../integration/test_union_header/test.py | 0
 .../test_user_ip_restrictions/__init__.py | 0
 .../configs/config_ipv6.xml | 0
 .../configs/users_ipv4.xml | 0
 .../configs/users_ipv6.xml | 0
 .../test_user_ip_restrictions/test.py | 0
 .../__init__.py | 0
 .../configs/config.xml | 0
 .../configs/users.xml | 0
 .../test_user_zero_database_access.py | 0
 .../__init__.py | 0
 .../test.py | 0
 .../test_zookeeper_config/__init__.py | 0
 .../configs/remote_servers.xml | 0
 .../configs/zookeeper_config_root_a.xml | 0
 .../configs/zookeeper_config_root_b.xml | 0
 .../zookeeper_config_with_password.xml | 0
 .../integration/test_zookeeper_config/test.py | 0
 {dbms/tests => tests}/ints_dictionary.xml | 0
 {dbms/tests => tests}/msan_suppressions.txt | 0
 .../accurate_comparisons.sh | 0
 .../perf_drafts/vert_merge/add_id_to_csv | 0
 .../perf_drafts/vert_merge/ontime.struct | 0
 .../perf_drafts/vert_merge/test_merges | 0
 .../vert_merge/wait_clickhouse_server | 0
 {dbms/tests => tests}/performance/IPv4.xml | 0
 {dbms/tests => tests}/performance/IPv6.xml | 0
 {dbms/tests => tests}/performance/README.md | 0
 .../performance/agg_functions_min_max_any.xml | 0
 .../performance/analyze_array_tuples.xml | 0
 .../performance/and_function.xml | 0
 .../performance/arithmetic.xml | 0
 .../tests => tests}/performance/array_auc.xml | 0
 .../performance/array_element.xml | 0
 .../performance/array_fill.xml | 0
 .../performance/array_join.xml | 0
 .../performance/array_reduce.xml | 0
 {dbms/tests => tests}/performance/base64.xml | 0
 .../performance/base64_hits.xml | 0
 .../tests => tests}/performance/basename.xml | 0
 .../tests => tests}/performance/bitCount.xml | 0
 .../bit_operations_fixed_string.xml | 0
 .../bit_operations_fixed_string_numbers.xml | 0
 .../performance/bloom_filter.xml | 0
 .../performance/bounding_ratio.xml | 0
 {dbms/tests => tests}/performance/cidr.xml | 0
 .../performance/codecs_float_insert.xml | 0
 .../performance/codecs_float_select.xml | 0
 .../performance/codecs_int_insert.xml | 0
 .../performance/codecs_int_select.xml | 0
 .../performance/collations.xml | 0
 .../performance/column_column_comparison.xml | 0
 .../performance/columns_hashing.xml | 0
 .../performance/complex_array_creation.xml | 0
 .../performance/concat_hits.xml | 0
 .../performance/conditional.xml | 0
 .../performance/consistent_hashes.xml | 0
 .../constant_column_comparison.xml | 0
 .../performance/constant_column_search.xml | 0
 {dbms/tests => tests}/performance/count.xml | 0
 .../performance/cpu_synthetic.xml | 0
 .../performance/create_benchmark_page.py | 0
 .../performance/cryptographic_hashes.xml | 0
 .../performance/date_parsing.xml | 0
 .../tests => tests}/performance/date_time.xml | 0
 .../performance/date_time_64.xml | 0
 .../performance/decimal_aggregates.xml | 0
 .../performance/early_constant_folding.xml | 0
 .../empty_string_deserialization.xml | 0
 .../empty_string_serialization.xml | 0
 {dbms/tests => tests}/performance/entropy.xml | 0
 .../first_significant_subdomain.xml | 0
 .../performance/fixed_string16.xml | 0
 .../performance/float_formatting.xml | 0
 .../performance/float_parsing.xml | 0
 .../performance/format_date_time.xml | 0
 .../performance/functions_coding.xml | 0
 .../performance/functions_geo.xml | 0
 .../performance/general_purpose_hashes.xml | 0
 .../general_purpose_hashes_on_UUID.xml | 0
 .../performance/generate_table_function.xml | 0
 .../performance/great_circle_dist.xml | 0
 .../performance/group_array_moving_sum.xml | 0
 {dbms/tests => tests}/performance/h3.xml | 0
 .../performance/if_array_num.xml | 0
 .../performance/if_array_string.xml | 0
 .../performance/if_string_const.xml | 0
 .../performance/if_string_hits.xml | 0
 .../performance/if_to_multiif.xml | 0
 .../performance/information_value.xml | 0
 .../insert_values_with_expressions.xml | 0
 .../inserts_arrays_lowcardinality.xml | 0
 .../performance/int_parsing.xml | 0
 .../performance/jit_large_requests.xml | 0
 .../performance/jit_small_requests.xml | 0
 .../performance/joins_in_memory.xml | 0
 .../performance/joins_in_memory_pmj.xml | 0
 .../performance/json_extract_rapidjson.xml | 0
 .../performance/json_extract_simdjson.xml | 0
 {dbms/tests => tests}/performance/leftpad.xml | 0
 .../performance/linear_regression.xml | 0
 .../performance/logical_functions_large.xml | 0
 .../performance/logical_functions_medium.xml | 0
 .../performance/logical_functions_small.xml | 0
 {dbms/tests => tests}/performance/math.xml | 0
 .../performance/merge_table_streams.xml | 0
 .../performance/merge_tree_huge_pk.xml | 0
 .../merge_tree_many_partitions.xml | 0
 .../merge_tree_many_partitions_2.xml | 0
 .../performance/merge_tree_simple_select.xml | 0
 .../performance/mingroupby-orderbylimit1.xml | 0
 {dbms/tests => tests}/performance/modulo.xml | 0
 .../performance/ngram_distance.xml | 0
 .../performance/number_formatting_formats.xml | 0
 .../tests => tests}/performance/nyc_taxi.xml | 0
 .../performance/order_by_decimals.xml | 0
 .../performance/order_by_read_in_order.xml | 0
 .../performance/order_by_single_column.xml | 0
 .../performance/parallel_insert.xml | 0
 .../performance/parse_engine_file.xml | 0
 .../performance/pre_limit_no_sorting.xml | 0
 .../tests => tests}/performance/prewhere.xml | 0
 .../performance/random_printable_ascii.xml | 0
 {dbms/tests => tests}/performance/range.xml | 0
 .../performance/read_hits_with_aio.xml | 0
 {dbms/tests => tests}/performance/right.xml | 0
 .../performance/round_down.xml | 0
 .../performance/round_methods.xml | 0
 {dbms/tests => tests}/performance/scalar.xml | 0
 .../performance/select_format.xml | 0
 {dbms/tests => tests}/performance/set.xml | 0
 .../tests => tests}/performance/set_hits.xml | 0
 .../tests => tests}/performance/set_index.xml | 0
 .../performance/simple_join_query.xml | 0
 .../performance/slices_hits.xml | 0
 {dbms/tests => tests}/performance/sort.xml | 0
 .../performance/string_join.xml | 0
 .../performance/string_set.xml | 0
 .../performance/string_sort.xml | 0
 {dbms/tests => tests}/performance/sum_map.xml | 0
 .../synthetic_hardware_benchmark.xml | 0
 .../performance/trim_numbers.xml | 0
 .../tests => tests}/performance/trim_urls.xml | 0
 .../performance/trim_whitespace.xml | 0
 {dbms/tests => tests}/performance/uniq.xml | 0
 .../tests => tests}/performance/url_hits.xml | 0
 .../vectorize_aggregation_combinators.xml | 0
 .../performance/visit_param_extract_raw.xml | 0
 {dbms/tests => tests}/performance/website.xml | 0
 {dbms/tests => tests}/queries/.gitignore | 0
 .../0_stateless/00001_select_1.reference | 0
 .../queries/0_stateless/00001_select_1.sql | 0
 .../00002_system_numbers.reference | 0
 .../0_stateless/00002_system_numbers.sql | 0
 .../00003_reinterpret_as_string.reference | 0
 .../00003_reinterpret_as_string.sql | 0
 ...hard_format_ast_and_remote_table.reference | 0
 ...0004_shard_format_ast_and_remote_table.sql | 0
 ...rmat_ast_and_remote_table_lambda.reference | 0
 ...ard_format_ast_and_remote_table_lambda.sql | 0
 ...00006_extremes_and_subquery_from.reference | 0
 .../00006_extremes_and_subquery_from.sql | 0
 .../queries/0_stateless/00007_array.reference | 0
 .../queries/0_stateless/00007_array.sql | 0
 .../0_stateless/00008_array_join.reference | 0
 .../queries/0_stateless/00008_array_join.sql | 0
 .../00009_array_join_subquery.reference | 0
 .../0_stateless/00009_array_join_subquery.sql | 0
 .../00010_big_array_join.reference | 0
 .../0_stateless/00010_big_array_join.sql | 0
 .../00011_array_join_alias.reference | 0
 .../0_stateless/00011_array_join_alias.sql | 0
 .../00012_array_join_alias_2.reference | 0
 .../0_stateless/00012_array_join_alias_2.sql | 0
 .../00013_create_table_with_arrays.reference | 0
 .../00013_create_table_with_arrays.sql | 0
 ...14_select_from_table_with_nested.reference | 0
 .../00014_select_from_table_with_nested.sql | 0
 .../00015_totals_having_constants.reference | 0
 .../00015_totals_having_constants.sql | 0
 .../00016_totals_having_constants.reference | 0
 .../00016_totals_having_constants.sql | 0
 ...17_in_subquery_with_empty_result.reference | 0
 .../00017_in_subquery_with_empty_result.sql | 0
 .../00018_distinct_in_subquery.reference | 0
 .../00018_distinct_in_subquery.sql | 0
 ...ard_quantiles_totals_distributed.reference | 0
 ...019_shard_quantiles_totals_distributed.sql | 0
 .../00020_sorting_arrays.reference | 0
 .../0_stateless/00020_sorting_arrays.sql | 0
 .../00021_sorting_arrays.reference | 0
 .../0_stateless/00021_sorting_arrays.sql | 0
 ..._func_higher_order_and_constants.reference | 0
 .../00022_func_higher_order_and_constants.sql | 0
 .../00023_agg_select_agg_subquery.reference | 0
 .../00023_agg_select_agg_subquery.sql | 0
 ...24_unused_array_join_in_subquery.reference | 0
 .../00024_unused_array_join_in_subquery.sql | 0
 ..._implicitly_used_subquery_column.reference | 0
 .../00025_implicitly_used_subquery_column.sql | 0
 ...0026_shard_something_distributed.reference | 0
 .../00026_shard_something_distributed.sql | 0
 .../00027_distinct_and_order_by.reference | 0
 .../00027_distinct_and_order_by.sql | 0
 .../00027_simple_argMinArray.reference | 0
 .../0_stateless/00027_simple_argMinArray.sql | 0
 ...028_shard_big_agg_aj_distributed.reference | 0
 .../00028_shard_big_agg_aj_distributed.sql | 0
 ...est_zookeeper_optimize_exception.reference | 0
 ...00029_test_zookeeper_optimize_exception.sh | 0
 .../0_stateless/00030_alter_table.reference | 0
 .../queries/0_stateless/00030_alter_table.sql | 0
 .../0_stateless/00031_parser_number.reference | 0
 .../0_stateless/00031_parser_number.sql | 0
 .../00032_fixed_string_to_string.reference | 0
 .../00032_fixed_string_to_string.sql | 0
 .../00033_fixed_string_to_string.reference | 0
 .../00033_fixed_string_to_string.sql | 0
 .../00034_fixed_string_to_number.reference | 0
 .../00034_fixed_string_to_number.sql | 0
 ...00035_function_array_return_type.reference | 0
 .../00035_function_array_return_type.sql | 0
 .../0_stateless/00036_array_element.reference | 0
 .../0_stateless/00036_array_element.sql | 0
 .../0_stateless/00037_totals_limit.reference | 0
 .../0_stateless/00037_totals_limit.sql | 0
 .../0_stateless/00038_totals_limit.reference | 0
 .../0_stateless/00038_totals_limit.sql | 0
 .../00039_inserts_through_http.reference | 0
 .../0_stateless/00039_inserts_through_http.sh | 0
 .../00040_array_enumerate_uniq.reference | 0
 .../00040_array_enumerate_uniq.sql | 0
 .../00041_aggregation_remap.reference | 0
 .../0_stateless/00041_aggregation_remap.sql | 0
 .../00041_big_array_join.reference | 0
 .../0_stateless/00041_big_array_join.sql | 0
 .../queries/0_stateless/00042_set.reference | 0
 .../queries/0_stateless/00042_set.sql | 0
 .../00043_summing_empty_part.reference | 0
 .../0_stateless/00043_summing_empty_part.sql | 0
 ...044_sorting_by_string_descending.reference | 0
 .../00044_sorting_by_string_descending.sql | 0
 ...rting_by_fixed_string_descending.reference | 0
 ...045_sorting_by_fixed_string_descending.sql | 0
 .../00046_stored_aggregates_simple.reference | 0
 .../00046_stored_aggregates_simple.sql | 0
 .../00047_stored_aggregates_complex.reference | 0
 .../00047_stored_aggregates_complex.sql | 0
 .../00048_a_stored_aggregates_merge.reference | 0
 .../00048_a_stored_aggregates_merge.sql | 0
 .../00048_b_stored_aggregates_merge.reference | 0
 .../00048_b_stored_aggregates_merge.sql | 0
 .../0_stateless/00049_any_left_join.reference | 0
 .../0_stateless/00049_any_left_join.sql | 0
 .../0_stateless/00050_any_left_join.reference | 0
 .../0_stateless/00050_any_left_join.sql | 0
 .../00051_any_inner_join.reference | 0
 .../0_stateless/00051_any_inner_join.sql | 0
 .../0_stateless/00052_all_left_join.reference | 0
 .../0_stateless/00052_all_left_join.sql | 0
 .../00053_all_inner_join.reference | 0
 .../0_stateless/00053_all_inner_join.sql | 0
 .../0_stateless/00054_join_string.reference | 0
 .../queries/0_stateless/00054_join_string.sql | 0
 .../00055_join_two_numbers.reference | 0
 .../0_stateless/00055_join_two_numbers.sql | 0
 .../00056_join_number_string.reference | 0
 .../0_stateless/00056_join_number_string.sql | 0
 .../0_stateless/00057_join_aliases.reference | 0
 .../0_stateless/00057_join_aliases.sql | 0
 .../00059_shard_global_in.reference | 0
 .../0_stateless/00059_shard_global_in.sql | 0
 .../0_stateless/00060_date_lut.reference | 0
 .../queries/0_stateless/00060_date_lut.sql | 0
 .../00061_merge_tree_alter.reference | 0
 .../0_stateless/00061_merge_tree_alter.sql | 0
 ...cated_merge_tree_alter_zookeeper.reference | 0
 ..._replicated_merge_tree_alter_zookeeper.sql | 0
 .../0_stateless/00063_check_query.reference | 0
 .../queries/0_stateless/00063_check_query.sql | 0
 .../0_stateless/00064_negate_bug.reference | 0
 .../queries/0_stateless/00064_negate_bug.sql | 0
 ..._shard_float_literals_formatting.reference | 0
 .../00065_shard_float_literals_formatting.sql | 0
 .../0_stateless/00066_group_by_in.reference | 0
 .../queries/0_stateless/00066_group_by_in.sql | 0
 .../00067_replicate_segfault.reference | 0
 .../0_stateless/00067_replicate_segfault.sql | 0
 .../00068_empty_tiny_log.reference | 0
 .../0_stateless/00068_empty_tiny_log.sql | 0
 .../00069_date_arithmetic.reference | 0
 .../0_stateless/00069_date_arithmetic.sql | 0
 .../00070_insert_fewer_columns_http.reference | 0
 .../00070_insert_fewer_columns_http.sh | 0
 .../00071_insert_fewer_columns.reference | 0
 .../00071_insert_fewer_columns.sql | 0
 .../0_stateless/00072_in_types.reference | 0
 .../queries/0_stateless/00072_in_types.sql | 0
 ...merge_sorting_empty_array_joined.reference | 0
 ...00073_merge_sorting_empty_array_joined.sql | 0
 ...tting_negate_of_negative_literal.reference | 0
 ..._formatting_negate_of_negative_literal.sql | 0
 .../00076_ip_coding_functions.reference | 0
 .../0_stateless/00076_ip_coding_functions.sql | 0
 ...et_keys_fit_128_bits_many_blocks.reference | 0
 ...0077_set_keys_fit_128_bits_many_blocks.sql | 0
 .../0_stateless/00078_string_concat.reference | 0
 .../0_stateless/00078_string_concat.sql | 0
 .../00079_defaulted_columns.reference | 0
 .../0_stateless/00079_defaulted_columns.sql | 0
 ...80_show_tables_and_system_tables.reference | 0
 .../00080_show_tables_and_system_tables.sql | 0
 .../00081_int_div_or_zero.reference | 0
 .../0_stateless/00081_int_div_or_zero.sql | 0
 ...2_append_trailing_char_if_absent.reference | 0
 .../00082_append_trailing_char_if_absent.sql | 0
 ...0083_create_merge_tree_zookeeper.reference | 0
 .../00083_create_merge_tree_zookeeper.sql | 0
 .../00084_summing_merge_tree.reference | 0
 .../0_stateless/00084_summing_merge_tree.sql | 0
 ..._visible_width_of_tuple_of_dates.reference | 0
 .../00085_visible_width_of_tuple_of_dates.sql | 0
 ...ary_const_with_nonconst_segfault.reference | 0
 ...ncat_nary_const_with_nonconst_segfault.sql | 0
 .../00087_distinct_of_empty_arrays.reference | 0
 .../00087_distinct_of_empty_arrays.sql | 0
 .../00087_math_functions.reference | 0
 .../0_stateless/00087_math_functions.sql | 0
 ...88_distinct_of_arrays_of_strings.reference | 0
 .../00088_distinct_of_arrays_of_strings.sql | 0
 .../00089_group_by_arrays_of_fixed.reference | 0
 .../00089_group_by_arrays_of_fixed.sql | 0
 .../00090_union_race_conditions_1.reference | 0
 .../00090_union_race_conditions_1.sh | 0
 .../00091_union_race_conditions_2.reference | 0
 .../00091_union_race_conditions_2.sh | 0
 .../00092_union_race_conditions_3.reference | 0
 .../00092_union_race_conditions_3.sh | 0
 .../00093_union_race_conditions_4.reference | 0
 .../00093_union_race_conditions_4.sh | 0
 .../00094_union_race_conditions_5.reference | 0
 .../00094_union_race_conditions_5.sh | 0
 .../00096_aggregation_min_if.reference | 0
 .../0_stateless/00096_aggregation_min_if.sql | 0
 ...ng_storage_buffer_race_condition.reference | 0
 ...0097_long_storage_buffer_race_condition.sh | 0
 ...storage_buffer_race_condition_mt.reference | 0
 ...7_long_storage_buffer_race_condition_mt.sh | 0
 .../0_stateless/00098_1_union_all.reference | 0
 .../queries/0_stateless/00098_1_union_all.sql | 0
 .../0_stateless/00098_2_union_all.reference | 0
 .../queries/0_stateless/00098_2_union_all.sql | 0
 .../0_stateless/00098_3_union_all.reference | 0
 .../queries/0_stateless/00098_3_union_all.sql | 0
 .../0_stateless/00098_4_union_all.reference | 0
 .../queries/0_stateless/00098_4_union_all.sql | 0
 .../0_stateless/00098_5_union_all.reference | 0
 .../queries/0_stateless/00098_5_union_all.sql | 0
 .../0_stateless/00098_6_union_all.reference | 0
 .../queries/0_stateless/00098_6_union_all.sql | 0
 .../0_stateless/00098_7_union_all.reference | 0
 .../queries/0_stateless/00098_7_union_all.sql | 0
 .../0_stateless/00098_8_union_all.reference | 0
 .../queries/0_stateless/00098_8_union_all.sql | 0
 .../0_stateless/00098_9_union_all.reference | 0
 .../queries/0_stateless/00098_9_union_all.sql | 0
 .../0_stateless/00098_a_union_all.reference | 0
 .../queries/0_stateless/00098_a_union_all.sql | 0
 .../0_stateless/00098_b_union_all.reference | 0
 .../queries/0_stateless/00098_b_union_all.sql | 0
 .../0_stateless/00098_c_union_all.reference | 0
 .../queries/0_stateless/00098_c_union_all.sql | 0
 .../0_stateless/00098_d_union_all.reference | 0
 .../queries/0_stateless/00098_d_union_all.sql | 0
 .../0_stateless/00098_e_union_all.reference | 0
 .../queries/0_stateless/00098_e_union_all.sql | 0
 .../0_stateless/00098_f_union_all.reference | 0
 .../queries/0_stateless/00098_f_union_all.sql | 0
 .../0_stateless/00098_g_union_all.reference | 0
 .../queries/0_stateless/00098_g_union_all.sql | 0
 .../0_stateless/00098_h_union_all.reference | 0
 .../queries/0_stateless/00098_h_union_all.sql | 0
 .../0_stateless/00098_j_union_all.reference | 0
 .../queries/0_stateless/00098_j_union_all.sql | 0
 .../0_stateless/00098_k_union_all.reference | 0
 .../queries/0_stateless/00098_k_union_all.sql | 0
 .../0_stateless/00098_l_union_all.reference | 0
 .../queries/0_stateless/00098_l_union_all.sql | 0
 .../00098_shard_i_union_all.reference | 0
 .../0_stateless/00098_shard_i_union_all.sql | 0
 .../00099_join_many_blocks_segfault.reference | 0
 .../00099_join_many_blocks_segfault.sql | 0
 .../00100_subquery_table_identifier.reference | 0
 .../00100_subquery_table_identifier.sh | 0
 ...insert_without_explicit_database.reference | 0
 ...s_and_insert_without_explicit_database.sql | 0
 ...0102_insert_into_temporary_table.reference | 0
 .../00102_insert_into_temporary_table.sql | 0
 ...00103_ipv4_num_to_string_class_c.reference | 0
 .../00103_ipv4_num_to_string_class_c.sql | 0
 .../00104_totals_having_mode.reference | 0
 .../0_stateless/00104_totals_having_mode.sql | 0
 .../00105_shard_collations.reference | 0
 .../0_stateless/00105_shard_collations.sql | 0
 .../00106_totals_after_having.reference | 0
 .../0_stateless/00106_totals_after_having.sql | 0
 .../00107_totals_after_having.reference | 0
 .../0_stateless/00107_totals_after_having.sql | 0
 .../00108_shard_totals_after_having.reference | 0
 .../00108_shard_totals_after_having.sql | 0
 .../00109_shard_totals_after_having.reference | 0
 .../00109_shard_totals_after_having.sql | 0
 .../0_stateless/00110_external_sort.reference | 0
 .../0_stateless/00110_external_sort.sql | 0
 ..._shard_external_sort_distributed.reference | 0
 .../00111_shard_external_sort_distributed.sql | 0
 .../00112_shard_totals_after_having.reference | 0
 .../00112_shard_totals_after_having.sql | 0
 .../00113_shard_group_array.reference | 0
 .../0_stateless/00113_shard_group_array.sql | 0
 ...14_float_type_result_of_division.reference | 0
 .../00114_float_type_result_of_division.sql | 0
 ...00115_shard_in_incomplete_result.reference | 0
 .../00115_shard_in_incomplete_result.sh | 0
 .../0_stateless/00116_storage_set.reference | 0
 .../queries/0_stateless/00116_storage_set.sql | 0
 .../00117_parsing_arrays.reference | 0
 .../0_stateless/00117_parsing_arrays.sql | 0
 .../0_stateless/00118_storage_join.reference | 0
 .../0_stateless/00118_storage_join.sql | 0
 .../0_stateless/00119_storage_join.reference | 0
 .../0_stateless/00119_storage_join.sql | 0
 .../00120_join_and_group_by.reference | 0
 .../0_stateless/00120_join_and_group_by.sql | 0
 .../00121_drop_column_zookeeper.reference | 0
 .../00121_drop_column_zookeeper.sql | 0
 ...join_with_subquery_with_subquery.reference | 0
 ...00122_join_with_subquery_with_subquery.sql | 0
 ...x_distributed_connections_is_one.reference | 0
 ...hen_max_distributed_connections_is_one.sql | 0
 ...d_distributed_with_many_replicas.reference | 0
 ...4_shard_distributed_with_many_replicas.sql | 0
 ..._array_element_of_array_of_tuple.reference | 0
 .../00125_array_element_of_array_of_tuple.sql | 0
 .../0_stateless/00126_buffer.reference | 0
 .../queries/0_stateless/00126_buffer.sql | 0
 .../00127_group_by_concat.reference | 0
 .../0_stateless/00127_group_by_concat.sql | 0
 ...group_by_number_and_fixed_string.reference | 0
 ...00128_group_by_number_and_fixed_string.sql | 0
 .../00129_quantile_timing_weighted.reference | 0
 .../00129_quantile_timing_weighted.sql | 0
 .../0_stateless/00131_set_hashed.reference | 0
 .../queries/0_stateless/00131_set_hashed.sql | 0
 .../queries/0_stateless/00132_sets.reference | 0
 .../queries/0_stateless/00132_sets.sql | 0
 ...ory_tracker_and_exception_safety.reference | 0
 ...ard_memory_tracker_and_exception_safety.sh | 0
 ..._by_fixed_string_of_size_1_2_4_8.reference | 0
 ...gation_by_fixed_string_of_size_1_2_4_8.sql | 0
 ...duplicate_group_by_keys_segfault.reference | 0
 ...00135_duplicate_group_by_keys_segfault.sql | 0
 .../00136_duplicate_order_by_elems.reference | 0
 .../00136_duplicate_order_by_elems.sql | 0
 .../0_stateless/00137_in_constants.reference | 0
 .../0_stateless/00137_in_constants.sql | 0
 .../0_stateless/00138_table_aliases.reference | 0
 .../0_stateless/00138_table_aliases.sql | 0
 ...parse_unix_timestamp_as_datetime.reference | 0
 ...00140_parse_unix_timestamp_as_datetime.sql | 0
 .../00140_prewhere_column_order.reference | 0
 .../00140_prewhere_column_order.sql | 0
 ...0141_parse_timestamp_as_datetime.reference | 0
 .../00141_parse_timestamp_as_datetime.sql | 0
 ...0142_parse_timestamp_as_datetime.reference | 0
 .../00142_parse_timestamp_as_datetime.sql | 0
 ..._number_classification_functions.reference | 0
 .../00143_number_classification_functions.sql | 0
 .../0_stateless/00144_empty_regexp.reference | 0
 .../0_stateless/00144_empty_regexp.sql | 0
 .../0_stateless/00145_empty_likes.reference | 0
 .../queries/0_stateless/00145_empty_likes.sql | 0
 ...46_summing_merge_tree_nested_map.reference | 0
 .../00146_summing_merge_tree_nested_map.sql | 0
 .../00147_alter_nested_default.reference | 0
 .../00147_alter_nested_default.sql | 0
 ...ng_merge_tree_aggregate_function.reference | 0
 ..._summing_merge_tree_aggregate_function.sql | 0
 ..._tree_nested_map_multiple_values.reference | 0
 ..._merge_tree_nested_map_multiple_values.sql | 0
 .../00149_function_url_hash.reference | 0
 .../0_stateless/00149_function_url_hash.sql | 0
 .../00150_with_totals_and_join.reference | 0
 .../00150_with_totals_and_join.sql | 0
 .../00151_tuple_with_array.reference | 0
 .../0_stateless/00151_tuple_with_array.sql | 0
 .../00152_totals_in_subquery.reference | 0
 .../0_stateless/00152_totals_in_subquery.sql | 0
 .../0_stateless/00153_transform.reference | 0
 .../queries/0_stateless/00153_transform.sql | 0
 ..._shard_distributed_with_distinct.reference | 0
 .../00154_shard_distributed_with_distinct.sql | 0
 .../0_stateless/00155_long_merges.reference | 0
 .../queries/0_stateless/00155_long_merges.sh | 0
 .../00156_array_map_to_constant.reference | 0
 .../00156_array_map_to_constant.sql | 0
 ...ses_and_lambda_formal_parameters.reference | 0
 ...7_aliases_and_lambda_formal_parameters.sql | 0
 ...158_buffer_and_nonexistent_table.reference | 0
 .../00158_buffer_and_nonexistent_table.sql | 0
 ...00159_whitespace_in_columns_list.reference | 0
 .../00159_whitespace_in_columns_list.sql | 0
 .../00160_merge_and_index_in_in.reference | 0
 .../00160_merge_and_index_in_in.sql | 0
 .../00161_rounding_functions.reference | 0
 .../0_stateless/00161_rounding_functions.sql | 0
 .../00162_shard_global_join.reference | 0
 .../0_stateless/00162_shard_global_join.sql | 0
 ...0163_shard_join_with_empty_table.reference | 0
 .../00163_shard_join_with_empty_table.sql | 0
 .../0_stateless/00164_not_chain.reference | 0
 .../queries/0_stateless/00164_not_chain.sql | 0
 ...0165_transform_non_const_default.reference | 0
 .../00165_transform_non_const_default.sql | 0
 ..._functions_of_aggregation_states.reference | 0
 .../00166_functions_of_aggregation_states.sql | 0
 .../00167_settings_inside_query.reference | 0
 .../00167_settings_inside_query.sql | 0
 .../00168_buffer_defaults.reference | 0
 .../0_stateless/00168_buffer_defaults.sql | 0
 .../00169_join_constant_keys.reference | 0
 .../0_stateless/00169_join_constant_keys.sql | 0
 .../00170_lower_upper_utf8.reference | 0
 .../0_stateless/00170_lower_upper_utf8.sql | 0
 ...0171_shard_array_of_tuple_remote.reference | 0
 .../00171_shard_array_of_tuple_remote.sql | 0
 .../00172_constexprs_in_set.reference | 0
 .../0_stateless/00172_constexprs_in_set.sql | 0
 ...e_date_time_with_constant_string.reference | 0
 ...compare_date_time_with_constant_string.sql | 0
 ..._time_with_constant_string_in_in.reference | 0
 ...e_date_time_with_constant_string_in_in.sql | 0
 .../0_stateless/00175_if_num_arrays.reference | 0
 .../0_stateless/00175_if_num_arrays.sql | 0
 .../00176_if_string_arrays.reference | 0
 .../0_stateless/00176_if_string_arrays.sql | 0
 ...00177_inserts_through_http_parts.reference | 0
 .../00177_inserts_through_http_parts.sh | 0
 .../00178_function_replicate.reference | 0
 .../0_stateless/00178_function_replicate.sql | 0
 ...th_common_expressions_and_filter.reference | 0
 ...das_with_common_expressions_and_filter.sql | 0
 .../00180_attach_materialized_view.reference | 0
 .../00180_attach_materialized_view.sql | 0
 ...1_aggregate_functions_statistics.reference | 0
 .../00181_aggregate_functions_statistics.sql | 0
 ...gate_functions_statistics_stable.reference | 0
 ..._aggregate_functions_statistics_stable.sql | 0
 ...unctions_higher_order_and_consts.reference | 0
 ...0182_functions_higher_order_and_consts.sql | 0
 .../00183_skip_unavailable_shards.reference | 0
 .../00183_skip_unavailable_shards.sql | 0
 ...rd_distributed_group_by_no_merge.reference | 0
 ...84_shard_distributed_group_by_no_merge.sql | 0
 .../00185_array_literals.reference | 0
 .../0_stateless/00185_array_literals.sql | 0
 .../00186_very_long_arrays.reference | 0
 .../0_stateless/00186_very_long_arrays.sh | 0
 .../00187_like_regexp_prefix.reference | 0
 .../0_stateless/00187_like_regexp_prefix.sql | 0
 ...arguments_of_aggregate_functions.reference | 0
 ...ts_as_arguments_of_aggregate_functions.sql | 0
 .../0_stateless/00189_time_zones.reference | 0
 .../queries/0_stateless/00189_time_zones.sql | 0
 ..._constant_array_of_constant_data.reference | 0
 ...90_non_constant_array_of_constant_data.sql | 0
 ...aggregating_merge_tree_and_final.reference | 0
 ...00191_aggregating_merge_tree_and_final.sql | 0
 .../00192_least_greatest.reference | 0
 .../0_stateless/00192_least_greatest.sql | 0
 .../00193_parallel_replicas.reference | 0
 .../0_stateless/00193_parallel_replicas.sql | 0
 .../0_stateless/00194_identity.reference | 0
 .../queries/0_stateless/00194_identity.sql | 0
 ...95_shard_union_all_and_global_in.reference | 0
 .../00195_shard_union_all_and_global_in.sql | 0
 .../00196_float32_formatting.reference | 0
 .../0_stateless/00196_float32_formatting.sql | 0
 .../00197_if_fixed_string.reference | 0
 .../0_stateless/00197_if_fixed_string.sql | 0
 .../00198_group_by_empty_arrays.reference | 0
 .../00198_group_by_empty_arrays.sql | 0
 ...0199_ternary_operator_type_check.reference | 0
 .../00199_ternary_operator_type_check.sql | 0
 ...tinct_order_by_limit_distributed.reference | 0
 ...rd_distinct_order_by_limit_distributed.sql | 0
 .../0_stateless/00201_array_uniq.reference | 0
 .../queries/0_stateless/00201_array_uniq.sql | 0
 .../0_stateless/00202_cross_join.reference | 0
 .../queries/0_stateless/00202_cross_join.sql | 0
 .../0_stateless/00203_full_join.reference | 0
 .../queries/0_stateless/00203_full_join.sql | 0
 .../00204_extract_url_parameter.reference | 0
 .../00204_extract_url_parameter.sql | 0
 .../00205_scalar_subqueries.reference | 0
 .../0_stateless/00205_scalar_subqueries.sql | 0
 .../00206_empty_array_to_single.reference | 0
 .../00206_empty_array_to_single.sql | 0
 .../00207_left_array_join.reference | 0
 .../0_stateless/00207_left_array_join.sql | 0
 .../00208_agg_state_merge.reference | 0
 .../0_stateless/00208_agg_state_merge.sql | 0
 .../00209_insert_select_extremes.reference | 0
 .../00209_insert_select_extremes.sql | 0
 ...0210_insert_select_extremes_http.reference | 0
 .../00210_insert_select_extremes_http.sh | 0
 ...1_shard_query_formatting_aliases.reference | 0
 .../00211_shard_query_formatting_aliases.sql | 0
 ...12_shard_aggregate_function_uniq.reference | 0
 .../00212_shard_aggregate_function_uniq.sql | 0
 .../00213_multiple_global_in.reference | 0
 .../0_stateless/00213_multiple_global_in.sql | 0
 .../00214_primary_key_order.reference | 0
 .../0_stateless/00214_primary_key_order.sql | 0
 ...0215_primary_key_order_zookeeper.reference | 0
 .../00215_primary_key_order_zookeeper.sql | 0
 .../00216_bit_test_function_family.reference | 0
 .../00216_bit_test_function_family.sql | 0
 ..._subquery_columns_with_same_name.reference | 0
 ...global_subquery_columns_with_same_name.sql | 0
 .../00218_like_regexp_newline.reference | 0
 .../0_stateless/00218_like_regexp_newline.sql | 0
 ...219_full_right_join_column_order.reference | 0
 .../00219_full_right_join_column_order.sql | 0
 ...als_in_subquery_remote_and_limit.reference | 0
 ...th_totals_in_subquery_remote_and_limit.sql | 0
 ...quence_aggregate_function_family.reference | 0
 ...222_sequence_aggregate_function_family.sql | 0
 ...ted_aggregation_memory_efficient.reference | 0
 ...stributed_aggregation_memory_efficient.sql | 0
 ...n_memory_efficient_and_overflows.reference | 0
 ...egation_memory_efficient_and_overflows.sql | 0
 .../00225_join_duplicate_columns.reference | 0
 .../00225_join_duplicate_columns.sql | 0
 ...duplication_and_unexpected_parts.reference | 0
 ...per_deduplication_and_unexpected_parts.sql | 0
 ...quantiles_timing_arbitrary_order.reference | 0
 ...00227_quantiles_timing_arbitrary_order.sql | 0
 ...les_deterministic_merge_overflow.reference | 0
 ...quantiles_deterministic_merge_overflow.sql | 0
 .../00229_prewhere_column_missing.reference | 0
 .../00229_prewhere_column_missing.sql | 0
 ...al_index_of_non_const_second_arg.reference | 0
 ...nt_equal_index_of_non_const_second_arg.sql | 0
 .../00231_format_vertical_raw.reference | 0
 .../0_stateless/00231_format_vertical_raw.sql | 0
 .../00232_format_readable_size.reference | 0
 .../00232_format_readable_size.sql | 0
 .../00233_position_function_family.reference | 0
 .../00233_position_function_family.sql | 0
 ...ive_equality_chains_optimization.reference | 0
 ...sjunctive_equality_chains_optimization.sql | 0
 .../00235_create_temporary_table_as.reference | 0
 .../00235_create_temporary_table_as.sql | 0
 ...ted_drop_on_non_leader_zookeeper.reference | 0
 ...eplicated_drop_on_non_leader_zookeeper.sql | 0
 .../00237_group_by_arrays.reference | 0
 .../0_stateless/00237_group_by_arrays.sql | 0
 ...238_removal_of_temporary_columns.reference | 0
 .../00238_removal_of_temporary_columns.sql | 0
 .../00239_type_conversion_in_in.reference | 0
 .../00239_type_conversion_in_in.sql | 0
 .../00240_replace_substring_loop.reference | 0
 .../00240_replace_substring_loop.sql | 0
 .../00250_tuple_comparison.reference | 0
 .../0_stateless/00250_tuple_comparison.sql | 0
 .../0_stateless/00251_has_types.reference | 0
 .../queries/0_stateless/00251_has_types.sql | 0
 ...ard_global_in_aggregate_function.reference | 0
 ...252_shard_global_in_aggregate_function.sql | 0
 .../00253_insert_recursive_defaults.reference | 0
 .../00253_insert_recursive_defaults.sql | 0
 .../00254_tuple_extremes.reference | 0
 .../0_stateless/00254_tuple_extremes.sql | 0
 .../00255_array_concat_string.reference | 0
 .../0_stateless/00255_array_concat_string.sql | 0
 .../0_stateless/00256_reverse.reference | 0
 .../queries/0_stateless/00256_reverse.sql | 0
 ..._no_aggregates_and_constant_keys.reference | 0
 ..._shard_no_aggregates_and_constant_keys.sql | 0
 .../00258_materializing_tuples.reference | 0
 .../00258_materializing_tuples.sql | 0
 .../00259_hashing_tuples.reference | 0
 .../0_stateless/00259_hashing_tuples.sql | 0
 .../00260_like_and_curly_braces.reference | 0
 .../00260_like_and_curly_braces.sql | 0
 ...1_storage_aliases_and_array_join.reference | 0
 .../00261_storage_aliases_and_array_join.sql | 0
 .../0_stateless/00262_alter_alias.reference | 0
 .../queries/0_stateless/00262_alter_alias.sql | 0
 ...63_merge_aggregates_and_overflow.reference | 0
 .../00263_merge_aggregates_and_overflow.sql | 0
 .../00264_uniq_many_args.reference | 0
 .../0_stateless/00264_uniq_many_args.sql | 0
 ...ttp_content_type_format_timezone.reference | 0
 ...00265_http_content_type_format_timezone.sh | 0
 .../00266_read_overflow_mode.reference | 0
 .../0_stateless/00266_read_overflow_mode.sql | 0
 ...hard_global_subquery_and_aliases.reference | 0
 ...0266_shard_global_subquery_and_aliases.sql | 0
 ..._array_access_operators_priority.reference | 0
 ..._tuple_array_access_operators_priority.sql | 0
 ...00268_aliases_without_as_keyword.reference | 0
 .../00268_aliases_without_as_keyword.sql | 0
 .../00269_database_table_whitespace.reference | 0
 .../00269_database_table_whitespace.sql | 0
 ...270_views_query_processing_stage.reference | 0
 .../00270_views_query_processing_stage.sql | 0
 .../00271_agg_state_and_totals.reference | 0
 .../00271_agg_state_and_totals.sql | 0
 .../00272_union_all_and_in_subquery.reference | 0
 .../00272_union_all_and_in_subquery.sql | 0
 .../0_stateless/00273_quantiles.reference | 0
 .../queries/0_stateless/00273_quantiles.sql | 0
 .../00274_shard_group_array.reference | 0
 .../0_stateless/00274_shard_group_array.sql | 0
 .../00275_shard_quantiles_weighted.reference | 0
 .../00275_shard_quantiles_weighted.sql | 0
 .../0_stateless/00276_sample.reference | 0
 .../queries/0_stateless/00276_sample.sql | 0
 .../0_stateless/00277_array_filter.reference | 0
 .../0_stateless/00277_array_filter.sql | 0
 .../00278_insert_already_sorted.reference | 0
 .../00278_insert_already_sorted.sql | 0
 .../00279_quantiles_permuted_args.reference | 0
 .../00279_quantiles_permuted_args.sql | 0
 .../00280_hex_escape_sequence.reference | 0
 .../0_stateless/00280_hex_escape_sequence.sql | 0
 .../00281_compile_sizeof_packed.re | 0
 .../0_stateless/00282_merging.reference | 0
 .../queries/0_stateless/00282_merging.sql | 0
 .../0_stateless/00283_column_cut.reference | 0
 .../queries/0_stateless/00283_column_cut.sql | 0
 .../00284_external_aggregation.reference | 0
 .../00284_external_aggregation.sql | 0
 .../00285_not_all_data_in_totals.reference | 0
 .../00285_not_all_data_in_totals.sql | 0
 ...00286_format_long_negative_float.reference | 0
 .../00286_format_long_negative_float.sql | 0
 .../00287_column_const_with_nan.reference | 0
 .../00287_column_const_with_nan.sql | 0
 .../00288_empty_stripelog.reference | 0
 .../0_stateless/00288_empty_stripelog.sql | 0
 ...ard_aggregation_memory_efficient.reference | 0
 ...290_shard_aggregation_memory_efficient.sql | 0
 .../0_stateless/00291_array_reduce.reference | 0
 .../0_stateless/00291_array_reduce.sql | 0
 .../00292_parser_tuple_element.reference | 0
 .../00292_parser_tuple_element.sql | 0
 .../00293_shard_max_subquery_depth.reference | 0
 .../00293_shard_max_subquery_depth.sql | 0
 .../0_stateless/00294_shard_enums.reference | 0
 .../queries/0_stateless/00294_shard_enums.sql | 0
 ...l_in_one_shard_rows_before_limit.reference | 0
 ..._global_in_one_shard_rows_before_limit.sql | 0
 .../00296_url_parameters.reference | 0
 .../0_stateless/00296_url_parameters.sql | 0
 ...ttach_negative_numbers_zookeeper.reference | 0
 ...ch_negative_numbers_zookeeper.sql.disabled | 0
 .../00298_enum_width_and_cast.reference | 0
 .../0_stateless/00298_enum_width_and_cast.sql | 0
 ...0299_stripe_log_multiple_inserts.reference | 0
 .../00299_stripe_log_multiple_inserts.sql | 0
 .../queries/0_stateless/00300_csv.reference | 0
 .../queries/0_stateless/00300_csv.sql | 0
 .../queries/0_stateless/00301_csv.reference | 0
 .../queries/0_stateless/00301_csv.sh | 0
 .../00302_http_compression.reference | 0
 .../0_stateless/00302_http_compression.sh | 0
 .../00304_http_external_data.reference | 0
 .../0_stateless/00304_http_external_data.sh | 0
 .../00305_http_and_readonly.reference | 0
 .../0_stateless/00305_http_and_readonly.sh | 0
 ...06_insert_values_and_expressions.reference | 0
 .../00306_insert_values_and_expressions.sql | 0
 .../0_stateless/00307_format_xml.reference | 0
 .../queries/0_stateless/00307_format_xml.sql | 0
 .../00308_write_buffer_valid_utf8.reference | 0
 .../00308_write_buffer_valid_utf8.sql | 0
 .../0_stateless/00309_formats.reference | Bin
 .../queries/0_stateless/00309_formats.sql | 0
 .../queries/0_stateless/00310_tskv.reference | 0
 .../queries/0_stateless/00310_tskv.sh | 0
 .../00311_array_primary_key.reference | 0
 .../0_stateless/00311_array_primary_key.sql | 0
 ...2_position_case_insensitive_utf8.reference | 0
 .../00312_position_case_insensitive_utf8.sql | 0
 .../00313_const_totals_extremes.reference | 0
 .../00313_const_totals_extremes.sh | 0
 ...314_sample_factor_virtual_column.reference | 0
 .../00314_sample_factor_virtual_column.sql | 0
 .../00315_quantile_off_by_one.reference | 0
 .../0_stateless/00315_quantile_off_by_one.sql | 0
 ...unding_functions_and_empty_block.reference | 0
 ...316_rounding_functions_and_empty_block.sql | 0
 ...n_tuples_and_out_of_range_values.reference | 0
 ...0317_in_tuples_and_out_of_range_values.sql | 0
 .../00318_pk_tuple_order.reference | 0
 .../0_stateless/00318_pk_tuple_order.sql | 0
 .../00319_index_for_like.reference | 0
 .../0_stateless/00319_index_for_like.sql | 0
 .../0_stateless/00320_between.reference | 0
 .../queries/0_stateless/00320_between.sql | 0
 .../0_stateless/00321_pk_set.reference | 0
 .../queries/0_stateless/00321_pk_set.sql | 0
 .../00322_disable_checksumming.reference | 0
 .../0_stateless/00322_disable_checksumming.sh | 0
 .../00323_quantiles_timing_bug.reference | 0
 .../00323_quantiles_timing_bug.sql | 0
 .../0_stateless/00324_hashing_enums.reference | 0
 .../0_stateless/00324_hashing_enums.sql | 0
 .../00325_replacing_merge_tree.reference | 0
 .../00325_replacing_merge_tree.sql.disabled | 0
 .../00326_long_function_multi_if.reference | 0
 .../00326_long_function_multi_if.sql | 0
 .../00327_summing_composite_nested.reference | 0
 .../00327_summing_composite_nested.sql | 0
 .../00328_long_case_construction.reference | 0
 .../00328_long_case_construction.sql | 0
 .../00330_view_subqueries.reference | 0
 .../0_stateless/00330_view_subqueries.sql | 0
 .../00331_final_and_prewhere.reference | 0
 .../0_stateless/00331_final_and_prewhere.sql | 0
 ...0332_quantile_timing_memory_leak.reference | 0
 .../00332_quantile_timing_memory_leak.sql | 0
 .../00333_parser_number_bug.reference | 0
 .../0_stateless/00333_parser_number_bug.sql | 0
 ..._column_aggregate_function_limit.reference | 0
 .../00334_column_aggregate_function_limit.sql | 0
 .../queries/0_stateless/00335_bom.reference | 0
 .../queries/0_stateless/00335_bom.sh | 0
 .../00336_shard_stack_trace.reference | 0
 .../0_stateless/00336_shard_stack_trace.sh | 0
 .../00337_shard_any_heavy.reference | 0
 .../0_stateless/00337_shard_any_heavy.sql | 0
 ...00338_replicate_array_of_strings.reference | 0
 .../00338_replicate_array_of_strings.sql | 0
 .../00339_parsing_bad_arrays.reference | 0
 .../0_stateless/00339_parsing_bad_arrays.sh | 0
 .../00340_squashing_insert_select.reference | 0
 .../00340_squashing_insert_select.sql | 0
 .../00341_squashing_insert_select2.reference | 0
 .../00341_squashing_insert_select2.sql | 0
 .../00342_escape_sequences.reference | 0
 .../0_stateless/00342_escape_sequences.sql | 0
 .../00343_array_element_generic.reference | 0
 .../00343_array_element_generic.sql | 0
 .../00344_row_number_in_all_blocks.reference | 0
 .../00344_row_number_in_all_blocks.sql | 0
 .../00345_index_accurate_comparison.reference | 0
 .../00345_index_accurate_comparison.sql | 0
 .../0_stateless/00346_if_tuple.reference | 0
 .../queries/0_stateless/00346_if_tuple.sql | 0
 .../0_stateless/00347_has_tuple.reference | 0
 .../queries/0_stateless/00347_has_tuple.sql | 0
 .../0_stateless/00348_tuples.reference | 0
 .../queries/0_stateless/00348_tuples.sql | 0
 .../0_stateless/00349_visible_width.reference | 0
 .../0_stateless/00349_visible_width.sql | 0
 .../00350_count_distinct.reference | 0
 .../0_stateless/00350_count_distinct.sql | 0
 ...51_select_distinct_arrays_tuples.reference | 0
 .../00351_select_distinct_arrays_tuples.sql | 0
 ...2_external_sorting_and_constants.reference | 0
 .../00352_external_sorting_and_constants.sql | 0
 .../0_stateless/00353_join_by_tuple.reference | 0
 .../0_stateless/00353_join_by_tuple.sql | 0
 .../00354_host_command_line_option.reference | 0
 .../00354_host_command_line_option.sh | 0
 ...y_of_non_const_convertible_types.reference | 0
 ...5_array_of_non_const_convertible_types.sql | 0
 ...alyze_aggregations_and_union_all.reference | 0
 ...356_analyze_aggregations_and_union_all.sql | 0
 .../00357_to_string_complex_types.reference | 0
 .../00357_to_string_complex_types.sql | 0
 .../00358_from_string_complex_types.reference | 0
 .../00358_from_string_complex_types.sql | 0
 .../00359_convert_or_zero_functions.reference | 0
 .../00359_convert_or_zero_functions.sql | 0
 ...o_date_from_string_with_datetime.reference | 0
 ...0360_to_date_from_string_with_datetime.sql | 0
 ..._array_offsets_and_squash_blocks.reference | 0
 ...shared_array_offsets_and_squash_blocks.sql | 0
 .../00362_great_circle_distance.reference | 0
 .../00362_great_circle_distance.sql | 0
 .../0_stateless/00363_defaults.reference | 0
 .../queries/0_stateless/00363_defaults.sql | 0
 .../00364_java_style_denormals.reference | 0
 .../00364_java_style_denormals.sql | 0
 .../00365_statistics_in_formats.reference | 0
 .../00365_statistics_in_formats.sh | 0
 .../00366_multi_statements.reference | 0
 .../0_stateless/00366_multi_statements.sh | 0
 ...isible_width_of_array_tuple_enum.reference | 0
 ...0367_visible_width_of_array_tuple_enum.sql | 0
 .../00368_format_option_collision.reference | 0
 .../00368_format_option_collision.sh | 0
 .../00369_int_div_of_float.reference | 0
 .../0_stateless/00369_int_div_of_float.sql | 0
 ..._duplicate_columns_in_subqueries.reference | 0
 .../00370_duplicate_columns_in_subqueries.sql | 0
 .../0_stateless/00371_union_all.reference | 0
 .../queries/0_stateless/00371_union_all.sql | 0
 .../0_stateless/00372_cors_header.reference | 0
 .../queries/0_stateless/00372_cors_header.sh | 0
 .../00373_group_by_tuple.reference | 0
 .../0_stateless/00373_group_by_tuple.sql | 0
 .../00374_any_last_if_merge.reference | 0
 .../0_stateless/00374_any_last_if_merge.sql | 0
 ...each_row_input_with_noisy_fields.reference | 0
 ...4_json_each_row_input_with_noisy_fields.sh | 0
 ...shard_group_uniq_array_of_string.reference | 0
 ...00375_shard_group_uniq_array_of_string.sql | 0
 ...rd_group_uniq_array_of_int_array.reference | 0
 ...76_shard_group_uniq_array_of_int_array.sql | 0
 ...group_uniq_array_of_string_array.reference | 0
 ...shard_group_uniq_array_of_string_array.sql | 0
 .../00378_json_quote_64bit_integers.reference | 0
 .../00378_json_quote_64bit_integers.sql | 0
 .../00379_system_processes_port.reference | 0
 .../00379_system_processes_port.sh | 0
 ...break_at_exception_in_batch_mode.reference | 0
 ...client_break_at_exception_in_batch_mode.sh | 0
 ...0381_first_significant_subdomain.reference | 0
 .../00381_first_significant_subdomain.sql | 0
 .../00383_utf8_validation.reference | 0
 .../0_stateless/00383_utf8_validation.sql | 0
 ...n_aggregate_function_insert_from.reference | 0
 ..._column_aggregate_function_insert_from.sql | 0
 ...ge_file_and_clickhouse-local_app.reference | 0
 ...5_storage_file_and_clickhouse-local_app.sh | 0
 .../0_stateless/00386_enum_in_pk.reference | 0
 .../queries/0_stateless/00386_enum_in_pk.sql | 0
 .../00386_has_column_in_table.reference | 0
 .../0_stateless/00386_has_column_in_table.sql | 0
 .../0_stateless/00386_long_in_pk.python | 0
 .../0_stateless/00386_long_in_pk.reference | 0
 .../queries/0_stateless/00386_long_in_pk.sh | 0
 .../00387_use_client_time_zone.reference | 0
 .../0_stateless/00387_use_client_time_zone.sh | 0
 .../00388_enum_with_totals.reference | 0
 .../0_stateless/00388_enum_with_totals.sql | 0
 .../00389_concat_operator.reference | 0
 .../0_stateless/00389_concat_operator.sql | 0
 .../0_stateless/00390_array_sort.reference | 0
 .../queries/0_stateless/00390_array_sort.sql | 0
 .../00392_enum_nested_alter.reference | 0
 .../0_stateless/00392_enum_nested_alter.sql | 0
 ...00393_if_with_constant_condition.reference | 0
 .../00393_if_with_constant_condition.sql | 0
 ..._new_nested_column_keeps_offsets.reference | 0
 .../00394_new_nested_column_keeps_offsets.sql | 0
 .../00394_replaceall_vector_fixed.reference | 0
 .../00394_replaceall_vector_fixed.sql | 0
 .../0_stateless/00395_nullable.reference | 0
 .../queries/0_stateless/00395_nullable.sql | 0
 .../queries/0_stateless/00396_uuid.reference | 0
 .../queries/0_stateless/00396_uuid.sql | 0
 .../00397_tsv_format_synonym.reference | 0
 .../0_stateless/00397_tsv_format_synonym.sql | 0
 .../0_stateless/00398_url_functions.reference | 0
 .../0_stateless/00398_url_functions.sql | 0
 ...9_group_uniq_array_date_datetime.reference | 0
 .../00399_group_uniq_array_date_datetime.sql | 0
 .../00400_client_external_options.reference | 0
 .../00400_client_external_options.sh | 0
 .../00401_merge_and_stripelog.reference | 0
 .../0_stateless/00401_merge_and_stripelog.sql | 0
 .../00402_nan_and_extremes.reference | 0
 .../0_stateless/00402_nan_and_extremes.sql | 0
 .../00403_to_start_of_day.reference | 0
 .../0_stateless/00403_to_start_of_day.sql | 0
 .../0_stateless/00404_null_literal.reference | 0
 .../0_stateless/00404_null_literal.sql | 0
 .../00405_pretty_formats.reference | 0
 .../0_stateless/00405_pretty_formats.sql | 0
 .../00406_tuples_with_nulls.reference | 0
 .../0_stateless/00406_tuples_with_nulls.sql | 0
 .../0_stateless/00407_parsing_nulls.reference | 0
 .../0_stateless/00407_parsing_nulls.sh | 0
 .../00408_http_keep_alive.reference | 0
 .../0_stateless/00408_http_keep_alive.sh | 0
 .../00409_shard_limit_by.reference | 0
 .../0_stateless/00409_shard_limit_by.sql | 0
 ...regation_combinators_with_arenas.reference | 0
 ...10_aggregation_combinators_with_arenas.sql | 0
 ...411_long_accurate_number_comparison.python | 0
 ...accurate_number_comparison_float.reference | 0
 ...1_long_accurate_number_comparison_float.sh | 0
 ..._accurate_number_comparison_int1.reference | 0
 ...11_long_accurate_number_comparison_int1.sh | 0
 ..._accurate_number_comparison_int2.reference | 0
 ...11_long_accurate_number_comparison_int2.sh | 0
 ..._accurate_number_comparison_int3.reference | 0
 ...11_long_accurate_number_comparison_int3.sh | 0
 ..._accurate_number_comparison_int4.reference | 0
 ...11_long_accurate_number_comparison_int4.sh | 0
 ...11_merge_tree_where_const_in_set.reference | 0
 .../00411_merge_tree_where_const_in_set.sql | 0
 ...12_logical_expressions_optimizer.reference | 0
 .../00412_logical_expressions_optimizer.sql | 0
 .../0_stateless/00413_distinct.reference | 0
 .../queries/0_stateless/00413_distinct.sql | 0
 ...0413_least_greatest_new_behavior.reference | 0
 .../00413_least_greatest_new_behavior.sql | 0
 ...414_time_zones_direct_conversion.reference | 0
 .../00414_time_zones_direct_conversion.sql | 0
 .../0_stateless/00415_into_outfile.reference | 0
 .../queries/0_stateless/00415_into_outfile.sh | 0
 ...copatch_progress_in_http_headers.reference | 0
 ...0416_pocopatch_progress_in_http_headers.sh | 0
 .../0_stateless/00417_kill_query.reference | 0
 .../queries/0_stateless/00417_kill_query.sh | 0
 .../00417_system_build_options.reference | 0
 .../0_stateless/00417_system_build_options.sh | 0
 .../00418_input_format_allow_errors.reference | 0
 .../00418_input_format_allow_errors.sh | 0
 .../00419_show_sql_queries.reference | 0
 .../0_stateless/00419_show_sql_queries.sh | 0
 .../00420_null_in_scalar_subqueries.reference | 0
 .../00420_null_in_scalar_subqueries.sql | 0
 ...00421_storage_merge__table_index.reference | 0
 .../00421_storage_merge__table_index.sh | 0
 .../00422_hash_function_constexpr.reference | 0
 .../00422_hash_function_constexpr.sql | 0
 .../00423_storage_log_single_thread.reference | 0
 .../00423_storage_log_single_thread.sql | 0
 ..._aggregate_functions_of_nullable.reference | 0
 ..._shard_aggregate_functions_of_nullable.sql | 0
 .../00425_count_nullable.reference | 0
 .../0_stateless/00425_count_nullable.sql | 0
 .../0_stateless/00426_nulls_sorting.reference | 0
 .../0_stateless/00426_nulls_sorting.sql | 0
 .../00427_alter_primary_key.reference | 0
 .../0_stateless/00427_alter_primary_key.sh | 0
 .../00429_long_http_bufferization.reference | 0
 .../00429_long_http_bufferization.sh | 0
 .../00429_point_in_ellipses.reference | 0
 .../0_stateless/00429_point_in_ellipses.sql | 0
 .../0_stateless/00430_https_server.reference | 0
 .../queries/0_stateless/00430_https_server.sh | 0
 .../0_stateless/00431_if_nulls.reference | 0
 .../queries/0_stateless/00431_if_nulls.sql | 0
 ...e_function_scalars_and_constants.reference | 0
 ...gregate_function_scalars_and_constants.sql | 0
 .../0_stateless/00433_ifnull.reference | 0
 .../queries/0_stateless/00433_ifnull.sql | 0
 .../0_stateless/00434_tonullable.reference | 0
 .../queries/0_stateless/00434_tonullable.sql | 0
 .../0_stateless/00435_coalesce.reference | 0
 .../queries/0_stateless/00435_coalesce.sql | 0
 .../00436_convert_charset.reference | 0
 .../0_stateless/00436_convert_charset.sql | 0
 ...0436_fixed_string_16_comparisons.reference | 0
 .../00436_fixed_string_16_comparisons.sql | 0
 .../00437_nulls_first_last.reference | 0
 .../0_stateless/00437_nulls_first_last.sql | 0
 .../0_stateless/00438_bit_rotate.reference | 0
 .../queries/0_stateless/00438_bit_rotate.sql | 0
 .../00439_fixed_string_filter.reference | 0
 .../0_stateless/00439_fixed_string_filter.sql | 0
.../00440_nulls_merge_tree.reference | 0 .../0_stateless/00440_nulls_merge_tree.sql | 0 .../0_stateless/00441_nulls_in.reference | 0 .../queries/0_stateless/00441_nulls_in.sql | 0 .../00442_filter_by_nullable.reference | 0 .../0_stateless/00442_filter_by_nullable.sql | 0 ...tree_uniform_read_distribution_0.reference | 0 ..._merge_tree_uniform_read_distribution_0.sh | 0 ...43_optimize_final_vertical_merge.reference | 0 .../00443_optimize_final_vertical_merge.sh | 0 ...00443_preferred_block_size_bytes.reference | 0 .../00443_preferred_block_size_bytes.sh | 0 .../00444_join_use_nulls.reference | 0 .../0_stateless/00444_join_use_nulls.sql | 0 .../00445_join_nullable_keys.reference | 0 .../0_stateless/00445_join_nullable_keys.sql | 0 ...n_partition_concurrent_zookeeper.reference | 0 ...olumn_in_partition_concurrent_zookeeper.sh | 0 ...ar_column_in_partition_zookeeper.reference | 0 ...46_clear_column_in_partition_zookeeper.sql | 0 .../00447_foreach_modifier.reference | 0 .../0_stateless/00447_foreach_modifier.sql | 0 ...replicate_nullable_tuple_generic.reference | 0 ...00448_replicate_nullable_tuple_generic.sql | 0 .../00448_to_string_cut_to_zero.reference | 0 .../00448_to_string_cut_to_zero.sql | 0 ...0449_filter_array_nullable_tuple.reference | 0 .../00449_filter_array_nullable_tuple.sql | 0 .../00450_higher_order_and_nullable.reference | 0 .../00450_higher_order_and_nullable.sql | 0 ...51_left_array_join_and_constants.reference | 0 .../00451_left_array_join_and_constants.sql | 0 ...452_left_array_join_and_nullable.reference | 0 .../00452_left_array_join_and_nullable.sql | 0 .../0_stateless/00453_cast_enum.reference | 0 .../queries/0_stateless/00453_cast_enum.sql | 0 .../queries/0_stateless/00453_top_k.reference | 0 .../queries/0_stateless/00453_top_k.sql | 0 .../00456_alter_nullable.reference | 0 .../0_stateless/00456_alter_nullable.sql | 0 ...7_log_tinylog_stripelog_nullable.reference | 0 .../00457_log_tinylog_stripelog_nullable.sql | 0 .../00458_merge_type_cast.reference | 0 .../0_stateless/00458_merge_type_cast.sql | 0 .../00459_group_array_insert_at.reference | 0 .../00459_group_array_insert_at.sql | 0 ...460_vertical_and_totals_extremes.reference | 0 .../00460_vertical_and_totals_extremes.sql | 0 ...1_default_value_of_argument_type.reference | 0 .../00461_default_value_of_argument_type.sql | 0 .../00462_json_true_false_literals.reference | 0 .../00462_json_true_false_literals.sql | 0 ..._long_sessions_in_http_interface.reference | 0 .../00463_long_sessions_in_http_interface.sh | 0 ...00464_array_element_out_of_range.reference | 0 .../00464_array_element_out_of_range.sql | 0 .../00464_sort_all_constant_columns.reference | 0 .../00464_sort_all_constant_columns.sql | 0 .../00465_nullable_default.reference | 0 .../0_stateless/00465_nullable_default.sql | 0 .../00466_comments_in_keyword.reference | 0 .../0_stateless/00466_comments_in_keyword.sql | 0 .../00467_qualified_names.reference | 0 .../0_stateless/00467_qualified_names.sql | 0 ...e_arrays_and_use_original_column.reference | 0 ...ultiple_arrays_and_use_original_column.sql | 0 ..._of_strings_containing_null_char.reference | 0 ...arison_of_strings_containing_null_char.sql | 0 ...470_identifiers_in_double_quotes.reference | 0 .../00470_identifiers_in_double_quotes.sql | 0 .../00471_sql_style_quoting.reference | 0 .../0_stateless/00471_sql_style_quoting.sql | 0 ...ompare_uuid_with_constant_string.reference | 0 ...0472_compare_uuid_with_constant_string.sql | 0 .../00472_create_view_if_not_exists.reference | 0 
.../00472_create_view_if_not_exists.sql | 0 ...tput_format_json_quote_denormals.reference | 0 ...0473_output_format_json_quote_denormals.sh | 0 .../00474_readonly_settings.reference | 0 .../0_stateless/00474_readonly_settings.sh | 0 .../00475_in_join_db_table.reference | 0 .../0_stateless/00475_in_join_db_table.sql | 0 .../00476_pretty_formats_and_widths.reference | 0 .../00476_pretty_formats_and_widths.sql | 0 .../00477_parsing_data_types.reference | 0 .../0_stateless/00477_parsing_data_types.sql | 0 ...0479_date_and_datetime_to_number.reference | 0 .../00479_date_and_datetime_to_number.sql | 0 .../0_stateless/00480_mac_addresses.reference | 0 .../0_stateless/00480_mac_addresses.sql | 0 .../00481_create_view_for_null.reference | 0 .../00481_create_view_for_null.sql | 0 .../00481_reading_from_last_granula.reference | 0 .../00481_reading_from_last_granula.sql | 0 .../00482_subqueries_and_aliases.reference | 0 .../00482_subqueries_and_aliases.sql | 0 .../0_stateless/00483_cast_syntax.reference | 0 .../queries/0_stateless/00483_cast_syntax.sql | 0 ...483_reading_from_array_structure.reference | 0 .../00483_reading_from_array_structure.sql | 0 ...d_max_column_in_block_size_bytes.reference | 0 ...eferred_max_column_in_block_size_bytes.sql | 0 .../00485_http_insert_format.reference | 0 .../0_stateless/00485_http_insert_format.sh | 0 .../00486_if_fixed_string.reference | 0 .../0_stateless/00486_if_fixed_string.sql | 0 .../00487_if_array_fixed_string.reference | 0 .../00487_if_array_fixed_string.sql | 0 .../00488_non_ascii_column_names.reference | 0 .../00488_non_ascii_column_names.sql | 0 .../00489_pk_subexpression.reference | 0 .../0_stateless/00489_pk_subexpression.sql | 0 ...rs_and_characters_outside_of_bmp.reference | 0 ...parators_and_characters_outside_of_bmp.sql | 0 .../0_stateless/00490_with_select.reference | 0 .../queries/0_stateless/00490_with_select.sql | 0 ...uted_and_aliases_in_where_having.reference | 0 ...istributed_and_aliases_in_where_having.sql | 0 .../00492_drop_temporary_table.reference | 0 .../00492_drop_temporary_table.sql | 0 .../00493_substring_of_fixedstring.reference | 0 .../00493_substring_of_fixedstring.sql | 0 ...494_shard_alias_substitution_bug.reference | 0 .../00494_shard_alias_substitution_bug.sql | 0 .../00495_reading_const_zero_column.reference | 0 .../00495_reading_const_zero_column.sql | 0 .../00496_substring_negative_offset.reference | 0 .../00496_substring_negative_offset.sql | 0 .../00497_whitespaces_in_insert.reference | 0 .../00497_whitespaces_in_insert.sh | 0 ..._functions_concat_slice_push_pop.reference | 0 ..._array_functions_concat_slice_push_pop.sql | 0 ...0498_bitwise_aggregate_functions.reference | 0 .../00498_bitwise_aggregate_functions.sql | 0 .../00499_json_enum_insert.reference | 0 .../0_stateless/00499_json_enum_insert.sql | 0 .../00500_point_in_polygon.reference | 0 .../0_stateless/00500_point_in_polygon.sql | 0 .../00500_point_in_polygon_bug.reference | 0 .../00500_point_in_polygon_bug.sql | 0 .../00500_point_in_polygon_bug_2.reference | 0 .../00500_point_in_polygon_bug_2.sql | 0 ..._3_linestring_rotation_precision.reference | 0 ...on_bug_3_linestring_rotation_precision.sql | 0 .../0_stateless/00501_http_head.reference | 0 .../queries/0_stateless/00501_http_head.sh | 0 .../00502_custom_partitioning_local.reference | 0 .../00502_custom_partitioning_local.sql | 0 ...artitioning_replicated_zookeeper.reference | 0 ...stom_partitioning_replicated_zookeeper.sql | 0 .../00502_string_concat_with_array.reference | 0 
.../00502_string_concat_with_array.sql | 0 .../0_stateless/00502_sum_map.reference | 0 .../queries/0_stateless/00502_sum_map.sql | 0 .../00503_cast_const_nullable.reference | 0 .../0_stateless/00503_cast_const_nullable.sql | 0 .../00504_insert_miss_columns.reference | 0 .../0_stateless/00504_insert_miss_columns.sh | 0 .../00504_mergetree_arrays_rw.reference | 0 .../0_stateless/00504_mergetree_arrays_rw.sql | 0 .../0_stateless/00505_distributed_secure.data | 0 .../0_stateless/00505_secure.reference | 0 .../queries/0_stateless/00505_secure.sh | 0 .../0_stateless/00505_shard_secure.reference | 0 .../queries/0_stateless/00505_shard_secure.sh | 0 .../00506_shard_global_in_union.reference | 0 .../00506_shard_global_in_union.sql | 0 .../00506_union_distributed.reference | 0 .../0_stateless/00506_union_distributed.sql | 0 .../00507_array_no_params.reference | 0 .../0_stateless/00507_array_no_params.sh | 0 .../00507_sumwithoverflow.reference | 0 .../0_stateless/00507_sumwithoverflow.sql | 0 .../00508_materialized_view_to.reference | 0 .../00508_materialized_view_to.sql | 0 ...rage_definition_syntax_zookeeper.reference | 0 ...ed_storage_definition_syntax_zookeeper.sql | 0 ...view_and_deduplication_zookeeper.reference | 0 ...lized_view_and_deduplication_zookeeper.sql | 0 .../00511_get_size_of_enum.reference | 0 .../0_stateless/00511_get_size_of_enum.sql | 0 .../00512_fractional_time_zones.reference | 0 .../00512_fractional_time_zones.sh | 0 .../00513_fractional_time_zones.reference | 0 .../00513_fractional_time_zones.sql | 0 .../00514_interval_operators.reference | 0 .../0_stateless/00514_interval_operators.sql | 0 .../00515_enhanced_time_zones.reference | 0 .../0_stateless/00515_enhanced_time_zones.sql | 0 .../0_stateless/00515_gcd_lcm.reference | 0 .../queries/0_stateless/00515_gcd_lcm.sql | 0 ...c_table_functions_and_subqueries.reference | 0 ...rd_desc_table_functions_and_subqueries.sql | 0 ...n_after_drop_partition_zookeeper.reference | 0 ...ication_after_drop_partition_zookeeper.sql | 0 .../0_stateless/00516_is_inf_nan.reference | 0 .../queries/0_stateless/00516_is_inf_nan.sql | 0 .../0_stateless/00516_modulo.reference | 0 .../queries/0_stateless/00516_modulo.sql | 0 .../0_stateless/00517_date_parsing.reference | 0 .../0_stateless/00517_date_parsing.sql | 0 ...18_extract_all_and_empty_matches.reference | 0 .../00518_extract_all_and_empty_matches.sql | 0 ...e_as_select_from_temporary_table.reference | 0 ..._create_as_select_from_temporary_table.sql | 0 .../0_stateless/00520_http_nullable.reference | 0 .../0_stateless/00520_http_nullable.sh | 0 .../00520_tuple_values_interpreter.reference | 0 .../00520_tuple_values_interpreter.sql | 0 .../00521_multidimensional.reference | 0 .../0_stateless/00521_multidimensional.sql | 0 .../00522_multidimensional.reference | 0 .../0_stateless/00522_multidimensional.sql | 0 ...gregate_functions_in_group_array.reference | 0 ...523_aggregate_functions_in_group_array.sql | 0 ..._time_intervals_months_underflow.reference | 0 .../00524_time_intervals_months_underflow.sql | 0 ...ullable_that_return_non_nullable.reference | 0 ...s_of_nullable_that_return_non_nullable.sql | 0 ...ray_join_with_arrays_of_nullable.reference | 0 ...526_array_join_with_arrays_of_nullable.sql | 0 .../00527_totals_having_nullable.reference | 0 .../00527_totals_having_nullable.sql | 0 .../00528_const_of_nullable.reference | 0 .../0_stateless/00528_const_of_nullable.sql | 0 .../0_stateless/00529_orantius.reference | 0 .../queries/0_stateless/00529_orantius.sql | 0 
.../00530_arrays_of_nothing.reference | 0 .../0_stateless/00530_arrays_of_nothing.sql | 0 .../00531_aggregate_over_nullable.reference | 0 .../00531_aggregate_over_nullable.sql | 0 .../00531_client_ignore_error.reference | 0 .../0_stateless/00531_client_ignore_error.sh | 0 .../0_stateless/00532_topk_generic.reference | 0 .../0_stateless/00532_topk_generic.sql | 0 .../0_stateless/00533_uniq_array.reference | 0 .../queries/0_stateless/00533_uniq_array.sql | 0 .../00534_client_ignore_error.reference | 0 .../0_stateless/00534_client_ignore_error.sh | 0 .../queries/0_stateless/00534_exp10.reference | 0 .../queries/0_stateless/00534_exp10.sql | 0 .../queries/0_stateless/00534_filimonov.data | 0 .../0_stateless/00534_filimonov.reference | 0 .../queries/0_stateless/00534_filimonov.sh | 0 .../00534_functions_bad_arguments.lib | 0 .../00534_functions_bad_arguments1.reference | 0 .../00534_functions_bad_arguments1.sh | 0 .../00534_functions_bad_arguments10.reference | 0 .../00534_functions_bad_arguments10.sh | 0 .../00534_functions_bad_arguments11.reference | 0 .../00534_functions_bad_arguments11.sh | 0 .../00534_functions_bad_arguments12.reference | 0 .../00534_functions_bad_arguments12.sh | 0 .../00534_functions_bad_arguments13.reference | 0 .../00534_functions_bad_arguments13.sh | 0 .../00534_functions_bad_arguments2.reference | 0 .../00534_functions_bad_arguments2.sh | 0 .../00534_functions_bad_arguments3.reference | 0 .../00534_functions_bad_arguments3.sh | 0 .../00534_functions_bad_arguments4.reference | 0 .../00534_functions_bad_arguments4.sh | 0 .../00534_functions_bad_arguments5.reference | 0 .../00534_functions_bad_arguments5.sh | 0 .../00534_functions_bad_arguments6.reference | 0 .../00534_functions_bad_arguments6.sh | 0 .../00534_functions_bad_arguments7.reference | 0 .../00534_functions_bad_arguments7.sh | 0 .../00534_functions_bad_arguments8.reference | 0 .../00534_functions_bad_arguments8.sh | 0 .../00534_functions_bad_arguments9.reference | 0 .../00534_functions_bad_arguments9.sh | 0 .../00535_parse_float_scientific.reference | 0 .../00535_parse_float_scientific.sql | 0 .../0_stateless/00536_int_exp.reference | 0 .../queries/0_stateless/00536_int_exp.sql | 0 .../0_stateless/00537_quarters.reference | 0 .../queries/0_stateless/00537_quarters.sql | 0 .../0_stateless/00538_datediff.reference | 0 .../queries/0_stateless/00538_datediff.sql | 0 ..._functions_for_working_with_json.reference | 0 .../00539_functions_for_working_with_json.sql | 0 .../00540_bad_data_types.reference | 0 .../0_stateless/00540_bad_data_types.sh | 0 .../0_stateless/00541_kahan_sum.reference | 0 .../queries/0_stateless/00541_kahan_sum.sql | 0 ...0541_to_start_of_fifteen_minutes.reference | 0 .../00541_to_start_of_fifteen_minutes.sql | 0 ...temporary_table_in_readonly_mode.reference | 0 ...ss_to_temporary_table_in_readonly_mode.sql | 0 ...erialized_view_and_time_zone_tag.reference | 0 ...42_materialized_view_and_time_zone_tag.sql | 0 ...temporary_table_in_readonly_mode.reference | 0 ...ess_to_temporary_table_in_readonly_mode.sh | 0 .../00543_null_and_prewhere.reference | 0 .../0_stateless/00543_null_and_prewhere.sql | 0 .../00544_agg_foreach_of_two_arg.reference | 0 .../00544_agg_foreach_of_two_arg.sql | 0 .../00544_insert_with_select.reference | 0 .../0_stateless/00544_insert_with_select.sql | 0 .../00545_weird_aggregate_functions.reference | 0 .../00545_weird_aggregate_functions.sql | 0 ...6_shard_tuple_element_formatting.reference | 0 .../00546_shard_tuple_element_formatting.sql | 0 
.../0_stateless/00547_named_tuples.reference | 0 .../0_stateless/00547_named_tuples.sql | 0 .../00548_slice_of_nested.reference | 0 .../0_stateless/00548_slice_of_nested.sql | 0 .../00549_join_use_nulls.reference | 0 .../0_stateless/00549_join_use_nulls.sql | 0 .../00550_join_insert_select.reference | 0 .../0_stateless/00550_join_insert_select.sh | 0 .../0_stateless/00551_parse_or_null.reference | 0 .../0_stateless/00551_parse_or_null.sql | 0 .../00552_logical_functions_simple.reference | 0 .../00552_logical_functions_simple.sql | 0 .../00552_logical_functions_ternary.reference | 0 .../00552_logical_functions_ternary.sql | 0 .../0_stateless/00552_or_nullable.reference | 0 .../queries/0_stateless/00552_or_nullable.sql | 0 ...53_buff_exists_materlized_column.reference | 0 .../00553_buff_exists_materlized_column.sql | 0 .../00553_invalid_nested_name.reference | 0 .../0_stateless/00553_invalid_nested_name.sql | 0 .../00554_nested_and_table_engines.reference | 0 .../00554_nested_and_table_engines.sql | 0 .../0_stateless/00555_hasAll_hasAny.reference | 0 .../0_stateless/00555_hasAll_hasAny.sql | 0 .../00555_right_join_excessive_rows.reference | 0 .../00555_right_join_excessive_rows.sql | 0 .../00556_array_intersect.reference | 0 .../0_stateless/00556_array_intersect.sql | 0 ...556_remove_columns_from_subquery.reference | 0 .../00556_remove_columns_from_subquery.sql | 0 .../00557_alter_null_storage_tables.reference | 0 .../00557_alter_null_storage_tables.sql | 0 .../0_stateless/00557_array_resize.reference | 0 .../0_stateless/00557_array_resize.sql | 0 .../0_stateless/00557_remote_port.reference | 0 .../queries/0_stateless/00557_remote_port.sh | 0 ...gregate_merge_totals_with_arenas.reference | 0 ...558_aggregate_merge_totals_with_arenas.sql | 0 .../0_stateless/00558_parse_floats.reference | 0 .../0_stateless/00558_parse_floats.sql | 0 .../00559_filter_array_generic.reference | 0 .../00559_filter_array_generic.sql | 0 ...0_float_leading_plus_in_exponent.reference | 0 .../00560_float_leading_plus_in_exponent.sql | 0 .../0_stateless/00561_storage_join.reference | 0 .../0_stateless/00561_storage_join.sql | 0 .../00562_in_subquery_merge_tree.reference | 0 .../00562_in_subquery_merge_tree.sql | 0 ...ite_select_expression_with_union.reference | 0 ...2_rewrite_select_expression_with_union.sql | 0 .../00563_complex_in_expression.reference | 0 .../00563_complex_in_expression.sql | 0 ...insert_into_remote_and_zookeeper.reference | 0 ...00563_insert_into_remote_and_zookeeper.sql | 0 .../00563_shard_insert_into_remote.reference | 0 .../00563_shard_insert_into_remote.sql | 0 .../0_stateless/00564_enum_order.reference | 0 .../queries/0_stateless/00564_enum_order.sh | 0 ...n_values_with_default_expression.reference | 0 ..._column_values_with_default_expression.sql | 0 ...00564_temporary_table_management.reference | 0 .../00564_temporary_table_management.sql | 0 ..._versioned_collapsing_merge_tree.reference | 0 .../00564_versioned_collapsing_merge_tree.sql | 0 .../0_stateless/00565_enum_order.reference | 0 .../queries/0_stateless/00565_enum_order.sh | 0 .../0_stateless/00566_enum_min_max.reference | 0 .../0_stateless/00566_enum_min_max.sql | 0 ...parse_datetime_as_unix_timestamp.reference | 0 ...00567_parse_datetime_as_unix_timestamp.sql | 0 ...empty_function_with_fixed_string.reference | 0 ...00568_empty_function_with_fixed_string.sql | 0 ...0569_parse_date_time_best_effort.reference | 0 .../00569_parse_date_time_best_effort.sql | 0 .../00570_empty_array_is_const.reference | 0 .../00570_empty_array_is_const.sql 
| 0 .../00571_alter_nullable.reference | 0 .../0_stateless/00571_alter_nullable.sql | 0 ...base_when_create_materializ_view.reference | 0 ...t_database_when_create_materializ_view.sql | 0 .../00572_aggregation_by_empty_set.reference | 0 .../00572_aggregation_by_empty_set.sql | 0 ...3_shard_aggregation_by_empty_set.reference | 0 .../00573_shard_aggregation_by_empty_set.sql | 0 ...74_empty_strings_deserialization.reference | 0 .../00574_empty_strings_deserialization.sh | 0 ...exception_when_drop_depen_column.reference | 0 ...column_exception_when_drop_depen_column.sh | 0 ...ge_and_index_with_function_in_in.reference | 0 ...75_merge_and_index_with_function_in_in.sql | 0 .../00576_nested_and_prewhere.reference | 0 .../0_stateless/00576_nested_and_prewhere.sql | 0 .../00577_full_join_segfault.reference | 0 .../0_stateless/00577_full_join_segfault.sql | 0 ...lacing_merge_tree_vertical_merge.reference | 0 ...77_replacing_merge_tree_vertical_merge.sql | 0 ...e_table_and_table_virtual_column.reference | 0 ...8_merge_table_and_table_virtual_column.sql | 0 .../00578_merge_table_sampling.reference | 0 .../00578_merge_table_sampling.sql | 0 ...erge_table_shadow_virtual_column.reference | 0 ...0578_merge_table_shadow_virtual_column.sql | 0 ..._merge_trees_without_primary_key.reference | 0 .../00578_merge_trees_without_primary_key.sql | 0 ...imary_keys_using_same_expression.reference | 0 ...and_primary_keys_using_same_expression.sql | 0 .../00579_virtual_column_and_lazy.reference | 0 .../00579_virtual_column_and_lazy.sql | 0 ...80_cast_nullable_to_non_nullable.reference | 0 .../00580_cast_nullable_to_non_nullable.sql | 0 ...580_consistent_hashing_functions.reference | 0 .../00580_consistent_hashing_functions.sql | 0 ...n_result_and_subquery_and_insert.reference | 0 ...imit_on_result_and_subquery_and_insert.sql | 0 .../00582_not_aliasing_functions.reference | 0 .../00582_not_aliasing_functions.sql | 0 .../00583_limit_by_expressions.reference | 0 .../00583_limit_by_expressions.sql | 0 .../00584_view_union_all.reference | 0 .../0_stateless/00584_view_union_all.sql | 0 ...query_aggregation_column_removal.reference | 0 ...ll_subquery_aggregation_column_removal.sql | 0 ...ing_unused_columns_from_subquery.reference | 0 ..._removing_unused_columns_from_subquery.sql | 0 ...00587_union_all_type_conversions.reference | 0 .../00587_union_all_type_conversions.sql | 0 ...00588_shard_distributed_prewhere.reference | 0 .../00588_shard_distributed_prewhere.sql | 0 ...moval_unused_columns_aggregation.reference | 0 ...589_removal_unused_columns_aggregation.sql | 0 .../00590_limit_by_column_removal.reference | 0 .../00590_limit_by_column_removal.sql | 0 .../00591_columns_removal_union_all.reference | 0 .../00591_columns_removal_union_all.sql | 0 ...0592_union_all_different_aliases.reference | 0 .../00592_union_all_different_aliases.sql | 0 ...union_all_assert_columns_removed.reference | 0 ...00593_union_all_assert_columns_removed.sql | 0 .../00594_alias_in_distributed.reference | 0 .../00594_alias_in_distributed.sql | 0 .../00595_insert_into_view.reference | 0 .../0_stateless/00595_insert_into_view.sh | 0 .../00596_limit_on_expanded_ast.reference | 0 .../00596_limit_on_expanded_ast.sh | 0 .../00597_push_down_predicate.reference | 0 .../0_stateless/00597_push_down_predicate.sql | 0 .../00597_with_totals_on_empty_set.reference | 0 .../00597_with_totals_on_empty_set.sql | 0 .../00598_create_as_select_http.reference | 0 .../00598_create_as_select_http.sh | 0 .../00599_create_view_with_subquery.reference | 0 
.../00599_create_view_with_subquery.sql | 0 ...te_temporary_table_if_not_exists.reference | 0 ...0_create_temporary_table_if_not_exists.sql | 0 .../00600_replace_running_query.reference | 0 .../00600_replace_running_query.sh | 0 .../00601_kill_running_query.reference | 0 .../0_stateless/00601_kill_running_query.sh | 0 .../0_stateless/00602_throw_if.reference | 0 .../queries/0_stateless/00602_throw_if.sh | 0 ...ystem_parts_nonexistent_database.reference | 0 ...0603_system_parts_nonexistent_database.sql | 0 ...remote_and_columns_with_defaults.reference | 0 ...shard_remote_and_columns_with_defaults.sql | 0 .../00604_show_create_database.reference | 0 .../00604_show_create_database.sql | 0 ...ntersections_aggregate_functions.reference | 0 ...0605_intersections_aggregate_functions.sql | 0 .../00606_quantiles_and_nans.reference | 0 .../0_stateless/00606_quantiles_and_nans.sql | 0 .../0_stateless/00607_index_in_in.reference | 0 .../queries/0_stateless/00607_index_in_in.sql | 0 .../0_stateless/00608_uniq_array.reference | 0 .../queries/0_stateless/00608_uniq_array.sql | 0 ..._distributed_with_case_when_then.reference | 0 .../00609_distributed_with_case_when_then.sql | 0 .../00609_mv_index_in_in.reference | 0 .../0_stateless/00609_mv_index_in_in.sql | 0 .../00609_prewhere_and_default.reference | 0 .../00609_prewhere_and_default.sql | 0 ...rward_alter_partition_statements.reference | 0 ...iew_forward_alter_partition_statements.sql | 0 .../queries/0_stateless/00612_count.reference | 0 .../queries/0_stateless/00612_count.sql | 0 .../00612_http_max_query_size.reference | 0 .../0_stateless/00612_http_max_query_size.sh | 0 .../0_stateless/00612_pk_in_tuple.reference | 0 .../queries/0_stateless/00612_pk_in_tuple.sql | 0 .../0_stateless/00612_shard_count.reference | 0 .../queries/0_stateless/00612_shard_count.sql | 0 .../00612_union_query_with_subquery.reference | 0 .../00612_union_query_with_subquery.sql | 0 ...d_distributed_max_execution_time.reference | 0 ...3_shard_distributed_max_execution_time.sql | 0 .../00614_array_nullable.reference | 0 .../0_stateless/00614_array_nullable.sql | 0 ...remote_node_in_distributed_query.reference | 0 ...l_and_remote_node_in_distributed_query.sql | 0 .../00615_nullable_alter_optimize.reference | 0 .../00615_nullable_alter_optimize.sql | 0 .../00616_final_single_part.reference | 0 .../0_stateless/00616_final_single_part.sql | 0 .../0_stateless/00617_array_in.reference | 0 .../queries/0_stateless/00617_array_in.sql | 0 .../0_stateless/00618_nullable_in.reference | 0 .../queries/0_stateless/00618_nullable_in.sql | 0 .../0_stateless/00619_extract.reference | 0 .../queries/0_stateless/00619_extract.sql | 0 .../00619_union_highlite.reference | 0 .../0_stateless/00619_union_highlite.sql | 0 ...e_on_nonleader_replica_zookeeper.reference | 0 ...ptimize_on_nonleader_replica_zookeeper.sql | 0 ...00621_regression_for_in_operator.reference | 0 .../00621_regression_for_in_operator.sql | 0 .../00622_select_in_parens.reference | 0 .../0_stateless/00622_select_in_parens.sql | 0 .../00623_in_partition_key.reference | 0 .../0_stateless/00623_in_partition_key.sql | 0 ...licated_truncate_table_zookeeper.reference | 0 ...23_replicated_truncate_table_zookeeper.sql | 0 .../00623_truncate_table.reference | 0 .../0_stateless/00623_truncate_table.sql | 0 ...3_truncate_table_throw_exception.reference | 0 .../00623_truncate_table_throw_exception.sh | 0 .../0_stateless/00624_length_utf8.reference | 0 .../queries/0_stateless/00624_length_utf8.sql | 0 .../00625_arrays_in_nested.reference | 0 
.../0_stateless/00625_arrays_in_nested.sql | 0 .../00625_query_in_form_data.reference | 0 .../0_stateless/00625_query_in_form_data.sh | 0 .../00625_summing_merge_tree_merge.reference | 0 .../00625_summing_merge_tree_merge.sql | 0 .../0_stateless/00626_in_syntax.reference | 0 .../queries/0_stateless/00626_in_syntax.sql | 0 ...626_replace_partition_from_table.reference | 0 .../00626_replace_partition_from_table.sql | 0 ...e_partition_from_table_zookeeper.reference | 0 ..._replace_partition_from_table_zookeeper.sh | 0 .../00627_recursive_alias.reference | 0 .../0_stateless/00627_recursive_alias.sql | 0 ...628_in_lambda_on_merge_table_bug.reference | 0 .../00628_in_lambda_on_merge_table_bug.sql | 0 .../00630_arbitrary_csv_delimiter.reference | 0 .../00630_arbitrary_csv_delimiter.sh | 0 .../00632_aggregation_window_funnel.reference | 0 .../00632_aggregation_window_funnel.sql | 0 .../00632_get_sample_block_cache.reference | 0 .../00632_get_sample_block_cache.sql | 0 .../0_stateless/00633_func_or_in.reference | 0 .../queries/0_stateless/00633_func_or_in.sql | 0 ...iew_and_too_many_parts_zookeeper.reference | 0 ...lized_view_and_too_many_parts_zookeeper.sh | 0 .../0_stateless/00634_logging_shard.reference | 0 .../0_stateless/00634_logging_shard.sh | 0 ...rmance_introspection_and_logging.reference | 0 ...4_performance_introspection_and_logging.sh | 0 .../0_stateless/00634_rename_view.reference | 0 .../queries/0_stateless/00634_rename_view.sql | 0 .../00635_shard_distinct_order_by.reference | 0 .../00635_shard_distinct_order_by.sql | 0 ...0636_partition_key_parts_pruning.reference | 0 .../00636_partition_key_parts_pruning.sh | 0 ...s_in_http_interface_and_settings.reference | 0 ...sessions_in_http_interface_and_settings.sh | 0 .../0_stateless/00638_remote_ssrf.reference | 0 .../0_stateless/00638_remote_ssrf.sh.disabled | 0 .../0_stateless/00639_startsWith.reference | 0 .../queries/0_stateless/00639_startsWith.sql | 0 .../0_stateless/00640_endsWith.reference | 0 .../queries/0_stateless/00640_endsWith.sql | 0 .../queries/0_stateless/00642_cast.reference | 0 .../queries/0_stateless/00642_cast.sql | 0 .../00643_cast_zookeeper.reference | 0 .../0_stateless/00643_cast_zookeeper.sql | 0 ...rent_expressions_with_same_alias.reference | 0 ..._different_expressions_with_same_alias.sql | 0 .../00645_date_time_input_format.reference | 0 .../00645_date_time_input_format.sql | 0 .../0_stateless/00646_url_engine.python | 0 .../0_stateless/00646_url_engine.reference | 0 .../queries/0_stateless/00646_url_engine.sh | 0 .../0_stateless/00646_weird_mmx.reference | 0 .../queries/0_stateless/00646_weird_mmx.sql | 0 .../0_stateless/00647_histogram.reference | 0 .../queries/0_stateless/00647_histogram.sql | 0 ...00647_multiply_aggregation_state.reference | 0 .../00647_multiply_aggregation_state.sql | 0 ...00647_select_numbers_with_offset.reference | 0 .../00647_select_numbers_with_offset.sql | 0 ...eplacing_empty_set_from_prewhere.reference | 0 ...0648_replacing_empty_set_from_prewhere.sql | 0 .../00649_quantile_tdigest_negative.reference | 0 .../00649_quantile_tdigest_negative.sql | 0 ...array_enumerate_uniq_with_tuples.reference | 0 ...00650_array_enumerate_uniq_with_tuples.sql | 0 ...50_csv_with_specified_quote_rule.reference | 0 .../00650_csv_with_specified_quote_rule.sh | 0 ...ult_database_on_client_reconnect.reference | 0 ...51_default_database_on_client_reconnect.sh | 0 .../00652_mergetree_mutations.reference | 0 .../0_stateless/00652_mergetree_mutations.sh | 0 .../00652_mutations_alter_update.reference | 0 
.../00652_mutations_alter_update.sh | 0 ...00652_mutations_default_database.reference | 0 .../00652_mutations_default_database.sh | 0 ...tions_default_database_zookeeper.reference | 0 ...ed_mutations_default_database_zookeeper.sh | 0 ...2_replicated_mutations_zookeeper.reference | 0 .../00652_replicated_mutations_zookeeper.sh | 0 .../00653_monotonic_integer_cast.reference | 0 .../00653_monotonic_integer_cast.sql | 0 .../00653_running_difference.reference | 0 .../0_stateless/00653_running_difference.sql | 0 ...verification_monotonic_data_load.reference | 0 .../00653_verification_monotonic_data_load.sh | 0 ...optimize_final_without_partition.reference | 0 ...00660_optimize_final_without_partition.sql | 0 .../00661_array_has_silviucpp.reference | 0 .../0_stateless/00661_array_has_silviucpp.sql | 0 ...ated_without_partition_zookeeper.reference | 0 ...replicated_without_partition_zookeeper.sql | 0 .../00662_array_has_nullable.reference | 0 .../0_stateless/00662_array_has_nullable.sql | 0 .../00663_tiny_log_empty_insert.reference | 0 .../00663_tiny_log_empty_insert.sql | 0 ...664_cast_from_string_to_nullable.reference | 0 .../00664_cast_from_string_to_nullable.sql | 0 ...ullable_string_to_nullable_uint8.reference | 0 ...lter_nullable_string_to_nullable_uint8.sql | 0 .../00666_uniq_complex_types.reference | 0 .../0_stateless/00666_uniq_complex_types.sql | 0 ...ompare_arrays_of_different_types.reference | 0 ...0667_compare_arrays_of_different_types.sql | 0 .../00668_compare_arrays_silviucpp.reference | 0 .../00668_compare_arrays_silviucpp.sql | 0 .../00670_truncate_temporary_table.reference | 0 .../00670_truncate_temporary_table.sql | 0 .../00671_max_intersections.reference | 0 .../0_stateless/00671_max_intersections.sql | 0 .../0_stateless/00672_arrayDistinct.reference | 0 .../0_stateless/00672_arrayDistinct.sql | 0 ...ubquery_prepared_set_performance.reference | 0 ...0673_subquery_prepared_set_performance.sql | 0 .../00674_has_array_enum.reference | 0 .../0_stateless/00674_has_array_enum.sql | 0 .../00674_join_on_syntax.reference | 0 .../0_stateless/00674_join_on_syntax.sql | 0 ...shard_remote_with_table_function.reference | 0 ...00675_shard_remote_with_table_function.sql | 0 .../0_stateless/00676_group_by_in.reference | 0 .../queries/0_stateless/00676_group_by_in.sql | 0 .../00677_shard_any_heavy_merge.reference | 0 .../00677_shard_any_heavy_merge.sql | 0 .../0_stateless/00678_murmurhash.reference | 0 .../queries/0_stateless/00678_murmurhash.sql | 0 .../00678_shard_funnel_window.reference | 0 .../0_stateless/00678_shard_funnel_window.sql | 0 .../00679_replace_asterisk.reference | 0 .../0_stateless/00679_replace_asterisk.sql | 0 .../0_stateless/00679_uuid_in_key.reference | 0 .../queries/0_stateless/00679_uuid_in_key.sql | 0 ...plicate_columns_inside_union_all.reference | 0 ...680_duplicate_columns_inside_union_all.sql | 0 ...s_inside_union_all_stas_sviridov.reference | 0 ...columns_inside_union_all_stas_sviridov.sql | 0 .../00682_empty_parts_merge.reference | 0 .../0_stateless/00682_empty_parts_merge.sh | 0 ...rmat_json_escape_forward_slashes.reference | 0 ...put_format_json_escape_forward_slashes.sql | 0 .../00686_client_exit_code.reference | 0 .../0_stateless/00686_client_exit_code.sh | 0 .../00687_insert_into_mv.reference | 0 .../0_stateless/00687_insert_into_mv.sql | 0 .../00687_top_and_offset.reference | 0 .../0_stateless/00687_top_and_offset.sh | 0 .../00688_aggregation_retention.reference | 0 .../00688_aggregation_retention.sql | 0 .../00688_case_without_else.reference | 0 
.../0_stateless/00688_case_without_else.sql | 0 ...low_cardinality_alter_add_column.reference | 0 ...00688_low_cardinality_alter_add_column.sql | 0 .../00688_low_cardinality_defaults.reference | 0 .../00688_low_cardinality_defaults.sql | 0 ...ality_dictionary_deserialization.reference | 0 ...cardinality_dictionary_deserialization.sql | 0 .../00688_low_cardinality_in.reference | 0 .../0_stateless/00688_low_cardinality_in.sql | 0 ...88_low_cardinality_nullable_cast.reference | 0 .../00688_low_cardinality_nullable_cast.sql | 0 .../00688_low_cardinality_prewhere.reference | 0 .../00688_low_cardinality_prewhere.sql | 0 ...88_low_cardinality_serialization.reference | 0 .../00688_low_cardinality_serialization.sql | 0 .../00688_low_cardinality_syntax.reference | 0 .../00688_low_cardinality_syntax.sql | 0 .../queries/0_stateless/00689_file.txt | 0 .../00689_join_table_function.reference | 0 .../0_stateless/00689_join_table_function.sql | 0 ...ect_converting_exception_message.reference | 0 ...ert_select_converting_exception_message.sh | 0 .../00691_array_distinct.reference | 0 .../0_stateless/00691_array_distinct.sql | 0 .../00692_if_exception_code.reference | 0 .../0_stateless/00692_if_exception_code.sql | 0 ...block_size_system_tables_columns.reference | 0 ...3_max_block_size_system_tables_columns.sql | 0 .../00694_max_block_size_zero.reference | 0 .../0_stateless/00694_max_block_size_zero.sql | 0 ...0695_pretty_max_column_pad_width.reference | 0 .../00695_pretty_max_column_pad_width.sql | 0 .../00696_system_columns_limit.reference | 0 .../00696_system_columns_limit.sql | 0 .../00697_in_subquery_shard.reference | 0 .../0_stateless/00697_in_subquery_shard.sql | 0 ..._validate_array_sizes_for_nested.reference | 0 .../00698_validate_array_sizes_for_nested.sql | 0 ..._array_sizes_for_nested_kshvakov.reference | 0 ...lidate_array_sizes_for_nested_kshvakov.sql | 0 ...0699_materialized_view_mutations.reference | 0 .../00699_materialized_view_mutations.sh | 0 .../00700_decimal_aggregates.reference | 0 .../0_stateless/00700_decimal_aggregates.sql | 0 .../00700_decimal_arithm.reference | 0 .../0_stateless/00700_decimal_arithm.sql | 0 .../00700_decimal_array_functions.reference | 0 .../00700_decimal_array_functions.sql | 0 .../00700_decimal_bounds.reference | 0 .../0_stateless/00700_decimal_bounds.sql | 0 .../0_stateless/00700_decimal_casts.reference | 0 .../0_stateless/00700_decimal_casts.sql | 0 .../00700_decimal_compare.reference | 0 .../0_stateless/00700_decimal_compare.sql | 0 .../00700_decimal_complex_types.reference | 0 .../00700_decimal_complex_types.sql | 0 .../00700_decimal_defaults.reference | 0 .../0_stateless/00700_decimal_defaults.sql | 0 .../00700_decimal_empty_aggregates.reference | 0 .../00700_decimal_empty_aggregates.sql | 0 .../00700_decimal_formats.reference | 0 .../0_stateless/00700_decimal_formats.sql | 0 .../00700_decimal_gathers.reference | 0 .../0_stateless/00700_decimal_gathers.sql | 0 .../00700_decimal_in_keys.reference | 0 .../0_stateless/00700_decimal_in_keys.sql | 0 .../0_stateless/00700_decimal_math.reference | 0 .../0_stateless/00700_decimal_math.sql | 0 .../0_stateless/00700_decimal_null.reference | 0 .../0_stateless/00700_decimal_null.sql | 0 .../0_stateless/00700_decimal_round.reference | 0 .../0_stateless/00700_decimal_round.sql | 0 .../00700_to_decimal_or_something.reference | 0 .../00700_to_decimal_or_something.sql | 0 .../00701_context_use_after_free.reference | 0 .../00701_context_use_after_free.sql | 0 .../00701_join_default_strictness.reference | 0 
.../00701_join_default_strictness.sql | 0 .../0_stateless/00701_rollup.reference | 0 .../queries/0_stateless/00701_rollup.sql | 0 .../0_stateless/00702_join_on_dups.reference | 0 .../0_stateless/00702_join_on_dups.sql | 0 .../00702_join_with_using.reference | 0 .../0_stateless/00702_join_with_using.sql | 0 .../00702_join_with_using_dups.reference | 0 .../00702_join_with_using_dups.sql | 0 ...0702_where_with_quailified_names.reference | 0 .../00702_where_with_quailified_names.sql | 0 .../0_stateless/00703_join_crash.reference | 0 .../queries/0_stateless/00703_join_crash.sql | 0 ...rayCumSumLimited_arrayDifference.reference | 0 ...704_arrayCumSumLimited_arrayDifference.sql | 0 ...00704_drop_truncate_memory_table.reference | 0 .../00704_drop_truncate_memory_table.sh | 0 .../00705_aggregate_states_addition.reference | 0 .../00705_aggregate_states_addition.sql | 0 .../00705_drop_create_merge_tree.reference | 0 .../00705_drop_create_merge_tree.sh | 0 .../00706_iso_week_and_day_of_year.reference | 0 .../00706_iso_week_and_day_of_year.sql | 0 .../00707_float_csv_delimiter.reference | 0 .../0_stateless/00707_float_csv_delimiter.sql | 0 ...0709_virtual_column_partition_id.reference | 0 .../00709_virtual_column_partition_id.sql | 0 .../00710_array_enumerate_dense.reference | 0 .../00710_array_enumerate_dense.sql | 0 .../00711_array_enumerate_variants.reference | 0 .../00711_array_enumerate_variants.sql | 0 .../00712_nan_comparison.reference | 0 .../0_stateless/00712_nan_comparison.sql | 0 .../00712_prewhere_with_alias.reference | 0 .../0_stateless/00712_prewhere_with_alias.sql | 0 ...re_with_alias_and_virtual_column.reference | 0 ...prewhere_with_alias_and_virtual_column.sql | 0 .../00712_prewhere_with_alias_bug.reference | 0 .../00712_prewhere_with_alias_bug.sql | 0 .../00712_prewhere_with_alias_bug_2.reference | 0 .../00712_prewhere_with_alias_bug_2.sql | 0 .../00712_prewhere_with_final.reference | 0 .../0_stateless/00712_prewhere_with_final.sql | 0 ...12_prewhere_with_missing_columns.reference | 0 .../00712_prewhere_with_missing_columns.sql | 0 ..._prewhere_with_missing_columns_2.reference | 0 .../00712_prewhere_with_missing_columns_2.sql | 0 .../00712_prewhere_with_sampling.reference | 0 .../00712_prewhere_with_sampling.sql | 0 ...prewhere_with_sampling_and_alias.reference | 0 ...00712_prewhere_with_sampling_and_alias.sql | 0 .../00713_collapsing_merge_tree.reference | 0 .../00713_collapsing_merge_tree.sql | 0 .../0_stateless/00714_alter_uuid.reference | 0 .../queries/0_stateless/00714_alter_uuid.sql | 0 ...e_temporary_table_with_in_clause.reference | 0 ..._create_temporary_table_with_in_clause.sql | 0 .../00715_bounding_ratio.reference | 0 .../0_stateless/00715_bounding_ratio.sql | 0 ...merged_or_mutated_part_zookeeper.reference | 0 ..._fetch_merged_or_mutated_part_zookeeper.sh | 0 ...00715_json_each_row_input_nested.reference | 0 .../00715_json_each_row_input_nested.sh | 0 .../0_stateless/00716_allow_ddl.reference | 0 .../queries/0_stateless/00716_allow_ddl.sql | 0 .../00717_default_join_type.reference | 0 .../0_stateless/00717_default_join_type.sql | 0 ...cardinaliry_distributed_group_by.reference | 0 ...7_low_cardinaliry_distributed_group_by.sql | 0 .../00717_low_cardinaliry_group_by.reference | 0 .../00717_low_cardinaliry_group_by.sql | 0 .../00717_merge_and_distributed.reference | 0 .../00717_merge_and_distributed.sql | 0 .../00718_format_datetime.reference | 0 .../0_stateless/00718_format_datetime.sql | 0 .../00718_low_cardinaliry_alter.reference | 0 .../00718_low_cardinaliry_alter.sql | 0 
.../00719_format_datetime_rand.reference | 0 .../00719_format_datetime_rand.sql | 0 ...0719_insert_block_without_column.reference | 0 .../00719_insert_block_without_column.sh | 0 .../00719_parallel_ddl_db.reference | 0 .../0_stateless/00719_parallel_ddl_db.sh | 0 .../00719_parallel_ddl_table.reference | 0 .../0_stateless/00719_parallel_ddl_table.sh | 0 ...nations_of_aggregate_combinators.reference | 0 ..._combinations_of_aggregate_combinators.sql | 0 .../0_stateless/00720_with_cube.reference | 0 .../queries/0_stateless/00720_with_cube.sql | 0 ...cal_result_after_merge_zookeeper.reference | 0 ...identical_result_after_merge_zookeeper.sql | 0 .../0_stateless/00722_inner_join.reference | 0 .../queries/0_stateless/00722_inner_join.sql | 0 .../0_stateless/00723_remerge_sort.reference | 0 .../0_stateless/00723_remerge_sort.sql | 0 ...nsert_values_datetime_conversion.reference | 0 ...0724_insert_values_datetime_conversion.sql | 0 .../00725_comment_columns.reference | 0 .../0_stateless/00725_comment_columns.sql | 0 .../00725_ipv4_ipv6_domains.reference | 0 .../0_stateless/00725_ipv4_ipv6_domains.sql | 0 .../0_stateless/00725_join_on_bug_1.reference | 0 .../0_stateless/00725_join_on_bug_1.sql | 0 .../0_stateless/00725_join_on_bug_2.reference | 0 .../0_stateless/00725_join_on_bug_2.sql | 0 .../0_stateless/00725_join_on_bug_3.reference | 0 .../0_stateless/00725_join_on_bug_3.sql | 0 .../0_stateless/00725_join_on_bug_4.reference | 0 .../0_stateless/00725_join_on_bug_4.sql | 0 .../00725_memory_tracking.reference | 0 .../0_stateless/00725_memory_tracking.sql | 0 .../00725_quantiles_shard.reference | 0 .../0_stateless/00725_quantiles_shard.sql | 0 .../00726_length_aliases.reference | 0 .../0_stateless/00726_length_aliases.sql | 0 ...726_materialized_view_concurrent.reference | 0 .../00726_materialized_view_concurrent.sql | 0 .../00726_modulo_for_date.reference | 0 .../0_stateless/00726_modulo_for_date.sql | 0 .../0_stateless/00727_concat.reference | 0 .../queries/0_stateless/00727_concat.sql | 0 .../00728_json_each_row_parsing.reference | 0 .../00728_json_each_row_parsing.sh | 0 .../00729_prewhere_array_join.reference | 0 .../0_stateless/00729_prewhere_array_join.sql | 0 .../00730_unicode_terminal_format.reference | 0 .../00730_unicode_terminal_format.sql | 0 ...g_merge_tree_select_opened_files.reference | 0 ...731_long_merge_tree_select_opened_files.sh | 0 .../00732_base64_functions.reference | 0 .../0_stateless/00732_base64_functions.sql | 0 ...00732_decimal_summing_merge_tree.reference | 0 .../00732_decimal_summing_merge_tree.sql | 0 ...ave_data_before_quorum_zookeeper.reference | 0 ...sert_have_data_before_quorum_zookeeper.sql | 0 ...st_part_and_alive_part_zookeeper.reference | 0 ...ert_lost_part_and_alive_part_zookeeper.sql | 0 ...uorum_insert_lost_part_zookeeper.reference | 0 ...0732_quorum_insert_lost_part_zookeeper.sql | 0 ...ata_and_without_quorum_zookeeper.reference | 0 ..._old_data_and_without_quorum_zookeeper.sql | 0 ...rt_simple_test_1_parts_zookeeper.reference | 0 ...m_insert_simple_test_1_parts_zookeeper.sql | 0 ...rt_simple_test_2_parts_zookeeper.reference | 0 ...m_insert_simple_test_2_parts_zookeeper.sql | 0 .../0_stateless/00733_if_datetime.reference | 0 .../queries/0_stateless/00733_if_datetime.sql | 0 .../0_stateless/00734_timeslot.reference | 0 .../queries/0_stateless/00734_timeslot.sql | 0 .../0_stateless/00735_conditional.reference | 0 .../queries/0_stateless/00735_conditional.sql | 0 .../00735_or_expr_optimize_bug.reference | 0 .../00735_or_expr_optimize_bug.sql | 0 
.../00736_disjunction_optimisation.reference | 0 .../00736_disjunction_optimisation.sql | 0 .../00737_decimal_group_by.reference | 0 .../0_stateless/00737_decimal_group_by.sql | 0 .../00738_lock_for_inner_table.reference | 0 .../0_stateless/00738_lock_for_inner_table.sh | 0 ...ted_merge_multidimensional_array.reference | 0 ...38_nested_merge_multidimensional_array.sql | 0 ...ent_nullable_string_mattrobenolt.reference | 0 ...y_element_nullable_string_mattrobenolt.sql | 0 .../00740_database_in_nested_view.reference | 0 .../00740_database_in_nested_view.sql | 0 ...40_optimize_predicate_expression.reference | 0 .../00740_optimize_predicate_expression.sql | 0 .../00741_client_comment_multiline.reference | 0 .../00741_client_comment_multiline.sql | 0 .../00742_require_join_strictness.reference | 0 .../00742_require_join_strictness.sql | 0 .../00743_limit_by_not_found_column.reference | 0 .../00743_limit_by_not_found_column.sql | 0 .../00744_join_not_found_column.reference | 0 .../00744_join_not_found_column.sql | 0 .../00745_compile_scalar_subquery.reference | 0 .../00745_compile_scalar_subquery.sql | 0 ...mpile_non_deterministic_function.reference | 0 ...746_compile_non_deterministic_function.sql | 0 .../00746_hashing_tuples.reference | 0 .../0_stateless/00746_hashing_tuples.sql | 0 .../queries/0_stateless/00746_sql_fuzzy.pl | 0 .../0_stateless/00746_sql_fuzzy.reference | 0 .../queries/0_stateless/00746_sql_fuzzy.sh | 0 .../0_stateless/00747_contributors.reference | 0 .../0_stateless/00747_contributors.sql | 0 .../00748_insert_array_with_null.reference | 0 .../00748_insert_array_with_null.sql | 0 ...inner_join_of_unnamed_subqueries.reference | 0 ...00749_inner_join_of_unnamed_subqueries.sql | 0 ...0_merge_tree_merge_with_o_direct.reference | 0 .../00750_merge_tree_merge_with_o_direct.sql | 0 ...51_default_databasename_for_view.reference | 0 .../00751_default_databasename_for_view.sql | 0 .../0_stateless/00751_hashing_ints.reference | 0 .../0_stateless/00751_hashing_ints.sql | 0 ...ow_cardinality_nullable_group_by.reference | 0 ...0751_low_cardinality_nullable_group_by.sql | 0 ...752_low_cardinality_array_result.reference | 0 .../00752_low_cardinality_array_result.sql | 0 ..._low_cardinality_lambda_argument.reference | 0 .../00752_low_cardinality_lambda_argument.sql | 0 ..._low_cardinality_left_array_join.reference | 0 .../00752_low_cardinality_left_array_join.sql | 0 .../00752_low_cardinality_mv_1.reference | 0 .../00752_low_cardinality_mv_1.sql | 0 .../00752_low_cardinality_mv_2.reference | 0 .../00752_low_cardinality_mv_2.sql | 0 .../00752_low_cardinality_permute.reference | 0 .../00752_low_cardinality_permute.sql | 0 .../0_stateless/00753_alter_attach.reference | 0 .../0_stateless/00753_alter_attach.sql | 0 ...r_destination_for_storage_buffer.reference | 0 ...3_alter_destination_for_storage_buffer.sql | 0 .../00753_comment_columns_zookeeper.reference | 0 .../00753_comment_columns_zookeeper.sql | 0 .../00753_quantile_format.reference | 0 .../0_stateless/00753_quantile_format.sql | 0 ...system_columns_and_system_tables.reference | 0 ...00753_system_columns_and_system_tables.sql | 0 .../00753_with_with_single_alias.reference | 0 .../00753_with_with_single_alias.sql | 0 ...4_alter_modify_column_partitions.reference | 0 .../00754_alter_modify_column_partitions.sql | 0 .../00754_alter_modify_order_by.reference | 0 .../00754_alter_modify_order_by.sql | 0 ...fy_order_by_replicated_zookeeper.reference | 0 ...r_modify_order_by_replicated_zookeeper.sql | 0 ...ize_skip_select_on_unused_shards.reference 
| 0 ...d_optimize_skip_select_on_unused_shards.sh | 0 ...t_on_unused_shards_with_prewhere.reference | 0 ...p_select_on_unused_shards_with_prewhere.sh | 0 ...first_significant_subdomain_more.reference | 0 ...00754_first_significant_subdomain_more.sql | 0 ...0755_avg_value_size_hint_passing.reference | 0 .../00755_avg_value_size_hint_passing.sql | 0 .../0_stateless/00756_power_alias.reference | 0 .../queries/0_stateless/00756_power_alias.sql | 0 .../0_stateless/00757_enum_defaults.reference | 0 .../0_stateless/00757_enum_defaults.sql | 0 .../0_stateless/00758_array_reverse.reference | 0 .../0_stateless/00758_array_reverse.sql | 0 .../0_stateless/00759_kodieg.reference | 0 .../queries/0_stateless/00759_kodieg.sql | 0 .../00760_insert_json_with_defaults.reference | 0 .../00760_insert_json_with_defaults.sql | 0 .../00760_url_functions_overflow.reference | 0 .../00760_url_functions_overflow.sql | 0 .../00761_lower_utf8_bug.reference | 0 .../0_stateless/00761_lower_utf8_bug.sql | 0 .../00762_date_comparsion.reference | 0 .../0_stateless/00762_date_comparsion.sql | 0 ...create_query_as_table_engine_bug.reference | 0 ...00763_create_query_as_table_engine_bug.sql | 0 .../0_stateless/00763_lock_buffer.reference | 0 .../queries/0_stateless/00763_lock_buffer.sh | 0 ...k_buffer_alter_destination_table.reference | 0 ...ong_lock_buffer_alter_destination_table.sh | 0 .../00764_max_query_size_allocation.reference | 0 .../00764_max_query_size_allocation.sh | 0 .../00765_sql_compatibility_aliases.reference | 0 .../00765_sql_compatibility_aliases.sql | 0 ...79_all_right_join_max_block_size.reference | 0 .../00779_all_right_join_max_block_size.sql | 0 .../00780_unaligned_array_join.reference | 0 .../00780_unaligned_array_join.sql | 0 ...alized_view_with_column_defaults.reference | 0 ...materialized_view_with_column_defaults.sql | 0 .../00799_function_dry_run.reference | 0 .../0_stateless/00799_function_dry_run.sql | 0 .../00800_function_java_hash.reference | 0 .../0_stateless/00800_function_java_hash.sql | 0 ...w_cardinality_array_group_by_arg.reference | 0 ...800_low_cardinality_array_group_by_arg.sql | 0 ...low_cardinality_distinct_numeric.reference | 0 ...00800_low_cardinality_distinct_numeric.sql | 0 ...w_cardinality_distributed_insert.reference | 0 ...800_low_cardinality_distributed_insert.sql | 0 ...0800_low_cardinality_empty_array.reference | 0 .../00800_low_cardinality_empty_array.sql | 0 .../00800_low_cardinality_join.reference | 0 .../00800_low_cardinality_join.sql | 0 .../00800_versatile_storage_join.reference | 0 .../00800_versatile_storage_join.sql | 0 ...light_saving_time_hour_underflow.reference | 0 ...01_daylight_saving_time_hour_underflow.sql | 0 ...time_shift_backwards_at_midnight.reference | 0 ...aving_time_shift_backwards_at_midnight.sql | 0 ...em_parts_with_datetime_partition.reference | 0 ...2_system_parts_with_datetime_partition.sql | 0 .../00803_odbc_driver_2_format.reference | Bin .../00803_odbc_driver_2_format.sql | 0 .../0_stateless/00803_xxhash.reference | 0 .../queries/0_stateless/00803_xxhash.sql | 0 .../00804_rollup_with_having.reference | 0 .../0_stateless/00804_rollup_with_having.sql | 0 ...04_test_alter_compression_codecs.reference | 0 .../00804_test_alter_compression_codecs.sql | 0 ...4_test_custom_compression_codecs.reference | 0 .../00804_test_custom_compression_codecs.sql | 0 ...m_compression_codes_log_storages.reference | 0 ..._custom_compression_codes_log_storages.sql | 0 ...804_test_delta_codec_compression.reference | 0 .../00804_test_delta_codec_compression.sql | 0 
...4_test_delta_codec_no_type_alter.reference | 0 .../00804_test_delta_codec_no_type_alter.sql | 0 .../0_stateless/00805_round_down.reference | 0 .../queries/0_stateless/00805_round_down.sql | 0 .../0_stateless/00806_alter_update.reference | 0 .../0_stateless/00806_alter_update.sql | 0 .../00807_regexp_quote_meta.reference | 0 .../0_stateless/00807_regexp_quote_meta.sql | 0 .../00808_array_enumerate_segfault.reference | 0 .../00808_array_enumerate_segfault.sql | 0 .../00808_not_optimize_predicate.reference | 0 .../00808_not_optimize_predicate.sql | 0 .../00809_add_days_segfault.reference | 0 .../0_stateless/00809_add_days_segfault.sql | 0 .../00810_in_operators_segfault.reference | 0 .../00810_in_operators_segfault.sql | 0 .../0_stateless/00811_garbage.reference | 0 .../queries/0_stateless/00811_garbage.sql | 0 .../00812_prewhere_alias_array.reference | 0 .../00812_prewhere_alias_array.sql | 0 ...parse_date_time_best_effort_more.reference | 0 ...00813_parse_date_time_best_effort_more.sql | 0 .../0_stateless/00814_parsing_ub.reference | 0 .../queries/0_stateless/00814_parsing_ub.sql | 0 ...nimalistic_part_header_zookeeper.reference | 0 ...ted_minimalistic_part_header_zookeeper.sql | 0 .../00815_left_join_on_stepanel.reference | 0 .../00815_left_join_on_stepanel.sql | 0 .../00816_join_column_names_sarg.reference | 0 .../00816_join_column_names_sarg.sql | 0 ...816_long_concurrent_alter_column.reference | 0 .../00816_long_concurrent_alter_column.sh | 0 .../0_stateless/00817_with_simple.reference | 0 .../queries/0_stateless/00817_with_simple.sql | 0 .../00818_alias_bug_4110.reference | 0 .../0_stateless/00818_alias_bug_4110.sql | 0 .../00818_inner_join_bug_3567.reference | 0 .../0_stateless/00818_inner_join_bug_3567.sql | 0 .../0_stateless/00818_join_bug_4271.reference | 0 .../0_stateless/00818_join_bug_4271.sql | 0 .../00819_ast_refactoring_bugs.reference | 0 .../00819_ast_refactoring_bugs.sql | 0 ...full_join_wrong_columns_in_block.reference | 0 ...00819_full_join_wrong_columns_in_block.sql | 0 .../00820_multiple_joins.reference | 0 .../0_stateless/00820_multiple_joins.sql | 0 ...le_joins_subquery_requires_alias.reference | 0 ...multiple_joins_subquery_requires_alias.sql | 0 ...distributed_storage_with_join_on.reference | 0 ...00821_distributed_storage_with_join_on.sql | 0 .../00822_array_insert_default.reference | 0 .../00822_array_insert_default.sql | 0 .../00823_capnproto_input.reference | 0 .../0_stateless/00823_capnproto_input.sh | 0 .../00823_sequence_match_dfa.reference | 0 .../0_stateless/00823_sequence_match_dfa.sql | 0 .../0_stateless/00824_filesystem.reference | 0 .../queries/0_stateless/00824_filesystem.sql | 0 .../00825_http_header_query_id.reference | 0 .../0_stateless/00825_http_header_query_id.sh | 0 .../0_stateless/00825_protobuf_format.proto | 0 .../00825_protobuf_format_input.insh | 0 .../00825_protobuf_format_input.reference | 0 .../00825_protobuf_format_input.sh | 0 .../00825_protobuf_format_output.reference | Bin .../00825_protobuf_format_output.sh | 0 .../00825_protobuf_format_syntax2.proto | 0 .../00826_cross_to_inner_join.reference | 0 .../0_stateless/00826_cross_to_inner_join.sql | 0 .../00829_bitmap_function.reference | 0 .../0_stateless/00829_bitmap_function.sql | 0 .../00830_join_overwrite.reference | 0 .../0_stateless/00830_join_overwrite.sql | 0 ...uantile_weighted_parameter_check.reference | 0 ...0831_quantile_weighted_parameter_check.sql | 0 .../00832_storage_file_lock.reference | 0 .../0_stateless/00832_storage_file_lock.sql | 0 
.../00833_sleep_overflow.reference | 0 .../0_stateless/00833_sleep_overflow.sql | 0 ...readonly_queries_on_client_close.reference | 0 ...l_http_readonly_queries_on_client_close.sh | 0 .../00834_date_datetime_cmp.reference | 0 .../0_stateless/00834_date_datetime_cmp.sql | 0 ...wo_configuration_files_in_client.reference | 0 ...o_set_two_configuration_files_in_client.sh | 0 ...34_hints_for_type_function_typos.reference | 0 .../00834_hints_for_type_function_typos.sh | 0 .../0_stateless/00834_kill_mutation.reference | 0 .../0_stateless/00834_kill_mutation.sh | 0 ...ll_mutation_replicated_zookeeper.reference | 0 ...0834_kill_mutation_replicated_zookeeper.sh | 0 ..._limit_with_constant_expressions.reference | 0 .../00834_limit_with_constant_expressions.sql | 0 .../0_stateless/00834_not_between.reference | 0 .../queries/0_stateless/00834_not_between.sql | 0 .../00835_if_generic_case.reference | 0 .../0_stateless/00835_if_generic_case.sql | 0 .../0_stateless/00836_indices_alter.reference | 0 .../0_stateless/00836_indices_alter.sql | 0 ...dices_alter_replicated_zookeeper.reference | 0 ...836_indices_alter_replicated_zookeeper.sql | 0 ...0836_numbers_table_function_zero.reference | 0 .../00836_numbers_table_function_zero.sql | 0 ...37_insert_select_and_read_prefix.reference | 0 .../00837_insert_select_and_read_prefix.sql | 0 .../0_stateless/00837_minmax_index.reference | 0 .../queries/0_stateless/00837_minmax_index.sh | 0 ...inmax_index_replicated_zookeeper.reference | 0 ...0837_minmax_index_replicated_zookeeper.sql | 0 ...38_system_tables_drop_table_race.reference | 0 .../00838_system_tables_drop_table_race.sh | 0 .../0_stateless/00838_unique_index.reference | 0 .../queries/0_stateless/00838_unique_index.sh | 0 .../00839_bitmask_negative.reference | 0 .../0_stateless/00839_bitmask_negative.sql | 0 ...current_select_and_drop_deadlock.reference | 0 ...ong_concurrent_select_and_drop_deadlock.sh | 0 .../00840_top_k_weighted.reference | 0 .../0_stateless/00840_top_k_weighted.sql | 0 .../00841_temporary_table_database.reference | 0 .../00841_temporary_table_database.sql | 0 ...842_array_with_constant_overflow.reference | 0 .../00842_array_with_constant_overflow.sql | 0 ...imize_predicate_and_rename_table.reference | 0 ...43_optimize_predicate_and_rename_table.sql | 0 .../0_stateless/00844_join_lightee2.reference | 0 .../0_stateless/00844_join_lightee2.sql | 0 .../00845_join_on_aliases.reference | 0 .../0_stateless/00845_join_on_aliases.sql | 0 .../00846_join_using_tuple_crash.reference | 0 .../00846_join_using_tuple_crash.sql | 0 .../00847_multiple_join_same_column.reference | 0 .../00847_multiple_join_same_column.sql | 0 .../00848_join_use_nulls_segfault.reference | 0 .../00848_join_use_nulls_segfault.sql | 0 .../00849_multiple_comma_join.reference | 0 .../0_stateless/00849_multiple_comma_join.sql | 0 .../00849_multiple_comma_join_2.reference | 0 .../00849_multiple_comma_join_2.sql | 0 .../00850_global_join_dups.reference | 0 .../0_stateless/00850_global_join_dups.sql | 0 .../00851_http_insert_json_defaults.reference | 0 .../00851_http_insert_json_defaults.sh | 0 .../00852_any_join_nulls.reference | 0 .../0_stateless/00852_any_join_nulls.sql | 0 .../00853_join_with_nulls_crash.reference | 0 .../00853_join_with_nulls_crash.sql | 0 .../00854_multiple_join_asterisks.reference | 0 .../00854_multiple_join_asterisks.sql | 0 .../00855_join_with_array_join.reference | 0 .../00855_join_with_array_join.sql | 0 .../00856_no_column_issue_4242.reference | 0 .../00856_no_column_issue_4242.sql | 0 
...857_global_joinsavel_table_alias.reference | 0 .../00857_global_joinsavel_table_alias.sql | 0 .../0_stateless/00858_issue_4756.reference | 0 .../queries/0_stateless/00858_issue_4756.sql | 0 .../00859_distinct_with_join.reference | 0 .../0_stateless/00859_distinct_with_join.sql | 0 .../00860_unknown_identifier_bug.reference | 0 .../00860_unknown_identifier_bug.sql | 0 .../00861_decimal_quoted_csv.reference | 0 .../0_stateless/00861_decimal_quoted_csv.sql | 0 .../0_stateless/00862_decimal_in.reference | 0 .../queries/0_stateless/00862_decimal_in.sql | 0 .../0_stateless/00863_comma_join_in.reference | 0 .../0_stateless/00863_comma_join_in.sql | 0 .../00864_union_all_supertype.reference | 0 .../0_stateless/00864_union_all_supertype.sql | 0 .../0_stateless/00870_t64_codec.reference | 0 .../queries/0_stateless/00870_t64_codec.sql | 0 .../00871_t64_codec_signed.reference | 0 .../0_stateless/00871_t64_codec_signed.sql | 0 .../0_stateless/00872_t64_bit_codec.reference | 0 .../0_stateless/00872_t64_bit_codec.sql | 0 .../0_stateless/00874_issue_3495.reference | 0 .../queries/0_stateless/00874_issue_3495.sql | 0 .../00875_join_right_nulls.reference | 0 .../0_stateless/00875_join_right_nulls.sql | 0 .../00876_wrong_arraj_join_column.reference | 0 .../00876_wrong_arraj_join_column.sql | 0 ...0877_memory_limit_for_new_delete.reference | 0 .../00877_memory_limit_for_new_delete.sql | 0 .../00878_join_unexpected_results.reference | 0 .../00878_join_unexpected_results.sql | 0 .../00879_cast_to_decimal_crash.reference | 0 .../00879_cast_to_decimal_crash.sql | 0 .../00880_decimal_in_key.reference | 0 .../0_stateless/00880_decimal_in_key.sql | 0 .../00881_unknown_identifier_in_in.reference | 0 .../00881_unknown_identifier_in_in.sql | 0 .../00882_multiple_join_no_alias.reference | 0 .../00882_multiple_join_no_alias.sql | 0 .../0_stateless/00897_flatten.reference | 0 .../queries/0_stateless/00897_flatten.sql | 0 ...8_parsing_bad_diagnostic_message.reference | 0 .../00898_parsing_bad_diagnostic_message.sh | 0 ..._quantile_timing_parameter_check.reference | 0 .../00898_quantile_timing_parameter_check.sql | 0 .../00899_long_attach_memory_limit.reference | 0 .../00899_long_attach_memory_limit.sql | 0 .../0_stateless/00900_entropy_shard.reference | 0 .../0_stateless/00900_entropy_shard.sql | 0 .../0_stateless/00900_orc_load.reference | 0 .../queries/0_stateless/00900_orc_load.sh | 0 .../0_stateless/00900_parquet.reference | 0 .../queries/0_stateless/00900_parquet.sh | 0 .../00900_parquet_create_table_columns.pl | 0 .../00900_parquet_decimal.reference | 0 .../0_stateless/00900_parquet_decimal.sh | 0 .../0_stateless/00900_parquet_load.reference | 0 .../queries/0_stateless/00900_parquet_load.sh | 0 .../0_stateless/00901_joint_entropy.reference | 0 .../0_stateless/00901_joint_entropy.sql | 0 .../0_stateless/00902_entropy.reference | 0 .../queries/0_stateless/00902_entropy.sql | 0 ...903_array_with_constant_function.reference | 0 .../00903_array_with_constant_function.sql | 0 .../00904_array_with_constant_2.reference | 0 .../00904_array_with_constant_2.sql | 0 ...le_expressions_compare_big_dates.reference | 0 ..._compile_expressions_compare_big_dates.sql | 0 ...ld_with_aggregate_function_state.reference | 0 ...05_field_with_aggregate_function_state.sql | 0 .../00906_low_cardinality_cache.reference | 0 .../00906_low_cardinality_cache.sql | 0 ...6_low_cardinality_const_argument.reference | 0 .../00906_low_cardinality_const_argument.sql | 0 .../00906_low_cardinality_rollup.reference | 0 .../00906_low_cardinality_rollup.sql | 0
.../00907_set_index_max_rows.reference | 0 .../0_stateless/00907_set_index_max_rows.sh | 0 ...ith_nullable_and_low_cardinality.reference | 0 ...ndex_with_nullable_and_low_cardinality.sql | 0 ...nullable_and_low_cardinality_bug.reference | 0 ..._with_nullable_and_low_cardinality_bug.sql | 0 .../0_stateless/00908_analyze_query.reference | 0 .../0_stateless/00908_analyze_query.sql | 0 .../00908_bloom_filter_index.reference | 0 .../0_stateless/00908_bloom_filter_index.sh | 0 .../00908_long_http_insert.reference | 0 .../0_stateless/00908_long_http_insert.sh | 0 .../00909_arrayEnumerateUniq.reference | 0 .../0_stateless/00909_arrayEnumerateUniq.sql | 0 ...00909_kill_not_initialized_query.reference | 0 .../00909_kill_not_initialized_query.sh | 0 .../00909_ngram_distance.reference | 0 .../0_stateless/00909_ngram_distance.sql | 0 ...0_aggregation_timeseriesgroupsum.reference | 0 .../00910_aggregation_timeseriesgroupsum.sql | 0 .../00910_buffer_prewhere.reference | 0 .../0_stateless/00910_buffer_prewhere.sql | 0 ...910_client_window_size_detection.reference | 0 .../00910_client_window_size_detection.sh | 0 ...when_distributed_modify_order_by.reference | 0 ...crash_when_distributed_modify_order_by.sql | 0 ...0_decimal_group_array_crash_3783.reference | 0 .../00910_decimal_group_array_crash_3783.sql | 0 ...om_compression_codecs_replicated.reference | 0 ...r_custom_compression_codecs_replicated.sql | 0 ...er_test_alter_compression_codecs.reference | 0 ...ookeeper_test_alter_compression_codecs.sql | 0 .../00911_tautological_compare.reference | 0 .../00911_tautological_compare.sql | 0 .../00912_string_comparison.reference | 0 .../0_stateless/00912_string_comparison.sql | 0 .../0_stateless/00913_many_threads.reference | 0 .../0_stateless/00913_many_threads.sql | 0 .../0_stateless/00914_join_bgranvea.reference | 0 .../0_stateless/00914_join_bgranvea.sql | 0 .../0_stateless/00914_replicate.reference | 0 .../queries/0_stateless/00914_replicate.sql | 0 .../00915_simple_aggregate_function.reference | 0 .../00915_simple_aggregate_function.sql | 0 .../00915_tuple_orantius.reference | 0 .../0_stateless/00915_tuple_orantius.sql | 0 ...16_add_materialized_column_after.reference | 0 .../00916_add_materialized_column_after.sql | 0 .../00916_create_or_replace_view.reference | 0 .../00916_create_or_replace_view.sql | 0 ...916_join_using_duplicate_columns.reference | 0 .../00916_join_using_duplicate_columns.sql | 0 .../0_stateless/00917_least_sqr.reference | 0 .../queries/0_stateless/00917_least_sqr.sql | 0 ...00917_multiple_joins_denny_crane.reference | 0 .../00917_multiple_joins_denny_crane.sql | 0 ...0918_has_unsufficient_type_check.reference | 0 .../00918_has_unsufficient_type_check.sql | 0 .../00918_json_functions.reference | 0 .../0_stateless/00918_json_functions.sql | 0 .../00919_histogram_merge.reference | 0 .../0_stateless/00919_histogram_merge.sql | 0 ...9_sum_aggregate_states_constants.reference | 0 .../00919_sum_aggregate_states_constants.sql | 0 ...tiply_aggregate_states_constants.reference | 0 ...20_multiply_aggregate_states_constants.sql | 0 .../00921_datetime64_basic.reference | 0 .../0_stateless/00921_datetime64_basic.sql | 0 .../00921_datetime64_compatibility.python | 0 .../00921_datetime64_compatibility.reference | 0 .../00921_datetime64_compatibility.sh | 0 ...icated_merge_tree_optimize_final.reference | 0 ...y_replicated_merge_tree_optimize_final.sql | 0 ...ranularity_collapsing_merge_tree.reference | 0 ...ndex_granularity_collapsing_merge_tree.sql | 0
...ive_index_granularity_merge_tree.reference | 0 ..._adaptive_index_granularity_merge_tree.sql | 0 ...26_adaptive_index_granularity_pk.reference | 0 .../00926_adaptive_index_granularity_pk.sql | 0 ...granularity_replacing_merge_tree.reference | 0 ...index_granularity_replacing_merge_tree.sql | 0 ..._versioned_collapsing_merge_tree.reference | 0 ...larity_versioned_collapsing_merge_tree.sql | 0 .../0_stateless/00926_geo_to_h3.reference | 0 .../queries/0_stateless/00926_geo_to_h3.sql | 0 .../0_stateless/00926_multimatch.reference | 0 .../queries/0_stateless/00926_multimatch.sql | 0 ...ranularity_replicated_merge_tree.reference | 0 ...ndex_granularity_replicated_merge_tree.sql | 0 .../00927_asof_join_correct_bt.reference | 0 .../00927_asof_join_correct_bt.sql | 0 .../00927_asof_join_long.reference | 0 .../0_stateless/00927_asof_join_long.sql | 0 .../00927_asof_join_noninclusive.reference | 0 .../00927_asof_join_noninclusive.sql | 0 .../00927_asof_join_other_types.reference | 0 .../00927_asof_join_other_types.sh | 0 .../0_stateless/00927_asof_joins.reference | 0 .../queries/0_stateless/00927_asof_joins.sql | 0 .../00927_disable_hyperscan.reference | 0 .../0_stateless/00927_disable_hyperscan.sql | 0 ...28_multi_match_constant_constant.reference | 0 .../00928_multi_match_constant_constant.sql | 0 .../00929_multi_match_edit_distance.reference | 0 .../00929_multi_match_edit_distance.sql | 0 .../00930_arrayIntersect.reference | 0 .../0_stateless/00930_arrayIntersect.sql | 0 ..._max_partitions_per_insert_block.reference | 0 .../00930_max_partitions_per_insert_block.sql | 0 ...nullable_aggregate_function_type.reference | 0 ...ality_nullable_aggregate_function_type.sql | 0 ...ardinality_read_with_empty_array.reference | 0 ..._low_cardinality_read_with_empty_array.sql | 0 ...ality_set_index_in_key_condition.reference | 0 ...cardinality_set_index_in_key_condition.sql | 0 .../00932_array_intersect_bug.reference | 0 .../0_stateless/00932_array_intersect_bug.sql | 0 .../00932_geohash_support.reference | 0 .../0_stateless/00932_geohash_support.sql | 0 .../0_stateless/00933_alter_ttl.reference | 0 .../queries/0_stateless/00933_alter_ttl.sql | 0 .../0_stateless/00933_reserved_word.reference | 0 .../0_stateless/00933_reserved_word.sql | 0 ...x_extra_seek_on_compressed_cache.reference | 0 ...test_fix_extra_seek_on_compressed_cache.sh | 0 .../00933_ttl_replicated_zookeeper.reference | 0 .../00933_ttl_replicated_zookeeper.sql | 0 .../0_stateless/00933_ttl_simple.reference | 0 .../queries/0_stateless/00933_ttl_simple.sql | 0 .../00933_ttl_with_default.reference | 0 .../0_stateless/00933_ttl_with_default.sql | 0 .../0_stateless/00934_is_valid_utf8.reference | 0 .../0_stateless/00934_is_valid_utf8.sql | 0 .../00935_to_iso_week_first_year.reference | 0 .../00935_to_iso_week_first_year.sql | 0 .../0_stateless/00936_crc_functions.reference | 0 .../0_stateless/00936_crc_functions.sql | 0 ...function_result_with_operator_in.reference | 0 ...00936_function_result_with_operator_in.sql | 0 .../00936_substring_utf8_non_const.reference | 0 .../00936_substring_utf8_non_const.sql | 0 .../00937_ipv4_cidr_range.reference | 0 .../0_stateless/00937_ipv4_cidr_range.sql | 0 .../00937_template_output_format.reference | 0 .../00937_template_output_format.sh | 0 .../00937_test_use_header_csv.reference | 0 .../0_stateless/00937_test_use_header_csv.sh | 0 .../00937_test_use_header_tsv.reference | 0 .../0_stateless/00937_test_use_header_tsv.sh | 0 .../0_stateless/00938_basename.reference | 0 .../queries/0_stateless/00938_basename.sql | 0 
.../0_stateless/00938_dataset_test.reference | 0 .../0_stateless/00938_dataset_test.sql | 0 .../00938_fix_rwlock_segfault.reference | 0 .../0_stateless/00938_fix_rwlock_segfault.sh | 0 .../00938_ipv6_cidr_range.reference | 0 .../0_stateless/00938_ipv6_cidr_range.sql | 0 .../00938_template_input_format.reference | 0 .../00938_template_input_format.sh | 0 .../00938_test_retention_function.reference | 0 .../00938_test_retention_function.sql | 0 .../00939_limit_by_offset.reference | 0 .../0_stateless/00939_limit_by_offset.sql | 0 .../0_stateless/00939_test_null_in.reference | 0 .../0_stateless/00939_test_null_in.sql | 0 .../00940_max_parts_in_total.reference | 0 .../0_stateless/00940_max_parts_in_total.sql | 0 .../00940_order_by_read_in_order.reference | 0 .../00940_order_by_read_in_order.sql | 0 ...41_system_columns_race_condition.reference | 0 .../00941_system_columns_race_condition.sh | 0 .../00941_to_custom_week.reference | 0 .../0_stateless/00941_to_custom_week.sql | 0 .../0_stateless/00942_dataparts_500.reference | 0 .../0_stateless/00942_dataparts_500.sh | 0 .../0_stateless/00942_mutate_index.reference | 0 .../queries/0_stateless/00942_mutate_index.sh | 0 .../00942_mv_rename_table.reference | 0 .../0_stateless/00942_mv_rename_table.sql | 0 .../00943_materialize_index.reference | 0 .../0_stateless/00943_materialize_index.sh | 0 ...43_mv_rename_without_inner_table.reference | 0 .../00943_mv_rename_without_inner_table.sql | 0 .../00944_clear_index_in_partition.reference | 0 .../00944_clear_index_in_partition.sh | 0 ...oom_filter_index_with_merge_tree.reference | 0 ...eate_bloom_filter_index_with_merge_tree.sh | 0 .../0_stateless/00944_minmax_null.reference | 0 .../queries/0_stateless/00944_minmax_null.sql | 0 .../0_stateless/00944_ml_test.reference | 0 .../queries/0_stateless/00944_ml_test.sql | 0 .../00945_bloom_filter_index.reference | 0 .../0_stateless/00945_bloom_filter_index.sql | 0 .../0_stateless/00945_ml_test.reference | 0 .../queries/0_stateless/00945_ml_test.sql | 0 .../0_stateless/00946_ml_test.reference | 0 .../queries/0_stateless/00946_ml_test.sql | 0 .../0_stateless/00947_ml_test.reference | 0 .../queries/0_stateless/00947_ml_test.sql | 0 ...48_format_in_with_single_element.reference | 0 .../00948_format_in_with_single_element.sh | 0 .../0_stateless/00948_to_valid_utf8.reference | 0 .../0_stateless/00948_to_valid_utf8.sql | 0 ...0948_values_interpreter_template.reference | 0 .../00948_values_interpreter_template.sql | 0 .../0_stateless/00949_format.reference | 0 .../queries/0_stateless/00949_format.sql | 0 ...alloc_when_truncate_join_storage.reference | 0 ...0_bad_alloc_when_truncate_join_storage.sql | 0 .../00950_default_prewhere.reference | 0 .../0_stateless/00950_default_prewhere.sql | 0 .../0_stateless/00950_dict_get.reference | 0 .../queries/0_stateless/00950_dict_get.sql | 2 +- .../00950_test_double_delta_codec.reference | 0 .../00950_test_double_delta_codec.sql | 0 .../00950_test_gorilla_codec.reference | 0 .../0_stateless/00950_test_gorilla_codec.sql | 0 .../0_stateless/00951_ngram_search.reference | 0 .../0_stateless/00951_ngram_search.sql | 0 .../00952_basic_constraints.reference | 0 .../0_stateless/00952_basic_constraints.sh | 0 .../00952_input_function.reference | 0 .../0_stateless/00952_input_function.sh | 0 ...ributed_with_materialized_column.reference | 0 ...o_distributed_with_materialized_column.sql | 0 .../00952_part_frozen_info.reference | 0 .../0_stateless/00952_part_frozen_info.sql | 0 .../00953_constraints_operations.reference | 0 
.../00953_constraints_operations.sh | 0 .../00953_indices_alter_exceptions.reference | 0 .../00953_indices_alter_exceptions.sh | 0 .../00953_moving_functions.reference | 0 .../0_stateless/00953_moving_functions.sql | 0 ...okeeper_suetin_deduplication_bug.reference | 0 ...0953_zookeeper_suetin_deduplication_bug.sh | 0 ...00954_client_prepared_statements.reference | 0 .../00954_client_prepared_statements.sh | 0 .../00954_resample_combinator.reference | 0 .../0_stateless/00954_resample_combinator.sql | 0 ...0955_complex_prepared_statements.reference | 0 .../00955_complex_prepared_statements.sh | 0 .../00955_test_final_mark.reference | 0 .../0_stateless/00955_test_final_mark.sql | 0 .../00955_test_final_mark_use.reference | 0 .../0_stateless/00955_test_final_mark_use.sh | 0 .../00956_http_prepared_statements.reference | 0 .../00956_http_prepared_statements.sh | 0 ...join_use_nulls_with_array_column.reference | 0 ...00956_join_use_nulls_with_array_column.sql | 0 .../00956_sensitive_data_masking.reference | 0 .../00956_sensitive_data_masking.sh | 4 +- ...57_coalesce_const_nullable_crash.reference | 0 .../00957_coalesce_const_nullable_crash.sql | 0 .../00957_delta_diff_bug.reference | 0 .../0_stateless/00957_delta_diff_bug.sql | 0 ...0957_format_with_clashed_aliases.reference | 0 .../00957_format_with_clashed_aliases.sh | 0 .../0_stateless/00957_neighbor.reference | 0 .../queries/0_stateless/00957_neighbor.sql | 0 ...58_format_of_tuple_array_element.reference | 0 .../00958_format_of_tuple_array_element.sh | 0 ...59_format_with_different_aliases.reference | 0 .../00959_format_with_different_aliases.sh | 0 .../00960_eval_ml_method_const.reference | 0 .../00960_eval_ml_method_const.sql | 0 .../00960_live_view_watch_events_live.py | 0 ...0960_live_view_watch_events_live.reference | 0 .../0_stateless/00961_check_table.reference | 0 .../queries/0_stateless/00961_check_table.sql | 0 ...ms_in_system_parts_columns_table.reference | 0 ...hecksums_in_system_parts_columns_table.sql | 0 .../00961_temporary_live_view_watch.reference | 0 .../00961_temporary_live_view_watch.sql | 0 ...961_visit_param_buffer_underflow.reference | 0 .../00961_visit_param_buffer_underflow.sql | 0 .../0_stateless/00962_enumNotExect.reference | 0 .../0_stateless/00962_enumNotExect.sql | 0 .../00962_temporary_live_view_watch_live.py | 0 ...2_temporary_live_view_watch_live.reference | 0 .../00962_visit_param_various.reference | 0 .../0_stateless/00962_visit_param_various.sql | 0 .../0_stateless/00963_achimbab.reference | 0 .../queries/0_stateless/00963_achimbab.sql | 0 ...963_startsWith_force_primary_key.reference | 0 .../00963_startsWith_force_primary_key.sql | 0 ...y_live_view_watch_live_timeout.py.disabled | 0 ...ary_live_view_watch_live_timeout.reference | 0 ...964_bloom_index_string_functions.reference | 0 .../00964_bloom_index_string_functions.sh | 0 .../00964_live_view_watch_events_heartbeat.py | 0 ...live_view_watch_events_heartbeat.reference | 0 .../00964_os_thread_priority.reference | 0 .../0_stateless/00964_os_thread_priority.sql | 0 .../00965_live_view_watch_heartbeat.py | 0 .../00965_live_view_watch_heartbeat.reference | 0 .../00965_logs_level_bugfix.reference | 0 .../0_stateless/00965_logs_level_bugfix.sh | 0 ...nd_logs_level_concurrent_queries.reference | 0 ...0965_send_logs_level_concurrent_queries.sh | 0 ...00965_set_index_string_functions.reference | 0 .../00965_set_index_string_functions.sh | 0 ...965_shard_unresolvable_addresses.reference | 0 .../00965_shard_unresolvable_addresses.sql | 0 
...0966_invalid_json_must_not_parse.reference | 0 .../00966_invalid_json_must_not_parse.sql | 0 .../00966_live_view_watch_events_http.py | 0 ...0966_live_view_watch_events_http.reference | 0 .../0_stateless/00967_live_view_watch_http.py | 0 .../00967_live_view_watch_http.reference | 0 .../00967_ubsan_bit_test.reference | 0 .../0_stateless/00967_ubsan_bit_test.sql | 0 .../00968_file_engine_in_subquery.reference | 0 .../00968_file_engine_in_subquery.sql | 0 ...t_format_jsoneachrowwithprogress.reference | 0 ..._select_format_jsoneachrowwithprogress.sql | 0 .../0_stateless/00968_roundAge.reference | 0 .../queries/0_stateless/00968_roundAge.sql | 0 .../00969_columns_clause.reference | 0 .../0_stateless/00969_columns_clause.sql | 0 ...h_format_jsoneachrowwithprogress.reference | 0 ...w_watch_format_jsoneachrowwithprogress.sql | 0 .../0_stateless/00969_roundDuration.reference | 0 .../0_stateless/00969_roundDuration.sql | 0 ...0_live_view_watch_events_http_heartbeat.py | 0 ...view_watch_events_http_heartbeat.reference | 0 .../00970_substring_arg_validation.reference | 0 .../00970_substring_arg_validation.sql | 0 .../00971_live_view_watch_http_heartbeat.py | 0 ...1_live_view_watch_http_heartbeat.reference | 0 ...istribution_and_max_rows_to_read.reference | 0 ...read_distribution_and_max_rows_to_read.sql | 0 .../00971_query_id_in_logs.reference | 0 .../0_stateless/00971_query_id_in_logs.sh | 0 ...00972_desc_table_virtual_columns.reference | 0 .../00972_desc_table_virtual_columns.sql | 0 .../00972_geohashesInBox.reference | 0 .../0_stateless/00972_geohashesInBox.sql | 0 .../00972_live_view_select_1.reference | 0 .../0_stateless/00972_live_view_select_1.sql | 0 ...3_create_table_as_table_function.reference | 0 .../00973_create_table_as_table_function.sql | 0 .../00973_live_view_select.reference | 0 .../0_stateless/00973_live_view_select.sql | 0 ...3_live_view_with_subquery_select.reference | 0 .../00973_live_view_with_subquery_select.sql | 0 ...e_view_with_subquery_select_join.reference | 0 ...73_live_view_with_subquery_select_join.sql | 0 ...th_subquery_select_join_no_alias.reference | 0 ...iew_with_subquery_select_join_no_alias.sql | 0 ...view_with_subquery_select_nested.reference | 0 ..._live_view_with_subquery_select_nested.sql | 0 ...y_select_nested_with_aggregation.reference | 0 ...ubquery_select_nested_with_aggregation.sql | 0 ...ted_with_aggregation_table_alias.reference | 0 ...ct_nested_with_aggregation_table_alias.sql | 0 ...with_subquery_select_table_alias.reference | 0 ..._view_with_subquery_select_table_alias.sql | 0 ...subquery_select_with_aggregation.reference | 0 ..._with_subquery_select_with_aggregation.sql | 0 ...ect_with_aggregation_in_subquery.reference | 0 ...ry_select_with_aggregation_in_subquery.sql | 0 .../00973_uniq_non_associativity.reference | 0 .../00973_uniq_non_associativity.sql | 0 ...tive_granularity_secondary_index.reference | 0 ...4_adaptive_granularity_secondary_index.sql | 0 ..._bitmapContains_with_primary_key.reference | 0 .../00974_bitmapContains_with_primary_key.sql | 0 .../00974_distributed_join_on.reference | 0 .../0_stateless/00974_distributed_join_on.sql | 0 .../00974_final_predicate_push_down.reference | 0 .../00974_final_predicate_push_down.sql | 0 .../0_stateless/00974_fix_join_on.reference | 0 .../queries/0_stateless/00974_fix_join_on.sql | 0 .../00974_full_outer_join.reference | 0 .../0_stateless/00974_full_outer_join.sql | 0 ...ive_view_select_with_aggregation.reference | 0 ...0974_live_view_select_with_aggregation.sql | 0 
.../00974_low_cardinality_cast.reference | 0 .../00974_low_cardinality_cast.sql | 0 ...4_primary_key_for_lowCardinality.reference | 0 .../00974_primary_key_for_lowCardinality.sh | 0 .../00974_query_profiler.reference | 0 .../0_stateless/00974_query_profiler.sql | 0 .../00974_text_log_table_not_empty.reference | 0 .../00974_text_log_table_not_empty.sh | 0 ...es_mutation_replicated_zookeeper.reference | 0 ...5_indices_mutation_replicated_zookeeper.sh | 0 .../0_stateless/00975_json_hang.reference | 0 .../queries/0_stateless/00975_json_hang.sql | 0 .../00975_live_view_create.reference | 0 .../0_stateless/00975_live_view_create.sql | 0 .../00975_move_partition_merge_tree.reference | 0 .../00975_move_partition_merge_tree.sql | 0 ...0975_recursive_materialized_view.reference | 0 .../00975_recursive_materialized_view.sql | 0 ...0975_sample_prewhere_distributed.reference | 0 .../00975_sample_prewhere_distributed.sql | 0 .../0_stateless/00975_values_list.reference | 0 .../queries/0_stateless/00975_values_list.sql | 0 .../0_stateless/00976_asof_join_on.reference | 0 .../0_stateless/00976_asof_join_on.sql | 0 .../00976_live_view_select_version.reference | 0 .../00976_live_view_select_version.sql | 0 .../00976_max_execution_speed.reference | 0 .../0_stateless/00976_max_execution_speed.sql | 0 ...6_shard_low_cardinality_achimbab.reference | 0 .../00976_shard_low_cardinality_achimbab.sql | 0 .../00976_system_stop_ttl_merges.reference | 0 .../00976_system_stop_ttl_merges.sql | 0 .../00976_ttl_with_old_parts.reference | 0 .../0_stateless/00976_ttl_with_old_parts.sql | 0 .../0_stateless/00977_int_div.reference | 0 .../queries/0_stateless/00977_int_div.sql | 0 ...00977_join_use_nulls_denny_crane.reference | 0 .../00977_join_use_nulls_denny_crane.sql | 0 .../00977_live_view_watch_events.reference | 0 .../00977_live_view_watch_events.sql | 0 .../00978_live_view_watch.reference | 0 .../0_stateless/00978_live_view_watch.sql | 0 .../0_stateless/00978_ml_math.reference | 0 .../queries/0_stateless/00978_ml_math.sql | 0 .../00978_sum_map_bugfix.reference | 0 .../0_stateless/00978_sum_map_bugfix.sql | 0 ...0978_table_function_values_alias.reference | 0 .../00978_table_function_values_alias.sql | 0 .../0_stateless/00979_live_view_watch_live.py | 0 .../00979_live_view_watch_live.reference | 0 .../00979_live_view_watch_live_moving_avg.py | 0 ..._live_view_watch_live_moving_avg.reference | 0 ...0979_live_view_watch_live_with_subquery.py | 0 ...ve_view_watch_live_with_subquery.reference | 0 ...tileExcatExclusive_and_Inclusive.reference | 0 ...9_quantileExcatExclusive_and_Inclusive.sql | 0 .../0_stateless/00979_set_index_not.reference | 0 .../0_stateless/00979_set_index_not.sql | 0 .../00979_toFloat_monotonicity.reference | 0 .../00979_toFloat_monotonicity.sql | 0 ...00979_yandex_consistent_hash_fpe.reference | 0 .../00979_yandex_consistent_hash_fpe.sql | 0 .../00980_alter_settings_race.reference | 0 .../0_stateless/00980_alter_settings_race.sh | 0 .../00980_crash_nullable_decimal.reference | 0 .../00980_crash_nullable_decimal.sql | 0 ...00980_create_temporary_live_view.reference | 0 .../00980_create_temporary_live_view.sql | 0 .../00980_full_join_crash_fancyqlx.reference | 0 .../00980_full_join_crash_fancyqlx.sql | 0 .../00980_merge_alter_settings.reference | 0 .../00980_merge_alter_settings.sql | 0 ...ggregation_state_deserialization.reference | 0 ...hard_aggregation_state_deserialization.sql | 0 ...used_shards_without_sharding_key.reference | 0 ...kip_unused_shards_without_sharding_key.sql | 0 
...keeper_merge_tree_alter_settings.reference | 0 ...80_zookeeper_merge_tree_alter_settings.sql | 0 .../00981_in_subquery_with_tuple.reference | 0 .../00981_in_subquery_with_tuple.sh | 0 .../00981_no_virtual_columns.reference | 0 .../0_stateless/00981_no_virtual_columns.sql | 0 .../00981_topK_topKWeighted_long.reference | 0 .../00981_topK_topKWeighted_long.sql | 0 ...0982_array_enumerate_uniq_ranked.reference | 0 .../00982_array_enumerate_uniq_ranked.sql | 0 ...82_low_cardinality_setting_in_mv.reference | 0 .../00982_low_cardinality_setting_in_mv.sql | 0 ...ing_merge_tree_not_an_identifier.reference | 0 ...3_summing_merge_tree_not_an_identifier.sql | 0 ...984_materialized_view_to_columns.reference | 0 .../00984_materialized_view_to_columns.sql | 0 .../00984_parser_stack_overflow.reference | 0 .../00984_parser_stack_overflow.sh | 0 .../00985_merge_stack_overflow.reference | 0 .../00985_merge_stack_overflow.sql | 0 ...materialized_view_stack_overflow.reference | 0 ...00986_materialized_view_stack_overflow.sql | 0 ...00987_distributed_stack_overflow.reference | 0 .../00987_distributed_stack_overflow.sql | 0 ...onstraints_replication_zookeeper.reference | 0 ...0988_constraints_replication_zookeeper.sql | 0 .../00988_expansion_aliases_limit.reference | 0 .../00988_expansion_aliases_limit.sql | 0 .../00988_parallel_parts_removal.reference | 0 .../00988_parallel_parts_removal.sql | 0 .../00989_parallel_parts_loading.reference | 0 .../00989_parallel_parts_loading.sql | 0 .../00990_function_current_user.reference | 0 .../00990_function_current_user.sql | 0 .../queries/0_stateless/00990_hasToken.python | 0 .../0_stateless/00990_hasToken.reference | 0 .../queries/0_stateless/00990_hasToken.sh | 0 .../00990_hasToken_and_tokenbf.reference | 0 .../00990_hasToken_and_tokenbf.sql | 0 ...00990_metric_log_table_not_empty.reference | 0 .../00990_metric_log_table_not_empty.sql | 0 .../00990_request_splitting.reference | 0 .../0_stateless/00990_request_splitting.sql | 0 .../00991_live_view_watch_event_live.python | 0 ...00991_live_view_watch_event_live.reference | 0 ...991_live_view_watch_event_live.sh.disabled | 0 .../00991_live_view_watch_http.python | 0 .../00991_live_view_watch_http.reference | 0 .../00991_live_view_watch_http.sh.disabled | 0 ...0991_system_parts_race_condition.reference | 0 .../00991_system_parts_race_condition.sh | 0 ...ry_live_view_watch_events_heartbeat.python | 0 ...live_view_watch_events_heartbeat.reference | 0 ...ve_view_watch_events_heartbeat.sh.disabled | 0 ...0991_temporary_live_view_watch_live.python | 0 ...1_temporary_live_view_watch_live.reference | 0 ...temporary_live_view_watch_live.sh.disabled | 0 ...m_parts_race_condition_zookeeper.reference | 0 ...2_system_parts_race_condition_zookeeper.sh | 0 ...ts_race_condition_drop_zookeeper.reference | 0 ...tem_parts_race_condition_drop_zookeeper.sh | 0 .../00994_table_function_numbers_mt.reference | 0 .../00994_table_function_numbers_mt.sql | 0 .../00995_exception_while_insert.reference | 0 .../00995_exception_while_insert.sh | 0 ...e_read_in_order_with_aggregation.reference | 0 ...ptimize_read_in_order_with_aggregation.sql | 0 .../00995_order_by_with_fill.reference | 0 .../0_stateless/00995_order_by_with_fill.sql | 0 .../00996_limit_with_ties.reference | 0 .../0_stateless/00996_limit_with_ties.sql | 0 .../0_stateless/00996_neighbor.reference | 0 .../queries/0_stateless/00996_neighbor.sql | 0 .../00997_extract_all_crash_6627.reference | 0 .../00997_extract_all_crash_6627.sql | 0 .../00997_set_index_array.reference | 0 
.../0_stateless/00997_set_index_array.sql | 0 .../queries/0_stateless/00997_trim.reference | 0 .../queries/0_stateless/00997_trim.sql | 0 .../00998_constraints_all_tables.reference | 0 .../00998_constraints_all_tables.sql | 0 .../00999_full_join_dup_keys_crash.reference | 0 .../00999_full_join_dup_keys_crash.sql | 0 .../00999_join_not_nullable_types.reference | 0 .../00999_join_not_nullable_types.sql | 0 .../00999_join_on_expression.reference | 0 .../0_stateless/00999_join_on_expression.sql | 0 ...00999_nullable_nested_types_4877.reference | 0 .../00999_nullable_nested_types_4877.sql | 0 .../00999_settings_no_extra_quotes.reference | 0 .../00999_settings_no_extra_quotes.sql | 0 ...kip_indices_with_alter_and_merge.reference | 0 ...test_skip_indices_with_alter_and_merge.sql | 0 ...01000_bad_size_of_marks_skip_idx.reference | 0 .../01000_bad_size_of_marks_skip_idx.sql | 0 .../01000_subquery_requires_alias.reference | 0 .../01000_subquery_requires_alias.sql | 0 ...00_unneeded_substitutions_client.reference | 0 .../01000_unneeded_substitutions_client.sh | 0 .../01001_enums_in_in_section.reference | 0 .../0_stateless/01001_enums_in_in_section.sql | 0 ...1001_rename_merge_race_condition.reference | 0 .../01001_rename_merge_race_condition.sh | 0 ...llable_adaptive_granularity_long.reference | 0 ...lter_nullable_adaptive_granularity_long.sh | 0 .../01003_kill_query_race_condition.reference | 0 .../01003_kill_query_race_condition.sh | 0 .../01004_rename_deadlock.reference | 0 .../0_stateless/01004_rename_deadlock.sh | 0 .../01005_rwr_shard_deadlock.reference | 0 .../0_stateless/01005_rwr_shard_deadlock.sh | 0 ...d_empty_part_single_column_write.reference | 0 ...6_simpod_empty_part_single_column_write.sh | 0 .../01006_ttl_with_default_2.reference | 0 .../0_stateless/01006_ttl_with_default_2.sql | 0 .../01007_r1r2_w_r2r1_deadlock.reference | 0 .../0_stateless/01007_r1r2_w_r2r1_deadlock.sh | 0 ...aterialized_view_henyihanwobushi.reference | 0 ...1008_materialized_view_henyihanwobushi.sql | 0 .../01009_global_array_join_names.reference | 0 .../01009_global_array_join_names.sql | 0 .../01009_insert_select_data_loss.reference | 0 .../01009_insert_select_data_loss.sql | 0 .../01009_insert_select_nicelulu.reference | 0 .../01009_insert_select_nicelulu.sql | 0 ..._low_cardinality_and_native_http.reference | 0 .../01010_low_cardinality_and_native_http.sh | 0 .../01010_partial_merge_join.reference | 0 .../0_stateless/01010_partial_merge_join.sql | 0 ..._partial_merge_join_const_and_lc.reference | 0 .../01010_partial_merge_join_const_and_lc.sql | 0 ...1010_partial_merge_join_negative.reference | 0 .../01010_partial_merge_join_negative.sql | 0 .../01010_pm_join_all_join_bug.reference | 0 .../01010_pm_join_all_join_bug.sql | 0 .../0_stateless/01010_pmj_on_disk.reference | 0 .../queries/0_stateless/01010_pmj_on_disk.sql | 0 .../01010_pmj_one_row_blocks.reference | 0 .../0_stateless/01010_pmj_one_row_blocks.sql | 0 ...10_pmj_right_table_memory_limits.reference | 0 .../01010_pmj_right_table_memory_limits.sql | 0 .../01010_pmj_skip_blocks.reference | 0 .../0_stateless/01010_pmj_skip_blocks.sql | 0 .../01011_group_uniq_array_memsan.reference | 0 .../01011_group_uniq_array_memsan.sql | 0 ...1011_test_create_as_skip_indices.reference | 0 .../01011_test_create_as_skip_indices.sql | 0 .../01012_reset_running_accumulate.reference | 0 .../01012_reset_running_accumulate.sql | 0 .../01012_select_limit_x_0.reference | 0 .../0_stateless/01012_select_limit_x_0.sql | 0 ...012_serialize_array_memory_usage.reference | 0 
.../01012_serialize_array_memory_usage.sql | 0 .../01012_show_tables_limit.reference | 0 .../0_stateless/01012_show_tables_limit.sql | 0 .../0_stateless/01013_hex_decimal.reference | 0 .../queries/0_stateless/01013_hex_decimal.sql | 0 .../0_stateless/01013_hex_float.reference | 0 .../queries/0_stateless/01013_hex_float.sql | 0 .../01013_repeat_function.reference | 0 .../0_stateless/01013_repeat_function.sql | 0 ...3_sync_replica_timeout_zookeeper.reference | 0 .../01013_sync_replica_timeout_zookeeper.sh | 0 ...01013_totals_without_aggregation.reference | 0 .../01013_totals_without_aggregation.sql | 0 .../01014_count_of_merges_metrics.reference | 0 .../01014_count_of_merges_metrics.sql | 0 .../01014_format_custom_separated.reference | 0 .../01014_format_custom_separated.sh | 0 ...014_function_repeat_corner_cases.reference | 0 .../01014_function_repeat_corner_cases.sql | 0 .../01014_lazy_database_basic.reference | 0 .../0_stateless/01014_lazy_database_basic.sh | 0 .../0_stateless/01015_array_split.reference | 0 .../queries/0_stateless/01015_array_split.sql | 0 .../0_stateless/01015_attach_part.reference | 0 .../queries/0_stateless/01015_attach_part.sql | 0 .../01015_database_bad_tables.reference | 0 .../0_stateless/01015_database_bad_tables.sql | 0 .../01015_empty_in_inner_right_join.reference | 0 .../01015_empty_in_inner_right_join.sql | 0 ...01015_insert_values_parametrized.reference | 0 .../01015_insert_values_parametrized.sh | 0 .../01015_random_constant.reference | 0 .../0_stateless/01015_random_constant.sql | 0 .../01016_index_tuple_field_type.reference | 0 .../01016_index_tuple_field_type.sql | 0 .../01016_input_null_as_default.reference | 0 .../01016_input_null_as_default.sh | 0 .../0_stateless/01016_macros.reference | 0 .../queries/0_stateless/01016_macros.sql | 0 .../01016_null_part_minmax.reference | 0 .../0_stateless/01016_null_part_minmax.sql | 0 .../01016_uniqCombined64.reference | 0 .../0_stateless/01016_uniqCombined64.sql | 0 ...17_in_unconvertible_complex_type.reference | 0 .../01017_in_unconvertible_complex_type.sql | 0 ...eterministic_functions_zookeeper.reference | 0 ...th_nondeterministic_functions_zookeeper.sh | 0 .../01017_tsv_empty_as_default.reference | 0 .../0_stateless/01017_tsv_empty_as_default.sh | 0 .../01017_uniqCombined_memory_usage.reference | 0 .../01017_uniqCombined_memory_usage.sql | 0 .../01018_Distributed__shard_num.reference | 0 .../01018_Distributed__shard_num.sql | 0 .../01018_ambiguous_column.reference | 0 .../0_stateless/01018_ambiguous_column.sql | 0 ...018_ddl_dictionaries_bad_queries.reference | 0 .../01018_ddl_dictionaries_bad_queries.sh | 0 ...ictionaries_concurrent_requrests.reference | 0 ...8_ddl_dictionaries_concurrent_requrests.sh | 0 .../01018_ddl_dictionaries_create.reference | 0 .../01018_ddl_dictionaries_create.sql | 0 .../01018_ddl_dictionaries_select.reference | 0 .../01018_ddl_dictionaries_select.sql | 0 .../01018_ddl_dictionaries_special.reference | 0 .../01018_ddl_dictionaries_special.sql | 0 ...8_dictionaries_from_dictionaries.reference | 0 .../01018_dictionaries_from_dictionaries.sql | 0 .../01018_empty_aggregation_filling.reference | 0 .../01018_empty_aggregation_filling.sql | 0 ...rt_multiple_blocks_with_defaults.reference | 0 ...18_insert_multiple_blocks_with_defaults.sh | 0 ...e_read_in_order_with_in_subquery.reference | 0 ...ptimize_read_in_order_with_in_subquery.sql | 0 ...1019_Buffer_and_max_memory_usage.reference | 0 .../01019_Buffer_and_max_memory_usage.sql | 0 ...9_alter_materialized_view_atomic.reference | 0 
.../01019_alter_materialized_view_atomic.sh | 0 ...ter_materialized_view_consistent.reference | 0 ...1019_alter_materialized_view_consistent.sh | 0 ...19_alter_materialized_view_query.reference | 0 .../01019_alter_materialized_view_query.sql | 0 .../0_stateless/01019_array_fill.reference | 0 .../queries/0_stateless/01019_array_fill.sql | 0 ...alized_view_select_extra_columns.reference | 0 ...materialized_view_select_extra_columns.sql | 0 .../01019_parallel_parsing_cancel.reference | 0 .../01019_parallel_parsing_cancel.sh | 0 .../01020_function_array_compact.reference | 0 .../01020_function_array_compact.sql | 0 .../0_stateless/01020_function_char.reference | 0 .../0_stateless/01020_function_char.sql | 0 .../01020_having_without_group_by.reference | 0 .../01020_having_without_group_by.sql | 0 .../01021_create_as_select.reference | 0 .../0_stateless/01021_create_as_select.sql | 0 .../01021_only_tuple_columns.reference | 0 .../0_stateless/01021_only_tuple_columns.sql | 0 .../0_stateless/01021_tuple_parser.reference | 0 .../0_stateless/01021_tuple_parser.sql | 0 ..._materialized_view_query_context.reference | 0 .../01023_materialized_view_query_context.sql | 0 .../0_stateless/01024__getScalar.reference | 0 .../queries/0_stateless/01024__getScalar.sql | 0 .../01025_array_compact_generic.reference | 0 .../01025_array_compact_generic.sql | 0 .../0_stateless/01026_char_utf8.reference | 0 .../queries/0_stateless/01026_char_utf8.sql | 0 .../01029_early_constant_folding.reference | 0 .../01029_early_constant_folding.sql | 0 ..._concatenate_equal_fixed_strings.reference | 0 .../01030_concatenate_equal_fixed_strings.sql | 0 ...030_final_mark_empty_primary_key.reference | 0 .../01030_final_mark_empty_primary_key.sql | 0 ...correct_count_summing_merge_tree.reference | 0 ...030_incorrect_count_summing_merge_tree.sql | 0 .../01030_limit_by_with_ties_error.reference | 0 .../01030_limit_by_with_ties_error.sh | 0 .../01030_storage_hdfs_syntax.reference | 0 .../0_stateless/01030_storage_hdfs_syntax.sql | 0 .../01030_storage_set_supports_read.reference | 0 .../01030_storage_set_supports_read.sql | 0 .../01030_storage_url_syntax.reference | 0 .../0_stateless/01030_storage_url_syntax.sql | 0 ...utations_interpreter_and_context.reference | 0 ...01031_mutations_interpreter_and_context.sh | 0 .../0_stateless/01031_new_any_join.reference | 0 .../0_stateless/01031_new_any_join.sql | 0 .../01031_pmj_new_any_semi_join.reference | 0 .../01031_pmj_new_any_semi_join.sql | 0 .../01031_semi_anti_join.reference | 0 .../0_stateless/01031_semi_anti_join.sql | 0 .../01032_cityHash64_for_UUID.reference | 0 .../0_stateless/01032_cityHash64_for_UUID.sql | 0 .../01032_cityHash64_for_decimal.reference | 0 .../01032_cityHash64_for_decimal.sql | 0 ...32_duplicate_column_insert_query.reference | 0 .../01032_duplicate_column_insert_query.sql | 0 .../01033_dictionaries_lifetime.reference | 0 .../01033_dictionaries_lifetime.sql | 0 .../0_stateless/01033_quota_dcl.reference | 0 .../queries/0_stateless/01033_quota_dcl.sql | 0 ...age_odbc_parsing_exception_check.reference | 0 ...3_storage_odbc_parsing_exception_check.sql | 0 .../01033_substr_negative_size_arg.reference | 0 .../01033_substr_negative_size_arg.sql | 0 .../01034_JSONCompactEachRow.reference | 0 .../0_stateless/01034_JSONCompactEachRow.sql | 0 ...e_partition_from_table_zookeeper.reference | 0 ...034_move_partition_from_table_zookeeper.sh | 0 .../01034_order_by_pk_prefix.reference | 0 .../0_stateless/01034_order_by_pk_prefix.sql | 0 ...ax_parallel_replicas_distributed.reference | 0 
...here_max_parallel_replicas_distributed.sql | 0 .../01034_sample_final_distributed.reference | 0 .../01034_sample_final_distributed.sql | 0 ...unknown_qualified_column_in_join.reference | 0 ...01034_unknown_qualified_column_in_join.sql | 0 .../01034_values_parse_float_bug.reference | 0 .../01034_values_parse_float_bug.sh | 0 ...ith_fill_and_push_down_predicate.reference | 0 ...1034_with_fill_and_push_down_predicate.sql | 0 .../0_stateless/01035_avg_weighted.reference | 0 .../queries/0_stateless/01035_avg_weighted.sh | 0 ...e_partition_from_table_zookeeper.reference | 0 ...ent_move_partition_from_table_zookeeper.sh | 0 ...35_enum_conversion_native_format.reference | 0 .../01035_enum_conversion_native_format.sh | 0 .../01035_lc_empty_part_bug.reference | 0 .../0_stateless/01035_lc_empty_part_bug.sh | 0 .../01035_prewhere_with_alias.reference | 0 .../0_stateless/01035_prewhere_with_alias.sql | 0 ...s_dict_reload_on_create_database.reference | 0 ...rfluous_dict_reload_on_create_database.sql | 0 ...dict_reload_on_create_database_2.reference | 0 ...luous_dict_reload_on_create_database_2.sql | 0 .../01036_union_different_columns.reference | 0 .../01036_union_different_columns.sql | 0 ...1037_polygon_dict_multi_polygons.reference | 0 .../01037_polygon_dict_multi_polygons.sql | 0 ...037_polygon_dict_simple_polygons.reference | 0 .../01037_polygon_dict_simple_polygons.sql | 0 ...7_zookeeper_check_table_empty_pk.reference | 0 .../01037_zookeeper_check_table_empty_pk.sql | 0 .../01038_array_of_unnamed_tuples.reference | 0 .../01038_array_of_unnamed_tuples.sql | 0 ...dictionary_lifetime_min_zero_sec.reference | 0 .../01038_dictionary_lifetime_min_zero_sec.sh | 0 .../01039_mergetree_exec_time.reference | 0 .../0_stateless/01039_mergetree_exec_time.sql | 0 .../01039_row_policy_dcl.reference | 0 .../0_stateless/01039_row_policy_dcl.sql | 0 .../01039_test_setting_parse.reference | 0 .../0_stateless/01039_test_setting_parse.sql | 0 ...invalidate_query_switchover_long.reference | 0 ...ionary_invalidate_query_switchover_long.sh | 0 ..._directory_monitor_batch_inserts.reference | 0 ...ibuted_directory_monitor_batch_inserts.sql | 0 .../01040_h3_get_resolution.reference | 0 .../0_stateless/01040_h3_get_resolution.sql | 0 ..._create_dictionary_if_not_exists.reference | 0 .../01041_create_dictionary_if_not_exists.sql | 0 .../0_stateless/01041_h3_is_valid.reference | 0 .../queries/0_stateless/01041_h3_is_valid.sql | 0 ...heck_query_and_last_granule_size.reference | 0 ...1042_check_query_and_last_granule_size.sql | 0 .../0_stateless/01042_h3_k_ring.reference | 0 .../queries/0_stateless/01042_h3_k_ring.sql | 0 ...ad_dictionary_reloads_completely.reference | 0 ...em_reload_dictionary_reloads_completely.sh | 0 .../01043_categorical_iv.reference | 0 .../0_stateless/01043_categorical_iv.sql | 0 ...nary_attribute_properties_values.reference | 0 ...dictionary_attribute_properties_values.sql | 0 .../0_stateless/01043_geo_distance.reference | 0 .../0_stateless/01043_geo_distance.sql | 0 .../01043_h3_edge_length_m.reference | 0 .../0_stateless/01043_h3_edge_length_m.sql | 0 .../01044_great_circle_angle.reference | 0 .../0_stateless/01044_great_circle_angle.sql | 0 .../0_stateless/01044_h3_edge_angle.reference | 0 .../0_stateless/01044_h3_edge_angle.sql | 0 .../0_stateless/01045_array_zip.reference | 0 .../queries/0_stateless/01045_array_zip.sql | 0 .../01045_bloom_filter_null_array.reference | 0 .../01045_bloom_filter_null_array.sql | 0 .../01045_dictionaries_restrictions.reference | 0 
.../01045_dictionaries_restrictions.sql | 0 ...045_order_by_pk_special_storages.reference | 0 .../01045_order_by_pk_special_storages.sh | 0 ...ystem_mutations_with_parts_names.reference | 0 ...eeper_system_mutations_with_parts_names.sh | 0 ..._view_with_join_over_distributed.reference | 0 ...alized_view_with_join_over_distributed.sql | 0 ..._trivial_count_query_distributed.reference | 0 .../01046_trivial_count_query_distributed.sql | 0 ...alias_columns_with_table_aliases.reference | 0 ...47_no_alias_columns_with_table_aliases.sql | 0 .../0_stateless/01047_nullable_rand.reference | 0 .../0_stateless/01047_nullable_rand.sql | 0 ...e_aggregate_sizes_of_columns_bug.reference | 0 ..._simple_aggregate_sizes_of_columns_bug.sql | 0 .../0_stateless/01048_exists_query.reference | 0 .../0_stateless/01048_exists_query.sql | 0 .../01049_join_low_card_bug.reference | 0 .../0_stateless/01049_join_low_card_bug.sql | 0 .../01049_join_low_card_crash.reference | 0 .../0_stateless/01049_join_low_card_crash.sql | 0 ..._zookeeper_synchronous_mutations.reference | 0 .../01049_zookeeper_synchronous_mutations.sql | 0 ...khouse_dict_source_with_subquery.reference | 0 ...0_clickhouse_dict_source_with_subquery.sql | 0 .../01050_engine_join_crash.reference | 0 .../0_stateless/01050_engine_join_crash.sql | 0 .../01050_engine_join_view_crash.reference | 0 .../01050_engine_join_view_crash.sql | 0 .../01050_group_array_sample.reference | 0 .../0_stateless/01050_group_array_sample.sql | 0 .../01051_aggregate_function_crash.reference | 0 .../01051_aggregate_function_crash.sql | 0 .../01051_all_join_engine.reference | 0 .../0_stateless/01051_all_join_engine.sql | 0 .../01051_new_any_join_engine.reference | 0 .../0_stateless/01051_new_any_join_engine.sql | 0 .../01051_random_printable_ascii.reference | 0 .../01051_random_printable_ascii.sql | 0 ...01051_same_name_alias_with_joins.reference | 0 .../01051_same_name_alias_with_joins.sql | 0 .../01052_array_reduce_exception.reference | 0 .../01052_array_reduce_exception.sql | 0 ...01052_compression_buffer_overrun.reference | 0 .../01052_compression_buffer_overrun.sh | 0 .../01053_drop_database_mat_view.reference | 0 .../01053_drop_database_mat_view.sql | 0 .../01053_if_chain_check.reference | 0 .../0_stateless/01053_if_chain_check.sql | 0 ...54_cache_dictionary_bunch_update.reference | 0 .../01054_cache_dictionary_bunch_update.sh | 0 ...4_cache_dictionary_overflow_cell.reference | 0 .../01054_cache_dictionary_overflow_cell.sql | 0 ...054_random_printable_ascii_ubsan.reference | 0 .../01054_random_printable_ascii_ubsan.sh | 0 .../0_stateless/01055_compact_parts.reference | 0 .../0_stateless/01055_compact_parts.sql | 0 .../01055_compact_parts_1.reference | 0 .../0_stateless/01055_compact_parts_1.sql | 0 .../01055_compact_parts_granularity.reference | 0 .../01055_compact_parts_granularity.sh | 0 ...01055_minmax_index_compact_parts.reference | 0 .../01055_minmax_index_compact_parts.sh | 0 .../0_stateless/01055_prewhere_bugs.reference | 0 .../0_stateless/01055_prewhere_bugs.sql | 0 .../01056_create_table_as.reference | 0 .../0_stateless/01056_create_table_as.sql | 0 ...01056_negative_with_bloom_filter.reference | 0 .../01056_negative_with_bloom_filter.sql | 0 .../01056_predicate_optimizer_bugs.reference | 0 .../01056_predicate_optimizer_bugs.sql | 0 ...red_statements_null_and_escaping.reference | 0 ...6_prepared_statements_null_and_escaping.sh | 0 ...7_http_compression_prefer_brotli.reference | 0 .../01057_http_compression_prefer_brotli.sh | 0 .../01058_zlib_ng_level1_bug.reference | 0
.../0_stateless/01058_zlib_ng_level1_bug.sh | 0 .../01059_storage_file_brotli.reference | 0 .../0_stateless/01059_storage_file_brotli.sql | 0 .../queries/0_stateless/01060_avro.reference | 0 .../queries/0_stateless/01060_avro.sh | 0 .../01060_defaults_all_columns.reference | 0 .../01060_defaults_all_columns.sql | 0 ...1060_shutdown_table_after_detach.reference | 0 .../01060_shutdown_table_after_detach.sql | 0 .../01060_substring_negative_size.reference | 0 .../01060_substring_negative_size.sql | 0 .../01061_alter_codec_with_type.reference | 0 .../01061_alter_codec_with_type.sql | 0 .../01062_alter_on_mutataion.reference | 0 .../0_stateless/01062_alter_on_mutataion.sql | 0 .../01062_max_parser_depth.reference | 0 .../0_stateless/01062_max_parser_depth.sh | 0 ...all_join_with_block_continuation.reference | 0 ...62_pm_all_join_with_block_continuation.sql | 0 ..._pm_multiple_all_join_same_value.reference | 0 .../01062_pm_multiple_all_join_same_value.sql | 0 .../01063_create_column_set.reference | 0 .../0_stateless/01063_create_column_set.sql | 0 .../0_stateless/01064_array_auc.reference | 0 .../queries/0_stateless/01064_array_auc.sql | 0 ...reaming_from_2_src_with_feedback.reference | 0 ...tal_streaming_from_2_src_with_feedback.sql | 0 ...4_pm_all_join_const_and_nullable.reference | 0 .../01064_pm_all_join_const_and_nullable.sql | 0 .../01065_array_zip_mixed_const.reference | 0 .../01065_array_zip_mixed_const.sql | 0 .../0_stateless/01065_if_not_finite.reference | 0 .../0_stateless/01065_if_not_finite.sql | 0 .../0_stateless/01066_bit_count.reference | 0 .../queries/0_stateless/01066_bit_count.sql | 0 .../0_stateless/01067_join_null.reference | 0 .../queries/0_stateless/01067_join_null.sql | 0 .../0_stateless/01068_parens.reference | 0 .../queries/0_stateless/01068_parens.sql | 0 .../01069_database_memory.reference | 0 .../0_stateless/01069_database_memory.sql | 0 ...9_insert_float_as_nullable_unit8.reference | 0 .../01069_insert_float_as_nullable_unit8.sql | 0 ...rialized_view_alter_target_table.reference | 0 ...9_materialized_view_alter_target_table.sql | 0 ...et_table_with_default_expression.reference | 0 ...r_target_table_with_default_expression.sql | 0 .../01069_set_in_group_by.reference | 0 .../0_stateless/01069_set_in_group_by.sql | 0 .../01070_alter_with_ttl.reference | 0 .../0_stateless/01070_alter_with_ttl.sql | 0 ...xception_code_in_query_log_table.reference | 0 ...1070_exception_code_in_query_log_table.sql | 0 .../01070_h3_get_base_cell.reference | 0 .../0_stateless/01070_h3_get_base_cell.sql | 0 .../01070_h3_hex_area_m2.reference | 0 .../0_stateless/01070_h3_hex_area_m2.sql | 0 .../01070_h3_indexes_are_neighbors.reference | 0 .../01070_h3_indexes_are_neighbors.sql | 0 .../01070_h3_to_children.reference | 0 .../0_stateless/01070_h3_to_children.sql | 0 .../0_stateless/01070_h3_to_parent.reference | 0 .../0_stateless/01070_h3_to_parent.sql | 0 .../0_stateless/01070_h3_to_string.reference | 0 .../0_stateless/01070_h3_to_string.sql | 0 .../01070_materialize_ttl.reference | 0 .../0_stateless/01070_materialize_ttl.sql | 0 ...1070_mutations_with_dependencies.reference | 0 .../01070_mutations_with_dependencies.sql | 0 .../0_stateless/01070_string_to_h3.reference | 0 .../0_stateless/01070_string_to_h3.sql | 0 .../01070_template_empty_file.reference | 0 .../0_stateless/01070_template_empty_file.sql | 0 ...070_to_decimal_or_null_exception.reference | 0 .../01070_to_decimal_or_null_exception.sql | 0 ...orce_optimize_skip_unused_shards.reference | 0
...1071_force_optimize_skip_unused_shards.sql | 0 ...01071_http_header_exception_code.reference | 0 .../01071_http_header_exception_code.sh | 0 .../0_stateless/01071_in_array.reference | 0 .../queries/0_stateless/01071_in_array.sql | 0 ...1071_live_view_detach_dependency.reference | 0 .../01071_live_view_detach_dependency.sql | 0 ...index_with_old_format_merge_tree.reference | 0 ...ndary_index_with_old_format_merge_tree.sql | 0 ...p_temporary_table_with_same_name.reference | 0 ...72_drop_temporary_table_with_same_name.sql | 0 ...each_row_data_in_square_brackets.reference | 0 ..._json_each_row_data_in_square_brackets.sql | 0 .../0_stateless/01072_nullable_jit.reference | 0 .../0_stateless/01072_nullable_jit.sql | 0 ...ip_unused_shards_const_expr_eval.reference | 0 ...ize_skip_unused_shards_const_expr_eval.sql | 0 .../01072_select_constant_limit.reference | 0 .../01072_select_constant_limit.sql | 0 .../01073_attach_if_not_exists.reference | 0 .../01073_attach_if_not_exists.sql | 0 .../01073_bad_alter_partition.reference | 0 .../0_stateless/01073_bad_alter_partition.sql | 0 .../01073_blockSerializedSize.reference | 0 .../0_stateless/01073_blockSerializedSize.sql | 0 .../01073_crlf_end_of_line.reference | 0 .../0_stateless/01073_crlf_end_of_line.sql | 0 .../01073_grant_and_revoke.reference | 0 .../0_stateless/01073_grant_and_revoke.sql | 0 .../01073_show_tables_not_like.reference | 0 .../01073_show_tables_not_like.sql | 0 .../01074_h3_range_check.reference | 0 .../0_stateless/01074_h3_range_check.sql | 0 .../01074_partial_revokes.reference | 0 .../0_stateless/01074_partial_revokes.sql | 0 .../01075_allowed_client_hosts.reference | 0 .../01075_allowed_client_hosts.sql | 0 .../01075_in_arrays_enmk.reference | 0 .../0_stateless/01075_in_arrays_enmk.sql | 0 ...rray_join_prewhere_const_folding.reference | 0 ...1076_array_join_prewhere_const_folding.sql | 0 ...ictionary_datarace_exception_ptr.reference | 0 ...cache_dictionary_datarace_exception_ptr.sh | 0 .../01076_json_each_row_array.reference | 0 .../0_stateless/01076_json_each_row_array.sh | 0 ...allel_alter_replicated_zookeeper.reference | 0 ...076_parallel_alter_replicated_zookeeper.sh | 0 ...76_predicate_optimizer_with_view.reference | 0 .../01076_predicate_optimizer_with_view.sql | 0 .../01076_range_reader_segfault.reference | 0 .../01076_range_reader_segfault.sql | 0 ...1077_mutations_index_consistency.reference | 0 .../01077_mutations_index_consistency.sh | 0 .../01077_yet_another_prewhere_test.reference | 0 .../01077_yet_another_prewhere_test.sql | 0 ...78_bloom_filter_operator_not_has.reference | 0 .../01078_bloom_filter_operator_not_has.sql | 0 ...01078_merge_tree_read_one_thread.reference | 0 .../01078_merge_tree_read_one_thread.sql | 0 .../01079_alter_default_zookeeper.reference | 0 .../01079_alter_default_zookeeper.sql | 0 .../01079_bad_alters_zookeeper.reference | 0 .../0_stateless/01079_bad_alters_zookeeper.sh | 0 ...1079_bit_operations_using_bitset.reference | 0 .../01079_bit_operations_using_bitset.sql | 0 .../01079_new_range_reader_segfault.reference | 0 .../01079_new_range_reader_segfault.sql | 0 .../0_stateless/01079_order_by_pk.reference | 0 .../queries/0_stateless/01079_order_by_pk.sql | 0 ..._alter_add_drop_column_zookeeper.reference | 0 ...arallel_alter_add_drop_column_zookeeper.sh | 0 ...lel_alter_detach_table_zookeeper.reference | 0 ...9_parallel_alter_detach_table_zookeeper.sh | 0 ...9_parallel_alter_modify_zookeeper.referece | 0 ..._parallel_alter_modify_zookeeper.reference | 0 
.../01079_parallel_alter_modify_zookeeper.sh | 0 ...1079_reinterpret_as_fixed_string.reference | 0 .../01079_reinterpret_as_fixed_string.sql | 0 ..._incorrect_size_of_nested_column.reference | 0 ..._error_incorrect_size_of_nested_column.sql | 0 ...erge_prewhere_tupleelement_error.reference | 0 ...gine_merge_prewhere_tupleelement_error.sql | 0 .../0_stateless/01080_join_get_null.reference | 0 .../0_stateless/01080_join_get_null.sql | 0 ...tialSortingTransform_full_column.reference | 0 ...81_PartialSortingTransform_full_column.sql | 0 .../0_stateless/01081_demangle.reference | 0 .../queries/0_stateless/01081_demangle.sql | 0 .../01081_keywords_formatting.reference | 0 .../0_stateless/01081_keywords_formatting.sql | 0 .../01082_bit_test_out_of_bound.reference | 0 .../01082_bit_test_out_of_bound.sql | 0 ...aggregation_memory_efficient_bug.reference | 0 ...01083_aggregation_memory_efficient_bug.sql | 0 ...01083_cross_to_inner_with_in_bug.reference | 0 .../01083_cross_to_inner_with_in_bug.sql | 0 .../01083_cross_to_inner_with_like.reference | 0 .../01083_cross_to_inner_with_like.sql | 0 ..._expressions_in_engine_arguments.reference | 0 .../01083_expressions_in_engine_arguments.sql | 0 ...83_functional_index_in_mergetree.reference | 0 .../01083_functional_index_in_mergetree.sql | 0 .../01083_log_family_disk_memory.reference | 0 .../01083_log_family_disk_memory.sql | 0 .../01083_log_first_column_alias.reference | 0 .../01083_log_first_column_alias.sql | 0 .../01083_match_zero_byte.reference | 0 .../0_stateless/01083_match_zero_byte.sql | 0 .../01084_defaults_on_aliases.reference | 0 .../0_stateless/01084_defaults_on_aliases.sql | 0 .../0_stateless/01084_regexp_empty.reference | 0 .../0_stateless/01084_regexp_empty.sql | 0 ...ime_arithmetic_preserve_timezone.reference | 0 ..._datetime_arithmetic_preserve_timezone.sql | 0 .../01085_extract_all_empty.reference | 0 .../0_stateless/01085_extract_all_empty.sql | 0 ...1085_max_distributed_connections.reference | 0 .../01085_max_distributed_connections.sh | 0 ...max_distributed_connections_http.reference | 0 .../01085_max_distributed_connections_http.sh | 0 .../01085_regexp_input_format.reference | 0 .../0_stateless/01085_regexp_input_format.sh | 0 .../01085_simdjson_uint64.reference | 0 .../0_stateless/01085_simdjson_uint64.sql | 0 .../01086_modulo_or_zero.reference | 0 .../0_stateless/01086_modulo_or_zero.sql | 0 .../01086_odbc_roundtrip.reference | 0 .../0_stateless/01086_odbc_roundtrip.sql | 0 ...gexp_input_format_skip_unmatched.reference | 0 ...1086_regexp_input_format_skip_unmatched.sh | 0 .../01087_index_set_ubsan.reference | 0 .../0_stateless/01087_index_set_ubsan.sql | 0 .../01087_storage_generate.reference | 0 .../0_stateless/01087_storage_generate.sql | 0 .../01087_table_function_generate.reference | 0 .../01087_table_function_generate.sql | 0 ...ray_slice_of_aggregate_functions.reference | 0 ...088_array_slice_of_aggregate_functions.sql | 0 .../01088_benchmark_query_id.reference | 0 .../0_stateless/01088_benchmark_query_id.sh | 0 .../01089_alter_settings_old_format.reference | 0 .../01089_alter_settings_old_format.sql | 0 .../01090_fixed_string_bit_ops.reference | 0 .../01090_fixed_string_bit_ops.sql | 0 ...eper_mutations_and_insert_quorum.reference | 0 ..._zookeeper_mutations_and_insert_quorum.sql | 0 .../01091_insert_with_default_json.reference | 0 .../01091_insert_with_default_json.sql | 0 .../0_stateless/01091_num_threads.reference | 0 .../queries/0_stateless/01091_num_threads.sql | 0 ...091_query_profiler_does_not_hang.reference | 0 
.../01091_query_profiler_does_not_hang.sql | 0 .../0_stateless/01092_base64.reference | 0 .../queries/0_stateless/01092_base64.sql | 0 .../01092_memory_profiler.reference | 0 .../0_stateless/01092_memory_profiler.sql | 0 .../01093_cyclic_defaults_filimonov.reference | 0 .../01093_cyclic_defaults_filimonov.sql | 0 .../01095_tpch_like_smoke.reference | 0 .../0_stateless/01095_tpch_like_smoke.sql | 0 .../01096_array_reduce_in_ranges.reference | 0 .../01096_array_reduce_in_ranges.sql | 0 .../01096_block_serialized_state.reference | 0 .../01096_block_serialized_state.sql | 0 .../queries/0_stateless/01096_zeros.reference | 0 .../queries/0_stateless/01096_zeros.sql | 0 .../01097_cyclic_defaults.reference | 0 .../0_stateless/01097_cyclic_defaults.sql | 0 ...01097_one_more_range_reader_test.reference | 0 .../01097_one_more_range_reader_test.sql | 0 .../0_stateless/01097_pre_limit.reference | 0 .../queries/0_stateless/01097_pre_limit.sql | 0 .../queries/0_stateless/01098_sum.reference | 0 .../queries/0_stateless/01098_sum.sql | 0 ...98_temporary_and_external_tables.reference | 0 .../01098_temporary_and_external_tables.sh | 0 ...099_operators_date_and_timestamp.reference | 0 .../01099_operators_date_and_timestamp.sql | 0 ...rallel_distributed_insert_select.reference | 0 ...099_parallel_distributed_insert_select.sql | 0 .../01100_split_by_string.reference | 0 .../0_stateless/01100_split_by_string.sql | 0 .../01101_prewhere_after_alter.reference | 0 .../01101_prewhere_after_alter.sql | 0 .../01102_distributed_local_in_bug.reference | 0 .../01102_distributed_local_in_bug.sql | 0 ...heck_cpu_instructions_at_startup.reference | 0 ...01103_check_cpu_instructions_at_startup.sh | 0 ...roduct_mode_local_column_renames.reference | 0 ...uted_product_mode_local_column_renames.sql | 0 ...103_optimize_drop_race_zookeeper.reference | 0 .../01103_optimize_drop_race_zookeeper.sh | 0 .../01104_distributed_numbers_test.reference | 0 .../01104_distributed_numbers_test.sql | 0 .../01104_distributed_one_test.reference | 0 .../01104_distributed_one_test.sql | 0 .../01104_fixed_string_like.reference | 0 .../0_stateless/01104_fixed_string_like.sql | 0 .../0_stateless/01105_string_like.reference | 0 .../queries/0_stateless/01105_string_like.sql | 0 .../01106_const_fixed_string_like.reference | 0 .../01106_const_fixed_string_like.sql | 0 ...200_mutations_memory_consumption.reference | 0 .../01200_mutations_memory_consumption.sql | 0 ...p_column_compact_part_replicated.reference | 0 ...01_drop_column_compact_part_replicated.sql | 0 .../01202_array_auc_special.reference | 0 .../0_stateless/01202_array_auc_special.sql | 0 .../0_stateless/01210_drop_view.reference | 0 .../queries/0_stateless/01210_drop_view.sql | 0 ...skip_unused_shards_type_mismatch.reference | 0 ...imize_skip_unused_shards_type_mismatch.sql | 0 .../01212_empty_join_and_totals.reference | 0 .../01212_empty_join_and_totals.sql | 0 ...mize_skip_unused_shards_DISTINCT.reference | 0 ...3_optimize_skip_unused_shards_DISTINCT.sql | 0 .../01213_point_in_Myanmar.reference | 0 .../0_stateless/01213_point_in_Myanmar.sql | 0 .../01214_point_in_Mecca.reference | 0 .../0_stateless/01214_point_in_Mecca.sql | 0 ...220_scalar_optimization_in_alter.reference | 0 .../01220_scalar_optimization_in_alter.sql | 0 .../01221_system_settings.reference | 0 .../0_stateless/01221_system_settings.sql | 0 .../01230_join_get_truncate.reference | 0 .../0_stateless/01230_join_get_truncate.sql | 0 .../0_stateless/data_avro/complex.avro | Bin .../0_stateless/data_avro/complex.avsc | 0 
.../0_stateless/data_avro/complex.json | 0 .../queries/0_stateless/data_avro/empty.avro | Bin .../queries/0_stateless/data_avro/empty.avsc | 0 .../queries/0_stateless/data_avro/empty.json | 0 .../0_stateless/data_avro/generate_avro.sh | 0 .../0_stateless/data_avro/logical_types.avro | Bin .../0_stateless/data_avro/logical_types.avsc | 0 .../0_stateless/data_avro/logical_types.json | 0 .../0_stateless/data_avro/primitive.avro | Bin .../0_stateless/data_avro/primitive.avsc | 0 .../0_stateless/data_avro/primitive.json | 0 .../0_stateless/data_avro/references.avro | Bin .../0_stateless/data_avro/references.avsc | 0 .../0_stateless/data_avro/references.json | 0 .../queries/0_stateless/data_avro/simple.avsc | 0 .../0_stateless/data_avro/simple.deflate.avro | Bin .../queries/0_stateless/data_avro/simple.json | 0 .../0_stateless/data_avro/simple.null.avro | Bin .../0_stateless/data_avro/simple.snappy.avro | Bin .../queries/0_stateless/data_orc/test.orc | Bin .../data_parquet/alltypes_dictionary.parquet | Bin .../alltypes_dictionary.parquet.columns | 0 .../data_parquet/alltypes_plain.parquet | Bin .../alltypes_plain.parquet.columns | 0 .../alltypes_plain.snappy.parquet | Bin .../alltypes_plain.snappy.parquet.columns | 0 .../data_parquet/byte_array_decimal.parquet | Bin .../byte_array_decimal.parquet.columns | 0 .../data_parquet/datapage_v2.snappy.parquet | Bin .../datapage_v2.snappy.parquet.columns | 0 .../fixed_length_decimal_1.parquet | Bin .../fixed_length_decimal_1.parquet.columns | 0 .../fixed_length_decimal_legacy.parquet | Bin ...ixed_length_decimal_legacy.parquet.columns | 0 .../data_parquet/int32_decimal.parquet | Bin .../int32_decimal.parquet.columns | 0 .../data_parquet/int64_decimal.parquet | Bin .../int64_decimal.parquet.columns | 0 .../nation.dict-malformed.parquet | Bin .../nation.dict-malformed.parquet.columns | 0 .../data_parquet/nested_lists.snappy.parquet | Bin .../nested_lists.snappy.parquet.columns | 0 .../data_parquet/nested_maps.snappy.parquet | Bin .../nested_maps.snappy.parquet.columns | 0 .../data_parquet/nonnullable.impala.parquet | Bin .../nonnullable.impala.parquet.columns | 0 .../data_parquet/nullable.impala.parquet | Bin .../nullable.impala.parquet.columns | 0 .../data_parquet/nulls.snappy.parquet | Bin .../data_parquet/nulls.snappy.parquet.columns | 0 .../repeated_no_annotation.parquet | Bin .../repeated_no_annotation.parquet.columns | 0 .../data_parquet/userdata1.parquet | Bin .../data_parquet/userdata1.parquet.columns | 0 .../data_parquet/userdata2.parquet | Bin .../data_parquet/userdata2.parquet.columns | 0 .../data_parquet/userdata3.parquet | Bin .../data_parquet/userdata3.parquet.columns | 0 .../data_parquet/userdata4.parquet | Bin .../data_parquet/userdata4.parquet.columns | 0 .../data_parquet/userdata5.parquet | Bin .../data_parquet/userdata5.parquet.columns | 0 .../v0.7.1.all-named-index.parquet | Bin .../v0.7.1.all-named-index.parquet.columns | 0 .../v0.7.1.column-metadata-handling.parquet | Bin ...1.column-metadata-handling.parquet.columns | 0 .../0_stateless/data_parquet/v0.7.1.parquet | Bin .../data_parquet/v0.7.1.parquet.columns | 0 .../v0.7.1.some-named-index.parquet | Bin .../v0.7.1.some-named-index.parquet.columns | 0 .../queries/0_stateless/helpers/client.py | 0 .../queries/0_stateless/helpers/httpclient.py | 0 .../queries/0_stateless/helpers/httpexpect.py | 0 .../queries/0_stateless/helpers/uexpect.py | 0 .../0_stateless/mergetree_mutations.lib | 0 .../1_stateful/00001_count_hits.reference | 0 .../queries/1_stateful/00001_count_hits.sql | 0 
.../1_stateful/00002_count_visits.reference | 0
.../queries/1_stateful/00002_count_visits.sql | 0
.../1_stateful/00004_top_counters.reference | 0
.../queries/1_stateful/00004_top_counters.sql | 0
.../1_stateful/00005_filtering.reference | 0
.../queries/1_stateful/00005_filtering.sql | 0
.../1_stateful/00006_agregates.reference | 0
.../queries/1_stateful/00006_agregates.sql | 0
.../queries/1_stateful/00007_uniq.reference | 0
.../queries/1_stateful/00007_uniq.sql | 0
.../queries/1_stateful/00008_uniq.reference | 0
.../queries/1_stateful/00008_uniq.sql | 0
.../00009_uniq_distributed.reference | 0
.../1_stateful/00009_uniq_distributed.sql | 0
.../00010_quantiles_segfault.reference | 0
.../1_stateful/00010_quantiles_segfault.sql | 0
.../1_stateful/00011_sorting.reference | 0
.../queries/1_stateful/00011_sorting.sql | 0
.../00012_sorting_distributed.reference | 0
.../1_stateful/00012_sorting_distributed.sql | 0
.../00013_sorting_of_nested.reference | 0
.../1_stateful/00013_sorting_of_nested.sql | 0
.../00014_filtering_arrays.reference | 0
.../1_stateful/00014_filtering_arrays.sql | 0
...otals_and_no_aggregate_functions.reference | 0
...0015_totals_and_no_aggregate_functions.sql | 0
...if_distributed_cond_always_false.reference | 0
...6_any_if_distributed_cond_always_false.sql | 0
...aggregation_uninitialized_memory.reference | 0
...00017_aggregation_uninitialized_memory.sql | 0
...20_distinct_order_by_distributed.reference | 0
.../00020_distinct_order_by_distributed.sql | 0
.../00021_1_select_with_in.reference | 0
.../1_stateful/00021_1_select_with_in.sql | 0
.../00021_2_select_with_in.reference | 0
.../1_stateful/00021_2_select_with_in.sql | 0
.../00021_3_select_with_in.reference | 0
.../1_stateful/00021_3_select_with_in.sql | 0
.../1_stateful/00022_merge_prewhere.reference | 0
.../1_stateful/00022_merge_prewhere.sql | 0
.../1_stateful/00023_totals_limit.reference | 0
.../queries/1_stateful/00023_totals_limit.sql | 0
.../00024_random_counters.reference | 0
.../1_stateful/00024_random_counters.sql | 0
.../00030_array_enumerate_uniq.reference | 0
.../1_stateful/00030_array_enumerate_uniq.sql | 0
.../00031_array_enumerate_uniq.reference | 0
.../1_stateful/00031_array_enumerate_uniq.sql | 0
.../00032_aggregate_key64.reference | 0
.../1_stateful/00032_aggregate_key64.sql | 0
.../00033_aggregate_key_string.reference | 0
.../1_stateful/00033_aggregate_key_string.sql | 0
...00034_aggregate_key_fixed_string.reference | 0
.../00034_aggregate_key_fixed_string.sql | 0
.../00035_aggregate_keys128.reference | 0
.../1_stateful/00035_aggregate_keys128.sql | 0
.../00036_aggregate_hashed.reference | 0
.../1_stateful/00036_aggregate_hashed.sql | 0
.../00037_uniq_state_merge1.reference | 0
.../1_stateful/00037_uniq_state_merge1.sql | 0
.../00038_uniq_state_merge2.reference | 0
.../1_stateful/00038_uniq_state_merge2.sql | 0
.../1_stateful/00039_primary_key.reference | 0
.../queries/1_stateful/00039_primary_key.sql | 0
...40_aggregating_materialized_view.reference | 0
.../00040_aggregating_materialized_view.sql | 0
...41_aggregating_materialized_view.reference | 0
.../00041_aggregating_materialized_view.sql | 0
.../1_stateful/00042_any_left_join.reference | 0
.../1_stateful/00042_any_left_join.sql | 0
.../1_stateful/00043_any_left_join.reference | 0
.../1_stateful/00043_any_left_join.sql | 0
.../00044_any_left_join_string.reference | 0
.../1_stateful/00044_any_left_join_string.sql | 0
.../1_stateful/00045_uniq_upto.reference | 0
.../queries/1_stateful/00045_uniq_upto.sql | 0
.../00046_uniq_upto_distributed.reference | 0
.../00046_uniq_upto_distributed.sql | 0
.../queries/1_stateful/00047_bar.reference | 0
.../queries/1_stateful/00047_bar.sql | 0
.../1_stateful/00048_min_max.reference | 0
.../queries/1_stateful/00048_min_max.sql | 0
.../1_stateful/00049_max_string_if.reference | 0
.../1_stateful/00049_max_string_if.sql | 0
.../1_stateful/00050_min_max.reference | 0
.../queries/1_stateful/00050_min_max.sql | 0
.../1_stateful/00051_min_max_array.reference | 0
.../1_stateful/00051_min_max_array.sql | 0
.../1_stateful/00052_group_by_in.reference | 0
.../queries/1_stateful/00052_group_by_in.sql | 0
.../00053_replicate_segfault.reference | 0
.../1_stateful/00053_replicate_segfault.sql | 0
.../00054_merge_tree_partitions.reference | 0
.../00054_merge_tree_partitions.sql | 0
.../1_stateful/00055_index_and_not.reference | 0
.../1_stateful/00055_index_and_not.sql | 0
.../queries/1_stateful/00056_view.reference | 0
.../queries/1_stateful/00056_view.sql | 0
...merge_sorting_empty_array_joined.reference | 0
...00059_merge_sorting_empty_array_joined.sql | 0
.../00060_move_to_prewhere_and_sets.reference | 0
.../00060_move_to_prewhere_and_sets.sql | 0
.../1_stateful/00061_storage_buffer.reference | 0
.../1_stateful/00061_storage_buffer.sql | 0
.../1_stateful/00062_loyalty.reference | 0
.../queries/1_stateful/00062_loyalty.sql | 0
.../1_stateful/00063_loyalty_joins.reference | 0
.../1_stateful/00063_loyalty_joins.sql | 0
.../00065_loyalty_with_storage_join.reference | 0
.../00065_loyalty_with_storage_join.sql | 0
...orting_distributed_many_replicas.reference | 0
...0066_sorting_distributed_many_replicas.sql | 0
.../1_stateful/00067_union_all.reference | 0
.../queries/1_stateful/00067_union_all.sql | 0
.../00068_subquery_in_prewhere.reference | 0
.../1_stateful/00068_subquery_in_prewhere.sql | 0
...00069_duplicate_aggregation_keys.reference | 0
.../00069_duplicate_aggregation_keys.sql | 0
.../00071_merge_tree_optimize_aio.reference | 0
.../00071_merge_tree_optimize_aio.sql | 0
...72_compare_date_and_string_index.reference | 0
.../00072_compare_date_and_string_index.sql | 0
.../1_stateful/00073_uniq_array.reference | 0
.../queries/1_stateful/00073_uniq_array.sql | 0
.../1_stateful/00074_full_join.reference | 0
.../queries/1_stateful/00074_full_join.sql | 0
.../00075_left_array_join.reference | 0
.../1_stateful/00075_left_array_join.sql | 0
.../00076_system_columns_bytes.reference | 0
.../1_stateful/00076_system_columns_bytes.sql | 0
.../00077_log_tinylog_stripelog.reference | 0
.../00077_log_tinylog_stripelog.sql | 0
.../00078_group_by_arrays.reference | 0
.../1_stateful/00078_group_by_arrays.sql | 0
...rray_join_not_used_joined_column.reference | 0
...0079_array_join_not_used_joined_column.sql | 0
.../00080_array_join_and_union.reference | 0
.../1_stateful/00080_array_join_and_union.sql | 0
..._group_by_without_key_and_totals.reference | 0
.../00081_group_by_without_key_and_totals.sql | 0
.../1_stateful/00082_quantiles.reference | 0
.../queries/1_stateful/00082_quantiles.sql | 0
.../1_stateful/00083_array_filter.reference | 0
.../queries/1_stateful/00083_array_filter.sql | 0
.../00084_external_aggregation.reference | 0
.../1_stateful/00084_external_aggregation.sql | 0
...85_monotonic_evaluation_segfault.reference | 0
.../00085_monotonic_evaluation_segfault.sql | 0
.../1_stateful/00086_array_reduce.reference | 0
.../queries/1_stateful/00086_array_reduce.sql | 0
.../1_stateful/00087_where_0.reference | 0
.../queries/1_stateful/00087_where_0.sql | 0
..._one_shard_and_rows_before_limit.reference | 0
...bal_in_one_shard_and_rows_before_limit.sql | 0
..._functions_with_non_constant_arg.reference | 0
...sition_functions_with_non_constant_arg.sql | 0
.../00090_thread_pool_deadlock.reference | 0
.../1_stateful/00090_thread_pool_deadlock.sh | 0
.../00091_prewhere_two_conditions.reference | 0
.../00091_prewhere_two_conditions.sql | 0
.../1_stateful/00092_obfuscator.reference | 0
.../queries/1_stateful/00092_obfuscator.sh | 0
.../queries/1_stateful/00139_like.reference | 0
.../queries/1_stateful/00139_like.sql | 0
.../queries/1_stateful/00140_rename.reference | 0
.../queries/1_stateful/00140_rename.sql | 0
.../1_stateful/00141_transform.reference | 0
.../queries/1_stateful/00141_transform.sql | 0
.../1_stateful/00142_system_columns.reference | 0
.../1_stateful/00142_system_columns.sql | 0
...0143_transform_non_const_default.reference | 0
.../00143_transform_non_const_default.sql | 0
..._functions_of_aggregation_states.reference | 0
.../00144_functions_of_aggregation_states.sql | 0
...5_aggregate_functions_statistics.reference | 0
.../00145_aggregate_functions_statistics.sql | 0
.../00146_aggregate_function_uniq.reference | 0
.../00146_aggregate_function_uniq.sql | 0
...147_global_in_aggregate_function.reference | 0
.../00147_global_in_aggregate_function.sql | 0
...48_monotonic_functions_and_index.reference | 0
.../00148_monotonic_functions_and_index.sql | 0
...149_quantiles_timing_distributed.reference | 0
.../00149_quantiles_timing_distributed.sql | 0
...00150_quantiles_timing_precision.reference | 0
.../00150_quantiles_timing_precision.sql | 0
.../00151_order_by_read_in_order.reference | 0
.../00151_order_by_read_in_order.sql | 0
...ition_with_different_granularity.reference | 0
...e_partition_with_different_granularity.sql | 0
...152_insert_different_granularity.reference | 0
.../00152_insert_different_granularity.sql | 0
.../00153_aggregate_arena_race.reference | 0
.../1_stateful/00153_aggregate_arena_race.sql | 0
.../queries/1_stateful/00154_avro.reference | 0
.../queries/1_stateful/00154_avro.sql | 0
{dbms/tests => tests}/queries/__init__.py | 0
.../queries/bugs/00938_client_suggestions.sh | 0
.../bugs/01060_defaults_all_columns.reference | 0
.../queries/bugs/default_prewhere.sql | 0
.../queries/bugs/low_cardinality_remove.sql | 0
.../bugs/missing_scalar_subquery_removal.sql | 0
.../bugs/position_case_insensitive_utf8.sql | 0
.../queries/bugs/remote_scalar_subquery.sql | 0
.../totals_rollup_having_block_header.sql | 0
.../queries/bugs/view_bad_types.sql | 0
{dbms/tests => tests}/queries/conftest.py | 0
{dbms/tests => tests}/queries/query_test.py | 0
{dbms/tests => tests}/queries/server.py | 0
{dbms/tests => tests}/queries/shell_config.sh | 0
{dbms/tests => tests}/server-test.xml | 0
{dbms/tests => tests}/stress | 0
{dbms/tests => tests}/strings_dictionary.xml | 0
{dbms/tests => tests}/tsan_suppressions.txt | 0
{dbms/tests => tests}/users.d/readonly.xml | 0
{dbms/tests => tests}/users.xml | 0
utils/build/build_debian.sh | 4 +-
utils/build/build_freebsd.sh | 4 +-
utils/build/build_macos.sh | 4 +-
utils/check-style/check-include | 4 +-
utils/ci/run-clickhouse-from-binaries.sh | 4 +-
utils/package/arch/PKGBUILD.in | 6 +-
utils/release/release_lib.sh | 4 +-
utils/test-data-generator/CMakeLists.txt | 6 +-
.../ProtobufDelimitedMessagesSerializer.cpp | 2 +-
7489 files changed, 1381 insertions(+), 674 deletions(-)
rename {dbms/benchmark => benchmark}/benchmark.sh (100%)
rename {dbms/benchmark => benchmark}/clickhouse/benchmark-chyt.sh (100%)
rename {dbms/benchmark => benchmark}/clickhouse/benchmark-new.sh (100%)
rename {dbms/benchmark => benchmark}/clickhouse/benchmark-yql.sh (100%)
rename {dbms/benchmark => benchmark}/clickhouse/queries.sql (100%)
rename {dbms/benchmark => benchmark}/create_dump.sh (100%)
rename {dbms/benchmark => benchmark}/greenplum/README (100%)
rename {dbms/benchmark => benchmark}/greenplum/benchmark.sh (100%)
rename {dbms/benchmark => benchmark}/greenplum/dump_dataset_from_ch.sh (100%)
rename {dbms/benchmark => benchmark}/greenplum/load_data_set.sql (100%)
rename {dbms/benchmark => benchmark}/greenplum/queries.sql (100%)
rename {dbms/benchmark => benchmark}/greenplum/result_parser.py (100%)
rename {dbms/benchmark => benchmark}/greenplum/schema.sql (100%)
rename {dbms/benchmark => benchmark}/hive/conf.sh (100%)
rename {dbms/benchmark => benchmark}/hive/define_schema.sql (100%)
rename {dbms/benchmark => benchmark}/hive/expect.tcl (100%)
rename {dbms/benchmark => benchmark}/hive/log/log_100m_tuned (100%)
rename {dbms/benchmark => benchmark}/hive/log/log_10m/log_10m_ (100%)
rename {dbms/benchmark => benchmark}/hive/log/log_10m/log_10m_1 (100%)
rename {dbms/benchmark => benchmark}/hive/log/log_10m/log_10m_2 (100%)
rename {dbms/benchmark => benchmark}/hive/log/log_10m/log_10m_3 (100%)
rename {dbms/benchmark => benchmark}/hive/log/log_10m/log_10m_tuned (100%)
rename {dbms/benchmark => benchmark}/hive/log/log_10m/log_hits_10m (100%)
rename {dbms/benchmark => benchmark}/hive/queries.sql (100%)
rename {dbms/benchmark => benchmark}/hive/run_hive.sh (100%)
rename {dbms/benchmark => benchmark}/infinidb/conf.sh (100%)
rename {dbms/benchmark => benchmark}/infinidb/define_schema.sql (100%)
rename {dbms/benchmark => benchmark}/infinidb/expect.tcl (100%)
rename {dbms/benchmark => benchmark}/infinidb/log/log_100m (100%)
rename {dbms/benchmark => benchmark}/infinidb/log/log_100m_tuned (100%)
rename {dbms/benchmark => benchmark}/infinidb/log/log_10m (100%)
rename {dbms/benchmark => benchmark}/infinidb/log/log_10m_tuned (100%)
rename {dbms/benchmark => benchmark}/infinidb/queries.sql (100%)
rename {dbms/benchmark => benchmark}/infobright/conf.sh (100%)
rename {dbms/benchmark => benchmark}/infobright/define_schema.sql (100%)
rename {dbms/benchmark => benchmark}/infobright/expect.tcl (100%)
rename {dbms/benchmark => benchmark}/infobright/log-community/log_10m (100%)
rename {dbms/benchmark => benchmark}/infobright/queries.sql (100%)
rename {dbms/benchmark => benchmark}/memsql/benchmark.sh (100%)
rename {dbms/benchmark => benchmark}/memsql/instructions.txt (100%)
rename {dbms/benchmark => benchmark}/memsql/queries.sql (100%)
rename {dbms/benchmark => benchmark}/monetdb/conf.sh (100%)
rename {dbms/benchmark => benchmark}/monetdb/define_schema.sql (100%)
rename {dbms/benchmark => benchmark}/monetdb/expect.tcl (100%)
rename {dbms/benchmark => benchmark}/monetdb/log/log_100m (100%)
rename {dbms/benchmark => benchmark}/monetdb/log/log_100m_1 (100%)
rename {dbms/benchmark => benchmark}/monetdb/log/log_100m_corrected (100%)
rename {dbms/benchmark => benchmark}/monetdb/log/log_100m_corrected_1 (100%)
rename {dbms/benchmark => benchmark}/monetdb/log/log_100m_corrected_2 (100%)
rename {dbms/benchmark => benchmark}/monetdb/log/log_10m (100%)
rename {dbms/benchmark => benchmark}/monetdb/log/log_10m_corrected (100%)
rename {dbms/benchmark => benchmark}/monetdb/log/log_10m_corrected_1 (100%)
rename {dbms/benchmark => benchmark}/monetdb/log/log_upload_100m (100%)
rename {dbms/benchmark => benchmark}/monetdb/log/log_upload_1b (100%)
rename {dbms/benchmark => benchmark}/monetdb/queries.sql (100%)
rename {dbms/benchmark => benchmark}/vertica/README (100%)
rename {dbms/benchmark => benchmark}/vertica/benchmark.sh (100%)
rename {dbms/benchmark => benchmark}/vertica/hits_define_schema.sql (100%)
rename {dbms/benchmark => benchmark}/vertica/queries.sql (100%)
rename dbms/{src => }/Access/AccessControlManager.cpp (100%)
rename dbms/{src => }/Access/AccessControlManager.h (100%)
rename dbms/{src => }/Access/AccessFlags.h (100%)
rename dbms/{src => }/Access/AccessRights.cpp (100%)
rename dbms/{src => }/Access/AccessRights.h (100%)
rename dbms/{src => }/Access/AccessRightsElement.cpp (100%)
rename dbms/{src => }/Access/AccessRightsElement.h (100%)
rename dbms/{src => }/Access/AccessType.h (100%)
rename dbms/{src => }/Access/AllowedClientHosts.cpp (100%)
rename dbms/{src => }/Access/AllowedClientHosts.h (100%)
rename dbms/{src => }/Access/Authentication.cpp (100%)
rename dbms/{src => }/Access/Authentication.h (100%)
rename dbms/{src => }/Access/CMakeLists.txt (100%)
rename dbms/{src => }/Access/ContextAccess.cpp (100%)
rename dbms/{src => }/Access/ContextAccess.h (100%)
rename dbms/{src => }/Access/DiskAccessStorage.cpp (100%)
rename dbms/{src => }/Access/DiskAccessStorage.h (100%)
rename dbms/{src => }/Access/EnabledQuota.cpp (100%)
rename dbms/{src => }/Access/EnabledQuota.h (100%)
rename dbms/{src => }/Access/EnabledRoles.cpp (100%)
rename dbms/{src => }/Access/EnabledRoles.h (100%)
rename dbms/{src => }/Access/EnabledRolesInfo.cpp (100%)
rename dbms/{src => }/Access/EnabledRolesInfo.h (100%)
rename dbms/{src => }/Access/EnabledRowPolicies.cpp (100%)
rename dbms/{src => }/Access/EnabledRowPolicies.h (100%)
rename dbms/{src => }/Access/EnabledSettings.cpp (100%)
rename dbms/{src => }/Access/EnabledSettings.h (100%)
rename dbms/{src => }/Access/ExtendedRoleSet.cpp (100%)
rename dbms/{src => }/Access/ExtendedRoleSet.h (100%)
rename dbms/{src => }/Access/IAccessEntity.cpp (100%)
rename dbms/{src => }/Access/IAccessEntity.h (100%)
rename dbms/{src => }/Access/IAccessStorage.cpp (100%)
rename dbms/{src => }/Access/IAccessStorage.h (100%)
rename dbms/{src => }/Access/MemoryAccessStorage.cpp (100%)
rename dbms/{src => }/Access/MemoryAccessStorage.h (100%)
rename dbms/{src => }/Access/MultipleAccessStorage.cpp (100%)
rename dbms/{src => }/Access/MultipleAccessStorage.h (100%)
rename dbms/{src => }/Access/Quota.cpp (100%)
rename dbms/{src => }/Access/Quota.h (100%)
rename dbms/{src => }/Access/QuotaCache.cpp (100%)
rename dbms/{src => }/Access/QuotaCache.h (100%)
rename dbms/{src => }/Access/QuotaUsageInfo.cpp (100%)
rename dbms/{src => }/Access/QuotaUsageInfo.h (100%)
rename dbms/{src => }/Access/Role.cpp (100%)
rename dbms/{src => }/Access/Role.h (100%)
rename dbms/{src => }/Access/RoleCache.cpp (100%)
rename dbms/{src => }/Access/RoleCache.h (100%)
rename dbms/{src => }/Access/RowPolicy.cpp (100%)
rename dbms/{src => }/Access/RowPolicy.h (100%)
rename dbms/{src => }/Access/RowPolicyCache.cpp (100%)
rename dbms/{src => }/Access/RowPolicyCache.h (100%)
rename dbms/{src => }/Access/SettingsConstraints.cpp (100%)
rename dbms/{src => }/Access/SettingsConstraints.h (100%)
rename dbms/{src => }/Access/SettingsProfile.cpp (100%)
rename dbms/{src => }/Access/SettingsProfile.h (100%)
rename dbms/{src => }/Access/SettingsProfileElement.cpp (100%)
rename dbms/{src => }/Access/SettingsProfileElement.h (100%)
rename dbms/{src => }/Access/SettingsProfilesCache.cpp (100%)
rename dbms/{src => }/Access/SettingsProfilesCache.h (100%)
rename dbms/{src => }/Access/User.cpp (100%)
rename dbms/{src => }/Access/User.h (100%)
rename dbms/{src => }/Access/UsersConfigAccessStorage.cpp (100%)
rename dbms/{src => }/Access/UsersConfigAccessStorage.h (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionAggThrow.cpp (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionArgMinMax.h (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionArray.cpp (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionArray.h (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionAvg.cpp (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionAvg.h (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionAvgWeighted.cpp (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionAvgWeighted.h (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionBitwise.cpp (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionBitwise.h (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionBoundingRatio.cpp (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionBoundingRatio.h (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionCategoricalInformationValue.cpp (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionCategoricalInformationValue.h (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionCombinatorFactory.cpp (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionCombinatorFactory.h (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionCount.cpp (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionCount.h (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionEntropy.cpp (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionEntropy.h (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionFactory.cpp (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionFactory.h (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionForEach.cpp (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionForEach.h (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionGroupArray.cpp (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionGroupArray.h (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionGroupArrayInsertAt.cpp (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionGroupArrayInsertAt.h (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionGroupArrayMoving.cpp (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionGroupArrayMoving.h (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionGroupBitmap.cpp (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionGroupBitmap.h (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionGroupBitmapData.h (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionGroupUniqArray.cpp (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionGroupUniqArray.h (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionHistogram.cpp (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionHistogram.h (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionIf.cpp (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionIf.h (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionMLMethod.cpp (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionMLMethod.h (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionMaxIntersections.cpp (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionMaxIntersections.h (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionMerge.cpp (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionMerge.h (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionMinMaxAny.cpp (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionMinMaxAny.h (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionNothing.h (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionNull.cpp (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionNull.h (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionOrFill.cpp (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionOrFill.h (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionQuantile.cpp (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionQuantile.h (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionResample.cpp (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionResample.h (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionRetention.cpp (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionRetention.h (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionSequenceMatch.cpp (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionSequenceMatch.h (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionSimpleLinearRegression.cpp (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionSimpleLinearRegression.h (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionState.cpp (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionState.h (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionStatistics.cpp (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionStatistics.h (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionStatisticsSimple.cpp (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionStatisticsSimple.h (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionSum.cpp (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionSum.h (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionSumMap.cpp (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionSumMap.h (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionTimeSeriesGroupSum.cpp (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionTimeSeriesGroupSum.h (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionTopK.cpp (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionTopK.h (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionUniq.cpp (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionUniq.h (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionUniqCombined.cpp (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionUniqCombined.h (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionUniqUpTo.cpp (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionUniqUpTo.h (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionWindowFunnel.cpp (100%)
rename dbms/{src => }/AggregateFunctions/AggregateFunctionWindowFunnel.h (100%)
rename dbms/{src => }/AggregateFunctions/CMakeLists.txt (100%)
rename dbms/{src => }/AggregateFunctions/FactoryHelpers.h (100%)
rename dbms/{src => }/AggregateFunctions/Helpers.h (100%)
rename dbms/{src => }/AggregateFunctions/HelpersMinMaxAny.h (100%)
rename dbms/{src => }/AggregateFunctions/IAggregateFunction.h (100%)
rename dbms/{src => }/AggregateFunctions/IAggregateFunctionCombinator.h (100%)
rename dbms/{src => }/AggregateFunctions/QuantileExact.h (100%)
rename dbms/{src => }/AggregateFunctions/QuantileExactWeighted.h (100%)
rename dbms/{src => }/AggregateFunctions/QuantileReservoirSampler.h (100%)
rename dbms/{src => }/AggregateFunctions/QuantileReservoirSamplerDeterministic.h (100%)
rename dbms/{src => }/AggregateFunctions/QuantileTDigest.h (100%)
rename dbms/{src => }/AggregateFunctions/QuantileTiming.h (100%)
rename dbms/{src => }/AggregateFunctions/QuantilesCommon.h (100%)
rename dbms/{src => }/AggregateFunctions/ReservoirSampler.h (100%)
rename dbms/{src => }/AggregateFunctions/ReservoirSamplerDeterministic.h (100%)
rename dbms/{src => }/AggregateFunctions/UniqCombinedBiasData.cpp (100%)
rename dbms/{src => }/AggregateFunctions/UniqCombinedBiasData.h (100%)
rename dbms/{src => }/AggregateFunctions/UniqVariadicHash.cpp (100%)
rename dbms/{src => }/AggregateFunctions/UniqVariadicHash.h (100%)
rename dbms/{src => }/AggregateFunctions/UniquesHashSet.h (100%)
rename dbms/{src => }/AggregateFunctions/parseAggregateFunctionParameters.cpp (100%)
rename dbms/{src => }/AggregateFunctions/parseAggregateFunctionParameters.h (100%)
rename dbms/{src => }/AggregateFunctions/registerAggregateFunctions.cpp (100%)
rename dbms/{src => }/AggregateFunctions/registerAggregateFunctions.h (100%)
rename dbms/{src => }/AggregateFunctions/tests/CMakeLists.txt (100%)
rename dbms/{src => }/AggregateFunctions/tests/quantile-t-digest.cpp (100%)
rename dbms/{src => }/Client/CMakeLists.txt (100%)
rename dbms/{src => }/Client/Connection.cpp (100%)
rename dbms/{src => }/Client/Connection.h (100%)
rename dbms/{src => }/Client/ConnectionPool.h (100%)
rename dbms/{src => }/Client/ConnectionPoolWithFailover.cpp (100%)
rename dbms/{src => }/Client/ConnectionPoolWithFailover.h (100%)
rename dbms/{src => }/Client/MultiplexedConnections.cpp (100%)
rename dbms/{src => }/Client/MultiplexedConnections.h (100%)
rename dbms/{src => }/Client/TimeoutSetter.cpp (100%)
rename dbms/{src => }/Client/TimeoutSetter.h (100%)
rename dbms/{src => }/Client/tests/CMakeLists.txt (100%)
rename dbms/{src => }/Client/tests/test_connect.cpp (100%)
rename dbms/{src => }/Columns/CMakeLists.txt (100%)
rename dbms/{src => }/Columns/Collator.cpp (100%)
rename dbms/{src => }/Columns/Collator.h (100%)
rename dbms/{src => }/Columns/ColumnAggregateFunction.cpp (100%)
rename dbms/{src => }/Columns/ColumnAggregateFunction.h (100%)
rename dbms/{src => }/Columns/ColumnArray.cpp (100%)
rename dbms/{src => }/Columns/ColumnArray.h (100%)
rename dbms/{src => }/Columns/ColumnConst.cpp (100%)
rename dbms/{src => }/Columns/ColumnConst.h (100%)
rename dbms/{src => }/Columns/ColumnDecimal.cpp (100%)
rename dbms/{src => }/Columns/ColumnDecimal.h (100%)
rename dbms/{src => }/Columns/ColumnFixedString.cpp (100%)
rename dbms/{src => }/Columns/ColumnFixedString.h (100%)
rename dbms/{src => }/Columns/ColumnFunction.cpp (100%)
rename dbms/{src => }/Columns/ColumnFunction.h (100%)
rename dbms/{src => }/Columns/ColumnLowCardinality.cpp (100%)
rename dbms/{src => }/Columns/ColumnLowCardinality.h (100%)
rename dbms/{src => }/Columns/ColumnNothing.h (100%)
rename dbms/{src => }/Columns/ColumnNullable.cpp (100%)
rename dbms/{src => }/Columns/ColumnNullable.h (100%)
rename dbms/{src => }/Columns/ColumnSet.h (100%)
rename dbms/{src => }/Columns/ColumnString.cpp (100%)
rename dbms/{src => }/Columns/ColumnString.h (100%)
rename dbms/{src => }/Columns/ColumnTuple.cpp (100%)
rename dbms/{src => }/Columns/ColumnTuple.h (100%)
rename dbms/{src => }/Columns/ColumnUnique.h (100%)
rename dbms/{src => }/Columns/ColumnVector.cpp (100%)
rename dbms/{src => }/Columns/ColumnVector.h (100%)
rename dbms/{src => }/Columns/ColumnVectorHelper.h (100%)
rename dbms/{src => }/Columns/ColumnsCommon.cpp (100%)
rename dbms/{src => }/Columns/ColumnsCommon.h (100%)
rename dbms/{src => }/Columns/ColumnsNumber.h (100%)
rename dbms/{src => }/Columns/FilterDescription.cpp (100%)
rename dbms/{src => }/Columns/FilterDescription.h (100%)
rename dbms/{src => }/Columns/IColumn.cpp (100%)
rename dbms/{src => }/Columns/IColumn.h (100%)
rename dbms/{src => }/Columns/IColumnDummy.h (100%)
rename dbms/{src => }/Columns/IColumnImpl.h (100%)
rename dbms/{src => }/Columns/IColumnUnique.h (100%)
rename dbms/{src => }/Columns/ReverseIndex.h (100%)
rename dbms/{src => }/Columns/getLeastSuperColumn.cpp (100%)
rename dbms/{src => }/Columns/getLeastSuperColumn.h (100%)
rename dbms/{src => }/Columns/tests/CMakeLists.txt (100%)
rename dbms/{src => }/Columns/tests/gtest_column_unique.cpp (100%)
rename dbms/{src => }/Columns/tests/gtest_weak_hash_32.cpp (100%)
rename dbms/{src => }/Common/ActionBlocker.h (100%)
rename dbms/{src => }/Common/ActionLock.cpp (100%)
rename dbms/{src => }/Common/ActionLock.h (100%)
rename dbms/{src => }/Common/AlignedBuffer.cpp (100%)
rename dbms/{src => }/Common/AlignedBuffer.h (100%)
rename dbms/{src => }/Common/Allocator.h (100%)
rename dbms/{src => }/Common/Allocator_fwd.h (100%)
rename dbms/{src => }/Common/Arena.h (100%)
rename dbms/{src => }/Common/ArenaAllocator.h (100%)
rename dbms/{src => }/Common/ArenaWithFreeLists.h (100%)
rename dbms/{src => }/Common/ArrayCache.h (100%)
rename dbms/{src => }/Common/AutoArray.h (100%)
rename dbms/{src => }/Common/BitHelpers.h (100%)
rename dbms/{src => }/Common/CMakeLists.txt (100%)
rename dbms/{src => }/Common/COW.h (100%)
rename dbms/{src => }/Common/ClickHouseRevision.cpp (100%)
rename dbms/{src => }/Common/ClickHouseRevision.h (100%)
rename dbms/{src => }/Common/ColumnsHashing.h (100%)
rename dbms/{src => }/Common/ColumnsHashingImpl.h (100%)
rename dbms/{src => }/Common/CombinedCardinalityEstimator.h (100%)
rename dbms/{src => }/Common/CompactArray.h (100%)
rename dbms/{src => }/Common/ConcurrentBoundedQueue.h (100%)
rename dbms/{src => }/Common/Config/AbstractConfigurationComparison.cpp (100%)
rename dbms/{src => }/Common/Config/AbstractConfigurationComparison.h (100%)
rename dbms/{src => }/Common/Config/CMakeLists.txt (100%)
rename dbms/{src => }/Common/Config/ConfigProcessor.cpp (100%)
rename dbms/{src => }/Common/Config/ConfigProcessor.h (100%)
rename dbms/{src => }/Common/Config/ConfigReloader.cpp (100%)
rename dbms/{src => }/Common/Config/ConfigReloader.h (100%)
rename dbms/{src => }/Common/Config/configReadClient.cpp (100%)
rename dbms/{src => }/Common/Config/configReadClient.h (100%)
rename dbms/{src => }/Common/CounterInFile.h (100%)
rename dbms/{src => }/Common/CpuId.h (100%)
rename dbms/{src => }/Common/CurrentMetrics.cpp (100%)
rename dbms/{src => }/Common/CurrentMetrics.h (100%)
rename dbms/{src => }/Common/CurrentThread.cpp (100%)
rename dbms/{src => }/Common/CurrentThread.h (100%)
rename dbms/{src => }/Common/DNSResolver.cpp (100%)
rename dbms/{src => }/Common/DNSResolver.h (100%)
rename dbms/{src => }/Common/Dwarf.cpp (100%)
rename dbms/{src => }/Common/Dwarf.h (100%)
rename dbms/{src => }/Common/Elf.cpp (100%)
rename dbms/{src => }/Common/Elf.h (100%)
rename dbms/{src => }/Common/ErrorCodes.cpp (100%)
rename dbms/{src => }/Common/EventCounter.h (100%)
rename dbms/{src => }/Common/Exception.cpp (100%)
rename dbms/{src => }/Common/Exception.h (100%)
rename dbms/{src => }/Common/ExternalLoaderStatus.cpp (100%)
rename dbms/{src => }/Common/ExternalLoaderStatus.h (100%)
rename dbms/{src => }/Common/FieldVisitors.cpp (100%)
rename dbms/{src => }/Common/FieldVisitors.h (100%)
rename dbms/{src => }/Common/FileChecker.cpp (100%)
rename dbms/{src => }/Common/FileChecker.h (100%)
rename dbms/{src => }/Common/FileUpdatesTracker.h (100%)
rename dbms/{src => }/Common/HTMLForm.h (100%)
rename dbms/{src => }/Common/HashTable/ClearableHashMap.h (100%)
rename dbms/{src => }/Common/HashTable/ClearableHashSet.h (100%)
rename dbms/{src => }/Common/HashTable/FixedClearableHashMap.h (100%)
rename dbms/{src => }/Common/HashTable/FixedClearableHashSet.h (100%)
rename dbms/{src => }/Common/HashTable/FixedHashMap.h (100%)
rename dbms/{src => }/Common/HashTable/FixedHashSet.h (100%)
rename dbms/{src => }/Common/HashTable/FixedHashTable.h (100%)
rename dbms/{src => }/Common/HashTable/Hash.h (100%)
rename dbms/{src => }/Common/HashTable/HashMap.h (100%)
rename dbms/{src => }/Common/HashTable/HashSet.h (100%)
rename dbms/{src => }/Common/HashTable/HashTable.h (100%)
rename dbms/{src => }/Common/HashTable/HashTableAllocator.h (100%)
rename dbms/{src => }/Common/HashTable/HashTableKeyHolder.h (100%)
rename dbms/{src => }/Common/HashTable/SmallTable.h (100%)
rename dbms/{src => }/Common/HashTable/StringHashMap.h (100%)
rename dbms/{src => }/Common/HashTable/StringHashTable.h (100%)
rename dbms/{src => }/Common/HashTable/TwoLevelHashMap.h (100%)
rename dbms/{src => }/Common/HashTable/TwoLevelHashTable.h (100%)
rename dbms/{src => }/Common/HashTable/TwoLevelStringHashMap.h (100%)
rename dbms/{src => }/Common/HashTable/TwoLevelStringHashTable.h (100%)
rename dbms/{src => }/Common/HyperLogLogBiasEstimator.h (100%)
rename dbms/{src => }/Common/HyperLogLogCounter.h (100%)
rename dbms/{src => }/Common/HyperLogLogWithSmallSetOptimization.h (100%)
rename dbms/{src => }/Common/IFactoryWithAliases.h (100%)
rename dbms/{src => }/Common/IPv6ToBinary.cpp (100%)
rename dbms/{src => }/Common/IPv6ToBinary.h (100%)
rename dbms/{src => }/Common/Increment.h (100%)
rename dbms/{src => }/Common/InterruptListener.h (100%)
rename dbms/{src => }/Common/IntervalKind.cpp (100%)
rename dbms/{src => }/Common/IntervalKind.h (100%)
rename dbms/{src => }/Common/LRUCache.h (100%)
rename dbms/{src => }/Common/Macros.cpp (100%)
rename dbms/{src => }/Common/Macros.h (100%)
rename dbms/{src => }/Common/MemorySanitizer.h (100%)
rename dbms/{src => }/Common/MemoryTracker.cpp (100%)
rename dbms/{src => }/Common/MemoryTracker.h (100%)
rename dbms/{src => }/Common/MultiVersion.h (100%)
rename dbms/{src => }/Common/NaNUtils.h (100%)
rename dbms/{src => }/Common/NamePrompter.h (100%)
rename dbms/{src => }/Common/NetException.h (100%)
rename dbms/{src => }/Common/ObjectPool.h (100%)
rename dbms/{src => }/Common/OpenSSLHelpers.cpp (100%)
rename dbms/{src => }/Common/OpenSSLHelpers.h (100%)
rename dbms/{src => }/Common/OptimizedRegularExpression.cpp (100%)
rename dbms/{src => }/Common/OptimizedRegularExpression.h (100%)
rename dbms/{src => }/Common/PODArray.cpp (100%)
rename dbms/{src => }/Common/PODArray.h (100%)
rename dbms/{src => }/Common/PODArray_fwd.h (100%)
rename dbms/{src => }/Common/PipeFDs.cpp (100%)
rename dbms/{src => }/Common/PipeFDs.h (100%)
rename dbms/{src => }/Common/PoolBase.h (100%)
rename dbms/{src => }/Common/PoolWithFailoverBase.h (100%)
}/Common/PoolWithFailoverBase.h (100%) rename dbms/{src => }/Common/ProfileEvents.cpp (100%) rename dbms/{src => }/Common/ProfileEvents.h (100%) rename dbms/{src => }/Common/ProfilingScopedRWLock.h (100%) rename dbms/{src => }/Common/QueryProfiler.cpp (100%) rename dbms/{src => }/Common/QueryProfiler.h (100%) rename dbms/{src => }/Common/RWLock.cpp (100%) rename dbms/{src => }/Common/RWLock.h (100%) rename dbms/{src => }/Common/RadixSort.h (100%) rename dbms/{src => }/Common/RemoteHostFilter.cpp (100%) rename dbms/{src => }/Common/RemoteHostFilter.h (100%) rename dbms/{src => }/Common/SensitiveDataMasker.cpp (100%) rename dbms/{src => }/Common/SensitiveDataMasker.h (100%) rename dbms/{src => }/Common/SettingsChanges.h (100%) rename dbms/{src => }/Common/SharedBlockRowRef.h (100%) rename dbms/{src => }/Common/SharedLibrary.cpp (100%) rename dbms/{src => }/Common/SharedLibrary.h (100%) rename dbms/{src => }/Common/ShellCommand.cpp (100%) rename dbms/{src => }/Common/ShellCommand.h (100%) rename dbms/{src => }/Common/SimpleActionBlocker.h (100%) rename dbms/{src => }/Common/SimpleIncrement.h (100%) rename dbms/{src => }/Common/SipHash.h (100%) rename dbms/{src => }/Common/SmallObjectPool.h (100%) rename dbms/{src => }/Common/SpaceSaving.h (100%) rename dbms/{src => }/Common/StackTrace.cpp (100%) rename dbms/{src => }/Common/StackTrace.h (100%) rename dbms/{src => }/Common/StatusFile.cpp (100%) rename dbms/{src => }/Common/StatusFile.h (100%) rename dbms/{src => }/Common/StatusInfo.cpp (100%) rename dbms/{src => }/Common/StatusInfo.h (100%) rename dbms/{src => }/Common/Stopwatch.cpp (100%) rename dbms/{src => }/Common/Stopwatch.h (100%) rename dbms/{src => }/Common/StringSearcher.h (100%) rename dbms/{src => }/Common/StringUtils/CMakeLists.txt (100%) rename dbms/{src => }/Common/StringUtils/StringUtils.cpp (100%) rename dbms/{src => }/Common/StringUtils/StringUtils.h (100%) rename dbms/{src => }/Common/StudentTTest.cpp (100%) rename dbms/{src => }/Common/StudentTTest.h (100%) rename dbms/{src => }/Common/SymbolIndex.cpp (100%) rename dbms/{src => }/Common/SymbolIndex.h (100%) rename dbms/{src => }/Common/TaskStatsInfoGetter.cpp (100%) rename dbms/{src => }/Common/TaskStatsInfoGetter.h (100%) rename dbms/{src => }/Common/TerminalSize.cpp (100%) rename dbms/{src => }/Common/TerminalSize.h (100%) rename dbms/{src => }/Common/ThreadFuzzer.cpp (100%) rename dbms/{src => }/Common/ThreadFuzzer.h (100%) rename dbms/{src => }/Common/ThreadPool.cpp (100%) rename dbms/{src => }/Common/ThreadPool.h (100%) rename dbms/{src => }/Common/ThreadProfileEvents.h (100%) rename dbms/{src => }/Common/ThreadStatus.cpp (100%) rename dbms/{src => }/Common/ThreadStatus.h (100%) rename dbms/{src => }/Common/Throttler.h (100%) rename dbms/{src => }/Common/TraceCollector.cpp (100%) rename dbms/{src => }/Common/TraceCollector.h (100%) rename dbms/{src => }/Common/TypeList.h (100%) rename dbms/{src => }/Common/TypePromotion.h (100%) rename dbms/{src => }/Common/UInt128.h (100%) rename dbms/{src => }/Common/UTF8Helpers.cpp (100%) rename dbms/{src => }/Common/UTF8Helpers.h (100%) rename dbms/{src => }/Common/UnicodeBar.h (100%) rename dbms/{src => }/Common/VariableContext.h (100%) rename dbms/{src => }/Common/Visitor.h (100%) rename dbms/{src => }/Common/Volnitsky.h (100%) rename dbms/{src => }/Common/WeakHash.cpp (100%) rename dbms/{src => }/Common/WeakHash.h (100%) rename dbms/{src => }/Common/XDBCBridgeHelper.h (100%) rename dbms/{src => }/Common/ZooKeeper/CMakeLists.txt (100%) rename dbms/{src => 
rename dbms/{src => }/Common/ZooKeeper/IKeeper.cpp (100%)
rename dbms/{src => }/Common/ZooKeeper/IKeeper.h (100%)
rename dbms/{src => }/Common/ZooKeeper/Increment.h (100%)
rename dbms/{src => }/Common/ZooKeeper/KeeperException.h (100%)
rename dbms/{src => }/Common/ZooKeeper/LeaderElection.h (100%)
rename dbms/{src => }/Common/ZooKeeper/Lock.cpp (100%)
rename dbms/{src => }/Common/ZooKeeper/Lock.h (100%)
rename dbms/{src => }/Common/ZooKeeper/TestKeeper.cpp (100%)
rename dbms/{src => }/Common/ZooKeeper/TestKeeper.h (100%)
rename dbms/{src => }/Common/ZooKeeper/Types.h (100%)
rename dbms/{src => }/Common/ZooKeeper/ZooKeeper.cpp (100%)
rename dbms/{src => }/Common/ZooKeeper/ZooKeeper.h (100%)
rename dbms/{src => }/Common/ZooKeeper/ZooKeeperHolder.cpp (100%)
rename dbms/{src => }/Common/ZooKeeper/ZooKeeperHolder.h (100%)
rename dbms/{src => }/Common/ZooKeeper/ZooKeeperImpl.cpp (100%)
rename dbms/{src => }/Common/ZooKeeper/ZooKeeperImpl.h (100%)
rename dbms/{src => }/Common/ZooKeeper/ZooKeeperNodeCache.cpp (100%)
rename dbms/{src => }/Common/ZooKeeper/ZooKeeperNodeCache.h (100%)
rename dbms/{src => }/Common/ZooKeeper/tests/CMakeLists.txt (100%)
rename dbms/{src => }/Common/ZooKeeper/tests/gtest_zkutil_test_multi_exception.cpp (100%)
rename dbms/{src => }/Common/ZooKeeper/tests/nozk.sh (100%)
rename dbms/{src => }/Common/ZooKeeper/tests/yeszk.sh (100%)
rename dbms/{src => }/Common/ZooKeeper/tests/zk_many_watches_reconnect.cpp (100%)
rename dbms/{src => }/Common/ZooKeeper/tests/zkutil_expiration_test.cpp (100%)
rename dbms/{src => }/Common/ZooKeeper/tests/zkutil_test_async.cpp (100%)
rename dbms/{src => }/Common/ZooKeeper/tests/zkutil_test_commands.cpp (100%)
rename dbms/{src => }/Common/ZooKeeper/tests/zkutil_test_commands_new_lib.cpp (100%)
rename dbms/{src => }/Common/ZooKeeper/tests/zkutil_test_lock.cpp (100%)
rename dbms/{src => }/Common/ZooKeeper/tests/zkutil_zookeeper_holder.cpp (100%)
rename dbms/{src => }/Common/ZooKeeper/tests/zookeeper_impl.cpp (100%)
rename dbms/{src => }/Common/assert_cast.h (100%)
rename dbms/{src => }/Common/checkStackSize.cpp (100%)
rename dbms/{src => }/Common/checkStackSize.h (100%)
rename dbms/{src => }/Common/config.h.in (100%)
rename dbms/{src => }/Common/config_version.h.in (100%)
rename dbms/{src => }/Common/createHardLink.cpp (100%)
rename dbms/{src => }/Common/createHardLink.h (100%)
rename dbms/{src => }/Common/escapeForFileName.cpp (100%)
rename dbms/{src => }/Common/escapeForFileName.h (100%)
rename dbms/{src => }/Common/filesystemHelpers.cpp (100%)
rename dbms/{src => }/Common/filesystemHelpers.h (100%)
rename dbms/{src => }/Common/formatIPv6.cpp (100%)
rename dbms/{src => }/Common/formatIPv6.h (100%)
rename dbms/{src => }/Common/formatReadable.cpp (100%)
rename dbms/{src => }/Common/formatReadable.h (100%)
rename dbms/{src => }/Common/getExecutablePath.cpp (100%)
rename dbms/{src => }/Common/getExecutablePath.h (100%)
rename dbms/{src => }/Common/getMultipleKeysFromConfig.cpp (100%)
rename dbms/{src => }/Common/getMultipleKeysFromConfig.h (100%)
rename dbms/{src => }/Common/getNumberOfPhysicalCPUCores.cpp (100%)
rename dbms/{src => }/Common/getNumberOfPhysicalCPUCores.h (100%)
rename dbms/{src => }/Common/hasLinuxCapability.cpp (100%)
rename dbms/{src => }/Common/hasLinuxCapability.h (100%)
rename dbms/{src => }/Common/hex.cpp (100%)
rename dbms/{src => }/Common/hex.h (100%)
rename dbms/{src => }/Common/intExp.h (100%)
rename dbms/{src => }/Common/interpolate.h (100%)
rename dbms/{src => }/Common/isLocalAddress.cpp (100%)
rename dbms/{src => }/Common/isLocalAddress.h (100%)
rename dbms/{src => }/Common/malloc.cpp (100%)
rename dbms/{src => }/Common/memcmpSmall.h (100%)
rename dbms/{src => }/Common/memcpySmall.h (100%)
rename dbms/{src => }/Common/new_delete.cpp (100%)
rename dbms/{src => }/Common/parseAddress.cpp (100%)
rename dbms/{src => }/Common/parseAddress.h (100%)
rename dbms/{src => }/Common/parseGlobs.cpp (100%)
rename dbms/{src => }/Common/parseGlobs.h (100%)
rename dbms/{src => }/Common/parseRemoteDescription.cpp (100%)
rename dbms/{src => }/Common/parseRemoteDescription.h (100%)
rename dbms/{src => }/Common/quoteString.cpp (100%)
rename dbms/{src => }/Common/quoteString.h (100%)
rename dbms/{src => }/Common/randomSeed.cpp (100%)
rename dbms/{src => }/Common/randomSeed.h (100%)
rename dbms/{src => }/Common/setThreadName.cpp (100%)
rename dbms/{src => }/Common/setThreadName.h (100%)
rename dbms/{src => }/Common/tests/CMakeLists.txt (100%)
rename dbms/{src => }/Common/tests/arena_with_free_lists.cpp (100%)
rename dbms/{src => }/Common/tests/array_cache.cpp (100%)
rename dbms/{src => }/Common/tests/auto_array.cpp (100%)
rename dbms/{src => }/Common/tests/chaos_sanitizer.cpp (100%)
rename dbms/{src => }/Common/tests/compact_array.cpp (100%)
rename dbms/{src => }/Common/tests/cow_columns.cpp (100%)
rename dbms/{src => }/Common/tests/cow_compositions.cpp (100%)
rename dbms/{src => }/Common/tests/gtest_getMultipleValuesFromConfig.cpp (100%)
rename dbms/{src => }/Common/tests/gtest_global_context.h (100%)
rename dbms/{src => }/Common/tests/gtest_makeRegexpPatternFromGlobs.cpp (100%)
rename dbms/{src => }/Common/tests/gtest_pod_array.cpp (100%)
rename dbms/{src => }/Common/tests/gtest_rw_lock.cpp (100%)
rename dbms/{src => }/Common/tests/gtest_sensitive_data_masker.cpp (100%)
rename dbms/{src => }/Common/tests/gtest_shell_command.cpp (100%)
rename dbms/{src => }/Common/tests/gtest_thread_pool_concurrent_wait.cpp (100%)
rename dbms/{src => }/Common/tests/gtest_thread_pool_global_full.cpp (100%)
rename dbms/{src => }/Common/tests/gtest_thread_pool_limit.cpp (100%)
rename dbms/{src => }/Common/tests/gtest_thread_pool_loop.cpp (100%)
rename dbms/{src => }/Common/tests/gtest_thread_pool_schedule_exception.cpp (100%)
rename dbms/{src => }/Common/tests/gtest_unescapeForFileName.cpp (100%)
rename dbms/{src => }/Common/tests/hash_table.cpp (100%)
rename dbms/{src => }/Common/tests/hashes_test.cpp (100%)
rename dbms/{src => }/Common/tests/int_hashes_perf.cpp (100%)
rename dbms/{src => }/Common/tests/integer_hash_tables_and_hashes.cpp (100%)
rename dbms/{src => }/Common/tests/parallel_aggregation.cpp (100%)
rename dbms/{src => }/Common/tests/parallel_aggregation2.cpp (100%)
rename dbms/{src => }/Common/tests/pod_array.cpp (100%)
rename dbms/{src => }/Common/tests/radix_sort.cpp (100%)
rename dbms/{src => }/Common/tests/simple_cache.cpp (100%)
rename dbms/{src => }/Common/tests/sip_hash_perf.cpp (100%)
rename dbms/{src => }/Common/tests/small_table.cpp (100%)
rename dbms/{src => }/Common/tests/space_saving.cpp (100%)
rename dbms/{src => }/Common/tests/stopwatch.cpp (100%)
rename dbms/{src => }/Common/tests/symbol_index.cpp (100%)
rename dbms/{src => }/Common/tests/thread_creation_latency.cpp (100%)
rename dbms/{src => }/Common/thread_local_rng.cpp (100%)
rename dbms/{src => }/Common/thread_local_rng.h (100%)
rename dbms/{src => }/Common/typeid_cast.h (100%)
rename dbms/{src => }/Compression/CMakeLists.txt (100%)
rename dbms/{src => }/Compression/CachedCompressedReadBuffer.cpp (100%)
rename dbms/{src => }/Compression/CachedCompressedReadBuffer.h (100%)
rename dbms/{src => }/Compression/CompressedReadBuffer.cpp (100%)
rename dbms/{src => }/Compression/CompressedReadBuffer.h (100%)
rename dbms/{src => }/Compression/CompressedReadBufferBase.cpp (100%)
rename dbms/{src => }/Compression/CompressedReadBufferBase.h (100%)
rename dbms/{src => }/Compression/CompressedReadBufferFromFile.cpp (100%)
rename dbms/{src => }/Compression/CompressedReadBufferFromFile.h (100%)
rename dbms/{src => }/Compression/CompressedWriteBuffer.cpp (100%)
rename dbms/{src => }/Compression/CompressedWriteBuffer.h (100%)
rename dbms/{src => }/Compression/CompressionCodecDelta.cpp (100%)
rename dbms/{src => }/Compression/CompressionCodecDelta.h (100%)
rename dbms/{src => }/Compression/CompressionCodecDoubleDelta.cpp (100%)
rename dbms/{src => }/Compression/CompressionCodecDoubleDelta.h (100%)
rename dbms/{src => }/Compression/CompressionCodecGorilla.cpp (100%)
rename dbms/{src => }/Compression/CompressionCodecGorilla.h (100%)
rename dbms/{src => }/Compression/CompressionCodecLZ4.cpp (100%)
rename dbms/{src => }/Compression/CompressionCodecLZ4.h (100%)
rename dbms/{src => }/Compression/CompressionCodecMultiple.cpp (100%)
rename dbms/{src => }/Compression/CompressionCodecMultiple.h (100%)
rename dbms/{src => }/Compression/CompressionCodecNone.cpp (100%)
rename dbms/{src => }/Compression/CompressionCodecNone.h (100%)
rename dbms/{src => }/Compression/CompressionCodecT64.cpp (100%)
rename dbms/{src => }/Compression/CompressionCodecT64.h (100%)
rename dbms/{src => }/Compression/CompressionCodecZSTD.cpp (100%)
rename dbms/{src => }/Compression/CompressionCodecZSTD.h (100%)
rename dbms/{src => }/Compression/CompressionFactory.cpp (100%)
rename dbms/{src => }/Compression/CompressionFactory.h (100%)
rename dbms/{src => }/Compression/CompressionInfo.h (100%)
rename dbms/{src => }/Compression/ICompressionCodec.cpp (100%)
rename dbms/{src => }/Compression/ICompressionCodec.h (100%)
rename dbms/{src => }/Compression/LZ4_decompress_faster.cpp (100%)
rename dbms/{src => }/Compression/LZ4_decompress_faster.h (100%)
rename dbms/{src => }/Compression/tests/CMakeLists.txt (100%)
rename dbms/{src => }/Compression/tests/cached_compressed_read_buffer.cpp (100%)
rename dbms/{src => }/Compression/tests/compressed_buffer.cpp (100%)
rename dbms/{src => }/Compression/tests/compressed_buffer_fuzz.cpp (100%)
rename dbms/{src => }/Compression/tests/gtest_compressionCodec.cpp (100%)
rename dbms/{src => }/Core/AccurateComparison.h (100%)
rename dbms/{src => }/Core/BackgroundSchedulePool.cpp (100%)
rename dbms/{src => }/Core/BackgroundSchedulePool.h (100%)
rename dbms/{src => }/Core/Block.cpp (100%)
rename dbms/{src => }/Core/Block.h (100%)
rename dbms/{src => }/Core/BlockInfo.cpp (100%)
rename dbms/{src => }/Core/BlockInfo.h (100%)
rename dbms/{src => }/Core/CMakeLists.txt (100%)
rename dbms/{src => }/Core/ColumnNumbers.h (100%)
rename dbms/{src => }/Core/ColumnWithTypeAndName.cpp (100%)
rename dbms/{src => }/Core/ColumnWithTypeAndName.h (100%)
rename dbms/{src => }/Core/ColumnsWithTypeAndName.h (100%)
rename dbms/{src => }/Core/DecimalComparison.h (100%)
rename dbms/{src => }/Core/DecimalFunctions.h (100%)
rename dbms/{src => }/Core/Defines.h (100%)
rename dbms/{src => }/Core/ExternalResultDescription.cpp (100%)
rename dbms/{src => }/Core/ExternalResultDescription.h (100%)
rename dbms/{src => }/Core/ExternalTable.cpp (100%)
rename dbms/{src => }/Core/ExternalTable.h (100%)
rename dbms/{src => }/Core/Field.cpp (100%)
rename dbms/{src => }/Core/Field.h (100%)
rename dbms/{src => }/Core/MySQLProtocol.cpp (100%)
rename dbms/{src => }/Core/MySQLProtocol.h (100%)
rename dbms/{src => }/Core/Names.h (100%)
rename dbms/{src => }/Core/NamesAndTypes.cpp (100%)
rename dbms/{src => }/Core/NamesAndTypes.h (100%)
rename dbms/{src => }/Core/Protocol.h (100%)
rename dbms/{src => }/Core/QualifiedTableName.h (100%)
rename dbms/{src => }/Core/QueryProcessingStage.h (100%)
rename dbms/{src => }/Core/Row.h (100%)
rename dbms/{src => }/Core/Settings.cpp (100%)
rename dbms/{src => }/Core/Settings.h (100%)
rename dbms/{src => }/Core/SettingsCollection.cpp (100%)
rename dbms/{src => }/Core/SettingsCollection.h (100%)
rename dbms/{src => }/Core/SettingsCollectionImpl.h (100%)
rename dbms/{src => }/Core/SortCursor.h (100%)
rename dbms/{src => }/Core/SortDescription.h (100%)
rename dbms/{src => }/Core/TypeListNumber.h (100%)
rename dbms/{src => }/Core/Types.h (100%)
rename dbms/{src => }/Core/UUID.h (100%)
rename dbms/{src => }/Core/callOnTypeIndex.h (100%)
rename dbms/{src => }/Core/config_core.h.in (100%)
rename dbms/{src => }/Core/iostream_debug_helpers.cpp (100%)
rename dbms/{src => }/Core/iostream_debug_helpers.h (100%)
rename dbms/{src => }/Core/tests/CMakeLists.txt (100%)
rename dbms/{src => }/Core/tests/field.cpp (100%)
rename dbms/{src => }/Core/tests/gtest_DecimalFunctions.cpp (100%)
rename dbms/{src => }/Core/tests/move_field.cpp (100%)
rename dbms/{src => }/Core/tests/string_pool.cpp (100%)
rename dbms/{src => }/Core/tests/string_ref_hash.cpp (100%)
rename dbms/{src => }/DataStreams/AddingConstColumnBlockInputStream.h (100%)
rename dbms/{src => }/DataStreams/AddingDefaultBlockOutputStream.cpp (100%)
rename dbms/{src => }/DataStreams/AddingDefaultBlockOutputStream.h (100%)
rename dbms/{src => }/DataStreams/AddingDefaultsBlockInputStream.cpp (100%)
rename dbms/{src => }/DataStreams/AddingDefaultsBlockInputStream.h (100%)
rename dbms/{src => }/DataStreams/AggregatingBlockInputStream.cpp (100%)
rename dbms/{src => }/DataStreams/AggregatingBlockInputStream.h (100%)
rename dbms/{src => }/DataStreams/AggregatingSortedBlockInputStream.cpp (100%)
rename dbms/{src => }/DataStreams/AggregatingSortedBlockInputStream.h (100%)
rename dbms/{src => }/DataStreams/AsynchronousBlockInputStream.cpp (100%)
rename dbms/{src => }/DataStreams/AsynchronousBlockInputStream.h (100%)
rename dbms/{src => }/DataStreams/BlockIO.cpp (100%)
rename dbms/{src => }/DataStreams/BlockIO.h (100%)
rename dbms/{src => }/DataStreams/BlockStreamProfileInfo.cpp (100%)
rename dbms/{src => }/DataStreams/BlockStreamProfileInfo.h (100%)
rename dbms/{src => }/DataStreams/BlocksBlockInputStream.h (100%)
rename dbms/{src => }/DataStreams/BlocksListBlockInputStream.h (100%)
rename dbms/{src => }/DataStreams/CMakeLists.txt (100%)
rename dbms/{src => }/DataStreams/CheckConstraintsBlockOutputStream.cpp (100%)
rename dbms/{src => }/DataStreams/CheckConstraintsBlockOutputStream.h (100%)
rename dbms/{src => }/DataStreams/CheckSortedBlockInputStream.cpp (100%)
rename dbms/{src => }/DataStreams/CheckSortedBlockInputStream.h (100%)
rename dbms/{src => }/DataStreams/CollapsingFinalBlockInputStream.cpp (100%)
rename dbms/{src => }/DataStreams/CollapsingFinalBlockInputStream.h (100%)
rename dbms/{src => }/DataStreams/CollapsingSortedBlockInputStream.cpp (100%)
rename dbms/{src => }/DataStreams/CollapsingSortedBlockInputStream.h (100%)
rename dbms/{src => }/DataStreams/ColumnGathererStream.cpp (100%)
rename dbms/{src => }/DataStreams/ColumnGathererStream.h (100%)
rename dbms/{src => }/DataStreams/ConcatBlockInputStream.h (100%)
rename dbms/{src => }/DataStreams/ConvertColumnLowCardinalityToFullBlockInputStream.h (100%)
rename dbms/{src => }/DataStreams/ConvertingBlockInputStream.cpp (100%)
rename dbms/{src => }/DataStreams/ConvertingBlockInputStream.h (100%)
rename dbms/{src => }/DataStreams/CountingBlockOutputStream.cpp (100%)
rename dbms/{src => }/DataStreams/CountingBlockOutputStream.h (100%)
rename dbms/{src => }/DataStreams/CreatingSetsBlockInputStream.cpp (100%)
rename dbms/{src => }/DataStreams/CreatingSetsBlockInputStream.h (100%)
rename dbms/{src => }/DataStreams/CubeBlockInputStream.cpp (100%)
rename dbms/{src => }/DataStreams/CubeBlockInputStream.h (100%)
rename dbms/{src => }/DataStreams/DistinctBlockInputStream.cpp (100%)
rename dbms/{src => }/DataStreams/DistinctBlockInputStream.h (100%)
rename dbms/{src => }/DataStreams/DistinctSortedBlockInputStream.cpp (100%)
rename dbms/{src => }/DataStreams/DistinctSortedBlockInputStream.h (100%)
rename dbms/{src => }/DataStreams/ExecutionSpeedLimits.cpp (100%)
rename dbms/{src => }/DataStreams/ExecutionSpeedLimits.h (100%)
rename dbms/{src => }/DataStreams/ExpressionBlockInputStream.cpp (100%)
rename dbms/{src => }/DataStreams/ExpressionBlockInputStream.h (100%)
rename dbms/{src => }/DataStreams/FillingBlockInputStream.cpp (100%)
rename dbms/{src => }/DataStreams/FillingBlockInputStream.h (100%)
rename dbms/{src => }/DataStreams/FilterBlockInputStream.cpp (100%)
rename dbms/{src => }/DataStreams/FilterBlockInputStream.h (100%)
rename dbms/{src => }/DataStreams/FilterColumnsBlockInputStream.cpp (100%)
rename dbms/{src => }/DataStreams/FilterColumnsBlockInputStream.h (100%)
rename dbms/{src => }/DataStreams/FinishSortingBlockInputStream.cpp (100%)
rename dbms/{src => }/DataStreams/FinishSortingBlockInputStream.h (100%)
rename dbms/{src => }/DataStreams/GraphiteRollupSortedBlockInputStream.cpp (100%)
rename dbms/{src => }/DataStreams/GraphiteRollupSortedBlockInputStream.h (100%)
rename dbms/{src => }/DataStreams/IBlockInputStream.cpp (100%)
rename dbms/{src => }/DataStreams/IBlockInputStream.h (100%)
rename dbms/{src => }/DataStreams/IBlockOutputStream.h (100%)
rename dbms/{src => }/DataStreams/IBlockStream_fwd.h (100%)
rename dbms/{src => }/DataStreams/InputStreamFromASTInsertQuery.cpp (100%)
rename dbms/{src => }/DataStreams/InputStreamFromASTInsertQuery.h (100%)
rename dbms/{src => }/DataStreams/InternalTextLogsRowOutputStream.cpp (100%)
rename dbms/{src => }/DataStreams/InternalTextLogsRowOutputStream.h (100%)
rename dbms/{src => }/DataStreams/LazyBlockInputStream.h (100%)
rename dbms/{src => }/DataStreams/LimitBlockInputStream.cpp (100%)
rename dbms/{src => }/DataStreams/LimitBlockInputStream.h (100%)
rename dbms/{src => }/DataStreams/LimitByBlockInputStream.cpp (100%)
rename dbms/{src => }/DataStreams/LimitByBlockInputStream.h (100%)
rename dbms/{src => }/DataStreams/MarkInCompressedFile.h (100%)
rename dbms/{src => }/DataStreams/MaterializingBlockInputStream.cpp (100%)
rename dbms/{src => }/DataStreams/MaterializingBlockInputStream.h (100%)
rename dbms/{src => }/DataStreams/MaterializingBlockOutputStream.h (100%)
rename dbms/{src => }/DataStreams/MergeSortingBlockInputStream.cpp (100%)
rename dbms/{src => }/DataStreams/MergeSortingBlockInputStream.h (100%)
rename dbms/{src => }/DataStreams/MergingAggregatedBlockInputStream.cpp (100%)
rename dbms/{src => }/DataStreams/MergingAggregatedBlockInputStream.h (100%)
rename dbms/{src => }/DataStreams/MergingAggregatedMemoryEfficientBlockInputStream.cpp (100%)
rename dbms/{src => }/DataStreams/MergingAggregatedMemoryEfficientBlockInputStream.h (100%)
rename dbms/{src => }/DataStreams/MergingSortedBlockInputStream.cpp (100%)
rename dbms/{src => }/DataStreams/MergingSortedBlockInputStream.h (100%)
rename dbms/{src => }/DataStreams/NativeBlockInputStream.cpp (100%)
rename dbms/{src => }/DataStreams/NativeBlockInputStream.h (100%)
rename dbms/{src => }/DataStreams/NativeBlockOutputStream.cpp (100%)
rename dbms/{src => }/DataStreams/NativeBlockOutputStream.h (100%)
rename dbms/{src => }/DataStreams/NullAndDoCopyBlockInputStream.h (100%)
rename dbms/{src => }/DataStreams/NullBlockInputStream.h (100%)
rename dbms/{src => }/DataStreams/NullBlockOutputStream.h (100%)
rename dbms/{src => }/DataStreams/OneBlockInputStream.h (100%)
rename dbms/{src => }/DataStreams/OwningBlockInputStream.h (100%)
rename dbms/{src => }/DataStreams/ParallelAggregatingBlockInputStream.cpp (100%)
rename dbms/{src => }/DataStreams/ParallelAggregatingBlockInputStream.h (100%)
rename dbms/{src => }/DataStreams/ParallelInputsProcessor.h (100%)
rename dbms/{src => }/DataStreams/ParallelParsingBlockInputStream.cpp (100%)
rename dbms/{src => }/DataStreams/ParallelParsingBlockInputStream.h (100%)
rename dbms/{src => }/DataStreams/PartialSortingBlockInputStream.cpp (100%)
rename dbms/{src => }/DataStreams/PartialSortingBlockInputStream.h (100%)
rename dbms/{src => }/DataStreams/PushingToViewsBlockOutputStream.cpp (100%)
rename dbms/{src => }/DataStreams/PushingToViewsBlockOutputStream.h (100%)
rename dbms/{src => }/DataStreams/RemoteBlockInputStream.cpp (100%)
rename dbms/{src => }/DataStreams/RemoteBlockInputStream.h (100%)
rename dbms/{src => }/DataStreams/RemoteBlockOutputStream.cpp (100%)
rename dbms/{src => }/DataStreams/RemoteBlockOutputStream.h (100%)
rename dbms/{src => }/DataStreams/ReplacingSortedBlockInputStream.cpp (100%)
rename dbms/{src => }/DataStreams/ReplacingSortedBlockInputStream.h (100%)
rename dbms/{src => }/DataStreams/ReverseBlockInputStream.cpp (100%)
rename dbms/{src => }/DataStreams/ReverseBlockInputStream.h (100%)
rename dbms/{src => }/DataStreams/RollupBlockInputStream.cpp (100%)
rename dbms/{src => }/DataStreams/RollupBlockInputStream.h (100%)
rename dbms/{src => }/DataStreams/SizeLimits.cpp (100%)
rename dbms/{src => }/DataStreams/SizeLimits.h (100%)
rename dbms/{src => }/DataStreams/SquashingBlockInputStream.cpp (100%)
rename dbms/{src => }/DataStreams/SquashingBlockInputStream.h (100%)
rename dbms/{src => }/DataStreams/SquashingBlockOutputStream.cpp (100%)
rename dbms/{src => }/DataStreams/SquashingBlockOutputStream.h (100%)
rename dbms/{src => }/DataStreams/SquashingTransform.cpp (100%)
rename dbms/{src => }/DataStreams/SquashingTransform.h (100%)
rename dbms/{src => }/DataStreams/SummingSortedBlockInputStream.cpp (100%)
rename dbms/{src => }/DataStreams/SummingSortedBlockInputStream.h (100%)
rename dbms/{src => }/DataStreams/TTLBlockInputStream.cpp (100%)
rename dbms/{src => }/DataStreams/TTLBlockInputStream.h (100%)
rename dbms/{src => }/DataStreams/TemporaryFileStream.h (100%)
rename dbms/{src => }/DataStreams/TotalsHavingBlockInputStream.cpp (100%)
rename dbms/{src => }/DataStreams/TotalsHavingBlockInputStream.h (100%)
rename dbms/{src => }/DataStreams/UnionBlockInputStream.h (100%)
rename dbms/{src => }/DataStreams/VersionedCollapsingSortedBlockInputStream.cpp (100%)
rename dbms/{src => }/DataStreams/VersionedCollapsingSortedBlockInputStream.h (100%)
rename dbms/{src => }/DataStreams/copyData.cpp (100%)
rename dbms/{src => }/DataStreams/copyData.h (100%)
rename dbms/{src => }/DataStreams/finalizeBlock.cpp (100%) rename dbms/{src => }/DataStreams/finalizeBlock.h (100%) rename dbms/{src => }/DataStreams/materializeBlock.cpp (100%) rename dbms/{src => }/DataStreams/materializeBlock.h (100%) rename dbms/{src => }/DataStreams/narrowBlockInputStreams.cpp (100%) rename dbms/{src => }/DataStreams/narrowBlockInputStreams.h (100%) rename dbms/{src => }/DataStreams/processConstants.cpp (100%) rename dbms/{src => }/DataStreams/processConstants.h (100%) rename dbms/{src => }/DataStreams/tests/CMakeLists.txt (100%) rename dbms/{src => }/DataStreams/tests/collapsing_sorted_stream.cpp (100%) rename dbms/{src => }/DataStreams/tests/expression_stream.cpp (100%) rename dbms/{src => }/DataStreams/tests/filter_stream.cpp (100%) rename dbms/{src => }/DataStreams/tests/finish_sorting_stream.cpp (100%) rename dbms/{src => }/DataStreams/tests/gtest_blocks_size_merging_streams.cpp (100%) rename dbms/{src => }/DataStreams/tests/gtest_check_sorted_stream.cpp (100%) rename dbms/{src => }/DataStreams/tests/union_stream2.cpp (100%) rename dbms/{src => }/DataTypes/CMakeLists.txt (100%) rename dbms/{src => }/DataTypes/DataTypeAggregateFunction.cpp (100%) rename dbms/{src => }/DataTypes/DataTypeAggregateFunction.h (100%) rename dbms/{src => }/DataTypes/DataTypeArray.cpp (100%) rename dbms/{src => }/DataTypes/DataTypeArray.h (100%) rename dbms/{src => }/DataTypes/DataTypeCustom.h (100%) rename dbms/{src => }/DataTypes/DataTypeCustomIPv4AndIPv6.cpp (100%) rename dbms/{src => }/DataTypes/DataTypeCustomSimpleAggregateFunction.cpp (100%) rename dbms/{src => }/DataTypes/DataTypeCustomSimpleAggregateFunction.h (100%) rename dbms/{src => }/DataTypes/DataTypeCustomSimpleTextSerialization.cpp (100%) rename dbms/{src => }/DataTypes/DataTypeCustomSimpleTextSerialization.h (100%) rename dbms/{src => }/DataTypes/DataTypeDate.cpp (100%) rename dbms/{src => }/DataTypes/DataTypeDate.h (100%) rename dbms/{src => }/DataTypes/DataTypeDateTime.cpp (100%) rename dbms/{src => }/DataTypes/DataTypeDateTime.h (100%) rename dbms/{src => }/DataTypes/DataTypeDateTime64.cpp (100%) rename dbms/{src => }/DataTypes/DataTypeDateTime64.h (100%) rename dbms/{src => }/DataTypes/DataTypeDecimalBase.cpp (100%) rename dbms/{src => }/DataTypes/DataTypeDecimalBase.h (100%) rename dbms/{src => }/DataTypes/DataTypeEnum.cpp (100%) rename dbms/{src => }/DataTypes/DataTypeEnum.h (100%) rename dbms/{src => }/DataTypes/DataTypeFactory.cpp (100%) rename dbms/{src => }/DataTypes/DataTypeFactory.h (100%) rename dbms/{src => }/DataTypes/DataTypeFixedString.cpp (100%) rename dbms/{src => }/DataTypes/DataTypeFixedString.h (100%) rename dbms/{src => }/DataTypes/DataTypeFunction.cpp (100%) rename dbms/{src => }/DataTypes/DataTypeFunction.h (100%) rename dbms/{src => }/DataTypes/DataTypeInterval.cpp (100%) rename dbms/{src => }/DataTypes/DataTypeInterval.h (100%) rename dbms/{src => }/DataTypes/DataTypeLowCardinality.cpp (100%) rename dbms/{src => }/DataTypes/DataTypeLowCardinality.h (100%) rename dbms/{src => }/DataTypes/DataTypeLowCardinalityHelpers.cpp (100%) rename dbms/{src => }/DataTypes/DataTypeNothing.cpp (100%) rename dbms/{src => }/DataTypes/DataTypeNothing.h (100%) rename dbms/{src => }/DataTypes/DataTypeNullable.cpp (100%) rename dbms/{src => }/DataTypes/DataTypeNullable.h (100%) rename dbms/{src => }/DataTypes/DataTypeNumberBase.cpp (100%) rename dbms/{src => }/DataTypes/DataTypeNumberBase.h (100%) rename dbms/{src => }/DataTypes/DataTypeSet.h (100%) rename dbms/{src => }/DataTypes/DataTypeString.cpp (100%) rename 
dbms/{src => }/DataTypes/DataTypeString.h (100%) rename dbms/{src => }/DataTypes/DataTypeTuple.cpp (100%) rename dbms/{src => }/DataTypes/DataTypeTuple.h (100%) rename dbms/{src => }/DataTypes/DataTypeUUID.cpp (100%) rename dbms/{src => }/DataTypes/DataTypeUUID.h (100%) rename dbms/{src => }/DataTypes/DataTypeWithSimpleSerialization.h (100%) rename dbms/{src => }/DataTypes/DataTypesDecimal.cpp (100%) rename dbms/{src => }/DataTypes/DataTypesDecimal.h (100%) rename dbms/{src => }/DataTypes/DataTypesNumber.cpp (100%) rename dbms/{src => }/DataTypes/DataTypesNumber.h (100%) rename dbms/{src => }/DataTypes/FieldToDataType.cpp (100%) rename dbms/{src => }/DataTypes/FieldToDataType.h (100%) rename dbms/{src => }/DataTypes/IDataType.cpp (100%) rename dbms/{src => }/DataTypes/IDataType.h (100%) rename dbms/{src => }/DataTypes/IDataTypeDummy.h (100%) rename dbms/{src => }/DataTypes/Native.h (100%) rename dbms/{src => }/DataTypes/NestedUtils.cpp (100%) rename dbms/{src => }/DataTypes/NestedUtils.h (100%) rename dbms/{src => }/DataTypes/NumberTraits.h (100%) rename dbms/{src => }/DataTypes/convertMySQLDataType.cpp (100%) rename dbms/{src => }/DataTypes/convertMySQLDataType.h (100%) rename dbms/{src => }/DataTypes/getLeastSupertype.cpp (100%) rename dbms/{src => }/DataTypes/getLeastSupertype.h (100%) rename dbms/{src => }/DataTypes/getMostSubtype.cpp (100%) rename dbms/{src => }/DataTypes/getMostSubtype.h (100%) rename dbms/{src => }/DataTypes/tests/CMakeLists.txt (100%) rename dbms/{src => }/DataTypes/tests/data_type_string.cpp (100%) rename dbms/{src => }/DataTypes/tests/data_types_number_fixed.cpp (100%) rename dbms/{src => }/DataTypes/tests/gtest_data_type_get_common_type.cpp (100%) rename dbms/{src => }/Databases/DatabaseDictionary.cpp (100%) rename dbms/{src => }/Databases/DatabaseDictionary.h (100%) rename dbms/{src => }/Databases/DatabaseFactory.cpp (100%) rename dbms/{src => }/Databases/DatabaseFactory.h (100%) rename dbms/{src => }/Databases/DatabaseLazy.cpp (100%) rename dbms/{src => }/Databases/DatabaseLazy.h (100%) rename dbms/{src => }/Databases/DatabaseMemory.cpp (100%) rename dbms/{src => }/Databases/DatabaseMemory.h (100%) rename dbms/{src => }/Databases/DatabaseMySQL.cpp (100%) rename dbms/{src => }/Databases/DatabaseMySQL.h (100%) rename dbms/{src => }/Databases/DatabaseOnDisk.cpp (100%) rename dbms/{src => }/Databases/DatabaseOnDisk.h (100%) rename dbms/{src => }/Databases/DatabaseOrdinary.cpp (100%) rename dbms/{src => }/Databases/DatabaseOrdinary.h (100%) rename dbms/{src => }/Databases/DatabaseWithDictionaries.cpp (100%) rename dbms/{src => }/Databases/DatabaseWithDictionaries.h (100%) rename dbms/{src => }/Databases/DatabasesCommon.cpp (100%) rename dbms/{src => }/Databases/DatabasesCommon.h (100%) rename dbms/{src => }/Databases/IDatabase.h (100%) rename dbms/{src => }/Dictionaries/CMakeLists.txt (100%) rename dbms/{src => }/Dictionaries/CacheDictionary.cpp (100%) rename dbms/{src => }/Dictionaries/CacheDictionary.h (100%) rename dbms/{src => }/Dictionaries/CacheDictionary.inc.h (100%) rename dbms/{src => }/Dictionaries/CacheDictionary_generate1.cpp.in (100%) rename dbms/{src => }/Dictionaries/CacheDictionary_generate2.cpp.in (100%) rename dbms/{src => }/Dictionaries/CacheDictionary_generate3.cpp.in (100%) rename dbms/{src => }/Dictionaries/ClickHouseDictionarySource.cpp (100%) rename dbms/{src => }/Dictionaries/ClickHouseDictionarySource.h (100%) rename dbms/{src => }/Dictionaries/ComplexKeyCacheDictionary.cpp (100%) rename dbms/{src => 
}/Dictionaries/ComplexKeyCacheDictionary.h (100%) rename dbms/{src => }/Dictionaries/ComplexKeyCacheDictionary_createAttributeWithType.cpp (100%) rename dbms/{src => }/Dictionaries/ComplexKeyCacheDictionary_generate1.cpp.in (100%) rename dbms/{src => }/Dictionaries/ComplexKeyCacheDictionary_generate2.cpp.in (100%) rename dbms/{src => }/Dictionaries/ComplexKeyCacheDictionary_generate3.cpp.in (100%) rename dbms/{src => }/Dictionaries/ComplexKeyCacheDictionary_setAttributeValue.cpp (100%) rename dbms/{src => }/Dictionaries/ComplexKeyCacheDictionary_setDefaultAttributeValue.cpp (100%) rename dbms/{src => }/Dictionaries/ComplexKeyHashedDictionary.cpp (100%) rename dbms/{src => }/Dictionaries/ComplexKeyHashedDictionary.h (100%) rename dbms/{src => }/Dictionaries/DictionaryBlockInputStream.h (100%) rename dbms/{src => }/Dictionaries/DictionaryBlockInputStreamBase.cpp (100%) rename dbms/{src => }/Dictionaries/DictionaryBlockInputStreamBase.h (100%) rename dbms/{src => }/Dictionaries/DictionaryFactory.cpp (100%) rename dbms/{src => }/Dictionaries/DictionaryFactory.h (100%) rename dbms/{src => }/Dictionaries/DictionarySourceFactory.cpp (100%) rename dbms/{src => }/Dictionaries/DictionarySourceFactory.h (100%) rename dbms/{src => }/Dictionaries/DictionarySourceHelpers.cpp (100%) rename dbms/{src => }/Dictionaries/DictionarySourceHelpers.h (100%) rename dbms/{src => }/Dictionaries/DictionaryStructure.cpp (100%) rename dbms/{src => }/Dictionaries/DictionaryStructure.h (100%) rename dbms/{src => }/Dictionaries/Embedded/CMakeLists.txt (100%) rename dbms/{src => }/Dictionaries/Embedded/GeoDictionariesLoader.cpp (100%) rename dbms/{src => }/Dictionaries/Embedded/GeoDictionariesLoader.h (100%) rename dbms/{src => }/Dictionaries/Embedded/GeodataProviders/Entries.h (100%) rename dbms/{src => }/Dictionaries/Embedded/GeodataProviders/HierarchiesProvider.cpp (100%) rename dbms/{src => }/Dictionaries/Embedded/GeodataProviders/HierarchiesProvider.h (100%) rename dbms/{src => }/Dictionaries/Embedded/GeodataProviders/HierarchyFormatReader.cpp (100%) rename dbms/{src => }/Dictionaries/Embedded/GeodataProviders/HierarchyFormatReader.h (100%) rename dbms/{src => }/Dictionaries/Embedded/GeodataProviders/IHierarchiesProvider.h (100%) rename dbms/{src => }/Dictionaries/Embedded/GeodataProviders/INamesProvider.h (100%) rename dbms/{src => }/Dictionaries/Embedded/GeodataProviders/NamesFormatReader.cpp (100%) rename dbms/{src => }/Dictionaries/Embedded/GeodataProviders/NamesFormatReader.h (100%) rename dbms/{src => }/Dictionaries/Embedded/GeodataProviders/NamesProvider.cpp (100%) rename dbms/{src => }/Dictionaries/Embedded/GeodataProviders/NamesProvider.h (100%) rename dbms/{src => }/Dictionaries/Embedded/GeodataProviders/Types.h (100%) rename dbms/{src => }/Dictionaries/Embedded/RegionsHierarchies.cpp (100%) rename dbms/{src => }/Dictionaries/Embedded/RegionsHierarchies.h (100%) rename dbms/{src => }/Dictionaries/Embedded/RegionsHierarchy.cpp (100%) rename dbms/{src => }/Dictionaries/Embedded/RegionsHierarchy.h (100%) rename dbms/{src => }/Dictionaries/Embedded/RegionsNames.cpp (100%) rename dbms/{src => }/Dictionaries/Embedded/RegionsNames.h (100%) rename dbms/{src => }/Dictionaries/ExecutableDictionarySource.cpp (100%) rename dbms/{src => }/Dictionaries/ExecutableDictionarySource.h (100%) rename dbms/{src => }/Dictionaries/ExternalQueryBuilder.cpp (100%) rename dbms/{src => }/Dictionaries/ExternalQueryBuilder.h (100%) rename dbms/{src => }/Dictionaries/FileDictionarySource.cpp (100%) rename dbms/{src => 
}/Dictionaries/FileDictionarySource.h (100%) rename dbms/{src => }/Dictionaries/FlatDictionary.cpp (100%) rename dbms/{src => }/Dictionaries/FlatDictionary.h (100%) rename dbms/{src => }/Dictionaries/HTTPDictionarySource.cpp (100%) rename dbms/{src => }/Dictionaries/HTTPDictionarySource.h (100%) rename dbms/{src => }/Dictionaries/HashedDictionary.cpp (100%) rename dbms/{src => }/Dictionaries/HashedDictionary.h (100%) rename dbms/{src => }/Dictionaries/IDictionary.h (100%) rename dbms/{src => }/Dictionaries/IDictionarySource.h (100%) rename dbms/{src => }/Dictionaries/LibraryDictionarySource.cpp (100%) rename dbms/{src => }/Dictionaries/LibraryDictionarySource.h (95%) rename dbms/{src => }/Dictionaries/LibraryDictionarySourceExternal.cpp (100%) rename dbms/{src => }/Dictionaries/LibraryDictionarySourceExternal.h (100%) rename dbms/{src => }/Dictionaries/MongoDBBlockInputStream.cpp (100%) rename dbms/{src => }/Dictionaries/MongoDBBlockInputStream.h (100%) rename dbms/{src => }/Dictionaries/MongoDBDictionarySource.cpp (99%) rename dbms/{src => }/Dictionaries/MongoDBDictionarySource.h (100%) rename dbms/{src => }/Dictionaries/MySQLDictionarySource.cpp (100%) rename dbms/{src => }/Dictionaries/MySQLDictionarySource.h (100%) rename dbms/{src => }/Dictionaries/PolygonDictionary.cpp (100%) rename dbms/{src => }/Dictionaries/PolygonDictionary.h (100%) rename dbms/{src => }/Dictionaries/RangeDictionaryBlockInputStream.h (100%) rename dbms/{src => }/Dictionaries/RangeHashedDictionary.cpp (100%) rename dbms/{src => }/Dictionaries/RangeHashedDictionary.h (100%) rename dbms/{src => }/Dictionaries/RedisBlockInputStream.cpp (100%) rename dbms/{src => }/Dictionaries/RedisBlockInputStream.h (100%) rename dbms/{src => }/Dictionaries/RedisDictionarySource.cpp (100%) rename dbms/{src => }/Dictionaries/RedisDictionarySource.h (100%) rename dbms/{src => }/Dictionaries/TrieDictionary.cpp (100%) rename dbms/{src => }/Dictionaries/TrieDictionary.h (100%) rename dbms/{src => }/Dictionaries/XDBCDictionarySource.cpp (100%) rename dbms/{src => }/Dictionaries/XDBCDictionarySource.h (100%) rename dbms/{src => }/Dictionaries/getDictionaryConfigurationFromAST.cpp (100%) rename dbms/{src => }/Dictionaries/getDictionaryConfigurationFromAST.h (100%) rename dbms/{src => }/Dictionaries/readInvalidateQuery.cpp (100%) rename dbms/{src => }/Dictionaries/readInvalidateQuery.h (100%) rename dbms/{src => }/Dictionaries/registerDictionaries.cpp (100%) rename dbms/{src => }/Dictionaries/registerDictionaries.h (100%) rename dbms/{src => }/Dictionaries/tests/CMakeLists.txt (100%) rename dbms/{src => }/Dictionaries/tests/gtest_dictionary_configuration.cpp (100%) rename dbms/{src => }/Dictionaries/writeParenthesisedString.cpp (100%) rename dbms/{src => }/Dictionaries/writeParenthesisedString.h (100%) rename dbms/{src => }/Disks/CMakeLists.txt (100%) rename dbms/{src => }/Disks/DiskFactory.cpp (100%) rename dbms/{src => }/Disks/DiskFactory.h (100%) rename dbms/{src => }/Disks/DiskLocal.cpp (100%) rename dbms/{src => }/Disks/DiskLocal.h (100%) rename dbms/{src => }/Disks/DiskMemory.cpp (100%) rename dbms/{src => }/Disks/DiskMemory.h (100%) rename dbms/{src => }/Disks/DiskS3.cpp (100%) rename dbms/{src => }/Disks/DiskS3.h (100%) rename dbms/{src => }/Disks/DiskSpaceMonitor.cpp (100%) rename dbms/{src => }/Disks/DiskSpaceMonitor.h (100%) rename dbms/{src => }/Disks/IDisk.cpp (100%) rename dbms/{src => }/Disks/IDisk.h (100%) rename dbms/{src => }/Disks/registerDisks.cpp (100%) rename dbms/{src => }/Disks/registerDisks.h (100%) rename dbms/{src 
=> }/Disks/tests/CMakeLists.txt (100%) rename dbms/{src => }/Disks/tests/gtest_disk.cpp (100%) rename dbms/{src => }/Disks/tests/gtest_disk.h (100%) rename dbms/{src => }/Disks/tests/gtest_path_functions.cpp (100%) rename dbms/{src => }/Formats/CMakeLists.txt (100%) rename dbms/{src => }/Formats/FormatFactory.cpp (100%) rename dbms/{src => }/Formats/FormatFactory.h (100%) rename dbms/{src => }/Formats/FormatSchemaInfo.cpp (100%) rename dbms/{src => }/Formats/FormatSchemaInfo.h (100%) rename dbms/{src => }/Formats/FormatSettings.h (100%) rename dbms/{src => }/Formats/IRowInputStream.cpp (100%) rename dbms/{src => }/Formats/IRowInputStream.h (100%) rename dbms/{src => }/Formats/IRowOutputStream.cpp (100%) rename dbms/{src => }/Formats/IRowOutputStream.h (100%) rename dbms/{src => }/Formats/MySQLBlockInputStream.cpp (100%) rename dbms/{src => }/Formats/MySQLBlockInputStream.h (100%) rename dbms/{src => }/Formats/NativeFormat.cpp (100%) rename dbms/{src => }/Formats/NullFormat.cpp (100%) rename dbms/{src => }/Formats/ParsedTemplateFormatString.cpp (100%) rename dbms/{src => }/Formats/ParsedTemplateFormatString.h (100%) rename dbms/{src => }/Formats/ProtobufColumnMatcher.cpp (100%) rename dbms/{src => }/Formats/ProtobufColumnMatcher.h (100%) rename dbms/{src => }/Formats/ProtobufReader.cpp (100%) rename dbms/{src => }/Formats/ProtobufReader.h (100%) rename dbms/{src => }/Formats/ProtobufSchemas.cpp (100%) rename dbms/{src => }/Formats/ProtobufSchemas.h (100%) rename dbms/{src => }/Formats/ProtobufWriter.cpp (100%) rename dbms/{src => }/Formats/ProtobufWriter.h (100%) rename dbms/{src => }/Formats/config_formats.h.in (100%) rename dbms/{src => }/Formats/tests/CMakeLists.txt (100%) rename dbms/{src => }/Formats/tests/tab_separated_streams.cpp (100%) rename dbms/{src => }/Formats/verbosePrintString.cpp (100%) rename dbms/{src => }/Formats/verbosePrintString.h (100%) rename dbms/{src => }/Functions/CMakeLists.txt (100%) rename dbms/{src => }/Functions/CRC.cpp (100%) rename dbms/{src => }/Functions/CustomWeekTransforms.h (100%) rename dbms/{src => }/Functions/DateTimeTransforms.h (100%) rename dbms/{src => }/Functions/DivisionUtils.h (100%) rename dbms/{src => }/Functions/DummyJSONParser.h (100%) rename dbms/{src => }/Functions/EmptyImpl.h (100%) rename dbms/{src => }/Functions/FunctionBase64Conversion.h (100%) rename dbms/{src => }/Functions/FunctionBinaryArithmetic.h (100%) rename dbms/{src => }/Functions/FunctionBitTestMany.h (100%) rename dbms/{src => }/Functions/FunctionCustomWeekToSomething.h (100%) rename dbms/{src => }/Functions/FunctionDateOrDateTimeAddInterval.h (100%) rename dbms/{src => }/Functions/FunctionDateOrDateTimeToSomething.h (100%) rename dbms/{src => }/Functions/FunctionFQDN.cpp (100%) rename dbms/{src => }/Functions/FunctionFactory.cpp (100%) rename dbms/{src => }/Functions/FunctionFactory.h (100%) rename dbms/{src => }/Functions/FunctionHelpers.cpp (100%) rename dbms/{src => }/Functions/FunctionHelpers.h (100%) rename dbms/{src => }/Functions/FunctionIfBase.h (100%) rename dbms/{src => }/Functions/FunctionJoinGet.cpp (100%) rename dbms/{src => }/Functions/FunctionJoinGet.h (100%) rename dbms/{src => }/Functions/FunctionMathBinaryFloat64.h (100%) rename dbms/{src => }/Functions/FunctionMathConstFloat64.h (100%) rename dbms/{src => }/Functions/FunctionMathUnary.h (100%) rename dbms/{src => }/Functions/FunctionNumericPredicate.h (100%) rename dbms/{src => }/Functions/FunctionStartsEndsWith.h (100%) rename dbms/{src => }/Functions/FunctionStringOrArrayToT.h (100%) rename 
dbms/{src => }/Functions/FunctionStringToString.h (100%) rename dbms/{src => }/Functions/FunctionUnaryArithmetic.h (100%) rename dbms/{src => }/Functions/FunctionsBitmap.cpp (100%) rename dbms/{src => }/Functions/FunctionsBitmap.h (100%) rename dbms/{src => }/Functions/FunctionsCoding.cpp (100%) rename dbms/{src => }/Functions/FunctionsCoding.h (100%) rename dbms/{src => }/Functions/FunctionsComparison.h (100%) rename dbms/{src => }/Functions/FunctionsConsistentHashing.h (100%) rename dbms/{src => }/Functions/FunctionsConversion.cpp (100%) rename dbms/{src => }/Functions/FunctionsConversion.h (100%) rename dbms/{src => }/Functions/FunctionsEmbeddedDictionaries.cpp (100%) rename dbms/{src => }/Functions/FunctionsEmbeddedDictionaries.h (100%) rename dbms/{src => }/Functions/FunctionsExternalDictionaries.cpp (100%) rename dbms/{src => }/Functions/FunctionsExternalDictionaries.h (100%) rename dbms/{src => }/Functions/FunctionsExternalModels.cpp (100%) rename dbms/{src => }/Functions/FunctionsExternalModels.h (100%) rename dbms/{src => }/Functions/FunctionsFormatting.cpp (100%) rename dbms/{src => }/Functions/FunctionsFormatting.h (100%) rename dbms/{src => }/Functions/FunctionsHashing.cpp (100%) rename dbms/{src => }/Functions/FunctionsHashing.h (100%) rename dbms/{src => }/Functions/FunctionsJSON.cpp (100%) rename dbms/{src => }/Functions/FunctionsJSON.h (100%) rename dbms/{src => }/Functions/FunctionsLogical.cpp (100%) rename dbms/{src => }/Functions/FunctionsLogical.h (100%) rename dbms/{src => }/Functions/FunctionsMiscellaneous.h (100%) rename dbms/{src => }/Functions/FunctionsMultiStringPosition.h (100%) rename dbms/{src => }/Functions/FunctionsMultiStringSearch.h (100%) rename dbms/{src => }/Functions/FunctionsRandom.cpp (100%) rename dbms/{src => }/Functions/FunctionsRandom.h (100%) rename dbms/{src => }/Functions/FunctionsRound.cpp (100%) rename dbms/{src => }/Functions/FunctionsRound.h (100%) rename dbms/{src => }/Functions/FunctionsStringArray.cpp (100%) rename dbms/{src => }/Functions/FunctionsStringArray.h (100%) rename dbms/{src => }/Functions/FunctionsStringRegex.cpp (100%) rename dbms/{src => }/Functions/FunctionsStringRegex.h (100%) create mode 100644 dbms/Functions/FunctionsStringSearch.cpp rename dbms/{src => }/Functions/FunctionsStringSearch.h (100%) rename dbms/{src => }/Functions/FunctionsStringSearchToString.h (100%) rename dbms/{src => }/Functions/FunctionsStringSimilarity.cpp (100%) rename dbms/{src => }/Functions/FunctionsStringSimilarity.h (100%) rename dbms/{src => }/Functions/FunctionsVisitParam.h (100%) rename dbms/{src => }/Functions/GatherUtils/Algorithms.h (100%) rename dbms/{src => }/Functions/GatherUtils/ArraySinkVisitor.h (100%) rename dbms/{src => }/Functions/GatherUtils/ArraySourceVisitor.h (100%) rename dbms/{src => }/Functions/GatherUtils/CMakeLists.txt (100%) rename dbms/{src => }/Functions/GatherUtils/GatherUtils.h (100%) rename dbms/{src => }/Functions/GatherUtils/IArraySink.h (100%) rename dbms/{src => }/Functions/GatherUtils/IArraySource.h (100%) rename dbms/{src => }/Functions/GatherUtils/IValueSource.h (100%) rename dbms/{src => }/Functions/GatherUtils/Selectors.h (100%) rename dbms/{src => }/Functions/GatherUtils/Sinks.h (100%) rename dbms/{src => }/Functions/GatherUtils/Slices.h (100%) rename dbms/{src => }/Functions/GatherUtils/Sources.h (100%) rename dbms/{src => }/Functions/GatherUtils/ValueSourceVisitor.h (100%) rename dbms/{src => }/Functions/GatherUtils/concat.cpp (100%) rename dbms/{src => }/Functions/GatherUtils/createArraySink.cpp (100%) 
rename dbms/{src => }/Functions/GatherUtils/createArraySource.cpp (100%) rename dbms/{src => }/Functions/GatherUtils/createValueSource.cpp (100%) rename dbms/{src => }/Functions/GatherUtils/has.cpp (100%) rename dbms/{src => }/Functions/GatherUtils/push.cpp (100%) rename dbms/{src => }/Functions/GatherUtils/resizeConstantSize.cpp (100%) rename dbms/{src => }/Functions/GatherUtils/resizeDynamicSize.cpp (100%) rename dbms/{src => }/Functions/GatherUtils/sliceDynamicOffsetBounded.cpp (100%) rename dbms/{src => }/Functions/GatherUtils/sliceDynamicOffsetUnbounded.cpp (100%) rename dbms/{src => }/Functions/GatherUtils/sliceFromLeftConstantOffsetBounded.cpp (100%) rename dbms/{src => }/Functions/GatherUtils/sliceFromLeftConstantOffsetUnbounded.cpp (100%) rename dbms/{src => }/Functions/GatherUtils/sliceFromRightConstantOffsetBounded.cpp (100%) rename dbms/{src => }/Functions/GatherUtils/sliceFromRightConstantOffsetUnbounded.cpp (100%) rename dbms/{src => }/Functions/GeoHash.cpp (100%) rename dbms/{src => }/Functions/GeoHash.h (100%) rename dbms/{src => }/Functions/HasTokenImpl.h (100%) rename dbms/{src => }/Functions/IFunction.cpp (100%) rename dbms/{src => }/Functions/IFunction.h (100%) rename dbms/{src => }/Functions/IFunctionAdaptors.h (100%) rename dbms/{src => }/Functions/IFunctionImpl.h (100%) rename dbms/{src => }/Functions/LowerUpperImpl.h (100%) rename dbms/{src => }/Functions/LowerUpperUTF8Impl.h (100%) rename dbms/{src => }/Functions/MultiSearchAllPositionsImpl.h (100%) rename dbms/{src => }/Functions/MultiSearchFirstIndexImpl.h (100%) rename dbms/{src => }/Functions/MultiSearchFirstPositionImpl.h (100%) rename dbms/{src => }/Functions/MultiSearchImpl.h (100%) rename dbms/{src => }/Functions/PolygonUtils.h (100%) rename dbms/{src => }/Functions/PositionImpl.h (100%) rename dbms/{src => }/Functions/RapidJSONParser.h (100%) rename dbms/{src => }/Functions/Regexps.h (100%) rename dbms/{src => }/Functions/SimdJSONParser.h (100%) rename dbms/{src => }/Functions/URL/CMakeLists.txt (100%) rename dbms/{src => }/Functions/URL/FunctionsURL.h (100%) rename dbms/{src => }/Functions/URL/URLHierarchy.cpp (100%) rename dbms/{src => }/Functions/URL/URLPathHierarchy.cpp (100%) rename dbms/{src => }/Functions/URL/basename.cpp (100%) rename dbms/{src => }/Functions/URL/config_functions_url.h.in (100%) rename dbms/{src => }/Functions/URL/cutFragment.cpp (100%) rename dbms/{src => }/Functions/URL/cutQueryString.cpp (100%) rename dbms/{src => }/Functions/URL/cutQueryStringAndFragment.cpp (100%) rename dbms/{src => }/Functions/URL/cutToFirstSignificantSubdomain.cpp (100%) rename dbms/{src => }/Functions/URL/cutURLParameter.cpp (100%) rename dbms/{src => }/Functions/URL/cutWWW.cpp (100%) rename dbms/{src => }/Functions/URL/decodeURLComponent.cpp (100%) rename dbms/{src => }/Functions/URL/domain.cpp (100%) rename dbms/{src => }/Functions/URL/domain.h (100%) rename dbms/{src => }/Functions/URL/domainWithoutWWW.cpp (100%) rename dbms/{src => }/Functions/URL/extractURLParameter.cpp (100%) rename dbms/{src => }/Functions/URL/extractURLParameterNames.cpp (100%) rename dbms/{src => }/Functions/URL/extractURLParameters.cpp (100%) rename dbms/{src => }/Functions/URL/firstSignificantSubdomain.cpp (100%) rename dbms/{src => }/Functions/URL/firstSignificantSubdomain.h (100%) rename dbms/{src => }/Functions/URL/fragment.cpp (100%) rename dbms/{src => }/Functions/URL/fragment.h (100%) rename dbms/{src => }/Functions/URL/path.cpp (100%) rename dbms/{src => }/Functions/URL/pathFull.cpp (100%) rename dbms/{src => 
}/Functions/URL/protocol.cpp (100%) rename dbms/{src => }/Functions/URL/protocol.h (100%) rename dbms/{src => }/Functions/URL/queryString.cpp (100%) rename dbms/{src => }/Functions/URL/queryString.h (100%) rename dbms/{src => }/Functions/URL/queryStringAndFragment.cpp (100%) rename dbms/{src => }/Functions/URL/queryStringAndFragment.h (100%) rename dbms/{src => }/Functions/URL/registerFunctionsURL.cpp (100%) rename dbms/{src => }/Functions/URL/tldLookup.generated.cpp (100%) rename dbms/{src => }/Functions/URL/tldLookup.gperf (100%) rename dbms/{src => }/Functions/URL/tldLookup.h (100%) rename dbms/{src => }/Functions/URL/tldLookup.sh (100%) rename dbms/{src => }/Functions/URL/topLevelDomain.cpp (100%) rename dbms/{src => }/Functions/abs.cpp (100%) rename dbms/{src => }/Functions/acos.cpp (100%) rename dbms/{src => }/Functions/addDays.cpp (100%) rename dbms/{src => }/Functions/addHours.cpp (100%) rename dbms/{src => }/Functions/addMinutes.cpp (100%) rename dbms/{src => }/Functions/addMonths.cpp (100%) rename dbms/{src => }/Functions/addQuarters.cpp (100%) rename dbms/{src => }/Functions/addSeconds.cpp (100%) rename dbms/{src => }/Functions/addWeeks.cpp (100%) rename dbms/{src => }/Functions/addYears.cpp (100%) rename dbms/{src => }/Functions/addressToLine.cpp (100%) rename dbms/{src => }/Functions/addressToSymbol.cpp (100%) rename dbms/{src => }/Functions/appendTrailingCharIfAbsent.cpp (100%) rename dbms/{src => }/Functions/array/CMakeLists.txt (100%) rename dbms/{src => }/Functions/array/FunctionArrayMapped.h (100%) rename dbms/{src => }/Functions/array/array.cpp (100%) rename dbms/{src => }/Functions/array/arrayAUC.cpp (100%) rename dbms/{src => }/Functions/array/arrayAll.cpp (100%) rename dbms/{src => }/Functions/array/arrayCompact.cpp (100%) rename dbms/{src => }/Functions/array/arrayConcat.cpp (100%) rename dbms/{src => }/Functions/array/arrayCount.cpp (100%) rename dbms/{src => }/Functions/array/arrayCumSum.cpp (100%) rename dbms/{src => }/Functions/array/arrayCumSumNonNegative.cpp (100%) rename dbms/{src => }/Functions/array/arrayDifference.cpp (100%) rename dbms/{src => }/Functions/array/arrayDistinct.cpp (100%) rename dbms/{src => }/Functions/array/arrayElement.cpp (100%) rename dbms/{src => }/Functions/array/arrayEnumerate.cpp (100%) rename dbms/{src => }/Functions/array/arrayEnumerateDense.cpp (100%) rename dbms/{src => }/Functions/array/arrayEnumerateDenseRanked.cpp (100%) rename dbms/{src => }/Functions/array/arrayEnumerateExtended.h (100%) rename dbms/{src => }/Functions/array/arrayEnumerateRanked.cpp (100%) rename dbms/{src => }/Functions/array/arrayEnumerateRanked.h (100%) rename dbms/{src => }/Functions/array/arrayEnumerateUniq.cpp (100%) rename dbms/{src => }/Functions/array/arrayEnumerateUniqRanked.cpp (100%) rename dbms/{src => }/Functions/array/arrayExists.cpp (100%) rename dbms/{src => }/Functions/array/arrayFill.cpp (100%) rename dbms/{src => }/Functions/array/arrayFilter.cpp (100%) rename dbms/{src => }/Functions/array/arrayFirst.cpp (100%) rename dbms/{src => }/Functions/array/arrayFirstIndex.cpp (100%) rename dbms/{src => }/Functions/array/arrayFlatten.cpp (100%) rename dbms/{src => }/Functions/array/arrayIndex.h (100%) rename dbms/{src => }/Functions/array/arrayIntersect.cpp (100%) rename dbms/{src => }/Functions/array/arrayJoin.cpp (100%) rename dbms/{src => }/Functions/array/arrayMap.cpp (100%) rename dbms/{src => }/Functions/array/arrayPop.h (100%) rename dbms/{src => }/Functions/array/arrayPopBack.cpp (100%) rename dbms/{src => 
}/Functions/array/arrayPopFront.cpp (100%) rename dbms/{src => }/Functions/array/arrayPush.h (100%) rename dbms/{src => }/Functions/array/arrayPushBack.cpp (100%) rename dbms/{src => }/Functions/array/arrayPushFront.cpp (100%) rename dbms/{src => }/Functions/array/arrayReduce.cpp (100%) rename dbms/{src => }/Functions/array/arrayReduceInRanges.cpp (100%) rename dbms/{src => }/Functions/array/arrayResize.cpp (100%) rename dbms/{src => }/Functions/array/arrayReverse.cpp (100%) rename dbms/{src => }/Functions/array/arrayScalarProduct.h (100%) rename dbms/{src => }/Functions/array/arraySlice.cpp (100%) rename dbms/{src => }/Functions/array/arraySort.cpp (100%) rename dbms/{src => }/Functions/array/arraySplit.cpp (100%) rename dbms/{src => }/Functions/array/arraySum.cpp (100%) rename dbms/{src => }/Functions/array/arrayUniq.cpp (100%) rename dbms/{src => }/Functions/array/arrayWithConstant.cpp (100%) rename dbms/{src => }/Functions/array/arrayZip.cpp (100%) rename dbms/{src => }/Functions/array/countEqual.cpp (100%) rename dbms/{src => }/Functions/array/emptyArray.cpp (100%) rename dbms/{src => }/Functions/array/emptyArrayToSingle.cpp (100%) rename dbms/{src => }/Functions/array/has.cpp (100%) rename dbms/{src => }/Functions/array/hasAll.cpp (100%) rename dbms/{src => }/Functions/array/hasAllAny.h (100%) rename dbms/{src => }/Functions/array/hasAny.cpp (100%) rename dbms/{src => }/Functions/array/indexOf.cpp (100%) rename dbms/{src => }/Functions/array/length.cpp (100%) rename dbms/{src => }/Functions/array/range.cpp (100%) rename dbms/{src => }/Functions/array/registerFunctionsArray.cpp (100%) rename dbms/{src => }/Functions/asin.cpp (100%) rename dbms/{src => }/Functions/assumeNotNull.cpp (100%) rename dbms/{src => }/Functions/atan.cpp (100%) rename dbms/{src => }/Functions/bar.cpp (100%) rename dbms/{src => }/Functions/base64Decode.cpp (100%) rename dbms/{src => }/Functions/base64Encode.cpp (100%) rename dbms/{src => }/Functions/bitAnd.cpp (100%) rename dbms/{src => }/Functions/bitBoolMaskAnd.cpp (96%) rename dbms/{src => }/Functions/bitBoolMaskOr.cpp (96%) rename dbms/{src => }/Functions/bitCount.cpp (100%) rename dbms/{src => }/Functions/bitNot.cpp (100%) rename dbms/{src => }/Functions/bitOr.cpp (100%) rename dbms/{src => }/Functions/bitRotateLeft.cpp (100%) rename dbms/{src => }/Functions/bitRotateRight.cpp (100%) rename dbms/{src => }/Functions/bitShiftLeft.cpp (100%) rename dbms/{src => }/Functions/bitShiftRight.cpp (100%) rename dbms/{src => }/Functions/bitSwapLastTwo.cpp (97%) rename dbms/{src => }/Functions/bitTest.cpp (100%) rename dbms/{src => }/Functions/bitTestAll.cpp (100%) rename dbms/{src => }/Functions/bitTestAny.cpp (100%) rename dbms/{src => }/Functions/bitWrapperFunc.cpp (96%) rename dbms/{src => }/Functions/bitXor.cpp (100%) rename dbms/{src => }/Functions/blockNumber.cpp (100%) rename dbms/{src => }/Functions/blockSerializedSize.cpp (100%) rename dbms/{src => }/Functions/blockSize.cpp (100%) rename dbms/{src => }/Functions/caseWithExpression.cpp (100%) rename dbms/{src => }/Functions/castTypeToEither.h (100%) rename dbms/{src => }/Functions/cbrt.cpp (100%) rename dbms/{src => }/Functions/coalesce.cpp (100%) rename dbms/{src => }/Functions/concat.cpp (100%) rename dbms/{src => }/Functions/config_functions.h.in (100%) rename dbms/{src => }/Functions/convertCharset.cpp (100%) rename dbms/{src => }/Functions/cos.cpp (100%) rename dbms/{src => }/Functions/currentDatabase.cpp (100%) rename dbms/{src => }/Functions/currentQuota.cpp (100%) rename dbms/{src => 
}/Functions/currentRowPolicies.cpp (100%) rename dbms/{src => }/Functions/currentUser.cpp (100%) rename dbms/{src => }/Functions/dateDiff.cpp (100%) rename dbms/{src => }/Functions/defaultValueOfArgumentType.cpp (100%) rename dbms/{src => }/Functions/demange.cpp (100%) rename dbms/{src => }/Functions/divide.cpp (100%) rename dbms/{src => }/Functions/dumpColumnStructure.cpp (100%) rename dbms/{src => }/Functions/e.cpp (100%) rename dbms/{src => }/Functions/empty.cpp (100%) rename dbms/{src => }/Functions/endsWith.cpp (100%) rename dbms/{src => }/Functions/equals.cpp (100%) rename dbms/{src => }/Functions/erf.cpp (100%) rename dbms/{src => }/Functions/erfc.cpp (100%) rename dbms/{src => }/Functions/evalMLMethod.cpp (100%) rename dbms/{src => }/Functions/exp.cpp (100%) rename dbms/{src => }/Functions/exp10.cpp (100%) rename dbms/{src => }/Functions/exp2.cpp (100%) rename dbms/{src => }/Functions/extractTimeZoneFromFunctionArguments.cpp (100%) rename dbms/{src => }/Functions/extractTimeZoneFromFunctionArguments.h (100%) rename dbms/{src => }/Functions/filesystem.cpp (100%) rename dbms/{src => }/Functions/finalizeAggregation.cpp (100%) rename dbms/{src => }/Functions/formatDateTime.cpp (100%) rename dbms/{src => }/Functions/formatString.cpp (100%) rename dbms/{src => }/Functions/formatString.h (100%) rename dbms/{src => }/Functions/gcd.cpp (100%) rename dbms/{src => }/Functions/generateUUIDv4.cpp (100%) rename dbms/{src => }/Functions/geoToH3.cpp (100%) rename dbms/{src => }/Functions/geohashDecode.cpp (100%) rename dbms/{src => }/Functions/geohashEncode.cpp (100%) rename dbms/{src => }/Functions/geohashesInBox.cpp (100%) rename dbms/{src => }/Functions/getMacro.cpp (100%) rename dbms/{src => }/Functions/getScalar.cpp (100%) rename dbms/{src => }/Functions/getSizeOfEnumType.cpp (100%) rename dbms/{src => }/Functions/greatCircleDistance.cpp (100%) rename dbms/{src => }/Functions/greater.cpp (100%) rename dbms/{src => }/Functions/greaterOrEquals.cpp (100%) rename dbms/{src => }/Functions/greatest.cpp (100%) rename dbms/{src => }/Functions/h3EdgeAngle.cpp (100%) rename dbms/{src => }/Functions/h3EdgeLengthM.cpp (100%) rename dbms/{src => }/Functions/h3GetBaseCell.cpp (100%) rename dbms/{src => }/Functions/h3GetResolution.cpp (100%) rename dbms/{src => }/Functions/h3HexAreaM2.cpp (100%) rename dbms/{src => }/Functions/h3IndexesAreNeighbors.cpp (100%) rename dbms/{src => }/Functions/h3IsValid.cpp (100%) rename dbms/{src => }/Functions/h3ToChildren.cpp (100%) rename dbms/{src => }/Functions/h3ToParent.cpp (100%) rename dbms/{src => }/Functions/h3ToString.cpp (100%) rename dbms/{src => }/Functions/h3kRing.cpp (100%) rename dbms/{src => }/Functions/hasColumnInTable.cpp (100%) rename dbms/{src => }/Functions/hasToken.cpp (100%) rename dbms/{src => }/Functions/hasTokenCaseInsensitive.cpp (100%) rename dbms/{src => }/Functions/hostName.cpp (100%) rename dbms/{src => }/Functions/identity.cpp (100%) rename dbms/{src => }/Functions/if.cpp (100%) rename dbms/{src => }/Functions/ifNotFinite.cpp (100%) rename dbms/{src => }/Functions/ifNull.cpp (100%) rename dbms/{src => }/Functions/ignore.cpp (100%) rename dbms/{src => }/Functions/ignoreExceptNull.cpp (100%) rename dbms/{src => }/Functions/in.cpp (100%) rename dbms/{src => }/Functions/intDiv.cpp (100%) rename dbms/{src => }/Functions/intDivOrZero.cpp (100%) rename dbms/{src => }/Functions/intExp10.cpp (100%) rename dbms/{src => }/Functions/intExp2.cpp (100%) rename dbms/{src => }/Functions/isFinite.cpp (100%) rename dbms/{src => }/Functions/isInfinite.cpp 
(100%) rename dbms/{src => }/Functions/isNaN.cpp (100%) rename dbms/{src => }/Functions/isNotNull.cpp (100%) rename dbms/{src => }/Functions/isNull.cpp (100%) rename dbms/{src => }/Functions/isValidUTF8.cpp (100%) rename dbms/{src => }/Functions/jumpConsistentHash.cpp (100%) rename dbms/{src => }/Functions/lcm.cpp (100%) rename dbms/{src => }/Functions/least.cpp (100%) rename dbms/{src => }/Functions/lengthUTF8.cpp (100%) rename dbms/{src => }/Functions/less.cpp (100%) rename dbms/{src => }/Functions/lessOrEquals.cpp (100%) rename dbms/{src => }/Functions/lgamma.cpp (100%) rename dbms/{src => }/Functions/likePatternToRegexp.h (100%) rename dbms/{src => }/Functions/log.cpp (100%) rename dbms/{src => }/Functions/log10.cpp (100%) rename dbms/{src => }/Functions/log2.cpp (100%) rename dbms/{src => }/Functions/lowCardinalityIndices.cpp (100%) rename dbms/{src => }/Functions/lowCardinalityKeys.cpp (100%) rename dbms/{src => }/Functions/lower.cpp (100%) rename dbms/{src => }/Functions/lowerUTF8.cpp (100%) rename dbms/{src => }/Functions/materialize.cpp (100%) rename dbms/{src => }/Functions/minus.cpp (100%) rename dbms/{src => }/Functions/modulo.cpp (100%) rename dbms/{src => }/Functions/moduloOrZero.cpp (100%) rename dbms/{src => }/Functions/multiIf.cpp (100%) rename dbms/{src => }/Functions/multiSearchAllPositions.cpp (100%) rename dbms/{src => }/Functions/multiSearchAllPositionsCaseInsensitive.cpp (100%) rename dbms/{src => }/Functions/multiSearchAllPositionsCaseInsensitiveUTF8.cpp (100%) rename dbms/{src => }/Functions/multiSearchAllPositionsUTF8.cpp (100%) rename dbms/{src => }/Functions/multiSearchAny.cpp (100%) rename dbms/{src => }/Functions/multiSearchAnyCaseInsensitive.cpp (100%) rename dbms/{src => }/Functions/multiSearchAnyCaseInsensitiveUTF8.cpp (100%) rename dbms/{src => }/Functions/multiSearchAnyUTF8.cpp (100%) rename dbms/{src => }/Functions/multiSearchFirstIndex.cpp (100%) rename dbms/{src => }/Functions/multiSearchFirstIndexCaseInsensitive.cpp (100%) rename dbms/{src => }/Functions/multiSearchFirstIndexCaseInsensitiveUTF8.cpp (100%) rename dbms/{src => }/Functions/multiSearchFirstIndexUTF8.cpp (100%) rename dbms/{src => }/Functions/multiSearchFirstPosition.cpp (100%) rename dbms/{src => }/Functions/multiSearchFirstPositionCaseInsensitive.cpp (100%) rename dbms/{src => }/Functions/multiSearchFirstPositionCaseInsensitiveUTF8.cpp (100%) rename dbms/{src => }/Functions/multiSearchFirstPositionUTF8.cpp (100%) rename dbms/{src => }/Functions/multiply.cpp (100%) rename dbms/{src => }/Functions/negate.cpp (100%) rename dbms/{src => }/Functions/neighbor.cpp (100%) rename dbms/{src => }/Functions/notEmpty.cpp (100%) rename dbms/{src => }/Functions/notEquals.cpp (100%) rename dbms/{src => }/Functions/now.cpp (100%) rename dbms/{src => }/Functions/now64.cpp (100%) rename dbms/{src => }/Functions/nullIf.cpp (100%) rename dbms/{src => }/Functions/pi.cpp (100%) rename dbms/{src => }/Functions/plus.cpp (100%) rename dbms/{src => }/Functions/pointInEllipses.cpp (100%) rename dbms/{src => }/Functions/pointInPolygon.cpp (100%) rename dbms/{src => }/Functions/position.cpp (100%) rename dbms/{src => }/Functions/positionCaseInsensitive.cpp (100%) rename dbms/{src => }/Functions/positionCaseInsensitiveUTF8.cpp (100%) rename dbms/{src => }/Functions/positionUTF8.cpp (100%) rename dbms/{src => }/Functions/pow.cpp (100%) rename dbms/{src => }/Functions/rand.cpp (100%) rename dbms/{src => }/Functions/rand64.cpp (100%) rename dbms/{src => }/Functions/randConstant.cpp (100%) rename dbms/{src => 
}/Functions/randomPrintableASCII.cpp (100%) rename dbms/{src => }/Functions/regexpQuoteMeta.cpp (100%) rename dbms/{src => }/Functions/registerFunctions.cpp (100%) rename dbms/{src => }/Functions/registerFunctions.h (100%) rename dbms/{src => }/Functions/registerFunctionsArithmetic.cpp (100%) rename dbms/{src => }/Functions/registerFunctionsComparison.cpp (100%) rename dbms/{src => }/Functions/registerFunctionsConditional.cpp (100%) rename dbms/{src => }/Functions/registerFunctionsConsistentHashing.cpp (100%) rename dbms/{src => }/Functions/registerFunctionsDateTime.cpp (100%) rename dbms/{src => }/Functions/registerFunctionsGeo.cpp (100%) rename dbms/{src => }/Functions/registerFunctionsHigherOrder.cpp (100%) rename dbms/{src => }/Functions/registerFunctionsIntrospection.cpp (100%) rename dbms/{src => }/Functions/registerFunctionsMath.cpp (100%) rename dbms/{src => }/Functions/registerFunctionsMiscellaneous.cpp (100%) rename dbms/{src => }/Functions/registerFunctionsNull.cpp (100%) rename dbms/{src => }/Functions/registerFunctionsRandom.cpp (100%) rename dbms/{src => }/Functions/registerFunctionsReinterpret.cpp (100%) rename dbms/{src => }/Functions/registerFunctionsString.cpp (100%) rename dbms/{src => }/Functions/registerFunctionsStringSearch.cpp (100%) rename dbms/{src => }/Functions/registerFunctionsTuple.cpp (100%) rename dbms/{src => }/Functions/registerFunctionsVisitParam.cpp (100%) rename dbms/{src => }/Functions/reinterpretAsFixedString.cpp (100%) rename dbms/{src => }/Functions/reinterpretAsString.cpp (100%) rename dbms/{src => }/Functions/reinterpretStringAs.cpp (100%) rename dbms/{src => }/Functions/repeat.cpp (100%) rename dbms/{src => }/Functions/replicate.cpp (100%) rename dbms/{src => }/Functions/reverse.cpp (100%) rename dbms/{src => }/Functions/reverseUTF8.cpp (100%) rename dbms/{src => }/Functions/roundAge.cpp (100%) rename dbms/{src => }/Functions/roundDuration.cpp (100%) rename dbms/{src => }/Functions/roundToExp2.cpp (100%) rename dbms/{src => }/Functions/rowNumberInAllBlocks.cpp (100%) rename dbms/{src => }/Functions/rowNumberInBlock.cpp (100%) rename dbms/{src => }/Functions/runningAccumulate.cpp (100%) rename dbms/{src => }/Functions/runningDifference.cpp (100%) rename dbms/{src => }/Functions/runningDifference.h (100%) rename dbms/{src => }/Functions/runningDifferenceStartingWithFirstValue.cpp (100%) rename dbms/{src => }/Functions/sigmoid.cpp (100%) rename dbms/{src => }/Functions/sin.cpp (100%) rename dbms/{src => }/Functions/sleep.cpp (100%) rename dbms/{src => }/Functions/sleep.h (100%) rename dbms/{src => }/Functions/sleepEachRow.cpp (100%) rename dbms/{src => }/Functions/sqrt.cpp (100%) rename dbms/{src => }/Functions/startsWith.cpp (100%) rename dbms/{src => }/Functions/stringToH3.cpp (100%) rename dbms/{src => }/Functions/substring.cpp (100%) rename dbms/{src => }/Functions/subtractDays.cpp (100%) rename dbms/{src => }/Functions/subtractHours.cpp (100%) rename dbms/{src => }/Functions/subtractMinutes.cpp (100%) rename dbms/{src => }/Functions/subtractMonths.cpp (100%) rename dbms/{src => }/Functions/subtractQuarters.cpp (100%) rename dbms/{src => }/Functions/subtractSeconds.cpp (100%) rename dbms/{src => }/Functions/subtractWeeks.cpp (100%) rename dbms/{src => }/Functions/subtractYears.cpp (100%) rename dbms/{src => }/Functions/sumburConsistentHash.cpp (100%) rename dbms/{src => }/Functions/tan.cpp (100%) rename dbms/{src => }/Functions/tanh.cpp (100%) rename dbms/{src => }/Functions/tests/CMakeLists.txt (100%) rename dbms/{src => 
}/Functions/tests/number_traits.cpp (100%) rename dbms/{src => }/Functions/tgamma.cpp (100%) rename dbms/{src => }/Functions/throwIf.cpp (100%) rename dbms/{src => }/Functions/timeSlot.cpp (100%) rename dbms/{src => }/Functions/timeSlots.cpp (100%) rename dbms/{src => }/Functions/timezone.cpp (100%) rename dbms/{src => }/Functions/toColumnTypeName.cpp (100%) rename dbms/{src => }/Functions/toCustomWeek.cpp (100%) rename dbms/{src => }/Functions/toDayOfMonth.cpp (100%) rename dbms/{src => }/Functions/toDayOfWeek.cpp (100%) rename dbms/{src => }/Functions/toDayOfYear.cpp (100%) rename dbms/{src => }/Functions/toHour.cpp (100%) rename dbms/{src => }/Functions/toISOWeek.cpp (100%) rename dbms/{src => }/Functions/toISOYear.cpp (100%) rename dbms/{src => }/Functions/toLowCardinality.cpp (100%) rename dbms/{src => }/Functions/toMinute.cpp (100%) rename dbms/{src => }/Functions/toMonday.cpp (100%) rename dbms/{src => }/Functions/toMonth.cpp (100%) rename dbms/{src => }/Functions/toNullable.cpp (100%) rename dbms/{src => }/Functions/toQuarter.cpp (100%) rename dbms/{src => }/Functions/toRelativeDayNum.cpp (100%) rename dbms/{src => }/Functions/toRelativeHourNum.cpp (100%) rename dbms/{src => }/Functions/toRelativeMinuteNum.cpp (100%) rename dbms/{src => }/Functions/toRelativeMonthNum.cpp (100%) rename dbms/{src => }/Functions/toRelativeQuarterNum.cpp (100%) rename dbms/{src => }/Functions/toRelativeSecondNum.cpp (100%) rename dbms/{src => }/Functions/toRelativeWeekNum.cpp (100%) rename dbms/{src => }/Functions/toRelativeYearNum.cpp (100%) rename dbms/{src => }/Functions/toSecond.cpp (100%) rename dbms/{src => }/Functions/toStartOfDay.cpp (100%) rename dbms/{src => }/Functions/toStartOfFifteenMinutes.cpp (100%) rename dbms/{src => }/Functions/toStartOfFiveMinute.cpp (100%) rename dbms/{src => }/Functions/toStartOfHour.cpp (100%) rename dbms/{src => }/Functions/toStartOfISOYear.cpp (100%) rename dbms/{src => }/Functions/toStartOfInterval.cpp (100%) rename dbms/{src => }/Functions/toStartOfMinute.cpp (100%) rename dbms/{src => }/Functions/toStartOfMonth.cpp (100%) rename dbms/{src => }/Functions/toStartOfQuarter.cpp (100%) rename dbms/{src => }/Functions/toStartOfTenMinutes.cpp (100%) rename dbms/{src => }/Functions/toStartOfYear.cpp (100%) rename dbms/{src => }/Functions/toTime.cpp (100%) rename dbms/{src => }/Functions/toTimeZone.cpp (100%) rename dbms/{src => }/Functions/toTypeName.cpp (100%) rename dbms/{src => }/Functions/toValidUTF8.cpp (100%) rename dbms/{src => }/Functions/toYYYYMM.cpp (100%) rename dbms/{src => }/Functions/toYYYYMMDD.cpp (100%) rename dbms/{src => }/Functions/toYYYYMMDDhhmmss.cpp (100%) rename dbms/{src => }/Functions/toYear.cpp (100%) rename dbms/{src => }/Functions/today.cpp (100%) rename dbms/{src => }/Functions/transform.cpp (100%) rename dbms/{src => }/Functions/trap.cpp (100%) rename dbms/{src => }/Functions/trim.cpp (100%) rename dbms/{src => }/Functions/tryBase64Decode.cpp (100%) rename dbms/{src => }/Functions/tuple.cpp (100%) rename dbms/{src => }/Functions/tupleElement.cpp (100%) rename dbms/{src => }/Functions/upper.cpp (100%) rename dbms/{src => }/Functions/upperUTF8.cpp (100%) rename dbms/{src => }/Functions/uptime.cpp (100%) rename dbms/{src => }/Functions/version.cpp (100%) rename dbms/{src => }/Functions/visibleWidth.cpp (100%) rename dbms/{src => }/Functions/visitParamExtractBool.cpp (100%) rename dbms/{src => }/Functions/visitParamExtractFloat.cpp (100%) rename dbms/{src => }/Functions/visitParamExtractInt.cpp (100%) rename dbms/{src => 
}/Functions/visitParamExtractRaw.cpp (100%) rename dbms/{src => }/Functions/visitParamExtractString.cpp (100%) rename dbms/{src => }/Functions/visitParamExtractUInt.cpp (100%) rename dbms/{src => }/Functions/visitParamHas.cpp (100%) rename dbms/{src => }/Functions/yandexConsistentHash.cpp (100%) rename dbms/{src => }/Functions/yesterday.cpp (100%) rename dbms/{src => }/IO/AIO.cpp (100%) rename dbms/{src => }/IO/AIO.h (100%) rename dbms/{src => }/IO/AIOContextPool.cpp (100%) rename dbms/{src => }/IO/AIOContextPool.h (100%) rename dbms/{src => }/IO/AsynchronousWriteBuffer.h (100%) rename dbms/{src => }/IO/BitHelpers.h (100%) rename dbms/{src => }/IO/BrotliReadBuffer.cpp (100%) rename dbms/{src => }/IO/BrotliReadBuffer.h (100%) rename dbms/{src => }/IO/BrotliWriteBuffer.cpp (100%) rename dbms/{src => }/IO/BrotliWriteBuffer.h (100%) rename dbms/{src => }/IO/BufferBase.h (100%) rename dbms/{src => }/IO/BufferWithOwnMemory.h (100%) rename dbms/{src => }/IO/CMakeLists.txt (100%) rename dbms/{src => }/IO/CascadeWriteBuffer.cpp (100%) rename dbms/{src => }/IO/CascadeWriteBuffer.h (100%) rename dbms/{src => }/IO/CompressionMethod.cpp (100%) rename dbms/{src => }/IO/CompressionMethod.h (100%) rename dbms/{src => }/IO/ConcatReadBuffer.h (100%) rename dbms/{src => }/IO/ConnectionTimeouts.h (100%) rename dbms/{src => }/IO/DoubleConverter.cpp (100%) rename dbms/{src => }/IO/DoubleConverter.h (100%) rename dbms/{src => }/IO/HDFSCommon.cpp (100%) rename dbms/{src => }/IO/HDFSCommon.h (100%) rename dbms/{src => }/IO/HTTPCommon.cpp (100%) rename dbms/{src => }/IO/HTTPCommon.h (100%) rename dbms/{src => }/IO/HashingReadBuffer.h (100%) rename dbms/{src => }/IO/HashingWriteBuffer.cpp (100%) rename dbms/{src => }/IO/HashingWriteBuffer.h (100%) rename dbms/{src => }/IO/HexWriteBuffer.cpp (100%) rename dbms/{src => }/IO/HexWriteBuffer.h (100%) rename dbms/{src => }/IO/IReadableWriteBuffer.h (100%) rename dbms/{src => }/IO/LimitReadBuffer.cpp (100%) rename dbms/{src => }/IO/LimitReadBuffer.h (100%) rename dbms/{src => }/IO/MMapReadBufferFromFile.cpp (100%) rename dbms/{src => }/IO/MMapReadBufferFromFile.h (100%) rename dbms/{src => }/IO/MMapReadBufferFromFileDescriptor.cpp (100%) rename dbms/{src => }/IO/MMapReadBufferFromFileDescriptor.h (100%) rename dbms/{src => }/IO/MemoryReadWriteBuffer.cpp (100%) rename dbms/{src => }/IO/MemoryReadWriteBuffer.h (100%) rename dbms/{src => }/IO/NullWriteBuffer.cpp (100%) rename dbms/{src => }/IO/NullWriteBuffer.h (100%) rename dbms/{src => }/IO/Operators.h (100%) rename dbms/{src => }/IO/PeekableReadBuffer.cpp (100%) rename dbms/{src => }/IO/PeekableReadBuffer.h (100%) rename dbms/{src => }/IO/Progress.cpp (100%) rename dbms/{src => }/IO/Progress.h (100%) rename dbms/{src => }/IO/ReadBuffer.h (100%) rename dbms/{src => }/IO/ReadBufferAIO.cpp (100%) rename dbms/{src => }/IO/ReadBufferAIO.h (100%) rename dbms/{src => }/IO/ReadBufferFromFile.cpp (100%) rename dbms/{src => }/IO/ReadBufferFromFile.h (100%) rename dbms/{src => }/IO/ReadBufferFromFileBase.cpp (100%) rename dbms/{src => }/IO/ReadBufferFromFileBase.h (100%) rename dbms/{src => }/IO/ReadBufferFromFileDescriptor.cpp (100%) rename dbms/{src => }/IO/ReadBufferFromFileDescriptor.h (100%) rename dbms/{src => }/IO/ReadBufferFromHDFS.cpp (100%) rename dbms/{src => }/IO/ReadBufferFromHDFS.h (100%) rename dbms/{src => }/IO/ReadBufferFromIStream.cpp (100%) rename dbms/{src => }/IO/ReadBufferFromIStream.h (100%) rename dbms/{src => }/IO/ReadBufferFromMemory.cpp (100%) rename dbms/{src => }/IO/ReadBufferFromMemory.h (100%) rename 
dbms/{src => }/IO/ReadBufferFromPocoSocket.cpp (100%) rename dbms/{src => }/IO/ReadBufferFromPocoSocket.h (100%) rename dbms/{src => }/IO/ReadBufferFromS3.cpp (100%) rename dbms/{src => }/IO/ReadBufferFromS3.h (100%) rename dbms/{src => }/IO/ReadBufferFromString.h (100%) rename dbms/{src => }/IO/ReadHelpers.cpp (100%) rename dbms/{src => }/IO/ReadHelpers.h (100%) rename dbms/{src => }/IO/ReadWriteBufferFromHTTP.cpp (100%) rename dbms/{src => }/IO/ReadWriteBufferFromHTTP.h (100%) rename dbms/{src => }/IO/S3Common.cpp (100%) rename dbms/{src => }/IO/S3Common.h (100%) rename dbms/{src => }/IO/SeekableReadBuffer.h (100%) rename dbms/{src => }/IO/UncompressedCache.h (100%) rename dbms/{src => }/IO/UseSSL.cpp (100%) rename dbms/{src => }/IO/UseSSL.h (100%) rename dbms/{src => }/IO/VarInt.h (100%) rename dbms/{src => }/IO/WriteBuffer.h (100%) rename dbms/{src => }/IO/WriteBufferAIO.cpp (100%) rename dbms/{src => }/IO/WriteBufferAIO.h (100%) rename dbms/{src => }/IO/WriteBufferFromArena.h (100%) rename dbms/{src => }/IO/WriteBufferFromFile.cpp (100%) rename dbms/{src => }/IO/WriteBufferFromFile.h (100%) rename dbms/{src => }/IO/WriteBufferFromFileBase.cpp (100%) rename dbms/{src => }/IO/WriteBufferFromFileBase.h (100%) rename dbms/{src => }/IO/WriteBufferFromFileDescriptor.cpp (100%) rename dbms/{src => }/IO/WriteBufferFromFileDescriptor.h (100%) rename dbms/{src => }/IO/WriteBufferFromFileDescriptorDiscardOnFailure.cpp (100%) rename dbms/{src => }/IO/WriteBufferFromFileDescriptorDiscardOnFailure.h (100%) rename dbms/{src => }/IO/WriteBufferFromHDFS.cpp (100%) rename dbms/{src => }/IO/WriteBufferFromHDFS.h (100%) rename dbms/{src => }/IO/WriteBufferFromHTTP.cpp (100%) rename dbms/{src => }/IO/WriteBufferFromHTTP.h (100%) rename dbms/{src => }/IO/WriteBufferFromHTTPServerResponse.cpp (100%) rename dbms/{src => }/IO/WriteBufferFromHTTPServerResponse.h (100%) rename dbms/{src => }/IO/WriteBufferFromOStream.cpp (100%) rename dbms/{src => }/IO/WriteBufferFromOStream.h (100%) rename dbms/{src => }/IO/WriteBufferFromPocoSocket.cpp (100%) rename dbms/{src => }/IO/WriteBufferFromPocoSocket.h (100%) rename dbms/{src => }/IO/WriteBufferFromS3.cpp (100%) rename dbms/{src => }/IO/WriteBufferFromS3.h (100%) rename dbms/{src => }/IO/WriteBufferFromString.h (100%) rename dbms/{src => }/IO/WriteBufferFromTemporaryFile.cpp (100%) rename dbms/{src => }/IO/WriteBufferFromTemporaryFile.h (100%) rename dbms/{src => }/IO/WriteBufferFromVector.h (100%) rename dbms/{src => }/IO/WriteBufferValidUTF8.cpp (100%) rename dbms/{src => }/IO/WriteBufferValidUTF8.h (100%) rename dbms/{src => }/IO/WriteHelpers.cpp (100%) rename dbms/{src => }/IO/WriteHelpers.h (100%) rename dbms/{src => }/IO/WriteIntText.h (100%) rename dbms/{src => }/IO/ZlibDeflatingWriteBuffer.cpp (100%) rename dbms/{src => }/IO/ZlibDeflatingWriteBuffer.h (100%) rename dbms/{src => }/IO/ZlibInflatingReadBuffer.cpp (100%) rename dbms/{src => }/IO/ZlibInflatingReadBuffer.h (100%) rename dbms/{src => }/IO/copyData.cpp (100%) rename dbms/{src => }/IO/copyData.h (100%) rename dbms/{src => }/IO/createReadBufferFromFileBase.cpp (100%) rename dbms/{src => }/IO/createReadBufferFromFileBase.h (100%) rename dbms/{src => }/IO/createWriteBufferFromFileBase.cpp (100%) rename dbms/{src => }/IO/createWriteBufferFromFileBase.h (100%) rename dbms/{src => }/IO/parseDateTimeBestEffort.cpp (100%) rename dbms/{src => }/IO/parseDateTimeBestEffort.h (100%) rename dbms/{src => }/IO/readDecimalText.h (100%) rename dbms/{src => }/IO/readFloatText.cpp (100%) rename dbms/{src => 
}/IO/readFloatText.h (100%) rename dbms/{src => }/IO/tests/CMakeLists.txt (100%) rename dbms/{src => }/IO/tests/async_write.cpp (100%) rename dbms/{src => }/IO/tests/gtest_DateTime64_parsing_and_writing.cpp (100%) rename dbms/{src => }/IO/tests/gtest_DateTimeToString.cpp (100%) rename dbms/{src => }/IO/tests/gtest_aio_seek_back_after_eof.cpp (100%) rename dbms/{src => }/IO/tests/gtest_bit_io.cpp (100%) rename dbms/{src => }/IO/tests/gtest_cascade_and_memory_write_buffer.cpp (100%) rename dbms/{src => }/IO/tests/gtest_peekable_read_buffer.cpp (100%) rename dbms/{src => }/IO/tests/hashing_buffer.h (100%) rename dbms/{src => }/IO/tests/hashing_read_buffer.cpp (100%) rename dbms/{src => }/IO/tests/hashing_write_buffer.cpp (100%) rename dbms/{src => }/IO/tests/io_operators.cpp (100%) rename dbms/{src => }/IO/tests/limit_read_buffer.cpp (100%) rename dbms/{src => }/IO/tests/limit_read_buffer.reference (100%) rename dbms/{src => }/IO/tests/limit_read_buffer.sh (100%) rename dbms/{src => }/IO/tests/limit_read_buffer2.cpp (100%) rename dbms/{src => }/IO/tests/mempbrk.cpp (100%) rename dbms/{src => }/IO/tests/o_direct_and_dirty_pages.cpp (100%) rename dbms/{src => }/IO/tests/parse_date_time_best_effort.cpp (100%) rename dbms/{src => }/IO/tests/parse_int_perf.cpp (100%) rename dbms/{src => }/IO/tests/parse_int_perf2.cpp (100%) rename dbms/{src => }/IO/tests/read_buffer.cpp (100%) rename dbms/{src => }/IO/tests/read_buffer_aio.cpp (100%) rename dbms/{src => }/IO/tests/read_buffer_perf.cpp (100%) rename dbms/{src => }/IO/tests/read_escaped_string.cpp (100%) rename dbms/{src => }/IO/tests/read_float_perf.cpp (100%) rename dbms/{src => }/IO/tests/read_write_int.cpp (100%) rename dbms/{src => }/IO/tests/ryu_test.cpp (100%) rename dbms/{src => }/IO/tests/valid_utf8.cpp (100%) rename dbms/{src => }/IO/tests/valid_utf8_perf.cpp (100%) rename dbms/{src => }/IO/tests/var_uint.cpp (100%) rename dbms/{src => }/IO/tests/write_buffer.cpp (100%) rename dbms/{src => }/IO/tests/write_buffer_aio.cpp (100%) rename dbms/{src => }/IO/tests/write_buffer_perf.cpp (100%) rename dbms/{src => }/IO/tests/write_int.cpp (100%) rename dbms/{src => }/IO/tests/zlib_buffers.cpp (100%) rename dbms/{src => }/IO/tests/zlib_ng_bug.cpp (100%) rename dbms/{src => }/Interpreters/ActionLocksManager.cpp (100%) rename dbms/{src => }/Interpreters/ActionLocksManager.h (100%) rename dbms/{src => }/Interpreters/ActionsVisitor.cpp (100%) rename dbms/{src => }/Interpreters/ActionsVisitor.h (100%) rename dbms/{src => }/Interpreters/AddDefaultDatabaseVisitor.h (100%) rename dbms/{src => }/Interpreters/AggregateDescription.h (100%) rename dbms/{src => }/Interpreters/AggregationCommon.h (100%) rename dbms/{src => }/Interpreters/Aggregator.cpp (100%) rename dbms/{src => }/Interpreters/Aggregator.h (100%) rename dbms/{src => }/Interpreters/Aliases.h (100%) rename dbms/{src => }/Interpreters/AnalyzedJoin.cpp (100%) rename dbms/{src => }/Interpreters/AnalyzedJoin.h (100%) rename dbms/{src => }/Interpreters/ArrayJoinAction.cpp (100%) rename dbms/{src => }/Interpreters/ArrayJoinAction.h (100%) rename dbms/{src => }/Interpreters/ArrayJoinedColumnsVisitor.h (100%) rename dbms/{src => }/Interpreters/AsteriskSemantic.h (100%) rename dbms/{src => }/Interpreters/AsynchronousMetrics.cpp (100%) rename dbms/{src => }/Interpreters/AsynchronousMetrics.h (100%) rename dbms/{src => }/Interpreters/BloomFilter.cpp (100%) rename dbms/{src => }/Interpreters/BloomFilter.h (100%) rename dbms/{src => }/Interpreters/BloomFilterHash.h (100%) rename dbms/{src => 
}/Interpreters/CMakeLists.txt (100%)
rename dbms/{src => }/Interpreters/CancellationCode.h (100%)
rename dbms/{src => }/Interpreters/CatBoostModel.cpp (100%)
rename dbms/{src => }/Interpreters/CatBoostModel.h (100%)
rename dbms/{src => }/Interpreters/ClientInfo.cpp (100%)
rename dbms/{src => }/Interpreters/ClientInfo.h (100%)
rename dbms/{src => }/Interpreters/Cluster.cpp (100%)
rename dbms/{src => }/Interpreters/Cluster.h (100%)
rename dbms/{src => }/Interpreters/ClusterProxy/IStreamFactory.h (100%)
rename dbms/{src => }/Interpreters/ClusterProxy/SelectStreamFactory.cpp (100%)
rename dbms/{src => }/Interpreters/ClusterProxy/SelectStreamFactory.h (100%)
rename dbms/{src => }/Interpreters/ClusterProxy/executeQuery.cpp (100%)
rename dbms/{src => }/Interpreters/ClusterProxy/executeQuery.h (100%)
rename dbms/{src => }/Interpreters/CollectJoinOnKeysVisitor.cpp (100%)
rename dbms/{src => }/Interpreters/CollectJoinOnKeysVisitor.h (100%)
rename dbms/{src => }/Interpreters/ColumnNamesContext.cpp (100%)
rename dbms/{src => }/Interpreters/ColumnNamesContext.h (100%)
rename dbms/{src => }/Interpreters/Context.cpp (100%)
rename dbms/{src => }/Interpreters/Context.h (100%)
rename dbms/{src => }/Interpreters/CrossToInnerJoinVisitor.cpp (100%)
rename dbms/{src => }/Interpreters/CrossToInnerJoinVisitor.h (100%)
rename dbms/{src => }/Interpreters/DDLWorker.cpp (100%)
rename dbms/{src => }/Interpreters/DDLWorker.h (100%)
rename dbms/{src => }/Interpreters/DNSCacheUpdater.cpp (100%)
rename dbms/{src => }/Interpreters/DNSCacheUpdater.h (100%)
rename dbms/{src => }/Interpreters/DatabaseAndTableWithAlias.cpp (100%)
rename dbms/{src => }/Interpreters/DatabaseAndTableWithAlias.h (100%)
rename dbms/{src => }/Interpreters/DatabaseCatalog.cpp (100%)
rename dbms/{src => }/Interpreters/DatabaseCatalog.h (100%)
rename dbms/{src => }/Interpreters/EmbeddedDictionaries.cpp (100%)
rename dbms/{src => }/Interpreters/EmbeddedDictionaries.h (100%)
rename dbms/{src => }/Interpreters/ExecuteScalarSubqueriesVisitor.cpp (100%)
rename dbms/{src => }/Interpreters/ExecuteScalarSubqueriesVisitor.h (100%)
rename dbms/{src => }/Interpreters/ExpressionActions.cpp (100%)
rename dbms/{src => }/Interpreters/ExpressionActions.h (100%)
rename dbms/{src => }/Interpreters/ExpressionAnalyzer.cpp (100%)
rename dbms/{src => }/Interpreters/ExpressionAnalyzer.h (100%)
rename dbms/{src => }/Interpreters/ExpressionJIT.cpp (100%)
rename dbms/{src => }/Interpreters/ExpressionJIT.h (100%)
rename dbms/{src => }/Interpreters/ExternalDictionariesLoader.cpp (100%)
rename dbms/{src => }/Interpreters/ExternalDictionariesLoader.h (100%)
rename dbms/{src => }/Interpreters/ExternalLoader.cpp (100%)
rename dbms/{src => }/Interpreters/ExternalLoader.h (100%)
rename dbms/{src => }/Interpreters/ExternalLoaderDatabaseConfigRepository.cpp (100%)
rename dbms/{src => }/Interpreters/ExternalLoaderDatabaseConfigRepository.h (100%)
rename dbms/{src => }/Interpreters/ExternalLoaderTempConfigRepository.cpp (100%)
rename dbms/{src => }/Interpreters/ExternalLoaderTempConfigRepository.h (100%)
rename dbms/{src => }/Interpreters/ExternalLoaderXMLConfigRepository.cpp (100%)
rename dbms/{src => }/Interpreters/ExternalLoaderXMLConfigRepository.h (100%)
rename dbms/{src => }/Interpreters/ExternalModelsLoader.cpp (100%)
rename dbms/{src => }/Interpreters/ExternalModelsLoader.h (100%)
rename dbms/{src => }/Interpreters/ExtractExpressionInfoVisitor.cpp (100%)
rename dbms/{src => }/Interpreters/ExtractExpressionInfoVisitor.h (100%)
rename dbms/{src => }/Interpreters/FillingRow.cpp (100%)
rename dbms/{src => }/Interpreters/FillingRow.h (100%)
rename dbms/{src => }/Interpreters/GetAggregatesVisitor.h (100%)
rename dbms/{src => }/Interpreters/GlobalSubqueriesVisitor.h (100%)
rename dbms/{src => }/Interpreters/IExternalLoadable.cpp (100%)
rename dbms/{src => }/Interpreters/IExternalLoadable.h (100%)
rename dbms/{src => }/Interpreters/IExternalLoaderConfigRepository.h (100%)
rename dbms/{src => }/Interpreters/IInterpreter.h (100%)
rename dbms/{src => }/Interpreters/IJoin.h (100%)
rename dbms/{src => }/Interpreters/IdentifierSemantic.cpp (100%)
rename dbms/{src => }/Interpreters/IdentifierSemantic.h (100%)
rename dbms/{src => }/Interpreters/InDepthNodeVisitor.h (100%)
rename dbms/{src => }/Interpreters/InJoinSubqueriesPreprocessor.cpp (100%)
rename dbms/{src => }/Interpreters/InJoinSubqueriesPreprocessor.h (100%)
rename dbms/{src => }/Interpreters/InternalTextLogsQueue.cpp (100%)
rename dbms/{src => }/Interpreters/InternalTextLogsQueue.h (100%)
rename dbms/{src => }/Interpreters/InterpreterAlterQuery.cpp (100%)
rename dbms/{src => }/Interpreters/InterpreterAlterQuery.h (100%)
rename dbms/{src => }/Interpreters/InterpreterCheckQuery.cpp (100%)
rename dbms/{src => }/Interpreters/InterpreterCheckQuery.h (100%)
rename dbms/{src => }/Interpreters/InterpreterCreateQuery.cpp (100%)
rename dbms/{src => }/Interpreters/InterpreterCreateQuery.h (100%)
rename dbms/{src => }/Interpreters/InterpreterCreateQuotaQuery.cpp (100%)
rename dbms/{src => }/Interpreters/InterpreterCreateQuotaQuery.h (100%)
rename dbms/{src => }/Interpreters/InterpreterCreateRoleQuery.cpp (100%)
rename dbms/{src => }/Interpreters/InterpreterCreateRoleQuery.h (100%)
rename dbms/{src => }/Interpreters/InterpreterCreateRowPolicyQuery.cpp (100%)
rename dbms/{src => }/Interpreters/InterpreterCreateRowPolicyQuery.h (100%)
rename dbms/{src => }/Interpreters/InterpreterCreateSettingsProfileQuery.cpp (100%)
rename dbms/{src => }/Interpreters/InterpreterCreateSettingsProfileQuery.h (100%)
rename dbms/{src => }/Interpreters/InterpreterCreateUserQuery.cpp (100%)
rename dbms/{src => }/Interpreters/InterpreterCreateUserQuery.h (100%)
rename dbms/{src => }/Interpreters/InterpreterDescribeQuery.cpp (100%)
rename dbms/{src => }/Interpreters/InterpreterDescribeQuery.h (100%)
rename dbms/{src => }/Interpreters/InterpreterDropAccessEntityQuery.cpp (100%)
rename dbms/{src => }/Interpreters/InterpreterDropAccessEntityQuery.h (100%)
rename dbms/{src => }/Interpreters/InterpreterDropQuery.cpp (100%)
rename dbms/{src => }/Interpreters/InterpreterDropQuery.h (100%)
rename dbms/{src => }/Interpreters/InterpreterExistsQuery.cpp (100%)
rename dbms/{src => }/Interpreters/InterpreterExistsQuery.h (100%)
rename dbms/{src => }/Interpreters/InterpreterExplainQuery.cpp (100%)
rename dbms/{src => }/Interpreters/InterpreterExplainQuery.h (100%)
rename dbms/{src => }/Interpreters/InterpreterFactory.cpp (100%)
rename dbms/{src => }/Interpreters/InterpreterFactory.h (100%)
rename dbms/{src => }/Interpreters/InterpreterGrantQuery.cpp (100%)
rename dbms/{src => }/Interpreters/InterpreterGrantQuery.h (100%)
rename dbms/{src => }/Interpreters/InterpreterInsertQuery.cpp (100%)
rename dbms/{src => }/Interpreters/InterpreterInsertQuery.h (100%)
rename dbms/{src => }/Interpreters/InterpreterKillQueryQuery.cpp (100%)
rename dbms/{src => }/Interpreters/InterpreterKillQueryQuery.h (100%)
rename dbms/{src => }/Interpreters/InterpreterOptimizeQuery.cpp (100%)
rename dbms/{src => }/Interpreters/InterpreterOptimizeQuery.h (100%)
rename dbms/{src => }/Interpreters/InterpreterRenameQuery.cpp (100%)
rename dbms/{src => }/Interpreters/InterpreterRenameQuery.h (100%)
rename dbms/{src => }/Interpreters/InterpreterSelectQuery.cpp (100%)
rename dbms/{src => }/Interpreters/InterpreterSelectQuery.h (100%)
rename dbms/{src => }/Interpreters/InterpreterSelectWithUnionQuery.cpp (100%)
rename dbms/{src => }/Interpreters/InterpreterSelectWithUnionQuery.h (100%)
rename dbms/{src => }/Interpreters/InterpreterSetQuery.cpp (100%)
rename dbms/{src => }/Interpreters/InterpreterSetQuery.h (100%)
rename dbms/{src => }/Interpreters/InterpreterSetRoleQuery.cpp (100%)
rename dbms/{src => }/Interpreters/InterpreterSetRoleQuery.h (100%)
rename dbms/{src => }/Interpreters/InterpreterShowCreateAccessEntityQuery.cpp (100%)
rename dbms/{src => }/Interpreters/InterpreterShowCreateAccessEntityQuery.h (100%)
rename dbms/{src => }/Interpreters/InterpreterShowCreateQuery.cpp (100%)
rename dbms/{src => }/Interpreters/InterpreterShowCreateQuery.h (100%)
rename dbms/{src => }/Interpreters/InterpreterShowGrantsQuery.cpp (100%)
rename dbms/{src => }/Interpreters/InterpreterShowGrantsQuery.h (100%)
rename dbms/{src => }/Interpreters/InterpreterShowProcesslistQuery.cpp (100%)
rename dbms/{src => }/Interpreters/InterpreterShowProcesslistQuery.h (100%)
rename dbms/{src => }/Interpreters/InterpreterShowQuotasQuery.cpp (100%)
rename dbms/{src => }/Interpreters/InterpreterShowQuotasQuery.h (100%)
rename dbms/{src => }/Interpreters/InterpreterShowRowPoliciesQuery.cpp (100%)
rename dbms/{src => }/Interpreters/InterpreterShowRowPoliciesQuery.h (100%)
rename dbms/{src => }/Interpreters/InterpreterShowTablesQuery.cpp (100%)
rename dbms/{src => }/Interpreters/InterpreterShowTablesQuery.h (100%)
rename dbms/{src => }/Interpreters/InterpreterSystemQuery.cpp (100%)
rename dbms/{src => }/Interpreters/InterpreterSystemQuery.h (100%)
rename dbms/{src => }/Interpreters/InterpreterUseQuery.cpp (100%)
rename dbms/{src => }/Interpreters/InterpreterUseQuery.h (100%)
rename dbms/{src => }/Interpreters/InterpreterWatchQuery.cpp (100%)
rename dbms/{src => }/Interpreters/InterpreterWatchQuery.h (100%)
rename dbms/{src => }/Interpreters/InterserverIOHandler.h (100%)
rename dbms/{src => }/Interpreters/Join.cpp (100%)
rename dbms/{src => }/Interpreters/Join.h (100%)
rename dbms/{src => }/Interpreters/JoinSwitcher.cpp (100%)
rename dbms/{src => }/Interpreters/JoinSwitcher.h (100%)
rename dbms/{src => }/Interpreters/JoinToSubqueryTransformVisitor.cpp (100%)
rename dbms/{src => }/Interpreters/JoinToSubqueryTransformVisitor.h (100%)
rename dbms/{src => }/Interpreters/JoinedTables.cpp (100%)
rename dbms/{src => }/Interpreters/JoinedTables.h (100%)
rename dbms/{src => }/Interpreters/LogicalExpressionsOptimizer.cpp (100%)
rename dbms/{src => }/Interpreters/LogicalExpressionsOptimizer.h (100%)
rename dbms/{src => }/Interpreters/MarkTableIdentifiersVisitor.cpp (100%)
rename dbms/{src => }/Interpreters/MarkTableIdentifiersVisitor.h (100%)
rename dbms/{src => }/Interpreters/MergeJoin.cpp (100%)
rename dbms/{src => }/Interpreters/MergeJoin.h (100%)
rename dbms/{src => }/Interpreters/MetricLog.cpp (100%)
rename dbms/{src => }/Interpreters/MetricLog.h (100%)
rename dbms/{src => }/Interpreters/MutationsInterpreter.cpp (100%)
rename dbms/{src => }/Interpreters/MutationsInterpreter.h (100%)
rename dbms/{src => }/Interpreters/NullableUtils.cpp (100%)
rename dbms/{src => }/Interpreters/NullableUtils.h (100%)
rename dbms/{src => }/Interpreters/OptimizeIfChains.cpp (100%)
rename dbms/{src => }/Interpreters/OptimizeIfChains.h (100%)
rename dbms/{src => }/Interpreters/OptimizeIfWithConstantConditionVisitor.cpp (100%)
rename dbms/{src => }/Interpreters/OptimizeIfWithConstantConditionVisitor.h (100%)
rename dbms/{src => }/Interpreters/PartLog.cpp (100%)
rename dbms/{src => }/Interpreters/PartLog.h (100%)
rename dbms/{src => }/Interpreters/PredicateExpressionsOptimizer.cpp (100%)
rename dbms/{src => }/Interpreters/PredicateExpressionsOptimizer.h (100%)
rename dbms/{src => }/Interpreters/PredicateRewriteVisitor.cpp (100%)
rename dbms/{src => }/Interpreters/PredicateRewriteVisitor.h (100%)
rename dbms/{src => }/Interpreters/PreparedSets.h (100%)
rename dbms/{src => }/Interpreters/ProcessList.cpp (100%)
rename dbms/{src => }/Interpreters/ProcessList.h (100%)
rename dbms/{src => }/Interpreters/ProfileEventsExt.cpp (100%)
rename dbms/{src => }/Interpreters/ProfileEventsExt.h (100%)
rename dbms/{src => }/Interpreters/QueryAliasesVisitor.cpp (100%)
rename dbms/{src => }/Interpreters/QueryAliasesVisitor.h (100%)
rename dbms/{src => }/Interpreters/QueryLog.cpp (100%)
rename dbms/{src => }/Interpreters/QueryLog.h (100%)
rename dbms/{src => }/Interpreters/QueryNormalizer.cpp (100%)
rename dbms/{src => }/Interpreters/QueryNormalizer.h (100%)
rename dbms/{src => }/Interpreters/QueryPriorities.h (100%)
rename dbms/{src => }/Interpreters/QueryThreadLog.cpp (100%)
rename dbms/{src => }/Interpreters/QueryThreadLog.h (100%)
rename dbms/{src => }/Interpreters/ReplaceQueryParameterVisitor.cpp (100%)
rename dbms/{src => }/Interpreters/ReplaceQueryParameterVisitor.h (100%)
rename dbms/{src => }/Interpreters/RequiredSourceColumnsVisitor.cpp (100%)
rename dbms/{src => }/Interpreters/RequiredSourceColumnsVisitor.h (100%)
rename dbms/{src => }/Interpreters/RowRefs.cpp (100%)
rename dbms/{src => }/Interpreters/RowRefs.h (100%)
rename dbms/{src => }/Interpreters/SelectQueryOptions.h (100%)
rename dbms/{src => }/Interpreters/Set.cpp (100%)
rename dbms/{src => }/Interpreters/Set.h (100%)
rename dbms/{src => }/Interpreters/SetVariants.cpp (100%)
rename dbms/{src => }/Interpreters/SetVariants.h (100%)
rename dbms/{src => }/Interpreters/StorageID.cpp (100%)
rename dbms/{src => }/Interpreters/StorageID.h (100%)
rename dbms/{src => }/Interpreters/SubqueryForSet.cpp (100%)
rename dbms/{src => }/Interpreters/SubqueryForSet.h (100%)
rename dbms/{src => }/Interpreters/SyntaxAnalyzer.cpp (100%)
rename dbms/{src => }/Interpreters/SyntaxAnalyzer.h (100%)
rename dbms/{src => }/Interpreters/SystemLog.cpp (100%)
rename dbms/{src => }/Interpreters/SystemLog.h (100%)
rename dbms/{src => }/Interpreters/TablesStatus.cpp (100%)
rename dbms/{src => }/Interpreters/TablesStatus.h (100%)
rename dbms/{src => }/Interpreters/TextLog.cpp (100%)
rename dbms/{src => }/Interpreters/TextLog.h (100%)
rename dbms/{src => }/Interpreters/ThreadStatusExt.cpp (100%)
rename dbms/{src => }/Interpreters/TraceLog.cpp (100%)
rename dbms/{src => }/Interpreters/TraceLog.h (100%)
rename dbms/{src => }/Interpreters/TranslateQualifiedNamesVisitor.cpp (100%)
rename dbms/{src => }/Interpreters/TranslateQualifiedNamesVisitor.h (100%)
rename dbms/{src => }/Interpreters/addMissingDefaults.cpp (100%)
rename dbms/{src => }/Interpreters/addMissingDefaults.h (100%)
rename dbms/{src => }/Interpreters/addTypeConversionToAST.cpp (100%)
rename dbms/{src => }/Interpreters/addTypeConversionToAST.h (100%)
rename dbms/{src => }/Interpreters/asof.h (100%)
rename dbms/{src => }/Interpreters/castColumn.cpp (100%)
rename dbms/{src => }/Interpreters/castColumn.h (100%)
rename dbms/{src => }/Interpreters/convertFieldToType.cpp (100%)
rename dbms/{src => }/Interpreters/convertFieldToType.h (100%)
rename dbms/{src => }/Interpreters/createBlockSelector.cpp (100%)
rename dbms/{src => }/Interpreters/createBlockSelector.h (100%)
rename dbms/{src => }/Interpreters/evaluateConstantExpression.cpp (100%)
rename dbms/{src => }/Interpreters/evaluateConstantExpression.h (100%)
rename dbms/{src => }/Interpreters/executeQuery.cpp (100%)
rename dbms/{src => }/Interpreters/executeQuery.h (100%)
rename dbms/{src => }/Interpreters/getClusterName.cpp (100%)
rename dbms/{src => }/Interpreters/getClusterName.h (100%)
rename dbms/{src => }/Interpreters/getTableExpressions.cpp (100%)
rename dbms/{src => }/Interpreters/getTableExpressions.h (100%)
rename dbms/{src => }/Interpreters/inplaceBlockConversions.cpp (100%)
rename dbms/{src => }/Interpreters/inplaceBlockConversions.h (100%)
rename dbms/{src => }/Interpreters/interpretSubquery.cpp (100%)
rename dbms/{src => }/Interpreters/interpretSubquery.h (100%)
rename dbms/{src => }/Interpreters/joinDispatch.h (100%)
rename dbms/{src => }/Interpreters/join_common.cpp (100%)
rename dbms/{src => }/Interpreters/join_common.h (100%)
rename dbms/{src => }/Interpreters/loadMetadata.cpp (100%)
rename dbms/{src => }/Interpreters/loadMetadata.h (100%)
rename dbms/{src => }/Interpreters/misc.h (100%)
rename dbms/{src => }/Interpreters/sortBlock.cpp (100%)
rename dbms/{src => }/Interpreters/sortBlock.h (100%)
rename dbms/{src => }/Interpreters/tests/CMakeLists.txt (100%)
rename dbms/{src => }/Interpreters/tests/aggregate.cpp (100%)
rename dbms/{src => }/Interpreters/tests/create_query.cpp (100%)
rename dbms/{src => }/Interpreters/tests/expression.cpp (100%)
rename dbms/{src => }/Interpreters/tests/expression_analyzer.cpp (100%)
rename dbms/{src => }/Interpreters/tests/gtest_cycle_aliases.cpp (100%)
rename dbms/{src => }/Interpreters/tests/gtest_merge_tree_set_index.cpp (100%)
rename dbms/{src => }/Interpreters/tests/hash_map.cpp (100%)
rename dbms/{src => }/Interpreters/tests/hash_map3.cpp (100%)
rename dbms/{src => }/Interpreters/tests/hash_map_lookup.cpp (100%)
rename dbms/{src => }/Interpreters/tests/hash_map_string.cpp (100%)
rename dbms/{src => }/Interpreters/tests/hash_map_string_2.cpp (100%)
rename dbms/{src => }/Interpreters/tests/hash_map_string_3.cpp (100%)
rename dbms/{src => }/Interpreters/tests/hash_map_string_small.cpp (100%)
rename dbms/{src => }/Interpreters/tests/in_join_subqueries_preprocessor.cpp (100%)
rename dbms/{src => }/Interpreters/tests/internal_iotop.cpp (100%)
rename dbms/{src => }/Interpreters/tests/logical_expressions_optimizer.cpp (100%)
rename dbms/{src => }/Interpreters/tests/select_query.cpp (100%)
rename dbms/{src => }/Interpreters/tests/string_hash_map.cpp (100%)
rename dbms/{src => }/Interpreters/tests/two_level_hash_map.cpp (100%)
rename dbms/{src => }/Interpreters/tests/users.cpp (100%)
rename dbms/{src => }/NOTICE (100%)
rename dbms/{src => }/Parsers/ASTAlterQuery.cpp (100%)
rename dbms/{src => }/Parsers/ASTAlterQuery.h (100%)
rename dbms/{src => }/Parsers/ASTAssignment.h (100%)
rename dbms/{src => }/Parsers/ASTAsterisk.cpp (100%)
rename dbms/{src => }/Parsers/ASTAsterisk.h (100%)
rename dbms/{src => }/Parsers/ASTCheckQuery.h (100%)
rename dbms/{src => }/Parsers/ASTColumnDeclaration.cpp (100%)
rename dbms/{src => }/Parsers/ASTColumnDeclaration.h (100%)
rename dbms/{src => }/Parsers/ASTColumnsMatcher.cpp (100%)
rename dbms/{src => }/Parsers/ASTColumnsMatcher.h (100%)
rename dbms/{src => }/Parsers/ASTConstraintDeclaration.cpp (100%)
rename dbms/{src => }/Parsers/ASTConstraintDeclaration.h (100%)
rename dbms/{src => }/Parsers/ASTCreateQuery.cpp (100%)
rename dbms/{src => }/Parsers/ASTCreateQuery.h (100%)
rename dbms/{src => }/Parsers/ASTCreateQuotaQuery.cpp (100%)
rename dbms/{src => }/Parsers/ASTCreateQuotaQuery.h (100%)
rename dbms/{src => }/Parsers/ASTCreateRoleQuery.cpp (100%)
rename dbms/{src => }/Parsers/ASTCreateRoleQuery.h (100%)
rename dbms/{src => }/Parsers/ASTCreateRowPolicyQuery.cpp (100%)
rename dbms/{src => }/Parsers/ASTCreateRowPolicyQuery.h (100%)
rename dbms/{src => }/Parsers/ASTCreateSettingsProfileQuery.cpp (100%)
rename dbms/{src => }/Parsers/ASTCreateSettingsProfileQuery.h (100%)
rename dbms/{src => }/Parsers/ASTCreateUserQuery.cpp (100%)
rename dbms/{src => }/Parsers/ASTCreateUserQuery.h (100%)
rename dbms/{src => }/Parsers/ASTDictionary.cpp (100%)
rename dbms/{src => }/Parsers/ASTDictionary.h (100%)
rename dbms/{src => }/Parsers/ASTDictionaryAttributeDeclaration.cpp (100%)
rename dbms/{src => }/Parsers/ASTDictionaryAttributeDeclaration.h (100%)
rename dbms/{src => }/Parsers/ASTDropAccessEntityQuery.cpp (100%)
rename dbms/{src => }/Parsers/ASTDropAccessEntityQuery.h (100%)
rename dbms/{src => }/Parsers/ASTDropQuery.cpp (100%)
rename dbms/{src => }/Parsers/ASTDropQuery.h (100%)
rename dbms/{src => }/Parsers/ASTEnumElement.h (100%)
rename dbms/{src => }/Parsers/ASTExplainQuery.h (100%)
rename dbms/{src => }/Parsers/ASTExpressionList.cpp (100%)
rename dbms/{src => }/Parsers/ASTExpressionList.h (100%)
rename dbms/{src => }/Parsers/ASTExtendedRoleSet.cpp (100%)
rename dbms/{src => }/Parsers/ASTExtendedRoleSet.h (100%)
rename dbms/{src => }/Parsers/ASTFunction.cpp (100%)
rename dbms/{src => }/Parsers/ASTFunction.h (100%)
rename dbms/{src => }/Parsers/ASTFunctionWithKeyValueArguments.cpp (100%)
rename dbms/{src => }/Parsers/ASTFunctionWithKeyValueArguments.h (100%)
rename dbms/{src => }/Parsers/ASTGrantQuery.cpp (100%)
rename dbms/{src => }/Parsers/ASTGrantQuery.h (100%)
rename dbms/{src => }/Parsers/ASTIdentifier.cpp (100%)
rename dbms/{src => }/Parsers/ASTIdentifier.h (100%)
rename dbms/{src => }/Parsers/ASTIndexDeclaration.h (100%)
rename dbms/{src => }/Parsers/ASTInsertQuery.cpp (100%)
rename dbms/{src => }/Parsers/ASTInsertQuery.h (100%)
rename dbms/{src => }/Parsers/ASTKillQueryQuery.cpp (100%)
rename dbms/{src => }/Parsers/ASTKillQueryQuery.h (100%)
rename dbms/{src => }/Parsers/ASTLiteral.cpp (100%)
rename dbms/{src => }/Parsers/ASTLiteral.h (100%)
rename dbms/{src => }/Parsers/ASTNameTypePair.h (100%)
rename dbms/{src => }/Parsers/ASTOptimizeQuery.cpp (100%)
rename dbms/{src => }/Parsers/ASTOptimizeQuery.h (100%)
rename dbms/{src => }/Parsers/ASTOrderByElement.cpp (100%)
rename dbms/{src => }/Parsers/ASTOrderByElement.h (100%)
rename dbms/{src => }/Parsers/ASTPartition.cpp (100%)
rename dbms/{src => }/Parsers/ASTPartition.h (100%)
rename dbms/{src => }/Parsers/ASTQualifiedAsterisk.cpp (100%)
rename dbms/{src => }/Parsers/ASTQualifiedAsterisk.h (100%)
rename dbms/{src => }/Parsers/ASTQueryParameter.cpp (100%)
rename dbms/{src => }/Parsers/ASTQueryParameter.h (100%)
rename dbms/{src => }/Parsers/ASTQueryWithOnCluster.cpp (100%)
rename dbms/{src => }/Parsers/ASTQueryWithOnCluster.h (100%)
rename dbms/{src => }/Parsers/ASTQueryWithOutput.cpp (100%)
rename dbms/{src => }/Parsers/ASTQueryWithOutput.h (100%)
rename dbms/{src => }/Parsers/ASTQueryWithTableAndOutput.cpp (100%)
rename dbms/{src => }/Parsers/ASTQueryWithTableAndOutput.h (100%)
rename dbms/{src => }/Parsers/ASTRenameQuery.h (100%)
rename dbms/{src => }/Parsers/ASTSampleRatio.cpp (100%)
rename dbms/{src => }/Parsers/ASTSampleRatio.h (100%)
rename dbms/{src => }/Parsers/ASTSelectQuery.cpp (100%)
rename dbms/{src => }/Parsers/ASTSelectQuery.h (100%)
rename dbms/{src => }/Parsers/ASTSelectWithUnionQuery.cpp (100%)
rename dbms/{src => }/Parsers/ASTSelectWithUnionQuery.h (100%)
rename dbms/{src => }/Parsers/ASTSetQuery.h (100%)
rename dbms/{src => }/Parsers/ASTSetRoleQuery.cpp (100%)
rename dbms/{src => }/Parsers/ASTSetRoleQuery.h (100%)
rename dbms/{src => }/Parsers/ASTSettingsProfileElement.cpp (100%)
rename dbms/{src => }/Parsers/ASTSettingsProfileElement.h (100%)
rename dbms/{src => }/Parsers/ASTShowCreateAccessEntityQuery.cpp (100%)
rename dbms/{src => }/Parsers/ASTShowCreateAccessEntityQuery.h (100%)
rename dbms/{src => }/Parsers/ASTShowGrantsQuery.cpp (100%)
rename dbms/{src => }/Parsers/ASTShowGrantsQuery.h (100%)
rename dbms/{src => }/Parsers/ASTShowProcesslistQuery.h (100%)
rename dbms/{src => }/Parsers/ASTShowQuotasQuery.cpp (100%)
rename dbms/{src => }/Parsers/ASTShowQuotasQuery.h (100%)
rename dbms/{src => }/Parsers/ASTShowRowPoliciesQuery.cpp (100%)
rename dbms/{src => }/Parsers/ASTShowRowPoliciesQuery.h (100%)
rename dbms/{src => }/Parsers/ASTShowTablesQuery.cpp (100%)
rename dbms/{src => }/Parsers/ASTShowTablesQuery.h (100%)
rename dbms/{src => }/Parsers/ASTSubquery.cpp (100%)
rename dbms/{src => }/Parsers/ASTSubquery.h (100%)
rename dbms/{src => }/Parsers/ASTSystemQuery.cpp (100%)
rename dbms/{src => }/Parsers/ASTSystemQuery.h (100%)
rename dbms/{src => }/Parsers/ASTTTLElement.cpp (100%)
rename dbms/{src => }/Parsers/ASTTTLElement.h (100%)
rename dbms/{src => }/Parsers/ASTTablesInSelectQuery.cpp (100%)
rename dbms/{src => }/Parsers/ASTTablesInSelectQuery.h (100%)
rename dbms/{src => }/Parsers/ASTUseQuery.h (100%)
rename dbms/{src => }/Parsers/ASTWatchQuery.h (100%)
rename dbms/{src => }/Parsers/ASTWithAlias.cpp (100%)
rename dbms/{src => }/Parsers/ASTWithAlias.h (100%)
rename dbms/{src => }/Parsers/CMakeLists.txt (87%)
rename dbms/{src => }/Parsers/CommonParsers.cpp (100%)
rename dbms/{src => }/Parsers/CommonParsers.h (100%)
rename dbms/{src => }/Parsers/DumpASTNode.h (100%)
rename dbms/{src => }/Parsers/ExpressionElementParsers.cpp (100%)
rename dbms/{src => }/Parsers/ExpressionElementParsers.h (100%)
rename dbms/{src => }/Parsers/ExpressionListParsers.cpp (100%)
rename dbms/{src => }/Parsers/ExpressionListParsers.h (100%)
rename dbms/{src => }/Parsers/IAST.cpp (100%)
rename dbms/{src => }/Parsers/IAST.h (100%)
rename dbms/{src => }/Parsers/IAST_fwd.h (100%)
rename dbms/{src => }/Parsers/IParser.h (100%)
rename dbms/{src => }/Parsers/IParserBase.cpp (100%)
rename dbms/{src => }/Parsers/IParserBase.h (100%)
rename dbms/{src => }/Parsers/IdentifierQuotingStyle.h (100%)
rename dbms/{src => }/Parsers/Lexer.cpp (100%)
rename dbms/{src => }/Parsers/Lexer.h (100%)
rename dbms/{src => }/Parsers/ParserAlterQuery.cpp (100%)
rename dbms/{src => }/Parsers/ParserAlterQuery.h (100%)
rename dbms/{src => }/Parsers/ParserCase.cpp (100%)
rename dbms/{src => }/Parsers/ParserCase.h (100%)
rename dbms/{src => }/Parsers/ParserCheckQuery.cpp (100%)
rename dbms/{src => }/Parsers/ParserCheckQuery.h (100%)
rename dbms/{src => }/Parsers/ParserCreateQuery.cpp (100%)
rename dbms/{src => }/Parsers/ParserCreateQuery.h (100%)
rename dbms/{src => }/Parsers/ParserCreateQuotaQuery.cpp (100%)
rename dbms/{src => }/Parsers/ParserCreateQuotaQuery.h (100%)
rename dbms/{src => }/Parsers/ParserCreateRoleQuery.cpp (100%)
rename dbms/{src => }/Parsers/ParserCreateRoleQuery.h (100%)
rename dbms/{src => }/Parsers/ParserCreateRowPolicyQuery.cpp (100%)
rename dbms/{src => }/Parsers/ParserCreateRowPolicyQuery.h (100%)
rename dbms/{src => }/Parsers/ParserCreateSettingsProfileQuery.cpp (100%)
rename dbms/{src => }/Parsers/ParserCreateSettingsProfileQuery.h (100%)
rename dbms/{src => }/Parsers/ParserCreateUserQuery.cpp (100%)
rename dbms/{src => }/Parsers/ParserCreateUserQuery.h (100%)
rename dbms/{src => }/Parsers/ParserDescribeTableQuery.cpp (100%)
rename dbms/{src => }/Parsers/ParserDescribeTableQuery.h (100%)
rename dbms/{src => }/Parsers/ParserDictionary.cpp (100%)
rename dbms/{src => }/Parsers/ParserDictionary.h (100%)
rename dbms/{src => }/Parsers/ParserDictionaryAttributeDeclaration.cpp (100%)
rename dbms/{src => }/Parsers/ParserDictionaryAttributeDeclaration.h (100%)
rename dbms/{src => }/Parsers/ParserDropAccessEntityQuery.cpp (100%)
rename dbms/{src => }/Parsers/ParserDropAccessEntityQuery.h (100%)
rename dbms/{src => }/Parsers/ParserDropQuery.cpp (100%)
rename dbms/{src => }/Parsers/ParserDropQuery.h (100%)
rename dbms/{src => }/Parsers/ParserExtendedRoleSet.cpp (100%)
rename dbms/{src => }/Parsers/ParserExtendedRoleSet.h (100%)
rename dbms/{src => }/Parsers/ParserGrantQuery.cpp (100%)
rename dbms/{src => }/Parsers/ParserGrantQuery.h (100%)
rename dbms/{src => }/Parsers/ParserInsertQuery.cpp (100%)
rename dbms/{src => }/Parsers/ParserInsertQuery.h (100%)
rename dbms/{src => }/Parsers/ParserKillQueryQuery.cpp (100%)
rename dbms/{src => }/Parsers/ParserKillQueryQuery.h (100%)
rename dbms/{src => }/Parsers/ParserOptimizeQuery.cpp (100%)
rename dbms/{src => }/Parsers/ParserOptimizeQuery.h (100%)
rename dbms/{src => }/Parsers/ParserPartition.cpp (100%)
rename dbms/{src => }/Parsers/ParserPartition.h (100%)
rename dbms/{src => }/Parsers/ParserQuery.cpp (100%)
rename dbms/{src => }/Parsers/ParserQuery.h (100%)
rename dbms/{src => }/Parsers/ParserQueryWithOutput.cpp (100%)
rename dbms/{src => }/Parsers/ParserQueryWithOutput.h (100%)
rename dbms/{src => }/Parsers/ParserRenameQuery.cpp (100%)
rename dbms/{src => }/Parsers/ParserRenameQuery.h (100%)
rename dbms/{src => }/Parsers/ParserSampleRatio.cpp (100%)
rename dbms/{src => }/Parsers/ParserSampleRatio.h (100%)
rename dbms/{src => }/Parsers/ParserSelectQuery.cpp (100%)
rename dbms/{src => }/Parsers/ParserSelectQuery.h (100%)
rename dbms/{src => }/Parsers/ParserSelectWithUnionQuery.cpp (100%)
rename dbms/{src => }/Parsers/ParserSelectWithUnionQuery.h (100%)
rename dbms/{src => }/Parsers/ParserSetQuery.cpp (100%)
rename dbms/{src => }/Parsers/ParserSetQuery.h (100%)
rename dbms/{src => }/Parsers/ParserSetRoleQuery.cpp (100%)
rename dbms/{src => }/Parsers/ParserSetRoleQuery.h (100%)
rename dbms/{src => }/Parsers/ParserSettingsProfileElement.cpp (100%)
rename dbms/{src => }/Parsers/ParserSettingsProfileElement.h (100%)
rename dbms/{src => }/Parsers/ParserShowCreateAccessEntityQuery.cpp (100%)
rename dbms/{src => }/Parsers/ParserShowCreateAccessEntityQuery.h (100%)
rename dbms/{src => }/Parsers/ParserShowGrantsQuery.cpp (100%)
rename dbms/{src => }/Parsers/ParserShowGrantsQuery.h (100%)
rename dbms/{src => }/Parsers/ParserShowProcesslistQuery.h (100%)
rename dbms/{src => }/Parsers/ParserShowQuotasQuery.cpp (100%)
rename dbms/{src => }/Parsers/ParserShowQuotasQuery.h (100%)
rename dbms/{src => }/Parsers/ParserShowRowPoliciesQuery.cpp (100%)
rename dbms/{src => }/Parsers/ParserShowRowPoliciesQuery.h (100%)
rename dbms/{src => }/Parsers/ParserShowTablesQuery.cpp (100%)
rename dbms/{src => }/Parsers/ParserShowTablesQuery.h (100%)
rename dbms/{src => }/Parsers/ParserSystemQuery.cpp (100%)
rename dbms/{src => }/Parsers/ParserSystemQuery.h (100%)
rename dbms/{src => }/Parsers/ParserTablePropertiesQuery.cpp (100%)
rename dbms/{src => }/Parsers/ParserTablePropertiesQuery.h (100%)
rename dbms/{src => }/Parsers/ParserTablesInSelectQuery.cpp (100%)
rename dbms/{src => }/Parsers/ParserTablesInSelectQuery.h (100%)
rename dbms/{src => }/Parsers/ParserUnionQueryElement.cpp (100%)
rename dbms/{src => }/Parsers/ParserUnionQueryElement.h (100%)
rename dbms/{src => }/Parsers/ParserUseQuery.cpp (100%)
rename dbms/{src => }/Parsers/ParserUseQuery.h (100%)
rename dbms/{src => }/Parsers/ParserWatchQuery.cpp (100%)
rename dbms/{src => }/Parsers/ParserWatchQuery.h (100%)
rename dbms/{src => }/Parsers/StringRange.h (100%)
rename dbms/{src => }/Parsers/TablePropertiesQueriesASTs.h (100%)
rename dbms/{src => }/Parsers/TokenIterator.cpp (100%)
rename dbms/{src => }/Parsers/TokenIterator.h (100%)
rename dbms/{src => }/Parsers/formatAST.cpp (100%)
rename dbms/{src => }/Parsers/formatAST.h (100%)
rename dbms/{src => }/Parsers/iostream_debug_helpers.cpp (100%)
rename dbms/{src => }/Parsers/iostream_debug_helpers.h (100%)
rename dbms/{src => }/Parsers/parseDatabaseAndTableName.cpp (100%)
rename dbms/{src => }/Parsers/parseDatabaseAndTableName.h (100%)
rename dbms/{src => }/Parsers/parseIdentifierOrStringLiteral.cpp (100%)
rename dbms/{src => }/Parsers/parseIdentifierOrStringLiteral.h (100%)
rename dbms/{src => }/Parsers/parseIntervalKind.cpp (100%)
rename dbms/{src => }/Parsers/parseIntervalKind.h (100%)
rename dbms/{src => }/Parsers/parseQuery.cpp (100%)
rename dbms/{src => }/Parsers/parseQuery.h (100%)
rename dbms/{src => }/Parsers/parseUserName.cpp (100%)
rename dbms/{src => }/Parsers/parseUserName.h (100%)
rename dbms/{src => }/Parsers/queryToString.cpp (100%)
rename dbms/{src => }/Parsers/queryToString.h (100%)
rename dbms/{src => }/Parsers/tests/CMakeLists.txt (100%)
rename dbms/{src => }/Parsers/tests/create_parser.cpp (100%)
rename dbms/{src => }/Parsers/tests/gtest_dictionary_parser.cpp (100%)
rename dbms/{src => }/Parsers/tests/lexer.cpp (96%)
rename dbms/{src => }/Parsers/tests/select_parser.cpp (100%)
rename dbms/{src => }/Processors/CMakeLists.txt (100%)
rename dbms/{src => }/Processors/Chunk.cpp (100%)
rename dbms/{src => }/Processors/Chunk.h (100%)
rename dbms/{src => }/Processors/ConcatProcessor.cpp (100%)
rename dbms/{src => }/Processors/ConcatProcessor.h (100%)
rename dbms/{src => }/Processors/DelayedPortsProcessor.cpp (100%)
rename dbms/{src => }/Processors/DelayedPortsProcessor.h (100%)
rename dbms/{src => }/Processors/Executors/ParallelPipelineExecutor.cpp (100%)
rename dbms/{src => }/Processors/Executors/ParallelPipelineExecutor.h (100%)
rename dbms/{src => }/Processors/Executors/PipelineExecutor.cpp (100%)
rename dbms/{src => }/Processors/Executors/PipelineExecutor.h (100%)
rename dbms/{src => }/Processors/Executors/SequentialPipelineExecutor.cpp (100%)
rename dbms/{src => }/Processors/Executors/SequentialPipelineExecutor.h (100%)
rename dbms/{src => }/Processors/Executors/ThreadsQueue.h (100%)
rename dbms/{src => }/Processors/Executors/TreeExecutorBlockInputStream.cpp (100%)
rename dbms/{src => }/Processors/Executors/TreeExecutorBlockInputStream.h (100%)
rename dbms/{src => }/Processors/Executors/traverse.h (100%)
rename dbms/{src => }/Processors/ForkProcessor.cpp (100%)
rename dbms/{src => }/Processors/ForkProcessor.h (100%)
rename dbms/{src => }/Processors/Formats/IInputFormat.cpp (100%)
rename dbms/{src => }/Processors/Formats/IInputFormat.h (100%)
rename dbms/{src => }/Processors/Formats/IOutputFormat.cpp (100%)
rename dbms/{src => }/Processors/Formats/IOutputFormat.h (100%)
rename dbms/{src => }/Processors/Formats/IRowInputFormat.cpp (100%)
rename dbms/{src => }/Processors/Formats/IRowInputFormat.h (100%)
rename dbms/{src => }/Processors/Formats/IRowOutputFormat.cpp (100%)
rename dbms/{src => }/Processors/Formats/IRowOutputFormat.h (100%)
rename dbms/{src => }/Processors/Formats/Impl/ArrowColumnToCHColumn.cpp (100%)
rename dbms/{src => }/Processors/Formats/Impl/ArrowColumnToCHColumn.h (100%)
rename dbms/{src => }/Processors/Formats/Impl/AvroRowInputFormat.cpp (100%)
rename dbms/{src => }/Processors/Formats/Impl/AvroRowInputFormat.h (100%)
rename dbms/{src => }/Processors/Formats/Impl/AvroRowOutputFormat.cpp (100%)
rename dbms/{src => }/Processors/Formats/Impl/AvroRowOutputFormat.h (100%)
rename dbms/{src => }/Processors/Formats/Impl/BinaryRowInputFormat.cpp (100%)
rename dbms/{src => }/Processors/Formats/Impl/BinaryRowInputFormat.h (100%)
rename dbms/{src => }/Processors/Formats/Impl/BinaryRowOutputFormat.cpp (100%)
rename dbms/{src => }/Processors/Formats/Impl/BinaryRowOutputFormat.h (100%)
rename dbms/{src => }/Processors/Formats/Impl/CMakeLists.txt (100%)
rename dbms/{src => }/Processors/Formats/Impl/CSVRowInputFormat.cpp (100%)
rename dbms/{src => }/Processors/Formats/Impl/CSVRowInputFormat.h (100%)
rename dbms/{src => }/Processors/Formats/Impl/CSVRowOutputFormat.cpp (100%)
rename dbms/{src => }/Processors/Formats/Impl/CSVRowOutputFormat.h (100%)
rename dbms/{src => }/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp (100%)
rename dbms/{src => }/Processors/Formats/Impl/CapnProtoRowInputFormat.h (100%)
rename dbms/{src => }/Processors/Formats/Impl/ConstantExpressionTemplate.cpp (100%)
rename dbms/{src => }/Processors/Formats/Impl/ConstantExpressionTemplate.h (100%)
rename dbms/{src => }/Processors/Formats/Impl/JSONCompactEachRowRowInputFormat.cpp (100%)
rename dbms/{src => }/Processors/Formats/Impl/JSONCompactEachRowRowInputFormat.h (100%)
rename dbms/{src => }/Processors/Formats/Impl/JSONCompactEachRowRowOutputFormat.cpp (100%)
rename dbms/{src => }/Processors/Formats/Impl/JSONCompactEachRowRowOutputFormat.h (100%)
rename dbms/{src => }/Processors/Formats/Impl/JSONCompactRowOutputFormat.cpp (100%)
rename dbms/{src => }/Processors/Formats/Impl/JSONCompactRowOutputFormat.h (100%)
rename dbms/{src => }/Processors/Formats/Impl/JSONEachRowRowInputFormat.cpp (100%)
rename dbms/{src => }/Processors/Formats/Impl/JSONEachRowRowInputFormat.h (100%)
rename dbms/{src => }/Processors/Formats/Impl/JSONEachRowRowOutputFormat.cpp (100%)
rename dbms/{src => }/Processors/Formats/Impl/JSONEachRowRowOutputFormat.h (100%)
rename dbms/{src => }/Processors/Formats/Impl/JSONEachRowWithProgressRowOutputFormat.cpp (100%)
rename dbms/{src => }/Processors/Formats/Impl/JSONEachRowWithProgressRowOutputFormat.h (100%)
rename dbms/{src => }/Processors/Formats/Impl/JSONRowOutputFormat.cpp (100%)
rename dbms/{src => }/Processors/Formats/Impl/JSONRowOutputFormat.h (100%)
rename dbms/{src => }/Processors/Formats/Impl/MySQLOutputFormat.cpp (100%)
rename dbms/{src => }/Processors/Formats/Impl/MySQLOutputFormat.h (100%)
rename dbms/{src => }/Processors/Formats/Impl/NativeFormat.cpp (100%)
rename dbms/{src => }/Processors/Formats/Impl/NullFormat.cpp (100%)
rename dbms/{src => }/Processors/Formats/Impl/ODBCDriver2BlockOutputFormat.cpp (100%)
rename dbms/{src => }/Processors/Formats/Impl/ODBCDriver2BlockOutputFormat.h (100%)
rename dbms/{src => }/Processors/Formats/Impl/ODBCDriverBlockOutputFormat.cpp (100%)
rename dbms/{src => }/Processors/Formats/Impl/ODBCDriverBlockOutputFormat.h (100%)
rename dbms/{src => }/Processors/Formats/Impl/ORCBlockInputFormat.cpp (100%)
rename dbms/{src => }/Processors/Formats/Impl/ORCBlockInputFormat.h (100%)
rename dbms/{src => }/Processors/Formats/Impl/ParquetBlockInputFormat.cpp (100%)
rename dbms/{src => }/Processors/Formats/Impl/ParquetBlockInputFormat.h (100%)
rename dbms/{src => }/Processors/Formats/Impl/ParquetBlockOutputFormat.cpp (100%)
rename dbms/{src => }/Processors/Formats/Impl/ParquetBlockOutputFormat.h (100%)
rename dbms/{src => }/Processors/Formats/Impl/PrettyBlockOutputFormat.cpp (100%)
rename dbms/{src => }/Processors/Formats/Impl/PrettyBlockOutputFormat.h (100%)
rename dbms/{src => }/Processors/Formats/Impl/PrettyCompactBlockOutputFormat.cpp (100%)
rename dbms/{src => }/Processors/Formats/Impl/PrettyCompactBlockOutputFormat.h (100%)
rename dbms/{src => }/Processors/Formats/Impl/PrettySpaceBlockOutputFormat.cpp (100%)
rename dbms/{src => }/Processors/Formats/Impl/PrettySpaceBlockOutputFormat.h (100%)
rename dbms/{src => }/Processors/Formats/Impl/ProtobufRowInputFormat.cpp (100%)
rename dbms/{src => }/Processors/Formats/Impl/ProtobufRowInputFormat.h (100%)
rename dbms/{src => }/Processors/Formats/Impl/ProtobufRowOutputFormat.cpp (100%)
rename dbms/{src => }/Processors/Formats/Impl/ProtobufRowOutputFormat.h (100%)
rename dbms/{src => }/Processors/Formats/Impl/RegexpRowInputFormat.cpp (100%)
rename dbms/{src => }/Processors/Formats/Impl/RegexpRowInputFormat.h (100%)
rename dbms/{src => }/Processors/Formats/Impl/TSKVRowInputFormat.cpp (100%)
rename dbms/{src => }/Processors/Formats/Impl/TSKVRowInputFormat.h (100%)
rename dbms/{src => }/Processors/Formats/Impl/TSKVRowOutputFormat.cpp (100%)
rename dbms/{src => }/Processors/Formats/Impl/TSKVRowOutputFormat.h (100%)
rename dbms/{src => }/Processors/Formats/Impl/TabSeparatedRawRowOutputFormat.h (100%)
rename dbms/{src => }/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp (100%)
rename dbms/{src => }/Processors/Formats/Impl/TabSeparatedRowInputFormat.h (100%)
rename dbms/{src => }/Processors/Formats/Impl/TabSeparatedRowOutputFormat.cpp (100%)
rename dbms/{src => }/Processors/Formats/Impl/TabSeparatedRowOutputFormat.h (100%)
rename dbms/{src => }/Processors/Formats/Impl/TemplateBlockOutputFormat.cpp (100%)
rename dbms/{src => }/Processors/Formats/Impl/TemplateBlockOutputFormat.h (100%)
rename dbms/{src => }/Processors/Formats/Impl/TemplateRowInputFormat.cpp (100%)
rename dbms/{src => }/Processors/Formats/Impl/TemplateRowInputFormat.h (100%)
rename dbms/{src => }/Processors/Formats/Impl/ValuesBlockInputFormat.cpp (100%)
rename dbms/{src => }/Processors/Formats/Impl/ValuesBlockInputFormat.h (100%)
rename dbms/{src => }/Processors/Formats/Impl/ValuesRowOutputFormat.cpp (100%)
rename dbms/{src => }/Processors/Formats/Impl/ValuesRowOutputFormat.h (100%)
rename dbms/{src => }/Processors/Formats/Impl/VerticalRowOutputFormat.cpp (100%)
rename dbms/{src => }/Processors/Formats/Impl/VerticalRowOutputFormat.h (100%)
rename dbms/{src => }/Processors/Formats/Impl/XMLRowOutputFormat.cpp (100%)
rename dbms/{src => }/Processors/Formats/Impl/XMLRowOutputFormat.h (100%)
rename dbms/{src => }/Processors/Formats/InputStreamFromInputFormat.h (100%)
rename dbms/{src => }/Processors/Formats/LazyOutputFormat.cpp (100%)
rename dbms/{src => }/Processors/Formats/LazyOutputFormat.h (100%)
rename dbms/{src => }/Processors/Formats/OutputStreamToOutputFormat.cpp (100%)
rename dbms/{src => }/Processors/Formats/OutputStreamToOutputFormat.h (100%)
rename dbms/{src => }/Processors/Formats/RowInputFormatWithDiagnosticInfo.cpp (100%)
rename dbms/{src => }/Processors/Formats/RowInputFormatWithDiagnosticInfo.h (100%)
rename dbms/{src => }/Processors/IAccumulatingTransform.cpp (100%)
rename dbms/{src => }/Processors/IAccumulatingTransform.h (100%)
rename dbms/{src => }/Processors/IInflatingTransform.cpp (100%)
rename dbms/{src => }/Processors/IInflatingTransform.h (100%)
rename dbms/{src => }/Processors/IProcessor.cpp (100%)
rename dbms/{src => }/Processors/IProcessor.h (100%)
rename dbms/{src => }/Processors/ISimpleTransform.cpp (100%)
rename dbms/{src => }/Processors/ISimpleTransform.h (100%)
rename dbms/{src => }/Processors/ISink.cpp (100%)
rename dbms/{src => }/Processors/ISink.h (100%)
rename dbms/{src => }/Processors/ISource.cpp (100%)
rename dbms/{src => }/Processors/ISource.h (100%)
rename dbms/{src => }/Processors/LimitTransform.cpp (100%)
rename dbms/{src => }/Processors/LimitTransform.h (100%)
rename dbms/{src => }/Processors/NullSink.h (100%)
rename dbms/{src => }/Processors/Pipe.cpp (100%)
rename dbms/{src => }/Processors/Pipe.h (100%)
rename dbms/{src => }/Processors/Port.cpp (100%)
rename dbms/{src => }/Processors/Port.h (100%)
rename dbms/{src => }/Processors/QueryPipeline.cpp (100%)
rename dbms/{src => }/Processors/QueryPipeline.h (100%)
rename dbms/{src => }/Processors/QueueBuffer.h (100%)
rename dbms/{src => }/Processors/ResizeProcessor.cpp (100%)
rename dbms/{src => }/Processors/ResizeProcessor.h (100%)
rename dbms/{src => }/Processors/RowsBeforeLimitCounter.h (100%)
rename dbms/{src => }/Processors/Sources/NullSource.h (100%)
rename dbms/{src => }/Processors/Sources/SinkToOutputStream.cpp (100%)
rename dbms/{src => }/Processors/Sources/SinkToOutputStream.h (100%)
rename dbms/{src => }/Processors/Sources/SourceFromInputStream.cpp (100%)
rename dbms/{src => }/Processors/Sources/SourceFromInputStream.h (100%)
rename dbms/{src => }/Processors/Sources/SourceFromSingleChunk.h (100%)
rename dbms/{src => }/Processors/Sources/SourceWithProgress.cpp (100%)
rename dbms/{src => }/Processors/Sources/SourceWithProgress.h (100%)
rename dbms/{src => }/Processors/Transforms/AddingConstColumnTransform.h (100%)
rename dbms/{src => }/Processors/Transforms/AddingMissedTransform.cpp (100%)
rename dbms/{src => }/Processors/Transforms/AddingMissedTransform.h (100%)
rename dbms/{src => }/Processors/Transforms/AggregatingTransform.cpp (100%)
rename dbms/{src => }/Processors/Transforms/AggregatingTransform.h (100%)
rename dbms/{src => }/Processors/Transforms/ConvertingTransform.cpp (100%)
rename dbms/{src => }/Processors/Transforms/ConvertingTransform.h (100%)
rename dbms/{src => }/Processors/Transforms/CreatingSetsTransform.cpp (100%)
rename dbms/{src => }/Processors/Transforms/CreatingSetsTransform.h (100%)
rename dbms/{src => }/Processors/Transforms/CubeTransform.cpp (100%)
rename dbms/{src => }/Processors/Transforms/CubeTransform.h (100%)
rename dbms/{src => }/Processors/Transforms/DistinctTransform.cpp (100%)
rename dbms/{src => }/Processors/Transforms/DistinctTransform.h (100%)
rename dbms/{src => }/Processors/Transforms/ExpressionTransform.cpp (100%)
rename dbms/{src => }/Processors/Transforms/ExpressionTransform.h (100%)
rename dbms/{src => }/Processors/Transforms/ExtremesTransform.cpp (100%)
rename dbms/{src => }/Processors/Transforms/ExtremesTransform.h (100%)
rename dbms/{src => }/Processors/Transforms/FillingTransform.cpp (100%)
rename dbms/{src => }/Processors/Transforms/FillingTransform.h (100%)
rename dbms/{src => }/Processors/Transforms/FilterTransform.cpp (100%)
rename dbms/{src => }/Processors/Transforms/FilterTransform.h (100%)
rename dbms/{src => }/Processors/Transforms/FinishSortingTransform.cpp (100%)
rename dbms/{src => }/Processors/Transforms/FinishSortingTransform.h (100%)
rename dbms/{src => }/Processors/Transforms/InflatingExpressionTransform.cpp (100%)
rename dbms/{src => }/Processors/Transforms/InflatingExpressionTransform.h (100%)
rename dbms/{src => }/Processors/Transforms/LimitByTransform.cpp (100%)
rename dbms/{src => }/Processors/Transforms/LimitByTransform.h (100%)
rename dbms/{src => }/Processors/Transforms/LimitsCheckingTransform.cpp (100%)
rename dbms/{src => }/Processors/Transforms/LimitsCheckingTransform.h (100%)
rename dbms/{src => }/Processors/Transforms/MaterializingTransform.cpp (100%)
rename dbms/{src => }/Processors/Transforms/MaterializingTransform.h (100%)
rename dbms/{src => }/Processors/Transforms/MergeSortingTransform.cpp (100%)
rename dbms/{src => }/Processors/Transforms/MergeSortingTransform.h (100%)
rename dbms/{src => }/Processors/Transforms/MergingAggregatedMemoryEfficientTransform.cpp (100%)
rename dbms/{src => }/Processors/Transforms/MergingAggregatedMemoryEfficientTransform.h (100%)
rename dbms/{src => }/Processors/Transforms/MergingAggregatedTransform.cpp (100%)
rename dbms/{src => }/Processors/Transforms/MergingAggregatedTransform.h (100%)
rename dbms/{src => }/Processors/Transforms/MergingSortedTransform.cpp (100%)
rename dbms/{src => }/Processors/Transforms/MergingSortedTransform.h (100%)
rename dbms/{src => }/Processors/Transforms/PartialSortingTransform.cpp (100%)
rename dbms/{src => }/Processors/Transforms/PartialSortingTransform.h (100%)
rename dbms/{src => }/Processors/Transforms/ReverseTransform.cpp (100%)
rename dbms/{src => }/Processors/Transforms/ReverseTransform.h (100%)
rename dbms/{src => }/Processors/Transforms/RollupTransform.cpp (100%)
rename dbms/{src => }/Processors/Transforms/RollupTransform.h (100%)
rename dbms/{src => }/Processors/Transforms/SortingTransform.cpp (100%)
rename dbms/{src => }/Processors/Transforms/SortingTransform.h (100%)
rename dbms/{src => }/Processors/Transforms/TotalsHavingTransform.cpp (100%)
rename dbms/{src => }/Processors/Transforms/TotalsHavingTransform.h (100%)
rename dbms/{src => }/Processors/printPipeline.h (100%)
rename dbms/{src => }/Processors/tests/CMakeLists.txt (100%)
rename dbms/{src => }/Processors/tests/gtest_exception_on_incorrect_pipeline.cpp (100%)
rename dbms/{src => }/Processors/tests/processors_test.cpp (100%)
rename dbms/{src => }/Processors/tests/processors_test_aggregation.cpp (100%)
rename dbms/{src => }/Processors/tests/processors_test_chain.cpp (100%)
rename dbms/{src => }/Processors/tests/processors_test_expand_pipeline.cpp (100%)
rename dbms/{src => }/Processors/tests/processors_test_merge.cpp (100%)
rename dbms/{src => }/Processors/tests/processors_test_merge_sorting_transform.cpp (100%)
rename dbms/{src => }/Processors/tests/processors_test_merging_sorted_transform.cpp (100%)
rename dbms/{src => }/Storages/AlterCommands.cpp (100%)
rename dbms/{src => }/Storages/AlterCommands.h (100%)
rename dbms/{src => }/Storages/CMakeLists.txt (100%)
rename dbms/{src => }/Storages/CheckResults.h (100%)
rename dbms/{src => }/Storages/ColumnCodec.h (100%)
rename dbms/{src => }/Storages/ColumnDefault.cpp (100%)
rename dbms/{src => }/Storages/ColumnDefault.h (100%)
rename dbms/{src => }/Storages/ColumnDependency.h (100%)
rename dbms/{src => }/Storages/ColumnsDescription.cpp (100%)
rename dbms/{src => }/Storages/ColumnsDescription.h (100%)
rename dbms/{src => }/Storages/CompressionCodecSelector.h (100%)
rename dbms/{src => }/Storages/ConstraintsDescription.cpp (100%)
rename dbms/{src => }/Storages/ConstraintsDescription.h (100%)
rename dbms/{src => }/Storages/Distributed/DirectoryMonitor.cpp (100%)
rename dbms/{src => }/Storages/Distributed/DirectoryMonitor.h (100%)
rename dbms/{src => }/Storages/Distributed/DistributedBlockOutputStream.cpp (100%)
rename dbms/{src => }/Storages/Distributed/DistributedBlockOutputStream.h (100%)
rename dbms/{src => }/Storages/IStorage.cpp (100%)
rename dbms/{src => }/Storages/IStorage.h (100%)
rename dbms/{src => }/Storages/IStorage_fwd.h (100%)
rename dbms/{src => }/Storages/IndicesDescription.cpp (100%)
rename dbms/{src => }/Storages/IndicesDescription.h (100%)
rename dbms/{src => }/Storages/Kafka/Buffer_fwd.h (100%)
rename dbms/{src => }/Storages/Kafka/KafkaBlockInputStream.cpp (100%)
rename dbms/{src => }/Storages/Kafka/KafkaBlockInputStream.h (100%)
rename dbms/{src => }/Storages/Kafka/KafkaBlockOutputStream.cpp (100%)
rename dbms/{src => }/Storages/Kafka/KafkaBlockOutputStream.h (100%)
rename dbms/{src => }/Storages/Kafka/KafkaSettings.cpp (100%)
rename dbms/{src => }/Storages/Kafka/KafkaSettings.h (100%)
rename dbms/{src => }/Storages/Kafka/ReadBufferFromKafkaConsumer.cpp (100%)
rename dbms/{src => }/Storages/Kafka/ReadBufferFromKafkaConsumer.h (100%)
rename dbms/{src => }/Storages/Kafka/StorageKafka.cpp (100%)
rename dbms/{src => }/Storages/Kafka/StorageKafka.h (100%)
rename dbms/{src => }/Storages/Kafka/WriteBufferToKafkaProducer.cpp (100%)
rename dbms/{src => }/Storages/Kafka/WriteBufferToKafkaProducer.h (100%)
rename dbms/{src => }/Storages/LiveView/LiveViewBlockInputStream.h (100%)
rename dbms/{src => }/Storages/LiveView/LiveViewBlockOutputStream.h (100%)
rename dbms/{src => }/Storages/LiveView/LiveViewCommands.h (100%)
rename dbms/{src => }/Storages/LiveView/LiveViewEventsBlockInputStream.h (100%)
rename dbms/{src => }/Storages/LiveView/StorageBlocks.h (100%)
rename dbms/{src => }/Storages/LiveView/StorageLiveView.cpp (100%)
rename dbms/{src => }/Storages/LiveView/StorageLiveView.h (100%)
rename dbms/{src => }/Storages/MarkCache.h (100%)
rename dbms/{src => }/Storages/MergeTree/ActiveDataPartSet.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/ActiveDataPartSet.h (100%)
rename dbms/{src => }/Storages/MergeTree/AllMergeSelector.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/AllMergeSelector.h (100%)
rename dbms/{src => }/Storages/MergeTree/BackgroundProcessingPool.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/BackgroundProcessingPool.h (100%)
rename dbms/{src => }/Storages/MergeTree/BoolMask.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/BoolMask.h (100%)
rename dbms/{src => }/Storages/MergeTree/DataPartsExchange.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/DataPartsExchange.h (100%)
rename dbms/{src => }/Storages/MergeTree/EphemeralLockInZooKeeper.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/EphemeralLockInZooKeeper.h (100%)
rename dbms/{src => }/Storages/MergeTree/IMergeTreeDataPart.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/IMergeTreeDataPart.h (100%)
rename dbms/{src => }/Storages/MergeTree/IMergeTreeDataPartWriter.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/IMergeTreeDataPartWriter.h (100%)
rename dbms/{src => }/Storages/MergeTree/IMergeTreeReader.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/IMergeTreeReader.h (100%)
rename dbms/{src => }/Storages/MergeTree/IMergedBlockOutputStream.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/IMergedBlockOutputStream.h (100%)
rename dbms/{src => }/Storages/MergeTree/KeyCondition.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/KeyCondition.h (100%)
rename dbms/{src => }/Storages/MergeTree/LevelMergeSelector.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/LevelMergeSelector.h (100%)
rename dbms/{src => }/Storages/MergeTree/MarkRange.h (100%)
rename dbms/{src => }/Storages/MergeTree/MergeList.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/MergeList.h (100%)
rename dbms/{src => }/Storages/MergeTree/MergeSelector.h (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeBaseSelectProcessor.h (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeBlockOutputStream.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeBlockOutputStream.h (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeBlockReadUtils.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeBlockReadUtils.h (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeData.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeData.h (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeDataFormatVersion.h (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeDataMergerMutator.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeDataMergerMutator.h (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeDataPartChecksum.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeDataPartChecksum.h (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeDataPartCompact.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeDataPartCompact.h (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeDataPartTTLInfo.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeDataPartTTLInfo.h (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeDataPartType.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeDataPartType.h (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeDataPartWide.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeDataPartWide.h (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeDataPartWriterCompact.h (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeDataPartWriterWide.h (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeDataSelectExecutor.h (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeDataWriter.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeDataWriter.h (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeIOSettings.h (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeIndexAggregatorBloomFilter.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeIndexAggregatorBloomFilter.h (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeIndexBloomFilter.h (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.h (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeIndexFullText.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeIndexFullText.h (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeIndexGranularity.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeIndexGranularity.h (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeIndexGranularityInfo.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeIndexGranularityInfo.h (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeIndexGranularityInfo.h.gch (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeIndexGranuleBloomFilter.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeIndexGranuleBloomFilter.h (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeIndexMinMax.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeIndexMinMax.h (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeIndexReader.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeIndexReader.h (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeIndexSet.cpp (99%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeIndexSet.h (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeIndices.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeIndices.h (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeMarksLoader.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeMarksLoader.h (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeMutationEntry.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeMutationEntry.h (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeMutationStatus.h (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreePartInfo.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreePartInfo.h (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreePartition.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreePartition.h (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreePartsMover.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreePartsMover.h (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeRangeReader.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeRangeReader.h (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeReadPool.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeReadPool.h (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeReaderCompact.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeReaderCompact.h (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeReaderStream.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeReaderStream.h (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeReaderWide.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeReaderWide.h (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeReverseSelectProcessor.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeReverseSelectProcessor.h (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeSelectProcessor.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeSelectProcessor.h (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeSequentialBlockInputStream.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeSequentialBlockInputStream.h (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeSettings.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeSettings.h (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeThreadSelectBlockInputProcessor.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeThreadSelectBlockInputProcessor.h (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeWhereOptimizer.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/MergeTreeWhereOptimizer.h (100%)
rename dbms/{src => }/Storages/MergeTree/MergedBlockOutputStream.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/MergedBlockOutputStream.h (100%)
rename dbms/{src => }/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/MergedColumnOnlyOutputStream.h (100%)
rename dbms/{src => }/Storages/MergeTree/PartDestinationType.h (100%)
rename dbms/{src => }/Storages/MergeTree/RPNBuilder.h (100%)
rename dbms/{src => }/Storages/MergeTree/RangesInDataPart.h (100%)
rename dbms/{src => }/Storages/MergeTree/ReplicatedMergeTreeAddress.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/ReplicatedMergeTreeAddress.h (100%)
rename dbms/{src => }/Storages/MergeTree/ReplicatedMergeTreeAltersSequence.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/ReplicatedMergeTreeAltersSequence.h (100%)
rename dbms/{src => }/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.h (100%)
rename dbms/{src => }/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.h (100%)
rename dbms/{src => }/Storages/MergeTree/ReplicatedMergeTreeLogEntry.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/ReplicatedMergeTreeLogEntry.h (100%)
rename dbms/{src => }/Storages/MergeTree/ReplicatedMergeTreeMutationEntry.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/ReplicatedMergeTreeMutationEntry.h (100%)
rename dbms/{src => }/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.h (100%)
rename dbms/{src => }/Storages/MergeTree/ReplicatedMergeTreePartHeader.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/ReplicatedMergeTreePartHeader.h (100%)
rename dbms/{src => }/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/ReplicatedMergeTreeQueue.h (100%)
rename dbms/{src => }/Storages/MergeTree/ReplicatedMergeTreeQuorumAddedParts.h (100%)
rename dbms/{src => }/Storages/MergeTree/ReplicatedMergeTreeQuorumEntry.h (100%)
rename dbms/{src => }/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.h (100%)
rename dbms/{src => }/Storages/MergeTree/ReplicatedMergeTreeTableMetadata.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/ReplicatedMergeTreeTableMetadata.h (100%)
rename dbms/{src => }/Storages/MergeTree/SimpleMergeSelector.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/SimpleMergeSelector.h (100%)
rename dbms/{src => }/Storages/MergeTree/StorageFromMergeTreeDataPart.h (100%)
rename dbms/{src => }/Storages/MergeTree/TTLMergeSelector.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/TTLMergeSelector.h (100%)
rename dbms/{src => }/Storages/MergeTree/checkDataPart.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/checkDataPart.h (100%)
rename dbms/{src => }/Storages/MergeTree/localBackup.cpp (100%)
rename dbms/{src => }/Storages/MergeTree/localBackup.h (100%)
rename dbms/{src => }/Storages/MergeTree/registerStorageMergeTree.cpp (99%)
rename dbms/{src => }/Storages/MutationCommands.cpp (100%)
rename dbms/{src => }/Storages/MutationCommands.h (100%)
rename dbms/{src => }/Storages/PartitionCommands.cpp (100%)
rename dbms/{src => }/Storages/PartitionCommands.h (100%)
rename dbms/{src => }/Storages/ReadInOrderOptimizer.cpp (100%)
rename dbms/{src => }/Storages/ReadInOrderOptimizer.h (100%)
rename dbms/{src => }/Storages/SelectQueryInfo.h (100%)
rename dbms/{src => }/Storages/StorageBuffer.cpp (100%)
rename dbms/{src => }/Storages/StorageBuffer.h (100%)
rename dbms/{src => }/Storages/StorageDictionary.cpp (100%)
rename dbms/{src => }/Storages/StorageDictionary.h (100%)
rename dbms/{src => }/Storages/StorageDistributed.cpp (100%)
rename dbms/{src => }/Storages/StorageDistributed.h (100%)
rename dbms/{src => }/Storages/StorageFactory.cpp (100%)
rename dbms/{src => }/Storages/StorageFactory.h (100%)
rename dbms/{src => }/Storages/StorageFile.cpp (100%)
rename dbms/{src => }/Storages/StorageFile.h (100%)
rename dbms/{src => }/Storages/StorageGenerateRandom.cpp (100%)
rename dbms/{src => }/Storages/StorageGenerateRandom.h (100%)
rename dbms/{src => }/Storages/StorageHDFS.cpp (100%)
rename dbms/{src => }/Storages/StorageHDFS.h (100%)
rename dbms/{src => }/Storages/StorageInMemoryMetadata.cpp (100%)
rename dbms/{src => }/Storages/StorageInMemoryMetadata.h (100%)
rename dbms/{src => }/Storages/StorageInput.cpp (100%)
rename dbms/{src => }/Storages/StorageInput.h (100%)
rename dbms/{src => }/Storages/StorageJoin.cpp (100%)
rename dbms/{src => }/Storages/StorageJoin.h (100%)
rename dbms/{src => }/Storages/StorageLog.cpp (100%)
rename dbms/{src => }/Storages/StorageLog.h (100%)
rename dbms/{src => }/Storages/StorageLogSettings.cpp (100%)
rename dbms/{src => }/Storages/StorageLogSettings.h (100%)
rename dbms/{src => }/Storages/StorageMaterializedView.cpp (100%)
rename dbms/{src => }/Storages/StorageMaterializedView.h (100%)
rename dbms/{src => }/Storages/StorageMemory.cpp (100%)
rename dbms/{src => }/Storages/StorageMemory.h (100%)
rename dbms/{src => }/Storages/StorageMerge.cpp (100%)
rename dbms/{src => }/Storages/StorageMerge.h (100%)
rename dbms/{src => }/Storages/StorageMergeTree.cpp (100%)
rename dbms/{src => }/Storages/StorageMergeTree.h (100%)
rename dbms/{src => }/Storages/StorageMySQL.cpp (100%)
rename dbms/{src => }/Storages/StorageMySQL.h (100%)
rename dbms/{src => }/Storages/StorageNull.cpp (100%)
rename dbms/{src => }/Storages/StorageNull.h (100%)
rename dbms/{src => }/Storages/StorageReplicatedMergeTree.cpp (100%)
rename dbms/{src => }/Storages/StorageReplicatedMergeTree.h (100%)
rename dbms/{src => }/Storages/StorageS3.cpp (100%)
rename dbms/{src => }/Storages/StorageS3.h (100%)
rename dbms/{src => }/Storages/StorageSet.cpp (100%)
rename dbms/{src => }/Storages/StorageSet.h (100%)
rename dbms/{src => }/Storages/StorageStripeLog.cpp (100%)
rename dbms/{src => }/Storages/StorageStripeLog.h (100%)
rename dbms/{src => }/Storages/StorageTinyLog.cpp (100%)
rename dbms/{src => }/Storages/StorageTinyLog.h (100%)
rename dbms/{src => }/Storages/StorageURL.cpp (100%)
rename dbms/{src => }/Storages/StorageURL.h (100%)
rename dbms/{src => }/Storages/StorageValues.cpp (100%)
rename dbms/{src => }/Storages/StorageValues.h (100%)
rename dbms/{src => }/Storages/StorageView.cpp (100%)
rename dbms/{src => }/Storages/StorageView.h (100%)
rename dbms/{src => }/Storages/StorageXDBC.cpp (100%)
rename dbms/{src => }/Storages/StorageXDBC.h (100%)
rename dbms/{src => }/Storages/System/CMakeLists.txt (100%)
rename dbms/{src => }/Storages/System/IStorageSystemOneBlock.h (100%)
rename dbms/{src => }/Storages/System/StorageSystemAggregateFunctionCombinators.cpp (100%)
rename dbms/{src => }/Storages/System/StorageSystemAggregateFunctionCombinators.h (100%)
rename dbms/{src => }/Storages/System/StorageSystemAsynchronousMetrics.cpp (100%)
rename dbms/{src => }/Storages/System/StorageSystemAsynchronousMetrics.h (100%)
rename dbms/{src => }/Storages/System/StorageSystemBuildOptions.cpp (100%)
rename dbms/{src => }/Storages/System/StorageSystemBuildOptions.generated.cpp.in (100%)
rename dbms/{src => }/Storages/System/StorageSystemBuildOptions.h (100%)
rename dbms/{src => }/Storages/System/StorageSystemClusters.cpp (100%)
rename dbms/{src => }/Storages/System/StorageSystemClusters.h (100%)
rename dbms/{src => }/Storages/System/StorageSystemCollations.cpp (100%)
rename dbms/{src => }/Storages/System/StorageSystemCollations.h (100%)
rename dbms/{src => }/Storages/System/StorageSystemColumns.cpp (100%)
rename dbms/{src => }/Storages/System/StorageSystemColumns.h (100%)
rename dbms/{src => }/Storages/System/StorageSystemContributors.cpp (100%)
rename dbms/{src => }/Storages/System/StorageSystemContributors.generated.cpp (100%)
rename dbms/{src => }/Storages/System/StorageSystemContributors.h (100%)
rename dbms/{src => }/Storages/System/StorageSystemContributors.sh (100%)
rename dbms/{src => }/Storages/System/StorageSystemDataTypeFamilies.cpp (100%)
rename dbms/{src => }/Storages/System/StorageSystemDataTypeFamilies.h (100%)
rename dbms/{src => }/Storages/System/StorageSystemDatabases.cpp (100%)
rename dbms/{src => }/Storages/System/StorageSystemDatabases.h (100%)
rename dbms/{src => }/Storages/System/StorageSystemDetachedParts.cpp (100%)
rename dbms/{src => }/Storages/System/StorageSystemDetachedParts.h (100%)
rename dbms/{src => }/Storages/System/StorageSystemDictionaries.cpp (100%)
rename dbms/{src => }/Storages/System/StorageSystemDictionaries.h (100%)
rename dbms/{src => }/Storages/System/StorageSystemDisks.cpp (100%)
rename dbms/{src => }/Storages/System/StorageSystemDisks.h (100%)
rename dbms/{src => }/Storages/System/StorageSystemEvents.cpp (100%)
rename dbms/{src => }/Storages/System/StorageSystemEvents.h (100%)
rename dbms/{src => }/Storages/System/StorageSystemFormats.cpp (100%)
rename dbms/{src => }/Storages/System/StorageSystemFormats.h (100%)
rename dbms/{src => }/Storages/System/StorageSystemFunctions.cpp (100%)
rename dbms/{src => }/Storages/System/StorageSystemFunctions.h (100%)
rename dbms/{src => }/Storages/System/StorageSystemGraphite.cpp (100%)
rename dbms/{src => }/Storages/System/StorageSystemGraphite.h (100%)
rename dbms/{src => }/Storages/System/StorageSystemMacros.cpp (100%)
rename dbms/{src => }/Storages/System/StorageSystemMacros.h (100%)
rename dbms/{src => }/Storages/System/StorageSystemMergeTreeSettings.cpp (100%)
rename dbms/{src => }/Storages/System/StorageSystemMergeTreeSettings.h (100%)
rename dbms/{src => }/Storages/System/StorageSystemMerges.cpp (100%)
rename dbms/{src => }/Storages/System/StorageSystemMerges.h (100%)
rename dbms/{src => }/Storages/System/StorageSystemMetrics.cpp (100%)
rename dbms/{src => }/Storages/System/StorageSystemMetrics.h (100%)
rename dbms/{src => }/Storages/System/StorageSystemModels.cpp (100%)
rename dbms/{src => }/Storages/System/StorageSystemModels.h (100%)
rename dbms/{src => }/Storages/System/StorageSystemMutations.cpp (100%)
rename dbms/{src => }/Storages/System/StorageSystemMutations.h (100%)
rename dbms/{src => }/Storages/System/StorageSystemNumbers.cpp (100%)
rename dbms/{src => }/Storages/System/StorageSystemNumbers.h (100%)
rename dbms/{src => }/Storages/System/StorageSystemOne.cpp (100%)
rename dbms/{src => }/Storages/System/StorageSystemOne.h (100%)
rename dbms/{src => }/Storages/System/StorageSystemParts.cpp (100%)
rename dbms/{src => }/Storages/System/StorageSystemParts.h (100%)
rename dbms/{src => }/Storages/System/StorageSystemPartsBase.cpp (100%)
rename dbms/{src => }/Storages/System/StorageSystemPartsBase.h (100%)
rename dbms/{src => }/Storages/System/StorageSystemPartsColumns.cpp (100%)
rename dbms/{src => }/Storages/System/StorageSystemPartsColumns.h (100%)
rename dbms/{src => }/Storages/System/StorageSystemProcesses.cpp (100%)
rename dbms/{src => }/Storages/System/StorageSystemProcesses.h (100%)
rename dbms/{src => }/Storages/System/StorageSystemQuotaUsage.cpp (100%)
rename dbms/{src => }/Storages/System/StorageSystemQuotaUsage.h (100%)
rename dbms/{src => }/Storages/System/StorageSystemQuotas.cpp (100%)
rename dbms/{src => }/Storages/System/StorageSystemQuotas.h (100%)
rename dbms/{src => }/Storages/System/StorageSystemReplicas.cpp (100%)
rename dbms/{src => }/Storages/System/StorageSystemReplicas.h (100%)
rename dbms/{src => }/Storages/System/StorageSystemReplicationQueue.cpp (100%)
rename dbms/{src => }/Storages/System/StorageSystemReplicationQueue.h (100%)
rename dbms/{src => }/Storages/System/StorageSystemRowPolicies.cpp (100%)
rename dbms/{src => }/Storages/System/StorageSystemRowPolicies.h (100%)
rename dbms/{src => }/Storages/System/StorageSystemSettings.cpp (100%)
rename dbms/{src => }/Storages/System/StorageSystemSettings.h (100%)
rename dbms/{src => }/Storages/System/StorageSystemStackTrace.cpp (100%)
rename dbms/{src => }/Storages/System/StorageSystemStackTrace.h (100%)
rename dbms/{src => }/Storages/System/StorageSystemStoragePolicies.cpp (100%)
rename dbms/{src => }/Storages/System/StorageSystemStoragePolicies.h (100%)
rename dbms/{src => }/Storages/System/StorageSystemTableEngines.cpp (100%)
rename dbms/{src => }/Storages/System/StorageSystemTableEngines.h (100%)
rename dbms/{src => }/Storages/System/StorageSystemTableFunctions.cpp (100%)
rename dbms/{src => }/Storages/System/StorageSystemTableFunctions.h (100%)
rename dbms/{src => }/Storages/System/StorageSystemTables.cpp (100%)
rename dbms/{src => }/Storages/System/StorageSystemTables.h (100%)
rename dbms/{src => }/Storages/System/StorageSystemZeros.cpp (100%)
rename dbms/{src => }/Storages/System/StorageSystemZeros.h (100%)
rename dbms/{src => }/Storages/System/StorageSystemZooKeeper.cpp (100%)
rename dbms/{src => }/Storages/System/StorageSystemZooKeeper.h (100%)
rename dbms/{src => }/Storages/System/attachSystemTables.cpp (100%)
rename dbms/{src => }/Storages/System/attachSystemTables.h (100%)
rename dbms/{src => }/Storages/TableStructureLockHolder.h (100%)
rename dbms/{src => }/Storages/VirtualColumnUtils.cpp (100%)
rename dbms/{src => }/Storages/VirtualColumnUtils.h (100%)
rename dbms/{src => }/Storages/getStructureOfRemoteTable.cpp (100%)
rename dbms/{src => }/Storages/getStructureOfRemoteTable.h (100%)
rename dbms/{src => }/Storages/registerStorages.cpp (100%)
rename dbms/{src => }/Storages/registerStorages.h (100%)
rename dbms/{src => }/Storages/tests/CMakeLists.txt (100%)
rename dbms/{src => }/Storages/tests/active_parts.py (100%)
rename dbms/{src => }/Storages/tests/get_abandonable_lock_in_all_partitions.cpp (100%)
rename dbms/{src => }/Storages/tests/get_current_inserts_in_replicated.cpp (100%)
rename dbms/{src => }/Storages/tests/gtest_aux_funcs_for_adaptive_granularity.cpp (100%)
rename dbms/{src =>
}/Storages/tests/gtest_row_source_bits_test.cpp (100%) rename dbms/{src => }/Storages/tests/gtest_storage_log.cpp (100%) rename dbms/{src => }/Storages/tests/gtest_transform_query_for_external_database.cpp (100%) rename dbms/{src => }/Storages/tests/merge_selector.cpp (100%) rename dbms/{src => }/Storages/tests/merge_selector2.cpp (100%) rename dbms/{src => }/Storages/tests/part_name.cpp (100%) rename dbms/{src => }/Storages/tests/remove_symlink_directory.cpp (100%) rename dbms/{src => }/Storages/tests/storage_log.cpp (100%) rename dbms/{src => }/Storages/tests/system_numbers.cpp (100%) rename dbms/{src => }/Storages/tests/test_alter_distributed.sql (100%) rename dbms/{src => }/Storages/tests/test_alter_merge.sql (100%) rename dbms/{src => }/Storages/tests/test_alter_merge_tree.sql (100%) rename dbms/{src => }/Storages/tests/transform_part_zk_nodes.cpp (100%) rename dbms/{src => }/Storages/transformQueryForExternalDatabase.cpp (100%) rename dbms/{src => }/Storages/transformQueryForExternalDatabase.h (100%) rename dbms/{src => }/TableFunctions/CMakeLists.txt (100%) rename dbms/{src => }/TableFunctions/ITableFunction.cpp (100%) rename dbms/{src => }/TableFunctions/ITableFunction.h (100%) rename dbms/{src => }/TableFunctions/ITableFunctionFileLike.cpp (100%) rename dbms/{src => }/TableFunctions/ITableFunctionFileLike.h (100%) rename dbms/{src => }/TableFunctions/ITableFunctionXDBC.cpp (100%) rename dbms/{src => }/TableFunctions/ITableFunctionXDBC.h (100%) rename dbms/{src => }/TableFunctions/TableFunctionFactory.cpp (100%) rename dbms/{src => }/TableFunctions/TableFunctionFactory.h (100%) rename dbms/{src => }/TableFunctions/TableFunctionFile.cpp (100%) rename dbms/{src => }/TableFunctions/TableFunctionFile.h (100%) rename dbms/{src => }/TableFunctions/TableFunctionGenerateRandom.cpp (100%) rename dbms/{src => }/TableFunctions/TableFunctionGenerateRandom.h (100%) rename dbms/{src => }/TableFunctions/TableFunctionHDFS.cpp (100%) rename dbms/{src => }/TableFunctions/TableFunctionHDFS.h (100%) rename dbms/{src => }/TableFunctions/TableFunctionInput.cpp (100%) rename dbms/{src => }/TableFunctions/TableFunctionInput.h (100%) rename dbms/{src => }/TableFunctions/TableFunctionMerge.cpp (100%) rename dbms/{src => }/TableFunctions/TableFunctionMerge.h (100%) rename dbms/{src => }/TableFunctions/TableFunctionMySQL.cpp (100%) rename dbms/{src => }/TableFunctions/TableFunctionMySQL.h (100%) rename dbms/{src => }/TableFunctions/TableFunctionNumbers.cpp (100%) rename dbms/{src => }/TableFunctions/TableFunctionNumbers.h (100%) rename dbms/{src => }/TableFunctions/TableFunctionRemote.cpp (100%) rename dbms/{src => }/TableFunctions/TableFunctionRemote.h (100%) rename dbms/{src => }/TableFunctions/TableFunctionS3.cpp (100%) rename dbms/{src => }/TableFunctions/TableFunctionS3.h (100%) rename dbms/{src => }/TableFunctions/TableFunctionURL.cpp (100%) rename dbms/{src => }/TableFunctions/TableFunctionURL.h (100%) rename dbms/{src => }/TableFunctions/TableFunctionValues.cpp (100%) rename dbms/{src => }/TableFunctions/TableFunctionValues.h (100%) rename dbms/{src => }/TableFunctions/TableFunctionZeros.cpp (100%) rename dbms/{src => }/TableFunctions/TableFunctionZeros.h (100%) rename dbms/{src => }/TableFunctions/parseColumnsListForTableFunction.cpp (100%) rename dbms/{src => }/TableFunctions/parseColumnsListForTableFunction.h (100%) rename dbms/{src => }/TableFunctions/registerTableFunctions.cpp (100%) rename dbms/{src => }/TableFunctions/registerTableFunctions.h (100%) delete mode 100644 dbms/src/CMakeLists.txt 
 rename {dbms/programs => programs}/CMakeLists.txt (100%)
 rename {dbms/programs => programs}/benchmark/Benchmark.cpp (100%)
 rename {dbms/programs => programs}/benchmark/CMakeLists.txt (100%)
 rename {dbms/programs => programs}/benchmark/clickhouse-benchmark.cpp (100%)
 rename {dbms/programs => programs}/clickhouse-split-helper (100%)
 rename {dbms/programs => programs}/client/CMakeLists.txt (100%)
 rename {dbms/programs => programs}/client/Client.cpp (100%)
 rename {dbms/programs => programs}/client/ConnectionParameters.cpp (100%)
 rename {dbms/programs => programs}/client/ConnectionParameters.h (100%)
 rename {dbms/programs => programs}/client/Suggest.cpp (100%)
 rename {dbms/programs => programs}/client/Suggest.h (100%)
 rename {dbms/programs => programs}/client/TestHint.h (100%)
 rename {dbms/programs => programs}/client/clickhouse-client.cpp (100%)
 rename {dbms/programs => programs}/client/clickhouse-client.xml (100%)
 rename {dbms/programs => programs}/client/config_client.h.in (100%)
 rename {dbms/programs => programs}/client/readpassphrase/CMakeLists.txt (100%)
 rename {dbms/programs => programs}/client/readpassphrase/includes.h.in (100%)
 rename {dbms/programs => programs}/client/readpassphrase/readpassphrase.c (100%)
 rename {dbms/programs => programs}/client/readpassphrase/readpassphrase.h (100%)
 rename {dbms/programs => programs}/compressor/CMakeLists.txt (100%)
 rename {dbms/programs => programs}/compressor/Compressor.cpp (100%)
 rename {dbms/programs => programs}/compressor/README.md (100%)
 rename {dbms/programs => programs}/compressor/clickhouse-compressor.cpp (100%)
 rename {dbms/programs => programs}/config_tools.h.in (100%)
 rename {dbms/programs => programs}/copier/Aliases.h (100%)
 rename {dbms/programs => programs}/copier/CMakeLists.txt (100%)
 rename {dbms/programs => programs}/copier/ClusterCopier.cpp (100%)
 rename {dbms/programs => programs}/copier/ClusterCopier.h (100%)
 rename {dbms/programs => programs}/copier/ClusterCopierApp.cpp (100%)
 rename {dbms/programs => programs}/copier/ClusterCopierApp.h (100%)
 rename {dbms/programs => programs}/copier/ClusterPartition.h (100%)
 rename {dbms/programs => programs}/copier/Internals.cpp (100%)
 rename {dbms/programs => programs}/copier/Internals.h (100%)
 rename {dbms/programs => programs}/copier/ShardPartition.h (100%)
 rename {dbms/programs => programs}/copier/ShardPartitionPiece.h (100%)
 rename {dbms/programs => programs}/copier/TaskCluster.h (100%)
 rename {dbms/programs => programs}/copier/TaskTableAndShard.h (100%)
 rename {dbms/programs => programs}/copier/ZooKeeperStaff.h (100%)
 rename {dbms/programs => programs}/copier/clickhouse-copier.cpp (100%)
 rename {dbms/programs => programs}/extract-from-config/CMakeLists.txt (100%)
 rename {dbms/programs => programs}/extract-from-config/ExtractFromConfig.cpp (100%)
 rename {dbms/programs => programs}/extract-from-config/clickhouse-extract-from-config.cpp (100%)
 rename {dbms/programs => programs}/format/CMakeLists.txt (100%)
 rename {dbms/programs => programs}/format/Format.cpp (100%)
 rename {dbms/programs => programs}/format/clickhouse-format.cpp (100%)
 rename {dbms/programs => programs}/local/CMakeLists.txt (100%)
 rename {dbms/programs => programs}/local/LocalServer.cpp (100%)
 rename {dbms/programs => programs}/local/LocalServer.h (100%)
 rename {dbms/programs => programs}/local/clickhouse-local.cpp (100%)
 rename {dbms/programs => programs}/main.cpp (100%)
 rename {dbms/programs => programs}/obfuscator/CMakeLists.txt (100%)
 rename {dbms/programs => programs}/obfuscator/Obfuscator.cpp (100%)
 rename {dbms/programs => programs}/obfuscator/clickhouse-obfuscator.cpp (100%)
 rename {dbms/programs => programs}/odbc-bridge/CMakeLists.txt (100%)
 rename {dbms/programs => programs}/odbc-bridge/ColumnInfoHandler.cpp (100%)
 rename {dbms/programs => programs}/odbc-bridge/ColumnInfoHandler.h (100%)
 rename {dbms/programs => programs}/odbc-bridge/HandlerFactory.cpp (100%)
 rename {dbms/programs => programs}/odbc-bridge/HandlerFactory.h (100%)
 rename {dbms/programs => programs}/odbc-bridge/IdentifierQuoteHandler.cpp (100%)
 rename {dbms/programs => programs}/odbc-bridge/IdentifierQuoteHandler.h (100%)
 rename {dbms/programs => programs}/odbc-bridge/MainHandler.cpp (100%)
 rename {dbms/programs => programs}/odbc-bridge/MainHandler.h (100%)
 rename {dbms/programs => programs}/odbc-bridge/ODBCBlockInputStream.cpp (100%)
 rename {dbms/programs => programs}/odbc-bridge/ODBCBlockInputStream.h (100%)
 rename {dbms/programs => programs}/odbc-bridge/ODBCBridge.cpp (100%)
 rename {dbms/programs => programs}/odbc-bridge/ODBCBridge.h (100%)
 rename {dbms/programs => programs}/odbc-bridge/PingHandler.cpp (100%)
 rename {dbms/programs => programs}/odbc-bridge/PingHandler.h (100%)
 rename {dbms/programs => programs}/odbc-bridge/README.md (100%)
 rename {dbms/programs => programs}/odbc-bridge/getIdentifierQuote.cpp (100%)
 rename {dbms/programs => programs}/odbc-bridge/getIdentifierQuote.h (100%)
 rename {dbms/programs => programs}/odbc-bridge/odbc-bridge.cpp (100%)
 rename {dbms/programs => programs}/odbc-bridge/tests/CMakeLists.txt (100%)
 rename {dbms/programs => programs}/odbc-bridge/tests/validate-odbc-connection-string.cpp (100%)
 rename {dbms/programs => programs}/odbc-bridge/tests/validate-odbc-connection-string.reference (100%)
 rename {dbms/programs => programs}/odbc-bridge/tests/validate-odbc-connection-string.sh (100%)
 rename {dbms/programs => programs}/odbc-bridge/validateODBCConnectionString.cpp (100%)
 rename {dbms/programs => programs}/odbc-bridge/validateODBCConnectionString.h (100%)
 rename {dbms/programs => programs}/server/CMakeLists.txt (100%)
 rename {dbms/programs => programs}/server/HTTPHandler.cpp (100%)
 rename {dbms/programs => programs}/server/HTTPHandler.h (100%)
 rename {dbms/programs => programs}/server/HTTPHandlerFactory.cpp (100%)
 rename {dbms/programs => programs}/server/HTTPHandlerFactory.h (100%)
 rename {dbms/programs => programs}/server/IServer.h (100%)
 rename {dbms/programs => programs}/server/InterserverIOHTTPHandler.cpp (100%)
 rename {dbms/programs => programs}/server/InterserverIOHTTPHandler.h (100%)
 rename {dbms/programs => programs}/server/MetricsTransmitter.cpp (100%)
 rename {dbms/programs => programs}/server/MetricsTransmitter.h (100%)
 rename {dbms/programs => programs}/server/MySQLHandler.cpp (100%)
 rename {dbms/programs => programs}/server/MySQLHandler.h (100%)
 rename {dbms/programs => programs}/server/MySQLHandlerFactory.cpp (100%)
 rename {dbms/programs => programs}/server/MySQLHandlerFactory.h (100%)
 rename {dbms/programs => programs}/server/NotFoundHandler.cpp (100%)
 rename {dbms/programs => programs}/server/NotFoundHandler.h (100%)
 rename {dbms/programs => programs}/server/PingRequestHandler.cpp (100%)
 rename {dbms/programs => programs}/server/PingRequestHandler.h (100%)
 rename {dbms/programs => programs}/server/PrometheusMetricsWriter.cpp (100%)
 rename {dbms/programs => programs}/server/PrometheusMetricsWriter.h (100%)
 rename {dbms/programs => programs}/server/PrometheusRequestHandler.cpp (100%)
 rename {dbms/programs => programs}/server/PrometheusRequestHandler.h (100%)
 rename {dbms/programs => programs}/server/ReplicasStatusHandler.cpp (100%)
 rename {dbms/programs => programs}/server/ReplicasStatusHandler.h (100%)
 rename {dbms/programs => programs}/server/RootRequestHandler.cpp (100%)
 rename {dbms/programs => programs}/server/RootRequestHandler.h (100%)
 rename {dbms/programs => programs}/server/Server.cpp (100%)
 rename {dbms/programs => programs}/server/Server.h (100%)
 rename {dbms/programs => programs}/server/TCPHandler.cpp (100%)
 rename {dbms/programs => programs}/server/TCPHandler.h (100%)
 rename {dbms/programs => programs}/server/TCPHandlerFactory.h (100%)
 rename {dbms/programs => programs}/server/clickhouse-server.cpp (100%)
 rename {dbms/programs => programs}/server/config.d/listen.xml.disabled (100%)
 rename {dbms/programs => programs}/server/config.d/log_to_console.xml (100%)
 rename {dbms/programs => programs}/server/config.d/macros.xml (100%)
 rename {dbms/programs => programs}/server/config.d/metric_log.xml (100%)
 rename {dbms/programs => programs}/server/config.d/more_clusters.xml (100%)
 rename {dbms/programs => programs}/server/config.d/part_log.xml (100%)
 rename {dbms/programs => programs}/server/config.d/path.xml (100%)
 rename {dbms/programs => programs}/server/config.d/query_masking_rules.xml (100%)
 rename {dbms/programs => programs}/server/config.d/text_log.xml (100%)
 rename {dbms/programs => programs}/server/config.d/tls.xml.disabled (100%)
 rename {dbms/programs => programs}/server/config.d/zookeeper.xml (100%)
 rename {dbms/programs => programs}/server/config.xml (100%)
 rename {dbms/programs => programs}/server/data/.gitignore (100%)
 rename {dbms/programs => programs}/server/data/default/.gitignore (100%)
 rename {dbms/programs => programs}/server/metadata/default/.gitignore (100%)
 rename {dbms/programs => programs}/server/users.d/allow_only_from_localhost.xml (100%)
 rename {dbms/programs => programs}/server/users.d/log_queries.xml (100%)
 rename {dbms/programs => programs}/server/users.d/readonly.xml (100%)
 rename {dbms/programs => programs}/server/users.xml (100%)
 rename {dbms/tests => tests}/.gitignore (100%)
 rename {dbms/tests => tests}/CMakeLists.txt (100%)
 rename {dbms/tests => tests}/CTestCustom.cmake (100%)
 rename {dbms/tests => tests}/clickhouse-client.xml (100%)
 rename {dbms/tests => tests}/clickhouse-test (100%)
 rename {dbms/tests => tests}/clickhouse-test-server (93%)
 rename {dbms/tests => tests}/client-test.xml (100%)
 rename {dbms/tests => tests}/config/client_config.xml (100%)
 rename {dbms/tests => tests}/config/decimals_dictionary.xml (100%)
 rename {dbms/tests => tests}/config/dhparam.pem (100%)
 rename {dbms/tests => tests}/config/disks.xml (100%)
 rename {dbms/tests => tests}/config/ints_dictionary.xml (100%)
 rename {dbms/tests => tests}/config/listen.xml (100%)
 rename {dbms/tests => tests}/config/log_queries.xml (100%)
 rename {dbms/tests => tests}/config/macros.xml (100%)
 rename {dbms/tests => tests}/config/metric_log.xml (100%)
 rename {dbms/tests => tests}/config/part_log.xml (100%)
 rename {dbms/tests => tests}/config/query_masking_rules.xml (100%)
 rename {dbms/tests => tests}/config/readonly.xml (100%)
 rename {dbms/tests => tests}/config/secure_ports.xml (100%)
 rename {dbms/tests => tests}/config/server.crt (100%)
 rename {dbms/tests => tests}/config/server.key (100%)
 rename {dbms/tests => tests}/config/strings_dictionary.xml (100%)
 rename {dbms/tests => tests}/config/text_log.xml (100%)
 rename {dbms/tests => tests}/config/zookeeper.xml (100%)
 rename {dbms/tests => tests}/decimals_dictionary.xml (100%)
 rename {dbms/tests => tests}/external_models/catboost/data/build_catboost.sh (100%)
 rename {dbms/tests => tests}/external_models/catboost/helpers/__init__.py (100%)
 rename {dbms/tests => tests}/external_models/catboost/helpers/client.py (100%)
 rename {dbms/tests => tests}/external_models/catboost/helpers/generate.py (100%)
 rename {dbms/tests => tests}/external_models/catboost/helpers/server.py (100%)
 rename {dbms/tests => tests}/external_models/catboost/helpers/server_with_models.py (100%)
 rename {dbms/tests => tests}/external_models/catboost/helpers/table.py (100%)
 rename {dbms/tests => tests}/external_models/catboost/helpers/train.py (100%)
 rename {dbms/tests => tests}/external_models/catboost/pytest.ini (100%)
 rename {dbms/tests => tests}/external_models/catboost/test_apply_catboost_model/test.py (100%)
 rename {dbms/tests => tests}/instructions/clang-tidy.txt (100%)
 rename {dbms/tests => tests}/instructions/coverity.txt (100%)
 rename {dbms/tests => tests}/instructions/cppcheck.txt (100%)
 rename {dbms/tests => tests}/instructions/developer_instruction_en.md (100%)
 rename {dbms/tests => tests}/instructions/developer_instruction_ru.md (100%)
 rename {dbms/tests => tests}/instructions/easy_tasks_sorted_ru.md (99%)
 rename {dbms/tests => tests}/instructions/heap-profiler.txt (92%)
 rename {dbms/tests => tests}/instructions/jemalloc_memory_profile.txt (100%)
 rename {dbms/tests => tests}/instructions/kafka.txt (100%)
 rename {dbms/tests => tests}/instructions/ninja_trace.txt (100%)
 rename {dbms/tests => tests}/instructions/pvs-studio.txt (100%)
 rename {dbms/tests => tests}/instructions/sanitizers.md (96%)
 rename {dbms/tests => tests}/instructions/syntax.txt (100%)
 rename {dbms/tests => tests}/instructions/tscancode.txt (100%)
 rename {dbms/tests => tests}/integration/.dockerignore (100%)
 rename {dbms/tests => tests}/integration/.gitignore (100%)
 rename {dbms/tests => tests}/integration/CMakeLists.txt (69%)
 rename {dbms/tests => tests}/integration/README.md (89%)
 rename {dbms/tests => tests}/integration/conftest.py (100%)
 rename {dbms/tests => tests}/integration/helpers/0_common_instance_config.xml (100%)
 rename {dbms/tests => tests}/integration/helpers/__init__.py (100%)
 rename {dbms/tests => tests}/integration/helpers/client.py (100%)
 rename {dbms/tests => tests}/integration/helpers/cluster.py (100%)
 rename {dbms/tests => tests}/integration/helpers/docker_compose_hdfs.yml (100%)
 rename {dbms/tests => tests}/integration/helpers/docker_compose_kafka.yml (100%)
 rename {dbms/tests => tests}/integration/helpers/docker_compose_minio.yml (100%)
 rename {dbms/tests => tests}/integration/helpers/docker_compose_mongo.yml (100%)
 rename {dbms/tests => tests}/integration/helpers/docker_compose_mysql.yml (100%)
 rename {dbms/tests => tests}/integration/helpers/docker_compose_net.yml (100%)
 rename {dbms/tests => tests}/integration/helpers/docker_compose_postgres.yml (100%)
 rename {dbms/tests => tests}/integration/helpers/docker_compose_redis.yml (100%)
 rename {dbms/tests => tests}/integration/helpers/docker_compose_zookeeper.yml (100%)
 rename {dbms/tests => tests}/integration/helpers/hdfs_api.py (100%)
 rename {dbms/tests => tests}/integration/helpers/helper_container/Dockerfile (100%)
 rename {dbms/tests => tests}/integration/helpers/network.py (100%)
 rename {dbms/tests => tests}/integration/helpers/test_tools.py (100%)
 rename {dbms/tests => tests}/integration/helpers/zookeeper_config.xml (100%)
 rename {dbms/tests => tests}/integration/image/Dockerfile (100%)
 rename {dbms/tests => tests}/integration/image/dockerd-entrypoint.sh (92%)
 rename {dbms/tests => tests}/integration/image/modprobe.sh (100%)
 rename {dbms/tests => tests}/integration/pytest.ini (100%)
 rename {dbms/tests => tests}/integration/runner (98%)
 rename {dbms/tests => tests}/integration/test_adaptive_granularity/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_adaptive_granularity/configs/log_conf.xml (100%)
 rename {dbms/tests => tests}/integration/test_adaptive_granularity/configs/merge_tree_settings.xml (100%)
 rename {dbms/tests => tests}/integration/test_adaptive_granularity/configs/remote_servers.xml (100%)
 rename {dbms/tests => tests}/integration/test_adaptive_granularity/test.py (100%)
 rename {dbms/tests => tests}/integration/test_adaptive_granularity_replicated/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_adaptive_granularity_replicated/test.py (100%)
 rename {dbms/tests => tests}/integration/test_aggregation_memory_efficient/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_aggregation_memory_efficient/test.py (100%)
 rename {dbms/tests => tests}/integration/test_allowed_client_hosts/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_allowed_client_hosts/configs/users.d/network.xml (100%)
 rename {dbms/tests => tests}/integration/test_allowed_client_hosts/configs/users.xml (100%)
 rename {dbms/tests => tests}/integration/test_allowed_client_hosts/test.py (100%)
 rename {dbms/tests => tests}/integration/test_allowed_url_from_config/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_allowed_url_from_config/configs/config_for_redirect.xml (100%)
 rename {dbms/tests => tests}/integration/test_allowed_url_from_config/configs/config_for_remote.xml (100%)
 rename {dbms/tests => tests}/integration/test_allowed_url_from_config/configs/config_with_hosts.xml (100%)
 rename {dbms/tests => tests}/integration/test_allowed_url_from_config/configs/config_with_only_primary_hosts.xml (100%)
 rename {dbms/tests => tests}/integration/test_allowed_url_from_config/configs/config_with_only_regexp_hosts.xml (100%)
 rename {dbms/tests => tests}/integration/test_allowed_url_from_config/configs/config_without_allowed_hosts.xml (100%)
 rename {dbms/tests => tests}/integration/test_allowed_url_from_config/test.py (100%)
 rename {dbms/tests => tests}/integration/test_atomic_drop_table/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_atomic_drop_table/configs/config.d/zookeeper_session_timeout.xml (100%)
 rename {dbms/tests => tests}/integration/test_atomic_drop_table/configs/remote_servers.xml (100%)
 rename {dbms/tests => tests}/integration/test_atomic_drop_table/test.py (100%)
 rename {dbms/tests => tests}/integration/test_authentication/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_authentication/test.py (100%)
 rename {dbms/tests => tests}/integration/test_backup_restore/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_backup_restore/test.py (100%)
 rename {dbms/tests => tests}/integration/test_backward_compatability/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_backward_compatability/test.py (100%)
 rename {dbms/tests => tests}/integration/test_block_structure_mismatch/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_block_structure_mismatch/configs/remote_servers.xml (100%)
 rename {dbms/tests => tests}/integration/test_block_structure_mismatch/test.py (100%)
 rename {dbms/tests => tests}/integration/test_check_table/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_check_table/test.py (100%)
 rename {dbms/tests => tests}/integration/test_cluster_all_replicas/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_cluster_all_replicas/configs/remote_servers.xml (100%)
 rename {dbms/tests => tests}/integration/test_cluster_all_replicas/test.py (100%)
 rename {dbms/tests => tests}/integration/test_cluster_copier/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_cluster_copier/configs/conf.d/clusters.xml (100%)
 rename {dbms/tests => tests}/integration/test_cluster_copier/configs/conf.d/ddl.xml (100%)
 rename {dbms/tests => tests}/integration/test_cluster_copier/configs/conf.d/query_log.xml (100%)
 rename {dbms/tests => tests}/integration/test_cluster_copier/configs/config-copier.xml (100%)
 rename {dbms/tests => tests}/integration/test_cluster_copier/configs/users.xml (100%)
 rename {dbms/tests => tests}/integration/test_cluster_copier/task0_description.xml (100%)
 rename {dbms/tests => tests}/integration/test_cluster_copier/task_month_to_week_description.xml (100%)
 rename {dbms/tests => tests}/integration/test_cluster_copier/task_no_arg.xml (100%)
 rename {dbms/tests => tests}/integration/test_cluster_copier/task_no_index.xml (100%)
 rename {dbms/tests => tests}/integration/test_cluster_copier/task_test_block_size.xml (100%)
 rename {dbms/tests => tests}/integration/test_cluster_copier/task_trivial.xml (100%)
 rename {dbms/tests => tests}/integration/test_cluster_copier/test.py (100%)
 rename {dbms/tests => tests}/integration/test_cluster_copier/trivial_test.py (100%)
 rename {dbms/tests => tests}/integration/test_concurrent_queries_for_user_restriction/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_concurrent_queries_for_user_restriction/configs/user_restrictions.xml (100%)
 rename {dbms/tests => tests}/integration/test_concurrent_queries_for_user_restriction/test.py (100%)
 rename {dbms/tests => tests}/integration/test_config_corresponding_root/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_config_corresponding_root/configs/config.d/bad.xml (100%)
 rename {dbms/tests => tests}/integration/test_config_corresponding_root/configs/config.xml (100%)
 rename {dbms/tests => tests}/integration/test_config_corresponding_root/configs/users.xml (100%)
 rename {dbms/tests => tests}/integration/test_config_corresponding_root/test.py (100%)
 rename {dbms/tests => tests}/integration/test_config_substitutions/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_config_substitutions/configs/config_allow_databases.xml (100%)
 rename {dbms/tests => tests}/integration/test_config_substitutions/configs/config_env.xml (100%)
 rename {dbms/tests => tests}/integration/test_config_substitutions/configs/config_incl.xml (100%)
 rename {dbms/tests => tests}/integration/test_config_substitutions/configs/config_include_from_env.xml (100%)
 rename {dbms/tests => tests}/integration/test_config_substitutions/configs/config_no_substs.xml (100%)
 rename {dbms/tests => tests}/integration/test_config_substitutions/configs/config_zk.xml (100%)
 rename {dbms/tests => tests}/integration/test_config_substitutions/configs/max_query_size.xml (100%)
 rename {dbms/tests => tests}/integration/test_config_substitutions/test.py (100%)
 rename {dbms/tests => tests}/integration/test_consistant_parts_after_move_partition/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_consistant_parts_after_move_partition/configs/remote_servers.xml (100%)
 rename {dbms/tests => tests}/integration/test_consistant_parts_after_move_partition/test.py (100%)
 rename {dbms/tests => tests}/integration/test_consistent_parts_after_clone_replica/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_consistent_parts_after_clone_replica/configs/remote_servers.xml (100%)
 rename {dbms/tests => tests}/integration/test_consistent_parts_after_clone_replica/test.py (100%)
 rename {dbms/tests => tests}/integration/test_cross_replication/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_cross_replication/configs/remote_servers.xml (100%)
 rename {dbms/tests => tests}/integration/test_cross_replication/test.py (100%)
 rename {dbms/tests => tests}/integration/test_delayed_replica_failover/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_delayed_replica_failover/configs/remote_servers.xml (100%)
 rename {dbms/tests => tests}/integration/test_delayed_replica_failover/test.py (100%)
 rename {dbms/tests => tests}/integration/test_dictionaries_all_layouts_and_sources/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_dictionaries_all_layouts_and_sources/configs/config.xml (100%)
 rename {dbms/tests => tests}/integration/test_dictionaries_all_layouts_and_sources/configs/dictionaries/.gitkeep (100%)
 rename {dbms/tests => tests}/integration/test_dictionaries_all_layouts_and_sources/configs/users.xml (100%)
 rename {dbms/tests => tests}/integration/test_dictionaries_all_layouts_and_sources/dictionary.py (100%)
 rename {dbms/tests => tests}/integration/test_dictionaries_all_layouts_and_sources/external_sources.py (100%)
 rename {dbms/tests => tests}/integration/test_dictionaries_all_layouts_and_sources/fake_cert.pem (100%)
 rename {dbms/tests => tests}/integration/test_dictionaries_all_layouts_and_sources/http_server.py (100%)
 rename {dbms/tests => tests}/integration/test_dictionaries_all_layouts_and_sources/test.py (100%)
 rename {dbms/tests => tests}/integration/test_dictionaries_complex_key_cache_string/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_dictionaries_complex_key_cache_string/configs/config.xml (100%)
 rename {dbms/tests => tests}/integration/test_dictionaries_complex_key_cache_string/configs/dictionaries/complex_key_cache_string.xml (100%)
 rename {dbms/tests => tests}/integration/test_dictionaries_complex_key_cache_string/configs/users.xml (100%)
 rename {dbms/tests => tests}/integration/test_dictionaries_complex_key_cache_string/test.py (100%)
 rename {dbms/tests => tests}/integration/test_dictionaries_ddl/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_dictionaries_ddl/configs/config.xml (100%)
 rename {dbms/tests => tests}/integration/test_dictionaries_ddl/configs/dictionaries/dictionary_with_conflict_name.xml (100%)
 rename {dbms/tests => tests}/integration/test_dictionaries_ddl/configs/dictionaries/lazy_load.xml (100%)
 rename {dbms/tests => tests}/integration/test_dictionaries_ddl/configs/dictionaries/simple_dictionary.xml (100%)
 rename {dbms/tests => tests}/integration/test_dictionaries_ddl/configs/users.xml (100%)
 rename {dbms/tests => tests}/integration/test_dictionaries_ddl/test.py (100%)
 rename {dbms/tests => tests}/integration/test_dictionaries_depend_on_dictionaries/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_dictionaries_depend_on_dictionaries/configs/config.xml (100%)
 rename {dbms/tests => tests}/integration/test_dictionaries_depend_on_dictionaries/configs/dictionaries/dep_x.xml (100%)
 rename {dbms/tests => tests}/integration/test_dictionaries_depend_on_dictionaries/configs/dictionaries/dep_y.xml (100%)
 rename {dbms/tests => tests}/integration/test_dictionaries_depend_on_dictionaries/configs/dictionaries/dep_z.xml (100%)
 rename {dbms/tests => tests}/integration/test_dictionaries_depend_on_dictionaries/configs/users.xml (100%)
 rename {dbms/tests => tests}/integration/test_dictionaries_depend_on_dictionaries/test.py (100%)
 rename {dbms/tests => tests}/integration/test_dictionaries_mysql/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_dictionaries_mysql/configs/config.xml (100%)
 rename {dbms/tests => tests}/integration/test_dictionaries_mysql/configs/dictionaries/mysql_dict1.xml (100%)
 rename {dbms/tests => tests}/integration/test_dictionaries_mysql/configs/dictionaries/mysql_dict2.xml (100%)
 rename {dbms/tests => tests}/integration/test_dictionaries_mysql/configs/remote_servers.xml (100%)
 rename {dbms/tests => tests}/integration/test_dictionaries_mysql/configs/users.xml (100%)
 rename {dbms/tests => tests}/integration/test_dictionaries_mysql/test.py (100%)
 rename {dbms/tests => tests}/integration/test_dictionaries_null_value/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_dictionaries_null_value/configs/config.xml (100%)
 rename {dbms/tests => tests}/integration/test_dictionaries_null_value/configs/dictionaries/cache.xml (100%)
 rename {dbms/tests => tests}/integration/test_dictionaries_null_value/configs/users.xml (100%)
 rename {dbms/tests => tests}/integration/test_dictionaries_null_value/test.py (100%)
 rename {dbms/tests => tests}/integration/test_dictionaries_select_all/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_dictionaries_select_all/configs/config.xml (100%)
 rename {dbms/tests => tests}/integration/test_dictionaries_select_all/configs/dictionaries/.gitignore (100%)
 rename {dbms/tests => tests}/integration/test_dictionaries_select_all/configs/dictionaries/source.tsv (100%)
 rename {dbms/tests => tests}/integration/test_dictionaries_select_all/configs/users.xml (100%)
 rename {dbms/tests => tests}/integration/test_dictionaries_select_all/generate_dictionaries.py (100%)
 rename {dbms/tests => tests}/integration/test_dictionaries_select_all/test.py (100%)
 rename {dbms/tests => tests}/integration/test_dictionaries_update_and_reload/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_dictionaries_update_and_reload/configs/config.xml (100%)
 rename {dbms/tests => tests}/integration/test_dictionaries_update_and_reload/configs/dictionaries/cache_xypairs.xml (100%)
 rename {dbms/tests => tests}/integration/test_dictionaries_update_and_reload/configs/dictionaries/executable.xml (100%)
 rename {dbms/tests => tests}/integration/test_dictionaries_update_and_reload/configs/dictionaries/file.txt (100%)
 rename {dbms/tests => tests}/integration/test_dictionaries_update_and_reload/configs/dictionaries/file.xml (100%)
 rename {dbms/tests => tests}/integration/test_dictionaries_update_and_reload/configs/dictionaries/slow.xml (100%)
 rename {dbms/tests => tests}/integration/test_dictionaries_update_and_reload/configs/users.xml (100%)
 rename {dbms/tests => tests}/integration/test_dictionaries_update_and_reload/test.py (100%)
 rename {dbms/tests => tests}/integration/test_dictionary_allow_read_expired_keys/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_dictionary_allow_read_expired_keys/configs/config.xml (100%)
 rename {dbms/tests => tests}/integration/test_dictionary_allow_read_expired_keys/configs/dictionaries/cache_ints_dictionary.xml (100%)
 rename {dbms/tests => tests}/integration/test_dictionary_allow_read_expired_keys/configs/users.xml (100%)
 rename {dbms/tests => tests}/integration/test_dictionary_allow_read_expired_keys/test_default_reading.py (100%)
 rename {dbms/tests => tests}/integration/test_dictionary_allow_read_expired_keys/test_dict_get.py (100%)
 rename {dbms/tests => tests}/integration/test_dictionary_allow_read_expired_keys/test_dict_get_or_default.py (100%)
 rename {dbms/tests => tests}/integration/test_dictionary_ddl_on_cluster/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_dictionary_ddl_on_cluster/configs/config.d/clusters.xml (100%)
 rename {dbms/tests => tests}/integration/test_dictionary_ddl_on_cluster/configs/config.d/ddl.xml (100%)
 rename {dbms/tests => tests}/integration/test_dictionary_ddl_on_cluster/test.py (100%)
 rename {dbms/tests => tests}/integration/test_disk_access_storage/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_disk_access_storage/configs/access_control_path.xml (100%)
 rename {dbms/tests => tests}/integration/test_disk_access_storage/test.py (100%)
 rename {dbms/tests => tests}/integration/test_distributed_ddl/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_distributed_ddl/cluster.py (100%)
 rename {dbms/tests => tests}/integration/test_distributed_ddl/configs/config.d/clusters.xml (100%)
 rename {dbms/tests => tests}/integration/test_distributed_ddl/configs/config.d/ddl.xml (100%)
 rename {dbms/tests => tests}/integration/test_distributed_ddl/configs/config.d/macro.xml (100%)
 rename {dbms/tests => tests}/integration/test_distributed_ddl/configs/config.d/query_log.xml (100%)
 rename {dbms/tests => tests}/integration/test_distributed_ddl/configs/config.d/zookeeper_session_timeout.xml (100%)
 rename {dbms/tests => tests}/integration/test_distributed_ddl/configs/users.d/query_log.xml (100%)
 rename {dbms/tests => tests}/integration/test_distributed_ddl/configs/users.d/restricted_user.xml (100%)
 rename {dbms/tests => tests}/integration/test_distributed_ddl/configs_secure/config.d/clusters.xml (100%)
 rename {dbms/tests => tests}/integration/test_distributed_ddl/configs_secure/config.d/ddl.xml (100%)
 rename {dbms/tests => tests}/integration/test_distributed_ddl/configs_secure/config.d/macro.xml (100%)
 rename {dbms/tests => tests}/integration/test_distributed_ddl/configs_secure/config.d/query_log.xml (100%)
 rename {dbms/tests => tests}/integration/test_distributed_ddl/configs_secure/config.d/ssl_conf.xml (100%)
 rename {dbms/tests => tests}/integration/test_distributed_ddl/configs_secure/config.d/zookeeper_session_timeout.xml (100%)
 rename {dbms/tests => tests}/integration/test_distributed_ddl/configs_secure/dhparam.pem (100%)
 rename {dbms/tests => tests}/integration/test_distributed_ddl/configs_secure/server.crt (100%)
 rename {dbms/tests => tests}/integration/test_distributed_ddl/configs_secure/server.key (100%)
 rename {dbms/tests => tests}/integration/test_distributed_ddl/configs_secure/users.d/query_log.xml (100%)
 rename {dbms/tests => tests}/integration/test_distributed_ddl/configs_secure/users.d/restricted_user.xml (100%)
 rename {dbms/tests => tests}/integration/test_distributed_ddl/test.py (100%)
 rename {dbms/tests => tests}/integration/test_distributed_ddl/test_replicated_alter.py (100%)
 rename {dbms/tests => tests}/integration/test_distributed_ddl_password/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_distributed_ddl_password/configs/config.d/clusters.xml (100%)
 rename {dbms/tests => tests}/integration/test_distributed_ddl_password/configs/users.d/default_with_password.xml (100%)
 rename {dbms/tests => tests}/integration/test_distributed_ddl_password/test.py (100%)
 rename {dbms/tests => tests}/integration/test_distributed_format/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_distributed_format/configs/remote_servers.xml (100%)
 rename {dbms/tests => tests}/integration/test_distributed_format/test.py (100%)
 rename {dbms/tests => tests}/integration/test_distributed_respect_user_timeouts/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_distributed_respect_user_timeouts/configs/config.d/remote_servers.xml (100%)
 rename {dbms/tests => tests}/integration/test_distributed_respect_user_timeouts/configs/users.d/set_distributed_defaults.xml (100%)
 rename {dbms/tests => tests}/integration/test_distributed_respect_user_timeouts/configs_secure/config.d/remote_servers.xml (100%)
 rename {dbms/tests => tests}/integration/test_distributed_respect_user_timeouts/configs_secure/config.d/ssl_conf.xml (100%)
 rename {dbms/tests => tests}/integration/test_distributed_respect_user_timeouts/configs_secure/dhparam.pem (100%)
 rename {dbms/tests => tests}/integration/test_distributed_respect_user_timeouts/configs_secure/server.crt (100%)
 rename {dbms/tests => tests}/integration/test_distributed_respect_user_timeouts/configs_secure/server.key (100%)
 rename {dbms/tests => tests}/integration/test_distributed_respect_user_timeouts/configs_secure/users.d/set_distributed_defaults.xml (100%)
 rename {dbms/tests => tests}/integration/test_distributed_respect_user_timeouts/test.py (100%)
 rename {dbms/tests => tests}/integration/test_distributed_storage_configuration/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_distributed_storage_configuration/configs/config.d/storage_configuration.xml (100%)
 rename {dbms/tests => tests}/integration/test_distributed_storage_configuration/test.py (100%)
 rename {dbms/tests => tests}/integration/test_distributed_system_query/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_distributed_system_query/configs/remote_servers.xml (100%)
 rename {dbms/tests => tests}/integration/test_distributed_system_query/test.py (100%)
 rename {dbms/tests => tests}/integration/test_extreme_deduplication/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_extreme_deduplication/configs/conf.d/merge_tree.xml (100%)
 rename {dbms/tests => tests}/integration/test_extreme_deduplication/configs/conf.d/remote_servers.xml (100%)
 rename {dbms/tests => tests}/integration/test_extreme_deduplication/test.py (100%)
 rename {dbms/tests => tests}/integration/test_filesystem_layout/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_filesystem_layout/test.py (100%)
 rename {dbms/tests => tests}/integration/test_force_deduplication/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_force_deduplication/test.py (100%)
 rename {dbms/tests => tests}/integration/test_format_avro_confluent/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_format_avro_confluent/test.py (100%)
 rename {dbms/tests => tests}/integration/test_format_schema_on_server/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_format_schema_on_server/clickhouse_path/format_schemas/simple.proto (100%)
 rename {dbms/tests => tests}/integration/test_format_schema_on_server/test.py (100%)
 rename {dbms/tests => tests}/integration/test_globs_in_filepath/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_globs_in_filepath/test.py (100%)
 rename {dbms/tests => tests}/integration/test_grant_and_revoke/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_grant_and_revoke/configs/users.xml (100%)
 rename {dbms/tests => tests}/integration/test_grant_and_revoke/test.py (100%)
 rename {dbms/tests => tests}/integration/test_graphite_merge_tree/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_graphite_merge_tree/configs/graphite_rollup.xml (100%)
 rename {dbms/tests => tests}/integration/test_graphite_merge_tree/test.py (100%)
 rename {dbms/tests => tests}/integration/test_graphite_merge_tree/test_multiple_paths_and_versions.reference (100%)
 rename {dbms/tests => tests}/integration/test_host_ip_change/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_host_ip_change/configs/dns_update_long.xml (100%)
 rename {dbms/tests => tests}/integration/test_host_ip_change/configs/dns_update_short.xml (100%)
 rename {dbms/tests => tests}/integration/test_host_ip_change/configs/listen_host.xml (100%)
 rename {dbms/tests => tests}/integration/test_host_ip_change/configs/remote_servers.xml (100%)
 rename {dbms/tests => tests}/integration/test_host_ip_change/test.py (100%)
 rename {dbms/tests => tests}/integration/test_http_and_readonly/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_http_and_readonly/test.py (100%)
 rename {dbms/tests => tests}/integration/test_https_replication/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_https_replication/configs/config.xml (100%)
 rename {dbms/tests => tests}/integration/test_https_replication/configs/no_ssl_conf.xml (100%)
 rename {dbms/tests => tests}/integration/test_https_replication/configs/remote_servers.xml (100%)
 rename {dbms/tests => tests}/integration/test_https_replication/configs/server.crt (100%)
 rename {dbms/tests => tests}/integration/test_https_replication/configs/server.key (100%)
 rename {dbms/tests => tests}/integration/test_https_replication/configs/ssl_conf.xml (100%)
 rename {dbms/tests => tests}/integration/test_https_replication/test.py (100%)
 rename {dbms/tests => tests}/integration/test_inherit_multiple_profiles/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_inherit_multiple_profiles/configs/combined_profile.xml (100%)
 rename {dbms/tests => tests}/integration/test_inherit_multiple_profiles/test.py (100%)
 rename {dbms/tests => tests}/integration/test_insert_into_distributed/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_insert_into_distributed/configs/enable_distributed_inserts_batching.xml (100%)
 rename {dbms/tests => tests}/integration/test_insert_into_distributed/configs/forbid_background_merges.xml (100%)
 rename {dbms/tests => tests}/integration/test_insert_into_distributed/configs/remote_servers.xml (100%)
 rename {dbms/tests => tests}/integration/test_insert_into_distributed/test.py (100%)
 rename {dbms/tests => tests}/integration/test_insert_into_distributed_sync_async/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_insert_into_distributed_sync_async/configs/remote_servers.xml (100%)
 rename {dbms/tests => tests}/integration/test_insert_into_distributed_sync_async/test.py (100%)
 rename {dbms/tests => tests}/integration/test_insert_into_distributed_through_materialized_view/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_insert_into_distributed_through_materialized_view/configs/enable_distributed_inserts_batching.xml (100%)
 rename {dbms/tests => tests}/integration/test_insert_into_distributed_through_materialized_view/configs/forbid_background_merges.xml (100%)
 rename {dbms/tests => tests}/integration/test_insert_into_distributed_through_materialized_view/configs/remote_servers.xml (100%)
 rename {dbms/tests => tests}/integration/test_insert_into_distributed_through_materialized_view/test.py (100%)
 rename {dbms/tests => tests}/integration/test_log_family_s3/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_log_family_s3/configs/config.d/log_conf.xml (100%)
 rename {dbms/tests => tests}/integration/test_log_family_s3/configs/config.xml (100%)
 rename {dbms/tests => tests}/integration/test_log_family_s3/configs/users.xml (100%)
 rename {dbms/tests => tests}/integration/test_log_family_s3/test.py (100%)
 rename {dbms/tests => tests}/integration/test_logs_level/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_logs_level/configs/config_information.xml (100%)
 rename {dbms/tests => tests}/integration/test_logs_level/test.py (100%)
 rename {dbms/tests => tests}/integration/test_match_process_uid_against_data_owner/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_match_process_uid_against_data_owner/configs/config.xml (100%)
 rename {dbms/tests => tests}/integration/test_match_process_uid_against_data_owner/configs/users.xml (100%)
 rename {dbms/tests => tests}/integration/test_match_process_uid_against_data_owner/test.py (100%)
 rename {dbms/tests => tests}/integration/test_max_http_connections_for_replication/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_max_http_connections_for_replication/configs/log_conf.xml (100%)
 rename {dbms/tests => tests}/integration/test_max_http_connections_for_replication/configs/remote_servers.xml (100%)
 rename {dbms/tests => tests}/integration/test_max_http_connections_for_replication/test.py (100%)
 rename {dbms/tests => tests}/integration/test_merge_table_over_distributed/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_merge_table_over_distributed/configs/remote_servers.xml (100%)
 rename {dbms/tests => tests}/integration/test_merge_table_over_distributed/test.py (100%)
 rename {dbms/tests => tests}/integration/test_merge_tree_s3/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_merge_tree_s3/configs/config.d/bg_processing_pool_conf.xml (100%)
 rename {dbms/tests => tests}/integration/test_merge_tree_s3/configs/config.d/log_conf.xml (100%)
 rename {dbms/tests => tests}/integration/test_merge_tree_s3/configs/config.d/storage_conf.xml (100%)
 rename {dbms/tests => tests}/integration/test_merge_tree_s3/configs/config.d/users.xml (100%)
 rename {dbms/tests => tests}/integration/test_merge_tree_s3/configs/config.xml (100%)
 rename {dbms/tests => tests}/integration/test_merge_tree_s3/test.py (100%)
 rename {dbms/tests => tests}/integration/test_multiple_disks/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_multiple_disks/configs/config.d/cluster.xml (100%)
 rename {dbms/tests => tests}/integration/test_multiple_disks/configs/config.d/storage_configuration.xml (100%)
 rename {dbms/tests => tests}/integration/test_multiple_disks/configs/logs_config.xml (100%)
 rename {dbms/tests => tests}/integration/test_multiple_disks/test.py (100%)
 rename {dbms/tests => tests}/integration/test_mutations_with_merge_tree/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_mutations_with_merge_tree/configs/config.xml (100%)
 rename {dbms/tests => tests}/integration/test_mutations_with_merge_tree/configs/users.xml (100%)
 rename {dbms/tests => tests}/integration/test_mutations_with_merge_tree/test.py (100%)
 rename {dbms/tests => tests}/integration/test_mysql_database_engine/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_mysql_database_engine/configs/remote_servers.xml (100%)
 rename {dbms/tests => tests}/integration/test_mysql_database_engine/test.py (100%)
 rename {dbms/tests => tests}/integration/test_mysql_protocol/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_mysql_protocol/clients/golang/0.reference (100%)
 rename {dbms/tests => tests}/integration/test_mysql_protocol/clients/golang/Dockerfile (100%)
 rename {dbms/tests => tests}/integration/test_mysql_protocol/clients/golang/docker_compose.yml (100%)
 rename {dbms/tests => tests}/integration/test_mysql_protocol/clients/golang/main.go (100%)
 rename {dbms/tests => tests}/integration/test_mysql_protocol/clients/mysql/docker_compose.yml (100%)
 rename {dbms/tests => tests}/integration/test_mysql_protocol/clients/mysqljs/Dockerfile (100%)
 rename {dbms/tests => tests}/integration/test_mysql_protocol/clients/mysqljs/docker_compose.yml (100%)
 rename {dbms/tests => tests}/integration/test_mysql_protocol/clients/mysqljs/test.js (100%)
 rename {dbms/tests => tests}/integration/test_mysql_protocol/clients/php-mysqlnd/Dockerfile (100%)
 rename {dbms/tests => tests}/integration/test_mysql_protocol/clients/php-mysqlnd/client.crt (100%)
 rename {dbms/tests => tests}/integration/test_mysql_protocol/clients/php-mysqlnd/client.key (100%)
 rename {dbms/tests => tests}/integration/test_mysql_protocol/clients/php-mysqlnd/docker_compose.yml (100%)
 rename {dbms/tests => tests}/integration/test_mysql_protocol/clients/php-mysqlnd/test.php (100%)
 rename {dbms/tests => tests}/integration/test_mysql_protocol/clients/php-mysqlnd/test_ssl.php (100%)
 rename {dbms/tests => tests}/integration/test_mysql_protocol/configs/config.xml (100%)
 rename {dbms/tests => tests}/integration/test_mysql_protocol/configs/dhparam.pem (100%)
 rename {dbms/tests => tests}/integration/test_mysql_protocol/configs/server.crt (100%)
 rename {dbms/tests => tests}/integration/test_mysql_protocol/configs/server.key (100%)
 rename {dbms/tests => tests}/integration/test_mysql_protocol/configs/users.xml (100%)
 rename {dbms/tests => tests}/integration/test_mysql_protocol/test.py (100%)
 rename {dbms/tests => tests}/integration/test_non_default_compression/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_non_default_compression/configs/custom_compression_by_default.xml (100%)
 rename {dbms/tests => tests}/integration/test_non_default_compression/configs/enable_uncompressed_cache.xml (100%)
 rename {dbms/tests => tests}/integration/test_non_default_compression/configs/lz4hc_compression_by_default.xml (100%)
 rename {dbms/tests => tests}/integration/test_non_default_compression/configs/zstd_compression_by_default.xml (100%)
 rename {dbms/tests => tests}/integration/test_non_default_compression/test.py (100%)
 rename {dbms/tests => tests}/integration/test_odbc_interaction/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_odbc_interaction/configs/config.xml (100%)
 rename {dbms/tests => tests}/integration/test_odbc_interaction/configs/dictionaries/postgres_odbc_hashed_dictionary.xml (100%)
 rename {dbms/tests => tests}/integration/test_odbc_interaction/configs/dictionaries/sqlite3_odbc_cached_dictionary.xml (100%)
 rename {dbms/tests => tests}/integration/test_odbc_interaction/configs/dictionaries/sqlite3_odbc_hashed_dictionary.xml (100%)
 rename {dbms/tests => tests}/integration/test_odbc_interaction/configs/users.xml (100%)
 rename {dbms/tests => tests}/integration/test_odbc_interaction/test.py (100%)
 rename {dbms/tests => tests}/integration/test_old_versions/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_old_versions/configs/config.d/test_cluster.xml (100%)
 rename {dbms/tests => tests}/integration/test_old_versions/test.py (100%)
 rename {dbms/tests => tests}/integration/test_part_log_table/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_part_log_table/configs/config_with_non_standard_part_log.xml (100%)
 rename {dbms/tests => tests}/integration/test_part_log_table/configs/config_with_standard_part_log.xml (100%)
 rename {dbms/tests => tests}/integration/test_part_log_table/test.py (100%)
 rename {dbms/tests => tests}/integration/test_partition/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_partition/test.py (100%)
 rename {dbms/tests => tests}/integration/test_parts_delete_zookeeper/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_parts_delete_zookeeper/configs/remote_servers.xml (100%)
 rename {dbms/tests => tests}/integration/test_parts_delete_zookeeper/test.py (100%)
 rename {dbms/tests => tests}/integration/test_polymorphic_parts/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_polymorphic_parts/configs/compact_parts.xml (100%)
 rename {dbms/tests => tests}/integration/test_polymorphic_parts/configs/no_leader.xml (100%)
 rename {dbms/tests => tests}/integration/test_polymorphic_parts/configs/users.d/not_optimize_count.xml (100%)
 rename {dbms/tests => tests}/integration/test_polymorphic_parts/test.py (100%)
 rename {dbms/tests => tests}/integration/test_prometheus_endpoint/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_prometheus_endpoint/configs/prom_conf.xml (100%)
 rename {dbms/tests => tests}/integration/test_prometheus_endpoint/test.py (100%)
 rename {dbms/tests => tests}/integration/test_quota/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_quota/configs/users.d/quota.xml (100%)
 rename {dbms/tests => tests}/integration/test_quota/configs/users.xml (100%)
 rename {dbms/tests => tests}/integration/test_quota/no_quotas.xml (100%)
 rename {dbms/tests => tests}/integration/test_quota/normal_limits.xml (100%)
 rename {dbms/tests => tests}/integration/test_quota/simpliest.xml (100%)
 rename {dbms/tests => tests}/integration/test_quota/test.py (100%)
 rename {dbms/tests => tests}/integration/test_quota/tiny_limits.xml (100%)
 rename {dbms/tests => tests}/integration/test_quota/tracking.xml (100%)
 rename {dbms/tests => tests}/integration/test_quota/two_intervals.xml (100%)
 rename {dbms/tests => tests}/integration/test_quota/two_quotas.xml (100%)
 rename {dbms/tests => tests}/integration/test_random_inserts/configs/conf.d/merge_tree.xml (100%)
 rename {dbms/tests => tests}/integration/test_random_inserts/configs/conf.d/remote_servers.xml (100%)
 rename {dbms/tests => tests}/integration/test_random_inserts/test.py (100%)
 rename {dbms/tests => tests}/integration/test_random_inserts/test.sh (100%)
 rename {dbms/tests => tests}/integration/test_read_temporary_tables_on_failure/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_read_temporary_tables_on_failure/test.py (100%)
 rename {dbms/tests => tests}/integration/test_recovery_replica/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_recovery_replica/configs/remote_servers.xml (100%)
 rename {dbms/tests => tests}/integration/test_recovery_replica/test.py (100%)
 rename {dbms/tests => tests}/integration/test_redirect_url_storage/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_redirect_url_storage/test.py (100%)
 rename {dbms/tests => tests}/integration/test_relative_filepath/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_relative_filepath/configs/config.xml (100%)
 rename {dbms/tests => tests}/integration/test_relative_filepath/test.py (100%)
 rename {dbms/tests => tests}/integration/test_reload_max_table_size_to_drop/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_reload_max_table_size_to_drop/configs/config.xml (100%)
 rename {dbms/tests => tests}/integration/test_reload_max_table_size_to_drop/configs/users.xml (100%)
 rename {dbms/tests => tests}/integration/test_reload_max_table_size_to_drop/test.py (100%)
 rename {dbms/tests => tests}/integration/test_reloading_storage_configuration/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_reloading_storage_configuration/configs/config.d/cluster.xml (100%)
 rename {dbms/tests => tests}/integration/test_reloading_storage_configuration/configs/config.d/storage_configuration.xml (100%)
 rename {dbms/tests => tests}/integration/test_reloading_storage_configuration/configs/logs_config.xml (100%)
 rename {dbms/tests => tests}/integration/test_reloading_storage_configuration/test.py (100%)
 rename {dbms/tests => tests}/integration/test_remote_prewhere/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_remote_prewhere/configs/log_conf.xml (100%)
 rename {dbms/tests => tests}/integration/test_remote_prewhere/test.py (100%)
 rename {dbms/tests => tests}/integration/test_replace_partition/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_replace_partition/configs/remote_servers.xml (100%)
 rename {dbms/tests => tests}/integration/test_replace_partition/test.py (100%)
 rename {dbms/tests => tests}/integration/test_replica_can_become_leader/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_replica_can_become_leader/configs/notleader.xml (100%)
 rename {dbms/tests => tests}/integration/test_replica_can_become_leader/configs/notleaderignorecase.xml (100%)
 rename {dbms/tests => tests}/integration/test_replica_can_become_leader/test.py (100%)
 rename {dbms/tests => tests}/integration/test_replicated_mutations/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_replicated_mutations/configs/merge_tree.xml (100%)
 rename {dbms/tests => tests}/integration/test_replicated_mutations/configs/merge_tree_max_parts.xml (100%)
 rename {dbms/tests => tests}/integration/test_replicated_mutations/test.py (100%)
 rename {dbms/tests => tests}/integration/test_replicating_constants/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_replicating_constants/test.py (100%)
 rename {dbms/tests => tests}/integration/test_replication_credentials/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_replication_credentials/configs/credentials1.xml (100%)
 rename {dbms/tests => tests}/integration/test_replication_credentials/configs/credentials2.xml (100%)
 rename {dbms/tests => tests}/integration/test_replication_credentials/configs/no_credentials.xml (100%)
 rename {dbms/tests => tests}/integration/test_replication_credentials/configs/remote_servers.xml (100%)
 rename {dbms/tests => tests}/integration/test_replication_credentials/test.py (100%)
 rename {dbms/tests => tests}/integration/test_replication_without_zookeeper/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_replication_without_zookeeper/configs/remote_servers.xml (100%)
 rename {dbms/tests => tests}/integration/test_replication_without_zookeeper/test.py (100%)
 rename {dbms/tests => tests}/integration/test_row_policy/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_row_policy/all_rows.xml (100%)
 rename {dbms/tests => tests}/integration/test_row_policy/configs/config.d/remote_servers.xml (100%)
 rename {dbms/tests => tests}/integration/test_row_policy/configs/users.d/row_policy.xml (100%)
 rename {dbms/tests => tests}/integration/test_row_policy/configs/users.xml (100%)
 rename {dbms/tests => tests}/integration/test_row_policy/multiple_tags_with_table_names.xml (100%)
 rename {dbms/tests => tests}/integration/test_row_policy/no_filters.xml (100%)
 rename {dbms/tests => tests}/integration/test_row_policy/no_rows.xml (100%)
 rename {dbms/tests => tests}/integration/test_row_policy/normal_filters.xml (100%)
 rename {dbms/tests => tests}/integration/test_row_policy/tag_with_table_name.xml (100%)
 rename {dbms/tests => tests}/integration/test_row_policy/test.py (100%)
 rename {dbms/tests => tests}/integration/test_send_request_to_leader_replica/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_send_request_to_leader_replica/configs/remote_servers.xml (100%)
 rename {dbms/tests => tests}/integration/test_send_request_to_leader_replica/configs/user_good_allowed.xml (100%)
 rename {dbms/tests => tests}/integration/test_send_request_to_leader_replica/configs/user_good_restricted.xml (100%)
 rename {dbms/tests => tests}/integration/test_send_request_to_leader_replica/test.py (100%)
 rename {dbms/tests => tests}/integration/test_server_initialization/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_server_initialization/clickhouse_path/data/default/should_be_restored/data.CSV (100%)
 rename {dbms/tests => tests}/integration/test_server_initialization/clickhouse_path/metadata/default/should_be_dropped.sql.tmp_drop (100%)
 rename {dbms/tests => tests}/integration/test_server_initialization/clickhouse_path/metadata/default/should_be_restored.sql.tmp_drop (100%)
 rename {dbms/tests => tests}/integration/test_server_initialization/clickhouse_path/metadata/default/sophisticated_default.sql (100%)
 rename {dbms/tests => tests}/integration/test_server_initialization/clickhouse_path_fail/metadata/default.sql (100%)
 rename {dbms/tests => tests}/integration/test_server_initialization/test.py (100%)
 rename {dbms/tests => tests}/integration/test_settings_constraints/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_settings_constraints/configs/users.xml (100%)
 rename {dbms/tests => tests}/integration/test_settings_constraints/test.py (100%)
 rename {dbms/tests => tests}/integration/test_settings_constraints_distributed/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_settings_constraints_distributed/configs/remote_servers.xml (100%)
 rename {dbms/tests => tests}/integration/test_settings_constraints_distributed/test.py (100%)
 rename {dbms/tests => tests}/integration/test_settings_profile/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_settings_profile/test.py (100%)
 rename {dbms/tests => tests}/integration/test_storage_hdfs/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_storage_hdfs/configs/log_conf.xml (100%)
 rename {dbms/tests => tests}/integration/test_storage_hdfs/test.py (100%)
 rename {dbms/tests => tests}/integration/test_storage_kafka/__init__.py (100%)
 rename {dbms/tests => tests}/integration/test_storage_kafka/clickhouse_path/format_schemas/kafka.proto (100%)
 rename {dbms/tests =>
tests}/integration/test_storage_kafka/configs/kafka.xml (100%) rename {dbms/tests => tests}/integration/test_storage_kafka/configs/log_conf.xml (100%) rename {dbms/tests => tests}/integration/test_storage_kafka/configs/users.xml (100%) rename {dbms/tests => tests}/integration/test_storage_kafka/kafka_pb2.py (100%) rename {dbms/tests => tests}/integration/test_storage_kafka/test.py (100%) rename {dbms/tests => tests}/integration/test_storage_kafka/test_kafka_json.reference (100%) rename {dbms/tests => tests}/integration/test_storage_kafka/test_kafka_virtual1.reference (100%) rename {dbms/tests => tests}/integration/test_storage_kafka/test_kafka_virtual2.reference (100%) rename {dbms/tests => tests}/integration/test_storage_mysql/__init__.py (100%) rename {dbms/tests => tests}/integration/test_storage_mysql/configs/remote_servers.xml (100%) rename {dbms/tests => tests}/integration/test_storage_mysql/test.py (100%) rename {dbms/tests => tests}/integration/test_storage_s3/__init__.py (100%) rename {dbms/tests => tests}/integration/test_storage_s3/configs/config_for_test_remote_host_filter.xml (100%) rename {dbms/tests => tests}/integration/test_storage_s3/test.py (100%) rename {dbms/tests => tests}/integration/test_system_merges/__init__.py (100%) rename {dbms/tests => tests}/integration/test_system_merges/configs/config.d/cluster.xml (100%) rename {dbms/tests => tests}/integration/test_system_merges/configs/logs_config.xml (100%) rename {dbms/tests => tests}/integration/test_system_merges/test.py (100%) rename {dbms/tests => tests}/integration/test_system_queries/__init__.py (100%) rename {dbms/tests => tests}/integration/test_system_queries/configs/config.d/clusters_config.xml (100%) rename {dbms/tests => tests}/integration/test_system_queries/configs/config.d/dictionaries_config.xml (100%) rename {dbms/tests => tests}/integration/test_system_queries/configs/config.d/query_log.xml (100%) rename {dbms/tests => tests}/integration/test_system_queries/configs/dictionaries/dictionary_clickhouse_cache.xml (100%) rename {dbms/tests => tests}/integration/test_system_queries/configs/dictionaries/dictionary_clickhouse_flat.xml (100%) rename {dbms/tests => tests}/integration/test_system_queries/configs/users.xml (100%) rename {dbms/tests => tests}/integration/test_system_queries/test.py (100%) rename {dbms/tests => tests}/integration/test_text_log_level/__init__.py (100%) rename {dbms/tests => tests}/integration/test_text_log_level/configs/config.d/text_log.xml (100%) rename {dbms/tests => tests}/integration/test_text_log_level/test.py (100%) rename {dbms/tests => tests}/integration/test_timezone_config/__init__.py (100%) rename {dbms/tests => tests}/integration/test_timezone_config/configs/config.xml (100%) rename {dbms/tests => tests}/integration/test_timezone_config/test.py (100%) rename {dbms/tests => tests}/integration/test_tmp_policy/__init__.py (100%) rename {dbms/tests => tests}/integration/test_tmp_policy/configs/config.d/storage_configuration.xml (100%) rename {dbms/tests => tests}/integration/test_tmp_policy/test.py (100%) rename {dbms/tests => tests}/integration/test_ttl_move/__init__.py (100%) rename {dbms/tests => tests}/integration/test_ttl_move/configs/config.d/cluster.xml (100%) rename {dbms/tests => tests}/integration/test_ttl_move/configs/config.d/instant_moves.xml (100%) rename {dbms/tests => tests}/integration/test_ttl_move/configs/config.d/storage_configuration.xml (100%) rename {dbms/tests => tests}/integration/test_ttl_move/configs/logs_config.xml (100%) rename {dbms/tests => 
tests}/integration/test_ttl_move/test.py (100%) rename {dbms/tests => tests}/integration/test_ttl_replicated/__init__.py (100%) rename {dbms/tests => tests}/integration/test_ttl_replicated/test.py (100%) rename {dbms/tests => tests}/integration/test_union_header/__init__.py (100%) rename {dbms/tests => tests}/integration/test_union_header/configs/remote_servers.xml (100%) rename {dbms/tests => tests}/integration/test_union_header/test.py (100%) rename {dbms/tests => tests}/integration/test_user_ip_restrictions/__init__.py (100%) rename {dbms/tests => tests}/integration/test_user_ip_restrictions/configs/config_ipv6.xml (100%) rename {dbms/tests => tests}/integration/test_user_ip_restrictions/configs/users_ipv4.xml (100%) rename {dbms/tests => tests}/integration/test_user_ip_restrictions/configs/users_ipv6.xml (100%) rename {dbms/tests => tests}/integration/test_user_ip_restrictions/test.py (100%) rename {dbms/tests => tests}/integration/test_user_zero_database_access/__init__.py (100%) rename {dbms/tests => tests}/integration/test_user_zero_database_access/configs/config.xml (100%) rename {dbms/tests => tests}/integration/test_user_zero_database_access/configs/users.xml (100%) rename {dbms/tests => tests}/integration/test_user_zero_database_access/test_user_zero_database_access.py (100%) rename {dbms/tests => tests}/integration/test_version_update_after_mutation/__init__.py (100%) rename {dbms/tests => tests}/integration/test_version_update_after_mutation/test.py (100%) rename {dbms/tests => tests}/integration/test_zookeeper_config/__init__.py (100%) rename {dbms/tests => tests}/integration/test_zookeeper_config/configs/remote_servers.xml (100%) rename {dbms/tests => tests}/integration/test_zookeeper_config/configs/zookeeper_config_root_a.xml (100%) rename {dbms/tests => tests}/integration/test_zookeeper_config/configs/zookeeper_config_root_b.xml (100%) rename {dbms/tests => tests}/integration/test_zookeeper_config/configs/zookeeper_config_with_password.xml (100%) rename {dbms/tests => tests}/integration/test_zookeeper_config/test.py (100%) rename {dbms/tests => tests}/ints_dictionary.xml (100%) rename {dbms/tests => tests}/msan_suppressions.txt (100%) rename {dbms/tests => tests}/perf_drafts/accurate_comparisons/accurate_comparisons.sh (100%) rename {dbms/tests => tests}/perf_drafts/vert_merge/add_id_to_csv (100%) rename {dbms/tests => tests}/perf_drafts/vert_merge/ontime.struct (100%) rename {dbms/tests => tests}/perf_drafts/vert_merge/test_merges (100%) rename {dbms/tests => tests}/perf_drafts/vert_merge/wait_clickhouse_server (100%) rename {dbms/tests => tests}/performance/IPv4.xml (100%) rename {dbms/tests => tests}/performance/IPv6.xml (100%) rename {dbms/tests => tests}/performance/README.md (100%) rename {dbms/tests => tests}/performance/agg_functions_min_max_any.xml (100%) rename {dbms/tests => tests}/performance/analyze_array_tuples.xml (100%) rename {dbms/tests => tests}/performance/and_function.xml (100%) rename {dbms/tests => tests}/performance/arithmetic.xml (100%) rename {dbms/tests => tests}/performance/array_auc.xml (100%) rename {dbms/tests => tests}/performance/array_element.xml (100%) rename {dbms/tests => tests}/performance/array_fill.xml (100%) rename {dbms/tests => tests}/performance/array_join.xml (100%) rename {dbms/tests => tests}/performance/array_reduce.xml (100%) rename {dbms/tests => tests}/performance/base64.xml (100%) rename {dbms/tests => tests}/performance/base64_hits.xml (100%) rename {dbms/tests => tests}/performance/basename.xml (100%) rename {dbms/tests 
=> tests}/performance/bitCount.xml (100%) rename {dbms/tests => tests}/performance/bit_operations_fixed_string.xml (100%) rename {dbms/tests => tests}/performance/bit_operations_fixed_string_numbers.xml (100%) rename {dbms/tests => tests}/performance/bloom_filter.xml (100%) rename {dbms/tests => tests}/performance/bounding_ratio.xml (100%) rename {dbms/tests => tests}/performance/cidr.xml (100%) rename {dbms/tests => tests}/performance/codecs_float_insert.xml (100%) rename {dbms/tests => tests}/performance/codecs_float_select.xml (100%) rename {dbms/tests => tests}/performance/codecs_int_insert.xml (100%) rename {dbms/tests => tests}/performance/codecs_int_select.xml (100%) rename {dbms/tests => tests}/performance/collations.xml (100%) rename {dbms/tests => tests}/performance/column_column_comparison.xml (100%) rename {dbms/tests => tests}/performance/columns_hashing.xml (100%) rename {dbms/tests => tests}/performance/complex_array_creation.xml (100%) rename {dbms/tests => tests}/performance/concat_hits.xml (100%) rename {dbms/tests => tests}/performance/conditional.xml (100%) rename {dbms/tests => tests}/performance/consistent_hashes.xml (100%) rename {dbms/tests => tests}/performance/constant_column_comparison.xml (100%) rename {dbms/tests => tests}/performance/constant_column_search.xml (100%) rename {dbms/tests => tests}/performance/count.xml (100%) rename {dbms/tests => tests}/performance/cpu_synthetic.xml (100%) rename {dbms/tests => tests}/performance/create_benchmark_page.py (100%) rename {dbms/tests => tests}/performance/cryptographic_hashes.xml (100%) rename {dbms/tests => tests}/performance/date_parsing.xml (100%) rename {dbms/tests => tests}/performance/date_time.xml (100%) rename {dbms/tests => tests}/performance/date_time_64.xml (100%) rename {dbms/tests => tests}/performance/decimal_aggregates.xml (100%) rename {dbms/tests => tests}/performance/early_constant_folding.xml (100%) rename {dbms/tests => tests}/performance/empty_string_deserialization.xml (100%) rename {dbms/tests => tests}/performance/empty_string_serialization.xml (100%) rename {dbms/tests => tests}/performance/entropy.xml (100%) rename {dbms/tests => tests}/performance/first_significant_subdomain.xml (100%) rename {dbms/tests => tests}/performance/fixed_string16.xml (100%) rename {dbms/tests => tests}/performance/float_formatting.xml (100%) rename {dbms/tests => tests}/performance/float_parsing.xml (100%) rename {dbms/tests => tests}/performance/format_date_time.xml (100%) rename {dbms/tests => tests}/performance/functions_coding.xml (100%) rename {dbms/tests => tests}/performance/functions_geo.xml (100%) rename {dbms/tests => tests}/performance/general_purpose_hashes.xml (100%) rename {dbms/tests => tests}/performance/general_purpose_hashes_on_UUID.xml (100%) rename {dbms/tests => tests}/performance/generate_table_function.xml (100%) rename {dbms/tests => tests}/performance/great_circle_dist.xml (100%) rename {dbms/tests => tests}/performance/group_array_moving_sum.xml (100%) rename {dbms/tests => tests}/performance/h3.xml (100%) rename {dbms/tests => tests}/performance/if_array_num.xml (100%) rename {dbms/tests => tests}/performance/if_array_string.xml (100%) rename {dbms/tests => tests}/performance/if_string_const.xml (100%) rename {dbms/tests => tests}/performance/if_string_hits.xml (100%) rename {dbms/tests => tests}/performance/if_to_multiif.xml (100%) rename {dbms/tests => tests}/performance/information_value.xml (100%) rename {dbms/tests => tests}/performance/insert_values_with_expressions.xml (100%) 
rename {dbms/tests => tests}/performance/inserts_arrays_lowcardinality.xml (100%) rename {dbms/tests => tests}/performance/int_parsing.xml (100%) rename {dbms/tests => tests}/performance/jit_large_requests.xml (100%) rename {dbms/tests => tests}/performance/jit_small_requests.xml (100%) rename {dbms/tests => tests}/performance/joins_in_memory.xml (100%) rename {dbms/tests => tests}/performance/joins_in_memory_pmj.xml (100%) rename {dbms/tests => tests}/performance/json_extract_rapidjson.xml (100%) rename {dbms/tests => tests}/performance/json_extract_simdjson.xml (100%) rename {dbms/tests => tests}/performance/leftpad.xml (100%) rename {dbms/tests => tests}/performance/linear_regression.xml (100%) rename {dbms/tests => tests}/performance/logical_functions_large.xml (100%) rename {dbms/tests => tests}/performance/logical_functions_medium.xml (100%) rename {dbms/tests => tests}/performance/logical_functions_small.xml (100%) rename {dbms/tests => tests}/performance/math.xml (100%) rename {dbms/tests => tests}/performance/merge_table_streams.xml (100%) rename {dbms/tests => tests}/performance/merge_tree_huge_pk.xml (100%) rename {dbms/tests => tests}/performance/merge_tree_many_partitions.xml (100%) rename {dbms/tests => tests}/performance/merge_tree_many_partitions_2.xml (100%) rename {dbms/tests => tests}/performance/merge_tree_simple_select.xml (100%) rename {dbms/tests => tests}/performance/mingroupby-orderbylimit1.xml (100%) rename {dbms/tests => tests}/performance/modulo.xml (100%) rename {dbms/tests => tests}/performance/ngram_distance.xml (100%) rename {dbms/tests => tests}/performance/number_formatting_formats.xml (100%) rename {dbms/tests => tests}/performance/nyc_taxi.xml (100%) rename {dbms/tests => tests}/performance/order_by_decimals.xml (100%) rename {dbms/tests => tests}/performance/order_by_read_in_order.xml (100%) rename {dbms/tests => tests}/performance/order_by_single_column.xml (100%) rename {dbms/tests => tests}/performance/parallel_insert.xml (100%) rename {dbms/tests => tests}/performance/parse_engine_file.xml (100%) rename {dbms/tests => tests}/performance/pre_limit_no_sorting.xml (100%) rename {dbms/tests => tests}/performance/prewhere.xml (100%) rename {dbms/tests => tests}/performance/random_printable_ascii.xml (100%) rename {dbms/tests => tests}/performance/range.xml (100%) rename {dbms/tests => tests}/performance/read_hits_with_aio.xml (100%) rename {dbms/tests => tests}/performance/right.xml (100%) rename {dbms/tests => tests}/performance/round_down.xml (100%) rename {dbms/tests => tests}/performance/round_methods.xml (100%) rename {dbms/tests => tests}/performance/scalar.xml (100%) rename {dbms/tests => tests}/performance/select_format.xml (100%) rename {dbms/tests => tests}/performance/set.xml (100%) rename {dbms/tests => tests}/performance/set_hits.xml (100%) rename {dbms/tests => tests}/performance/set_index.xml (100%) rename {dbms/tests => tests}/performance/simple_join_query.xml (100%) rename {dbms/tests => tests}/performance/slices_hits.xml (100%) rename {dbms/tests => tests}/performance/sort.xml (100%) rename {dbms/tests => tests}/performance/string_join.xml (100%) rename {dbms/tests => tests}/performance/string_set.xml (100%) rename {dbms/tests => tests}/performance/string_sort.xml (100%) rename {dbms/tests => tests}/performance/sum_map.xml (100%) rename {dbms/tests => tests}/performance/synthetic_hardware_benchmark.xml (100%) rename {dbms/tests => tests}/performance/trim_numbers.xml (100%) rename {dbms/tests => tests}/performance/trim_urls.xml (100%) 
rename {dbms/tests => tests}/performance/trim_whitespace.xml (100%) rename {dbms/tests => tests}/performance/uniq.xml (100%) rename {dbms/tests => tests}/performance/url_hits.xml (100%) rename {dbms/tests => tests}/performance/vectorize_aggregation_combinators.xml (100%) rename {dbms/tests => tests}/performance/visit_param_extract_raw.xml (100%) rename {dbms/tests => tests}/performance/website.xml (100%) rename {dbms/tests => tests}/queries/.gitignore (100%) rename {dbms/tests => tests}/queries/0_stateless/00001_select_1.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00001_select_1.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00002_system_numbers.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00002_system_numbers.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00003_reinterpret_as_string.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00003_reinterpret_as_string.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00004_shard_format_ast_and_remote_table.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00004_shard_format_ast_and_remote_table.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00005_shard_format_ast_and_remote_table_lambda.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00005_shard_format_ast_and_remote_table_lambda.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00006_extremes_and_subquery_from.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00006_extremes_and_subquery_from.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00007_array.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00007_array.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00008_array_join.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00008_array_join.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00009_array_join_subquery.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00009_array_join_subquery.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00010_big_array_join.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00010_big_array_join.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00011_array_join_alias.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00011_array_join_alias.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00012_array_join_alias_2.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00012_array_join_alias_2.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00013_create_table_with_arrays.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00013_create_table_with_arrays.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00014_select_from_table_with_nested.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00014_select_from_table_with_nested.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00015_totals_having_constants.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00015_totals_having_constants.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00016_totals_having_constants.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00016_totals_having_constants.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00017_in_subquery_with_empty_result.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00017_in_subquery_with_empty_result.sql (100%) rename 
{dbms/tests => tests}/queries/0_stateless/00018_distinct_in_subquery.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00018_distinct_in_subquery.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00019_shard_quantiles_totals_distributed.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00019_shard_quantiles_totals_distributed.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00020_sorting_arrays.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00020_sorting_arrays.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00021_sorting_arrays.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00021_sorting_arrays.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00022_func_higher_order_and_constants.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00022_func_higher_order_and_constants.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00023_agg_select_agg_subquery.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00023_agg_select_agg_subquery.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00024_unused_array_join_in_subquery.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00024_unused_array_join_in_subquery.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00025_implicitly_used_subquery_column.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00025_implicitly_used_subquery_column.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00026_shard_something_distributed.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00026_shard_something_distributed.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00027_distinct_and_order_by.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00027_distinct_and_order_by.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00027_simple_argMinArray.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00027_simple_argMinArray.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00028_shard_big_agg_aj_distributed.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00028_shard_big_agg_aj_distributed.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00029_test_zookeeper_optimize_exception.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00029_test_zookeeper_optimize_exception.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00030_alter_table.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00030_alter_table.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00031_parser_number.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00031_parser_number.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00032_fixed_string_to_string.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00032_fixed_string_to_string.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00033_fixed_string_to_string.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00033_fixed_string_to_string.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00034_fixed_string_to_number.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00034_fixed_string_to_number.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00035_function_array_return_type.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00035_function_array_return_type.sql (100%) rename {dbms/tests => 
tests}/queries/0_stateless/00036_array_element.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00036_array_element.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00037_totals_limit.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00037_totals_limit.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00038_totals_limit.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00038_totals_limit.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00039_inserts_through_http.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00039_inserts_through_http.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00040_array_enumerate_uniq.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00040_array_enumerate_uniq.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00041_aggregation_remap.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00041_aggregation_remap.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00041_big_array_join.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00041_big_array_join.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00042_set.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00042_set.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00043_summing_empty_part.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00043_summing_empty_part.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00044_sorting_by_string_descending.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00044_sorting_by_string_descending.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00045_sorting_by_fixed_string_descending.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00045_sorting_by_fixed_string_descending.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00046_stored_aggregates_simple.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00046_stored_aggregates_simple.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00047_stored_aggregates_complex.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00047_stored_aggregates_complex.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00048_a_stored_aggregates_merge.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00048_a_stored_aggregates_merge.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00048_b_stored_aggregates_merge.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00048_b_stored_aggregates_merge.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00049_any_left_join.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00049_any_left_join.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00050_any_left_join.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00050_any_left_join.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00051_any_inner_join.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00051_any_inner_join.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00052_all_left_join.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00052_all_left_join.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00053_all_inner_join.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00053_all_inner_join.sql (100%) rename {dbms/tests => 
tests}/queries/0_stateless/00054_join_string.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00054_join_string.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00055_join_two_numbers.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00055_join_two_numbers.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00056_join_number_string.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00056_join_number_string.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00057_join_aliases.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00057_join_aliases.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00059_shard_global_in.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00059_shard_global_in.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00060_date_lut.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00060_date_lut.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00061_merge_tree_alter.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00061_merge_tree_alter.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00062_replicated_merge_tree_alter_zookeeper.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00062_replicated_merge_tree_alter_zookeeper.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00063_check_query.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00063_check_query.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00064_negate_bug.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00064_negate_bug.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00065_shard_float_literals_formatting.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00065_shard_float_literals_formatting.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00066_group_by_in.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00066_group_by_in.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00067_replicate_segfault.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00067_replicate_segfault.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00068_empty_tiny_log.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00068_empty_tiny_log.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00069_date_arithmetic.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00069_date_arithmetic.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00070_insert_fewer_columns_http.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00070_insert_fewer_columns_http.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00071_insert_fewer_columns.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00071_insert_fewer_columns.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00072_in_types.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00072_in_types.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00073_merge_sorting_empty_array_joined.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00073_merge_sorting_empty_array_joined.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00075_shard_formatting_negate_of_negative_literal.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00075_shard_formatting_negate_of_negative_literal.sql (100%) rename {dbms/tests => 
tests}/queries/0_stateless/00076_ip_coding_functions.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00076_ip_coding_functions.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00077_set_keys_fit_128_bits_many_blocks.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00077_set_keys_fit_128_bits_many_blocks.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00078_string_concat.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00078_string_concat.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00079_defaulted_columns.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00079_defaulted_columns.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00080_show_tables_and_system_tables.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00080_show_tables_and_system_tables.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00081_int_div_or_zero.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00081_int_div_or_zero.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00082_append_trailing_char_if_absent.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00082_append_trailing_char_if_absent.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00083_create_merge_tree_zookeeper.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00083_create_merge_tree_zookeeper.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00084_summing_merge_tree.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00084_summing_merge_tree.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00085_visible_width_of_tuple_of_dates.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00085_visible_width_of_tuple_of_dates.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00086_concat_nary_const_with_nonconst_segfault.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00086_concat_nary_const_with_nonconst_segfault.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00087_distinct_of_empty_arrays.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00087_distinct_of_empty_arrays.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00087_math_functions.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00087_math_functions.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00088_distinct_of_arrays_of_strings.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00088_distinct_of_arrays_of_strings.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00089_group_by_arrays_of_fixed.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00089_group_by_arrays_of_fixed.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00090_union_race_conditions_1.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00090_union_race_conditions_1.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00091_union_race_conditions_2.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00091_union_race_conditions_2.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00092_union_race_conditions_3.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00092_union_race_conditions_3.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00093_union_race_conditions_4.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00093_union_race_conditions_4.sh (100%) rename 
{dbms/tests => tests}/queries/0_stateless/00094_union_race_conditions_5.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00094_union_race_conditions_5.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00096_aggregation_min_if.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00096_aggregation_min_if.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00097_long_storage_buffer_race_condition.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00097_long_storage_buffer_race_condition.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00097_long_storage_buffer_race_condition_mt.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00097_long_storage_buffer_race_condition_mt.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00098_1_union_all.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00098_1_union_all.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00098_2_union_all.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00098_2_union_all.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00098_3_union_all.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00098_3_union_all.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00098_4_union_all.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00098_4_union_all.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00098_5_union_all.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00098_5_union_all.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00098_6_union_all.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00098_6_union_all.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00098_7_union_all.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00098_7_union_all.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00098_8_union_all.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00098_8_union_all.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00098_9_union_all.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00098_9_union_all.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00098_a_union_all.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00098_a_union_all.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00098_b_union_all.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00098_b_union_all.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00098_c_union_all.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00098_c_union_all.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00098_d_union_all.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00098_d_union_all.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00098_e_union_all.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00098_e_union_all.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00098_f_union_all.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00098_f_union_all.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00098_g_union_all.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00098_g_union_all.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00098_h_union_all.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00098_h_union_all.sql (100%) rename {dbms/tests => 
tests}/queries/0_stateless/00098_j_union_all.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00098_j_union_all.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00098_k_union_all.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00098_k_union_all.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00098_l_union_all.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00098_l_union_all.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00098_shard_i_union_all.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00098_shard_i_union_all.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00099_join_many_blocks_segfault.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00099_join_many_blocks_segfault.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00100_subquery_table_identifier.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00100_subquery_table_identifier.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00101_materialized_views_and_insert_without_explicit_database.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00101_materialized_views_and_insert_without_explicit_database.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00102_insert_into_temporary_table.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00102_insert_into_temporary_table.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00103_ipv4_num_to_string_class_c.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00103_ipv4_num_to_string_class_c.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00104_totals_having_mode.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00104_totals_having_mode.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00105_shard_collations.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00105_shard_collations.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00106_totals_after_having.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00106_totals_after_having.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00107_totals_after_having.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00107_totals_after_having.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00108_shard_totals_after_having.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00108_shard_totals_after_having.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00109_shard_totals_after_having.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00109_shard_totals_after_having.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00110_external_sort.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00110_external_sort.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00111_shard_external_sort_distributed.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00111_shard_external_sort_distributed.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00112_shard_totals_after_having.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00112_shard_totals_after_having.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00113_shard_group_array.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00113_shard_group_array.sql (100%) rename {dbms/tests => 
tests}/queries/0_stateless/00114_float_type_result_of_division.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00114_float_type_result_of_division.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00115_shard_in_incomplete_result.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00115_shard_in_incomplete_result.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00116_storage_set.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00116_storage_set.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00117_parsing_arrays.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00117_parsing_arrays.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00118_storage_join.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00118_storage_join.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00119_storage_join.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00119_storage_join.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00120_join_and_group_by.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00120_join_and_group_by.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00121_drop_column_zookeeper.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00121_drop_column_zookeeper.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00122_join_with_subquery_with_subquery.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00122_join_with_subquery_with_subquery.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00123_shard_unmerged_result_when_max_distributed_connections_is_one.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00123_shard_unmerged_result_when_max_distributed_connections_is_one.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00124_shard_distributed_with_many_replicas.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00124_shard_distributed_with_many_replicas.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00125_array_element_of_array_of_tuple.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00125_array_element_of_array_of_tuple.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00126_buffer.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00126_buffer.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00127_group_by_concat.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00127_group_by_concat.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00128_group_by_number_and_fixed_string.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00128_group_by_number_and_fixed_string.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00129_quantile_timing_weighted.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00129_quantile_timing_weighted.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00131_set_hashed.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00131_set_hashed.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00132_sets.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00132_sets.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00133_long_shard_memory_tracker_and_exception_safety.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00133_long_shard_memory_tracker_and_exception_safety.sh (100%) rename {dbms/tests => 
tests}/queries/0_stateless/00134_aggregation_by_fixed_string_of_size_1_2_4_8.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00134_aggregation_by_fixed_string_of_size_1_2_4_8.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00135_duplicate_group_by_keys_segfault.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00135_duplicate_group_by_keys_segfault.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00136_duplicate_order_by_elems.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00136_duplicate_order_by_elems.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00137_in_constants.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00137_in_constants.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00138_table_aliases.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00138_table_aliases.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00140_parse_unix_timestamp_as_datetime.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00140_parse_unix_timestamp_as_datetime.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00140_prewhere_column_order.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00140_prewhere_column_order.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00141_parse_timestamp_as_datetime.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00141_parse_timestamp_as_datetime.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00142_parse_timestamp_as_datetime.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00142_parse_timestamp_as_datetime.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00143_number_classification_functions.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00143_number_classification_functions.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00144_empty_regexp.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00144_empty_regexp.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00145_empty_likes.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00145_empty_likes.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00146_summing_merge_tree_nested_map.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00146_summing_merge_tree_nested_map.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00147_alter_nested_default.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00147_alter_nested_default.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00148_summing_merge_tree_aggregate_function.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00148_summing_merge_tree_aggregate_function.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00148_summing_merge_tree_nested_map_multiple_values.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00148_summing_merge_tree_nested_map_multiple_values.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00149_function_url_hash.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00149_function_url_hash.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00150_with_totals_and_join.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00150_with_totals_and_join.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00151_tuple_with_array.reference (100%) rename {dbms/tests => 
tests}/queries/0_stateless/00151_tuple_with_array.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00152_totals_in_subquery.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00152_totals_in_subquery.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00153_transform.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00153_transform.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00154_shard_distributed_with_distinct.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00154_shard_distributed_with_distinct.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00155_long_merges.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00155_long_merges.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00156_array_map_to_constant.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00156_array_map_to_constant.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00157_aliases_and_lambda_formal_parameters.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00157_aliases_and_lambda_formal_parameters.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00158_buffer_and_nonexistent_table.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00158_buffer_and_nonexistent_table.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00159_whitespace_in_columns_list.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00159_whitespace_in_columns_list.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00160_merge_and_index_in_in.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00160_merge_and_index_in_in.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00161_rounding_functions.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00161_rounding_functions.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00162_shard_global_join.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00162_shard_global_join.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00163_shard_join_with_empty_table.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00163_shard_join_with_empty_table.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00164_not_chain.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00164_not_chain.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00165_transform_non_const_default.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00165_transform_non_const_default.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00166_functions_of_aggregation_states.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00166_functions_of_aggregation_states.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00167_settings_inside_query.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00167_settings_inside_query.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00168_buffer_defaults.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00168_buffer_defaults.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00169_join_constant_keys.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00169_join_constant_keys.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00170_lower_upper_utf8.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00170_lower_upper_utf8.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00171_shard_array_of_tuple_remote.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00171_shard_array_of_tuple_remote.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00172_constexprs_in_set.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00172_constexprs_in_set.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00173_compare_date_time_with_constant_string.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00173_compare_date_time_with_constant_string.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00174_compare_date_time_with_constant_string_in_in.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00174_compare_date_time_with_constant_string_in_in.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00175_if_num_arrays.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00175_if_num_arrays.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00176_if_string_arrays.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00176_if_string_arrays.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00177_inserts_through_http_parts.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00177_inserts_through_http_parts.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00178_function_replicate.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00178_function_replicate.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00179_lambdas_with_common_expressions_and_filter.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00179_lambdas_with_common_expressions_and_filter.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00180_attach_materialized_view.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00180_attach_materialized_view.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00181_aggregate_functions_statistics.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00181_aggregate_functions_statistics.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00181_aggregate_functions_statistics_stable.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00181_aggregate_functions_statistics_stable.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00182_functions_higher_order_and_consts.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00182_functions_higher_order_and_consts.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00183_skip_unavailable_shards.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00183_skip_unavailable_shards.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00184_shard_distributed_group_by_no_merge.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00184_shard_distributed_group_by_no_merge.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00185_array_literals.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00185_array_literals.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00186_very_long_arrays.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00186_very_long_arrays.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00187_like_regexp_prefix.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00187_like_regexp_prefix.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00188_constants_as_arguments_of_aggregate_functions.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00188_constants_as_arguments_of_aggregate_functions.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00189_time_zones.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00189_time_zones.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00190_non_constant_array_of_constant_data.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00190_non_constant_array_of_constant_data.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00191_aggregating_merge_tree_and_final.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00191_aggregating_merge_tree_and_final.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00192_least_greatest.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00192_least_greatest.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00193_parallel_replicas.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00193_parallel_replicas.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00194_identity.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00194_identity.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00195_shard_union_all_and_global_in.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00195_shard_union_all_and_global_in.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00196_float32_formatting.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00196_float32_formatting.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00197_if_fixed_string.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00197_if_fixed_string.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00198_group_by_empty_arrays.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00198_group_by_empty_arrays.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00199_ternary_operator_type_check.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00199_ternary_operator_type_check.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00200_shard_distinct_order_by_limit_distributed.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00200_shard_distinct_order_by_limit_distributed.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00201_array_uniq.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00201_array_uniq.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00202_cross_join.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00202_cross_join.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00203_full_join.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00203_full_join.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00204_extract_url_parameter.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00204_extract_url_parameter.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00205_scalar_subqueries.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00205_scalar_subqueries.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00206_empty_array_to_single.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00206_empty_array_to_single.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00207_left_array_join.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00207_left_array_join.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00208_agg_state_merge.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00208_agg_state_merge.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00209_insert_select_extremes.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00209_insert_select_extremes.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00210_insert_select_extremes_http.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00210_insert_select_extremes_http.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00211_shard_query_formatting_aliases.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00211_shard_query_formatting_aliases.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00212_shard_aggregate_function_uniq.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00212_shard_aggregate_function_uniq.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00213_multiple_global_in.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00213_multiple_global_in.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00214_primary_key_order.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00214_primary_key_order.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00215_primary_key_order_zookeeper.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00215_primary_key_order_zookeeper.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00216_bit_test_function_family.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00216_bit_test_function_family.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00217_shard_global_subquery_columns_with_same_name.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00217_shard_global_subquery_columns_with_same_name.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00218_like_regexp_newline.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00218_like_regexp_newline.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00219_full_right_join_column_order.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00219_full_right_join_column_order.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00220_shard_with_totals_in_subquery_remote_and_limit.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00220_shard_with_totals_in_subquery_remote_and_limit.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00222_sequence_aggregate_function_family.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00222_sequence_aggregate_function_family.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00223_shard_distributed_aggregation_memory_efficient.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00223_shard_distributed_aggregation_memory_efficient.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00224_shard_distributed_aggregation_memory_efficient_and_overflows.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00224_shard_distributed_aggregation_memory_efficient_and_overflows.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00225_join_duplicate_columns.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00225_join_duplicate_columns.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00226_zookeeper_deduplication_and_unexpected_parts.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00226_zookeeper_deduplication_and_unexpected_parts.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00227_quantiles_timing_arbitrary_order.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00227_quantiles_timing_arbitrary_order.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00228_shard_quantiles_deterministic_merge_overflow.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00228_shard_quantiles_deterministic_merge_overflow.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00229_prewhere_column_missing.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00229_prewhere_column_missing.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00230_array_functions_has_count_equal_index_of_non_const_second_arg.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00230_array_functions_has_count_equal_index_of_non_const_second_arg.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00231_format_vertical_raw.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00231_format_vertical_raw.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00232_format_readable_size.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00232_format_readable_size.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00233_position_function_family.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00233_position_function_family.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00234_disjunctive_equality_chains_optimization.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00234_disjunctive_equality_chains_optimization.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00235_create_temporary_table_as.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00235_create_temporary_table_as.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00236_replicated_drop_on_non_leader_zookeeper.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00236_replicated_drop_on_non_leader_zookeeper.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00237_group_by_arrays.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00237_group_by_arrays.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00238_removal_of_temporary_columns.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00238_removal_of_temporary_columns.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00239_type_conversion_in_in.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00239_type_conversion_in_in.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00240_replace_substring_loop.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00240_replace_substring_loop.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00250_tuple_comparison.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00250_tuple_comparison.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00251_has_types.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00251_has_types.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00252_shard_global_in_aggregate_function.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00252_shard_global_in_aggregate_function.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00253_insert_recursive_defaults.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00253_insert_recursive_defaults.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00254_tuple_extremes.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00254_tuple_extremes.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00255_array_concat_string.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00255_array_concat_string.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00256_reverse.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00256_reverse.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00257_shard_no_aggregates_and_constant_keys.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00257_shard_no_aggregates_and_constant_keys.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00258_materializing_tuples.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00258_materializing_tuples.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00259_hashing_tuples.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00259_hashing_tuples.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00260_like_and_curly_braces.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00260_like_and_curly_braces.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00261_storage_aliases_and_array_join.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00261_storage_aliases_and_array_join.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00262_alter_alias.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00262_alter_alias.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00263_merge_aggregates_and_overflow.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00263_merge_aggregates_and_overflow.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00264_uniq_many_args.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00264_uniq_many_args.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00265_http_content_type_format_timezone.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00265_http_content_type_format_timezone.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00266_read_overflow_mode.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00266_read_overflow_mode.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00266_shard_global_subquery_and_aliases.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00266_shard_global_subquery_and_aliases.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00267_tuple_array_access_operators_priority.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00267_tuple_array_access_operators_priority.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00268_aliases_without_as_keyword.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00268_aliases_without_as_keyword.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00269_database_table_whitespace.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00269_database_table_whitespace.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00270_views_query_processing_stage.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00270_views_query_processing_stage.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00271_agg_state_and_totals.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00271_agg_state_and_totals.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00272_union_all_and_in_subquery.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00272_union_all_and_in_subquery.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00273_quantiles.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00273_quantiles.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00274_shard_group_array.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00274_shard_group_array.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00275_shard_quantiles_weighted.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00275_shard_quantiles_weighted.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00276_sample.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00276_sample.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00277_array_filter.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00277_array_filter.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00278_insert_already_sorted.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00278_insert_already_sorted.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00279_quantiles_permuted_args.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00279_quantiles_permuted_args.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00280_hex_escape_sequence.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00280_hex_escape_sequence.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00281_compile_sizeof_packed.re (100%)
rename {dbms/tests => tests}/queries/0_stateless/00282_merging.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00282_merging.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00283_column_cut.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00283_column_cut.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00284_external_aggregation.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00284_external_aggregation.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00285_not_all_data_in_totals.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00285_not_all_data_in_totals.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00286_format_long_negative_float.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00286_format_long_negative_float.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00287_column_const_with_nan.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00287_column_const_with_nan.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00288_empty_stripelog.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00288_empty_stripelog.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00290_shard_aggregation_memory_efficient.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00290_shard_aggregation_memory_efficient.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00291_array_reduce.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00291_array_reduce.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00292_parser_tuple_element.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00292_parser_tuple_element.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00293_shard_max_subquery_depth.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00293_shard_max_subquery_depth.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00294_shard_enums.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00294_shard_enums.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00295_global_in_one_shard_rows_before_limit.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00295_global_in_one_shard_rows_before_limit.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00296_url_parameters.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00296_url_parameters.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00297_attach_negative_numbers_zookeeper.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00297_attach_negative_numbers_zookeeper.sql.disabled (100%)
rename {dbms/tests => tests}/queries/0_stateless/00298_enum_width_and_cast.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00298_enum_width_and_cast.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00299_stripe_log_multiple_inserts.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00299_stripe_log_multiple_inserts.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00300_csv.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00300_csv.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00301_csv.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00301_csv.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00302_http_compression.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00302_http_compression.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00304_http_external_data.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00304_http_external_data.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00305_http_and_readonly.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00305_http_and_readonly.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00306_insert_values_and_expressions.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00306_insert_values_and_expressions.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00307_format_xml.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00307_format_xml.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00308_write_buffer_valid_utf8.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00308_write_buffer_valid_utf8.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00309_formats.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00309_formats.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00310_tskv.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00310_tskv.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00311_array_primary_key.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00311_array_primary_key.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00312_position_case_insensitive_utf8.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00312_position_case_insensitive_utf8.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00313_const_totals_extremes.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00313_const_totals_extremes.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00314_sample_factor_virtual_column.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00314_sample_factor_virtual_column.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00315_quantile_off_by_one.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00315_quantile_off_by_one.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00316_rounding_functions_and_empty_block.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00316_rounding_functions_and_empty_block.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00317_in_tuples_and_out_of_range_values.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00317_in_tuples_and_out_of_range_values.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00318_pk_tuple_order.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00318_pk_tuple_order.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00319_index_for_like.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00319_index_for_like.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00320_between.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00320_between.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00321_pk_set.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00321_pk_set.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00322_disable_checksumming.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00322_disable_checksumming.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00323_quantiles_timing_bug.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00323_quantiles_timing_bug.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00324_hashing_enums.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00324_hashing_enums.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00325_replacing_merge_tree.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00325_replacing_merge_tree.sql.disabled (100%)
rename {dbms/tests => tests}/queries/0_stateless/00326_long_function_multi_if.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00326_long_function_multi_if.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00327_summing_composite_nested.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00327_summing_composite_nested.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00328_long_case_construction.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00328_long_case_construction.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00330_view_subqueries.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00330_view_subqueries.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00331_final_and_prewhere.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00331_final_and_prewhere.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00332_quantile_timing_memory_leak.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00332_quantile_timing_memory_leak.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00333_parser_number_bug.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00333_parser_number_bug.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00334_column_aggregate_function_limit.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00334_column_aggregate_function_limit.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00335_bom.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00335_bom.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00336_shard_stack_trace.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00336_shard_stack_trace.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00337_shard_any_heavy.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00337_shard_any_heavy.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00338_replicate_array_of_strings.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00338_replicate_array_of_strings.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00339_parsing_bad_arrays.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00339_parsing_bad_arrays.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00340_squashing_insert_select.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00340_squashing_insert_select.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00341_squashing_insert_select2.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00341_squashing_insert_select2.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00342_escape_sequences.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00342_escape_sequences.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00343_array_element_generic.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00343_array_element_generic.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00344_row_number_in_all_blocks.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00344_row_number_in_all_blocks.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00345_index_accurate_comparison.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00345_index_accurate_comparison.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00346_if_tuple.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00346_if_tuple.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00347_has_tuple.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00347_has_tuple.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00348_tuples.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00348_tuples.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00349_visible_width.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00349_visible_width.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00350_count_distinct.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00350_count_distinct.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00351_select_distinct_arrays_tuples.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00351_select_distinct_arrays_tuples.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00352_external_sorting_and_constants.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00352_external_sorting_and_constants.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00353_join_by_tuple.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00353_join_by_tuple.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00354_host_command_line_option.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00354_host_command_line_option.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00355_array_of_non_const_convertible_types.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00355_array_of_non_const_convertible_types.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00356_analyze_aggregations_and_union_all.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00356_analyze_aggregations_and_union_all.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00357_to_string_complex_types.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00357_to_string_complex_types.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00358_from_string_complex_types.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00358_from_string_complex_types.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00359_convert_or_zero_functions.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00359_convert_or_zero_functions.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00360_to_date_from_string_with_datetime.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00360_to_date_from_string_with_datetime.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00361_shared_array_offsets_and_squash_blocks.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00361_shared_array_offsets_and_squash_blocks.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00362_great_circle_distance.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00362_great_circle_distance.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00363_defaults.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00363_defaults.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00364_java_style_denormals.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00364_java_style_denormals.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00365_statistics_in_formats.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00365_statistics_in_formats.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00366_multi_statements.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00366_multi_statements.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00367_visible_width_of_array_tuple_enum.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00367_visible_width_of_array_tuple_enum.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00368_format_option_collision.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00368_format_option_collision.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00369_int_div_of_float.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00369_int_div_of_float.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00370_duplicate_columns_in_subqueries.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00370_duplicate_columns_in_subqueries.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00371_union_all.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00371_union_all.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00372_cors_header.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00372_cors_header.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00373_group_by_tuple.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00373_group_by_tuple.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00374_any_last_if_merge.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00374_any_last_if_merge.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00374_json_each_row_input_with_noisy_fields.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00374_json_each_row_input_with_noisy_fields.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00375_shard_group_uniq_array_of_string.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00375_shard_group_uniq_array_of_string.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00376_shard_group_uniq_array_of_int_array.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00376_shard_group_uniq_array_of_int_array.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00377_shard_group_uniq_array_of_string_array.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00377_shard_group_uniq_array_of_string_array.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00378_json_quote_64bit_integers.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00378_json_quote_64bit_integers.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00379_system_processes_port.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00379_system_processes_port.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00380_client_break_at_exception_in_batch_mode.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00380_client_break_at_exception_in_batch_mode.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00381_first_significant_subdomain.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00381_first_significant_subdomain.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00383_utf8_validation.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00383_utf8_validation.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00384_column_aggregate_function_insert_from.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00384_column_aggregate_function_insert_from.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00385_storage_file_and_clickhouse-local_app.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00385_storage_file_and_clickhouse-local_app.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00386_enum_in_pk.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00386_enum_in_pk.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00386_has_column_in_table.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00386_has_column_in_table.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00386_long_in_pk.python (100%)
rename {dbms/tests => tests}/queries/0_stateless/00386_long_in_pk.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00386_long_in_pk.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00387_use_client_time_zone.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00387_use_client_time_zone.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00388_enum_with_totals.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00388_enum_with_totals.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00389_concat_operator.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00389_concat_operator.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00390_array_sort.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00390_array_sort.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00392_enum_nested_alter.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00392_enum_nested_alter.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00393_if_with_constant_condition.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00393_if_with_constant_condition.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00394_new_nested_column_keeps_offsets.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00394_new_nested_column_keeps_offsets.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00394_replaceall_vector_fixed.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00394_replaceall_vector_fixed.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00395_nullable.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00395_nullable.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00396_uuid.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00396_uuid.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00397_tsv_format_synonym.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00397_tsv_format_synonym.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00398_url_functions.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00398_url_functions.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00399_group_uniq_array_date_datetime.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00399_group_uniq_array_date_datetime.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00400_client_external_options.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00400_client_external_options.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00401_merge_and_stripelog.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00401_merge_and_stripelog.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00402_nan_and_extremes.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00402_nan_and_extremes.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00403_to_start_of_day.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00403_to_start_of_day.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00404_null_literal.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00404_null_literal.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00405_pretty_formats.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00405_pretty_formats.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00406_tuples_with_nulls.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00406_tuples_with_nulls.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00407_parsing_nulls.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00407_parsing_nulls.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00408_http_keep_alive.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00408_http_keep_alive.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00409_shard_limit_by.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00409_shard_limit_by.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00410_aggregation_combinators_with_arenas.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00410_aggregation_combinators_with_arenas.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00411_long_accurate_number_comparison.python (100%)
rename {dbms/tests => tests}/queries/0_stateless/00411_long_accurate_number_comparison_float.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00411_long_accurate_number_comparison_float.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00411_long_accurate_number_comparison_int1.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00411_long_accurate_number_comparison_int1.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00411_long_accurate_number_comparison_int2.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00411_long_accurate_number_comparison_int2.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00411_long_accurate_number_comparison_int3.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00411_long_accurate_number_comparison_int3.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00411_long_accurate_number_comparison_int4.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00411_long_accurate_number_comparison_int4.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00411_merge_tree_where_const_in_set.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00411_merge_tree_where_const_in_set.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00412_logical_expressions_optimizer.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00412_logical_expressions_optimizer.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00413_distinct.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00413_distinct.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00413_least_greatest_new_behavior.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00413_least_greatest_new_behavior.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00414_time_zones_direct_conversion.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00414_time_zones_direct_conversion.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00415_into_outfile.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00415_into_outfile.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00416_pocopatch_progress_in_http_headers.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00416_pocopatch_progress_in_http_headers.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00417_kill_query.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00417_kill_query.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00417_system_build_options.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00417_system_build_options.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00418_input_format_allow_errors.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00418_input_format_allow_errors.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00419_show_sql_queries.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00419_show_sql_queries.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00420_null_in_scalar_subqueries.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00420_null_in_scalar_subqueries.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00421_storage_merge__table_index.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00421_storage_merge__table_index.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00422_hash_function_constexpr.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00422_hash_function_constexpr.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00423_storage_log_single_thread.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00423_storage_log_single_thread.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00424_shard_aggregate_functions_of_nullable.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00424_shard_aggregate_functions_of_nullable.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00425_count_nullable.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00425_count_nullable.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00426_nulls_sorting.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00426_nulls_sorting.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00427_alter_primary_key.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00427_alter_primary_key.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00429_long_http_bufferization.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00429_long_http_bufferization.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00429_point_in_ellipses.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00429_point_in_ellipses.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00430_https_server.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00430_https_server.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00431_if_nulls.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00431_if_nulls.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00432_aggregate_function_scalars_and_constants.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00432_aggregate_function_scalars_and_constants.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00433_ifnull.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00433_ifnull.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00434_tonullable.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00434_tonullable.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00435_coalesce.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00435_coalesce.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00436_convert_charset.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00436_convert_charset.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00436_fixed_string_16_comparisons.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00436_fixed_string_16_comparisons.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00437_nulls_first_last.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00437_nulls_first_last.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00438_bit_rotate.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00438_bit_rotate.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00439_fixed_string_filter.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00439_fixed_string_filter.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00440_nulls_merge_tree.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00440_nulls_merge_tree.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00441_nulls_in.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00441_nulls_in.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00442_filter_by_nullable.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00442_filter_by_nullable.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00443_merge_tree_uniform_read_distribution_0.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00443_merge_tree_uniform_read_distribution_0.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00443_optimize_final_vertical_merge.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00443_optimize_final_vertical_merge.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00443_preferred_block_size_bytes.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00443_preferred_block_size_bytes.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00444_join_use_nulls.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00444_join_use_nulls.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00445_join_nullable_keys.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00445_join_nullable_keys.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00446_clear_column_in_partition_concurrent_zookeeper.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00446_clear_column_in_partition_concurrent_zookeeper.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00446_clear_column_in_partition_zookeeper.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00446_clear_column_in_partition_zookeeper.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00447_foreach_modifier.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00447_foreach_modifier.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00448_replicate_nullable_tuple_generic.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00448_replicate_nullable_tuple_generic.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00448_to_string_cut_to_zero.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00448_to_string_cut_to_zero.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00449_filter_array_nullable_tuple.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00449_filter_array_nullable_tuple.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00450_higher_order_and_nullable.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00450_higher_order_and_nullable.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00451_left_array_join_and_constants.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00451_left_array_join_and_constants.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00452_left_array_join_and_nullable.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00452_left_array_join_and_nullable.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00453_cast_enum.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00453_cast_enum.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00453_top_k.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00453_top_k.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00456_alter_nullable.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00456_alter_nullable.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00457_log_tinylog_stripelog_nullable.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00457_log_tinylog_stripelog_nullable.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00458_merge_type_cast.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00458_merge_type_cast.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00459_group_array_insert_at.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00459_group_array_insert_at.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00460_vertical_and_totals_extremes.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00460_vertical_and_totals_extremes.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00461_default_value_of_argument_type.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00461_default_value_of_argument_type.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00462_json_true_false_literals.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00462_json_true_false_literals.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00463_long_sessions_in_http_interface.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00463_long_sessions_in_http_interface.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00464_array_element_out_of_range.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00464_array_element_out_of_range.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00464_sort_all_constant_columns.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00464_sort_all_constant_columns.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00465_nullable_default.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00465_nullable_default.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00466_comments_in_keyword.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00466_comments_in_keyword.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00467_qualified_names.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00467_qualified_names.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00468_array_join_multiple_arrays_and_use_original_column.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00468_array_join_multiple_arrays_and_use_original_column.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00469_comparison_of_strings_containing_null_char.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00469_comparison_of_strings_containing_null_char.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00470_identifiers_in_double_quotes.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00470_identifiers_in_double_quotes.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00471_sql_style_quoting.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00471_sql_style_quoting.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00472_compare_uuid_with_constant_string.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00472_compare_uuid_with_constant_string.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00472_create_view_if_not_exists.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00472_create_view_if_not_exists.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00473_output_format_json_quote_denormals.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00473_output_format_json_quote_denormals.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00474_readonly_settings.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00474_readonly_settings.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00475_in_join_db_table.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00475_in_join_db_table.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00476_pretty_formats_and_widths.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00476_pretty_formats_and_widths.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00477_parsing_data_types.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00477_parsing_data_types.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00479_date_and_datetime_to_number.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00479_date_and_datetime_to_number.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00480_mac_addresses.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00480_mac_addresses.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00481_create_view_for_null.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00481_create_view_for_null.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00481_reading_from_last_granula.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00481_reading_from_last_granula.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00482_subqueries_and_aliases.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00482_subqueries_and_aliases.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00483_cast_syntax.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00483_cast_syntax.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00483_reading_from_array_structure.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00483_reading_from_array_structure.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00484_preferred_max_column_in_block_size_bytes.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00484_preferred_max_column_in_block_size_bytes.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00485_http_insert_format.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00485_http_insert_format.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00486_if_fixed_string.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00486_if_fixed_string.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00487_if_array_fixed_string.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00487_if_array_fixed_string.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00488_non_ascii_column_names.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00488_non_ascii_column_names.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00489_pk_subexpression.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00489_pk_subexpression.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00490_special_line_separators_and_characters_outside_of_bmp.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00490_special_line_separators_and_characters_outside_of_bmp.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00490_with_select.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00490_with_select.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00491_shard_distributed_and_aliases_in_where_having.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00491_shard_distributed_and_aliases_in_where_having.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00492_drop_temporary_table.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00492_drop_temporary_table.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00493_substring_of_fixedstring.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00493_substring_of_fixedstring.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00494_shard_alias_substitution_bug.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00494_shard_alias_substitution_bug.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00495_reading_const_zero_column.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00495_reading_const_zero_column.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00496_substring_negative_offset.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00496_substring_negative_offset.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00497_whitespaces_in_insert.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00497_whitespaces_in_insert.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00498_array_functions_concat_slice_push_pop.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00498_array_functions_concat_slice_push_pop.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00498_bitwise_aggregate_functions.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00498_bitwise_aggregate_functions.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00499_json_enum_insert.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00499_json_enum_insert.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00500_point_in_polygon.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00500_point_in_polygon.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00500_point_in_polygon_bug.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00500_point_in_polygon_bug.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00500_point_in_polygon_bug_2.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00500_point_in_polygon_bug_2.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00500_point_in_polygon_bug_3_linestring_rotation_precision.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00500_point_in_polygon_bug_3_linestring_rotation_precision.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00501_http_head.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00501_http_head.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00502_custom_partitioning_local.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00502_custom_partitioning_local.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00502_string_concat_with_array.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00502_string_concat_with_array.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00502_sum_map.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00502_sum_map.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00503_cast_const_nullable.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00503_cast_const_nullable.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00504_insert_miss_columns.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00504_insert_miss_columns.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00504_mergetree_arrays_rw.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00504_mergetree_arrays_rw.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00505_distributed_secure.data (100%)
rename {dbms/tests => tests}/queries/0_stateless/00505_secure.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00505_secure.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00505_shard_secure.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00505_shard_secure.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00506_shard_global_in_union.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00506_shard_global_in_union.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00506_union_distributed.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00506_union_distributed.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00507_array_no_params.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00507_array_no_params.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00507_sumwithoverflow.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00507_sumwithoverflow.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00508_materialized_view_to.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00508_materialized_view_to.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00509_extended_storage_definition_syntax_zookeeper.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00509_extended_storage_definition_syntax_zookeeper.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00510_materizlized_view_and_deduplication_zookeeper.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00510_materizlized_view_and_deduplication_zookeeper.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00511_get_size_of_enum.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00511_get_size_of_enum.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00512_fractional_time_zones.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00512_fractional_time_zones.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00513_fractional_time_zones.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00513_fractional_time_zones.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00514_interval_operators.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00514_interval_operators.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00515_enhanced_time_zones.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00515_enhanced_time_zones.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00515_gcd_lcm.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00515_gcd_lcm.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00515_shard_desc_table_functions_and_subqueries.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00515_shard_desc_table_functions_and_subqueries.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00516_deduplication_after_drop_partition_zookeeper.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00516_deduplication_after_drop_partition_zookeeper.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00516_is_inf_nan.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00516_is_inf_nan.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00516_modulo.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00516_modulo.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00517_date_parsing.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00517_date_parsing.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00518_extract_all_and_empty_matches.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00518_extract_all_and_empty_matches.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00519_create_as_select_from_temporary_table.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00519_create_as_select_from_temporary_table.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00520_http_nullable.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00520_http_nullable.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00520_tuple_values_interpreter.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00520_tuple_values_interpreter.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00521_multidimensional.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00521_multidimensional.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00522_multidimensional.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00522_multidimensional.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00523_aggregate_functions_in_group_array.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00523_aggregate_functions_in_group_array.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00524_time_intervals_months_underflow.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00524_time_intervals_months_underflow.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00525_aggregate_functions_of_nullable_that_return_non_nullable.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00525_aggregate_functions_of_nullable_that_return_non_nullable.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00526_array_join_with_arrays_of_nullable.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00526_array_join_with_arrays_of_nullable.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00527_totals_having_nullable.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00527_totals_having_nullable.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00528_const_of_nullable.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00528_const_of_nullable.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00529_orantius.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00529_orantius.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00530_arrays_of_nothing.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00530_arrays_of_nothing.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00531_aggregate_over_nullable.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00531_aggregate_over_nullable.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00531_client_ignore_error.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00531_client_ignore_error.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00532_topk_generic.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00532_topk_generic.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00533_uniq_array.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00533_uniq_array.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00534_client_ignore_error.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00534_client_ignore_error.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00534_exp10.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00534_exp10.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00534_filimonov.data (100%) rename {dbms/tests => tests}/queries/0_stateless/00534_filimonov.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00534_filimonov.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00534_functions_bad_arguments.lib (100%) rename {dbms/tests => tests}/queries/0_stateless/00534_functions_bad_arguments1.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00534_functions_bad_arguments1.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00534_functions_bad_arguments10.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00534_functions_bad_arguments10.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00534_functions_bad_arguments11.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00534_functions_bad_arguments11.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00534_functions_bad_arguments12.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00534_functions_bad_arguments12.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00534_functions_bad_arguments13.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00534_functions_bad_arguments13.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00534_functions_bad_arguments2.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00534_functions_bad_arguments2.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00534_functions_bad_arguments3.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00534_functions_bad_arguments3.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00534_functions_bad_arguments4.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00534_functions_bad_arguments4.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00534_functions_bad_arguments5.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00534_functions_bad_arguments5.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00534_functions_bad_arguments6.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00534_functions_bad_arguments6.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00534_functions_bad_arguments7.reference (100%) rename {dbms/tests => 
tests}/queries/0_stateless/00534_functions_bad_arguments7.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00534_functions_bad_arguments8.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00534_functions_bad_arguments8.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00534_functions_bad_arguments9.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00534_functions_bad_arguments9.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00535_parse_float_scientific.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00535_parse_float_scientific.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00536_int_exp.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00536_int_exp.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00537_quarters.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00537_quarters.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00538_datediff.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00538_datediff.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00539_functions_for_working_with_json.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00539_functions_for_working_with_json.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00540_bad_data_types.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00540_bad_data_types.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00541_kahan_sum.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00541_kahan_sum.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00541_to_start_of_fifteen_minutes.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00541_to_start_of_fifteen_minutes.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00542_access_to_temporary_table_in_readonly_mode.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00542_access_to_temporary_table_in_readonly_mode.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00542_materialized_view_and_time_zone_tag.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00542_materialized_view_and_time_zone_tag.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00543_access_to_temporary_table_in_readonly_mode.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00543_access_to_temporary_table_in_readonly_mode.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00543_null_and_prewhere.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00543_null_and_prewhere.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00544_agg_foreach_of_two_arg.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00544_agg_foreach_of_two_arg.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00544_insert_with_select.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00544_insert_with_select.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00545_weird_aggregate_functions.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00545_weird_aggregate_functions.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00546_shard_tuple_element_formatting.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00546_shard_tuple_element_formatting.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00547_named_tuples.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00547_named_tuples.sql 
(100%) rename {dbms/tests => tests}/queries/0_stateless/00548_slice_of_nested.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00548_slice_of_nested.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00549_join_use_nulls.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00549_join_use_nulls.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00550_join_insert_select.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00550_join_insert_select.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00551_parse_or_null.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00551_parse_or_null.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00552_logical_functions_simple.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00552_logical_functions_simple.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00552_logical_functions_ternary.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00552_logical_functions_ternary.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00552_or_nullable.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00552_or_nullable.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00553_buff_exists_materlized_column.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00553_buff_exists_materlized_column.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00553_invalid_nested_name.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00553_invalid_nested_name.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00554_nested_and_table_engines.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00554_nested_and_table_engines.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00555_hasAll_hasAny.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00555_hasAll_hasAny.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00555_right_join_excessive_rows.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00555_right_join_excessive_rows.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00556_array_intersect.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00556_array_intersect.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00556_remove_columns_from_subquery.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00556_remove_columns_from_subquery.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00557_alter_null_storage_tables.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00557_alter_null_storage_tables.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00557_array_resize.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00557_array_resize.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00557_remote_port.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00557_remote_port.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00558_aggregate_merge_totals_with_arenas.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00558_aggregate_merge_totals_with_arenas.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00558_parse_floats.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00558_parse_floats.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00559_filter_array_generic.reference (100%) rename {dbms/tests => 
tests}/queries/0_stateless/00559_filter_array_generic.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00560_float_leading_plus_in_exponent.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00560_float_leading_plus_in_exponent.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00561_storage_join.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00561_storage_join.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00562_in_subquery_merge_tree.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00562_in_subquery_merge_tree.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00562_rewrite_select_expression_with_union.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00562_rewrite_select_expression_with_union.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00563_complex_in_expression.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00563_complex_in_expression.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00563_insert_into_remote_and_zookeeper.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00563_insert_into_remote_and_zookeeper.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00563_shard_insert_into_remote.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00563_shard_insert_into_remote.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00564_enum_order.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00564_enum_order.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00564_initial_column_values_with_default_expression.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00564_initial_column_values_with_default_expression.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00564_temporary_table_management.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00564_temporary_table_management.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00564_versioned_collapsing_merge_tree.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00564_versioned_collapsing_merge_tree.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00565_enum_order.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00565_enum_order.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00566_enum_min_max.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00566_enum_min_max.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00567_parse_datetime_as_unix_timestamp.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00567_parse_datetime_as_unix_timestamp.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00568_empty_function_with_fixed_string.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00568_empty_function_with_fixed_string.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00569_parse_date_time_best_effort.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00569_parse_date_time_best_effort.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00570_empty_array_is_const.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00570_empty_array_is_const.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00571_alter_nullable.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00571_alter_nullable.sql (100%) rename {dbms/tests => 
tests}/queries/0_stateless/00571_non_exist_database_when_create_materializ_view.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00571_non_exist_database_when_create_materializ_view.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00572_aggregation_by_empty_set.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00572_aggregation_by_empty_set.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00573_shard_aggregation_by_empty_set.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00573_shard_aggregation_by_empty_set.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00574_empty_strings_deserialization.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00574_empty_strings_deserialization.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00575_illegal_column_exception_when_drop_depen_column.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00575_illegal_column_exception_when_drop_depen_column.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00575_merge_and_index_with_function_in_in.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00575_merge_and_index_with_function_in_in.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00576_nested_and_prewhere.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00576_nested_and_prewhere.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00577_full_join_segfault.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00577_full_join_segfault.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00577_replacing_merge_tree_vertical_merge.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00577_replacing_merge_tree_vertical_merge.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00578_merge_table_and_table_virtual_column.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00578_merge_table_and_table_virtual_column.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00578_merge_table_sampling.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00578_merge_table_sampling.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00578_merge_table_shadow_virtual_column.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00578_merge_table_shadow_virtual_column.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00578_merge_trees_without_primary_key.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00578_merge_trees_without_primary_key.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00579_merge_tree_partition_and_primary_keys_using_same_expression.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00579_merge_tree_partition_and_primary_keys_using_same_expression.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00579_virtual_column_and_lazy.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00579_virtual_column_and_lazy.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00580_cast_nullable_to_non_nullable.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00580_cast_nullable_to_non_nullable.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00580_consistent_hashing_functions.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00580_consistent_hashing_functions.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00581_limit_on_result_and_subquery_and_insert.reference (100%) 
rename {dbms/tests => tests}/queries/0_stateless/00581_limit_on_result_and_subquery_and_insert.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00582_not_aliasing_functions.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00582_not_aliasing_functions.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00583_limit_by_expressions.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00583_limit_by_expressions.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00584_view_union_all.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00584_view_union_all.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00585_union_all_subquery_aggregation_column_removal.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00585_union_all_subquery_aggregation_column_removal.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00586_removing_unused_columns_from_subquery.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00586_removing_unused_columns_from_subquery.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00587_union_all_type_conversions.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00587_union_all_type_conversions.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00588_shard_distributed_prewhere.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00588_shard_distributed_prewhere.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00589_removal_unused_columns_aggregation.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00589_removal_unused_columns_aggregation.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00590_limit_by_column_removal.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00590_limit_by_column_removal.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00591_columns_removal_union_all.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00591_columns_removal_union_all.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00592_union_all_different_aliases.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00592_union_all_different_aliases.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00593_union_all_assert_columns_removed.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00593_union_all_assert_columns_removed.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00594_alias_in_distributed.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00594_alias_in_distributed.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00595_insert_into_view.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00595_insert_into_view.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00596_limit_on_expanded_ast.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00596_limit_on_expanded_ast.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00597_push_down_predicate.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00597_push_down_predicate.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00597_with_totals_on_empty_set.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00597_with_totals_on_empty_set.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00598_create_as_select_http.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00598_create_as_select_http.sh (100%) rename {dbms/tests => 
tests}/queries/0_stateless/00599_create_view_with_subquery.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00599_create_view_with_subquery.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00600_create_temporary_table_if_not_exists.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00600_create_temporary_table_if_not_exists.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00600_replace_running_query.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00600_replace_running_query.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00601_kill_running_query.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00601_kill_running_query.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00602_throw_if.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00602_throw_if.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00603_system_parts_nonexistent_database.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00603_system_parts_nonexistent_database.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00604_shard_remote_and_columns_with_defaults.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00604_shard_remote_and_columns_with_defaults.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00604_show_create_database.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00604_show_create_database.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00605_intersections_aggregate_functions.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00605_intersections_aggregate_functions.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00606_quantiles_and_nans.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00606_quantiles_and_nans.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00607_index_in_in.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00607_index_in_in.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00608_uniq_array.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00608_uniq_array.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00609_distributed_with_case_when_then.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00609_distributed_with_case_when_then.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00609_mv_index_in_in.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00609_mv_index_in_in.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00609_prewhere_and_default.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00609_prewhere_and_default.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00610_materialized_view_forward_alter_partition_statements.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00610_materialized_view_forward_alter_partition_statements.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00612_count.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00612_count.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00612_http_max_query_size.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00612_http_max_query_size.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00612_pk_in_tuple.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00612_pk_in_tuple.sql (100%) rename {dbms/tests => 
tests}/queries/0_stateless/00612_shard_count.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00612_shard_count.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00612_union_query_with_subquery.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00612_union_query_with_subquery.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00613_shard_distributed_max_execution_time.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00613_shard_distributed_max_execution_time.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00614_array_nullable.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00614_array_nullable.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00614_shard_same_header_for_local_and_remote_node_in_distributed_query.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00614_shard_same_header_for_local_and_remote_node_in_distributed_query.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00615_nullable_alter_optimize.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00615_nullable_alter_optimize.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00616_final_single_part.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00616_final_single_part.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00617_array_in.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00617_array_in.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00618_nullable_in.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00618_nullable_in.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00619_extract.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00619_extract.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00619_union_highlite.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00619_union_highlite.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00620_optimize_on_nonleader_replica_zookeeper.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00620_optimize_on_nonleader_replica_zookeeper.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00621_regression_for_in_operator.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00621_regression_for_in_operator.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00622_select_in_parens.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00622_select_in_parens.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00623_in_partition_key.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00623_in_partition_key.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00623_replicated_truncate_table_zookeeper.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00623_replicated_truncate_table_zookeeper.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00623_truncate_table.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00623_truncate_table.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00623_truncate_table_throw_exception.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00623_truncate_table_throw_exception.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00624_length_utf8.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00624_length_utf8.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00625_arrays_in_nested.reference 
(100%) rename {dbms/tests => tests}/queries/0_stateless/00625_arrays_in_nested.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00625_query_in_form_data.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00625_query_in_form_data.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00625_summing_merge_tree_merge.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00625_summing_merge_tree_merge.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00626_in_syntax.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00626_in_syntax.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00626_replace_partition_from_table.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00626_replace_partition_from_table.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00626_replace_partition_from_table_zookeeper.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00626_replace_partition_from_table_zookeeper.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00627_recursive_alias.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00627_recursive_alias.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00628_in_lambda_on_merge_table_bug.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00628_in_lambda_on_merge_table_bug.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00630_arbitrary_csv_delimiter.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00630_arbitrary_csv_delimiter.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00632_aggregation_window_funnel.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00632_aggregation_window_funnel.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00632_get_sample_block_cache.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00632_get_sample_block_cache.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00633_func_or_in.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00633_func_or_in.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00633_materialized_view_and_too_many_parts_zookeeper.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00633_materialized_view_and_too_many_parts_zookeeper.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00634_logging_shard.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00634_logging_shard.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00634_performance_introspection_and_logging.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00634_performance_introspection_and_logging.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00634_rename_view.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00634_rename_view.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00635_shard_distinct_order_by.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00635_shard_distinct_order_by.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00636_partition_key_parts_pruning.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00636_partition_key_parts_pruning.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00637_sessions_in_http_interface_and_settings.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00637_sessions_in_http_interface_and_settings.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00638_remote_ssrf.reference 
(100%) rename {dbms/tests => tests}/queries/0_stateless/00638_remote_ssrf.sh.disabled (100%) rename {dbms/tests => tests}/queries/0_stateless/00639_startsWith.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00639_startsWith.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00640_endsWith.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00640_endsWith.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00642_cast.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00642_cast.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00643_cast_zookeeper.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00643_cast_zookeeper.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00644_different_expressions_with_same_alias.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00644_different_expressions_with_same_alias.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00645_date_time_input_format.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00645_date_time_input_format.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00646_url_engine.python (100%) rename {dbms/tests => tests}/queries/0_stateless/00646_url_engine.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00646_url_engine.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00646_weird_mmx.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00646_weird_mmx.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00647_histogram.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00647_histogram.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00647_multiply_aggregation_state.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00647_multiply_aggregation_state.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00647_select_numbers_with_offset.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00647_select_numbers_with_offset.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00648_replacing_empty_set_from_prewhere.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00648_replacing_empty_set_from_prewhere.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00649_quantile_tdigest_negative.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00649_quantile_tdigest_negative.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00650_array_enumerate_uniq_with_tuples.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00650_array_enumerate_uniq_with_tuples.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00650_csv_with_specified_quote_rule.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00650_csv_with_specified_quote_rule.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00651_default_database_on_client_reconnect.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00651_default_database_on_client_reconnect.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00652_mergetree_mutations.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00652_mergetree_mutations.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00652_mutations_alter_update.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00652_mutations_alter_update.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00652_mutations_default_database.reference (100%) rename {dbms/tests => 
tests}/queries/0_stateless/00652_mutations_default_database.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00652_replicated_mutations_default_database_zookeeper.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00652_replicated_mutations_default_database_zookeeper.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00652_replicated_mutations_zookeeper.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00652_replicated_mutations_zookeeper.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00653_monotonic_integer_cast.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00653_monotonic_integer_cast.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00653_running_difference.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00653_running_difference.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00653_verification_monotonic_data_load.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00653_verification_monotonic_data_load.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00660_optimize_final_without_partition.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00660_optimize_final_without_partition.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00661_array_has_silviucpp.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00661_array_has_silviucpp.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00661_optimize_final_replicated_without_partition_zookeeper.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00661_optimize_final_replicated_without_partition_zookeeper.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00662_array_has_nullable.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00662_array_has_nullable.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00663_tiny_log_empty_insert.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00663_tiny_log_empty_insert.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00664_cast_from_string_to_nullable.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00664_cast_from_string_to_nullable.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00665_alter_nullable_string_to_nullable_uint8.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00665_alter_nullable_string_to_nullable_uint8.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00666_uniq_complex_types.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00666_uniq_complex_types.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00667_compare_arrays_of_different_types.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00667_compare_arrays_of_different_types.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00668_compare_arrays_silviucpp.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00668_compare_arrays_silviucpp.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00670_truncate_temporary_table.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00670_truncate_temporary_table.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00671_max_intersections.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00671_max_intersections.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00672_arrayDistinct.reference (100%) rename {dbms/tests => 
tests}/queries/0_stateless/00672_arrayDistinct.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00673_subquery_prepared_set_performance.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00673_subquery_prepared_set_performance.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00674_has_array_enum.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00674_has_array_enum.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00674_join_on_syntax.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00674_join_on_syntax.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00675_shard_remote_with_table_function.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00675_shard_remote_with_table_function.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00676_group_by_in.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00676_group_by_in.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00677_shard_any_heavy_merge.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00677_shard_any_heavy_merge.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00678_murmurhash.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00678_murmurhash.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00678_shard_funnel_window.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00678_shard_funnel_window.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00679_replace_asterisk.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00679_replace_asterisk.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00679_uuid_in_key.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00679_uuid_in_key.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00680_duplicate_columns_inside_union_all.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00680_duplicate_columns_inside_union_all.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00681_duplicate_columns_inside_union_all_stas_sviridov.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00681_duplicate_columns_inside_union_all_stas_sviridov.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00682_empty_parts_merge.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00682_empty_parts_merge.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00685_output_format_json_escape_forward_slashes.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00685_output_format_json_escape_forward_slashes.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00686_client_exit_code.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00686_client_exit_code.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00687_insert_into_mv.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00687_insert_into_mv.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00687_top_and_offset.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00687_top_and_offset.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00688_aggregation_retention.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00688_aggregation_retention.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00688_case_without_else.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00688_case_without_else.sql (100%) rename {dbms/tests => 
tests}/queries/0_stateless/00688_low_cardinality_alter_add_column.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00688_low_cardinality_alter_add_column.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00688_low_cardinality_defaults.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00688_low_cardinality_defaults.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00688_low_cardinality_dictionary_deserialization.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00688_low_cardinality_dictionary_deserialization.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00688_low_cardinality_in.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00688_low_cardinality_in.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00688_low_cardinality_nullable_cast.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00688_low_cardinality_nullable_cast.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00688_low_cardinality_prewhere.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00688_low_cardinality_prewhere.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00688_low_cardinality_serialization.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00688_low_cardinality_serialization.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00688_low_cardinality_syntax.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00688_low_cardinality_syntax.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00689_file.txt (100%) rename {dbms/tests => tests}/queries/0_stateless/00689_join_table_function.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00689_join_table_function.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00690_insert_select_converting_exception_message.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00690_insert_select_converting_exception_message.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00691_array_distinct.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00691_array_distinct.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00692_if_exception_code.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00692_if_exception_code.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00693_max_block_size_system_tables_columns.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00693_max_block_size_system_tables_columns.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00694_max_block_size_zero.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00694_max_block_size_zero.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00695_pretty_max_column_pad_width.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00695_pretty_max_column_pad_width.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00696_system_columns_limit.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00696_system_columns_limit.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00697_in_subquery_shard.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00697_in_subquery_shard.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00698_validate_array_sizes_for_nested.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00698_validate_array_sizes_for_nested.sql (100%) rename {dbms/tests => 
tests}/queries/0_stateless/00698_validate_array_sizes_for_nested_kshvakov.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00698_validate_array_sizes_for_nested_kshvakov.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00699_materialized_view_mutations.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00699_materialized_view_mutations.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00700_decimal_aggregates.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00700_decimal_aggregates.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00700_decimal_arithm.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00700_decimal_arithm.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00700_decimal_array_functions.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00700_decimal_array_functions.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00700_decimal_bounds.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00700_decimal_bounds.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00700_decimal_casts.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00700_decimal_casts.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00700_decimal_compare.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00700_decimal_compare.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00700_decimal_complex_types.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00700_decimal_complex_types.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00700_decimal_defaults.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00700_decimal_defaults.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00700_decimal_empty_aggregates.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00700_decimal_empty_aggregates.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00700_decimal_formats.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00700_decimal_formats.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00700_decimal_gathers.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00700_decimal_gathers.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00700_decimal_in_keys.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00700_decimal_in_keys.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00700_decimal_math.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00700_decimal_math.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00700_decimal_null.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00700_decimal_null.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00700_decimal_round.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00700_decimal_round.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00700_to_decimal_or_something.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00700_to_decimal_or_something.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00701_context_use_after_free.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00701_context_use_after_free.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00701_join_default_strictness.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00701_join_default_strictness.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00701_rollup.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00701_rollup.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00702_join_on_dups.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00702_join_on_dups.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00702_join_with_using.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00702_join_with_using.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00702_join_with_using_dups.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00702_join_with_using_dups.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00702_where_with_quailified_names.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00702_where_with_quailified_names.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00703_join_crash.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00703_join_crash.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00704_arrayCumSumLimited_arrayDifference.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00704_arrayCumSumLimited_arrayDifference.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00704_drop_truncate_memory_table.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00704_drop_truncate_memory_table.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00705_aggregate_states_addition.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00705_aggregate_states_addition.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00705_drop_create_merge_tree.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00705_drop_create_merge_tree.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00706_iso_week_and_day_of_year.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00706_iso_week_and_day_of_year.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00707_float_csv_delimiter.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00707_float_csv_delimiter.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00709_virtual_column_partition_id.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00709_virtual_column_partition_id.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00710_array_enumerate_dense.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00710_array_enumerate_dense.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00711_array_enumerate_variants.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00711_array_enumerate_variants.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00712_nan_comparison.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00712_nan_comparison.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00712_prewhere_with_alias.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00712_prewhere_with_alias.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00712_prewhere_with_alias_and_virtual_column.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00712_prewhere_with_alias_and_virtual_column.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00712_prewhere_with_alias_bug.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00712_prewhere_with_alias_bug.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00712_prewhere_with_alias_bug_2.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00712_prewhere_with_alias_bug_2.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00712_prewhere_with_final.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00712_prewhere_with_final.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00712_prewhere_with_missing_columns.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00712_prewhere_with_missing_columns.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00712_prewhere_with_missing_columns_2.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00712_prewhere_with_missing_columns_2.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00712_prewhere_with_sampling.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00712_prewhere_with_sampling.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00712_prewhere_with_sampling_and_alias.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00712_prewhere_with_sampling_and_alias.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00713_collapsing_merge_tree.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00713_collapsing_merge_tree.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00714_alter_uuid.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00714_alter_uuid.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00714_create_temporary_table_with_in_clause.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00714_create_temporary_table_with_in_clause.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00715_bounding_ratio.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00715_bounding_ratio.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00715_fetch_merged_or_mutated_part_zookeeper.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00715_fetch_merged_or_mutated_part_zookeeper.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00715_json_each_row_input_nested.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00715_json_each_row_input_nested.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00716_allow_ddl.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00716_allow_ddl.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00717_default_join_type.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00717_default_join_type.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00717_low_cardinaliry_distributed_group_by.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00717_low_cardinaliry_distributed_group_by.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00717_low_cardinaliry_group_by.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00717_low_cardinaliry_group_by.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00717_merge_and_distributed.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00717_merge_and_distributed.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00718_format_datetime.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00718_format_datetime.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00718_low_cardinaliry_alter.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00718_low_cardinaliry_alter.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00719_format_datetime_rand.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00719_format_datetime_rand.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00719_insert_block_without_column.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00719_insert_block_without_column.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00719_parallel_ddl_db.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00719_parallel_ddl_db.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00719_parallel_ddl_table.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00719_parallel_ddl_table.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00720_combinations_of_aggregate_combinators.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00720_combinations_of_aggregate_combinators.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00720_with_cube.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00720_with_cube.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00721_force_by_identical_result_after_merge_zookeeper.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00721_force_by_identical_result_after_merge_zookeeper.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00722_inner_join.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00722_inner_join.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00723_remerge_sort.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00723_remerge_sort.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00724_insert_values_datetime_conversion.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00724_insert_values_datetime_conversion.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00725_comment_columns.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00725_comment_columns.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00725_ipv4_ipv6_domains.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00725_ipv4_ipv6_domains.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00725_join_on_bug_1.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00725_join_on_bug_1.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00725_join_on_bug_2.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00725_join_on_bug_2.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00725_join_on_bug_3.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00725_join_on_bug_3.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00725_join_on_bug_4.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00725_join_on_bug_4.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00725_memory_tracking.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00725_memory_tracking.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00725_quantiles_shard.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00725_quantiles_shard.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00726_length_aliases.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00726_length_aliases.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00726_materialized_view_concurrent.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00726_materialized_view_concurrent.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00726_modulo_for_date.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00726_modulo_for_date.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00727_concat.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00727_concat.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00728_json_each_row_parsing.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00728_json_each_row_parsing.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00729_prewhere_array_join.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00729_prewhere_array_join.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00730_unicode_terminal_format.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00730_unicode_terminal_format.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00731_long_merge_tree_select_opened_files.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00731_long_merge_tree_select_opened_files.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00732_base64_functions.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00732_base64_functions.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00732_decimal_summing_merge_tree.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00732_decimal_summing_merge_tree.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00732_quorum_insert_have_data_before_quorum_zookeeper.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00732_quorum_insert_have_data_before_quorum_zookeeper.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00732_quorum_insert_lost_part_and_alive_part_zookeeper.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00732_quorum_insert_lost_part_and_alive_part_zookeeper.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00732_quorum_insert_lost_part_zookeeper.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00732_quorum_insert_lost_part_zookeeper.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00732_quorum_insert_select_with_old_data_and_without_quorum_zookeeper.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00732_quorum_insert_select_with_old_data_and_without_quorum_zookeeper.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00732_quorum_insert_simple_test_1_parts_zookeeper.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00732_quorum_insert_simple_test_1_parts_zookeeper.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00732_quorum_insert_simple_test_2_parts_zookeeper.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00732_quorum_insert_simple_test_2_parts_zookeeper.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00733_if_datetime.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00733_if_datetime.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00734_timeslot.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00734_timeslot.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00735_conditional.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00735_conditional.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00735_or_expr_optimize_bug.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00735_or_expr_optimize_bug.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00736_disjunction_optimisation.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00736_disjunction_optimisation.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00737_decimal_group_by.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00737_decimal_group_by.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00738_lock_for_inner_table.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00738_lock_for_inner_table.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00738_nested_merge_multidimensional_array.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00738_nested_merge_multidimensional_array.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00739_array_element_nullable_string_mattrobenolt.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00739_array_element_nullable_string_mattrobenolt.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00740_database_in_nested_view.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00740_database_in_nested_view.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00740_optimize_predicate_expression.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00740_optimize_predicate_expression.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00741_client_comment_multiline.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00741_client_comment_multiline.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00742_require_join_strictness.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00742_require_join_strictness.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00743_limit_by_not_found_column.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00743_limit_by_not_found_column.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00744_join_not_found_column.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00744_join_not_found_column.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00745_compile_scalar_subquery.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00745_compile_scalar_subquery.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00746_compile_non_deterministic_function.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00746_compile_non_deterministic_function.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00746_hashing_tuples.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00746_hashing_tuples.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00746_sql_fuzzy.pl (100%)
rename {dbms/tests => tests}/queries/0_stateless/00746_sql_fuzzy.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00746_sql_fuzzy.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00747_contributors.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00747_contributors.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00748_insert_array_with_null.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00748_insert_array_with_null.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00749_inner_join_of_unnamed_subqueries.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00749_inner_join_of_unnamed_subqueries.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00750_merge_tree_merge_with_o_direct.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00750_merge_tree_merge_with_o_direct.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00751_default_databasename_for_view.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00751_default_databasename_for_view.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00751_hashing_ints.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00751_hashing_ints.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00751_low_cardinality_nullable_group_by.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00751_low_cardinality_nullable_group_by.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00752_low_cardinality_array_result.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00752_low_cardinality_array_result.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00752_low_cardinality_lambda_argument.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00752_low_cardinality_lambda_argument.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00752_low_cardinality_left_array_join.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00752_low_cardinality_left_array_join.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00752_low_cardinality_mv_1.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00752_low_cardinality_mv_1.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00752_low_cardinality_mv_2.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00752_low_cardinality_mv_2.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00752_low_cardinality_permute.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00752_low_cardinality_permute.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00753_alter_attach.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00753_alter_attach.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00753_alter_destination_for_storage_buffer.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00753_alter_destination_for_storage_buffer.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00753_comment_columns_zookeeper.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00753_comment_columns_zookeeper.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00753_quantile_format.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00753_quantile_format.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00753_system_columns_and_system_tables.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00753_system_columns_and_system_tables.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00753_with_with_single_alias.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00753_with_with_single_alias.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00754_alter_modify_column_partitions.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00754_alter_modify_column_partitions.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00754_alter_modify_order_by.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00754_alter_modify_order_by.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00754_alter_modify_order_by_replicated_zookeeper.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00754_alter_modify_order_by_replicated_zookeeper.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards_with_prewhere.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards_with_prewhere.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00754_first_significant_subdomain_more.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00754_first_significant_subdomain_more.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00755_avg_value_size_hint_passing.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00755_avg_value_size_hint_passing.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00756_power_alias.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00756_power_alias.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00757_enum_defaults.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00757_enum_defaults.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00758_array_reverse.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00758_array_reverse.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00759_kodieg.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00759_kodieg.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00760_insert_json_with_defaults.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00760_insert_json_with_defaults.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00760_url_functions_overflow.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00760_url_functions_overflow.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00761_lower_utf8_bug.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00761_lower_utf8_bug.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00762_date_comparsion.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00762_date_comparsion.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00763_create_query_as_table_engine_bug.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00763_create_query_as_table_engine_bug.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00763_lock_buffer.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00763_lock_buffer.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00763_long_lock_buffer_alter_destination_table.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00763_long_lock_buffer_alter_destination_table.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00764_max_query_size_allocation.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00764_max_query_size_allocation.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00765_sql_compatibility_aliases.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00765_sql_compatibility_aliases.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00779_all_right_join_max_block_size.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00779_all_right_join_max_block_size.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00780_unaligned_array_join.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00780_unaligned_array_join.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00794_materialized_view_with_column_defaults.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00794_materialized_view_with_column_defaults.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00799_function_dry_run.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00799_function_dry_run.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00800_function_java_hash.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00800_function_java_hash.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00800_low_cardinality_array_group_by_arg.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00800_low_cardinality_array_group_by_arg.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00800_low_cardinality_distinct_numeric.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00800_low_cardinality_distinct_numeric.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00800_low_cardinality_distributed_insert.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00800_low_cardinality_distributed_insert.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00800_low_cardinality_empty_array.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00800_low_cardinality_empty_array.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00800_low_cardinality_join.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00800_low_cardinality_join.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00800_versatile_storage_join.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00800_versatile_storage_join.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00801_daylight_saving_time_hour_underflow.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00801_daylight_saving_time_hour_underflow.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00802_daylight_saving_time_shift_backwards_at_midnight.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00802_daylight_saving_time_shift_backwards_at_midnight.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00802_system_parts_with_datetime_partition.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00802_system_parts_with_datetime_partition.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00803_odbc_driver_2_format.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00803_odbc_driver_2_format.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00803_xxhash.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00803_xxhash.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00804_rollup_with_having.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00804_rollup_with_having.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00804_test_alter_compression_codecs.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00804_test_alter_compression_codecs.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00804_test_custom_compression_codecs.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00804_test_custom_compression_codecs.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00804_test_custom_compression_codes_log_storages.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00804_test_custom_compression_codes_log_storages.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00804_test_delta_codec_compression.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00804_test_delta_codec_compression.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00804_test_delta_codec_no_type_alter.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00804_test_delta_codec_no_type_alter.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00805_round_down.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00805_round_down.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00806_alter_update.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00806_alter_update.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00807_regexp_quote_meta.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00807_regexp_quote_meta.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00808_array_enumerate_segfault.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00808_array_enumerate_segfault.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00808_not_optimize_predicate.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00808_not_optimize_predicate.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00809_add_days_segfault.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00809_add_days_segfault.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00810_in_operators_segfault.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00810_in_operators_segfault.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00811_garbage.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00811_garbage.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00812_prewhere_alias_array.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00812_prewhere_alias_array.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00813_parse_date_time_best_effort_more.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00813_parse_date_time_best_effort_more.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00814_parsing_ub.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00814_parsing_ub.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00814_replicated_minimalistic_part_header_zookeeper.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00814_replicated_minimalistic_part_header_zookeeper.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00815_left_join_on_stepanel.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00815_left_join_on_stepanel.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00816_join_column_names_sarg.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00816_join_column_names_sarg.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00816_long_concurrent_alter_column.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00816_long_concurrent_alter_column.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00817_with_simple.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00817_with_simple.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00818_alias_bug_4110.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00818_alias_bug_4110.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00818_inner_join_bug_3567.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00818_inner_join_bug_3567.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00818_join_bug_4271.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00818_join_bug_4271.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00819_ast_refactoring_bugs.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00819_ast_refactoring_bugs.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00819_full_join_wrong_columns_in_block.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00819_full_join_wrong_columns_in_block.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00820_multiple_joins.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00820_multiple_joins.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00820_multiple_joins_subquery_requires_alias.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00820_multiple_joins_subquery_requires_alias.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00821_distributed_storage_with_join_on.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00821_distributed_storage_with_join_on.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00822_array_insert_default.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00822_array_insert_default.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00823_capnproto_input.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00823_capnproto_input.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00823_sequence_match_dfa.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00823_sequence_match_dfa.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00824_filesystem.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00824_filesystem.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00825_http_header_query_id.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00825_http_header_query_id.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00825_protobuf_format.proto (100%)
rename {dbms/tests => tests}/queries/0_stateless/00825_protobuf_format_input.insh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00825_protobuf_format_input.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00825_protobuf_format_input.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00825_protobuf_format_output.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00825_protobuf_format_output.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00825_protobuf_format_syntax2.proto (100%)
rename {dbms/tests => tests}/queries/0_stateless/00826_cross_to_inner_join.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00826_cross_to_inner_join.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00829_bitmap_function.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00829_bitmap_function.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00830_join_overwrite.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00830_join_overwrite.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00831_quantile_weighted_parameter_check.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00831_quantile_weighted_parameter_check.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00832_storage_file_lock.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00832_storage_file_lock.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00833_sleep_overflow.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00833_sleep_overflow.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00834_cancel_http_readonly_queries_on_client_close.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00834_cancel_http_readonly_queries_on_client_close.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00834_date_datetime_cmp.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00834_date_datetime_cmp.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00834_dont_allow_to_set_two_configuration_files_in_client.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00834_dont_allow_to_set_two_configuration_files_in_client.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00834_hints_for_type_function_typos.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00834_hints_for_type_function_typos.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00834_kill_mutation.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00834_kill_mutation.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00834_kill_mutation_replicated_zookeeper.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00834_kill_mutation_replicated_zookeeper.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00834_limit_with_constant_expressions.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00834_limit_with_constant_expressions.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00834_not_between.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00834_not_between.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00835_if_generic_case.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00835_if_generic_case.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00836_indices_alter.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00836_indices_alter.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00836_indices_alter_replicated_zookeeper.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00836_indices_alter_replicated_zookeeper.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00836_numbers_table_function_zero.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00836_numbers_table_function_zero.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00837_insert_select_and_read_prefix.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00837_insert_select_and_read_prefix.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00837_minmax_index.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00837_minmax_index.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00837_minmax_index_replicated_zookeeper.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00837_minmax_index_replicated_zookeeper.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00838_system_tables_drop_table_race.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00838_system_tables_drop_table_race.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00838_unique_index.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00838_unique_index.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00839_bitmask_negative.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00839_bitmask_negative.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00840_long_concurrent_select_and_drop_deadlock.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00840_long_concurrent_select_and_drop_deadlock.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00840_top_k_weighted.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00840_top_k_weighted.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00841_temporary_table_database.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00841_temporary_table_database.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00842_array_with_constant_overflow.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00842_array_with_constant_overflow.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00843_optimize_predicate_and_rename_table.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00843_optimize_predicate_and_rename_table.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00844_join_lightee2.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00844_join_lightee2.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00845_join_on_aliases.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00845_join_on_aliases.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00846_join_using_tuple_crash.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00846_join_using_tuple_crash.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00847_multiple_join_same_column.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00847_multiple_join_same_column.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00848_join_use_nulls_segfault.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00848_join_use_nulls_segfault.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00849_multiple_comma_join.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00849_multiple_comma_join.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00849_multiple_comma_join_2.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00849_multiple_comma_join_2.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00850_global_join_dups.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00850_global_join_dups.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00851_http_insert_json_defaults.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00851_http_insert_json_defaults.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00852_any_join_nulls.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00852_any_join_nulls.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00853_join_with_nulls_crash.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00853_join_with_nulls_crash.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00854_multiple_join_asterisks.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00854_multiple_join_asterisks.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00855_join_with_array_join.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00855_join_with_array_join.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00856_no_column_issue_4242.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00856_no_column_issue_4242.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00857_global_joinsavel_table_alias.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00857_global_joinsavel_table_alias.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00858_issue_4756.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00858_issue_4756.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00859_distinct_with_join.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00859_distinct_with_join.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00860_unknown_identifier_bug.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00860_unknown_identifier_bug.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00861_decimal_quoted_csv.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00861_decimal_quoted_csv.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00862_decimal_in.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00862_decimal_in.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00863_comma_join_in.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00863_comma_join_in.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00864_union_all_supertype.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00864_union_all_supertype.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00870_t64_codec.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00870_t64_codec.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00871_t64_codec_signed.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00871_t64_codec_signed.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00872_t64_bit_codec.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00872_t64_bit_codec.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00874_issue_3495.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00874_issue_3495.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00875_join_right_nulls.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00875_join_right_nulls.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00876_wrong_arraj_join_column.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00876_wrong_arraj_join_column.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00877_memory_limit_for_new_delete.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00877_memory_limit_for_new_delete.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00878_join_unexpected_results.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00878_join_unexpected_results.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00879_cast_to_decimal_crash.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00879_cast_to_decimal_crash.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00880_decimal_in_key.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00880_decimal_in_key.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00881_unknown_identifier_in_in.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00881_unknown_identifier_in_in.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00882_multiple_join_no_alias.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00882_multiple_join_no_alias.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00897_flatten.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00897_flatten.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00898_parsing_bad_diagnostic_message.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00898_parsing_bad_diagnostic_message.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00898_quantile_timing_parameter_check.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00898_quantile_timing_parameter_check.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00899_long_attach_memory_limit.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00899_long_attach_memory_limit.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00900_entropy_shard.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00900_entropy_shard.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00900_orc_load.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00900_orc_load.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00900_parquet.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00900_parquet.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00900_parquet_create_table_columns.pl (100%)
rename {dbms/tests => tests}/queries/0_stateless/00900_parquet_decimal.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00900_parquet_decimal.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00900_parquet_load.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00900_parquet_load.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00901_joint_entropy.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00901_joint_entropy.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00902_entropy.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00902_entropy.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00903_array_with_constant_function.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00903_array_with_constant_function.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00904_array_with_constant_2.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00904_array_with_constant_2.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00905_compile_expressions_compare_big_dates.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00905_compile_expressions_compare_big_dates.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00905_field_with_aggregate_function_state.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00905_field_with_aggregate_function_state.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00906_low_cardinality_cache.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00906_low_cardinality_cache.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00906_low_cardinality_const_argument.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00906_low_cardinality_const_argument.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00906_low_cardinality_rollup.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00906_low_cardinality_rollup.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00907_set_index_max_rows.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00907_set_index_max_rows.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00907_set_index_with_nullable_and_low_cardinality.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00907_set_index_with_nullable_and_low_cardinality.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00907_set_index_with_nullable_and_low_cardinality_bug.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00907_set_index_with_nullable_and_low_cardinality_bug.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00908_analyze_query.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00908_analyze_query.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00908_bloom_filter_index.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00908_bloom_filter_index.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00908_long_http_insert.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00908_long_http_insert.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00909_arrayEnumerateUniq.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00909_arrayEnumerateUniq.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00909_kill_not_initialized_query.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00909_kill_not_initialized_query.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00909_ngram_distance.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00909_ngram_distance.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00910_aggregation_timeseriesgroupsum.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00910_aggregation_timeseriesgroupsum.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00910_buffer_prewhere.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00910_buffer_prewhere.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00910_client_window_size_detection.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00910_client_window_size_detection.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00910_crash_when_distributed_modify_order_by.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00910_crash_when_distributed_modify_order_by.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00910_decimal_group_array_crash_3783.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00910_decimal_group_array_crash_3783.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00910_zookeeper_custom_compression_codecs_replicated.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00910_zookeeper_custom_compression_codecs_replicated.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00910_zookeeper_test_alter_compression_codecs.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00910_zookeeper_test_alter_compression_codecs.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00911_tautological_compare.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00911_tautological_compare.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00912_string_comparison.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00912_string_comparison.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00913_many_threads.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00913_many_threads.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00914_join_bgranvea.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00914_join_bgranvea.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00914_replicate.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00914_replicate.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00915_simple_aggregate_function.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00915_simple_aggregate_function.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00915_tuple_orantius.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00915_tuple_orantius.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00916_add_materialized_column_after.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00916_add_materialized_column_after.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00916_create_or_replace_view.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00916_create_or_replace_view.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00916_join_using_duplicate_columns.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00916_join_using_duplicate_columns.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00917_least_sqr.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00917_least_sqr.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00917_multiple_joins_denny_crane.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00917_multiple_joins_denny_crane.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00918_has_unsufficient_type_check.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00918_has_unsufficient_type_check.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00918_json_functions.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00918_json_functions.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00919_histogram_merge.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00919_histogram_merge.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00919_sum_aggregate_states_constants.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00919_sum_aggregate_states_constants.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00920_multiply_aggregate_states_constants.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00920_multiply_aggregate_states_constants.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00921_datetime64_basic.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00921_datetime64_basic.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00921_datetime64_compatibility.python (100%)
rename {dbms/tests => tests}/queries/0_stateless/00921_datetime64_compatibility.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00921_datetime64_compatibility.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00925_zookeeper_empty_replicated_merge_tree_optimize_final.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00925_zookeeper_empty_replicated_merge_tree_optimize_final.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00926_adaptive_index_granularity_collapsing_merge_tree.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00926_adaptive_index_granularity_collapsing_merge_tree.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00926_adaptive_index_granularity_merge_tree.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00926_adaptive_index_granularity_merge_tree.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00926_adaptive_index_granularity_pk.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00926_adaptive_index_granularity_pk.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00926_adaptive_index_granularity_replacing_merge_tree.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00926_adaptive_index_granularity_replacing_merge_tree.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00926_adaptive_index_granularity_versioned_collapsing_merge_tree.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00926_adaptive_index_granularity_versioned_collapsing_merge_tree.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00926_geo_to_h3.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00926_geo_to_h3.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00926_multimatch.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00926_multimatch.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00926_zookeeper_adaptive_index_granularity_replicated_merge_tree.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00926_zookeeper_adaptive_index_granularity_replicated_merge_tree.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00927_asof_join_correct_bt.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00927_asof_join_correct_bt.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00927_asof_join_long.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00927_asof_join_long.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00927_asof_join_noninclusive.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00927_asof_join_noninclusive.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00927_asof_join_other_types.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00927_asof_join_other_types.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00927_asof_joins.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00927_asof_joins.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00927_disable_hyperscan.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00927_disable_hyperscan.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00928_multi_match_constant_constant.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00928_multi_match_constant_constant.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00929_multi_match_edit_distance.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00929_multi_match_edit_distance.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00930_arrayIntersect.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00930_arrayIntersect.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00930_max_partitions_per_insert_block.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00930_max_partitions_per_insert_block.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00931_low_cardinality_nullable_aggregate_function_type.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00931_low_cardinality_nullable_aggregate_function_type.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00931_low_cardinality_read_with_empty_array.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00931_low_cardinality_read_with_empty_array.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00931_low_cardinality_set_index_in_key_condition.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00931_low_cardinality_set_index_in_key_condition.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00932_array_intersect_bug.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00932_array_intersect_bug.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00932_geohash_support.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00932_geohash_support.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00933_alter_ttl.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00933_alter_ttl.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00933_reserved_word.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00933_reserved_word.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00933_test_fix_extra_seek_on_compressed_cache.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00933_test_fix_extra_seek_on_compressed_cache.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00933_ttl_replicated_zookeeper.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00933_ttl_replicated_zookeeper.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00933_ttl_simple.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00933_ttl_simple.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00933_ttl_with_default.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00933_ttl_with_default.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00934_is_valid_utf8.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00934_is_valid_utf8.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00935_to_iso_week_first_year.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00935_to_iso_week_first_year.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00936_crc_functions.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00936_crc_functions.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00936_function_result_with_operator_in.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00936_function_result_with_operator_in.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00936_substring_utf8_non_const.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00936_substring_utf8_non_const.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00937_ipv4_cidr_range.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00937_ipv4_cidr_range.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00937_template_output_format.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00937_template_output_format.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00937_test_use_header_csv.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00937_test_use_header_csv.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00937_test_use_header_tsv.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00937_test_use_header_tsv.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00938_basename.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00938_basename.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00938_dataset_test.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00938_dataset_test.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00938_fix_rwlock_segfault.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00938_fix_rwlock_segfault.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00938_ipv6_cidr_range.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00938_ipv6_cidr_range.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00938_template_input_format.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00938_template_input_format.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00938_test_retention_function.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00938_test_retention_function.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00939_limit_by_offset.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00939_limit_by_offset.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00939_test_null_in.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00939_test_null_in.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00940_max_parts_in_total.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00940_max_parts_in_total.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00940_order_by_read_in_order.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00940_order_by_read_in_order.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00941_system_columns_race_condition.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00941_system_columns_race_condition.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00941_to_custom_week.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00941_to_custom_week.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00942_dataparts_500.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00942_dataparts_500.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00942_mutate_index.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00942_mutate_index.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00942_mv_rename_table.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00942_mv_rename_table.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00943_materialize_index.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00943_materialize_index.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00943_mv_rename_without_inner_table.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00943_mv_rename_without_inner_table.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00944_clear_index_in_partition.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00944_clear_index_in_partition.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00944_create_bloom_filter_index_with_merge_tree.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00944_create_bloom_filter_index_with_merge_tree.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00944_minmax_null.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00944_minmax_null.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00944_ml_test.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00944_ml_test.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00945_bloom_filter_index.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00945_bloom_filter_index.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00945_ml_test.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00945_ml_test.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00946_ml_test.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00946_ml_test.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00947_ml_test.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00947_ml_test.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00948_format_in_with_single_element.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00948_format_in_with_single_element.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00948_to_valid_utf8.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00948_to_valid_utf8.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00948_values_interpreter_template.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00948_values_interpreter_template.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00949_format.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00949_format.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00950_bad_alloc_when_truncate_join_storage.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00950_bad_alloc_when_truncate_join_storage.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00950_default_prewhere.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00950_default_prewhere.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00950_dict_get.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00950_dict_get.sql (99%)
rename {dbms/tests => tests}/queries/0_stateless/00950_test_double_delta_codec.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00950_test_double_delta_codec.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00950_test_gorilla_codec.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00950_test_gorilla_codec.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00951_ngram_search.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00951_ngram_search.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00952_basic_constraints.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00952_basic_constraints.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00952_input_function.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00952_input_function.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00952_insert_into_distributed_with_materialized_column.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00952_insert_into_distributed_with_materialized_column.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00952_part_frozen_info.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00952_part_frozen_info.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/00953_constraints_operations.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/00953_constraints_operations.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/00953_indices_alter_exceptions.reference (100%)
rename {dbms/tests =>
tests}/queries/0_stateless/00953_indices_alter_exceptions.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00953_moving_functions.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00953_moving_functions.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00953_zookeeper_suetin_deduplication_bug.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00953_zookeeper_suetin_deduplication_bug.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00954_client_prepared_statements.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00954_client_prepared_statements.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00954_resample_combinator.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00954_resample_combinator.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00955_complex_prepared_statements.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00955_complex_prepared_statements.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00955_test_final_mark.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00955_test_final_mark.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00955_test_final_mark_use.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00955_test_final_mark_use.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00956_http_prepared_statements.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00956_http_prepared_statements.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00956_join_use_nulls_with_array_column.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00956_join_use_nulls_with_array_column.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00956_sensitive_data_masking.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00956_sensitive_data_masking.sh (97%) rename {dbms/tests => tests}/queries/0_stateless/00957_coalesce_const_nullable_crash.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00957_coalesce_const_nullable_crash.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00957_delta_diff_bug.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00957_delta_diff_bug.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00957_format_with_clashed_aliases.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00957_format_with_clashed_aliases.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00957_neighbor.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00957_neighbor.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00958_format_of_tuple_array_element.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00958_format_of_tuple_array_element.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00959_format_with_different_aliases.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00959_format_with_different_aliases.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00960_eval_ml_method_const.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00960_eval_ml_method_const.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00960_live_view_watch_events_live.py (100%) rename {dbms/tests => tests}/queries/0_stateless/00960_live_view_watch_events_live.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00961_check_table.reference (100%) rename {dbms/tests => 
tests}/queries/0_stateless/00961_check_table.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00961_checksums_in_system_parts_columns_table.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00961_checksums_in_system_parts_columns_table.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00961_temporary_live_view_watch.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00961_temporary_live_view_watch.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00961_visit_param_buffer_underflow.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00961_visit_param_buffer_underflow.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00962_enumNotExect.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00962_enumNotExect.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00962_temporary_live_view_watch_live.py (100%) rename {dbms/tests => tests}/queries/0_stateless/00962_temporary_live_view_watch_live.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00962_visit_param_various.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00962_visit_param_various.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00963_achimbab.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00963_achimbab.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00963_startsWith_force_primary_key.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00963_startsWith_force_primary_key.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00963_temporary_live_view_watch_live_timeout.py.disabled (100%) rename {dbms/tests => tests}/queries/0_stateless/00963_temporary_live_view_watch_live_timeout.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00964_bloom_index_string_functions.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00964_bloom_index_string_functions.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00964_live_view_watch_events_heartbeat.py (100%) rename {dbms/tests => tests}/queries/0_stateless/00964_live_view_watch_events_heartbeat.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00964_os_thread_priority.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00964_os_thread_priority.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00965_live_view_watch_heartbeat.py (100%) rename {dbms/tests => tests}/queries/0_stateless/00965_live_view_watch_heartbeat.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00965_logs_level_bugfix.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00965_logs_level_bugfix.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00965_send_logs_level_concurrent_queries.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00965_send_logs_level_concurrent_queries.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00965_set_index_string_functions.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00965_set_index_string_functions.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00965_shard_unresolvable_addresses.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00965_shard_unresolvable_addresses.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00966_invalid_json_must_not_parse.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00966_invalid_json_must_not_parse.sql (100%) rename {dbms/tests => 
tests}/queries/0_stateless/00966_live_view_watch_events_http.py (100%) rename {dbms/tests => tests}/queries/0_stateless/00966_live_view_watch_events_http.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00967_live_view_watch_http.py (100%) rename {dbms/tests => tests}/queries/0_stateless/00967_live_view_watch_http.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00967_ubsan_bit_test.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00967_ubsan_bit_test.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00968_file_engine_in_subquery.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00968_file_engine_in_subquery.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00968_live_view_select_format_jsoneachrowwithprogress.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00968_live_view_select_format_jsoneachrowwithprogress.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00968_roundAge.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00968_roundAge.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00969_columns_clause.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00969_columns_clause.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00969_live_view_watch_format_jsoneachrowwithprogress.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00969_live_view_watch_format_jsoneachrowwithprogress.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00969_roundDuration.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00969_roundDuration.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00970_live_view_watch_events_http_heartbeat.py (100%) rename {dbms/tests => tests}/queries/0_stateless/00970_live_view_watch_events_http_heartbeat.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00970_substring_arg_validation.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00970_substring_arg_validation.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00971_live_view_watch_http_heartbeat.py (100%) rename {dbms/tests => tests}/queries/0_stateless/00971_live_view_watch_http_heartbeat.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00971_merge_tree_uniform_read_distribution_and_max_rows_to_read.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00971_merge_tree_uniform_read_distribution_and_max_rows_to_read.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00971_query_id_in_logs.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00971_query_id_in_logs.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00972_desc_table_virtual_columns.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00972_desc_table_virtual_columns.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00972_geohashesInBox.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00972_geohashesInBox.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00972_live_view_select_1.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00972_live_view_select_1.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00973_create_table_as_table_function.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00973_create_table_as_table_function.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00973_live_view_select.reference (100%) rename {dbms/tests => 
tests}/queries/0_stateless/00973_live_view_select.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00973_live_view_with_subquery_select.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00973_live_view_with_subquery_select.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00973_live_view_with_subquery_select_join.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00973_live_view_with_subquery_select_join.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00973_live_view_with_subquery_select_join_no_alias.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00973_live_view_with_subquery_select_join_no_alias.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00973_live_view_with_subquery_select_nested.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00973_live_view_with_subquery_select_nested.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00973_live_view_with_subquery_select_nested_with_aggregation.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00973_live_view_with_subquery_select_nested_with_aggregation.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00973_live_view_with_subquery_select_nested_with_aggregation_table_alias.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00973_live_view_with_subquery_select_nested_with_aggregation_table_alias.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00973_live_view_with_subquery_select_table_alias.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00973_live_view_with_subquery_select_table_alias.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00973_live_view_with_subquery_select_with_aggregation.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00973_live_view_with_subquery_select_with_aggregation.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00973_live_view_with_subquery_select_with_aggregation_in_subquery.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00973_live_view_with_subquery_select_with_aggregation_in_subquery.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00973_uniq_non_associativity.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00973_uniq_non_associativity.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00974_adaptive_granularity_secondary_index.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00974_adaptive_granularity_secondary_index.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00974_bitmapContains_with_primary_key.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00974_bitmapContains_with_primary_key.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00974_distributed_join_on.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00974_distributed_join_on.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00974_final_predicate_push_down.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00974_final_predicate_push_down.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00974_fix_join_on.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00974_fix_join_on.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00974_full_outer_join.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00974_full_outer_join.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00974_live_view_select_with_aggregation.reference (100%) 
rename {dbms/tests => tests}/queries/0_stateless/00974_live_view_select_with_aggregation.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00974_low_cardinality_cast.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00974_low_cardinality_cast.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00974_primary_key_for_lowCardinality.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00974_primary_key_for_lowCardinality.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00974_query_profiler.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00974_query_profiler.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00974_text_log_table_not_empty.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00974_text_log_table_not_empty.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00975_indices_mutation_replicated_zookeeper.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00975_indices_mutation_replicated_zookeeper.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00975_json_hang.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00975_json_hang.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00975_live_view_create.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00975_live_view_create.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00975_move_partition_merge_tree.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00975_move_partition_merge_tree.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00975_recursive_materialized_view.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00975_recursive_materialized_view.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00975_sample_prewhere_distributed.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00975_sample_prewhere_distributed.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00975_values_list.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00975_values_list.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00976_asof_join_on.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00976_asof_join_on.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00976_live_view_select_version.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00976_live_view_select_version.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00976_max_execution_speed.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00976_max_execution_speed.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00976_shard_low_cardinality_achimbab.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00976_shard_low_cardinality_achimbab.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00976_system_stop_ttl_merges.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00976_system_stop_ttl_merges.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00976_ttl_with_old_parts.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00976_ttl_with_old_parts.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00977_int_div.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00977_int_div.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00977_join_use_nulls_denny_crane.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00977_join_use_nulls_denny_crane.sql (100%) 
rename {dbms/tests => tests}/queries/0_stateless/00977_live_view_watch_events.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00977_live_view_watch_events.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00978_live_view_watch.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00978_live_view_watch.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00978_ml_math.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00978_ml_math.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00978_sum_map_bugfix.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00978_sum_map_bugfix.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00978_table_function_values_alias.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00978_table_function_values_alias.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00979_live_view_watch_live.py (100%) rename {dbms/tests => tests}/queries/0_stateless/00979_live_view_watch_live.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00979_live_view_watch_live_moving_avg.py (100%) rename {dbms/tests => tests}/queries/0_stateless/00979_live_view_watch_live_moving_avg.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00979_live_view_watch_live_with_subquery.py (100%) rename {dbms/tests => tests}/queries/0_stateless/00979_live_view_watch_live_with_subquery.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00979_quantileExcatExclusive_and_Inclusive.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00979_quantileExcatExclusive_and_Inclusive.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00979_set_index_not.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00979_set_index_not.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00979_toFloat_monotonicity.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00979_toFloat_monotonicity.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00979_yandex_consistent_hash_fpe.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00979_yandex_consistent_hash_fpe.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00980_alter_settings_race.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00980_alter_settings_race.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00980_crash_nullable_decimal.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00980_crash_nullable_decimal.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00980_create_temporary_live_view.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00980_create_temporary_live_view.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00980_full_join_crash_fancyqlx.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00980_full_join_crash_fancyqlx.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00980_merge_alter_settings.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00980_merge_alter_settings.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00980_shard_aggregation_state_deserialization.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00980_shard_aggregation_state_deserialization.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00980_skip_unused_shards_without_sharding_key.reference (100%) rename {dbms/tests => 
tests}/queries/0_stateless/00980_skip_unused_shards_without_sharding_key.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00980_zookeeper_merge_tree_alter_settings.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00980_zookeeper_merge_tree_alter_settings.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00981_in_subquery_with_tuple.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00981_in_subquery_with_tuple.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00981_no_virtual_columns.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00981_no_virtual_columns.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00981_topK_topKWeighted_long.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00981_topK_topKWeighted_long.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00982_array_enumerate_uniq_ranked.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00982_array_enumerate_uniq_ranked.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00982_low_cardinality_setting_in_mv.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00982_low_cardinality_setting_in_mv.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00983_summing_merge_tree_not_an_identifier.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00983_summing_merge_tree_not_an_identifier.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00984_materialized_view_to_columns.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00984_materialized_view_to_columns.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00984_parser_stack_overflow.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00984_parser_stack_overflow.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00985_merge_stack_overflow.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00985_merge_stack_overflow.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00986_materialized_view_stack_overflow.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00986_materialized_view_stack_overflow.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00987_distributed_stack_overflow.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00987_distributed_stack_overflow.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00988_constraints_replication_zookeeper.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00988_constraints_replication_zookeeper.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00988_expansion_aliases_limit.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00988_expansion_aliases_limit.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00988_parallel_parts_removal.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00988_parallel_parts_removal.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00989_parallel_parts_loading.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00989_parallel_parts_loading.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00990_function_current_user.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00990_function_current_user.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00990_hasToken.python (100%) rename {dbms/tests => tests}/queries/0_stateless/00990_hasToken.reference (100%) rename {dbms/tests => 
tests}/queries/0_stateless/00990_hasToken.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00990_hasToken_and_tokenbf.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00990_hasToken_and_tokenbf.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00990_metric_log_table_not_empty.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00990_metric_log_table_not_empty.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00990_request_splitting.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00990_request_splitting.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00991_live_view_watch_event_live.python (100%) rename {dbms/tests => tests}/queries/0_stateless/00991_live_view_watch_event_live.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00991_live_view_watch_event_live.sh.disabled (100%) rename {dbms/tests => tests}/queries/0_stateless/00991_live_view_watch_http.python (100%) rename {dbms/tests => tests}/queries/0_stateless/00991_live_view_watch_http.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00991_live_view_watch_http.sh.disabled (100%) rename {dbms/tests => tests}/queries/0_stateless/00991_system_parts_race_condition.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00991_system_parts_race_condition.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00991_temporary_live_view_watch_events_heartbeat.python (100%) rename {dbms/tests => tests}/queries/0_stateless/00991_temporary_live_view_watch_events_heartbeat.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00991_temporary_live_view_watch_events_heartbeat.sh.disabled (100%) rename {dbms/tests => tests}/queries/0_stateless/00991_temporary_live_view_watch_live.python (100%) rename {dbms/tests => tests}/queries/0_stateless/00991_temporary_live_view_watch_live.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00991_temporary_live_view_watch_live.sh.disabled (100%) rename {dbms/tests => tests}/queries/0_stateless/00992_system_parts_race_condition_zookeeper.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00992_system_parts_race_condition_zookeeper.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00993_system_parts_race_condition_drop_zookeeper.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00993_system_parts_race_condition_drop_zookeeper.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00994_table_function_numbers_mt.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00994_table_function_numbers_mt.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00995_exception_while_insert.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00995_exception_while_insert.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/00995_optimize_read_in_order_with_aggregation.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00995_optimize_read_in_order_with_aggregation.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00995_order_by_with_fill.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00995_order_by_with_fill.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00996_limit_with_ties.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00996_limit_with_ties.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00996_neighbor.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00996_neighbor.sql (100%) rename 
{dbms/tests => tests}/queries/0_stateless/00997_extract_all_crash_6627.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00997_extract_all_crash_6627.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00997_set_index_array.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00997_set_index_array.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00997_trim.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00997_trim.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00998_constraints_all_tables.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00998_constraints_all_tables.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00999_full_join_dup_keys_crash.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00999_full_join_dup_keys_crash.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00999_join_not_nullable_types.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00999_join_not_nullable_types.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00999_join_on_expression.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00999_join_on_expression.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00999_nullable_nested_types_4877.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00999_nullable_nested_types_4877.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00999_settings_no_extra_quotes.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00999_settings_no_extra_quotes.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/00999_test_skip_indices_with_alter_and_merge.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/00999_test_skip_indices_with_alter_and_merge.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01000_bad_size_of_marks_skip_idx.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01000_bad_size_of_marks_skip_idx.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01000_subquery_requires_alias.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01000_subquery_requires_alias.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01000_unneeded_substitutions_client.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01000_unneeded_substitutions_client.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/01001_enums_in_in_section.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01001_enums_in_in_section.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01001_rename_merge_race_condition.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01001_rename_merge_race_condition.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/01002_alter_nullable_adaptive_granularity_long.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01002_alter_nullable_adaptive_granularity_long.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/01003_kill_query_race_condition.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01003_kill_query_race_condition.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/01004_rename_deadlock.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01004_rename_deadlock.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/01005_rwr_shard_deadlock.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01005_rwr_shard_deadlock.sh (100%) rename {dbms/tests => 
tests}/queries/0_stateless/01006_simpod_empty_part_single_column_write.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01006_simpod_empty_part_single_column_write.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/01006_ttl_with_default_2.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01006_ttl_with_default_2.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01007_r1r2_w_r2r1_deadlock.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01007_r1r2_w_r2r1_deadlock.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/01008_materialized_view_henyihanwobushi.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01008_materialized_view_henyihanwobushi.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01009_global_array_join_names.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01009_global_array_join_names.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01009_insert_select_data_loss.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01009_insert_select_data_loss.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01009_insert_select_nicelulu.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01009_insert_select_nicelulu.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01010_low_cardinality_and_native_http.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01010_low_cardinality_and_native_http.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/01010_partial_merge_join.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01010_partial_merge_join.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01010_partial_merge_join_const_and_lc.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01010_partial_merge_join_const_and_lc.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01010_partial_merge_join_negative.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01010_partial_merge_join_negative.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01010_pm_join_all_join_bug.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01010_pm_join_all_join_bug.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01010_pmj_on_disk.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01010_pmj_on_disk.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01010_pmj_one_row_blocks.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01010_pmj_one_row_blocks.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01010_pmj_right_table_memory_limits.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01010_pmj_right_table_memory_limits.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01010_pmj_skip_blocks.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01010_pmj_skip_blocks.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01011_group_uniq_array_memsan.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01011_group_uniq_array_memsan.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01011_test_create_as_skip_indices.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01011_test_create_as_skip_indices.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01012_reset_running_accumulate.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01012_reset_running_accumulate.sql (100%) rename {dbms/tests 
=> tests}/queries/0_stateless/01012_select_limit_x_0.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01012_select_limit_x_0.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01012_serialize_array_memory_usage.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01012_serialize_array_memory_usage.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01012_show_tables_limit.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01012_show_tables_limit.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01013_hex_decimal.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01013_hex_decimal.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01013_hex_float.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01013_hex_float.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01013_repeat_function.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01013_repeat_function.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01013_sync_replica_timeout_zookeeper.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01013_sync_replica_timeout_zookeeper.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/01013_totals_without_aggregation.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01013_totals_without_aggregation.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01014_count_of_merges_metrics.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01014_count_of_merges_metrics.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01014_format_custom_separated.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01014_format_custom_separated.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/01014_function_repeat_corner_cases.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01014_function_repeat_corner_cases.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01014_lazy_database_basic.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01014_lazy_database_basic.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/01015_array_split.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01015_array_split.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01015_attach_part.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01015_attach_part.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01015_database_bad_tables.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01015_database_bad_tables.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01015_empty_in_inner_right_join.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01015_empty_in_inner_right_join.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01015_insert_values_parametrized.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01015_insert_values_parametrized.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/01015_random_constant.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01015_random_constant.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01016_index_tuple_field_type.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01016_index_tuple_field_type.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01016_input_null_as_default.reference (100%) rename {dbms/tests => 
tests}/queries/0_stateless/01016_input_null_as_default.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/01016_macros.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01016_macros.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01016_null_part_minmax.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01016_null_part_minmax.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01016_uniqCombined64.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01016_uniqCombined64.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01017_in_unconvertible_complex_type.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01017_in_unconvertible_complex_type.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/01017_tsv_empty_as_default.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01017_tsv_empty_as_default.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/01017_uniqCombined_memory_usage.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01017_uniqCombined_memory_usage.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01018_Distributed__shard_num.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01018_Distributed__shard_num.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01018_ambiguous_column.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01018_ambiguous_column.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01018_ddl_dictionaries_bad_queries.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01018_ddl_dictionaries_bad_queries.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/01018_ddl_dictionaries_concurrent_requrests.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01018_ddl_dictionaries_concurrent_requrests.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/01018_ddl_dictionaries_create.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01018_ddl_dictionaries_create.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01018_ddl_dictionaries_select.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01018_ddl_dictionaries_select.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01018_ddl_dictionaries_special.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01018_ddl_dictionaries_special.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01018_dictionaries_from_dictionaries.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01018_dictionaries_from_dictionaries.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01018_empty_aggregation_filling.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01018_empty_aggregation_filling.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01018_insert_multiple_blocks_with_defaults.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01018_insert_multiple_blocks_with_defaults.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/01018_optimize_read_in_order_with_in_subquery.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01018_optimize_read_in_order_with_in_subquery.sql (100%) rename {dbms/tests => 
tests}/queries/0_stateless/01019_Buffer_and_max_memory_usage.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01019_Buffer_and_max_memory_usage.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01019_alter_materialized_view_atomic.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01019_alter_materialized_view_atomic.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/01019_alter_materialized_view_consistent.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01019_alter_materialized_view_consistent.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/01019_alter_materialized_view_query.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01019_alter_materialized_view_query.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01019_array_fill.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01019_array_fill.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01019_materialized_view_select_extra_columns.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01019_materialized_view_select_extra_columns.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01019_parallel_parsing_cancel.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01019_parallel_parsing_cancel.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/01020_function_array_compact.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01020_function_array_compact.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01020_function_char.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01020_function_char.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01020_having_without_group_by.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01020_having_without_group_by.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01021_create_as_select.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01021_create_as_select.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01021_only_tuple_columns.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01021_only_tuple_columns.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01021_tuple_parser.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01021_tuple_parser.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01023_materialized_view_query_context.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01023_materialized_view_query_context.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01024__getScalar.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01024__getScalar.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01025_array_compact_generic.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01025_array_compact_generic.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01026_char_utf8.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01026_char_utf8.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01029_early_constant_folding.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01029_early_constant_folding.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01030_concatenate_equal_fixed_strings.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01030_concatenate_equal_fixed_strings.sql (100%) rename {dbms/tests => 
tests}/queries/0_stateless/01030_final_mark_empty_primary_key.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01030_final_mark_empty_primary_key.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01030_incorrect_count_summing_merge_tree.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01030_incorrect_count_summing_merge_tree.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01030_limit_by_with_ties_error.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01030_limit_by_with_ties_error.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/01030_storage_hdfs_syntax.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01030_storage_hdfs_syntax.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01030_storage_set_supports_read.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01030_storage_set_supports_read.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01030_storage_url_syntax.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01030_storage_url_syntax.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01031_mutations_interpreter_and_context.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01031_mutations_interpreter_and_context.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/01031_new_any_join.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01031_new_any_join.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01031_pmj_new_any_semi_join.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01031_pmj_new_any_semi_join.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01031_semi_anti_join.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01031_semi_anti_join.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01032_cityHash64_for_UUID.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01032_cityHash64_for_UUID.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01032_cityHash64_for_decimal.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01032_cityHash64_for_decimal.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01032_duplicate_column_insert_query.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01032_duplicate_column_insert_query.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01033_dictionaries_lifetime.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01033_dictionaries_lifetime.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01033_quota_dcl.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01033_quota_dcl.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01033_storage_odbc_parsing_exception_check.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01033_storage_odbc_parsing_exception_check.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01033_substr_negative_size_arg.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01033_substr_negative_size_arg.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01034_JSONCompactEachRow.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01034_JSONCompactEachRow.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01034_move_partition_from_table_zookeeper.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01034_move_partition_from_table_zookeeper.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/01034_order_by_pk_prefix.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01034_order_by_pk_prefix.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01034_prewhere_max_parallel_replicas_distributed.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01034_prewhere_max_parallel_replicas_distributed.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01034_sample_final_distributed.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01034_sample_final_distributed.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01034_unknown_qualified_column_in_join.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01034_unknown_qualified_column_in_join.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01034_values_parse_float_bug.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01034_values_parse_float_bug.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/01034_with_fill_and_push_down_predicate.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01034_with_fill_and_push_down_predicate.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01035_avg_weighted.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01035_avg_weighted.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/01035_concurrent_move_partition_from_table_zookeeper.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01035_concurrent_move_partition_from_table_zookeeper.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/01035_enum_conversion_native_format.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01035_enum_conversion_native_format.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/01035_lc_empty_part_bug.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01035_lc_empty_part_bug.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/01035_prewhere_with_alias.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01035_prewhere_with_alias.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01036_no_superfluous_dict_reload_on_create_database.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01036_no_superfluous_dict_reload_on_create_database.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01036_no_superfluous_dict_reload_on_create_database_2.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01036_no_superfluous_dict_reload_on_create_database_2.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01036_union_different_columns.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01036_union_different_columns.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01037_polygon_dict_multi_polygons.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01037_polygon_dict_multi_polygons.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01037_polygon_dict_simple_polygons.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01037_polygon_dict_simple_polygons.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01037_zookeeper_check_table_empty_pk.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01037_zookeeper_check_table_empty_pk.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01038_array_of_unnamed_tuples.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01038_array_of_unnamed_tuples.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01038_dictionary_lifetime_min_zero_sec.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01038_dictionary_lifetime_min_zero_sec.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/01039_mergetree_exec_time.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01039_mergetree_exec_time.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01039_row_policy_dcl.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01039_row_policy_dcl.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01039_test_setting_parse.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01039_test_setting_parse.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01040_dictionary_invalidate_query_switchover_long.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01040_dictionary_invalidate_query_switchover_long.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/01040_distributed_directory_monitor_batch_inserts.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01040_distributed_directory_monitor_batch_inserts.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01040_h3_get_resolution.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01040_h3_get_resolution.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01041_create_dictionary_if_not_exists.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01041_create_dictionary_if_not_exists.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01041_h3_is_valid.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01041_h3_is_valid.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01042_check_query_and_last_granule_size.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01042_check_query_and_last_granule_size.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01042_h3_k_ring.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01042_h3_k_ring.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01042_system_reload_dictionary_reloads_completely.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01042_system_reload_dictionary_reloads_completely.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/01043_categorical_iv.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01043_categorical_iv.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01043_dictionary_attribute_properties_values.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01043_dictionary_attribute_properties_values.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01043_geo_distance.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01043_geo_distance.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01043_h3_edge_length_m.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01043_h3_edge_length_m.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01044_great_circle_angle.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01044_great_circle_angle.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01044_h3_edge_angle.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01044_h3_edge_angle.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01045_array_zip.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01045_array_zip.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01045_bloom_filter_null_array.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01045_bloom_filter_null_array.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01045_dictionaries_restrictions.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01045_dictionaries_restrictions.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01045_order_by_pk_special_storages.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01045_order_by_pk_special_storages.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/01045_zookeeper_system_mutations_with_parts_names.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01045_zookeeper_system_mutations_with_parts_names.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/01046_materialized_view_with_join_over_distributed.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01046_materialized_view_with_join_over_distributed.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01046_trivial_count_query_distributed.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01046_trivial_count_query_distributed.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01047_no_alias_columns_with_table_aliases.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01047_no_alias_columns_with_table_aliases.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01047_nullable_rand.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01047_nullable_rand.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01047_simple_aggregate_sizes_of_columns_bug.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01047_simple_aggregate_sizes_of_columns_bug.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01048_exists_query.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01048_exists_query.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01049_join_low_card_bug.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01049_join_low_card_bug.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01049_join_low_card_crash.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01049_join_low_card_crash.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01049_zookeeper_synchronous_mutations.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01049_zookeeper_synchronous_mutations.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01050_clickhouse_dict_source_with_subquery.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01050_clickhouse_dict_source_with_subquery.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01050_engine_join_crash.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01050_engine_join_crash.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01050_engine_join_view_crash.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01050_engine_join_view_crash.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01050_group_array_sample.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01050_group_array_sample.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01051_aggregate_function_crash.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01051_aggregate_function_crash.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01051_all_join_engine.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01051_all_join_engine.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01051_new_any_join_engine.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01051_new_any_join_engine.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01051_random_printable_ascii.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01051_random_printable_ascii.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01051_same_name_alias_with_joins.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01051_same_name_alias_with_joins.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01052_array_reduce_exception.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01052_array_reduce_exception.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01052_compression_buffer_overrun.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01052_compression_buffer_overrun.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/01053_drop_database_mat_view.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01053_drop_database_mat_view.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01053_if_chain_check.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01053_if_chain_check.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01054_cache_dictionary_bunch_update.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01054_cache_dictionary_bunch_update.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/01054_cache_dictionary_overflow_cell.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01054_cache_dictionary_overflow_cell.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01054_random_printable_ascii_ubsan.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01054_random_printable_ascii_ubsan.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/01055_compact_parts.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01055_compact_parts.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01055_compact_parts_1.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01055_compact_parts_1.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01055_compact_parts_granularity.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01055_compact_parts_granularity.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/01055_minmax_index_compact_parts.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01055_minmax_index_compact_parts.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/01055_prewhere_bugs.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01055_prewhere_bugs.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01056_create_table_as.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01056_create_table_as.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01056_negative_with_bloom_filter.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01056_negative_with_bloom_filter.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01056_predicate_optimizer_bugs.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01056_predicate_optimizer_bugs.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01056_prepared_statements_null_and_escaping.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01056_prepared_statements_null_and_escaping.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/01057_http_compression_prefer_brotli.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01057_http_compression_prefer_brotli.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/01058_zlib_ng_level1_bug.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01058_zlib_ng_level1_bug.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/01059_storage_file_brotli.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01059_storage_file_brotli.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01060_avro.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01060_avro.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/01060_defaults_all_columns.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01060_defaults_all_columns.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01060_shutdown_table_after_detach.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01060_shutdown_table_after_detach.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01060_substring_negative_size.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01060_substring_negative_size.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01061_alter_codec_with_type.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01061_alter_codec_with_type.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01062_alter_on_mutataion.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01062_alter_on_mutataion.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01062_max_parser_depth.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01062_max_parser_depth.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/01062_pm_all_join_with_block_continuation.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01062_pm_all_join_with_block_continuation.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01062_pm_multiple_all_join_same_value.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01062_pm_multiple_all_join_same_value.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01063_create_column_set.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01063_create_column_set.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01064_array_auc.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01064_array_auc.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01064_incremental_streaming_from_2_src_with_feedback.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01064_incremental_streaming_from_2_src_with_feedback.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01064_pm_all_join_const_and_nullable.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01064_pm_all_join_const_and_nullable.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01065_array_zip_mixed_const.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01065_array_zip_mixed_const.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01065_if_not_finite.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01065_if_not_finite.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01066_bit_count.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01066_bit_count.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01067_join_null.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01067_join_null.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01068_parens.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01068_parens.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01069_database_memory.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01069_database_memory.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01069_insert_float_as_nullable_unit8.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01069_insert_float_as_nullable_unit8.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01069_materialized_view_alter_target_table.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01069_materialized_view_alter_target_table.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01069_materialized_view_alter_target_table_with_default_expression.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01069_materialized_view_alter_target_table_with_default_expression.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01069_set_in_group_by.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01069_set_in_group_by.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01070_alter_with_ttl.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01070_alter_with_ttl.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01070_exception_code_in_query_log_table.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01070_exception_code_in_query_log_table.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01070_h3_get_base_cell.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01070_h3_get_base_cell.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01070_h3_hex_area_m2.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01070_h3_hex_area_m2.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01070_h3_indexes_are_neighbors.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01070_h3_indexes_are_neighbors.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01070_h3_to_children.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01070_h3_to_children.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01070_h3_to_parent.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01070_h3_to_parent.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01070_h3_to_string.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01070_h3_to_string.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01070_materialize_ttl.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01070_materialize_ttl.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01070_mutations_with_dependencies.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01070_mutations_with_dependencies.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01070_string_to_h3.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01070_string_to_h3.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01070_template_empty_file.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01070_template_empty_file.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01070_to_decimal_or_null_exception.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01070_to_decimal_or_null_exception.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01071_force_optimize_skip_unused_shards.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01071_force_optimize_skip_unused_shards.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01071_http_header_exception_code.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01071_http_header_exception_code.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/01071_in_array.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01071_in_array.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01071_live_view_detach_dependency.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01071_live_view_detach_dependency.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01071_prohibition_secondary_index_with_old_format_merge_tree.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01071_prohibition_secondary_index_with_old_format_merge_tree.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01072_drop_temporary_table_with_same_name.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01072_drop_temporary_table_with_same_name.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01072_json_each_row_data_in_square_brackets.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01072_json_each_row_data_in_square_brackets.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01072_nullable_jit.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01072_nullable_jit.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01072_optimize_skip_unused_shards_const_expr_eval.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01072_optimize_skip_unused_shards_const_expr_eval.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01072_select_constant_limit.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01072_select_constant_limit.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01073_attach_if_not_exists.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01073_attach_if_not_exists.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01073_bad_alter_partition.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01073_bad_alter_partition.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01073_blockSerializedSize.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01073_blockSerializedSize.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01073_crlf_end_of_line.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01073_crlf_end_of_line.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01073_grant_and_revoke.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01073_grant_and_revoke.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01073_show_tables_not_like.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01073_show_tables_not_like.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01074_h3_range_check.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01074_h3_range_check.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01074_partial_revokes.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01074_partial_revokes.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01075_allowed_client_hosts.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01075_allowed_client_hosts.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01075_in_arrays_enmk.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01075_in_arrays_enmk.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01076_array_join_prewhere_const_folding.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01076_array_join_prewhere_const_folding.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01076_cache_dictionary_datarace_exception_ptr.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01076_cache_dictionary_datarace_exception_ptr.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/01076_json_each_row_array.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01076_json_each_row_array.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/01076_parallel_alter_replicated_zookeeper.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01076_parallel_alter_replicated_zookeeper.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/01076_predicate_optimizer_with_view.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01076_predicate_optimizer_with_view.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01076_range_reader_segfault.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01076_range_reader_segfault.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01077_mutations_index_consistency.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01077_mutations_index_consistency.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/01077_yet_another_prewhere_test.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01077_yet_another_prewhere_test.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01078_bloom_filter_operator_not_has.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01078_bloom_filter_operator_not_has.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01078_merge_tree_read_one_thread.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01078_merge_tree_read_one_thread.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01079_alter_default_zookeeper.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01079_alter_default_zookeeper.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01079_bad_alters_zookeeper.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01079_bad_alters_zookeeper.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/01079_bit_operations_using_bitset.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01079_bit_operations_using_bitset.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01079_new_range_reader_segfault.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01079_new_range_reader_segfault.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01079_order_by_pk.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01079_order_by_pk.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01079_parallel_alter_add_drop_column_zookeeper.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01079_parallel_alter_add_drop_column_zookeeper.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/01079_parallel_alter_detach_table_zookeeper.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01079_parallel_alter_detach_table_zookeeper.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/01079_parallel_alter_modify_zookeeper.referece (100%)
rename {dbms/tests => tests}/queries/0_stateless/01079_parallel_alter_modify_zookeeper.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01079_parallel_alter_modify_zookeeper.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/01079_reinterpret_as_fixed_string.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01079_reinterpret_as_fixed_string.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01080_check_for_error_incorrect_size_of_nested_column.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01080_check_for_error_incorrect_size_of_nested_column.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01080_engine_merge_prewhere_tupleelement_error.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01080_engine_merge_prewhere_tupleelement_error.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01080_join_get_null.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01080_join_get_null.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01081_PartialSortingTransform_full_column.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01081_PartialSortingTransform_full_column.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01081_demangle.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01081_demangle.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01081_keywords_formatting.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01081_keywords_formatting.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01082_bit_test_out_of_bound.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01082_bit_test_out_of_bound.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01083_aggregation_memory_efficient_bug.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01083_aggregation_memory_efficient_bug.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01083_cross_to_inner_with_in_bug.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01083_cross_to_inner_with_in_bug.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01083_cross_to_inner_with_like.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01083_cross_to_inner_with_like.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01083_expressions_in_engine_arguments.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01083_expressions_in_engine_arguments.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01083_functional_index_in_mergetree.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01083_functional_index_in_mergetree.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01083_log_family_disk_memory.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01083_log_family_disk_memory.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01083_log_first_column_alias.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01083_log_first_column_alias.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01083_match_zero_byte.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01083_match_zero_byte.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01084_defaults_on_aliases.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01084_defaults_on_aliases.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01084_regexp_empty.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01084_regexp_empty.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01085_datetime_arithmetic_preserve_timezone.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01085_datetime_arithmetic_preserve_timezone.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01085_extract_all_empty.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01085_extract_all_empty.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01085_max_distributed_connections.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01085_max_distributed_connections.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/01085_max_distributed_connections_http.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01085_max_distributed_connections_http.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/01085_regexp_input_format.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01085_regexp_input_format.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/01085_simdjson_uint64.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01085_simdjson_uint64.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01086_modulo_or_zero.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01086_modulo_or_zero.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01086_odbc_roundtrip.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01086_odbc_roundtrip.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01086_regexp_input_format_skip_unmatched.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01086_regexp_input_format_skip_unmatched.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/01087_index_set_ubsan.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01087_index_set_ubsan.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01087_storage_generate.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01087_storage_generate.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01087_table_function_generate.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01087_table_function_generate.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01088_array_slice_of_aggregate_functions.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01088_array_slice_of_aggregate_functions.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01088_benchmark_query_id.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01088_benchmark_query_id.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/01089_alter_settings_old_format.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01089_alter_settings_old_format.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01090_fixed_string_bit_ops.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01090_fixed_string_bit_ops.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01090_zookeeper_mutations_and_insert_quorum.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01090_zookeeper_mutations_and_insert_quorum.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01091_insert_with_default_json.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01091_insert_with_default_json.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01091_num_threads.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01091_num_threads.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01091_query_profiler_does_not_hang.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01091_query_profiler_does_not_hang.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01092_base64.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01092_base64.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01092_memory_profiler.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01092_memory_profiler.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01093_cyclic_defaults_filimonov.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01093_cyclic_defaults_filimonov.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01095_tpch_like_smoke.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01095_tpch_like_smoke.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01096_array_reduce_in_ranges.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01096_array_reduce_in_ranges.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01096_block_serialized_state.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01096_block_serialized_state.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01096_zeros.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01096_zeros.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01097_cyclic_defaults.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01097_cyclic_defaults.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01097_one_more_range_reader_test.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01097_one_more_range_reader_test.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01097_pre_limit.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01097_pre_limit.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01098_sum.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01098_sum.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01098_temporary_and_external_tables.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01098_temporary_and_external_tables.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/01099_operators_date_and_timestamp.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01099_operators_date_and_timestamp.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01099_parallel_distributed_insert_select.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01099_parallel_distributed_insert_select.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01100_split_by_string.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01100_split_by_string.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01101_prewhere_after_alter.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01101_prewhere_after_alter.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01102_distributed_local_in_bug.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01102_distributed_local_in_bug.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01103_check_cpu_instructions_at_startup.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01103_check_cpu_instructions_at_startup.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/01103_distributed_product_mode_local_column_renames.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01103_distributed_product_mode_local_column_renames.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01103_optimize_drop_race_zookeeper.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01103_optimize_drop_race_zookeeper.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/01104_distributed_numbers_test.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01104_distributed_numbers_test.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01104_distributed_one_test.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01104_distributed_one_test.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01104_fixed_string_like.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01104_fixed_string_like.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01105_string_like.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01105_string_like.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01106_const_fixed_string_like.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01106_const_fixed_string_like.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01200_mutations_memory_consumption.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01200_mutations_memory_consumption.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01201_drop_column_compact_part_replicated.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01201_drop_column_compact_part_replicated.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01202_array_auc_special.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01202_array_auc_special.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01210_drop_view.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01210_drop_view.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01211_optimize_skip_unused_shards_type_mismatch.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01211_optimize_skip_unused_shards_type_mismatch.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01212_empty_join_and_totals.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01212_empty_join_and_totals.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01213_optimize_skip_unused_shards_DISTINCT.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01213_optimize_skip_unused_shards_DISTINCT.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01213_point_in_Myanmar.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01213_point_in_Myanmar.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01214_point_in_Mecca.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01214_point_in_Mecca.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01220_scalar_optimization_in_alter.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01220_scalar_optimization_in_alter.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01221_system_settings.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01221_system_settings.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/01230_join_get_truncate.reference (100%)
rename {dbms/tests => tests}/queries/0_stateless/01230_join_get_truncate.sql (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_avro/complex.avro (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_avro/complex.avsc (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_avro/complex.json (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_avro/empty.avro (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_avro/empty.avsc (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_avro/empty.json (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_avro/generate_avro.sh (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_avro/logical_types.avro (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_avro/logical_types.avsc (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_avro/logical_types.json (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_avro/primitive.avro (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_avro/primitive.avsc (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_avro/primitive.json (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_avro/references.avro (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_avro/references.avsc (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_avro/references.json (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_avro/simple.avsc (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_avro/simple.deflate.avro (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_avro/simple.json (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_avro/simple.null.avro (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_avro/simple.snappy.avro (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_orc/test.orc (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_parquet/alltypes_dictionary.parquet (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_parquet/alltypes_dictionary.parquet.columns (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_parquet/alltypes_plain.parquet (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_parquet/alltypes_plain.parquet.columns (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_parquet/alltypes_plain.snappy.parquet (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_parquet/alltypes_plain.snappy.parquet.columns (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_parquet/byte_array_decimal.parquet (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_parquet/byte_array_decimal.parquet.columns (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_parquet/datapage_v2.snappy.parquet (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_parquet/datapage_v2.snappy.parquet.columns (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_parquet/fixed_length_decimal_1.parquet (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_parquet/fixed_length_decimal_1.parquet.columns (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_parquet/fixed_length_decimal_legacy.parquet (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_parquet/fixed_length_decimal_legacy.parquet.columns (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_parquet/int32_decimal.parquet (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_parquet/int32_decimal.parquet.columns (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_parquet/int64_decimal.parquet (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_parquet/int64_decimal.parquet.columns (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_parquet/nation.dict-malformed.parquet (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_parquet/nation.dict-malformed.parquet.columns (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_parquet/nested_lists.snappy.parquet (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_parquet/nested_lists.snappy.parquet.columns (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_parquet/nested_maps.snappy.parquet (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_parquet/nested_maps.snappy.parquet.columns (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_parquet/nonnullable.impala.parquet (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_parquet/nonnullable.impala.parquet.columns (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_parquet/nullable.impala.parquet (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_parquet/nullable.impala.parquet.columns (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_parquet/nulls.snappy.parquet (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_parquet/nulls.snappy.parquet.columns (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_parquet/repeated_no_annotation.parquet (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_parquet/repeated_no_annotation.parquet.columns (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_parquet/userdata1.parquet (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_parquet/userdata1.parquet.columns (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_parquet/userdata2.parquet (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_parquet/userdata2.parquet.columns (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_parquet/userdata3.parquet (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_parquet/userdata3.parquet.columns (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_parquet/userdata4.parquet (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_parquet/userdata4.parquet.columns (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_parquet/userdata5.parquet (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_parquet/userdata5.parquet.columns (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_parquet/v0.7.1.all-named-index.parquet (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_parquet/v0.7.1.all-named-index.parquet.columns (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_parquet/v0.7.1.column-metadata-handling.parquet (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_parquet/v0.7.1.column-metadata-handling.parquet.columns (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_parquet/v0.7.1.parquet (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_parquet/v0.7.1.parquet.columns (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_parquet/v0.7.1.some-named-index.parquet (100%)
rename {dbms/tests => tests}/queries/0_stateless/data_parquet/v0.7.1.some-named-index.parquet.columns (100%)
rename {dbms/tests => tests}/queries/0_stateless/helpers/client.py (100%)
rename {dbms/tests => tests}/queries/0_stateless/helpers/httpclient.py (100%)
rename {dbms/tests => tests}/queries/0_stateless/helpers/httpexpect.py (100%)
rename {dbms/tests => tests}/queries/0_stateless/helpers/uexpect.py (100%)
rename {dbms/tests => tests}/queries/0_stateless/mergetree_mutations.lib (100%)
rename {dbms/tests => tests}/queries/1_stateful/00001_count_hits.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00001_count_hits.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00002_count_visits.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00002_count_visits.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00004_top_counters.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00004_top_counters.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00005_filtering.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00005_filtering.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00006_agregates.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00006_agregates.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00007_uniq.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00007_uniq.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00008_uniq.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00008_uniq.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00009_uniq_distributed.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00009_uniq_distributed.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00010_quantiles_segfault.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00010_quantiles_segfault.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00011_sorting.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00011_sorting.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00012_sorting_distributed.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00012_sorting_distributed.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00013_sorting_of_nested.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00013_sorting_of_nested.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00014_filtering_arrays.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00014_filtering_arrays.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00015_totals_and_no_aggregate_functions.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00015_totals_and_no_aggregate_functions.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00016_any_if_distributed_cond_always_false.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00016_any_if_distributed_cond_always_false.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00017_aggregation_uninitialized_memory.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00017_aggregation_uninitialized_memory.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00020_distinct_order_by_distributed.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00020_distinct_order_by_distributed.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00021_1_select_with_in.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00021_1_select_with_in.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00021_2_select_with_in.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00021_2_select_with_in.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00021_3_select_with_in.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00021_3_select_with_in.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00022_merge_prewhere.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00022_merge_prewhere.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00023_totals_limit.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00023_totals_limit.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00024_random_counters.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00024_random_counters.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00030_array_enumerate_uniq.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00030_array_enumerate_uniq.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00031_array_enumerate_uniq.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00031_array_enumerate_uniq.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00032_aggregate_key64.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00032_aggregate_key64.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00033_aggregate_key_string.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00033_aggregate_key_string.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00034_aggregate_key_fixed_string.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00034_aggregate_key_fixed_string.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00035_aggregate_keys128.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00035_aggregate_keys128.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00036_aggregate_hashed.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00036_aggregate_hashed.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00037_uniq_state_merge1.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00037_uniq_state_merge1.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00038_uniq_state_merge2.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00038_uniq_state_merge2.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00039_primary_key.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00039_primary_key.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00040_aggregating_materialized_view.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00040_aggregating_materialized_view.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00041_aggregating_materialized_view.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00041_aggregating_materialized_view.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00042_any_left_join.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00042_any_left_join.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00043_any_left_join.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00043_any_left_join.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00044_any_left_join_string.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00044_any_left_join_string.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00045_uniq_upto.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00045_uniq_upto.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00046_uniq_upto_distributed.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00046_uniq_upto_distributed.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00047_bar.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00047_bar.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00048_min_max.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00048_min_max.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00049_max_string_if.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00049_max_string_if.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00050_min_max.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00050_min_max.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00051_min_max_array.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00051_min_max_array.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00052_group_by_in.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00052_group_by_in.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00053_replicate_segfault.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00053_replicate_segfault.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00054_merge_tree_partitions.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00054_merge_tree_partitions.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00055_index_and_not.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00055_index_and_not.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00056_view.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00056_view.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00059_merge_sorting_empty_array_joined.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00059_merge_sorting_empty_array_joined.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00060_move_to_prewhere_and_sets.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00060_move_to_prewhere_and_sets.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00061_storage_buffer.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00061_storage_buffer.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00062_loyalty.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00062_loyalty.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00063_loyalty_joins.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00063_loyalty_joins.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00065_loyalty_with_storage_join.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00065_loyalty_with_storage_join.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00066_sorting_distributed_many_replicas.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00066_sorting_distributed_many_replicas.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00067_union_all.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00067_union_all.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00068_subquery_in_prewhere.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00068_subquery_in_prewhere.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00069_duplicate_aggregation_keys.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00069_duplicate_aggregation_keys.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00071_merge_tree_optimize_aio.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00071_merge_tree_optimize_aio.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00072_compare_date_and_string_index.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00072_compare_date_and_string_index.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00073_uniq_array.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00073_uniq_array.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00074_full_join.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00074_full_join.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00075_left_array_join.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00075_left_array_join.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00076_system_columns_bytes.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00076_system_columns_bytes.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00077_log_tinylog_stripelog.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00077_log_tinylog_stripelog.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00078_group_by_arrays.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00078_group_by_arrays.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00079_array_join_not_used_joined_column.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00079_array_join_not_used_joined_column.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00080_array_join_and_union.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00080_array_join_and_union.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00081_group_by_without_key_and_totals.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00081_group_by_without_key_and_totals.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00082_quantiles.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00082_quantiles.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00083_array_filter.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00083_array_filter.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00084_external_aggregation.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00084_external_aggregation.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00085_monotonic_evaluation_segfault.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00085_monotonic_evaluation_segfault.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00086_array_reduce.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00086_array_reduce.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00087_where_0.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00087_where_0.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00088_global_in_one_shard_and_rows_before_limit.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00088_global_in_one_shard_and_rows_before_limit.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00089_position_functions_with_non_constant_arg.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00089_position_functions_with_non_constant_arg.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00090_thread_pool_deadlock.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00090_thread_pool_deadlock.sh (100%)
rename {dbms/tests => tests}/queries/1_stateful/00091_prewhere_two_conditions.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00091_prewhere_two_conditions.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00092_obfuscator.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00092_obfuscator.sh (100%)
rename {dbms/tests => tests}/queries/1_stateful/00139_like.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00139_like.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00140_rename.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00140_rename.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00141_transform.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00141_transform.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00142_system_columns.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00142_system_columns.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00143_transform_non_const_default.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00143_transform_non_const_default.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00144_functions_of_aggregation_states.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00144_functions_of_aggregation_states.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00145_aggregate_functions_statistics.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00145_aggregate_functions_statistics.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00146_aggregate_function_uniq.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00146_aggregate_function_uniq.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00147_global_in_aggregate_function.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00147_global_in_aggregate_function.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00148_monotonic_functions_and_index.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00148_monotonic_functions_and_index.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00149_quantiles_timing_distributed.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00149_quantiles_timing_distributed.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00150_quantiles_timing_precision.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00150_quantiles_timing_precision.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00151_order_by_read_in_order.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00151_order_by_read_in_order.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00151_replace_partition_with_different_granularity.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00151_replace_partition_with_different_granularity.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00152_insert_different_granularity.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00152_insert_different_granularity.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00153_aggregate_arena_race.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00153_aggregate_arena_race.sql (100%)
rename {dbms/tests => tests}/queries/1_stateful/00154_avro.reference (100%)
rename {dbms/tests => tests}/queries/1_stateful/00154_avro.sql (100%)
rename {dbms/tests => tests}/queries/__init__.py (100%)
rename {dbms/tests => tests}/queries/bugs/00938_client_suggestions.sh (100%)
rename {dbms/tests => tests}/queries/bugs/01060_defaults_all_columns.reference (100%)
rename {dbms/tests
=> tests}/queries/bugs/default_prewhere.sql (100%)
 rename {dbms/tests => tests}/queries/bugs/low_cardinality_remove.sql (100%)
 rename {dbms/tests => tests}/queries/bugs/missing_scalar_subquery_removal.sql (100%)
 rename {dbms/tests => tests}/queries/bugs/position_case_insensitive_utf8.sql (100%)
 rename {dbms/tests => tests}/queries/bugs/remote_scalar_subquery.sql (100%)
 rename {dbms/tests => tests}/queries/bugs/totals_rollup_having_block_header.sql (100%)
 rename {dbms/tests => tests}/queries/bugs/view_bad_types.sql (100%)
 rename {dbms/tests => tests}/queries/conftest.py (100%)
 rename {dbms/tests => tests}/queries/query_test.py (100%)
 rename {dbms/tests => tests}/queries/server.py (100%)
 rename {dbms/tests => tests}/queries/shell_config.sh (100%)
 rename {dbms/tests => tests}/server-test.xml (100%)
 rename {dbms/tests => tests}/stress (100%)
 rename {dbms/tests => tests}/strings_dictionary.xml (100%)
 rename {dbms/tests => tests}/tsan_suppressions.txt (100%)
 rename {dbms/tests => tests}/users.d/readonly.xml (100%)
 rename {dbms/tests => tests}/users.xml (100%)
diff --git a/.github/ISSUE_TEMPLATE/backward-compatibility.md b/.github/ISSUE_TEMPLATE/backward-compatibility.md
index f40a9d6a915..8f87197e73d 100644
--- a/.github/ISSUE_TEMPLATE/backward-compatibility.md
+++ b/.github/ISSUE_TEMPLATE/backward-compatibility.md
@@ -17,7 +17,7 @@ A clear and concise description of what works not as it is supposed to.
 * Which interface to use, if matters
 * Non-default settings, if any
 * `CREATE TABLE` statements for all tables involved
-* Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/programs/obfuscator/Obfuscator.cpp#L42-L80) if necessary
+* Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/master/programs/obfuscator/Obfuscator.cpp#L42-L80) if necessary
 * Queries to run that lead to unexpected result
 
 **Error message and/or stacktrace**
diff --git a/.github/ISSUE_TEMPLATE/bug-report.md b/.github/ISSUE_TEMPLATE/bug-report.md
index eb73dc3e435..1445af4b051 100644
--- a/.github/ISSUE_TEMPLATE/bug-report.md
+++ b/.github/ISSUE_TEMPLATE/bug-report.md
@@ -17,7 +17,7 @@ A clear and concise description of what works not as it is supposed to.
 * Which interface to use, if matters
 * Non-default settings, if any
 * `CREATE TABLE` statements for all tables involved
-* Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/programs/obfuscator/Obfuscator.cpp#L42-L80) if necessary
+* Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/master/programs/obfuscator/Obfuscator.cpp#L42-L80) if necessary
 * Queries to run that lead to unexpected result
 
 **Expected behavior**
diff --git a/.github/ISSUE_TEMPLATE/performance-issue.md b/.github/ISSUE_TEMPLATE/performance-issue.md
index 96c8cb77afb..d0e549039a6 100644
--- a/.github/ISSUE_TEMPLATE/performance-issue.md
+++ b/.github/ISSUE_TEMPLATE/performance-issue.md
@@ -17,7 +17,7 @@ What exactly works slower than expected?
 * Which interface to use, if matters
 * Non-default settings, if any
 * `CREATE TABLE` statements for all tables involved
-* Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/programs/obfuscator/Obfuscator.cpp#L42-L80) if necessary
+* Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/master/programs/obfuscator/Obfuscator.cpp#L42-L80) if necessary
 * Queries to run that lead to slow performance
 
 **Expected performance**
diff --git a/.github/ISSUE_TEMPLATE/unexpected-behaviour.md b/.github/ISSUE_TEMPLATE/unexpected-behaviour.md
index 25557693140..27ab217ca33 100644
--- a/.github/ISSUE_TEMPLATE/unexpected-behaviour.md
+++ b/.github/ISSUE_TEMPLATE/unexpected-behaviour.md
@@ -17,7 +17,7 @@ A clear and concise description of what works not as it is supposed to.
 * Which interface to use, if matters
 * Non-default settings, if any
 * `CREATE TABLE` statements for all tables involved
-* Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/programs/obfuscator/Obfuscator.cpp#L42-L80) if necessary
+* Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/master/programs/obfuscator/Obfuscator.cpp#L42-L80) if necessary
 * Queries to run that lead to unexpected result
 
 **Expected behavior**
diff --git a/.github/ISSUE_TEMPLATE/usability-issue.md b/.github/ISSUE_TEMPLATE/usability-issue.md
index daa83878182..6a084a72619 100644
--- a/.github/ISSUE_TEMPLATE/usability-issue.md
+++ b/.github/ISSUE_TEMPLATE/usability-issue.md
@@ -17,7 +17,7 @@ A clear and concise description of what works not as it is supposed to.
 * Which interface to use, if matters
 * Non-default settings, if any
 * `CREATE TABLE` statements for all tables involved
-* Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/programs/obfuscator/Obfuscator.cpp#L42-L80) if necessary
+* Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/master/programs/obfuscator/Obfuscator.cpp#L42-L80) if necessary
 * Queries to run that lead to unexpected result
 
 **Expected behavior**
diff --git a/.gitignore b/.gitignore
index 6aa331edc84..703306f9232 100644
--- a/.gitignore
+++ b/.gitignore
@@ -73,100 +73,100 @@ contrib/libpoco/Poco/
 contrib/libpoco/bin/
 contrib/libpoco/cmake_uninstall.cmake
 contrib/libre2/re2_st/
-dbms/src/Client/clickhouse-benchmark
-dbms/src/Client/clickhouse-client
-dbms/src/Client/tests/test-connect
-dbms/src/Common/tests/arena_with_free_lists
-dbms/src/Common/tests/auto_array
-dbms/src/Common/tests/compact_array
-dbms/src/Common/tests/hash_table
-dbms/src/Common/tests/hashes_test
-dbms/src/Common/tests/int_hashes_perf
-dbms/src/Common/tests/lru_cache
-dbms/src/Common/tests/parallel_aggregation
-dbms/src/Common/tests/parallel_aggregation2
-dbms/src/Common/tests/radix_sort
-dbms/src/Common/tests/shell_command_test
-dbms/src/Common/tests/simple_cache
-dbms/src/Common/tests/sip_hash
-dbms/src/Common/tests/sip_hash_perf
-dbms/src/Common/tests/small_table
-dbms/src/Core/tests/exception
-dbms/src/Core/tests/field
-dbms/src/Core/tests/rvo_test
-dbms/src/Core/tests/string_pool
-dbms/src/DataStreams/tests/aggregating_stream
-dbms/src/DataStreams/tests/block_tab_separated_streams
-dbms/src/DataStreams/tests/collapsing_sorted_stream
-dbms/src/DataStreams/tests/expression_stream
-dbms/src/DataStreams/tests/filter_stream
-dbms/src/DataStreams/tests/filter_stream_hitlog
-dbms/src/DataStreams/tests/fork_streams
-dbms/src/DataStreams/tests/glue_streams
-dbms/src/DataStreams/tests/json_streams
-dbms/src/DataStreams/tests/native_streams
-dbms/src/DataStreams/tests/sorting_stream
-dbms/src/DataStreams/tests/tab_separated_streams
-dbms/src/DataStreams/tests/union_stream
-dbms/src/DataStreams/tests/union_stream2
-dbms/src/DataTypes/tests/data_type_string
-dbms/src/DataTypes/tests/data_types_number_fixed
-dbms/src/Functions/tests/functions_arithmetic
-dbms/src/Functions/tests/logical_functions_performance
-dbms/src/Functions/tests/number_traits
-dbms/src/IO/tests/async_write
-dbms/src/IO/tests/cached_compressed_read_buffer
-dbms/src/IO/tests/compressed_buffer
-dbms/src/IO/tests/hashing_read_buffer
-dbms/src/IO/tests/hashing_write_buffer
-dbms/src/IO/tests/io_and_exceptions
-dbms/src/IO/tests/io_operators
-dbms/src/IO/tests/mempbrk
-dbms/src/IO/tests/o_direct_and_dirty_pages
-dbms/src/IO/tests/parse_int_perf
-dbms/src/IO/tests/parse_int_perf2
-dbms/src/IO/tests/read_buffer
-dbms/src/IO/tests/read_buffer_aio
-dbms/src/IO/tests/read_buffer_perf
-dbms/src/IO/tests/read_escaped_string
-dbms/src/IO/tests/read_float_perf
-dbms/src/IO/tests/read_write_int
-dbms/src/IO/tests/valid_utf8
-dbms/src/IO/tests/valid_utf8_perf
-dbms/src/IO/tests/var_uint
-dbms/src/IO/tests/write_buffer
-dbms/src/IO/tests/write_buffer_aio
-dbms/src/IO/tests/write_buffer_perf
-dbms/src/Interpreters/tests/address_patterns
-dbms/src/Interpreters/tests/aggregate
-dbms/src/Interpreters/tests/compiler_test
-dbms/src/Interpreters/tests/create_query
-dbms/src/Interpreters/tests/expression
-dbms/src/Interpreters/tests/expression_analyzer
-dbms/src/Interpreters/tests/hash_map
-dbms/src/Interpreters/tests/hash_map2
-dbms/src/Interpreters/tests/hash_map3
-dbms/src/Interpreters/tests/hash_map_string
-dbms/src/Interpreters/tests/hash_map_string_2
-dbms/src/Interpreters/tests/hash_map_string_3
-dbms/src/Interpreters/tests/hash_map_string_small
-dbms/src/Interpreters/tests/in_join_subqueries_preprocessor
-dbms/src/Interpreters/tests/logical_expressions_optimizer
-dbms/src/Interpreters/tests/select_query
-dbms/src/Interpreters/tests/two_level_hash_map
-dbms/src/Interpreters/tests/users
-dbms/src/Parsers/tests/create_parser
-dbms/src/Parsers/tests/select_parser
-dbms/src/Server/clickhouse-server
-dbms/src/Server/clickhouse-server.init
-dbms/src/Storages/tests/hit_log
-dbms/src/Storages/tests/merge_tree
-dbms/src/Storages/tests/part_checker
-dbms/src/Storages/tests/part_name
-dbms/src/Storages/tests/pk_condition
-dbms/src/Storages/tests/seek_speed_test
-dbms/src/Storages/tests/storage_log
-dbms/src/Storages/tests/system_numbers
+dbms/Client/clickhouse-benchmark
+dbms/Client/clickhouse-client
+dbms/Client/tests/test-connect
+dbms/Common/tests/arena_with_free_lists
+dbms/Common/tests/auto_array
+dbms/Common/tests/compact_array
+dbms/Common/tests/hash_table
+dbms/Common/tests/hashes_test
+dbms/Common/tests/int_hashes_perf
+dbms/Common/tests/lru_cache
+dbms/Common/tests/parallel_aggregation
+dbms/Common/tests/parallel_aggregation2
+dbms/Common/tests/radix_sort
+dbms/Common/tests/shell_command_test
+dbms/Common/tests/simple_cache
+dbms/Common/tests/sip_hash
+dbms/Common/tests/sip_hash_perf
+dbms/Common/tests/small_table
+dbms/Core/tests/exception
+dbms/Core/tests/field
+dbms/Core/tests/rvo_test
+dbms/Core/tests/string_pool
+dbms/DataStreams/tests/aggregating_stream
+dbms/DataStreams/tests/block_tab_separated_streams
+dbms/DataStreams/tests/collapsing_sorted_stream
+dbms/DataStreams/tests/expression_stream
+dbms/DataStreams/tests/filter_stream
+dbms/DataStreams/tests/filter_stream_hitlog
+dbms/DataStreams/tests/fork_streams
+dbms/DataStreams/tests/glue_streams
+dbms/DataStreams/tests/json_streams
+dbms/DataStreams/tests/native_streams
+dbms/DataStreams/tests/sorting_stream
+dbms/DataStreams/tests/tab_separated_streams
+dbms/DataStreams/tests/union_stream
+dbms/DataStreams/tests/union_stream2
+dbms/DataTypes/tests/data_type_string
+dbms/DataTypes/tests/data_types_number_fixed
+dbms/Functions/tests/functions_arithmetic
+dbms/Functions/tests/logical_functions_performance
+dbms/Functions/tests/number_traits
+dbms/IO/tests/async_write
+dbms/IO/tests/cached_compressed_read_buffer
+dbms/IO/tests/compressed_buffer
+dbms/IO/tests/hashing_read_buffer
+dbms/IO/tests/hashing_write_buffer
+dbms/IO/tests/io_and_exceptions
+dbms/IO/tests/io_operators
+dbms/IO/tests/mempbrk
+dbms/IO/tests/o_direct_and_dirty_pages
+dbms/IO/tests/parse_int_perf
+dbms/IO/tests/parse_int_perf2
+dbms/IO/tests/read_buffer
+dbms/IO/tests/read_buffer_aio
+dbms/IO/tests/read_buffer_perf
+dbms/IO/tests/read_escaped_string
+dbms/IO/tests/read_float_perf
+dbms/IO/tests/read_write_int
+dbms/IO/tests/valid_utf8
+dbms/IO/tests/valid_utf8_perf
+dbms/IO/tests/var_uint
+dbms/IO/tests/write_buffer
+dbms/IO/tests/write_buffer_aio
+dbms/IO/tests/write_buffer_perf
+dbms/Interpreters/tests/address_patterns
+dbms/Interpreters/tests/aggregate
+dbms/Interpreters/tests/compiler_test
+dbms/Interpreters/tests/create_query
+dbms/Interpreters/tests/expression
+dbms/Interpreters/tests/expression_analyzer
+dbms/Interpreters/tests/hash_map
+dbms/Interpreters/tests/hash_map2
+dbms/Interpreters/tests/hash_map3
+dbms/Interpreters/tests/hash_map_string
+dbms/Interpreters/tests/hash_map_string_2
+dbms/Interpreters/tests/hash_map_string_3
+dbms/Interpreters/tests/hash_map_string_small
+dbms/Interpreters/tests/in_join_subqueries_preprocessor
+dbms/Interpreters/tests/logical_expressions_optimizer
+dbms/Interpreters/tests/select_query
+dbms/Interpreters/tests/two_level_hash_map
+dbms/Interpreters/tests/users
+dbms/Parsers/tests/create_parser
+dbms/Parsers/tests/select_parser
+dbms/Server/clickhouse-server
+dbms/Server/clickhouse-server.init
+dbms/Storages/tests/hit_log
+dbms/Storages/tests/merge_tree
+dbms/Storages/tests/part_checker
+dbms/Storages/tests/part_name
+dbms/Storages/tests/pk_condition
+dbms/Storages/tests/seek_speed_test
+dbms/Storages/tests/storage_log
+dbms/Storages/tests/system_numbers
 libs/libcommon/src/revision.h
 libs/libcommon/src/tests/date_lut2
 libs/libcommon/src/tests/date_lut3
@@ -184,15 +184,15 @@ libs/libzkutil/src/tests/zkutil_zookeeper_holder
 utils/zookeeper-create-entry-to-download-part/zookeeper-create-entry-to-download-part
 utils/zookeeper-dump-tree/zookeeper-dump-tree
 utils/zookeeper-remove-by-list/zookeeper-remove-by-list
-dbms/src/Storages/tests/remove_symlink_directory
+dbms/Storages/tests/remove_symlink_directory
 libs/libcommon/src/tests/json_test
 utils/compressor/zstd_test
 utils/wikistat-loader/wikistat-loader
-dbms/src/Common/tests/pod_array
+dbms/Common/tests/pod_array
 
-dbms/src/Server/data/*
-dbms/src/Server/metadata/*
-dbms/src/Server/status
+dbms/Server/data/*
+dbms/Server/metadata/*
+dbms/Server/status
 config-9001.xml
 *-preprocessed.xml
@@ -242,7 +242,7 @@ website/package-lock.json
 */.DS_Store
 
 # Ignore files for locally disabled tests
-/dbms/tests/queries/**/*.disabled
+/dbms/queries/**/*.disabled
 
 # cquery cache
 /.cquery-cache
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index d9f207a06d4..972edf11384 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -31,11 +31,11 @@ build:
     - docker pull $CI_REGISTRY/yandex/clickhouse-builder
     - docker run --rm --volumes-from "${HOSTNAME}-build" --workdir "${CI_PROJECT_DIR}" --env CI_PROJECT_DIR=${CI_PROJECT_DIR} $CI_REGISTRY/yandex/clickhouse-builder /build_gitlab_ci.sh
 # You can upload your binary to nexus
-    - curl -v --keepalive-time 60 --keepalive --user "$NEXUS_USER:$NEXUS_PASSWORD" -XPUT "http://$NEXUS_HOST/repository/binaries/$CI_PROJECT_NAME" --upload-file ./dbms/src/Server/clickhouse
+    - curl -v --keepalive-time 60 --keepalive --user "$NEXUS_USER:$NEXUS_PASSWORD" -XPUT "http://$NEXUS_HOST/repository/binaries/$CI_PROJECT_NAME" --upload-file ./dbms/Server/clickhouse
 # Or download artifacts from gitlab
   artifacts:
     paths:
-    - ./dbms/src/Server/clickhouse
+    - ./dbms/Server/clickhouse
     expire_in: 1 day
   tags:
-    - docker
\ No newline at end of file
+    - docker
diff --git a/CHANGELOG.md b/CHANGELOG.md
index f588adb7ef0..a0ea7f62b6c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -234,7 +234,7 @@
 * Updated checking for hung queries in clickhouse-test script [#8858](https://github.com/ClickHouse/ClickHouse/pull/8858) ([Alexander Kazakov](https://github.com/Akazz))
 * Removed some useless files from repository. [#8843](https://github.com/ClickHouse/ClickHouse/pull/8843) ([alexey-milovidov](https://github.com/alexey-milovidov))
 * Changed type of math perftests from `once` to `loop`. [#8783](https://github.com/ClickHouse/ClickHouse/pull/8783) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
-* Add docker image which allows to build interactive code browser HTML report for our codebase. [#8781](https://github.com/ClickHouse/ClickHouse/pull/8781) ([alesapin](https://github.com/alesapin)) See [Woboq Code Browser](https://clickhouse-test-reports.s3.yandex.net/codebrowser/html_report///ClickHouse/dbms/src/index.html)
+* Add docker image which allows to build interactive code browser HTML report for our codebase. [#8781](https://github.com/ClickHouse/ClickHouse/pull/8781) ([alesapin](https://github.com/alesapin)) See [Woboq Code Browser](https://clickhouse-test-reports.s3.yandex.net/codebrowser/html_report///ClickHouse/dbms/index.html)
 * Suppress some test failures under MSan. [#8780](https://github.com/ClickHouse/ClickHouse/pull/8780) ([Alexander Kuzmenkov](https://github.com/akuzm))
 * Speedup "exception while insert" test. This test often time out in debug-with-coverage build. [#8711](https://github.com/ClickHouse/ClickHouse/pull/8711) ([alexey-milovidov](https://github.com/alexey-milovidov))
 * Updated `libcxx` and `libcxxabi` to master. In preparation to [#9304](https://github.com/ClickHouse/ClickHouse/issues/9304) [#9308](https://github.com/ClickHouse/ClickHouse/pull/9308) ([alexey-milovidov](https://github.com/alexey-milovidov))
@@ -341,7 +341,7 @@
 [#9513](https://github.com/ClickHouse/ClickHouse/pull/9513) [(filimonov)](https://github.com/filimonov)
 
 #### New Feature
-* Add `deduplicate_blocks_in_dependent_materialized_views` option to control the behaviour of idempotent inserts into tables with materialized views. This new feature was added to the bugfix release by a special request from Altinity. 
+* Add `deduplicate_blocks_in_dependent_materialized_views` option to control the behaviour of idempotent inserts into tables with materialized views. This new feature was added to the bugfix release by a special request from Altinity.
 [#9070](https://github.com/ClickHouse/ClickHouse/pull/9070) [(urykhy)](https://github.com/urykhy)
 
 ### ClickHouse release v20.1.2.4, 2020-01-22
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 8feb5d8c129..c1342a5ca97 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -380,8 +380,13 @@ macro (add_executable target)
     endif()
 endmacro()
 
+set(ConfigIncludePath ${CMAKE_CURRENT_BINARY_DIR}/includes/configs CACHE INTERNAL "Path to generated configuration files.")
+include_directories(${ConfigIncludePath})
+
 add_subdirectory (base)
-add_subdirectory (utils)
 add_subdirectory (dbms)
+add_subdirectory (programs)
+add_subdirectory (tests)
+add_subdirectory (utils)
 
 include (cmake/print_include_directories.cmake)
diff --git a/dbms/benchmark/benchmark.sh b/benchmark/benchmark.sh
similarity index 100%
rename from dbms/benchmark/benchmark.sh
rename to benchmark/benchmark.sh
diff --git a/dbms/benchmark/clickhouse/benchmark-chyt.sh b/benchmark/clickhouse/benchmark-chyt.sh
similarity index 100%
rename from dbms/benchmark/clickhouse/benchmark-chyt.sh
rename to benchmark/clickhouse/benchmark-chyt.sh
diff --git a/dbms/benchmark/clickhouse/benchmark-new.sh b/benchmark/clickhouse/benchmark-new.sh
similarity index 100%
rename from dbms/benchmark/clickhouse/benchmark-new.sh
rename to benchmark/clickhouse/benchmark-new.sh
diff --git a/dbms/benchmark/clickhouse/benchmark-yql.sh b/benchmark/clickhouse/benchmark-yql.sh
similarity index 100%
rename from dbms/benchmark/clickhouse/benchmark-yql.sh
rename to benchmark/clickhouse/benchmark-yql.sh
diff --git a/dbms/benchmark/clickhouse/queries.sql b/benchmark/clickhouse/queries.sql
similarity index 100%
rename from dbms/benchmark/clickhouse/queries.sql
rename to benchmark/clickhouse/queries.sql
diff --git a/dbms/benchmark/create_dump.sh b/benchmark/create_dump.sh
similarity index 100%
rename from dbms/benchmark/create_dump.sh
rename to benchmark/create_dump.sh
diff --git a/dbms/benchmark/greenplum/README b/benchmark/greenplum/README
similarity index 100%
rename from dbms/benchmark/greenplum/README
rename to benchmark/greenplum/README
diff --git a/dbms/benchmark/greenplum/benchmark.sh b/benchmark/greenplum/benchmark.sh
similarity index 100%
rename from dbms/benchmark/greenplum/benchmark.sh
rename to benchmark/greenplum/benchmark.sh
diff --git a/dbms/benchmark/greenplum/dump_dataset_from_ch.sh b/benchmark/greenplum/dump_dataset_from_ch.sh
similarity index 100%
rename from dbms/benchmark/greenplum/dump_dataset_from_ch.sh
rename to benchmark/greenplum/dump_dataset_from_ch.sh
diff --git a/dbms/benchmark/greenplum/load_data_set.sql b/benchmark/greenplum/load_data_set.sql
similarity index 100%
rename from dbms/benchmark/greenplum/load_data_set.sql
rename to benchmark/greenplum/load_data_set.sql
diff --git a/dbms/benchmark/greenplum/queries.sql b/benchmark/greenplum/queries.sql
similarity index 100%
rename from dbms/benchmark/greenplum/queries.sql
rename to benchmark/greenplum/queries.sql
diff --git a/dbms/benchmark/greenplum/result_parser.py b/benchmark/greenplum/result_parser.py
similarity index 100%
rename from dbms/benchmark/greenplum/result_parser.py
rename to benchmark/greenplum/result_parser.py
diff --git a/dbms/benchmark/greenplum/schema.sql b/benchmark/greenplum/schema.sql
similarity index 100%
rename from dbms/benchmark/greenplum/schema.sql
rename to benchmark/greenplum/schema.sql
diff --git a/dbms/benchmark/hive/conf.sh b/benchmark/hive/conf.sh
similarity index 100%
rename from dbms/benchmark/hive/conf.sh
rename to benchmark/hive/conf.sh
diff --git
a/dbms/benchmark/hive/define_schema.sql b/benchmark/hive/define_schema.sql similarity index 100% rename from dbms/benchmark/hive/define_schema.sql rename to benchmark/hive/define_schema.sql diff --git a/dbms/benchmark/hive/expect.tcl b/benchmark/hive/expect.tcl similarity index 100% rename from dbms/benchmark/hive/expect.tcl rename to benchmark/hive/expect.tcl diff --git a/dbms/benchmark/hive/log/log_100m_tuned b/benchmark/hive/log/log_100m_tuned similarity index 100% rename from dbms/benchmark/hive/log/log_100m_tuned rename to benchmark/hive/log/log_100m_tuned diff --git a/dbms/benchmark/hive/log/log_10m/log_10m_ b/benchmark/hive/log/log_10m/log_10m_ similarity index 100% rename from dbms/benchmark/hive/log/log_10m/log_10m_ rename to benchmark/hive/log/log_10m/log_10m_ diff --git a/dbms/benchmark/hive/log/log_10m/log_10m_1 b/benchmark/hive/log/log_10m/log_10m_1 similarity index 100% rename from dbms/benchmark/hive/log/log_10m/log_10m_1 rename to benchmark/hive/log/log_10m/log_10m_1 diff --git a/dbms/benchmark/hive/log/log_10m/log_10m_2 b/benchmark/hive/log/log_10m/log_10m_2 similarity index 100% rename from dbms/benchmark/hive/log/log_10m/log_10m_2 rename to benchmark/hive/log/log_10m/log_10m_2 diff --git a/dbms/benchmark/hive/log/log_10m/log_10m_3 b/benchmark/hive/log/log_10m/log_10m_3 similarity index 100% rename from dbms/benchmark/hive/log/log_10m/log_10m_3 rename to benchmark/hive/log/log_10m/log_10m_3 diff --git a/dbms/benchmark/hive/log/log_10m/log_10m_tuned b/benchmark/hive/log/log_10m/log_10m_tuned similarity index 100% rename from dbms/benchmark/hive/log/log_10m/log_10m_tuned rename to benchmark/hive/log/log_10m/log_10m_tuned diff --git a/dbms/benchmark/hive/log/log_10m/log_hits_10m b/benchmark/hive/log/log_10m/log_hits_10m similarity index 100% rename from dbms/benchmark/hive/log/log_10m/log_hits_10m rename to benchmark/hive/log/log_10m/log_hits_10m diff --git a/dbms/benchmark/hive/queries.sql b/benchmark/hive/queries.sql similarity index 100% rename from dbms/benchmark/hive/queries.sql rename to benchmark/hive/queries.sql diff --git a/dbms/benchmark/hive/run_hive.sh b/benchmark/hive/run_hive.sh similarity index 100% rename from dbms/benchmark/hive/run_hive.sh rename to benchmark/hive/run_hive.sh diff --git a/dbms/benchmark/infinidb/conf.sh b/benchmark/infinidb/conf.sh similarity index 100% rename from dbms/benchmark/infinidb/conf.sh rename to benchmark/infinidb/conf.sh diff --git a/dbms/benchmark/infinidb/define_schema.sql b/benchmark/infinidb/define_schema.sql similarity index 100% rename from dbms/benchmark/infinidb/define_schema.sql rename to benchmark/infinidb/define_schema.sql diff --git a/dbms/benchmark/infinidb/expect.tcl b/benchmark/infinidb/expect.tcl similarity index 100% rename from dbms/benchmark/infinidb/expect.tcl rename to benchmark/infinidb/expect.tcl diff --git a/dbms/benchmark/infinidb/log/log_100m b/benchmark/infinidb/log/log_100m similarity index 100% rename from dbms/benchmark/infinidb/log/log_100m rename to benchmark/infinidb/log/log_100m diff --git a/dbms/benchmark/infinidb/log/log_100m_tuned b/benchmark/infinidb/log/log_100m_tuned similarity index 100% rename from dbms/benchmark/infinidb/log/log_100m_tuned rename to benchmark/infinidb/log/log_100m_tuned diff --git a/dbms/benchmark/infinidb/log/log_10m b/benchmark/infinidb/log/log_10m similarity index 100% rename from dbms/benchmark/infinidb/log/log_10m rename to benchmark/infinidb/log/log_10m diff --git a/dbms/benchmark/infinidb/log/log_10m_tuned b/benchmark/infinidb/log/log_10m_tuned similarity index 
100% rename from dbms/benchmark/infinidb/log/log_10m_tuned rename to benchmark/infinidb/log/log_10m_tuned diff --git a/dbms/benchmark/infinidb/queries.sql b/benchmark/infinidb/queries.sql similarity index 100% rename from dbms/benchmark/infinidb/queries.sql rename to benchmark/infinidb/queries.sql diff --git a/dbms/benchmark/infobright/conf.sh b/benchmark/infobright/conf.sh similarity index 100% rename from dbms/benchmark/infobright/conf.sh rename to benchmark/infobright/conf.sh diff --git a/dbms/benchmark/infobright/define_schema.sql b/benchmark/infobright/define_schema.sql similarity index 100% rename from dbms/benchmark/infobright/define_schema.sql rename to benchmark/infobright/define_schema.sql diff --git a/dbms/benchmark/infobright/expect.tcl b/benchmark/infobright/expect.tcl similarity index 100% rename from dbms/benchmark/infobright/expect.tcl rename to benchmark/infobright/expect.tcl diff --git a/dbms/benchmark/infobright/log-community/log_10m b/benchmark/infobright/log-community/log_10m similarity index 100% rename from dbms/benchmark/infobright/log-community/log_10m rename to benchmark/infobright/log-community/log_10m diff --git a/dbms/benchmark/infobright/queries.sql b/benchmark/infobright/queries.sql similarity index 100% rename from dbms/benchmark/infobright/queries.sql rename to benchmark/infobright/queries.sql diff --git a/dbms/benchmark/memsql/benchmark.sh b/benchmark/memsql/benchmark.sh similarity index 100% rename from dbms/benchmark/memsql/benchmark.sh rename to benchmark/memsql/benchmark.sh diff --git a/dbms/benchmark/memsql/instructions.txt b/benchmark/memsql/instructions.txt similarity index 100% rename from dbms/benchmark/memsql/instructions.txt rename to benchmark/memsql/instructions.txt diff --git a/dbms/benchmark/memsql/queries.sql b/benchmark/memsql/queries.sql similarity index 100% rename from dbms/benchmark/memsql/queries.sql rename to benchmark/memsql/queries.sql diff --git a/dbms/benchmark/monetdb/conf.sh b/benchmark/monetdb/conf.sh similarity index 100% rename from dbms/benchmark/monetdb/conf.sh rename to benchmark/monetdb/conf.sh diff --git a/dbms/benchmark/monetdb/define_schema.sql b/benchmark/monetdb/define_schema.sql similarity index 100% rename from dbms/benchmark/monetdb/define_schema.sql rename to benchmark/monetdb/define_schema.sql diff --git a/dbms/benchmark/monetdb/expect.tcl b/benchmark/monetdb/expect.tcl similarity index 100% rename from dbms/benchmark/monetdb/expect.tcl rename to benchmark/monetdb/expect.tcl diff --git a/dbms/benchmark/monetdb/log/log_100m b/benchmark/monetdb/log/log_100m similarity index 100% rename from dbms/benchmark/monetdb/log/log_100m rename to benchmark/monetdb/log/log_100m diff --git a/dbms/benchmark/monetdb/log/log_100m_1 b/benchmark/monetdb/log/log_100m_1 similarity index 100% rename from dbms/benchmark/monetdb/log/log_100m_1 rename to benchmark/monetdb/log/log_100m_1 diff --git a/dbms/benchmark/monetdb/log/log_100m_corrected b/benchmark/monetdb/log/log_100m_corrected similarity index 100% rename from dbms/benchmark/monetdb/log/log_100m_corrected rename to benchmark/monetdb/log/log_100m_corrected diff --git a/dbms/benchmark/monetdb/log/log_100m_corrected_1 b/benchmark/monetdb/log/log_100m_corrected_1 similarity index 100% rename from dbms/benchmark/monetdb/log/log_100m_corrected_1 rename to benchmark/monetdb/log/log_100m_corrected_1 diff --git a/dbms/benchmark/monetdb/log/log_100m_corrected_2 b/benchmark/monetdb/log/log_100m_corrected_2 similarity index 100% rename from dbms/benchmark/monetdb/log/log_100m_corrected_2 
rename to benchmark/monetdb/log/log_100m_corrected_2
diff --git a/dbms/benchmark/monetdb/log/log_10m b/benchmark/monetdb/log/log_10m
similarity index 100%
rename from dbms/benchmark/monetdb/log/log_10m
rename to benchmark/monetdb/log/log_10m
diff --git a/dbms/benchmark/monetdb/log/log_10m_corrected b/benchmark/monetdb/log/log_10m_corrected
similarity index 100%
rename from dbms/benchmark/monetdb/log/log_10m_corrected
rename to benchmark/monetdb/log/log_10m_corrected
diff --git a/dbms/benchmark/monetdb/log/log_10m_corrected_1 b/benchmark/monetdb/log/log_10m_corrected_1
similarity index 100%
rename from dbms/benchmark/monetdb/log/log_10m_corrected_1
rename to benchmark/monetdb/log/log_10m_corrected_1
diff --git a/dbms/benchmark/monetdb/log/log_upload_100m b/benchmark/monetdb/log/log_upload_100m
similarity index 100%
rename from dbms/benchmark/monetdb/log/log_upload_100m
rename to benchmark/monetdb/log/log_upload_100m
diff --git a/dbms/benchmark/monetdb/log/log_upload_1b b/benchmark/monetdb/log/log_upload_1b
similarity index 100%
rename from dbms/benchmark/monetdb/log/log_upload_1b
rename to benchmark/monetdb/log/log_upload_1b
diff --git a/dbms/benchmark/monetdb/queries.sql b/benchmark/monetdb/queries.sql
similarity index 100%
rename from dbms/benchmark/monetdb/queries.sql
rename to benchmark/monetdb/queries.sql
diff --git a/dbms/benchmark/vertica/README b/benchmark/vertica/README
similarity index 100%
rename from dbms/benchmark/vertica/README
rename to benchmark/vertica/README
diff --git a/dbms/benchmark/vertica/benchmark.sh b/benchmark/vertica/benchmark.sh
similarity index 100%
rename from dbms/benchmark/vertica/benchmark.sh
rename to benchmark/vertica/benchmark.sh
diff --git a/dbms/benchmark/vertica/hits_define_schema.sql b/benchmark/vertica/hits_define_schema.sql
similarity index 100%
rename from dbms/benchmark/vertica/hits_define_schema.sql
rename to benchmark/vertica/hits_define_schema.sql
diff --git a/dbms/benchmark/vertica/queries.sql b/benchmark/vertica/queries.sql
similarity index 100%
rename from dbms/benchmark/vertica/queries.sql
rename to benchmark/vertica/queries.sql
diff --git a/cmake/lib_name.cmake b/cmake/lib_name.cmake
index 51a424cb4e2..9f53c7bc6d8 100644
--- a/cmake/lib_name.cmake
+++ b/cmake/lib_name.cmake
@@ -1,5 +1,5 @@
 set(DIVIDE_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/libdivide)
-set(DBMS_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/dbms/src ${ClickHouse_BINARY_DIR}/dbms/src)
+set(DBMS_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/dbms ${ClickHouse_BINARY_DIR}/dbms)
 set(DOUBLE_CONVERSION_CONTRIB_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/double-conversion)
 set(METROHASH_CONTRIB_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/libmetrohash/src)
 set(PCG_RANDOM_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/libpcg-random/include)
diff --git a/cmake/sanitize.cmake b/cmake/sanitize.cmake
index 3d192f1fe76..7d906de7602 100644
--- a/cmake/sanitize.cmake
+++ b/cmake/sanitize.cmake
@@ -23,7 +23,7 @@ if (SANITIZE)
     # RelWithDebInfo, and downgrade optimizations to -O1 but not to -Og, to
     # keep the binary size down.
     # TODO: try compiling with -Og and with ld.gold.
-    set (MSAN_FLAGS "-fsanitize=memory -fsanitize-memory-track-origins -fno-optimize-sibling-calls -fsanitize-blacklist=${CMAKE_SOURCE_DIR}/dbms/tests/msan_suppressions.txt")
+    set (MSAN_FLAGS "-fsanitize=memory -fsanitize-memory-track-origins -fno-optimize-sibling-calls -fsanitize-blacklist=${CMAKE_SOURCE_DIR}/tests/msan_suppressions.txt")
 
     set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} ${MSAN_FLAGS}")
     set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SAN_FLAGS} ${MSAN_FLAGS}")
diff --git a/dbms/src/Access/AccessControlManager.cpp b/dbms/Access/AccessControlManager.cpp
similarity index 100%
rename from dbms/src/Access/AccessControlManager.cpp
rename to dbms/Access/AccessControlManager.cpp
diff --git a/dbms/src/Access/AccessControlManager.h b/dbms/Access/AccessControlManager.h
similarity index 100%
rename from dbms/src/Access/AccessControlManager.h
rename to dbms/Access/AccessControlManager.h
diff --git a/dbms/src/Access/AccessFlags.h b/dbms/Access/AccessFlags.h
similarity index 100%
rename from dbms/src/Access/AccessFlags.h
rename to dbms/Access/AccessFlags.h
diff --git a/dbms/src/Access/AccessRights.cpp b/dbms/Access/AccessRights.cpp
similarity index 100%
rename from dbms/src/Access/AccessRights.cpp
rename to dbms/Access/AccessRights.cpp
diff --git a/dbms/src/Access/AccessRights.h b/dbms/Access/AccessRights.h
similarity index 100%
rename from dbms/src/Access/AccessRights.h
rename to dbms/Access/AccessRights.h
diff --git a/dbms/src/Access/AccessRightsElement.cpp b/dbms/Access/AccessRightsElement.cpp
similarity index 100%
rename from dbms/src/Access/AccessRightsElement.cpp
rename to dbms/Access/AccessRightsElement.cpp
diff --git a/dbms/src/Access/AccessRightsElement.h b/dbms/Access/AccessRightsElement.h
similarity index 100%
rename from dbms/src/Access/AccessRightsElement.h
rename to dbms/Access/AccessRightsElement.h
diff --git a/dbms/src/Access/AccessType.h b/dbms/Access/AccessType.h
similarity index 100%
rename from dbms/src/Access/AccessType.h
rename to dbms/Access/AccessType.h
diff --git a/dbms/src/Access/AllowedClientHosts.cpp b/dbms/Access/AllowedClientHosts.cpp
similarity index 100%
rename from dbms/src/Access/AllowedClientHosts.cpp
rename to dbms/Access/AllowedClientHosts.cpp
diff --git a/dbms/src/Access/AllowedClientHosts.h b/dbms/Access/AllowedClientHosts.h
similarity index 100%
rename from dbms/src/Access/AllowedClientHosts.h
rename to dbms/Access/AllowedClientHosts.h
diff --git a/dbms/src/Access/Authentication.cpp b/dbms/Access/Authentication.cpp
similarity index 100%
rename from dbms/src/Access/Authentication.cpp
rename to dbms/Access/Authentication.cpp
diff --git a/dbms/src/Access/Authentication.h b/dbms/Access/Authentication.h
similarity index 100%
rename from dbms/src/Access/Authentication.h
rename to dbms/Access/Authentication.h
diff --git a/dbms/src/Access/CMakeLists.txt b/dbms/Access/CMakeLists.txt
similarity index 100%
rename from dbms/src/Access/CMakeLists.txt
rename to dbms/Access/CMakeLists.txt
diff --git a/dbms/src/Access/ContextAccess.cpp b/dbms/Access/ContextAccess.cpp
similarity index 100%
rename from dbms/src/Access/ContextAccess.cpp
rename to dbms/Access/ContextAccess.cpp
diff --git a/dbms/src/Access/ContextAccess.h b/dbms/Access/ContextAccess.h
similarity index 100%
rename from dbms/src/Access/ContextAccess.h
rename to dbms/Access/ContextAccess.h
diff --git a/dbms/src/Access/DiskAccessStorage.cpp b/dbms/Access/DiskAccessStorage.cpp
similarity index 100%
rename from dbms/src/Access/DiskAccessStorage.cpp
rename to dbms/Access/DiskAccessStorage.cpp
diff --git
a/dbms/src/Access/DiskAccessStorage.h b/dbms/Access/DiskAccessStorage.h similarity index 100% rename from dbms/src/Access/DiskAccessStorage.h rename to dbms/Access/DiskAccessStorage.h diff --git a/dbms/src/Access/EnabledQuota.cpp b/dbms/Access/EnabledQuota.cpp similarity index 100% rename from dbms/src/Access/EnabledQuota.cpp rename to dbms/Access/EnabledQuota.cpp diff --git a/dbms/src/Access/EnabledQuota.h b/dbms/Access/EnabledQuota.h similarity index 100% rename from dbms/src/Access/EnabledQuota.h rename to dbms/Access/EnabledQuota.h diff --git a/dbms/src/Access/EnabledRoles.cpp b/dbms/Access/EnabledRoles.cpp similarity index 100% rename from dbms/src/Access/EnabledRoles.cpp rename to dbms/Access/EnabledRoles.cpp diff --git a/dbms/src/Access/EnabledRoles.h b/dbms/Access/EnabledRoles.h similarity index 100% rename from dbms/src/Access/EnabledRoles.h rename to dbms/Access/EnabledRoles.h diff --git a/dbms/src/Access/EnabledRolesInfo.cpp b/dbms/Access/EnabledRolesInfo.cpp similarity index 100% rename from dbms/src/Access/EnabledRolesInfo.cpp rename to dbms/Access/EnabledRolesInfo.cpp diff --git a/dbms/src/Access/EnabledRolesInfo.h b/dbms/Access/EnabledRolesInfo.h similarity index 100% rename from dbms/src/Access/EnabledRolesInfo.h rename to dbms/Access/EnabledRolesInfo.h diff --git a/dbms/src/Access/EnabledRowPolicies.cpp b/dbms/Access/EnabledRowPolicies.cpp similarity index 100% rename from dbms/src/Access/EnabledRowPolicies.cpp rename to dbms/Access/EnabledRowPolicies.cpp diff --git a/dbms/src/Access/EnabledRowPolicies.h b/dbms/Access/EnabledRowPolicies.h similarity index 100% rename from dbms/src/Access/EnabledRowPolicies.h rename to dbms/Access/EnabledRowPolicies.h diff --git a/dbms/src/Access/EnabledSettings.cpp b/dbms/Access/EnabledSettings.cpp similarity index 100% rename from dbms/src/Access/EnabledSettings.cpp rename to dbms/Access/EnabledSettings.cpp diff --git a/dbms/src/Access/EnabledSettings.h b/dbms/Access/EnabledSettings.h similarity index 100% rename from dbms/src/Access/EnabledSettings.h rename to dbms/Access/EnabledSettings.h diff --git a/dbms/src/Access/ExtendedRoleSet.cpp b/dbms/Access/ExtendedRoleSet.cpp similarity index 100% rename from dbms/src/Access/ExtendedRoleSet.cpp rename to dbms/Access/ExtendedRoleSet.cpp diff --git a/dbms/src/Access/ExtendedRoleSet.h b/dbms/Access/ExtendedRoleSet.h similarity index 100% rename from dbms/src/Access/ExtendedRoleSet.h rename to dbms/Access/ExtendedRoleSet.h diff --git a/dbms/src/Access/IAccessEntity.cpp b/dbms/Access/IAccessEntity.cpp similarity index 100% rename from dbms/src/Access/IAccessEntity.cpp rename to dbms/Access/IAccessEntity.cpp diff --git a/dbms/src/Access/IAccessEntity.h b/dbms/Access/IAccessEntity.h similarity index 100% rename from dbms/src/Access/IAccessEntity.h rename to dbms/Access/IAccessEntity.h diff --git a/dbms/src/Access/IAccessStorage.cpp b/dbms/Access/IAccessStorage.cpp similarity index 100% rename from dbms/src/Access/IAccessStorage.cpp rename to dbms/Access/IAccessStorage.cpp diff --git a/dbms/src/Access/IAccessStorage.h b/dbms/Access/IAccessStorage.h similarity index 100% rename from dbms/src/Access/IAccessStorage.h rename to dbms/Access/IAccessStorage.h diff --git a/dbms/src/Access/MemoryAccessStorage.cpp b/dbms/Access/MemoryAccessStorage.cpp similarity index 100% rename from dbms/src/Access/MemoryAccessStorage.cpp rename to dbms/Access/MemoryAccessStorage.cpp diff --git a/dbms/src/Access/MemoryAccessStorage.h b/dbms/Access/MemoryAccessStorage.h similarity index 100% rename from 
dbms/src/Access/MemoryAccessStorage.h rename to dbms/Access/MemoryAccessStorage.h diff --git a/dbms/src/Access/MultipleAccessStorage.cpp b/dbms/Access/MultipleAccessStorage.cpp similarity index 100% rename from dbms/src/Access/MultipleAccessStorage.cpp rename to dbms/Access/MultipleAccessStorage.cpp diff --git a/dbms/src/Access/MultipleAccessStorage.h b/dbms/Access/MultipleAccessStorage.h similarity index 100% rename from dbms/src/Access/MultipleAccessStorage.h rename to dbms/Access/MultipleAccessStorage.h diff --git a/dbms/src/Access/Quota.cpp b/dbms/Access/Quota.cpp similarity index 100% rename from dbms/src/Access/Quota.cpp rename to dbms/Access/Quota.cpp diff --git a/dbms/src/Access/Quota.h b/dbms/Access/Quota.h similarity index 100% rename from dbms/src/Access/Quota.h rename to dbms/Access/Quota.h diff --git a/dbms/src/Access/QuotaCache.cpp b/dbms/Access/QuotaCache.cpp similarity index 100% rename from dbms/src/Access/QuotaCache.cpp rename to dbms/Access/QuotaCache.cpp diff --git a/dbms/src/Access/QuotaCache.h b/dbms/Access/QuotaCache.h similarity index 100% rename from dbms/src/Access/QuotaCache.h rename to dbms/Access/QuotaCache.h diff --git a/dbms/src/Access/QuotaUsageInfo.cpp b/dbms/Access/QuotaUsageInfo.cpp similarity index 100% rename from dbms/src/Access/QuotaUsageInfo.cpp rename to dbms/Access/QuotaUsageInfo.cpp diff --git a/dbms/src/Access/QuotaUsageInfo.h b/dbms/Access/QuotaUsageInfo.h similarity index 100% rename from dbms/src/Access/QuotaUsageInfo.h rename to dbms/Access/QuotaUsageInfo.h diff --git a/dbms/src/Access/Role.cpp b/dbms/Access/Role.cpp similarity index 100% rename from dbms/src/Access/Role.cpp rename to dbms/Access/Role.cpp diff --git a/dbms/src/Access/Role.h b/dbms/Access/Role.h similarity index 100% rename from dbms/src/Access/Role.h rename to dbms/Access/Role.h diff --git a/dbms/src/Access/RoleCache.cpp b/dbms/Access/RoleCache.cpp similarity index 100% rename from dbms/src/Access/RoleCache.cpp rename to dbms/Access/RoleCache.cpp diff --git a/dbms/src/Access/RoleCache.h b/dbms/Access/RoleCache.h similarity index 100% rename from dbms/src/Access/RoleCache.h rename to dbms/Access/RoleCache.h diff --git a/dbms/src/Access/RowPolicy.cpp b/dbms/Access/RowPolicy.cpp similarity index 100% rename from dbms/src/Access/RowPolicy.cpp rename to dbms/Access/RowPolicy.cpp diff --git a/dbms/src/Access/RowPolicy.h b/dbms/Access/RowPolicy.h similarity index 100% rename from dbms/src/Access/RowPolicy.h rename to dbms/Access/RowPolicy.h diff --git a/dbms/src/Access/RowPolicyCache.cpp b/dbms/Access/RowPolicyCache.cpp similarity index 100% rename from dbms/src/Access/RowPolicyCache.cpp rename to dbms/Access/RowPolicyCache.cpp diff --git a/dbms/src/Access/RowPolicyCache.h b/dbms/Access/RowPolicyCache.h similarity index 100% rename from dbms/src/Access/RowPolicyCache.h rename to dbms/Access/RowPolicyCache.h diff --git a/dbms/src/Access/SettingsConstraints.cpp b/dbms/Access/SettingsConstraints.cpp similarity index 100% rename from dbms/src/Access/SettingsConstraints.cpp rename to dbms/Access/SettingsConstraints.cpp diff --git a/dbms/src/Access/SettingsConstraints.h b/dbms/Access/SettingsConstraints.h similarity index 100% rename from dbms/src/Access/SettingsConstraints.h rename to dbms/Access/SettingsConstraints.h diff --git a/dbms/src/Access/SettingsProfile.cpp b/dbms/Access/SettingsProfile.cpp similarity index 100% rename from dbms/src/Access/SettingsProfile.cpp rename to dbms/Access/SettingsProfile.cpp diff --git a/dbms/src/Access/SettingsProfile.h b/dbms/Access/SettingsProfile.h 
similarity index 100% rename from dbms/src/Access/SettingsProfile.h rename to dbms/Access/SettingsProfile.h diff --git a/dbms/src/Access/SettingsProfileElement.cpp b/dbms/Access/SettingsProfileElement.cpp similarity index 100% rename from dbms/src/Access/SettingsProfileElement.cpp rename to dbms/Access/SettingsProfileElement.cpp diff --git a/dbms/src/Access/SettingsProfileElement.h b/dbms/Access/SettingsProfileElement.h similarity index 100% rename from dbms/src/Access/SettingsProfileElement.h rename to dbms/Access/SettingsProfileElement.h diff --git a/dbms/src/Access/SettingsProfilesCache.cpp b/dbms/Access/SettingsProfilesCache.cpp similarity index 100% rename from dbms/src/Access/SettingsProfilesCache.cpp rename to dbms/Access/SettingsProfilesCache.cpp diff --git a/dbms/src/Access/SettingsProfilesCache.h b/dbms/Access/SettingsProfilesCache.h similarity index 100% rename from dbms/src/Access/SettingsProfilesCache.h rename to dbms/Access/SettingsProfilesCache.h diff --git a/dbms/src/Access/User.cpp b/dbms/Access/User.cpp similarity index 100% rename from dbms/src/Access/User.cpp rename to dbms/Access/User.cpp diff --git a/dbms/src/Access/User.h b/dbms/Access/User.h similarity index 100% rename from dbms/src/Access/User.h rename to dbms/Access/User.h diff --git a/dbms/src/Access/UsersConfigAccessStorage.cpp b/dbms/Access/UsersConfigAccessStorage.cpp similarity index 100% rename from dbms/src/Access/UsersConfigAccessStorage.cpp rename to dbms/Access/UsersConfigAccessStorage.cpp diff --git a/dbms/src/Access/UsersConfigAccessStorage.h b/dbms/Access/UsersConfigAccessStorage.h similarity index 100% rename from dbms/src/Access/UsersConfigAccessStorage.h rename to dbms/Access/UsersConfigAccessStorage.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionAggThrow.cpp b/dbms/AggregateFunctions/AggregateFunctionAggThrow.cpp similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionAggThrow.cpp rename to dbms/AggregateFunctions/AggregateFunctionAggThrow.cpp diff --git a/dbms/src/AggregateFunctions/AggregateFunctionArgMinMax.h b/dbms/AggregateFunctions/AggregateFunctionArgMinMax.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionArgMinMax.h rename to dbms/AggregateFunctions/AggregateFunctionArgMinMax.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionArray.cpp b/dbms/AggregateFunctions/AggregateFunctionArray.cpp similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionArray.cpp rename to dbms/AggregateFunctions/AggregateFunctionArray.cpp diff --git a/dbms/src/AggregateFunctions/AggregateFunctionArray.h b/dbms/AggregateFunctions/AggregateFunctionArray.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionArray.h rename to dbms/AggregateFunctions/AggregateFunctionArray.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionAvg.cpp b/dbms/AggregateFunctions/AggregateFunctionAvg.cpp similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionAvg.cpp rename to dbms/AggregateFunctions/AggregateFunctionAvg.cpp diff --git a/dbms/src/AggregateFunctions/AggregateFunctionAvg.h b/dbms/AggregateFunctions/AggregateFunctionAvg.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionAvg.h rename to dbms/AggregateFunctions/AggregateFunctionAvg.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionAvgWeighted.cpp b/dbms/AggregateFunctions/AggregateFunctionAvgWeighted.cpp similarity index 100% rename from 
dbms/src/AggregateFunctions/AggregateFunctionAvgWeighted.cpp rename to dbms/AggregateFunctions/AggregateFunctionAvgWeighted.cpp diff --git a/dbms/src/AggregateFunctions/AggregateFunctionAvgWeighted.h b/dbms/AggregateFunctions/AggregateFunctionAvgWeighted.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionAvgWeighted.h rename to dbms/AggregateFunctions/AggregateFunctionAvgWeighted.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionBitwise.cpp b/dbms/AggregateFunctions/AggregateFunctionBitwise.cpp similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionBitwise.cpp rename to dbms/AggregateFunctions/AggregateFunctionBitwise.cpp diff --git a/dbms/src/AggregateFunctions/AggregateFunctionBitwise.h b/dbms/AggregateFunctions/AggregateFunctionBitwise.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionBitwise.h rename to dbms/AggregateFunctions/AggregateFunctionBitwise.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionBoundingRatio.cpp b/dbms/AggregateFunctions/AggregateFunctionBoundingRatio.cpp similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionBoundingRatio.cpp rename to dbms/AggregateFunctions/AggregateFunctionBoundingRatio.cpp diff --git a/dbms/src/AggregateFunctions/AggregateFunctionBoundingRatio.h b/dbms/AggregateFunctions/AggregateFunctionBoundingRatio.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionBoundingRatio.h rename to dbms/AggregateFunctions/AggregateFunctionBoundingRatio.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionCategoricalInformationValue.cpp b/dbms/AggregateFunctions/AggregateFunctionCategoricalInformationValue.cpp similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionCategoricalInformationValue.cpp rename to dbms/AggregateFunctions/AggregateFunctionCategoricalInformationValue.cpp diff --git a/dbms/src/AggregateFunctions/AggregateFunctionCategoricalInformationValue.h b/dbms/AggregateFunctions/AggregateFunctionCategoricalInformationValue.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionCategoricalInformationValue.h rename to dbms/AggregateFunctions/AggregateFunctionCategoricalInformationValue.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionCombinatorFactory.cpp b/dbms/AggregateFunctions/AggregateFunctionCombinatorFactory.cpp similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionCombinatorFactory.cpp rename to dbms/AggregateFunctions/AggregateFunctionCombinatorFactory.cpp diff --git a/dbms/src/AggregateFunctions/AggregateFunctionCombinatorFactory.h b/dbms/AggregateFunctions/AggregateFunctionCombinatorFactory.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionCombinatorFactory.h rename to dbms/AggregateFunctions/AggregateFunctionCombinatorFactory.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionCount.cpp b/dbms/AggregateFunctions/AggregateFunctionCount.cpp similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionCount.cpp rename to dbms/AggregateFunctions/AggregateFunctionCount.cpp diff --git a/dbms/src/AggregateFunctions/AggregateFunctionCount.h b/dbms/AggregateFunctions/AggregateFunctionCount.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionCount.h rename to dbms/AggregateFunctions/AggregateFunctionCount.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionEntropy.cpp 
b/dbms/AggregateFunctions/AggregateFunctionEntropy.cpp similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionEntropy.cpp rename to dbms/AggregateFunctions/AggregateFunctionEntropy.cpp diff --git a/dbms/src/AggregateFunctions/AggregateFunctionEntropy.h b/dbms/AggregateFunctions/AggregateFunctionEntropy.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionEntropy.h rename to dbms/AggregateFunctions/AggregateFunctionEntropy.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionFactory.cpp b/dbms/AggregateFunctions/AggregateFunctionFactory.cpp similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionFactory.cpp rename to dbms/AggregateFunctions/AggregateFunctionFactory.cpp diff --git a/dbms/src/AggregateFunctions/AggregateFunctionFactory.h b/dbms/AggregateFunctions/AggregateFunctionFactory.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionFactory.h rename to dbms/AggregateFunctions/AggregateFunctionFactory.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionForEach.cpp b/dbms/AggregateFunctions/AggregateFunctionForEach.cpp similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionForEach.cpp rename to dbms/AggregateFunctions/AggregateFunctionForEach.cpp diff --git a/dbms/src/AggregateFunctions/AggregateFunctionForEach.h b/dbms/AggregateFunctions/AggregateFunctionForEach.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionForEach.h rename to dbms/AggregateFunctions/AggregateFunctionForEach.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionGroupArray.cpp b/dbms/AggregateFunctions/AggregateFunctionGroupArray.cpp similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionGroupArray.cpp rename to dbms/AggregateFunctions/AggregateFunctionGroupArray.cpp diff --git a/dbms/src/AggregateFunctions/AggregateFunctionGroupArray.h b/dbms/AggregateFunctions/AggregateFunctionGroupArray.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionGroupArray.h rename to dbms/AggregateFunctions/AggregateFunctionGroupArray.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionGroupArrayInsertAt.cpp b/dbms/AggregateFunctions/AggregateFunctionGroupArrayInsertAt.cpp similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionGroupArrayInsertAt.cpp rename to dbms/AggregateFunctions/AggregateFunctionGroupArrayInsertAt.cpp diff --git a/dbms/src/AggregateFunctions/AggregateFunctionGroupArrayInsertAt.h b/dbms/AggregateFunctions/AggregateFunctionGroupArrayInsertAt.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionGroupArrayInsertAt.h rename to dbms/AggregateFunctions/AggregateFunctionGroupArrayInsertAt.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionGroupArrayMoving.cpp b/dbms/AggregateFunctions/AggregateFunctionGroupArrayMoving.cpp similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionGroupArrayMoving.cpp rename to dbms/AggregateFunctions/AggregateFunctionGroupArrayMoving.cpp diff --git a/dbms/src/AggregateFunctions/AggregateFunctionGroupArrayMoving.h b/dbms/AggregateFunctions/AggregateFunctionGroupArrayMoving.h similarity index 100% rename from dbms/src/AggregateFunctions/AggregateFunctionGroupArrayMoving.h rename to dbms/AggregateFunctions/AggregateFunctionGroupArrayMoving.h diff --git a/dbms/src/AggregateFunctions/AggregateFunctionGroupBitmap.cpp b/dbms/AggregateFunctions/AggregateFunctionGroupBitmap.cpp 
similarity index 100%
rename from dbms/src/AggregateFunctions/AggregateFunctionGroupBitmap.cpp
rename to dbms/AggregateFunctions/AggregateFunctionGroupBitmap.cpp
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionGroupBitmap.h b/dbms/AggregateFunctions/AggregateFunctionGroupBitmap.h
similarity index 100%
rename from dbms/src/AggregateFunctions/AggregateFunctionGroupBitmap.h
rename to dbms/AggregateFunctions/AggregateFunctionGroupBitmap.h
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionGroupBitmapData.h b/dbms/AggregateFunctions/AggregateFunctionGroupBitmapData.h
similarity index 100%
rename from dbms/src/AggregateFunctions/AggregateFunctionGroupBitmapData.h
rename to dbms/AggregateFunctions/AggregateFunctionGroupBitmapData.h
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionGroupUniqArray.cpp b/dbms/AggregateFunctions/AggregateFunctionGroupUniqArray.cpp
similarity index 100%
rename from dbms/src/AggregateFunctions/AggregateFunctionGroupUniqArray.cpp
rename to dbms/AggregateFunctions/AggregateFunctionGroupUniqArray.cpp
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionGroupUniqArray.h b/dbms/AggregateFunctions/AggregateFunctionGroupUniqArray.h
similarity index 100%
rename from dbms/src/AggregateFunctions/AggregateFunctionGroupUniqArray.h
rename to dbms/AggregateFunctions/AggregateFunctionGroupUniqArray.h
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionHistogram.cpp b/dbms/AggregateFunctions/AggregateFunctionHistogram.cpp
similarity index 100%
rename from dbms/src/AggregateFunctions/AggregateFunctionHistogram.cpp
rename to dbms/AggregateFunctions/AggregateFunctionHistogram.cpp
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionHistogram.h b/dbms/AggregateFunctions/AggregateFunctionHistogram.h
similarity index 100%
rename from dbms/src/AggregateFunctions/AggregateFunctionHistogram.h
rename to dbms/AggregateFunctions/AggregateFunctionHistogram.h
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionIf.cpp b/dbms/AggregateFunctions/AggregateFunctionIf.cpp
similarity index 100%
rename from dbms/src/AggregateFunctions/AggregateFunctionIf.cpp
rename to dbms/AggregateFunctions/AggregateFunctionIf.cpp
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionIf.h b/dbms/AggregateFunctions/AggregateFunctionIf.h
similarity index 100%
rename from dbms/src/AggregateFunctions/AggregateFunctionIf.h
rename to dbms/AggregateFunctions/AggregateFunctionIf.h
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionMLMethod.cpp b/dbms/AggregateFunctions/AggregateFunctionMLMethod.cpp
similarity index 100%
rename from dbms/src/AggregateFunctions/AggregateFunctionMLMethod.cpp
rename to dbms/AggregateFunctions/AggregateFunctionMLMethod.cpp
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionMLMethod.h b/dbms/AggregateFunctions/AggregateFunctionMLMethod.h
similarity index 100%
rename from dbms/src/AggregateFunctions/AggregateFunctionMLMethod.h
rename to dbms/AggregateFunctions/AggregateFunctionMLMethod.h
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionMaxIntersections.cpp b/dbms/AggregateFunctions/AggregateFunctionMaxIntersections.cpp
similarity index 100%
rename from dbms/src/AggregateFunctions/AggregateFunctionMaxIntersections.cpp
rename to dbms/AggregateFunctions/AggregateFunctionMaxIntersections.cpp
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionMaxIntersections.h b/dbms/AggregateFunctions/AggregateFunctionMaxIntersections.h
similarity index 100%
rename from dbms/src/AggregateFunctions/AggregateFunctionMaxIntersections.h
rename to dbms/AggregateFunctions/AggregateFunctionMaxIntersections.h
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionMerge.cpp b/dbms/AggregateFunctions/AggregateFunctionMerge.cpp
similarity index 100%
rename from dbms/src/AggregateFunctions/AggregateFunctionMerge.cpp
rename to dbms/AggregateFunctions/AggregateFunctionMerge.cpp
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionMerge.h b/dbms/AggregateFunctions/AggregateFunctionMerge.h
similarity index 100%
rename from dbms/src/AggregateFunctions/AggregateFunctionMerge.h
rename to dbms/AggregateFunctions/AggregateFunctionMerge.h
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionMinMaxAny.cpp b/dbms/AggregateFunctions/AggregateFunctionMinMaxAny.cpp
similarity index 100%
rename from dbms/src/AggregateFunctions/AggregateFunctionMinMaxAny.cpp
rename to dbms/AggregateFunctions/AggregateFunctionMinMaxAny.cpp
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionMinMaxAny.h b/dbms/AggregateFunctions/AggregateFunctionMinMaxAny.h
similarity index 100%
rename from dbms/src/AggregateFunctions/AggregateFunctionMinMaxAny.h
rename to dbms/AggregateFunctions/AggregateFunctionMinMaxAny.h
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionNothing.h b/dbms/AggregateFunctions/AggregateFunctionNothing.h
similarity index 100%
rename from dbms/src/AggregateFunctions/AggregateFunctionNothing.h
rename to dbms/AggregateFunctions/AggregateFunctionNothing.h
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionNull.cpp b/dbms/AggregateFunctions/AggregateFunctionNull.cpp
similarity index 100%
rename from dbms/src/AggregateFunctions/AggregateFunctionNull.cpp
rename to dbms/AggregateFunctions/AggregateFunctionNull.cpp
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionNull.h b/dbms/AggregateFunctions/AggregateFunctionNull.h
similarity index 100%
rename from dbms/src/AggregateFunctions/AggregateFunctionNull.h
rename to dbms/AggregateFunctions/AggregateFunctionNull.h
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionOrFill.cpp b/dbms/AggregateFunctions/AggregateFunctionOrFill.cpp
similarity index 100%
rename from dbms/src/AggregateFunctions/AggregateFunctionOrFill.cpp
rename to dbms/AggregateFunctions/AggregateFunctionOrFill.cpp
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionOrFill.h b/dbms/AggregateFunctions/AggregateFunctionOrFill.h
similarity index 100%
rename from dbms/src/AggregateFunctions/AggregateFunctionOrFill.h
rename to dbms/AggregateFunctions/AggregateFunctionOrFill.h
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionQuantile.cpp b/dbms/AggregateFunctions/AggregateFunctionQuantile.cpp
similarity index 100%
rename from dbms/src/AggregateFunctions/AggregateFunctionQuantile.cpp
rename to dbms/AggregateFunctions/AggregateFunctionQuantile.cpp
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionQuantile.h b/dbms/AggregateFunctions/AggregateFunctionQuantile.h
similarity index 100%
rename from dbms/src/AggregateFunctions/AggregateFunctionQuantile.h
rename to dbms/AggregateFunctions/AggregateFunctionQuantile.h
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionResample.cpp b/dbms/AggregateFunctions/AggregateFunctionResample.cpp
similarity index 100%
rename from dbms/src/AggregateFunctions/AggregateFunctionResample.cpp
rename to dbms/AggregateFunctions/AggregateFunctionResample.cpp
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionResample.h b/dbms/AggregateFunctions/AggregateFunctionResample.h
similarity index 100%
rename from dbms/src/AggregateFunctions/AggregateFunctionResample.h
rename to dbms/AggregateFunctions/AggregateFunctionResample.h
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionRetention.cpp b/dbms/AggregateFunctions/AggregateFunctionRetention.cpp
similarity index 100%
rename from dbms/src/AggregateFunctions/AggregateFunctionRetention.cpp
rename to dbms/AggregateFunctions/AggregateFunctionRetention.cpp
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionRetention.h b/dbms/AggregateFunctions/AggregateFunctionRetention.h
similarity index 100%
rename from dbms/src/AggregateFunctions/AggregateFunctionRetention.h
rename to dbms/AggregateFunctions/AggregateFunctionRetention.h
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionSequenceMatch.cpp b/dbms/AggregateFunctions/AggregateFunctionSequenceMatch.cpp
similarity index 100%
rename from dbms/src/AggregateFunctions/AggregateFunctionSequenceMatch.cpp
rename to dbms/AggregateFunctions/AggregateFunctionSequenceMatch.cpp
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionSequenceMatch.h b/dbms/AggregateFunctions/AggregateFunctionSequenceMatch.h
similarity index 100%
rename from dbms/src/AggregateFunctions/AggregateFunctionSequenceMatch.h
rename to dbms/AggregateFunctions/AggregateFunctionSequenceMatch.h
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionSimpleLinearRegression.cpp b/dbms/AggregateFunctions/AggregateFunctionSimpleLinearRegression.cpp
similarity index 100%
rename from dbms/src/AggregateFunctions/AggregateFunctionSimpleLinearRegression.cpp
rename to dbms/AggregateFunctions/AggregateFunctionSimpleLinearRegression.cpp
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionSimpleLinearRegression.h b/dbms/AggregateFunctions/AggregateFunctionSimpleLinearRegression.h
similarity index 100%
rename from dbms/src/AggregateFunctions/AggregateFunctionSimpleLinearRegression.h
rename to dbms/AggregateFunctions/AggregateFunctionSimpleLinearRegression.h
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionState.cpp b/dbms/AggregateFunctions/AggregateFunctionState.cpp
similarity index 100%
rename from dbms/src/AggregateFunctions/AggregateFunctionState.cpp
rename to dbms/AggregateFunctions/AggregateFunctionState.cpp
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionState.h b/dbms/AggregateFunctions/AggregateFunctionState.h
similarity index 100%
rename from dbms/src/AggregateFunctions/AggregateFunctionState.h
rename to dbms/AggregateFunctions/AggregateFunctionState.h
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionStatistics.cpp b/dbms/AggregateFunctions/AggregateFunctionStatistics.cpp
similarity index 100%
rename from dbms/src/AggregateFunctions/AggregateFunctionStatistics.cpp
rename to dbms/AggregateFunctions/AggregateFunctionStatistics.cpp
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionStatistics.h b/dbms/AggregateFunctions/AggregateFunctionStatistics.h
similarity index 100%
rename from dbms/src/AggregateFunctions/AggregateFunctionStatistics.h
rename to dbms/AggregateFunctions/AggregateFunctionStatistics.h
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionStatisticsSimple.cpp b/dbms/AggregateFunctions/AggregateFunctionStatisticsSimple.cpp
similarity index 100%
rename from dbms/src/AggregateFunctions/AggregateFunctionStatisticsSimple.cpp
rename to dbms/AggregateFunctions/AggregateFunctionStatisticsSimple.cpp
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionStatisticsSimple.h b/dbms/AggregateFunctions/AggregateFunctionStatisticsSimple.h
similarity index 100%
rename from dbms/src/AggregateFunctions/AggregateFunctionStatisticsSimple.h
rename to dbms/AggregateFunctions/AggregateFunctionStatisticsSimple.h
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionSum.cpp b/dbms/AggregateFunctions/AggregateFunctionSum.cpp
similarity index 100%
rename from dbms/src/AggregateFunctions/AggregateFunctionSum.cpp
rename to dbms/AggregateFunctions/AggregateFunctionSum.cpp
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionSum.h b/dbms/AggregateFunctions/AggregateFunctionSum.h
similarity index 100%
rename from dbms/src/AggregateFunctions/AggregateFunctionSum.h
rename to dbms/AggregateFunctions/AggregateFunctionSum.h
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionSumMap.cpp b/dbms/AggregateFunctions/AggregateFunctionSumMap.cpp
similarity index 100%
rename from dbms/src/AggregateFunctions/AggregateFunctionSumMap.cpp
rename to dbms/AggregateFunctions/AggregateFunctionSumMap.cpp
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionSumMap.h b/dbms/AggregateFunctions/AggregateFunctionSumMap.h
similarity index 100%
rename from dbms/src/AggregateFunctions/AggregateFunctionSumMap.h
rename to dbms/AggregateFunctions/AggregateFunctionSumMap.h
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionTimeSeriesGroupSum.cpp b/dbms/AggregateFunctions/AggregateFunctionTimeSeriesGroupSum.cpp
similarity index 100%
rename from dbms/src/AggregateFunctions/AggregateFunctionTimeSeriesGroupSum.cpp
rename to dbms/AggregateFunctions/AggregateFunctionTimeSeriesGroupSum.cpp
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionTimeSeriesGroupSum.h b/dbms/AggregateFunctions/AggregateFunctionTimeSeriesGroupSum.h
similarity index 100%
rename from dbms/src/AggregateFunctions/AggregateFunctionTimeSeriesGroupSum.h
rename to dbms/AggregateFunctions/AggregateFunctionTimeSeriesGroupSum.h
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionTopK.cpp b/dbms/AggregateFunctions/AggregateFunctionTopK.cpp
similarity index 100%
rename from dbms/src/AggregateFunctions/AggregateFunctionTopK.cpp
rename to dbms/AggregateFunctions/AggregateFunctionTopK.cpp
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionTopK.h b/dbms/AggregateFunctions/AggregateFunctionTopK.h
similarity index 100%
rename from dbms/src/AggregateFunctions/AggregateFunctionTopK.h
rename to dbms/AggregateFunctions/AggregateFunctionTopK.h
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionUniq.cpp b/dbms/AggregateFunctions/AggregateFunctionUniq.cpp
similarity index 100%
rename from dbms/src/AggregateFunctions/AggregateFunctionUniq.cpp
rename to dbms/AggregateFunctions/AggregateFunctionUniq.cpp
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionUniq.h b/dbms/AggregateFunctions/AggregateFunctionUniq.h
similarity index 100%
rename from dbms/src/AggregateFunctions/AggregateFunctionUniq.h
rename to dbms/AggregateFunctions/AggregateFunctionUniq.h
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionUniqCombined.cpp b/dbms/AggregateFunctions/AggregateFunctionUniqCombined.cpp
similarity index 100%
rename from dbms/src/AggregateFunctions/AggregateFunctionUniqCombined.cpp
rename to dbms/AggregateFunctions/AggregateFunctionUniqCombined.cpp
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionUniqCombined.h b/dbms/AggregateFunctions/AggregateFunctionUniqCombined.h
similarity index 100%
rename from dbms/src/AggregateFunctions/AggregateFunctionUniqCombined.h
rename to dbms/AggregateFunctions/AggregateFunctionUniqCombined.h
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionUniqUpTo.cpp b/dbms/AggregateFunctions/AggregateFunctionUniqUpTo.cpp
similarity index 100%
rename from dbms/src/AggregateFunctions/AggregateFunctionUniqUpTo.cpp
rename to dbms/AggregateFunctions/AggregateFunctionUniqUpTo.cpp
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionUniqUpTo.h b/dbms/AggregateFunctions/AggregateFunctionUniqUpTo.h
similarity index 100%
rename from dbms/src/AggregateFunctions/AggregateFunctionUniqUpTo.h
rename to dbms/AggregateFunctions/AggregateFunctionUniqUpTo.h
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionWindowFunnel.cpp b/dbms/AggregateFunctions/AggregateFunctionWindowFunnel.cpp
similarity index 100%
rename from dbms/src/AggregateFunctions/AggregateFunctionWindowFunnel.cpp
rename to dbms/AggregateFunctions/AggregateFunctionWindowFunnel.cpp
diff --git a/dbms/src/AggregateFunctions/AggregateFunctionWindowFunnel.h b/dbms/AggregateFunctions/AggregateFunctionWindowFunnel.h
similarity index 100%
rename from dbms/src/AggregateFunctions/AggregateFunctionWindowFunnel.h
rename to dbms/AggregateFunctions/AggregateFunctionWindowFunnel.h
diff --git a/dbms/src/AggregateFunctions/CMakeLists.txt b/dbms/AggregateFunctions/CMakeLists.txt
similarity index 100%
rename from dbms/src/AggregateFunctions/CMakeLists.txt
rename to dbms/AggregateFunctions/CMakeLists.txt
diff --git a/dbms/src/AggregateFunctions/FactoryHelpers.h b/dbms/AggregateFunctions/FactoryHelpers.h
similarity index 100%
rename from dbms/src/AggregateFunctions/FactoryHelpers.h
rename to dbms/AggregateFunctions/FactoryHelpers.h
diff --git a/dbms/src/AggregateFunctions/Helpers.h b/dbms/AggregateFunctions/Helpers.h
similarity index 100%
rename from dbms/src/AggregateFunctions/Helpers.h
rename to dbms/AggregateFunctions/Helpers.h
diff --git a/dbms/src/AggregateFunctions/HelpersMinMaxAny.h b/dbms/AggregateFunctions/HelpersMinMaxAny.h
similarity index 100%
rename from dbms/src/AggregateFunctions/HelpersMinMaxAny.h
rename to dbms/AggregateFunctions/HelpersMinMaxAny.h
diff --git a/dbms/src/AggregateFunctions/IAggregateFunction.h b/dbms/AggregateFunctions/IAggregateFunction.h
similarity index 100%
rename from dbms/src/AggregateFunctions/IAggregateFunction.h
rename to dbms/AggregateFunctions/IAggregateFunction.h
diff --git a/dbms/src/AggregateFunctions/IAggregateFunctionCombinator.h b/dbms/AggregateFunctions/IAggregateFunctionCombinator.h
similarity index 100%
rename from dbms/src/AggregateFunctions/IAggregateFunctionCombinator.h
rename to dbms/AggregateFunctions/IAggregateFunctionCombinator.h
diff --git a/dbms/src/AggregateFunctions/QuantileExact.h b/dbms/AggregateFunctions/QuantileExact.h
similarity index 100%
rename from dbms/src/AggregateFunctions/QuantileExact.h
rename to dbms/AggregateFunctions/QuantileExact.h
diff --git a/dbms/src/AggregateFunctions/QuantileExactWeighted.h b/dbms/AggregateFunctions/QuantileExactWeighted.h
similarity index 100%
rename from dbms/src/AggregateFunctions/QuantileExactWeighted.h
rename to dbms/AggregateFunctions/QuantileExactWeighted.h
diff --git a/dbms/src/AggregateFunctions/QuantileReservoirSampler.h b/dbms/AggregateFunctions/QuantileReservoirSampler.h
similarity index 100%
rename from dbms/src/AggregateFunctions/QuantileReservoirSampler.h
rename to dbms/AggregateFunctions/QuantileReservoirSampler.h
diff --git a/dbms/src/AggregateFunctions/QuantileReservoirSamplerDeterministic.h b/dbms/AggregateFunctions/QuantileReservoirSamplerDeterministic.h
similarity index 100%
rename from dbms/src/AggregateFunctions/QuantileReservoirSamplerDeterministic.h
rename to dbms/AggregateFunctions/QuantileReservoirSamplerDeterministic.h
diff --git a/dbms/src/AggregateFunctions/QuantileTDigest.h b/dbms/AggregateFunctions/QuantileTDigest.h
similarity index 100%
rename from dbms/src/AggregateFunctions/QuantileTDigest.h
rename to dbms/AggregateFunctions/QuantileTDigest.h
diff --git a/dbms/src/AggregateFunctions/QuantileTiming.h b/dbms/AggregateFunctions/QuantileTiming.h
similarity index 100%
rename from dbms/src/AggregateFunctions/QuantileTiming.h
rename to dbms/AggregateFunctions/QuantileTiming.h
diff --git a/dbms/src/AggregateFunctions/QuantilesCommon.h b/dbms/AggregateFunctions/QuantilesCommon.h
similarity index 100%
rename from dbms/src/AggregateFunctions/QuantilesCommon.h
rename to dbms/AggregateFunctions/QuantilesCommon.h
diff --git a/dbms/src/AggregateFunctions/ReservoirSampler.h b/dbms/AggregateFunctions/ReservoirSampler.h
similarity index 100%
rename from dbms/src/AggregateFunctions/ReservoirSampler.h
rename to dbms/AggregateFunctions/ReservoirSampler.h
diff --git a/dbms/src/AggregateFunctions/ReservoirSamplerDeterministic.h b/dbms/AggregateFunctions/ReservoirSamplerDeterministic.h
similarity index 100%
rename from dbms/src/AggregateFunctions/ReservoirSamplerDeterministic.h
rename to dbms/AggregateFunctions/ReservoirSamplerDeterministic.h
diff --git a/dbms/src/AggregateFunctions/UniqCombinedBiasData.cpp b/dbms/AggregateFunctions/UniqCombinedBiasData.cpp
similarity index 100%
rename from dbms/src/AggregateFunctions/UniqCombinedBiasData.cpp
rename to dbms/AggregateFunctions/UniqCombinedBiasData.cpp
diff --git a/dbms/src/AggregateFunctions/UniqCombinedBiasData.h b/dbms/AggregateFunctions/UniqCombinedBiasData.h
similarity index 100%
rename from dbms/src/AggregateFunctions/UniqCombinedBiasData.h
rename to dbms/AggregateFunctions/UniqCombinedBiasData.h
diff --git a/dbms/src/AggregateFunctions/UniqVariadicHash.cpp b/dbms/AggregateFunctions/UniqVariadicHash.cpp
similarity index 100%
rename from dbms/src/AggregateFunctions/UniqVariadicHash.cpp
rename to dbms/AggregateFunctions/UniqVariadicHash.cpp
diff --git a/dbms/src/AggregateFunctions/UniqVariadicHash.h b/dbms/AggregateFunctions/UniqVariadicHash.h
similarity index 100%
rename from dbms/src/AggregateFunctions/UniqVariadicHash.h
rename to dbms/AggregateFunctions/UniqVariadicHash.h
diff --git a/dbms/src/AggregateFunctions/UniquesHashSet.h b/dbms/AggregateFunctions/UniquesHashSet.h
similarity index 100%
rename from dbms/src/AggregateFunctions/UniquesHashSet.h
rename to dbms/AggregateFunctions/UniquesHashSet.h
diff --git a/dbms/src/AggregateFunctions/parseAggregateFunctionParameters.cpp b/dbms/AggregateFunctions/parseAggregateFunctionParameters.cpp
similarity index 100%
rename from dbms/src/AggregateFunctions/parseAggregateFunctionParameters.cpp
rename to dbms/AggregateFunctions/parseAggregateFunctionParameters.cpp
diff --git a/dbms/src/AggregateFunctions/parseAggregateFunctionParameters.h b/dbms/AggregateFunctions/parseAggregateFunctionParameters.h
similarity index 100%
rename from dbms/src/AggregateFunctions/parseAggregateFunctionParameters.h
rename to dbms/AggregateFunctions/parseAggregateFunctionParameters.h
diff --git a/dbms/src/AggregateFunctions/registerAggregateFunctions.cpp b/dbms/AggregateFunctions/registerAggregateFunctions.cpp
similarity index 100%
rename from dbms/src/AggregateFunctions/registerAggregateFunctions.cpp
rename to dbms/AggregateFunctions/registerAggregateFunctions.cpp
diff --git a/dbms/src/AggregateFunctions/registerAggregateFunctions.h b/dbms/AggregateFunctions/registerAggregateFunctions.h
similarity index 100%
rename from dbms/src/AggregateFunctions/registerAggregateFunctions.h
rename to dbms/AggregateFunctions/registerAggregateFunctions.h
diff --git a/dbms/src/AggregateFunctions/tests/CMakeLists.txt b/dbms/AggregateFunctions/tests/CMakeLists.txt
similarity index 100%
rename from dbms/src/AggregateFunctions/tests/CMakeLists.txt
rename to dbms/AggregateFunctions/tests/CMakeLists.txt
diff --git a/dbms/src/AggregateFunctions/tests/quantile-t-digest.cpp b/dbms/AggregateFunctions/tests/quantile-t-digest.cpp
similarity index 100%
rename from dbms/src/AggregateFunctions/tests/quantile-t-digest.cpp
rename to dbms/AggregateFunctions/tests/quantile-t-digest.cpp
diff --git a/dbms/CMakeLists.txt b/dbms/CMakeLists.txt
index 81cb5afbc43..1c891353aaa 100644
--- a/dbms/CMakeLists.txt
+++ b/dbms/CMakeLists.txt
@@ -1,6 +1,3 @@
-set(ConfigIncludePath ${CMAKE_CURRENT_BINARY_DIR}/includes/configs CACHE INTERNAL "Path to generated configuration files.")
-include_directories(${ConfigIncludePath})
-
 if (USE_INCLUDE_WHAT_YOU_USE)
     set (CMAKE_CXX_INCLUDE_WHAT_YOU_USE ${IWYU_PATH})
 endif ()
@@ -21,21 +18,21 @@ else()
 endif()
 include(../cmake/limit_jobs.cmake)

-set (CONFIG_VERSION ${CMAKE_CURRENT_BINARY_DIR}/src/Common/config_version.h)
-set (CONFIG_COMMON ${CMAKE_CURRENT_BINARY_DIR}/src/Common/config.h)
+set (CONFIG_VERSION ${CMAKE_CURRENT_BINARY_DIR}/Common/config_version.h)
+set (CONFIG_COMMON ${CMAKE_CURRENT_BINARY_DIR}/Common/config.h)

 include (cmake/version.cmake)
 message (STATUS "Will build ${VERSION_FULL} revision ${VERSION_REVISION} ${VERSION_OFFICIAL}")

-configure_file (src/Common/config.h.in ${CONFIG_COMMON})
-configure_file (src/Common/config_version.h.in ${CONFIG_VERSION})
-configure_file (src/Core/config_core.h.in ${CMAKE_CURRENT_BINARY_DIR}/src/Core/include/config_core.h)
+configure_file (Common/config.h.in ${CONFIG_COMMON})
+configure_file (Common/config_version.h.in ${CONFIG_VERSION})
+configure_file (Core/config_core.h.in ${CMAKE_CURRENT_BINARY_DIR}/Core/include/config_core.h)

 if (NOT MSVC)
     set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wextra")
 endif ()

 if (USE_DEBUG_HELPERS)
-    set (INCLUDE_DEBUG_HELPERS "-I${ClickHouse_SOURCE_DIR}/base -include ${ClickHouse_SOURCE_DIR}/dbms/src/Core/iostream_debug_helpers.h")
+    set (INCLUDE_DEBUG_HELPERS "-I${ClickHouse_SOURCE_DIR}/base -include ${ClickHouse_SOURCE_DIR}/dbms/Core/iostream_debug_helpers.h")
     set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${INCLUDE_DEBUG_HELPERS}")
 endif ()
@@ -199,71 +196,90 @@ if (COMPILER_GCC)
     # (gdb) bt
     #0 memcpy (destination=0x7faa6e9f1638, source=0x7faa81d9e9a8, size=16) at ../libs/libmemcpy/memcpy.h:11
     #1 0x0000000005341c5f in _mm_storeu_si128 (__B=..., __P=<optimized out>) at /usr/lib/gcc/x86_64-linux-gnu/7/include/emmintrin.h:720
-    #2 memcpySmallAllowReadWriteOverflow15Impl (n=<optimized out>, src=<optimized out>, dst=<optimized out>) at ../dbms/src/Common/memcpySmall.h:37
+    #2 memcpySmallAllowReadWriteOverflow15Impl (n=<optimized out>, src=<optimized out>, dst=<optimized out>) at ../dbms/Common/memcpySmall.h:37
     add_definitions ("-fno-tree-loop-distribute-patterns")
 endif ()

-add_subdirectory (src)
+add_subdirectory (Access)
+add_subdirectory (Columns)
+add_subdirectory (Common)
+add_subdirectory (Core)
+add_subdirectory (DataStreams)
+add_subdirectory (DataTypes)
+add_subdirectory (Dictionaries)
+add_subdirectory (Disks)
+add_subdirectory (Storages)
+add_subdirectory (Parsers)
+add_subdirectory (IO)
+add_subdirectory (Functions)
+add_subdirectory (Interpreters)
+add_subdirectory (AggregateFunctions)
+add_subdirectory (Client)
+add_subdirectory (TableFunctions)
+add_subdirectory (Processors)
+add_subdirectory (Formats)
+add_subdirectory (Compression)
+
 set(dbms_headers)
 set(dbms_sources)

-add_headers_and_sources(clickhouse_common_io src/Common)
-add_headers_and_sources(clickhouse_common_io src/Common/HashTable)
-add_headers_and_sources(clickhouse_common_io src/IO)
-list (REMOVE_ITEM clickhouse_common_io_sources src/Common/malloc.cpp src/Common/new_delete.cpp)
+add_headers_and_sources(clickhouse_common_io Common)
+add_headers_and_sources(clickhouse_common_io Common/HashTable)
+add_headers_and_sources(clickhouse_common_io IO)
+list (REMOVE_ITEM clickhouse_common_io_sources Common/malloc.cpp Common/new_delete.cpp)

 if(USE_RDKAFKA)
-    add_headers_and_sources(dbms src/Storages/Kafka)
+    add_headers_and_sources(dbms Storages/Kafka)
 endif()

 list (APPEND clickhouse_common_io_sources ${CONFIG_BUILD})
 list (APPEND clickhouse_common_io_headers ${CONFIG_VERSION} ${CONFIG_COMMON})

-list (APPEND dbms_sources src/Functions/IFunction.cpp src/Functions/FunctionFactory.cpp src/Functions/FunctionHelpers.cpp src/Functions/extractTimeZoneFromFunctionArguments.cpp)
-list (APPEND dbms_headers src/Functions/IFunctionImpl.h src/Functions/FunctionFactory.h src/Functions/FunctionHelpers.h src/Functions/extractTimeZoneFromFunctionArguments.h)
+list (APPEND dbms_sources Functions/IFunction.cpp Functions/FunctionFactory.cpp Functions/FunctionHelpers.cpp Functions/extractTimeZoneFromFunctionArguments.cpp)
+list (APPEND dbms_headers Functions/IFunctionImpl.h Functions/FunctionFactory.h Functions/FunctionHelpers.h Functions/extractTimeZoneFromFunctionArguments.h)

 list (APPEND dbms_sources
-    src/AggregateFunctions/AggregateFunctionFactory.cpp
-    src/AggregateFunctions/AggregateFunctionCombinatorFactory.cpp
-    src/AggregateFunctions/AggregateFunctionState.cpp
-    src/AggregateFunctions/parseAggregateFunctionParameters.cpp)
+    AggregateFunctions/AggregateFunctionFactory.cpp
+    AggregateFunctions/AggregateFunctionCombinatorFactory.cpp
+    AggregateFunctions/AggregateFunctionState.cpp
+    AggregateFunctions/parseAggregateFunctionParameters.cpp)

 list (APPEND dbms_headers
-    src/AggregateFunctions/IAggregateFunction.h
-    src/AggregateFunctions/IAggregateFunctionCombinator.h
-    src/AggregateFunctions/AggregateFunctionFactory.h
-    src/AggregateFunctions/AggregateFunctionCombinatorFactory.h
-    src/AggregateFunctions/AggregateFunctionState.h
-    src/AggregateFunctions/FactoryHelpers.h
-    src/AggregateFunctions/parseAggregateFunctionParameters.h)
+    AggregateFunctions/IAggregateFunction.h
+    AggregateFunctions/IAggregateFunctionCombinator.h
+    AggregateFunctions/AggregateFunctionFactory.h
+    AggregateFunctions/AggregateFunctionCombinatorFactory.h
+    AggregateFunctions/AggregateFunctionState.h
+    AggregateFunctions/FactoryHelpers.h
+    AggregateFunctions/parseAggregateFunctionParameters.h)

-list (APPEND dbms_sources src/TableFunctions/ITableFunction.cpp src/TableFunctions/TableFunctionFactory.cpp)
-list (APPEND dbms_headers src/TableFunctions/ITableFunction.h src/TableFunctions/TableFunctionFactory.h)
-list (APPEND dbms_sources src/Dictionaries/DictionaryFactory.cpp src/Dictionaries/DictionarySourceFactory.cpp src/Dictionaries/DictionaryStructure.cpp src/Dictionaries/getDictionaryConfigurationFromAST.cpp)
-list (APPEND dbms_headers src/Dictionaries/DictionaryFactory.h src/Dictionaries/DictionarySourceFactory.h src/Dictionaries/DictionaryStructure.h src/Dictionaries/getDictionaryConfigurationFromAST.h)
+list (APPEND dbms_sources TableFunctions/ITableFunction.cpp TableFunctions/TableFunctionFactory.cpp)
+list (APPEND dbms_headers TableFunctions/ITableFunction.h TableFunctions/TableFunctionFactory.h)
+list (APPEND dbms_sources Dictionaries/DictionaryFactory.cpp Dictionaries/DictionarySourceFactory.cpp Dictionaries/DictionaryStructure.cpp Dictionaries/getDictionaryConfigurationFromAST.cpp)
+list (APPEND dbms_headers Dictionaries/DictionaryFactory.h Dictionaries/DictionarySourceFactory.h Dictionaries/DictionaryStructure.h Dictionaries/getDictionaryConfigurationFromAST.h)

 if (NOT ENABLE_SSL)
-    list (REMOVE_ITEM clickhouse_common_io_sources src/Common/OpenSSLHelpers.cpp)
-    list (REMOVE_ITEM clickhouse_common_io_headers src/Common/OpenSSLHelpers.h)
+    list (REMOVE_ITEM clickhouse_common_io_sources Common/OpenSSLHelpers.cpp)
+    list (REMOVE_ITEM clickhouse_common_io_headers Common/OpenSSLHelpers.h)
 endif ()

 add_library(clickhouse_common_io ${clickhouse_common_io_headers} ${clickhouse_common_io_sources})

-add_library (clickhouse_malloc OBJECT src/Common/malloc.cpp)
-set_source_files_properties(src/Common/malloc.cpp PROPERTIES COMPILE_FLAGS "-fno-builtin")
+add_library (clickhouse_malloc OBJECT Common/malloc.cpp)
+set_source_files_properties(Common/malloc.cpp PROPERTIES COMPILE_FLAGS "-fno-builtin")

-add_library (clickhouse_new_delete STATIC src/Common/new_delete.cpp)
+add_library (clickhouse_new_delete STATIC Common/new_delete.cpp)
 target_link_libraries (clickhouse_new_delete PRIVATE clickhouse_common_io jemalloc)

 if (OS_FREEBSD)
     target_compile_definitions (clickhouse_common_io PUBLIC CLOCK_MONOTONIC_COARSE=CLOCK_MONOTONIC_FAST)
 endif ()

-add_subdirectory(src/Common/ZooKeeper)
-add_subdirectory(src/Common/Config)
+add_subdirectory(Common/ZooKeeper)
+add_subdirectory(Common/Config)

 set (all_modules)
 macro(add_object_library name common_path)
@@ -277,28 +293,28 @@ macro(add_object_library name common_path)
     endif ()
 endmacro()

-add_object_library(clickhouse_access src/Access)
-add_object_library(clickhouse_core src/Core)
-add_object_library(clickhouse_compression src/Compression)
-add_object_library(clickhouse_datastreams src/DataStreams)
-add_object_library(clickhouse_datatypes src/DataTypes)
-add_object_library(clickhouse_databases src/Databases)
-add_object_library(clickhouse_disks src/Disks)
-add_object_library(clickhouse_interpreters src/Interpreters)
-add_object_library(clickhouse_interpreters_clusterproxy src/Interpreters/ClusterProxy)
-add_object_library(clickhouse_columns src/Columns)
-add_object_library(clickhouse_storages src/Storages)
-add_object_library(clickhouse_storages_distributed src/Storages/Distributed)
-add_object_library(clickhouse_storages_mergetree src/Storages/MergeTree)
-add_object_library(clickhouse_storages_liveview src/Storages/LiveView)
-add_object_library(clickhouse_client src/Client)
-add_object_library(clickhouse_formats src/Formats)
-add_object_library(clickhouse_processors src/Processors)
-add_object_library(clickhouse_processors_executors src/Processors/Executors)
-add_object_library(clickhouse_processors_formats src/Processors/Formats)
-add_object_library(clickhouse_processors_formats_impl src/Processors/Formats/Impl)
-add_object_library(clickhouse_processors_transforms src/Processors/Transforms)
-add_object_library(clickhouse_processors_sources src/Processors/Sources)
+add_object_library(clickhouse_access Access)
+add_object_library(clickhouse_core Core)
+add_object_library(clickhouse_compression Compression)
+add_object_library(clickhouse_datastreams DataStreams)
+add_object_library(clickhouse_datatypes DataTypes)
+add_object_library(clickhouse_databases Databases)
+add_object_library(clickhouse_disks Disks)
+add_object_library(clickhouse_interpreters Interpreters)
+add_object_library(clickhouse_interpreters_clusterproxy Interpreters/ClusterProxy)
+add_object_library(clickhouse_columns Columns)
+add_object_library(clickhouse_storages Storages)
+add_object_library(clickhouse_storages_distributed Storages/Distributed)
+add_object_library(clickhouse_storages_mergetree Storages/MergeTree)
+add_object_library(clickhouse_storages_liveview Storages/LiveView)
+add_object_library(clickhouse_client Client)
+add_object_library(clickhouse_formats Formats)
+add_object_library(clickhouse_processors Processors)
+add_object_library(clickhouse_processors_executors Processors/Executors)
+add_object_library(clickhouse_processors_formats Processors/Formats)
+add_object_library(clickhouse_processors_formats_impl Processors/Formats/Impl)
+add_object_library(clickhouse_processors_transforms Processors/Transforms)
+add_object_library(clickhouse_processors_sources Processors/Sources)

 if (MAKE_STATIC_LIBRARIES OR NOT SPLIT_SHARED_LIBRARIES)
@@ -334,29 +350,29 @@ endif ()
 if (CMAKE_BUILD_TYPE_UC STREQUAL "RELEASE" OR CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" OR CMAKE_BUILD_TYPE_UC STREQUAL "MINSIZEREL")
     # Won't generate debug info for files with heavy template instantiation to achieve faster linking and lower size.
     set_source_files_properties(
-        src/Dictionaries/FlatDictionary.cpp
-        src/Dictionaries/HashedDictionary.cpp
-        src/Dictionaries/CacheDictionary.cpp
-        src/Dictionaries/TrieDictionary.cpp
-        src/Dictionaries/RangeHashedDictionary.cpp
-        src/Dictionaries/ComplexKeyHashedDictionary.cpp
-        src/Dictionaries/ComplexKeyCacheDictionary.cpp
-        src/Dictionaries/ComplexKeyCacheDictionary_generate1.cpp
-        src/Dictionaries/ComplexKeyCacheDictionary_generate2.cpp
-        src/Dictionaries/ComplexKeyCacheDictionary_generate3.cpp
-        src/Dictionaries/ODBCBlockInputStream.cpp
-        src/Dictionaries/HTTPDictionarySource.cpp
-        src/Dictionaries/LibraryDictionarySource.cpp
-        src/Dictionaries/ExecutableDictionarySource.cpp
-        src/Dictionaries/ClickHouseDictionarySource.cpp
+        Dictionaries/FlatDictionary.cpp
+        Dictionaries/HashedDictionary.cpp
+        Dictionaries/CacheDictionary.cpp
+        Dictionaries/TrieDictionary.cpp
+        Dictionaries/RangeHashedDictionary.cpp
+        Dictionaries/ComplexKeyHashedDictionary.cpp
+        Dictionaries/ComplexKeyCacheDictionary.cpp
+        Dictionaries/ComplexKeyCacheDictionary_generate1.cpp
+        Dictionaries/ComplexKeyCacheDictionary_generate2.cpp
+        Dictionaries/ComplexKeyCacheDictionary_generate3.cpp
+        Dictionaries/ODBCBlockInputStream.cpp
+        Dictionaries/HTTPDictionarySource.cpp
+        Dictionaries/LibraryDictionarySource.cpp
+        Dictionaries/ExecutableDictionarySource.cpp
+        Dictionaries/ClickHouseDictionarySource.cpp
         PROPERTIES COMPILE_FLAGS -g0)
 endif ()

 # Otherwise it will slow down stack traces printing too much.
 set_source_files_properties(
-    src/Common/Elf.cpp
-    src/Common/Dwarf.cpp
-    src/Common/SymbolIndex.cpp
+    Common/Elf.cpp
+    Common/Dwarf.cpp
+    Common/SymbolIndex.cpp
     PROPERTIES COMPILE_FLAGS "-O3 ${WITHOUT_COVERAGE}")

 target_link_libraries (clickhouse_common_io
@@ -438,8 +454,8 @@ dbms_target_link_libraries (
     ${Boost_SYSTEM_LIBRARY}
 )

-target_include_directories(clickhouse_common_io PUBLIC ${CMAKE_CURRENT_BINARY_DIR}/src/Core/include) # uses some includes from core
-dbms_target_include_directories(PUBLIC ${CMAKE_CURRENT_BINARY_DIR}/src/Core/include)
+target_include_directories(clickhouse_common_io PUBLIC ${CMAKE_CURRENT_BINARY_DIR}/Core/include) # uses some includes from core
+dbms_target_include_directories(PUBLIC ${CMAKE_CURRENT_BINARY_DIR}/Core/include)

 target_include_directories(clickhouse_common_io SYSTEM PUBLIC ${PCG_RANDOM_INCLUDE_DIR})
 dbms_target_include_directories(SYSTEM PUBLIC ${PCG_RANDOM_INCLUDE_DIR})
@@ -562,9 +578,6 @@ target_include_directories (clickhouse_common_io PUBLIC ${DBMS_INCLUDE_DIR})
 target_include_directories (clickhouse_common_io SYSTEM BEFORE PUBLIC ${DOUBLE_CONVERSION_INCLUDE_DIR})

-add_subdirectory (programs)
-add_subdirectory (tests)
-
 if (ENABLE_TESTS AND USE_GTEST)
     macro (grep_gtest_sources BASE_DIR DST_VAR)
         # Cold match files that are not in tests/ directories
diff --git a/dbms/src/Client/CMakeLists.txt b/dbms/Client/CMakeLists.txt
similarity index 100%
rename from dbms/src/Client/CMakeLists.txt
rename to dbms/Client/CMakeLists.txt
diff --git a/dbms/src/Client/Connection.cpp b/dbms/Client/Connection.cpp
similarity index 100%
rename from dbms/src/Client/Connection.cpp
rename to dbms/Client/Connection.cpp
diff --git a/dbms/src/Client/Connection.h b/dbms/Client/Connection.h
similarity index 100%
rename from dbms/src/Client/Connection.h
rename to dbms/Client/Connection.h
diff --git a/dbms/src/Client/ConnectionPool.h b/dbms/Client/ConnectionPool.h
similarity index 100%
rename from dbms/src/Client/ConnectionPool.h
rename to dbms/Client/ConnectionPool.h
diff --git a/dbms/src/Client/ConnectionPoolWithFailover.cpp b/dbms/Client/ConnectionPoolWithFailover.cpp
similarity index 100%
rename from dbms/src/Client/ConnectionPoolWithFailover.cpp
rename to dbms/Client/ConnectionPoolWithFailover.cpp
diff --git a/dbms/src/Client/ConnectionPoolWithFailover.h b/dbms/Client/ConnectionPoolWithFailover.h
similarity index 100%
rename from dbms/src/Client/ConnectionPoolWithFailover.h
rename to dbms/Client/ConnectionPoolWithFailover.h
diff --git a/dbms/src/Client/MultiplexedConnections.cpp b/dbms/Client/MultiplexedConnections.cpp
similarity index 100%
rename from dbms/src/Client/MultiplexedConnections.cpp
rename to dbms/Client/MultiplexedConnections.cpp
diff --git a/dbms/src/Client/MultiplexedConnections.h b/dbms/Client/MultiplexedConnections.h
similarity index 100%
rename from dbms/src/Client/MultiplexedConnections.h
rename to dbms/Client/MultiplexedConnections.h
diff --git a/dbms/src/Client/TimeoutSetter.cpp b/dbms/Client/TimeoutSetter.cpp
similarity index 100%
rename from dbms/src/Client/TimeoutSetter.cpp
rename to dbms/Client/TimeoutSetter.cpp
diff --git a/dbms/src/Client/TimeoutSetter.h b/dbms/Client/TimeoutSetter.h
similarity index 100%
rename from dbms/src/Client/TimeoutSetter.h
rename to dbms/Client/TimeoutSetter.h
diff --git a/dbms/src/Client/tests/CMakeLists.txt b/dbms/Client/tests/CMakeLists.txt
similarity index 100%
rename from dbms/src/Client/tests/CMakeLists.txt
rename to dbms/Client/tests/CMakeLists.txt
diff --git a/dbms/src/Client/tests/test_connect.cpp b/dbms/Client/tests/test_connect.cpp
b/dbms/Client/tests/test_connect.cpp similarity index 100% rename from dbms/src/Client/tests/test_connect.cpp rename to dbms/Client/tests/test_connect.cpp diff --git a/dbms/src/Columns/CMakeLists.txt b/dbms/Columns/CMakeLists.txt similarity index 100% rename from dbms/src/Columns/CMakeLists.txt rename to dbms/Columns/CMakeLists.txt diff --git a/dbms/src/Columns/Collator.cpp b/dbms/Columns/Collator.cpp similarity index 100% rename from dbms/src/Columns/Collator.cpp rename to dbms/Columns/Collator.cpp diff --git a/dbms/src/Columns/Collator.h b/dbms/Columns/Collator.h similarity index 100% rename from dbms/src/Columns/Collator.h rename to dbms/Columns/Collator.h diff --git a/dbms/src/Columns/ColumnAggregateFunction.cpp b/dbms/Columns/ColumnAggregateFunction.cpp similarity index 100% rename from dbms/src/Columns/ColumnAggregateFunction.cpp rename to dbms/Columns/ColumnAggregateFunction.cpp diff --git a/dbms/src/Columns/ColumnAggregateFunction.h b/dbms/Columns/ColumnAggregateFunction.h similarity index 100% rename from dbms/src/Columns/ColumnAggregateFunction.h rename to dbms/Columns/ColumnAggregateFunction.h diff --git a/dbms/src/Columns/ColumnArray.cpp b/dbms/Columns/ColumnArray.cpp similarity index 100% rename from dbms/src/Columns/ColumnArray.cpp rename to dbms/Columns/ColumnArray.cpp diff --git a/dbms/src/Columns/ColumnArray.h b/dbms/Columns/ColumnArray.h similarity index 100% rename from dbms/src/Columns/ColumnArray.h rename to dbms/Columns/ColumnArray.h diff --git a/dbms/src/Columns/ColumnConst.cpp b/dbms/Columns/ColumnConst.cpp similarity index 100% rename from dbms/src/Columns/ColumnConst.cpp rename to dbms/Columns/ColumnConst.cpp diff --git a/dbms/src/Columns/ColumnConst.h b/dbms/Columns/ColumnConst.h similarity index 100% rename from dbms/src/Columns/ColumnConst.h rename to dbms/Columns/ColumnConst.h diff --git a/dbms/src/Columns/ColumnDecimal.cpp b/dbms/Columns/ColumnDecimal.cpp similarity index 100% rename from dbms/src/Columns/ColumnDecimal.cpp rename to dbms/Columns/ColumnDecimal.cpp diff --git a/dbms/src/Columns/ColumnDecimal.h b/dbms/Columns/ColumnDecimal.h similarity index 100% rename from dbms/src/Columns/ColumnDecimal.h rename to dbms/Columns/ColumnDecimal.h diff --git a/dbms/src/Columns/ColumnFixedString.cpp b/dbms/Columns/ColumnFixedString.cpp similarity index 100% rename from dbms/src/Columns/ColumnFixedString.cpp rename to dbms/Columns/ColumnFixedString.cpp diff --git a/dbms/src/Columns/ColumnFixedString.h b/dbms/Columns/ColumnFixedString.h similarity index 100% rename from dbms/src/Columns/ColumnFixedString.h rename to dbms/Columns/ColumnFixedString.h diff --git a/dbms/src/Columns/ColumnFunction.cpp b/dbms/Columns/ColumnFunction.cpp similarity index 100% rename from dbms/src/Columns/ColumnFunction.cpp rename to dbms/Columns/ColumnFunction.cpp diff --git a/dbms/src/Columns/ColumnFunction.h b/dbms/Columns/ColumnFunction.h similarity index 100% rename from dbms/src/Columns/ColumnFunction.h rename to dbms/Columns/ColumnFunction.h diff --git a/dbms/src/Columns/ColumnLowCardinality.cpp b/dbms/Columns/ColumnLowCardinality.cpp similarity index 100% rename from dbms/src/Columns/ColumnLowCardinality.cpp rename to dbms/Columns/ColumnLowCardinality.cpp diff --git a/dbms/src/Columns/ColumnLowCardinality.h b/dbms/Columns/ColumnLowCardinality.h similarity index 100% rename from dbms/src/Columns/ColumnLowCardinality.h rename to dbms/Columns/ColumnLowCardinality.h diff --git a/dbms/src/Columns/ColumnNothing.h b/dbms/Columns/ColumnNothing.h similarity index 100% rename from 
dbms/src/Columns/ColumnNothing.h rename to dbms/Columns/ColumnNothing.h diff --git a/dbms/src/Columns/ColumnNullable.cpp b/dbms/Columns/ColumnNullable.cpp similarity index 100% rename from dbms/src/Columns/ColumnNullable.cpp rename to dbms/Columns/ColumnNullable.cpp diff --git a/dbms/src/Columns/ColumnNullable.h b/dbms/Columns/ColumnNullable.h similarity index 100% rename from dbms/src/Columns/ColumnNullable.h rename to dbms/Columns/ColumnNullable.h diff --git a/dbms/src/Columns/ColumnSet.h b/dbms/Columns/ColumnSet.h similarity index 100% rename from dbms/src/Columns/ColumnSet.h rename to dbms/Columns/ColumnSet.h diff --git a/dbms/src/Columns/ColumnString.cpp b/dbms/Columns/ColumnString.cpp similarity index 100% rename from dbms/src/Columns/ColumnString.cpp rename to dbms/Columns/ColumnString.cpp diff --git a/dbms/src/Columns/ColumnString.h b/dbms/Columns/ColumnString.h similarity index 100% rename from dbms/src/Columns/ColumnString.h rename to dbms/Columns/ColumnString.h diff --git a/dbms/src/Columns/ColumnTuple.cpp b/dbms/Columns/ColumnTuple.cpp similarity index 100% rename from dbms/src/Columns/ColumnTuple.cpp rename to dbms/Columns/ColumnTuple.cpp diff --git a/dbms/src/Columns/ColumnTuple.h b/dbms/Columns/ColumnTuple.h similarity index 100% rename from dbms/src/Columns/ColumnTuple.h rename to dbms/Columns/ColumnTuple.h diff --git a/dbms/src/Columns/ColumnUnique.h b/dbms/Columns/ColumnUnique.h similarity index 100% rename from dbms/src/Columns/ColumnUnique.h rename to dbms/Columns/ColumnUnique.h diff --git a/dbms/src/Columns/ColumnVector.cpp b/dbms/Columns/ColumnVector.cpp similarity index 100% rename from dbms/src/Columns/ColumnVector.cpp rename to dbms/Columns/ColumnVector.cpp diff --git a/dbms/src/Columns/ColumnVector.h b/dbms/Columns/ColumnVector.h similarity index 100% rename from dbms/src/Columns/ColumnVector.h rename to dbms/Columns/ColumnVector.h diff --git a/dbms/src/Columns/ColumnVectorHelper.h b/dbms/Columns/ColumnVectorHelper.h similarity index 100% rename from dbms/src/Columns/ColumnVectorHelper.h rename to dbms/Columns/ColumnVectorHelper.h diff --git a/dbms/src/Columns/ColumnsCommon.cpp b/dbms/Columns/ColumnsCommon.cpp similarity index 100% rename from dbms/src/Columns/ColumnsCommon.cpp rename to dbms/Columns/ColumnsCommon.cpp diff --git a/dbms/src/Columns/ColumnsCommon.h b/dbms/Columns/ColumnsCommon.h similarity index 100% rename from dbms/src/Columns/ColumnsCommon.h rename to dbms/Columns/ColumnsCommon.h diff --git a/dbms/src/Columns/ColumnsNumber.h b/dbms/Columns/ColumnsNumber.h similarity index 100% rename from dbms/src/Columns/ColumnsNumber.h rename to dbms/Columns/ColumnsNumber.h diff --git a/dbms/src/Columns/FilterDescription.cpp b/dbms/Columns/FilterDescription.cpp similarity index 100% rename from dbms/src/Columns/FilterDescription.cpp rename to dbms/Columns/FilterDescription.cpp diff --git a/dbms/src/Columns/FilterDescription.h b/dbms/Columns/FilterDescription.h similarity index 100% rename from dbms/src/Columns/FilterDescription.h rename to dbms/Columns/FilterDescription.h diff --git a/dbms/src/Columns/IColumn.cpp b/dbms/Columns/IColumn.cpp similarity index 100% rename from dbms/src/Columns/IColumn.cpp rename to dbms/Columns/IColumn.cpp diff --git a/dbms/src/Columns/IColumn.h b/dbms/Columns/IColumn.h similarity index 100% rename from dbms/src/Columns/IColumn.h rename to dbms/Columns/IColumn.h diff --git a/dbms/src/Columns/IColumnDummy.h b/dbms/Columns/IColumnDummy.h similarity index 100% rename from dbms/src/Columns/IColumnDummy.h rename to 
dbms/Columns/IColumnDummy.h diff --git a/dbms/src/Columns/IColumnImpl.h b/dbms/Columns/IColumnImpl.h similarity index 100% rename from dbms/src/Columns/IColumnImpl.h rename to dbms/Columns/IColumnImpl.h diff --git a/dbms/src/Columns/IColumnUnique.h b/dbms/Columns/IColumnUnique.h similarity index 100% rename from dbms/src/Columns/IColumnUnique.h rename to dbms/Columns/IColumnUnique.h diff --git a/dbms/src/Columns/ReverseIndex.h b/dbms/Columns/ReverseIndex.h similarity index 100% rename from dbms/src/Columns/ReverseIndex.h rename to dbms/Columns/ReverseIndex.h diff --git a/dbms/src/Columns/getLeastSuperColumn.cpp b/dbms/Columns/getLeastSuperColumn.cpp similarity index 100% rename from dbms/src/Columns/getLeastSuperColumn.cpp rename to dbms/Columns/getLeastSuperColumn.cpp diff --git a/dbms/src/Columns/getLeastSuperColumn.h b/dbms/Columns/getLeastSuperColumn.h similarity index 100% rename from dbms/src/Columns/getLeastSuperColumn.h rename to dbms/Columns/getLeastSuperColumn.h diff --git a/dbms/src/Columns/tests/CMakeLists.txt b/dbms/Columns/tests/CMakeLists.txt similarity index 100% rename from dbms/src/Columns/tests/CMakeLists.txt rename to dbms/Columns/tests/CMakeLists.txt diff --git a/dbms/src/Columns/tests/gtest_column_unique.cpp b/dbms/Columns/tests/gtest_column_unique.cpp similarity index 100% rename from dbms/src/Columns/tests/gtest_column_unique.cpp rename to dbms/Columns/tests/gtest_column_unique.cpp diff --git a/dbms/src/Columns/tests/gtest_weak_hash_32.cpp b/dbms/Columns/tests/gtest_weak_hash_32.cpp similarity index 100% rename from dbms/src/Columns/tests/gtest_weak_hash_32.cpp rename to dbms/Columns/tests/gtest_weak_hash_32.cpp diff --git a/dbms/src/Common/ActionBlocker.h b/dbms/Common/ActionBlocker.h similarity index 100% rename from dbms/src/Common/ActionBlocker.h rename to dbms/Common/ActionBlocker.h diff --git a/dbms/src/Common/ActionLock.cpp b/dbms/Common/ActionLock.cpp similarity index 100% rename from dbms/src/Common/ActionLock.cpp rename to dbms/Common/ActionLock.cpp diff --git a/dbms/src/Common/ActionLock.h b/dbms/Common/ActionLock.h similarity index 100% rename from dbms/src/Common/ActionLock.h rename to dbms/Common/ActionLock.h diff --git a/dbms/src/Common/AlignedBuffer.cpp b/dbms/Common/AlignedBuffer.cpp similarity index 100% rename from dbms/src/Common/AlignedBuffer.cpp rename to dbms/Common/AlignedBuffer.cpp diff --git a/dbms/src/Common/AlignedBuffer.h b/dbms/Common/AlignedBuffer.h similarity index 100% rename from dbms/src/Common/AlignedBuffer.h rename to dbms/Common/AlignedBuffer.h diff --git a/dbms/src/Common/Allocator.h b/dbms/Common/Allocator.h similarity index 100% rename from dbms/src/Common/Allocator.h rename to dbms/Common/Allocator.h diff --git a/dbms/src/Common/Allocator_fwd.h b/dbms/Common/Allocator_fwd.h similarity index 100% rename from dbms/src/Common/Allocator_fwd.h rename to dbms/Common/Allocator_fwd.h diff --git a/dbms/src/Common/Arena.h b/dbms/Common/Arena.h similarity index 100% rename from dbms/src/Common/Arena.h rename to dbms/Common/Arena.h diff --git a/dbms/src/Common/ArenaAllocator.h b/dbms/Common/ArenaAllocator.h similarity index 100% rename from dbms/src/Common/ArenaAllocator.h rename to dbms/Common/ArenaAllocator.h diff --git a/dbms/src/Common/ArenaWithFreeLists.h b/dbms/Common/ArenaWithFreeLists.h similarity index 100% rename from dbms/src/Common/ArenaWithFreeLists.h rename to dbms/Common/ArenaWithFreeLists.h diff --git a/dbms/src/Common/ArrayCache.h b/dbms/Common/ArrayCache.h similarity index 100% rename from dbms/src/Common/ArrayCache.h 
rename to dbms/Common/ArrayCache.h diff --git a/dbms/src/Common/AutoArray.h b/dbms/Common/AutoArray.h similarity index 100% rename from dbms/src/Common/AutoArray.h rename to dbms/Common/AutoArray.h diff --git a/dbms/src/Common/BitHelpers.h b/dbms/Common/BitHelpers.h similarity index 100% rename from dbms/src/Common/BitHelpers.h rename to dbms/Common/BitHelpers.h diff --git a/dbms/src/Common/CMakeLists.txt b/dbms/Common/CMakeLists.txt similarity index 100% rename from dbms/src/Common/CMakeLists.txt rename to dbms/Common/CMakeLists.txt diff --git a/dbms/src/Common/COW.h b/dbms/Common/COW.h similarity index 100% rename from dbms/src/Common/COW.h rename to dbms/Common/COW.h diff --git a/dbms/src/Common/ClickHouseRevision.cpp b/dbms/Common/ClickHouseRevision.cpp similarity index 100% rename from dbms/src/Common/ClickHouseRevision.cpp rename to dbms/Common/ClickHouseRevision.cpp diff --git a/dbms/src/Common/ClickHouseRevision.h b/dbms/Common/ClickHouseRevision.h similarity index 100% rename from dbms/src/Common/ClickHouseRevision.h rename to dbms/Common/ClickHouseRevision.h diff --git a/dbms/src/Common/ColumnsHashing.h b/dbms/Common/ColumnsHashing.h similarity index 100% rename from dbms/src/Common/ColumnsHashing.h rename to dbms/Common/ColumnsHashing.h diff --git a/dbms/src/Common/ColumnsHashingImpl.h b/dbms/Common/ColumnsHashingImpl.h similarity index 100% rename from dbms/src/Common/ColumnsHashingImpl.h rename to dbms/Common/ColumnsHashingImpl.h diff --git a/dbms/src/Common/CombinedCardinalityEstimator.h b/dbms/Common/CombinedCardinalityEstimator.h similarity index 100% rename from dbms/src/Common/CombinedCardinalityEstimator.h rename to dbms/Common/CombinedCardinalityEstimator.h diff --git a/dbms/src/Common/CompactArray.h b/dbms/Common/CompactArray.h similarity index 100% rename from dbms/src/Common/CompactArray.h rename to dbms/Common/CompactArray.h diff --git a/dbms/src/Common/ConcurrentBoundedQueue.h b/dbms/Common/ConcurrentBoundedQueue.h similarity index 100% rename from dbms/src/Common/ConcurrentBoundedQueue.h rename to dbms/Common/ConcurrentBoundedQueue.h diff --git a/dbms/src/Common/Config/AbstractConfigurationComparison.cpp b/dbms/Common/Config/AbstractConfigurationComparison.cpp similarity index 100% rename from dbms/src/Common/Config/AbstractConfigurationComparison.cpp rename to dbms/Common/Config/AbstractConfigurationComparison.cpp diff --git a/dbms/src/Common/Config/AbstractConfigurationComparison.h b/dbms/Common/Config/AbstractConfigurationComparison.h similarity index 100% rename from dbms/src/Common/Config/AbstractConfigurationComparison.h rename to dbms/Common/Config/AbstractConfigurationComparison.h diff --git a/dbms/src/Common/Config/CMakeLists.txt b/dbms/Common/Config/CMakeLists.txt similarity index 100% rename from dbms/src/Common/Config/CMakeLists.txt rename to dbms/Common/Config/CMakeLists.txt diff --git a/dbms/src/Common/Config/ConfigProcessor.cpp b/dbms/Common/Config/ConfigProcessor.cpp similarity index 100% rename from dbms/src/Common/Config/ConfigProcessor.cpp rename to dbms/Common/Config/ConfigProcessor.cpp diff --git a/dbms/src/Common/Config/ConfigProcessor.h b/dbms/Common/Config/ConfigProcessor.h similarity index 100% rename from dbms/src/Common/Config/ConfigProcessor.h rename to dbms/Common/Config/ConfigProcessor.h diff --git a/dbms/src/Common/Config/ConfigReloader.cpp b/dbms/Common/Config/ConfigReloader.cpp similarity index 100% rename from dbms/src/Common/Config/ConfigReloader.cpp rename to dbms/Common/Config/ConfigReloader.cpp diff --git 
a/dbms/src/Common/Config/ConfigReloader.h b/dbms/Common/Config/ConfigReloader.h similarity index 100% rename from dbms/src/Common/Config/ConfigReloader.h rename to dbms/Common/Config/ConfigReloader.h diff --git a/dbms/src/Common/Config/configReadClient.cpp b/dbms/Common/Config/configReadClient.cpp similarity index 100% rename from dbms/src/Common/Config/configReadClient.cpp rename to dbms/Common/Config/configReadClient.cpp diff --git a/dbms/src/Common/Config/configReadClient.h b/dbms/Common/Config/configReadClient.h similarity index 100% rename from dbms/src/Common/Config/configReadClient.h rename to dbms/Common/Config/configReadClient.h diff --git a/dbms/src/Common/CounterInFile.h b/dbms/Common/CounterInFile.h similarity index 100% rename from dbms/src/Common/CounterInFile.h rename to dbms/Common/CounterInFile.h diff --git a/dbms/src/Common/CpuId.h b/dbms/Common/CpuId.h similarity index 100% rename from dbms/src/Common/CpuId.h rename to dbms/Common/CpuId.h diff --git a/dbms/src/Common/CurrentMetrics.cpp b/dbms/Common/CurrentMetrics.cpp similarity index 100% rename from dbms/src/Common/CurrentMetrics.cpp rename to dbms/Common/CurrentMetrics.cpp diff --git a/dbms/src/Common/CurrentMetrics.h b/dbms/Common/CurrentMetrics.h similarity index 100% rename from dbms/src/Common/CurrentMetrics.h rename to dbms/Common/CurrentMetrics.h diff --git a/dbms/src/Common/CurrentThread.cpp b/dbms/Common/CurrentThread.cpp similarity index 100% rename from dbms/src/Common/CurrentThread.cpp rename to dbms/Common/CurrentThread.cpp diff --git a/dbms/src/Common/CurrentThread.h b/dbms/Common/CurrentThread.h similarity index 100% rename from dbms/src/Common/CurrentThread.h rename to dbms/Common/CurrentThread.h diff --git a/dbms/src/Common/DNSResolver.cpp b/dbms/Common/DNSResolver.cpp similarity index 100% rename from dbms/src/Common/DNSResolver.cpp rename to dbms/Common/DNSResolver.cpp diff --git a/dbms/src/Common/DNSResolver.h b/dbms/Common/DNSResolver.h similarity index 100% rename from dbms/src/Common/DNSResolver.h rename to dbms/Common/DNSResolver.h diff --git a/dbms/src/Common/Dwarf.cpp b/dbms/Common/Dwarf.cpp similarity index 100% rename from dbms/src/Common/Dwarf.cpp rename to dbms/Common/Dwarf.cpp diff --git a/dbms/src/Common/Dwarf.h b/dbms/Common/Dwarf.h similarity index 100% rename from dbms/src/Common/Dwarf.h rename to dbms/Common/Dwarf.h diff --git a/dbms/src/Common/Elf.cpp b/dbms/Common/Elf.cpp similarity index 100% rename from dbms/src/Common/Elf.cpp rename to dbms/Common/Elf.cpp diff --git a/dbms/src/Common/Elf.h b/dbms/Common/Elf.h similarity index 100% rename from dbms/src/Common/Elf.h rename to dbms/Common/Elf.h diff --git a/dbms/src/Common/ErrorCodes.cpp b/dbms/Common/ErrorCodes.cpp similarity index 100% rename from dbms/src/Common/ErrorCodes.cpp rename to dbms/Common/ErrorCodes.cpp diff --git a/dbms/src/Common/EventCounter.h b/dbms/Common/EventCounter.h similarity index 100% rename from dbms/src/Common/EventCounter.h rename to dbms/Common/EventCounter.h diff --git a/dbms/src/Common/Exception.cpp b/dbms/Common/Exception.cpp similarity index 100% rename from dbms/src/Common/Exception.cpp rename to dbms/Common/Exception.cpp diff --git a/dbms/src/Common/Exception.h b/dbms/Common/Exception.h similarity index 100% rename from dbms/src/Common/Exception.h rename to dbms/Common/Exception.h diff --git a/dbms/src/Common/ExternalLoaderStatus.cpp b/dbms/Common/ExternalLoaderStatus.cpp similarity index 100% rename from dbms/src/Common/ExternalLoaderStatus.cpp rename to dbms/Common/ExternalLoaderStatus.cpp diff 
--git a/dbms/src/Common/ExternalLoaderStatus.h b/dbms/Common/ExternalLoaderStatus.h
similarity index 100%
rename from dbms/src/Common/ExternalLoaderStatus.h
rename to dbms/Common/ExternalLoaderStatus.h
diff --git a/dbms/src/Common/FieldVisitors.cpp b/dbms/Common/FieldVisitors.cpp
similarity index 100%
rename from dbms/src/Common/FieldVisitors.cpp
rename to dbms/Common/FieldVisitors.cpp
diff --git a/dbms/src/Common/FieldVisitors.h b/dbms/Common/FieldVisitors.h
similarity index 100%
rename from dbms/src/Common/FieldVisitors.h
rename to dbms/Common/FieldVisitors.h
diff --git a/dbms/src/Common/FileChecker.cpp b/dbms/Common/FileChecker.cpp
similarity index 100%
rename from dbms/src/Common/FileChecker.cpp
rename to dbms/Common/FileChecker.cpp
diff --git a/dbms/src/Common/FileChecker.h b/dbms/Common/FileChecker.h
similarity index 100%
rename from dbms/src/Common/FileChecker.h
rename to dbms/Common/FileChecker.h
diff --git a/dbms/src/Common/FileUpdatesTracker.h b/dbms/Common/FileUpdatesTracker.h
similarity index 100%
rename from dbms/src/Common/FileUpdatesTracker.h
rename to dbms/Common/FileUpdatesTracker.h
diff --git a/dbms/src/Common/HTMLForm.h b/dbms/Common/HTMLForm.h
similarity index 100%
rename from dbms/src/Common/HTMLForm.h
rename to dbms/Common/HTMLForm.h
diff --git a/dbms/src/Common/HashTable/ClearableHashMap.h b/dbms/Common/HashTable/ClearableHashMap.h
similarity index 100%
rename from dbms/src/Common/HashTable/ClearableHashMap.h
rename to dbms/Common/HashTable/ClearableHashMap.h
diff --git a/dbms/src/Common/HashTable/ClearableHashSet.h b/dbms/Common/HashTable/ClearableHashSet.h
similarity index 100%
rename from dbms/src/Common/HashTable/ClearableHashSet.h
rename to dbms/Common/HashTable/ClearableHashSet.h
diff --git a/dbms/src/Common/HashTable/FixedClearableHashMap.h b/dbms/Common/HashTable/FixedClearableHashMap.h
similarity index 100%
rename from dbms/src/Common/HashTable/FixedClearableHashMap.h
rename to dbms/Common/HashTable/FixedClearableHashMap.h
diff --git a/dbms/src/Common/HashTable/FixedClearableHashSet.h b/dbms/Common/HashTable/FixedClearableHashSet.h
similarity index 100%
rename from dbms/src/Common/HashTable/FixedClearableHashSet.h
rename to dbms/Common/HashTable/FixedClearableHashSet.h
diff --git a/dbms/src/Common/HashTable/FixedHashMap.h b/dbms/Common/HashTable/FixedHashMap.h
similarity index 100%
rename from dbms/src/Common/HashTable/FixedHashMap.h
rename to dbms/Common/HashTable/FixedHashMap.h
diff --git a/dbms/src/Common/HashTable/FixedHashSet.h b/dbms/Common/HashTable/FixedHashSet.h
similarity index 100%
rename from dbms/src/Common/HashTable/FixedHashSet.h
rename to dbms/Common/HashTable/FixedHashSet.h
diff --git a/dbms/src/Common/HashTable/FixedHashTable.h b/dbms/Common/HashTable/FixedHashTable.h
similarity index 100%
rename from dbms/src/Common/HashTable/FixedHashTable.h
rename to dbms/Common/HashTable/FixedHashTable.h
diff --git a/dbms/src/Common/HashTable/Hash.h b/dbms/Common/HashTable/Hash.h
similarity index 100%
rename from dbms/src/Common/HashTable/Hash.h
rename to dbms/Common/HashTable/Hash.h
diff --git a/dbms/src/Common/HashTable/HashMap.h b/dbms/Common/HashTable/HashMap.h
similarity index 100%
rename from dbms/src/Common/HashTable/HashMap.h
rename to dbms/Common/HashTable/HashMap.h
diff --git a/dbms/src/Common/HashTable/HashSet.h b/dbms/Common/HashTable/HashSet.h
similarity index 100%
rename from dbms/src/Common/HashTable/HashSet.h
rename to dbms/Common/HashTable/HashSet.h
diff --git a/dbms/src/Common/HashTable/HashTable.h b/dbms/Common/HashTable/HashTable.h
similarity index 100%
rename from dbms/src/Common/HashTable/HashTable.h
rename to dbms/Common/HashTable/HashTable.h
diff --git a/dbms/src/Common/HashTable/HashTableAllocator.h b/dbms/Common/HashTable/HashTableAllocator.h
similarity index 100%
rename from dbms/src/Common/HashTable/HashTableAllocator.h
rename to dbms/Common/HashTable/HashTableAllocator.h
diff --git a/dbms/src/Common/HashTable/HashTableKeyHolder.h b/dbms/Common/HashTable/HashTableKeyHolder.h
similarity index 100%
rename from dbms/src/Common/HashTable/HashTableKeyHolder.h
rename to dbms/Common/HashTable/HashTableKeyHolder.h
diff --git a/dbms/src/Common/HashTable/SmallTable.h b/dbms/Common/HashTable/SmallTable.h
similarity index 100%
rename from dbms/src/Common/HashTable/SmallTable.h
rename to dbms/Common/HashTable/SmallTable.h
diff --git a/dbms/src/Common/HashTable/StringHashMap.h b/dbms/Common/HashTable/StringHashMap.h
similarity index 100%
rename from dbms/src/Common/HashTable/StringHashMap.h
rename to dbms/Common/HashTable/StringHashMap.h
diff --git a/dbms/src/Common/HashTable/StringHashTable.h b/dbms/Common/HashTable/StringHashTable.h
similarity index 100%
rename from dbms/src/Common/HashTable/StringHashTable.h
rename to dbms/Common/HashTable/StringHashTable.h
diff --git a/dbms/src/Common/HashTable/TwoLevelHashMap.h b/dbms/Common/HashTable/TwoLevelHashMap.h
similarity index 100%
rename from dbms/src/Common/HashTable/TwoLevelHashMap.h
rename to dbms/Common/HashTable/TwoLevelHashMap.h
diff --git a/dbms/src/Common/HashTable/TwoLevelHashTable.h b/dbms/Common/HashTable/TwoLevelHashTable.h
similarity index 100%
rename from dbms/src/Common/HashTable/TwoLevelHashTable.h
rename to dbms/Common/HashTable/TwoLevelHashTable.h
diff --git a/dbms/src/Common/HashTable/TwoLevelStringHashMap.h b/dbms/Common/HashTable/TwoLevelStringHashMap.h
similarity index 100%
rename from dbms/src/Common/HashTable/TwoLevelStringHashMap.h
rename to dbms/Common/HashTable/TwoLevelStringHashMap.h
diff --git a/dbms/src/Common/HashTable/TwoLevelStringHashTable.h b/dbms/Common/HashTable/TwoLevelStringHashTable.h
similarity index 100%
rename from dbms/src/Common/HashTable/TwoLevelStringHashTable.h
rename to dbms/Common/HashTable/TwoLevelStringHashTable.h
diff --git a/dbms/src/Common/HyperLogLogBiasEstimator.h b/dbms/Common/HyperLogLogBiasEstimator.h
similarity index 100%
rename from dbms/src/Common/HyperLogLogBiasEstimator.h
rename to dbms/Common/HyperLogLogBiasEstimator.h
diff --git a/dbms/src/Common/HyperLogLogCounter.h b/dbms/Common/HyperLogLogCounter.h
similarity index 100%
rename from dbms/src/Common/HyperLogLogCounter.h
rename to dbms/Common/HyperLogLogCounter.h
diff --git a/dbms/src/Common/HyperLogLogWithSmallSetOptimization.h b/dbms/Common/HyperLogLogWithSmallSetOptimization.h
similarity index 100%
rename from dbms/src/Common/HyperLogLogWithSmallSetOptimization.h
rename to dbms/Common/HyperLogLogWithSmallSetOptimization.h
diff --git a/dbms/src/Common/IFactoryWithAliases.h b/dbms/Common/IFactoryWithAliases.h
similarity index 100%
rename from dbms/src/Common/IFactoryWithAliases.h
rename to dbms/Common/IFactoryWithAliases.h
diff --git a/dbms/src/Common/IPv6ToBinary.cpp b/dbms/Common/IPv6ToBinary.cpp
similarity index 100%
rename from dbms/src/Common/IPv6ToBinary.cpp
rename to dbms/Common/IPv6ToBinary.cpp
diff --git a/dbms/src/Common/IPv6ToBinary.h b/dbms/Common/IPv6ToBinary.h
similarity index 100%
rename from dbms/src/Common/IPv6ToBinary.h
rename to dbms/Common/IPv6ToBinary.h
diff --git a/dbms/src/Common/Increment.h b/dbms/Common/Increment.h
similarity index 100%
rename from dbms/src/Common/Increment.h
rename to dbms/Common/Increment.h
diff --git a/dbms/src/Common/InterruptListener.h b/dbms/Common/InterruptListener.h
similarity index 100%
rename from dbms/src/Common/InterruptListener.h
rename to dbms/Common/InterruptListener.h
diff --git a/dbms/src/Common/IntervalKind.cpp b/dbms/Common/IntervalKind.cpp
similarity index 100%
rename from dbms/src/Common/IntervalKind.cpp
rename to dbms/Common/IntervalKind.cpp
diff --git a/dbms/src/Common/IntervalKind.h b/dbms/Common/IntervalKind.h
similarity index 100%
rename from dbms/src/Common/IntervalKind.h
rename to dbms/Common/IntervalKind.h
diff --git a/dbms/src/Common/LRUCache.h b/dbms/Common/LRUCache.h
similarity index 100%
rename from dbms/src/Common/LRUCache.h
rename to dbms/Common/LRUCache.h
diff --git a/dbms/src/Common/Macros.cpp b/dbms/Common/Macros.cpp
similarity index 100%
rename from dbms/src/Common/Macros.cpp
rename to dbms/Common/Macros.cpp
diff --git a/dbms/src/Common/Macros.h b/dbms/Common/Macros.h
similarity index 100%
rename from dbms/src/Common/Macros.h
rename to dbms/Common/Macros.h
diff --git a/dbms/src/Common/MemorySanitizer.h b/dbms/Common/MemorySanitizer.h
similarity index 100%
rename from dbms/src/Common/MemorySanitizer.h
rename to dbms/Common/MemorySanitizer.h
diff --git a/dbms/src/Common/MemoryTracker.cpp b/dbms/Common/MemoryTracker.cpp
similarity index 100%
rename from dbms/src/Common/MemoryTracker.cpp
rename to dbms/Common/MemoryTracker.cpp
diff --git a/dbms/src/Common/MemoryTracker.h b/dbms/Common/MemoryTracker.h
similarity index 100%
rename from dbms/src/Common/MemoryTracker.h
rename to dbms/Common/MemoryTracker.h
diff --git a/dbms/src/Common/MultiVersion.h b/dbms/Common/MultiVersion.h
similarity index 100%
rename from dbms/src/Common/MultiVersion.h
rename to dbms/Common/MultiVersion.h
diff --git a/dbms/src/Common/NaNUtils.h b/dbms/Common/NaNUtils.h
similarity index 100%
rename from dbms/src/Common/NaNUtils.h
rename to dbms/Common/NaNUtils.h
diff --git a/dbms/src/Common/NamePrompter.h b/dbms/Common/NamePrompter.h
similarity index 100%
rename from dbms/src/Common/NamePrompter.h
rename to dbms/Common/NamePrompter.h
diff --git a/dbms/src/Common/NetException.h b/dbms/Common/NetException.h
similarity index 100%
rename from dbms/src/Common/NetException.h
rename to dbms/Common/NetException.h
diff --git a/dbms/src/Common/ObjectPool.h b/dbms/Common/ObjectPool.h
similarity index 100%
rename from dbms/src/Common/ObjectPool.h
rename to dbms/Common/ObjectPool.h
diff --git a/dbms/src/Common/OpenSSLHelpers.cpp b/dbms/Common/OpenSSLHelpers.cpp
similarity index 100%
rename from dbms/src/Common/OpenSSLHelpers.cpp
rename to dbms/Common/OpenSSLHelpers.cpp
diff --git a/dbms/src/Common/OpenSSLHelpers.h b/dbms/Common/OpenSSLHelpers.h
similarity index 100%
rename from dbms/src/Common/OpenSSLHelpers.h
rename to dbms/Common/OpenSSLHelpers.h
diff --git a/dbms/src/Common/OptimizedRegularExpression.cpp b/dbms/Common/OptimizedRegularExpression.cpp
similarity index 100%
rename from dbms/src/Common/OptimizedRegularExpression.cpp
rename to dbms/Common/OptimizedRegularExpression.cpp
diff --git a/dbms/src/Common/OptimizedRegularExpression.h b/dbms/Common/OptimizedRegularExpression.h
similarity index 100%
rename from dbms/src/Common/OptimizedRegularExpression.h
rename to dbms/Common/OptimizedRegularExpression.h
diff --git a/dbms/src/Common/PODArray.cpp b/dbms/Common/PODArray.cpp
similarity index 100%
rename from dbms/src/Common/PODArray.cpp
rename to dbms/Common/PODArray.cpp
diff --git a/dbms/src/Common/PODArray.h b/dbms/Common/PODArray.h
similarity index 100%
rename from dbms/src/Common/PODArray.h
rename to dbms/Common/PODArray.h
diff --git a/dbms/src/Common/PODArray_fwd.h b/dbms/Common/PODArray_fwd.h
similarity index 100%
rename from dbms/src/Common/PODArray_fwd.h
rename to dbms/Common/PODArray_fwd.h
diff --git a/dbms/src/Common/PipeFDs.cpp b/dbms/Common/PipeFDs.cpp
similarity index 100%
rename from dbms/src/Common/PipeFDs.cpp
rename to dbms/Common/PipeFDs.cpp
diff --git a/dbms/src/Common/PipeFDs.h b/dbms/Common/PipeFDs.h
similarity index 100%
rename from dbms/src/Common/PipeFDs.h
rename to dbms/Common/PipeFDs.h
diff --git a/dbms/src/Common/PoolBase.h b/dbms/Common/PoolBase.h
similarity index 100%
rename from dbms/src/Common/PoolBase.h
rename to dbms/Common/PoolBase.h
diff --git a/dbms/src/Common/PoolWithFailoverBase.h b/dbms/Common/PoolWithFailoverBase.h
similarity index 100%
rename from dbms/src/Common/PoolWithFailoverBase.h
rename to dbms/Common/PoolWithFailoverBase.h
diff --git a/dbms/src/Common/ProfileEvents.cpp b/dbms/Common/ProfileEvents.cpp
similarity index 100%
rename from dbms/src/Common/ProfileEvents.cpp
rename to dbms/Common/ProfileEvents.cpp
diff --git a/dbms/src/Common/ProfileEvents.h b/dbms/Common/ProfileEvents.h
similarity index 100%
rename from dbms/src/Common/ProfileEvents.h
rename to dbms/Common/ProfileEvents.h
diff --git a/dbms/src/Common/ProfilingScopedRWLock.h b/dbms/Common/ProfilingScopedRWLock.h
similarity index 100%
rename from dbms/src/Common/ProfilingScopedRWLock.h
rename to dbms/Common/ProfilingScopedRWLock.h
diff --git a/dbms/src/Common/QueryProfiler.cpp b/dbms/Common/QueryProfiler.cpp
similarity index 100%
rename from dbms/src/Common/QueryProfiler.cpp
rename to dbms/Common/QueryProfiler.cpp
diff --git a/dbms/src/Common/QueryProfiler.h b/dbms/Common/QueryProfiler.h
similarity index 100%
rename from dbms/src/Common/QueryProfiler.h
rename to dbms/Common/QueryProfiler.h
diff --git a/dbms/src/Common/RWLock.cpp b/dbms/Common/RWLock.cpp
similarity index 100%
rename from dbms/src/Common/RWLock.cpp
rename to dbms/Common/RWLock.cpp
diff --git a/dbms/src/Common/RWLock.h b/dbms/Common/RWLock.h
similarity index 100%
rename from dbms/src/Common/RWLock.h
rename to dbms/Common/RWLock.h
diff --git a/dbms/src/Common/RadixSort.h b/dbms/Common/RadixSort.h
similarity index 100%
rename from dbms/src/Common/RadixSort.h
rename to dbms/Common/RadixSort.h
diff --git a/dbms/src/Common/RemoteHostFilter.cpp b/dbms/Common/RemoteHostFilter.cpp
similarity index 100%
rename from dbms/src/Common/RemoteHostFilter.cpp
rename to dbms/Common/RemoteHostFilter.cpp
diff --git a/dbms/src/Common/RemoteHostFilter.h b/dbms/Common/RemoteHostFilter.h
similarity index 100%
rename from dbms/src/Common/RemoteHostFilter.h
rename to dbms/Common/RemoteHostFilter.h
diff --git a/dbms/src/Common/SensitiveDataMasker.cpp b/dbms/Common/SensitiveDataMasker.cpp
similarity index 100%
rename from dbms/src/Common/SensitiveDataMasker.cpp
rename to dbms/Common/SensitiveDataMasker.cpp
diff --git a/dbms/src/Common/SensitiveDataMasker.h b/dbms/Common/SensitiveDataMasker.h
similarity index 100%
rename from dbms/src/Common/SensitiveDataMasker.h
rename to dbms/Common/SensitiveDataMasker.h
diff --git a/dbms/src/Common/SettingsChanges.h b/dbms/Common/SettingsChanges.h
similarity index 100%
rename from dbms/src/Common/SettingsChanges.h
rename to dbms/Common/SettingsChanges.h
diff --git a/dbms/src/Common/SharedBlockRowRef.h b/dbms/Common/SharedBlockRowRef.h
similarity index 100%
rename from dbms/src/Common/SharedBlockRowRef.h
rename to dbms/Common/SharedBlockRowRef.h
diff --git a/dbms/src/Common/SharedLibrary.cpp b/dbms/Common/SharedLibrary.cpp
similarity index 100%
rename from dbms/src/Common/SharedLibrary.cpp
rename to dbms/Common/SharedLibrary.cpp
diff --git a/dbms/src/Common/SharedLibrary.h b/dbms/Common/SharedLibrary.h
similarity index 100%
rename from dbms/src/Common/SharedLibrary.h
rename to dbms/Common/SharedLibrary.h
diff --git a/dbms/src/Common/ShellCommand.cpp b/dbms/Common/ShellCommand.cpp
similarity index 100%
rename from dbms/src/Common/ShellCommand.cpp
rename to dbms/Common/ShellCommand.cpp
diff --git a/dbms/src/Common/ShellCommand.h b/dbms/Common/ShellCommand.h
similarity index 100%
rename from dbms/src/Common/ShellCommand.h
rename to dbms/Common/ShellCommand.h
diff --git a/dbms/src/Common/SimpleActionBlocker.h b/dbms/Common/SimpleActionBlocker.h
similarity index 100%
rename from dbms/src/Common/SimpleActionBlocker.h
rename to dbms/Common/SimpleActionBlocker.h
diff --git a/dbms/src/Common/SimpleIncrement.h b/dbms/Common/SimpleIncrement.h
similarity index 100%
rename from dbms/src/Common/SimpleIncrement.h
rename to dbms/Common/SimpleIncrement.h
diff --git a/dbms/src/Common/SipHash.h b/dbms/Common/SipHash.h
similarity index 100%
rename from dbms/src/Common/SipHash.h
rename to dbms/Common/SipHash.h
diff --git a/dbms/src/Common/SmallObjectPool.h b/dbms/Common/SmallObjectPool.h
similarity index 100%
rename from dbms/src/Common/SmallObjectPool.h
rename to dbms/Common/SmallObjectPool.h
diff --git a/dbms/src/Common/SpaceSaving.h b/dbms/Common/SpaceSaving.h
similarity index 100%
rename from dbms/src/Common/SpaceSaving.h
rename to dbms/Common/SpaceSaving.h
diff --git a/dbms/src/Common/StackTrace.cpp b/dbms/Common/StackTrace.cpp
similarity index 100%
rename from dbms/src/Common/StackTrace.cpp
rename to dbms/Common/StackTrace.cpp
diff --git a/dbms/src/Common/StackTrace.h b/dbms/Common/StackTrace.h
similarity index 100%
rename from dbms/src/Common/StackTrace.h
rename to dbms/Common/StackTrace.h
diff --git a/dbms/src/Common/StatusFile.cpp b/dbms/Common/StatusFile.cpp
similarity index 100%
rename from dbms/src/Common/StatusFile.cpp
rename to dbms/Common/StatusFile.cpp
diff --git a/dbms/src/Common/StatusFile.h b/dbms/Common/StatusFile.h
similarity index 100%
rename from dbms/src/Common/StatusFile.h
rename to dbms/Common/StatusFile.h
diff --git a/dbms/src/Common/StatusInfo.cpp b/dbms/Common/StatusInfo.cpp
similarity index 100%
rename from dbms/src/Common/StatusInfo.cpp
rename to dbms/Common/StatusInfo.cpp
diff --git a/dbms/src/Common/StatusInfo.h b/dbms/Common/StatusInfo.h
similarity index 100%
rename from dbms/src/Common/StatusInfo.h
rename to dbms/Common/StatusInfo.h
diff --git a/dbms/src/Common/Stopwatch.cpp b/dbms/Common/Stopwatch.cpp
similarity index 100%
rename from dbms/src/Common/Stopwatch.cpp
rename to dbms/Common/Stopwatch.cpp
diff --git a/dbms/src/Common/Stopwatch.h b/dbms/Common/Stopwatch.h
similarity index 100%
rename from dbms/src/Common/Stopwatch.h
rename to dbms/Common/Stopwatch.h
diff --git a/dbms/src/Common/StringSearcher.h b/dbms/Common/StringSearcher.h
similarity index 100%
rename from dbms/src/Common/StringSearcher.h
rename to dbms/Common/StringSearcher.h
diff --git a/dbms/src/Common/StringUtils/CMakeLists.txt b/dbms/Common/StringUtils/CMakeLists.txt
similarity index 100%
rename from dbms/src/Common/StringUtils/CMakeLists.txt
rename to dbms/Common/StringUtils/CMakeLists.txt
diff --git a/dbms/src/Common/StringUtils/StringUtils.cpp b/dbms/Common/StringUtils/StringUtils.cpp
similarity index 100%
rename from dbms/src/Common/StringUtils/StringUtils.cpp
rename to dbms/Common/StringUtils/StringUtils.cpp
diff --git a/dbms/src/Common/StringUtils/StringUtils.h b/dbms/Common/StringUtils/StringUtils.h
similarity index 100%
rename from dbms/src/Common/StringUtils/StringUtils.h
rename to dbms/Common/StringUtils/StringUtils.h
diff --git a/dbms/src/Common/StudentTTest.cpp b/dbms/Common/StudentTTest.cpp
similarity index 100%
rename from dbms/src/Common/StudentTTest.cpp
rename to dbms/Common/StudentTTest.cpp
diff --git a/dbms/src/Common/StudentTTest.h b/dbms/Common/StudentTTest.h
similarity index 100%
rename from dbms/src/Common/StudentTTest.h
rename to dbms/Common/StudentTTest.h
diff --git a/dbms/src/Common/SymbolIndex.cpp b/dbms/Common/SymbolIndex.cpp
similarity index 100%
rename from dbms/src/Common/SymbolIndex.cpp
rename to dbms/Common/SymbolIndex.cpp
diff --git a/dbms/src/Common/SymbolIndex.h b/dbms/Common/SymbolIndex.h
similarity index 100%
rename from dbms/src/Common/SymbolIndex.h
rename to dbms/Common/SymbolIndex.h
diff --git a/dbms/src/Common/TaskStatsInfoGetter.cpp b/dbms/Common/TaskStatsInfoGetter.cpp
similarity index 100%
rename from dbms/src/Common/TaskStatsInfoGetter.cpp
rename to dbms/Common/TaskStatsInfoGetter.cpp
diff --git a/dbms/src/Common/TaskStatsInfoGetter.h b/dbms/Common/TaskStatsInfoGetter.h
similarity index 100%
rename from dbms/src/Common/TaskStatsInfoGetter.h
rename to dbms/Common/TaskStatsInfoGetter.h
diff --git a/dbms/src/Common/TerminalSize.cpp b/dbms/Common/TerminalSize.cpp
similarity index 100%
rename from dbms/src/Common/TerminalSize.cpp
rename to dbms/Common/TerminalSize.cpp
diff --git a/dbms/src/Common/TerminalSize.h b/dbms/Common/TerminalSize.h
similarity index 100%
rename from dbms/src/Common/TerminalSize.h
rename to dbms/Common/TerminalSize.h
diff --git a/dbms/src/Common/ThreadFuzzer.cpp b/dbms/Common/ThreadFuzzer.cpp
similarity index 100%
rename from dbms/src/Common/ThreadFuzzer.cpp
rename to dbms/Common/ThreadFuzzer.cpp
diff --git a/dbms/src/Common/ThreadFuzzer.h b/dbms/Common/ThreadFuzzer.h
similarity index 100%
rename from dbms/src/Common/ThreadFuzzer.h
rename to dbms/Common/ThreadFuzzer.h
diff --git a/dbms/src/Common/ThreadPool.cpp b/dbms/Common/ThreadPool.cpp
similarity index 100%
rename from dbms/src/Common/ThreadPool.cpp
rename to dbms/Common/ThreadPool.cpp
diff --git a/dbms/src/Common/ThreadPool.h b/dbms/Common/ThreadPool.h
similarity index 100%
rename from dbms/src/Common/ThreadPool.h
rename to dbms/Common/ThreadPool.h
diff --git a/dbms/src/Common/ThreadProfileEvents.h b/dbms/Common/ThreadProfileEvents.h
similarity index 100%
rename from dbms/src/Common/ThreadProfileEvents.h
rename to dbms/Common/ThreadProfileEvents.h
diff --git a/dbms/src/Common/ThreadStatus.cpp b/dbms/Common/ThreadStatus.cpp
similarity index 100%
rename from dbms/src/Common/ThreadStatus.cpp
rename to dbms/Common/ThreadStatus.cpp
diff --git a/dbms/src/Common/ThreadStatus.h b/dbms/Common/ThreadStatus.h
similarity index 100%
rename from dbms/src/Common/ThreadStatus.h
rename to dbms/Common/ThreadStatus.h
diff --git a/dbms/src/Common/Throttler.h b/dbms/Common/Throttler.h
similarity index 100%
rename from dbms/src/Common/Throttler.h
rename to dbms/Common/Throttler.h
diff --git a/dbms/src/Common/TraceCollector.cpp b/dbms/Common/TraceCollector.cpp
similarity index 100%
rename from dbms/src/Common/TraceCollector.cpp
rename to dbms/Common/TraceCollector.cpp
diff --git a/dbms/src/Common/TraceCollector.h b/dbms/Common/TraceCollector.h
similarity index 100%
rename from dbms/src/Common/TraceCollector.h
rename to dbms/Common/TraceCollector.h
diff --git a/dbms/src/Common/TypeList.h b/dbms/Common/TypeList.h
similarity index 100%
rename from dbms/src/Common/TypeList.h
rename to dbms/Common/TypeList.h
diff --git a/dbms/src/Common/TypePromotion.h b/dbms/Common/TypePromotion.h
similarity index 100%
rename from dbms/src/Common/TypePromotion.h
rename to dbms/Common/TypePromotion.h
diff --git a/dbms/src/Common/UInt128.h b/dbms/Common/UInt128.h
similarity index 100%
rename from dbms/src/Common/UInt128.h
rename to dbms/Common/UInt128.h
diff --git a/dbms/src/Common/UTF8Helpers.cpp b/dbms/Common/UTF8Helpers.cpp
similarity index 100%
rename from dbms/src/Common/UTF8Helpers.cpp
rename to dbms/Common/UTF8Helpers.cpp
diff --git a/dbms/src/Common/UTF8Helpers.h b/dbms/Common/UTF8Helpers.h
similarity index 100%
rename from dbms/src/Common/UTF8Helpers.h
rename to dbms/Common/UTF8Helpers.h
diff --git a/dbms/src/Common/UnicodeBar.h b/dbms/Common/UnicodeBar.h
similarity index 100%
rename from dbms/src/Common/UnicodeBar.h
rename to dbms/Common/UnicodeBar.h
diff --git a/dbms/src/Common/VariableContext.h b/dbms/Common/VariableContext.h
similarity index 100%
rename from dbms/src/Common/VariableContext.h
rename to dbms/Common/VariableContext.h
diff --git a/dbms/src/Common/Visitor.h b/dbms/Common/Visitor.h
similarity index 100%
rename from dbms/src/Common/Visitor.h
rename to dbms/Common/Visitor.h
diff --git a/dbms/src/Common/Volnitsky.h b/dbms/Common/Volnitsky.h
similarity index 100%
rename from dbms/src/Common/Volnitsky.h
rename to dbms/Common/Volnitsky.h
diff --git a/dbms/src/Common/WeakHash.cpp b/dbms/Common/WeakHash.cpp
similarity index 100%
rename from dbms/src/Common/WeakHash.cpp
rename to dbms/Common/WeakHash.cpp
diff --git a/dbms/src/Common/WeakHash.h b/dbms/Common/WeakHash.h
similarity index 100%
rename from dbms/src/Common/WeakHash.h
rename to dbms/Common/WeakHash.h
diff --git a/dbms/src/Common/XDBCBridgeHelper.h b/dbms/Common/XDBCBridgeHelper.h
similarity index 100%
rename from dbms/src/Common/XDBCBridgeHelper.h
rename to dbms/Common/XDBCBridgeHelper.h
diff --git a/dbms/src/Common/ZooKeeper/CMakeLists.txt b/dbms/Common/ZooKeeper/CMakeLists.txt
similarity index 100%
rename from dbms/src/Common/ZooKeeper/CMakeLists.txt
rename to dbms/Common/ZooKeeper/CMakeLists.txt
diff --git a/dbms/src/Common/ZooKeeper/Common.h b/dbms/Common/ZooKeeper/Common.h
similarity index 100%
rename from dbms/src/Common/ZooKeeper/Common.h
rename to dbms/Common/ZooKeeper/Common.h
diff --git a/dbms/src/Common/ZooKeeper/IKeeper.cpp b/dbms/Common/ZooKeeper/IKeeper.cpp
similarity index 100%
rename from dbms/src/Common/ZooKeeper/IKeeper.cpp
rename to dbms/Common/ZooKeeper/IKeeper.cpp
diff --git a/dbms/src/Common/ZooKeeper/IKeeper.h b/dbms/Common/ZooKeeper/IKeeper.h
similarity index 100%
rename from dbms/src/Common/ZooKeeper/IKeeper.h
rename to dbms/Common/ZooKeeper/IKeeper.h
diff --git a/dbms/src/Common/ZooKeeper/Increment.h b/dbms/Common/ZooKeeper/Increment.h
similarity index 100%
rename from dbms/src/Common/ZooKeeper/Increment.h
rename to dbms/Common/ZooKeeper/Increment.h
diff --git a/dbms/src/Common/ZooKeeper/KeeperException.h b/dbms/Common/ZooKeeper/KeeperException.h
similarity index 100%
rename from dbms/src/Common/ZooKeeper/KeeperException.h
rename to dbms/Common/ZooKeeper/KeeperException.h
diff --git a/dbms/src/Common/ZooKeeper/LeaderElection.h b/dbms/Common/ZooKeeper/LeaderElection.h
similarity index 100%
rename from dbms/src/Common/ZooKeeper/LeaderElection.h
rename to dbms/Common/ZooKeeper/LeaderElection.h
diff --git a/dbms/src/Common/ZooKeeper/Lock.cpp b/dbms/Common/ZooKeeper/Lock.cpp
similarity index 100%
rename from dbms/src/Common/ZooKeeper/Lock.cpp
rename to dbms/Common/ZooKeeper/Lock.cpp
diff --git a/dbms/src/Common/ZooKeeper/Lock.h b/dbms/Common/ZooKeeper/Lock.h
similarity index 100%
rename from dbms/src/Common/ZooKeeper/Lock.h
rename to dbms/Common/ZooKeeper/Lock.h
diff --git a/dbms/src/Common/ZooKeeper/TestKeeper.cpp b/dbms/Common/ZooKeeper/TestKeeper.cpp
similarity index 100%
rename from dbms/src/Common/ZooKeeper/TestKeeper.cpp
rename to dbms/Common/ZooKeeper/TestKeeper.cpp
diff --git a/dbms/src/Common/ZooKeeper/TestKeeper.h b/dbms/Common/ZooKeeper/TestKeeper.h
similarity index 100%
rename from dbms/src/Common/ZooKeeper/TestKeeper.h
rename to dbms/Common/ZooKeeper/TestKeeper.h
diff --git a/dbms/src/Common/ZooKeeper/Types.h b/dbms/Common/ZooKeeper/Types.h
similarity index 100%
rename from dbms/src/Common/ZooKeeper/Types.h
rename to dbms/Common/ZooKeeper/Types.h
diff --git a/dbms/src/Common/ZooKeeper/ZooKeeper.cpp b/dbms/Common/ZooKeeper/ZooKeeper.cpp
similarity index 100%
rename from dbms/src/Common/ZooKeeper/ZooKeeper.cpp
rename to dbms/Common/ZooKeeper/ZooKeeper.cpp
diff --git a/dbms/src/Common/ZooKeeper/ZooKeeper.h b/dbms/Common/ZooKeeper/ZooKeeper.h
similarity index 100%
rename from dbms/src/Common/ZooKeeper/ZooKeeper.h
rename to dbms/Common/ZooKeeper/ZooKeeper.h
diff --git a/dbms/src/Common/ZooKeeper/ZooKeeperHolder.cpp b/dbms/Common/ZooKeeper/ZooKeeperHolder.cpp
similarity index 100%
rename from dbms/src/Common/ZooKeeper/ZooKeeperHolder.cpp
rename to dbms/Common/ZooKeeper/ZooKeeperHolder.cpp
diff --git a/dbms/src/Common/ZooKeeper/ZooKeeperHolder.h b/dbms/Common/ZooKeeper/ZooKeeperHolder.h
similarity index 100%
rename from dbms/src/Common/ZooKeeper/ZooKeeperHolder.h
rename to dbms/Common/ZooKeeper/ZooKeeperHolder.h
diff --git a/dbms/src/Common/ZooKeeper/ZooKeeperImpl.cpp b/dbms/Common/ZooKeeper/ZooKeeperImpl.cpp
similarity index 100%
rename from dbms/src/Common/ZooKeeper/ZooKeeperImpl.cpp
rename to dbms/Common/ZooKeeper/ZooKeeperImpl.cpp
diff --git a/dbms/src/Common/ZooKeeper/ZooKeeperImpl.h b/dbms/Common/ZooKeeper/ZooKeeperImpl.h
similarity index 100%
rename from dbms/src/Common/ZooKeeper/ZooKeeperImpl.h
rename to dbms/Common/ZooKeeper/ZooKeeperImpl.h
diff --git a/dbms/src/Common/ZooKeeper/ZooKeeperNodeCache.cpp b/dbms/Common/ZooKeeper/ZooKeeperNodeCache.cpp
similarity index 100%
rename from dbms/src/Common/ZooKeeper/ZooKeeperNodeCache.cpp
rename to dbms/Common/ZooKeeper/ZooKeeperNodeCache.cpp
diff --git a/dbms/src/Common/ZooKeeper/ZooKeeperNodeCache.h b/dbms/Common/ZooKeeper/ZooKeeperNodeCache.h
similarity index 100%
rename from dbms/src/Common/ZooKeeper/ZooKeeperNodeCache.h
rename to dbms/Common/ZooKeeper/ZooKeeperNodeCache.h
diff --git a/dbms/src/Common/ZooKeeper/tests/CMakeLists.txt b/dbms/Common/ZooKeeper/tests/CMakeLists.txt
similarity index 100%
rename from dbms/src/Common/ZooKeeper/tests/CMakeLists.txt
rename to dbms/Common/ZooKeeper/tests/CMakeLists.txt
diff --git a/dbms/src/Common/ZooKeeper/tests/gtest_zkutil_test_multi_exception.cpp b/dbms/Common/ZooKeeper/tests/gtest_zkutil_test_multi_exception.cpp
similarity index 100%
rename from dbms/src/Common/ZooKeeper/tests/gtest_zkutil_test_multi_exception.cpp
rename to dbms/Common/ZooKeeper/tests/gtest_zkutil_test_multi_exception.cpp
diff --git a/dbms/src/Common/ZooKeeper/tests/nozk.sh b/dbms/Common/ZooKeeper/tests/nozk.sh
similarity index 100%
rename from dbms/src/Common/ZooKeeper/tests/nozk.sh
rename to dbms/Common/ZooKeeper/tests/nozk.sh
diff --git a/dbms/src/Common/ZooKeeper/tests/yeszk.sh b/dbms/Common/ZooKeeper/tests/yeszk.sh
similarity index 100%
rename from dbms/src/Common/ZooKeeper/tests/yeszk.sh
rename to dbms/Common/ZooKeeper/tests/yeszk.sh
diff --git a/dbms/src/Common/ZooKeeper/tests/zk_many_watches_reconnect.cpp b/dbms/Common/ZooKeeper/tests/zk_many_watches_reconnect.cpp
similarity index 100%
rename from dbms/src/Common/ZooKeeper/tests/zk_many_watches_reconnect.cpp
rename to dbms/Common/ZooKeeper/tests/zk_many_watches_reconnect.cpp
diff --git a/dbms/src/Common/ZooKeeper/tests/zkutil_expiration_test.cpp b/dbms/Common/ZooKeeper/tests/zkutil_expiration_test.cpp
similarity index 100%
rename from dbms/src/Common/ZooKeeper/tests/zkutil_expiration_test.cpp
rename to dbms/Common/ZooKeeper/tests/zkutil_expiration_test.cpp
diff --git a/dbms/src/Common/ZooKeeper/tests/zkutil_test_async.cpp b/dbms/Common/ZooKeeper/tests/zkutil_test_async.cpp
similarity index 100%
rename from dbms/src/Common/ZooKeeper/tests/zkutil_test_async.cpp
rename to dbms/Common/ZooKeeper/tests/zkutil_test_async.cpp
diff --git a/dbms/src/Common/ZooKeeper/tests/zkutil_test_commands.cpp b/dbms/Common/ZooKeeper/tests/zkutil_test_commands.cpp
similarity index 100%
rename from dbms/src/Common/ZooKeeper/tests/zkutil_test_commands.cpp
rename to dbms/Common/ZooKeeper/tests/zkutil_test_commands.cpp
diff --git a/dbms/src/Common/ZooKeeper/tests/zkutil_test_commands_new_lib.cpp b/dbms/Common/ZooKeeper/tests/zkutil_test_commands_new_lib.cpp
similarity index 100%
rename from dbms/src/Common/ZooKeeper/tests/zkutil_test_commands_new_lib.cpp
rename to dbms/Common/ZooKeeper/tests/zkutil_test_commands_new_lib.cpp
diff --git a/dbms/src/Common/ZooKeeper/tests/zkutil_test_lock.cpp b/dbms/Common/ZooKeeper/tests/zkutil_test_lock.cpp
similarity index 100%
rename from dbms/src/Common/ZooKeeper/tests/zkutil_test_lock.cpp
rename to dbms/Common/ZooKeeper/tests/zkutil_test_lock.cpp
diff --git a/dbms/src/Common/ZooKeeper/tests/zkutil_zookeeper_holder.cpp b/dbms/Common/ZooKeeper/tests/zkutil_zookeeper_holder.cpp
similarity index 100%
rename from dbms/src/Common/ZooKeeper/tests/zkutil_zookeeper_holder.cpp
rename to dbms/Common/ZooKeeper/tests/zkutil_zookeeper_holder.cpp
diff --git a/dbms/src/Common/ZooKeeper/tests/zookeeper_impl.cpp b/dbms/Common/ZooKeeper/tests/zookeeper_impl.cpp
similarity index 100%
rename from dbms/src/Common/ZooKeeper/tests/zookeeper_impl.cpp
rename to dbms/Common/ZooKeeper/tests/zookeeper_impl.cpp
diff --git a/dbms/src/Common/assert_cast.h b/dbms/Common/assert_cast.h
similarity index 100%
rename from dbms/src/Common/assert_cast.h
rename to dbms/Common/assert_cast.h
diff --git a/dbms/src/Common/checkStackSize.cpp b/dbms/Common/checkStackSize.cpp
similarity index 100%
rename from dbms/src/Common/checkStackSize.cpp
rename to dbms/Common/checkStackSize.cpp
diff --git a/dbms/src/Common/checkStackSize.h b/dbms/Common/checkStackSize.h
similarity index 100%
rename from dbms/src/Common/checkStackSize.h
rename to dbms/Common/checkStackSize.h
diff --git a/dbms/src/Common/config.h.in b/dbms/Common/config.h.in
similarity index 100%
rename from dbms/src/Common/config.h.in
rename to dbms/Common/config.h.in
diff --git a/dbms/src/Common/config_version.h.in b/dbms/Common/config_version.h.in
similarity index 100%
rename from dbms/src/Common/config_version.h.in
rename to dbms/Common/config_version.h.in
diff --git a/dbms/src/Common/createHardLink.cpp b/dbms/Common/createHardLink.cpp
similarity index 100%
rename from dbms/src/Common/createHardLink.cpp
rename to dbms/Common/createHardLink.cpp
diff --git a/dbms/src/Common/createHardLink.h b/dbms/Common/createHardLink.h
similarity index 100%
rename from dbms/src/Common/createHardLink.h
rename to dbms/Common/createHardLink.h
diff --git a/dbms/src/Common/escapeForFileName.cpp b/dbms/Common/escapeForFileName.cpp
similarity index 100%
rename from dbms/src/Common/escapeForFileName.cpp
rename to dbms/Common/escapeForFileName.cpp
diff --git a/dbms/src/Common/escapeForFileName.h b/dbms/Common/escapeForFileName.h
similarity index 100%
rename from dbms/src/Common/escapeForFileName.h
rename to dbms/Common/escapeForFileName.h
diff --git a/dbms/src/Common/filesystemHelpers.cpp b/dbms/Common/filesystemHelpers.cpp
similarity index 100%
rename from dbms/src/Common/filesystemHelpers.cpp
rename to dbms/Common/filesystemHelpers.cpp
diff --git a/dbms/src/Common/filesystemHelpers.h b/dbms/Common/filesystemHelpers.h
similarity index 100%
rename from dbms/src/Common/filesystemHelpers.h
rename to dbms/Common/filesystemHelpers.h
diff --git a/dbms/src/Common/formatIPv6.cpp b/dbms/Common/formatIPv6.cpp
similarity index 100%
rename from dbms/src/Common/formatIPv6.cpp
rename to dbms/Common/formatIPv6.cpp
diff --git a/dbms/src/Common/formatIPv6.h b/dbms/Common/formatIPv6.h
similarity index 100%
rename from dbms/src/Common/formatIPv6.h
rename to dbms/Common/formatIPv6.h
diff --git a/dbms/src/Common/formatReadable.cpp b/dbms/Common/formatReadable.cpp
similarity index 100%
rename from dbms/src/Common/formatReadable.cpp
rename to dbms/Common/formatReadable.cpp
diff --git a/dbms/src/Common/formatReadable.h b/dbms/Common/formatReadable.h
similarity index 100%
rename from dbms/src/Common/formatReadable.h
rename to dbms/Common/formatReadable.h
diff --git a/dbms/src/Common/getExecutablePath.cpp b/dbms/Common/getExecutablePath.cpp
similarity index 100%
rename from dbms/src/Common/getExecutablePath.cpp
rename to dbms/Common/getExecutablePath.cpp
diff --git a/dbms/src/Common/getExecutablePath.h b/dbms/Common/getExecutablePath.h
similarity index 100%
rename from dbms/src/Common/getExecutablePath.h
rename to dbms/Common/getExecutablePath.h
diff --git a/dbms/src/Common/getMultipleKeysFromConfig.cpp b/dbms/Common/getMultipleKeysFromConfig.cpp
similarity index 100%
rename from dbms/src/Common/getMultipleKeysFromConfig.cpp
rename to dbms/Common/getMultipleKeysFromConfig.cpp
diff --git a/dbms/src/Common/getMultipleKeysFromConfig.h b/dbms/Common/getMultipleKeysFromConfig.h
similarity index 100%
rename from dbms/src/Common/getMultipleKeysFromConfig.h
rename to dbms/Common/getMultipleKeysFromConfig.h
diff --git a/dbms/src/Common/getNumberOfPhysicalCPUCores.cpp b/dbms/Common/getNumberOfPhysicalCPUCores.cpp
similarity index 100%
rename from dbms/src/Common/getNumberOfPhysicalCPUCores.cpp
rename to dbms/Common/getNumberOfPhysicalCPUCores.cpp
diff --git a/dbms/src/Common/getNumberOfPhysicalCPUCores.h b/dbms/Common/getNumberOfPhysicalCPUCores.h
similarity index 100%
rename from dbms/src/Common/getNumberOfPhysicalCPUCores.h
rename to dbms/Common/getNumberOfPhysicalCPUCores.h
diff --git a/dbms/src/Common/hasLinuxCapability.cpp b/dbms/Common/hasLinuxCapability.cpp
similarity index 100%
rename from dbms/src/Common/hasLinuxCapability.cpp
rename to dbms/Common/hasLinuxCapability.cpp
diff --git a/dbms/src/Common/hasLinuxCapability.h b/dbms/Common/hasLinuxCapability.h
similarity index 100%
rename from dbms/src/Common/hasLinuxCapability.h
rename to dbms/Common/hasLinuxCapability.h
diff --git a/dbms/src/Common/hex.cpp b/dbms/Common/hex.cpp
similarity index 100%
rename from dbms/src/Common/hex.cpp
rename to dbms/Common/hex.cpp
diff --git a/dbms/src/Common/hex.h b/dbms/Common/hex.h
similarity index 100%
rename from dbms/src/Common/hex.h
rename to dbms/Common/hex.h
diff --git a/dbms/src/Common/intExp.h b/dbms/Common/intExp.h
similarity index 100%
rename from dbms/src/Common/intExp.h
rename to dbms/Common/intExp.h
diff --git a/dbms/src/Common/interpolate.h b/dbms/Common/interpolate.h
similarity index 100%
rename from dbms/src/Common/interpolate.h
rename to dbms/Common/interpolate.h
diff --git a/dbms/src/Common/isLocalAddress.cpp b/dbms/Common/isLocalAddress.cpp
similarity index 100%
rename from dbms/src/Common/isLocalAddress.cpp
rename to dbms/Common/isLocalAddress.cpp
diff --git a/dbms/src/Common/isLocalAddress.h b/dbms/Common/isLocalAddress.h
similarity index 100%
rename from dbms/src/Common/isLocalAddress.h
rename to dbms/Common/isLocalAddress.h
diff --git a/dbms/src/Common/malloc.cpp b/dbms/Common/malloc.cpp
similarity index 100%
rename from dbms/src/Common/malloc.cpp
rename to dbms/Common/malloc.cpp
diff --git a/dbms/src/Common/memcmpSmall.h b/dbms/Common/memcmpSmall.h
similarity index 100%
rename from dbms/src/Common/memcmpSmall.h
rename to dbms/Common/memcmpSmall.h
diff --git a/dbms/src/Common/memcpySmall.h b/dbms/Common/memcpySmall.h
similarity index 100%
rename from dbms/src/Common/memcpySmall.h
rename to dbms/Common/memcpySmall.h
diff --git a/dbms/src/Common/new_delete.cpp b/dbms/Common/new_delete.cpp
similarity index 100%
rename from dbms/src/Common/new_delete.cpp
rename to dbms/Common/new_delete.cpp
diff --git a/dbms/src/Common/parseAddress.cpp b/dbms/Common/parseAddress.cpp
similarity index 100%
rename from dbms/src/Common/parseAddress.cpp
rename to dbms/Common/parseAddress.cpp
diff --git a/dbms/src/Common/parseAddress.h b/dbms/Common/parseAddress.h
similarity index 100%
rename from dbms/src/Common/parseAddress.h
rename to dbms/Common/parseAddress.h
diff --git a/dbms/src/Common/parseGlobs.cpp b/dbms/Common/parseGlobs.cpp
similarity index 100%
rename from dbms/src/Common/parseGlobs.cpp
rename to dbms/Common/parseGlobs.cpp
diff --git a/dbms/src/Common/parseGlobs.h b/dbms/Common/parseGlobs.h
similarity index 100%
rename from dbms/src/Common/parseGlobs.h
rename to dbms/Common/parseGlobs.h
diff --git a/dbms/src/Common/parseRemoteDescription.cpp b/dbms/Common/parseRemoteDescription.cpp
similarity index 100%
rename from dbms/src/Common/parseRemoteDescription.cpp
rename to dbms/Common/parseRemoteDescription.cpp
diff --git a/dbms/src/Common/parseRemoteDescription.h b/dbms/Common/parseRemoteDescription.h
similarity index 100%
rename from dbms/src/Common/parseRemoteDescription.h
rename to dbms/Common/parseRemoteDescription.h
diff --git a/dbms/src/Common/quoteString.cpp b/dbms/Common/quoteString.cpp
similarity index 100%
rename from dbms/src/Common/quoteString.cpp
rename to dbms/Common/quoteString.cpp
diff --git a/dbms/src/Common/quoteString.h b/dbms/Common/quoteString.h
similarity index 100%
rename from dbms/src/Common/quoteString.h
rename to dbms/Common/quoteString.h
diff --git a/dbms/src/Common/randomSeed.cpp b/dbms/Common/randomSeed.cpp
similarity index 100%
rename from dbms/src/Common/randomSeed.cpp
rename to dbms/Common/randomSeed.cpp
diff --git a/dbms/src/Common/randomSeed.h b/dbms/Common/randomSeed.h
similarity index 100%
rename from dbms/src/Common/randomSeed.h
rename to dbms/Common/randomSeed.h
diff --git a/dbms/src/Common/setThreadName.cpp b/dbms/Common/setThreadName.cpp
similarity index 100%
rename from dbms/src/Common/setThreadName.cpp
rename to dbms/Common/setThreadName.cpp
diff --git a/dbms/src/Common/setThreadName.h b/dbms/Common/setThreadName.h
similarity index 100%
rename from dbms/src/Common/setThreadName.h
rename to dbms/Common/setThreadName.h
diff --git a/dbms/src/Common/tests/CMakeLists.txt b/dbms/Common/tests/CMakeLists.txt
similarity index 100%
rename from dbms/src/Common/tests/CMakeLists.txt
rename to dbms/Common/tests/CMakeLists.txt
diff --git a/dbms/src/Common/tests/arena_with_free_lists.cpp b/dbms/Common/tests/arena_with_free_lists.cpp
similarity index 100%
rename from dbms/src/Common/tests/arena_with_free_lists.cpp
rename to dbms/Common/tests/arena_with_free_lists.cpp
diff --git a/dbms/src/Common/tests/array_cache.cpp b/dbms/Common/tests/array_cache.cpp
similarity index 100%
rename from dbms/src/Common/tests/array_cache.cpp
rename to dbms/Common/tests/array_cache.cpp
diff --git a/dbms/src/Common/tests/auto_array.cpp b/dbms/Common/tests/auto_array.cpp
similarity index 100%
rename from dbms/src/Common/tests/auto_array.cpp
rename to dbms/Common/tests/auto_array.cpp
diff --git a/dbms/src/Common/tests/chaos_sanitizer.cpp b/dbms/Common/tests/chaos_sanitizer.cpp
similarity index 100%
rename from dbms/src/Common/tests/chaos_sanitizer.cpp
rename to dbms/Common/tests/chaos_sanitizer.cpp
diff --git a/dbms/src/Common/tests/compact_array.cpp b/dbms/Common/tests/compact_array.cpp
similarity index 100%
rename from dbms/src/Common/tests/compact_array.cpp
rename to dbms/Common/tests/compact_array.cpp
diff --git a/dbms/src/Common/tests/cow_columns.cpp b/dbms/Common/tests/cow_columns.cpp
similarity index 100%
rename from dbms/src/Common/tests/cow_columns.cpp
rename to dbms/Common/tests/cow_columns.cpp
diff --git a/dbms/src/Common/tests/cow_compositions.cpp b/dbms/Common/tests/cow_compositions.cpp
similarity index 100%
rename from dbms/src/Common/tests/cow_compositions.cpp
rename to dbms/Common/tests/cow_compositions.cpp
diff --git a/dbms/src/Common/tests/gtest_getMultipleValuesFromConfig.cpp b/dbms/Common/tests/gtest_getMultipleValuesFromConfig.cpp
similarity index 100%
rename from dbms/src/Common/tests/gtest_getMultipleValuesFromConfig.cpp
rename to dbms/Common/tests/gtest_getMultipleValuesFromConfig.cpp
diff --git a/dbms/src/Common/tests/gtest_global_context.h b/dbms/Common/tests/gtest_global_context.h
similarity index 100%
rename from dbms/src/Common/tests/gtest_global_context.h
rename to dbms/Common/tests/gtest_global_context.h
diff --git a/dbms/src/Common/tests/gtest_makeRegexpPatternFromGlobs.cpp b/dbms/Common/tests/gtest_makeRegexpPatternFromGlobs.cpp
similarity index 100%
rename from dbms/src/Common/tests/gtest_makeRegexpPatternFromGlobs.cpp
rename to dbms/Common/tests/gtest_makeRegexpPatternFromGlobs.cpp
diff --git a/dbms/src/Common/tests/gtest_pod_array.cpp b/dbms/Common/tests/gtest_pod_array.cpp
similarity index 100%
rename from dbms/src/Common/tests/gtest_pod_array.cpp
rename to dbms/Common/tests/gtest_pod_array.cpp
diff --git a/dbms/src/Common/tests/gtest_rw_lock.cpp b/dbms/Common/tests/gtest_rw_lock.cpp
similarity index 100%
rename from dbms/src/Common/tests/gtest_rw_lock.cpp
rename to dbms/Common/tests/gtest_rw_lock.cpp
diff --git a/dbms/src/Common/tests/gtest_sensitive_data_masker.cpp b/dbms/Common/tests/gtest_sensitive_data_masker.cpp
similarity index 100%
rename from dbms/src/Common/tests/gtest_sensitive_data_masker.cpp
rename to dbms/Common/tests/gtest_sensitive_data_masker.cpp
diff --git a/dbms/src/Common/tests/gtest_shell_command.cpp b/dbms/Common/tests/gtest_shell_command.cpp
similarity index 100%
rename from dbms/src/Common/tests/gtest_shell_command.cpp
rename to dbms/Common/tests/gtest_shell_command.cpp
diff --git a/dbms/src/Common/tests/gtest_thread_pool_concurrent_wait.cpp b/dbms/Common/tests/gtest_thread_pool_concurrent_wait.cpp
similarity index 100%
rename from dbms/src/Common/tests/gtest_thread_pool_concurrent_wait.cpp
rename to dbms/Common/tests/gtest_thread_pool_concurrent_wait.cpp
diff --git a/dbms/src/Common/tests/gtest_thread_pool_global_full.cpp b/dbms/Common/tests/gtest_thread_pool_global_full.cpp
similarity index 100%
rename from dbms/src/Common/tests/gtest_thread_pool_global_full.cpp
rename to dbms/Common/tests/gtest_thread_pool_global_full.cpp
diff --git a/dbms/src/Common/tests/gtest_thread_pool_limit.cpp b/dbms/Common/tests/gtest_thread_pool_limit.cpp
similarity index 100%
rename from dbms/src/Common/tests/gtest_thread_pool_limit.cpp
rename to dbms/Common/tests/gtest_thread_pool_limit.cpp
diff --git a/dbms/src/Common/tests/gtest_thread_pool_loop.cpp b/dbms/Common/tests/gtest_thread_pool_loop.cpp
similarity index 100%
rename from dbms/src/Common/tests/gtest_thread_pool_loop.cpp
rename to dbms/Common/tests/gtest_thread_pool_loop.cpp
diff --git a/dbms/src/Common/tests/gtest_thread_pool_schedule_exception.cpp b/dbms/Common/tests/gtest_thread_pool_schedule_exception.cpp
similarity index 100%
rename from dbms/src/Common/tests/gtest_thread_pool_schedule_exception.cpp
rename to dbms/Common/tests/gtest_thread_pool_schedule_exception.cpp
diff --git a/dbms/src/Common/tests/gtest_unescapeForFileName.cpp b/dbms/Common/tests/gtest_unescapeForFileName.cpp
similarity index 100%
rename from dbms/src/Common/tests/gtest_unescapeForFileName.cpp
rename to dbms/Common/tests/gtest_unescapeForFileName.cpp
diff --git a/dbms/src/Common/tests/hash_table.cpp b/dbms/Common/tests/hash_table.cpp
similarity index 100%
rename from dbms/src/Common/tests/hash_table.cpp
rename to dbms/Common/tests/hash_table.cpp
diff --git a/dbms/src/Common/tests/hashes_test.cpp b/dbms/Common/tests/hashes_test.cpp
similarity index 100%
rename from dbms/src/Common/tests/hashes_test.cpp
rename to dbms/Common/tests/hashes_test.cpp
diff --git a/dbms/src/Common/tests/int_hashes_perf.cpp b/dbms/Common/tests/int_hashes_perf.cpp
similarity index 100%
rename from dbms/src/Common/tests/int_hashes_perf.cpp
rename to dbms/Common/tests/int_hashes_perf.cpp
diff --git a/dbms/src/Common/tests/integer_hash_tables_and_hashes.cpp b/dbms/Common/tests/integer_hash_tables_and_hashes.cpp
similarity index 100%
rename from dbms/src/Common/tests/integer_hash_tables_and_hashes.cpp
rename to dbms/Common/tests/integer_hash_tables_and_hashes.cpp
diff --git a/dbms/src/Common/tests/parallel_aggregation.cpp b/dbms/Common/tests/parallel_aggregation.cpp
similarity index 100%
rename from dbms/src/Common/tests/parallel_aggregation.cpp
rename to dbms/Common/tests/parallel_aggregation.cpp
diff --git a/dbms/src/Common/tests/parallel_aggregation2.cpp b/dbms/Common/tests/parallel_aggregation2.cpp
similarity index 100%
rename from dbms/src/Common/tests/parallel_aggregation2.cpp
rename to dbms/Common/tests/parallel_aggregation2.cpp
diff --git a/dbms/src/Common/tests/pod_array.cpp b/dbms/Common/tests/pod_array.cpp
similarity index 100%
rename from dbms/src/Common/tests/pod_array.cpp
rename to dbms/Common/tests/pod_array.cpp
diff --git a/dbms/src/Common/tests/radix_sort.cpp b/dbms/Common/tests/radix_sort.cpp
similarity index 100%
rename from dbms/src/Common/tests/radix_sort.cpp
rename to dbms/Common/tests/radix_sort.cpp
diff --git a/dbms/src/Common/tests/simple_cache.cpp b/dbms/Common/tests/simple_cache.cpp
similarity index 100%
rename from dbms/src/Common/tests/simple_cache.cpp
rename to dbms/Common/tests/simple_cache.cpp
diff --git a/dbms/src/Common/tests/sip_hash_perf.cpp b/dbms/Common/tests/sip_hash_perf.cpp
similarity index 100%
rename from dbms/src/Common/tests/sip_hash_perf.cpp
rename to dbms/Common/tests/sip_hash_perf.cpp
diff --git a/dbms/src/Common/tests/small_table.cpp b/dbms/Common/tests/small_table.cpp
similarity index 100%
rename from dbms/src/Common/tests/small_table.cpp
rename to dbms/Common/tests/small_table.cpp
diff --git a/dbms/src/Common/tests/space_saving.cpp b/dbms/Common/tests/space_saving.cpp
similarity index 100%
rename from dbms/src/Common/tests/space_saving.cpp
rename to dbms/Common/tests/space_saving.cpp
diff --git a/dbms/src/Common/tests/stopwatch.cpp b/dbms/Common/tests/stopwatch.cpp
similarity index 100%
rename from dbms/src/Common/tests/stopwatch.cpp
rename to dbms/Common/tests/stopwatch.cpp
diff --git a/dbms/src/Common/tests/symbol_index.cpp b/dbms/Common/tests/symbol_index.cpp
similarity index 100%
rename from dbms/src/Common/tests/symbol_index.cpp
rename to dbms/Common/tests/symbol_index.cpp
diff --git a/dbms/src/Common/tests/thread_creation_latency.cpp b/dbms/Common/tests/thread_creation_latency.cpp
similarity index 100%
rename from dbms/src/Common/tests/thread_creation_latency.cpp
rename to dbms/Common/tests/thread_creation_latency.cpp
diff --git a/dbms/src/Common/thread_local_rng.cpp b/dbms/Common/thread_local_rng.cpp
similarity index 100%
rename from dbms/src/Common/thread_local_rng.cpp
rename to dbms/Common/thread_local_rng.cpp
diff --git a/dbms/src/Common/thread_local_rng.h b/dbms/Common/thread_local_rng.h
similarity index 100%
rename from dbms/src/Common/thread_local_rng.h
rename to dbms/Common/thread_local_rng.h
diff --git a/dbms/src/Common/typeid_cast.h b/dbms/Common/typeid_cast.h
similarity index 100%
rename from dbms/src/Common/typeid_cast.h
rename to dbms/Common/typeid_cast.h
diff --git a/dbms/src/Compression/CMakeLists.txt b/dbms/Compression/CMakeLists.txt
similarity index 100%
rename from dbms/src/Compression/CMakeLists.txt
rename to dbms/Compression/CMakeLists.txt
diff --git a/dbms/src/Compression/CachedCompressedReadBuffer.cpp b/dbms/Compression/CachedCompressedReadBuffer.cpp
similarity index 100%
rename from dbms/src/Compression/CachedCompressedReadBuffer.cpp
rename to dbms/Compression/CachedCompressedReadBuffer.cpp
diff --git a/dbms/src/Compression/CachedCompressedReadBuffer.h b/dbms/Compression/CachedCompressedReadBuffer.h
similarity index 100%
rename from dbms/src/Compression/CachedCompressedReadBuffer.h
rename to dbms/Compression/CachedCompressedReadBuffer.h
diff --git a/dbms/src/Compression/CompressedReadBuffer.cpp b/dbms/Compression/CompressedReadBuffer.cpp
similarity index 100%
rename from dbms/src/Compression/CompressedReadBuffer.cpp
rename to dbms/Compression/CompressedReadBuffer.cpp
diff --git a/dbms/src/Compression/CompressedReadBuffer.h b/dbms/Compression/CompressedReadBuffer.h
similarity index 100%
rename from dbms/src/Compression/CompressedReadBuffer.h
rename to dbms/Compression/CompressedReadBuffer.h
diff --git a/dbms/src/Compression/CompressedReadBufferBase.cpp b/dbms/Compression/CompressedReadBufferBase.cpp
similarity index 100%
rename from dbms/src/Compression/CompressedReadBufferBase.cpp
rename to dbms/Compression/CompressedReadBufferBase.cpp
diff --git a/dbms/src/Compression/CompressedReadBufferBase.h b/dbms/Compression/CompressedReadBufferBase.h
similarity index 100%
rename from dbms/src/Compression/CompressedReadBufferBase.h
rename to dbms/Compression/CompressedReadBufferBase.h
diff --git a/dbms/src/Compression/CompressedReadBufferFromFile.cpp b/dbms/Compression/CompressedReadBufferFromFile.cpp
similarity index 100%
rename from dbms/src/Compression/CompressedReadBufferFromFile.cpp
rename to dbms/Compression/CompressedReadBufferFromFile.cpp
diff --git a/dbms/src/Compression/CompressedReadBufferFromFile.h b/dbms/Compression/CompressedReadBufferFromFile.h
similarity index 100%
rename from dbms/src/Compression/CompressedReadBufferFromFile.h
rename to dbms/Compression/CompressedReadBufferFromFile.h
diff --git a/dbms/src/Compression/CompressedWriteBuffer.cpp b/dbms/Compression/CompressedWriteBuffer.cpp
similarity index 100%
rename from dbms/src/Compression/CompressedWriteBuffer.cpp
rename to dbms/Compression/CompressedWriteBuffer.cpp
diff --git a/dbms/src/Compression/CompressedWriteBuffer.h b/dbms/Compression/CompressedWriteBuffer.h
similarity index 100%
rename from dbms/src/Compression/CompressedWriteBuffer.h
rename to dbms/Compression/CompressedWriteBuffer.h
diff --git a/dbms/src/Compression/CompressionCodecDelta.cpp b/dbms/Compression/CompressionCodecDelta.cpp
similarity index 100%
rename from dbms/src/Compression/CompressionCodecDelta.cpp
rename to dbms/Compression/CompressionCodecDelta.cpp
diff --git a/dbms/src/Compression/CompressionCodecDelta.h b/dbms/Compression/CompressionCodecDelta.h
similarity index 100%
rename from dbms/src/Compression/CompressionCodecDelta.h
rename to dbms/Compression/CompressionCodecDelta.h
diff --git a/dbms/src/Compression/CompressionCodecDoubleDelta.cpp b/dbms/Compression/CompressionCodecDoubleDelta.cpp
similarity index 100%
rename from dbms/src/Compression/CompressionCodecDoubleDelta.cpp
rename to dbms/Compression/CompressionCodecDoubleDelta.cpp
diff --git a/dbms/src/Compression/CompressionCodecDoubleDelta.h b/dbms/Compression/CompressionCodecDoubleDelta.h
similarity index 100%
rename from dbms/src/Compression/CompressionCodecDoubleDelta.h
rename to dbms/Compression/CompressionCodecDoubleDelta.h
diff --git a/dbms/src/Compression/CompressionCodecGorilla.cpp b/dbms/Compression/CompressionCodecGorilla.cpp
similarity index 100%
rename from dbms/src/Compression/CompressionCodecGorilla.cpp
rename to dbms/Compression/CompressionCodecGorilla.cpp
diff --git a/dbms/src/Compression/CompressionCodecGorilla.h b/dbms/Compression/CompressionCodecGorilla.h
similarity index 100%
rename from dbms/src/Compression/CompressionCodecGorilla.h
rename to dbms/Compression/CompressionCodecGorilla.h
diff --git a/dbms/src/Compression/CompressionCodecLZ4.cpp b/dbms/Compression/CompressionCodecLZ4.cpp
similarity index 100%
rename from dbms/src/Compression/CompressionCodecLZ4.cpp
rename to dbms/Compression/CompressionCodecLZ4.cpp
diff --git a/dbms/src/Compression/CompressionCodecLZ4.h b/dbms/Compression/CompressionCodecLZ4.h
similarity index 100%
rename from dbms/src/Compression/CompressionCodecLZ4.h
rename to dbms/Compression/CompressionCodecLZ4.h
diff --git a/dbms/src/Compression/CompressionCodecMultiple.cpp b/dbms/Compression/CompressionCodecMultiple.cpp
similarity index 100%
rename from dbms/src/Compression/CompressionCodecMultiple.cpp
rename to dbms/Compression/CompressionCodecMultiple.cpp
diff --git a/dbms/src/Compression/CompressionCodecMultiple.h b/dbms/Compression/CompressionCodecMultiple.h
similarity index 100%
rename from dbms/src/Compression/CompressionCodecMultiple.h
rename to dbms/Compression/CompressionCodecMultiple.h
diff --git a/dbms/src/Compression/CompressionCodecNone.cpp b/dbms/Compression/CompressionCodecNone.cpp
similarity index 100%
rename from dbms/src/Compression/CompressionCodecNone.cpp
rename to dbms/Compression/CompressionCodecNone.cpp
diff --git a/dbms/src/Compression/CompressionCodecNone.h b/dbms/Compression/CompressionCodecNone.h
similarity index 100%
rename from dbms/src/Compression/CompressionCodecNone.h
rename to dbms/Compression/CompressionCodecNone.h
diff --git a/dbms/src/Compression/CompressionCodecT64.cpp b/dbms/Compression/CompressionCodecT64.cpp
similarity index 100%
rename from dbms/src/Compression/CompressionCodecT64.cpp
rename to dbms/Compression/CompressionCodecT64.cpp
diff --git a/dbms/src/Compression/CompressionCodecT64.h b/dbms/Compression/CompressionCodecT64.h
similarity index 100%
rename from dbms/src/Compression/CompressionCodecT64.h
rename to dbms/Compression/CompressionCodecT64.h
diff --git a/dbms/src/Compression/CompressionCodecZSTD.cpp b/dbms/Compression/CompressionCodecZSTD.cpp
similarity index 100%
rename from dbms/src/Compression/CompressionCodecZSTD.cpp
rename to dbms/Compression/CompressionCodecZSTD.cpp
diff --git a/dbms/src/Compression/CompressionCodecZSTD.h b/dbms/Compression/CompressionCodecZSTD.h
similarity index 100%
rename from dbms/src/Compression/CompressionCodecZSTD.h
rename to dbms/Compression/CompressionCodecZSTD.h
diff --git a/dbms/src/Compression/CompressionFactory.cpp b/dbms/Compression/CompressionFactory.cpp
similarity index 100%
rename from dbms/src/Compression/CompressionFactory.cpp
rename to dbms/Compression/CompressionFactory.cpp
diff --git a/dbms/src/Compression/CompressionFactory.h b/dbms/Compression/CompressionFactory.h
similarity index 100%
rename from dbms/src/Compression/CompressionFactory.h
rename to dbms/Compression/CompressionFactory.h
diff --git a/dbms/src/Compression/CompressionInfo.h b/dbms/Compression/CompressionInfo.h
similarity index 100%
rename from dbms/src/Compression/CompressionInfo.h
rename to dbms/Compression/CompressionInfo.h
diff --git a/dbms/src/Compression/ICompressionCodec.cpp b/dbms/Compression/ICompressionCodec.cpp
similarity index 100%
rename from dbms/src/Compression/ICompressionCodec.cpp
rename to dbms/Compression/ICompressionCodec.cpp
diff --git a/dbms/src/Compression/ICompressionCodec.h b/dbms/Compression/ICompressionCodec.h
similarity index 100%
rename from dbms/src/Compression/ICompressionCodec.h
rename to dbms/Compression/ICompressionCodec.h
diff --git a/dbms/src/Compression/LZ4_decompress_faster.cpp b/dbms/Compression/LZ4_decompress_faster.cpp
similarity index 100%
rename from dbms/src/Compression/LZ4_decompress_faster.cpp
rename to dbms/Compression/LZ4_decompress_faster.cpp
diff --git a/dbms/src/Compression/LZ4_decompress_faster.h b/dbms/Compression/LZ4_decompress_faster.h
similarity index 100%
rename from dbms/src/Compression/LZ4_decompress_faster.h
rename to dbms/Compression/LZ4_decompress_faster.h
diff --git a/dbms/src/Compression/tests/CMakeLists.txt b/dbms/Compression/tests/CMakeLists.txt
similarity index 100%
rename from dbms/src/Compression/tests/CMakeLists.txt
rename to dbms/Compression/tests/CMakeLists.txt
diff --git a/dbms/src/Compression/tests/cached_compressed_read_buffer.cpp b/dbms/Compression/tests/cached_compressed_read_buffer.cpp
similarity index 100%
rename from dbms/src/Compression/tests/cached_compressed_read_buffer.cpp
rename to dbms/Compression/tests/cached_compressed_read_buffer.cpp
diff --git a/dbms/src/Compression/tests/compressed_buffer.cpp b/dbms/Compression/tests/compressed_buffer.cpp
similarity index 100%
rename from dbms/src/Compression/tests/compressed_buffer.cpp
rename to dbms/Compression/tests/compressed_buffer.cpp
diff --git a/dbms/src/Compression/tests/compressed_buffer_fuzz.cpp b/dbms/Compression/tests/compressed_buffer_fuzz.cpp
similarity index 100%
rename from dbms/src/Compression/tests/compressed_buffer_fuzz.cpp
rename to dbms/Compression/tests/compressed_buffer_fuzz.cpp
diff --git a/dbms/src/Compression/tests/gtest_compressionCodec.cpp b/dbms/Compression/tests/gtest_compressionCodec.cpp
similarity index 100%
rename from dbms/src/Compression/tests/gtest_compressionCodec.cpp
rename to dbms/Compression/tests/gtest_compressionCodec.cpp
diff --git a/dbms/src/Core/AccurateComparison.h b/dbms/Core/AccurateComparison.h
similarity index 100%
rename from dbms/src/Core/AccurateComparison.h
rename to dbms/Core/AccurateComparison.h
diff --git a/dbms/src/Core/BackgroundSchedulePool.cpp b/dbms/Core/BackgroundSchedulePool.cpp
similarity index 100%
rename from dbms/src/Core/BackgroundSchedulePool.cpp
rename to dbms/Core/BackgroundSchedulePool.cpp
diff --git a/dbms/src/Core/BackgroundSchedulePool.h b/dbms/Core/BackgroundSchedulePool.h
similarity index 100%
rename from dbms/src/Core/BackgroundSchedulePool.h
rename to dbms/Core/BackgroundSchedulePool.h
diff --git a/dbms/src/Core/Block.cpp b/dbms/Core/Block.cpp
similarity index 100%
rename from dbms/src/Core/Block.cpp
rename to dbms/Core/Block.cpp
diff --git a/dbms/src/Core/Block.h b/dbms/Core/Block.h
similarity index 100%
rename from dbms/src/Core/Block.h
rename to dbms/Core/Block.h
diff --git a/dbms/src/Core/BlockInfo.cpp b/dbms/Core/BlockInfo.cpp
similarity index 100%
rename from dbms/src/Core/BlockInfo.cpp
rename to dbms/Core/BlockInfo.cpp
diff --git a/dbms/src/Core/BlockInfo.h b/dbms/Core/BlockInfo.h
similarity index 100%
rename from dbms/src/Core/BlockInfo.h
rename to dbms/Core/BlockInfo.h
diff --git a/dbms/src/Core/CMakeLists.txt b/dbms/Core/CMakeLists.txt
similarity index 100%
rename from dbms/src/Core/CMakeLists.txt
rename to dbms/Core/CMakeLists.txt
diff --git a/dbms/src/Core/ColumnNumbers.h b/dbms/Core/ColumnNumbers.h
similarity index 100%
rename from dbms/src/Core/ColumnNumbers.h
rename to dbms/Core/ColumnNumbers.h
diff --git a/dbms/src/Core/ColumnWithTypeAndName.cpp b/dbms/Core/ColumnWithTypeAndName.cpp
similarity index 100%
rename from dbms/src/Core/ColumnWithTypeAndName.cpp
rename to dbms/Core/ColumnWithTypeAndName.cpp
diff --git a/dbms/src/Core/ColumnWithTypeAndName.h b/dbms/Core/ColumnWithTypeAndName.h
similarity index 100%
rename from dbms/src/Core/ColumnWithTypeAndName.h
rename to dbms/Core/ColumnWithTypeAndName.h
diff --git a/dbms/src/Core/ColumnsWithTypeAndName.h b/dbms/Core/ColumnsWithTypeAndName.h
similarity index 100%
rename from dbms/src/Core/ColumnsWithTypeAndName.h
rename to dbms/Core/ColumnsWithTypeAndName.h
diff --git a/dbms/src/Core/DecimalComparison.h b/dbms/Core/DecimalComparison.h
similarity index 100%
rename from dbms/src/Core/DecimalComparison.h
rename to dbms/Core/DecimalComparison.h
diff --git a/dbms/src/Core/DecimalFunctions.h b/dbms/Core/DecimalFunctions.h
similarity index 100%
rename from dbms/src/Core/DecimalFunctions.h
rename to dbms/Core/DecimalFunctions.h
diff --git a/dbms/src/Core/Defines.h b/dbms/Core/Defines.h
similarity index 100%
rename from dbms/src/Core/Defines.h
rename to dbms/Core/Defines.h
diff --git a/dbms/src/Core/ExternalResultDescription.cpp b/dbms/Core/ExternalResultDescription.cpp
similarity index 100%
rename from dbms/src/Core/ExternalResultDescription.cpp
rename to dbms/Core/ExternalResultDescription.cpp
diff --git a/dbms/src/Core/ExternalResultDescription.h b/dbms/Core/ExternalResultDescription.h
similarity index 100%
rename from dbms/src/Core/ExternalResultDescription.h
rename to dbms/Core/ExternalResultDescription.h
diff --git a/dbms/src/Core/ExternalTable.cpp b/dbms/Core/ExternalTable.cpp
similarity index 100%
rename from dbms/src/Core/ExternalTable.cpp
rename to dbms/Core/ExternalTable.cpp
diff --git a/dbms/src/Core/ExternalTable.h b/dbms/Core/ExternalTable.h
similarity index 100%
rename from dbms/src/Core/ExternalTable.h
rename to dbms/Core/ExternalTable.h
diff --git a/dbms/src/Core/Field.cpp b/dbms/Core/Field.cpp
similarity index 100%
rename from dbms/src/Core/Field.cpp
rename to dbms/Core/Field.cpp
diff --git a/dbms/src/Core/Field.h b/dbms/Core/Field.h
similarity index 100%
rename from dbms/src/Core/Field.h
rename to dbms/Core/Field.h
diff --git a/dbms/src/Core/MySQLProtocol.cpp b/dbms/Core/MySQLProtocol.cpp
similarity index 100%
rename from dbms/src/Core/MySQLProtocol.cpp
rename to dbms/Core/MySQLProtocol.cpp
diff --git a/dbms/src/Core/MySQLProtocol.h b/dbms/Core/MySQLProtocol.h
similarity index 100%
rename from dbms/src/Core/MySQLProtocol.h
rename to dbms/Core/MySQLProtocol.h
diff --git a/dbms/src/Core/Names.h b/dbms/Core/Names.h
similarity index 100%
rename from dbms/src/Core/Names.h
rename to dbms/Core/Names.h
diff --git a/dbms/src/Core/NamesAndTypes.cpp b/dbms/Core/NamesAndTypes.cpp
similarity index 100%
rename from dbms/src/Core/NamesAndTypes.cpp
rename to dbms/Core/NamesAndTypes.cpp
diff --git a/dbms/src/Core/NamesAndTypes.h b/dbms/Core/NamesAndTypes.h
similarity index 100%
rename from dbms/src/Core/NamesAndTypes.h
rename to dbms/Core/NamesAndTypes.h
diff --git a/dbms/src/Core/Protocol.h b/dbms/Core/Protocol.h
similarity index 100%
rename from dbms/src/Core/Protocol.h
rename to dbms/Core/Protocol.h
diff --git a/dbms/src/Core/QualifiedTableName.h b/dbms/Core/QualifiedTableName.h
similarity index 100%
rename from dbms/src/Core/QualifiedTableName.h
rename to dbms/Core/QualifiedTableName.h
diff --git a/dbms/src/Core/QueryProcessingStage.h b/dbms/Core/QueryProcessingStage.h
similarity index 100%
rename from dbms/src/Core/QueryProcessingStage.h
rename to dbms/Core/QueryProcessingStage.h
diff --git a/dbms/src/Core/Row.h b/dbms/Core/Row.h
similarity index 100%
rename from dbms/src/Core/Row.h
rename to dbms/Core/Row.h
diff --git a/dbms/src/Core/Settings.cpp b/dbms/Core/Settings.cpp
similarity index 100%
rename from dbms/src/Core/Settings.cpp
rename to dbms/Core/Settings.cpp
diff --git a/dbms/src/Core/Settings.h b/dbms/Core/Settings.h
similarity index 100%
rename from dbms/src/Core/Settings.h
rename to dbms/Core/Settings.h
diff --git a/dbms/src/Core/SettingsCollection.cpp b/dbms/Core/SettingsCollection.cpp
similarity index 100%
rename from dbms/src/Core/SettingsCollection.cpp
rename to dbms/Core/SettingsCollection.cpp
diff --git a/dbms/src/Core/SettingsCollection.h b/dbms/Core/SettingsCollection.h
similarity index 100%
rename from dbms/src/Core/SettingsCollection.h
rename to dbms/Core/SettingsCollection.h
diff --git a/dbms/src/Core/SettingsCollectionImpl.h b/dbms/Core/SettingsCollectionImpl.h
similarity index 100%
rename from dbms/src/Core/SettingsCollectionImpl.h
rename to dbms/Core/SettingsCollectionImpl.h
diff --git a/dbms/src/Core/SortCursor.h b/dbms/Core/SortCursor.h
similarity index 100%
rename from dbms/src/Core/SortCursor.h
rename to dbms/Core/SortCursor.h
diff --git a/dbms/src/Core/SortDescription.h b/dbms/Core/SortDescription.h
similarity index 100%
rename from dbms/src/Core/SortDescription.h
rename to dbms/Core/SortDescription.h
diff --git a/dbms/src/Core/TypeListNumber.h b/dbms/Core/TypeListNumber.h
similarity index 100%
rename from dbms/src/Core/TypeListNumber.h
rename to dbms/Core/TypeListNumber.h
diff --git a/dbms/src/Core/Types.h b/dbms/Core/Types.h
similarity index 100%
rename from dbms/src/Core/Types.h
rename to dbms/Core/Types.h
diff --git a/dbms/src/Core/UUID.h b/dbms/Core/UUID.h
similarity index 100%
rename from dbms/src/Core/UUID.h
rename to dbms/Core/UUID.h
diff --git a/dbms/src/Core/callOnTypeIndex.h b/dbms/Core/callOnTypeIndex.h
similarity index 100%
rename from dbms/src/Core/callOnTypeIndex.h
rename to dbms/Core/callOnTypeIndex.h
diff --git a/dbms/src/Core/config_core.h.in b/dbms/Core/config_core.h.in
similarity index 100%
rename from dbms/src/Core/config_core.h.in
rename to dbms/Core/config_core.h.in
diff --git a/dbms/src/Core/iostream_debug_helpers.cpp b/dbms/Core/iostream_debug_helpers.cpp
similarity index 100%
rename from dbms/src/Core/iostream_debug_helpers.cpp
rename to dbms/Core/iostream_debug_helpers.cpp
diff --git a/dbms/src/Core/iostream_debug_helpers.h b/dbms/Core/iostream_debug_helpers.h
similarity index 100%
rename from dbms/src/Core/iostream_debug_helpers.h
rename to dbms/Core/iostream_debug_helpers.h
diff --git a/dbms/src/Core/tests/CMakeLists.txt b/dbms/Core/tests/CMakeLists.txt
similarity index 100%
rename from dbms/src/Core/tests/CMakeLists.txt
rename to dbms/Core/tests/CMakeLists.txt
diff --git a/dbms/src/Core/tests/field.cpp b/dbms/Core/tests/field.cpp
similarity index 100%
rename from dbms/src/Core/tests/field.cpp
rename to dbms/Core/tests/field.cpp
diff --git a/dbms/src/Core/tests/gtest_DecimalFunctions.cpp b/dbms/Core/tests/gtest_DecimalFunctions.cpp
similarity index 100%
rename from dbms/src/Core/tests/gtest_DecimalFunctions.cpp
rename to dbms/Core/tests/gtest_DecimalFunctions.cpp
diff --git a/dbms/src/Core/tests/move_field.cpp b/dbms/Core/tests/move_field.cpp
similarity index 100%
rename from dbms/src/Core/tests/move_field.cpp
rename to dbms/Core/tests/move_field.cpp
diff --git a/dbms/src/Core/tests/string_pool.cpp b/dbms/Core/tests/string_pool.cpp
similarity index 100%
rename from dbms/src/Core/tests/string_pool.cpp
rename to dbms/Core/tests/string_pool.cpp
diff --git a/dbms/src/Core/tests/string_ref_hash.cpp b/dbms/Core/tests/string_ref_hash.cpp
similarity index 100%
rename from dbms/src/Core/tests/string_ref_hash.cpp
rename to dbms/Core/tests/string_ref_hash.cpp
diff --git a/dbms/src/DataStreams/AddingConstColumnBlockInputStream.h b/dbms/DataStreams/AddingConstColumnBlockInputStream.h
similarity index 100%
rename from dbms/src/DataStreams/AddingConstColumnBlockInputStream.h
rename to dbms/DataStreams/AddingConstColumnBlockInputStream.h
diff --git a/dbms/src/DataStreams/AddingDefaultBlockOutputStream.cpp b/dbms/DataStreams/AddingDefaultBlockOutputStream.cpp
similarity index 100%
rename from dbms/src/DataStreams/AddingDefaultBlockOutputStream.cpp
rename to dbms/DataStreams/AddingDefaultBlockOutputStream.cpp
diff --git a/dbms/src/DataStreams/AddingDefaultBlockOutputStream.h b/dbms/DataStreams/AddingDefaultBlockOutputStream.h
similarity index 100%
rename from dbms/src/DataStreams/AddingDefaultBlockOutputStream.h
rename to dbms/DataStreams/AddingDefaultBlockOutputStream.h
diff --git a/dbms/src/DataStreams/AddingDefaultsBlockInputStream.cpp b/dbms/DataStreams/AddingDefaultsBlockInputStream.cpp
similarity index 100%
rename from dbms/src/DataStreams/AddingDefaultsBlockInputStream.cpp
rename to dbms/DataStreams/AddingDefaultsBlockInputStream.cpp
diff --git a/dbms/src/DataStreams/AddingDefaultsBlockInputStream.h b/dbms/DataStreams/AddingDefaultsBlockInputStream.h
similarity index 100%
rename from dbms/src/DataStreams/AddingDefaultsBlockInputStream.h
rename to dbms/DataStreams/AddingDefaultsBlockInputStream.h
diff --git a/dbms/src/DataStreams/AggregatingBlockInputStream.cpp b/dbms/DataStreams/AggregatingBlockInputStream.cpp
similarity index 100%
rename from dbms/src/DataStreams/AggregatingBlockInputStream.cpp
rename to dbms/DataStreams/AggregatingBlockInputStream.cpp
diff --git a/dbms/src/DataStreams/AggregatingBlockInputStream.h b/dbms/DataStreams/AggregatingBlockInputStream.h
similarity index 100%
rename from dbms/src/DataStreams/AggregatingBlockInputStream.h
rename to dbms/DataStreams/AggregatingBlockInputStream.h
diff --git a/dbms/src/DataStreams/AggregatingSortedBlockInputStream.cpp b/dbms/DataStreams/AggregatingSortedBlockInputStream.cpp
similarity index 100%
rename from dbms/src/DataStreams/AggregatingSortedBlockInputStream.cpp
rename to dbms/DataStreams/AggregatingSortedBlockInputStream.cpp
diff --git a/dbms/src/DataStreams/AggregatingSortedBlockInputStream.h b/dbms/DataStreams/AggregatingSortedBlockInputStream.h
similarity index 100%
rename from dbms/src/DataStreams/AggregatingSortedBlockInputStream.h
rename to dbms/DataStreams/AggregatingSortedBlockInputStream.h
diff --git a/dbms/src/DataStreams/AsynchronousBlockInputStream.cpp b/dbms/DataStreams/AsynchronousBlockInputStream.cpp
similarity index 100%
rename from dbms/src/DataStreams/AsynchronousBlockInputStream.cpp
rename to dbms/DataStreams/AsynchronousBlockInputStream.cpp
diff --git a/dbms/src/DataStreams/AsynchronousBlockInputStream.h b/dbms/DataStreams/AsynchronousBlockInputStream.h
similarity index 100%
rename from dbms/src/DataStreams/AsynchronousBlockInputStream.h
rename to dbms/DataStreams/AsynchronousBlockInputStream.h
diff --git a/dbms/src/DataStreams/BlockIO.cpp b/dbms/DataStreams/BlockIO.cpp
similarity index 100%
rename from dbms/src/DataStreams/BlockIO.cpp
rename to dbms/DataStreams/BlockIO.cpp
diff --git a/dbms/src/DataStreams/BlockIO.h b/dbms/DataStreams/BlockIO.h
similarity index 100%
rename from dbms/src/DataStreams/BlockIO.h
rename to dbms/DataStreams/BlockIO.h
diff --git a/dbms/src/DataStreams/BlockStreamProfileInfo.cpp b/dbms/DataStreams/BlockStreamProfileInfo.cpp
similarity index 100%
rename from dbms/src/DataStreams/BlockStreamProfileInfo.cpp
rename to dbms/DataStreams/BlockStreamProfileInfo.cpp
diff --git a/dbms/src/DataStreams/BlockStreamProfileInfo.h b/dbms/DataStreams/BlockStreamProfileInfo.h
similarity index 100%
rename from dbms/src/DataStreams/BlockStreamProfileInfo.h
rename to dbms/DataStreams/BlockStreamProfileInfo.h
diff --git a/dbms/src/DataStreams/BlocksBlockInputStream.h b/dbms/DataStreams/BlocksBlockInputStream.h
similarity index 100%
rename from dbms/src/DataStreams/BlocksBlockInputStream.h
rename to dbms/DataStreams/BlocksBlockInputStream.h
diff --git a/dbms/src/DataStreams/BlocksListBlockInputStream.h b/dbms/DataStreams/BlocksListBlockInputStream.h
similarity index 100%
rename from dbms/src/DataStreams/BlocksListBlockInputStream.h
rename to dbms/DataStreams/BlocksListBlockInputStream.h
diff --git a/dbms/src/DataStreams/CMakeLists.txt b/dbms/DataStreams/CMakeLists.txt
similarity index 100%
rename from dbms/src/DataStreams/CMakeLists.txt
rename to dbms/DataStreams/CMakeLists.txt
diff --git a/dbms/src/DataStreams/CheckConstraintsBlockOutputStream.cpp b/dbms/DataStreams/CheckConstraintsBlockOutputStream.cpp
similarity index 100%
rename from dbms/src/DataStreams/CheckConstraintsBlockOutputStream.cpp
rename to dbms/DataStreams/CheckConstraintsBlockOutputStream.cpp
diff --git a/dbms/src/DataStreams/CheckConstraintsBlockOutputStream.h b/dbms/DataStreams/CheckConstraintsBlockOutputStream.h
similarity index 100%
rename from dbms/src/DataStreams/CheckConstraintsBlockOutputStream.h
rename to dbms/DataStreams/CheckConstraintsBlockOutputStream.h
diff --git a/dbms/src/DataStreams/CheckSortedBlockInputStream.cpp b/dbms/DataStreams/CheckSortedBlockInputStream.cpp
similarity index 100%
rename from dbms/src/DataStreams/CheckSortedBlockInputStream.cpp
rename to dbms/DataStreams/CheckSortedBlockInputStream.cpp
diff --git a/dbms/src/DataStreams/CheckSortedBlockInputStream.h b/dbms/DataStreams/CheckSortedBlockInputStream.h
similarity index 100%
rename from dbms/src/DataStreams/CheckSortedBlockInputStream.h
rename to dbms/DataStreams/CheckSortedBlockInputStream.h
diff --git a/dbms/src/DataStreams/CollapsingFinalBlockInputStream.cpp b/dbms/DataStreams/CollapsingFinalBlockInputStream.cpp
similarity index 100%
rename from dbms/src/DataStreams/CollapsingFinalBlockInputStream.cpp
rename to dbms/DataStreams/CollapsingFinalBlockInputStream.cpp
diff --git a/dbms/src/DataStreams/CollapsingFinalBlockInputStream.h b/dbms/DataStreams/CollapsingFinalBlockInputStream.h
similarity index 100%
rename from dbms/src/DataStreams/CollapsingFinalBlockInputStream.h
rename to dbms/DataStreams/CollapsingFinalBlockInputStream.h
diff --git a/dbms/src/DataStreams/CollapsingSortedBlockInputStream.cpp b/dbms/DataStreams/CollapsingSortedBlockInputStream.cpp
similarity index 100%
rename from dbms/src/DataStreams/CollapsingSortedBlockInputStream.cpp
rename to dbms/DataStreams/CollapsingSortedBlockInputStream.cpp
diff --git a/dbms/src/DataStreams/CollapsingSortedBlockInputStream.h b/dbms/DataStreams/CollapsingSortedBlockInputStream.h
similarity index 100%
rename from dbms/src/DataStreams/CollapsingSortedBlockInputStream.h
rename to dbms/DataStreams/CollapsingSortedBlockInputStream.h
diff --git a/dbms/src/DataStreams/ColumnGathererStream.cpp b/dbms/DataStreams/ColumnGathererStream.cpp
similarity index 100%
rename from dbms/src/DataStreams/ColumnGathererStream.cpp
rename to dbms/DataStreams/ColumnGathererStream.cpp
diff --git a/dbms/src/DataStreams/ColumnGathererStream.h b/dbms/DataStreams/ColumnGathererStream.h
similarity index 100%
rename from dbms/src/DataStreams/ColumnGathererStream.h
rename to dbms/DataStreams/ColumnGathererStream.h
diff --git a/dbms/src/DataStreams/ConcatBlockInputStream.h b/dbms/DataStreams/ConcatBlockInputStream.h
similarity index 100%
rename from dbms/src/DataStreams/ConcatBlockInputStream.h
rename to dbms/DataStreams/ConcatBlockInputStream.h
diff --git a/dbms/src/DataStreams/ConvertColumnLowCardinalityToFullBlockInputStream.h b/dbms/DataStreams/ConvertColumnLowCardinalityToFullBlockInputStream.h
similarity index 100%
rename from dbms/src/DataStreams/ConvertColumnLowCardinalityToFullBlockInputStream.h
rename to dbms/DataStreams/ConvertColumnLowCardinalityToFullBlockInputStream.h
diff --git a/dbms/src/DataStreams/ConvertingBlockInputStream.cpp b/dbms/DataStreams/ConvertingBlockInputStream.cpp
similarity index 100%
rename from dbms/src/DataStreams/ConvertingBlockInputStream.cpp rename to dbms/DataStreams/ConvertingBlockInputStream.cpp diff --git a/dbms/src/DataStreams/ConvertingBlockInputStream.h b/dbms/DataStreams/ConvertingBlockInputStream.h similarity index 100% rename from dbms/src/DataStreams/ConvertingBlockInputStream.h rename to dbms/DataStreams/ConvertingBlockInputStream.h diff --git a/dbms/src/DataStreams/CountingBlockOutputStream.cpp b/dbms/DataStreams/CountingBlockOutputStream.cpp similarity index 100% rename from dbms/src/DataStreams/CountingBlockOutputStream.cpp rename to dbms/DataStreams/CountingBlockOutputStream.cpp diff --git a/dbms/src/DataStreams/CountingBlockOutputStream.h b/dbms/DataStreams/CountingBlockOutputStream.h similarity index 100% rename from dbms/src/DataStreams/CountingBlockOutputStream.h rename to dbms/DataStreams/CountingBlockOutputStream.h diff --git a/dbms/src/DataStreams/CreatingSetsBlockInputStream.cpp b/dbms/DataStreams/CreatingSetsBlockInputStream.cpp similarity index 100% rename from dbms/src/DataStreams/CreatingSetsBlockInputStream.cpp rename to dbms/DataStreams/CreatingSetsBlockInputStream.cpp diff --git a/dbms/src/DataStreams/CreatingSetsBlockInputStream.h b/dbms/DataStreams/CreatingSetsBlockInputStream.h similarity index 100% rename from dbms/src/DataStreams/CreatingSetsBlockInputStream.h rename to dbms/DataStreams/CreatingSetsBlockInputStream.h diff --git a/dbms/src/DataStreams/CubeBlockInputStream.cpp b/dbms/DataStreams/CubeBlockInputStream.cpp similarity index 100% rename from dbms/src/DataStreams/CubeBlockInputStream.cpp rename to dbms/DataStreams/CubeBlockInputStream.cpp diff --git a/dbms/src/DataStreams/CubeBlockInputStream.h b/dbms/DataStreams/CubeBlockInputStream.h similarity index 100% rename from dbms/src/DataStreams/CubeBlockInputStream.h rename to dbms/DataStreams/CubeBlockInputStream.h diff --git a/dbms/src/DataStreams/DistinctBlockInputStream.cpp b/dbms/DataStreams/DistinctBlockInputStream.cpp similarity index 100% rename from dbms/src/DataStreams/DistinctBlockInputStream.cpp rename to dbms/DataStreams/DistinctBlockInputStream.cpp diff --git a/dbms/src/DataStreams/DistinctBlockInputStream.h b/dbms/DataStreams/DistinctBlockInputStream.h similarity index 100% rename from dbms/src/DataStreams/DistinctBlockInputStream.h rename to dbms/DataStreams/DistinctBlockInputStream.h diff --git a/dbms/src/DataStreams/DistinctSortedBlockInputStream.cpp b/dbms/DataStreams/DistinctSortedBlockInputStream.cpp similarity index 100% rename from dbms/src/DataStreams/DistinctSortedBlockInputStream.cpp rename to dbms/DataStreams/DistinctSortedBlockInputStream.cpp diff --git a/dbms/src/DataStreams/DistinctSortedBlockInputStream.h b/dbms/DataStreams/DistinctSortedBlockInputStream.h similarity index 100% rename from dbms/src/DataStreams/DistinctSortedBlockInputStream.h rename to dbms/DataStreams/DistinctSortedBlockInputStream.h diff --git a/dbms/src/DataStreams/ExecutionSpeedLimits.cpp b/dbms/DataStreams/ExecutionSpeedLimits.cpp similarity index 100% rename from dbms/src/DataStreams/ExecutionSpeedLimits.cpp rename to dbms/DataStreams/ExecutionSpeedLimits.cpp diff --git a/dbms/src/DataStreams/ExecutionSpeedLimits.h b/dbms/DataStreams/ExecutionSpeedLimits.h similarity index 100% rename from dbms/src/DataStreams/ExecutionSpeedLimits.h rename to dbms/DataStreams/ExecutionSpeedLimits.h diff --git a/dbms/src/DataStreams/ExpressionBlockInputStream.cpp b/dbms/DataStreams/ExpressionBlockInputStream.cpp similarity index 100% rename from 
dbms/src/DataStreams/ExpressionBlockInputStream.cpp rename to dbms/DataStreams/ExpressionBlockInputStream.cpp diff --git a/dbms/src/DataStreams/ExpressionBlockInputStream.h b/dbms/DataStreams/ExpressionBlockInputStream.h similarity index 100% rename from dbms/src/DataStreams/ExpressionBlockInputStream.h rename to dbms/DataStreams/ExpressionBlockInputStream.h diff --git a/dbms/src/DataStreams/FillingBlockInputStream.cpp b/dbms/DataStreams/FillingBlockInputStream.cpp similarity index 100% rename from dbms/src/DataStreams/FillingBlockInputStream.cpp rename to dbms/DataStreams/FillingBlockInputStream.cpp diff --git a/dbms/src/DataStreams/FillingBlockInputStream.h b/dbms/DataStreams/FillingBlockInputStream.h similarity index 100% rename from dbms/src/DataStreams/FillingBlockInputStream.h rename to dbms/DataStreams/FillingBlockInputStream.h diff --git a/dbms/src/DataStreams/FilterBlockInputStream.cpp b/dbms/DataStreams/FilterBlockInputStream.cpp similarity index 100% rename from dbms/src/DataStreams/FilterBlockInputStream.cpp rename to dbms/DataStreams/FilterBlockInputStream.cpp diff --git a/dbms/src/DataStreams/FilterBlockInputStream.h b/dbms/DataStreams/FilterBlockInputStream.h similarity index 100% rename from dbms/src/DataStreams/FilterBlockInputStream.h rename to dbms/DataStreams/FilterBlockInputStream.h diff --git a/dbms/src/DataStreams/FilterColumnsBlockInputStream.cpp b/dbms/DataStreams/FilterColumnsBlockInputStream.cpp similarity index 100% rename from dbms/src/DataStreams/FilterColumnsBlockInputStream.cpp rename to dbms/DataStreams/FilterColumnsBlockInputStream.cpp diff --git a/dbms/src/DataStreams/FilterColumnsBlockInputStream.h b/dbms/DataStreams/FilterColumnsBlockInputStream.h similarity index 100% rename from dbms/src/DataStreams/FilterColumnsBlockInputStream.h rename to dbms/DataStreams/FilterColumnsBlockInputStream.h diff --git a/dbms/src/DataStreams/FinishSortingBlockInputStream.cpp b/dbms/DataStreams/FinishSortingBlockInputStream.cpp similarity index 100% rename from dbms/src/DataStreams/FinishSortingBlockInputStream.cpp rename to dbms/DataStreams/FinishSortingBlockInputStream.cpp diff --git a/dbms/src/DataStreams/FinishSortingBlockInputStream.h b/dbms/DataStreams/FinishSortingBlockInputStream.h similarity index 100% rename from dbms/src/DataStreams/FinishSortingBlockInputStream.h rename to dbms/DataStreams/FinishSortingBlockInputStream.h diff --git a/dbms/src/DataStreams/GraphiteRollupSortedBlockInputStream.cpp b/dbms/DataStreams/GraphiteRollupSortedBlockInputStream.cpp similarity index 100% rename from dbms/src/DataStreams/GraphiteRollupSortedBlockInputStream.cpp rename to dbms/DataStreams/GraphiteRollupSortedBlockInputStream.cpp diff --git a/dbms/src/DataStreams/GraphiteRollupSortedBlockInputStream.h b/dbms/DataStreams/GraphiteRollupSortedBlockInputStream.h similarity index 100% rename from dbms/src/DataStreams/GraphiteRollupSortedBlockInputStream.h rename to dbms/DataStreams/GraphiteRollupSortedBlockInputStream.h diff --git a/dbms/src/DataStreams/IBlockInputStream.cpp b/dbms/DataStreams/IBlockInputStream.cpp similarity index 100% rename from dbms/src/DataStreams/IBlockInputStream.cpp rename to dbms/DataStreams/IBlockInputStream.cpp diff --git a/dbms/src/DataStreams/IBlockInputStream.h b/dbms/DataStreams/IBlockInputStream.h similarity index 100% rename from dbms/src/DataStreams/IBlockInputStream.h rename to dbms/DataStreams/IBlockInputStream.h diff --git a/dbms/src/DataStreams/IBlockOutputStream.h b/dbms/DataStreams/IBlockOutputStream.h similarity index 100% rename from 
dbms/src/DataStreams/IBlockOutputStream.h rename to dbms/DataStreams/IBlockOutputStream.h diff --git a/dbms/src/DataStreams/IBlockStream_fwd.h b/dbms/DataStreams/IBlockStream_fwd.h similarity index 100% rename from dbms/src/DataStreams/IBlockStream_fwd.h rename to dbms/DataStreams/IBlockStream_fwd.h diff --git a/dbms/src/DataStreams/InputStreamFromASTInsertQuery.cpp b/dbms/DataStreams/InputStreamFromASTInsertQuery.cpp similarity index 100% rename from dbms/src/DataStreams/InputStreamFromASTInsertQuery.cpp rename to dbms/DataStreams/InputStreamFromASTInsertQuery.cpp diff --git a/dbms/src/DataStreams/InputStreamFromASTInsertQuery.h b/dbms/DataStreams/InputStreamFromASTInsertQuery.h similarity index 100% rename from dbms/src/DataStreams/InputStreamFromASTInsertQuery.h rename to dbms/DataStreams/InputStreamFromASTInsertQuery.h diff --git a/dbms/src/DataStreams/InternalTextLogsRowOutputStream.cpp b/dbms/DataStreams/InternalTextLogsRowOutputStream.cpp similarity index 100% rename from dbms/src/DataStreams/InternalTextLogsRowOutputStream.cpp rename to dbms/DataStreams/InternalTextLogsRowOutputStream.cpp diff --git a/dbms/src/DataStreams/InternalTextLogsRowOutputStream.h b/dbms/DataStreams/InternalTextLogsRowOutputStream.h similarity index 100% rename from dbms/src/DataStreams/InternalTextLogsRowOutputStream.h rename to dbms/DataStreams/InternalTextLogsRowOutputStream.h diff --git a/dbms/src/DataStreams/LazyBlockInputStream.h b/dbms/DataStreams/LazyBlockInputStream.h similarity index 100% rename from dbms/src/DataStreams/LazyBlockInputStream.h rename to dbms/DataStreams/LazyBlockInputStream.h diff --git a/dbms/src/DataStreams/LimitBlockInputStream.cpp b/dbms/DataStreams/LimitBlockInputStream.cpp similarity index 100% rename from dbms/src/DataStreams/LimitBlockInputStream.cpp rename to dbms/DataStreams/LimitBlockInputStream.cpp diff --git a/dbms/src/DataStreams/LimitBlockInputStream.h b/dbms/DataStreams/LimitBlockInputStream.h similarity index 100% rename from dbms/src/DataStreams/LimitBlockInputStream.h rename to dbms/DataStreams/LimitBlockInputStream.h diff --git a/dbms/src/DataStreams/LimitByBlockInputStream.cpp b/dbms/DataStreams/LimitByBlockInputStream.cpp similarity index 100% rename from dbms/src/DataStreams/LimitByBlockInputStream.cpp rename to dbms/DataStreams/LimitByBlockInputStream.cpp diff --git a/dbms/src/DataStreams/LimitByBlockInputStream.h b/dbms/DataStreams/LimitByBlockInputStream.h similarity index 100% rename from dbms/src/DataStreams/LimitByBlockInputStream.h rename to dbms/DataStreams/LimitByBlockInputStream.h diff --git a/dbms/src/DataStreams/MarkInCompressedFile.h b/dbms/DataStreams/MarkInCompressedFile.h similarity index 100% rename from dbms/src/DataStreams/MarkInCompressedFile.h rename to dbms/DataStreams/MarkInCompressedFile.h diff --git a/dbms/src/DataStreams/MaterializingBlockInputStream.cpp b/dbms/DataStreams/MaterializingBlockInputStream.cpp similarity index 100% rename from dbms/src/DataStreams/MaterializingBlockInputStream.cpp rename to dbms/DataStreams/MaterializingBlockInputStream.cpp diff --git a/dbms/src/DataStreams/MaterializingBlockInputStream.h b/dbms/DataStreams/MaterializingBlockInputStream.h similarity index 100% rename from dbms/src/DataStreams/MaterializingBlockInputStream.h rename to dbms/DataStreams/MaterializingBlockInputStream.h diff --git a/dbms/src/DataStreams/MaterializingBlockOutputStream.h b/dbms/DataStreams/MaterializingBlockOutputStream.h similarity index 100% rename from dbms/src/DataStreams/MaterializingBlockOutputStream.h rename to 
dbms/DataStreams/MaterializingBlockOutputStream.h diff --git a/dbms/src/DataStreams/MergeSortingBlockInputStream.cpp b/dbms/DataStreams/MergeSortingBlockInputStream.cpp similarity index 100% rename from dbms/src/DataStreams/MergeSortingBlockInputStream.cpp rename to dbms/DataStreams/MergeSortingBlockInputStream.cpp diff --git a/dbms/src/DataStreams/MergeSortingBlockInputStream.h b/dbms/DataStreams/MergeSortingBlockInputStream.h similarity index 100% rename from dbms/src/DataStreams/MergeSortingBlockInputStream.h rename to dbms/DataStreams/MergeSortingBlockInputStream.h diff --git a/dbms/src/DataStreams/MergingAggregatedBlockInputStream.cpp b/dbms/DataStreams/MergingAggregatedBlockInputStream.cpp similarity index 100% rename from dbms/src/DataStreams/MergingAggregatedBlockInputStream.cpp rename to dbms/DataStreams/MergingAggregatedBlockInputStream.cpp diff --git a/dbms/src/DataStreams/MergingAggregatedBlockInputStream.h b/dbms/DataStreams/MergingAggregatedBlockInputStream.h similarity index 100% rename from dbms/src/DataStreams/MergingAggregatedBlockInputStream.h rename to dbms/DataStreams/MergingAggregatedBlockInputStream.h diff --git a/dbms/src/DataStreams/MergingAggregatedMemoryEfficientBlockInputStream.cpp b/dbms/DataStreams/MergingAggregatedMemoryEfficientBlockInputStream.cpp similarity index 100% rename from dbms/src/DataStreams/MergingAggregatedMemoryEfficientBlockInputStream.cpp rename to dbms/DataStreams/MergingAggregatedMemoryEfficientBlockInputStream.cpp diff --git a/dbms/src/DataStreams/MergingAggregatedMemoryEfficientBlockInputStream.h b/dbms/DataStreams/MergingAggregatedMemoryEfficientBlockInputStream.h similarity index 100% rename from dbms/src/DataStreams/MergingAggregatedMemoryEfficientBlockInputStream.h rename to dbms/DataStreams/MergingAggregatedMemoryEfficientBlockInputStream.h diff --git a/dbms/src/DataStreams/MergingSortedBlockInputStream.cpp b/dbms/DataStreams/MergingSortedBlockInputStream.cpp similarity index 100% rename from dbms/src/DataStreams/MergingSortedBlockInputStream.cpp rename to dbms/DataStreams/MergingSortedBlockInputStream.cpp diff --git a/dbms/src/DataStreams/MergingSortedBlockInputStream.h b/dbms/DataStreams/MergingSortedBlockInputStream.h similarity index 100% rename from dbms/src/DataStreams/MergingSortedBlockInputStream.h rename to dbms/DataStreams/MergingSortedBlockInputStream.h diff --git a/dbms/src/DataStreams/NativeBlockInputStream.cpp b/dbms/DataStreams/NativeBlockInputStream.cpp similarity index 100% rename from dbms/src/DataStreams/NativeBlockInputStream.cpp rename to dbms/DataStreams/NativeBlockInputStream.cpp diff --git a/dbms/src/DataStreams/NativeBlockInputStream.h b/dbms/DataStreams/NativeBlockInputStream.h similarity index 100% rename from dbms/src/DataStreams/NativeBlockInputStream.h rename to dbms/DataStreams/NativeBlockInputStream.h diff --git a/dbms/src/DataStreams/NativeBlockOutputStream.cpp b/dbms/DataStreams/NativeBlockOutputStream.cpp similarity index 100% rename from dbms/src/DataStreams/NativeBlockOutputStream.cpp rename to dbms/DataStreams/NativeBlockOutputStream.cpp diff --git a/dbms/src/DataStreams/NativeBlockOutputStream.h b/dbms/DataStreams/NativeBlockOutputStream.h similarity index 100% rename from dbms/src/DataStreams/NativeBlockOutputStream.h rename to dbms/DataStreams/NativeBlockOutputStream.h diff --git a/dbms/src/DataStreams/NullAndDoCopyBlockInputStream.h b/dbms/DataStreams/NullAndDoCopyBlockInputStream.h similarity index 100% rename from dbms/src/DataStreams/NullAndDoCopyBlockInputStream.h rename to 
dbms/DataStreams/NullAndDoCopyBlockInputStream.h diff --git a/dbms/src/DataStreams/NullBlockInputStream.h b/dbms/DataStreams/NullBlockInputStream.h similarity index 100% rename from dbms/src/DataStreams/NullBlockInputStream.h rename to dbms/DataStreams/NullBlockInputStream.h diff --git a/dbms/src/DataStreams/NullBlockOutputStream.h b/dbms/DataStreams/NullBlockOutputStream.h similarity index 100% rename from dbms/src/DataStreams/NullBlockOutputStream.h rename to dbms/DataStreams/NullBlockOutputStream.h diff --git a/dbms/src/DataStreams/OneBlockInputStream.h b/dbms/DataStreams/OneBlockInputStream.h similarity index 100% rename from dbms/src/DataStreams/OneBlockInputStream.h rename to dbms/DataStreams/OneBlockInputStream.h diff --git a/dbms/src/DataStreams/OwningBlockInputStream.h b/dbms/DataStreams/OwningBlockInputStream.h similarity index 100% rename from dbms/src/DataStreams/OwningBlockInputStream.h rename to dbms/DataStreams/OwningBlockInputStream.h diff --git a/dbms/src/DataStreams/ParallelAggregatingBlockInputStream.cpp b/dbms/DataStreams/ParallelAggregatingBlockInputStream.cpp similarity index 100% rename from dbms/src/DataStreams/ParallelAggregatingBlockInputStream.cpp rename to dbms/DataStreams/ParallelAggregatingBlockInputStream.cpp diff --git a/dbms/src/DataStreams/ParallelAggregatingBlockInputStream.h b/dbms/DataStreams/ParallelAggregatingBlockInputStream.h similarity index 100% rename from dbms/src/DataStreams/ParallelAggregatingBlockInputStream.h rename to dbms/DataStreams/ParallelAggregatingBlockInputStream.h diff --git a/dbms/src/DataStreams/ParallelInputsProcessor.h b/dbms/DataStreams/ParallelInputsProcessor.h similarity index 100% rename from dbms/src/DataStreams/ParallelInputsProcessor.h rename to dbms/DataStreams/ParallelInputsProcessor.h diff --git a/dbms/src/DataStreams/ParallelParsingBlockInputStream.cpp b/dbms/DataStreams/ParallelParsingBlockInputStream.cpp similarity index 100% rename from dbms/src/DataStreams/ParallelParsingBlockInputStream.cpp rename to dbms/DataStreams/ParallelParsingBlockInputStream.cpp diff --git a/dbms/src/DataStreams/ParallelParsingBlockInputStream.h b/dbms/DataStreams/ParallelParsingBlockInputStream.h similarity index 100% rename from dbms/src/DataStreams/ParallelParsingBlockInputStream.h rename to dbms/DataStreams/ParallelParsingBlockInputStream.h diff --git a/dbms/src/DataStreams/PartialSortingBlockInputStream.cpp b/dbms/DataStreams/PartialSortingBlockInputStream.cpp similarity index 100% rename from dbms/src/DataStreams/PartialSortingBlockInputStream.cpp rename to dbms/DataStreams/PartialSortingBlockInputStream.cpp diff --git a/dbms/src/DataStreams/PartialSortingBlockInputStream.h b/dbms/DataStreams/PartialSortingBlockInputStream.h similarity index 100% rename from dbms/src/DataStreams/PartialSortingBlockInputStream.h rename to dbms/DataStreams/PartialSortingBlockInputStream.h diff --git a/dbms/src/DataStreams/PushingToViewsBlockOutputStream.cpp b/dbms/DataStreams/PushingToViewsBlockOutputStream.cpp similarity index 100% rename from dbms/src/DataStreams/PushingToViewsBlockOutputStream.cpp rename to dbms/DataStreams/PushingToViewsBlockOutputStream.cpp diff --git a/dbms/src/DataStreams/PushingToViewsBlockOutputStream.h b/dbms/DataStreams/PushingToViewsBlockOutputStream.h similarity index 100% rename from dbms/src/DataStreams/PushingToViewsBlockOutputStream.h rename to dbms/DataStreams/PushingToViewsBlockOutputStream.h diff --git a/dbms/src/DataStreams/RemoteBlockInputStream.cpp b/dbms/DataStreams/RemoteBlockInputStream.cpp similarity index 
100% rename from dbms/src/DataStreams/RemoteBlockInputStream.cpp rename to dbms/DataStreams/RemoteBlockInputStream.cpp diff --git a/dbms/src/DataStreams/RemoteBlockInputStream.h b/dbms/DataStreams/RemoteBlockInputStream.h similarity index 100% rename from dbms/src/DataStreams/RemoteBlockInputStream.h rename to dbms/DataStreams/RemoteBlockInputStream.h diff --git a/dbms/src/DataStreams/RemoteBlockOutputStream.cpp b/dbms/DataStreams/RemoteBlockOutputStream.cpp similarity index 100% rename from dbms/src/DataStreams/RemoteBlockOutputStream.cpp rename to dbms/DataStreams/RemoteBlockOutputStream.cpp diff --git a/dbms/src/DataStreams/RemoteBlockOutputStream.h b/dbms/DataStreams/RemoteBlockOutputStream.h similarity index 100% rename from dbms/src/DataStreams/RemoteBlockOutputStream.h rename to dbms/DataStreams/RemoteBlockOutputStream.h diff --git a/dbms/src/DataStreams/ReplacingSortedBlockInputStream.cpp b/dbms/DataStreams/ReplacingSortedBlockInputStream.cpp similarity index 100% rename from dbms/src/DataStreams/ReplacingSortedBlockInputStream.cpp rename to dbms/DataStreams/ReplacingSortedBlockInputStream.cpp diff --git a/dbms/src/DataStreams/ReplacingSortedBlockInputStream.h b/dbms/DataStreams/ReplacingSortedBlockInputStream.h similarity index 100% rename from dbms/src/DataStreams/ReplacingSortedBlockInputStream.h rename to dbms/DataStreams/ReplacingSortedBlockInputStream.h diff --git a/dbms/src/DataStreams/ReverseBlockInputStream.cpp b/dbms/DataStreams/ReverseBlockInputStream.cpp similarity index 100% rename from dbms/src/DataStreams/ReverseBlockInputStream.cpp rename to dbms/DataStreams/ReverseBlockInputStream.cpp diff --git a/dbms/src/DataStreams/ReverseBlockInputStream.h b/dbms/DataStreams/ReverseBlockInputStream.h similarity index 100% rename from dbms/src/DataStreams/ReverseBlockInputStream.h rename to dbms/DataStreams/ReverseBlockInputStream.h diff --git a/dbms/src/DataStreams/RollupBlockInputStream.cpp b/dbms/DataStreams/RollupBlockInputStream.cpp similarity index 100% rename from dbms/src/DataStreams/RollupBlockInputStream.cpp rename to dbms/DataStreams/RollupBlockInputStream.cpp diff --git a/dbms/src/DataStreams/RollupBlockInputStream.h b/dbms/DataStreams/RollupBlockInputStream.h similarity index 100% rename from dbms/src/DataStreams/RollupBlockInputStream.h rename to dbms/DataStreams/RollupBlockInputStream.h diff --git a/dbms/src/DataStreams/SizeLimits.cpp b/dbms/DataStreams/SizeLimits.cpp similarity index 100% rename from dbms/src/DataStreams/SizeLimits.cpp rename to dbms/DataStreams/SizeLimits.cpp diff --git a/dbms/src/DataStreams/SizeLimits.h b/dbms/DataStreams/SizeLimits.h similarity index 100% rename from dbms/src/DataStreams/SizeLimits.h rename to dbms/DataStreams/SizeLimits.h diff --git a/dbms/src/DataStreams/SquashingBlockInputStream.cpp b/dbms/DataStreams/SquashingBlockInputStream.cpp similarity index 100% rename from dbms/src/DataStreams/SquashingBlockInputStream.cpp rename to dbms/DataStreams/SquashingBlockInputStream.cpp diff --git a/dbms/src/DataStreams/SquashingBlockInputStream.h b/dbms/DataStreams/SquashingBlockInputStream.h similarity index 100% rename from dbms/src/DataStreams/SquashingBlockInputStream.h rename to dbms/DataStreams/SquashingBlockInputStream.h diff --git a/dbms/src/DataStreams/SquashingBlockOutputStream.cpp b/dbms/DataStreams/SquashingBlockOutputStream.cpp similarity index 100% rename from dbms/src/DataStreams/SquashingBlockOutputStream.cpp rename to dbms/DataStreams/SquashingBlockOutputStream.cpp diff --git 
a/dbms/src/DataStreams/SquashingBlockOutputStream.h b/dbms/DataStreams/SquashingBlockOutputStream.h similarity index 100% rename from dbms/src/DataStreams/SquashingBlockOutputStream.h rename to dbms/DataStreams/SquashingBlockOutputStream.h diff --git a/dbms/src/DataStreams/SquashingTransform.cpp b/dbms/DataStreams/SquashingTransform.cpp similarity index 100% rename from dbms/src/DataStreams/SquashingTransform.cpp rename to dbms/DataStreams/SquashingTransform.cpp diff --git a/dbms/src/DataStreams/SquashingTransform.h b/dbms/DataStreams/SquashingTransform.h similarity index 100% rename from dbms/src/DataStreams/SquashingTransform.h rename to dbms/DataStreams/SquashingTransform.h diff --git a/dbms/src/DataStreams/SummingSortedBlockInputStream.cpp b/dbms/DataStreams/SummingSortedBlockInputStream.cpp similarity index 100% rename from dbms/src/DataStreams/SummingSortedBlockInputStream.cpp rename to dbms/DataStreams/SummingSortedBlockInputStream.cpp diff --git a/dbms/src/DataStreams/SummingSortedBlockInputStream.h b/dbms/DataStreams/SummingSortedBlockInputStream.h similarity index 100% rename from dbms/src/DataStreams/SummingSortedBlockInputStream.h rename to dbms/DataStreams/SummingSortedBlockInputStream.h diff --git a/dbms/src/DataStreams/TTLBlockInputStream.cpp b/dbms/DataStreams/TTLBlockInputStream.cpp similarity index 100% rename from dbms/src/DataStreams/TTLBlockInputStream.cpp rename to dbms/DataStreams/TTLBlockInputStream.cpp diff --git a/dbms/src/DataStreams/TTLBlockInputStream.h b/dbms/DataStreams/TTLBlockInputStream.h similarity index 100% rename from dbms/src/DataStreams/TTLBlockInputStream.h rename to dbms/DataStreams/TTLBlockInputStream.h diff --git a/dbms/src/DataStreams/TemporaryFileStream.h b/dbms/DataStreams/TemporaryFileStream.h similarity index 100% rename from dbms/src/DataStreams/TemporaryFileStream.h rename to dbms/DataStreams/TemporaryFileStream.h diff --git a/dbms/src/DataStreams/TotalsHavingBlockInputStream.cpp b/dbms/DataStreams/TotalsHavingBlockInputStream.cpp similarity index 100% rename from dbms/src/DataStreams/TotalsHavingBlockInputStream.cpp rename to dbms/DataStreams/TotalsHavingBlockInputStream.cpp diff --git a/dbms/src/DataStreams/TotalsHavingBlockInputStream.h b/dbms/DataStreams/TotalsHavingBlockInputStream.h similarity index 100% rename from dbms/src/DataStreams/TotalsHavingBlockInputStream.h rename to dbms/DataStreams/TotalsHavingBlockInputStream.h diff --git a/dbms/src/DataStreams/UnionBlockInputStream.h b/dbms/DataStreams/UnionBlockInputStream.h similarity index 100% rename from dbms/src/DataStreams/UnionBlockInputStream.h rename to dbms/DataStreams/UnionBlockInputStream.h diff --git a/dbms/src/DataStreams/VersionedCollapsingSortedBlockInputStream.cpp b/dbms/DataStreams/VersionedCollapsingSortedBlockInputStream.cpp similarity index 100% rename from dbms/src/DataStreams/VersionedCollapsingSortedBlockInputStream.cpp rename to dbms/DataStreams/VersionedCollapsingSortedBlockInputStream.cpp diff --git a/dbms/src/DataStreams/VersionedCollapsingSortedBlockInputStream.h b/dbms/DataStreams/VersionedCollapsingSortedBlockInputStream.h similarity index 100% rename from dbms/src/DataStreams/VersionedCollapsingSortedBlockInputStream.h rename to dbms/DataStreams/VersionedCollapsingSortedBlockInputStream.h diff --git a/dbms/src/DataStreams/copyData.cpp b/dbms/DataStreams/copyData.cpp similarity index 100% rename from dbms/src/DataStreams/copyData.cpp rename to dbms/DataStreams/copyData.cpp diff --git a/dbms/src/DataStreams/copyData.h b/dbms/DataStreams/copyData.h 
similarity index 100% rename from dbms/src/DataStreams/copyData.h rename to dbms/DataStreams/copyData.h diff --git a/dbms/src/DataStreams/finalizeBlock.cpp b/dbms/DataStreams/finalizeBlock.cpp similarity index 100% rename from dbms/src/DataStreams/finalizeBlock.cpp rename to dbms/DataStreams/finalizeBlock.cpp diff --git a/dbms/src/DataStreams/finalizeBlock.h b/dbms/DataStreams/finalizeBlock.h similarity index 100% rename from dbms/src/DataStreams/finalizeBlock.h rename to dbms/DataStreams/finalizeBlock.h diff --git a/dbms/src/DataStreams/materializeBlock.cpp b/dbms/DataStreams/materializeBlock.cpp similarity index 100% rename from dbms/src/DataStreams/materializeBlock.cpp rename to dbms/DataStreams/materializeBlock.cpp diff --git a/dbms/src/DataStreams/materializeBlock.h b/dbms/DataStreams/materializeBlock.h similarity index 100% rename from dbms/src/DataStreams/materializeBlock.h rename to dbms/DataStreams/materializeBlock.h diff --git a/dbms/src/DataStreams/narrowBlockInputStreams.cpp b/dbms/DataStreams/narrowBlockInputStreams.cpp similarity index 100% rename from dbms/src/DataStreams/narrowBlockInputStreams.cpp rename to dbms/DataStreams/narrowBlockInputStreams.cpp diff --git a/dbms/src/DataStreams/narrowBlockInputStreams.h b/dbms/DataStreams/narrowBlockInputStreams.h similarity index 100% rename from dbms/src/DataStreams/narrowBlockInputStreams.h rename to dbms/DataStreams/narrowBlockInputStreams.h diff --git a/dbms/src/DataStreams/processConstants.cpp b/dbms/DataStreams/processConstants.cpp similarity index 100% rename from dbms/src/DataStreams/processConstants.cpp rename to dbms/DataStreams/processConstants.cpp diff --git a/dbms/src/DataStreams/processConstants.h b/dbms/DataStreams/processConstants.h similarity index 100% rename from dbms/src/DataStreams/processConstants.h rename to dbms/DataStreams/processConstants.h diff --git a/dbms/src/DataStreams/tests/CMakeLists.txt b/dbms/DataStreams/tests/CMakeLists.txt similarity index 100% rename from dbms/src/DataStreams/tests/CMakeLists.txt rename to dbms/DataStreams/tests/CMakeLists.txt diff --git a/dbms/src/DataStreams/tests/collapsing_sorted_stream.cpp b/dbms/DataStreams/tests/collapsing_sorted_stream.cpp similarity index 100% rename from dbms/src/DataStreams/tests/collapsing_sorted_stream.cpp rename to dbms/DataStreams/tests/collapsing_sorted_stream.cpp diff --git a/dbms/src/DataStreams/tests/expression_stream.cpp b/dbms/DataStreams/tests/expression_stream.cpp similarity index 100% rename from dbms/src/DataStreams/tests/expression_stream.cpp rename to dbms/DataStreams/tests/expression_stream.cpp diff --git a/dbms/src/DataStreams/tests/filter_stream.cpp b/dbms/DataStreams/tests/filter_stream.cpp similarity index 100% rename from dbms/src/DataStreams/tests/filter_stream.cpp rename to dbms/DataStreams/tests/filter_stream.cpp diff --git a/dbms/src/DataStreams/tests/finish_sorting_stream.cpp b/dbms/DataStreams/tests/finish_sorting_stream.cpp similarity index 100% rename from dbms/src/DataStreams/tests/finish_sorting_stream.cpp rename to dbms/DataStreams/tests/finish_sorting_stream.cpp diff --git a/dbms/src/DataStreams/tests/gtest_blocks_size_merging_streams.cpp b/dbms/DataStreams/tests/gtest_blocks_size_merging_streams.cpp similarity index 100% rename from dbms/src/DataStreams/tests/gtest_blocks_size_merging_streams.cpp rename to dbms/DataStreams/tests/gtest_blocks_size_merging_streams.cpp diff --git a/dbms/src/DataStreams/tests/gtest_check_sorted_stream.cpp b/dbms/DataStreams/tests/gtest_check_sorted_stream.cpp similarity index 100% 
rename from dbms/src/DataStreams/tests/gtest_check_sorted_stream.cpp rename to dbms/DataStreams/tests/gtest_check_sorted_stream.cpp diff --git a/dbms/src/DataStreams/tests/union_stream2.cpp b/dbms/DataStreams/tests/union_stream2.cpp similarity index 100% rename from dbms/src/DataStreams/tests/union_stream2.cpp rename to dbms/DataStreams/tests/union_stream2.cpp diff --git a/dbms/src/DataTypes/CMakeLists.txt b/dbms/DataTypes/CMakeLists.txt similarity index 100% rename from dbms/src/DataTypes/CMakeLists.txt rename to dbms/DataTypes/CMakeLists.txt diff --git a/dbms/src/DataTypes/DataTypeAggregateFunction.cpp b/dbms/DataTypes/DataTypeAggregateFunction.cpp similarity index 100% rename from dbms/src/DataTypes/DataTypeAggregateFunction.cpp rename to dbms/DataTypes/DataTypeAggregateFunction.cpp diff --git a/dbms/src/DataTypes/DataTypeAggregateFunction.h b/dbms/DataTypes/DataTypeAggregateFunction.h similarity index 100% rename from dbms/src/DataTypes/DataTypeAggregateFunction.h rename to dbms/DataTypes/DataTypeAggregateFunction.h diff --git a/dbms/src/DataTypes/DataTypeArray.cpp b/dbms/DataTypes/DataTypeArray.cpp similarity index 100% rename from dbms/src/DataTypes/DataTypeArray.cpp rename to dbms/DataTypes/DataTypeArray.cpp diff --git a/dbms/src/DataTypes/DataTypeArray.h b/dbms/DataTypes/DataTypeArray.h similarity index 100% rename from dbms/src/DataTypes/DataTypeArray.h rename to dbms/DataTypes/DataTypeArray.h diff --git a/dbms/src/DataTypes/DataTypeCustom.h b/dbms/DataTypes/DataTypeCustom.h similarity index 100% rename from dbms/src/DataTypes/DataTypeCustom.h rename to dbms/DataTypes/DataTypeCustom.h diff --git a/dbms/src/DataTypes/DataTypeCustomIPv4AndIPv6.cpp b/dbms/DataTypes/DataTypeCustomIPv4AndIPv6.cpp similarity index 100% rename from dbms/src/DataTypes/DataTypeCustomIPv4AndIPv6.cpp rename to dbms/DataTypes/DataTypeCustomIPv4AndIPv6.cpp diff --git a/dbms/src/DataTypes/DataTypeCustomSimpleAggregateFunction.cpp b/dbms/DataTypes/DataTypeCustomSimpleAggregateFunction.cpp similarity index 100% rename from dbms/src/DataTypes/DataTypeCustomSimpleAggregateFunction.cpp rename to dbms/DataTypes/DataTypeCustomSimpleAggregateFunction.cpp diff --git a/dbms/src/DataTypes/DataTypeCustomSimpleAggregateFunction.h b/dbms/DataTypes/DataTypeCustomSimpleAggregateFunction.h similarity index 100% rename from dbms/src/DataTypes/DataTypeCustomSimpleAggregateFunction.h rename to dbms/DataTypes/DataTypeCustomSimpleAggregateFunction.h diff --git a/dbms/src/DataTypes/DataTypeCustomSimpleTextSerialization.cpp b/dbms/DataTypes/DataTypeCustomSimpleTextSerialization.cpp similarity index 100% rename from dbms/src/DataTypes/DataTypeCustomSimpleTextSerialization.cpp rename to dbms/DataTypes/DataTypeCustomSimpleTextSerialization.cpp diff --git a/dbms/src/DataTypes/DataTypeCustomSimpleTextSerialization.h b/dbms/DataTypes/DataTypeCustomSimpleTextSerialization.h similarity index 100% rename from dbms/src/DataTypes/DataTypeCustomSimpleTextSerialization.h rename to dbms/DataTypes/DataTypeCustomSimpleTextSerialization.h diff --git a/dbms/src/DataTypes/DataTypeDate.cpp b/dbms/DataTypes/DataTypeDate.cpp similarity index 100% rename from dbms/src/DataTypes/DataTypeDate.cpp rename to dbms/DataTypes/DataTypeDate.cpp diff --git a/dbms/src/DataTypes/DataTypeDate.h b/dbms/DataTypes/DataTypeDate.h similarity index 100% rename from dbms/src/DataTypes/DataTypeDate.h rename to dbms/DataTypes/DataTypeDate.h diff --git a/dbms/src/DataTypes/DataTypeDateTime.cpp b/dbms/DataTypes/DataTypeDateTime.cpp similarity index 100% rename from 
dbms/src/DataTypes/DataTypeDateTime.cpp rename to dbms/DataTypes/DataTypeDateTime.cpp diff --git a/dbms/src/DataTypes/DataTypeDateTime.h b/dbms/DataTypes/DataTypeDateTime.h similarity index 100% rename from dbms/src/DataTypes/DataTypeDateTime.h rename to dbms/DataTypes/DataTypeDateTime.h diff --git a/dbms/src/DataTypes/DataTypeDateTime64.cpp b/dbms/DataTypes/DataTypeDateTime64.cpp similarity index 100% rename from dbms/src/DataTypes/DataTypeDateTime64.cpp rename to dbms/DataTypes/DataTypeDateTime64.cpp diff --git a/dbms/src/DataTypes/DataTypeDateTime64.h b/dbms/DataTypes/DataTypeDateTime64.h similarity index 100% rename from dbms/src/DataTypes/DataTypeDateTime64.h rename to dbms/DataTypes/DataTypeDateTime64.h diff --git a/dbms/src/DataTypes/DataTypeDecimalBase.cpp b/dbms/DataTypes/DataTypeDecimalBase.cpp similarity index 100% rename from dbms/src/DataTypes/DataTypeDecimalBase.cpp rename to dbms/DataTypes/DataTypeDecimalBase.cpp diff --git a/dbms/src/DataTypes/DataTypeDecimalBase.h b/dbms/DataTypes/DataTypeDecimalBase.h similarity index 100% rename from dbms/src/DataTypes/DataTypeDecimalBase.h rename to dbms/DataTypes/DataTypeDecimalBase.h diff --git a/dbms/src/DataTypes/DataTypeEnum.cpp b/dbms/DataTypes/DataTypeEnum.cpp similarity index 100% rename from dbms/src/DataTypes/DataTypeEnum.cpp rename to dbms/DataTypes/DataTypeEnum.cpp diff --git a/dbms/src/DataTypes/DataTypeEnum.h b/dbms/DataTypes/DataTypeEnum.h similarity index 100% rename from dbms/src/DataTypes/DataTypeEnum.h rename to dbms/DataTypes/DataTypeEnum.h diff --git a/dbms/src/DataTypes/DataTypeFactory.cpp b/dbms/DataTypes/DataTypeFactory.cpp similarity index 100% rename from dbms/src/DataTypes/DataTypeFactory.cpp rename to dbms/DataTypes/DataTypeFactory.cpp diff --git a/dbms/src/DataTypes/DataTypeFactory.h b/dbms/DataTypes/DataTypeFactory.h similarity index 100% rename from dbms/src/DataTypes/DataTypeFactory.h rename to dbms/DataTypes/DataTypeFactory.h diff --git a/dbms/src/DataTypes/DataTypeFixedString.cpp b/dbms/DataTypes/DataTypeFixedString.cpp similarity index 100% rename from dbms/src/DataTypes/DataTypeFixedString.cpp rename to dbms/DataTypes/DataTypeFixedString.cpp diff --git a/dbms/src/DataTypes/DataTypeFixedString.h b/dbms/DataTypes/DataTypeFixedString.h similarity index 100% rename from dbms/src/DataTypes/DataTypeFixedString.h rename to dbms/DataTypes/DataTypeFixedString.h diff --git a/dbms/src/DataTypes/DataTypeFunction.cpp b/dbms/DataTypes/DataTypeFunction.cpp similarity index 100% rename from dbms/src/DataTypes/DataTypeFunction.cpp rename to dbms/DataTypes/DataTypeFunction.cpp diff --git a/dbms/src/DataTypes/DataTypeFunction.h b/dbms/DataTypes/DataTypeFunction.h similarity index 100% rename from dbms/src/DataTypes/DataTypeFunction.h rename to dbms/DataTypes/DataTypeFunction.h diff --git a/dbms/src/DataTypes/DataTypeInterval.cpp b/dbms/DataTypes/DataTypeInterval.cpp similarity index 100% rename from dbms/src/DataTypes/DataTypeInterval.cpp rename to dbms/DataTypes/DataTypeInterval.cpp diff --git a/dbms/src/DataTypes/DataTypeInterval.h b/dbms/DataTypes/DataTypeInterval.h similarity index 100% rename from dbms/src/DataTypes/DataTypeInterval.h rename to dbms/DataTypes/DataTypeInterval.h diff --git a/dbms/src/DataTypes/DataTypeLowCardinality.cpp b/dbms/DataTypes/DataTypeLowCardinality.cpp similarity index 100% rename from dbms/src/DataTypes/DataTypeLowCardinality.cpp rename to dbms/DataTypes/DataTypeLowCardinality.cpp diff --git a/dbms/src/DataTypes/DataTypeLowCardinality.h b/dbms/DataTypes/DataTypeLowCardinality.h 
similarity index 100% rename from dbms/src/DataTypes/DataTypeLowCardinality.h rename to dbms/DataTypes/DataTypeLowCardinality.h diff --git a/dbms/src/DataTypes/DataTypeLowCardinalityHelpers.cpp b/dbms/DataTypes/DataTypeLowCardinalityHelpers.cpp similarity index 100% rename from dbms/src/DataTypes/DataTypeLowCardinalityHelpers.cpp rename to dbms/DataTypes/DataTypeLowCardinalityHelpers.cpp diff --git a/dbms/src/DataTypes/DataTypeNothing.cpp b/dbms/DataTypes/DataTypeNothing.cpp similarity index 100% rename from dbms/src/DataTypes/DataTypeNothing.cpp rename to dbms/DataTypes/DataTypeNothing.cpp diff --git a/dbms/src/DataTypes/DataTypeNothing.h b/dbms/DataTypes/DataTypeNothing.h similarity index 100% rename from dbms/src/DataTypes/DataTypeNothing.h rename to dbms/DataTypes/DataTypeNothing.h diff --git a/dbms/src/DataTypes/DataTypeNullable.cpp b/dbms/DataTypes/DataTypeNullable.cpp similarity index 100% rename from dbms/src/DataTypes/DataTypeNullable.cpp rename to dbms/DataTypes/DataTypeNullable.cpp diff --git a/dbms/src/DataTypes/DataTypeNullable.h b/dbms/DataTypes/DataTypeNullable.h similarity index 100% rename from dbms/src/DataTypes/DataTypeNullable.h rename to dbms/DataTypes/DataTypeNullable.h diff --git a/dbms/src/DataTypes/DataTypeNumberBase.cpp b/dbms/DataTypes/DataTypeNumberBase.cpp similarity index 100% rename from dbms/src/DataTypes/DataTypeNumberBase.cpp rename to dbms/DataTypes/DataTypeNumberBase.cpp diff --git a/dbms/src/DataTypes/DataTypeNumberBase.h b/dbms/DataTypes/DataTypeNumberBase.h similarity index 100% rename from dbms/src/DataTypes/DataTypeNumberBase.h rename to dbms/DataTypes/DataTypeNumberBase.h diff --git a/dbms/src/DataTypes/DataTypeSet.h b/dbms/DataTypes/DataTypeSet.h similarity index 100% rename from dbms/src/DataTypes/DataTypeSet.h rename to dbms/DataTypes/DataTypeSet.h diff --git a/dbms/src/DataTypes/DataTypeString.cpp b/dbms/DataTypes/DataTypeString.cpp similarity index 100% rename from dbms/src/DataTypes/DataTypeString.cpp rename to dbms/DataTypes/DataTypeString.cpp diff --git a/dbms/src/DataTypes/DataTypeString.h b/dbms/DataTypes/DataTypeString.h similarity index 100% rename from dbms/src/DataTypes/DataTypeString.h rename to dbms/DataTypes/DataTypeString.h diff --git a/dbms/src/DataTypes/DataTypeTuple.cpp b/dbms/DataTypes/DataTypeTuple.cpp similarity index 100% rename from dbms/src/DataTypes/DataTypeTuple.cpp rename to dbms/DataTypes/DataTypeTuple.cpp diff --git a/dbms/src/DataTypes/DataTypeTuple.h b/dbms/DataTypes/DataTypeTuple.h similarity index 100% rename from dbms/src/DataTypes/DataTypeTuple.h rename to dbms/DataTypes/DataTypeTuple.h diff --git a/dbms/src/DataTypes/DataTypeUUID.cpp b/dbms/DataTypes/DataTypeUUID.cpp similarity index 100% rename from dbms/src/DataTypes/DataTypeUUID.cpp rename to dbms/DataTypes/DataTypeUUID.cpp diff --git a/dbms/src/DataTypes/DataTypeUUID.h b/dbms/DataTypes/DataTypeUUID.h similarity index 100% rename from dbms/src/DataTypes/DataTypeUUID.h rename to dbms/DataTypes/DataTypeUUID.h diff --git a/dbms/src/DataTypes/DataTypeWithSimpleSerialization.h b/dbms/DataTypes/DataTypeWithSimpleSerialization.h similarity index 100% rename from dbms/src/DataTypes/DataTypeWithSimpleSerialization.h rename to dbms/DataTypes/DataTypeWithSimpleSerialization.h diff --git a/dbms/src/DataTypes/DataTypesDecimal.cpp b/dbms/DataTypes/DataTypesDecimal.cpp similarity index 100% rename from dbms/src/DataTypes/DataTypesDecimal.cpp rename to dbms/DataTypes/DataTypesDecimal.cpp diff --git a/dbms/src/DataTypes/DataTypesDecimal.h b/dbms/DataTypes/DataTypesDecimal.h 
similarity index 100% rename from dbms/src/DataTypes/DataTypesDecimal.h rename to dbms/DataTypes/DataTypesDecimal.h diff --git a/dbms/src/DataTypes/DataTypesNumber.cpp b/dbms/DataTypes/DataTypesNumber.cpp similarity index 100% rename from dbms/src/DataTypes/DataTypesNumber.cpp rename to dbms/DataTypes/DataTypesNumber.cpp diff --git a/dbms/src/DataTypes/DataTypesNumber.h b/dbms/DataTypes/DataTypesNumber.h similarity index 100% rename from dbms/src/DataTypes/DataTypesNumber.h rename to dbms/DataTypes/DataTypesNumber.h diff --git a/dbms/src/DataTypes/FieldToDataType.cpp b/dbms/DataTypes/FieldToDataType.cpp similarity index 100% rename from dbms/src/DataTypes/FieldToDataType.cpp rename to dbms/DataTypes/FieldToDataType.cpp diff --git a/dbms/src/DataTypes/FieldToDataType.h b/dbms/DataTypes/FieldToDataType.h similarity index 100% rename from dbms/src/DataTypes/FieldToDataType.h rename to dbms/DataTypes/FieldToDataType.h diff --git a/dbms/src/DataTypes/IDataType.cpp b/dbms/DataTypes/IDataType.cpp similarity index 100% rename from dbms/src/DataTypes/IDataType.cpp rename to dbms/DataTypes/IDataType.cpp diff --git a/dbms/src/DataTypes/IDataType.h b/dbms/DataTypes/IDataType.h similarity index 100% rename from dbms/src/DataTypes/IDataType.h rename to dbms/DataTypes/IDataType.h diff --git a/dbms/src/DataTypes/IDataTypeDummy.h b/dbms/DataTypes/IDataTypeDummy.h similarity index 100% rename from dbms/src/DataTypes/IDataTypeDummy.h rename to dbms/DataTypes/IDataTypeDummy.h diff --git a/dbms/src/DataTypes/Native.h b/dbms/DataTypes/Native.h similarity index 100% rename from dbms/src/DataTypes/Native.h rename to dbms/DataTypes/Native.h diff --git a/dbms/src/DataTypes/NestedUtils.cpp b/dbms/DataTypes/NestedUtils.cpp similarity index 100% rename from dbms/src/DataTypes/NestedUtils.cpp rename to dbms/DataTypes/NestedUtils.cpp diff --git a/dbms/src/DataTypes/NestedUtils.h b/dbms/DataTypes/NestedUtils.h similarity index 100% rename from dbms/src/DataTypes/NestedUtils.h rename to dbms/DataTypes/NestedUtils.h diff --git a/dbms/src/DataTypes/NumberTraits.h b/dbms/DataTypes/NumberTraits.h similarity index 100% rename from dbms/src/DataTypes/NumberTraits.h rename to dbms/DataTypes/NumberTraits.h diff --git a/dbms/src/DataTypes/convertMySQLDataType.cpp b/dbms/DataTypes/convertMySQLDataType.cpp similarity index 100% rename from dbms/src/DataTypes/convertMySQLDataType.cpp rename to dbms/DataTypes/convertMySQLDataType.cpp diff --git a/dbms/src/DataTypes/convertMySQLDataType.h b/dbms/DataTypes/convertMySQLDataType.h similarity index 100% rename from dbms/src/DataTypes/convertMySQLDataType.h rename to dbms/DataTypes/convertMySQLDataType.h diff --git a/dbms/src/DataTypes/getLeastSupertype.cpp b/dbms/DataTypes/getLeastSupertype.cpp similarity index 100% rename from dbms/src/DataTypes/getLeastSupertype.cpp rename to dbms/DataTypes/getLeastSupertype.cpp diff --git a/dbms/src/DataTypes/getLeastSupertype.h b/dbms/DataTypes/getLeastSupertype.h similarity index 100% rename from dbms/src/DataTypes/getLeastSupertype.h rename to dbms/DataTypes/getLeastSupertype.h diff --git a/dbms/src/DataTypes/getMostSubtype.cpp b/dbms/DataTypes/getMostSubtype.cpp similarity index 100% rename from dbms/src/DataTypes/getMostSubtype.cpp rename to dbms/DataTypes/getMostSubtype.cpp diff --git a/dbms/src/DataTypes/getMostSubtype.h b/dbms/DataTypes/getMostSubtype.h similarity index 100% rename from dbms/src/DataTypes/getMostSubtype.h rename to dbms/DataTypes/getMostSubtype.h diff --git a/dbms/src/DataTypes/tests/CMakeLists.txt 
b/dbms/DataTypes/tests/CMakeLists.txt similarity index 100% rename from dbms/src/DataTypes/tests/CMakeLists.txt rename to dbms/DataTypes/tests/CMakeLists.txt diff --git a/dbms/src/DataTypes/tests/data_type_string.cpp b/dbms/DataTypes/tests/data_type_string.cpp similarity index 100% rename from dbms/src/DataTypes/tests/data_type_string.cpp rename to dbms/DataTypes/tests/data_type_string.cpp diff --git a/dbms/src/DataTypes/tests/data_types_number_fixed.cpp b/dbms/DataTypes/tests/data_types_number_fixed.cpp similarity index 100% rename from dbms/src/DataTypes/tests/data_types_number_fixed.cpp rename to dbms/DataTypes/tests/data_types_number_fixed.cpp diff --git a/dbms/src/DataTypes/tests/gtest_data_type_get_common_type.cpp b/dbms/DataTypes/tests/gtest_data_type_get_common_type.cpp similarity index 100% rename from dbms/src/DataTypes/tests/gtest_data_type_get_common_type.cpp rename to dbms/DataTypes/tests/gtest_data_type_get_common_type.cpp diff --git a/dbms/src/Databases/DatabaseDictionary.cpp b/dbms/Databases/DatabaseDictionary.cpp similarity index 100% rename from dbms/src/Databases/DatabaseDictionary.cpp rename to dbms/Databases/DatabaseDictionary.cpp diff --git a/dbms/src/Databases/DatabaseDictionary.h b/dbms/Databases/DatabaseDictionary.h similarity index 100% rename from dbms/src/Databases/DatabaseDictionary.h rename to dbms/Databases/DatabaseDictionary.h diff --git a/dbms/src/Databases/DatabaseFactory.cpp b/dbms/Databases/DatabaseFactory.cpp similarity index 100% rename from dbms/src/Databases/DatabaseFactory.cpp rename to dbms/Databases/DatabaseFactory.cpp diff --git a/dbms/src/Databases/DatabaseFactory.h b/dbms/Databases/DatabaseFactory.h similarity index 100% rename from dbms/src/Databases/DatabaseFactory.h rename to dbms/Databases/DatabaseFactory.h diff --git a/dbms/src/Databases/DatabaseLazy.cpp b/dbms/Databases/DatabaseLazy.cpp similarity index 100% rename from dbms/src/Databases/DatabaseLazy.cpp rename to dbms/Databases/DatabaseLazy.cpp diff --git a/dbms/src/Databases/DatabaseLazy.h b/dbms/Databases/DatabaseLazy.h similarity index 100% rename from dbms/src/Databases/DatabaseLazy.h rename to dbms/Databases/DatabaseLazy.h diff --git a/dbms/src/Databases/DatabaseMemory.cpp b/dbms/Databases/DatabaseMemory.cpp similarity index 100% rename from dbms/src/Databases/DatabaseMemory.cpp rename to dbms/Databases/DatabaseMemory.cpp diff --git a/dbms/src/Databases/DatabaseMemory.h b/dbms/Databases/DatabaseMemory.h similarity index 100% rename from dbms/src/Databases/DatabaseMemory.h rename to dbms/Databases/DatabaseMemory.h diff --git a/dbms/src/Databases/DatabaseMySQL.cpp b/dbms/Databases/DatabaseMySQL.cpp similarity index 100% rename from dbms/src/Databases/DatabaseMySQL.cpp rename to dbms/Databases/DatabaseMySQL.cpp diff --git a/dbms/src/Databases/DatabaseMySQL.h b/dbms/Databases/DatabaseMySQL.h similarity index 100% rename from dbms/src/Databases/DatabaseMySQL.h rename to dbms/Databases/DatabaseMySQL.h diff --git a/dbms/src/Databases/DatabaseOnDisk.cpp b/dbms/Databases/DatabaseOnDisk.cpp similarity index 100% rename from dbms/src/Databases/DatabaseOnDisk.cpp rename to dbms/Databases/DatabaseOnDisk.cpp diff --git a/dbms/src/Databases/DatabaseOnDisk.h b/dbms/Databases/DatabaseOnDisk.h similarity index 100% rename from dbms/src/Databases/DatabaseOnDisk.h rename to dbms/Databases/DatabaseOnDisk.h diff --git a/dbms/src/Databases/DatabaseOrdinary.cpp b/dbms/Databases/DatabaseOrdinary.cpp similarity index 100% rename from dbms/src/Databases/DatabaseOrdinary.cpp rename to 
dbms/Databases/DatabaseOrdinary.cpp diff --git a/dbms/src/Databases/DatabaseOrdinary.h b/dbms/Databases/DatabaseOrdinary.h similarity index 100% rename from dbms/src/Databases/DatabaseOrdinary.h rename to dbms/Databases/DatabaseOrdinary.h diff --git a/dbms/src/Databases/DatabaseWithDictionaries.cpp b/dbms/Databases/DatabaseWithDictionaries.cpp similarity index 100% rename from dbms/src/Databases/DatabaseWithDictionaries.cpp rename to dbms/Databases/DatabaseWithDictionaries.cpp diff --git a/dbms/src/Databases/DatabaseWithDictionaries.h b/dbms/Databases/DatabaseWithDictionaries.h similarity index 100% rename from dbms/src/Databases/DatabaseWithDictionaries.h rename to dbms/Databases/DatabaseWithDictionaries.h diff --git a/dbms/src/Databases/DatabasesCommon.cpp b/dbms/Databases/DatabasesCommon.cpp similarity index 100% rename from dbms/src/Databases/DatabasesCommon.cpp rename to dbms/Databases/DatabasesCommon.cpp diff --git a/dbms/src/Databases/DatabasesCommon.h b/dbms/Databases/DatabasesCommon.h similarity index 100% rename from dbms/src/Databases/DatabasesCommon.h rename to dbms/Databases/DatabasesCommon.h diff --git a/dbms/src/Databases/IDatabase.h b/dbms/Databases/IDatabase.h similarity index 100% rename from dbms/src/Databases/IDatabase.h rename to dbms/Databases/IDatabase.h diff --git a/dbms/src/Dictionaries/CMakeLists.txt b/dbms/Dictionaries/CMakeLists.txt similarity index 100% rename from dbms/src/Dictionaries/CMakeLists.txt rename to dbms/Dictionaries/CMakeLists.txt diff --git a/dbms/src/Dictionaries/CacheDictionary.cpp b/dbms/Dictionaries/CacheDictionary.cpp similarity index 100% rename from dbms/src/Dictionaries/CacheDictionary.cpp rename to dbms/Dictionaries/CacheDictionary.cpp diff --git a/dbms/src/Dictionaries/CacheDictionary.h b/dbms/Dictionaries/CacheDictionary.h similarity index 100% rename from dbms/src/Dictionaries/CacheDictionary.h rename to dbms/Dictionaries/CacheDictionary.h diff --git a/dbms/src/Dictionaries/CacheDictionary.inc.h b/dbms/Dictionaries/CacheDictionary.inc.h similarity index 100% rename from dbms/src/Dictionaries/CacheDictionary.inc.h rename to dbms/Dictionaries/CacheDictionary.inc.h diff --git a/dbms/src/Dictionaries/CacheDictionary_generate1.cpp.in b/dbms/Dictionaries/CacheDictionary_generate1.cpp.in similarity index 100% rename from dbms/src/Dictionaries/CacheDictionary_generate1.cpp.in rename to dbms/Dictionaries/CacheDictionary_generate1.cpp.in diff --git a/dbms/src/Dictionaries/CacheDictionary_generate2.cpp.in b/dbms/Dictionaries/CacheDictionary_generate2.cpp.in similarity index 100% rename from dbms/src/Dictionaries/CacheDictionary_generate2.cpp.in rename to dbms/Dictionaries/CacheDictionary_generate2.cpp.in diff --git a/dbms/src/Dictionaries/CacheDictionary_generate3.cpp.in b/dbms/Dictionaries/CacheDictionary_generate3.cpp.in similarity index 100% rename from dbms/src/Dictionaries/CacheDictionary_generate3.cpp.in rename to dbms/Dictionaries/CacheDictionary_generate3.cpp.in diff --git a/dbms/src/Dictionaries/ClickHouseDictionarySource.cpp b/dbms/Dictionaries/ClickHouseDictionarySource.cpp similarity index 100% rename from dbms/src/Dictionaries/ClickHouseDictionarySource.cpp rename to dbms/Dictionaries/ClickHouseDictionarySource.cpp diff --git a/dbms/src/Dictionaries/ClickHouseDictionarySource.h b/dbms/Dictionaries/ClickHouseDictionarySource.h similarity index 100% rename from dbms/src/Dictionaries/ClickHouseDictionarySource.h rename to dbms/Dictionaries/ClickHouseDictionarySource.h diff --git a/dbms/src/Dictionaries/ComplexKeyCacheDictionary.cpp 
b/dbms/Dictionaries/ComplexKeyCacheDictionary.cpp similarity index 100% rename from dbms/src/Dictionaries/ComplexKeyCacheDictionary.cpp rename to dbms/Dictionaries/ComplexKeyCacheDictionary.cpp diff --git a/dbms/src/Dictionaries/ComplexKeyCacheDictionary.h b/dbms/Dictionaries/ComplexKeyCacheDictionary.h similarity index 100% rename from dbms/src/Dictionaries/ComplexKeyCacheDictionary.h rename to dbms/Dictionaries/ComplexKeyCacheDictionary.h diff --git a/dbms/src/Dictionaries/ComplexKeyCacheDictionary_createAttributeWithType.cpp b/dbms/Dictionaries/ComplexKeyCacheDictionary_createAttributeWithType.cpp similarity index 100% rename from dbms/src/Dictionaries/ComplexKeyCacheDictionary_createAttributeWithType.cpp rename to dbms/Dictionaries/ComplexKeyCacheDictionary_createAttributeWithType.cpp diff --git a/dbms/src/Dictionaries/ComplexKeyCacheDictionary_generate1.cpp.in b/dbms/Dictionaries/ComplexKeyCacheDictionary_generate1.cpp.in similarity index 100% rename from dbms/src/Dictionaries/ComplexKeyCacheDictionary_generate1.cpp.in rename to dbms/Dictionaries/ComplexKeyCacheDictionary_generate1.cpp.in diff --git a/dbms/src/Dictionaries/ComplexKeyCacheDictionary_generate2.cpp.in b/dbms/Dictionaries/ComplexKeyCacheDictionary_generate2.cpp.in similarity index 100% rename from dbms/src/Dictionaries/ComplexKeyCacheDictionary_generate2.cpp.in rename to dbms/Dictionaries/ComplexKeyCacheDictionary_generate2.cpp.in diff --git a/dbms/src/Dictionaries/ComplexKeyCacheDictionary_generate3.cpp.in b/dbms/Dictionaries/ComplexKeyCacheDictionary_generate3.cpp.in similarity index 100% rename from dbms/src/Dictionaries/ComplexKeyCacheDictionary_generate3.cpp.in rename to dbms/Dictionaries/ComplexKeyCacheDictionary_generate3.cpp.in diff --git a/dbms/src/Dictionaries/ComplexKeyCacheDictionary_setAttributeValue.cpp b/dbms/Dictionaries/ComplexKeyCacheDictionary_setAttributeValue.cpp similarity index 100% rename from dbms/src/Dictionaries/ComplexKeyCacheDictionary_setAttributeValue.cpp rename to dbms/Dictionaries/ComplexKeyCacheDictionary_setAttributeValue.cpp diff --git a/dbms/src/Dictionaries/ComplexKeyCacheDictionary_setDefaultAttributeValue.cpp b/dbms/Dictionaries/ComplexKeyCacheDictionary_setDefaultAttributeValue.cpp similarity index 100% rename from dbms/src/Dictionaries/ComplexKeyCacheDictionary_setDefaultAttributeValue.cpp rename to dbms/Dictionaries/ComplexKeyCacheDictionary_setDefaultAttributeValue.cpp diff --git a/dbms/src/Dictionaries/ComplexKeyHashedDictionary.cpp b/dbms/Dictionaries/ComplexKeyHashedDictionary.cpp similarity index 100% rename from dbms/src/Dictionaries/ComplexKeyHashedDictionary.cpp rename to dbms/Dictionaries/ComplexKeyHashedDictionary.cpp diff --git a/dbms/src/Dictionaries/ComplexKeyHashedDictionary.h b/dbms/Dictionaries/ComplexKeyHashedDictionary.h similarity index 100% rename from dbms/src/Dictionaries/ComplexKeyHashedDictionary.h rename to dbms/Dictionaries/ComplexKeyHashedDictionary.h diff --git a/dbms/src/Dictionaries/DictionaryBlockInputStream.h b/dbms/Dictionaries/DictionaryBlockInputStream.h similarity index 100% rename from dbms/src/Dictionaries/DictionaryBlockInputStream.h rename to dbms/Dictionaries/DictionaryBlockInputStream.h diff --git a/dbms/src/Dictionaries/DictionaryBlockInputStreamBase.cpp b/dbms/Dictionaries/DictionaryBlockInputStreamBase.cpp similarity index 100% rename from dbms/src/Dictionaries/DictionaryBlockInputStreamBase.cpp rename to dbms/Dictionaries/DictionaryBlockInputStreamBase.cpp diff --git a/dbms/src/Dictionaries/DictionaryBlockInputStreamBase.h 
b/dbms/Dictionaries/DictionaryBlockInputStreamBase.h similarity index 100% rename from dbms/src/Dictionaries/DictionaryBlockInputStreamBase.h rename to dbms/Dictionaries/DictionaryBlockInputStreamBase.h diff --git a/dbms/src/Dictionaries/DictionaryFactory.cpp b/dbms/Dictionaries/DictionaryFactory.cpp similarity index 100% rename from dbms/src/Dictionaries/DictionaryFactory.cpp rename to dbms/Dictionaries/DictionaryFactory.cpp diff --git a/dbms/src/Dictionaries/DictionaryFactory.h b/dbms/Dictionaries/DictionaryFactory.h similarity index 100% rename from dbms/src/Dictionaries/DictionaryFactory.h rename to dbms/Dictionaries/DictionaryFactory.h diff --git a/dbms/src/Dictionaries/DictionarySourceFactory.cpp b/dbms/Dictionaries/DictionarySourceFactory.cpp similarity index 100% rename from dbms/src/Dictionaries/DictionarySourceFactory.cpp rename to dbms/Dictionaries/DictionarySourceFactory.cpp diff --git a/dbms/src/Dictionaries/DictionarySourceFactory.h b/dbms/Dictionaries/DictionarySourceFactory.h similarity index 100% rename from dbms/src/Dictionaries/DictionarySourceFactory.h rename to dbms/Dictionaries/DictionarySourceFactory.h diff --git a/dbms/src/Dictionaries/DictionarySourceHelpers.cpp b/dbms/Dictionaries/DictionarySourceHelpers.cpp similarity index 100% rename from dbms/src/Dictionaries/DictionarySourceHelpers.cpp rename to dbms/Dictionaries/DictionarySourceHelpers.cpp diff --git a/dbms/src/Dictionaries/DictionarySourceHelpers.h b/dbms/Dictionaries/DictionarySourceHelpers.h similarity index 100% rename from dbms/src/Dictionaries/DictionarySourceHelpers.h rename to dbms/Dictionaries/DictionarySourceHelpers.h diff --git a/dbms/src/Dictionaries/DictionaryStructure.cpp b/dbms/Dictionaries/DictionaryStructure.cpp similarity index 100% rename from dbms/src/Dictionaries/DictionaryStructure.cpp rename to dbms/Dictionaries/DictionaryStructure.cpp diff --git a/dbms/src/Dictionaries/DictionaryStructure.h b/dbms/Dictionaries/DictionaryStructure.h similarity index 100% rename from dbms/src/Dictionaries/DictionaryStructure.h rename to dbms/Dictionaries/DictionaryStructure.h diff --git a/dbms/src/Dictionaries/Embedded/CMakeLists.txt b/dbms/Dictionaries/Embedded/CMakeLists.txt similarity index 100% rename from dbms/src/Dictionaries/Embedded/CMakeLists.txt rename to dbms/Dictionaries/Embedded/CMakeLists.txt diff --git a/dbms/src/Dictionaries/Embedded/GeoDictionariesLoader.cpp b/dbms/Dictionaries/Embedded/GeoDictionariesLoader.cpp similarity index 100% rename from dbms/src/Dictionaries/Embedded/GeoDictionariesLoader.cpp rename to dbms/Dictionaries/Embedded/GeoDictionariesLoader.cpp diff --git a/dbms/src/Dictionaries/Embedded/GeoDictionariesLoader.h b/dbms/Dictionaries/Embedded/GeoDictionariesLoader.h similarity index 100% rename from dbms/src/Dictionaries/Embedded/GeoDictionariesLoader.h rename to dbms/Dictionaries/Embedded/GeoDictionariesLoader.h diff --git a/dbms/src/Dictionaries/Embedded/GeodataProviders/Entries.h b/dbms/Dictionaries/Embedded/GeodataProviders/Entries.h similarity index 100% rename from dbms/src/Dictionaries/Embedded/GeodataProviders/Entries.h rename to dbms/Dictionaries/Embedded/GeodataProviders/Entries.h diff --git a/dbms/src/Dictionaries/Embedded/GeodataProviders/HierarchiesProvider.cpp b/dbms/Dictionaries/Embedded/GeodataProviders/HierarchiesProvider.cpp similarity index 100% rename from dbms/src/Dictionaries/Embedded/GeodataProviders/HierarchiesProvider.cpp rename to dbms/Dictionaries/Embedded/GeodataProviders/HierarchiesProvider.cpp diff --git 
a/dbms/src/Dictionaries/Embedded/GeodataProviders/HierarchiesProvider.h b/dbms/Dictionaries/Embedded/GeodataProviders/HierarchiesProvider.h similarity index 100% rename from dbms/src/Dictionaries/Embedded/GeodataProviders/HierarchiesProvider.h rename to dbms/Dictionaries/Embedded/GeodataProviders/HierarchiesProvider.h diff --git a/dbms/src/Dictionaries/Embedded/GeodataProviders/HierarchyFormatReader.cpp b/dbms/Dictionaries/Embedded/GeodataProviders/HierarchyFormatReader.cpp similarity index 100% rename from dbms/src/Dictionaries/Embedded/GeodataProviders/HierarchyFormatReader.cpp rename to dbms/Dictionaries/Embedded/GeodataProviders/HierarchyFormatReader.cpp diff --git a/dbms/src/Dictionaries/Embedded/GeodataProviders/HierarchyFormatReader.h b/dbms/Dictionaries/Embedded/GeodataProviders/HierarchyFormatReader.h similarity index 100% rename from dbms/src/Dictionaries/Embedded/GeodataProviders/HierarchyFormatReader.h rename to dbms/Dictionaries/Embedded/GeodataProviders/HierarchyFormatReader.h diff --git a/dbms/src/Dictionaries/Embedded/GeodataProviders/IHierarchiesProvider.h b/dbms/Dictionaries/Embedded/GeodataProviders/IHierarchiesProvider.h similarity index 100% rename from dbms/src/Dictionaries/Embedded/GeodataProviders/IHierarchiesProvider.h rename to dbms/Dictionaries/Embedded/GeodataProviders/IHierarchiesProvider.h diff --git a/dbms/src/Dictionaries/Embedded/GeodataProviders/INamesProvider.h b/dbms/Dictionaries/Embedded/GeodataProviders/INamesProvider.h similarity index 100% rename from dbms/src/Dictionaries/Embedded/GeodataProviders/INamesProvider.h rename to dbms/Dictionaries/Embedded/GeodataProviders/INamesProvider.h diff --git a/dbms/src/Dictionaries/Embedded/GeodataProviders/NamesFormatReader.cpp b/dbms/Dictionaries/Embedded/GeodataProviders/NamesFormatReader.cpp similarity index 100% rename from dbms/src/Dictionaries/Embedded/GeodataProviders/NamesFormatReader.cpp rename to dbms/Dictionaries/Embedded/GeodataProviders/NamesFormatReader.cpp diff --git a/dbms/src/Dictionaries/Embedded/GeodataProviders/NamesFormatReader.h b/dbms/Dictionaries/Embedded/GeodataProviders/NamesFormatReader.h similarity index 100% rename from dbms/src/Dictionaries/Embedded/GeodataProviders/NamesFormatReader.h rename to dbms/Dictionaries/Embedded/GeodataProviders/NamesFormatReader.h diff --git a/dbms/src/Dictionaries/Embedded/GeodataProviders/NamesProvider.cpp b/dbms/Dictionaries/Embedded/GeodataProviders/NamesProvider.cpp similarity index 100% rename from dbms/src/Dictionaries/Embedded/GeodataProviders/NamesProvider.cpp rename to dbms/Dictionaries/Embedded/GeodataProviders/NamesProvider.cpp diff --git a/dbms/src/Dictionaries/Embedded/GeodataProviders/NamesProvider.h b/dbms/Dictionaries/Embedded/GeodataProviders/NamesProvider.h similarity index 100% rename from dbms/src/Dictionaries/Embedded/GeodataProviders/NamesProvider.h rename to dbms/Dictionaries/Embedded/GeodataProviders/NamesProvider.h diff --git a/dbms/src/Dictionaries/Embedded/GeodataProviders/Types.h b/dbms/Dictionaries/Embedded/GeodataProviders/Types.h similarity index 100% rename from dbms/src/Dictionaries/Embedded/GeodataProviders/Types.h rename to dbms/Dictionaries/Embedded/GeodataProviders/Types.h diff --git a/dbms/src/Dictionaries/Embedded/RegionsHierarchies.cpp b/dbms/Dictionaries/Embedded/RegionsHierarchies.cpp similarity index 100% rename from dbms/src/Dictionaries/Embedded/RegionsHierarchies.cpp rename to dbms/Dictionaries/Embedded/RegionsHierarchies.cpp diff --git a/dbms/src/Dictionaries/Embedded/RegionsHierarchies.h 
b/dbms/Dictionaries/Embedded/RegionsHierarchies.h similarity index 100% rename from dbms/src/Dictionaries/Embedded/RegionsHierarchies.h rename to dbms/Dictionaries/Embedded/RegionsHierarchies.h diff --git a/dbms/src/Dictionaries/Embedded/RegionsHierarchy.cpp b/dbms/Dictionaries/Embedded/RegionsHierarchy.cpp similarity index 100% rename from dbms/src/Dictionaries/Embedded/RegionsHierarchy.cpp rename to dbms/Dictionaries/Embedded/RegionsHierarchy.cpp diff --git a/dbms/src/Dictionaries/Embedded/RegionsHierarchy.h b/dbms/Dictionaries/Embedded/RegionsHierarchy.h similarity index 100% rename from dbms/src/Dictionaries/Embedded/RegionsHierarchy.h rename to dbms/Dictionaries/Embedded/RegionsHierarchy.h diff --git a/dbms/src/Dictionaries/Embedded/RegionsNames.cpp b/dbms/Dictionaries/Embedded/RegionsNames.cpp similarity index 100% rename from dbms/src/Dictionaries/Embedded/RegionsNames.cpp rename to dbms/Dictionaries/Embedded/RegionsNames.cpp diff --git a/dbms/src/Dictionaries/Embedded/RegionsNames.h b/dbms/Dictionaries/Embedded/RegionsNames.h similarity index 100% rename from dbms/src/Dictionaries/Embedded/RegionsNames.h rename to dbms/Dictionaries/Embedded/RegionsNames.h diff --git a/dbms/src/Dictionaries/ExecutableDictionarySource.cpp b/dbms/Dictionaries/ExecutableDictionarySource.cpp similarity index 100% rename from dbms/src/Dictionaries/ExecutableDictionarySource.cpp rename to dbms/Dictionaries/ExecutableDictionarySource.cpp diff --git a/dbms/src/Dictionaries/ExecutableDictionarySource.h b/dbms/Dictionaries/ExecutableDictionarySource.h similarity index 100% rename from dbms/src/Dictionaries/ExecutableDictionarySource.h rename to dbms/Dictionaries/ExecutableDictionarySource.h diff --git a/dbms/src/Dictionaries/ExternalQueryBuilder.cpp b/dbms/Dictionaries/ExternalQueryBuilder.cpp similarity index 100% rename from dbms/src/Dictionaries/ExternalQueryBuilder.cpp rename to dbms/Dictionaries/ExternalQueryBuilder.cpp diff --git a/dbms/src/Dictionaries/ExternalQueryBuilder.h b/dbms/Dictionaries/ExternalQueryBuilder.h similarity index 100% rename from dbms/src/Dictionaries/ExternalQueryBuilder.h rename to dbms/Dictionaries/ExternalQueryBuilder.h diff --git a/dbms/src/Dictionaries/FileDictionarySource.cpp b/dbms/Dictionaries/FileDictionarySource.cpp similarity index 100% rename from dbms/src/Dictionaries/FileDictionarySource.cpp rename to dbms/Dictionaries/FileDictionarySource.cpp diff --git a/dbms/src/Dictionaries/FileDictionarySource.h b/dbms/Dictionaries/FileDictionarySource.h similarity index 100% rename from dbms/src/Dictionaries/FileDictionarySource.h rename to dbms/Dictionaries/FileDictionarySource.h diff --git a/dbms/src/Dictionaries/FlatDictionary.cpp b/dbms/Dictionaries/FlatDictionary.cpp similarity index 100% rename from dbms/src/Dictionaries/FlatDictionary.cpp rename to dbms/Dictionaries/FlatDictionary.cpp diff --git a/dbms/src/Dictionaries/FlatDictionary.h b/dbms/Dictionaries/FlatDictionary.h similarity index 100% rename from dbms/src/Dictionaries/FlatDictionary.h rename to dbms/Dictionaries/FlatDictionary.h diff --git a/dbms/src/Dictionaries/HTTPDictionarySource.cpp b/dbms/Dictionaries/HTTPDictionarySource.cpp similarity index 100% rename from dbms/src/Dictionaries/HTTPDictionarySource.cpp rename to dbms/Dictionaries/HTTPDictionarySource.cpp diff --git a/dbms/src/Dictionaries/HTTPDictionarySource.h b/dbms/Dictionaries/HTTPDictionarySource.h similarity index 100% rename from dbms/src/Dictionaries/HTTPDictionarySource.h rename to dbms/Dictionaries/HTTPDictionarySource.h diff --git 
a/dbms/src/Dictionaries/HashedDictionary.cpp b/dbms/Dictionaries/HashedDictionary.cpp similarity index 100% rename from dbms/src/Dictionaries/HashedDictionary.cpp rename to dbms/Dictionaries/HashedDictionary.cpp diff --git a/dbms/src/Dictionaries/HashedDictionary.h b/dbms/Dictionaries/HashedDictionary.h similarity index 100% rename from dbms/src/Dictionaries/HashedDictionary.h rename to dbms/Dictionaries/HashedDictionary.h diff --git a/dbms/src/Dictionaries/IDictionary.h b/dbms/Dictionaries/IDictionary.h similarity index 100% rename from dbms/src/Dictionaries/IDictionary.h rename to dbms/Dictionaries/IDictionary.h diff --git a/dbms/src/Dictionaries/IDictionarySource.h b/dbms/Dictionaries/IDictionarySource.h similarity index 100% rename from dbms/src/Dictionaries/IDictionarySource.h rename to dbms/Dictionaries/IDictionarySource.h diff --git a/dbms/src/Dictionaries/LibraryDictionarySource.cpp b/dbms/Dictionaries/LibraryDictionarySource.cpp similarity index 100% rename from dbms/src/Dictionaries/LibraryDictionarySource.cpp rename to dbms/Dictionaries/LibraryDictionarySource.cpp diff --git a/dbms/src/Dictionaries/LibraryDictionarySource.h b/dbms/Dictionaries/LibraryDictionarySource.h similarity index 95% rename from dbms/src/Dictionaries/LibraryDictionarySource.h rename to dbms/Dictionaries/LibraryDictionarySource.h index e42a7ba1dc8..4d73b3f97d4 100644
--- a/dbms/src/Dictionaries/LibraryDictionarySource.h
+++ b/dbms/Dictionaries/LibraryDictionarySource.h
@@ -28,7 +28,7 @@ class CStringsHolder;
 /// Allows loading dictionaries from dynamic libraries (.so)
 /// Experimental version
-/// Example: dbms/tests/external_dictionaries/dictionary_library/dictionary_library.cpp
+/// Example: tests/external_dictionaries/dictionary_library/dictionary_library.cpp
 class LibraryDictionarySource final : public IDictionarySource
 {
 public:
diff --git a/dbms/src/Dictionaries/LibraryDictionarySourceExternal.cpp b/dbms/Dictionaries/LibraryDictionarySourceExternal.cpp similarity index 100% rename from dbms/src/Dictionaries/LibraryDictionarySourceExternal.cpp rename to dbms/Dictionaries/LibraryDictionarySourceExternal.cpp diff --git a/dbms/src/Dictionaries/LibraryDictionarySourceExternal.h b/dbms/Dictionaries/LibraryDictionarySourceExternal.h similarity index 100% rename from dbms/src/Dictionaries/LibraryDictionarySourceExternal.h rename to dbms/Dictionaries/LibraryDictionarySourceExternal.h diff --git a/dbms/src/Dictionaries/MongoDBBlockInputStream.cpp b/dbms/Dictionaries/MongoDBBlockInputStream.cpp similarity index 100% rename from dbms/src/Dictionaries/MongoDBBlockInputStream.cpp rename to dbms/Dictionaries/MongoDBBlockInputStream.cpp diff --git a/dbms/src/Dictionaries/MongoDBBlockInputStream.h b/dbms/Dictionaries/MongoDBBlockInputStream.h similarity index 100% rename from dbms/src/Dictionaries/MongoDBBlockInputStream.h rename to dbms/Dictionaries/MongoDBBlockInputStream.h diff --git a/dbms/src/Dictionaries/MongoDBDictionarySource.cpp b/dbms/Dictionaries/MongoDBDictionarySource.cpp similarity index 99% rename from dbms/src/Dictionaries/MongoDBDictionarySource.cpp rename to dbms/Dictionaries/MongoDBDictionarySource.cpp index 0484315aec0..ba219d3c0fa 100644
--- a/dbms/src/Dictionaries/MongoDBDictionarySource.cpp
+++ b/dbms/Dictionaries/MongoDBDictionarySource.cpp
@@ -48,7 +48,7 @@ void registerDictionarySourceMongoDB(DictionarySourceFactory & factory)
 // only after poco
 // naming conflict:
 // Poco/MongoDB/BSONWriter.h:54: void writeCString(const std::string & value);
-// dbms/src/IO/WriteHelpers.h:146 #define writeCString(s, buf)
+// dbms/IO/WriteHelpers.h:146 #define writeCString(s, buf)
 # include <Poco/MongoDB/Connection.h>
 # include <Poco/MongoDB/Cursor.h>
 # include <Poco/MongoDB/Database.h>
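The naming-conflict comment in the hunk above is worth unpacking: WriteHelpers.h defines a function-like macro named writeCString, so any Poco/MongoDB header included after that macro would have its writeCString member declaration mangled by the preprocessor. A minimal stand-alone illustration (hypothetical names, not part of the patch):

#include <string>

// Stand-in for the macro from dbms/IO/WriteHelpers.h:146.
#define writeCString(s, buf) writeCStringImpl((s), (buf))

struct BSONWriterLike
{
    // Without the macro in scope, this would be an ordinary member function.
    // With it, the preprocessor tries to expand writeCString(...) as a
    // two-parameter macro call; the one-argument declaration does not match
    // and the translation unit fails to compile. That is why the Poco/MongoDB
    // headers must be included before the macro comes into effect.
    // void writeCString(const std::string & value);
};

int main() { return 0; }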
diff --git a/dbms/src/Dictionaries/MongoDBDictionarySource.h b/dbms/Dictionaries/MongoDBDictionarySource.h similarity index 100% rename from dbms/src/Dictionaries/MongoDBDictionarySource.h rename to dbms/Dictionaries/MongoDBDictionarySource.h diff --git a/dbms/src/Dictionaries/MySQLDictionarySource.cpp b/dbms/Dictionaries/MySQLDictionarySource.cpp similarity index 100% rename from dbms/src/Dictionaries/MySQLDictionarySource.cpp rename to dbms/Dictionaries/MySQLDictionarySource.cpp diff --git a/dbms/src/Dictionaries/MySQLDictionarySource.h b/dbms/Dictionaries/MySQLDictionarySource.h similarity index 100% rename from dbms/src/Dictionaries/MySQLDictionarySource.h rename to dbms/Dictionaries/MySQLDictionarySource.h diff --git a/dbms/src/Dictionaries/PolygonDictionary.cpp b/dbms/Dictionaries/PolygonDictionary.cpp similarity index 100% rename from dbms/src/Dictionaries/PolygonDictionary.cpp rename to dbms/Dictionaries/PolygonDictionary.cpp diff --git a/dbms/src/Dictionaries/PolygonDictionary.h b/dbms/Dictionaries/PolygonDictionary.h similarity index 100% rename from dbms/src/Dictionaries/PolygonDictionary.h rename to dbms/Dictionaries/PolygonDictionary.h diff --git a/dbms/src/Dictionaries/RangeDictionaryBlockInputStream.h b/dbms/Dictionaries/RangeDictionaryBlockInputStream.h similarity index 100% rename from dbms/src/Dictionaries/RangeDictionaryBlockInputStream.h rename to dbms/Dictionaries/RangeDictionaryBlockInputStream.h diff --git a/dbms/src/Dictionaries/RangeHashedDictionary.cpp b/dbms/Dictionaries/RangeHashedDictionary.cpp similarity index 100% rename from dbms/src/Dictionaries/RangeHashedDictionary.cpp rename to dbms/Dictionaries/RangeHashedDictionary.cpp diff --git a/dbms/src/Dictionaries/RangeHashedDictionary.h b/dbms/Dictionaries/RangeHashedDictionary.h similarity index 100% rename from dbms/src/Dictionaries/RangeHashedDictionary.h rename to dbms/Dictionaries/RangeHashedDictionary.h diff --git a/dbms/src/Dictionaries/RedisBlockInputStream.cpp b/dbms/Dictionaries/RedisBlockInputStream.cpp similarity index 100% rename from dbms/src/Dictionaries/RedisBlockInputStream.cpp rename to dbms/Dictionaries/RedisBlockInputStream.cpp diff --git a/dbms/src/Dictionaries/RedisBlockInputStream.h b/dbms/Dictionaries/RedisBlockInputStream.h similarity index 100% rename from dbms/src/Dictionaries/RedisBlockInputStream.h rename to dbms/Dictionaries/RedisBlockInputStream.h diff --git a/dbms/src/Dictionaries/RedisDictionarySource.cpp b/dbms/Dictionaries/RedisDictionarySource.cpp similarity index 100% rename from dbms/src/Dictionaries/RedisDictionarySource.cpp rename to dbms/Dictionaries/RedisDictionarySource.cpp diff --git a/dbms/src/Dictionaries/RedisDictionarySource.h b/dbms/Dictionaries/RedisDictionarySource.h similarity index 100% rename from dbms/src/Dictionaries/RedisDictionarySource.h rename to dbms/Dictionaries/RedisDictionarySource.h diff --git a/dbms/src/Dictionaries/TrieDictionary.cpp b/dbms/Dictionaries/TrieDictionary.cpp similarity index 100% rename from dbms/src/Dictionaries/TrieDictionary.cpp rename to dbms/Dictionaries/TrieDictionary.cpp diff --git a/dbms/src/Dictionaries/TrieDictionary.h b/dbms/Dictionaries/TrieDictionary.h similarity index 100% rename from dbms/src/Dictionaries/TrieDictionary.h rename to dbms/Dictionaries/TrieDictionary.h diff --git a/dbms/src/Dictionaries/XDBCDictionarySource.cpp b/dbms/Dictionaries/XDBCDictionarySource.cpp similarity index 100% rename from
dbms/src/Dictionaries/XDBCDictionarySource.cpp rename to dbms/Dictionaries/XDBCDictionarySource.cpp diff --git a/dbms/src/Dictionaries/XDBCDictionarySource.h b/dbms/Dictionaries/XDBCDictionarySource.h similarity index 100% rename from dbms/src/Dictionaries/XDBCDictionarySource.h rename to dbms/Dictionaries/XDBCDictionarySource.h diff --git a/dbms/src/Dictionaries/getDictionaryConfigurationFromAST.cpp b/dbms/Dictionaries/getDictionaryConfigurationFromAST.cpp similarity index 100% rename from dbms/src/Dictionaries/getDictionaryConfigurationFromAST.cpp rename to dbms/Dictionaries/getDictionaryConfigurationFromAST.cpp diff --git a/dbms/src/Dictionaries/getDictionaryConfigurationFromAST.h b/dbms/Dictionaries/getDictionaryConfigurationFromAST.h similarity index 100% rename from dbms/src/Dictionaries/getDictionaryConfigurationFromAST.h rename to dbms/Dictionaries/getDictionaryConfigurationFromAST.h diff --git a/dbms/src/Dictionaries/readInvalidateQuery.cpp b/dbms/Dictionaries/readInvalidateQuery.cpp similarity index 100% rename from dbms/src/Dictionaries/readInvalidateQuery.cpp rename to dbms/Dictionaries/readInvalidateQuery.cpp diff --git a/dbms/src/Dictionaries/readInvalidateQuery.h b/dbms/Dictionaries/readInvalidateQuery.h similarity index 100% rename from dbms/src/Dictionaries/readInvalidateQuery.h rename to dbms/Dictionaries/readInvalidateQuery.h diff --git a/dbms/src/Dictionaries/registerDictionaries.cpp b/dbms/Dictionaries/registerDictionaries.cpp similarity index 100% rename from dbms/src/Dictionaries/registerDictionaries.cpp rename to dbms/Dictionaries/registerDictionaries.cpp diff --git a/dbms/src/Dictionaries/registerDictionaries.h b/dbms/Dictionaries/registerDictionaries.h similarity index 100% rename from dbms/src/Dictionaries/registerDictionaries.h rename to dbms/Dictionaries/registerDictionaries.h diff --git a/dbms/src/Dictionaries/tests/CMakeLists.txt b/dbms/Dictionaries/tests/CMakeLists.txt similarity index 100% rename from dbms/src/Dictionaries/tests/CMakeLists.txt rename to dbms/Dictionaries/tests/CMakeLists.txt diff --git a/dbms/src/Dictionaries/tests/gtest_dictionary_configuration.cpp b/dbms/Dictionaries/tests/gtest_dictionary_configuration.cpp similarity index 100% rename from dbms/src/Dictionaries/tests/gtest_dictionary_configuration.cpp rename to dbms/Dictionaries/tests/gtest_dictionary_configuration.cpp diff --git a/dbms/src/Dictionaries/writeParenthesisedString.cpp b/dbms/Dictionaries/writeParenthesisedString.cpp similarity index 100% rename from dbms/src/Dictionaries/writeParenthesisedString.cpp rename to dbms/Dictionaries/writeParenthesisedString.cpp diff --git a/dbms/src/Dictionaries/writeParenthesisedString.h b/dbms/Dictionaries/writeParenthesisedString.h similarity index 100% rename from dbms/src/Dictionaries/writeParenthesisedString.h rename to dbms/Dictionaries/writeParenthesisedString.h diff --git a/dbms/src/Disks/CMakeLists.txt b/dbms/Disks/CMakeLists.txt similarity index 100% rename from dbms/src/Disks/CMakeLists.txt rename to dbms/Disks/CMakeLists.txt diff --git a/dbms/src/Disks/DiskFactory.cpp b/dbms/Disks/DiskFactory.cpp similarity index 100% rename from dbms/src/Disks/DiskFactory.cpp rename to dbms/Disks/DiskFactory.cpp diff --git a/dbms/src/Disks/DiskFactory.h b/dbms/Disks/DiskFactory.h similarity index 100% rename from dbms/src/Disks/DiskFactory.h rename to dbms/Disks/DiskFactory.h diff --git a/dbms/src/Disks/DiskLocal.cpp b/dbms/Disks/DiskLocal.cpp similarity index 100% rename from dbms/src/Disks/DiskLocal.cpp rename to dbms/Disks/DiskLocal.cpp diff 
--git a/dbms/src/Disks/DiskLocal.h b/dbms/Disks/DiskLocal.h similarity index 100% rename from dbms/src/Disks/DiskLocal.h rename to dbms/Disks/DiskLocal.h diff --git a/dbms/src/Disks/DiskMemory.cpp b/dbms/Disks/DiskMemory.cpp similarity index 100% rename from dbms/src/Disks/DiskMemory.cpp rename to dbms/Disks/DiskMemory.cpp diff --git a/dbms/src/Disks/DiskMemory.h b/dbms/Disks/DiskMemory.h similarity index 100% rename from dbms/src/Disks/DiskMemory.h rename to dbms/Disks/DiskMemory.h diff --git a/dbms/src/Disks/DiskS3.cpp b/dbms/Disks/DiskS3.cpp similarity index 100% rename from dbms/src/Disks/DiskS3.cpp rename to dbms/Disks/DiskS3.cpp diff --git a/dbms/src/Disks/DiskS3.h b/dbms/Disks/DiskS3.h similarity index 100% rename from dbms/src/Disks/DiskS3.h rename to dbms/Disks/DiskS3.h diff --git a/dbms/src/Disks/DiskSpaceMonitor.cpp b/dbms/Disks/DiskSpaceMonitor.cpp similarity index 100% rename from dbms/src/Disks/DiskSpaceMonitor.cpp rename to dbms/Disks/DiskSpaceMonitor.cpp diff --git a/dbms/src/Disks/DiskSpaceMonitor.h b/dbms/Disks/DiskSpaceMonitor.h similarity index 100% rename from dbms/src/Disks/DiskSpaceMonitor.h rename to dbms/Disks/DiskSpaceMonitor.h diff --git a/dbms/src/Disks/IDisk.cpp b/dbms/Disks/IDisk.cpp similarity index 100% rename from dbms/src/Disks/IDisk.cpp rename to dbms/Disks/IDisk.cpp diff --git a/dbms/src/Disks/IDisk.h b/dbms/Disks/IDisk.h similarity index 100% rename from dbms/src/Disks/IDisk.h rename to dbms/Disks/IDisk.h diff --git a/dbms/src/Disks/registerDisks.cpp b/dbms/Disks/registerDisks.cpp similarity index 100% rename from dbms/src/Disks/registerDisks.cpp rename to dbms/Disks/registerDisks.cpp diff --git a/dbms/src/Disks/registerDisks.h b/dbms/Disks/registerDisks.h similarity index 100% rename from dbms/src/Disks/registerDisks.h rename to dbms/Disks/registerDisks.h diff --git a/dbms/src/Disks/tests/CMakeLists.txt b/dbms/Disks/tests/CMakeLists.txt similarity index 100% rename from dbms/src/Disks/tests/CMakeLists.txt rename to dbms/Disks/tests/CMakeLists.txt diff --git a/dbms/src/Disks/tests/gtest_disk.cpp b/dbms/Disks/tests/gtest_disk.cpp similarity index 100% rename from dbms/src/Disks/tests/gtest_disk.cpp rename to dbms/Disks/tests/gtest_disk.cpp diff --git a/dbms/src/Disks/tests/gtest_disk.h b/dbms/Disks/tests/gtest_disk.h similarity index 100% rename from dbms/src/Disks/tests/gtest_disk.h rename to dbms/Disks/tests/gtest_disk.h diff --git a/dbms/src/Disks/tests/gtest_path_functions.cpp b/dbms/Disks/tests/gtest_path_functions.cpp similarity index 100% rename from dbms/src/Disks/tests/gtest_path_functions.cpp rename to dbms/Disks/tests/gtest_path_functions.cpp diff --git a/dbms/src/Formats/CMakeLists.txt b/dbms/Formats/CMakeLists.txt similarity index 100% rename from dbms/src/Formats/CMakeLists.txt rename to dbms/Formats/CMakeLists.txt diff --git a/dbms/src/Formats/FormatFactory.cpp b/dbms/Formats/FormatFactory.cpp similarity index 100% rename from dbms/src/Formats/FormatFactory.cpp rename to dbms/Formats/FormatFactory.cpp diff --git a/dbms/src/Formats/FormatFactory.h b/dbms/Formats/FormatFactory.h similarity index 100% rename from dbms/src/Formats/FormatFactory.h rename to dbms/Formats/FormatFactory.h diff --git a/dbms/src/Formats/FormatSchemaInfo.cpp b/dbms/Formats/FormatSchemaInfo.cpp similarity index 100% rename from dbms/src/Formats/FormatSchemaInfo.cpp rename to dbms/Formats/FormatSchemaInfo.cpp diff --git a/dbms/src/Formats/FormatSchemaInfo.h b/dbms/Formats/FormatSchemaInfo.h similarity index 100% rename from dbms/src/Formats/FormatSchemaInfo.h rename to 
dbms/Formats/FormatSchemaInfo.h diff --git a/dbms/src/Formats/FormatSettings.h b/dbms/Formats/FormatSettings.h similarity index 100% rename from dbms/src/Formats/FormatSettings.h rename to dbms/Formats/FormatSettings.h diff --git a/dbms/src/Formats/IRowInputStream.cpp b/dbms/Formats/IRowInputStream.cpp similarity index 100% rename from dbms/src/Formats/IRowInputStream.cpp rename to dbms/Formats/IRowInputStream.cpp diff --git a/dbms/src/Formats/IRowInputStream.h b/dbms/Formats/IRowInputStream.h similarity index 100% rename from dbms/src/Formats/IRowInputStream.h rename to dbms/Formats/IRowInputStream.h diff --git a/dbms/src/Formats/IRowOutputStream.cpp b/dbms/Formats/IRowOutputStream.cpp similarity index 100% rename from dbms/src/Formats/IRowOutputStream.cpp rename to dbms/Formats/IRowOutputStream.cpp diff --git a/dbms/src/Formats/IRowOutputStream.h b/dbms/Formats/IRowOutputStream.h similarity index 100% rename from dbms/src/Formats/IRowOutputStream.h rename to dbms/Formats/IRowOutputStream.h diff --git a/dbms/src/Formats/MySQLBlockInputStream.cpp b/dbms/Formats/MySQLBlockInputStream.cpp similarity index 100% rename from dbms/src/Formats/MySQLBlockInputStream.cpp rename to dbms/Formats/MySQLBlockInputStream.cpp diff --git a/dbms/src/Formats/MySQLBlockInputStream.h b/dbms/Formats/MySQLBlockInputStream.h similarity index 100% rename from dbms/src/Formats/MySQLBlockInputStream.h rename to dbms/Formats/MySQLBlockInputStream.h diff --git a/dbms/src/Formats/NativeFormat.cpp b/dbms/Formats/NativeFormat.cpp similarity index 100% rename from dbms/src/Formats/NativeFormat.cpp rename to dbms/Formats/NativeFormat.cpp diff --git a/dbms/src/Formats/NullFormat.cpp b/dbms/Formats/NullFormat.cpp similarity index 100% rename from dbms/src/Formats/NullFormat.cpp rename to dbms/Formats/NullFormat.cpp diff --git a/dbms/src/Formats/ParsedTemplateFormatString.cpp b/dbms/Formats/ParsedTemplateFormatString.cpp similarity index 100% rename from dbms/src/Formats/ParsedTemplateFormatString.cpp rename to dbms/Formats/ParsedTemplateFormatString.cpp diff --git a/dbms/src/Formats/ParsedTemplateFormatString.h b/dbms/Formats/ParsedTemplateFormatString.h similarity index 100% rename from dbms/src/Formats/ParsedTemplateFormatString.h rename to dbms/Formats/ParsedTemplateFormatString.h diff --git a/dbms/src/Formats/ProtobufColumnMatcher.cpp b/dbms/Formats/ProtobufColumnMatcher.cpp similarity index 100% rename from dbms/src/Formats/ProtobufColumnMatcher.cpp rename to dbms/Formats/ProtobufColumnMatcher.cpp diff --git a/dbms/src/Formats/ProtobufColumnMatcher.h b/dbms/Formats/ProtobufColumnMatcher.h similarity index 100% rename from dbms/src/Formats/ProtobufColumnMatcher.h rename to dbms/Formats/ProtobufColumnMatcher.h diff --git a/dbms/src/Formats/ProtobufReader.cpp b/dbms/Formats/ProtobufReader.cpp similarity index 100% rename from dbms/src/Formats/ProtobufReader.cpp rename to dbms/Formats/ProtobufReader.cpp diff --git a/dbms/src/Formats/ProtobufReader.h b/dbms/Formats/ProtobufReader.h similarity index 100% rename from dbms/src/Formats/ProtobufReader.h rename to dbms/Formats/ProtobufReader.h diff --git a/dbms/src/Formats/ProtobufSchemas.cpp b/dbms/Formats/ProtobufSchemas.cpp similarity index 100% rename from dbms/src/Formats/ProtobufSchemas.cpp rename to dbms/Formats/ProtobufSchemas.cpp diff --git a/dbms/src/Formats/ProtobufSchemas.h b/dbms/Formats/ProtobufSchemas.h similarity index 100% rename from dbms/src/Formats/ProtobufSchemas.h rename to dbms/Formats/ProtobufSchemas.h diff --git a/dbms/src/Formats/ProtobufWriter.cpp 
b/dbms/Formats/ProtobufWriter.cpp similarity index 100% rename from dbms/src/Formats/ProtobufWriter.cpp rename to dbms/Formats/ProtobufWriter.cpp diff --git a/dbms/src/Formats/ProtobufWriter.h b/dbms/Formats/ProtobufWriter.h similarity index 100% rename from dbms/src/Formats/ProtobufWriter.h rename to dbms/Formats/ProtobufWriter.h diff --git a/dbms/src/Formats/config_formats.h.in b/dbms/Formats/config_formats.h.in similarity index 100% rename from dbms/src/Formats/config_formats.h.in rename to dbms/Formats/config_formats.h.in diff --git a/dbms/src/Formats/tests/CMakeLists.txt b/dbms/Formats/tests/CMakeLists.txt similarity index 100% rename from dbms/src/Formats/tests/CMakeLists.txt rename to dbms/Formats/tests/CMakeLists.txt diff --git a/dbms/src/Formats/tests/tab_separated_streams.cpp b/dbms/Formats/tests/tab_separated_streams.cpp similarity index 100% rename from dbms/src/Formats/tests/tab_separated_streams.cpp rename to dbms/Formats/tests/tab_separated_streams.cpp diff --git a/dbms/src/Formats/verbosePrintString.cpp b/dbms/Formats/verbosePrintString.cpp similarity index 100% rename from dbms/src/Formats/verbosePrintString.cpp rename to dbms/Formats/verbosePrintString.cpp diff --git a/dbms/src/Formats/verbosePrintString.h b/dbms/Formats/verbosePrintString.h similarity index 100% rename from dbms/src/Formats/verbosePrintString.h rename to dbms/Formats/verbosePrintString.h diff --git a/dbms/src/Functions/CMakeLists.txt b/dbms/Functions/CMakeLists.txt similarity index 100% rename from dbms/src/Functions/CMakeLists.txt rename to dbms/Functions/CMakeLists.txt diff --git a/dbms/src/Functions/CRC.cpp b/dbms/Functions/CRC.cpp similarity index 100% rename from dbms/src/Functions/CRC.cpp rename to dbms/Functions/CRC.cpp diff --git a/dbms/src/Functions/CustomWeekTransforms.h b/dbms/Functions/CustomWeekTransforms.h similarity index 100% rename from dbms/src/Functions/CustomWeekTransforms.h rename to dbms/Functions/CustomWeekTransforms.h diff --git a/dbms/src/Functions/DateTimeTransforms.h b/dbms/Functions/DateTimeTransforms.h similarity index 100% rename from dbms/src/Functions/DateTimeTransforms.h rename to dbms/Functions/DateTimeTransforms.h diff --git a/dbms/src/Functions/DivisionUtils.h b/dbms/Functions/DivisionUtils.h similarity index 100% rename from dbms/src/Functions/DivisionUtils.h rename to dbms/Functions/DivisionUtils.h diff --git a/dbms/src/Functions/DummyJSONParser.h b/dbms/Functions/DummyJSONParser.h similarity index 100% rename from dbms/src/Functions/DummyJSONParser.h rename to dbms/Functions/DummyJSONParser.h diff --git a/dbms/src/Functions/EmptyImpl.h b/dbms/Functions/EmptyImpl.h similarity index 100% rename from dbms/src/Functions/EmptyImpl.h rename to dbms/Functions/EmptyImpl.h diff --git a/dbms/src/Functions/FunctionBase64Conversion.h b/dbms/Functions/FunctionBase64Conversion.h similarity index 100% rename from dbms/src/Functions/FunctionBase64Conversion.h rename to dbms/Functions/FunctionBase64Conversion.h diff --git a/dbms/src/Functions/FunctionBinaryArithmetic.h b/dbms/Functions/FunctionBinaryArithmetic.h similarity index 100% rename from dbms/src/Functions/FunctionBinaryArithmetic.h rename to dbms/Functions/FunctionBinaryArithmetic.h diff --git a/dbms/src/Functions/FunctionBitTestMany.h b/dbms/Functions/FunctionBitTestMany.h similarity index 100% rename from dbms/src/Functions/FunctionBitTestMany.h rename to dbms/Functions/FunctionBitTestMany.h diff --git a/dbms/src/Functions/FunctionCustomWeekToSomething.h b/dbms/Functions/FunctionCustomWeekToSomething.h similarity index 
100% rename from dbms/src/Functions/FunctionCustomWeekToSomething.h rename to dbms/Functions/FunctionCustomWeekToSomething.h diff --git a/dbms/src/Functions/FunctionDateOrDateTimeAddInterval.h b/dbms/Functions/FunctionDateOrDateTimeAddInterval.h similarity index 100% rename from dbms/src/Functions/FunctionDateOrDateTimeAddInterval.h rename to dbms/Functions/FunctionDateOrDateTimeAddInterval.h diff --git a/dbms/src/Functions/FunctionDateOrDateTimeToSomething.h b/dbms/Functions/FunctionDateOrDateTimeToSomething.h similarity index 100% rename from dbms/src/Functions/FunctionDateOrDateTimeToSomething.h rename to dbms/Functions/FunctionDateOrDateTimeToSomething.h diff --git a/dbms/src/Functions/FunctionFQDN.cpp b/dbms/Functions/FunctionFQDN.cpp similarity index 100% rename from dbms/src/Functions/FunctionFQDN.cpp rename to dbms/Functions/FunctionFQDN.cpp diff --git a/dbms/src/Functions/FunctionFactory.cpp b/dbms/Functions/FunctionFactory.cpp similarity index 100% rename from dbms/src/Functions/FunctionFactory.cpp rename to dbms/Functions/FunctionFactory.cpp diff --git a/dbms/src/Functions/FunctionFactory.h b/dbms/Functions/FunctionFactory.h similarity index 100% rename from dbms/src/Functions/FunctionFactory.h rename to dbms/Functions/FunctionFactory.h diff --git a/dbms/src/Functions/FunctionHelpers.cpp b/dbms/Functions/FunctionHelpers.cpp similarity index 100% rename from dbms/src/Functions/FunctionHelpers.cpp rename to dbms/Functions/FunctionHelpers.cpp diff --git a/dbms/src/Functions/FunctionHelpers.h b/dbms/Functions/FunctionHelpers.h similarity index 100% rename from dbms/src/Functions/FunctionHelpers.h rename to dbms/Functions/FunctionHelpers.h diff --git a/dbms/src/Functions/FunctionIfBase.h b/dbms/Functions/FunctionIfBase.h similarity index 100% rename from dbms/src/Functions/FunctionIfBase.h rename to dbms/Functions/FunctionIfBase.h diff --git a/dbms/src/Functions/FunctionJoinGet.cpp b/dbms/Functions/FunctionJoinGet.cpp similarity index 100% rename from dbms/src/Functions/FunctionJoinGet.cpp rename to dbms/Functions/FunctionJoinGet.cpp diff --git a/dbms/src/Functions/FunctionJoinGet.h b/dbms/Functions/FunctionJoinGet.h similarity index 100% rename from dbms/src/Functions/FunctionJoinGet.h rename to dbms/Functions/FunctionJoinGet.h diff --git a/dbms/src/Functions/FunctionMathBinaryFloat64.h b/dbms/Functions/FunctionMathBinaryFloat64.h similarity index 100% rename from dbms/src/Functions/FunctionMathBinaryFloat64.h rename to dbms/Functions/FunctionMathBinaryFloat64.h diff --git a/dbms/src/Functions/FunctionMathConstFloat64.h b/dbms/Functions/FunctionMathConstFloat64.h similarity index 100% rename from dbms/src/Functions/FunctionMathConstFloat64.h rename to dbms/Functions/FunctionMathConstFloat64.h diff --git a/dbms/src/Functions/FunctionMathUnary.h b/dbms/Functions/FunctionMathUnary.h similarity index 100% rename from dbms/src/Functions/FunctionMathUnary.h rename to dbms/Functions/FunctionMathUnary.h diff --git a/dbms/src/Functions/FunctionNumericPredicate.h b/dbms/Functions/FunctionNumericPredicate.h similarity index 100% rename from dbms/src/Functions/FunctionNumericPredicate.h rename to dbms/Functions/FunctionNumericPredicate.h diff --git a/dbms/src/Functions/FunctionStartsEndsWith.h b/dbms/Functions/FunctionStartsEndsWith.h similarity index 100% rename from dbms/src/Functions/FunctionStartsEndsWith.h rename to dbms/Functions/FunctionStartsEndsWith.h diff --git a/dbms/src/Functions/FunctionStringOrArrayToT.h b/dbms/Functions/FunctionStringOrArrayToT.h similarity index 100% rename from 
dbms/src/Functions/FunctionStringOrArrayToT.h rename to dbms/Functions/FunctionStringOrArrayToT.h diff --git a/dbms/src/Functions/FunctionStringToString.h b/dbms/Functions/FunctionStringToString.h similarity index 100% rename from dbms/src/Functions/FunctionStringToString.h rename to dbms/Functions/FunctionStringToString.h diff --git a/dbms/src/Functions/FunctionUnaryArithmetic.h b/dbms/Functions/FunctionUnaryArithmetic.h similarity index 100% rename from dbms/src/Functions/FunctionUnaryArithmetic.h rename to dbms/Functions/FunctionUnaryArithmetic.h diff --git a/dbms/src/Functions/FunctionsBitmap.cpp b/dbms/Functions/FunctionsBitmap.cpp similarity index 100% rename from dbms/src/Functions/FunctionsBitmap.cpp rename to dbms/Functions/FunctionsBitmap.cpp diff --git a/dbms/src/Functions/FunctionsBitmap.h b/dbms/Functions/FunctionsBitmap.h similarity index 100% rename from dbms/src/Functions/FunctionsBitmap.h rename to dbms/Functions/FunctionsBitmap.h diff --git a/dbms/src/Functions/FunctionsCoding.cpp b/dbms/Functions/FunctionsCoding.cpp similarity index 100% rename from dbms/src/Functions/FunctionsCoding.cpp rename to dbms/Functions/FunctionsCoding.cpp diff --git a/dbms/src/Functions/FunctionsCoding.h b/dbms/Functions/FunctionsCoding.h similarity index 100% rename from dbms/src/Functions/FunctionsCoding.h rename to dbms/Functions/FunctionsCoding.h diff --git a/dbms/src/Functions/FunctionsComparison.h b/dbms/Functions/FunctionsComparison.h similarity index 100% rename from dbms/src/Functions/FunctionsComparison.h rename to dbms/Functions/FunctionsComparison.h diff --git a/dbms/src/Functions/FunctionsConsistentHashing.h b/dbms/Functions/FunctionsConsistentHashing.h similarity index 100% rename from dbms/src/Functions/FunctionsConsistentHashing.h rename to dbms/Functions/FunctionsConsistentHashing.h diff --git a/dbms/src/Functions/FunctionsConversion.cpp b/dbms/Functions/FunctionsConversion.cpp similarity index 100% rename from dbms/src/Functions/FunctionsConversion.cpp rename to dbms/Functions/FunctionsConversion.cpp diff --git a/dbms/src/Functions/FunctionsConversion.h b/dbms/Functions/FunctionsConversion.h similarity index 100% rename from dbms/src/Functions/FunctionsConversion.h rename to dbms/Functions/FunctionsConversion.h diff --git a/dbms/src/Functions/FunctionsEmbeddedDictionaries.cpp b/dbms/Functions/FunctionsEmbeddedDictionaries.cpp similarity index 100% rename from dbms/src/Functions/FunctionsEmbeddedDictionaries.cpp rename to dbms/Functions/FunctionsEmbeddedDictionaries.cpp diff --git a/dbms/src/Functions/FunctionsEmbeddedDictionaries.h b/dbms/Functions/FunctionsEmbeddedDictionaries.h similarity index 100% rename from dbms/src/Functions/FunctionsEmbeddedDictionaries.h rename to dbms/Functions/FunctionsEmbeddedDictionaries.h diff --git a/dbms/src/Functions/FunctionsExternalDictionaries.cpp b/dbms/Functions/FunctionsExternalDictionaries.cpp similarity index 100% rename from dbms/src/Functions/FunctionsExternalDictionaries.cpp rename to dbms/Functions/FunctionsExternalDictionaries.cpp diff --git a/dbms/src/Functions/FunctionsExternalDictionaries.h b/dbms/Functions/FunctionsExternalDictionaries.h similarity index 100% rename from dbms/src/Functions/FunctionsExternalDictionaries.h rename to dbms/Functions/FunctionsExternalDictionaries.h diff --git a/dbms/src/Functions/FunctionsExternalModels.cpp b/dbms/Functions/FunctionsExternalModels.cpp similarity index 100% rename from dbms/src/Functions/FunctionsExternalModels.cpp rename to dbms/Functions/FunctionsExternalModels.cpp diff --git 
a/dbms/src/Functions/FunctionsExternalModels.h b/dbms/Functions/FunctionsExternalModels.h similarity index 100% rename from dbms/src/Functions/FunctionsExternalModels.h rename to dbms/Functions/FunctionsExternalModels.h diff --git a/dbms/src/Functions/FunctionsFormatting.cpp b/dbms/Functions/FunctionsFormatting.cpp similarity index 100% rename from dbms/src/Functions/FunctionsFormatting.cpp rename to dbms/Functions/FunctionsFormatting.cpp diff --git a/dbms/src/Functions/FunctionsFormatting.h b/dbms/Functions/FunctionsFormatting.h similarity index 100% rename from dbms/src/Functions/FunctionsFormatting.h rename to dbms/Functions/FunctionsFormatting.h diff --git a/dbms/src/Functions/FunctionsHashing.cpp b/dbms/Functions/FunctionsHashing.cpp similarity index 100% rename from dbms/src/Functions/FunctionsHashing.cpp rename to dbms/Functions/FunctionsHashing.cpp diff --git a/dbms/src/Functions/FunctionsHashing.h b/dbms/Functions/FunctionsHashing.h similarity index 100% rename from dbms/src/Functions/FunctionsHashing.h rename to dbms/Functions/FunctionsHashing.h diff --git a/dbms/src/Functions/FunctionsJSON.cpp b/dbms/Functions/FunctionsJSON.cpp similarity index 100% rename from dbms/src/Functions/FunctionsJSON.cpp rename to dbms/Functions/FunctionsJSON.cpp diff --git a/dbms/src/Functions/FunctionsJSON.h b/dbms/Functions/FunctionsJSON.h similarity index 100% rename from dbms/src/Functions/FunctionsJSON.h rename to dbms/Functions/FunctionsJSON.h diff --git a/dbms/src/Functions/FunctionsLogical.cpp b/dbms/Functions/FunctionsLogical.cpp similarity index 100% rename from dbms/src/Functions/FunctionsLogical.cpp rename to dbms/Functions/FunctionsLogical.cpp diff --git a/dbms/src/Functions/FunctionsLogical.h b/dbms/Functions/FunctionsLogical.h similarity index 100% rename from dbms/src/Functions/FunctionsLogical.h rename to dbms/Functions/FunctionsLogical.h diff --git a/dbms/src/Functions/FunctionsMiscellaneous.h b/dbms/Functions/FunctionsMiscellaneous.h similarity index 100% rename from dbms/src/Functions/FunctionsMiscellaneous.h rename to dbms/Functions/FunctionsMiscellaneous.h diff --git a/dbms/src/Functions/FunctionsMultiStringPosition.h b/dbms/Functions/FunctionsMultiStringPosition.h similarity index 100% rename from dbms/src/Functions/FunctionsMultiStringPosition.h rename to dbms/Functions/FunctionsMultiStringPosition.h diff --git a/dbms/src/Functions/FunctionsMultiStringSearch.h b/dbms/Functions/FunctionsMultiStringSearch.h similarity index 100% rename from dbms/src/Functions/FunctionsMultiStringSearch.h rename to dbms/Functions/FunctionsMultiStringSearch.h diff --git a/dbms/src/Functions/FunctionsRandom.cpp b/dbms/Functions/FunctionsRandom.cpp similarity index 100% rename from dbms/src/Functions/FunctionsRandom.cpp rename to dbms/Functions/FunctionsRandom.cpp diff --git a/dbms/src/Functions/FunctionsRandom.h b/dbms/Functions/FunctionsRandom.h similarity index 100% rename from dbms/src/Functions/FunctionsRandom.h rename to dbms/Functions/FunctionsRandom.h diff --git a/dbms/src/Functions/FunctionsRound.cpp b/dbms/Functions/FunctionsRound.cpp similarity index 100% rename from dbms/src/Functions/FunctionsRound.cpp rename to dbms/Functions/FunctionsRound.cpp diff --git a/dbms/src/Functions/FunctionsRound.h b/dbms/Functions/FunctionsRound.h similarity index 100% rename from dbms/src/Functions/FunctionsRound.h rename to dbms/Functions/FunctionsRound.h diff --git a/dbms/src/Functions/FunctionsStringArray.cpp b/dbms/Functions/FunctionsStringArray.cpp similarity index 100% rename from 
dbms/src/Functions/FunctionsStringArray.cpp rename to dbms/Functions/FunctionsStringArray.cpp diff --git a/dbms/src/Functions/FunctionsStringArray.h b/dbms/Functions/FunctionsStringArray.h similarity index 100% rename from dbms/src/Functions/FunctionsStringArray.h rename to dbms/Functions/FunctionsStringArray.h diff --git a/dbms/src/Functions/FunctionsStringRegex.cpp b/dbms/Functions/FunctionsStringRegex.cpp similarity index 100% rename from dbms/src/Functions/FunctionsStringRegex.cpp rename to dbms/Functions/FunctionsStringRegex.cpp diff --git a/dbms/src/Functions/FunctionsStringRegex.h b/dbms/Functions/FunctionsStringRegex.h similarity index 100% rename from dbms/src/Functions/FunctionsStringRegex.h rename to dbms/Functions/FunctionsStringRegex.h
diff --git a/dbms/Functions/FunctionsStringSearch.cpp b/dbms/Functions/FunctionsStringSearch.cpp
new file mode 100644
index 00000000000..8279ded5f81
--- /dev/null
+++ b/dbms/Functions/FunctionsStringSearch.cpp
@@ -0,0 +1,707 @@
+#include "FunctionsStringSearch.h"
+
+#include <algorithm>
+#include <memory>
+#include <string>
+#include <vector>
+#include <Columns/ColumnString.h>
+#include <Common/UTF8Helpers.h>
+#include <Common/Volnitsky.h>
+#include <Core/Types.h>
+#include <DataTypes/DataTypesNumber.h>
+#include <Functions/FunctionFactory.h>
+#include <Poco/UTF8String.h>
+
+namespace DB
+{
+namespace ErrorCodes
+{
+    extern const int ILLEGAL_COLUMN;
+}
+/** Implementation details for functions of the 'position' family, depending on ASCII/UTF-8 and case sensitivity.
+  */
+struct PositionCaseSensitiveASCII
+{
+    /// For searching a single substring inside a big-enough contiguous chunk of data. Could have slightly expensive initialization.
+    using SearcherInBigHaystack = Volnitsky;
+
+    /// For searching many substrings in one string.
+    using MultiSearcherInBigHaystack = MultiVolnitsky;
+
+    /// For searching a single substring that is different each time. This object is created for each row of data. It must have cheap initialization.
+    using SearcherInSmallHaystack = LibCASCIICaseSensitiveStringSearcher;
+
+    static SearcherInBigHaystack createSearcherInBigHaystack(const char * needle_data, size_t needle_size, size_t haystack_size_hint)
+    {
+        return SearcherInBigHaystack(needle_data, needle_size, haystack_size_hint);
+    }
+
+    static SearcherInSmallHaystack createSearcherInSmallHaystack(const char * needle_data, size_t needle_size)
+    {
+        return SearcherInSmallHaystack(needle_data, needle_size);
+    }
+
+    static MultiSearcherInBigHaystack createMultiSearcherInBigHaystack(const std::vector<StringRef> & needles)
+    {
+        return MultiSearcherInBigHaystack(needles);
+    }
+
+    /// Number of code points between 'begin' and 'end' (this has different behaviour for ASCII and UTF-8).
+    static size_t countChars(const char * begin, const char * end) { return end - begin; }
+
+    /// Convert string to lowercase. Only for case-insensitive search.
+    /// Implementation is permitted to be inefficient because it is called for a single string.
+    static void toLowerIfNeed(std::string &) { }
+};
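The four PositionCase* traits structs in this file differ chiefly in their searcher types and in countChars: the ASCII variants count bytes, the UTF-8 variants count code points by skipping continuation octets. A minimal stand-alone sketch of the two countChars policies (hypothetical names, not part of the patch):

#include <cstddef>
#include <cstdint>
#include <iostream>

// Hypothetical stand-ins for the two countChars policies:
// ASCII counts bytes, UTF-8 counts non-continuation octets (code points).
struct AsciiCountChars
{
    static size_t countChars(const char * begin, const char * end) { return end - begin; }
};

struct Utf8CountChars
{
    static size_t countChars(const char * begin, const char * end)
    {
        size_t res = 0;
        for (const char * it = begin; it != end; ++it)
            if ((static_cast<uint8_t>(*it) & 0xC0) != 0x80) // not a continuation octet
                ++res;
        return res;
    }
};

int main()
{
    const char s[] = "\xD0\xB4x"; // UTF-8 for Cyrillic 'd' (2 bytes), then 'x'
    std::cout << AsciiCountChars::countChars(s, s + 2) << '\n'; // 2 (bytes)
    std::cout << Utf8CountChars::countChars(s, s + 2) << '\n';  // 1 (code point)
}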
+
+struct PositionCaseInsensitiveASCII
+{
+    /// `Volnitsky` is not used here, because one person has measured that this is better. It would be good to question that.
+    using SearcherInBigHaystack = ASCIICaseInsensitiveStringSearcher;
+    using MultiSearcherInBigHaystack = MultiVolnitskyCaseInsensitive;
+    using SearcherInSmallHaystack = LibCASCIICaseInsensitiveStringSearcher;
+
+    static SearcherInBigHaystack createSearcherInBigHaystack(const char * needle_data, size_t needle_size, size_t /*haystack_size_hint*/)
+    {
+        return SearcherInBigHaystack(needle_data, needle_size);
+    }
+
+    static SearcherInSmallHaystack createSearcherInSmallHaystack(const char * needle_data, size_t needle_size)
+    {
+        return SearcherInSmallHaystack(needle_data, needle_size);
+    }
+
+    static MultiSearcherInBigHaystack createMultiSearcherInBigHaystack(const std::vector<StringRef> & needles)
+    {
+        return MultiSearcherInBigHaystack(needles);
+    }
+
+    static size_t countChars(const char * begin, const char * end) { return end - begin; }
+
+    static void toLowerIfNeed(std::string & s) { std::transform(std::begin(s), std::end(s), std::begin(s), tolower); }
+};
+
+struct PositionCaseSensitiveUTF8
+{
+    using SearcherInBigHaystack = VolnitskyUTF8;
+    using MultiSearcherInBigHaystack = MultiVolnitskyUTF8;
+    using SearcherInSmallHaystack = LibCASCIICaseSensitiveStringSearcher;
+
+    static SearcherInBigHaystack createSearcherInBigHaystack(const char * needle_data, size_t needle_size, size_t haystack_size_hint)
+    {
+        return SearcherInBigHaystack(needle_data, needle_size, haystack_size_hint);
+    }
+
+    static SearcherInSmallHaystack createSearcherInSmallHaystack(const char * needle_data, size_t needle_size)
+    {
+        return SearcherInSmallHaystack(needle_data, needle_size);
+    }
+
+    static MultiSearcherInBigHaystack createMultiSearcherInBigHaystack(const std::vector<StringRef> & needles)
+    {
+        return MultiSearcherInBigHaystack(needles);
+    }
+
+    static size_t countChars(const char * begin, const char * end)
+    {
+        size_t res = 0;
+        for (auto it = begin; it != end; ++it)
+            if (!UTF8::isContinuationOctet(static_cast<UInt8>(*it)))
+                ++res;
+        return res;
+    }
+
+    static void toLowerIfNeed(std::string &) { }
+};
+
+struct PositionCaseInsensitiveUTF8
+{
+    using SearcherInBigHaystack = VolnitskyCaseInsensitiveUTF8;
+    using MultiSearcherInBigHaystack = MultiVolnitskyCaseInsensitiveUTF8;
+    using SearcherInSmallHaystack = UTF8CaseInsensitiveStringSearcher; /// TODO Very suboptimal.
+
+    static SearcherInBigHaystack createSearcherInBigHaystack(const char * needle_data, size_t needle_size, size_t haystack_size_hint)
+    {
+        return SearcherInBigHaystack(needle_data, needle_size, haystack_size_hint);
+    }
+
+    static SearcherInSmallHaystack createSearcherInSmallHaystack(const char * needle_data, size_t needle_size)
+    {
+        return SearcherInSmallHaystack(needle_data, needle_size);
+    }
+
+    static MultiSearcherInBigHaystack createMultiSearcherInBigHaystack(const std::vector<StringRef> & needles)
+    {
+        return MultiSearcherInBigHaystack(needles);
+    }
+
+    static size_t countChars(const char * begin, const char * end)
+    {
+        size_t res = 0;
+        for (auto it = begin; it != end; ++it)
+            if (!UTF8::isContinuationOctet(static_cast<UInt8>(*it)))
+                ++res;
+        return res;
+    }
+
+    static void toLowerIfNeed(std::string & s) { Poco::UTF8::toLowerInPlace(s); }
+};
+
+template <typename Impl>
+struct PositionImpl
+{
+    static constexpr bool use_default_implementation_for_constants = false;
+
+    using ResultType = UInt64;
+
+    /// Find one substring in many strings.
+    static void vectorConstant(
+        const ColumnString::Chars & data, const ColumnString::Offsets & offsets, const std::string & needle, PaddedPODArray<UInt64> & res)
+    {
+        const UInt8 * begin = data.data();
+        const UInt8 * pos = begin;
+        const UInt8 * end = pos + data.size();
+
+        /// Current index in the array of strings.
+        size_t i = 0;
+
+        typename Impl::SearcherInBigHaystack searcher = Impl::createSearcherInBigHaystack(needle.data(), needle.size(), end - pos);
+
+        /// We will search for the next occurrence in all strings at once.
+        while (pos < end && end != (pos = searcher.search(pos, end - pos)))
+        {
+            /// Determine which index it refers to.
+            while (begin + offsets[i] <= pos)
+            {
+                res[i] = 0;
+                ++i;
+            }
+
+            /// We check that the entry does not pass through the boundaries of strings.
+            if (pos + needle.size() < begin + offsets[i])
+                res[i] = 1 + Impl::countChars(reinterpret_cast<const char *>(begin + offsets[i - 1]), reinterpret_cast<const char *>(pos));
+            else
+                res[i] = 0;
+
+            pos = begin + offsets[i];
+            ++i;
+        }
+
+        if (i < res.size())
+            memset(&res[i], 0, (res.size() - i) * sizeof(res[0]));
+    }
+
+    /// Search for a substring in a string.
+    static void constantConstant(std::string data, std::string needle, UInt64 & res)
+    {
+        Impl::toLowerIfNeed(data);
+        Impl::toLowerIfNeed(needle);
+
+        res = data.find(needle);
+        if (res == std::string::npos)
+            res = 0;
+        else
+            res = 1 + Impl::countChars(data.data(), data.data() + res);
+    }
+
+    /// Search each row for a different single substring inside a different string.
+    static void vectorVector(
+        const ColumnString::Chars & haystack_data,
+        const ColumnString::Offsets & haystack_offsets,
+        const ColumnString::Chars & needle_data,
+        const ColumnString::Offsets & needle_offsets,
+        PaddedPODArray<UInt64> & res)
+    {
+        ColumnString::Offset prev_haystack_offset = 0;
+        ColumnString::Offset prev_needle_offset = 0;
+
+        size_t size = haystack_offsets.size();
+
+        for (size_t i = 0; i < size; ++i)
+        {
+            size_t needle_size = needle_offsets[i] - prev_needle_offset - 1;
+            size_t haystack_size = haystack_offsets[i] - prev_haystack_offset - 1;
+
+            if (0 == needle_size)
+            {
+                /// An empty string is always at the very beginning of `haystack`.
+                res[i] = 1;
+            }
+            else
+            {
+                /// It is assumed that the StringSearcher is not very difficult to initialize.
+                typename Impl::SearcherInSmallHaystack searcher = Impl::createSearcherInSmallHaystack(
+                    reinterpret_cast<const char *>(&needle_data[prev_needle_offset]),
+                    needle_offsets[i] - prev_needle_offset - 1); /// zero byte at the end
+
+                /// searcher returns a pointer to the found substring or to the end of `haystack`.
+                size_t pos = searcher.search(&haystack_data[prev_haystack_offset], &haystack_data[haystack_offsets[i] - 1])
+                    - &haystack_data[prev_haystack_offset];
+
+                if (pos != haystack_size)
+                {
+                    res[i] = 1
+                        + Impl::countChars(
+                            reinterpret_cast<const char *>(&haystack_data[prev_haystack_offset]),
+                            reinterpret_cast<const char *>(&haystack_data[prev_haystack_offset + pos]));
+                }
+                else
+                    res[i] = 0;
+            }
+
+            prev_haystack_offset = haystack_offsets[i];
+            prev_needle_offset = needle_offsets[i];
+        }
+    }
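constantConstant above makes the return convention of the position family concrete: a 1-based index (in code points, via Impl::countChars) on a hit, 0 when the needle is absent. A self-contained sketch of the same logic for the case-sensitive ASCII policy (hypothetical names, not part of the patch):

#include <cstdint>
#include <iostream>
#include <string>

// Mirrors the constantConstant convention: 1-based byte position of the
// first occurrence, or 0 if the needle does not occur.
static uint64_t position_ascii(const std::string & haystack, const std::string & needle)
{
    size_t pos = haystack.find(needle);
    return pos == std::string::npos ? 0 : pos + 1;
}

int main()
{
    std::cout << position_ascii("Hello, world!", "world") << '\n'; // 8
    std::cout << position_ascii("Hello, world!", "xyz") << '\n';   // 0
}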
+
+    /// Find many substrings in a single string.
+    static void constantVector(
+        const String & haystack,
+        const ColumnString::Chars & needle_data,
+        const ColumnString::Offsets & needle_offsets,
+        PaddedPODArray<UInt64> & res)
+    {
+        // NOTE You could use haystack indexing. But this is a rare case.
+
+        ColumnString::Offset prev_needle_offset = 0;
+
+        size_t size = needle_offsets.size();
+
+        for (size_t i = 0; i < size; ++i)
+        {
+            size_t needle_size = needle_offsets[i] - prev_needle_offset - 1;
+
+            if (0 == needle_size)
+            {
+                res[i] = 1;
+            }
+            else
+            {
+                typename Impl::SearcherInSmallHaystack searcher = Impl::createSearcherInSmallHaystack(
+                    reinterpret_cast<const char *>(&needle_data[prev_needle_offset]), needle_offsets[i] - prev_needle_offset - 1);
+
+                size_t pos = searcher.search(
+                    reinterpret_cast<const UInt8 *>(haystack.data()),
+                    reinterpret_cast<const UInt8 *>(haystack.data()) + haystack.size())
+                    - reinterpret_cast<const UInt8 *>(haystack.data());
+
+                if (pos != haystack.size())
+                {
+                    res[i] = 1 + Impl::countChars(haystack.data(), haystack.data() + pos);
+                }
+                else
+                    res[i] = 0;
+            }
+
+            prev_needle_offset = needle_offsets[i];
+        }
+    }
+
+    template <typename... Args>
+    static void vectorFixedConstant(Args &&...)
+    {
+        throw Exception("Functions 'position' don't support FixedString haystack argument", ErrorCodes::ILLEGAL_COLUMN);
+    }
+};
+
+template <typename Impl>
+struct MultiSearchAllPositionsImpl
+{
+    using ResultType = UInt64;
+
+    static void vectorConstant(
+        const ColumnString::Chars & haystack_data,
+        const ColumnString::Offsets & haystack_offsets,
+        const std::vector<StringRef> & needles,
+        PaddedPODArray<UInt64> & res)
+    {
+        auto res_callback = [](const UInt8 * start, const UInt8 * end) -> UInt64
+        {
+            return 1 + Impl::countChars(reinterpret_cast<const char *>(start), reinterpret_cast<const char *>(end));
+        };
+
+        auto searcher = Impl::createMultiSearcherInBigHaystack(needles);
+
+        const size_t haystack_string_size = haystack_offsets.size();
+        const size_t needles_size = needles.size();
+
+        /// Some entries can stay uninitialized after the search itself, so zero-fill first.
+        std::fill(res.begin(), res.end(), 0);
+
+        while (searcher.hasMoreToSearch())
+        {
+            size_t prev_offset = 0;
+            for (size_t j = 0, from = 0; j < haystack_string_size; ++j, from += needles_size)
+            {
+                const auto * haystack = &haystack_data[prev_offset];
+                const auto * haystack_end = haystack + haystack_offsets[j] - prev_offset - 1;
+                searcher.searchOneAll(haystack, haystack_end, res.data() + from, res_callback);
+                prev_offset = haystack_offsets[j];
+            }
+        }
+    }
+};
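MultiSearchAllPositionsImpl writes its output as a flat row-major matrix: for each haystack row, one 1-based first position per needle, with 0 for needles that do not occur. A naive model of that output layout (hypothetical names, not part of the patch):

#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

// Naive model of the multiSearchAllPositions output layout:
// res[i * needles.size() + j] is the 1-based first position of needle j
// in haystack i, or 0 if absent.
static std::vector<uint64_t> multi_search_all_positions(
    const std::vector<std::string> & haystacks, const std::vector<std::string> & needles)
{
    std::vector<uint64_t> res(haystacks.size() * needles.size(), 0);
    for (size_t i = 0; i < haystacks.size(); ++i)
        for (size_t j = 0; j < needles.size(); ++j)
        {
            size_t pos = haystacks[i].find(needles[j]);
            if (pos != std::string::npos)
                res[i * needles.size() + j] = pos + 1;
        }
    return res;
}

int main()
{
    for (uint64_t v : multi_search_all_positions({"abcdef"}, {"cd", "zz", "a"}))
        std::cout << v << ' '; // 3 0 1
    std::cout << '\n';
}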
+
+template <typename Impl>
+struct MultiSearchImpl
+{
+    using ResultType = UInt8;
+    static constexpr bool is_using_hyperscan = false;
+    /// Flag that tells whether offsets are used for the output; it determines
+    /// whether the function returns ColumnVector or ColumnArray.
+    static constexpr bool is_column_array = false;
+    static auto getReturnType() { return std::make_shared<DataTypeNumber<ResultType>>(); }
+
+    static void vectorConstant(
+        const ColumnString::Chars & haystack_data,
+        const ColumnString::Offsets & haystack_offsets,
+        const std::vector<StringRef> & needles,
+        PaddedPODArray<UInt8> & res,
+        [[maybe_unused]] PaddedPODArray<UInt64> & offsets)
+    {
+        auto searcher = Impl::createMultiSearcherInBigHaystack(needles);
+        const size_t haystack_string_size = haystack_offsets.size();
+        res.resize(haystack_string_size);
+        size_t iteration = 0;
+        while (searcher.hasMoreToSearch())
+        {
+            size_t prev_offset = 0;
+            for (size_t j = 0; j < haystack_string_size; ++j)
+            {
+                const auto * haystack = &haystack_data[prev_offset];
+                const auto * haystack_end = haystack + haystack_offsets[j] - prev_offset - 1;
+                if (iteration == 0 || !res[j])
+                    res[j] = searcher.searchOne(haystack, haystack_end);
+                prev_offset = haystack_offsets[j];
+            }
+            ++iteration;
+        }
+    }
+};
+
+template <typename Impl>
+struct MultiSearchFirstPositionImpl
+{
+    using ResultType = UInt64;
+    static constexpr bool is_using_hyperscan = false;
+    /// Flag that tells whether offsets are used for the output; it determines
+    /// whether the function returns ColumnVector or ColumnArray.
+    static constexpr bool is_column_array = false;
+    static auto getReturnType() { return std::make_shared<DataTypeNumber<ResultType>>(); }
+
+    static void vectorConstant(
+        const ColumnString::Chars & haystack_data,
+        const ColumnString::Offsets & haystack_offsets,
+        const std::vector<StringRef> & needles,
+        PaddedPODArray<UInt64> & res,
+        [[maybe_unused]] PaddedPODArray<UInt64> & offsets)
+    {
+        auto res_callback = [](const UInt8 * start, const UInt8 * end) -> UInt64
+        {
+            return 1 + Impl::countChars(reinterpret_cast<const char *>(start), reinterpret_cast<const char *>(end));
+        };
+        auto searcher = Impl::createMultiSearcherInBigHaystack(needles);
+        const size_t haystack_string_size = haystack_offsets.size();
+        res.resize(haystack_string_size);
+        size_t iteration = 0;
+        while (searcher.hasMoreToSearch())
+        {
+            size_t prev_offset = 0;
+            for (size_t j = 0; j < haystack_string_size; ++j)
+            {
+                const auto * haystack = &haystack_data[prev_offset];
+                const auto * haystack_end = haystack + haystack_offsets[j] - prev_offset - 1;
+                if (iteration == 0 || res[j] == 0)
+                    res[j] = searcher.searchOneFirstPosition(haystack, haystack_end, res_callback);
+                else
+                {
+                    UInt64 result = searcher.searchOneFirstPosition(haystack, haystack_end, res_callback);
+                    if (result != 0)
+                        res[j] = std::min(result, res[j]);
+                }
+                prev_offset = haystack_offsets[j];
+            }
+            ++iteration;
+        }
+    }
+};
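Because the multi-searcher may need several passes over the needle set (hasMoreToSearch), the first-position variant above keeps a running minimum across iterations. The same result, computed naively in one pass over the needles (hypothetical names, not part of the patch):

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

// Naive model of multiSearchFirstPosition: the smallest 1-based first
// position over all needles that occur in the haystack, or 0 if none do.
static uint64_t multi_search_first_position(
    const std::string & haystack, const std::vector<std::string> & needles)
{
    uint64_t best = 0;
    for (const auto & needle : needles)
    {
        size_t pos = haystack.find(needle);
        if (pos != std::string::npos)
        {
            uint64_t candidate = pos + 1;
            best = best == 0 ? candidate : std::min(best, candidate);
        }
    }
    return best;
}

int main()
{
    std::cout << multi_search_first_position("abcdef", {"ef", "bc"}) << '\n'; // 2
    std::cout << multi_search_first_position("abcdef", {"zz"}) << '\n';       // 0
}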
+
+template <typename Impl>
+struct MultiSearchFirstIndexImpl
+{
+    using ResultType = UInt64;
+    static constexpr bool is_using_hyperscan = false;
+    /// Indicates whether offsets are used for the output, i.e. whether the
+    /// function returns a ColumnVector or a ColumnArray.
+    static constexpr bool is_column_array = false;
+    static auto getReturnType() { return std::make_shared<DataTypeNumber<ResultType>>(); }
+
+    static void vectorConstant(
+        const ColumnString::Chars & haystack_data,
+        const ColumnString::Offsets & haystack_offsets,
+        const std::vector<StringRef> & needles,
+        PaddedPODArray<UInt64> & res,
+        [[maybe_unused]] PaddedPODArray<UInt64> & offsets)
+    {
+        auto searcher = Impl::createMultiSearcherInBigHaystack(needles);
+        const size_t haystack_string_size = haystack_offsets.size();
+        res.resize(haystack_string_size);
+        size_t iteration = 0;
+        while (searcher.hasMoreToSearch())
+        {
+            size_t prev_offset = 0;
+            for (size_t j = 0; j < haystack_string_size; ++j)
+            {
+                const auto * haystack = &haystack_data[prev_offset];
+                const auto * haystack_end = haystack + haystack_offsets[j] - prev_offset - 1;
+                /// hasMoreToSearch traverses needles in increasing order.
+                if (iteration == 0 || res[j] == 0)
+                    res[j] = searcher.searchOneFirstIndex(haystack, haystack_end);
+                prev_offset = haystack_offsets[j];
+            }
+            ++iteration;
+        }
+    }
+};
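
The branch above relies on an ordering invariant: hasMoreToSearch() hands out needles in increasing order, so the first pass that hits row j already carries the smallest possible needle index and no min() is needed, unlike the first-position case. Reduced to scalar form, the semantics look roughly like this (firstIndex is a hypothetical helper, not part of the patch):

    #include <cstdint>
    #include <iostream>
    #include <string_view>
    #include <vector>

    uint64_t firstIndex(std::string_view haystack, const std::vector<std::string_view> & needles)
    {
        for (size_t i = 0; i < needles.size(); ++i)          /// increasing needle order
            if (haystack.find(needles[i]) != std::string_view::npos)
                return i + 1;                                /// 1-based needle index, as in the column code
        return 0;                                            /// no needle matched
    }

    int main()
    {
        std::cout << firstIndex("clickhouse", {"oracle", "house", "click"}) << '\n';  /// 2
    }
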
+
+/** Token search in the string: the needle must be surrounded by separator characters, such as whitespace or punctuation.
+  */
+template <typename TokenSearcher, bool negate_result = false>
+struct HasTokenImpl
+{
+    using ResultType = UInt8;
+
+    static constexpr bool use_default_implementation_for_constants = true;
+
+    static void vectorConstant(
+        const ColumnString::Chars & data, const ColumnString::Offsets & offsets, const std::string & pattern, PaddedPODArray<UInt8> & res)
+    {
+        if (offsets.empty())
+            return;
+
+        const UInt8 * begin = data.data();
+        const UInt8 * pos = begin;
+        const UInt8 * end = pos + data.size();
+
+        /// The current index in the array of strings.
+        size_t i = 0;
+
+        TokenSearcher searcher(pattern.data(), pattern.size(), end - pos);
+
+        /// We will search for the next occurrence in all rows at once.
+        while (pos < end && end != (pos = searcher.search(pos, end - pos)))
+        {
+            /// Let's determine which index it refers to.
+            while (begin + offsets[i] <= pos)
+            {
+                res[i] = negate_result;
+                ++i;
+            }
+
+            /// We check that the entry does not pass through the boundaries of strings.
+            if (pos + pattern.size() < begin + offsets[i])
+                res[i] = !negate_result;
+            else
+                res[i] = negate_result;
+
+            pos = begin + offsets[i];
+            ++i;
+        }
+
+        /// Tail, in which there can be no substring.
+        if (i < res.size())
+            memset(&res[i], negate_result, (res.size() - i) * sizeof(res[0]));
+    }
+
+    template <typename... Args>
+    static void vectorVector(Args &&...)
+    {
+        throw Exception("Function 'hasToken' does not support non-constant needle argument", ErrorCodes::ILLEGAL_COLUMN);
+    }
+
+    /// Search different needles in a single haystack.
+    template <typename... Args>
+    static void constantVector(Args &&...)
+    {
+        throw Exception("Function 'hasToken' does not support non-constant needle argument", ErrorCodes::ILLEGAL_COLUMN);
+    }
+
+    template <typename... Args>
+    static void vectorFixedConstant(Args &&...)
+    {
+        throw Exception("Function 'hasToken' doesn't support FixedString haystack argument", ErrorCodes::ILLEGAL_COLUMN);
+    }
+};
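
The token requirement is what separates hasToken from a plain substring search: a hit only counts if the bytes immediately before and after it are separators. A rough standalone approximation of the rule (hasTokenSketch and its separator predicate are hypothetical; the real separator set lives inside the token searcher, and treating every non-alphanumeric ASCII byte as a separator is only an approximation):

    #include <cctype>
    #include <iostream>
    #include <string_view>

    /// Approximation: any ASCII byte that is not a letter or digit separates tokens.
    bool isTokenSeparator(unsigned char c)
    {
        return c < 0x80 && !std::isalnum(c);
    }

    bool hasTokenSketch(std::string_view haystack, std::string_view token)
    {
        for (size_t pos = haystack.find(token); pos != std::string_view::npos;
             pos = haystack.find(token, pos + 1))
        {
            bool left_ok = pos == 0 || isTokenSeparator(haystack[pos - 1]);
            bool right_ok = pos + token.size() == haystack.size()
                || isTokenSeparator(haystack[pos + token.size()]);
            if (left_ok && right_ok)
                return true;
        }
        return false;
    }

    int main()
    {
        std::cout << hasTokenSketch("send http request", "http") << '\n';   /// 1: bounded by separators
        std::cout << hasTokenSketch("send https request", "http") << '\n';  /// 0: "https" is a longer token
    }
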
+
+
+struct NamePosition
+{
+    static constexpr auto name = "position";
+};
+struct NamePositionUTF8
+{
+    static constexpr auto name = "positionUTF8";
+};
+struct NamePositionCaseInsensitive
+{
+    static constexpr auto name = "positionCaseInsensitive";
+};
+struct NamePositionCaseInsensitiveUTF8
+{
+    static constexpr auto name = "positionCaseInsensitiveUTF8";
+};
+struct NameMultiSearchAllPositions
+{
+    static constexpr auto name = "multiSearchAllPositions";
+};
+struct NameMultiSearchAllPositionsUTF8
+{
+    static constexpr auto name = "multiSearchAllPositionsUTF8";
+};
+struct NameMultiSearchAllPositionsCaseInsensitive
+{
+    static constexpr auto name = "multiSearchAllPositionsCaseInsensitive";
+};
+struct NameMultiSearchAllPositionsCaseInsensitiveUTF8
+{
+    static constexpr auto name = "multiSearchAllPositionsCaseInsensitiveUTF8";
+};
+struct NameMultiSearchAny
+{
+    static constexpr auto name = "multiSearchAny";
+};
+struct NameMultiSearchAnyUTF8
+{
+    static constexpr auto name = "multiSearchAnyUTF8";
+};
+struct NameMultiSearchAnyCaseInsensitive
+{
+    static constexpr auto name = "multiSearchAnyCaseInsensitive";
+};
+struct NameMultiSearchAnyCaseInsensitiveUTF8
+{
+    static constexpr auto name = "multiSearchAnyCaseInsensitiveUTF8";
+};
+struct NameMultiSearchFirstIndex
+{
+    static constexpr auto name = "multiSearchFirstIndex";
+};
+struct NameMultiSearchFirstIndexUTF8
+{
+    static constexpr auto name = "multiSearchFirstIndexUTF8";
+};
+struct NameMultiSearchFirstIndexCaseInsensitive
+{
+    static constexpr auto name = "multiSearchFirstIndexCaseInsensitive";
+};
+struct NameMultiSearchFirstIndexCaseInsensitiveUTF8
+{
+    static constexpr auto name = "multiSearchFirstIndexCaseInsensitiveUTF8";
+};
+struct NameMultiSearchFirstPosition
+{
+    static constexpr auto name = "multiSearchFirstPosition";
+};
+struct NameMultiSearchFirstPositionUTF8
+{
+    static constexpr auto name = "multiSearchFirstPositionUTF8";
+};
+struct NameMultiSearchFirstPositionCaseInsensitive
+{
+    static constexpr auto name = "multiSearchFirstPositionCaseInsensitive";
+};
+struct NameMultiSearchFirstPositionCaseInsensitiveUTF8
+{
+    static constexpr auto name = "multiSearchFirstPositionCaseInsensitiveUTF8";
+};
+
+struct NameHasToken
+{
+    static constexpr auto name = "hasToken";
+};
+
+struct NameHasTokenCaseInsensitive
+{
+    static constexpr auto name = "hasTokenCaseInsensitive";
+};
+
+
+using FunctionPosition = FunctionsStringSearch<PositionImpl<PositionCaseSensitiveASCII>, NamePosition>;
+using FunctionPositionUTF8 = FunctionsStringSearch<PositionImpl<PositionCaseSensitiveUTF8>, NamePositionUTF8>;
+using FunctionPositionCaseInsensitive = FunctionsStringSearch<PositionImpl<PositionCaseInsensitiveASCII>, NamePositionCaseInsensitive>;
+using FunctionPositionCaseInsensitiveUTF8
+    = FunctionsStringSearch<PositionImpl<PositionCaseInsensitiveUTF8>, NamePositionCaseInsensitiveUTF8>;
+
+using FunctionMultiSearchAllPositions
+    = FunctionsMultiStringPosition<MultiSearchAllPositionsImpl<PositionCaseSensitiveASCII>, NameMultiSearchAllPositions>;
+using FunctionMultiSearchAllPositionsUTF8
+    = FunctionsMultiStringPosition<MultiSearchAllPositionsImpl<PositionCaseSensitiveUTF8>, NameMultiSearchAllPositionsUTF8>;
+using FunctionMultiSearchAllPositionsCaseInsensitive
+    = FunctionsMultiStringPosition<MultiSearchAllPositionsImpl<PositionCaseInsensitiveASCII>, NameMultiSearchAllPositionsCaseInsensitive>;
+using FunctionMultiSearchAllPositionsCaseInsensitiveUTF8 = FunctionsMultiStringPosition<
+    MultiSearchAllPositionsImpl<PositionCaseInsensitiveUTF8>,
+    NameMultiSearchAllPositionsCaseInsensitiveUTF8>;
+
+using FunctionMultiSearch = FunctionsMultiStringSearch<MultiSearchImpl<PositionCaseSensitiveASCII>, NameMultiSearchAny>;
+using FunctionMultiSearchUTF8 = FunctionsMultiStringSearch<MultiSearchImpl<PositionCaseSensitiveUTF8>, NameMultiSearchAnyUTF8>;
+using FunctionMultiSearchCaseInsensitive
+    = FunctionsMultiStringSearch<MultiSearchImpl<PositionCaseInsensitiveASCII>, NameMultiSearchAnyCaseInsensitive>;
+using FunctionMultiSearchCaseInsensitiveUTF8
+    = FunctionsMultiStringSearch<MultiSearchImpl<PositionCaseInsensitiveUTF8>, NameMultiSearchAnyCaseInsensitiveUTF8>;
+
+using FunctionMultiSearchFirstIndex
+    = FunctionsMultiStringSearch<MultiSearchFirstIndexImpl<PositionCaseSensitiveASCII>, NameMultiSearchFirstIndex>;
+using FunctionMultiSearchFirstIndexUTF8
+    = FunctionsMultiStringSearch<MultiSearchFirstIndexImpl<PositionCaseSensitiveUTF8>, NameMultiSearchFirstIndexUTF8>;
+using FunctionMultiSearchFirstIndexCaseInsensitive
+    = FunctionsMultiStringSearch<MultiSearchFirstIndexImpl<PositionCaseInsensitiveASCII>, NameMultiSearchFirstIndexCaseInsensitive>;
+using FunctionMultiSearchFirstIndexCaseInsensitiveUTF8
+    = FunctionsMultiStringSearch<MultiSearchFirstIndexImpl<PositionCaseInsensitiveUTF8>, NameMultiSearchFirstIndexCaseInsensitiveUTF8>;
+
+using FunctionMultiSearchFirstPosition
+    = FunctionsMultiStringSearch<MultiSearchFirstPositionImpl<PositionCaseSensitiveASCII>, NameMultiSearchFirstPosition>;
+using FunctionMultiSearchFirstPositionUTF8
+    = FunctionsMultiStringSearch<MultiSearchFirstPositionImpl<PositionCaseSensitiveUTF8>, NameMultiSearchFirstPositionUTF8>;
+using FunctionMultiSearchFirstPositionCaseInsensitive
+    = FunctionsMultiStringSearch<MultiSearchFirstPositionImpl<PositionCaseInsensitiveASCII>, NameMultiSearchFirstPositionCaseInsensitive>;
+using FunctionMultiSearchFirstPositionCaseInsensitiveUTF8 = FunctionsMultiStringSearch<
+    MultiSearchFirstPositionImpl<PositionCaseInsensitiveUTF8>,
+    NameMultiSearchFirstPositionCaseInsensitiveUTF8>;
+
+using FunctionHasToken = FunctionsStringSearch<HasTokenImpl<VolnitskyCaseSensitiveToken, false>, NameHasToken>;
+using FunctionHasTokenCaseInsensitive
+    = FunctionsStringSearch<HasTokenImpl<VolnitskyCaseInsensitiveToken, false>, NameHasTokenCaseInsensitive>;
+
+void registerFunctionsStringSearch(FunctionFactory & factory)
+{
+    factory.registerFunction<FunctionPosition>(FunctionFactory::CaseInsensitive);
+    factory.registerFunction<FunctionPositionUTF8>();
+    factory.registerFunction<FunctionPositionCaseInsensitive>();
+    factory.registerFunction<FunctionPositionCaseInsensitiveUTF8>();
+
+    factory.registerFunction<FunctionMultiSearchAllPositions>();
+    factory.registerFunction<FunctionMultiSearchAllPositionsUTF8>();
+    factory.registerFunction<FunctionMultiSearchAllPositionsCaseInsensitive>();
+    factory.registerFunction<FunctionMultiSearchAllPositionsCaseInsensitiveUTF8>();
+
+    factory.registerFunction<FunctionMultiSearch>();
+    factory.registerFunction<FunctionMultiSearchUTF8>();
+    factory.registerFunction<FunctionMultiSearchCaseInsensitive>();
+    factory.registerFunction<FunctionMultiSearchCaseInsensitiveUTF8>();
+
+    factory.registerFunction<FunctionMultiSearchFirstIndex>();
+    factory.registerFunction<FunctionMultiSearchFirstIndexUTF8>();
+    factory.registerFunction<FunctionMultiSearchFirstIndexCaseInsensitive>();
+    factory.registerFunction<FunctionMultiSearchFirstIndexCaseInsensitiveUTF8>();
+
+    factory.registerFunction<FunctionMultiSearchFirstPosition>();
+    factory.registerFunction<FunctionMultiSearchFirstPositionUTF8>();
+    factory.registerFunction<FunctionMultiSearchFirstPositionCaseInsensitive>();
+    factory.registerFunction<FunctionMultiSearchFirstPositionCaseInsensitiveUTF8>();
+
+    factory.registerFunction<FunctionHasToken>();
+    factory.registerFunction<FunctionHasTokenCaseInsensitive>();
+
+    factory.registerAlias("locate", NamePosition::name, FunctionFactory::CaseInsensitive);
+}
+}
diff --git a/dbms/src/Functions/FunctionsStringSearch.h b/dbms/Functions/FunctionsStringSearch.h similarity index 100% rename from dbms/src/Functions/FunctionsStringSearch.h rename to dbms/Functions/FunctionsStringSearch.h
diff --git a/dbms/src/Functions/FunctionsStringSearchToString.h b/dbms/Functions/FunctionsStringSearchToString.h similarity index 100% rename from dbms/src/Functions/FunctionsStringSearchToString.h rename to dbms/Functions/FunctionsStringSearchToString.h
diff --git a/dbms/src/Functions/FunctionsStringSimilarity.cpp b/dbms/Functions/FunctionsStringSimilarity.cpp similarity index 100% rename from dbms/src/Functions/FunctionsStringSimilarity.cpp rename to dbms/Functions/FunctionsStringSimilarity.cpp
diff --git a/dbms/src/Functions/FunctionsStringSimilarity.h b/dbms/Functions/FunctionsStringSimilarity.h similarity index 100% rename from dbms/src/Functions/FunctionsStringSimilarity.h rename to dbms/Functions/FunctionsStringSimilarity.h
diff --git a/dbms/src/Functions/FunctionsVisitParam.h b/dbms/Functions/FunctionsVisitParam.h similarity index 100% rename from dbms/src/Functions/FunctionsVisitParam.h rename to dbms/Functions/FunctionsVisitParam.h
diff --git a/dbms/src/Functions/GatherUtils/Algorithms.h b/dbms/Functions/GatherUtils/Algorithms.h similarity index 100% rename from
dbms/src/Functions/GatherUtils/Algorithms.h rename to dbms/Functions/GatherUtils/Algorithms.h diff --git a/dbms/src/Functions/GatherUtils/ArraySinkVisitor.h b/dbms/Functions/GatherUtils/ArraySinkVisitor.h similarity index 100% rename from dbms/src/Functions/GatherUtils/ArraySinkVisitor.h rename to dbms/Functions/GatherUtils/ArraySinkVisitor.h diff --git a/dbms/src/Functions/GatherUtils/ArraySourceVisitor.h b/dbms/Functions/GatherUtils/ArraySourceVisitor.h similarity index 100% rename from dbms/src/Functions/GatherUtils/ArraySourceVisitor.h rename to dbms/Functions/GatherUtils/ArraySourceVisitor.h diff --git a/dbms/src/Functions/GatherUtils/CMakeLists.txt b/dbms/Functions/GatherUtils/CMakeLists.txt similarity index 100% rename from dbms/src/Functions/GatherUtils/CMakeLists.txt rename to dbms/Functions/GatherUtils/CMakeLists.txt diff --git a/dbms/src/Functions/GatherUtils/GatherUtils.h b/dbms/Functions/GatherUtils/GatherUtils.h similarity index 100% rename from dbms/src/Functions/GatherUtils/GatherUtils.h rename to dbms/Functions/GatherUtils/GatherUtils.h diff --git a/dbms/src/Functions/GatherUtils/IArraySink.h b/dbms/Functions/GatherUtils/IArraySink.h similarity index 100% rename from dbms/src/Functions/GatherUtils/IArraySink.h rename to dbms/Functions/GatherUtils/IArraySink.h diff --git a/dbms/src/Functions/GatherUtils/IArraySource.h b/dbms/Functions/GatherUtils/IArraySource.h similarity index 100% rename from dbms/src/Functions/GatherUtils/IArraySource.h rename to dbms/Functions/GatherUtils/IArraySource.h diff --git a/dbms/src/Functions/GatherUtils/IValueSource.h b/dbms/Functions/GatherUtils/IValueSource.h similarity index 100% rename from dbms/src/Functions/GatherUtils/IValueSource.h rename to dbms/Functions/GatherUtils/IValueSource.h diff --git a/dbms/src/Functions/GatherUtils/Selectors.h b/dbms/Functions/GatherUtils/Selectors.h similarity index 100% rename from dbms/src/Functions/GatherUtils/Selectors.h rename to dbms/Functions/GatherUtils/Selectors.h diff --git a/dbms/src/Functions/GatherUtils/Sinks.h b/dbms/Functions/GatherUtils/Sinks.h similarity index 100% rename from dbms/src/Functions/GatherUtils/Sinks.h rename to dbms/Functions/GatherUtils/Sinks.h diff --git a/dbms/src/Functions/GatherUtils/Slices.h b/dbms/Functions/GatherUtils/Slices.h similarity index 100% rename from dbms/src/Functions/GatherUtils/Slices.h rename to dbms/Functions/GatherUtils/Slices.h diff --git a/dbms/src/Functions/GatherUtils/Sources.h b/dbms/Functions/GatherUtils/Sources.h similarity index 100% rename from dbms/src/Functions/GatherUtils/Sources.h rename to dbms/Functions/GatherUtils/Sources.h diff --git a/dbms/src/Functions/GatherUtils/ValueSourceVisitor.h b/dbms/Functions/GatherUtils/ValueSourceVisitor.h similarity index 100% rename from dbms/src/Functions/GatherUtils/ValueSourceVisitor.h rename to dbms/Functions/GatherUtils/ValueSourceVisitor.h diff --git a/dbms/src/Functions/GatherUtils/concat.cpp b/dbms/Functions/GatherUtils/concat.cpp similarity index 100% rename from dbms/src/Functions/GatherUtils/concat.cpp rename to dbms/Functions/GatherUtils/concat.cpp diff --git a/dbms/src/Functions/GatherUtils/createArraySink.cpp b/dbms/Functions/GatherUtils/createArraySink.cpp similarity index 100% rename from dbms/src/Functions/GatherUtils/createArraySink.cpp rename to dbms/Functions/GatherUtils/createArraySink.cpp diff --git a/dbms/src/Functions/GatherUtils/createArraySource.cpp b/dbms/Functions/GatherUtils/createArraySource.cpp similarity index 100% rename from 
dbms/src/Functions/GatherUtils/createArraySource.cpp rename to dbms/Functions/GatherUtils/createArraySource.cpp diff --git a/dbms/src/Functions/GatherUtils/createValueSource.cpp b/dbms/Functions/GatherUtils/createValueSource.cpp similarity index 100% rename from dbms/src/Functions/GatherUtils/createValueSource.cpp rename to dbms/Functions/GatherUtils/createValueSource.cpp diff --git a/dbms/src/Functions/GatherUtils/has.cpp b/dbms/Functions/GatherUtils/has.cpp similarity index 100% rename from dbms/src/Functions/GatherUtils/has.cpp rename to dbms/Functions/GatherUtils/has.cpp diff --git a/dbms/src/Functions/GatherUtils/push.cpp b/dbms/Functions/GatherUtils/push.cpp similarity index 100% rename from dbms/src/Functions/GatherUtils/push.cpp rename to dbms/Functions/GatherUtils/push.cpp diff --git a/dbms/src/Functions/GatherUtils/resizeConstantSize.cpp b/dbms/Functions/GatherUtils/resizeConstantSize.cpp similarity index 100% rename from dbms/src/Functions/GatherUtils/resizeConstantSize.cpp rename to dbms/Functions/GatherUtils/resizeConstantSize.cpp diff --git a/dbms/src/Functions/GatherUtils/resizeDynamicSize.cpp b/dbms/Functions/GatherUtils/resizeDynamicSize.cpp similarity index 100% rename from dbms/src/Functions/GatherUtils/resizeDynamicSize.cpp rename to dbms/Functions/GatherUtils/resizeDynamicSize.cpp diff --git a/dbms/src/Functions/GatherUtils/sliceDynamicOffsetBounded.cpp b/dbms/Functions/GatherUtils/sliceDynamicOffsetBounded.cpp similarity index 100% rename from dbms/src/Functions/GatherUtils/sliceDynamicOffsetBounded.cpp rename to dbms/Functions/GatherUtils/sliceDynamicOffsetBounded.cpp diff --git a/dbms/src/Functions/GatherUtils/sliceDynamicOffsetUnbounded.cpp b/dbms/Functions/GatherUtils/sliceDynamicOffsetUnbounded.cpp similarity index 100% rename from dbms/src/Functions/GatherUtils/sliceDynamicOffsetUnbounded.cpp rename to dbms/Functions/GatherUtils/sliceDynamicOffsetUnbounded.cpp diff --git a/dbms/src/Functions/GatherUtils/sliceFromLeftConstantOffsetBounded.cpp b/dbms/Functions/GatherUtils/sliceFromLeftConstantOffsetBounded.cpp similarity index 100% rename from dbms/src/Functions/GatherUtils/sliceFromLeftConstantOffsetBounded.cpp rename to dbms/Functions/GatherUtils/sliceFromLeftConstantOffsetBounded.cpp diff --git a/dbms/src/Functions/GatherUtils/sliceFromLeftConstantOffsetUnbounded.cpp b/dbms/Functions/GatherUtils/sliceFromLeftConstantOffsetUnbounded.cpp similarity index 100% rename from dbms/src/Functions/GatherUtils/sliceFromLeftConstantOffsetUnbounded.cpp rename to dbms/Functions/GatherUtils/sliceFromLeftConstantOffsetUnbounded.cpp diff --git a/dbms/src/Functions/GatherUtils/sliceFromRightConstantOffsetBounded.cpp b/dbms/Functions/GatherUtils/sliceFromRightConstantOffsetBounded.cpp similarity index 100% rename from dbms/src/Functions/GatherUtils/sliceFromRightConstantOffsetBounded.cpp rename to dbms/Functions/GatherUtils/sliceFromRightConstantOffsetBounded.cpp diff --git a/dbms/src/Functions/GatherUtils/sliceFromRightConstantOffsetUnbounded.cpp b/dbms/Functions/GatherUtils/sliceFromRightConstantOffsetUnbounded.cpp similarity index 100% rename from dbms/src/Functions/GatherUtils/sliceFromRightConstantOffsetUnbounded.cpp rename to dbms/Functions/GatherUtils/sliceFromRightConstantOffsetUnbounded.cpp diff --git a/dbms/src/Functions/GeoHash.cpp b/dbms/Functions/GeoHash.cpp similarity index 100% rename from dbms/src/Functions/GeoHash.cpp rename to dbms/Functions/GeoHash.cpp diff --git a/dbms/src/Functions/GeoHash.h b/dbms/Functions/GeoHash.h similarity index 100% rename from 
dbms/src/Functions/GeoHash.h rename to dbms/Functions/GeoHash.h diff --git a/dbms/src/Functions/HasTokenImpl.h b/dbms/Functions/HasTokenImpl.h similarity index 100% rename from dbms/src/Functions/HasTokenImpl.h rename to dbms/Functions/HasTokenImpl.h diff --git a/dbms/src/Functions/IFunction.cpp b/dbms/Functions/IFunction.cpp similarity index 100% rename from dbms/src/Functions/IFunction.cpp rename to dbms/Functions/IFunction.cpp diff --git a/dbms/src/Functions/IFunction.h b/dbms/Functions/IFunction.h similarity index 100% rename from dbms/src/Functions/IFunction.h rename to dbms/Functions/IFunction.h diff --git a/dbms/src/Functions/IFunctionAdaptors.h b/dbms/Functions/IFunctionAdaptors.h similarity index 100% rename from dbms/src/Functions/IFunctionAdaptors.h rename to dbms/Functions/IFunctionAdaptors.h diff --git a/dbms/src/Functions/IFunctionImpl.h b/dbms/Functions/IFunctionImpl.h similarity index 100% rename from dbms/src/Functions/IFunctionImpl.h rename to dbms/Functions/IFunctionImpl.h diff --git a/dbms/src/Functions/LowerUpperImpl.h b/dbms/Functions/LowerUpperImpl.h similarity index 100% rename from dbms/src/Functions/LowerUpperImpl.h rename to dbms/Functions/LowerUpperImpl.h diff --git a/dbms/src/Functions/LowerUpperUTF8Impl.h b/dbms/Functions/LowerUpperUTF8Impl.h similarity index 100% rename from dbms/src/Functions/LowerUpperUTF8Impl.h rename to dbms/Functions/LowerUpperUTF8Impl.h diff --git a/dbms/src/Functions/MultiSearchAllPositionsImpl.h b/dbms/Functions/MultiSearchAllPositionsImpl.h similarity index 100% rename from dbms/src/Functions/MultiSearchAllPositionsImpl.h rename to dbms/Functions/MultiSearchAllPositionsImpl.h diff --git a/dbms/src/Functions/MultiSearchFirstIndexImpl.h b/dbms/Functions/MultiSearchFirstIndexImpl.h similarity index 100% rename from dbms/src/Functions/MultiSearchFirstIndexImpl.h rename to dbms/Functions/MultiSearchFirstIndexImpl.h diff --git a/dbms/src/Functions/MultiSearchFirstPositionImpl.h b/dbms/Functions/MultiSearchFirstPositionImpl.h similarity index 100% rename from dbms/src/Functions/MultiSearchFirstPositionImpl.h rename to dbms/Functions/MultiSearchFirstPositionImpl.h diff --git a/dbms/src/Functions/MultiSearchImpl.h b/dbms/Functions/MultiSearchImpl.h similarity index 100% rename from dbms/src/Functions/MultiSearchImpl.h rename to dbms/Functions/MultiSearchImpl.h diff --git a/dbms/src/Functions/PolygonUtils.h b/dbms/Functions/PolygonUtils.h similarity index 100% rename from dbms/src/Functions/PolygonUtils.h rename to dbms/Functions/PolygonUtils.h diff --git a/dbms/src/Functions/PositionImpl.h b/dbms/Functions/PositionImpl.h similarity index 100% rename from dbms/src/Functions/PositionImpl.h rename to dbms/Functions/PositionImpl.h diff --git a/dbms/src/Functions/RapidJSONParser.h b/dbms/Functions/RapidJSONParser.h similarity index 100% rename from dbms/src/Functions/RapidJSONParser.h rename to dbms/Functions/RapidJSONParser.h diff --git a/dbms/src/Functions/Regexps.h b/dbms/Functions/Regexps.h similarity index 100% rename from dbms/src/Functions/Regexps.h rename to dbms/Functions/Regexps.h diff --git a/dbms/src/Functions/SimdJSONParser.h b/dbms/Functions/SimdJSONParser.h similarity index 100% rename from dbms/src/Functions/SimdJSONParser.h rename to dbms/Functions/SimdJSONParser.h diff --git a/dbms/src/Functions/URL/CMakeLists.txt b/dbms/Functions/URL/CMakeLists.txt similarity index 100% rename from dbms/src/Functions/URL/CMakeLists.txt rename to dbms/Functions/URL/CMakeLists.txt diff --git a/dbms/src/Functions/URL/FunctionsURL.h 
b/dbms/Functions/URL/FunctionsURL.h similarity index 100% rename from dbms/src/Functions/URL/FunctionsURL.h rename to dbms/Functions/URL/FunctionsURL.h diff --git a/dbms/src/Functions/URL/URLHierarchy.cpp b/dbms/Functions/URL/URLHierarchy.cpp similarity index 100% rename from dbms/src/Functions/URL/URLHierarchy.cpp rename to dbms/Functions/URL/URLHierarchy.cpp diff --git a/dbms/src/Functions/URL/URLPathHierarchy.cpp b/dbms/Functions/URL/URLPathHierarchy.cpp similarity index 100% rename from dbms/src/Functions/URL/URLPathHierarchy.cpp rename to dbms/Functions/URL/URLPathHierarchy.cpp diff --git a/dbms/src/Functions/URL/basename.cpp b/dbms/Functions/URL/basename.cpp similarity index 100% rename from dbms/src/Functions/URL/basename.cpp rename to dbms/Functions/URL/basename.cpp diff --git a/dbms/src/Functions/URL/config_functions_url.h.in b/dbms/Functions/URL/config_functions_url.h.in similarity index 100% rename from dbms/src/Functions/URL/config_functions_url.h.in rename to dbms/Functions/URL/config_functions_url.h.in diff --git a/dbms/src/Functions/URL/cutFragment.cpp b/dbms/Functions/URL/cutFragment.cpp similarity index 100% rename from dbms/src/Functions/URL/cutFragment.cpp rename to dbms/Functions/URL/cutFragment.cpp diff --git a/dbms/src/Functions/URL/cutQueryString.cpp b/dbms/Functions/URL/cutQueryString.cpp similarity index 100% rename from dbms/src/Functions/URL/cutQueryString.cpp rename to dbms/Functions/URL/cutQueryString.cpp diff --git a/dbms/src/Functions/URL/cutQueryStringAndFragment.cpp b/dbms/Functions/URL/cutQueryStringAndFragment.cpp similarity index 100% rename from dbms/src/Functions/URL/cutQueryStringAndFragment.cpp rename to dbms/Functions/URL/cutQueryStringAndFragment.cpp diff --git a/dbms/src/Functions/URL/cutToFirstSignificantSubdomain.cpp b/dbms/Functions/URL/cutToFirstSignificantSubdomain.cpp similarity index 100% rename from dbms/src/Functions/URL/cutToFirstSignificantSubdomain.cpp rename to dbms/Functions/URL/cutToFirstSignificantSubdomain.cpp diff --git a/dbms/src/Functions/URL/cutURLParameter.cpp b/dbms/Functions/URL/cutURLParameter.cpp similarity index 100% rename from dbms/src/Functions/URL/cutURLParameter.cpp rename to dbms/Functions/URL/cutURLParameter.cpp diff --git a/dbms/src/Functions/URL/cutWWW.cpp b/dbms/Functions/URL/cutWWW.cpp similarity index 100% rename from dbms/src/Functions/URL/cutWWW.cpp rename to dbms/Functions/URL/cutWWW.cpp diff --git a/dbms/src/Functions/URL/decodeURLComponent.cpp b/dbms/Functions/URL/decodeURLComponent.cpp similarity index 100% rename from dbms/src/Functions/URL/decodeURLComponent.cpp rename to dbms/Functions/URL/decodeURLComponent.cpp diff --git a/dbms/src/Functions/URL/domain.cpp b/dbms/Functions/URL/domain.cpp similarity index 100% rename from dbms/src/Functions/URL/domain.cpp rename to dbms/Functions/URL/domain.cpp diff --git a/dbms/src/Functions/URL/domain.h b/dbms/Functions/URL/domain.h similarity index 100% rename from dbms/src/Functions/URL/domain.h rename to dbms/Functions/URL/domain.h diff --git a/dbms/src/Functions/URL/domainWithoutWWW.cpp b/dbms/Functions/URL/domainWithoutWWW.cpp similarity index 100% rename from dbms/src/Functions/URL/domainWithoutWWW.cpp rename to dbms/Functions/URL/domainWithoutWWW.cpp diff --git a/dbms/src/Functions/URL/extractURLParameter.cpp b/dbms/Functions/URL/extractURLParameter.cpp similarity index 100% rename from dbms/src/Functions/URL/extractURLParameter.cpp rename to dbms/Functions/URL/extractURLParameter.cpp diff --git a/dbms/src/Functions/URL/extractURLParameterNames.cpp 
b/dbms/Functions/URL/extractURLParameterNames.cpp similarity index 100% rename from dbms/src/Functions/URL/extractURLParameterNames.cpp rename to dbms/Functions/URL/extractURLParameterNames.cpp diff --git a/dbms/src/Functions/URL/extractURLParameters.cpp b/dbms/Functions/URL/extractURLParameters.cpp similarity index 100% rename from dbms/src/Functions/URL/extractURLParameters.cpp rename to dbms/Functions/URL/extractURLParameters.cpp diff --git a/dbms/src/Functions/URL/firstSignificantSubdomain.cpp b/dbms/Functions/URL/firstSignificantSubdomain.cpp similarity index 100% rename from dbms/src/Functions/URL/firstSignificantSubdomain.cpp rename to dbms/Functions/URL/firstSignificantSubdomain.cpp diff --git a/dbms/src/Functions/URL/firstSignificantSubdomain.h b/dbms/Functions/URL/firstSignificantSubdomain.h similarity index 100% rename from dbms/src/Functions/URL/firstSignificantSubdomain.h rename to dbms/Functions/URL/firstSignificantSubdomain.h diff --git a/dbms/src/Functions/URL/fragment.cpp b/dbms/Functions/URL/fragment.cpp similarity index 100% rename from dbms/src/Functions/URL/fragment.cpp rename to dbms/Functions/URL/fragment.cpp diff --git a/dbms/src/Functions/URL/fragment.h b/dbms/Functions/URL/fragment.h similarity index 100% rename from dbms/src/Functions/URL/fragment.h rename to dbms/Functions/URL/fragment.h diff --git a/dbms/src/Functions/URL/path.cpp b/dbms/Functions/URL/path.cpp similarity index 100% rename from dbms/src/Functions/URL/path.cpp rename to dbms/Functions/URL/path.cpp diff --git a/dbms/src/Functions/URL/pathFull.cpp b/dbms/Functions/URL/pathFull.cpp similarity index 100% rename from dbms/src/Functions/URL/pathFull.cpp rename to dbms/Functions/URL/pathFull.cpp diff --git a/dbms/src/Functions/URL/protocol.cpp b/dbms/Functions/URL/protocol.cpp similarity index 100% rename from dbms/src/Functions/URL/protocol.cpp rename to dbms/Functions/URL/protocol.cpp diff --git a/dbms/src/Functions/URL/protocol.h b/dbms/Functions/URL/protocol.h similarity index 100% rename from dbms/src/Functions/URL/protocol.h rename to dbms/Functions/URL/protocol.h diff --git a/dbms/src/Functions/URL/queryString.cpp b/dbms/Functions/URL/queryString.cpp similarity index 100% rename from dbms/src/Functions/URL/queryString.cpp rename to dbms/Functions/URL/queryString.cpp diff --git a/dbms/src/Functions/URL/queryString.h b/dbms/Functions/URL/queryString.h similarity index 100% rename from dbms/src/Functions/URL/queryString.h rename to dbms/Functions/URL/queryString.h diff --git a/dbms/src/Functions/URL/queryStringAndFragment.cpp b/dbms/Functions/URL/queryStringAndFragment.cpp similarity index 100% rename from dbms/src/Functions/URL/queryStringAndFragment.cpp rename to dbms/Functions/URL/queryStringAndFragment.cpp diff --git a/dbms/src/Functions/URL/queryStringAndFragment.h b/dbms/Functions/URL/queryStringAndFragment.h similarity index 100% rename from dbms/src/Functions/URL/queryStringAndFragment.h rename to dbms/Functions/URL/queryStringAndFragment.h diff --git a/dbms/src/Functions/URL/registerFunctionsURL.cpp b/dbms/Functions/URL/registerFunctionsURL.cpp similarity index 100% rename from dbms/src/Functions/URL/registerFunctionsURL.cpp rename to dbms/Functions/URL/registerFunctionsURL.cpp diff --git a/dbms/src/Functions/URL/tldLookup.generated.cpp b/dbms/Functions/URL/tldLookup.generated.cpp similarity index 100% rename from dbms/src/Functions/URL/tldLookup.generated.cpp rename to dbms/Functions/URL/tldLookup.generated.cpp diff --git a/dbms/src/Functions/URL/tldLookup.gperf 
b/dbms/Functions/URL/tldLookup.gperf similarity index 100% rename from dbms/src/Functions/URL/tldLookup.gperf rename to dbms/Functions/URL/tldLookup.gperf diff --git a/dbms/src/Functions/URL/tldLookup.h b/dbms/Functions/URL/tldLookup.h similarity index 100% rename from dbms/src/Functions/URL/tldLookup.h rename to dbms/Functions/URL/tldLookup.h diff --git a/dbms/src/Functions/URL/tldLookup.sh b/dbms/Functions/URL/tldLookup.sh similarity index 100% rename from dbms/src/Functions/URL/tldLookup.sh rename to dbms/Functions/URL/tldLookup.sh diff --git a/dbms/src/Functions/URL/topLevelDomain.cpp b/dbms/Functions/URL/topLevelDomain.cpp similarity index 100% rename from dbms/src/Functions/URL/topLevelDomain.cpp rename to dbms/Functions/URL/topLevelDomain.cpp diff --git a/dbms/src/Functions/abs.cpp b/dbms/Functions/abs.cpp similarity index 100% rename from dbms/src/Functions/abs.cpp rename to dbms/Functions/abs.cpp diff --git a/dbms/src/Functions/acos.cpp b/dbms/Functions/acos.cpp similarity index 100% rename from dbms/src/Functions/acos.cpp rename to dbms/Functions/acos.cpp diff --git a/dbms/src/Functions/addDays.cpp b/dbms/Functions/addDays.cpp similarity index 100% rename from dbms/src/Functions/addDays.cpp rename to dbms/Functions/addDays.cpp diff --git a/dbms/src/Functions/addHours.cpp b/dbms/Functions/addHours.cpp similarity index 100% rename from dbms/src/Functions/addHours.cpp rename to dbms/Functions/addHours.cpp diff --git a/dbms/src/Functions/addMinutes.cpp b/dbms/Functions/addMinutes.cpp similarity index 100% rename from dbms/src/Functions/addMinutes.cpp rename to dbms/Functions/addMinutes.cpp diff --git a/dbms/src/Functions/addMonths.cpp b/dbms/Functions/addMonths.cpp similarity index 100% rename from dbms/src/Functions/addMonths.cpp rename to dbms/Functions/addMonths.cpp diff --git a/dbms/src/Functions/addQuarters.cpp b/dbms/Functions/addQuarters.cpp similarity index 100% rename from dbms/src/Functions/addQuarters.cpp rename to dbms/Functions/addQuarters.cpp diff --git a/dbms/src/Functions/addSeconds.cpp b/dbms/Functions/addSeconds.cpp similarity index 100% rename from dbms/src/Functions/addSeconds.cpp rename to dbms/Functions/addSeconds.cpp diff --git a/dbms/src/Functions/addWeeks.cpp b/dbms/Functions/addWeeks.cpp similarity index 100% rename from dbms/src/Functions/addWeeks.cpp rename to dbms/Functions/addWeeks.cpp diff --git a/dbms/src/Functions/addYears.cpp b/dbms/Functions/addYears.cpp similarity index 100% rename from dbms/src/Functions/addYears.cpp rename to dbms/Functions/addYears.cpp diff --git a/dbms/src/Functions/addressToLine.cpp b/dbms/Functions/addressToLine.cpp similarity index 100% rename from dbms/src/Functions/addressToLine.cpp rename to dbms/Functions/addressToLine.cpp diff --git a/dbms/src/Functions/addressToSymbol.cpp b/dbms/Functions/addressToSymbol.cpp similarity index 100% rename from dbms/src/Functions/addressToSymbol.cpp rename to dbms/Functions/addressToSymbol.cpp diff --git a/dbms/src/Functions/appendTrailingCharIfAbsent.cpp b/dbms/Functions/appendTrailingCharIfAbsent.cpp similarity index 100% rename from dbms/src/Functions/appendTrailingCharIfAbsent.cpp rename to dbms/Functions/appendTrailingCharIfAbsent.cpp diff --git a/dbms/src/Functions/array/CMakeLists.txt b/dbms/Functions/array/CMakeLists.txt similarity index 100% rename from dbms/src/Functions/array/CMakeLists.txt rename to dbms/Functions/array/CMakeLists.txt diff --git a/dbms/src/Functions/array/FunctionArrayMapped.h b/dbms/Functions/array/FunctionArrayMapped.h similarity index 100% rename from 
dbms/src/Functions/array/FunctionArrayMapped.h rename to dbms/Functions/array/FunctionArrayMapped.h diff --git a/dbms/src/Functions/array/array.cpp b/dbms/Functions/array/array.cpp similarity index 100% rename from dbms/src/Functions/array/array.cpp rename to dbms/Functions/array/array.cpp diff --git a/dbms/src/Functions/array/arrayAUC.cpp b/dbms/Functions/array/arrayAUC.cpp similarity index 100% rename from dbms/src/Functions/array/arrayAUC.cpp rename to dbms/Functions/array/arrayAUC.cpp diff --git a/dbms/src/Functions/array/arrayAll.cpp b/dbms/Functions/array/arrayAll.cpp similarity index 100% rename from dbms/src/Functions/array/arrayAll.cpp rename to dbms/Functions/array/arrayAll.cpp diff --git a/dbms/src/Functions/array/arrayCompact.cpp b/dbms/Functions/array/arrayCompact.cpp similarity index 100% rename from dbms/src/Functions/array/arrayCompact.cpp rename to dbms/Functions/array/arrayCompact.cpp diff --git a/dbms/src/Functions/array/arrayConcat.cpp b/dbms/Functions/array/arrayConcat.cpp similarity index 100% rename from dbms/src/Functions/array/arrayConcat.cpp rename to dbms/Functions/array/arrayConcat.cpp diff --git a/dbms/src/Functions/array/arrayCount.cpp b/dbms/Functions/array/arrayCount.cpp similarity index 100% rename from dbms/src/Functions/array/arrayCount.cpp rename to dbms/Functions/array/arrayCount.cpp diff --git a/dbms/src/Functions/array/arrayCumSum.cpp b/dbms/Functions/array/arrayCumSum.cpp similarity index 100% rename from dbms/src/Functions/array/arrayCumSum.cpp rename to dbms/Functions/array/arrayCumSum.cpp diff --git a/dbms/src/Functions/array/arrayCumSumNonNegative.cpp b/dbms/Functions/array/arrayCumSumNonNegative.cpp similarity index 100% rename from dbms/src/Functions/array/arrayCumSumNonNegative.cpp rename to dbms/Functions/array/arrayCumSumNonNegative.cpp diff --git a/dbms/src/Functions/array/arrayDifference.cpp b/dbms/Functions/array/arrayDifference.cpp similarity index 100% rename from dbms/src/Functions/array/arrayDifference.cpp rename to dbms/Functions/array/arrayDifference.cpp diff --git a/dbms/src/Functions/array/arrayDistinct.cpp b/dbms/Functions/array/arrayDistinct.cpp similarity index 100% rename from dbms/src/Functions/array/arrayDistinct.cpp rename to dbms/Functions/array/arrayDistinct.cpp diff --git a/dbms/src/Functions/array/arrayElement.cpp b/dbms/Functions/array/arrayElement.cpp similarity index 100% rename from dbms/src/Functions/array/arrayElement.cpp rename to dbms/Functions/array/arrayElement.cpp diff --git a/dbms/src/Functions/array/arrayEnumerate.cpp b/dbms/Functions/array/arrayEnumerate.cpp similarity index 100% rename from dbms/src/Functions/array/arrayEnumerate.cpp rename to dbms/Functions/array/arrayEnumerate.cpp diff --git a/dbms/src/Functions/array/arrayEnumerateDense.cpp b/dbms/Functions/array/arrayEnumerateDense.cpp similarity index 100% rename from dbms/src/Functions/array/arrayEnumerateDense.cpp rename to dbms/Functions/array/arrayEnumerateDense.cpp diff --git a/dbms/src/Functions/array/arrayEnumerateDenseRanked.cpp b/dbms/Functions/array/arrayEnumerateDenseRanked.cpp similarity index 100% rename from dbms/src/Functions/array/arrayEnumerateDenseRanked.cpp rename to dbms/Functions/array/arrayEnumerateDenseRanked.cpp diff --git a/dbms/src/Functions/array/arrayEnumerateExtended.h b/dbms/Functions/array/arrayEnumerateExtended.h similarity index 100% rename from dbms/src/Functions/array/arrayEnumerateExtended.h rename to dbms/Functions/array/arrayEnumerateExtended.h diff --git a/dbms/src/Functions/array/arrayEnumerateRanked.cpp 
b/dbms/Functions/array/arrayEnumerateRanked.cpp similarity index 100% rename from dbms/src/Functions/array/arrayEnumerateRanked.cpp rename to dbms/Functions/array/arrayEnumerateRanked.cpp diff --git a/dbms/src/Functions/array/arrayEnumerateRanked.h b/dbms/Functions/array/arrayEnumerateRanked.h similarity index 100% rename from dbms/src/Functions/array/arrayEnumerateRanked.h rename to dbms/Functions/array/arrayEnumerateRanked.h diff --git a/dbms/src/Functions/array/arrayEnumerateUniq.cpp b/dbms/Functions/array/arrayEnumerateUniq.cpp similarity index 100% rename from dbms/src/Functions/array/arrayEnumerateUniq.cpp rename to dbms/Functions/array/arrayEnumerateUniq.cpp diff --git a/dbms/src/Functions/array/arrayEnumerateUniqRanked.cpp b/dbms/Functions/array/arrayEnumerateUniqRanked.cpp similarity index 100% rename from dbms/src/Functions/array/arrayEnumerateUniqRanked.cpp rename to dbms/Functions/array/arrayEnumerateUniqRanked.cpp diff --git a/dbms/src/Functions/array/arrayExists.cpp b/dbms/Functions/array/arrayExists.cpp similarity index 100% rename from dbms/src/Functions/array/arrayExists.cpp rename to dbms/Functions/array/arrayExists.cpp diff --git a/dbms/src/Functions/array/arrayFill.cpp b/dbms/Functions/array/arrayFill.cpp similarity index 100% rename from dbms/src/Functions/array/arrayFill.cpp rename to dbms/Functions/array/arrayFill.cpp diff --git a/dbms/src/Functions/array/arrayFilter.cpp b/dbms/Functions/array/arrayFilter.cpp similarity index 100% rename from dbms/src/Functions/array/arrayFilter.cpp rename to dbms/Functions/array/arrayFilter.cpp diff --git a/dbms/src/Functions/array/arrayFirst.cpp b/dbms/Functions/array/arrayFirst.cpp similarity index 100% rename from dbms/src/Functions/array/arrayFirst.cpp rename to dbms/Functions/array/arrayFirst.cpp diff --git a/dbms/src/Functions/array/arrayFirstIndex.cpp b/dbms/Functions/array/arrayFirstIndex.cpp similarity index 100% rename from dbms/src/Functions/array/arrayFirstIndex.cpp rename to dbms/Functions/array/arrayFirstIndex.cpp diff --git a/dbms/src/Functions/array/arrayFlatten.cpp b/dbms/Functions/array/arrayFlatten.cpp similarity index 100% rename from dbms/src/Functions/array/arrayFlatten.cpp rename to dbms/Functions/array/arrayFlatten.cpp diff --git a/dbms/src/Functions/array/arrayIndex.h b/dbms/Functions/array/arrayIndex.h similarity index 100% rename from dbms/src/Functions/array/arrayIndex.h rename to dbms/Functions/array/arrayIndex.h diff --git a/dbms/src/Functions/array/arrayIntersect.cpp b/dbms/Functions/array/arrayIntersect.cpp similarity index 100% rename from dbms/src/Functions/array/arrayIntersect.cpp rename to dbms/Functions/array/arrayIntersect.cpp diff --git a/dbms/src/Functions/array/arrayJoin.cpp b/dbms/Functions/array/arrayJoin.cpp similarity index 100% rename from dbms/src/Functions/array/arrayJoin.cpp rename to dbms/Functions/array/arrayJoin.cpp diff --git a/dbms/src/Functions/array/arrayMap.cpp b/dbms/Functions/array/arrayMap.cpp similarity index 100% rename from dbms/src/Functions/array/arrayMap.cpp rename to dbms/Functions/array/arrayMap.cpp diff --git a/dbms/src/Functions/array/arrayPop.h b/dbms/Functions/array/arrayPop.h similarity index 100% rename from dbms/src/Functions/array/arrayPop.h rename to dbms/Functions/array/arrayPop.h diff --git a/dbms/src/Functions/array/arrayPopBack.cpp b/dbms/Functions/array/arrayPopBack.cpp similarity index 100% rename from dbms/src/Functions/array/arrayPopBack.cpp rename to dbms/Functions/array/arrayPopBack.cpp diff --git a/dbms/src/Functions/array/arrayPopFront.cpp 
b/dbms/Functions/array/arrayPopFront.cpp similarity index 100% rename from dbms/src/Functions/array/arrayPopFront.cpp rename to dbms/Functions/array/arrayPopFront.cpp diff --git a/dbms/src/Functions/array/arrayPush.h b/dbms/Functions/array/arrayPush.h similarity index 100% rename from dbms/src/Functions/array/arrayPush.h rename to dbms/Functions/array/arrayPush.h diff --git a/dbms/src/Functions/array/arrayPushBack.cpp b/dbms/Functions/array/arrayPushBack.cpp similarity index 100% rename from dbms/src/Functions/array/arrayPushBack.cpp rename to dbms/Functions/array/arrayPushBack.cpp diff --git a/dbms/src/Functions/array/arrayPushFront.cpp b/dbms/Functions/array/arrayPushFront.cpp similarity index 100% rename from dbms/src/Functions/array/arrayPushFront.cpp rename to dbms/Functions/array/arrayPushFront.cpp diff --git a/dbms/src/Functions/array/arrayReduce.cpp b/dbms/Functions/array/arrayReduce.cpp similarity index 100% rename from dbms/src/Functions/array/arrayReduce.cpp rename to dbms/Functions/array/arrayReduce.cpp diff --git a/dbms/src/Functions/array/arrayReduceInRanges.cpp b/dbms/Functions/array/arrayReduceInRanges.cpp similarity index 100% rename from dbms/src/Functions/array/arrayReduceInRanges.cpp rename to dbms/Functions/array/arrayReduceInRanges.cpp diff --git a/dbms/src/Functions/array/arrayResize.cpp b/dbms/Functions/array/arrayResize.cpp similarity index 100% rename from dbms/src/Functions/array/arrayResize.cpp rename to dbms/Functions/array/arrayResize.cpp diff --git a/dbms/src/Functions/array/arrayReverse.cpp b/dbms/Functions/array/arrayReverse.cpp similarity index 100% rename from dbms/src/Functions/array/arrayReverse.cpp rename to dbms/Functions/array/arrayReverse.cpp diff --git a/dbms/src/Functions/array/arrayScalarProduct.h b/dbms/Functions/array/arrayScalarProduct.h similarity index 100% rename from dbms/src/Functions/array/arrayScalarProduct.h rename to dbms/Functions/array/arrayScalarProduct.h diff --git a/dbms/src/Functions/array/arraySlice.cpp b/dbms/Functions/array/arraySlice.cpp similarity index 100% rename from dbms/src/Functions/array/arraySlice.cpp rename to dbms/Functions/array/arraySlice.cpp diff --git a/dbms/src/Functions/array/arraySort.cpp b/dbms/Functions/array/arraySort.cpp similarity index 100% rename from dbms/src/Functions/array/arraySort.cpp rename to dbms/Functions/array/arraySort.cpp diff --git a/dbms/src/Functions/array/arraySplit.cpp b/dbms/Functions/array/arraySplit.cpp similarity index 100% rename from dbms/src/Functions/array/arraySplit.cpp rename to dbms/Functions/array/arraySplit.cpp diff --git a/dbms/src/Functions/array/arraySum.cpp b/dbms/Functions/array/arraySum.cpp similarity index 100% rename from dbms/src/Functions/array/arraySum.cpp rename to dbms/Functions/array/arraySum.cpp diff --git a/dbms/src/Functions/array/arrayUniq.cpp b/dbms/Functions/array/arrayUniq.cpp similarity index 100% rename from dbms/src/Functions/array/arrayUniq.cpp rename to dbms/Functions/array/arrayUniq.cpp diff --git a/dbms/src/Functions/array/arrayWithConstant.cpp b/dbms/Functions/array/arrayWithConstant.cpp similarity index 100% rename from dbms/src/Functions/array/arrayWithConstant.cpp rename to dbms/Functions/array/arrayWithConstant.cpp diff --git a/dbms/src/Functions/array/arrayZip.cpp b/dbms/Functions/array/arrayZip.cpp similarity index 100% rename from dbms/src/Functions/array/arrayZip.cpp rename to dbms/Functions/array/arrayZip.cpp diff --git a/dbms/src/Functions/array/countEqual.cpp b/dbms/Functions/array/countEqual.cpp similarity index 100% rename from 
dbms/src/Functions/array/countEqual.cpp rename to dbms/Functions/array/countEqual.cpp diff --git a/dbms/src/Functions/array/emptyArray.cpp b/dbms/Functions/array/emptyArray.cpp similarity index 100% rename from dbms/src/Functions/array/emptyArray.cpp rename to dbms/Functions/array/emptyArray.cpp diff --git a/dbms/src/Functions/array/emptyArrayToSingle.cpp b/dbms/Functions/array/emptyArrayToSingle.cpp similarity index 100% rename from dbms/src/Functions/array/emptyArrayToSingle.cpp rename to dbms/Functions/array/emptyArrayToSingle.cpp diff --git a/dbms/src/Functions/array/has.cpp b/dbms/Functions/array/has.cpp similarity index 100% rename from dbms/src/Functions/array/has.cpp rename to dbms/Functions/array/has.cpp diff --git a/dbms/src/Functions/array/hasAll.cpp b/dbms/Functions/array/hasAll.cpp similarity index 100% rename from dbms/src/Functions/array/hasAll.cpp rename to dbms/Functions/array/hasAll.cpp diff --git a/dbms/src/Functions/array/hasAllAny.h b/dbms/Functions/array/hasAllAny.h similarity index 100% rename from dbms/src/Functions/array/hasAllAny.h rename to dbms/Functions/array/hasAllAny.h diff --git a/dbms/src/Functions/array/hasAny.cpp b/dbms/Functions/array/hasAny.cpp similarity index 100% rename from dbms/src/Functions/array/hasAny.cpp rename to dbms/Functions/array/hasAny.cpp diff --git a/dbms/src/Functions/array/indexOf.cpp b/dbms/Functions/array/indexOf.cpp similarity index 100% rename from dbms/src/Functions/array/indexOf.cpp rename to dbms/Functions/array/indexOf.cpp diff --git a/dbms/src/Functions/array/length.cpp b/dbms/Functions/array/length.cpp similarity index 100% rename from dbms/src/Functions/array/length.cpp rename to dbms/Functions/array/length.cpp diff --git a/dbms/src/Functions/array/range.cpp b/dbms/Functions/array/range.cpp similarity index 100% rename from dbms/src/Functions/array/range.cpp rename to dbms/Functions/array/range.cpp diff --git a/dbms/src/Functions/array/registerFunctionsArray.cpp b/dbms/Functions/array/registerFunctionsArray.cpp similarity index 100% rename from dbms/src/Functions/array/registerFunctionsArray.cpp rename to dbms/Functions/array/registerFunctionsArray.cpp diff --git a/dbms/src/Functions/asin.cpp b/dbms/Functions/asin.cpp similarity index 100% rename from dbms/src/Functions/asin.cpp rename to dbms/Functions/asin.cpp diff --git a/dbms/src/Functions/assumeNotNull.cpp b/dbms/Functions/assumeNotNull.cpp similarity index 100% rename from dbms/src/Functions/assumeNotNull.cpp rename to dbms/Functions/assumeNotNull.cpp diff --git a/dbms/src/Functions/atan.cpp b/dbms/Functions/atan.cpp similarity index 100% rename from dbms/src/Functions/atan.cpp rename to dbms/Functions/atan.cpp diff --git a/dbms/src/Functions/bar.cpp b/dbms/Functions/bar.cpp similarity index 100% rename from dbms/src/Functions/bar.cpp rename to dbms/Functions/bar.cpp diff --git a/dbms/src/Functions/base64Decode.cpp b/dbms/Functions/base64Decode.cpp similarity index 100% rename from dbms/src/Functions/base64Decode.cpp rename to dbms/Functions/base64Decode.cpp diff --git a/dbms/src/Functions/base64Encode.cpp b/dbms/Functions/base64Encode.cpp similarity index 100% rename from dbms/src/Functions/base64Encode.cpp rename to dbms/Functions/base64Encode.cpp diff --git a/dbms/src/Functions/bitAnd.cpp b/dbms/Functions/bitAnd.cpp similarity index 100% rename from dbms/src/Functions/bitAnd.cpp rename to dbms/Functions/bitAnd.cpp diff --git a/dbms/src/Functions/bitBoolMaskAnd.cpp b/dbms/Functions/bitBoolMaskAnd.cpp similarity index 96% rename from 
dbms/src/Functions/bitBoolMaskAnd.cpp
rename to dbms/Functions/bitBoolMaskAnd.cpp
index c37a1ebc1d7..09d2122abde 100644
--- a/dbms/src/Functions/bitBoolMaskAnd.cpp
+++ b/dbms/Functions/bitBoolMaskAnd.cpp
@@ -10,7 +10,7 @@ namespace DB
     extern const int BAD_CAST;
 }
 
-    /// Working with UInt8: last bit = can be true, previous = can be false (Like dbms/src/Storages/MergeTree/BoolMask.h).
+    /// Working with UInt8: last bit = can be true, previous = can be false (Like dbms/Storages/MergeTree/BoolMask.h).
     /// This function provides "AND" operation for BoolMasks.
     /// Returns: "can be true" = A."can be true" AND B."can be true"
     /// "can be false" = A."can be false" OR B."can be false"
diff --git a/dbms/src/Functions/bitBoolMaskOr.cpp b/dbms/Functions/bitBoolMaskOr.cpp
similarity index 96%
rename from dbms/src/Functions/bitBoolMaskOr.cpp
rename to dbms/Functions/bitBoolMaskOr.cpp
index ec3d4e266f1..02882c0bb3f 100644
--- a/dbms/src/Functions/bitBoolMaskOr.cpp
+++ b/dbms/Functions/bitBoolMaskOr.cpp
@@ -10,7 +10,7 @@ namespace DB
     extern const int BAD_CAST;
 }
 
-    /// Working with UInt8: last bit = can be true, previous = can be false (Like dbms/src/Storages/MergeTree/BoolMask.h).
+    /// Working with UInt8: last bit = can be true, previous = can be false (Like dbms/Storages/MergeTree/BoolMask.h).
     /// This function provides "OR" operation for BoolMasks.
     /// Returns: "can be true" = A."can be true" OR B."can be true"
     /// "can be false" = A."can be false" AND B."can be false"
diff --git a/dbms/src/Functions/bitCount.cpp b/dbms/Functions/bitCount.cpp similarity index 100% rename from dbms/src/Functions/bitCount.cpp rename to dbms/Functions/bitCount.cpp
diff --git a/dbms/src/Functions/bitNot.cpp b/dbms/Functions/bitNot.cpp similarity index 100% rename from dbms/src/Functions/bitNot.cpp rename to dbms/Functions/bitNot.cpp
diff --git a/dbms/src/Functions/bitOr.cpp b/dbms/Functions/bitOr.cpp similarity index 100% rename from dbms/src/Functions/bitOr.cpp rename to dbms/Functions/bitOr.cpp
diff --git a/dbms/src/Functions/bitRotateLeft.cpp b/dbms/Functions/bitRotateLeft.cpp similarity index 100% rename from dbms/src/Functions/bitRotateLeft.cpp rename to dbms/Functions/bitRotateLeft.cpp
diff --git a/dbms/src/Functions/bitRotateRight.cpp b/dbms/Functions/bitRotateRight.cpp similarity index 100% rename from dbms/src/Functions/bitRotateRight.cpp rename to dbms/Functions/bitRotateRight.cpp
diff --git a/dbms/src/Functions/bitShiftLeft.cpp b/dbms/Functions/bitShiftLeft.cpp similarity index 100% rename from dbms/src/Functions/bitShiftLeft.cpp rename to dbms/Functions/bitShiftLeft.cpp
diff --git a/dbms/src/Functions/bitShiftRight.cpp b/dbms/Functions/bitShiftRight.cpp similarity index 100% rename from dbms/src/Functions/bitShiftRight.cpp rename to dbms/Functions/bitShiftRight.cpp
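
The hunks around here all reference the same two-bit BoolMask encoding: the last bit says "can be true" and the previous bit says "can be false", and the "AND" combinator takes the conjunction of the "can be true" bits but the disjunction of the "can be false" bits. A standalone sketch of that rule as stated in the comments (boolMaskAnd is a hypothetical name, and the bit assignment follows the comment text rather than an inspected header):

    #include <cstdint>
    #include <iostream>

    uint8_t boolMaskAnd(uint8_t a, uint8_t b)
    {
        uint8_t can_be_true = (a & b) & 0b01;    /// both sides must be able to be true
        uint8_t can_be_false = (a | b) & 0b10;   /// either side being possibly false suffices
        return can_be_true | can_be_false;
    }

    int main()
    {
        uint8_t always_true = 0b01;  /// can be true, cannot be false
        uint8_t unknown = 0b11;      /// can be either
        std::cout << int(boolMaskAnd(always_true, unknown)) << '\n';  /// 3, i.e. still unknown
    }
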
diff --git a/dbms/src/Functions/bitSwapLastTwo.cpp b/dbms/Functions/bitSwapLastTwo.cpp
similarity index 97%
rename from dbms/src/Functions/bitSwapLastTwo.cpp
rename to dbms/Functions/bitSwapLastTwo.cpp
index 11b52eca66f..5356d98d791 100644
--- a/dbms/src/Functions/bitSwapLastTwo.cpp
+++ b/dbms/Functions/bitSwapLastTwo.cpp
@@ -10,7 +10,7 @@ namespace DB
     extern const int BAD_CAST;
 }
 
-    /// Working with UInt8: last bit = can be true, previous = can be false (Like dbms/src/Storages/MergeTree/BoolMask.h).
+    /// Working with UInt8: last bit = can be true, previous = can be false (Like dbms/Storages/MergeTree/BoolMask.h).
     /// This function provides "NOT" operation for BoolMasks by swapping last two bits ("can be true" <-> "can be false").
 template <typename A>
 struct BitSwapLastTwoImpl
diff --git a/dbms/src/Functions/bitTest.cpp b/dbms/Functions/bitTest.cpp similarity index 100% rename from dbms/src/Functions/bitTest.cpp rename to dbms/Functions/bitTest.cpp
diff --git a/dbms/src/Functions/bitTestAll.cpp b/dbms/Functions/bitTestAll.cpp similarity index 100% rename from dbms/src/Functions/bitTestAll.cpp rename to dbms/Functions/bitTestAll.cpp
diff --git a/dbms/src/Functions/bitTestAny.cpp b/dbms/Functions/bitTestAny.cpp similarity index 100% rename from dbms/src/Functions/bitTestAny.cpp rename to dbms/Functions/bitTestAny.cpp
diff --git a/dbms/src/Functions/bitWrapperFunc.cpp b/dbms/Functions/bitWrapperFunc.cpp
similarity index 96%
rename from dbms/src/Functions/bitWrapperFunc.cpp
rename to dbms/Functions/bitWrapperFunc.cpp
index 2de8c0feb99..447f8a4f62b 100644
--- a/dbms/src/Functions/bitWrapperFunc.cpp
+++ b/dbms/Functions/bitWrapperFunc.cpp
@@ -9,7 +9,7 @@ namespace DB
     extern const int BAD_CAST;
 }
 
-    /// Working with UInt8: last bit = can be true, previous = can be false (Like dbms/src/Storages/MergeTree/BoolMask.h).
+    /// Working with UInt8: last bit = can be true, previous = can be false (Like dbms/Storages/MergeTree/BoolMask.h).
     /// This function wraps bool atomic functions
     /// and transforms their boolean return value to the BoolMask ("can be false" and "can be true" bits).
 template <typename A>
diff --git a/dbms/src/Functions/bitXor.cpp b/dbms/Functions/bitXor.cpp similarity index 100% rename from dbms/src/Functions/bitXor.cpp rename to dbms/Functions/bitXor.cpp
diff --git a/dbms/src/Functions/blockNumber.cpp b/dbms/Functions/blockNumber.cpp similarity index 100% rename from dbms/src/Functions/blockNumber.cpp rename to dbms/Functions/blockNumber.cpp
diff --git a/dbms/src/Functions/blockSerializedSize.cpp b/dbms/Functions/blockSerializedSize.cpp similarity index 100% rename from dbms/src/Functions/blockSerializedSize.cpp rename to dbms/Functions/blockSerializedSize.cpp
diff --git a/dbms/src/Functions/blockSize.cpp b/dbms/Functions/blockSize.cpp similarity index 100% rename from dbms/src/Functions/blockSize.cpp rename to dbms/Functions/blockSize.cpp
diff --git a/dbms/src/Functions/caseWithExpression.cpp b/dbms/Functions/caseWithExpression.cpp similarity index 100% rename from dbms/src/Functions/caseWithExpression.cpp rename to dbms/Functions/caseWithExpression.cpp
diff --git a/dbms/src/Functions/castTypeToEither.h b/dbms/Functions/castTypeToEither.h similarity index 100% rename from dbms/src/Functions/castTypeToEither.h rename to dbms/Functions/castTypeToEither.h
diff --git a/dbms/src/Functions/cbrt.cpp b/dbms/Functions/cbrt.cpp similarity index 100% rename from dbms/src/Functions/cbrt.cpp rename to dbms/Functions/cbrt.cpp
diff --git a/dbms/src/Functions/coalesce.cpp b/dbms/Functions/coalesce.cpp similarity index 100% rename from dbms/src/Functions/coalesce.cpp rename to dbms/Functions/coalesce.cpp
diff --git a/dbms/src/Functions/concat.cpp b/dbms/Functions/concat.cpp similarity index 100% rename from dbms/src/Functions/concat.cpp rename to dbms/Functions/concat.cpp
diff --git a/dbms/src/Functions/config_functions.h.in b/dbms/Functions/config_functions.h.in similarity index 100% rename from dbms/src/Functions/config_functions.h.in rename to dbms/Functions/config_functions.h.in
diff --git a/dbms/src/Functions/convertCharset.cpp b/dbms/Functions/convertCharset.cpp similarity index 100% rename from dbms/src/Functions/convertCharset.cpp rename to dbms/Functions/convertCharset.cpp
diff --git a/dbms/src/Functions/cos.cpp b/dbms/Functions/cos.cpp
similarity index 100% rename from dbms/src/Functions/cos.cpp rename to dbms/Functions/cos.cpp diff --git a/dbms/src/Functions/currentDatabase.cpp b/dbms/Functions/currentDatabase.cpp similarity index 100% rename from dbms/src/Functions/currentDatabase.cpp rename to dbms/Functions/currentDatabase.cpp diff --git a/dbms/src/Functions/currentQuota.cpp b/dbms/Functions/currentQuota.cpp similarity index 100% rename from dbms/src/Functions/currentQuota.cpp rename to dbms/Functions/currentQuota.cpp diff --git a/dbms/src/Functions/currentRowPolicies.cpp b/dbms/Functions/currentRowPolicies.cpp similarity index 100% rename from dbms/src/Functions/currentRowPolicies.cpp rename to dbms/Functions/currentRowPolicies.cpp diff --git a/dbms/src/Functions/currentUser.cpp b/dbms/Functions/currentUser.cpp similarity index 100% rename from dbms/src/Functions/currentUser.cpp rename to dbms/Functions/currentUser.cpp diff --git a/dbms/src/Functions/dateDiff.cpp b/dbms/Functions/dateDiff.cpp similarity index 100% rename from dbms/src/Functions/dateDiff.cpp rename to dbms/Functions/dateDiff.cpp diff --git a/dbms/src/Functions/defaultValueOfArgumentType.cpp b/dbms/Functions/defaultValueOfArgumentType.cpp similarity index 100% rename from dbms/src/Functions/defaultValueOfArgumentType.cpp rename to dbms/Functions/defaultValueOfArgumentType.cpp diff --git a/dbms/src/Functions/demange.cpp b/dbms/Functions/demange.cpp similarity index 100% rename from dbms/src/Functions/demange.cpp rename to dbms/Functions/demange.cpp diff --git a/dbms/src/Functions/divide.cpp b/dbms/Functions/divide.cpp similarity index 100% rename from dbms/src/Functions/divide.cpp rename to dbms/Functions/divide.cpp diff --git a/dbms/src/Functions/dumpColumnStructure.cpp b/dbms/Functions/dumpColumnStructure.cpp similarity index 100% rename from dbms/src/Functions/dumpColumnStructure.cpp rename to dbms/Functions/dumpColumnStructure.cpp diff --git a/dbms/src/Functions/e.cpp b/dbms/Functions/e.cpp similarity index 100% rename from dbms/src/Functions/e.cpp rename to dbms/Functions/e.cpp diff --git a/dbms/src/Functions/empty.cpp b/dbms/Functions/empty.cpp similarity index 100% rename from dbms/src/Functions/empty.cpp rename to dbms/Functions/empty.cpp diff --git a/dbms/src/Functions/endsWith.cpp b/dbms/Functions/endsWith.cpp similarity index 100% rename from dbms/src/Functions/endsWith.cpp rename to dbms/Functions/endsWith.cpp diff --git a/dbms/src/Functions/equals.cpp b/dbms/Functions/equals.cpp similarity index 100% rename from dbms/src/Functions/equals.cpp rename to dbms/Functions/equals.cpp diff --git a/dbms/src/Functions/erf.cpp b/dbms/Functions/erf.cpp similarity index 100% rename from dbms/src/Functions/erf.cpp rename to dbms/Functions/erf.cpp diff --git a/dbms/src/Functions/erfc.cpp b/dbms/Functions/erfc.cpp similarity index 100% rename from dbms/src/Functions/erfc.cpp rename to dbms/Functions/erfc.cpp diff --git a/dbms/src/Functions/evalMLMethod.cpp b/dbms/Functions/evalMLMethod.cpp similarity index 100% rename from dbms/src/Functions/evalMLMethod.cpp rename to dbms/Functions/evalMLMethod.cpp diff --git a/dbms/src/Functions/exp.cpp b/dbms/Functions/exp.cpp similarity index 100% rename from dbms/src/Functions/exp.cpp rename to dbms/Functions/exp.cpp diff --git a/dbms/src/Functions/exp10.cpp b/dbms/Functions/exp10.cpp similarity index 100% rename from dbms/src/Functions/exp10.cpp rename to dbms/Functions/exp10.cpp diff --git a/dbms/src/Functions/exp2.cpp b/dbms/Functions/exp2.cpp similarity index 100% rename from dbms/src/Functions/exp2.cpp rename to 
dbms/Functions/exp2.cpp diff --git a/dbms/src/Functions/extractTimeZoneFromFunctionArguments.cpp b/dbms/Functions/extractTimeZoneFromFunctionArguments.cpp similarity index 100% rename from dbms/src/Functions/extractTimeZoneFromFunctionArguments.cpp rename to dbms/Functions/extractTimeZoneFromFunctionArguments.cpp diff --git a/dbms/src/Functions/extractTimeZoneFromFunctionArguments.h b/dbms/Functions/extractTimeZoneFromFunctionArguments.h similarity index 100% rename from dbms/src/Functions/extractTimeZoneFromFunctionArguments.h rename to dbms/Functions/extractTimeZoneFromFunctionArguments.h diff --git a/dbms/src/Functions/filesystem.cpp b/dbms/Functions/filesystem.cpp similarity index 100% rename from dbms/src/Functions/filesystem.cpp rename to dbms/Functions/filesystem.cpp diff --git a/dbms/src/Functions/finalizeAggregation.cpp b/dbms/Functions/finalizeAggregation.cpp similarity index 100% rename from dbms/src/Functions/finalizeAggregation.cpp rename to dbms/Functions/finalizeAggregation.cpp diff --git a/dbms/src/Functions/formatDateTime.cpp b/dbms/Functions/formatDateTime.cpp similarity index 100% rename from dbms/src/Functions/formatDateTime.cpp rename to dbms/Functions/formatDateTime.cpp diff --git a/dbms/src/Functions/formatString.cpp b/dbms/Functions/formatString.cpp similarity index 100% rename from dbms/src/Functions/formatString.cpp rename to dbms/Functions/formatString.cpp diff --git a/dbms/src/Functions/formatString.h b/dbms/Functions/formatString.h similarity index 100% rename from dbms/src/Functions/formatString.h rename to dbms/Functions/formatString.h diff --git a/dbms/src/Functions/gcd.cpp b/dbms/Functions/gcd.cpp similarity index 100% rename from dbms/src/Functions/gcd.cpp rename to dbms/Functions/gcd.cpp diff --git a/dbms/src/Functions/generateUUIDv4.cpp b/dbms/Functions/generateUUIDv4.cpp similarity index 100% rename from dbms/src/Functions/generateUUIDv4.cpp rename to dbms/Functions/generateUUIDv4.cpp diff --git a/dbms/src/Functions/geoToH3.cpp b/dbms/Functions/geoToH3.cpp similarity index 100% rename from dbms/src/Functions/geoToH3.cpp rename to dbms/Functions/geoToH3.cpp diff --git a/dbms/src/Functions/geohashDecode.cpp b/dbms/Functions/geohashDecode.cpp similarity index 100% rename from dbms/src/Functions/geohashDecode.cpp rename to dbms/Functions/geohashDecode.cpp diff --git a/dbms/src/Functions/geohashEncode.cpp b/dbms/Functions/geohashEncode.cpp similarity index 100% rename from dbms/src/Functions/geohashEncode.cpp rename to dbms/Functions/geohashEncode.cpp diff --git a/dbms/src/Functions/geohashesInBox.cpp b/dbms/Functions/geohashesInBox.cpp similarity index 100% rename from dbms/src/Functions/geohashesInBox.cpp rename to dbms/Functions/geohashesInBox.cpp diff --git a/dbms/src/Functions/getMacro.cpp b/dbms/Functions/getMacro.cpp similarity index 100% rename from dbms/src/Functions/getMacro.cpp rename to dbms/Functions/getMacro.cpp diff --git a/dbms/src/Functions/getScalar.cpp b/dbms/Functions/getScalar.cpp similarity index 100% rename from dbms/src/Functions/getScalar.cpp rename to dbms/Functions/getScalar.cpp diff --git a/dbms/src/Functions/getSizeOfEnumType.cpp b/dbms/Functions/getSizeOfEnumType.cpp similarity index 100% rename from dbms/src/Functions/getSizeOfEnumType.cpp rename to dbms/Functions/getSizeOfEnumType.cpp diff --git a/dbms/src/Functions/greatCircleDistance.cpp b/dbms/Functions/greatCircleDistance.cpp similarity index 100% rename from dbms/src/Functions/greatCircleDistance.cpp rename to dbms/Functions/greatCircleDistance.cpp diff --git 
a/dbms/src/Functions/greater.cpp b/dbms/Functions/greater.cpp similarity index 100% rename from dbms/src/Functions/greater.cpp rename to dbms/Functions/greater.cpp diff --git a/dbms/src/Functions/greaterOrEquals.cpp b/dbms/Functions/greaterOrEquals.cpp similarity index 100% rename from dbms/src/Functions/greaterOrEquals.cpp rename to dbms/Functions/greaterOrEquals.cpp diff --git a/dbms/src/Functions/greatest.cpp b/dbms/Functions/greatest.cpp similarity index 100% rename from dbms/src/Functions/greatest.cpp rename to dbms/Functions/greatest.cpp diff --git a/dbms/src/Functions/h3EdgeAngle.cpp b/dbms/Functions/h3EdgeAngle.cpp similarity index 100% rename from dbms/src/Functions/h3EdgeAngle.cpp rename to dbms/Functions/h3EdgeAngle.cpp diff --git a/dbms/src/Functions/h3EdgeLengthM.cpp b/dbms/Functions/h3EdgeLengthM.cpp similarity index 100% rename from dbms/src/Functions/h3EdgeLengthM.cpp rename to dbms/Functions/h3EdgeLengthM.cpp diff --git a/dbms/src/Functions/h3GetBaseCell.cpp b/dbms/Functions/h3GetBaseCell.cpp similarity index 100% rename from dbms/src/Functions/h3GetBaseCell.cpp rename to dbms/Functions/h3GetBaseCell.cpp diff --git a/dbms/src/Functions/h3GetResolution.cpp b/dbms/Functions/h3GetResolution.cpp similarity index 100% rename from dbms/src/Functions/h3GetResolution.cpp rename to dbms/Functions/h3GetResolution.cpp diff --git a/dbms/src/Functions/h3HexAreaM2.cpp b/dbms/Functions/h3HexAreaM2.cpp similarity index 100% rename from dbms/src/Functions/h3HexAreaM2.cpp rename to dbms/Functions/h3HexAreaM2.cpp diff --git a/dbms/src/Functions/h3IndexesAreNeighbors.cpp b/dbms/Functions/h3IndexesAreNeighbors.cpp similarity index 100% rename from dbms/src/Functions/h3IndexesAreNeighbors.cpp rename to dbms/Functions/h3IndexesAreNeighbors.cpp diff --git a/dbms/src/Functions/h3IsValid.cpp b/dbms/Functions/h3IsValid.cpp similarity index 100% rename from dbms/src/Functions/h3IsValid.cpp rename to dbms/Functions/h3IsValid.cpp diff --git a/dbms/src/Functions/h3ToChildren.cpp b/dbms/Functions/h3ToChildren.cpp similarity index 100% rename from dbms/src/Functions/h3ToChildren.cpp rename to dbms/Functions/h3ToChildren.cpp diff --git a/dbms/src/Functions/h3ToParent.cpp b/dbms/Functions/h3ToParent.cpp similarity index 100% rename from dbms/src/Functions/h3ToParent.cpp rename to dbms/Functions/h3ToParent.cpp diff --git a/dbms/src/Functions/h3ToString.cpp b/dbms/Functions/h3ToString.cpp similarity index 100% rename from dbms/src/Functions/h3ToString.cpp rename to dbms/Functions/h3ToString.cpp diff --git a/dbms/src/Functions/h3kRing.cpp b/dbms/Functions/h3kRing.cpp similarity index 100% rename from dbms/src/Functions/h3kRing.cpp rename to dbms/Functions/h3kRing.cpp diff --git a/dbms/src/Functions/hasColumnInTable.cpp b/dbms/Functions/hasColumnInTable.cpp similarity index 100% rename from dbms/src/Functions/hasColumnInTable.cpp rename to dbms/Functions/hasColumnInTable.cpp diff --git a/dbms/src/Functions/hasToken.cpp b/dbms/Functions/hasToken.cpp similarity index 100% rename from dbms/src/Functions/hasToken.cpp rename to dbms/Functions/hasToken.cpp diff --git a/dbms/src/Functions/hasTokenCaseInsensitive.cpp b/dbms/Functions/hasTokenCaseInsensitive.cpp similarity index 100% rename from dbms/src/Functions/hasTokenCaseInsensitive.cpp rename to dbms/Functions/hasTokenCaseInsensitive.cpp diff --git a/dbms/src/Functions/hostName.cpp b/dbms/Functions/hostName.cpp similarity index 100% rename from dbms/src/Functions/hostName.cpp rename to dbms/Functions/hostName.cpp diff --git a/dbms/src/Functions/identity.cpp 
b/dbms/Functions/identity.cpp similarity index 100% rename from dbms/src/Functions/identity.cpp rename to dbms/Functions/identity.cpp diff --git a/dbms/src/Functions/if.cpp b/dbms/Functions/if.cpp similarity index 100% rename from dbms/src/Functions/if.cpp rename to dbms/Functions/if.cpp diff --git a/dbms/src/Functions/ifNotFinite.cpp b/dbms/Functions/ifNotFinite.cpp similarity index 100% rename from dbms/src/Functions/ifNotFinite.cpp rename to dbms/Functions/ifNotFinite.cpp diff --git a/dbms/src/Functions/ifNull.cpp b/dbms/Functions/ifNull.cpp similarity index 100% rename from dbms/src/Functions/ifNull.cpp rename to dbms/Functions/ifNull.cpp diff --git a/dbms/src/Functions/ignore.cpp b/dbms/Functions/ignore.cpp similarity index 100% rename from dbms/src/Functions/ignore.cpp rename to dbms/Functions/ignore.cpp diff --git a/dbms/src/Functions/ignoreExceptNull.cpp b/dbms/Functions/ignoreExceptNull.cpp similarity index 100% rename from dbms/src/Functions/ignoreExceptNull.cpp rename to dbms/Functions/ignoreExceptNull.cpp diff --git a/dbms/src/Functions/in.cpp b/dbms/Functions/in.cpp similarity index 100% rename from dbms/src/Functions/in.cpp rename to dbms/Functions/in.cpp diff --git a/dbms/src/Functions/intDiv.cpp b/dbms/Functions/intDiv.cpp similarity index 100% rename from dbms/src/Functions/intDiv.cpp rename to dbms/Functions/intDiv.cpp diff --git a/dbms/src/Functions/intDivOrZero.cpp b/dbms/Functions/intDivOrZero.cpp similarity index 100% rename from dbms/src/Functions/intDivOrZero.cpp rename to dbms/Functions/intDivOrZero.cpp diff --git a/dbms/src/Functions/intExp10.cpp b/dbms/Functions/intExp10.cpp similarity index 100% rename from dbms/src/Functions/intExp10.cpp rename to dbms/Functions/intExp10.cpp diff --git a/dbms/src/Functions/intExp2.cpp b/dbms/Functions/intExp2.cpp similarity index 100% rename from dbms/src/Functions/intExp2.cpp rename to dbms/Functions/intExp2.cpp diff --git a/dbms/src/Functions/isFinite.cpp b/dbms/Functions/isFinite.cpp similarity index 100% rename from dbms/src/Functions/isFinite.cpp rename to dbms/Functions/isFinite.cpp diff --git a/dbms/src/Functions/isInfinite.cpp b/dbms/Functions/isInfinite.cpp similarity index 100% rename from dbms/src/Functions/isInfinite.cpp rename to dbms/Functions/isInfinite.cpp diff --git a/dbms/src/Functions/isNaN.cpp b/dbms/Functions/isNaN.cpp similarity index 100% rename from dbms/src/Functions/isNaN.cpp rename to dbms/Functions/isNaN.cpp diff --git a/dbms/src/Functions/isNotNull.cpp b/dbms/Functions/isNotNull.cpp similarity index 100% rename from dbms/src/Functions/isNotNull.cpp rename to dbms/Functions/isNotNull.cpp diff --git a/dbms/src/Functions/isNull.cpp b/dbms/Functions/isNull.cpp similarity index 100% rename from dbms/src/Functions/isNull.cpp rename to dbms/Functions/isNull.cpp diff --git a/dbms/src/Functions/isValidUTF8.cpp b/dbms/Functions/isValidUTF8.cpp similarity index 100% rename from dbms/src/Functions/isValidUTF8.cpp rename to dbms/Functions/isValidUTF8.cpp diff --git a/dbms/src/Functions/jumpConsistentHash.cpp b/dbms/Functions/jumpConsistentHash.cpp similarity index 100% rename from dbms/src/Functions/jumpConsistentHash.cpp rename to dbms/Functions/jumpConsistentHash.cpp diff --git a/dbms/src/Functions/lcm.cpp b/dbms/Functions/lcm.cpp similarity index 100% rename from dbms/src/Functions/lcm.cpp rename to dbms/Functions/lcm.cpp diff --git a/dbms/src/Functions/least.cpp b/dbms/Functions/least.cpp similarity index 100% rename from dbms/src/Functions/least.cpp rename to dbms/Functions/least.cpp diff --git 
a/dbms/src/Functions/lengthUTF8.cpp b/dbms/Functions/lengthUTF8.cpp similarity index 100% rename from dbms/src/Functions/lengthUTF8.cpp rename to dbms/Functions/lengthUTF8.cpp diff --git a/dbms/src/Functions/less.cpp b/dbms/Functions/less.cpp similarity index 100% rename from dbms/src/Functions/less.cpp rename to dbms/Functions/less.cpp diff --git a/dbms/src/Functions/lessOrEquals.cpp b/dbms/Functions/lessOrEquals.cpp similarity index 100% rename from dbms/src/Functions/lessOrEquals.cpp rename to dbms/Functions/lessOrEquals.cpp diff --git a/dbms/src/Functions/lgamma.cpp b/dbms/Functions/lgamma.cpp similarity index 100% rename from dbms/src/Functions/lgamma.cpp rename to dbms/Functions/lgamma.cpp diff --git a/dbms/src/Functions/likePatternToRegexp.h b/dbms/Functions/likePatternToRegexp.h similarity index 100% rename from dbms/src/Functions/likePatternToRegexp.h rename to dbms/Functions/likePatternToRegexp.h diff --git a/dbms/src/Functions/log.cpp b/dbms/Functions/log.cpp similarity index 100% rename from dbms/src/Functions/log.cpp rename to dbms/Functions/log.cpp diff --git a/dbms/src/Functions/log10.cpp b/dbms/Functions/log10.cpp similarity index 100% rename from dbms/src/Functions/log10.cpp rename to dbms/Functions/log10.cpp diff --git a/dbms/src/Functions/log2.cpp b/dbms/Functions/log2.cpp similarity index 100% rename from dbms/src/Functions/log2.cpp rename to dbms/Functions/log2.cpp diff --git a/dbms/src/Functions/lowCardinalityIndices.cpp b/dbms/Functions/lowCardinalityIndices.cpp similarity index 100% rename from dbms/src/Functions/lowCardinalityIndices.cpp rename to dbms/Functions/lowCardinalityIndices.cpp diff --git a/dbms/src/Functions/lowCardinalityKeys.cpp b/dbms/Functions/lowCardinalityKeys.cpp similarity index 100% rename from dbms/src/Functions/lowCardinalityKeys.cpp rename to dbms/Functions/lowCardinalityKeys.cpp diff --git a/dbms/src/Functions/lower.cpp b/dbms/Functions/lower.cpp similarity index 100% rename from dbms/src/Functions/lower.cpp rename to dbms/Functions/lower.cpp diff --git a/dbms/src/Functions/lowerUTF8.cpp b/dbms/Functions/lowerUTF8.cpp similarity index 100% rename from dbms/src/Functions/lowerUTF8.cpp rename to dbms/Functions/lowerUTF8.cpp diff --git a/dbms/src/Functions/materialize.cpp b/dbms/Functions/materialize.cpp similarity index 100% rename from dbms/src/Functions/materialize.cpp rename to dbms/Functions/materialize.cpp diff --git a/dbms/src/Functions/minus.cpp b/dbms/Functions/minus.cpp similarity index 100% rename from dbms/src/Functions/minus.cpp rename to dbms/Functions/minus.cpp diff --git a/dbms/src/Functions/modulo.cpp b/dbms/Functions/modulo.cpp similarity index 100% rename from dbms/src/Functions/modulo.cpp rename to dbms/Functions/modulo.cpp diff --git a/dbms/src/Functions/moduloOrZero.cpp b/dbms/Functions/moduloOrZero.cpp similarity index 100% rename from dbms/src/Functions/moduloOrZero.cpp rename to dbms/Functions/moduloOrZero.cpp diff --git a/dbms/src/Functions/multiIf.cpp b/dbms/Functions/multiIf.cpp similarity index 100% rename from dbms/src/Functions/multiIf.cpp rename to dbms/Functions/multiIf.cpp diff --git a/dbms/src/Functions/multiSearchAllPositions.cpp b/dbms/Functions/multiSearchAllPositions.cpp similarity index 100% rename from dbms/src/Functions/multiSearchAllPositions.cpp rename to dbms/Functions/multiSearchAllPositions.cpp diff --git a/dbms/src/Functions/multiSearchAllPositionsCaseInsensitive.cpp b/dbms/Functions/multiSearchAllPositionsCaseInsensitive.cpp similarity index 100% rename from 
dbms/src/Functions/multiSearchAllPositionsCaseInsensitive.cpp rename to dbms/Functions/multiSearchAllPositionsCaseInsensitive.cpp diff --git a/dbms/src/Functions/multiSearchAllPositionsCaseInsensitiveUTF8.cpp b/dbms/Functions/multiSearchAllPositionsCaseInsensitiveUTF8.cpp similarity index 100% rename from dbms/src/Functions/multiSearchAllPositionsCaseInsensitiveUTF8.cpp rename to dbms/Functions/multiSearchAllPositionsCaseInsensitiveUTF8.cpp diff --git a/dbms/src/Functions/multiSearchAllPositionsUTF8.cpp b/dbms/Functions/multiSearchAllPositionsUTF8.cpp similarity index 100% rename from dbms/src/Functions/multiSearchAllPositionsUTF8.cpp rename to dbms/Functions/multiSearchAllPositionsUTF8.cpp diff --git a/dbms/src/Functions/multiSearchAny.cpp b/dbms/Functions/multiSearchAny.cpp similarity index 100% rename from dbms/src/Functions/multiSearchAny.cpp rename to dbms/Functions/multiSearchAny.cpp diff --git a/dbms/src/Functions/multiSearchAnyCaseInsensitive.cpp b/dbms/Functions/multiSearchAnyCaseInsensitive.cpp similarity index 100% rename from dbms/src/Functions/multiSearchAnyCaseInsensitive.cpp rename to dbms/Functions/multiSearchAnyCaseInsensitive.cpp diff --git a/dbms/src/Functions/multiSearchAnyCaseInsensitiveUTF8.cpp b/dbms/Functions/multiSearchAnyCaseInsensitiveUTF8.cpp similarity index 100% rename from dbms/src/Functions/multiSearchAnyCaseInsensitiveUTF8.cpp rename to dbms/Functions/multiSearchAnyCaseInsensitiveUTF8.cpp diff --git a/dbms/src/Functions/multiSearchAnyUTF8.cpp b/dbms/Functions/multiSearchAnyUTF8.cpp similarity index 100% rename from dbms/src/Functions/multiSearchAnyUTF8.cpp rename to dbms/Functions/multiSearchAnyUTF8.cpp diff --git a/dbms/src/Functions/multiSearchFirstIndex.cpp b/dbms/Functions/multiSearchFirstIndex.cpp similarity index 100% rename from dbms/src/Functions/multiSearchFirstIndex.cpp rename to dbms/Functions/multiSearchFirstIndex.cpp diff --git a/dbms/src/Functions/multiSearchFirstIndexCaseInsensitive.cpp b/dbms/Functions/multiSearchFirstIndexCaseInsensitive.cpp similarity index 100% rename from dbms/src/Functions/multiSearchFirstIndexCaseInsensitive.cpp rename to dbms/Functions/multiSearchFirstIndexCaseInsensitive.cpp diff --git a/dbms/src/Functions/multiSearchFirstIndexCaseInsensitiveUTF8.cpp b/dbms/Functions/multiSearchFirstIndexCaseInsensitiveUTF8.cpp similarity index 100% rename from dbms/src/Functions/multiSearchFirstIndexCaseInsensitiveUTF8.cpp rename to dbms/Functions/multiSearchFirstIndexCaseInsensitiveUTF8.cpp diff --git a/dbms/src/Functions/multiSearchFirstIndexUTF8.cpp b/dbms/Functions/multiSearchFirstIndexUTF8.cpp similarity index 100% rename from dbms/src/Functions/multiSearchFirstIndexUTF8.cpp rename to dbms/Functions/multiSearchFirstIndexUTF8.cpp diff --git a/dbms/src/Functions/multiSearchFirstPosition.cpp b/dbms/Functions/multiSearchFirstPosition.cpp similarity index 100% rename from dbms/src/Functions/multiSearchFirstPosition.cpp rename to dbms/Functions/multiSearchFirstPosition.cpp diff --git a/dbms/src/Functions/multiSearchFirstPositionCaseInsensitive.cpp b/dbms/Functions/multiSearchFirstPositionCaseInsensitive.cpp similarity index 100% rename from dbms/src/Functions/multiSearchFirstPositionCaseInsensitive.cpp rename to dbms/Functions/multiSearchFirstPositionCaseInsensitive.cpp diff --git a/dbms/src/Functions/multiSearchFirstPositionCaseInsensitiveUTF8.cpp b/dbms/Functions/multiSearchFirstPositionCaseInsensitiveUTF8.cpp similarity index 100% rename from dbms/src/Functions/multiSearchFirstPositionCaseInsensitiveUTF8.cpp rename to 
dbms/Functions/multiSearchFirstPositionCaseInsensitiveUTF8.cpp diff --git a/dbms/src/Functions/multiSearchFirstPositionUTF8.cpp b/dbms/Functions/multiSearchFirstPositionUTF8.cpp similarity index 100% rename from dbms/src/Functions/multiSearchFirstPositionUTF8.cpp rename to dbms/Functions/multiSearchFirstPositionUTF8.cpp diff --git a/dbms/src/Functions/multiply.cpp b/dbms/Functions/multiply.cpp similarity index 100% rename from dbms/src/Functions/multiply.cpp rename to dbms/Functions/multiply.cpp diff --git a/dbms/src/Functions/negate.cpp b/dbms/Functions/negate.cpp similarity index 100% rename from dbms/src/Functions/negate.cpp rename to dbms/Functions/negate.cpp diff --git a/dbms/src/Functions/neighbor.cpp b/dbms/Functions/neighbor.cpp similarity index 100% rename from dbms/src/Functions/neighbor.cpp rename to dbms/Functions/neighbor.cpp diff --git a/dbms/src/Functions/notEmpty.cpp b/dbms/Functions/notEmpty.cpp similarity index 100% rename from dbms/src/Functions/notEmpty.cpp rename to dbms/Functions/notEmpty.cpp diff --git a/dbms/src/Functions/notEquals.cpp b/dbms/Functions/notEquals.cpp similarity index 100% rename from dbms/src/Functions/notEquals.cpp rename to dbms/Functions/notEquals.cpp diff --git a/dbms/src/Functions/now.cpp b/dbms/Functions/now.cpp similarity index 100% rename from dbms/src/Functions/now.cpp rename to dbms/Functions/now.cpp diff --git a/dbms/src/Functions/now64.cpp b/dbms/Functions/now64.cpp similarity index 100% rename from dbms/src/Functions/now64.cpp rename to dbms/Functions/now64.cpp diff --git a/dbms/src/Functions/nullIf.cpp b/dbms/Functions/nullIf.cpp similarity index 100% rename from dbms/src/Functions/nullIf.cpp rename to dbms/Functions/nullIf.cpp diff --git a/dbms/src/Functions/pi.cpp b/dbms/Functions/pi.cpp similarity index 100% rename from dbms/src/Functions/pi.cpp rename to dbms/Functions/pi.cpp diff --git a/dbms/src/Functions/plus.cpp b/dbms/Functions/plus.cpp similarity index 100% rename from dbms/src/Functions/plus.cpp rename to dbms/Functions/plus.cpp diff --git a/dbms/src/Functions/pointInEllipses.cpp b/dbms/Functions/pointInEllipses.cpp similarity index 100% rename from dbms/src/Functions/pointInEllipses.cpp rename to dbms/Functions/pointInEllipses.cpp diff --git a/dbms/src/Functions/pointInPolygon.cpp b/dbms/Functions/pointInPolygon.cpp similarity index 100% rename from dbms/src/Functions/pointInPolygon.cpp rename to dbms/Functions/pointInPolygon.cpp diff --git a/dbms/src/Functions/position.cpp b/dbms/Functions/position.cpp similarity index 100% rename from dbms/src/Functions/position.cpp rename to dbms/Functions/position.cpp diff --git a/dbms/src/Functions/positionCaseInsensitive.cpp b/dbms/Functions/positionCaseInsensitive.cpp similarity index 100% rename from dbms/src/Functions/positionCaseInsensitive.cpp rename to dbms/Functions/positionCaseInsensitive.cpp diff --git a/dbms/src/Functions/positionCaseInsensitiveUTF8.cpp b/dbms/Functions/positionCaseInsensitiveUTF8.cpp similarity index 100% rename from dbms/src/Functions/positionCaseInsensitiveUTF8.cpp rename to dbms/Functions/positionCaseInsensitiveUTF8.cpp diff --git a/dbms/src/Functions/positionUTF8.cpp b/dbms/Functions/positionUTF8.cpp similarity index 100% rename from dbms/src/Functions/positionUTF8.cpp rename to dbms/Functions/positionUTF8.cpp diff --git a/dbms/src/Functions/pow.cpp b/dbms/Functions/pow.cpp similarity index 100% rename from dbms/src/Functions/pow.cpp rename to dbms/Functions/pow.cpp diff --git a/dbms/src/Functions/rand.cpp b/dbms/Functions/rand.cpp similarity index 100% 
rename from dbms/src/Functions/rand.cpp rename to dbms/Functions/rand.cpp diff --git a/dbms/src/Functions/rand64.cpp b/dbms/Functions/rand64.cpp similarity index 100% rename from dbms/src/Functions/rand64.cpp rename to dbms/Functions/rand64.cpp diff --git a/dbms/src/Functions/randConstant.cpp b/dbms/Functions/randConstant.cpp similarity index 100% rename from dbms/src/Functions/randConstant.cpp rename to dbms/Functions/randConstant.cpp diff --git a/dbms/src/Functions/randomPrintableASCII.cpp b/dbms/Functions/randomPrintableASCII.cpp similarity index 100% rename from dbms/src/Functions/randomPrintableASCII.cpp rename to dbms/Functions/randomPrintableASCII.cpp diff --git a/dbms/src/Functions/regexpQuoteMeta.cpp b/dbms/Functions/regexpQuoteMeta.cpp similarity index 100% rename from dbms/src/Functions/regexpQuoteMeta.cpp rename to dbms/Functions/regexpQuoteMeta.cpp diff --git a/dbms/src/Functions/registerFunctions.cpp b/dbms/Functions/registerFunctions.cpp similarity index 100% rename from dbms/src/Functions/registerFunctions.cpp rename to dbms/Functions/registerFunctions.cpp diff --git a/dbms/src/Functions/registerFunctions.h b/dbms/Functions/registerFunctions.h similarity index 100% rename from dbms/src/Functions/registerFunctions.h rename to dbms/Functions/registerFunctions.h diff --git a/dbms/src/Functions/registerFunctionsArithmetic.cpp b/dbms/Functions/registerFunctionsArithmetic.cpp similarity index 100% rename from dbms/src/Functions/registerFunctionsArithmetic.cpp rename to dbms/Functions/registerFunctionsArithmetic.cpp diff --git a/dbms/src/Functions/registerFunctionsComparison.cpp b/dbms/Functions/registerFunctionsComparison.cpp similarity index 100% rename from dbms/src/Functions/registerFunctionsComparison.cpp rename to dbms/Functions/registerFunctionsComparison.cpp diff --git a/dbms/src/Functions/registerFunctionsConditional.cpp b/dbms/Functions/registerFunctionsConditional.cpp similarity index 100% rename from dbms/src/Functions/registerFunctionsConditional.cpp rename to dbms/Functions/registerFunctionsConditional.cpp diff --git a/dbms/src/Functions/registerFunctionsConsistentHashing.cpp b/dbms/Functions/registerFunctionsConsistentHashing.cpp similarity index 100% rename from dbms/src/Functions/registerFunctionsConsistentHashing.cpp rename to dbms/Functions/registerFunctionsConsistentHashing.cpp diff --git a/dbms/src/Functions/registerFunctionsDateTime.cpp b/dbms/Functions/registerFunctionsDateTime.cpp similarity index 100% rename from dbms/src/Functions/registerFunctionsDateTime.cpp rename to dbms/Functions/registerFunctionsDateTime.cpp diff --git a/dbms/src/Functions/registerFunctionsGeo.cpp b/dbms/Functions/registerFunctionsGeo.cpp similarity index 100% rename from dbms/src/Functions/registerFunctionsGeo.cpp rename to dbms/Functions/registerFunctionsGeo.cpp diff --git a/dbms/src/Functions/registerFunctionsHigherOrder.cpp b/dbms/Functions/registerFunctionsHigherOrder.cpp similarity index 100% rename from dbms/src/Functions/registerFunctionsHigherOrder.cpp rename to dbms/Functions/registerFunctionsHigherOrder.cpp diff --git a/dbms/src/Functions/registerFunctionsIntrospection.cpp b/dbms/Functions/registerFunctionsIntrospection.cpp similarity index 100% rename from dbms/src/Functions/registerFunctionsIntrospection.cpp rename to dbms/Functions/registerFunctionsIntrospection.cpp diff --git a/dbms/src/Functions/registerFunctionsMath.cpp b/dbms/Functions/registerFunctionsMath.cpp similarity index 100% rename from dbms/src/Functions/registerFunctionsMath.cpp rename to 
dbms/Functions/registerFunctionsMath.cpp diff --git a/dbms/src/Functions/registerFunctionsMiscellaneous.cpp b/dbms/Functions/registerFunctionsMiscellaneous.cpp similarity index 100% rename from dbms/src/Functions/registerFunctionsMiscellaneous.cpp rename to dbms/Functions/registerFunctionsMiscellaneous.cpp diff --git a/dbms/src/Functions/registerFunctionsNull.cpp b/dbms/Functions/registerFunctionsNull.cpp similarity index 100% rename from dbms/src/Functions/registerFunctionsNull.cpp rename to dbms/Functions/registerFunctionsNull.cpp diff --git a/dbms/src/Functions/registerFunctionsRandom.cpp b/dbms/Functions/registerFunctionsRandom.cpp similarity index 100% rename from dbms/src/Functions/registerFunctionsRandom.cpp rename to dbms/Functions/registerFunctionsRandom.cpp diff --git a/dbms/src/Functions/registerFunctionsReinterpret.cpp b/dbms/Functions/registerFunctionsReinterpret.cpp similarity index 100% rename from dbms/src/Functions/registerFunctionsReinterpret.cpp rename to dbms/Functions/registerFunctionsReinterpret.cpp diff --git a/dbms/src/Functions/registerFunctionsString.cpp b/dbms/Functions/registerFunctionsString.cpp similarity index 100% rename from dbms/src/Functions/registerFunctionsString.cpp rename to dbms/Functions/registerFunctionsString.cpp diff --git a/dbms/src/Functions/registerFunctionsStringSearch.cpp b/dbms/Functions/registerFunctionsStringSearch.cpp similarity index 100% rename from dbms/src/Functions/registerFunctionsStringSearch.cpp rename to dbms/Functions/registerFunctionsStringSearch.cpp diff --git a/dbms/src/Functions/registerFunctionsTuple.cpp b/dbms/Functions/registerFunctionsTuple.cpp similarity index 100% rename from dbms/src/Functions/registerFunctionsTuple.cpp rename to dbms/Functions/registerFunctionsTuple.cpp diff --git a/dbms/src/Functions/registerFunctionsVisitParam.cpp b/dbms/Functions/registerFunctionsVisitParam.cpp similarity index 100% rename from dbms/src/Functions/registerFunctionsVisitParam.cpp rename to dbms/Functions/registerFunctionsVisitParam.cpp diff --git a/dbms/src/Functions/reinterpretAsFixedString.cpp b/dbms/Functions/reinterpretAsFixedString.cpp similarity index 100% rename from dbms/src/Functions/reinterpretAsFixedString.cpp rename to dbms/Functions/reinterpretAsFixedString.cpp diff --git a/dbms/src/Functions/reinterpretAsString.cpp b/dbms/Functions/reinterpretAsString.cpp similarity index 100% rename from dbms/src/Functions/reinterpretAsString.cpp rename to dbms/Functions/reinterpretAsString.cpp diff --git a/dbms/src/Functions/reinterpretStringAs.cpp b/dbms/Functions/reinterpretStringAs.cpp similarity index 100% rename from dbms/src/Functions/reinterpretStringAs.cpp rename to dbms/Functions/reinterpretStringAs.cpp diff --git a/dbms/src/Functions/repeat.cpp b/dbms/Functions/repeat.cpp similarity index 100% rename from dbms/src/Functions/repeat.cpp rename to dbms/Functions/repeat.cpp diff --git a/dbms/src/Functions/replicate.cpp b/dbms/Functions/replicate.cpp similarity index 100% rename from dbms/src/Functions/replicate.cpp rename to dbms/Functions/replicate.cpp diff --git a/dbms/src/Functions/reverse.cpp b/dbms/Functions/reverse.cpp similarity index 100% rename from dbms/src/Functions/reverse.cpp rename to dbms/Functions/reverse.cpp diff --git a/dbms/src/Functions/reverseUTF8.cpp b/dbms/Functions/reverseUTF8.cpp similarity index 100% rename from dbms/src/Functions/reverseUTF8.cpp rename to dbms/Functions/reverseUTF8.cpp diff --git a/dbms/src/Functions/roundAge.cpp b/dbms/Functions/roundAge.cpp similarity index 100% rename from 
dbms/src/Functions/roundAge.cpp rename to dbms/Functions/roundAge.cpp diff --git a/dbms/src/Functions/roundDuration.cpp b/dbms/Functions/roundDuration.cpp similarity index 100% rename from dbms/src/Functions/roundDuration.cpp rename to dbms/Functions/roundDuration.cpp diff --git a/dbms/src/Functions/roundToExp2.cpp b/dbms/Functions/roundToExp2.cpp similarity index 100% rename from dbms/src/Functions/roundToExp2.cpp rename to dbms/Functions/roundToExp2.cpp diff --git a/dbms/src/Functions/rowNumberInAllBlocks.cpp b/dbms/Functions/rowNumberInAllBlocks.cpp similarity index 100% rename from dbms/src/Functions/rowNumberInAllBlocks.cpp rename to dbms/Functions/rowNumberInAllBlocks.cpp diff --git a/dbms/src/Functions/rowNumberInBlock.cpp b/dbms/Functions/rowNumberInBlock.cpp similarity index 100% rename from dbms/src/Functions/rowNumberInBlock.cpp rename to dbms/Functions/rowNumberInBlock.cpp diff --git a/dbms/src/Functions/runningAccumulate.cpp b/dbms/Functions/runningAccumulate.cpp similarity index 100% rename from dbms/src/Functions/runningAccumulate.cpp rename to dbms/Functions/runningAccumulate.cpp diff --git a/dbms/src/Functions/runningDifference.cpp b/dbms/Functions/runningDifference.cpp similarity index 100% rename from dbms/src/Functions/runningDifference.cpp rename to dbms/Functions/runningDifference.cpp diff --git a/dbms/src/Functions/runningDifference.h b/dbms/Functions/runningDifference.h similarity index 100% rename from dbms/src/Functions/runningDifference.h rename to dbms/Functions/runningDifference.h diff --git a/dbms/src/Functions/runningDifferenceStartingWithFirstValue.cpp b/dbms/Functions/runningDifferenceStartingWithFirstValue.cpp similarity index 100% rename from dbms/src/Functions/runningDifferenceStartingWithFirstValue.cpp rename to dbms/Functions/runningDifferenceStartingWithFirstValue.cpp diff --git a/dbms/src/Functions/sigmoid.cpp b/dbms/Functions/sigmoid.cpp similarity index 100% rename from dbms/src/Functions/sigmoid.cpp rename to dbms/Functions/sigmoid.cpp diff --git a/dbms/src/Functions/sin.cpp b/dbms/Functions/sin.cpp similarity index 100% rename from dbms/src/Functions/sin.cpp rename to dbms/Functions/sin.cpp diff --git a/dbms/src/Functions/sleep.cpp b/dbms/Functions/sleep.cpp similarity index 100% rename from dbms/src/Functions/sleep.cpp rename to dbms/Functions/sleep.cpp diff --git a/dbms/src/Functions/sleep.h b/dbms/Functions/sleep.h similarity index 100% rename from dbms/src/Functions/sleep.h rename to dbms/Functions/sleep.h diff --git a/dbms/src/Functions/sleepEachRow.cpp b/dbms/Functions/sleepEachRow.cpp similarity index 100% rename from dbms/src/Functions/sleepEachRow.cpp rename to dbms/Functions/sleepEachRow.cpp diff --git a/dbms/src/Functions/sqrt.cpp b/dbms/Functions/sqrt.cpp similarity index 100% rename from dbms/src/Functions/sqrt.cpp rename to dbms/Functions/sqrt.cpp diff --git a/dbms/src/Functions/startsWith.cpp b/dbms/Functions/startsWith.cpp similarity index 100% rename from dbms/src/Functions/startsWith.cpp rename to dbms/Functions/startsWith.cpp diff --git a/dbms/src/Functions/stringToH3.cpp b/dbms/Functions/stringToH3.cpp similarity index 100% rename from dbms/src/Functions/stringToH3.cpp rename to dbms/Functions/stringToH3.cpp diff --git a/dbms/src/Functions/substring.cpp b/dbms/Functions/substring.cpp similarity index 100% rename from dbms/src/Functions/substring.cpp rename to dbms/Functions/substring.cpp diff --git a/dbms/src/Functions/subtractDays.cpp b/dbms/Functions/subtractDays.cpp similarity index 100% rename from 
dbms/src/Functions/subtractDays.cpp rename to dbms/Functions/subtractDays.cpp diff --git a/dbms/src/Functions/subtractHours.cpp b/dbms/Functions/subtractHours.cpp similarity index 100% rename from dbms/src/Functions/subtractHours.cpp rename to dbms/Functions/subtractHours.cpp diff --git a/dbms/src/Functions/subtractMinutes.cpp b/dbms/Functions/subtractMinutes.cpp similarity index 100% rename from dbms/src/Functions/subtractMinutes.cpp rename to dbms/Functions/subtractMinutes.cpp diff --git a/dbms/src/Functions/subtractMonths.cpp b/dbms/Functions/subtractMonths.cpp similarity index 100% rename from dbms/src/Functions/subtractMonths.cpp rename to dbms/Functions/subtractMonths.cpp diff --git a/dbms/src/Functions/subtractQuarters.cpp b/dbms/Functions/subtractQuarters.cpp similarity index 100% rename from dbms/src/Functions/subtractQuarters.cpp rename to dbms/Functions/subtractQuarters.cpp diff --git a/dbms/src/Functions/subtractSeconds.cpp b/dbms/Functions/subtractSeconds.cpp similarity index 100% rename from dbms/src/Functions/subtractSeconds.cpp rename to dbms/Functions/subtractSeconds.cpp diff --git a/dbms/src/Functions/subtractWeeks.cpp b/dbms/Functions/subtractWeeks.cpp similarity index 100% rename from dbms/src/Functions/subtractWeeks.cpp rename to dbms/Functions/subtractWeeks.cpp diff --git a/dbms/src/Functions/subtractYears.cpp b/dbms/Functions/subtractYears.cpp similarity index 100% rename from dbms/src/Functions/subtractYears.cpp rename to dbms/Functions/subtractYears.cpp diff --git a/dbms/src/Functions/sumburConsistentHash.cpp b/dbms/Functions/sumburConsistentHash.cpp similarity index 100% rename from dbms/src/Functions/sumburConsistentHash.cpp rename to dbms/Functions/sumburConsistentHash.cpp diff --git a/dbms/src/Functions/tan.cpp b/dbms/Functions/tan.cpp similarity index 100% rename from dbms/src/Functions/tan.cpp rename to dbms/Functions/tan.cpp diff --git a/dbms/src/Functions/tanh.cpp b/dbms/Functions/tanh.cpp similarity index 100% rename from dbms/src/Functions/tanh.cpp rename to dbms/Functions/tanh.cpp diff --git a/dbms/src/Functions/tests/CMakeLists.txt b/dbms/Functions/tests/CMakeLists.txt similarity index 100% rename from dbms/src/Functions/tests/CMakeLists.txt rename to dbms/Functions/tests/CMakeLists.txt diff --git a/dbms/src/Functions/tests/number_traits.cpp b/dbms/Functions/tests/number_traits.cpp similarity index 100% rename from dbms/src/Functions/tests/number_traits.cpp rename to dbms/Functions/tests/number_traits.cpp diff --git a/dbms/src/Functions/tgamma.cpp b/dbms/Functions/tgamma.cpp similarity index 100% rename from dbms/src/Functions/tgamma.cpp rename to dbms/Functions/tgamma.cpp diff --git a/dbms/src/Functions/throwIf.cpp b/dbms/Functions/throwIf.cpp similarity index 100% rename from dbms/src/Functions/throwIf.cpp rename to dbms/Functions/throwIf.cpp diff --git a/dbms/src/Functions/timeSlot.cpp b/dbms/Functions/timeSlot.cpp similarity index 100% rename from dbms/src/Functions/timeSlot.cpp rename to dbms/Functions/timeSlot.cpp diff --git a/dbms/src/Functions/timeSlots.cpp b/dbms/Functions/timeSlots.cpp similarity index 100% rename from dbms/src/Functions/timeSlots.cpp rename to dbms/Functions/timeSlots.cpp diff --git a/dbms/src/Functions/timezone.cpp b/dbms/Functions/timezone.cpp similarity index 100% rename from dbms/src/Functions/timezone.cpp rename to dbms/Functions/timezone.cpp diff --git a/dbms/src/Functions/toColumnTypeName.cpp b/dbms/Functions/toColumnTypeName.cpp similarity index 100% rename from dbms/src/Functions/toColumnTypeName.cpp rename to 
dbms/Functions/toColumnTypeName.cpp diff --git a/dbms/src/Functions/toCustomWeek.cpp b/dbms/Functions/toCustomWeek.cpp similarity index 100% rename from dbms/src/Functions/toCustomWeek.cpp rename to dbms/Functions/toCustomWeek.cpp diff --git a/dbms/src/Functions/toDayOfMonth.cpp b/dbms/Functions/toDayOfMonth.cpp similarity index 100% rename from dbms/src/Functions/toDayOfMonth.cpp rename to dbms/Functions/toDayOfMonth.cpp diff --git a/dbms/src/Functions/toDayOfWeek.cpp b/dbms/Functions/toDayOfWeek.cpp similarity index 100% rename from dbms/src/Functions/toDayOfWeek.cpp rename to dbms/Functions/toDayOfWeek.cpp diff --git a/dbms/src/Functions/toDayOfYear.cpp b/dbms/Functions/toDayOfYear.cpp similarity index 100% rename from dbms/src/Functions/toDayOfYear.cpp rename to dbms/Functions/toDayOfYear.cpp diff --git a/dbms/src/Functions/toHour.cpp b/dbms/Functions/toHour.cpp similarity index 100% rename from dbms/src/Functions/toHour.cpp rename to dbms/Functions/toHour.cpp diff --git a/dbms/src/Functions/toISOWeek.cpp b/dbms/Functions/toISOWeek.cpp similarity index 100% rename from dbms/src/Functions/toISOWeek.cpp rename to dbms/Functions/toISOWeek.cpp diff --git a/dbms/src/Functions/toISOYear.cpp b/dbms/Functions/toISOYear.cpp similarity index 100% rename from dbms/src/Functions/toISOYear.cpp rename to dbms/Functions/toISOYear.cpp diff --git a/dbms/src/Functions/toLowCardinality.cpp b/dbms/Functions/toLowCardinality.cpp similarity index 100% rename from dbms/src/Functions/toLowCardinality.cpp rename to dbms/Functions/toLowCardinality.cpp diff --git a/dbms/src/Functions/toMinute.cpp b/dbms/Functions/toMinute.cpp similarity index 100% rename from dbms/src/Functions/toMinute.cpp rename to dbms/Functions/toMinute.cpp diff --git a/dbms/src/Functions/toMonday.cpp b/dbms/Functions/toMonday.cpp similarity index 100% rename from dbms/src/Functions/toMonday.cpp rename to dbms/Functions/toMonday.cpp diff --git a/dbms/src/Functions/toMonth.cpp b/dbms/Functions/toMonth.cpp similarity index 100% rename from dbms/src/Functions/toMonth.cpp rename to dbms/Functions/toMonth.cpp diff --git a/dbms/src/Functions/toNullable.cpp b/dbms/Functions/toNullable.cpp similarity index 100% rename from dbms/src/Functions/toNullable.cpp rename to dbms/Functions/toNullable.cpp diff --git a/dbms/src/Functions/toQuarter.cpp b/dbms/Functions/toQuarter.cpp similarity index 100% rename from dbms/src/Functions/toQuarter.cpp rename to dbms/Functions/toQuarter.cpp diff --git a/dbms/src/Functions/toRelativeDayNum.cpp b/dbms/Functions/toRelativeDayNum.cpp similarity index 100% rename from dbms/src/Functions/toRelativeDayNum.cpp rename to dbms/Functions/toRelativeDayNum.cpp diff --git a/dbms/src/Functions/toRelativeHourNum.cpp b/dbms/Functions/toRelativeHourNum.cpp similarity index 100% rename from dbms/src/Functions/toRelativeHourNum.cpp rename to dbms/Functions/toRelativeHourNum.cpp diff --git a/dbms/src/Functions/toRelativeMinuteNum.cpp b/dbms/Functions/toRelativeMinuteNum.cpp similarity index 100% rename from dbms/src/Functions/toRelativeMinuteNum.cpp rename to dbms/Functions/toRelativeMinuteNum.cpp diff --git a/dbms/src/Functions/toRelativeMonthNum.cpp b/dbms/Functions/toRelativeMonthNum.cpp similarity index 100% rename from dbms/src/Functions/toRelativeMonthNum.cpp rename to dbms/Functions/toRelativeMonthNum.cpp diff --git a/dbms/src/Functions/toRelativeQuarterNum.cpp b/dbms/Functions/toRelativeQuarterNum.cpp similarity index 100% rename from dbms/src/Functions/toRelativeQuarterNum.cpp rename to dbms/Functions/toRelativeQuarterNum.cpp 
diff --git a/dbms/src/Functions/toRelativeSecondNum.cpp b/dbms/Functions/toRelativeSecondNum.cpp similarity index 100% rename from dbms/src/Functions/toRelativeSecondNum.cpp rename to dbms/Functions/toRelativeSecondNum.cpp diff --git a/dbms/src/Functions/toRelativeWeekNum.cpp b/dbms/Functions/toRelativeWeekNum.cpp similarity index 100% rename from dbms/src/Functions/toRelativeWeekNum.cpp rename to dbms/Functions/toRelativeWeekNum.cpp diff --git a/dbms/src/Functions/toRelativeYearNum.cpp b/dbms/Functions/toRelativeYearNum.cpp similarity index 100% rename from dbms/src/Functions/toRelativeYearNum.cpp rename to dbms/Functions/toRelativeYearNum.cpp diff --git a/dbms/src/Functions/toSecond.cpp b/dbms/Functions/toSecond.cpp similarity index 100% rename from dbms/src/Functions/toSecond.cpp rename to dbms/Functions/toSecond.cpp diff --git a/dbms/src/Functions/toStartOfDay.cpp b/dbms/Functions/toStartOfDay.cpp similarity index 100% rename from dbms/src/Functions/toStartOfDay.cpp rename to dbms/Functions/toStartOfDay.cpp diff --git a/dbms/src/Functions/toStartOfFifteenMinutes.cpp b/dbms/Functions/toStartOfFifteenMinutes.cpp similarity index 100% rename from dbms/src/Functions/toStartOfFifteenMinutes.cpp rename to dbms/Functions/toStartOfFifteenMinutes.cpp diff --git a/dbms/src/Functions/toStartOfFiveMinute.cpp b/dbms/Functions/toStartOfFiveMinute.cpp similarity index 100% rename from dbms/src/Functions/toStartOfFiveMinute.cpp rename to dbms/Functions/toStartOfFiveMinute.cpp diff --git a/dbms/src/Functions/toStartOfHour.cpp b/dbms/Functions/toStartOfHour.cpp similarity index 100% rename from dbms/src/Functions/toStartOfHour.cpp rename to dbms/Functions/toStartOfHour.cpp diff --git a/dbms/src/Functions/toStartOfISOYear.cpp b/dbms/Functions/toStartOfISOYear.cpp similarity index 100% rename from dbms/src/Functions/toStartOfISOYear.cpp rename to dbms/Functions/toStartOfISOYear.cpp diff --git a/dbms/src/Functions/toStartOfInterval.cpp b/dbms/Functions/toStartOfInterval.cpp similarity index 100% rename from dbms/src/Functions/toStartOfInterval.cpp rename to dbms/Functions/toStartOfInterval.cpp diff --git a/dbms/src/Functions/toStartOfMinute.cpp b/dbms/Functions/toStartOfMinute.cpp similarity index 100% rename from dbms/src/Functions/toStartOfMinute.cpp rename to dbms/Functions/toStartOfMinute.cpp diff --git a/dbms/src/Functions/toStartOfMonth.cpp b/dbms/Functions/toStartOfMonth.cpp similarity index 100% rename from dbms/src/Functions/toStartOfMonth.cpp rename to dbms/Functions/toStartOfMonth.cpp diff --git a/dbms/src/Functions/toStartOfQuarter.cpp b/dbms/Functions/toStartOfQuarter.cpp similarity index 100% rename from dbms/src/Functions/toStartOfQuarter.cpp rename to dbms/Functions/toStartOfQuarter.cpp diff --git a/dbms/src/Functions/toStartOfTenMinutes.cpp b/dbms/Functions/toStartOfTenMinutes.cpp similarity index 100% rename from dbms/src/Functions/toStartOfTenMinutes.cpp rename to dbms/Functions/toStartOfTenMinutes.cpp diff --git a/dbms/src/Functions/toStartOfYear.cpp b/dbms/Functions/toStartOfYear.cpp similarity index 100% rename from dbms/src/Functions/toStartOfYear.cpp rename to dbms/Functions/toStartOfYear.cpp diff --git a/dbms/src/Functions/toTime.cpp b/dbms/Functions/toTime.cpp similarity index 100% rename from dbms/src/Functions/toTime.cpp rename to dbms/Functions/toTime.cpp diff --git a/dbms/src/Functions/toTimeZone.cpp b/dbms/Functions/toTimeZone.cpp similarity index 100% rename from dbms/src/Functions/toTimeZone.cpp rename to dbms/Functions/toTimeZone.cpp diff --git 
a/dbms/src/Functions/toTypeName.cpp b/dbms/Functions/toTypeName.cpp similarity index 100% rename from dbms/src/Functions/toTypeName.cpp rename to dbms/Functions/toTypeName.cpp diff --git a/dbms/src/Functions/toValidUTF8.cpp b/dbms/Functions/toValidUTF8.cpp similarity index 100% rename from dbms/src/Functions/toValidUTF8.cpp rename to dbms/Functions/toValidUTF8.cpp diff --git a/dbms/src/Functions/toYYYYMM.cpp b/dbms/Functions/toYYYYMM.cpp similarity index 100% rename from dbms/src/Functions/toYYYYMM.cpp rename to dbms/Functions/toYYYYMM.cpp diff --git a/dbms/src/Functions/toYYYYMMDD.cpp b/dbms/Functions/toYYYYMMDD.cpp similarity index 100% rename from dbms/src/Functions/toYYYYMMDD.cpp rename to dbms/Functions/toYYYYMMDD.cpp diff --git a/dbms/src/Functions/toYYYYMMDDhhmmss.cpp b/dbms/Functions/toYYYYMMDDhhmmss.cpp similarity index 100% rename from dbms/src/Functions/toYYYYMMDDhhmmss.cpp rename to dbms/Functions/toYYYYMMDDhhmmss.cpp diff --git a/dbms/src/Functions/toYear.cpp b/dbms/Functions/toYear.cpp similarity index 100% rename from dbms/src/Functions/toYear.cpp rename to dbms/Functions/toYear.cpp diff --git a/dbms/src/Functions/today.cpp b/dbms/Functions/today.cpp similarity index 100% rename from dbms/src/Functions/today.cpp rename to dbms/Functions/today.cpp diff --git a/dbms/src/Functions/transform.cpp b/dbms/Functions/transform.cpp similarity index 100% rename from dbms/src/Functions/transform.cpp rename to dbms/Functions/transform.cpp diff --git a/dbms/src/Functions/trap.cpp b/dbms/Functions/trap.cpp similarity index 100% rename from dbms/src/Functions/trap.cpp rename to dbms/Functions/trap.cpp diff --git a/dbms/src/Functions/trim.cpp b/dbms/Functions/trim.cpp similarity index 100% rename from dbms/src/Functions/trim.cpp rename to dbms/Functions/trim.cpp diff --git a/dbms/src/Functions/tryBase64Decode.cpp b/dbms/Functions/tryBase64Decode.cpp similarity index 100% rename from dbms/src/Functions/tryBase64Decode.cpp rename to dbms/Functions/tryBase64Decode.cpp diff --git a/dbms/src/Functions/tuple.cpp b/dbms/Functions/tuple.cpp similarity index 100% rename from dbms/src/Functions/tuple.cpp rename to dbms/Functions/tuple.cpp diff --git a/dbms/src/Functions/tupleElement.cpp b/dbms/Functions/tupleElement.cpp similarity index 100% rename from dbms/src/Functions/tupleElement.cpp rename to dbms/Functions/tupleElement.cpp diff --git a/dbms/src/Functions/upper.cpp b/dbms/Functions/upper.cpp similarity index 100% rename from dbms/src/Functions/upper.cpp rename to dbms/Functions/upper.cpp diff --git a/dbms/src/Functions/upperUTF8.cpp b/dbms/Functions/upperUTF8.cpp similarity index 100% rename from dbms/src/Functions/upperUTF8.cpp rename to dbms/Functions/upperUTF8.cpp diff --git a/dbms/src/Functions/uptime.cpp b/dbms/Functions/uptime.cpp similarity index 100% rename from dbms/src/Functions/uptime.cpp rename to dbms/Functions/uptime.cpp diff --git a/dbms/src/Functions/version.cpp b/dbms/Functions/version.cpp similarity index 100% rename from dbms/src/Functions/version.cpp rename to dbms/Functions/version.cpp diff --git a/dbms/src/Functions/visibleWidth.cpp b/dbms/Functions/visibleWidth.cpp similarity index 100% rename from dbms/src/Functions/visibleWidth.cpp rename to dbms/Functions/visibleWidth.cpp diff --git a/dbms/src/Functions/visitParamExtractBool.cpp b/dbms/Functions/visitParamExtractBool.cpp similarity index 100% rename from dbms/src/Functions/visitParamExtractBool.cpp rename to dbms/Functions/visitParamExtractBool.cpp diff --git a/dbms/src/Functions/visitParamExtractFloat.cpp 
b/dbms/Functions/visitParamExtractFloat.cpp similarity index 100% rename from dbms/src/Functions/visitParamExtractFloat.cpp rename to dbms/Functions/visitParamExtractFloat.cpp diff --git a/dbms/src/Functions/visitParamExtractInt.cpp b/dbms/Functions/visitParamExtractInt.cpp similarity index 100% rename from dbms/src/Functions/visitParamExtractInt.cpp rename to dbms/Functions/visitParamExtractInt.cpp diff --git a/dbms/src/Functions/visitParamExtractRaw.cpp b/dbms/Functions/visitParamExtractRaw.cpp similarity index 100% rename from dbms/src/Functions/visitParamExtractRaw.cpp rename to dbms/Functions/visitParamExtractRaw.cpp diff --git a/dbms/src/Functions/visitParamExtractString.cpp b/dbms/Functions/visitParamExtractString.cpp similarity index 100% rename from dbms/src/Functions/visitParamExtractString.cpp rename to dbms/Functions/visitParamExtractString.cpp diff --git a/dbms/src/Functions/visitParamExtractUInt.cpp b/dbms/Functions/visitParamExtractUInt.cpp similarity index 100% rename from dbms/src/Functions/visitParamExtractUInt.cpp rename to dbms/Functions/visitParamExtractUInt.cpp diff --git a/dbms/src/Functions/visitParamHas.cpp b/dbms/Functions/visitParamHas.cpp similarity index 100% rename from dbms/src/Functions/visitParamHas.cpp rename to dbms/Functions/visitParamHas.cpp diff --git a/dbms/src/Functions/yandexConsistentHash.cpp b/dbms/Functions/yandexConsistentHash.cpp similarity index 100% rename from dbms/src/Functions/yandexConsistentHash.cpp rename to dbms/Functions/yandexConsistentHash.cpp diff --git a/dbms/src/Functions/yesterday.cpp b/dbms/Functions/yesterday.cpp similarity index 100% rename from dbms/src/Functions/yesterday.cpp rename to dbms/Functions/yesterday.cpp diff --git a/dbms/src/IO/AIO.cpp b/dbms/IO/AIO.cpp similarity index 100% rename from dbms/src/IO/AIO.cpp rename to dbms/IO/AIO.cpp diff --git a/dbms/src/IO/AIO.h b/dbms/IO/AIO.h similarity index 100% rename from dbms/src/IO/AIO.h rename to dbms/IO/AIO.h diff --git a/dbms/src/IO/AIOContextPool.cpp b/dbms/IO/AIOContextPool.cpp similarity index 100% rename from dbms/src/IO/AIOContextPool.cpp rename to dbms/IO/AIOContextPool.cpp diff --git a/dbms/src/IO/AIOContextPool.h b/dbms/IO/AIOContextPool.h similarity index 100% rename from dbms/src/IO/AIOContextPool.h rename to dbms/IO/AIOContextPool.h diff --git a/dbms/src/IO/AsynchronousWriteBuffer.h b/dbms/IO/AsynchronousWriteBuffer.h similarity index 100% rename from dbms/src/IO/AsynchronousWriteBuffer.h rename to dbms/IO/AsynchronousWriteBuffer.h diff --git a/dbms/src/IO/BitHelpers.h b/dbms/IO/BitHelpers.h similarity index 100% rename from dbms/src/IO/BitHelpers.h rename to dbms/IO/BitHelpers.h diff --git a/dbms/src/IO/BrotliReadBuffer.cpp b/dbms/IO/BrotliReadBuffer.cpp similarity index 100% rename from dbms/src/IO/BrotliReadBuffer.cpp rename to dbms/IO/BrotliReadBuffer.cpp diff --git a/dbms/src/IO/BrotliReadBuffer.h b/dbms/IO/BrotliReadBuffer.h similarity index 100% rename from dbms/src/IO/BrotliReadBuffer.h rename to dbms/IO/BrotliReadBuffer.h diff --git a/dbms/src/IO/BrotliWriteBuffer.cpp b/dbms/IO/BrotliWriteBuffer.cpp similarity index 100% rename from dbms/src/IO/BrotliWriteBuffer.cpp rename to dbms/IO/BrotliWriteBuffer.cpp diff --git a/dbms/src/IO/BrotliWriteBuffer.h b/dbms/IO/BrotliWriteBuffer.h similarity index 100% rename from dbms/src/IO/BrotliWriteBuffer.h rename to dbms/IO/BrotliWriteBuffer.h diff --git a/dbms/src/IO/BufferBase.h b/dbms/IO/BufferBase.h similarity index 100% rename from dbms/src/IO/BufferBase.h rename to dbms/IO/BufferBase.h diff --git 
a/dbms/src/IO/BufferWithOwnMemory.h b/dbms/IO/BufferWithOwnMemory.h similarity index 100% rename from dbms/src/IO/BufferWithOwnMemory.h rename to dbms/IO/BufferWithOwnMemory.h diff --git a/dbms/src/IO/CMakeLists.txt b/dbms/IO/CMakeLists.txt similarity index 100% rename from dbms/src/IO/CMakeLists.txt rename to dbms/IO/CMakeLists.txt diff --git a/dbms/src/IO/CascadeWriteBuffer.cpp b/dbms/IO/CascadeWriteBuffer.cpp similarity index 100% rename from dbms/src/IO/CascadeWriteBuffer.cpp rename to dbms/IO/CascadeWriteBuffer.cpp diff --git a/dbms/src/IO/CascadeWriteBuffer.h b/dbms/IO/CascadeWriteBuffer.h similarity index 100% rename from dbms/src/IO/CascadeWriteBuffer.h rename to dbms/IO/CascadeWriteBuffer.h diff --git a/dbms/src/IO/CompressionMethod.cpp b/dbms/IO/CompressionMethod.cpp similarity index 100% rename from dbms/src/IO/CompressionMethod.cpp rename to dbms/IO/CompressionMethod.cpp diff --git a/dbms/src/IO/CompressionMethod.h b/dbms/IO/CompressionMethod.h similarity index 100% rename from dbms/src/IO/CompressionMethod.h rename to dbms/IO/CompressionMethod.h diff --git a/dbms/src/IO/ConcatReadBuffer.h b/dbms/IO/ConcatReadBuffer.h similarity index 100% rename from dbms/src/IO/ConcatReadBuffer.h rename to dbms/IO/ConcatReadBuffer.h diff --git a/dbms/src/IO/ConnectionTimeouts.h b/dbms/IO/ConnectionTimeouts.h similarity index 100% rename from dbms/src/IO/ConnectionTimeouts.h rename to dbms/IO/ConnectionTimeouts.h diff --git a/dbms/src/IO/DoubleConverter.cpp b/dbms/IO/DoubleConverter.cpp similarity index 100% rename from dbms/src/IO/DoubleConverter.cpp rename to dbms/IO/DoubleConverter.cpp diff --git a/dbms/src/IO/DoubleConverter.h b/dbms/IO/DoubleConverter.h similarity index 100% rename from dbms/src/IO/DoubleConverter.h rename to dbms/IO/DoubleConverter.h diff --git a/dbms/src/IO/HDFSCommon.cpp b/dbms/IO/HDFSCommon.cpp similarity index 100% rename from dbms/src/IO/HDFSCommon.cpp rename to dbms/IO/HDFSCommon.cpp diff --git a/dbms/src/IO/HDFSCommon.h b/dbms/IO/HDFSCommon.h similarity index 100% rename from dbms/src/IO/HDFSCommon.h rename to dbms/IO/HDFSCommon.h diff --git a/dbms/src/IO/HTTPCommon.cpp b/dbms/IO/HTTPCommon.cpp similarity index 100% rename from dbms/src/IO/HTTPCommon.cpp rename to dbms/IO/HTTPCommon.cpp diff --git a/dbms/src/IO/HTTPCommon.h b/dbms/IO/HTTPCommon.h similarity index 100% rename from dbms/src/IO/HTTPCommon.h rename to dbms/IO/HTTPCommon.h diff --git a/dbms/src/IO/HashingReadBuffer.h b/dbms/IO/HashingReadBuffer.h similarity index 100% rename from dbms/src/IO/HashingReadBuffer.h rename to dbms/IO/HashingReadBuffer.h diff --git a/dbms/src/IO/HashingWriteBuffer.cpp b/dbms/IO/HashingWriteBuffer.cpp similarity index 100% rename from dbms/src/IO/HashingWriteBuffer.cpp rename to dbms/IO/HashingWriteBuffer.cpp diff --git a/dbms/src/IO/HashingWriteBuffer.h b/dbms/IO/HashingWriteBuffer.h similarity index 100% rename from dbms/src/IO/HashingWriteBuffer.h rename to dbms/IO/HashingWriteBuffer.h diff --git a/dbms/src/IO/HexWriteBuffer.cpp b/dbms/IO/HexWriteBuffer.cpp similarity index 100% rename from dbms/src/IO/HexWriteBuffer.cpp rename to dbms/IO/HexWriteBuffer.cpp diff --git a/dbms/src/IO/HexWriteBuffer.h b/dbms/IO/HexWriteBuffer.h similarity index 100% rename from dbms/src/IO/HexWriteBuffer.h rename to dbms/IO/HexWriteBuffer.h diff --git a/dbms/src/IO/IReadableWriteBuffer.h b/dbms/IO/IReadableWriteBuffer.h similarity index 100% rename from dbms/src/IO/IReadableWriteBuffer.h rename to dbms/IO/IReadableWriteBuffer.h diff --git a/dbms/src/IO/LimitReadBuffer.cpp 
b/dbms/IO/LimitReadBuffer.cpp similarity index 100% rename from dbms/src/IO/LimitReadBuffer.cpp rename to dbms/IO/LimitReadBuffer.cpp diff --git a/dbms/src/IO/LimitReadBuffer.h b/dbms/IO/LimitReadBuffer.h similarity index 100% rename from dbms/src/IO/LimitReadBuffer.h rename to dbms/IO/LimitReadBuffer.h diff --git a/dbms/src/IO/MMapReadBufferFromFile.cpp b/dbms/IO/MMapReadBufferFromFile.cpp similarity index 100% rename from dbms/src/IO/MMapReadBufferFromFile.cpp rename to dbms/IO/MMapReadBufferFromFile.cpp diff --git a/dbms/src/IO/MMapReadBufferFromFile.h b/dbms/IO/MMapReadBufferFromFile.h similarity index 100% rename from dbms/src/IO/MMapReadBufferFromFile.h rename to dbms/IO/MMapReadBufferFromFile.h diff --git a/dbms/src/IO/MMapReadBufferFromFileDescriptor.cpp b/dbms/IO/MMapReadBufferFromFileDescriptor.cpp similarity index 100% rename from dbms/src/IO/MMapReadBufferFromFileDescriptor.cpp rename to dbms/IO/MMapReadBufferFromFileDescriptor.cpp diff --git a/dbms/src/IO/MMapReadBufferFromFileDescriptor.h b/dbms/IO/MMapReadBufferFromFileDescriptor.h similarity index 100% rename from dbms/src/IO/MMapReadBufferFromFileDescriptor.h rename to dbms/IO/MMapReadBufferFromFileDescriptor.h diff --git a/dbms/src/IO/MemoryReadWriteBuffer.cpp b/dbms/IO/MemoryReadWriteBuffer.cpp similarity index 100% rename from dbms/src/IO/MemoryReadWriteBuffer.cpp rename to dbms/IO/MemoryReadWriteBuffer.cpp diff --git a/dbms/src/IO/MemoryReadWriteBuffer.h b/dbms/IO/MemoryReadWriteBuffer.h similarity index 100% rename from dbms/src/IO/MemoryReadWriteBuffer.h rename to dbms/IO/MemoryReadWriteBuffer.h diff --git a/dbms/src/IO/NullWriteBuffer.cpp b/dbms/IO/NullWriteBuffer.cpp similarity index 100% rename from dbms/src/IO/NullWriteBuffer.cpp rename to dbms/IO/NullWriteBuffer.cpp diff --git a/dbms/src/IO/NullWriteBuffer.h b/dbms/IO/NullWriteBuffer.h similarity index 100% rename from dbms/src/IO/NullWriteBuffer.h rename to dbms/IO/NullWriteBuffer.h diff --git a/dbms/src/IO/Operators.h b/dbms/IO/Operators.h similarity index 100% rename from dbms/src/IO/Operators.h rename to dbms/IO/Operators.h diff --git a/dbms/src/IO/PeekableReadBuffer.cpp b/dbms/IO/PeekableReadBuffer.cpp similarity index 100% rename from dbms/src/IO/PeekableReadBuffer.cpp rename to dbms/IO/PeekableReadBuffer.cpp diff --git a/dbms/src/IO/PeekableReadBuffer.h b/dbms/IO/PeekableReadBuffer.h similarity index 100% rename from dbms/src/IO/PeekableReadBuffer.h rename to dbms/IO/PeekableReadBuffer.h diff --git a/dbms/src/IO/Progress.cpp b/dbms/IO/Progress.cpp similarity index 100% rename from dbms/src/IO/Progress.cpp rename to dbms/IO/Progress.cpp diff --git a/dbms/src/IO/Progress.h b/dbms/IO/Progress.h similarity index 100% rename from dbms/src/IO/Progress.h rename to dbms/IO/Progress.h diff --git a/dbms/src/IO/ReadBuffer.h b/dbms/IO/ReadBuffer.h similarity index 100% rename from dbms/src/IO/ReadBuffer.h rename to dbms/IO/ReadBuffer.h diff --git a/dbms/src/IO/ReadBufferAIO.cpp b/dbms/IO/ReadBufferAIO.cpp similarity index 100% rename from dbms/src/IO/ReadBufferAIO.cpp rename to dbms/IO/ReadBufferAIO.cpp diff --git a/dbms/src/IO/ReadBufferAIO.h b/dbms/IO/ReadBufferAIO.h similarity index 100% rename from dbms/src/IO/ReadBufferAIO.h rename to dbms/IO/ReadBufferAIO.h diff --git a/dbms/src/IO/ReadBufferFromFile.cpp b/dbms/IO/ReadBufferFromFile.cpp similarity index 100% rename from dbms/src/IO/ReadBufferFromFile.cpp rename to dbms/IO/ReadBufferFromFile.cpp diff --git a/dbms/src/IO/ReadBufferFromFile.h b/dbms/IO/ReadBufferFromFile.h similarity index 100% rename from 
dbms/src/IO/ReadBufferFromFile.h rename to dbms/IO/ReadBufferFromFile.h diff --git a/dbms/src/IO/ReadBufferFromFileBase.cpp b/dbms/IO/ReadBufferFromFileBase.cpp similarity index 100% rename from dbms/src/IO/ReadBufferFromFileBase.cpp rename to dbms/IO/ReadBufferFromFileBase.cpp diff --git a/dbms/src/IO/ReadBufferFromFileBase.h b/dbms/IO/ReadBufferFromFileBase.h similarity index 100% rename from dbms/src/IO/ReadBufferFromFileBase.h rename to dbms/IO/ReadBufferFromFileBase.h diff --git a/dbms/src/IO/ReadBufferFromFileDescriptor.cpp b/dbms/IO/ReadBufferFromFileDescriptor.cpp similarity index 100% rename from dbms/src/IO/ReadBufferFromFileDescriptor.cpp rename to dbms/IO/ReadBufferFromFileDescriptor.cpp diff --git a/dbms/src/IO/ReadBufferFromFileDescriptor.h b/dbms/IO/ReadBufferFromFileDescriptor.h similarity index 100% rename from dbms/src/IO/ReadBufferFromFileDescriptor.h rename to dbms/IO/ReadBufferFromFileDescriptor.h diff --git a/dbms/src/IO/ReadBufferFromHDFS.cpp b/dbms/IO/ReadBufferFromHDFS.cpp similarity index 100% rename from dbms/src/IO/ReadBufferFromHDFS.cpp rename to dbms/IO/ReadBufferFromHDFS.cpp diff --git a/dbms/src/IO/ReadBufferFromHDFS.h b/dbms/IO/ReadBufferFromHDFS.h similarity index 100% rename from dbms/src/IO/ReadBufferFromHDFS.h rename to dbms/IO/ReadBufferFromHDFS.h diff --git a/dbms/src/IO/ReadBufferFromIStream.cpp b/dbms/IO/ReadBufferFromIStream.cpp similarity index 100% rename from dbms/src/IO/ReadBufferFromIStream.cpp rename to dbms/IO/ReadBufferFromIStream.cpp diff --git a/dbms/src/IO/ReadBufferFromIStream.h b/dbms/IO/ReadBufferFromIStream.h similarity index 100% rename from dbms/src/IO/ReadBufferFromIStream.h rename to dbms/IO/ReadBufferFromIStream.h diff --git a/dbms/src/IO/ReadBufferFromMemory.cpp b/dbms/IO/ReadBufferFromMemory.cpp similarity index 100% rename from dbms/src/IO/ReadBufferFromMemory.cpp rename to dbms/IO/ReadBufferFromMemory.cpp diff --git a/dbms/src/IO/ReadBufferFromMemory.h b/dbms/IO/ReadBufferFromMemory.h similarity index 100% rename from dbms/src/IO/ReadBufferFromMemory.h rename to dbms/IO/ReadBufferFromMemory.h diff --git a/dbms/src/IO/ReadBufferFromPocoSocket.cpp b/dbms/IO/ReadBufferFromPocoSocket.cpp similarity index 100% rename from dbms/src/IO/ReadBufferFromPocoSocket.cpp rename to dbms/IO/ReadBufferFromPocoSocket.cpp diff --git a/dbms/src/IO/ReadBufferFromPocoSocket.h b/dbms/IO/ReadBufferFromPocoSocket.h similarity index 100% rename from dbms/src/IO/ReadBufferFromPocoSocket.h rename to dbms/IO/ReadBufferFromPocoSocket.h diff --git a/dbms/src/IO/ReadBufferFromS3.cpp b/dbms/IO/ReadBufferFromS3.cpp similarity index 100% rename from dbms/src/IO/ReadBufferFromS3.cpp rename to dbms/IO/ReadBufferFromS3.cpp diff --git a/dbms/src/IO/ReadBufferFromS3.h b/dbms/IO/ReadBufferFromS3.h similarity index 100% rename from dbms/src/IO/ReadBufferFromS3.h rename to dbms/IO/ReadBufferFromS3.h diff --git a/dbms/src/IO/ReadBufferFromString.h b/dbms/IO/ReadBufferFromString.h similarity index 100% rename from dbms/src/IO/ReadBufferFromString.h rename to dbms/IO/ReadBufferFromString.h diff --git a/dbms/src/IO/ReadHelpers.cpp b/dbms/IO/ReadHelpers.cpp similarity index 100% rename from dbms/src/IO/ReadHelpers.cpp rename to dbms/IO/ReadHelpers.cpp diff --git a/dbms/src/IO/ReadHelpers.h b/dbms/IO/ReadHelpers.h similarity index 100% rename from dbms/src/IO/ReadHelpers.h rename to dbms/IO/ReadHelpers.h diff --git a/dbms/src/IO/ReadWriteBufferFromHTTP.cpp b/dbms/IO/ReadWriteBufferFromHTTP.cpp similarity index 100% rename from dbms/src/IO/ReadWriteBufferFromHTTP.cpp rename 
to dbms/IO/ReadWriteBufferFromHTTP.cpp diff --git a/dbms/src/IO/ReadWriteBufferFromHTTP.h b/dbms/IO/ReadWriteBufferFromHTTP.h similarity index 100% rename from dbms/src/IO/ReadWriteBufferFromHTTP.h rename to dbms/IO/ReadWriteBufferFromHTTP.h diff --git a/dbms/src/IO/S3Common.cpp b/dbms/IO/S3Common.cpp similarity index 100% rename from dbms/src/IO/S3Common.cpp rename to dbms/IO/S3Common.cpp diff --git a/dbms/src/IO/S3Common.h b/dbms/IO/S3Common.h similarity index 100% rename from dbms/src/IO/S3Common.h rename to dbms/IO/S3Common.h diff --git a/dbms/src/IO/SeekableReadBuffer.h b/dbms/IO/SeekableReadBuffer.h similarity index 100% rename from dbms/src/IO/SeekableReadBuffer.h rename to dbms/IO/SeekableReadBuffer.h diff --git a/dbms/src/IO/UncompressedCache.h b/dbms/IO/UncompressedCache.h similarity index 100% rename from dbms/src/IO/UncompressedCache.h rename to dbms/IO/UncompressedCache.h diff --git a/dbms/src/IO/UseSSL.cpp b/dbms/IO/UseSSL.cpp similarity index 100% rename from dbms/src/IO/UseSSL.cpp rename to dbms/IO/UseSSL.cpp diff --git a/dbms/src/IO/UseSSL.h b/dbms/IO/UseSSL.h similarity index 100% rename from dbms/src/IO/UseSSL.h rename to dbms/IO/UseSSL.h diff --git a/dbms/src/IO/VarInt.h b/dbms/IO/VarInt.h similarity index 100% rename from dbms/src/IO/VarInt.h rename to dbms/IO/VarInt.h diff --git a/dbms/src/IO/WriteBuffer.h b/dbms/IO/WriteBuffer.h similarity index 100% rename from dbms/src/IO/WriteBuffer.h rename to dbms/IO/WriteBuffer.h diff --git a/dbms/src/IO/WriteBufferAIO.cpp b/dbms/IO/WriteBufferAIO.cpp similarity index 100% rename from dbms/src/IO/WriteBufferAIO.cpp rename to dbms/IO/WriteBufferAIO.cpp diff --git a/dbms/src/IO/WriteBufferAIO.h b/dbms/IO/WriteBufferAIO.h similarity index 100% rename from dbms/src/IO/WriteBufferAIO.h rename to dbms/IO/WriteBufferAIO.h diff --git a/dbms/src/IO/WriteBufferFromArena.h b/dbms/IO/WriteBufferFromArena.h similarity index 100% rename from dbms/src/IO/WriteBufferFromArena.h rename to dbms/IO/WriteBufferFromArena.h diff --git a/dbms/src/IO/WriteBufferFromFile.cpp b/dbms/IO/WriteBufferFromFile.cpp similarity index 100% rename from dbms/src/IO/WriteBufferFromFile.cpp rename to dbms/IO/WriteBufferFromFile.cpp diff --git a/dbms/src/IO/WriteBufferFromFile.h b/dbms/IO/WriteBufferFromFile.h similarity index 100% rename from dbms/src/IO/WriteBufferFromFile.h rename to dbms/IO/WriteBufferFromFile.h diff --git a/dbms/src/IO/WriteBufferFromFileBase.cpp b/dbms/IO/WriteBufferFromFileBase.cpp similarity index 100% rename from dbms/src/IO/WriteBufferFromFileBase.cpp rename to dbms/IO/WriteBufferFromFileBase.cpp diff --git a/dbms/src/IO/WriteBufferFromFileBase.h b/dbms/IO/WriteBufferFromFileBase.h similarity index 100% rename from dbms/src/IO/WriteBufferFromFileBase.h rename to dbms/IO/WriteBufferFromFileBase.h diff --git a/dbms/src/IO/WriteBufferFromFileDescriptor.cpp b/dbms/IO/WriteBufferFromFileDescriptor.cpp similarity index 100% rename from dbms/src/IO/WriteBufferFromFileDescriptor.cpp rename to dbms/IO/WriteBufferFromFileDescriptor.cpp diff --git a/dbms/src/IO/WriteBufferFromFileDescriptor.h b/dbms/IO/WriteBufferFromFileDescriptor.h similarity index 100% rename from dbms/src/IO/WriteBufferFromFileDescriptor.h rename to dbms/IO/WriteBufferFromFileDescriptor.h diff --git a/dbms/src/IO/WriteBufferFromFileDescriptorDiscardOnFailure.cpp b/dbms/IO/WriteBufferFromFileDescriptorDiscardOnFailure.cpp similarity index 100% rename from dbms/src/IO/WriteBufferFromFileDescriptorDiscardOnFailure.cpp rename to 
diff --git a/dbms/src/IO/WriteBufferFromFileDescriptorDiscardOnFailure.h b/dbms/IO/WriteBufferFromFileDescriptorDiscardOnFailure.h
similarity index 100%
rename from dbms/src/IO/WriteBufferFromFileDescriptorDiscardOnFailure.h
rename to dbms/IO/WriteBufferFromFileDescriptorDiscardOnFailure.h
diff --git a/dbms/src/IO/WriteBufferFromHDFS.cpp b/dbms/IO/WriteBufferFromHDFS.cpp
similarity index 100%
rename from dbms/src/IO/WriteBufferFromHDFS.cpp
rename to dbms/IO/WriteBufferFromHDFS.cpp
diff --git a/dbms/src/IO/WriteBufferFromHDFS.h b/dbms/IO/WriteBufferFromHDFS.h
similarity index 100%
rename from dbms/src/IO/WriteBufferFromHDFS.h
rename to dbms/IO/WriteBufferFromHDFS.h
diff --git a/dbms/src/IO/WriteBufferFromHTTP.cpp b/dbms/IO/WriteBufferFromHTTP.cpp
similarity index 100%
rename from dbms/src/IO/WriteBufferFromHTTP.cpp
rename to dbms/IO/WriteBufferFromHTTP.cpp
diff --git a/dbms/src/IO/WriteBufferFromHTTP.h b/dbms/IO/WriteBufferFromHTTP.h
similarity index 100%
rename from dbms/src/IO/WriteBufferFromHTTP.h
rename to dbms/IO/WriteBufferFromHTTP.h
diff --git a/dbms/src/IO/WriteBufferFromHTTPServerResponse.cpp b/dbms/IO/WriteBufferFromHTTPServerResponse.cpp
similarity index 100%
rename from dbms/src/IO/WriteBufferFromHTTPServerResponse.cpp
rename to dbms/IO/WriteBufferFromHTTPServerResponse.cpp
diff --git a/dbms/src/IO/WriteBufferFromHTTPServerResponse.h b/dbms/IO/WriteBufferFromHTTPServerResponse.h
similarity index 100%
rename from dbms/src/IO/WriteBufferFromHTTPServerResponse.h
rename to dbms/IO/WriteBufferFromHTTPServerResponse.h
diff --git a/dbms/src/IO/WriteBufferFromOStream.cpp b/dbms/IO/WriteBufferFromOStream.cpp
similarity index 100%
rename from dbms/src/IO/WriteBufferFromOStream.cpp
rename to dbms/IO/WriteBufferFromOStream.cpp
diff --git a/dbms/src/IO/WriteBufferFromOStream.h b/dbms/IO/WriteBufferFromOStream.h
similarity index 100%
rename from dbms/src/IO/WriteBufferFromOStream.h
rename to dbms/IO/WriteBufferFromOStream.h
diff --git a/dbms/src/IO/WriteBufferFromPocoSocket.cpp b/dbms/IO/WriteBufferFromPocoSocket.cpp
similarity index 100%
rename from dbms/src/IO/WriteBufferFromPocoSocket.cpp
rename to dbms/IO/WriteBufferFromPocoSocket.cpp
diff --git a/dbms/src/IO/WriteBufferFromPocoSocket.h b/dbms/IO/WriteBufferFromPocoSocket.h
similarity index 100%
rename from dbms/src/IO/WriteBufferFromPocoSocket.h
rename to dbms/IO/WriteBufferFromPocoSocket.h
diff --git a/dbms/src/IO/WriteBufferFromS3.cpp b/dbms/IO/WriteBufferFromS3.cpp
similarity index 100%
rename from dbms/src/IO/WriteBufferFromS3.cpp
rename to dbms/IO/WriteBufferFromS3.cpp
diff --git a/dbms/src/IO/WriteBufferFromS3.h b/dbms/IO/WriteBufferFromS3.h
similarity index 100%
rename from dbms/src/IO/WriteBufferFromS3.h
rename to dbms/IO/WriteBufferFromS3.h
diff --git a/dbms/src/IO/WriteBufferFromString.h b/dbms/IO/WriteBufferFromString.h
similarity index 100%
rename from dbms/src/IO/WriteBufferFromString.h
rename to dbms/IO/WriteBufferFromString.h
diff --git a/dbms/src/IO/WriteBufferFromTemporaryFile.cpp b/dbms/IO/WriteBufferFromTemporaryFile.cpp
similarity index 100%
rename from dbms/src/IO/WriteBufferFromTemporaryFile.cpp
rename to dbms/IO/WriteBufferFromTemporaryFile.cpp
diff --git a/dbms/src/IO/WriteBufferFromTemporaryFile.h b/dbms/IO/WriteBufferFromTemporaryFile.h
similarity index 100%
rename from dbms/src/IO/WriteBufferFromTemporaryFile.h
rename to dbms/IO/WriteBufferFromTemporaryFile.h
diff --git a/dbms/src/IO/WriteBufferFromVector.h b/dbms/IO/WriteBufferFromVector.h
similarity index 100%
rename from dbms/src/IO/WriteBufferFromVector.h
rename to dbms/IO/WriteBufferFromVector.h
diff --git a/dbms/src/IO/WriteBufferValidUTF8.cpp b/dbms/IO/WriteBufferValidUTF8.cpp
similarity index 100%
rename from dbms/src/IO/WriteBufferValidUTF8.cpp
rename to dbms/IO/WriteBufferValidUTF8.cpp
diff --git a/dbms/src/IO/WriteBufferValidUTF8.h b/dbms/IO/WriteBufferValidUTF8.h
similarity index 100%
rename from dbms/src/IO/WriteBufferValidUTF8.h
rename to dbms/IO/WriteBufferValidUTF8.h
diff --git a/dbms/src/IO/WriteHelpers.cpp b/dbms/IO/WriteHelpers.cpp
similarity index 100%
rename from dbms/src/IO/WriteHelpers.cpp
rename to dbms/IO/WriteHelpers.cpp
diff --git a/dbms/src/IO/WriteHelpers.h b/dbms/IO/WriteHelpers.h
similarity index 100%
rename from dbms/src/IO/WriteHelpers.h
rename to dbms/IO/WriteHelpers.h
diff --git a/dbms/src/IO/WriteIntText.h b/dbms/IO/WriteIntText.h
similarity index 100%
rename from dbms/src/IO/WriteIntText.h
rename to dbms/IO/WriteIntText.h
diff --git a/dbms/src/IO/ZlibDeflatingWriteBuffer.cpp b/dbms/IO/ZlibDeflatingWriteBuffer.cpp
similarity index 100%
rename from dbms/src/IO/ZlibDeflatingWriteBuffer.cpp
rename to dbms/IO/ZlibDeflatingWriteBuffer.cpp
diff --git a/dbms/src/IO/ZlibDeflatingWriteBuffer.h b/dbms/IO/ZlibDeflatingWriteBuffer.h
similarity index 100%
rename from dbms/src/IO/ZlibDeflatingWriteBuffer.h
rename to dbms/IO/ZlibDeflatingWriteBuffer.h
diff --git a/dbms/src/IO/ZlibInflatingReadBuffer.cpp b/dbms/IO/ZlibInflatingReadBuffer.cpp
similarity index 100%
rename from dbms/src/IO/ZlibInflatingReadBuffer.cpp
rename to dbms/IO/ZlibInflatingReadBuffer.cpp
diff --git a/dbms/src/IO/ZlibInflatingReadBuffer.h b/dbms/IO/ZlibInflatingReadBuffer.h
similarity index 100%
rename from dbms/src/IO/ZlibInflatingReadBuffer.h
rename to dbms/IO/ZlibInflatingReadBuffer.h
diff --git a/dbms/src/IO/copyData.cpp b/dbms/IO/copyData.cpp
similarity index 100%
rename from dbms/src/IO/copyData.cpp
rename to dbms/IO/copyData.cpp
diff --git a/dbms/src/IO/copyData.h b/dbms/IO/copyData.h
similarity index 100%
rename from dbms/src/IO/copyData.h
rename to dbms/IO/copyData.h
diff --git a/dbms/src/IO/createReadBufferFromFileBase.cpp b/dbms/IO/createReadBufferFromFileBase.cpp
similarity index 100%
rename from dbms/src/IO/createReadBufferFromFileBase.cpp
rename to dbms/IO/createReadBufferFromFileBase.cpp
diff --git a/dbms/src/IO/createReadBufferFromFileBase.h b/dbms/IO/createReadBufferFromFileBase.h
similarity index 100%
rename from dbms/src/IO/createReadBufferFromFileBase.h
rename to dbms/IO/createReadBufferFromFileBase.h
diff --git a/dbms/src/IO/createWriteBufferFromFileBase.cpp b/dbms/IO/createWriteBufferFromFileBase.cpp
similarity index 100%
rename from dbms/src/IO/createWriteBufferFromFileBase.cpp
rename to dbms/IO/createWriteBufferFromFileBase.cpp
diff --git a/dbms/src/IO/createWriteBufferFromFileBase.h b/dbms/IO/createWriteBufferFromFileBase.h
similarity index 100%
rename from dbms/src/IO/createWriteBufferFromFileBase.h
rename to dbms/IO/createWriteBufferFromFileBase.h
diff --git a/dbms/src/IO/parseDateTimeBestEffort.cpp b/dbms/IO/parseDateTimeBestEffort.cpp
similarity index 100%
rename from dbms/src/IO/parseDateTimeBestEffort.cpp
rename to dbms/IO/parseDateTimeBestEffort.cpp
diff --git a/dbms/src/IO/parseDateTimeBestEffort.h b/dbms/IO/parseDateTimeBestEffort.h
similarity index 100%
rename from dbms/src/IO/parseDateTimeBestEffort.h
rename to dbms/IO/parseDateTimeBestEffort.h
diff --git a/dbms/src/IO/readDecimalText.h b/dbms/IO/readDecimalText.h
similarity index 100%
rename from dbms/src/IO/readDecimalText.h
rename to dbms/IO/readDecimalText.h
diff --git a/dbms/src/IO/readFloatText.cpp b/dbms/IO/readFloatText.cpp
similarity index 100%
rename from dbms/src/IO/readFloatText.cpp
rename to dbms/IO/readFloatText.cpp
diff --git a/dbms/src/IO/readFloatText.h b/dbms/IO/readFloatText.h
similarity index 100%
rename from dbms/src/IO/readFloatText.h
rename to dbms/IO/readFloatText.h
diff --git a/dbms/src/IO/tests/CMakeLists.txt b/dbms/IO/tests/CMakeLists.txt
similarity index 100%
rename from dbms/src/IO/tests/CMakeLists.txt
rename to dbms/IO/tests/CMakeLists.txt
diff --git a/dbms/src/IO/tests/async_write.cpp b/dbms/IO/tests/async_write.cpp
similarity index 100%
rename from dbms/src/IO/tests/async_write.cpp
rename to dbms/IO/tests/async_write.cpp
diff --git a/dbms/src/IO/tests/gtest_DateTime64_parsing_and_writing.cpp b/dbms/IO/tests/gtest_DateTime64_parsing_and_writing.cpp
similarity index 100%
rename from dbms/src/IO/tests/gtest_DateTime64_parsing_and_writing.cpp
rename to dbms/IO/tests/gtest_DateTime64_parsing_and_writing.cpp
diff --git a/dbms/src/IO/tests/gtest_DateTimeToString.cpp b/dbms/IO/tests/gtest_DateTimeToString.cpp
similarity index 100%
rename from dbms/src/IO/tests/gtest_DateTimeToString.cpp
rename to dbms/IO/tests/gtest_DateTimeToString.cpp
diff --git a/dbms/src/IO/tests/gtest_aio_seek_back_after_eof.cpp b/dbms/IO/tests/gtest_aio_seek_back_after_eof.cpp
similarity index 100%
rename from dbms/src/IO/tests/gtest_aio_seek_back_after_eof.cpp
rename to dbms/IO/tests/gtest_aio_seek_back_after_eof.cpp
diff --git a/dbms/src/IO/tests/gtest_bit_io.cpp b/dbms/IO/tests/gtest_bit_io.cpp
similarity index 100%
rename from dbms/src/IO/tests/gtest_bit_io.cpp
rename to dbms/IO/tests/gtest_bit_io.cpp
diff --git a/dbms/src/IO/tests/gtest_cascade_and_memory_write_buffer.cpp b/dbms/IO/tests/gtest_cascade_and_memory_write_buffer.cpp
similarity index 100%
rename from dbms/src/IO/tests/gtest_cascade_and_memory_write_buffer.cpp
rename to dbms/IO/tests/gtest_cascade_and_memory_write_buffer.cpp
diff --git a/dbms/src/IO/tests/gtest_peekable_read_buffer.cpp b/dbms/IO/tests/gtest_peekable_read_buffer.cpp
similarity index 100%
rename from dbms/src/IO/tests/gtest_peekable_read_buffer.cpp
rename to dbms/IO/tests/gtest_peekable_read_buffer.cpp
diff --git a/dbms/src/IO/tests/hashing_buffer.h b/dbms/IO/tests/hashing_buffer.h
similarity index 100%
rename from dbms/src/IO/tests/hashing_buffer.h
rename to dbms/IO/tests/hashing_buffer.h
diff --git a/dbms/src/IO/tests/hashing_read_buffer.cpp b/dbms/IO/tests/hashing_read_buffer.cpp
similarity index 100%
rename from dbms/src/IO/tests/hashing_read_buffer.cpp
rename to dbms/IO/tests/hashing_read_buffer.cpp
diff --git a/dbms/src/IO/tests/hashing_write_buffer.cpp b/dbms/IO/tests/hashing_write_buffer.cpp
similarity index 100%
rename from dbms/src/IO/tests/hashing_write_buffer.cpp
rename to dbms/IO/tests/hashing_write_buffer.cpp
diff --git a/dbms/src/IO/tests/io_operators.cpp b/dbms/IO/tests/io_operators.cpp
similarity index 100%
rename from dbms/src/IO/tests/io_operators.cpp
rename to dbms/IO/tests/io_operators.cpp
diff --git a/dbms/src/IO/tests/limit_read_buffer.cpp b/dbms/IO/tests/limit_read_buffer.cpp
similarity index 100%
rename from dbms/src/IO/tests/limit_read_buffer.cpp
rename to dbms/IO/tests/limit_read_buffer.cpp
diff --git a/dbms/src/IO/tests/limit_read_buffer.reference b/dbms/IO/tests/limit_read_buffer.reference
similarity index 100%
rename from dbms/src/IO/tests/limit_read_buffer.reference
rename to dbms/IO/tests/limit_read_buffer.reference
diff --git a/dbms/src/IO/tests/limit_read_buffer.sh b/dbms/IO/tests/limit_read_buffer.sh
similarity index 100%
rename from dbms/src/IO/tests/limit_read_buffer.sh
rename to dbms/IO/tests/limit_read_buffer.sh
diff --git a/dbms/src/IO/tests/limit_read_buffer2.cpp b/dbms/IO/tests/limit_read_buffer2.cpp
similarity index 100%
rename from dbms/src/IO/tests/limit_read_buffer2.cpp
rename to dbms/IO/tests/limit_read_buffer2.cpp
diff --git a/dbms/src/IO/tests/mempbrk.cpp b/dbms/IO/tests/mempbrk.cpp
similarity index 100%
rename from dbms/src/IO/tests/mempbrk.cpp
rename to dbms/IO/tests/mempbrk.cpp
diff --git a/dbms/src/IO/tests/o_direct_and_dirty_pages.cpp b/dbms/IO/tests/o_direct_and_dirty_pages.cpp
similarity index 100%
rename from dbms/src/IO/tests/o_direct_and_dirty_pages.cpp
rename to dbms/IO/tests/o_direct_and_dirty_pages.cpp
diff --git a/dbms/src/IO/tests/parse_date_time_best_effort.cpp b/dbms/IO/tests/parse_date_time_best_effort.cpp
similarity index 100%
rename from dbms/src/IO/tests/parse_date_time_best_effort.cpp
rename to dbms/IO/tests/parse_date_time_best_effort.cpp
diff --git a/dbms/src/IO/tests/parse_int_perf.cpp b/dbms/IO/tests/parse_int_perf.cpp
similarity index 100%
rename from dbms/src/IO/tests/parse_int_perf.cpp
rename to dbms/IO/tests/parse_int_perf.cpp
diff --git a/dbms/src/IO/tests/parse_int_perf2.cpp b/dbms/IO/tests/parse_int_perf2.cpp
similarity index 100%
rename from dbms/src/IO/tests/parse_int_perf2.cpp
rename to dbms/IO/tests/parse_int_perf2.cpp
diff --git a/dbms/src/IO/tests/read_buffer.cpp b/dbms/IO/tests/read_buffer.cpp
similarity index 100%
rename from dbms/src/IO/tests/read_buffer.cpp
rename to dbms/IO/tests/read_buffer.cpp
diff --git a/dbms/src/IO/tests/read_buffer_aio.cpp b/dbms/IO/tests/read_buffer_aio.cpp
similarity index 100%
rename from dbms/src/IO/tests/read_buffer_aio.cpp
rename to dbms/IO/tests/read_buffer_aio.cpp
diff --git a/dbms/src/IO/tests/read_buffer_perf.cpp b/dbms/IO/tests/read_buffer_perf.cpp
similarity index 100%
rename from dbms/src/IO/tests/read_buffer_perf.cpp
rename to dbms/IO/tests/read_buffer_perf.cpp
diff --git a/dbms/src/IO/tests/read_escaped_string.cpp b/dbms/IO/tests/read_escaped_string.cpp
similarity index 100%
rename from dbms/src/IO/tests/read_escaped_string.cpp
rename to dbms/IO/tests/read_escaped_string.cpp
diff --git a/dbms/src/IO/tests/read_float_perf.cpp b/dbms/IO/tests/read_float_perf.cpp
similarity index 100%
rename from dbms/src/IO/tests/read_float_perf.cpp
rename to dbms/IO/tests/read_float_perf.cpp
diff --git a/dbms/src/IO/tests/read_write_int.cpp b/dbms/IO/tests/read_write_int.cpp
similarity index 100%
rename from dbms/src/IO/tests/read_write_int.cpp
rename to dbms/IO/tests/read_write_int.cpp
diff --git a/dbms/src/IO/tests/ryu_test.cpp b/dbms/IO/tests/ryu_test.cpp
similarity index 100%
rename from dbms/src/IO/tests/ryu_test.cpp
rename to dbms/IO/tests/ryu_test.cpp
diff --git a/dbms/src/IO/tests/valid_utf8.cpp b/dbms/IO/tests/valid_utf8.cpp
similarity index 100%
rename from dbms/src/IO/tests/valid_utf8.cpp
rename to dbms/IO/tests/valid_utf8.cpp
diff --git a/dbms/src/IO/tests/valid_utf8_perf.cpp b/dbms/IO/tests/valid_utf8_perf.cpp
similarity index 100%
rename from dbms/src/IO/tests/valid_utf8_perf.cpp
rename to dbms/IO/tests/valid_utf8_perf.cpp
diff --git a/dbms/src/IO/tests/var_uint.cpp b/dbms/IO/tests/var_uint.cpp
similarity index 100%
rename from dbms/src/IO/tests/var_uint.cpp
rename to dbms/IO/tests/var_uint.cpp
diff --git a/dbms/src/IO/tests/write_buffer.cpp b/dbms/IO/tests/write_buffer.cpp
similarity index 100%
rename from dbms/src/IO/tests/write_buffer.cpp
rename to dbms/IO/tests/write_buffer.cpp
diff --git a/dbms/src/IO/tests/write_buffer_aio.cpp b/dbms/IO/tests/write_buffer_aio.cpp
similarity index 100%
rename from dbms/src/IO/tests/write_buffer_aio.cpp
rename to dbms/IO/tests/write_buffer_aio.cpp
diff --git a/dbms/src/IO/tests/write_buffer_perf.cpp b/dbms/IO/tests/write_buffer_perf.cpp
similarity index 100%
rename from dbms/src/IO/tests/write_buffer_perf.cpp
rename to dbms/IO/tests/write_buffer_perf.cpp
diff --git a/dbms/src/IO/tests/write_int.cpp b/dbms/IO/tests/write_int.cpp
similarity index 100%
rename from dbms/src/IO/tests/write_int.cpp
rename to dbms/IO/tests/write_int.cpp
diff --git a/dbms/src/IO/tests/zlib_buffers.cpp b/dbms/IO/tests/zlib_buffers.cpp
similarity index 100%
rename from dbms/src/IO/tests/zlib_buffers.cpp
rename to dbms/IO/tests/zlib_buffers.cpp
diff --git a/dbms/src/IO/tests/zlib_ng_bug.cpp b/dbms/IO/tests/zlib_ng_bug.cpp
similarity index 100%
rename from dbms/src/IO/tests/zlib_ng_bug.cpp
rename to dbms/IO/tests/zlib_ng_bug.cpp
diff --git a/dbms/src/Interpreters/ActionLocksManager.cpp b/dbms/Interpreters/ActionLocksManager.cpp
similarity index 100%
rename from dbms/src/Interpreters/ActionLocksManager.cpp
rename to dbms/Interpreters/ActionLocksManager.cpp
diff --git a/dbms/src/Interpreters/ActionLocksManager.h b/dbms/Interpreters/ActionLocksManager.h
similarity index 100%
rename from dbms/src/Interpreters/ActionLocksManager.h
rename to dbms/Interpreters/ActionLocksManager.h
diff --git a/dbms/src/Interpreters/ActionsVisitor.cpp b/dbms/Interpreters/ActionsVisitor.cpp
similarity index 100%
rename from dbms/src/Interpreters/ActionsVisitor.cpp
rename to dbms/Interpreters/ActionsVisitor.cpp
diff --git a/dbms/src/Interpreters/ActionsVisitor.h b/dbms/Interpreters/ActionsVisitor.h
similarity index 100%
rename from dbms/src/Interpreters/ActionsVisitor.h
rename to dbms/Interpreters/ActionsVisitor.h
diff --git a/dbms/src/Interpreters/AddDefaultDatabaseVisitor.h b/dbms/Interpreters/AddDefaultDatabaseVisitor.h
similarity index 100%
rename from dbms/src/Interpreters/AddDefaultDatabaseVisitor.h
rename to dbms/Interpreters/AddDefaultDatabaseVisitor.h
diff --git a/dbms/src/Interpreters/AggregateDescription.h b/dbms/Interpreters/AggregateDescription.h
similarity index 100%
rename from dbms/src/Interpreters/AggregateDescription.h
rename to dbms/Interpreters/AggregateDescription.h
diff --git a/dbms/src/Interpreters/AggregationCommon.h b/dbms/Interpreters/AggregationCommon.h
similarity index 100%
rename from dbms/src/Interpreters/AggregationCommon.h
rename to dbms/Interpreters/AggregationCommon.h
diff --git a/dbms/src/Interpreters/Aggregator.cpp b/dbms/Interpreters/Aggregator.cpp
similarity index 100%
rename from dbms/src/Interpreters/Aggregator.cpp
rename to dbms/Interpreters/Aggregator.cpp
diff --git a/dbms/src/Interpreters/Aggregator.h b/dbms/Interpreters/Aggregator.h
similarity index 100%
rename from dbms/src/Interpreters/Aggregator.h
rename to dbms/Interpreters/Aggregator.h
diff --git a/dbms/src/Interpreters/Aliases.h b/dbms/Interpreters/Aliases.h
similarity index 100%
rename from dbms/src/Interpreters/Aliases.h
rename to dbms/Interpreters/Aliases.h
diff --git a/dbms/src/Interpreters/AnalyzedJoin.cpp b/dbms/Interpreters/AnalyzedJoin.cpp
similarity index 100%
rename from dbms/src/Interpreters/AnalyzedJoin.cpp
rename to dbms/Interpreters/AnalyzedJoin.cpp
diff --git a/dbms/src/Interpreters/AnalyzedJoin.h b/dbms/Interpreters/AnalyzedJoin.h
similarity index 100%
rename from dbms/src/Interpreters/AnalyzedJoin.h
rename to dbms/Interpreters/AnalyzedJoin.h
diff --git a/dbms/src/Interpreters/ArrayJoinAction.cpp b/dbms/Interpreters/ArrayJoinAction.cpp
similarity index 100%
rename from dbms/src/Interpreters/ArrayJoinAction.cpp
rename to dbms/Interpreters/ArrayJoinAction.cpp
diff --git a/dbms/src/Interpreters/ArrayJoinAction.h b/dbms/Interpreters/ArrayJoinAction.h
similarity index 100%
rename from dbms/src/Interpreters/ArrayJoinAction.h
rename to dbms/Interpreters/ArrayJoinAction.h
diff --git a/dbms/src/Interpreters/ArrayJoinedColumnsVisitor.h b/dbms/Interpreters/ArrayJoinedColumnsVisitor.h
similarity index 100%
rename from dbms/src/Interpreters/ArrayJoinedColumnsVisitor.h
rename to dbms/Interpreters/ArrayJoinedColumnsVisitor.h
diff --git a/dbms/src/Interpreters/AsteriskSemantic.h b/dbms/Interpreters/AsteriskSemantic.h
similarity index 100%
rename from dbms/src/Interpreters/AsteriskSemantic.h
rename to dbms/Interpreters/AsteriskSemantic.h
diff --git a/dbms/src/Interpreters/AsynchronousMetrics.cpp b/dbms/Interpreters/AsynchronousMetrics.cpp
similarity index 100%
rename from dbms/src/Interpreters/AsynchronousMetrics.cpp
rename to dbms/Interpreters/AsynchronousMetrics.cpp
diff --git a/dbms/src/Interpreters/AsynchronousMetrics.h b/dbms/Interpreters/AsynchronousMetrics.h
similarity index 100%
rename from dbms/src/Interpreters/AsynchronousMetrics.h
rename to dbms/Interpreters/AsynchronousMetrics.h
diff --git a/dbms/src/Interpreters/BloomFilter.cpp b/dbms/Interpreters/BloomFilter.cpp
similarity index 100%
rename from dbms/src/Interpreters/BloomFilter.cpp
rename to dbms/Interpreters/BloomFilter.cpp
diff --git a/dbms/src/Interpreters/BloomFilter.h b/dbms/Interpreters/BloomFilter.h
similarity index 100%
rename from dbms/src/Interpreters/BloomFilter.h
rename to dbms/Interpreters/BloomFilter.h
diff --git a/dbms/src/Interpreters/BloomFilterHash.h b/dbms/Interpreters/BloomFilterHash.h
similarity index 100%
rename from dbms/src/Interpreters/BloomFilterHash.h
rename to dbms/Interpreters/BloomFilterHash.h
diff --git a/dbms/src/Interpreters/CMakeLists.txt b/dbms/Interpreters/CMakeLists.txt
similarity index 100%
rename from dbms/src/Interpreters/CMakeLists.txt
rename to dbms/Interpreters/CMakeLists.txt
diff --git a/dbms/src/Interpreters/CancellationCode.h b/dbms/Interpreters/CancellationCode.h
similarity index 100%
rename from dbms/src/Interpreters/CancellationCode.h
rename to dbms/Interpreters/CancellationCode.h
diff --git a/dbms/src/Interpreters/CatBoostModel.cpp b/dbms/Interpreters/CatBoostModel.cpp
similarity index 100%
rename from dbms/src/Interpreters/CatBoostModel.cpp
rename to dbms/Interpreters/CatBoostModel.cpp
diff --git a/dbms/src/Interpreters/CatBoostModel.h b/dbms/Interpreters/CatBoostModel.h
similarity index 100%
rename from dbms/src/Interpreters/CatBoostModel.h
rename to dbms/Interpreters/CatBoostModel.h
diff --git a/dbms/src/Interpreters/ClientInfo.cpp b/dbms/Interpreters/ClientInfo.cpp
similarity index 100%
rename from dbms/src/Interpreters/ClientInfo.cpp
rename to dbms/Interpreters/ClientInfo.cpp
diff --git a/dbms/src/Interpreters/ClientInfo.h b/dbms/Interpreters/ClientInfo.h
similarity index 100%
rename from dbms/src/Interpreters/ClientInfo.h
rename to dbms/Interpreters/ClientInfo.h
diff --git a/dbms/src/Interpreters/Cluster.cpp b/dbms/Interpreters/Cluster.cpp
similarity index 100%
rename from dbms/src/Interpreters/Cluster.cpp
rename to dbms/Interpreters/Cluster.cpp
diff --git a/dbms/src/Interpreters/Cluster.h b/dbms/Interpreters/Cluster.h
similarity index 100%
rename from dbms/src/Interpreters/Cluster.h
rename to dbms/Interpreters/Cluster.h
diff --git a/dbms/src/Interpreters/ClusterProxy/IStreamFactory.h b/dbms/Interpreters/ClusterProxy/IStreamFactory.h
similarity index 100%
rename from dbms/src/Interpreters/ClusterProxy/IStreamFactory.h
rename to dbms/Interpreters/ClusterProxy/IStreamFactory.h
diff --git a/dbms/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp b/dbms/Interpreters/ClusterProxy/SelectStreamFactory.cpp
similarity index 100%
rename from dbms/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp
rename to dbms/Interpreters/ClusterProxy/SelectStreamFactory.cpp
diff --git a/dbms/src/Interpreters/ClusterProxy/SelectStreamFactory.h b/dbms/Interpreters/ClusterProxy/SelectStreamFactory.h
similarity index 100%
rename from dbms/src/Interpreters/ClusterProxy/SelectStreamFactory.h
rename to dbms/Interpreters/ClusterProxy/SelectStreamFactory.h
diff --git a/dbms/src/Interpreters/ClusterProxy/executeQuery.cpp b/dbms/Interpreters/ClusterProxy/executeQuery.cpp
similarity index 100%
rename from dbms/src/Interpreters/ClusterProxy/executeQuery.cpp
rename to dbms/Interpreters/ClusterProxy/executeQuery.cpp
diff --git a/dbms/src/Interpreters/ClusterProxy/executeQuery.h b/dbms/Interpreters/ClusterProxy/executeQuery.h
similarity index 100%
rename from dbms/src/Interpreters/ClusterProxy/executeQuery.h
rename to dbms/Interpreters/ClusterProxy/executeQuery.h
diff --git a/dbms/src/Interpreters/CollectJoinOnKeysVisitor.cpp b/dbms/Interpreters/CollectJoinOnKeysVisitor.cpp
similarity index 100%
rename from dbms/src/Interpreters/CollectJoinOnKeysVisitor.cpp
rename to dbms/Interpreters/CollectJoinOnKeysVisitor.cpp
diff --git a/dbms/src/Interpreters/CollectJoinOnKeysVisitor.h b/dbms/Interpreters/CollectJoinOnKeysVisitor.h
similarity index 100%
rename from dbms/src/Interpreters/CollectJoinOnKeysVisitor.h
rename to dbms/Interpreters/CollectJoinOnKeysVisitor.h
diff --git a/dbms/src/Interpreters/ColumnNamesContext.cpp b/dbms/Interpreters/ColumnNamesContext.cpp
similarity index 100%
rename from dbms/src/Interpreters/ColumnNamesContext.cpp
rename to dbms/Interpreters/ColumnNamesContext.cpp
diff --git a/dbms/src/Interpreters/ColumnNamesContext.h b/dbms/Interpreters/ColumnNamesContext.h
similarity index 100%
rename from dbms/src/Interpreters/ColumnNamesContext.h
rename to dbms/Interpreters/ColumnNamesContext.h
diff --git a/dbms/src/Interpreters/Context.cpp b/dbms/Interpreters/Context.cpp
similarity index 100%
rename from dbms/src/Interpreters/Context.cpp
rename to dbms/Interpreters/Context.cpp
diff --git a/dbms/src/Interpreters/Context.h b/dbms/Interpreters/Context.h
similarity index 100%
rename from dbms/src/Interpreters/Context.h
rename to dbms/Interpreters/Context.h
diff --git a/dbms/src/Interpreters/CrossToInnerJoinVisitor.cpp b/dbms/Interpreters/CrossToInnerJoinVisitor.cpp
similarity index 100%
rename from dbms/src/Interpreters/CrossToInnerJoinVisitor.cpp
rename to dbms/Interpreters/CrossToInnerJoinVisitor.cpp
diff --git a/dbms/src/Interpreters/CrossToInnerJoinVisitor.h b/dbms/Interpreters/CrossToInnerJoinVisitor.h
similarity index 100%
rename from dbms/src/Interpreters/CrossToInnerJoinVisitor.h
rename to dbms/Interpreters/CrossToInnerJoinVisitor.h
diff --git a/dbms/src/Interpreters/DDLWorker.cpp b/dbms/Interpreters/DDLWorker.cpp
similarity index 100%
rename from dbms/src/Interpreters/DDLWorker.cpp
rename to dbms/Interpreters/DDLWorker.cpp
diff --git a/dbms/src/Interpreters/DDLWorker.h b/dbms/Interpreters/DDLWorker.h
similarity index 100%
rename from dbms/src/Interpreters/DDLWorker.h
rename to dbms/Interpreters/DDLWorker.h
diff --git a/dbms/src/Interpreters/DNSCacheUpdater.cpp b/dbms/Interpreters/DNSCacheUpdater.cpp
similarity index 100%
rename from dbms/src/Interpreters/DNSCacheUpdater.cpp
rename to dbms/Interpreters/DNSCacheUpdater.cpp
diff --git a/dbms/src/Interpreters/DNSCacheUpdater.h b/dbms/Interpreters/DNSCacheUpdater.h
similarity index 100%
rename from dbms/src/Interpreters/DNSCacheUpdater.h
rename to dbms/Interpreters/DNSCacheUpdater.h
diff --git a/dbms/src/Interpreters/DatabaseAndTableWithAlias.cpp b/dbms/Interpreters/DatabaseAndTableWithAlias.cpp
similarity index 100%
rename from dbms/src/Interpreters/DatabaseAndTableWithAlias.cpp
rename to dbms/Interpreters/DatabaseAndTableWithAlias.cpp
diff --git a/dbms/src/Interpreters/DatabaseAndTableWithAlias.h b/dbms/Interpreters/DatabaseAndTableWithAlias.h
similarity index 100%
rename from dbms/src/Interpreters/DatabaseAndTableWithAlias.h
rename to dbms/Interpreters/DatabaseAndTableWithAlias.h
diff --git a/dbms/src/Interpreters/DatabaseCatalog.cpp b/dbms/Interpreters/DatabaseCatalog.cpp
similarity index 100%
rename from dbms/src/Interpreters/DatabaseCatalog.cpp
rename to dbms/Interpreters/DatabaseCatalog.cpp
diff --git a/dbms/src/Interpreters/DatabaseCatalog.h b/dbms/Interpreters/DatabaseCatalog.h
similarity index 100%
rename from dbms/src/Interpreters/DatabaseCatalog.h
rename to dbms/Interpreters/DatabaseCatalog.h
diff --git a/dbms/src/Interpreters/EmbeddedDictionaries.cpp b/dbms/Interpreters/EmbeddedDictionaries.cpp
similarity index 100%
rename from dbms/src/Interpreters/EmbeddedDictionaries.cpp
rename to dbms/Interpreters/EmbeddedDictionaries.cpp
diff --git a/dbms/src/Interpreters/EmbeddedDictionaries.h b/dbms/Interpreters/EmbeddedDictionaries.h
similarity index 100%
rename from dbms/src/Interpreters/EmbeddedDictionaries.h
rename to dbms/Interpreters/EmbeddedDictionaries.h
diff --git a/dbms/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp b/dbms/Interpreters/ExecuteScalarSubqueriesVisitor.cpp
similarity index 100%
rename from dbms/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp
rename to dbms/Interpreters/ExecuteScalarSubqueriesVisitor.cpp
diff --git a/dbms/src/Interpreters/ExecuteScalarSubqueriesVisitor.h b/dbms/Interpreters/ExecuteScalarSubqueriesVisitor.h
similarity index 100%
rename from dbms/src/Interpreters/ExecuteScalarSubqueriesVisitor.h
rename to dbms/Interpreters/ExecuteScalarSubqueriesVisitor.h
diff --git a/dbms/src/Interpreters/ExpressionActions.cpp b/dbms/Interpreters/ExpressionActions.cpp
similarity index 100%
rename from dbms/src/Interpreters/ExpressionActions.cpp
rename to dbms/Interpreters/ExpressionActions.cpp
diff --git a/dbms/src/Interpreters/ExpressionActions.h b/dbms/Interpreters/ExpressionActions.h
similarity index 100%
rename from dbms/src/Interpreters/ExpressionActions.h
rename to dbms/Interpreters/ExpressionActions.h
diff --git a/dbms/src/Interpreters/ExpressionAnalyzer.cpp b/dbms/Interpreters/ExpressionAnalyzer.cpp
similarity index 100%
rename from dbms/src/Interpreters/ExpressionAnalyzer.cpp
rename to dbms/Interpreters/ExpressionAnalyzer.cpp
diff --git a/dbms/src/Interpreters/ExpressionAnalyzer.h b/dbms/Interpreters/ExpressionAnalyzer.h
similarity index 100%
rename from dbms/src/Interpreters/ExpressionAnalyzer.h
rename to dbms/Interpreters/ExpressionAnalyzer.h
diff --git a/dbms/src/Interpreters/ExpressionJIT.cpp b/dbms/Interpreters/ExpressionJIT.cpp
similarity index 100%
rename from dbms/src/Interpreters/ExpressionJIT.cpp
rename to dbms/Interpreters/ExpressionJIT.cpp
diff --git a/dbms/src/Interpreters/ExpressionJIT.h b/dbms/Interpreters/ExpressionJIT.h
similarity index 100%
rename from dbms/src/Interpreters/ExpressionJIT.h
rename to dbms/Interpreters/ExpressionJIT.h
diff --git a/dbms/src/Interpreters/ExternalDictionariesLoader.cpp b/dbms/Interpreters/ExternalDictionariesLoader.cpp
similarity index 100%
rename from dbms/src/Interpreters/ExternalDictionariesLoader.cpp
rename to dbms/Interpreters/ExternalDictionariesLoader.cpp
diff --git a/dbms/src/Interpreters/ExternalDictionariesLoader.h b/dbms/Interpreters/ExternalDictionariesLoader.h
similarity index 100%
rename from dbms/src/Interpreters/ExternalDictionariesLoader.h
rename to dbms/Interpreters/ExternalDictionariesLoader.h
diff --git a/dbms/src/Interpreters/ExternalLoader.cpp b/dbms/Interpreters/ExternalLoader.cpp
similarity index 100%
rename from dbms/src/Interpreters/ExternalLoader.cpp
rename to dbms/Interpreters/ExternalLoader.cpp
diff --git a/dbms/src/Interpreters/ExternalLoader.h b/dbms/Interpreters/ExternalLoader.h
similarity index 100%
rename from dbms/src/Interpreters/ExternalLoader.h
rename to dbms/Interpreters/ExternalLoader.h
diff --git a/dbms/src/Interpreters/ExternalLoaderDatabaseConfigRepository.cpp b/dbms/Interpreters/ExternalLoaderDatabaseConfigRepository.cpp
similarity index 100%
rename from dbms/src/Interpreters/ExternalLoaderDatabaseConfigRepository.cpp
rename to dbms/Interpreters/ExternalLoaderDatabaseConfigRepository.cpp
diff --git a/dbms/src/Interpreters/ExternalLoaderDatabaseConfigRepository.h b/dbms/Interpreters/ExternalLoaderDatabaseConfigRepository.h
similarity index 100%
rename from dbms/src/Interpreters/ExternalLoaderDatabaseConfigRepository.h
rename to dbms/Interpreters/ExternalLoaderDatabaseConfigRepository.h
diff --git a/dbms/src/Interpreters/ExternalLoaderTempConfigRepository.cpp b/dbms/Interpreters/ExternalLoaderTempConfigRepository.cpp
similarity index 100%
rename from dbms/src/Interpreters/ExternalLoaderTempConfigRepository.cpp
rename to dbms/Interpreters/ExternalLoaderTempConfigRepository.cpp
diff --git a/dbms/src/Interpreters/ExternalLoaderTempConfigRepository.h b/dbms/Interpreters/ExternalLoaderTempConfigRepository.h
similarity index 100%
rename from dbms/src/Interpreters/ExternalLoaderTempConfigRepository.h
rename to dbms/Interpreters/ExternalLoaderTempConfigRepository.h
diff --git a/dbms/src/Interpreters/ExternalLoaderXMLConfigRepository.cpp b/dbms/Interpreters/ExternalLoaderXMLConfigRepository.cpp
similarity index 100%
rename from dbms/src/Interpreters/ExternalLoaderXMLConfigRepository.cpp
rename to dbms/Interpreters/ExternalLoaderXMLConfigRepository.cpp
diff --git a/dbms/src/Interpreters/ExternalLoaderXMLConfigRepository.h b/dbms/Interpreters/ExternalLoaderXMLConfigRepository.h
similarity index 100%
rename from dbms/src/Interpreters/ExternalLoaderXMLConfigRepository.h
rename to dbms/Interpreters/ExternalLoaderXMLConfigRepository.h
diff --git a/dbms/src/Interpreters/ExternalModelsLoader.cpp b/dbms/Interpreters/ExternalModelsLoader.cpp
similarity index 100%
rename from dbms/src/Interpreters/ExternalModelsLoader.cpp
rename to dbms/Interpreters/ExternalModelsLoader.cpp
diff --git a/dbms/src/Interpreters/ExternalModelsLoader.h b/dbms/Interpreters/ExternalModelsLoader.h
similarity index 100%
rename from dbms/src/Interpreters/ExternalModelsLoader.h
rename to dbms/Interpreters/ExternalModelsLoader.h
diff --git a/dbms/src/Interpreters/ExtractExpressionInfoVisitor.cpp b/dbms/Interpreters/ExtractExpressionInfoVisitor.cpp
similarity index 100%
rename from dbms/src/Interpreters/ExtractExpressionInfoVisitor.cpp
rename to dbms/Interpreters/ExtractExpressionInfoVisitor.cpp
diff --git a/dbms/src/Interpreters/ExtractExpressionInfoVisitor.h b/dbms/Interpreters/ExtractExpressionInfoVisitor.h
similarity index 100%
rename from dbms/src/Interpreters/ExtractExpressionInfoVisitor.h
rename to dbms/Interpreters/ExtractExpressionInfoVisitor.h
diff --git a/dbms/src/Interpreters/FillingRow.cpp b/dbms/Interpreters/FillingRow.cpp
similarity index 100%
rename from dbms/src/Interpreters/FillingRow.cpp
rename to dbms/Interpreters/FillingRow.cpp
diff --git a/dbms/src/Interpreters/FillingRow.h b/dbms/Interpreters/FillingRow.h
similarity index 100%
rename from dbms/src/Interpreters/FillingRow.h
rename to dbms/Interpreters/FillingRow.h
diff --git a/dbms/src/Interpreters/GetAggregatesVisitor.h b/dbms/Interpreters/GetAggregatesVisitor.h
similarity index 100%
rename from dbms/src/Interpreters/GetAggregatesVisitor.h
rename to dbms/Interpreters/GetAggregatesVisitor.h
diff --git a/dbms/src/Interpreters/GlobalSubqueriesVisitor.h b/dbms/Interpreters/GlobalSubqueriesVisitor.h
similarity index 100%
rename from dbms/src/Interpreters/GlobalSubqueriesVisitor.h
rename to dbms/Interpreters/GlobalSubqueriesVisitor.h
diff --git a/dbms/src/Interpreters/IExternalLoadable.cpp b/dbms/Interpreters/IExternalLoadable.cpp
similarity index 100%
rename from dbms/src/Interpreters/IExternalLoadable.cpp
rename to dbms/Interpreters/IExternalLoadable.cpp
diff --git a/dbms/src/Interpreters/IExternalLoadable.h b/dbms/Interpreters/IExternalLoadable.h
similarity index 100%
rename from dbms/src/Interpreters/IExternalLoadable.h
rename to dbms/Interpreters/IExternalLoadable.h
diff --git a/dbms/src/Interpreters/IExternalLoaderConfigRepository.h b/dbms/Interpreters/IExternalLoaderConfigRepository.h
similarity index 100%
rename from dbms/src/Interpreters/IExternalLoaderConfigRepository.h
rename to dbms/Interpreters/IExternalLoaderConfigRepository.h
diff --git a/dbms/src/Interpreters/IInterpreter.h b/dbms/Interpreters/IInterpreter.h
similarity index 100%
rename from dbms/src/Interpreters/IInterpreter.h
rename to dbms/Interpreters/IInterpreter.h
diff --git a/dbms/src/Interpreters/IJoin.h b/dbms/Interpreters/IJoin.h
similarity index 100%
rename from dbms/src/Interpreters/IJoin.h
rename to dbms/Interpreters/IJoin.h
diff --git a/dbms/src/Interpreters/IdentifierSemantic.cpp b/dbms/Interpreters/IdentifierSemantic.cpp
similarity index 100%
rename from dbms/src/Interpreters/IdentifierSemantic.cpp
rename to dbms/Interpreters/IdentifierSemantic.cpp
diff --git a/dbms/src/Interpreters/IdentifierSemantic.h b/dbms/Interpreters/IdentifierSemantic.h
similarity index 100%
rename from dbms/src/Interpreters/IdentifierSemantic.h
rename to dbms/Interpreters/IdentifierSemantic.h
diff --git a/dbms/src/Interpreters/InDepthNodeVisitor.h b/dbms/Interpreters/InDepthNodeVisitor.h
similarity index 100%
rename from dbms/src/Interpreters/InDepthNodeVisitor.h
rename to dbms/Interpreters/InDepthNodeVisitor.h
diff --git a/dbms/src/Interpreters/InJoinSubqueriesPreprocessor.cpp b/dbms/Interpreters/InJoinSubqueriesPreprocessor.cpp
similarity index 100%
rename from dbms/src/Interpreters/InJoinSubqueriesPreprocessor.cpp
rename to dbms/Interpreters/InJoinSubqueriesPreprocessor.cpp
diff --git a/dbms/src/Interpreters/InJoinSubqueriesPreprocessor.h b/dbms/Interpreters/InJoinSubqueriesPreprocessor.h
similarity index 100%
rename from dbms/src/Interpreters/InJoinSubqueriesPreprocessor.h
rename to dbms/Interpreters/InJoinSubqueriesPreprocessor.h
diff --git a/dbms/src/Interpreters/InternalTextLogsQueue.cpp b/dbms/Interpreters/InternalTextLogsQueue.cpp
similarity index 100%
rename from dbms/src/Interpreters/InternalTextLogsQueue.cpp
rename to dbms/Interpreters/InternalTextLogsQueue.cpp
diff --git a/dbms/src/Interpreters/InternalTextLogsQueue.h b/dbms/Interpreters/InternalTextLogsQueue.h
similarity index 100%
rename from dbms/src/Interpreters/InternalTextLogsQueue.h
rename to dbms/Interpreters/InternalTextLogsQueue.h
diff --git a/dbms/src/Interpreters/InterpreterAlterQuery.cpp b/dbms/Interpreters/InterpreterAlterQuery.cpp
similarity index 100%
rename from dbms/src/Interpreters/InterpreterAlterQuery.cpp
rename to dbms/Interpreters/InterpreterAlterQuery.cpp
diff --git a/dbms/src/Interpreters/InterpreterAlterQuery.h b/dbms/Interpreters/InterpreterAlterQuery.h
similarity index 100%
rename from dbms/src/Interpreters/InterpreterAlterQuery.h
rename to dbms/Interpreters/InterpreterAlterQuery.h
diff --git a/dbms/src/Interpreters/InterpreterCheckQuery.cpp b/dbms/Interpreters/InterpreterCheckQuery.cpp
similarity index 100%
rename from dbms/src/Interpreters/InterpreterCheckQuery.cpp
rename to dbms/Interpreters/InterpreterCheckQuery.cpp
diff --git a/dbms/src/Interpreters/InterpreterCheckQuery.h b/dbms/Interpreters/InterpreterCheckQuery.h
similarity index 100%
rename from dbms/src/Interpreters/InterpreterCheckQuery.h
rename to dbms/Interpreters/InterpreterCheckQuery.h
diff --git a/dbms/src/Interpreters/InterpreterCreateQuery.cpp b/dbms/Interpreters/InterpreterCreateQuery.cpp
similarity index 100%
rename from dbms/src/Interpreters/InterpreterCreateQuery.cpp
rename to dbms/Interpreters/InterpreterCreateQuery.cpp
diff --git a/dbms/src/Interpreters/InterpreterCreateQuery.h b/dbms/Interpreters/InterpreterCreateQuery.h
similarity index 100%
rename from dbms/src/Interpreters/InterpreterCreateQuery.h
rename to dbms/Interpreters/InterpreterCreateQuery.h
diff --git a/dbms/src/Interpreters/InterpreterCreateQuotaQuery.cpp b/dbms/Interpreters/InterpreterCreateQuotaQuery.cpp
similarity index 100%
rename from dbms/src/Interpreters/InterpreterCreateQuotaQuery.cpp
rename to dbms/Interpreters/InterpreterCreateQuotaQuery.cpp
diff --git a/dbms/src/Interpreters/InterpreterCreateQuotaQuery.h b/dbms/Interpreters/InterpreterCreateQuotaQuery.h
similarity index 100%
rename from dbms/src/Interpreters/InterpreterCreateQuotaQuery.h
rename to dbms/Interpreters/InterpreterCreateQuotaQuery.h
diff --git a/dbms/src/Interpreters/InterpreterCreateRoleQuery.cpp b/dbms/Interpreters/InterpreterCreateRoleQuery.cpp
similarity index 100%
rename from dbms/src/Interpreters/InterpreterCreateRoleQuery.cpp
rename to dbms/Interpreters/InterpreterCreateRoleQuery.cpp
diff --git a/dbms/src/Interpreters/InterpreterCreateRoleQuery.h b/dbms/Interpreters/InterpreterCreateRoleQuery.h
similarity index 100%
rename from dbms/src/Interpreters/InterpreterCreateRoleQuery.h
rename to dbms/Interpreters/InterpreterCreateRoleQuery.h
diff --git a/dbms/src/Interpreters/InterpreterCreateRowPolicyQuery.cpp b/dbms/Interpreters/InterpreterCreateRowPolicyQuery.cpp
similarity index 100%
rename from dbms/src/Interpreters/InterpreterCreateRowPolicyQuery.cpp
rename to dbms/Interpreters/InterpreterCreateRowPolicyQuery.cpp
diff --git a/dbms/src/Interpreters/InterpreterCreateRowPolicyQuery.h b/dbms/Interpreters/InterpreterCreateRowPolicyQuery.h
similarity index 100%
rename from dbms/src/Interpreters/InterpreterCreateRowPolicyQuery.h
rename to dbms/Interpreters/InterpreterCreateRowPolicyQuery.h
diff --git a/dbms/src/Interpreters/InterpreterCreateSettingsProfileQuery.cpp b/dbms/Interpreters/InterpreterCreateSettingsProfileQuery.cpp
similarity index 100%
rename from dbms/src/Interpreters/InterpreterCreateSettingsProfileQuery.cpp
rename to dbms/Interpreters/InterpreterCreateSettingsProfileQuery.cpp
diff --git a/dbms/src/Interpreters/InterpreterCreateSettingsProfileQuery.h b/dbms/Interpreters/InterpreterCreateSettingsProfileQuery.h
similarity index 100%
rename from dbms/src/Interpreters/InterpreterCreateSettingsProfileQuery.h
rename to dbms/Interpreters/InterpreterCreateSettingsProfileQuery.h
diff --git a/dbms/src/Interpreters/InterpreterCreateUserQuery.cpp b/dbms/Interpreters/InterpreterCreateUserQuery.cpp
similarity index 100%
rename from dbms/src/Interpreters/InterpreterCreateUserQuery.cpp
rename to dbms/Interpreters/InterpreterCreateUserQuery.cpp
diff --git a/dbms/src/Interpreters/InterpreterCreateUserQuery.h b/dbms/Interpreters/InterpreterCreateUserQuery.h
similarity index 100%
rename from dbms/src/Interpreters/InterpreterCreateUserQuery.h
rename to dbms/Interpreters/InterpreterCreateUserQuery.h
diff --git a/dbms/src/Interpreters/InterpreterDescribeQuery.cpp b/dbms/Interpreters/InterpreterDescribeQuery.cpp
similarity index 100%
rename from dbms/src/Interpreters/InterpreterDescribeQuery.cpp
rename to dbms/Interpreters/InterpreterDescribeQuery.cpp
diff --git a/dbms/src/Interpreters/InterpreterDescribeQuery.h b/dbms/Interpreters/InterpreterDescribeQuery.h
similarity index 100%
rename from dbms/src/Interpreters/InterpreterDescribeQuery.h
rename to dbms/Interpreters/InterpreterDescribeQuery.h
diff --git a/dbms/src/Interpreters/InterpreterDropAccessEntityQuery.cpp b/dbms/Interpreters/InterpreterDropAccessEntityQuery.cpp
similarity index 100%
rename from dbms/src/Interpreters/InterpreterDropAccessEntityQuery.cpp
rename to dbms/Interpreters/InterpreterDropAccessEntityQuery.cpp
diff --git a/dbms/src/Interpreters/InterpreterDropAccessEntityQuery.h b/dbms/Interpreters/InterpreterDropAccessEntityQuery.h
similarity index 100%
rename from dbms/src/Interpreters/InterpreterDropAccessEntityQuery.h
rename to dbms/Interpreters/InterpreterDropAccessEntityQuery.h
diff --git a/dbms/src/Interpreters/InterpreterDropQuery.cpp b/dbms/Interpreters/InterpreterDropQuery.cpp
similarity index 100%
rename from dbms/src/Interpreters/InterpreterDropQuery.cpp
rename to dbms/Interpreters/InterpreterDropQuery.cpp
diff --git a/dbms/src/Interpreters/InterpreterDropQuery.h b/dbms/Interpreters/InterpreterDropQuery.h
similarity index 100%
rename from dbms/src/Interpreters/InterpreterDropQuery.h
rename to dbms/Interpreters/InterpreterDropQuery.h
diff --git a/dbms/src/Interpreters/InterpreterExistsQuery.cpp b/dbms/Interpreters/InterpreterExistsQuery.cpp
similarity index 100%
rename from dbms/src/Interpreters/InterpreterExistsQuery.cpp
rename to dbms/Interpreters/InterpreterExistsQuery.cpp
diff --git a/dbms/src/Interpreters/InterpreterExistsQuery.h b/dbms/Interpreters/InterpreterExistsQuery.h
similarity index 100%
rename from dbms/src/Interpreters/InterpreterExistsQuery.h
rename to dbms/Interpreters/InterpreterExistsQuery.h
diff --git a/dbms/src/Interpreters/InterpreterExplainQuery.cpp b/dbms/Interpreters/InterpreterExplainQuery.cpp
similarity index 100%
rename from dbms/src/Interpreters/InterpreterExplainQuery.cpp
rename to dbms/Interpreters/InterpreterExplainQuery.cpp
diff --git a/dbms/src/Interpreters/InterpreterExplainQuery.h b/dbms/Interpreters/InterpreterExplainQuery.h
similarity index 100%
rename from dbms/src/Interpreters/InterpreterExplainQuery.h
rename to dbms/Interpreters/InterpreterExplainQuery.h
diff --git a/dbms/src/Interpreters/InterpreterFactory.cpp b/dbms/Interpreters/InterpreterFactory.cpp
similarity index 100%
rename from dbms/src/Interpreters/InterpreterFactory.cpp
rename to dbms/Interpreters/InterpreterFactory.cpp
diff --git a/dbms/src/Interpreters/InterpreterFactory.h b/dbms/Interpreters/InterpreterFactory.h
similarity index 100%
rename from dbms/src/Interpreters/InterpreterFactory.h
rename to dbms/Interpreters/InterpreterFactory.h
diff --git a/dbms/src/Interpreters/InterpreterGrantQuery.cpp b/dbms/Interpreters/InterpreterGrantQuery.cpp
similarity index 100%
rename from dbms/src/Interpreters/InterpreterGrantQuery.cpp
rename to dbms/Interpreters/InterpreterGrantQuery.cpp
diff --git a/dbms/src/Interpreters/InterpreterGrantQuery.h b/dbms/Interpreters/InterpreterGrantQuery.h
similarity index 100%
rename from dbms/src/Interpreters/InterpreterGrantQuery.h
rename to dbms/Interpreters/InterpreterGrantQuery.h
diff --git a/dbms/src/Interpreters/InterpreterInsertQuery.cpp b/dbms/Interpreters/InterpreterInsertQuery.cpp
similarity index 100%
rename from dbms/src/Interpreters/InterpreterInsertQuery.cpp
rename to dbms/Interpreters/InterpreterInsertQuery.cpp
diff --git a/dbms/src/Interpreters/InterpreterInsertQuery.h b/dbms/Interpreters/InterpreterInsertQuery.h
similarity index 100%
rename from dbms/src/Interpreters/InterpreterInsertQuery.h
rename to dbms/Interpreters/InterpreterInsertQuery.h
diff --git a/dbms/src/Interpreters/InterpreterKillQueryQuery.cpp b/dbms/Interpreters/InterpreterKillQueryQuery.cpp
similarity index 100%
rename from dbms/src/Interpreters/InterpreterKillQueryQuery.cpp
rename to dbms/Interpreters/InterpreterKillQueryQuery.cpp
diff --git a/dbms/src/Interpreters/InterpreterKillQueryQuery.h b/dbms/Interpreters/InterpreterKillQueryQuery.h
similarity index 100%
rename from dbms/src/Interpreters/InterpreterKillQueryQuery.h
rename to dbms/Interpreters/InterpreterKillQueryQuery.h
diff --git a/dbms/src/Interpreters/InterpreterOptimizeQuery.cpp b/dbms/Interpreters/InterpreterOptimizeQuery.cpp
similarity index 100%
rename from dbms/src/Interpreters/InterpreterOptimizeQuery.cpp
rename to dbms/Interpreters/InterpreterOptimizeQuery.cpp
diff --git a/dbms/src/Interpreters/InterpreterOptimizeQuery.h b/dbms/Interpreters/InterpreterOptimizeQuery.h
similarity index 100%
rename from dbms/src/Interpreters/InterpreterOptimizeQuery.h
rename to dbms/Interpreters/InterpreterOptimizeQuery.h
diff --git a/dbms/src/Interpreters/InterpreterRenameQuery.cpp b/dbms/Interpreters/InterpreterRenameQuery.cpp
similarity index 100%
rename from dbms/src/Interpreters/InterpreterRenameQuery.cpp
rename to dbms/Interpreters/InterpreterRenameQuery.cpp
diff --git a/dbms/src/Interpreters/InterpreterRenameQuery.h b/dbms/Interpreters/InterpreterRenameQuery.h
similarity index 100%
rename from dbms/src/Interpreters/InterpreterRenameQuery.h
rename to dbms/Interpreters/InterpreterRenameQuery.h
diff --git a/dbms/src/Interpreters/InterpreterSelectQuery.cpp b/dbms/Interpreters/InterpreterSelectQuery.cpp
similarity index 100%
rename from dbms/src/Interpreters/InterpreterSelectQuery.cpp
rename to dbms/Interpreters/InterpreterSelectQuery.cpp
diff --git a/dbms/src/Interpreters/InterpreterSelectQuery.h b/dbms/Interpreters/InterpreterSelectQuery.h
similarity index 100%
rename from dbms/src/Interpreters/InterpreterSelectQuery.h
rename to dbms/Interpreters/InterpreterSelectQuery.h
diff --git a/dbms/src/Interpreters/InterpreterSelectWithUnionQuery.cpp b/dbms/Interpreters/InterpreterSelectWithUnionQuery.cpp
similarity index 100%
rename from dbms/src/Interpreters/InterpreterSelectWithUnionQuery.cpp
rename to dbms/Interpreters/InterpreterSelectWithUnionQuery.cpp
diff --git a/dbms/src/Interpreters/InterpreterSelectWithUnionQuery.h b/dbms/Interpreters/InterpreterSelectWithUnionQuery.h
similarity index 100%
rename from dbms/src/Interpreters/InterpreterSelectWithUnionQuery.h
rename to dbms/Interpreters/InterpreterSelectWithUnionQuery.h
diff --git a/dbms/src/Interpreters/InterpreterSetQuery.cpp b/dbms/Interpreters/InterpreterSetQuery.cpp
similarity index 100%
rename from dbms/src/Interpreters/InterpreterSetQuery.cpp
rename to dbms/Interpreters/InterpreterSetQuery.cpp
diff --git a/dbms/src/Interpreters/InterpreterSetQuery.h b/dbms/Interpreters/InterpreterSetQuery.h
similarity index 100%
rename from dbms/src/Interpreters/InterpreterSetQuery.h
rename to dbms/Interpreters/InterpreterSetQuery.h
diff --git a/dbms/src/Interpreters/InterpreterSetRoleQuery.cpp b/dbms/Interpreters/InterpreterSetRoleQuery.cpp
similarity index 100%
rename from dbms/src/Interpreters/InterpreterSetRoleQuery.cpp
rename to dbms/Interpreters/InterpreterSetRoleQuery.cpp
diff --git a/dbms/src/Interpreters/InterpreterSetRoleQuery.h b/dbms/Interpreters/InterpreterSetRoleQuery.h
similarity index 100%
rename from dbms/src/Interpreters/InterpreterSetRoleQuery.h
rename to dbms/Interpreters/InterpreterSetRoleQuery.h
diff --git a/dbms/src/Interpreters/InterpreterShowCreateAccessEntityQuery.cpp b/dbms/Interpreters/InterpreterShowCreateAccessEntityQuery.cpp
similarity index 100%
rename from dbms/src/Interpreters/InterpreterShowCreateAccessEntityQuery.cpp
rename to dbms/Interpreters/InterpreterShowCreateAccessEntityQuery.cpp
diff --git a/dbms/src/Interpreters/InterpreterShowCreateAccessEntityQuery.h b/dbms/Interpreters/InterpreterShowCreateAccessEntityQuery.h
similarity index 100%
rename from dbms/src/Interpreters/InterpreterShowCreateAccessEntityQuery.h
rename to dbms/Interpreters/InterpreterShowCreateAccessEntityQuery.h
diff --git a/dbms/src/Interpreters/InterpreterShowCreateQuery.cpp b/dbms/Interpreters/InterpreterShowCreateQuery.cpp
similarity index 100%
rename from dbms/src/Interpreters/InterpreterShowCreateQuery.cpp
rename to dbms/Interpreters/InterpreterShowCreateQuery.cpp
diff --git a/dbms/src/Interpreters/InterpreterShowCreateQuery.h b/dbms/Interpreters/InterpreterShowCreateQuery.h
similarity index 100%
rename from dbms/src/Interpreters/InterpreterShowCreateQuery.h
rename to dbms/Interpreters/InterpreterShowCreateQuery.h
diff --git a/dbms/src/Interpreters/InterpreterShowGrantsQuery.cpp b/dbms/Interpreters/InterpreterShowGrantsQuery.cpp
similarity index 100%
rename from dbms/src/Interpreters/InterpreterShowGrantsQuery.cpp
rename to dbms/Interpreters/InterpreterShowGrantsQuery.cpp
diff --git a/dbms/src/Interpreters/InterpreterShowGrantsQuery.h b/dbms/Interpreters/InterpreterShowGrantsQuery.h
similarity index 100%
rename from dbms/src/Interpreters/InterpreterShowGrantsQuery.h
rename to dbms/Interpreters/InterpreterShowGrantsQuery.h
diff --git a/dbms/src/Interpreters/InterpreterShowProcesslistQuery.cpp b/dbms/Interpreters/InterpreterShowProcesslistQuery.cpp
similarity index 100%
rename from dbms/src/Interpreters/InterpreterShowProcesslistQuery.cpp
rename to dbms/Interpreters/InterpreterShowProcesslistQuery.cpp
diff --git a/dbms/src/Interpreters/InterpreterShowProcesslistQuery.h b/dbms/Interpreters/InterpreterShowProcesslistQuery.h
similarity index 100%
rename from dbms/src/Interpreters/InterpreterShowProcesslistQuery.h
rename to dbms/Interpreters/InterpreterShowProcesslistQuery.h
diff --git a/dbms/src/Interpreters/InterpreterShowQuotasQuery.cpp b/dbms/Interpreters/InterpreterShowQuotasQuery.cpp
similarity index 100%
rename from dbms/src/Interpreters/InterpreterShowQuotasQuery.cpp
rename to dbms/Interpreters/InterpreterShowQuotasQuery.cpp
diff --git a/dbms/src/Interpreters/InterpreterShowQuotasQuery.h b/dbms/Interpreters/InterpreterShowQuotasQuery.h
similarity index 100%
rename from dbms/src/Interpreters/InterpreterShowQuotasQuery.h
rename to dbms/Interpreters/InterpreterShowQuotasQuery.h
diff --git a/dbms/src/Interpreters/InterpreterShowRowPoliciesQuery.cpp b/dbms/Interpreters/InterpreterShowRowPoliciesQuery.cpp
similarity index 100%
rename from dbms/src/Interpreters/InterpreterShowRowPoliciesQuery.cpp
rename to dbms/Interpreters/InterpreterShowRowPoliciesQuery.cpp
diff --git a/dbms/src/Interpreters/InterpreterShowRowPoliciesQuery.h b/dbms/Interpreters/InterpreterShowRowPoliciesQuery.h
similarity index 100%
rename from dbms/src/Interpreters/InterpreterShowRowPoliciesQuery.h
rename to dbms/Interpreters/InterpreterShowRowPoliciesQuery.h
diff --git a/dbms/src/Interpreters/InterpreterShowTablesQuery.cpp b/dbms/Interpreters/InterpreterShowTablesQuery.cpp
similarity index 100%
rename from dbms/src/Interpreters/InterpreterShowTablesQuery.cpp
rename to dbms/Interpreters/InterpreterShowTablesQuery.cpp
diff --git a/dbms/src/Interpreters/InterpreterShowTablesQuery.h b/dbms/Interpreters/InterpreterShowTablesQuery.h
similarity index 100%
rename from dbms/src/Interpreters/InterpreterShowTablesQuery.h
rename to dbms/Interpreters/InterpreterShowTablesQuery.h
diff --git a/dbms/src/Interpreters/InterpreterSystemQuery.cpp b/dbms/Interpreters/InterpreterSystemQuery.cpp
similarity index 100%
rename from dbms/src/Interpreters/InterpreterSystemQuery.cpp
rename to dbms/Interpreters/InterpreterSystemQuery.cpp
diff --git a/dbms/src/Interpreters/InterpreterSystemQuery.h b/dbms/Interpreters/InterpreterSystemQuery.h
similarity index 100%
rename from dbms/src/Interpreters/InterpreterSystemQuery.h
rename to dbms/Interpreters/InterpreterSystemQuery.h
diff --git a/dbms/src/Interpreters/InterpreterUseQuery.cpp b/dbms/Interpreters/InterpreterUseQuery.cpp
similarity index 100%
rename from dbms/src/Interpreters/InterpreterUseQuery.cpp
rename to dbms/Interpreters/InterpreterUseQuery.cpp
diff --git a/dbms/src/Interpreters/InterpreterUseQuery.h b/dbms/Interpreters/InterpreterUseQuery.h
similarity index 100%
rename from dbms/src/Interpreters/InterpreterUseQuery.h
rename to dbms/Interpreters/InterpreterUseQuery.h
diff --git a/dbms/src/Interpreters/InterpreterWatchQuery.cpp b/dbms/Interpreters/InterpreterWatchQuery.cpp
similarity index 100%
rename from dbms/src/Interpreters/InterpreterWatchQuery.cpp
rename to dbms/Interpreters/InterpreterWatchQuery.cpp
diff --git a/dbms/src/Interpreters/InterpreterWatchQuery.h b/dbms/Interpreters/InterpreterWatchQuery.h
similarity index 100%
rename from dbms/src/Interpreters/InterpreterWatchQuery.h
rename to dbms/Interpreters/InterpreterWatchQuery.h
diff --git a/dbms/src/Interpreters/InterserverIOHandler.h b/dbms/Interpreters/InterserverIOHandler.h
similarity index 100%
rename from dbms/src/Interpreters/InterserverIOHandler.h
rename to dbms/Interpreters/InterserverIOHandler.h
diff --git a/dbms/src/Interpreters/Join.cpp b/dbms/Interpreters/Join.cpp
similarity index 100%
rename from dbms/src/Interpreters/Join.cpp
rename to dbms/Interpreters/Join.cpp
diff --git a/dbms/src/Interpreters/Join.h b/dbms/Interpreters/Join.h
similarity index 100%
rename from dbms/src/Interpreters/Join.h
rename to dbms/Interpreters/Join.h
diff --git a/dbms/src/Interpreters/JoinSwitcher.cpp b/dbms/Interpreters/JoinSwitcher.cpp
similarity index 100%
rename from dbms/src/Interpreters/JoinSwitcher.cpp
rename to dbms/Interpreters/JoinSwitcher.cpp
diff --git a/dbms/src/Interpreters/JoinSwitcher.h b/dbms/Interpreters/JoinSwitcher.h
similarity index 100%
rename from dbms/src/Interpreters/JoinSwitcher.h
rename to dbms/Interpreters/JoinSwitcher.h
diff --git a/dbms/src/Interpreters/JoinToSubqueryTransformVisitor.cpp b/dbms/Interpreters/JoinToSubqueryTransformVisitor.cpp
similarity index 100%
rename from dbms/src/Interpreters/JoinToSubqueryTransformVisitor.cpp
rename to dbms/Interpreters/JoinToSubqueryTransformVisitor.cpp
diff --git a/dbms/src/Interpreters/JoinToSubqueryTransformVisitor.h b/dbms/Interpreters/JoinToSubqueryTransformVisitor.h
similarity index 100%
rename from dbms/src/Interpreters/JoinToSubqueryTransformVisitor.h
rename to dbms/Interpreters/JoinToSubqueryTransformVisitor.h
diff --git a/dbms/src/Interpreters/JoinedTables.cpp b/dbms/Interpreters/JoinedTables.cpp
similarity index 100%
rename from dbms/src/Interpreters/JoinedTables.cpp
rename to dbms/Interpreters/JoinedTables.cpp
diff --git a/dbms/src/Interpreters/JoinedTables.h b/dbms/Interpreters/JoinedTables.h
similarity index 100%
rename from dbms/src/Interpreters/JoinedTables.h
rename to dbms/Interpreters/JoinedTables.h
diff --git a/dbms/src/Interpreters/LogicalExpressionsOptimizer.cpp b/dbms/Interpreters/LogicalExpressionsOptimizer.cpp
similarity index 100%
rename from dbms/src/Interpreters/LogicalExpressionsOptimizer.cpp
rename to dbms/Interpreters/LogicalExpressionsOptimizer.cpp
diff --git a/dbms/src/Interpreters/LogicalExpressionsOptimizer.h b/dbms/Interpreters/LogicalExpressionsOptimizer.h
similarity index 100%
rename from dbms/src/Interpreters/LogicalExpressionsOptimizer.h
rename to dbms/Interpreters/LogicalExpressionsOptimizer.h
diff --git a/dbms/src/Interpreters/MarkTableIdentifiersVisitor.cpp b/dbms/Interpreters/MarkTableIdentifiersVisitor.cpp
similarity index 100%
rename from dbms/src/Interpreters/MarkTableIdentifiersVisitor.cpp
rename to dbms/Interpreters/MarkTableIdentifiersVisitor.cpp
diff --git a/dbms/src/Interpreters/MarkTableIdentifiersVisitor.h b/dbms/Interpreters/MarkTableIdentifiersVisitor.h
similarity index 100%
rename from dbms/src/Interpreters/MarkTableIdentifiersVisitor.h
rename to dbms/Interpreters/MarkTableIdentifiersVisitor.h
diff --git a/dbms/src/Interpreters/MergeJoin.cpp b/dbms/Interpreters/MergeJoin.cpp
similarity index 100%
rename from dbms/src/Interpreters/MergeJoin.cpp
rename to dbms/Interpreters/MergeJoin.cpp
diff --git a/dbms/src/Interpreters/MergeJoin.h b/dbms/Interpreters/MergeJoin.h
similarity index 100%
rename from dbms/src/Interpreters/MergeJoin.h
rename to dbms/Interpreters/MergeJoin.h
diff --git a/dbms/src/Interpreters/MetricLog.cpp b/dbms/Interpreters/MetricLog.cpp
similarity index 100%
rename from dbms/src/Interpreters/MetricLog.cpp
rename to dbms/Interpreters/MetricLog.cpp
diff --git a/dbms/src/Interpreters/MetricLog.h b/dbms/Interpreters/MetricLog.h
similarity index 100%
rename from dbms/src/Interpreters/MetricLog.h
rename to dbms/Interpreters/MetricLog.h
diff --git a/dbms/src/Interpreters/MutationsInterpreter.cpp b/dbms/Interpreters/MutationsInterpreter.cpp
similarity index 100%
rename from dbms/src/Interpreters/MutationsInterpreter.cpp
rename to dbms/Interpreters/MutationsInterpreter.cpp
diff --git a/dbms/src/Interpreters/MutationsInterpreter.h b/dbms/Interpreters/MutationsInterpreter.h
similarity index 100%
rename from dbms/src/Interpreters/MutationsInterpreter.h
rename to dbms/Interpreters/MutationsInterpreter.h
diff --git a/dbms/src/Interpreters/NullableUtils.cpp b/dbms/Interpreters/NullableUtils.cpp
similarity index 100%
rename from dbms/src/Interpreters/NullableUtils.cpp
rename to dbms/Interpreters/NullableUtils.cpp
diff --git a/dbms/src/Interpreters/NullableUtils.h b/dbms/Interpreters/NullableUtils.h
similarity index 100%
rename from dbms/src/Interpreters/NullableUtils.h
rename to dbms/Interpreters/NullableUtils.h
diff --git a/dbms/src/Interpreters/OptimizeIfChains.cpp b/dbms/Interpreters/OptimizeIfChains.cpp
similarity index 100%
rename from dbms/src/Interpreters/OptimizeIfChains.cpp
rename to dbms/Interpreters/OptimizeIfChains.cpp
diff --git a/dbms/src/Interpreters/OptimizeIfChains.h b/dbms/Interpreters/OptimizeIfChains.h
similarity index 100%
rename from dbms/src/Interpreters/OptimizeIfChains.h
rename to dbms/Interpreters/OptimizeIfChains.h
diff --git a/dbms/src/Interpreters/OptimizeIfWithConstantConditionVisitor.cpp b/dbms/Interpreters/OptimizeIfWithConstantConditionVisitor.cpp
similarity index 100%
rename from dbms/src/Interpreters/OptimizeIfWithConstantConditionVisitor.cpp
rename to dbms/Interpreters/OptimizeIfWithConstantConditionVisitor.cpp
diff --git a/dbms/src/Interpreters/OptimizeIfWithConstantConditionVisitor.h b/dbms/Interpreters/OptimizeIfWithConstantConditionVisitor.h
similarity index 100%
rename from dbms/src/Interpreters/OptimizeIfWithConstantConditionVisitor.h
rename to dbms/Interpreters/OptimizeIfWithConstantConditionVisitor.h
diff --git a/dbms/src/Interpreters/PartLog.cpp b/dbms/Interpreters/PartLog.cpp
similarity index 100%
rename from dbms/src/Interpreters/PartLog.cpp
rename to dbms/Interpreters/PartLog.cpp
diff --git a/dbms/src/Interpreters/PartLog.h b/dbms/Interpreters/PartLog.h
similarity index 100%
rename from dbms/src/Interpreters/PartLog.h
rename to dbms/Interpreters/PartLog.h
diff --git a/dbms/src/Interpreters/PredicateExpressionsOptimizer.cpp b/dbms/Interpreters/PredicateExpressionsOptimizer.cpp
similarity index 100%
rename from dbms/src/Interpreters/PredicateExpressionsOptimizer.cpp
rename to dbms/Interpreters/PredicateExpressionsOptimizer.cpp
diff --git a/dbms/src/Interpreters/PredicateExpressionsOptimizer.h b/dbms/Interpreters/PredicateExpressionsOptimizer.h
similarity index 100%
rename from dbms/src/Interpreters/PredicateExpressionsOptimizer.h
rename to dbms/Interpreters/PredicateExpressionsOptimizer.h
diff --git a/dbms/src/Interpreters/PredicateRewriteVisitor.cpp b/dbms/Interpreters/PredicateRewriteVisitor.cpp
similarity index 100%
rename from dbms/src/Interpreters/PredicateRewriteVisitor.cpp
rename to dbms/Interpreters/PredicateRewriteVisitor.cpp
diff --git a/dbms/src/Interpreters/PredicateRewriteVisitor.h b/dbms/Interpreters/PredicateRewriteVisitor.h
similarity index 100%
rename from dbms/src/Interpreters/PredicateRewriteVisitor.h
rename to dbms/Interpreters/PredicateRewriteVisitor.h
diff --git a/dbms/src/Interpreters/PreparedSets.h b/dbms/Interpreters/PreparedSets.h
similarity index 100%
rename from dbms/src/Interpreters/PreparedSets.h
rename to dbms/Interpreters/PreparedSets.h
a/dbms/src/Interpreters/ProcessList.cpp b/dbms/Interpreters/ProcessList.cpp similarity index 100% rename from dbms/src/Interpreters/ProcessList.cpp rename to dbms/Interpreters/ProcessList.cpp diff --git a/dbms/src/Interpreters/ProcessList.h b/dbms/Interpreters/ProcessList.h similarity index 100% rename from dbms/src/Interpreters/ProcessList.h rename to dbms/Interpreters/ProcessList.h diff --git a/dbms/src/Interpreters/ProfileEventsExt.cpp b/dbms/Interpreters/ProfileEventsExt.cpp similarity index 100% rename from dbms/src/Interpreters/ProfileEventsExt.cpp rename to dbms/Interpreters/ProfileEventsExt.cpp diff --git a/dbms/src/Interpreters/ProfileEventsExt.h b/dbms/Interpreters/ProfileEventsExt.h similarity index 100% rename from dbms/src/Interpreters/ProfileEventsExt.h rename to dbms/Interpreters/ProfileEventsExt.h diff --git a/dbms/src/Interpreters/QueryAliasesVisitor.cpp b/dbms/Interpreters/QueryAliasesVisitor.cpp similarity index 100% rename from dbms/src/Interpreters/QueryAliasesVisitor.cpp rename to dbms/Interpreters/QueryAliasesVisitor.cpp diff --git a/dbms/src/Interpreters/QueryAliasesVisitor.h b/dbms/Interpreters/QueryAliasesVisitor.h similarity index 100% rename from dbms/src/Interpreters/QueryAliasesVisitor.h rename to dbms/Interpreters/QueryAliasesVisitor.h diff --git a/dbms/src/Interpreters/QueryLog.cpp b/dbms/Interpreters/QueryLog.cpp similarity index 100% rename from dbms/src/Interpreters/QueryLog.cpp rename to dbms/Interpreters/QueryLog.cpp diff --git a/dbms/src/Interpreters/QueryLog.h b/dbms/Interpreters/QueryLog.h similarity index 100% rename from dbms/src/Interpreters/QueryLog.h rename to dbms/Interpreters/QueryLog.h diff --git a/dbms/src/Interpreters/QueryNormalizer.cpp b/dbms/Interpreters/QueryNormalizer.cpp similarity index 100% rename from dbms/src/Interpreters/QueryNormalizer.cpp rename to dbms/Interpreters/QueryNormalizer.cpp diff --git a/dbms/src/Interpreters/QueryNormalizer.h b/dbms/Interpreters/QueryNormalizer.h similarity index 100% rename from dbms/src/Interpreters/QueryNormalizer.h rename to dbms/Interpreters/QueryNormalizer.h diff --git a/dbms/src/Interpreters/QueryPriorities.h b/dbms/Interpreters/QueryPriorities.h similarity index 100% rename from dbms/src/Interpreters/QueryPriorities.h rename to dbms/Interpreters/QueryPriorities.h diff --git a/dbms/src/Interpreters/QueryThreadLog.cpp b/dbms/Interpreters/QueryThreadLog.cpp similarity index 100% rename from dbms/src/Interpreters/QueryThreadLog.cpp rename to dbms/Interpreters/QueryThreadLog.cpp diff --git a/dbms/src/Interpreters/QueryThreadLog.h b/dbms/Interpreters/QueryThreadLog.h similarity index 100% rename from dbms/src/Interpreters/QueryThreadLog.h rename to dbms/Interpreters/QueryThreadLog.h diff --git a/dbms/src/Interpreters/ReplaceQueryParameterVisitor.cpp b/dbms/Interpreters/ReplaceQueryParameterVisitor.cpp similarity index 100% rename from dbms/src/Interpreters/ReplaceQueryParameterVisitor.cpp rename to dbms/Interpreters/ReplaceQueryParameterVisitor.cpp diff --git a/dbms/src/Interpreters/ReplaceQueryParameterVisitor.h b/dbms/Interpreters/ReplaceQueryParameterVisitor.h similarity index 100% rename from dbms/src/Interpreters/ReplaceQueryParameterVisitor.h rename to dbms/Interpreters/ReplaceQueryParameterVisitor.h diff --git a/dbms/src/Interpreters/RequiredSourceColumnsVisitor.cpp b/dbms/Interpreters/RequiredSourceColumnsVisitor.cpp similarity index 100% rename from dbms/src/Interpreters/RequiredSourceColumnsVisitor.cpp rename to dbms/Interpreters/RequiredSourceColumnsVisitor.cpp diff --git 
a/dbms/src/Interpreters/RequiredSourceColumnsVisitor.h b/dbms/Interpreters/RequiredSourceColumnsVisitor.h similarity index 100% rename from dbms/src/Interpreters/RequiredSourceColumnsVisitor.h rename to dbms/Interpreters/RequiredSourceColumnsVisitor.h diff --git a/dbms/src/Interpreters/RowRefs.cpp b/dbms/Interpreters/RowRefs.cpp similarity index 100% rename from dbms/src/Interpreters/RowRefs.cpp rename to dbms/Interpreters/RowRefs.cpp diff --git a/dbms/src/Interpreters/RowRefs.h b/dbms/Interpreters/RowRefs.h similarity index 100% rename from dbms/src/Interpreters/RowRefs.h rename to dbms/Interpreters/RowRefs.h diff --git a/dbms/src/Interpreters/SelectQueryOptions.h b/dbms/Interpreters/SelectQueryOptions.h similarity index 100% rename from dbms/src/Interpreters/SelectQueryOptions.h rename to dbms/Interpreters/SelectQueryOptions.h diff --git a/dbms/src/Interpreters/Set.cpp b/dbms/Interpreters/Set.cpp similarity index 100% rename from dbms/src/Interpreters/Set.cpp rename to dbms/Interpreters/Set.cpp diff --git a/dbms/src/Interpreters/Set.h b/dbms/Interpreters/Set.h similarity index 100% rename from dbms/src/Interpreters/Set.h rename to dbms/Interpreters/Set.h diff --git a/dbms/src/Interpreters/SetVariants.cpp b/dbms/Interpreters/SetVariants.cpp similarity index 100% rename from dbms/src/Interpreters/SetVariants.cpp rename to dbms/Interpreters/SetVariants.cpp diff --git a/dbms/src/Interpreters/SetVariants.h b/dbms/Interpreters/SetVariants.h similarity index 100% rename from dbms/src/Interpreters/SetVariants.h rename to dbms/Interpreters/SetVariants.h diff --git a/dbms/src/Interpreters/StorageID.cpp b/dbms/Interpreters/StorageID.cpp similarity index 100% rename from dbms/src/Interpreters/StorageID.cpp rename to dbms/Interpreters/StorageID.cpp diff --git a/dbms/src/Interpreters/StorageID.h b/dbms/Interpreters/StorageID.h similarity index 100% rename from dbms/src/Interpreters/StorageID.h rename to dbms/Interpreters/StorageID.h diff --git a/dbms/src/Interpreters/SubqueryForSet.cpp b/dbms/Interpreters/SubqueryForSet.cpp similarity index 100% rename from dbms/src/Interpreters/SubqueryForSet.cpp rename to dbms/Interpreters/SubqueryForSet.cpp diff --git a/dbms/src/Interpreters/SubqueryForSet.h b/dbms/Interpreters/SubqueryForSet.h similarity index 100% rename from dbms/src/Interpreters/SubqueryForSet.h rename to dbms/Interpreters/SubqueryForSet.h diff --git a/dbms/src/Interpreters/SyntaxAnalyzer.cpp b/dbms/Interpreters/SyntaxAnalyzer.cpp similarity index 100% rename from dbms/src/Interpreters/SyntaxAnalyzer.cpp rename to dbms/Interpreters/SyntaxAnalyzer.cpp diff --git a/dbms/src/Interpreters/SyntaxAnalyzer.h b/dbms/Interpreters/SyntaxAnalyzer.h similarity index 100% rename from dbms/src/Interpreters/SyntaxAnalyzer.h rename to dbms/Interpreters/SyntaxAnalyzer.h diff --git a/dbms/src/Interpreters/SystemLog.cpp b/dbms/Interpreters/SystemLog.cpp similarity index 100% rename from dbms/src/Interpreters/SystemLog.cpp rename to dbms/Interpreters/SystemLog.cpp diff --git a/dbms/src/Interpreters/SystemLog.h b/dbms/Interpreters/SystemLog.h similarity index 100% rename from dbms/src/Interpreters/SystemLog.h rename to dbms/Interpreters/SystemLog.h diff --git a/dbms/src/Interpreters/TablesStatus.cpp b/dbms/Interpreters/TablesStatus.cpp similarity index 100% rename from dbms/src/Interpreters/TablesStatus.cpp rename to dbms/Interpreters/TablesStatus.cpp diff --git a/dbms/src/Interpreters/TablesStatus.h b/dbms/Interpreters/TablesStatus.h similarity index 100% rename from dbms/src/Interpreters/TablesStatus.h rename to 
dbms/Interpreters/TablesStatus.h diff --git a/dbms/src/Interpreters/TextLog.cpp b/dbms/Interpreters/TextLog.cpp similarity index 100% rename from dbms/src/Interpreters/TextLog.cpp rename to dbms/Interpreters/TextLog.cpp diff --git a/dbms/src/Interpreters/TextLog.h b/dbms/Interpreters/TextLog.h similarity index 100% rename from dbms/src/Interpreters/TextLog.h rename to dbms/Interpreters/TextLog.h diff --git a/dbms/src/Interpreters/ThreadStatusExt.cpp b/dbms/Interpreters/ThreadStatusExt.cpp similarity index 100% rename from dbms/src/Interpreters/ThreadStatusExt.cpp rename to dbms/Interpreters/ThreadStatusExt.cpp diff --git a/dbms/src/Interpreters/TraceLog.cpp b/dbms/Interpreters/TraceLog.cpp similarity index 100% rename from dbms/src/Interpreters/TraceLog.cpp rename to dbms/Interpreters/TraceLog.cpp diff --git a/dbms/src/Interpreters/TraceLog.h b/dbms/Interpreters/TraceLog.h similarity index 100% rename from dbms/src/Interpreters/TraceLog.h rename to dbms/Interpreters/TraceLog.h diff --git a/dbms/src/Interpreters/TranslateQualifiedNamesVisitor.cpp b/dbms/Interpreters/TranslateQualifiedNamesVisitor.cpp similarity index 100% rename from dbms/src/Interpreters/TranslateQualifiedNamesVisitor.cpp rename to dbms/Interpreters/TranslateQualifiedNamesVisitor.cpp diff --git a/dbms/src/Interpreters/TranslateQualifiedNamesVisitor.h b/dbms/Interpreters/TranslateQualifiedNamesVisitor.h similarity index 100% rename from dbms/src/Interpreters/TranslateQualifiedNamesVisitor.h rename to dbms/Interpreters/TranslateQualifiedNamesVisitor.h diff --git a/dbms/src/Interpreters/addMissingDefaults.cpp b/dbms/Interpreters/addMissingDefaults.cpp similarity index 100% rename from dbms/src/Interpreters/addMissingDefaults.cpp rename to dbms/Interpreters/addMissingDefaults.cpp diff --git a/dbms/src/Interpreters/addMissingDefaults.h b/dbms/Interpreters/addMissingDefaults.h similarity index 100% rename from dbms/src/Interpreters/addMissingDefaults.h rename to dbms/Interpreters/addMissingDefaults.h diff --git a/dbms/src/Interpreters/addTypeConversionToAST.cpp b/dbms/Interpreters/addTypeConversionToAST.cpp similarity index 100% rename from dbms/src/Interpreters/addTypeConversionToAST.cpp rename to dbms/Interpreters/addTypeConversionToAST.cpp diff --git a/dbms/src/Interpreters/addTypeConversionToAST.h b/dbms/Interpreters/addTypeConversionToAST.h similarity index 100% rename from dbms/src/Interpreters/addTypeConversionToAST.h rename to dbms/Interpreters/addTypeConversionToAST.h diff --git a/dbms/src/Interpreters/asof.h b/dbms/Interpreters/asof.h similarity index 100% rename from dbms/src/Interpreters/asof.h rename to dbms/Interpreters/asof.h diff --git a/dbms/src/Interpreters/castColumn.cpp b/dbms/Interpreters/castColumn.cpp similarity index 100% rename from dbms/src/Interpreters/castColumn.cpp rename to dbms/Interpreters/castColumn.cpp diff --git a/dbms/src/Interpreters/castColumn.h b/dbms/Interpreters/castColumn.h similarity index 100% rename from dbms/src/Interpreters/castColumn.h rename to dbms/Interpreters/castColumn.h diff --git a/dbms/src/Interpreters/convertFieldToType.cpp b/dbms/Interpreters/convertFieldToType.cpp similarity index 100% rename from dbms/src/Interpreters/convertFieldToType.cpp rename to dbms/Interpreters/convertFieldToType.cpp diff --git a/dbms/src/Interpreters/convertFieldToType.h b/dbms/Interpreters/convertFieldToType.h similarity index 100% rename from dbms/src/Interpreters/convertFieldToType.h rename to dbms/Interpreters/convertFieldToType.h diff --git a/dbms/src/Interpreters/createBlockSelector.cpp 
b/dbms/Interpreters/createBlockSelector.cpp similarity index 100% rename from dbms/src/Interpreters/createBlockSelector.cpp rename to dbms/Interpreters/createBlockSelector.cpp diff --git a/dbms/src/Interpreters/createBlockSelector.h b/dbms/Interpreters/createBlockSelector.h similarity index 100% rename from dbms/src/Interpreters/createBlockSelector.h rename to dbms/Interpreters/createBlockSelector.h diff --git a/dbms/src/Interpreters/evaluateConstantExpression.cpp b/dbms/Interpreters/evaluateConstantExpression.cpp similarity index 100% rename from dbms/src/Interpreters/evaluateConstantExpression.cpp rename to dbms/Interpreters/evaluateConstantExpression.cpp diff --git a/dbms/src/Interpreters/evaluateConstantExpression.h b/dbms/Interpreters/evaluateConstantExpression.h similarity index 100% rename from dbms/src/Interpreters/evaluateConstantExpression.h rename to dbms/Interpreters/evaluateConstantExpression.h diff --git a/dbms/src/Interpreters/executeQuery.cpp b/dbms/Interpreters/executeQuery.cpp similarity index 100% rename from dbms/src/Interpreters/executeQuery.cpp rename to dbms/Interpreters/executeQuery.cpp diff --git a/dbms/src/Interpreters/executeQuery.h b/dbms/Interpreters/executeQuery.h similarity index 100% rename from dbms/src/Interpreters/executeQuery.h rename to dbms/Interpreters/executeQuery.h diff --git a/dbms/src/Interpreters/getClusterName.cpp b/dbms/Interpreters/getClusterName.cpp similarity index 100% rename from dbms/src/Interpreters/getClusterName.cpp rename to dbms/Interpreters/getClusterName.cpp diff --git a/dbms/src/Interpreters/getClusterName.h b/dbms/Interpreters/getClusterName.h similarity index 100% rename from dbms/src/Interpreters/getClusterName.h rename to dbms/Interpreters/getClusterName.h diff --git a/dbms/src/Interpreters/getTableExpressions.cpp b/dbms/Interpreters/getTableExpressions.cpp similarity index 100% rename from dbms/src/Interpreters/getTableExpressions.cpp rename to dbms/Interpreters/getTableExpressions.cpp diff --git a/dbms/src/Interpreters/getTableExpressions.h b/dbms/Interpreters/getTableExpressions.h similarity index 100% rename from dbms/src/Interpreters/getTableExpressions.h rename to dbms/Interpreters/getTableExpressions.h diff --git a/dbms/src/Interpreters/inplaceBlockConversions.cpp b/dbms/Interpreters/inplaceBlockConversions.cpp similarity index 100% rename from dbms/src/Interpreters/inplaceBlockConversions.cpp rename to dbms/Interpreters/inplaceBlockConversions.cpp diff --git a/dbms/src/Interpreters/inplaceBlockConversions.h b/dbms/Interpreters/inplaceBlockConversions.h similarity index 100% rename from dbms/src/Interpreters/inplaceBlockConversions.h rename to dbms/Interpreters/inplaceBlockConversions.h diff --git a/dbms/src/Interpreters/interpretSubquery.cpp b/dbms/Interpreters/interpretSubquery.cpp similarity index 100% rename from dbms/src/Interpreters/interpretSubquery.cpp rename to dbms/Interpreters/interpretSubquery.cpp diff --git a/dbms/src/Interpreters/interpretSubquery.h b/dbms/Interpreters/interpretSubquery.h similarity index 100% rename from dbms/src/Interpreters/interpretSubquery.h rename to dbms/Interpreters/interpretSubquery.h diff --git a/dbms/src/Interpreters/joinDispatch.h b/dbms/Interpreters/joinDispatch.h similarity index 100% rename from dbms/src/Interpreters/joinDispatch.h rename to dbms/Interpreters/joinDispatch.h diff --git a/dbms/src/Interpreters/join_common.cpp b/dbms/Interpreters/join_common.cpp similarity index 100% rename from dbms/src/Interpreters/join_common.cpp rename to dbms/Interpreters/join_common.cpp 
diff --git a/dbms/src/Interpreters/join_common.h b/dbms/Interpreters/join_common.h similarity index 100% rename from dbms/src/Interpreters/join_common.h rename to dbms/Interpreters/join_common.h diff --git a/dbms/src/Interpreters/loadMetadata.cpp b/dbms/Interpreters/loadMetadata.cpp similarity index 100% rename from dbms/src/Interpreters/loadMetadata.cpp rename to dbms/Interpreters/loadMetadata.cpp diff --git a/dbms/src/Interpreters/loadMetadata.h b/dbms/Interpreters/loadMetadata.h similarity index 100% rename from dbms/src/Interpreters/loadMetadata.h rename to dbms/Interpreters/loadMetadata.h diff --git a/dbms/src/Interpreters/misc.h b/dbms/Interpreters/misc.h similarity index 100% rename from dbms/src/Interpreters/misc.h rename to dbms/Interpreters/misc.h diff --git a/dbms/src/Interpreters/sortBlock.cpp b/dbms/Interpreters/sortBlock.cpp similarity index 100% rename from dbms/src/Interpreters/sortBlock.cpp rename to dbms/Interpreters/sortBlock.cpp diff --git a/dbms/src/Interpreters/sortBlock.h b/dbms/Interpreters/sortBlock.h similarity index 100% rename from dbms/src/Interpreters/sortBlock.h rename to dbms/Interpreters/sortBlock.h diff --git a/dbms/src/Interpreters/tests/CMakeLists.txt b/dbms/Interpreters/tests/CMakeLists.txt similarity index 100% rename from dbms/src/Interpreters/tests/CMakeLists.txt rename to dbms/Interpreters/tests/CMakeLists.txt diff --git a/dbms/src/Interpreters/tests/aggregate.cpp b/dbms/Interpreters/tests/aggregate.cpp similarity index 100% rename from dbms/src/Interpreters/tests/aggregate.cpp rename to dbms/Interpreters/tests/aggregate.cpp diff --git a/dbms/src/Interpreters/tests/create_query.cpp b/dbms/Interpreters/tests/create_query.cpp similarity index 100% rename from dbms/src/Interpreters/tests/create_query.cpp rename to dbms/Interpreters/tests/create_query.cpp diff --git a/dbms/src/Interpreters/tests/expression.cpp b/dbms/Interpreters/tests/expression.cpp similarity index 100% rename from dbms/src/Interpreters/tests/expression.cpp rename to dbms/Interpreters/tests/expression.cpp diff --git a/dbms/src/Interpreters/tests/expression_analyzer.cpp b/dbms/Interpreters/tests/expression_analyzer.cpp similarity index 100% rename from dbms/src/Interpreters/tests/expression_analyzer.cpp rename to dbms/Interpreters/tests/expression_analyzer.cpp diff --git a/dbms/src/Interpreters/tests/gtest_cycle_aliases.cpp b/dbms/Interpreters/tests/gtest_cycle_aliases.cpp similarity index 100% rename from dbms/src/Interpreters/tests/gtest_cycle_aliases.cpp rename to dbms/Interpreters/tests/gtest_cycle_aliases.cpp diff --git a/dbms/src/Interpreters/tests/gtest_merge_tree_set_index.cpp b/dbms/Interpreters/tests/gtest_merge_tree_set_index.cpp similarity index 100% rename from dbms/src/Interpreters/tests/gtest_merge_tree_set_index.cpp rename to dbms/Interpreters/tests/gtest_merge_tree_set_index.cpp diff --git a/dbms/src/Interpreters/tests/hash_map.cpp b/dbms/Interpreters/tests/hash_map.cpp similarity index 100% rename from dbms/src/Interpreters/tests/hash_map.cpp rename to dbms/Interpreters/tests/hash_map.cpp diff --git a/dbms/src/Interpreters/tests/hash_map3.cpp b/dbms/Interpreters/tests/hash_map3.cpp similarity index 100% rename from dbms/src/Interpreters/tests/hash_map3.cpp rename to dbms/Interpreters/tests/hash_map3.cpp diff --git a/dbms/src/Interpreters/tests/hash_map_lookup.cpp b/dbms/Interpreters/tests/hash_map_lookup.cpp similarity index 100% rename from dbms/src/Interpreters/tests/hash_map_lookup.cpp rename to dbms/Interpreters/tests/hash_map_lookup.cpp diff --git 
a/dbms/src/Interpreters/tests/hash_map_string.cpp b/dbms/Interpreters/tests/hash_map_string.cpp similarity index 100% rename from dbms/src/Interpreters/tests/hash_map_string.cpp rename to dbms/Interpreters/tests/hash_map_string.cpp diff --git a/dbms/src/Interpreters/tests/hash_map_string_2.cpp b/dbms/Interpreters/tests/hash_map_string_2.cpp similarity index 100% rename from dbms/src/Interpreters/tests/hash_map_string_2.cpp rename to dbms/Interpreters/tests/hash_map_string_2.cpp diff --git a/dbms/src/Interpreters/tests/hash_map_string_3.cpp b/dbms/Interpreters/tests/hash_map_string_3.cpp similarity index 100% rename from dbms/src/Interpreters/tests/hash_map_string_3.cpp rename to dbms/Interpreters/tests/hash_map_string_3.cpp diff --git a/dbms/src/Interpreters/tests/hash_map_string_small.cpp b/dbms/Interpreters/tests/hash_map_string_small.cpp similarity index 100% rename from dbms/src/Interpreters/tests/hash_map_string_small.cpp rename to dbms/Interpreters/tests/hash_map_string_small.cpp diff --git a/dbms/src/Interpreters/tests/in_join_subqueries_preprocessor.cpp b/dbms/Interpreters/tests/in_join_subqueries_preprocessor.cpp similarity index 100% rename from dbms/src/Interpreters/tests/in_join_subqueries_preprocessor.cpp rename to dbms/Interpreters/tests/in_join_subqueries_preprocessor.cpp diff --git a/dbms/src/Interpreters/tests/internal_iotop.cpp b/dbms/Interpreters/tests/internal_iotop.cpp similarity index 100% rename from dbms/src/Interpreters/tests/internal_iotop.cpp rename to dbms/Interpreters/tests/internal_iotop.cpp diff --git a/dbms/src/Interpreters/tests/logical_expressions_optimizer.cpp b/dbms/Interpreters/tests/logical_expressions_optimizer.cpp similarity index 100% rename from dbms/src/Interpreters/tests/logical_expressions_optimizer.cpp rename to dbms/Interpreters/tests/logical_expressions_optimizer.cpp diff --git a/dbms/src/Interpreters/tests/select_query.cpp b/dbms/Interpreters/tests/select_query.cpp similarity index 100% rename from dbms/src/Interpreters/tests/select_query.cpp rename to dbms/Interpreters/tests/select_query.cpp diff --git a/dbms/src/Interpreters/tests/string_hash_map.cpp b/dbms/Interpreters/tests/string_hash_map.cpp similarity index 100% rename from dbms/src/Interpreters/tests/string_hash_map.cpp rename to dbms/Interpreters/tests/string_hash_map.cpp diff --git a/dbms/src/Interpreters/tests/two_level_hash_map.cpp b/dbms/Interpreters/tests/two_level_hash_map.cpp similarity index 100% rename from dbms/src/Interpreters/tests/two_level_hash_map.cpp rename to dbms/Interpreters/tests/two_level_hash_map.cpp diff --git a/dbms/src/Interpreters/tests/users.cpp b/dbms/Interpreters/tests/users.cpp similarity index 100% rename from dbms/src/Interpreters/tests/users.cpp rename to dbms/Interpreters/tests/users.cpp diff --git a/dbms/src/NOTICE b/dbms/NOTICE similarity index 100% rename from dbms/src/NOTICE rename to dbms/NOTICE diff --git a/dbms/src/Parsers/ASTAlterQuery.cpp b/dbms/Parsers/ASTAlterQuery.cpp similarity index 100% rename from dbms/src/Parsers/ASTAlterQuery.cpp rename to dbms/Parsers/ASTAlterQuery.cpp diff --git a/dbms/src/Parsers/ASTAlterQuery.h b/dbms/Parsers/ASTAlterQuery.h similarity index 100% rename from dbms/src/Parsers/ASTAlterQuery.h rename to dbms/Parsers/ASTAlterQuery.h diff --git a/dbms/src/Parsers/ASTAssignment.h b/dbms/Parsers/ASTAssignment.h similarity index 100% rename from dbms/src/Parsers/ASTAssignment.h rename to dbms/Parsers/ASTAssignment.h diff --git a/dbms/src/Parsers/ASTAsterisk.cpp b/dbms/Parsers/ASTAsterisk.cpp similarity index 100% 
rename from dbms/src/Parsers/ASTAsterisk.cpp rename to dbms/Parsers/ASTAsterisk.cpp diff --git a/dbms/src/Parsers/ASTAsterisk.h b/dbms/Parsers/ASTAsterisk.h similarity index 100% rename from dbms/src/Parsers/ASTAsterisk.h rename to dbms/Parsers/ASTAsterisk.h diff --git a/dbms/src/Parsers/ASTCheckQuery.h b/dbms/Parsers/ASTCheckQuery.h similarity index 100% rename from dbms/src/Parsers/ASTCheckQuery.h rename to dbms/Parsers/ASTCheckQuery.h diff --git a/dbms/src/Parsers/ASTColumnDeclaration.cpp b/dbms/Parsers/ASTColumnDeclaration.cpp similarity index 100% rename from dbms/src/Parsers/ASTColumnDeclaration.cpp rename to dbms/Parsers/ASTColumnDeclaration.cpp diff --git a/dbms/src/Parsers/ASTColumnDeclaration.h b/dbms/Parsers/ASTColumnDeclaration.h similarity index 100% rename from dbms/src/Parsers/ASTColumnDeclaration.h rename to dbms/Parsers/ASTColumnDeclaration.h diff --git a/dbms/src/Parsers/ASTColumnsMatcher.cpp b/dbms/Parsers/ASTColumnsMatcher.cpp similarity index 100% rename from dbms/src/Parsers/ASTColumnsMatcher.cpp rename to dbms/Parsers/ASTColumnsMatcher.cpp diff --git a/dbms/src/Parsers/ASTColumnsMatcher.h b/dbms/Parsers/ASTColumnsMatcher.h similarity index 100% rename from dbms/src/Parsers/ASTColumnsMatcher.h rename to dbms/Parsers/ASTColumnsMatcher.h diff --git a/dbms/src/Parsers/ASTConstraintDeclaration.cpp b/dbms/Parsers/ASTConstraintDeclaration.cpp similarity index 100% rename from dbms/src/Parsers/ASTConstraintDeclaration.cpp rename to dbms/Parsers/ASTConstraintDeclaration.cpp diff --git a/dbms/src/Parsers/ASTConstraintDeclaration.h b/dbms/Parsers/ASTConstraintDeclaration.h similarity index 100% rename from dbms/src/Parsers/ASTConstraintDeclaration.h rename to dbms/Parsers/ASTConstraintDeclaration.h diff --git a/dbms/src/Parsers/ASTCreateQuery.cpp b/dbms/Parsers/ASTCreateQuery.cpp similarity index 100% rename from dbms/src/Parsers/ASTCreateQuery.cpp rename to dbms/Parsers/ASTCreateQuery.cpp diff --git a/dbms/src/Parsers/ASTCreateQuery.h b/dbms/Parsers/ASTCreateQuery.h similarity index 100% rename from dbms/src/Parsers/ASTCreateQuery.h rename to dbms/Parsers/ASTCreateQuery.h diff --git a/dbms/src/Parsers/ASTCreateQuotaQuery.cpp b/dbms/Parsers/ASTCreateQuotaQuery.cpp similarity index 100% rename from dbms/src/Parsers/ASTCreateQuotaQuery.cpp rename to dbms/Parsers/ASTCreateQuotaQuery.cpp diff --git a/dbms/src/Parsers/ASTCreateQuotaQuery.h b/dbms/Parsers/ASTCreateQuotaQuery.h similarity index 100% rename from dbms/src/Parsers/ASTCreateQuotaQuery.h rename to dbms/Parsers/ASTCreateQuotaQuery.h diff --git a/dbms/src/Parsers/ASTCreateRoleQuery.cpp b/dbms/Parsers/ASTCreateRoleQuery.cpp similarity index 100% rename from dbms/src/Parsers/ASTCreateRoleQuery.cpp rename to dbms/Parsers/ASTCreateRoleQuery.cpp diff --git a/dbms/src/Parsers/ASTCreateRoleQuery.h b/dbms/Parsers/ASTCreateRoleQuery.h similarity index 100% rename from dbms/src/Parsers/ASTCreateRoleQuery.h rename to dbms/Parsers/ASTCreateRoleQuery.h diff --git a/dbms/src/Parsers/ASTCreateRowPolicyQuery.cpp b/dbms/Parsers/ASTCreateRowPolicyQuery.cpp similarity index 100% rename from dbms/src/Parsers/ASTCreateRowPolicyQuery.cpp rename to dbms/Parsers/ASTCreateRowPolicyQuery.cpp diff --git a/dbms/src/Parsers/ASTCreateRowPolicyQuery.h b/dbms/Parsers/ASTCreateRowPolicyQuery.h similarity index 100% rename from dbms/src/Parsers/ASTCreateRowPolicyQuery.h rename to dbms/Parsers/ASTCreateRowPolicyQuery.h diff --git a/dbms/src/Parsers/ASTCreateSettingsProfileQuery.cpp b/dbms/Parsers/ASTCreateSettingsProfileQuery.cpp similarity index 100% rename 
from dbms/src/Parsers/ASTCreateSettingsProfileQuery.cpp rename to dbms/Parsers/ASTCreateSettingsProfileQuery.cpp diff --git a/dbms/src/Parsers/ASTCreateSettingsProfileQuery.h b/dbms/Parsers/ASTCreateSettingsProfileQuery.h similarity index 100% rename from dbms/src/Parsers/ASTCreateSettingsProfileQuery.h rename to dbms/Parsers/ASTCreateSettingsProfileQuery.h diff --git a/dbms/src/Parsers/ASTCreateUserQuery.cpp b/dbms/Parsers/ASTCreateUserQuery.cpp similarity index 100% rename from dbms/src/Parsers/ASTCreateUserQuery.cpp rename to dbms/Parsers/ASTCreateUserQuery.cpp diff --git a/dbms/src/Parsers/ASTCreateUserQuery.h b/dbms/Parsers/ASTCreateUserQuery.h similarity index 100% rename from dbms/src/Parsers/ASTCreateUserQuery.h rename to dbms/Parsers/ASTCreateUserQuery.h diff --git a/dbms/src/Parsers/ASTDictionary.cpp b/dbms/Parsers/ASTDictionary.cpp similarity index 100% rename from dbms/src/Parsers/ASTDictionary.cpp rename to dbms/Parsers/ASTDictionary.cpp diff --git a/dbms/src/Parsers/ASTDictionary.h b/dbms/Parsers/ASTDictionary.h similarity index 100% rename from dbms/src/Parsers/ASTDictionary.h rename to dbms/Parsers/ASTDictionary.h diff --git a/dbms/src/Parsers/ASTDictionaryAttributeDeclaration.cpp b/dbms/Parsers/ASTDictionaryAttributeDeclaration.cpp similarity index 100% rename from dbms/src/Parsers/ASTDictionaryAttributeDeclaration.cpp rename to dbms/Parsers/ASTDictionaryAttributeDeclaration.cpp diff --git a/dbms/src/Parsers/ASTDictionaryAttributeDeclaration.h b/dbms/Parsers/ASTDictionaryAttributeDeclaration.h similarity index 100% rename from dbms/src/Parsers/ASTDictionaryAttributeDeclaration.h rename to dbms/Parsers/ASTDictionaryAttributeDeclaration.h diff --git a/dbms/src/Parsers/ASTDropAccessEntityQuery.cpp b/dbms/Parsers/ASTDropAccessEntityQuery.cpp similarity index 100% rename from dbms/src/Parsers/ASTDropAccessEntityQuery.cpp rename to dbms/Parsers/ASTDropAccessEntityQuery.cpp diff --git a/dbms/src/Parsers/ASTDropAccessEntityQuery.h b/dbms/Parsers/ASTDropAccessEntityQuery.h similarity index 100% rename from dbms/src/Parsers/ASTDropAccessEntityQuery.h rename to dbms/Parsers/ASTDropAccessEntityQuery.h diff --git a/dbms/src/Parsers/ASTDropQuery.cpp b/dbms/Parsers/ASTDropQuery.cpp similarity index 100% rename from dbms/src/Parsers/ASTDropQuery.cpp rename to dbms/Parsers/ASTDropQuery.cpp diff --git a/dbms/src/Parsers/ASTDropQuery.h b/dbms/Parsers/ASTDropQuery.h similarity index 100% rename from dbms/src/Parsers/ASTDropQuery.h rename to dbms/Parsers/ASTDropQuery.h diff --git a/dbms/src/Parsers/ASTEnumElement.h b/dbms/Parsers/ASTEnumElement.h similarity index 100% rename from dbms/src/Parsers/ASTEnumElement.h rename to dbms/Parsers/ASTEnumElement.h diff --git a/dbms/src/Parsers/ASTExplainQuery.h b/dbms/Parsers/ASTExplainQuery.h similarity index 100% rename from dbms/src/Parsers/ASTExplainQuery.h rename to dbms/Parsers/ASTExplainQuery.h diff --git a/dbms/src/Parsers/ASTExpressionList.cpp b/dbms/Parsers/ASTExpressionList.cpp similarity index 100% rename from dbms/src/Parsers/ASTExpressionList.cpp rename to dbms/Parsers/ASTExpressionList.cpp diff --git a/dbms/src/Parsers/ASTExpressionList.h b/dbms/Parsers/ASTExpressionList.h similarity index 100% rename from dbms/src/Parsers/ASTExpressionList.h rename to dbms/Parsers/ASTExpressionList.h diff --git a/dbms/src/Parsers/ASTExtendedRoleSet.cpp b/dbms/Parsers/ASTExtendedRoleSet.cpp similarity index 100% rename from dbms/src/Parsers/ASTExtendedRoleSet.cpp rename to dbms/Parsers/ASTExtendedRoleSet.cpp diff --git 
a/dbms/src/Parsers/ASTExtendedRoleSet.h b/dbms/Parsers/ASTExtendedRoleSet.h similarity index 100% rename from dbms/src/Parsers/ASTExtendedRoleSet.h rename to dbms/Parsers/ASTExtendedRoleSet.h diff --git a/dbms/src/Parsers/ASTFunction.cpp b/dbms/Parsers/ASTFunction.cpp similarity index 100% rename from dbms/src/Parsers/ASTFunction.cpp rename to dbms/Parsers/ASTFunction.cpp diff --git a/dbms/src/Parsers/ASTFunction.h b/dbms/Parsers/ASTFunction.h similarity index 100% rename from dbms/src/Parsers/ASTFunction.h rename to dbms/Parsers/ASTFunction.h diff --git a/dbms/src/Parsers/ASTFunctionWithKeyValueArguments.cpp b/dbms/Parsers/ASTFunctionWithKeyValueArguments.cpp similarity index 100% rename from dbms/src/Parsers/ASTFunctionWithKeyValueArguments.cpp rename to dbms/Parsers/ASTFunctionWithKeyValueArguments.cpp diff --git a/dbms/src/Parsers/ASTFunctionWithKeyValueArguments.h b/dbms/Parsers/ASTFunctionWithKeyValueArguments.h similarity index 100% rename from dbms/src/Parsers/ASTFunctionWithKeyValueArguments.h rename to dbms/Parsers/ASTFunctionWithKeyValueArguments.h diff --git a/dbms/src/Parsers/ASTGrantQuery.cpp b/dbms/Parsers/ASTGrantQuery.cpp similarity index 100% rename from dbms/src/Parsers/ASTGrantQuery.cpp rename to dbms/Parsers/ASTGrantQuery.cpp diff --git a/dbms/src/Parsers/ASTGrantQuery.h b/dbms/Parsers/ASTGrantQuery.h similarity index 100% rename from dbms/src/Parsers/ASTGrantQuery.h rename to dbms/Parsers/ASTGrantQuery.h diff --git a/dbms/src/Parsers/ASTIdentifier.cpp b/dbms/Parsers/ASTIdentifier.cpp similarity index 100% rename from dbms/src/Parsers/ASTIdentifier.cpp rename to dbms/Parsers/ASTIdentifier.cpp diff --git a/dbms/src/Parsers/ASTIdentifier.h b/dbms/Parsers/ASTIdentifier.h similarity index 100% rename from dbms/src/Parsers/ASTIdentifier.h rename to dbms/Parsers/ASTIdentifier.h diff --git a/dbms/src/Parsers/ASTIndexDeclaration.h b/dbms/Parsers/ASTIndexDeclaration.h similarity index 100% rename from dbms/src/Parsers/ASTIndexDeclaration.h rename to dbms/Parsers/ASTIndexDeclaration.h diff --git a/dbms/src/Parsers/ASTInsertQuery.cpp b/dbms/Parsers/ASTInsertQuery.cpp similarity index 100% rename from dbms/src/Parsers/ASTInsertQuery.cpp rename to dbms/Parsers/ASTInsertQuery.cpp diff --git a/dbms/src/Parsers/ASTInsertQuery.h b/dbms/Parsers/ASTInsertQuery.h similarity index 100% rename from dbms/src/Parsers/ASTInsertQuery.h rename to dbms/Parsers/ASTInsertQuery.h diff --git a/dbms/src/Parsers/ASTKillQueryQuery.cpp b/dbms/Parsers/ASTKillQueryQuery.cpp similarity index 100% rename from dbms/src/Parsers/ASTKillQueryQuery.cpp rename to dbms/Parsers/ASTKillQueryQuery.cpp diff --git a/dbms/src/Parsers/ASTKillQueryQuery.h b/dbms/Parsers/ASTKillQueryQuery.h similarity index 100% rename from dbms/src/Parsers/ASTKillQueryQuery.h rename to dbms/Parsers/ASTKillQueryQuery.h diff --git a/dbms/src/Parsers/ASTLiteral.cpp b/dbms/Parsers/ASTLiteral.cpp similarity index 100% rename from dbms/src/Parsers/ASTLiteral.cpp rename to dbms/Parsers/ASTLiteral.cpp diff --git a/dbms/src/Parsers/ASTLiteral.h b/dbms/Parsers/ASTLiteral.h similarity index 100% rename from dbms/src/Parsers/ASTLiteral.h rename to dbms/Parsers/ASTLiteral.h diff --git a/dbms/src/Parsers/ASTNameTypePair.h b/dbms/Parsers/ASTNameTypePair.h similarity index 100% rename from dbms/src/Parsers/ASTNameTypePair.h rename to dbms/Parsers/ASTNameTypePair.h diff --git a/dbms/src/Parsers/ASTOptimizeQuery.cpp b/dbms/Parsers/ASTOptimizeQuery.cpp similarity index 100% rename from dbms/src/Parsers/ASTOptimizeQuery.cpp rename to 
dbms/Parsers/ASTOptimizeQuery.cpp diff --git a/dbms/src/Parsers/ASTOptimizeQuery.h b/dbms/Parsers/ASTOptimizeQuery.h similarity index 100% rename from dbms/src/Parsers/ASTOptimizeQuery.h rename to dbms/Parsers/ASTOptimizeQuery.h diff --git a/dbms/src/Parsers/ASTOrderByElement.cpp b/dbms/Parsers/ASTOrderByElement.cpp similarity index 100% rename from dbms/src/Parsers/ASTOrderByElement.cpp rename to dbms/Parsers/ASTOrderByElement.cpp diff --git a/dbms/src/Parsers/ASTOrderByElement.h b/dbms/Parsers/ASTOrderByElement.h similarity index 100% rename from dbms/src/Parsers/ASTOrderByElement.h rename to dbms/Parsers/ASTOrderByElement.h diff --git a/dbms/src/Parsers/ASTPartition.cpp b/dbms/Parsers/ASTPartition.cpp similarity index 100% rename from dbms/src/Parsers/ASTPartition.cpp rename to dbms/Parsers/ASTPartition.cpp diff --git a/dbms/src/Parsers/ASTPartition.h b/dbms/Parsers/ASTPartition.h similarity index 100% rename from dbms/src/Parsers/ASTPartition.h rename to dbms/Parsers/ASTPartition.h diff --git a/dbms/src/Parsers/ASTQualifiedAsterisk.cpp b/dbms/Parsers/ASTQualifiedAsterisk.cpp similarity index 100% rename from dbms/src/Parsers/ASTQualifiedAsterisk.cpp rename to dbms/Parsers/ASTQualifiedAsterisk.cpp diff --git a/dbms/src/Parsers/ASTQualifiedAsterisk.h b/dbms/Parsers/ASTQualifiedAsterisk.h similarity index 100% rename from dbms/src/Parsers/ASTQualifiedAsterisk.h rename to dbms/Parsers/ASTQualifiedAsterisk.h diff --git a/dbms/src/Parsers/ASTQueryParameter.cpp b/dbms/Parsers/ASTQueryParameter.cpp similarity index 100% rename from dbms/src/Parsers/ASTQueryParameter.cpp rename to dbms/Parsers/ASTQueryParameter.cpp diff --git a/dbms/src/Parsers/ASTQueryParameter.h b/dbms/Parsers/ASTQueryParameter.h similarity index 100% rename from dbms/src/Parsers/ASTQueryParameter.h rename to dbms/Parsers/ASTQueryParameter.h diff --git a/dbms/src/Parsers/ASTQueryWithOnCluster.cpp b/dbms/Parsers/ASTQueryWithOnCluster.cpp similarity index 100% rename from dbms/src/Parsers/ASTQueryWithOnCluster.cpp rename to dbms/Parsers/ASTQueryWithOnCluster.cpp diff --git a/dbms/src/Parsers/ASTQueryWithOnCluster.h b/dbms/Parsers/ASTQueryWithOnCluster.h similarity index 100% rename from dbms/src/Parsers/ASTQueryWithOnCluster.h rename to dbms/Parsers/ASTQueryWithOnCluster.h diff --git a/dbms/src/Parsers/ASTQueryWithOutput.cpp b/dbms/Parsers/ASTQueryWithOutput.cpp similarity index 100% rename from dbms/src/Parsers/ASTQueryWithOutput.cpp rename to dbms/Parsers/ASTQueryWithOutput.cpp diff --git a/dbms/src/Parsers/ASTQueryWithOutput.h b/dbms/Parsers/ASTQueryWithOutput.h similarity index 100% rename from dbms/src/Parsers/ASTQueryWithOutput.h rename to dbms/Parsers/ASTQueryWithOutput.h diff --git a/dbms/src/Parsers/ASTQueryWithTableAndOutput.cpp b/dbms/Parsers/ASTQueryWithTableAndOutput.cpp similarity index 100% rename from dbms/src/Parsers/ASTQueryWithTableAndOutput.cpp rename to dbms/Parsers/ASTQueryWithTableAndOutput.cpp diff --git a/dbms/src/Parsers/ASTQueryWithTableAndOutput.h b/dbms/Parsers/ASTQueryWithTableAndOutput.h similarity index 100% rename from dbms/src/Parsers/ASTQueryWithTableAndOutput.h rename to dbms/Parsers/ASTQueryWithTableAndOutput.h diff --git a/dbms/src/Parsers/ASTRenameQuery.h b/dbms/Parsers/ASTRenameQuery.h similarity index 100% rename from dbms/src/Parsers/ASTRenameQuery.h rename to dbms/Parsers/ASTRenameQuery.h diff --git a/dbms/src/Parsers/ASTSampleRatio.cpp b/dbms/Parsers/ASTSampleRatio.cpp similarity index 100% rename from dbms/src/Parsers/ASTSampleRatio.cpp rename to dbms/Parsers/ASTSampleRatio.cpp diff 
--git a/dbms/src/Parsers/ASTSampleRatio.h b/dbms/Parsers/ASTSampleRatio.h similarity index 100% rename from dbms/src/Parsers/ASTSampleRatio.h rename to dbms/Parsers/ASTSampleRatio.h diff --git a/dbms/src/Parsers/ASTSelectQuery.cpp b/dbms/Parsers/ASTSelectQuery.cpp similarity index 100% rename from dbms/src/Parsers/ASTSelectQuery.cpp rename to dbms/Parsers/ASTSelectQuery.cpp diff --git a/dbms/src/Parsers/ASTSelectQuery.h b/dbms/Parsers/ASTSelectQuery.h similarity index 100% rename from dbms/src/Parsers/ASTSelectQuery.h rename to dbms/Parsers/ASTSelectQuery.h diff --git a/dbms/src/Parsers/ASTSelectWithUnionQuery.cpp b/dbms/Parsers/ASTSelectWithUnionQuery.cpp similarity index 100% rename from dbms/src/Parsers/ASTSelectWithUnionQuery.cpp rename to dbms/Parsers/ASTSelectWithUnionQuery.cpp diff --git a/dbms/src/Parsers/ASTSelectWithUnionQuery.h b/dbms/Parsers/ASTSelectWithUnionQuery.h similarity index 100% rename from dbms/src/Parsers/ASTSelectWithUnionQuery.h rename to dbms/Parsers/ASTSelectWithUnionQuery.h diff --git a/dbms/src/Parsers/ASTSetQuery.h b/dbms/Parsers/ASTSetQuery.h similarity index 100% rename from dbms/src/Parsers/ASTSetQuery.h rename to dbms/Parsers/ASTSetQuery.h diff --git a/dbms/src/Parsers/ASTSetRoleQuery.cpp b/dbms/Parsers/ASTSetRoleQuery.cpp similarity index 100% rename from dbms/src/Parsers/ASTSetRoleQuery.cpp rename to dbms/Parsers/ASTSetRoleQuery.cpp diff --git a/dbms/src/Parsers/ASTSetRoleQuery.h b/dbms/Parsers/ASTSetRoleQuery.h similarity index 100% rename from dbms/src/Parsers/ASTSetRoleQuery.h rename to dbms/Parsers/ASTSetRoleQuery.h diff --git a/dbms/src/Parsers/ASTSettingsProfileElement.cpp b/dbms/Parsers/ASTSettingsProfileElement.cpp similarity index 100% rename from dbms/src/Parsers/ASTSettingsProfileElement.cpp rename to dbms/Parsers/ASTSettingsProfileElement.cpp diff --git a/dbms/src/Parsers/ASTSettingsProfileElement.h b/dbms/Parsers/ASTSettingsProfileElement.h similarity index 100% rename from dbms/src/Parsers/ASTSettingsProfileElement.h rename to dbms/Parsers/ASTSettingsProfileElement.h diff --git a/dbms/src/Parsers/ASTShowCreateAccessEntityQuery.cpp b/dbms/Parsers/ASTShowCreateAccessEntityQuery.cpp similarity index 100% rename from dbms/src/Parsers/ASTShowCreateAccessEntityQuery.cpp rename to dbms/Parsers/ASTShowCreateAccessEntityQuery.cpp diff --git a/dbms/src/Parsers/ASTShowCreateAccessEntityQuery.h b/dbms/Parsers/ASTShowCreateAccessEntityQuery.h similarity index 100% rename from dbms/src/Parsers/ASTShowCreateAccessEntityQuery.h rename to dbms/Parsers/ASTShowCreateAccessEntityQuery.h diff --git a/dbms/src/Parsers/ASTShowGrantsQuery.cpp b/dbms/Parsers/ASTShowGrantsQuery.cpp similarity index 100% rename from dbms/src/Parsers/ASTShowGrantsQuery.cpp rename to dbms/Parsers/ASTShowGrantsQuery.cpp diff --git a/dbms/src/Parsers/ASTShowGrantsQuery.h b/dbms/Parsers/ASTShowGrantsQuery.h similarity index 100% rename from dbms/src/Parsers/ASTShowGrantsQuery.h rename to dbms/Parsers/ASTShowGrantsQuery.h diff --git a/dbms/src/Parsers/ASTShowProcesslistQuery.h b/dbms/Parsers/ASTShowProcesslistQuery.h similarity index 100% rename from dbms/src/Parsers/ASTShowProcesslistQuery.h rename to dbms/Parsers/ASTShowProcesslistQuery.h diff --git a/dbms/src/Parsers/ASTShowQuotasQuery.cpp b/dbms/Parsers/ASTShowQuotasQuery.cpp similarity index 100% rename from dbms/src/Parsers/ASTShowQuotasQuery.cpp rename to dbms/Parsers/ASTShowQuotasQuery.cpp diff --git a/dbms/src/Parsers/ASTShowQuotasQuery.h b/dbms/Parsers/ASTShowQuotasQuery.h similarity index 100% rename from 
dbms/src/Parsers/ASTShowQuotasQuery.h
rename to dbms/Parsers/ASTShowQuotasQuery.h
diff --git a/dbms/src/Parsers/ASTShowRowPoliciesQuery.cpp b/dbms/Parsers/ASTShowRowPoliciesQuery.cpp
similarity index 100%
rename from dbms/src/Parsers/ASTShowRowPoliciesQuery.cpp
rename to dbms/Parsers/ASTShowRowPoliciesQuery.cpp
diff --git a/dbms/src/Parsers/ASTShowRowPoliciesQuery.h b/dbms/Parsers/ASTShowRowPoliciesQuery.h
similarity index 100%
rename from dbms/src/Parsers/ASTShowRowPoliciesQuery.h
rename to dbms/Parsers/ASTShowRowPoliciesQuery.h
diff --git a/dbms/src/Parsers/ASTShowTablesQuery.cpp b/dbms/Parsers/ASTShowTablesQuery.cpp
similarity index 100%
rename from dbms/src/Parsers/ASTShowTablesQuery.cpp
rename to dbms/Parsers/ASTShowTablesQuery.cpp
diff --git a/dbms/src/Parsers/ASTShowTablesQuery.h b/dbms/Parsers/ASTShowTablesQuery.h
similarity index 100%
rename from dbms/src/Parsers/ASTShowTablesQuery.h
rename to dbms/Parsers/ASTShowTablesQuery.h
diff --git a/dbms/src/Parsers/ASTSubquery.cpp b/dbms/Parsers/ASTSubquery.cpp
similarity index 100%
rename from dbms/src/Parsers/ASTSubquery.cpp
rename to dbms/Parsers/ASTSubquery.cpp
diff --git a/dbms/src/Parsers/ASTSubquery.h b/dbms/Parsers/ASTSubquery.h
similarity index 100%
rename from dbms/src/Parsers/ASTSubquery.h
rename to dbms/Parsers/ASTSubquery.h
diff --git a/dbms/src/Parsers/ASTSystemQuery.cpp b/dbms/Parsers/ASTSystemQuery.cpp
similarity index 100%
rename from dbms/src/Parsers/ASTSystemQuery.cpp
rename to dbms/Parsers/ASTSystemQuery.cpp
diff --git a/dbms/src/Parsers/ASTSystemQuery.h b/dbms/Parsers/ASTSystemQuery.h
similarity index 100%
rename from dbms/src/Parsers/ASTSystemQuery.h
rename to dbms/Parsers/ASTSystemQuery.h
diff --git a/dbms/src/Parsers/ASTTTLElement.cpp b/dbms/Parsers/ASTTTLElement.cpp
similarity index 100%
rename from dbms/src/Parsers/ASTTTLElement.cpp
rename to dbms/Parsers/ASTTTLElement.cpp
diff --git a/dbms/src/Parsers/ASTTTLElement.h b/dbms/Parsers/ASTTTLElement.h
similarity index 100%
rename from dbms/src/Parsers/ASTTTLElement.h
rename to dbms/Parsers/ASTTTLElement.h
diff --git a/dbms/src/Parsers/ASTTablesInSelectQuery.cpp b/dbms/Parsers/ASTTablesInSelectQuery.cpp
similarity index 100%
rename from dbms/src/Parsers/ASTTablesInSelectQuery.cpp
rename to dbms/Parsers/ASTTablesInSelectQuery.cpp
diff --git a/dbms/src/Parsers/ASTTablesInSelectQuery.h b/dbms/Parsers/ASTTablesInSelectQuery.h
similarity index 100%
rename from dbms/src/Parsers/ASTTablesInSelectQuery.h
rename to dbms/Parsers/ASTTablesInSelectQuery.h
diff --git a/dbms/src/Parsers/ASTUseQuery.h b/dbms/Parsers/ASTUseQuery.h
similarity index 100%
rename from dbms/src/Parsers/ASTUseQuery.h
rename to dbms/Parsers/ASTUseQuery.h
diff --git a/dbms/src/Parsers/ASTWatchQuery.h b/dbms/Parsers/ASTWatchQuery.h
similarity index 100%
rename from dbms/src/Parsers/ASTWatchQuery.h
rename to dbms/Parsers/ASTWatchQuery.h
diff --git a/dbms/src/Parsers/ASTWithAlias.cpp b/dbms/Parsers/ASTWithAlias.cpp
similarity index 100%
rename from dbms/src/Parsers/ASTWithAlias.cpp
rename to dbms/Parsers/ASTWithAlias.cpp
diff --git a/dbms/src/Parsers/ASTWithAlias.h b/dbms/Parsers/ASTWithAlias.h
similarity index 100%
rename from dbms/src/Parsers/ASTWithAlias.h
rename to dbms/Parsers/ASTWithAlias.h
diff --git a/dbms/src/Parsers/CMakeLists.txt b/dbms/Parsers/CMakeLists.txt
similarity index 87%
rename from dbms/src/Parsers/CMakeLists.txt
rename to dbms/Parsers/CMakeLists.txt
index 086384196aa..6424cdfe9ea 100644
--- a/dbms/src/Parsers/CMakeLists.txt
+++ b/dbms/Parsers/CMakeLists.txt
@@ -5,7 +5,7 @@
 target_link_libraries(clickhouse_parsers PUBLIC clickhouse_common_io)
 target_include_directories(clickhouse_parsers PUBLIC ${DBMS_INCLUDE_DIR})
 
 if (USE_DEBUG_HELPERS)
-    set (INCLUDE_DEBUG_HELPERS "-I${ClickHouse_SOURCE_DIR}/base -include ${ClickHouse_SOURCE_DIR}/dbms/src/Parsers/iostream_debug_helpers.h")
+    set (INCLUDE_DEBUG_HELPERS "-I${ClickHouse_SOURCE_DIR}/base -include ${ClickHouse_SOURCE_DIR}/dbms/Parsers/iostream_debug_helpers.h")
     set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${INCLUDE_DEBUG_HELPERS}")
 endif ()
diff --git a/dbms/src/Parsers/CommonParsers.cpp b/dbms/Parsers/CommonParsers.cpp
similarity index 100%
rename from dbms/src/Parsers/CommonParsers.cpp
rename to dbms/Parsers/CommonParsers.cpp
diff --git a/dbms/src/Parsers/CommonParsers.h b/dbms/Parsers/CommonParsers.h
similarity index 100%
rename from dbms/src/Parsers/CommonParsers.h
rename to dbms/Parsers/CommonParsers.h
diff --git a/dbms/src/Parsers/DumpASTNode.h b/dbms/Parsers/DumpASTNode.h
similarity index 100%
rename from dbms/src/Parsers/DumpASTNode.h
rename to dbms/Parsers/DumpASTNode.h
diff --git a/dbms/src/Parsers/ExpressionElementParsers.cpp b/dbms/Parsers/ExpressionElementParsers.cpp
similarity index 100%
rename from dbms/src/Parsers/ExpressionElementParsers.cpp
rename to dbms/Parsers/ExpressionElementParsers.cpp
diff --git a/dbms/src/Parsers/ExpressionElementParsers.h b/dbms/Parsers/ExpressionElementParsers.h
similarity index 100%
rename from dbms/src/Parsers/ExpressionElementParsers.h
rename to dbms/Parsers/ExpressionElementParsers.h
diff --git a/dbms/src/Parsers/ExpressionListParsers.cpp b/dbms/Parsers/ExpressionListParsers.cpp
similarity index 100%
rename from dbms/src/Parsers/ExpressionListParsers.cpp
rename to dbms/Parsers/ExpressionListParsers.cpp
diff --git a/dbms/src/Parsers/ExpressionListParsers.h b/dbms/Parsers/ExpressionListParsers.h
similarity index 100%
rename from dbms/src/Parsers/ExpressionListParsers.h
rename to dbms/Parsers/ExpressionListParsers.h
diff --git a/dbms/src/Parsers/IAST.cpp b/dbms/Parsers/IAST.cpp
similarity index 100%
rename from dbms/src/Parsers/IAST.cpp
rename to dbms/Parsers/IAST.cpp
diff --git a/dbms/src/Parsers/IAST.h b/dbms/Parsers/IAST.h
similarity index 100%
rename from dbms/src/Parsers/IAST.h
rename to dbms/Parsers/IAST.h
diff --git a/dbms/src/Parsers/IAST_fwd.h b/dbms/Parsers/IAST_fwd.h
similarity index 100%
rename from dbms/src/Parsers/IAST_fwd.h
rename to dbms/Parsers/IAST_fwd.h
diff --git a/dbms/src/Parsers/IParser.h b/dbms/Parsers/IParser.h
similarity index 100%
rename from dbms/src/Parsers/IParser.h
rename to dbms/Parsers/IParser.h
diff --git a/dbms/src/Parsers/IParserBase.cpp b/dbms/Parsers/IParserBase.cpp
similarity index 100%
rename from dbms/src/Parsers/IParserBase.cpp
rename to dbms/Parsers/IParserBase.cpp
diff --git a/dbms/src/Parsers/IParserBase.h b/dbms/Parsers/IParserBase.h
similarity index 100%
rename from dbms/src/Parsers/IParserBase.h
rename to dbms/Parsers/IParserBase.h
diff --git a/dbms/src/Parsers/IdentifierQuotingStyle.h b/dbms/Parsers/IdentifierQuotingStyle.h
similarity index 100%
rename from dbms/src/Parsers/IdentifierQuotingStyle.h
rename to dbms/Parsers/IdentifierQuotingStyle.h
diff --git a/dbms/src/Parsers/Lexer.cpp b/dbms/Parsers/Lexer.cpp
similarity index 100%
rename from dbms/src/Parsers/Lexer.cpp
rename to dbms/Parsers/Lexer.cpp
diff --git a/dbms/src/Parsers/Lexer.h b/dbms/Parsers/Lexer.h
similarity index 100%
rename from dbms/src/Parsers/Lexer.h
rename to dbms/Parsers/Lexer.h
diff --git a/dbms/src/Parsers/ParserAlterQuery.cpp
b/dbms/Parsers/ParserAlterQuery.cpp similarity index 100% rename from dbms/src/Parsers/ParserAlterQuery.cpp rename to dbms/Parsers/ParserAlterQuery.cpp diff --git a/dbms/src/Parsers/ParserAlterQuery.h b/dbms/Parsers/ParserAlterQuery.h similarity index 100% rename from dbms/src/Parsers/ParserAlterQuery.h rename to dbms/Parsers/ParserAlterQuery.h diff --git a/dbms/src/Parsers/ParserCase.cpp b/dbms/Parsers/ParserCase.cpp similarity index 100% rename from dbms/src/Parsers/ParserCase.cpp rename to dbms/Parsers/ParserCase.cpp diff --git a/dbms/src/Parsers/ParserCase.h b/dbms/Parsers/ParserCase.h similarity index 100% rename from dbms/src/Parsers/ParserCase.h rename to dbms/Parsers/ParserCase.h diff --git a/dbms/src/Parsers/ParserCheckQuery.cpp b/dbms/Parsers/ParserCheckQuery.cpp similarity index 100% rename from dbms/src/Parsers/ParserCheckQuery.cpp rename to dbms/Parsers/ParserCheckQuery.cpp diff --git a/dbms/src/Parsers/ParserCheckQuery.h b/dbms/Parsers/ParserCheckQuery.h similarity index 100% rename from dbms/src/Parsers/ParserCheckQuery.h rename to dbms/Parsers/ParserCheckQuery.h diff --git a/dbms/src/Parsers/ParserCreateQuery.cpp b/dbms/Parsers/ParserCreateQuery.cpp similarity index 100% rename from dbms/src/Parsers/ParserCreateQuery.cpp rename to dbms/Parsers/ParserCreateQuery.cpp diff --git a/dbms/src/Parsers/ParserCreateQuery.h b/dbms/Parsers/ParserCreateQuery.h similarity index 100% rename from dbms/src/Parsers/ParserCreateQuery.h rename to dbms/Parsers/ParserCreateQuery.h diff --git a/dbms/src/Parsers/ParserCreateQuotaQuery.cpp b/dbms/Parsers/ParserCreateQuotaQuery.cpp similarity index 100% rename from dbms/src/Parsers/ParserCreateQuotaQuery.cpp rename to dbms/Parsers/ParserCreateQuotaQuery.cpp diff --git a/dbms/src/Parsers/ParserCreateQuotaQuery.h b/dbms/Parsers/ParserCreateQuotaQuery.h similarity index 100% rename from dbms/src/Parsers/ParserCreateQuotaQuery.h rename to dbms/Parsers/ParserCreateQuotaQuery.h diff --git a/dbms/src/Parsers/ParserCreateRoleQuery.cpp b/dbms/Parsers/ParserCreateRoleQuery.cpp similarity index 100% rename from dbms/src/Parsers/ParserCreateRoleQuery.cpp rename to dbms/Parsers/ParserCreateRoleQuery.cpp diff --git a/dbms/src/Parsers/ParserCreateRoleQuery.h b/dbms/Parsers/ParserCreateRoleQuery.h similarity index 100% rename from dbms/src/Parsers/ParserCreateRoleQuery.h rename to dbms/Parsers/ParserCreateRoleQuery.h diff --git a/dbms/src/Parsers/ParserCreateRowPolicyQuery.cpp b/dbms/Parsers/ParserCreateRowPolicyQuery.cpp similarity index 100% rename from dbms/src/Parsers/ParserCreateRowPolicyQuery.cpp rename to dbms/Parsers/ParserCreateRowPolicyQuery.cpp diff --git a/dbms/src/Parsers/ParserCreateRowPolicyQuery.h b/dbms/Parsers/ParserCreateRowPolicyQuery.h similarity index 100% rename from dbms/src/Parsers/ParserCreateRowPolicyQuery.h rename to dbms/Parsers/ParserCreateRowPolicyQuery.h diff --git a/dbms/src/Parsers/ParserCreateSettingsProfileQuery.cpp b/dbms/Parsers/ParserCreateSettingsProfileQuery.cpp similarity index 100% rename from dbms/src/Parsers/ParserCreateSettingsProfileQuery.cpp rename to dbms/Parsers/ParserCreateSettingsProfileQuery.cpp diff --git a/dbms/src/Parsers/ParserCreateSettingsProfileQuery.h b/dbms/Parsers/ParserCreateSettingsProfileQuery.h similarity index 100% rename from dbms/src/Parsers/ParserCreateSettingsProfileQuery.h rename to dbms/Parsers/ParserCreateSettingsProfileQuery.h diff --git a/dbms/src/Parsers/ParserCreateUserQuery.cpp b/dbms/Parsers/ParserCreateUserQuery.cpp similarity index 100% rename from 
dbms/src/Parsers/ParserCreateUserQuery.cpp rename to dbms/Parsers/ParserCreateUserQuery.cpp diff --git a/dbms/src/Parsers/ParserCreateUserQuery.h b/dbms/Parsers/ParserCreateUserQuery.h similarity index 100% rename from dbms/src/Parsers/ParserCreateUserQuery.h rename to dbms/Parsers/ParserCreateUserQuery.h diff --git a/dbms/src/Parsers/ParserDescribeTableQuery.cpp b/dbms/Parsers/ParserDescribeTableQuery.cpp similarity index 100% rename from dbms/src/Parsers/ParserDescribeTableQuery.cpp rename to dbms/Parsers/ParserDescribeTableQuery.cpp diff --git a/dbms/src/Parsers/ParserDescribeTableQuery.h b/dbms/Parsers/ParserDescribeTableQuery.h similarity index 100% rename from dbms/src/Parsers/ParserDescribeTableQuery.h rename to dbms/Parsers/ParserDescribeTableQuery.h diff --git a/dbms/src/Parsers/ParserDictionary.cpp b/dbms/Parsers/ParserDictionary.cpp similarity index 100% rename from dbms/src/Parsers/ParserDictionary.cpp rename to dbms/Parsers/ParserDictionary.cpp diff --git a/dbms/src/Parsers/ParserDictionary.h b/dbms/Parsers/ParserDictionary.h similarity index 100% rename from dbms/src/Parsers/ParserDictionary.h rename to dbms/Parsers/ParserDictionary.h diff --git a/dbms/src/Parsers/ParserDictionaryAttributeDeclaration.cpp b/dbms/Parsers/ParserDictionaryAttributeDeclaration.cpp similarity index 100% rename from dbms/src/Parsers/ParserDictionaryAttributeDeclaration.cpp rename to dbms/Parsers/ParserDictionaryAttributeDeclaration.cpp diff --git a/dbms/src/Parsers/ParserDictionaryAttributeDeclaration.h b/dbms/Parsers/ParserDictionaryAttributeDeclaration.h similarity index 100% rename from dbms/src/Parsers/ParserDictionaryAttributeDeclaration.h rename to dbms/Parsers/ParserDictionaryAttributeDeclaration.h diff --git a/dbms/src/Parsers/ParserDropAccessEntityQuery.cpp b/dbms/Parsers/ParserDropAccessEntityQuery.cpp similarity index 100% rename from dbms/src/Parsers/ParserDropAccessEntityQuery.cpp rename to dbms/Parsers/ParserDropAccessEntityQuery.cpp diff --git a/dbms/src/Parsers/ParserDropAccessEntityQuery.h b/dbms/Parsers/ParserDropAccessEntityQuery.h similarity index 100% rename from dbms/src/Parsers/ParserDropAccessEntityQuery.h rename to dbms/Parsers/ParserDropAccessEntityQuery.h diff --git a/dbms/src/Parsers/ParserDropQuery.cpp b/dbms/Parsers/ParserDropQuery.cpp similarity index 100% rename from dbms/src/Parsers/ParserDropQuery.cpp rename to dbms/Parsers/ParserDropQuery.cpp diff --git a/dbms/src/Parsers/ParserDropQuery.h b/dbms/Parsers/ParserDropQuery.h similarity index 100% rename from dbms/src/Parsers/ParserDropQuery.h rename to dbms/Parsers/ParserDropQuery.h diff --git a/dbms/src/Parsers/ParserExtendedRoleSet.cpp b/dbms/Parsers/ParserExtendedRoleSet.cpp similarity index 100% rename from dbms/src/Parsers/ParserExtendedRoleSet.cpp rename to dbms/Parsers/ParserExtendedRoleSet.cpp diff --git a/dbms/src/Parsers/ParserExtendedRoleSet.h b/dbms/Parsers/ParserExtendedRoleSet.h similarity index 100% rename from dbms/src/Parsers/ParserExtendedRoleSet.h rename to dbms/Parsers/ParserExtendedRoleSet.h diff --git a/dbms/src/Parsers/ParserGrantQuery.cpp b/dbms/Parsers/ParserGrantQuery.cpp similarity index 100% rename from dbms/src/Parsers/ParserGrantQuery.cpp rename to dbms/Parsers/ParserGrantQuery.cpp diff --git a/dbms/src/Parsers/ParserGrantQuery.h b/dbms/Parsers/ParserGrantQuery.h similarity index 100% rename from dbms/src/Parsers/ParserGrantQuery.h rename to dbms/Parsers/ParserGrantQuery.h diff --git a/dbms/src/Parsers/ParserInsertQuery.cpp b/dbms/Parsers/ParserInsertQuery.cpp similarity index 100% rename 
from dbms/src/Parsers/ParserInsertQuery.cpp rename to dbms/Parsers/ParserInsertQuery.cpp diff --git a/dbms/src/Parsers/ParserInsertQuery.h b/dbms/Parsers/ParserInsertQuery.h similarity index 100% rename from dbms/src/Parsers/ParserInsertQuery.h rename to dbms/Parsers/ParserInsertQuery.h diff --git a/dbms/src/Parsers/ParserKillQueryQuery.cpp b/dbms/Parsers/ParserKillQueryQuery.cpp similarity index 100% rename from dbms/src/Parsers/ParserKillQueryQuery.cpp rename to dbms/Parsers/ParserKillQueryQuery.cpp diff --git a/dbms/src/Parsers/ParserKillQueryQuery.h b/dbms/Parsers/ParserKillQueryQuery.h similarity index 100% rename from dbms/src/Parsers/ParserKillQueryQuery.h rename to dbms/Parsers/ParserKillQueryQuery.h diff --git a/dbms/src/Parsers/ParserOptimizeQuery.cpp b/dbms/Parsers/ParserOptimizeQuery.cpp similarity index 100% rename from dbms/src/Parsers/ParserOptimizeQuery.cpp rename to dbms/Parsers/ParserOptimizeQuery.cpp diff --git a/dbms/src/Parsers/ParserOptimizeQuery.h b/dbms/Parsers/ParserOptimizeQuery.h similarity index 100% rename from dbms/src/Parsers/ParserOptimizeQuery.h rename to dbms/Parsers/ParserOptimizeQuery.h diff --git a/dbms/src/Parsers/ParserPartition.cpp b/dbms/Parsers/ParserPartition.cpp similarity index 100% rename from dbms/src/Parsers/ParserPartition.cpp rename to dbms/Parsers/ParserPartition.cpp diff --git a/dbms/src/Parsers/ParserPartition.h b/dbms/Parsers/ParserPartition.h similarity index 100% rename from dbms/src/Parsers/ParserPartition.h rename to dbms/Parsers/ParserPartition.h diff --git a/dbms/src/Parsers/ParserQuery.cpp b/dbms/Parsers/ParserQuery.cpp similarity index 100% rename from dbms/src/Parsers/ParserQuery.cpp rename to dbms/Parsers/ParserQuery.cpp diff --git a/dbms/src/Parsers/ParserQuery.h b/dbms/Parsers/ParserQuery.h similarity index 100% rename from dbms/src/Parsers/ParserQuery.h rename to dbms/Parsers/ParserQuery.h diff --git a/dbms/src/Parsers/ParserQueryWithOutput.cpp b/dbms/Parsers/ParserQueryWithOutput.cpp similarity index 100% rename from dbms/src/Parsers/ParserQueryWithOutput.cpp rename to dbms/Parsers/ParserQueryWithOutput.cpp diff --git a/dbms/src/Parsers/ParserQueryWithOutput.h b/dbms/Parsers/ParserQueryWithOutput.h similarity index 100% rename from dbms/src/Parsers/ParserQueryWithOutput.h rename to dbms/Parsers/ParserQueryWithOutput.h diff --git a/dbms/src/Parsers/ParserRenameQuery.cpp b/dbms/Parsers/ParserRenameQuery.cpp similarity index 100% rename from dbms/src/Parsers/ParserRenameQuery.cpp rename to dbms/Parsers/ParserRenameQuery.cpp diff --git a/dbms/src/Parsers/ParserRenameQuery.h b/dbms/Parsers/ParserRenameQuery.h similarity index 100% rename from dbms/src/Parsers/ParserRenameQuery.h rename to dbms/Parsers/ParserRenameQuery.h diff --git a/dbms/src/Parsers/ParserSampleRatio.cpp b/dbms/Parsers/ParserSampleRatio.cpp similarity index 100% rename from dbms/src/Parsers/ParserSampleRatio.cpp rename to dbms/Parsers/ParserSampleRatio.cpp diff --git a/dbms/src/Parsers/ParserSampleRatio.h b/dbms/Parsers/ParserSampleRatio.h similarity index 100% rename from dbms/src/Parsers/ParserSampleRatio.h rename to dbms/Parsers/ParserSampleRatio.h diff --git a/dbms/src/Parsers/ParserSelectQuery.cpp b/dbms/Parsers/ParserSelectQuery.cpp similarity index 100% rename from dbms/src/Parsers/ParserSelectQuery.cpp rename to dbms/Parsers/ParserSelectQuery.cpp diff --git a/dbms/src/Parsers/ParserSelectQuery.h b/dbms/Parsers/ParserSelectQuery.h similarity index 100% rename from dbms/src/Parsers/ParserSelectQuery.h rename to dbms/Parsers/ParserSelectQuery.h diff --git 
a/dbms/src/Parsers/ParserSelectWithUnionQuery.cpp b/dbms/Parsers/ParserSelectWithUnionQuery.cpp similarity index 100% rename from dbms/src/Parsers/ParserSelectWithUnionQuery.cpp rename to dbms/Parsers/ParserSelectWithUnionQuery.cpp diff --git a/dbms/src/Parsers/ParserSelectWithUnionQuery.h b/dbms/Parsers/ParserSelectWithUnionQuery.h similarity index 100% rename from dbms/src/Parsers/ParserSelectWithUnionQuery.h rename to dbms/Parsers/ParserSelectWithUnionQuery.h diff --git a/dbms/src/Parsers/ParserSetQuery.cpp b/dbms/Parsers/ParserSetQuery.cpp similarity index 100% rename from dbms/src/Parsers/ParserSetQuery.cpp rename to dbms/Parsers/ParserSetQuery.cpp diff --git a/dbms/src/Parsers/ParserSetQuery.h b/dbms/Parsers/ParserSetQuery.h similarity index 100% rename from dbms/src/Parsers/ParserSetQuery.h rename to dbms/Parsers/ParserSetQuery.h diff --git a/dbms/src/Parsers/ParserSetRoleQuery.cpp b/dbms/Parsers/ParserSetRoleQuery.cpp similarity index 100% rename from dbms/src/Parsers/ParserSetRoleQuery.cpp rename to dbms/Parsers/ParserSetRoleQuery.cpp diff --git a/dbms/src/Parsers/ParserSetRoleQuery.h b/dbms/Parsers/ParserSetRoleQuery.h similarity index 100% rename from dbms/src/Parsers/ParserSetRoleQuery.h rename to dbms/Parsers/ParserSetRoleQuery.h diff --git a/dbms/src/Parsers/ParserSettingsProfileElement.cpp b/dbms/Parsers/ParserSettingsProfileElement.cpp similarity index 100% rename from dbms/src/Parsers/ParserSettingsProfileElement.cpp rename to dbms/Parsers/ParserSettingsProfileElement.cpp diff --git a/dbms/src/Parsers/ParserSettingsProfileElement.h b/dbms/Parsers/ParserSettingsProfileElement.h similarity index 100% rename from dbms/src/Parsers/ParserSettingsProfileElement.h rename to dbms/Parsers/ParserSettingsProfileElement.h diff --git a/dbms/src/Parsers/ParserShowCreateAccessEntityQuery.cpp b/dbms/Parsers/ParserShowCreateAccessEntityQuery.cpp similarity index 100% rename from dbms/src/Parsers/ParserShowCreateAccessEntityQuery.cpp rename to dbms/Parsers/ParserShowCreateAccessEntityQuery.cpp diff --git a/dbms/src/Parsers/ParserShowCreateAccessEntityQuery.h b/dbms/Parsers/ParserShowCreateAccessEntityQuery.h similarity index 100% rename from dbms/src/Parsers/ParserShowCreateAccessEntityQuery.h rename to dbms/Parsers/ParserShowCreateAccessEntityQuery.h diff --git a/dbms/src/Parsers/ParserShowGrantsQuery.cpp b/dbms/Parsers/ParserShowGrantsQuery.cpp similarity index 100% rename from dbms/src/Parsers/ParserShowGrantsQuery.cpp rename to dbms/Parsers/ParserShowGrantsQuery.cpp diff --git a/dbms/src/Parsers/ParserShowGrantsQuery.h b/dbms/Parsers/ParserShowGrantsQuery.h similarity index 100% rename from dbms/src/Parsers/ParserShowGrantsQuery.h rename to dbms/Parsers/ParserShowGrantsQuery.h diff --git a/dbms/src/Parsers/ParserShowProcesslistQuery.h b/dbms/Parsers/ParserShowProcesslistQuery.h similarity index 100% rename from dbms/src/Parsers/ParserShowProcesslistQuery.h rename to dbms/Parsers/ParserShowProcesslistQuery.h diff --git a/dbms/src/Parsers/ParserShowQuotasQuery.cpp b/dbms/Parsers/ParserShowQuotasQuery.cpp similarity index 100% rename from dbms/src/Parsers/ParserShowQuotasQuery.cpp rename to dbms/Parsers/ParserShowQuotasQuery.cpp diff --git a/dbms/src/Parsers/ParserShowQuotasQuery.h b/dbms/Parsers/ParserShowQuotasQuery.h similarity index 100% rename from dbms/src/Parsers/ParserShowQuotasQuery.h rename to dbms/Parsers/ParserShowQuotasQuery.h diff --git a/dbms/src/Parsers/ParserShowRowPoliciesQuery.cpp b/dbms/Parsers/ParserShowRowPoliciesQuery.cpp similarity index 100% rename from 
dbms/src/Parsers/ParserShowRowPoliciesQuery.cpp rename to dbms/Parsers/ParserShowRowPoliciesQuery.cpp diff --git a/dbms/src/Parsers/ParserShowRowPoliciesQuery.h b/dbms/Parsers/ParserShowRowPoliciesQuery.h similarity index 100% rename from dbms/src/Parsers/ParserShowRowPoliciesQuery.h rename to dbms/Parsers/ParserShowRowPoliciesQuery.h diff --git a/dbms/src/Parsers/ParserShowTablesQuery.cpp b/dbms/Parsers/ParserShowTablesQuery.cpp similarity index 100% rename from dbms/src/Parsers/ParserShowTablesQuery.cpp rename to dbms/Parsers/ParserShowTablesQuery.cpp diff --git a/dbms/src/Parsers/ParserShowTablesQuery.h b/dbms/Parsers/ParserShowTablesQuery.h similarity index 100% rename from dbms/src/Parsers/ParserShowTablesQuery.h rename to dbms/Parsers/ParserShowTablesQuery.h diff --git a/dbms/src/Parsers/ParserSystemQuery.cpp b/dbms/Parsers/ParserSystemQuery.cpp similarity index 100% rename from dbms/src/Parsers/ParserSystemQuery.cpp rename to dbms/Parsers/ParserSystemQuery.cpp diff --git a/dbms/src/Parsers/ParserSystemQuery.h b/dbms/Parsers/ParserSystemQuery.h similarity index 100% rename from dbms/src/Parsers/ParserSystemQuery.h rename to dbms/Parsers/ParserSystemQuery.h diff --git a/dbms/src/Parsers/ParserTablePropertiesQuery.cpp b/dbms/Parsers/ParserTablePropertiesQuery.cpp similarity index 100% rename from dbms/src/Parsers/ParserTablePropertiesQuery.cpp rename to dbms/Parsers/ParserTablePropertiesQuery.cpp diff --git a/dbms/src/Parsers/ParserTablePropertiesQuery.h b/dbms/Parsers/ParserTablePropertiesQuery.h similarity index 100% rename from dbms/src/Parsers/ParserTablePropertiesQuery.h rename to dbms/Parsers/ParserTablePropertiesQuery.h diff --git a/dbms/src/Parsers/ParserTablesInSelectQuery.cpp b/dbms/Parsers/ParserTablesInSelectQuery.cpp similarity index 100% rename from dbms/src/Parsers/ParserTablesInSelectQuery.cpp rename to dbms/Parsers/ParserTablesInSelectQuery.cpp diff --git a/dbms/src/Parsers/ParserTablesInSelectQuery.h b/dbms/Parsers/ParserTablesInSelectQuery.h similarity index 100% rename from dbms/src/Parsers/ParserTablesInSelectQuery.h rename to dbms/Parsers/ParserTablesInSelectQuery.h diff --git a/dbms/src/Parsers/ParserUnionQueryElement.cpp b/dbms/Parsers/ParserUnionQueryElement.cpp similarity index 100% rename from dbms/src/Parsers/ParserUnionQueryElement.cpp rename to dbms/Parsers/ParserUnionQueryElement.cpp diff --git a/dbms/src/Parsers/ParserUnionQueryElement.h b/dbms/Parsers/ParserUnionQueryElement.h similarity index 100% rename from dbms/src/Parsers/ParserUnionQueryElement.h rename to dbms/Parsers/ParserUnionQueryElement.h diff --git a/dbms/src/Parsers/ParserUseQuery.cpp b/dbms/Parsers/ParserUseQuery.cpp similarity index 100% rename from dbms/src/Parsers/ParserUseQuery.cpp rename to dbms/Parsers/ParserUseQuery.cpp diff --git a/dbms/src/Parsers/ParserUseQuery.h b/dbms/Parsers/ParserUseQuery.h similarity index 100% rename from dbms/src/Parsers/ParserUseQuery.h rename to dbms/Parsers/ParserUseQuery.h diff --git a/dbms/src/Parsers/ParserWatchQuery.cpp b/dbms/Parsers/ParserWatchQuery.cpp similarity index 100% rename from dbms/src/Parsers/ParserWatchQuery.cpp rename to dbms/Parsers/ParserWatchQuery.cpp diff --git a/dbms/src/Parsers/ParserWatchQuery.h b/dbms/Parsers/ParserWatchQuery.h similarity index 100% rename from dbms/src/Parsers/ParserWatchQuery.h rename to dbms/Parsers/ParserWatchQuery.h diff --git a/dbms/src/Parsers/StringRange.h b/dbms/Parsers/StringRange.h similarity index 100% rename from dbms/src/Parsers/StringRange.h rename to dbms/Parsers/StringRange.h diff --git 
a/dbms/src/Parsers/TablePropertiesQueriesASTs.h b/dbms/Parsers/TablePropertiesQueriesASTs.h similarity index 100% rename from dbms/src/Parsers/TablePropertiesQueriesASTs.h rename to dbms/Parsers/TablePropertiesQueriesASTs.h diff --git a/dbms/src/Parsers/TokenIterator.cpp b/dbms/Parsers/TokenIterator.cpp similarity index 100% rename from dbms/src/Parsers/TokenIterator.cpp rename to dbms/Parsers/TokenIterator.cpp diff --git a/dbms/src/Parsers/TokenIterator.h b/dbms/Parsers/TokenIterator.h similarity index 100% rename from dbms/src/Parsers/TokenIterator.h rename to dbms/Parsers/TokenIterator.h diff --git a/dbms/src/Parsers/formatAST.cpp b/dbms/Parsers/formatAST.cpp similarity index 100% rename from dbms/src/Parsers/formatAST.cpp rename to dbms/Parsers/formatAST.cpp diff --git a/dbms/src/Parsers/formatAST.h b/dbms/Parsers/formatAST.h similarity index 100% rename from dbms/src/Parsers/formatAST.h rename to dbms/Parsers/formatAST.h diff --git a/dbms/src/Parsers/iostream_debug_helpers.cpp b/dbms/Parsers/iostream_debug_helpers.cpp similarity index 100% rename from dbms/src/Parsers/iostream_debug_helpers.cpp rename to dbms/Parsers/iostream_debug_helpers.cpp diff --git a/dbms/src/Parsers/iostream_debug_helpers.h b/dbms/Parsers/iostream_debug_helpers.h similarity index 100% rename from dbms/src/Parsers/iostream_debug_helpers.h rename to dbms/Parsers/iostream_debug_helpers.h diff --git a/dbms/src/Parsers/parseDatabaseAndTableName.cpp b/dbms/Parsers/parseDatabaseAndTableName.cpp similarity index 100% rename from dbms/src/Parsers/parseDatabaseAndTableName.cpp rename to dbms/Parsers/parseDatabaseAndTableName.cpp diff --git a/dbms/src/Parsers/parseDatabaseAndTableName.h b/dbms/Parsers/parseDatabaseAndTableName.h similarity index 100% rename from dbms/src/Parsers/parseDatabaseAndTableName.h rename to dbms/Parsers/parseDatabaseAndTableName.h diff --git a/dbms/src/Parsers/parseIdentifierOrStringLiteral.cpp b/dbms/Parsers/parseIdentifierOrStringLiteral.cpp similarity index 100% rename from dbms/src/Parsers/parseIdentifierOrStringLiteral.cpp rename to dbms/Parsers/parseIdentifierOrStringLiteral.cpp diff --git a/dbms/src/Parsers/parseIdentifierOrStringLiteral.h b/dbms/Parsers/parseIdentifierOrStringLiteral.h similarity index 100% rename from dbms/src/Parsers/parseIdentifierOrStringLiteral.h rename to dbms/Parsers/parseIdentifierOrStringLiteral.h diff --git a/dbms/src/Parsers/parseIntervalKind.cpp b/dbms/Parsers/parseIntervalKind.cpp similarity index 100% rename from dbms/src/Parsers/parseIntervalKind.cpp rename to dbms/Parsers/parseIntervalKind.cpp diff --git a/dbms/src/Parsers/parseIntervalKind.h b/dbms/Parsers/parseIntervalKind.h similarity index 100% rename from dbms/src/Parsers/parseIntervalKind.h rename to dbms/Parsers/parseIntervalKind.h diff --git a/dbms/src/Parsers/parseQuery.cpp b/dbms/Parsers/parseQuery.cpp similarity index 100% rename from dbms/src/Parsers/parseQuery.cpp rename to dbms/Parsers/parseQuery.cpp diff --git a/dbms/src/Parsers/parseQuery.h b/dbms/Parsers/parseQuery.h similarity index 100% rename from dbms/src/Parsers/parseQuery.h rename to dbms/Parsers/parseQuery.h diff --git a/dbms/src/Parsers/parseUserName.cpp b/dbms/Parsers/parseUserName.cpp similarity index 100% rename from dbms/src/Parsers/parseUserName.cpp rename to dbms/Parsers/parseUserName.cpp diff --git a/dbms/src/Parsers/parseUserName.h b/dbms/Parsers/parseUserName.h similarity index 100% rename from dbms/src/Parsers/parseUserName.h rename to dbms/Parsers/parseUserName.h diff --git a/dbms/src/Parsers/queryToString.cpp 
b/dbms/Parsers/queryToString.cpp similarity index 100% rename from dbms/src/Parsers/queryToString.cpp rename to dbms/Parsers/queryToString.cpp diff --git a/dbms/src/Parsers/queryToString.h b/dbms/Parsers/queryToString.h similarity index 100% rename from dbms/src/Parsers/queryToString.h rename to dbms/Parsers/queryToString.h diff --git a/dbms/src/Parsers/tests/CMakeLists.txt b/dbms/Parsers/tests/CMakeLists.txt similarity index 100% rename from dbms/src/Parsers/tests/CMakeLists.txt rename to dbms/Parsers/tests/CMakeLists.txt diff --git a/dbms/src/Parsers/tests/create_parser.cpp b/dbms/Parsers/tests/create_parser.cpp similarity index 100% rename from dbms/src/Parsers/tests/create_parser.cpp rename to dbms/Parsers/tests/create_parser.cpp diff --git a/dbms/src/Parsers/tests/gtest_dictionary_parser.cpp b/dbms/Parsers/tests/gtest_dictionary_parser.cpp similarity index 100% rename from dbms/src/Parsers/tests/gtest_dictionary_parser.cpp rename to dbms/Parsers/tests/gtest_dictionary_parser.cpp diff --git a/dbms/src/Parsers/tests/lexer.cpp b/dbms/Parsers/tests/lexer.cpp similarity index 96% rename from dbms/src/Parsers/tests/lexer.cpp rename to dbms/Parsers/tests/lexer.cpp index d9135b08c28..074338d15b9 100644
--- a/dbms/src/Parsers/tests/lexer.cpp
+++ b/dbms/Parsers/tests/lexer.cpp
@@ -9,7 +9,7 @@
 /// How to test:
-/// for i in ~/work/ClickHouse/dbms/tests/queries/0_stateless/*.sql; do echo $i; grep -q 'FORMAT' $i || ./lexer < $i || break; done
+/// for i in ~/work/ClickHouse/tests/queries/0_stateless/*.sql; do echo $i; grep -q 'FORMAT' $i || ./lexer < $i || break; done
 ///
diff --git a/dbms/src/Parsers/tests/select_parser.cpp b/dbms/Parsers/tests/select_parser.cpp similarity index 100% rename from dbms/src/Parsers/tests/select_parser.cpp rename to dbms/Parsers/tests/select_parser.cpp diff --git a/dbms/src/Processors/CMakeLists.txt b/dbms/Processors/CMakeLists.txt similarity index 100% rename from dbms/src/Processors/CMakeLists.txt rename to dbms/Processors/CMakeLists.txt diff --git a/dbms/src/Processors/Chunk.cpp b/dbms/Processors/Chunk.cpp similarity index 100% rename from dbms/src/Processors/Chunk.cpp rename to dbms/Processors/Chunk.cpp diff --git a/dbms/src/Processors/Chunk.h b/dbms/Processors/Chunk.h similarity index 100% rename from dbms/src/Processors/Chunk.h rename to dbms/Processors/Chunk.h diff --git a/dbms/src/Processors/ConcatProcessor.cpp b/dbms/Processors/ConcatProcessor.cpp similarity index 100% rename from dbms/src/Processors/ConcatProcessor.cpp rename to dbms/Processors/ConcatProcessor.cpp diff --git a/dbms/src/Processors/ConcatProcessor.h b/dbms/Processors/ConcatProcessor.h similarity index 100% rename from dbms/src/Processors/ConcatProcessor.h rename to dbms/Processors/ConcatProcessor.h diff --git a/dbms/src/Processors/DelayedPortsProcessor.cpp b/dbms/Processors/DelayedPortsProcessor.cpp similarity index 100% rename from dbms/src/Processors/DelayedPortsProcessor.cpp rename to dbms/Processors/DelayedPortsProcessor.cpp diff --git a/dbms/src/Processors/DelayedPortsProcessor.h b/dbms/Processors/DelayedPortsProcessor.h similarity index 100% rename from dbms/src/Processors/DelayedPortsProcessor.h rename to dbms/Processors/DelayedPortsProcessor.h diff --git a/dbms/src/Processors/Executors/ParallelPipelineExecutor.cpp b/dbms/Processors/Executors/ParallelPipelineExecutor.cpp similarity index 100% rename from dbms/src/Processors/Executors/ParallelPipelineExecutor.cpp rename to dbms/Processors/Executors/ParallelPipelineExecutor.cpp diff --git 
a/dbms/src/Processors/Executors/ParallelPipelineExecutor.h b/dbms/Processors/Executors/ParallelPipelineExecutor.h similarity index 100% rename from dbms/src/Processors/Executors/ParallelPipelineExecutor.h rename to dbms/Processors/Executors/ParallelPipelineExecutor.h diff --git a/dbms/src/Processors/Executors/PipelineExecutor.cpp b/dbms/Processors/Executors/PipelineExecutor.cpp similarity index 100% rename from dbms/src/Processors/Executors/PipelineExecutor.cpp rename to dbms/Processors/Executors/PipelineExecutor.cpp diff --git a/dbms/src/Processors/Executors/PipelineExecutor.h b/dbms/Processors/Executors/PipelineExecutor.h similarity index 100% rename from dbms/src/Processors/Executors/PipelineExecutor.h rename to dbms/Processors/Executors/PipelineExecutor.h diff --git a/dbms/src/Processors/Executors/SequentialPipelineExecutor.cpp b/dbms/Processors/Executors/SequentialPipelineExecutor.cpp similarity index 100% rename from dbms/src/Processors/Executors/SequentialPipelineExecutor.cpp rename to dbms/Processors/Executors/SequentialPipelineExecutor.cpp diff --git a/dbms/src/Processors/Executors/SequentialPipelineExecutor.h b/dbms/Processors/Executors/SequentialPipelineExecutor.h similarity index 100% rename from dbms/src/Processors/Executors/SequentialPipelineExecutor.h rename to dbms/Processors/Executors/SequentialPipelineExecutor.h diff --git a/dbms/src/Processors/Executors/ThreadsQueue.h b/dbms/Processors/Executors/ThreadsQueue.h similarity index 100% rename from dbms/src/Processors/Executors/ThreadsQueue.h rename to dbms/Processors/Executors/ThreadsQueue.h diff --git a/dbms/src/Processors/Executors/TreeExecutorBlockInputStream.cpp b/dbms/Processors/Executors/TreeExecutorBlockInputStream.cpp similarity index 100% rename from dbms/src/Processors/Executors/TreeExecutorBlockInputStream.cpp rename to dbms/Processors/Executors/TreeExecutorBlockInputStream.cpp diff --git a/dbms/src/Processors/Executors/TreeExecutorBlockInputStream.h b/dbms/Processors/Executors/TreeExecutorBlockInputStream.h similarity index 100% rename from dbms/src/Processors/Executors/TreeExecutorBlockInputStream.h rename to dbms/Processors/Executors/TreeExecutorBlockInputStream.h diff --git a/dbms/src/Processors/Executors/traverse.h b/dbms/Processors/Executors/traverse.h similarity index 100% rename from dbms/src/Processors/Executors/traverse.h rename to dbms/Processors/Executors/traverse.h diff --git a/dbms/src/Processors/ForkProcessor.cpp b/dbms/Processors/ForkProcessor.cpp similarity index 100% rename from dbms/src/Processors/ForkProcessor.cpp rename to dbms/Processors/ForkProcessor.cpp diff --git a/dbms/src/Processors/ForkProcessor.h b/dbms/Processors/ForkProcessor.h similarity index 100% rename from dbms/src/Processors/ForkProcessor.h rename to dbms/Processors/ForkProcessor.h diff --git a/dbms/src/Processors/Formats/IInputFormat.cpp b/dbms/Processors/Formats/IInputFormat.cpp similarity index 100% rename from dbms/src/Processors/Formats/IInputFormat.cpp rename to dbms/Processors/Formats/IInputFormat.cpp diff --git a/dbms/src/Processors/Formats/IInputFormat.h b/dbms/Processors/Formats/IInputFormat.h similarity index 100% rename from dbms/src/Processors/Formats/IInputFormat.h rename to dbms/Processors/Formats/IInputFormat.h diff --git a/dbms/src/Processors/Formats/IOutputFormat.cpp b/dbms/Processors/Formats/IOutputFormat.cpp similarity index 100% rename from dbms/src/Processors/Formats/IOutputFormat.cpp rename to dbms/Processors/Formats/IOutputFormat.cpp diff --git a/dbms/src/Processors/Formats/IOutputFormat.h 
b/dbms/Processors/Formats/IOutputFormat.h similarity index 100% rename from dbms/src/Processors/Formats/IOutputFormat.h rename to dbms/Processors/Formats/IOutputFormat.h diff --git a/dbms/src/Processors/Formats/IRowInputFormat.cpp b/dbms/Processors/Formats/IRowInputFormat.cpp similarity index 100% rename from dbms/src/Processors/Formats/IRowInputFormat.cpp rename to dbms/Processors/Formats/IRowInputFormat.cpp diff --git a/dbms/src/Processors/Formats/IRowInputFormat.h b/dbms/Processors/Formats/IRowInputFormat.h similarity index 100% rename from dbms/src/Processors/Formats/IRowInputFormat.h rename to dbms/Processors/Formats/IRowInputFormat.h diff --git a/dbms/src/Processors/Formats/IRowOutputFormat.cpp b/dbms/Processors/Formats/IRowOutputFormat.cpp similarity index 100% rename from dbms/src/Processors/Formats/IRowOutputFormat.cpp rename to dbms/Processors/Formats/IRowOutputFormat.cpp diff --git a/dbms/src/Processors/Formats/IRowOutputFormat.h b/dbms/Processors/Formats/IRowOutputFormat.h similarity index 100% rename from dbms/src/Processors/Formats/IRowOutputFormat.h rename to dbms/Processors/Formats/IRowOutputFormat.h diff --git a/dbms/src/Processors/Formats/Impl/ArrowColumnToCHColumn.cpp b/dbms/Processors/Formats/Impl/ArrowColumnToCHColumn.cpp similarity index 100% rename from dbms/src/Processors/Formats/Impl/ArrowColumnToCHColumn.cpp rename to dbms/Processors/Formats/Impl/ArrowColumnToCHColumn.cpp diff --git a/dbms/src/Processors/Formats/Impl/ArrowColumnToCHColumn.h b/dbms/Processors/Formats/Impl/ArrowColumnToCHColumn.h similarity index 100% rename from dbms/src/Processors/Formats/Impl/ArrowColumnToCHColumn.h rename to dbms/Processors/Formats/Impl/ArrowColumnToCHColumn.h diff --git a/dbms/src/Processors/Formats/Impl/AvroRowInputFormat.cpp b/dbms/Processors/Formats/Impl/AvroRowInputFormat.cpp similarity index 100% rename from dbms/src/Processors/Formats/Impl/AvroRowInputFormat.cpp rename to dbms/Processors/Formats/Impl/AvroRowInputFormat.cpp diff --git a/dbms/src/Processors/Formats/Impl/AvroRowInputFormat.h b/dbms/Processors/Formats/Impl/AvroRowInputFormat.h similarity index 100% rename from dbms/src/Processors/Formats/Impl/AvroRowInputFormat.h rename to dbms/Processors/Formats/Impl/AvroRowInputFormat.h diff --git a/dbms/src/Processors/Formats/Impl/AvroRowOutputFormat.cpp b/dbms/Processors/Formats/Impl/AvroRowOutputFormat.cpp similarity index 100% rename from dbms/src/Processors/Formats/Impl/AvroRowOutputFormat.cpp rename to dbms/Processors/Formats/Impl/AvroRowOutputFormat.cpp diff --git a/dbms/src/Processors/Formats/Impl/AvroRowOutputFormat.h b/dbms/Processors/Formats/Impl/AvroRowOutputFormat.h similarity index 100% rename from dbms/src/Processors/Formats/Impl/AvroRowOutputFormat.h rename to dbms/Processors/Formats/Impl/AvroRowOutputFormat.h diff --git a/dbms/src/Processors/Formats/Impl/BinaryRowInputFormat.cpp b/dbms/Processors/Formats/Impl/BinaryRowInputFormat.cpp similarity index 100% rename from dbms/src/Processors/Formats/Impl/BinaryRowInputFormat.cpp rename to dbms/Processors/Formats/Impl/BinaryRowInputFormat.cpp diff --git a/dbms/src/Processors/Formats/Impl/BinaryRowInputFormat.h b/dbms/Processors/Formats/Impl/BinaryRowInputFormat.h similarity index 100% rename from dbms/src/Processors/Formats/Impl/BinaryRowInputFormat.h rename to dbms/Processors/Formats/Impl/BinaryRowInputFormat.h diff --git a/dbms/src/Processors/Formats/Impl/BinaryRowOutputFormat.cpp b/dbms/Processors/Formats/Impl/BinaryRowOutputFormat.cpp similarity index 100% rename from 
dbms/src/Processors/Formats/Impl/BinaryRowOutputFormat.cpp rename to dbms/Processors/Formats/Impl/BinaryRowOutputFormat.cpp diff --git a/dbms/src/Processors/Formats/Impl/BinaryRowOutputFormat.h b/dbms/Processors/Formats/Impl/BinaryRowOutputFormat.h similarity index 100% rename from dbms/src/Processors/Formats/Impl/BinaryRowOutputFormat.h rename to dbms/Processors/Formats/Impl/BinaryRowOutputFormat.h diff --git a/dbms/src/Processors/Formats/Impl/CMakeLists.txt b/dbms/Processors/Formats/Impl/CMakeLists.txt similarity index 100% rename from dbms/src/Processors/Formats/Impl/CMakeLists.txt rename to dbms/Processors/Formats/Impl/CMakeLists.txt diff --git a/dbms/src/Processors/Formats/Impl/CSVRowInputFormat.cpp b/dbms/Processors/Formats/Impl/CSVRowInputFormat.cpp similarity index 100% rename from dbms/src/Processors/Formats/Impl/CSVRowInputFormat.cpp rename to dbms/Processors/Formats/Impl/CSVRowInputFormat.cpp diff --git a/dbms/src/Processors/Formats/Impl/CSVRowInputFormat.h b/dbms/Processors/Formats/Impl/CSVRowInputFormat.h similarity index 100% rename from dbms/src/Processors/Formats/Impl/CSVRowInputFormat.h rename to dbms/Processors/Formats/Impl/CSVRowInputFormat.h diff --git a/dbms/src/Processors/Formats/Impl/CSVRowOutputFormat.cpp b/dbms/Processors/Formats/Impl/CSVRowOutputFormat.cpp similarity index 100% rename from dbms/src/Processors/Formats/Impl/CSVRowOutputFormat.cpp rename to dbms/Processors/Formats/Impl/CSVRowOutputFormat.cpp diff --git a/dbms/src/Processors/Formats/Impl/CSVRowOutputFormat.h b/dbms/Processors/Formats/Impl/CSVRowOutputFormat.h similarity index 100% rename from dbms/src/Processors/Formats/Impl/CSVRowOutputFormat.h rename to dbms/Processors/Formats/Impl/CSVRowOutputFormat.h diff --git a/dbms/src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp b/dbms/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp similarity index 100% rename from dbms/src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp rename to dbms/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp diff --git a/dbms/src/Processors/Formats/Impl/CapnProtoRowInputFormat.h b/dbms/Processors/Formats/Impl/CapnProtoRowInputFormat.h similarity index 100% rename from dbms/src/Processors/Formats/Impl/CapnProtoRowInputFormat.h rename to dbms/Processors/Formats/Impl/CapnProtoRowInputFormat.h diff --git a/dbms/src/Processors/Formats/Impl/ConstantExpressionTemplate.cpp b/dbms/Processors/Formats/Impl/ConstantExpressionTemplate.cpp similarity index 100% rename from dbms/src/Processors/Formats/Impl/ConstantExpressionTemplate.cpp rename to dbms/Processors/Formats/Impl/ConstantExpressionTemplate.cpp diff --git a/dbms/src/Processors/Formats/Impl/ConstantExpressionTemplate.h b/dbms/Processors/Formats/Impl/ConstantExpressionTemplate.h similarity index 100% rename from dbms/src/Processors/Formats/Impl/ConstantExpressionTemplate.h rename to dbms/Processors/Formats/Impl/ConstantExpressionTemplate.h diff --git a/dbms/src/Processors/Formats/Impl/JSONCompactEachRowRowInputFormat.cpp b/dbms/Processors/Formats/Impl/JSONCompactEachRowRowInputFormat.cpp similarity index 100% rename from dbms/src/Processors/Formats/Impl/JSONCompactEachRowRowInputFormat.cpp rename to dbms/Processors/Formats/Impl/JSONCompactEachRowRowInputFormat.cpp diff --git a/dbms/src/Processors/Formats/Impl/JSONCompactEachRowRowInputFormat.h b/dbms/Processors/Formats/Impl/JSONCompactEachRowRowInputFormat.h similarity index 100% rename from dbms/src/Processors/Formats/Impl/JSONCompactEachRowRowInputFormat.h rename to 
dbms/Processors/Formats/Impl/JSONCompactEachRowRowInputFormat.h diff --git a/dbms/src/Processors/Formats/Impl/JSONCompactEachRowRowOutputFormat.cpp b/dbms/Processors/Formats/Impl/JSONCompactEachRowRowOutputFormat.cpp similarity index 100% rename from dbms/src/Processors/Formats/Impl/JSONCompactEachRowRowOutputFormat.cpp rename to dbms/Processors/Formats/Impl/JSONCompactEachRowRowOutputFormat.cpp diff --git a/dbms/src/Processors/Formats/Impl/JSONCompactEachRowRowOutputFormat.h b/dbms/Processors/Formats/Impl/JSONCompactEachRowRowOutputFormat.h similarity index 100% rename from dbms/src/Processors/Formats/Impl/JSONCompactEachRowRowOutputFormat.h rename to dbms/Processors/Formats/Impl/JSONCompactEachRowRowOutputFormat.h diff --git a/dbms/src/Processors/Formats/Impl/JSONCompactRowOutputFormat.cpp b/dbms/Processors/Formats/Impl/JSONCompactRowOutputFormat.cpp similarity index 100% rename from dbms/src/Processors/Formats/Impl/JSONCompactRowOutputFormat.cpp rename to dbms/Processors/Formats/Impl/JSONCompactRowOutputFormat.cpp diff --git a/dbms/src/Processors/Formats/Impl/JSONCompactRowOutputFormat.h b/dbms/Processors/Formats/Impl/JSONCompactRowOutputFormat.h similarity index 100% rename from dbms/src/Processors/Formats/Impl/JSONCompactRowOutputFormat.h rename to dbms/Processors/Formats/Impl/JSONCompactRowOutputFormat.h diff --git a/dbms/src/Processors/Formats/Impl/JSONEachRowRowInputFormat.cpp b/dbms/Processors/Formats/Impl/JSONEachRowRowInputFormat.cpp similarity index 100% rename from dbms/src/Processors/Formats/Impl/JSONEachRowRowInputFormat.cpp rename to dbms/Processors/Formats/Impl/JSONEachRowRowInputFormat.cpp diff --git a/dbms/src/Processors/Formats/Impl/JSONEachRowRowInputFormat.h b/dbms/Processors/Formats/Impl/JSONEachRowRowInputFormat.h similarity index 100% rename from dbms/src/Processors/Formats/Impl/JSONEachRowRowInputFormat.h rename to dbms/Processors/Formats/Impl/JSONEachRowRowInputFormat.h diff --git a/dbms/src/Processors/Formats/Impl/JSONEachRowRowOutputFormat.cpp b/dbms/Processors/Formats/Impl/JSONEachRowRowOutputFormat.cpp similarity index 100% rename from dbms/src/Processors/Formats/Impl/JSONEachRowRowOutputFormat.cpp rename to dbms/Processors/Formats/Impl/JSONEachRowRowOutputFormat.cpp diff --git a/dbms/src/Processors/Formats/Impl/JSONEachRowRowOutputFormat.h b/dbms/Processors/Formats/Impl/JSONEachRowRowOutputFormat.h similarity index 100% rename from dbms/src/Processors/Formats/Impl/JSONEachRowRowOutputFormat.h rename to dbms/Processors/Formats/Impl/JSONEachRowRowOutputFormat.h diff --git a/dbms/src/Processors/Formats/Impl/JSONEachRowWithProgressRowOutputFormat.cpp b/dbms/Processors/Formats/Impl/JSONEachRowWithProgressRowOutputFormat.cpp similarity index 100% rename from dbms/src/Processors/Formats/Impl/JSONEachRowWithProgressRowOutputFormat.cpp rename to dbms/Processors/Formats/Impl/JSONEachRowWithProgressRowOutputFormat.cpp diff --git a/dbms/src/Processors/Formats/Impl/JSONEachRowWithProgressRowOutputFormat.h b/dbms/Processors/Formats/Impl/JSONEachRowWithProgressRowOutputFormat.h similarity index 100% rename from dbms/src/Processors/Formats/Impl/JSONEachRowWithProgressRowOutputFormat.h rename to dbms/Processors/Formats/Impl/JSONEachRowWithProgressRowOutputFormat.h diff --git a/dbms/src/Processors/Formats/Impl/JSONRowOutputFormat.cpp b/dbms/Processors/Formats/Impl/JSONRowOutputFormat.cpp similarity index 100% rename from dbms/src/Processors/Formats/Impl/JSONRowOutputFormat.cpp rename to dbms/Processors/Formats/Impl/JSONRowOutputFormat.cpp diff --git 
a/dbms/src/Processors/Formats/Impl/JSONRowOutputFormat.h b/dbms/Processors/Formats/Impl/JSONRowOutputFormat.h similarity index 100% rename from dbms/src/Processors/Formats/Impl/JSONRowOutputFormat.h rename to dbms/Processors/Formats/Impl/JSONRowOutputFormat.h diff --git a/dbms/src/Processors/Formats/Impl/MySQLOutputFormat.cpp b/dbms/Processors/Formats/Impl/MySQLOutputFormat.cpp similarity index 100% rename from dbms/src/Processors/Formats/Impl/MySQLOutputFormat.cpp rename to dbms/Processors/Formats/Impl/MySQLOutputFormat.cpp diff --git a/dbms/src/Processors/Formats/Impl/MySQLOutputFormat.h b/dbms/Processors/Formats/Impl/MySQLOutputFormat.h similarity index 100% rename from dbms/src/Processors/Formats/Impl/MySQLOutputFormat.h rename to dbms/Processors/Formats/Impl/MySQLOutputFormat.h diff --git a/dbms/src/Processors/Formats/Impl/NativeFormat.cpp b/dbms/Processors/Formats/Impl/NativeFormat.cpp similarity index 100% rename from dbms/src/Processors/Formats/Impl/NativeFormat.cpp rename to dbms/Processors/Formats/Impl/NativeFormat.cpp diff --git a/dbms/src/Processors/Formats/Impl/NullFormat.cpp b/dbms/Processors/Formats/Impl/NullFormat.cpp similarity index 100% rename from dbms/src/Processors/Formats/Impl/NullFormat.cpp rename to dbms/Processors/Formats/Impl/NullFormat.cpp diff --git a/dbms/src/Processors/Formats/Impl/ODBCDriver2BlockOutputFormat.cpp b/dbms/Processors/Formats/Impl/ODBCDriver2BlockOutputFormat.cpp similarity index 100% rename from dbms/src/Processors/Formats/Impl/ODBCDriver2BlockOutputFormat.cpp rename to dbms/Processors/Formats/Impl/ODBCDriver2BlockOutputFormat.cpp diff --git a/dbms/src/Processors/Formats/Impl/ODBCDriver2BlockOutputFormat.h b/dbms/Processors/Formats/Impl/ODBCDriver2BlockOutputFormat.h similarity index 100% rename from dbms/src/Processors/Formats/Impl/ODBCDriver2BlockOutputFormat.h rename to dbms/Processors/Formats/Impl/ODBCDriver2BlockOutputFormat.h diff --git a/dbms/src/Processors/Formats/Impl/ODBCDriverBlockOutputFormat.cpp b/dbms/Processors/Formats/Impl/ODBCDriverBlockOutputFormat.cpp similarity index 100% rename from dbms/src/Processors/Formats/Impl/ODBCDriverBlockOutputFormat.cpp rename to dbms/Processors/Formats/Impl/ODBCDriverBlockOutputFormat.cpp diff --git a/dbms/src/Processors/Formats/Impl/ODBCDriverBlockOutputFormat.h b/dbms/Processors/Formats/Impl/ODBCDriverBlockOutputFormat.h similarity index 100% rename from dbms/src/Processors/Formats/Impl/ODBCDriverBlockOutputFormat.h rename to dbms/Processors/Formats/Impl/ODBCDriverBlockOutputFormat.h diff --git a/dbms/src/Processors/Formats/Impl/ORCBlockInputFormat.cpp b/dbms/Processors/Formats/Impl/ORCBlockInputFormat.cpp similarity index 100% rename from dbms/src/Processors/Formats/Impl/ORCBlockInputFormat.cpp rename to dbms/Processors/Formats/Impl/ORCBlockInputFormat.cpp diff --git a/dbms/src/Processors/Formats/Impl/ORCBlockInputFormat.h b/dbms/Processors/Formats/Impl/ORCBlockInputFormat.h similarity index 100% rename from dbms/src/Processors/Formats/Impl/ORCBlockInputFormat.h rename to dbms/Processors/Formats/Impl/ORCBlockInputFormat.h diff --git a/dbms/src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp b/dbms/Processors/Formats/Impl/ParquetBlockInputFormat.cpp similarity index 100% rename from dbms/src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp rename to dbms/Processors/Formats/Impl/ParquetBlockInputFormat.cpp diff --git a/dbms/src/Processors/Formats/Impl/ParquetBlockInputFormat.h b/dbms/Processors/Formats/Impl/ParquetBlockInputFormat.h similarity index 100% rename from 
dbms/src/Processors/Formats/Impl/ParquetBlockInputFormat.h rename to dbms/Processors/Formats/Impl/ParquetBlockInputFormat.h diff --git a/dbms/src/Processors/Formats/Impl/ParquetBlockOutputFormat.cpp b/dbms/Processors/Formats/Impl/ParquetBlockOutputFormat.cpp similarity index 100% rename from dbms/src/Processors/Formats/Impl/ParquetBlockOutputFormat.cpp rename to dbms/Processors/Formats/Impl/ParquetBlockOutputFormat.cpp diff --git a/dbms/src/Processors/Formats/Impl/ParquetBlockOutputFormat.h b/dbms/Processors/Formats/Impl/ParquetBlockOutputFormat.h similarity index 100% rename from dbms/src/Processors/Formats/Impl/ParquetBlockOutputFormat.h rename to dbms/Processors/Formats/Impl/ParquetBlockOutputFormat.h diff --git a/dbms/src/Processors/Formats/Impl/PrettyBlockOutputFormat.cpp b/dbms/Processors/Formats/Impl/PrettyBlockOutputFormat.cpp similarity index 100% rename from dbms/src/Processors/Formats/Impl/PrettyBlockOutputFormat.cpp rename to dbms/Processors/Formats/Impl/PrettyBlockOutputFormat.cpp diff --git a/dbms/src/Processors/Formats/Impl/PrettyBlockOutputFormat.h b/dbms/Processors/Formats/Impl/PrettyBlockOutputFormat.h similarity index 100% rename from dbms/src/Processors/Formats/Impl/PrettyBlockOutputFormat.h rename to dbms/Processors/Formats/Impl/PrettyBlockOutputFormat.h diff --git a/dbms/src/Processors/Formats/Impl/PrettyCompactBlockOutputFormat.cpp b/dbms/Processors/Formats/Impl/PrettyCompactBlockOutputFormat.cpp similarity index 100% rename from dbms/src/Processors/Formats/Impl/PrettyCompactBlockOutputFormat.cpp rename to dbms/Processors/Formats/Impl/PrettyCompactBlockOutputFormat.cpp diff --git a/dbms/src/Processors/Formats/Impl/PrettyCompactBlockOutputFormat.h b/dbms/Processors/Formats/Impl/PrettyCompactBlockOutputFormat.h similarity index 100% rename from dbms/src/Processors/Formats/Impl/PrettyCompactBlockOutputFormat.h rename to dbms/Processors/Formats/Impl/PrettyCompactBlockOutputFormat.h diff --git a/dbms/src/Processors/Formats/Impl/PrettySpaceBlockOutputFormat.cpp b/dbms/Processors/Formats/Impl/PrettySpaceBlockOutputFormat.cpp similarity index 100% rename from dbms/src/Processors/Formats/Impl/PrettySpaceBlockOutputFormat.cpp rename to dbms/Processors/Formats/Impl/PrettySpaceBlockOutputFormat.cpp diff --git a/dbms/src/Processors/Formats/Impl/PrettySpaceBlockOutputFormat.h b/dbms/Processors/Formats/Impl/PrettySpaceBlockOutputFormat.h similarity index 100% rename from dbms/src/Processors/Formats/Impl/PrettySpaceBlockOutputFormat.h rename to dbms/Processors/Formats/Impl/PrettySpaceBlockOutputFormat.h diff --git a/dbms/src/Processors/Formats/Impl/ProtobufRowInputFormat.cpp b/dbms/Processors/Formats/Impl/ProtobufRowInputFormat.cpp similarity index 100% rename from dbms/src/Processors/Formats/Impl/ProtobufRowInputFormat.cpp rename to dbms/Processors/Formats/Impl/ProtobufRowInputFormat.cpp diff --git a/dbms/src/Processors/Formats/Impl/ProtobufRowInputFormat.h b/dbms/Processors/Formats/Impl/ProtobufRowInputFormat.h similarity index 100% rename from dbms/src/Processors/Formats/Impl/ProtobufRowInputFormat.h rename to dbms/Processors/Formats/Impl/ProtobufRowInputFormat.h diff --git a/dbms/src/Processors/Formats/Impl/ProtobufRowOutputFormat.cpp b/dbms/Processors/Formats/Impl/ProtobufRowOutputFormat.cpp similarity index 100% rename from dbms/src/Processors/Formats/Impl/ProtobufRowOutputFormat.cpp rename to dbms/Processors/Formats/Impl/ProtobufRowOutputFormat.cpp diff --git a/dbms/src/Processors/Formats/Impl/ProtobufRowOutputFormat.h b/dbms/Processors/Formats/Impl/ProtobufRowOutputFormat.h 
similarity index 100% rename from dbms/src/Processors/Formats/Impl/ProtobufRowOutputFormat.h rename to dbms/Processors/Formats/Impl/ProtobufRowOutputFormat.h diff --git a/dbms/src/Processors/Formats/Impl/RegexpRowInputFormat.cpp b/dbms/Processors/Formats/Impl/RegexpRowInputFormat.cpp similarity index 100% rename from dbms/src/Processors/Formats/Impl/RegexpRowInputFormat.cpp rename to dbms/Processors/Formats/Impl/RegexpRowInputFormat.cpp diff --git a/dbms/src/Processors/Formats/Impl/RegexpRowInputFormat.h b/dbms/Processors/Formats/Impl/RegexpRowInputFormat.h similarity index 100% rename from dbms/src/Processors/Formats/Impl/RegexpRowInputFormat.h rename to dbms/Processors/Formats/Impl/RegexpRowInputFormat.h diff --git a/dbms/src/Processors/Formats/Impl/TSKVRowInputFormat.cpp b/dbms/Processors/Formats/Impl/TSKVRowInputFormat.cpp similarity index 100% rename from dbms/src/Processors/Formats/Impl/TSKVRowInputFormat.cpp rename to dbms/Processors/Formats/Impl/TSKVRowInputFormat.cpp diff --git a/dbms/src/Processors/Formats/Impl/TSKVRowInputFormat.h b/dbms/Processors/Formats/Impl/TSKVRowInputFormat.h similarity index 100% rename from dbms/src/Processors/Formats/Impl/TSKVRowInputFormat.h rename to dbms/Processors/Formats/Impl/TSKVRowInputFormat.h diff --git a/dbms/src/Processors/Formats/Impl/TSKVRowOutputFormat.cpp b/dbms/Processors/Formats/Impl/TSKVRowOutputFormat.cpp similarity index 100% rename from dbms/src/Processors/Formats/Impl/TSKVRowOutputFormat.cpp rename to dbms/Processors/Formats/Impl/TSKVRowOutputFormat.cpp diff --git a/dbms/src/Processors/Formats/Impl/TSKVRowOutputFormat.h b/dbms/Processors/Formats/Impl/TSKVRowOutputFormat.h similarity index 100% rename from dbms/src/Processors/Formats/Impl/TSKVRowOutputFormat.h rename to dbms/Processors/Formats/Impl/TSKVRowOutputFormat.h diff --git a/dbms/src/Processors/Formats/Impl/TabSeparatedRawRowOutputFormat.h b/dbms/Processors/Formats/Impl/TabSeparatedRawRowOutputFormat.h similarity index 100% rename from dbms/src/Processors/Formats/Impl/TabSeparatedRawRowOutputFormat.h rename to dbms/Processors/Formats/Impl/TabSeparatedRawRowOutputFormat.h diff --git a/dbms/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp b/dbms/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp similarity index 100% rename from dbms/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp rename to dbms/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp diff --git a/dbms/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.h b/dbms/Processors/Formats/Impl/TabSeparatedRowInputFormat.h similarity index 100% rename from dbms/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.h rename to dbms/Processors/Formats/Impl/TabSeparatedRowInputFormat.h diff --git a/dbms/src/Processors/Formats/Impl/TabSeparatedRowOutputFormat.cpp b/dbms/Processors/Formats/Impl/TabSeparatedRowOutputFormat.cpp similarity index 100% rename from dbms/src/Processors/Formats/Impl/TabSeparatedRowOutputFormat.cpp rename to dbms/Processors/Formats/Impl/TabSeparatedRowOutputFormat.cpp diff --git a/dbms/src/Processors/Formats/Impl/TabSeparatedRowOutputFormat.h b/dbms/Processors/Formats/Impl/TabSeparatedRowOutputFormat.h similarity index 100% rename from dbms/src/Processors/Formats/Impl/TabSeparatedRowOutputFormat.h rename to dbms/Processors/Formats/Impl/TabSeparatedRowOutputFormat.h diff --git a/dbms/src/Processors/Formats/Impl/TemplateBlockOutputFormat.cpp b/dbms/Processors/Formats/Impl/TemplateBlockOutputFormat.cpp similarity index 100% rename from 
dbms/src/Processors/Formats/Impl/TemplateBlockOutputFormat.cpp rename to dbms/Processors/Formats/Impl/TemplateBlockOutputFormat.cpp diff --git a/dbms/src/Processors/Formats/Impl/TemplateBlockOutputFormat.h b/dbms/Processors/Formats/Impl/TemplateBlockOutputFormat.h similarity index 100% rename from dbms/src/Processors/Formats/Impl/TemplateBlockOutputFormat.h rename to dbms/Processors/Formats/Impl/TemplateBlockOutputFormat.h diff --git a/dbms/src/Processors/Formats/Impl/TemplateRowInputFormat.cpp b/dbms/Processors/Formats/Impl/TemplateRowInputFormat.cpp similarity index 100% rename from dbms/src/Processors/Formats/Impl/TemplateRowInputFormat.cpp rename to dbms/Processors/Formats/Impl/TemplateRowInputFormat.cpp diff --git a/dbms/src/Processors/Formats/Impl/TemplateRowInputFormat.h b/dbms/Processors/Formats/Impl/TemplateRowInputFormat.h similarity index 100% rename from dbms/src/Processors/Formats/Impl/TemplateRowInputFormat.h rename to dbms/Processors/Formats/Impl/TemplateRowInputFormat.h diff --git a/dbms/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp b/dbms/Processors/Formats/Impl/ValuesBlockInputFormat.cpp similarity index 100% rename from dbms/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp rename to dbms/Processors/Formats/Impl/ValuesBlockInputFormat.cpp diff --git a/dbms/src/Processors/Formats/Impl/ValuesBlockInputFormat.h b/dbms/Processors/Formats/Impl/ValuesBlockInputFormat.h similarity index 100% rename from dbms/src/Processors/Formats/Impl/ValuesBlockInputFormat.h rename to dbms/Processors/Formats/Impl/ValuesBlockInputFormat.h diff --git a/dbms/src/Processors/Formats/Impl/ValuesRowOutputFormat.cpp b/dbms/Processors/Formats/Impl/ValuesRowOutputFormat.cpp similarity index 100% rename from dbms/src/Processors/Formats/Impl/ValuesRowOutputFormat.cpp rename to dbms/Processors/Formats/Impl/ValuesRowOutputFormat.cpp diff --git a/dbms/src/Processors/Formats/Impl/ValuesRowOutputFormat.h b/dbms/Processors/Formats/Impl/ValuesRowOutputFormat.h similarity index 100% rename from dbms/src/Processors/Formats/Impl/ValuesRowOutputFormat.h rename to dbms/Processors/Formats/Impl/ValuesRowOutputFormat.h diff --git a/dbms/src/Processors/Formats/Impl/VerticalRowOutputFormat.cpp b/dbms/Processors/Formats/Impl/VerticalRowOutputFormat.cpp similarity index 100% rename from dbms/src/Processors/Formats/Impl/VerticalRowOutputFormat.cpp rename to dbms/Processors/Formats/Impl/VerticalRowOutputFormat.cpp diff --git a/dbms/src/Processors/Formats/Impl/VerticalRowOutputFormat.h b/dbms/Processors/Formats/Impl/VerticalRowOutputFormat.h similarity index 100% rename from dbms/src/Processors/Formats/Impl/VerticalRowOutputFormat.h rename to dbms/Processors/Formats/Impl/VerticalRowOutputFormat.h diff --git a/dbms/src/Processors/Formats/Impl/XMLRowOutputFormat.cpp b/dbms/Processors/Formats/Impl/XMLRowOutputFormat.cpp similarity index 100% rename from dbms/src/Processors/Formats/Impl/XMLRowOutputFormat.cpp rename to dbms/Processors/Formats/Impl/XMLRowOutputFormat.cpp diff --git a/dbms/src/Processors/Formats/Impl/XMLRowOutputFormat.h b/dbms/Processors/Formats/Impl/XMLRowOutputFormat.h similarity index 100% rename from dbms/src/Processors/Formats/Impl/XMLRowOutputFormat.h rename to dbms/Processors/Formats/Impl/XMLRowOutputFormat.h diff --git a/dbms/src/Processors/Formats/InputStreamFromInputFormat.h b/dbms/Processors/Formats/InputStreamFromInputFormat.h similarity index 100% rename from dbms/src/Processors/Formats/InputStreamFromInputFormat.h rename to dbms/Processors/Formats/InputStreamFromInputFormat.h diff --git 
a/dbms/src/Processors/Formats/LazyOutputFormat.cpp b/dbms/Processors/Formats/LazyOutputFormat.cpp similarity index 100% rename from dbms/src/Processors/Formats/LazyOutputFormat.cpp rename to dbms/Processors/Formats/LazyOutputFormat.cpp diff --git a/dbms/src/Processors/Formats/LazyOutputFormat.h b/dbms/Processors/Formats/LazyOutputFormat.h similarity index 100% rename from dbms/src/Processors/Formats/LazyOutputFormat.h rename to dbms/Processors/Formats/LazyOutputFormat.h diff --git a/dbms/src/Processors/Formats/OutputStreamToOutputFormat.cpp b/dbms/Processors/Formats/OutputStreamToOutputFormat.cpp similarity index 100% rename from dbms/src/Processors/Formats/OutputStreamToOutputFormat.cpp rename to dbms/Processors/Formats/OutputStreamToOutputFormat.cpp diff --git a/dbms/src/Processors/Formats/OutputStreamToOutputFormat.h b/dbms/Processors/Formats/OutputStreamToOutputFormat.h similarity index 100% rename from dbms/src/Processors/Formats/OutputStreamToOutputFormat.h rename to dbms/Processors/Formats/OutputStreamToOutputFormat.h diff --git a/dbms/src/Processors/Formats/RowInputFormatWithDiagnosticInfo.cpp b/dbms/Processors/Formats/RowInputFormatWithDiagnosticInfo.cpp similarity index 100% rename from dbms/src/Processors/Formats/RowInputFormatWithDiagnosticInfo.cpp rename to dbms/Processors/Formats/RowInputFormatWithDiagnosticInfo.cpp diff --git a/dbms/src/Processors/Formats/RowInputFormatWithDiagnosticInfo.h b/dbms/Processors/Formats/RowInputFormatWithDiagnosticInfo.h similarity index 100% rename from dbms/src/Processors/Formats/RowInputFormatWithDiagnosticInfo.h rename to dbms/Processors/Formats/RowInputFormatWithDiagnosticInfo.h diff --git a/dbms/src/Processors/IAccumulatingTransform.cpp b/dbms/Processors/IAccumulatingTransform.cpp similarity index 100% rename from dbms/src/Processors/IAccumulatingTransform.cpp rename to dbms/Processors/IAccumulatingTransform.cpp diff --git a/dbms/src/Processors/IAccumulatingTransform.h b/dbms/Processors/IAccumulatingTransform.h similarity index 100% rename from dbms/src/Processors/IAccumulatingTransform.h rename to dbms/Processors/IAccumulatingTransform.h diff --git a/dbms/src/Processors/IInflatingTransform.cpp b/dbms/Processors/IInflatingTransform.cpp similarity index 100% rename from dbms/src/Processors/IInflatingTransform.cpp rename to dbms/Processors/IInflatingTransform.cpp diff --git a/dbms/src/Processors/IInflatingTransform.h b/dbms/Processors/IInflatingTransform.h similarity index 100% rename from dbms/src/Processors/IInflatingTransform.h rename to dbms/Processors/IInflatingTransform.h diff --git a/dbms/src/Processors/IProcessor.cpp b/dbms/Processors/IProcessor.cpp similarity index 100% rename from dbms/src/Processors/IProcessor.cpp rename to dbms/Processors/IProcessor.cpp diff --git a/dbms/src/Processors/IProcessor.h b/dbms/Processors/IProcessor.h similarity index 100% rename from dbms/src/Processors/IProcessor.h rename to dbms/Processors/IProcessor.h diff --git a/dbms/src/Processors/ISimpleTransform.cpp b/dbms/Processors/ISimpleTransform.cpp similarity index 100% rename from dbms/src/Processors/ISimpleTransform.cpp rename to dbms/Processors/ISimpleTransform.cpp diff --git a/dbms/src/Processors/ISimpleTransform.h b/dbms/Processors/ISimpleTransform.h similarity index 100% rename from dbms/src/Processors/ISimpleTransform.h rename to dbms/Processors/ISimpleTransform.h diff --git a/dbms/src/Processors/ISink.cpp b/dbms/Processors/ISink.cpp similarity index 100% rename from dbms/src/Processors/ISink.cpp rename to dbms/Processors/ISink.cpp diff --git 
a/dbms/src/Processors/ISink.h b/dbms/Processors/ISink.h similarity index 100% rename from dbms/src/Processors/ISink.h rename to dbms/Processors/ISink.h diff --git a/dbms/src/Processors/ISource.cpp b/dbms/Processors/ISource.cpp similarity index 100% rename from dbms/src/Processors/ISource.cpp rename to dbms/Processors/ISource.cpp diff --git a/dbms/src/Processors/ISource.h b/dbms/Processors/ISource.h similarity index 100% rename from dbms/src/Processors/ISource.h rename to dbms/Processors/ISource.h diff --git a/dbms/src/Processors/LimitTransform.cpp b/dbms/Processors/LimitTransform.cpp similarity index 100% rename from dbms/src/Processors/LimitTransform.cpp rename to dbms/Processors/LimitTransform.cpp diff --git a/dbms/src/Processors/LimitTransform.h b/dbms/Processors/LimitTransform.h similarity index 100% rename from dbms/src/Processors/LimitTransform.h rename to dbms/Processors/LimitTransform.h diff --git a/dbms/src/Processors/NullSink.h b/dbms/Processors/NullSink.h similarity index 100% rename from dbms/src/Processors/NullSink.h rename to dbms/Processors/NullSink.h diff --git a/dbms/src/Processors/Pipe.cpp b/dbms/Processors/Pipe.cpp similarity index 100% rename from dbms/src/Processors/Pipe.cpp rename to dbms/Processors/Pipe.cpp diff --git a/dbms/src/Processors/Pipe.h b/dbms/Processors/Pipe.h similarity index 100% rename from dbms/src/Processors/Pipe.h rename to dbms/Processors/Pipe.h diff --git a/dbms/src/Processors/Port.cpp b/dbms/Processors/Port.cpp similarity index 100% rename from dbms/src/Processors/Port.cpp rename to dbms/Processors/Port.cpp diff --git a/dbms/src/Processors/Port.h b/dbms/Processors/Port.h similarity index 100% rename from dbms/src/Processors/Port.h rename to dbms/Processors/Port.h diff --git a/dbms/src/Processors/QueryPipeline.cpp b/dbms/Processors/QueryPipeline.cpp similarity index 100% rename from dbms/src/Processors/QueryPipeline.cpp rename to dbms/Processors/QueryPipeline.cpp diff --git a/dbms/src/Processors/QueryPipeline.h b/dbms/Processors/QueryPipeline.h similarity index 100% rename from dbms/src/Processors/QueryPipeline.h rename to dbms/Processors/QueryPipeline.h diff --git a/dbms/src/Processors/QueueBuffer.h b/dbms/Processors/QueueBuffer.h similarity index 100% rename from dbms/src/Processors/QueueBuffer.h rename to dbms/Processors/QueueBuffer.h diff --git a/dbms/src/Processors/ResizeProcessor.cpp b/dbms/Processors/ResizeProcessor.cpp similarity index 100% rename from dbms/src/Processors/ResizeProcessor.cpp rename to dbms/Processors/ResizeProcessor.cpp diff --git a/dbms/src/Processors/ResizeProcessor.h b/dbms/Processors/ResizeProcessor.h similarity index 100% rename from dbms/src/Processors/ResizeProcessor.h rename to dbms/Processors/ResizeProcessor.h diff --git a/dbms/src/Processors/RowsBeforeLimitCounter.h b/dbms/Processors/RowsBeforeLimitCounter.h similarity index 100% rename from dbms/src/Processors/RowsBeforeLimitCounter.h rename to dbms/Processors/RowsBeforeLimitCounter.h diff --git a/dbms/src/Processors/Sources/NullSource.h b/dbms/Processors/Sources/NullSource.h similarity index 100% rename from dbms/src/Processors/Sources/NullSource.h rename to dbms/Processors/Sources/NullSource.h diff --git a/dbms/src/Processors/Sources/SinkToOutputStream.cpp b/dbms/Processors/Sources/SinkToOutputStream.cpp similarity index 100% rename from dbms/src/Processors/Sources/SinkToOutputStream.cpp rename to dbms/Processors/Sources/SinkToOutputStream.cpp diff --git a/dbms/src/Processors/Sources/SinkToOutputStream.h b/dbms/Processors/Sources/SinkToOutputStream.h similarity 
index 100% rename from dbms/src/Processors/Sources/SinkToOutputStream.h rename to dbms/Processors/Sources/SinkToOutputStream.h diff --git a/dbms/src/Processors/Sources/SourceFromInputStream.cpp b/dbms/Processors/Sources/SourceFromInputStream.cpp similarity index 100% rename from dbms/src/Processors/Sources/SourceFromInputStream.cpp rename to dbms/Processors/Sources/SourceFromInputStream.cpp diff --git a/dbms/src/Processors/Sources/SourceFromInputStream.h b/dbms/Processors/Sources/SourceFromInputStream.h similarity index 100% rename from dbms/src/Processors/Sources/SourceFromInputStream.h rename to dbms/Processors/Sources/SourceFromInputStream.h diff --git a/dbms/src/Processors/Sources/SourceFromSingleChunk.h b/dbms/Processors/Sources/SourceFromSingleChunk.h similarity index 100% rename from dbms/src/Processors/Sources/SourceFromSingleChunk.h rename to dbms/Processors/Sources/SourceFromSingleChunk.h diff --git a/dbms/src/Processors/Sources/SourceWithProgress.cpp b/dbms/Processors/Sources/SourceWithProgress.cpp similarity index 100% rename from dbms/src/Processors/Sources/SourceWithProgress.cpp rename to dbms/Processors/Sources/SourceWithProgress.cpp diff --git a/dbms/src/Processors/Sources/SourceWithProgress.h b/dbms/Processors/Sources/SourceWithProgress.h similarity index 100% rename from dbms/src/Processors/Sources/SourceWithProgress.h rename to dbms/Processors/Sources/SourceWithProgress.h diff --git a/dbms/src/Processors/Transforms/AddingConstColumnTransform.h b/dbms/Processors/Transforms/AddingConstColumnTransform.h similarity index 100% rename from dbms/src/Processors/Transforms/AddingConstColumnTransform.h rename to dbms/Processors/Transforms/AddingConstColumnTransform.h diff --git a/dbms/src/Processors/Transforms/AddingMissedTransform.cpp b/dbms/Processors/Transforms/AddingMissedTransform.cpp similarity index 100% rename from dbms/src/Processors/Transforms/AddingMissedTransform.cpp rename to dbms/Processors/Transforms/AddingMissedTransform.cpp diff --git a/dbms/src/Processors/Transforms/AddingMissedTransform.h b/dbms/Processors/Transforms/AddingMissedTransform.h similarity index 100% rename from dbms/src/Processors/Transforms/AddingMissedTransform.h rename to dbms/Processors/Transforms/AddingMissedTransform.h diff --git a/dbms/src/Processors/Transforms/AggregatingTransform.cpp b/dbms/Processors/Transforms/AggregatingTransform.cpp similarity index 100% rename from dbms/src/Processors/Transforms/AggregatingTransform.cpp rename to dbms/Processors/Transforms/AggregatingTransform.cpp diff --git a/dbms/src/Processors/Transforms/AggregatingTransform.h b/dbms/Processors/Transforms/AggregatingTransform.h similarity index 100% rename from dbms/src/Processors/Transforms/AggregatingTransform.h rename to dbms/Processors/Transforms/AggregatingTransform.h diff --git a/dbms/src/Processors/Transforms/ConvertingTransform.cpp b/dbms/Processors/Transforms/ConvertingTransform.cpp similarity index 100% rename from dbms/src/Processors/Transforms/ConvertingTransform.cpp rename to dbms/Processors/Transforms/ConvertingTransform.cpp diff --git a/dbms/src/Processors/Transforms/ConvertingTransform.h b/dbms/Processors/Transforms/ConvertingTransform.h similarity index 100% rename from dbms/src/Processors/Transforms/ConvertingTransform.h rename to dbms/Processors/Transforms/ConvertingTransform.h diff --git a/dbms/src/Processors/Transforms/CreatingSetsTransform.cpp b/dbms/Processors/Transforms/CreatingSetsTransform.cpp similarity index 100% rename from dbms/src/Processors/Transforms/CreatingSetsTransform.cpp rename 
to dbms/Processors/Transforms/CreatingSetsTransform.cpp diff --git a/dbms/src/Processors/Transforms/CreatingSetsTransform.h b/dbms/Processors/Transforms/CreatingSetsTransform.h similarity index 100% rename from dbms/src/Processors/Transforms/CreatingSetsTransform.h rename to dbms/Processors/Transforms/CreatingSetsTransform.h diff --git a/dbms/src/Processors/Transforms/CubeTransform.cpp b/dbms/Processors/Transforms/CubeTransform.cpp similarity index 100% rename from dbms/src/Processors/Transforms/CubeTransform.cpp rename to dbms/Processors/Transforms/CubeTransform.cpp diff --git a/dbms/src/Processors/Transforms/CubeTransform.h b/dbms/Processors/Transforms/CubeTransform.h similarity index 100% rename from dbms/src/Processors/Transforms/CubeTransform.h rename to dbms/Processors/Transforms/CubeTransform.h diff --git a/dbms/src/Processors/Transforms/DistinctTransform.cpp b/dbms/Processors/Transforms/DistinctTransform.cpp similarity index 100% rename from dbms/src/Processors/Transforms/DistinctTransform.cpp rename to dbms/Processors/Transforms/DistinctTransform.cpp diff --git a/dbms/src/Processors/Transforms/DistinctTransform.h b/dbms/Processors/Transforms/DistinctTransform.h similarity index 100% rename from dbms/src/Processors/Transforms/DistinctTransform.h rename to dbms/Processors/Transforms/DistinctTransform.h diff --git a/dbms/src/Processors/Transforms/ExpressionTransform.cpp b/dbms/Processors/Transforms/ExpressionTransform.cpp similarity index 100% rename from dbms/src/Processors/Transforms/ExpressionTransform.cpp rename to dbms/Processors/Transforms/ExpressionTransform.cpp diff --git a/dbms/src/Processors/Transforms/ExpressionTransform.h b/dbms/Processors/Transforms/ExpressionTransform.h similarity index 100% rename from dbms/src/Processors/Transforms/ExpressionTransform.h rename to dbms/Processors/Transforms/ExpressionTransform.h diff --git a/dbms/src/Processors/Transforms/ExtremesTransform.cpp b/dbms/Processors/Transforms/ExtremesTransform.cpp similarity index 100% rename from dbms/src/Processors/Transforms/ExtremesTransform.cpp rename to dbms/Processors/Transforms/ExtremesTransform.cpp diff --git a/dbms/src/Processors/Transforms/ExtremesTransform.h b/dbms/Processors/Transforms/ExtremesTransform.h similarity index 100% rename from dbms/src/Processors/Transforms/ExtremesTransform.h rename to dbms/Processors/Transforms/ExtremesTransform.h diff --git a/dbms/src/Processors/Transforms/FillingTransform.cpp b/dbms/Processors/Transforms/FillingTransform.cpp similarity index 100% rename from dbms/src/Processors/Transforms/FillingTransform.cpp rename to dbms/Processors/Transforms/FillingTransform.cpp diff --git a/dbms/src/Processors/Transforms/FillingTransform.h b/dbms/Processors/Transforms/FillingTransform.h similarity index 100% rename from dbms/src/Processors/Transforms/FillingTransform.h rename to dbms/Processors/Transforms/FillingTransform.h diff --git a/dbms/src/Processors/Transforms/FilterTransform.cpp b/dbms/Processors/Transforms/FilterTransform.cpp similarity index 100% rename from dbms/src/Processors/Transforms/FilterTransform.cpp rename to dbms/Processors/Transforms/FilterTransform.cpp diff --git a/dbms/src/Processors/Transforms/FilterTransform.h b/dbms/Processors/Transforms/FilterTransform.h similarity index 100% rename from dbms/src/Processors/Transforms/FilterTransform.h rename to dbms/Processors/Transforms/FilterTransform.h diff --git a/dbms/src/Processors/Transforms/FinishSortingTransform.cpp b/dbms/Processors/Transforms/FinishSortingTransform.cpp similarity index 100% rename 
from dbms/src/Processors/Transforms/FinishSortingTransform.cpp
rename to dbms/Processors/Transforms/FinishSortingTransform.cpp
diff --git a/dbms/src/Processors/Transforms/FinishSortingTransform.h b/dbms/Processors/Transforms/FinishSortingTransform.h
similarity index 100%
rename from dbms/src/Processors/Transforms/FinishSortingTransform.h
rename to dbms/Processors/Transforms/FinishSortingTransform.h
diff --git a/dbms/src/Processors/Transforms/InflatingExpressionTransform.cpp b/dbms/Processors/Transforms/InflatingExpressionTransform.cpp
similarity index 100%
rename from dbms/src/Processors/Transforms/InflatingExpressionTransform.cpp
rename to dbms/Processors/Transforms/InflatingExpressionTransform.cpp
diff --git a/dbms/src/Processors/Transforms/InflatingExpressionTransform.h b/dbms/Processors/Transforms/InflatingExpressionTransform.h
similarity index 100%
rename from dbms/src/Processors/Transforms/InflatingExpressionTransform.h
rename to dbms/Processors/Transforms/InflatingExpressionTransform.h
diff --git a/dbms/src/Processors/Transforms/LimitByTransform.cpp b/dbms/Processors/Transforms/LimitByTransform.cpp
similarity index 100%
rename from dbms/src/Processors/Transforms/LimitByTransform.cpp
rename to dbms/Processors/Transforms/LimitByTransform.cpp
diff --git a/dbms/src/Processors/Transforms/LimitByTransform.h b/dbms/Processors/Transforms/LimitByTransform.h
similarity index 100%
rename from dbms/src/Processors/Transforms/LimitByTransform.h
rename to dbms/Processors/Transforms/LimitByTransform.h
diff --git a/dbms/src/Processors/Transforms/LimitsCheckingTransform.cpp b/dbms/Processors/Transforms/LimitsCheckingTransform.cpp
similarity index 100%
rename from dbms/src/Processors/Transforms/LimitsCheckingTransform.cpp
rename to dbms/Processors/Transforms/LimitsCheckingTransform.cpp
diff --git a/dbms/src/Processors/Transforms/LimitsCheckingTransform.h b/dbms/Processors/Transforms/LimitsCheckingTransform.h
similarity index 100%
rename from dbms/src/Processors/Transforms/LimitsCheckingTransform.h
rename to dbms/Processors/Transforms/LimitsCheckingTransform.h
diff --git a/dbms/src/Processors/Transforms/MaterializingTransform.cpp b/dbms/Processors/Transforms/MaterializingTransform.cpp
similarity index 100%
rename from dbms/src/Processors/Transforms/MaterializingTransform.cpp
rename to dbms/Processors/Transforms/MaterializingTransform.cpp
diff --git a/dbms/src/Processors/Transforms/MaterializingTransform.h b/dbms/Processors/Transforms/MaterializingTransform.h
similarity index 100%
rename from dbms/src/Processors/Transforms/MaterializingTransform.h
rename to dbms/Processors/Transforms/MaterializingTransform.h
diff --git a/dbms/src/Processors/Transforms/MergeSortingTransform.cpp b/dbms/Processors/Transforms/MergeSortingTransform.cpp
similarity index 100%
rename from dbms/src/Processors/Transforms/MergeSortingTransform.cpp
rename to dbms/Processors/Transforms/MergeSortingTransform.cpp
diff --git a/dbms/src/Processors/Transforms/MergeSortingTransform.h b/dbms/Processors/Transforms/MergeSortingTransform.h
similarity index 100%
rename from dbms/src/Processors/Transforms/MergeSortingTransform.h
rename to dbms/Processors/Transforms/MergeSortingTransform.h
diff --git a/dbms/src/Processors/Transforms/MergingAggregatedMemoryEfficientTransform.cpp b/dbms/Processors/Transforms/MergingAggregatedMemoryEfficientTransform.cpp
similarity index 100%
rename from dbms/src/Processors/Transforms/MergingAggregatedMemoryEfficientTransform.cpp
rename to dbms/Processors/Transforms/MergingAggregatedMemoryEfficientTransform.cpp
diff --git a/dbms/src/Processors/Transforms/MergingAggregatedMemoryEfficientTransform.h b/dbms/Processors/Transforms/MergingAggregatedMemoryEfficientTransform.h
similarity index 100%
rename from dbms/src/Processors/Transforms/MergingAggregatedMemoryEfficientTransform.h
rename to dbms/Processors/Transforms/MergingAggregatedMemoryEfficientTransform.h
diff --git a/dbms/src/Processors/Transforms/MergingAggregatedTransform.cpp b/dbms/Processors/Transforms/MergingAggregatedTransform.cpp
similarity index 100%
rename from dbms/src/Processors/Transforms/MergingAggregatedTransform.cpp
rename to dbms/Processors/Transforms/MergingAggregatedTransform.cpp
diff --git a/dbms/src/Processors/Transforms/MergingAggregatedTransform.h b/dbms/Processors/Transforms/MergingAggregatedTransform.h
similarity index 100%
rename from dbms/src/Processors/Transforms/MergingAggregatedTransform.h
rename to dbms/Processors/Transforms/MergingAggregatedTransform.h
diff --git a/dbms/src/Processors/Transforms/MergingSortedTransform.cpp b/dbms/Processors/Transforms/MergingSortedTransform.cpp
similarity index 100%
rename from dbms/src/Processors/Transforms/MergingSortedTransform.cpp
rename to dbms/Processors/Transforms/MergingSortedTransform.cpp
diff --git a/dbms/src/Processors/Transforms/MergingSortedTransform.h b/dbms/Processors/Transforms/MergingSortedTransform.h
similarity index 100%
rename from dbms/src/Processors/Transforms/MergingSortedTransform.h
rename to dbms/Processors/Transforms/MergingSortedTransform.h
diff --git a/dbms/src/Processors/Transforms/PartialSortingTransform.cpp b/dbms/Processors/Transforms/PartialSortingTransform.cpp
similarity index 100%
rename from dbms/src/Processors/Transforms/PartialSortingTransform.cpp
rename to dbms/Processors/Transforms/PartialSortingTransform.cpp
diff --git a/dbms/src/Processors/Transforms/PartialSortingTransform.h b/dbms/Processors/Transforms/PartialSortingTransform.h
similarity index 100%
rename from dbms/src/Processors/Transforms/PartialSortingTransform.h
rename to dbms/Processors/Transforms/PartialSortingTransform.h
diff --git a/dbms/src/Processors/Transforms/ReverseTransform.cpp b/dbms/Processors/Transforms/ReverseTransform.cpp
similarity index 100%
rename from dbms/src/Processors/Transforms/ReverseTransform.cpp
rename to dbms/Processors/Transforms/ReverseTransform.cpp
diff --git a/dbms/src/Processors/Transforms/ReverseTransform.h b/dbms/Processors/Transforms/ReverseTransform.h
similarity index 100%
rename from dbms/src/Processors/Transforms/ReverseTransform.h
rename to dbms/Processors/Transforms/ReverseTransform.h
diff --git a/dbms/src/Processors/Transforms/RollupTransform.cpp b/dbms/Processors/Transforms/RollupTransform.cpp
similarity index 100%
rename from dbms/src/Processors/Transforms/RollupTransform.cpp
rename to dbms/Processors/Transforms/RollupTransform.cpp
diff --git a/dbms/src/Processors/Transforms/RollupTransform.h b/dbms/Processors/Transforms/RollupTransform.h
similarity index 100%
rename from dbms/src/Processors/Transforms/RollupTransform.h
rename to dbms/Processors/Transforms/RollupTransform.h
diff --git a/dbms/src/Processors/Transforms/SortingTransform.cpp b/dbms/Processors/Transforms/SortingTransform.cpp
similarity index 100%
rename from dbms/src/Processors/Transforms/SortingTransform.cpp
rename to dbms/Processors/Transforms/SortingTransform.cpp
diff --git a/dbms/src/Processors/Transforms/SortingTransform.h b/dbms/Processors/Transforms/SortingTransform.h
similarity index 100%
rename from dbms/src/Processors/Transforms/SortingTransform.h
rename to dbms/Processors/Transforms/SortingTransform.h
diff --git a/dbms/src/Processors/Transforms/TotalsHavingTransform.cpp b/dbms/Processors/Transforms/TotalsHavingTransform.cpp
similarity index 100%
rename from dbms/src/Processors/Transforms/TotalsHavingTransform.cpp
rename to dbms/Processors/Transforms/TotalsHavingTransform.cpp
diff --git a/dbms/src/Processors/Transforms/TotalsHavingTransform.h b/dbms/Processors/Transforms/TotalsHavingTransform.h
similarity index 100%
rename from dbms/src/Processors/Transforms/TotalsHavingTransform.h
rename to dbms/Processors/Transforms/TotalsHavingTransform.h
diff --git a/dbms/src/Processors/printPipeline.h b/dbms/Processors/printPipeline.h
similarity index 100%
rename from dbms/src/Processors/printPipeline.h
rename to dbms/Processors/printPipeline.h
diff --git a/dbms/src/Processors/tests/CMakeLists.txt b/dbms/Processors/tests/CMakeLists.txt
similarity index 100%
rename from dbms/src/Processors/tests/CMakeLists.txt
rename to dbms/Processors/tests/CMakeLists.txt
diff --git a/dbms/src/Processors/tests/gtest_exception_on_incorrect_pipeline.cpp b/dbms/Processors/tests/gtest_exception_on_incorrect_pipeline.cpp
similarity index 100%
rename from dbms/src/Processors/tests/gtest_exception_on_incorrect_pipeline.cpp
rename to dbms/Processors/tests/gtest_exception_on_incorrect_pipeline.cpp
diff --git a/dbms/src/Processors/tests/processors_test.cpp b/dbms/Processors/tests/processors_test.cpp
similarity index 100%
rename from dbms/src/Processors/tests/processors_test.cpp
rename to dbms/Processors/tests/processors_test.cpp
diff --git a/dbms/src/Processors/tests/processors_test_aggregation.cpp b/dbms/Processors/tests/processors_test_aggregation.cpp
similarity index 100%
rename from dbms/src/Processors/tests/processors_test_aggregation.cpp
rename to dbms/Processors/tests/processors_test_aggregation.cpp
diff --git a/dbms/src/Processors/tests/processors_test_chain.cpp b/dbms/Processors/tests/processors_test_chain.cpp
similarity index 100%
rename from dbms/src/Processors/tests/processors_test_chain.cpp
rename to dbms/Processors/tests/processors_test_chain.cpp
diff --git a/dbms/src/Processors/tests/processors_test_expand_pipeline.cpp b/dbms/Processors/tests/processors_test_expand_pipeline.cpp
similarity index 100%
rename from dbms/src/Processors/tests/processors_test_expand_pipeline.cpp
rename to dbms/Processors/tests/processors_test_expand_pipeline.cpp
diff --git a/dbms/src/Processors/tests/processors_test_merge.cpp b/dbms/Processors/tests/processors_test_merge.cpp
similarity index 100%
rename from dbms/src/Processors/tests/processors_test_merge.cpp
rename to dbms/Processors/tests/processors_test_merge.cpp
diff --git a/dbms/src/Processors/tests/processors_test_merge_sorting_transform.cpp b/dbms/Processors/tests/processors_test_merge_sorting_transform.cpp
similarity index 100%
rename from dbms/src/Processors/tests/processors_test_merge_sorting_transform.cpp
rename to dbms/Processors/tests/processors_test_merge_sorting_transform.cpp
diff --git a/dbms/src/Processors/tests/processors_test_merging_sorted_transform.cpp b/dbms/Processors/tests/processors_test_merging_sorted_transform.cpp
similarity index 100%
rename from dbms/src/Processors/tests/processors_test_merging_sorted_transform.cpp
rename to dbms/Processors/tests/processors_test_merging_sorted_transform.cpp
diff --git a/dbms/src/Storages/AlterCommands.cpp b/dbms/Storages/AlterCommands.cpp
similarity index 100%
rename from dbms/src/Storages/AlterCommands.cpp
rename to dbms/Storages/AlterCommands.cpp
diff --git a/dbms/src/Storages/AlterCommands.h b/dbms/Storages/AlterCommands.h
similarity index 100%
rename from dbms/src/Storages/AlterCommands.h
rename to dbms/Storages/AlterCommands.h
diff --git a/dbms/src/Storages/CMakeLists.txt b/dbms/Storages/CMakeLists.txt
similarity index 100%
rename from dbms/src/Storages/CMakeLists.txt
rename to dbms/Storages/CMakeLists.txt
diff --git a/dbms/src/Storages/CheckResults.h b/dbms/Storages/CheckResults.h
similarity index 100%
rename from dbms/src/Storages/CheckResults.h
rename to dbms/Storages/CheckResults.h
diff --git a/dbms/src/Storages/ColumnCodec.h b/dbms/Storages/ColumnCodec.h
similarity index 100%
rename from dbms/src/Storages/ColumnCodec.h
rename to dbms/Storages/ColumnCodec.h
diff --git a/dbms/src/Storages/ColumnDefault.cpp b/dbms/Storages/ColumnDefault.cpp
similarity index 100%
rename from dbms/src/Storages/ColumnDefault.cpp
rename to dbms/Storages/ColumnDefault.cpp
diff --git a/dbms/src/Storages/ColumnDefault.h b/dbms/Storages/ColumnDefault.h
similarity index 100%
rename from dbms/src/Storages/ColumnDefault.h
rename to dbms/Storages/ColumnDefault.h
diff --git a/dbms/src/Storages/ColumnDependency.h b/dbms/Storages/ColumnDependency.h
similarity index 100%
rename from dbms/src/Storages/ColumnDependency.h
rename to dbms/Storages/ColumnDependency.h
diff --git a/dbms/src/Storages/ColumnsDescription.cpp b/dbms/Storages/ColumnsDescription.cpp
similarity index 100%
rename from dbms/src/Storages/ColumnsDescription.cpp
rename to dbms/Storages/ColumnsDescription.cpp
diff --git a/dbms/src/Storages/ColumnsDescription.h b/dbms/Storages/ColumnsDescription.h
similarity index 100%
rename from dbms/src/Storages/ColumnsDescription.h
rename to dbms/Storages/ColumnsDescription.h
diff --git a/dbms/src/Storages/CompressionCodecSelector.h b/dbms/Storages/CompressionCodecSelector.h
similarity index 100%
rename from dbms/src/Storages/CompressionCodecSelector.h
rename to dbms/Storages/CompressionCodecSelector.h
diff --git a/dbms/src/Storages/ConstraintsDescription.cpp b/dbms/Storages/ConstraintsDescription.cpp
similarity index 100%
rename from dbms/src/Storages/ConstraintsDescription.cpp
rename to dbms/Storages/ConstraintsDescription.cpp
diff --git a/dbms/src/Storages/ConstraintsDescription.h b/dbms/Storages/ConstraintsDescription.h
similarity index 100%
rename from dbms/src/Storages/ConstraintsDescription.h
rename to dbms/Storages/ConstraintsDescription.h
diff --git a/dbms/src/Storages/Distributed/DirectoryMonitor.cpp b/dbms/Storages/Distributed/DirectoryMonitor.cpp
similarity index 100%
rename from dbms/src/Storages/Distributed/DirectoryMonitor.cpp
rename to dbms/Storages/Distributed/DirectoryMonitor.cpp
diff --git a/dbms/src/Storages/Distributed/DirectoryMonitor.h b/dbms/Storages/Distributed/DirectoryMonitor.h
similarity index 100%
rename from dbms/src/Storages/Distributed/DirectoryMonitor.h
rename to dbms/Storages/Distributed/DirectoryMonitor.h
diff --git a/dbms/src/Storages/Distributed/DistributedBlockOutputStream.cpp b/dbms/Storages/Distributed/DistributedBlockOutputStream.cpp
similarity index 100%
rename from dbms/src/Storages/Distributed/DistributedBlockOutputStream.cpp
rename to dbms/Storages/Distributed/DistributedBlockOutputStream.cpp
diff --git a/dbms/src/Storages/Distributed/DistributedBlockOutputStream.h b/dbms/Storages/Distributed/DistributedBlockOutputStream.h
similarity index 100%
rename from dbms/src/Storages/Distributed/DistributedBlockOutputStream.h
rename to dbms/Storages/Distributed/DistributedBlockOutputStream.h
diff --git a/dbms/src/Storages/IStorage.cpp b/dbms/Storages/IStorage.cpp
similarity index 100%
rename from dbms/src/Storages/IStorage.cpp
rename to dbms/Storages/IStorage.cpp
diff --git a/dbms/src/Storages/IStorage.h b/dbms/Storages/IStorage.h
similarity index 100%
rename from dbms/src/Storages/IStorage.h
rename to dbms/Storages/IStorage.h
diff --git a/dbms/src/Storages/IStorage_fwd.h b/dbms/Storages/IStorage_fwd.h
similarity index 100%
rename from dbms/src/Storages/IStorage_fwd.h
rename to dbms/Storages/IStorage_fwd.h
diff --git a/dbms/src/Storages/IndicesDescription.cpp b/dbms/Storages/IndicesDescription.cpp
similarity index 100%
rename from dbms/src/Storages/IndicesDescription.cpp
rename to dbms/Storages/IndicesDescription.cpp
diff --git a/dbms/src/Storages/IndicesDescription.h b/dbms/Storages/IndicesDescription.h
similarity index 100%
rename from dbms/src/Storages/IndicesDescription.h
rename to dbms/Storages/IndicesDescription.h
diff --git a/dbms/src/Storages/Kafka/Buffer_fwd.h b/dbms/Storages/Kafka/Buffer_fwd.h
similarity index 100%
rename from dbms/src/Storages/Kafka/Buffer_fwd.h
rename to dbms/Storages/Kafka/Buffer_fwd.h
diff --git a/dbms/src/Storages/Kafka/KafkaBlockInputStream.cpp b/dbms/Storages/Kafka/KafkaBlockInputStream.cpp
similarity index 100%
rename from dbms/src/Storages/Kafka/KafkaBlockInputStream.cpp
rename to dbms/Storages/Kafka/KafkaBlockInputStream.cpp
diff --git a/dbms/src/Storages/Kafka/KafkaBlockInputStream.h b/dbms/Storages/Kafka/KafkaBlockInputStream.h
similarity index 100%
rename from dbms/src/Storages/Kafka/KafkaBlockInputStream.h
rename to dbms/Storages/Kafka/KafkaBlockInputStream.h
diff --git a/dbms/src/Storages/Kafka/KafkaBlockOutputStream.cpp b/dbms/Storages/Kafka/KafkaBlockOutputStream.cpp
similarity index 100%
rename from dbms/src/Storages/Kafka/KafkaBlockOutputStream.cpp
rename to dbms/Storages/Kafka/KafkaBlockOutputStream.cpp
diff --git a/dbms/src/Storages/Kafka/KafkaBlockOutputStream.h b/dbms/Storages/Kafka/KafkaBlockOutputStream.h
similarity index 100%
rename from dbms/src/Storages/Kafka/KafkaBlockOutputStream.h
rename to dbms/Storages/Kafka/KafkaBlockOutputStream.h
diff --git a/dbms/src/Storages/Kafka/KafkaSettings.cpp b/dbms/Storages/Kafka/KafkaSettings.cpp
similarity index 100%
rename from dbms/src/Storages/Kafka/KafkaSettings.cpp
rename to dbms/Storages/Kafka/KafkaSettings.cpp
diff --git a/dbms/src/Storages/Kafka/KafkaSettings.h b/dbms/Storages/Kafka/KafkaSettings.h
similarity index 100%
rename from dbms/src/Storages/Kafka/KafkaSettings.h
rename to dbms/Storages/Kafka/KafkaSettings.h
diff --git a/dbms/src/Storages/Kafka/ReadBufferFromKafkaConsumer.cpp b/dbms/Storages/Kafka/ReadBufferFromKafkaConsumer.cpp
similarity index 100%
rename from dbms/src/Storages/Kafka/ReadBufferFromKafkaConsumer.cpp
rename to dbms/Storages/Kafka/ReadBufferFromKafkaConsumer.cpp
diff --git a/dbms/src/Storages/Kafka/ReadBufferFromKafkaConsumer.h b/dbms/Storages/Kafka/ReadBufferFromKafkaConsumer.h
similarity index 100%
rename from dbms/src/Storages/Kafka/ReadBufferFromKafkaConsumer.h
rename to dbms/Storages/Kafka/ReadBufferFromKafkaConsumer.h
diff --git a/dbms/src/Storages/Kafka/StorageKafka.cpp b/dbms/Storages/Kafka/StorageKafka.cpp
similarity index 100%
rename from dbms/src/Storages/Kafka/StorageKafka.cpp
rename to dbms/Storages/Kafka/StorageKafka.cpp
diff --git a/dbms/src/Storages/Kafka/StorageKafka.h b/dbms/Storages/Kafka/StorageKafka.h
similarity index 100%
rename from dbms/src/Storages/Kafka/StorageKafka.h
rename to dbms/Storages/Kafka/StorageKafka.h
diff --git a/dbms/src/Storages/Kafka/WriteBufferToKafkaProducer.cpp b/dbms/Storages/Kafka/WriteBufferToKafkaProducer.cpp
similarity index 100%
rename from dbms/src/Storages/Kafka/WriteBufferToKafkaProducer.cpp
rename to dbms/Storages/Kafka/WriteBufferToKafkaProducer.cpp
diff --git a/dbms/src/Storages/Kafka/WriteBufferToKafkaProducer.h b/dbms/Storages/Kafka/WriteBufferToKafkaProducer.h
similarity index 100%
rename from dbms/src/Storages/Kafka/WriteBufferToKafkaProducer.h
rename to dbms/Storages/Kafka/WriteBufferToKafkaProducer.h
diff --git a/dbms/src/Storages/LiveView/LiveViewBlockInputStream.h b/dbms/Storages/LiveView/LiveViewBlockInputStream.h
similarity index 100%
rename from dbms/src/Storages/LiveView/LiveViewBlockInputStream.h
rename to dbms/Storages/LiveView/LiveViewBlockInputStream.h
diff --git a/dbms/src/Storages/LiveView/LiveViewBlockOutputStream.h b/dbms/Storages/LiveView/LiveViewBlockOutputStream.h
similarity index 100%
rename from dbms/src/Storages/LiveView/LiveViewBlockOutputStream.h
rename to dbms/Storages/LiveView/LiveViewBlockOutputStream.h
diff --git a/dbms/src/Storages/LiveView/LiveViewCommands.h b/dbms/Storages/LiveView/LiveViewCommands.h
similarity index 100%
rename from dbms/src/Storages/LiveView/LiveViewCommands.h
rename to dbms/Storages/LiveView/LiveViewCommands.h
diff --git a/dbms/src/Storages/LiveView/LiveViewEventsBlockInputStream.h b/dbms/Storages/LiveView/LiveViewEventsBlockInputStream.h
similarity index 100%
rename from dbms/src/Storages/LiveView/LiveViewEventsBlockInputStream.h
rename to dbms/Storages/LiveView/LiveViewEventsBlockInputStream.h
diff --git a/dbms/src/Storages/LiveView/StorageBlocks.h b/dbms/Storages/LiveView/StorageBlocks.h
similarity index 100%
rename from dbms/src/Storages/LiveView/StorageBlocks.h
rename to dbms/Storages/LiveView/StorageBlocks.h
diff --git a/dbms/src/Storages/LiveView/StorageLiveView.cpp b/dbms/Storages/LiveView/StorageLiveView.cpp
similarity index 100%
rename from dbms/src/Storages/LiveView/StorageLiveView.cpp
rename to dbms/Storages/LiveView/StorageLiveView.cpp
diff --git a/dbms/src/Storages/LiveView/StorageLiveView.h b/dbms/Storages/LiveView/StorageLiveView.h
similarity index 100%
rename from dbms/src/Storages/LiveView/StorageLiveView.h
rename to dbms/Storages/LiveView/StorageLiveView.h
diff --git a/dbms/src/Storages/MarkCache.h b/dbms/Storages/MarkCache.h
similarity index 100%
rename from dbms/src/Storages/MarkCache.h
rename to dbms/Storages/MarkCache.h
diff --git a/dbms/src/Storages/MergeTree/ActiveDataPartSet.cpp b/dbms/Storages/MergeTree/ActiveDataPartSet.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/ActiveDataPartSet.cpp
rename to dbms/Storages/MergeTree/ActiveDataPartSet.cpp
diff --git a/dbms/src/Storages/MergeTree/ActiveDataPartSet.h b/dbms/Storages/MergeTree/ActiveDataPartSet.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/ActiveDataPartSet.h
rename to dbms/Storages/MergeTree/ActiveDataPartSet.h
diff --git a/dbms/src/Storages/MergeTree/AllMergeSelector.cpp b/dbms/Storages/MergeTree/AllMergeSelector.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/AllMergeSelector.cpp
rename to dbms/Storages/MergeTree/AllMergeSelector.cpp
diff --git a/dbms/src/Storages/MergeTree/AllMergeSelector.h b/dbms/Storages/MergeTree/AllMergeSelector.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/AllMergeSelector.h
rename to dbms/Storages/MergeTree/AllMergeSelector.h
diff --git a/dbms/src/Storages/MergeTree/BackgroundProcessingPool.cpp b/dbms/Storages/MergeTree/BackgroundProcessingPool.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/BackgroundProcessingPool.cpp
rename to dbms/Storages/MergeTree/BackgroundProcessingPool.cpp
diff --git a/dbms/src/Storages/MergeTree/BackgroundProcessingPool.h b/dbms/Storages/MergeTree/BackgroundProcessingPool.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/BackgroundProcessingPool.h
rename to dbms/Storages/MergeTree/BackgroundProcessingPool.h
diff --git a/dbms/src/Storages/MergeTree/BoolMask.cpp b/dbms/Storages/MergeTree/BoolMask.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/BoolMask.cpp
rename to dbms/Storages/MergeTree/BoolMask.cpp
diff --git a/dbms/src/Storages/MergeTree/BoolMask.h b/dbms/Storages/MergeTree/BoolMask.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/BoolMask.h
rename to dbms/Storages/MergeTree/BoolMask.h
diff --git a/dbms/src/Storages/MergeTree/DataPartsExchange.cpp b/dbms/Storages/MergeTree/DataPartsExchange.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/DataPartsExchange.cpp
rename to dbms/Storages/MergeTree/DataPartsExchange.cpp
diff --git a/dbms/src/Storages/MergeTree/DataPartsExchange.h b/dbms/Storages/MergeTree/DataPartsExchange.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/DataPartsExchange.h
rename to dbms/Storages/MergeTree/DataPartsExchange.h
diff --git a/dbms/src/Storages/MergeTree/EphemeralLockInZooKeeper.cpp b/dbms/Storages/MergeTree/EphemeralLockInZooKeeper.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/EphemeralLockInZooKeeper.cpp
rename to dbms/Storages/MergeTree/EphemeralLockInZooKeeper.cpp
diff --git a/dbms/src/Storages/MergeTree/EphemeralLockInZooKeeper.h b/dbms/Storages/MergeTree/EphemeralLockInZooKeeper.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/EphemeralLockInZooKeeper.h
rename to dbms/Storages/MergeTree/EphemeralLockInZooKeeper.h
diff --git a/dbms/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/dbms/Storages/MergeTree/IMergeTreeDataPart.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/IMergeTreeDataPart.cpp
rename to dbms/Storages/MergeTree/IMergeTreeDataPart.cpp
diff --git a/dbms/src/Storages/MergeTree/IMergeTreeDataPart.h b/dbms/Storages/MergeTree/IMergeTreeDataPart.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/IMergeTreeDataPart.h
rename to dbms/Storages/MergeTree/IMergeTreeDataPart.h
diff --git a/dbms/src/Storages/MergeTree/IMergeTreeDataPartWriter.cpp b/dbms/Storages/MergeTree/IMergeTreeDataPartWriter.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/IMergeTreeDataPartWriter.cpp
rename to dbms/Storages/MergeTree/IMergeTreeDataPartWriter.cpp
diff --git a/dbms/src/Storages/MergeTree/IMergeTreeDataPartWriter.h b/dbms/Storages/MergeTree/IMergeTreeDataPartWriter.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/IMergeTreeDataPartWriter.h
rename to dbms/Storages/MergeTree/IMergeTreeDataPartWriter.h
diff --git a/dbms/src/Storages/MergeTree/IMergeTreeReader.cpp b/dbms/Storages/MergeTree/IMergeTreeReader.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/IMergeTreeReader.cpp
rename to dbms/Storages/MergeTree/IMergeTreeReader.cpp
diff --git a/dbms/src/Storages/MergeTree/IMergeTreeReader.h b/dbms/Storages/MergeTree/IMergeTreeReader.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/IMergeTreeReader.h
rename to dbms/Storages/MergeTree/IMergeTreeReader.h
diff --git a/dbms/src/Storages/MergeTree/IMergedBlockOutputStream.cpp b/dbms/Storages/MergeTree/IMergedBlockOutputStream.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/IMergedBlockOutputStream.cpp
rename to dbms/Storages/MergeTree/IMergedBlockOutputStream.cpp
diff --git a/dbms/src/Storages/MergeTree/IMergedBlockOutputStream.h b/dbms/Storages/MergeTree/IMergedBlockOutputStream.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/IMergedBlockOutputStream.h
rename to dbms/Storages/MergeTree/IMergedBlockOutputStream.h
diff --git a/dbms/src/Storages/MergeTree/KeyCondition.cpp b/dbms/Storages/MergeTree/KeyCondition.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/KeyCondition.cpp
rename to dbms/Storages/MergeTree/KeyCondition.cpp
diff --git a/dbms/src/Storages/MergeTree/KeyCondition.h b/dbms/Storages/MergeTree/KeyCondition.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/KeyCondition.h
rename to dbms/Storages/MergeTree/KeyCondition.h
diff --git a/dbms/src/Storages/MergeTree/LevelMergeSelector.cpp b/dbms/Storages/MergeTree/LevelMergeSelector.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/LevelMergeSelector.cpp
rename to dbms/Storages/MergeTree/LevelMergeSelector.cpp
diff --git a/dbms/src/Storages/MergeTree/LevelMergeSelector.h b/dbms/Storages/MergeTree/LevelMergeSelector.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/LevelMergeSelector.h
rename to dbms/Storages/MergeTree/LevelMergeSelector.h
diff --git a/dbms/src/Storages/MergeTree/MarkRange.h b/dbms/Storages/MergeTree/MarkRange.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/MarkRange.h
rename to dbms/Storages/MergeTree/MarkRange.h
diff --git a/dbms/src/Storages/MergeTree/MergeList.cpp b/dbms/Storages/MergeTree/MergeList.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeList.cpp
rename to dbms/Storages/MergeTree/MergeList.cpp
diff --git a/dbms/src/Storages/MergeTree/MergeList.h b/dbms/Storages/MergeTree/MergeList.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeList.h
rename to dbms/Storages/MergeTree/MergeList.h
diff --git a/dbms/src/Storages/MergeTree/MergeSelector.h b/dbms/Storages/MergeTree/MergeSelector.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeSelector.h
rename to dbms/Storages/MergeTree/MergeSelector.h
diff --git a/dbms/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp b/dbms/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp
rename to dbms/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp
diff --git a/dbms/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.h b/dbms/Storages/MergeTree/MergeTreeBaseSelectProcessor.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.h
rename to dbms/Storages/MergeTree/MergeTreeBaseSelectProcessor.h
diff --git a/dbms/src/Storages/MergeTree/MergeTreeBlockOutputStream.cpp b/dbms/Storages/MergeTree/MergeTreeBlockOutputStream.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeBlockOutputStream.cpp
rename to dbms/Storages/MergeTree/MergeTreeBlockOutputStream.cpp
diff --git a/dbms/src/Storages/MergeTree/MergeTreeBlockOutputStream.h b/dbms/Storages/MergeTree/MergeTreeBlockOutputStream.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeBlockOutputStream.h
rename to dbms/Storages/MergeTree/MergeTreeBlockOutputStream.h
diff --git a/dbms/src/Storages/MergeTree/MergeTreeBlockReadUtils.cpp b/dbms/Storages/MergeTree/MergeTreeBlockReadUtils.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeBlockReadUtils.cpp
rename to dbms/Storages/MergeTree/MergeTreeBlockReadUtils.cpp
diff --git a/dbms/src/Storages/MergeTree/MergeTreeBlockReadUtils.h b/dbms/Storages/MergeTree/MergeTreeBlockReadUtils.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeBlockReadUtils.h
rename to dbms/Storages/MergeTree/MergeTreeBlockReadUtils.h
diff --git a/dbms/src/Storages/MergeTree/MergeTreeData.cpp b/dbms/Storages/MergeTree/MergeTreeData.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeData.cpp
rename to dbms/Storages/MergeTree/MergeTreeData.cpp
diff --git a/dbms/src/Storages/MergeTree/MergeTreeData.h b/dbms/Storages/MergeTree/MergeTreeData.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeData.h
rename to dbms/Storages/MergeTree/MergeTreeData.h
diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataFormatVersion.h b/dbms/Storages/MergeTree/MergeTreeDataFormatVersion.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeDataFormatVersion.h
rename to dbms/Storages/MergeTree/MergeTreeDataFormatVersion.h
diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp b/dbms/Storages/MergeTree/MergeTreeDataMergerMutator.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp
rename to dbms/Storages/MergeTree/MergeTreeDataMergerMutator.cpp
diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataMergerMutator.h b/dbms/Storages/MergeTree/MergeTreeDataMergerMutator.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeDataMergerMutator.h
rename to dbms/Storages/MergeTree/MergeTreeDataMergerMutator.h
diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataPartChecksum.cpp b/dbms/Storages/MergeTree/MergeTreeDataPartChecksum.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeDataPartChecksum.cpp
rename to dbms/Storages/MergeTree/MergeTreeDataPartChecksum.cpp
diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataPartChecksum.h b/dbms/Storages/MergeTree/MergeTreeDataPartChecksum.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeDataPartChecksum.h
rename to dbms/Storages/MergeTree/MergeTreeDataPartChecksum.h
diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataPartCompact.cpp b/dbms/Storages/MergeTree/MergeTreeDataPartCompact.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeDataPartCompact.cpp
rename to dbms/Storages/MergeTree/MergeTreeDataPartCompact.cpp
diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataPartCompact.h b/dbms/Storages/MergeTree/MergeTreeDataPartCompact.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeDataPartCompact.h
rename to dbms/Storages/MergeTree/MergeTreeDataPartCompact.h
diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataPartTTLInfo.cpp b/dbms/Storages/MergeTree/MergeTreeDataPartTTLInfo.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeDataPartTTLInfo.cpp
rename to dbms/Storages/MergeTree/MergeTreeDataPartTTLInfo.cpp
diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataPartTTLInfo.h b/dbms/Storages/MergeTree/MergeTreeDataPartTTLInfo.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeDataPartTTLInfo.h
rename to dbms/Storages/MergeTree/MergeTreeDataPartTTLInfo.h
diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataPartType.cpp b/dbms/Storages/MergeTree/MergeTreeDataPartType.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeDataPartType.cpp
rename to dbms/Storages/MergeTree/MergeTreeDataPartType.cpp
diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataPartType.h b/dbms/Storages/MergeTree/MergeTreeDataPartType.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeDataPartType.h
rename to dbms/Storages/MergeTree/MergeTreeDataPartType.h
diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataPartWide.cpp b/dbms/Storages/MergeTree/MergeTreeDataPartWide.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeDataPartWide.cpp
rename to dbms/Storages/MergeTree/MergeTreeDataPartWide.cpp
diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataPartWide.h b/dbms/Storages/MergeTree/MergeTreeDataPartWide.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeDataPartWide.h
rename to dbms/Storages/MergeTree/MergeTreeDataPartWide.h
diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp b/dbms/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp
rename to dbms/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp
diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.h b/dbms/Storages/MergeTree/MergeTreeDataPartWriterCompact.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.h
rename to dbms/Storages/MergeTree/MergeTreeDataPartWriterCompact.h
diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp b/dbms/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp
rename to dbms/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp
diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataPartWriterWide.h b/dbms/Storages/MergeTree/MergeTreeDataPartWriterWide.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeDataPartWriterWide.h
rename to dbms/Storages/MergeTree/MergeTreeDataPartWriterWide.h
diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp b/dbms/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp
rename to dbms/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp
diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h b/dbms/Storages/MergeTree/MergeTreeDataSelectExecutor.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h
rename to dbms/Storages/MergeTree/MergeTreeDataSelectExecutor.h
diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataWriter.cpp b/dbms/Storages/MergeTree/MergeTreeDataWriter.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeDataWriter.cpp
rename to dbms/Storages/MergeTree/MergeTreeDataWriter.cpp
diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataWriter.h b/dbms/Storages/MergeTree/MergeTreeDataWriter.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeDataWriter.h
rename to dbms/Storages/MergeTree/MergeTreeDataWriter.h
diff --git a/dbms/src/Storages/MergeTree/MergeTreeIOSettings.h b/dbms/Storages/MergeTree/MergeTreeIOSettings.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeIOSettings.h
rename to dbms/Storages/MergeTree/MergeTreeIOSettings.h
diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexAggregatorBloomFilter.cpp b/dbms/Storages/MergeTree/MergeTreeIndexAggregatorBloomFilter.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeIndexAggregatorBloomFilter.cpp
rename to dbms/Storages/MergeTree/MergeTreeIndexAggregatorBloomFilter.cpp
diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexAggregatorBloomFilter.h b/dbms/Storages/MergeTree/MergeTreeIndexAggregatorBloomFilter.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeIndexAggregatorBloomFilter.h
rename to dbms/Storages/MergeTree/MergeTreeIndexAggregatorBloomFilter.h
diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp b/dbms/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp
rename to dbms/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp
diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexBloomFilter.h b/dbms/Storages/MergeTree/MergeTreeIndexBloomFilter.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeIndexBloomFilter.h
rename to dbms/Storages/MergeTree/MergeTreeIndexBloomFilter.h
diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.cpp b/dbms/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.cpp
rename to dbms/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.cpp
diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.h b/dbms/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.h
rename to dbms/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.h
diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexFullText.cpp b/dbms/Storages/MergeTree/MergeTreeIndexFullText.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeIndexFullText.cpp
rename to dbms/Storages/MergeTree/MergeTreeIndexFullText.cpp
diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexFullText.h b/dbms/Storages/MergeTree/MergeTreeIndexFullText.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeIndexFullText.h
rename to dbms/Storages/MergeTree/MergeTreeIndexFullText.h
diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexGranularity.cpp b/dbms/Storages/MergeTree/MergeTreeIndexGranularity.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeIndexGranularity.cpp
rename to dbms/Storages/MergeTree/MergeTreeIndexGranularity.cpp
diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexGranularity.h b/dbms/Storages/MergeTree/MergeTreeIndexGranularity.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeIndexGranularity.h
rename to dbms/Storages/MergeTree/MergeTreeIndexGranularity.h
diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.cpp b/dbms/Storages/MergeTree/MergeTreeIndexGranularityInfo.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.cpp
rename to dbms/Storages/MergeTree/MergeTreeIndexGranularityInfo.cpp
diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.h b/dbms/Storages/MergeTree/MergeTreeIndexGranularityInfo.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.h
rename to dbms/Storages/MergeTree/MergeTreeIndexGranularityInfo.h
diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.h.gch b/dbms/Storages/MergeTree/MergeTreeIndexGranularityInfo.h.gch
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.h.gch
rename to dbms/Storages/MergeTree/MergeTreeIndexGranularityInfo.h.gch
diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexGranuleBloomFilter.cpp b/dbms/Storages/MergeTree/MergeTreeIndexGranuleBloomFilter.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeIndexGranuleBloomFilter.cpp
rename to dbms/Storages/MergeTree/MergeTreeIndexGranuleBloomFilter.cpp
diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexGranuleBloomFilter.h b/dbms/Storages/MergeTree/MergeTreeIndexGranuleBloomFilter.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeIndexGranuleBloomFilter.h
rename to dbms/Storages/MergeTree/MergeTreeIndexGranuleBloomFilter.h
diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexMinMax.cpp b/dbms/Storages/MergeTree/MergeTreeIndexMinMax.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeIndexMinMax.cpp
rename to dbms/Storages/MergeTree/MergeTreeIndexMinMax.cpp
diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexMinMax.h b/dbms/Storages/MergeTree/MergeTreeIndexMinMax.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeIndexMinMax.h
rename to dbms/Storages/MergeTree/MergeTreeIndexMinMax.h
diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexReader.cpp b/dbms/Storages/MergeTree/MergeTreeIndexReader.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeIndexReader.cpp
rename to dbms/Storages/MergeTree/MergeTreeIndexReader.cpp
diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexReader.h b/dbms/Storages/MergeTree/MergeTreeIndexReader.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeIndexReader.h
rename to dbms/Storages/MergeTree/MergeTreeIndexReader.h
diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexSet.cpp b/dbms/Storages/MergeTree/MergeTreeIndexSet.cpp
similarity index 99%
rename from dbms/src/Storages/MergeTree/MergeTreeIndexSet.cpp
rename to dbms/Storages/MergeTree/MergeTreeIndexSet.cpp
index f68184e2691..e888fb38822 100644
--- a/dbms/src/Storages/MergeTree/MergeTreeIndexSet.cpp
+++ b/dbms/Storages/MergeTree/MergeTreeIndexSet.cpp
@@ -239,7 +239,7 @@ MergeTreeIndexConditionSet::MergeTreeIndexConditionSet(
         return;
 
     /// Replace logical functions with bit functions.
-    /// Working with UInt8: last bit = can be true, previous = can be false (Like dbms/src/Storages/MergeTree/BoolMask.h).
+    /// Working with UInt8: last bit = can be true, previous = can be false (Like dbms/Storages/MergeTree/BoolMask.h).
     traverseAST(expression_ast);
 
     auto syntax_analyzer_result = SyntaxAnalyzer(context).analyze(
diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexSet.h b/dbms/Storages/MergeTree/MergeTreeIndexSet.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeIndexSet.h
rename to dbms/Storages/MergeTree/MergeTreeIndexSet.h
diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndices.cpp b/dbms/Storages/MergeTree/MergeTreeIndices.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeIndices.cpp
rename to dbms/Storages/MergeTree/MergeTreeIndices.cpp
diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndices.h b/dbms/Storages/MergeTree/MergeTreeIndices.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeIndices.h
rename to dbms/Storages/MergeTree/MergeTreeIndices.h
diff --git a/dbms/src/Storages/MergeTree/MergeTreeMarksLoader.cpp b/dbms/Storages/MergeTree/MergeTreeMarksLoader.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeMarksLoader.cpp
rename to dbms/Storages/MergeTree/MergeTreeMarksLoader.cpp
diff --git a/dbms/src/Storages/MergeTree/MergeTreeMarksLoader.h b/dbms/Storages/MergeTree/MergeTreeMarksLoader.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeMarksLoader.h
rename to dbms/Storages/MergeTree/MergeTreeMarksLoader.h
diff --git a/dbms/src/Storages/MergeTree/MergeTreeMutationEntry.cpp b/dbms/Storages/MergeTree/MergeTreeMutationEntry.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeMutationEntry.cpp
rename to dbms/Storages/MergeTree/MergeTreeMutationEntry.cpp
diff --git a/dbms/src/Storages/MergeTree/MergeTreeMutationEntry.h b/dbms/Storages/MergeTree/MergeTreeMutationEntry.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeMutationEntry.h
rename to dbms/Storages/MergeTree/MergeTreeMutationEntry.h
diff --git a/dbms/src/Storages/MergeTree/MergeTreeMutationStatus.h b/dbms/Storages/MergeTree/MergeTreeMutationStatus.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeMutationStatus.h
rename to dbms/Storages/MergeTree/MergeTreeMutationStatus.h
diff --git a/dbms/src/Storages/MergeTree/MergeTreePartInfo.cpp b/dbms/Storages/MergeTree/MergeTreePartInfo.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreePartInfo.cpp
rename to dbms/Storages/MergeTree/MergeTreePartInfo.cpp
diff --git a/dbms/src/Storages/MergeTree/MergeTreePartInfo.h b/dbms/Storages/MergeTree/MergeTreePartInfo.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreePartInfo.h
rename to dbms/Storages/MergeTree/MergeTreePartInfo.h
diff --git a/dbms/src/Storages/MergeTree/MergeTreePartition.cpp b/dbms/Storages/MergeTree/MergeTreePartition.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreePartition.cpp
rename to dbms/Storages/MergeTree/MergeTreePartition.cpp
diff --git a/dbms/src/Storages/MergeTree/MergeTreePartition.h b/dbms/Storages/MergeTree/MergeTreePartition.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreePartition.h
rename to dbms/Storages/MergeTree/MergeTreePartition.h
diff --git a/dbms/src/Storages/MergeTree/MergeTreePartsMover.cpp b/dbms/Storages/MergeTree/MergeTreePartsMover.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreePartsMover.cpp
rename to dbms/Storages/MergeTree/MergeTreePartsMover.cpp
diff --git a/dbms/src/Storages/MergeTree/MergeTreePartsMover.h b/dbms/Storages/MergeTree/MergeTreePartsMover.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreePartsMover.h
rename to dbms/Storages/MergeTree/MergeTreePartsMover.h
diff --git a/dbms/src/Storages/MergeTree/MergeTreeRangeReader.cpp b/dbms/Storages/MergeTree/MergeTreeRangeReader.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeRangeReader.cpp
rename to dbms/Storages/MergeTree/MergeTreeRangeReader.cpp
diff --git a/dbms/src/Storages/MergeTree/MergeTreeRangeReader.h b/dbms/Storages/MergeTree/MergeTreeRangeReader.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeRangeReader.h
rename to dbms/Storages/MergeTree/MergeTreeRangeReader.h
diff --git a/dbms/src/Storages/MergeTree/MergeTreeReadPool.cpp b/dbms/Storages/MergeTree/MergeTreeReadPool.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeReadPool.cpp
rename to dbms/Storages/MergeTree/MergeTreeReadPool.cpp
diff --git a/dbms/src/Storages/MergeTree/MergeTreeReadPool.h b/dbms/Storages/MergeTree/MergeTreeReadPool.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeReadPool.h
rename to dbms/Storages/MergeTree/MergeTreeReadPool.h
diff --git a/dbms/src/Storages/MergeTree/MergeTreeReaderCompact.cpp b/dbms/Storages/MergeTree/MergeTreeReaderCompact.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeReaderCompact.cpp
rename to dbms/Storages/MergeTree/MergeTreeReaderCompact.cpp
diff --git a/dbms/src/Storages/MergeTree/MergeTreeReaderCompact.h b/dbms/Storages/MergeTree/MergeTreeReaderCompact.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeReaderCompact.h
rename to dbms/Storages/MergeTree/MergeTreeReaderCompact.h
diff --git a/dbms/src/Storages/MergeTree/MergeTreeReaderStream.cpp b/dbms/Storages/MergeTree/MergeTreeReaderStream.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeReaderStream.cpp
rename to dbms/Storages/MergeTree/MergeTreeReaderStream.cpp
diff --git a/dbms/src/Storages/MergeTree/MergeTreeReaderStream.h b/dbms/Storages/MergeTree/MergeTreeReaderStream.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeReaderStream.h
rename to dbms/Storages/MergeTree/MergeTreeReaderStream.h
diff --git a/dbms/src/Storages/MergeTree/MergeTreeReaderWide.cpp b/dbms/Storages/MergeTree/MergeTreeReaderWide.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeReaderWide.cpp
rename to dbms/Storages/MergeTree/MergeTreeReaderWide.cpp
diff --git a/dbms/src/Storages/MergeTree/MergeTreeReaderWide.h b/dbms/Storages/MergeTree/MergeTreeReaderWide.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeReaderWide.h
rename to dbms/Storages/MergeTree/MergeTreeReaderWide.h
diff --git a/dbms/src/Storages/MergeTree/MergeTreeReverseSelectProcessor.cpp b/dbms/Storages/MergeTree/MergeTreeReverseSelectProcessor.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeReverseSelectProcessor.cpp
rename to dbms/Storages/MergeTree/MergeTreeReverseSelectProcessor.cpp
diff --git a/dbms/src/Storages/MergeTree/MergeTreeReverseSelectProcessor.h b/dbms/Storages/MergeTree/MergeTreeReverseSelectProcessor.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeReverseSelectProcessor.h
rename to dbms/Storages/MergeTree/MergeTreeReverseSelectProcessor.h
diff --git a/dbms/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp b/dbms/Storages/MergeTree/MergeTreeSelectProcessor.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp
rename to dbms/Storages/MergeTree/MergeTreeSelectProcessor.cpp
diff --git a/dbms/src/Storages/MergeTree/MergeTreeSelectProcessor.h b/dbms/Storages/MergeTree/MergeTreeSelectProcessor.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeSelectProcessor.h
rename to dbms/Storages/MergeTree/MergeTreeSelectProcessor.h
diff --git a/dbms/src/Storages/MergeTree/MergeTreeSequentialBlockInputStream.cpp b/dbms/Storages/MergeTree/MergeTreeSequentialBlockInputStream.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeSequentialBlockInputStream.cpp
rename to dbms/Storages/MergeTree/MergeTreeSequentialBlockInputStream.cpp
diff --git a/dbms/src/Storages/MergeTree/MergeTreeSequentialBlockInputStream.h b/dbms/Storages/MergeTree/MergeTreeSequentialBlockInputStream.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeSequentialBlockInputStream.h
rename to dbms/Storages/MergeTree/MergeTreeSequentialBlockInputStream.h
diff --git a/dbms/src/Storages/MergeTree/MergeTreeSettings.cpp b/dbms/Storages/MergeTree/MergeTreeSettings.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeSettings.cpp
rename to dbms/Storages/MergeTree/MergeTreeSettings.cpp
diff --git a/dbms/src/Storages/MergeTree/MergeTreeSettings.h b/dbms/Storages/MergeTree/MergeTreeSettings.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeSettings.h
rename to dbms/Storages/MergeTree/MergeTreeSettings.h
diff --git a/dbms/src/Storages/MergeTree/MergeTreeThreadSelectBlockInputProcessor.cpp b/dbms/Storages/MergeTree/MergeTreeThreadSelectBlockInputProcessor.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeThreadSelectBlockInputProcessor.cpp
rename to dbms/Storages/MergeTree/MergeTreeThreadSelectBlockInputProcessor.cpp
diff --git a/dbms/src/Storages/MergeTree/MergeTreeThreadSelectBlockInputProcessor.h b/dbms/Storages/MergeTree/MergeTreeThreadSelectBlockInputProcessor.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeThreadSelectBlockInputProcessor.h
rename to dbms/Storages/MergeTree/MergeTreeThreadSelectBlockInputProcessor.h
diff --git a/dbms/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp b/dbms/Storages/MergeTree/MergeTreeWhereOptimizer.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp
rename to dbms/Storages/MergeTree/MergeTreeWhereOptimizer.cpp
diff --git a/dbms/src/Storages/MergeTree/MergeTreeWhereOptimizer.h b/dbms/Storages/MergeTree/MergeTreeWhereOptimizer.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergeTreeWhereOptimizer.h
rename to dbms/Storages/MergeTree/MergeTreeWhereOptimizer.h
diff --git a/dbms/src/Storages/MergeTree/MergedBlockOutputStream.cpp b/dbms/Storages/MergeTree/MergedBlockOutputStream.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergedBlockOutputStream.cpp
rename to dbms/Storages/MergeTree/MergedBlockOutputStream.cpp
diff --git a/dbms/src/Storages/MergeTree/MergedBlockOutputStream.h b/dbms/Storages/MergeTree/MergedBlockOutputStream.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergedBlockOutputStream.h
rename to dbms/Storages/MergeTree/MergedBlockOutputStream.h
diff --git a/dbms/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp b/dbms/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp
rename to dbms/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp
diff --git a/dbms/src/Storages/MergeTree/MergedColumnOnlyOutputStream.h b/dbms/Storages/MergeTree/MergedColumnOnlyOutputStream.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/MergedColumnOnlyOutputStream.h
rename to dbms/Storages/MergeTree/MergedColumnOnlyOutputStream.h
diff --git a/dbms/src/Storages/MergeTree/PartDestinationType.h b/dbms/Storages/MergeTree/PartDestinationType.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/PartDestinationType.h
rename to dbms/Storages/MergeTree/PartDestinationType.h
diff --git a/dbms/src/Storages/MergeTree/RPNBuilder.h b/dbms/Storages/MergeTree/RPNBuilder.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/RPNBuilder.h
rename to dbms/Storages/MergeTree/RPNBuilder.h
diff --git a/dbms/src/Storages/MergeTree/RangesInDataPart.h b/dbms/Storages/MergeTree/RangesInDataPart.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/RangesInDataPart.h
rename to dbms/Storages/MergeTree/RangesInDataPart.h
diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeAddress.cpp b/dbms/Storages/MergeTree/ReplicatedMergeTreeAddress.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/ReplicatedMergeTreeAddress.cpp
rename to dbms/Storages/MergeTree/ReplicatedMergeTreeAddress.cpp
diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeAddress.h b/dbms/Storages/MergeTree/ReplicatedMergeTreeAddress.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/ReplicatedMergeTreeAddress.h
rename to dbms/Storages/MergeTree/ReplicatedMergeTreeAddress.h
diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeAltersSequence.cpp b/dbms/Storages/MergeTree/ReplicatedMergeTreeAltersSequence.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/ReplicatedMergeTreeAltersSequence.cpp
rename to dbms/Storages/MergeTree/ReplicatedMergeTreeAltersSequence.cpp
diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeAltersSequence.h b/dbms/Storages/MergeTree/ReplicatedMergeTreeAltersSequence.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/ReplicatedMergeTreeAltersSequence.h
rename to dbms/Storages/MergeTree/ReplicatedMergeTreeAltersSequence.h
diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.cpp b/dbms/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.cpp
rename to dbms/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.cpp
diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.h b/dbms/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.h
rename to dbms/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.h
diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp b/dbms/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp
rename to dbms/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp
diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.h b/dbms/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.h
rename to dbms/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.h
diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeLogEntry.cpp b/dbms/Storages/MergeTree/ReplicatedMergeTreeLogEntry.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/ReplicatedMergeTreeLogEntry.cpp
rename to dbms/Storages/MergeTree/ReplicatedMergeTreeLogEntry.cpp
diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeLogEntry.h b/dbms/Storages/MergeTree/ReplicatedMergeTreeLogEntry.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/ReplicatedMergeTreeLogEntry.h
rename to dbms/Storages/MergeTree/ReplicatedMergeTreeLogEntry.h
diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeMutationEntry.cpp b/dbms/Storages/MergeTree/ReplicatedMergeTreeMutationEntry.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/ReplicatedMergeTreeMutationEntry.cpp
rename to dbms/Storages/MergeTree/ReplicatedMergeTreeMutationEntry.cpp
diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeMutationEntry.h b/dbms/Storages/MergeTree/ReplicatedMergeTreeMutationEntry.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/ReplicatedMergeTreeMutationEntry.h
rename to dbms/Storages/MergeTree/ReplicatedMergeTreeMutationEntry.h
diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp b/dbms/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp
rename to dbms/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp
diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.h b/dbms/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.h
rename to dbms/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.h
diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreePartHeader.cpp b/dbms/Storages/MergeTree/ReplicatedMergeTreePartHeader.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/ReplicatedMergeTreePartHeader.cpp
rename to dbms/Storages/MergeTree/ReplicatedMergeTreePartHeader.cpp
diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreePartHeader.h b/dbms/Storages/MergeTree/ReplicatedMergeTreePartHeader.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/ReplicatedMergeTreePartHeader.h
rename to dbms/Storages/MergeTree/ReplicatedMergeTreePartHeader.h
diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp b/dbms/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp
rename to dbms/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp
diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h b/dbms/Storages/MergeTree/ReplicatedMergeTreeQueue.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h
rename to dbms/Storages/MergeTree/ReplicatedMergeTreeQueue.h
diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeQuorumAddedParts.h b/dbms/Storages/MergeTree/ReplicatedMergeTreeQuorumAddedParts.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/ReplicatedMergeTreeQuorumAddedParts.h
rename to dbms/Storages/MergeTree/ReplicatedMergeTreeQuorumAddedParts.h
diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeQuorumEntry.h b/dbms/Storages/MergeTree/ReplicatedMergeTreeQuorumEntry.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/ReplicatedMergeTreeQuorumEntry.h
rename to dbms/Storages/MergeTree/ReplicatedMergeTreeQuorumEntry.h
diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp b/dbms/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp
rename to dbms/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp
diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.h b/dbms/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.h
rename to dbms/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.h
diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeTableMetadata.cpp b/dbms/Storages/MergeTree/ReplicatedMergeTreeTableMetadata.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/ReplicatedMergeTreeTableMetadata.cpp
rename to dbms/Storages/MergeTree/ReplicatedMergeTreeTableMetadata.cpp
diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeTableMetadata.h b/dbms/Storages/MergeTree/ReplicatedMergeTreeTableMetadata.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/ReplicatedMergeTreeTableMetadata.h
rename to dbms/Storages/MergeTree/ReplicatedMergeTreeTableMetadata.h
diff --git a/dbms/src/Storages/MergeTree/SimpleMergeSelector.cpp b/dbms/Storages/MergeTree/SimpleMergeSelector.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/SimpleMergeSelector.cpp
rename to dbms/Storages/MergeTree/SimpleMergeSelector.cpp
diff --git a/dbms/src/Storages/MergeTree/SimpleMergeSelector.h b/dbms/Storages/MergeTree/SimpleMergeSelector.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/SimpleMergeSelector.h
rename to dbms/Storages/MergeTree/SimpleMergeSelector.h
diff --git a/dbms/src/Storages/MergeTree/StorageFromMergeTreeDataPart.h b/dbms/Storages/MergeTree/StorageFromMergeTreeDataPart.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/StorageFromMergeTreeDataPart.h
rename to dbms/Storages/MergeTree/StorageFromMergeTreeDataPart.h
diff --git a/dbms/src/Storages/MergeTree/TTLMergeSelector.cpp b/dbms/Storages/MergeTree/TTLMergeSelector.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/TTLMergeSelector.cpp
rename to dbms/Storages/MergeTree/TTLMergeSelector.cpp
diff --git a/dbms/src/Storages/MergeTree/TTLMergeSelector.h b/dbms/Storages/MergeTree/TTLMergeSelector.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/TTLMergeSelector.h
rename to dbms/Storages/MergeTree/TTLMergeSelector.h
diff --git a/dbms/src/Storages/MergeTree/checkDataPart.cpp b/dbms/Storages/MergeTree/checkDataPart.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/checkDataPart.cpp
rename to dbms/Storages/MergeTree/checkDataPart.cpp
diff --git a/dbms/src/Storages/MergeTree/checkDataPart.h b/dbms/Storages/MergeTree/checkDataPart.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/checkDataPart.h
rename to dbms/Storages/MergeTree/checkDataPart.h
diff --git a/dbms/src/Storages/MergeTree/localBackup.cpp b/dbms/Storages/MergeTree/localBackup.cpp
similarity index 100%
rename from dbms/src/Storages/MergeTree/localBackup.cpp
rename to dbms/Storages/MergeTree/localBackup.cpp
diff --git a/dbms/src/Storages/MergeTree/localBackup.h b/dbms/Storages/MergeTree/localBackup.h
similarity index 100%
rename from dbms/src/Storages/MergeTree/localBackup.h
rename to dbms/Storages/MergeTree/localBackup.h
diff --git a/dbms/src/Storages/MergeTree/registerStorageMergeTree.cpp b/dbms/Storages/MergeTree/registerStorageMergeTree.cpp
similarity index 99%
rename from dbms/src/Storages/MergeTree/registerStorageMergeTree.cpp
rename to dbms/Storages/MergeTree/registerStorageMergeTree.cpp
index e6a6beff57d..79ac28eb145 100644
--- a/dbms/src/Storages/MergeTree/registerStorageMergeTree.cpp
+++ b/dbms/Storages/MergeTree/registerStorageMergeTree.cpp
@@ -280,7 +280,7 @@ Careful choice of the primary key is extremely important for processing short-ti
 Optional sampling expression can be specified in the SAMPLE BY clause. It is used to implement the SAMPLE clause in a SELECT query for approximate query execution.
 Sampling expression must be one of the elements of the primary key tuple. For example, if your primary key is (CounterID, EventDate, intHash64(UserID)), your sampling expression might be intHash64(UserID).
 
-Engine settings can be specified in the SETTINGS clause. Full list is in the source code in the 'dbms/src/Storages/MergeTree/MergeTreeSettings.h' file.
+Engine settings can be specified in the SETTINGS clause. Full list is in the source code in the 'dbms/Storages/MergeTree/MergeTreeSettings.h' file.
 E.g. you can specify the index (primary key) granularity with SETTINGS index_granularity = 8192.
 
 Examples:
diff --git a/dbms/src/Storages/MutationCommands.cpp b/dbms/Storages/MutationCommands.cpp
similarity index 100%
rename from dbms/src/Storages/MutationCommands.cpp
rename to dbms/Storages/MutationCommands.cpp
diff --git a/dbms/src/Storages/MutationCommands.h b/dbms/Storages/MutationCommands.h
similarity index 100%
rename from dbms/src/Storages/MutationCommands.h
rename to dbms/Storages/MutationCommands.h
diff --git a/dbms/src/Storages/PartitionCommands.cpp b/dbms/Storages/PartitionCommands.cpp
similarity index 100%
rename from dbms/src/Storages/PartitionCommands.cpp
rename to dbms/Storages/PartitionCommands.cpp
diff --git a/dbms/src/Storages/PartitionCommands.h b/dbms/Storages/PartitionCommands.h
similarity index 100%
rename from dbms/src/Storages/PartitionCommands.h
rename to dbms/Storages/PartitionCommands.h
diff --git a/dbms/src/Storages/ReadInOrderOptimizer.cpp b/dbms/Storages/ReadInOrderOptimizer.cpp
similarity index 100%
rename from dbms/src/Storages/ReadInOrderOptimizer.cpp
rename to dbms/Storages/ReadInOrderOptimizer.cpp
diff --git a/dbms/src/Storages/ReadInOrderOptimizer.h b/dbms/Storages/ReadInOrderOptimizer.h
similarity index 100%
rename from dbms/src/Storages/ReadInOrderOptimizer.h
rename to dbms/Storages/ReadInOrderOptimizer.h
diff --git a/dbms/src/Storages/SelectQueryInfo.h b/dbms/Storages/SelectQueryInfo.h
similarity index 100%
rename from dbms/src/Storages/SelectQueryInfo.h
rename to dbms/Storages/SelectQueryInfo.h
diff --git a/dbms/src/Storages/StorageBuffer.cpp b/dbms/Storages/StorageBuffer.cpp
similarity index 100%
rename from dbms/src/Storages/StorageBuffer.cpp
rename to dbms/Storages/StorageBuffer.cpp
diff --git a/dbms/src/Storages/StorageBuffer.h b/dbms/Storages/StorageBuffer.h
similarity index 100%
rename from dbms/src/Storages/StorageBuffer.h
rename to dbms/Storages/StorageBuffer.h
diff --git a/dbms/src/Storages/StorageDictionary.cpp b/dbms/Storages/StorageDictionary.cpp
similarity index 100%
rename from dbms/src/Storages/StorageDictionary.cpp
rename to dbms/Storages/StorageDictionary.cpp
diff --git a/dbms/src/Storages/StorageDictionary.h b/dbms/Storages/StorageDictionary.h
similarity index 100%
rename from dbms/src/Storages/StorageDictionary.h
rename to dbms/Storages/StorageDictionary.h
diff --git a/dbms/src/Storages/StorageDistributed.cpp b/dbms/Storages/StorageDistributed.cpp
b/dbms/Storages/StorageDistributed.cpp similarity index 100% rename from dbms/src/Storages/StorageDistributed.cpp rename to dbms/Storages/StorageDistributed.cpp diff --git a/dbms/src/Storages/StorageDistributed.h b/dbms/Storages/StorageDistributed.h similarity index 100% rename from dbms/src/Storages/StorageDistributed.h rename to dbms/Storages/StorageDistributed.h diff --git a/dbms/src/Storages/StorageFactory.cpp b/dbms/Storages/StorageFactory.cpp similarity index 100% rename from dbms/src/Storages/StorageFactory.cpp rename to dbms/Storages/StorageFactory.cpp diff --git a/dbms/src/Storages/StorageFactory.h b/dbms/Storages/StorageFactory.h similarity index 100% rename from dbms/src/Storages/StorageFactory.h rename to dbms/Storages/StorageFactory.h diff --git a/dbms/src/Storages/StorageFile.cpp b/dbms/Storages/StorageFile.cpp similarity index 100% rename from dbms/src/Storages/StorageFile.cpp rename to dbms/Storages/StorageFile.cpp diff --git a/dbms/src/Storages/StorageFile.h b/dbms/Storages/StorageFile.h similarity index 100% rename from dbms/src/Storages/StorageFile.h rename to dbms/Storages/StorageFile.h diff --git a/dbms/src/Storages/StorageGenerateRandom.cpp b/dbms/Storages/StorageGenerateRandom.cpp similarity index 100% rename from dbms/src/Storages/StorageGenerateRandom.cpp rename to dbms/Storages/StorageGenerateRandom.cpp diff --git a/dbms/src/Storages/StorageGenerateRandom.h b/dbms/Storages/StorageGenerateRandom.h similarity index 100% rename from dbms/src/Storages/StorageGenerateRandom.h rename to dbms/Storages/StorageGenerateRandom.h diff --git a/dbms/src/Storages/StorageHDFS.cpp b/dbms/Storages/StorageHDFS.cpp similarity index 100% rename from dbms/src/Storages/StorageHDFS.cpp rename to dbms/Storages/StorageHDFS.cpp diff --git a/dbms/src/Storages/StorageHDFS.h b/dbms/Storages/StorageHDFS.h similarity index 100% rename from dbms/src/Storages/StorageHDFS.h rename to dbms/Storages/StorageHDFS.h diff --git a/dbms/src/Storages/StorageInMemoryMetadata.cpp b/dbms/Storages/StorageInMemoryMetadata.cpp similarity index 100% rename from dbms/src/Storages/StorageInMemoryMetadata.cpp rename to dbms/Storages/StorageInMemoryMetadata.cpp diff --git a/dbms/src/Storages/StorageInMemoryMetadata.h b/dbms/Storages/StorageInMemoryMetadata.h similarity index 100% rename from dbms/src/Storages/StorageInMemoryMetadata.h rename to dbms/Storages/StorageInMemoryMetadata.h diff --git a/dbms/src/Storages/StorageInput.cpp b/dbms/Storages/StorageInput.cpp similarity index 100% rename from dbms/src/Storages/StorageInput.cpp rename to dbms/Storages/StorageInput.cpp diff --git a/dbms/src/Storages/StorageInput.h b/dbms/Storages/StorageInput.h similarity index 100% rename from dbms/src/Storages/StorageInput.h rename to dbms/Storages/StorageInput.h diff --git a/dbms/src/Storages/StorageJoin.cpp b/dbms/Storages/StorageJoin.cpp similarity index 100% rename from dbms/src/Storages/StorageJoin.cpp rename to dbms/Storages/StorageJoin.cpp diff --git a/dbms/src/Storages/StorageJoin.h b/dbms/Storages/StorageJoin.h similarity index 100% rename from dbms/src/Storages/StorageJoin.h rename to dbms/Storages/StorageJoin.h diff --git a/dbms/src/Storages/StorageLog.cpp b/dbms/Storages/StorageLog.cpp similarity index 100% rename from dbms/src/Storages/StorageLog.cpp rename to dbms/Storages/StorageLog.cpp diff --git a/dbms/src/Storages/StorageLog.h b/dbms/Storages/StorageLog.h similarity index 100% rename from dbms/src/Storages/StorageLog.h rename to dbms/Storages/StorageLog.h diff --git a/dbms/src/Storages/StorageLogSettings.cpp 
b/dbms/Storages/StorageLogSettings.cpp similarity index 100% rename from dbms/src/Storages/StorageLogSettings.cpp rename to dbms/Storages/StorageLogSettings.cpp diff --git a/dbms/src/Storages/StorageLogSettings.h b/dbms/Storages/StorageLogSettings.h similarity index 100% rename from dbms/src/Storages/StorageLogSettings.h rename to dbms/Storages/StorageLogSettings.h diff --git a/dbms/src/Storages/StorageMaterializedView.cpp b/dbms/Storages/StorageMaterializedView.cpp similarity index 100% rename from dbms/src/Storages/StorageMaterializedView.cpp rename to dbms/Storages/StorageMaterializedView.cpp diff --git a/dbms/src/Storages/StorageMaterializedView.h b/dbms/Storages/StorageMaterializedView.h similarity index 100% rename from dbms/src/Storages/StorageMaterializedView.h rename to dbms/Storages/StorageMaterializedView.h diff --git a/dbms/src/Storages/StorageMemory.cpp b/dbms/Storages/StorageMemory.cpp similarity index 100% rename from dbms/src/Storages/StorageMemory.cpp rename to dbms/Storages/StorageMemory.cpp diff --git a/dbms/src/Storages/StorageMemory.h b/dbms/Storages/StorageMemory.h similarity index 100% rename from dbms/src/Storages/StorageMemory.h rename to dbms/Storages/StorageMemory.h diff --git a/dbms/src/Storages/StorageMerge.cpp b/dbms/Storages/StorageMerge.cpp similarity index 100% rename from dbms/src/Storages/StorageMerge.cpp rename to dbms/Storages/StorageMerge.cpp diff --git a/dbms/src/Storages/StorageMerge.h b/dbms/Storages/StorageMerge.h similarity index 100% rename from dbms/src/Storages/StorageMerge.h rename to dbms/Storages/StorageMerge.h diff --git a/dbms/src/Storages/StorageMergeTree.cpp b/dbms/Storages/StorageMergeTree.cpp similarity index 100% rename from dbms/src/Storages/StorageMergeTree.cpp rename to dbms/Storages/StorageMergeTree.cpp diff --git a/dbms/src/Storages/StorageMergeTree.h b/dbms/Storages/StorageMergeTree.h similarity index 100% rename from dbms/src/Storages/StorageMergeTree.h rename to dbms/Storages/StorageMergeTree.h diff --git a/dbms/src/Storages/StorageMySQL.cpp b/dbms/Storages/StorageMySQL.cpp similarity index 100% rename from dbms/src/Storages/StorageMySQL.cpp rename to dbms/Storages/StorageMySQL.cpp diff --git a/dbms/src/Storages/StorageMySQL.h b/dbms/Storages/StorageMySQL.h similarity index 100% rename from dbms/src/Storages/StorageMySQL.h rename to dbms/Storages/StorageMySQL.h diff --git a/dbms/src/Storages/StorageNull.cpp b/dbms/Storages/StorageNull.cpp similarity index 100% rename from dbms/src/Storages/StorageNull.cpp rename to dbms/Storages/StorageNull.cpp diff --git a/dbms/src/Storages/StorageNull.h b/dbms/Storages/StorageNull.h similarity index 100% rename from dbms/src/Storages/StorageNull.h rename to dbms/Storages/StorageNull.h diff --git a/dbms/src/Storages/StorageReplicatedMergeTree.cpp b/dbms/Storages/StorageReplicatedMergeTree.cpp similarity index 100% rename from dbms/src/Storages/StorageReplicatedMergeTree.cpp rename to dbms/Storages/StorageReplicatedMergeTree.cpp diff --git a/dbms/src/Storages/StorageReplicatedMergeTree.h b/dbms/Storages/StorageReplicatedMergeTree.h similarity index 100% rename from dbms/src/Storages/StorageReplicatedMergeTree.h rename to dbms/Storages/StorageReplicatedMergeTree.h diff --git a/dbms/src/Storages/StorageS3.cpp b/dbms/Storages/StorageS3.cpp similarity index 100% rename from dbms/src/Storages/StorageS3.cpp rename to dbms/Storages/StorageS3.cpp diff --git a/dbms/src/Storages/StorageS3.h b/dbms/Storages/StorageS3.h similarity index 100% rename from dbms/src/Storages/StorageS3.h rename to 
dbms/Storages/StorageS3.h diff --git a/dbms/src/Storages/StorageSet.cpp b/dbms/Storages/StorageSet.cpp similarity index 100% rename from dbms/src/Storages/StorageSet.cpp rename to dbms/Storages/StorageSet.cpp diff --git a/dbms/src/Storages/StorageSet.h b/dbms/Storages/StorageSet.h similarity index 100% rename from dbms/src/Storages/StorageSet.h rename to dbms/Storages/StorageSet.h diff --git a/dbms/src/Storages/StorageStripeLog.cpp b/dbms/Storages/StorageStripeLog.cpp similarity index 100% rename from dbms/src/Storages/StorageStripeLog.cpp rename to dbms/Storages/StorageStripeLog.cpp diff --git a/dbms/src/Storages/StorageStripeLog.h b/dbms/Storages/StorageStripeLog.h similarity index 100% rename from dbms/src/Storages/StorageStripeLog.h rename to dbms/Storages/StorageStripeLog.h diff --git a/dbms/src/Storages/StorageTinyLog.cpp b/dbms/Storages/StorageTinyLog.cpp similarity index 100% rename from dbms/src/Storages/StorageTinyLog.cpp rename to dbms/Storages/StorageTinyLog.cpp diff --git a/dbms/src/Storages/StorageTinyLog.h b/dbms/Storages/StorageTinyLog.h similarity index 100% rename from dbms/src/Storages/StorageTinyLog.h rename to dbms/Storages/StorageTinyLog.h diff --git a/dbms/src/Storages/StorageURL.cpp b/dbms/Storages/StorageURL.cpp similarity index 100% rename from dbms/src/Storages/StorageURL.cpp rename to dbms/Storages/StorageURL.cpp diff --git a/dbms/src/Storages/StorageURL.h b/dbms/Storages/StorageURL.h similarity index 100% rename from dbms/src/Storages/StorageURL.h rename to dbms/Storages/StorageURL.h diff --git a/dbms/src/Storages/StorageValues.cpp b/dbms/Storages/StorageValues.cpp similarity index 100% rename from dbms/src/Storages/StorageValues.cpp rename to dbms/Storages/StorageValues.cpp diff --git a/dbms/src/Storages/StorageValues.h b/dbms/Storages/StorageValues.h similarity index 100% rename from dbms/src/Storages/StorageValues.h rename to dbms/Storages/StorageValues.h diff --git a/dbms/src/Storages/StorageView.cpp b/dbms/Storages/StorageView.cpp similarity index 100% rename from dbms/src/Storages/StorageView.cpp rename to dbms/Storages/StorageView.cpp diff --git a/dbms/src/Storages/StorageView.h b/dbms/Storages/StorageView.h similarity index 100% rename from dbms/src/Storages/StorageView.h rename to dbms/Storages/StorageView.h diff --git a/dbms/src/Storages/StorageXDBC.cpp b/dbms/Storages/StorageXDBC.cpp similarity index 100% rename from dbms/src/Storages/StorageXDBC.cpp rename to dbms/Storages/StorageXDBC.cpp diff --git a/dbms/src/Storages/StorageXDBC.h b/dbms/Storages/StorageXDBC.h similarity index 100% rename from dbms/src/Storages/StorageXDBC.h rename to dbms/Storages/StorageXDBC.h diff --git a/dbms/src/Storages/System/CMakeLists.txt b/dbms/Storages/System/CMakeLists.txt similarity index 100% rename from dbms/src/Storages/System/CMakeLists.txt rename to dbms/Storages/System/CMakeLists.txt diff --git a/dbms/src/Storages/System/IStorageSystemOneBlock.h b/dbms/Storages/System/IStorageSystemOneBlock.h similarity index 100% rename from dbms/src/Storages/System/IStorageSystemOneBlock.h rename to dbms/Storages/System/IStorageSystemOneBlock.h diff --git a/dbms/src/Storages/System/StorageSystemAggregateFunctionCombinators.cpp b/dbms/Storages/System/StorageSystemAggregateFunctionCombinators.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemAggregateFunctionCombinators.cpp rename to dbms/Storages/System/StorageSystemAggregateFunctionCombinators.cpp diff --git a/dbms/src/Storages/System/StorageSystemAggregateFunctionCombinators.h 
b/dbms/Storages/System/StorageSystemAggregateFunctionCombinators.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemAggregateFunctionCombinators.h rename to dbms/Storages/System/StorageSystemAggregateFunctionCombinators.h diff --git a/dbms/src/Storages/System/StorageSystemAsynchronousMetrics.cpp b/dbms/Storages/System/StorageSystemAsynchronousMetrics.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemAsynchronousMetrics.cpp rename to dbms/Storages/System/StorageSystemAsynchronousMetrics.cpp diff --git a/dbms/src/Storages/System/StorageSystemAsynchronousMetrics.h b/dbms/Storages/System/StorageSystemAsynchronousMetrics.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemAsynchronousMetrics.h rename to dbms/Storages/System/StorageSystemAsynchronousMetrics.h diff --git a/dbms/src/Storages/System/StorageSystemBuildOptions.cpp b/dbms/Storages/System/StorageSystemBuildOptions.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemBuildOptions.cpp rename to dbms/Storages/System/StorageSystemBuildOptions.cpp diff --git a/dbms/src/Storages/System/StorageSystemBuildOptions.generated.cpp.in b/dbms/Storages/System/StorageSystemBuildOptions.generated.cpp.in similarity index 100% rename from dbms/src/Storages/System/StorageSystemBuildOptions.generated.cpp.in rename to dbms/Storages/System/StorageSystemBuildOptions.generated.cpp.in diff --git a/dbms/src/Storages/System/StorageSystemBuildOptions.h b/dbms/Storages/System/StorageSystemBuildOptions.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemBuildOptions.h rename to dbms/Storages/System/StorageSystemBuildOptions.h diff --git a/dbms/src/Storages/System/StorageSystemClusters.cpp b/dbms/Storages/System/StorageSystemClusters.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemClusters.cpp rename to dbms/Storages/System/StorageSystemClusters.cpp diff --git a/dbms/src/Storages/System/StorageSystemClusters.h b/dbms/Storages/System/StorageSystemClusters.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemClusters.h rename to dbms/Storages/System/StorageSystemClusters.h diff --git a/dbms/src/Storages/System/StorageSystemCollations.cpp b/dbms/Storages/System/StorageSystemCollations.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemCollations.cpp rename to dbms/Storages/System/StorageSystemCollations.cpp diff --git a/dbms/src/Storages/System/StorageSystemCollations.h b/dbms/Storages/System/StorageSystemCollations.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemCollations.h rename to dbms/Storages/System/StorageSystemCollations.h diff --git a/dbms/src/Storages/System/StorageSystemColumns.cpp b/dbms/Storages/System/StorageSystemColumns.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemColumns.cpp rename to dbms/Storages/System/StorageSystemColumns.cpp diff --git a/dbms/src/Storages/System/StorageSystemColumns.h b/dbms/Storages/System/StorageSystemColumns.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemColumns.h rename to dbms/Storages/System/StorageSystemColumns.h diff --git a/dbms/src/Storages/System/StorageSystemContributors.cpp b/dbms/Storages/System/StorageSystemContributors.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemContributors.cpp rename to dbms/Storages/System/StorageSystemContributors.cpp diff --git a/dbms/src/Storages/System/StorageSystemContributors.generated.cpp 
b/dbms/Storages/System/StorageSystemContributors.generated.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemContributors.generated.cpp rename to dbms/Storages/System/StorageSystemContributors.generated.cpp diff --git a/dbms/src/Storages/System/StorageSystemContributors.h b/dbms/Storages/System/StorageSystemContributors.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemContributors.h rename to dbms/Storages/System/StorageSystemContributors.h diff --git a/dbms/src/Storages/System/StorageSystemContributors.sh b/dbms/Storages/System/StorageSystemContributors.sh similarity index 100% rename from dbms/src/Storages/System/StorageSystemContributors.sh rename to dbms/Storages/System/StorageSystemContributors.sh diff --git a/dbms/src/Storages/System/StorageSystemDataTypeFamilies.cpp b/dbms/Storages/System/StorageSystemDataTypeFamilies.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemDataTypeFamilies.cpp rename to dbms/Storages/System/StorageSystemDataTypeFamilies.cpp diff --git a/dbms/src/Storages/System/StorageSystemDataTypeFamilies.h b/dbms/Storages/System/StorageSystemDataTypeFamilies.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemDataTypeFamilies.h rename to dbms/Storages/System/StorageSystemDataTypeFamilies.h diff --git a/dbms/src/Storages/System/StorageSystemDatabases.cpp b/dbms/Storages/System/StorageSystemDatabases.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemDatabases.cpp rename to dbms/Storages/System/StorageSystemDatabases.cpp diff --git a/dbms/src/Storages/System/StorageSystemDatabases.h b/dbms/Storages/System/StorageSystemDatabases.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemDatabases.h rename to dbms/Storages/System/StorageSystemDatabases.h diff --git a/dbms/src/Storages/System/StorageSystemDetachedParts.cpp b/dbms/Storages/System/StorageSystemDetachedParts.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemDetachedParts.cpp rename to dbms/Storages/System/StorageSystemDetachedParts.cpp diff --git a/dbms/src/Storages/System/StorageSystemDetachedParts.h b/dbms/Storages/System/StorageSystemDetachedParts.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemDetachedParts.h rename to dbms/Storages/System/StorageSystemDetachedParts.h diff --git a/dbms/src/Storages/System/StorageSystemDictionaries.cpp b/dbms/Storages/System/StorageSystemDictionaries.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemDictionaries.cpp rename to dbms/Storages/System/StorageSystemDictionaries.cpp diff --git a/dbms/src/Storages/System/StorageSystemDictionaries.h b/dbms/Storages/System/StorageSystemDictionaries.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemDictionaries.h rename to dbms/Storages/System/StorageSystemDictionaries.h diff --git a/dbms/src/Storages/System/StorageSystemDisks.cpp b/dbms/Storages/System/StorageSystemDisks.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemDisks.cpp rename to dbms/Storages/System/StorageSystemDisks.cpp diff --git a/dbms/src/Storages/System/StorageSystemDisks.h b/dbms/Storages/System/StorageSystemDisks.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemDisks.h rename to dbms/Storages/System/StorageSystemDisks.h diff --git a/dbms/src/Storages/System/StorageSystemEvents.cpp b/dbms/Storages/System/StorageSystemEvents.cpp similarity index 100% rename from 
dbms/src/Storages/System/StorageSystemEvents.cpp rename to dbms/Storages/System/StorageSystemEvents.cpp diff --git a/dbms/src/Storages/System/StorageSystemEvents.h b/dbms/Storages/System/StorageSystemEvents.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemEvents.h rename to dbms/Storages/System/StorageSystemEvents.h diff --git a/dbms/src/Storages/System/StorageSystemFormats.cpp b/dbms/Storages/System/StorageSystemFormats.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemFormats.cpp rename to dbms/Storages/System/StorageSystemFormats.cpp diff --git a/dbms/src/Storages/System/StorageSystemFormats.h b/dbms/Storages/System/StorageSystemFormats.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemFormats.h rename to dbms/Storages/System/StorageSystemFormats.h diff --git a/dbms/src/Storages/System/StorageSystemFunctions.cpp b/dbms/Storages/System/StorageSystemFunctions.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemFunctions.cpp rename to dbms/Storages/System/StorageSystemFunctions.cpp diff --git a/dbms/src/Storages/System/StorageSystemFunctions.h b/dbms/Storages/System/StorageSystemFunctions.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemFunctions.h rename to dbms/Storages/System/StorageSystemFunctions.h diff --git a/dbms/src/Storages/System/StorageSystemGraphite.cpp b/dbms/Storages/System/StorageSystemGraphite.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemGraphite.cpp rename to dbms/Storages/System/StorageSystemGraphite.cpp diff --git a/dbms/src/Storages/System/StorageSystemGraphite.h b/dbms/Storages/System/StorageSystemGraphite.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemGraphite.h rename to dbms/Storages/System/StorageSystemGraphite.h diff --git a/dbms/src/Storages/System/StorageSystemMacros.cpp b/dbms/Storages/System/StorageSystemMacros.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemMacros.cpp rename to dbms/Storages/System/StorageSystemMacros.cpp diff --git a/dbms/src/Storages/System/StorageSystemMacros.h b/dbms/Storages/System/StorageSystemMacros.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemMacros.h rename to dbms/Storages/System/StorageSystemMacros.h diff --git a/dbms/src/Storages/System/StorageSystemMergeTreeSettings.cpp b/dbms/Storages/System/StorageSystemMergeTreeSettings.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemMergeTreeSettings.cpp rename to dbms/Storages/System/StorageSystemMergeTreeSettings.cpp diff --git a/dbms/src/Storages/System/StorageSystemMergeTreeSettings.h b/dbms/Storages/System/StorageSystemMergeTreeSettings.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemMergeTreeSettings.h rename to dbms/Storages/System/StorageSystemMergeTreeSettings.h diff --git a/dbms/src/Storages/System/StorageSystemMerges.cpp b/dbms/Storages/System/StorageSystemMerges.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemMerges.cpp rename to dbms/Storages/System/StorageSystemMerges.cpp diff --git a/dbms/src/Storages/System/StorageSystemMerges.h b/dbms/Storages/System/StorageSystemMerges.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemMerges.h rename to dbms/Storages/System/StorageSystemMerges.h diff --git a/dbms/src/Storages/System/StorageSystemMetrics.cpp b/dbms/Storages/System/StorageSystemMetrics.cpp similarity index 100% rename from 
dbms/src/Storages/System/StorageSystemMetrics.cpp rename to dbms/Storages/System/StorageSystemMetrics.cpp diff --git a/dbms/src/Storages/System/StorageSystemMetrics.h b/dbms/Storages/System/StorageSystemMetrics.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemMetrics.h rename to dbms/Storages/System/StorageSystemMetrics.h diff --git a/dbms/src/Storages/System/StorageSystemModels.cpp b/dbms/Storages/System/StorageSystemModels.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemModels.cpp rename to dbms/Storages/System/StorageSystemModels.cpp diff --git a/dbms/src/Storages/System/StorageSystemModels.h b/dbms/Storages/System/StorageSystemModels.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemModels.h rename to dbms/Storages/System/StorageSystemModels.h diff --git a/dbms/src/Storages/System/StorageSystemMutations.cpp b/dbms/Storages/System/StorageSystemMutations.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemMutations.cpp rename to dbms/Storages/System/StorageSystemMutations.cpp diff --git a/dbms/src/Storages/System/StorageSystemMutations.h b/dbms/Storages/System/StorageSystemMutations.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemMutations.h rename to dbms/Storages/System/StorageSystemMutations.h diff --git a/dbms/src/Storages/System/StorageSystemNumbers.cpp b/dbms/Storages/System/StorageSystemNumbers.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemNumbers.cpp rename to dbms/Storages/System/StorageSystemNumbers.cpp diff --git a/dbms/src/Storages/System/StorageSystemNumbers.h b/dbms/Storages/System/StorageSystemNumbers.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemNumbers.h rename to dbms/Storages/System/StorageSystemNumbers.h diff --git a/dbms/src/Storages/System/StorageSystemOne.cpp b/dbms/Storages/System/StorageSystemOne.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemOne.cpp rename to dbms/Storages/System/StorageSystemOne.cpp diff --git a/dbms/src/Storages/System/StorageSystemOne.h b/dbms/Storages/System/StorageSystemOne.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemOne.h rename to dbms/Storages/System/StorageSystemOne.h diff --git a/dbms/src/Storages/System/StorageSystemParts.cpp b/dbms/Storages/System/StorageSystemParts.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemParts.cpp rename to dbms/Storages/System/StorageSystemParts.cpp diff --git a/dbms/src/Storages/System/StorageSystemParts.h b/dbms/Storages/System/StorageSystemParts.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemParts.h rename to dbms/Storages/System/StorageSystemParts.h diff --git a/dbms/src/Storages/System/StorageSystemPartsBase.cpp b/dbms/Storages/System/StorageSystemPartsBase.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemPartsBase.cpp rename to dbms/Storages/System/StorageSystemPartsBase.cpp diff --git a/dbms/src/Storages/System/StorageSystemPartsBase.h b/dbms/Storages/System/StorageSystemPartsBase.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemPartsBase.h rename to dbms/Storages/System/StorageSystemPartsBase.h diff --git a/dbms/src/Storages/System/StorageSystemPartsColumns.cpp b/dbms/Storages/System/StorageSystemPartsColumns.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemPartsColumns.cpp rename to 
dbms/Storages/System/StorageSystemPartsColumns.cpp diff --git a/dbms/src/Storages/System/StorageSystemPartsColumns.h b/dbms/Storages/System/StorageSystemPartsColumns.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemPartsColumns.h rename to dbms/Storages/System/StorageSystemPartsColumns.h diff --git a/dbms/src/Storages/System/StorageSystemProcesses.cpp b/dbms/Storages/System/StorageSystemProcesses.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemProcesses.cpp rename to dbms/Storages/System/StorageSystemProcesses.cpp diff --git a/dbms/src/Storages/System/StorageSystemProcesses.h b/dbms/Storages/System/StorageSystemProcesses.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemProcesses.h rename to dbms/Storages/System/StorageSystemProcesses.h diff --git a/dbms/src/Storages/System/StorageSystemQuotaUsage.cpp b/dbms/Storages/System/StorageSystemQuotaUsage.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemQuotaUsage.cpp rename to dbms/Storages/System/StorageSystemQuotaUsage.cpp diff --git a/dbms/src/Storages/System/StorageSystemQuotaUsage.h b/dbms/Storages/System/StorageSystemQuotaUsage.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemQuotaUsage.h rename to dbms/Storages/System/StorageSystemQuotaUsage.h diff --git a/dbms/src/Storages/System/StorageSystemQuotas.cpp b/dbms/Storages/System/StorageSystemQuotas.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemQuotas.cpp rename to dbms/Storages/System/StorageSystemQuotas.cpp diff --git a/dbms/src/Storages/System/StorageSystemQuotas.h b/dbms/Storages/System/StorageSystemQuotas.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemQuotas.h rename to dbms/Storages/System/StorageSystemQuotas.h diff --git a/dbms/src/Storages/System/StorageSystemReplicas.cpp b/dbms/Storages/System/StorageSystemReplicas.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemReplicas.cpp rename to dbms/Storages/System/StorageSystemReplicas.cpp diff --git a/dbms/src/Storages/System/StorageSystemReplicas.h b/dbms/Storages/System/StorageSystemReplicas.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemReplicas.h rename to dbms/Storages/System/StorageSystemReplicas.h diff --git a/dbms/src/Storages/System/StorageSystemReplicationQueue.cpp b/dbms/Storages/System/StorageSystemReplicationQueue.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemReplicationQueue.cpp rename to dbms/Storages/System/StorageSystemReplicationQueue.cpp diff --git a/dbms/src/Storages/System/StorageSystemReplicationQueue.h b/dbms/Storages/System/StorageSystemReplicationQueue.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemReplicationQueue.h rename to dbms/Storages/System/StorageSystemReplicationQueue.h diff --git a/dbms/src/Storages/System/StorageSystemRowPolicies.cpp b/dbms/Storages/System/StorageSystemRowPolicies.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemRowPolicies.cpp rename to dbms/Storages/System/StorageSystemRowPolicies.cpp diff --git a/dbms/src/Storages/System/StorageSystemRowPolicies.h b/dbms/Storages/System/StorageSystemRowPolicies.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemRowPolicies.h rename to dbms/Storages/System/StorageSystemRowPolicies.h diff --git a/dbms/src/Storages/System/StorageSystemSettings.cpp b/dbms/Storages/System/StorageSystemSettings.cpp similarity index 
100% rename from dbms/src/Storages/System/StorageSystemSettings.cpp rename to dbms/Storages/System/StorageSystemSettings.cpp diff --git a/dbms/src/Storages/System/StorageSystemSettings.h b/dbms/Storages/System/StorageSystemSettings.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemSettings.h rename to dbms/Storages/System/StorageSystemSettings.h diff --git a/dbms/src/Storages/System/StorageSystemStackTrace.cpp b/dbms/Storages/System/StorageSystemStackTrace.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemStackTrace.cpp rename to dbms/Storages/System/StorageSystemStackTrace.cpp diff --git a/dbms/src/Storages/System/StorageSystemStackTrace.h b/dbms/Storages/System/StorageSystemStackTrace.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemStackTrace.h rename to dbms/Storages/System/StorageSystemStackTrace.h diff --git a/dbms/src/Storages/System/StorageSystemStoragePolicies.cpp b/dbms/Storages/System/StorageSystemStoragePolicies.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemStoragePolicies.cpp rename to dbms/Storages/System/StorageSystemStoragePolicies.cpp diff --git a/dbms/src/Storages/System/StorageSystemStoragePolicies.h b/dbms/Storages/System/StorageSystemStoragePolicies.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemStoragePolicies.h rename to dbms/Storages/System/StorageSystemStoragePolicies.h diff --git a/dbms/src/Storages/System/StorageSystemTableEngines.cpp b/dbms/Storages/System/StorageSystemTableEngines.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemTableEngines.cpp rename to dbms/Storages/System/StorageSystemTableEngines.cpp diff --git a/dbms/src/Storages/System/StorageSystemTableEngines.h b/dbms/Storages/System/StorageSystemTableEngines.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemTableEngines.h rename to dbms/Storages/System/StorageSystemTableEngines.h diff --git a/dbms/src/Storages/System/StorageSystemTableFunctions.cpp b/dbms/Storages/System/StorageSystemTableFunctions.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemTableFunctions.cpp rename to dbms/Storages/System/StorageSystemTableFunctions.cpp diff --git a/dbms/src/Storages/System/StorageSystemTableFunctions.h b/dbms/Storages/System/StorageSystemTableFunctions.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemTableFunctions.h rename to dbms/Storages/System/StorageSystemTableFunctions.h diff --git a/dbms/src/Storages/System/StorageSystemTables.cpp b/dbms/Storages/System/StorageSystemTables.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemTables.cpp rename to dbms/Storages/System/StorageSystemTables.cpp diff --git a/dbms/src/Storages/System/StorageSystemTables.h b/dbms/Storages/System/StorageSystemTables.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemTables.h rename to dbms/Storages/System/StorageSystemTables.h diff --git a/dbms/src/Storages/System/StorageSystemZeros.cpp b/dbms/Storages/System/StorageSystemZeros.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemZeros.cpp rename to dbms/Storages/System/StorageSystemZeros.cpp diff --git a/dbms/src/Storages/System/StorageSystemZeros.h b/dbms/Storages/System/StorageSystemZeros.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemZeros.h rename to dbms/Storages/System/StorageSystemZeros.h diff --git 
a/dbms/src/Storages/System/StorageSystemZooKeeper.cpp b/dbms/Storages/System/StorageSystemZooKeeper.cpp similarity index 100% rename from dbms/src/Storages/System/StorageSystemZooKeeper.cpp rename to dbms/Storages/System/StorageSystemZooKeeper.cpp diff --git a/dbms/src/Storages/System/StorageSystemZooKeeper.h b/dbms/Storages/System/StorageSystemZooKeeper.h similarity index 100% rename from dbms/src/Storages/System/StorageSystemZooKeeper.h rename to dbms/Storages/System/StorageSystemZooKeeper.h diff --git a/dbms/src/Storages/System/attachSystemTables.cpp b/dbms/Storages/System/attachSystemTables.cpp similarity index 100% rename from dbms/src/Storages/System/attachSystemTables.cpp rename to dbms/Storages/System/attachSystemTables.cpp diff --git a/dbms/src/Storages/System/attachSystemTables.h b/dbms/Storages/System/attachSystemTables.h similarity index 100% rename from dbms/src/Storages/System/attachSystemTables.h rename to dbms/Storages/System/attachSystemTables.h diff --git a/dbms/src/Storages/TableStructureLockHolder.h b/dbms/Storages/TableStructureLockHolder.h similarity index 100% rename from dbms/src/Storages/TableStructureLockHolder.h rename to dbms/Storages/TableStructureLockHolder.h diff --git a/dbms/src/Storages/VirtualColumnUtils.cpp b/dbms/Storages/VirtualColumnUtils.cpp similarity index 100% rename from dbms/src/Storages/VirtualColumnUtils.cpp rename to dbms/Storages/VirtualColumnUtils.cpp diff --git a/dbms/src/Storages/VirtualColumnUtils.h b/dbms/Storages/VirtualColumnUtils.h similarity index 100% rename from dbms/src/Storages/VirtualColumnUtils.h rename to dbms/Storages/VirtualColumnUtils.h diff --git a/dbms/src/Storages/getStructureOfRemoteTable.cpp b/dbms/Storages/getStructureOfRemoteTable.cpp similarity index 100% rename from dbms/src/Storages/getStructureOfRemoteTable.cpp rename to dbms/Storages/getStructureOfRemoteTable.cpp diff --git a/dbms/src/Storages/getStructureOfRemoteTable.h b/dbms/Storages/getStructureOfRemoteTable.h similarity index 100% rename from dbms/src/Storages/getStructureOfRemoteTable.h rename to dbms/Storages/getStructureOfRemoteTable.h diff --git a/dbms/src/Storages/registerStorages.cpp b/dbms/Storages/registerStorages.cpp similarity index 100% rename from dbms/src/Storages/registerStorages.cpp rename to dbms/Storages/registerStorages.cpp diff --git a/dbms/src/Storages/registerStorages.h b/dbms/Storages/registerStorages.h similarity index 100% rename from dbms/src/Storages/registerStorages.h rename to dbms/Storages/registerStorages.h diff --git a/dbms/src/Storages/tests/CMakeLists.txt b/dbms/Storages/tests/CMakeLists.txt similarity index 100% rename from dbms/src/Storages/tests/CMakeLists.txt rename to dbms/Storages/tests/CMakeLists.txt diff --git a/dbms/src/Storages/tests/active_parts.py b/dbms/Storages/tests/active_parts.py similarity index 100% rename from dbms/src/Storages/tests/active_parts.py rename to dbms/Storages/tests/active_parts.py diff --git a/dbms/src/Storages/tests/get_abandonable_lock_in_all_partitions.cpp b/dbms/Storages/tests/get_abandonable_lock_in_all_partitions.cpp similarity index 100% rename from dbms/src/Storages/tests/get_abandonable_lock_in_all_partitions.cpp rename to dbms/Storages/tests/get_abandonable_lock_in_all_partitions.cpp diff --git a/dbms/src/Storages/tests/get_current_inserts_in_replicated.cpp b/dbms/Storages/tests/get_current_inserts_in_replicated.cpp similarity index 100% rename from dbms/src/Storages/tests/get_current_inserts_in_replicated.cpp rename to dbms/Storages/tests/get_current_inserts_in_replicated.cpp diff 
--git a/dbms/src/Storages/tests/gtest_aux_funcs_for_adaptive_granularity.cpp b/dbms/Storages/tests/gtest_aux_funcs_for_adaptive_granularity.cpp similarity index 100% rename from dbms/src/Storages/tests/gtest_aux_funcs_for_adaptive_granularity.cpp rename to dbms/Storages/tests/gtest_aux_funcs_for_adaptive_granularity.cpp diff --git a/dbms/src/Storages/tests/gtest_row_source_bits_test.cpp b/dbms/Storages/tests/gtest_row_source_bits_test.cpp similarity index 100% rename from dbms/src/Storages/tests/gtest_row_source_bits_test.cpp rename to dbms/Storages/tests/gtest_row_source_bits_test.cpp diff --git a/dbms/src/Storages/tests/gtest_storage_log.cpp b/dbms/Storages/tests/gtest_storage_log.cpp similarity index 100% rename from dbms/src/Storages/tests/gtest_storage_log.cpp rename to dbms/Storages/tests/gtest_storage_log.cpp diff --git a/dbms/src/Storages/tests/gtest_transform_query_for_external_database.cpp b/dbms/Storages/tests/gtest_transform_query_for_external_database.cpp similarity index 100% rename from dbms/src/Storages/tests/gtest_transform_query_for_external_database.cpp rename to dbms/Storages/tests/gtest_transform_query_for_external_database.cpp diff --git a/dbms/src/Storages/tests/merge_selector.cpp b/dbms/Storages/tests/merge_selector.cpp similarity index 100% rename from dbms/src/Storages/tests/merge_selector.cpp rename to dbms/Storages/tests/merge_selector.cpp diff --git a/dbms/src/Storages/tests/merge_selector2.cpp b/dbms/Storages/tests/merge_selector2.cpp similarity index 100% rename from dbms/src/Storages/tests/merge_selector2.cpp rename to dbms/Storages/tests/merge_selector2.cpp diff --git a/dbms/src/Storages/tests/part_name.cpp b/dbms/Storages/tests/part_name.cpp similarity index 100% rename from dbms/src/Storages/tests/part_name.cpp rename to dbms/Storages/tests/part_name.cpp diff --git a/dbms/src/Storages/tests/remove_symlink_directory.cpp b/dbms/Storages/tests/remove_symlink_directory.cpp similarity index 100% rename from dbms/src/Storages/tests/remove_symlink_directory.cpp rename to dbms/Storages/tests/remove_symlink_directory.cpp diff --git a/dbms/src/Storages/tests/storage_log.cpp b/dbms/Storages/tests/storage_log.cpp similarity index 100% rename from dbms/src/Storages/tests/storage_log.cpp rename to dbms/Storages/tests/storage_log.cpp diff --git a/dbms/src/Storages/tests/system_numbers.cpp b/dbms/Storages/tests/system_numbers.cpp similarity index 100% rename from dbms/src/Storages/tests/system_numbers.cpp rename to dbms/Storages/tests/system_numbers.cpp diff --git a/dbms/src/Storages/tests/test_alter_distributed.sql b/dbms/Storages/tests/test_alter_distributed.sql similarity index 100% rename from dbms/src/Storages/tests/test_alter_distributed.sql rename to dbms/Storages/tests/test_alter_distributed.sql diff --git a/dbms/src/Storages/tests/test_alter_merge.sql b/dbms/Storages/tests/test_alter_merge.sql similarity index 100% rename from dbms/src/Storages/tests/test_alter_merge.sql rename to dbms/Storages/tests/test_alter_merge.sql diff --git a/dbms/src/Storages/tests/test_alter_merge_tree.sql b/dbms/Storages/tests/test_alter_merge_tree.sql similarity index 100% rename from dbms/src/Storages/tests/test_alter_merge_tree.sql rename to dbms/Storages/tests/test_alter_merge_tree.sql diff --git a/dbms/src/Storages/tests/transform_part_zk_nodes.cpp b/dbms/Storages/tests/transform_part_zk_nodes.cpp similarity index 100% rename from dbms/src/Storages/tests/transform_part_zk_nodes.cpp rename to dbms/Storages/tests/transform_part_zk_nodes.cpp diff --git 
a/dbms/src/Storages/transformQueryForExternalDatabase.cpp b/dbms/Storages/transformQueryForExternalDatabase.cpp similarity index 100% rename from dbms/src/Storages/transformQueryForExternalDatabase.cpp rename to dbms/Storages/transformQueryForExternalDatabase.cpp diff --git a/dbms/src/Storages/transformQueryForExternalDatabase.h b/dbms/Storages/transformQueryForExternalDatabase.h similarity index 100% rename from dbms/src/Storages/transformQueryForExternalDatabase.h rename to dbms/Storages/transformQueryForExternalDatabase.h diff --git a/dbms/src/TableFunctions/CMakeLists.txt b/dbms/TableFunctions/CMakeLists.txt similarity index 100% rename from dbms/src/TableFunctions/CMakeLists.txt rename to dbms/TableFunctions/CMakeLists.txt diff --git a/dbms/src/TableFunctions/ITableFunction.cpp b/dbms/TableFunctions/ITableFunction.cpp similarity index 100% rename from dbms/src/TableFunctions/ITableFunction.cpp rename to dbms/TableFunctions/ITableFunction.cpp diff --git a/dbms/src/TableFunctions/ITableFunction.h b/dbms/TableFunctions/ITableFunction.h similarity index 100% rename from dbms/src/TableFunctions/ITableFunction.h rename to dbms/TableFunctions/ITableFunction.h diff --git a/dbms/src/TableFunctions/ITableFunctionFileLike.cpp b/dbms/TableFunctions/ITableFunctionFileLike.cpp similarity index 100% rename from dbms/src/TableFunctions/ITableFunctionFileLike.cpp rename to dbms/TableFunctions/ITableFunctionFileLike.cpp diff --git a/dbms/src/TableFunctions/ITableFunctionFileLike.h b/dbms/TableFunctions/ITableFunctionFileLike.h similarity index 100% rename from dbms/src/TableFunctions/ITableFunctionFileLike.h rename to dbms/TableFunctions/ITableFunctionFileLike.h diff --git a/dbms/src/TableFunctions/ITableFunctionXDBC.cpp b/dbms/TableFunctions/ITableFunctionXDBC.cpp similarity index 100% rename from dbms/src/TableFunctions/ITableFunctionXDBC.cpp rename to dbms/TableFunctions/ITableFunctionXDBC.cpp diff --git a/dbms/src/TableFunctions/ITableFunctionXDBC.h b/dbms/TableFunctions/ITableFunctionXDBC.h similarity index 100% rename from dbms/src/TableFunctions/ITableFunctionXDBC.h rename to dbms/TableFunctions/ITableFunctionXDBC.h diff --git a/dbms/src/TableFunctions/TableFunctionFactory.cpp b/dbms/TableFunctions/TableFunctionFactory.cpp similarity index 100% rename from dbms/src/TableFunctions/TableFunctionFactory.cpp rename to dbms/TableFunctions/TableFunctionFactory.cpp diff --git a/dbms/src/TableFunctions/TableFunctionFactory.h b/dbms/TableFunctions/TableFunctionFactory.h similarity index 100% rename from dbms/src/TableFunctions/TableFunctionFactory.h rename to dbms/TableFunctions/TableFunctionFactory.h diff --git a/dbms/src/TableFunctions/TableFunctionFile.cpp b/dbms/TableFunctions/TableFunctionFile.cpp similarity index 100% rename from dbms/src/TableFunctions/TableFunctionFile.cpp rename to dbms/TableFunctions/TableFunctionFile.cpp diff --git a/dbms/src/TableFunctions/TableFunctionFile.h b/dbms/TableFunctions/TableFunctionFile.h similarity index 100% rename from dbms/src/TableFunctions/TableFunctionFile.h rename to dbms/TableFunctions/TableFunctionFile.h diff --git a/dbms/src/TableFunctions/TableFunctionGenerateRandom.cpp b/dbms/TableFunctions/TableFunctionGenerateRandom.cpp similarity index 100% rename from dbms/src/TableFunctions/TableFunctionGenerateRandom.cpp rename to dbms/TableFunctions/TableFunctionGenerateRandom.cpp diff --git a/dbms/src/TableFunctions/TableFunctionGenerateRandom.h b/dbms/TableFunctions/TableFunctionGenerateRandom.h similarity index 100% rename from 
dbms/src/TableFunctions/TableFunctionGenerateRandom.h rename to dbms/TableFunctions/TableFunctionGenerateRandom.h diff --git a/dbms/src/TableFunctions/TableFunctionHDFS.cpp b/dbms/TableFunctions/TableFunctionHDFS.cpp similarity index 100% rename from dbms/src/TableFunctions/TableFunctionHDFS.cpp rename to dbms/TableFunctions/TableFunctionHDFS.cpp diff --git a/dbms/src/TableFunctions/TableFunctionHDFS.h b/dbms/TableFunctions/TableFunctionHDFS.h similarity index 100% rename from dbms/src/TableFunctions/TableFunctionHDFS.h rename to dbms/TableFunctions/TableFunctionHDFS.h diff --git a/dbms/src/TableFunctions/TableFunctionInput.cpp b/dbms/TableFunctions/TableFunctionInput.cpp similarity index 100% rename from dbms/src/TableFunctions/TableFunctionInput.cpp rename to dbms/TableFunctions/TableFunctionInput.cpp diff --git a/dbms/src/TableFunctions/TableFunctionInput.h b/dbms/TableFunctions/TableFunctionInput.h similarity index 100% rename from dbms/src/TableFunctions/TableFunctionInput.h rename to dbms/TableFunctions/TableFunctionInput.h diff --git a/dbms/src/TableFunctions/TableFunctionMerge.cpp b/dbms/TableFunctions/TableFunctionMerge.cpp similarity index 100% rename from dbms/src/TableFunctions/TableFunctionMerge.cpp rename to dbms/TableFunctions/TableFunctionMerge.cpp diff --git a/dbms/src/TableFunctions/TableFunctionMerge.h b/dbms/TableFunctions/TableFunctionMerge.h similarity index 100% rename from dbms/src/TableFunctions/TableFunctionMerge.h rename to dbms/TableFunctions/TableFunctionMerge.h diff --git a/dbms/src/TableFunctions/TableFunctionMySQL.cpp b/dbms/TableFunctions/TableFunctionMySQL.cpp similarity index 100% rename from dbms/src/TableFunctions/TableFunctionMySQL.cpp rename to dbms/TableFunctions/TableFunctionMySQL.cpp diff --git a/dbms/src/TableFunctions/TableFunctionMySQL.h b/dbms/TableFunctions/TableFunctionMySQL.h similarity index 100% rename from dbms/src/TableFunctions/TableFunctionMySQL.h rename to dbms/TableFunctions/TableFunctionMySQL.h diff --git a/dbms/src/TableFunctions/TableFunctionNumbers.cpp b/dbms/TableFunctions/TableFunctionNumbers.cpp similarity index 100% rename from dbms/src/TableFunctions/TableFunctionNumbers.cpp rename to dbms/TableFunctions/TableFunctionNumbers.cpp diff --git a/dbms/src/TableFunctions/TableFunctionNumbers.h b/dbms/TableFunctions/TableFunctionNumbers.h similarity index 100% rename from dbms/src/TableFunctions/TableFunctionNumbers.h rename to dbms/TableFunctions/TableFunctionNumbers.h diff --git a/dbms/src/TableFunctions/TableFunctionRemote.cpp b/dbms/TableFunctions/TableFunctionRemote.cpp similarity index 100% rename from dbms/src/TableFunctions/TableFunctionRemote.cpp rename to dbms/TableFunctions/TableFunctionRemote.cpp diff --git a/dbms/src/TableFunctions/TableFunctionRemote.h b/dbms/TableFunctions/TableFunctionRemote.h similarity index 100% rename from dbms/src/TableFunctions/TableFunctionRemote.h rename to dbms/TableFunctions/TableFunctionRemote.h diff --git a/dbms/src/TableFunctions/TableFunctionS3.cpp b/dbms/TableFunctions/TableFunctionS3.cpp similarity index 100% rename from dbms/src/TableFunctions/TableFunctionS3.cpp rename to dbms/TableFunctions/TableFunctionS3.cpp diff --git a/dbms/src/TableFunctions/TableFunctionS3.h b/dbms/TableFunctions/TableFunctionS3.h similarity index 100% rename from dbms/src/TableFunctions/TableFunctionS3.h rename to dbms/TableFunctions/TableFunctionS3.h diff --git a/dbms/src/TableFunctions/TableFunctionURL.cpp b/dbms/TableFunctions/TableFunctionURL.cpp similarity index 100% rename from 
dbms/src/TableFunctions/TableFunctionURL.cpp rename to dbms/TableFunctions/TableFunctionURL.cpp diff --git a/dbms/src/TableFunctions/TableFunctionURL.h b/dbms/TableFunctions/TableFunctionURL.h similarity index 100% rename from dbms/src/TableFunctions/TableFunctionURL.h rename to dbms/TableFunctions/TableFunctionURL.h diff --git a/dbms/src/TableFunctions/TableFunctionValues.cpp b/dbms/TableFunctions/TableFunctionValues.cpp similarity index 100% rename from dbms/src/TableFunctions/TableFunctionValues.cpp rename to dbms/TableFunctions/TableFunctionValues.cpp diff --git a/dbms/src/TableFunctions/TableFunctionValues.h b/dbms/TableFunctions/TableFunctionValues.h similarity index 100% rename from dbms/src/TableFunctions/TableFunctionValues.h rename to dbms/TableFunctions/TableFunctionValues.h diff --git a/dbms/src/TableFunctions/TableFunctionZeros.cpp b/dbms/TableFunctions/TableFunctionZeros.cpp similarity index 100% rename from dbms/src/TableFunctions/TableFunctionZeros.cpp rename to dbms/TableFunctions/TableFunctionZeros.cpp diff --git a/dbms/src/TableFunctions/TableFunctionZeros.h b/dbms/TableFunctions/TableFunctionZeros.h similarity index 100% rename from dbms/src/TableFunctions/TableFunctionZeros.h rename to dbms/TableFunctions/TableFunctionZeros.h diff --git a/dbms/src/TableFunctions/parseColumnsListForTableFunction.cpp b/dbms/TableFunctions/parseColumnsListForTableFunction.cpp similarity index 100% rename from dbms/src/TableFunctions/parseColumnsListForTableFunction.cpp rename to dbms/TableFunctions/parseColumnsListForTableFunction.cpp diff --git a/dbms/src/TableFunctions/parseColumnsListForTableFunction.h b/dbms/TableFunctions/parseColumnsListForTableFunction.h similarity index 100% rename from dbms/src/TableFunctions/parseColumnsListForTableFunction.h rename to dbms/TableFunctions/parseColumnsListForTableFunction.h diff --git a/dbms/src/TableFunctions/registerTableFunctions.cpp b/dbms/TableFunctions/registerTableFunctions.cpp similarity index 100% rename from dbms/src/TableFunctions/registerTableFunctions.cpp rename to dbms/TableFunctions/registerTableFunctions.cpp diff --git a/dbms/src/TableFunctions/registerTableFunctions.h b/dbms/TableFunctions/registerTableFunctions.h similarity index 100% rename from dbms/src/TableFunctions/registerTableFunctions.h rename to dbms/TableFunctions/registerTableFunctions.h diff --git a/dbms/src/CMakeLists.txt b/dbms/src/CMakeLists.txt deleted file mode 100644 index b54266f4693..00000000000 --- a/dbms/src/CMakeLists.txt +++ /dev/null @@ -1,19 +0,0 @@ -add_subdirectory (Access) -add_subdirectory (Columns) -add_subdirectory (Common) -add_subdirectory (Core) -add_subdirectory (DataStreams) -add_subdirectory (DataTypes) -add_subdirectory (Dictionaries) -add_subdirectory (Disks) -add_subdirectory (Storages) -add_subdirectory (Parsers) -add_subdirectory (IO) -add_subdirectory (Functions) -add_subdirectory (Interpreters) -add_subdirectory (AggregateFunctions) -add_subdirectory (Client) -add_subdirectory (TableFunctions) -add_subdirectory (Processors) -add_subdirectory (Formats) -add_subdirectory (Compression) diff --git a/docker/builder/README.md b/docker/builder/README.md index 7fd8fe42335..5ae9a95a646 100644 --- a/docker/builder/README.md +++ b/docker/builder/README.md @@ -13,21 +13,21 @@ Run build: make build ``` -Before run, ensure that your user has access to docker: -To check, that you have access to Docker, run `docker ps`. -If not, you must add this user to `docker` group: `sudo usermod -aG docker $USER` and relogin. 
+Before running, ensure that your user has access to Docker: +To check that you have access to Docker, run `docker ps`. +If not, add your user to the `docker` group: `sudo usermod -aG docker $USER` and relogin. (You must close all your sessions. For example, restart your computer.) -Build results are available in `build_docker` directory at top level of your working copy. +Build results are available in the `build_docker` directory at the top level of your working copy. It builds only binaries, not packages. For example, run server: ``` -cd $(git rev-parse --show-toplevel)/dbms/src/Server -$(git rev-parse --show-toplevel)/docker/builder/dbms/programs/clickhouse server --config-file $(git rev-parse --show-toplevel)/dbms/programs/server/config.xml +cd $(git rev-parse --show-toplevel)/dbms/Server +$(git rev-parse --show-toplevel)/docker/builder/programs/clickhouse server --config-file $(git rev-parse --show-toplevel)/programs/server/config.xml ``` Run client: ``` -$(git rev-parse --show-toplevel)/docker/builder/dbms/programs/clickhouse client +$(git rev-parse --show-toplevel)/docker/builder/programs/clickhouse client ``` diff --git a/docker/images.json b/docker/images.json index d21365fd49d..c7dfc82d906 100644 --- a/docker/images.json +++ b/docker/images.json @@ -14,5 +14,5 @@ "docker/test/unit": "yandex/clickhouse-unit-test", "docker/test/stress": "yandex/clickhouse-stress-test", "docker/test/split_build_smoke_test": "yandex/clickhouse-split-build-smoke-test", - "dbms/tests/integration/image": "yandex/clickhouse-integration-tests-runner" + "tests/integration/image": "yandex/clickhouse-integration-tests-runner" } diff --git a/docker/packager/binary/build.sh b/docker/packager/binary/build.sh index 94615a5a39d..e254fde8c52 100755 --- a/docker/packager/binary/build.sh +++ b/docker/packager/binary/build.sh @@ -16,7 +16,7 @@ rm -f CMakeCache.txt cmake .. -LA -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DSANITIZE=$SANITIZER $CMAKE_FLAGS ninja ccache --show-stats ||: -mv ./dbms/programs/clickhouse* /output +mv ./programs/clickhouse* /output mv ./dbms/unit_tests_dbms /output find . -name '*.so' -print -exec mv '{}' /output \; find . -name '*.so.*' -print -exec mv '{}' /output \; @@ -24,7 +24,7 @@ find . -name '*.so.*' -print -exec mv '{}' /output \; # Different files for performance test. if [ "performance" == "$COMBINED_OUTPUT" ] then - cp -r ../dbms/tests/performance /output + cp -r ../tests/performance /output cp -r ../docker/test/performance-comparison/config /output ||: rm /output/unit_tests_dbms ||: rm /output/clickhouse-odbc-bridge ||: @@ -36,9 +36,9 @@ fi if [ "" != "$COMBINED_OUTPUT" ] then mkdir -p /output/config - cp ../dbms/programs/server/config.xml /output/config - cp ../dbms/programs/server/users.xml /output/config - cp -r ../dbms/programs/server/config.d /output/config + cp ../programs/server/config.xml /output/config + cp ../programs/server/users.xml /output/config + cp -r ../programs/server/config.d /output/config tar -czvf "$COMBINED_OUTPUT.tgz" /output rm -r /output/* mv "$COMBINED_OUTPUT.tgz" /output diff --git a/docker/packager/packager b/docker/packager/packager index 3c10788e662..506ac1bc19b 100755 --- a/docker/packager/packager +++ b/docker/packager/packager @@ -101,7 +101,7 @@ def run_vagrant_box_with_env(image_path, output_dir, ch_root): logging.info("Running build") vagrant.execute_cmd("cd ~/ClickHouse && cmake .
&& ninja") logging.info("Copying binary back") - vagrant.copy_from_image("~/ClickHouse/dbms/programs/clickhouse", output_dir) + vagrant.copy_from_image("~/ClickHouse/programs/clickhouse", output_dir) def parse_env_variables(build_type, compiler, sanitizer, package_type, image_type, cache, distcc_hosts, unbundled, split_binary, clang_tidy, version, author, official, alien_pkgs, with_coverage): CLANG_PREFIX = "clang" diff --git a/docker/test/performance-comparison/entrypoint.sh b/docker/test/performance-comparison/entrypoint.sh index 4176a1b1d7d..1aba59e982a 100755 --- a/docker/test/performance-comparison/entrypoint.sh +++ b/docker/test/performance-comparison/entrypoint.sh @@ -82,7 +82,7 @@ if [ "$REF_PR" == "" ]; then echo Reference PR is not specified ; exit 1 ; fi ) | tee right-commit.txt # Prepare the list of changed tests for use by compare.sh -git -C ch diff --name-only "$SHA_TO_TEST" "$(git -C ch merge-base "$SHA_TO_TEST"~ master)" -- dbms/tests/performance | tee changed-tests.txt +git -C ch diff --name-only "$SHA_TO_TEST" "$(git -C ch merge-base "$SHA_TO_TEST"~ master)" -- tests/performance | tee changed-tests.txt # Set python output encoding so that we can print queries with Russian letters. export PYTHONIOENCODING=utf-8 diff --git a/docker/test/performance-comparison/performance_comparison.md b/docker/test/performance-comparison/performance_comparison.md index 7c5172bf110..7407702b475 100644 --- a/docker/test/performance-comparison/performance_comparison.md +++ b/docker/test/performance-comparison/performance_comparison.md @@ -50,7 +50,7 @@ More stages are available, e.g. restart servers or run the tests. See the code. #### Run a single test on the already configured servers ``` -docker/test/performance-comparison/perf.py --host=localhost --port=9000 --runs=1 dbms/tests/performance/logical_functions_small.xml +docker/test/performance-comparison/perf.py --host=localhost --port=9000 --runs=1 tests/performance/logical_functions_small.xml ``` ### References diff --git a/docker/test/test_runner.sh b/docker/test/test_runner.sh index 6e6d4537603..76e142e61f9 100755 --- a/docker/test/test_runner.sh +++ b/docker/test/test_runner.sh @@ -42,8 +42,8 @@ if [ ${CLICKHOUSE_PACKAGES_ARG} != ${NO_REBUILD_FLAG} ]; then fi # # Create a bind-volume to the clickhouse-test script file -# docker volume create --driver local --opt type=none --opt device=/home/enmk/proj/ClickHouse_master/dbms/tests/clickhouse-test --opt o=bind clickhouse-test-script-volume -# docker volume create --driver local --opt type=none --opt device=/home/enmk/proj/ClickHouse_master/dbms/tests/queries --opt o=bind clickhouse-test-queries-dir-volume +# docker volume create --driver local --opt type=none --opt device=/home/enmk/proj/ClickHouse_master/tests/clickhouse-test --opt o=bind clickhouse-test-script-volume +# docker volume create --driver local --opt type=none --opt device=/home/enmk/proj/ClickHouse_master/tests/queries --opt o=bind clickhouse-test-queries-dir-volume # Build server image (optional) from local packages if [ -z "${CLICKHOUSE_SERVER_IMAGE}" ]; then @@ -81,4 +81,4 @@ CLICKHOUSE_SERVER_IMAGE="${CLICKHOUSE_SERVER_IMAGE}" \ docker-compose -f "${CLICKHOUSE_DOCKER_DIR}/test/test_runner_docker_compose.yaml" \ run \ --name test-runner \ - test-runner \ No newline at end of file + test-runner diff --git a/docker/test/test_runner_docker_compose.yaml b/docker/test/test_runner_docker_compose.yaml index ba2e525b3a5..2aef6a48d77 100644 --- a/docker/test/test_runner_docker_compose.yaml +++ 
b/docker/test/test_runner_docker_compose.yaml @@ -27,8 +27,8 @@ services: # NOTE: Dev-mode: mount newest versions of the queries and clickhouse-test script into container. # volumes: - # - /home/enmk/proj/ClickHouse_master/dbms/tests/queries:/usr/share/clickhouse-test/queries:ro - # - /home/enmk/proj/ClickHouse_master/dbms/tests/clickhouse-test:/usr/bin/clickhouse-test:ro + # - /home/enmk/proj/ClickHouse_master/tests/queries:/usr/share/clickhouse-test/queries:ro + # - /home/enmk/proj/ClickHouse_master/tests/clickhouse-test:/usr/bin/clickhouse-test:ro # String-form instead of list-form to allow multiple arguments in "${CLICKHOUSE_TEST_ARGS}" entrypoint: "clickhouse-test ${CLICKHOUSE_TEST_ARGS}" diff --git a/docs/en/development/browse_code.md b/docs/en/development/browse_code.md index 2a109c9323c..d6994c293ac 100644 --- a/docs/en/development/browse_code.md +++ b/docs/en/development/browse_code.md @@ -1,6 +1,6 @@ # Browse ClickHouse Source Code {#browse-clickhouse-source-code} -You can use **Woboq** online code browser available [here](https://clickhouse-test-reports.s3.yandex.net/codebrowser/html_report///ClickHouse/dbms/src/index.html). It provides code navigation and semantic highlighting, search and indexing. The code snapshot is updated daily. +You can use **Woboq** online code browser available [here](https://clickhouse-test-reports.s3.yandex.net/codebrowser/html_report///ClickHouse/dbms/index.html). It provides code navigation and semantic highlighting, search and indexing. The code snapshot is updated daily. Also, you can browse sources on [GitHub](https://github.com/ClickHouse/ClickHouse) as usual. diff --git a/docs/en/development/build.md b/docs/en/development/build.md index 57873226f7c..65558060925 100644 --- a/docs/en/development/build.md +++ b/docs/en/development/build.md @@ -60,7 +60,7 @@ $ cd .. ``` To create an executable, run `ninja clickhouse`. -This will create the `dbms/programs/clickhouse` executable, which can be used with `client` or `server` arguments. +This will create the `programs/clickhouse` executable, which can be used with `client` or `server` arguments. # How to Build ClickHouse on Any Linux {#how-to-build-clickhouse-on-any-linux} diff --git a/docs/en/development/developer_instruction.md b/docs/en/development/developer_instruction.md index c9cfd10a070..66294f1fbd4 100644 --- a/docs/en/development/developer_instruction.md +++ b/docs/en/development/developer_instruction.md @@ -193,19 +193,19 @@ Upon the successful start of the building process, you’ll see the build progre While building messages about protobuf files in libhdfs2 library like `libprotobuf WARNING` may show up. They affect nothing and are safe to be ignored. -Upon successful build you get an executable file `ClickHouse//dbms/programs/clickhouse`: +Upon successful build you get an executable file `ClickHouse//programs/clickhouse`: - ls -l dbms/programs/clickhouse + ls -l programs/clickhouse # Running the built executable of ClickHouse {#running-the-built-executable-of-clickhouse} -To run the server under the current user you need to navigate to `ClickHouse/dbms/programs/server/` (located outside of `build`) and run: +To run the server under the current user you need to navigate to `ClickHouse/programs/server/` (located outside of `build`) and run: - ../../../build/dbms/programs/clickhouse server + ../../../build/programs/clickhouse server In this case, ClickHouse will use config files located in the current directory. 
You can run `clickhouse server` from any directory specifying the path to a config file as a command-line parameter `--config-file`. -To connect to ClickHouse with clickhouse-client in another terminal navigate to `ClickHouse/build/dbms/programs/` and run `clickhouse client`. +To connect to ClickHouse with clickhouse-client in another terminal navigate to `ClickHouse/build/programs/` and run `clickhouse client`. If you get `Connection refused` message on Mac OS X or FreeBSD, try specifying host address 127.0.0.1: @@ -214,7 +214,7 @@ If you get `Connection refused` message on Mac OS X or FreeBSD, try specifying h You can replace the production version of ClickHouse binary installed in your system with your custom-built ClickHouse binary. To do that install ClickHouse on your machine following the instructions from the official website. Next, run the following: sudo service clickhouse-server stop - sudo cp ClickHouse/build/dbms/programs/clickhouse /usr/bin/ + sudo cp ClickHouse/build/programs/clickhouse /usr/bin/ sudo service clickhouse-server start Note that `clickhouse-client`, `clickhouse-server` and others are symlinks to the commonly shared `clickhouse` binary. @@ -222,7 +222,7 @@ Note that `clickhouse-client`, `clickhouse-server` and others are symlinks to th You can also run your custom-built ClickHouse binary with the config file from the ClickHouse package installed on your system: sudo service clickhouse-server stop - sudo -u clickhouse ClickHouse/build/dbms/programs/clickhouse server --config-file /etc/clickhouse-server/config.xml + sudo -u clickhouse ClickHouse/build/programs/clickhouse server --config-file /etc/clickhouse-server/config.xml # IDE (Integrated Development Environment) {#ide-integrated-development-environment} @@ -242,7 +242,7 @@ The Code Style Guide: https://clickhouse.tech/docs/en/development/style/ Writing tests: https://clickhouse.tech/docs/en/development/tests/ -List of tasks: https://github.com/ClickHouse/ClickHouse/blob/master/dbms/tests/instructions/easy\_tasks\_sorted\_en.md +List of tasks: https://github.com/ClickHouse/ClickHouse/blob/master/tests/instructions/easy\_tasks\_sorted\_en.md # Test Data {#test-data} diff --git a/docs/en/development/tests.md b/docs/en/development/tests.md index e061f1c3144..ac0051fed70 100644 --- a/docs/en/development/tests.md +++ b/docs/en/development/tests.md @@ -6,15 +6,15 @@ Functional tests are the most simple and convenient to use. Most of ClickHouse f Each functional test sends one or multiple queries to the running ClickHouse server and compares the result with reference. -Tests are located in `dbms/tests/queries` directory. There are two subdirectories: `stateless` and `stateful`. Stateless tests run queries without any preloaded test data - they often create small synthetic datasets on the fly, within the test itself. Stateful tests require preloaded test data from Yandex.Metrica and not available to general public. We tend to use only `stateless` tests and avoid adding new `stateful` tests. +Tests are located in `tests/queries` directory. There are two subdirectories: `stateless` and `stateful`. Stateless tests run queries without any preloaded test data - they often create small synthetic datasets on the fly, within the test itself. Stateful tests require preloaded test data from Yandex.Metrica and not available to general public. We tend to use only `stateless` tests and avoid adding new `stateful` tests. Each test can be one of two types: `.sql` and `.sh`.
`.sql` test is the simple SQL script that is piped to `clickhouse-client --multiquery --testmode`. `.sh` test is a script that is run by itself. -To run all tests, use `dbms/tests/clickhouse-test` tool. Look `--help` for the list of possible options. You can simply run all tests or run subset of tests filtered by substring in test name: `./clickhouse-test substring`. +To run all tests, use `tests/clickhouse-test` tool. Look `--help` for the list of possible options. You can simply run all tests or run subset of tests filtered by substring in test name: `./clickhouse-test substring`. The most simple way to invoke functional tests is to copy `clickhouse-client` to `/usr/bin/`, run `clickhouse-server` and then run `./clickhouse-test` from its own directory. -To add new test, create a `.sql` or `.sh` file in `dbms/tests/queries/0_stateless` directory, check it manually and then generate `.reference` file in the following way: `clickhouse-client -n --testmode < 00000_test.sql > 00000_test.reference` or `./00000_test.sh > ./00000_test.reference`. +To add new test, create a `.sql` or `.sh` file in `tests/queries/0_stateless` directory, check it manually and then generate `.reference` file in the following way: `clickhouse-client -n --testmode < 00000_test.sql > 00000_test.reference` or `./00000_test.sh > ./00000_test.reference`. Tests should use (create, drop, etc) only tables in `test` database that is assumed to be created beforehand; also tests can use temporary tables. @@ -29,13 +29,13 @@ disable these groups of tests using `--no-zookeeper`, `--no-shard` and ## Known bugs {#known-bugs} -If we know some bugs that can be easily reproduced by functional tests, we place prepared functional tests in `dbms/tests/queries/bugs` directory. These tests will be moved to `dbms/tests/queries/0_stateless` when bugs are fixed. +If we know some bugs that can be easily reproduced by functional tests, we place prepared functional tests in `tests/queries/bugs` directory. These tests will be moved to `tests/queries/0_stateless` when bugs are fixed. ## Integration Tests {#integration-tests} Integration tests allow to test ClickHouse in clustered configuration and ClickHouse interaction with other servers like MySQL, Postgres, MongoDB. They are useful to emulate network splits, packet drops, etc. These tests are run under Docker and create multiple containers with various software. -See `dbms/tests/integration/README.md` on how to run these tests. +See `tests/integration/README.md` on how to run these tests. Note that integration of ClickHouse with third-party drivers is not tested. Also we currently don’t have integration tests with our JDBC and ODBC drivers. @@ -47,7 +47,7 @@ It’s not necessarily to have unit tests if the code is already covered by func ## Performance Tests {#performance-tests} -Performance tests allow to measure and compare performance of some isolated part of ClickHouse on synthetic queries. Tests are located at `dbms/tests/performance`. Each test is represented by `.xml` file with description of test case. Tests are run with `clickhouse performance-test` tool (that is embedded in `clickhouse` binary). See `--help` for invocation. +Performance tests allow to measure and compare performance of some isolated part of ClickHouse on synthetic queries. Tests are located at `tests/performance`. Each test is represented by `.xml` file with description of test case. Tests are run with `clickhouse performance-test` tool (that is embedded in `clickhouse` binary). See `--help` for invocation.
Each test run one or miltiple queries (possibly with combinations of parameters) in a loop with some conditions for stop (like “maximum execution speed is not changing in three seconds”) and measure some metrics about query performance (like “maximum execution speed”). Some tests can contain preconditions on preloaded test dataset. @@ -55,13 +55,13 @@ If you want to improve performance of ClickHouse in some scenario, and if improv ## Test Tools And Scripts {#test-tools-and-scripts} -Some programs in `tests` directory are not prepared tests, but are test tools. For example, for `Lexer` there is a tool `dbms/src/Parsers/tests/lexer` that just do tokenization of stdin and writes colorized result to stdout. You can use these kind of tools as a code examples and for exploration and manual testing. +Some programs in `tests` directory are not prepared tests, but are test tools. For example, for `Lexer` there is a tool `dbms/Parsers/tests/lexer` that just do tokenization of stdin and writes colorized result to stdout. You can use these kind of tools as a code examples and for exploration and manual testing. You can also place pair of files `.sh` and `.reference` along with the tool to run it on some predefined input - then script result can be compared to `.reference` file. These kind of tests are not automated. ## Miscellanous Tests {#miscellanous-tests} -There are tests for external dictionaries located at `dbms/tests/external_dictionaries` and for machine learned models in `dbms/tests/external_models`. These tests are not updated and must be transferred to integration tests. +There are tests for external dictionaries located at `tests/external_dictionaries` and for machine learned models in `tests/external_models`. These tests are not updated and must be transferred to integration tests. There is separate test for quorum inserts. This test run ClickHouse cluster on separate servers and emulate various failure cases: network split, packet drop (between ClickHouse nodes, between ClickHouse and ZooKeeper, between ClickHouse server and client, etc.), `kill -9`, `kill -STOP` and `kill -CONT` , like [Jepsen](https://aphyr.com/tags/Jepsen). Then the test checks that all acknowledged inserts was written and all rejected inserts was not. @@ -71,9 +71,9 @@ Quorum test was written by separate team before ClickHouse was open-sourced. Thi When you develop a new feature, it is reasonable to also test it manually. You can do it with the following steps: -Build ClickHouse. Run ClickHouse from the terminal: change directory to `dbms/src/programs/clickhouse-server` and run it with `./clickhouse-server`. It will use configuration (`config.xml`, `users.xml` and files within `config.d` and `users.d` directories) from the current directory by default. To connect to ClickHouse server, run `dbms/src/programs/clickhouse-client/clickhouse-client`. +Build ClickHouse. Run ClickHouse from the terminal: change directory to `programs/clickhouse-server` and run it with `./clickhouse-server`. It will use configuration (`config.xml`, `users.xml` and files within `config.d` and `users.d` directories) from the current directory by default. To connect to ClickHouse server, run `programs/clickhouse-client/clickhouse-client`. -Note that all clickhouse tools (server, client, etc) are just symlinks to a single binary named `clickhouse`. You can find this binary at `dbms/src/programs/clickhouse`. All tools can also be invoked as `clickhouse tool` instead of `clickhouse-tool`. 
+Note that all clickhouse tools (server, client, etc) are just symlinks to a single binary named `clickhouse`. You can find this binary at `programs/clickhouse`. All tools can also be invoked as `clickhouse tool` instead of `clickhouse-tool`. Alternatively you can install ClickHouse package: either stable release from Yandex repository or you can build package for yourself with `./release` in ClickHouse sources root. Then start the server with `sudo service clickhouse-server start` (or stop to stop the server). Look for logs at `/etc/clickhouse-server/clickhouse-server.log`. @@ -202,7 +202,7 @@ People from Yandex Cloud department do some basic overview of ClickHouse capabil ## Static Analyzers {#static-analyzers} -We run `PVS-Studio` on per-commit basis. We have evaluated `clang-tidy`, `Coverity`, `cppcheck`, `PVS-Studio`, `tscancode`. You will find instructions for usage in `dbms/tests/instructions/` directory. Also you can read [the article in russian](https://habr.com/company/yandex/blog/342018/). +We run `PVS-Studio` on per-commit basis. We have evaluated `clang-tidy`, `Coverity`, `cppcheck`, `PVS-Studio`, `tscancode`. You will find instructions for usage in `tests/instructions/` directory. Also you can read [the article in russian](https://habr.com/company/yandex/blog/342018/). If you use `CLion` as an IDE, you can leverage some `clang-tidy` checks out of the box. diff --git a/docs/en/getting_started/example_datasets/metrica.md b/docs/en/getting_started/example_datasets/metrica.md index e855f4b98a3..d4373948c03 100644 --- a/docs/en/getting_started/example_datasets/metrica.md +++ b/docs/en/getting_started/example_datasets/metrica.md @@ -60,4 +60,4 @@ clickhouse-client --query "SELECT COUNT(*) FROM datasets.visits_v1" [ClickHouse tutorial](../../getting_started/tutorial.md) is based on Yandex.Metrica dataset and the recommended way to get started with this dataset is to just go through tutorial. -Additional examples of queries to these tables can be found among [stateful tests](https://github.com/ClickHouse/ClickHouse/tree/master/dbms/tests/queries/1_stateful) of ClickHouse (they are named `test.hists` and `test.visits` there). +Additional examples of queries to these tables can be found among [stateful tests](https://github.com/ClickHouse/ClickHouse/tree/master/tests/queries/1_stateful) of ClickHouse (they are named `test.hits` and `test.visits` there). diff --git a/docs/en/getting_started/install.md b/docs/en/getting_started/install.md index c54051581f6..3af47b81c88 100644 --- a/docs/en/getting_started/install.md +++ b/docs/en/getting_started/install.md @@ -104,10 +104,10 @@ To manually compile ClickHouse, follow the instructions for [Linux](../developme You can compile packages and install them or use programs without installing packages. Also by building manually you can disable SSE 4.2 requirement or build for AArch64 CPUs. - Client: dbms/programs/clickhouse-client - Server: dbms/programs/clickhouse-server + Client: programs/clickhouse-client + Server: programs/clickhouse-server -You’ll need to create a data and metadata folders and `chown` them for the desired user.
Their paths can be changed in server config (src/dbms/programs/server/config.xml), by default they are: +You’ll need to create data and metadata folders and `chown` them for the desired user. Their paths can be changed in server config (src/programs/server/config.xml), by default they are: /opt/clickhouse/data/default/ /opt/clickhouse/metadata/default/ diff --git a/docs/en/interfaces/tcp.md b/docs/en/interfaces/tcp.md index 03122beb8c3..876ccf12f8a 100644 --- a/docs/en/interfaces/tcp.md +++ b/docs/en/interfaces/tcp.md @@ -1,5 +1,5 @@ # Native Interface (TCP) {#native-interface-tcp} -The native protocol is used in the [command-line client](cli.md), for inter-server communication during distributed query processing, and also in other C++ programs. Unfortunately, native ClickHouse protocol does not have formal specification yet, but it can be reverse-engineered from ClickHouse source code (starting [around here](https://github.com/ClickHouse/ClickHouse/tree/master/dbms/src/Client)) and/or by intercepting and analyzing TCP traffic. +The native protocol is used in the [command-line client](cli.md), for inter-server communication during distributed query processing, and also in other C++ programs. Unfortunately, native ClickHouse protocol does not have formal specification yet, but it can be reverse-engineered from ClickHouse source code (starting [around here](https://github.com/ClickHouse/ClickHouse/tree/master/dbms/Client)) and/or by intercepting and analyzing TCP traffic. [Original article](https://clickhouse.tech/docs/en/interfaces/tcp/) diff --git a/docs/en/operations/backup.md b/docs/en/operations/backup.md index d9d9cf3f284..52f7705f568 100644 --- a/docs/en/operations/backup.md +++ b/docs/en/operations/backup.md @@ -1,6 +1,6 @@ # Data Backup {#data-backup} -While [replication](table_engines/replication.md) provides protection from hardware failures, it does not protect against human errors: accidental deletion of data, deletion of the wrong table or a table on the wrong cluster, and software bugs that result in incorrect data processing or data corruption. In many cases mistakes like these will affect all replicas. ClickHouse has built-in safeguards to prevent some types of mistakes — for example, by default [you can’t just drop tables with a MergeTree-like engine containing more than 50 Gb of data](https://github.com/ClickHouse/ClickHouse/blob/v18.14.18-stable/dbms/programs/server/config.xml#L322-L330). However, these safeguards don’t cover all possible cases and can be circumvented. +While [replication](table_engines/replication.md) provides protection from hardware failures, it does not protect against human errors: accidental deletion of data, deletion of the wrong table or a table on the wrong cluster, and software bugs that result in incorrect data processing or data corruption. In many cases mistakes like these will affect all replicas. ClickHouse has built-in safeguards to prevent some types of mistakes — for example, by default [you can’t just drop tables with a MergeTree-like engine containing more than 50 Gb of data](https://github.com/ClickHouse/ClickHouse/blob/v18.14.18-stable/programs/server/config.xml#L322-L330). However, these safeguards don’t cover all possible cases and can be circumvented. In order to effectively mitigate possible human errors, you should carefully prepare a strategy for backing up and restoring your data **in advance**.
diff --git a/docs/en/operations/performance/sampling_query_profiler_example_result.txt b/docs/en/operations/performance/sampling_query_profiler_example_result.txt index 8e4e0e0fd70..56c2fdf9c65 100644 --- a/docs/en/operations/performance/sampling_query_profiler_example_result.txt +++ b/docs/en/operations/performance/sampling_query_profiler_example_result.txt @@ -2,55 +2,55 @@ Row 1: ────── count(): 6344 sym: StackTrace::StackTrace(ucontext_t const&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Common/StackTrace.cpp:208 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/IO/BufferBase.h:99 + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 read DB::ReadBufferFromFileDescriptor::nextImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/IO/ReadBufferFromFileDescriptor.cpp:56 + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/ReadBufferFromFileDescriptor.cpp:56 DB::CompressedReadBufferBase::readCompressedData(unsigned long&, unsigned long&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/IO/ReadBuffer.h:54 + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/ReadBuffer.h:54 DB::CompressedReadBufferFromFile::nextImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Compression/CompressedReadBufferFromFile.cpp:22 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Compression/CompressedReadBufferFromFile.cpp:22 DB::CompressedReadBufferFromFile::seek(unsigned long, unsigned long) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Compression/CompressedReadBufferFromFile.cpp:63 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Compression/CompressedReadBufferFromFile.cpp:63 DB::MergeTreeReaderStream::seekToMark(unsigned long) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeReaderStream.cpp:200 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReaderStream.cpp:200 std::_Function_handler > const&), DB::MergeTreeReader::readData(std::__cxx11::basic_string, std::allocator > const&, DB::IDataType const&, DB::IColumn&, unsigned long, bool, unsigned long, bool)::{lambda(bool)#1}::operator()(bool) const::{lambda(std::vector > const&)#1}>::_M_invoke(std::_Any_data const&, std::vector > const&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeReader.cpp:212 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:212 DB::IDataType::deserializeBinaryBulkWithMultipleStreams(DB::IColumn&, unsigned long, DB::IDataType::DeserializeBinaryBulkSettings&, std::shared_ptr&) const /usr/local/include/c++/9.1.0/bits/std_function.h:690 DB::MergeTreeReader::readData(std::__cxx11::basic_string, std::allocator > const&, DB::IDataType const&, DB::IColumn&, unsigned long, bool, unsigned long, bool) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeReader.cpp:232 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:232 DB::MergeTreeReader::readRows(unsigned long, bool, unsigned long, DB::Block&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeReader.cpp:111 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:111 DB::MergeTreeRangeReader::DelayedStream::finalize(DB::Block&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeRangeReader.cpp:35 + 
/home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:35 DB::MergeTreeRangeReader::continueReadingChain(DB::MergeTreeRangeReader::ReadResult&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeRangeReader.cpp:219 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:219 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeRangeReader.cpp:487 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:487 DB::MergeTreeBaseSelectBlockInputStream::readFromPartImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 DB::MergeTreeBaseSelectBlockInputStream::readImpl() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ExpressionBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataStreams/ExpressionBlockInputStream.cpp:34 + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::PartialSortingBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataStreams/PartialSortingBlockInputStream.cpp:13 + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ParallelInputsProcessor::loop(unsigned long) /usr/local/include/c++/9.1.0/bits/atomic_base.h:419 DB::ParallelInputsProcessor::thread(std::shared_ptr, unsigned long) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataStreams/ParallelInputsProcessor.h:215 + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215 ThreadFromGlobalPool::ThreadFromGlobalPool::*)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*, std::shared_ptr, unsigned long&>(void (DB::ParallelInputsProcessor::*&&)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*&&, std::shared_ptr&&, unsigned long&)::{lambda()#1}::operator()() const /usr/local/include/c++/9.1.0/bits/shared_ptr_base.h:729 ThreadPoolImpl::worker(std::_List_iterator) @@ -66,9 +66,9 @@ Row 2: ────── count(): 3295 sym: StackTrace::StackTrace(ucontext_t const&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Common/StackTrace.cpp:208 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/IO/BufferBase.h:99 + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 __pthread_cond_wait @@ -82,11 +82,11 @@ DB::UnionBlockInputStream::readImpl() DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::MergeSortingBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Core/Block.h:90 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Core/Block.h:90 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ExpressionBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataStreams/ExpressionBlockInputStream.cpp:34 
+ /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::LimitBlockInputStream::readImpl() @@ -100,7 +100,7 @@ std::_Function_handler::worker(std::_List_iterator) /usr/local/include/c++/9.1.0/x86_64-pc-linux-gnu/bits/gthr-default.h:748 ThreadFromGlobalPool::ThreadFromGlobalPool::scheduleImpl(std::function, int, std::optional)::{lambda()#3}>(ThreadPoolImpl::scheduleImpl(std::function, int, std::optional)::{lambda()#3}&&)::{lambda()#1}::operator()() const - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Common/ThreadPool.h:146 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/ThreadPool.h:146 ThreadPoolImpl::worker(std::_List_iterator) /usr/local/include/c++/9.1.0/bits/unique_lock.h:69 execute_native_thread_routine @@ -114,47 +114,47 @@ Row 3: ────── count(): 1978 sym: StackTrace::StackTrace(ucontext_t const&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Common/StackTrace.cpp:208 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/IO/BufferBase.h:99 + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 DB::VolnitskyBase >::search(unsigned char const*, unsigned long) const - /opt/milovidov/ClickHouse/build_gcc9/dbms/programs/clickhouse + /opt/milovidov/ClickHouse/build_gcc9/programs/clickhouse DB::MatchImpl::vector_constant(DB::PODArray, 15ul, 16ul> const&, DB::PODArray, 15ul, 16ul> const&, std::__cxx11::basic_string, std::allocator > const&, DB::PODArray, 15ul, 16ul>&) - /opt/milovidov/ClickHouse/build_gcc9/dbms/programs/clickhouse + /opt/milovidov/ClickHouse/build_gcc9/programs/clickhouse DB::FunctionsStringSearch, DB::NameLike>::executeImpl(DB::Block&, std::vector > const&, unsigned long, unsigned long) - /opt/milovidov/ClickHouse/build_gcc9/dbms/programs/clickhouse + /opt/milovidov/ClickHouse/build_gcc9/programs/clickhouse DB::PreparedFunctionImpl::execute(DB::Block&, std::vector > const&, unsigned long, unsigned long, bool) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Functions/IFunction.cpp:464 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Functions/IFunction.cpp:464 DB::ExpressionAction::execute(DB::Block&, bool) const /usr/local/include/c++/9.1.0/bits/stl_vector.h:677 DB::ExpressionActions::execute(DB::Block&, bool) const - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Interpreters/ExpressionActions.cpp:739 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Interpreters/ExpressionActions.cpp:739 DB::MergeTreeRangeReader::executePrewhereActionsAndFilterColumns(DB::MergeTreeRangeReader::ReadResult&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeRangeReader.cpp:660 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:660 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeRangeReader.cpp:546 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:546 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::MergeTreeBaseSelectBlockInputStream::readFromPartImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 + 
/home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 DB::MergeTreeBaseSelectBlockInputStream::readImpl() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ExpressionBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataStreams/ExpressionBlockInputStream.cpp:34 + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::PartialSortingBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataStreams/PartialSortingBlockInputStream.cpp:13 + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ParallelInputsProcessor::loop(unsigned long) /usr/local/include/c++/9.1.0/bits/atomic_base.h:419 DB::ParallelInputsProcessor::thread(std::shared_ptr, unsigned long) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataStreams/ParallelInputsProcessor.h:215 + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215 ThreadFromGlobalPool::ThreadFromGlobalPool::*)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*, std::shared_ptr, unsigned long&>(void (DB::ParallelInputsProcessor::*&&)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*&&, std::shared_ptr&&, unsigned long&)::{lambda()#1}::operator()() const /usr/local/include/c++/9.1.0/bits/shared_ptr_base.h:729 ThreadPoolImpl::worker(std::_List_iterator) @@ -170,47 +170,47 @@ Row 4: ────── count(): 1913 sym: StackTrace::StackTrace(ucontext_t const&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Common/StackTrace.cpp:208 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/IO/BufferBase.h:99 + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 DB::VolnitskyBase >::search(unsigned char const*, unsigned long) const - /opt/milovidov/ClickHouse/build_gcc9/dbms/programs/clickhouse + /opt/milovidov/ClickHouse/build_gcc9/programs/clickhouse DB::MatchImpl::vector_constant(DB::PODArray, 15ul, 16ul> const&, DB::PODArray, 15ul, 16ul> const&, std::__cxx11::basic_string, std::allocator > const&, DB::PODArray, 15ul, 16ul>&) - /opt/milovidov/ClickHouse/build_gcc9/dbms/programs/clickhouse + /opt/milovidov/ClickHouse/build_gcc9/programs/clickhouse DB::FunctionsStringSearch, DB::NameLike>::executeImpl(DB::Block&, std::vector > const&, unsigned long, unsigned long) - /opt/milovidov/ClickHouse/build_gcc9/dbms/programs/clickhouse + /opt/milovidov/ClickHouse/build_gcc9/programs/clickhouse DB::PreparedFunctionImpl::execute(DB::Block&, std::vector > const&, unsigned long, unsigned long, bool) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Functions/IFunction.cpp:464 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Functions/IFunction.cpp:464 DB::ExpressionAction::execute(DB::Block&, bool) const /usr/local/include/c++/9.1.0/bits/stl_vector.h:677 DB::ExpressionActions::execute(DB::Block&, bool) const - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Interpreters/ExpressionActions.cpp:739 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Interpreters/ExpressionActions.cpp:739 
DB::MergeTreeRangeReader::executePrewhereActionsAndFilterColumns(DB::MergeTreeRangeReader::ReadResult&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeRangeReader.cpp:660 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:660 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeRangeReader.cpp:546 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:546 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::MergeTreeBaseSelectBlockInputStream::readFromPartImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 DB::MergeTreeBaseSelectBlockInputStream::readImpl() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ExpressionBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataStreams/ExpressionBlockInputStream.cpp:34 + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::PartialSortingBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataStreams/PartialSortingBlockInputStream.cpp:13 + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ParallelInputsProcessor::loop(unsigned long) /usr/local/include/c++/9.1.0/bits/atomic_base.h:419 DB::ParallelInputsProcessor::thread(std::shared_ptr, unsigned long) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataStreams/ParallelInputsProcessor.h:215 + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215 ThreadFromGlobalPool::ThreadFromGlobalPool::*)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*, std::shared_ptr, unsigned long&>(void (DB::ParallelInputsProcessor::*&&)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*&&, std::shared_ptr&&, unsigned long&)::{lambda()#1}::operator()() const /usr/local/include/c++/9.1.0/bits/shared_ptr_base.h:729 ThreadPoolImpl::worker(std::_List_iterator) @@ -226,47 +226,47 @@ Row 5: ────── count(): 1672 sym: StackTrace::StackTrace(ucontext_t const&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Common/StackTrace.cpp:208 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/IO/BufferBase.h:99 + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 DB::VolnitskyBase >::search(unsigned char const*, unsigned long) const - /opt/milovidov/ClickHouse/build_gcc9/dbms/programs/clickhouse + /opt/milovidov/ClickHouse/build_gcc9/programs/clickhouse DB::MatchImpl::vector_constant(DB::PODArray, 15ul, 16ul> const&, DB::PODArray, 15ul, 16ul> const&, std::__cxx11::basic_string, std::allocator > const&, DB::PODArray, 15ul, 16ul>&) - /opt/milovidov/ClickHouse/build_gcc9/dbms/programs/clickhouse + /opt/milovidov/ClickHouse/build_gcc9/programs/clickhouse 
DB::FunctionsStringSearch, DB::NameLike>::executeImpl(DB::Block&, std::vector > const&, unsigned long, unsigned long) - /opt/milovidov/ClickHouse/build_gcc9/dbms/programs/clickhouse + /opt/milovidov/ClickHouse/build_gcc9/programs/clickhouse DB::PreparedFunctionImpl::execute(DB::Block&, std::vector > const&, unsigned long, unsigned long, bool) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Functions/IFunction.cpp:464 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Functions/IFunction.cpp:464 DB::ExpressionAction::execute(DB::Block&, bool) const /usr/local/include/c++/9.1.0/bits/stl_vector.h:677 DB::ExpressionActions::execute(DB::Block&, bool) const - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Interpreters/ExpressionActions.cpp:739 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Interpreters/ExpressionActions.cpp:739 DB::MergeTreeRangeReader::executePrewhereActionsAndFilterColumns(DB::MergeTreeRangeReader::ReadResult&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeRangeReader.cpp:660 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:660 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeRangeReader.cpp:546 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:546 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::MergeTreeBaseSelectBlockInputStream::readFromPartImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 DB::MergeTreeBaseSelectBlockInputStream::readImpl() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ExpressionBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataStreams/ExpressionBlockInputStream.cpp:34 + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::PartialSortingBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataStreams/PartialSortingBlockInputStream.cpp:13 + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ParallelInputsProcessor::loop(unsigned long) /usr/local/include/c++/9.1.0/bits/atomic_base.h:419 DB::ParallelInputsProcessor::thread(std::shared_ptr, unsigned long) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataStreams/ParallelInputsProcessor.h:215 + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215 ThreadFromGlobalPool::ThreadFromGlobalPool::*)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*, std::shared_ptr, unsigned long&>(void (DB::ParallelInputsProcessor::*&&)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*&&, std::shared_ptr&&, unsigned long&)::{lambda()#1}::operator()() const /usr/local/include/c++/9.1.0/bits/shared_ptr_base.h:729 ThreadPoolImpl::worker(std::_List_iterator) @@ -282,53 +282,53 @@ Row 6: ────── count(): 1531 sym: StackTrace::StackTrace(ucontext_t const&) - 
/home/milovidov/ClickHouse/build_gcc9/../dbms/src/Common/StackTrace.cpp:208 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/IO/BufferBase.h:99 + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 read DB::ReadBufferFromFileDescriptor::nextImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/IO/ReadBufferFromFileDescriptor.cpp:56 + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/ReadBufferFromFileDescriptor.cpp:56 DB::CompressedReadBufferBase::readCompressedData(unsigned long&, unsigned long&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/IO/ReadBuffer.h:54 + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/ReadBuffer.h:54 DB::CompressedReadBufferFromFile::nextImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Compression/CompressedReadBufferFromFile.cpp:22 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Compression/CompressedReadBufferFromFile.cpp:22 void DB::deserializeBinarySSE2<4>(DB::PODArray, 15ul, 16ul>&, DB::PODArray, 15ul, 16ul>&, DB::ReadBuffer&, unsigned long) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/IO/ReadBuffer.h:53 + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/ReadBuffer.h:53 DB::DataTypeString::deserializeBinaryBulk(DB::IColumn&, DB::ReadBuffer&, unsigned long, double) const - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataTypes/DataTypeString.cpp:202 + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataTypes/DataTypeString.cpp:202 DB::MergeTreeReader::readData(std::__cxx11::basic_string, std::allocator > const&, DB::IDataType const&, DB::IColumn&, unsigned long, bool, unsigned long, bool) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeReader.cpp:232 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:232 DB::MergeTreeReader::readRows(unsigned long, bool, unsigned long, DB::Block&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeReader.cpp:111 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:111 DB::MergeTreeRangeReader::DelayedStream::finalize(DB::Block&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeRangeReader.cpp:35 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:35 DB::MergeTreeRangeReader::startReadingChain(unsigned long, std::vector >&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeRangeReader.cpp:219 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:219 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::MergeTreeBaseSelectBlockInputStream::readFromPartImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 DB::MergeTreeBaseSelectBlockInputStream::readImpl() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ExpressionBlockInputStream::readImpl() - 
/home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataStreams/ExpressionBlockInputStream.cpp:34 + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::PartialSortingBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataStreams/PartialSortingBlockInputStream.cpp:13 + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ParallelInputsProcessor::loop(unsigned long) /usr/local/include/c++/9.1.0/bits/atomic_base.h:419 DB::ParallelInputsProcessor::thread(std::shared_ptr, unsigned long) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataStreams/ParallelInputsProcessor.h:215 + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215 ThreadFromGlobalPool::ThreadFromGlobalPool::*)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*, std::shared_ptr, unsigned long&>(void (DB::ParallelInputsProcessor::*&&)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*&&, std::shared_ptr&&, unsigned long&)::{lambda()#1}::operator()() const /usr/local/include/c++/9.1.0/bits/shared_ptr_base.h:729 ThreadPoolImpl::worker(std::_List_iterator) @@ -344,47 +344,47 @@ Row 7: ────── count(): 1034 sym: StackTrace::StackTrace(ucontext_t const&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Common/StackTrace.cpp:208 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/IO/BufferBase.h:99 + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 DB::VolnitskyBase >::search(unsigned char const*, unsigned long) const - /opt/milovidov/ClickHouse/build_gcc9/dbms/programs/clickhouse + /opt/milovidov/ClickHouse/build_gcc9/programs/clickhouse DB::MatchImpl::vector_constant(DB::PODArray, 15ul, 16ul> const&, DB::PODArray, 15ul, 16ul> const&, std::__cxx11::basic_string, std::allocator > const&, DB::PODArray, 15ul, 16ul>&) - /opt/milovidov/ClickHouse/build_gcc9/dbms/programs/clickhouse + /opt/milovidov/ClickHouse/build_gcc9/programs/clickhouse DB::FunctionsStringSearch, DB::NameLike>::executeImpl(DB::Block&, std::vector > const&, unsigned long, unsigned long) - /opt/milovidov/ClickHouse/build_gcc9/dbms/programs/clickhouse + /opt/milovidov/ClickHouse/build_gcc9/programs/clickhouse DB::PreparedFunctionImpl::execute(DB::Block&, std::vector > const&, unsigned long, unsigned long, bool) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Functions/IFunction.cpp:464 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Functions/IFunction.cpp:464 DB::ExpressionAction::execute(DB::Block&, bool) const /usr/local/include/c++/9.1.0/bits/stl_vector.h:677 DB::ExpressionActions::execute(DB::Block&, bool) const - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Interpreters/ExpressionActions.cpp:739 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Interpreters/ExpressionActions.cpp:739 DB::MergeTreeRangeReader::executePrewhereActionsAndFilterColumns(DB::MergeTreeRangeReader::ReadResult&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeRangeReader.cpp:660 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:660 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) - 
/home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeRangeReader.cpp:546 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:546 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::MergeTreeBaseSelectBlockInputStream::readFromPartImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 DB::MergeTreeBaseSelectBlockInputStream::readImpl() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ExpressionBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataStreams/ExpressionBlockInputStream.cpp:34 + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::PartialSortingBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataStreams/PartialSortingBlockInputStream.cpp:13 + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ParallelInputsProcessor::loop(unsigned long) /usr/local/include/c++/9.1.0/bits/atomic_base.h:419 DB::ParallelInputsProcessor::thread(std::shared_ptr, unsigned long) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataStreams/ParallelInputsProcessor.h:215 + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215 ThreadFromGlobalPool::ThreadFromGlobalPool::*)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*, std::shared_ptr, unsigned long&>(void (DB::ParallelInputsProcessor::*&&)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*&&, std::shared_ptr&&, unsigned long&)::{lambda()#1}::operator()() const /usr/local/include/c++/9.1.0/bits/shared_ptr_base.h:729 ThreadPoolImpl::worker(std::_List_iterator) @@ -400,9 +400,9 @@ Row 8: ────── count(): 989 sym: StackTrace::StackTrace(ucontext_t const&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Common/StackTrace.cpp:208 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/IO/BufferBase.h:99 + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 __lll_lock_wait @@ -412,7 +412,7 @@ pthread_mutex_lock DB::MergeTreeReaderStream::loadMarks() /usr/local/include/c++/9.1.0/bits/std_mutex.h:103 DB::MergeTreeReaderStream::MergeTreeReaderStream(std::__cxx11::basic_string, std::allocator > const&, std::__cxx11::basic_string, std::allocator > const&, unsigned long, std::vector > const&, DB::MarkCache*, bool, DB::UncompressedCache*, unsigned long, unsigned long, unsigned long, DB::MergeTreeIndexGranularityInfo const*, std::function const&, int) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeReaderStream.cpp:107 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReaderStream.cpp:107 std::_Function_handler > const&), DB::MergeTreeReader::addStreams(std::__cxx11::basic_string, std::allocator > const&, DB::IDataType const&, std::function const&, int)::{lambda(std::vector 
> const&)#1}>::_M_invoke(std::_Any_data const&, std::vector > const&) /usr/local/include/c++/9.1.0/bits/unique_ptr.h:147 DB::MergeTreeReader::addStreams(std::__cxx11::basic_string, std::allocator > const&, DB::IDataType const&, std::function const&, int) @@ -422,21 +422,21 @@ DB::MergeTreeReader::MergeTreeReader(std::__cxx11::basic_string::loop(unsigned long) /usr/local/include/c++/9.1.0/bits/atomic_base.h:419 DB::ParallelInputsProcessor::thread(std::shared_ptr, unsigned long) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataStreams/ParallelInputsProcessor.h:215 + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215 ThreadFromGlobalPool::ThreadFromGlobalPool::*)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*, std::shared_ptr, unsigned long&>(void (DB::ParallelInputsProcessor::*&&)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*&&, std::shared_ptr&&, unsigned long&)::{lambda()#1}::operator()() const /usr/local/include/c++/9.1.0/bits/shared_ptr_base.h:729 ThreadPoolImpl::worker(std::_List_iterator) @@ -452,45 +452,45 @@ Row 9: ─────── count(): 779 sym: StackTrace::StackTrace(ucontext_t const&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Common/StackTrace.cpp:208 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/IO/BufferBase.h:99 + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 void DB::deserializeBinarySSE2<4>(DB::PODArray, 15ul, 16ul>&, DB::PODArray, 15ul, 16ul>&, DB::ReadBuffer&, unsigned long) /usr/local/lib/gcc/x86_64-pc-linux-gnu/9.1.0/include/emmintrin.h:727 DB::DataTypeString::deserializeBinaryBulk(DB::IColumn&, DB::ReadBuffer&, unsigned long, double) const - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataTypes/DataTypeString.cpp:202 + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataTypes/DataTypeString.cpp:202 DB::MergeTreeReader::readData(std::__cxx11::basic_string, std::allocator > const&, DB::IDataType const&, DB::IColumn&, unsigned long, bool, unsigned long, bool) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeReader.cpp:232 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:232 DB::MergeTreeReader::readRows(unsigned long, bool, unsigned long, DB::Block&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeReader.cpp:111 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:111 DB::MergeTreeRangeReader::DelayedStream::finalize(DB::Block&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeRangeReader.cpp:35 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:35 DB::MergeTreeRangeReader::startReadingChain(unsigned long, std::vector >&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeRangeReader.cpp:219 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:219 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::MergeTreeBaseSelectBlockInputStream::readFromPartImpl() - 
/home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 DB::MergeTreeBaseSelectBlockInputStream::readImpl() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ExpressionBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataStreams/ExpressionBlockInputStream.cpp:34 + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::PartialSortingBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataStreams/PartialSortingBlockInputStream.cpp:13 + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ParallelInputsProcessor::loop(unsigned long) /usr/local/include/c++/9.1.0/bits/atomic_base.h:419 DB::ParallelInputsProcessor::thread(std::shared_ptr, unsigned long) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataStreams/ParallelInputsProcessor.h:215 + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215 ThreadFromGlobalPool::ThreadFromGlobalPool::*)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*, std::shared_ptr, unsigned long&>(void (DB::ParallelInputsProcessor::*&&)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*&&, std::shared_ptr&&, unsigned long&)::{lambda()#1}::operator()() const /usr/local/include/c++/9.1.0/bits/shared_ptr_base.h:729 ThreadPoolImpl::worker(std::_List_iterator) @@ -506,45 +506,45 @@ Row 10: ─────── count(): 666 sym: StackTrace::StackTrace(ucontext_t const&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Common/StackTrace.cpp:208 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/IO/BufferBase.h:99 + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 void DB::deserializeBinarySSE2<4>(DB::PODArray, 15ul, 16ul>&, DB::PODArray, 15ul, 16ul>&, DB::ReadBuffer&, unsigned long) /usr/local/lib/gcc/x86_64-pc-linux-gnu/9.1.0/include/emmintrin.h:727 DB::DataTypeString::deserializeBinaryBulk(DB::IColumn&, DB::ReadBuffer&, unsigned long, double) const - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataTypes/DataTypeString.cpp:202 + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataTypes/DataTypeString.cpp:202 DB::MergeTreeReader::readData(std::__cxx11::basic_string, std::allocator > const&, DB::IDataType const&, DB::IColumn&, unsigned long, bool, unsigned long, bool) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeReader.cpp:232 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:232 DB::MergeTreeReader::readRows(unsigned long, bool, unsigned long, DB::Block&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeReader.cpp:111 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:111 DB::MergeTreeRangeReader::DelayedStream::finalize(DB::Block&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeRangeReader.cpp:35 + 
/home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:35 DB::MergeTreeRangeReader::startReadingChain(unsigned long, std::vector >&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeRangeReader.cpp:219 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:219 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::MergeTreeBaseSelectBlockInputStream::readFromPartImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 DB::MergeTreeBaseSelectBlockInputStream::readImpl() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ExpressionBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataStreams/ExpressionBlockInputStream.cpp:34 + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::PartialSortingBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataStreams/PartialSortingBlockInputStream.cpp:13 + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ParallelInputsProcessor::loop(unsigned long) /usr/local/include/c++/9.1.0/bits/atomic_base.h:419 DB::ParallelInputsProcessor::thread(std::shared_ptr, unsigned long) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataStreams/ParallelInputsProcessor.h:215 + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215 ThreadFromGlobalPool::ThreadFromGlobalPool::*)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*, std::shared_ptr, unsigned long&>(void (DB::ParallelInputsProcessor::*&&)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*&&, std::shared_ptr&&, unsigned long&)::{lambda()#1}::operator()() const /usr/local/include/c++/9.1.0/bits/shared_ptr_base.h:729 ThreadPoolImpl::worker(std::_List_iterator) diff --git a/docs/en/operations/performance_test.md b/docs/en/operations/performance_test.md index 3413bc87346..c7cb0e2d78f 100644 --- a/docs/en/operations/performance_test.md +++ b/docs/en/operations/performance_test.md @@ -23,19 +23,19 @@ With this instruction you can run basic ClickHouse performance test on any serve - wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/dbms/programs/server/config.xml - wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/dbms/programs/server/users.xml + wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.xml + wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/users.xml mkdir config.d - wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/dbms/programs/server/config.d/path.xml -O config.d/path.xml - wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/dbms/programs/server/config.d/log_to_console.xml -O config.d/log_to_console.xml + wget 
https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.d/path.xml -O config.d/path.xml + wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.d/log_to_console.xml -O config.d/log_to_console.xml 1. Download benchmark files: - wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/dbms/benchmark/clickhouse/benchmark-new.sh + wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/clickhouse/benchmark-new.sh chmod a+x benchmark-new.sh - wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/dbms/benchmark/clickhouse/queries.sql + wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/clickhouse/queries.sql 1. Download test data according to the [Yandex.Metrica dataset](../getting_started/example_datasets/metrica.md) instruction (“hits” table containing 100 million rows). diff --git a/docs/en/operations/settings/query_complexity.md b/docs/en/operations/settings/query_complexity.md index 6f3819d48e9..f62fa3b0008 100644 --- a/docs/en/operations/settings/query_complexity.md +++ b/docs/en/operations/settings/query_complexity.md @@ -37,7 +37,7 @@ Memory consumption is also restricted by the parameters `max_memory_usage_for_us The maximum amount of RAM to use for running a user’s queries on a single server. -Default values are defined in [Settings.h](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/src/Core/Settings.h#L288). By default, the amount is not restricted (`max_memory_usage_for_user = 0`). +Default values are defined in [Settings.h](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/Core/Settings.h#L288). By default, the amount is not restricted (`max_memory_usage_for_user = 0`). See also the description of [max\_memory\_usage](#settings_max_memory_usage). @@ -45,7 +45,7 @@ See also the description of [max\_memory\_usage](#settings_max_memory_usage). The maximum amount of RAM to use for running all queries on a single server. -Default values are defined in [Settings.h](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/src/Core/Settings.h#L289). By default, the amount is not restricted (`max_memory_usage_for_all_queries = 0`). +Default values are defined in [Settings.h](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/Core/Settings.h#L289). By default, the amount is not restricted (`max_memory_usage_for_all_queries = 0`). See also the description of [max\_memory\_usage](#settings_max_memory_usage). diff --git a/docs/en/operations/system_tables.md b/docs/en/operations/system_tables.md index 1ef90001ae1..4bd58052196 100644 --- a/docs/en/operations/system_tables.md +++ b/docs/en/operations/system_tables.md @@ -252,7 +252,7 @@ Columns: - `value` ([Int64](../data_types/int_uint.md)) — Metric value. - `description` ([String](../data_types/string.md)) — Metric description. -The list of supported metrics you can find in the [dbms/src/Common/CurrentMetrics.cpp](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/src/Common/CurrentMetrics.cpp) source file of ClickHouse. +The list of supported metrics you can find in the [dbms/Common/CurrentMetrics.cpp](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/Common/CurrentMetrics.cpp) source file of ClickHouse. 
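[editor's note] To make the `system.metrics` hunk above concrete before the doc's own **Example** section, here is a minimal sketch of a query against that table. It is an assumption-laden illustration only: it presumes a running server and that the first column of `system.metrics` is named `metric`, as in the ClickHouse documentation; the `LIKE` filter is purely illustrative.

```sql
-- Sketch: inspect a few current metrics (filter chosen for illustration only).
SELECT metric, value, description
FROM system.metrics
WHERE metric LIKE '%Connection%'
LIMIT 5;
```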
**Example** diff --git a/docs/en/query_language/alter.md b/docs/en/query_language/alter.md index 8e9915194c9..625b460c0ef 100644 --- a/docs/en/query_language/alter.md +++ b/docs/en/query_language/alter.md @@ -442,7 +442,7 @@ All the rules above are also true for the [OPTIMIZE](misc.md#misc_operations-opt OPTIMIZE TABLE table_not_partitioned PARTITION tuple() FINAL; ``` -The examples of `ALTER ... PARTITION` queries are demonstrated in the tests [`00502_custom_partitioning_local`](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/tests/queries/0_stateless/00502_custom_partitioning_local.sql) and [`00502_custom_partitioning_replicated_zookeeper`](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper.sql). +The examples of `ALTER ... PARTITION` queries are demonstrated in the tests [`00502_custom_partitioning_local`](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00502_custom_partitioning_local.sql) and [`00502_custom_partitioning_replicated_zookeeper`](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper.sql). ### Manipulations with Table TTL {#manipulations-with-table-ttl} diff --git a/docs/en/query_language/create.md b/docs/en/query_language/create.md index e0ee7b71d9d..066eb396b19 100644 --- a/docs/en/query_language/create.md +++ b/docs/en/query_language/create.md @@ -147,7 +147,7 @@ ENGINE = If a codec is specified, the default codec doesn’t apply. Codecs can be combined in a pipeline, for example, `CODEC(Delta, ZSTD)`. To select the best codec combination for you project, pass benchmarks similar to described in the Altinity [New Encodings to Improve ClickHouse Efficiency](https://www.altinity.com/blog/2019/7/new-encodings-to-improve-clickhouse) article. !!! warning "Warning" - You can’t decompress ClickHouse database files with external utilities like `lz4`. Instead, use the special [clickhouse-compressor](https://github.com/ClickHouse/ClickHouse/tree/master/dbms/programs/compressor) utility. + You can’t decompress ClickHouse database files with external utilities like `lz4`. Instead, use the special [clickhouse-compressor](https://github.com/ClickHouse/ClickHouse/tree/master/programs/compressor) utility. Compression is supported for the following table engines: diff --git a/docs/en/query_language/functions/introspection.md b/docs/en/query_language/functions/introspection.md index 389a0555315..5193d3a85a7 100644 --- a/docs/en/query_language/functions/introspection.md +++ b/docs/en/query_language/functions/introspection.md @@ -35,7 +35,7 @@ addressToLine(address_of_binary_instruction) - Source code filename and the line number in this file delimited by colon. - For example, `/build/obj-x86_64-linux-gnu/../dbms/src/Common/ThreadPool.cpp:199`, where `199` is a line number. + For example, `/build/obj-x86_64-linux-gnu/../dbms/Common/ThreadPool.cpp:199`, where `199` is a line number. - Name of a binary, if the function couldn’t find the debug information. 
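[editor's note] Before the sample outputs in the next hunk, a short sketch of how `addressToLine` is typically combined with `system.trace_log`. This is not the doc's own example but a hedged illustration; it assumes `allow_introspection_functions = 1` is permitted for the session and that the server has already collected sampling-profiler entries into `system.trace_log`.

```sql
SET allow_introspection_functions = 1;

-- Map the top frame of each sampled stack to a source line and rank the lines
-- by how often they were sampled (trace is an Array(UInt64) of addresses).
SELECT
    addressToLine(trace[1]) AS top_frame,
    count() AS samples
FROM system.trace_log
GROUP BY top_frame
ORDER BY samples DESC
LIMIT 10;
```

This mirrors the whole-stack `arrayMap` example shown below, but aggregating on the top frame alone is often enough to spot a hot spot quickly.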
@@ -80,7 +80,7 @@ SELECT addressToLine(94784076370703) \G ``` text Row 1: ────── -addressToLine(94784076370703): /build/obj-x86_64-linux-gnu/../dbms/src/Common/ThreadPool.cpp:199 +addressToLine(94784076370703): /build/obj-x86_64-linux-gnu/../dbms/Common/ThreadPool.cpp:199 ``` Applying the function to the whole stack trace: @@ -100,8 +100,8 @@ Row 1: ────── trace_source_code_lines: /lib/x86_64-linux-gnu/libpthread-2.27.so /usr/lib/debug/usr/bin/clickhouse -/build/obj-x86_64-linux-gnu/../dbms/src/Common/ThreadPool.cpp:199 -/build/obj-x86_64-linux-gnu/../dbms/src/Common/ThreadPool.h:155 +/build/obj-x86_64-linux-gnu/../dbms/Common/ThreadPool.cpp:199 +/build/obj-x86_64-linux-gnu/../dbms/Common/ThreadPool.h:155 /usr/include/c++/9/bits/atomic_base.h:551 /usr/lib/debug/usr/bin/clickhouse /lib/x86_64-linux-gnu/libpthread-2.27.so diff --git a/docs/en/query_language/operators.md b/docs/en/query_language/operators.md index 5630ce31e34..700a5c87c9a 100644 --- a/docs/en/query_language/operators.md +++ b/docs/en/query_language/operators.md @@ -129,7 +129,7 @@ FROM test.Orders; └───────────┴────────────┴──────────┴───────────┴─────────────┴─────────────┘ ``` -You can see more examples in [tests](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/tests/queries/0_stateless/00619_extract.sql). +You can see more examples in [tests](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00619_extract.sql). ### INTERVAL {#operator-interval} diff --git a/docs/es/changelog/index.md b/docs/es/changelog/index.md index 4ee7c076082..26a516494f3 100644 --- a/docs/es/changelog/index.md +++ b/docs/es/changelog/index.md @@ -241,7 +241,7 @@ machine_translated: true - Comprobación actualizada de consultas colgadas en el script de prueba de clickhouse [\#8858](https://github.com/ClickHouse/ClickHouse/pull/8858) ([Alejandro Kazakov](https://github.com/Akazz)) - Se eliminaron algunos archivos inútiles del repositorio. [\#8843](https://github.com/ClickHouse/ClickHouse/pull/8843) ([alexey-milovidov](https://github.com/alexey-milovidov)) - Tipo cambiado de perftests matemáticos de `once` a `loop`. [\#8783](https://github.com/ClickHouse/ClickHouse/pull/8783) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -- Agregue una imagen acoplable que permite construir un informe HTML del navegador de código interactivo para nuestra base de código. [\#8781](https://github.com/ClickHouse/ClickHouse/pull/8781) ([alesapin](https://github.com/alesapin)) Ver [Navegador de código Woboq](https://clickhouse-test-reports.s3.yandex.net/codebrowser/html_report///ClickHouse/dbms/src/index.html) +- Agregue una imagen acoplable que permite construir un informe HTML del navegador de código interactivo para nuestra base de código. [\#8781](https://github.com/ClickHouse/ClickHouse/pull/8781) ([alesapin](https://github.com/alesapin)) Ver [Navegador de código Woboq](https://clickhouse-test-reports.s3.yandex.net/codebrowser/html_report///ClickHouse/dbms/index.html) - Suprima algunas fallas de prueba bajo MSan. [\#8780](https://github.com/ClickHouse/ClickHouse/pull/8780) ([Alejandro Kuzmenkov](https://github.com/akuzm)) - Aceleración “exception while insert” prueba. Esta prueba a menudo se agota en la compilación de depuración con cobertura. [\#8711](https://github.com/ClickHouse/ClickHouse/pull/8711) ([alexey-milovidov](https://github.com/alexey-milovidov)) - Actualizar `libcxx` y `libcxxabi` dominar. 
En preparación para [\#9304](https://github.com/ClickHouse/ClickHouse/issues/9304) [\#9308](https://github.com/ClickHouse/ClickHouse/pull/9308) ([alexey-milovidov](https://github.com/alexey-milovidov)) diff --git a/docs/es/development/browse_code.md b/docs/es/development/browse_code.md index d3d4e54f264..05af0a77d99 100644 --- a/docs/es/development/browse_code.md +++ b/docs/es/development/browse_code.md @@ -4,7 +4,7 @@ machine_translated: true # Examinar el código fuente de ClickHouse {#browse-clickhouse-source-code} -Usted puede utilizar **Woboq** navegador de código en línea disponible [aqui](https://clickhouse-test-reports.s3.yandex.net/codebrowser/html_report///ClickHouse/dbms/src/index.html). Proporciona navegación de código y resaltado semántico, búsqueda e indexación. La instantánea de código se actualiza diariamente. +Usted puede utilizar **Woboq** navegador de código en línea disponible [aqui](https://clickhouse-test-reports.s3.yandex.net/codebrowser/html_report///ClickHouse/dbms/index.html). Proporciona navegación de código y resaltado semántico, búsqueda e indexación. La instantánea de código se actualiza diariamente. Además, puede navegar por las fuentes en [GitHub](https://github.com/ClickHouse/ClickHouse) como de costumbre. diff --git a/docs/es/development/build.md b/docs/es/development/build.md index 9e4ab7628f2..0f93822d1e9 100644 --- a/docs/es/development/build.md +++ b/docs/es/development/build.md @@ -64,7 +64,7 @@ $ cd .. ``` Para crear un ejecutable, ejecute `ninja clickhouse`. -Esto creará el `dbms/programs/clickhouse` ejecutable, que se puede usar con `client` o `server` argumento. +Esto creará el `programs/clickhouse` ejecutable, que se puede usar con `client` o `server` argumento. # Cómo construir ClickHouse en cualquier Linux {#how-to-build-clickhouse-on-any-linux} diff --git a/docs/es/development/developer_instruction.md b/docs/es/development/developer_instruction.md index 62d22eecb9e..f0c84485c55 100644 --- a/docs/es/development/developer_instruction.md +++ b/docs/es/development/developer_instruction.md @@ -197,19 +197,19 @@ Cuando se inicie correctamente el proceso de construcción, verá el progreso de Al crear mensajes sobre archivos protobuf en la biblioteca libhdfs2, como `libprotobuf WARNING` puede aparecer. Afectan a nada y son seguros para ser ignorado. -Tras la compilación exitosa, obtienes un archivo ejecutable `ClickHouse//dbms/programs/clickhouse`: +Tras la compilación exitosa, obtienes un archivo ejecutable `ClickHouse//programs/clickhouse`: - ls -l dbms/programs/clickhouse + ls -l programs/clickhouse # Ejecutando el ejecutable construido de ClickHouse {#running-the-built-executable-of-clickhouse} -Para ejecutar el servidor bajo el usuario actual, debe navegar hasta `ClickHouse/dbms/programs/server/` (situado fuera de `build`) y ejecutar: +Para ejecutar el servidor bajo el usuario actual, debe navegar hasta `ClickHouse/programs/server/` (situado fuera de `build`) y ejecutar: - ../../../build/dbms/programs/clickhouse server + ../../../build/programs/clickhouse server En este caso, ClickHouse usará archivos de configuración ubicados en el directorio actual. Puede ejecutar `clickhouse server` desde cualquier directorio que especifique la ruta a un archivo de configuración como un parámetro de línea de comandos `--config-file`. -Para conectarse a ClickHouse con clickhouse-client en otro terminal, vaya a `ClickHouse/build/dbms/programs/` y ejecutar `clickhouse client`. 
+Para conectarse a ClickHouse con clickhouse-client en otro terminal, vaya a `ClickHouse/build/programs/` y ejecutar `clickhouse client`. Si usted consigue `Connection refused` mensaje en Mac OS X o FreeBSD, intente especificar la dirección de host 127.0.0.1: @@ -218,7 +218,7 @@ Si usted consigue `Connection refused` mensaje en Mac OS X o FreeBSD, intente es Puede reemplazar la versión de producción del binario ClickHouse instalado en su sistema con su binario ClickHouse personalizado. Para ello, instale ClickHouse en su máquina siguiendo las instrucciones del sitio web oficial. A continuación, ejecute lo siguiente: sudo service clickhouse-server stop - sudo cp ClickHouse/build/dbms/programs/clickhouse /usr/bin/ + sudo cp ClickHouse/build/programs/clickhouse /usr/bin/ sudo service clickhouse-server start Tenga en cuenta que `clickhouse-client`, `clickhouse-server` y otros son enlaces simbólicos a los comúnmente compartidos `clickhouse` Binario. @@ -226,7 +226,7 @@ Tenga en cuenta que `clickhouse-client`, `clickhouse-server` y otros son enlaces También puede ejecutar su binario ClickHouse personalizado con el archivo de configuración del paquete ClickHouse instalado en su sistema: sudo service clickhouse-server stop - sudo -u clickhouse ClickHouse/build/dbms/programs/clickhouse server --config-file /etc/clickhouse-server/config.xml + sudo -u clickhouse ClickHouse/build/programs/clickhouse server --config-file /etc/clickhouse-server/config.xml # IDE (entorno de desarrollo integrado) {#ide-integrated-development-environment} @@ -246,7 +246,7 @@ La Guía de estilo de código: https://clickhouse.tech/docs/es/development/style Pruebas de escritura: https://clickhouse.tech/docs/es/development/tests/ -Lista de tareas: https://github.com/ClickHouse/ClickHouse/blob/master/dbms/tests/instructions/easy\_tasks\_sorted\_en.md +Lista de tareas: https://github.com/ClickHouse/ClickHouse/blob/master/tests/instructions/easy\_tasks\_sorted\_en.md # Datos de prueba {#test-data} diff --git a/docs/es/development/tests.md b/docs/es/development/tests.md index 1cfd6d253c0..b7d0c182e6d 100644 --- a/docs/es/development/tests.md +++ b/docs/es/development/tests.md @@ -10,15 +10,15 @@ Las pruebas funcionales son las más simples y cómodas de usar. La mayoría de Cada prueba funcional envía una o varias consultas al servidor ClickHouse en ejecución y compara el resultado con la referencia. -Las pruebas se encuentran en `dbms/tests/queries` Directorio. Hay dos subdirectorios: `stateless` y `stateful`. Las pruebas sin estado ejecutan consultas sin datos de prueba precargados: a menudo crean pequeños conjuntos de datos sintéticos sobre la marcha, dentro de la prueba misma. Las pruebas estatales requieren datos de prueba precargados de Yandex.Métrica y no está disponible para el público en general. Tendemos a usar sólo `stateless` pruebas y evitar la adición de nuevos `stateful` prueba. +Las pruebas se encuentran en `tests/queries` Directorio. Hay dos subdirectorios: `stateless` y `stateful`. Las pruebas sin estado ejecutan consultas sin datos de prueba precargados: a menudo crean pequeños conjuntos de datos sintéticos sobre la marcha, dentro de la prueba misma. Las pruebas estatales requieren datos de prueba precargados de Yandex.Métrica y no está disponible para el público en general. Tendemos a usar sólo `stateless` pruebas y evitar la adición de nuevos `stateful` prueba. Cada prueba puede ser de dos tipos: `.sql` y `.sh`. `.sql` prueba es el script SQL simple que se canaliza a `clickhouse-client --multiquery --testmode`. 
`.sh` test es un script que se ejecuta por sí mismo. -Para ejecutar todas las pruebas, use `dbms/tests/clickhouse-test` herramienta. Mira `--help` para la lista de posibles opciones. Simplemente puede ejecutar todas las pruebas o ejecutar un subconjunto de pruebas filtradas por subcadena en el nombre de la prueba: `./clickhouse-test substring`. +Para ejecutar todas las pruebas, use `tests/clickhouse-test` herramienta. Mira `--help` para la lista de posibles opciones. Simplemente puede ejecutar todas las pruebas o ejecutar un subconjunto de pruebas filtradas por subcadena en el nombre de la prueba: `./clickhouse-test substring`. La forma más sencilla de invocar pruebas funcionales es copiar `clickhouse-client` Naciones `/usr/bin/`, ejecutar `clickhouse-server` y luego ejecutar `./clickhouse-test` de su propio directorio. -Atracciones cercanas al hotel `.sql` o `.sh` archivo en `dbms/tests/queries/0_stateless` directorio, compruébelo manualmente y luego genere `.reference` archivo de la siguiente manera: `clickhouse-client -n --testmode < 00000_test.sql > 00000_test.reference` o `./00000_test.sh > ./00000_test.reference`. +Atracciones cercanas al hotel `.sql` o `.sh` archivo en `tests/queries/0_stateless` directorio, compruébelo manualmente y luego genere `.reference` archivo de la siguiente manera: `clickhouse-client -n --testmode < 00000_test.sql > 00000_test.reference` o `./00000_test.sh > ./00000_test.reference`. Las pruebas deben usar (crear, soltar, etc.) solo tablas en `test` base de datos que se supone que se crea de antemano; también las pruebas pueden usar tablas temporales. @@ -33,13 +33,13 @@ Deshabilitar estos grupos de pruebas utilizando `--no-zookeeper`, `--no-shard` y ## Errores conocidos {#known-bugs} -Si conocemos algunos errores que se pueden reproducir fácilmente mediante pruebas funcionales, colocamos pruebas funcionales preparadas en `dbms/tests/queries/bugs` directorio. Estas pruebas se moverán a `dbms/tests/queries/0_stateless` cuando se corrigen errores. +Si conocemos algunos errores que se pueden reproducir fácilmente mediante pruebas funcionales, colocamos pruebas funcionales preparadas en `tests/queries/bugs` directorio. Estas pruebas se moverán a `tests/queries/0_stateless` cuando se corrigen errores. ## Pruebas de integración {#integration-tests} Las pruebas de integración permiten probar ClickHouse en la configuración agrupada y la interacción de ClickHouse con otros servidores como MySQL, Postgres, MongoDB. Son útiles para emular divisiones de red, caídas de paquetes, etc. Estas pruebas se ejecutan bajo Docker y crean múltiples contenedores con varios software. -Ver `dbms/tests/integration/README.md` sobre cómo ejecutar estas pruebas. +Ver `tests/integration/README.md` sobre cómo ejecutar estas pruebas. Tenga en cuenta que la integración de ClickHouse con controladores de terceros no se ha probado. Además, actualmente no tenemos pruebas de integración con nuestros controladores JDBC y ODBC. @@ -51,7 +51,7 @@ No es necesariamente tener pruebas unitarias si el código ya está cubierto por ## Pruebas de rendimiento {#performance-tests} -Las pruebas de rendimiento permiten medir y comparar el rendimiento de alguna parte aislada de ClickHouse en consultas sintéticas. Las pruebas se encuentran en `dbms/tests/performance`. Cada prueba está representada por `.xml` archivo con la descripción del caso de prueba. Las pruebas se ejecutan con `clickhouse performance-test` herramienta (que está incrustada en `clickhouse` binario). Ver `--help` para la invocación. 
+Las pruebas de rendimiento permiten medir y comparar el rendimiento de alguna parte aislada de ClickHouse en consultas sintéticas. Las pruebas se encuentran en `tests/performance`. Cada prueba está representada por `.xml` archivo con la descripción del caso de prueba. Las pruebas se ejecutan con `clickhouse performance-test` herramienta (que está incrustada en `clickhouse` binario). Ver `--help` para la invocación. Cada prueba ejecuta una o múltiples consultas (posiblemente con combinaciones de parámetros) en un bucle con algunas condiciones para stop (como “maximum execution speed is not changing in three seconds”) y medir algunas métricas sobre el rendimiento de las consultas (como “maximum execution speed”). Algunas pruebas pueden contener condiciones previas en el conjunto de datos de pruebas precargado. @@ -59,13 +59,13 @@ Si desea mejorar el rendimiento de ClickHouse en algún escenario, y si se puede ## Herramientas de prueba y secuencias de comandos {#test-tools-and-scripts} -Algunos programas en `tests` directorio no son pruebas preparadas, pero son herramientas de prueba. Por ejemplo, párr `Lexer` hay una herramienta `dbms/src/Parsers/tests/lexer` que solo hacen la tokenización de stdin y escriben el resultado coloreado en stdout. Puede usar este tipo de herramientas como ejemplos de código y para exploración y pruebas manuales. +Algunos programas en `tests` directorio no son pruebas preparadas, pero son herramientas de prueba. Por ejemplo, párr `Lexer` hay una herramienta `dbms/Parsers/tests/lexer` que solo hacen la tokenización de stdin y escriben el resultado coloreado en stdout. Puede usar este tipo de herramientas como ejemplos de código y para exploración y pruebas manuales. También puede colocar un par de archivos `.sh` y `.reference` junto con la herramienta para ejecutarlo en alguna entrada predefinida, entonces el resultado del script se puede comparar con `.reference` file. Este tipo de pruebas no están automatizadas. ## Pruebas misceláneas {#miscellanous-tests} -Hay pruebas para diccionarios externos ubicados en `dbms/tests/external_dictionaries` y para modelos aprendidos a máquina en `dbms/tests/external_models`. Estas pruebas no se actualizan y deben transferirse a pruebas de integración. +Hay pruebas para diccionarios externos ubicados en `tests/external_dictionaries` y para modelos aprendidos a máquina en `tests/external_models`. Estas pruebas no se actualizan y deben transferirse a pruebas de integración. Hay una prueba separada para inserciones de quórum. Esta prueba ejecuta el clúster ClickHouse en servidores separados y emula varios casos de fallas: división de red, caída de paquetes (entre nodos ClickHouse, entre ClickHouse y ZooKeeper, entre el servidor ClickHouse y el cliente, etc.), `kill -9`, `kill -STOP` y `kill -CONT` , como [Jepsen](https://aphyr.com/tags/Jepsen). A continuación, la prueba comprueba que todas las inserciones reconocidas se escribieron y todas las inserciones rechazadas no. @@ -75,9 +75,9 @@ La prueba de quórum fue escrita por un equipo separado antes de que ClickHouse Cuando desarrolla una nueva característica, es razonable probarla también manualmente. Puede hacerlo con los siguientes pasos: -Construir ClickHouse. Ejecute ClickHouse desde el terminal: cambie el directorio a `dbms/src/programs/clickhouse-server` y ejecutarlo con `./clickhouse-server`. Se utilizará la configuración (`config.xml`, `users.xml` y archivos dentro de `config.d` y `users.d` directorios) desde el directorio actual de forma predeterminada. 
Para conectarse al servidor ClickHouse, ejecute `dbms/src/programs/clickhouse-client/clickhouse-client`. +Construir ClickHouse. Ejecute ClickHouse desde el terminal: cambie el directorio a `programs/clickhouse-server` y ejecutarlo con `./clickhouse-server`. Se utilizará la configuración (`config.xml`, `users.xml` y archivos dentro de `config.d` y `users.d` directorios) desde el directorio actual de forma predeterminada. Para conectarse al servidor ClickHouse, ejecute `programs/clickhouse-client/clickhouse-client`. -Tenga en cuenta que todas las herramientas de clickhouse (servidor, cliente, etc.) son solo enlaces simbólicos a un único binario llamado `clickhouse`. Puede encontrar este binario en `dbms/src/programs/clickhouse`. Todas las herramientas también se pueden invocar como `clickhouse tool` es lugar de `clickhouse-tool`. +Tenga en cuenta que todas las herramientas de clickhouse (servidor, cliente, etc.) son solo enlaces simbólicos a un único binario llamado `clickhouse`. Puede encontrar este binario en `programs/clickhouse`. Todas las herramientas también se pueden invocar como `clickhouse tool` es lugar de `clickhouse-tool`. Alternativamente, puede instalar el paquete ClickHouse: ya sea una versión estable del repositorio de Yandex o puede crear un paquete para usted con `./release` en la raíz de fuentes de ClickHouse. Luego inicie el servidor con `sudo service clickhouse-server start` (o detener para detener el servidor). Busque registros en `/etc/clickhouse-server/clickhouse-server.log`. @@ -206,7 +206,7 @@ La gente del departamento de Yandex Cloud hace una visión general básica de la ## Analizadores estáticos {#static-analyzers} -Corremos `PVS-Studio` por compromiso. Hemos evaluado `clang-tidy`, `Coverity`, `cppcheck`, `PVS-Studio`, `tscancode`. Encontrará instrucciones de uso en `dbms/tests/instructions/` directorio. También puedes leer [el artículo en ruso](https://habr.com/company/yandex/blog/342018/). +Corremos `PVS-Studio` por compromiso. Hemos evaluado `clang-tidy`, `Coverity`, `cppcheck`, `PVS-Studio`, `tscancode`. Encontrará instrucciones de uso en `tests/instructions/` directorio. También puedes leer [el artículo en ruso](https://habr.com/company/yandex/blog/342018/). Si usted estados unidos `CLion` Como IDE, puede aprovechar algunos `clang-tidy` comprueba fuera de la caja. diff --git a/docs/es/getting_started/example_datasets/metrica.md b/docs/es/getting_started/example_datasets/metrica.md index d5d8067333c..4c74ebce034 100644 --- a/docs/es/getting_started/example_datasets/metrica.md +++ b/docs/es/getting_started/example_datasets/metrica.md @@ -64,4 +64,4 @@ clickhouse-client --query "SELECT COUNT(*) FROM datasets.visits_v1" [Tutorial de ClickHouse](../../getting_started/tutorial.md) se basa en Yandex.El conjunto de datos de Metrica y la forma recomendada de comenzar con este conjunto de datos es simplemente pasar por el tutorial. -Se pueden encontrar ejemplos adicionales de consultas a estas tablas entre [pruebas estatales](https://github.com/ClickHouse/ClickHouse/tree/master/dbms/tests/queries/1_stateful) de ClickHouse (se nombran `test.hists` y `test.visits` Todos los derechos reservados. +Se pueden encontrar ejemplos adicionales de consultas a estas tablas entre [pruebas estatales](https://github.com/ClickHouse/ClickHouse/tree/master/tests/queries/1_stateful) de ClickHouse (se nombran `test.hists` y `test.visits` Todos los derechos reservados. 
diff --git a/docs/es/getting_started/install.md b/docs/es/getting_started/install.md index 014afc99ce5..e3997f8d1f5 100644 --- a/docs/es/getting_started/install.md +++ b/docs/es/getting_started/install.md @@ -108,10 +108,10 @@ Para compilar manualmente ClickHouse, siga las instrucciones para [Linux](../dev Puede compilar paquetes e instalarlos o usar programas sin instalar paquetes. Además, al construir manualmente, puede deshabilitar el requisito de SSE 4.2 o compilar para CPU AArch64. - Client: dbms/programs/clickhouse-client - Server: dbms/programs/clickhouse-server + Client: programs/clickhouse-client + Server: programs/clickhouse-server -Tendrá que crear carpetas de datos y metadatos y `chown` para el usuario deseado. Sus rutas se pueden cambiar en la configuración del servidor (src/dbms/programs/server/config .xml), por defecto son: +Tendrá que crear carpetas de datos y metadatos y `chown` para el usuario deseado. Sus rutas se pueden cambiar en la configuración del servidor (src/programs/server/config .xml), por defecto son: /opt/clickhouse/data/default/ /opt/clickhouse/metadata/default/ diff --git a/docs/es/interfaces/tcp.md b/docs/es/interfaces/tcp.md index 660f506ee04..868ec644104 100644 --- a/docs/es/interfaces/tcp.md +++ b/docs/es/interfaces/tcp.md @@ -4,6 +4,6 @@ machine_translated: true # Interfaz nativa (TCP) {#native-interface-tcp} -El protocolo nativo se utiliza en el [cliente de línea de comandos](cli.md), para la comunicación entre servidores durante el procesamiento de consultas distribuidas, y también en otros programas de C, Desafortunadamente, el protocolo nativo de ClickHouse aún no tiene especificaciones formales, pero puede ser diseñado de manera inversa desde el código fuente de ClickHouse (comenzando [por aquí](https://github.com/ClickHouse/ClickHouse/tree/master/dbms/src/Client)) y/o mediante la interceptación y el análisis del tráfico TCP. +El protocolo nativo se utiliza en el [cliente de línea de comandos](cli.md), para la comunicación entre servidores durante el procesamiento de consultas distribuidas, y también en otros programas de C, Desafortunadamente, el protocolo nativo de ClickHouse aún no tiene especificaciones formales, pero puede ser diseñado de manera inversa desde el código fuente de ClickHouse (comenzando [por aquí](https://github.com/ClickHouse/ClickHouse/tree/master/dbms/Client)) y/o mediante la interceptación y el análisis del tráfico TCP. [Artículo Original](https://clickhouse.tech/docs/es/interfaces/tcp/) diff --git a/docs/es/operations/backup.md b/docs/es/operations/backup.md index 5f1ffc79648..17a57d40487 100644 --- a/docs/es/operations/backup.md +++ b/docs/es/operations/backup.md @@ -4,7 +4,7 @@ machine_translated: true # Copia de seguridad de datos {#data-backup} -Mientras [replicación](table_engines/replication.md) proporciona protección contra fallas de hardware, no protege contra errores humanos: eliminación accidental de datos, eliminación de la tabla incorrecta o una tabla en el clúster incorrecto y errores de software que resultan en un procesamiento incorrecto de datos o daños en los datos. En muchos casos, errores como estos afectarán a todas las réplicas. ClickHouse tiene protecciones integradas para evitar algunos tipos de errores, por ejemplo, de forma predeterminada [no puede simplemente colocar tablas con un motor similar a MergeTree que contenga más de 50 Gb de datos](https://github.com/ClickHouse/ClickHouse/blob/v18.14.18-stable/dbms/programs/server/config.xml#L322-L330). 
Sin embargo, estas garantías no cubren todos los casos posibles y pueden eludirse. +Mientras [replicación](table_engines/replication.md) proporciona protección contra fallas de hardware, no protege contra errores humanos: eliminación accidental de datos, eliminación de la tabla incorrecta o una tabla en el clúster incorrecto y errores de software que resultan en un procesamiento incorrecto de datos o daños en los datos. En muchos casos, errores como estos afectarán a todas las réplicas. ClickHouse tiene protecciones integradas para evitar algunos tipos de errores, por ejemplo, de forma predeterminada [no puede simplemente colocar tablas con un motor similar a MergeTree que contenga más de 50 Gb de datos](https://github.com/ClickHouse/ClickHouse/blob/v18.14.18-stable/programs/server/config.xml#L322-L330). Sin embargo, estas garantías no cubren todos los casos posibles y pueden eludirse. Para mitigar eficazmente los posibles errores humanos, debe preparar cuidadosamente una estrategia para realizar copias de seguridad y restaurar sus datos **previamente**. diff --git a/docs/es/operations/performance/sampling_query_profiler_example_result.txt b/docs/es/operations/performance/sampling_query_profiler_example_result.txt index 8e4e0e0fd70..56c2fdf9c65 100644 --- a/docs/es/operations/performance/sampling_query_profiler_example_result.txt +++ b/docs/es/operations/performance/sampling_query_profiler_example_result.txt @@ -2,55 +2,55 @@ Row 1: ────── count(): 6344 sym: StackTrace::StackTrace(ucontext_t const&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Common/StackTrace.cpp:208 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/IO/BufferBase.h:99 + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 read DB::ReadBufferFromFileDescriptor::nextImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/IO/ReadBufferFromFileDescriptor.cpp:56 + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/ReadBufferFromFileDescriptor.cpp:56 DB::CompressedReadBufferBase::readCompressedData(unsigned long&, unsigned long&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/IO/ReadBuffer.h:54 + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/ReadBuffer.h:54 DB::CompressedReadBufferFromFile::nextImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Compression/CompressedReadBufferFromFile.cpp:22 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Compression/CompressedReadBufferFromFile.cpp:22 DB::CompressedReadBufferFromFile::seek(unsigned long, unsigned long) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Compression/CompressedReadBufferFromFile.cpp:63 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Compression/CompressedReadBufferFromFile.cpp:63 DB::MergeTreeReaderStream::seekToMark(unsigned long) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeReaderStream.cpp:200 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReaderStream.cpp:200 std::_Function_handler > const&), DB::MergeTreeReader::readData(std::__cxx11::basic_string, std::allocator > const&, DB::IDataType const&, DB::IColumn&, unsigned long, bool, unsigned long, bool)::{lambda(bool)#1}::operator()(bool) const::{lambda(std::vector > const&)#1}>::_M_invoke(std::_Any_data const&, std::vector > const&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeReader.cpp:212 + 
/home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:212 DB::IDataType::deserializeBinaryBulkWithMultipleStreams(DB::IColumn&, unsigned long, DB::IDataType::DeserializeBinaryBulkSettings&, std::shared_ptr&) const /usr/local/include/c++/9.1.0/bits/std_function.h:690 DB::MergeTreeReader::readData(std::__cxx11::basic_string, std::allocator > const&, DB::IDataType const&, DB::IColumn&, unsigned long, bool, unsigned long, bool) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeReader.cpp:232 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:232 DB::MergeTreeReader::readRows(unsigned long, bool, unsigned long, DB::Block&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeReader.cpp:111 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:111 DB::MergeTreeRangeReader::DelayedStream::finalize(DB::Block&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeRangeReader.cpp:35 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:35 DB::MergeTreeRangeReader::continueReadingChain(DB::MergeTreeRangeReader::ReadResult&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeRangeReader.cpp:219 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:219 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeRangeReader.cpp:487 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:487 DB::MergeTreeBaseSelectBlockInputStream::readFromPartImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 DB::MergeTreeBaseSelectBlockInputStream::readImpl() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ExpressionBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataStreams/ExpressionBlockInputStream.cpp:34 + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::PartialSortingBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataStreams/PartialSortingBlockInputStream.cpp:13 + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ParallelInputsProcessor::loop(unsigned long) /usr/local/include/c++/9.1.0/bits/atomic_base.h:419 DB::ParallelInputsProcessor::thread(std::shared_ptr, unsigned long) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataStreams/ParallelInputsProcessor.h:215 + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215 ThreadFromGlobalPool::ThreadFromGlobalPool::*)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*, std::shared_ptr, unsigned long&>(void (DB::ParallelInputsProcessor::*&&)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*&&, std::shared_ptr&&, unsigned long&)::{lambda()#1}::operator()() const /usr/local/include/c++/9.1.0/bits/shared_ptr_base.h:729 
ThreadPoolImpl::worker(std::_List_iterator) @@ -66,9 +66,9 @@ Row 2: ────── count(): 3295 sym: StackTrace::StackTrace(ucontext_t const&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Common/StackTrace.cpp:208 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/IO/BufferBase.h:99 + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 __pthread_cond_wait @@ -82,11 +82,11 @@ DB::UnionBlockInputStream::readImpl() DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::MergeSortingBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Core/Block.h:90 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Core/Block.h:90 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ExpressionBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataStreams/ExpressionBlockInputStream.cpp:34 + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::LimitBlockInputStream::readImpl() @@ -100,7 +100,7 @@ std::_Function_handler::worker(std::_List_iterator) /usr/local/include/c++/9.1.0/x86_64-pc-linux-gnu/bits/gthr-default.h:748 ThreadFromGlobalPool::ThreadFromGlobalPool::scheduleImpl(std::function, int, std::optional)::{lambda()#3}>(ThreadPoolImpl::scheduleImpl(std::function, int, std::optional)::{lambda()#3}&&)::{lambda()#1}::operator()() const - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Common/ThreadPool.h:146 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/ThreadPool.h:146 ThreadPoolImpl::worker(std::_List_iterator) /usr/local/include/c++/9.1.0/bits/unique_lock.h:69 execute_native_thread_routine @@ -114,47 +114,47 @@ Row 3: ────── count(): 1978 sym: StackTrace::StackTrace(ucontext_t const&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Common/StackTrace.cpp:208 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/IO/BufferBase.h:99 + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 DB::VolnitskyBase >::search(unsigned char const*, unsigned long) const - /opt/milovidov/ClickHouse/build_gcc9/dbms/programs/clickhouse + /opt/milovidov/ClickHouse/build_gcc9/programs/clickhouse DB::MatchImpl::vector_constant(DB::PODArray, 15ul, 16ul> const&, DB::PODArray, 15ul, 16ul> const&, std::__cxx11::basic_string, std::allocator > const&, DB::PODArray, 15ul, 16ul>&) - /opt/milovidov/ClickHouse/build_gcc9/dbms/programs/clickhouse + /opt/milovidov/ClickHouse/build_gcc9/programs/clickhouse DB::FunctionsStringSearch, DB::NameLike>::executeImpl(DB::Block&, std::vector > const&, unsigned long, unsigned long) - /opt/milovidov/ClickHouse/build_gcc9/dbms/programs/clickhouse + /opt/milovidov/ClickHouse/build_gcc9/programs/clickhouse DB::PreparedFunctionImpl::execute(DB::Block&, std::vector > const&, unsigned long, unsigned long, bool) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Functions/IFunction.cpp:464 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Functions/IFunction.cpp:464 DB::ExpressionAction::execute(DB::Block&, bool) const /usr/local/include/c++/9.1.0/bits/stl_vector.h:677 
DB::ExpressionActions::execute(DB::Block&, bool) const - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Interpreters/ExpressionActions.cpp:739 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Interpreters/ExpressionActions.cpp:739 DB::MergeTreeRangeReader::executePrewhereActionsAndFilterColumns(DB::MergeTreeRangeReader::ReadResult&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeRangeReader.cpp:660 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:660 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeRangeReader.cpp:546 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:546 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::MergeTreeBaseSelectBlockInputStream::readFromPartImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 DB::MergeTreeBaseSelectBlockInputStream::readImpl() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ExpressionBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataStreams/ExpressionBlockInputStream.cpp:34 + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::PartialSortingBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataStreams/PartialSortingBlockInputStream.cpp:13 + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ParallelInputsProcessor::loop(unsigned long) /usr/local/include/c++/9.1.0/bits/atomic_base.h:419 DB::ParallelInputsProcessor::thread(std::shared_ptr, unsigned long) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataStreams/ParallelInputsProcessor.h:215 + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215 ThreadFromGlobalPool::ThreadFromGlobalPool::*)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*, std::shared_ptr, unsigned long&>(void (DB::ParallelInputsProcessor::*&&)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*&&, std::shared_ptr&&, unsigned long&)::{lambda()#1}::operator()() const /usr/local/include/c++/9.1.0/bits/shared_ptr_base.h:729 ThreadPoolImpl::worker(std::_List_iterator) @@ -170,47 +170,47 @@ Row 4: ────── count(): 1913 sym: StackTrace::StackTrace(ucontext_t const&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Common/StackTrace.cpp:208 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/IO/BufferBase.h:99 + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 DB::VolnitskyBase >::search(unsigned char const*, unsigned long) const - /opt/milovidov/ClickHouse/build_gcc9/dbms/programs/clickhouse + /opt/milovidov/ClickHouse/build_gcc9/programs/clickhouse DB::MatchImpl::vector_constant(DB::PODArray, 15ul, 16ul> const&, DB::PODArray, 15ul, 
16ul> const&, std::__cxx11::basic_string, std::allocator > const&, DB::PODArray, 15ul, 16ul>&) - /opt/milovidov/ClickHouse/build_gcc9/dbms/programs/clickhouse + /opt/milovidov/ClickHouse/build_gcc9/programs/clickhouse DB::FunctionsStringSearch, DB::NameLike>::executeImpl(DB::Block&, std::vector > const&, unsigned long, unsigned long) - /opt/milovidov/ClickHouse/build_gcc9/dbms/programs/clickhouse + /opt/milovidov/ClickHouse/build_gcc9/programs/clickhouse DB::PreparedFunctionImpl::execute(DB::Block&, std::vector > const&, unsigned long, unsigned long, bool) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Functions/IFunction.cpp:464 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Functions/IFunction.cpp:464 DB::ExpressionAction::execute(DB::Block&, bool) const /usr/local/include/c++/9.1.0/bits/stl_vector.h:677 DB::ExpressionActions::execute(DB::Block&, bool) const - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Interpreters/ExpressionActions.cpp:739 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Interpreters/ExpressionActions.cpp:739 DB::MergeTreeRangeReader::executePrewhereActionsAndFilterColumns(DB::MergeTreeRangeReader::ReadResult&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeRangeReader.cpp:660 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:660 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeRangeReader.cpp:546 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:546 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::MergeTreeBaseSelectBlockInputStream::readFromPartImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 DB::MergeTreeBaseSelectBlockInputStream::readImpl() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ExpressionBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataStreams/ExpressionBlockInputStream.cpp:34 + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::PartialSortingBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataStreams/PartialSortingBlockInputStream.cpp:13 + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ParallelInputsProcessor::loop(unsigned long) /usr/local/include/c++/9.1.0/bits/atomic_base.h:419 DB::ParallelInputsProcessor::thread(std::shared_ptr, unsigned long) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataStreams/ParallelInputsProcessor.h:215 + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215 ThreadFromGlobalPool::ThreadFromGlobalPool::*)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*, std::shared_ptr, unsigned long&>(void (DB::ParallelInputsProcessor::*&&)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*&&, std::shared_ptr&&, unsigned long&)::{lambda()#1}::operator()() const 
/usr/local/include/c++/9.1.0/bits/shared_ptr_base.h:729 ThreadPoolImpl::worker(std::_List_iterator) @@ -226,47 +226,47 @@ Row 5: ────── count(): 1672 sym: StackTrace::StackTrace(ucontext_t const&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Common/StackTrace.cpp:208 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/IO/BufferBase.h:99 + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 DB::VolnitskyBase >::search(unsigned char const*, unsigned long) const - /opt/milovidov/ClickHouse/build_gcc9/dbms/programs/clickhouse + /opt/milovidov/ClickHouse/build_gcc9/programs/clickhouse DB::MatchImpl::vector_constant(DB::PODArray, 15ul, 16ul> const&, DB::PODArray, 15ul, 16ul> const&, std::__cxx11::basic_string, std::allocator > const&, DB::PODArray, 15ul, 16ul>&) - /opt/milovidov/ClickHouse/build_gcc9/dbms/programs/clickhouse + /opt/milovidov/ClickHouse/build_gcc9/programs/clickhouse DB::FunctionsStringSearch, DB::NameLike>::executeImpl(DB::Block&, std::vector > const&, unsigned long, unsigned long) - /opt/milovidov/ClickHouse/build_gcc9/dbms/programs/clickhouse + /opt/milovidov/ClickHouse/build_gcc9/programs/clickhouse DB::PreparedFunctionImpl::execute(DB::Block&, std::vector > const&, unsigned long, unsigned long, bool) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Functions/IFunction.cpp:464 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Functions/IFunction.cpp:464 DB::ExpressionAction::execute(DB::Block&, bool) const /usr/local/include/c++/9.1.0/bits/stl_vector.h:677 DB::ExpressionActions::execute(DB::Block&, bool) const - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Interpreters/ExpressionActions.cpp:739 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Interpreters/ExpressionActions.cpp:739 DB::MergeTreeRangeReader::executePrewhereActionsAndFilterColumns(DB::MergeTreeRangeReader::ReadResult&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeRangeReader.cpp:660 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:660 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeRangeReader.cpp:546 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:546 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::MergeTreeBaseSelectBlockInputStream::readFromPartImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 DB::MergeTreeBaseSelectBlockInputStream::readImpl() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ExpressionBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataStreams/ExpressionBlockInputStream.cpp:34 + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::PartialSortingBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataStreams/PartialSortingBlockInputStream.cpp:13 + 
/home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ParallelInputsProcessor::loop(unsigned long) /usr/local/include/c++/9.1.0/bits/atomic_base.h:419 DB::ParallelInputsProcessor::thread(std::shared_ptr, unsigned long) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataStreams/ParallelInputsProcessor.h:215 + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215 ThreadFromGlobalPool::ThreadFromGlobalPool::*)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*, std::shared_ptr, unsigned long&>(void (DB::ParallelInputsProcessor::*&&)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*&&, std::shared_ptr&&, unsigned long&)::{lambda()#1}::operator()() const /usr/local/include/c++/9.1.0/bits/shared_ptr_base.h:729 ThreadPoolImpl::worker(std::_List_iterator) @@ -282,53 +282,53 @@ Row 6: ────── count(): 1531 sym: StackTrace::StackTrace(ucontext_t const&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Common/StackTrace.cpp:208 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/IO/BufferBase.h:99 + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 read DB::ReadBufferFromFileDescriptor::nextImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/IO/ReadBufferFromFileDescriptor.cpp:56 + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/ReadBufferFromFileDescriptor.cpp:56 DB::CompressedReadBufferBase::readCompressedData(unsigned long&, unsigned long&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/IO/ReadBuffer.h:54 + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/ReadBuffer.h:54 DB::CompressedReadBufferFromFile::nextImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Compression/CompressedReadBufferFromFile.cpp:22 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Compression/CompressedReadBufferFromFile.cpp:22 void DB::deserializeBinarySSE2<4>(DB::PODArray, 15ul, 16ul>&, DB::PODArray, 15ul, 16ul>&, DB::ReadBuffer&, unsigned long) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/IO/ReadBuffer.h:53 + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/ReadBuffer.h:53 DB::DataTypeString::deserializeBinaryBulk(DB::IColumn&, DB::ReadBuffer&, unsigned long, double) const - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataTypes/DataTypeString.cpp:202 + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataTypes/DataTypeString.cpp:202 DB::MergeTreeReader::readData(std::__cxx11::basic_string, std::allocator > const&, DB::IDataType const&, DB::IColumn&, unsigned long, bool, unsigned long, bool) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeReader.cpp:232 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:232 DB::MergeTreeReader::readRows(unsigned long, bool, unsigned long, DB::Block&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeReader.cpp:111 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:111 DB::MergeTreeRangeReader::DelayedStream::finalize(DB::Block&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeRangeReader.cpp:35 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:35 
DB::MergeTreeRangeReader::startReadingChain(unsigned long, std::vector >&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeRangeReader.cpp:219 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:219 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::MergeTreeBaseSelectBlockInputStream::readFromPartImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 DB::MergeTreeBaseSelectBlockInputStream::readImpl() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ExpressionBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataStreams/ExpressionBlockInputStream.cpp:34 + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::PartialSortingBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataStreams/PartialSortingBlockInputStream.cpp:13 + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ParallelInputsProcessor::loop(unsigned long) /usr/local/include/c++/9.1.0/bits/atomic_base.h:419 DB::ParallelInputsProcessor::thread(std::shared_ptr, unsigned long) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataStreams/ParallelInputsProcessor.h:215 + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215 ThreadFromGlobalPool::ThreadFromGlobalPool::*)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*, std::shared_ptr, unsigned long&>(void (DB::ParallelInputsProcessor::*&&)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*&&, std::shared_ptr&&, unsigned long&)::{lambda()#1}::operator()() const /usr/local/include/c++/9.1.0/bits/shared_ptr_base.h:729 ThreadPoolImpl::worker(std::_List_iterator) @@ -344,47 +344,47 @@ Row 7: ────── count(): 1034 sym: StackTrace::StackTrace(ucontext_t const&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Common/StackTrace.cpp:208 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/IO/BufferBase.h:99 + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 DB::VolnitskyBase >::search(unsigned char const*, unsigned long) const - /opt/milovidov/ClickHouse/build_gcc9/dbms/programs/clickhouse + /opt/milovidov/ClickHouse/build_gcc9/programs/clickhouse DB::MatchImpl::vector_constant(DB::PODArray, 15ul, 16ul> const&, DB::PODArray, 15ul, 16ul> const&, std::__cxx11::basic_string, std::allocator > const&, DB::PODArray, 15ul, 16ul>&) - /opt/milovidov/ClickHouse/build_gcc9/dbms/programs/clickhouse + /opt/milovidov/ClickHouse/build_gcc9/programs/clickhouse DB::FunctionsStringSearch, DB::NameLike>::executeImpl(DB::Block&, std::vector > const&, unsigned long, unsigned long) - /opt/milovidov/ClickHouse/build_gcc9/dbms/programs/clickhouse + 
/opt/milovidov/ClickHouse/build_gcc9/programs/clickhouse DB::PreparedFunctionImpl::execute(DB::Block&, std::vector > const&, unsigned long, unsigned long, bool) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Functions/IFunction.cpp:464 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Functions/IFunction.cpp:464 DB::ExpressionAction::execute(DB::Block&, bool) const /usr/local/include/c++/9.1.0/bits/stl_vector.h:677 DB::ExpressionActions::execute(DB::Block&, bool) const - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Interpreters/ExpressionActions.cpp:739 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Interpreters/ExpressionActions.cpp:739 DB::MergeTreeRangeReader::executePrewhereActionsAndFilterColumns(DB::MergeTreeRangeReader::ReadResult&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeRangeReader.cpp:660 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:660 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeRangeReader.cpp:546 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:546 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::MergeTreeBaseSelectBlockInputStream::readFromPartImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 DB::MergeTreeBaseSelectBlockInputStream::readImpl() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ExpressionBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataStreams/ExpressionBlockInputStream.cpp:34 + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::PartialSortingBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataStreams/PartialSortingBlockInputStream.cpp:13 + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ParallelInputsProcessor::loop(unsigned long) /usr/local/include/c++/9.1.0/bits/atomic_base.h:419 DB::ParallelInputsProcessor::thread(std::shared_ptr, unsigned long) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataStreams/ParallelInputsProcessor.h:215 + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215 ThreadFromGlobalPool::ThreadFromGlobalPool::*)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*, std::shared_ptr, unsigned long&>(void (DB::ParallelInputsProcessor::*&&)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*&&, std::shared_ptr&&, unsigned long&)::{lambda()#1}::operator()() const /usr/local/include/c++/9.1.0/bits/shared_ptr_base.h:729 ThreadPoolImpl::worker(std::_List_iterator) @@ -400,9 +400,9 @@ Row 8: ────── count(): 989 sym: StackTrace::StackTrace(ucontext_t const&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Common/StackTrace.cpp:208 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] - 
/home/milovidov/ClickHouse/build_gcc9/../dbms/src/IO/BufferBase.h:99 + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 __lll_lock_wait @@ -412,7 +412,7 @@ pthread_mutex_lock DB::MergeTreeReaderStream::loadMarks() /usr/local/include/c++/9.1.0/bits/std_mutex.h:103 DB::MergeTreeReaderStream::MergeTreeReaderStream(std::__cxx11::basic_string, std::allocator > const&, std::__cxx11::basic_string, std::allocator > const&, unsigned long, std::vector > const&, DB::MarkCache*, bool, DB::UncompressedCache*, unsigned long, unsigned long, unsigned long, DB::MergeTreeIndexGranularityInfo const*, std::function const&, int) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeReaderStream.cpp:107 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReaderStream.cpp:107 std::_Function_handler > const&), DB::MergeTreeReader::addStreams(std::__cxx11::basic_string, std::allocator > const&, DB::IDataType const&, std::function const&, int)::{lambda(std::vector > const&)#1}>::_M_invoke(std::_Any_data const&, std::vector > const&) /usr/local/include/c++/9.1.0/bits/unique_ptr.h:147 DB::MergeTreeReader::addStreams(std::__cxx11::basic_string, std::allocator > const&, DB::IDataType const&, std::function const&, int) @@ -422,21 +422,21 @@ DB::MergeTreeReader::MergeTreeReader(std::__cxx11::basic_string::loop(unsigned long) /usr/local/include/c++/9.1.0/bits/atomic_base.h:419 DB::ParallelInputsProcessor::thread(std::shared_ptr, unsigned long) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataStreams/ParallelInputsProcessor.h:215 + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215 ThreadFromGlobalPool::ThreadFromGlobalPool::*)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*, std::shared_ptr, unsigned long&>(void (DB::ParallelInputsProcessor::*&&)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*&&, std::shared_ptr&&, unsigned long&)::{lambda()#1}::operator()() const /usr/local/include/c++/9.1.0/bits/shared_ptr_base.h:729 ThreadPoolImpl::worker(std::_List_iterator) @@ -452,45 +452,45 @@ Row 9: ─────── count(): 779 sym: StackTrace::StackTrace(ucontext_t const&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Common/StackTrace.cpp:208 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/IO/BufferBase.h:99 + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 void DB::deserializeBinarySSE2<4>(DB::PODArray, 15ul, 16ul>&, DB::PODArray, 15ul, 16ul>&, DB::ReadBuffer&, unsigned long) /usr/local/lib/gcc/x86_64-pc-linux-gnu/9.1.0/include/emmintrin.h:727 DB::DataTypeString::deserializeBinaryBulk(DB::IColumn&, DB::ReadBuffer&, unsigned long, double) const - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataTypes/DataTypeString.cpp:202 + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataTypes/DataTypeString.cpp:202 DB::MergeTreeReader::readData(std::__cxx11::basic_string, std::allocator > const&, DB::IDataType const&, DB::IColumn&, unsigned long, bool, unsigned long, bool) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeReader.cpp:232 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:232 DB::MergeTreeReader::readRows(unsigned long, bool, unsigned long, DB::Block&) - 
/home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeReader.cpp:111 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:111 DB::MergeTreeRangeReader::DelayedStream::finalize(DB::Block&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeRangeReader.cpp:35 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:35 DB::MergeTreeRangeReader::startReadingChain(unsigned long, std::vector >&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeRangeReader.cpp:219 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:219 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::MergeTreeBaseSelectBlockInputStream::readFromPartImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 DB::MergeTreeBaseSelectBlockInputStream::readImpl() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ExpressionBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataStreams/ExpressionBlockInputStream.cpp:34 + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::PartialSortingBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataStreams/PartialSortingBlockInputStream.cpp:13 + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ParallelInputsProcessor::loop(unsigned long) /usr/local/include/c++/9.1.0/bits/atomic_base.h:419 DB::ParallelInputsProcessor::thread(std::shared_ptr, unsigned long) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataStreams/ParallelInputsProcessor.h:215 + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215 ThreadFromGlobalPool::ThreadFromGlobalPool::*)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*, std::shared_ptr, unsigned long&>(void (DB::ParallelInputsProcessor::*&&)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*&&, std::shared_ptr&&, unsigned long&)::{lambda()#1}::operator()() const /usr/local/include/c++/9.1.0/bits/shared_ptr_base.h:729 ThreadPoolImpl::worker(std::_List_iterator) @@ -506,45 +506,45 @@ Row 10: ─────── count(): 666 sym: StackTrace::StackTrace(ucontext_t const&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Common/StackTrace.cpp:208 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/IO/BufferBase.h:99 + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 void DB::deserializeBinarySSE2<4>(DB::PODArray, 15ul, 16ul>&, DB::PODArray, 15ul, 16ul>&, DB::ReadBuffer&, unsigned long) /usr/local/lib/gcc/x86_64-pc-linux-gnu/9.1.0/include/emmintrin.h:727 DB::DataTypeString::deserializeBinaryBulk(DB::IColumn&, 
DB::ReadBuffer&, unsigned long, double) const - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataTypes/DataTypeString.cpp:202 + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataTypes/DataTypeString.cpp:202 DB::MergeTreeReader::readData(std::__cxx11::basic_string, std::allocator > const&, DB::IDataType const&, DB::IColumn&, unsigned long, bool, unsigned long, bool) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeReader.cpp:232 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:232 DB::MergeTreeReader::readRows(unsigned long, bool, unsigned long, DB::Block&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeReader.cpp:111 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:111 DB::MergeTreeRangeReader::DelayedStream::finalize(DB::Block&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeRangeReader.cpp:35 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:35 DB::MergeTreeRangeReader::startReadingChain(unsigned long, std::vector >&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeRangeReader.cpp:219 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:219 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::MergeTreeBaseSelectBlockInputStream::readFromPartImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 DB::MergeTreeBaseSelectBlockInputStream::readImpl() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ExpressionBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataStreams/ExpressionBlockInputStream.cpp:34 + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::PartialSortingBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataStreams/PartialSortingBlockInputStream.cpp:13 + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ParallelInputsProcessor::loop(unsigned long) /usr/local/include/c++/9.1.0/bits/atomic_base.h:419 DB::ParallelInputsProcessor::thread(std::shared_ptr, unsigned long) - /home/milovidov/ClickHouse/build_gcc9/../dbms/src/DataStreams/ParallelInputsProcessor.h:215 + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215 ThreadFromGlobalPool::ThreadFromGlobalPool::*)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*, std::shared_ptr, unsigned long&>(void (DB::ParallelInputsProcessor::*&&)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*&&, std::shared_ptr&&, unsigned long&)::{lambda()#1}::operator()() const /usr/local/include/c++/9.1.0/bits/shared_ptr_base.h:729 ThreadPoolImpl::worker(std::_List_iterator) diff --git a/docs/es/operations/performance_test.md b/docs/es/operations/performance_test.md 
index 62e5078691b..3e1448ffa87 100644 --- a/docs/es/operations/performance_test.md +++ b/docs/es/operations/performance_test.md @@ -27,19 +27,19 @@ Con esta instrucción, puede ejecutar una prueba de rendimiento básica de Click - wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/dbms/programs/server/config.xml - wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/dbms/programs/server/users.xml + wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.xml + wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/users.xml mkdir config.d - wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/dbms/programs/server/config.d/path.xml -O config.d/path.xml - wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/dbms/programs/server/config.d/log_to_console.xml -O config.d/log_to_console.xml + wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.d/path.xml -O config.d/path.xml + wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.d/log_to_console.xml -O config.d/log_to_console.xml 1. Descargar archivos de referencia: - wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/dbms/benchmark/clickhouse/benchmark-new.sh + wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/clickhouse/benchmark-new.sh chmod a+x benchmark-new.sh - wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/dbms/benchmark/clickhouse/queries.sql + wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/clickhouse/queries.sql 1. Descargue los datos de prueba de acuerdo con el [El Yandex.Conjunto de datos de Metrica](../getting_started/example_datasets/metrica.md) instrucción (“hits” tabla que contiene 100 millones de filas). diff --git a/docs/es/operations/settings/query_complexity.md b/docs/es/operations/settings/query_complexity.md index da39fd01a7b..4fe64302213 100644 --- a/docs/es/operations/settings/query_complexity.md +++ b/docs/es/operations/settings/query_complexity.md @@ -41,7 +41,7 @@ El consumo de memoria también está restringido por los parámetros `max_memory La cantidad máxima de RAM que se utilizará para ejecutar las consultas de un usuario en un único servidor. -Los valores predeterminados se definen en [Configuración.h](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/src/Core/Settings.h#L288). De forma predeterminada, el importe no está restringido (`max_memory_usage_for_user = 0`). +Los valores predeterminados se definen en [Configuración.h](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/Core/Settings.h#L288). De forma predeterminada, el importe no está restringido (`max_memory_usage_for_user = 0`). Ver también la descripción de [Método de codificación de datos:](#settings_max_memory_usage). @@ -49,7 +49,7 @@ Ver también la descripción de [Método de codificación de datos:](#settings_m La cantidad máxima de RAM que se utilizará para ejecutar todas las consultas en un único servidor. -Los valores predeterminados se definen en [Configuración.h](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/src/Core/Settings.h#L289). De forma predeterminada, el importe no está restringido (`max_memory_usage_for_all_queries = 0`). +Los valores predeterminados se definen en [Configuración.h](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/Core/Settings.h#L289). 
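As a hedged aside on the `max_memory_usage*` hunks being retargeted here: these limits are ordinarily exercised per session, along the lines of the sketch below. The byte value and the query are illustrative assumptions, not the defaults from Settings.h.

``` sql
-- Illustrative session-level cap of ~100 MB (not a documented default).
SET max_memory_usage = 100000000;

-- A deliberately memory-hungry aggregation; with the cap above it is
-- expected to fail with a MEMORY_LIMIT_EXCEEDED error.
SELECT groupArray(number) FROM numbers(100000000);
```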
De forma predeterminada, el importe no está restringido (`max_memory_usage_for_all_queries = 0`). Ver también la descripción de [Método de codificación de datos:](#settings_max_memory_usage). diff --git a/docs/es/operations/system_tables.md b/docs/es/operations/system_tables.md index af073bc0fcc..5129abfb1ba 100644 --- a/docs/es/operations/system_tables.md +++ b/docs/es/operations/system_tables.md @@ -256,7 +256,7 @@ Columna: - `value` ([Int64](../data_types/int_uint.md)) — Valor métrico. - `description` ([Cadena](../data_types/string.md)) — Descripción métrica. -La lista de métricas admitidas que puede encontrar en el [dbms/src/Common/CurrentMetrics.cpp](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/src/Common/CurrentMetrics.cpp) archivo fuente de ClickHouse. +La lista de métricas admitidas que puede encontrar en el [dbms/Common/CurrentMetrics.cpp](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/Common/CurrentMetrics.cpp) archivo fuente de ClickHouse. **Ejemplo** diff --git a/docs/es/query_language/alter.md b/docs/es/query_language/alter.md index 31342199735..c823381c4ee 100644 --- a/docs/es/query_language/alter.md +++ b/docs/es/query_language/alter.md @@ -446,7 +446,7 @@ Todas las reglas anteriores también son ciertas para el [OPTIMIZAR](misc.md#mis OPTIMIZE TABLE table_not_partitioned PARTITION tuple() FINAL; ``` -Los ejemplos de `ALTER ... PARTITION` las consultas se demuestran en las pruebas [`00502_custom_partitioning_local`](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/tests/queries/0_stateless/00502_custom_partitioning_local.sql) y [`00502_custom_partitioning_replicated_zookeeper`](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper.sql). +Los ejemplos de `ALTER ... PARTITION` las consultas se demuestran en las pruebas [`00502_custom_partitioning_local`](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00502_custom_partitioning_local.sql) y [`00502_custom_partitioning_replicated_zookeeper`](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper.sql). ### Manipulaciones con Tabla TTL {#manipulations-with-table-ttl} diff --git a/docs/es/query_language/create.md b/docs/es/query_language/create.md index ba6f5f1849b..d7663b01839 100644 --- a/docs/es/query_language/create.md +++ b/docs/es/query_language/create.md @@ -151,7 +151,7 @@ ENGINE = Si se especifica un códec, el códec predeterminado no se aplica. Los códecs se pueden combinar en una tubería, por ejemplo, `CODEC(Delta, ZSTD)`. Para seleccionar la mejor combinación de códecs para su proyecto, pase puntos de referencia similares a los descritos en Altinity [Nuevas codificaciones para mejorar la eficiencia de ClickHouse](https://www.altinity.com/blog/2019/7/new-encodings-to-improve-clickhouse) artículo. !!! warning "Advertencia" - No puede descomprimir archivos de base de datos ClickHouse con utilidades externas como `lz4`. En su lugar, use el especial [Compresor de clickhouse](https://github.com/ClickHouse/ClickHouse/tree/master/dbms/programs/compressor) utilidad. + No puede descomprimir archivos de base de datos ClickHouse con utilidades externas como `lz4`. En su lugar, use el especial [Compresor de clickhouse](https://github.com/ClickHouse/ClickHouse/tree/master/programs/compressor) utilidad. 
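To ground the codec pipeline mentioned above (`CODEC(Delta, ZSTD)`), a minimal sketch of per-column codecs in DDL; the table and column names are hypothetical, not taken from the docs being patched.

``` sql
CREATE TABLE codec_example
(
    ts DateTime CODEC(Delta, ZSTD),  -- delta-encode timestamps, then ZSTD-compress
    value Float64 CODEC(Gorilla)     -- codec suited to slowly changing floats
)
ENGINE = MergeTree
ORDER BY ts;
```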
La compresión es compatible con los siguientes motores de tablas: diff --git a/docs/es/query_language/functions/introspection.md b/docs/es/query_language/functions/introspection.md index 634cf52118c..5169da4e5a2 100644 --- a/docs/es/query_language/functions/introspection.md +++ b/docs/es/query_language/functions/introspection.md @@ -39,7 +39,7 @@ addressToLine(address_of_binary_instruction) - Nombre de archivo del código fuente y el número de línea en este archivo delimitado por dos puntos. - For example, `/build/obj-x86_64-linux-gnu/../dbms/src/Common/ThreadPool.cpp:199`, where `199` is a line number. + For example, `/build/obj-x86_64-linux-gnu/../dbms/Common/ThreadPool.cpp:199`, where `199` is a line number. - Nombre de un binario, si la función no pudo encontrar la información de depuración. @@ -84,7 +84,7 @@ SELECT addressToLine(94784076370703) \G ``` text Row 1: ────── -addressToLine(94784076370703): /build/obj-x86_64-linux-gnu/../dbms/src/Common/ThreadPool.cpp:199 +addressToLine(94784076370703): /build/obj-x86_64-linux-gnu/../dbms/Common/ThreadPool.cpp:199 ``` Aplicando la función a todo el seguimiento de la pila: @@ -104,8 +104,8 @@ Row 1: ────── trace_source_code_lines: /lib/x86_64-linux-gnu/libpthread-2.27.so /usr/lib/debug/usr/bin/clickhouse -/build/obj-x86_64-linux-gnu/../dbms/src/Common/ThreadPool.cpp:199 -/build/obj-x86_64-linux-gnu/../dbms/src/Common/ThreadPool.h:155 +/build/obj-x86_64-linux-gnu/../dbms/Common/ThreadPool.cpp:199 +/build/obj-x86_64-linux-gnu/../dbms/Common/ThreadPool.h:155 /usr/include/c++/9/bits/atomic_base.h:551 /usr/lib/debug/usr/bin/clickhouse /lib/x86_64-linux-gnu/libpthread-2.27.so diff --git a/docs/es/query_language/operators.md b/docs/es/query_language/operators.md index 19d6d058f00..5710064ee1c 100644 --- a/docs/es/query_language/operators.md +++ b/docs/es/query_language/operators.md @@ -133,7 +133,7 @@ FROM test.Orders; └───────────┴────────────┴──────────┴───────────┴─────────────┴─────────────┘ ``` -Puedes ver más ejemplos en [prueba](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/tests/queries/0_stateless/00619_extract.sql). +Puedes ver más ejemplos en [prueba](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00619_extract.sql). ### INTERVALO {#operator-interval} diff --git a/docs/fa/getting_started/example_datasets/metrica.md b/docs/fa/getting_started/example_datasets/metrica.md index 1546e4f79d4..1081001e3b8 100644 --- a/docs/fa/getting_started/example_datasets/metrica.md +++ b/docs/fa/getting_started/example_datasets/metrica.md @@ -60,4 +60,4 @@ clickhouse-client --query "SELECT COUNT(*) FROM datasets.visits_v1" [اموزش کلیک](../../getting_started/tutorial.md) است در یاندکس بر اساس.مجموعه داده های متریکا و راه توصیه شده برای شروع این مجموعه داده ها فقط از طریق تدریس خصوصی است. -نمونه های اضافی از نمایش داده شد به این جداول را می توان در میان یافت [تست های نفرت انگیز](https://github.com/ClickHouse/ClickHouse/tree/master/dbms/tests/queries/1_stateful) از کلیک هاوس (به نام `test.hists` و `test.visits` وجود دارد). +نمونه های اضافی از نمایش داده شد به این جداول را می توان در میان یافت [تست های نفرت انگیز](https://github.com/ClickHouse/ClickHouse/tree/master/tests/queries/1_stateful) از کلیک هاوس (به نام `test.hists` و `test.visits` وجود دارد). 
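The `addressToLine` hunks above, and the long profiler listings earlier in this patch, come from queries over `system.trace_log`. A minimal sketch, assuming introspection functions are enabled for the session:

``` sql
SET allow_introspection_functions = 1;

-- Resolve every frame of the most recent sampled stack to file:line.
SELECT arrayStringConcat(
           arrayMap(x -> addressToLine(x), trace),
           '\n') AS trace_source_code_lines
FROM system.trace_log
ORDER BY event_time DESC
LIMIT 1;
```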
diff --git a/docs/fa/getting_started/install.md b/docs/fa/getting_started/install.md index 11a59d26395..805d6b7e480 100644 --- a/docs/fa/getting_started/install.md +++ b/docs/fa/getting_started/install.md @@ -84,8 +84,8 @@ sudo yum install clickhouse-server clickhouse-client
- Client: dbms/programs/clickhouse-client
- Server: dbms/programs/clickhouse-server
+ Client: programs/clickhouse-client
+ Server: programs/clickhouse-server
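Once a manually built server from the paths above is running, a quick sanity probe from `clickhouse-client` might look like this; a minimal check of my own, not part of the upstream instructions:

``` sql
-- Confirm the server answers queries and report which binary is serving them.
SELECT 1;
SELECT version();
```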
@@ -100,7 +100,7 @@ sudo yum install clickhouse-server clickhouse-client
 (قابل تنظیم در تنظیمات سرور). ‘chown’ را برای کاربر دلخواه اجرا کنید.
-به مسیر لاگ ها در تنظیمات سرور توجه کنید (src/dbms/programs/config.xml).
+به مسیر لاگ ها در تنظیمات سرور توجه کنید (src/programs/config.xml).
 ### روش های دیگر نصب {#from-docker-image}
@@ -165,7 +165,7 @@ clickhouse-client --host=example.com
``` bash -milovidov@hostname:~/work/metrica/src/dbms/src/Client$ ./clickhouse-client +milovidov@hostname:~/work/metrica/src/dbms/Client$ ./clickhouse-client ClickHouse client version 0.0.18749. Connecting to localhost:9000. Connected to ClickHouse server version 0.0.18749. diff --git a/docs/fa/interfaces/tcp.md b/docs/fa/interfaces/tcp.md index f7e455de4e1..00a069189db 100644 --- a/docs/fa/interfaces/tcp.md +++ b/docs/fa/interfaces/tcp.md @@ -2,7 +2,7 @@ # رابط بومی (TCP) {#rbt-bwmy-tcp} -پروتکل بومی در \[خط فرمان خط\] (cli.md)، برای برقراری ارتباط بین سرور در طی پردازش پرس و جو توزیع شده، و همچنین در سایر برنامه های C ++ استفاده می شود. متاسفانه، پروتکل ClickHouse بومی هنوز مشخصات رسمی ندارد، اما می توان آن را از کد منبع ClickHouse (شروع [از اینجا](https://github.com/ClickHouse/ClickHouse/tree/master/dbms/src/Client)) و / یا با رهگیری و تجزیه و تحلیل ترافیک TCP. +پروتکل بومی در \[خط فرمان خط\] (cli.md)، برای برقراری ارتباط بین سرور در طی پردازش پرس و جو توزیع شده، و همچنین در سایر برنامه های C ++ استفاده می شود. متاسفانه، پروتکل ClickHouse بومی هنوز مشخصات رسمی ندارد، اما می توان آن را از کد منبع ClickHouse (شروع [از اینجا](https://github.com/ClickHouse/ClickHouse/tree/master/dbms/Client)) و / یا با رهگیری و تجزیه و تحلیل ترافیک TCP.
diff --git a/docs/ru/development/browse_code.md b/docs/ru/development/browse_code.md index 55a1628301f..d5f38bf6984 100644 --- a/docs/ru/development/browse_code.md +++ b/docs/ru/development/browse_code.md @@ -1,6 +1,6 @@ # Навигация по коду ClickHouse {#navigatsiia-po-kodu-clickhouse} -Для навигации по коду онлайн доступен **Woboq**, он расположен [здесь](https://clickhouse-test-reports.s3.yandex.net/codebrowser/html_report///ClickHouse/dbms/src/index.html). В нём реализовано удобное перемещение между исходными файлами, семантическая подсветка, подсказки, индексация и поиск. Слепок кода обновляется ежедневно. +Для навигации по коду онлайн доступен **Woboq**, он расположен [здесь](https://clickhouse-test-reports.s3.yandex.net/codebrowser/html_report///ClickHouse/dbms/index.html). В нём реализовано удобное перемещение между исходными файлами, семантическая подсветка, подсказки, индексация и поиск. Слепок кода обновляется ежедневно. Также вы можете просматривать исходники на [GitHub](https://github.com/ClickHouse/ClickHouse). diff --git a/docs/ru/development/developer_instruction.md b/docs/ru/development/developer_instruction.md index 4613c3ca409..4bc2ada8c1e 100644 --- a/docs/ru/development/developer_instruction.md +++ b/docs/ru/development/developer_instruction.md @@ -195,19 +195,19 @@ Mac OS X: В процессе сборки могут появится сообщения `libprotobuf WARNING` про protobuf файлы в библиотеке libhdfs2. Это не имеет значения. -При успешной сборке, вы получите готовый исполняемый файл `ClickHouse/build/dbms/programs/clickhouse`: +При успешной сборке, вы получите готовый исполняемый файл `ClickHouse/build/programs/clickhouse`: - ls -l dbms/programs/clickhouse + ls -l programs/clickhouse # Запуск собранной версии ClickHouse {#zapusk-sobrannoi-versii-clickhouse} -Для запуска сервера из под текущего пользователя, с выводом логов в терминал и с использованием примеров конфигурационных файлов, расположенных в исходниках, перейдите в директорию `ClickHouse/dbms/programs/server/` (эта директория находится не в директории build) и выполните: +Для запуска сервера из под текущего пользователя, с выводом логов в терминал и с использованием примеров конфигурационных файлов, расположенных в исходниках, перейдите в директорию `ClickHouse/programs/server/` (эта директория находится не в директории build) и выполните: - ../../../build/dbms/programs/clickhouse server + ../../../build/programs/clickhouse server В этом случае, ClickHouse будет использовать конфигурационные файлы, расположенные в текущей директории. Вы можете запустить `clickhouse server` из любой директории, передав ему путь к конфигурационному файлу в аргументе командной строки `--config-file`. -Для подключения к ClickHouse с помощью clickhouse-client, в соседнем терминале, зайдите в директорию `ClickHouse/build/dbms/programs/` и выполните `clickhouse client`. +Для подключения к ClickHouse с помощью clickhouse-client, в соседнем терминале, зайдите в директорию `ClickHouse/build/programs/` и выполните `clickhouse client`. Если вы получили сообщение `Connection refused` на Mac OS X или FreeBSD, то укажите для клиента 127.0.0.1 в качестве имени хоста: @@ -216,7 +216,7 @@ Mac OS X: Вы можете заменить собранным вами ClickHouse продакшен версию, установленную в системе. Для этого, установите ClickHouse на свою машину по инструкции с официального сайта. 
Затем выполните: sudo service clickhouse-server stop - sudo cp ClickHouse/build/dbms/programs/clickhouse /usr/bin/ + sudo cp ClickHouse/build/programs/clickhouse /usr/bin/ sudo service clickhouse-server start Обратите внимание, что `clickhouse-client`, `clickhouse-server` и другие, являеются симлинками на общий бинарник `clickhouse`. @@ -224,7 +224,7 @@ Mac OS X: Также вы можете запустить собранный вами ClickHouse с конфигурационным файлом системного ClickHouse: sudo service clickhouse-server stop - sudo -u clickhouse ClickHouse/build/dbms/programs/clickhouse server --config-file /etc/clickhouse-server/config.xml + sudo -u clickhouse ClickHouse/build/programs/clickhouse server --config-file /etc/clickhouse-server/config.xml # Среда разработки {#sreda-razrabotki} @@ -244,7 +244,7 @@ Mac OS X: Разработка тестов: https://clickhouse.tech/docs/ru/development/tests/ -Список задач: https://github.com/ClickHouse/ClickHouse/blob/master/dbms/tests/instructions/easy\_tasks\_sorted\_ru.md +Список задач: https://github.com/ClickHouse/ClickHouse/blob/master/tests/instructions/easy\_tasks\_sorted\_ru.md # Тестовые данные {#testovye-dannye} diff --git a/docs/ru/getting_started/example_datasets/metrica.md b/docs/ru/getting_started/example_datasets/metrica.md index 00404d77c83..ab2fe3d6c64 100644 --- a/docs/ru/getting_started/example_datasets/metrica.md +++ b/docs/ru/getting_started/example_datasets/metrica.md @@ -56,4 +56,4 @@ $ clickhouse-client --query "SELECT COUNT(*) FROM datasets.visits_v1" ## Запросы {#zaprosy} -Примеры запросов к этим таблицам (они называются `test.hits` и `test.visits`) можно найти среди [stateful тестов](https://github.com/ClickHouse/ClickHouse/tree/master/dbms/tests/queries/1_stateful) и в некоторых [performance тестах](https://github.com/ClickHouse/ClickHouse/tree/master/dbms/tests/performance) ClickHouse. +Примеры запросов к этим таблицам (они называются `test.hits` и `test.visits`) можно найти среди [stateful тестов](https://github.com/ClickHouse/ClickHouse/tree/master/tests/queries/1_stateful) и в некоторых [performance тестах](https://github.com/ClickHouse/ClickHouse/tree/master/tests/performance) ClickHouse. diff --git a/docs/ru/getting_started/install.md b/docs/ru/getting_started/install.md index 6f48dd4fa55..7caffb498e9 100644 --- a/docs/ru/getting_started/install.md +++ b/docs/ru/getting_started/install.md @@ -97,10 +97,10 @@ sudo clickhouse-client-$LATEST_VERSION/install/doinst.sh Можно скомпилировать пакеты и установить их, либо использовать программы без установки пакетов. Также при ручой сборке можно отключить необходимость поддержки набора инструкций SSE 4.2 или собрать под процессоры архитектуры AArch64. - Client: dbms/programs/clickhouse-client - Server: dbms/programs/clickhouse-server + Client: programs/clickhouse-client + Server: programs/clickhouse-server -Для работы собранного вручную сервера необходимо создать директории для данных и метаданных, а также сделать их `chown` для желаемого пользователя. Пути к этим директориям могут быть изменены в конфигурационном файле сервера (src/dbms/programs/server/config.xml), по умолчанию используются следующие: +Для работы собранного вручную сервера необходимо создать директории для данных и метаданных, а также сделать их `chown` для желаемого пользователя. 
Пути к этим директориям могут быть изменены в конфигурационном файле сервера (src/programs/server/config.xml), по умолчанию используются следующие: /opt/clickhouse/data/default/ /opt/clickhouse/metadata/default/ diff --git a/docs/ru/interfaces/tcp.md b/docs/ru/interfaces/tcp.md index 3bb99e2450b..194f54ce6c7 100644 --- a/docs/ru/interfaces/tcp.md +++ b/docs/ru/interfaces/tcp.md @@ -1,5 +1,5 @@ # Родной интерфейс (TCP) {#rodnoi-interfeis-tcp} -Нативный протокол используется в [клиенте командной строки](cli.md), для взаимодействия между серверами во время обработки распределенных запросов, а также в других программах на C++. К сожалению, у родного протокола ClickHouse пока нет формальной спецификации, но в нем можно разобраться с использованием исходного кода ClickHouse (начиная с [примерно этого места](https://github.com/ClickHouse/ClickHouse/tree/master/dbms/src/Client)) и/или путем перехвата и анализа TCP трафика. +Нативный протокол используется в [клиенте командной строки](cli.md), для взаимодействия между серверами во время обработки распределенных запросов, а также в других программах на C++. К сожалению, у родного протокола ClickHouse пока нет формальной спецификации, но в нем можно разобраться с использованием исходного кода ClickHouse (начиная с [примерно этого места](https://github.com/ClickHouse/ClickHouse/tree/master/dbms/Client)) и/или путем перехвата и анализа TCP трафика. [Оригинальная статья](https://clickhouse.tech/docs/ru/interfaces/tcp/) diff --git a/docs/ru/operations/backup.md b/docs/ru/operations/backup.md index dd01ea9057f..4888f2b418e 100644 --- a/docs/ru/operations/backup.md +++ b/docs/ru/operations/backup.md @@ -1,6 +1,6 @@ # Резервное копирование данных {#rezervnoe-kopirovanie-dannykh} -[Репликация](table_engines/replication.md) обеспечивает защиту от аппаратных сбоев, но не защищает от человеческих ошибок: случайного удаления данных, удаления не той таблицы, которую надо было, или таблицы на не том кластере, а также программных ошибок, которые приводят к неправильной обработке данных или их повреждению. Во многих случаях подобные ошибки влияют на все реплики. ClickHouse имеет встроенные средства защиты для предотвращения некоторых типов ошибок — например, по умолчанию [не получится удалить таблицы \*MergeTree, содержащие более 50 Гб данных, одной командой](https://github.com/ClickHouse/ClickHouse/blob/v18.14.18-stable/dbms/programs/server/config.xml#L322-L330). Однако эти средства защиты не охватывают все возможные случаи и могут быть обойдены. +[Репликация](table_engines/replication.md) обеспечивает защиту от аппаратных сбоев, но не защищает от человеческих ошибок: случайного удаления данных, удаления не той таблицы, которую надо было, или таблицы на не том кластере, а также программных ошибок, которые приводят к неправильной обработке данных или их повреждению. Во многих случаях подобные ошибки влияют на все реплики. ClickHouse имеет встроенные средства защиты для предотвращения некоторых типов ошибок — например, по умолчанию [не получится удалить таблицы \*MergeTree, содержащие более 50 Гб данных, одной командой](https://github.com/ClickHouse/ClickHouse/blob/v18.14.18-stable/programs/server/config.xml#L322-L330). Однако эти средства защиты не охватывают все возможные случаи и могут быть обойдены. Для того чтобы эффективно уменьшить возможные человеческие ошибки, следует тщательно подготовить стратегию резервного копирования и восстановления данных **заранее**. 
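As a concrete companion to the backup note above: partition-level snapshots are typically taken with `ALTER ... FREEZE`, sketched below. The table and partition value are hypothetical, reusing the `hits` example seen elsewhere in this patch.

``` sql
-- Hard-link a snapshot of one partition under the server's shadow/ directory.
ALTER TABLE hits FREEZE PARTITION '2019-09-01';
```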
diff --git a/docs/ru/operations/settings/query_complexity.md b/docs/ru/operations/settings/query_complexity.md index 991139cbfaa..4dbc2aed1d3 100644 --- a/docs/ru/operations/settings/query_complexity.md +++ b/docs/ru/operations/settings/query_complexity.md @@ -38,7 +38,7 @@ Максимальный возможный объём оперативной памяти для запросов пользователя на одном сервере. -Значения по умолчанию определены в файле [Settings.h](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/src/Core/Settings.h#L288). По умолчанию размер не ограничен (`max_memory_usage_for_user = 0`). +Значения по умолчанию определены в файле [Settings.h](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/Core/Settings.h#L288). По умолчанию размер не ограничен (`max_memory_usage_for_user = 0`). Смотрите также описание настройки [max\_memory\_usage](#settings_max_memory_usage). @@ -46,7 +46,7 @@ Максимальный возможный объём оперативной памяти для всех запросов на одном сервере. -Значения по умолчанию определены в файле [Settings.h](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/src/Core/Settings.h#L289). По умолчанию размер не ограничен (`max_memory_usage_for_all_queries = 0`). +Значения по умолчанию определены в файле [Settings.h](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/Core/Settings.h#L289). По умолчанию размер не ограничен (`max_memory_usage_for_all_queries = 0`). Смотрите также описание настройки [max\_memory\_usage](#settings_max_memory_usage). diff --git a/docs/ru/operations/system_tables.md b/docs/ru/operations/system_tables.md index d856e64e7b0..3afb5ff740c 100644 --- a/docs/ru/operations/system_tables.md +++ b/docs/ru/operations/system_tables.md @@ -245,7 +245,7 @@ SELECT * FROM system.events LIMIT 5 - `value` ([Int64](../data_types/int_uint.md)) — значение метрики. - `description` ([String](../data_types/string.md)) — описание метрики. -Список поддержанных метрик смотрите в файле [dbms/src/Common/CurrentMetrics.cpp](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/src/Common/CurrentMetrics.cpp). +Список поддержанных метрик смотрите в файле [dbms/Common/CurrentMetrics.cpp](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/Common/CurrentMetrics.cpp). **Пример** diff --git a/docs/ru/query_language/alter.md b/docs/ru/query_language/alter.md index 87be1a3fdb8..401d7e3bcbc 100644 --- a/docs/ru/query_language/alter.md +++ b/docs/ru/query_language/alter.md @@ -444,7 +444,7 @@ ALTER TABLE hits MOVE PARTITION '2019-09-01' TO DISK 'fast_ssd' OPTIMIZE TABLE table_not_partitioned PARTITION tuple() FINAL; ``` -Примеры запросов `ALTER ... PARTITION` можно посмотреть в тестах: [`00502_custom_partitioning_local`](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/tests/queries/0_stateless/00502_custom_partitioning_local.sql) и [`00502_custom_partitioning_replicated_zookeeper`](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper.sql). +Примеры запросов `ALTER ... PARTITION` можно посмотреть в тестах: [`00502_custom_partitioning_local`](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00502_custom_partitioning_local.sql) и [`00502_custom_partitioning_replicated_zookeeper`](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper.sql). 
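For readers who do not open the linked `00502_custom_partitioning_*` tests, a minimal sketch of typical `ALTER ... PARTITION` manipulations; the table name and partition IDs are hypothetical.

``` sql
-- Detach a partition, re-attach it, then drop another one outright.
ALTER TABLE visits DETACH PARTITION 201901;
ALTER TABLE visits ATTACH PARTITION 201901;
ALTER TABLE visits DROP PARTITION 201902;
```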
### Манипуляции с TTL таблицы {#manipuliatsii-s-ttl-tablitsy} diff --git a/docs/ru/query_language/create.md b/docs/ru/query_language/create.md index c50c75e24fc..dfaae11a359 100644 --- a/docs/ru/query_language/create.md +++ b/docs/ru/query_language/create.md @@ -145,7 +145,7 @@ ENGINE = Если задать кодек для столбца, то кодек по умолчанию не применяется. Кодеки можно последовательно комбинировать, например, `CODEC(Delta, ZSTD)`. Чтобы выбрать наиболее подходящую для вашего проекта комбинацию кодеков, необходимо провести сравнительные тесты, подобные тем, что описаны в статье Altinity [New Encodings to Improve ClickHouse Efficiency](https://www.altinity.com/blog/2019/7/new-encodings-to-improve-clickhouse). !!! warning "Предупреждение" - Нельзя распаковать базу данных ClickHouse с помощью сторонних утилит наподобие `lz4`. Необходимо использовать специальную утилиту [clickhouse-compressor](https://github.com/ClickHouse/ClickHouse/tree/master/dbms/programs/compressor). + Нельзя распаковать базу данных ClickHouse с помощью сторонних утилит наподобие `lz4`. Необходимо использовать специальную утилиту [clickhouse-compressor](https://github.com/ClickHouse/ClickHouse/tree/master/programs/compressor). Сжатие поддерживается для следующих движков таблиц: diff --git a/docs/ru/query_language/functions/introspection.md b/docs/ru/query_language/functions/introspection.md index 0907dffcd48..7cd994840e2 100644 --- a/docs/ru/query_language/functions/introspection.md +++ b/docs/ru/query_language/functions/introspection.md @@ -35,7 +35,7 @@ addressToLine(address_of_binary_instruction) - Имя файла исходного кода и номер строки в этом файле разделяются двоеточием. - Например, `/build/obj-x86_64-linux-gnu/../dbms/src/Common/ThreadPool.cpp:199`, где `199` — номер строки. + Например, `/build/obj-x86_64-linux-gnu/../dbms/Common/ThreadPool.cpp:199`, где `199` — номер строки. - Имя бинарного файла, если функция не может найти отладочную информацию. @@ -80,7 +80,7 @@ SELECT addressToLine(94784076370703) \G ``` text Row 1: ────── -addressToLine(94784076370703): /build/obj-x86_64-linux-gnu/../dbms/src/Common/ThreadPool.cpp:199 +addressToLine(94784076370703): /build/obj-x86_64-linux-gnu/../dbms/Common/ThreadPool.cpp:199 ``` Применение функции ко всему стектрейсу: @@ -100,8 +100,8 @@ Row 1: ────── trace_source_code_lines: /lib/x86_64-linux-gnu/libpthread-2.27.so /usr/lib/debug/usr/bin/clickhouse -/build/obj-x86_64-linux-gnu/../dbms/src/Common/ThreadPool.cpp:199 -/build/obj-x86_64-linux-gnu/../dbms/src/Common/ThreadPool.h:155 +/build/obj-x86_64-linux-gnu/../dbms/Common/ThreadPool.cpp:199 +/build/obj-x86_64-linux-gnu/../dbms/Common/ThreadPool.h:155 /usr/include/c++/9/bits/atomic_base.h:551 /usr/lib/debug/usr/bin/clickhouse /lib/x86_64-linux-gnu/libpthread-2.27.so diff --git a/docs/ru/query_language/operators.md b/docs/ru/query_language/operators.md index 920928077dd..670990b0967 100644 --- a/docs/ru/query_language/operators.md +++ b/docs/ru/query_language/operators.md @@ -129,7 +129,7 @@ FROM test.Orders; └───────────┴────────────┴──────────┴───────────┴─────────────┴─────────────┘ ``` -Больше примеров приведено в [тестах](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/tests/queries/0_stateless/00619_extract.sql). +Больше примеров приведено в [тестах](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00619_extract.sql). 
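A minimal, self-contained sketch of the `EXTRACT` operator referenced just above, runnable without the `test.Orders` table:

``` sql
SELECT
    EXTRACT(YEAR  FROM toDate('2020-03-12')) AS y,
    EXTRACT(MONTH FROM toDate('2020-03-12')) AS m,
    EXTRACT(DAY   FROM toDate('2020-03-12')) AS d;
```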
### INTERVAL {#operator-interval} diff --git a/docs/zh/development/build.md b/docs/zh/development/build.md index 3a81077fb0b..6a46d6f2cc7 100644 --- a/docs/zh/development/build.md +++ b/docs/zh/development/build.md @@ -79,6 +79,6 @@ cd .. ``` 若要创建一个执行文件, 执行 `ninja clickhouse`。 -这个命令会使得 `dbms/programs/clickhouse` 文件可执行,您可以使用 `client` or `server` 参数运行。 +这个命令会使得 `programs/clickhouse` 文件可执行,您可以使用 `client` or `server` 参数运行。 [来源文章](https://clickhouse.tech/docs/en/development/build/) diff --git a/docs/zh/development/developer_instruction.md b/docs/zh/development/developer_instruction.md index 27f3c1ad8b2..6d865afb2c4 100644 --- a/docs/zh/development/developer_instruction.md +++ b/docs/zh/development/developer_instruction.md @@ -185,19 +185,19 @@ Yandex官方当前使用GCC构建ClickHouse,因为它生成的机器代码性 在libhdfs2库中生成有关protobuf文件的消息时,可能会显示诸如`libprotobuf WARNING`。它们没有影响,可以忽略不计。 -成功构建后,会得到一个可执行文件`ClickHouse//dbms/programs/clickhouse`: +成功构建后,会得到一个可执行文件`ClickHouse//programs/clickhouse`: - ls -l dbms/programs/clickhouse + ls -l programs/clickhouse # 运行ClickHouse可执行文件 {#yun-xing-clickhouseke-zhi-xing-wen-jian} -要以当前的用户身份运行服务,请进入到`ClickHouse/dbms/programs/server/` 目录(在`build`文件夹外)并运行: +要以当前的用户身份运行服务,请进入到`ClickHouse/programs/server/` 目录(在`build`文件夹外)并运行: - ../../../build/dbms/programs/clickhouse server + ../../../build/programs/clickhouse server 在这种情况下,ClickHouse将使用位于当前目录中的配置文件。您可以从任何目录运行`Clickhouse server`,并将配置文件`--config-file`的路径指定为命令行参数。 -在另外一个终端上连接ClickHouse的clickhouse-client客户端,请进入到`ClickHouse/build/dbms/programs/` 并运行`clickhouse client`。 +在另外一个终端上连接ClickHouse的clickhouse-client客户端,请进入到`ClickHouse/build/programs/` 并运行`clickhouse client`。 如果您在Mac OS X 或者 FreeBSD上收到`Connection refused`的消息,请尝试指定主机地址为127.0.0.1: @@ -206,7 +206,7 @@ Yandex官方当前使用GCC构建ClickHouse,因为它生成的机器代码性 您可以使用自定义构建的ClickHouse二进制文件替换系统中安装的ClickHouse二进制文件的生成版本。为此,请参照官方网站上的说明在计算机上安装ClickHouse。 接下来,运行以下命令: sudo service clickhouse-server stop - sudo cp ClickHouse/build/dbms/programs/clickhouse /usr/bin/ + sudo cp ClickHouse/build/programs/clickhouse /usr/bin/ sudo service clickhouse-server start 请注意,`clickhouse-client`,`clickhouse-server`和其他服务通常共享`clickhouse`二进制文件的符号链接。 @@ -214,7 +214,7 @@ Yandex官方当前使用GCC构建ClickHouse,因为它生成的机器代码性 您还可以使用系统上安装的ClickHouse软件包中的配置文件运行自定义构建的ClickHouse二进制文件: sudo service clickhouse-server stop - sudo -u clickhouse ClickHouse/build/dbms/programs/clickhouse server --config-file /etc/clickhouse-server/config.xml + sudo -u clickhouse ClickHouse/build/programs/clickhouse server --config-file /etc/clickhouse-server/config.xml # IDE (集成开发环境) {#ide-ji-cheng-kai-fa-huan-jing} @@ -234,7 +234,7 @@ ClickHouse的架构描述可以在此处查看:https://clickhouse.tech/docs/en 编写测试用例:https://clickhouse.tech/docs/en/development/tests/ -任务列表:https://github.com/ClickHouse/ClickHouse/blob/master/dbms/tests/instructions/easy\_tasks\_sorted\_en.md +任务列表:https://github.com/ClickHouse/ClickHouse/blob/master/tests/instructions/easy\_tasks\_sorted\_en.md # 测试数据 {#ce-shi-shu-ju} diff --git a/docs/zh/development/tests.md b/docs/zh/development/tests.md index 0416daf307c..bdc7d5d4e79 100644 --- a/docs/zh/development/tests.md +++ b/docs/zh/development/tests.md @@ -6,15 +6,15 @@ 每个功能测试会向正在运行的 ClickHouse服 务器发送一个或多个查询,并将结果与预期结果进行比较。 -测试用例在 `dbms/src/tests/queries` 目录中。这里有两个子目录:`stateless` 和 `stateful`目录。 无状态的测试无需预加载测试数据集 - 通常是在测试运行期间动态创建小量的数据集。有状态测试需要来自 Yandex.Metrica 的预加载测试数据,而不向一般公众提供。 我们倾向于仅使用«无状态»测试并避免添加新的«有状态»测试。 +测试用例在 `tests/queries` 目录中。这里有两个子目录:`stateless` 和 `stateful`目录。 无状态的测试无需预加载测试数据集 - 通常是在测试运行期间动态创建小量的数据集。有状态测试需要来自 Yandex.Metrica 的预加载测试数据,而不向一般公众提供。 
我们倾向于仅使用«无状态»测试并避免添加新的«有状态»测试。 每个测试用例可以是两种类型之一:`.sql` 和 `.sh`。`.sql` 测试文件是用于管理`clickhouse-client --multiquery --testmode`的简单SQL脚本。`.sh` 测试文件是一个可以自己运行的脚本。 -要运行所有测试,请使用 `dbms/tests/clickhouse-test` 工具,用 `--help` 可以获取所有的选项列表。您可以简单地运行所有测试或运行测试名称中的子字符串过滤的测试子集:`./clickhouse-test substring`。 +要运行所有测试,请使用 `tests/clickhouse-test` 工具,用 `--help` 可以获取所有的选项列表。您可以简单地运行所有测试或运行测试名称中的子字符串过滤的测试子集:`./clickhouse-test substring`。 调用功能测试最简单的方法是将 `clickhouse-client` 复制到`/usr/bin/`,运行`clickhouse-server`,然后从自己的目录运行`./ clickhouse-test`。 -要添加新测试,请在 `dbms/src/tests/queries/0_stateless` 目录内添加新的 `.sql` 或 `.sh` 文件,手动检查,然后按以下方式生成 `.reference` 文件: `clickhouse-client -n --testmode < 00000_test.sql > 00000_test.reference` or `./00000_test.sh > ./00000_test.reference`。 +要添加新测试,请在 `tests/queries/0_stateless` 目录内添加新的 `.sql` 或 `.sh` 文件,手动检查,然后按以下方式生成 `.reference` 文件: `clickhouse-client -n --testmode < 00000_test.sql > 00000_test.reference` or `./00000_test.sh > ./00000_test.reference`。 测试应该只使用(创建,删除等)`test` 数据库中的表,这些表假定是事先创建的; 测试也可以使用临时表。 @@ -24,13 +24,13 @@ ## 已知的bug {#yi-zhi-de-bug} -如果我们知道一些可以通过功能测试轻松复制的错误,我们将准备好的功能测试放在 `dbms/src/tests/queries/bugs` 目录中。当修复错误时,这些测试将被移动到 `dbms/src/tests/queries/0_stateless` 目录中。 +如果我们知道一些可以通过功能测试轻松复制的错误,我们将准备好的功能测试放在 `tests/queries/bugs` 目录中。当修复错误时,这些测试将被移动到 `tests/queries/0_stateless` 目录中。 ## 集成测试 {#ji-cheng-ce-shi} 集成测试允许在集群配置中测试 ClickHouse,并与其他服务器(如MySQL,Postgres,MongoDB)进行 ClickHouse 交互。它们可用于模拟网络拆分,数据包丢弃等。这些测试在Docker 下运行,并使用各种软件创建多个容器。 -参考 `dbms/tests/integration/README.md` 文档关于如何使用集成测试。 +参考 `tests/integration/README.md` 文档关于如何使用集成测试。 请注意,ClickHouse 与第三方驱动程序的集成未经过测试。此外,我们目前还没有与 JDBC 和ODBC 驱动程序进行集成测试。 @@ -42,7 +42,7 @@ ## 性能测试 {#xing-neng-ce-shi} -性能测试允许测量和比较综合查询中 ClickHouse 的某些独立部分的性能。测试位于`dbms/tests/performance` 目录中。每个测试都由 `.xml` 文件表示,并附有测试用例的描述。使用 `clickhouse performance-test` 工具(嵌入在 `clickhouse` 二进制文件中)运行测试。请参阅 `--help` 以进行调用。 +性能测试允许测量和比较综合查询中 ClickHouse 的某些独立部分的性能。测试位于`tests/performance` 目录中。每个测试都由 `.xml` 文件表示,并附有测试用例的描述。使用 `clickhouse performance-test` 工具(嵌入在 `clickhouse` 二进制文件中)运行测试。请参阅 `--help` 以进行调用。 每个测试在循环中运行一个或多个查询(可能带有参数组合),并具有一些停止条件(如«最大执行速度不会在三秒内更改»)并测量一些有关查询性能的指标(如«最大执行速度»))。某些测试可以包含预加载的测试数据集的前提条件。 @@ -52,13 +52,13 @@ ## 测试工具和脚本 {#ce-shi-gong-ju-he-jiao-ben} -`tests`目录中的一些程序不是准备测试,而是测试工具。例如,对于`Lexer`,有一个工具`dbms/src/Parsers/tests/lexer` 标准输出。您可以使用这些工具作为代码示例以及探索和手动测试。 +`tests`目录中的一些程序不是准备测试,而是测试工具。例如,对于`Lexer`,有一个工具`dbms/Parsers/tests/lexer` 标准输出。您可以使用这些工具作为代码示例以及探索和手动测试。 您还可以将一对文件 `.sh` 和 `.reference` 与工具放在一些预定义的输入上运行它 - 然后可以将脚本结果与 `.reference` 文件进行比较。这些测试不是自动化的。 ## 杂项测试 {#za-xiang-ce-shi} -有一些外部字典的测试位于 `dbms/tests/external_dictionaries`,机器学习模型在`dbms/tests/external_models`目录。这些测试未更新,必须转移到集成测试。 +有一些外部字典的测试位于 `tests/external_dictionaries`,机器学习模型在`tests/external_models`目录。这些测试未更新,必须转移到集成测试。 对于分布式数据的插入,有单独的测试。此测试在单独的服务器上运行 ClickHouse 集群并模拟各种故障情况:网络拆分,数据包丢弃(ClickHouse 节点之间,ClickHouse 和 ZooKeeper之间,ClickHouse 服务器和客户端之间等),进行 `kill -9`,`kill -STOP` 和`kill -CONT` 等操作,类似[Jepsen](https://aphyr.com/tags/Jepsen)。然后,测试检查是否已写入所有已确认的插入,并且所有已拒绝的插入都未写入。 @@ -68,7 +68,7 @@ 当您开发了新的功能,做手动测试也是合理的。可以按照以下步骤来进行: -编译 ClickHouse。在命令行中运行 ClickHouse:进入 `dbms/src/programs/clickhouse-server` 目录并运行 `./clickhouse-server`。它会默认使用当前目录的配置文件 (`config.xml`, `users.xml` 以及在 `config.d` 和 `users.d` 目录的文件)。可以使用 `dbms/src/programs/clickhouse-client/clickhouse-client` 来连接数据库。 +编译 ClickHouse。在命令行中运行 ClickHouse:进入 `programs/clickhouse-server` 目录并运行 `./clickhouse-server`。它会默认使用当前目录的配置文件 (`config.xml`, `users.xml` 以及在 `config.d` 和 `users.d` 目录的文件)。可以使用 `programs/clickhouse-client/clickhouse-client` 来连接数据库。 或者,您可以安装 
ClickHouse 软件包:从 Yandex 存储库中获得稳定版本,或者您可以在ClickHouse源根目录中使用 `./release` 构建自己的软件包。然后使用 `sudo service clickhouse-server start` 启动服务器(或停止服务器)。在 `/etc/clickhouse-server/clickhouse-server.log` 中查找日志。 @@ -172,7 +172,7 @@ Clang 有更多有用的警告 - 您可以使用 `-Weverything` 查找它们并 **Debug allocator.** 您可以使用 `DEBUG_TCMALLOC` CMake 选项启用 `tcmalloc` 的调试版本。我们在每次提交的基础上使用调试分配器运行测试。 -更多请参阅 `dbms/tests/instructions/sanitizers.txt`。 +更多请参阅 `tests/instructions/sanitizers.txt`。 ## 模糊测试 {#mo-hu-ce-shi} @@ -186,7 +186,7 @@ Yandex Cloud 部门的人员从安全角度对 ClickHouse 功能进行了一些 ## 静态分析 {#jing-tai-fen-xi} -我们偶尔使用静态分析。我们已经评估过 `clang-tidy`, `Coverity`, `cppcheck`, `PVS-Studio`, `tscancode`。您将在 `dbms/tests/instructions/` 目录中找到使用说明。你也可以阅读[俄文文章](https://habr.com/company/yandex/blog/342018/). +我们偶尔使用静态分析。我们已经评估过 `clang-tidy`, `Coverity`, `cppcheck`, `PVS-Studio`, `tscancode`。您将在 `tests/instructions/` 目录中找到使用说明。你也可以阅读[俄文文章](https://habr.com/company/yandex/blog/342018/). 如果您使用 `CLion` 作为 IDE,您可以开箱即用一些 `clang-tidy` 检查。 diff --git a/docs/zh/getting_started/install.md b/docs/zh/getting_started/install.md index bf0ace6824f..6f26c82608b 100644 --- a/docs/zh/getting_started/install.md +++ b/docs/zh/getting_started/install.md @@ -72,8 +72,8 @@ sudo yum install clickhouse-server clickhouse-client 你也可以直接使用而不进行安装。 ``` text -Client: dbms/programs/clickhouse-client -Server: dbms/programs/clickhouse-server +Client: programs/clickhouse-client +Server: programs/clickhouse-server ``` 在服务器中为数据创建如下目录: @@ -86,7 +86,7 @@ Server: dbms/programs/clickhouse-server (它们可以在server config中配置。) 为需要的用户运行‘chown’ -日志的路径可以在server config (src/dbms/programs/server/config.xml)中配置。 +日志的路径可以在server config (src/programs/server/config.xml)中配置。 ## 启动 {#qi-dong} @@ -127,7 +127,7 @@ clickhouse-client --host=example.com 检查系统是否工作: ``` bash -milovidov@hostname:~/work/metrica/src/dbms/src/Client$ ./clickhouse-client +milovidov@hostname:~/work/metrica/src/dbms/Client$ ./clickhouse-client ClickHouse client version 0.0.18749. Connecting to localhost:9000. Connected to ClickHouse server version 0.0.18749. diff --git a/docs/zh/interfaces/tcp.md b/docs/zh/interfaces/tcp.md index 9fd4e6b108f..08a52ce1185 100644 --- a/docs/zh/interfaces/tcp.md +++ b/docs/zh/interfaces/tcp.md @@ -1,5 +1,5 @@ # 原生客户端接口(TCP) {#yuan-sheng-ke-hu-duan-jie-kou-tcp} -本机协议用于 [命令行客户端](cli.md),用于分布式查询处理期间的服务器间通信,以及其他C ++程序。 不幸的是,本机ClickHouse协议还没有正式的规范,但它可以从ClickHouse源代码进行逆向工程 [从这里开始](https://github.com/ClickHouse/ClickHouse/tree/master/dbms/src/Client))和/或拦截和分析TCP流量。 +本机协议用于 [命令行客户端](cli.md),用于分布式查询处理期间的服务器间通信,以及其他C ++程序。 不幸的是,本机ClickHouse协议还没有正式的规范,但它可以从ClickHouse源代码进行逆向工程 [从这里开始](https://github.com/ClickHouse/ClickHouse/tree/master/dbms/Client))和/或拦截和分析TCP流量。 [来源文章](https://clickhouse.tech/docs/zh/interfaces/tcp/) diff --git a/docs/zh/operations/settings/query_complexity.md b/docs/zh/operations/settings/query_complexity.md index a06c65ec072..64520f55a0b 100644 --- a/docs/zh/operations/settings/query_complexity.md +++ b/docs/zh/operations/settings/query_complexity.md @@ -45,7 +45,7 @@ Memory consumption is also restricted by the parameters `max_memory_usage_for_us The maximum amount of RAM to use for running a user’s queries on a single server. -Default values are defined in [Settings.h](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/src/Interpreters/Settings.h#L244). By default, the amount is not restricted (`max_memory_usage_for_user = 0`). +Default values are defined in [Settings.h](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/Interpreters/Settings.h#L244). 
By default, the amount is not restricted (`max_memory_usage_for_user = 0`). See also the description of [max\_memory\_usage](#settings_max_memory_usage). @@ -53,7 +53,7 @@ See also the description of [max\_memory\_usage](#settings_max_memory_usage). The maximum amount of RAM to use for running all queries on a single server. -Default values are defined in [Settings.h](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/src/Interpreters/Settings.h#L245). By default, the amount is not restricted (`max_memory_usage_for_all_queries = 0`). +Default values are defined in [Settings.h](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/Interpreters/Settings.h#L245). By default, the amount is not restricted (`max_memory_usage_for_all_queries = 0`). See also the description of [max\_memory\_usage](#settings_max_memory_usage). diff --git a/docs/zh/operations/table_engines/mergetree.md b/docs/zh/operations/table_engines/mergetree.md index 3ae2e9bc615..cb1a77e11cb 100644 --- a/docs/zh/operations/table_engines/mergetree.md +++ b/docs/zh/operations/table_engines/mergetree.md @@ -69,7 +69,7 @@ Clickhouse 中最强大的表引擎当属 `MergeTree` (合并树)引擎及 - `SETTINGS` — 影响 `MergeTree` 性能的额外参数: - - `index_granularity` — 索引粒度。即索引中相邻『标记』间的数据行数。默认值,8192 。该列表中所有可用的参数可以从这里查看 [MergeTreeSettings.h](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/src/Storages/MergeTree/MergeTreeSettings.h) 。 + - `index_granularity` — 索引粒度。即索引中相邻『标记』间的数据行数。默认值,8192 。该列表中所有可用的参数可以从这里查看 [MergeTreeSettings.h](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/Storages/MergeTree/MergeTreeSettings.h) 。 - `index_granularity_bytes` — 索引粒度,以字节为单位,默认值: 10Mb。如果仅按数据行数限制索引粒度, 请设置为0(不建议)。 - `enable_mixed_granularity_parts` — 启用或禁用通过 `index_granularity_bytes` 控制索引粒度的大小。在19.11版本之前, 只有 `index_granularity` 配置能够用于限制索引粒度的大小。当从大表(数十或数百兆)中查询数据时候,`index_granularity_bytes` 配置能够提升ClickHouse的性能。如果你的表内数据量很大,可以开启这项配置用以提升`SELECT` 查询的性能。 - `use_minimalistic_part_header_in_zookeeper` — 数据片段头在 ZooKeeper 中的存储方式。如果设置了 `use_minimalistic_part_header_in_zookeeper=1` ,ZooKeeper 会存储更少的数据。更多信息参考『服务配置参数』这章中的 [设置描述](../server_settings/settings.md#server-settings-use_minimalistic_part_header_in_zookeeper) 。 diff --git a/docs/zh/query_language/create.md b/docs/zh/query_language/create.md index 24cacdd9477..94c4ea3669c 100644 --- a/docs/zh/query_language/create.md +++ b/docs/zh/query_language/create.md @@ -121,7 +121,7 @@ ENGINE = If a codec is specified, the default codec doesn’t apply. Codecs can be combined in a pipeline, for example, `CODEC(Delta, ZSTD)`. To select the best codec combination for you project, pass benchmarks similar to described in the Altinity [New Encodings to Improve ClickHouse Efficiency](https://www.altinity.com/blog/2019/7/new-encodings-to-improve-clickhouse) article. !!! warning "Warning" - You can’t decompress ClickHouse database files with external utilities like `lz4`. Instead, use the special [clickhouse-compressor](https://github.com/ClickHouse/ClickHouse/tree/master/dbms/programs/compressor) utility. + You can’t decompress ClickHouse database files with external utilities like `lz4`. Instead, use the special [clickhouse-compressor](https://github.com/ClickHouse/ClickHouse/tree/master/programs/compressor) utility. 
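Tying the `index_granularity` setting discussed above to concrete DDL, a minimal sketch; the table layout is hypothetical, and 8192 is the default the docs cite.

``` sql
CREATE TABLE granularity_example
(
    d  Date,
    id UInt64
)
ENGINE = MergeTree
ORDER BY (d, id)
SETTINGS index_granularity = 8192;  -- rows between adjacent primary-index marks
```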
Compression is supported for the following table engines: diff --git a/docs/zh/query_language/operators.md b/docs/zh/query_language/operators.md index df136a5a1fc..2e73f85d5ec 100644 --- a/docs/zh/query_language/operators.md +++ b/docs/zh/query_language/operators.md @@ -146,7 +146,7 @@ FROM test.Orders; └───────────┴────────────┴──────────┴───────────┴─────────────┴─────────────┘ ``` -You can see more examples in [tests](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/tests/queries/0_stateless/00619_extract.sql). +You can see more examples in [tests](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00619_extract.sql). ### INTERVAL {#operator-interval} diff --git a/dbms/programs/CMakeLists.txt b/programs/CMakeLists.txt similarity index 100% rename from dbms/programs/CMakeLists.txt rename to programs/CMakeLists.txt diff --git a/dbms/programs/benchmark/Benchmark.cpp b/programs/benchmark/Benchmark.cpp similarity index 100% rename from dbms/programs/benchmark/Benchmark.cpp rename to programs/benchmark/Benchmark.cpp diff --git a/dbms/programs/benchmark/CMakeLists.txt b/programs/benchmark/CMakeLists.txt similarity index 100% rename from dbms/programs/benchmark/CMakeLists.txt rename to programs/benchmark/CMakeLists.txt diff --git a/dbms/programs/benchmark/clickhouse-benchmark.cpp b/programs/benchmark/clickhouse-benchmark.cpp similarity index 100% rename from dbms/programs/benchmark/clickhouse-benchmark.cpp rename to programs/benchmark/clickhouse-benchmark.cpp diff --git a/dbms/programs/clickhouse-split-helper b/programs/clickhouse-split-helper similarity index 100% rename from dbms/programs/clickhouse-split-helper rename to programs/clickhouse-split-helper diff --git a/dbms/programs/client/CMakeLists.txt b/programs/client/CMakeLists.txt similarity index 100% rename from dbms/programs/client/CMakeLists.txt rename to programs/client/CMakeLists.txt diff --git a/dbms/programs/client/Client.cpp b/programs/client/Client.cpp similarity index 100% rename from dbms/programs/client/Client.cpp rename to programs/client/Client.cpp diff --git a/dbms/programs/client/ConnectionParameters.cpp b/programs/client/ConnectionParameters.cpp similarity index 100% rename from dbms/programs/client/ConnectionParameters.cpp rename to programs/client/ConnectionParameters.cpp diff --git a/dbms/programs/client/ConnectionParameters.h b/programs/client/ConnectionParameters.h similarity index 100% rename from dbms/programs/client/ConnectionParameters.h rename to programs/client/ConnectionParameters.h diff --git a/dbms/programs/client/Suggest.cpp b/programs/client/Suggest.cpp similarity index 100% rename from dbms/programs/client/Suggest.cpp rename to programs/client/Suggest.cpp diff --git a/dbms/programs/client/Suggest.h b/programs/client/Suggest.h similarity index 100% rename from dbms/programs/client/Suggest.h rename to programs/client/Suggest.h diff --git a/dbms/programs/client/TestHint.h b/programs/client/TestHint.h similarity index 100% rename from dbms/programs/client/TestHint.h rename to programs/client/TestHint.h diff --git a/dbms/programs/client/clickhouse-client.cpp b/programs/client/clickhouse-client.cpp similarity index 100% rename from dbms/programs/client/clickhouse-client.cpp rename to programs/client/clickhouse-client.cpp diff --git a/dbms/programs/client/clickhouse-client.xml b/programs/client/clickhouse-client.xml similarity index 100% rename from dbms/programs/client/clickhouse-client.xml rename to programs/client/clickhouse-client.xml diff --git 
a/dbms/programs/client/config_client.h.in b/programs/client/config_client.h.in similarity index 100% rename from dbms/programs/client/config_client.h.in rename to programs/client/config_client.h.in diff --git a/dbms/programs/client/readpassphrase/CMakeLists.txt b/programs/client/readpassphrase/CMakeLists.txt similarity index 100% rename from dbms/programs/client/readpassphrase/CMakeLists.txt rename to programs/client/readpassphrase/CMakeLists.txt diff --git a/dbms/programs/client/readpassphrase/includes.h.in b/programs/client/readpassphrase/includes.h.in similarity index 100% rename from dbms/programs/client/readpassphrase/includes.h.in rename to programs/client/readpassphrase/includes.h.in diff --git a/dbms/programs/client/readpassphrase/readpassphrase.c b/programs/client/readpassphrase/readpassphrase.c similarity index 100% rename from dbms/programs/client/readpassphrase/readpassphrase.c rename to programs/client/readpassphrase/readpassphrase.c diff --git a/dbms/programs/client/readpassphrase/readpassphrase.h b/programs/client/readpassphrase/readpassphrase.h similarity index 100% rename from dbms/programs/client/readpassphrase/readpassphrase.h rename to programs/client/readpassphrase/readpassphrase.h diff --git a/dbms/programs/compressor/CMakeLists.txt b/programs/compressor/CMakeLists.txt similarity index 100% rename from dbms/programs/compressor/CMakeLists.txt rename to programs/compressor/CMakeLists.txt diff --git a/dbms/programs/compressor/Compressor.cpp b/programs/compressor/Compressor.cpp similarity index 100% rename from dbms/programs/compressor/Compressor.cpp rename to programs/compressor/Compressor.cpp diff --git a/dbms/programs/compressor/README.md b/programs/compressor/README.md similarity index 100% rename from dbms/programs/compressor/README.md rename to programs/compressor/README.md diff --git a/dbms/programs/compressor/clickhouse-compressor.cpp b/programs/compressor/clickhouse-compressor.cpp similarity index 100% rename from dbms/programs/compressor/clickhouse-compressor.cpp rename to programs/compressor/clickhouse-compressor.cpp diff --git a/dbms/programs/config_tools.h.in b/programs/config_tools.h.in similarity index 100% rename from dbms/programs/config_tools.h.in rename to programs/config_tools.h.in diff --git a/dbms/programs/copier/Aliases.h b/programs/copier/Aliases.h similarity index 100% rename from dbms/programs/copier/Aliases.h rename to programs/copier/Aliases.h diff --git a/dbms/programs/copier/CMakeLists.txt b/programs/copier/CMakeLists.txt similarity index 100% rename from dbms/programs/copier/CMakeLists.txt rename to programs/copier/CMakeLists.txt diff --git a/dbms/programs/copier/ClusterCopier.cpp b/programs/copier/ClusterCopier.cpp similarity index 100% rename from dbms/programs/copier/ClusterCopier.cpp rename to programs/copier/ClusterCopier.cpp diff --git a/dbms/programs/copier/ClusterCopier.h b/programs/copier/ClusterCopier.h similarity index 100% rename from dbms/programs/copier/ClusterCopier.h rename to programs/copier/ClusterCopier.h diff --git a/dbms/programs/copier/ClusterCopierApp.cpp b/programs/copier/ClusterCopierApp.cpp similarity index 100% rename from dbms/programs/copier/ClusterCopierApp.cpp rename to programs/copier/ClusterCopierApp.cpp diff --git a/dbms/programs/copier/ClusterCopierApp.h b/programs/copier/ClusterCopierApp.h similarity index 100% rename from dbms/programs/copier/ClusterCopierApp.h rename to programs/copier/ClusterCopierApp.h diff --git a/dbms/programs/copier/ClusterPartition.h b/programs/copier/ClusterPartition.h similarity 
index 100% rename from dbms/programs/copier/ClusterPartition.h rename to programs/copier/ClusterPartition.h diff --git a/dbms/programs/copier/Internals.cpp b/programs/copier/Internals.cpp similarity index 100% rename from dbms/programs/copier/Internals.cpp rename to programs/copier/Internals.cpp diff --git a/dbms/programs/copier/Internals.h b/programs/copier/Internals.h similarity index 100% rename from dbms/programs/copier/Internals.h rename to programs/copier/Internals.h diff --git a/dbms/programs/copier/ShardPartition.h b/programs/copier/ShardPartition.h similarity index 100% rename from dbms/programs/copier/ShardPartition.h rename to programs/copier/ShardPartition.h diff --git a/dbms/programs/copier/ShardPartitionPiece.h b/programs/copier/ShardPartitionPiece.h similarity index 100% rename from dbms/programs/copier/ShardPartitionPiece.h rename to programs/copier/ShardPartitionPiece.h diff --git a/dbms/programs/copier/TaskCluster.h b/programs/copier/TaskCluster.h similarity index 100% rename from dbms/programs/copier/TaskCluster.h rename to programs/copier/TaskCluster.h diff --git a/dbms/programs/copier/TaskTableAndShard.h b/programs/copier/TaskTableAndShard.h similarity index 100% rename from dbms/programs/copier/TaskTableAndShard.h rename to programs/copier/TaskTableAndShard.h diff --git a/dbms/programs/copier/ZooKeeperStaff.h b/programs/copier/ZooKeeperStaff.h similarity index 100% rename from dbms/programs/copier/ZooKeeperStaff.h rename to programs/copier/ZooKeeperStaff.h diff --git a/dbms/programs/copier/clickhouse-copier.cpp b/programs/copier/clickhouse-copier.cpp similarity index 100% rename from dbms/programs/copier/clickhouse-copier.cpp rename to programs/copier/clickhouse-copier.cpp diff --git a/dbms/programs/extract-from-config/CMakeLists.txt b/programs/extract-from-config/CMakeLists.txt similarity index 100% rename from dbms/programs/extract-from-config/CMakeLists.txt rename to programs/extract-from-config/CMakeLists.txt diff --git a/dbms/programs/extract-from-config/ExtractFromConfig.cpp b/programs/extract-from-config/ExtractFromConfig.cpp similarity index 100% rename from dbms/programs/extract-from-config/ExtractFromConfig.cpp rename to programs/extract-from-config/ExtractFromConfig.cpp diff --git a/dbms/programs/extract-from-config/clickhouse-extract-from-config.cpp b/programs/extract-from-config/clickhouse-extract-from-config.cpp similarity index 100% rename from dbms/programs/extract-from-config/clickhouse-extract-from-config.cpp rename to programs/extract-from-config/clickhouse-extract-from-config.cpp diff --git a/dbms/programs/format/CMakeLists.txt b/programs/format/CMakeLists.txt similarity index 100% rename from dbms/programs/format/CMakeLists.txt rename to programs/format/CMakeLists.txt diff --git a/dbms/programs/format/Format.cpp b/programs/format/Format.cpp similarity index 100% rename from dbms/programs/format/Format.cpp rename to programs/format/Format.cpp diff --git a/dbms/programs/format/clickhouse-format.cpp b/programs/format/clickhouse-format.cpp similarity index 100% rename from dbms/programs/format/clickhouse-format.cpp rename to programs/format/clickhouse-format.cpp diff --git a/dbms/programs/local/CMakeLists.txt b/programs/local/CMakeLists.txt similarity index 100% rename from dbms/programs/local/CMakeLists.txt rename to programs/local/CMakeLists.txt diff --git a/dbms/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp similarity index 100% rename from dbms/programs/local/LocalServer.cpp rename to programs/local/LocalServer.cpp diff --git 
a/dbms/programs/local/LocalServer.h b/programs/local/LocalServer.h similarity index 100% rename from dbms/programs/local/LocalServer.h rename to programs/local/LocalServer.h diff --git a/dbms/programs/local/clickhouse-local.cpp b/programs/local/clickhouse-local.cpp similarity index 100% rename from dbms/programs/local/clickhouse-local.cpp rename to programs/local/clickhouse-local.cpp diff --git a/dbms/programs/main.cpp b/programs/main.cpp similarity index 100% rename from dbms/programs/main.cpp rename to programs/main.cpp diff --git a/dbms/programs/obfuscator/CMakeLists.txt b/programs/obfuscator/CMakeLists.txt similarity index 100% rename from dbms/programs/obfuscator/CMakeLists.txt rename to programs/obfuscator/CMakeLists.txt diff --git a/dbms/programs/obfuscator/Obfuscator.cpp b/programs/obfuscator/Obfuscator.cpp similarity index 100% rename from dbms/programs/obfuscator/Obfuscator.cpp rename to programs/obfuscator/Obfuscator.cpp diff --git a/dbms/programs/obfuscator/clickhouse-obfuscator.cpp b/programs/obfuscator/clickhouse-obfuscator.cpp similarity index 100% rename from dbms/programs/obfuscator/clickhouse-obfuscator.cpp rename to programs/obfuscator/clickhouse-obfuscator.cpp diff --git a/dbms/programs/odbc-bridge/CMakeLists.txt b/programs/odbc-bridge/CMakeLists.txt similarity index 100% rename from dbms/programs/odbc-bridge/CMakeLists.txt rename to programs/odbc-bridge/CMakeLists.txt diff --git a/dbms/programs/odbc-bridge/ColumnInfoHandler.cpp b/programs/odbc-bridge/ColumnInfoHandler.cpp similarity index 100% rename from dbms/programs/odbc-bridge/ColumnInfoHandler.cpp rename to programs/odbc-bridge/ColumnInfoHandler.cpp diff --git a/dbms/programs/odbc-bridge/ColumnInfoHandler.h b/programs/odbc-bridge/ColumnInfoHandler.h similarity index 100% rename from dbms/programs/odbc-bridge/ColumnInfoHandler.h rename to programs/odbc-bridge/ColumnInfoHandler.h diff --git a/dbms/programs/odbc-bridge/HandlerFactory.cpp b/programs/odbc-bridge/HandlerFactory.cpp similarity index 100% rename from dbms/programs/odbc-bridge/HandlerFactory.cpp rename to programs/odbc-bridge/HandlerFactory.cpp diff --git a/dbms/programs/odbc-bridge/HandlerFactory.h b/programs/odbc-bridge/HandlerFactory.h similarity index 100% rename from dbms/programs/odbc-bridge/HandlerFactory.h rename to programs/odbc-bridge/HandlerFactory.h diff --git a/dbms/programs/odbc-bridge/IdentifierQuoteHandler.cpp b/programs/odbc-bridge/IdentifierQuoteHandler.cpp similarity index 100% rename from dbms/programs/odbc-bridge/IdentifierQuoteHandler.cpp rename to programs/odbc-bridge/IdentifierQuoteHandler.cpp diff --git a/dbms/programs/odbc-bridge/IdentifierQuoteHandler.h b/programs/odbc-bridge/IdentifierQuoteHandler.h similarity index 100% rename from dbms/programs/odbc-bridge/IdentifierQuoteHandler.h rename to programs/odbc-bridge/IdentifierQuoteHandler.h diff --git a/dbms/programs/odbc-bridge/MainHandler.cpp b/programs/odbc-bridge/MainHandler.cpp similarity index 100% rename from dbms/programs/odbc-bridge/MainHandler.cpp rename to programs/odbc-bridge/MainHandler.cpp diff --git a/dbms/programs/odbc-bridge/MainHandler.h b/programs/odbc-bridge/MainHandler.h similarity index 100% rename from dbms/programs/odbc-bridge/MainHandler.h rename to programs/odbc-bridge/MainHandler.h diff --git a/dbms/programs/odbc-bridge/ODBCBlockInputStream.cpp b/programs/odbc-bridge/ODBCBlockInputStream.cpp similarity index 100% rename from dbms/programs/odbc-bridge/ODBCBlockInputStream.cpp rename to programs/odbc-bridge/ODBCBlockInputStream.cpp diff --git 
a/dbms/programs/odbc-bridge/ODBCBlockInputStream.h b/programs/odbc-bridge/ODBCBlockInputStream.h similarity index 100% rename from dbms/programs/odbc-bridge/ODBCBlockInputStream.h rename to programs/odbc-bridge/ODBCBlockInputStream.h diff --git a/dbms/programs/odbc-bridge/ODBCBridge.cpp b/programs/odbc-bridge/ODBCBridge.cpp similarity index 100% rename from dbms/programs/odbc-bridge/ODBCBridge.cpp rename to programs/odbc-bridge/ODBCBridge.cpp diff --git a/dbms/programs/odbc-bridge/ODBCBridge.h b/programs/odbc-bridge/ODBCBridge.h similarity index 100% rename from dbms/programs/odbc-bridge/ODBCBridge.h rename to programs/odbc-bridge/ODBCBridge.h diff --git a/dbms/programs/odbc-bridge/PingHandler.cpp b/programs/odbc-bridge/PingHandler.cpp similarity index 100% rename from dbms/programs/odbc-bridge/PingHandler.cpp rename to programs/odbc-bridge/PingHandler.cpp diff --git a/dbms/programs/odbc-bridge/PingHandler.h b/programs/odbc-bridge/PingHandler.h similarity index 100% rename from dbms/programs/odbc-bridge/PingHandler.h rename to programs/odbc-bridge/PingHandler.h diff --git a/dbms/programs/odbc-bridge/README.md b/programs/odbc-bridge/README.md similarity index 100% rename from dbms/programs/odbc-bridge/README.md rename to programs/odbc-bridge/README.md diff --git a/dbms/programs/odbc-bridge/getIdentifierQuote.cpp b/programs/odbc-bridge/getIdentifierQuote.cpp similarity index 100% rename from dbms/programs/odbc-bridge/getIdentifierQuote.cpp rename to programs/odbc-bridge/getIdentifierQuote.cpp diff --git a/dbms/programs/odbc-bridge/getIdentifierQuote.h b/programs/odbc-bridge/getIdentifierQuote.h similarity index 100% rename from dbms/programs/odbc-bridge/getIdentifierQuote.h rename to programs/odbc-bridge/getIdentifierQuote.h diff --git a/dbms/programs/odbc-bridge/odbc-bridge.cpp b/programs/odbc-bridge/odbc-bridge.cpp similarity index 100% rename from dbms/programs/odbc-bridge/odbc-bridge.cpp rename to programs/odbc-bridge/odbc-bridge.cpp diff --git a/dbms/programs/odbc-bridge/tests/CMakeLists.txt b/programs/odbc-bridge/tests/CMakeLists.txt similarity index 100% rename from dbms/programs/odbc-bridge/tests/CMakeLists.txt rename to programs/odbc-bridge/tests/CMakeLists.txt diff --git a/dbms/programs/odbc-bridge/tests/validate-odbc-connection-string.cpp b/programs/odbc-bridge/tests/validate-odbc-connection-string.cpp similarity index 100% rename from dbms/programs/odbc-bridge/tests/validate-odbc-connection-string.cpp rename to programs/odbc-bridge/tests/validate-odbc-connection-string.cpp diff --git a/dbms/programs/odbc-bridge/tests/validate-odbc-connection-string.reference b/programs/odbc-bridge/tests/validate-odbc-connection-string.reference similarity index 100% rename from dbms/programs/odbc-bridge/tests/validate-odbc-connection-string.reference rename to programs/odbc-bridge/tests/validate-odbc-connection-string.reference diff --git a/dbms/programs/odbc-bridge/tests/validate-odbc-connection-string.sh b/programs/odbc-bridge/tests/validate-odbc-connection-string.sh similarity index 100% rename from dbms/programs/odbc-bridge/tests/validate-odbc-connection-string.sh rename to programs/odbc-bridge/tests/validate-odbc-connection-string.sh diff --git a/dbms/programs/odbc-bridge/validateODBCConnectionString.cpp b/programs/odbc-bridge/validateODBCConnectionString.cpp similarity index 100% rename from dbms/programs/odbc-bridge/validateODBCConnectionString.cpp rename to programs/odbc-bridge/validateODBCConnectionString.cpp diff --git a/dbms/programs/odbc-bridge/validateODBCConnectionString.h 
b/programs/odbc-bridge/validateODBCConnectionString.h similarity index 100% rename from dbms/programs/odbc-bridge/validateODBCConnectionString.h rename to programs/odbc-bridge/validateODBCConnectionString.h diff --git a/dbms/programs/server/CMakeLists.txt b/programs/server/CMakeLists.txt similarity index 100% rename from dbms/programs/server/CMakeLists.txt rename to programs/server/CMakeLists.txt diff --git a/dbms/programs/server/HTTPHandler.cpp b/programs/server/HTTPHandler.cpp similarity index 100% rename from dbms/programs/server/HTTPHandler.cpp rename to programs/server/HTTPHandler.cpp diff --git a/dbms/programs/server/HTTPHandler.h b/programs/server/HTTPHandler.h similarity index 100% rename from dbms/programs/server/HTTPHandler.h rename to programs/server/HTTPHandler.h diff --git a/dbms/programs/server/HTTPHandlerFactory.cpp b/programs/server/HTTPHandlerFactory.cpp similarity index 100% rename from dbms/programs/server/HTTPHandlerFactory.cpp rename to programs/server/HTTPHandlerFactory.cpp diff --git a/dbms/programs/server/HTTPHandlerFactory.h b/programs/server/HTTPHandlerFactory.h similarity index 100% rename from dbms/programs/server/HTTPHandlerFactory.h rename to programs/server/HTTPHandlerFactory.h diff --git a/dbms/programs/server/IServer.h b/programs/server/IServer.h similarity index 100% rename from dbms/programs/server/IServer.h rename to programs/server/IServer.h diff --git a/dbms/programs/server/InterserverIOHTTPHandler.cpp b/programs/server/InterserverIOHTTPHandler.cpp similarity index 100% rename from dbms/programs/server/InterserverIOHTTPHandler.cpp rename to programs/server/InterserverIOHTTPHandler.cpp diff --git a/dbms/programs/server/InterserverIOHTTPHandler.h b/programs/server/InterserverIOHTTPHandler.h similarity index 100% rename from dbms/programs/server/InterserverIOHTTPHandler.h rename to programs/server/InterserverIOHTTPHandler.h diff --git a/dbms/programs/server/MetricsTransmitter.cpp b/programs/server/MetricsTransmitter.cpp similarity index 100% rename from dbms/programs/server/MetricsTransmitter.cpp rename to programs/server/MetricsTransmitter.cpp diff --git a/dbms/programs/server/MetricsTransmitter.h b/programs/server/MetricsTransmitter.h similarity index 100% rename from dbms/programs/server/MetricsTransmitter.h rename to programs/server/MetricsTransmitter.h diff --git a/dbms/programs/server/MySQLHandler.cpp b/programs/server/MySQLHandler.cpp similarity index 100% rename from dbms/programs/server/MySQLHandler.cpp rename to programs/server/MySQLHandler.cpp diff --git a/dbms/programs/server/MySQLHandler.h b/programs/server/MySQLHandler.h similarity index 100% rename from dbms/programs/server/MySQLHandler.h rename to programs/server/MySQLHandler.h diff --git a/dbms/programs/server/MySQLHandlerFactory.cpp b/programs/server/MySQLHandlerFactory.cpp similarity index 100% rename from dbms/programs/server/MySQLHandlerFactory.cpp rename to programs/server/MySQLHandlerFactory.cpp diff --git a/dbms/programs/server/MySQLHandlerFactory.h b/programs/server/MySQLHandlerFactory.h similarity index 100% rename from dbms/programs/server/MySQLHandlerFactory.h rename to programs/server/MySQLHandlerFactory.h diff --git a/dbms/programs/server/NotFoundHandler.cpp b/programs/server/NotFoundHandler.cpp similarity index 100% rename from dbms/programs/server/NotFoundHandler.cpp rename to programs/server/NotFoundHandler.cpp diff --git a/dbms/programs/server/NotFoundHandler.h b/programs/server/NotFoundHandler.h similarity index 100% rename from dbms/programs/server/NotFoundHandler.h 
rename to programs/server/NotFoundHandler.h diff --git a/dbms/programs/server/PingRequestHandler.cpp b/programs/server/PingRequestHandler.cpp similarity index 100% rename from dbms/programs/server/PingRequestHandler.cpp rename to programs/server/PingRequestHandler.cpp diff --git a/dbms/programs/server/PingRequestHandler.h b/programs/server/PingRequestHandler.h similarity index 100% rename from dbms/programs/server/PingRequestHandler.h rename to programs/server/PingRequestHandler.h diff --git a/dbms/programs/server/PrometheusMetricsWriter.cpp b/programs/server/PrometheusMetricsWriter.cpp similarity index 100% rename from dbms/programs/server/PrometheusMetricsWriter.cpp rename to programs/server/PrometheusMetricsWriter.cpp diff --git a/dbms/programs/server/PrometheusMetricsWriter.h b/programs/server/PrometheusMetricsWriter.h similarity index 100% rename from dbms/programs/server/PrometheusMetricsWriter.h rename to programs/server/PrometheusMetricsWriter.h diff --git a/dbms/programs/server/PrometheusRequestHandler.cpp b/programs/server/PrometheusRequestHandler.cpp similarity index 100% rename from dbms/programs/server/PrometheusRequestHandler.cpp rename to programs/server/PrometheusRequestHandler.cpp diff --git a/dbms/programs/server/PrometheusRequestHandler.h b/programs/server/PrometheusRequestHandler.h similarity index 100% rename from dbms/programs/server/PrometheusRequestHandler.h rename to programs/server/PrometheusRequestHandler.h diff --git a/dbms/programs/server/ReplicasStatusHandler.cpp b/programs/server/ReplicasStatusHandler.cpp similarity index 100% rename from dbms/programs/server/ReplicasStatusHandler.cpp rename to programs/server/ReplicasStatusHandler.cpp diff --git a/dbms/programs/server/ReplicasStatusHandler.h b/programs/server/ReplicasStatusHandler.h similarity index 100% rename from dbms/programs/server/ReplicasStatusHandler.h rename to programs/server/ReplicasStatusHandler.h diff --git a/dbms/programs/server/RootRequestHandler.cpp b/programs/server/RootRequestHandler.cpp similarity index 100% rename from dbms/programs/server/RootRequestHandler.cpp rename to programs/server/RootRequestHandler.cpp diff --git a/dbms/programs/server/RootRequestHandler.h b/programs/server/RootRequestHandler.h similarity index 100% rename from dbms/programs/server/RootRequestHandler.h rename to programs/server/RootRequestHandler.h diff --git a/dbms/programs/server/Server.cpp b/programs/server/Server.cpp similarity index 100% rename from dbms/programs/server/Server.cpp rename to programs/server/Server.cpp diff --git a/dbms/programs/server/Server.h b/programs/server/Server.h similarity index 100% rename from dbms/programs/server/Server.h rename to programs/server/Server.h diff --git a/dbms/programs/server/TCPHandler.cpp b/programs/server/TCPHandler.cpp similarity index 100% rename from dbms/programs/server/TCPHandler.cpp rename to programs/server/TCPHandler.cpp diff --git a/dbms/programs/server/TCPHandler.h b/programs/server/TCPHandler.h similarity index 100% rename from dbms/programs/server/TCPHandler.h rename to programs/server/TCPHandler.h diff --git a/dbms/programs/server/TCPHandlerFactory.h b/programs/server/TCPHandlerFactory.h similarity index 100% rename from dbms/programs/server/TCPHandlerFactory.h rename to programs/server/TCPHandlerFactory.h diff --git a/dbms/programs/server/clickhouse-server.cpp b/programs/server/clickhouse-server.cpp similarity index 100% rename from dbms/programs/server/clickhouse-server.cpp rename to programs/server/clickhouse-server.cpp diff --git 
a/dbms/programs/server/config.d/listen.xml.disabled b/programs/server/config.d/listen.xml.disabled similarity index 100% rename from dbms/programs/server/config.d/listen.xml.disabled rename to programs/server/config.d/listen.xml.disabled diff --git a/dbms/programs/server/config.d/log_to_console.xml b/programs/server/config.d/log_to_console.xml similarity index 100% rename from dbms/programs/server/config.d/log_to_console.xml rename to programs/server/config.d/log_to_console.xml diff --git a/dbms/programs/server/config.d/macros.xml b/programs/server/config.d/macros.xml similarity index 100% rename from dbms/programs/server/config.d/macros.xml rename to programs/server/config.d/macros.xml diff --git a/dbms/programs/server/config.d/metric_log.xml b/programs/server/config.d/metric_log.xml similarity index 100% rename from dbms/programs/server/config.d/metric_log.xml rename to programs/server/config.d/metric_log.xml diff --git a/dbms/programs/server/config.d/more_clusters.xml b/programs/server/config.d/more_clusters.xml similarity index 100% rename from dbms/programs/server/config.d/more_clusters.xml rename to programs/server/config.d/more_clusters.xml diff --git a/dbms/programs/server/config.d/part_log.xml b/programs/server/config.d/part_log.xml similarity index 100% rename from dbms/programs/server/config.d/part_log.xml rename to programs/server/config.d/part_log.xml diff --git a/dbms/programs/server/config.d/path.xml b/programs/server/config.d/path.xml similarity index 100% rename from dbms/programs/server/config.d/path.xml rename to programs/server/config.d/path.xml diff --git a/dbms/programs/server/config.d/query_masking_rules.xml b/programs/server/config.d/query_masking_rules.xml similarity index 100% rename from dbms/programs/server/config.d/query_masking_rules.xml rename to programs/server/config.d/query_masking_rules.xml diff --git a/dbms/programs/server/config.d/text_log.xml b/programs/server/config.d/text_log.xml similarity index 100% rename from dbms/programs/server/config.d/text_log.xml rename to programs/server/config.d/text_log.xml diff --git a/dbms/programs/server/config.d/tls.xml.disabled b/programs/server/config.d/tls.xml.disabled similarity index 100% rename from dbms/programs/server/config.d/tls.xml.disabled rename to programs/server/config.d/tls.xml.disabled diff --git a/dbms/programs/server/config.d/zookeeper.xml b/programs/server/config.d/zookeeper.xml similarity index 100% rename from dbms/programs/server/config.d/zookeeper.xml rename to programs/server/config.d/zookeeper.xml diff --git a/dbms/programs/server/config.xml b/programs/server/config.xml similarity index 100% rename from dbms/programs/server/config.xml rename to programs/server/config.xml diff --git a/dbms/programs/server/data/.gitignore b/programs/server/data/.gitignore similarity index 100% rename from dbms/programs/server/data/.gitignore rename to programs/server/data/.gitignore diff --git a/dbms/programs/server/data/default/.gitignore b/programs/server/data/default/.gitignore similarity index 100% rename from dbms/programs/server/data/default/.gitignore rename to programs/server/data/default/.gitignore diff --git a/dbms/programs/server/metadata/default/.gitignore b/programs/server/metadata/default/.gitignore similarity index 100% rename from dbms/programs/server/metadata/default/.gitignore rename to programs/server/metadata/default/.gitignore diff --git a/dbms/programs/server/users.d/allow_only_from_localhost.xml b/programs/server/users.d/allow_only_from_localhost.xml similarity index 100% rename from 
dbms/programs/server/users.d/allow_only_from_localhost.xml rename to programs/server/users.d/allow_only_from_localhost.xml
diff --git a/dbms/programs/server/users.d/log_queries.xml b/programs/server/users.d/log_queries.xml
similarity index 100%
rename from dbms/programs/server/users.d/log_queries.xml
rename to programs/server/users.d/log_queries.xml
diff --git a/dbms/programs/server/users.d/readonly.xml b/programs/server/users.d/readonly.xml
similarity index 100%
rename from dbms/programs/server/users.d/readonly.xml
rename to programs/server/users.d/readonly.xml
diff --git a/dbms/programs/server/users.xml b/programs/server/users.xml
similarity index 100%
rename from dbms/programs/server/users.xml
rename to programs/server/users.xml
diff --git a/dbms/tests/.gitignore b/tests/.gitignore
similarity index 100%
rename from dbms/tests/.gitignore
rename to tests/.gitignore
diff --git a/dbms/tests/CMakeLists.txt b/tests/CMakeLists.txt
similarity index 100%
rename from dbms/tests/CMakeLists.txt
rename to tests/CMakeLists.txt
diff --git a/dbms/tests/CTestCustom.cmake b/tests/CTestCustom.cmake
similarity index 100%
rename from dbms/tests/CTestCustom.cmake
rename to tests/CTestCustom.cmake
diff --git a/dbms/tests/clickhouse-client.xml b/tests/clickhouse-client.xml
similarity index 100%
rename from dbms/tests/clickhouse-client.xml
rename to tests/clickhouse-client.xml
diff --git a/dbms/tests/clickhouse-test b/tests/clickhouse-test
similarity index 100%
rename from dbms/tests/clickhouse-test
rename to tests/clickhouse-test
diff --git a/dbms/tests/clickhouse-test-server b/tests/clickhouse-test-server
similarity index 93%
rename from dbms/tests/clickhouse-test-server
rename to tests/clickhouse-test-server
index 831fd05fd82..7195abbfde8 100755
--- a/dbms/tests/clickhouse-test-server
+++ b/tests/clickhouse-test-server
@@ -10,11 +10,11 @@ DATA_DIR=${DATA_DIR:=`mktemp -d /tmp/clickhouse.test..XXXXX`}
DATA_DIR_PATTERN=${DATA_DIR_PATTERN:=/tmp/clickhouse} # path from config file, will be replaced to temporary
LOG_DIR=${LOG_DIR:=$DATA_DIR/log}
export CLICKHOUSE_BINARY_NAME=${CLICKHOUSE_BINARY_NAME:="clickhouse"}
-( [ -x "$ROOT_DIR/dbms/programs/${CLICKHOUSE_BINARY_NAME}-server" ] || [ -x "$ROOT_DIR/dbms/programs/${CLICKHOUSE_BINARY_NAME}" ] ) && BUILD_DIR=${BUILD_DIR:=$ROOT_DIR} # Build without separate build dir
+( [ -x "$ROOT_DIR/programs/${CLICKHOUSE_BINARY_NAME}-server" ] || [ -x "$ROOT_DIR/programs/${CLICKHOUSE_BINARY_NAME}" ] ) && BUILD_DIR=${BUILD_DIR:=$ROOT_DIR} # Build without separate build dir
[ -d "$ROOT_DIR/build${BUILD_TYPE}" ] && BUILD_DIR=${BUILD_DIR:=$ROOT_DIR/build${BUILD_TYPE}}
BUILD_DIR=${BUILD_DIR:=$ROOT_DIR}
[ -x "${CLICKHOUSE_BINARY_NAME}-server" ] && [ -x "${CLICKHOUSE_BINARY_NAME}-client" ] && BIN_DIR= # Allow run in /usr/bin
-( [ -x "$BUILD_DIR/dbms/programs/${CLICKHOUSE_BINARY_NAME}" ] || [ -x "$BUILD_DIR/dbms/programs/${CLICKHOUSE_BINARY_NAME}-server" ] ) && BIN_DIR=${BIN_DIR:=$BUILD_DIR/dbms/programs/}
+( [ -x "$BUILD_DIR/programs/${CLICKHOUSE_BINARY_NAME}" ] || [ -x "$BUILD_DIR/programs/${CLICKHOUSE_BINARY_NAME}-server" ] ) && BIN_DIR=${BIN_DIR:=$BUILD_DIR/programs/}
[ -x "$BIN_DIR/${CLICKHOUSE_BINARY_NAME}-server" ] && CLICKHOUSE_SERVER=${CLICKHOUSE_SERVER:=$BIN_DIR/${CLICKHOUSE_BINARY_NAME}-server}
[ -x "$BIN_DIR/${CLICKHOUSE_BINARY_NAME}" ] && CLICKHOUSE_SERVER=${CLICKHOUSE_SERVER:=$BIN_DIR/${CLICKHOUSE_BINARY_NAME} server}
[ -x "$BIN_DIR/${CLICKHOUSE_BINARY_NAME}-client" ] && CLICKHOUSE_CLIENT=${CLICKHOUSE_CLIENT:=$BIN_DIR/${CLICKHOUSE_BINARY_NAME}-client}
@@ -91,7 +91,7 @@ fi
VERSION=`$CLICKHOUSE_CLIENT --version-clean`
# If run from compile dir - use in-place compile binary and headers
-[ -n "$BIN_DIR" ] && INTERNAL_COMPILER_PARAMS="--compiler_executable_root=${INTERNAL_COMPILER_BIN_ROOT:=$BUILD_DIR/dbms/programs/} --compiler_headers=$BUILD_DIR/dbms/programs/clang/headers/$VERSION/ --compiler_headers_root=$BUILD_DIR/dbms/programs/clang/headers/$VERSION/"
+[ -n "$BIN_DIR" ] && INTERNAL_COMPILER_PARAMS="--compiler_executable_root=${INTERNAL_COMPILER_BIN_ROOT:=$BUILD_DIR/programs/} --compiler_headers=$BUILD_DIR/programs/clang/headers/$VERSION/ --compiler_headers_root=$BUILD_DIR/programs/clang/headers/$VERSION/"

$GDB $CLICKHOUSE_SERVER --config-file=$CLICKHOUSE_CONFIG --log=$CLICKHOUSE_LOG $TEST_SERVER_PARAMS -- \
    --http_port=$CLICKHOUSE_PORT_HTTP \
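The chain of `${VAR:=default}` assignments in the hunk above is what lets the test server run either from a build tree or from an installed package. A stand-alone sketch of the same idiom (the build path below is hypothetical):

``` bash
# Stand-alone sketch of the ${VAR:=default} resolution used by clickhouse-test-server.
# $HOME/ClickHouse/build is a hypothetical build directory.
export CLICKHOUSE_BINARY_NAME=${CLICKHOUSE_BINARY_NAME:="clickhouse"}
BUILD_DIR=${BUILD_DIR:=$HOME/ClickHouse/build}
# Prefer a standalone server binary; otherwise fall back to the monolithic one.
[ -x "$BUILD_DIR/programs/${CLICKHOUSE_BINARY_NAME}-server" ] \
    && CLICKHOUSE_SERVER=${CLICKHOUSE_SERVER:="$BUILD_DIR/programs/${CLICKHOUSE_BINARY_NAME}-server"}
CLICKHOUSE_SERVER=${CLICKHOUSE_SERVER:="$BUILD_DIR/programs/${CLICKHOUSE_BINARY_NAME} server"}
echo "Would start: $CLICKHOUSE_SERVER"
```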
diff --git a/dbms/tests/client-test.xml b/tests/client-test.xml
similarity index 100%
rename from dbms/tests/client-test.xml
rename to tests/client-test.xml
diff --git a/dbms/tests/config/client_config.xml b/tests/config/client_config.xml
similarity index 100%
rename from dbms/tests/config/client_config.xml
rename to tests/config/client_config.xml
diff --git a/dbms/tests/config/decimals_dictionary.xml b/tests/config/decimals_dictionary.xml
similarity index 100%
rename from dbms/tests/config/decimals_dictionary.xml
rename to tests/config/decimals_dictionary.xml
diff --git a/dbms/tests/config/dhparam.pem b/tests/config/dhparam.pem
similarity index 100%
rename from dbms/tests/config/dhparam.pem
rename to tests/config/dhparam.pem
diff --git a/dbms/tests/config/disks.xml b/tests/config/disks.xml
similarity index 100%
rename from dbms/tests/config/disks.xml
rename to tests/config/disks.xml
diff --git a/dbms/tests/config/ints_dictionary.xml b/tests/config/ints_dictionary.xml
similarity index 100%
rename from dbms/tests/config/ints_dictionary.xml
rename to tests/config/ints_dictionary.xml
diff --git a/dbms/tests/config/listen.xml b/tests/config/listen.xml
similarity index 100%
rename from dbms/tests/config/listen.xml
rename to tests/config/listen.xml
diff --git a/dbms/tests/config/log_queries.xml b/tests/config/log_queries.xml
similarity index 100%
rename from dbms/tests/config/log_queries.xml
rename to tests/config/log_queries.xml
diff --git a/dbms/tests/config/macros.xml b/tests/config/macros.xml
similarity index 100%
rename from dbms/tests/config/macros.xml
rename to tests/config/macros.xml
diff --git a/dbms/tests/config/metric_log.xml b/tests/config/metric_log.xml
similarity index 100%
rename from dbms/tests/config/metric_log.xml
rename to tests/config/metric_log.xml
diff --git a/dbms/tests/config/part_log.xml b/tests/config/part_log.xml
similarity index 100%
rename from dbms/tests/config/part_log.xml
rename to tests/config/part_log.xml
diff --git a/dbms/tests/config/query_masking_rules.xml b/tests/config/query_masking_rules.xml
similarity index 100%
rename from dbms/tests/config/query_masking_rules.xml
rename to tests/config/query_masking_rules.xml
diff --git a/dbms/tests/config/readonly.xml b/tests/config/readonly.xml
similarity index 100%
rename from dbms/tests/config/readonly.xml
rename to tests/config/readonly.xml
diff --git a/dbms/tests/config/secure_ports.xml b/tests/config/secure_ports.xml
similarity index 100%
rename from dbms/tests/config/secure_ports.xml
rename to tests/config/secure_ports.xml
diff --git a/dbms/tests/config/server.crt b/tests/config/server.crt
similarity index 100%
rename from dbms/tests/config/server.crt
rename to tests/config/server.crt
diff --git a/dbms/tests/config/server.key
b/tests/config/server.key similarity index 100% rename from dbms/tests/config/server.key rename to tests/config/server.key diff --git a/dbms/tests/config/strings_dictionary.xml b/tests/config/strings_dictionary.xml similarity index 100% rename from dbms/tests/config/strings_dictionary.xml rename to tests/config/strings_dictionary.xml diff --git a/dbms/tests/config/text_log.xml b/tests/config/text_log.xml similarity index 100% rename from dbms/tests/config/text_log.xml rename to tests/config/text_log.xml diff --git a/dbms/tests/config/zookeeper.xml b/tests/config/zookeeper.xml similarity index 100% rename from dbms/tests/config/zookeeper.xml rename to tests/config/zookeeper.xml diff --git a/dbms/tests/decimals_dictionary.xml b/tests/decimals_dictionary.xml similarity index 100% rename from dbms/tests/decimals_dictionary.xml rename to tests/decimals_dictionary.xml diff --git a/dbms/tests/external_models/catboost/data/build_catboost.sh b/tests/external_models/catboost/data/build_catboost.sh similarity index 100% rename from dbms/tests/external_models/catboost/data/build_catboost.sh rename to tests/external_models/catboost/data/build_catboost.sh diff --git a/dbms/tests/external_models/catboost/helpers/__init__.py b/tests/external_models/catboost/helpers/__init__.py similarity index 100% rename from dbms/tests/external_models/catboost/helpers/__init__.py rename to tests/external_models/catboost/helpers/__init__.py diff --git a/dbms/tests/external_models/catboost/helpers/client.py b/tests/external_models/catboost/helpers/client.py similarity index 100% rename from dbms/tests/external_models/catboost/helpers/client.py rename to tests/external_models/catboost/helpers/client.py diff --git a/dbms/tests/external_models/catboost/helpers/generate.py b/tests/external_models/catboost/helpers/generate.py similarity index 100% rename from dbms/tests/external_models/catboost/helpers/generate.py rename to tests/external_models/catboost/helpers/generate.py diff --git a/dbms/tests/external_models/catboost/helpers/server.py b/tests/external_models/catboost/helpers/server.py similarity index 100% rename from dbms/tests/external_models/catboost/helpers/server.py rename to tests/external_models/catboost/helpers/server.py diff --git a/dbms/tests/external_models/catboost/helpers/server_with_models.py b/tests/external_models/catboost/helpers/server_with_models.py similarity index 100% rename from dbms/tests/external_models/catboost/helpers/server_with_models.py rename to tests/external_models/catboost/helpers/server_with_models.py diff --git a/dbms/tests/external_models/catboost/helpers/table.py b/tests/external_models/catboost/helpers/table.py similarity index 100% rename from dbms/tests/external_models/catboost/helpers/table.py rename to tests/external_models/catboost/helpers/table.py diff --git a/dbms/tests/external_models/catboost/helpers/train.py b/tests/external_models/catboost/helpers/train.py similarity index 100% rename from dbms/tests/external_models/catboost/helpers/train.py rename to tests/external_models/catboost/helpers/train.py diff --git a/dbms/tests/external_models/catboost/pytest.ini b/tests/external_models/catboost/pytest.ini similarity index 100% rename from dbms/tests/external_models/catboost/pytest.ini rename to tests/external_models/catboost/pytest.ini diff --git a/dbms/tests/external_models/catboost/test_apply_catboost_model/test.py b/tests/external_models/catboost/test_apply_catboost_model/test.py similarity index 100% rename from 
dbms/tests/external_models/catboost/test_apply_catboost_model/test.py rename to tests/external_models/catboost/test_apply_catboost_model/test.py
diff --git a/dbms/tests/instructions/clang-tidy.txt b/tests/instructions/clang-tidy.txt
similarity index 100%
rename from dbms/tests/instructions/clang-tidy.txt
rename to tests/instructions/clang-tidy.txt
diff --git a/dbms/tests/instructions/coverity.txt b/tests/instructions/coverity.txt
similarity index 100%
rename from dbms/tests/instructions/coverity.txt
rename to tests/instructions/coverity.txt
diff --git a/dbms/tests/instructions/cppcheck.txt b/tests/instructions/cppcheck.txt
similarity index 100%
rename from dbms/tests/instructions/cppcheck.txt
rename to tests/instructions/cppcheck.txt
diff --git a/dbms/tests/instructions/developer_instruction_en.md b/tests/instructions/developer_instruction_en.md
similarity index 100%
rename from dbms/tests/instructions/developer_instruction_en.md
rename to tests/instructions/developer_instruction_en.md
diff --git a/dbms/tests/instructions/developer_instruction_ru.md b/tests/instructions/developer_instruction_ru.md
similarity index 100%
rename from dbms/tests/instructions/developer_instruction_ru.md
rename to tests/instructions/developer_instruction_ru.md
diff --git a/dbms/tests/instructions/easy_tasks_sorted_ru.md b/tests/instructions/easy_tasks_sorted_ru.md
similarity index 99%
rename from dbms/tests/instructions/easy_tasks_sorted_ru.md
rename to tests/instructions/easy_tasks_sorted_ru.md
index cb94fa1885f..2dd60f97db3 100644
--- a/dbms/tests/instructions/easy_tasks_sorted_ru.md
+++ b/tests/instructions/easy_tasks_sorted_ru.md
@@ -2,7 +2,7 @@

## Usability shortcomings when the user cannot read the client config.

-`dbms/programs/client/Client.cpp`
+`programs/client/Client.cpp`

Run `chmod 000 /etc/clickhouse-client/config.xml` and see what happens.
diff --git a/dbms/tests/instructions/heap-profiler.txt b/tests/instructions/heap-profiler.txt
similarity index 92%
rename from dbms/tests/instructions/heap-profiler.txt
rename to tests/instructions/heap-profiler.txt
index dd188f751a5..3c35e9cf518 100644
--- a/dbms/tests/instructions/heap-profiler.txt
+++ b/tests/instructions/heap-profiler.txt
@@ -1,7 +1,7 @@
Build clickhouse without tcmalloc.
cmake -D ENABLE_TCMALLOC=0

Copy clickhouse binary to your server.
-scp dbms/programs/clickhouse server:~
+scp programs/clickhouse server:~

ssh to your server
diff --git a/dbms/tests/instructions/jemalloc_memory_profile.txt b/tests/instructions/jemalloc_memory_profile.txt
similarity index 100%
rename from dbms/tests/instructions/jemalloc_memory_profile.txt
rename to tests/instructions/jemalloc_memory_profile.txt
diff --git a/dbms/tests/instructions/kafka.txt b/tests/instructions/kafka.txt
similarity index 100%
rename from dbms/tests/instructions/kafka.txt
rename to tests/instructions/kafka.txt
diff --git a/dbms/tests/instructions/ninja_trace.txt b/tests/instructions/ninja_trace.txt
similarity index 100%
rename from dbms/tests/instructions/ninja_trace.txt
rename to tests/instructions/ninja_trace.txt
diff --git a/dbms/tests/instructions/pvs-studio.txt b/tests/instructions/pvs-studio.txt
similarity index 100%
rename from dbms/tests/instructions/pvs-studio.txt
rename to tests/instructions/pvs-studio.txt
diff --git a/dbms/tests/instructions/sanitizers.md b/tests/instructions/sanitizers.md
similarity index 96%
rename from dbms/tests/instructions/sanitizers.md
rename to tests/instructions/sanitizers.md
index 45e1304e2a1..b501f946b46 100644
--- a/dbms/tests/instructions/sanitizers.md
+++ b/tests/instructions/sanitizers.md
@@ -16,7 +16,7 @@ ninja

## Copy binary to your server

```
-scp ./dbms/programs/clickhouse yourserver:~/clickhouse-asan
+scp ./programs/clickhouse yourserver:~/clickhouse-asan
```

## Start ClickHouse and run tests
diff --git a/dbms/tests/instructions/syntax.txt b/tests/instructions/syntax.txt
similarity index 100%
rename from dbms/tests/instructions/syntax.txt
rename to tests/instructions/syntax.txt
diff --git a/dbms/tests/instructions/tscancode.txt b/tests/instructions/tscancode.txt
similarity index 100%
rename from dbms/tests/instructions/tscancode.txt
rename to tests/instructions/tscancode.txt
diff --git a/dbms/tests/integration/.dockerignore b/tests/integration/.dockerignore
similarity index 100%
rename from dbms/tests/integration/.dockerignore
rename to tests/integration/.dockerignore
diff --git a/dbms/tests/integration/.gitignore b/tests/integration/.gitignore
similarity index 100%
rename from dbms/tests/integration/.gitignore
rename to tests/integration/.gitignore
diff --git a/dbms/tests/integration/CMakeLists.txt b/tests/integration/CMakeLists.txt
similarity index 69%
rename from dbms/tests/integration/CMakeLists.txt
rename to tests/integration/CMakeLists.txt
index 54d5f5e727a..8280464051f 100644
--- a/dbms/tests/integration/CMakeLists.txt
+++ b/tests/integration/CMakeLists.txt
@@ -1,7 +1,7 @@
if(CLICKHOUSE_SPLIT_BINARY)
- set (TEST_USE_BINARIES CLICKHOUSE_TESTS_SERVER_BIN_PATH=${ClickHouse_BINARY_DIR}/dbms/programs/clickhouse-server CLICKHOUSE_TESTS_CLIENT_BIN_PATH=${ClickHouse_BINARY_DIR}/dbms/programs/clickhouse-client)
+ set (TEST_USE_BINARIES CLICKHOUSE_TESTS_SERVER_BIN_PATH=${ClickHouse_BINARY_DIR}/programs/clickhouse-server CLICKHOUSE_TESTS_CLIENT_BIN_PATH=${ClickHouse_BINARY_DIR}/programs/clickhouse-client)
else()
- set (TEST_USE_BINARIES CLICKHOUSE_TESTS_SERVER_BIN_PATH=${ClickHouse_BINARY_DIR}/dbms/programs/clickhouse CLICKHOUSE_TESTS_CLIENT_BIN_PATH=${ClickHouse_BINARY_DIR}/dbms/programs/clickhouse)
+ set (TEST_USE_BINARIES CLICKHOUSE_TESTS_SERVER_BIN_PATH=${ClickHouse_BINARY_DIR}/programs/clickhouse CLICKHOUSE_TESTS_CLIENT_BIN_PATH=${ClickHouse_BINARY_DIR}/programs/clickhouse)
endif()

find_program(DOCKER_CMD docker)
@@ -12,13 +12,13 @@ find_program(SUDO_CMD sudo)

# will mount only one binary to docker container - build with .so can't work
if(MAKE_STATIC_LIBRARIES AND DOCKER_CMD)
if(INTEGRATION_USE_RUNNER AND SUDO_CMD)
- add_test(NAME integration-runner WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} COMMAND ${SUDO_CMD} ${CMAKE_CURRENT_SOURCE_DIR}/runner --binary ${ClickHouse_BINARY_DIR}/dbms/programs/clickhouse --configs-dir ${ClickHouse_SOURCE_DIR}/dbms/programs/server/)
+ add_test(NAME integration-runner WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} COMMAND ${SUDO_CMD} ${CMAKE_CURRENT_SOURCE_DIR}/runner --binary ${ClickHouse_BINARY_DIR}/programs/clickhouse --configs-dir ${ClickHouse_SOURCE_DIR}/programs/server/)
message(STATUS "Using tests in docker with runner SUDO=${SUDO_CMD}; DOCKER=${DOCKER_CMD};")
endif()
if(NOT INTEGRATION_USE_RUNNER AND DOCKER_COMPOSE_CMD AND PYTEST_CMD)
# To run one test with debug:
# cmake . -DPYTEST_OPT="-ss;test_cluster_copier"
- add_test(NAME integration-pytest WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} COMMAND env ${TEST_USE_BINARIES} "CLICKHOUSE_TESTS_BASE_CONFIG_DIR=${ClickHouse_SOURCE_DIR}/dbms/programs/server/" ${PYTEST_STARTER} ${PYTEST_CMD} ${PYTEST_OPT})
+ add_test(NAME integration-pytest WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} COMMAND env ${TEST_USE_BINARIES} "CLICKHOUSE_TESTS_BASE_CONFIG_DIR=${ClickHouse_SOURCE_DIR}/programs/server/" ${PYTEST_STARTER} ${PYTEST_CMD} ${PYTEST_OPT})
message(STATUS "Using tests in docker DOCKER=${DOCKER_CMD}; DOCKER_COMPOSE=${DOCKER_COMPOSE_CMD}; PYTEST=${PYTEST_STARTER} ${PYTEST_CMD} ${PYTEST_OPT}")
endif()
endif()
diff --git a/dbms/tests/integration/README.md b/tests/integration/README.md
similarity index 89%
rename from dbms/tests/integration/README.md
rename to tests/integration/README.md
index 64d8b29e35a..6ac224c449f 100644
--- a/dbms/tests/integration/README.md
+++ b/tests/integration/README.md
@@ -44,11 +44,11 @@ Notes:

You can run tests via the `./runner` script, passing pytest arguments as the last argument:

```
-$ ./runner --binary $HOME/ClickHouse/dbms/programs/clickhouse --bridge-binary $HOME/ClickHouse/dbms/programs/clickhouse-odbc-bridge --configs-dir $HOME/ClickHouse/dbms/programs/server/ 'test_odbc_interaction -ss'
+$ ./runner --binary $HOME/ClickHouse/programs/clickhouse --bridge-binary $HOME/ClickHouse/programs/clickhouse-odbc-bridge --configs-dir $HOME/ClickHouse/programs/server/ 'test_odbc_interaction -ss'
Start tests
============================= test session starts ==============================
platform linux2 -- Python 2.7.15rc1, pytest-4.0.0, py-1.7.0, pluggy-0.8.0
-rootdir: /ClickHouse/dbms/tests/integration, inifile: pytest.ini
+rootdir: /ClickHouse/tests/integration, inifile: pytest.ini
collected 6 items

test_odbc_interaction/test.py Removing network clickhouse_default
@@ -68,15 +68,15 @@ Removing network roottestodbcinteraction_default

Paths to the binary and configs may be specified via env variables:

```
-$ export CLICKHOUSE_TESTS_BASE_CONFIG_DIR=$HOME/ClickHouse/dbms/programs/server/
-$ export CLICKHOUSE_TESTS_SERVER_BIN_PATH=$HOME/ClickHouse/dbms/programs/clickhouse
-$ export CLICKHOUSE_TESTS_ODBC_BRIDGE_BIN_PATH=$HOME/ClickHouse/dbms/programs/clickhouse-odbc-bridge
+$ export CLICKHOUSE_TESTS_BASE_CONFIG_DIR=$HOME/ClickHouse/programs/server/
+$ export CLICKHOUSE_TESTS_SERVER_BIN_PATH=$HOME/ClickHouse/programs/clickhouse
+$ export CLICKHOUSE_TESTS_ODBC_BRIDGE_BIN_PATH=$HOME/ClickHouse/programs/clickhouse-odbc-bridge
$ ./runner 'test_odbc_interaction'
$ # or ./runner '-v -ss'
Start tests
============================= test session starts ==============================
platform linux2 -- Python 2.7.15rc1, pytest-4.0.0,
py-1.7.0, pluggy-0.8.0 -rootdir: /ClickHouse/dbms/tests/integration, inifile: pytest.ini +rootdir: /ClickHouse/tests/integration, inifile: pytest.ini collected 6 items test_odbc_interaction/test.py ...... [100%] @@ -94,7 +94,7 @@ cd docker/test/integration docker build -t yandex/clickhouse-integration-test . ``` -The helper container used by the `runner` script is in `dbms/tests/integration/image/Dockerfile`. +The helper container used by the `runner` script is in `tests/integration/image/Dockerfile`. ### Adding new tests diff --git a/dbms/tests/integration/conftest.py b/tests/integration/conftest.py similarity index 100% rename from dbms/tests/integration/conftest.py rename to tests/integration/conftest.py diff --git a/dbms/tests/integration/helpers/0_common_instance_config.xml b/tests/integration/helpers/0_common_instance_config.xml similarity index 100% rename from dbms/tests/integration/helpers/0_common_instance_config.xml rename to tests/integration/helpers/0_common_instance_config.xml diff --git a/dbms/tests/integration/helpers/__init__.py b/tests/integration/helpers/__init__.py similarity index 100% rename from dbms/tests/integration/helpers/__init__.py rename to tests/integration/helpers/__init__.py diff --git a/dbms/tests/integration/helpers/client.py b/tests/integration/helpers/client.py similarity index 100% rename from dbms/tests/integration/helpers/client.py rename to tests/integration/helpers/client.py diff --git a/dbms/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py similarity index 100% rename from dbms/tests/integration/helpers/cluster.py rename to tests/integration/helpers/cluster.py diff --git a/dbms/tests/integration/helpers/docker_compose_hdfs.yml b/tests/integration/helpers/docker_compose_hdfs.yml similarity index 100% rename from dbms/tests/integration/helpers/docker_compose_hdfs.yml rename to tests/integration/helpers/docker_compose_hdfs.yml diff --git a/dbms/tests/integration/helpers/docker_compose_kafka.yml b/tests/integration/helpers/docker_compose_kafka.yml similarity index 100% rename from dbms/tests/integration/helpers/docker_compose_kafka.yml rename to tests/integration/helpers/docker_compose_kafka.yml diff --git a/dbms/tests/integration/helpers/docker_compose_minio.yml b/tests/integration/helpers/docker_compose_minio.yml similarity index 100% rename from dbms/tests/integration/helpers/docker_compose_minio.yml rename to tests/integration/helpers/docker_compose_minio.yml diff --git a/dbms/tests/integration/helpers/docker_compose_mongo.yml b/tests/integration/helpers/docker_compose_mongo.yml similarity index 100% rename from dbms/tests/integration/helpers/docker_compose_mongo.yml rename to tests/integration/helpers/docker_compose_mongo.yml diff --git a/dbms/tests/integration/helpers/docker_compose_mysql.yml b/tests/integration/helpers/docker_compose_mysql.yml similarity index 100% rename from dbms/tests/integration/helpers/docker_compose_mysql.yml rename to tests/integration/helpers/docker_compose_mysql.yml diff --git a/dbms/tests/integration/helpers/docker_compose_net.yml b/tests/integration/helpers/docker_compose_net.yml similarity index 100% rename from dbms/tests/integration/helpers/docker_compose_net.yml rename to tests/integration/helpers/docker_compose_net.yml diff --git a/dbms/tests/integration/helpers/docker_compose_postgres.yml b/tests/integration/helpers/docker_compose_postgres.yml similarity index 100% rename from dbms/tests/integration/helpers/docker_compose_postgres.yml rename to 
tests/integration/helpers/docker_compose_postgres.yml diff --git a/dbms/tests/integration/helpers/docker_compose_redis.yml b/tests/integration/helpers/docker_compose_redis.yml similarity index 100% rename from dbms/tests/integration/helpers/docker_compose_redis.yml rename to tests/integration/helpers/docker_compose_redis.yml diff --git a/dbms/tests/integration/helpers/docker_compose_zookeeper.yml b/tests/integration/helpers/docker_compose_zookeeper.yml similarity index 100% rename from dbms/tests/integration/helpers/docker_compose_zookeeper.yml rename to tests/integration/helpers/docker_compose_zookeeper.yml diff --git a/dbms/tests/integration/helpers/hdfs_api.py b/tests/integration/helpers/hdfs_api.py similarity index 100% rename from dbms/tests/integration/helpers/hdfs_api.py rename to tests/integration/helpers/hdfs_api.py diff --git a/dbms/tests/integration/helpers/helper_container/Dockerfile b/tests/integration/helpers/helper_container/Dockerfile similarity index 100% rename from dbms/tests/integration/helpers/helper_container/Dockerfile rename to tests/integration/helpers/helper_container/Dockerfile diff --git a/dbms/tests/integration/helpers/network.py b/tests/integration/helpers/network.py similarity index 100% rename from dbms/tests/integration/helpers/network.py rename to tests/integration/helpers/network.py diff --git a/dbms/tests/integration/helpers/test_tools.py b/tests/integration/helpers/test_tools.py similarity index 100% rename from dbms/tests/integration/helpers/test_tools.py rename to tests/integration/helpers/test_tools.py diff --git a/dbms/tests/integration/helpers/zookeeper_config.xml b/tests/integration/helpers/zookeeper_config.xml similarity index 100% rename from dbms/tests/integration/helpers/zookeeper_config.xml rename to tests/integration/helpers/zookeeper_config.xml diff --git a/dbms/tests/integration/image/Dockerfile b/tests/integration/image/Dockerfile similarity index 100% rename from dbms/tests/integration/image/Dockerfile rename to tests/integration/image/Dockerfile diff --git a/dbms/tests/integration/image/dockerd-entrypoint.sh b/tests/integration/image/dockerd-entrypoint.sh similarity index 92% rename from dbms/tests/integration/image/dockerd-entrypoint.sh rename to tests/integration/image/dockerd-entrypoint.sh index 89ccd78823d..8b0682396f8 100755 --- a/dbms/tests/integration/image/dockerd-entrypoint.sh +++ b/tests/integration/image/dockerd-entrypoint.sh @@ -22,5 +22,5 @@ export CLICKHOUSE_TESTS_CLIENT_BIN_PATH=/clickhouse export CLICKHOUSE_TESTS_BASE_CONFIG_DIR=/clickhouse-config export CLICKHOUSE_ODBC_BRIDGE_BINARY_PATH=/clickhouse-odbc-bridge -cd /ClickHouse/dbms/tests/integration -exec "$@" \ No newline at end of file +cd /ClickHouse/tests/integration +exec "$@" diff --git a/dbms/tests/integration/image/modprobe.sh b/tests/integration/image/modprobe.sh similarity index 100% rename from dbms/tests/integration/image/modprobe.sh rename to tests/integration/image/modprobe.sh diff --git a/dbms/tests/integration/pytest.ini b/tests/integration/pytest.ini similarity index 100% rename from dbms/tests/integration/pytest.ini rename to tests/integration/pytest.ini diff --git a/dbms/tests/integration/runner b/tests/integration/runner similarity index 98% rename from dbms/tests/integration/runner rename to tests/integration/runner index cd148d1fe72..8cd37e0386c 100755 --- a/dbms/tests/integration/runner +++ b/tests/integration/runner @@ -57,7 +57,7 @@ if __name__ == "__main__": parser.add_argument( "--configs-dir", - 
default=os.environ.get("CLICKHOUSE_TESTS_BASE_CONFIG_DIR", os.path.join(DEFAULT_CLICKHOUSE_ROOT, "dbms/programs/server")), + default=os.environ.get("CLICKHOUSE_TESTS_BASE_CONFIG_DIR", os.path.join(DEFAULT_CLICKHOUSE_ROOT, "programs/server")), help="Path to clickhouse configs directory") parser.add_argument( diff --git a/dbms/tests/integration/test_adaptive_granularity/__init__.py b/tests/integration/test_adaptive_granularity/__init__.py similarity index 100% rename from dbms/tests/integration/test_adaptive_granularity/__init__.py rename to tests/integration/test_adaptive_granularity/__init__.py diff --git a/dbms/tests/integration/test_adaptive_granularity/configs/log_conf.xml b/tests/integration/test_adaptive_granularity/configs/log_conf.xml similarity index 100% rename from dbms/tests/integration/test_adaptive_granularity/configs/log_conf.xml rename to tests/integration/test_adaptive_granularity/configs/log_conf.xml diff --git a/dbms/tests/integration/test_adaptive_granularity/configs/merge_tree_settings.xml b/tests/integration/test_adaptive_granularity/configs/merge_tree_settings.xml similarity index 100% rename from dbms/tests/integration/test_adaptive_granularity/configs/merge_tree_settings.xml rename to tests/integration/test_adaptive_granularity/configs/merge_tree_settings.xml diff --git a/dbms/tests/integration/test_adaptive_granularity/configs/remote_servers.xml b/tests/integration/test_adaptive_granularity/configs/remote_servers.xml similarity index 100% rename from dbms/tests/integration/test_adaptive_granularity/configs/remote_servers.xml rename to tests/integration/test_adaptive_granularity/configs/remote_servers.xml diff --git a/dbms/tests/integration/test_adaptive_granularity/test.py b/tests/integration/test_adaptive_granularity/test.py similarity index 100% rename from dbms/tests/integration/test_adaptive_granularity/test.py rename to tests/integration/test_adaptive_granularity/test.py diff --git a/dbms/tests/integration/test_adaptive_granularity_replicated/__init__.py b/tests/integration/test_adaptive_granularity_replicated/__init__.py similarity index 100% rename from dbms/tests/integration/test_adaptive_granularity_replicated/__init__.py rename to tests/integration/test_adaptive_granularity_replicated/__init__.py diff --git a/dbms/tests/integration/test_adaptive_granularity_replicated/test.py b/tests/integration/test_adaptive_granularity_replicated/test.py similarity index 100% rename from dbms/tests/integration/test_adaptive_granularity_replicated/test.py rename to tests/integration/test_adaptive_granularity_replicated/test.py diff --git a/dbms/tests/integration/test_aggregation_memory_efficient/__init__.py b/tests/integration/test_aggregation_memory_efficient/__init__.py similarity index 100% rename from dbms/tests/integration/test_aggregation_memory_efficient/__init__.py rename to tests/integration/test_aggregation_memory_efficient/__init__.py diff --git a/dbms/tests/integration/test_aggregation_memory_efficient/test.py b/tests/integration/test_aggregation_memory_efficient/test.py similarity index 100% rename from dbms/tests/integration/test_aggregation_memory_efficient/test.py rename to tests/integration/test_aggregation_memory_efficient/test.py diff --git a/dbms/tests/integration/test_allowed_client_hosts/__init__.py b/tests/integration/test_allowed_client_hosts/__init__.py similarity index 100% rename from dbms/tests/integration/test_allowed_client_hosts/__init__.py rename to tests/integration/test_allowed_client_hosts/__init__.py diff --git 
similarity index 100%
rename from dbms/tests/integration/test_allowed_client_hosts/configs/users.d/network.xml
rename to tests/integration/test_allowed_client_hosts/configs/users.d/network.xml
diff --git a/dbms/tests/integration/test_allowed_client_hosts/configs/users.xml b/tests/integration/test_allowed_client_hosts/configs/users.xml
similarity index 100%
rename from dbms/tests/integration/test_allowed_client_hosts/configs/users.xml
rename to tests/integration/test_allowed_client_hosts/configs/users.xml
diff --git a/dbms/tests/integration/test_allowed_client_hosts/test.py b/tests/integration/test_allowed_client_hosts/test.py
similarity index 100%
rename from dbms/tests/integration/test_allowed_client_hosts/test.py
rename to tests/integration/test_allowed_client_hosts/test.py
diff --git a/dbms/tests/integration/test_allowed_url_from_config/__init__.py b/tests/integration/test_allowed_url_from_config/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_allowed_url_from_config/__init__.py
rename to tests/integration/test_allowed_url_from_config/__init__.py
diff --git a/dbms/tests/integration/test_allowed_url_from_config/configs/config_for_redirect.xml b/tests/integration/test_allowed_url_from_config/configs/config_for_redirect.xml
similarity index 100%
rename from dbms/tests/integration/test_allowed_url_from_config/configs/config_for_redirect.xml
rename to tests/integration/test_allowed_url_from_config/configs/config_for_redirect.xml
diff --git a/dbms/tests/integration/test_allowed_url_from_config/configs/config_for_remote.xml b/tests/integration/test_allowed_url_from_config/configs/config_for_remote.xml
similarity index 100%
rename from dbms/tests/integration/test_allowed_url_from_config/configs/config_for_remote.xml
rename to tests/integration/test_allowed_url_from_config/configs/config_for_remote.xml
diff --git a/dbms/tests/integration/test_allowed_url_from_config/configs/config_with_hosts.xml b/tests/integration/test_allowed_url_from_config/configs/config_with_hosts.xml
similarity index 100%
rename from dbms/tests/integration/test_allowed_url_from_config/configs/config_with_hosts.xml
rename to tests/integration/test_allowed_url_from_config/configs/config_with_hosts.xml
diff --git a/dbms/tests/integration/test_allowed_url_from_config/configs/config_with_only_primary_hosts.xml b/tests/integration/test_allowed_url_from_config/configs/config_with_only_primary_hosts.xml
similarity index 100%
rename from dbms/tests/integration/test_allowed_url_from_config/configs/config_with_only_primary_hosts.xml
rename to tests/integration/test_allowed_url_from_config/configs/config_with_only_primary_hosts.xml
diff --git a/dbms/tests/integration/test_allowed_url_from_config/configs/config_with_only_regexp_hosts.xml b/tests/integration/test_allowed_url_from_config/configs/config_with_only_regexp_hosts.xml
similarity index 100%
rename from dbms/tests/integration/test_allowed_url_from_config/configs/config_with_only_regexp_hosts.xml
rename to tests/integration/test_allowed_url_from_config/configs/config_with_only_regexp_hosts.xml
diff --git a/dbms/tests/integration/test_allowed_url_from_config/configs/config_without_allowed_hosts.xml b/tests/integration/test_allowed_url_from_config/configs/config_without_allowed_hosts.xml
similarity index 100%
rename from dbms/tests/integration/test_allowed_url_from_config/configs/config_without_allowed_hosts.xml
rename to tests/integration/test_allowed_url_from_config/configs/config_without_allowed_hosts.xml
diff --git a/dbms/tests/integration/test_allowed_url_from_config/test.py b/tests/integration/test_allowed_url_from_config/test.py
similarity index 100%
rename from dbms/tests/integration/test_allowed_url_from_config/test.py
rename to tests/integration/test_allowed_url_from_config/test.py
diff --git a/dbms/tests/integration/test_atomic_drop_table/__init__.py b/tests/integration/test_atomic_drop_table/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_atomic_drop_table/__init__.py
rename to tests/integration/test_atomic_drop_table/__init__.py
diff --git a/dbms/tests/integration/test_atomic_drop_table/configs/config.d/zookeeper_session_timeout.xml b/tests/integration/test_atomic_drop_table/configs/config.d/zookeeper_session_timeout.xml
similarity index 100%
rename from dbms/tests/integration/test_atomic_drop_table/configs/config.d/zookeeper_session_timeout.xml
rename to tests/integration/test_atomic_drop_table/configs/config.d/zookeeper_session_timeout.xml
diff --git a/dbms/tests/integration/test_atomic_drop_table/configs/remote_servers.xml b/tests/integration/test_atomic_drop_table/configs/remote_servers.xml
similarity index 100%
rename from dbms/tests/integration/test_atomic_drop_table/configs/remote_servers.xml
rename to tests/integration/test_atomic_drop_table/configs/remote_servers.xml
diff --git a/dbms/tests/integration/test_atomic_drop_table/test.py b/tests/integration/test_atomic_drop_table/test.py
similarity index 100%
rename from dbms/tests/integration/test_atomic_drop_table/test.py
rename to tests/integration/test_atomic_drop_table/test.py
diff --git a/dbms/tests/integration/test_authentication/__init__.py b/tests/integration/test_authentication/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_authentication/__init__.py
rename to tests/integration/test_authentication/__init__.py
diff --git a/dbms/tests/integration/test_authentication/test.py b/tests/integration/test_authentication/test.py
similarity index 100%
rename from dbms/tests/integration/test_authentication/test.py
rename to tests/integration/test_authentication/test.py
diff --git a/dbms/tests/integration/test_backup_restore/__init__.py b/tests/integration/test_backup_restore/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_backup_restore/__init__.py
rename to tests/integration/test_backup_restore/__init__.py
diff --git a/dbms/tests/integration/test_backup_restore/test.py b/tests/integration/test_backup_restore/test.py
similarity index 100%
rename from dbms/tests/integration/test_backup_restore/test.py
rename to tests/integration/test_backup_restore/test.py
diff --git a/dbms/tests/integration/test_backward_compatability/__init__.py b/tests/integration/test_backward_compatability/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_backward_compatability/__init__.py
rename to tests/integration/test_backward_compatability/__init__.py
diff --git a/dbms/tests/integration/test_backward_compatability/test.py b/tests/integration/test_backward_compatability/test.py
similarity index 100%
rename from dbms/tests/integration/test_backward_compatability/test.py
rename to tests/integration/test_backward_compatability/test.py
diff --git a/dbms/tests/integration/test_block_structure_mismatch/__init__.py b/tests/integration/test_block_structure_mismatch/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_block_structure_mismatch/__init__.py
rename to tests/integration/test_block_structure_mismatch/__init__.py
diff --git a/dbms/tests/integration/test_block_structure_mismatch/configs/remote_servers.xml b/tests/integration/test_block_structure_mismatch/configs/remote_servers.xml
similarity index 100%
rename from dbms/tests/integration/test_block_structure_mismatch/configs/remote_servers.xml
rename to tests/integration/test_block_structure_mismatch/configs/remote_servers.xml
diff --git a/dbms/tests/integration/test_block_structure_mismatch/test.py b/tests/integration/test_block_structure_mismatch/test.py
similarity index 100%
rename from dbms/tests/integration/test_block_structure_mismatch/test.py
rename to tests/integration/test_block_structure_mismatch/test.py
diff --git a/dbms/tests/integration/test_check_table/__init__.py b/tests/integration/test_check_table/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_check_table/__init__.py
rename to tests/integration/test_check_table/__init__.py
diff --git a/dbms/tests/integration/test_check_table/test.py b/tests/integration/test_check_table/test.py
similarity index 100%
rename from dbms/tests/integration/test_check_table/test.py
rename to tests/integration/test_check_table/test.py
diff --git a/dbms/tests/integration/test_cluster_all_replicas/__init__.py b/tests/integration/test_cluster_all_replicas/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_cluster_all_replicas/__init__.py
rename to tests/integration/test_cluster_all_replicas/__init__.py
diff --git a/dbms/tests/integration/test_cluster_all_replicas/configs/remote_servers.xml b/tests/integration/test_cluster_all_replicas/configs/remote_servers.xml
similarity index 100%
rename from dbms/tests/integration/test_cluster_all_replicas/configs/remote_servers.xml
rename to tests/integration/test_cluster_all_replicas/configs/remote_servers.xml
diff --git a/dbms/tests/integration/test_cluster_all_replicas/test.py b/tests/integration/test_cluster_all_replicas/test.py
similarity index 100%
rename from dbms/tests/integration/test_cluster_all_replicas/test.py
rename to tests/integration/test_cluster_all_replicas/test.py
diff --git a/dbms/tests/integration/test_cluster_copier/__init__.py b/tests/integration/test_cluster_copier/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_cluster_copier/__init__.py
rename to tests/integration/test_cluster_copier/__init__.py
diff --git a/dbms/tests/integration/test_cluster_copier/configs/conf.d/clusters.xml b/tests/integration/test_cluster_copier/configs/conf.d/clusters.xml
similarity index 100%
rename from dbms/tests/integration/test_cluster_copier/configs/conf.d/clusters.xml
rename to tests/integration/test_cluster_copier/configs/conf.d/clusters.xml
diff --git a/dbms/tests/integration/test_cluster_copier/configs/conf.d/ddl.xml b/tests/integration/test_cluster_copier/configs/conf.d/ddl.xml
similarity index 100%
rename from dbms/tests/integration/test_cluster_copier/configs/conf.d/ddl.xml
rename to tests/integration/test_cluster_copier/configs/conf.d/ddl.xml
diff --git a/dbms/tests/integration/test_cluster_copier/configs/conf.d/query_log.xml b/tests/integration/test_cluster_copier/configs/conf.d/query_log.xml
similarity index 100%
rename from dbms/tests/integration/test_cluster_copier/configs/conf.d/query_log.xml
rename to tests/integration/test_cluster_copier/configs/conf.d/query_log.xml
diff --git a/dbms/tests/integration/test_cluster_copier/configs/config-copier.xml b/tests/integration/test_cluster_copier/configs/config-copier.xml
similarity index 100%
rename from dbms/tests/integration/test_cluster_copier/configs/config-copier.xml
rename to tests/integration/test_cluster_copier/configs/config-copier.xml
diff --git a/dbms/tests/integration/test_cluster_copier/configs/users.xml b/tests/integration/test_cluster_copier/configs/users.xml
similarity index 100%
rename from dbms/tests/integration/test_cluster_copier/configs/users.xml
rename to tests/integration/test_cluster_copier/configs/users.xml
diff --git a/dbms/tests/integration/test_cluster_copier/task0_description.xml b/tests/integration/test_cluster_copier/task0_description.xml
similarity index 100%
rename from dbms/tests/integration/test_cluster_copier/task0_description.xml
rename to tests/integration/test_cluster_copier/task0_description.xml
diff --git a/dbms/tests/integration/test_cluster_copier/task_month_to_week_description.xml b/tests/integration/test_cluster_copier/task_month_to_week_description.xml
similarity index 100%
rename from dbms/tests/integration/test_cluster_copier/task_month_to_week_description.xml
rename to tests/integration/test_cluster_copier/task_month_to_week_description.xml
diff --git a/dbms/tests/integration/test_cluster_copier/task_no_arg.xml b/tests/integration/test_cluster_copier/task_no_arg.xml
similarity index 100%
rename from dbms/tests/integration/test_cluster_copier/task_no_arg.xml
rename to tests/integration/test_cluster_copier/task_no_arg.xml
diff --git a/dbms/tests/integration/test_cluster_copier/task_no_index.xml b/tests/integration/test_cluster_copier/task_no_index.xml
similarity index 100%
rename from dbms/tests/integration/test_cluster_copier/task_no_index.xml
rename to tests/integration/test_cluster_copier/task_no_index.xml
diff --git a/dbms/tests/integration/test_cluster_copier/task_test_block_size.xml b/tests/integration/test_cluster_copier/task_test_block_size.xml
similarity index 100%
rename from dbms/tests/integration/test_cluster_copier/task_test_block_size.xml
rename to tests/integration/test_cluster_copier/task_test_block_size.xml
diff --git a/dbms/tests/integration/test_cluster_copier/task_trivial.xml b/tests/integration/test_cluster_copier/task_trivial.xml
similarity index 100%
rename from dbms/tests/integration/test_cluster_copier/task_trivial.xml
rename to tests/integration/test_cluster_copier/task_trivial.xml
diff --git a/dbms/tests/integration/test_cluster_copier/test.py b/tests/integration/test_cluster_copier/test.py
similarity index 100%
rename from dbms/tests/integration/test_cluster_copier/test.py
rename to tests/integration/test_cluster_copier/test.py
diff --git a/dbms/tests/integration/test_cluster_copier/trivial_test.py b/tests/integration/test_cluster_copier/trivial_test.py
similarity index 100%
rename from dbms/tests/integration/test_cluster_copier/trivial_test.py
rename to tests/integration/test_cluster_copier/trivial_test.py
diff --git a/dbms/tests/integration/test_concurrent_queries_for_user_restriction/__init__.py b/tests/integration/test_concurrent_queries_for_user_restriction/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_concurrent_queries_for_user_restriction/__init__.py
rename to tests/integration/test_concurrent_queries_for_user_restriction/__init__.py
diff --git a/dbms/tests/integration/test_concurrent_queries_for_user_restriction/configs/user_restrictions.xml b/tests/integration/test_concurrent_queries_for_user_restriction/configs/user_restrictions.xml
similarity index 100%
rename from dbms/tests/integration/test_concurrent_queries_for_user_restriction/configs/user_restrictions.xml
rename to tests/integration/test_concurrent_queries_for_user_restriction/configs/user_restrictions.xml
diff --git a/dbms/tests/integration/test_concurrent_queries_for_user_restriction/test.py b/tests/integration/test_concurrent_queries_for_user_restriction/test.py
similarity index 100%
rename from dbms/tests/integration/test_concurrent_queries_for_user_restriction/test.py
rename to tests/integration/test_concurrent_queries_for_user_restriction/test.py
diff --git a/dbms/tests/integration/test_config_corresponding_root/__init__.py b/tests/integration/test_config_corresponding_root/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_config_corresponding_root/__init__.py
rename to tests/integration/test_config_corresponding_root/__init__.py
diff --git a/dbms/tests/integration/test_config_corresponding_root/configs/config.d/bad.xml b/tests/integration/test_config_corresponding_root/configs/config.d/bad.xml
similarity index 100%
rename from dbms/tests/integration/test_config_corresponding_root/configs/config.d/bad.xml
rename to tests/integration/test_config_corresponding_root/configs/config.d/bad.xml
diff --git a/dbms/tests/integration/test_config_corresponding_root/configs/config.xml b/tests/integration/test_config_corresponding_root/configs/config.xml
similarity index 100%
rename from dbms/tests/integration/test_config_corresponding_root/configs/config.xml
rename to tests/integration/test_config_corresponding_root/configs/config.xml
diff --git a/dbms/tests/integration/test_config_corresponding_root/configs/users.xml b/tests/integration/test_config_corresponding_root/configs/users.xml
similarity index 100%
rename from dbms/tests/integration/test_config_corresponding_root/configs/users.xml
rename to tests/integration/test_config_corresponding_root/configs/users.xml
diff --git a/dbms/tests/integration/test_config_corresponding_root/test.py b/tests/integration/test_config_corresponding_root/test.py
similarity index 100%
rename from dbms/tests/integration/test_config_corresponding_root/test.py
rename to tests/integration/test_config_corresponding_root/test.py
diff --git a/dbms/tests/integration/test_config_substitutions/__init__.py b/tests/integration/test_config_substitutions/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_config_substitutions/__init__.py
rename to tests/integration/test_config_substitutions/__init__.py
diff --git a/dbms/tests/integration/test_config_substitutions/configs/config_allow_databases.xml b/tests/integration/test_config_substitutions/configs/config_allow_databases.xml
similarity index 100%
rename from dbms/tests/integration/test_config_substitutions/configs/config_allow_databases.xml
rename to tests/integration/test_config_substitutions/configs/config_allow_databases.xml
diff --git a/dbms/tests/integration/test_config_substitutions/configs/config_env.xml b/tests/integration/test_config_substitutions/configs/config_env.xml
similarity index 100%
rename from dbms/tests/integration/test_config_substitutions/configs/config_env.xml
rename to tests/integration/test_config_substitutions/configs/config_env.xml
diff --git a/dbms/tests/integration/test_config_substitutions/configs/config_incl.xml b/tests/integration/test_config_substitutions/configs/config_incl.xml
similarity index 100%
rename from dbms/tests/integration/test_config_substitutions/configs/config_incl.xml
rename to tests/integration/test_config_substitutions/configs/config_incl.xml
diff --git a/dbms/tests/integration/test_config_substitutions/configs/config_include_from_env.xml b/tests/integration/test_config_substitutions/configs/config_include_from_env.xml
similarity index 100%
rename from dbms/tests/integration/test_config_substitutions/configs/config_include_from_env.xml
rename to tests/integration/test_config_substitutions/configs/config_include_from_env.xml
diff --git a/dbms/tests/integration/test_config_substitutions/configs/config_no_substs.xml b/tests/integration/test_config_substitutions/configs/config_no_substs.xml
similarity index 100%
rename from dbms/tests/integration/test_config_substitutions/configs/config_no_substs.xml
rename to tests/integration/test_config_substitutions/configs/config_no_substs.xml
diff --git a/dbms/tests/integration/test_config_substitutions/configs/config_zk.xml b/tests/integration/test_config_substitutions/configs/config_zk.xml
similarity index 100%
rename from dbms/tests/integration/test_config_substitutions/configs/config_zk.xml
rename to tests/integration/test_config_substitutions/configs/config_zk.xml
diff --git a/dbms/tests/integration/test_config_substitutions/configs/max_query_size.xml b/tests/integration/test_config_substitutions/configs/max_query_size.xml
similarity index 100%
rename from dbms/tests/integration/test_config_substitutions/configs/max_query_size.xml
rename to tests/integration/test_config_substitutions/configs/max_query_size.xml
diff --git a/dbms/tests/integration/test_config_substitutions/test.py b/tests/integration/test_config_substitutions/test.py
similarity index 100%
rename from dbms/tests/integration/test_config_substitutions/test.py
rename to tests/integration/test_config_substitutions/test.py
diff --git a/dbms/tests/integration/test_consistant_parts_after_move_partition/__init__.py b/tests/integration/test_consistant_parts_after_move_partition/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_consistant_parts_after_move_partition/__init__.py
rename to tests/integration/test_consistant_parts_after_move_partition/__init__.py
diff --git a/dbms/tests/integration/test_consistant_parts_after_move_partition/configs/remote_servers.xml b/tests/integration/test_consistant_parts_after_move_partition/configs/remote_servers.xml
similarity index 100%
rename from dbms/tests/integration/test_consistant_parts_after_move_partition/configs/remote_servers.xml
rename to tests/integration/test_consistant_parts_after_move_partition/configs/remote_servers.xml
diff --git a/dbms/tests/integration/test_consistant_parts_after_move_partition/test.py b/tests/integration/test_consistant_parts_after_move_partition/test.py
similarity index 100%
rename from dbms/tests/integration/test_consistant_parts_after_move_partition/test.py
rename to tests/integration/test_consistant_parts_after_move_partition/test.py
diff --git a/dbms/tests/integration/test_consistent_parts_after_clone_replica/__init__.py b/tests/integration/test_consistent_parts_after_clone_replica/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_consistent_parts_after_clone_replica/__init__.py
rename to tests/integration/test_consistent_parts_after_clone_replica/__init__.py
diff --git a/dbms/tests/integration/test_consistent_parts_after_clone_replica/configs/remote_servers.xml b/tests/integration/test_consistent_parts_after_clone_replica/configs/remote_servers.xml
similarity index 100%
rename from dbms/tests/integration/test_consistent_parts_after_clone_replica/configs/remote_servers.xml
rename to tests/integration/test_consistent_parts_after_clone_replica/configs/remote_servers.xml
diff --git a/dbms/tests/integration/test_consistent_parts_after_clone_replica/test.py b/tests/integration/test_consistent_parts_after_clone_replica/test.py
similarity index 100%
rename from dbms/tests/integration/test_consistent_parts_after_clone_replica/test.py
rename to tests/integration/test_consistent_parts_after_clone_replica/test.py
diff --git a/dbms/tests/integration/test_cross_replication/__init__.py b/tests/integration/test_cross_replication/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_cross_replication/__init__.py
rename to tests/integration/test_cross_replication/__init__.py
diff --git a/dbms/tests/integration/test_cross_replication/configs/remote_servers.xml b/tests/integration/test_cross_replication/configs/remote_servers.xml
similarity index 100%
rename from dbms/tests/integration/test_cross_replication/configs/remote_servers.xml
rename to tests/integration/test_cross_replication/configs/remote_servers.xml
diff --git a/dbms/tests/integration/test_cross_replication/test.py b/tests/integration/test_cross_replication/test.py
similarity index 100%
rename from dbms/tests/integration/test_cross_replication/test.py
rename to tests/integration/test_cross_replication/test.py
diff --git a/dbms/tests/integration/test_delayed_replica_failover/__init__.py b/tests/integration/test_delayed_replica_failover/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_delayed_replica_failover/__init__.py
rename to tests/integration/test_delayed_replica_failover/__init__.py
diff --git a/dbms/tests/integration/test_delayed_replica_failover/configs/remote_servers.xml b/tests/integration/test_delayed_replica_failover/configs/remote_servers.xml
similarity index 100%
rename from dbms/tests/integration/test_delayed_replica_failover/configs/remote_servers.xml
rename to tests/integration/test_delayed_replica_failover/configs/remote_servers.xml
diff --git a/dbms/tests/integration/test_delayed_replica_failover/test.py b/tests/integration/test_delayed_replica_failover/test.py
similarity index 100%
rename from dbms/tests/integration/test_delayed_replica_failover/test.py
rename to tests/integration/test_delayed_replica_failover/test.py
diff --git a/dbms/tests/integration/test_dictionaries_all_layouts_and_sources/__init__.py b/tests/integration/test_dictionaries_all_layouts_and_sources/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_dictionaries_all_layouts_and_sources/__init__.py
rename to tests/integration/test_dictionaries_all_layouts_and_sources/__init__.py
diff --git a/dbms/tests/integration/test_dictionaries_all_layouts_and_sources/configs/config.xml b/tests/integration/test_dictionaries_all_layouts_and_sources/configs/config.xml
similarity index 100%
rename from dbms/tests/integration/test_dictionaries_all_layouts_and_sources/configs/config.xml
rename to tests/integration/test_dictionaries_all_layouts_and_sources/configs/config.xml
diff --git a/dbms/tests/integration/test_dictionaries_all_layouts_and_sources/configs/dictionaries/.gitkeep b/tests/integration/test_dictionaries_all_layouts_and_sources/configs/dictionaries/.gitkeep
similarity index 100%
rename from dbms/tests/integration/test_dictionaries_all_layouts_and_sources/configs/dictionaries/.gitkeep
rename to tests/integration/test_dictionaries_all_layouts_and_sources/configs/dictionaries/.gitkeep
diff --git a/dbms/tests/integration/test_dictionaries_all_layouts_and_sources/configs/users.xml b/tests/integration/test_dictionaries_all_layouts_and_sources/configs/users.xml
similarity index 100%
rename from dbms/tests/integration/test_dictionaries_all_layouts_and_sources/configs/users.xml
rename to tests/integration/test_dictionaries_all_layouts_and_sources/configs/users.xml
diff --git a/dbms/tests/integration/test_dictionaries_all_layouts_and_sources/dictionary.py b/tests/integration/test_dictionaries_all_layouts_and_sources/dictionary.py
similarity index 100%
rename from dbms/tests/integration/test_dictionaries_all_layouts_and_sources/dictionary.py
rename to tests/integration/test_dictionaries_all_layouts_and_sources/dictionary.py
diff --git a/dbms/tests/integration/test_dictionaries_all_layouts_and_sources/external_sources.py b/tests/integration/test_dictionaries_all_layouts_and_sources/external_sources.py
similarity index 100%
rename from dbms/tests/integration/test_dictionaries_all_layouts_and_sources/external_sources.py
rename to tests/integration/test_dictionaries_all_layouts_and_sources/external_sources.py
diff --git a/dbms/tests/integration/test_dictionaries_all_layouts_and_sources/fake_cert.pem b/tests/integration/test_dictionaries_all_layouts_and_sources/fake_cert.pem
similarity index 100%
rename from dbms/tests/integration/test_dictionaries_all_layouts_and_sources/fake_cert.pem
rename to tests/integration/test_dictionaries_all_layouts_and_sources/fake_cert.pem
diff --git a/dbms/tests/integration/test_dictionaries_all_layouts_and_sources/http_server.py b/tests/integration/test_dictionaries_all_layouts_and_sources/http_server.py
similarity index 100%
rename from dbms/tests/integration/test_dictionaries_all_layouts_and_sources/http_server.py
rename to tests/integration/test_dictionaries_all_layouts_and_sources/http_server.py
diff --git a/dbms/tests/integration/test_dictionaries_all_layouts_and_sources/test.py b/tests/integration/test_dictionaries_all_layouts_and_sources/test.py
similarity index 100%
rename from dbms/tests/integration/test_dictionaries_all_layouts_and_sources/test.py
rename to tests/integration/test_dictionaries_all_layouts_and_sources/test.py
diff --git a/dbms/tests/integration/test_dictionaries_complex_key_cache_string/__init__.py b/tests/integration/test_dictionaries_complex_key_cache_string/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_dictionaries_complex_key_cache_string/__init__.py
rename to tests/integration/test_dictionaries_complex_key_cache_string/__init__.py
diff --git a/dbms/tests/integration/test_dictionaries_complex_key_cache_string/configs/config.xml b/tests/integration/test_dictionaries_complex_key_cache_string/configs/config.xml
similarity index 100%
rename from dbms/tests/integration/test_dictionaries_complex_key_cache_string/configs/config.xml
rename to tests/integration/test_dictionaries_complex_key_cache_string/configs/config.xml
diff --git a/dbms/tests/integration/test_dictionaries_complex_key_cache_string/configs/dictionaries/complex_key_cache_string.xml b/tests/integration/test_dictionaries_complex_key_cache_string/configs/dictionaries/complex_key_cache_string.xml
similarity index 100%
rename from dbms/tests/integration/test_dictionaries_complex_key_cache_string/configs/dictionaries/complex_key_cache_string.xml
rename to tests/integration/test_dictionaries_complex_key_cache_string/configs/dictionaries/complex_key_cache_string.xml
diff --git a/dbms/tests/integration/test_dictionaries_complex_key_cache_string/configs/users.xml b/tests/integration/test_dictionaries_complex_key_cache_string/configs/users.xml
similarity index 100%
rename from dbms/tests/integration/test_dictionaries_complex_key_cache_string/configs/users.xml
rename to tests/integration/test_dictionaries_complex_key_cache_string/configs/users.xml
diff --git a/dbms/tests/integration/test_dictionaries_complex_key_cache_string/test.py b/tests/integration/test_dictionaries_complex_key_cache_string/test.py
similarity index 100%
rename from dbms/tests/integration/test_dictionaries_complex_key_cache_string/test.py
rename to tests/integration/test_dictionaries_complex_key_cache_string/test.py
diff --git a/dbms/tests/integration/test_dictionaries_ddl/__init__.py b/tests/integration/test_dictionaries_ddl/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_dictionaries_ddl/__init__.py
rename to tests/integration/test_dictionaries_ddl/__init__.py
diff --git a/dbms/tests/integration/test_dictionaries_ddl/configs/config.xml b/tests/integration/test_dictionaries_ddl/configs/config.xml
similarity index 100%
rename from dbms/tests/integration/test_dictionaries_ddl/configs/config.xml
rename to tests/integration/test_dictionaries_ddl/configs/config.xml
diff --git a/dbms/tests/integration/test_dictionaries_ddl/configs/dictionaries/dictionary_with_conflict_name.xml b/tests/integration/test_dictionaries_ddl/configs/dictionaries/dictionary_with_conflict_name.xml
similarity index 100%
rename from dbms/tests/integration/test_dictionaries_ddl/configs/dictionaries/dictionary_with_conflict_name.xml
rename to tests/integration/test_dictionaries_ddl/configs/dictionaries/dictionary_with_conflict_name.xml
diff --git a/dbms/tests/integration/test_dictionaries_ddl/configs/dictionaries/lazy_load.xml b/tests/integration/test_dictionaries_ddl/configs/dictionaries/lazy_load.xml
similarity index 100%
rename from dbms/tests/integration/test_dictionaries_ddl/configs/dictionaries/lazy_load.xml
rename to tests/integration/test_dictionaries_ddl/configs/dictionaries/lazy_load.xml
diff --git a/dbms/tests/integration/test_dictionaries_ddl/configs/dictionaries/simple_dictionary.xml b/tests/integration/test_dictionaries_ddl/configs/dictionaries/simple_dictionary.xml
similarity index 100%
rename from dbms/tests/integration/test_dictionaries_ddl/configs/dictionaries/simple_dictionary.xml
rename to tests/integration/test_dictionaries_ddl/configs/dictionaries/simple_dictionary.xml
diff --git a/dbms/tests/integration/test_dictionaries_ddl/configs/users.xml b/tests/integration/test_dictionaries_ddl/configs/users.xml
similarity index 100%
rename from dbms/tests/integration/test_dictionaries_ddl/configs/users.xml
rename to tests/integration/test_dictionaries_ddl/configs/users.xml
diff --git a/dbms/tests/integration/test_dictionaries_ddl/test.py b/tests/integration/test_dictionaries_ddl/test.py
similarity index 100%
rename from dbms/tests/integration/test_dictionaries_ddl/test.py
rename to tests/integration/test_dictionaries_ddl/test.py
diff --git a/dbms/tests/integration/test_dictionaries_depend_on_dictionaries/__init__.py b/tests/integration/test_dictionaries_depend_on_dictionaries/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_dictionaries_depend_on_dictionaries/__init__.py
rename to tests/integration/test_dictionaries_depend_on_dictionaries/__init__.py
diff --git a/dbms/tests/integration/test_dictionaries_depend_on_dictionaries/configs/config.xml b/tests/integration/test_dictionaries_depend_on_dictionaries/configs/config.xml
similarity index 100%
rename from dbms/tests/integration/test_dictionaries_depend_on_dictionaries/configs/config.xml
rename to tests/integration/test_dictionaries_depend_on_dictionaries/configs/config.xml
diff --git a/dbms/tests/integration/test_dictionaries_depend_on_dictionaries/configs/dictionaries/dep_x.xml b/tests/integration/test_dictionaries_depend_on_dictionaries/configs/dictionaries/dep_x.xml
similarity index 100%
rename from dbms/tests/integration/test_dictionaries_depend_on_dictionaries/configs/dictionaries/dep_x.xml
rename to tests/integration/test_dictionaries_depend_on_dictionaries/configs/dictionaries/dep_x.xml
diff --git a/dbms/tests/integration/test_dictionaries_depend_on_dictionaries/configs/dictionaries/dep_y.xml b/tests/integration/test_dictionaries_depend_on_dictionaries/configs/dictionaries/dep_y.xml
similarity index 100%
rename from dbms/tests/integration/test_dictionaries_depend_on_dictionaries/configs/dictionaries/dep_y.xml
rename to tests/integration/test_dictionaries_depend_on_dictionaries/configs/dictionaries/dep_y.xml
diff --git a/dbms/tests/integration/test_dictionaries_depend_on_dictionaries/configs/dictionaries/dep_z.xml b/tests/integration/test_dictionaries_depend_on_dictionaries/configs/dictionaries/dep_z.xml
similarity index 100%
rename from dbms/tests/integration/test_dictionaries_depend_on_dictionaries/configs/dictionaries/dep_z.xml
rename to tests/integration/test_dictionaries_depend_on_dictionaries/configs/dictionaries/dep_z.xml
diff --git a/dbms/tests/integration/test_dictionaries_depend_on_dictionaries/configs/users.xml b/tests/integration/test_dictionaries_depend_on_dictionaries/configs/users.xml
similarity index 100%
rename from dbms/tests/integration/test_dictionaries_depend_on_dictionaries/configs/users.xml
rename to tests/integration/test_dictionaries_depend_on_dictionaries/configs/users.xml
diff --git a/dbms/tests/integration/test_dictionaries_depend_on_dictionaries/test.py b/tests/integration/test_dictionaries_depend_on_dictionaries/test.py
similarity index 100%
rename from dbms/tests/integration/test_dictionaries_depend_on_dictionaries/test.py
rename to tests/integration/test_dictionaries_depend_on_dictionaries/test.py
diff --git a/dbms/tests/integration/test_dictionaries_mysql/__init__.py b/tests/integration/test_dictionaries_mysql/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_dictionaries_mysql/__init__.py
rename to tests/integration/test_dictionaries_mysql/__init__.py
diff --git a/dbms/tests/integration/test_dictionaries_mysql/configs/config.xml b/tests/integration/test_dictionaries_mysql/configs/config.xml
similarity index 100%
rename from dbms/tests/integration/test_dictionaries_mysql/configs/config.xml
rename to tests/integration/test_dictionaries_mysql/configs/config.xml
diff --git a/dbms/tests/integration/test_dictionaries_mysql/configs/dictionaries/mysql_dict1.xml b/tests/integration/test_dictionaries_mysql/configs/dictionaries/mysql_dict1.xml
similarity index 100%
rename from dbms/tests/integration/test_dictionaries_mysql/configs/dictionaries/mysql_dict1.xml
rename to tests/integration/test_dictionaries_mysql/configs/dictionaries/mysql_dict1.xml
diff --git a/dbms/tests/integration/test_dictionaries_mysql/configs/dictionaries/mysql_dict2.xml b/tests/integration/test_dictionaries_mysql/configs/dictionaries/mysql_dict2.xml
similarity index 100%
rename from dbms/tests/integration/test_dictionaries_mysql/configs/dictionaries/mysql_dict2.xml
rename to tests/integration/test_dictionaries_mysql/configs/dictionaries/mysql_dict2.xml
diff --git a/dbms/tests/integration/test_dictionaries_mysql/configs/remote_servers.xml b/tests/integration/test_dictionaries_mysql/configs/remote_servers.xml
similarity index 100%
rename from dbms/tests/integration/test_dictionaries_mysql/configs/remote_servers.xml
rename to tests/integration/test_dictionaries_mysql/configs/remote_servers.xml
diff --git a/dbms/tests/integration/test_dictionaries_mysql/configs/users.xml b/tests/integration/test_dictionaries_mysql/configs/users.xml
similarity index 100%
rename from dbms/tests/integration/test_dictionaries_mysql/configs/users.xml
rename to tests/integration/test_dictionaries_mysql/configs/users.xml
diff --git a/dbms/tests/integration/test_dictionaries_mysql/test.py b/tests/integration/test_dictionaries_mysql/test.py
similarity index 100%
rename from dbms/tests/integration/test_dictionaries_mysql/test.py
rename to tests/integration/test_dictionaries_mysql/test.py
diff --git a/dbms/tests/integration/test_dictionaries_null_value/__init__.py b/tests/integration/test_dictionaries_null_value/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_dictionaries_null_value/__init__.py
rename to tests/integration/test_dictionaries_null_value/__init__.py
diff --git a/dbms/tests/integration/test_dictionaries_null_value/configs/config.xml b/tests/integration/test_dictionaries_null_value/configs/config.xml
similarity index 100%
rename from dbms/tests/integration/test_dictionaries_null_value/configs/config.xml
rename to tests/integration/test_dictionaries_null_value/configs/config.xml
diff --git a/dbms/tests/integration/test_dictionaries_null_value/configs/dictionaries/cache.xml b/tests/integration/test_dictionaries_null_value/configs/dictionaries/cache.xml
similarity index 100%
rename from dbms/tests/integration/test_dictionaries_null_value/configs/dictionaries/cache.xml
rename to tests/integration/test_dictionaries_null_value/configs/dictionaries/cache.xml
diff --git a/dbms/tests/integration/test_dictionaries_null_value/configs/users.xml b/tests/integration/test_dictionaries_null_value/configs/users.xml
similarity index 100%
rename from dbms/tests/integration/test_dictionaries_null_value/configs/users.xml
rename to tests/integration/test_dictionaries_null_value/configs/users.xml
diff --git a/dbms/tests/integration/test_dictionaries_null_value/test.py b/tests/integration/test_dictionaries_null_value/test.py
similarity index 100%
rename from dbms/tests/integration/test_dictionaries_null_value/test.py
rename to tests/integration/test_dictionaries_null_value/test.py
diff --git a/dbms/tests/integration/test_dictionaries_select_all/__init__.py b/tests/integration/test_dictionaries_select_all/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_dictionaries_select_all/__init__.py
rename to tests/integration/test_dictionaries_select_all/__init__.py
diff --git a/dbms/tests/integration/test_dictionaries_select_all/configs/config.xml b/tests/integration/test_dictionaries_select_all/configs/config.xml
similarity index 100%
rename from dbms/tests/integration/test_dictionaries_select_all/configs/config.xml
rename to tests/integration/test_dictionaries_select_all/configs/config.xml
diff --git a/dbms/tests/integration/test_dictionaries_select_all/configs/dictionaries/.gitignore b/tests/integration/test_dictionaries_select_all/configs/dictionaries/.gitignore
similarity index 100%
rename from dbms/tests/integration/test_dictionaries_select_all/configs/dictionaries/.gitignore
rename to tests/integration/test_dictionaries_select_all/configs/dictionaries/.gitignore
diff --git a/dbms/tests/integration/test_dictionaries_select_all/configs/dictionaries/source.tsv b/tests/integration/test_dictionaries_select_all/configs/dictionaries/source.tsv
similarity index 100%
rename from dbms/tests/integration/test_dictionaries_select_all/configs/dictionaries/source.tsv
rename to tests/integration/test_dictionaries_select_all/configs/dictionaries/source.tsv
diff --git a/dbms/tests/integration/test_dictionaries_select_all/configs/users.xml b/tests/integration/test_dictionaries_select_all/configs/users.xml
similarity index 100%
rename from dbms/tests/integration/test_dictionaries_select_all/configs/users.xml
rename to tests/integration/test_dictionaries_select_all/configs/users.xml
diff --git a/dbms/tests/integration/test_dictionaries_select_all/generate_dictionaries.py b/tests/integration/test_dictionaries_select_all/generate_dictionaries.py
similarity index 100%
rename from dbms/tests/integration/test_dictionaries_select_all/generate_dictionaries.py
rename to tests/integration/test_dictionaries_select_all/generate_dictionaries.py
diff --git a/dbms/tests/integration/test_dictionaries_select_all/test.py b/tests/integration/test_dictionaries_select_all/test.py
similarity index 100%
rename from dbms/tests/integration/test_dictionaries_select_all/test.py
rename to tests/integration/test_dictionaries_select_all/test.py
diff --git a/dbms/tests/integration/test_dictionaries_update_and_reload/__init__.py b/tests/integration/test_dictionaries_update_and_reload/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_dictionaries_update_and_reload/__init__.py
rename to tests/integration/test_dictionaries_update_and_reload/__init__.py
diff --git a/dbms/tests/integration/test_dictionaries_update_and_reload/configs/config.xml b/tests/integration/test_dictionaries_update_and_reload/configs/config.xml
similarity index 100%
rename from dbms/tests/integration/test_dictionaries_update_and_reload/configs/config.xml
rename to tests/integration/test_dictionaries_update_and_reload/configs/config.xml
diff --git a/dbms/tests/integration/test_dictionaries_update_and_reload/configs/dictionaries/cache_xypairs.xml b/tests/integration/test_dictionaries_update_and_reload/configs/dictionaries/cache_xypairs.xml
similarity index 100%
rename from dbms/tests/integration/test_dictionaries_update_and_reload/configs/dictionaries/cache_xypairs.xml
rename to tests/integration/test_dictionaries_update_and_reload/configs/dictionaries/cache_xypairs.xml
diff --git a/dbms/tests/integration/test_dictionaries_update_and_reload/configs/dictionaries/executable.xml b/tests/integration/test_dictionaries_update_and_reload/configs/dictionaries/executable.xml
similarity index 100%
rename from dbms/tests/integration/test_dictionaries_update_and_reload/configs/dictionaries/executable.xml
rename to tests/integration/test_dictionaries_update_and_reload/configs/dictionaries/executable.xml
diff --git a/dbms/tests/integration/test_dictionaries_update_and_reload/configs/dictionaries/file.txt b/tests/integration/test_dictionaries_update_and_reload/configs/dictionaries/file.txt
similarity index 100%
rename from dbms/tests/integration/test_dictionaries_update_and_reload/configs/dictionaries/file.txt
rename to tests/integration/test_dictionaries_update_and_reload/configs/dictionaries/file.txt
diff --git a/dbms/tests/integration/test_dictionaries_update_and_reload/configs/dictionaries/file.xml b/tests/integration/test_dictionaries_update_and_reload/configs/dictionaries/file.xml
similarity index 100%
rename from dbms/tests/integration/test_dictionaries_update_and_reload/configs/dictionaries/file.xml
rename to tests/integration/test_dictionaries_update_and_reload/configs/dictionaries/file.xml
diff --git a/dbms/tests/integration/test_dictionaries_update_and_reload/configs/dictionaries/slow.xml b/tests/integration/test_dictionaries_update_and_reload/configs/dictionaries/slow.xml
similarity index 100%
rename from dbms/tests/integration/test_dictionaries_update_and_reload/configs/dictionaries/slow.xml
rename to tests/integration/test_dictionaries_update_and_reload/configs/dictionaries/slow.xml
diff --git a/dbms/tests/integration/test_dictionaries_update_and_reload/configs/users.xml b/tests/integration/test_dictionaries_update_and_reload/configs/users.xml
similarity index 100%
rename from dbms/tests/integration/test_dictionaries_update_and_reload/configs/users.xml
rename to tests/integration/test_dictionaries_update_and_reload/configs/users.xml
diff --git a/dbms/tests/integration/test_dictionaries_update_and_reload/test.py b/tests/integration/test_dictionaries_update_and_reload/test.py
similarity index 100%
rename from dbms/tests/integration/test_dictionaries_update_and_reload/test.py
rename to tests/integration/test_dictionaries_update_and_reload/test.py
diff --git a/dbms/tests/integration/test_dictionary_allow_read_expired_keys/__init__.py b/tests/integration/test_dictionary_allow_read_expired_keys/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_dictionary_allow_read_expired_keys/__init__.py
rename to tests/integration/test_dictionary_allow_read_expired_keys/__init__.py
diff --git a/dbms/tests/integration/test_dictionary_allow_read_expired_keys/configs/config.xml b/tests/integration/test_dictionary_allow_read_expired_keys/configs/config.xml
similarity index 100%
rename from dbms/tests/integration/test_dictionary_allow_read_expired_keys/configs/config.xml
rename to tests/integration/test_dictionary_allow_read_expired_keys/configs/config.xml
diff --git a/dbms/tests/integration/test_dictionary_allow_read_expired_keys/configs/dictionaries/cache_ints_dictionary.xml b/tests/integration/test_dictionary_allow_read_expired_keys/configs/dictionaries/cache_ints_dictionary.xml
similarity index 100%
rename from dbms/tests/integration/test_dictionary_allow_read_expired_keys/configs/dictionaries/cache_ints_dictionary.xml
rename to tests/integration/test_dictionary_allow_read_expired_keys/configs/dictionaries/cache_ints_dictionary.xml
diff --git a/dbms/tests/integration/test_dictionary_allow_read_expired_keys/configs/users.xml b/tests/integration/test_dictionary_allow_read_expired_keys/configs/users.xml
similarity index 100%
rename from dbms/tests/integration/test_dictionary_allow_read_expired_keys/configs/users.xml
rename to tests/integration/test_dictionary_allow_read_expired_keys/configs/users.xml
diff --git a/dbms/tests/integration/test_dictionary_allow_read_expired_keys/test_default_reading.py b/tests/integration/test_dictionary_allow_read_expired_keys/test_default_reading.py
similarity index 100%
rename from dbms/tests/integration/test_dictionary_allow_read_expired_keys/test_default_reading.py
rename to tests/integration/test_dictionary_allow_read_expired_keys/test_default_reading.py
diff --git a/dbms/tests/integration/test_dictionary_allow_read_expired_keys/test_dict_get.py b/tests/integration/test_dictionary_allow_read_expired_keys/test_dict_get.py
similarity index 100%
rename from dbms/tests/integration/test_dictionary_allow_read_expired_keys/test_dict_get.py
rename to tests/integration/test_dictionary_allow_read_expired_keys/test_dict_get.py
diff --git a/dbms/tests/integration/test_dictionary_allow_read_expired_keys/test_dict_get_or_default.py b/tests/integration/test_dictionary_allow_read_expired_keys/test_dict_get_or_default.py
similarity index 100%
rename from dbms/tests/integration/test_dictionary_allow_read_expired_keys/test_dict_get_or_default.py
rename to tests/integration/test_dictionary_allow_read_expired_keys/test_dict_get_or_default.py
diff --git a/dbms/tests/integration/test_dictionary_ddl_on_cluster/__init__.py b/tests/integration/test_dictionary_ddl_on_cluster/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_dictionary_ddl_on_cluster/__init__.py
rename to tests/integration/test_dictionary_ddl_on_cluster/__init__.py
diff --git a/dbms/tests/integration/test_dictionary_ddl_on_cluster/configs/config.d/clusters.xml b/tests/integration/test_dictionary_ddl_on_cluster/configs/config.d/clusters.xml
similarity index 100%
rename from dbms/tests/integration/test_dictionary_ddl_on_cluster/configs/config.d/clusters.xml
rename to tests/integration/test_dictionary_ddl_on_cluster/configs/config.d/clusters.xml
diff --git a/dbms/tests/integration/test_dictionary_ddl_on_cluster/configs/config.d/ddl.xml b/tests/integration/test_dictionary_ddl_on_cluster/configs/config.d/ddl.xml
similarity index 100%
rename from dbms/tests/integration/test_dictionary_ddl_on_cluster/configs/config.d/ddl.xml
rename to tests/integration/test_dictionary_ddl_on_cluster/configs/config.d/ddl.xml
diff --git a/dbms/tests/integration/test_dictionary_ddl_on_cluster/test.py b/tests/integration/test_dictionary_ddl_on_cluster/test.py
similarity index 100%
rename from dbms/tests/integration/test_dictionary_ddl_on_cluster/test.py
rename to tests/integration/test_dictionary_ddl_on_cluster/test.py
diff --git a/dbms/tests/integration/test_disk_access_storage/__init__.py b/tests/integration/test_disk_access_storage/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_disk_access_storage/__init__.py
rename to tests/integration/test_disk_access_storage/__init__.py
diff --git a/dbms/tests/integration/test_disk_access_storage/configs/access_control_path.xml b/tests/integration/test_disk_access_storage/configs/access_control_path.xml
similarity index 100%
rename from dbms/tests/integration/test_disk_access_storage/configs/access_control_path.xml
rename to tests/integration/test_disk_access_storage/configs/access_control_path.xml
diff --git a/dbms/tests/integration/test_disk_access_storage/test.py b/tests/integration/test_disk_access_storage/test.py
similarity index 100%
rename from dbms/tests/integration/test_disk_access_storage/test.py
rename to tests/integration/test_disk_access_storage/test.py
diff --git a/dbms/tests/integration/test_distributed_ddl/__init__.py b/tests/integration/test_distributed_ddl/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_distributed_ddl/__init__.py
rename to tests/integration/test_distributed_ddl/__init__.py
diff --git a/dbms/tests/integration/test_distributed_ddl/cluster.py b/tests/integration/test_distributed_ddl/cluster.py
similarity index 100%
rename from dbms/tests/integration/test_distributed_ddl/cluster.py
rename to tests/integration/test_distributed_ddl/cluster.py
diff --git a/dbms/tests/integration/test_distributed_ddl/configs/config.d/clusters.xml b/tests/integration/test_distributed_ddl/configs/config.d/clusters.xml
similarity index 100%
rename from dbms/tests/integration/test_distributed_ddl/configs/config.d/clusters.xml
rename to tests/integration/test_distributed_ddl/configs/config.d/clusters.xml
diff --git a/dbms/tests/integration/test_distributed_ddl/configs/config.d/ddl.xml b/tests/integration/test_distributed_ddl/configs/config.d/ddl.xml
similarity index 100%
rename from dbms/tests/integration/test_distributed_ddl/configs/config.d/ddl.xml
rename to tests/integration/test_distributed_ddl/configs/config.d/ddl.xml
diff --git a/dbms/tests/integration/test_distributed_ddl/configs/config.d/macro.xml b/tests/integration/test_distributed_ddl/configs/config.d/macro.xml
similarity index 100%
rename from dbms/tests/integration/test_distributed_ddl/configs/config.d/macro.xml
rename to tests/integration/test_distributed_ddl/configs/config.d/macro.xml
diff --git a/dbms/tests/integration/test_distributed_ddl/configs/config.d/query_log.xml b/tests/integration/test_distributed_ddl/configs/config.d/query_log.xml
similarity index 100%
rename from dbms/tests/integration/test_distributed_ddl/configs/config.d/query_log.xml
rename to tests/integration/test_distributed_ddl/configs/config.d/query_log.xml
diff --git a/dbms/tests/integration/test_distributed_ddl/configs/config.d/zookeeper_session_timeout.xml b/tests/integration/test_distributed_ddl/configs/config.d/zookeeper_session_timeout.xml
similarity index 100%
rename from dbms/tests/integration/test_distributed_ddl/configs/config.d/zookeeper_session_timeout.xml
rename to tests/integration/test_distributed_ddl/configs/config.d/zookeeper_session_timeout.xml
diff --git a/dbms/tests/integration/test_distributed_ddl/configs/users.d/query_log.xml b/tests/integration/test_distributed_ddl/configs/users.d/query_log.xml
similarity index 100%
rename from dbms/tests/integration/test_distributed_ddl/configs/users.d/query_log.xml
rename to tests/integration/test_distributed_ddl/configs/users.d/query_log.xml
diff --git a/dbms/tests/integration/test_distributed_ddl/configs/users.d/restricted_user.xml b/tests/integration/test_distributed_ddl/configs/users.d/restricted_user.xml
similarity index 100%
rename from dbms/tests/integration/test_distributed_ddl/configs/users.d/restricted_user.xml
rename to tests/integration/test_distributed_ddl/configs/users.d/restricted_user.xml
diff --git a/dbms/tests/integration/test_distributed_ddl/configs_secure/config.d/clusters.xml b/tests/integration/test_distributed_ddl/configs_secure/config.d/clusters.xml
similarity index 100%
rename from dbms/tests/integration/test_distributed_ddl/configs_secure/config.d/clusters.xml
rename to tests/integration/test_distributed_ddl/configs_secure/config.d/clusters.xml
diff --git a/dbms/tests/integration/test_distributed_ddl/configs_secure/config.d/ddl.xml b/tests/integration/test_distributed_ddl/configs_secure/config.d/ddl.xml
similarity index 100%
rename from dbms/tests/integration/test_distributed_ddl/configs_secure/config.d/ddl.xml
rename to tests/integration/test_distributed_ddl/configs_secure/config.d/ddl.xml
diff --git a/dbms/tests/integration/test_distributed_ddl/configs_secure/config.d/macro.xml b/tests/integration/test_distributed_ddl/configs_secure/config.d/macro.xml
similarity index 100%
rename from dbms/tests/integration/test_distributed_ddl/configs_secure/config.d/macro.xml
rename to tests/integration/test_distributed_ddl/configs_secure/config.d/macro.xml
diff --git a/dbms/tests/integration/test_distributed_ddl/configs_secure/config.d/query_log.xml b/tests/integration/test_distributed_ddl/configs_secure/config.d/query_log.xml
similarity index 100%
rename from dbms/tests/integration/test_distributed_ddl/configs_secure/config.d/query_log.xml
rename to tests/integration/test_distributed_ddl/configs_secure/config.d/query_log.xml
diff --git a/dbms/tests/integration/test_distributed_ddl/configs_secure/config.d/ssl_conf.xml b/tests/integration/test_distributed_ddl/configs_secure/config.d/ssl_conf.xml
similarity index 100%
rename from dbms/tests/integration/test_distributed_ddl/configs_secure/config.d/ssl_conf.xml
rename to tests/integration/test_distributed_ddl/configs_secure/config.d/ssl_conf.xml
diff --git a/dbms/tests/integration/test_distributed_ddl/configs_secure/config.d/zookeeper_session_timeout.xml b/tests/integration/test_distributed_ddl/configs_secure/config.d/zookeeper_session_timeout.xml
similarity index 100%
rename from dbms/tests/integration/test_distributed_ddl/configs_secure/config.d/zookeeper_session_timeout.xml
rename to tests/integration/test_distributed_ddl/configs_secure/config.d/zookeeper_session_timeout.xml
diff --git a/dbms/tests/integration/test_distributed_ddl/configs_secure/dhparam.pem b/tests/integration/test_distributed_ddl/configs_secure/dhparam.pem
similarity index 100%
rename from dbms/tests/integration/test_distributed_ddl/configs_secure/dhparam.pem
rename to tests/integration/test_distributed_ddl/configs_secure/dhparam.pem
diff --git a/dbms/tests/integration/test_distributed_ddl/configs_secure/server.crt b/tests/integration/test_distributed_ddl/configs_secure/server.crt
similarity index 100%
rename from dbms/tests/integration/test_distributed_ddl/configs_secure/server.crt
rename to tests/integration/test_distributed_ddl/configs_secure/server.crt
diff --git a/dbms/tests/integration/test_distributed_ddl/configs_secure/server.key b/tests/integration/test_distributed_ddl/configs_secure/server.key
similarity index 100%
rename from dbms/tests/integration/test_distributed_ddl/configs_secure/server.key
rename to tests/integration/test_distributed_ddl/configs_secure/server.key
diff --git a/dbms/tests/integration/test_distributed_ddl/configs_secure/users.d/query_log.xml b/tests/integration/test_distributed_ddl/configs_secure/users.d/query_log.xml
similarity index 100%
rename from dbms/tests/integration/test_distributed_ddl/configs_secure/users.d/query_log.xml
rename to tests/integration/test_distributed_ddl/configs_secure/users.d/query_log.xml
diff --git a/dbms/tests/integration/test_distributed_ddl/configs_secure/users.d/restricted_user.xml b/tests/integration/test_distributed_ddl/configs_secure/users.d/restricted_user.xml
similarity index 100%
rename from dbms/tests/integration/test_distributed_ddl/configs_secure/users.d/restricted_user.xml
rename to tests/integration/test_distributed_ddl/configs_secure/users.d/restricted_user.xml
diff --git a/dbms/tests/integration/test_distributed_ddl/test.py b/tests/integration/test_distributed_ddl/test.py
similarity index 100%
rename from dbms/tests/integration/test_distributed_ddl/test.py
rename to tests/integration/test_distributed_ddl/test.py
diff --git a/dbms/tests/integration/test_distributed_ddl/test_replicated_alter.py b/tests/integration/test_distributed_ddl/test_replicated_alter.py
similarity index 100%
rename from dbms/tests/integration/test_distributed_ddl/test_replicated_alter.py
rename to tests/integration/test_distributed_ddl/test_replicated_alter.py
diff --git a/dbms/tests/integration/test_distributed_ddl_password/__init__.py b/tests/integration/test_distributed_ddl_password/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_distributed_ddl_password/__init__.py
rename to tests/integration/test_distributed_ddl_password/__init__.py
diff --git a/dbms/tests/integration/test_distributed_ddl_password/configs/config.d/clusters.xml b/tests/integration/test_distributed_ddl_password/configs/config.d/clusters.xml
similarity index 100%
rename from dbms/tests/integration/test_distributed_ddl_password/configs/config.d/clusters.xml
rename to tests/integration/test_distributed_ddl_password/configs/config.d/clusters.xml
diff --git a/dbms/tests/integration/test_distributed_ddl_password/configs/users.d/default_with_password.xml b/tests/integration/test_distributed_ddl_password/configs/users.d/default_with_password.xml
similarity index 100%
rename from dbms/tests/integration/test_distributed_ddl_password/configs/users.d/default_with_password.xml
rename to tests/integration/test_distributed_ddl_password/configs/users.d/default_with_password.xml
diff --git a/dbms/tests/integration/test_distributed_ddl_password/test.py b/tests/integration/test_distributed_ddl_password/test.py
similarity index 100%
rename from dbms/tests/integration/test_distributed_ddl_password/test.py
rename to tests/integration/test_distributed_ddl_password/test.py
diff --git a/dbms/tests/integration/test_distributed_format/__init__.py b/tests/integration/test_distributed_format/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_distributed_format/__init__.py
rename to tests/integration/test_distributed_format/__init__.py
diff --git a/dbms/tests/integration/test_distributed_format/configs/remote_servers.xml b/tests/integration/test_distributed_format/configs/remote_servers.xml
similarity index 100%
rename from dbms/tests/integration/test_distributed_format/configs/remote_servers.xml
rename to tests/integration/test_distributed_format/configs/remote_servers.xml
diff --git a/dbms/tests/integration/test_distributed_format/test.py b/tests/integration/test_distributed_format/test.py
similarity index 100%
rename from dbms/tests/integration/test_distributed_format/test.py
rename to tests/integration/test_distributed_format/test.py
diff --git a/dbms/tests/integration/test_distributed_respect_user_timeouts/__init__.py b/tests/integration/test_distributed_respect_user_timeouts/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_distributed_respect_user_timeouts/__init__.py
rename to tests/integration/test_distributed_respect_user_timeouts/__init__.py
diff --git a/dbms/tests/integration/test_distributed_respect_user_timeouts/configs/config.d/remote_servers.xml b/tests/integration/test_distributed_respect_user_timeouts/configs/config.d/remote_servers.xml
similarity index 100%
rename from dbms/tests/integration/test_distributed_respect_user_timeouts/configs/config.d/remote_servers.xml
rename to tests/integration/test_distributed_respect_user_timeouts/configs/config.d/remote_servers.xml
diff --git a/dbms/tests/integration/test_distributed_respect_user_timeouts/configs/users.d/set_distributed_defaults.xml b/tests/integration/test_distributed_respect_user_timeouts/configs/users.d/set_distributed_defaults.xml
similarity index 100%
rename from dbms/tests/integration/test_distributed_respect_user_timeouts/configs/users.d/set_distributed_defaults.xml
rename to tests/integration/test_distributed_respect_user_timeouts/configs/users.d/set_distributed_defaults.xml
diff --git a/dbms/tests/integration/test_distributed_respect_user_timeouts/configs_secure/config.d/remote_servers.xml b/tests/integration/test_distributed_respect_user_timeouts/configs_secure/config.d/remote_servers.xml
similarity index 100%
rename from dbms/tests/integration/test_distributed_respect_user_timeouts/configs_secure/config.d/remote_servers.xml
rename to tests/integration/test_distributed_respect_user_timeouts/configs_secure/config.d/remote_servers.xml
diff --git a/dbms/tests/integration/test_distributed_respect_user_timeouts/configs_secure/config.d/ssl_conf.xml b/tests/integration/test_distributed_respect_user_timeouts/configs_secure/config.d/ssl_conf.xml
similarity index 100%
rename from dbms/tests/integration/test_distributed_respect_user_timeouts/configs_secure/config.d/ssl_conf.xml
rename to tests/integration/test_distributed_respect_user_timeouts/configs_secure/config.d/ssl_conf.xml
diff --git a/dbms/tests/integration/test_distributed_respect_user_timeouts/configs_secure/dhparam.pem b/tests/integration/test_distributed_respect_user_timeouts/configs_secure/dhparam.pem
similarity index 100%
rename from dbms/tests/integration/test_distributed_respect_user_timeouts/configs_secure/dhparam.pem
rename to tests/integration/test_distributed_respect_user_timeouts/configs_secure/dhparam.pem
diff --git a/dbms/tests/integration/test_distributed_respect_user_timeouts/configs_secure/server.crt b/tests/integration/test_distributed_respect_user_timeouts/configs_secure/server.crt
similarity index 100%
rename from dbms/tests/integration/test_distributed_respect_user_timeouts/configs_secure/server.crt
rename to tests/integration/test_distributed_respect_user_timeouts/configs_secure/server.crt
diff --git a/dbms/tests/integration/test_distributed_respect_user_timeouts/configs_secure/server.key b/tests/integration/test_distributed_respect_user_timeouts/configs_secure/server.key
similarity index 100%
rename from dbms/tests/integration/test_distributed_respect_user_timeouts/configs_secure/server.key
rename to tests/integration/test_distributed_respect_user_timeouts/configs_secure/server.key
diff --git a/dbms/tests/integration/test_distributed_respect_user_timeouts/configs_secure/users.d/set_distributed_defaults.xml b/tests/integration/test_distributed_respect_user_timeouts/configs_secure/users.d/set_distributed_defaults.xml
similarity index 100%
rename from dbms/tests/integration/test_distributed_respect_user_timeouts/configs_secure/users.d/set_distributed_defaults.xml
rename to tests/integration/test_distributed_respect_user_timeouts/configs_secure/users.d/set_distributed_defaults.xml
diff --git a/dbms/tests/integration/test_distributed_respect_user_timeouts/test.py b/tests/integration/test_distributed_respect_user_timeouts/test.py
similarity index 100%
rename from dbms/tests/integration/test_distributed_respect_user_timeouts/test.py
rename to tests/integration/test_distributed_respect_user_timeouts/test.py
diff --git a/dbms/tests/integration/test_distributed_storage_configuration/__init__.py b/tests/integration/test_distributed_storage_configuration/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_distributed_storage_configuration/__init__.py
rename to tests/integration/test_distributed_storage_configuration/__init__.py
diff --git a/dbms/tests/integration/test_distributed_storage_configuration/configs/config.d/storage_configuration.xml b/tests/integration/test_distributed_storage_configuration/configs/config.d/storage_configuration.xml
similarity index 100%
rename from dbms/tests/integration/test_distributed_storage_configuration/configs/config.d/storage_configuration.xml
rename to tests/integration/test_distributed_storage_configuration/configs/config.d/storage_configuration.xml
diff --git a/dbms/tests/integration/test_distributed_storage_configuration/test.py b/tests/integration/test_distributed_storage_configuration/test.py
similarity index 100%
rename from dbms/tests/integration/test_distributed_storage_configuration/test.py
rename to tests/integration/test_distributed_storage_configuration/test.py
diff --git a/dbms/tests/integration/test_distributed_system_query/__init__.py b/tests/integration/test_distributed_system_query/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_distributed_system_query/__init__.py
rename to tests/integration/test_distributed_system_query/__init__.py
diff --git a/dbms/tests/integration/test_distributed_system_query/configs/remote_servers.xml b/tests/integration/test_distributed_system_query/configs/remote_servers.xml
similarity index 100%
rename from dbms/tests/integration/test_distributed_system_query/configs/remote_servers.xml
rename to tests/integration/test_distributed_system_query/configs/remote_servers.xml
diff --git a/dbms/tests/integration/test_distributed_system_query/test.py b/tests/integration/test_distributed_system_query/test.py
similarity index 100%
rename from dbms/tests/integration/test_distributed_system_query/test.py
rename to tests/integration/test_distributed_system_query/test.py
diff --git a/dbms/tests/integration/test_extreme_deduplication/__init__.py b/tests/integration/test_extreme_deduplication/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_extreme_deduplication/__init__.py
rename to tests/integration/test_extreme_deduplication/__init__.py
diff --git a/dbms/tests/integration/test_extreme_deduplication/configs/conf.d/merge_tree.xml b/tests/integration/test_extreme_deduplication/configs/conf.d/merge_tree.xml
similarity index 100%
rename from dbms/tests/integration/test_extreme_deduplication/configs/conf.d/merge_tree.xml
rename to tests/integration/test_extreme_deduplication/configs/conf.d/merge_tree.xml
diff --git a/dbms/tests/integration/test_extreme_deduplication/configs/conf.d/remote_servers.xml b/tests/integration/test_extreme_deduplication/configs/conf.d/remote_servers.xml
similarity index 100%
rename from dbms/tests/integration/test_extreme_deduplication/configs/conf.d/remote_servers.xml
rename to tests/integration/test_extreme_deduplication/configs/conf.d/remote_servers.xml
diff --git a/dbms/tests/integration/test_extreme_deduplication/test.py b/tests/integration/test_extreme_deduplication/test.py
similarity index 100%
rename from dbms/tests/integration/test_extreme_deduplication/test.py
rename to tests/integration/test_extreme_deduplication/test.py
diff --git a/dbms/tests/integration/test_filesystem_layout/__init__.py b/tests/integration/test_filesystem_layout/__init__.py
similarity index 100%
rename from
dbms/tests/integration/test_filesystem_layout/__init__.py rename to tests/integration/test_filesystem_layout/__init__.py diff --git a/dbms/tests/integration/test_filesystem_layout/test.py b/tests/integration/test_filesystem_layout/test.py similarity index 100% rename from dbms/tests/integration/test_filesystem_layout/test.py rename to tests/integration/test_filesystem_layout/test.py diff --git a/dbms/tests/integration/test_force_deduplication/__init__.py b/tests/integration/test_force_deduplication/__init__.py similarity index 100% rename from dbms/tests/integration/test_force_deduplication/__init__.py rename to tests/integration/test_force_deduplication/__init__.py diff --git a/dbms/tests/integration/test_force_deduplication/test.py b/tests/integration/test_force_deduplication/test.py similarity index 100% rename from dbms/tests/integration/test_force_deduplication/test.py rename to tests/integration/test_force_deduplication/test.py diff --git a/dbms/tests/integration/test_format_avro_confluent/__init__.py b/tests/integration/test_format_avro_confluent/__init__.py similarity index 100% rename from dbms/tests/integration/test_format_avro_confluent/__init__.py rename to tests/integration/test_format_avro_confluent/__init__.py diff --git a/dbms/tests/integration/test_format_avro_confluent/test.py b/tests/integration/test_format_avro_confluent/test.py similarity index 100% rename from dbms/tests/integration/test_format_avro_confluent/test.py rename to tests/integration/test_format_avro_confluent/test.py diff --git a/dbms/tests/integration/test_format_schema_on_server/__init__.py b/tests/integration/test_format_schema_on_server/__init__.py similarity index 100% rename from dbms/tests/integration/test_format_schema_on_server/__init__.py rename to tests/integration/test_format_schema_on_server/__init__.py diff --git a/dbms/tests/integration/test_format_schema_on_server/clickhouse_path/format_schemas/simple.proto b/tests/integration/test_format_schema_on_server/clickhouse_path/format_schemas/simple.proto similarity index 100% rename from dbms/tests/integration/test_format_schema_on_server/clickhouse_path/format_schemas/simple.proto rename to tests/integration/test_format_schema_on_server/clickhouse_path/format_schemas/simple.proto diff --git a/dbms/tests/integration/test_format_schema_on_server/test.py b/tests/integration/test_format_schema_on_server/test.py similarity index 100% rename from dbms/tests/integration/test_format_schema_on_server/test.py rename to tests/integration/test_format_schema_on_server/test.py diff --git a/dbms/tests/integration/test_globs_in_filepath/__init__.py b/tests/integration/test_globs_in_filepath/__init__.py similarity index 100% rename from dbms/tests/integration/test_globs_in_filepath/__init__.py rename to tests/integration/test_globs_in_filepath/__init__.py diff --git a/dbms/tests/integration/test_globs_in_filepath/test.py b/tests/integration/test_globs_in_filepath/test.py similarity index 100% rename from dbms/tests/integration/test_globs_in_filepath/test.py rename to tests/integration/test_globs_in_filepath/test.py diff --git a/dbms/tests/integration/test_grant_and_revoke/__init__.py b/tests/integration/test_grant_and_revoke/__init__.py similarity index 100% rename from dbms/tests/integration/test_grant_and_revoke/__init__.py rename to tests/integration/test_grant_and_revoke/__init__.py diff --git a/dbms/tests/integration/test_grant_and_revoke/configs/users.xml b/tests/integration/test_grant_and_revoke/configs/users.xml similarity index 100% rename from 
dbms/tests/integration/test_grant_and_revoke/configs/users.xml rename to tests/integration/test_grant_and_revoke/configs/users.xml diff --git a/dbms/tests/integration/test_grant_and_revoke/test.py b/tests/integration/test_grant_and_revoke/test.py similarity index 100% rename from dbms/tests/integration/test_grant_and_revoke/test.py rename to tests/integration/test_grant_and_revoke/test.py diff --git a/dbms/tests/integration/test_graphite_merge_tree/__init__.py b/tests/integration/test_graphite_merge_tree/__init__.py similarity index 100% rename from dbms/tests/integration/test_graphite_merge_tree/__init__.py rename to tests/integration/test_graphite_merge_tree/__init__.py diff --git a/dbms/tests/integration/test_graphite_merge_tree/configs/graphite_rollup.xml b/tests/integration/test_graphite_merge_tree/configs/graphite_rollup.xml similarity index 100% rename from dbms/tests/integration/test_graphite_merge_tree/configs/graphite_rollup.xml rename to tests/integration/test_graphite_merge_tree/configs/graphite_rollup.xml diff --git a/dbms/tests/integration/test_graphite_merge_tree/test.py b/tests/integration/test_graphite_merge_tree/test.py similarity index 100% rename from dbms/tests/integration/test_graphite_merge_tree/test.py rename to tests/integration/test_graphite_merge_tree/test.py diff --git a/dbms/tests/integration/test_graphite_merge_tree/test_multiple_paths_and_versions.reference b/tests/integration/test_graphite_merge_tree/test_multiple_paths_and_versions.reference similarity index 100% rename from dbms/tests/integration/test_graphite_merge_tree/test_multiple_paths_and_versions.reference rename to tests/integration/test_graphite_merge_tree/test_multiple_paths_and_versions.reference diff --git a/dbms/tests/integration/test_host_ip_change/__init__.py b/tests/integration/test_host_ip_change/__init__.py similarity index 100% rename from dbms/tests/integration/test_host_ip_change/__init__.py rename to tests/integration/test_host_ip_change/__init__.py diff --git a/dbms/tests/integration/test_host_ip_change/configs/dns_update_long.xml b/tests/integration/test_host_ip_change/configs/dns_update_long.xml similarity index 100% rename from dbms/tests/integration/test_host_ip_change/configs/dns_update_long.xml rename to tests/integration/test_host_ip_change/configs/dns_update_long.xml diff --git a/dbms/tests/integration/test_host_ip_change/configs/dns_update_short.xml b/tests/integration/test_host_ip_change/configs/dns_update_short.xml similarity index 100% rename from dbms/tests/integration/test_host_ip_change/configs/dns_update_short.xml rename to tests/integration/test_host_ip_change/configs/dns_update_short.xml diff --git a/dbms/tests/integration/test_host_ip_change/configs/listen_host.xml b/tests/integration/test_host_ip_change/configs/listen_host.xml similarity index 100% rename from dbms/tests/integration/test_host_ip_change/configs/listen_host.xml rename to tests/integration/test_host_ip_change/configs/listen_host.xml diff --git a/dbms/tests/integration/test_host_ip_change/configs/remote_servers.xml b/tests/integration/test_host_ip_change/configs/remote_servers.xml similarity index 100% rename from dbms/tests/integration/test_host_ip_change/configs/remote_servers.xml rename to tests/integration/test_host_ip_change/configs/remote_servers.xml diff --git a/dbms/tests/integration/test_host_ip_change/test.py b/tests/integration/test_host_ip_change/test.py similarity index 100% rename from dbms/tests/integration/test_host_ip_change/test.py rename to 
tests/integration/test_host_ip_change/test.py diff --git a/dbms/tests/integration/test_http_and_readonly/__init__.py b/tests/integration/test_http_and_readonly/__init__.py similarity index 100% rename from dbms/tests/integration/test_http_and_readonly/__init__.py rename to tests/integration/test_http_and_readonly/__init__.py diff --git a/dbms/tests/integration/test_http_and_readonly/test.py b/tests/integration/test_http_and_readonly/test.py similarity index 100% rename from dbms/tests/integration/test_http_and_readonly/test.py rename to tests/integration/test_http_and_readonly/test.py diff --git a/dbms/tests/integration/test_https_replication/__init__.py b/tests/integration/test_https_replication/__init__.py similarity index 100% rename from dbms/tests/integration/test_https_replication/__init__.py rename to tests/integration/test_https_replication/__init__.py diff --git a/dbms/tests/integration/test_https_replication/configs/config.xml b/tests/integration/test_https_replication/configs/config.xml similarity index 100% rename from dbms/tests/integration/test_https_replication/configs/config.xml rename to tests/integration/test_https_replication/configs/config.xml diff --git a/dbms/tests/integration/test_https_replication/configs/no_ssl_conf.xml b/tests/integration/test_https_replication/configs/no_ssl_conf.xml similarity index 100% rename from dbms/tests/integration/test_https_replication/configs/no_ssl_conf.xml rename to tests/integration/test_https_replication/configs/no_ssl_conf.xml diff --git a/dbms/tests/integration/test_https_replication/configs/remote_servers.xml b/tests/integration/test_https_replication/configs/remote_servers.xml similarity index 100% rename from dbms/tests/integration/test_https_replication/configs/remote_servers.xml rename to tests/integration/test_https_replication/configs/remote_servers.xml diff --git a/dbms/tests/integration/test_https_replication/configs/server.crt b/tests/integration/test_https_replication/configs/server.crt similarity index 100% rename from dbms/tests/integration/test_https_replication/configs/server.crt rename to tests/integration/test_https_replication/configs/server.crt diff --git a/dbms/tests/integration/test_https_replication/configs/server.key b/tests/integration/test_https_replication/configs/server.key similarity index 100% rename from dbms/tests/integration/test_https_replication/configs/server.key rename to tests/integration/test_https_replication/configs/server.key diff --git a/dbms/tests/integration/test_https_replication/configs/ssl_conf.xml b/tests/integration/test_https_replication/configs/ssl_conf.xml similarity index 100% rename from dbms/tests/integration/test_https_replication/configs/ssl_conf.xml rename to tests/integration/test_https_replication/configs/ssl_conf.xml diff --git a/dbms/tests/integration/test_https_replication/test.py b/tests/integration/test_https_replication/test.py similarity index 100% rename from dbms/tests/integration/test_https_replication/test.py rename to tests/integration/test_https_replication/test.py diff --git a/dbms/tests/integration/test_inherit_multiple_profiles/__init__.py b/tests/integration/test_inherit_multiple_profiles/__init__.py similarity index 100% rename from dbms/tests/integration/test_inherit_multiple_profiles/__init__.py rename to tests/integration/test_inherit_multiple_profiles/__init__.py diff --git a/dbms/tests/integration/test_inherit_multiple_profiles/configs/combined_profile.xml b/tests/integration/test_inherit_multiple_profiles/configs/combined_profile.xml similarity 
index 100% rename from dbms/tests/integration/test_inherit_multiple_profiles/configs/combined_profile.xml rename to tests/integration/test_inherit_multiple_profiles/configs/combined_profile.xml diff --git a/dbms/tests/integration/test_inherit_multiple_profiles/test.py b/tests/integration/test_inherit_multiple_profiles/test.py similarity index 100% rename from dbms/tests/integration/test_inherit_multiple_profiles/test.py rename to tests/integration/test_inherit_multiple_profiles/test.py diff --git a/dbms/tests/integration/test_insert_into_distributed/__init__.py b/tests/integration/test_insert_into_distributed/__init__.py similarity index 100% rename from dbms/tests/integration/test_insert_into_distributed/__init__.py rename to tests/integration/test_insert_into_distributed/__init__.py diff --git a/dbms/tests/integration/test_insert_into_distributed/configs/enable_distributed_inserts_batching.xml b/tests/integration/test_insert_into_distributed/configs/enable_distributed_inserts_batching.xml similarity index 100% rename from dbms/tests/integration/test_insert_into_distributed/configs/enable_distributed_inserts_batching.xml rename to tests/integration/test_insert_into_distributed/configs/enable_distributed_inserts_batching.xml diff --git a/dbms/tests/integration/test_insert_into_distributed/configs/forbid_background_merges.xml b/tests/integration/test_insert_into_distributed/configs/forbid_background_merges.xml similarity index 100% rename from dbms/tests/integration/test_insert_into_distributed/configs/forbid_background_merges.xml rename to tests/integration/test_insert_into_distributed/configs/forbid_background_merges.xml diff --git a/dbms/tests/integration/test_insert_into_distributed/configs/remote_servers.xml b/tests/integration/test_insert_into_distributed/configs/remote_servers.xml similarity index 100% rename from dbms/tests/integration/test_insert_into_distributed/configs/remote_servers.xml rename to tests/integration/test_insert_into_distributed/configs/remote_servers.xml diff --git a/dbms/tests/integration/test_insert_into_distributed/test.py b/tests/integration/test_insert_into_distributed/test.py similarity index 100% rename from dbms/tests/integration/test_insert_into_distributed/test.py rename to tests/integration/test_insert_into_distributed/test.py diff --git a/dbms/tests/integration/test_insert_into_distributed_sync_async/__init__.py b/tests/integration/test_insert_into_distributed_sync_async/__init__.py similarity index 100% rename from dbms/tests/integration/test_insert_into_distributed_sync_async/__init__.py rename to tests/integration/test_insert_into_distributed_sync_async/__init__.py diff --git a/dbms/tests/integration/test_insert_into_distributed_sync_async/configs/remote_servers.xml b/tests/integration/test_insert_into_distributed_sync_async/configs/remote_servers.xml similarity index 100% rename from dbms/tests/integration/test_insert_into_distributed_sync_async/configs/remote_servers.xml rename to tests/integration/test_insert_into_distributed_sync_async/configs/remote_servers.xml diff --git a/dbms/tests/integration/test_insert_into_distributed_sync_async/test.py b/tests/integration/test_insert_into_distributed_sync_async/test.py similarity index 100% rename from dbms/tests/integration/test_insert_into_distributed_sync_async/test.py rename to tests/integration/test_insert_into_distributed_sync_async/test.py diff --git a/dbms/tests/integration/test_insert_into_distributed_through_materialized_view/__init__.py 
b/tests/integration/test_insert_into_distributed_through_materialized_view/__init__.py similarity index 100% rename from dbms/tests/integration/test_insert_into_distributed_through_materialized_view/__init__.py rename to tests/integration/test_insert_into_distributed_through_materialized_view/__init__.py diff --git a/dbms/tests/integration/test_insert_into_distributed_through_materialized_view/configs/enable_distributed_inserts_batching.xml b/tests/integration/test_insert_into_distributed_through_materialized_view/configs/enable_distributed_inserts_batching.xml similarity index 100% rename from dbms/tests/integration/test_insert_into_distributed_through_materialized_view/configs/enable_distributed_inserts_batching.xml rename to tests/integration/test_insert_into_distributed_through_materialized_view/configs/enable_distributed_inserts_batching.xml diff --git a/dbms/tests/integration/test_insert_into_distributed_through_materialized_view/configs/forbid_background_merges.xml b/tests/integration/test_insert_into_distributed_through_materialized_view/configs/forbid_background_merges.xml similarity index 100% rename from dbms/tests/integration/test_insert_into_distributed_through_materialized_view/configs/forbid_background_merges.xml rename to tests/integration/test_insert_into_distributed_through_materialized_view/configs/forbid_background_merges.xml diff --git a/dbms/tests/integration/test_insert_into_distributed_through_materialized_view/configs/remote_servers.xml b/tests/integration/test_insert_into_distributed_through_materialized_view/configs/remote_servers.xml similarity index 100% rename from dbms/tests/integration/test_insert_into_distributed_through_materialized_view/configs/remote_servers.xml rename to tests/integration/test_insert_into_distributed_through_materialized_view/configs/remote_servers.xml diff --git a/dbms/tests/integration/test_insert_into_distributed_through_materialized_view/test.py b/tests/integration/test_insert_into_distributed_through_materialized_view/test.py similarity index 100% rename from dbms/tests/integration/test_insert_into_distributed_through_materialized_view/test.py rename to tests/integration/test_insert_into_distributed_through_materialized_view/test.py diff --git a/dbms/tests/integration/test_log_family_s3/__init__.py b/tests/integration/test_log_family_s3/__init__.py similarity index 100% rename from dbms/tests/integration/test_log_family_s3/__init__.py rename to tests/integration/test_log_family_s3/__init__.py diff --git a/dbms/tests/integration/test_log_family_s3/configs/config.d/log_conf.xml b/tests/integration/test_log_family_s3/configs/config.d/log_conf.xml similarity index 100% rename from dbms/tests/integration/test_log_family_s3/configs/config.d/log_conf.xml rename to tests/integration/test_log_family_s3/configs/config.d/log_conf.xml diff --git a/dbms/tests/integration/test_log_family_s3/configs/config.xml b/tests/integration/test_log_family_s3/configs/config.xml similarity index 100% rename from dbms/tests/integration/test_log_family_s3/configs/config.xml rename to tests/integration/test_log_family_s3/configs/config.xml diff --git a/dbms/tests/integration/test_log_family_s3/configs/users.xml b/tests/integration/test_log_family_s3/configs/users.xml similarity index 100% rename from dbms/tests/integration/test_log_family_s3/configs/users.xml rename to tests/integration/test_log_family_s3/configs/users.xml diff --git a/dbms/tests/integration/test_log_family_s3/test.py b/tests/integration/test_log_family_s3/test.py similarity index 100% rename 
from dbms/tests/integration/test_log_family_s3/test.py rename to tests/integration/test_log_family_s3/test.py diff --git a/dbms/tests/integration/test_logs_level/__init__.py b/tests/integration/test_logs_level/__init__.py similarity index 100% rename from dbms/tests/integration/test_logs_level/__init__.py rename to tests/integration/test_logs_level/__init__.py diff --git a/dbms/tests/integration/test_logs_level/configs/config_information.xml b/tests/integration/test_logs_level/configs/config_information.xml similarity index 100% rename from dbms/tests/integration/test_logs_level/configs/config_information.xml rename to tests/integration/test_logs_level/configs/config_information.xml diff --git a/dbms/tests/integration/test_logs_level/test.py b/tests/integration/test_logs_level/test.py similarity index 100% rename from dbms/tests/integration/test_logs_level/test.py rename to tests/integration/test_logs_level/test.py diff --git a/dbms/tests/integration/test_match_process_uid_against_data_owner/__init__.py b/tests/integration/test_match_process_uid_against_data_owner/__init__.py similarity index 100% rename from dbms/tests/integration/test_match_process_uid_against_data_owner/__init__.py rename to tests/integration/test_match_process_uid_against_data_owner/__init__.py diff --git a/dbms/tests/integration/test_match_process_uid_against_data_owner/configs/config.xml b/tests/integration/test_match_process_uid_against_data_owner/configs/config.xml similarity index 100% rename from dbms/tests/integration/test_match_process_uid_against_data_owner/configs/config.xml rename to tests/integration/test_match_process_uid_against_data_owner/configs/config.xml diff --git a/dbms/tests/integration/test_match_process_uid_against_data_owner/configs/users.xml b/tests/integration/test_match_process_uid_against_data_owner/configs/users.xml similarity index 100% rename from dbms/tests/integration/test_match_process_uid_against_data_owner/configs/users.xml rename to tests/integration/test_match_process_uid_against_data_owner/configs/users.xml diff --git a/dbms/tests/integration/test_match_process_uid_against_data_owner/test.py b/tests/integration/test_match_process_uid_against_data_owner/test.py similarity index 100% rename from dbms/tests/integration/test_match_process_uid_against_data_owner/test.py rename to tests/integration/test_match_process_uid_against_data_owner/test.py diff --git a/dbms/tests/integration/test_max_http_connections_for_replication/__init__.py b/tests/integration/test_max_http_connections_for_replication/__init__.py similarity index 100% rename from dbms/tests/integration/test_max_http_connections_for_replication/__init__.py rename to tests/integration/test_max_http_connections_for_replication/__init__.py diff --git a/dbms/tests/integration/test_max_http_connections_for_replication/configs/log_conf.xml b/tests/integration/test_max_http_connections_for_replication/configs/log_conf.xml similarity index 100% rename from dbms/tests/integration/test_max_http_connections_for_replication/configs/log_conf.xml rename to tests/integration/test_max_http_connections_for_replication/configs/log_conf.xml diff --git a/dbms/tests/integration/test_max_http_connections_for_replication/configs/remote_servers.xml b/tests/integration/test_max_http_connections_for_replication/configs/remote_servers.xml similarity index 100% rename from dbms/tests/integration/test_max_http_connections_for_replication/configs/remote_servers.xml rename to 
tests/integration/test_max_http_connections_for_replication/configs/remote_servers.xml diff --git a/dbms/tests/integration/test_max_http_connections_for_replication/test.py b/tests/integration/test_max_http_connections_for_replication/test.py similarity index 100% rename from dbms/tests/integration/test_max_http_connections_for_replication/test.py rename to tests/integration/test_max_http_connections_for_replication/test.py diff --git a/dbms/tests/integration/test_merge_table_over_distributed/__init__.py b/tests/integration/test_merge_table_over_distributed/__init__.py similarity index 100% rename from dbms/tests/integration/test_merge_table_over_distributed/__init__.py rename to tests/integration/test_merge_table_over_distributed/__init__.py diff --git a/dbms/tests/integration/test_merge_table_over_distributed/configs/remote_servers.xml b/tests/integration/test_merge_table_over_distributed/configs/remote_servers.xml similarity index 100% rename from dbms/tests/integration/test_merge_table_over_distributed/configs/remote_servers.xml rename to tests/integration/test_merge_table_over_distributed/configs/remote_servers.xml diff --git a/dbms/tests/integration/test_merge_table_over_distributed/test.py b/tests/integration/test_merge_table_over_distributed/test.py similarity index 100% rename from dbms/tests/integration/test_merge_table_over_distributed/test.py rename to tests/integration/test_merge_table_over_distributed/test.py diff --git a/dbms/tests/integration/test_merge_tree_s3/__init__.py b/tests/integration/test_merge_tree_s3/__init__.py similarity index 100% rename from dbms/tests/integration/test_merge_tree_s3/__init__.py rename to tests/integration/test_merge_tree_s3/__init__.py diff --git a/dbms/tests/integration/test_merge_tree_s3/configs/config.d/bg_processing_pool_conf.xml b/tests/integration/test_merge_tree_s3/configs/config.d/bg_processing_pool_conf.xml similarity index 100% rename from dbms/tests/integration/test_merge_tree_s3/configs/config.d/bg_processing_pool_conf.xml rename to tests/integration/test_merge_tree_s3/configs/config.d/bg_processing_pool_conf.xml diff --git a/dbms/tests/integration/test_merge_tree_s3/configs/config.d/log_conf.xml b/tests/integration/test_merge_tree_s3/configs/config.d/log_conf.xml similarity index 100% rename from dbms/tests/integration/test_merge_tree_s3/configs/config.d/log_conf.xml rename to tests/integration/test_merge_tree_s3/configs/config.d/log_conf.xml diff --git a/dbms/tests/integration/test_merge_tree_s3/configs/config.d/storage_conf.xml b/tests/integration/test_merge_tree_s3/configs/config.d/storage_conf.xml similarity index 100% rename from dbms/tests/integration/test_merge_tree_s3/configs/config.d/storage_conf.xml rename to tests/integration/test_merge_tree_s3/configs/config.d/storage_conf.xml diff --git a/dbms/tests/integration/test_merge_tree_s3/configs/config.d/users.xml b/tests/integration/test_merge_tree_s3/configs/config.d/users.xml similarity index 100% rename from dbms/tests/integration/test_merge_tree_s3/configs/config.d/users.xml rename to tests/integration/test_merge_tree_s3/configs/config.d/users.xml diff --git a/dbms/tests/integration/test_merge_tree_s3/configs/config.xml b/tests/integration/test_merge_tree_s3/configs/config.xml similarity index 100% rename from dbms/tests/integration/test_merge_tree_s3/configs/config.xml rename to tests/integration/test_merge_tree_s3/configs/config.xml diff --git a/dbms/tests/integration/test_merge_tree_s3/test.py b/tests/integration/test_merge_tree_s3/test.py similarity index 100% 
rename from dbms/tests/integration/test_merge_tree_s3/test.py rename to tests/integration/test_merge_tree_s3/test.py diff --git a/dbms/tests/integration/test_multiple_disks/__init__.py b/tests/integration/test_multiple_disks/__init__.py similarity index 100% rename from dbms/tests/integration/test_multiple_disks/__init__.py rename to tests/integration/test_multiple_disks/__init__.py diff --git a/dbms/tests/integration/test_multiple_disks/configs/config.d/cluster.xml b/tests/integration/test_multiple_disks/configs/config.d/cluster.xml similarity index 100% rename from dbms/tests/integration/test_multiple_disks/configs/config.d/cluster.xml rename to tests/integration/test_multiple_disks/configs/config.d/cluster.xml diff --git a/dbms/tests/integration/test_multiple_disks/configs/config.d/storage_configuration.xml b/tests/integration/test_multiple_disks/configs/config.d/storage_configuration.xml similarity index 100% rename from dbms/tests/integration/test_multiple_disks/configs/config.d/storage_configuration.xml rename to tests/integration/test_multiple_disks/configs/config.d/storage_configuration.xml diff --git a/dbms/tests/integration/test_multiple_disks/configs/logs_config.xml b/tests/integration/test_multiple_disks/configs/logs_config.xml similarity index 100% rename from dbms/tests/integration/test_multiple_disks/configs/logs_config.xml rename to tests/integration/test_multiple_disks/configs/logs_config.xml diff --git a/dbms/tests/integration/test_multiple_disks/test.py b/tests/integration/test_multiple_disks/test.py similarity index 100% rename from dbms/tests/integration/test_multiple_disks/test.py rename to tests/integration/test_multiple_disks/test.py diff --git a/dbms/tests/integration/test_mutations_with_merge_tree/__init__.py b/tests/integration/test_mutations_with_merge_tree/__init__.py similarity index 100% rename from dbms/tests/integration/test_mutations_with_merge_tree/__init__.py rename to tests/integration/test_mutations_with_merge_tree/__init__.py diff --git a/dbms/tests/integration/test_mutations_with_merge_tree/configs/config.xml b/tests/integration/test_mutations_with_merge_tree/configs/config.xml similarity index 100% rename from dbms/tests/integration/test_mutations_with_merge_tree/configs/config.xml rename to tests/integration/test_mutations_with_merge_tree/configs/config.xml diff --git a/dbms/tests/integration/test_mutations_with_merge_tree/configs/users.xml b/tests/integration/test_mutations_with_merge_tree/configs/users.xml similarity index 100% rename from dbms/tests/integration/test_mutations_with_merge_tree/configs/users.xml rename to tests/integration/test_mutations_with_merge_tree/configs/users.xml diff --git a/dbms/tests/integration/test_mutations_with_merge_tree/test.py b/tests/integration/test_mutations_with_merge_tree/test.py similarity index 100% rename from dbms/tests/integration/test_mutations_with_merge_tree/test.py rename to tests/integration/test_mutations_with_merge_tree/test.py diff --git a/dbms/tests/integration/test_mysql_database_engine/__init__.py b/tests/integration/test_mysql_database_engine/__init__.py similarity index 100% rename from dbms/tests/integration/test_mysql_database_engine/__init__.py rename to tests/integration/test_mysql_database_engine/__init__.py diff --git a/dbms/tests/integration/test_mysql_database_engine/configs/remote_servers.xml b/tests/integration/test_mysql_database_engine/configs/remote_servers.xml similarity index 100% rename from dbms/tests/integration/test_mysql_database_engine/configs/remote_servers.xml rename 
to tests/integration/test_mysql_database_engine/configs/remote_servers.xml diff --git a/dbms/tests/integration/test_mysql_database_engine/test.py b/tests/integration/test_mysql_database_engine/test.py similarity index 100% rename from dbms/tests/integration/test_mysql_database_engine/test.py rename to tests/integration/test_mysql_database_engine/test.py diff --git a/dbms/tests/integration/test_mysql_protocol/__init__.py b/tests/integration/test_mysql_protocol/__init__.py similarity index 100% rename from dbms/tests/integration/test_mysql_protocol/__init__.py rename to tests/integration/test_mysql_protocol/__init__.py diff --git a/dbms/tests/integration/test_mysql_protocol/clients/golang/0.reference b/tests/integration/test_mysql_protocol/clients/golang/0.reference similarity index 100% rename from dbms/tests/integration/test_mysql_protocol/clients/golang/0.reference rename to tests/integration/test_mysql_protocol/clients/golang/0.reference diff --git a/dbms/tests/integration/test_mysql_protocol/clients/golang/Dockerfile b/tests/integration/test_mysql_protocol/clients/golang/Dockerfile similarity index 100% rename from dbms/tests/integration/test_mysql_protocol/clients/golang/Dockerfile rename to tests/integration/test_mysql_protocol/clients/golang/Dockerfile diff --git a/dbms/tests/integration/test_mysql_protocol/clients/golang/docker_compose.yml b/tests/integration/test_mysql_protocol/clients/golang/docker_compose.yml similarity index 100% rename from dbms/tests/integration/test_mysql_protocol/clients/golang/docker_compose.yml rename to tests/integration/test_mysql_protocol/clients/golang/docker_compose.yml diff --git a/dbms/tests/integration/test_mysql_protocol/clients/golang/main.go b/tests/integration/test_mysql_protocol/clients/golang/main.go similarity index 100% rename from dbms/tests/integration/test_mysql_protocol/clients/golang/main.go rename to tests/integration/test_mysql_protocol/clients/golang/main.go diff --git a/dbms/tests/integration/test_mysql_protocol/clients/mysql/docker_compose.yml b/tests/integration/test_mysql_protocol/clients/mysql/docker_compose.yml similarity index 100% rename from dbms/tests/integration/test_mysql_protocol/clients/mysql/docker_compose.yml rename to tests/integration/test_mysql_protocol/clients/mysql/docker_compose.yml diff --git a/dbms/tests/integration/test_mysql_protocol/clients/mysqljs/Dockerfile b/tests/integration/test_mysql_protocol/clients/mysqljs/Dockerfile similarity index 100% rename from dbms/tests/integration/test_mysql_protocol/clients/mysqljs/Dockerfile rename to tests/integration/test_mysql_protocol/clients/mysqljs/Dockerfile diff --git a/dbms/tests/integration/test_mysql_protocol/clients/mysqljs/docker_compose.yml b/tests/integration/test_mysql_protocol/clients/mysqljs/docker_compose.yml similarity index 100% rename from dbms/tests/integration/test_mysql_protocol/clients/mysqljs/docker_compose.yml rename to tests/integration/test_mysql_protocol/clients/mysqljs/docker_compose.yml diff --git a/dbms/tests/integration/test_mysql_protocol/clients/mysqljs/test.js b/tests/integration/test_mysql_protocol/clients/mysqljs/test.js similarity index 100% rename from dbms/tests/integration/test_mysql_protocol/clients/mysqljs/test.js rename to tests/integration/test_mysql_protocol/clients/mysqljs/test.js diff --git a/dbms/tests/integration/test_mysql_protocol/clients/php-mysqlnd/Dockerfile b/tests/integration/test_mysql_protocol/clients/php-mysqlnd/Dockerfile similarity index 100% rename from 
dbms/tests/integration/test_mysql_protocol/clients/php-mysqlnd/Dockerfile rename to tests/integration/test_mysql_protocol/clients/php-mysqlnd/Dockerfile diff --git a/dbms/tests/integration/test_mysql_protocol/clients/php-mysqlnd/client.crt b/tests/integration/test_mysql_protocol/clients/php-mysqlnd/client.crt similarity index 100% rename from dbms/tests/integration/test_mysql_protocol/clients/php-mysqlnd/client.crt rename to tests/integration/test_mysql_protocol/clients/php-mysqlnd/client.crt diff --git a/dbms/tests/integration/test_mysql_protocol/clients/php-mysqlnd/client.key b/tests/integration/test_mysql_protocol/clients/php-mysqlnd/client.key similarity index 100% rename from dbms/tests/integration/test_mysql_protocol/clients/php-mysqlnd/client.key rename to tests/integration/test_mysql_protocol/clients/php-mysqlnd/client.key diff --git a/dbms/tests/integration/test_mysql_protocol/clients/php-mysqlnd/docker_compose.yml b/tests/integration/test_mysql_protocol/clients/php-mysqlnd/docker_compose.yml similarity index 100% rename from dbms/tests/integration/test_mysql_protocol/clients/php-mysqlnd/docker_compose.yml rename to tests/integration/test_mysql_protocol/clients/php-mysqlnd/docker_compose.yml diff --git a/dbms/tests/integration/test_mysql_protocol/clients/php-mysqlnd/test.php b/tests/integration/test_mysql_protocol/clients/php-mysqlnd/test.php similarity index 100% rename from dbms/tests/integration/test_mysql_protocol/clients/php-mysqlnd/test.php rename to tests/integration/test_mysql_protocol/clients/php-mysqlnd/test.php diff --git a/dbms/tests/integration/test_mysql_protocol/clients/php-mysqlnd/test_ssl.php b/tests/integration/test_mysql_protocol/clients/php-mysqlnd/test_ssl.php similarity index 100% rename from dbms/tests/integration/test_mysql_protocol/clients/php-mysqlnd/test_ssl.php rename to tests/integration/test_mysql_protocol/clients/php-mysqlnd/test_ssl.php diff --git a/dbms/tests/integration/test_mysql_protocol/configs/config.xml b/tests/integration/test_mysql_protocol/configs/config.xml similarity index 100% rename from dbms/tests/integration/test_mysql_protocol/configs/config.xml rename to tests/integration/test_mysql_protocol/configs/config.xml diff --git a/dbms/tests/integration/test_mysql_protocol/configs/dhparam.pem b/tests/integration/test_mysql_protocol/configs/dhparam.pem similarity index 100% rename from dbms/tests/integration/test_mysql_protocol/configs/dhparam.pem rename to tests/integration/test_mysql_protocol/configs/dhparam.pem diff --git a/dbms/tests/integration/test_mysql_protocol/configs/server.crt b/tests/integration/test_mysql_protocol/configs/server.crt similarity index 100% rename from dbms/tests/integration/test_mysql_protocol/configs/server.crt rename to tests/integration/test_mysql_protocol/configs/server.crt diff --git a/dbms/tests/integration/test_mysql_protocol/configs/server.key b/tests/integration/test_mysql_protocol/configs/server.key similarity index 100% rename from dbms/tests/integration/test_mysql_protocol/configs/server.key rename to tests/integration/test_mysql_protocol/configs/server.key diff --git a/dbms/tests/integration/test_mysql_protocol/configs/users.xml b/tests/integration/test_mysql_protocol/configs/users.xml similarity index 100% rename from dbms/tests/integration/test_mysql_protocol/configs/users.xml rename to tests/integration/test_mysql_protocol/configs/users.xml diff --git a/dbms/tests/integration/test_mysql_protocol/test.py b/tests/integration/test_mysql_protocol/test.py similarity index 100% rename from 
dbms/tests/integration/test_mysql_protocol/test.py rename to tests/integration/test_mysql_protocol/test.py diff --git a/dbms/tests/integration/test_non_default_compression/__init__.py b/tests/integration/test_non_default_compression/__init__.py similarity index 100% rename from dbms/tests/integration/test_non_default_compression/__init__.py rename to tests/integration/test_non_default_compression/__init__.py diff --git a/dbms/tests/integration/test_non_default_compression/configs/custom_compression_by_default.xml b/tests/integration/test_non_default_compression/configs/custom_compression_by_default.xml similarity index 100% rename from dbms/tests/integration/test_non_default_compression/configs/custom_compression_by_default.xml rename to tests/integration/test_non_default_compression/configs/custom_compression_by_default.xml diff --git a/dbms/tests/integration/test_non_default_compression/configs/enable_uncompressed_cache.xml b/tests/integration/test_non_default_compression/configs/enable_uncompressed_cache.xml similarity index 100% rename from dbms/tests/integration/test_non_default_compression/configs/enable_uncompressed_cache.xml rename to tests/integration/test_non_default_compression/configs/enable_uncompressed_cache.xml diff --git a/dbms/tests/integration/test_non_default_compression/configs/lz4hc_compression_by_default.xml b/tests/integration/test_non_default_compression/configs/lz4hc_compression_by_default.xml similarity index 100% rename from dbms/tests/integration/test_non_default_compression/configs/lz4hc_compression_by_default.xml rename to tests/integration/test_non_default_compression/configs/lz4hc_compression_by_default.xml diff --git a/dbms/tests/integration/test_non_default_compression/configs/zstd_compression_by_default.xml b/tests/integration/test_non_default_compression/configs/zstd_compression_by_default.xml similarity index 100% rename from dbms/tests/integration/test_non_default_compression/configs/zstd_compression_by_default.xml rename to tests/integration/test_non_default_compression/configs/zstd_compression_by_default.xml diff --git a/dbms/tests/integration/test_non_default_compression/test.py b/tests/integration/test_non_default_compression/test.py similarity index 100% rename from dbms/tests/integration/test_non_default_compression/test.py rename to tests/integration/test_non_default_compression/test.py diff --git a/dbms/tests/integration/test_odbc_interaction/__init__.py b/tests/integration/test_odbc_interaction/__init__.py similarity index 100% rename from dbms/tests/integration/test_odbc_interaction/__init__.py rename to tests/integration/test_odbc_interaction/__init__.py diff --git a/dbms/tests/integration/test_odbc_interaction/configs/config.xml b/tests/integration/test_odbc_interaction/configs/config.xml similarity index 100% rename from dbms/tests/integration/test_odbc_interaction/configs/config.xml rename to tests/integration/test_odbc_interaction/configs/config.xml diff --git a/dbms/tests/integration/test_odbc_interaction/configs/dictionaries/postgres_odbc_hashed_dictionary.xml b/tests/integration/test_odbc_interaction/configs/dictionaries/postgres_odbc_hashed_dictionary.xml similarity index 100% rename from dbms/tests/integration/test_odbc_interaction/configs/dictionaries/postgres_odbc_hashed_dictionary.xml rename to tests/integration/test_odbc_interaction/configs/dictionaries/postgres_odbc_hashed_dictionary.xml diff --git a/dbms/tests/integration/test_odbc_interaction/configs/dictionaries/sqlite3_odbc_cached_dictionary.xml 
b/tests/integration/test_odbc_interaction/configs/dictionaries/sqlite3_odbc_cached_dictionary.xml similarity index 100% rename from dbms/tests/integration/test_odbc_interaction/configs/dictionaries/sqlite3_odbc_cached_dictionary.xml rename to tests/integration/test_odbc_interaction/configs/dictionaries/sqlite3_odbc_cached_dictionary.xml diff --git a/dbms/tests/integration/test_odbc_interaction/configs/dictionaries/sqlite3_odbc_hashed_dictionary.xml b/tests/integration/test_odbc_interaction/configs/dictionaries/sqlite3_odbc_hashed_dictionary.xml similarity index 100% rename from dbms/tests/integration/test_odbc_interaction/configs/dictionaries/sqlite3_odbc_hashed_dictionary.xml rename to tests/integration/test_odbc_interaction/configs/dictionaries/sqlite3_odbc_hashed_dictionary.xml diff --git a/dbms/tests/integration/test_odbc_interaction/configs/users.xml b/tests/integration/test_odbc_interaction/configs/users.xml similarity index 100% rename from dbms/tests/integration/test_odbc_interaction/configs/users.xml rename to tests/integration/test_odbc_interaction/configs/users.xml diff --git a/dbms/tests/integration/test_odbc_interaction/test.py b/tests/integration/test_odbc_interaction/test.py similarity index 100% rename from dbms/tests/integration/test_odbc_interaction/test.py rename to tests/integration/test_odbc_interaction/test.py diff --git a/dbms/tests/integration/test_old_versions/__init__.py b/tests/integration/test_old_versions/__init__.py similarity index 100% rename from dbms/tests/integration/test_old_versions/__init__.py rename to tests/integration/test_old_versions/__init__.py diff --git a/dbms/tests/integration/test_old_versions/configs/config.d/test_cluster.xml b/tests/integration/test_old_versions/configs/config.d/test_cluster.xml similarity index 100% rename from dbms/tests/integration/test_old_versions/configs/config.d/test_cluster.xml rename to tests/integration/test_old_versions/configs/config.d/test_cluster.xml diff --git a/dbms/tests/integration/test_old_versions/test.py b/tests/integration/test_old_versions/test.py similarity index 100% rename from dbms/tests/integration/test_old_versions/test.py rename to tests/integration/test_old_versions/test.py diff --git a/dbms/tests/integration/test_part_log_table/__init__.py b/tests/integration/test_part_log_table/__init__.py similarity index 100% rename from dbms/tests/integration/test_part_log_table/__init__.py rename to tests/integration/test_part_log_table/__init__.py diff --git a/dbms/tests/integration/test_part_log_table/configs/config_with_non_standard_part_log.xml b/tests/integration/test_part_log_table/configs/config_with_non_standard_part_log.xml similarity index 100% rename from dbms/tests/integration/test_part_log_table/configs/config_with_non_standard_part_log.xml rename to tests/integration/test_part_log_table/configs/config_with_non_standard_part_log.xml diff --git a/dbms/tests/integration/test_part_log_table/configs/config_with_standard_part_log.xml b/tests/integration/test_part_log_table/configs/config_with_standard_part_log.xml similarity index 100% rename from dbms/tests/integration/test_part_log_table/configs/config_with_standard_part_log.xml rename to tests/integration/test_part_log_table/configs/config_with_standard_part_log.xml diff --git a/dbms/tests/integration/test_part_log_table/test.py b/tests/integration/test_part_log_table/test.py similarity index 100% rename from dbms/tests/integration/test_part_log_table/test.py rename to tests/integration/test_part_log_table/test.py diff --git 
a/dbms/tests/integration/test_partition/__init__.py b/tests/integration/test_partition/__init__.py similarity index 100% rename from dbms/tests/integration/test_partition/__init__.py rename to tests/integration/test_partition/__init__.py diff --git a/dbms/tests/integration/test_partition/test.py b/tests/integration/test_partition/test.py similarity index 100% rename from dbms/tests/integration/test_partition/test.py rename to tests/integration/test_partition/test.py diff --git a/dbms/tests/integration/test_parts_delete_zookeeper/__init__.py b/tests/integration/test_parts_delete_zookeeper/__init__.py similarity index 100% rename from dbms/tests/integration/test_parts_delete_zookeeper/__init__.py rename to tests/integration/test_parts_delete_zookeeper/__init__.py diff --git a/dbms/tests/integration/test_parts_delete_zookeeper/configs/remote_servers.xml b/tests/integration/test_parts_delete_zookeeper/configs/remote_servers.xml similarity index 100% rename from dbms/tests/integration/test_parts_delete_zookeeper/configs/remote_servers.xml rename to tests/integration/test_parts_delete_zookeeper/configs/remote_servers.xml diff --git a/dbms/tests/integration/test_parts_delete_zookeeper/test.py b/tests/integration/test_parts_delete_zookeeper/test.py similarity index 100% rename from dbms/tests/integration/test_parts_delete_zookeeper/test.py rename to tests/integration/test_parts_delete_zookeeper/test.py diff --git a/dbms/tests/integration/test_polymorphic_parts/__init__.py b/tests/integration/test_polymorphic_parts/__init__.py similarity index 100% rename from dbms/tests/integration/test_polymorphic_parts/__init__.py rename to tests/integration/test_polymorphic_parts/__init__.py diff --git a/dbms/tests/integration/test_polymorphic_parts/configs/compact_parts.xml b/tests/integration/test_polymorphic_parts/configs/compact_parts.xml similarity index 100% rename from dbms/tests/integration/test_polymorphic_parts/configs/compact_parts.xml rename to tests/integration/test_polymorphic_parts/configs/compact_parts.xml diff --git a/dbms/tests/integration/test_polymorphic_parts/configs/no_leader.xml b/tests/integration/test_polymorphic_parts/configs/no_leader.xml similarity index 100% rename from dbms/tests/integration/test_polymorphic_parts/configs/no_leader.xml rename to tests/integration/test_polymorphic_parts/configs/no_leader.xml diff --git a/dbms/tests/integration/test_polymorphic_parts/configs/users.d/not_optimize_count.xml b/tests/integration/test_polymorphic_parts/configs/users.d/not_optimize_count.xml similarity index 100% rename from dbms/tests/integration/test_polymorphic_parts/configs/users.d/not_optimize_count.xml rename to tests/integration/test_polymorphic_parts/configs/users.d/not_optimize_count.xml diff --git a/dbms/tests/integration/test_polymorphic_parts/test.py b/tests/integration/test_polymorphic_parts/test.py similarity index 100% rename from dbms/tests/integration/test_polymorphic_parts/test.py rename to tests/integration/test_polymorphic_parts/test.py diff --git a/dbms/tests/integration/test_prometheus_endpoint/__init__.py b/tests/integration/test_prometheus_endpoint/__init__.py similarity index 100% rename from dbms/tests/integration/test_prometheus_endpoint/__init__.py rename to tests/integration/test_prometheus_endpoint/__init__.py diff --git a/dbms/tests/integration/test_prometheus_endpoint/configs/prom_conf.xml b/tests/integration/test_prometheus_endpoint/configs/prom_conf.xml similarity index 100% rename from dbms/tests/integration/test_prometheus_endpoint/configs/prom_conf.xml 
rename to tests/integration/test_prometheus_endpoint/configs/prom_conf.xml
diff --git a/dbms/tests/integration/test_prometheus_endpoint/test.py b/tests/integration/test_prometheus_endpoint/test.py
similarity index 100%
rename from dbms/tests/integration/test_prometheus_endpoint/test.py
rename to tests/integration/test_prometheus_endpoint/test.py
diff --git a/dbms/tests/integration/test_quota/__init__.py b/tests/integration/test_quota/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_quota/__init__.py
rename to tests/integration/test_quota/__init__.py
diff --git a/dbms/tests/integration/test_quota/configs/users.d/quota.xml b/tests/integration/test_quota/configs/users.d/quota.xml
similarity index 100%
rename from dbms/tests/integration/test_quota/configs/users.d/quota.xml
rename to tests/integration/test_quota/configs/users.d/quota.xml
diff --git a/dbms/tests/integration/test_quota/configs/users.xml b/tests/integration/test_quota/configs/users.xml
similarity index 100%
rename from dbms/tests/integration/test_quota/configs/users.xml
rename to tests/integration/test_quota/configs/users.xml
diff --git a/dbms/tests/integration/test_quota/no_quotas.xml b/tests/integration/test_quota/no_quotas.xml
similarity index 100%
rename from dbms/tests/integration/test_quota/no_quotas.xml
rename to tests/integration/test_quota/no_quotas.xml
diff --git a/dbms/tests/integration/test_quota/normal_limits.xml b/tests/integration/test_quota/normal_limits.xml
similarity index 100%
rename from dbms/tests/integration/test_quota/normal_limits.xml
rename to tests/integration/test_quota/normal_limits.xml
diff --git a/dbms/tests/integration/test_quota/simpliest.xml b/tests/integration/test_quota/simpliest.xml
similarity index 100%
rename from dbms/tests/integration/test_quota/simpliest.xml
rename to tests/integration/test_quota/simpliest.xml
diff --git a/dbms/tests/integration/test_quota/test.py b/tests/integration/test_quota/test.py
similarity index 100%
rename from dbms/tests/integration/test_quota/test.py
rename to tests/integration/test_quota/test.py
diff --git a/dbms/tests/integration/test_quota/tiny_limits.xml b/tests/integration/test_quota/tiny_limits.xml
similarity index 100%
rename from dbms/tests/integration/test_quota/tiny_limits.xml
rename to tests/integration/test_quota/tiny_limits.xml
diff --git a/dbms/tests/integration/test_quota/tracking.xml b/tests/integration/test_quota/tracking.xml
similarity index 100%
rename from dbms/tests/integration/test_quota/tracking.xml
rename to tests/integration/test_quota/tracking.xml
diff --git a/dbms/tests/integration/test_quota/two_intervals.xml b/tests/integration/test_quota/two_intervals.xml
similarity index 100%
rename from dbms/tests/integration/test_quota/two_intervals.xml
rename to tests/integration/test_quota/two_intervals.xml
diff --git a/dbms/tests/integration/test_quota/two_quotas.xml b/tests/integration/test_quota/two_quotas.xml
similarity index 100%
rename from dbms/tests/integration/test_quota/two_quotas.xml
rename to tests/integration/test_quota/two_quotas.xml
diff --git a/dbms/tests/integration/test_random_inserts/configs/conf.d/merge_tree.xml b/tests/integration/test_random_inserts/configs/conf.d/merge_tree.xml
similarity index 100%
rename from dbms/tests/integration/test_random_inserts/configs/conf.d/merge_tree.xml
rename to tests/integration/test_random_inserts/configs/conf.d/merge_tree.xml
diff --git a/dbms/tests/integration/test_random_inserts/configs/conf.d/remote_servers.xml b/tests/integration/test_random_inserts/configs/conf.d/remote_servers.xml
similarity index 100%
rename from dbms/tests/integration/test_random_inserts/configs/conf.d/remote_servers.xml
rename to tests/integration/test_random_inserts/configs/conf.d/remote_servers.xml
diff --git a/dbms/tests/integration/test_random_inserts/test.py b/tests/integration/test_random_inserts/test.py
similarity index 100%
rename from dbms/tests/integration/test_random_inserts/test.py
rename to tests/integration/test_random_inserts/test.py
diff --git a/dbms/tests/integration/test_random_inserts/test.sh b/tests/integration/test_random_inserts/test.sh
similarity index 100%
rename from dbms/tests/integration/test_random_inserts/test.sh
rename to tests/integration/test_random_inserts/test.sh
diff --git a/dbms/tests/integration/test_read_temporary_tables_on_failure/__init__.py b/tests/integration/test_read_temporary_tables_on_failure/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_read_temporary_tables_on_failure/__init__.py
rename to tests/integration/test_read_temporary_tables_on_failure/__init__.py
diff --git a/dbms/tests/integration/test_read_temporary_tables_on_failure/test.py b/tests/integration/test_read_temporary_tables_on_failure/test.py
similarity index 100%
rename from dbms/tests/integration/test_read_temporary_tables_on_failure/test.py
rename to tests/integration/test_read_temporary_tables_on_failure/test.py
diff --git a/dbms/tests/integration/test_recovery_replica/__init__.py b/tests/integration/test_recovery_replica/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_recovery_replica/__init__.py
rename to tests/integration/test_recovery_replica/__init__.py
diff --git a/dbms/tests/integration/test_recovery_replica/configs/remote_servers.xml b/tests/integration/test_recovery_replica/configs/remote_servers.xml
similarity index 100%
rename from dbms/tests/integration/test_recovery_replica/configs/remote_servers.xml
rename to tests/integration/test_recovery_replica/configs/remote_servers.xml
diff --git a/dbms/tests/integration/test_recovery_replica/test.py b/tests/integration/test_recovery_replica/test.py
similarity index 100%
rename from dbms/tests/integration/test_recovery_replica/test.py
rename to tests/integration/test_recovery_replica/test.py
diff --git a/dbms/tests/integration/test_redirect_url_storage/__init__.py b/tests/integration/test_redirect_url_storage/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_redirect_url_storage/__init__.py
rename to tests/integration/test_redirect_url_storage/__init__.py
diff --git a/dbms/tests/integration/test_redirect_url_storage/test.py b/tests/integration/test_redirect_url_storage/test.py
similarity index 100%
rename from dbms/tests/integration/test_redirect_url_storage/test.py
rename to tests/integration/test_redirect_url_storage/test.py
diff --git a/dbms/tests/integration/test_relative_filepath/__init__.py b/tests/integration/test_relative_filepath/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_relative_filepath/__init__.py
rename to tests/integration/test_relative_filepath/__init__.py
diff --git a/dbms/tests/integration/test_relative_filepath/configs/config.xml b/tests/integration/test_relative_filepath/configs/config.xml
similarity index 100%
rename from dbms/tests/integration/test_relative_filepath/configs/config.xml
rename to tests/integration/test_relative_filepath/configs/config.xml
diff --git a/dbms/tests/integration/test_relative_filepath/test.py b/tests/integration/test_relative_filepath/test.py
similarity index 100%
rename from dbms/tests/integration/test_relative_filepath/test.py
rename to tests/integration/test_relative_filepath/test.py
diff --git a/dbms/tests/integration/test_reload_max_table_size_to_drop/__init__.py b/tests/integration/test_reload_max_table_size_to_drop/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_reload_max_table_size_to_drop/__init__.py
rename to tests/integration/test_reload_max_table_size_to_drop/__init__.py
diff --git a/dbms/tests/integration/test_reload_max_table_size_to_drop/configs/config.xml b/tests/integration/test_reload_max_table_size_to_drop/configs/config.xml
similarity index 100%
rename from dbms/tests/integration/test_reload_max_table_size_to_drop/configs/config.xml
rename to tests/integration/test_reload_max_table_size_to_drop/configs/config.xml
diff --git a/dbms/tests/integration/test_reload_max_table_size_to_drop/configs/users.xml b/tests/integration/test_reload_max_table_size_to_drop/configs/users.xml
similarity index 100%
rename from dbms/tests/integration/test_reload_max_table_size_to_drop/configs/users.xml
rename to tests/integration/test_reload_max_table_size_to_drop/configs/users.xml
diff --git a/dbms/tests/integration/test_reload_max_table_size_to_drop/test.py b/tests/integration/test_reload_max_table_size_to_drop/test.py
similarity index 100%
rename from dbms/tests/integration/test_reload_max_table_size_to_drop/test.py
rename to tests/integration/test_reload_max_table_size_to_drop/test.py
diff --git a/dbms/tests/integration/test_reloading_storage_configuration/__init__.py b/tests/integration/test_reloading_storage_configuration/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_reloading_storage_configuration/__init__.py
rename to tests/integration/test_reloading_storage_configuration/__init__.py
diff --git a/dbms/tests/integration/test_reloading_storage_configuration/configs/config.d/cluster.xml b/tests/integration/test_reloading_storage_configuration/configs/config.d/cluster.xml
similarity index 100%
rename from dbms/tests/integration/test_reloading_storage_configuration/configs/config.d/cluster.xml
rename to tests/integration/test_reloading_storage_configuration/configs/config.d/cluster.xml
diff --git a/dbms/tests/integration/test_reloading_storage_configuration/configs/config.d/storage_configuration.xml b/tests/integration/test_reloading_storage_configuration/configs/config.d/storage_configuration.xml
similarity index 100%
rename from dbms/tests/integration/test_reloading_storage_configuration/configs/config.d/storage_configuration.xml
rename to tests/integration/test_reloading_storage_configuration/configs/config.d/storage_configuration.xml
diff --git a/dbms/tests/integration/test_reloading_storage_configuration/configs/logs_config.xml b/tests/integration/test_reloading_storage_configuration/configs/logs_config.xml
similarity index 100%
rename from dbms/tests/integration/test_reloading_storage_configuration/configs/logs_config.xml
rename to tests/integration/test_reloading_storage_configuration/configs/logs_config.xml
diff --git a/dbms/tests/integration/test_reloading_storage_configuration/test.py b/tests/integration/test_reloading_storage_configuration/test.py
similarity index 100%
rename from dbms/tests/integration/test_reloading_storage_configuration/test.py
rename to tests/integration/test_reloading_storage_configuration/test.py
diff --git a/dbms/tests/integration/test_remote_prewhere/__init__.py b/tests/integration/test_remote_prewhere/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_remote_prewhere/__init__.py
rename to tests/integration/test_remote_prewhere/__init__.py
diff --git a/dbms/tests/integration/test_remote_prewhere/configs/log_conf.xml b/tests/integration/test_remote_prewhere/configs/log_conf.xml
similarity index 100%
rename from dbms/tests/integration/test_remote_prewhere/configs/log_conf.xml
rename to tests/integration/test_remote_prewhere/configs/log_conf.xml
diff --git a/dbms/tests/integration/test_remote_prewhere/test.py b/tests/integration/test_remote_prewhere/test.py
similarity index 100%
rename from dbms/tests/integration/test_remote_prewhere/test.py
rename to tests/integration/test_remote_prewhere/test.py
diff --git a/dbms/tests/integration/test_replace_partition/__init__.py b/tests/integration/test_replace_partition/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_replace_partition/__init__.py
rename to tests/integration/test_replace_partition/__init__.py
diff --git a/dbms/tests/integration/test_replace_partition/configs/remote_servers.xml b/tests/integration/test_replace_partition/configs/remote_servers.xml
similarity index 100%
rename from dbms/tests/integration/test_replace_partition/configs/remote_servers.xml
rename to tests/integration/test_replace_partition/configs/remote_servers.xml
diff --git a/dbms/tests/integration/test_replace_partition/test.py b/tests/integration/test_replace_partition/test.py
similarity index 100%
rename from dbms/tests/integration/test_replace_partition/test.py
rename to tests/integration/test_replace_partition/test.py
diff --git a/dbms/tests/integration/test_replica_can_become_leader/__init__.py b/tests/integration/test_replica_can_become_leader/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_replica_can_become_leader/__init__.py
rename to tests/integration/test_replica_can_become_leader/__init__.py
diff --git a/dbms/tests/integration/test_replica_can_become_leader/configs/notleader.xml b/tests/integration/test_replica_can_become_leader/configs/notleader.xml
similarity index 100%
rename from dbms/tests/integration/test_replica_can_become_leader/configs/notleader.xml
rename to tests/integration/test_replica_can_become_leader/configs/notleader.xml
diff --git a/dbms/tests/integration/test_replica_can_become_leader/configs/notleaderignorecase.xml b/tests/integration/test_replica_can_become_leader/configs/notleaderignorecase.xml
similarity index 100%
rename from dbms/tests/integration/test_replica_can_become_leader/configs/notleaderignorecase.xml
rename to tests/integration/test_replica_can_become_leader/configs/notleaderignorecase.xml
diff --git a/dbms/tests/integration/test_replica_can_become_leader/test.py b/tests/integration/test_replica_can_become_leader/test.py
similarity index 100%
rename from dbms/tests/integration/test_replica_can_become_leader/test.py
rename to tests/integration/test_replica_can_become_leader/test.py
diff --git a/dbms/tests/integration/test_replicated_mutations/__init__.py b/tests/integration/test_replicated_mutations/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_replicated_mutations/__init__.py
rename to tests/integration/test_replicated_mutations/__init__.py
diff --git a/dbms/tests/integration/test_replicated_mutations/configs/merge_tree.xml b/tests/integration/test_replicated_mutations/configs/merge_tree.xml
similarity index 100%
rename from dbms/tests/integration/test_replicated_mutations/configs/merge_tree.xml
rename to tests/integration/test_replicated_mutations/configs/merge_tree.xml
diff --git a/dbms/tests/integration/test_replicated_mutations/configs/merge_tree_max_parts.xml b/tests/integration/test_replicated_mutations/configs/merge_tree_max_parts.xml
similarity index 100%
rename from dbms/tests/integration/test_replicated_mutations/configs/merge_tree_max_parts.xml
rename to tests/integration/test_replicated_mutations/configs/merge_tree_max_parts.xml
diff --git a/dbms/tests/integration/test_replicated_mutations/test.py b/tests/integration/test_replicated_mutations/test.py
similarity index 100%
rename from dbms/tests/integration/test_replicated_mutations/test.py
rename to tests/integration/test_replicated_mutations/test.py
diff --git a/dbms/tests/integration/test_replicating_constants/__init__.py b/tests/integration/test_replicating_constants/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_replicating_constants/__init__.py
rename to tests/integration/test_replicating_constants/__init__.py
diff --git a/dbms/tests/integration/test_replicating_constants/test.py b/tests/integration/test_replicating_constants/test.py
similarity index 100%
rename from dbms/tests/integration/test_replicating_constants/test.py
rename to tests/integration/test_replicating_constants/test.py
diff --git a/dbms/tests/integration/test_replication_credentials/__init__.py b/tests/integration/test_replication_credentials/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_replication_credentials/__init__.py
rename to tests/integration/test_replication_credentials/__init__.py
diff --git a/dbms/tests/integration/test_replication_credentials/configs/credentials1.xml b/tests/integration/test_replication_credentials/configs/credentials1.xml
similarity index 100%
rename from dbms/tests/integration/test_replication_credentials/configs/credentials1.xml
rename to tests/integration/test_replication_credentials/configs/credentials1.xml
diff --git a/dbms/tests/integration/test_replication_credentials/configs/credentials2.xml b/tests/integration/test_replication_credentials/configs/credentials2.xml
similarity index 100%
rename from dbms/tests/integration/test_replication_credentials/configs/credentials2.xml
rename to tests/integration/test_replication_credentials/configs/credentials2.xml
diff --git a/dbms/tests/integration/test_replication_credentials/configs/no_credentials.xml b/tests/integration/test_replication_credentials/configs/no_credentials.xml
similarity index 100%
rename from dbms/tests/integration/test_replication_credentials/configs/no_credentials.xml
rename to tests/integration/test_replication_credentials/configs/no_credentials.xml
diff --git a/dbms/tests/integration/test_replication_credentials/configs/remote_servers.xml b/tests/integration/test_replication_credentials/configs/remote_servers.xml
similarity index 100%
rename from dbms/tests/integration/test_replication_credentials/configs/remote_servers.xml
rename to tests/integration/test_replication_credentials/configs/remote_servers.xml
diff --git a/dbms/tests/integration/test_replication_credentials/test.py b/tests/integration/test_replication_credentials/test.py
similarity index 100%
rename from dbms/tests/integration/test_replication_credentials/test.py
rename to tests/integration/test_replication_credentials/test.py
diff --git a/dbms/tests/integration/test_replication_without_zookeeper/__init__.py b/tests/integration/test_replication_without_zookeeper/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_replication_without_zookeeper/__init__.py
rename to tests/integration/test_replication_without_zookeeper/__init__.py
diff --git a/dbms/tests/integration/test_replication_without_zookeeper/configs/remote_servers.xml b/tests/integration/test_replication_without_zookeeper/configs/remote_servers.xml
similarity index 100%
rename from dbms/tests/integration/test_replication_without_zookeeper/configs/remote_servers.xml
rename to tests/integration/test_replication_without_zookeeper/configs/remote_servers.xml
diff --git a/dbms/tests/integration/test_replication_without_zookeeper/test.py b/tests/integration/test_replication_without_zookeeper/test.py
similarity index 100%
rename from dbms/tests/integration/test_replication_without_zookeeper/test.py
rename to tests/integration/test_replication_without_zookeeper/test.py
diff --git a/dbms/tests/integration/test_row_policy/__init__.py b/tests/integration/test_row_policy/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_row_policy/__init__.py
rename to tests/integration/test_row_policy/__init__.py
diff --git a/dbms/tests/integration/test_row_policy/all_rows.xml b/tests/integration/test_row_policy/all_rows.xml
similarity index 100%
rename from dbms/tests/integration/test_row_policy/all_rows.xml
rename to tests/integration/test_row_policy/all_rows.xml
diff --git a/dbms/tests/integration/test_row_policy/configs/config.d/remote_servers.xml b/tests/integration/test_row_policy/configs/config.d/remote_servers.xml
similarity index 100%
rename from dbms/tests/integration/test_row_policy/configs/config.d/remote_servers.xml
rename to tests/integration/test_row_policy/configs/config.d/remote_servers.xml
diff --git a/dbms/tests/integration/test_row_policy/configs/users.d/row_policy.xml b/tests/integration/test_row_policy/configs/users.d/row_policy.xml
similarity index 100%
rename from dbms/tests/integration/test_row_policy/configs/users.d/row_policy.xml
rename to tests/integration/test_row_policy/configs/users.d/row_policy.xml
diff --git a/dbms/tests/integration/test_row_policy/configs/users.xml b/tests/integration/test_row_policy/configs/users.xml
similarity index 100%
rename from dbms/tests/integration/test_row_policy/configs/users.xml
rename to tests/integration/test_row_policy/configs/users.xml
diff --git a/dbms/tests/integration/test_row_policy/multiple_tags_with_table_names.xml b/tests/integration/test_row_policy/multiple_tags_with_table_names.xml
similarity index 100%
rename from dbms/tests/integration/test_row_policy/multiple_tags_with_table_names.xml
rename to tests/integration/test_row_policy/multiple_tags_with_table_names.xml
diff --git a/dbms/tests/integration/test_row_policy/no_filters.xml b/tests/integration/test_row_policy/no_filters.xml
similarity index 100%
rename from dbms/tests/integration/test_row_policy/no_filters.xml
rename to tests/integration/test_row_policy/no_filters.xml
diff --git a/dbms/tests/integration/test_row_policy/no_rows.xml b/tests/integration/test_row_policy/no_rows.xml
similarity index 100%
rename from dbms/tests/integration/test_row_policy/no_rows.xml
rename to tests/integration/test_row_policy/no_rows.xml
diff --git a/dbms/tests/integration/test_row_policy/normal_filters.xml b/tests/integration/test_row_policy/normal_filters.xml
similarity index 100%
rename from dbms/tests/integration/test_row_policy/normal_filters.xml
rename to tests/integration/test_row_policy/normal_filters.xml
diff --git a/dbms/tests/integration/test_row_policy/tag_with_table_name.xml b/tests/integration/test_row_policy/tag_with_table_name.xml
similarity index 100%
rename from dbms/tests/integration/test_row_policy/tag_with_table_name.xml
rename to tests/integration/test_row_policy/tag_with_table_name.xml
diff --git a/dbms/tests/integration/test_row_policy/test.py b/tests/integration/test_row_policy/test.py
similarity index 100%
rename from dbms/tests/integration/test_row_policy/test.py
rename to tests/integration/test_row_policy/test.py
diff --git a/dbms/tests/integration/test_send_request_to_leader_replica/__init__.py b/tests/integration/test_send_request_to_leader_replica/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_send_request_to_leader_replica/__init__.py
rename to tests/integration/test_send_request_to_leader_replica/__init__.py
diff --git a/dbms/tests/integration/test_send_request_to_leader_replica/configs/remote_servers.xml b/tests/integration/test_send_request_to_leader_replica/configs/remote_servers.xml
similarity index 100%
rename from dbms/tests/integration/test_send_request_to_leader_replica/configs/remote_servers.xml
rename to tests/integration/test_send_request_to_leader_replica/configs/remote_servers.xml
diff --git a/dbms/tests/integration/test_send_request_to_leader_replica/configs/user_good_allowed.xml b/tests/integration/test_send_request_to_leader_replica/configs/user_good_allowed.xml
similarity index 100%
rename from dbms/tests/integration/test_send_request_to_leader_replica/configs/user_good_allowed.xml
rename to tests/integration/test_send_request_to_leader_replica/configs/user_good_allowed.xml
diff --git a/dbms/tests/integration/test_send_request_to_leader_replica/configs/user_good_restricted.xml b/tests/integration/test_send_request_to_leader_replica/configs/user_good_restricted.xml
similarity index 100%
rename from dbms/tests/integration/test_send_request_to_leader_replica/configs/user_good_restricted.xml
rename to tests/integration/test_send_request_to_leader_replica/configs/user_good_restricted.xml
diff --git a/dbms/tests/integration/test_send_request_to_leader_replica/test.py b/tests/integration/test_send_request_to_leader_replica/test.py
similarity index 100%
rename from dbms/tests/integration/test_send_request_to_leader_replica/test.py
rename to tests/integration/test_send_request_to_leader_replica/test.py
diff --git a/dbms/tests/integration/test_server_initialization/__init__.py b/tests/integration/test_server_initialization/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_server_initialization/__init__.py
rename to tests/integration/test_server_initialization/__init__.py
diff --git a/dbms/tests/integration/test_server_initialization/clickhouse_path/data/default/should_be_restored/data.CSV b/tests/integration/test_server_initialization/clickhouse_path/data/default/should_be_restored/data.CSV
similarity index 100%
rename from dbms/tests/integration/test_server_initialization/clickhouse_path/data/default/should_be_restored/data.CSV
rename to tests/integration/test_server_initialization/clickhouse_path/data/default/should_be_restored/data.CSV
diff --git a/dbms/tests/integration/test_server_initialization/clickhouse_path/metadata/default/should_be_dropped.sql.tmp_drop b/tests/integration/test_server_initialization/clickhouse_path/metadata/default/should_be_dropped.sql.tmp_drop
similarity index 100%
rename from dbms/tests/integration/test_server_initialization/clickhouse_path/metadata/default/should_be_dropped.sql.tmp_drop
rename to tests/integration/test_server_initialization/clickhouse_path/metadata/default/should_be_dropped.sql.tmp_drop
diff --git a/dbms/tests/integration/test_server_initialization/clickhouse_path/metadata/default/should_be_restored.sql.tmp_drop b/tests/integration/test_server_initialization/clickhouse_path/metadata/default/should_be_restored.sql.tmp_drop
similarity index 100%
rename from dbms/tests/integration/test_server_initialization/clickhouse_path/metadata/default/should_be_restored.sql.tmp_drop
rename to tests/integration/test_server_initialization/clickhouse_path/metadata/default/should_be_restored.sql.tmp_drop
diff --git a/dbms/tests/integration/test_server_initialization/clickhouse_path/metadata/default/sophisticated_default.sql b/tests/integration/test_server_initialization/clickhouse_path/metadata/default/sophisticated_default.sql
similarity index 100%
rename from dbms/tests/integration/test_server_initialization/clickhouse_path/metadata/default/sophisticated_default.sql
rename to tests/integration/test_server_initialization/clickhouse_path/metadata/default/sophisticated_default.sql
diff --git a/dbms/tests/integration/test_server_initialization/clickhouse_path_fail/metadata/default.sql b/tests/integration/test_server_initialization/clickhouse_path_fail/metadata/default.sql
similarity index 100%
rename from dbms/tests/integration/test_server_initialization/clickhouse_path_fail/metadata/default.sql
rename to tests/integration/test_server_initialization/clickhouse_path_fail/metadata/default.sql
diff --git a/dbms/tests/integration/test_server_initialization/test.py b/tests/integration/test_server_initialization/test.py
similarity index 100%
rename from dbms/tests/integration/test_server_initialization/test.py
rename to tests/integration/test_server_initialization/test.py
diff --git a/dbms/tests/integration/test_settings_constraints/__init__.py b/tests/integration/test_settings_constraints/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_settings_constraints/__init__.py
rename to tests/integration/test_settings_constraints/__init__.py
diff --git a/dbms/tests/integration/test_settings_constraints/configs/users.xml b/tests/integration/test_settings_constraints/configs/users.xml
similarity index 100%
rename from dbms/tests/integration/test_settings_constraints/configs/users.xml
rename to tests/integration/test_settings_constraints/configs/users.xml
diff --git a/dbms/tests/integration/test_settings_constraints/test.py b/tests/integration/test_settings_constraints/test.py
similarity index 100%
rename from dbms/tests/integration/test_settings_constraints/test.py
rename to tests/integration/test_settings_constraints/test.py
diff --git a/dbms/tests/integration/test_settings_constraints_distributed/__init__.py b/tests/integration/test_settings_constraints_distributed/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_settings_constraints_distributed/__init__.py
rename to tests/integration/test_settings_constraints_distributed/__init__.py
diff --git a/dbms/tests/integration/test_settings_constraints_distributed/configs/remote_servers.xml b/tests/integration/test_settings_constraints_distributed/configs/remote_servers.xml
similarity index 100%
rename from dbms/tests/integration/test_settings_constraints_distributed/configs/remote_servers.xml
rename to tests/integration/test_settings_constraints_distributed/configs/remote_servers.xml
diff --git a/dbms/tests/integration/test_settings_constraints_distributed/test.py b/tests/integration/test_settings_constraints_distributed/test.py
similarity index 100%
rename from dbms/tests/integration/test_settings_constraints_distributed/test.py
rename to tests/integration/test_settings_constraints_distributed/test.py
diff --git a/dbms/tests/integration/test_settings_profile/__init__.py b/tests/integration/test_settings_profile/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_settings_profile/__init__.py
rename to tests/integration/test_settings_profile/__init__.py
diff --git a/dbms/tests/integration/test_settings_profile/test.py b/tests/integration/test_settings_profile/test.py
similarity index 100%
rename from dbms/tests/integration/test_settings_profile/test.py
rename to tests/integration/test_settings_profile/test.py
diff --git a/dbms/tests/integration/test_storage_hdfs/__init__.py b/tests/integration/test_storage_hdfs/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_storage_hdfs/__init__.py
rename to tests/integration/test_storage_hdfs/__init__.py
diff --git a/dbms/tests/integration/test_storage_hdfs/configs/log_conf.xml b/tests/integration/test_storage_hdfs/configs/log_conf.xml
similarity index 100%
rename from dbms/tests/integration/test_storage_hdfs/configs/log_conf.xml
rename to tests/integration/test_storage_hdfs/configs/log_conf.xml
diff --git a/dbms/tests/integration/test_storage_hdfs/test.py b/tests/integration/test_storage_hdfs/test.py
similarity index 100%
rename from dbms/tests/integration/test_storage_hdfs/test.py
rename to tests/integration/test_storage_hdfs/test.py
diff --git a/dbms/tests/integration/test_storage_kafka/__init__.py b/tests/integration/test_storage_kafka/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_storage_kafka/__init__.py
rename to tests/integration/test_storage_kafka/__init__.py
diff --git a/dbms/tests/integration/test_storage_kafka/clickhouse_path/format_schemas/kafka.proto b/tests/integration/test_storage_kafka/clickhouse_path/format_schemas/kafka.proto
similarity index 100%
rename from dbms/tests/integration/test_storage_kafka/clickhouse_path/format_schemas/kafka.proto
rename to tests/integration/test_storage_kafka/clickhouse_path/format_schemas/kafka.proto
diff --git a/dbms/tests/integration/test_storage_kafka/configs/kafka.xml b/tests/integration/test_storage_kafka/configs/kafka.xml
similarity index 100%
rename from dbms/tests/integration/test_storage_kafka/configs/kafka.xml
rename to tests/integration/test_storage_kafka/configs/kafka.xml
diff --git a/dbms/tests/integration/test_storage_kafka/configs/log_conf.xml b/tests/integration/test_storage_kafka/configs/log_conf.xml
similarity index 100%
rename from dbms/tests/integration/test_storage_kafka/configs/log_conf.xml
rename to tests/integration/test_storage_kafka/configs/log_conf.xml
diff --git a/dbms/tests/integration/test_storage_kafka/configs/users.xml b/tests/integration/test_storage_kafka/configs/users.xml
similarity index 100%
rename from dbms/tests/integration/test_storage_kafka/configs/users.xml
rename to tests/integration/test_storage_kafka/configs/users.xml
diff --git a/dbms/tests/integration/test_storage_kafka/kafka_pb2.py b/tests/integration/test_storage_kafka/kafka_pb2.py
similarity index 100%
rename from dbms/tests/integration/test_storage_kafka/kafka_pb2.py
rename to tests/integration/test_storage_kafka/kafka_pb2.py
diff --git a/dbms/tests/integration/test_storage_kafka/test.py b/tests/integration/test_storage_kafka/test.py
similarity index 100%
rename from dbms/tests/integration/test_storage_kafka/test.py
rename to tests/integration/test_storage_kafka/test.py
diff --git a/dbms/tests/integration/test_storage_kafka/test_kafka_json.reference b/tests/integration/test_storage_kafka/test_kafka_json.reference
similarity index 100%
rename from dbms/tests/integration/test_storage_kafka/test_kafka_json.reference
rename to tests/integration/test_storage_kafka/test_kafka_json.reference
diff --git a/dbms/tests/integration/test_storage_kafka/test_kafka_virtual1.reference b/tests/integration/test_storage_kafka/test_kafka_virtual1.reference
similarity index 100%
rename from dbms/tests/integration/test_storage_kafka/test_kafka_virtual1.reference
rename to tests/integration/test_storage_kafka/test_kafka_virtual1.reference
diff --git a/dbms/tests/integration/test_storage_kafka/test_kafka_virtual2.reference b/tests/integration/test_storage_kafka/test_kafka_virtual2.reference
similarity index 100%
rename from dbms/tests/integration/test_storage_kafka/test_kafka_virtual2.reference
rename to tests/integration/test_storage_kafka/test_kafka_virtual2.reference
diff --git a/dbms/tests/integration/test_storage_mysql/__init__.py b/tests/integration/test_storage_mysql/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_storage_mysql/__init__.py
rename to tests/integration/test_storage_mysql/__init__.py
diff --git a/dbms/tests/integration/test_storage_mysql/configs/remote_servers.xml b/tests/integration/test_storage_mysql/configs/remote_servers.xml
similarity index 100%
rename from dbms/tests/integration/test_storage_mysql/configs/remote_servers.xml
rename to tests/integration/test_storage_mysql/configs/remote_servers.xml
diff --git a/dbms/tests/integration/test_storage_mysql/test.py b/tests/integration/test_storage_mysql/test.py
similarity index 100%
rename from dbms/tests/integration/test_storage_mysql/test.py
rename to tests/integration/test_storage_mysql/test.py
diff --git a/dbms/tests/integration/test_storage_s3/__init__.py b/tests/integration/test_storage_s3/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_storage_s3/__init__.py
rename to tests/integration/test_storage_s3/__init__.py
diff --git a/dbms/tests/integration/test_storage_s3/configs/config_for_test_remote_host_filter.xml b/tests/integration/test_storage_s3/configs/config_for_test_remote_host_filter.xml
similarity index 100%
rename from dbms/tests/integration/test_storage_s3/configs/config_for_test_remote_host_filter.xml
rename to tests/integration/test_storage_s3/configs/config_for_test_remote_host_filter.xml
diff --git a/dbms/tests/integration/test_storage_s3/test.py b/tests/integration/test_storage_s3/test.py
similarity index 100%
rename from dbms/tests/integration/test_storage_s3/test.py
rename to tests/integration/test_storage_s3/test.py
diff --git a/dbms/tests/integration/test_system_merges/__init__.py b/tests/integration/test_system_merges/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_system_merges/__init__.py
rename to tests/integration/test_system_merges/__init__.py
diff --git a/dbms/tests/integration/test_system_merges/configs/config.d/cluster.xml b/tests/integration/test_system_merges/configs/config.d/cluster.xml
similarity index 100%
rename from dbms/tests/integration/test_system_merges/configs/config.d/cluster.xml
rename to tests/integration/test_system_merges/configs/config.d/cluster.xml
diff --git a/dbms/tests/integration/test_system_merges/configs/logs_config.xml b/tests/integration/test_system_merges/configs/logs_config.xml
similarity index 100%
rename from dbms/tests/integration/test_system_merges/configs/logs_config.xml
rename to tests/integration/test_system_merges/configs/logs_config.xml
diff --git a/dbms/tests/integration/test_system_merges/test.py b/tests/integration/test_system_merges/test.py
similarity index 100%
rename from dbms/tests/integration/test_system_merges/test.py
rename to tests/integration/test_system_merges/test.py
diff --git a/dbms/tests/integration/test_system_queries/__init__.py b/tests/integration/test_system_queries/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_system_queries/__init__.py
rename to tests/integration/test_system_queries/__init__.py
diff --git a/dbms/tests/integration/test_system_queries/configs/config.d/clusters_config.xml b/tests/integration/test_system_queries/configs/config.d/clusters_config.xml
similarity index 100%
rename from dbms/tests/integration/test_system_queries/configs/config.d/clusters_config.xml
rename to tests/integration/test_system_queries/configs/config.d/clusters_config.xml
diff --git a/dbms/tests/integration/test_system_queries/configs/config.d/dictionaries_config.xml b/tests/integration/test_system_queries/configs/config.d/dictionaries_config.xml
similarity index 100%
rename from dbms/tests/integration/test_system_queries/configs/config.d/dictionaries_config.xml
rename to tests/integration/test_system_queries/configs/config.d/dictionaries_config.xml
diff --git a/dbms/tests/integration/test_system_queries/configs/config.d/query_log.xml b/tests/integration/test_system_queries/configs/config.d/query_log.xml
similarity index 100%
rename from dbms/tests/integration/test_system_queries/configs/config.d/query_log.xml
rename to tests/integration/test_system_queries/configs/config.d/query_log.xml
diff --git a/dbms/tests/integration/test_system_queries/configs/dictionaries/dictionary_clickhouse_cache.xml b/tests/integration/test_system_queries/configs/dictionaries/dictionary_clickhouse_cache.xml
similarity index 100%
rename from dbms/tests/integration/test_system_queries/configs/dictionaries/dictionary_clickhouse_cache.xml
rename to tests/integration/test_system_queries/configs/dictionaries/dictionary_clickhouse_cache.xml
diff --git a/dbms/tests/integration/test_system_queries/configs/dictionaries/dictionary_clickhouse_flat.xml b/tests/integration/test_system_queries/configs/dictionaries/dictionary_clickhouse_flat.xml
similarity index 100%
rename from dbms/tests/integration/test_system_queries/configs/dictionaries/dictionary_clickhouse_flat.xml
rename to tests/integration/test_system_queries/configs/dictionaries/dictionary_clickhouse_flat.xml
diff --git a/dbms/tests/integration/test_system_queries/configs/users.xml b/tests/integration/test_system_queries/configs/users.xml
similarity index 100%
rename from dbms/tests/integration/test_system_queries/configs/users.xml
rename to tests/integration/test_system_queries/configs/users.xml
diff --git a/dbms/tests/integration/test_system_queries/test.py b/tests/integration/test_system_queries/test.py
similarity index 100%
rename from dbms/tests/integration/test_system_queries/test.py
rename to tests/integration/test_system_queries/test.py
diff --git a/dbms/tests/integration/test_text_log_level/__init__.py b/tests/integration/test_text_log_level/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_text_log_level/__init__.py
rename to tests/integration/test_text_log_level/__init__.py
diff --git a/dbms/tests/integration/test_text_log_level/configs/config.d/text_log.xml b/tests/integration/test_text_log_level/configs/config.d/text_log.xml
similarity index 100%
rename from dbms/tests/integration/test_text_log_level/configs/config.d/text_log.xml
rename to tests/integration/test_text_log_level/configs/config.d/text_log.xml
diff --git a/dbms/tests/integration/test_text_log_level/test.py b/tests/integration/test_text_log_level/test.py
similarity index 100%
rename from dbms/tests/integration/test_text_log_level/test.py
rename to tests/integration/test_text_log_level/test.py
diff --git a/dbms/tests/integration/test_timezone_config/__init__.py b/tests/integration/test_timezone_config/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_timezone_config/__init__.py
rename to tests/integration/test_timezone_config/__init__.py
diff --git a/dbms/tests/integration/test_timezone_config/configs/config.xml b/tests/integration/test_timezone_config/configs/config.xml
similarity index 100%
rename from dbms/tests/integration/test_timezone_config/configs/config.xml
rename to tests/integration/test_timezone_config/configs/config.xml
diff --git a/dbms/tests/integration/test_timezone_config/test.py b/tests/integration/test_timezone_config/test.py
similarity index 100%
rename from dbms/tests/integration/test_timezone_config/test.py
rename to tests/integration/test_timezone_config/test.py
diff --git a/dbms/tests/integration/test_tmp_policy/__init__.py b/tests/integration/test_tmp_policy/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_tmp_policy/__init__.py
rename to tests/integration/test_tmp_policy/__init__.py
diff --git a/dbms/tests/integration/test_tmp_policy/configs/config.d/storage_configuration.xml b/tests/integration/test_tmp_policy/configs/config.d/storage_configuration.xml
similarity index 100%
rename from dbms/tests/integration/test_tmp_policy/configs/config.d/storage_configuration.xml
rename to tests/integration/test_tmp_policy/configs/config.d/storage_configuration.xml
diff --git a/dbms/tests/integration/test_tmp_policy/test.py b/tests/integration/test_tmp_policy/test.py
similarity index 100%
rename from dbms/tests/integration/test_tmp_policy/test.py
rename to tests/integration/test_tmp_policy/test.py
diff --git a/dbms/tests/integration/test_ttl_move/__init__.py b/tests/integration/test_ttl_move/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_ttl_move/__init__.py
rename to tests/integration/test_ttl_move/__init__.py
diff --git a/dbms/tests/integration/test_ttl_move/configs/config.d/cluster.xml b/tests/integration/test_ttl_move/configs/config.d/cluster.xml
similarity index 100%
rename from dbms/tests/integration/test_ttl_move/configs/config.d/cluster.xml
rename to tests/integration/test_ttl_move/configs/config.d/cluster.xml
diff --git a/dbms/tests/integration/test_ttl_move/configs/config.d/instant_moves.xml b/tests/integration/test_ttl_move/configs/config.d/instant_moves.xml
similarity index 100%
rename from dbms/tests/integration/test_ttl_move/configs/config.d/instant_moves.xml
rename to tests/integration/test_ttl_move/configs/config.d/instant_moves.xml
diff --git a/dbms/tests/integration/test_ttl_move/configs/config.d/storage_configuration.xml b/tests/integration/test_ttl_move/configs/config.d/storage_configuration.xml
similarity index 100%
rename from dbms/tests/integration/test_ttl_move/configs/config.d/storage_configuration.xml
rename to tests/integration/test_ttl_move/configs/config.d/storage_configuration.xml
diff --git a/dbms/tests/integration/test_ttl_move/configs/logs_config.xml b/tests/integration/test_ttl_move/configs/logs_config.xml
similarity index 100%
rename from dbms/tests/integration/test_ttl_move/configs/logs_config.xml
rename to tests/integration/test_ttl_move/configs/logs_config.xml
diff --git a/dbms/tests/integration/test_ttl_move/test.py b/tests/integration/test_ttl_move/test.py
similarity index 100%
rename from dbms/tests/integration/test_ttl_move/test.py
rename to tests/integration/test_ttl_move/test.py
diff --git a/dbms/tests/integration/test_ttl_replicated/__init__.py b/tests/integration/test_ttl_replicated/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_ttl_replicated/__init__.py
rename to tests/integration/test_ttl_replicated/__init__.py
diff --git a/dbms/tests/integration/test_ttl_replicated/test.py b/tests/integration/test_ttl_replicated/test.py
similarity index 100%
rename from dbms/tests/integration/test_ttl_replicated/test.py
rename to tests/integration/test_ttl_replicated/test.py
diff --git a/dbms/tests/integration/test_union_header/__init__.py b/tests/integration/test_union_header/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_union_header/__init__.py
rename to tests/integration/test_union_header/__init__.py
diff --git a/dbms/tests/integration/test_union_header/configs/remote_servers.xml b/tests/integration/test_union_header/configs/remote_servers.xml
similarity index 100%
rename from dbms/tests/integration/test_union_header/configs/remote_servers.xml
rename to tests/integration/test_union_header/configs/remote_servers.xml
diff --git a/dbms/tests/integration/test_union_header/test.py b/tests/integration/test_union_header/test.py
similarity index 100%
rename from dbms/tests/integration/test_union_header/test.py
rename to tests/integration/test_union_header/test.py
diff --git a/dbms/tests/integration/test_user_ip_restrictions/__init__.py b/tests/integration/test_user_ip_restrictions/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_user_ip_restrictions/__init__.py
rename to tests/integration/test_user_ip_restrictions/__init__.py
diff --git a/dbms/tests/integration/test_user_ip_restrictions/configs/config_ipv6.xml b/tests/integration/test_user_ip_restrictions/configs/config_ipv6.xml
similarity index 100%
rename from dbms/tests/integration/test_user_ip_restrictions/configs/config_ipv6.xml
rename to tests/integration/test_user_ip_restrictions/configs/config_ipv6.xml
diff --git a/dbms/tests/integration/test_user_ip_restrictions/configs/users_ipv4.xml b/tests/integration/test_user_ip_restrictions/configs/users_ipv4.xml
similarity index 100%
rename from dbms/tests/integration/test_user_ip_restrictions/configs/users_ipv4.xml
rename to tests/integration/test_user_ip_restrictions/configs/users_ipv4.xml
diff --git a/dbms/tests/integration/test_user_ip_restrictions/configs/users_ipv6.xml b/tests/integration/test_user_ip_restrictions/configs/users_ipv6.xml
similarity index 100%
rename from dbms/tests/integration/test_user_ip_restrictions/configs/users_ipv6.xml
rename to tests/integration/test_user_ip_restrictions/configs/users_ipv6.xml
diff --git a/dbms/tests/integration/test_user_ip_restrictions/test.py b/tests/integration/test_user_ip_restrictions/test.py
similarity index 100%
rename from dbms/tests/integration/test_user_ip_restrictions/test.py
rename to tests/integration/test_user_ip_restrictions/test.py
diff --git a/dbms/tests/integration/test_user_zero_database_access/__init__.py b/tests/integration/test_user_zero_database_access/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_user_zero_database_access/__init__.py
rename to tests/integration/test_user_zero_database_access/__init__.py
diff --git a/dbms/tests/integration/test_user_zero_database_access/configs/config.xml b/tests/integration/test_user_zero_database_access/configs/config.xml
similarity index 100%
rename from dbms/tests/integration/test_user_zero_database_access/configs/config.xml
rename to tests/integration/test_user_zero_database_access/configs/config.xml
diff --git a/dbms/tests/integration/test_user_zero_database_access/configs/users.xml b/tests/integration/test_user_zero_database_access/configs/users.xml
similarity index 100%
rename from dbms/tests/integration/test_user_zero_database_access/configs/users.xml
rename to tests/integration/test_user_zero_database_access/configs/users.xml
diff --git a/dbms/tests/integration/test_user_zero_database_access/test_user_zero_database_access.py b/tests/integration/test_user_zero_database_access/test_user_zero_database_access.py
similarity index 100%
rename from dbms/tests/integration/test_user_zero_database_access/test_user_zero_database_access.py
rename to tests/integration/test_user_zero_database_access/test_user_zero_database_access.py
diff --git a/dbms/tests/integration/test_version_update_after_mutation/__init__.py b/tests/integration/test_version_update_after_mutation/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_version_update_after_mutation/__init__.py
rename to tests/integration/test_version_update_after_mutation/__init__.py
diff --git a/dbms/tests/integration/test_version_update_after_mutation/test.py b/tests/integration/test_version_update_after_mutation/test.py
similarity index 100%
rename from dbms/tests/integration/test_version_update_after_mutation/test.py
rename to tests/integration/test_version_update_after_mutation/test.py
diff --git a/dbms/tests/integration/test_zookeeper_config/__init__.py b/tests/integration/test_zookeeper_config/__init__.py
similarity index 100%
rename from dbms/tests/integration/test_zookeeper_config/__init__.py
rename to tests/integration/test_zookeeper_config/__init__.py
diff --git a/dbms/tests/integration/test_zookeeper_config/configs/remote_servers.xml b/tests/integration/test_zookeeper_config/configs/remote_servers.xml
similarity index 100%
rename from dbms/tests/integration/test_zookeeper_config/configs/remote_servers.xml
rename to tests/integration/test_zookeeper_config/configs/remote_servers.xml
diff --git a/dbms/tests/integration/test_zookeeper_config/configs/zookeeper_config_root_a.xml b/tests/integration/test_zookeeper_config/configs/zookeeper_config_root_a.xml
similarity index 100%
rename from dbms/tests/integration/test_zookeeper_config/configs/zookeeper_config_root_a.xml
rename to tests/integration/test_zookeeper_config/configs/zookeeper_config_root_a.xml
diff --git a/dbms/tests/integration/test_zookeeper_config/configs/zookeeper_config_root_b.xml b/tests/integration/test_zookeeper_config/configs/zookeeper_config_root_b.xml
similarity index 100%
rename from dbms/tests/integration/test_zookeeper_config/configs/zookeeper_config_root_b.xml
rename to tests/integration/test_zookeeper_config/configs/zookeeper_config_root_b.xml
diff --git a/dbms/tests/integration/test_zookeeper_config/configs/zookeeper_config_with_password.xml b/tests/integration/test_zookeeper_config/configs/zookeeper_config_with_password.xml
similarity index 100%
rename from dbms/tests/integration/test_zookeeper_config/configs/zookeeper_config_with_password.xml
rename to tests/integration/test_zookeeper_config/configs/zookeeper_config_with_password.xml
diff --git a/dbms/tests/integration/test_zookeeper_config/test.py b/tests/integration/test_zookeeper_config/test.py
similarity index 100%
rename from dbms/tests/integration/test_zookeeper_config/test.py
rename to tests/integration/test_zookeeper_config/test.py
diff --git a/dbms/tests/ints_dictionary.xml b/tests/ints_dictionary.xml
similarity index 100%
rename from dbms/tests/ints_dictionary.xml
rename to tests/ints_dictionary.xml
diff --git a/dbms/tests/msan_suppressions.txt b/tests/msan_suppressions.txt
similarity index 100%
rename from dbms/tests/msan_suppressions.txt
rename to tests/msan_suppressions.txt
diff --git a/dbms/tests/perf_drafts/accurate_comparisons/accurate_comparisons.sh b/tests/perf_drafts/accurate_comparisons/accurate_comparisons.sh
similarity index 100%
rename from dbms/tests/perf_drafts/accurate_comparisons/accurate_comparisons.sh
rename to tests/perf_drafts/accurate_comparisons/accurate_comparisons.sh
diff --git a/dbms/tests/perf_drafts/vert_merge/add_id_to_csv b/tests/perf_drafts/vert_merge/add_id_to_csv
similarity index 100%
rename from dbms/tests/perf_drafts/vert_merge/add_id_to_csv
rename to tests/perf_drafts/vert_merge/add_id_to_csv
diff --git a/dbms/tests/perf_drafts/vert_merge/ontime.struct b/tests/perf_drafts/vert_merge/ontime.struct
similarity index 100%
rename from dbms/tests/perf_drafts/vert_merge/ontime.struct
rename to tests/perf_drafts/vert_merge/ontime.struct
diff --git a/dbms/tests/perf_drafts/vert_merge/test_merges b/tests/perf_drafts/vert_merge/test_merges
similarity index 100%
rename from dbms/tests/perf_drafts/vert_merge/test_merges
rename to tests/perf_drafts/vert_merge/test_merges
diff --git a/dbms/tests/perf_drafts/vert_merge/wait_clickhouse_server b/tests/perf_drafts/vert_merge/wait_clickhouse_server
similarity index 100%
rename from dbms/tests/perf_drafts/vert_merge/wait_clickhouse_server
rename to tests/perf_drafts/vert_merge/wait_clickhouse_server
diff --git a/dbms/tests/performance/IPv4.xml b/tests/performance/IPv4.xml
similarity index 100%
rename from dbms/tests/performance/IPv4.xml
rename to tests/performance/IPv4.xml
diff --git a/dbms/tests/performance/IPv6.xml b/tests/performance/IPv6.xml
similarity index 100%
rename from dbms/tests/performance/IPv6.xml
rename to tests/performance/IPv6.xml
diff --git a/dbms/tests/performance/README.md b/tests/performance/README.md
similarity index 100%
rename from dbms/tests/performance/README.md
rename to tests/performance/README.md
diff --git a/dbms/tests/performance/agg_functions_min_max_any.xml b/tests/performance/agg_functions_min_max_any.xml
similarity index 100%
rename from dbms/tests/performance/agg_functions_min_max_any.xml
rename to tests/performance/agg_functions_min_max_any.xml
diff --git a/dbms/tests/performance/analyze_array_tuples.xml b/tests/performance/analyze_array_tuples.xml
similarity index 100%
rename from dbms/tests/performance/analyze_array_tuples.xml
rename to tests/performance/analyze_array_tuples.xml
diff --git a/dbms/tests/performance/and_function.xml b/tests/performance/and_function.xml
similarity index 100%
rename from dbms/tests/performance/and_function.xml
rename to tests/performance/and_function.xml
diff --git a/dbms/tests/performance/arithmetic.xml b/tests/performance/arithmetic.xml
similarity index 100%
rename from dbms/tests/performance/arithmetic.xml
rename to tests/performance/arithmetic.xml
diff --git a/dbms/tests/performance/array_auc.xml b/tests/performance/array_auc.xml
similarity index 100%
rename from dbms/tests/performance/array_auc.xml
rename to tests/performance/array_auc.xml
diff --git a/dbms/tests/performance/array_element.xml b/tests/performance/array_element.xml
similarity index 100%
rename from dbms/tests/performance/array_element.xml
rename to tests/performance/array_element.xml
diff --git a/dbms/tests/performance/array_fill.xml b/tests/performance/array_fill.xml
similarity index 100%
rename from dbms/tests/performance/array_fill.xml
rename to tests/performance/array_fill.xml
diff --git a/dbms/tests/performance/array_join.xml b/tests/performance/array_join.xml
similarity index 100%
rename from dbms/tests/performance/array_join.xml
rename to tests/performance/array_join.xml
diff --git a/dbms/tests/performance/array_reduce.xml b/tests/performance/array_reduce.xml
similarity index 100%
rename from dbms/tests/performance/array_reduce.xml
rename to tests/performance/array_reduce.xml
diff --git a/dbms/tests/performance/base64.xml b/tests/performance/base64.xml
similarity index 100%
rename from dbms/tests/performance/base64.xml
rename to tests/performance/base64.xml
diff --git a/dbms/tests/performance/base64_hits.xml b/tests/performance/base64_hits.xml
similarity index 100%
rename from dbms/tests/performance/base64_hits.xml
rename to tests/performance/base64_hits.xml
diff --git a/dbms/tests/performance/basename.xml b/tests/performance/basename.xml
similarity index 100%
rename from dbms/tests/performance/basename.xml
rename to tests/performance/basename.xml
diff --git a/dbms/tests/performance/bitCount.xml b/tests/performance/bitCount.xml
similarity index 100%
rename from dbms/tests/performance/bitCount.xml
rename to tests/performance/bitCount.xml
diff --git a/dbms/tests/performance/bit_operations_fixed_string.xml b/tests/performance/bit_operations_fixed_string.xml
similarity index 100%
rename from dbms/tests/performance/bit_operations_fixed_string.xml
rename to tests/performance/bit_operations_fixed_string.xml
diff --git a/dbms/tests/performance/bit_operations_fixed_string_numbers.xml b/tests/performance/bit_operations_fixed_string_numbers.xml
similarity index 100%
rename from dbms/tests/performance/bit_operations_fixed_string_numbers.xml
rename to tests/performance/bit_operations_fixed_string_numbers.xml
diff --git a/dbms/tests/performance/bloom_filter.xml b/tests/performance/bloom_filter.xml
similarity index 100%
rename from dbms/tests/performance/bloom_filter.xml
rename to tests/performance/bloom_filter.xml
diff --git a/dbms/tests/performance/bounding_ratio.xml b/tests/performance/bounding_ratio.xml
similarity index 100%
rename from dbms/tests/performance/bounding_ratio.xml
rename to tests/performance/bounding_ratio.xml
diff --git a/dbms/tests/performance/cidr.xml b/tests/performance/cidr.xml
similarity index 100%
rename from dbms/tests/performance/cidr.xml
rename to tests/performance/cidr.xml
diff --git a/dbms/tests/performance/codecs_float_insert.xml b/tests/performance/codecs_float_insert.xml
similarity index 100%
rename from dbms/tests/performance/codecs_float_insert.xml
rename to tests/performance/codecs_float_insert.xml
diff --git a/dbms/tests/performance/codecs_float_select.xml b/tests/performance/codecs_float_select.xml
similarity index 100%
rename from dbms/tests/performance/codecs_float_select.xml
rename to tests/performance/codecs_float_select.xml
diff --git a/dbms/tests/performance/codecs_int_insert.xml b/tests/performance/codecs_int_insert.xml
similarity index 100%
rename from dbms/tests/performance/codecs_int_insert.xml
rename to tests/performance/codecs_int_insert.xml
diff --git a/dbms/tests/performance/codecs_int_select.xml b/tests/performance/codecs_int_select.xml
similarity index 100%
rename from dbms/tests/performance/codecs_int_select.xml
rename to tests/performance/codecs_int_select.xml
diff --git a/dbms/tests/performance/collations.xml b/tests/performance/collations.xml
similarity index 100%
rename from dbms/tests/performance/collations.xml
rename to tests/performance/collations.xml
diff --git a/dbms/tests/performance/column_column_comparison.xml b/tests/performance/column_column_comparison.xml
similarity index 100%
rename from dbms/tests/performance/column_column_comparison.xml
rename to tests/performance/column_column_comparison.xml
diff --git a/dbms/tests/performance/columns_hashing.xml b/tests/performance/columns_hashing.xml
similarity index 100%
rename from dbms/tests/performance/columns_hashing.xml
rename to tests/performance/columns_hashing.xml
diff --git a/dbms/tests/performance/complex_array_creation.xml b/tests/performance/complex_array_creation.xml
similarity index 100%
rename from dbms/tests/performance/complex_array_creation.xml
rename to tests/performance/complex_array_creation.xml
diff --git a/dbms/tests/performance/concat_hits.xml b/tests/performance/concat_hits.xml
similarity index 100%
rename from dbms/tests/performance/concat_hits.xml
rename to tests/performance/concat_hits.xml
diff --git a/dbms/tests/performance/conditional.xml b/tests/performance/conditional.xml
similarity index 100%
rename from dbms/tests/performance/conditional.xml
rename to tests/performance/conditional.xml
diff --git a/dbms/tests/performance/consistent_hashes.xml b/tests/performance/consistent_hashes.xml
similarity index 100%
rename from dbms/tests/performance/consistent_hashes.xml
rename to tests/performance/consistent_hashes.xml
diff --git a/dbms/tests/performance/constant_column_comparison.xml b/tests/performance/constant_column_comparison.xml
similarity index 100%
rename from dbms/tests/performance/constant_column_comparison.xml
rename to tests/performance/constant_column_comparison.xml
diff --git a/dbms/tests/performance/constant_column_search.xml b/tests/performance/constant_column_search.xml
similarity index 100%
rename from dbms/tests/performance/constant_column_search.xml
rename to tests/performance/constant_column_search.xml
diff --git a/dbms/tests/performance/count.xml b/tests/performance/count.xml
similarity index 100%
rename from dbms/tests/performance/count.xml
rename to tests/performance/count.xml
diff --git a/dbms/tests/performance/cpu_synthetic.xml b/tests/performance/cpu_synthetic.xml
similarity index 100%
rename from dbms/tests/performance/cpu_synthetic.xml
rename to tests/performance/cpu_synthetic.xml
diff --git a/dbms/tests/performance/create_benchmark_page.py b/tests/performance/create_benchmark_page.py
similarity index 100%
rename from dbms/tests/performance/create_benchmark_page.py
rename to tests/performance/create_benchmark_page.py
diff --git a/dbms/tests/performance/cryptographic_hashes.xml b/tests/performance/cryptographic_hashes.xml
similarity index 100%
rename from dbms/tests/performance/cryptographic_hashes.xml
rename to tests/performance/cryptographic_hashes.xml
diff --git a/dbms/tests/performance/date_parsing.xml b/tests/performance/date_parsing.xml
similarity index 100%
rename from dbms/tests/performance/date_parsing.xml
rename to tests/performance/date_parsing.xml
diff --git a/dbms/tests/performance/date_time.xml b/tests/performance/date_time.xml
similarity index 100%
rename from dbms/tests/performance/date_time.xml
rename to tests/performance/date_time.xml
diff --git a/dbms/tests/performance/date_time_64.xml b/tests/performance/date_time_64.xml
similarity index 100%
rename from dbms/tests/performance/date_time_64.xml
rename to tests/performance/date_time_64.xml
diff --git a/dbms/tests/performance/decimal_aggregates.xml b/tests/performance/decimal_aggregates.xml
similarity index 100%
rename from dbms/tests/performance/decimal_aggregates.xml
rename to tests/performance/decimal_aggregates.xml
diff --git a/dbms/tests/performance/early_constant_folding.xml b/tests/performance/early_constant_folding.xml
similarity index 100%
rename from dbms/tests/performance/early_constant_folding.xml
rename to tests/performance/early_constant_folding.xml
diff --git a/dbms/tests/performance/empty_string_deserialization.xml b/tests/performance/empty_string_deserialization.xml
similarity index 100%
rename from dbms/tests/performance/empty_string_deserialization.xml
rename to tests/performance/empty_string_deserialization.xml
diff --git a/dbms/tests/performance/empty_string_serialization.xml b/tests/performance/empty_string_serialization.xml
similarity index 100%
rename from dbms/tests/performance/empty_string_serialization.xml
rename to tests/performance/empty_string_serialization.xml
diff --git a/dbms/tests/performance/entropy.xml b/tests/performance/entropy.xml
similarity index 100%
rename from dbms/tests/performance/entropy.xml
rename to tests/performance/entropy.xml
diff --git a/dbms/tests/performance/first_significant_subdomain.xml b/tests/performance/first_significant_subdomain.xml
similarity index 100%
rename from dbms/tests/performance/first_significant_subdomain.xml
rename to tests/performance/first_significant_subdomain.xml
diff --git a/dbms/tests/performance/fixed_string16.xml b/tests/performance/fixed_string16.xml
similarity index 100%
rename from dbms/tests/performance/fixed_string16.xml
rename to tests/performance/fixed_string16.xml
diff --git a/dbms/tests/performance/float_formatting.xml b/tests/performance/float_formatting.xml
similarity index 100%
rename from dbms/tests/performance/float_formatting.xml
rename to tests/performance/float_formatting.xml
diff --git a/dbms/tests/performance/float_parsing.xml b/tests/performance/float_parsing.xml
similarity index 100%
rename from dbms/tests/performance/float_parsing.xml
rename to tests/performance/float_parsing.xml
diff --git a/dbms/tests/performance/format_date_time.xml b/tests/performance/format_date_time.xml
similarity index 100%
rename from dbms/tests/performance/format_date_time.xml
rename to tests/performance/format_date_time.xml
diff --git a/dbms/tests/performance/functions_coding.xml b/tests/performance/functions_coding.xml
similarity index 100%
rename from dbms/tests/performance/functions_coding.xml
rename to tests/performance/functions_coding.xml
diff --git a/dbms/tests/performance/functions_geo.xml b/tests/performance/functions_geo.xml
similarity index 100%
rename from dbms/tests/performance/functions_geo.xml
rename to tests/performance/functions_geo.xml
diff --git a/dbms/tests/performance/general_purpose_hashes.xml b/tests/performance/general_purpose_hashes.xml
similarity index 100%
rename from dbms/tests/performance/general_purpose_hashes.xml
rename to tests/performance/general_purpose_hashes.xml
diff --git a/dbms/tests/performance/general_purpose_hashes_on_UUID.xml b/tests/performance/general_purpose_hashes_on_UUID.xml
similarity index 100%
rename from dbms/tests/performance/general_purpose_hashes_on_UUID.xml
rename to tests/performance/general_purpose_hashes_on_UUID.xml
diff --git a/dbms/tests/performance/generate_table_function.xml b/tests/performance/generate_table_function.xml
similarity index 100%
rename from dbms/tests/performance/generate_table_function.xml
rename to tests/performance/generate_table_function.xml
diff --git a/dbms/tests/performance/great_circle_dist.xml b/tests/performance/great_circle_dist.xml
similarity index 100%
rename from dbms/tests/performance/great_circle_dist.xml
rename to tests/performance/great_circle_dist.xml
diff --git a/dbms/tests/performance/group_array_moving_sum.xml b/tests/performance/group_array_moving_sum.xml
similarity index 100%
rename from dbms/tests/performance/group_array_moving_sum.xml
rename to tests/performance/group_array_moving_sum.xml
diff --git a/dbms/tests/performance/h3.xml b/tests/performance/h3.xml
similarity index 100%
rename from dbms/tests/performance/h3.xml
rename to tests/performance/h3.xml
diff --git a/dbms/tests/performance/if_array_num.xml b/tests/performance/if_array_num.xml
similarity index 100%
rename from dbms/tests/performance/if_array_num.xml
rename to tests/performance/if_array_num.xml
diff --git a/dbms/tests/performance/if_array_string.xml b/tests/performance/if_array_string.xml
similarity index 100%
rename from dbms/tests/performance/if_array_string.xml
rename to tests/performance/if_array_string.xml
diff --git a/dbms/tests/performance/if_string_const.xml b/tests/performance/if_string_const.xml
similarity index 100%
rename from dbms/tests/performance/if_string_const.xml
rename to tests/performance/if_string_const.xml
diff --git a/dbms/tests/performance/if_string_hits.xml b/tests/performance/if_string_hits.xml
similarity index 100%
rename from dbms/tests/performance/if_string_hits.xml
rename to tests/performance/if_string_hits.xml
diff --git a/dbms/tests/performance/if_to_multiif.xml b/tests/performance/if_to_multiif.xml
similarity index 100%
rename from dbms/tests/performance/if_to_multiif.xml
rename to tests/performance/if_to_multiif.xml
diff --git a/dbms/tests/performance/information_value.xml b/tests/performance/information_value.xml
similarity index 100%
rename from dbms/tests/performance/information_value.xml
rename to tests/performance/information_value.xml
diff --git a/dbms/tests/performance/insert_values_with_expressions.xml b/tests/performance/insert_values_with_expressions.xml
similarity index 100%
rename from dbms/tests/performance/insert_values_with_expressions.xml
rename to tests/performance/insert_values_with_expressions.xml
diff --git a/dbms/tests/performance/inserts_arrays_lowcardinality.xml b/tests/performance/inserts_arrays_lowcardinality.xml
similarity index 100%
rename from dbms/tests/performance/inserts_arrays_lowcardinality.xml
rename to tests/performance/inserts_arrays_lowcardinality.xml
diff --git a/dbms/tests/performance/int_parsing.xml b/tests/performance/int_parsing.xml
similarity index 100%
rename from dbms/tests/performance/int_parsing.xml
rename to tests/performance/int_parsing.xml
diff --git a/dbms/tests/performance/jit_large_requests.xml b/tests/performance/jit_large_requests.xml
similarity index 100%
rename from dbms/tests/performance/jit_large_requests.xml
rename to tests/performance/jit_large_requests.xml
diff --git a/dbms/tests/performance/jit_small_requests.xml b/tests/performance/jit_small_requests.xml
similarity index 100%
rename from dbms/tests/performance/jit_small_requests.xml
rename to tests/performance/jit_small_requests.xml
diff --git a/dbms/tests/performance/joins_in_memory.xml b/tests/performance/joins_in_memory.xml
similarity index 100%
rename from dbms/tests/performance/joins_in_memory.xml
rename to tests/performance/joins_in_memory.xml
diff --git a/dbms/tests/performance/joins_in_memory_pmj.xml b/tests/performance/joins_in_memory_pmj.xml
similarity index 100%
rename from dbms/tests/performance/joins_in_memory_pmj.xml
rename to tests/performance/joins_in_memory_pmj.xml
diff --git a/dbms/tests/performance/json_extract_rapidjson.xml b/tests/performance/json_extract_rapidjson.xml
similarity index 100%
rename from dbms/tests/performance/json_extract_rapidjson.xml
rename to tests/performance/json_extract_rapidjson.xml
diff --git a/dbms/tests/performance/json_extract_simdjson.xml b/tests/performance/json_extract_simdjson.xml
similarity index 100%
rename from dbms/tests/performance/json_extract_simdjson.xml
rename to tests/performance/json_extract_simdjson.xml
diff --git a/dbms/tests/performance/leftpad.xml b/tests/performance/leftpad.xml
similarity index 100%
rename from dbms/tests/performance/leftpad.xml
rename to tests/performance/leftpad.xml
diff --git a/dbms/tests/performance/linear_regression.xml b/tests/performance/linear_regression.xml
similarity index 100%
rename from dbms/tests/performance/linear_regression.xml
rename to tests/performance/linear_regression.xml
diff --git a/dbms/tests/performance/logical_functions_large.xml b/tests/performance/logical_functions_large.xml
similarity index 100%
rename from dbms/tests/performance/logical_functions_large.xml
rename to tests/performance/logical_functions_large.xml
diff --git a/dbms/tests/performance/logical_functions_medium.xml b/tests/performance/logical_functions_medium.xml
similarity index 100%
rename from dbms/tests/performance/logical_functions_medium.xml
rename to tests/performance/logical_functions_medium.xml
diff --git a/dbms/tests/performance/logical_functions_small.xml b/tests/performance/logical_functions_small.xml
similarity index 100%
rename from dbms/tests/performance/logical_functions_small.xml
rename to tests/performance/logical_functions_small.xml
diff --git a/dbms/tests/performance/math.xml b/tests/performance/math.xml
similarity index 100%
rename from dbms/tests/performance/math.xml
rename to tests/performance/math.xml
diff --git a/dbms/tests/performance/merge_table_streams.xml b/tests/performance/merge_table_streams.xml
similarity index 100%
rename from dbms/tests/performance/merge_table_streams.xml
rename to tests/performance/merge_table_streams.xml
diff --git a/dbms/tests/performance/merge_tree_huge_pk.xml b/tests/performance/merge_tree_huge_pk.xml
similarity index 100%
rename from dbms/tests/performance/merge_tree_huge_pk.xml
rename to tests/performance/merge_tree_huge_pk.xml
diff --git a/dbms/tests/performance/merge_tree_many_partitions.xml b/tests/performance/merge_tree_many_partitions.xml
similarity index 100%
rename from dbms/tests/performance/merge_tree_many_partitions.xml
rename to tests/performance/merge_tree_many_partitions.xml
diff --git a/dbms/tests/performance/merge_tree_many_partitions_2.xml b/tests/performance/merge_tree_many_partitions_2.xml
similarity index 100%
rename from dbms/tests/performance/merge_tree_many_partitions_2.xml
rename to tests/performance/merge_tree_many_partitions_2.xml
diff --git a/dbms/tests/performance/merge_tree_simple_select.xml b/tests/performance/merge_tree_simple_select.xml
similarity index 100%
rename from dbms/tests/performance/merge_tree_simple_select.xml
rename to tests/performance/merge_tree_simple_select.xml
diff --git a/dbms/tests/performance/mingroupby-orderbylimit1.xml b/tests/performance/mingroupby-orderbylimit1.xml
similarity index 100%
rename from dbms/tests/performance/mingroupby-orderbylimit1.xml
rename to tests/performance/mingroupby-orderbylimit1.xml
diff --git a/dbms/tests/performance/modulo.xml b/tests/performance/modulo.xml
similarity index 100%
rename from dbms/tests/performance/modulo.xml
rename to tests/performance/modulo.xml
diff --git a/dbms/tests/performance/ngram_distance.xml b/tests/performance/ngram_distance.xml
similarity index 100%
rename from dbms/tests/performance/ngram_distance.xml
rename to tests/performance/ngram_distance.xml
diff --git a/dbms/tests/performance/number_formatting_formats.xml b/tests/performance/number_formatting_formats.xml
similarity index 100%
rename from dbms/tests/performance/number_formatting_formats.xml
rename to tests/performance/number_formatting_formats.xml
diff --git a/dbms/tests/performance/nyc_taxi.xml b/tests/performance/nyc_taxi.xml
similarity index 100%
rename from dbms/tests/performance/nyc_taxi.xml
rename to tests/performance/nyc_taxi.xml
diff --git a/dbms/tests/performance/order_by_decimals.xml b/tests/performance/order_by_decimals.xml
similarity index 100%
rename from dbms/tests/performance/order_by_decimals.xml
rename to tests/performance/order_by_decimals.xml
diff --git a/dbms/tests/performance/order_by_read_in_order.xml b/tests/performance/order_by_read_in_order.xml
similarity index 100%
rename from dbms/tests/performance/order_by_read_in_order.xml
rename to tests/performance/order_by_read_in_order.xml
diff --git a/dbms/tests/performance/order_by_single_column.xml b/tests/performance/order_by_single_column.xml
similarity index 100%
rename from dbms/tests/performance/order_by_single_column.xml
rename to tests/performance/order_by_single_column.xml
diff --git a/dbms/tests/performance/parallel_insert.xml b/tests/performance/parallel_insert.xml
similarity index 100%
rename from dbms/tests/performance/parallel_insert.xml
rename to tests/performance/parallel_insert.xml
diff --git a/dbms/tests/performance/parse_engine_file.xml b/tests/performance/parse_engine_file.xml
similarity index 100%
rename from dbms/tests/performance/parse_engine_file.xml
rename to tests/performance/parse_engine_file.xml
diff --git a/dbms/tests/performance/pre_limit_no_sorting.xml b/tests/performance/pre_limit_no_sorting.xml
similarity index 100%
rename from dbms/tests/performance/pre_limit_no_sorting.xml
rename to tests/performance/pre_limit_no_sorting.xml
diff --git a/dbms/tests/performance/prewhere.xml b/tests/performance/prewhere.xml
similarity index 100%
rename from dbms/tests/performance/prewhere.xml
rename to tests/performance/prewhere.xml
diff --git a/dbms/tests/performance/random_printable_ascii.xml b/tests/performance/random_printable_ascii.xml
similarity index 100%
rename from dbms/tests/performance/random_printable_ascii.xml
rename to tests/performance/random_printable_ascii.xml
diff --git a/dbms/tests/performance/range.xml b/tests/performance/range.xml
similarity index 100%
rename from dbms/tests/performance/range.xml
rename to tests/performance/range.xml
diff --git a/dbms/tests/performance/read_hits_with_aio.xml b/tests/performance/read_hits_with_aio.xml
similarity index 100%
rename from
dbms/tests/performance/read_hits_with_aio.xml rename to tests/performance/read_hits_with_aio.xml diff --git a/dbms/tests/performance/right.xml b/tests/performance/right.xml similarity index 100% rename from dbms/tests/performance/right.xml rename to tests/performance/right.xml diff --git a/dbms/tests/performance/round_down.xml b/tests/performance/round_down.xml similarity index 100% rename from dbms/tests/performance/round_down.xml rename to tests/performance/round_down.xml diff --git a/dbms/tests/performance/round_methods.xml b/tests/performance/round_methods.xml similarity index 100% rename from dbms/tests/performance/round_methods.xml rename to tests/performance/round_methods.xml diff --git a/dbms/tests/performance/scalar.xml b/tests/performance/scalar.xml similarity index 100% rename from dbms/tests/performance/scalar.xml rename to tests/performance/scalar.xml diff --git a/dbms/tests/performance/select_format.xml b/tests/performance/select_format.xml similarity index 100% rename from dbms/tests/performance/select_format.xml rename to tests/performance/select_format.xml diff --git a/dbms/tests/performance/set.xml b/tests/performance/set.xml similarity index 100% rename from dbms/tests/performance/set.xml rename to tests/performance/set.xml diff --git a/dbms/tests/performance/set_hits.xml b/tests/performance/set_hits.xml similarity index 100% rename from dbms/tests/performance/set_hits.xml rename to tests/performance/set_hits.xml diff --git a/dbms/tests/performance/set_index.xml b/tests/performance/set_index.xml similarity index 100% rename from dbms/tests/performance/set_index.xml rename to tests/performance/set_index.xml diff --git a/dbms/tests/performance/simple_join_query.xml b/tests/performance/simple_join_query.xml similarity index 100% rename from dbms/tests/performance/simple_join_query.xml rename to tests/performance/simple_join_query.xml diff --git a/dbms/tests/performance/slices_hits.xml b/tests/performance/slices_hits.xml similarity index 100% rename from dbms/tests/performance/slices_hits.xml rename to tests/performance/slices_hits.xml diff --git a/dbms/tests/performance/sort.xml b/tests/performance/sort.xml similarity index 100% rename from dbms/tests/performance/sort.xml rename to tests/performance/sort.xml diff --git a/dbms/tests/performance/string_join.xml b/tests/performance/string_join.xml similarity index 100% rename from dbms/tests/performance/string_join.xml rename to tests/performance/string_join.xml diff --git a/dbms/tests/performance/string_set.xml b/tests/performance/string_set.xml similarity index 100% rename from dbms/tests/performance/string_set.xml rename to tests/performance/string_set.xml diff --git a/dbms/tests/performance/string_sort.xml b/tests/performance/string_sort.xml similarity index 100% rename from dbms/tests/performance/string_sort.xml rename to tests/performance/string_sort.xml diff --git a/dbms/tests/performance/sum_map.xml b/tests/performance/sum_map.xml similarity index 100% rename from dbms/tests/performance/sum_map.xml rename to tests/performance/sum_map.xml diff --git a/dbms/tests/performance/synthetic_hardware_benchmark.xml b/tests/performance/synthetic_hardware_benchmark.xml similarity index 100% rename from dbms/tests/performance/synthetic_hardware_benchmark.xml rename to tests/performance/synthetic_hardware_benchmark.xml diff --git a/dbms/tests/performance/trim_numbers.xml b/tests/performance/trim_numbers.xml similarity index 100% rename from dbms/tests/performance/trim_numbers.xml rename to tests/performance/trim_numbers.xml diff 
--git a/dbms/tests/performance/trim_urls.xml b/tests/performance/trim_urls.xml similarity index 100% rename from dbms/tests/performance/trim_urls.xml rename to tests/performance/trim_urls.xml diff --git a/dbms/tests/performance/trim_whitespace.xml b/tests/performance/trim_whitespace.xml similarity index 100% rename from dbms/tests/performance/trim_whitespace.xml rename to tests/performance/trim_whitespace.xml diff --git a/dbms/tests/performance/uniq.xml b/tests/performance/uniq.xml similarity index 100% rename from dbms/tests/performance/uniq.xml rename to tests/performance/uniq.xml diff --git a/dbms/tests/performance/url_hits.xml b/tests/performance/url_hits.xml similarity index 100% rename from dbms/tests/performance/url_hits.xml rename to tests/performance/url_hits.xml diff --git a/dbms/tests/performance/vectorize_aggregation_combinators.xml b/tests/performance/vectorize_aggregation_combinators.xml similarity index 100% rename from dbms/tests/performance/vectorize_aggregation_combinators.xml rename to tests/performance/vectorize_aggregation_combinators.xml diff --git a/dbms/tests/performance/visit_param_extract_raw.xml b/tests/performance/visit_param_extract_raw.xml similarity index 100% rename from dbms/tests/performance/visit_param_extract_raw.xml rename to tests/performance/visit_param_extract_raw.xml diff --git a/dbms/tests/performance/website.xml b/tests/performance/website.xml similarity index 100% rename from dbms/tests/performance/website.xml rename to tests/performance/website.xml diff --git a/dbms/tests/queries/.gitignore b/tests/queries/.gitignore similarity index 100% rename from dbms/tests/queries/.gitignore rename to tests/queries/.gitignore diff --git a/dbms/tests/queries/0_stateless/00001_select_1.reference b/tests/queries/0_stateless/00001_select_1.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00001_select_1.reference rename to tests/queries/0_stateless/00001_select_1.reference diff --git a/dbms/tests/queries/0_stateless/00001_select_1.sql b/tests/queries/0_stateless/00001_select_1.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00001_select_1.sql rename to tests/queries/0_stateless/00001_select_1.sql diff --git a/dbms/tests/queries/0_stateless/00002_system_numbers.reference b/tests/queries/0_stateless/00002_system_numbers.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00002_system_numbers.reference rename to tests/queries/0_stateless/00002_system_numbers.reference diff --git a/dbms/tests/queries/0_stateless/00002_system_numbers.sql b/tests/queries/0_stateless/00002_system_numbers.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00002_system_numbers.sql rename to tests/queries/0_stateless/00002_system_numbers.sql diff --git a/dbms/tests/queries/0_stateless/00003_reinterpret_as_string.reference b/tests/queries/0_stateless/00003_reinterpret_as_string.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00003_reinterpret_as_string.reference rename to tests/queries/0_stateless/00003_reinterpret_as_string.reference diff --git a/dbms/tests/queries/0_stateless/00003_reinterpret_as_string.sql b/tests/queries/0_stateless/00003_reinterpret_as_string.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00003_reinterpret_as_string.sql rename to tests/queries/0_stateless/00003_reinterpret_as_string.sql diff --git a/dbms/tests/queries/0_stateless/00004_shard_format_ast_and_remote_table.reference 
b/tests/queries/0_stateless/00004_shard_format_ast_and_remote_table.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00004_shard_format_ast_and_remote_table.reference rename to tests/queries/0_stateless/00004_shard_format_ast_and_remote_table.reference diff --git a/dbms/tests/queries/0_stateless/00004_shard_format_ast_and_remote_table.sql b/tests/queries/0_stateless/00004_shard_format_ast_and_remote_table.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00004_shard_format_ast_and_remote_table.sql rename to tests/queries/0_stateless/00004_shard_format_ast_and_remote_table.sql diff --git a/dbms/tests/queries/0_stateless/00005_shard_format_ast_and_remote_table_lambda.reference b/tests/queries/0_stateless/00005_shard_format_ast_and_remote_table_lambda.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00005_shard_format_ast_and_remote_table_lambda.reference rename to tests/queries/0_stateless/00005_shard_format_ast_and_remote_table_lambda.reference diff --git a/dbms/tests/queries/0_stateless/00005_shard_format_ast_and_remote_table_lambda.sql b/tests/queries/0_stateless/00005_shard_format_ast_and_remote_table_lambda.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00005_shard_format_ast_and_remote_table_lambda.sql rename to tests/queries/0_stateless/00005_shard_format_ast_and_remote_table_lambda.sql diff --git a/dbms/tests/queries/0_stateless/00006_extremes_and_subquery_from.reference b/tests/queries/0_stateless/00006_extremes_and_subquery_from.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00006_extremes_and_subquery_from.reference rename to tests/queries/0_stateless/00006_extremes_and_subquery_from.reference diff --git a/dbms/tests/queries/0_stateless/00006_extremes_and_subquery_from.sql b/tests/queries/0_stateless/00006_extremes_and_subquery_from.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00006_extremes_and_subquery_from.sql rename to tests/queries/0_stateless/00006_extremes_and_subquery_from.sql diff --git a/dbms/tests/queries/0_stateless/00007_array.reference b/tests/queries/0_stateless/00007_array.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00007_array.reference rename to tests/queries/0_stateless/00007_array.reference diff --git a/dbms/tests/queries/0_stateless/00007_array.sql b/tests/queries/0_stateless/00007_array.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00007_array.sql rename to tests/queries/0_stateless/00007_array.sql diff --git a/dbms/tests/queries/0_stateless/00008_array_join.reference b/tests/queries/0_stateless/00008_array_join.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00008_array_join.reference rename to tests/queries/0_stateless/00008_array_join.reference diff --git a/dbms/tests/queries/0_stateless/00008_array_join.sql b/tests/queries/0_stateless/00008_array_join.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00008_array_join.sql rename to tests/queries/0_stateless/00008_array_join.sql diff --git a/dbms/tests/queries/0_stateless/00009_array_join_subquery.reference b/tests/queries/0_stateless/00009_array_join_subquery.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00009_array_join_subquery.reference rename to tests/queries/0_stateless/00009_array_join_subquery.reference diff --git a/dbms/tests/queries/0_stateless/00009_array_join_subquery.sql 
b/tests/queries/0_stateless/00009_array_join_subquery.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00009_array_join_subquery.sql rename to tests/queries/0_stateless/00009_array_join_subquery.sql diff --git a/dbms/tests/queries/0_stateless/00010_big_array_join.reference b/tests/queries/0_stateless/00010_big_array_join.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00010_big_array_join.reference rename to tests/queries/0_stateless/00010_big_array_join.reference diff --git a/dbms/tests/queries/0_stateless/00010_big_array_join.sql b/tests/queries/0_stateless/00010_big_array_join.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00010_big_array_join.sql rename to tests/queries/0_stateless/00010_big_array_join.sql diff --git a/dbms/tests/queries/0_stateless/00011_array_join_alias.reference b/tests/queries/0_stateless/00011_array_join_alias.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00011_array_join_alias.reference rename to tests/queries/0_stateless/00011_array_join_alias.reference diff --git a/dbms/tests/queries/0_stateless/00011_array_join_alias.sql b/tests/queries/0_stateless/00011_array_join_alias.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00011_array_join_alias.sql rename to tests/queries/0_stateless/00011_array_join_alias.sql diff --git a/dbms/tests/queries/0_stateless/00012_array_join_alias_2.reference b/tests/queries/0_stateless/00012_array_join_alias_2.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00012_array_join_alias_2.reference rename to tests/queries/0_stateless/00012_array_join_alias_2.reference diff --git a/dbms/tests/queries/0_stateless/00012_array_join_alias_2.sql b/tests/queries/0_stateless/00012_array_join_alias_2.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00012_array_join_alias_2.sql rename to tests/queries/0_stateless/00012_array_join_alias_2.sql diff --git a/dbms/tests/queries/0_stateless/00013_create_table_with_arrays.reference b/tests/queries/0_stateless/00013_create_table_with_arrays.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00013_create_table_with_arrays.reference rename to tests/queries/0_stateless/00013_create_table_with_arrays.reference diff --git a/dbms/tests/queries/0_stateless/00013_create_table_with_arrays.sql b/tests/queries/0_stateless/00013_create_table_with_arrays.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00013_create_table_with_arrays.sql rename to tests/queries/0_stateless/00013_create_table_with_arrays.sql diff --git a/dbms/tests/queries/0_stateless/00014_select_from_table_with_nested.reference b/tests/queries/0_stateless/00014_select_from_table_with_nested.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00014_select_from_table_with_nested.reference rename to tests/queries/0_stateless/00014_select_from_table_with_nested.reference diff --git a/dbms/tests/queries/0_stateless/00014_select_from_table_with_nested.sql b/tests/queries/0_stateless/00014_select_from_table_with_nested.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00014_select_from_table_with_nested.sql rename to tests/queries/0_stateless/00014_select_from_table_with_nested.sql diff --git a/dbms/tests/queries/0_stateless/00015_totals_having_constants.reference b/tests/queries/0_stateless/00015_totals_having_constants.reference similarity index 100% rename from 
dbms/tests/queries/0_stateless/00015_totals_having_constants.reference rename to tests/queries/0_stateless/00015_totals_having_constants.reference diff --git a/dbms/tests/queries/0_stateless/00015_totals_having_constants.sql b/tests/queries/0_stateless/00015_totals_having_constants.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00015_totals_having_constants.sql rename to tests/queries/0_stateless/00015_totals_having_constants.sql diff --git a/dbms/tests/queries/0_stateless/00016_totals_having_constants.reference b/tests/queries/0_stateless/00016_totals_having_constants.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00016_totals_having_constants.reference rename to tests/queries/0_stateless/00016_totals_having_constants.reference diff --git a/dbms/tests/queries/0_stateless/00016_totals_having_constants.sql b/tests/queries/0_stateless/00016_totals_having_constants.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00016_totals_having_constants.sql rename to tests/queries/0_stateless/00016_totals_having_constants.sql diff --git a/dbms/tests/queries/0_stateless/00017_in_subquery_with_empty_result.reference b/tests/queries/0_stateless/00017_in_subquery_with_empty_result.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00017_in_subquery_with_empty_result.reference rename to tests/queries/0_stateless/00017_in_subquery_with_empty_result.reference diff --git a/dbms/tests/queries/0_stateless/00017_in_subquery_with_empty_result.sql b/tests/queries/0_stateless/00017_in_subquery_with_empty_result.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00017_in_subquery_with_empty_result.sql rename to tests/queries/0_stateless/00017_in_subquery_with_empty_result.sql diff --git a/dbms/tests/queries/0_stateless/00018_distinct_in_subquery.reference b/tests/queries/0_stateless/00018_distinct_in_subquery.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00018_distinct_in_subquery.reference rename to tests/queries/0_stateless/00018_distinct_in_subquery.reference diff --git a/dbms/tests/queries/0_stateless/00018_distinct_in_subquery.sql b/tests/queries/0_stateless/00018_distinct_in_subquery.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00018_distinct_in_subquery.sql rename to tests/queries/0_stateless/00018_distinct_in_subquery.sql diff --git a/dbms/tests/queries/0_stateless/00019_shard_quantiles_totals_distributed.reference b/tests/queries/0_stateless/00019_shard_quantiles_totals_distributed.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00019_shard_quantiles_totals_distributed.reference rename to tests/queries/0_stateless/00019_shard_quantiles_totals_distributed.reference diff --git a/dbms/tests/queries/0_stateless/00019_shard_quantiles_totals_distributed.sql b/tests/queries/0_stateless/00019_shard_quantiles_totals_distributed.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00019_shard_quantiles_totals_distributed.sql rename to tests/queries/0_stateless/00019_shard_quantiles_totals_distributed.sql diff --git a/dbms/tests/queries/0_stateless/00020_sorting_arrays.reference b/tests/queries/0_stateless/00020_sorting_arrays.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00020_sorting_arrays.reference rename to tests/queries/0_stateless/00020_sorting_arrays.reference diff --git a/dbms/tests/queries/0_stateless/00020_sorting_arrays.sql b/tests/queries/0_stateless/00020_sorting_arrays.sql 
similarity index 100% rename from dbms/tests/queries/0_stateless/00020_sorting_arrays.sql rename to tests/queries/0_stateless/00020_sorting_arrays.sql diff --git a/dbms/tests/queries/0_stateless/00021_sorting_arrays.reference b/tests/queries/0_stateless/00021_sorting_arrays.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00021_sorting_arrays.reference rename to tests/queries/0_stateless/00021_sorting_arrays.reference diff --git a/dbms/tests/queries/0_stateless/00021_sorting_arrays.sql b/tests/queries/0_stateless/00021_sorting_arrays.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00021_sorting_arrays.sql rename to tests/queries/0_stateless/00021_sorting_arrays.sql diff --git a/dbms/tests/queries/0_stateless/00022_func_higher_order_and_constants.reference b/tests/queries/0_stateless/00022_func_higher_order_and_constants.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00022_func_higher_order_and_constants.reference rename to tests/queries/0_stateless/00022_func_higher_order_and_constants.reference diff --git a/dbms/tests/queries/0_stateless/00022_func_higher_order_and_constants.sql b/tests/queries/0_stateless/00022_func_higher_order_and_constants.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00022_func_higher_order_and_constants.sql rename to tests/queries/0_stateless/00022_func_higher_order_and_constants.sql diff --git a/dbms/tests/queries/0_stateless/00023_agg_select_agg_subquery.reference b/tests/queries/0_stateless/00023_agg_select_agg_subquery.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00023_agg_select_agg_subquery.reference rename to tests/queries/0_stateless/00023_agg_select_agg_subquery.reference diff --git a/dbms/tests/queries/0_stateless/00023_agg_select_agg_subquery.sql b/tests/queries/0_stateless/00023_agg_select_agg_subquery.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00023_agg_select_agg_subquery.sql rename to tests/queries/0_stateless/00023_agg_select_agg_subquery.sql diff --git a/dbms/tests/queries/0_stateless/00024_unused_array_join_in_subquery.reference b/tests/queries/0_stateless/00024_unused_array_join_in_subquery.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00024_unused_array_join_in_subquery.reference rename to tests/queries/0_stateless/00024_unused_array_join_in_subquery.reference diff --git a/dbms/tests/queries/0_stateless/00024_unused_array_join_in_subquery.sql b/tests/queries/0_stateless/00024_unused_array_join_in_subquery.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00024_unused_array_join_in_subquery.sql rename to tests/queries/0_stateless/00024_unused_array_join_in_subquery.sql diff --git a/dbms/tests/queries/0_stateless/00025_implicitly_used_subquery_column.reference b/tests/queries/0_stateless/00025_implicitly_used_subquery_column.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00025_implicitly_used_subquery_column.reference rename to tests/queries/0_stateless/00025_implicitly_used_subquery_column.reference diff --git a/dbms/tests/queries/0_stateless/00025_implicitly_used_subquery_column.sql b/tests/queries/0_stateless/00025_implicitly_used_subquery_column.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00025_implicitly_used_subquery_column.sql rename to tests/queries/0_stateless/00025_implicitly_used_subquery_column.sql diff --git a/dbms/tests/queries/0_stateless/00026_shard_something_distributed.reference 
b/tests/queries/0_stateless/00026_shard_something_distributed.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00026_shard_something_distributed.reference rename to tests/queries/0_stateless/00026_shard_something_distributed.reference diff --git a/dbms/tests/queries/0_stateless/00026_shard_something_distributed.sql b/tests/queries/0_stateless/00026_shard_something_distributed.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00026_shard_something_distributed.sql rename to tests/queries/0_stateless/00026_shard_something_distributed.sql diff --git a/dbms/tests/queries/0_stateless/00027_distinct_and_order_by.reference b/tests/queries/0_stateless/00027_distinct_and_order_by.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00027_distinct_and_order_by.reference rename to tests/queries/0_stateless/00027_distinct_and_order_by.reference diff --git a/dbms/tests/queries/0_stateless/00027_distinct_and_order_by.sql b/tests/queries/0_stateless/00027_distinct_and_order_by.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00027_distinct_and_order_by.sql rename to tests/queries/0_stateless/00027_distinct_and_order_by.sql diff --git a/dbms/tests/queries/0_stateless/00027_simple_argMinArray.reference b/tests/queries/0_stateless/00027_simple_argMinArray.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00027_simple_argMinArray.reference rename to tests/queries/0_stateless/00027_simple_argMinArray.reference diff --git a/dbms/tests/queries/0_stateless/00027_simple_argMinArray.sql b/tests/queries/0_stateless/00027_simple_argMinArray.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00027_simple_argMinArray.sql rename to tests/queries/0_stateless/00027_simple_argMinArray.sql diff --git a/dbms/tests/queries/0_stateless/00028_shard_big_agg_aj_distributed.reference b/tests/queries/0_stateless/00028_shard_big_agg_aj_distributed.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00028_shard_big_agg_aj_distributed.reference rename to tests/queries/0_stateless/00028_shard_big_agg_aj_distributed.reference diff --git a/dbms/tests/queries/0_stateless/00028_shard_big_agg_aj_distributed.sql b/tests/queries/0_stateless/00028_shard_big_agg_aj_distributed.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00028_shard_big_agg_aj_distributed.sql rename to tests/queries/0_stateless/00028_shard_big_agg_aj_distributed.sql diff --git a/dbms/tests/queries/0_stateless/00029_test_zookeeper_optimize_exception.reference b/tests/queries/0_stateless/00029_test_zookeeper_optimize_exception.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00029_test_zookeeper_optimize_exception.reference rename to tests/queries/0_stateless/00029_test_zookeeper_optimize_exception.reference diff --git a/dbms/tests/queries/0_stateless/00029_test_zookeeper_optimize_exception.sh b/tests/queries/0_stateless/00029_test_zookeeper_optimize_exception.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00029_test_zookeeper_optimize_exception.sh rename to tests/queries/0_stateless/00029_test_zookeeper_optimize_exception.sh diff --git a/dbms/tests/queries/0_stateless/00030_alter_table.reference b/tests/queries/0_stateless/00030_alter_table.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00030_alter_table.reference rename to tests/queries/0_stateless/00030_alter_table.reference diff --git 
a/dbms/tests/queries/0_stateless/00030_alter_table.sql b/tests/queries/0_stateless/00030_alter_table.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00030_alter_table.sql rename to tests/queries/0_stateless/00030_alter_table.sql diff --git a/dbms/tests/queries/0_stateless/00031_parser_number.reference b/tests/queries/0_stateless/00031_parser_number.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00031_parser_number.reference rename to tests/queries/0_stateless/00031_parser_number.reference diff --git a/dbms/tests/queries/0_stateless/00031_parser_number.sql b/tests/queries/0_stateless/00031_parser_number.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00031_parser_number.sql rename to tests/queries/0_stateless/00031_parser_number.sql diff --git a/dbms/tests/queries/0_stateless/00032_fixed_string_to_string.reference b/tests/queries/0_stateless/00032_fixed_string_to_string.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00032_fixed_string_to_string.reference rename to tests/queries/0_stateless/00032_fixed_string_to_string.reference diff --git a/dbms/tests/queries/0_stateless/00032_fixed_string_to_string.sql b/tests/queries/0_stateless/00032_fixed_string_to_string.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00032_fixed_string_to_string.sql rename to tests/queries/0_stateless/00032_fixed_string_to_string.sql diff --git a/dbms/tests/queries/0_stateless/00033_fixed_string_to_string.reference b/tests/queries/0_stateless/00033_fixed_string_to_string.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00033_fixed_string_to_string.reference rename to tests/queries/0_stateless/00033_fixed_string_to_string.reference diff --git a/dbms/tests/queries/0_stateless/00033_fixed_string_to_string.sql b/tests/queries/0_stateless/00033_fixed_string_to_string.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00033_fixed_string_to_string.sql rename to tests/queries/0_stateless/00033_fixed_string_to_string.sql diff --git a/dbms/tests/queries/0_stateless/00034_fixed_string_to_number.reference b/tests/queries/0_stateless/00034_fixed_string_to_number.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00034_fixed_string_to_number.reference rename to tests/queries/0_stateless/00034_fixed_string_to_number.reference diff --git a/dbms/tests/queries/0_stateless/00034_fixed_string_to_number.sql b/tests/queries/0_stateless/00034_fixed_string_to_number.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00034_fixed_string_to_number.sql rename to tests/queries/0_stateless/00034_fixed_string_to_number.sql diff --git a/dbms/tests/queries/0_stateless/00035_function_array_return_type.reference b/tests/queries/0_stateless/00035_function_array_return_type.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00035_function_array_return_type.reference rename to tests/queries/0_stateless/00035_function_array_return_type.reference diff --git a/dbms/tests/queries/0_stateless/00035_function_array_return_type.sql b/tests/queries/0_stateless/00035_function_array_return_type.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00035_function_array_return_type.sql rename to tests/queries/0_stateless/00035_function_array_return_type.sql diff --git a/dbms/tests/queries/0_stateless/00036_array_element.reference b/tests/queries/0_stateless/00036_array_element.reference similarity index 100% rename from 
dbms/tests/queries/0_stateless/00036_array_element.reference rename to tests/queries/0_stateless/00036_array_element.reference diff --git a/dbms/tests/queries/0_stateless/00036_array_element.sql b/tests/queries/0_stateless/00036_array_element.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00036_array_element.sql rename to tests/queries/0_stateless/00036_array_element.sql diff --git a/dbms/tests/queries/0_stateless/00037_totals_limit.reference b/tests/queries/0_stateless/00037_totals_limit.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00037_totals_limit.reference rename to tests/queries/0_stateless/00037_totals_limit.reference diff --git a/dbms/tests/queries/0_stateless/00037_totals_limit.sql b/tests/queries/0_stateless/00037_totals_limit.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00037_totals_limit.sql rename to tests/queries/0_stateless/00037_totals_limit.sql diff --git a/dbms/tests/queries/0_stateless/00038_totals_limit.reference b/tests/queries/0_stateless/00038_totals_limit.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00038_totals_limit.reference rename to tests/queries/0_stateless/00038_totals_limit.reference diff --git a/dbms/tests/queries/0_stateless/00038_totals_limit.sql b/tests/queries/0_stateless/00038_totals_limit.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00038_totals_limit.sql rename to tests/queries/0_stateless/00038_totals_limit.sql diff --git a/dbms/tests/queries/0_stateless/00039_inserts_through_http.reference b/tests/queries/0_stateless/00039_inserts_through_http.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00039_inserts_through_http.reference rename to tests/queries/0_stateless/00039_inserts_through_http.reference diff --git a/dbms/tests/queries/0_stateless/00039_inserts_through_http.sh b/tests/queries/0_stateless/00039_inserts_through_http.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00039_inserts_through_http.sh rename to tests/queries/0_stateless/00039_inserts_through_http.sh diff --git a/dbms/tests/queries/0_stateless/00040_array_enumerate_uniq.reference b/tests/queries/0_stateless/00040_array_enumerate_uniq.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00040_array_enumerate_uniq.reference rename to tests/queries/0_stateless/00040_array_enumerate_uniq.reference diff --git a/dbms/tests/queries/0_stateless/00040_array_enumerate_uniq.sql b/tests/queries/0_stateless/00040_array_enumerate_uniq.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00040_array_enumerate_uniq.sql rename to tests/queries/0_stateless/00040_array_enumerate_uniq.sql diff --git a/dbms/tests/queries/0_stateless/00041_aggregation_remap.reference b/tests/queries/0_stateless/00041_aggregation_remap.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00041_aggregation_remap.reference rename to tests/queries/0_stateless/00041_aggregation_remap.reference diff --git a/dbms/tests/queries/0_stateless/00041_aggregation_remap.sql b/tests/queries/0_stateless/00041_aggregation_remap.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00041_aggregation_remap.sql rename to tests/queries/0_stateless/00041_aggregation_remap.sql diff --git a/dbms/tests/queries/0_stateless/00041_big_array_join.reference b/tests/queries/0_stateless/00041_big_array_join.reference similarity index 100% rename from 
dbms/tests/queries/0_stateless/00041_big_array_join.reference rename to tests/queries/0_stateless/00041_big_array_join.reference diff --git a/dbms/tests/queries/0_stateless/00041_big_array_join.sql b/tests/queries/0_stateless/00041_big_array_join.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00041_big_array_join.sql rename to tests/queries/0_stateless/00041_big_array_join.sql diff --git a/dbms/tests/queries/0_stateless/00042_set.reference b/tests/queries/0_stateless/00042_set.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00042_set.reference rename to tests/queries/0_stateless/00042_set.reference diff --git a/dbms/tests/queries/0_stateless/00042_set.sql b/tests/queries/0_stateless/00042_set.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00042_set.sql rename to tests/queries/0_stateless/00042_set.sql diff --git a/dbms/tests/queries/0_stateless/00043_summing_empty_part.reference b/tests/queries/0_stateless/00043_summing_empty_part.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00043_summing_empty_part.reference rename to tests/queries/0_stateless/00043_summing_empty_part.reference diff --git a/dbms/tests/queries/0_stateless/00043_summing_empty_part.sql b/tests/queries/0_stateless/00043_summing_empty_part.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00043_summing_empty_part.sql rename to tests/queries/0_stateless/00043_summing_empty_part.sql diff --git a/dbms/tests/queries/0_stateless/00044_sorting_by_string_descending.reference b/tests/queries/0_stateless/00044_sorting_by_string_descending.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00044_sorting_by_string_descending.reference rename to tests/queries/0_stateless/00044_sorting_by_string_descending.reference diff --git a/dbms/tests/queries/0_stateless/00044_sorting_by_string_descending.sql b/tests/queries/0_stateless/00044_sorting_by_string_descending.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00044_sorting_by_string_descending.sql rename to tests/queries/0_stateless/00044_sorting_by_string_descending.sql diff --git a/dbms/tests/queries/0_stateless/00045_sorting_by_fixed_string_descending.reference b/tests/queries/0_stateless/00045_sorting_by_fixed_string_descending.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00045_sorting_by_fixed_string_descending.reference rename to tests/queries/0_stateless/00045_sorting_by_fixed_string_descending.reference diff --git a/dbms/tests/queries/0_stateless/00045_sorting_by_fixed_string_descending.sql b/tests/queries/0_stateless/00045_sorting_by_fixed_string_descending.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00045_sorting_by_fixed_string_descending.sql rename to tests/queries/0_stateless/00045_sorting_by_fixed_string_descending.sql diff --git a/dbms/tests/queries/0_stateless/00046_stored_aggregates_simple.reference b/tests/queries/0_stateless/00046_stored_aggregates_simple.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00046_stored_aggregates_simple.reference rename to tests/queries/0_stateless/00046_stored_aggregates_simple.reference diff --git a/dbms/tests/queries/0_stateless/00046_stored_aggregates_simple.sql b/tests/queries/0_stateless/00046_stored_aggregates_simple.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00046_stored_aggregates_simple.sql rename to tests/queries/0_stateless/00046_stored_aggregates_simple.sql diff 
--git a/dbms/tests/queries/0_stateless/00047_stored_aggregates_complex.reference b/tests/queries/0_stateless/00047_stored_aggregates_complex.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00047_stored_aggregates_complex.reference rename to tests/queries/0_stateless/00047_stored_aggregates_complex.reference diff --git a/dbms/tests/queries/0_stateless/00047_stored_aggregates_complex.sql b/tests/queries/0_stateless/00047_stored_aggregates_complex.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00047_stored_aggregates_complex.sql rename to tests/queries/0_stateless/00047_stored_aggregates_complex.sql diff --git a/dbms/tests/queries/0_stateless/00048_a_stored_aggregates_merge.reference b/tests/queries/0_stateless/00048_a_stored_aggregates_merge.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00048_a_stored_aggregates_merge.reference rename to tests/queries/0_stateless/00048_a_stored_aggregates_merge.reference diff --git a/dbms/tests/queries/0_stateless/00048_a_stored_aggregates_merge.sql b/tests/queries/0_stateless/00048_a_stored_aggregates_merge.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00048_a_stored_aggregates_merge.sql rename to tests/queries/0_stateless/00048_a_stored_aggregates_merge.sql diff --git a/dbms/tests/queries/0_stateless/00048_b_stored_aggregates_merge.reference b/tests/queries/0_stateless/00048_b_stored_aggregates_merge.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00048_b_stored_aggregates_merge.reference rename to tests/queries/0_stateless/00048_b_stored_aggregates_merge.reference diff --git a/dbms/tests/queries/0_stateless/00048_b_stored_aggregates_merge.sql b/tests/queries/0_stateless/00048_b_stored_aggregates_merge.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00048_b_stored_aggregates_merge.sql rename to tests/queries/0_stateless/00048_b_stored_aggregates_merge.sql diff --git a/dbms/tests/queries/0_stateless/00049_any_left_join.reference b/tests/queries/0_stateless/00049_any_left_join.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00049_any_left_join.reference rename to tests/queries/0_stateless/00049_any_left_join.reference diff --git a/dbms/tests/queries/0_stateless/00049_any_left_join.sql b/tests/queries/0_stateless/00049_any_left_join.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00049_any_left_join.sql rename to tests/queries/0_stateless/00049_any_left_join.sql diff --git a/dbms/tests/queries/0_stateless/00050_any_left_join.reference b/tests/queries/0_stateless/00050_any_left_join.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00050_any_left_join.reference rename to tests/queries/0_stateless/00050_any_left_join.reference diff --git a/dbms/tests/queries/0_stateless/00050_any_left_join.sql b/tests/queries/0_stateless/00050_any_left_join.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00050_any_left_join.sql rename to tests/queries/0_stateless/00050_any_left_join.sql diff --git a/dbms/tests/queries/0_stateless/00051_any_inner_join.reference b/tests/queries/0_stateless/00051_any_inner_join.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00051_any_inner_join.reference rename to tests/queries/0_stateless/00051_any_inner_join.reference diff --git a/dbms/tests/queries/0_stateless/00051_any_inner_join.sql b/tests/queries/0_stateless/00051_any_inner_join.sql similarity index 100% rename from 
dbms/tests/queries/0_stateless/00051_any_inner_join.sql rename to tests/queries/0_stateless/00051_any_inner_join.sql diff --git a/dbms/tests/queries/0_stateless/00052_all_left_join.reference b/tests/queries/0_stateless/00052_all_left_join.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00052_all_left_join.reference rename to tests/queries/0_stateless/00052_all_left_join.reference diff --git a/dbms/tests/queries/0_stateless/00052_all_left_join.sql b/tests/queries/0_stateless/00052_all_left_join.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00052_all_left_join.sql rename to tests/queries/0_stateless/00052_all_left_join.sql diff --git a/dbms/tests/queries/0_stateless/00053_all_inner_join.reference b/tests/queries/0_stateless/00053_all_inner_join.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00053_all_inner_join.reference rename to tests/queries/0_stateless/00053_all_inner_join.reference diff --git a/dbms/tests/queries/0_stateless/00053_all_inner_join.sql b/tests/queries/0_stateless/00053_all_inner_join.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00053_all_inner_join.sql rename to tests/queries/0_stateless/00053_all_inner_join.sql diff --git a/dbms/tests/queries/0_stateless/00054_join_string.reference b/tests/queries/0_stateless/00054_join_string.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00054_join_string.reference rename to tests/queries/0_stateless/00054_join_string.reference diff --git a/dbms/tests/queries/0_stateless/00054_join_string.sql b/tests/queries/0_stateless/00054_join_string.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00054_join_string.sql rename to tests/queries/0_stateless/00054_join_string.sql diff --git a/dbms/tests/queries/0_stateless/00055_join_two_numbers.reference b/tests/queries/0_stateless/00055_join_two_numbers.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00055_join_two_numbers.reference rename to tests/queries/0_stateless/00055_join_two_numbers.reference diff --git a/dbms/tests/queries/0_stateless/00055_join_two_numbers.sql b/tests/queries/0_stateless/00055_join_two_numbers.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00055_join_two_numbers.sql rename to tests/queries/0_stateless/00055_join_two_numbers.sql diff --git a/dbms/tests/queries/0_stateless/00056_join_number_string.reference b/tests/queries/0_stateless/00056_join_number_string.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00056_join_number_string.reference rename to tests/queries/0_stateless/00056_join_number_string.reference diff --git a/dbms/tests/queries/0_stateless/00056_join_number_string.sql b/tests/queries/0_stateless/00056_join_number_string.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00056_join_number_string.sql rename to tests/queries/0_stateless/00056_join_number_string.sql diff --git a/dbms/tests/queries/0_stateless/00057_join_aliases.reference b/tests/queries/0_stateless/00057_join_aliases.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00057_join_aliases.reference rename to tests/queries/0_stateless/00057_join_aliases.reference diff --git a/dbms/tests/queries/0_stateless/00057_join_aliases.sql b/tests/queries/0_stateless/00057_join_aliases.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00057_join_aliases.sql rename to tests/queries/0_stateless/00057_join_aliases.sql diff --git 
a/dbms/tests/queries/0_stateless/00059_shard_global_in.reference b/tests/queries/0_stateless/00059_shard_global_in.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00059_shard_global_in.reference rename to tests/queries/0_stateless/00059_shard_global_in.reference diff --git a/dbms/tests/queries/0_stateless/00059_shard_global_in.sql b/tests/queries/0_stateless/00059_shard_global_in.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00059_shard_global_in.sql rename to tests/queries/0_stateless/00059_shard_global_in.sql diff --git a/dbms/tests/queries/0_stateless/00060_date_lut.reference b/tests/queries/0_stateless/00060_date_lut.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00060_date_lut.reference rename to tests/queries/0_stateless/00060_date_lut.reference diff --git a/dbms/tests/queries/0_stateless/00060_date_lut.sql b/tests/queries/0_stateless/00060_date_lut.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00060_date_lut.sql rename to tests/queries/0_stateless/00060_date_lut.sql diff --git a/dbms/tests/queries/0_stateless/00061_merge_tree_alter.reference b/tests/queries/0_stateless/00061_merge_tree_alter.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00061_merge_tree_alter.reference rename to tests/queries/0_stateless/00061_merge_tree_alter.reference diff --git a/dbms/tests/queries/0_stateless/00061_merge_tree_alter.sql b/tests/queries/0_stateless/00061_merge_tree_alter.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00061_merge_tree_alter.sql rename to tests/queries/0_stateless/00061_merge_tree_alter.sql diff --git a/dbms/tests/queries/0_stateless/00062_replicated_merge_tree_alter_zookeeper.reference b/tests/queries/0_stateless/00062_replicated_merge_tree_alter_zookeeper.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00062_replicated_merge_tree_alter_zookeeper.reference rename to tests/queries/0_stateless/00062_replicated_merge_tree_alter_zookeeper.reference diff --git a/dbms/tests/queries/0_stateless/00062_replicated_merge_tree_alter_zookeeper.sql b/tests/queries/0_stateless/00062_replicated_merge_tree_alter_zookeeper.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00062_replicated_merge_tree_alter_zookeeper.sql rename to tests/queries/0_stateless/00062_replicated_merge_tree_alter_zookeeper.sql diff --git a/dbms/tests/queries/0_stateless/00063_check_query.reference b/tests/queries/0_stateless/00063_check_query.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00063_check_query.reference rename to tests/queries/0_stateless/00063_check_query.reference diff --git a/dbms/tests/queries/0_stateless/00063_check_query.sql b/tests/queries/0_stateless/00063_check_query.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00063_check_query.sql rename to tests/queries/0_stateless/00063_check_query.sql diff --git a/dbms/tests/queries/0_stateless/00064_negate_bug.reference b/tests/queries/0_stateless/00064_negate_bug.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00064_negate_bug.reference rename to tests/queries/0_stateless/00064_negate_bug.reference diff --git a/dbms/tests/queries/0_stateless/00064_negate_bug.sql b/tests/queries/0_stateless/00064_negate_bug.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00064_negate_bug.sql rename to tests/queries/0_stateless/00064_negate_bug.sql diff --git 
a/dbms/tests/queries/0_stateless/00065_shard_float_literals_formatting.reference b/tests/queries/0_stateless/00065_shard_float_literals_formatting.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00065_shard_float_literals_formatting.reference
rename to tests/queries/0_stateless/00065_shard_float_literals_formatting.reference
diff --git a/dbms/tests/queries/0_stateless/00065_shard_float_literals_formatting.sql b/tests/queries/0_stateless/00065_shard_float_literals_formatting.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00065_shard_float_literals_formatting.sql
rename to tests/queries/0_stateless/00065_shard_float_literals_formatting.sql
diff --git a/dbms/tests/queries/0_stateless/00066_group_by_in.reference b/tests/queries/0_stateless/00066_group_by_in.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00066_group_by_in.reference
rename to tests/queries/0_stateless/00066_group_by_in.reference
diff --git a/dbms/tests/queries/0_stateless/00066_group_by_in.sql b/tests/queries/0_stateless/00066_group_by_in.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00066_group_by_in.sql
rename to tests/queries/0_stateless/00066_group_by_in.sql
diff --git a/dbms/tests/queries/0_stateless/00067_replicate_segfault.reference b/tests/queries/0_stateless/00067_replicate_segfault.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00067_replicate_segfault.reference
rename to tests/queries/0_stateless/00067_replicate_segfault.reference
diff --git a/dbms/tests/queries/0_stateless/00067_replicate_segfault.sql b/tests/queries/0_stateless/00067_replicate_segfault.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00067_replicate_segfault.sql
rename to tests/queries/0_stateless/00067_replicate_segfault.sql
diff --git a/dbms/tests/queries/0_stateless/00068_empty_tiny_log.reference b/tests/queries/0_stateless/00068_empty_tiny_log.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00068_empty_tiny_log.reference
rename to tests/queries/0_stateless/00068_empty_tiny_log.reference
diff --git a/dbms/tests/queries/0_stateless/00068_empty_tiny_log.sql b/tests/queries/0_stateless/00068_empty_tiny_log.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00068_empty_tiny_log.sql
rename to tests/queries/0_stateless/00068_empty_tiny_log.sql
diff --git a/dbms/tests/queries/0_stateless/00069_date_arithmetic.reference b/tests/queries/0_stateless/00069_date_arithmetic.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00069_date_arithmetic.reference
rename to tests/queries/0_stateless/00069_date_arithmetic.reference
diff --git a/dbms/tests/queries/0_stateless/00069_date_arithmetic.sql b/tests/queries/0_stateless/00069_date_arithmetic.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00069_date_arithmetic.sql
rename to tests/queries/0_stateless/00069_date_arithmetic.sql
diff --git a/dbms/tests/queries/0_stateless/00070_insert_fewer_columns_http.reference b/tests/queries/0_stateless/00070_insert_fewer_columns_http.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00070_insert_fewer_columns_http.reference
rename to tests/queries/0_stateless/00070_insert_fewer_columns_http.reference
diff --git a/dbms/tests/queries/0_stateless/00070_insert_fewer_columns_http.sh b/tests/queries/0_stateless/00070_insert_fewer_columns_http.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00070_insert_fewer_columns_http.sh
rename to tests/queries/0_stateless/00070_insert_fewer_columns_http.sh
diff --git a/dbms/tests/queries/0_stateless/00071_insert_fewer_columns.reference b/tests/queries/0_stateless/00071_insert_fewer_columns.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00071_insert_fewer_columns.reference
rename to tests/queries/0_stateless/00071_insert_fewer_columns.reference
diff --git a/dbms/tests/queries/0_stateless/00071_insert_fewer_columns.sql b/tests/queries/0_stateless/00071_insert_fewer_columns.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00071_insert_fewer_columns.sql
rename to tests/queries/0_stateless/00071_insert_fewer_columns.sql
diff --git a/dbms/tests/queries/0_stateless/00072_in_types.reference b/tests/queries/0_stateless/00072_in_types.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00072_in_types.reference
rename to tests/queries/0_stateless/00072_in_types.reference
diff --git a/dbms/tests/queries/0_stateless/00072_in_types.sql b/tests/queries/0_stateless/00072_in_types.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00072_in_types.sql
rename to tests/queries/0_stateless/00072_in_types.sql
diff --git a/dbms/tests/queries/0_stateless/00073_merge_sorting_empty_array_joined.reference b/tests/queries/0_stateless/00073_merge_sorting_empty_array_joined.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00073_merge_sorting_empty_array_joined.reference
rename to tests/queries/0_stateless/00073_merge_sorting_empty_array_joined.reference
diff --git a/dbms/tests/queries/0_stateless/00073_merge_sorting_empty_array_joined.sql b/tests/queries/0_stateless/00073_merge_sorting_empty_array_joined.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00073_merge_sorting_empty_array_joined.sql
rename to tests/queries/0_stateless/00073_merge_sorting_empty_array_joined.sql
diff --git a/dbms/tests/queries/0_stateless/00075_shard_formatting_negate_of_negative_literal.reference b/tests/queries/0_stateless/00075_shard_formatting_negate_of_negative_literal.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00075_shard_formatting_negate_of_negative_literal.reference
rename to tests/queries/0_stateless/00075_shard_formatting_negate_of_negative_literal.reference
diff --git a/dbms/tests/queries/0_stateless/00075_shard_formatting_negate_of_negative_literal.sql b/tests/queries/0_stateless/00075_shard_formatting_negate_of_negative_literal.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00075_shard_formatting_negate_of_negative_literal.sql
rename to tests/queries/0_stateless/00075_shard_formatting_negate_of_negative_literal.sql
diff --git a/dbms/tests/queries/0_stateless/00076_ip_coding_functions.reference b/tests/queries/0_stateless/00076_ip_coding_functions.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00076_ip_coding_functions.reference
rename to tests/queries/0_stateless/00076_ip_coding_functions.reference
diff --git a/dbms/tests/queries/0_stateless/00076_ip_coding_functions.sql b/tests/queries/0_stateless/00076_ip_coding_functions.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00076_ip_coding_functions.sql
rename to tests/queries/0_stateless/00076_ip_coding_functions.sql
diff --git a/dbms/tests/queries/0_stateless/00077_set_keys_fit_128_bits_many_blocks.reference b/tests/queries/0_stateless/00077_set_keys_fit_128_bits_many_blocks.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00077_set_keys_fit_128_bits_many_blocks.reference
rename to tests/queries/0_stateless/00077_set_keys_fit_128_bits_many_blocks.reference
diff --git a/dbms/tests/queries/0_stateless/00077_set_keys_fit_128_bits_many_blocks.sql b/tests/queries/0_stateless/00077_set_keys_fit_128_bits_many_blocks.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00077_set_keys_fit_128_bits_many_blocks.sql
rename to tests/queries/0_stateless/00077_set_keys_fit_128_bits_many_blocks.sql
diff --git a/dbms/tests/queries/0_stateless/00078_string_concat.reference b/tests/queries/0_stateless/00078_string_concat.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00078_string_concat.reference
rename to tests/queries/0_stateless/00078_string_concat.reference
diff --git a/dbms/tests/queries/0_stateless/00078_string_concat.sql b/tests/queries/0_stateless/00078_string_concat.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00078_string_concat.sql
rename to tests/queries/0_stateless/00078_string_concat.sql
diff --git a/dbms/tests/queries/0_stateless/00079_defaulted_columns.reference b/tests/queries/0_stateless/00079_defaulted_columns.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00079_defaulted_columns.reference
rename to tests/queries/0_stateless/00079_defaulted_columns.reference
diff --git a/dbms/tests/queries/0_stateless/00079_defaulted_columns.sql b/tests/queries/0_stateless/00079_defaulted_columns.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00079_defaulted_columns.sql
rename to tests/queries/0_stateless/00079_defaulted_columns.sql
diff --git a/dbms/tests/queries/0_stateless/00080_show_tables_and_system_tables.reference b/tests/queries/0_stateless/00080_show_tables_and_system_tables.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00080_show_tables_and_system_tables.reference
rename to tests/queries/0_stateless/00080_show_tables_and_system_tables.reference
diff --git a/dbms/tests/queries/0_stateless/00080_show_tables_and_system_tables.sql b/tests/queries/0_stateless/00080_show_tables_and_system_tables.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00080_show_tables_and_system_tables.sql
rename to tests/queries/0_stateless/00080_show_tables_and_system_tables.sql
diff --git a/dbms/tests/queries/0_stateless/00081_int_div_or_zero.reference b/tests/queries/0_stateless/00081_int_div_or_zero.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00081_int_div_or_zero.reference
rename to tests/queries/0_stateless/00081_int_div_or_zero.reference
diff --git a/dbms/tests/queries/0_stateless/00081_int_div_or_zero.sql b/tests/queries/0_stateless/00081_int_div_or_zero.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00081_int_div_or_zero.sql
rename to tests/queries/0_stateless/00081_int_div_or_zero.sql
diff --git a/dbms/tests/queries/0_stateless/00082_append_trailing_char_if_absent.reference b/tests/queries/0_stateless/00082_append_trailing_char_if_absent.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00082_append_trailing_char_if_absent.reference
rename to tests/queries/0_stateless/00082_append_trailing_char_if_absent.reference
diff --git a/dbms/tests/queries/0_stateless/00082_append_trailing_char_if_absent.sql b/tests/queries/0_stateless/00082_append_trailing_char_if_absent.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00082_append_trailing_char_if_absent.sql
rename to tests/queries/0_stateless/00082_append_trailing_char_if_absent.sql
diff --git a/dbms/tests/queries/0_stateless/00083_create_merge_tree_zookeeper.reference b/tests/queries/0_stateless/00083_create_merge_tree_zookeeper.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00083_create_merge_tree_zookeeper.reference
rename to tests/queries/0_stateless/00083_create_merge_tree_zookeeper.reference
diff --git a/dbms/tests/queries/0_stateless/00083_create_merge_tree_zookeeper.sql b/tests/queries/0_stateless/00083_create_merge_tree_zookeeper.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00083_create_merge_tree_zookeeper.sql
rename to tests/queries/0_stateless/00083_create_merge_tree_zookeeper.sql
diff --git a/dbms/tests/queries/0_stateless/00084_summing_merge_tree.reference b/tests/queries/0_stateless/00084_summing_merge_tree.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00084_summing_merge_tree.reference
rename to tests/queries/0_stateless/00084_summing_merge_tree.reference
diff --git a/dbms/tests/queries/0_stateless/00084_summing_merge_tree.sql b/tests/queries/0_stateless/00084_summing_merge_tree.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00084_summing_merge_tree.sql
rename to tests/queries/0_stateless/00084_summing_merge_tree.sql
diff --git a/dbms/tests/queries/0_stateless/00085_visible_width_of_tuple_of_dates.reference b/tests/queries/0_stateless/00085_visible_width_of_tuple_of_dates.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00085_visible_width_of_tuple_of_dates.reference
rename to tests/queries/0_stateless/00085_visible_width_of_tuple_of_dates.reference
diff --git a/dbms/tests/queries/0_stateless/00085_visible_width_of_tuple_of_dates.sql b/tests/queries/0_stateless/00085_visible_width_of_tuple_of_dates.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00085_visible_width_of_tuple_of_dates.sql
rename to tests/queries/0_stateless/00085_visible_width_of_tuple_of_dates.sql
diff --git a/dbms/tests/queries/0_stateless/00086_concat_nary_const_with_nonconst_segfault.reference b/tests/queries/0_stateless/00086_concat_nary_const_with_nonconst_segfault.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00086_concat_nary_const_with_nonconst_segfault.reference
rename to tests/queries/0_stateless/00086_concat_nary_const_with_nonconst_segfault.reference
diff --git a/dbms/tests/queries/0_stateless/00086_concat_nary_const_with_nonconst_segfault.sql b/tests/queries/0_stateless/00086_concat_nary_const_with_nonconst_segfault.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00086_concat_nary_const_with_nonconst_segfault.sql
rename to tests/queries/0_stateless/00086_concat_nary_const_with_nonconst_segfault.sql
diff --git a/dbms/tests/queries/0_stateless/00087_distinct_of_empty_arrays.reference b/tests/queries/0_stateless/00087_distinct_of_empty_arrays.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00087_distinct_of_empty_arrays.reference
rename to tests/queries/0_stateless/00087_distinct_of_empty_arrays.reference
diff --git a/dbms/tests/queries/0_stateless/00087_distinct_of_empty_arrays.sql b/tests/queries/0_stateless/00087_distinct_of_empty_arrays.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00087_distinct_of_empty_arrays.sql
rename to tests/queries/0_stateless/00087_distinct_of_empty_arrays.sql
diff --git a/dbms/tests/queries/0_stateless/00087_math_functions.reference b/tests/queries/0_stateless/00087_math_functions.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00087_math_functions.reference
rename to tests/queries/0_stateless/00087_math_functions.reference
diff --git a/dbms/tests/queries/0_stateless/00087_math_functions.sql b/tests/queries/0_stateless/00087_math_functions.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00087_math_functions.sql
rename to tests/queries/0_stateless/00087_math_functions.sql
diff --git a/dbms/tests/queries/0_stateless/00088_distinct_of_arrays_of_strings.reference b/tests/queries/0_stateless/00088_distinct_of_arrays_of_strings.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00088_distinct_of_arrays_of_strings.reference
rename to tests/queries/0_stateless/00088_distinct_of_arrays_of_strings.reference
diff --git a/dbms/tests/queries/0_stateless/00088_distinct_of_arrays_of_strings.sql b/tests/queries/0_stateless/00088_distinct_of_arrays_of_strings.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00088_distinct_of_arrays_of_strings.sql
rename to tests/queries/0_stateless/00088_distinct_of_arrays_of_strings.sql
diff --git a/dbms/tests/queries/0_stateless/00089_group_by_arrays_of_fixed.reference b/tests/queries/0_stateless/00089_group_by_arrays_of_fixed.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00089_group_by_arrays_of_fixed.reference
rename to tests/queries/0_stateless/00089_group_by_arrays_of_fixed.reference
diff --git a/dbms/tests/queries/0_stateless/00089_group_by_arrays_of_fixed.sql b/tests/queries/0_stateless/00089_group_by_arrays_of_fixed.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00089_group_by_arrays_of_fixed.sql
rename to tests/queries/0_stateless/00089_group_by_arrays_of_fixed.sql
diff --git a/dbms/tests/queries/0_stateless/00090_union_race_conditions_1.reference b/tests/queries/0_stateless/00090_union_race_conditions_1.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00090_union_race_conditions_1.reference
rename to tests/queries/0_stateless/00090_union_race_conditions_1.reference
diff --git a/dbms/tests/queries/0_stateless/00090_union_race_conditions_1.sh b/tests/queries/0_stateless/00090_union_race_conditions_1.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00090_union_race_conditions_1.sh
rename to tests/queries/0_stateless/00090_union_race_conditions_1.sh
diff --git a/dbms/tests/queries/0_stateless/00091_union_race_conditions_2.reference b/tests/queries/0_stateless/00091_union_race_conditions_2.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00091_union_race_conditions_2.reference
rename to tests/queries/0_stateless/00091_union_race_conditions_2.reference
diff --git a/dbms/tests/queries/0_stateless/00091_union_race_conditions_2.sh b/tests/queries/0_stateless/00091_union_race_conditions_2.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00091_union_race_conditions_2.sh
rename to tests/queries/0_stateless/00091_union_race_conditions_2.sh
diff --git a/dbms/tests/queries/0_stateless/00092_union_race_conditions_3.reference b/tests/queries/0_stateless/00092_union_race_conditions_3.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00092_union_race_conditions_3.reference
rename to tests/queries/0_stateless/00092_union_race_conditions_3.reference
diff --git a/dbms/tests/queries/0_stateless/00092_union_race_conditions_3.sh b/tests/queries/0_stateless/00092_union_race_conditions_3.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00092_union_race_conditions_3.sh
rename to tests/queries/0_stateless/00092_union_race_conditions_3.sh
diff --git a/dbms/tests/queries/0_stateless/00093_union_race_conditions_4.reference b/tests/queries/0_stateless/00093_union_race_conditions_4.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00093_union_race_conditions_4.reference
rename to tests/queries/0_stateless/00093_union_race_conditions_4.reference
diff --git a/dbms/tests/queries/0_stateless/00093_union_race_conditions_4.sh b/tests/queries/0_stateless/00093_union_race_conditions_4.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00093_union_race_conditions_4.sh
rename to tests/queries/0_stateless/00093_union_race_conditions_4.sh
diff --git a/dbms/tests/queries/0_stateless/00094_union_race_conditions_5.reference b/tests/queries/0_stateless/00094_union_race_conditions_5.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00094_union_race_conditions_5.reference
rename to tests/queries/0_stateless/00094_union_race_conditions_5.reference
diff --git a/dbms/tests/queries/0_stateless/00094_union_race_conditions_5.sh b/tests/queries/0_stateless/00094_union_race_conditions_5.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00094_union_race_conditions_5.sh
rename to tests/queries/0_stateless/00094_union_race_conditions_5.sh
diff --git a/dbms/tests/queries/0_stateless/00096_aggregation_min_if.reference b/tests/queries/0_stateless/00096_aggregation_min_if.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00096_aggregation_min_if.reference
rename to tests/queries/0_stateless/00096_aggregation_min_if.reference
diff --git a/dbms/tests/queries/0_stateless/00096_aggregation_min_if.sql b/tests/queries/0_stateless/00096_aggregation_min_if.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00096_aggregation_min_if.sql
rename to tests/queries/0_stateless/00096_aggregation_min_if.sql
diff --git a/dbms/tests/queries/0_stateless/00097_long_storage_buffer_race_condition.reference b/tests/queries/0_stateless/00097_long_storage_buffer_race_condition.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00097_long_storage_buffer_race_condition.reference
rename to tests/queries/0_stateless/00097_long_storage_buffer_race_condition.reference
diff --git a/dbms/tests/queries/0_stateless/00097_long_storage_buffer_race_condition.sh b/tests/queries/0_stateless/00097_long_storage_buffer_race_condition.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00097_long_storage_buffer_race_condition.sh
rename to tests/queries/0_stateless/00097_long_storage_buffer_race_condition.sh
diff --git a/dbms/tests/queries/0_stateless/00097_long_storage_buffer_race_condition_mt.reference b/tests/queries/0_stateless/00097_long_storage_buffer_race_condition_mt.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00097_long_storage_buffer_race_condition_mt.reference
rename to tests/queries/0_stateless/00097_long_storage_buffer_race_condition_mt.reference
diff --git a/dbms/tests/queries/0_stateless/00097_long_storage_buffer_race_condition_mt.sh b/tests/queries/0_stateless/00097_long_storage_buffer_race_condition_mt.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00097_long_storage_buffer_race_condition_mt.sh
rename to tests/queries/0_stateless/00097_long_storage_buffer_race_condition_mt.sh
diff --git a/dbms/tests/queries/0_stateless/00098_1_union_all.reference b/tests/queries/0_stateless/00098_1_union_all.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00098_1_union_all.reference
rename to tests/queries/0_stateless/00098_1_union_all.reference
diff --git a/dbms/tests/queries/0_stateless/00098_1_union_all.sql b/tests/queries/0_stateless/00098_1_union_all.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00098_1_union_all.sql
rename to tests/queries/0_stateless/00098_1_union_all.sql
diff --git a/dbms/tests/queries/0_stateless/00098_2_union_all.reference b/tests/queries/0_stateless/00098_2_union_all.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00098_2_union_all.reference
rename to tests/queries/0_stateless/00098_2_union_all.reference
diff --git a/dbms/tests/queries/0_stateless/00098_2_union_all.sql b/tests/queries/0_stateless/00098_2_union_all.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00098_2_union_all.sql
rename to tests/queries/0_stateless/00098_2_union_all.sql
diff --git a/dbms/tests/queries/0_stateless/00098_3_union_all.reference b/tests/queries/0_stateless/00098_3_union_all.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00098_3_union_all.reference
rename to tests/queries/0_stateless/00098_3_union_all.reference
diff --git a/dbms/tests/queries/0_stateless/00098_3_union_all.sql b/tests/queries/0_stateless/00098_3_union_all.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00098_3_union_all.sql
rename to tests/queries/0_stateless/00098_3_union_all.sql
diff --git a/dbms/tests/queries/0_stateless/00098_4_union_all.reference b/tests/queries/0_stateless/00098_4_union_all.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00098_4_union_all.reference
rename to tests/queries/0_stateless/00098_4_union_all.reference
diff --git a/dbms/tests/queries/0_stateless/00098_4_union_all.sql b/tests/queries/0_stateless/00098_4_union_all.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00098_4_union_all.sql
rename to tests/queries/0_stateless/00098_4_union_all.sql
diff --git a/dbms/tests/queries/0_stateless/00098_5_union_all.reference b/tests/queries/0_stateless/00098_5_union_all.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00098_5_union_all.reference
rename to tests/queries/0_stateless/00098_5_union_all.reference
diff --git a/dbms/tests/queries/0_stateless/00098_5_union_all.sql b/tests/queries/0_stateless/00098_5_union_all.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00098_5_union_all.sql
rename to tests/queries/0_stateless/00098_5_union_all.sql
diff --git a/dbms/tests/queries/0_stateless/00098_6_union_all.reference b/tests/queries/0_stateless/00098_6_union_all.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00098_6_union_all.reference
rename to tests/queries/0_stateless/00098_6_union_all.reference
diff --git a/dbms/tests/queries/0_stateless/00098_6_union_all.sql b/tests/queries/0_stateless/00098_6_union_all.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00098_6_union_all.sql
rename to tests/queries/0_stateless/00098_6_union_all.sql
diff --git a/dbms/tests/queries/0_stateless/00098_7_union_all.reference b/tests/queries/0_stateless/00098_7_union_all.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00098_7_union_all.reference
rename to tests/queries/0_stateless/00098_7_union_all.reference
diff --git a/dbms/tests/queries/0_stateless/00098_7_union_all.sql b/tests/queries/0_stateless/00098_7_union_all.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00098_7_union_all.sql
rename to tests/queries/0_stateless/00098_7_union_all.sql
diff --git a/dbms/tests/queries/0_stateless/00098_8_union_all.reference b/tests/queries/0_stateless/00098_8_union_all.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00098_8_union_all.reference
rename to tests/queries/0_stateless/00098_8_union_all.reference
diff --git a/dbms/tests/queries/0_stateless/00098_8_union_all.sql b/tests/queries/0_stateless/00098_8_union_all.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00098_8_union_all.sql
rename to tests/queries/0_stateless/00098_8_union_all.sql
diff --git a/dbms/tests/queries/0_stateless/00098_9_union_all.reference b/tests/queries/0_stateless/00098_9_union_all.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00098_9_union_all.reference
rename to tests/queries/0_stateless/00098_9_union_all.reference
diff --git a/dbms/tests/queries/0_stateless/00098_9_union_all.sql b/tests/queries/0_stateless/00098_9_union_all.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00098_9_union_all.sql
rename to tests/queries/0_stateless/00098_9_union_all.sql
diff --git a/dbms/tests/queries/0_stateless/00098_a_union_all.reference b/tests/queries/0_stateless/00098_a_union_all.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00098_a_union_all.reference
rename to tests/queries/0_stateless/00098_a_union_all.reference
diff --git a/dbms/tests/queries/0_stateless/00098_a_union_all.sql b/tests/queries/0_stateless/00098_a_union_all.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00098_a_union_all.sql
rename to tests/queries/0_stateless/00098_a_union_all.sql
diff --git a/dbms/tests/queries/0_stateless/00098_b_union_all.reference b/tests/queries/0_stateless/00098_b_union_all.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00098_b_union_all.reference
rename to tests/queries/0_stateless/00098_b_union_all.reference
diff --git a/dbms/tests/queries/0_stateless/00098_b_union_all.sql b/tests/queries/0_stateless/00098_b_union_all.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00098_b_union_all.sql
rename to tests/queries/0_stateless/00098_b_union_all.sql
diff --git a/dbms/tests/queries/0_stateless/00098_c_union_all.reference b/tests/queries/0_stateless/00098_c_union_all.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00098_c_union_all.reference
rename to tests/queries/0_stateless/00098_c_union_all.reference
diff --git a/dbms/tests/queries/0_stateless/00098_c_union_all.sql b/tests/queries/0_stateless/00098_c_union_all.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00098_c_union_all.sql
rename to tests/queries/0_stateless/00098_c_union_all.sql
diff --git a/dbms/tests/queries/0_stateless/00098_d_union_all.reference b/tests/queries/0_stateless/00098_d_union_all.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00098_d_union_all.reference
rename to tests/queries/0_stateless/00098_d_union_all.reference
diff --git a/dbms/tests/queries/0_stateless/00098_d_union_all.sql b/tests/queries/0_stateless/00098_d_union_all.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00098_d_union_all.sql
rename to tests/queries/0_stateless/00098_d_union_all.sql
diff --git a/dbms/tests/queries/0_stateless/00098_e_union_all.reference b/tests/queries/0_stateless/00098_e_union_all.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00098_e_union_all.reference
rename to tests/queries/0_stateless/00098_e_union_all.reference
diff --git a/dbms/tests/queries/0_stateless/00098_e_union_all.sql b/tests/queries/0_stateless/00098_e_union_all.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00098_e_union_all.sql
rename to tests/queries/0_stateless/00098_e_union_all.sql
diff --git a/dbms/tests/queries/0_stateless/00098_f_union_all.reference b/tests/queries/0_stateless/00098_f_union_all.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00098_f_union_all.reference
rename to tests/queries/0_stateless/00098_f_union_all.reference
diff --git a/dbms/tests/queries/0_stateless/00098_f_union_all.sql b/tests/queries/0_stateless/00098_f_union_all.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00098_f_union_all.sql
rename to tests/queries/0_stateless/00098_f_union_all.sql
diff --git a/dbms/tests/queries/0_stateless/00098_g_union_all.reference b/tests/queries/0_stateless/00098_g_union_all.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00098_g_union_all.reference
rename to tests/queries/0_stateless/00098_g_union_all.reference
diff --git a/dbms/tests/queries/0_stateless/00098_g_union_all.sql b/tests/queries/0_stateless/00098_g_union_all.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00098_g_union_all.sql
rename to tests/queries/0_stateless/00098_g_union_all.sql
diff --git a/dbms/tests/queries/0_stateless/00098_h_union_all.reference b/tests/queries/0_stateless/00098_h_union_all.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00098_h_union_all.reference
rename to tests/queries/0_stateless/00098_h_union_all.reference
diff --git a/dbms/tests/queries/0_stateless/00098_h_union_all.sql b/tests/queries/0_stateless/00098_h_union_all.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00098_h_union_all.sql
rename to tests/queries/0_stateless/00098_h_union_all.sql
diff --git a/dbms/tests/queries/0_stateless/00098_j_union_all.reference b/tests/queries/0_stateless/00098_j_union_all.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00098_j_union_all.reference
rename to tests/queries/0_stateless/00098_j_union_all.reference
diff --git a/dbms/tests/queries/0_stateless/00098_j_union_all.sql b/tests/queries/0_stateless/00098_j_union_all.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00098_j_union_all.sql
rename to tests/queries/0_stateless/00098_j_union_all.sql
diff --git a/dbms/tests/queries/0_stateless/00098_k_union_all.reference b/tests/queries/0_stateless/00098_k_union_all.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00098_k_union_all.reference
rename to tests/queries/0_stateless/00098_k_union_all.reference
diff --git a/dbms/tests/queries/0_stateless/00098_k_union_all.sql b/tests/queries/0_stateless/00098_k_union_all.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00098_k_union_all.sql
rename to tests/queries/0_stateless/00098_k_union_all.sql
diff --git a/dbms/tests/queries/0_stateless/00098_l_union_all.reference b/tests/queries/0_stateless/00098_l_union_all.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00098_l_union_all.reference
rename to tests/queries/0_stateless/00098_l_union_all.reference
diff --git a/dbms/tests/queries/0_stateless/00098_l_union_all.sql b/tests/queries/0_stateless/00098_l_union_all.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00098_l_union_all.sql
rename to tests/queries/0_stateless/00098_l_union_all.sql
diff --git a/dbms/tests/queries/0_stateless/00098_shard_i_union_all.reference b/tests/queries/0_stateless/00098_shard_i_union_all.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00098_shard_i_union_all.reference
rename to tests/queries/0_stateless/00098_shard_i_union_all.reference
diff --git a/dbms/tests/queries/0_stateless/00098_shard_i_union_all.sql b/tests/queries/0_stateless/00098_shard_i_union_all.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00098_shard_i_union_all.sql
rename to tests/queries/0_stateless/00098_shard_i_union_all.sql
diff --git a/dbms/tests/queries/0_stateless/00099_join_many_blocks_segfault.reference b/tests/queries/0_stateless/00099_join_many_blocks_segfault.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00099_join_many_blocks_segfault.reference
rename to tests/queries/0_stateless/00099_join_many_blocks_segfault.reference
diff --git a/dbms/tests/queries/0_stateless/00099_join_many_blocks_segfault.sql b/tests/queries/0_stateless/00099_join_many_blocks_segfault.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00099_join_many_blocks_segfault.sql
rename to tests/queries/0_stateless/00099_join_many_blocks_segfault.sql
diff --git a/dbms/tests/queries/0_stateless/00100_subquery_table_identifier.reference b/tests/queries/0_stateless/00100_subquery_table_identifier.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00100_subquery_table_identifier.reference
rename to tests/queries/0_stateless/00100_subquery_table_identifier.reference
diff --git a/dbms/tests/queries/0_stateless/00100_subquery_table_identifier.sh b/tests/queries/0_stateless/00100_subquery_table_identifier.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00100_subquery_table_identifier.sh
rename to tests/queries/0_stateless/00100_subquery_table_identifier.sh
diff --git a/dbms/tests/queries/0_stateless/00101_materialized_views_and_insert_without_explicit_database.reference b/tests/queries/0_stateless/00101_materialized_views_and_insert_without_explicit_database.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00101_materialized_views_and_insert_without_explicit_database.reference
rename to tests/queries/0_stateless/00101_materialized_views_and_insert_without_explicit_database.reference
diff --git a/dbms/tests/queries/0_stateless/00101_materialized_views_and_insert_without_explicit_database.sql b/tests/queries/0_stateless/00101_materialized_views_and_insert_without_explicit_database.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00101_materialized_views_and_insert_without_explicit_database.sql
rename to tests/queries/0_stateless/00101_materialized_views_and_insert_without_explicit_database.sql
diff --git a/dbms/tests/queries/0_stateless/00102_insert_into_temporary_table.reference b/tests/queries/0_stateless/00102_insert_into_temporary_table.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00102_insert_into_temporary_table.reference
rename to tests/queries/0_stateless/00102_insert_into_temporary_table.reference
diff --git a/dbms/tests/queries/0_stateless/00102_insert_into_temporary_table.sql b/tests/queries/0_stateless/00102_insert_into_temporary_table.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00102_insert_into_temporary_table.sql
rename to tests/queries/0_stateless/00102_insert_into_temporary_table.sql
diff --git a/dbms/tests/queries/0_stateless/00103_ipv4_num_to_string_class_c.reference b/tests/queries/0_stateless/00103_ipv4_num_to_string_class_c.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00103_ipv4_num_to_string_class_c.reference
rename to tests/queries/0_stateless/00103_ipv4_num_to_string_class_c.reference
diff --git a/dbms/tests/queries/0_stateless/00103_ipv4_num_to_string_class_c.sql b/tests/queries/0_stateless/00103_ipv4_num_to_string_class_c.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00103_ipv4_num_to_string_class_c.sql
rename to tests/queries/0_stateless/00103_ipv4_num_to_string_class_c.sql
diff --git a/dbms/tests/queries/0_stateless/00104_totals_having_mode.reference b/tests/queries/0_stateless/00104_totals_having_mode.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00104_totals_having_mode.reference
rename to tests/queries/0_stateless/00104_totals_having_mode.reference
diff --git a/dbms/tests/queries/0_stateless/00104_totals_having_mode.sql b/tests/queries/0_stateless/00104_totals_having_mode.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00104_totals_having_mode.sql
rename to tests/queries/0_stateless/00104_totals_having_mode.sql
diff --git a/dbms/tests/queries/0_stateless/00105_shard_collations.reference b/tests/queries/0_stateless/00105_shard_collations.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00105_shard_collations.reference
rename to tests/queries/0_stateless/00105_shard_collations.reference
diff --git a/dbms/tests/queries/0_stateless/00105_shard_collations.sql b/tests/queries/0_stateless/00105_shard_collations.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00105_shard_collations.sql
rename to tests/queries/0_stateless/00105_shard_collations.sql
diff --git a/dbms/tests/queries/0_stateless/00106_totals_after_having.reference b/tests/queries/0_stateless/00106_totals_after_having.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00106_totals_after_having.reference
rename to tests/queries/0_stateless/00106_totals_after_having.reference
diff --git a/dbms/tests/queries/0_stateless/00106_totals_after_having.sql b/tests/queries/0_stateless/00106_totals_after_having.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00106_totals_after_having.sql
rename to tests/queries/0_stateless/00106_totals_after_having.sql
diff --git a/dbms/tests/queries/0_stateless/00107_totals_after_having.reference b/tests/queries/0_stateless/00107_totals_after_having.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00107_totals_after_having.reference
rename to tests/queries/0_stateless/00107_totals_after_having.reference
diff --git a/dbms/tests/queries/0_stateless/00107_totals_after_having.sql b/tests/queries/0_stateless/00107_totals_after_having.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00107_totals_after_having.sql
rename to tests/queries/0_stateless/00107_totals_after_having.sql
diff --git a/dbms/tests/queries/0_stateless/00108_shard_totals_after_having.reference b/tests/queries/0_stateless/00108_shard_totals_after_having.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00108_shard_totals_after_having.reference
rename to tests/queries/0_stateless/00108_shard_totals_after_having.reference
diff --git a/dbms/tests/queries/0_stateless/00108_shard_totals_after_having.sql b/tests/queries/0_stateless/00108_shard_totals_after_having.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00108_shard_totals_after_having.sql
rename to tests/queries/0_stateless/00108_shard_totals_after_having.sql
diff --git a/dbms/tests/queries/0_stateless/00109_shard_totals_after_having.reference b/tests/queries/0_stateless/00109_shard_totals_after_having.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00109_shard_totals_after_having.reference
rename to tests/queries/0_stateless/00109_shard_totals_after_having.reference
diff --git a/dbms/tests/queries/0_stateless/00109_shard_totals_after_having.sql b/tests/queries/0_stateless/00109_shard_totals_after_having.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00109_shard_totals_after_having.sql
rename to tests/queries/0_stateless/00109_shard_totals_after_having.sql
diff --git a/dbms/tests/queries/0_stateless/00110_external_sort.reference b/tests/queries/0_stateless/00110_external_sort.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00110_external_sort.reference
rename to tests/queries/0_stateless/00110_external_sort.reference
diff --git a/dbms/tests/queries/0_stateless/00110_external_sort.sql b/tests/queries/0_stateless/00110_external_sort.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00110_external_sort.sql
rename to tests/queries/0_stateless/00110_external_sort.sql
diff --git a/dbms/tests/queries/0_stateless/00111_shard_external_sort_distributed.reference b/tests/queries/0_stateless/00111_shard_external_sort_distributed.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00111_shard_external_sort_distributed.reference
rename to tests/queries/0_stateless/00111_shard_external_sort_distributed.reference
diff --git a/dbms/tests/queries/0_stateless/00111_shard_external_sort_distributed.sql b/tests/queries/0_stateless/00111_shard_external_sort_distributed.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00111_shard_external_sort_distributed.sql
rename to tests/queries/0_stateless/00111_shard_external_sort_distributed.sql
diff --git a/dbms/tests/queries/0_stateless/00112_shard_totals_after_having.reference b/tests/queries/0_stateless/00112_shard_totals_after_having.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00112_shard_totals_after_having.reference
rename to tests/queries/0_stateless/00112_shard_totals_after_having.reference
diff --git a/dbms/tests/queries/0_stateless/00112_shard_totals_after_having.sql b/tests/queries/0_stateless/00112_shard_totals_after_having.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00112_shard_totals_after_having.sql
rename to tests/queries/0_stateless/00112_shard_totals_after_having.sql
diff --git a/dbms/tests/queries/0_stateless/00113_shard_group_array.reference b/tests/queries/0_stateless/00113_shard_group_array.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00113_shard_group_array.reference
rename to tests/queries/0_stateless/00113_shard_group_array.reference
diff --git a/dbms/tests/queries/0_stateless/00113_shard_group_array.sql b/tests/queries/0_stateless/00113_shard_group_array.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00113_shard_group_array.sql
rename to tests/queries/0_stateless/00113_shard_group_array.sql
diff --git a/dbms/tests/queries/0_stateless/00114_float_type_result_of_division.reference b/tests/queries/0_stateless/00114_float_type_result_of_division.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00114_float_type_result_of_division.reference
rename to tests/queries/0_stateless/00114_float_type_result_of_division.reference
diff --git a/dbms/tests/queries/0_stateless/00114_float_type_result_of_division.sql b/tests/queries/0_stateless/00114_float_type_result_of_division.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00114_float_type_result_of_division.sql
rename to tests/queries/0_stateless/00114_float_type_result_of_division.sql
diff --git a/dbms/tests/queries/0_stateless/00115_shard_in_incomplete_result.reference b/tests/queries/0_stateless/00115_shard_in_incomplete_result.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00115_shard_in_incomplete_result.reference
rename to tests/queries/0_stateless/00115_shard_in_incomplete_result.reference
diff --git a/dbms/tests/queries/0_stateless/00115_shard_in_incomplete_result.sh b/tests/queries/0_stateless/00115_shard_in_incomplete_result.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00115_shard_in_incomplete_result.sh
rename to tests/queries/0_stateless/00115_shard_in_incomplete_result.sh
diff --git a/dbms/tests/queries/0_stateless/00116_storage_set.reference b/tests/queries/0_stateless/00116_storage_set.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00116_storage_set.reference
rename to tests/queries/0_stateless/00116_storage_set.reference
diff --git a/dbms/tests/queries/0_stateless/00116_storage_set.sql b/tests/queries/0_stateless/00116_storage_set.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00116_storage_set.sql
rename to tests/queries/0_stateless/00116_storage_set.sql
diff --git a/dbms/tests/queries/0_stateless/00117_parsing_arrays.reference b/tests/queries/0_stateless/00117_parsing_arrays.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00117_parsing_arrays.reference
rename to tests/queries/0_stateless/00117_parsing_arrays.reference
diff --git a/dbms/tests/queries/0_stateless/00117_parsing_arrays.sql b/tests/queries/0_stateless/00117_parsing_arrays.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00117_parsing_arrays.sql
rename to tests/queries/0_stateless/00117_parsing_arrays.sql
diff --git a/dbms/tests/queries/0_stateless/00118_storage_join.reference b/tests/queries/0_stateless/00118_storage_join.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00118_storage_join.reference
rename to tests/queries/0_stateless/00118_storage_join.reference
diff --git a/dbms/tests/queries/0_stateless/00118_storage_join.sql b/tests/queries/0_stateless/00118_storage_join.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00118_storage_join.sql
rename to tests/queries/0_stateless/00118_storage_join.sql
diff --git a/dbms/tests/queries/0_stateless/00119_storage_join.reference b/tests/queries/0_stateless/00119_storage_join.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00119_storage_join.reference
rename to tests/queries/0_stateless/00119_storage_join.reference
diff --git a/dbms/tests/queries/0_stateless/00119_storage_join.sql b/tests/queries/0_stateless/00119_storage_join.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00119_storage_join.sql
rename to tests/queries/0_stateless/00119_storage_join.sql
diff --git a/dbms/tests/queries/0_stateless/00120_join_and_group_by.reference b/tests/queries/0_stateless/00120_join_and_group_by.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00120_join_and_group_by.reference
rename to tests/queries/0_stateless/00120_join_and_group_by.reference
diff --git a/dbms/tests/queries/0_stateless/00120_join_and_group_by.sql b/tests/queries/0_stateless/00120_join_and_group_by.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00120_join_and_group_by.sql
rename to tests/queries/0_stateless/00120_join_and_group_by.sql
diff --git a/dbms/tests/queries/0_stateless/00121_drop_column_zookeeper.reference b/tests/queries/0_stateless/00121_drop_column_zookeeper.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00121_drop_column_zookeeper.reference
rename to tests/queries/0_stateless/00121_drop_column_zookeeper.reference
diff --git a/dbms/tests/queries/0_stateless/00121_drop_column_zookeeper.sql b/tests/queries/0_stateless/00121_drop_column_zookeeper.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00121_drop_column_zookeeper.sql
rename to tests/queries/0_stateless/00121_drop_column_zookeeper.sql
diff --git a/dbms/tests/queries/0_stateless/00122_join_with_subquery_with_subquery.reference b/tests/queries/0_stateless/00122_join_with_subquery_with_subquery.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00122_join_with_subquery_with_subquery.reference
rename to tests/queries/0_stateless/00122_join_with_subquery_with_subquery.reference
diff --git a/dbms/tests/queries/0_stateless/00122_join_with_subquery_with_subquery.sql b/tests/queries/0_stateless/00122_join_with_subquery_with_subquery.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00122_join_with_subquery_with_subquery.sql
rename to tests/queries/0_stateless/00122_join_with_subquery_with_subquery.sql
diff --git a/dbms/tests/queries/0_stateless/00123_shard_unmerged_result_when_max_distributed_connections_is_one.reference b/tests/queries/0_stateless/00123_shard_unmerged_result_when_max_distributed_connections_is_one.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00123_shard_unmerged_result_when_max_distributed_connections_is_one.reference
rename to tests/queries/0_stateless/00123_shard_unmerged_result_when_max_distributed_connections_is_one.reference
diff --git a/dbms/tests/queries/0_stateless/00123_shard_unmerged_result_when_max_distributed_connections_is_one.sql b/tests/queries/0_stateless/00123_shard_unmerged_result_when_max_distributed_connections_is_one.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00123_shard_unmerged_result_when_max_distributed_connections_is_one.sql
rename to tests/queries/0_stateless/00123_shard_unmerged_result_when_max_distributed_connections_is_one.sql
diff --git a/dbms/tests/queries/0_stateless/00124_shard_distributed_with_many_replicas.reference b/tests/queries/0_stateless/00124_shard_distributed_with_many_replicas.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00124_shard_distributed_with_many_replicas.reference
rename to tests/queries/0_stateless/00124_shard_distributed_with_many_replicas.reference
diff --git a/dbms/tests/queries/0_stateless/00124_shard_distributed_with_many_replicas.sql b/tests/queries/0_stateless/00124_shard_distributed_with_many_replicas.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00124_shard_distributed_with_many_replicas.sql
rename to tests/queries/0_stateless/00124_shard_distributed_with_many_replicas.sql
diff --git a/dbms/tests/queries/0_stateless/00125_array_element_of_array_of_tuple.reference b/tests/queries/0_stateless/00125_array_element_of_array_of_tuple.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00125_array_element_of_array_of_tuple.reference
rename to tests/queries/0_stateless/00125_array_element_of_array_of_tuple.reference
diff --git a/dbms/tests/queries/0_stateless/00125_array_element_of_array_of_tuple.sql b/tests/queries/0_stateless/00125_array_element_of_array_of_tuple.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00125_array_element_of_array_of_tuple.sql
rename to tests/queries/0_stateless/00125_array_element_of_array_of_tuple.sql
diff --git a/dbms/tests/queries/0_stateless/00126_buffer.reference b/tests/queries/0_stateless/00126_buffer.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00126_buffer.reference
rename to tests/queries/0_stateless/00126_buffer.reference
diff --git a/dbms/tests/queries/0_stateless/00126_buffer.sql b/tests/queries/0_stateless/00126_buffer.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00126_buffer.sql
rename to tests/queries/0_stateless/00126_buffer.sql
diff --git a/dbms/tests/queries/0_stateless/00127_group_by_concat.reference b/tests/queries/0_stateless/00127_group_by_concat.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00127_group_by_concat.reference
rename to tests/queries/0_stateless/00127_group_by_concat.reference
diff --git a/dbms/tests/queries/0_stateless/00127_group_by_concat.sql b/tests/queries/0_stateless/00127_group_by_concat.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00127_group_by_concat.sql
rename to tests/queries/0_stateless/00127_group_by_concat.sql
diff --git a/dbms/tests/queries/0_stateless/00128_group_by_number_and_fixed_string.reference b/tests/queries/0_stateless/00128_group_by_number_and_fixed_string.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00128_group_by_number_and_fixed_string.reference
rename to tests/queries/0_stateless/00128_group_by_number_and_fixed_string.reference
diff --git a/dbms/tests/queries/0_stateless/00128_group_by_number_and_fixed_string.sql b/tests/queries/0_stateless/00128_group_by_number_and_fixed_string.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00128_group_by_number_and_fixed_string.sql
rename to tests/queries/0_stateless/00128_group_by_number_and_fixed_string.sql
diff --git a/dbms/tests/queries/0_stateless/00129_quantile_timing_weighted.reference b/tests/queries/0_stateless/00129_quantile_timing_weighted.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00129_quantile_timing_weighted.reference
rename to tests/queries/0_stateless/00129_quantile_timing_weighted.reference
diff --git a/dbms/tests/queries/0_stateless/00129_quantile_timing_weighted.sql b/tests/queries/0_stateless/00129_quantile_timing_weighted.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00129_quantile_timing_weighted.sql
rename to tests/queries/0_stateless/00129_quantile_timing_weighted.sql
diff --git a/dbms/tests/queries/0_stateless/00131_set_hashed.reference b/tests/queries/0_stateless/00131_set_hashed.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00131_set_hashed.reference
rename to tests/queries/0_stateless/00131_set_hashed.reference
diff --git a/dbms/tests/queries/0_stateless/00131_set_hashed.sql b/tests/queries/0_stateless/00131_set_hashed.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00131_set_hashed.sql
rename to tests/queries/0_stateless/00131_set_hashed.sql
diff --git a/dbms/tests/queries/0_stateless/00132_sets.reference b/tests/queries/0_stateless/00132_sets.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00132_sets.reference
rename to tests/queries/0_stateless/00132_sets.reference
diff --git a/dbms/tests/queries/0_stateless/00132_sets.sql b/tests/queries/0_stateless/00132_sets.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00132_sets.sql
rename to tests/queries/0_stateless/00132_sets.sql
diff --git a/dbms/tests/queries/0_stateless/00133_long_shard_memory_tracker_and_exception_safety.reference b/tests/queries/0_stateless/00133_long_shard_memory_tracker_and_exception_safety.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00133_long_shard_memory_tracker_and_exception_safety.reference
rename to tests/queries/0_stateless/00133_long_shard_memory_tracker_and_exception_safety.reference
diff --git a/dbms/tests/queries/0_stateless/00133_long_shard_memory_tracker_and_exception_safety.sh b/tests/queries/0_stateless/00133_long_shard_memory_tracker_and_exception_safety.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00133_long_shard_memory_tracker_and_exception_safety.sh
rename to tests/queries/0_stateless/00133_long_shard_memory_tracker_and_exception_safety.sh
diff --git a/dbms/tests/queries/0_stateless/00134_aggregation_by_fixed_string_of_size_1_2_4_8.reference b/tests/queries/0_stateless/00134_aggregation_by_fixed_string_of_size_1_2_4_8.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00134_aggregation_by_fixed_string_of_size_1_2_4_8.reference
rename to tests/queries/0_stateless/00134_aggregation_by_fixed_string_of_size_1_2_4_8.reference
diff --git a/dbms/tests/queries/0_stateless/00134_aggregation_by_fixed_string_of_size_1_2_4_8.sql b/tests/queries/0_stateless/00134_aggregation_by_fixed_string_of_size_1_2_4_8.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00134_aggregation_by_fixed_string_of_size_1_2_4_8.sql
rename to tests/queries/0_stateless/00134_aggregation_by_fixed_string_of_size_1_2_4_8.sql
diff --git a/dbms/tests/queries/0_stateless/00135_duplicate_group_by_keys_segfault.reference b/tests/queries/0_stateless/00135_duplicate_group_by_keys_segfault.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00135_duplicate_group_by_keys_segfault.reference
rename to tests/queries/0_stateless/00135_duplicate_group_by_keys_segfault.reference
diff --git a/dbms/tests/queries/0_stateless/00135_duplicate_group_by_keys_segfault.sql b/tests/queries/0_stateless/00135_duplicate_group_by_keys_segfault.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00135_duplicate_group_by_keys_segfault.sql
rename to tests/queries/0_stateless/00135_duplicate_group_by_keys_segfault.sql
diff --git a/dbms/tests/queries/0_stateless/00136_duplicate_order_by_elems.reference b/tests/queries/0_stateless/00136_duplicate_order_by_elems.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00136_duplicate_order_by_elems.reference
rename to tests/queries/0_stateless/00136_duplicate_order_by_elems.reference
diff --git a/dbms/tests/queries/0_stateless/00136_duplicate_order_by_elems.sql b/tests/queries/0_stateless/00136_duplicate_order_by_elems.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00136_duplicate_order_by_elems.sql
rename to tests/queries/0_stateless/00136_duplicate_order_by_elems.sql
diff --git a/dbms/tests/queries/0_stateless/00137_in_constants.reference b/tests/queries/0_stateless/00137_in_constants.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00137_in_constants.reference
rename to tests/queries/0_stateless/00137_in_constants.reference
diff --git a/dbms/tests/queries/0_stateless/00137_in_constants.sql b/tests/queries/0_stateless/00137_in_constants.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00137_in_constants.sql
rename to tests/queries/0_stateless/00137_in_constants.sql
diff --git a/dbms/tests/queries/0_stateless/00138_table_aliases.reference b/tests/queries/0_stateless/00138_table_aliases.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00138_table_aliases.reference
rename to tests/queries/0_stateless/00138_table_aliases.reference
diff --git a/dbms/tests/queries/0_stateless/00138_table_aliases.sql b/tests/queries/0_stateless/00138_table_aliases.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00138_table_aliases.sql
rename to tests/queries/0_stateless/00138_table_aliases.sql
diff --git a/dbms/tests/queries/0_stateless/00140_parse_unix_timestamp_as_datetime.reference b/tests/queries/0_stateless/00140_parse_unix_timestamp_as_datetime.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00140_parse_unix_timestamp_as_datetime.reference
rename to tests/queries/0_stateless/00140_parse_unix_timestamp_as_datetime.reference
diff --git a/dbms/tests/queries/0_stateless/00140_parse_unix_timestamp_as_datetime.sql b/tests/queries/0_stateless/00140_parse_unix_timestamp_as_datetime.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00140_parse_unix_timestamp_as_datetime.sql
rename to tests/queries/0_stateless/00140_parse_unix_timestamp_as_datetime.sql
diff --git a/dbms/tests/queries/0_stateless/00140_prewhere_column_order.reference b/tests/queries/0_stateless/00140_prewhere_column_order.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00140_prewhere_column_order.reference
rename to tests/queries/0_stateless/00140_prewhere_column_order.reference
diff --git a/dbms/tests/queries/0_stateless/00140_prewhere_column_order.sql b/tests/queries/0_stateless/00140_prewhere_column_order.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00140_prewhere_column_order.sql
rename to tests/queries/0_stateless/00140_prewhere_column_order.sql
diff --git a/dbms/tests/queries/0_stateless/00141_parse_timestamp_as_datetime.reference b/tests/queries/0_stateless/00141_parse_timestamp_as_datetime.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00141_parse_timestamp_as_datetime.reference
rename to tests/queries/0_stateless/00141_parse_timestamp_as_datetime.reference
diff --git a/dbms/tests/queries/0_stateless/00141_parse_timestamp_as_datetime.sql b/tests/queries/0_stateless/00141_parse_timestamp_as_datetime.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00141_parse_timestamp_as_datetime.sql
rename to tests/queries/0_stateless/00141_parse_timestamp_as_datetime.sql
diff --git a/dbms/tests/queries/0_stateless/00142_parse_timestamp_as_datetime.reference b/tests/queries/0_stateless/00142_parse_timestamp_as_datetime.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00142_parse_timestamp_as_datetime.reference
rename to tests/queries/0_stateless/00142_parse_timestamp_as_datetime.reference
diff --git a/dbms/tests/queries/0_stateless/00142_parse_timestamp_as_datetime.sql b/tests/queries/0_stateless/00142_parse_timestamp_as_datetime.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00142_parse_timestamp_as_datetime.sql
rename to tests/queries/0_stateless/00142_parse_timestamp_as_datetime.sql
diff --git a/dbms/tests/queries/0_stateless/00143_number_classification_functions.reference b/tests/queries/0_stateless/00143_number_classification_functions.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00143_number_classification_functions.reference
rename to tests/queries/0_stateless/00143_number_classification_functions.reference
diff --git a/dbms/tests/queries/0_stateless/00143_number_classification_functions.sql b/tests/queries/0_stateless/00143_number_classification_functions.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00143_number_classification_functions.sql
rename to tests/queries/0_stateless/00143_number_classification_functions.sql
diff --git a/dbms/tests/queries/0_stateless/00144_empty_regexp.reference b/tests/queries/0_stateless/00144_empty_regexp.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00144_empty_regexp.reference
rename to tests/queries/0_stateless/00144_empty_regexp.reference
diff --git a/dbms/tests/queries/0_stateless/00144_empty_regexp.sql b/tests/queries/0_stateless/00144_empty_regexp.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00144_empty_regexp.sql
rename to tests/queries/0_stateless/00144_empty_regexp.sql
diff --git a/dbms/tests/queries/0_stateless/00145_empty_likes.reference b/tests/queries/0_stateless/00145_empty_likes.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00145_empty_likes.reference
rename to tests/queries/0_stateless/00145_empty_likes.reference
diff --git a/dbms/tests/queries/0_stateless/00145_empty_likes.sql b/tests/queries/0_stateless/00145_empty_likes.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00145_empty_likes.sql
rename to tests/queries/0_stateless/00145_empty_likes.sql
diff --git a/dbms/tests/queries/0_stateless/00146_summing_merge_tree_nested_map.reference b/tests/queries/0_stateless/00146_summing_merge_tree_nested_map.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00146_summing_merge_tree_nested_map.reference
rename to tests/queries/0_stateless/00146_summing_merge_tree_nested_map.reference
diff --git a/dbms/tests/queries/0_stateless/00146_summing_merge_tree_nested_map.sql b/tests/queries/0_stateless/00146_summing_merge_tree_nested_map.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00146_summing_merge_tree_nested_map.sql
rename to tests/queries/0_stateless/00146_summing_merge_tree_nested_map.sql
diff --git a/dbms/tests/queries/0_stateless/00147_alter_nested_default.reference b/tests/queries/0_stateless/00147_alter_nested_default.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00147_alter_nested_default.reference
rename to tests/queries/0_stateless/00147_alter_nested_default.reference
diff --git a/dbms/tests/queries/0_stateless/00147_alter_nested_default.sql b/tests/queries/0_stateless/00147_alter_nested_default.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00147_alter_nested_default.sql
rename to tests/queries/0_stateless/00147_alter_nested_default.sql
diff --git a/dbms/tests/queries/0_stateless/00148_summing_merge_tree_aggregate_function.reference b/tests/queries/0_stateless/00148_summing_merge_tree_aggregate_function.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00148_summing_merge_tree_aggregate_function.reference
rename to tests/queries/0_stateless/00148_summing_merge_tree_aggregate_function.reference
diff --git a/dbms/tests/queries/0_stateless/00148_summing_merge_tree_aggregate_function.sql b/tests/queries/0_stateless/00148_summing_merge_tree_aggregate_function.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00148_summing_merge_tree_aggregate_function.sql
rename to tests/queries/0_stateless/00148_summing_merge_tree_aggregate_function.sql
diff --git a/dbms/tests/queries/0_stateless/00148_summing_merge_tree_nested_map_multiple_values.reference b/tests/queries/0_stateless/00148_summing_merge_tree_nested_map_multiple_values.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00148_summing_merge_tree_nested_map_multiple_values.reference
rename to tests/queries/0_stateless/00148_summing_merge_tree_nested_map_multiple_values.reference
diff --git a/dbms/tests/queries/0_stateless/00148_summing_merge_tree_nested_map_multiple_values.sql b/tests/queries/0_stateless/00148_summing_merge_tree_nested_map_multiple_values.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00148_summing_merge_tree_nested_map_multiple_values.sql
rename to tests/queries/0_stateless/00148_summing_merge_tree_nested_map_multiple_values.sql
diff --git a/dbms/tests/queries/0_stateless/00149_function_url_hash.reference b/tests/queries/0_stateless/00149_function_url_hash.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00149_function_url_hash.reference
rename to tests/queries/0_stateless/00149_function_url_hash.reference
diff --git a/dbms/tests/queries/0_stateless/00149_function_url_hash.sql b/tests/queries/0_stateless/00149_function_url_hash.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00149_function_url_hash.sql
rename to tests/queries/0_stateless/00149_function_url_hash.sql
diff --git a/dbms/tests/queries/0_stateless/00150_with_totals_and_join.reference b/tests/queries/0_stateless/00150_with_totals_and_join.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00150_with_totals_and_join.reference
rename to tests/queries/0_stateless/00150_with_totals_and_join.reference
diff --git a/dbms/tests/queries/0_stateless/00150_with_totals_and_join.sql b/tests/queries/0_stateless/00150_with_totals_and_join.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00150_with_totals_and_join.sql
rename to tests/queries/0_stateless/00150_with_totals_and_join.sql
diff --git a/dbms/tests/queries/0_stateless/00151_tuple_with_array.reference b/tests/queries/0_stateless/00151_tuple_with_array.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00151_tuple_with_array.reference
rename to tests/queries/0_stateless/00151_tuple_with_array.reference
diff --git a/dbms/tests/queries/0_stateless/00151_tuple_with_array.sql b/tests/queries/0_stateless/00151_tuple_with_array.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00151_tuple_with_array.sql
rename to tests/queries/0_stateless/00151_tuple_with_array.sql
diff --git a/dbms/tests/queries/0_stateless/00152_totals_in_subquery.reference b/tests/queries/0_stateless/00152_totals_in_subquery.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00152_totals_in_subquery.reference
rename to tests/queries/0_stateless/00152_totals_in_subquery.reference
diff --git a/dbms/tests/queries/0_stateless/00152_totals_in_subquery.sql b/tests/queries/0_stateless/00152_totals_in_subquery.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00152_totals_in_subquery.sql
rename to tests/queries/0_stateless/00152_totals_in_subquery.sql
diff --git a/dbms/tests/queries/0_stateless/00153_transform.reference b/tests/queries/0_stateless/00153_transform.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00153_transform.reference
rename to tests/queries/0_stateless/00153_transform.reference
diff --git a/dbms/tests/queries/0_stateless/00153_transform.sql b/tests/queries/0_stateless/00153_transform.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00153_transform.sql
rename to tests/queries/0_stateless/00153_transform.sql
diff --git a/dbms/tests/queries/0_stateless/00154_shard_distributed_with_distinct.reference b/tests/queries/0_stateless/00154_shard_distributed_with_distinct.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00154_shard_distributed_with_distinct.reference
rename to tests/queries/0_stateless/00154_shard_distributed_with_distinct.reference
diff --git a/dbms/tests/queries/0_stateless/00154_shard_distributed_with_distinct.sql b/tests/queries/0_stateless/00154_shard_distributed_with_distinct.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00154_shard_distributed_with_distinct.sql
rename to tests/queries/0_stateless/00154_shard_distributed_with_distinct.sql
diff --git a/dbms/tests/queries/0_stateless/00155_long_merges.reference b/tests/queries/0_stateless/00155_long_merges.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00155_long_merges.reference
rename to tests/queries/0_stateless/00155_long_merges.reference
diff --git a/dbms/tests/queries/0_stateless/00155_long_merges.sh b/tests/queries/0_stateless/00155_long_merges.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00155_long_merges.sh
rename to tests/queries/0_stateless/00155_long_merges.sh
diff --git a/dbms/tests/queries/0_stateless/00156_array_map_to_constant.reference b/tests/queries/0_stateless/00156_array_map_to_constant.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00156_array_map_to_constant.reference
rename to tests/queries/0_stateless/00156_array_map_to_constant.reference
diff --git a/dbms/tests/queries/0_stateless/00156_array_map_to_constant.sql b/tests/queries/0_stateless/00156_array_map_to_constant.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00156_array_map_to_constant.sql
rename to tests/queries/0_stateless/00156_array_map_to_constant.sql
diff --git a/dbms/tests/queries/0_stateless/00157_aliases_and_lambda_formal_parameters.reference b/tests/queries/0_stateless/00157_aliases_and_lambda_formal_parameters.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00157_aliases_and_lambda_formal_parameters.reference
rename to tests/queries/0_stateless/00157_aliases_and_lambda_formal_parameters.reference
diff --git a/dbms/tests/queries/0_stateless/00157_aliases_and_lambda_formal_parameters.sql b/tests/queries/0_stateless/00157_aliases_and_lambda_formal_parameters.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00157_aliases_and_lambda_formal_parameters.sql
rename to tests/queries/0_stateless/00157_aliases_and_lambda_formal_parameters.sql
diff --git a/dbms/tests/queries/0_stateless/00158_buffer_and_nonexistent_table.reference b/tests/queries/0_stateless/00158_buffer_and_nonexistent_table.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00158_buffer_and_nonexistent_table.reference
rename to tests/queries/0_stateless/00158_buffer_and_nonexistent_table.reference
diff --git a/dbms/tests/queries/0_stateless/00158_buffer_and_nonexistent_table.sql b/tests/queries/0_stateless/00158_buffer_and_nonexistent_table.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00158_buffer_and_nonexistent_table.sql
rename to tests/queries/0_stateless/00158_buffer_and_nonexistent_table.sql
diff --git a/dbms/tests/queries/0_stateless/00159_whitespace_in_columns_list.reference b/tests/queries/0_stateless/00159_whitespace_in_columns_list.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00159_whitespace_in_columns_list.reference
rename to tests/queries/0_stateless/00159_whitespace_in_columns_list.reference
diff --git a/dbms/tests/queries/0_stateless/00159_whitespace_in_columns_list.sql b/tests/queries/0_stateless/00159_whitespace_in_columns_list.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00159_whitespace_in_columns_list.sql
rename to tests/queries/0_stateless/00159_whitespace_in_columns_list.sql
diff --git a/dbms/tests/queries/0_stateless/00160_merge_and_index_in_in.reference b/tests/queries/0_stateless/00160_merge_and_index_in_in.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00160_merge_and_index_in_in.reference
rename to tests/queries/0_stateless/00160_merge_and_index_in_in.reference
diff --git a/dbms/tests/queries/0_stateless/00160_merge_and_index_in_in.sql b/tests/queries/0_stateless/00160_merge_and_index_in_in.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00160_merge_and_index_in_in.sql
rename to tests/queries/0_stateless/00160_merge_and_index_in_in.sql
diff --git a/dbms/tests/queries/0_stateless/00161_rounding_functions.reference b/tests/queries/0_stateless/00161_rounding_functions.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00161_rounding_functions.reference
rename to tests/queries/0_stateless/00161_rounding_functions.reference
diff --git a/dbms/tests/queries/0_stateless/00161_rounding_functions.sql b/tests/queries/0_stateless/00161_rounding_functions.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00161_rounding_functions.sql
rename to tests/queries/0_stateless/00161_rounding_functions.sql
diff --git a/dbms/tests/queries/0_stateless/00162_shard_global_join.reference b/tests/queries/0_stateless/00162_shard_global_join.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00162_shard_global_join.reference
rename to tests/queries/0_stateless/00162_shard_global_join.reference
diff --git
a/dbms/tests/queries/0_stateless/00162_shard_global_join.sql b/tests/queries/0_stateless/00162_shard_global_join.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00162_shard_global_join.sql rename to tests/queries/0_stateless/00162_shard_global_join.sql diff --git a/dbms/tests/queries/0_stateless/00163_shard_join_with_empty_table.reference b/tests/queries/0_stateless/00163_shard_join_with_empty_table.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00163_shard_join_with_empty_table.reference rename to tests/queries/0_stateless/00163_shard_join_with_empty_table.reference diff --git a/dbms/tests/queries/0_stateless/00163_shard_join_with_empty_table.sql b/tests/queries/0_stateless/00163_shard_join_with_empty_table.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00163_shard_join_with_empty_table.sql rename to tests/queries/0_stateless/00163_shard_join_with_empty_table.sql diff --git a/dbms/tests/queries/0_stateless/00164_not_chain.reference b/tests/queries/0_stateless/00164_not_chain.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00164_not_chain.reference rename to tests/queries/0_stateless/00164_not_chain.reference diff --git a/dbms/tests/queries/0_stateless/00164_not_chain.sql b/tests/queries/0_stateless/00164_not_chain.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00164_not_chain.sql rename to tests/queries/0_stateless/00164_not_chain.sql diff --git a/dbms/tests/queries/0_stateless/00165_transform_non_const_default.reference b/tests/queries/0_stateless/00165_transform_non_const_default.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00165_transform_non_const_default.reference rename to tests/queries/0_stateless/00165_transform_non_const_default.reference diff --git a/dbms/tests/queries/0_stateless/00165_transform_non_const_default.sql b/tests/queries/0_stateless/00165_transform_non_const_default.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00165_transform_non_const_default.sql rename to tests/queries/0_stateless/00165_transform_non_const_default.sql diff --git a/dbms/tests/queries/0_stateless/00166_functions_of_aggregation_states.reference b/tests/queries/0_stateless/00166_functions_of_aggregation_states.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00166_functions_of_aggregation_states.reference rename to tests/queries/0_stateless/00166_functions_of_aggregation_states.reference diff --git a/dbms/tests/queries/0_stateless/00166_functions_of_aggregation_states.sql b/tests/queries/0_stateless/00166_functions_of_aggregation_states.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00166_functions_of_aggregation_states.sql rename to tests/queries/0_stateless/00166_functions_of_aggregation_states.sql diff --git a/dbms/tests/queries/0_stateless/00167_settings_inside_query.reference b/tests/queries/0_stateless/00167_settings_inside_query.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00167_settings_inside_query.reference rename to tests/queries/0_stateless/00167_settings_inside_query.reference diff --git a/dbms/tests/queries/0_stateless/00167_settings_inside_query.sql b/tests/queries/0_stateless/00167_settings_inside_query.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00167_settings_inside_query.sql rename to tests/queries/0_stateless/00167_settings_inside_query.sql diff --git 
a/dbms/tests/queries/0_stateless/00168_buffer_defaults.reference b/tests/queries/0_stateless/00168_buffer_defaults.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00168_buffer_defaults.reference rename to tests/queries/0_stateless/00168_buffer_defaults.reference diff --git a/dbms/tests/queries/0_stateless/00168_buffer_defaults.sql b/tests/queries/0_stateless/00168_buffer_defaults.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00168_buffer_defaults.sql rename to tests/queries/0_stateless/00168_buffer_defaults.sql diff --git a/dbms/tests/queries/0_stateless/00169_join_constant_keys.reference b/tests/queries/0_stateless/00169_join_constant_keys.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00169_join_constant_keys.reference rename to tests/queries/0_stateless/00169_join_constant_keys.reference diff --git a/dbms/tests/queries/0_stateless/00169_join_constant_keys.sql b/tests/queries/0_stateless/00169_join_constant_keys.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00169_join_constant_keys.sql rename to tests/queries/0_stateless/00169_join_constant_keys.sql diff --git a/dbms/tests/queries/0_stateless/00170_lower_upper_utf8.reference b/tests/queries/0_stateless/00170_lower_upper_utf8.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00170_lower_upper_utf8.reference rename to tests/queries/0_stateless/00170_lower_upper_utf8.reference diff --git a/dbms/tests/queries/0_stateless/00170_lower_upper_utf8.sql b/tests/queries/0_stateless/00170_lower_upper_utf8.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00170_lower_upper_utf8.sql rename to tests/queries/0_stateless/00170_lower_upper_utf8.sql diff --git a/dbms/tests/queries/0_stateless/00171_shard_array_of_tuple_remote.reference b/tests/queries/0_stateless/00171_shard_array_of_tuple_remote.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00171_shard_array_of_tuple_remote.reference rename to tests/queries/0_stateless/00171_shard_array_of_tuple_remote.reference diff --git a/dbms/tests/queries/0_stateless/00171_shard_array_of_tuple_remote.sql b/tests/queries/0_stateless/00171_shard_array_of_tuple_remote.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00171_shard_array_of_tuple_remote.sql rename to tests/queries/0_stateless/00171_shard_array_of_tuple_remote.sql diff --git a/dbms/tests/queries/0_stateless/00172_constexprs_in_set.reference b/tests/queries/0_stateless/00172_constexprs_in_set.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00172_constexprs_in_set.reference rename to tests/queries/0_stateless/00172_constexprs_in_set.reference diff --git a/dbms/tests/queries/0_stateless/00172_constexprs_in_set.sql b/tests/queries/0_stateless/00172_constexprs_in_set.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00172_constexprs_in_set.sql rename to tests/queries/0_stateless/00172_constexprs_in_set.sql diff --git a/dbms/tests/queries/0_stateless/00173_compare_date_time_with_constant_string.reference b/tests/queries/0_stateless/00173_compare_date_time_with_constant_string.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00173_compare_date_time_with_constant_string.reference rename to tests/queries/0_stateless/00173_compare_date_time_with_constant_string.reference diff --git a/dbms/tests/queries/0_stateless/00173_compare_date_time_with_constant_string.sql 
b/tests/queries/0_stateless/00173_compare_date_time_with_constant_string.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00173_compare_date_time_with_constant_string.sql rename to tests/queries/0_stateless/00173_compare_date_time_with_constant_string.sql diff --git a/dbms/tests/queries/0_stateless/00174_compare_date_time_with_constant_string_in_in.reference b/tests/queries/0_stateless/00174_compare_date_time_with_constant_string_in_in.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00174_compare_date_time_with_constant_string_in_in.reference rename to tests/queries/0_stateless/00174_compare_date_time_with_constant_string_in_in.reference diff --git a/dbms/tests/queries/0_stateless/00174_compare_date_time_with_constant_string_in_in.sql b/tests/queries/0_stateless/00174_compare_date_time_with_constant_string_in_in.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00174_compare_date_time_with_constant_string_in_in.sql rename to tests/queries/0_stateless/00174_compare_date_time_with_constant_string_in_in.sql diff --git a/dbms/tests/queries/0_stateless/00175_if_num_arrays.reference b/tests/queries/0_stateless/00175_if_num_arrays.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00175_if_num_arrays.reference rename to tests/queries/0_stateless/00175_if_num_arrays.reference diff --git a/dbms/tests/queries/0_stateless/00175_if_num_arrays.sql b/tests/queries/0_stateless/00175_if_num_arrays.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00175_if_num_arrays.sql rename to tests/queries/0_stateless/00175_if_num_arrays.sql diff --git a/dbms/tests/queries/0_stateless/00176_if_string_arrays.reference b/tests/queries/0_stateless/00176_if_string_arrays.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00176_if_string_arrays.reference rename to tests/queries/0_stateless/00176_if_string_arrays.reference diff --git a/dbms/tests/queries/0_stateless/00176_if_string_arrays.sql b/tests/queries/0_stateless/00176_if_string_arrays.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00176_if_string_arrays.sql rename to tests/queries/0_stateless/00176_if_string_arrays.sql diff --git a/dbms/tests/queries/0_stateless/00177_inserts_through_http_parts.reference b/tests/queries/0_stateless/00177_inserts_through_http_parts.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00177_inserts_through_http_parts.reference rename to tests/queries/0_stateless/00177_inserts_through_http_parts.reference diff --git a/dbms/tests/queries/0_stateless/00177_inserts_through_http_parts.sh b/tests/queries/0_stateless/00177_inserts_through_http_parts.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00177_inserts_through_http_parts.sh rename to tests/queries/0_stateless/00177_inserts_through_http_parts.sh diff --git a/dbms/tests/queries/0_stateless/00178_function_replicate.reference b/tests/queries/0_stateless/00178_function_replicate.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00178_function_replicate.reference rename to tests/queries/0_stateless/00178_function_replicate.reference diff --git a/dbms/tests/queries/0_stateless/00178_function_replicate.sql b/tests/queries/0_stateless/00178_function_replicate.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00178_function_replicate.sql rename to tests/queries/0_stateless/00178_function_replicate.sql diff --git 
a/dbms/tests/queries/0_stateless/00179_lambdas_with_common_expressions_and_filter.reference b/tests/queries/0_stateless/00179_lambdas_with_common_expressions_and_filter.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00179_lambdas_with_common_expressions_and_filter.reference rename to tests/queries/0_stateless/00179_lambdas_with_common_expressions_and_filter.reference diff --git a/dbms/tests/queries/0_stateless/00179_lambdas_with_common_expressions_and_filter.sql b/tests/queries/0_stateless/00179_lambdas_with_common_expressions_and_filter.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00179_lambdas_with_common_expressions_and_filter.sql rename to tests/queries/0_stateless/00179_lambdas_with_common_expressions_and_filter.sql diff --git a/dbms/tests/queries/0_stateless/00180_attach_materialized_view.reference b/tests/queries/0_stateless/00180_attach_materialized_view.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00180_attach_materialized_view.reference rename to tests/queries/0_stateless/00180_attach_materialized_view.reference diff --git a/dbms/tests/queries/0_stateless/00180_attach_materialized_view.sql b/tests/queries/0_stateless/00180_attach_materialized_view.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00180_attach_materialized_view.sql rename to tests/queries/0_stateless/00180_attach_materialized_view.sql diff --git a/dbms/tests/queries/0_stateless/00181_aggregate_functions_statistics.reference b/tests/queries/0_stateless/00181_aggregate_functions_statistics.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00181_aggregate_functions_statistics.reference rename to tests/queries/0_stateless/00181_aggregate_functions_statistics.reference diff --git a/dbms/tests/queries/0_stateless/00181_aggregate_functions_statistics.sql b/tests/queries/0_stateless/00181_aggregate_functions_statistics.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00181_aggregate_functions_statistics.sql rename to tests/queries/0_stateless/00181_aggregate_functions_statistics.sql diff --git a/dbms/tests/queries/0_stateless/00181_aggregate_functions_statistics_stable.reference b/tests/queries/0_stateless/00181_aggregate_functions_statistics_stable.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00181_aggregate_functions_statistics_stable.reference rename to tests/queries/0_stateless/00181_aggregate_functions_statistics_stable.reference diff --git a/dbms/tests/queries/0_stateless/00181_aggregate_functions_statistics_stable.sql b/tests/queries/0_stateless/00181_aggregate_functions_statistics_stable.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00181_aggregate_functions_statistics_stable.sql rename to tests/queries/0_stateless/00181_aggregate_functions_statistics_stable.sql diff --git a/dbms/tests/queries/0_stateless/00182_functions_higher_order_and_consts.reference b/tests/queries/0_stateless/00182_functions_higher_order_and_consts.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00182_functions_higher_order_and_consts.reference rename to tests/queries/0_stateless/00182_functions_higher_order_and_consts.reference diff --git a/dbms/tests/queries/0_stateless/00182_functions_higher_order_and_consts.sql b/tests/queries/0_stateless/00182_functions_higher_order_and_consts.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00182_functions_higher_order_and_consts.sql rename to 
tests/queries/0_stateless/00182_functions_higher_order_and_consts.sql diff --git a/dbms/tests/queries/0_stateless/00183_skip_unavailable_shards.reference b/tests/queries/0_stateless/00183_skip_unavailable_shards.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00183_skip_unavailable_shards.reference rename to tests/queries/0_stateless/00183_skip_unavailable_shards.reference diff --git a/dbms/tests/queries/0_stateless/00183_skip_unavailable_shards.sql b/tests/queries/0_stateless/00183_skip_unavailable_shards.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00183_skip_unavailable_shards.sql rename to tests/queries/0_stateless/00183_skip_unavailable_shards.sql diff --git a/dbms/tests/queries/0_stateless/00184_shard_distributed_group_by_no_merge.reference b/tests/queries/0_stateless/00184_shard_distributed_group_by_no_merge.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00184_shard_distributed_group_by_no_merge.reference rename to tests/queries/0_stateless/00184_shard_distributed_group_by_no_merge.reference diff --git a/dbms/tests/queries/0_stateless/00184_shard_distributed_group_by_no_merge.sql b/tests/queries/0_stateless/00184_shard_distributed_group_by_no_merge.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00184_shard_distributed_group_by_no_merge.sql rename to tests/queries/0_stateless/00184_shard_distributed_group_by_no_merge.sql diff --git a/dbms/tests/queries/0_stateless/00185_array_literals.reference b/tests/queries/0_stateless/00185_array_literals.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00185_array_literals.reference rename to tests/queries/0_stateless/00185_array_literals.reference diff --git a/dbms/tests/queries/0_stateless/00185_array_literals.sql b/tests/queries/0_stateless/00185_array_literals.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00185_array_literals.sql rename to tests/queries/0_stateless/00185_array_literals.sql diff --git a/dbms/tests/queries/0_stateless/00186_very_long_arrays.reference b/tests/queries/0_stateless/00186_very_long_arrays.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00186_very_long_arrays.reference rename to tests/queries/0_stateless/00186_very_long_arrays.reference diff --git a/dbms/tests/queries/0_stateless/00186_very_long_arrays.sh b/tests/queries/0_stateless/00186_very_long_arrays.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00186_very_long_arrays.sh rename to tests/queries/0_stateless/00186_very_long_arrays.sh diff --git a/dbms/tests/queries/0_stateless/00187_like_regexp_prefix.reference b/tests/queries/0_stateless/00187_like_regexp_prefix.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00187_like_regexp_prefix.reference rename to tests/queries/0_stateless/00187_like_regexp_prefix.reference diff --git a/dbms/tests/queries/0_stateless/00187_like_regexp_prefix.sql b/tests/queries/0_stateless/00187_like_regexp_prefix.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00187_like_regexp_prefix.sql rename to tests/queries/0_stateless/00187_like_regexp_prefix.sql diff --git a/dbms/tests/queries/0_stateless/00188_constants_as_arguments_of_aggregate_functions.reference b/tests/queries/0_stateless/00188_constants_as_arguments_of_aggregate_functions.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00188_constants_as_arguments_of_aggregate_functions.reference rename to 
tests/queries/0_stateless/00188_constants_as_arguments_of_aggregate_functions.reference diff --git a/dbms/tests/queries/0_stateless/00188_constants_as_arguments_of_aggregate_functions.sql b/tests/queries/0_stateless/00188_constants_as_arguments_of_aggregate_functions.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00188_constants_as_arguments_of_aggregate_functions.sql rename to tests/queries/0_stateless/00188_constants_as_arguments_of_aggregate_functions.sql diff --git a/dbms/tests/queries/0_stateless/00189_time_zones.reference b/tests/queries/0_stateless/00189_time_zones.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00189_time_zones.reference rename to tests/queries/0_stateless/00189_time_zones.reference diff --git a/dbms/tests/queries/0_stateless/00189_time_zones.sql b/tests/queries/0_stateless/00189_time_zones.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00189_time_zones.sql rename to tests/queries/0_stateless/00189_time_zones.sql diff --git a/dbms/tests/queries/0_stateless/00190_non_constant_array_of_constant_data.reference b/tests/queries/0_stateless/00190_non_constant_array_of_constant_data.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00190_non_constant_array_of_constant_data.reference rename to tests/queries/0_stateless/00190_non_constant_array_of_constant_data.reference diff --git a/dbms/tests/queries/0_stateless/00190_non_constant_array_of_constant_data.sql b/tests/queries/0_stateless/00190_non_constant_array_of_constant_data.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00190_non_constant_array_of_constant_data.sql rename to tests/queries/0_stateless/00190_non_constant_array_of_constant_data.sql diff --git a/dbms/tests/queries/0_stateless/00191_aggregating_merge_tree_and_final.reference b/tests/queries/0_stateless/00191_aggregating_merge_tree_and_final.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00191_aggregating_merge_tree_and_final.reference rename to tests/queries/0_stateless/00191_aggregating_merge_tree_and_final.reference diff --git a/dbms/tests/queries/0_stateless/00191_aggregating_merge_tree_and_final.sql b/tests/queries/0_stateless/00191_aggregating_merge_tree_and_final.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00191_aggregating_merge_tree_and_final.sql rename to tests/queries/0_stateless/00191_aggregating_merge_tree_and_final.sql diff --git a/dbms/tests/queries/0_stateless/00192_least_greatest.reference b/tests/queries/0_stateless/00192_least_greatest.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00192_least_greatest.reference rename to tests/queries/0_stateless/00192_least_greatest.reference diff --git a/dbms/tests/queries/0_stateless/00192_least_greatest.sql b/tests/queries/0_stateless/00192_least_greatest.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00192_least_greatest.sql rename to tests/queries/0_stateless/00192_least_greatest.sql diff --git a/dbms/tests/queries/0_stateless/00193_parallel_replicas.reference b/tests/queries/0_stateless/00193_parallel_replicas.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00193_parallel_replicas.reference rename to tests/queries/0_stateless/00193_parallel_replicas.reference diff --git a/dbms/tests/queries/0_stateless/00193_parallel_replicas.sql b/tests/queries/0_stateless/00193_parallel_replicas.sql similarity index 100% rename from 
dbms/tests/queries/0_stateless/00193_parallel_replicas.sql rename to tests/queries/0_stateless/00193_parallel_replicas.sql diff --git a/dbms/tests/queries/0_stateless/00194_identity.reference b/tests/queries/0_stateless/00194_identity.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00194_identity.reference rename to tests/queries/0_stateless/00194_identity.reference diff --git a/dbms/tests/queries/0_stateless/00194_identity.sql b/tests/queries/0_stateless/00194_identity.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00194_identity.sql rename to tests/queries/0_stateless/00194_identity.sql diff --git a/dbms/tests/queries/0_stateless/00195_shard_union_all_and_global_in.reference b/tests/queries/0_stateless/00195_shard_union_all_and_global_in.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00195_shard_union_all_and_global_in.reference rename to tests/queries/0_stateless/00195_shard_union_all_and_global_in.reference diff --git a/dbms/tests/queries/0_stateless/00195_shard_union_all_and_global_in.sql b/tests/queries/0_stateless/00195_shard_union_all_and_global_in.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00195_shard_union_all_and_global_in.sql rename to tests/queries/0_stateless/00195_shard_union_all_and_global_in.sql diff --git a/dbms/tests/queries/0_stateless/00196_float32_formatting.reference b/tests/queries/0_stateless/00196_float32_formatting.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00196_float32_formatting.reference rename to tests/queries/0_stateless/00196_float32_formatting.reference diff --git a/dbms/tests/queries/0_stateless/00196_float32_formatting.sql b/tests/queries/0_stateless/00196_float32_formatting.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00196_float32_formatting.sql rename to tests/queries/0_stateless/00196_float32_formatting.sql diff --git a/dbms/tests/queries/0_stateless/00197_if_fixed_string.reference b/tests/queries/0_stateless/00197_if_fixed_string.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00197_if_fixed_string.reference rename to tests/queries/0_stateless/00197_if_fixed_string.reference diff --git a/dbms/tests/queries/0_stateless/00197_if_fixed_string.sql b/tests/queries/0_stateless/00197_if_fixed_string.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00197_if_fixed_string.sql rename to tests/queries/0_stateless/00197_if_fixed_string.sql diff --git a/dbms/tests/queries/0_stateless/00198_group_by_empty_arrays.reference b/tests/queries/0_stateless/00198_group_by_empty_arrays.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00198_group_by_empty_arrays.reference rename to tests/queries/0_stateless/00198_group_by_empty_arrays.reference diff --git a/dbms/tests/queries/0_stateless/00198_group_by_empty_arrays.sql b/tests/queries/0_stateless/00198_group_by_empty_arrays.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00198_group_by_empty_arrays.sql rename to tests/queries/0_stateless/00198_group_by_empty_arrays.sql diff --git a/dbms/tests/queries/0_stateless/00199_ternary_operator_type_check.reference b/tests/queries/0_stateless/00199_ternary_operator_type_check.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00199_ternary_operator_type_check.reference rename to tests/queries/0_stateless/00199_ternary_operator_type_check.reference diff --git 
a/dbms/tests/queries/0_stateless/00199_ternary_operator_type_check.sql b/tests/queries/0_stateless/00199_ternary_operator_type_check.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00199_ternary_operator_type_check.sql rename to tests/queries/0_stateless/00199_ternary_operator_type_check.sql diff --git a/dbms/tests/queries/0_stateless/00200_shard_distinct_order_by_limit_distributed.reference b/tests/queries/0_stateless/00200_shard_distinct_order_by_limit_distributed.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00200_shard_distinct_order_by_limit_distributed.reference rename to tests/queries/0_stateless/00200_shard_distinct_order_by_limit_distributed.reference diff --git a/dbms/tests/queries/0_stateless/00200_shard_distinct_order_by_limit_distributed.sql b/tests/queries/0_stateless/00200_shard_distinct_order_by_limit_distributed.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00200_shard_distinct_order_by_limit_distributed.sql rename to tests/queries/0_stateless/00200_shard_distinct_order_by_limit_distributed.sql diff --git a/dbms/tests/queries/0_stateless/00201_array_uniq.reference b/tests/queries/0_stateless/00201_array_uniq.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00201_array_uniq.reference rename to tests/queries/0_stateless/00201_array_uniq.reference diff --git a/dbms/tests/queries/0_stateless/00201_array_uniq.sql b/tests/queries/0_stateless/00201_array_uniq.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00201_array_uniq.sql rename to tests/queries/0_stateless/00201_array_uniq.sql diff --git a/dbms/tests/queries/0_stateless/00202_cross_join.reference b/tests/queries/0_stateless/00202_cross_join.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00202_cross_join.reference rename to tests/queries/0_stateless/00202_cross_join.reference diff --git a/dbms/tests/queries/0_stateless/00202_cross_join.sql b/tests/queries/0_stateless/00202_cross_join.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00202_cross_join.sql rename to tests/queries/0_stateless/00202_cross_join.sql diff --git a/dbms/tests/queries/0_stateless/00203_full_join.reference b/tests/queries/0_stateless/00203_full_join.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00203_full_join.reference rename to tests/queries/0_stateless/00203_full_join.reference diff --git a/dbms/tests/queries/0_stateless/00203_full_join.sql b/tests/queries/0_stateless/00203_full_join.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00203_full_join.sql rename to tests/queries/0_stateless/00203_full_join.sql diff --git a/dbms/tests/queries/0_stateless/00204_extract_url_parameter.reference b/tests/queries/0_stateless/00204_extract_url_parameter.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00204_extract_url_parameter.reference rename to tests/queries/0_stateless/00204_extract_url_parameter.reference diff --git a/dbms/tests/queries/0_stateless/00204_extract_url_parameter.sql b/tests/queries/0_stateless/00204_extract_url_parameter.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00204_extract_url_parameter.sql rename to tests/queries/0_stateless/00204_extract_url_parameter.sql diff --git a/dbms/tests/queries/0_stateless/00205_scalar_subqueries.reference b/tests/queries/0_stateless/00205_scalar_subqueries.reference similarity index 100% rename from 
dbms/tests/queries/0_stateless/00205_scalar_subqueries.reference rename to tests/queries/0_stateless/00205_scalar_subqueries.reference diff --git a/dbms/tests/queries/0_stateless/00205_scalar_subqueries.sql b/tests/queries/0_stateless/00205_scalar_subqueries.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00205_scalar_subqueries.sql rename to tests/queries/0_stateless/00205_scalar_subqueries.sql diff --git a/dbms/tests/queries/0_stateless/00206_empty_array_to_single.reference b/tests/queries/0_stateless/00206_empty_array_to_single.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00206_empty_array_to_single.reference rename to tests/queries/0_stateless/00206_empty_array_to_single.reference diff --git a/dbms/tests/queries/0_stateless/00206_empty_array_to_single.sql b/tests/queries/0_stateless/00206_empty_array_to_single.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00206_empty_array_to_single.sql rename to tests/queries/0_stateless/00206_empty_array_to_single.sql diff --git a/dbms/tests/queries/0_stateless/00207_left_array_join.reference b/tests/queries/0_stateless/00207_left_array_join.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00207_left_array_join.reference rename to tests/queries/0_stateless/00207_left_array_join.reference diff --git a/dbms/tests/queries/0_stateless/00207_left_array_join.sql b/tests/queries/0_stateless/00207_left_array_join.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00207_left_array_join.sql rename to tests/queries/0_stateless/00207_left_array_join.sql diff --git a/dbms/tests/queries/0_stateless/00208_agg_state_merge.reference b/tests/queries/0_stateless/00208_agg_state_merge.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00208_agg_state_merge.reference rename to tests/queries/0_stateless/00208_agg_state_merge.reference diff --git a/dbms/tests/queries/0_stateless/00208_agg_state_merge.sql b/tests/queries/0_stateless/00208_agg_state_merge.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00208_agg_state_merge.sql rename to tests/queries/0_stateless/00208_agg_state_merge.sql diff --git a/dbms/tests/queries/0_stateless/00209_insert_select_extremes.reference b/tests/queries/0_stateless/00209_insert_select_extremes.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00209_insert_select_extremes.reference rename to tests/queries/0_stateless/00209_insert_select_extremes.reference diff --git a/dbms/tests/queries/0_stateless/00209_insert_select_extremes.sql b/tests/queries/0_stateless/00209_insert_select_extremes.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00209_insert_select_extremes.sql rename to tests/queries/0_stateless/00209_insert_select_extremes.sql diff --git a/dbms/tests/queries/0_stateless/00210_insert_select_extremes_http.reference b/tests/queries/0_stateless/00210_insert_select_extremes_http.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00210_insert_select_extremes_http.reference rename to tests/queries/0_stateless/00210_insert_select_extremes_http.reference diff --git a/dbms/tests/queries/0_stateless/00210_insert_select_extremes_http.sh b/tests/queries/0_stateless/00210_insert_select_extremes_http.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00210_insert_select_extremes_http.sh rename to tests/queries/0_stateless/00210_insert_select_extremes_http.sh diff --git 
a/dbms/tests/queries/0_stateless/00211_shard_query_formatting_aliases.reference b/tests/queries/0_stateless/00211_shard_query_formatting_aliases.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00211_shard_query_formatting_aliases.reference rename to tests/queries/0_stateless/00211_shard_query_formatting_aliases.reference diff --git a/dbms/tests/queries/0_stateless/00211_shard_query_formatting_aliases.sql b/tests/queries/0_stateless/00211_shard_query_formatting_aliases.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00211_shard_query_formatting_aliases.sql rename to tests/queries/0_stateless/00211_shard_query_formatting_aliases.sql diff --git a/dbms/tests/queries/0_stateless/00212_shard_aggregate_function_uniq.reference b/tests/queries/0_stateless/00212_shard_aggregate_function_uniq.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00212_shard_aggregate_function_uniq.reference rename to tests/queries/0_stateless/00212_shard_aggregate_function_uniq.reference diff --git a/dbms/tests/queries/0_stateless/00212_shard_aggregate_function_uniq.sql b/tests/queries/0_stateless/00212_shard_aggregate_function_uniq.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00212_shard_aggregate_function_uniq.sql rename to tests/queries/0_stateless/00212_shard_aggregate_function_uniq.sql diff --git a/dbms/tests/queries/0_stateless/00213_multiple_global_in.reference b/tests/queries/0_stateless/00213_multiple_global_in.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00213_multiple_global_in.reference rename to tests/queries/0_stateless/00213_multiple_global_in.reference diff --git a/dbms/tests/queries/0_stateless/00213_multiple_global_in.sql b/tests/queries/0_stateless/00213_multiple_global_in.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00213_multiple_global_in.sql rename to tests/queries/0_stateless/00213_multiple_global_in.sql diff --git a/dbms/tests/queries/0_stateless/00214_primary_key_order.reference b/tests/queries/0_stateless/00214_primary_key_order.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00214_primary_key_order.reference rename to tests/queries/0_stateless/00214_primary_key_order.reference diff --git a/dbms/tests/queries/0_stateless/00214_primary_key_order.sql b/tests/queries/0_stateless/00214_primary_key_order.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00214_primary_key_order.sql rename to tests/queries/0_stateless/00214_primary_key_order.sql diff --git a/dbms/tests/queries/0_stateless/00215_primary_key_order_zookeeper.reference b/tests/queries/0_stateless/00215_primary_key_order_zookeeper.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00215_primary_key_order_zookeeper.reference rename to tests/queries/0_stateless/00215_primary_key_order_zookeeper.reference diff --git a/dbms/tests/queries/0_stateless/00215_primary_key_order_zookeeper.sql b/tests/queries/0_stateless/00215_primary_key_order_zookeeper.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00215_primary_key_order_zookeeper.sql rename to tests/queries/0_stateless/00215_primary_key_order_zookeeper.sql diff --git a/dbms/tests/queries/0_stateless/00216_bit_test_function_family.reference b/tests/queries/0_stateless/00216_bit_test_function_family.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00216_bit_test_function_family.reference rename to 
tests/queries/0_stateless/00216_bit_test_function_family.reference diff --git a/dbms/tests/queries/0_stateless/00216_bit_test_function_family.sql b/tests/queries/0_stateless/00216_bit_test_function_family.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00216_bit_test_function_family.sql rename to tests/queries/0_stateless/00216_bit_test_function_family.sql diff --git a/dbms/tests/queries/0_stateless/00217_shard_global_subquery_columns_with_same_name.reference b/tests/queries/0_stateless/00217_shard_global_subquery_columns_with_same_name.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00217_shard_global_subquery_columns_with_same_name.reference rename to tests/queries/0_stateless/00217_shard_global_subquery_columns_with_same_name.reference diff --git a/dbms/tests/queries/0_stateless/00217_shard_global_subquery_columns_with_same_name.sql b/tests/queries/0_stateless/00217_shard_global_subquery_columns_with_same_name.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00217_shard_global_subquery_columns_with_same_name.sql rename to tests/queries/0_stateless/00217_shard_global_subquery_columns_with_same_name.sql diff --git a/dbms/tests/queries/0_stateless/00218_like_regexp_newline.reference b/tests/queries/0_stateless/00218_like_regexp_newline.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00218_like_regexp_newline.reference rename to tests/queries/0_stateless/00218_like_regexp_newline.reference diff --git a/dbms/tests/queries/0_stateless/00218_like_regexp_newline.sql b/tests/queries/0_stateless/00218_like_regexp_newline.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00218_like_regexp_newline.sql rename to tests/queries/0_stateless/00218_like_regexp_newline.sql diff --git a/dbms/tests/queries/0_stateless/00219_full_right_join_column_order.reference b/tests/queries/0_stateless/00219_full_right_join_column_order.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00219_full_right_join_column_order.reference rename to tests/queries/0_stateless/00219_full_right_join_column_order.reference diff --git a/dbms/tests/queries/0_stateless/00219_full_right_join_column_order.sql b/tests/queries/0_stateless/00219_full_right_join_column_order.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00219_full_right_join_column_order.sql rename to tests/queries/0_stateless/00219_full_right_join_column_order.sql diff --git a/dbms/tests/queries/0_stateless/00220_shard_with_totals_in_subquery_remote_and_limit.reference b/tests/queries/0_stateless/00220_shard_with_totals_in_subquery_remote_and_limit.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00220_shard_with_totals_in_subquery_remote_and_limit.reference rename to tests/queries/0_stateless/00220_shard_with_totals_in_subquery_remote_and_limit.reference diff --git a/dbms/tests/queries/0_stateless/00220_shard_with_totals_in_subquery_remote_and_limit.sql b/tests/queries/0_stateless/00220_shard_with_totals_in_subquery_remote_and_limit.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00220_shard_with_totals_in_subquery_remote_and_limit.sql rename to tests/queries/0_stateless/00220_shard_with_totals_in_subquery_remote_and_limit.sql diff --git a/dbms/tests/queries/0_stateless/00222_sequence_aggregate_function_family.reference b/tests/queries/0_stateless/00222_sequence_aggregate_function_family.reference similarity index 100% rename from 
dbms/tests/queries/0_stateless/00222_sequence_aggregate_function_family.reference rename to tests/queries/0_stateless/00222_sequence_aggregate_function_family.reference diff --git a/dbms/tests/queries/0_stateless/00222_sequence_aggregate_function_family.sql b/tests/queries/0_stateless/00222_sequence_aggregate_function_family.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00222_sequence_aggregate_function_family.sql rename to tests/queries/0_stateless/00222_sequence_aggregate_function_family.sql diff --git a/dbms/tests/queries/0_stateless/00223_shard_distributed_aggregation_memory_efficient.reference b/tests/queries/0_stateless/00223_shard_distributed_aggregation_memory_efficient.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00223_shard_distributed_aggregation_memory_efficient.reference rename to tests/queries/0_stateless/00223_shard_distributed_aggregation_memory_efficient.reference diff --git a/dbms/tests/queries/0_stateless/00223_shard_distributed_aggregation_memory_efficient.sql b/tests/queries/0_stateless/00223_shard_distributed_aggregation_memory_efficient.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00223_shard_distributed_aggregation_memory_efficient.sql rename to tests/queries/0_stateless/00223_shard_distributed_aggregation_memory_efficient.sql diff --git a/dbms/tests/queries/0_stateless/00224_shard_distributed_aggregation_memory_efficient_and_overflows.reference b/tests/queries/0_stateless/00224_shard_distributed_aggregation_memory_efficient_and_overflows.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00224_shard_distributed_aggregation_memory_efficient_and_overflows.reference rename to tests/queries/0_stateless/00224_shard_distributed_aggregation_memory_efficient_and_overflows.reference diff --git a/dbms/tests/queries/0_stateless/00224_shard_distributed_aggregation_memory_efficient_and_overflows.sql b/tests/queries/0_stateless/00224_shard_distributed_aggregation_memory_efficient_and_overflows.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00224_shard_distributed_aggregation_memory_efficient_and_overflows.sql rename to tests/queries/0_stateless/00224_shard_distributed_aggregation_memory_efficient_and_overflows.sql diff --git a/dbms/tests/queries/0_stateless/00225_join_duplicate_columns.reference b/tests/queries/0_stateless/00225_join_duplicate_columns.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00225_join_duplicate_columns.reference rename to tests/queries/0_stateless/00225_join_duplicate_columns.reference diff --git a/dbms/tests/queries/0_stateless/00225_join_duplicate_columns.sql b/tests/queries/0_stateless/00225_join_duplicate_columns.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00225_join_duplicate_columns.sql rename to tests/queries/0_stateless/00225_join_duplicate_columns.sql diff --git a/dbms/tests/queries/0_stateless/00226_zookeeper_deduplication_and_unexpected_parts.reference b/tests/queries/0_stateless/00226_zookeeper_deduplication_and_unexpected_parts.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00226_zookeeper_deduplication_and_unexpected_parts.reference rename to tests/queries/0_stateless/00226_zookeeper_deduplication_and_unexpected_parts.reference diff --git a/dbms/tests/queries/0_stateless/00226_zookeeper_deduplication_and_unexpected_parts.sql b/tests/queries/0_stateless/00226_zookeeper_deduplication_and_unexpected_parts.sql similarity index 100% rename from 
dbms/tests/queries/0_stateless/00226_zookeeper_deduplication_and_unexpected_parts.sql rename to tests/queries/0_stateless/00226_zookeeper_deduplication_and_unexpected_parts.sql diff --git a/dbms/tests/queries/0_stateless/00227_quantiles_timing_arbitrary_order.reference b/tests/queries/0_stateless/00227_quantiles_timing_arbitrary_order.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00227_quantiles_timing_arbitrary_order.reference rename to tests/queries/0_stateless/00227_quantiles_timing_arbitrary_order.reference diff --git a/dbms/tests/queries/0_stateless/00227_quantiles_timing_arbitrary_order.sql b/tests/queries/0_stateless/00227_quantiles_timing_arbitrary_order.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00227_quantiles_timing_arbitrary_order.sql rename to tests/queries/0_stateless/00227_quantiles_timing_arbitrary_order.sql diff --git a/dbms/tests/queries/0_stateless/00228_shard_quantiles_deterministic_merge_overflow.reference b/tests/queries/0_stateless/00228_shard_quantiles_deterministic_merge_overflow.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00228_shard_quantiles_deterministic_merge_overflow.reference rename to tests/queries/0_stateless/00228_shard_quantiles_deterministic_merge_overflow.reference diff --git a/dbms/tests/queries/0_stateless/00228_shard_quantiles_deterministic_merge_overflow.sql b/tests/queries/0_stateless/00228_shard_quantiles_deterministic_merge_overflow.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00228_shard_quantiles_deterministic_merge_overflow.sql rename to tests/queries/0_stateless/00228_shard_quantiles_deterministic_merge_overflow.sql diff --git a/dbms/tests/queries/0_stateless/00229_prewhere_column_missing.reference b/tests/queries/0_stateless/00229_prewhere_column_missing.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00229_prewhere_column_missing.reference rename to tests/queries/0_stateless/00229_prewhere_column_missing.reference diff --git a/dbms/tests/queries/0_stateless/00229_prewhere_column_missing.sql b/tests/queries/0_stateless/00229_prewhere_column_missing.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00229_prewhere_column_missing.sql rename to tests/queries/0_stateless/00229_prewhere_column_missing.sql diff --git a/dbms/tests/queries/0_stateless/00230_array_functions_has_count_equal_index_of_non_const_second_arg.reference b/tests/queries/0_stateless/00230_array_functions_has_count_equal_index_of_non_const_second_arg.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00230_array_functions_has_count_equal_index_of_non_const_second_arg.reference rename to tests/queries/0_stateless/00230_array_functions_has_count_equal_index_of_non_const_second_arg.reference diff --git a/dbms/tests/queries/0_stateless/00230_array_functions_has_count_equal_index_of_non_const_second_arg.sql b/tests/queries/0_stateless/00230_array_functions_has_count_equal_index_of_non_const_second_arg.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00230_array_functions_has_count_equal_index_of_non_const_second_arg.sql rename to tests/queries/0_stateless/00230_array_functions_has_count_equal_index_of_non_const_second_arg.sql diff --git a/dbms/tests/queries/0_stateless/00231_format_vertical_raw.reference b/tests/queries/0_stateless/00231_format_vertical_raw.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00231_format_vertical_raw.reference rename to 
tests/queries/0_stateless/00231_format_vertical_raw.reference
diff --git a/dbms/tests/queries/0_stateless/00231_format_vertical_raw.sql b/tests/queries/0_stateless/00231_format_vertical_raw.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00231_format_vertical_raw.sql
rename to tests/queries/0_stateless/00231_format_vertical_raw.sql
diff --git a/dbms/tests/queries/0_stateless/00232_format_readable_size.reference b/tests/queries/0_stateless/00232_format_readable_size.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00232_format_readable_size.reference
rename to tests/queries/0_stateless/00232_format_readable_size.reference
diff --git a/dbms/tests/queries/0_stateless/00232_format_readable_size.sql b/tests/queries/0_stateless/00232_format_readable_size.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00232_format_readable_size.sql
rename to tests/queries/0_stateless/00232_format_readable_size.sql
diff --git a/dbms/tests/queries/0_stateless/00233_position_function_family.reference b/tests/queries/0_stateless/00233_position_function_family.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00233_position_function_family.reference
rename to tests/queries/0_stateless/00233_position_function_family.reference
diff --git a/dbms/tests/queries/0_stateless/00233_position_function_family.sql b/tests/queries/0_stateless/00233_position_function_family.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00233_position_function_family.sql
rename to tests/queries/0_stateless/00233_position_function_family.sql
diff --git a/dbms/tests/queries/0_stateless/00234_disjunctive_equality_chains_optimization.reference b/tests/queries/0_stateless/00234_disjunctive_equality_chains_optimization.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00234_disjunctive_equality_chains_optimization.reference
rename to tests/queries/0_stateless/00234_disjunctive_equality_chains_optimization.reference
diff --git a/dbms/tests/queries/0_stateless/00234_disjunctive_equality_chains_optimization.sql b/tests/queries/0_stateless/00234_disjunctive_equality_chains_optimization.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00234_disjunctive_equality_chains_optimization.sql
rename to tests/queries/0_stateless/00234_disjunctive_equality_chains_optimization.sql
diff --git a/dbms/tests/queries/0_stateless/00235_create_temporary_table_as.reference b/tests/queries/0_stateless/00235_create_temporary_table_as.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00235_create_temporary_table_as.reference
rename to tests/queries/0_stateless/00235_create_temporary_table_as.reference
diff --git a/dbms/tests/queries/0_stateless/00235_create_temporary_table_as.sql b/tests/queries/0_stateless/00235_create_temporary_table_as.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00235_create_temporary_table_as.sql
rename to tests/queries/0_stateless/00235_create_temporary_table_as.sql
diff --git a/dbms/tests/queries/0_stateless/00236_replicated_drop_on_non_leader_zookeeper.reference b/tests/queries/0_stateless/00236_replicated_drop_on_non_leader_zookeeper.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00236_replicated_drop_on_non_leader_zookeeper.reference
rename to tests/queries/0_stateless/00236_replicated_drop_on_non_leader_zookeeper.reference
diff --git a/dbms/tests/queries/0_stateless/00236_replicated_drop_on_non_leader_zookeeper.sql b/tests/queries/0_stateless/00236_replicated_drop_on_non_leader_zookeeper.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00236_replicated_drop_on_non_leader_zookeeper.sql
rename to tests/queries/0_stateless/00236_replicated_drop_on_non_leader_zookeeper.sql
diff --git a/dbms/tests/queries/0_stateless/00237_group_by_arrays.reference b/tests/queries/0_stateless/00237_group_by_arrays.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00237_group_by_arrays.reference
rename to tests/queries/0_stateless/00237_group_by_arrays.reference
diff --git a/dbms/tests/queries/0_stateless/00237_group_by_arrays.sql b/tests/queries/0_stateless/00237_group_by_arrays.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00237_group_by_arrays.sql
rename to tests/queries/0_stateless/00237_group_by_arrays.sql
diff --git a/dbms/tests/queries/0_stateless/00238_removal_of_temporary_columns.reference b/tests/queries/0_stateless/00238_removal_of_temporary_columns.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00238_removal_of_temporary_columns.reference
rename to tests/queries/0_stateless/00238_removal_of_temporary_columns.reference
diff --git a/dbms/tests/queries/0_stateless/00238_removal_of_temporary_columns.sql b/tests/queries/0_stateless/00238_removal_of_temporary_columns.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00238_removal_of_temporary_columns.sql
rename to tests/queries/0_stateless/00238_removal_of_temporary_columns.sql
diff --git a/dbms/tests/queries/0_stateless/00239_type_conversion_in_in.reference b/tests/queries/0_stateless/00239_type_conversion_in_in.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00239_type_conversion_in_in.reference
rename to tests/queries/0_stateless/00239_type_conversion_in_in.reference
diff --git a/dbms/tests/queries/0_stateless/00239_type_conversion_in_in.sql b/tests/queries/0_stateless/00239_type_conversion_in_in.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00239_type_conversion_in_in.sql
rename to tests/queries/0_stateless/00239_type_conversion_in_in.sql
diff --git a/dbms/tests/queries/0_stateless/00240_replace_substring_loop.reference b/tests/queries/0_stateless/00240_replace_substring_loop.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00240_replace_substring_loop.reference
rename to tests/queries/0_stateless/00240_replace_substring_loop.reference
diff --git a/dbms/tests/queries/0_stateless/00240_replace_substring_loop.sql b/tests/queries/0_stateless/00240_replace_substring_loop.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00240_replace_substring_loop.sql
rename to tests/queries/0_stateless/00240_replace_substring_loop.sql
diff --git a/dbms/tests/queries/0_stateless/00250_tuple_comparison.reference b/tests/queries/0_stateless/00250_tuple_comparison.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00250_tuple_comparison.reference
rename to tests/queries/0_stateless/00250_tuple_comparison.reference
diff --git a/dbms/tests/queries/0_stateless/00250_tuple_comparison.sql b/tests/queries/0_stateless/00250_tuple_comparison.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00250_tuple_comparison.sql
rename to tests/queries/0_stateless/00250_tuple_comparison.sql
diff --git a/dbms/tests/queries/0_stateless/00251_has_types.reference b/tests/queries/0_stateless/00251_has_types.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00251_has_types.reference
rename to tests/queries/0_stateless/00251_has_types.reference
diff --git a/dbms/tests/queries/0_stateless/00251_has_types.sql b/tests/queries/0_stateless/00251_has_types.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00251_has_types.sql
rename to tests/queries/0_stateless/00251_has_types.sql
diff --git a/dbms/tests/queries/0_stateless/00252_shard_global_in_aggregate_function.reference b/tests/queries/0_stateless/00252_shard_global_in_aggregate_function.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00252_shard_global_in_aggregate_function.reference
rename to tests/queries/0_stateless/00252_shard_global_in_aggregate_function.reference
diff --git a/dbms/tests/queries/0_stateless/00252_shard_global_in_aggregate_function.sql b/tests/queries/0_stateless/00252_shard_global_in_aggregate_function.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00252_shard_global_in_aggregate_function.sql
rename to tests/queries/0_stateless/00252_shard_global_in_aggregate_function.sql
diff --git a/dbms/tests/queries/0_stateless/00253_insert_recursive_defaults.reference b/tests/queries/0_stateless/00253_insert_recursive_defaults.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00253_insert_recursive_defaults.reference
rename to tests/queries/0_stateless/00253_insert_recursive_defaults.reference
diff --git a/dbms/tests/queries/0_stateless/00253_insert_recursive_defaults.sql b/tests/queries/0_stateless/00253_insert_recursive_defaults.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00253_insert_recursive_defaults.sql
rename to tests/queries/0_stateless/00253_insert_recursive_defaults.sql
diff --git a/dbms/tests/queries/0_stateless/00254_tuple_extremes.reference b/tests/queries/0_stateless/00254_tuple_extremes.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00254_tuple_extremes.reference
rename to tests/queries/0_stateless/00254_tuple_extremes.reference
diff --git a/dbms/tests/queries/0_stateless/00254_tuple_extremes.sql b/tests/queries/0_stateless/00254_tuple_extremes.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00254_tuple_extremes.sql
rename to tests/queries/0_stateless/00254_tuple_extremes.sql
diff --git a/dbms/tests/queries/0_stateless/00255_array_concat_string.reference b/tests/queries/0_stateless/00255_array_concat_string.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00255_array_concat_string.reference
rename to tests/queries/0_stateless/00255_array_concat_string.reference
diff --git a/dbms/tests/queries/0_stateless/00255_array_concat_string.sql b/tests/queries/0_stateless/00255_array_concat_string.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00255_array_concat_string.sql
rename to tests/queries/0_stateless/00255_array_concat_string.sql
diff --git a/dbms/tests/queries/0_stateless/00256_reverse.reference b/tests/queries/0_stateless/00256_reverse.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00256_reverse.reference
rename to tests/queries/0_stateless/00256_reverse.reference
diff --git a/dbms/tests/queries/0_stateless/00256_reverse.sql b/tests/queries/0_stateless/00256_reverse.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00256_reverse.sql
rename to tests/queries/0_stateless/00256_reverse.sql
diff --git a/dbms/tests/queries/0_stateless/00257_shard_no_aggregates_and_constant_keys.reference b/tests/queries/0_stateless/00257_shard_no_aggregates_and_constant_keys.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00257_shard_no_aggregates_and_constant_keys.reference
rename to tests/queries/0_stateless/00257_shard_no_aggregates_and_constant_keys.reference
diff --git a/dbms/tests/queries/0_stateless/00257_shard_no_aggregates_and_constant_keys.sql b/tests/queries/0_stateless/00257_shard_no_aggregates_and_constant_keys.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00257_shard_no_aggregates_and_constant_keys.sql
rename to tests/queries/0_stateless/00257_shard_no_aggregates_and_constant_keys.sql
diff --git a/dbms/tests/queries/0_stateless/00258_materializing_tuples.reference b/tests/queries/0_stateless/00258_materializing_tuples.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00258_materializing_tuples.reference
rename to tests/queries/0_stateless/00258_materializing_tuples.reference
diff --git a/dbms/tests/queries/0_stateless/00258_materializing_tuples.sql b/tests/queries/0_stateless/00258_materializing_tuples.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00258_materializing_tuples.sql
rename to tests/queries/0_stateless/00258_materializing_tuples.sql
diff --git a/dbms/tests/queries/0_stateless/00259_hashing_tuples.reference b/tests/queries/0_stateless/00259_hashing_tuples.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00259_hashing_tuples.reference
rename to tests/queries/0_stateless/00259_hashing_tuples.reference
diff --git a/dbms/tests/queries/0_stateless/00259_hashing_tuples.sql b/tests/queries/0_stateless/00259_hashing_tuples.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00259_hashing_tuples.sql
rename to tests/queries/0_stateless/00259_hashing_tuples.sql
diff --git a/dbms/tests/queries/0_stateless/00260_like_and_curly_braces.reference b/tests/queries/0_stateless/00260_like_and_curly_braces.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00260_like_and_curly_braces.reference
rename to tests/queries/0_stateless/00260_like_and_curly_braces.reference
diff --git a/dbms/tests/queries/0_stateless/00260_like_and_curly_braces.sql b/tests/queries/0_stateless/00260_like_and_curly_braces.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00260_like_and_curly_braces.sql
rename to tests/queries/0_stateless/00260_like_and_curly_braces.sql
diff --git a/dbms/tests/queries/0_stateless/00261_storage_aliases_and_array_join.reference b/tests/queries/0_stateless/00261_storage_aliases_and_array_join.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00261_storage_aliases_and_array_join.reference
rename to tests/queries/0_stateless/00261_storage_aliases_and_array_join.reference
diff --git a/dbms/tests/queries/0_stateless/00261_storage_aliases_and_array_join.sql b/tests/queries/0_stateless/00261_storage_aliases_and_array_join.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00261_storage_aliases_and_array_join.sql
rename to tests/queries/0_stateless/00261_storage_aliases_and_array_join.sql
diff --git a/dbms/tests/queries/0_stateless/00262_alter_alias.reference b/tests/queries/0_stateless/00262_alter_alias.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00262_alter_alias.reference
rename to tests/queries/0_stateless/00262_alter_alias.reference
diff --git a/dbms/tests/queries/0_stateless/00262_alter_alias.sql b/tests/queries/0_stateless/00262_alter_alias.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00262_alter_alias.sql
rename to tests/queries/0_stateless/00262_alter_alias.sql
diff --git a/dbms/tests/queries/0_stateless/00263_merge_aggregates_and_overflow.reference b/tests/queries/0_stateless/00263_merge_aggregates_and_overflow.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00263_merge_aggregates_and_overflow.reference
rename to tests/queries/0_stateless/00263_merge_aggregates_and_overflow.reference
diff --git a/dbms/tests/queries/0_stateless/00263_merge_aggregates_and_overflow.sql b/tests/queries/0_stateless/00263_merge_aggregates_and_overflow.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00263_merge_aggregates_and_overflow.sql
rename to tests/queries/0_stateless/00263_merge_aggregates_and_overflow.sql
diff --git a/dbms/tests/queries/0_stateless/00264_uniq_many_args.reference b/tests/queries/0_stateless/00264_uniq_many_args.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00264_uniq_many_args.reference
rename to tests/queries/0_stateless/00264_uniq_many_args.reference
diff --git a/dbms/tests/queries/0_stateless/00264_uniq_many_args.sql b/tests/queries/0_stateless/00264_uniq_many_args.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00264_uniq_many_args.sql
rename to tests/queries/0_stateless/00264_uniq_many_args.sql
diff --git a/dbms/tests/queries/0_stateless/00265_http_content_type_format_timezone.reference b/tests/queries/0_stateless/00265_http_content_type_format_timezone.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00265_http_content_type_format_timezone.reference
rename to tests/queries/0_stateless/00265_http_content_type_format_timezone.reference
diff --git a/dbms/tests/queries/0_stateless/00265_http_content_type_format_timezone.sh b/tests/queries/0_stateless/00265_http_content_type_format_timezone.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00265_http_content_type_format_timezone.sh
rename to tests/queries/0_stateless/00265_http_content_type_format_timezone.sh
diff --git a/dbms/tests/queries/0_stateless/00266_read_overflow_mode.reference b/tests/queries/0_stateless/00266_read_overflow_mode.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00266_read_overflow_mode.reference
rename to tests/queries/0_stateless/00266_read_overflow_mode.reference
diff --git a/dbms/tests/queries/0_stateless/00266_read_overflow_mode.sql b/tests/queries/0_stateless/00266_read_overflow_mode.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00266_read_overflow_mode.sql
rename to tests/queries/0_stateless/00266_read_overflow_mode.sql
diff --git a/dbms/tests/queries/0_stateless/00266_shard_global_subquery_and_aliases.reference b/tests/queries/0_stateless/00266_shard_global_subquery_and_aliases.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00266_shard_global_subquery_and_aliases.reference
rename to tests/queries/0_stateless/00266_shard_global_subquery_and_aliases.reference
diff --git a/dbms/tests/queries/0_stateless/00266_shard_global_subquery_and_aliases.sql b/tests/queries/0_stateless/00266_shard_global_subquery_and_aliases.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00266_shard_global_subquery_and_aliases.sql
rename to tests/queries/0_stateless/00266_shard_global_subquery_and_aliases.sql
diff --git a/dbms/tests/queries/0_stateless/00267_tuple_array_access_operators_priority.reference b/tests/queries/0_stateless/00267_tuple_array_access_operators_priority.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00267_tuple_array_access_operators_priority.reference
rename to tests/queries/0_stateless/00267_tuple_array_access_operators_priority.reference
diff --git a/dbms/tests/queries/0_stateless/00267_tuple_array_access_operators_priority.sql b/tests/queries/0_stateless/00267_tuple_array_access_operators_priority.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00267_tuple_array_access_operators_priority.sql
rename to tests/queries/0_stateless/00267_tuple_array_access_operators_priority.sql
diff --git a/dbms/tests/queries/0_stateless/00268_aliases_without_as_keyword.reference b/tests/queries/0_stateless/00268_aliases_without_as_keyword.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00268_aliases_without_as_keyword.reference
rename to tests/queries/0_stateless/00268_aliases_without_as_keyword.reference
diff --git a/dbms/tests/queries/0_stateless/00268_aliases_without_as_keyword.sql b/tests/queries/0_stateless/00268_aliases_without_as_keyword.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00268_aliases_without_as_keyword.sql
rename to tests/queries/0_stateless/00268_aliases_without_as_keyword.sql
diff --git a/dbms/tests/queries/0_stateless/00269_database_table_whitespace.reference b/tests/queries/0_stateless/00269_database_table_whitespace.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00269_database_table_whitespace.reference
rename to tests/queries/0_stateless/00269_database_table_whitespace.reference
diff --git a/dbms/tests/queries/0_stateless/00269_database_table_whitespace.sql b/tests/queries/0_stateless/00269_database_table_whitespace.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00269_database_table_whitespace.sql
rename to tests/queries/0_stateless/00269_database_table_whitespace.sql
diff --git a/dbms/tests/queries/0_stateless/00270_views_query_processing_stage.reference b/tests/queries/0_stateless/00270_views_query_processing_stage.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00270_views_query_processing_stage.reference
rename to tests/queries/0_stateless/00270_views_query_processing_stage.reference
diff --git a/dbms/tests/queries/0_stateless/00270_views_query_processing_stage.sql b/tests/queries/0_stateless/00270_views_query_processing_stage.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00270_views_query_processing_stage.sql
rename to tests/queries/0_stateless/00270_views_query_processing_stage.sql
diff --git a/dbms/tests/queries/0_stateless/00271_agg_state_and_totals.reference b/tests/queries/0_stateless/00271_agg_state_and_totals.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00271_agg_state_and_totals.reference
rename to tests/queries/0_stateless/00271_agg_state_and_totals.reference
diff --git a/dbms/tests/queries/0_stateless/00271_agg_state_and_totals.sql b/tests/queries/0_stateless/00271_agg_state_and_totals.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00271_agg_state_and_totals.sql
rename to tests/queries/0_stateless/00271_agg_state_and_totals.sql
diff --git a/dbms/tests/queries/0_stateless/00272_union_all_and_in_subquery.reference b/tests/queries/0_stateless/00272_union_all_and_in_subquery.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00272_union_all_and_in_subquery.reference
rename to tests/queries/0_stateless/00272_union_all_and_in_subquery.reference
diff --git a/dbms/tests/queries/0_stateless/00272_union_all_and_in_subquery.sql b/tests/queries/0_stateless/00272_union_all_and_in_subquery.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00272_union_all_and_in_subquery.sql
rename to tests/queries/0_stateless/00272_union_all_and_in_subquery.sql
diff --git a/dbms/tests/queries/0_stateless/00273_quantiles.reference b/tests/queries/0_stateless/00273_quantiles.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00273_quantiles.reference
rename to tests/queries/0_stateless/00273_quantiles.reference
diff --git a/dbms/tests/queries/0_stateless/00273_quantiles.sql b/tests/queries/0_stateless/00273_quantiles.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00273_quantiles.sql
rename to tests/queries/0_stateless/00273_quantiles.sql
diff --git a/dbms/tests/queries/0_stateless/00274_shard_group_array.reference b/tests/queries/0_stateless/00274_shard_group_array.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00274_shard_group_array.reference
rename to tests/queries/0_stateless/00274_shard_group_array.reference
diff --git a/dbms/tests/queries/0_stateless/00274_shard_group_array.sql b/tests/queries/0_stateless/00274_shard_group_array.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00274_shard_group_array.sql
rename to tests/queries/0_stateless/00274_shard_group_array.sql
diff --git a/dbms/tests/queries/0_stateless/00275_shard_quantiles_weighted.reference b/tests/queries/0_stateless/00275_shard_quantiles_weighted.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00275_shard_quantiles_weighted.reference
rename to tests/queries/0_stateless/00275_shard_quantiles_weighted.reference
diff --git a/dbms/tests/queries/0_stateless/00275_shard_quantiles_weighted.sql b/tests/queries/0_stateless/00275_shard_quantiles_weighted.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00275_shard_quantiles_weighted.sql
rename to tests/queries/0_stateless/00275_shard_quantiles_weighted.sql
diff --git a/dbms/tests/queries/0_stateless/00276_sample.reference b/tests/queries/0_stateless/00276_sample.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00276_sample.reference
rename to tests/queries/0_stateless/00276_sample.reference
diff --git a/dbms/tests/queries/0_stateless/00276_sample.sql b/tests/queries/0_stateless/00276_sample.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00276_sample.sql
rename to tests/queries/0_stateless/00276_sample.sql
diff --git a/dbms/tests/queries/0_stateless/00277_array_filter.reference b/tests/queries/0_stateless/00277_array_filter.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00277_array_filter.reference
rename to tests/queries/0_stateless/00277_array_filter.reference
diff --git a/dbms/tests/queries/0_stateless/00277_array_filter.sql b/tests/queries/0_stateless/00277_array_filter.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00277_array_filter.sql
rename to tests/queries/0_stateless/00277_array_filter.sql
diff --git a/dbms/tests/queries/0_stateless/00278_insert_already_sorted.reference b/tests/queries/0_stateless/00278_insert_already_sorted.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00278_insert_already_sorted.reference
rename to tests/queries/0_stateless/00278_insert_already_sorted.reference
diff --git a/dbms/tests/queries/0_stateless/00278_insert_already_sorted.sql b/tests/queries/0_stateless/00278_insert_already_sorted.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00278_insert_already_sorted.sql
rename to tests/queries/0_stateless/00278_insert_already_sorted.sql
diff --git a/dbms/tests/queries/0_stateless/00279_quantiles_permuted_args.reference b/tests/queries/0_stateless/00279_quantiles_permuted_args.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00279_quantiles_permuted_args.reference
rename to tests/queries/0_stateless/00279_quantiles_permuted_args.reference
diff --git a/dbms/tests/queries/0_stateless/00279_quantiles_permuted_args.sql b/tests/queries/0_stateless/00279_quantiles_permuted_args.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00279_quantiles_permuted_args.sql
rename to tests/queries/0_stateless/00279_quantiles_permuted_args.sql
diff --git a/dbms/tests/queries/0_stateless/00280_hex_escape_sequence.reference b/tests/queries/0_stateless/00280_hex_escape_sequence.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00280_hex_escape_sequence.reference
rename to tests/queries/0_stateless/00280_hex_escape_sequence.reference
diff --git a/dbms/tests/queries/0_stateless/00280_hex_escape_sequence.sql b/tests/queries/0_stateless/00280_hex_escape_sequence.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00280_hex_escape_sequence.sql
rename to tests/queries/0_stateless/00280_hex_escape_sequence.sql
diff --git a/dbms/tests/queries/0_stateless/00281_compile_sizeof_packed.re b/tests/queries/0_stateless/00281_compile_sizeof_packed.re
similarity index 100%
rename from dbms/tests/queries/0_stateless/00281_compile_sizeof_packed.re
rename to tests/queries/0_stateless/00281_compile_sizeof_packed.re
diff --git a/dbms/tests/queries/0_stateless/00282_merging.reference b/tests/queries/0_stateless/00282_merging.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00282_merging.reference
rename to tests/queries/0_stateless/00282_merging.reference
diff --git a/dbms/tests/queries/0_stateless/00282_merging.sql b/tests/queries/0_stateless/00282_merging.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00282_merging.sql
rename to tests/queries/0_stateless/00282_merging.sql
diff --git a/dbms/tests/queries/0_stateless/00283_column_cut.reference b/tests/queries/0_stateless/00283_column_cut.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00283_column_cut.reference
rename to tests/queries/0_stateless/00283_column_cut.reference
diff --git a/dbms/tests/queries/0_stateless/00283_column_cut.sql b/tests/queries/0_stateless/00283_column_cut.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00283_column_cut.sql
rename to tests/queries/0_stateless/00283_column_cut.sql
diff --git a/dbms/tests/queries/0_stateless/00284_external_aggregation.reference b/tests/queries/0_stateless/00284_external_aggregation.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00284_external_aggregation.reference
rename to tests/queries/0_stateless/00284_external_aggregation.reference
diff --git a/dbms/tests/queries/0_stateless/00284_external_aggregation.sql b/tests/queries/0_stateless/00284_external_aggregation.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00284_external_aggregation.sql
rename to tests/queries/0_stateless/00284_external_aggregation.sql
diff --git a/dbms/tests/queries/0_stateless/00285_not_all_data_in_totals.reference b/tests/queries/0_stateless/00285_not_all_data_in_totals.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00285_not_all_data_in_totals.reference
rename to tests/queries/0_stateless/00285_not_all_data_in_totals.reference
diff --git a/dbms/tests/queries/0_stateless/00285_not_all_data_in_totals.sql b/tests/queries/0_stateless/00285_not_all_data_in_totals.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00285_not_all_data_in_totals.sql
rename to tests/queries/0_stateless/00285_not_all_data_in_totals.sql
diff --git a/dbms/tests/queries/0_stateless/00286_format_long_negative_float.reference b/tests/queries/0_stateless/00286_format_long_negative_float.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00286_format_long_negative_float.reference
rename to tests/queries/0_stateless/00286_format_long_negative_float.reference
diff --git a/dbms/tests/queries/0_stateless/00286_format_long_negative_float.sql b/tests/queries/0_stateless/00286_format_long_negative_float.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00286_format_long_negative_float.sql
rename to tests/queries/0_stateless/00286_format_long_negative_float.sql
diff --git a/dbms/tests/queries/0_stateless/00287_column_const_with_nan.reference b/tests/queries/0_stateless/00287_column_const_with_nan.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00287_column_const_with_nan.reference
rename to tests/queries/0_stateless/00287_column_const_with_nan.reference
diff --git a/dbms/tests/queries/0_stateless/00287_column_const_with_nan.sql b/tests/queries/0_stateless/00287_column_const_with_nan.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00287_column_const_with_nan.sql
rename to tests/queries/0_stateless/00287_column_const_with_nan.sql
diff --git a/dbms/tests/queries/0_stateless/00288_empty_stripelog.reference b/tests/queries/0_stateless/00288_empty_stripelog.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00288_empty_stripelog.reference
rename to tests/queries/0_stateless/00288_empty_stripelog.reference
diff --git a/dbms/tests/queries/0_stateless/00288_empty_stripelog.sql b/tests/queries/0_stateless/00288_empty_stripelog.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00288_empty_stripelog.sql
rename to tests/queries/0_stateless/00288_empty_stripelog.sql
diff --git a/dbms/tests/queries/0_stateless/00290_shard_aggregation_memory_efficient.reference b/tests/queries/0_stateless/00290_shard_aggregation_memory_efficient.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00290_shard_aggregation_memory_efficient.reference
rename to tests/queries/0_stateless/00290_shard_aggregation_memory_efficient.reference
diff --git a/dbms/tests/queries/0_stateless/00290_shard_aggregation_memory_efficient.sql b/tests/queries/0_stateless/00290_shard_aggregation_memory_efficient.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00290_shard_aggregation_memory_efficient.sql
rename to tests/queries/0_stateless/00290_shard_aggregation_memory_efficient.sql
diff --git a/dbms/tests/queries/0_stateless/00291_array_reduce.reference b/tests/queries/0_stateless/00291_array_reduce.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00291_array_reduce.reference
rename to tests/queries/0_stateless/00291_array_reduce.reference
diff --git a/dbms/tests/queries/0_stateless/00291_array_reduce.sql b/tests/queries/0_stateless/00291_array_reduce.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00291_array_reduce.sql
rename to tests/queries/0_stateless/00291_array_reduce.sql
diff --git a/dbms/tests/queries/0_stateless/00292_parser_tuple_element.reference b/tests/queries/0_stateless/00292_parser_tuple_element.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00292_parser_tuple_element.reference
rename to tests/queries/0_stateless/00292_parser_tuple_element.reference
diff --git a/dbms/tests/queries/0_stateless/00292_parser_tuple_element.sql b/tests/queries/0_stateless/00292_parser_tuple_element.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00292_parser_tuple_element.sql
rename to tests/queries/0_stateless/00292_parser_tuple_element.sql
diff --git a/dbms/tests/queries/0_stateless/00293_shard_max_subquery_depth.reference b/tests/queries/0_stateless/00293_shard_max_subquery_depth.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00293_shard_max_subquery_depth.reference
rename to tests/queries/0_stateless/00293_shard_max_subquery_depth.reference
diff --git a/dbms/tests/queries/0_stateless/00293_shard_max_subquery_depth.sql b/tests/queries/0_stateless/00293_shard_max_subquery_depth.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00293_shard_max_subquery_depth.sql
rename to tests/queries/0_stateless/00293_shard_max_subquery_depth.sql
diff --git a/dbms/tests/queries/0_stateless/00294_shard_enums.reference b/tests/queries/0_stateless/00294_shard_enums.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00294_shard_enums.reference
rename to tests/queries/0_stateless/00294_shard_enums.reference
diff --git a/dbms/tests/queries/0_stateless/00294_shard_enums.sql b/tests/queries/0_stateless/00294_shard_enums.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00294_shard_enums.sql
rename to tests/queries/0_stateless/00294_shard_enums.sql
diff --git a/dbms/tests/queries/0_stateless/00295_global_in_one_shard_rows_before_limit.reference b/tests/queries/0_stateless/00295_global_in_one_shard_rows_before_limit.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00295_global_in_one_shard_rows_before_limit.reference
rename to tests/queries/0_stateless/00295_global_in_one_shard_rows_before_limit.reference
diff --git a/dbms/tests/queries/0_stateless/00295_global_in_one_shard_rows_before_limit.sql b/tests/queries/0_stateless/00295_global_in_one_shard_rows_before_limit.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00295_global_in_one_shard_rows_before_limit.sql
rename to tests/queries/0_stateless/00295_global_in_one_shard_rows_before_limit.sql
diff --git a/dbms/tests/queries/0_stateless/00296_url_parameters.reference b/tests/queries/0_stateless/00296_url_parameters.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00296_url_parameters.reference
rename to tests/queries/0_stateless/00296_url_parameters.reference
diff --git a/dbms/tests/queries/0_stateless/00296_url_parameters.sql b/tests/queries/0_stateless/00296_url_parameters.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00296_url_parameters.sql
rename to tests/queries/0_stateless/00296_url_parameters.sql
diff --git a/dbms/tests/queries/0_stateless/00297_attach_negative_numbers_zookeeper.reference b/tests/queries/0_stateless/00297_attach_negative_numbers_zookeeper.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00297_attach_negative_numbers_zookeeper.reference
rename to tests/queries/0_stateless/00297_attach_negative_numbers_zookeeper.reference
diff --git a/dbms/tests/queries/0_stateless/00297_attach_negative_numbers_zookeeper.sql.disabled b/tests/queries/0_stateless/00297_attach_negative_numbers_zookeeper.sql.disabled
similarity index 100%
rename from dbms/tests/queries/0_stateless/00297_attach_negative_numbers_zookeeper.sql.disabled
rename to tests/queries/0_stateless/00297_attach_negative_numbers_zookeeper.sql.disabled
diff --git a/dbms/tests/queries/0_stateless/00298_enum_width_and_cast.reference b/tests/queries/0_stateless/00298_enum_width_and_cast.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00298_enum_width_and_cast.reference
rename to tests/queries/0_stateless/00298_enum_width_and_cast.reference
diff --git a/dbms/tests/queries/0_stateless/00298_enum_width_and_cast.sql b/tests/queries/0_stateless/00298_enum_width_and_cast.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00298_enum_width_and_cast.sql
rename to tests/queries/0_stateless/00298_enum_width_and_cast.sql
diff --git a/dbms/tests/queries/0_stateless/00299_stripe_log_multiple_inserts.reference b/tests/queries/0_stateless/00299_stripe_log_multiple_inserts.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00299_stripe_log_multiple_inserts.reference
rename to tests/queries/0_stateless/00299_stripe_log_multiple_inserts.reference
diff --git a/dbms/tests/queries/0_stateless/00299_stripe_log_multiple_inserts.sql b/tests/queries/0_stateless/00299_stripe_log_multiple_inserts.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00299_stripe_log_multiple_inserts.sql
rename to tests/queries/0_stateless/00299_stripe_log_multiple_inserts.sql
diff --git a/dbms/tests/queries/0_stateless/00300_csv.reference b/tests/queries/0_stateless/00300_csv.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00300_csv.reference
rename to tests/queries/0_stateless/00300_csv.reference
diff --git a/dbms/tests/queries/0_stateless/00300_csv.sql b/tests/queries/0_stateless/00300_csv.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00300_csv.sql
rename to tests/queries/0_stateless/00300_csv.sql
diff --git a/dbms/tests/queries/0_stateless/00301_csv.reference b/tests/queries/0_stateless/00301_csv.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00301_csv.reference
rename to tests/queries/0_stateless/00301_csv.reference
diff --git a/dbms/tests/queries/0_stateless/00301_csv.sh b/tests/queries/0_stateless/00301_csv.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00301_csv.sh
rename to tests/queries/0_stateless/00301_csv.sh
diff --git a/dbms/tests/queries/0_stateless/00302_http_compression.reference b/tests/queries/0_stateless/00302_http_compression.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00302_http_compression.reference
rename to tests/queries/0_stateless/00302_http_compression.reference
diff --git a/dbms/tests/queries/0_stateless/00302_http_compression.sh b/tests/queries/0_stateless/00302_http_compression.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00302_http_compression.sh
rename to tests/queries/0_stateless/00302_http_compression.sh
diff --git a/dbms/tests/queries/0_stateless/00304_http_external_data.reference b/tests/queries/0_stateless/00304_http_external_data.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00304_http_external_data.reference
rename to tests/queries/0_stateless/00304_http_external_data.reference
diff --git a/dbms/tests/queries/0_stateless/00304_http_external_data.sh b/tests/queries/0_stateless/00304_http_external_data.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00304_http_external_data.sh
rename to tests/queries/0_stateless/00304_http_external_data.sh
diff --git a/dbms/tests/queries/0_stateless/00305_http_and_readonly.reference b/tests/queries/0_stateless/00305_http_and_readonly.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00305_http_and_readonly.reference
rename to tests/queries/0_stateless/00305_http_and_readonly.reference
diff --git a/dbms/tests/queries/0_stateless/00305_http_and_readonly.sh b/tests/queries/0_stateless/00305_http_and_readonly.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00305_http_and_readonly.sh
rename to tests/queries/0_stateless/00305_http_and_readonly.sh
diff --git a/dbms/tests/queries/0_stateless/00306_insert_values_and_expressions.reference b/tests/queries/0_stateless/00306_insert_values_and_expressions.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00306_insert_values_and_expressions.reference
rename to tests/queries/0_stateless/00306_insert_values_and_expressions.reference
diff --git a/dbms/tests/queries/0_stateless/00306_insert_values_and_expressions.sql b/tests/queries/0_stateless/00306_insert_values_and_expressions.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00306_insert_values_and_expressions.sql
rename to tests/queries/0_stateless/00306_insert_values_and_expressions.sql
diff --git a/dbms/tests/queries/0_stateless/00307_format_xml.reference b/tests/queries/0_stateless/00307_format_xml.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00307_format_xml.reference
rename to tests/queries/0_stateless/00307_format_xml.reference
diff --git a/dbms/tests/queries/0_stateless/00307_format_xml.sql b/tests/queries/0_stateless/00307_format_xml.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00307_format_xml.sql
rename to tests/queries/0_stateless/00307_format_xml.sql
diff --git a/dbms/tests/queries/0_stateless/00308_write_buffer_valid_utf8.reference b/tests/queries/0_stateless/00308_write_buffer_valid_utf8.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00308_write_buffer_valid_utf8.reference
rename to tests/queries/0_stateless/00308_write_buffer_valid_utf8.reference
diff --git a/dbms/tests/queries/0_stateless/00308_write_buffer_valid_utf8.sql b/tests/queries/0_stateless/00308_write_buffer_valid_utf8.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00308_write_buffer_valid_utf8.sql
rename to tests/queries/0_stateless/00308_write_buffer_valid_utf8.sql
diff --git a/dbms/tests/queries/0_stateless/00309_formats.reference b/tests/queries/0_stateless/00309_formats.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00309_formats.reference
rename to tests/queries/0_stateless/00309_formats.reference
diff --git a/dbms/tests/queries/0_stateless/00309_formats.sql b/tests/queries/0_stateless/00309_formats.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00309_formats.sql
rename to tests/queries/0_stateless/00309_formats.sql
diff --git a/dbms/tests/queries/0_stateless/00310_tskv.reference b/tests/queries/0_stateless/00310_tskv.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00310_tskv.reference
rename to tests/queries/0_stateless/00310_tskv.reference
diff --git a/dbms/tests/queries/0_stateless/00310_tskv.sh b/tests/queries/0_stateless/00310_tskv.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00310_tskv.sh
rename to tests/queries/0_stateless/00310_tskv.sh
diff --git a/dbms/tests/queries/0_stateless/00311_array_primary_key.reference b/tests/queries/0_stateless/00311_array_primary_key.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00311_array_primary_key.reference
rename to tests/queries/0_stateless/00311_array_primary_key.reference
diff --git a/dbms/tests/queries/0_stateless/00311_array_primary_key.sql b/tests/queries/0_stateless/00311_array_primary_key.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00311_array_primary_key.sql
rename to tests/queries/0_stateless/00311_array_primary_key.sql
diff --git a/dbms/tests/queries/0_stateless/00312_position_case_insensitive_utf8.reference b/tests/queries/0_stateless/00312_position_case_insensitive_utf8.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00312_position_case_insensitive_utf8.reference
rename to tests/queries/0_stateless/00312_position_case_insensitive_utf8.reference
diff --git a/dbms/tests/queries/0_stateless/00312_position_case_insensitive_utf8.sql b/tests/queries/0_stateless/00312_position_case_insensitive_utf8.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00312_position_case_insensitive_utf8.sql
rename to tests/queries/0_stateless/00312_position_case_insensitive_utf8.sql
diff --git a/dbms/tests/queries/0_stateless/00313_const_totals_extremes.reference b/tests/queries/0_stateless/00313_const_totals_extremes.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00313_const_totals_extremes.reference
rename to tests/queries/0_stateless/00313_const_totals_extremes.reference
diff --git a/dbms/tests/queries/0_stateless/00313_const_totals_extremes.sh b/tests/queries/0_stateless/00313_const_totals_extremes.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00313_const_totals_extremes.sh
rename to tests/queries/0_stateless/00313_const_totals_extremes.sh
diff --git a/dbms/tests/queries/0_stateless/00314_sample_factor_virtual_column.reference b/tests/queries/0_stateless/00314_sample_factor_virtual_column.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00314_sample_factor_virtual_column.reference
rename to tests/queries/0_stateless/00314_sample_factor_virtual_column.reference
diff --git a/dbms/tests/queries/0_stateless/00314_sample_factor_virtual_column.sql b/tests/queries/0_stateless/00314_sample_factor_virtual_column.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00314_sample_factor_virtual_column.sql
rename to tests/queries/0_stateless/00314_sample_factor_virtual_column.sql
diff --git a/dbms/tests/queries/0_stateless/00315_quantile_off_by_one.reference b/tests/queries/0_stateless/00315_quantile_off_by_one.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00315_quantile_off_by_one.reference
rename to tests/queries/0_stateless/00315_quantile_off_by_one.reference
diff --git a/dbms/tests/queries/0_stateless/00315_quantile_off_by_one.sql b/tests/queries/0_stateless/00315_quantile_off_by_one.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00315_quantile_off_by_one.sql
rename to tests/queries/0_stateless/00315_quantile_off_by_one.sql
diff --git a/dbms/tests/queries/0_stateless/00316_rounding_functions_and_empty_block.reference b/tests/queries/0_stateless/00316_rounding_functions_and_empty_block.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00316_rounding_functions_and_empty_block.reference
rename to tests/queries/0_stateless/00316_rounding_functions_and_empty_block.reference
diff --git a/dbms/tests/queries/0_stateless/00316_rounding_functions_and_empty_block.sql b/tests/queries/0_stateless/00316_rounding_functions_and_empty_block.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00316_rounding_functions_and_empty_block.sql
rename to tests/queries/0_stateless/00316_rounding_functions_and_empty_block.sql
diff --git a/dbms/tests/queries/0_stateless/00317_in_tuples_and_out_of_range_values.reference b/tests/queries/0_stateless/00317_in_tuples_and_out_of_range_values.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00317_in_tuples_and_out_of_range_values.reference
rename to tests/queries/0_stateless/00317_in_tuples_and_out_of_range_values.reference
diff --git a/dbms/tests/queries/0_stateless/00317_in_tuples_and_out_of_range_values.sql b/tests/queries/0_stateless/00317_in_tuples_and_out_of_range_values.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00317_in_tuples_and_out_of_range_values.sql
rename to tests/queries/0_stateless/00317_in_tuples_and_out_of_range_values.sql
diff --git a/dbms/tests/queries/0_stateless/00318_pk_tuple_order.reference b/tests/queries/0_stateless/00318_pk_tuple_order.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00318_pk_tuple_order.reference
rename to tests/queries/0_stateless/00318_pk_tuple_order.reference
diff --git a/dbms/tests/queries/0_stateless/00318_pk_tuple_order.sql b/tests/queries/0_stateless/00318_pk_tuple_order.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00318_pk_tuple_order.sql
rename to tests/queries/0_stateless/00318_pk_tuple_order.sql
diff --git a/dbms/tests/queries/0_stateless/00319_index_for_like.reference b/tests/queries/0_stateless/00319_index_for_like.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00319_index_for_like.reference
rename to tests/queries/0_stateless/00319_index_for_like.reference
diff --git a/dbms/tests/queries/0_stateless/00319_index_for_like.sql b/tests/queries/0_stateless/00319_index_for_like.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00319_index_for_like.sql
rename to tests/queries/0_stateless/00319_index_for_like.sql
diff --git a/dbms/tests/queries/0_stateless/00320_between.reference b/tests/queries/0_stateless/00320_between.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00320_between.reference
rename to tests/queries/0_stateless/00320_between.reference
diff --git a/dbms/tests/queries/0_stateless/00320_between.sql b/tests/queries/0_stateless/00320_between.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00320_between.sql
rename to tests/queries/0_stateless/00320_between.sql
diff --git a/dbms/tests/queries/0_stateless/00321_pk_set.reference b/tests/queries/0_stateless/00321_pk_set.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00321_pk_set.reference
rename to tests/queries/0_stateless/00321_pk_set.reference
diff --git a/dbms/tests/queries/0_stateless/00321_pk_set.sql b/tests/queries/0_stateless/00321_pk_set.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00321_pk_set.sql
rename to tests/queries/0_stateless/00321_pk_set.sql
diff --git a/dbms/tests/queries/0_stateless/00322_disable_checksumming.reference b/tests/queries/0_stateless/00322_disable_checksumming.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00322_disable_checksumming.reference
rename to tests/queries/0_stateless/00322_disable_checksumming.reference
diff --git a/dbms/tests/queries/0_stateless/00322_disable_checksumming.sh b/tests/queries/0_stateless/00322_disable_checksumming.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00322_disable_checksumming.sh
rename to tests/queries/0_stateless/00322_disable_checksumming.sh
diff --git a/dbms/tests/queries/0_stateless/00323_quantiles_timing_bug.reference b/tests/queries/0_stateless/00323_quantiles_timing_bug.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00323_quantiles_timing_bug.reference
rename to tests/queries/0_stateless/00323_quantiles_timing_bug.reference
diff --git a/dbms/tests/queries/0_stateless/00323_quantiles_timing_bug.sql b/tests/queries/0_stateless/00323_quantiles_timing_bug.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00323_quantiles_timing_bug.sql
rename to tests/queries/0_stateless/00323_quantiles_timing_bug.sql
diff --git a/dbms/tests/queries/0_stateless/00324_hashing_enums.reference b/tests/queries/0_stateless/00324_hashing_enums.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00324_hashing_enums.reference
rename to tests/queries/0_stateless/00324_hashing_enums.reference
diff --git a/dbms/tests/queries/0_stateless/00324_hashing_enums.sql b/tests/queries/0_stateless/00324_hashing_enums.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00324_hashing_enums.sql
rename to tests/queries/0_stateless/00324_hashing_enums.sql
diff --git a/dbms/tests/queries/0_stateless/00325_replacing_merge_tree.reference b/tests/queries/0_stateless/00325_replacing_merge_tree.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00325_replacing_merge_tree.reference
rename to tests/queries/0_stateless/00325_replacing_merge_tree.reference
diff --git a/dbms/tests/queries/0_stateless/00325_replacing_merge_tree.sql.disabled b/tests/queries/0_stateless/00325_replacing_merge_tree.sql.disabled
similarity index 100%
rename from dbms/tests/queries/0_stateless/00325_replacing_merge_tree.sql.disabled
rename to tests/queries/0_stateless/00325_replacing_merge_tree.sql.disabled
diff --git a/dbms/tests/queries/0_stateless/00326_long_function_multi_if.reference b/tests/queries/0_stateless/00326_long_function_multi_if.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00326_long_function_multi_if.reference
rename to tests/queries/0_stateless/00326_long_function_multi_if.reference
diff --git a/dbms/tests/queries/0_stateless/00326_long_function_multi_if.sql b/tests/queries/0_stateless/00326_long_function_multi_if.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00326_long_function_multi_if.sql
rename to tests/queries/0_stateless/00326_long_function_multi_if.sql
diff --git a/dbms/tests/queries/0_stateless/00327_summing_composite_nested.reference b/tests/queries/0_stateless/00327_summing_composite_nested.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00327_summing_composite_nested.reference
rename to tests/queries/0_stateless/00327_summing_composite_nested.reference
diff --git a/dbms/tests/queries/0_stateless/00327_summing_composite_nested.sql b/tests/queries/0_stateless/00327_summing_composite_nested.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00327_summing_composite_nested.sql
rename to tests/queries/0_stateless/00327_summing_composite_nested.sql
diff --git a/dbms/tests/queries/0_stateless/00328_long_case_construction.reference b/tests/queries/0_stateless/00328_long_case_construction.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00328_long_case_construction.reference
rename to tests/queries/0_stateless/00328_long_case_construction.reference
diff --git a/dbms/tests/queries/0_stateless/00328_long_case_construction.sql b/tests/queries/0_stateless/00328_long_case_construction.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00328_long_case_construction.sql
rename to tests/queries/0_stateless/00328_long_case_construction.sql
diff --git a/dbms/tests/queries/0_stateless/00330_view_subqueries.reference b/tests/queries/0_stateless/00330_view_subqueries.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00330_view_subqueries.reference
rename to tests/queries/0_stateless/00330_view_subqueries.reference
diff --git a/dbms/tests/queries/0_stateless/00330_view_subqueries.sql b/tests/queries/0_stateless/00330_view_subqueries.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00330_view_subqueries.sql
rename to tests/queries/0_stateless/00330_view_subqueries.sql
diff --git a/dbms/tests/queries/0_stateless/00331_final_and_prewhere.reference b/tests/queries/0_stateless/00331_final_and_prewhere.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00331_final_and_prewhere.reference
rename to tests/queries/0_stateless/00331_final_and_prewhere.reference
diff --git a/dbms/tests/queries/0_stateless/00331_final_and_prewhere.sql b/tests/queries/0_stateless/00331_final_and_prewhere.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00331_final_and_prewhere.sql
rename to tests/queries/0_stateless/00331_final_and_prewhere.sql
diff --git a/dbms/tests/queries/0_stateless/00332_quantile_timing_memory_leak.reference b/tests/queries/0_stateless/00332_quantile_timing_memory_leak.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00332_quantile_timing_memory_leak.reference
rename to tests/queries/0_stateless/00332_quantile_timing_memory_leak.reference
diff --git a/dbms/tests/queries/0_stateless/00332_quantile_timing_memory_leak.sql b/tests/queries/0_stateless/00332_quantile_timing_memory_leak.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00332_quantile_timing_memory_leak.sql
rename to tests/queries/0_stateless/00332_quantile_timing_memory_leak.sql
diff --git a/dbms/tests/queries/0_stateless/00333_parser_number_bug.reference b/tests/queries/0_stateless/00333_parser_number_bug.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00333_parser_number_bug.reference
rename to tests/queries/0_stateless/00333_parser_number_bug.reference
diff --git a/dbms/tests/queries/0_stateless/00333_parser_number_bug.sql b/tests/queries/0_stateless/00333_parser_number_bug.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00333_parser_number_bug.sql
rename to tests/queries/0_stateless/00333_parser_number_bug.sql
diff --git a/dbms/tests/queries/0_stateless/00334_column_aggregate_function_limit.reference b/tests/queries/0_stateless/00334_column_aggregate_function_limit.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00334_column_aggregate_function_limit.reference
rename to tests/queries/0_stateless/00334_column_aggregate_function_limit.reference
diff --git a/dbms/tests/queries/0_stateless/00334_column_aggregate_function_limit.sql b/tests/queries/0_stateless/00334_column_aggregate_function_limit.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00334_column_aggregate_function_limit.sql
rename to tests/queries/0_stateless/00334_column_aggregate_function_limit.sql
diff --git a/dbms/tests/queries/0_stateless/00335_bom.reference b/tests/queries/0_stateless/00335_bom.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00335_bom.reference
rename to tests/queries/0_stateless/00335_bom.reference
diff --git a/dbms/tests/queries/0_stateless/00335_bom.sh b/tests/queries/0_stateless/00335_bom.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00335_bom.sh
rename to tests/queries/0_stateless/00335_bom.sh
diff --git a/dbms/tests/queries/0_stateless/00336_shard_stack_trace.reference b/tests/queries/0_stateless/00336_shard_stack_trace.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00336_shard_stack_trace.reference
rename to tests/queries/0_stateless/00336_shard_stack_trace.reference
diff --git a/dbms/tests/queries/0_stateless/00336_shard_stack_trace.sh b/tests/queries/0_stateless/00336_shard_stack_trace.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00336_shard_stack_trace.sh
rename to tests/queries/0_stateless/00336_shard_stack_trace.sh
diff --git a/dbms/tests/queries/0_stateless/00337_shard_any_heavy.reference b/tests/queries/0_stateless/00337_shard_any_heavy.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00337_shard_any_heavy.reference
rename to tests/queries/0_stateless/00337_shard_any_heavy.reference
diff --git a/dbms/tests/queries/0_stateless/00337_shard_any_heavy.sql b/tests/queries/0_stateless/00337_shard_any_heavy.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00337_shard_any_heavy.sql
rename to tests/queries/0_stateless/00337_shard_any_heavy.sql
diff --git a/dbms/tests/queries/0_stateless/00338_replicate_array_of_strings.reference b/tests/queries/0_stateless/00338_replicate_array_of_strings.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00338_replicate_array_of_strings.reference
rename to tests/queries/0_stateless/00338_replicate_array_of_strings.reference
diff --git a/dbms/tests/queries/0_stateless/00338_replicate_array_of_strings.sql b/tests/queries/0_stateless/00338_replicate_array_of_strings.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00338_replicate_array_of_strings.sql
rename to tests/queries/0_stateless/00338_replicate_array_of_strings.sql
diff --git a/dbms/tests/queries/0_stateless/00339_parsing_bad_arrays.reference b/tests/queries/0_stateless/00339_parsing_bad_arrays.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00339_parsing_bad_arrays.reference
rename to tests/queries/0_stateless/00339_parsing_bad_arrays.reference
diff --git a/dbms/tests/queries/0_stateless/00339_parsing_bad_arrays.sh b/tests/queries/0_stateless/00339_parsing_bad_arrays.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00339_parsing_bad_arrays.sh
rename to tests/queries/0_stateless/00339_parsing_bad_arrays.sh
diff --git a/dbms/tests/queries/0_stateless/00340_squashing_insert_select.reference b/tests/queries/0_stateless/00340_squashing_insert_select.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00340_squashing_insert_select.reference
rename to tests/queries/0_stateless/00340_squashing_insert_select.reference
diff --git a/dbms/tests/queries/0_stateless/00340_squashing_insert_select.sql b/tests/queries/0_stateless/00340_squashing_insert_select.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00340_squashing_insert_select.sql
rename to tests/queries/0_stateless/00340_squashing_insert_select.sql
diff --git a/dbms/tests/queries/0_stateless/00341_squashing_insert_select2.reference b/tests/queries/0_stateless/00341_squashing_insert_select2.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00341_squashing_insert_select2.reference
rename to tests/queries/0_stateless/00341_squashing_insert_select2.reference
diff --git a/dbms/tests/queries/0_stateless/00341_squashing_insert_select2.sql b/tests/queries/0_stateless/00341_squashing_insert_select2.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00341_squashing_insert_select2.sql
rename to tests/queries/0_stateless/00341_squashing_insert_select2.sql
diff --git a/dbms/tests/queries/0_stateless/00342_escape_sequences.reference b/tests/queries/0_stateless/00342_escape_sequences.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00342_escape_sequences.reference
rename to tests/queries/0_stateless/00342_escape_sequences.reference
diff --git a/dbms/tests/queries/0_stateless/00342_escape_sequences.sql b/tests/queries/0_stateless/00342_escape_sequences.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00342_escape_sequences.sql
rename to tests/queries/0_stateless/00342_escape_sequences.sql
diff --git a/dbms/tests/queries/0_stateless/00343_array_element_generic.reference b/tests/queries/0_stateless/00343_array_element_generic.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00343_array_element_generic.reference
rename to tests/queries/0_stateless/00343_array_element_generic.reference
diff --git a/dbms/tests/queries/0_stateless/00343_array_element_generic.sql b/tests/queries/0_stateless/00343_array_element_generic.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00343_array_element_generic.sql
rename to tests/queries/0_stateless/00343_array_element_generic.sql
diff --git a/dbms/tests/queries/0_stateless/00344_row_number_in_all_blocks.reference b/tests/queries/0_stateless/00344_row_number_in_all_blocks.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00344_row_number_in_all_blocks.reference
rename to tests/queries/0_stateless/00344_row_number_in_all_blocks.reference
diff --git a/dbms/tests/queries/0_stateless/00344_row_number_in_all_blocks.sql b/tests/queries/0_stateless/00344_row_number_in_all_blocks.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00344_row_number_in_all_blocks.sql
rename to tests/queries/0_stateless/00344_row_number_in_all_blocks.sql
diff --git a/dbms/tests/queries/0_stateless/00345_index_accurate_comparison.reference b/tests/queries/0_stateless/00345_index_accurate_comparison.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00345_index_accurate_comparison.reference
rename to tests/queries/0_stateless/00345_index_accurate_comparison.reference
diff --git a/dbms/tests/queries/0_stateless/00345_index_accurate_comparison.sql b/tests/queries/0_stateless/00345_index_accurate_comparison.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00345_index_accurate_comparison.sql
rename to tests/queries/0_stateless/00345_index_accurate_comparison.sql
diff --git a/dbms/tests/queries/0_stateless/00346_if_tuple.reference b/tests/queries/0_stateless/00346_if_tuple.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00346_if_tuple.reference
rename to tests/queries/0_stateless/00346_if_tuple.reference
diff --git a/dbms/tests/queries/0_stateless/00346_if_tuple.sql b/tests/queries/0_stateless/00346_if_tuple.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00346_if_tuple.sql
rename to tests/queries/0_stateless/00346_if_tuple.sql
diff --git a/dbms/tests/queries/0_stateless/00347_has_tuple.reference b/tests/queries/0_stateless/00347_has_tuple.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00347_has_tuple.reference
rename to tests/queries/0_stateless/00347_has_tuple.reference
diff --git a/dbms/tests/queries/0_stateless/00347_has_tuple.sql b/tests/queries/0_stateless/00347_has_tuple.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00347_has_tuple.sql
rename to tests/queries/0_stateless/00347_has_tuple.sql
diff --git a/dbms/tests/queries/0_stateless/00348_tuples.reference b/tests/queries/0_stateless/00348_tuples.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00348_tuples.reference
rename to tests/queries/0_stateless/00348_tuples.reference
diff --git a/dbms/tests/queries/0_stateless/00348_tuples.sql b/tests/queries/0_stateless/00348_tuples.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00348_tuples.sql
rename to tests/queries/0_stateless/00348_tuples.sql
diff --git a/dbms/tests/queries/0_stateless/00349_visible_width.reference b/tests/queries/0_stateless/00349_visible_width.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00349_visible_width.reference
rename to tests/queries/0_stateless/00349_visible_width.reference
diff --git a/dbms/tests/queries/0_stateless/00349_visible_width.sql b/tests/queries/0_stateless/00349_visible_width.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00349_visible_width.sql
rename to tests/queries/0_stateless/00349_visible_width.sql
diff --git a/dbms/tests/queries/0_stateless/00350_count_distinct.reference b/tests/queries/0_stateless/00350_count_distinct.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00350_count_distinct.reference
rename to tests/queries/0_stateless/00350_count_distinct.reference
diff --git a/dbms/tests/queries/0_stateless/00350_count_distinct.sql b/tests/queries/0_stateless/00350_count_distinct.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00350_count_distinct.sql
rename to tests/queries/0_stateless/00350_count_distinct.sql
diff --git a/dbms/tests/queries/0_stateless/00351_select_distinct_arrays_tuples.reference b/tests/queries/0_stateless/00351_select_distinct_arrays_tuples.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00351_select_distinct_arrays_tuples.reference
rename to tests/queries/0_stateless/00351_select_distinct_arrays_tuples.reference
diff --git a/dbms/tests/queries/0_stateless/00351_select_distinct_arrays_tuples.sql b/tests/queries/0_stateless/00351_select_distinct_arrays_tuples.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00351_select_distinct_arrays_tuples.sql
rename to tests/queries/0_stateless/00351_select_distinct_arrays_tuples.sql
diff --git a/dbms/tests/queries/0_stateless/00352_external_sorting_and_constants.reference b/tests/queries/0_stateless/00352_external_sorting_and_constants.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00352_external_sorting_and_constants.reference
rename to tests/queries/0_stateless/00352_external_sorting_and_constants.reference
diff --git a/dbms/tests/queries/0_stateless/00352_external_sorting_and_constants.sql b/tests/queries/0_stateless/00352_external_sorting_and_constants.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00352_external_sorting_and_constants.sql
rename to tests/queries/0_stateless/00352_external_sorting_and_constants.sql
diff --git a/dbms/tests/queries/0_stateless/00353_join_by_tuple.reference b/tests/queries/0_stateless/00353_join_by_tuple.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00353_join_by_tuple.reference
rename to tests/queries/0_stateless/00353_join_by_tuple.reference
diff --git a/dbms/tests/queries/0_stateless/00353_join_by_tuple.sql b/tests/queries/0_stateless/00353_join_by_tuple.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00353_join_by_tuple.sql
rename to tests/queries/0_stateless/00353_join_by_tuple.sql
diff --git a/dbms/tests/queries/0_stateless/00354_host_command_line_option.reference b/tests/queries/0_stateless/00354_host_command_line_option.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00354_host_command_line_option.reference
rename to tests/queries/0_stateless/00354_host_command_line_option.reference
diff --git a/dbms/tests/queries/0_stateless/00354_host_command_line_option.sh b/tests/queries/0_stateless/00354_host_command_line_option.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00354_host_command_line_option.sh
rename to tests/queries/0_stateless/00354_host_command_line_option.sh
diff --git a/dbms/tests/queries/0_stateless/00355_array_of_non_const_convertible_types.reference b/tests/queries/0_stateless/00355_array_of_non_const_convertible_types.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00355_array_of_non_const_convertible_types.reference
rename to tests/queries/0_stateless/00355_array_of_non_const_convertible_types.reference
diff --git a/dbms/tests/queries/0_stateless/00355_array_of_non_const_convertible_types.sql b/tests/queries/0_stateless/00355_array_of_non_const_convertible_types.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00355_array_of_non_const_convertible_types.sql
rename to tests/queries/0_stateless/00355_array_of_non_const_convertible_types.sql
diff --git a/dbms/tests/queries/0_stateless/00356_analyze_aggregations_and_union_all.reference b/tests/queries/0_stateless/00356_analyze_aggregations_and_union_all.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00356_analyze_aggregations_and_union_all.reference
rename to tests/queries/0_stateless/00356_analyze_aggregations_and_union_all.reference
diff --git a/dbms/tests/queries/0_stateless/00356_analyze_aggregations_and_union_all.sql b/tests/queries/0_stateless/00356_analyze_aggregations_and_union_all.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00356_analyze_aggregations_and_union_all.sql
rename to tests/queries/0_stateless/00356_analyze_aggregations_and_union_all.sql
diff --git a/dbms/tests/queries/0_stateless/00357_to_string_complex_types.reference b/tests/queries/0_stateless/00357_to_string_complex_types.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00357_to_string_complex_types.reference
rename to tests/queries/0_stateless/00357_to_string_complex_types.reference
diff --git a/dbms/tests/queries/0_stateless/00357_to_string_complex_types.sql b/tests/queries/0_stateless/00357_to_string_complex_types.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00357_to_string_complex_types.sql
rename to tests/queries/0_stateless/00357_to_string_complex_types.sql
diff --git a/dbms/tests/queries/0_stateless/00358_from_string_complex_types.reference b/tests/queries/0_stateless/00358_from_string_complex_types.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00358_from_string_complex_types.reference
rename to tests/queries/0_stateless/00358_from_string_complex_types.reference
diff --git a/dbms/tests/queries/0_stateless/00358_from_string_complex_types.sql b/tests/queries/0_stateless/00358_from_string_complex_types.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00358_from_string_complex_types.sql
rename to tests/queries/0_stateless/00358_from_string_complex_types.sql
diff --git a/dbms/tests/queries/0_stateless/00359_convert_or_zero_functions.reference b/tests/queries/0_stateless/00359_convert_or_zero_functions.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00359_convert_or_zero_functions.reference
rename to tests/queries/0_stateless/00359_convert_or_zero_functions.reference
diff --git a/dbms/tests/queries/0_stateless/00359_convert_or_zero_functions.sql b/tests/queries/0_stateless/00359_convert_or_zero_functions.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00359_convert_or_zero_functions.sql
rename to tests/queries/0_stateless/00359_convert_or_zero_functions.sql
diff --git a/dbms/tests/queries/0_stateless/00360_to_date_from_string_with_datetime.reference b/tests/queries/0_stateless/00360_to_date_from_string_with_datetime.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00360_to_date_from_string_with_datetime.reference
rename to tests/queries/0_stateless/00360_to_date_from_string_with_datetime.reference
diff --git a/dbms/tests/queries/0_stateless/00360_to_date_from_string_with_datetime.sql b/tests/queries/0_stateless/00360_to_date_from_string_with_datetime.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00360_to_date_from_string_with_datetime.sql
rename to tests/queries/0_stateless/00360_to_date_from_string_with_datetime.sql
diff --git a/dbms/tests/queries/0_stateless/00361_shared_array_offsets_and_squash_blocks.reference b/tests/queries/0_stateless/00361_shared_array_offsets_and_squash_blocks.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00361_shared_array_offsets_and_squash_blocks.reference
rename to tests/queries/0_stateless/00361_shared_array_offsets_and_squash_blocks.reference
diff --git a/dbms/tests/queries/0_stateless/00361_shared_array_offsets_and_squash_blocks.sql b/tests/queries/0_stateless/00361_shared_array_offsets_and_squash_blocks.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00361_shared_array_offsets_and_squash_blocks.sql
rename to
tests/queries/0_stateless/00361_shared_array_offsets_and_squash_blocks.sql diff --git a/dbms/tests/queries/0_stateless/00362_great_circle_distance.reference b/tests/queries/0_stateless/00362_great_circle_distance.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00362_great_circle_distance.reference rename to tests/queries/0_stateless/00362_great_circle_distance.reference diff --git a/dbms/tests/queries/0_stateless/00362_great_circle_distance.sql b/tests/queries/0_stateless/00362_great_circle_distance.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00362_great_circle_distance.sql rename to tests/queries/0_stateless/00362_great_circle_distance.sql diff --git a/dbms/tests/queries/0_stateless/00363_defaults.reference b/tests/queries/0_stateless/00363_defaults.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00363_defaults.reference rename to tests/queries/0_stateless/00363_defaults.reference diff --git a/dbms/tests/queries/0_stateless/00363_defaults.sql b/tests/queries/0_stateless/00363_defaults.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00363_defaults.sql rename to tests/queries/0_stateless/00363_defaults.sql diff --git a/dbms/tests/queries/0_stateless/00364_java_style_denormals.reference b/tests/queries/0_stateless/00364_java_style_denormals.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00364_java_style_denormals.reference rename to tests/queries/0_stateless/00364_java_style_denormals.reference diff --git a/dbms/tests/queries/0_stateless/00364_java_style_denormals.sql b/tests/queries/0_stateless/00364_java_style_denormals.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00364_java_style_denormals.sql rename to tests/queries/0_stateless/00364_java_style_denormals.sql diff --git a/dbms/tests/queries/0_stateless/00365_statistics_in_formats.reference b/tests/queries/0_stateless/00365_statistics_in_formats.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00365_statistics_in_formats.reference rename to tests/queries/0_stateless/00365_statistics_in_formats.reference diff --git a/dbms/tests/queries/0_stateless/00365_statistics_in_formats.sh b/tests/queries/0_stateless/00365_statistics_in_formats.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00365_statistics_in_formats.sh rename to tests/queries/0_stateless/00365_statistics_in_formats.sh diff --git a/dbms/tests/queries/0_stateless/00366_multi_statements.reference b/tests/queries/0_stateless/00366_multi_statements.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00366_multi_statements.reference rename to tests/queries/0_stateless/00366_multi_statements.reference diff --git a/dbms/tests/queries/0_stateless/00366_multi_statements.sh b/tests/queries/0_stateless/00366_multi_statements.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00366_multi_statements.sh rename to tests/queries/0_stateless/00366_multi_statements.sh diff --git a/dbms/tests/queries/0_stateless/00367_visible_width_of_array_tuple_enum.reference b/tests/queries/0_stateless/00367_visible_width_of_array_tuple_enum.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00367_visible_width_of_array_tuple_enum.reference rename to tests/queries/0_stateless/00367_visible_width_of_array_tuple_enum.reference diff --git a/dbms/tests/queries/0_stateless/00367_visible_width_of_array_tuple_enum.sql 
b/tests/queries/0_stateless/00367_visible_width_of_array_tuple_enum.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00367_visible_width_of_array_tuple_enum.sql rename to tests/queries/0_stateless/00367_visible_width_of_array_tuple_enum.sql diff --git a/dbms/tests/queries/0_stateless/00368_format_option_collision.reference b/tests/queries/0_stateless/00368_format_option_collision.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00368_format_option_collision.reference rename to tests/queries/0_stateless/00368_format_option_collision.reference diff --git a/dbms/tests/queries/0_stateless/00368_format_option_collision.sh b/tests/queries/0_stateless/00368_format_option_collision.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00368_format_option_collision.sh rename to tests/queries/0_stateless/00368_format_option_collision.sh diff --git a/dbms/tests/queries/0_stateless/00369_int_div_of_float.reference b/tests/queries/0_stateless/00369_int_div_of_float.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00369_int_div_of_float.reference rename to tests/queries/0_stateless/00369_int_div_of_float.reference diff --git a/dbms/tests/queries/0_stateless/00369_int_div_of_float.sql b/tests/queries/0_stateless/00369_int_div_of_float.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00369_int_div_of_float.sql rename to tests/queries/0_stateless/00369_int_div_of_float.sql diff --git a/dbms/tests/queries/0_stateless/00370_duplicate_columns_in_subqueries.reference b/tests/queries/0_stateless/00370_duplicate_columns_in_subqueries.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00370_duplicate_columns_in_subqueries.reference rename to tests/queries/0_stateless/00370_duplicate_columns_in_subqueries.reference diff --git a/dbms/tests/queries/0_stateless/00370_duplicate_columns_in_subqueries.sql b/tests/queries/0_stateless/00370_duplicate_columns_in_subqueries.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00370_duplicate_columns_in_subqueries.sql rename to tests/queries/0_stateless/00370_duplicate_columns_in_subqueries.sql diff --git a/dbms/tests/queries/0_stateless/00371_union_all.reference b/tests/queries/0_stateless/00371_union_all.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00371_union_all.reference rename to tests/queries/0_stateless/00371_union_all.reference diff --git a/dbms/tests/queries/0_stateless/00371_union_all.sql b/tests/queries/0_stateless/00371_union_all.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00371_union_all.sql rename to tests/queries/0_stateless/00371_union_all.sql diff --git a/dbms/tests/queries/0_stateless/00372_cors_header.reference b/tests/queries/0_stateless/00372_cors_header.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00372_cors_header.reference rename to tests/queries/0_stateless/00372_cors_header.reference diff --git a/dbms/tests/queries/0_stateless/00372_cors_header.sh b/tests/queries/0_stateless/00372_cors_header.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00372_cors_header.sh rename to tests/queries/0_stateless/00372_cors_header.sh diff --git a/dbms/tests/queries/0_stateless/00373_group_by_tuple.reference b/tests/queries/0_stateless/00373_group_by_tuple.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00373_group_by_tuple.reference rename to 
tests/queries/0_stateless/00373_group_by_tuple.reference diff --git a/dbms/tests/queries/0_stateless/00373_group_by_tuple.sql b/tests/queries/0_stateless/00373_group_by_tuple.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00373_group_by_tuple.sql rename to tests/queries/0_stateless/00373_group_by_tuple.sql diff --git a/dbms/tests/queries/0_stateless/00374_any_last_if_merge.reference b/tests/queries/0_stateless/00374_any_last_if_merge.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00374_any_last_if_merge.reference rename to tests/queries/0_stateless/00374_any_last_if_merge.reference diff --git a/dbms/tests/queries/0_stateless/00374_any_last_if_merge.sql b/tests/queries/0_stateless/00374_any_last_if_merge.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00374_any_last_if_merge.sql rename to tests/queries/0_stateless/00374_any_last_if_merge.sql diff --git a/dbms/tests/queries/0_stateless/00374_json_each_row_input_with_noisy_fields.reference b/tests/queries/0_stateless/00374_json_each_row_input_with_noisy_fields.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00374_json_each_row_input_with_noisy_fields.reference rename to tests/queries/0_stateless/00374_json_each_row_input_with_noisy_fields.reference diff --git a/dbms/tests/queries/0_stateless/00374_json_each_row_input_with_noisy_fields.sh b/tests/queries/0_stateless/00374_json_each_row_input_with_noisy_fields.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00374_json_each_row_input_with_noisy_fields.sh rename to tests/queries/0_stateless/00374_json_each_row_input_with_noisy_fields.sh diff --git a/dbms/tests/queries/0_stateless/00375_shard_group_uniq_array_of_string.reference b/tests/queries/0_stateless/00375_shard_group_uniq_array_of_string.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00375_shard_group_uniq_array_of_string.reference rename to tests/queries/0_stateless/00375_shard_group_uniq_array_of_string.reference diff --git a/dbms/tests/queries/0_stateless/00375_shard_group_uniq_array_of_string.sql b/tests/queries/0_stateless/00375_shard_group_uniq_array_of_string.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00375_shard_group_uniq_array_of_string.sql rename to tests/queries/0_stateless/00375_shard_group_uniq_array_of_string.sql diff --git a/dbms/tests/queries/0_stateless/00376_shard_group_uniq_array_of_int_array.reference b/tests/queries/0_stateless/00376_shard_group_uniq_array_of_int_array.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00376_shard_group_uniq_array_of_int_array.reference rename to tests/queries/0_stateless/00376_shard_group_uniq_array_of_int_array.reference diff --git a/dbms/tests/queries/0_stateless/00376_shard_group_uniq_array_of_int_array.sql b/tests/queries/0_stateless/00376_shard_group_uniq_array_of_int_array.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00376_shard_group_uniq_array_of_int_array.sql rename to tests/queries/0_stateless/00376_shard_group_uniq_array_of_int_array.sql diff --git a/dbms/tests/queries/0_stateless/00377_shard_group_uniq_array_of_string_array.reference b/tests/queries/0_stateless/00377_shard_group_uniq_array_of_string_array.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00377_shard_group_uniq_array_of_string_array.reference rename to tests/queries/0_stateless/00377_shard_group_uniq_array_of_string_array.reference diff --git 
a/dbms/tests/queries/0_stateless/00377_shard_group_uniq_array_of_string_array.sql b/tests/queries/0_stateless/00377_shard_group_uniq_array_of_string_array.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00377_shard_group_uniq_array_of_string_array.sql rename to tests/queries/0_stateless/00377_shard_group_uniq_array_of_string_array.sql diff --git a/dbms/tests/queries/0_stateless/00378_json_quote_64bit_integers.reference b/tests/queries/0_stateless/00378_json_quote_64bit_integers.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00378_json_quote_64bit_integers.reference rename to tests/queries/0_stateless/00378_json_quote_64bit_integers.reference diff --git a/dbms/tests/queries/0_stateless/00378_json_quote_64bit_integers.sql b/tests/queries/0_stateless/00378_json_quote_64bit_integers.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00378_json_quote_64bit_integers.sql rename to tests/queries/0_stateless/00378_json_quote_64bit_integers.sql diff --git a/dbms/tests/queries/0_stateless/00379_system_processes_port.reference b/tests/queries/0_stateless/00379_system_processes_port.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00379_system_processes_port.reference rename to tests/queries/0_stateless/00379_system_processes_port.reference diff --git a/dbms/tests/queries/0_stateless/00379_system_processes_port.sh b/tests/queries/0_stateless/00379_system_processes_port.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00379_system_processes_port.sh rename to tests/queries/0_stateless/00379_system_processes_port.sh diff --git a/dbms/tests/queries/0_stateless/00380_client_break_at_exception_in_batch_mode.reference b/tests/queries/0_stateless/00380_client_break_at_exception_in_batch_mode.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00380_client_break_at_exception_in_batch_mode.reference rename to tests/queries/0_stateless/00380_client_break_at_exception_in_batch_mode.reference diff --git a/dbms/tests/queries/0_stateless/00380_client_break_at_exception_in_batch_mode.sh b/tests/queries/0_stateless/00380_client_break_at_exception_in_batch_mode.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00380_client_break_at_exception_in_batch_mode.sh rename to tests/queries/0_stateless/00380_client_break_at_exception_in_batch_mode.sh diff --git a/dbms/tests/queries/0_stateless/00381_first_significant_subdomain.reference b/tests/queries/0_stateless/00381_first_significant_subdomain.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00381_first_significant_subdomain.reference rename to tests/queries/0_stateless/00381_first_significant_subdomain.reference diff --git a/dbms/tests/queries/0_stateless/00381_first_significant_subdomain.sql b/tests/queries/0_stateless/00381_first_significant_subdomain.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00381_first_significant_subdomain.sql rename to tests/queries/0_stateless/00381_first_significant_subdomain.sql diff --git a/dbms/tests/queries/0_stateless/00383_utf8_validation.reference b/tests/queries/0_stateless/00383_utf8_validation.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00383_utf8_validation.reference rename to tests/queries/0_stateless/00383_utf8_validation.reference diff --git a/dbms/tests/queries/0_stateless/00383_utf8_validation.sql b/tests/queries/0_stateless/00383_utf8_validation.sql similarity index 100% rename from 
dbms/tests/queries/0_stateless/00383_utf8_validation.sql rename to tests/queries/0_stateless/00383_utf8_validation.sql diff --git a/dbms/tests/queries/0_stateless/00384_column_aggregate_function_insert_from.reference b/tests/queries/0_stateless/00384_column_aggregate_function_insert_from.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00384_column_aggregate_function_insert_from.reference rename to tests/queries/0_stateless/00384_column_aggregate_function_insert_from.reference diff --git a/dbms/tests/queries/0_stateless/00384_column_aggregate_function_insert_from.sql b/tests/queries/0_stateless/00384_column_aggregate_function_insert_from.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00384_column_aggregate_function_insert_from.sql rename to tests/queries/0_stateless/00384_column_aggregate_function_insert_from.sql diff --git a/dbms/tests/queries/0_stateless/00385_storage_file_and_clickhouse-local_app.reference b/tests/queries/0_stateless/00385_storage_file_and_clickhouse-local_app.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00385_storage_file_and_clickhouse-local_app.reference rename to tests/queries/0_stateless/00385_storage_file_and_clickhouse-local_app.reference diff --git a/dbms/tests/queries/0_stateless/00385_storage_file_and_clickhouse-local_app.sh b/tests/queries/0_stateless/00385_storage_file_and_clickhouse-local_app.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00385_storage_file_and_clickhouse-local_app.sh rename to tests/queries/0_stateless/00385_storage_file_and_clickhouse-local_app.sh diff --git a/dbms/tests/queries/0_stateless/00386_enum_in_pk.reference b/tests/queries/0_stateless/00386_enum_in_pk.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00386_enum_in_pk.reference rename to tests/queries/0_stateless/00386_enum_in_pk.reference diff --git a/dbms/tests/queries/0_stateless/00386_enum_in_pk.sql b/tests/queries/0_stateless/00386_enum_in_pk.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00386_enum_in_pk.sql rename to tests/queries/0_stateless/00386_enum_in_pk.sql diff --git a/dbms/tests/queries/0_stateless/00386_has_column_in_table.reference b/tests/queries/0_stateless/00386_has_column_in_table.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00386_has_column_in_table.reference rename to tests/queries/0_stateless/00386_has_column_in_table.reference diff --git a/dbms/tests/queries/0_stateless/00386_has_column_in_table.sql b/tests/queries/0_stateless/00386_has_column_in_table.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00386_has_column_in_table.sql rename to tests/queries/0_stateless/00386_has_column_in_table.sql diff --git a/dbms/tests/queries/0_stateless/00386_long_in_pk.python b/tests/queries/0_stateless/00386_long_in_pk.python similarity index 100% rename from dbms/tests/queries/0_stateless/00386_long_in_pk.python rename to tests/queries/0_stateless/00386_long_in_pk.python diff --git a/dbms/tests/queries/0_stateless/00386_long_in_pk.reference b/tests/queries/0_stateless/00386_long_in_pk.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00386_long_in_pk.reference rename to tests/queries/0_stateless/00386_long_in_pk.reference diff --git a/dbms/tests/queries/0_stateless/00386_long_in_pk.sh b/tests/queries/0_stateless/00386_long_in_pk.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00386_long_in_pk.sh rename to 
tests/queries/0_stateless/00386_long_in_pk.sh diff --git a/dbms/tests/queries/0_stateless/00387_use_client_time_zone.reference b/tests/queries/0_stateless/00387_use_client_time_zone.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00387_use_client_time_zone.reference rename to tests/queries/0_stateless/00387_use_client_time_zone.reference diff --git a/dbms/tests/queries/0_stateless/00387_use_client_time_zone.sh b/tests/queries/0_stateless/00387_use_client_time_zone.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00387_use_client_time_zone.sh rename to tests/queries/0_stateless/00387_use_client_time_zone.sh diff --git a/dbms/tests/queries/0_stateless/00388_enum_with_totals.reference b/tests/queries/0_stateless/00388_enum_with_totals.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00388_enum_with_totals.reference rename to tests/queries/0_stateless/00388_enum_with_totals.reference diff --git a/dbms/tests/queries/0_stateless/00388_enum_with_totals.sql b/tests/queries/0_stateless/00388_enum_with_totals.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00388_enum_with_totals.sql rename to tests/queries/0_stateless/00388_enum_with_totals.sql diff --git a/dbms/tests/queries/0_stateless/00389_concat_operator.reference b/tests/queries/0_stateless/00389_concat_operator.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00389_concat_operator.reference rename to tests/queries/0_stateless/00389_concat_operator.reference diff --git a/dbms/tests/queries/0_stateless/00389_concat_operator.sql b/tests/queries/0_stateless/00389_concat_operator.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00389_concat_operator.sql rename to tests/queries/0_stateless/00389_concat_operator.sql diff --git a/dbms/tests/queries/0_stateless/00390_array_sort.reference b/tests/queries/0_stateless/00390_array_sort.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00390_array_sort.reference rename to tests/queries/0_stateless/00390_array_sort.reference diff --git a/dbms/tests/queries/0_stateless/00390_array_sort.sql b/tests/queries/0_stateless/00390_array_sort.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00390_array_sort.sql rename to tests/queries/0_stateless/00390_array_sort.sql diff --git a/dbms/tests/queries/0_stateless/00392_enum_nested_alter.reference b/tests/queries/0_stateless/00392_enum_nested_alter.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00392_enum_nested_alter.reference rename to tests/queries/0_stateless/00392_enum_nested_alter.reference diff --git a/dbms/tests/queries/0_stateless/00392_enum_nested_alter.sql b/tests/queries/0_stateless/00392_enum_nested_alter.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00392_enum_nested_alter.sql rename to tests/queries/0_stateless/00392_enum_nested_alter.sql diff --git a/dbms/tests/queries/0_stateless/00393_if_with_constant_condition.reference b/tests/queries/0_stateless/00393_if_with_constant_condition.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00393_if_with_constant_condition.reference rename to tests/queries/0_stateless/00393_if_with_constant_condition.reference diff --git a/dbms/tests/queries/0_stateless/00393_if_with_constant_condition.sql b/tests/queries/0_stateless/00393_if_with_constant_condition.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00393_if_with_constant_condition.sql 
rename to tests/queries/0_stateless/00393_if_with_constant_condition.sql diff --git a/dbms/tests/queries/0_stateless/00394_new_nested_column_keeps_offsets.reference b/tests/queries/0_stateless/00394_new_nested_column_keeps_offsets.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00394_new_nested_column_keeps_offsets.reference rename to tests/queries/0_stateless/00394_new_nested_column_keeps_offsets.reference diff --git a/dbms/tests/queries/0_stateless/00394_new_nested_column_keeps_offsets.sql b/tests/queries/0_stateless/00394_new_nested_column_keeps_offsets.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00394_new_nested_column_keeps_offsets.sql rename to tests/queries/0_stateless/00394_new_nested_column_keeps_offsets.sql diff --git a/dbms/tests/queries/0_stateless/00394_replaceall_vector_fixed.reference b/tests/queries/0_stateless/00394_replaceall_vector_fixed.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00394_replaceall_vector_fixed.reference rename to tests/queries/0_stateless/00394_replaceall_vector_fixed.reference diff --git a/dbms/tests/queries/0_stateless/00394_replaceall_vector_fixed.sql b/tests/queries/0_stateless/00394_replaceall_vector_fixed.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00394_replaceall_vector_fixed.sql rename to tests/queries/0_stateless/00394_replaceall_vector_fixed.sql diff --git a/dbms/tests/queries/0_stateless/00395_nullable.reference b/tests/queries/0_stateless/00395_nullable.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00395_nullable.reference rename to tests/queries/0_stateless/00395_nullable.reference diff --git a/dbms/tests/queries/0_stateless/00395_nullable.sql b/tests/queries/0_stateless/00395_nullable.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00395_nullable.sql rename to tests/queries/0_stateless/00395_nullable.sql diff --git a/dbms/tests/queries/0_stateless/00396_uuid.reference b/tests/queries/0_stateless/00396_uuid.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00396_uuid.reference rename to tests/queries/0_stateless/00396_uuid.reference diff --git a/dbms/tests/queries/0_stateless/00396_uuid.sql b/tests/queries/0_stateless/00396_uuid.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00396_uuid.sql rename to tests/queries/0_stateless/00396_uuid.sql diff --git a/dbms/tests/queries/0_stateless/00397_tsv_format_synonym.reference b/tests/queries/0_stateless/00397_tsv_format_synonym.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00397_tsv_format_synonym.reference rename to tests/queries/0_stateless/00397_tsv_format_synonym.reference diff --git a/dbms/tests/queries/0_stateless/00397_tsv_format_synonym.sql b/tests/queries/0_stateless/00397_tsv_format_synonym.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00397_tsv_format_synonym.sql rename to tests/queries/0_stateless/00397_tsv_format_synonym.sql diff --git a/dbms/tests/queries/0_stateless/00398_url_functions.reference b/tests/queries/0_stateless/00398_url_functions.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00398_url_functions.reference rename to tests/queries/0_stateless/00398_url_functions.reference diff --git a/dbms/tests/queries/0_stateless/00398_url_functions.sql b/tests/queries/0_stateless/00398_url_functions.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00398_url_functions.sql rename to 
tests/queries/0_stateless/00398_url_functions.sql diff --git a/dbms/tests/queries/0_stateless/00399_group_uniq_array_date_datetime.reference b/tests/queries/0_stateless/00399_group_uniq_array_date_datetime.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00399_group_uniq_array_date_datetime.reference rename to tests/queries/0_stateless/00399_group_uniq_array_date_datetime.reference diff --git a/dbms/tests/queries/0_stateless/00399_group_uniq_array_date_datetime.sql b/tests/queries/0_stateless/00399_group_uniq_array_date_datetime.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00399_group_uniq_array_date_datetime.sql rename to tests/queries/0_stateless/00399_group_uniq_array_date_datetime.sql diff --git a/dbms/tests/queries/0_stateless/00400_client_external_options.reference b/tests/queries/0_stateless/00400_client_external_options.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00400_client_external_options.reference rename to tests/queries/0_stateless/00400_client_external_options.reference diff --git a/dbms/tests/queries/0_stateless/00400_client_external_options.sh b/tests/queries/0_stateless/00400_client_external_options.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00400_client_external_options.sh rename to tests/queries/0_stateless/00400_client_external_options.sh diff --git a/dbms/tests/queries/0_stateless/00401_merge_and_stripelog.reference b/tests/queries/0_stateless/00401_merge_and_stripelog.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00401_merge_and_stripelog.reference rename to tests/queries/0_stateless/00401_merge_and_stripelog.reference diff --git a/dbms/tests/queries/0_stateless/00401_merge_and_stripelog.sql b/tests/queries/0_stateless/00401_merge_and_stripelog.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00401_merge_and_stripelog.sql rename to tests/queries/0_stateless/00401_merge_and_stripelog.sql diff --git a/dbms/tests/queries/0_stateless/00402_nan_and_extremes.reference b/tests/queries/0_stateless/00402_nan_and_extremes.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00402_nan_and_extremes.reference rename to tests/queries/0_stateless/00402_nan_and_extremes.reference diff --git a/dbms/tests/queries/0_stateless/00402_nan_and_extremes.sql b/tests/queries/0_stateless/00402_nan_and_extremes.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00402_nan_and_extremes.sql rename to tests/queries/0_stateless/00402_nan_and_extremes.sql diff --git a/dbms/tests/queries/0_stateless/00403_to_start_of_day.reference b/tests/queries/0_stateless/00403_to_start_of_day.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00403_to_start_of_day.reference rename to tests/queries/0_stateless/00403_to_start_of_day.reference diff --git a/dbms/tests/queries/0_stateless/00403_to_start_of_day.sql b/tests/queries/0_stateless/00403_to_start_of_day.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00403_to_start_of_day.sql rename to tests/queries/0_stateless/00403_to_start_of_day.sql diff --git a/dbms/tests/queries/0_stateless/00404_null_literal.reference b/tests/queries/0_stateless/00404_null_literal.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00404_null_literal.reference rename to tests/queries/0_stateless/00404_null_literal.reference diff --git a/dbms/tests/queries/0_stateless/00404_null_literal.sql 
b/tests/queries/0_stateless/00404_null_literal.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00404_null_literal.sql rename to tests/queries/0_stateless/00404_null_literal.sql diff --git a/dbms/tests/queries/0_stateless/00405_pretty_formats.reference b/tests/queries/0_stateless/00405_pretty_formats.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00405_pretty_formats.reference rename to tests/queries/0_stateless/00405_pretty_formats.reference diff --git a/dbms/tests/queries/0_stateless/00405_pretty_formats.sql b/tests/queries/0_stateless/00405_pretty_formats.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00405_pretty_formats.sql rename to tests/queries/0_stateless/00405_pretty_formats.sql diff --git a/dbms/tests/queries/0_stateless/00406_tuples_with_nulls.reference b/tests/queries/0_stateless/00406_tuples_with_nulls.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00406_tuples_with_nulls.reference rename to tests/queries/0_stateless/00406_tuples_with_nulls.reference diff --git a/dbms/tests/queries/0_stateless/00406_tuples_with_nulls.sql b/tests/queries/0_stateless/00406_tuples_with_nulls.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00406_tuples_with_nulls.sql rename to tests/queries/0_stateless/00406_tuples_with_nulls.sql diff --git a/dbms/tests/queries/0_stateless/00407_parsing_nulls.reference b/tests/queries/0_stateless/00407_parsing_nulls.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00407_parsing_nulls.reference rename to tests/queries/0_stateless/00407_parsing_nulls.reference diff --git a/dbms/tests/queries/0_stateless/00407_parsing_nulls.sh b/tests/queries/0_stateless/00407_parsing_nulls.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00407_parsing_nulls.sh rename to tests/queries/0_stateless/00407_parsing_nulls.sh diff --git a/dbms/tests/queries/0_stateless/00408_http_keep_alive.reference b/tests/queries/0_stateless/00408_http_keep_alive.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00408_http_keep_alive.reference rename to tests/queries/0_stateless/00408_http_keep_alive.reference diff --git a/dbms/tests/queries/0_stateless/00408_http_keep_alive.sh b/tests/queries/0_stateless/00408_http_keep_alive.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00408_http_keep_alive.sh rename to tests/queries/0_stateless/00408_http_keep_alive.sh diff --git a/dbms/tests/queries/0_stateless/00409_shard_limit_by.reference b/tests/queries/0_stateless/00409_shard_limit_by.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00409_shard_limit_by.reference rename to tests/queries/0_stateless/00409_shard_limit_by.reference diff --git a/dbms/tests/queries/0_stateless/00409_shard_limit_by.sql b/tests/queries/0_stateless/00409_shard_limit_by.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00409_shard_limit_by.sql rename to tests/queries/0_stateless/00409_shard_limit_by.sql diff --git a/dbms/tests/queries/0_stateless/00410_aggregation_combinators_with_arenas.reference b/tests/queries/0_stateless/00410_aggregation_combinators_with_arenas.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00410_aggregation_combinators_with_arenas.reference rename to tests/queries/0_stateless/00410_aggregation_combinators_with_arenas.reference diff --git a/dbms/tests/queries/0_stateless/00410_aggregation_combinators_with_arenas.sql 
b/tests/queries/0_stateless/00410_aggregation_combinators_with_arenas.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00410_aggregation_combinators_with_arenas.sql rename to tests/queries/0_stateless/00410_aggregation_combinators_with_arenas.sql diff --git a/dbms/tests/queries/0_stateless/00411_long_accurate_number_comparison.python b/tests/queries/0_stateless/00411_long_accurate_number_comparison.python similarity index 100% rename from dbms/tests/queries/0_stateless/00411_long_accurate_number_comparison.python rename to tests/queries/0_stateless/00411_long_accurate_number_comparison.python diff --git a/dbms/tests/queries/0_stateless/00411_long_accurate_number_comparison_float.reference b/tests/queries/0_stateless/00411_long_accurate_number_comparison_float.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00411_long_accurate_number_comparison_float.reference rename to tests/queries/0_stateless/00411_long_accurate_number_comparison_float.reference diff --git a/dbms/tests/queries/0_stateless/00411_long_accurate_number_comparison_float.sh b/tests/queries/0_stateless/00411_long_accurate_number_comparison_float.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00411_long_accurate_number_comparison_float.sh rename to tests/queries/0_stateless/00411_long_accurate_number_comparison_float.sh diff --git a/dbms/tests/queries/0_stateless/00411_long_accurate_number_comparison_int1.reference b/tests/queries/0_stateless/00411_long_accurate_number_comparison_int1.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00411_long_accurate_number_comparison_int1.reference rename to tests/queries/0_stateless/00411_long_accurate_number_comparison_int1.reference diff --git a/dbms/tests/queries/0_stateless/00411_long_accurate_number_comparison_int1.sh b/tests/queries/0_stateless/00411_long_accurate_number_comparison_int1.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00411_long_accurate_number_comparison_int1.sh rename to tests/queries/0_stateless/00411_long_accurate_number_comparison_int1.sh diff --git a/dbms/tests/queries/0_stateless/00411_long_accurate_number_comparison_int2.reference b/tests/queries/0_stateless/00411_long_accurate_number_comparison_int2.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00411_long_accurate_number_comparison_int2.reference rename to tests/queries/0_stateless/00411_long_accurate_number_comparison_int2.reference diff --git a/dbms/tests/queries/0_stateless/00411_long_accurate_number_comparison_int2.sh b/tests/queries/0_stateless/00411_long_accurate_number_comparison_int2.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00411_long_accurate_number_comparison_int2.sh rename to tests/queries/0_stateless/00411_long_accurate_number_comparison_int2.sh diff --git a/dbms/tests/queries/0_stateless/00411_long_accurate_number_comparison_int3.reference b/tests/queries/0_stateless/00411_long_accurate_number_comparison_int3.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00411_long_accurate_number_comparison_int3.reference rename to tests/queries/0_stateless/00411_long_accurate_number_comparison_int3.reference diff --git a/dbms/tests/queries/0_stateless/00411_long_accurate_number_comparison_int3.sh b/tests/queries/0_stateless/00411_long_accurate_number_comparison_int3.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00411_long_accurate_number_comparison_int3.sh rename to 
tests/queries/0_stateless/00411_long_accurate_number_comparison_int3.sh diff --git a/dbms/tests/queries/0_stateless/00411_long_accurate_number_comparison_int4.reference b/tests/queries/0_stateless/00411_long_accurate_number_comparison_int4.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00411_long_accurate_number_comparison_int4.reference rename to tests/queries/0_stateless/00411_long_accurate_number_comparison_int4.reference diff --git a/dbms/tests/queries/0_stateless/00411_long_accurate_number_comparison_int4.sh b/tests/queries/0_stateless/00411_long_accurate_number_comparison_int4.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00411_long_accurate_number_comparison_int4.sh rename to tests/queries/0_stateless/00411_long_accurate_number_comparison_int4.sh diff --git a/dbms/tests/queries/0_stateless/00411_merge_tree_where_const_in_set.reference b/tests/queries/0_stateless/00411_merge_tree_where_const_in_set.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00411_merge_tree_where_const_in_set.reference rename to tests/queries/0_stateless/00411_merge_tree_where_const_in_set.reference diff --git a/dbms/tests/queries/0_stateless/00411_merge_tree_where_const_in_set.sql b/tests/queries/0_stateless/00411_merge_tree_where_const_in_set.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00411_merge_tree_where_const_in_set.sql rename to tests/queries/0_stateless/00411_merge_tree_where_const_in_set.sql diff --git a/dbms/tests/queries/0_stateless/00412_logical_expressions_optimizer.reference b/tests/queries/0_stateless/00412_logical_expressions_optimizer.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00412_logical_expressions_optimizer.reference rename to tests/queries/0_stateless/00412_logical_expressions_optimizer.reference diff --git a/dbms/tests/queries/0_stateless/00412_logical_expressions_optimizer.sql b/tests/queries/0_stateless/00412_logical_expressions_optimizer.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00412_logical_expressions_optimizer.sql rename to tests/queries/0_stateless/00412_logical_expressions_optimizer.sql diff --git a/dbms/tests/queries/0_stateless/00413_distinct.reference b/tests/queries/0_stateless/00413_distinct.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00413_distinct.reference rename to tests/queries/0_stateless/00413_distinct.reference diff --git a/dbms/tests/queries/0_stateless/00413_distinct.sql b/tests/queries/0_stateless/00413_distinct.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00413_distinct.sql rename to tests/queries/0_stateless/00413_distinct.sql diff --git a/dbms/tests/queries/0_stateless/00413_least_greatest_new_behavior.reference b/tests/queries/0_stateless/00413_least_greatest_new_behavior.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00413_least_greatest_new_behavior.reference rename to tests/queries/0_stateless/00413_least_greatest_new_behavior.reference diff --git a/dbms/tests/queries/0_stateless/00413_least_greatest_new_behavior.sql b/tests/queries/0_stateless/00413_least_greatest_new_behavior.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00413_least_greatest_new_behavior.sql rename to tests/queries/0_stateless/00413_least_greatest_new_behavior.sql diff --git a/dbms/tests/queries/0_stateless/00414_time_zones_direct_conversion.reference b/tests/queries/0_stateless/00414_time_zones_direct_conversion.reference 
similarity index 100% rename from dbms/tests/queries/0_stateless/00414_time_zones_direct_conversion.reference rename to tests/queries/0_stateless/00414_time_zones_direct_conversion.reference diff --git a/dbms/tests/queries/0_stateless/00414_time_zones_direct_conversion.sql b/tests/queries/0_stateless/00414_time_zones_direct_conversion.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00414_time_zones_direct_conversion.sql rename to tests/queries/0_stateless/00414_time_zones_direct_conversion.sql diff --git a/dbms/tests/queries/0_stateless/00415_into_outfile.reference b/tests/queries/0_stateless/00415_into_outfile.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00415_into_outfile.reference rename to tests/queries/0_stateless/00415_into_outfile.reference diff --git a/dbms/tests/queries/0_stateless/00415_into_outfile.sh b/tests/queries/0_stateless/00415_into_outfile.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00415_into_outfile.sh rename to tests/queries/0_stateless/00415_into_outfile.sh diff --git a/dbms/tests/queries/0_stateless/00416_pocopatch_progress_in_http_headers.reference b/tests/queries/0_stateless/00416_pocopatch_progress_in_http_headers.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00416_pocopatch_progress_in_http_headers.reference rename to tests/queries/0_stateless/00416_pocopatch_progress_in_http_headers.reference diff --git a/dbms/tests/queries/0_stateless/00416_pocopatch_progress_in_http_headers.sh b/tests/queries/0_stateless/00416_pocopatch_progress_in_http_headers.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00416_pocopatch_progress_in_http_headers.sh rename to tests/queries/0_stateless/00416_pocopatch_progress_in_http_headers.sh diff --git a/dbms/tests/queries/0_stateless/00417_kill_query.reference b/tests/queries/0_stateless/00417_kill_query.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00417_kill_query.reference rename to tests/queries/0_stateless/00417_kill_query.reference diff --git a/dbms/tests/queries/0_stateless/00417_kill_query.sh b/tests/queries/0_stateless/00417_kill_query.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00417_kill_query.sh rename to tests/queries/0_stateless/00417_kill_query.sh diff --git a/dbms/tests/queries/0_stateless/00417_system_build_options.reference b/tests/queries/0_stateless/00417_system_build_options.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00417_system_build_options.reference rename to tests/queries/0_stateless/00417_system_build_options.reference diff --git a/dbms/tests/queries/0_stateless/00417_system_build_options.sh b/tests/queries/0_stateless/00417_system_build_options.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00417_system_build_options.sh rename to tests/queries/0_stateless/00417_system_build_options.sh diff --git a/dbms/tests/queries/0_stateless/00418_input_format_allow_errors.reference b/tests/queries/0_stateless/00418_input_format_allow_errors.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00418_input_format_allow_errors.reference rename to tests/queries/0_stateless/00418_input_format_allow_errors.reference diff --git a/dbms/tests/queries/0_stateless/00418_input_format_allow_errors.sh b/tests/queries/0_stateless/00418_input_format_allow_errors.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00418_input_format_allow_errors.sh rename to 
tests/queries/0_stateless/00418_input_format_allow_errors.sh diff --git a/dbms/tests/queries/0_stateless/00419_show_sql_queries.reference b/tests/queries/0_stateless/00419_show_sql_queries.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00419_show_sql_queries.reference rename to tests/queries/0_stateless/00419_show_sql_queries.reference diff --git a/dbms/tests/queries/0_stateless/00419_show_sql_queries.sh b/tests/queries/0_stateless/00419_show_sql_queries.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00419_show_sql_queries.sh rename to tests/queries/0_stateless/00419_show_sql_queries.sh diff --git a/dbms/tests/queries/0_stateless/00420_null_in_scalar_subqueries.reference b/tests/queries/0_stateless/00420_null_in_scalar_subqueries.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00420_null_in_scalar_subqueries.reference rename to tests/queries/0_stateless/00420_null_in_scalar_subqueries.reference diff --git a/dbms/tests/queries/0_stateless/00420_null_in_scalar_subqueries.sql b/tests/queries/0_stateless/00420_null_in_scalar_subqueries.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00420_null_in_scalar_subqueries.sql rename to tests/queries/0_stateless/00420_null_in_scalar_subqueries.sql diff --git a/dbms/tests/queries/0_stateless/00421_storage_merge__table_index.reference b/tests/queries/0_stateless/00421_storage_merge__table_index.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00421_storage_merge__table_index.reference rename to tests/queries/0_stateless/00421_storage_merge__table_index.reference diff --git a/dbms/tests/queries/0_stateless/00421_storage_merge__table_index.sh b/tests/queries/0_stateless/00421_storage_merge__table_index.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00421_storage_merge__table_index.sh rename to tests/queries/0_stateless/00421_storage_merge__table_index.sh diff --git a/dbms/tests/queries/0_stateless/00422_hash_function_constexpr.reference b/tests/queries/0_stateless/00422_hash_function_constexpr.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00422_hash_function_constexpr.reference rename to tests/queries/0_stateless/00422_hash_function_constexpr.reference diff --git a/dbms/tests/queries/0_stateless/00422_hash_function_constexpr.sql b/tests/queries/0_stateless/00422_hash_function_constexpr.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00422_hash_function_constexpr.sql rename to tests/queries/0_stateless/00422_hash_function_constexpr.sql diff --git a/dbms/tests/queries/0_stateless/00423_storage_log_single_thread.reference b/tests/queries/0_stateless/00423_storage_log_single_thread.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00423_storage_log_single_thread.reference rename to tests/queries/0_stateless/00423_storage_log_single_thread.reference diff --git a/dbms/tests/queries/0_stateless/00423_storage_log_single_thread.sql b/tests/queries/0_stateless/00423_storage_log_single_thread.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00423_storage_log_single_thread.sql rename to tests/queries/0_stateless/00423_storage_log_single_thread.sql diff --git a/dbms/tests/queries/0_stateless/00424_shard_aggregate_functions_of_nullable.reference b/tests/queries/0_stateless/00424_shard_aggregate_functions_of_nullable.reference similarity index 100% rename from 
dbms/tests/queries/0_stateless/00424_shard_aggregate_functions_of_nullable.reference
rename to tests/queries/0_stateless/00424_shard_aggregate_functions_of_nullable.reference
diff --git a/dbms/tests/queries/0_stateless/00424_shard_aggregate_functions_of_nullable.sql b/tests/queries/0_stateless/00424_shard_aggregate_functions_of_nullable.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00424_shard_aggregate_functions_of_nullable.sql
rename to tests/queries/0_stateless/00424_shard_aggregate_functions_of_nullable.sql
diff --git a/dbms/tests/queries/0_stateless/00425_count_nullable.reference b/tests/queries/0_stateless/00425_count_nullable.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00425_count_nullable.reference
rename to tests/queries/0_stateless/00425_count_nullable.reference
diff --git a/dbms/tests/queries/0_stateless/00425_count_nullable.sql b/tests/queries/0_stateless/00425_count_nullable.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00425_count_nullable.sql
rename to tests/queries/0_stateless/00425_count_nullable.sql
diff --git a/dbms/tests/queries/0_stateless/00426_nulls_sorting.reference b/tests/queries/0_stateless/00426_nulls_sorting.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00426_nulls_sorting.reference
rename to tests/queries/0_stateless/00426_nulls_sorting.reference
diff --git a/dbms/tests/queries/0_stateless/00426_nulls_sorting.sql b/tests/queries/0_stateless/00426_nulls_sorting.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00426_nulls_sorting.sql
rename to tests/queries/0_stateless/00426_nulls_sorting.sql
diff --git a/dbms/tests/queries/0_stateless/00427_alter_primary_key.reference b/tests/queries/0_stateless/00427_alter_primary_key.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00427_alter_primary_key.reference
rename to tests/queries/0_stateless/00427_alter_primary_key.reference
diff --git a/dbms/tests/queries/0_stateless/00427_alter_primary_key.sh b/tests/queries/0_stateless/00427_alter_primary_key.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00427_alter_primary_key.sh
rename to tests/queries/0_stateless/00427_alter_primary_key.sh
diff --git a/dbms/tests/queries/0_stateless/00429_long_http_bufferization.reference b/tests/queries/0_stateless/00429_long_http_bufferization.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00429_long_http_bufferization.reference
rename to tests/queries/0_stateless/00429_long_http_bufferization.reference
diff --git a/dbms/tests/queries/0_stateless/00429_long_http_bufferization.sh b/tests/queries/0_stateless/00429_long_http_bufferization.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00429_long_http_bufferization.sh
rename to tests/queries/0_stateless/00429_long_http_bufferization.sh
diff --git a/dbms/tests/queries/0_stateless/00429_point_in_ellipses.reference b/tests/queries/0_stateless/00429_point_in_ellipses.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00429_point_in_ellipses.reference
rename to tests/queries/0_stateless/00429_point_in_ellipses.reference
diff --git a/dbms/tests/queries/0_stateless/00429_point_in_ellipses.sql b/tests/queries/0_stateless/00429_point_in_ellipses.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00429_point_in_ellipses.sql
rename to tests/queries/0_stateless/00429_point_in_ellipses.sql
diff --git a/dbms/tests/queries/0_stateless/00430_https_server.reference b/tests/queries/0_stateless/00430_https_server.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00430_https_server.reference
rename to tests/queries/0_stateless/00430_https_server.reference
diff --git a/dbms/tests/queries/0_stateless/00430_https_server.sh b/tests/queries/0_stateless/00430_https_server.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00430_https_server.sh
rename to tests/queries/0_stateless/00430_https_server.sh
diff --git a/dbms/tests/queries/0_stateless/00431_if_nulls.reference b/tests/queries/0_stateless/00431_if_nulls.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00431_if_nulls.reference
rename to tests/queries/0_stateless/00431_if_nulls.reference
diff --git a/dbms/tests/queries/0_stateless/00431_if_nulls.sql b/tests/queries/0_stateless/00431_if_nulls.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00431_if_nulls.sql
rename to tests/queries/0_stateless/00431_if_nulls.sql
diff --git a/dbms/tests/queries/0_stateless/00432_aggregate_function_scalars_and_constants.reference b/tests/queries/0_stateless/00432_aggregate_function_scalars_and_constants.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00432_aggregate_function_scalars_and_constants.reference
rename to tests/queries/0_stateless/00432_aggregate_function_scalars_and_constants.reference
diff --git a/dbms/tests/queries/0_stateless/00432_aggregate_function_scalars_and_constants.sql b/tests/queries/0_stateless/00432_aggregate_function_scalars_and_constants.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00432_aggregate_function_scalars_and_constants.sql
rename to tests/queries/0_stateless/00432_aggregate_function_scalars_and_constants.sql
diff --git a/dbms/tests/queries/0_stateless/00433_ifnull.reference b/tests/queries/0_stateless/00433_ifnull.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00433_ifnull.reference
rename to tests/queries/0_stateless/00433_ifnull.reference
diff --git a/dbms/tests/queries/0_stateless/00433_ifnull.sql b/tests/queries/0_stateless/00433_ifnull.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00433_ifnull.sql
rename to tests/queries/0_stateless/00433_ifnull.sql
diff --git a/dbms/tests/queries/0_stateless/00434_tonullable.reference b/tests/queries/0_stateless/00434_tonullable.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00434_tonullable.reference
rename to tests/queries/0_stateless/00434_tonullable.reference
diff --git a/dbms/tests/queries/0_stateless/00434_tonullable.sql b/tests/queries/0_stateless/00434_tonullable.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00434_tonullable.sql
rename to tests/queries/0_stateless/00434_tonullable.sql
diff --git a/dbms/tests/queries/0_stateless/00435_coalesce.reference b/tests/queries/0_stateless/00435_coalesce.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00435_coalesce.reference
rename to tests/queries/0_stateless/00435_coalesce.reference
diff --git a/dbms/tests/queries/0_stateless/00435_coalesce.sql b/tests/queries/0_stateless/00435_coalesce.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00435_coalesce.sql
rename to tests/queries/0_stateless/00435_coalesce.sql
diff --git a/dbms/tests/queries/0_stateless/00436_convert_charset.reference b/tests/queries/0_stateless/00436_convert_charset.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00436_convert_charset.reference
rename to tests/queries/0_stateless/00436_convert_charset.reference
diff --git a/dbms/tests/queries/0_stateless/00436_convert_charset.sql b/tests/queries/0_stateless/00436_convert_charset.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00436_convert_charset.sql
rename to tests/queries/0_stateless/00436_convert_charset.sql
diff --git a/dbms/tests/queries/0_stateless/00436_fixed_string_16_comparisons.reference b/tests/queries/0_stateless/00436_fixed_string_16_comparisons.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00436_fixed_string_16_comparisons.reference
rename to tests/queries/0_stateless/00436_fixed_string_16_comparisons.reference
diff --git a/dbms/tests/queries/0_stateless/00436_fixed_string_16_comparisons.sql b/tests/queries/0_stateless/00436_fixed_string_16_comparisons.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00436_fixed_string_16_comparisons.sql
rename to tests/queries/0_stateless/00436_fixed_string_16_comparisons.sql
diff --git a/dbms/tests/queries/0_stateless/00437_nulls_first_last.reference b/tests/queries/0_stateless/00437_nulls_first_last.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00437_nulls_first_last.reference
rename to tests/queries/0_stateless/00437_nulls_first_last.reference
diff --git a/dbms/tests/queries/0_stateless/00437_nulls_first_last.sql b/tests/queries/0_stateless/00437_nulls_first_last.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00437_nulls_first_last.sql
rename to tests/queries/0_stateless/00437_nulls_first_last.sql
diff --git a/dbms/tests/queries/0_stateless/00438_bit_rotate.reference b/tests/queries/0_stateless/00438_bit_rotate.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00438_bit_rotate.reference
rename to tests/queries/0_stateless/00438_bit_rotate.reference
diff --git a/dbms/tests/queries/0_stateless/00438_bit_rotate.sql b/tests/queries/0_stateless/00438_bit_rotate.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00438_bit_rotate.sql
rename to tests/queries/0_stateless/00438_bit_rotate.sql
diff --git a/dbms/tests/queries/0_stateless/00439_fixed_string_filter.reference b/tests/queries/0_stateless/00439_fixed_string_filter.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00439_fixed_string_filter.reference
rename to tests/queries/0_stateless/00439_fixed_string_filter.reference
diff --git a/dbms/tests/queries/0_stateless/00439_fixed_string_filter.sql b/tests/queries/0_stateless/00439_fixed_string_filter.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00439_fixed_string_filter.sql
rename to tests/queries/0_stateless/00439_fixed_string_filter.sql
diff --git a/dbms/tests/queries/0_stateless/00440_nulls_merge_tree.reference b/tests/queries/0_stateless/00440_nulls_merge_tree.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00440_nulls_merge_tree.reference
rename to tests/queries/0_stateless/00440_nulls_merge_tree.reference
diff --git a/dbms/tests/queries/0_stateless/00440_nulls_merge_tree.sql b/tests/queries/0_stateless/00440_nulls_merge_tree.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00440_nulls_merge_tree.sql
rename to tests/queries/0_stateless/00440_nulls_merge_tree.sql
diff --git a/dbms/tests/queries/0_stateless/00441_nulls_in.reference b/tests/queries/0_stateless/00441_nulls_in.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00441_nulls_in.reference
rename to tests/queries/0_stateless/00441_nulls_in.reference
diff --git a/dbms/tests/queries/0_stateless/00441_nulls_in.sql b/tests/queries/0_stateless/00441_nulls_in.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00441_nulls_in.sql
rename to tests/queries/0_stateless/00441_nulls_in.sql
diff --git a/dbms/tests/queries/0_stateless/00442_filter_by_nullable.reference b/tests/queries/0_stateless/00442_filter_by_nullable.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00442_filter_by_nullable.reference
rename to tests/queries/0_stateless/00442_filter_by_nullable.reference
diff --git a/dbms/tests/queries/0_stateless/00442_filter_by_nullable.sql b/tests/queries/0_stateless/00442_filter_by_nullable.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00442_filter_by_nullable.sql
rename to tests/queries/0_stateless/00442_filter_by_nullable.sql
diff --git a/dbms/tests/queries/0_stateless/00443_merge_tree_uniform_read_distribution_0.reference b/tests/queries/0_stateless/00443_merge_tree_uniform_read_distribution_0.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00443_merge_tree_uniform_read_distribution_0.reference
rename to tests/queries/0_stateless/00443_merge_tree_uniform_read_distribution_0.reference
diff --git a/dbms/tests/queries/0_stateless/00443_merge_tree_uniform_read_distribution_0.sh b/tests/queries/0_stateless/00443_merge_tree_uniform_read_distribution_0.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00443_merge_tree_uniform_read_distribution_0.sh
rename to tests/queries/0_stateless/00443_merge_tree_uniform_read_distribution_0.sh
diff --git a/dbms/tests/queries/0_stateless/00443_optimize_final_vertical_merge.reference b/tests/queries/0_stateless/00443_optimize_final_vertical_merge.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00443_optimize_final_vertical_merge.reference
rename to tests/queries/0_stateless/00443_optimize_final_vertical_merge.reference
diff --git a/dbms/tests/queries/0_stateless/00443_optimize_final_vertical_merge.sh b/tests/queries/0_stateless/00443_optimize_final_vertical_merge.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00443_optimize_final_vertical_merge.sh
rename to tests/queries/0_stateless/00443_optimize_final_vertical_merge.sh
diff --git a/dbms/tests/queries/0_stateless/00443_preferred_block_size_bytes.reference b/tests/queries/0_stateless/00443_preferred_block_size_bytes.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00443_preferred_block_size_bytes.reference
rename to tests/queries/0_stateless/00443_preferred_block_size_bytes.reference
diff --git a/dbms/tests/queries/0_stateless/00443_preferred_block_size_bytes.sh b/tests/queries/0_stateless/00443_preferred_block_size_bytes.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00443_preferred_block_size_bytes.sh
rename to tests/queries/0_stateless/00443_preferred_block_size_bytes.sh
diff --git a/dbms/tests/queries/0_stateless/00444_join_use_nulls.reference b/tests/queries/0_stateless/00444_join_use_nulls.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00444_join_use_nulls.reference
rename to tests/queries/0_stateless/00444_join_use_nulls.reference
diff --git a/dbms/tests/queries/0_stateless/00444_join_use_nulls.sql b/tests/queries/0_stateless/00444_join_use_nulls.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00444_join_use_nulls.sql
rename to tests/queries/0_stateless/00444_join_use_nulls.sql
diff --git a/dbms/tests/queries/0_stateless/00445_join_nullable_keys.reference b/tests/queries/0_stateless/00445_join_nullable_keys.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00445_join_nullable_keys.reference
rename to tests/queries/0_stateless/00445_join_nullable_keys.reference
diff --git a/dbms/tests/queries/0_stateless/00445_join_nullable_keys.sql b/tests/queries/0_stateless/00445_join_nullable_keys.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00445_join_nullable_keys.sql
rename to tests/queries/0_stateless/00445_join_nullable_keys.sql
diff --git a/dbms/tests/queries/0_stateless/00446_clear_column_in_partition_concurrent_zookeeper.reference b/tests/queries/0_stateless/00446_clear_column_in_partition_concurrent_zookeeper.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00446_clear_column_in_partition_concurrent_zookeeper.reference
rename to tests/queries/0_stateless/00446_clear_column_in_partition_concurrent_zookeeper.reference
diff --git a/dbms/tests/queries/0_stateless/00446_clear_column_in_partition_concurrent_zookeeper.sh b/tests/queries/0_stateless/00446_clear_column_in_partition_concurrent_zookeeper.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00446_clear_column_in_partition_concurrent_zookeeper.sh
rename to tests/queries/0_stateless/00446_clear_column_in_partition_concurrent_zookeeper.sh
diff --git a/dbms/tests/queries/0_stateless/00446_clear_column_in_partition_zookeeper.reference b/tests/queries/0_stateless/00446_clear_column_in_partition_zookeeper.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00446_clear_column_in_partition_zookeeper.reference
rename to tests/queries/0_stateless/00446_clear_column_in_partition_zookeeper.reference
diff --git a/dbms/tests/queries/0_stateless/00446_clear_column_in_partition_zookeeper.sql b/tests/queries/0_stateless/00446_clear_column_in_partition_zookeeper.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00446_clear_column_in_partition_zookeeper.sql
rename to tests/queries/0_stateless/00446_clear_column_in_partition_zookeeper.sql
diff --git a/dbms/tests/queries/0_stateless/00447_foreach_modifier.reference b/tests/queries/0_stateless/00447_foreach_modifier.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00447_foreach_modifier.reference
rename to tests/queries/0_stateless/00447_foreach_modifier.reference
diff --git a/dbms/tests/queries/0_stateless/00447_foreach_modifier.sql b/tests/queries/0_stateless/00447_foreach_modifier.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00447_foreach_modifier.sql
rename to tests/queries/0_stateless/00447_foreach_modifier.sql
diff --git a/dbms/tests/queries/0_stateless/00448_replicate_nullable_tuple_generic.reference b/tests/queries/0_stateless/00448_replicate_nullable_tuple_generic.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00448_replicate_nullable_tuple_generic.reference
rename to tests/queries/0_stateless/00448_replicate_nullable_tuple_generic.reference
diff --git a/dbms/tests/queries/0_stateless/00448_replicate_nullable_tuple_generic.sql b/tests/queries/0_stateless/00448_replicate_nullable_tuple_generic.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00448_replicate_nullable_tuple_generic.sql
rename to tests/queries/0_stateless/00448_replicate_nullable_tuple_generic.sql
diff --git a/dbms/tests/queries/0_stateless/00448_to_string_cut_to_zero.reference b/tests/queries/0_stateless/00448_to_string_cut_to_zero.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00448_to_string_cut_to_zero.reference
rename to tests/queries/0_stateless/00448_to_string_cut_to_zero.reference
diff --git a/dbms/tests/queries/0_stateless/00448_to_string_cut_to_zero.sql b/tests/queries/0_stateless/00448_to_string_cut_to_zero.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00448_to_string_cut_to_zero.sql
rename to tests/queries/0_stateless/00448_to_string_cut_to_zero.sql
diff --git a/dbms/tests/queries/0_stateless/00449_filter_array_nullable_tuple.reference b/tests/queries/0_stateless/00449_filter_array_nullable_tuple.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00449_filter_array_nullable_tuple.reference
rename to tests/queries/0_stateless/00449_filter_array_nullable_tuple.reference
diff --git a/dbms/tests/queries/0_stateless/00449_filter_array_nullable_tuple.sql b/tests/queries/0_stateless/00449_filter_array_nullable_tuple.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00449_filter_array_nullable_tuple.sql
rename to tests/queries/0_stateless/00449_filter_array_nullable_tuple.sql
diff --git a/dbms/tests/queries/0_stateless/00450_higher_order_and_nullable.reference b/tests/queries/0_stateless/00450_higher_order_and_nullable.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00450_higher_order_and_nullable.reference
rename to tests/queries/0_stateless/00450_higher_order_and_nullable.reference
diff --git a/dbms/tests/queries/0_stateless/00450_higher_order_and_nullable.sql b/tests/queries/0_stateless/00450_higher_order_and_nullable.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00450_higher_order_and_nullable.sql
rename to tests/queries/0_stateless/00450_higher_order_and_nullable.sql
diff --git a/dbms/tests/queries/0_stateless/00451_left_array_join_and_constants.reference b/tests/queries/0_stateless/00451_left_array_join_and_constants.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00451_left_array_join_and_constants.reference
rename to tests/queries/0_stateless/00451_left_array_join_and_constants.reference
diff --git a/dbms/tests/queries/0_stateless/00451_left_array_join_and_constants.sql b/tests/queries/0_stateless/00451_left_array_join_and_constants.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00451_left_array_join_and_constants.sql
rename to tests/queries/0_stateless/00451_left_array_join_and_constants.sql
diff --git a/dbms/tests/queries/0_stateless/00452_left_array_join_and_nullable.reference b/tests/queries/0_stateless/00452_left_array_join_and_nullable.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00452_left_array_join_and_nullable.reference
rename to tests/queries/0_stateless/00452_left_array_join_and_nullable.reference
diff --git a/dbms/tests/queries/0_stateless/00452_left_array_join_and_nullable.sql b/tests/queries/0_stateless/00452_left_array_join_and_nullable.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00452_left_array_join_and_nullable.sql
rename to tests/queries/0_stateless/00452_left_array_join_and_nullable.sql
diff --git a/dbms/tests/queries/0_stateless/00453_cast_enum.reference b/tests/queries/0_stateless/00453_cast_enum.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00453_cast_enum.reference
rename to tests/queries/0_stateless/00453_cast_enum.reference
diff --git a/dbms/tests/queries/0_stateless/00453_cast_enum.sql b/tests/queries/0_stateless/00453_cast_enum.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00453_cast_enum.sql
rename to tests/queries/0_stateless/00453_cast_enum.sql
diff --git a/dbms/tests/queries/0_stateless/00453_top_k.reference b/tests/queries/0_stateless/00453_top_k.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00453_top_k.reference
rename to tests/queries/0_stateless/00453_top_k.reference
diff --git a/dbms/tests/queries/0_stateless/00453_top_k.sql b/tests/queries/0_stateless/00453_top_k.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00453_top_k.sql
rename to tests/queries/0_stateless/00453_top_k.sql
diff --git a/dbms/tests/queries/0_stateless/00456_alter_nullable.reference b/tests/queries/0_stateless/00456_alter_nullable.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00456_alter_nullable.reference
rename to tests/queries/0_stateless/00456_alter_nullable.reference
diff --git a/dbms/tests/queries/0_stateless/00456_alter_nullable.sql b/tests/queries/0_stateless/00456_alter_nullable.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00456_alter_nullable.sql
rename to tests/queries/0_stateless/00456_alter_nullable.sql
diff --git a/dbms/tests/queries/0_stateless/00457_log_tinylog_stripelog_nullable.reference b/tests/queries/0_stateless/00457_log_tinylog_stripelog_nullable.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00457_log_tinylog_stripelog_nullable.reference
rename to tests/queries/0_stateless/00457_log_tinylog_stripelog_nullable.reference
diff --git a/dbms/tests/queries/0_stateless/00457_log_tinylog_stripelog_nullable.sql b/tests/queries/0_stateless/00457_log_tinylog_stripelog_nullable.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00457_log_tinylog_stripelog_nullable.sql
rename to tests/queries/0_stateless/00457_log_tinylog_stripelog_nullable.sql
diff --git a/dbms/tests/queries/0_stateless/00458_merge_type_cast.reference b/tests/queries/0_stateless/00458_merge_type_cast.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00458_merge_type_cast.reference
rename to tests/queries/0_stateless/00458_merge_type_cast.reference
diff --git a/dbms/tests/queries/0_stateless/00458_merge_type_cast.sql b/tests/queries/0_stateless/00458_merge_type_cast.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00458_merge_type_cast.sql
rename to tests/queries/0_stateless/00458_merge_type_cast.sql
diff --git a/dbms/tests/queries/0_stateless/00459_group_array_insert_at.reference b/tests/queries/0_stateless/00459_group_array_insert_at.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00459_group_array_insert_at.reference
rename to tests/queries/0_stateless/00459_group_array_insert_at.reference
diff --git a/dbms/tests/queries/0_stateless/00459_group_array_insert_at.sql b/tests/queries/0_stateless/00459_group_array_insert_at.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00459_group_array_insert_at.sql
rename to tests/queries/0_stateless/00459_group_array_insert_at.sql
diff --git a/dbms/tests/queries/0_stateless/00460_vertical_and_totals_extremes.reference b/tests/queries/0_stateless/00460_vertical_and_totals_extremes.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00460_vertical_and_totals_extremes.reference
rename to tests/queries/0_stateless/00460_vertical_and_totals_extremes.reference
diff --git a/dbms/tests/queries/0_stateless/00460_vertical_and_totals_extremes.sql b/tests/queries/0_stateless/00460_vertical_and_totals_extremes.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00460_vertical_and_totals_extremes.sql
rename to tests/queries/0_stateless/00460_vertical_and_totals_extremes.sql
diff --git a/dbms/tests/queries/0_stateless/00461_default_value_of_argument_type.reference b/tests/queries/0_stateless/00461_default_value_of_argument_type.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00461_default_value_of_argument_type.reference
rename to tests/queries/0_stateless/00461_default_value_of_argument_type.reference
diff --git a/dbms/tests/queries/0_stateless/00461_default_value_of_argument_type.sql b/tests/queries/0_stateless/00461_default_value_of_argument_type.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00461_default_value_of_argument_type.sql
rename to tests/queries/0_stateless/00461_default_value_of_argument_type.sql
diff --git a/dbms/tests/queries/0_stateless/00462_json_true_false_literals.reference b/tests/queries/0_stateless/00462_json_true_false_literals.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00462_json_true_false_literals.reference
rename to tests/queries/0_stateless/00462_json_true_false_literals.reference
diff --git a/dbms/tests/queries/0_stateless/00462_json_true_false_literals.sql b/tests/queries/0_stateless/00462_json_true_false_literals.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00462_json_true_false_literals.sql
rename to tests/queries/0_stateless/00462_json_true_false_literals.sql
diff --git a/dbms/tests/queries/0_stateless/00463_long_sessions_in_http_interface.reference b/tests/queries/0_stateless/00463_long_sessions_in_http_interface.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00463_long_sessions_in_http_interface.reference
rename to tests/queries/0_stateless/00463_long_sessions_in_http_interface.reference
diff --git a/dbms/tests/queries/0_stateless/00463_long_sessions_in_http_interface.sh b/tests/queries/0_stateless/00463_long_sessions_in_http_interface.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00463_long_sessions_in_http_interface.sh
rename to tests/queries/0_stateless/00463_long_sessions_in_http_interface.sh
diff --git a/dbms/tests/queries/0_stateless/00464_array_element_out_of_range.reference b/tests/queries/0_stateless/00464_array_element_out_of_range.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00464_array_element_out_of_range.reference
rename to tests/queries/0_stateless/00464_array_element_out_of_range.reference
diff --git a/dbms/tests/queries/0_stateless/00464_array_element_out_of_range.sql b/tests/queries/0_stateless/00464_array_element_out_of_range.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00464_array_element_out_of_range.sql
rename to tests/queries/0_stateless/00464_array_element_out_of_range.sql
diff --git a/dbms/tests/queries/0_stateless/00464_sort_all_constant_columns.reference b/tests/queries/0_stateless/00464_sort_all_constant_columns.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00464_sort_all_constant_columns.reference
rename to tests/queries/0_stateless/00464_sort_all_constant_columns.reference
diff --git a/dbms/tests/queries/0_stateless/00464_sort_all_constant_columns.sql b/tests/queries/0_stateless/00464_sort_all_constant_columns.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00464_sort_all_constant_columns.sql
rename to tests/queries/0_stateless/00464_sort_all_constant_columns.sql
diff --git a/dbms/tests/queries/0_stateless/00465_nullable_default.reference b/tests/queries/0_stateless/00465_nullable_default.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00465_nullable_default.reference
rename to tests/queries/0_stateless/00465_nullable_default.reference
diff --git a/dbms/tests/queries/0_stateless/00465_nullable_default.sql b/tests/queries/0_stateless/00465_nullable_default.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00465_nullable_default.sql
rename to tests/queries/0_stateless/00465_nullable_default.sql
diff --git a/dbms/tests/queries/0_stateless/00466_comments_in_keyword.reference b/tests/queries/0_stateless/00466_comments_in_keyword.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00466_comments_in_keyword.reference
rename to tests/queries/0_stateless/00466_comments_in_keyword.reference
diff --git a/dbms/tests/queries/0_stateless/00466_comments_in_keyword.sql b/tests/queries/0_stateless/00466_comments_in_keyword.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00466_comments_in_keyword.sql
rename to tests/queries/0_stateless/00466_comments_in_keyword.sql
diff --git a/dbms/tests/queries/0_stateless/00467_qualified_names.reference b/tests/queries/0_stateless/00467_qualified_names.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00467_qualified_names.reference
rename to tests/queries/0_stateless/00467_qualified_names.reference
diff --git a/dbms/tests/queries/0_stateless/00467_qualified_names.sql b/tests/queries/0_stateless/00467_qualified_names.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00467_qualified_names.sql
rename to tests/queries/0_stateless/00467_qualified_names.sql
diff --git a/dbms/tests/queries/0_stateless/00468_array_join_multiple_arrays_and_use_original_column.reference b/tests/queries/0_stateless/00468_array_join_multiple_arrays_and_use_original_column.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00468_array_join_multiple_arrays_and_use_original_column.reference
rename to tests/queries/0_stateless/00468_array_join_multiple_arrays_and_use_original_column.reference
diff --git a/dbms/tests/queries/0_stateless/00468_array_join_multiple_arrays_and_use_original_column.sql b/tests/queries/0_stateless/00468_array_join_multiple_arrays_and_use_original_column.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00468_array_join_multiple_arrays_and_use_original_column.sql
rename to tests/queries/0_stateless/00468_array_join_multiple_arrays_and_use_original_column.sql
diff --git a/dbms/tests/queries/0_stateless/00469_comparison_of_strings_containing_null_char.reference b/tests/queries/0_stateless/00469_comparison_of_strings_containing_null_char.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00469_comparison_of_strings_containing_null_char.reference
rename to tests/queries/0_stateless/00469_comparison_of_strings_containing_null_char.reference
diff --git a/dbms/tests/queries/0_stateless/00469_comparison_of_strings_containing_null_char.sql b/tests/queries/0_stateless/00469_comparison_of_strings_containing_null_char.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00469_comparison_of_strings_containing_null_char.sql
rename to tests/queries/0_stateless/00469_comparison_of_strings_containing_null_char.sql
diff --git a/dbms/tests/queries/0_stateless/00470_identifiers_in_double_quotes.reference b/tests/queries/0_stateless/00470_identifiers_in_double_quotes.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00470_identifiers_in_double_quotes.reference
rename to tests/queries/0_stateless/00470_identifiers_in_double_quotes.reference
diff --git a/dbms/tests/queries/0_stateless/00470_identifiers_in_double_quotes.sql b/tests/queries/0_stateless/00470_identifiers_in_double_quotes.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00470_identifiers_in_double_quotes.sql
rename to tests/queries/0_stateless/00470_identifiers_in_double_quotes.sql
diff --git a/dbms/tests/queries/0_stateless/00471_sql_style_quoting.reference b/tests/queries/0_stateless/00471_sql_style_quoting.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00471_sql_style_quoting.reference
rename to tests/queries/0_stateless/00471_sql_style_quoting.reference
diff --git a/dbms/tests/queries/0_stateless/00471_sql_style_quoting.sql b/tests/queries/0_stateless/00471_sql_style_quoting.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00471_sql_style_quoting.sql
rename to tests/queries/0_stateless/00471_sql_style_quoting.sql
diff --git a/dbms/tests/queries/0_stateless/00472_compare_uuid_with_constant_string.reference b/tests/queries/0_stateless/00472_compare_uuid_with_constant_string.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00472_compare_uuid_with_constant_string.reference
rename to tests/queries/0_stateless/00472_compare_uuid_with_constant_string.reference
diff --git a/dbms/tests/queries/0_stateless/00472_compare_uuid_with_constant_string.sql b/tests/queries/0_stateless/00472_compare_uuid_with_constant_string.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00472_compare_uuid_with_constant_string.sql
rename to tests/queries/0_stateless/00472_compare_uuid_with_constant_string.sql
diff --git a/dbms/tests/queries/0_stateless/00472_create_view_if_not_exists.reference b/tests/queries/0_stateless/00472_create_view_if_not_exists.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00472_create_view_if_not_exists.reference
rename to tests/queries/0_stateless/00472_create_view_if_not_exists.reference
diff --git a/dbms/tests/queries/0_stateless/00472_create_view_if_not_exists.sql b/tests/queries/0_stateless/00472_create_view_if_not_exists.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00472_create_view_if_not_exists.sql
rename to tests/queries/0_stateless/00472_create_view_if_not_exists.sql
diff --git a/dbms/tests/queries/0_stateless/00473_output_format_json_quote_denormals.reference b/tests/queries/0_stateless/00473_output_format_json_quote_denormals.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00473_output_format_json_quote_denormals.reference
rename to tests/queries/0_stateless/00473_output_format_json_quote_denormals.reference
diff --git a/dbms/tests/queries/0_stateless/00473_output_format_json_quote_denormals.sh b/tests/queries/0_stateless/00473_output_format_json_quote_denormals.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00473_output_format_json_quote_denormals.sh
rename to tests/queries/0_stateless/00473_output_format_json_quote_denormals.sh
diff --git a/dbms/tests/queries/0_stateless/00474_readonly_settings.reference b/tests/queries/0_stateless/00474_readonly_settings.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00474_readonly_settings.reference
rename to tests/queries/0_stateless/00474_readonly_settings.reference
diff --git a/dbms/tests/queries/0_stateless/00474_readonly_settings.sh b/tests/queries/0_stateless/00474_readonly_settings.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00474_readonly_settings.sh
rename to tests/queries/0_stateless/00474_readonly_settings.sh
diff --git a/dbms/tests/queries/0_stateless/00475_in_join_db_table.reference b/tests/queries/0_stateless/00475_in_join_db_table.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00475_in_join_db_table.reference
rename to tests/queries/0_stateless/00475_in_join_db_table.reference
diff --git a/dbms/tests/queries/0_stateless/00475_in_join_db_table.sql b/tests/queries/0_stateless/00475_in_join_db_table.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00475_in_join_db_table.sql
rename to tests/queries/0_stateless/00475_in_join_db_table.sql
diff --git a/dbms/tests/queries/0_stateless/00476_pretty_formats_and_widths.reference b/tests/queries/0_stateless/00476_pretty_formats_and_widths.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00476_pretty_formats_and_widths.reference
rename to tests/queries/0_stateless/00476_pretty_formats_and_widths.reference
diff --git a/dbms/tests/queries/0_stateless/00476_pretty_formats_and_widths.sql b/tests/queries/0_stateless/00476_pretty_formats_and_widths.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00476_pretty_formats_and_widths.sql
rename to tests/queries/0_stateless/00476_pretty_formats_and_widths.sql
diff --git a/dbms/tests/queries/0_stateless/00477_parsing_data_types.reference b/tests/queries/0_stateless/00477_parsing_data_types.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00477_parsing_data_types.reference
rename to tests/queries/0_stateless/00477_parsing_data_types.reference
diff --git a/dbms/tests/queries/0_stateless/00477_parsing_data_types.sql b/tests/queries/0_stateless/00477_parsing_data_types.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00477_parsing_data_types.sql
rename to tests/queries/0_stateless/00477_parsing_data_types.sql
diff --git a/dbms/tests/queries/0_stateless/00479_date_and_datetime_to_number.reference b/tests/queries/0_stateless/00479_date_and_datetime_to_number.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00479_date_and_datetime_to_number.reference
rename to tests/queries/0_stateless/00479_date_and_datetime_to_number.reference
diff --git a/dbms/tests/queries/0_stateless/00479_date_and_datetime_to_number.sql b/tests/queries/0_stateless/00479_date_and_datetime_to_number.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00479_date_and_datetime_to_number.sql
rename to tests/queries/0_stateless/00479_date_and_datetime_to_number.sql
diff --git a/dbms/tests/queries/0_stateless/00480_mac_addresses.reference b/tests/queries/0_stateless/00480_mac_addresses.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00480_mac_addresses.reference
rename to tests/queries/0_stateless/00480_mac_addresses.reference
diff --git a/dbms/tests/queries/0_stateless/00480_mac_addresses.sql b/tests/queries/0_stateless/00480_mac_addresses.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00480_mac_addresses.sql
rename to tests/queries/0_stateless/00480_mac_addresses.sql
diff --git a/dbms/tests/queries/0_stateless/00481_create_view_for_null.reference b/tests/queries/0_stateless/00481_create_view_for_null.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00481_create_view_for_null.reference
rename to tests/queries/0_stateless/00481_create_view_for_null.reference
diff --git a/dbms/tests/queries/0_stateless/00481_create_view_for_null.sql b/tests/queries/0_stateless/00481_create_view_for_null.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00481_create_view_for_null.sql
rename to tests/queries/0_stateless/00481_create_view_for_null.sql
diff --git a/dbms/tests/queries/0_stateless/00481_reading_from_last_granula.reference b/tests/queries/0_stateless/00481_reading_from_last_granula.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00481_reading_from_last_granula.reference
rename to tests/queries/0_stateless/00481_reading_from_last_granula.reference
diff --git a/dbms/tests/queries/0_stateless/00481_reading_from_last_granula.sql b/tests/queries/0_stateless/00481_reading_from_last_granula.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00481_reading_from_last_granula.sql
rename to tests/queries/0_stateless/00481_reading_from_last_granula.sql
diff --git a/dbms/tests/queries/0_stateless/00482_subqueries_and_aliases.reference b/tests/queries/0_stateless/00482_subqueries_and_aliases.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00482_subqueries_and_aliases.reference
rename to tests/queries/0_stateless/00482_subqueries_and_aliases.reference
diff --git a/dbms/tests/queries/0_stateless/00482_subqueries_and_aliases.sql b/tests/queries/0_stateless/00482_subqueries_and_aliases.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00482_subqueries_and_aliases.sql
rename to tests/queries/0_stateless/00482_subqueries_and_aliases.sql
diff --git a/dbms/tests/queries/0_stateless/00483_cast_syntax.reference b/tests/queries/0_stateless/00483_cast_syntax.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00483_cast_syntax.reference
rename to tests/queries/0_stateless/00483_cast_syntax.reference
diff --git a/dbms/tests/queries/0_stateless/00483_cast_syntax.sql b/tests/queries/0_stateless/00483_cast_syntax.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00483_cast_syntax.sql
rename to tests/queries/0_stateless/00483_cast_syntax.sql
diff --git a/dbms/tests/queries/0_stateless/00483_reading_from_array_structure.reference b/tests/queries/0_stateless/00483_reading_from_array_structure.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00483_reading_from_array_structure.reference
rename to tests/queries/0_stateless/00483_reading_from_array_structure.reference
diff --git a/dbms/tests/queries/0_stateless/00483_reading_from_array_structure.sql b/tests/queries/0_stateless/00483_reading_from_array_structure.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00483_reading_from_array_structure.sql
rename to tests/queries/0_stateless/00483_reading_from_array_structure.sql
diff --git a/dbms/tests/queries/0_stateless/00484_preferred_max_column_in_block_size_bytes.reference b/tests/queries/0_stateless/00484_preferred_max_column_in_block_size_bytes.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00484_preferred_max_column_in_block_size_bytes.reference
rename to tests/queries/0_stateless/00484_preferred_max_column_in_block_size_bytes.reference
diff --git a/dbms/tests/queries/0_stateless/00484_preferred_max_column_in_block_size_bytes.sql b/tests/queries/0_stateless/00484_preferred_max_column_in_block_size_bytes.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00484_preferred_max_column_in_block_size_bytes.sql
rename to tests/queries/0_stateless/00484_preferred_max_column_in_block_size_bytes.sql
diff --git a/dbms/tests/queries/0_stateless/00485_http_insert_format.reference b/tests/queries/0_stateless/00485_http_insert_format.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00485_http_insert_format.reference
rename to tests/queries/0_stateless/00485_http_insert_format.reference
diff --git a/dbms/tests/queries/0_stateless/00485_http_insert_format.sh b/tests/queries/0_stateless/00485_http_insert_format.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00485_http_insert_format.sh
rename to tests/queries/0_stateless/00485_http_insert_format.sh
diff --git a/dbms/tests/queries/0_stateless/00486_if_fixed_string.reference b/tests/queries/0_stateless/00486_if_fixed_string.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00486_if_fixed_string.reference
rename to tests/queries/0_stateless/00486_if_fixed_string.reference
diff --git a/dbms/tests/queries/0_stateless/00486_if_fixed_string.sql b/tests/queries/0_stateless/00486_if_fixed_string.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00486_if_fixed_string.sql
rename to tests/queries/0_stateless/00486_if_fixed_string.sql
diff --git a/dbms/tests/queries/0_stateless/00487_if_array_fixed_string.reference b/tests/queries/0_stateless/00487_if_array_fixed_string.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00487_if_array_fixed_string.reference
rename to tests/queries/0_stateless/00487_if_array_fixed_string.reference
diff --git a/dbms/tests/queries/0_stateless/00487_if_array_fixed_string.sql b/tests/queries/0_stateless/00487_if_array_fixed_string.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00487_if_array_fixed_string.sql
rename to tests/queries/0_stateless/00487_if_array_fixed_string.sql
diff --git a/dbms/tests/queries/0_stateless/00488_non_ascii_column_names.reference b/tests/queries/0_stateless/00488_non_ascii_column_names.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00488_non_ascii_column_names.reference
rename to tests/queries/0_stateless/00488_non_ascii_column_names.reference
diff --git a/dbms/tests/queries/0_stateless/00488_non_ascii_column_names.sql b/tests/queries/0_stateless/00488_non_ascii_column_names.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00488_non_ascii_column_names.sql
rename to tests/queries/0_stateless/00488_non_ascii_column_names.sql
diff --git a/dbms/tests/queries/0_stateless/00489_pk_subexpression.reference b/tests/queries/0_stateless/00489_pk_subexpression.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00489_pk_subexpression.reference
rename to tests/queries/0_stateless/00489_pk_subexpression.reference
diff --git a/dbms/tests/queries/0_stateless/00489_pk_subexpression.sql b/tests/queries/0_stateless/00489_pk_subexpression.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00489_pk_subexpression.sql
rename to tests/queries/0_stateless/00489_pk_subexpression.sql
diff --git a/dbms/tests/queries/0_stateless/00490_special_line_separators_and_characters_outside_of_bmp.reference b/tests/queries/0_stateless/00490_special_line_separators_and_characters_outside_of_bmp.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00490_special_line_separators_and_characters_outside_of_bmp.reference
rename to tests/queries/0_stateless/00490_special_line_separators_and_characters_outside_of_bmp.reference
diff --git a/dbms/tests/queries/0_stateless/00490_special_line_separators_and_characters_outside_of_bmp.sql b/tests/queries/0_stateless/00490_special_line_separators_and_characters_outside_of_bmp.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00490_special_line_separators_and_characters_outside_of_bmp.sql
rename to tests/queries/0_stateless/00490_special_line_separators_and_characters_outside_of_bmp.sql
diff --git a/dbms/tests/queries/0_stateless/00490_with_select.reference b/tests/queries/0_stateless/00490_with_select.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00490_with_select.reference
rename to tests/queries/0_stateless/00490_with_select.reference
diff --git a/dbms/tests/queries/0_stateless/00490_with_select.sql b/tests/queries/0_stateless/00490_with_select.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00490_with_select.sql
rename to tests/queries/0_stateless/00490_with_select.sql
diff --git a/dbms/tests/queries/0_stateless/00491_shard_distributed_and_aliases_in_where_having.reference b/tests/queries/0_stateless/00491_shard_distributed_and_aliases_in_where_having.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00491_shard_distributed_and_aliases_in_where_having.reference
rename to tests/queries/0_stateless/00491_shard_distributed_and_aliases_in_where_having.reference
diff --git a/dbms/tests/queries/0_stateless/00491_shard_distributed_and_aliases_in_where_having.sql b/tests/queries/0_stateless/00491_shard_distributed_and_aliases_in_where_having.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00491_shard_distributed_and_aliases_in_where_having.sql
rename to tests/queries/0_stateless/00491_shard_distributed_and_aliases_in_where_having.sql
diff --git a/dbms/tests/queries/0_stateless/00492_drop_temporary_table.reference b/tests/queries/0_stateless/00492_drop_temporary_table.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00492_drop_temporary_table.reference
rename to tests/queries/0_stateless/00492_drop_temporary_table.reference
diff --git a/dbms/tests/queries/0_stateless/00492_drop_temporary_table.sql b/tests/queries/0_stateless/00492_drop_temporary_table.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00492_drop_temporary_table.sql
rename to tests/queries/0_stateless/00492_drop_temporary_table.sql
diff --git a/dbms/tests/queries/0_stateless/00493_substring_of_fixedstring.reference b/tests/queries/0_stateless/00493_substring_of_fixedstring.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00493_substring_of_fixedstring.reference
rename to tests/queries/0_stateless/00493_substring_of_fixedstring.reference
diff --git a/dbms/tests/queries/0_stateless/00493_substring_of_fixedstring.sql b/tests/queries/0_stateless/00493_substring_of_fixedstring.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00493_substring_of_fixedstring.sql
rename to tests/queries/0_stateless/00493_substring_of_fixedstring.sql
diff --git a/dbms/tests/queries/0_stateless/00494_shard_alias_substitution_bug.reference b/tests/queries/0_stateless/00494_shard_alias_substitution_bug.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00494_shard_alias_substitution_bug.reference
rename to tests/queries/0_stateless/00494_shard_alias_substitution_bug.reference
diff --git a/dbms/tests/queries/0_stateless/00494_shard_alias_substitution_bug.sql b/tests/queries/0_stateless/00494_shard_alias_substitution_bug.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00494_shard_alias_substitution_bug.sql
rename to tests/queries/0_stateless/00494_shard_alias_substitution_bug.sql
diff --git a/dbms/tests/queries/0_stateless/00495_reading_const_zero_column.reference b/tests/queries/0_stateless/00495_reading_const_zero_column.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00495_reading_const_zero_column.reference
rename to tests/queries/0_stateless/00495_reading_const_zero_column.reference
diff --git a/dbms/tests/queries/0_stateless/00495_reading_const_zero_column.sql b/tests/queries/0_stateless/00495_reading_const_zero_column.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00495_reading_const_zero_column.sql
rename to tests/queries/0_stateless/00495_reading_const_zero_column.sql
diff --git a/dbms/tests/queries/0_stateless/00496_substring_negative_offset.reference b/tests/queries/0_stateless/00496_substring_negative_offset.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00496_substring_negative_offset.reference
rename to tests/queries/0_stateless/00496_substring_negative_offset.reference
diff --git a/dbms/tests/queries/0_stateless/00496_substring_negative_offset.sql b/tests/queries/0_stateless/00496_substring_negative_offset.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00496_substring_negative_offset.sql
rename to tests/queries/0_stateless/00496_substring_negative_offset.sql
diff --git a/dbms/tests/queries/0_stateless/00497_whitespaces_in_insert.reference b/tests/queries/0_stateless/00497_whitespaces_in_insert.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00497_whitespaces_in_insert.reference
rename to tests/queries/0_stateless/00497_whitespaces_in_insert.reference
diff --git a/dbms/tests/queries/0_stateless/00497_whitespaces_in_insert.sh b/tests/queries/0_stateless/00497_whitespaces_in_insert.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00497_whitespaces_in_insert.sh
rename to tests/queries/0_stateless/00497_whitespaces_in_insert.sh
diff --git a/dbms/tests/queries/0_stateless/00498_array_functions_concat_slice_push_pop.reference b/tests/queries/0_stateless/00498_array_functions_concat_slice_push_pop.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00498_array_functions_concat_slice_push_pop.reference
rename to tests/queries/0_stateless/00498_array_functions_concat_slice_push_pop.reference
diff --git a/dbms/tests/queries/0_stateless/00498_array_functions_concat_slice_push_pop.sql b/tests/queries/0_stateless/00498_array_functions_concat_slice_push_pop.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00498_array_functions_concat_slice_push_pop.sql
rename to tests/queries/0_stateless/00498_array_functions_concat_slice_push_pop.sql
diff --git a/dbms/tests/queries/0_stateless/00498_bitwise_aggregate_functions.reference b/tests/queries/0_stateless/00498_bitwise_aggregate_functions.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00498_bitwise_aggregate_functions.reference
rename to tests/queries/0_stateless/00498_bitwise_aggregate_functions.reference
diff --git a/dbms/tests/queries/0_stateless/00498_bitwise_aggregate_functions.sql b/tests/queries/0_stateless/00498_bitwise_aggregate_functions.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00498_bitwise_aggregate_functions.sql
rename to tests/queries/0_stateless/00498_bitwise_aggregate_functions.sql
diff --git a/dbms/tests/queries/0_stateless/00499_json_enum_insert.reference b/tests/queries/0_stateless/00499_json_enum_insert.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00499_json_enum_insert.reference
rename to tests/queries/0_stateless/00499_json_enum_insert.reference
diff --git a/dbms/tests/queries/0_stateless/00499_json_enum_insert.sql b/tests/queries/0_stateless/00499_json_enum_insert.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00499_json_enum_insert.sql
rename to tests/queries/0_stateless/00499_json_enum_insert.sql
diff --git a/dbms/tests/queries/0_stateless/00500_point_in_polygon.reference b/tests/queries/0_stateless/00500_point_in_polygon.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00500_point_in_polygon.reference
rename to tests/queries/0_stateless/00500_point_in_polygon.reference
diff --git a/dbms/tests/queries/0_stateless/00500_point_in_polygon.sql b/tests/queries/0_stateless/00500_point_in_polygon.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00500_point_in_polygon.sql
rename to tests/queries/0_stateless/00500_point_in_polygon.sql
diff --git a/dbms/tests/queries/0_stateless/00500_point_in_polygon_bug.reference b/tests/queries/0_stateless/00500_point_in_polygon_bug.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00500_point_in_polygon_bug.reference
rename to tests/queries/0_stateless/00500_point_in_polygon_bug.reference
diff --git a/dbms/tests/queries/0_stateless/00500_point_in_polygon_bug.sql b/tests/queries/0_stateless/00500_point_in_polygon_bug.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00500_point_in_polygon_bug.sql
rename to tests/queries/0_stateless/00500_point_in_polygon_bug.sql
diff --git a/dbms/tests/queries/0_stateless/00500_point_in_polygon_bug_2.reference b/tests/queries/0_stateless/00500_point_in_polygon_bug_2.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00500_point_in_polygon_bug_2.reference
rename to tests/queries/0_stateless/00500_point_in_polygon_bug_2.reference
diff --git a/dbms/tests/queries/0_stateless/00500_point_in_polygon_bug_2.sql b/tests/queries/0_stateless/00500_point_in_polygon_bug_2.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00500_point_in_polygon_bug_2.sql
rename to tests/queries/0_stateless/00500_point_in_polygon_bug_2.sql
diff --git a/dbms/tests/queries/0_stateless/00500_point_in_polygon_bug_3_linestring_rotation_precision.reference b/tests/queries/0_stateless/00500_point_in_polygon_bug_3_linestring_rotation_precision.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00500_point_in_polygon_bug_3_linestring_rotation_precision.reference
rename to tests/queries/0_stateless/00500_point_in_polygon_bug_3_linestring_rotation_precision.reference
diff --git a/dbms/tests/queries/0_stateless/00500_point_in_polygon_bug_3_linestring_rotation_precision.sql b/tests/queries/0_stateless/00500_point_in_polygon_bug_3_linestring_rotation_precision.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00500_point_in_polygon_bug_3_linestring_rotation_precision.sql
rename to tests/queries/0_stateless/00500_point_in_polygon_bug_3_linestring_rotation_precision.sql
diff --git a/dbms/tests/queries/0_stateless/00501_http_head.reference b/tests/queries/0_stateless/00501_http_head.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00501_http_head.reference
rename to tests/queries/0_stateless/00501_http_head.reference
diff --git a/dbms/tests/queries/0_stateless/00501_http_head.sh b/tests/queries/0_stateless/00501_http_head.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00501_http_head.sh
rename to tests/queries/0_stateless/00501_http_head.sh
diff --git a/dbms/tests/queries/0_stateless/00502_custom_partitioning_local.reference b/tests/queries/0_stateless/00502_custom_partitioning_local.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00502_custom_partitioning_local.reference
rename to tests/queries/0_stateless/00502_custom_partitioning_local.reference
diff --git a/dbms/tests/queries/0_stateless/00502_custom_partitioning_local.sql b/tests/queries/0_stateless/00502_custom_partitioning_local.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00502_custom_partitioning_local.sql
rename to tests/queries/0_stateless/00502_custom_partitioning_local.sql
diff --git a/dbms/tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper.reference b/tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper.reference
rename to tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper.reference
diff --git a/dbms/tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper.sql b/tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper.sql
rename to tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper.sql
diff --git a/dbms/tests/queries/0_stateless/00502_string_concat_with_array.reference b/tests/queries/0_stateless/00502_string_concat_with_array.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00502_string_concat_with_array.reference
rename to tests/queries/0_stateless/00502_string_concat_with_array.reference
diff --git a/dbms/tests/queries/0_stateless/00502_string_concat_with_array.sql b/tests/queries/0_stateless/00502_string_concat_with_array.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00502_string_concat_with_array.sql
rename to tests/queries/0_stateless/00502_string_concat_with_array.sql
diff --git a/dbms/tests/queries/0_stateless/00502_sum_map.reference b/tests/queries/0_stateless/00502_sum_map.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00502_sum_map.reference
rename to tests/queries/0_stateless/00502_sum_map.reference
diff --git a/dbms/tests/queries/0_stateless/00502_sum_map.sql b/tests/queries/0_stateless/00502_sum_map.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00502_sum_map.sql
rename to tests/queries/0_stateless/00502_sum_map.sql
diff --git a/dbms/tests/queries/0_stateless/00503_cast_const_nullable.reference b/tests/queries/0_stateless/00503_cast_const_nullable.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00503_cast_const_nullable.reference
rename to tests/queries/0_stateless/00503_cast_const_nullable.reference
diff --git a/dbms/tests/queries/0_stateless/00503_cast_const_nullable.sql b/tests/queries/0_stateless/00503_cast_const_nullable.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00503_cast_const_nullable.sql
rename to tests/queries/0_stateless/00503_cast_const_nullable.sql
diff --git a/dbms/tests/queries/0_stateless/00504_insert_miss_columns.reference b/tests/queries/0_stateless/00504_insert_miss_columns.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00504_insert_miss_columns.reference
rename to tests/queries/0_stateless/00504_insert_miss_columns.reference
diff --git a/dbms/tests/queries/0_stateless/00504_insert_miss_columns.sh b/tests/queries/0_stateless/00504_insert_miss_columns.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00504_insert_miss_columns.sh
rename to tests/queries/0_stateless/00504_insert_miss_columns.sh
diff --git a/dbms/tests/queries/0_stateless/00504_mergetree_arrays_rw.reference b/tests/queries/0_stateless/00504_mergetree_arrays_rw.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00504_mergetree_arrays_rw.reference
rename to tests/queries/0_stateless/00504_mergetree_arrays_rw.reference
diff --git a/dbms/tests/queries/0_stateless/00504_mergetree_arrays_rw.sql b/tests/queries/0_stateless/00504_mergetree_arrays_rw.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00504_mergetree_arrays_rw.sql
rename to tests/queries/0_stateless/00504_mergetree_arrays_rw.sql
diff --git a/dbms/tests/queries/0_stateless/00505_distributed_secure.data b/tests/queries/0_stateless/00505_distributed_secure.data
similarity index 100%
rename from dbms/tests/queries/0_stateless/00505_distributed_secure.data
rename to tests/queries/0_stateless/00505_distributed_secure.data
diff --git a/dbms/tests/queries/0_stateless/00505_secure.reference b/tests/queries/0_stateless/00505_secure.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00505_secure.reference
rename to tests/queries/0_stateless/00505_secure.reference
diff --git a/dbms/tests/queries/0_stateless/00505_secure.sh b/tests/queries/0_stateless/00505_secure.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00505_secure.sh
rename to tests/queries/0_stateless/00505_secure.sh
diff --git a/dbms/tests/queries/0_stateless/00505_shard_secure.reference b/tests/queries/0_stateless/00505_shard_secure.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00505_shard_secure.reference
rename to tests/queries/0_stateless/00505_shard_secure.reference
diff --git a/dbms/tests/queries/0_stateless/00505_shard_secure.sh b/tests/queries/0_stateless/00505_shard_secure.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00505_shard_secure.sh
rename to tests/queries/0_stateless/00505_shard_secure.sh
diff --git a/dbms/tests/queries/0_stateless/00506_shard_global_in_union.reference b/tests/queries/0_stateless/00506_shard_global_in_union.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00506_shard_global_in_union.reference
rename to tests/queries/0_stateless/00506_shard_global_in_union.reference
diff --git a/dbms/tests/queries/0_stateless/00506_shard_global_in_union.sql b/tests/queries/0_stateless/00506_shard_global_in_union.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00506_shard_global_in_union.sql
rename to tests/queries/0_stateless/00506_shard_global_in_union.sql
diff --git a/dbms/tests/queries/0_stateless/00506_union_distributed.reference b/tests/queries/0_stateless/00506_union_distributed.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00506_union_distributed.reference
rename to tests/queries/0_stateless/00506_union_distributed.reference
diff --git a/dbms/tests/queries/0_stateless/00506_union_distributed.sql b/tests/queries/0_stateless/00506_union_distributed.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00506_union_distributed.sql
rename to tests/queries/0_stateless/00506_union_distributed.sql
diff --git a/dbms/tests/queries/0_stateless/00507_array_no_params.reference b/tests/queries/0_stateless/00507_array_no_params.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00507_array_no_params.reference
rename to tests/queries/0_stateless/00507_array_no_params.reference
diff --git a/dbms/tests/queries/0_stateless/00507_array_no_params.sh b/tests/queries/0_stateless/00507_array_no_params.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00507_array_no_params.sh
rename to tests/queries/0_stateless/00507_array_no_params.sh
diff --git a/dbms/tests/queries/0_stateless/00507_sumwithoverflow.reference b/tests/queries/0_stateless/00507_sumwithoverflow.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00507_sumwithoverflow.reference
rename to tests/queries/0_stateless/00507_sumwithoverflow.reference
diff --git a/dbms/tests/queries/0_stateless/00507_sumwithoverflow.sql b/tests/queries/0_stateless/00507_sumwithoverflow.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00507_sumwithoverflow.sql
rename to tests/queries/0_stateless/00507_sumwithoverflow.sql
diff --git a/dbms/tests/queries/0_stateless/00508_materialized_view_to.reference b/tests/queries/0_stateless/00508_materialized_view_to.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00508_materialized_view_to.reference
rename to tests/queries/0_stateless/00508_materialized_view_to.reference
diff --git a/dbms/tests/queries/0_stateless/00508_materialized_view_to.sql b/tests/queries/0_stateless/00508_materialized_view_to.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00508_materialized_view_to.sql
rename to tests/queries/0_stateless/00508_materialized_view_to.sql
diff --git a/dbms/tests/queries/0_stateless/00509_extended_storage_definition_syntax_zookeeper.reference b/tests/queries/0_stateless/00509_extended_storage_definition_syntax_zookeeper.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00509_extended_storage_definition_syntax_zookeeper.reference
rename to tests/queries/0_stateless/00509_extended_storage_definition_syntax_zookeeper.reference
diff --git a/dbms/tests/queries/0_stateless/00509_extended_storage_definition_syntax_zookeeper.sql b/tests/queries/0_stateless/00509_extended_storage_definition_syntax_zookeeper.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00509_extended_storage_definition_syntax_zookeeper.sql
rename to tests/queries/0_stateless/00509_extended_storage_definition_syntax_zookeeper.sql
diff --git a/dbms/tests/queries/0_stateless/00510_materizlized_view_and_deduplication_zookeeper.reference b/tests/queries/0_stateless/00510_materizlized_view_and_deduplication_zookeeper.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00510_materizlized_view_and_deduplication_zookeeper.reference
rename to tests/queries/0_stateless/00510_materizlized_view_and_deduplication_zookeeper.reference
diff --git a/dbms/tests/queries/0_stateless/00510_materizlized_view_and_deduplication_zookeeper.sql b/tests/queries/0_stateless/00510_materizlized_view_and_deduplication_zookeeper.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00510_materizlized_view_and_deduplication_zookeeper.sql
rename to tests/queries/0_stateless/00510_materizlized_view_and_deduplication_zookeeper.sql
diff --git a/dbms/tests/queries/0_stateless/00511_get_size_of_enum.reference b/tests/queries/0_stateless/00511_get_size_of_enum.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00511_get_size_of_enum.reference
rename to tests/queries/0_stateless/00511_get_size_of_enum.reference
diff --git a/dbms/tests/queries/0_stateless/00511_get_size_of_enum.sql b/tests/queries/0_stateless/00511_get_size_of_enum.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00511_get_size_of_enum.sql
rename to tests/queries/0_stateless/00511_get_size_of_enum.sql
diff --git a/dbms/tests/queries/0_stateless/00512_fractional_time_zones.reference b/tests/queries/0_stateless/00512_fractional_time_zones.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00512_fractional_time_zones.reference
rename to tests/queries/0_stateless/00512_fractional_time_zones.reference
diff --git a/dbms/tests/queries/0_stateless/00512_fractional_time_zones.sh b/tests/queries/0_stateless/00512_fractional_time_zones.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00512_fractional_time_zones.sh
rename to tests/queries/0_stateless/00512_fractional_time_zones.sh
diff --git a/dbms/tests/queries/0_stateless/00513_fractional_time_zones.reference b/tests/queries/0_stateless/00513_fractional_time_zones.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00513_fractional_time_zones.reference
rename to tests/queries/0_stateless/00513_fractional_time_zones.reference
diff --git a/dbms/tests/queries/0_stateless/00513_fractional_time_zones.sql b/tests/queries/0_stateless/00513_fractional_time_zones.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00513_fractional_time_zones.sql
rename to tests/queries/0_stateless/00513_fractional_time_zones.sql
diff --git a/dbms/tests/queries/0_stateless/00514_interval_operators.reference b/tests/queries/0_stateless/00514_interval_operators.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00514_interval_operators.reference
rename to tests/queries/0_stateless/00514_interval_operators.reference
diff --git a/dbms/tests/queries/0_stateless/00514_interval_operators.sql b/tests/queries/0_stateless/00514_interval_operators.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00514_interval_operators.sql
rename to tests/queries/0_stateless/00514_interval_operators.sql
diff --git a/dbms/tests/queries/0_stateless/00515_enhanced_time_zones.reference b/tests/queries/0_stateless/00515_enhanced_time_zones.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00515_enhanced_time_zones.reference
rename to tests/queries/0_stateless/00515_enhanced_time_zones.reference
diff --git a/dbms/tests/queries/0_stateless/00515_enhanced_time_zones.sql b/tests/queries/0_stateless/00515_enhanced_time_zones.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00515_enhanced_time_zones.sql
rename to tests/queries/0_stateless/00515_enhanced_time_zones.sql
diff --git a/dbms/tests/queries/0_stateless/00515_gcd_lcm.reference b/tests/queries/0_stateless/00515_gcd_lcm.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00515_gcd_lcm.reference
rename to tests/queries/0_stateless/00515_gcd_lcm.reference
diff --git a/dbms/tests/queries/0_stateless/00515_gcd_lcm.sql b/tests/queries/0_stateless/00515_gcd_lcm.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00515_gcd_lcm.sql
rename to tests/queries/0_stateless/00515_gcd_lcm.sql
diff --git a/dbms/tests/queries/0_stateless/00515_shard_desc_table_functions_and_subqueries.reference b/tests/queries/0_stateless/00515_shard_desc_table_functions_and_subqueries.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00515_shard_desc_table_functions_and_subqueries.reference
rename to tests/queries/0_stateless/00515_shard_desc_table_functions_and_subqueries.reference
diff --git a/dbms/tests/queries/0_stateless/00515_shard_desc_table_functions_and_subqueries.sql b/tests/queries/0_stateless/00515_shard_desc_table_functions_and_subqueries.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00515_shard_desc_table_functions_and_subqueries.sql
rename to tests/queries/0_stateless/00515_shard_desc_table_functions_and_subqueries.sql
diff --git a/dbms/tests/queries/0_stateless/00516_deduplication_after_drop_partition_zookeeper.reference b/tests/queries/0_stateless/00516_deduplication_after_drop_partition_zookeeper.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00516_deduplication_after_drop_partition_zookeeper.reference
rename to tests/queries/0_stateless/00516_deduplication_after_drop_partition_zookeeper.reference
diff --git a/dbms/tests/queries/0_stateless/00516_deduplication_after_drop_partition_zookeeper.sql b/tests/queries/0_stateless/00516_deduplication_after_drop_partition_zookeeper.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00516_deduplication_after_drop_partition_zookeeper.sql
rename to tests/queries/0_stateless/00516_deduplication_after_drop_partition_zookeeper.sql
diff --git a/dbms/tests/queries/0_stateless/00516_is_inf_nan.reference b/tests/queries/0_stateless/00516_is_inf_nan.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00516_is_inf_nan.reference
rename to tests/queries/0_stateless/00516_is_inf_nan.reference
diff --git a/dbms/tests/queries/0_stateless/00516_is_inf_nan.sql b/tests/queries/0_stateless/00516_is_inf_nan.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00516_is_inf_nan.sql
rename to tests/queries/0_stateless/00516_is_inf_nan.sql
diff --git a/dbms/tests/queries/0_stateless/00516_modulo.reference b/tests/queries/0_stateless/00516_modulo.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00516_modulo.reference
rename to tests/queries/0_stateless/00516_modulo.reference
diff --git a/dbms/tests/queries/0_stateless/00516_modulo.sql b/tests/queries/0_stateless/00516_modulo.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00516_modulo.sql
rename to tests/queries/0_stateless/00516_modulo.sql
diff --git
a/dbms/tests/queries/0_stateless/00517_date_parsing.reference b/tests/queries/0_stateless/00517_date_parsing.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00517_date_parsing.reference rename to tests/queries/0_stateless/00517_date_parsing.reference diff --git a/dbms/tests/queries/0_stateless/00517_date_parsing.sql b/tests/queries/0_stateless/00517_date_parsing.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00517_date_parsing.sql rename to tests/queries/0_stateless/00517_date_parsing.sql diff --git a/dbms/tests/queries/0_stateless/00518_extract_all_and_empty_matches.reference b/tests/queries/0_stateless/00518_extract_all_and_empty_matches.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00518_extract_all_and_empty_matches.reference rename to tests/queries/0_stateless/00518_extract_all_and_empty_matches.reference diff --git a/dbms/tests/queries/0_stateless/00518_extract_all_and_empty_matches.sql b/tests/queries/0_stateless/00518_extract_all_and_empty_matches.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00518_extract_all_and_empty_matches.sql rename to tests/queries/0_stateless/00518_extract_all_and_empty_matches.sql diff --git a/dbms/tests/queries/0_stateless/00519_create_as_select_from_temporary_table.reference b/tests/queries/0_stateless/00519_create_as_select_from_temporary_table.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00519_create_as_select_from_temporary_table.reference rename to tests/queries/0_stateless/00519_create_as_select_from_temporary_table.reference diff --git a/dbms/tests/queries/0_stateless/00519_create_as_select_from_temporary_table.sql b/tests/queries/0_stateless/00519_create_as_select_from_temporary_table.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00519_create_as_select_from_temporary_table.sql rename to tests/queries/0_stateless/00519_create_as_select_from_temporary_table.sql diff --git a/dbms/tests/queries/0_stateless/00520_http_nullable.reference b/tests/queries/0_stateless/00520_http_nullable.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00520_http_nullable.reference rename to tests/queries/0_stateless/00520_http_nullable.reference diff --git a/dbms/tests/queries/0_stateless/00520_http_nullable.sh b/tests/queries/0_stateless/00520_http_nullable.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00520_http_nullable.sh rename to tests/queries/0_stateless/00520_http_nullable.sh diff --git a/dbms/tests/queries/0_stateless/00520_tuple_values_interpreter.reference b/tests/queries/0_stateless/00520_tuple_values_interpreter.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00520_tuple_values_interpreter.reference rename to tests/queries/0_stateless/00520_tuple_values_interpreter.reference diff --git a/dbms/tests/queries/0_stateless/00520_tuple_values_interpreter.sql b/tests/queries/0_stateless/00520_tuple_values_interpreter.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00520_tuple_values_interpreter.sql rename to tests/queries/0_stateless/00520_tuple_values_interpreter.sql diff --git a/dbms/tests/queries/0_stateless/00521_multidimensional.reference b/tests/queries/0_stateless/00521_multidimensional.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00521_multidimensional.reference rename to tests/queries/0_stateless/00521_multidimensional.reference diff --git 
a/dbms/tests/queries/0_stateless/00521_multidimensional.sql b/tests/queries/0_stateless/00521_multidimensional.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00521_multidimensional.sql rename to tests/queries/0_stateless/00521_multidimensional.sql diff --git a/dbms/tests/queries/0_stateless/00522_multidimensional.reference b/tests/queries/0_stateless/00522_multidimensional.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00522_multidimensional.reference rename to tests/queries/0_stateless/00522_multidimensional.reference diff --git a/dbms/tests/queries/0_stateless/00522_multidimensional.sql b/tests/queries/0_stateless/00522_multidimensional.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00522_multidimensional.sql rename to tests/queries/0_stateless/00522_multidimensional.sql diff --git a/dbms/tests/queries/0_stateless/00523_aggregate_functions_in_group_array.reference b/tests/queries/0_stateless/00523_aggregate_functions_in_group_array.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00523_aggregate_functions_in_group_array.reference rename to tests/queries/0_stateless/00523_aggregate_functions_in_group_array.reference diff --git a/dbms/tests/queries/0_stateless/00523_aggregate_functions_in_group_array.sql b/tests/queries/0_stateless/00523_aggregate_functions_in_group_array.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00523_aggregate_functions_in_group_array.sql rename to tests/queries/0_stateless/00523_aggregate_functions_in_group_array.sql diff --git a/dbms/tests/queries/0_stateless/00524_time_intervals_months_underflow.reference b/tests/queries/0_stateless/00524_time_intervals_months_underflow.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00524_time_intervals_months_underflow.reference rename to tests/queries/0_stateless/00524_time_intervals_months_underflow.reference diff --git a/dbms/tests/queries/0_stateless/00524_time_intervals_months_underflow.sql b/tests/queries/0_stateless/00524_time_intervals_months_underflow.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00524_time_intervals_months_underflow.sql rename to tests/queries/0_stateless/00524_time_intervals_months_underflow.sql diff --git a/dbms/tests/queries/0_stateless/00525_aggregate_functions_of_nullable_that_return_non_nullable.reference b/tests/queries/0_stateless/00525_aggregate_functions_of_nullable_that_return_non_nullable.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00525_aggregate_functions_of_nullable_that_return_non_nullable.reference rename to tests/queries/0_stateless/00525_aggregate_functions_of_nullable_that_return_non_nullable.reference diff --git a/dbms/tests/queries/0_stateless/00525_aggregate_functions_of_nullable_that_return_non_nullable.sql b/tests/queries/0_stateless/00525_aggregate_functions_of_nullable_that_return_non_nullable.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00525_aggregate_functions_of_nullable_that_return_non_nullable.sql rename to tests/queries/0_stateless/00525_aggregate_functions_of_nullable_that_return_non_nullable.sql diff --git a/dbms/tests/queries/0_stateless/00526_array_join_with_arrays_of_nullable.reference b/tests/queries/0_stateless/00526_array_join_with_arrays_of_nullable.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00526_array_join_with_arrays_of_nullable.reference rename to 
tests/queries/0_stateless/00526_array_join_with_arrays_of_nullable.reference diff --git a/dbms/tests/queries/0_stateless/00526_array_join_with_arrays_of_nullable.sql b/tests/queries/0_stateless/00526_array_join_with_arrays_of_nullable.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00526_array_join_with_arrays_of_nullable.sql rename to tests/queries/0_stateless/00526_array_join_with_arrays_of_nullable.sql diff --git a/dbms/tests/queries/0_stateless/00527_totals_having_nullable.reference b/tests/queries/0_stateless/00527_totals_having_nullable.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00527_totals_having_nullable.reference rename to tests/queries/0_stateless/00527_totals_having_nullable.reference diff --git a/dbms/tests/queries/0_stateless/00527_totals_having_nullable.sql b/tests/queries/0_stateless/00527_totals_having_nullable.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00527_totals_having_nullable.sql rename to tests/queries/0_stateless/00527_totals_having_nullable.sql diff --git a/dbms/tests/queries/0_stateless/00528_const_of_nullable.reference b/tests/queries/0_stateless/00528_const_of_nullable.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00528_const_of_nullable.reference rename to tests/queries/0_stateless/00528_const_of_nullable.reference diff --git a/dbms/tests/queries/0_stateless/00528_const_of_nullable.sql b/tests/queries/0_stateless/00528_const_of_nullable.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00528_const_of_nullable.sql rename to tests/queries/0_stateless/00528_const_of_nullable.sql diff --git a/dbms/tests/queries/0_stateless/00529_orantius.reference b/tests/queries/0_stateless/00529_orantius.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00529_orantius.reference rename to tests/queries/0_stateless/00529_orantius.reference diff --git a/dbms/tests/queries/0_stateless/00529_orantius.sql b/tests/queries/0_stateless/00529_orantius.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00529_orantius.sql rename to tests/queries/0_stateless/00529_orantius.sql diff --git a/dbms/tests/queries/0_stateless/00530_arrays_of_nothing.reference b/tests/queries/0_stateless/00530_arrays_of_nothing.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00530_arrays_of_nothing.reference rename to tests/queries/0_stateless/00530_arrays_of_nothing.reference diff --git a/dbms/tests/queries/0_stateless/00530_arrays_of_nothing.sql b/tests/queries/0_stateless/00530_arrays_of_nothing.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00530_arrays_of_nothing.sql rename to tests/queries/0_stateless/00530_arrays_of_nothing.sql diff --git a/dbms/tests/queries/0_stateless/00531_aggregate_over_nullable.reference b/tests/queries/0_stateless/00531_aggregate_over_nullable.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00531_aggregate_over_nullable.reference rename to tests/queries/0_stateless/00531_aggregate_over_nullable.reference diff --git a/dbms/tests/queries/0_stateless/00531_aggregate_over_nullable.sql b/tests/queries/0_stateless/00531_aggregate_over_nullable.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00531_aggregate_over_nullable.sql rename to tests/queries/0_stateless/00531_aggregate_over_nullable.sql diff --git a/dbms/tests/queries/0_stateless/00531_client_ignore_error.reference 
b/tests/queries/0_stateless/00531_client_ignore_error.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00531_client_ignore_error.reference rename to tests/queries/0_stateless/00531_client_ignore_error.reference diff --git a/dbms/tests/queries/0_stateless/00531_client_ignore_error.sh b/tests/queries/0_stateless/00531_client_ignore_error.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00531_client_ignore_error.sh rename to tests/queries/0_stateless/00531_client_ignore_error.sh diff --git a/dbms/tests/queries/0_stateless/00532_topk_generic.reference b/tests/queries/0_stateless/00532_topk_generic.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00532_topk_generic.reference rename to tests/queries/0_stateless/00532_topk_generic.reference diff --git a/dbms/tests/queries/0_stateless/00532_topk_generic.sql b/tests/queries/0_stateless/00532_topk_generic.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00532_topk_generic.sql rename to tests/queries/0_stateless/00532_topk_generic.sql diff --git a/dbms/tests/queries/0_stateless/00533_uniq_array.reference b/tests/queries/0_stateless/00533_uniq_array.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00533_uniq_array.reference rename to tests/queries/0_stateless/00533_uniq_array.reference diff --git a/dbms/tests/queries/0_stateless/00533_uniq_array.sql b/tests/queries/0_stateless/00533_uniq_array.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00533_uniq_array.sql rename to tests/queries/0_stateless/00533_uniq_array.sql diff --git a/dbms/tests/queries/0_stateless/00534_client_ignore_error.reference b/tests/queries/0_stateless/00534_client_ignore_error.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00534_client_ignore_error.reference rename to tests/queries/0_stateless/00534_client_ignore_error.reference diff --git a/dbms/tests/queries/0_stateless/00534_client_ignore_error.sh b/tests/queries/0_stateless/00534_client_ignore_error.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00534_client_ignore_error.sh rename to tests/queries/0_stateless/00534_client_ignore_error.sh diff --git a/dbms/tests/queries/0_stateless/00534_exp10.reference b/tests/queries/0_stateless/00534_exp10.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00534_exp10.reference rename to tests/queries/0_stateless/00534_exp10.reference diff --git a/dbms/tests/queries/0_stateless/00534_exp10.sql b/tests/queries/0_stateless/00534_exp10.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00534_exp10.sql rename to tests/queries/0_stateless/00534_exp10.sql diff --git a/dbms/tests/queries/0_stateless/00534_filimonov.data b/tests/queries/0_stateless/00534_filimonov.data similarity index 100% rename from dbms/tests/queries/0_stateless/00534_filimonov.data rename to tests/queries/0_stateless/00534_filimonov.data diff --git a/dbms/tests/queries/0_stateless/00534_filimonov.reference b/tests/queries/0_stateless/00534_filimonov.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00534_filimonov.reference rename to tests/queries/0_stateless/00534_filimonov.reference diff --git a/dbms/tests/queries/0_stateless/00534_filimonov.sh b/tests/queries/0_stateless/00534_filimonov.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00534_filimonov.sh rename to tests/queries/0_stateless/00534_filimonov.sh diff --git 
a/dbms/tests/queries/0_stateless/00534_functions_bad_arguments.lib b/tests/queries/0_stateless/00534_functions_bad_arguments.lib similarity index 100% rename from dbms/tests/queries/0_stateless/00534_functions_bad_arguments.lib rename to tests/queries/0_stateless/00534_functions_bad_arguments.lib diff --git a/dbms/tests/queries/0_stateless/00534_functions_bad_arguments1.reference b/tests/queries/0_stateless/00534_functions_bad_arguments1.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00534_functions_bad_arguments1.reference rename to tests/queries/0_stateless/00534_functions_bad_arguments1.reference diff --git a/dbms/tests/queries/0_stateless/00534_functions_bad_arguments1.sh b/tests/queries/0_stateless/00534_functions_bad_arguments1.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00534_functions_bad_arguments1.sh rename to tests/queries/0_stateless/00534_functions_bad_arguments1.sh diff --git a/dbms/tests/queries/0_stateless/00534_functions_bad_arguments10.reference b/tests/queries/0_stateless/00534_functions_bad_arguments10.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00534_functions_bad_arguments10.reference rename to tests/queries/0_stateless/00534_functions_bad_arguments10.reference diff --git a/dbms/tests/queries/0_stateless/00534_functions_bad_arguments10.sh b/tests/queries/0_stateless/00534_functions_bad_arguments10.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00534_functions_bad_arguments10.sh rename to tests/queries/0_stateless/00534_functions_bad_arguments10.sh diff --git a/dbms/tests/queries/0_stateless/00534_functions_bad_arguments11.reference b/tests/queries/0_stateless/00534_functions_bad_arguments11.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00534_functions_bad_arguments11.reference rename to tests/queries/0_stateless/00534_functions_bad_arguments11.reference diff --git a/dbms/tests/queries/0_stateless/00534_functions_bad_arguments11.sh b/tests/queries/0_stateless/00534_functions_bad_arguments11.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00534_functions_bad_arguments11.sh rename to tests/queries/0_stateless/00534_functions_bad_arguments11.sh diff --git a/dbms/tests/queries/0_stateless/00534_functions_bad_arguments12.reference b/tests/queries/0_stateless/00534_functions_bad_arguments12.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00534_functions_bad_arguments12.reference rename to tests/queries/0_stateless/00534_functions_bad_arguments12.reference diff --git a/dbms/tests/queries/0_stateless/00534_functions_bad_arguments12.sh b/tests/queries/0_stateless/00534_functions_bad_arguments12.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00534_functions_bad_arguments12.sh rename to tests/queries/0_stateless/00534_functions_bad_arguments12.sh diff --git a/dbms/tests/queries/0_stateless/00534_functions_bad_arguments13.reference b/tests/queries/0_stateless/00534_functions_bad_arguments13.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00534_functions_bad_arguments13.reference rename to tests/queries/0_stateless/00534_functions_bad_arguments13.reference diff --git a/dbms/tests/queries/0_stateless/00534_functions_bad_arguments13.sh b/tests/queries/0_stateless/00534_functions_bad_arguments13.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00534_functions_bad_arguments13.sh rename to 
tests/queries/0_stateless/00534_functions_bad_arguments13.sh diff --git a/dbms/tests/queries/0_stateless/00534_functions_bad_arguments2.reference b/tests/queries/0_stateless/00534_functions_bad_arguments2.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00534_functions_bad_arguments2.reference rename to tests/queries/0_stateless/00534_functions_bad_arguments2.reference diff --git a/dbms/tests/queries/0_stateless/00534_functions_bad_arguments2.sh b/tests/queries/0_stateless/00534_functions_bad_arguments2.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00534_functions_bad_arguments2.sh rename to tests/queries/0_stateless/00534_functions_bad_arguments2.sh diff --git a/dbms/tests/queries/0_stateless/00534_functions_bad_arguments3.reference b/tests/queries/0_stateless/00534_functions_bad_arguments3.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00534_functions_bad_arguments3.reference rename to tests/queries/0_stateless/00534_functions_bad_arguments3.reference diff --git a/dbms/tests/queries/0_stateless/00534_functions_bad_arguments3.sh b/tests/queries/0_stateless/00534_functions_bad_arguments3.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00534_functions_bad_arguments3.sh rename to tests/queries/0_stateless/00534_functions_bad_arguments3.sh diff --git a/dbms/tests/queries/0_stateless/00534_functions_bad_arguments4.reference b/tests/queries/0_stateless/00534_functions_bad_arguments4.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00534_functions_bad_arguments4.reference rename to tests/queries/0_stateless/00534_functions_bad_arguments4.reference diff --git a/dbms/tests/queries/0_stateless/00534_functions_bad_arguments4.sh b/tests/queries/0_stateless/00534_functions_bad_arguments4.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00534_functions_bad_arguments4.sh rename to tests/queries/0_stateless/00534_functions_bad_arguments4.sh diff --git a/dbms/tests/queries/0_stateless/00534_functions_bad_arguments5.reference b/tests/queries/0_stateless/00534_functions_bad_arguments5.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00534_functions_bad_arguments5.reference rename to tests/queries/0_stateless/00534_functions_bad_arguments5.reference diff --git a/dbms/tests/queries/0_stateless/00534_functions_bad_arguments5.sh b/tests/queries/0_stateless/00534_functions_bad_arguments5.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00534_functions_bad_arguments5.sh rename to tests/queries/0_stateless/00534_functions_bad_arguments5.sh diff --git a/dbms/tests/queries/0_stateless/00534_functions_bad_arguments6.reference b/tests/queries/0_stateless/00534_functions_bad_arguments6.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00534_functions_bad_arguments6.reference rename to tests/queries/0_stateless/00534_functions_bad_arguments6.reference diff --git a/dbms/tests/queries/0_stateless/00534_functions_bad_arguments6.sh b/tests/queries/0_stateless/00534_functions_bad_arguments6.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00534_functions_bad_arguments6.sh rename to tests/queries/0_stateless/00534_functions_bad_arguments6.sh diff --git a/dbms/tests/queries/0_stateless/00534_functions_bad_arguments7.reference b/tests/queries/0_stateless/00534_functions_bad_arguments7.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00534_functions_bad_arguments7.reference 
rename to tests/queries/0_stateless/00534_functions_bad_arguments7.reference diff --git a/dbms/tests/queries/0_stateless/00534_functions_bad_arguments7.sh b/tests/queries/0_stateless/00534_functions_bad_arguments7.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00534_functions_bad_arguments7.sh rename to tests/queries/0_stateless/00534_functions_bad_arguments7.sh diff --git a/dbms/tests/queries/0_stateless/00534_functions_bad_arguments8.reference b/tests/queries/0_stateless/00534_functions_bad_arguments8.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00534_functions_bad_arguments8.reference rename to tests/queries/0_stateless/00534_functions_bad_arguments8.reference diff --git a/dbms/tests/queries/0_stateless/00534_functions_bad_arguments8.sh b/tests/queries/0_stateless/00534_functions_bad_arguments8.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00534_functions_bad_arguments8.sh rename to tests/queries/0_stateless/00534_functions_bad_arguments8.sh diff --git a/dbms/tests/queries/0_stateless/00534_functions_bad_arguments9.reference b/tests/queries/0_stateless/00534_functions_bad_arguments9.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00534_functions_bad_arguments9.reference rename to tests/queries/0_stateless/00534_functions_bad_arguments9.reference diff --git a/dbms/tests/queries/0_stateless/00534_functions_bad_arguments9.sh b/tests/queries/0_stateless/00534_functions_bad_arguments9.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00534_functions_bad_arguments9.sh rename to tests/queries/0_stateless/00534_functions_bad_arguments9.sh diff --git a/dbms/tests/queries/0_stateless/00535_parse_float_scientific.reference b/tests/queries/0_stateless/00535_parse_float_scientific.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00535_parse_float_scientific.reference rename to tests/queries/0_stateless/00535_parse_float_scientific.reference diff --git a/dbms/tests/queries/0_stateless/00535_parse_float_scientific.sql b/tests/queries/0_stateless/00535_parse_float_scientific.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00535_parse_float_scientific.sql rename to tests/queries/0_stateless/00535_parse_float_scientific.sql diff --git a/dbms/tests/queries/0_stateless/00536_int_exp.reference b/tests/queries/0_stateless/00536_int_exp.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00536_int_exp.reference rename to tests/queries/0_stateless/00536_int_exp.reference diff --git a/dbms/tests/queries/0_stateless/00536_int_exp.sql b/tests/queries/0_stateless/00536_int_exp.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00536_int_exp.sql rename to tests/queries/0_stateless/00536_int_exp.sql diff --git a/dbms/tests/queries/0_stateless/00537_quarters.reference b/tests/queries/0_stateless/00537_quarters.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00537_quarters.reference rename to tests/queries/0_stateless/00537_quarters.reference diff --git a/dbms/tests/queries/0_stateless/00537_quarters.sql b/tests/queries/0_stateless/00537_quarters.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00537_quarters.sql rename to tests/queries/0_stateless/00537_quarters.sql diff --git a/dbms/tests/queries/0_stateless/00538_datediff.reference b/tests/queries/0_stateless/00538_datediff.reference similarity index 100% rename from 
dbms/tests/queries/0_stateless/00538_datediff.reference rename to tests/queries/0_stateless/00538_datediff.reference diff --git a/dbms/tests/queries/0_stateless/00538_datediff.sql b/tests/queries/0_stateless/00538_datediff.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00538_datediff.sql rename to tests/queries/0_stateless/00538_datediff.sql diff --git a/dbms/tests/queries/0_stateless/00539_functions_for_working_with_json.reference b/tests/queries/0_stateless/00539_functions_for_working_with_json.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00539_functions_for_working_with_json.reference rename to tests/queries/0_stateless/00539_functions_for_working_with_json.reference diff --git a/dbms/tests/queries/0_stateless/00539_functions_for_working_with_json.sql b/tests/queries/0_stateless/00539_functions_for_working_with_json.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00539_functions_for_working_with_json.sql rename to tests/queries/0_stateless/00539_functions_for_working_with_json.sql diff --git a/dbms/tests/queries/0_stateless/00540_bad_data_types.reference b/tests/queries/0_stateless/00540_bad_data_types.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00540_bad_data_types.reference rename to tests/queries/0_stateless/00540_bad_data_types.reference diff --git a/dbms/tests/queries/0_stateless/00540_bad_data_types.sh b/tests/queries/0_stateless/00540_bad_data_types.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00540_bad_data_types.sh rename to tests/queries/0_stateless/00540_bad_data_types.sh diff --git a/dbms/tests/queries/0_stateless/00541_kahan_sum.reference b/tests/queries/0_stateless/00541_kahan_sum.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00541_kahan_sum.reference rename to tests/queries/0_stateless/00541_kahan_sum.reference diff --git a/dbms/tests/queries/0_stateless/00541_kahan_sum.sql b/tests/queries/0_stateless/00541_kahan_sum.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00541_kahan_sum.sql rename to tests/queries/0_stateless/00541_kahan_sum.sql diff --git a/dbms/tests/queries/0_stateless/00541_to_start_of_fifteen_minutes.reference b/tests/queries/0_stateless/00541_to_start_of_fifteen_minutes.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00541_to_start_of_fifteen_minutes.reference rename to tests/queries/0_stateless/00541_to_start_of_fifteen_minutes.reference diff --git a/dbms/tests/queries/0_stateless/00541_to_start_of_fifteen_minutes.sql b/tests/queries/0_stateless/00541_to_start_of_fifteen_minutes.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00541_to_start_of_fifteen_minutes.sql rename to tests/queries/0_stateless/00541_to_start_of_fifteen_minutes.sql diff --git a/dbms/tests/queries/0_stateless/00542_access_to_temporary_table_in_readonly_mode.reference b/tests/queries/0_stateless/00542_access_to_temporary_table_in_readonly_mode.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00542_access_to_temporary_table_in_readonly_mode.reference rename to tests/queries/0_stateless/00542_access_to_temporary_table_in_readonly_mode.reference diff --git a/dbms/tests/queries/0_stateless/00542_access_to_temporary_table_in_readonly_mode.sql b/tests/queries/0_stateless/00542_access_to_temporary_table_in_readonly_mode.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00542_access_to_temporary_table_in_readonly_mode.sql rename 
to tests/queries/0_stateless/00542_access_to_temporary_table_in_readonly_mode.sql diff --git a/dbms/tests/queries/0_stateless/00542_materialized_view_and_time_zone_tag.reference b/tests/queries/0_stateless/00542_materialized_view_and_time_zone_tag.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00542_materialized_view_and_time_zone_tag.reference rename to tests/queries/0_stateless/00542_materialized_view_and_time_zone_tag.reference diff --git a/dbms/tests/queries/0_stateless/00542_materialized_view_and_time_zone_tag.sql b/tests/queries/0_stateless/00542_materialized_view_and_time_zone_tag.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00542_materialized_view_and_time_zone_tag.sql rename to tests/queries/0_stateless/00542_materialized_view_and_time_zone_tag.sql diff --git a/dbms/tests/queries/0_stateless/00543_access_to_temporary_table_in_readonly_mode.reference b/tests/queries/0_stateless/00543_access_to_temporary_table_in_readonly_mode.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00543_access_to_temporary_table_in_readonly_mode.reference rename to tests/queries/0_stateless/00543_access_to_temporary_table_in_readonly_mode.reference diff --git a/dbms/tests/queries/0_stateless/00543_access_to_temporary_table_in_readonly_mode.sh b/tests/queries/0_stateless/00543_access_to_temporary_table_in_readonly_mode.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00543_access_to_temporary_table_in_readonly_mode.sh rename to tests/queries/0_stateless/00543_access_to_temporary_table_in_readonly_mode.sh diff --git a/dbms/tests/queries/0_stateless/00543_null_and_prewhere.reference b/tests/queries/0_stateless/00543_null_and_prewhere.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00543_null_and_prewhere.reference rename to tests/queries/0_stateless/00543_null_and_prewhere.reference diff --git a/dbms/tests/queries/0_stateless/00543_null_and_prewhere.sql b/tests/queries/0_stateless/00543_null_and_prewhere.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00543_null_and_prewhere.sql rename to tests/queries/0_stateless/00543_null_and_prewhere.sql diff --git a/dbms/tests/queries/0_stateless/00544_agg_foreach_of_two_arg.reference b/tests/queries/0_stateless/00544_agg_foreach_of_two_arg.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00544_agg_foreach_of_two_arg.reference rename to tests/queries/0_stateless/00544_agg_foreach_of_two_arg.reference diff --git a/dbms/tests/queries/0_stateless/00544_agg_foreach_of_two_arg.sql b/tests/queries/0_stateless/00544_agg_foreach_of_two_arg.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00544_agg_foreach_of_two_arg.sql rename to tests/queries/0_stateless/00544_agg_foreach_of_two_arg.sql diff --git a/dbms/tests/queries/0_stateless/00544_insert_with_select.reference b/tests/queries/0_stateless/00544_insert_with_select.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00544_insert_with_select.reference rename to tests/queries/0_stateless/00544_insert_with_select.reference diff --git a/dbms/tests/queries/0_stateless/00544_insert_with_select.sql b/tests/queries/0_stateless/00544_insert_with_select.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00544_insert_with_select.sql rename to tests/queries/0_stateless/00544_insert_with_select.sql diff --git a/dbms/tests/queries/0_stateless/00545_weird_aggregate_functions.reference 
b/tests/queries/0_stateless/00545_weird_aggregate_functions.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00545_weird_aggregate_functions.reference rename to tests/queries/0_stateless/00545_weird_aggregate_functions.reference diff --git a/dbms/tests/queries/0_stateless/00545_weird_aggregate_functions.sql b/tests/queries/0_stateless/00545_weird_aggregate_functions.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00545_weird_aggregate_functions.sql rename to tests/queries/0_stateless/00545_weird_aggregate_functions.sql diff --git a/dbms/tests/queries/0_stateless/00546_shard_tuple_element_formatting.reference b/tests/queries/0_stateless/00546_shard_tuple_element_formatting.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00546_shard_tuple_element_formatting.reference rename to tests/queries/0_stateless/00546_shard_tuple_element_formatting.reference diff --git a/dbms/tests/queries/0_stateless/00546_shard_tuple_element_formatting.sql b/tests/queries/0_stateless/00546_shard_tuple_element_formatting.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00546_shard_tuple_element_formatting.sql rename to tests/queries/0_stateless/00546_shard_tuple_element_formatting.sql diff --git a/dbms/tests/queries/0_stateless/00547_named_tuples.reference b/tests/queries/0_stateless/00547_named_tuples.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00547_named_tuples.reference rename to tests/queries/0_stateless/00547_named_tuples.reference diff --git a/dbms/tests/queries/0_stateless/00547_named_tuples.sql b/tests/queries/0_stateless/00547_named_tuples.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00547_named_tuples.sql rename to tests/queries/0_stateless/00547_named_tuples.sql diff --git a/dbms/tests/queries/0_stateless/00548_slice_of_nested.reference b/tests/queries/0_stateless/00548_slice_of_nested.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00548_slice_of_nested.reference rename to tests/queries/0_stateless/00548_slice_of_nested.reference diff --git a/dbms/tests/queries/0_stateless/00548_slice_of_nested.sql b/tests/queries/0_stateless/00548_slice_of_nested.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00548_slice_of_nested.sql rename to tests/queries/0_stateless/00548_slice_of_nested.sql diff --git a/dbms/tests/queries/0_stateless/00549_join_use_nulls.reference b/tests/queries/0_stateless/00549_join_use_nulls.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00549_join_use_nulls.reference rename to tests/queries/0_stateless/00549_join_use_nulls.reference diff --git a/dbms/tests/queries/0_stateless/00549_join_use_nulls.sql b/tests/queries/0_stateless/00549_join_use_nulls.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00549_join_use_nulls.sql rename to tests/queries/0_stateless/00549_join_use_nulls.sql diff --git a/dbms/tests/queries/0_stateless/00550_join_insert_select.reference b/tests/queries/0_stateless/00550_join_insert_select.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00550_join_insert_select.reference rename to tests/queries/0_stateless/00550_join_insert_select.reference diff --git a/dbms/tests/queries/0_stateless/00550_join_insert_select.sh b/tests/queries/0_stateless/00550_join_insert_select.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00550_join_insert_select.sh rename to 
tests/queries/0_stateless/00550_join_insert_select.sh diff --git a/dbms/tests/queries/0_stateless/00551_parse_or_null.reference b/tests/queries/0_stateless/00551_parse_or_null.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00551_parse_or_null.reference rename to tests/queries/0_stateless/00551_parse_or_null.reference diff --git a/dbms/tests/queries/0_stateless/00551_parse_or_null.sql b/tests/queries/0_stateless/00551_parse_or_null.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00551_parse_or_null.sql rename to tests/queries/0_stateless/00551_parse_or_null.sql diff --git a/dbms/tests/queries/0_stateless/00552_logical_functions_simple.reference b/tests/queries/0_stateless/00552_logical_functions_simple.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00552_logical_functions_simple.reference rename to tests/queries/0_stateless/00552_logical_functions_simple.reference diff --git a/dbms/tests/queries/0_stateless/00552_logical_functions_simple.sql b/tests/queries/0_stateless/00552_logical_functions_simple.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00552_logical_functions_simple.sql rename to tests/queries/0_stateless/00552_logical_functions_simple.sql diff --git a/dbms/tests/queries/0_stateless/00552_logical_functions_ternary.reference b/tests/queries/0_stateless/00552_logical_functions_ternary.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00552_logical_functions_ternary.reference rename to tests/queries/0_stateless/00552_logical_functions_ternary.reference diff --git a/dbms/tests/queries/0_stateless/00552_logical_functions_ternary.sql b/tests/queries/0_stateless/00552_logical_functions_ternary.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00552_logical_functions_ternary.sql rename to tests/queries/0_stateless/00552_logical_functions_ternary.sql diff --git a/dbms/tests/queries/0_stateless/00552_or_nullable.reference b/tests/queries/0_stateless/00552_or_nullable.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00552_or_nullable.reference rename to tests/queries/0_stateless/00552_or_nullable.reference diff --git a/dbms/tests/queries/0_stateless/00552_or_nullable.sql b/tests/queries/0_stateless/00552_or_nullable.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00552_or_nullable.sql rename to tests/queries/0_stateless/00552_or_nullable.sql diff --git a/dbms/tests/queries/0_stateless/00553_buff_exists_materlized_column.reference b/tests/queries/0_stateless/00553_buff_exists_materlized_column.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00553_buff_exists_materlized_column.reference rename to tests/queries/0_stateless/00553_buff_exists_materlized_column.reference diff --git a/dbms/tests/queries/0_stateless/00553_buff_exists_materlized_column.sql b/tests/queries/0_stateless/00553_buff_exists_materlized_column.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00553_buff_exists_materlized_column.sql rename to tests/queries/0_stateless/00553_buff_exists_materlized_column.sql diff --git a/dbms/tests/queries/0_stateless/00553_invalid_nested_name.reference b/tests/queries/0_stateless/00553_invalid_nested_name.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00553_invalid_nested_name.reference rename to tests/queries/0_stateless/00553_invalid_nested_name.reference diff --git a/dbms/tests/queries/0_stateless/00553_invalid_nested_name.sql 
b/tests/queries/0_stateless/00553_invalid_nested_name.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00553_invalid_nested_name.sql rename to tests/queries/0_stateless/00553_invalid_nested_name.sql diff --git a/dbms/tests/queries/0_stateless/00554_nested_and_table_engines.reference b/tests/queries/0_stateless/00554_nested_and_table_engines.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00554_nested_and_table_engines.reference rename to tests/queries/0_stateless/00554_nested_and_table_engines.reference diff --git a/dbms/tests/queries/0_stateless/00554_nested_and_table_engines.sql b/tests/queries/0_stateless/00554_nested_and_table_engines.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00554_nested_and_table_engines.sql rename to tests/queries/0_stateless/00554_nested_and_table_engines.sql diff --git a/dbms/tests/queries/0_stateless/00555_hasAll_hasAny.reference b/tests/queries/0_stateless/00555_hasAll_hasAny.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00555_hasAll_hasAny.reference rename to tests/queries/0_stateless/00555_hasAll_hasAny.reference diff --git a/dbms/tests/queries/0_stateless/00555_hasAll_hasAny.sql b/tests/queries/0_stateless/00555_hasAll_hasAny.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00555_hasAll_hasAny.sql rename to tests/queries/0_stateless/00555_hasAll_hasAny.sql diff --git a/dbms/tests/queries/0_stateless/00555_right_join_excessive_rows.reference b/tests/queries/0_stateless/00555_right_join_excessive_rows.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00555_right_join_excessive_rows.reference rename to tests/queries/0_stateless/00555_right_join_excessive_rows.reference diff --git a/dbms/tests/queries/0_stateless/00555_right_join_excessive_rows.sql b/tests/queries/0_stateless/00555_right_join_excessive_rows.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00555_right_join_excessive_rows.sql rename to tests/queries/0_stateless/00555_right_join_excessive_rows.sql diff --git a/dbms/tests/queries/0_stateless/00556_array_intersect.reference b/tests/queries/0_stateless/00556_array_intersect.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00556_array_intersect.reference rename to tests/queries/0_stateless/00556_array_intersect.reference diff --git a/dbms/tests/queries/0_stateless/00556_array_intersect.sql b/tests/queries/0_stateless/00556_array_intersect.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00556_array_intersect.sql rename to tests/queries/0_stateless/00556_array_intersect.sql diff --git a/dbms/tests/queries/0_stateless/00556_remove_columns_from_subquery.reference b/tests/queries/0_stateless/00556_remove_columns_from_subquery.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00556_remove_columns_from_subquery.reference rename to tests/queries/0_stateless/00556_remove_columns_from_subquery.reference diff --git a/dbms/tests/queries/0_stateless/00556_remove_columns_from_subquery.sql b/tests/queries/0_stateless/00556_remove_columns_from_subquery.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00556_remove_columns_from_subquery.sql rename to tests/queries/0_stateless/00556_remove_columns_from_subquery.sql diff --git a/dbms/tests/queries/0_stateless/00557_alter_null_storage_tables.reference b/tests/queries/0_stateless/00557_alter_null_storage_tables.reference similarity index 100% rename from 
dbms/tests/queries/0_stateless/00557_alter_null_storage_tables.reference rename to tests/queries/0_stateless/00557_alter_null_storage_tables.reference diff --git a/dbms/tests/queries/0_stateless/00557_alter_null_storage_tables.sql b/tests/queries/0_stateless/00557_alter_null_storage_tables.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00557_alter_null_storage_tables.sql rename to tests/queries/0_stateless/00557_alter_null_storage_tables.sql diff --git a/dbms/tests/queries/0_stateless/00557_array_resize.reference b/tests/queries/0_stateless/00557_array_resize.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00557_array_resize.reference rename to tests/queries/0_stateless/00557_array_resize.reference diff --git a/dbms/tests/queries/0_stateless/00557_array_resize.sql b/tests/queries/0_stateless/00557_array_resize.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00557_array_resize.sql rename to tests/queries/0_stateless/00557_array_resize.sql diff --git a/dbms/tests/queries/0_stateless/00557_remote_port.reference b/tests/queries/0_stateless/00557_remote_port.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00557_remote_port.reference rename to tests/queries/0_stateless/00557_remote_port.reference diff --git a/dbms/tests/queries/0_stateless/00557_remote_port.sh b/tests/queries/0_stateless/00557_remote_port.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00557_remote_port.sh rename to tests/queries/0_stateless/00557_remote_port.sh diff --git a/dbms/tests/queries/0_stateless/00558_aggregate_merge_totals_with_arenas.reference b/tests/queries/0_stateless/00558_aggregate_merge_totals_with_arenas.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00558_aggregate_merge_totals_with_arenas.reference rename to tests/queries/0_stateless/00558_aggregate_merge_totals_with_arenas.reference diff --git a/dbms/tests/queries/0_stateless/00558_aggregate_merge_totals_with_arenas.sql b/tests/queries/0_stateless/00558_aggregate_merge_totals_with_arenas.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00558_aggregate_merge_totals_with_arenas.sql rename to tests/queries/0_stateless/00558_aggregate_merge_totals_with_arenas.sql diff --git a/dbms/tests/queries/0_stateless/00558_parse_floats.reference b/tests/queries/0_stateless/00558_parse_floats.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00558_parse_floats.reference rename to tests/queries/0_stateless/00558_parse_floats.reference diff --git a/dbms/tests/queries/0_stateless/00558_parse_floats.sql b/tests/queries/0_stateless/00558_parse_floats.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00558_parse_floats.sql rename to tests/queries/0_stateless/00558_parse_floats.sql diff --git a/dbms/tests/queries/0_stateless/00559_filter_array_generic.reference b/tests/queries/0_stateless/00559_filter_array_generic.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00559_filter_array_generic.reference rename to tests/queries/0_stateless/00559_filter_array_generic.reference diff --git a/dbms/tests/queries/0_stateless/00559_filter_array_generic.sql b/tests/queries/0_stateless/00559_filter_array_generic.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00559_filter_array_generic.sql rename to tests/queries/0_stateless/00559_filter_array_generic.sql diff --git 
a/dbms/tests/queries/0_stateless/00560_float_leading_plus_in_exponent.reference b/tests/queries/0_stateless/00560_float_leading_plus_in_exponent.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00560_float_leading_plus_in_exponent.reference
rename to tests/queries/0_stateless/00560_float_leading_plus_in_exponent.reference
diff --git a/dbms/tests/queries/0_stateless/00560_float_leading_plus_in_exponent.sql b/tests/queries/0_stateless/00560_float_leading_plus_in_exponent.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00560_float_leading_plus_in_exponent.sql
rename to tests/queries/0_stateless/00560_float_leading_plus_in_exponent.sql
diff --git a/dbms/tests/queries/0_stateless/00561_storage_join.reference b/tests/queries/0_stateless/00561_storage_join.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00561_storage_join.reference
rename to tests/queries/0_stateless/00561_storage_join.reference
diff --git a/dbms/tests/queries/0_stateless/00561_storage_join.sql b/tests/queries/0_stateless/00561_storage_join.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00561_storage_join.sql
rename to tests/queries/0_stateless/00561_storage_join.sql
diff --git a/dbms/tests/queries/0_stateless/00562_in_subquery_merge_tree.reference b/tests/queries/0_stateless/00562_in_subquery_merge_tree.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00562_in_subquery_merge_tree.reference
rename to tests/queries/0_stateless/00562_in_subquery_merge_tree.reference
diff --git a/dbms/tests/queries/0_stateless/00562_in_subquery_merge_tree.sql b/tests/queries/0_stateless/00562_in_subquery_merge_tree.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00562_in_subquery_merge_tree.sql
rename to tests/queries/0_stateless/00562_in_subquery_merge_tree.sql
diff --git a/dbms/tests/queries/0_stateless/00562_rewrite_select_expression_with_union.reference b/tests/queries/0_stateless/00562_rewrite_select_expression_with_union.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00562_rewrite_select_expression_with_union.reference
rename to tests/queries/0_stateless/00562_rewrite_select_expression_with_union.reference
diff --git a/dbms/tests/queries/0_stateless/00562_rewrite_select_expression_with_union.sql b/tests/queries/0_stateless/00562_rewrite_select_expression_with_union.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00562_rewrite_select_expression_with_union.sql
rename to tests/queries/0_stateless/00562_rewrite_select_expression_with_union.sql
diff --git a/dbms/tests/queries/0_stateless/00563_complex_in_expression.reference b/tests/queries/0_stateless/00563_complex_in_expression.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00563_complex_in_expression.reference
rename to tests/queries/0_stateless/00563_complex_in_expression.reference
diff --git a/dbms/tests/queries/0_stateless/00563_complex_in_expression.sql b/tests/queries/0_stateless/00563_complex_in_expression.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00563_complex_in_expression.sql
rename to tests/queries/0_stateless/00563_complex_in_expression.sql
diff --git a/dbms/tests/queries/0_stateless/00563_insert_into_remote_and_zookeeper.reference b/tests/queries/0_stateless/00563_insert_into_remote_and_zookeeper.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00563_insert_into_remote_and_zookeeper.reference
rename to tests/queries/0_stateless/00563_insert_into_remote_and_zookeeper.reference
diff --git a/dbms/tests/queries/0_stateless/00563_insert_into_remote_and_zookeeper.sql b/tests/queries/0_stateless/00563_insert_into_remote_and_zookeeper.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00563_insert_into_remote_and_zookeeper.sql
rename to tests/queries/0_stateless/00563_insert_into_remote_and_zookeeper.sql
diff --git a/dbms/tests/queries/0_stateless/00563_shard_insert_into_remote.reference b/tests/queries/0_stateless/00563_shard_insert_into_remote.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00563_shard_insert_into_remote.reference
rename to tests/queries/0_stateless/00563_shard_insert_into_remote.reference
diff --git a/dbms/tests/queries/0_stateless/00563_shard_insert_into_remote.sql b/tests/queries/0_stateless/00563_shard_insert_into_remote.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00563_shard_insert_into_remote.sql
rename to tests/queries/0_stateless/00563_shard_insert_into_remote.sql
diff --git a/dbms/tests/queries/0_stateless/00564_enum_order.reference b/tests/queries/0_stateless/00564_enum_order.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00564_enum_order.reference
rename to tests/queries/0_stateless/00564_enum_order.reference
diff --git a/dbms/tests/queries/0_stateless/00564_enum_order.sh b/tests/queries/0_stateless/00564_enum_order.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00564_enum_order.sh
rename to tests/queries/0_stateless/00564_enum_order.sh
diff --git a/dbms/tests/queries/0_stateless/00564_initial_column_values_with_default_expression.reference b/tests/queries/0_stateless/00564_initial_column_values_with_default_expression.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00564_initial_column_values_with_default_expression.reference
rename to tests/queries/0_stateless/00564_initial_column_values_with_default_expression.reference
diff --git a/dbms/tests/queries/0_stateless/00564_initial_column_values_with_default_expression.sql b/tests/queries/0_stateless/00564_initial_column_values_with_default_expression.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00564_initial_column_values_with_default_expression.sql
rename to tests/queries/0_stateless/00564_initial_column_values_with_default_expression.sql
diff --git a/dbms/tests/queries/0_stateless/00564_temporary_table_management.reference b/tests/queries/0_stateless/00564_temporary_table_management.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00564_temporary_table_management.reference
rename to tests/queries/0_stateless/00564_temporary_table_management.reference
diff --git a/dbms/tests/queries/0_stateless/00564_temporary_table_management.sql b/tests/queries/0_stateless/00564_temporary_table_management.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00564_temporary_table_management.sql
rename to tests/queries/0_stateless/00564_temporary_table_management.sql
diff --git a/dbms/tests/queries/0_stateless/00564_versioned_collapsing_merge_tree.reference b/tests/queries/0_stateless/00564_versioned_collapsing_merge_tree.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00564_versioned_collapsing_merge_tree.reference
rename to tests/queries/0_stateless/00564_versioned_collapsing_merge_tree.reference
diff --git a/dbms/tests/queries/0_stateless/00564_versioned_collapsing_merge_tree.sql b/tests/queries/0_stateless/00564_versioned_collapsing_merge_tree.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00564_versioned_collapsing_merge_tree.sql
rename to tests/queries/0_stateless/00564_versioned_collapsing_merge_tree.sql
diff --git a/dbms/tests/queries/0_stateless/00565_enum_order.reference b/tests/queries/0_stateless/00565_enum_order.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00565_enum_order.reference
rename to tests/queries/0_stateless/00565_enum_order.reference
diff --git a/dbms/tests/queries/0_stateless/00565_enum_order.sh b/tests/queries/0_stateless/00565_enum_order.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00565_enum_order.sh
rename to tests/queries/0_stateless/00565_enum_order.sh
diff --git a/dbms/tests/queries/0_stateless/00566_enum_min_max.reference b/tests/queries/0_stateless/00566_enum_min_max.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00566_enum_min_max.reference
rename to tests/queries/0_stateless/00566_enum_min_max.reference
diff --git a/dbms/tests/queries/0_stateless/00566_enum_min_max.sql b/tests/queries/0_stateless/00566_enum_min_max.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00566_enum_min_max.sql
rename to tests/queries/0_stateless/00566_enum_min_max.sql
diff --git a/dbms/tests/queries/0_stateless/00567_parse_datetime_as_unix_timestamp.reference b/tests/queries/0_stateless/00567_parse_datetime_as_unix_timestamp.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00567_parse_datetime_as_unix_timestamp.reference
rename to tests/queries/0_stateless/00567_parse_datetime_as_unix_timestamp.reference
diff --git a/dbms/tests/queries/0_stateless/00567_parse_datetime_as_unix_timestamp.sql b/tests/queries/0_stateless/00567_parse_datetime_as_unix_timestamp.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00567_parse_datetime_as_unix_timestamp.sql
rename to tests/queries/0_stateless/00567_parse_datetime_as_unix_timestamp.sql
diff --git a/dbms/tests/queries/0_stateless/00568_empty_function_with_fixed_string.reference b/tests/queries/0_stateless/00568_empty_function_with_fixed_string.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00568_empty_function_with_fixed_string.reference
rename to tests/queries/0_stateless/00568_empty_function_with_fixed_string.reference
diff --git a/dbms/tests/queries/0_stateless/00568_empty_function_with_fixed_string.sql b/tests/queries/0_stateless/00568_empty_function_with_fixed_string.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00568_empty_function_with_fixed_string.sql
rename to tests/queries/0_stateless/00568_empty_function_with_fixed_string.sql
diff --git a/dbms/tests/queries/0_stateless/00569_parse_date_time_best_effort.reference b/tests/queries/0_stateless/00569_parse_date_time_best_effort.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00569_parse_date_time_best_effort.reference
rename to tests/queries/0_stateless/00569_parse_date_time_best_effort.reference
diff --git a/dbms/tests/queries/0_stateless/00569_parse_date_time_best_effort.sql b/tests/queries/0_stateless/00569_parse_date_time_best_effort.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00569_parse_date_time_best_effort.sql
rename to tests/queries/0_stateless/00569_parse_date_time_best_effort.sql
diff --git a/dbms/tests/queries/0_stateless/00570_empty_array_is_const.reference b/tests/queries/0_stateless/00570_empty_array_is_const.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00570_empty_array_is_const.reference
rename to tests/queries/0_stateless/00570_empty_array_is_const.reference
diff --git a/dbms/tests/queries/0_stateless/00570_empty_array_is_const.sql b/tests/queries/0_stateless/00570_empty_array_is_const.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00570_empty_array_is_const.sql
rename to tests/queries/0_stateless/00570_empty_array_is_const.sql
diff --git a/dbms/tests/queries/0_stateless/00571_alter_nullable.reference b/tests/queries/0_stateless/00571_alter_nullable.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00571_alter_nullable.reference
rename to tests/queries/0_stateless/00571_alter_nullable.reference
diff --git a/dbms/tests/queries/0_stateless/00571_alter_nullable.sql b/tests/queries/0_stateless/00571_alter_nullable.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00571_alter_nullable.sql
rename to tests/queries/0_stateless/00571_alter_nullable.sql
diff --git a/dbms/tests/queries/0_stateless/00571_non_exist_database_when_create_materializ_view.reference b/tests/queries/0_stateless/00571_non_exist_database_when_create_materializ_view.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00571_non_exist_database_when_create_materializ_view.reference
rename to tests/queries/0_stateless/00571_non_exist_database_when_create_materializ_view.reference
diff --git a/dbms/tests/queries/0_stateless/00571_non_exist_database_when_create_materializ_view.sql b/tests/queries/0_stateless/00571_non_exist_database_when_create_materializ_view.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00571_non_exist_database_when_create_materializ_view.sql
rename to tests/queries/0_stateless/00571_non_exist_database_when_create_materializ_view.sql
diff --git a/dbms/tests/queries/0_stateless/00572_aggregation_by_empty_set.reference b/tests/queries/0_stateless/00572_aggregation_by_empty_set.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00572_aggregation_by_empty_set.reference
rename to tests/queries/0_stateless/00572_aggregation_by_empty_set.reference
diff --git a/dbms/tests/queries/0_stateless/00572_aggregation_by_empty_set.sql b/tests/queries/0_stateless/00572_aggregation_by_empty_set.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00572_aggregation_by_empty_set.sql
rename to tests/queries/0_stateless/00572_aggregation_by_empty_set.sql
diff --git a/dbms/tests/queries/0_stateless/00573_shard_aggregation_by_empty_set.reference b/tests/queries/0_stateless/00573_shard_aggregation_by_empty_set.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00573_shard_aggregation_by_empty_set.reference
rename to tests/queries/0_stateless/00573_shard_aggregation_by_empty_set.reference
diff --git a/dbms/tests/queries/0_stateless/00573_shard_aggregation_by_empty_set.sql b/tests/queries/0_stateless/00573_shard_aggregation_by_empty_set.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00573_shard_aggregation_by_empty_set.sql
rename to tests/queries/0_stateless/00573_shard_aggregation_by_empty_set.sql
diff --git a/dbms/tests/queries/0_stateless/00574_empty_strings_deserialization.reference b/tests/queries/0_stateless/00574_empty_strings_deserialization.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00574_empty_strings_deserialization.reference
rename to tests/queries/0_stateless/00574_empty_strings_deserialization.reference
diff --git a/dbms/tests/queries/0_stateless/00574_empty_strings_deserialization.sh b/tests/queries/0_stateless/00574_empty_strings_deserialization.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00574_empty_strings_deserialization.sh
rename to tests/queries/0_stateless/00574_empty_strings_deserialization.sh
diff --git a/dbms/tests/queries/0_stateless/00575_illegal_column_exception_when_drop_depen_column.reference b/tests/queries/0_stateless/00575_illegal_column_exception_when_drop_depen_column.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00575_illegal_column_exception_when_drop_depen_column.reference
rename to tests/queries/0_stateless/00575_illegal_column_exception_when_drop_depen_column.reference
diff --git a/dbms/tests/queries/0_stateless/00575_illegal_column_exception_when_drop_depen_column.sh b/tests/queries/0_stateless/00575_illegal_column_exception_when_drop_depen_column.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00575_illegal_column_exception_when_drop_depen_column.sh
rename to tests/queries/0_stateless/00575_illegal_column_exception_when_drop_depen_column.sh
diff --git a/dbms/tests/queries/0_stateless/00575_merge_and_index_with_function_in_in.reference b/tests/queries/0_stateless/00575_merge_and_index_with_function_in_in.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00575_merge_and_index_with_function_in_in.reference
rename to tests/queries/0_stateless/00575_merge_and_index_with_function_in_in.reference
diff --git a/dbms/tests/queries/0_stateless/00575_merge_and_index_with_function_in_in.sql b/tests/queries/0_stateless/00575_merge_and_index_with_function_in_in.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00575_merge_and_index_with_function_in_in.sql
rename to tests/queries/0_stateless/00575_merge_and_index_with_function_in_in.sql
diff --git a/dbms/tests/queries/0_stateless/00576_nested_and_prewhere.reference b/tests/queries/0_stateless/00576_nested_and_prewhere.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00576_nested_and_prewhere.reference
rename to tests/queries/0_stateless/00576_nested_and_prewhere.reference
diff --git a/dbms/tests/queries/0_stateless/00576_nested_and_prewhere.sql b/tests/queries/0_stateless/00576_nested_and_prewhere.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00576_nested_and_prewhere.sql
rename to tests/queries/0_stateless/00576_nested_and_prewhere.sql
diff --git a/dbms/tests/queries/0_stateless/00577_full_join_segfault.reference b/tests/queries/0_stateless/00577_full_join_segfault.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00577_full_join_segfault.reference
rename to tests/queries/0_stateless/00577_full_join_segfault.reference
diff --git a/dbms/tests/queries/0_stateless/00577_full_join_segfault.sql b/tests/queries/0_stateless/00577_full_join_segfault.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00577_full_join_segfault.sql
rename to tests/queries/0_stateless/00577_full_join_segfault.sql
diff --git a/dbms/tests/queries/0_stateless/00577_replacing_merge_tree_vertical_merge.reference b/tests/queries/0_stateless/00577_replacing_merge_tree_vertical_merge.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00577_replacing_merge_tree_vertical_merge.reference
rename to tests/queries/0_stateless/00577_replacing_merge_tree_vertical_merge.reference
diff --git a/dbms/tests/queries/0_stateless/00577_replacing_merge_tree_vertical_merge.sql b/tests/queries/0_stateless/00577_replacing_merge_tree_vertical_merge.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00577_replacing_merge_tree_vertical_merge.sql
rename to tests/queries/0_stateless/00577_replacing_merge_tree_vertical_merge.sql
diff --git a/dbms/tests/queries/0_stateless/00578_merge_table_and_table_virtual_column.reference b/tests/queries/0_stateless/00578_merge_table_and_table_virtual_column.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00578_merge_table_and_table_virtual_column.reference
rename to tests/queries/0_stateless/00578_merge_table_and_table_virtual_column.reference
diff --git a/dbms/tests/queries/0_stateless/00578_merge_table_and_table_virtual_column.sql b/tests/queries/0_stateless/00578_merge_table_and_table_virtual_column.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00578_merge_table_and_table_virtual_column.sql
rename to tests/queries/0_stateless/00578_merge_table_and_table_virtual_column.sql
diff --git a/dbms/tests/queries/0_stateless/00578_merge_table_sampling.reference b/tests/queries/0_stateless/00578_merge_table_sampling.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00578_merge_table_sampling.reference
rename to tests/queries/0_stateless/00578_merge_table_sampling.reference
diff --git a/dbms/tests/queries/0_stateless/00578_merge_table_sampling.sql b/tests/queries/0_stateless/00578_merge_table_sampling.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00578_merge_table_sampling.sql
rename to tests/queries/0_stateless/00578_merge_table_sampling.sql
diff --git a/dbms/tests/queries/0_stateless/00578_merge_table_shadow_virtual_column.reference b/tests/queries/0_stateless/00578_merge_table_shadow_virtual_column.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00578_merge_table_shadow_virtual_column.reference
rename to tests/queries/0_stateless/00578_merge_table_shadow_virtual_column.reference
diff --git a/dbms/tests/queries/0_stateless/00578_merge_table_shadow_virtual_column.sql b/tests/queries/0_stateless/00578_merge_table_shadow_virtual_column.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00578_merge_table_shadow_virtual_column.sql
rename to tests/queries/0_stateless/00578_merge_table_shadow_virtual_column.sql
diff --git a/dbms/tests/queries/0_stateless/00578_merge_trees_without_primary_key.reference b/tests/queries/0_stateless/00578_merge_trees_without_primary_key.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00578_merge_trees_without_primary_key.reference
rename to tests/queries/0_stateless/00578_merge_trees_without_primary_key.reference
diff --git a/dbms/tests/queries/0_stateless/00578_merge_trees_without_primary_key.sql b/tests/queries/0_stateless/00578_merge_trees_without_primary_key.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00578_merge_trees_without_primary_key.sql
rename to tests/queries/0_stateless/00578_merge_trees_without_primary_key.sql
diff --git a/dbms/tests/queries/0_stateless/00579_merge_tree_partition_and_primary_keys_using_same_expression.reference b/tests/queries/0_stateless/00579_merge_tree_partition_and_primary_keys_using_same_expression.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00579_merge_tree_partition_and_primary_keys_using_same_expression.reference
rename to tests/queries/0_stateless/00579_merge_tree_partition_and_primary_keys_using_same_expression.reference
diff --git a/dbms/tests/queries/0_stateless/00579_merge_tree_partition_and_primary_keys_using_same_expression.sql b/tests/queries/0_stateless/00579_merge_tree_partition_and_primary_keys_using_same_expression.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00579_merge_tree_partition_and_primary_keys_using_same_expression.sql
rename to tests/queries/0_stateless/00579_merge_tree_partition_and_primary_keys_using_same_expression.sql
diff --git a/dbms/tests/queries/0_stateless/00579_virtual_column_and_lazy.reference b/tests/queries/0_stateless/00579_virtual_column_and_lazy.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00579_virtual_column_and_lazy.reference
rename to tests/queries/0_stateless/00579_virtual_column_and_lazy.reference
diff --git a/dbms/tests/queries/0_stateless/00579_virtual_column_and_lazy.sql b/tests/queries/0_stateless/00579_virtual_column_and_lazy.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00579_virtual_column_and_lazy.sql
rename to tests/queries/0_stateless/00579_virtual_column_and_lazy.sql
diff --git a/dbms/tests/queries/0_stateless/00580_cast_nullable_to_non_nullable.reference b/tests/queries/0_stateless/00580_cast_nullable_to_non_nullable.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00580_cast_nullable_to_non_nullable.reference
rename to tests/queries/0_stateless/00580_cast_nullable_to_non_nullable.reference
diff --git a/dbms/tests/queries/0_stateless/00580_cast_nullable_to_non_nullable.sql b/tests/queries/0_stateless/00580_cast_nullable_to_non_nullable.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00580_cast_nullable_to_non_nullable.sql
rename to tests/queries/0_stateless/00580_cast_nullable_to_non_nullable.sql
diff --git a/dbms/tests/queries/0_stateless/00580_consistent_hashing_functions.reference b/tests/queries/0_stateless/00580_consistent_hashing_functions.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00580_consistent_hashing_functions.reference
rename to tests/queries/0_stateless/00580_consistent_hashing_functions.reference
diff --git a/dbms/tests/queries/0_stateless/00580_consistent_hashing_functions.sql b/tests/queries/0_stateless/00580_consistent_hashing_functions.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00580_consistent_hashing_functions.sql
rename to tests/queries/0_stateless/00580_consistent_hashing_functions.sql
diff --git a/dbms/tests/queries/0_stateless/00581_limit_on_result_and_subquery_and_insert.reference b/tests/queries/0_stateless/00581_limit_on_result_and_subquery_and_insert.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00581_limit_on_result_and_subquery_and_insert.reference
rename to tests/queries/0_stateless/00581_limit_on_result_and_subquery_and_insert.reference
diff --git a/dbms/tests/queries/0_stateless/00581_limit_on_result_and_subquery_and_insert.sql b/tests/queries/0_stateless/00581_limit_on_result_and_subquery_and_insert.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00581_limit_on_result_and_subquery_and_insert.sql
rename to tests/queries/0_stateless/00581_limit_on_result_and_subquery_and_insert.sql
diff --git a/dbms/tests/queries/0_stateless/00582_not_aliasing_functions.reference b/tests/queries/0_stateless/00582_not_aliasing_functions.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00582_not_aliasing_functions.reference
rename to tests/queries/0_stateless/00582_not_aliasing_functions.reference
diff --git a/dbms/tests/queries/0_stateless/00582_not_aliasing_functions.sql b/tests/queries/0_stateless/00582_not_aliasing_functions.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00582_not_aliasing_functions.sql
rename to tests/queries/0_stateless/00582_not_aliasing_functions.sql
diff --git a/dbms/tests/queries/0_stateless/00583_limit_by_expressions.reference b/tests/queries/0_stateless/00583_limit_by_expressions.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00583_limit_by_expressions.reference
rename to tests/queries/0_stateless/00583_limit_by_expressions.reference
diff --git a/dbms/tests/queries/0_stateless/00583_limit_by_expressions.sql b/tests/queries/0_stateless/00583_limit_by_expressions.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00583_limit_by_expressions.sql
rename to tests/queries/0_stateless/00583_limit_by_expressions.sql
diff --git a/dbms/tests/queries/0_stateless/00584_view_union_all.reference b/tests/queries/0_stateless/00584_view_union_all.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00584_view_union_all.reference
rename to tests/queries/0_stateless/00584_view_union_all.reference
diff --git a/dbms/tests/queries/0_stateless/00584_view_union_all.sql b/tests/queries/0_stateless/00584_view_union_all.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00584_view_union_all.sql
rename to tests/queries/0_stateless/00584_view_union_all.sql
diff --git a/dbms/tests/queries/0_stateless/00585_union_all_subquery_aggregation_column_removal.reference b/tests/queries/0_stateless/00585_union_all_subquery_aggregation_column_removal.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00585_union_all_subquery_aggregation_column_removal.reference
rename to tests/queries/0_stateless/00585_union_all_subquery_aggregation_column_removal.reference
diff --git a/dbms/tests/queries/0_stateless/00585_union_all_subquery_aggregation_column_removal.sql b/tests/queries/0_stateless/00585_union_all_subquery_aggregation_column_removal.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00585_union_all_subquery_aggregation_column_removal.sql
rename to tests/queries/0_stateless/00585_union_all_subquery_aggregation_column_removal.sql
diff --git a/dbms/tests/queries/0_stateless/00586_removing_unused_columns_from_subquery.reference b/tests/queries/0_stateless/00586_removing_unused_columns_from_subquery.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00586_removing_unused_columns_from_subquery.reference
rename to tests/queries/0_stateless/00586_removing_unused_columns_from_subquery.reference
diff --git a/dbms/tests/queries/0_stateless/00586_removing_unused_columns_from_subquery.sql b/tests/queries/0_stateless/00586_removing_unused_columns_from_subquery.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00586_removing_unused_columns_from_subquery.sql
rename to tests/queries/0_stateless/00586_removing_unused_columns_from_subquery.sql
diff --git a/dbms/tests/queries/0_stateless/00587_union_all_type_conversions.reference b/tests/queries/0_stateless/00587_union_all_type_conversions.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00587_union_all_type_conversions.reference
rename to tests/queries/0_stateless/00587_union_all_type_conversions.reference
diff --git a/dbms/tests/queries/0_stateless/00587_union_all_type_conversions.sql b/tests/queries/0_stateless/00587_union_all_type_conversions.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00587_union_all_type_conversions.sql
rename to tests/queries/0_stateless/00587_union_all_type_conversions.sql
diff --git a/dbms/tests/queries/0_stateless/00588_shard_distributed_prewhere.reference b/tests/queries/0_stateless/00588_shard_distributed_prewhere.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00588_shard_distributed_prewhere.reference
rename to tests/queries/0_stateless/00588_shard_distributed_prewhere.reference
diff --git a/dbms/tests/queries/0_stateless/00588_shard_distributed_prewhere.sql b/tests/queries/0_stateless/00588_shard_distributed_prewhere.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00588_shard_distributed_prewhere.sql
rename to tests/queries/0_stateless/00588_shard_distributed_prewhere.sql
diff --git a/dbms/tests/queries/0_stateless/00589_removal_unused_columns_aggregation.reference b/tests/queries/0_stateless/00589_removal_unused_columns_aggregation.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00589_removal_unused_columns_aggregation.reference
rename to tests/queries/0_stateless/00589_removal_unused_columns_aggregation.reference
diff --git a/dbms/tests/queries/0_stateless/00589_removal_unused_columns_aggregation.sql b/tests/queries/0_stateless/00589_removal_unused_columns_aggregation.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00589_removal_unused_columns_aggregation.sql
rename to tests/queries/0_stateless/00589_removal_unused_columns_aggregation.sql
diff --git a/dbms/tests/queries/0_stateless/00590_limit_by_column_removal.reference b/tests/queries/0_stateless/00590_limit_by_column_removal.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00590_limit_by_column_removal.reference
rename to tests/queries/0_stateless/00590_limit_by_column_removal.reference
diff --git a/dbms/tests/queries/0_stateless/00590_limit_by_column_removal.sql b/tests/queries/0_stateless/00590_limit_by_column_removal.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00590_limit_by_column_removal.sql
rename to tests/queries/0_stateless/00590_limit_by_column_removal.sql
diff --git a/dbms/tests/queries/0_stateless/00591_columns_removal_union_all.reference b/tests/queries/0_stateless/00591_columns_removal_union_all.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00591_columns_removal_union_all.reference
rename to tests/queries/0_stateless/00591_columns_removal_union_all.reference
diff --git a/dbms/tests/queries/0_stateless/00591_columns_removal_union_all.sql b/tests/queries/0_stateless/00591_columns_removal_union_all.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00591_columns_removal_union_all.sql
rename to tests/queries/0_stateless/00591_columns_removal_union_all.sql
diff --git a/dbms/tests/queries/0_stateless/00592_union_all_different_aliases.reference b/tests/queries/0_stateless/00592_union_all_different_aliases.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00592_union_all_different_aliases.reference
rename to tests/queries/0_stateless/00592_union_all_different_aliases.reference
diff --git a/dbms/tests/queries/0_stateless/00592_union_all_different_aliases.sql b/tests/queries/0_stateless/00592_union_all_different_aliases.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00592_union_all_different_aliases.sql
rename to tests/queries/0_stateless/00592_union_all_different_aliases.sql
diff --git a/dbms/tests/queries/0_stateless/00593_union_all_assert_columns_removed.reference b/tests/queries/0_stateless/00593_union_all_assert_columns_removed.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00593_union_all_assert_columns_removed.reference
rename to tests/queries/0_stateless/00593_union_all_assert_columns_removed.reference
diff --git a/dbms/tests/queries/0_stateless/00593_union_all_assert_columns_removed.sql b/tests/queries/0_stateless/00593_union_all_assert_columns_removed.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00593_union_all_assert_columns_removed.sql
rename to tests/queries/0_stateless/00593_union_all_assert_columns_removed.sql
diff --git a/dbms/tests/queries/0_stateless/00594_alias_in_distributed.reference b/tests/queries/0_stateless/00594_alias_in_distributed.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00594_alias_in_distributed.reference
rename to tests/queries/0_stateless/00594_alias_in_distributed.reference
diff --git a/dbms/tests/queries/0_stateless/00594_alias_in_distributed.sql b/tests/queries/0_stateless/00594_alias_in_distributed.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00594_alias_in_distributed.sql
rename to tests/queries/0_stateless/00594_alias_in_distributed.sql
diff --git a/dbms/tests/queries/0_stateless/00595_insert_into_view.reference b/tests/queries/0_stateless/00595_insert_into_view.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00595_insert_into_view.reference
rename to tests/queries/0_stateless/00595_insert_into_view.reference
diff --git a/dbms/tests/queries/0_stateless/00595_insert_into_view.sh b/tests/queries/0_stateless/00595_insert_into_view.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00595_insert_into_view.sh
rename to tests/queries/0_stateless/00595_insert_into_view.sh
diff --git a/dbms/tests/queries/0_stateless/00596_limit_on_expanded_ast.reference b/tests/queries/0_stateless/00596_limit_on_expanded_ast.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00596_limit_on_expanded_ast.reference
rename to tests/queries/0_stateless/00596_limit_on_expanded_ast.reference
diff --git a/dbms/tests/queries/0_stateless/00596_limit_on_expanded_ast.sh b/tests/queries/0_stateless/00596_limit_on_expanded_ast.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00596_limit_on_expanded_ast.sh
rename to tests/queries/0_stateless/00596_limit_on_expanded_ast.sh
diff --git a/dbms/tests/queries/0_stateless/00597_push_down_predicate.reference b/tests/queries/0_stateless/00597_push_down_predicate.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00597_push_down_predicate.reference
rename to tests/queries/0_stateless/00597_push_down_predicate.reference
diff --git a/dbms/tests/queries/0_stateless/00597_push_down_predicate.sql b/tests/queries/0_stateless/00597_push_down_predicate.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00597_push_down_predicate.sql
rename to tests/queries/0_stateless/00597_push_down_predicate.sql
diff --git a/dbms/tests/queries/0_stateless/00597_with_totals_on_empty_set.reference b/tests/queries/0_stateless/00597_with_totals_on_empty_set.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00597_with_totals_on_empty_set.reference
rename to tests/queries/0_stateless/00597_with_totals_on_empty_set.reference
diff --git a/dbms/tests/queries/0_stateless/00597_with_totals_on_empty_set.sql b/tests/queries/0_stateless/00597_with_totals_on_empty_set.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00597_with_totals_on_empty_set.sql
rename to tests/queries/0_stateless/00597_with_totals_on_empty_set.sql
diff --git a/dbms/tests/queries/0_stateless/00598_create_as_select_http.reference b/tests/queries/0_stateless/00598_create_as_select_http.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00598_create_as_select_http.reference
rename to tests/queries/0_stateless/00598_create_as_select_http.reference
diff --git a/dbms/tests/queries/0_stateless/00598_create_as_select_http.sh b/tests/queries/0_stateless/00598_create_as_select_http.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00598_create_as_select_http.sh
rename to tests/queries/0_stateless/00598_create_as_select_http.sh
diff --git a/dbms/tests/queries/0_stateless/00599_create_view_with_subquery.reference b/tests/queries/0_stateless/00599_create_view_with_subquery.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00599_create_view_with_subquery.reference
rename to tests/queries/0_stateless/00599_create_view_with_subquery.reference
diff --git a/dbms/tests/queries/0_stateless/00599_create_view_with_subquery.sql b/tests/queries/0_stateless/00599_create_view_with_subquery.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00599_create_view_with_subquery.sql
rename to tests/queries/0_stateless/00599_create_view_with_subquery.sql
diff --git a/dbms/tests/queries/0_stateless/00600_create_temporary_table_if_not_exists.reference b/tests/queries/0_stateless/00600_create_temporary_table_if_not_exists.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00600_create_temporary_table_if_not_exists.reference
rename to tests/queries/0_stateless/00600_create_temporary_table_if_not_exists.reference
diff --git a/dbms/tests/queries/0_stateless/00600_create_temporary_table_if_not_exists.sql b/tests/queries/0_stateless/00600_create_temporary_table_if_not_exists.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00600_create_temporary_table_if_not_exists.sql
rename to tests/queries/0_stateless/00600_create_temporary_table_if_not_exists.sql
diff --git a/dbms/tests/queries/0_stateless/00600_replace_running_query.reference b/tests/queries/0_stateless/00600_replace_running_query.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00600_replace_running_query.reference
rename to tests/queries/0_stateless/00600_replace_running_query.reference
diff --git a/dbms/tests/queries/0_stateless/00600_replace_running_query.sh b/tests/queries/0_stateless/00600_replace_running_query.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00600_replace_running_query.sh
rename to tests/queries/0_stateless/00600_replace_running_query.sh
diff --git a/dbms/tests/queries/0_stateless/00601_kill_running_query.reference b/tests/queries/0_stateless/00601_kill_running_query.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00601_kill_running_query.reference
rename to tests/queries/0_stateless/00601_kill_running_query.reference
diff --git a/dbms/tests/queries/0_stateless/00601_kill_running_query.sh b/tests/queries/0_stateless/00601_kill_running_query.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00601_kill_running_query.sh
rename to tests/queries/0_stateless/00601_kill_running_query.sh
diff --git a/dbms/tests/queries/0_stateless/00602_throw_if.reference b/tests/queries/0_stateless/00602_throw_if.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00602_throw_if.reference
rename to tests/queries/0_stateless/00602_throw_if.reference
diff --git a/dbms/tests/queries/0_stateless/00602_throw_if.sh b/tests/queries/0_stateless/00602_throw_if.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00602_throw_if.sh
rename to tests/queries/0_stateless/00602_throw_if.sh
diff --git a/dbms/tests/queries/0_stateless/00603_system_parts_nonexistent_database.reference b/tests/queries/0_stateless/00603_system_parts_nonexistent_database.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00603_system_parts_nonexistent_database.reference
rename to tests/queries/0_stateless/00603_system_parts_nonexistent_database.reference
diff --git a/dbms/tests/queries/0_stateless/00603_system_parts_nonexistent_database.sql b/tests/queries/0_stateless/00603_system_parts_nonexistent_database.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00603_system_parts_nonexistent_database.sql
rename to tests/queries/0_stateless/00603_system_parts_nonexistent_database.sql
diff --git a/dbms/tests/queries/0_stateless/00604_shard_remote_and_columns_with_defaults.reference b/tests/queries/0_stateless/00604_shard_remote_and_columns_with_defaults.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00604_shard_remote_and_columns_with_defaults.reference
rename to tests/queries/0_stateless/00604_shard_remote_and_columns_with_defaults.reference
diff --git a/dbms/tests/queries/0_stateless/00604_shard_remote_and_columns_with_defaults.sql b/tests/queries/0_stateless/00604_shard_remote_and_columns_with_defaults.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00604_shard_remote_and_columns_with_defaults.sql
rename to tests/queries/0_stateless/00604_shard_remote_and_columns_with_defaults.sql
diff --git a/dbms/tests/queries/0_stateless/00604_show_create_database.reference b/tests/queries/0_stateless/00604_show_create_database.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00604_show_create_database.reference
rename to tests/queries/0_stateless/00604_show_create_database.reference
diff --git a/dbms/tests/queries/0_stateless/00604_show_create_database.sql b/tests/queries/0_stateless/00604_show_create_database.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00604_show_create_database.sql
rename to tests/queries/0_stateless/00604_show_create_database.sql
diff --git a/dbms/tests/queries/0_stateless/00605_intersections_aggregate_functions.reference b/tests/queries/0_stateless/00605_intersections_aggregate_functions.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00605_intersections_aggregate_functions.reference
rename to tests/queries/0_stateless/00605_intersections_aggregate_functions.reference
diff --git a/dbms/tests/queries/0_stateless/00605_intersections_aggregate_functions.sql b/tests/queries/0_stateless/00605_intersections_aggregate_functions.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00605_intersections_aggregate_functions.sql
rename to tests/queries/0_stateless/00605_intersections_aggregate_functions.sql
diff --git a/dbms/tests/queries/0_stateless/00606_quantiles_and_nans.reference b/tests/queries/0_stateless/00606_quantiles_and_nans.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00606_quantiles_and_nans.reference
rename to tests/queries/0_stateless/00606_quantiles_and_nans.reference
diff --git a/dbms/tests/queries/0_stateless/00606_quantiles_and_nans.sql b/tests/queries/0_stateless/00606_quantiles_and_nans.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00606_quantiles_and_nans.sql
rename to tests/queries/0_stateless/00606_quantiles_and_nans.sql
diff --git a/dbms/tests/queries/0_stateless/00607_index_in_in.reference b/tests/queries/0_stateless/00607_index_in_in.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00607_index_in_in.reference
rename to tests/queries/0_stateless/00607_index_in_in.reference
diff --git a/dbms/tests/queries/0_stateless/00607_index_in_in.sql b/tests/queries/0_stateless/00607_index_in_in.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00607_index_in_in.sql
rename to tests/queries/0_stateless/00607_index_in_in.sql
diff --git a/dbms/tests/queries/0_stateless/00608_uniq_array.reference b/tests/queries/0_stateless/00608_uniq_array.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00608_uniq_array.reference
rename to tests/queries/0_stateless/00608_uniq_array.reference
diff --git a/dbms/tests/queries/0_stateless/00608_uniq_array.sql b/tests/queries/0_stateless/00608_uniq_array.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00608_uniq_array.sql
rename to tests/queries/0_stateless/00608_uniq_array.sql
diff --git a/dbms/tests/queries/0_stateless/00609_distributed_with_case_when_then.reference b/tests/queries/0_stateless/00609_distributed_with_case_when_then.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00609_distributed_with_case_when_then.reference
rename to tests/queries/0_stateless/00609_distributed_with_case_when_then.reference
diff --git a/dbms/tests/queries/0_stateless/00609_distributed_with_case_when_then.sql b/tests/queries/0_stateless/00609_distributed_with_case_when_then.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00609_distributed_with_case_when_then.sql
rename to tests/queries/0_stateless/00609_distributed_with_case_when_then.sql
diff --git a/dbms/tests/queries/0_stateless/00609_mv_index_in_in.reference b/tests/queries/0_stateless/00609_mv_index_in_in.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00609_mv_index_in_in.reference
rename to tests/queries/0_stateless/00609_mv_index_in_in.reference
diff --git a/dbms/tests/queries/0_stateless/00609_mv_index_in_in.sql b/tests/queries/0_stateless/00609_mv_index_in_in.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00609_mv_index_in_in.sql
rename to tests/queries/0_stateless/00609_mv_index_in_in.sql
diff --git a/dbms/tests/queries/0_stateless/00609_prewhere_and_default.reference b/tests/queries/0_stateless/00609_prewhere_and_default.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00609_prewhere_and_default.reference
rename to tests/queries/0_stateless/00609_prewhere_and_default.reference
diff --git a/dbms/tests/queries/0_stateless/00609_prewhere_and_default.sql b/tests/queries/0_stateless/00609_prewhere_and_default.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00609_prewhere_and_default.sql
rename to tests/queries/0_stateless/00609_prewhere_and_default.sql
diff --git a/dbms/tests/queries/0_stateless/00610_materialized_view_forward_alter_partition_statements.reference b/tests/queries/0_stateless/00610_materialized_view_forward_alter_partition_statements.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00610_materialized_view_forward_alter_partition_statements.reference
rename to tests/queries/0_stateless/00610_materialized_view_forward_alter_partition_statements.reference
diff --git a/dbms/tests/queries/0_stateless/00610_materialized_view_forward_alter_partition_statements.sql b/tests/queries/0_stateless/00610_materialized_view_forward_alter_partition_statements.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00610_materialized_view_forward_alter_partition_statements.sql
rename to tests/queries/0_stateless/00610_materialized_view_forward_alter_partition_statements.sql
diff --git a/dbms/tests/queries/0_stateless/00612_count.reference b/tests/queries/0_stateless/00612_count.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00612_count.reference
rename to tests/queries/0_stateless/00612_count.reference
diff --git a/dbms/tests/queries/0_stateless/00612_count.sql b/tests/queries/0_stateless/00612_count.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00612_count.sql
rename to tests/queries/0_stateless/00612_count.sql
diff --git a/dbms/tests/queries/0_stateless/00612_http_max_query_size.reference b/tests/queries/0_stateless/00612_http_max_query_size.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00612_http_max_query_size.reference
rename to tests/queries/0_stateless/00612_http_max_query_size.reference
diff --git a/dbms/tests/queries/0_stateless/00612_http_max_query_size.sh b/tests/queries/0_stateless/00612_http_max_query_size.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00612_http_max_query_size.sh
rename to tests/queries/0_stateless/00612_http_max_query_size.sh
diff --git a/dbms/tests/queries/0_stateless/00612_pk_in_tuple.reference b/tests/queries/0_stateless/00612_pk_in_tuple.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00612_pk_in_tuple.reference
rename to tests/queries/0_stateless/00612_pk_in_tuple.reference
diff --git a/dbms/tests/queries/0_stateless/00612_pk_in_tuple.sql b/tests/queries/0_stateless/00612_pk_in_tuple.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00612_pk_in_tuple.sql
rename to tests/queries/0_stateless/00612_pk_in_tuple.sql
diff --git a/dbms/tests/queries/0_stateless/00612_shard_count.reference b/tests/queries/0_stateless/00612_shard_count.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00612_shard_count.reference
rename to tests/queries/0_stateless/00612_shard_count.reference
diff --git a/dbms/tests/queries/0_stateless/00612_shard_count.sql b/tests/queries/0_stateless/00612_shard_count.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00612_shard_count.sql
rename to tests/queries/0_stateless/00612_shard_count.sql
diff --git a/dbms/tests/queries/0_stateless/00612_union_query_with_subquery.reference b/tests/queries/0_stateless/00612_union_query_with_subquery.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00612_union_query_with_subquery.reference
rename to tests/queries/0_stateless/00612_union_query_with_subquery.reference
diff --git a/dbms/tests/queries/0_stateless/00612_union_query_with_subquery.sql b/tests/queries/0_stateless/00612_union_query_with_subquery.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00612_union_query_with_subquery.sql
rename to tests/queries/0_stateless/00612_union_query_with_subquery.sql
diff --git a/dbms/tests/queries/0_stateless/00613_shard_distributed_max_execution_time.reference b/tests/queries/0_stateless/00613_shard_distributed_max_execution_time.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00613_shard_distributed_max_execution_time.reference
rename to tests/queries/0_stateless/00613_shard_distributed_max_execution_time.reference
diff --git a/dbms/tests/queries/0_stateless/00613_shard_distributed_max_execution_time.sql b/tests/queries/0_stateless/00613_shard_distributed_max_execution_time.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00613_shard_distributed_max_execution_time.sql
rename to tests/queries/0_stateless/00613_shard_distributed_max_execution_time.sql
diff --git a/dbms/tests/queries/0_stateless/00614_array_nullable.reference b/tests/queries/0_stateless/00614_array_nullable.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00614_array_nullable.reference
rename to tests/queries/0_stateless/00614_array_nullable.reference
diff --git a/dbms/tests/queries/0_stateless/00614_array_nullable.sql b/tests/queries/0_stateless/00614_array_nullable.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00614_array_nullable.sql
rename to tests/queries/0_stateless/00614_array_nullable.sql
diff --git a/dbms/tests/queries/0_stateless/00614_shard_same_header_for_local_and_remote_node_in_distributed_query.reference b/tests/queries/0_stateless/00614_shard_same_header_for_local_and_remote_node_in_distributed_query.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00614_shard_same_header_for_local_and_remote_node_in_distributed_query.reference
rename to tests/queries/0_stateless/00614_shard_same_header_for_local_and_remote_node_in_distributed_query.reference
diff --git a/dbms/tests/queries/0_stateless/00614_shard_same_header_for_local_and_remote_node_in_distributed_query.sql b/tests/queries/0_stateless/00614_shard_same_header_for_local_and_remote_node_in_distributed_query.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00614_shard_same_header_for_local_and_remote_node_in_distributed_query.sql
rename to tests/queries/0_stateless/00614_shard_same_header_for_local_and_remote_node_in_distributed_query.sql
diff --git a/dbms/tests/queries/0_stateless/00615_nullable_alter_optimize.reference b/tests/queries/0_stateless/00615_nullable_alter_optimize.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00615_nullable_alter_optimize.reference
rename to tests/queries/0_stateless/00615_nullable_alter_optimize.reference
diff --git a/dbms/tests/queries/0_stateless/00615_nullable_alter_optimize.sql b/tests/queries/0_stateless/00615_nullable_alter_optimize.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00615_nullable_alter_optimize.sql
rename to tests/queries/0_stateless/00615_nullable_alter_optimize.sql
diff --git a/dbms/tests/queries/0_stateless/00616_final_single_part.reference b/tests/queries/0_stateless/00616_final_single_part.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00616_final_single_part.reference
rename to tests/queries/0_stateless/00616_final_single_part.reference
diff --git a/dbms/tests/queries/0_stateless/00616_final_single_part.sql b/tests/queries/0_stateless/00616_final_single_part.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00616_final_single_part.sql
rename to tests/queries/0_stateless/00616_final_single_part.sql
diff --git a/dbms/tests/queries/0_stateless/00617_array_in.reference b/tests/queries/0_stateless/00617_array_in.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00617_array_in.reference
rename to tests/queries/0_stateless/00617_array_in.reference
diff --git a/dbms/tests/queries/0_stateless/00617_array_in.sql b/tests/queries/0_stateless/00617_array_in.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00617_array_in.sql
rename to tests/queries/0_stateless/00617_array_in.sql
diff --git a/dbms/tests/queries/0_stateless/00618_nullable_in.reference b/tests/queries/0_stateless/00618_nullable_in.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00618_nullable_in.reference
rename to tests/queries/0_stateless/00618_nullable_in.reference
diff --git a/dbms/tests/queries/0_stateless/00618_nullable_in.sql b/tests/queries/0_stateless/00618_nullable_in.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00618_nullable_in.sql
rename to tests/queries/0_stateless/00618_nullable_in.sql
diff --git a/dbms/tests/queries/0_stateless/00619_extract.reference b/tests/queries/0_stateless/00619_extract.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00619_extract.reference
rename to tests/queries/0_stateless/00619_extract.reference
diff --git a/dbms/tests/queries/0_stateless/00619_extract.sql b/tests/queries/0_stateless/00619_extract.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00619_extract.sql
rename to tests/queries/0_stateless/00619_extract.sql
diff --git a/dbms/tests/queries/0_stateless/00619_union_highlite.reference b/tests/queries/0_stateless/00619_union_highlite.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00619_union_highlite.reference
rename to tests/queries/0_stateless/00619_union_highlite.reference
diff --git a/dbms/tests/queries/0_stateless/00619_union_highlite.sql b/tests/queries/0_stateless/00619_union_highlite.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00619_union_highlite.sql
rename to tests/queries/0_stateless/00619_union_highlite.sql
diff --git a/dbms/tests/queries/0_stateless/00620_optimize_on_nonleader_replica_zookeeper.reference b/tests/queries/0_stateless/00620_optimize_on_nonleader_replica_zookeeper.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00620_optimize_on_nonleader_replica_zookeeper.reference
rename to tests/queries/0_stateless/00620_optimize_on_nonleader_replica_zookeeper.reference
diff --git a/dbms/tests/queries/0_stateless/00620_optimize_on_nonleader_replica_zookeeper.sql b/tests/queries/0_stateless/00620_optimize_on_nonleader_replica_zookeeper.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00620_optimize_on_nonleader_replica_zookeeper.sql
rename to tests/queries/0_stateless/00620_optimize_on_nonleader_replica_zookeeper.sql
diff --git a/dbms/tests/queries/0_stateless/00621_regression_for_in_operator.reference b/tests/queries/0_stateless/00621_regression_for_in_operator.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00621_regression_for_in_operator.reference
rename to tests/queries/0_stateless/00621_regression_for_in_operator.reference
diff --git a/dbms/tests/queries/0_stateless/00621_regression_for_in_operator.sql b/tests/queries/0_stateless/00621_regression_for_in_operator.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00621_regression_for_in_operator.sql
rename to tests/queries/0_stateless/00621_regression_for_in_operator.sql
diff --git a/dbms/tests/queries/0_stateless/00622_select_in_parens.reference b/tests/queries/0_stateless/00622_select_in_parens.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00622_select_in_parens.reference
rename to tests/queries/0_stateless/00622_select_in_parens.reference
diff --git a/dbms/tests/queries/0_stateless/00622_select_in_parens.sql b/tests/queries/0_stateless/00622_select_in_parens.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00622_select_in_parens.sql
rename to tests/queries/0_stateless/00622_select_in_parens.sql
diff --git a/dbms/tests/queries/0_stateless/00623_in_partition_key.reference b/tests/queries/0_stateless/00623_in_partition_key.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00623_in_partition_key.reference
rename to tests/queries/0_stateless/00623_in_partition_key.reference
diff --git a/dbms/tests/queries/0_stateless/00623_in_partition_key.sql b/tests/queries/0_stateless/00623_in_partition_key.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00623_in_partition_key.sql
rename to tests/queries/0_stateless/00623_in_partition_key.sql
diff --git a/dbms/tests/queries/0_stateless/00623_replicated_truncate_table_zookeeper.reference b/tests/queries/0_stateless/00623_replicated_truncate_table_zookeeper.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00623_replicated_truncate_table_zookeeper.reference
rename to tests/queries/0_stateless/00623_replicated_truncate_table_zookeeper.reference
diff --git a/dbms/tests/queries/0_stateless/00623_replicated_truncate_table_zookeeper.sql b/tests/queries/0_stateless/00623_replicated_truncate_table_zookeeper.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00623_replicated_truncate_table_zookeeper.sql
rename to tests/queries/0_stateless/00623_replicated_truncate_table_zookeeper.sql
diff --git a/dbms/tests/queries/0_stateless/00623_truncate_table.reference b/tests/queries/0_stateless/00623_truncate_table.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00623_truncate_table.reference
rename to tests/queries/0_stateless/00623_truncate_table.reference
diff --git a/dbms/tests/queries/0_stateless/00623_truncate_table.sql b/tests/queries/0_stateless/00623_truncate_table.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00623_truncate_table.sql
rename to tests/queries/0_stateless/00623_truncate_table.sql
diff --git a/dbms/tests/queries/0_stateless/00623_truncate_table_throw_exception.reference b/tests/queries/0_stateless/00623_truncate_table_throw_exception.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00623_truncate_table_throw_exception.reference
rename to tests/queries/0_stateless/00623_truncate_table_throw_exception.reference
diff --git a/dbms/tests/queries/0_stateless/00623_truncate_table_throw_exception.sh b/tests/queries/0_stateless/00623_truncate_table_throw_exception.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00623_truncate_table_throw_exception.sh
rename to tests/queries/0_stateless/00623_truncate_table_throw_exception.sh
diff --git a/dbms/tests/queries/0_stateless/00624_length_utf8.reference b/tests/queries/0_stateless/00624_length_utf8.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00624_length_utf8.reference
rename to tests/queries/0_stateless/00624_length_utf8.reference
diff --git a/dbms/tests/queries/0_stateless/00624_length_utf8.sql b/tests/queries/0_stateless/00624_length_utf8.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00624_length_utf8.sql
rename to tests/queries/0_stateless/00624_length_utf8.sql
diff --git a/dbms/tests/queries/0_stateless/00625_arrays_in_nested.reference b/tests/queries/0_stateless/00625_arrays_in_nested.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00625_arrays_in_nested.reference
rename to tests/queries/0_stateless/00625_arrays_in_nested.reference
diff --git a/dbms/tests/queries/0_stateless/00625_arrays_in_nested.sql b/tests/queries/0_stateless/00625_arrays_in_nested.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00625_arrays_in_nested.sql
rename to tests/queries/0_stateless/00625_arrays_in_nested.sql
diff --git a/dbms/tests/queries/0_stateless/00625_query_in_form_data.reference b/tests/queries/0_stateless/00625_query_in_form_data.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00625_query_in_form_data.reference
rename to tests/queries/0_stateless/00625_query_in_form_data.reference
diff --git a/dbms/tests/queries/0_stateless/00625_query_in_form_data.sh b/tests/queries/0_stateless/00625_query_in_form_data.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00625_query_in_form_data.sh
rename to tests/queries/0_stateless/00625_query_in_form_data.sh
diff --git a/dbms/tests/queries/0_stateless/00625_summing_merge_tree_merge.reference b/tests/queries/0_stateless/00625_summing_merge_tree_merge.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00625_summing_merge_tree_merge.reference
rename to tests/queries/0_stateless/00625_summing_merge_tree_merge.reference
diff --git a/dbms/tests/queries/0_stateless/00625_summing_merge_tree_merge.sql b/tests/queries/0_stateless/00625_summing_merge_tree_merge.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00625_summing_merge_tree_merge.sql
rename to tests/queries/0_stateless/00625_summing_merge_tree_merge.sql
diff --git a/dbms/tests/queries/0_stateless/00626_in_syntax.reference b/tests/queries/0_stateless/00626_in_syntax.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00626_in_syntax.reference
rename to tests/queries/0_stateless/00626_in_syntax.reference
diff --git a/dbms/tests/queries/0_stateless/00626_in_syntax.sql b/tests/queries/0_stateless/00626_in_syntax.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00626_in_syntax.sql
rename to tests/queries/0_stateless/00626_in_syntax.sql
diff --git a/dbms/tests/queries/0_stateless/00626_replace_partition_from_table.reference b/tests/queries/0_stateless/00626_replace_partition_from_table.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00626_replace_partition_from_table.reference
rename to tests/queries/0_stateless/00626_replace_partition_from_table.reference
diff --git a/dbms/tests/queries/0_stateless/00626_replace_partition_from_table.sql b/tests/queries/0_stateless/00626_replace_partition_from_table.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00626_replace_partition_from_table.sql
rename to tests/queries/0_stateless/00626_replace_partition_from_table.sql
diff --git a/dbms/tests/queries/0_stateless/00626_replace_partition_from_table_zookeeper.reference b/tests/queries/0_stateless/00626_replace_partition_from_table_zookeeper.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00626_replace_partition_from_table_zookeeper.reference
rename to tests/queries/0_stateless/00626_replace_partition_from_table_zookeeper.reference
diff --git a/dbms/tests/queries/0_stateless/00626_replace_partition_from_table_zookeeper.sh b/tests/queries/0_stateless/00626_replace_partition_from_table_zookeeper.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00626_replace_partition_from_table_zookeeper.sh
rename to tests/queries/0_stateless/00626_replace_partition_from_table_zookeeper.sh
diff --git a/dbms/tests/queries/0_stateless/00627_recursive_alias.reference b/tests/queries/0_stateless/00627_recursive_alias.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00627_recursive_alias.reference
rename to tests/queries/0_stateless/00627_recursive_alias.reference
diff --git a/dbms/tests/queries/0_stateless/00627_recursive_alias.sql b/tests/queries/0_stateless/00627_recursive_alias.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00627_recursive_alias.sql
rename to tests/queries/0_stateless/00627_recursive_alias.sql
diff --git a/dbms/tests/queries/0_stateless/00628_in_lambda_on_merge_table_bug.reference b/tests/queries/0_stateless/00628_in_lambda_on_merge_table_bug.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00628_in_lambda_on_merge_table_bug.reference
rename to tests/queries/0_stateless/00628_in_lambda_on_merge_table_bug.reference
diff --git a/dbms/tests/queries/0_stateless/00628_in_lambda_on_merge_table_bug.sql b/tests/queries/0_stateless/00628_in_lambda_on_merge_table_bug.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00628_in_lambda_on_merge_table_bug.sql
rename to tests/queries/0_stateless/00628_in_lambda_on_merge_table_bug.sql
diff --git a/dbms/tests/queries/0_stateless/00630_arbitrary_csv_delimiter.reference b/tests/queries/0_stateless/00630_arbitrary_csv_delimiter.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00630_arbitrary_csv_delimiter.reference
rename to tests/queries/0_stateless/00630_arbitrary_csv_delimiter.reference
diff --git a/dbms/tests/queries/0_stateless/00630_arbitrary_csv_delimiter.sh b/tests/queries/0_stateless/00630_arbitrary_csv_delimiter.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00630_arbitrary_csv_delimiter.sh
rename to tests/queries/0_stateless/00630_arbitrary_csv_delimiter.sh
diff --git a/dbms/tests/queries/0_stateless/00632_aggregation_window_funnel.reference b/tests/queries/0_stateless/00632_aggregation_window_funnel.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00632_aggregation_window_funnel.reference
rename to tests/queries/0_stateless/00632_aggregation_window_funnel.reference
diff --git a/dbms/tests/queries/0_stateless/00632_aggregation_window_funnel.sql b/tests/queries/0_stateless/00632_aggregation_window_funnel.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00632_aggregation_window_funnel.sql
rename to tests/queries/0_stateless/00632_aggregation_window_funnel.sql
diff --git a/dbms/tests/queries/0_stateless/00632_get_sample_block_cache.reference b/tests/queries/0_stateless/00632_get_sample_block_cache.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00632_get_sample_block_cache.reference
rename to tests/queries/0_stateless/00632_get_sample_block_cache.reference
diff --git a/dbms/tests/queries/0_stateless/00632_get_sample_block_cache.sql b/tests/queries/0_stateless/00632_get_sample_block_cache.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00632_get_sample_block_cache.sql
rename to tests/queries/0_stateless/00632_get_sample_block_cache.sql
diff --git a/dbms/tests/queries/0_stateless/00633_func_or_in.reference b/tests/queries/0_stateless/00633_func_or_in.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00633_func_or_in.reference
rename to tests/queries/0_stateless/00633_func_or_in.reference
diff --git a/dbms/tests/queries/0_stateless/00633_func_or_in.sql b/tests/queries/0_stateless/00633_func_or_in.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00633_func_or_in.sql
rename to tests/queries/0_stateless/00633_func_or_in.sql
diff --git a/dbms/tests/queries/0_stateless/00633_materialized_view_and_too_many_parts_zookeeper.reference b/tests/queries/0_stateless/00633_materialized_view_and_too_many_parts_zookeeper.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00633_materialized_view_and_too_many_parts_zookeeper.reference
rename to tests/queries/0_stateless/00633_materialized_view_and_too_many_parts_zookeeper.reference
diff --git a/dbms/tests/queries/0_stateless/00633_materialized_view_and_too_many_parts_zookeeper.sh b/tests/queries/0_stateless/00633_materialized_view_and_too_many_parts_zookeeper.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00633_materialized_view_and_too_many_parts_zookeeper.sh
rename to tests/queries/0_stateless/00633_materialized_view_and_too_many_parts_zookeeper.sh
diff --git a/dbms/tests/queries/0_stateless/00634_logging_shard.reference b/tests/queries/0_stateless/00634_logging_shard.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00634_logging_shard.reference
rename to tests/queries/0_stateless/00634_logging_shard.reference
diff --git a/dbms/tests/queries/0_stateless/00634_logging_shard.sh b/tests/queries/0_stateless/00634_logging_shard.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00634_logging_shard.sh
rename to tests/queries/0_stateless/00634_logging_shard.sh
diff --git a/dbms/tests/queries/0_stateless/00634_performance_introspection_and_logging.reference b/tests/queries/0_stateless/00634_performance_introspection_and_logging.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00634_performance_introspection_and_logging.reference
rename to tests/queries/0_stateless/00634_performance_introspection_and_logging.reference
diff --git a/dbms/tests/queries/0_stateless/00634_performance_introspection_and_logging.sh b/tests/queries/0_stateless/00634_performance_introspection_and_logging.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00634_performance_introspection_and_logging.sh
rename to tests/queries/0_stateless/00634_performance_introspection_and_logging.sh
diff --git a/dbms/tests/queries/0_stateless/00634_rename_view.reference b/tests/queries/0_stateless/00634_rename_view.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00634_rename_view.reference
rename to tests/queries/0_stateless/00634_rename_view.reference
diff --git a/dbms/tests/queries/0_stateless/00634_rename_view.sql b/tests/queries/0_stateless/00634_rename_view.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00634_rename_view.sql
rename to tests/queries/0_stateless/00634_rename_view.sql
diff --git a/dbms/tests/queries/0_stateless/00635_shard_distinct_order_by.reference b/tests/queries/0_stateless/00635_shard_distinct_order_by.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00635_shard_distinct_order_by.reference
rename to tests/queries/0_stateless/00635_shard_distinct_order_by.reference
diff --git a/dbms/tests/queries/0_stateless/00635_shard_distinct_order_by.sql b/tests/queries/0_stateless/00635_shard_distinct_order_by.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00635_shard_distinct_order_by.sql
rename to tests/queries/0_stateless/00635_shard_distinct_order_by.sql
diff --git a/dbms/tests/queries/0_stateless/00636_partition_key_parts_pruning.reference b/tests/queries/0_stateless/00636_partition_key_parts_pruning.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00636_partition_key_parts_pruning.reference
rename to tests/queries/0_stateless/00636_partition_key_parts_pruning.reference
diff --git a/dbms/tests/queries/0_stateless/00636_partition_key_parts_pruning.sh b/tests/queries/0_stateless/00636_partition_key_parts_pruning.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00636_partition_key_parts_pruning.sh
rename to tests/queries/0_stateless/00636_partition_key_parts_pruning.sh
diff --git a/dbms/tests/queries/0_stateless/00637_sessions_in_http_interface_and_settings.reference b/tests/queries/0_stateless/00637_sessions_in_http_interface_and_settings.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00637_sessions_in_http_interface_and_settings.reference
rename to tests/queries/0_stateless/00637_sessions_in_http_interface_and_settings.reference
diff --git a/dbms/tests/queries/0_stateless/00637_sessions_in_http_interface_and_settings.sh b/tests/queries/0_stateless/00637_sessions_in_http_interface_and_settings.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00637_sessions_in_http_interface_and_settings.sh
rename to tests/queries/0_stateless/00637_sessions_in_http_interface_and_settings.sh
diff --git a/dbms/tests/queries/0_stateless/00638_remote_ssrf.reference b/tests/queries/0_stateless/00638_remote_ssrf.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00638_remote_ssrf.reference
rename to tests/queries/0_stateless/00638_remote_ssrf.reference
diff --git a/dbms/tests/queries/0_stateless/00638_remote_ssrf.sh.disabled b/tests/queries/0_stateless/00638_remote_ssrf.sh.disabled
similarity index 100%
rename from dbms/tests/queries/0_stateless/00638_remote_ssrf.sh.disabled
rename to tests/queries/0_stateless/00638_remote_ssrf.sh.disabled
diff --git a/dbms/tests/queries/0_stateless/00639_startsWith.reference b/tests/queries/0_stateless/00639_startsWith.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00639_startsWith.reference
rename to tests/queries/0_stateless/00639_startsWith.reference
diff --git a/dbms/tests/queries/0_stateless/00639_startsWith.sql b/tests/queries/0_stateless/00639_startsWith.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00639_startsWith.sql
rename to tests/queries/0_stateless/00639_startsWith.sql
diff --git a/dbms/tests/queries/0_stateless/00640_endsWith.reference b/tests/queries/0_stateless/00640_endsWith.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00640_endsWith.reference
rename to
tests/queries/0_stateless/00640_endsWith.reference diff --git a/dbms/tests/queries/0_stateless/00640_endsWith.sql b/tests/queries/0_stateless/00640_endsWith.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00640_endsWith.sql rename to tests/queries/0_stateless/00640_endsWith.sql diff --git a/dbms/tests/queries/0_stateless/00642_cast.reference b/tests/queries/0_stateless/00642_cast.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00642_cast.reference rename to tests/queries/0_stateless/00642_cast.reference diff --git a/dbms/tests/queries/0_stateless/00642_cast.sql b/tests/queries/0_stateless/00642_cast.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00642_cast.sql rename to tests/queries/0_stateless/00642_cast.sql diff --git a/dbms/tests/queries/0_stateless/00643_cast_zookeeper.reference b/tests/queries/0_stateless/00643_cast_zookeeper.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00643_cast_zookeeper.reference rename to tests/queries/0_stateless/00643_cast_zookeeper.reference diff --git a/dbms/tests/queries/0_stateless/00643_cast_zookeeper.sql b/tests/queries/0_stateless/00643_cast_zookeeper.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00643_cast_zookeeper.sql rename to tests/queries/0_stateless/00643_cast_zookeeper.sql diff --git a/dbms/tests/queries/0_stateless/00644_different_expressions_with_same_alias.reference b/tests/queries/0_stateless/00644_different_expressions_with_same_alias.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00644_different_expressions_with_same_alias.reference rename to tests/queries/0_stateless/00644_different_expressions_with_same_alias.reference diff --git a/dbms/tests/queries/0_stateless/00644_different_expressions_with_same_alias.sql b/tests/queries/0_stateless/00644_different_expressions_with_same_alias.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00644_different_expressions_with_same_alias.sql rename to tests/queries/0_stateless/00644_different_expressions_with_same_alias.sql diff --git a/dbms/tests/queries/0_stateless/00645_date_time_input_format.reference b/tests/queries/0_stateless/00645_date_time_input_format.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00645_date_time_input_format.reference rename to tests/queries/0_stateless/00645_date_time_input_format.reference diff --git a/dbms/tests/queries/0_stateless/00645_date_time_input_format.sql b/tests/queries/0_stateless/00645_date_time_input_format.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00645_date_time_input_format.sql rename to tests/queries/0_stateless/00645_date_time_input_format.sql diff --git a/dbms/tests/queries/0_stateless/00646_url_engine.python b/tests/queries/0_stateless/00646_url_engine.python similarity index 100% rename from dbms/tests/queries/0_stateless/00646_url_engine.python rename to tests/queries/0_stateless/00646_url_engine.python diff --git a/dbms/tests/queries/0_stateless/00646_url_engine.reference b/tests/queries/0_stateless/00646_url_engine.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00646_url_engine.reference rename to tests/queries/0_stateless/00646_url_engine.reference diff --git a/dbms/tests/queries/0_stateless/00646_url_engine.sh b/tests/queries/0_stateless/00646_url_engine.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00646_url_engine.sh rename to tests/queries/0_stateless/00646_url_engine.sh 
diff --git a/dbms/tests/queries/0_stateless/00646_weird_mmx.reference b/tests/queries/0_stateless/00646_weird_mmx.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00646_weird_mmx.reference rename to tests/queries/0_stateless/00646_weird_mmx.reference diff --git a/dbms/tests/queries/0_stateless/00646_weird_mmx.sql b/tests/queries/0_stateless/00646_weird_mmx.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00646_weird_mmx.sql rename to tests/queries/0_stateless/00646_weird_mmx.sql diff --git a/dbms/tests/queries/0_stateless/00647_histogram.reference b/tests/queries/0_stateless/00647_histogram.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00647_histogram.reference rename to tests/queries/0_stateless/00647_histogram.reference diff --git a/dbms/tests/queries/0_stateless/00647_histogram.sql b/tests/queries/0_stateless/00647_histogram.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00647_histogram.sql rename to tests/queries/0_stateless/00647_histogram.sql diff --git a/dbms/tests/queries/0_stateless/00647_multiply_aggregation_state.reference b/tests/queries/0_stateless/00647_multiply_aggregation_state.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00647_multiply_aggregation_state.reference rename to tests/queries/0_stateless/00647_multiply_aggregation_state.reference diff --git a/dbms/tests/queries/0_stateless/00647_multiply_aggregation_state.sql b/tests/queries/0_stateless/00647_multiply_aggregation_state.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00647_multiply_aggregation_state.sql rename to tests/queries/0_stateless/00647_multiply_aggregation_state.sql diff --git a/dbms/tests/queries/0_stateless/00647_select_numbers_with_offset.reference b/tests/queries/0_stateless/00647_select_numbers_with_offset.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00647_select_numbers_with_offset.reference rename to tests/queries/0_stateless/00647_select_numbers_with_offset.reference diff --git a/dbms/tests/queries/0_stateless/00647_select_numbers_with_offset.sql b/tests/queries/0_stateless/00647_select_numbers_with_offset.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00647_select_numbers_with_offset.sql rename to tests/queries/0_stateless/00647_select_numbers_with_offset.sql diff --git a/dbms/tests/queries/0_stateless/00648_replacing_empty_set_from_prewhere.reference b/tests/queries/0_stateless/00648_replacing_empty_set_from_prewhere.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00648_replacing_empty_set_from_prewhere.reference rename to tests/queries/0_stateless/00648_replacing_empty_set_from_prewhere.reference diff --git a/dbms/tests/queries/0_stateless/00648_replacing_empty_set_from_prewhere.sql b/tests/queries/0_stateless/00648_replacing_empty_set_from_prewhere.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00648_replacing_empty_set_from_prewhere.sql rename to tests/queries/0_stateless/00648_replacing_empty_set_from_prewhere.sql diff --git a/dbms/tests/queries/0_stateless/00649_quantile_tdigest_negative.reference b/tests/queries/0_stateless/00649_quantile_tdigest_negative.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00649_quantile_tdigest_negative.reference rename to tests/queries/0_stateless/00649_quantile_tdigest_negative.reference diff --git a/dbms/tests/queries/0_stateless/00649_quantile_tdigest_negative.sql 
b/tests/queries/0_stateless/00649_quantile_tdigest_negative.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00649_quantile_tdigest_negative.sql rename to tests/queries/0_stateless/00649_quantile_tdigest_negative.sql diff --git a/dbms/tests/queries/0_stateless/00650_array_enumerate_uniq_with_tuples.reference b/tests/queries/0_stateless/00650_array_enumerate_uniq_with_tuples.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00650_array_enumerate_uniq_with_tuples.reference rename to tests/queries/0_stateless/00650_array_enumerate_uniq_with_tuples.reference diff --git a/dbms/tests/queries/0_stateless/00650_array_enumerate_uniq_with_tuples.sql b/tests/queries/0_stateless/00650_array_enumerate_uniq_with_tuples.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00650_array_enumerate_uniq_with_tuples.sql rename to tests/queries/0_stateless/00650_array_enumerate_uniq_with_tuples.sql diff --git a/dbms/tests/queries/0_stateless/00650_csv_with_specified_quote_rule.reference b/tests/queries/0_stateless/00650_csv_with_specified_quote_rule.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00650_csv_with_specified_quote_rule.reference rename to tests/queries/0_stateless/00650_csv_with_specified_quote_rule.reference diff --git a/dbms/tests/queries/0_stateless/00650_csv_with_specified_quote_rule.sh b/tests/queries/0_stateless/00650_csv_with_specified_quote_rule.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00650_csv_with_specified_quote_rule.sh rename to tests/queries/0_stateless/00650_csv_with_specified_quote_rule.sh diff --git a/dbms/tests/queries/0_stateless/00651_default_database_on_client_reconnect.reference b/tests/queries/0_stateless/00651_default_database_on_client_reconnect.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00651_default_database_on_client_reconnect.reference rename to tests/queries/0_stateless/00651_default_database_on_client_reconnect.reference diff --git a/dbms/tests/queries/0_stateless/00651_default_database_on_client_reconnect.sh b/tests/queries/0_stateless/00651_default_database_on_client_reconnect.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00651_default_database_on_client_reconnect.sh rename to tests/queries/0_stateless/00651_default_database_on_client_reconnect.sh diff --git a/dbms/tests/queries/0_stateless/00652_mergetree_mutations.reference b/tests/queries/0_stateless/00652_mergetree_mutations.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00652_mergetree_mutations.reference rename to tests/queries/0_stateless/00652_mergetree_mutations.reference diff --git a/dbms/tests/queries/0_stateless/00652_mergetree_mutations.sh b/tests/queries/0_stateless/00652_mergetree_mutations.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00652_mergetree_mutations.sh rename to tests/queries/0_stateless/00652_mergetree_mutations.sh diff --git a/dbms/tests/queries/0_stateless/00652_mutations_alter_update.reference b/tests/queries/0_stateless/00652_mutations_alter_update.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00652_mutations_alter_update.reference rename to tests/queries/0_stateless/00652_mutations_alter_update.reference diff --git a/dbms/tests/queries/0_stateless/00652_mutations_alter_update.sh b/tests/queries/0_stateless/00652_mutations_alter_update.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00652_mutations_alter_update.sh rename 
to tests/queries/0_stateless/00652_mutations_alter_update.sh diff --git a/dbms/tests/queries/0_stateless/00652_mutations_default_database.reference b/tests/queries/0_stateless/00652_mutations_default_database.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00652_mutations_default_database.reference rename to tests/queries/0_stateless/00652_mutations_default_database.reference diff --git a/dbms/tests/queries/0_stateless/00652_mutations_default_database.sh b/tests/queries/0_stateless/00652_mutations_default_database.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00652_mutations_default_database.sh rename to tests/queries/0_stateless/00652_mutations_default_database.sh diff --git a/dbms/tests/queries/0_stateless/00652_replicated_mutations_default_database_zookeeper.reference b/tests/queries/0_stateless/00652_replicated_mutations_default_database_zookeeper.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00652_replicated_mutations_default_database_zookeeper.reference rename to tests/queries/0_stateless/00652_replicated_mutations_default_database_zookeeper.reference diff --git a/dbms/tests/queries/0_stateless/00652_replicated_mutations_default_database_zookeeper.sh b/tests/queries/0_stateless/00652_replicated_mutations_default_database_zookeeper.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00652_replicated_mutations_default_database_zookeeper.sh rename to tests/queries/0_stateless/00652_replicated_mutations_default_database_zookeeper.sh diff --git a/dbms/tests/queries/0_stateless/00652_replicated_mutations_zookeeper.reference b/tests/queries/0_stateless/00652_replicated_mutations_zookeeper.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00652_replicated_mutations_zookeeper.reference rename to tests/queries/0_stateless/00652_replicated_mutations_zookeeper.reference diff --git a/dbms/tests/queries/0_stateless/00652_replicated_mutations_zookeeper.sh b/tests/queries/0_stateless/00652_replicated_mutations_zookeeper.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00652_replicated_mutations_zookeeper.sh rename to tests/queries/0_stateless/00652_replicated_mutations_zookeeper.sh diff --git a/dbms/tests/queries/0_stateless/00653_monotonic_integer_cast.reference b/tests/queries/0_stateless/00653_monotonic_integer_cast.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00653_monotonic_integer_cast.reference rename to tests/queries/0_stateless/00653_monotonic_integer_cast.reference diff --git a/dbms/tests/queries/0_stateless/00653_monotonic_integer_cast.sql b/tests/queries/0_stateless/00653_monotonic_integer_cast.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00653_monotonic_integer_cast.sql rename to tests/queries/0_stateless/00653_monotonic_integer_cast.sql diff --git a/dbms/tests/queries/0_stateless/00653_running_difference.reference b/tests/queries/0_stateless/00653_running_difference.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00653_running_difference.reference rename to tests/queries/0_stateless/00653_running_difference.reference diff --git a/dbms/tests/queries/0_stateless/00653_running_difference.sql b/tests/queries/0_stateless/00653_running_difference.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00653_running_difference.sql rename to tests/queries/0_stateless/00653_running_difference.sql diff --git 
a/dbms/tests/queries/0_stateless/00653_verification_monotonic_data_load.reference b/tests/queries/0_stateless/00653_verification_monotonic_data_load.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00653_verification_monotonic_data_load.reference rename to tests/queries/0_stateless/00653_verification_monotonic_data_load.reference diff --git a/dbms/tests/queries/0_stateless/00653_verification_monotonic_data_load.sh b/tests/queries/0_stateless/00653_verification_monotonic_data_load.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00653_verification_monotonic_data_load.sh rename to tests/queries/0_stateless/00653_verification_monotonic_data_load.sh diff --git a/dbms/tests/queries/0_stateless/00660_optimize_final_without_partition.reference b/tests/queries/0_stateless/00660_optimize_final_without_partition.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00660_optimize_final_without_partition.reference rename to tests/queries/0_stateless/00660_optimize_final_without_partition.reference diff --git a/dbms/tests/queries/0_stateless/00660_optimize_final_without_partition.sql b/tests/queries/0_stateless/00660_optimize_final_without_partition.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00660_optimize_final_without_partition.sql rename to tests/queries/0_stateless/00660_optimize_final_without_partition.sql diff --git a/dbms/tests/queries/0_stateless/00661_array_has_silviucpp.reference b/tests/queries/0_stateless/00661_array_has_silviucpp.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00661_array_has_silviucpp.reference rename to tests/queries/0_stateless/00661_array_has_silviucpp.reference diff --git a/dbms/tests/queries/0_stateless/00661_array_has_silviucpp.sql b/tests/queries/0_stateless/00661_array_has_silviucpp.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00661_array_has_silviucpp.sql rename to tests/queries/0_stateless/00661_array_has_silviucpp.sql diff --git a/dbms/tests/queries/0_stateless/00661_optimize_final_replicated_without_partition_zookeeper.reference b/tests/queries/0_stateless/00661_optimize_final_replicated_without_partition_zookeeper.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00661_optimize_final_replicated_without_partition_zookeeper.reference rename to tests/queries/0_stateless/00661_optimize_final_replicated_without_partition_zookeeper.reference diff --git a/dbms/tests/queries/0_stateless/00661_optimize_final_replicated_without_partition_zookeeper.sql b/tests/queries/0_stateless/00661_optimize_final_replicated_without_partition_zookeeper.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00661_optimize_final_replicated_without_partition_zookeeper.sql rename to tests/queries/0_stateless/00661_optimize_final_replicated_without_partition_zookeeper.sql diff --git a/dbms/tests/queries/0_stateless/00662_array_has_nullable.reference b/tests/queries/0_stateless/00662_array_has_nullable.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00662_array_has_nullable.reference rename to tests/queries/0_stateless/00662_array_has_nullable.reference diff --git a/dbms/tests/queries/0_stateless/00662_array_has_nullable.sql b/tests/queries/0_stateless/00662_array_has_nullable.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00662_array_has_nullable.sql rename to tests/queries/0_stateless/00662_array_has_nullable.sql diff --git 
a/dbms/tests/queries/0_stateless/00663_tiny_log_empty_insert.reference b/tests/queries/0_stateless/00663_tiny_log_empty_insert.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00663_tiny_log_empty_insert.reference rename to tests/queries/0_stateless/00663_tiny_log_empty_insert.reference diff --git a/dbms/tests/queries/0_stateless/00663_tiny_log_empty_insert.sql b/tests/queries/0_stateless/00663_tiny_log_empty_insert.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00663_tiny_log_empty_insert.sql rename to tests/queries/0_stateless/00663_tiny_log_empty_insert.sql diff --git a/dbms/tests/queries/0_stateless/00664_cast_from_string_to_nullable.reference b/tests/queries/0_stateless/00664_cast_from_string_to_nullable.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00664_cast_from_string_to_nullable.reference rename to tests/queries/0_stateless/00664_cast_from_string_to_nullable.reference diff --git a/dbms/tests/queries/0_stateless/00664_cast_from_string_to_nullable.sql b/tests/queries/0_stateless/00664_cast_from_string_to_nullable.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00664_cast_from_string_to_nullable.sql rename to tests/queries/0_stateless/00664_cast_from_string_to_nullable.sql diff --git a/dbms/tests/queries/0_stateless/00665_alter_nullable_string_to_nullable_uint8.reference b/tests/queries/0_stateless/00665_alter_nullable_string_to_nullable_uint8.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00665_alter_nullable_string_to_nullable_uint8.reference rename to tests/queries/0_stateless/00665_alter_nullable_string_to_nullable_uint8.reference diff --git a/dbms/tests/queries/0_stateless/00665_alter_nullable_string_to_nullable_uint8.sql b/tests/queries/0_stateless/00665_alter_nullable_string_to_nullable_uint8.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00665_alter_nullable_string_to_nullable_uint8.sql rename to tests/queries/0_stateless/00665_alter_nullable_string_to_nullable_uint8.sql diff --git a/dbms/tests/queries/0_stateless/00666_uniq_complex_types.reference b/tests/queries/0_stateless/00666_uniq_complex_types.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00666_uniq_complex_types.reference rename to tests/queries/0_stateless/00666_uniq_complex_types.reference diff --git a/dbms/tests/queries/0_stateless/00666_uniq_complex_types.sql b/tests/queries/0_stateless/00666_uniq_complex_types.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00666_uniq_complex_types.sql rename to tests/queries/0_stateless/00666_uniq_complex_types.sql diff --git a/dbms/tests/queries/0_stateless/00667_compare_arrays_of_different_types.reference b/tests/queries/0_stateless/00667_compare_arrays_of_different_types.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00667_compare_arrays_of_different_types.reference rename to tests/queries/0_stateless/00667_compare_arrays_of_different_types.reference diff --git a/dbms/tests/queries/0_stateless/00667_compare_arrays_of_different_types.sql b/tests/queries/0_stateless/00667_compare_arrays_of_different_types.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00667_compare_arrays_of_different_types.sql rename to tests/queries/0_stateless/00667_compare_arrays_of_different_types.sql diff --git a/dbms/tests/queries/0_stateless/00668_compare_arrays_silviucpp.reference b/tests/queries/0_stateless/00668_compare_arrays_silviucpp.reference 
similarity index 100% rename from dbms/tests/queries/0_stateless/00668_compare_arrays_silviucpp.reference rename to tests/queries/0_stateless/00668_compare_arrays_silviucpp.reference diff --git a/dbms/tests/queries/0_stateless/00668_compare_arrays_silviucpp.sql b/tests/queries/0_stateless/00668_compare_arrays_silviucpp.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00668_compare_arrays_silviucpp.sql rename to tests/queries/0_stateless/00668_compare_arrays_silviucpp.sql diff --git a/dbms/tests/queries/0_stateless/00670_truncate_temporary_table.reference b/tests/queries/0_stateless/00670_truncate_temporary_table.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00670_truncate_temporary_table.reference rename to tests/queries/0_stateless/00670_truncate_temporary_table.reference diff --git a/dbms/tests/queries/0_stateless/00670_truncate_temporary_table.sql b/tests/queries/0_stateless/00670_truncate_temporary_table.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00670_truncate_temporary_table.sql rename to tests/queries/0_stateless/00670_truncate_temporary_table.sql diff --git a/dbms/tests/queries/0_stateless/00671_max_intersections.reference b/tests/queries/0_stateless/00671_max_intersections.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00671_max_intersections.reference rename to tests/queries/0_stateless/00671_max_intersections.reference diff --git a/dbms/tests/queries/0_stateless/00671_max_intersections.sql b/tests/queries/0_stateless/00671_max_intersections.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00671_max_intersections.sql rename to tests/queries/0_stateless/00671_max_intersections.sql diff --git a/dbms/tests/queries/0_stateless/00672_arrayDistinct.reference b/tests/queries/0_stateless/00672_arrayDistinct.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00672_arrayDistinct.reference rename to tests/queries/0_stateless/00672_arrayDistinct.reference diff --git a/dbms/tests/queries/0_stateless/00672_arrayDistinct.sql b/tests/queries/0_stateless/00672_arrayDistinct.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00672_arrayDistinct.sql rename to tests/queries/0_stateless/00672_arrayDistinct.sql diff --git a/dbms/tests/queries/0_stateless/00673_subquery_prepared_set_performance.reference b/tests/queries/0_stateless/00673_subquery_prepared_set_performance.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00673_subquery_prepared_set_performance.reference rename to tests/queries/0_stateless/00673_subquery_prepared_set_performance.reference diff --git a/dbms/tests/queries/0_stateless/00673_subquery_prepared_set_performance.sql b/tests/queries/0_stateless/00673_subquery_prepared_set_performance.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00673_subquery_prepared_set_performance.sql rename to tests/queries/0_stateless/00673_subquery_prepared_set_performance.sql diff --git a/dbms/tests/queries/0_stateless/00674_has_array_enum.reference b/tests/queries/0_stateless/00674_has_array_enum.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00674_has_array_enum.reference rename to tests/queries/0_stateless/00674_has_array_enum.reference diff --git a/dbms/tests/queries/0_stateless/00674_has_array_enum.sql b/tests/queries/0_stateless/00674_has_array_enum.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00674_has_array_enum.sql rename to 
tests/queries/0_stateless/00674_has_array_enum.sql diff --git a/dbms/tests/queries/0_stateless/00674_join_on_syntax.reference b/tests/queries/0_stateless/00674_join_on_syntax.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00674_join_on_syntax.reference rename to tests/queries/0_stateless/00674_join_on_syntax.reference diff --git a/dbms/tests/queries/0_stateless/00674_join_on_syntax.sql b/tests/queries/0_stateless/00674_join_on_syntax.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00674_join_on_syntax.sql rename to tests/queries/0_stateless/00674_join_on_syntax.sql diff --git a/dbms/tests/queries/0_stateless/00675_shard_remote_with_table_function.reference b/tests/queries/0_stateless/00675_shard_remote_with_table_function.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00675_shard_remote_with_table_function.reference rename to tests/queries/0_stateless/00675_shard_remote_with_table_function.reference diff --git a/dbms/tests/queries/0_stateless/00675_shard_remote_with_table_function.sql b/tests/queries/0_stateless/00675_shard_remote_with_table_function.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00675_shard_remote_with_table_function.sql rename to tests/queries/0_stateless/00675_shard_remote_with_table_function.sql diff --git a/dbms/tests/queries/0_stateless/00676_group_by_in.reference b/tests/queries/0_stateless/00676_group_by_in.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00676_group_by_in.reference rename to tests/queries/0_stateless/00676_group_by_in.reference diff --git a/dbms/tests/queries/0_stateless/00676_group_by_in.sql b/tests/queries/0_stateless/00676_group_by_in.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00676_group_by_in.sql rename to tests/queries/0_stateless/00676_group_by_in.sql diff --git a/dbms/tests/queries/0_stateless/00677_shard_any_heavy_merge.reference b/tests/queries/0_stateless/00677_shard_any_heavy_merge.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00677_shard_any_heavy_merge.reference rename to tests/queries/0_stateless/00677_shard_any_heavy_merge.reference diff --git a/dbms/tests/queries/0_stateless/00677_shard_any_heavy_merge.sql b/tests/queries/0_stateless/00677_shard_any_heavy_merge.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00677_shard_any_heavy_merge.sql rename to tests/queries/0_stateless/00677_shard_any_heavy_merge.sql diff --git a/dbms/tests/queries/0_stateless/00678_murmurhash.reference b/tests/queries/0_stateless/00678_murmurhash.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00678_murmurhash.reference rename to tests/queries/0_stateless/00678_murmurhash.reference diff --git a/dbms/tests/queries/0_stateless/00678_murmurhash.sql b/tests/queries/0_stateless/00678_murmurhash.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00678_murmurhash.sql rename to tests/queries/0_stateless/00678_murmurhash.sql diff --git a/dbms/tests/queries/0_stateless/00678_shard_funnel_window.reference b/tests/queries/0_stateless/00678_shard_funnel_window.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00678_shard_funnel_window.reference rename to tests/queries/0_stateless/00678_shard_funnel_window.reference diff --git a/dbms/tests/queries/0_stateless/00678_shard_funnel_window.sql b/tests/queries/0_stateless/00678_shard_funnel_window.sql similarity index 100% rename from 
dbms/tests/queries/0_stateless/00678_shard_funnel_window.sql rename to tests/queries/0_stateless/00678_shard_funnel_window.sql diff --git a/dbms/tests/queries/0_stateless/00679_replace_asterisk.reference b/tests/queries/0_stateless/00679_replace_asterisk.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00679_replace_asterisk.reference rename to tests/queries/0_stateless/00679_replace_asterisk.reference diff --git a/dbms/tests/queries/0_stateless/00679_replace_asterisk.sql b/tests/queries/0_stateless/00679_replace_asterisk.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00679_replace_asterisk.sql rename to tests/queries/0_stateless/00679_replace_asterisk.sql diff --git a/dbms/tests/queries/0_stateless/00679_uuid_in_key.reference b/tests/queries/0_stateless/00679_uuid_in_key.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00679_uuid_in_key.reference rename to tests/queries/0_stateless/00679_uuid_in_key.reference diff --git a/dbms/tests/queries/0_stateless/00679_uuid_in_key.sql b/tests/queries/0_stateless/00679_uuid_in_key.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00679_uuid_in_key.sql rename to tests/queries/0_stateless/00679_uuid_in_key.sql diff --git a/dbms/tests/queries/0_stateless/00680_duplicate_columns_inside_union_all.reference b/tests/queries/0_stateless/00680_duplicate_columns_inside_union_all.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00680_duplicate_columns_inside_union_all.reference rename to tests/queries/0_stateless/00680_duplicate_columns_inside_union_all.reference diff --git a/dbms/tests/queries/0_stateless/00680_duplicate_columns_inside_union_all.sql b/tests/queries/0_stateless/00680_duplicate_columns_inside_union_all.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00680_duplicate_columns_inside_union_all.sql rename to tests/queries/0_stateless/00680_duplicate_columns_inside_union_all.sql diff --git a/dbms/tests/queries/0_stateless/00681_duplicate_columns_inside_union_all_stas_sviridov.reference b/tests/queries/0_stateless/00681_duplicate_columns_inside_union_all_stas_sviridov.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00681_duplicate_columns_inside_union_all_stas_sviridov.reference rename to tests/queries/0_stateless/00681_duplicate_columns_inside_union_all_stas_sviridov.reference diff --git a/dbms/tests/queries/0_stateless/00681_duplicate_columns_inside_union_all_stas_sviridov.sql b/tests/queries/0_stateless/00681_duplicate_columns_inside_union_all_stas_sviridov.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00681_duplicate_columns_inside_union_all_stas_sviridov.sql rename to tests/queries/0_stateless/00681_duplicate_columns_inside_union_all_stas_sviridov.sql diff --git a/dbms/tests/queries/0_stateless/00682_empty_parts_merge.reference b/tests/queries/0_stateless/00682_empty_parts_merge.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00682_empty_parts_merge.reference rename to tests/queries/0_stateless/00682_empty_parts_merge.reference diff --git a/dbms/tests/queries/0_stateless/00682_empty_parts_merge.sh b/tests/queries/0_stateless/00682_empty_parts_merge.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00682_empty_parts_merge.sh rename to tests/queries/0_stateless/00682_empty_parts_merge.sh diff --git a/dbms/tests/queries/0_stateless/00685_output_format_json_escape_forward_slashes.reference 
b/tests/queries/0_stateless/00685_output_format_json_escape_forward_slashes.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00685_output_format_json_escape_forward_slashes.reference rename to tests/queries/0_stateless/00685_output_format_json_escape_forward_slashes.reference diff --git a/dbms/tests/queries/0_stateless/00685_output_format_json_escape_forward_slashes.sql b/tests/queries/0_stateless/00685_output_format_json_escape_forward_slashes.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00685_output_format_json_escape_forward_slashes.sql rename to tests/queries/0_stateless/00685_output_format_json_escape_forward_slashes.sql diff --git a/dbms/tests/queries/0_stateless/00686_client_exit_code.reference b/tests/queries/0_stateless/00686_client_exit_code.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00686_client_exit_code.reference rename to tests/queries/0_stateless/00686_client_exit_code.reference diff --git a/dbms/tests/queries/0_stateless/00686_client_exit_code.sh b/tests/queries/0_stateless/00686_client_exit_code.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00686_client_exit_code.sh rename to tests/queries/0_stateless/00686_client_exit_code.sh diff --git a/dbms/tests/queries/0_stateless/00687_insert_into_mv.reference b/tests/queries/0_stateless/00687_insert_into_mv.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00687_insert_into_mv.reference rename to tests/queries/0_stateless/00687_insert_into_mv.reference diff --git a/dbms/tests/queries/0_stateless/00687_insert_into_mv.sql b/tests/queries/0_stateless/00687_insert_into_mv.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00687_insert_into_mv.sql rename to tests/queries/0_stateless/00687_insert_into_mv.sql diff --git a/dbms/tests/queries/0_stateless/00687_top_and_offset.reference b/tests/queries/0_stateless/00687_top_and_offset.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00687_top_and_offset.reference rename to tests/queries/0_stateless/00687_top_and_offset.reference diff --git a/dbms/tests/queries/0_stateless/00687_top_and_offset.sh b/tests/queries/0_stateless/00687_top_and_offset.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00687_top_and_offset.sh rename to tests/queries/0_stateless/00687_top_and_offset.sh diff --git a/dbms/tests/queries/0_stateless/00688_aggregation_retention.reference b/tests/queries/0_stateless/00688_aggregation_retention.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00688_aggregation_retention.reference rename to tests/queries/0_stateless/00688_aggregation_retention.reference diff --git a/dbms/tests/queries/0_stateless/00688_aggregation_retention.sql b/tests/queries/0_stateless/00688_aggregation_retention.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00688_aggregation_retention.sql rename to tests/queries/0_stateless/00688_aggregation_retention.sql diff --git a/dbms/tests/queries/0_stateless/00688_case_without_else.reference b/tests/queries/0_stateless/00688_case_without_else.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00688_case_without_else.reference rename to tests/queries/0_stateless/00688_case_without_else.reference diff --git a/dbms/tests/queries/0_stateless/00688_case_without_else.sql b/tests/queries/0_stateless/00688_case_without_else.sql similarity index 100% rename from 
dbms/tests/queries/0_stateless/00688_case_without_else.sql rename to tests/queries/0_stateless/00688_case_without_else.sql diff --git a/dbms/tests/queries/0_stateless/00688_low_cardinality_alter_add_column.reference b/tests/queries/0_stateless/00688_low_cardinality_alter_add_column.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00688_low_cardinality_alter_add_column.reference rename to tests/queries/0_stateless/00688_low_cardinality_alter_add_column.reference diff --git a/dbms/tests/queries/0_stateless/00688_low_cardinality_alter_add_column.sql b/tests/queries/0_stateless/00688_low_cardinality_alter_add_column.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00688_low_cardinality_alter_add_column.sql rename to tests/queries/0_stateless/00688_low_cardinality_alter_add_column.sql diff --git a/dbms/tests/queries/0_stateless/00688_low_cardinality_defaults.reference b/tests/queries/0_stateless/00688_low_cardinality_defaults.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00688_low_cardinality_defaults.reference rename to tests/queries/0_stateless/00688_low_cardinality_defaults.reference diff --git a/dbms/tests/queries/0_stateless/00688_low_cardinality_defaults.sql b/tests/queries/0_stateless/00688_low_cardinality_defaults.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00688_low_cardinality_defaults.sql rename to tests/queries/0_stateless/00688_low_cardinality_defaults.sql diff --git a/dbms/tests/queries/0_stateless/00688_low_cardinality_dictionary_deserialization.reference b/tests/queries/0_stateless/00688_low_cardinality_dictionary_deserialization.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00688_low_cardinality_dictionary_deserialization.reference rename to tests/queries/0_stateless/00688_low_cardinality_dictionary_deserialization.reference diff --git a/dbms/tests/queries/0_stateless/00688_low_cardinality_dictionary_deserialization.sql b/tests/queries/0_stateless/00688_low_cardinality_dictionary_deserialization.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00688_low_cardinality_dictionary_deserialization.sql rename to tests/queries/0_stateless/00688_low_cardinality_dictionary_deserialization.sql diff --git a/dbms/tests/queries/0_stateless/00688_low_cardinality_in.reference b/tests/queries/0_stateless/00688_low_cardinality_in.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00688_low_cardinality_in.reference rename to tests/queries/0_stateless/00688_low_cardinality_in.reference diff --git a/dbms/tests/queries/0_stateless/00688_low_cardinality_in.sql b/tests/queries/0_stateless/00688_low_cardinality_in.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00688_low_cardinality_in.sql rename to tests/queries/0_stateless/00688_low_cardinality_in.sql diff --git a/dbms/tests/queries/0_stateless/00688_low_cardinality_nullable_cast.reference b/tests/queries/0_stateless/00688_low_cardinality_nullable_cast.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00688_low_cardinality_nullable_cast.reference rename to tests/queries/0_stateless/00688_low_cardinality_nullable_cast.reference diff --git a/dbms/tests/queries/0_stateless/00688_low_cardinality_nullable_cast.sql b/tests/queries/0_stateless/00688_low_cardinality_nullable_cast.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00688_low_cardinality_nullable_cast.sql rename to 
tests/queries/0_stateless/00688_low_cardinality_nullable_cast.sql diff --git a/dbms/tests/queries/0_stateless/00688_low_cardinality_prewhere.reference b/tests/queries/0_stateless/00688_low_cardinality_prewhere.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00688_low_cardinality_prewhere.reference rename to tests/queries/0_stateless/00688_low_cardinality_prewhere.reference diff --git a/dbms/tests/queries/0_stateless/00688_low_cardinality_prewhere.sql b/tests/queries/0_stateless/00688_low_cardinality_prewhere.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00688_low_cardinality_prewhere.sql rename to tests/queries/0_stateless/00688_low_cardinality_prewhere.sql diff --git a/dbms/tests/queries/0_stateless/00688_low_cardinality_serialization.reference b/tests/queries/0_stateless/00688_low_cardinality_serialization.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00688_low_cardinality_serialization.reference rename to tests/queries/0_stateless/00688_low_cardinality_serialization.reference diff --git a/dbms/tests/queries/0_stateless/00688_low_cardinality_serialization.sql b/tests/queries/0_stateless/00688_low_cardinality_serialization.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00688_low_cardinality_serialization.sql rename to tests/queries/0_stateless/00688_low_cardinality_serialization.sql diff --git a/dbms/tests/queries/0_stateless/00688_low_cardinality_syntax.reference b/tests/queries/0_stateless/00688_low_cardinality_syntax.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00688_low_cardinality_syntax.reference rename to tests/queries/0_stateless/00688_low_cardinality_syntax.reference diff --git a/dbms/tests/queries/0_stateless/00688_low_cardinality_syntax.sql b/tests/queries/0_stateless/00688_low_cardinality_syntax.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00688_low_cardinality_syntax.sql rename to tests/queries/0_stateless/00688_low_cardinality_syntax.sql diff --git a/dbms/tests/queries/0_stateless/00689_file.txt b/tests/queries/0_stateless/00689_file.txt similarity index 100% rename from dbms/tests/queries/0_stateless/00689_file.txt rename to tests/queries/0_stateless/00689_file.txt diff --git a/dbms/tests/queries/0_stateless/00689_join_table_function.reference b/tests/queries/0_stateless/00689_join_table_function.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00689_join_table_function.reference rename to tests/queries/0_stateless/00689_join_table_function.reference diff --git a/dbms/tests/queries/0_stateless/00689_join_table_function.sql b/tests/queries/0_stateless/00689_join_table_function.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00689_join_table_function.sql rename to tests/queries/0_stateless/00689_join_table_function.sql diff --git a/dbms/tests/queries/0_stateless/00690_insert_select_converting_exception_message.reference b/tests/queries/0_stateless/00690_insert_select_converting_exception_message.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00690_insert_select_converting_exception_message.reference rename to tests/queries/0_stateless/00690_insert_select_converting_exception_message.reference diff --git a/dbms/tests/queries/0_stateless/00690_insert_select_converting_exception_message.sh b/tests/queries/0_stateless/00690_insert_select_converting_exception_message.sh similarity index 100% rename from 
dbms/tests/queries/0_stateless/00690_insert_select_converting_exception_message.sh rename to tests/queries/0_stateless/00690_insert_select_converting_exception_message.sh diff --git a/dbms/tests/queries/0_stateless/00691_array_distinct.reference b/tests/queries/0_stateless/00691_array_distinct.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00691_array_distinct.reference rename to tests/queries/0_stateless/00691_array_distinct.reference diff --git a/dbms/tests/queries/0_stateless/00691_array_distinct.sql b/tests/queries/0_stateless/00691_array_distinct.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00691_array_distinct.sql rename to tests/queries/0_stateless/00691_array_distinct.sql diff --git a/dbms/tests/queries/0_stateless/00692_if_exception_code.reference b/tests/queries/0_stateless/00692_if_exception_code.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00692_if_exception_code.reference rename to tests/queries/0_stateless/00692_if_exception_code.reference diff --git a/dbms/tests/queries/0_stateless/00692_if_exception_code.sql b/tests/queries/0_stateless/00692_if_exception_code.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00692_if_exception_code.sql rename to tests/queries/0_stateless/00692_if_exception_code.sql diff --git a/dbms/tests/queries/0_stateless/00693_max_block_size_system_tables_columns.reference b/tests/queries/0_stateless/00693_max_block_size_system_tables_columns.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00693_max_block_size_system_tables_columns.reference rename to tests/queries/0_stateless/00693_max_block_size_system_tables_columns.reference diff --git a/dbms/tests/queries/0_stateless/00693_max_block_size_system_tables_columns.sql b/tests/queries/0_stateless/00693_max_block_size_system_tables_columns.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00693_max_block_size_system_tables_columns.sql rename to tests/queries/0_stateless/00693_max_block_size_system_tables_columns.sql diff --git a/dbms/tests/queries/0_stateless/00694_max_block_size_zero.reference b/tests/queries/0_stateless/00694_max_block_size_zero.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00694_max_block_size_zero.reference rename to tests/queries/0_stateless/00694_max_block_size_zero.reference diff --git a/dbms/tests/queries/0_stateless/00694_max_block_size_zero.sql b/tests/queries/0_stateless/00694_max_block_size_zero.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00694_max_block_size_zero.sql rename to tests/queries/0_stateless/00694_max_block_size_zero.sql diff --git a/dbms/tests/queries/0_stateless/00695_pretty_max_column_pad_width.reference b/tests/queries/0_stateless/00695_pretty_max_column_pad_width.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00695_pretty_max_column_pad_width.reference rename to tests/queries/0_stateless/00695_pretty_max_column_pad_width.reference diff --git a/dbms/tests/queries/0_stateless/00695_pretty_max_column_pad_width.sql b/tests/queries/0_stateless/00695_pretty_max_column_pad_width.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00695_pretty_max_column_pad_width.sql rename to tests/queries/0_stateless/00695_pretty_max_column_pad_width.sql diff --git a/dbms/tests/queries/0_stateless/00696_system_columns_limit.reference b/tests/queries/0_stateless/00696_system_columns_limit.reference similarity index 100% rename from 
dbms/tests/queries/0_stateless/00696_system_columns_limit.reference
rename to tests/queries/0_stateless/00696_system_columns_limit.reference
diff --git a/dbms/tests/queries/0_stateless/00696_system_columns_limit.sql b/tests/queries/0_stateless/00696_system_columns_limit.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00696_system_columns_limit.sql
rename to tests/queries/0_stateless/00696_system_columns_limit.sql
diff --git a/dbms/tests/queries/0_stateless/00697_in_subquery_shard.reference b/tests/queries/0_stateless/00697_in_subquery_shard.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00697_in_subquery_shard.reference
rename to tests/queries/0_stateless/00697_in_subquery_shard.reference
diff --git a/dbms/tests/queries/0_stateless/00697_in_subquery_shard.sql b/tests/queries/0_stateless/00697_in_subquery_shard.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00697_in_subquery_shard.sql
rename to tests/queries/0_stateless/00697_in_subquery_shard.sql
diff --git a/dbms/tests/queries/0_stateless/00698_validate_array_sizes_for_nested.reference b/tests/queries/0_stateless/00698_validate_array_sizes_for_nested.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00698_validate_array_sizes_for_nested.reference
rename to tests/queries/0_stateless/00698_validate_array_sizes_for_nested.reference
diff --git a/dbms/tests/queries/0_stateless/00698_validate_array_sizes_for_nested.sql b/tests/queries/0_stateless/00698_validate_array_sizes_for_nested.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00698_validate_array_sizes_for_nested.sql
rename to tests/queries/0_stateless/00698_validate_array_sizes_for_nested.sql
diff --git a/dbms/tests/queries/0_stateless/00698_validate_array_sizes_for_nested_kshvakov.reference b/tests/queries/0_stateless/00698_validate_array_sizes_for_nested_kshvakov.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00698_validate_array_sizes_for_nested_kshvakov.reference
rename to tests/queries/0_stateless/00698_validate_array_sizes_for_nested_kshvakov.reference
diff --git a/dbms/tests/queries/0_stateless/00698_validate_array_sizes_for_nested_kshvakov.sql b/tests/queries/0_stateless/00698_validate_array_sizes_for_nested_kshvakov.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00698_validate_array_sizes_for_nested_kshvakov.sql
rename to tests/queries/0_stateless/00698_validate_array_sizes_for_nested_kshvakov.sql
diff --git a/dbms/tests/queries/0_stateless/00699_materialized_view_mutations.reference b/tests/queries/0_stateless/00699_materialized_view_mutations.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00699_materialized_view_mutations.reference
rename to tests/queries/0_stateless/00699_materialized_view_mutations.reference
diff --git a/dbms/tests/queries/0_stateless/00699_materialized_view_mutations.sh b/tests/queries/0_stateless/00699_materialized_view_mutations.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00699_materialized_view_mutations.sh
rename to tests/queries/0_stateless/00699_materialized_view_mutations.sh
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_aggregates.reference b/tests/queries/0_stateless/00700_decimal_aggregates.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_aggregates.reference
rename to tests/queries/0_stateless/00700_decimal_aggregates.reference
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_aggregates.sql b/tests/queries/0_stateless/00700_decimal_aggregates.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_aggregates.sql
rename to tests/queries/0_stateless/00700_decimal_aggregates.sql
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_arithm.reference b/tests/queries/0_stateless/00700_decimal_arithm.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_arithm.reference
rename to tests/queries/0_stateless/00700_decimal_arithm.reference
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_arithm.sql b/tests/queries/0_stateless/00700_decimal_arithm.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_arithm.sql
rename to tests/queries/0_stateless/00700_decimal_arithm.sql
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_array_functions.reference b/tests/queries/0_stateless/00700_decimal_array_functions.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_array_functions.reference
rename to tests/queries/0_stateless/00700_decimal_array_functions.reference
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_array_functions.sql b/tests/queries/0_stateless/00700_decimal_array_functions.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_array_functions.sql
rename to tests/queries/0_stateless/00700_decimal_array_functions.sql
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_bounds.reference b/tests/queries/0_stateless/00700_decimal_bounds.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_bounds.reference
rename to tests/queries/0_stateless/00700_decimal_bounds.reference
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_bounds.sql b/tests/queries/0_stateless/00700_decimal_bounds.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_bounds.sql
rename to tests/queries/0_stateless/00700_decimal_bounds.sql
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_casts.reference b/tests/queries/0_stateless/00700_decimal_casts.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_casts.reference
rename to tests/queries/0_stateless/00700_decimal_casts.reference
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_casts.sql b/tests/queries/0_stateless/00700_decimal_casts.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_casts.sql
rename to tests/queries/0_stateless/00700_decimal_casts.sql
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_compare.reference b/tests/queries/0_stateless/00700_decimal_compare.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_compare.reference
rename to tests/queries/0_stateless/00700_decimal_compare.reference
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_compare.sql b/tests/queries/0_stateless/00700_decimal_compare.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_compare.sql
rename to tests/queries/0_stateless/00700_decimal_compare.sql
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_complex_types.reference b/tests/queries/0_stateless/00700_decimal_complex_types.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_complex_types.reference
rename to tests/queries/0_stateless/00700_decimal_complex_types.reference
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_complex_types.sql b/tests/queries/0_stateless/00700_decimal_complex_types.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_complex_types.sql
rename to tests/queries/0_stateless/00700_decimal_complex_types.sql
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_defaults.reference b/tests/queries/0_stateless/00700_decimal_defaults.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_defaults.reference
rename to tests/queries/0_stateless/00700_decimal_defaults.reference
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_defaults.sql b/tests/queries/0_stateless/00700_decimal_defaults.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_defaults.sql
rename to tests/queries/0_stateless/00700_decimal_defaults.sql
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_empty_aggregates.reference b/tests/queries/0_stateless/00700_decimal_empty_aggregates.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_empty_aggregates.reference
rename to tests/queries/0_stateless/00700_decimal_empty_aggregates.reference
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_empty_aggregates.sql b/tests/queries/0_stateless/00700_decimal_empty_aggregates.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_empty_aggregates.sql
rename to tests/queries/0_stateless/00700_decimal_empty_aggregates.sql
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_formats.reference b/tests/queries/0_stateless/00700_decimal_formats.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_formats.reference
rename to tests/queries/0_stateless/00700_decimal_formats.reference
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_formats.sql b/tests/queries/0_stateless/00700_decimal_formats.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_formats.sql
rename to tests/queries/0_stateless/00700_decimal_formats.sql
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_gathers.reference b/tests/queries/0_stateless/00700_decimal_gathers.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_gathers.reference
rename to tests/queries/0_stateless/00700_decimal_gathers.reference
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_gathers.sql b/tests/queries/0_stateless/00700_decimal_gathers.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_gathers.sql
rename to tests/queries/0_stateless/00700_decimal_gathers.sql
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_in_keys.reference b/tests/queries/0_stateless/00700_decimal_in_keys.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_in_keys.reference
rename to tests/queries/0_stateless/00700_decimal_in_keys.reference
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_in_keys.sql b/tests/queries/0_stateless/00700_decimal_in_keys.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_in_keys.sql
rename to tests/queries/0_stateless/00700_decimal_in_keys.sql
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_math.reference b/tests/queries/0_stateless/00700_decimal_math.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_math.reference
rename to tests/queries/0_stateless/00700_decimal_math.reference
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_math.sql b/tests/queries/0_stateless/00700_decimal_math.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_math.sql
rename to tests/queries/0_stateless/00700_decimal_math.sql
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_null.reference b/tests/queries/0_stateless/00700_decimal_null.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_null.reference
rename to tests/queries/0_stateless/00700_decimal_null.reference
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_null.sql b/tests/queries/0_stateless/00700_decimal_null.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_null.sql
rename to tests/queries/0_stateless/00700_decimal_null.sql
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_round.reference b/tests/queries/0_stateless/00700_decimal_round.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_round.reference
rename to tests/queries/0_stateless/00700_decimal_round.reference
diff --git a/dbms/tests/queries/0_stateless/00700_decimal_round.sql b/tests/queries/0_stateless/00700_decimal_round.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_decimal_round.sql
rename to tests/queries/0_stateless/00700_decimal_round.sql
diff --git a/dbms/tests/queries/0_stateless/00700_to_decimal_or_something.reference b/tests/queries/0_stateless/00700_to_decimal_or_something.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_to_decimal_or_something.reference
rename to tests/queries/0_stateless/00700_to_decimal_or_something.reference
diff --git a/dbms/tests/queries/0_stateless/00700_to_decimal_or_something.sql b/tests/queries/0_stateless/00700_to_decimal_or_something.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00700_to_decimal_or_something.sql
rename to tests/queries/0_stateless/00700_to_decimal_or_something.sql
diff --git a/dbms/tests/queries/0_stateless/00701_context_use_after_free.reference b/tests/queries/0_stateless/00701_context_use_after_free.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00701_context_use_after_free.reference
rename to tests/queries/0_stateless/00701_context_use_after_free.reference
diff --git a/dbms/tests/queries/0_stateless/00701_context_use_after_free.sql b/tests/queries/0_stateless/00701_context_use_after_free.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00701_context_use_after_free.sql
rename to tests/queries/0_stateless/00701_context_use_after_free.sql
diff --git a/dbms/tests/queries/0_stateless/00701_join_default_strictness.reference b/tests/queries/0_stateless/00701_join_default_strictness.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00701_join_default_strictness.reference
rename to tests/queries/0_stateless/00701_join_default_strictness.reference
diff --git a/dbms/tests/queries/0_stateless/00701_join_default_strictness.sql b/tests/queries/0_stateless/00701_join_default_strictness.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00701_join_default_strictness.sql
rename to tests/queries/0_stateless/00701_join_default_strictness.sql
diff --git a/dbms/tests/queries/0_stateless/00701_rollup.reference b/tests/queries/0_stateless/00701_rollup.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00701_rollup.reference
rename to tests/queries/0_stateless/00701_rollup.reference
diff --git a/dbms/tests/queries/0_stateless/00701_rollup.sql b/tests/queries/0_stateless/00701_rollup.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00701_rollup.sql
rename to tests/queries/0_stateless/00701_rollup.sql
diff --git a/dbms/tests/queries/0_stateless/00702_join_on_dups.reference b/tests/queries/0_stateless/00702_join_on_dups.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00702_join_on_dups.reference
rename to tests/queries/0_stateless/00702_join_on_dups.reference
diff --git a/dbms/tests/queries/0_stateless/00702_join_on_dups.sql b/tests/queries/0_stateless/00702_join_on_dups.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00702_join_on_dups.sql
rename to tests/queries/0_stateless/00702_join_on_dups.sql
diff --git a/dbms/tests/queries/0_stateless/00702_join_with_using.reference b/tests/queries/0_stateless/00702_join_with_using.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00702_join_with_using.reference
rename to tests/queries/0_stateless/00702_join_with_using.reference
diff --git a/dbms/tests/queries/0_stateless/00702_join_with_using.sql b/tests/queries/0_stateless/00702_join_with_using.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00702_join_with_using.sql
rename to tests/queries/0_stateless/00702_join_with_using.sql
diff --git a/dbms/tests/queries/0_stateless/00702_join_with_using_dups.reference b/tests/queries/0_stateless/00702_join_with_using_dups.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00702_join_with_using_dups.reference
rename to tests/queries/0_stateless/00702_join_with_using_dups.reference
diff --git a/dbms/tests/queries/0_stateless/00702_join_with_using_dups.sql b/tests/queries/0_stateless/00702_join_with_using_dups.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00702_join_with_using_dups.sql
rename to tests/queries/0_stateless/00702_join_with_using_dups.sql
diff --git a/dbms/tests/queries/0_stateless/00702_where_with_quailified_names.reference b/tests/queries/0_stateless/00702_where_with_quailified_names.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00702_where_with_quailified_names.reference
rename to tests/queries/0_stateless/00702_where_with_quailified_names.reference
diff --git a/dbms/tests/queries/0_stateless/00702_where_with_quailified_names.sql b/tests/queries/0_stateless/00702_where_with_quailified_names.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00702_where_with_quailified_names.sql
rename to tests/queries/0_stateless/00702_where_with_quailified_names.sql
diff --git a/dbms/tests/queries/0_stateless/00703_join_crash.reference b/tests/queries/0_stateless/00703_join_crash.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00703_join_crash.reference
rename to tests/queries/0_stateless/00703_join_crash.reference
diff --git a/dbms/tests/queries/0_stateless/00703_join_crash.sql b/tests/queries/0_stateless/00703_join_crash.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00703_join_crash.sql
rename to tests/queries/0_stateless/00703_join_crash.sql
diff --git a/dbms/tests/queries/0_stateless/00704_arrayCumSumLimited_arrayDifference.reference b/tests/queries/0_stateless/00704_arrayCumSumLimited_arrayDifference.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00704_arrayCumSumLimited_arrayDifference.reference
rename to tests/queries/0_stateless/00704_arrayCumSumLimited_arrayDifference.reference
diff --git a/dbms/tests/queries/0_stateless/00704_arrayCumSumLimited_arrayDifference.sql b/tests/queries/0_stateless/00704_arrayCumSumLimited_arrayDifference.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00704_arrayCumSumLimited_arrayDifference.sql
rename to tests/queries/0_stateless/00704_arrayCumSumLimited_arrayDifference.sql
diff --git a/dbms/tests/queries/0_stateless/00704_drop_truncate_memory_table.reference b/tests/queries/0_stateless/00704_drop_truncate_memory_table.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00704_drop_truncate_memory_table.reference
rename to tests/queries/0_stateless/00704_drop_truncate_memory_table.reference
diff --git a/dbms/tests/queries/0_stateless/00704_drop_truncate_memory_table.sh b/tests/queries/0_stateless/00704_drop_truncate_memory_table.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00704_drop_truncate_memory_table.sh
rename to tests/queries/0_stateless/00704_drop_truncate_memory_table.sh
diff --git a/dbms/tests/queries/0_stateless/00705_aggregate_states_addition.reference b/tests/queries/0_stateless/00705_aggregate_states_addition.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00705_aggregate_states_addition.reference
rename to tests/queries/0_stateless/00705_aggregate_states_addition.reference
diff --git a/dbms/tests/queries/0_stateless/00705_aggregate_states_addition.sql b/tests/queries/0_stateless/00705_aggregate_states_addition.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00705_aggregate_states_addition.sql
rename to tests/queries/0_stateless/00705_aggregate_states_addition.sql
diff --git a/dbms/tests/queries/0_stateless/00705_drop_create_merge_tree.reference b/tests/queries/0_stateless/00705_drop_create_merge_tree.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00705_drop_create_merge_tree.reference
rename to tests/queries/0_stateless/00705_drop_create_merge_tree.reference
diff --git a/dbms/tests/queries/0_stateless/00705_drop_create_merge_tree.sh b/tests/queries/0_stateless/00705_drop_create_merge_tree.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00705_drop_create_merge_tree.sh
rename to tests/queries/0_stateless/00705_drop_create_merge_tree.sh
diff --git a/dbms/tests/queries/0_stateless/00706_iso_week_and_day_of_year.reference b/tests/queries/0_stateless/00706_iso_week_and_day_of_year.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00706_iso_week_and_day_of_year.reference
rename to tests/queries/0_stateless/00706_iso_week_and_day_of_year.reference
diff --git a/dbms/tests/queries/0_stateless/00706_iso_week_and_day_of_year.sql b/tests/queries/0_stateless/00706_iso_week_and_day_of_year.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00706_iso_week_and_day_of_year.sql
rename to tests/queries/0_stateless/00706_iso_week_and_day_of_year.sql
diff --git a/dbms/tests/queries/0_stateless/00707_float_csv_delimiter.reference b/tests/queries/0_stateless/00707_float_csv_delimiter.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00707_float_csv_delimiter.reference
rename to tests/queries/0_stateless/00707_float_csv_delimiter.reference
diff --git a/dbms/tests/queries/0_stateless/00707_float_csv_delimiter.sql b/tests/queries/0_stateless/00707_float_csv_delimiter.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00707_float_csv_delimiter.sql
rename to tests/queries/0_stateless/00707_float_csv_delimiter.sql
diff --git a/dbms/tests/queries/0_stateless/00709_virtual_column_partition_id.reference b/tests/queries/0_stateless/00709_virtual_column_partition_id.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00709_virtual_column_partition_id.reference
rename to tests/queries/0_stateless/00709_virtual_column_partition_id.reference
diff --git a/dbms/tests/queries/0_stateless/00709_virtual_column_partition_id.sql b/tests/queries/0_stateless/00709_virtual_column_partition_id.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00709_virtual_column_partition_id.sql
rename to tests/queries/0_stateless/00709_virtual_column_partition_id.sql
diff --git a/dbms/tests/queries/0_stateless/00710_array_enumerate_dense.reference b/tests/queries/0_stateless/00710_array_enumerate_dense.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00710_array_enumerate_dense.reference
rename to tests/queries/0_stateless/00710_array_enumerate_dense.reference
diff --git a/dbms/tests/queries/0_stateless/00710_array_enumerate_dense.sql b/tests/queries/0_stateless/00710_array_enumerate_dense.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00710_array_enumerate_dense.sql
rename to tests/queries/0_stateless/00710_array_enumerate_dense.sql
diff --git a/dbms/tests/queries/0_stateless/00711_array_enumerate_variants.reference b/tests/queries/0_stateless/00711_array_enumerate_variants.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00711_array_enumerate_variants.reference
rename to tests/queries/0_stateless/00711_array_enumerate_variants.reference
diff --git a/dbms/tests/queries/0_stateless/00711_array_enumerate_variants.sql b/tests/queries/0_stateless/00711_array_enumerate_variants.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00711_array_enumerate_variants.sql
rename to tests/queries/0_stateless/00711_array_enumerate_variants.sql
diff --git a/dbms/tests/queries/0_stateless/00712_nan_comparison.reference b/tests/queries/0_stateless/00712_nan_comparison.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00712_nan_comparison.reference
rename to tests/queries/0_stateless/00712_nan_comparison.reference
diff --git a/dbms/tests/queries/0_stateless/00712_nan_comparison.sql b/tests/queries/0_stateless/00712_nan_comparison.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00712_nan_comparison.sql
rename to tests/queries/0_stateless/00712_nan_comparison.sql
diff --git a/dbms/tests/queries/0_stateless/00712_prewhere_with_alias.reference b/tests/queries/0_stateless/00712_prewhere_with_alias.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00712_prewhere_with_alias.reference
rename to tests/queries/0_stateless/00712_prewhere_with_alias.reference
diff --git a/dbms/tests/queries/0_stateless/00712_prewhere_with_alias.sql b/tests/queries/0_stateless/00712_prewhere_with_alias.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00712_prewhere_with_alias.sql
rename to tests/queries/0_stateless/00712_prewhere_with_alias.sql
diff --git a/dbms/tests/queries/0_stateless/00712_prewhere_with_alias_and_virtual_column.reference b/tests/queries/0_stateless/00712_prewhere_with_alias_and_virtual_column.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00712_prewhere_with_alias_and_virtual_column.reference
rename to tests/queries/0_stateless/00712_prewhere_with_alias_and_virtual_column.reference
diff --git a/dbms/tests/queries/0_stateless/00712_prewhere_with_alias_and_virtual_column.sql b/tests/queries/0_stateless/00712_prewhere_with_alias_and_virtual_column.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00712_prewhere_with_alias_and_virtual_column.sql
rename to tests/queries/0_stateless/00712_prewhere_with_alias_and_virtual_column.sql
diff --git a/dbms/tests/queries/0_stateless/00712_prewhere_with_alias_bug.reference b/tests/queries/0_stateless/00712_prewhere_with_alias_bug.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00712_prewhere_with_alias_bug.reference
rename to tests/queries/0_stateless/00712_prewhere_with_alias_bug.reference
diff --git a/dbms/tests/queries/0_stateless/00712_prewhere_with_alias_bug.sql b/tests/queries/0_stateless/00712_prewhere_with_alias_bug.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00712_prewhere_with_alias_bug.sql
rename to tests/queries/0_stateless/00712_prewhere_with_alias_bug.sql
diff --git a/dbms/tests/queries/0_stateless/00712_prewhere_with_alias_bug_2.reference b/tests/queries/0_stateless/00712_prewhere_with_alias_bug_2.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00712_prewhere_with_alias_bug_2.reference
rename to tests/queries/0_stateless/00712_prewhere_with_alias_bug_2.reference
diff --git a/dbms/tests/queries/0_stateless/00712_prewhere_with_alias_bug_2.sql b/tests/queries/0_stateless/00712_prewhere_with_alias_bug_2.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00712_prewhere_with_alias_bug_2.sql
rename to tests/queries/0_stateless/00712_prewhere_with_alias_bug_2.sql
diff --git a/dbms/tests/queries/0_stateless/00712_prewhere_with_final.reference b/tests/queries/0_stateless/00712_prewhere_with_final.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00712_prewhere_with_final.reference
rename to tests/queries/0_stateless/00712_prewhere_with_final.reference
diff --git a/dbms/tests/queries/0_stateless/00712_prewhere_with_final.sql b/tests/queries/0_stateless/00712_prewhere_with_final.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00712_prewhere_with_final.sql
rename to tests/queries/0_stateless/00712_prewhere_with_final.sql
diff --git a/dbms/tests/queries/0_stateless/00712_prewhere_with_missing_columns.reference b/tests/queries/0_stateless/00712_prewhere_with_missing_columns.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00712_prewhere_with_missing_columns.reference
rename to tests/queries/0_stateless/00712_prewhere_with_missing_columns.reference
diff --git a/dbms/tests/queries/0_stateless/00712_prewhere_with_missing_columns.sql b/tests/queries/0_stateless/00712_prewhere_with_missing_columns.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00712_prewhere_with_missing_columns.sql
rename to tests/queries/0_stateless/00712_prewhere_with_missing_columns.sql
diff --git a/dbms/tests/queries/0_stateless/00712_prewhere_with_missing_columns_2.reference b/tests/queries/0_stateless/00712_prewhere_with_missing_columns_2.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00712_prewhere_with_missing_columns_2.reference
rename to tests/queries/0_stateless/00712_prewhere_with_missing_columns_2.reference
diff --git a/dbms/tests/queries/0_stateless/00712_prewhere_with_missing_columns_2.sql b/tests/queries/0_stateless/00712_prewhere_with_missing_columns_2.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00712_prewhere_with_missing_columns_2.sql
rename to tests/queries/0_stateless/00712_prewhere_with_missing_columns_2.sql
diff --git a/dbms/tests/queries/0_stateless/00712_prewhere_with_sampling.reference b/tests/queries/0_stateless/00712_prewhere_with_sampling.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00712_prewhere_with_sampling.reference
rename to tests/queries/0_stateless/00712_prewhere_with_sampling.reference
diff --git a/dbms/tests/queries/0_stateless/00712_prewhere_with_sampling.sql b/tests/queries/0_stateless/00712_prewhere_with_sampling.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00712_prewhere_with_sampling.sql
rename to tests/queries/0_stateless/00712_prewhere_with_sampling.sql
diff --git a/dbms/tests/queries/0_stateless/00712_prewhere_with_sampling_and_alias.reference b/tests/queries/0_stateless/00712_prewhere_with_sampling_and_alias.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00712_prewhere_with_sampling_and_alias.reference
rename to tests/queries/0_stateless/00712_prewhere_with_sampling_and_alias.reference
diff --git a/dbms/tests/queries/0_stateless/00712_prewhere_with_sampling_and_alias.sql b/tests/queries/0_stateless/00712_prewhere_with_sampling_and_alias.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00712_prewhere_with_sampling_and_alias.sql
rename to tests/queries/0_stateless/00712_prewhere_with_sampling_and_alias.sql
diff --git a/dbms/tests/queries/0_stateless/00713_collapsing_merge_tree.reference b/tests/queries/0_stateless/00713_collapsing_merge_tree.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00713_collapsing_merge_tree.reference
rename to tests/queries/0_stateless/00713_collapsing_merge_tree.reference
diff --git a/dbms/tests/queries/0_stateless/00713_collapsing_merge_tree.sql b/tests/queries/0_stateless/00713_collapsing_merge_tree.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00713_collapsing_merge_tree.sql
rename to tests/queries/0_stateless/00713_collapsing_merge_tree.sql
diff --git a/dbms/tests/queries/0_stateless/00714_alter_uuid.reference b/tests/queries/0_stateless/00714_alter_uuid.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00714_alter_uuid.reference
rename to tests/queries/0_stateless/00714_alter_uuid.reference
diff --git a/dbms/tests/queries/0_stateless/00714_alter_uuid.sql b/tests/queries/0_stateless/00714_alter_uuid.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00714_alter_uuid.sql
rename to tests/queries/0_stateless/00714_alter_uuid.sql
diff --git a/dbms/tests/queries/0_stateless/00714_create_temporary_table_with_in_clause.reference b/tests/queries/0_stateless/00714_create_temporary_table_with_in_clause.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00714_create_temporary_table_with_in_clause.reference
rename to tests/queries/0_stateless/00714_create_temporary_table_with_in_clause.reference
diff --git a/dbms/tests/queries/0_stateless/00714_create_temporary_table_with_in_clause.sql b/tests/queries/0_stateless/00714_create_temporary_table_with_in_clause.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00714_create_temporary_table_with_in_clause.sql
rename to tests/queries/0_stateless/00714_create_temporary_table_with_in_clause.sql
diff --git a/dbms/tests/queries/0_stateless/00715_bounding_ratio.reference b/tests/queries/0_stateless/00715_bounding_ratio.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00715_bounding_ratio.reference
rename to tests/queries/0_stateless/00715_bounding_ratio.reference
diff --git a/dbms/tests/queries/0_stateless/00715_bounding_ratio.sql b/tests/queries/0_stateless/00715_bounding_ratio.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00715_bounding_ratio.sql
rename to tests/queries/0_stateless/00715_bounding_ratio.sql
diff --git a/dbms/tests/queries/0_stateless/00715_fetch_merged_or_mutated_part_zookeeper.reference b/tests/queries/0_stateless/00715_fetch_merged_or_mutated_part_zookeeper.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00715_fetch_merged_or_mutated_part_zookeeper.reference
rename to tests/queries/0_stateless/00715_fetch_merged_or_mutated_part_zookeeper.reference
diff --git a/dbms/tests/queries/0_stateless/00715_fetch_merged_or_mutated_part_zookeeper.sh b/tests/queries/0_stateless/00715_fetch_merged_or_mutated_part_zookeeper.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00715_fetch_merged_or_mutated_part_zookeeper.sh
rename to tests/queries/0_stateless/00715_fetch_merged_or_mutated_part_zookeeper.sh
diff --git a/dbms/tests/queries/0_stateless/00715_json_each_row_input_nested.reference b/tests/queries/0_stateless/00715_json_each_row_input_nested.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00715_json_each_row_input_nested.reference
rename to tests/queries/0_stateless/00715_json_each_row_input_nested.reference
diff --git a/dbms/tests/queries/0_stateless/00715_json_each_row_input_nested.sh b/tests/queries/0_stateless/00715_json_each_row_input_nested.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00715_json_each_row_input_nested.sh
rename to tests/queries/0_stateless/00715_json_each_row_input_nested.sh
diff --git a/dbms/tests/queries/0_stateless/00716_allow_ddl.reference b/tests/queries/0_stateless/00716_allow_ddl.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00716_allow_ddl.reference
rename to tests/queries/0_stateless/00716_allow_ddl.reference
diff --git a/dbms/tests/queries/0_stateless/00716_allow_ddl.sql b/tests/queries/0_stateless/00716_allow_ddl.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00716_allow_ddl.sql
rename to tests/queries/0_stateless/00716_allow_ddl.sql
diff --git a/dbms/tests/queries/0_stateless/00717_default_join_type.reference b/tests/queries/0_stateless/00717_default_join_type.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00717_default_join_type.reference
rename to tests/queries/0_stateless/00717_default_join_type.reference
diff --git a/dbms/tests/queries/0_stateless/00717_default_join_type.sql b/tests/queries/0_stateless/00717_default_join_type.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00717_default_join_type.sql
rename to tests/queries/0_stateless/00717_default_join_type.sql
diff --git a/dbms/tests/queries/0_stateless/00717_low_cardinaliry_distributed_group_by.reference b/tests/queries/0_stateless/00717_low_cardinaliry_distributed_group_by.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00717_low_cardinaliry_distributed_group_by.reference
rename to tests/queries/0_stateless/00717_low_cardinaliry_distributed_group_by.reference
diff --git a/dbms/tests/queries/0_stateless/00717_low_cardinaliry_distributed_group_by.sql b/tests/queries/0_stateless/00717_low_cardinaliry_distributed_group_by.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00717_low_cardinaliry_distributed_group_by.sql
rename to tests/queries/0_stateless/00717_low_cardinaliry_distributed_group_by.sql
diff --git a/dbms/tests/queries/0_stateless/00717_low_cardinaliry_group_by.reference b/tests/queries/0_stateless/00717_low_cardinaliry_group_by.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00717_low_cardinaliry_group_by.reference
rename to tests/queries/0_stateless/00717_low_cardinaliry_group_by.reference
diff --git a/dbms/tests/queries/0_stateless/00717_low_cardinaliry_group_by.sql b/tests/queries/0_stateless/00717_low_cardinaliry_group_by.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00717_low_cardinaliry_group_by.sql
rename to tests/queries/0_stateless/00717_low_cardinaliry_group_by.sql
diff --git a/dbms/tests/queries/0_stateless/00717_merge_and_distributed.reference b/tests/queries/0_stateless/00717_merge_and_distributed.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00717_merge_and_distributed.reference
rename to tests/queries/0_stateless/00717_merge_and_distributed.reference
diff --git a/dbms/tests/queries/0_stateless/00717_merge_and_distributed.sql b/tests/queries/0_stateless/00717_merge_and_distributed.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00717_merge_and_distributed.sql
rename to tests/queries/0_stateless/00717_merge_and_distributed.sql
diff --git a/dbms/tests/queries/0_stateless/00718_format_datetime.reference b/tests/queries/0_stateless/00718_format_datetime.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00718_format_datetime.reference
rename to tests/queries/0_stateless/00718_format_datetime.reference
diff --git a/dbms/tests/queries/0_stateless/00718_format_datetime.sql b/tests/queries/0_stateless/00718_format_datetime.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00718_format_datetime.sql
rename to tests/queries/0_stateless/00718_format_datetime.sql
diff --git a/dbms/tests/queries/0_stateless/00718_low_cardinaliry_alter.reference b/tests/queries/0_stateless/00718_low_cardinaliry_alter.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00718_low_cardinaliry_alter.reference
rename to tests/queries/0_stateless/00718_low_cardinaliry_alter.reference
diff --git a/dbms/tests/queries/0_stateless/00718_low_cardinaliry_alter.sql b/tests/queries/0_stateless/00718_low_cardinaliry_alter.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00718_low_cardinaliry_alter.sql
rename to tests/queries/0_stateless/00718_low_cardinaliry_alter.sql
diff --git a/dbms/tests/queries/0_stateless/00719_format_datetime_rand.reference b/tests/queries/0_stateless/00719_format_datetime_rand.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00719_format_datetime_rand.reference
rename to tests/queries/0_stateless/00719_format_datetime_rand.reference
diff --git a/dbms/tests/queries/0_stateless/00719_format_datetime_rand.sql b/tests/queries/0_stateless/00719_format_datetime_rand.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00719_format_datetime_rand.sql
rename to tests/queries/0_stateless/00719_format_datetime_rand.sql
diff --git a/dbms/tests/queries/0_stateless/00719_insert_block_without_column.reference b/tests/queries/0_stateless/00719_insert_block_without_column.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00719_insert_block_without_column.reference
rename to tests/queries/0_stateless/00719_insert_block_without_column.reference
diff --git a/dbms/tests/queries/0_stateless/00719_insert_block_without_column.sh b/tests/queries/0_stateless/00719_insert_block_without_column.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00719_insert_block_without_column.sh
rename to tests/queries/0_stateless/00719_insert_block_without_column.sh
diff --git a/dbms/tests/queries/0_stateless/00719_parallel_ddl_db.reference b/tests/queries/0_stateless/00719_parallel_ddl_db.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00719_parallel_ddl_db.reference
rename to tests/queries/0_stateless/00719_parallel_ddl_db.reference
diff --git a/dbms/tests/queries/0_stateless/00719_parallel_ddl_db.sh b/tests/queries/0_stateless/00719_parallel_ddl_db.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00719_parallel_ddl_db.sh
rename to tests/queries/0_stateless/00719_parallel_ddl_db.sh
diff --git a/dbms/tests/queries/0_stateless/00719_parallel_ddl_table.reference b/tests/queries/0_stateless/00719_parallel_ddl_table.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00719_parallel_ddl_table.reference
rename to tests/queries/0_stateless/00719_parallel_ddl_table.reference
diff --git a/dbms/tests/queries/0_stateless/00719_parallel_ddl_table.sh b/tests/queries/0_stateless/00719_parallel_ddl_table.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00719_parallel_ddl_table.sh
rename to tests/queries/0_stateless/00719_parallel_ddl_table.sh
diff --git a/dbms/tests/queries/0_stateless/00720_combinations_of_aggregate_combinators.reference b/tests/queries/0_stateless/00720_combinations_of_aggregate_combinators.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00720_combinations_of_aggregate_combinators.reference
rename to tests/queries/0_stateless/00720_combinations_of_aggregate_combinators.reference
diff --git a/dbms/tests/queries/0_stateless/00720_combinations_of_aggregate_combinators.sql b/tests/queries/0_stateless/00720_combinations_of_aggregate_combinators.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00720_combinations_of_aggregate_combinators.sql
rename to tests/queries/0_stateless/00720_combinations_of_aggregate_combinators.sql
diff --git a/dbms/tests/queries/0_stateless/00720_with_cube.reference b/tests/queries/0_stateless/00720_with_cube.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00720_with_cube.reference
rename to tests/queries/0_stateless/00720_with_cube.reference
diff --git a/dbms/tests/queries/0_stateless/00720_with_cube.sql b/tests/queries/0_stateless/00720_with_cube.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00720_with_cube.sql
rename to tests/queries/0_stateless/00720_with_cube.sql
diff --git a/dbms/tests/queries/0_stateless/00721_force_by_identical_result_after_merge_zookeeper.reference b/tests/queries/0_stateless/00721_force_by_identical_result_after_merge_zookeeper.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00721_force_by_identical_result_after_merge_zookeeper.reference
rename to tests/queries/0_stateless/00721_force_by_identical_result_after_merge_zookeeper.reference
diff --git a/dbms/tests/queries/0_stateless/00721_force_by_identical_result_after_merge_zookeeper.sql b/tests/queries/0_stateless/00721_force_by_identical_result_after_merge_zookeeper.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00721_force_by_identical_result_after_merge_zookeeper.sql
rename to tests/queries/0_stateless/00721_force_by_identical_result_after_merge_zookeeper.sql
diff --git a/dbms/tests/queries/0_stateless/00722_inner_join.reference b/tests/queries/0_stateless/00722_inner_join.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00722_inner_join.reference
rename to tests/queries/0_stateless/00722_inner_join.reference
diff --git a/dbms/tests/queries/0_stateless/00722_inner_join.sql b/tests/queries/0_stateless/00722_inner_join.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00722_inner_join.sql
rename to tests/queries/0_stateless/00722_inner_join.sql
diff --git a/dbms/tests/queries/0_stateless/00723_remerge_sort.reference b/tests/queries/0_stateless/00723_remerge_sort.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00723_remerge_sort.reference
rename to tests/queries/0_stateless/00723_remerge_sort.reference
diff --git a/dbms/tests/queries/0_stateless/00723_remerge_sort.sql b/tests/queries/0_stateless/00723_remerge_sort.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00723_remerge_sort.sql
rename to tests/queries/0_stateless/00723_remerge_sort.sql
diff --git a/dbms/tests/queries/0_stateless/00724_insert_values_datetime_conversion.reference b/tests/queries/0_stateless/00724_insert_values_datetime_conversion.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00724_insert_values_datetime_conversion.reference
rename to tests/queries/0_stateless/00724_insert_values_datetime_conversion.reference
diff --git a/dbms/tests/queries/0_stateless/00724_insert_values_datetime_conversion.sql b/tests/queries/0_stateless/00724_insert_values_datetime_conversion.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00724_insert_values_datetime_conversion.sql
rename to tests/queries/0_stateless/00724_insert_values_datetime_conversion.sql
diff --git a/dbms/tests/queries/0_stateless/00725_comment_columns.reference b/tests/queries/0_stateless/00725_comment_columns.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00725_comment_columns.reference
rename to tests/queries/0_stateless/00725_comment_columns.reference
diff --git a/dbms/tests/queries/0_stateless/00725_comment_columns.sql b/tests/queries/0_stateless/00725_comment_columns.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00725_comment_columns.sql
rename to tests/queries/0_stateless/00725_comment_columns.sql
diff --git a/dbms/tests/queries/0_stateless/00725_ipv4_ipv6_domains.reference b/tests/queries/0_stateless/00725_ipv4_ipv6_domains.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00725_ipv4_ipv6_domains.reference
rename to tests/queries/0_stateless/00725_ipv4_ipv6_domains.reference
diff --git a/dbms/tests/queries/0_stateless/00725_ipv4_ipv6_domains.sql b/tests/queries/0_stateless/00725_ipv4_ipv6_domains.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00725_ipv4_ipv6_domains.sql
rename to tests/queries/0_stateless/00725_ipv4_ipv6_domains.sql
diff --git a/dbms/tests/queries/0_stateless/00725_join_on_bug_1.reference b/tests/queries/0_stateless/00725_join_on_bug_1.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00725_join_on_bug_1.reference
rename to tests/queries/0_stateless/00725_join_on_bug_1.reference
diff --git a/dbms/tests/queries/0_stateless/00725_join_on_bug_1.sql b/tests/queries/0_stateless/00725_join_on_bug_1.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00725_join_on_bug_1.sql
rename to tests/queries/0_stateless/00725_join_on_bug_1.sql
diff --git a/dbms/tests/queries/0_stateless/00725_join_on_bug_2.reference b/tests/queries/0_stateless/00725_join_on_bug_2.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00725_join_on_bug_2.reference
rename to tests/queries/0_stateless/00725_join_on_bug_2.reference
diff --git a/dbms/tests/queries/0_stateless/00725_join_on_bug_2.sql b/tests/queries/0_stateless/00725_join_on_bug_2.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00725_join_on_bug_2.sql
rename to tests/queries/0_stateless/00725_join_on_bug_2.sql
diff --git a/dbms/tests/queries/0_stateless/00725_join_on_bug_3.reference b/tests/queries/0_stateless/00725_join_on_bug_3.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00725_join_on_bug_3.reference
rename to tests/queries/0_stateless/00725_join_on_bug_3.reference
diff --git a/dbms/tests/queries/0_stateless/00725_join_on_bug_3.sql b/tests/queries/0_stateless/00725_join_on_bug_3.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00725_join_on_bug_3.sql
rename to tests/queries/0_stateless/00725_join_on_bug_3.sql
diff --git a/dbms/tests/queries/0_stateless/00725_join_on_bug_4.reference b/tests/queries/0_stateless/00725_join_on_bug_4.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00725_join_on_bug_4.reference
rename to tests/queries/0_stateless/00725_join_on_bug_4.reference
diff --git a/dbms/tests/queries/0_stateless/00725_join_on_bug_4.sql b/tests/queries/0_stateless/00725_join_on_bug_4.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00725_join_on_bug_4.sql
rename to tests/queries/0_stateless/00725_join_on_bug_4.sql
diff --git a/dbms/tests/queries/0_stateless/00725_memory_tracking.reference b/tests/queries/0_stateless/00725_memory_tracking.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00725_memory_tracking.reference
rename to tests/queries/0_stateless/00725_memory_tracking.reference
diff --git a/dbms/tests/queries/0_stateless/00725_memory_tracking.sql b/tests/queries/0_stateless/00725_memory_tracking.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00725_memory_tracking.sql
rename to tests/queries/0_stateless/00725_memory_tracking.sql
diff --git a/dbms/tests/queries/0_stateless/00725_quantiles_shard.reference b/tests/queries/0_stateless/00725_quantiles_shard.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00725_quantiles_shard.reference
rename to tests/queries/0_stateless/00725_quantiles_shard.reference
diff --git a/dbms/tests/queries/0_stateless/00725_quantiles_shard.sql b/tests/queries/0_stateless/00725_quantiles_shard.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00725_quantiles_shard.sql
rename to tests/queries/0_stateless/00725_quantiles_shard.sql
diff --git a/dbms/tests/queries/0_stateless/00726_length_aliases.reference b/tests/queries/0_stateless/00726_length_aliases.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00726_length_aliases.reference
rename to tests/queries/0_stateless/00726_length_aliases.reference
diff --git a/dbms/tests/queries/0_stateless/00726_length_aliases.sql b/tests/queries/0_stateless/00726_length_aliases.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00726_length_aliases.sql
rename to tests/queries/0_stateless/00726_length_aliases.sql
diff --git a/dbms/tests/queries/0_stateless/00726_materialized_view_concurrent.reference b/tests/queries/0_stateless/00726_materialized_view_concurrent.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00726_materialized_view_concurrent.reference
rename to tests/queries/0_stateless/00726_materialized_view_concurrent.reference
diff --git a/dbms/tests/queries/0_stateless/00726_materialized_view_concurrent.sql b/tests/queries/0_stateless/00726_materialized_view_concurrent.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00726_materialized_view_concurrent.sql
rename to tests/queries/0_stateless/00726_materialized_view_concurrent.sql
diff --git a/dbms/tests/queries/0_stateless/00726_modulo_for_date.reference b/tests/queries/0_stateless/00726_modulo_for_date.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00726_modulo_for_date.reference
rename to tests/queries/0_stateless/00726_modulo_for_date.reference
diff --git a/dbms/tests/queries/0_stateless/00726_modulo_for_date.sql b/tests/queries/0_stateless/00726_modulo_for_date.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00726_modulo_for_date.sql
rename to tests/queries/0_stateless/00726_modulo_for_date.sql
diff --git a/dbms/tests/queries/0_stateless/00727_concat.reference b/tests/queries/0_stateless/00727_concat.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00727_concat.reference
rename to tests/queries/0_stateless/00727_concat.reference
diff --git a/dbms/tests/queries/0_stateless/00727_concat.sql b/tests/queries/0_stateless/00727_concat.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00727_concat.sql
rename to tests/queries/0_stateless/00727_concat.sql
diff --git a/dbms/tests/queries/0_stateless/00728_json_each_row_parsing.reference b/tests/queries/0_stateless/00728_json_each_row_parsing.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00728_json_each_row_parsing.reference
rename to tests/queries/0_stateless/00728_json_each_row_parsing.reference
diff --git a/dbms/tests/queries/0_stateless/00728_json_each_row_parsing.sh b/tests/queries/0_stateless/00728_json_each_row_parsing.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00728_json_each_row_parsing.sh
rename to tests/queries/0_stateless/00728_json_each_row_parsing.sh
diff --git a/dbms/tests/queries/0_stateless/00729_prewhere_array_join.reference b/tests/queries/0_stateless/00729_prewhere_array_join.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00729_prewhere_array_join.reference
rename to tests/queries/0_stateless/00729_prewhere_array_join.reference
diff --git a/dbms/tests/queries/0_stateless/00729_prewhere_array_join.sql b/tests/queries/0_stateless/00729_prewhere_array_join.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00729_prewhere_array_join.sql
rename to tests/queries/0_stateless/00729_prewhere_array_join.sql
diff --git a/dbms/tests/queries/0_stateless/00730_unicode_terminal_format.reference b/tests/queries/0_stateless/00730_unicode_terminal_format.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00730_unicode_terminal_format.reference
rename to tests/queries/0_stateless/00730_unicode_terminal_format.reference
diff --git a/dbms/tests/queries/0_stateless/00730_unicode_terminal_format.sql b/tests/queries/0_stateless/00730_unicode_terminal_format.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00730_unicode_terminal_format.sql
rename to tests/queries/0_stateless/00730_unicode_terminal_format.sql
diff --git a/dbms/tests/queries/0_stateless/00731_long_merge_tree_select_opened_files.reference b/tests/queries/0_stateless/00731_long_merge_tree_select_opened_files.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00731_long_merge_tree_select_opened_files.reference
rename to tests/queries/0_stateless/00731_long_merge_tree_select_opened_files.reference
diff --git a/dbms/tests/queries/0_stateless/00731_long_merge_tree_select_opened_files.sh b/tests/queries/0_stateless/00731_long_merge_tree_select_opened_files.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00731_long_merge_tree_select_opened_files.sh
rename to tests/queries/0_stateless/00731_long_merge_tree_select_opened_files.sh
diff --git a/dbms/tests/queries/0_stateless/00732_base64_functions.reference b/tests/queries/0_stateless/00732_base64_functions.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00732_base64_functions.reference
rename to tests/queries/0_stateless/00732_base64_functions.reference
diff --git a/dbms/tests/queries/0_stateless/00732_base64_functions.sql b/tests/queries/0_stateless/00732_base64_functions.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00732_base64_functions.sql
rename to tests/queries/0_stateless/00732_base64_functions.sql
diff --git a/dbms/tests/queries/0_stateless/00732_decimal_summing_merge_tree.reference b/tests/queries/0_stateless/00732_decimal_summing_merge_tree.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00732_decimal_summing_merge_tree.reference
rename to tests/queries/0_stateless/00732_decimal_summing_merge_tree.reference
diff --git a/dbms/tests/queries/0_stateless/00732_decimal_summing_merge_tree.sql b/tests/queries/0_stateless/00732_decimal_summing_merge_tree.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00732_decimal_summing_merge_tree.sql
rename to tests/queries/0_stateless/00732_decimal_summing_merge_tree.sql
diff --git a/dbms/tests/queries/0_stateless/00732_quorum_insert_have_data_before_quorum_zookeeper.reference b/tests/queries/0_stateless/00732_quorum_insert_have_data_before_quorum_zookeeper.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00732_quorum_insert_have_data_before_quorum_zookeeper.reference
rename to tests/queries/0_stateless/00732_quorum_insert_have_data_before_quorum_zookeeper.reference
diff --git a/dbms/tests/queries/0_stateless/00732_quorum_insert_have_data_before_quorum_zookeeper.sql b/tests/queries/0_stateless/00732_quorum_insert_have_data_before_quorum_zookeeper.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00732_quorum_insert_have_data_before_quorum_zookeeper.sql
rename to tests/queries/0_stateless/00732_quorum_insert_have_data_before_quorum_zookeeper.sql
diff --git a/dbms/tests/queries/0_stateless/00732_quorum_insert_lost_part_and_alive_part_zookeeper.reference b/tests/queries/0_stateless/00732_quorum_insert_lost_part_and_alive_part_zookeeper.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00732_quorum_insert_lost_part_and_alive_part_zookeeper.reference
rename to tests/queries/0_stateless/00732_quorum_insert_lost_part_and_alive_part_zookeeper.reference
diff --git a/dbms/tests/queries/0_stateless/00732_quorum_insert_lost_part_and_alive_part_zookeeper.sql b/tests/queries/0_stateless/00732_quorum_insert_lost_part_and_alive_part_zookeeper.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00732_quorum_insert_lost_part_and_alive_part_zookeeper.sql
rename to tests/queries/0_stateless/00732_quorum_insert_lost_part_and_alive_part_zookeeper.sql
diff --git a/dbms/tests/queries/0_stateless/00732_quorum_insert_lost_part_zookeeper.reference b/tests/queries/0_stateless/00732_quorum_insert_lost_part_zookeeper.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00732_quorum_insert_lost_part_zookeeper.reference
rename to tests/queries/0_stateless/00732_quorum_insert_lost_part_zookeeper.reference
diff --git a/dbms/tests/queries/0_stateless/00732_quorum_insert_lost_part_zookeeper.sql b/tests/queries/0_stateless/00732_quorum_insert_lost_part_zookeeper.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00732_quorum_insert_lost_part_zookeeper.sql
rename to tests/queries/0_stateless/00732_quorum_insert_lost_part_zookeeper.sql
diff --git a/dbms/tests/queries/0_stateless/00732_quorum_insert_select_with_old_data_and_without_quorum_zookeeper.reference b/tests/queries/0_stateless/00732_quorum_insert_select_with_old_data_and_without_quorum_zookeeper.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00732_quorum_insert_select_with_old_data_and_without_quorum_zookeeper.reference
rename to tests/queries/0_stateless/00732_quorum_insert_select_with_old_data_and_without_quorum_zookeeper.reference
diff --git a/dbms/tests/queries/0_stateless/00732_quorum_insert_select_with_old_data_and_without_quorum_zookeeper.sql b/tests/queries/0_stateless/00732_quorum_insert_select_with_old_data_and_without_quorum_zookeeper.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00732_quorum_insert_select_with_old_data_and_without_quorum_zookeeper.sql
rename to tests/queries/0_stateless/00732_quorum_insert_select_with_old_data_and_without_quorum_zookeeper.sql
diff --git a/dbms/tests/queries/0_stateless/00732_quorum_insert_simple_test_1_parts_zookeeper.reference b/tests/queries/0_stateless/00732_quorum_insert_simple_test_1_parts_zookeeper.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00732_quorum_insert_simple_test_1_parts_zookeeper.reference
rename to tests/queries/0_stateless/00732_quorum_insert_simple_test_1_parts_zookeeper.reference
diff --git a/dbms/tests/queries/0_stateless/00732_quorum_insert_simple_test_1_parts_zookeeper.sql b/tests/queries/0_stateless/00732_quorum_insert_simple_test_1_parts_zookeeper.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00732_quorum_insert_simple_test_1_parts_zookeeper.sql
rename to tests/queries/0_stateless/00732_quorum_insert_simple_test_1_parts_zookeeper.sql
diff --git a/dbms/tests/queries/0_stateless/00732_quorum_insert_simple_test_2_parts_zookeeper.reference b/tests/queries/0_stateless/00732_quorum_insert_simple_test_2_parts_zookeeper.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00732_quorum_insert_simple_test_2_parts_zookeeper.reference
rename to tests/queries/0_stateless/00732_quorum_insert_simple_test_2_parts_zookeeper.reference
diff --git a/dbms/tests/queries/0_stateless/00732_quorum_insert_simple_test_2_parts_zookeeper.sql b/tests/queries/0_stateless/00732_quorum_insert_simple_test_2_parts_zookeeper.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00732_quorum_insert_simple_test_2_parts_zookeeper.sql
rename to tests/queries/0_stateless/00732_quorum_insert_simple_test_2_parts_zookeeper.sql
diff --git a/dbms/tests/queries/0_stateless/00733_if_datetime.reference b/tests/queries/0_stateless/00733_if_datetime.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00733_if_datetime.reference
rename to tests/queries/0_stateless/00733_if_datetime.reference
diff --git a/dbms/tests/queries/0_stateless/00733_if_datetime.sql b/tests/queries/0_stateless/00733_if_datetime.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00733_if_datetime.sql
rename to tests/queries/0_stateless/00733_if_datetime.sql
diff --git a/dbms/tests/queries/0_stateless/00734_timeslot.reference b/tests/queries/0_stateless/00734_timeslot.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00734_timeslot.reference
rename to tests/queries/0_stateless/00734_timeslot.reference
diff --git a/dbms/tests/queries/0_stateless/00734_timeslot.sql b/tests/queries/0_stateless/00734_timeslot.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00734_timeslot.sql
rename to tests/queries/0_stateless/00734_timeslot.sql
diff --git a/dbms/tests/queries/0_stateless/00735_conditional.reference b/tests/queries/0_stateless/00735_conditional.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00735_conditional.reference
rename to tests/queries/0_stateless/00735_conditional.reference
diff --git a/dbms/tests/queries/0_stateless/00735_conditional.sql b/tests/queries/0_stateless/00735_conditional.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00735_conditional.sql
rename to tests/queries/0_stateless/00735_conditional.sql
diff --git a/dbms/tests/queries/0_stateless/00735_or_expr_optimize_bug.reference b/tests/queries/0_stateless/00735_or_expr_optimize_bug.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00735_or_expr_optimize_bug.reference
rename to tests/queries/0_stateless/00735_or_expr_optimize_bug.reference
diff --git a/dbms/tests/queries/0_stateless/00735_or_expr_optimize_bug.sql b/tests/queries/0_stateless/00735_or_expr_optimize_bug.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00735_or_expr_optimize_bug.sql
rename to tests/queries/0_stateless/00735_or_expr_optimize_bug.sql
diff --git a/dbms/tests/queries/0_stateless/00736_disjunction_optimisation.reference b/tests/queries/0_stateless/00736_disjunction_optimisation.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00736_disjunction_optimisation.reference
rename to tests/queries/0_stateless/00736_disjunction_optimisation.reference
diff --git a/dbms/tests/queries/0_stateless/00736_disjunction_optimisation.sql b/tests/queries/0_stateless/00736_disjunction_optimisation.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00736_disjunction_optimisation.sql
rename to tests/queries/0_stateless/00736_disjunction_optimisation.sql
diff --git a/dbms/tests/queries/0_stateless/00737_decimal_group_by.reference b/tests/queries/0_stateless/00737_decimal_group_by.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00737_decimal_group_by.reference
rename to tests/queries/0_stateless/00737_decimal_group_by.reference
diff --git a/dbms/tests/queries/0_stateless/00737_decimal_group_by.sql b/tests/queries/0_stateless/00737_decimal_group_by.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00737_decimal_group_by.sql
rename to tests/queries/0_stateless/00737_decimal_group_by.sql
diff --git a/dbms/tests/queries/0_stateless/00738_lock_for_inner_table.reference b/tests/queries/0_stateless/00738_lock_for_inner_table.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00738_lock_for_inner_table.reference
rename to tests/queries/0_stateless/00738_lock_for_inner_table.reference
diff --git a/dbms/tests/queries/0_stateless/00738_lock_for_inner_table.sh b/tests/queries/0_stateless/00738_lock_for_inner_table.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00738_lock_for_inner_table.sh
rename to tests/queries/0_stateless/00738_lock_for_inner_table.sh
diff --git a/dbms/tests/queries/0_stateless/00738_nested_merge_multidimensional_array.reference b/tests/queries/0_stateless/00738_nested_merge_multidimensional_array.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00738_nested_merge_multidimensional_array.reference
rename to tests/queries/0_stateless/00738_nested_merge_multidimensional_array.reference
diff --git a/dbms/tests/queries/0_stateless/00738_nested_merge_multidimensional_array.sql b/tests/queries/0_stateless/00738_nested_merge_multidimensional_array.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00738_nested_merge_multidimensional_array.sql
rename to tests/queries/0_stateless/00738_nested_merge_multidimensional_array.sql
diff --git a/dbms/tests/queries/0_stateless/00739_array_element_nullable_string_mattrobenolt.reference b/tests/queries/0_stateless/00739_array_element_nullable_string_mattrobenolt.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00739_array_element_nullable_string_mattrobenolt.reference
rename to tests/queries/0_stateless/00739_array_element_nullable_string_mattrobenolt.reference
diff --git a/dbms/tests/queries/0_stateless/00739_array_element_nullable_string_mattrobenolt.sql b/tests/queries/0_stateless/00739_array_element_nullable_string_mattrobenolt.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00739_array_element_nullable_string_mattrobenolt.sql
rename to tests/queries/0_stateless/00739_array_element_nullable_string_mattrobenolt.sql
diff --git a/dbms/tests/queries/0_stateless/00740_database_in_nested_view.reference b/tests/queries/0_stateless/00740_database_in_nested_view.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00740_database_in_nested_view.reference
rename to tests/queries/0_stateless/00740_database_in_nested_view.reference
diff --git a/dbms/tests/queries/0_stateless/00740_database_in_nested_view.sql b/tests/queries/0_stateless/00740_database_in_nested_view.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00740_database_in_nested_view.sql
rename to tests/queries/0_stateless/00740_database_in_nested_view.sql
diff --git a/dbms/tests/queries/0_stateless/00740_optimize_predicate_expression.reference b/tests/queries/0_stateless/00740_optimize_predicate_expression.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00740_optimize_predicate_expression.reference
rename to tests/queries/0_stateless/00740_optimize_predicate_expression.reference
diff --git a/dbms/tests/queries/0_stateless/00740_optimize_predicate_expression.sql b/tests/queries/0_stateless/00740_optimize_predicate_expression.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00740_optimize_predicate_expression.sql
rename to tests/queries/0_stateless/00740_optimize_predicate_expression.sql
diff --git a/dbms/tests/queries/0_stateless/00741_client_comment_multiline.reference b/tests/queries/0_stateless/00741_client_comment_multiline.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00741_client_comment_multiline.reference
rename to tests/queries/0_stateless/00741_client_comment_multiline.reference
diff --git a/dbms/tests/queries/0_stateless/00741_client_comment_multiline.sql b/tests/queries/0_stateless/00741_client_comment_multiline.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00741_client_comment_multiline.sql
rename to tests/queries/0_stateless/00741_client_comment_multiline.sql
diff --git a/dbms/tests/queries/0_stateless/00742_require_join_strictness.reference b/tests/queries/0_stateless/00742_require_join_strictness.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00742_require_join_strictness.reference
rename to tests/queries/0_stateless/00742_require_join_strictness.reference
diff --git a/dbms/tests/queries/0_stateless/00742_require_join_strictness.sql b/tests/queries/0_stateless/00742_require_join_strictness.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00742_require_join_strictness.sql
rename to tests/queries/0_stateless/00742_require_join_strictness.sql
diff --git a/dbms/tests/queries/0_stateless/00743_limit_by_not_found_column.reference b/tests/queries/0_stateless/00743_limit_by_not_found_column.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00743_limit_by_not_found_column.reference
rename to tests/queries/0_stateless/00743_limit_by_not_found_column.reference
diff --git a/dbms/tests/queries/0_stateless/00743_limit_by_not_found_column.sql b/tests/queries/0_stateless/00743_limit_by_not_found_column.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00743_limit_by_not_found_column.sql
rename to tests/queries/0_stateless/00743_limit_by_not_found_column.sql
diff --git a/dbms/tests/queries/0_stateless/00744_join_not_found_column.reference b/tests/queries/0_stateless/00744_join_not_found_column.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00744_join_not_found_column.reference
rename to tests/queries/0_stateless/00744_join_not_found_column.reference
diff --git a/dbms/tests/queries/0_stateless/00744_join_not_found_column.sql b/tests/queries/0_stateless/00744_join_not_found_column.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00744_join_not_found_column.sql
rename to tests/queries/0_stateless/00744_join_not_found_column.sql
diff --git a/dbms/tests/queries/0_stateless/00745_compile_scalar_subquery.reference b/tests/queries/0_stateless/00745_compile_scalar_subquery.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00745_compile_scalar_subquery.reference
rename to tests/queries/0_stateless/00745_compile_scalar_subquery.reference
diff --git a/dbms/tests/queries/0_stateless/00745_compile_scalar_subquery.sql b/tests/queries/0_stateless/00745_compile_scalar_subquery.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00745_compile_scalar_subquery.sql
rename to tests/queries/0_stateless/00745_compile_scalar_subquery.sql
diff --git a/dbms/tests/queries/0_stateless/00746_compile_non_deterministic_function.reference b/tests/queries/0_stateless/00746_compile_non_deterministic_function.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00746_compile_non_deterministic_function.reference
rename to tests/queries/0_stateless/00746_compile_non_deterministic_function.reference
diff --git a/dbms/tests/queries/0_stateless/00746_compile_non_deterministic_function.sql b/tests/queries/0_stateless/00746_compile_non_deterministic_function.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00746_compile_non_deterministic_function.sql
rename to tests/queries/0_stateless/00746_compile_non_deterministic_function.sql
diff --git a/dbms/tests/queries/0_stateless/00746_hashing_tuples.reference b/tests/queries/0_stateless/00746_hashing_tuples.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00746_hashing_tuples.reference
rename to tests/queries/0_stateless/00746_hashing_tuples.reference
diff --git a/dbms/tests/queries/0_stateless/00746_hashing_tuples.sql b/tests/queries/0_stateless/00746_hashing_tuples.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00746_hashing_tuples.sql
rename to tests/queries/0_stateless/00746_hashing_tuples.sql
diff --git a/dbms/tests/queries/0_stateless/00746_sql_fuzzy.pl b/tests/queries/0_stateless/00746_sql_fuzzy.pl
similarity index 100%
rename from dbms/tests/queries/0_stateless/00746_sql_fuzzy.pl
rename to tests/queries/0_stateless/00746_sql_fuzzy.pl
diff --git a/dbms/tests/queries/0_stateless/00746_sql_fuzzy.reference b/tests/queries/0_stateless/00746_sql_fuzzy.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00746_sql_fuzzy.reference
rename to tests/queries/0_stateless/00746_sql_fuzzy.reference
diff --git a/dbms/tests/queries/0_stateless/00746_sql_fuzzy.sh b/tests/queries/0_stateless/00746_sql_fuzzy.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00746_sql_fuzzy.sh
rename to tests/queries/0_stateless/00746_sql_fuzzy.sh
diff --git a/dbms/tests/queries/0_stateless/00747_contributors.reference b/tests/queries/0_stateless/00747_contributors.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00747_contributors.reference
rename to tests/queries/0_stateless/00747_contributors.reference
diff --git a/dbms/tests/queries/0_stateless/00747_contributors.sql b/tests/queries/0_stateless/00747_contributors.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00747_contributors.sql
rename to tests/queries/0_stateless/00747_contributors.sql
diff --git a/dbms/tests/queries/0_stateless/00748_insert_array_with_null.reference b/tests/queries/0_stateless/00748_insert_array_with_null.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00748_insert_array_with_null.reference
rename to tests/queries/0_stateless/00748_insert_array_with_null.reference
diff --git a/dbms/tests/queries/0_stateless/00748_insert_array_with_null.sql b/tests/queries/0_stateless/00748_insert_array_with_null.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00748_insert_array_with_null.sql
rename to tests/queries/0_stateless/00748_insert_array_with_null.sql
diff --git a/dbms/tests/queries/0_stateless/00749_inner_join_of_unnamed_subqueries.reference b/tests/queries/0_stateless/00749_inner_join_of_unnamed_subqueries.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00749_inner_join_of_unnamed_subqueries.reference
rename to tests/queries/0_stateless/00749_inner_join_of_unnamed_subqueries.reference
diff --git a/dbms/tests/queries/0_stateless/00749_inner_join_of_unnamed_subqueries.sql
b/tests/queries/0_stateless/00749_inner_join_of_unnamed_subqueries.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00749_inner_join_of_unnamed_subqueries.sql rename to tests/queries/0_stateless/00749_inner_join_of_unnamed_subqueries.sql diff --git a/dbms/tests/queries/0_stateless/00750_merge_tree_merge_with_o_direct.reference b/tests/queries/0_stateless/00750_merge_tree_merge_with_o_direct.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00750_merge_tree_merge_with_o_direct.reference rename to tests/queries/0_stateless/00750_merge_tree_merge_with_o_direct.reference diff --git a/dbms/tests/queries/0_stateless/00750_merge_tree_merge_with_o_direct.sql b/tests/queries/0_stateless/00750_merge_tree_merge_with_o_direct.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00750_merge_tree_merge_with_o_direct.sql rename to tests/queries/0_stateless/00750_merge_tree_merge_with_o_direct.sql diff --git a/dbms/tests/queries/0_stateless/00751_default_databasename_for_view.reference b/tests/queries/0_stateless/00751_default_databasename_for_view.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00751_default_databasename_for_view.reference rename to tests/queries/0_stateless/00751_default_databasename_for_view.reference diff --git a/dbms/tests/queries/0_stateless/00751_default_databasename_for_view.sql b/tests/queries/0_stateless/00751_default_databasename_for_view.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00751_default_databasename_for_view.sql rename to tests/queries/0_stateless/00751_default_databasename_for_view.sql diff --git a/dbms/tests/queries/0_stateless/00751_hashing_ints.reference b/tests/queries/0_stateless/00751_hashing_ints.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00751_hashing_ints.reference rename to tests/queries/0_stateless/00751_hashing_ints.reference diff --git a/dbms/tests/queries/0_stateless/00751_hashing_ints.sql b/tests/queries/0_stateless/00751_hashing_ints.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00751_hashing_ints.sql rename to tests/queries/0_stateless/00751_hashing_ints.sql diff --git a/dbms/tests/queries/0_stateless/00751_low_cardinality_nullable_group_by.reference b/tests/queries/0_stateless/00751_low_cardinality_nullable_group_by.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00751_low_cardinality_nullable_group_by.reference rename to tests/queries/0_stateless/00751_low_cardinality_nullable_group_by.reference diff --git a/dbms/tests/queries/0_stateless/00751_low_cardinality_nullable_group_by.sql b/tests/queries/0_stateless/00751_low_cardinality_nullable_group_by.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00751_low_cardinality_nullable_group_by.sql rename to tests/queries/0_stateless/00751_low_cardinality_nullable_group_by.sql diff --git a/dbms/tests/queries/0_stateless/00752_low_cardinality_array_result.reference b/tests/queries/0_stateless/00752_low_cardinality_array_result.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00752_low_cardinality_array_result.reference rename to tests/queries/0_stateless/00752_low_cardinality_array_result.reference diff --git a/dbms/tests/queries/0_stateless/00752_low_cardinality_array_result.sql b/tests/queries/0_stateless/00752_low_cardinality_array_result.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00752_low_cardinality_array_result.sql rename to 
tests/queries/0_stateless/00752_low_cardinality_array_result.sql diff --git a/dbms/tests/queries/0_stateless/00752_low_cardinality_lambda_argument.reference b/tests/queries/0_stateless/00752_low_cardinality_lambda_argument.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00752_low_cardinality_lambda_argument.reference rename to tests/queries/0_stateless/00752_low_cardinality_lambda_argument.reference diff --git a/dbms/tests/queries/0_stateless/00752_low_cardinality_lambda_argument.sql b/tests/queries/0_stateless/00752_low_cardinality_lambda_argument.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00752_low_cardinality_lambda_argument.sql rename to tests/queries/0_stateless/00752_low_cardinality_lambda_argument.sql diff --git a/dbms/tests/queries/0_stateless/00752_low_cardinality_left_array_join.reference b/tests/queries/0_stateless/00752_low_cardinality_left_array_join.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00752_low_cardinality_left_array_join.reference rename to tests/queries/0_stateless/00752_low_cardinality_left_array_join.reference diff --git a/dbms/tests/queries/0_stateless/00752_low_cardinality_left_array_join.sql b/tests/queries/0_stateless/00752_low_cardinality_left_array_join.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00752_low_cardinality_left_array_join.sql rename to tests/queries/0_stateless/00752_low_cardinality_left_array_join.sql diff --git a/dbms/tests/queries/0_stateless/00752_low_cardinality_mv_1.reference b/tests/queries/0_stateless/00752_low_cardinality_mv_1.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00752_low_cardinality_mv_1.reference rename to tests/queries/0_stateless/00752_low_cardinality_mv_1.reference diff --git a/dbms/tests/queries/0_stateless/00752_low_cardinality_mv_1.sql b/tests/queries/0_stateless/00752_low_cardinality_mv_1.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00752_low_cardinality_mv_1.sql rename to tests/queries/0_stateless/00752_low_cardinality_mv_1.sql diff --git a/dbms/tests/queries/0_stateless/00752_low_cardinality_mv_2.reference b/tests/queries/0_stateless/00752_low_cardinality_mv_2.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00752_low_cardinality_mv_2.reference rename to tests/queries/0_stateless/00752_low_cardinality_mv_2.reference diff --git a/dbms/tests/queries/0_stateless/00752_low_cardinality_mv_2.sql b/tests/queries/0_stateless/00752_low_cardinality_mv_2.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00752_low_cardinality_mv_2.sql rename to tests/queries/0_stateless/00752_low_cardinality_mv_2.sql diff --git a/dbms/tests/queries/0_stateless/00752_low_cardinality_permute.reference b/tests/queries/0_stateless/00752_low_cardinality_permute.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00752_low_cardinality_permute.reference rename to tests/queries/0_stateless/00752_low_cardinality_permute.reference diff --git a/dbms/tests/queries/0_stateless/00752_low_cardinality_permute.sql b/tests/queries/0_stateless/00752_low_cardinality_permute.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00752_low_cardinality_permute.sql rename to tests/queries/0_stateless/00752_low_cardinality_permute.sql diff --git a/dbms/tests/queries/0_stateless/00753_alter_attach.reference b/tests/queries/0_stateless/00753_alter_attach.reference similarity index 100% rename from 
dbms/tests/queries/0_stateless/00753_alter_attach.reference rename to tests/queries/0_stateless/00753_alter_attach.reference diff --git a/dbms/tests/queries/0_stateless/00753_alter_attach.sql b/tests/queries/0_stateless/00753_alter_attach.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00753_alter_attach.sql rename to tests/queries/0_stateless/00753_alter_attach.sql diff --git a/dbms/tests/queries/0_stateless/00753_alter_destination_for_storage_buffer.reference b/tests/queries/0_stateless/00753_alter_destination_for_storage_buffer.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00753_alter_destination_for_storage_buffer.reference rename to tests/queries/0_stateless/00753_alter_destination_for_storage_buffer.reference diff --git a/dbms/tests/queries/0_stateless/00753_alter_destination_for_storage_buffer.sql b/tests/queries/0_stateless/00753_alter_destination_for_storage_buffer.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00753_alter_destination_for_storage_buffer.sql rename to tests/queries/0_stateless/00753_alter_destination_for_storage_buffer.sql diff --git a/dbms/tests/queries/0_stateless/00753_comment_columns_zookeeper.reference b/tests/queries/0_stateless/00753_comment_columns_zookeeper.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00753_comment_columns_zookeeper.reference rename to tests/queries/0_stateless/00753_comment_columns_zookeeper.reference diff --git a/dbms/tests/queries/0_stateless/00753_comment_columns_zookeeper.sql b/tests/queries/0_stateless/00753_comment_columns_zookeeper.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00753_comment_columns_zookeeper.sql rename to tests/queries/0_stateless/00753_comment_columns_zookeeper.sql diff --git a/dbms/tests/queries/0_stateless/00753_quantile_format.reference b/tests/queries/0_stateless/00753_quantile_format.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00753_quantile_format.reference rename to tests/queries/0_stateless/00753_quantile_format.reference diff --git a/dbms/tests/queries/0_stateless/00753_quantile_format.sql b/tests/queries/0_stateless/00753_quantile_format.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00753_quantile_format.sql rename to tests/queries/0_stateless/00753_quantile_format.sql diff --git a/dbms/tests/queries/0_stateless/00753_system_columns_and_system_tables.reference b/tests/queries/0_stateless/00753_system_columns_and_system_tables.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00753_system_columns_and_system_tables.reference rename to tests/queries/0_stateless/00753_system_columns_and_system_tables.reference diff --git a/dbms/tests/queries/0_stateless/00753_system_columns_and_system_tables.sql b/tests/queries/0_stateless/00753_system_columns_and_system_tables.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00753_system_columns_and_system_tables.sql rename to tests/queries/0_stateless/00753_system_columns_and_system_tables.sql diff --git a/dbms/tests/queries/0_stateless/00753_with_with_single_alias.reference b/tests/queries/0_stateless/00753_with_with_single_alias.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00753_with_with_single_alias.reference rename to tests/queries/0_stateless/00753_with_with_single_alias.reference diff --git a/dbms/tests/queries/0_stateless/00753_with_with_single_alias.sql b/tests/queries/0_stateless/00753_with_with_single_alias.sql 
similarity index 100% rename from dbms/tests/queries/0_stateless/00753_with_with_single_alias.sql rename to tests/queries/0_stateless/00753_with_with_single_alias.sql diff --git a/dbms/tests/queries/0_stateless/00754_alter_modify_column_partitions.reference b/tests/queries/0_stateless/00754_alter_modify_column_partitions.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00754_alter_modify_column_partitions.reference rename to tests/queries/0_stateless/00754_alter_modify_column_partitions.reference diff --git a/dbms/tests/queries/0_stateless/00754_alter_modify_column_partitions.sql b/tests/queries/0_stateless/00754_alter_modify_column_partitions.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00754_alter_modify_column_partitions.sql rename to tests/queries/0_stateless/00754_alter_modify_column_partitions.sql diff --git a/dbms/tests/queries/0_stateless/00754_alter_modify_order_by.reference b/tests/queries/0_stateless/00754_alter_modify_order_by.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00754_alter_modify_order_by.reference rename to tests/queries/0_stateless/00754_alter_modify_order_by.reference diff --git a/dbms/tests/queries/0_stateless/00754_alter_modify_order_by.sql b/tests/queries/0_stateless/00754_alter_modify_order_by.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00754_alter_modify_order_by.sql rename to tests/queries/0_stateless/00754_alter_modify_order_by.sql diff --git a/dbms/tests/queries/0_stateless/00754_alter_modify_order_by_replicated_zookeeper.reference b/tests/queries/0_stateless/00754_alter_modify_order_by_replicated_zookeeper.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00754_alter_modify_order_by_replicated_zookeeper.reference rename to tests/queries/0_stateless/00754_alter_modify_order_by_replicated_zookeeper.reference diff --git a/dbms/tests/queries/0_stateless/00754_alter_modify_order_by_replicated_zookeeper.sql b/tests/queries/0_stateless/00754_alter_modify_order_by_replicated_zookeeper.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00754_alter_modify_order_by_replicated_zookeeper.sql rename to tests/queries/0_stateless/00754_alter_modify_order_by_replicated_zookeeper.sql diff --git a/dbms/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards.reference b/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards.reference rename to tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards.reference diff --git a/dbms/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards.sh b/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards.sh rename to tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards.sh diff --git a/dbms/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards_with_prewhere.reference b/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards_with_prewhere.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards_with_prewhere.reference rename to 
tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards_with_prewhere.reference diff --git a/dbms/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards_with_prewhere.sh b/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards_with_prewhere.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards_with_prewhere.sh rename to tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards_with_prewhere.sh diff --git a/dbms/tests/queries/0_stateless/00754_first_significant_subdomain_more.reference b/tests/queries/0_stateless/00754_first_significant_subdomain_more.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00754_first_significant_subdomain_more.reference rename to tests/queries/0_stateless/00754_first_significant_subdomain_more.reference diff --git a/dbms/tests/queries/0_stateless/00754_first_significant_subdomain_more.sql b/tests/queries/0_stateless/00754_first_significant_subdomain_more.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00754_first_significant_subdomain_more.sql rename to tests/queries/0_stateless/00754_first_significant_subdomain_more.sql diff --git a/dbms/tests/queries/0_stateless/00755_avg_value_size_hint_passing.reference b/tests/queries/0_stateless/00755_avg_value_size_hint_passing.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00755_avg_value_size_hint_passing.reference rename to tests/queries/0_stateless/00755_avg_value_size_hint_passing.reference diff --git a/dbms/tests/queries/0_stateless/00755_avg_value_size_hint_passing.sql b/tests/queries/0_stateless/00755_avg_value_size_hint_passing.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00755_avg_value_size_hint_passing.sql rename to tests/queries/0_stateless/00755_avg_value_size_hint_passing.sql diff --git a/dbms/tests/queries/0_stateless/00756_power_alias.reference b/tests/queries/0_stateless/00756_power_alias.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00756_power_alias.reference rename to tests/queries/0_stateless/00756_power_alias.reference diff --git a/dbms/tests/queries/0_stateless/00756_power_alias.sql b/tests/queries/0_stateless/00756_power_alias.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00756_power_alias.sql rename to tests/queries/0_stateless/00756_power_alias.sql diff --git a/dbms/tests/queries/0_stateless/00757_enum_defaults.reference b/tests/queries/0_stateless/00757_enum_defaults.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00757_enum_defaults.reference rename to tests/queries/0_stateless/00757_enum_defaults.reference diff --git a/dbms/tests/queries/0_stateless/00757_enum_defaults.sql b/tests/queries/0_stateless/00757_enum_defaults.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00757_enum_defaults.sql rename to tests/queries/0_stateless/00757_enum_defaults.sql diff --git a/dbms/tests/queries/0_stateless/00758_array_reverse.reference b/tests/queries/0_stateless/00758_array_reverse.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00758_array_reverse.reference rename to tests/queries/0_stateless/00758_array_reverse.reference diff --git a/dbms/tests/queries/0_stateless/00758_array_reverse.sql b/tests/queries/0_stateless/00758_array_reverse.sql similarity index 100% rename from 
dbms/tests/queries/0_stateless/00758_array_reverse.sql rename to tests/queries/0_stateless/00758_array_reverse.sql diff --git a/dbms/tests/queries/0_stateless/00759_kodieg.reference b/tests/queries/0_stateless/00759_kodieg.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00759_kodieg.reference rename to tests/queries/0_stateless/00759_kodieg.reference diff --git a/dbms/tests/queries/0_stateless/00759_kodieg.sql b/tests/queries/0_stateless/00759_kodieg.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00759_kodieg.sql rename to tests/queries/0_stateless/00759_kodieg.sql diff --git a/dbms/tests/queries/0_stateless/00760_insert_json_with_defaults.reference b/tests/queries/0_stateless/00760_insert_json_with_defaults.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00760_insert_json_with_defaults.reference rename to tests/queries/0_stateless/00760_insert_json_with_defaults.reference diff --git a/dbms/tests/queries/0_stateless/00760_insert_json_with_defaults.sql b/tests/queries/0_stateless/00760_insert_json_with_defaults.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00760_insert_json_with_defaults.sql rename to tests/queries/0_stateless/00760_insert_json_with_defaults.sql diff --git a/dbms/tests/queries/0_stateless/00760_url_functions_overflow.reference b/tests/queries/0_stateless/00760_url_functions_overflow.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00760_url_functions_overflow.reference rename to tests/queries/0_stateless/00760_url_functions_overflow.reference diff --git a/dbms/tests/queries/0_stateless/00760_url_functions_overflow.sql b/tests/queries/0_stateless/00760_url_functions_overflow.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00760_url_functions_overflow.sql rename to tests/queries/0_stateless/00760_url_functions_overflow.sql diff --git a/dbms/tests/queries/0_stateless/00761_lower_utf8_bug.reference b/tests/queries/0_stateless/00761_lower_utf8_bug.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00761_lower_utf8_bug.reference rename to tests/queries/0_stateless/00761_lower_utf8_bug.reference diff --git a/dbms/tests/queries/0_stateless/00761_lower_utf8_bug.sql b/tests/queries/0_stateless/00761_lower_utf8_bug.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00761_lower_utf8_bug.sql rename to tests/queries/0_stateless/00761_lower_utf8_bug.sql diff --git a/dbms/tests/queries/0_stateless/00762_date_comparsion.reference b/tests/queries/0_stateless/00762_date_comparsion.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00762_date_comparsion.reference rename to tests/queries/0_stateless/00762_date_comparsion.reference diff --git a/dbms/tests/queries/0_stateless/00762_date_comparsion.sql b/tests/queries/0_stateless/00762_date_comparsion.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00762_date_comparsion.sql rename to tests/queries/0_stateless/00762_date_comparsion.sql diff --git a/dbms/tests/queries/0_stateless/00763_create_query_as_table_engine_bug.reference b/tests/queries/0_stateless/00763_create_query_as_table_engine_bug.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00763_create_query_as_table_engine_bug.reference rename to tests/queries/0_stateless/00763_create_query_as_table_engine_bug.reference diff --git a/dbms/tests/queries/0_stateless/00763_create_query_as_table_engine_bug.sql 
b/tests/queries/0_stateless/00763_create_query_as_table_engine_bug.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00763_create_query_as_table_engine_bug.sql rename to tests/queries/0_stateless/00763_create_query_as_table_engine_bug.sql diff --git a/dbms/tests/queries/0_stateless/00763_lock_buffer.reference b/tests/queries/0_stateless/00763_lock_buffer.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00763_lock_buffer.reference rename to tests/queries/0_stateless/00763_lock_buffer.reference diff --git a/dbms/tests/queries/0_stateless/00763_lock_buffer.sh b/tests/queries/0_stateless/00763_lock_buffer.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00763_lock_buffer.sh rename to tests/queries/0_stateless/00763_lock_buffer.sh diff --git a/dbms/tests/queries/0_stateless/00763_long_lock_buffer_alter_destination_table.reference b/tests/queries/0_stateless/00763_long_lock_buffer_alter_destination_table.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00763_long_lock_buffer_alter_destination_table.reference rename to tests/queries/0_stateless/00763_long_lock_buffer_alter_destination_table.reference diff --git a/dbms/tests/queries/0_stateless/00763_long_lock_buffer_alter_destination_table.sh b/tests/queries/0_stateless/00763_long_lock_buffer_alter_destination_table.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00763_long_lock_buffer_alter_destination_table.sh rename to tests/queries/0_stateless/00763_long_lock_buffer_alter_destination_table.sh diff --git a/dbms/tests/queries/0_stateless/00764_max_query_size_allocation.reference b/tests/queries/0_stateless/00764_max_query_size_allocation.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00764_max_query_size_allocation.reference rename to tests/queries/0_stateless/00764_max_query_size_allocation.reference diff --git a/dbms/tests/queries/0_stateless/00764_max_query_size_allocation.sh b/tests/queries/0_stateless/00764_max_query_size_allocation.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00764_max_query_size_allocation.sh rename to tests/queries/0_stateless/00764_max_query_size_allocation.sh diff --git a/dbms/tests/queries/0_stateless/00765_sql_compatibility_aliases.reference b/tests/queries/0_stateless/00765_sql_compatibility_aliases.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00765_sql_compatibility_aliases.reference rename to tests/queries/0_stateless/00765_sql_compatibility_aliases.reference diff --git a/dbms/tests/queries/0_stateless/00765_sql_compatibility_aliases.sql b/tests/queries/0_stateless/00765_sql_compatibility_aliases.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00765_sql_compatibility_aliases.sql rename to tests/queries/0_stateless/00765_sql_compatibility_aliases.sql diff --git a/dbms/tests/queries/0_stateless/00779_all_right_join_max_block_size.reference b/tests/queries/0_stateless/00779_all_right_join_max_block_size.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00779_all_right_join_max_block_size.reference rename to tests/queries/0_stateless/00779_all_right_join_max_block_size.reference diff --git a/dbms/tests/queries/0_stateless/00779_all_right_join_max_block_size.sql b/tests/queries/0_stateless/00779_all_right_join_max_block_size.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00779_all_right_join_max_block_size.sql rename to 
tests/queries/0_stateless/00779_all_right_join_max_block_size.sql diff --git a/dbms/tests/queries/0_stateless/00780_unaligned_array_join.reference b/tests/queries/0_stateless/00780_unaligned_array_join.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00780_unaligned_array_join.reference rename to tests/queries/0_stateless/00780_unaligned_array_join.reference diff --git a/dbms/tests/queries/0_stateless/00780_unaligned_array_join.sql b/tests/queries/0_stateless/00780_unaligned_array_join.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00780_unaligned_array_join.sql rename to tests/queries/0_stateless/00780_unaligned_array_join.sql diff --git a/dbms/tests/queries/0_stateless/00794_materialized_view_with_column_defaults.reference b/tests/queries/0_stateless/00794_materialized_view_with_column_defaults.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00794_materialized_view_with_column_defaults.reference rename to tests/queries/0_stateless/00794_materialized_view_with_column_defaults.reference diff --git a/dbms/tests/queries/0_stateless/00794_materialized_view_with_column_defaults.sql b/tests/queries/0_stateless/00794_materialized_view_with_column_defaults.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00794_materialized_view_with_column_defaults.sql rename to tests/queries/0_stateless/00794_materialized_view_with_column_defaults.sql diff --git a/dbms/tests/queries/0_stateless/00799_function_dry_run.reference b/tests/queries/0_stateless/00799_function_dry_run.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00799_function_dry_run.reference rename to tests/queries/0_stateless/00799_function_dry_run.reference diff --git a/dbms/tests/queries/0_stateless/00799_function_dry_run.sql b/tests/queries/0_stateless/00799_function_dry_run.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00799_function_dry_run.sql rename to tests/queries/0_stateless/00799_function_dry_run.sql diff --git a/dbms/tests/queries/0_stateless/00800_function_java_hash.reference b/tests/queries/0_stateless/00800_function_java_hash.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00800_function_java_hash.reference rename to tests/queries/0_stateless/00800_function_java_hash.reference diff --git a/dbms/tests/queries/0_stateless/00800_function_java_hash.sql b/tests/queries/0_stateless/00800_function_java_hash.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00800_function_java_hash.sql rename to tests/queries/0_stateless/00800_function_java_hash.sql diff --git a/dbms/tests/queries/0_stateless/00800_low_cardinality_array_group_by_arg.reference b/tests/queries/0_stateless/00800_low_cardinality_array_group_by_arg.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00800_low_cardinality_array_group_by_arg.reference rename to tests/queries/0_stateless/00800_low_cardinality_array_group_by_arg.reference diff --git a/dbms/tests/queries/0_stateless/00800_low_cardinality_array_group_by_arg.sql b/tests/queries/0_stateless/00800_low_cardinality_array_group_by_arg.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00800_low_cardinality_array_group_by_arg.sql rename to tests/queries/0_stateless/00800_low_cardinality_array_group_by_arg.sql diff --git a/dbms/tests/queries/0_stateless/00800_low_cardinality_distinct_numeric.reference b/tests/queries/0_stateless/00800_low_cardinality_distinct_numeric.reference similarity index 
100% rename from dbms/tests/queries/0_stateless/00800_low_cardinality_distinct_numeric.reference rename to tests/queries/0_stateless/00800_low_cardinality_distinct_numeric.reference diff --git a/dbms/tests/queries/0_stateless/00800_low_cardinality_distinct_numeric.sql b/tests/queries/0_stateless/00800_low_cardinality_distinct_numeric.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00800_low_cardinality_distinct_numeric.sql rename to tests/queries/0_stateless/00800_low_cardinality_distinct_numeric.sql diff --git a/dbms/tests/queries/0_stateless/00800_low_cardinality_distributed_insert.reference b/tests/queries/0_stateless/00800_low_cardinality_distributed_insert.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00800_low_cardinality_distributed_insert.reference rename to tests/queries/0_stateless/00800_low_cardinality_distributed_insert.reference diff --git a/dbms/tests/queries/0_stateless/00800_low_cardinality_distributed_insert.sql b/tests/queries/0_stateless/00800_low_cardinality_distributed_insert.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00800_low_cardinality_distributed_insert.sql rename to tests/queries/0_stateless/00800_low_cardinality_distributed_insert.sql diff --git a/dbms/tests/queries/0_stateless/00800_low_cardinality_empty_array.reference b/tests/queries/0_stateless/00800_low_cardinality_empty_array.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00800_low_cardinality_empty_array.reference rename to tests/queries/0_stateless/00800_low_cardinality_empty_array.reference diff --git a/dbms/tests/queries/0_stateless/00800_low_cardinality_empty_array.sql b/tests/queries/0_stateless/00800_low_cardinality_empty_array.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00800_low_cardinality_empty_array.sql rename to tests/queries/0_stateless/00800_low_cardinality_empty_array.sql diff --git a/dbms/tests/queries/0_stateless/00800_low_cardinality_join.reference b/tests/queries/0_stateless/00800_low_cardinality_join.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00800_low_cardinality_join.reference rename to tests/queries/0_stateless/00800_low_cardinality_join.reference diff --git a/dbms/tests/queries/0_stateless/00800_low_cardinality_join.sql b/tests/queries/0_stateless/00800_low_cardinality_join.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00800_low_cardinality_join.sql rename to tests/queries/0_stateless/00800_low_cardinality_join.sql diff --git a/dbms/tests/queries/0_stateless/00800_versatile_storage_join.reference b/tests/queries/0_stateless/00800_versatile_storage_join.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00800_versatile_storage_join.reference rename to tests/queries/0_stateless/00800_versatile_storage_join.reference diff --git a/dbms/tests/queries/0_stateless/00800_versatile_storage_join.sql b/tests/queries/0_stateless/00800_versatile_storage_join.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00800_versatile_storage_join.sql rename to tests/queries/0_stateless/00800_versatile_storage_join.sql diff --git a/dbms/tests/queries/0_stateless/00801_daylight_saving_time_hour_underflow.reference b/tests/queries/0_stateless/00801_daylight_saving_time_hour_underflow.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00801_daylight_saving_time_hour_underflow.reference rename to 
tests/queries/0_stateless/00801_daylight_saving_time_hour_underflow.reference diff --git a/dbms/tests/queries/0_stateless/00801_daylight_saving_time_hour_underflow.sql b/tests/queries/0_stateless/00801_daylight_saving_time_hour_underflow.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00801_daylight_saving_time_hour_underflow.sql rename to tests/queries/0_stateless/00801_daylight_saving_time_hour_underflow.sql diff --git a/dbms/tests/queries/0_stateless/00802_daylight_saving_time_shift_backwards_at_midnight.reference b/tests/queries/0_stateless/00802_daylight_saving_time_shift_backwards_at_midnight.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00802_daylight_saving_time_shift_backwards_at_midnight.reference rename to tests/queries/0_stateless/00802_daylight_saving_time_shift_backwards_at_midnight.reference diff --git a/dbms/tests/queries/0_stateless/00802_daylight_saving_time_shift_backwards_at_midnight.sql b/tests/queries/0_stateless/00802_daylight_saving_time_shift_backwards_at_midnight.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00802_daylight_saving_time_shift_backwards_at_midnight.sql rename to tests/queries/0_stateless/00802_daylight_saving_time_shift_backwards_at_midnight.sql diff --git a/dbms/tests/queries/0_stateless/00802_system_parts_with_datetime_partition.reference b/tests/queries/0_stateless/00802_system_parts_with_datetime_partition.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00802_system_parts_with_datetime_partition.reference rename to tests/queries/0_stateless/00802_system_parts_with_datetime_partition.reference diff --git a/dbms/tests/queries/0_stateless/00802_system_parts_with_datetime_partition.sql b/tests/queries/0_stateless/00802_system_parts_with_datetime_partition.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00802_system_parts_with_datetime_partition.sql rename to tests/queries/0_stateless/00802_system_parts_with_datetime_partition.sql diff --git a/dbms/tests/queries/0_stateless/00803_odbc_driver_2_format.reference b/tests/queries/0_stateless/00803_odbc_driver_2_format.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00803_odbc_driver_2_format.reference rename to tests/queries/0_stateless/00803_odbc_driver_2_format.reference diff --git a/dbms/tests/queries/0_stateless/00803_odbc_driver_2_format.sql b/tests/queries/0_stateless/00803_odbc_driver_2_format.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00803_odbc_driver_2_format.sql rename to tests/queries/0_stateless/00803_odbc_driver_2_format.sql diff --git a/dbms/tests/queries/0_stateless/00803_xxhash.reference b/tests/queries/0_stateless/00803_xxhash.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00803_xxhash.reference rename to tests/queries/0_stateless/00803_xxhash.reference diff --git a/dbms/tests/queries/0_stateless/00803_xxhash.sql b/tests/queries/0_stateless/00803_xxhash.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00803_xxhash.sql rename to tests/queries/0_stateless/00803_xxhash.sql diff --git a/dbms/tests/queries/0_stateless/00804_rollup_with_having.reference b/tests/queries/0_stateless/00804_rollup_with_having.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00804_rollup_with_having.reference rename to tests/queries/0_stateless/00804_rollup_with_having.reference diff --git a/dbms/tests/queries/0_stateless/00804_rollup_with_having.sql 
b/tests/queries/0_stateless/00804_rollup_with_having.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00804_rollup_with_having.sql rename to tests/queries/0_stateless/00804_rollup_with_having.sql diff --git a/dbms/tests/queries/0_stateless/00804_test_alter_compression_codecs.reference b/tests/queries/0_stateless/00804_test_alter_compression_codecs.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00804_test_alter_compression_codecs.reference rename to tests/queries/0_stateless/00804_test_alter_compression_codecs.reference diff --git a/dbms/tests/queries/0_stateless/00804_test_alter_compression_codecs.sql b/tests/queries/0_stateless/00804_test_alter_compression_codecs.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00804_test_alter_compression_codecs.sql rename to tests/queries/0_stateless/00804_test_alter_compression_codecs.sql diff --git a/dbms/tests/queries/0_stateless/00804_test_custom_compression_codecs.reference b/tests/queries/0_stateless/00804_test_custom_compression_codecs.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00804_test_custom_compression_codecs.reference rename to tests/queries/0_stateless/00804_test_custom_compression_codecs.reference diff --git a/dbms/tests/queries/0_stateless/00804_test_custom_compression_codecs.sql b/tests/queries/0_stateless/00804_test_custom_compression_codecs.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00804_test_custom_compression_codecs.sql rename to tests/queries/0_stateless/00804_test_custom_compression_codecs.sql diff --git a/dbms/tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.reference b/tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.reference rename to tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.reference diff --git a/dbms/tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.sql b/tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.sql rename to tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.sql diff --git a/dbms/tests/queries/0_stateless/00804_test_delta_codec_compression.reference b/tests/queries/0_stateless/00804_test_delta_codec_compression.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00804_test_delta_codec_compression.reference rename to tests/queries/0_stateless/00804_test_delta_codec_compression.reference diff --git a/dbms/tests/queries/0_stateless/00804_test_delta_codec_compression.sql b/tests/queries/0_stateless/00804_test_delta_codec_compression.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00804_test_delta_codec_compression.sql rename to tests/queries/0_stateless/00804_test_delta_codec_compression.sql diff --git a/dbms/tests/queries/0_stateless/00804_test_delta_codec_no_type_alter.reference b/tests/queries/0_stateless/00804_test_delta_codec_no_type_alter.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00804_test_delta_codec_no_type_alter.reference rename to tests/queries/0_stateless/00804_test_delta_codec_no_type_alter.reference diff --git a/dbms/tests/queries/0_stateless/00804_test_delta_codec_no_type_alter.sql 
b/tests/queries/0_stateless/00804_test_delta_codec_no_type_alter.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00804_test_delta_codec_no_type_alter.sql rename to tests/queries/0_stateless/00804_test_delta_codec_no_type_alter.sql diff --git a/dbms/tests/queries/0_stateless/00805_round_down.reference b/tests/queries/0_stateless/00805_round_down.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00805_round_down.reference rename to tests/queries/0_stateless/00805_round_down.reference diff --git a/dbms/tests/queries/0_stateless/00805_round_down.sql b/tests/queries/0_stateless/00805_round_down.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00805_round_down.sql rename to tests/queries/0_stateless/00805_round_down.sql diff --git a/dbms/tests/queries/0_stateless/00806_alter_update.reference b/tests/queries/0_stateless/00806_alter_update.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00806_alter_update.reference rename to tests/queries/0_stateless/00806_alter_update.reference diff --git a/dbms/tests/queries/0_stateless/00806_alter_update.sql b/tests/queries/0_stateless/00806_alter_update.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00806_alter_update.sql rename to tests/queries/0_stateless/00806_alter_update.sql diff --git a/dbms/tests/queries/0_stateless/00807_regexp_quote_meta.reference b/tests/queries/0_stateless/00807_regexp_quote_meta.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00807_regexp_quote_meta.reference rename to tests/queries/0_stateless/00807_regexp_quote_meta.reference diff --git a/dbms/tests/queries/0_stateless/00807_regexp_quote_meta.sql b/tests/queries/0_stateless/00807_regexp_quote_meta.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00807_regexp_quote_meta.sql rename to tests/queries/0_stateless/00807_regexp_quote_meta.sql diff --git a/dbms/tests/queries/0_stateless/00808_array_enumerate_segfault.reference b/tests/queries/0_stateless/00808_array_enumerate_segfault.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00808_array_enumerate_segfault.reference rename to tests/queries/0_stateless/00808_array_enumerate_segfault.reference diff --git a/dbms/tests/queries/0_stateless/00808_array_enumerate_segfault.sql b/tests/queries/0_stateless/00808_array_enumerate_segfault.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00808_array_enumerate_segfault.sql rename to tests/queries/0_stateless/00808_array_enumerate_segfault.sql diff --git a/dbms/tests/queries/0_stateless/00808_not_optimize_predicate.reference b/tests/queries/0_stateless/00808_not_optimize_predicate.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00808_not_optimize_predicate.reference rename to tests/queries/0_stateless/00808_not_optimize_predicate.reference diff --git a/dbms/tests/queries/0_stateless/00808_not_optimize_predicate.sql b/tests/queries/0_stateless/00808_not_optimize_predicate.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00808_not_optimize_predicate.sql rename to tests/queries/0_stateless/00808_not_optimize_predicate.sql diff --git a/dbms/tests/queries/0_stateless/00809_add_days_segfault.reference b/tests/queries/0_stateless/00809_add_days_segfault.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00809_add_days_segfault.reference rename to tests/queries/0_stateless/00809_add_days_segfault.reference diff --git 
a/dbms/tests/queries/0_stateless/00809_add_days_segfault.sql b/tests/queries/0_stateless/00809_add_days_segfault.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00809_add_days_segfault.sql rename to tests/queries/0_stateless/00809_add_days_segfault.sql diff --git a/dbms/tests/queries/0_stateless/00810_in_operators_segfault.reference b/tests/queries/0_stateless/00810_in_operators_segfault.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00810_in_operators_segfault.reference rename to tests/queries/0_stateless/00810_in_operators_segfault.reference diff --git a/dbms/tests/queries/0_stateless/00810_in_operators_segfault.sql b/tests/queries/0_stateless/00810_in_operators_segfault.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00810_in_operators_segfault.sql rename to tests/queries/0_stateless/00810_in_operators_segfault.sql diff --git a/dbms/tests/queries/0_stateless/00811_garbage.reference b/tests/queries/0_stateless/00811_garbage.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00811_garbage.reference rename to tests/queries/0_stateless/00811_garbage.reference diff --git a/dbms/tests/queries/0_stateless/00811_garbage.sql b/tests/queries/0_stateless/00811_garbage.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00811_garbage.sql rename to tests/queries/0_stateless/00811_garbage.sql diff --git a/dbms/tests/queries/0_stateless/00812_prewhere_alias_array.reference b/tests/queries/0_stateless/00812_prewhere_alias_array.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00812_prewhere_alias_array.reference rename to tests/queries/0_stateless/00812_prewhere_alias_array.reference diff --git a/dbms/tests/queries/0_stateless/00812_prewhere_alias_array.sql b/tests/queries/0_stateless/00812_prewhere_alias_array.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00812_prewhere_alias_array.sql rename to tests/queries/0_stateless/00812_prewhere_alias_array.sql diff --git a/dbms/tests/queries/0_stateless/00813_parse_date_time_best_effort_more.reference b/tests/queries/0_stateless/00813_parse_date_time_best_effort_more.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00813_parse_date_time_best_effort_more.reference rename to tests/queries/0_stateless/00813_parse_date_time_best_effort_more.reference diff --git a/dbms/tests/queries/0_stateless/00813_parse_date_time_best_effort_more.sql b/tests/queries/0_stateless/00813_parse_date_time_best_effort_more.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00813_parse_date_time_best_effort_more.sql rename to tests/queries/0_stateless/00813_parse_date_time_best_effort_more.sql diff --git a/dbms/tests/queries/0_stateless/00814_parsing_ub.reference b/tests/queries/0_stateless/00814_parsing_ub.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00814_parsing_ub.reference rename to tests/queries/0_stateless/00814_parsing_ub.reference diff --git a/dbms/tests/queries/0_stateless/00814_parsing_ub.sql b/tests/queries/0_stateless/00814_parsing_ub.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00814_parsing_ub.sql rename to tests/queries/0_stateless/00814_parsing_ub.sql diff --git a/dbms/tests/queries/0_stateless/00814_replicated_minimalistic_part_header_zookeeper.reference b/tests/queries/0_stateless/00814_replicated_minimalistic_part_header_zookeeper.reference similarity index 100% rename from 
dbms/tests/queries/0_stateless/00814_replicated_minimalistic_part_header_zookeeper.reference
rename to tests/queries/0_stateless/00814_replicated_minimalistic_part_header_zookeeper.reference
diff --git a/dbms/tests/queries/0_stateless/00814_replicated_minimalistic_part_header_zookeeper.sql b/tests/queries/0_stateless/00814_replicated_minimalistic_part_header_zookeeper.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00814_replicated_minimalistic_part_header_zookeeper.sql
rename to tests/queries/0_stateless/00814_replicated_minimalistic_part_header_zookeeper.sql
diff --git a/dbms/tests/queries/0_stateless/00815_left_join_on_stepanel.reference b/tests/queries/0_stateless/00815_left_join_on_stepanel.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00815_left_join_on_stepanel.reference
rename to tests/queries/0_stateless/00815_left_join_on_stepanel.reference
diff --git a/dbms/tests/queries/0_stateless/00815_left_join_on_stepanel.sql b/tests/queries/0_stateless/00815_left_join_on_stepanel.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00815_left_join_on_stepanel.sql
rename to tests/queries/0_stateless/00815_left_join_on_stepanel.sql
diff --git a/dbms/tests/queries/0_stateless/00816_join_column_names_sarg.reference b/tests/queries/0_stateless/00816_join_column_names_sarg.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00816_join_column_names_sarg.reference
rename to tests/queries/0_stateless/00816_join_column_names_sarg.reference
diff --git a/dbms/tests/queries/0_stateless/00816_join_column_names_sarg.sql b/tests/queries/0_stateless/00816_join_column_names_sarg.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00816_join_column_names_sarg.sql
rename to tests/queries/0_stateless/00816_join_column_names_sarg.sql
diff --git a/dbms/tests/queries/0_stateless/00816_long_concurrent_alter_column.reference b/tests/queries/0_stateless/00816_long_concurrent_alter_column.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00816_long_concurrent_alter_column.reference
rename to tests/queries/0_stateless/00816_long_concurrent_alter_column.reference
diff --git a/dbms/tests/queries/0_stateless/00816_long_concurrent_alter_column.sh b/tests/queries/0_stateless/00816_long_concurrent_alter_column.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00816_long_concurrent_alter_column.sh
rename to tests/queries/0_stateless/00816_long_concurrent_alter_column.sh
diff --git a/dbms/tests/queries/0_stateless/00817_with_simple.reference b/tests/queries/0_stateless/00817_with_simple.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00817_with_simple.reference
rename to tests/queries/0_stateless/00817_with_simple.reference
diff --git a/dbms/tests/queries/0_stateless/00817_with_simple.sql b/tests/queries/0_stateless/00817_with_simple.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00817_with_simple.sql
rename to tests/queries/0_stateless/00817_with_simple.sql
diff --git a/dbms/tests/queries/0_stateless/00818_alias_bug_4110.reference b/tests/queries/0_stateless/00818_alias_bug_4110.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00818_alias_bug_4110.reference
rename to tests/queries/0_stateless/00818_alias_bug_4110.reference
diff --git a/dbms/tests/queries/0_stateless/00818_alias_bug_4110.sql b/tests/queries/0_stateless/00818_alias_bug_4110.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00818_alias_bug_4110.sql
rename to tests/queries/0_stateless/00818_alias_bug_4110.sql
diff --git a/dbms/tests/queries/0_stateless/00818_inner_join_bug_3567.reference b/tests/queries/0_stateless/00818_inner_join_bug_3567.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00818_inner_join_bug_3567.reference
rename to tests/queries/0_stateless/00818_inner_join_bug_3567.reference
diff --git a/dbms/tests/queries/0_stateless/00818_inner_join_bug_3567.sql b/tests/queries/0_stateless/00818_inner_join_bug_3567.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00818_inner_join_bug_3567.sql
rename to tests/queries/0_stateless/00818_inner_join_bug_3567.sql
diff --git a/dbms/tests/queries/0_stateless/00818_join_bug_4271.reference b/tests/queries/0_stateless/00818_join_bug_4271.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00818_join_bug_4271.reference
rename to tests/queries/0_stateless/00818_join_bug_4271.reference
diff --git a/dbms/tests/queries/0_stateless/00818_join_bug_4271.sql b/tests/queries/0_stateless/00818_join_bug_4271.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00818_join_bug_4271.sql
rename to tests/queries/0_stateless/00818_join_bug_4271.sql
diff --git a/dbms/tests/queries/0_stateless/00819_ast_refactoring_bugs.reference b/tests/queries/0_stateless/00819_ast_refactoring_bugs.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00819_ast_refactoring_bugs.reference
rename to tests/queries/0_stateless/00819_ast_refactoring_bugs.reference
diff --git a/dbms/tests/queries/0_stateless/00819_ast_refactoring_bugs.sql b/tests/queries/0_stateless/00819_ast_refactoring_bugs.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00819_ast_refactoring_bugs.sql
rename to tests/queries/0_stateless/00819_ast_refactoring_bugs.sql
diff --git a/dbms/tests/queries/0_stateless/00819_full_join_wrong_columns_in_block.reference b/tests/queries/0_stateless/00819_full_join_wrong_columns_in_block.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00819_full_join_wrong_columns_in_block.reference
rename to tests/queries/0_stateless/00819_full_join_wrong_columns_in_block.reference
diff --git a/dbms/tests/queries/0_stateless/00819_full_join_wrong_columns_in_block.sql b/tests/queries/0_stateless/00819_full_join_wrong_columns_in_block.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00819_full_join_wrong_columns_in_block.sql
rename to tests/queries/0_stateless/00819_full_join_wrong_columns_in_block.sql
diff --git a/dbms/tests/queries/0_stateless/00820_multiple_joins.reference b/tests/queries/0_stateless/00820_multiple_joins.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00820_multiple_joins.reference
rename to tests/queries/0_stateless/00820_multiple_joins.reference
diff --git a/dbms/tests/queries/0_stateless/00820_multiple_joins.sql b/tests/queries/0_stateless/00820_multiple_joins.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00820_multiple_joins.sql
rename to tests/queries/0_stateless/00820_multiple_joins.sql
diff --git a/dbms/tests/queries/0_stateless/00820_multiple_joins_subquery_requires_alias.reference b/tests/queries/0_stateless/00820_multiple_joins_subquery_requires_alias.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00820_multiple_joins_subquery_requires_alias.reference
rename to tests/queries/0_stateless/00820_multiple_joins_subquery_requires_alias.reference
diff --git a/dbms/tests/queries/0_stateless/00820_multiple_joins_subquery_requires_alias.sql b/tests/queries/0_stateless/00820_multiple_joins_subquery_requires_alias.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00820_multiple_joins_subquery_requires_alias.sql
rename to tests/queries/0_stateless/00820_multiple_joins_subquery_requires_alias.sql
diff --git a/dbms/tests/queries/0_stateless/00821_distributed_storage_with_join_on.reference b/tests/queries/0_stateless/00821_distributed_storage_with_join_on.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00821_distributed_storage_with_join_on.reference
rename to tests/queries/0_stateless/00821_distributed_storage_with_join_on.reference
diff --git a/dbms/tests/queries/0_stateless/00821_distributed_storage_with_join_on.sql b/tests/queries/0_stateless/00821_distributed_storage_with_join_on.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00821_distributed_storage_with_join_on.sql
rename to tests/queries/0_stateless/00821_distributed_storage_with_join_on.sql
diff --git a/dbms/tests/queries/0_stateless/00822_array_insert_default.reference b/tests/queries/0_stateless/00822_array_insert_default.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00822_array_insert_default.reference
rename to tests/queries/0_stateless/00822_array_insert_default.reference
diff --git a/dbms/tests/queries/0_stateless/00822_array_insert_default.sql b/tests/queries/0_stateless/00822_array_insert_default.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00822_array_insert_default.sql
rename to tests/queries/0_stateless/00822_array_insert_default.sql
diff --git a/dbms/tests/queries/0_stateless/00823_capnproto_input.reference b/tests/queries/0_stateless/00823_capnproto_input.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00823_capnproto_input.reference
rename to tests/queries/0_stateless/00823_capnproto_input.reference
diff --git a/dbms/tests/queries/0_stateless/00823_capnproto_input.sh b/tests/queries/0_stateless/00823_capnproto_input.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00823_capnproto_input.sh
rename to tests/queries/0_stateless/00823_capnproto_input.sh
diff --git a/dbms/tests/queries/0_stateless/00823_sequence_match_dfa.reference b/tests/queries/0_stateless/00823_sequence_match_dfa.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00823_sequence_match_dfa.reference
rename to tests/queries/0_stateless/00823_sequence_match_dfa.reference
diff --git a/dbms/tests/queries/0_stateless/00823_sequence_match_dfa.sql b/tests/queries/0_stateless/00823_sequence_match_dfa.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00823_sequence_match_dfa.sql
rename to tests/queries/0_stateless/00823_sequence_match_dfa.sql
diff --git a/dbms/tests/queries/0_stateless/00824_filesystem.reference b/tests/queries/0_stateless/00824_filesystem.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00824_filesystem.reference
rename to tests/queries/0_stateless/00824_filesystem.reference
diff --git a/dbms/tests/queries/0_stateless/00824_filesystem.sql b/tests/queries/0_stateless/00824_filesystem.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00824_filesystem.sql
rename to tests/queries/0_stateless/00824_filesystem.sql
diff --git a/dbms/tests/queries/0_stateless/00825_http_header_query_id.reference b/tests/queries/0_stateless/00825_http_header_query_id.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00825_http_header_query_id.reference
rename to tests/queries/0_stateless/00825_http_header_query_id.reference
diff --git a/dbms/tests/queries/0_stateless/00825_http_header_query_id.sh b/tests/queries/0_stateless/00825_http_header_query_id.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00825_http_header_query_id.sh
rename to tests/queries/0_stateless/00825_http_header_query_id.sh
diff --git a/dbms/tests/queries/0_stateless/00825_protobuf_format.proto b/tests/queries/0_stateless/00825_protobuf_format.proto
similarity index 100%
rename from dbms/tests/queries/0_stateless/00825_protobuf_format.proto
rename to tests/queries/0_stateless/00825_protobuf_format.proto
diff --git a/dbms/tests/queries/0_stateless/00825_protobuf_format_input.insh b/tests/queries/0_stateless/00825_protobuf_format_input.insh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00825_protobuf_format_input.insh
rename to tests/queries/0_stateless/00825_protobuf_format_input.insh
diff --git a/dbms/tests/queries/0_stateless/00825_protobuf_format_input.reference b/tests/queries/0_stateless/00825_protobuf_format_input.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00825_protobuf_format_input.reference
rename to tests/queries/0_stateless/00825_protobuf_format_input.reference
diff --git a/dbms/tests/queries/0_stateless/00825_protobuf_format_input.sh b/tests/queries/0_stateless/00825_protobuf_format_input.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00825_protobuf_format_input.sh
rename to tests/queries/0_stateless/00825_protobuf_format_input.sh
diff --git a/dbms/tests/queries/0_stateless/00825_protobuf_format_output.reference b/tests/queries/0_stateless/00825_protobuf_format_output.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00825_protobuf_format_output.reference
rename to tests/queries/0_stateless/00825_protobuf_format_output.reference
diff --git a/dbms/tests/queries/0_stateless/00825_protobuf_format_output.sh b/tests/queries/0_stateless/00825_protobuf_format_output.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00825_protobuf_format_output.sh
rename to tests/queries/0_stateless/00825_protobuf_format_output.sh
diff --git a/dbms/tests/queries/0_stateless/00825_protobuf_format_syntax2.proto b/tests/queries/0_stateless/00825_protobuf_format_syntax2.proto
similarity index 100%
rename from dbms/tests/queries/0_stateless/00825_protobuf_format_syntax2.proto
rename to tests/queries/0_stateless/00825_protobuf_format_syntax2.proto
diff --git a/dbms/tests/queries/0_stateless/00826_cross_to_inner_join.reference b/tests/queries/0_stateless/00826_cross_to_inner_join.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00826_cross_to_inner_join.reference
rename to tests/queries/0_stateless/00826_cross_to_inner_join.reference
diff --git a/dbms/tests/queries/0_stateless/00826_cross_to_inner_join.sql b/tests/queries/0_stateless/00826_cross_to_inner_join.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00826_cross_to_inner_join.sql
rename to tests/queries/0_stateless/00826_cross_to_inner_join.sql
diff --git a/dbms/tests/queries/0_stateless/00829_bitmap_function.reference b/tests/queries/0_stateless/00829_bitmap_function.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00829_bitmap_function.reference
rename to tests/queries/0_stateless/00829_bitmap_function.reference
diff --git a/dbms/tests/queries/0_stateless/00829_bitmap_function.sql b/tests/queries/0_stateless/00829_bitmap_function.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00829_bitmap_function.sql
rename to tests/queries/0_stateless/00829_bitmap_function.sql
diff --git a/dbms/tests/queries/0_stateless/00830_join_overwrite.reference b/tests/queries/0_stateless/00830_join_overwrite.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00830_join_overwrite.reference
rename to tests/queries/0_stateless/00830_join_overwrite.reference
diff --git a/dbms/tests/queries/0_stateless/00830_join_overwrite.sql b/tests/queries/0_stateless/00830_join_overwrite.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00830_join_overwrite.sql
rename to tests/queries/0_stateless/00830_join_overwrite.sql
diff --git a/dbms/tests/queries/0_stateless/00831_quantile_weighted_parameter_check.reference b/tests/queries/0_stateless/00831_quantile_weighted_parameter_check.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00831_quantile_weighted_parameter_check.reference
rename to tests/queries/0_stateless/00831_quantile_weighted_parameter_check.reference
diff --git a/dbms/tests/queries/0_stateless/00831_quantile_weighted_parameter_check.sql b/tests/queries/0_stateless/00831_quantile_weighted_parameter_check.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00831_quantile_weighted_parameter_check.sql
rename to tests/queries/0_stateless/00831_quantile_weighted_parameter_check.sql
diff --git a/dbms/tests/queries/0_stateless/00832_storage_file_lock.reference b/tests/queries/0_stateless/00832_storage_file_lock.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00832_storage_file_lock.reference
rename to tests/queries/0_stateless/00832_storage_file_lock.reference
diff --git a/dbms/tests/queries/0_stateless/00832_storage_file_lock.sql b/tests/queries/0_stateless/00832_storage_file_lock.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00832_storage_file_lock.sql
rename to tests/queries/0_stateless/00832_storage_file_lock.sql
diff --git a/dbms/tests/queries/0_stateless/00833_sleep_overflow.reference b/tests/queries/0_stateless/00833_sleep_overflow.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00833_sleep_overflow.reference
rename to tests/queries/0_stateless/00833_sleep_overflow.reference
diff --git a/dbms/tests/queries/0_stateless/00833_sleep_overflow.sql b/tests/queries/0_stateless/00833_sleep_overflow.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00833_sleep_overflow.sql
rename to tests/queries/0_stateless/00833_sleep_overflow.sql
diff --git a/dbms/tests/queries/0_stateless/00834_cancel_http_readonly_queries_on_client_close.reference b/tests/queries/0_stateless/00834_cancel_http_readonly_queries_on_client_close.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00834_cancel_http_readonly_queries_on_client_close.reference
rename to tests/queries/0_stateless/00834_cancel_http_readonly_queries_on_client_close.reference
diff --git a/dbms/tests/queries/0_stateless/00834_cancel_http_readonly_queries_on_client_close.sh b/tests/queries/0_stateless/00834_cancel_http_readonly_queries_on_client_close.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00834_cancel_http_readonly_queries_on_client_close.sh
rename to tests/queries/0_stateless/00834_cancel_http_readonly_queries_on_client_close.sh
diff --git a/dbms/tests/queries/0_stateless/00834_date_datetime_cmp.reference b/tests/queries/0_stateless/00834_date_datetime_cmp.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00834_date_datetime_cmp.reference
rename to tests/queries/0_stateless/00834_date_datetime_cmp.reference
diff --git a/dbms/tests/queries/0_stateless/00834_date_datetime_cmp.sql b/tests/queries/0_stateless/00834_date_datetime_cmp.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00834_date_datetime_cmp.sql
rename to tests/queries/0_stateless/00834_date_datetime_cmp.sql
diff --git a/dbms/tests/queries/0_stateless/00834_dont_allow_to_set_two_configuration_files_in_client.reference b/tests/queries/0_stateless/00834_dont_allow_to_set_two_configuration_files_in_client.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00834_dont_allow_to_set_two_configuration_files_in_client.reference
rename to tests/queries/0_stateless/00834_dont_allow_to_set_two_configuration_files_in_client.reference
diff --git a/dbms/tests/queries/0_stateless/00834_dont_allow_to_set_two_configuration_files_in_client.sh b/tests/queries/0_stateless/00834_dont_allow_to_set_two_configuration_files_in_client.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00834_dont_allow_to_set_two_configuration_files_in_client.sh
rename to tests/queries/0_stateless/00834_dont_allow_to_set_two_configuration_files_in_client.sh
diff --git a/dbms/tests/queries/0_stateless/00834_hints_for_type_function_typos.reference b/tests/queries/0_stateless/00834_hints_for_type_function_typos.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00834_hints_for_type_function_typos.reference
rename to tests/queries/0_stateless/00834_hints_for_type_function_typos.reference
diff --git a/dbms/tests/queries/0_stateless/00834_hints_for_type_function_typos.sh b/tests/queries/0_stateless/00834_hints_for_type_function_typos.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00834_hints_for_type_function_typos.sh
rename to tests/queries/0_stateless/00834_hints_for_type_function_typos.sh
diff --git a/dbms/tests/queries/0_stateless/00834_kill_mutation.reference b/tests/queries/0_stateless/00834_kill_mutation.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00834_kill_mutation.reference
rename to tests/queries/0_stateless/00834_kill_mutation.reference
diff --git a/dbms/tests/queries/0_stateless/00834_kill_mutation.sh b/tests/queries/0_stateless/00834_kill_mutation.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00834_kill_mutation.sh
rename to tests/queries/0_stateless/00834_kill_mutation.sh
diff --git a/dbms/tests/queries/0_stateless/00834_kill_mutation_replicated_zookeeper.reference b/tests/queries/0_stateless/00834_kill_mutation_replicated_zookeeper.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00834_kill_mutation_replicated_zookeeper.reference
rename to tests/queries/0_stateless/00834_kill_mutation_replicated_zookeeper.reference
diff --git a/dbms/tests/queries/0_stateless/00834_kill_mutation_replicated_zookeeper.sh b/tests/queries/0_stateless/00834_kill_mutation_replicated_zookeeper.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00834_kill_mutation_replicated_zookeeper.sh
rename to tests/queries/0_stateless/00834_kill_mutation_replicated_zookeeper.sh
diff --git a/dbms/tests/queries/0_stateless/00834_limit_with_constant_expressions.reference b/tests/queries/0_stateless/00834_limit_with_constant_expressions.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00834_limit_with_constant_expressions.reference
rename to tests/queries/0_stateless/00834_limit_with_constant_expressions.reference
diff --git a/dbms/tests/queries/0_stateless/00834_limit_with_constant_expressions.sql b/tests/queries/0_stateless/00834_limit_with_constant_expressions.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00834_limit_with_constant_expressions.sql
rename to tests/queries/0_stateless/00834_limit_with_constant_expressions.sql
diff --git a/dbms/tests/queries/0_stateless/00834_not_between.reference b/tests/queries/0_stateless/00834_not_between.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00834_not_between.reference
rename to tests/queries/0_stateless/00834_not_between.reference
diff --git a/dbms/tests/queries/0_stateless/00834_not_between.sql b/tests/queries/0_stateless/00834_not_between.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00834_not_between.sql
rename to tests/queries/0_stateless/00834_not_between.sql
diff --git a/dbms/tests/queries/0_stateless/00835_if_generic_case.reference b/tests/queries/0_stateless/00835_if_generic_case.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00835_if_generic_case.reference
rename to tests/queries/0_stateless/00835_if_generic_case.reference
diff --git a/dbms/tests/queries/0_stateless/00835_if_generic_case.sql b/tests/queries/0_stateless/00835_if_generic_case.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00835_if_generic_case.sql
rename to tests/queries/0_stateless/00835_if_generic_case.sql
diff --git a/dbms/tests/queries/0_stateless/00836_indices_alter.reference b/tests/queries/0_stateless/00836_indices_alter.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00836_indices_alter.reference
rename to tests/queries/0_stateless/00836_indices_alter.reference
diff --git a/dbms/tests/queries/0_stateless/00836_indices_alter.sql b/tests/queries/0_stateless/00836_indices_alter.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00836_indices_alter.sql
rename to tests/queries/0_stateless/00836_indices_alter.sql
diff --git a/dbms/tests/queries/0_stateless/00836_indices_alter_replicated_zookeeper.reference b/tests/queries/0_stateless/00836_indices_alter_replicated_zookeeper.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00836_indices_alter_replicated_zookeeper.reference
rename to tests/queries/0_stateless/00836_indices_alter_replicated_zookeeper.reference
diff --git a/dbms/tests/queries/0_stateless/00836_indices_alter_replicated_zookeeper.sql b/tests/queries/0_stateless/00836_indices_alter_replicated_zookeeper.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00836_indices_alter_replicated_zookeeper.sql
rename to tests/queries/0_stateless/00836_indices_alter_replicated_zookeeper.sql
diff --git a/dbms/tests/queries/0_stateless/00836_numbers_table_function_zero.reference b/tests/queries/0_stateless/00836_numbers_table_function_zero.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00836_numbers_table_function_zero.reference
rename to tests/queries/0_stateless/00836_numbers_table_function_zero.reference
diff --git a/dbms/tests/queries/0_stateless/00836_numbers_table_function_zero.sql b/tests/queries/0_stateless/00836_numbers_table_function_zero.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00836_numbers_table_function_zero.sql
rename to tests/queries/0_stateless/00836_numbers_table_function_zero.sql
diff --git a/dbms/tests/queries/0_stateless/00837_insert_select_and_read_prefix.reference b/tests/queries/0_stateless/00837_insert_select_and_read_prefix.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00837_insert_select_and_read_prefix.reference
rename to tests/queries/0_stateless/00837_insert_select_and_read_prefix.reference
diff --git a/dbms/tests/queries/0_stateless/00837_insert_select_and_read_prefix.sql b/tests/queries/0_stateless/00837_insert_select_and_read_prefix.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00837_insert_select_and_read_prefix.sql
rename to tests/queries/0_stateless/00837_insert_select_and_read_prefix.sql
diff --git a/dbms/tests/queries/0_stateless/00837_minmax_index.reference b/tests/queries/0_stateless/00837_minmax_index.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00837_minmax_index.reference
rename to tests/queries/0_stateless/00837_minmax_index.reference
diff --git a/dbms/tests/queries/0_stateless/00837_minmax_index.sh b/tests/queries/0_stateless/00837_minmax_index.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00837_minmax_index.sh
rename to tests/queries/0_stateless/00837_minmax_index.sh
diff --git a/dbms/tests/queries/0_stateless/00837_minmax_index_replicated_zookeeper.reference b/tests/queries/0_stateless/00837_minmax_index_replicated_zookeeper.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00837_minmax_index_replicated_zookeeper.reference
rename to tests/queries/0_stateless/00837_minmax_index_replicated_zookeeper.reference
diff --git a/dbms/tests/queries/0_stateless/00837_minmax_index_replicated_zookeeper.sql b/tests/queries/0_stateless/00837_minmax_index_replicated_zookeeper.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00837_minmax_index_replicated_zookeeper.sql
rename to tests/queries/0_stateless/00837_minmax_index_replicated_zookeeper.sql
diff --git a/dbms/tests/queries/0_stateless/00838_system_tables_drop_table_race.reference b/tests/queries/0_stateless/00838_system_tables_drop_table_race.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00838_system_tables_drop_table_race.reference
rename to tests/queries/0_stateless/00838_system_tables_drop_table_race.reference
diff --git a/dbms/tests/queries/0_stateless/00838_system_tables_drop_table_race.sh b/tests/queries/0_stateless/00838_system_tables_drop_table_race.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00838_system_tables_drop_table_race.sh
rename to tests/queries/0_stateless/00838_system_tables_drop_table_race.sh
diff --git a/dbms/tests/queries/0_stateless/00838_unique_index.reference b/tests/queries/0_stateless/00838_unique_index.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00838_unique_index.reference
rename to tests/queries/0_stateless/00838_unique_index.reference
diff --git a/dbms/tests/queries/0_stateless/00838_unique_index.sh b/tests/queries/0_stateless/00838_unique_index.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00838_unique_index.sh
rename to tests/queries/0_stateless/00838_unique_index.sh
diff --git a/dbms/tests/queries/0_stateless/00839_bitmask_negative.reference b/tests/queries/0_stateless/00839_bitmask_negative.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00839_bitmask_negative.reference
rename to tests/queries/0_stateless/00839_bitmask_negative.reference
diff --git a/dbms/tests/queries/0_stateless/00839_bitmask_negative.sql b/tests/queries/0_stateless/00839_bitmask_negative.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00839_bitmask_negative.sql
rename to tests/queries/0_stateless/00839_bitmask_negative.sql
diff --git a/dbms/tests/queries/0_stateless/00840_long_concurrent_select_and_drop_deadlock.reference b/tests/queries/0_stateless/00840_long_concurrent_select_and_drop_deadlock.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00840_long_concurrent_select_and_drop_deadlock.reference
rename to tests/queries/0_stateless/00840_long_concurrent_select_and_drop_deadlock.reference
diff --git a/dbms/tests/queries/0_stateless/00840_long_concurrent_select_and_drop_deadlock.sh b/tests/queries/0_stateless/00840_long_concurrent_select_and_drop_deadlock.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00840_long_concurrent_select_and_drop_deadlock.sh
rename to tests/queries/0_stateless/00840_long_concurrent_select_and_drop_deadlock.sh
diff --git a/dbms/tests/queries/0_stateless/00840_top_k_weighted.reference b/tests/queries/0_stateless/00840_top_k_weighted.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00840_top_k_weighted.reference
rename to tests/queries/0_stateless/00840_top_k_weighted.reference
diff --git a/dbms/tests/queries/0_stateless/00840_top_k_weighted.sql b/tests/queries/0_stateless/00840_top_k_weighted.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00840_top_k_weighted.sql
rename to tests/queries/0_stateless/00840_top_k_weighted.sql
diff --git a/dbms/tests/queries/0_stateless/00841_temporary_table_database.reference b/tests/queries/0_stateless/00841_temporary_table_database.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00841_temporary_table_database.reference
rename to tests/queries/0_stateless/00841_temporary_table_database.reference
diff --git a/dbms/tests/queries/0_stateless/00841_temporary_table_database.sql b/tests/queries/0_stateless/00841_temporary_table_database.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00841_temporary_table_database.sql
rename to tests/queries/0_stateless/00841_temporary_table_database.sql
diff --git a/dbms/tests/queries/0_stateless/00842_array_with_constant_overflow.reference b/tests/queries/0_stateless/00842_array_with_constant_overflow.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00842_array_with_constant_overflow.reference
rename to tests/queries/0_stateless/00842_array_with_constant_overflow.reference
diff --git a/dbms/tests/queries/0_stateless/00842_array_with_constant_overflow.sql b/tests/queries/0_stateless/00842_array_with_constant_overflow.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00842_array_with_constant_overflow.sql
rename to tests/queries/0_stateless/00842_array_with_constant_overflow.sql
diff --git a/dbms/tests/queries/0_stateless/00843_optimize_predicate_and_rename_table.reference b/tests/queries/0_stateless/00843_optimize_predicate_and_rename_table.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00843_optimize_predicate_and_rename_table.reference
rename to tests/queries/0_stateless/00843_optimize_predicate_and_rename_table.reference
diff --git a/dbms/tests/queries/0_stateless/00843_optimize_predicate_and_rename_table.sql b/tests/queries/0_stateless/00843_optimize_predicate_and_rename_table.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00843_optimize_predicate_and_rename_table.sql
rename to tests/queries/0_stateless/00843_optimize_predicate_and_rename_table.sql
diff --git a/dbms/tests/queries/0_stateless/00844_join_lightee2.reference b/tests/queries/0_stateless/00844_join_lightee2.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00844_join_lightee2.reference
rename to tests/queries/0_stateless/00844_join_lightee2.reference
diff --git a/dbms/tests/queries/0_stateless/00844_join_lightee2.sql b/tests/queries/0_stateless/00844_join_lightee2.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00844_join_lightee2.sql
rename to tests/queries/0_stateless/00844_join_lightee2.sql
diff --git a/dbms/tests/queries/0_stateless/00845_join_on_aliases.reference b/tests/queries/0_stateless/00845_join_on_aliases.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00845_join_on_aliases.reference
rename to tests/queries/0_stateless/00845_join_on_aliases.reference
diff --git a/dbms/tests/queries/0_stateless/00845_join_on_aliases.sql b/tests/queries/0_stateless/00845_join_on_aliases.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00845_join_on_aliases.sql
rename to tests/queries/0_stateless/00845_join_on_aliases.sql
diff --git a/dbms/tests/queries/0_stateless/00846_join_using_tuple_crash.reference b/tests/queries/0_stateless/00846_join_using_tuple_crash.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00846_join_using_tuple_crash.reference
rename to tests/queries/0_stateless/00846_join_using_tuple_crash.reference
diff --git a/dbms/tests/queries/0_stateless/00846_join_using_tuple_crash.sql b/tests/queries/0_stateless/00846_join_using_tuple_crash.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00846_join_using_tuple_crash.sql
rename to tests/queries/0_stateless/00846_join_using_tuple_crash.sql
diff --git a/dbms/tests/queries/0_stateless/00847_multiple_join_same_column.reference b/tests/queries/0_stateless/00847_multiple_join_same_column.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00847_multiple_join_same_column.reference
rename to tests/queries/0_stateless/00847_multiple_join_same_column.reference
diff --git a/dbms/tests/queries/0_stateless/00847_multiple_join_same_column.sql b/tests/queries/0_stateless/00847_multiple_join_same_column.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00847_multiple_join_same_column.sql
rename to tests/queries/0_stateless/00847_multiple_join_same_column.sql
diff --git a/dbms/tests/queries/0_stateless/00848_join_use_nulls_segfault.reference b/tests/queries/0_stateless/00848_join_use_nulls_segfault.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00848_join_use_nulls_segfault.reference
rename to tests/queries/0_stateless/00848_join_use_nulls_segfault.reference
diff --git a/dbms/tests/queries/0_stateless/00848_join_use_nulls_segfault.sql b/tests/queries/0_stateless/00848_join_use_nulls_segfault.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00848_join_use_nulls_segfault.sql
rename to tests/queries/0_stateless/00848_join_use_nulls_segfault.sql
diff --git a/dbms/tests/queries/0_stateless/00849_multiple_comma_join.reference b/tests/queries/0_stateless/00849_multiple_comma_join.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00849_multiple_comma_join.reference
rename to tests/queries/0_stateless/00849_multiple_comma_join.reference
diff --git a/dbms/tests/queries/0_stateless/00849_multiple_comma_join.sql b/tests/queries/0_stateless/00849_multiple_comma_join.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00849_multiple_comma_join.sql
rename to tests/queries/0_stateless/00849_multiple_comma_join.sql
diff --git a/dbms/tests/queries/0_stateless/00849_multiple_comma_join_2.reference b/tests/queries/0_stateless/00849_multiple_comma_join_2.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00849_multiple_comma_join_2.reference
rename to tests/queries/0_stateless/00849_multiple_comma_join_2.reference
diff --git a/dbms/tests/queries/0_stateless/00849_multiple_comma_join_2.sql b/tests/queries/0_stateless/00849_multiple_comma_join_2.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00849_multiple_comma_join_2.sql
rename to tests/queries/0_stateless/00849_multiple_comma_join_2.sql
diff --git a/dbms/tests/queries/0_stateless/00850_global_join_dups.reference b/tests/queries/0_stateless/00850_global_join_dups.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00850_global_join_dups.reference
rename to tests/queries/0_stateless/00850_global_join_dups.reference
diff --git a/dbms/tests/queries/0_stateless/00850_global_join_dups.sql b/tests/queries/0_stateless/00850_global_join_dups.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00850_global_join_dups.sql
rename to tests/queries/0_stateless/00850_global_join_dups.sql
diff --git a/dbms/tests/queries/0_stateless/00851_http_insert_json_defaults.reference b/tests/queries/0_stateless/00851_http_insert_json_defaults.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00851_http_insert_json_defaults.reference
rename to tests/queries/0_stateless/00851_http_insert_json_defaults.reference
diff --git a/dbms/tests/queries/0_stateless/00851_http_insert_json_defaults.sh b/tests/queries/0_stateless/00851_http_insert_json_defaults.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00851_http_insert_json_defaults.sh
rename to tests/queries/0_stateless/00851_http_insert_json_defaults.sh
diff --git a/dbms/tests/queries/0_stateless/00852_any_join_nulls.reference b/tests/queries/0_stateless/00852_any_join_nulls.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00852_any_join_nulls.reference
rename to tests/queries/0_stateless/00852_any_join_nulls.reference
diff --git a/dbms/tests/queries/0_stateless/00852_any_join_nulls.sql b/tests/queries/0_stateless/00852_any_join_nulls.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00852_any_join_nulls.sql
rename to tests/queries/0_stateless/00852_any_join_nulls.sql
diff --git a/dbms/tests/queries/0_stateless/00853_join_with_nulls_crash.reference b/tests/queries/0_stateless/00853_join_with_nulls_crash.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00853_join_with_nulls_crash.reference
rename to tests/queries/0_stateless/00853_join_with_nulls_crash.reference
diff --git a/dbms/tests/queries/0_stateless/00853_join_with_nulls_crash.sql b/tests/queries/0_stateless/00853_join_with_nulls_crash.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00853_join_with_nulls_crash.sql
rename to tests/queries/0_stateless/00853_join_with_nulls_crash.sql
diff --git a/dbms/tests/queries/0_stateless/00854_multiple_join_asterisks.reference b/tests/queries/0_stateless/00854_multiple_join_asterisks.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00854_multiple_join_asterisks.reference
rename to tests/queries/0_stateless/00854_multiple_join_asterisks.reference
diff --git a/dbms/tests/queries/0_stateless/00854_multiple_join_asterisks.sql b/tests/queries/0_stateless/00854_multiple_join_asterisks.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00854_multiple_join_asterisks.sql
rename to tests/queries/0_stateless/00854_multiple_join_asterisks.sql
diff --git a/dbms/tests/queries/0_stateless/00855_join_with_array_join.reference b/tests/queries/0_stateless/00855_join_with_array_join.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00855_join_with_array_join.reference
rename to tests/queries/0_stateless/00855_join_with_array_join.reference
diff --git a/dbms/tests/queries/0_stateless/00855_join_with_array_join.sql b/tests/queries/0_stateless/00855_join_with_array_join.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00855_join_with_array_join.sql
rename to tests/queries/0_stateless/00855_join_with_array_join.sql
diff --git a/dbms/tests/queries/0_stateless/00856_no_column_issue_4242.reference b/tests/queries/0_stateless/00856_no_column_issue_4242.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00856_no_column_issue_4242.reference
rename to tests/queries/0_stateless/00856_no_column_issue_4242.reference
diff --git a/dbms/tests/queries/0_stateless/00856_no_column_issue_4242.sql b/tests/queries/0_stateless/00856_no_column_issue_4242.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00856_no_column_issue_4242.sql
rename to tests/queries/0_stateless/00856_no_column_issue_4242.sql
diff --git a/dbms/tests/queries/0_stateless/00857_global_joinsavel_table_alias.reference b/tests/queries/0_stateless/00857_global_joinsavel_table_alias.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00857_global_joinsavel_table_alias.reference
rename to tests/queries/0_stateless/00857_global_joinsavel_table_alias.reference
diff --git a/dbms/tests/queries/0_stateless/00857_global_joinsavel_table_alias.sql b/tests/queries/0_stateless/00857_global_joinsavel_table_alias.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00857_global_joinsavel_table_alias.sql
rename to tests/queries/0_stateless/00857_global_joinsavel_table_alias.sql
diff --git a/dbms/tests/queries/0_stateless/00858_issue_4756.reference b/tests/queries/0_stateless/00858_issue_4756.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00858_issue_4756.reference
rename to tests/queries/0_stateless/00858_issue_4756.reference
diff --git a/dbms/tests/queries/0_stateless/00858_issue_4756.sql b/tests/queries/0_stateless/00858_issue_4756.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00858_issue_4756.sql
rename to tests/queries/0_stateless/00858_issue_4756.sql
diff --git a/dbms/tests/queries/0_stateless/00859_distinct_with_join.reference b/tests/queries/0_stateless/00859_distinct_with_join.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00859_distinct_with_join.reference
rename to tests/queries/0_stateless/00859_distinct_with_join.reference
diff --git a/dbms/tests/queries/0_stateless/00859_distinct_with_join.sql b/tests/queries/0_stateless/00859_distinct_with_join.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00859_distinct_with_join.sql
rename to tests/queries/0_stateless/00859_distinct_with_join.sql
diff --git a/dbms/tests/queries/0_stateless/00860_unknown_identifier_bug.reference b/tests/queries/0_stateless/00860_unknown_identifier_bug.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00860_unknown_identifier_bug.reference
rename to tests/queries/0_stateless/00860_unknown_identifier_bug.reference
diff --git a/dbms/tests/queries/0_stateless/00860_unknown_identifier_bug.sql b/tests/queries/0_stateless/00860_unknown_identifier_bug.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00860_unknown_identifier_bug.sql
rename to tests/queries/0_stateless/00860_unknown_identifier_bug.sql
diff --git a/dbms/tests/queries/0_stateless/00861_decimal_quoted_csv.reference b/tests/queries/0_stateless/00861_decimal_quoted_csv.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00861_decimal_quoted_csv.reference
rename to tests/queries/0_stateless/00861_decimal_quoted_csv.reference
diff --git a/dbms/tests/queries/0_stateless/00861_decimal_quoted_csv.sql b/tests/queries/0_stateless/00861_decimal_quoted_csv.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00861_decimal_quoted_csv.sql
rename to tests/queries/0_stateless/00861_decimal_quoted_csv.sql
diff --git a/dbms/tests/queries/0_stateless/00862_decimal_in.reference b/tests/queries/0_stateless/00862_decimal_in.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00862_decimal_in.reference
rename to tests/queries/0_stateless/00862_decimal_in.reference
diff --git a/dbms/tests/queries/0_stateless/00862_decimal_in.sql b/tests/queries/0_stateless/00862_decimal_in.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00862_decimal_in.sql
rename to tests/queries/0_stateless/00862_decimal_in.sql
diff --git a/dbms/tests/queries/0_stateless/00863_comma_join_in.reference b/tests/queries/0_stateless/00863_comma_join_in.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00863_comma_join_in.reference
rename to tests/queries/0_stateless/00863_comma_join_in.reference
diff --git a/dbms/tests/queries/0_stateless/00863_comma_join_in.sql b/tests/queries/0_stateless/00863_comma_join_in.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00863_comma_join_in.sql
rename to tests/queries/0_stateless/00863_comma_join_in.sql
diff --git a/dbms/tests/queries/0_stateless/00864_union_all_supertype.reference b/tests/queries/0_stateless/00864_union_all_supertype.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00864_union_all_supertype.reference
rename to tests/queries/0_stateless/00864_union_all_supertype.reference
diff --git a/dbms/tests/queries/0_stateless/00864_union_all_supertype.sql b/tests/queries/0_stateless/00864_union_all_supertype.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00864_union_all_supertype.sql
rename to tests/queries/0_stateless/00864_union_all_supertype.sql
diff --git a/dbms/tests/queries/0_stateless/00870_t64_codec.reference b/tests/queries/0_stateless/00870_t64_codec.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00870_t64_codec.reference
rename to tests/queries/0_stateless/00870_t64_codec.reference
diff --git a/dbms/tests/queries/0_stateless/00870_t64_codec.sql b/tests/queries/0_stateless/00870_t64_codec.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00870_t64_codec.sql
rename to tests/queries/0_stateless/00870_t64_codec.sql
diff --git a/dbms/tests/queries/0_stateless/00871_t64_codec_signed.reference b/tests/queries/0_stateless/00871_t64_codec_signed.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00871_t64_codec_signed.reference
rename to tests/queries/0_stateless/00871_t64_codec_signed.reference
diff --git a/dbms/tests/queries/0_stateless/00871_t64_codec_signed.sql b/tests/queries/0_stateless/00871_t64_codec_signed.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00871_t64_codec_signed.sql
rename to tests/queries/0_stateless/00871_t64_codec_signed.sql
diff --git a/dbms/tests/queries/0_stateless/00872_t64_bit_codec.reference b/tests/queries/0_stateless/00872_t64_bit_codec.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00872_t64_bit_codec.reference
rename to tests/queries/0_stateless/00872_t64_bit_codec.reference
diff --git a/dbms/tests/queries/0_stateless/00872_t64_bit_codec.sql b/tests/queries/0_stateless/00872_t64_bit_codec.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00872_t64_bit_codec.sql
rename to tests/queries/0_stateless/00872_t64_bit_codec.sql
diff --git a/dbms/tests/queries/0_stateless/00874_issue_3495.reference b/tests/queries/0_stateless/00874_issue_3495.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00874_issue_3495.reference
rename to tests/queries/0_stateless/00874_issue_3495.reference
diff --git a/dbms/tests/queries/0_stateless/00874_issue_3495.sql b/tests/queries/0_stateless/00874_issue_3495.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00874_issue_3495.sql
rename to tests/queries/0_stateless/00874_issue_3495.sql
diff --git a/dbms/tests/queries/0_stateless/00875_join_right_nulls.reference b/tests/queries/0_stateless/00875_join_right_nulls.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00875_join_right_nulls.reference
rename to tests/queries/0_stateless/00875_join_right_nulls.reference
diff --git a/dbms/tests/queries/0_stateless/00875_join_right_nulls.sql b/tests/queries/0_stateless/00875_join_right_nulls.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00875_join_right_nulls.sql
rename to tests/queries/0_stateless/00875_join_right_nulls.sql
diff --git a/dbms/tests/queries/0_stateless/00876_wrong_arraj_join_column.reference b/tests/queries/0_stateless/00876_wrong_arraj_join_column.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00876_wrong_arraj_join_column.reference
rename to tests/queries/0_stateless/00876_wrong_arraj_join_column.reference
diff --git a/dbms/tests/queries/0_stateless/00876_wrong_arraj_join_column.sql b/tests/queries/0_stateless/00876_wrong_arraj_join_column.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00876_wrong_arraj_join_column.sql
rename to tests/queries/0_stateless/00876_wrong_arraj_join_column.sql
diff --git a/dbms/tests/queries/0_stateless/00877_memory_limit_for_new_delete.reference b/tests/queries/0_stateless/00877_memory_limit_for_new_delete.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00877_memory_limit_for_new_delete.reference
rename to tests/queries/0_stateless/00877_memory_limit_for_new_delete.reference
diff --git a/dbms/tests/queries/0_stateless/00877_memory_limit_for_new_delete.sql b/tests/queries/0_stateless/00877_memory_limit_for_new_delete.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00877_memory_limit_for_new_delete.sql
rename to tests/queries/0_stateless/00877_memory_limit_for_new_delete.sql
diff --git a/dbms/tests/queries/0_stateless/00878_join_unexpected_results.reference b/tests/queries/0_stateless/00878_join_unexpected_results.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00878_join_unexpected_results.reference
rename to tests/queries/0_stateless/00878_join_unexpected_results.reference
diff --git a/dbms/tests/queries/0_stateless/00878_join_unexpected_results.sql b/tests/queries/0_stateless/00878_join_unexpected_results.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00878_join_unexpected_results.sql
rename to tests/queries/0_stateless/00878_join_unexpected_results.sql
diff --git a/dbms/tests/queries/0_stateless/00879_cast_to_decimal_crash.reference b/tests/queries/0_stateless/00879_cast_to_decimal_crash.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00879_cast_to_decimal_crash.reference
rename to tests/queries/0_stateless/00879_cast_to_decimal_crash.reference
diff --git a/dbms/tests/queries/0_stateless/00879_cast_to_decimal_crash.sql b/tests/queries/0_stateless/00879_cast_to_decimal_crash.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00879_cast_to_decimal_crash.sql
rename to tests/queries/0_stateless/00879_cast_to_decimal_crash.sql
diff --git a/dbms/tests/queries/0_stateless/00880_decimal_in_key.reference b/tests/queries/0_stateless/00880_decimal_in_key.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00880_decimal_in_key.reference
rename to tests/queries/0_stateless/00880_decimal_in_key.reference
diff --git a/dbms/tests/queries/0_stateless/00880_decimal_in_key.sql b/tests/queries/0_stateless/00880_decimal_in_key.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00880_decimal_in_key.sql
rename to tests/queries/0_stateless/00880_decimal_in_key.sql
diff --git a/dbms/tests/queries/0_stateless/00881_unknown_identifier_in_in.reference b/tests/queries/0_stateless/00881_unknown_identifier_in_in.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00881_unknown_identifier_in_in.reference
rename to tests/queries/0_stateless/00881_unknown_identifier_in_in.reference
diff --git a/dbms/tests/queries/0_stateless/00881_unknown_identifier_in_in.sql b/tests/queries/0_stateless/00881_unknown_identifier_in_in.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00881_unknown_identifier_in_in.sql
rename to tests/queries/0_stateless/00881_unknown_identifier_in_in.sql
diff --git a/dbms/tests/queries/0_stateless/00882_multiple_join_no_alias.reference b/tests/queries/0_stateless/00882_multiple_join_no_alias.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00882_multiple_join_no_alias.reference
rename to tests/queries/0_stateless/00882_multiple_join_no_alias.reference
diff --git a/dbms/tests/queries/0_stateless/00882_multiple_join_no_alias.sql b/tests/queries/0_stateless/00882_multiple_join_no_alias.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00882_multiple_join_no_alias.sql
rename to tests/queries/0_stateless/00882_multiple_join_no_alias.sql
diff --git a/dbms/tests/queries/0_stateless/00897_flatten.reference b/tests/queries/0_stateless/00897_flatten.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00897_flatten.reference
rename to tests/queries/0_stateless/00897_flatten.reference
diff --git a/dbms/tests/queries/0_stateless/00897_flatten.sql b/tests/queries/0_stateless/00897_flatten.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00897_flatten.sql
rename to tests/queries/0_stateless/00897_flatten.sql
diff --git a/dbms/tests/queries/0_stateless/00898_parsing_bad_diagnostic_message.reference b/tests/queries/0_stateless/00898_parsing_bad_diagnostic_message.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00898_parsing_bad_diagnostic_message.reference
rename to tests/queries/0_stateless/00898_parsing_bad_diagnostic_message.reference
diff --git a/dbms/tests/queries/0_stateless/00898_parsing_bad_diagnostic_message.sh b/tests/queries/0_stateless/00898_parsing_bad_diagnostic_message.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00898_parsing_bad_diagnostic_message.sh
rename to tests/queries/0_stateless/00898_parsing_bad_diagnostic_message.sh
diff --git a/dbms/tests/queries/0_stateless/00898_quantile_timing_parameter_check.reference b/tests/queries/0_stateless/00898_quantile_timing_parameter_check.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00898_quantile_timing_parameter_check.reference
rename to tests/queries/0_stateless/00898_quantile_timing_parameter_check.reference
diff --git a/dbms/tests/queries/0_stateless/00898_quantile_timing_parameter_check.sql b/tests/queries/0_stateless/00898_quantile_timing_parameter_check.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00898_quantile_timing_parameter_check.sql
rename to tests/queries/0_stateless/00898_quantile_timing_parameter_check.sql
diff --git a/dbms/tests/queries/0_stateless/00899_long_attach_memory_limit.reference b/tests/queries/0_stateless/00899_long_attach_memory_limit.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00899_long_attach_memory_limit.reference
rename to tests/queries/0_stateless/00899_long_attach_memory_limit.reference
diff --git a/dbms/tests/queries/0_stateless/00899_long_attach_memory_limit.sql b/tests/queries/0_stateless/00899_long_attach_memory_limit.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00899_long_attach_memory_limit.sql
rename to tests/queries/0_stateless/00899_long_attach_memory_limit.sql
diff --git a/dbms/tests/queries/0_stateless/00900_entropy_shard.reference b/tests/queries/0_stateless/00900_entropy_shard.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00900_entropy_shard.reference
rename to tests/queries/0_stateless/00900_entropy_shard.reference
diff --git a/dbms/tests/queries/0_stateless/00900_entropy_shard.sql b/tests/queries/0_stateless/00900_entropy_shard.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00900_entropy_shard.sql
rename to tests/queries/0_stateless/00900_entropy_shard.sql
diff --git a/dbms/tests/queries/0_stateless/00900_orc_load.reference b/tests/queries/0_stateless/00900_orc_load.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00900_orc_load.reference
rename to tests/queries/0_stateless/00900_orc_load.reference
diff --git a/dbms/tests/queries/0_stateless/00900_orc_load.sh b/tests/queries/0_stateless/00900_orc_load.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00900_orc_load.sh
rename to tests/queries/0_stateless/00900_orc_load.sh
diff --git a/dbms/tests/queries/0_stateless/00900_parquet.reference b/tests/queries/0_stateless/00900_parquet.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00900_parquet.reference
rename to tests/queries/0_stateless/00900_parquet.reference
diff --git a/dbms/tests/queries/0_stateless/00900_parquet.sh b/tests/queries/0_stateless/00900_parquet.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00900_parquet.sh
rename to tests/queries/0_stateless/00900_parquet.sh
diff --git a/dbms/tests/queries/0_stateless/00900_parquet_create_table_columns.pl b/tests/queries/0_stateless/00900_parquet_create_table_columns.pl
similarity index 100%
rename from dbms/tests/queries/0_stateless/00900_parquet_create_table_columns.pl
rename to tests/queries/0_stateless/00900_parquet_create_table_columns.pl
diff --git a/dbms/tests/queries/0_stateless/00900_parquet_decimal.reference b/tests/queries/0_stateless/00900_parquet_decimal.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00900_parquet_decimal.reference
rename to tests/queries/0_stateless/00900_parquet_decimal.reference
diff --git a/dbms/tests/queries/0_stateless/00900_parquet_decimal.sh b/tests/queries/0_stateless/00900_parquet_decimal.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00900_parquet_decimal.sh
rename to tests/queries/0_stateless/00900_parquet_decimal.sh
diff --git a/dbms/tests/queries/0_stateless/00900_parquet_load.reference b/tests/queries/0_stateless/00900_parquet_load.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00900_parquet_load.reference
rename to tests/queries/0_stateless/00900_parquet_load.reference
diff --git a/dbms/tests/queries/0_stateless/00900_parquet_load.sh b/tests/queries/0_stateless/00900_parquet_load.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00900_parquet_load.sh
rename to tests/queries/0_stateless/00900_parquet_load.sh
diff --git a/dbms/tests/queries/0_stateless/00901_joint_entropy.reference b/tests/queries/0_stateless/00901_joint_entropy.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00901_joint_entropy.reference
rename to tests/queries/0_stateless/00901_joint_entropy.reference
diff --git a/dbms/tests/queries/0_stateless/00901_joint_entropy.sql b/tests/queries/0_stateless/00901_joint_entropy.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00901_joint_entropy.sql
rename to tests/queries/0_stateless/00901_joint_entropy.sql
diff --git a/dbms/tests/queries/0_stateless/00902_entropy.reference b/tests/queries/0_stateless/00902_entropy.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00902_entropy.reference
rename to tests/queries/0_stateless/00902_entropy.reference
diff --git a/dbms/tests/queries/0_stateless/00902_entropy.sql b/tests/queries/0_stateless/00902_entropy.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00902_entropy.sql
rename to tests/queries/0_stateless/00902_entropy.sql
diff --git a/dbms/tests/queries/0_stateless/00903_array_with_constant_function.reference b/tests/queries/0_stateless/00903_array_with_constant_function.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00903_array_with_constant_function.reference
rename to tests/queries/0_stateless/00903_array_with_constant_function.reference
diff --git a/dbms/tests/queries/0_stateless/00903_array_with_constant_function.sql b/tests/queries/0_stateless/00903_array_with_constant_function.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00903_array_with_constant_function.sql
rename to tests/queries/0_stateless/00903_array_with_constant_function.sql
diff --git a/dbms/tests/queries/0_stateless/00904_array_with_constant_2.reference b/tests/queries/0_stateless/00904_array_with_constant_2.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00904_array_with_constant_2.reference
rename to tests/queries/0_stateless/00904_array_with_constant_2.reference
diff --git a/dbms/tests/queries/0_stateless/00904_array_with_constant_2.sql b/tests/queries/0_stateless/00904_array_with_constant_2.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00904_array_with_constant_2.sql
rename to tests/queries/0_stateless/00904_array_with_constant_2.sql
diff --git a/dbms/tests/queries/0_stateless/00905_compile_expressions_compare_big_dates.reference b/tests/queries/0_stateless/00905_compile_expressions_compare_big_dates.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00905_compile_expressions_compare_big_dates.reference
rename to tests/queries/0_stateless/00905_compile_expressions_compare_big_dates.reference
diff --git a/dbms/tests/queries/0_stateless/00905_compile_expressions_compare_big_dates.sql b/tests/queries/0_stateless/00905_compile_expressions_compare_big_dates.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00905_compile_expressions_compare_big_dates.sql
rename to tests/queries/0_stateless/00905_compile_expressions_compare_big_dates.sql
diff --git a/dbms/tests/queries/0_stateless/00905_field_with_aggregate_function_state.reference b/tests/queries/0_stateless/00905_field_with_aggregate_function_state.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00905_field_with_aggregate_function_state.reference
rename to tests/queries/0_stateless/00905_field_with_aggregate_function_state.reference
diff --git a/dbms/tests/queries/0_stateless/00905_field_with_aggregate_function_state.sql b/tests/queries/0_stateless/00905_field_with_aggregate_function_state.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00905_field_with_aggregate_function_state.sql
rename to tests/queries/0_stateless/00905_field_with_aggregate_function_state.sql
diff --git a/dbms/tests/queries/0_stateless/00906_low_cardinality_cache.reference b/tests/queries/0_stateless/00906_low_cardinality_cache.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00906_low_cardinality_cache.reference
rename to tests/queries/0_stateless/00906_low_cardinality_cache.reference
diff --git a/dbms/tests/queries/0_stateless/00906_low_cardinality_cache.sql b/tests/queries/0_stateless/00906_low_cardinality_cache.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00906_low_cardinality_cache.sql
rename to tests/queries/0_stateless/00906_low_cardinality_cache.sql
diff --git a/dbms/tests/queries/0_stateless/00906_low_cardinality_const_argument.reference b/tests/queries/0_stateless/00906_low_cardinality_const_argument.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00906_low_cardinality_const_argument.reference
rename to tests/queries/0_stateless/00906_low_cardinality_const_argument.reference
diff --git a/dbms/tests/queries/0_stateless/00906_low_cardinality_const_argument.sql b/tests/queries/0_stateless/00906_low_cardinality_const_argument.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00906_low_cardinality_const_argument.sql
rename to tests/queries/0_stateless/00906_low_cardinality_const_argument.sql
diff --git a/dbms/tests/queries/0_stateless/00906_low_cardinality_rollup.reference b/tests/queries/0_stateless/00906_low_cardinality_rollup.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00906_low_cardinality_rollup.reference
rename to tests/queries/0_stateless/00906_low_cardinality_rollup.reference
diff --git a/dbms/tests/queries/0_stateless/00906_low_cardinality_rollup.sql b/tests/queries/0_stateless/00906_low_cardinality_rollup.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00906_low_cardinality_rollup.sql
rename to tests/queries/0_stateless/00906_low_cardinality_rollup.sql
diff --git a/dbms/tests/queries/0_stateless/00907_set_index_max_rows.reference b/tests/queries/0_stateless/00907_set_index_max_rows.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00907_set_index_max_rows.reference
rename to tests/queries/0_stateless/00907_set_index_max_rows.reference
diff --git a/dbms/tests/queries/0_stateless/00907_set_index_max_rows.sh b/tests/queries/0_stateless/00907_set_index_max_rows.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00907_set_index_max_rows.sh
rename to tests/queries/0_stateless/00907_set_index_max_rows.sh
diff --git a/dbms/tests/queries/0_stateless/00907_set_index_with_nullable_and_low_cardinality.reference b/tests/queries/0_stateless/00907_set_index_with_nullable_and_low_cardinality.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00907_set_index_with_nullable_and_low_cardinality.reference
rename to tests/queries/0_stateless/00907_set_index_with_nullable_and_low_cardinality.reference
diff --git a/dbms/tests/queries/0_stateless/00907_set_index_with_nullable_and_low_cardinality.sql b/tests/queries/0_stateless/00907_set_index_with_nullable_and_low_cardinality.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00907_set_index_with_nullable_and_low_cardinality.sql
rename to tests/queries/0_stateless/00907_set_index_with_nullable_and_low_cardinality.sql
diff --git a/dbms/tests/queries/0_stateless/00907_set_index_with_nullable_and_low_cardinality_bug.reference b/tests/queries/0_stateless/00907_set_index_with_nullable_and_low_cardinality_bug.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00907_set_index_with_nullable_and_low_cardinality_bug.reference
rename to tests/queries/0_stateless/00907_set_index_with_nullable_and_low_cardinality_bug.reference
diff --git a/dbms/tests/queries/0_stateless/00907_set_index_with_nullable_and_low_cardinality_bug.sql b/tests/queries/0_stateless/00907_set_index_with_nullable_and_low_cardinality_bug.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00907_set_index_with_nullable_and_low_cardinality_bug.sql
rename to tests/queries/0_stateless/00907_set_index_with_nullable_and_low_cardinality_bug.sql
diff --git a/dbms/tests/queries/0_stateless/00908_analyze_query.reference b/tests/queries/0_stateless/00908_analyze_query.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00908_analyze_query.reference
rename to tests/queries/0_stateless/00908_analyze_query.reference
diff --git a/dbms/tests/queries/0_stateless/00908_analyze_query.sql b/tests/queries/0_stateless/00908_analyze_query.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00908_analyze_query.sql
rename to tests/queries/0_stateless/00908_analyze_query.sql
diff --git a/dbms/tests/queries/0_stateless/00908_bloom_filter_index.reference b/tests/queries/0_stateless/00908_bloom_filter_index.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00908_bloom_filter_index.reference
rename to tests/queries/0_stateless/00908_bloom_filter_index.reference
diff --git a/dbms/tests/queries/0_stateless/00908_bloom_filter_index.sh b/tests/queries/0_stateless/00908_bloom_filter_index.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00908_bloom_filter_index.sh
rename to tests/queries/0_stateless/00908_bloom_filter_index.sh
diff --git a/dbms/tests/queries/0_stateless/00908_long_http_insert.reference b/tests/queries/0_stateless/00908_long_http_insert.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00908_long_http_insert.reference
rename to tests/queries/0_stateless/00908_long_http_insert.reference
diff --git a/dbms/tests/queries/0_stateless/00908_long_http_insert.sh b/tests/queries/0_stateless/00908_long_http_insert.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00908_long_http_insert.sh
rename to tests/queries/0_stateless/00908_long_http_insert.sh
diff --git a/dbms/tests/queries/0_stateless/00909_arrayEnumerateUniq.reference b/tests/queries/0_stateless/00909_arrayEnumerateUniq.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00909_arrayEnumerateUniq.reference
rename to tests/queries/0_stateless/00909_arrayEnumerateUniq.reference
diff --git a/dbms/tests/queries/0_stateless/00909_arrayEnumerateUniq.sql b/tests/queries/0_stateless/00909_arrayEnumerateUniq.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00909_arrayEnumerateUniq.sql
rename to tests/queries/0_stateless/00909_arrayEnumerateUniq.sql
diff --git a/dbms/tests/queries/0_stateless/00909_kill_not_initialized_query.reference b/tests/queries/0_stateless/00909_kill_not_initialized_query.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00909_kill_not_initialized_query.reference
rename to tests/queries/0_stateless/00909_kill_not_initialized_query.reference
diff --git a/dbms/tests/queries/0_stateless/00909_kill_not_initialized_query.sh b/tests/queries/0_stateless/00909_kill_not_initialized_query.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00909_kill_not_initialized_query.sh
rename to tests/queries/0_stateless/00909_kill_not_initialized_query.sh
diff --git a/dbms/tests/queries/0_stateless/00909_ngram_distance.reference b/tests/queries/0_stateless/00909_ngram_distance.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00909_ngram_distance.reference
rename to tests/queries/0_stateless/00909_ngram_distance.reference
diff --git a/dbms/tests/queries/0_stateless/00909_ngram_distance.sql b/tests/queries/0_stateless/00909_ngram_distance.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00909_ngram_distance.sql
rename to tests/queries/0_stateless/00909_ngram_distance.sql
diff --git a/dbms/tests/queries/0_stateless/00910_aggregation_timeseriesgroupsum.reference b/tests/queries/0_stateless/00910_aggregation_timeseriesgroupsum.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00910_aggregation_timeseriesgroupsum.reference
rename to tests/queries/0_stateless/00910_aggregation_timeseriesgroupsum.reference
diff --git a/dbms/tests/queries/0_stateless/00910_aggregation_timeseriesgroupsum.sql b/tests/queries/0_stateless/00910_aggregation_timeseriesgroupsum.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00910_aggregation_timeseriesgroupsum.sql
rename to tests/queries/0_stateless/00910_aggregation_timeseriesgroupsum.sql
diff --git a/dbms/tests/queries/0_stateless/00910_buffer_prewhere.reference b/tests/queries/0_stateless/00910_buffer_prewhere.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00910_buffer_prewhere.reference
rename to tests/queries/0_stateless/00910_buffer_prewhere.reference
diff --git a/dbms/tests/queries/0_stateless/00910_buffer_prewhere.sql b/tests/queries/0_stateless/00910_buffer_prewhere.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00910_buffer_prewhere.sql
rename to tests/queries/0_stateless/00910_buffer_prewhere.sql
diff --git a/dbms/tests/queries/0_stateless/00910_client_window_size_detection.reference b/tests/queries/0_stateless/00910_client_window_size_detection.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00910_client_window_size_detection.reference
rename to tests/queries/0_stateless/00910_client_window_size_detection.reference
diff --git a/dbms/tests/queries/0_stateless/00910_client_window_size_detection.sh b/tests/queries/0_stateless/00910_client_window_size_detection.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00910_client_window_size_detection.sh
rename to tests/queries/0_stateless/00910_client_window_size_detection.sh
diff --git a/dbms/tests/queries/0_stateless/00910_crash_when_distributed_modify_order_by.reference b/tests/queries/0_stateless/00910_crash_when_distributed_modify_order_by.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00910_crash_when_distributed_modify_order_by.reference
rename to tests/queries/0_stateless/00910_crash_when_distributed_modify_order_by.reference
diff --git a/dbms/tests/queries/0_stateless/00910_crash_when_distributed_modify_order_by.sql b/tests/queries/0_stateless/00910_crash_when_distributed_modify_order_by.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00910_crash_when_distributed_modify_order_by.sql
rename to tests/queries/0_stateless/00910_crash_when_distributed_modify_order_by.sql
diff --git a/dbms/tests/queries/0_stateless/00910_decimal_group_array_crash_3783.reference b/tests/queries/0_stateless/00910_decimal_group_array_crash_3783.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00910_decimal_group_array_crash_3783.reference
rename to tests/queries/0_stateless/00910_decimal_group_array_crash_3783.reference
diff --git a/dbms/tests/queries/0_stateless/00910_decimal_group_array_crash_3783.sql b/tests/queries/0_stateless/00910_decimal_group_array_crash_3783.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00910_decimal_group_array_crash_3783.sql
rename to tests/queries/0_stateless/00910_decimal_group_array_crash_3783.sql
diff --git a/dbms/tests/queries/0_stateless/00910_zookeeper_custom_compression_codecs_replicated.reference b/tests/queries/0_stateless/00910_zookeeper_custom_compression_codecs_replicated.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00910_zookeeper_custom_compression_codecs_replicated.reference
rename to tests/queries/0_stateless/00910_zookeeper_custom_compression_codecs_replicated.reference
diff --git a/dbms/tests/queries/0_stateless/00910_zookeeper_custom_compression_codecs_replicated.sql
b/tests/queries/0_stateless/00910_zookeeper_custom_compression_codecs_replicated.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00910_zookeeper_custom_compression_codecs_replicated.sql rename to tests/queries/0_stateless/00910_zookeeper_custom_compression_codecs_replicated.sql diff --git a/dbms/tests/queries/0_stateless/00910_zookeeper_test_alter_compression_codecs.reference b/tests/queries/0_stateless/00910_zookeeper_test_alter_compression_codecs.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00910_zookeeper_test_alter_compression_codecs.reference rename to tests/queries/0_stateless/00910_zookeeper_test_alter_compression_codecs.reference diff --git a/dbms/tests/queries/0_stateless/00910_zookeeper_test_alter_compression_codecs.sql b/tests/queries/0_stateless/00910_zookeeper_test_alter_compression_codecs.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00910_zookeeper_test_alter_compression_codecs.sql rename to tests/queries/0_stateless/00910_zookeeper_test_alter_compression_codecs.sql diff --git a/dbms/tests/queries/0_stateless/00911_tautological_compare.reference b/tests/queries/0_stateless/00911_tautological_compare.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00911_tautological_compare.reference rename to tests/queries/0_stateless/00911_tautological_compare.reference diff --git a/dbms/tests/queries/0_stateless/00911_tautological_compare.sql b/tests/queries/0_stateless/00911_tautological_compare.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00911_tautological_compare.sql rename to tests/queries/0_stateless/00911_tautological_compare.sql diff --git a/dbms/tests/queries/0_stateless/00912_string_comparison.reference b/tests/queries/0_stateless/00912_string_comparison.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00912_string_comparison.reference rename to tests/queries/0_stateless/00912_string_comparison.reference diff --git a/dbms/tests/queries/0_stateless/00912_string_comparison.sql b/tests/queries/0_stateless/00912_string_comparison.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00912_string_comparison.sql rename to tests/queries/0_stateless/00912_string_comparison.sql diff --git a/dbms/tests/queries/0_stateless/00913_many_threads.reference b/tests/queries/0_stateless/00913_many_threads.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00913_many_threads.reference rename to tests/queries/0_stateless/00913_many_threads.reference diff --git a/dbms/tests/queries/0_stateless/00913_many_threads.sql b/tests/queries/0_stateless/00913_many_threads.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00913_many_threads.sql rename to tests/queries/0_stateless/00913_many_threads.sql diff --git a/dbms/tests/queries/0_stateless/00914_join_bgranvea.reference b/tests/queries/0_stateless/00914_join_bgranvea.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00914_join_bgranvea.reference rename to tests/queries/0_stateless/00914_join_bgranvea.reference diff --git a/dbms/tests/queries/0_stateless/00914_join_bgranvea.sql b/tests/queries/0_stateless/00914_join_bgranvea.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00914_join_bgranvea.sql rename to tests/queries/0_stateless/00914_join_bgranvea.sql diff --git a/dbms/tests/queries/0_stateless/00914_replicate.reference b/tests/queries/0_stateless/00914_replicate.reference similarity index 100% rename 
from dbms/tests/queries/0_stateless/00914_replicate.reference rename to tests/queries/0_stateless/00914_replicate.reference diff --git a/dbms/tests/queries/0_stateless/00914_replicate.sql b/tests/queries/0_stateless/00914_replicate.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00914_replicate.sql rename to tests/queries/0_stateless/00914_replicate.sql diff --git a/dbms/tests/queries/0_stateless/00915_simple_aggregate_function.reference b/tests/queries/0_stateless/00915_simple_aggregate_function.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00915_simple_aggregate_function.reference rename to tests/queries/0_stateless/00915_simple_aggregate_function.reference diff --git a/dbms/tests/queries/0_stateless/00915_simple_aggregate_function.sql b/tests/queries/0_stateless/00915_simple_aggregate_function.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00915_simple_aggregate_function.sql rename to tests/queries/0_stateless/00915_simple_aggregate_function.sql diff --git a/dbms/tests/queries/0_stateless/00915_tuple_orantius.reference b/tests/queries/0_stateless/00915_tuple_orantius.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00915_tuple_orantius.reference rename to tests/queries/0_stateless/00915_tuple_orantius.reference diff --git a/dbms/tests/queries/0_stateless/00915_tuple_orantius.sql b/tests/queries/0_stateless/00915_tuple_orantius.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00915_tuple_orantius.sql rename to tests/queries/0_stateless/00915_tuple_orantius.sql diff --git a/dbms/tests/queries/0_stateless/00916_add_materialized_column_after.reference b/tests/queries/0_stateless/00916_add_materialized_column_after.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00916_add_materialized_column_after.reference rename to tests/queries/0_stateless/00916_add_materialized_column_after.reference diff --git a/dbms/tests/queries/0_stateless/00916_add_materialized_column_after.sql b/tests/queries/0_stateless/00916_add_materialized_column_after.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00916_add_materialized_column_after.sql rename to tests/queries/0_stateless/00916_add_materialized_column_after.sql diff --git a/dbms/tests/queries/0_stateless/00916_create_or_replace_view.reference b/tests/queries/0_stateless/00916_create_or_replace_view.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00916_create_or_replace_view.reference rename to tests/queries/0_stateless/00916_create_or_replace_view.reference diff --git a/dbms/tests/queries/0_stateless/00916_create_or_replace_view.sql b/tests/queries/0_stateless/00916_create_or_replace_view.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00916_create_or_replace_view.sql rename to tests/queries/0_stateless/00916_create_or_replace_view.sql diff --git a/dbms/tests/queries/0_stateless/00916_join_using_duplicate_columns.reference b/tests/queries/0_stateless/00916_join_using_duplicate_columns.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00916_join_using_duplicate_columns.reference rename to tests/queries/0_stateless/00916_join_using_duplicate_columns.reference diff --git a/dbms/tests/queries/0_stateless/00916_join_using_duplicate_columns.sql b/tests/queries/0_stateless/00916_join_using_duplicate_columns.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00916_join_using_duplicate_columns.sql rename to 
tests/queries/0_stateless/00916_join_using_duplicate_columns.sql diff --git a/dbms/tests/queries/0_stateless/00917_least_sqr.reference b/tests/queries/0_stateless/00917_least_sqr.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00917_least_sqr.reference rename to tests/queries/0_stateless/00917_least_sqr.reference diff --git a/dbms/tests/queries/0_stateless/00917_least_sqr.sql b/tests/queries/0_stateless/00917_least_sqr.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00917_least_sqr.sql rename to tests/queries/0_stateless/00917_least_sqr.sql diff --git a/dbms/tests/queries/0_stateless/00917_multiple_joins_denny_crane.reference b/tests/queries/0_stateless/00917_multiple_joins_denny_crane.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00917_multiple_joins_denny_crane.reference rename to tests/queries/0_stateless/00917_multiple_joins_denny_crane.reference diff --git a/dbms/tests/queries/0_stateless/00917_multiple_joins_denny_crane.sql b/tests/queries/0_stateless/00917_multiple_joins_denny_crane.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00917_multiple_joins_denny_crane.sql rename to tests/queries/0_stateless/00917_multiple_joins_denny_crane.sql diff --git a/dbms/tests/queries/0_stateless/00918_has_unsufficient_type_check.reference b/tests/queries/0_stateless/00918_has_unsufficient_type_check.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00918_has_unsufficient_type_check.reference rename to tests/queries/0_stateless/00918_has_unsufficient_type_check.reference diff --git a/dbms/tests/queries/0_stateless/00918_has_unsufficient_type_check.sql b/tests/queries/0_stateless/00918_has_unsufficient_type_check.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00918_has_unsufficient_type_check.sql rename to tests/queries/0_stateless/00918_has_unsufficient_type_check.sql diff --git a/dbms/tests/queries/0_stateless/00918_json_functions.reference b/tests/queries/0_stateless/00918_json_functions.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00918_json_functions.reference rename to tests/queries/0_stateless/00918_json_functions.reference diff --git a/dbms/tests/queries/0_stateless/00918_json_functions.sql b/tests/queries/0_stateless/00918_json_functions.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00918_json_functions.sql rename to tests/queries/0_stateless/00918_json_functions.sql diff --git a/dbms/tests/queries/0_stateless/00919_histogram_merge.reference b/tests/queries/0_stateless/00919_histogram_merge.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00919_histogram_merge.reference rename to tests/queries/0_stateless/00919_histogram_merge.reference diff --git a/dbms/tests/queries/0_stateless/00919_histogram_merge.sql b/tests/queries/0_stateless/00919_histogram_merge.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00919_histogram_merge.sql rename to tests/queries/0_stateless/00919_histogram_merge.sql diff --git a/dbms/tests/queries/0_stateless/00919_sum_aggregate_states_constants.reference b/tests/queries/0_stateless/00919_sum_aggregate_states_constants.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00919_sum_aggregate_states_constants.reference rename to tests/queries/0_stateless/00919_sum_aggregate_states_constants.reference diff --git a/dbms/tests/queries/0_stateless/00919_sum_aggregate_states_constants.sql 
b/tests/queries/0_stateless/00919_sum_aggregate_states_constants.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00919_sum_aggregate_states_constants.sql rename to tests/queries/0_stateless/00919_sum_aggregate_states_constants.sql diff --git a/dbms/tests/queries/0_stateless/00920_multiply_aggregate_states_constants.reference b/tests/queries/0_stateless/00920_multiply_aggregate_states_constants.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00920_multiply_aggregate_states_constants.reference rename to tests/queries/0_stateless/00920_multiply_aggregate_states_constants.reference diff --git a/dbms/tests/queries/0_stateless/00920_multiply_aggregate_states_constants.sql b/tests/queries/0_stateless/00920_multiply_aggregate_states_constants.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00920_multiply_aggregate_states_constants.sql rename to tests/queries/0_stateless/00920_multiply_aggregate_states_constants.sql diff --git a/dbms/tests/queries/0_stateless/00921_datetime64_basic.reference b/tests/queries/0_stateless/00921_datetime64_basic.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00921_datetime64_basic.reference rename to tests/queries/0_stateless/00921_datetime64_basic.reference diff --git a/dbms/tests/queries/0_stateless/00921_datetime64_basic.sql b/tests/queries/0_stateless/00921_datetime64_basic.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00921_datetime64_basic.sql rename to tests/queries/0_stateless/00921_datetime64_basic.sql diff --git a/dbms/tests/queries/0_stateless/00921_datetime64_compatibility.python b/tests/queries/0_stateless/00921_datetime64_compatibility.python similarity index 100% rename from dbms/tests/queries/0_stateless/00921_datetime64_compatibility.python rename to tests/queries/0_stateless/00921_datetime64_compatibility.python diff --git a/dbms/tests/queries/0_stateless/00921_datetime64_compatibility.reference b/tests/queries/0_stateless/00921_datetime64_compatibility.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00921_datetime64_compatibility.reference rename to tests/queries/0_stateless/00921_datetime64_compatibility.reference diff --git a/dbms/tests/queries/0_stateless/00921_datetime64_compatibility.sh b/tests/queries/0_stateless/00921_datetime64_compatibility.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00921_datetime64_compatibility.sh rename to tests/queries/0_stateless/00921_datetime64_compatibility.sh diff --git a/dbms/tests/queries/0_stateless/00925_zookeeper_empty_replicated_merge_tree_optimize_final.reference b/tests/queries/0_stateless/00925_zookeeper_empty_replicated_merge_tree_optimize_final.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00925_zookeeper_empty_replicated_merge_tree_optimize_final.reference rename to tests/queries/0_stateless/00925_zookeeper_empty_replicated_merge_tree_optimize_final.reference diff --git a/dbms/tests/queries/0_stateless/00925_zookeeper_empty_replicated_merge_tree_optimize_final.sql b/tests/queries/0_stateless/00925_zookeeper_empty_replicated_merge_tree_optimize_final.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00925_zookeeper_empty_replicated_merge_tree_optimize_final.sql rename to tests/queries/0_stateless/00925_zookeeper_empty_replicated_merge_tree_optimize_final.sql diff --git a/dbms/tests/queries/0_stateless/00926_adaptive_index_granularity_collapsing_merge_tree.reference 
b/tests/queries/0_stateless/00926_adaptive_index_granularity_collapsing_merge_tree.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00926_adaptive_index_granularity_collapsing_merge_tree.reference rename to tests/queries/0_stateless/00926_adaptive_index_granularity_collapsing_merge_tree.reference diff --git a/dbms/tests/queries/0_stateless/00926_adaptive_index_granularity_collapsing_merge_tree.sql b/tests/queries/0_stateless/00926_adaptive_index_granularity_collapsing_merge_tree.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00926_adaptive_index_granularity_collapsing_merge_tree.sql rename to tests/queries/0_stateless/00926_adaptive_index_granularity_collapsing_merge_tree.sql diff --git a/dbms/tests/queries/0_stateless/00926_adaptive_index_granularity_merge_tree.reference b/tests/queries/0_stateless/00926_adaptive_index_granularity_merge_tree.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00926_adaptive_index_granularity_merge_tree.reference rename to tests/queries/0_stateless/00926_adaptive_index_granularity_merge_tree.reference diff --git a/dbms/tests/queries/0_stateless/00926_adaptive_index_granularity_merge_tree.sql b/tests/queries/0_stateless/00926_adaptive_index_granularity_merge_tree.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00926_adaptive_index_granularity_merge_tree.sql rename to tests/queries/0_stateless/00926_adaptive_index_granularity_merge_tree.sql diff --git a/dbms/tests/queries/0_stateless/00926_adaptive_index_granularity_pk.reference b/tests/queries/0_stateless/00926_adaptive_index_granularity_pk.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00926_adaptive_index_granularity_pk.reference rename to tests/queries/0_stateless/00926_adaptive_index_granularity_pk.reference diff --git a/dbms/tests/queries/0_stateless/00926_adaptive_index_granularity_pk.sql b/tests/queries/0_stateless/00926_adaptive_index_granularity_pk.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00926_adaptive_index_granularity_pk.sql rename to tests/queries/0_stateless/00926_adaptive_index_granularity_pk.sql diff --git a/dbms/tests/queries/0_stateless/00926_adaptive_index_granularity_replacing_merge_tree.reference b/tests/queries/0_stateless/00926_adaptive_index_granularity_replacing_merge_tree.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00926_adaptive_index_granularity_replacing_merge_tree.reference rename to tests/queries/0_stateless/00926_adaptive_index_granularity_replacing_merge_tree.reference diff --git a/dbms/tests/queries/0_stateless/00926_adaptive_index_granularity_replacing_merge_tree.sql b/tests/queries/0_stateless/00926_adaptive_index_granularity_replacing_merge_tree.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00926_adaptive_index_granularity_replacing_merge_tree.sql rename to tests/queries/0_stateless/00926_adaptive_index_granularity_replacing_merge_tree.sql diff --git a/dbms/tests/queries/0_stateless/00926_adaptive_index_granularity_versioned_collapsing_merge_tree.reference b/tests/queries/0_stateless/00926_adaptive_index_granularity_versioned_collapsing_merge_tree.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00926_adaptive_index_granularity_versioned_collapsing_merge_tree.reference rename to tests/queries/0_stateless/00926_adaptive_index_granularity_versioned_collapsing_merge_tree.reference diff --git 
a/dbms/tests/queries/0_stateless/00926_adaptive_index_granularity_versioned_collapsing_merge_tree.sql b/tests/queries/0_stateless/00926_adaptive_index_granularity_versioned_collapsing_merge_tree.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00926_adaptive_index_granularity_versioned_collapsing_merge_tree.sql rename to tests/queries/0_stateless/00926_adaptive_index_granularity_versioned_collapsing_merge_tree.sql diff --git a/dbms/tests/queries/0_stateless/00926_geo_to_h3.reference b/tests/queries/0_stateless/00926_geo_to_h3.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00926_geo_to_h3.reference rename to tests/queries/0_stateless/00926_geo_to_h3.reference diff --git a/dbms/tests/queries/0_stateless/00926_geo_to_h3.sql b/tests/queries/0_stateless/00926_geo_to_h3.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00926_geo_to_h3.sql rename to tests/queries/0_stateless/00926_geo_to_h3.sql diff --git a/dbms/tests/queries/0_stateless/00926_multimatch.reference b/tests/queries/0_stateless/00926_multimatch.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00926_multimatch.reference rename to tests/queries/0_stateless/00926_multimatch.reference diff --git a/dbms/tests/queries/0_stateless/00926_multimatch.sql b/tests/queries/0_stateless/00926_multimatch.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00926_multimatch.sql rename to tests/queries/0_stateless/00926_multimatch.sql diff --git a/dbms/tests/queries/0_stateless/00926_zookeeper_adaptive_index_granularity_replicated_merge_tree.reference b/tests/queries/0_stateless/00926_zookeeper_adaptive_index_granularity_replicated_merge_tree.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00926_zookeeper_adaptive_index_granularity_replicated_merge_tree.reference rename to tests/queries/0_stateless/00926_zookeeper_adaptive_index_granularity_replicated_merge_tree.reference diff --git a/dbms/tests/queries/0_stateless/00926_zookeeper_adaptive_index_granularity_replicated_merge_tree.sql b/tests/queries/0_stateless/00926_zookeeper_adaptive_index_granularity_replicated_merge_tree.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00926_zookeeper_adaptive_index_granularity_replicated_merge_tree.sql rename to tests/queries/0_stateless/00926_zookeeper_adaptive_index_granularity_replicated_merge_tree.sql diff --git a/dbms/tests/queries/0_stateless/00927_asof_join_correct_bt.reference b/tests/queries/0_stateless/00927_asof_join_correct_bt.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00927_asof_join_correct_bt.reference rename to tests/queries/0_stateless/00927_asof_join_correct_bt.reference diff --git a/dbms/tests/queries/0_stateless/00927_asof_join_correct_bt.sql b/tests/queries/0_stateless/00927_asof_join_correct_bt.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00927_asof_join_correct_bt.sql rename to tests/queries/0_stateless/00927_asof_join_correct_bt.sql diff --git a/dbms/tests/queries/0_stateless/00927_asof_join_long.reference b/tests/queries/0_stateless/00927_asof_join_long.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00927_asof_join_long.reference rename to tests/queries/0_stateless/00927_asof_join_long.reference diff --git a/dbms/tests/queries/0_stateless/00927_asof_join_long.sql b/tests/queries/0_stateless/00927_asof_join_long.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00927_asof_join_long.sql 
rename to tests/queries/0_stateless/00927_asof_join_long.sql diff --git a/dbms/tests/queries/0_stateless/00927_asof_join_noninclusive.reference b/tests/queries/0_stateless/00927_asof_join_noninclusive.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00927_asof_join_noninclusive.reference rename to tests/queries/0_stateless/00927_asof_join_noninclusive.reference diff --git a/dbms/tests/queries/0_stateless/00927_asof_join_noninclusive.sql b/tests/queries/0_stateless/00927_asof_join_noninclusive.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00927_asof_join_noninclusive.sql rename to tests/queries/0_stateless/00927_asof_join_noninclusive.sql diff --git a/dbms/tests/queries/0_stateless/00927_asof_join_other_types.reference b/tests/queries/0_stateless/00927_asof_join_other_types.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00927_asof_join_other_types.reference rename to tests/queries/0_stateless/00927_asof_join_other_types.reference diff --git a/dbms/tests/queries/0_stateless/00927_asof_join_other_types.sh b/tests/queries/0_stateless/00927_asof_join_other_types.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00927_asof_join_other_types.sh rename to tests/queries/0_stateless/00927_asof_join_other_types.sh diff --git a/dbms/tests/queries/0_stateless/00927_asof_joins.reference b/tests/queries/0_stateless/00927_asof_joins.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00927_asof_joins.reference rename to tests/queries/0_stateless/00927_asof_joins.reference diff --git a/dbms/tests/queries/0_stateless/00927_asof_joins.sql b/tests/queries/0_stateless/00927_asof_joins.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00927_asof_joins.sql rename to tests/queries/0_stateless/00927_asof_joins.sql diff --git a/dbms/tests/queries/0_stateless/00927_disable_hyperscan.reference b/tests/queries/0_stateless/00927_disable_hyperscan.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00927_disable_hyperscan.reference rename to tests/queries/0_stateless/00927_disable_hyperscan.reference diff --git a/dbms/tests/queries/0_stateless/00927_disable_hyperscan.sql b/tests/queries/0_stateless/00927_disable_hyperscan.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00927_disable_hyperscan.sql rename to tests/queries/0_stateless/00927_disable_hyperscan.sql diff --git a/dbms/tests/queries/0_stateless/00928_multi_match_constant_constant.reference b/tests/queries/0_stateless/00928_multi_match_constant_constant.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00928_multi_match_constant_constant.reference rename to tests/queries/0_stateless/00928_multi_match_constant_constant.reference diff --git a/dbms/tests/queries/0_stateless/00928_multi_match_constant_constant.sql b/tests/queries/0_stateless/00928_multi_match_constant_constant.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00928_multi_match_constant_constant.sql rename to tests/queries/0_stateless/00928_multi_match_constant_constant.sql diff --git a/dbms/tests/queries/0_stateless/00929_multi_match_edit_distance.reference b/tests/queries/0_stateless/00929_multi_match_edit_distance.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00929_multi_match_edit_distance.reference rename to tests/queries/0_stateless/00929_multi_match_edit_distance.reference diff --git 
a/dbms/tests/queries/0_stateless/00929_multi_match_edit_distance.sql b/tests/queries/0_stateless/00929_multi_match_edit_distance.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00929_multi_match_edit_distance.sql rename to tests/queries/0_stateless/00929_multi_match_edit_distance.sql diff --git a/dbms/tests/queries/0_stateless/00930_arrayIntersect.reference b/tests/queries/0_stateless/00930_arrayIntersect.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00930_arrayIntersect.reference rename to tests/queries/0_stateless/00930_arrayIntersect.reference diff --git a/dbms/tests/queries/0_stateless/00930_arrayIntersect.sql b/tests/queries/0_stateless/00930_arrayIntersect.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00930_arrayIntersect.sql rename to tests/queries/0_stateless/00930_arrayIntersect.sql diff --git a/dbms/tests/queries/0_stateless/00930_max_partitions_per_insert_block.reference b/tests/queries/0_stateless/00930_max_partitions_per_insert_block.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00930_max_partitions_per_insert_block.reference rename to tests/queries/0_stateless/00930_max_partitions_per_insert_block.reference diff --git a/dbms/tests/queries/0_stateless/00930_max_partitions_per_insert_block.sql b/tests/queries/0_stateless/00930_max_partitions_per_insert_block.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00930_max_partitions_per_insert_block.sql rename to tests/queries/0_stateless/00930_max_partitions_per_insert_block.sql diff --git a/dbms/tests/queries/0_stateless/00931_low_cardinality_nullable_aggregate_function_type.reference b/tests/queries/0_stateless/00931_low_cardinality_nullable_aggregate_function_type.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00931_low_cardinality_nullable_aggregate_function_type.reference rename to tests/queries/0_stateless/00931_low_cardinality_nullable_aggregate_function_type.reference diff --git a/dbms/tests/queries/0_stateless/00931_low_cardinality_nullable_aggregate_function_type.sql b/tests/queries/0_stateless/00931_low_cardinality_nullable_aggregate_function_type.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00931_low_cardinality_nullable_aggregate_function_type.sql rename to tests/queries/0_stateless/00931_low_cardinality_nullable_aggregate_function_type.sql diff --git a/dbms/tests/queries/0_stateless/00931_low_cardinality_read_with_empty_array.reference b/tests/queries/0_stateless/00931_low_cardinality_read_with_empty_array.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00931_low_cardinality_read_with_empty_array.reference rename to tests/queries/0_stateless/00931_low_cardinality_read_with_empty_array.reference diff --git a/dbms/tests/queries/0_stateless/00931_low_cardinality_read_with_empty_array.sql b/tests/queries/0_stateless/00931_low_cardinality_read_with_empty_array.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00931_low_cardinality_read_with_empty_array.sql rename to tests/queries/0_stateless/00931_low_cardinality_read_with_empty_array.sql diff --git a/dbms/tests/queries/0_stateless/00931_low_cardinality_set_index_in_key_condition.reference b/tests/queries/0_stateless/00931_low_cardinality_set_index_in_key_condition.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00931_low_cardinality_set_index_in_key_condition.reference rename to 
tests/queries/0_stateless/00931_low_cardinality_set_index_in_key_condition.reference diff --git a/dbms/tests/queries/0_stateless/00931_low_cardinality_set_index_in_key_condition.sql b/tests/queries/0_stateless/00931_low_cardinality_set_index_in_key_condition.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00931_low_cardinality_set_index_in_key_condition.sql rename to tests/queries/0_stateless/00931_low_cardinality_set_index_in_key_condition.sql diff --git a/dbms/tests/queries/0_stateless/00932_array_intersect_bug.reference b/tests/queries/0_stateless/00932_array_intersect_bug.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00932_array_intersect_bug.reference rename to tests/queries/0_stateless/00932_array_intersect_bug.reference diff --git a/dbms/tests/queries/0_stateless/00932_array_intersect_bug.sql b/tests/queries/0_stateless/00932_array_intersect_bug.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00932_array_intersect_bug.sql rename to tests/queries/0_stateless/00932_array_intersect_bug.sql diff --git a/dbms/tests/queries/0_stateless/00932_geohash_support.reference b/tests/queries/0_stateless/00932_geohash_support.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00932_geohash_support.reference rename to tests/queries/0_stateless/00932_geohash_support.reference diff --git a/dbms/tests/queries/0_stateless/00932_geohash_support.sql b/tests/queries/0_stateless/00932_geohash_support.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00932_geohash_support.sql rename to tests/queries/0_stateless/00932_geohash_support.sql diff --git a/dbms/tests/queries/0_stateless/00933_alter_ttl.reference b/tests/queries/0_stateless/00933_alter_ttl.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00933_alter_ttl.reference rename to tests/queries/0_stateless/00933_alter_ttl.reference diff --git a/dbms/tests/queries/0_stateless/00933_alter_ttl.sql b/tests/queries/0_stateless/00933_alter_ttl.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00933_alter_ttl.sql rename to tests/queries/0_stateless/00933_alter_ttl.sql diff --git a/dbms/tests/queries/0_stateless/00933_reserved_word.reference b/tests/queries/0_stateless/00933_reserved_word.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00933_reserved_word.reference rename to tests/queries/0_stateless/00933_reserved_word.reference diff --git a/dbms/tests/queries/0_stateless/00933_reserved_word.sql b/tests/queries/0_stateless/00933_reserved_word.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00933_reserved_word.sql rename to tests/queries/0_stateless/00933_reserved_word.sql diff --git a/dbms/tests/queries/0_stateless/00933_test_fix_extra_seek_on_compressed_cache.reference b/tests/queries/0_stateless/00933_test_fix_extra_seek_on_compressed_cache.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00933_test_fix_extra_seek_on_compressed_cache.reference rename to tests/queries/0_stateless/00933_test_fix_extra_seek_on_compressed_cache.reference diff --git a/dbms/tests/queries/0_stateless/00933_test_fix_extra_seek_on_compressed_cache.sh b/tests/queries/0_stateless/00933_test_fix_extra_seek_on_compressed_cache.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00933_test_fix_extra_seek_on_compressed_cache.sh rename to tests/queries/0_stateless/00933_test_fix_extra_seek_on_compressed_cache.sh diff --git 
a/dbms/tests/queries/0_stateless/00933_ttl_replicated_zookeeper.reference b/tests/queries/0_stateless/00933_ttl_replicated_zookeeper.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00933_ttl_replicated_zookeeper.reference rename to tests/queries/0_stateless/00933_ttl_replicated_zookeeper.reference diff --git a/dbms/tests/queries/0_stateless/00933_ttl_replicated_zookeeper.sql b/tests/queries/0_stateless/00933_ttl_replicated_zookeeper.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00933_ttl_replicated_zookeeper.sql rename to tests/queries/0_stateless/00933_ttl_replicated_zookeeper.sql diff --git a/dbms/tests/queries/0_stateless/00933_ttl_simple.reference b/tests/queries/0_stateless/00933_ttl_simple.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00933_ttl_simple.reference rename to tests/queries/0_stateless/00933_ttl_simple.reference diff --git a/dbms/tests/queries/0_stateless/00933_ttl_simple.sql b/tests/queries/0_stateless/00933_ttl_simple.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00933_ttl_simple.sql rename to tests/queries/0_stateless/00933_ttl_simple.sql diff --git a/dbms/tests/queries/0_stateless/00933_ttl_with_default.reference b/tests/queries/0_stateless/00933_ttl_with_default.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00933_ttl_with_default.reference rename to tests/queries/0_stateless/00933_ttl_with_default.reference diff --git a/dbms/tests/queries/0_stateless/00933_ttl_with_default.sql b/tests/queries/0_stateless/00933_ttl_with_default.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00933_ttl_with_default.sql rename to tests/queries/0_stateless/00933_ttl_with_default.sql diff --git a/dbms/tests/queries/0_stateless/00934_is_valid_utf8.reference b/tests/queries/0_stateless/00934_is_valid_utf8.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00934_is_valid_utf8.reference rename to tests/queries/0_stateless/00934_is_valid_utf8.reference diff --git a/dbms/tests/queries/0_stateless/00934_is_valid_utf8.sql b/tests/queries/0_stateless/00934_is_valid_utf8.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00934_is_valid_utf8.sql rename to tests/queries/0_stateless/00934_is_valid_utf8.sql diff --git a/dbms/tests/queries/0_stateless/00935_to_iso_week_first_year.reference b/tests/queries/0_stateless/00935_to_iso_week_first_year.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00935_to_iso_week_first_year.reference rename to tests/queries/0_stateless/00935_to_iso_week_first_year.reference diff --git a/dbms/tests/queries/0_stateless/00935_to_iso_week_first_year.sql b/tests/queries/0_stateless/00935_to_iso_week_first_year.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00935_to_iso_week_first_year.sql rename to tests/queries/0_stateless/00935_to_iso_week_first_year.sql diff --git a/dbms/tests/queries/0_stateless/00936_crc_functions.reference b/tests/queries/0_stateless/00936_crc_functions.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00936_crc_functions.reference rename to tests/queries/0_stateless/00936_crc_functions.reference diff --git a/dbms/tests/queries/0_stateless/00936_crc_functions.sql b/tests/queries/0_stateless/00936_crc_functions.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00936_crc_functions.sql rename to tests/queries/0_stateless/00936_crc_functions.sql diff --git 
a/dbms/tests/queries/0_stateless/00936_function_result_with_operator_in.reference b/tests/queries/0_stateless/00936_function_result_with_operator_in.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00936_function_result_with_operator_in.reference rename to tests/queries/0_stateless/00936_function_result_with_operator_in.reference diff --git a/dbms/tests/queries/0_stateless/00936_function_result_with_operator_in.sql b/tests/queries/0_stateless/00936_function_result_with_operator_in.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00936_function_result_with_operator_in.sql rename to tests/queries/0_stateless/00936_function_result_with_operator_in.sql diff --git a/dbms/tests/queries/0_stateless/00936_substring_utf8_non_const.reference b/tests/queries/0_stateless/00936_substring_utf8_non_const.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00936_substring_utf8_non_const.reference rename to tests/queries/0_stateless/00936_substring_utf8_non_const.reference diff --git a/dbms/tests/queries/0_stateless/00936_substring_utf8_non_const.sql b/tests/queries/0_stateless/00936_substring_utf8_non_const.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00936_substring_utf8_non_const.sql rename to tests/queries/0_stateless/00936_substring_utf8_non_const.sql diff --git a/dbms/tests/queries/0_stateless/00937_ipv4_cidr_range.reference b/tests/queries/0_stateless/00937_ipv4_cidr_range.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00937_ipv4_cidr_range.reference rename to tests/queries/0_stateless/00937_ipv4_cidr_range.reference diff --git a/dbms/tests/queries/0_stateless/00937_ipv4_cidr_range.sql b/tests/queries/0_stateless/00937_ipv4_cidr_range.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00937_ipv4_cidr_range.sql rename to tests/queries/0_stateless/00937_ipv4_cidr_range.sql diff --git a/dbms/tests/queries/0_stateless/00937_template_output_format.reference b/tests/queries/0_stateless/00937_template_output_format.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00937_template_output_format.reference rename to tests/queries/0_stateless/00937_template_output_format.reference diff --git a/dbms/tests/queries/0_stateless/00937_template_output_format.sh b/tests/queries/0_stateless/00937_template_output_format.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00937_template_output_format.sh rename to tests/queries/0_stateless/00937_template_output_format.sh diff --git a/dbms/tests/queries/0_stateless/00937_test_use_header_csv.reference b/tests/queries/0_stateless/00937_test_use_header_csv.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00937_test_use_header_csv.reference rename to tests/queries/0_stateless/00937_test_use_header_csv.reference diff --git a/dbms/tests/queries/0_stateless/00937_test_use_header_csv.sh b/tests/queries/0_stateless/00937_test_use_header_csv.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00937_test_use_header_csv.sh rename to tests/queries/0_stateless/00937_test_use_header_csv.sh diff --git a/dbms/tests/queries/0_stateless/00937_test_use_header_tsv.reference b/tests/queries/0_stateless/00937_test_use_header_tsv.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00937_test_use_header_tsv.reference rename to tests/queries/0_stateless/00937_test_use_header_tsv.reference diff --git 
a/dbms/tests/queries/0_stateless/00937_test_use_header_tsv.sh b/tests/queries/0_stateless/00937_test_use_header_tsv.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00937_test_use_header_tsv.sh rename to tests/queries/0_stateless/00937_test_use_header_tsv.sh diff --git a/dbms/tests/queries/0_stateless/00938_basename.reference b/tests/queries/0_stateless/00938_basename.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00938_basename.reference rename to tests/queries/0_stateless/00938_basename.reference diff --git a/dbms/tests/queries/0_stateless/00938_basename.sql b/tests/queries/0_stateless/00938_basename.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00938_basename.sql rename to tests/queries/0_stateless/00938_basename.sql diff --git a/dbms/tests/queries/0_stateless/00938_dataset_test.reference b/tests/queries/0_stateless/00938_dataset_test.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00938_dataset_test.reference rename to tests/queries/0_stateless/00938_dataset_test.reference diff --git a/dbms/tests/queries/0_stateless/00938_dataset_test.sql b/tests/queries/0_stateless/00938_dataset_test.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00938_dataset_test.sql rename to tests/queries/0_stateless/00938_dataset_test.sql diff --git a/dbms/tests/queries/0_stateless/00938_fix_rwlock_segfault.reference b/tests/queries/0_stateless/00938_fix_rwlock_segfault.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00938_fix_rwlock_segfault.reference rename to tests/queries/0_stateless/00938_fix_rwlock_segfault.reference diff --git a/dbms/tests/queries/0_stateless/00938_fix_rwlock_segfault.sh b/tests/queries/0_stateless/00938_fix_rwlock_segfault.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00938_fix_rwlock_segfault.sh rename to tests/queries/0_stateless/00938_fix_rwlock_segfault.sh diff --git a/dbms/tests/queries/0_stateless/00938_ipv6_cidr_range.reference b/tests/queries/0_stateless/00938_ipv6_cidr_range.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00938_ipv6_cidr_range.reference rename to tests/queries/0_stateless/00938_ipv6_cidr_range.reference diff --git a/dbms/tests/queries/0_stateless/00938_ipv6_cidr_range.sql b/tests/queries/0_stateless/00938_ipv6_cidr_range.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00938_ipv6_cidr_range.sql rename to tests/queries/0_stateless/00938_ipv6_cidr_range.sql diff --git a/dbms/tests/queries/0_stateless/00938_template_input_format.reference b/tests/queries/0_stateless/00938_template_input_format.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00938_template_input_format.reference rename to tests/queries/0_stateless/00938_template_input_format.reference diff --git a/dbms/tests/queries/0_stateless/00938_template_input_format.sh b/tests/queries/0_stateless/00938_template_input_format.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00938_template_input_format.sh rename to tests/queries/0_stateless/00938_template_input_format.sh diff --git a/dbms/tests/queries/0_stateless/00938_test_retention_function.reference b/tests/queries/0_stateless/00938_test_retention_function.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00938_test_retention_function.reference rename to tests/queries/0_stateless/00938_test_retention_function.reference diff --git 
a/dbms/tests/queries/0_stateless/00938_test_retention_function.sql b/tests/queries/0_stateless/00938_test_retention_function.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00938_test_retention_function.sql rename to tests/queries/0_stateless/00938_test_retention_function.sql diff --git a/dbms/tests/queries/0_stateless/00939_limit_by_offset.reference b/tests/queries/0_stateless/00939_limit_by_offset.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00939_limit_by_offset.reference rename to tests/queries/0_stateless/00939_limit_by_offset.reference diff --git a/dbms/tests/queries/0_stateless/00939_limit_by_offset.sql b/tests/queries/0_stateless/00939_limit_by_offset.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00939_limit_by_offset.sql rename to tests/queries/0_stateless/00939_limit_by_offset.sql diff --git a/dbms/tests/queries/0_stateless/00939_test_null_in.reference b/tests/queries/0_stateless/00939_test_null_in.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00939_test_null_in.reference rename to tests/queries/0_stateless/00939_test_null_in.reference diff --git a/dbms/tests/queries/0_stateless/00939_test_null_in.sql b/tests/queries/0_stateless/00939_test_null_in.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00939_test_null_in.sql rename to tests/queries/0_stateless/00939_test_null_in.sql diff --git a/dbms/tests/queries/0_stateless/00940_max_parts_in_total.reference b/tests/queries/0_stateless/00940_max_parts_in_total.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00940_max_parts_in_total.reference rename to tests/queries/0_stateless/00940_max_parts_in_total.reference diff --git a/dbms/tests/queries/0_stateless/00940_max_parts_in_total.sql b/tests/queries/0_stateless/00940_max_parts_in_total.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00940_max_parts_in_total.sql rename to tests/queries/0_stateless/00940_max_parts_in_total.sql diff --git a/dbms/tests/queries/0_stateless/00940_order_by_read_in_order.reference b/tests/queries/0_stateless/00940_order_by_read_in_order.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00940_order_by_read_in_order.reference rename to tests/queries/0_stateless/00940_order_by_read_in_order.reference diff --git a/dbms/tests/queries/0_stateless/00940_order_by_read_in_order.sql b/tests/queries/0_stateless/00940_order_by_read_in_order.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00940_order_by_read_in_order.sql rename to tests/queries/0_stateless/00940_order_by_read_in_order.sql diff --git a/dbms/tests/queries/0_stateless/00941_system_columns_race_condition.reference b/tests/queries/0_stateless/00941_system_columns_race_condition.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00941_system_columns_race_condition.reference rename to tests/queries/0_stateless/00941_system_columns_race_condition.reference diff --git a/dbms/tests/queries/0_stateless/00941_system_columns_race_condition.sh b/tests/queries/0_stateless/00941_system_columns_race_condition.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00941_system_columns_race_condition.sh rename to tests/queries/0_stateless/00941_system_columns_race_condition.sh diff --git a/dbms/tests/queries/0_stateless/00941_to_custom_week.reference b/tests/queries/0_stateless/00941_to_custom_week.reference similarity index 100% rename from 
dbms/tests/queries/0_stateless/00941_to_custom_week.reference rename to tests/queries/0_stateless/00941_to_custom_week.reference diff --git a/dbms/tests/queries/0_stateless/00941_to_custom_week.sql b/tests/queries/0_stateless/00941_to_custom_week.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00941_to_custom_week.sql rename to tests/queries/0_stateless/00941_to_custom_week.sql diff --git a/dbms/tests/queries/0_stateless/00942_dataparts_500.reference b/tests/queries/0_stateless/00942_dataparts_500.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00942_dataparts_500.reference rename to tests/queries/0_stateless/00942_dataparts_500.reference diff --git a/dbms/tests/queries/0_stateless/00942_dataparts_500.sh b/tests/queries/0_stateless/00942_dataparts_500.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00942_dataparts_500.sh rename to tests/queries/0_stateless/00942_dataparts_500.sh diff --git a/dbms/tests/queries/0_stateless/00942_mutate_index.reference b/tests/queries/0_stateless/00942_mutate_index.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00942_mutate_index.reference rename to tests/queries/0_stateless/00942_mutate_index.reference diff --git a/dbms/tests/queries/0_stateless/00942_mutate_index.sh b/tests/queries/0_stateless/00942_mutate_index.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00942_mutate_index.sh rename to tests/queries/0_stateless/00942_mutate_index.sh diff --git a/dbms/tests/queries/0_stateless/00942_mv_rename_table.reference b/tests/queries/0_stateless/00942_mv_rename_table.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00942_mv_rename_table.reference rename to tests/queries/0_stateless/00942_mv_rename_table.reference diff --git a/dbms/tests/queries/0_stateless/00942_mv_rename_table.sql b/tests/queries/0_stateless/00942_mv_rename_table.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00942_mv_rename_table.sql rename to tests/queries/0_stateless/00942_mv_rename_table.sql diff --git a/dbms/tests/queries/0_stateless/00943_materialize_index.reference b/tests/queries/0_stateless/00943_materialize_index.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00943_materialize_index.reference rename to tests/queries/0_stateless/00943_materialize_index.reference diff --git a/dbms/tests/queries/0_stateless/00943_materialize_index.sh b/tests/queries/0_stateless/00943_materialize_index.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00943_materialize_index.sh rename to tests/queries/0_stateless/00943_materialize_index.sh diff --git a/dbms/tests/queries/0_stateless/00943_mv_rename_without_inner_table.reference b/tests/queries/0_stateless/00943_mv_rename_without_inner_table.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00943_mv_rename_without_inner_table.reference rename to tests/queries/0_stateless/00943_mv_rename_without_inner_table.reference diff --git a/dbms/tests/queries/0_stateless/00943_mv_rename_without_inner_table.sql b/tests/queries/0_stateless/00943_mv_rename_without_inner_table.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00943_mv_rename_without_inner_table.sql rename to tests/queries/0_stateless/00943_mv_rename_without_inner_table.sql diff --git a/dbms/tests/queries/0_stateless/00944_clear_index_in_partition.reference b/tests/queries/0_stateless/00944_clear_index_in_partition.reference similarity index 100% rename 
from dbms/tests/queries/0_stateless/00944_clear_index_in_partition.reference rename to tests/queries/0_stateless/00944_clear_index_in_partition.reference diff --git a/dbms/tests/queries/0_stateless/00944_clear_index_in_partition.sh b/tests/queries/0_stateless/00944_clear_index_in_partition.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00944_clear_index_in_partition.sh rename to tests/queries/0_stateless/00944_clear_index_in_partition.sh diff --git a/dbms/tests/queries/0_stateless/00944_create_bloom_filter_index_with_merge_tree.reference b/tests/queries/0_stateless/00944_create_bloom_filter_index_with_merge_tree.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00944_create_bloom_filter_index_with_merge_tree.reference rename to tests/queries/0_stateless/00944_create_bloom_filter_index_with_merge_tree.reference diff --git a/dbms/tests/queries/0_stateless/00944_create_bloom_filter_index_with_merge_tree.sh b/tests/queries/0_stateless/00944_create_bloom_filter_index_with_merge_tree.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00944_create_bloom_filter_index_with_merge_tree.sh rename to tests/queries/0_stateless/00944_create_bloom_filter_index_with_merge_tree.sh diff --git a/dbms/tests/queries/0_stateless/00944_minmax_null.reference b/tests/queries/0_stateless/00944_minmax_null.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00944_minmax_null.reference rename to tests/queries/0_stateless/00944_minmax_null.reference diff --git a/dbms/tests/queries/0_stateless/00944_minmax_null.sql b/tests/queries/0_stateless/00944_minmax_null.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00944_minmax_null.sql rename to tests/queries/0_stateless/00944_minmax_null.sql diff --git a/dbms/tests/queries/0_stateless/00944_ml_test.reference b/tests/queries/0_stateless/00944_ml_test.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00944_ml_test.reference rename to tests/queries/0_stateless/00944_ml_test.reference diff --git a/dbms/tests/queries/0_stateless/00944_ml_test.sql b/tests/queries/0_stateless/00944_ml_test.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00944_ml_test.sql rename to tests/queries/0_stateless/00944_ml_test.sql diff --git a/dbms/tests/queries/0_stateless/00945_bloom_filter_index.reference b/tests/queries/0_stateless/00945_bloom_filter_index.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00945_bloom_filter_index.reference rename to tests/queries/0_stateless/00945_bloom_filter_index.reference diff --git a/dbms/tests/queries/0_stateless/00945_bloom_filter_index.sql b/tests/queries/0_stateless/00945_bloom_filter_index.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00945_bloom_filter_index.sql rename to tests/queries/0_stateless/00945_bloom_filter_index.sql diff --git a/dbms/tests/queries/0_stateless/00945_ml_test.reference b/tests/queries/0_stateless/00945_ml_test.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00945_ml_test.reference rename to tests/queries/0_stateless/00945_ml_test.reference diff --git a/dbms/tests/queries/0_stateless/00945_ml_test.sql b/tests/queries/0_stateless/00945_ml_test.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00945_ml_test.sql rename to tests/queries/0_stateless/00945_ml_test.sql diff --git a/dbms/tests/queries/0_stateless/00946_ml_test.reference b/tests/queries/0_stateless/00946_ml_test.reference similarity 
rename from dbms/tests/queries/0_stateless/00946_ml_test.reference
rename to tests/queries/0_stateless/00946_ml_test.reference
diff --git a/dbms/tests/queries/0_stateless/00946_ml_test.sql b/tests/queries/0_stateless/00946_ml_test.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00946_ml_test.sql
rename to tests/queries/0_stateless/00946_ml_test.sql
diff --git a/dbms/tests/queries/0_stateless/00947_ml_test.reference b/tests/queries/0_stateless/00947_ml_test.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00947_ml_test.reference
rename to tests/queries/0_stateless/00947_ml_test.reference
diff --git a/dbms/tests/queries/0_stateless/00947_ml_test.sql b/tests/queries/0_stateless/00947_ml_test.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00947_ml_test.sql
rename to tests/queries/0_stateless/00947_ml_test.sql
diff --git a/dbms/tests/queries/0_stateless/00948_format_in_with_single_element.reference b/tests/queries/0_stateless/00948_format_in_with_single_element.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00948_format_in_with_single_element.reference
rename to tests/queries/0_stateless/00948_format_in_with_single_element.reference
diff --git a/dbms/tests/queries/0_stateless/00948_format_in_with_single_element.sh b/tests/queries/0_stateless/00948_format_in_with_single_element.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00948_format_in_with_single_element.sh
rename to tests/queries/0_stateless/00948_format_in_with_single_element.sh
diff --git a/dbms/tests/queries/0_stateless/00948_to_valid_utf8.reference b/tests/queries/0_stateless/00948_to_valid_utf8.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00948_to_valid_utf8.reference
rename to tests/queries/0_stateless/00948_to_valid_utf8.reference
diff --git a/dbms/tests/queries/0_stateless/00948_to_valid_utf8.sql b/tests/queries/0_stateless/00948_to_valid_utf8.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00948_to_valid_utf8.sql
rename to tests/queries/0_stateless/00948_to_valid_utf8.sql
diff --git a/dbms/tests/queries/0_stateless/00948_values_interpreter_template.reference b/tests/queries/0_stateless/00948_values_interpreter_template.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00948_values_interpreter_template.reference
rename to tests/queries/0_stateless/00948_values_interpreter_template.reference
diff --git a/dbms/tests/queries/0_stateless/00948_values_interpreter_template.sql b/tests/queries/0_stateless/00948_values_interpreter_template.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00948_values_interpreter_template.sql
rename to tests/queries/0_stateless/00948_values_interpreter_template.sql
diff --git a/dbms/tests/queries/0_stateless/00949_format.reference b/tests/queries/0_stateless/00949_format.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00949_format.reference
rename to tests/queries/0_stateless/00949_format.reference
diff --git a/dbms/tests/queries/0_stateless/00949_format.sql b/tests/queries/0_stateless/00949_format.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00949_format.sql
rename to tests/queries/0_stateless/00949_format.sql
diff --git a/dbms/tests/queries/0_stateless/00950_bad_alloc_when_truncate_join_storage.reference b/tests/queries/0_stateless/00950_bad_alloc_when_truncate_join_storage.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00950_bad_alloc_when_truncate_join_storage.reference
rename to tests/queries/0_stateless/00950_bad_alloc_when_truncate_join_storage.reference
diff --git a/dbms/tests/queries/0_stateless/00950_bad_alloc_when_truncate_join_storage.sql b/tests/queries/0_stateless/00950_bad_alloc_when_truncate_join_storage.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00950_bad_alloc_when_truncate_join_storage.sql
rename to tests/queries/0_stateless/00950_bad_alloc_when_truncate_join_storage.sql
diff --git a/dbms/tests/queries/0_stateless/00950_default_prewhere.reference b/tests/queries/0_stateless/00950_default_prewhere.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00950_default_prewhere.reference
rename to tests/queries/0_stateless/00950_default_prewhere.reference
diff --git a/dbms/tests/queries/0_stateless/00950_default_prewhere.sql b/tests/queries/0_stateless/00950_default_prewhere.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00950_default_prewhere.sql
rename to tests/queries/0_stateless/00950_default_prewhere.sql
diff --git a/dbms/tests/queries/0_stateless/00950_dict_get.reference b/tests/queries/0_stateless/00950_dict_get.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00950_dict_get.reference
rename to tests/queries/0_stateless/00950_dict_get.reference
diff --git a/dbms/tests/queries/0_stateless/00950_dict_get.sql b/tests/queries/0_stateless/00950_dict_get.sql
similarity index 99%
rename from dbms/tests/queries/0_stateless/00950_dict_get.sql
rename to tests/queries/0_stateless/00950_dict_get.sql
index 2483a21c0d3..73fddce91c7 100644
--- a/dbms/tests/queries/0_stateless/00950_dict_get.sql
+++ b/tests/queries/0_stateless/00950_dict_get.sql
@@ -1,4 +1,4 @@
--- Must use `test_00950` database and these tables - they're configured in dbms/tests/*_dictionary.xml
+-- Must use `test_00950` database and these tables - they're configured in tests/*_dictionary.xml
 create database if not exists test_00950;
 use test_00950;
 drop table if exists ints;
diff --git a/dbms/tests/queries/0_stateless/00950_test_double_delta_codec.reference b/tests/queries/0_stateless/00950_test_double_delta_codec.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00950_test_double_delta_codec.reference
rename to tests/queries/0_stateless/00950_test_double_delta_codec.reference
diff --git a/dbms/tests/queries/0_stateless/00950_test_double_delta_codec.sql b/tests/queries/0_stateless/00950_test_double_delta_codec.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00950_test_double_delta_codec.sql
rename to tests/queries/0_stateless/00950_test_double_delta_codec.sql
diff --git a/dbms/tests/queries/0_stateless/00950_test_gorilla_codec.reference b/tests/queries/0_stateless/00950_test_gorilla_codec.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00950_test_gorilla_codec.reference
rename to tests/queries/0_stateless/00950_test_gorilla_codec.reference
diff --git a/dbms/tests/queries/0_stateless/00950_test_gorilla_codec.sql b/tests/queries/0_stateless/00950_test_gorilla_codec.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00950_test_gorilla_codec.sql
rename to tests/queries/0_stateless/00950_test_gorilla_codec.sql
diff --git a/dbms/tests/queries/0_stateless/00951_ngram_search.reference b/tests/queries/0_stateless/00951_ngram_search.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00951_ngram_search.reference
rename to tests/queries/0_stateless/00951_ngram_search.reference
diff --git a/dbms/tests/queries/0_stateless/00951_ngram_search.sql b/tests/queries/0_stateless/00951_ngram_search.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00951_ngram_search.sql
rename to tests/queries/0_stateless/00951_ngram_search.sql
diff --git a/dbms/tests/queries/0_stateless/00952_basic_constraints.reference b/tests/queries/0_stateless/00952_basic_constraints.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00952_basic_constraints.reference
rename to tests/queries/0_stateless/00952_basic_constraints.reference
diff --git a/dbms/tests/queries/0_stateless/00952_basic_constraints.sh b/tests/queries/0_stateless/00952_basic_constraints.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00952_basic_constraints.sh
rename to tests/queries/0_stateless/00952_basic_constraints.sh
diff --git a/dbms/tests/queries/0_stateless/00952_input_function.reference b/tests/queries/0_stateless/00952_input_function.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00952_input_function.reference
rename to tests/queries/0_stateless/00952_input_function.reference
diff --git a/dbms/tests/queries/0_stateless/00952_input_function.sh b/tests/queries/0_stateless/00952_input_function.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00952_input_function.sh
rename to tests/queries/0_stateless/00952_input_function.sh
diff --git a/dbms/tests/queries/0_stateless/00952_insert_into_distributed_with_materialized_column.reference b/tests/queries/0_stateless/00952_insert_into_distributed_with_materialized_column.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00952_insert_into_distributed_with_materialized_column.reference
rename to tests/queries/0_stateless/00952_insert_into_distributed_with_materialized_column.reference
diff --git a/dbms/tests/queries/0_stateless/00952_insert_into_distributed_with_materialized_column.sql b/tests/queries/0_stateless/00952_insert_into_distributed_with_materialized_column.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00952_insert_into_distributed_with_materialized_column.sql
rename to tests/queries/0_stateless/00952_insert_into_distributed_with_materialized_column.sql
diff --git a/dbms/tests/queries/0_stateless/00952_part_frozen_info.reference b/tests/queries/0_stateless/00952_part_frozen_info.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00952_part_frozen_info.reference
rename to tests/queries/0_stateless/00952_part_frozen_info.reference
diff --git a/dbms/tests/queries/0_stateless/00952_part_frozen_info.sql b/tests/queries/0_stateless/00952_part_frozen_info.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00952_part_frozen_info.sql
rename to tests/queries/0_stateless/00952_part_frozen_info.sql
diff --git a/dbms/tests/queries/0_stateless/00953_constraints_operations.reference b/tests/queries/0_stateless/00953_constraints_operations.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00953_constraints_operations.reference
rename to tests/queries/0_stateless/00953_constraints_operations.reference
diff --git a/dbms/tests/queries/0_stateless/00953_constraints_operations.sh b/tests/queries/0_stateless/00953_constraints_operations.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00953_constraints_operations.sh
rename to tests/queries/0_stateless/00953_constraints_operations.sh
diff --git a/dbms/tests/queries/0_stateless/00953_indices_alter_exceptions.reference b/tests/queries/0_stateless/00953_indices_alter_exceptions.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00953_indices_alter_exceptions.reference
rename to tests/queries/0_stateless/00953_indices_alter_exceptions.reference
diff --git a/dbms/tests/queries/0_stateless/00953_indices_alter_exceptions.sh b/tests/queries/0_stateless/00953_indices_alter_exceptions.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00953_indices_alter_exceptions.sh
rename to tests/queries/0_stateless/00953_indices_alter_exceptions.sh
diff --git a/dbms/tests/queries/0_stateless/00953_moving_functions.reference b/tests/queries/0_stateless/00953_moving_functions.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00953_moving_functions.reference
rename to tests/queries/0_stateless/00953_moving_functions.reference
diff --git a/dbms/tests/queries/0_stateless/00953_moving_functions.sql b/tests/queries/0_stateless/00953_moving_functions.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00953_moving_functions.sql
rename to tests/queries/0_stateless/00953_moving_functions.sql
diff --git a/dbms/tests/queries/0_stateless/00953_zookeeper_suetin_deduplication_bug.reference b/tests/queries/0_stateless/00953_zookeeper_suetin_deduplication_bug.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00953_zookeeper_suetin_deduplication_bug.reference
rename to tests/queries/0_stateless/00953_zookeeper_suetin_deduplication_bug.reference
diff --git a/dbms/tests/queries/0_stateless/00953_zookeeper_suetin_deduplication_bug.sh b/tests/queries/0_stateless/00953_zookeeper_suetin_deduplication_bug.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00953_zookeeper_suetin_deduplication_bug.sh
rename to tests/queries/0_stateless/00953_zookeeper_suetin_deduplication_bug.sh
diff --git a/dbms/tests/queries/0_stateless/00954_client_prepared_statements.reference b/tests/queries/0_stateless/00954_client_prepared_statements.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00954_client_prepared_statements.reference
rename to tests/queries/0_stateless/00954_client_prepared_statements.reference
diff --git a/dbms/tests/queries/0_stateless/00954_client_prepared_statements.sh b/tests/queries/0_stateless/00954_client_prepared_statements.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00954_client_prepared_statements.sh
rename to tests/queries/0_stateless/00954_client_prepared_statements.sh
diff --git a/dbms/tests/queries/0_stateless/00954_resample_combinator.reference b/tests/queries/0_stateless/00954_resample_combinator.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00954_resample_combinator.reference
rename to tests/queries/0_stateless/00954_resample_combinator.reference
diff --git a/dbms/tests/queries/0_stateless/00954_resample_combinator.sql b/tests/queries/0_stateless/00954_resample_combinator.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00954_resample_combinator.sql
rename to tests/queries/0_stateless/00954_resample_combinator.sql
diff --git a/dbms/tests/queries/0_stateless/00955_complex_prepared_statements.reference b/tests/queries/0_stateless/00955_complex_prepared_statements.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00955_complex_prepared_statements.reference
rename to tests/queries/0_stateless/00955_complex_prepared_statements.reference
diff --git a/dbms/tests/queries/0_stateless/00955_complex_prepared_statements.sh b/tests/queries/0_stateless/00955_complex_prepared_statements.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00955_complex_prepared_statements.sh
rename to tests/queries/0_stateless/00955_complex_prepared_statements.sh
diff --git a/dbms/tests/queries/0_stateless/00955_test_final_mark.reference b/tests/queries/0_stateless/00955_test_final_mark.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00955_test_final_mark.reference
rename to tests/queries/0_stateless/00955_test_final_mark.reference
diff --git a/dbms/tests/queries/0_stateless/00955_test_final_mark.sql b/tests/queries/0_stateless/00955_test_final_mark.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00955_test_final_mark.sql
rename to tests/queries/0_stateless/00955_test_final_mark.sql
diff --git a/dbms/tests/queries/0_stateless/00955_test_final_mark_use.reference b/tests/queries/0_stateless/00955_test_final_mark_use.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00955_test_final_mark_use.reference
rename to tests/queries/0_stateless/00955_test_final_mark_use.reference
diff --git a/dbms/tests/queries/0_stateless/00955_test_final_mark_use.sh b/tests/queries/0_stateless/00955_test_final_mark_use.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00955_test_final_mark_use.sh
rename to tests/queries/0_stateless/00955_test_final_mark_use.sh
diff --git a/dbms/tests/queries/0_stateless/00956_http_prepared_statements.reference b/tests/queries/0_stateless/00956_http_prepared_statements.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00956_http_prepared_statements.reference
rename to tests/queries/0_stateless/00956_http_prepared_statements.reference
diff --git a/dbms/tests/queries/0_stateless/00956_http_prepared_statements.sh b/tests/queries/0_stateless/00956_http_prepared_statements.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00956_http_prepared_statements.sh
rename to tests/queries/0_stateless/00956_http_prepared_statements.sh
diff --git a/dbms/tests/queries/0_stateless/00956_join_use_nulls_with_array_column.reference b/tests/queries/0_stateless/00956_join_use_nulls_with_array_column.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00956_join_use_nulls_with_array_column.reference
rename to tests/queries/0_stateless/00956_join_use_nulls_with_array_column.reference
diff --git a/dbms/tests/queries/0_stateless/00956_join_use_nulls_with_array_column.sql b/tests/queries/0_stateless/00956_join_use_nulls_with_array_column.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00956_join_use_nulls_with_array_column.sql
rename to tests/queries/0_stateless/00956_join_use_nulls_with_array_column.sql
diff --git a/dbms/tests/queries/0_stateless/00956_sensitive_data_masking.reference b/tests/queries/0_stateless/00956_sensitive_data_masking.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00956_sensitive_data_masking.reference
rename to tests/queries/0_stateless/00956_sensitive_data_masking.reference
diff --git a/dbms/tests/queries/0_stateless/00956_sensitive_data_masking.sh b/tests/queries/0_stateless/00956_sensitive_data_masking.sh
similarity index 97%
rename from dbms/tests/queries/0_stateless/00956_sensitive_data_masking.sh
rename to tests/queries/0_stateless/00956_sensitive_data_masking.sh
index 0c5bd753f26..0f76c34eaff 100755
--- a/dbms/tests/queries/0_stateless/00956_sensitive_data_masking.sh
+++ b/tests/queries/0_stateless/00956_sensitive_data_masking.sh
@@ -2,9 +2,9 @@
 # Get all server logs
 export CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL="trace"
 
-#export CLICKHOUSE_BINARY='../../../../build-vscode/Debug/dbms/programs/clickhouse'
+#export CLICKHOUSE_BINARY='../../../../build-vscode/Debug/programs/clickhouse'
 #export CLICKHOUSE_PORT_TCP=59000
-#export CLICKHOUSE_CLIENT_BINARY='../../../../cmake-build-debug/dbms/programs/clickhouse client'
+#export CLICKHOUSE_CLIENT_BINARY='../../../../cmake-build-debug/programs/clickhouse client'
 
 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 . $CURDIR/../shell_config.sh
diff --git a/dbms/tests/queries/0_stateless/00957_coalesce_const_nullable_crash.reference b/tests/queries/0_stateless/00957_coalesce_const_nullable_crash.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00957_coalesce_const_nullable_crash.reference
rename to tests/queries/0_stateless/00957_coalesce_const_nullable_crash.reference
diff --git a/dbms/tests/queries/0_stateless/00957_coalesce_const_nullable_crash.sql b/tests/queries/0_stateless/00957_coalesce_const_nullable_crash.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00957_coalesce_const_nullable_crash.sql
rename to tests/queries/0_stateless/00957_coalesce_const_nullable_crash.sql
diff --git a/dbms/tests/queries/0_stateless/00957_delta_diff_bug.reference b/tests/queries/0_stateless/00957_delta_diff_bug.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00957_delta_diff_bug.reference
rename to tests/queries/0_stateless/00957_delta_diff_bug.reference
diff --git a/dbms/tests/queries/0_stateless/00957_delta_diff_bug.sql b/tests/queries/0_stateless/00957_delta_diff_bug.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00957_delta_diff_bug.sql
rename to tests/queries/0_stateless/00957_delta_diff_bug.sql
diff --git a/dbms/tests/queries/0_stateless/00957_format_with_clashed_aliases.reference b/tests/queries/0_stateless/00957_format_with_clashed_aliases.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00957_format_with_clashed_aliases.reference
rename to tests/queries/0_stateless/00957_format_with_clashed_aliases.reference
diff --git a/dbms/tests/queries/0_stateless/00957_format_with_clashed_aliases.sh b/tests/queries/0_stateless/00957_format_with_clashed_aliases.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00957_format_with_clashed_aliases.sh
rename to tests/queries/0_stateless/00957_format_with_clashed_aliases.sh
diff --git a/dbms/tests/queries/0_stateless/00957_neighbor.reference b/tests/queries/0_stateless/00957_neighbor.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00957_neighbor.reference
rename to tests/queries/0_stateless/00957_neighbor.reference
diff --git a/dbms/tests/queries/0_stateless/00957_neighbor.sql b/tests/queries/0_stateless/00957_neighbor.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00957_neighbor.sql
rename to tests/queries/0_stateless/00957_neighbor.sql
diff --git a/dbms/tests/queries/0_stateless/00958_format_of_tuple_array_element.reference b/tests/queries/0_stateless/00958_format_of_tuple_array_element.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00958_format_of_tuple_array_element.reference
rename to tests/queries/0_stateless/00958_format_of_tuple_array_element.reference
diff --git a/dbms/tests/queries/0_stateless/00958_format_of_tuple_array_element.sh b/tests/queries/0_stateless/00958_format_of_tuple_array_element.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00958_format_of_tuple_array_element.sh
rename to tests/queries/0_stateless/00958_format_of_tuple_array_element.sh
diff --git a/dbms/tests/queries/0_stateless/00959_format_with_different_aliases.reference b/tests/queries/0_stateless/00959_format_with_different_aliases.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00959_format_with_different_aliases.reference
rename to tests/queries/0_stateless/00959_format_with_different_aliases.reference
diff --git a/dbms/tests/queries/0_stateless/00959_format_with_different_aliases.sh b/tests/queries/0_stateless/00959_format_with_different_aliases.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00959_format_with_different_aliases.sh
rename to tests/queries/0_stateless/00959_format_with_different_aliases.sh
diff --git a/dbms/tests/queries/0_stateless/00960_eval_ml_method_const.reference b/tests/queries/0_stateless/00960_eval_ml_method_const.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00960_eval_ml_method_const.reference
rename to tests/queries/0_stateless/00960_eval_ml_method_const.reference
diff --git a/dbms/tests/queries/0_stateless/00960_eval_ml_method_const.sql b/tests/queries/0_stateless/00960_eval_ml_method_const.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00960_eval_ml_method_const.sql
rename to tests/queries/0_stateless/00960_eval_ml_method_const.sql
diff --git a/dbms/tests/queries/0_stateless/00960_live_view_watch_events_live.py b/tests/queries/0_stateless/00960_live_view_watch_events_live.py
similarity index 100%
rename from dbms/tests/queries/0_stateless/00960_live_view_watch_events_live.py
rename to tests/queries/0_stateless/00960_live_view_watch_events_live.py
diff --git a/dbms/tests/queries/0_stateless/00960_live_view_watch_events_live.reference b/tests/queries/0_stateless/00960_live_view_watch_events_live.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00960_live_view_watch_events_live.reference
rename to tests/queries/0_stateless/00960_live_view_watch_events_live.reference
diff --git a/dbms/tests/queries/0_stateless/00961_check_table.reference b/tests/queries/0_stateless/00961_check_table.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00961_check_table.reference
rename to tests/queries/0_stateless/00961_check_table.reference
diff --git a/dbms/tests/queries/0_stateless/00961_check_table.sql b/tests/queries/0_stateless/00961_check_table.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00961_check_table.sql
rename to tests/queries/0_stateless/00961_check_table.sql
diff --git a/dbms/tests/queries/0_stateless/00961_checksums_in_system_parts_columns_table.reference b/tests/queries/0_stateless/00961_checksums_in_system_parts_columns_table.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00961_checksums_in_system_parts_columns_table.reference
rename to tests/queries/0_stateless/00961_checksums_in_system_parts_columns_table.reference
diff --git a/dbms/tests/queries/0_stateless/00961_checksums_in_system_parts_columns_table.sql b/tests/queries/0_stateless/00961_checksums_in_system_parts_columns_table.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00961_checksums_in_system_parts_columns_table.sql
rename to tests/queries/0_stateless/00961_checksums_in_system_parts_columns_table.sql
diff --git a/dbms/tests/queries/0_stateless/00961_temporary_live_view_watch.reference b/tests/queries/0_stateless/00961_temporary_live_view_watch.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00961_temporary_live_view_watch.reference
rename to tests/queries/0_stateless/00961_temporary_live_view_watch.reference
diff --git a/dbms/tests/queries/0_stateless/00961_temporary_live_view_watch.sql b/tests/queries/0_stateless/00961_temporary_live_view_watch.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00961_temporary_live_view_watch.sql
rename to tests/queries/0_stateless/00961_temporary_live_view_watch.sql
diff --git a/dbms/tests/queries/0_stateless/00961_visit_param_buffer_underflow.reference b/tests/queries/0_stateless/00961_visit_param_buffer_underflow.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00961_visit_param_buffer_underflow.reference
rename to tests/queries/0_stateless/00961_visit_param_buffer_underflow.reference
diff --git a/dbms/tests/queries/0_stateless/00961_visit_param_buffer_underflow.sql b/tests/queries/0_stateless/00961_visit_param_buffer_underflow.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00961_visit_param_buffer_underflow.sql
rename to tests/queries/0_stateless/00961_visit_param_buffer_underflow.sql
diff --git a/dbms/tests/queries/0_stateless/00962_enumNotExect.reference b/tests/queries/0_stateless/00962_enumNotExect.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00962_enumNotExect.reference
rename to tests/queries/0_stateless/00962_enumNotExect.reference
diff --git a/dbms/tests/queries/0_stateless/00962_enumNotExect.sql b/tests/queries/0_stateless/00962_enumNotExect.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00962_enumNotExect.sql
rename to tests/queries/0_stateless/00962_enumNotExect.sql
diff --git a/dbms/tests/queries/0_stateless/00962_temporary_live_view_watch_live.py b/tests/queries/0_stateless/00962_temporary_live_view_watch_live.py
similarity index 100%
rename from dbms/tests/queries/0_stateless/00962_temporary_live_view_watch_live.py
rename to tests/queries/0_stateless/00962_temporary_live_view_watch_live.py
diff --git a/dbms/tests/queries/0_stateless/00962_temporary_live_view_watch_live.reference b/tests/queries/0_stateless/00962_temporary_live_view_watch_live.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00962_temporary_live_view_watch_live.reference
rename to tests/queries/0_stateless/00962_temporary_live_view_watch_live.reference
diff --git a/dbms/tests/queries/0_stateless/00962_visit_param_various.reference b/tests/queries/0_stateless/00962_visit_param_various.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00962_visit_param_various.reference
rename to tests/queries/0_stateless/00962_visit_param_various.reference
diff --git a/dbms/tests/queries/0_stateless/00962_visit_param_various.sql b/tests/queries/0_stateless/00962_visit_param_various.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00962_visit_param_various.sql
rename to tests/queries/0_stateless/00962_visit_param_various.sql
diff --git a/dbms/tests/queries/0_stateless/00963_achimbab.reference b/tests/queries/0_stateless/00963_achimbab.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00963_achimbab.reference
rename to tests/queries/0_stateless/00963_achimbab.reference
diff --git a/dbms/tests/queries/0_stateless/00963_achimbab.sql b/tests/queries/0_stateless/00963_achimbab.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00963_achimbab.sql
rename to tests/queries/0_stateless/00963_achimbab.sql
diff --git a/dbms/tests/queries/0_stateless/00963_startsWith_force_primary_key.reference b/tests/queries/0_stateless/00963_startsWith_force_primary_key.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00963_startsWith_force_primary_key.reference
rename to tests/queries/0_stateless/00963_startsWith_force_primary_key.reference
diff --git a/dbms/tests/queries/0_stateless/00963_startsWith_force_primary_key.sql b/tests/queries/0_stateless/00963_startsWith_force_primary_key.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00963_startsWith_force_primary_key.sql
rename to tests/queries/0_stateless/00963_startsWith_force_primary_key.sql
diff --git a/dbms/tests/queries/0_stateless/00963_temporary_live_view_watch_live_timeout.py.disabled b/tests/queries/0_stateless/00963_temporary_live_view_watch_live_timeout.py.disabled
similarity index 100%
rename from dbms/tests/queries/0_stateless/00963_temporary_live_view_watch_live_timeout.py.disabled
rename to tests/queries/0_stateless/00963_temporary_live_view_watch_live_timeout.py.disabled
diff --git a/dbms/tests/queries/0_stateless/00963_temporary_live_view_watch_live_timeout.reference b/tests/queries/0_stateless/00963_temporary_live_view_watch_live_timeout.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00963_temporary_live_view_watch_live_timeout.reference
rename to tests/queries/0_stateless/00963_temporary_live_view_watch_live_timeout.reference
diff --git a/dbms/tests/queries/0_stateless/00964_bloom_index_string_functions.reference b/tests/queries/0_stateless/00964_bloom_index_string_functions.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00964_bloom_index_string_functions.reference
rename to tests/queries/0_stateless/00964_bloom_index_string_functions.reference
diff --git a/dbms/tests/queries/0_stateless/00964_bloom_index_string_functions.sh b/tests/queries/0_stateless/00964_bloom_index_string_functions.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00964_bloom_index_string_functions.sh
rename to tests/queries/0_stateless/00964_bloom_index_string_functions.sh
diff --git a/dbms/tests/queries/0_stateless/00964_live_view_watch_events_heartbeat.py b/tests/queries/0_stateless/00964_live_view_watch_events_heartbeat.py
similarity index 100%
rename from dbms/tests/queries/0_stateless/00964_live_view_watch_events_heartbeat.py
rename to tests/queries/0_stateless/00964_live_view_watch_events_heartbeat.py
diff --git a/dbms/tests/queries/0_stateless/00964_live_view_watch_events_heartbeat.reference b/tests/queries/0_stateless/00964_live_view_watch_events_heartbeat.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00964_live_view_watch_events_heartbeat.reference
rename to tests/queries/0_stateless/00964_live_view_watch_events_heartbeat.reference
diff --git a/dbms/tests/queries/0_stateless/00964_os_thread_priority.reference b/tests/queries/0_stateless/00964_os_thread_priority.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00964_os_thread_priority.reference
rename to tests/queries/0_stateless/00964_os_thread_priority.reference
diff --git a/dbms/tests/queries/0_stateless/00964_os_thread_priority.sql b/tests/queries/0_stateless/00964_os_thread_priority.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00964_os_thread_priority.sql
rename to tests/queries/0_stateless/00964_os_thread_priority.sql
diff --git a/dbms/tests/queries/0_stateless/00965_live_view_watch_heartbeat.py b/tests/queries/0_stateless/00965_live_view_watch_heartbeat.py
similarity index 100%
rename from dbms/tests/queries/0_stateless/00965_live_view_watch_heartbeat.py
rename to tests/queries/0_stateless/00965_live_view_watch_heartbeat.py
diff --git a/dbms/tests/queries/0_stateless/00965_live_view_watch_heartbeat.reference b/tests/queries/0_stateless/00965_live_view_watch_heartbeat.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00965_live_view_watch_heartbeat.reference
rename to tests/queries/0_stateless/00965_live_view_watch_heartbeat.reference
diff --git a/dbms/tests/queries/0_stateless/00965_logs_level_bugfix.reference b/tests/queries/0_stateless/00965_logs_level_bugfix.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00965_logs_level_bugfix.reference
rename to tests/queries/0_stateless/00965_logs_level_bugfix.reference
diff --git a/dbms/tests/queries/0_stateless/00965_logs_level_bugfix.sh b/tests/queries/0_stateless/00965_logs_level_bugfix.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00965_logs_level_bugfix.sh
rename to tests/queries/0_stateless/00965_logs_level_bugfix.sh
diff --git a/dbms/tests/queries/0_stateless/00965_send_logs_level_concurrent_queries.reference b/tests/queries/0_stateless/00965_send_logs_level_concurrent_queries.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00965_send_logs_level_concurrent_queries.reference
rename to tests/queries/0_stateless/00965_send_logs_level_concurrent_queries.reference
diff --git a/dbms/tests/queries/0_stateless/00965_send_logs_level_concurrent_queries.sh b/tests/queries/0_stateless/00965_send_logs_level_concurrent_queries.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00965_send_logs_level_concurrent_queries.sh
rename to tests/queries/0_stateless/00965_send_logs_level_concurrent_queries.sh
diff --git a/dbms/tests/queries/0_stateless/00965_set_index_string_functions.reference b/tests/queries/0_stateless/00965_set_index_string_functions.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00965_set_index_string_functions.reference
rename to tests/queries/0_stateless/00965_set_index_string_functions.reference
diff --git a/dbms/tests/queries/0_stateless/00965_set_index_string_functions.sh b/tests/queries/0_stateless/00965_set_index_string_functions.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00965_set_index_string_functions.sh
rename to tests/queries/0_stateless/00965_set_index_string_functions.sh
diff --git a/dbms/tests/queries/0_stateless/00965_shard_unresolvable_addresses.reference b/tests/queries/0_stateless/00965_shard_unresolvable_addresses.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00965_shard_unresolvable_addresses.reference
rename to tests/queries/0_stateless/00965_shard_unresolvable_addresses.reference
diff --git a/dbms/tests/queries/0_stateless/00965_shard_unresolvable_addresses.sql b/tests/queries/0_stateless/00965_shard_unresolvable_addresses.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00965_shard_unresolvable_addresses.sql
rename to tests/queries/0_stateless/00965_shard_unresolvable_addresses.sql
diff --git a/dbms/tests/queries/0_stateless/00966_invalid_json_must_not_parse.reference b/tests/queries/0_stateless/00966_invalid_json_must_not_parse.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00966_invalid_json_must_not_parse.reference
rename to tests/queries/0_stateless/00966_invalid_json_must_not_parse.reference
diff --git a/dbms/tests/queries/0_stateless/00966_invalid_json_must_not_parse.sql b/tests/queries/0_stateless/00966_invalid_json_must_not_parse.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00966_invalid_json_must_not_parse.sql
rename to tests/queries/0_stateless/00966_invalid_json_must_not_parse.sql
diff --git a/dbms/tests/queries/0_stateless/00966_live_view_watch_events_http.py b/tests/queries/0_stateless/00966_live_view_watch_events_http.py
similarity index 100%
rename from dbms/tests/queries/0_stateless/00966_live_view_watch_events_http.py
rename to tests/queries/0_stateless/00966_live_view_watch_events_http.py
diff --git a/dbms/tests/queries/0_stateless/00966_live_view_watch_events_http.reference b/tests/queries/0_stateless/00966_live_view_watch_events_http.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00966_live_view_watch_events_http.reference
rename to tests/queries/0_stateless/00966_live_view_watch_events_http.reference
diff --git a/dbms/tests/queries/0_stateless/00967_live_view_watch_http.py b/tests/queries/0_stateless/00967_live_view_watch_http.py
similarity index 100%
rename from dbms/tests/queries/0_stateless/00967_live_view_watch_http.py
rename to tests/queries/0_stateless/00967_live_view_watch_http.py
diff --git a/dbms/tests/queries/0_stateless/00967_live_view_watch_http.reference b/tests/queries/0_stateless/00967_live_view_watch_http.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00967_live_view_watch_http.reference
rename to tests/queries/0_stateless/00967_live_view_watch_http.reference
diff --git a/dbms/tests/queries/0_stateless/00967_ubsan_bit_test.reference b/tests/queries/0_stateless/00967_ubsan_bit_test.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00967_ubsan_bit_test.reference
rename to tests/queries/0_stateless/00967_ubsan_bit_test.reference
diff --git a/dbms/tests/queries/0_stateless/00967_ubsan_bit_test.sql b/tests/queries/0_stateless/00967_ubsan_bit_test.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00967_ubsan_bit_test.sql
rename to tests/queries/0_stateless/00967_ubsan_bit_test.sql
diff --git a/dbms/tests/queries/0_stateless/00968_file_engine_in_subquery.reference b/tests/queries/0_stateless/00968_file_engine_in_subquery.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00968_file_engine_in_subquery.reference
rename to tests/queries/0_stateless/00968_file_engine_in_subquery.reference
diff --git a/dbms/tests/queries/0_stateless/00968_file_engine_in_subquery.sql b/tests/queries/0_stateless/00968_file_engine_in_subquery.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00968_file_engine_in_subquery.sql
rename to tests/queries/0_stateless/00968_file_engine_in_subquery.sql
diff --git a/dbms/tests/queries/0_stateless/00968_live_view_select_format_jsoneachrowwithprogress.reference b/tests/queries/0_stateless/00968_live_view_select_format_jsoneachrowwithprogress.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00968_live_view_select_format_jsoneachrowwithprogress.reference
rename to tests/queries/0_stateless/00968_live_view_select_format_jsoneachrowwithprogress.reference
diff --git a/dbms/tests/queries/0_stateless/00968_live_view_select_format_jsoneachrowwithprogress.sql b/tests/queries/0_stateless/00968_live_view_select_format_jsoneachrowwithprogress.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00968_live_view_select_format_jsoneachrowwithprogress.sql
rename to tests/queries/0_stateless/00968_live_view_select_format_jsoneachrowwithprogress.sql
diff --git a/dbms/tests/queries/0_stateless/00968_roundAge.reference b/tests/queries/0_stateless/00968_roundAge.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00968_roundAge.reference
rename to tests/queries/0_stateless/00968_roundAge.reference
diff --git a/dbms/tests/queries/0_stateless/00968_roundAge.sql b/tests/queries/0_stateless/00968_roundAge.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00968_roundAge.sql
rename to tests/queries/0_stateless/00968_roundAge.sql
diff --git a/dbms/tests/queries/0_stateless/00969_columns_clause.reference b/tests/queries/0_stateless/00969_columns_clause.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00969_columns_clause.reference
rename to tests/queries/0_stateless/00969_columns_clause.reference
diff --git a/dbms/tests/queries/0_stateless/00969_columns_clause.sql b/tests/queries/0_stateless/00969_columns_clause.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00969_columns_clause.sql
rename to tests/queries/0_stateless/00969_columns_clause.sql
diff --git a/dbms/tests/queries/0_stateless/00969_live_view_watch_format_jsoneachrowwithprogress.reference b/tests/queries/0_stateless/00969_live_view_watch_format_jsoneachrowwithprogress.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00969_live_view_watch_format_jsoneachrowwithprogress.reference
rename to tests/queries/0_stateless/00969_live_view_watch_format_jsoneachrowwithprogress.reference
diff --git a/dbms/tests/queries/0_stateless/00969_live_view_watch_format_jsoneachrowwithprogress.sql b/tests/queries/0_stateless/00969_live_view_watch_format_jsoneachrowwithprogress.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00969_live_view_watch_format_jsoneachrowwithprogress.sql
rename to tests/queries/0_stateless/00969_live_view_watch_format_jsoneachrowwithprogress.sql
diff --git a/dbms/tests/queries/0_stateless/00969_roundDuration.reference b/tests/queries/0_stateless/00969_roundDuration.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00969_roundDuration.reference
rename to tests/queries/0_stateless/00969_roundDuration.reference
diff --git a/dbms/tests/queries/0_stateless/00969_roundDuration.sql b/tests/queries/0_stateless/00969_roundDuration.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00969_roundDuration.sql
rename to tests/queries/0_stateless/00969_roundDuration.sql
diff --git a/dbms/tests/queries/0_stateless/00970_live_view_watch_events_http_heartbeat.py b/tests/queries/0_stateless/00970_live_view_watch_events_http_heartbeat.py
similarity index 100%
rename from dbms/tests/queries/0_stateless/00970_live_view_watch_events_http_heartbeat.py
rename to tests/queries/0_stateless/00970_live_view_watch_events_http_heartbeat.py
diff --git a/dbms/tests/queries/0_stateless/00970_live_view_watch_events_http_heartbeat.reference b/tests/queries/0_stateless/00970_live_view_watch_events_http_heartbeat.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00970_live_view_watch_events_http_heartbeat.reference
rename to tests/queries/0_stateless/00970_live_view_watch_events_http_heartbeat.reference
diff --git a/dbms/tests/queries/0_stateless/00970_substring_arg_validation.reference b/tests/queries/0_stateless/00970_substring_arg_validation.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00970_substring_arg_validation.reference
rename to tests/queries/0_stateless/00970_substring_arg_validation.reference
diff --git a/dbms/tests/queries/0_stateless/00970_substring_arg_validation.sql b/tests/queries/0_stateless/00970_substring_arg_validation.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00970_substring_arg_validation.sql
rename to tests/queries/0_stateless/00970_substring_arg_validation.sql
diff --git a/dbms/tests/queries/0_stateless/00971_live_view_watch_http_heartbeat.py b/tests/queries/0_stateless/00971_live_view_watch_http_heartbeat.py
similarity index 100%
rename from dbms/tests/queries/0_stateless/00971_live_view_watch_http_heartbeat.py
rename to tests/queries/0_stateless/00971_live_view_watch_http_heartbeat.py
diff --git a/dbms/tests/queries/0_stateless/00971_live_view_watch_http_heartbeat.reference b/tests/queries/0_stateless/00971_live_view_watch_http_heartbeat.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00971_live_view_watch_http_heartbeat.reference
rename to tests/queries/0_stateless/00971_live_view_watch_http_heartbeat.reference
diff --git a/dbms/tests/queries/0_stateless/00971_merge_tree_uniform_read_distribution_and_max_rows_to_read.reference b/tests/queries/0_stateless/00971_merge_tree_uniform_read_distribution_and_max_rows_to_read.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00971_merge_tree_uniform_read_distribution_and_max_rows_to_read.reference
rename to tests/queries/0_stateless/00971_merge_tree_uniform_read_distribution_and_max_rows_to_read.reference
diff --git a/dbms/tests/queries/0_stateless/00971_merge_tree_uniform_read_distribution_and_max_rows_to_read.sql b/tests/queries/0_stateless/00971_merge_tree_uniform_read_distribution_and_max_rows_to_read.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00971_merge_tree_uniform_read_distribution_and_max_rows_to_read.sql
rename to tests/queries/0_stateless/00971_merge_tree_uniform_read_distribution_and_max_rows_to_read.sql
diff --git a/dbms/tests/queries/0_stateless/00971_query_id_in_logs.reference b/tests/queries/0_stateless/00971_query_id_in_logs.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00971_query_id_in_logs.reference
rename to tests/queries/0_stateless/00971_query_id_in_logs.reference
diff --git a/dbms/tests/queries/0_stateless/00971_query_id_in_logs.sh b/tests/queries/0_stateless/00971_query_id_in_logs.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00971_query_id_in_logs.sh
rename to tests/queries/0_stateless/00971_query_id_in_logs.sh
diff --git a/dbms/tests/queries/0_stateless/00972_desc_table_virtual_columns.reference b/tests/queries/0_stateless/00972_desc_table_virtual_columns.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00972_desc_table_virtual_columns.reference
rename to tests/queries/0_stateless/00972_desc_table_virtual_columns.reference
diff --git a/dbms/tests/queries/0_stateless/00972_desc_table_virtual_columns.sql b/tests/queries/0_stateless/00972_desc_table_virtual_columns.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00972_desc_table_virtual_columns.sql
rename to tests/queries/0_stateless/00972_desc_table_virtual_columns.sql
diff --git a/dbms/tests/queries/0_stateless/00972_geohashesInBox.reference b/tests/queries/0_stateless/00972_geohashesInBox.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00972_geohashesInBox.reference
rename to tests/queries/0_stateless/00972_geohashesInBox.reference
diff --git a/dbms/tests/queries/0_stateless/00972_geohashesInBox.sql b/tests/queries/0_stateless/00972_geohashesInBox.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00972_geohashesInBox.sql
rename to tests/queries/0_stateless/00972_geohashesInBox.sql
diff --git a/dbms/tests/queries/0_stateless/00972_live_view_select_1.reference b/tests/queries/0_stateless/00972_live_view_select_1.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00972_live_view_select_1.reference
rename to tests/queries/0_stateless/00972_live_view_select_1.reference
diff --git a/dbms/tests/queries/0_stateless/00972_live_view_select_1.sql b/tests/queries/0_stateless/00972_live_view_select_1.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00972_live_view_select_1.sql
rename to tests/queries/0_stateless/00972_live_view_select_1.sql
diff --git a/dbms/tests/queries/0_stateless/00973_create_table_as_table_function.reference b/tests/queries/0_stateless/00973_create_table_as_table_function.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00973_create_table_as_table_function.reference
rename to tests/queries/0_stateless/00973_create_table_as_table_function.reference
diff --git a/dbms/tests/queries/0_stateless/00973_create_table_as_table_function.sql b/tests/queries/0_stateless/00973_create_table_as_table_function.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00973_create_table_as_table_function.sql
rename to tests/queries/0_stateless/00973_create_table_as_table_function.sql
diff --git a/dbms/tests/queries/0_stateless/00973_live_view_select.reference b/tests/queries/0_stateless/00973_live_view_select.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00973_live_view_select.reference
rename to tests/queries/0_stateless/00973_live_view_select.reference
diff --git a/dbms/tests/queries/0_stateless/00973_live_view_select.sql b/tests/queries/0_stateless/00973_live_view_select.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00973_live_view_select.sql
rename to tests/queries/0_stateless/00973_live_view_select.sql
diff --git a/dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select.reference b/tests/queries/0_stateless/00973_live_view_with_subquery_select.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select.reference
rename to tests/queries/0_stateless/00973_live_view_with_subquery_select.reference
diff --git a/dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select.sql b/tests/queries/0_stateless/00973_live_view_with_subquery_select.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select.sql
rename to tests/queries/0_stateless/00973_live_view_with_subquery_select.sql
diff --git a/dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_join.reference b/tests/queries/0_stateless/00973_live_view_with_subquery_select_join.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_join.reference
rename to tests/queries/0_stateless/00973_live_view_with_subquery_select_join.reference
diff --git a/dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_join.sql b/tests/queries/0_stateless/00973_live_view_with_subquery_select_join.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_join.sql
rename to tests/queries/0_stateless/00973_live_view_with_subquery_select_join.sql
diff --git a/dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_join_no_alias.reference b/tests/queries/0_stateless/00973_live_view_with_subquery_select_join_no_alias.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_join_no_alias.reference
rename to tests/queries/0_stateless/00973_live_view_with_subquery_select_join_no_alias.reference
diff --git a/dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_join_no_alias.sql b/tests/queries/0_stateless/00973_live_view_with_subquery_select_join_no_alias.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_join_no_alias.sql
rename to tests/queries/0_stateless/00973_live_view_with_subquery_select_join_no_alias.sql
diff --git a/dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_nested.reference b/tests/queries/0_stateless/00973_live_view_with_subquery_select_nested.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_nested.reference
rename to tests/queries/0_stateless/00973_live_view_with_subquery_select_nested.reference
diff --git a/dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_nested.sql b/tests/queries/0_stateless/00973_live_view_with_subquery_select_nested.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_nested.sql
rename to tests/queries/0_stateless/00973_live_view_with_subquery_select_nested.sql
diff --git a/dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_nested_with_aggregation.reference b/tests/queries/0_stateless/00973_live_view_with_subquery_select_nested_with_aggregation.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_nested_with_aggregation.reference
rename to tests/queries/0_stateless/00973_live_view_with_subquery_select_nested_with_aggregation.reference
diff --git a/dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_nested_with_aggregation.sql b/tests/queries/0_stateless/00973_live_view_with_subquery_select_nested_with_aggregation.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_nested_with_aggregation.sql
rename to tests/queries/0_stateless/00973_live_view_with_subquery_select_nested_with_aggregation.sql
diff --git a/dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_nested_with_aggregation_table_alias.reference b/tests/queries/0_stateless/00973_live_view_with_subquery_select_nested_with_aggregation_table_alias.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_nested_with_aggregation_table_alias.reference
rename to tests/queries/0_stateless/00973_live_view_with_subquery_select_nested_with_aggregation_table_alias.reference
diff --git a/dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_nested_with_aggregation_table_alias.sql b/tests/queries/0_stateless/00973_live_view_with_subquery_select_nested_with_aggregation_table_alias.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_nested_with_aggregation_table_alias.sql
rename to tests/queries/0_stateless/00973_live_view_with_subquery_select_nested_with_aggregation_table_alias.sql
diff --git a/dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_table_alias.reference b/tests/queries/0_stateless/00973_live_view_with_subquery_select_table_alias.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_table_alias.reference
rename to tests/queries/0_stateless/00973_live_view_with_subquery_select_table_alias.reference
diff --git a/dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_table_alias.sql b/tests/queries/0_stateless/00973_live_view_with_subquery_select_table_alias.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_table_alias.sql
rename to tests/queries/0_stateless/00973_live_view_with_subquery_select_table_alias.sql
diff --git a/dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_with_aggregation.reference b/tests/queries/0_stateless/00973_live_view_with_subquery_select_with_aggregation.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_with_aggregation.reference
rename to tests/queries/0_stateless/00973_live_view_with_subquery_select_with_aggregation.reference
diff --git a/dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_with_aggregation.sql b/tests/queries/0_stateless/00973_live_view_with_subquery_select_with_aggregation.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_with_aggregation.sql
rename to tests/queries/0_stateless/00973_live_view_with_subquery_select_with_aggregation.sql
diff --git a/dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_with_aggregation_in_subquery.reference b/tests/queries/0_stateless/00973_live_view_with_subquery_select_with_aggregation_in_subquery.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_with_aggregation_in_subquery.reference
rename to tests/queries/0_stateless/00973_live_view_with_subquery_select_with_aggregation_in_subquery.reference
diff --git a/dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_with_aggregation_in_subquery.sql b/tests/queries/0_stateless/00973_live_view_with_subquery_select_with_aggregation_in_subquery.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00973_live_view_with_subquery_select_with_aggregation_in_subquery.sql
rename to tests/queries/0_stateless/00973_live_view_with_subquery_select_with_aggregation_in_subquery.sql
diff --git a/dbms/tests/queries/0_stateless/00973_uniq_non_associativity.reference b/tests/queries/0_stateless/00973_uniq_non_associativity.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00973_uniq_non_associativity.reference
rename to tests/queries/0_stateless/00973_uniq_non_associativity.reference
diff --git a/dbms/tests/queries/0_stateless/00973_uniq_non_associativity.sql b/tests/queries/0_stateless/00973_uniq_non_associativity.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00973_uniq_non_associativity.sql
rename to tests/queries/0_stateless/00973_uniq_non_associativity.sql
diff --git a/dbms/tests/queries/0_stateless/00974_adaptive_granularity_secondary_index.reference b/tests/queries/0_stateless/00974_adaptive_granularity_secondary_index.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00974_adaptive_granularity_secondary_index.reference
rename to tests/queries/0_stateless/00974_adaptive_granularity_secondary_index.reference
diff --git a/dbms/tests/queries/0_stateless/00974_adaptive_granularity_secondary_index.sql b/tests/queries/0_stateless/00974_adaptive_granularity_secondary_index.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00974_adaptive_granularity_secondary_index.sql
rename to tests/queries/0_stateless/00974_adaptive_granularity_secondary_index.sql
diff --git a/dbms/tests/queries/0_stateless/00974_bitmapContains_with_primary_key.reference b/tests/queries/0_stateless/00974_bitmapContains_with_primary_key.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00974_bitmapContains_with_primary_key.reference
rename to tests/queries/0_stateless/00974_bitmapContains_with_primary_key.reference
diff --git a/dbms/tests/queries/0_stateless/00974_bitmapContains_with_primary_key.sql b/tests/queries/0_stateless/00974_bitmapContains_with_primary_key.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00974_bitmapContains_with_primary_key.sql
rename to tests/queries/0_stateless/00974_bitmapContains_with_primary_key.sql
diff --git a/dbms/tests/queries/0_stateless/00974_distributed_join_on.reference b/tests/queries/0_stateless/00974_distributed_join_on.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00974_distributed_join_on.reference
rename to tests/queries/0_stateless/00974_distributed_join_on.reference
diff --git a/dbms/tests/queries/0_stateless/00974_distributed_join_on.sql b/tests/queries/0_stateless/00974_distributed_join_on.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00974_distributed_join_on.sql
rename to tests/queries/0_stateless/00974_distributed_join_on.sql
diff --git a/dbms/tests/queries/0_stateless/00974_final_predicate_push_down.reference b/tests/queries/0_stateless/00974_final_predicate_push_down.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00974_final_predicate_push_down.reference
rename to tests/queries/0_stateless/00974_final_predicate_push_down.reference
diff --git a/dbms/tests/queries/0_stateless/00974_final_predicate_push_down.sql b/tests/queries/0_stateless/00974_final_predicate_push_down.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00974_final_predicate_push_down.sql
rename to tests/queries/0_stateless/00974_final_predicate_push_down.sql
diff --git a/dbms/tests/queries/0_stateless/00974_fix_join_on.reference b/tests/queries/0_stateless/00974_fix_join_on.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00974_fix_join_on.reference
rename to tests/queries/0_stateless/00974_fix_join_on.reference
diff --git a/dbms/tests/queries/0_stateless/00974_fix_join_on.sql b/tests/queries/0_stateless/00974_fix_join_on.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00974_fix_join_on.sql
rename to tests/queries/0_stateless/00974_fix_join_on.sql
diff --git a/dbms/tests/queries/0_stateless/00974_full_outer_join.reference b/tests/queries/0_stateless/00974_full_outer_join.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00974_full_outer_join.reference
rename to tests/queries/0_stateless/00974_full_outer_join.reference
diff --git a/dbms/tests/queries/0_stateless/00974_full_outer_join.sql b/tests/queries/0_stateless/00974_full_outer_join.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00974_full_outer_join.sql
rename to tests/queries/0_stateless/00974_full_outer_join.sql
diff --git a/dbms/tests/queries/0_stateless/00974_live_view_select_with_aggregation.reference b/tests/queries/0_stateless/00974_live_view_select_with_aggregation.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00974_live_view_select_with_aggregation.reference
rename to tests/queries/0_stateless/00974_live_view_select_with_aggregation.reference
diff --git a/dbms/tests/queries/0_stateless/00974_live_view_select_with_aggregation.sql b/tests/queries/0_stateless/00974_live_view_select_with_aggregation.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00974_live_view_select_with_aggregation.sql
rename to tests/queries/0_stateless/00974_live_view_select_with_aggregation.sql
diff --git a/dbms/tests/queries/0_stateless/00974_low_cardinality_cast.reference b/tests/queries/0_stateless/00974_low_cardinality_cast.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00974_low_cardinality_cast.reference
rename to tests/queries/0_stateless/00974_low_cardinality_cast.reference
diff --git a/dbms/tests/queries/0_stateless/00974_low_cardinality_cast.sql b/tests/queries/0_stateless/00974_low_cardinality_cast.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00974_low_cardinality_cast.sql
rename to tests/queries/0_stateless/00974_low_cardinality_cast.sql
diff --git a/dbms/tests/queries/0_stateless/00974_primary_key_for_lowCardinality.reference b/tests/queries/0_stateless/00974_primary_key_for_lowCardinality.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00974_primary_key_for_lowCardinality.reference
rename to tests/queries/0_stateless/00974_primary_key_for_lowCardinality.reference
diff --git a/dbms/tests/queries/0_stateless/00974_primary_key_for_lowCardinality.sh b/tests/queries/0_stateless/00974_primary_key_for_lowCardinality.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00974_primary_key_for_lowCardinality.sh
rename to tests/queries/0_stateless/00974_primary_key_for_lowCardinality.sh
diff --git a/dbms/tests/queries/0_stateless/00974_query_profiler.reference b/tests/queries/0_stateless/00974_query_profiler.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00974_query_profiler.reference
rename to tests/queries/0_stateless/00974_query_profiler.reference
diff --git a/dbms/tests/queries/0_stateless/00974_query_profiler.sql b/tests/queries/0_stateless/00974_query_profiler.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00974_query_profiler.sql
rename to tests/queries/0_stateless/00974_query_profiler.sql
diff --git a/dbms/tests/queries/0_stateless/00974_text_log_table_not_empty.reference b/tests/queries/0_stateless/00974_text_log_table_not_empty.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00974_text_log_table_not_empty.reference
rename to tests/queries/0_stateless/00974_text_log_table_not_empty.reference
diff --git a/dbms/tests/queries/0_stateless/00974_text_log_table_not_empty.sh b/tests/queries/0_stateless/00974_text_log_table_not_empty.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00974_text_log_table_not_empty.sh
rename to tests/queries/0_stateless/00974_text_log_table_not_empty.sh
diff --git a/dbms/tests/queries/0_stateless/00975_indices_mutation_replicated_zookeeper.reference b/tests/queries/0_stateless/00975_indices_mutation_replicated_zookeeper.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00975_indices_mutation_replicated_zookeeper.reference
rename to tests/queries/0_stateless/00975_indices_mutation_replicated_zookeeper.reference
diff --git a/dbms/tests/queries/0_stateless/00975_indices_mutation_replicated_zookeeper.sh b/tests/queries/0_stateless/00975_indices_mutation_replicated_zookeeper.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/00975_indices_mutation_replicated_zookeeper.sh
rename to tests/queries/0_stateless/00975_indices_mutation_replicated_zookeeper.sh
diff --git a/dbms/tests/queries/0_stateless/00975_json_hang.reference b/tests/queries/0_stateless/00975_json_hang.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00975_json_hang.reference
rename to tests/queries/0_stateless/00975_json_hang.reference
diff --git a/dbms/tests/queries/0_stateless/00975_json_hang.sql b/tests/queries/0_stateless/00975_json_hang.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00975_json_hang.sql
rename to tests/queries/0_stateless/00975_json_hang.sql
diff --git a/dbms/tests/queries/0_stateless/00975_live_view_create.reference b/tests/queries/0_stateless/00975_live_view_create.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00975_live_view_create.reference
rename to tests/queries/0_stateless/00975_live_view_create.reference
diff --git a/dbms/tests/queries/0_stateless/00975_live_view_create.sql b/tests/queries/0_stateless/00975_live_view_create.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00975_live_view_create.sql
rename to tests/queries/0_stateless/00975_live_view_create.sql
diff --git a/dbms/tests/queries/0_stateless/00975_move_partition_merge_tree.reference b/tests/queries/0_stateless/00975_move_partition_merge_tree.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00975_move_partition_merge_tree.reference
rename to tests/queries/0_stateless/00975_move_partition_merge_tree.reference
diff --git a/dbms/tests/queries/0_stateless/00975_move_partition_merge_tree.sql b/tests/queries/0_stateless/00975_move_partition_merge_tree.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00975_move_partition_merge_tree.sql
rename to tests/queries/0_stateless/00975_move_partition_merge_tree.sql
diff --git a/dbms/tests/queries/0_stateless/00975_recursive_materialized_view.reference b/tests/queries/0_stateless/00975_recursive_materialized_view.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00975_recursive_materialized_view.reference
rename to tests/queries/0_stateless/00975_recursive_materialized_view.reference
diff --git a/dbms/tests/queries/0_stateless/00975_recursive_materialized_view.sql b/tests/queries/0_stateless/00975_recursive_materialized_view.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00975_recursive_materialized_view.sql
rename to
tests/queries/0_stateless/00975_recursive_materialized_view.sql diff --git a/dbms/tests/queries/0_stateless/00975_sample_prewhere_distributed.reference b/tests/queries/0_stateless/00975_sample_prewhere_distributed.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00975_sample_prewhere_distributed.reference rename to tests/queries/0_stateless/00975_sample_prewhere_distributed.reference diff --git a/dbms/tests/queries/0_stateless/00975_sample_prewhere_distributed.sql b/tests/queries/0_stateless/00975_sample_prewhere_distributed.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00975_sample_prewhere_distributed.sql rename to tests/queries/0_stateless/00975_sample_prewhere_distributed.sql diff --git a/dbms/tests/queries/0_stateless/00975_values_list.reference b/tests/queries/0_stateless/00975_values_list.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00975_values_list.reference rename to tests/queries/0_stateless/00975_values_list.reference diff --git a/dbms/tests/queries/0_stateless/00975_values_list.sql b/tests/queries/0_stateless/00975_values_list.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00975_values_list.sql rename to tests/queries/0_stateless/00975_values_list.sql diff --git a/dbms/tests/queries/0_stateless/00976_asof_join_on.reference b/tests/queries/0_stateless/00976_asof_join_on.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00976_asof_join_on.reference rename to tests/queries/0_stateless/00976_asof_join_on.reference diff --git a/dbms/tests/queries/0_stateless/00976_asof_join_on.sql b/tests/queries/0_stateless/00976_asof_join_on.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00976_asof_join_on.sql rename to tests/queries/0_stateless/00976_asof_join_on.sql diff --git a/dbms/tests/queries/0_stateless/00976_live_view_select_version.reference b/tests/queries/0_stateless/00976_live_view_select_version.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00976_live_view_select_version.reference rename to tests/queries/0_stateless/00976_live_view_select_version.reference diff --git a/dbms/tests/queries/0_stateless/00976_live_view_select_version.sql b/tests/queries/0_stateless/00976_live_view_select_version.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00976_live_view_select_version.sql rename to tests/queries/0_stateless/00976_live_view_select_version.sql diff --git a/dbms/tests/queries/0_stateless/00976_max_execution_speed.reference b/tests/queries/0_stateless/00976_max_execution_speed.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00976_max_execution_speed.reference rename to tests/queries/0_stateless/00976_max_execution_speed.reference diff --git a/dbms/tests/queries/0_stateless/00976_max_execution_speed.sql b/tests/queries/0_stateless/00976_max_execution_speed.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00976_max_execution_speed.sql rename to tests/queries/0_stateless/00976_max_execution_speed.sql diff --git a/dbms/tests/queries/0_stateless/00976_shard_low_cardinality_achimbab.reference b/tests/queries/0_stateless/00976_shard_low_cardinality_achimbab.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00976_shard_low_cardinality_achimbab.reference rename to tests/queries/0_stateless/00976_shard_low_cardinality_achimbab.reference diff --git a/dbms/tests/queries/0_stateless/00976_shard_low_cardinality_achimbab.sql 
b/tests/queries/0_stateless/00976_shard_low_cardinality_achimbab.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00976_shard_low_cardinality_achimbab.sql rename to tests/queries/0_stateless/00976_shard_low_cardinality_achimbab.sql diff --git a/dbms/tests/queries/0_stateless/00976_system_stop_ttl_merges.reference b/tests/queries/0_stateless/00976_system_stop_ttl_merges.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00976_system_stop_ttl_merges.reference rename to tests/queries/0_stateless/00976_system_stop_ttl_merges.reference diff --git a/dbms/tests/queries/0_stateless/00976_system_stop_ttl_merges.sql b/tests/queries/0_stateless/00976_system_stop_ttl_merges.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00976_system_stop_ttl_merges.sql rename to tests/queries/0_stateless/00976_system_stop_ttl_merges.sql diff --git a/dbms/tests/queries/0_stateless/00976_ttl_with_old_parts.reference b/tests/queries/0_stateless/00976_ttl_with_old_parts.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00976_ttl_with_old_parts.reference rename to tests/queries/0_stateless/00976_ttl_with_old_parts.reference diff --git a/dbms/tests/queries/0_stateless/00976_ttl_with_old_parts.sql b/tests/queries/0_stateless/00976_ttl_with_old_parts.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00976_ttl_with_old_parts.sql rename to tests/queries/0_stateless/00976_ttl_with_old_parts.sql diff --git a/dbms/tests/queries/0_stateless/00977_int_div.reference b/tests/queries/0_stateless/00977_int_div.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00977_int_div.reference rename to tests/queries/0_stateless/00977_int_div.reference diff --git a/dbms/tests/queries/0_stateless/00977_int_div.sql b/tests/queries/0_stateless/00977_int_div.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00977_int_div.sql rename to tests/queries/0_stateless/00977_int_div.sql diff --git a/dbms/tests/queries/0_stateless/00977_join_use_nulls_denny_crane.reference b/tests/queries/0_stateless/00977_join_use_nulls_denny_crane.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00977_join_use_nulls_denny_crane.reference rename to tests/queries/0_stateless/00977_join_use_nulls_denny_crane.reference diff --git a/dbms/tests/queries/0_stateless/00977_join_use_nulls_denny_crane.sql b/tests/queries/0_stateless/00977_join_use_nulls_denny_crane.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00977_join_use_nulls_denny_crane.sql rename to tests/queries/0_stateless/00977_join_use_nulls_denny_crane.sql diff --git a/dbms/tests/queries/0_stateless/00977_live_view_watch_events.reference b/tests/queries/0_stateless/00977_live_view_watch_events.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00977_live_view_watch_events.reference rename to tests/queries/0_stateless/00977_live_view_watch_events.reference diff --git a/dbms/tests/queries/0_stateless/00977_live_view_watch_events.sql b/tests/queries/0_stateless/00977_live_view_watch_events.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00977_live_view_watch_events.sql rename to tests/queries/0_stateless/00977_live_view_watch_events.sql diff --git a/dbms/tests/queries/0_stateless/00978_live_view_watch.reference b/tests/queries/0_stateless/00978_live_view_watch.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00978_live_view_watch.reference rename to 
tests/queries/0_stateless/00978_live_view_watch.reference diff --git a/dbms/tests/queries/0_stateless/00978_live_view_watch.sql b/tests/queries/0_stateless/00978_live_view_watch.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00978_live_view_watch.sql rename to tests/queries/0_stateless/00978_live_view_watch.sql diff --git a/dbms/tests/queries/0_stateless/00978_ml_math.reference b/tests/queries/0_stateless/00978_ml_math.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00978_ml_math.reference rename to tests/queries/0_stateless/00978_ml_math.reference diff --git a/dbms/tests/queries/0_stateless/00978_ml_math.sql b/tests/queries/0_stateless/00978_ml_math.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00978_ml_math.sql rename to tests/queries/0_stateless/00978_ml_math.sql diff --git a/dbms/tests/queries/0_stateless/00978_sum_map_bugfix.reference b/tests/queries/0_stateless/00978_sum_map_bugfix.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00978_sum_map_bugfix.reference rename to tests/queries/0_stateless/00978_sum_map_bugfix.reference diff --git a/dbms/tests/queries/0_stateless/00978_sum_map_bugfix.sql b/tests/queries/0_stateless/00978_sum_map_bugfix.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00978_sum_map_bugfix.sql rename to tests/queries/0_stateless/00978_sum_map_bugfix.sql diff --git a/dbms/tests/queries/0_stateless/00978_table_function_values_alias.reference b/tests/queries/0_stateless/00978_table_function_values_alias.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00978_table_function_values_alias.reference rename to tests/queries/0_stateless/00978_table_function_values_alias.reference diff --git a/dbms/tests/queries/0_stateless/00978_table_function_values_alias.sql b/tests/queries/0_stateless/00978_table_function_values_alias.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00978_table_function_values_alias.sql rename to tests/queries/0_stateless/00978_table_function_values_alias.sql diff --git a/dbms/tests/queries/0_stateless/00979_live_view_watch_live.py b/tests/queries/0_stateless/00979_live_view_watch_live.py similarity index 100% rename from dbms/tests/queries/0_stateless/00979_live_view_watch_live.py rename to tests/queries/0_stateless/00979_live_view_watch_live.py diff --git a/dbms/tests/queries/0_stateless/00979_live_view_watch_live.reference b/tests/queries/0_stateless/00979_live_view_watch_live.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00979_live_view_watch_live.reference rename to tests/queries/0_stateless/00979_live_view_watch_live.reference diff --git a/dbms/tests/queries/0_stateless/00979_live_view_watch_live_moving_avg.py b/tests/queries/0_stateless/00979_live_view_watch_live_moving_avg.py similarity index 100% rename from dbms/tests/queries/0_stateless/00979_live_view_watch_live_moving_avg.py rename to tests/queries/0_stateless/00979_live_view_watch_live_moving_avg.py diff --git a/dbms/tests/queries/0_stateless/00979_live_view_watch_live_moving_avg.reference b/tests/queries/0_stateless/00979_live_view_watch_live_moving_avg.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00979_live_view_watch_live_moving_avg.reference rename to tests/queries/0_stateless/00979_live_view_watch_live_moving_avg.reference diff --git a/dbms/tests/queries/0_stateless/00979_live_view_watch_live_with_subquery.py 
b/tests/queries/0_stateless/00979_live_view_watch_live_with_subquery.py similarity index 100% rename from dbms/tests/queries/0_stateless/00979_live_view_watch_live_with_subquery.py rename to tests/queries/0_stateless/00979_live_view_watch_live_with_subquery.py diff --git a/dbms/tests/queries/0_stateless/00979_live_view_watch_live_with_subquery.reference b/tests/queries/0_stateless/00979_live_view_watch_live_with_subquery.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00979_live_view_watch_live_with_subquery.reference rename to tests/queries/0_stateless/00979_live_view_watch_live_with_subquery.reference diff --git a/dbms/tests/queries/0_stateless/00979_quantileExcatExclusive_and_Inclusive.reference b/tests/queries/0_stateless/00979_quantileExcatExclusive_and_Inclusive.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00979_quantileExcatExclusive_and_Inclusive.reference rename to tests/queries/0_stateless/00979_quantileExcatExclusive_and_Inclusive.reference diff --git a/dbms/tests/queries/0_stateless/00979_quantileExcatExclusive_and_Inclusive.sql b/tests/queries/0_stateless/00979_quantileExcatExclusive_and_Inclusive.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00979_quantileExcatExclusive_and_Inclusive.sql rename to tests/queries/0_stateless/00979_quantileExcatExclusive_and_Inclusive.sql diff --git a/dbms/tests/queries/0_stateless/00979_set_index_not.reference b/tests/queries/0_stateless/00979_set_index_not.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00979_set_index_not.reference rename to tests/queries/0_stateless/00979_set_index_not.reference diff --git a/dbms/tests/queries/0_stateless/00979_set_index_not.sql b/tests/queries/0_stateless/00979_set_index_not.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00979_set_index_not.sql rename to tests/queries/0_stateless/00979_set_index_not.sql diff --git a/dbms/tests/queries/0_stateless/00979_toFloat_monotonicity.reference b/tests/queries/0_stateless/00979_toFloat_monotonicity.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00979_toFloat_monotonicity.reference rename to tests/queries/0_stateless/00979_toFloat_monotonicity.reference diff --git a/dbms/tests/queries/0_stateless/00979_toFloat_monotonicity.sql b/tests/queries/0_stateless/00979_toFloat_monotonicity.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00979_toFloat_monotonicity.sql rename to tests/queries/0_stateless/00979_toFloat_monotonicity.sql diff --git a/dbms/tests/queries/0_stateless/00979_yandex_consistent_hash_fpe.reference b/tests/queries/0_stateless/00979_yandex_consistent_hash_fpe.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00979_yandex_consistent_hash_fpe.reference rename to tests/queries/0_stateless/00979_yandex_consistent_hash_fpe.reference diff --git a/dbms/tests/queries/0_stateless/00979_yandex_consistent_hash_fpe.sql b/tests/queries/0_stateless/00979_yandex_consistent_hash_fpe.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00979_yandex_consistent_hash_fpe.sql rename to tests/queries/0_stateless/00979_yandex_consistent_hash_fpe.sql diff --git a/dbms/tests/queries/0_stateless/00980_alter_settings_race.reference b/tests/queries/0_stateless/00980_alter_settings_race.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00980_alter_settings_race.reference rename to tests/queries/0_stateless/00980_alter_settings_race.reference diff 
--git a/dbms/tests/queries/0_stateless/00980_alter_settings_race.sh b/tests/queries/0_stateless/00980_alter_settings_race.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00980_alter_settings_race.sh rename to tests/queries/0_stateless/00980_alter_settings_race.sh diff --git a/dbms/tests/queries/0_stateless/00980_crash_nullable_decimal.reference b/tests/queries/0_stateless/00980_crash_nullable_decimal.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00980_crash_nullable_decimal.reference rename to tests/queries/0_stateless/00980_crash_nullable_decimal.reference diff --git a/dbms/tests/queries/0_stateless/00980_crash_nullable_decimal.sql b/tests/queries/0_stateless/00980_crash_nullable_decimal.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00980_crash_nullable_decimal.sql rename to tests/queries/0_stateless/00980_crash_nullable_decimal.sql diff --git a/dbms/tests/queries/0_stateless/00980_create_temporary_live_view.reference b/tests/queries/0_stateless/00980_create_temporary_live_view.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00980_create_temporary_live_view.reference rename to tests/queries/0_stateless/00980_create_temporary_live_view.reference diff --git a/dbms/tests/queries/0_stateless/00980_create_temporary_live_view.sql b/tests/queries/0_stateless/00980_create_temporary_live_view.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00980_create_temporary_live_view.sql rename to tests/queries/0_stateless/00980_create_temporary_live_view.sql diff --git a/dbms/tests/queries/0_stateless/00980_full_join_crash_fancyqlx.reference b/tests/queries/0_stateless/00980_full_join_crash_fancyqlx.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00980_full_join_crash_fancyqlx.reference rename to tests/queries/0_stateless/00980_full_join_crash_fancyqlx.reference diff --git a/dbms/tests/queries/0_stateless/00980_full_join_crash_fancyqlx.sql b/tests/queries/0_stateless/00980_full_join_crash_fancyqlx.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00980_full_join_crash_fancyqlx.sql rename to tests/queries/0_stateless/00980_full_join_crash_fancyqlx.sql diff --git a/dbms/tests/queries/0_stateless/00980_merge_alter_settings.reference b/tests/queries/0_stateless/00980_merge_alter_settings.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00980_merge_alter_settings.reference rename to tests/queries/0_stateless/00980_merge_alter_settings.reference diff --git a/dbms/tests/queries/0_stateless/00980_merge_alter_settings.sql b/tests/queries/0_stateless/00980_merge_alter_settings.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00980_merge_alter_settings.sql rename to tests/queries/0_stateless/00980_merge_alter_settings.sql diff --git a/dbms/tests/queries/0_stateless/00980_shard_aggregation_state_deserialization.reference b/tests/queries/0_stateless/00980_shard_aggregation_state_deserialization.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00980_shard_aggregation_state_deserialization.reference rename to tests/queries/0_stateless/00980_shard_aggregation_state_deserialization.reference diff --git a/dbms/tests/queries/0_stateless/00980_shard_aggregation_state_deserialization.sql b/tests/queries/0_stateless/00980_shard_aggregation_state_deserialization.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00980_shard_aggregation_state_deserialization.sql rename to 
tests/queries/0_stateless/00980_shard_aggregation_state_deserialization.sql diff --git a/dbms/tests/queries/0_stateless/00980_skip_unused_shards_without_sharding_key.reference b/tests/queries/0_stateless/00980_skip_unused_shards_without_sharding_key.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00980_skip_unused_shards_without_sharding_key.reference rename to tests/queries/0_stateless/00980_skip_unused_shards_without_sharding_key.reference diff --git a/dbms/tests/queries/0_stateless/00980_skip_unused_shards_without_sharding_key.sql b/tests/queries/0_stateless/00980_skip_unused_shards_without_sharding_key.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00980_skip_unused_shards_without_sharding_key.sql rename to tests/queries/0_stateless/00980_skip_unused_shards_without_sharding_key.sql diff --git a/dbms/tests/queries/0_stateless/00980_zookeeper_merge_tree_alter_settings.reference b/tests/queries/0_stateless/00980_zookeeper_merge_tree_alter_settings.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00980_zookeeper_merge_tree_alter_settings.reference rename to tests/queries/0_stateless/00980_zookeeper_merge_tree_alter_settings.reference diff --git a/dbms/tests/queries/0_stateless/00980_zookeeper_merge_tree_alter_settings.sql b/tests/queries/0_stateless/00980_zookeeper_merge_tree_alter_settings.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00980_zookeeper_merge_tree_alter_settings.sql rename to tests/queries/0_stateless/00980_zookeeper_merge_tree_alter_settings.sql diff --git a/dbms/tests/queries/0_stateless/00981_in_subquery_with_tuple.reference b/tests/queries/0_stateless/00981_in_subquery_with_tuple.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00981_in_subquery_with_tuple.reference rename to tests/queries/0_stateless/00981_in_subquery_with_tuple.reference diff --git a/dbms/tests/queries/0_stateless/00981_in_subquery_with_tuple.sh b/tests/queries/0_stateless/00981_in_subquery_with_tuple.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00981_in_subquery_with_tuple.sh rename to tests/queries/0_stateless/00981_in_subquery_with_tuple.sh diff --git a/dbms/tests/queries/0_stateless/00981_no_virtual_columns.reference b/tests/queries/0_stateless/00981_no_virtual_columns.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00981_no_virtual_columns.reference rename to tests/queries/0_stateless/00981_no_virtual_columns.reference diff --git a/dbms/tests/queries/0_stateless/00981_no_virtual_columns.sql b/tests/queries/0_stateless/00981_no_virtual_columns.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00981_no_virtual_columns.sql rename to tests/queries/0_stateless/00981_no_virtual_columns.sql diff --git a/dbms/tests/queries/0_stateless/00981_topK_topKWeighted_long.reference b/tests/queries/0_stateless/00981_topK_topKWeighted_long.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00981_topK_topKWeighted_long.reference rename to tests/queries/0_stateless/00981_topK_topKWeighted_long.reference diff --git a/dbms/tests/queries/0_stateless/00981_topK_topKWeighted_long.sql b/tests/queries/0_stateless/00981_topK_topKWeighted_long.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00981_topK_topKWeighted_long.sql rename to tests/queries/0_stateless/00981_topK_topKWeighted_long.sql diff --git a/dbms/tests/queries/0_stateless/00982_array_enumerate_uniq_ranked.reference 
b/tests/queries/0_stateless/00982_array_enumerate_uniq_ranked.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00982_array_enumerate_uniq_ranked.reference rename to tests/queries/0_stateless/00982_array_enumerate_uniq_ranked.reference diff --git a/dbms/tests/queries/0_stateless/00982_array_enumerate_uniq_ranked.sql b/tests/queries/0_stateless/00982_array_enumerate_uniq_ranked.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00982_array_enumerate_uniq_ranked.sql rename to tests/queries/0_stateless/00982_array_enumerate_uniq_ranked.sql diff --git a/dbms/tests/queries/0_stateless/00982_low_cardinality_setting_in_mv.reference b/tests/queries/0_stateless/00982_low_cardinality_setting_in_mv.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00982_low_cardinality_setting_in_mv.reference rename to tests/queries/0_stateless/00982_low_cardinality_setting_in_mv.reference diff --git a/dbms/tests/queries/0_stateless/00982_low_cardinality_setting_in_mv.sql b/tests/queries/0_stateless/00982_low_cardinality_setting_in_mv.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00982_low_cardinality_setting_in_mv.sql rename to tests/queries/0_stateless/00982_low_cardinality_setting_in_mv.sql diff --git a/dbms/tests/queries/0_stateless/00983_summing_merge_tree_not_an_identifier.reference b/tests/queries/0_stateless/00983_summing_merge_tree_not_an_identifier.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00983_summing_merge_tree_not_an_identifier.reference rename to tests/queries/0_stateless/00983_summing_merge_tree_not_an_identifier.reference diff --git a/dbms/tests/queries/0_stateless/00983_summing_merge_tree_not_an_identifier.sql b/tests/queries/0_stateless/00983_summing_merge_tree_not_an_identifier.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00983_summing_merge_tree_not_an_identifier.sql rename to tests/queries/0_stateless/00983_summing_merge_tree_not_an_identifier.sql diff --git a/dbms/tests/queries/0_stateless/00984_materialized_view_to_columns.reference b/tests/queries/0_stateless/00984_materialized_view_to_columns.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00984_materialized_view_to_columns.reference rename to tests/queries/0_stateless/00984_materialized_view_to_columns.reference diff --git a/dbms/tests/queries/0_stateless/00984_materialized_view_to_columns.sql b/tests/queries/0_stateless/00984_materialized_view_to_columns.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00984_materialized_view_to_columns.sql rename to tests/queries/0_stateless/00984_materialized_view_to_columns.sql diff --git a/dbms/tests/queries/0_stateless/00984_parser_stack_overflow.reference b/tests/queries/0_stateless/00984_parser_stack_overflow.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00984_parser_stack_overflow.reference rename to tests/queries/0_stateless/00984_parser_stack_overflow.reference diff --git a/dbms/tests/queries/0_stateless/00984_parser_stack_overflow.sh b/tests/queries/0_stateless/00984_parser_stack_overflow.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00984_parser_stack_overflow.sh rename to tests/queries/0_stateless/00984_parser_stack_overflow.sh diff --git a/dbms/tests/queries/0_stateless/00985_merge_stack_overflow.reference b/tests/queries/0_stateless/00985_merge_stack_overflow.reference similarity index 100% rename from 
dbms/tests/queries/0_stateless/00985_merge_stack_overflow.reference rename to tests/queries/0_stateless/00985_merge_stack_overflow.reference diff --git a/dbms/tests/queries/0_stateless/00985_merge_stack_overflow.sql b/tests/queries/0_stateless/00985_merge_stack_overflow.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00985_merge_stack_overflow.sql rename to tests/queries/0_stateless/00985_merge_stack_overflow.sql diff --git a/dbms/tests/queries/0_stateless/00986_materialized_view_stack_overflow.reference b/tests/queries/0_stateless/00986_materialized_view_stack_overflow.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00986_materialized_view_stack_overflow.reference rename to tests/queries/0_stateless/00986_materialized_view_stack_overflow.reference diff --git a/dbms/tests/queries/0_stateless/00986_materialized_view_stack_overflow.sql b/tests/queries/0_stateless/00986_materialized_view_stack_overflow.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00986_materialized_view_stack_overflow.sql rename to tests/queries/0_stateless/00986_materialized_view_stack_overflow.sql diff --git a/dbms/tests/queries/0_stateless/00987_distributed_stack_overflow.reference b/tests/queries/0_stateless/00987_distributed_stack_overflow.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00987_distributed_stack_overflow.reference rename to tests/queries/0_stateless/00987_distributed_stack_overflow.reference diff --git a/dbms/tests/queries/0_stateless/00987_distributed_stack_overflow.sql b/tests/queries/0_stateless/00987_distributed_stack_overflow.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00987_distributed_stack_overflow.sql rename to tests/queries/0_stateless/00987_distributed_stack_overflow.sql diff --git a/dbms/tests/queries/0_stateless/00988_constraints_replication_zookeeper.reference b/tests/queries/0_stateless/00988_constraints_replication_zookeeper.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00988_constraints_replication_zookeeper.reference rename to tests/queries/0_stateless/00988_constraints_replication_zookeeper.reference diff --git a/dbms/tests/queries/0_stateless/00988_constraints_replication_zookeeper.sql b/tests/queries/0_stateless/00988_constraints_replication_zookeeper.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00988_constraints_replication_zookeeper.sql rename to tests/queries/0_stateless/00988_constraints_replication_zookeeper.sql diff --git a/dbms/tests/queries/0_stateless/00988_expansion_aliases_limit.reference b/tests/queries/0_stateless/00988_expansion_aliases_limit.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00988_expansion_aliases_limit.reference rename to tests/queries/0_stateless/00988_expansion_aliases_limit.reference diff --git a/dbms/tests/queries/0_stateless/00988_expansion_aliases_limit.sql b/tests/queries/0_stateless/00988_expansion_aliases_limit.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00988_expansion_aliases_limit.sql rename to tests/queries/0_stateless/00988_expansion_aliases_limit.sql diff --git a/dbms/tests/queries/0_stateless/00988_parallel_parts_removal.reference b/tests/queries/0_stateless/00988_parallel_parts_removal.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00988_parallel_parts_removal.reference rename to tests/queries/0_stateless/00988_parallel_parts_removal.reference diff --git 
a/dbms/tests/queries/0_stateless/00988_parallel_parts_removal.sql b/tests/queries/0_stateless/00988_parallel_parts_removal.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00988_parallel_parts_removal.sql rename to tests/queries/0_stateless/00988_parallel_parts_removal.sql diff --git a/dbms/tests/queries/0_stateless/00989_parallel_parts_loading.reference b/tests/queries/0_stateless/00989_parallel_parts_loading.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00989_parallel_parts_loading.reference rename to tests/queries/0_stateless/00989_parallel_parts_loading.reference diff --git a/dbms/tests/queries/0_stateless/00989_parallel_parts_loading.sql b/tests/queries/0_stateless/00989_parallel_parts_loading.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00989_parallel_parts_loading.sql rename to tests/queries/0_stateless/00989_parallel_parts_loading.sql diff --git a/dbms/tests/queries/0_stateless/00990_function_current_user.reference b/tests/queries/0_stateless/00990_function_current_user.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00990_function_current_user.reference rename to tests/queries/0_stateless/00990_function_current_user.reference diff --git a/dbms/tests/queries/0_stateless/00990_function_current_user.sql b/tests/queries/0_stateless/00990_function_current_user.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00990_function_current_user.sql rename to tests/queries/0_stateless/00990_function_current_user.sql diff --git a/dbms/tests/queries/0_stateless/00990_hasToken.python b/tests/queries/0_stateless/00990_hasToken.python similarity index 100% rename from dbms/tests/queries/0_stateless/00990_hasToken.python rename to tests/queries/0_stateless/00990_hasToken.python diff --git a/dbms/tests/queries/0_stateless/00990_hasToken.reference b/tests/queries/0_stateless/00990_hasToken.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00990_hasToken.reference rename to tests/queries/0_stateless/00990_hasToken.reference diff --git a/dbms/tests/queries/0_stateless/00990_hasToken.sh b/tests/queries/0_stateless/00990_hasToken.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00990_hasToken.sh rename to tests/queries/0_stateless/00990_hasToken.sh diff --git a/dbms/tests/queries/0_stateless/00990_hasToken_and_tokenbf.reference b/tests/queries/0_stateless/00990_hasToken_and_tokenbf.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00990_hasToken_and_tokenbf.reference rename to tests/queries/0_stateless/00990_hasToken_and_tokenbf.reference diff --git a/dbms/tests/queries/0_stateless/00990_hasToken_and_tokenbf.sql b/tests/queries/0_stateless/00990_hasToken_and_tokenbf.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00990_hasToken_and_tokenbf.sql rename to tests/queries/0_stateless/00990_hasToken_and_tokenbf.sql diff --git a/dbms/tests/queries/0_stateless/00990_metric_log_table_not_empty.reference b/tests/queries/0_stateless/00990_metric_log_table_not_empty.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00990_metric_log_table_not_empty.reference rename to tests/queries/0_stateless/00990_metric_log_table_not_empty.reference diff --git a/dbms/tests/queries/0_stateless/00990_metric_log_table_not_empty.sql b/tests/queries/0_stateless/00990_metric_log_table_not_empty.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00990_metric_log_table_not_empty.sql rename to 
tests/queries/0_stateless/00990_metric_log_table_not_empty.sql diff --git a/dbms/tests/queries/0_stateless/00990_request_splitting.reference b/tests/queries/0_stateless/00990_request_splitting.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00990_request_splitting.reference rename to tests/queries/0_stateless/00990_request_splitting.reference diff --git a/dbms/tests/queries/0_stateless/00990_request_splitting.sql b/tests/queries/0_stateless/00990_request_splitting.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00990_request_splitting.sql rename to tests/queries/0_stateless/00990_request_splitting.sql diff --git a/dbms/tests/queries/0_stateless/00991_live_view_watch_event_live.python b/tests/queries/0_stateless/00991_live_view_watch_event_live.python similarity index 100% rename from dbms/tests/queries/0_stateless/00991_live_view_watch_event_live.python rename to tests/queries/0_stateless/00991_live_view_watch_event_live.python diff --git a/dbms/tests/queries/0_stateless/00991_live_view_watch_event_live.reference b/tests/queries/0_stateless/00991_live_view_watch_event_live.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00991_live_view_watch_event_live.reference rename to tests/queries/0_stateless/00991_live_view_watch_event_live.reference diff --git a/dbms/tests/queries/0_stateless/00991_live_view_watch_event_live.sh.disabled b/tests/queries/0_stateless/00991_live_view_watch_event_live.sh.disabled similarity index 100% rename from dbms/tests/queries/0_stateless/00991_live_view_watch_event_live.sh.disabled rename to tests/queries/0_stateless/00991_live_view_watch_event_live.sh.disabled diff --git a/dbms/tests/queries/0_stateless/00991_live_view_watch_http.python b/tests/queries/0_stateless/00991_live_view_watch_http.python similarity index 100% rename from dbms/tests/queries/0_stateless/00991_live_view_watch_http.python rename to tests/queries/0_stateless/00991_live_view_watch_http.python diff --git a/dbms/tests/queries/0_stateless/00991_live_view_watch_http.reference b/tests/queries/0_stateless/00991_live_view_watch_http.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00991_live_view_watch_http.reference rename to tests/queries/0_stateless/00991_live_view_watch_http.reference diff --git a/dbms/tests/queries/0_stateless/00991_live_view_watch_http.sh.disabled b/tests/queries/0_stateless/00991_live_view_watch_http.sh.disabled similarity index 100% rename from dbms/tests/queries/0_stateless/00991_live_view_watch_http.sh.disabled rename to tests/queries/0_stateless/00991_live_view_watch_http.sh.disabled diff --git a/dbms/tests/queries/0_stateless/00991_system_parts_race_condition.reference b/tests/queries/0_stateless/00991_system_parts_race_condition.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00991_system_parts_race_condition.reference rename to tests/queries/0_stateless/00991_system_parts_race_condition.reference diff --git a/dbms/tests/queries/0_stateless/00991_system_parts_race_condition.sh b/tests/queries/0_stateless/00991_system_parts_race_condition.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00991_system_parts_race_condition.sh rename to tests/queries/0_stateless/00991_system_parts_race_condition.sh diff --git a/dbms/tests/queries/0_stateless/00991_temporary_live_view_watch_events_heartbeat.python b/tests/queries/0_stateless/00991_temporary_live_view_watch_events_heartbeat.python similarity index 100% rename from 
dbms/tests/queries/0_stateless/00991_temporary_live_view_watch_events_heartbeat.python rename to tests/queries/0_stateless/00991_temporary_live_view_watch_events_heartbeat.python diff --git a/dbms/tests/queries/0_stateless/00991_temporary_live_view_watch_events_heartbeat.reference b/tests/queries/0_stateless/00991_temporary_live_view_watch_events_heartbeat.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00991_temporary_live_view_watch_events_heartbeat.reference rename to tests/queries/0_stateless/00991_temporary_live_view_watch_events_heartbeat.reference diff --git a/dbms/tests/queries/0_stateless/00991_temporary_live_view_watch_events_heartbeat.sh.disabled b/tests/queries/0_stateless/00991_temporary_live_view_watch_events_heartbeat.sh.disabled similarity index 100% rename from dbms/tests/queries/0_stateless/00991_temporary_live_view_watch_events_heartbeat.sh.disabled rename to tests/queries/0_stateless/00991_temporary_live_view_watch_events_heartbeat.sh.disabled diff --git a/dbms/tests/queries/0_stateless/00991_temporary_live_view_watch_live.python b/tests/queries/0_stateless/00991_temporary_live_view_watch_live.python similarity index 100% rename from dbms/tests/queries/0_stateless/00991_temporary_live_view_watch_live.python rename to tests/queries/0_stateless/00991_temporary_live_view_watch_live.python diff --git a/dbms/tests/queries/0_stateless/00991_temporary_live_view_watch_live.reference b/tests/queries/0_stateless/00991_temporary_live_view_watch_live.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00991_temporary_live_view_watch_live.reference rename to tests/queries/0_stateless/00991_temporary_live_view_watch_live.reference diff --git a/dbms/tests/queries/0_stateless/00991_temporary_live_view_watch_live.sh.disabled b/tests/queries/0_stateless/00991_temporary_live_view_watch_live.sh.disabled similarity index 100% rename from dbms/tests/queries/0_stateless/00991_temporary_live_view_watch_live.sh.disabled rename to tests/queries/0_stateless/00991_temporary_live_view_watch_live.sh.disabled diff --git a/dbms/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper.reference b/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper.reference rename to tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper.reference diff --git a/dbms/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper.sh b/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper.sh rename to tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper.sh diff --git a/dbms/tests/queries/0_stateless/00993_system_parts_race_condition_drop_zookeeper.reference b/tests/queries/0_stateless/00993_system_parts_race_condition_drop_zookeeper.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00993_system_parts_race_condition_drop_zookeeper.reference rename to tests/queries/0_stateless/00993_system_parts_race_condition_drop_zookeeper.reference diff --git a/dbms/tests/queries/0_stateless/00993_system_parts_race_condition_drop_zookeeper.sh b/tests/queries/0_stateless/00993_system_parts_race_condition_drop_zookeeper.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00993_system_parts_race_condition_drop_zookeeper.sh rename to 
tests/queries/0_stateless/00993_system_parts_race_condition_drop_zookeeper.sh diff --git a/dbms/tests/queries/0_stateless/00994_table_function_numbers_mt.reference b/tests/queries/0_stateless/00994_table_function_numbers_mt.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00994_table_function_numbers_mt.reference rename to tests/queries/0_stateless/00994_table_function_numbers_mt.reference diff --git a/dbms/tests/queries/0_stateless/00994_table_function_numbers_mt.sql b/tests/queries/0_stateless/00994_table_function_numbers_mt.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00994_table_function_numbers_mt.sql rename to tests/queries/0_stateless/00994_table_function_numbers_mt.sql diff --git a/dbms/tests/queries/0_stateless/00995_exception_while_insert.reference b/tests/queries/0_stateless/00995_exception_while_insert.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00995_exception_while_insert.reference rename to tests/queries/0_stateless/00995_exception_while_insert.reference diff --git a/dbms/tests/queries/0_stateless/00995_exception_while_insert.sh b/tests/queries/0_stateless/00995_exception_while_insert.sh similarity index 100% rename from dbms/tests/queries/0_stateless/00995_exception_while_insert.sh rename to tests/queries/0_stateless/00995_exception_while_insert.sh diff --git a/dbms/tests/queries/0_stateless/00995_optimize_read_in_order_with_aggregation.reference b/tests/queries/0_stateless/00995_optimize_read_in_order_with_aggregation.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00995_optimize_read_in_order_with_aggregation.reference rename to tests/queries/0_stateless/00995_optimize_read_in_order_with_aggregation.reference diff --git a/dbms/tests/queries/0_stateless/00995_optimize_read_in_order_with_aggregation.sql b/tests/queries/0_stateless/00995_optimize_read_in_order_with_aggregation.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00995_optimize_read_in_order_with_aggregation.sql rename to tests/queries/0_stateless/00995_optimize_read_in_order_with_aggregation.sql diff --git a/dbms/tests/queries/0_stateless/00995_order_by_with_fill.reference b/tests/queries/0_stateless/00995_order_by_with_fill.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00995_order_by_with_fill.reference rename to tests/queries/0_stateless/00995_order_by_with_fill.reference diff --git a/dbms/tests/queries/0_stateless/00995_order_by_with_fill.sql b/tests/queries/0_stateless/00995_order_by_with_fill.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00995_order_by_with_fill.sql rename to tests/queries/0_stateless/00995_order_by_with_fill.sql diff --git a/dbms/tests/queries/0_stateless/00996_limit_with_ties.reference b/tests/queries/0_stateless/00996_limit_with_ties.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00996_limit_with_ties.reference rename to tests/queries/0_stateless/00996_limit_with_ties.reference diff --git a/dbms/tests/queries/0_stateless/00996_limit_with_ties.sql b/tests/queries/0_stateless/00996_limit_with_ties.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00996_limit_with_ties.sql rename to tests/queries/0_stateless/00996_limit_with_ties.sql diff --git a/dbms/tests/queries/0_stateless/00996_neighbor.reference b/tests/queries/0_stateless/00996_neighbor.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00996_neighbor.reference rename to 
tests/queries/0_stateless/00996_neighbor.reference diff --git a/dbms/tests/queries/0_stateless/00996_neighbor.sql b/tests/queries/0_stateless/00996_neighbor.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00996_neighbor.sql rename to tests/queries/0_stateless/00996_neighbor.sql diff --git a/dbms/tests/queries/0_stateless/00997_extract_all_crash_6627.reference b/tests/queries/0_stateless/00997_extract_all_crash_6627.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00997_extract_all_crash_6627.reference rename to tests/queries/0_stateless/00997_extract_all_crash_6627.reference diff --git a/dbms/tests/queries/0_stateless/00997_extract_all_crash_6627.sql b/tests/queries/0_stateless/00997_extract_all_crash_6627.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00997_extract_all_crash_6627.sql rename to tests/queries/0_stateless/00997_extract_all_crash_6627.sql diff --git a/dbms/tests/queries/0_stateless/00997_set_index_array.reference b/tests/queries/0_stateless/00997_set_index_array.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00997_set_index_array.reference rename to tests/queries/0_stateless/00997_set_index_array.reference diff --git a/dbms/tests/queries/0_stateless/00997_set_index_array.sql b/tests/queries/0_stateless/00997_set_index_array.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00997_set_index_array.sql rename to tests/queries/0_stateless/00997_set_index_array.sql diff --git a/dbms/tests/queries/0_stateless/00997_trim.reference b/tests/queries/0_stateless/00997_trim.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00997_trim.reference rename to tests/queries/0_stateless/00997_trim.reference diff --git a/dbms/tests/queries/0_stateless/00997_trim.sql b/tests/queries/0_stateless/00997_trim.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00997_trim.sql rename to tests/queries/0_stateless/00997_trim.sql diff --git a/dbms/tests/queries/0_stateless/00998_constraints_all_tables.reference b/tests/queries/0_stateless/00998_constraints_all_tables.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00998_constraints_all_tables.reference rename to tests/queries/0_stateless/00998_constraints_all_tables.reference diff --git a/dbms/tests/queries/0_stateless/00998_constraints_all_tables.sql b/tests/queries/0_stateless/00998_constraints_all_tables.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00998_constraints_all_tables.sql rename to tests/queries/0_stateless/00998_constraints_all_tables.sql diff --git a/dbms/tests/queries/0_stateless/00999_full_join_dup_keys_crash.reference b/tests/queries/0_stateless/00999_full_join_dup_keys_crash.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00999_full_join_dup_keys_crash.reference rename to tests/queries/0_stateless/00999_full_join_dup_keys_crash.reference diff --git a/dbms/tests/queries/0_stateless/00999_full_join_dup_keys_crash.sql b/tests/queries/0_stateless/00999_full_join_dup_keys_crash.sql similarity index 100% rename from dbms/tests/queries/0_stateless/00999_full_join_dup_keys_crash.sql rename to tests/queries/0_stateless/00999_full_join_dup_keys_crash.sql diff --git a/dbms/tests/queries/0_stateless/00999_join_not_nullable_types.reference b/tests/queries/0_stateless/00999_join_not_nullable_types.reference similarity index 100% rename from dbms/tests/queries/0_stateless/00999_join_not_nullable_types.reference rename to 
tests/queries/0_stateless/00999_join_not_nullable_types.reference
diff --git a/dbms/tests/queries/0_stateless/00999_join_not_nullable_types.sql b/tests/queries/0_stateless/00999_join_not_nullable_types.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00999_join_not_nullable_types.sql
rename to tests/queries/0_stateless/00999_join_not_nullable_types.sql
diff --git a/dbms/tests/queries/0_stateless/00999_join_on_expression.reference b/tests/queries/0_stateless/00999_join_on_expression.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00999_join_on_expression.reference
rename to tests/queries/0_stateless/00999_join_on_expression.reference
diff --git a/dbms/tests/queries/0_stateless/00999_join_on_expression.sql b/tests/queries/0_stateless/00999_join_on_expression.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00999_join_on_expression.sql
rename to tests/queries/0_stateless/00999_join_on_expression.sql
diff --git a/dbms/tests/queries/0_stateless/00999_nullable_nested_types_4877.reference b/tests/queries/0_stateless/00999_nullable_nested_types_4877.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00999_nullable_nested_types_4877.reference
rename to tests/queries/0_stateless/00999_nullable_nested_types_4877.reference
diff --git a/dbms/tests/queries/0_stateless/00999_nullable_nested_types_4877.sql b/tests/queries/0_stateless/00999_nullable_nested_types_4877.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00999_nullable_nested_types_4877.sql
rename to tests/queries/0_stateless/00999_nullable_nested_types_4877.sql
diff --git a/dbms/tests/queries/0_stateless/00999_settings_no_extra_quotes.reference b/tests/queries/0_stateless/00999_settings_no_extra_quotes.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00999_settings_no_extra_quotes.reference
rename to tests/queries/0_stateless/00999_settings_no_extra_quotes.reference
diff --git a/dbms/tests/queries/0_stateless/00999_settings_no_extra_quotes.sql b/tests/queries/0_stateless/00999_settings_no_extra_quotes.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00999_settings_no_extra_quotes.sql
rename to tests/queries/0_stateless/00999_settings_no_extra_quotes.sql
diff --git a/dbms/tests/queries/0_stateless/00999_test_skip_indices_with_alter_and_merge.reference b/tests/queries/0_stateless/00999_test_skip_indices_with_alter_and_merge.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/00999_test_skip_indices_with_alter_and_merge.reference
rename to tests/queries/0_stateless/00999_test_skip_indices_with_alter_and_merge.reference
diff --git a/dbms/tests/queries/0_stateless/00999_test_skip_indices_with_alter_and_merge.sql b/tests/queries/0_stateless/00999_test_skip_indices_with_alter_and_merge.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/00999_test_skip_indices_with_alter_and_merge.sql
rename to tests/queries/0_stateless/00999_test_skip_indices_with_alter_and_merge.sql
diff --git a/dbms/tests/queries/0_stateless/01000_bad_size_of_marks_skip_idx.reference b/tests/queries/0_stateless/01000_bad_size_of_marks_skip_idx.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01000_bad_size_of_marks_skip_idx.reference
rename to tests/queries/0_stateless/01000_bad_size_of_marks_skip_idx.reference
diff --git a/dbms/tests/queries/0_stateless/01000_bad_size_of_marks_skip_idx.sql b/tests/queries/0_stateless/01000_bad_size_of_marks_skip_idx.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01000_bad_size_of_marks_skip_idx.sql
rename to tests/queries/0_stateless/01000_bad_size_of_marks_skip_idx.sql
diff --git a/dbms/tests/queries/0_stateless/01000_subquery_requires_alias.reference b/tests/queries/0_stateless/01000_subquery_requires_alias.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01000_subquery_requires_alias.reference
rename to tests/queries/0_stateless/01000_subquery_requires_alias.reference
diff --git a/dbms/tests/queries/0_stateless/01000_subquery_requires_alias.sql b/tests/queries/0_stateless/01000_subquery_requires_alias.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01000_subquery_requires_alias.sql
rename to tests/queries/0_stateless/01000_subquery_requires_alias.sql
diff --git a/dbms/tests/queries/0_stateless/01000_unneeded_substitutions_client.reference b/tests/queries/0_stateless/01000_unneeded_substitutions_client.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01000_unneeded_substitutions_client.reference
rename to tests/queries/0_stateless/01000_unneeded_substitutions_client.reference
diff --git a/dbms/tests/queries/0_stateless/01000_unneeded_substitutions_client.sh b/tests/queries/0_stateless/01000_unneeded_substitutions_client.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01000_unneeded_substitutions_client.sh
rename to tests/queries/0_stateless/01000_unneeded_substitutions_client.sh
diff --git a/dbms/tests/queries/0_stateless/01001_enums_in_in_section.reference b/tests/queries/0_stateless/01001_enums_in_in_section.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01001_enums_in_in_section.reference
rename to tests/queries/0_stateless/01001_enums_in_in_section.reference
diff --git a/dbms/tests/queries/0_stateless/01001_enums_in_in_section.sql b/tests/queries/0_stateless/01001_enums_in_in_section.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01001_enums_in_in_section.sql
rename to tests/queries/0_stateless/01001_enums_in_in_section.sql
diff --git a/dbms/tests/queries/0_stateless/01001_rename_merge_race_condition.reference b/tests/queries/0_stateless/01001_rename_merge_race_condition.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01001_rename_merge_race_condition.reference
rename to tests/queries/0_stateless/01001_rename_merge_race_condition.reference
diff --git a/dbms/tests/queries/0_stateless/01001_rename_merge_race_condition.sh b/tests/queries/0_stateless/01001_rename_merge_race_condition.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01001_rename_merge_race_condition.sh
rename to tests/queries/0_stateless/01001_rename_merge_race_condition.sh
diff --git a/dbms/tests/queries/0_stateless/01002_alter_nullable_adaptive_granularity_long.reference b/tests/queries/0_stateless/01002_alter_nullable_adaptive_granularity_long.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01002_alter_nullable_adaptive_granularity_long.reference
rename to tests/queries/0_stateless/01002_alter_nullable_adaptive_granularity_long.reference
diff --git a/dbms/tests/queries/0_stateless/01002_alter_nullable_adaptive_granularity_long.sh b/tests/queries/0_stateless/01002_alter_nullable_adaptive_granularity_long.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01002_alter_nullable_adaptive_granularity_long.sh
rename to tests/queries/0_stateless/01002_alter_nullable_adaptive_granularity_long.sh
diff --git a/dbms/tests/queries/0_stateless/01003_kill_query_race_condition.reference b/tests/queries/0_stateless/01003_kill_query_race_condition.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01003_kill_query_race_condition.reference
rename to tests/queries/0_stateless/01003_kill_query_race_condition.reference
diff --git a/dbms/tests/queries/0_stateless/01003_kill_query_race_condition.sh b/tests/queries/0_stateless/01003_kill_query_race_condition.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01003_kill_query_race_condition.sh
rename to tests/queries/0_stateless/01003_kill_query_race_condition.sh
diff --git a/dbms/tests/queries/0_stateless/01004_rename_deadlock.reference b/tests/queries/0_stateless/01004_rename_deadlock.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01004_rename_deadlock.reference
rename to tests/queries/0_stateless/01004_rename_deadlock.reference
diff --git a/dbms/tests/queries/0_stateless/01004_rename_deadlock.sh b/tests/queries/0_stateless/01004_rename_deadlock.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01004_rename_deadlock.sh
rename to tests/queries/0_stateless/01004_rename_deadlock.sh
diff --git a/dbms/tests/queries/0_stateless/01005_rwr_shard_deadlock.reference b/tests/queries/0_stateless/01005_rwr_shard_deadlock.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01005_rwr_shard_deadlock.reference
rename to tests/queries/0_stateless/01005_rwr_shard_deadlock.reference
diff --git a/dbms/tests/queries/0_stateless/01005_rwr_shard_deadlock.sh b/tests/queries/0_stateless/01005_rwr_shard_deadlock.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01005_rwr_shard_deadlock.sh
rename to tests/queries/0_stateless/01005_rwr_shard_deadlock.sh
diff --git a/dbms/tests/queries/0_stateless/01006_simpod_empty_part_single_column_write.reference b/tests/queries/0_stateless/01006_simpod_empty_part_single_column_write.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01006_simpod_empty_part_single_column_write.reference
rename to tests/queries/0_stateless/01006_simpod_empty_part_single_column_write.reference
diff --git a/dbms/tests/queries/0_stateless/01006_simpod_empty_part_single_column_write.sh b/tests/queries/0_stateless/01006_simpod_empty_part_single_column_write.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01006_simpod_empty_part_single_column_write.sh
rename to tests/queries/0_stateless/01006_simpod_empty_part_single_column_write.sh
diff --git a/dbms/tests/queries/0_stateless/01006_ttl_with_default_2.reference b/tests/queries/0_stateless/01006_ttl_with_default_2.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01006_ttl_with_default_2.reference
rename to tests/queries/0_stateless/01006_ttl_with_default_2.reference
diff --git a/dbms/tests/queries/0_stateless/01006_ttl_with_default_2.sql b/tests/queries/0_stateless/01006_ttl_with_default_2.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01006_ttl_with_default_2.sql
rename to tests/queries/0_stateless/01006_ttl_with_default_2.sql
diff --git a/dbms/tests/queries/0_stateless/01007_r1r2_w_r2r1_deadlock.reference b/tests/queries/0_stateless/01007_r1r2_w_r2r1_deadlock.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01007_r1r2_w_r2r1_deadlock.reference
rename to tests/queries/0_stateless/01007_r1r2_w_r2r1_deadlock.reference
diff --git a/dbms/tests/queries/0_stateless/01007_r1r2_w_r2r1_deadlock.sh b/tests/queries/0_stateless/01007_r1r2_w_r2r1_deadlock.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01007_r1r2_w_r2r1_deadlock.sh
rename to tests/queries/0_stateless/01007_r1r2_w_r2r1_deadlock.sh
diff --git a/dbms/tests/queries/0_stateless/01008_materialized_view_henyihanwobushi.reference b/tests/queries/0_stateless/01008_materialized_view_henyihanwobushi.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01008_materialized_view_henyihanwobushi.reference
rename to tests/queries/0_stateless/01008_materialized_view_henyihanwobushi.reference
diff --git a/dbms/tests/queries/0_stateless/01008_materialized_view_henyihanwobushi.sql b/tests/queries/0_stateless/01008_materialized_view_henyihanwobushi.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01008_materialized_view_henyihanwobushi.sql
rename to tests/queries/0_stateless/01008_materialized_view_henyihanwobushi.sql
diff --git a/dbms/tests/queries/0_stateless/01009_global_array_join_names.reference b/tests/queries/0_stateless/01009_global_array_join_names.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01009_global_array_join_names.reference
rename to tests/queries/0_stateless/01009_global_array_join_names.reference
diff --git a/dbms/tests/queries/0_stateless/01009_global_array_join_names.sql b/tests/queries/0_stateless/01009_global_array_join_names.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01009_global_array_join_names.sql
rename to tests/queries/0_stateless/01009_global_array_join_names.sql
diff --git a/dbms/tests/queries/0_stateless/01009_insert_select_data_loss.reference b/tests/queries/0_stateless/01009_insert_select_data_loss.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01009_insert_select_data_loss.reference
rename to tests/queries/0_stateless/01009_insert_select_data_loss.reference
diff --git a/dbms/tests/queries/0_stateless/01009_insert_select_data_loss.sql b/tests/queries/0_stateless/01009_insert_select_data_loss.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01009_insert_select_data_loss.sql
rename to tests/queries/0_stateless/01009_insert_select_data_loss.sql
diff --git a/dbms/tests/queries/0_stateless/01009_insert_select_nicelulu.reference b/tests/queries/0_stateless/01009_insert_select_nicelulu.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01009_insert_select_nicelulu.reference
rename to tests/queries/0_stateless/01009_insert_select_nicelulu.reference
diff --git a/dbms/tests/queries/0_stateless/01009_insert_select_nicelulu.sql b/tests/queries/0_stateless/01009_insert_select_nicelulu.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01009_insert_select_nicelulu.sql
rename to tests/queries/0_stateless/01009_insert_select_nicelulu.sql
diff --git a/dbms/tests/queries/0_stateless/01010_low_cardinality_and_native_http.reference b/tests/queries/0_stateless/01010_low_cardinality_and_native_http.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01010_low_cardinality_and_native_http.reference
rename to tests/queries/0_stateless/01010_low_cardinality_and_native_http.reference
diff --git a/dbms/tests/queries/0_stateless/01010_low_cardinality_and_native_http.sh b/tests/queries/0_stateless/01010_low_cardinality_and_native_http.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01010_low_cardinality_and_native_http.sh
rename to tests/queries/0_stateless/01010_low_cardinality_and_native_http.sh
diff --git a/dbms/tests/queries/0_stateless/01010_partial_merge_join.reference b/tests/queries/0_stateless/01010_partial_merge_join.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01010_partial_merge_join.reference
rename to tests/queries/0_stateless/01010_partial_merge_join.reference
diff --git a/dbms/tests/queries/0_stateless/01010_partial_merge_join.sql b/tests/queries/0_stateless/01010_partial_merge_join.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01010_partial_merge_join.sql
rename to tests/queries/0_stateless/01010_partial_merge_join.sql
diff --git a/dbms/tests/queries/0_stateless/01010_partial_merge_join_const_and_lc.reference b/tests/queries/0_stateless/01010_partial_merge_join_const_and_lc.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01010_partial_merge_join_const_and_lc.reference
rename to tests/queries/0_stateless/01010_partial_merge_join_const_and_lc.reference
diff --git a/dbms/tests/queries/0_stateless/01010_partial_merge_join_const_and_lc.sql b/tests/queries/0_stateless/01010_partial_merge_join_const_and_lc.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01010_partial_merge_join_const_and_lc.sql
rename to tests/queries/0_stateless/01010_partial_merge_join_const_and_lc.sql
diff --git a/dbms/tests/queries/0_stateless/01010_partial_merge_join_negative.reference b/tests/queries/0_stateless/01010_partial_merge_join_negative.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01010_partial_merge_join_negative.reference
rename to tests/queries/0_stateless/01010_partial_merge_join_negative.reference
diff --git a/dbms/tests/queries/0_stateless/01010_partial_merge_join_negative.sql b/tests/queries/0_stateless/01010_partial_merge_join_negative.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01010_partial_merge_join_negative.sql
rename to tests/queries/0_stateless/01010_partial_merge_join_negative.sql
diff --git a/dbms/tests/queries/0_stateless/01010_pm_join_all_join_bug.reference b/tests/queries/0_stateless/01010_pm_join_all_join_bug.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01010_pm_join_all_join_bug.reference
rename to tests/queries/0_stateless/01010_pm_join_all_join_bug.reference
diff --git a/dbms/tests/queries/0_stateless/01010_pm_join_all_join_bug.sql b/tests/queries/0_stateless/01010_pm_join_all_join_bug.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01010_pm_join_all_join_bug.sql
rename to tests/queries/0_stateless/01010_pm_join_all_join_bug.sql
diff --git a/dbms/tests/queries/0_stateless/01010_pmj_on_disk.reference b/tests/queries/0_stateless/01010_pmj_on_disk.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01010_pmj_on_disk.reference
rename to tests/queries/0_stateless/01010_pmj_on_disk.reference
diff --git a/dbms/tests/queries/0_stateless/01010_pmj_on_disk.sql b/tests/queries/0_stateless/01010_pmj_on_disk.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01010_pmj_on_disk.sql
rename to tests/queries/0_stateless/01010_pmj_on_disk.sql
diff --git a/dbms/tests/queries/0_stateless/01010_pmj_one_row_blocks.reference b/tests/queries/0_stateless/01010_pmj_one_row_blocks.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01010_pmj_one_row_blocks.reference
rename to tests/queries/0_stateless/01010_pmj_one_row_blocks.reference
diff --git a/dbms/tests/queries/0_stateless/01010_pmj_one_row_blocks.sql b/tests/queries/0_stateless/01010_pmj_one_row_blocks.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01010_pmj_one_row_blocks.sql
rename to tests/queries/0_stateless/01010_pmj_one_row_blocks.sql
diff --git a/dbms/tests/queries/0_stateless/01010_pmj_right_table_memory_limits.reference b/tests/queries/0_stateless/01010_pmj_right_table_memory_limits.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01010_pmj_right_table_memory_limits.reference
rename to tests/queries/0_stateless/01010_pmj_right_table_memory_limits.reference
diff --git a/dbms/tests/queries/0_stateless/01010_pmj_right_table_memory_limits.sql b/tests/queries/0_stateless/01010_pmj_right_table_memory_limits.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01010_pmj_right_table_memory_limits.sql
rename to tests/queries/0_stateless/01010_pmj_right_table_memory_limits.sql
diff --git a/dbms/tests/queries/0_stateless/01010_pmj_skip_blocks.reference b/tests/queries/0_stateless/01010_pmj_skip_blocks.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01010_pmj_skip_blocks.reference
rename to tests/queries/0_stateless/01010_pmj_skip_blocks.reference
diff --git a/dbms/tests/queries/0_stateless/01010_pmj_skip_blocks.sql b/tests/queries/0_stateless/01010_pmj_skip_blocks.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01010_pmj_skip_blocks.sql
rename to tests/queries/0_stateless/01010_pmj_skip_blocks.sql
diff --git a/dbms/tests/queries/0_stateless/01011_group_uniq_array_memsan.reference b/tests/queries/0_stateless/01011_group_uniq_array_memsan.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01011_group_uniq_array_memsan.reference
rename to tests/queries/0_stateless/01011_group_uniq_array_memsan.reference
diff --git a/dbms/tests/queries/0_stateless/01011_group_uniq_array_memsan.sql b/tests/queries/0_stateless/01011_group_uniq_array_memsan.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01011_group_uniq_array_memsan.sql
rename to tests/queries/0_stateless/01011_group_uniq_array_memsan.sql
diff --git a/dbms/tests/queries/0_stateless/01011_test_create_as_skip_indices.reference b/tests/queries/0_stateless/01011_test_create_as_skip_indices.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01011_test_create_as_skip_indices.reference
rename to tests/queries/0_stateless/01011_test_create_as_skip_indices.reference
diff --git a/dbms/tests/queries/0_stateless/01011_test_create_as_skip_indices.sql b/tests/queries/0_stateless/01011_test_create_as_skip_indices.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01011_test_create_as_skip_indices.sql
rename to tests/queries/0_stateless/01011_test_create_as_skip_indices.sql
diff --git a/dbms/tests/queries/0_stateless/01012_reset_running_accumulate.reference b/tests/queries/0_stateless/01012_reset_running_accumulate.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01012_reset_running_accumulate.reference
rename to tests/queries/0_stateless/01012_reset_running_accumulate.reference
diff --git a/dbms/tests/queries/0_stateless/01012_reset_running_accumulate.sql b/tests/queries/0_stateless/01012_reset_running_accumulate.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01012_reset_running_accumulate.sql
rename to tests/queries/0_stateless/01012_reset_running_accumulate.sql
diff --git a/dbms/tests/queries/0_stateless/01012_select_limit_x_0.reference b/tests/queries/0_stateless/01012_select_limit_x_0.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01012_select_limit_x_0.reference
rename to tests/queries/0_stateless/01012_select_limit_x_0.reference
diff --git a/dbms/tests/queries/0_stateless/01012_select_limit_x_0.sql b/tests/queries/0_stateless/01012_select_limit_x_0.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01012_select_limit_x_0.sql
rename to tests/queries/0_stateless/01012_select_limit_x_0.sql
diff --git a/dbms/tests/queries/0_stateless/01012_serialize_array_memory_usage.reference b/tests/queries/0_stateless/01012_serialize_array_memory_usage.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01012_serialize_array_memory_usage.reference
rename to tests/queries/0_stateless/01012_serialize_array_memory_usage.reference
diff --git a/dbms/tests/queries/0_stateless/01012_serialize_array_memory_usage.sql b/tests/queries/0_stateless/01012_serialize_array_memory_usage.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01012_serialize_array_memory_usage.sql
rename to tests/queries/0_stateless/01012_serialize_array_memory_usage.sql
diff --git a/dbms/tests/queries/0_stateless/01012_show_tables_limit.reference b/tests/queries/0_stateless/01012_show_tables_limit.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01012_show_tables_limit.reference
rename to tests/queries/0_stateless/01012_show_tables_limit.reference
diff --git a/dbms/tests/queries/0_stateless/01012_show_tables_limit.sql b/tests/queries/0_stateless/01012_show_tables_limit.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01012_show_tables_limit.sql
rename to tests/queries/0_stateless/01012_show_tables_limit.sql
diff --git a/dbms/tests/queries/0_stateless/01013_hex_decimal.reference b/tests/queries/0_stateless/01013_hex_decimal.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01013_hex_decimal.reference
rename to tests/queries/0_stateless/01013_hex_decimal.reference
diff --git a/dbms/tests/queries/0_stateless/01013_hex_decimal.sql b/tests/queries/0_stateless/01013_hex_decimal.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01013_hex_decimal.sql
rename to tests/queries/0_stateless/01013_hex_decimal.sql
diff --git a/dbms/tests/queries/0_stateless/01013_hex_float.reference b/tests/queries/0_stateless/01013_hex_float.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01013_hex_float.reference
rename to tests/queries/0_stateless/01013_hex_float.reference
diff --git a/dbms/tests/queries/0_stateless/01013_hex_float.sql b/tests/queries/0_stateless/01013_hex_float.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01013_hex_float.sql
rename to tests/queries/0_stateless/01013_hex_float.sql
diff --git a/dbms/tests/queries/0_stateless/01013_repeat_function.reference b/tests/queries/0_stateless/01013_repeat_function.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01013_repeat_function.reference
rename to tests/queries/0_stateless/01013_repeat_function.reference
diff --git a/dbms/tests/queries/0_stateless/01013_repeat_function.sql b/tests/queries/0_stateless/01013_repeat_function.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01013_repeat_function.sql
rename to tests/queries/0_stateless/01013_repeat_function.sql
diff --git a/dbms/tests/queries/0_stateless/01013_sync_replica_timeout_zookeeper.reference b/tests/queries/0_stateless/01013_sync_replica_timeout_zookeeper.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01013_sync_replica_timeout_zookeeper.reference
rename to tests/queries/0_stateless/01013_sync_replica_timeout_zookeeper.reference
diff --git a/dbms/tests/queries/0_stateless/01013_sync_replica_timeout_zookeeper.sh b/tests/queries/0_stateless/01013_sync_replica_timeout_zookeeper.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01013_sync_replica_timeout_zookeeper.sh
rename to tests/queries/0_stateless/01013_sync_replica_timeout_zookeeper.sh
diff --git a/dbms/tests/queries/0_stateless/01013_totals_without_aggregation.reference b/tests/queries/0_stateless/01013_totals_without_aggregation.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01013_totals_without_aggregation.reference
rename to tests/queries/0_stateless/01013_totals_without_aggregation.reference
diff --git a/dbms/tests/queries/0_stateless/01013_totals_without_aggregation.sql b/tests/queries/0_stateless/01013_totals_without_aggregation.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01013_totals_without_aggregation.sql
rename to tests/queries/0_stateless/01013_totals_without_aggregation.sql
diff --git a/dbms/tests/queries/0_stateless/01014_count_of_merges_metrics.reference b/tests/queries/0_stateless/01014_count_of_merges_metrics.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01014_count_of_merges_metrics.reference
rename to tests/queries/0_stateless/01014_count_of_merges_metrics.reference
diff --git a/dbms/tests/queries/0_stateless/01014_count_of_merges_metrics.sql b/tests/queries/0_stateless/01014_count_of_merges_metrics.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01014_count_of_merges_metrics.sql
rename to tests/queries/0_stateless/01014_count_of_merges_metrics.sql
diff --git a/dbms/tests/queries/0_stateless/01014_format_custom_separated.reference b/tests/queries/0_stateless/01014_format_custom_separated.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01014_format_custom_separated.reference
rename to tests/queries/0_stateless/01014_format_custom_separated.reference
diff --git a/dbms/tests/queries/0_stateless/01014_format_custom_separated.sh b/tests/queries/0_stateless/01014_format_custom_separated.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01014_format_custom_separated.sh
rename to tests/queries/0_stateless/01014_format_custom_separated.sh
diff --git a/dbms/tests/queries/0_stateless/01014_function_repeat_corner_cases.reference b/tests/queries/0_stateless/01014_function_repeat_corner_cases.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01014_function_repeat_corner_cases.reference
rename to tests/queries/0_stateless/01014_function_repeat_corner_cases.reference
diff --git a/dbms/tests/queries/0_stateless/01014_function_repeat_corner_cases.sql b/tests/queries/0_stateless/01014_function_repeat_corner_cases.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01014_function_repeat_corner_cases.sql
rename to tests/queries/0_stateless/01014_function_repeat_corner_cases.sql
diff --git a/dbms/tests/queries/0_stateless/01014_lazy_database_basic.reference b/tests/queries/0_stateless/01014_lazy_database_basic.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01014_lazy_database_basic.reference
rename to tests/queries/0_stateless/01014_lazy_database_basic.reference
diff --git a/dbms/tests/queries/0_stateless/01014_lazy_database_basic.sh b/tests/queries/0_stateless/01014_lazy_database_basic.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01014_lazy_database_basic.sh
rename to tests/queries/0_stateless/01014_lazy_database_basic.sh
diff --git a/dbms/tests/queries/0_stateless/01015_array_split.reference b/tests/queries/0_stateless/01015_array_split.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01015_array_split.reference
rename to tests/queries/0_stateless/01015_array_split.reference
diff --git a/dbms/tests/queries/0_stateless/01015_array_split.sql b/tests/queries/0_stateless/01015_array_split.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01015_array_split.sql
rename to tests/queries/0_stateless/01015_array_split.sql
diff --git a/dbms/tests/queries/0_stateless/01015_attach_part.reference b/tests/queries/0_stateless/01015_attach_part.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01015_attach_part.reference
rename to tests/queries/0_stateless/01015_attach_part.reference
diff --git a/dbms/tests/queries/0_stateless/01015_attach_part.sql b/tests/queries/0_stateless/01015_attach_part.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01015_attach_part.sql
rename to tests/queries/0_stateless/01015_attach_part.sql
diff --git a/dbms/tests/queries/0_stateless/01015_database_bad_tables.reference b/tests/queries/0_stateless/01015_database_bad_tables.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01015_database_bad_tables.reference
rename to tests/queries/0_stateless/01015_database_bad_tables.reference
diff --git a/dbms/tests/queries/0_stateless/01015_database_bad_tables.sql b/tests/queries/0_stateless/01015_database_bad_tables.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01015_database_bad_tables.sql
rename to tests/queries/0_stateless/01015_database_bad_tables.sql
diff --git a/dbms/tests/queries/0_stateless/01015_empty_in_inner_right_join.reference b/tests/queries/0_stateless/01015_empty_in_inner_right_join.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01015_empty_in_inner_right_join.reference
rename to tests/queries/0_stateless/01015_empty_in_inner_right_join.reference
diff --git a/dbms/tests/queries/0_stateless/01015_empty_in_inner_right_join.sql b/tests/queries/0_stateless/01015_empty_in_inner_right_join.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01015_empty_in_inner_right_join.sql
rename to tests/queries/0_stateless/01015_empty_in_inner_right_join.sql
diff --git a/dbms/tests/queries/0_stateless/01015_insert_values_parametrized.reference b/tests/queries/0_stateless/01015_insert_values_parametrized.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01015_insert_values_parametrized.reference
rename to tests/queries/0_stateless/01015_insert_values_parametrized.reference
diff --git a/dbms/tests/queries/0_stateless/01015_insert_values_parametrized.sh b/tests/queries/0_stateless/01015_insert_values_parametrized.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01015_insert_values_parametrized.sh
rename to tests/queries/0_stateless/01015_insert_values_parametrized.sh
diff --git a/dbms/tests/queries/0_stateless/01015_random_constant.reference b/tests/queries/0_stateless/01015_random_constant.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01015_random_constant.reference
rename to tests/queries/0_stateless/01015_random_constant.reference
diff --git a/dbms/tests/queries/0_stateless/01015_random_constant.sql b/tests/queries/0_stateless/01015_random_constant.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01015_random_constant.sql
rename to tests/queries/0_stateless/01015_random_constant.sql
diff --git a/dbms/tests/queries/0_stateless/01016_index_tuple_field_type.reference b/tests/queries/0_stateless/01016_index_tuple_field_type.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01016_index_tuple_field_type.reference
rename to tests/queries/0_stateless/01016_index_tuple_field_type.reference
diff --git a/dbms/tests/queries/0_stateless/01016_index_tuple_field_type.sql b/tests/queries/0_stateless/01016_index_tuple_field_type.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01016_index_tuple_field_type.sql
rename to tests/queries/0_stateless/01016_index_tuple_field_type.sql
diff --git a/dbms/tests/queries/0_stateless/01016_input_null_as_default.reference b/tests/queries/0_stateless/01016_input_null_as_default.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01016_input_null_as_default.reference
rename to tests/queries/0_stateless/01016_input_null_as_default.reference
diff --git a/dbms/tests/queries/0_stateless/01016_input_null_as_default.sh b/tests/queries/0_stateless/01016_input_null_as_default.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01016_input_null_as_default.sh
rename to tests/queries/0_stateless/01016_input_null_as_default.sh
diff --git a/dbms/tests/queries/0_stateless/01016_macros.reference b/tests/queries/0_stateless/01016_macros.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01016_macros.reference
rename to tests/queries/0_stateless/01016_macros.reference
diff --git a/dbms/tests/queries/0_stateless/01016_macros.sql b/tests/queries/0_stateless/01016_macros.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01016_macros.sql
rename to tests/queries/0_stateless/01016_macros.sql
diff --git a/dbms/tests/queries/0_stateless/01016_null_part_minmax.reference b/tests/queries/0_stateless/01016_null_part_minmax.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01016_null_part_minmax.reference
rename to tests/queries/0_stateless/01016_null_part_minmax.reference
diff --git a/dbms/tests/queries/0_stateless/01016_null_part_minmax.sql b/tests/queries/0_stateless/01016_null_part_minmax.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01016_null_part_minmax.sql
rename to tests/queries/0_stateless/01016_null_part_minmax.sql
diff --git a/dbms/tests/queries/0_stateless/01016_uniqCombined64.reference b/tests/queries/0_stateless/01016_uniqCombined64.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01016_uniqCombined64.reference
rename to tests/queries/0_stateless/01016_uniqCombined64.reference
diff --git a/dbms/tests/queries/0_stateless/01016_uniqCombined64.sql b/tests/queries/0_stateless/01016_uniqCombined64.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01016_uniqCombined64.sql
rename to tests/queries/0_stateless/01016_uniqCombined64.sql
diff --git a/dbms/tests/queries/0_stateless/01017_in_unconvertible_complex_type.reference b/tests/queries/0_stateless/01017_in_unconvertible_complex_type.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01017_in_unconvertible_complex_type.reference
rename to tests/queries/0_stateless/01017_in_unconvertible_complex_type.reference
diff --git a/dbms/tests/queries/0_stateless/01017_in_unconvertible_complex_type.sql b/tests/queries/0_stateless/01017_in_unconvertible_complex_type.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01017_in_unconvertible_complex_type.sql
rename to tests/queries/0_stateless/01017_in_unconvertible_complex_type.sql
diff --git a/dbms/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.reference b/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.reference
rename to tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.reference
diff --git a/dbms/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.sh b/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.sh
rename to tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.sh
diff --git a/dbms/tests/queries/0_stateless/01017_tsv_empty_as_default.reference b/tests/queries/0_stateless/01017_tsv_empty_as_default.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01017_tsv_empty_as_default.reference
rename to tests/queries/0_stateless/01017_tsv_empty_as_default.reference
diff --git a/dbms/tests/queries/0_stateless/01017_tsv_empty_as_default.sh b/tests/queries/0_stateless/01017_tsv_empty_as_default.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01017_tsv_empty_as_default.sh
rename to tests/queries/0_stateless/01017_tsv_empty_as_default.sh
diff --git a/dbms/tests/queries/0_stateless/01017_uniqCombined_memory_usage.reference b/tests/queries/0_stateless/01017_uniqCombined_memory_usage.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01017_uniqCombined_memory_usage.reference
rename to tests/queries/0_stateless/01017_uniqCombined_memory_usage.reference
diff --git a/dbms/tests/queries/0_stateless/01017_uniqCombined_memory_usage.sql b/tests/queries/0_stateless/01017_uniqCombined_memory_usage.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01017_uniqCombined_memory_usage.sql
rename to tests/queries/0_stateless/01017_uniqCombined_memory_usage.sql
diff --git a/dbms/tests/queries/0_stateless/01018_Distributed__shard_num.reference b/tests/queries/0_stateless/01018_Distributed__shard_num.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01018_Distributed__shard_num.reference
rename to tests/queries/0_stateless/01018_Distributed__shard_num.reference
diff --git a/dbms/tests/queries/0_stateless/01018_Distributed__shard_num.sql b/tests/queries/0_stateless/01018_Distributed__shard_num.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01018_Distributed__shard_num.sql
rename to tests/queries/0_stateless/01018_Distributed__shard_num.sql
diff --git a/dbms/tests/queries/0_stateless/01018_ambiguous_column.reference b/tests/queries/0_stateless/01018_ambiguous_column.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01018_ambiguous_column.reference
rename to tests/queries/0_stateless/01018_ambiguous_column.reference
diff --git a/dbms/tests/queries/0_stateless/01018_ambiguous_column.sql b/tests/queries/0_stateless/01018_ambiguous_column.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01018_ambiguous_column.sql
rename to tests/queries/0_stateless/01018_ambiguous_column.sql
diff --git a/dbms/tests/queries/0_stateless/01018_ddl_dictionaries_bad_queries.reference b/tests/queries/0_stateless/01018_ddl_dictionaries_bad_queries.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01018_ddl_dictionaries_bad_queries.reference
rename to tests/queries/0_stateless/01018_ddl_dictionaries_bad_queries.reference
diff --git a/dbms/tests/queries/0_stateless/01018_ddl_dictionaries_bad_queries.sh b/tests/queries/0_stateless/01018_ddl_dictionaries_bad_queries.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01018_ddl_dictionaries_bad_queries.sh
rename to tests/queries/0_stateless/01018_ddl_dictionaries_bad_queries.sh
diff --git a/dbms/tests/queries/0_stateless/01018_ddl_dictionaries_concurrent_requrests.reference b/tests/queries/0_stateless/01018_ddl_dictionaries_concurrent_requrests.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01018_ddl_dictionaries_concurrent_requrests.reference
rename to tests/queries/0_stateless/01018_ddl_dictionaries_concurrent_requrests.reference
diff --git a/dbms/tests/queries/0_stateless/01018_ddl_dictionaries_concurrent_requrests.sh b/tests/queries/0_stateless/01018_ddl_dictionaries_concurrent_requrests.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01018_ddl_dictionaries_concurrent_requrests.sh
rename to tests/queries/0_stateless/01018_ddl_dictionaries_concurrent_requrests.sh
diff --git a/dbms/tests/queries/0_stateless/01018_ddl_dictionaries_create.reference b/tests/queries/0_stateless/01018_ddl_dictionaries_create.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01018_ddl_dictionaries_create.reference
rename to tests/queries/0_stateless/01018_ddl_dictionaries_create.reference
diff --git a/dbms/tests/queries/0_stateless/01018_ddl_dictionaries_create.sql b/tests/queries/0_stateless/01018_ddl_dictionaries_create.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01018_ddl_dictionaries_create.sql
rename to tests/queries/0_stateless/01018_ddl_dictionaries_create.sql
diff --git a/dbms/tests/queries/0_stateless/01018_ddl_dictionaries_select.reference b/tests/queries/0_stateless/01018_ddl_dictionaries_select.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01018_ddl_dictionaries_select.reference
rename to tests/queries/0_stateless/01018_ddl_dictionaries_select.reference
diff --git a/dbms/tests/queries/0_stateless/01018_ddl_dictionaries_select.sql b/tests/queries/0_stateless/01018_ddl_dictionaries_select.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01018_ddl_dictionaries_select.sql
rename to tests/queries/0_stateless/01018_ddl_dictionaries_select.sql
diff --git a/dbms/tests/queries/0_stateless/01018_ddl_dictionaries_special.reference b/tests/queries/0_stateless/01018_ddl_dictionaries_special.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01018_ddl_dictionaries_special.reference
rename to tests/queries/0_stateless/01018_ddl_dictionaries_special.reference
diff --git a/dbms/tests/queries/0_stateless/01018_ddl_dictionaries_special.sql b/tests/queries/0_stateless/01018_ddl_dictionaries_special.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01018_ddl_dictionaries_special.sql
rename to tests/queries/0_stateless/01018_ddl_dictionaries_special.sql
diff --git a/dbms/tests/queries/0_stateless/01018_dictionaries_from_dictionaries.reference b/tests/queries/0_stateless/01018_dictionaries_from_dictionaries.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01018_dictionaries_from_dictionaries.reference
rename to tests/queries/0_stateless/01018_dictionaries_from_dictionaries.reference
diff --git a/dbms/tests/queries/0_stateless/01018_dictionaries_from_dictionaries.sql b/tests/queries/0_stateless/01018_dictionaries_from_dictionaries.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01018_dictionaries_from_dictionaries.sql
rename to tests/queries/0_stateless/01018_dictionaries_from_dictionaries.sql
diff --git a/dbms/tests/queries/0_stateless/01018_empty_aggregation_filling.reference b/tests/queries/0_stateless/01018_empty_aggregation_filling.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01018_empty_aggregation_filling.reference
rename to tests/queries/0_stateless/01018_empty_aggregation_filling.reference
diff --git a/dbms/tests/queries/0_stateless/01018_empty_aggregation_filling.sql b/tests/queries/0_stateless/01018_empty_aggregation_filling.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01018_empty_aggregation_filling.sql
rename to tests/queries/0_stateless/01018_empty_aggregation_filling.sql
diff --git a/dbms/tests/queries/0_stateless/01018_insert_multiple_blocks_with_defaults.reference b/tests/queries/0_stateless/01018_insert_multiple_blocks_with_defaults.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01018_insert_multiple_blocks_with_defaults.reference
rename to tests/queries/0_stateless/01018_insert_multiple_blocks_with_defaults.reference
diff --git a/dbms/tests/queries/0_stateless/01018_insert_multiple_blocks_with_defaults.sh b/tests/queries/0_stateless/01018_insert_multiple_blocks_with_defaults.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01018_insert_multiple_blocks_with_defaults.sh
rename to tests/queries/0_stateless/01018_insert_multiple_blocks_with_defaults.sh
diff --git a/dbms/tests/queries/0_stateless/01018_optimize_read_in_order_with_in_subquery.reference b/tests/queries/0_stateless/01018_optimize_read_in_order_with_in_subquery.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01018_optimize_read_in_order_with_in_subquery.reference
rename to tests/queries/0_stateless/01018_optimize_read_in_order_with_in_subquery.reference
diff --git a/dbms/tests/queries/0_stateless/01018_optimize_read_in_order_with_in_subquery.sql b/tests/queries/0_stateless/01018_optimize_read_in_order_with_in_subquery.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01018_optimize_read_in_order_with_in_subquery.sql
rename to tests/queries/0_stateless/01018_optimize_read_in_order_with_in_subquery.sql
diff --git a/dbms/tests/queries/0_stateless/01019_Buffer_and_max_memory_usage.reference b/tests/queries/0_stateless/01019_Buffer_and_max_memory_usage.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01019_Buffer_and_max_memory_usage.reference
rename to tests/queries/0_stateless/01019_Buffer_and_max_memory_usage.reference
diff --git a/dbms/tests/queries/0_stateless/01019_Buffer_and_max_memory_usage.sql b/tests/queries/0_stateless/01019_Buffer_and_max_memory_usage.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01019_Buffer_and_max_memory_usage.sql
rename to tests/queries/0_stateless/01019_Buffer_and_max_memory_usage.sql
diff --git a/dbms/tests/queries/0_stateless/01019_alter_materialized_view_atomic.reference b/tests/queries/0_stateless/01019_alter_materialized_view_atomic.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01019_alter_materialized_view_atomic.reference
rename to tests/queries/0_stateless/01019_alter_materialized_view_atomic.reference
diff --git a/dbms/tests/queries/0_stateless/01019_alter_materialized_view_atomic.sh b/tests/queries/0_stateless/01019_alter_materialized_view_atomic.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01019_alter_materialized_view_atomic.sh
rename to tests/queries/0_stateless/01019_alter_materialized_view_atomic.sh
diff --git a/dbms/tests/queries/0_stateless/01019_alter_materialized_view_consistent.reference b/tests/queries/0_stateless/01019_alter_materialized_view_consistent.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01019_alter_materialized_view_consistent.reference
rename to tests/queries/0_stateless/01019_alter_materialized_view_consistent.reference
diff --git a/dbms/tests/queries/0_stateless/01019_alter_materialized_view_consistent.sh b/tests/queries/0_stateless/01019_alter_materialized_view_consistent.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01019_alter_materialized_view_consistent.sh
rename to tests/queries/0_stateless/01019_alter_materialized_view_consistent.sh
diff --git a/dbms/tests/queries/0_stateless/01019_alter_materialized_view_query.reference b/tests/queries/0_stateless/01019_alter_materialized_view_query.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01019_alter_materialized_view_query.reference
rename to tests/queries/0_stateless/01019_alter_materialized_view_query.reference
diff --git a/dbms/tests/queries/0_stateless/01019_alter_materialized_view_query.sql b/tests/queries/0_stateless/01019_alter_materialized_view_query.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01019_alter_materialized_view_query.sql
rename to tests/queries/0_stateless/01019_alter_materialized_view_query.sql
diff --git a/dbms/tests/queries/0_stateless/01019_array_fill.reference b/tests/queries/0_stateless/01019_array_fill.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01019_array_fill.reference
rename to tests/queries/0_stateless/01019_array_fill.reference
diff --git a/dbms/tests/queries/0_stateless/01019_array_fill.sql b/tests/queries/0_stateless/01019_array_fill.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01019_array_fill.sql
rename to tests/queries/0_stateless/01019_array_fill.sql
diff --git a/dbms/tests/queries/0_stateless/01019_materialized_view_select_extra_columns.reference b/tests/queries/0_stateless/01019_materialized_view_select_extra_columns.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01019_materialized_view_select_extra_columns.reference
rename to tests/queries/0_stateless/01019_materialized_view_select_extra_columns.reference
diff --git a/dbms/tests/queries/0_stateless/01019_materialized_view_select_extra_columns.sql b/tests/queries/0_stateless/01019_materialized_view_select_extra_columns.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01019_materialized_view_select_extra_columns.sql
rename to tests/queries/0_stateless/01019_materialized_view_select_extra_columns.sql
diff --git a/dbms/tests/queries/0_stateless/01019_parallel_parsing_cancel.reference b/tests/queries/0_stateless/01019_parallel_parsing_cancel.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01019_parallel_parsing_cancel.reference
rename to tests/queries/0_stateless/01019_parallel_parsing_cancel.reference
diff --git a/dbms/tests/queries/0_stateless/01019_parallel_parsing_cancel.sh b/tests/queries/0_stateless/01019_parallel_parsing_cancel.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01019_parallel_parsing_cancel.sh
rename to tests/queries/0_stateless/01019_parallel_parsing_cancel.sh
diff --git a/dbms/tests/queries/0_stateless/01020_function_array_compact.reference b/tests/queries/0_stateless/01020_function_array_compact.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01020_function_array_compact.reference
rename to tests/queries/0_stateless/01020_function_array_compact.reference
diff --git a/dbms/tests/queries/0_stateless/01020_function_array_compact.sql b/tests/queries/0_stateless/01020_function_array_compact.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01020_function_array_compact.sql
rename to tests/queries/0_stateless/01020_function_array_compact.sql
diff --git a/dbms/tests/queries/0_stateless/01020_function_char.reference b/tests/queries/0_stateless/01020_function_char.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01020_function_char.reference
rename to tests/queries/0_stateless/01020_function_char.reference
diff --git a/dbms/tests/queries/0_stateless/01020_function_char.sql b/tests/queries/0_stateless/01020_function_char.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01020_function_char.sql
rename to tests/queries/0_stateless/01020_function_char.sql
diff --git a/dbms/tests/queries/0_stateless/01020_having_without_group_by.reference b/tests/queries/0_stateless/01020_having_without_group_by.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01020_having_without_group_by.reference
rename to tests/queries/0_stateless/01020_having_without_group_by.reference
diff --git a/dbms/tests/queries/0_stateless/01020_having_without_group_by.sql b/tests/queries/0_stateless/01020_having_without_group_by.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01020_having_without_group_by.sql
rename to tests/queries/0_stateless/01020_having_without_group_by.sql
diff --git a/dbms/tests/queries/0_stateless/01021_create_as_select.reference b/tests/queries/0_stateless/01021_create_as_select.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01021_create_as_select.reference
rename to tests/queries/0_stateless/01021_create_as_select.reference
diff --git a/dbms/tests/queries/0_stateless/01021_create_as_select.sql b/tests/queries/0_stateless/01021_create_as_select.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01021_create_as_select.sql
rename to tests/queries/0_stateless/01021_create_as_select.sql
diff --git a/dbms/tests/queries/0_stateless/01021_only_tuple_columns.reference b/tests/queries/0_stateless/01021_only_tuple_columns.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01021_only_tuple_columns.reference
rename to tests/queries/0_stateless/01021_only_tuple_columns.reference
diff --git a/dbms/tests/queries/0_stateless/01021_only_tuple_columns.sql b/tests/queries/0_stateless/01021_only_tuple_columns.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01021_only_tuple_columns.sql
rename to tests/queries/0_stateless/01021_only_tuple_columns.sql
diff --git a/dbms/tests/queries/0_stateless/01021_tuple_parser.reference b/tests/queries/0_stateless/01021_tuple_parser.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01021_tuple_parser.reference
rename to tests/queries/0_stateless/01021_tuple_parser.reference
diff --git a/dbms/tests/queries/0_stateless/01021_tuple_parser.sql b/tests/queries/0_stateless/01021_tuple_parser.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01021_tuple_parser.sql
rename to tests/queries/0_stateless/01021_tuple_parser.sql
diff --git a/dbms/tests/queries/0_stateless/01023_materialized_view_query_context.reference b/tests/queries/0_stateless/01023_materialized_view_query_context.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01023_materialized_view_query_context.reference
rename to tests/queries/0_stateless/01023_materialized_view_query_context.reference
diff --git a/dbms/tests/queries/0_stateless/01023_materialized_view_query_context.sql b/tests/queries/0_stateless/01023_materialized_view_query_context.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01023_materialized_view_query_context.sql
rename to tests/queries/0_stateless/01023_materialized_view_query_context.sql
diff --git a/dbms/tests/queries/0_stateless/01024__getScalar.reference b/tests/queries/0_stateless/01024__getScalar.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01024__getScalar.reference
rename to tests/queries/0_stateless/01024__getScalar.reference
diff --git a/dbms/tests/queries/0_stateless/01024__getScalar.sql b/tests/queries/0_stateless/01024__getScalar.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01024__getScalar.sql
rename to tests/queries/0_stateless/01024__getScalar.sql
diff --git a/dbms/tests/queries/0_stateless/01025_array_compact_generic.reference b/tests/queries/0_stateless/01025_array_compact_generic.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01025_array_compact_generic.reference
rename to tests/queries/0_stateless/01025_array_compact_generic.reference
diff --git a/dbms/tests/queries/0_stateless/01025_array_compact_generic.sql b/tests/queries/0_stateless/01025_array_compact_generic.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01025_array_compact_generic.sql
rename to tests/queries/0_stateless/01025_array_compact_generic.sql
diff --git a/dbms/tests/queries/0_stateless/01026_char_utf8.reference b/tests/queries/0_stateless/01026_char_utf8.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01026_char_utf8.reference
rename to tests/queries/0_stateless/01026_char_utf8.reference
diff --git a/dbms/tests/queries/0_stateless/01026_char_utf8.sql b/tests/queries/0_stateless/01026_char_utf8.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01026_char_utf8.sql
rename to tests/queries/0_stateless/01026_char_utf8.sql
diff --git a/dbms/tests/queries/0_stateless/01029_early_constant_folding.reference b/tests/queries/0_stateless/01029_early_constant_folding.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01029_early_constant_folding.reference
rename to tests/queries/0_stateless/01029_early_constant_folding.reference
diff --git a/dbms/tests/queries/0_stateless/01029_early_constant_folding.sql b/tests/queries/0_stateless/01029_early_constant_folding.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01029_early_constant_folding.sql
rename to tests/queries/0_stateless/01029_early_constant_folding.sql
diff --git a/dbms/tests/queries/0_stateless/01030_concatenate_equal_fixed_strings.reference b/tests/queries/0_stateless/01030_concatenate_equal_fixed_strings.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01030_concatenate_equal_fixed_strings.reference
rename to tests/queries/0_stateless/01030_concatenate_equal_fixed_strings.reference
diff --git a/dbms/tests/queries/0_stateless/01030_concatenate_equal_fixed_strings.sql b/tests/queries/0_stateless/01030_concatenate_equal_fixed_strings.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01030_concatenate_equal_fixed_strings.sql
rename to tests/queries/0_stateless/01030_concatenate_equal_fixed_strings.sql
diff --git a/dbms/tests/queries/0_stateless/01030_final_mark_empty_primary_key.reference b/tests/queries/0_stateless/01030_final_mark_empty_primary_key.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01030_final_mark_empty_primary_key.reference
rename to tests/queries/0_stateless/01030_final_mark_empty_primary_key.reference
diff --git a/dbms/tests/queries/0_stateless/01030_final_mark_empty_primary_key.sql b/tests/queries/0_stateless/01030_final_mark_empty_primary_key.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01030_final_mark_empty_primary_key.sql
rename to tests/queries/0_stateless/01030_final_mark_empty_primary_key.sql
diff --git a/dbms/tests/queries/0_stateless/01030_incorrect_count_summing_merge_tree.reference b/tests/queries/0_stateless/01030_incorrect_count_summing_merge_tree.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01030_incorrect_count_summing_merge_tree.reference
rename to tests/queries/0_stateless/01030_incorrect_count_summing_merge_tree.reference
diff --git a/dbms/tests/queries/0_stateless/01030_incorrect_count_summing_merge_tree.sql b/tests/queries/0_stateless/01030_incorrect_count_summing_merge_tree.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01030_incorrect_count_summing_merge_tree.sql
rename to tests/queries/0_stateless/01030_incorrect_count_summing_merge_tree.sql
diff --git a/dbms/tests/queries/0_stateless/01030_limit_by_with_ties_error.reference b/tests/queries/0_stateless/01030_limit_by_with_ties_error.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01030_limit_by_with_ties_error.reference
rename to tests/queries/0_stateless/01030_limit_by_with_ties_error.reference
diff --git a/dbms/tests/queries/0_stateless/01030_limit_by_with_ties_error.sh b/tests/queries/0_stateless/01030_limit_by_with_ties_error.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01030_limit_by_with_ties_error.sh
rename to tests/queries/0_stateless/01030_limit_by_with_ties_error.sh
diff --git a/dbms/tests/queries/0_stateless/01030_storage_hdfs_syntax.reference b/tests/queries/0_stateless/01030_storage_hdfs_syntax.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01030_storage_hdfs_syntax.reference
rename to tests/queries/0_stateless/01030_storage_hdfs_syntax.reference
diff --git a/dbms/tests/queries/0_stateless/01030_storage_hdfs_syntax.sql b/tests/queries/0_stateless/01030_storage_hdfs_syntax.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01030_storage_hdfs_syntax.sql
rename to tests/queries/0_stateless/01030_storage_hdfs_syntax.sql
diff --git a/dbms/tests/queries/0_stateless/01030_storage_set_supports_read.reference b/tests/queries/0_stateless/01030_storage_set_supports_read.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01030_storage_set_supports_read.reference
rename to tests/queries/0_stateless/01030_storage_set_supports_read.reference
diff --git a/dbms/tests/queries/0_stateless/01030_storage_set_supports_read.sql b/tests/queries/0_stateless/01030_storage_set_supports_read.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01030_storage_set_supports_read.sql
rename to tests/queries/0_stateless/01030_storage_set_supports_read.sql
diff --git a/dbms/tests/queries/0_stateless/01030_storage_url_syntax.reference b/tests/queries/0_stateless/01030_storage_url_syntax.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01030_storage_url_syntax.reference
rename to tests/queries/0_stateless/01030_storage_url_syntax.reference
diff --git a/dbms/tests/queries/0_stateless/01030_storage_url_syntax.sql b/tests/queries/0_stateless/01030_storage_url_syntax.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01030_storage_url_syntax.sql
rename to tests/queries/0_stateless/01030_storage_url_syntax.sql
diff --git a/dbms/tests/queries/0_stateless/01031_mutations_interpreter_and_context.reference b/tests/queries/0_stateless/01031_mutations_interpreter_and_context.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01031_mutations_interpreter_and_context.reference
rename to tests/queries/0_stateless/01031_mutations_interpreter_and_context.reference
diff --git a/dbms/tests/queries/0_stateless/01031_mutations_interpreter_and_context.sh b/tests/queries/0_stateless/01031_mutations_interpreter_and_context.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01031_mutations_interpreter_and_context.sh
rename to tests/queries/0_stateless/01031_mutations_interpreter_and_context.sh
diff --git a/dbms/tests/queries/0_stateless/01031_new_any_join.reference b/tests/queries/0_stateless/01031_new_any_join.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01031_new_any_join.reference
rename to tests/queries/0_stateless/01031_new_any_join.reference
diff --git a/dbms/tests/queries/0_stateless/01031_new_any_join.sql b/tests/queries/0_stateless/01031_new_any_join.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01031_new_any_join.sql
rename to tests/queries/0_stateless/01031_new_any_join.sql
diff --git a/dbms/tests/queries/0_stateless/01031_pmj_new_any_semi_join.reference b/tests/queries/0_stateless/01031_pmj_new_any_semi_join.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01031_pmj_new_any_semi_join.reference
rename to tests/queries/0_stateless/01031_pmj_new_any_semi_join.reference
diff --git a/dbms/tests/queries/0_stateless/01031_pmj_new_any_semi_join.sql b/tests/queries/0_stateless/01031_pmj_new_any_semi_join.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01031_pmj_new_any_semi_join.sql
rename to tests/queries/0_stateless/01031_pmj_new_any_semi_join.sql
diff --git a/dbms/tests/queries/0_stateless/01031_semi_anti_join.reference b/tests/queries/0_stateless/01031_semi_anti_join.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01031_semi_anti_join.reference
rename to tests/queries/0_stateless/01031_semi_anti_join.reference
diff --git a/dbms/tests/queries/0_stateless/01031_semi_anti_join.sql b/tests/queries/0_stateless/01031_semi_anti_join.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01031_semi_anti_join.sql
rename to tests/queries/0_stateless/01031_semi_anti_join.sql
diff --git a/dbms/tests/queries/0_stateless/01032_cityHash64_for_UUID.reference b/tests/queries/0_stateless/01032_cityHash64_for_UUID.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01032_cityHash64_for_UUID.reference
rename to tests/queries/0_stateless/01032_cityHash64_for_UUID.reference
diff --git a/dbms/tests/queries/0_stateless/01032_cityHash64_for_UUID.sql b/tests/queries/0_stateless/01032_cityHash64_for_UUID.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01032_cityHash64_for_UUID.sql
rename to tests/queries/0_stateless/01032_cityHash64_for_UUID.sql
diff --git a/dbms/tests/queries/0_stateless/01032_cityHash64_for_decimal.reference b/tests/queries/0_stateless/01032_cityHash64_for_decimal.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01032_cityHash64_for_decimal.reference
rename to tests/queries/0_stateless/01032_cityHash64_for_decimal.reference
diff --git a/dbms/tests/queries/0_stateless/01032_cityHash64_for_decimal.sql b/tests/queries/0_stateless/01032_cityHash64_for_decimal.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01032_cityHash64_for_decimal.sql
rename to tests/queries/0_stateless/01032_cityHash64_for_decimal.sql
diff --git a/dbms/tests/queries/0_stateless/01032_duplicate_column_insert_query.reference b/tests/queries/0_stateless/01032_duplicate_column_insert_query.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01032_duplicate_column_insert_query.reference
rename to tests/queries/0_stateless/01032_duplicate_column_insert_query.reference
diff --git a/dbms/tests/queries/0_stateless/01032_duplicate_column_insert_query.sql b/tests/queries/0_stateless/01032_duplicate_column_insert_query.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01032_duplicate_column_insert_query.sql
rename to tests/queries/0_stateless/01032_duplicate_column_insert_query.sql
diff --git a/dbms/tests/queries/0_stateless/01033_dictionaries_lifetime.reference b/tests/queries/0_stateless/01033_dictionaries_lifetime.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01033_dictionaries_lifetime.reference
rename to tests/queries/0_stateless/01033_dictionaries_lifetime.reference
diff --git a/dbms/tests/queries/0_stateless/01033_dictionaries_lifetime.sql b/tests/queries/0_stateless/01033_dictionaries_lifetime.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01033_dictionaries_lifetime.sql
rename to tests/queries/0_stateless/01033_dictionaries_lifetime.sql
diff --git a/dbms/tests/queries/0_stateless/01033_quota_dcl.reference b/tests/queries/0_stateless/01033_quota_dcl.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01033_quota_dcl.reference
rename to tests/queries/0_stateless/01033_quota_dcl.reference
diff --git a/dbms/tests/queries/0_stateless/01033_quota_dcl.sql b/tests/queries/0_stateless/01033_quota_dcl.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01033_quota_dcl.sql
rename to tests/queries/0_stateless/01033_quota_dcl.sql
diff --git a/dbms/tests/queries/0_stateless/01033_storage_odbc_parsing_exception_check.reference b/tests/queries/0_stateless/01033_storage_odbc_parsing_exception_check.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01033_storage_odbc_parsing_exception_check.reference
rename to tests/queries/0_stateless/01033_storage_odbc_parsing_exception_check.reference
diff --git a/dbms/tests/queries/0_stateless/01033_storage_odbc_parsing_exception_check.sql b/tests/queries/0_stateless/01033_storage_odbc_parsing_exception_check.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01033_storage_odbc_parsing_exception_check.sql
rename to tests/queries/0_stateless/01033_storage_odbc_parsing_exception_check.sql
diff --git a/dbms/tests/queries/0_stateless/01033_substr_negative_size_arg.reference b/tests/queries/0_stateless/01033_substr_negative_size_arg.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01033_substr_negative_size_arg.reference
rename to tests/queries/0_stateless/01033_substr_negative_size_arg.reference
diff --git a/dbms/tests/queries/0_stateless/01033_substr_negative_size_arg.sql b/tests/queries/0_stateless/01033_substr_negative_size_arg.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01033_substr_negative_size_arg.sql
rename to tests/queries/0_stateless/01033_substr_negative_size_arg.sql
diff --git a/dbms/tests/queries/0_stateless/01034_JSONCompactEachRow.reference b/tests/queries/0_stateless/01034_JSONCompactEachRow.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01034_JSONCompactEachRow.reference
rename to tests/queries/0_stateless/01034_JSONCompactEachRow.reference
diff --git a/dbms/tests/queries/0_stateless/01034_JSONCompactEachRow.sql b/tests/queries/0_stateless/01034_JSONCompactEachRow.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01034_JSONCompactEachRow.sql
rename to tests/queries/0_stateless/01034_JSONCompactEachRow.sql
diff --git a/dbms/tests/queries/0_stateless/01034_move_partition_from_table_zookeeper.reference b/tests/queries/0_stateless/01034_move_partition_from_table_zookeeper.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01034_move_partition_from_table_zookeeper.reference
rename to tests/queries/0_stateless/01034_move_partition_from_table_zookeeper.reference
diff --git a/dbms/tests/queries/0_stateless/01034_move_partition_from_table_zookeeper.sh b/tests/queries/0_stateless/01034_move_partition_from_table_zookeeper.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01034_move_partition_from_table_zookeeper.sh
rename to tests/queries/0_stateless/01034_move_partition_from_table_zookeeper.sh
diff --git a/dbms/tests/queries/0_stateless/01034_order_by_pk_prefix.reference b/tests/queries/0_stateless/01034_order_by_pk_prefix.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01034_order_by_pk_prefix.reference
rename to tests/queries/0_stateless/01034_order_by_pk_prefix.reference
diff --git a/dbms/tests/queries/0_stateless/01034_order_by_pk_prefix.sql b/tests/queries/0_stateless/01034_order_by_pk_prefix.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01034_order_by_pk_prefix.sql
rename to tests/queries/0_stateless/01034_order_by_pk_prefix.sql
diff --git a/dbms/tests/queries/0_stateless/01034_prewhere_max_parallel_replicas_distributed.reference b/tests/queries/0_stateless/01034_prewhere_max_parallel_replicas_distributed.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01034_prewhere_max_parallel_replicas_distributed.reference
rename to tests/queries/0_stateless/01034_prewhere_max_parallel_replicas_distributed.reference
diff --git a/dbms/tests/queries/0_stateless/01034_prewhere_max_parallel_replicas_distributed.sql b/tests/queries/0_stateless/01034_prewhere_max_parallel_replicas_distributed.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01034_prewhere_max_parallel_replicas_distributed.sql
rename to tests/queries/0_stateless/01034_prewhere_max_parallel_replicas_distributed.sql
diff --git a/dbms/tests/queries/0_stateless/01034_sample_final_distributed.reference b/tests/queries/0_stateless/01034_sample_final_distributed.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01034_sample_final_distributed.reference
rename to tests/queries/0_stateless/01034_sample_final_distributed.reference
diff --git a/dbms/tests/queries/0_stateless/01034_sample_final_distributed.sql b/tests/queries/0_stateless/01034_sample_final_distributed.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01034_sample_final_distributed.sql
rename to tests/queries/0_stateless/01034_sample_final_distributed.sql
diff --git a/dbms/tests/queries/0_stateless/01034_unknown_qualified_column_in_join.reference b/tests/queries/0_stateless/01034_unknown_qualified_column_in_join.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01034_unknown_qualified_column_in_join.reference
rename to tests/queries/0_stateless/01034_unknown_qualified_column_in_join.reference
diff --git a/dbms/tests/queries/0_stateless/01034_unknown_qualified_column_in_join.sql b/tests/queries/0_stateless/01034_unknown_qualified_column_in_join.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01034_unknown_qualified_column_in_join.sql
rename to tests/queries/0_stateless/01034_unknown_qualified_column_in_join.sql
diff --git a/dbms/tests/queries/0_stateless/01034_values_parse_float_bug.reference b/tests/queries/0_stateless/01034_values_parse_float_bug.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01034_values_parse_float_bug.reference
rename to tests/queries/0_stateless/01034_values_parse_float_bug.reference
diff --git a/dbms/tests/queries/0_stateless/01034_values_parse_float_bug.sh b/tests/queries/0_stateless/01034_values_parse_float_bug.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01034_values_parse_float_bug.sh
rename to tests/queries/0_stateless/01034_values_parse_float_bug.sh
diff --git a/dbms/tests/queries/0_stateless/01034_with_fill_and_push_down_predicate.reference b/tests/queries/0_stateless/01034_with_fill_and_push_down_predicate.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01034_with_fill_and_push_down_predicate.reference
rename to tests/queries/0_stateless/01034_with_fill_and_push_down_predicate.reference
diff --git a/dbms/tests/queries/0_stateless/01034_with_fill_and_push_down_predicate.sql b/tests/queries/0_stateless/01034_with_fill_and_push_down_predicate.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01034_with_fill_and_push_down_predicate.sql
rename to tests/queries/0_stateless/01034_with_fill_and_push_down_predicate.sql
diff --git a/dbms/tests/queries/0_stateless/01035_avg_weighted.reference b/tests/queries/0_stateless/01035_avg_weighted.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01035_avg_weighted.reference
rename to
tests/queries/0_stateless/01035_avg_weighted.reference diff --git a/dbms/tests/queries/0_stateless/01035_avg_weighted.sh b/tests/queries/0_stateless/01035_avg_weighted.sh similarity index 100% rename from dbms/tests/queries/0_stateless/01035_avg_weighted.sh rename to tests/queries/0_stateless/01035_avg_weighted.sh diff --git a/dbms/tests/queries/0_stateless/01035_concurrent_move_partition_from_table_zookeeper.reference b/tests/queries/0_stateless/01035_concurrent_move_partition_from_table_zookeeper.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01035_concurrent_move_partition_from_table_zookeeper.reference rename to tests/queries/0_stateless/01035_concurrent_move_partition_from_table_zookeeper.reference diff --git a/dbms/tests/queries/0_stateless/01035_concurrent_move_partition_from_table_zookeeper.sh b/tests/queries/0_stateless/01035_concurrent_move_partition_from_table_zookeeper.sh similarity index 100% rename from dbms/tests/queries/0_stateless/01035_concurrent_move_partition_from_table_zookeeper.sh rename to tests/queries/0_stateless/01035_concurrent_move_partition_from_table_zookeeper.sh diff --git a/dbms/tests/queries/0_stateless/01035_enum_conversion_native_format.reference b/tests/queries/0_stateless/01035_enum_conversion_native_format.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01035_enum_conversion_native_format.reference rename to tests/queries/0_stateless/01035_enum_conversion_native_format.reference diff --git a/dbms/tests/queries/0_stateless/01035_enum_conversion_native_format.sh b/tests/queries/0_stateless/01035_enum_conversion_native_format.sh similarity index 100% rename from dbms/tests/queries/0_stateless/01035_enum_conversion_native_format.sh rename to tests/queries/0_stateless/01035_enum_conversion_native_format.sh diff --git a/dbms/tests/queries/0_stateless/01035_lc_empty_part_bug.reference b/tests/queries/0_stateless/01035_lc_empty_part_bug.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01035_lc_empty_part_bug.reference rename to tests/queries/0_stateless/01035_lc_empty_part_bug.reference diff --git a/dbms/tests/queries/0_stateless/01035_lc_empty_part_bug.sh b/tests/queries/0_stateless/01035_lc_empty_part_bug.sh similarity index 100% rename from dbms/tests/queries/0_stateless/01035_lc_empty_part_bug.sh rename to tests/queries/0_stateless/01035_lc_empty_part_bug.sh diff --git a/dbms/tests/queries/0_stateless/01035_prewhere_with_alias.reference b/tests/queries/0_stateless/01035_prewhere_with_alias.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01035_prewhere_with_alias.reference rename to tests/queries/0_stateless/01035_prewhere_with_alias.reference diff --git a/dbms/tests/queries/0_stateless/01035_prewhere_with_alias.sql b/tests/queries/0_stateless/01035_prewhere_with_alias.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01035_prewhere_with_alias.sql rename to tests/queries/0_stateless/01035_prewhere_with_alias.sql diff --git a/dbms/tests/queries/0_stateless/01036_no_superfluous_dict_reload_on_create_database.reference b/tests/queries/0_stateless/01036_no_superfluous_dict_reload_on_create_database.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01036_no_superfluous_dict_reload_on_create_database.reference rename to tests/queries/0_stateless/01036_no_superfluous_dict_reload_on_create_database.reference diff --git a/dbms/tests/queries/0_stateless/01036_no_superfluous_dict_reload_on_create_database.sql 
b/tests/queries/0_stateless/01036_no_superfluous_dict_reload_on_create_database.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01036_no_superfluous_dict_reload_on_create_database.sql rename to tests/queries/0_stateless/01036_no_superfluous_dict_reload_on_create_database.sql diff --git a/dbms/tests/queries/0_stateless/01036_no_superfluous_dict_reload_on_create_database_2.reference b/tests/queries/0_stateless/01036_no_superfluous_dict_reload_on_create_database_2.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01036_no_superfluous_dict_reload_on_create_database_2.reference rename to tests/queries/0_stateless/01036_no_superfluous_dict_reload_on_create_database_2.reference diff --git a/dbms/tests/queries/0_stateless/01036_no_superfluous_dict_reload_on_create_database_2.sql b/tests/queries/0_stateless/01036_no_superfluous_dict_reload_on_create_database_2.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01036_no_superfluous_dict_reload_on_create_database_2.sql rename to tests/queries/0_stateless/01036_no_superfluous_dict_reload_on_create_database_2.sql diff --git a/dbms/tests/queries/0_stateless/01036_union_different_columns.reference b/tests/queries/0_stateless/01036_union_different_columns.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01036_union_different_columns.reference rename to tests/queries/0_stateless/01036_union_different_columns.reference diff --git a/dbms/tests/queries/0_stateless/01036_union_different_columns.sql b/tests/queries/0_stateless/01036_union_different_columns.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01036_union_different_columns.sql rename to tests/queries/0_stateless/01036_union_different_columns.sql diff --git a/dbms/tests/queries/0_stateless/01037_polygon_dict_multi_polygons.reference b/tests/queries/0_stateless/01037_polygon_dict_multi_polygons.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01037_polygon_dict_multi_polygons.reference rename to tests/queries/0_stateless/01037_polygon_dict_multi_polygons.reference diff --git a/dbms/tests/queries/0_stateless/01037_polygon_dict_multi_polygons.sql b/tests/queries/0_stateless/01037_polygon_dict_multi_polygons.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01037_polygon_dict_multi_polygons.sql rename to tests/queries/0_stateless/01037_polygon_dict_multi_polygons.sql diff --git a/dbms/tests/queries/0_stateless/01037_polygon_dict_simple_polygons.reference b/tests/queries/0_stateless/01037_polygon_dict_simple_polygons.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01037_polygon_dict_simple_polygons.reference rename to tests/queries/0_stateless/01037_polygon_dict_simple_polygons.reference diff --git a/dbms/tests/queries/0_stateless/01037_polygon_dict_simple_polygons.sql b/tests/queries/0_stateless/01037_polygon_dict_simple_polygons.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01037_polygon_dict_simple_polygons.sql rename to tests/queries/0_stateless/01037_polygon_dict_simple_polygons.sql diff --git a/dbms/tests/queries/0_stateless/01037_zookeeper_check_table_empty_pk.reference b/tests/queries/0_stateless/01037_zookeeper_check_table_empty_pk.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01037_zookeeper_check_table_empty_pk.reference rename to tests/queries/0_stateless/01037_zookeeper_check_table_empty_pk.reference diff --git 
a/dbms/tests/queries/0_stateless/01037_zookeeper_check_table_empty_pk.sql b/tests/queries/0_stateless/01037_zookeeper_check_table_empty_pk.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01037_zookeeper_check_table_empty_pk.sql rename to tests/queries/0_stateless/01037_zookeeper_check_table_empty_pk.sql diff --git a/dbms/tests/queries/0_stateless/01038_array_of_unnamed_tuples.reference b/tests/queries/0_stateless/01038_array_of_unnamed_tuples.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01038_array_of_unnamed_tuples.reference rename to tests/queries/0_stateless/01038_array_of_unnamed_tuples.reference diff --git a/dbms/tests/queries/0_stateless/01038_array_of_unnamed_tuples.sql b/tests/queries/0_stateless/01038_array_of_unnamed_tuples.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01038_array_of_unnamed_tuples.sql rename to tests/queries/0_stateless/01038_array_of_unnamed_tuples.sql diff --git a/dbms/tests/queries/0_stateless/01038_dictionary_lifetime_min_zero_sec.reference b/tests/queries/0_stateless/01038_dictionary_lifetime_min_zero_sec.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01038_dictionary_lifetime_min_zero_sec.reference rename to tests/queries/0_stateless/01038_dictionary_lifetime_min_zero_sec.reference diff --git a/dbms/tests/queries/0_stateless/01038_dictionary_lifetime_min_zero_sec.sh b/tests/queries/0_stateless/01038_dictionary_lifetime_min_zero_sec.sh similarity index 100% rename from dbms/tests/queries/0_stateless/01038_dictionary_lifetime_min_zero_sec.sh rename to tests/queries/0_stateless/01038_dictionary_lifetime_min_zero_sec.sh diff --git a/dbms/tests/queries/0_stateless/01039_mergetree_exec_time.reference b/tests/queries/0_stateless/01039_mergetree_exec_time.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01039_mergetree_exec_time.reference rename to tests/queries/0_stateless/01039_mergetree_exec_time.reference diff --git a/dbms/tests/queries/0_stateless/01039_mergetree_exec_time.sql b/tests/queries/0_stateless/01039_mergetree_exec_time.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01039_mergetree_exec_time.sql rename to tests/queries/0_stateless/01039_mergetree_exec_time.sql diff --git a/dbms/tests/queries/0_stateless/01039_row_policy_dcl.reference b/tests/queries/0_stateless/01039_row_policy_dcl.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01039_row_policy_dcl.reference rename to tests/queries/0_stateless/01039_row_policy_dcl.reference diff --git a/dbms/tests/queries/0_stateless/01039_row_policy_dcl.sql b/tests/queries/0_stateless/01039_row_policy_dcl.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01039_row_policy_dcl.sql rename to tests/queries/0_stateless/01039_row_policy_dcl.sql diff --git a/dbms/tests/queries/0_stateless/01039_test_setting_parse.reference b/tests/queries/0_stateless/01039_test_setting_parse.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01039_test_setting_parse.reference rename to tests/queries/0_stateless/01039_test_setting_parse.reference diff --git a/dbms/tests/queries/0_stateless/01039_test_setting_parse.sql b/tests/queries/0_stateless/01039_test_setting_parse.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01039_test_setting_parse.sql rename to tests/queries/0_stateless/01039_test_setting_parse.sql diff --git 
a/dbms/tests/queries/0_stateless/01040_dictionary_invalidate_query_switchover_long.reference b/tests/queries/0_stateless/01040_dictionary_invalidate_query_switchover_long.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01040_dictionary_invalidate_query_switchover_long.reference rename to tests/queries/0_stateless/01040_dictionary_invalidate_query_switchover_long.reference diff --git a/dbms/tests/queries/0_stateless/01040_dictionary_invalidate_query_switchover_long.sh b/tests/queries/0_stateless/01040_dictionary_invalidate_query_switchover_long.sh similarity index 100% rename from dbms/tests/queries/0_stateless/01040_dictionary_invalidate_query_switchover_long.sh rename to tests/queries/0_stateless/01040_dictionary_invalidate_query_switchover_long.sh diff --git a/dbms/tests/queries/0_stateless/01040_distributed_directory_monitor_batch_inserts.reference b/tests/queries/0_stateless/01040_distributed_directory_monitor_batch_inserts.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01040_distributed_directory_monitor_batch_inserts.reference rename to tests/queries/0_stateless/01040_distributed_directory_monitor_batch_inserts.reference diff --git a/dbms/tests/queries/0_stateless/01040_distributed_directory_monitor_batch_inserts.sql b/tests/queries/0_stateless/01040_distributed_directory_monitor_batch_inserts.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01040_distributed_directory_monitor_batch_inserts.sql rename to tests/queries/0_stateless/01040_distributed_directory_monitor_batch_inserts.sql diff --git a/dbms/tests/queries/0_stateless/01040_h3_get_resolution.reference b/tests/queries/0_stateless/01040_h3_get_resolution.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01040_h3_get_resolution.reference rename to tests/queries/0_stateless/01040_h3_get_resolution.reference diff --git a/dbms/tests/queries/0_stateless/01040_h3_get_resolution.sql b/tests/queries/0_stateless/01040_h3_get_resolution.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01040_h3_get_resolution.sql rename to tests/queries/0_stateless/01040_h3_get_resolution.sql diff --git a/dbms/tests/queries/0_stateless/01041_create_dictionary_if_not_exists.reference b/tests/queries/0_stateless/01041_create_dictionary_if_not_exists.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01041_create_dictionary_if_not_exists.reference rename to tests/queries/0_stateless/01041_create_dictionary_if_not_exists.reference diff --git a/dbms/tests/queries/0_stateless/01041_create_dictionary_if_not_exists.sql b/tests/queries/0_stateless/01041_create_dictionary_if_not_exists.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01041_create_dictionary_if_not_exists.sql rename to tests/queries/0_stateless/01041_create_dictionary_if_not_exists.sql diff --git a/dbms/tests/queries/0_stateless/01041_h3_is_valid.reference b/tests/queries/0_stateless/01041_h3_is_valid.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01041_h3_is_valid.reference rename to tests/queries/0_stateless/01041_h3_is_valid.reference diff --git a/dbms/tests/queries/0_stateless/01041_h3_is_valid.sql b/tests/queries/0_stateless/01041_h3_is_valid.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01041_h3_is_valid.sql rename to tests/queries/0_stateless/01041_h3_is_valid.sql diff --git a/dbms/tests/queries/0_stateless/01042_check_query_and_last_granule_size.reference 
b/tests/queries/0_stateless/01042_check_query_and_last_granule_size.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01042_check_query_and_last_granule_size.reference rename to tests/queries/0_stateless/01042_check_query_and_last_granule_size.reference diff --git a/dbms/tests/queries/0_stateless/01042_check_query_and_last_granule_size.sql b/tests/queries/0_stateless/01042_check_query_and_last_granule_size.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01042_check_query_and_last_granule_size.sql rename to tests/queries/0_stateless/01042_check_query_and_last_granule_size.sql diff --git a/dbms/tests/queries/0_stateless/01042_h3_k_ring.reference b/tests/queries/0_stateless/01042_h3_k_ring.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01042_h3_k_ring.reference rename to tests/queries/0_stateless/01042_h3_k_ring.reference diff --git a/dbms/tests/queries/0_stateless/01042_h3_k_ring.sql b/tests/queries/0_stateless/01042_h3_k_ring.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01042_h3_k_ring.sql rename to tests/queries/0_stateless/01042_h3_k_ring.sql diff --git a/dbms/tests/queries/0_stateless/01042_system_reload_dictionary_reloads_completely.reference b/tests/queries/0_stateless/01042_system_reload_dictionary_reloads_completely.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01042_system_reload_dictionary_reloads_completely.reference rename to tests/queries/0_stateless/01042_system_reload_dictionary_reloads_completely.reference diff --git a/dbms/tests/queries/0_stateless/01042_system_reload_dictionary_reloads_completely.sh b/tests/queries/0_stateless/01042_system_reload_dictionary_reloads_completely.sh similarity index 100% rename from dbms/tests/queries/0_stateless/01042_system_reload_dictionary_reloads_completely.sh rename to tests/queries/0_stateless/01042_system_reload_dictionary_reloads_completely.sh diff --git a/dbms/tests/queries/0_stateless/01043_categorical_iv.reference b/tests/queries/0_stateless/01043_categorical_iv.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01043_categorical_iv.reference rename to tests/queries/0_stateless/01043_categorical_iv.reference diff --git a/dbms/tests/queries/0_stateless/01043_categorical_iv.sql b/tests/queries/0_stateless/01043_categorical_iv.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01043_categorical_iv.sql rename to tests/queries/0_stateless/01043_categorical_iv.sql diff --git a/dbms/tests/queries/0_stateless/01043_dictionary_attribute_properties_values.reference b/tests/queries/0_stateless/01043_dictionary_attribute_properties_values.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01043_dictionary_attribute_properties_values.reference rename to tests/queries/0_stateless/01043_dictionary_attribute_properties_values.reference diff --git a/dbms/tests/queries/0_stateless/01043_dictionary_attribute_properties_values.sql b/tests/queries/0_stateless/01043_dictionary_attribute_properties_values.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01043_dictionary_attribute_properties_values.sql rename to tests/queries/0_stateless/01043_dictionary_attribute_properties_values.sql diff --git a/dbms/tests/queries/0_stateless/01043_geo_distance.reference b/tests/queries/0_stateless/01043_geo_distance.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01043_geo_distance.reference rename to 
tests/queries/0_stateless/01043_geo_distance.reference diff --git a/dbms/tests/queries/0_stateless/01043_geo_distance.sql b/tests/queries/0_stateless/01043_geo_distance.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01043_geo_distance.sql rename to tests/queries/0_stateless/01043_geo_distance.sql diff --git a/dbms/tests/queries/0_stateless/01043_h3_edge_length_m.reference b/tests/queries/0_stateless/01043_h3_edge_length_m.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01043_h3_edge_length_m.reference rename to tests/queries/0_stateless/01043_h3_edge_length_m.reference diff --git a/dbms/tests/queries/0_stateless/01043_h3_edge_length_m.sql b/tests/queries/0_stateless/01043_h3_edge_length_m.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01043_h3_edge_length_m.sql rename to tests/queries/0_stateless/01043_h3_edge_length_m.sql diff --git a/dbms/tests/queries/0_stateless/01044_great_circle_angle.reference b/tests/queries/0_stateless/01044_great_circle_angle.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01044_great_circle_angle.reference rename to tests/queries/0_stateless/01044_great_circle_angle.reference diff --git a/dbms/tests/queries/0_stateless/01044_great_circle_angle.sql b/tests/queries/0_stateless/01044_great_circle_angle.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01044_great_circle_angle.sql rename to tests/queries/0_stateless/01044_great_circle_angle.sql diff --git a/dbms/tests/queries/0_stateless/01044_h3_edge_angle.reference b/tests/queries/0_stateless/01044_h3_edge_angle.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01044_h3_edge_angle.reference rename to tests/queries/0_stateless/01044_h3_edge_angle.reference diff --git a/dbms/tests/queries/0_stateless/01044_h3_edge_angle.sql b/tests/queries/0_stateless/01044_h3_edge_angle.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01044_h3_edge_angle.sql rename to tests/queries/0_stateless/01044_h3_edge_angle.sql diff --git a/dbms/tests/queries/0_stateless/01045_array_zip.reference b/tests/queries/0_stateless/01045_array_zip.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01045_array_zip.reference rename to tests/queries/0_stateless/01045_array_zip.reference diff --git a/dbms/tests/queries/0_stateless/01045_array_zip.sql b/tests/queries/0_stateless/01045_array_zip.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01045_array_zip.sql rename to tests/queries/0_stateless/01045_array_zip.sql diff --git a/dbms/tests/queries/0_stateless/01045_bloom_filter_null_array.reference b/tests/queries/0_stateless/01045_bloom_filter_null_array.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01045_bloom_filter_null_array.reference rename to tests/queries/0_stateless/01045_bloom_filter_null_array.reference diff --git a/dbms/tests/queries/0_stateless/01045_bloom_filter_null_array.sql b/tests/queries/0_stateless/01045_bloom_filter_null_array.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01045_bloom_filter_null_array.sql rename to tests/queries/0_stateless/01045_bloom_filter_null_array.sql diff --git a/dbms/tests/queries/0_stateless/01045_dictionaries_restrictions.reference b/tests/queries/0_stateless/01045_dictionaries_restrictions.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01045_dictionaries_restrictions.reference rename to 
tests/queries/0_stateless/01045_dictionaries_restrictions.reference diff --git a/dbms/tests/queries/0_stateless/01045_dictionaries_restrictions.sql b/tests/queries/0_stateless/01045_dictionaries_restrictions.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01045_dictionaries_restrictions.sql rename to tests/queries/0_stateless/01045_dictionaries_restrictions.sql diff --git a/dbms/tests/queries/0_stateless/01045_order_by_pk_special_storages.reference b/tests/queries/0_stateless/01045_order_by_pk_special_storages.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01045_order_by_pk_special_storages.reference rename to tests/queries/0_stateless/01045_order_by_pk_special_storages.reference diff --git a/dbms/tests/queries/0_stateless/01045_order_by_pk_special_storages.sh b/tests/queries/0_stateless/01045_order_by_pk_special_storages.sh similarity index 100% rename from dbms/tests/queries/0_stateless/01045_order_by_pk_special_storages.sh rename to tests/queries/0_stateless/01045_order_by_pk_special_storages.sh diff --git a/dbms/tests/queries/0_stateless/01045_zookeeper_system_mutations_with_parts_names.reference b/tests/queries/0_stateless/01045_zookeeper_system_mutations_with_parts_names.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01045_zookeeper_system_mutations_with_parts_names.reference rename to tests/queries/0_stateless/01045_zookeeper_system_mutations_with_parts_names.reference diff --git a/dbms/tests/queries/0_stateless/01045_zookeeper_system_mutations_with_parts_names.sh b/tests/queries/0_stateless/01045_zookeeper_system_mutations_with_parts_names.sh similarity index 100% rename from dbms/tests/queries/0_stateless/01045_zookeeper_system_mutations_with_parts_names.sh rename to tests/queries/0_stateless/01045_zookeeper_system_mutations_with_parts_names.sh diff --git a/dbms/tests/queries/0_stateless/01046_materialized_view_with_join_over_distributed.reference b/tests/queries/0_stateless/01046_materialized_view_with_join_over_distributed.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01046_materialized_view_with_join_over_distributed.reference rename to tests/queries/0_stateless/01046_materialized_view_with_join_over_distributed.reference diff --git a/dbms/tests/queries/0_stateless/01046_materialized_view_with_join_over_distributed.sql b/tests/queries/0_stateless/01046_materialized_view_with_join_over_distributed.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01046_materialized_view_with_join_over_distributed.sql rename to tests/queries/0_stateless/01046_materialized_view_with_join_over_distributed.sql diff --git a/dbms/tests/queries/0_stateless/01046_trivial_count_query_distributed.reference b/tests/queries/0_stateless/01046_trivial_count_query_distributed.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01046_trivial_count_query_distributed.reference rename to tests/queries/0_stateless/01046_trivial_count_query_distributed.reference diff --git a/dbms/tests/queries/0_stateless/01046_trivial_count_query_distributed.sql b/tests/queries/0_stateless/01046_trivial_count_query_distributed.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01046_trivial_count_query_distributed.sql rename to tests/queries/0_stateless/01046_trivial_count_query_distributed.sql diff --git a/dbms/tests/queries/0_stateless/01047_no_alias_columns_with_table_aliases.reference b/tests/queries/0_stateless/01047_no_alias_columns_with_table_aliases.reference 
similarity index 100% rename from dbms/tests/queries/0_stateless/01047_no_alias_columns_with_table_aliases.reference rename to tests/queries/0_stateless/01047_no_alias_columns_with_table_aliases.reference diff --git a/dbms/tests/queries/0_stateless/01047_no_alias_columns_with_table_aliases.sql b/tests/queries/0_stateless/01047_no_alias_columns_with_table_aliases.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01047_no_alias_columns_with_table_aliases.sql rename to tests/queries/0_stateless/01047_no_alias_columns_with_table_aliases.sql diff --git a/dbms/tests/queries/0_stateless/01047_nullable_rand.reference b/tests/queries/0_stateless/01047_nullable_rand.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01047_nullable_rand.reference rename to tests/queries/0_stateless/01047_nullable_rand.reference diff --git a/dbms/tests/queries/0_stateless/01047_nullable_rand.sql b/tests/queries/0_stateless/01047_nullable_rand.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01047_nullable_rand.sql rename to tests/queries/0_stateless/01047_nullable_rand.sql diff --git a/dbms/tests/queries/0_stateless/01047_simple_aggregate_sizes_of_columns_bug.reference b/tests/queries/0_stateless/01047_simple_aggregate_sizes_of_columns_bug.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01047_simple_aggregate_sizes_of_columns_bug.reference rename to tests/queries/0_stateless/01047_simple_aggregate_sizes_of_columns_bug.reference diff --git a/dbms/tests/queries/0_stateless/01047_simple_aggregate_sizes_of_columns_bug.sql b/tests/queries/0_stateless/01047_simple_aggregate_sizes_of_columns_bug.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01047_simple_aggregate_sizes_of_columns_bug.sql rename to tests/queries/0_stateless/01047_simple_aggregate_sizes_of_columns_bug.sql diff --git a/dbms/tests/queries/0_stateless/01048_exists_query.reference b/tests/queries/0_stateless/01048_exists_query.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01048_exists_query.reference rename to tests/queries/0_stateless/01048_exists_query.reference diff --git a/dbms/tests/queries/0_stateless/01048_exists_query.sql b/tests/queries/0_stateless/01048_exists_query.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01048_exists_query.sql rename to tests/queries/0_stateless/01048_exists_query.sql diff --git a/dbms/tests/queries/0_stateless/01049_join_low_card_bug.reference b/tests/queries/0_stateless/01049_join_low_card_bug.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01049_join_low_card_bug.reference rename to tests/queries/0_stateless/01049_join_low_card_bug.reference diff --git a/dbms/tests/queries/0_stateless/01049_join_low_card_bug.sql b/tests/queries/0_stateless/01049_join_low_card_bug.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01049_join_low_card_bug.sql rename to tests/queries/0_stateless/01049_join_low_card_bug.sql diff --git a/dbms/tests/queries/0_stateless/01049_join_low_card_crash.reference b/tests/queries/0_stateless/01049_join_low_card_crash.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01049_join_low_card_crash.reference rename to tests/queries/0_stateless/01049_join_low_card_crash.reference diff --git a/dbms/tests/queries/0_stateless/01049_join_low_card_crash.sql b/tests/queries/0_stateless/01049_join_low_card_crash.sql similarity index 100% rename from 
dbms/tests/queries/0_stateless/01049_join_low_card_crash.sql rename to tests/queries/0_stateless/01049_join_low_card_crash.sql diff --git a/dbms/tests/queries/0_stateless/01049_zookeeper_synchronous_mutations.reference b/tests/queries/0_stateless/01049_zookeeper_synchronous_mutations.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01049_zookeeper_synchronous_mutations.reference rename to tests/queries/0_stateless/01049_zookeeper_synchronous_mutations.reference diff --git a/dbms/tests/queries/0_stateless/01049_zookeeper_synchronous_mutations.sql b/tests/queries/0_stateless/01049_zookeeper_synchronous_mutations.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01049_zookeeper_synchronous_mutations.sql rename to tests/queries/0_stateless/01049_zookeeper_synchronous_mutations.sql diff --git a/dbms/tests/queries/0_stateless/01050_clickhouse_dict_source_with_subquery.reference b/tests/queries/0_stateless/01050_clickhouse_dict_source_with_subquery.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01050_clickhouse_dict_source_with_subquery.reference rename to tests/queries/0_stateless/01050_clickhouse_dict_source_with_subquery.reference diff --git a/dbms/tests/queries/0_stateless/01050_clickhouse_dict_source_with_subquery.sql b/tests/queries/0_stateless/01050_clickhouse_dict_source_with_subquery.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01050_clickhouse_dict_source_with_subquery.sql rename to tests/queries/0_stateless/01050_clickhouse_dict_source_with_subquery.sql diff --git a/dbms/tests/queries/0_stateless/01050_engine_join_crash.reference b/tests/queries/0_stateless/01050_engine_join_crash.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01050_engine_join_crash.reference rename to tests/queries/0_stateless/01050_engine_join_crash.reference diff --git a/dbms/tests/queries/0_stateless/01050_engine_join_crash.sql b/tests/queries/0_stateless/01050_engine_join_crash.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01050_engine_join_crash.sql rename to tests/queries/0_stateless/01050_engine_join_crash.sql diff --git a/dbms/tests/queries/0_stateless/01050_engine_join_view_crash.reference b/tests/queries/0_stateless/01050_engine_join_view_crash.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01050_engine_join_view_crash.reference rename to tests/queries/0_stateless/01050_engine_join_view_crash.reference diff --git a/dbms/tests/queries/0_stateless/01050_engine_join_view_crash.sql b/tests/queries/0_stateless/01050_engine_join_view_crash.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01050_engine_join_view_crash.sql rename to tests/queries/0_stateless/01050_engine_join_view_crash.sql diff --git a/dbms/tests/queries/0_stateless/01050_group_array_sample.reference b/tests/queries/0_stateless/01050_group_array_sample.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01050_group_array_sample.reference rename to tests/queries/0_stateless/01050_group_array_sample.reference diff --git a/dbms/tests/queries/0_stateless/01050_group_array_sample.sql b/tests/queries/0_stateless/01050_group_array_sample.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01050_group_array_sample.sql rename to tests/queries/0_stateless/01050_group_array_sample.sql diff --git a/dbms/tests/queries/0_stateless/01051_aggregate_function_crash.reference 
b/tests/queries/0_stateless/01051_aggregate_function_crash.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01051_aggregate_function_crash.reference rename to tests/queries/0_stateless/01051_aggregate_function_crash.reference diff --git a/dbms/tests/queries/0_stateless/01051_aggregate_function_crash.sql b/tests/queries/0_stateless/01051_aggregate_function_crash.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01051_aggregate_function_crash.sql rename to tests/queries/0_stateless/01051_aggregate_function_crash.sql diff --git a/dbms/tests/queries/0_stateless/01051_all_join_engine.reference b/tests/queries/0_stateless/01051_all_join_engine.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01051_all_join_engine.reference rename to tests/queries/0_stateless/01051_all_join_engine.reference diff --git a/dbms/tests/queries/0_stateless/01051_all_join_engine.sql b/tests/queries/0_stateless/01051_all_join_engine.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01051_all_join_engine.sql rename to tests/queries/0_stateless/01051_all_join_engine.sql diff --git a/dbms/tests/queries/0_stateless/01051_new_any_join_engine.reference b/tests/queries/0_stateless/01051_new_any_join_engine.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01051_new_any_join_engine.reference rename to tests/queries/0_stateless/01051_new_any_join_engine.reference diff --git a/dbms/tests/queries/0_stateless/01051_new_any_join_engine.sql b/tests/queries/0_stateless/01051_new_any_join_engine.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01051_new_any_join_engine.sql rename to tests/queries/0_stateless/01051_new_any_join_engine.sql diff --git a/dbms/tests/queries/0_stateless/01051_random_printable_ascii.reference b/tests/queries/0_stateless/01051_random_printable_ascii.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01051_random_printable_ascii.reference rename to tests/queries/0_stateless/01051_random_printable_ascii.reference diff --git a/dbms/tests/queries/0_stateless/01051_random_printable_ascii.sql b/tests/queries/0_stateless/01051_random_printable_ascii.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01051_random_printable_ascii.sql rename to tests/queries/0_stateless/01051_random_printable_ascii.sql diff --git a/dbms/tests/queries/0_stateless/01051_same_name_alias_with_joins.reference b/tests/queries/0_stateless/01051_same_name_alias_with_joins.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01051_same_name_alias_with_joins.reference rename to tests/queries/0_stateless/01051_same_name_alias_with_joins.reference diff --git a/dbms/tests/queries/0_stateless/01051_same_name_alias_with_joins.sql b/tests/queries/0_stateless/01051_same_name_alias_with_joins.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01051_same_name_alias_with_joins.sql rename to tests/queries/0_stateless/01051_same_name_alias_with_joins.sql diff --git a/dbms/tests/queries/0_stateless/01052_array_reduce_exception.reference b/tests/queries/0_stateless/01052_array_reduce_exception.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01052_array_reduce_exception.reference rename to tests/queries/0_stateless/01052_array_reduce_exception.reference diff --git a/dbms/tests/queries/0_stateless/01052_array_reduce_exception.sql b/tests/queries/0_stateless/01052_array_reduce_exception.sql similarity index 100% rename from 
dbms/tests/queries/0_stateless/01052_array_reduce_exception.sql rename to tests/queries/0_stateless/01052_array_reduce_exception.sql diff --git a/dbms/tests/queries/0_stateless/01052_compression_buffer_overrun.reference b/tests/queries/0_stateless/01052_compression_buffer_overrun.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01052_compression_buffer_overrun.reference rename to tests/queries/0_stateless/01052_compression_buffer_overrun.reference diff --git a/dbms/tests/queries/0_stateless/01052_compression_buffer_overrun.sh b/tests/queries/0_stateless/01052_compression_buffer_overrun.sh similarity index 100% rename from dbms/tests/queries/0_stateless/01052_compression_buffer_overrun.sh rename to tests/queries/0_stateless/01052_compression_buffer_overrun.sh diff --git a/dbms/tests/queries/0_stateless/01053_drop_database_mat_view.reference b/tests/queries/0_stateless/01053_drop_database_mat_view.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01053_drop_database_mat_view.reference rename to tests/queries/0_stateless/01053_drop_database_mat_view.reference diff --git a/dbms/tests/queries/0_stateless/01053_drop_database_mat_view.sql b/tests/queries/0_stateless/01053_drop_database_mat_view.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01053_drop_database_mat_view.sql rename to tests/queries/0_stateless/01053_drop_database_mat_view.sql diff --git a/dbms/tests/queries/0_stateless/01053_if_chain_check.reference b/tests/queries/0_stateless/01053_if_chain_check.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01053_if_chain_check.reference rename to tests/queries/0_stateless/01053_if_chain_check.reference diff --git a/dbms/tests/queries/0_stateless/01053_if_chain_check.sql b/tests/queries/0_stateless/01053_if_chain_check.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01053_if_chain_check.sql rename to tests/queries/0_stateless/01053_if_chain_check.sql diff --git a/dbms/tests/queries/0_stateless/01054_cache_dictionary_bunch_update.reference b/tests/queries/0_stateless/01054_cache_dictionary_bunch_update.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01054_cache_dictionary_bunch_update.reference rename to tests/queries/0_stateless/01054_cache_dictionary_bunch_update.reference diff --git a/dbms/tests/queries/0_stateless/01054_cache_dictionary_bunch_update.sh b/tests/queries/0_stateless/01054_cache_dictionary_bunch_update.sh similarity index 100% rename from dbms/tests/queries/0_stateless/01054_cache_dictionary_bunch_update.sh rename to tests/queries/0_stateless/01054_cache_dictionary_bunch_update.sh diff --git a/dbms/tests/queries/0_stateless/01054_cache_dictionary_overflow_cell.reference b/tests/queries/0_stateless/01054_cache_dictionary_overflow_cell.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01054_cache_dictionary_overflow_cell.reference rename to tests/queries/0_stateless/01054_cache_dictionary_overflow_cell.reference diff --git a/dbms/tests/queries/0_stateless/01054_cache_dictionary_overflow_cell.sql b/tests/queries/0_stateless/01054_cache_dictionary_overflow_cell.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01054_cache_dictionary_overflow_cell.sql rename to tests/queries/0_stateless/01054_cache_dictionary_overflow_cell.sql diff --git a/dbms/tests/queries/0_stateless/01054_random_printable_ascii_ubsan.reference b/tests/queries/0_stateless/01054_random_printable_ascii_ubsan.reference 
similarity index 100% rename from dbms/tests/queries/0_stateless/01054_random_printable_ascii_ubsan.reference rename to tests/queries/0_stateless/01054_random_printable_ascii_ubsan.reference diff --git a/dbms/tests/queries/0_stateless/01054_random_printable_ascii_ubsan.sh b/tests/queries/0_stateless/01054_random_printable_ascii_ubsan.sh similarity index 100% rename from dbms/tests/queries/0_stateless/01054_random_printable_ascii_ubsan.sh rename to tests/queries/0_stateless/01054_random_printable_ascii_ubsan.sh diff --git a/dbms/tests/queries/0_stateless/01055_compact_parts.reference b/tests/queries/0_stateless/01055_compact_parts.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01055_compact_parts.reference rename to tests/queries/0_stateless/01055_compact_parts.reference diff --git a/dbms/tests/queries/0_stateless/01055_compact_parts.sql b/tests/queries/0_stateless/01055_compact_parts.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01055_compact_parts.sql rename to tests/queries/0_stateless/01055_compact_parts.sql diff --git a/dbms/tests/queries/0_stateless/01055_compact_parts_1.reference b/tests/queries/0_stateless/01055_compact_parts_1.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01055_compact_parts_1.reference rename to tests/queries/0_stateless/01055_compact_parts_1.reference diff --git a/dbms/tests/queries/0_stateless/01055_compact_parts_1.sql b/tests/queries/0_stateless/01055_compact_parts_1.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01055_compact_parts_1.sql rename to tests/queries/0_stateless/01055_compact_parts_1.sql diff --git a/dbms/tests/queries/0_stateless/01055_compact_parts_granularity.reference b/tests/queries/0_stateless/01055_compact_parts_granularity.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01055_compact_parts_granularity.reference rename to tests/queries/0_stateless/01055_compact_parts_granularity.reference diff --git a/dbms/tests/queries/0_stateless/01055_compact_parts_granularity.sh b/tests/queries/0_stateless/01055_compact_parts_granularity.sh similarity index 100% rename from dbms/tests/queries/0_stateless/01055_compact_parts_granularity.sh rename to tests/queries/0_stateless/01055_compact_parts_granularity.sh diff --git a/dbms/tests/queries/0_stateless/01055_minmax_index_compact_parts.reference b/tests/queries/0_stateless/01055_minmax_index_compact_parts.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01055_minmax_index_compact_parts.reference rename to tests/queries/0_stateless/01055_minmax_index_compact_parts.reference diff --git a/dbms/tests/queries/0_stateless/01055_minmax_index_compact_parts.sh b/tests/queries/0_stateless/01055_minmax_index_compact_parts.sh similarity index 100% rename from dbms/tests/queries/0_stateless/01055_minmax_index_compact_parts.sh rename to tests/queries/0_stateless/01055_minmax_index_compact_parts.sh diff --git a/dbms/tests/queries/0_stateless/01055_prewhere_bugs.reference b/tests/queries/0_stateless/01055_prewhere_bugs.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01055_prewhere_bugs.reference rename to tests/queries/0_stateless/01055_prewhere_bugs.reference diff --git a/dbms/tests/queries/0_stateless/01055_prewhere_bugs.sql b/tests/queries/0_stateless/01055_prewhere_bugs.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01055_prewhere_bugs.sql rename to tests/queries/0_stateless/01055_prewhere_bugs.sql diff --git 
a/dbms/tests/queries/0_stateless/01056_create_table_as.reference b/tests/queries/0_stateless/01056_create_table_as.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01056_create_table_as.reference rename to tests/queries/0_stateless/01056_create_table_as.reference diff --git a/dbms/tests/queries/0_stateless/01056_create_table_as.sql b/tests/queries/0_stateless/01056_create_table_as.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01056_create_table_as.sql rename to tests/queries/0_stateless/01056_create_table_as.sql diff --git a/dbms/tests/queries/0_stateless/01056_negative_with_bloom_filter.reference b/tests/queries/0_stateless/01056_negative_with_bloom_filter.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01056_negative_with_bloom_filter.reference rename to tests/queries/0_stateless/01056_negative_with_bloom_filter.reference diff --git a/dbms/tests/queries/0_stateless/01056_negative_with_bloom_filter.sql b/tests/queries/0_stateless/01056_negative_with_bloom_filter.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01056_negative_with_bloom_filter.sql rename to tests/queries/0_stateless/01056_negative_with_bloom_filter.sql diff --git a/dbms/tests/queries/0_stateless/01056_predicate_optimizer_bugs.reference b/tests/queries/0_stateless/01056_predicate_optimizer_bugs.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01056_predicate_optimizer_bugs.reference rename to tests/queries/0_stateless/01056_predicate_optimizer_bugs.reference diff --git a/dbms/tests/queries/0_stateless/01056_predicate_optimizer_bugs.sql b/tests/queries/0_stateless/01056_predicate_optimizer_bugs.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01056_predicate_optimizer_bugs.sql rename to tests/queries/0_stateless/01056_predicate_optimizer_bugs.sql diff --git a/dbms/tests/queries/0_stateless/01056_prepared_statements_null_and_escaping.reference b/tests/queries/0_stateless/01056_prepared_statements_null_and_escaping.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01056_prepared_statements_null_and_escaping.reference rename to tests/queries/0_stateless/01056_prepared_statements_null_and_escaping.reference diff --git a/dbms/tests/queries/0_stateless/01056_prepared_statements_null_and_escaping.sh b/tests/queries/0_stateless/01056_prepared_statements_null_and_escaping.sh similarity index 100% rename from dbms/tests/queries/0_stateless/01056_prepared_statements_null_and_escaping.sh rename to tests/queries/0_stateless/01056_prepared_statements_null_and_escaping.sh diff --git a/dbms/tests/queries/0_stateless/01057_http_compression_prefer_brotli.reference b/tests/queries/0_stateless/01057_http_compression_prefer_brotli.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01057_http_compression_prefer_brotli.reference rename to tests/queries/0_stateless/01057_http_compression_prefer_brotli.reference diff --git a/dbms/tests/queries/0_stateless/01057_http_compression_prefer_brotli.sh b/tests/queries/0_stateless/01057_http_compression_prefer_brotli.sh similarity index 100% rename from dbms/tests/queries/0_stateless/01057_http_compression_prefer_brotli.sh rename to tests/queries/0_stateless/01057_http_compression_prefer_brotli.sh diff --git a/dbms/tests/queries/0_stateless/01058_zlib_ng_level1_bug.reference b/tests/queries/0_stateless/01058_zlib_ng_level1_bug.reference similarity index 100% rename from 
dbms/tests/queries/0_stateless/01058_zlib_ng_level1_bug.reference
rename to tests/queries/0_stateless/01058_zlib_ng_level1_bug.reference
diff --git a/dbms/tests/queries/0_stateless/01058_zlib_ng_level1_bug.sh b/tests/queries/0_stateless/01058_zlib_ng_level1_bug.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01058_zlib_ng_level1_bug.sh
rename to tests/queries/0_stateless/01058_zlib_ng_level1_bug.sh
diff --git a/dbms/tests/queries/0_stateless/01059_storage_file_brotli.reference b/tests/queries/0_stateless/01059_storage_file_brotli.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01059_storage_file_brotli.reference
rename to tests/queries/0_stateless/01059_storage_file_brotli.reference
diff --git a/dbms/tests/queries/0_stateless/01059_storage_file_brotli.sql b/tests/queries/0_stateless/01059_storage_file_brotli.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01059_storage_file_brotli.sql
rename to tests/queries/0_stateless/01059_storage_file_brotli.sql
diff --git a/dbms/tests/queries/0_stateless/01060_avro.reference b/tests/queries/0_stateless/01060_avro.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01060_avro.reference
rename to tests/queries/0_stateless/01060_avro.reference
diff --git a/dbms/tests/queries/0_stateless/01060_avro.sh b/tests/queries/0_stateless/01060_avro.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01060_avro.sh
rename to tests/queries/0_stateless/01060_avro.sh
diff --git a/dbms/tests/queries/0_stateless/01060_defaults_all_columns.reference b/tests/queries/0_stateless/01060_defaults_all_columns.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01060_defaults_all_columns.reference
rename to tests/queries/0_stateless/01060_defaults_all_columns.reference
diff --git a/dbms/tests/queries/0_stateless/01060_defaults_all_columns.sql b/tests/queries/0_stateless/01060_defaults_all_columns.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01060_defaults_all_columns.sql
rename to tests/queries/0_stateless/01060_defaults_all_columns.sql
diff --git a/dbms/tests/queries/0_stateless/01060_shutdown_table_after_detach.reference b/tests/queries/0_stateless/01060_shutdown_table_after_detach.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01060_shutdown_table_after_detach.reference
rename to tests/queries/0_stateless/01060_shutdown_table_after_detach.reference
diff --git a/dbms/tests/queries/0_stateless/01060_shutdown_table_after_detach.sql b/tests/queries/0_stateless/01060_shutdown_table_after_detach.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01060_shutdown_table_after_detach.sql
rename to tests/queries/0_stateless/01060_shutdown_table_after_detach.sql
diff --git a/dbms/tests/queries/0_stateless/01060_substring_negative_size.reference b/tests/queries/0_stateless/01060_substring_negative_size.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01060_substring_negative_size.reference
rename to tests/queries/0_stateless/01060_substring_negative_size.reference
diff --git a/dbms/tests/queries/0_stateless/01060_substring_negative_size.sql b/tests/queries/0_stateless/01060_substring_negative_size.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01060_substring_negative_size.sql
rename to tests/queries/0_stateless/01060_substring_negative_size.sql
diff --git a/dbms/tests/queries/0_stateless/01061_alter_codec_with_type.reference b/tests/queries/0_stateless/01061_alter_codec_with_type.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01061_alter_codec_with_type.reference
rename to tests/queries/0_stateless/01061_alter_codec_with_type.reference
diff --git a/dbms/tests/queries/0_stateless/01061_alter_codec_with_type.sql b/tests/queries/0_stateless/01061_alter_codec_with_type.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01061_alter_codec_with_type.sql
rename to tests/queries/0_stateless/01061_alter_codec_with_type.sql
diff --git a/dbms/tests/queries/0_stateless/01062_alter_on_mutataion.reference b/tests/queries/0_stateless/01062_alter_on_mutataion.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01062_alter_on_mutataion.reference
rename to tests/queries/0_stateless/01062_alter_on_mutataion.reference
diff --git a/dbms/tests/queries/0_stateless/01062_alter_on_mutataion.sql b/tests/queries/0_stateless/01062_alter_on_mutataion.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01062_alter_on_mutataion.sql
rename to tests/queries/0_stateless/01062_alter_on_mutataion.sql
diff --git a/dbms/tests/queries/0_stateless/01062_max_parser_depth.reference b/tests/queries/0_stateless/01062_max_parser_depth.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01062_max_parser_depth.reference
rename to tests/queries/0_stateless/01062_max_parser_depth.reference
diff --git a/dbms/tests/queries/0_stateless/01062_max_parser_depth.sh b/tests/queries/0_stateless/01062_max_parser_depth.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01062_max_parser_depth.sh
rename to tests/queries/0_stateless/01062_max_parser_depth.sh
diff --git a/dbms/tests/queries/0_stateless/01062_pm_all_join_with_block_continuation.reference b/tests/queries/0_stateless/01062_pm_all_join_with_block_continuation.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01062_pm_all_join_with_block_continuation.reference
rename to tests/queries/0_stateless/01062_pm_all_join_with_block_continuation.reference
diff --git a/dbms/tests/queries/0_stateless/01062_pm_all_join_with_block_continuation.sql b/tests/queries/0_stateless/01062_pm_all_join_with_block_continuation.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01062_pm_all_join_with_block_continuation.sql
rename to tests/queries/0_stateless/01062_pm_all_join_with_block_continuation.sql
diff --git a/dbms/tests/queries/0_stateless/01062_pm_multiple_all_join_same_value.reference b/tests/queries/0_stateless/01062_pm_multiple_all_join_same_value.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01062_pm_multiple_all_join_same_value.reference
rename to tests/queries/0_stateless/01062_pm_multiple_all_join_same_value.reference
diff --git a/dbms/tests/queries/0_stateless/01062_pm_multiple_all_join_same_value.sql b/tests/queries/0_stateless/01062_pm_multiple_all_join_same_value.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01062_pm_multiple_all_join_same_value.sql
rename to tests/queries/0_stateless/01062_pm_multiple_all_join_same_value.sql
diff --git a/dbms/tests/queries/0_stateless/01063_create_column_set.reference b/tests/queries/0_stateless/01063_create_column_set.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01063_create_column_set.reference
rename to tests/queries/0_stateless/01063_create_column_set.reference
diff --git a/dbms/tests/queries/0_stateless/01063_create_column_set.sql b/tests/queries/0_stateless/01063_create_column_set.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01063_create_column_set.sql
rename to tests/queries/0_stateless/01063_create_column_set.sql
diff --git a/dbms/tests/queries/0_stateless/01064_array_auc.reference b/tests/queries/0_stateless/01064_array_auc.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01064_array_auc.reference
rename to tests/queries/0_stateless/01064_array_auc.reference
diff --git a/dbms/tests/queries/0_stateless/01064_array_auc.sql b/tests/queries/0_stateless/01064_array_auc.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01064_array_auc.sql
rename to tests/queries/0_stateless/01064_array_auc.sql
diff --git a/dbms/tests/queries/0_stateless/01064_incremental_streaming_from_2_src_with_feedback.reference b/tests/queries/0_stateless/01064_incremental_streaming_from_2_src_with_feedback.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01064_incremental_streaming_from_2_src_with_feedback.reference
rename to tests/queries/0_stateless/01064_incremental_streaming_from_2_src_with_feedback.reference
diff --git a/dbms/tests/queries/0_stateless/01064_incremental_streaming_from_2_src_with_feedback.sql b/tests/queries/0_stateless/01064_incremental_streaming_from_2_src_with_feedback.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01064_incremental_streaming_from_2_src_with_feedback.sql
rename to tests/queries/0_stateless/01064_incremental_streaming_from_2_src_with_feedback.sql
diff --git a/dbms/tests/queries/0_stateless/01064_pm_all_join_const_and_nullable.reference b/tests/queries/0_stateless/01064_pm_all_join_const_and_nullable.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01064_pm_all_join_const_and_nullable.reference
rename to tests/queries/0_stateless/01064_pm_all_join_const_and_nullable.reference
diff --git a/dbms/tests/queries/0_stateless/01064_pm_all_join_const_and_nullable.sql b/tests/queries/0_stateless/01064_pm_all_join_const_and_nullable.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01064_pm_all_join_const_and_nullable.sql
rename to tests/queries/0_stateless/01064_pm_all_join_const_and_nullable.sql
diff --git a/dbms/tests/queries/0_stateless/01065_array_zip_mixed_const.reference b/tests/queries/0_stateless/01065_array_zip_mixed_const.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01065_array_zip_mixed_const.reference
rename to tests/queries/0_stateless/01065_array_zip_mixed_const.reference
diff --git a/dbms/tests/queries/0_stateless/01065_array_zip_mixed_const.sql b/tests/queries/0_stateless/01065_array_zip_mixed_const.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01065_array_zip_mixed_const.sql
rename to tests/queries/0_stateless/01065_array_zip_mixed_const.sql
diff --git a/dbms/tests/queries/0_stateless/01065_if_not_finite.reference b/tests/queries/0_stateless/01065_if_not_finite.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01065_if_not_finite.reference
rename to tests/queries/0_stateless/01065_if_not_finite.reference
diff --git a/dbms/tests/queries/0_stateless/01065_if_not_finite.sql b/tests/queries/0_stateless/01065_if_not_finite.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01065_if_not_finite.sql
rename to tests/queries/0_stateless/01065_if_not_finite.sql
diff --git a/dbms/tests/queries/0_stateless/01066_bit_count.reference b/tests/queries/0_stateless/01066_bit_count.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01066_bit_count.reference
rename to tests/queries/0_stateless/01066_bit_count.reference
diff --git a/dbms/tests/queries/0_stateless/01066_bit_count.sql b/tests/queries/0_stateless/01066_bit_count.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01066_bit_count.sql
rename to tests/queries/0_stateless/01066_bit_count.sql
diff --git a/dbms/tests/queries/0_stateless/01067_join_null.reference b/tests/queries/0_stateless/01067_join_null.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01067_join_null.reference
rename to tests/queries/0_stateless/01067_join_null.reference
diff --git a/dbms/tests/queries/0_stateless/01067_join_null.sql b/tests/queries/0_stateless/01067_join_null.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01067_join_null.sql
rename to tests/queries/0_stateless/01067_join_null.sql
diff --git a/dbms/tests/queries/0_stateless/01068_parens.reference b/tests/queries/0_stateless/01068_parens.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01068_parens.reference
rename to tests/queries/0_stateless/01068_parens.reference
diff --git a/dbms/tests/queries/0_stateless/01068_parens.sql b/tests/queries/0_stateless/01068_parens.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01068_parens.sql
rename to tests/queries/0_stateless/01068_parens.sql
diff --git a/dbms/tests/queries/0_stateless/01069_database_memory.reference b/tests/queries/0_stateless/01069_database_memory.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01069_database_memory.reference
rename to tests/queries/0_stateless/01069_database_memory.reference
diff --git a/dbms/tests/queries/0_stateless/01069_database_memory.sql b/tests/queries/0_stateless/01069_database_memory.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01069_database_memory.sql
rename to tests/queries/0_stateless/01069_database_memory.sql
diff --git a/dbms/tests/queries/0_stateless/01069_insert_float_as_nullable_unit8.reference b/tests/queries/0_stateless/01069_insert_float_as_nullable_unit8.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01069_insert_float_as_nullable_unit8.reference
rename to tests/queries/0_stateless/01069_insert_float_as_nullable_unit8.reference
diff --git a/dbms/tests/queries/0_stateless/01069_insert_float_as_nullable_unit8.sql b/tests/queries/0_stateless/01069_insert_float_as_nullable_unit8.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01069_insert_float_as_nullable_unit8.sql
rename to tests/queries/0_stateless/01069_insert_float_as_nullable_unit8.sql
diff --git a/dbms/tests/queries/0_stateless/01069_materialized_view_alter_target_table.reference b/tests/queries/0_stateless/01069_materialized_view_alter_target_table.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01069_materialized_view_alter_target_table.reference
rename to tests/queries/0_stateless/01069_materialized_view_alter_target_table.reference
diff --git a/dbms/tests/queries/0_stateless/01069_materialized_view_alter_target_table.sql b/tests/queries/0_stateless/01069_materialized_view_alter_target_table.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01069_materialized_view_alter_target_table.sql
rename to tests/queries/0_stateless/01069_materialized_view_alter_target_table.sql
diff --git a/dbms/tests/queries/0_stateless/01069_materialized_view_alter_target_table_with_default_expression.reference b/tests/queries/0_stateless/01069_materialized_view_alter_target_table_with_default_expression.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01069_materialized_view_alter_target_table_with_default_expression.reference
rename to tests/queries/0_stateless/01069_materialized_view_alter_target_table_with_default_expression.reference
diff --git a/dbms/tests/queries/0_stateless/01069_materialized_view_alter_target_table_with_default_expression.sql b/tests/queries/0_stateless/01069_materialized_view_alter_target_table_with_default_expression.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01069_materialized_view_alter_target_table_with_default_expression.sql
rename to tests/queries/0_stateless/01069_materialized_view_alter_target_table_with_default_expression.sql
diff --git a/dbms/tests/queries/0_stateless/01069_set_in_group_by.reference b/tests/queries/0_stateless/01069_set_in_group_by.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01069_set_in_group_by.reference
rename to tests/queries/0_stateless/01069_set_in_group_by.reference
diff --git a/dbms/tests/queries/0_stateless/01069_set_in_group_by.sql b/tests/queries/0_stateless/01069_set_in_group_by.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01069_set_in_group_by.sql
rename to tests/queries/0_stateless/01069_set_in_group_by.sql
diff --git a/dbms/tests/queries/0_stateless/01070_alter_with_ttl.reference b/tests/queries/0_stateless/01070_alter_with_ttl.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01070_alter_with_ttl.reference
rename to tests/queries/0_stateless/01070_alter_with_ttl.reference
diff --git a/dbms/tests/queries/0_stateless/01070_alter_with_ttl.sql b/tests/queries/0_stateless/01070_alter_with_ttl.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01070_alter_with_ttl.sql
rename to tests/queries/0_stateless/01070_alter_with_ttl.sql
diff --git a/dbms/tests/queries/0_stateless/01070_exception_code_in_query_log_table.reference b/tests/queries/0_stateless/01070_exception_code_in_query_log_table.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01070_exception_code_in_query_log_table.reference
rename to tests/queries/0_stateless/01070_exception_code_in_query_log_table.reference
diff --git a/dbms/tests/queries/0_stateless/01070_exception_code_in_query_log_table.sql b/tests/queries/0_stateless/01070_exception_code_in_query_log_table.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01070_exception_code_in_query_log_table.sql
rename to tests/queries/0_stateless/01070_exception_code_in_query_log_table.sql
diff --git a/dbms/tests/queries/0_stateless/01070_h3_get_base_cell.reference b/tests/queries/0_stateless/01070_h3_get_base_cell.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01070_h3_get_base_cell.reference
rename to tests/queries/0_stateless/01070_h3_get_base_cell.reference
diff --git a/dbms/tests/queries/0_stateless/01070_h3_get_base_cell.sql b/tests/queries/0_stateless/01070_h3_get_base_cell.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01070_h3_get_base_cell.sql
rename to tests/queries/0_stateless/01070_h3_get_base_cell.sql
diff --git a/dbms/tests/queries/0_stateless/01070_h3_hex_area_m2.reference b/tests/queries/0_stateless/01070_h3_hex_area_m2.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01070_h3_hex_area_m2.reference
rename to tests/queries/0_stateless/01070_h3_hex_area_m2.reference
diff --git a/dbms/tests/queries/0_stateless/01070_h3_hex_area_m2.sql b/tests/queries/0_stateless/01070_h3_hex_area_m2.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01070_h3_hex_area_m2.sql
rename to tests/queries/0_stateless/01070_h3_hex_area_m2.sql
diff --git a/dbms/tests/queries/0_stateless/01070_h3_indexes_are_neighbors.reference b/tests/queries/0_stateless/01070_h3_indexes_are_neighbors.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01070_h3_indexes_are_neighbors.reference
rename to tests/queries/0_stateless/01070_h3_indexes_are_neighbors.reference
diff --git a/dbms/tests/queries/0_stateless/01070_h3_indexes_are_neighbors.sql b/tests/queries/0_stateless/01070_h3_indexes_are_neighbors.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01070_h3_indexes_are_neighbors.sql
rename to tests/queries/0_stateless/01070_h3_indexes_are_neighbors.sql
diff --git a/dbms/tests/queries/0_stateless/01070_h3_to_children.reference b/tests/queries/0_stateless/01070_h3_to_children.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01070_h3_to_children.reference
rename to tests/queries/0_stateless/01070_h3_to_children.reference
diff --git a/dbms/tests/queries/0_stateless/01070_h3_to_children.sql b/tests/queries/0_stateless/01070_h3_to_children.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01070_h3_to_children.sql
rename to tests/queries/0_stateless/01070_h3_to_children.sql
diff --git a/dbms/tests/queries/0_stateless/01070_h3_to_parent.reference b/tests/queries/0_stateless/01070_h3_to_parent.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01070_h3_to_parent.reference
rename to tests/queries/0_stateless/01070_h3_to_parent.reference
diff --git a/dbms/tests/queries/0_stateless/01070_h3_to_parent.sql b/tests/queries/0_stateless/01070_h3_to_parent.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01070_h3_to_parent.sql
rename to tests/queries/0_stateless/01070_h3_to_parent.sql
diff --git a/dbms/tests/queries/0_stateless/01070_h3_to_string.reference b/tests/queries/0_stateless/01070_h3_to_string.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01070_h3_to_string.reference
rename to tests/queries/0_stateless/01070_h3_to_string.reference
diff --git a/dbms/tests/queries/0_stateless/01070_h3_to_string.sql b/tests/queries/0_stateless/01070_h3_to_string.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01070_h3_to_string.sql
rename to tests/queries/0_stateless/01070_h3_to_string.sql
diff --git a/dbms/tests/queries/0_stateless/01070_materialize_ttl.reference b/tests/queries/0_stateless/01070_materialize_ttl.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01070_materialize_ttl.reference
rename to tests/queries/0_stateless/01070_materialize_ttl.reference
diff --git a/dbms/tests/queries/0_stateless/01070_materialize_ttl.sql b/tests/queries/0_stateless/01070_materialize_ttl.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01070_materialize_ttl.sql
rename to tests/queries/0_stateless/01070_materialize_ttl.sql
diff --git a/dbms/tests/queries/0_stateless/01070_mutations_with_dependencies.reference b/tests/queries/0_stateless/01070_mutations_with_dependencies.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01070_mutations_with_dependencies.reference
rename to tests/queries/0_stateless/01070_mutations_with_dependencies.reference
diff --git a/dbms/tests/queries/0_stateless/01070_mutations_with_dependencies.sql b/tests/queries/0_stateless/01070_mutations_with_dependencies.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01070_mutations_with_dependencies.sql
rename to tests/queries/0_stateless/01070_mutations_with_dependencies.sql
diff --git a/dbms/tests/queries/0_stateless/01070_string_to_h3.reference b/tests/queries/0_stateless/01070_string_to_h3.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01070_string_to_h3.reference
rename to tests/queries/0_stateless/01070_string_to_h3.reference
diff --git a/dbms/tests/queries/0_stateless/01070_string_to_h3.sql b/tests/queries/0_stateless/01070_string_to_h3.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01070_string_to_h3.sql
rename to tests/queries/0_stateless/01070_string_to_h3.sql
diff --git a/dbms/tests/queries/0_stateless/01070_template_empty_file.reference b/tests/queries/0_stateless/01070_template_empty_file.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01070_template_empty_file.reference
rename to tests/queries/0_stateless/01070_template_empty_file.reference
diff --git a/dbms/tests/queries/0_stateless/01070_template_empty_file.sql b/tests/queries/0_stateless/01070_template_empty_file.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01070_template_empty_file.sql
rename to tests/queries/0_stateless/01070_template_empty_file.sql
diff --git a/dbms/tests/queries/0_stateless/01070_to_decimal_or_null_exception.reference b/tests/queries/0_stateless/01070_to_decimal_or_null_exception.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01070_to_decimal_or_null_exception.reference
rename to tests/queries/0_stateless/01070_to_decimal_or_null_exception.reference
diff --git a/dbms/tests/queries/0_stateless/01070_to_decimal_or_null_exception.sql b/tests/queries/0_stateless/01070_to_decimal_or_null_exception.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01070_to_decimal_or_null_exception.sql
rename to tests/queries/0_stateless/01070_to_decimal_or_null_exception.sql
diff --git a/dbms/tests/queries/0_stateless/01071_force_optimize_skip_unused_shards.reference b/tests/queries/0_stateless/01071_force_optimize_skip_unused_shards.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01071_force_optimize_skip_unused_shards.reference
rename to tests/queries/0_stateless/01071_force_optimize_skip_unused_shards.reference
diff --git a/dbms/tests/queries/0_stateless/01071_force_optimize_skip_unused_shards.sql b/tests/queries/0_stateless/01071_force_optimize_skip_unused_shards.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01071_force_optimize_skip_unused_shards.sql
rename to tests/queries/0_stateless/01071_force_optimize_skip_unused_shards.sql
diff --git a/dbms/tests/queries/0_stateless/01071_http_header_exception_code.reference b/tests/queries/0_stateless/01071_http_header_exception_code.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01071_http_header_exception_code.reference
rename to tests/queries/0_stateless/01071_http_header_exception_code.reference
diff --git a/dbms/tests/queries/0_stateless/01071_http_header_exception_code.sh b/tests/queries/0_stateless/01071_http_header_exception_code.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01071_http_header_exception_code.sh
rename to tests/queries/0_stateless/01071_http_header_exception_code.sh
diff --git a/dbms/tests/queries/0_stateless/01071_in_array.reference b/tests/queries/0_stateless/01071_in_array.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01071_in_array.reference
rename to tests/queries/0_stateless/01071_in_array.reference
diff --git a/dbms/tests/queries/0_stateless/01071_in_array.sql b/tests/queries/0_stateless/01071_in_array.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01071_in_array.sql
rename to tests/queries/0_stateless/01071_in_array.sql
diff --git a/dbms/tests/queries/0_stateless/01071_live_view_detach_dependency.reference b/tests/queries/0_stateless/01071_live_view_detach_dependency.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01071_live_view_detach_dependency.reference
rename to tests/queries/0_stateless/01071_live_view_detach_dependency.reference
diff --git a/dbms/tests/queries/0_stateless/01071_live_view_detach_dependency.sql b/tests/queries/0_stateless/01071_live_view_detach_dependency.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01071_live_view_detach_dependency.sql
rename to tests/queries/0_stateless/01071_live_view_detach_dependency.sql
diff --git a/dbms/tests/queries/0_stateless/01071_prohibition_secondary_index_with_old_format_merge_tree.reference b/tests/queries/0_stateless/01071_prohibition_secondary_index_with_old_format_merge_tree.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01071_prohibition_secondary_index_with_old_format_merge_tree.reference
rename to tests/queries/0_stateless/01071_prohibition_secondary_index_with_old_format_merge_tree.reference
diff --git a/dbms/tests/queries/0_stateless/01071_prohibition_secondary_index_with_old_format_merge_tree.sql b/tests/queries/0_stateless/01071_prohibition_secondary_index_with_old_format_merge_tree.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01071_prohibition_secondary_index_with_old_format_merge_tree.sql
rename to tests/queries/0_stateless/01071_prohibition_secondary_index_with_old_format_merge_tree.sql
diff --git a/dbms/tests/queries/0_stateless/01072_drop_temporary_table_with_same_name.reference b/tests/queries/0_stateless/01072_drop_temporary_table_with_same_name.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01072_drop_temporary_table_with_same_name.reference
rename to tests/queries/0_stateless/01072_drop_temporary_table_with_same_name.reference
diff --git a/dbms/tests/queries/0_stateless/01072_drop_temporary_table_with_same_name.sql b/tests/queries/0_stateless/01072_drop_temporary_table_with_same_name.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01072_drop_temporary_table_with_same_name.sql
rename to tests/queries/0_stateless/01072_drop_temporary_table_with_same_name.sql
diff --git a/dbms/tests/queries/0_stateless/01072_json_each_row_data_in_square_brackets.reference b/tests/queries/0_stateless/01072_json_each_row_data_in_square_brackets.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01072_json_each_row_data_in_square_brackets.reference
rename to tests/queries/0_stateless/01072_json_each_row_data_in_square_brackets.reference
diff --git a/dbms/tests/queries/0_stateless/01072_json_each_row_data_in_square_brackets.sql b/tests/queries/0_stateless/01072_json_each_row_data_in_square_brackets.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01072_json_each_row_data_in_square_brackets.sql
rename to tests/queries/0_stateless/01072_json_each_row_data_in_square_brackets.sql
diff --git a/dbms/tests/queries/0_stateless/01072_nullable_jit.reference b/tests/queries/0_stateless/01072_nullable_jit.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01072_nullable_jit.reference
rename to tests/queries/0_stateless/01072_nullable_jit.reference
diff --git a/dbms/tests/queries/0_stateless/01072_nullable_jit.sql b/tests/queries/0_stateless/01072_nullable_jit.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01072_nullable_jit.sql
rename to tests/queries/0_stateless/01072_nullable_jit.sql
diff --git a/dbms/tests/queries/0_stateless/01072_optimize_skip_unused_shards_const_expr_eval.reference b/tests/queries/0_stateless/01072_optimize_skip_unused_shards_const_expr_eval.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01072_optimize_skip_unused_shards_const_expr_eval.reference
rename to tests/queries/0_stateless/01072_optimize_skip_unused_shards_const_expr_eval.reference
diff --git a/dbms/tests/queries/0_stateless/01072_optimize_skip_unused_shards_const_expr_eval.sql b/tests/queries/0_stateless/01072_optimize_skip_unused_shards_const_expr_eval.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01072_optimize_skip_unused_shards_const_expr_eval.sql
rename to tests/queries/0_stateless/01072_optimize_skip_unused_shards_const_expr_eval.sql
diff --git a/dbms/tests/queries/0_stateless/01072_select_constant_limit.reference b/tests/queries/0_stateless/01072_select_constant_limit.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01072_select_constant_limit.reference
rename to tests/queries/0_stateless/01072_select_constant_limit.reference
diff --git a/dbms/tests/queries/0_stateless/01072_select_constant_limit.sql b/tests/queries/0_stateless/01072_select_constant_limit.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01072_select_constant_limit.sql
rename to tests/queries/0_stateless/01072_select_constant_limit.sql
diff --git a/dbms/tests/queries/0_stateless/01073_attach_if_not_exists.reference b/tests/queries/0_stateless/01073_attach_if_not_exists.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01073_attach_if_not_exists.reference
rename to tests/queries/0_stateless/01073_attach_if_not_exists.reference
diff --git a/dbms/tests/queries/0_stateless/01073_attach_if_not_exists.sql b/tests/queries/0_stateless/01073_attach_if_not_exists.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01073_attach_if_not_exists.sql
rename to tests/queries/0_stateless/01073_attach_if_not_exists.sql
diff --git a/dbms/tests/queries/0_stateless/01073_bad_alter_partition.reference b/tests/queries/0_stateless/01073_bad_alter_partition.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01073_bad_alter_partition.reference
rename to tests/queries/0_stateless/01073_bad_alter_partition.reference
diff --git a/dbms/tests/queries/0_stateless/01073_bad_alter_partition.sql b/tests/queries/0_stateless/01073_bad_alter_partition.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01073_bad_alter_partition.sql
rename to tests/queries/0_stateless/01073_bad_alter_partition.sql
diff --git a/dbms/tests/queries/0_stateless/01073_blockSerializedSize.reference b/tests/queries/0_stateless/01073_blockSerializedSize.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01073_blockSerializedSize.reference
rename to tests/queries/0_stateless/01073_blockSerializedSize.reference
diff --git a/dbms/tests/queries/0_stateless/01073_blockSerializedSize.sql b/tests/queries/0_stateless/01073_blockSerializedSize.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01073_blockSerializedSize.sql
rename to tests/queries/0_stateless/01073_blockSerializedSize.sql
diff --git a/dbms/tests/queries/0_stateless/01073_crlf_end_of_line.reference b/tests/queries/0_stateless/01073_crlf_end_of_line.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01073_crlf_end_of_line.reference
rename to tests/queries/0_stateless/01073_crlf_end_of_line.reference
diff --git a/dbms/tests/queries/0_stateless/01073_crlf_end_of_line.sql b/tests/queries/0_stateless/01073_crlf_end_of_line.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01073_crlf_end_of_line.sql
rename to tests/queries/0_stateless/01073_crlf_end_of_line.sql
diff --git a/dbms/tests/queries/0_stateless/01073_grant_and_revoke.reference b/tests/queries/0_stateless/01073_grant_and_revoke.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01073_grant_and_revoke.reference
rename to tests/queries/0_stateless/01073_grant_and_revoke.reference
diff --git a/dbms/tests/queries/0_stateless/01073_grant_and_revoke.sql b/tests/queries/0_stateless/01073_grant_and_revoke.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01073_grant_and_revoke.sql
rename to tests/queries/0_stateless/01073_grant_and_revoke.sql
diff --git a/dbms/tests/queries/0_stateless/01073_show_tables_not_like.reference b/tests/queries/0_stateless/01073_show_tables_not_like.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01073_show_tables_not_like.reference
rename to tests/queries/0_stateless/01073_show_tables_not_like.reference
diff --git a/dbms/tests/queries/0_stateless/01073_show_tables_not_like.sql b/tests/queries/0_stateless/01073_show_tables_not_like.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01073_show_tables_not_like.sql
rename to tests/queries/0_stateless/01073_show_tables_not_like.sql
diff --git a/dbms/tests/queries/0_stateless/01074_h3_range_check.reference b/tests/queries/0_stateless/01074_h3_range_check.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01074_h3_range_check.reference
rename to tests/queries/0_stateless/01074_h3_range_check.reference
diff --git a/dbms/tests/queries/0_stateless/01074_h3_range_check.sql b/tests/queries/0_stateless/01074_h3_range_check.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01074_h3_range_check.sql
rename to tests/queries/0_stateless/01074_h3_range_check.sql
diff --git a/dbms/tests/queries/0_stateless/01074_partial_revokes.reference b/tests/queries/0_stateless/01074_partial_revokes.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01074_partial_revokes.reference
rename to tests/queries/0_stateless/01074_partial_revokes.reference
diff --git a/dbms/tests/queries/0_stateless/01074_partial_revokes.sql b/tests/queries/0_stateless/01074_partial_revokes.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01074_partial_revokes.sql
rename to tests/queries/0_stateless/01074_partial_revokes.sql
diff --git a/dbms/tests/queries/0_stateless/01075_allowed_client_hosts.reference b/tests/queries/0_stateless/01075_allowed_client_hosts.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01075_allowed_client_hosts.reference
rename to tests/queries/0_stateless/01075_allowed_client_hosts.reference
diff --git a/dbms/tests/queries/0_stateless/01075_allowed_client_hosts.sql b/tests/queries/0_stateless/01075_allowed_client_hosts.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01075_allowed_client_hosts.sql
rename to tests/queries/0_stateless/01075_allowed_client_hosts.sql
diff --git a/dbms/tests/queries/0_stateless/01075_in_arrays_enmk.reference b/tests/queries/0_stateless/01075_in_arrays_enmk.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01075_in_arrays_enmk.reference
rename to tests/queries/0_stateless/01075_in_arrays_enmk.reference
diff --git a/dbms/tests/queries/0_stateless/01075_in_arrays_enmk.sql b/tests/queries/0_stateless/01075_in_arrays_enmk.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01075_in_arrays_enmk.sql
rename to tests/queries/0_stateless/01075_in_arrays_enmk.sql
diff --git a/dbms/tests/queries/0_stateless/01076_array_join_prewhere_const_folding.reference b/tests/queries/0_stateless/01076_array_join_prewhere_const_folding.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01076_array_join_prewhere_const_folding.reference
rename to tests/queries/0_stateless/01076_array_join_prewhere_const_folding.reference
diff --git a/dbms/tests/queries/0_stateless/01076_array_join_prewhere_const_folding.sql b/tests/queries/0_stateless/01076_array_join_prewhere_const_folding.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01076_array_join_prewhere_const_folding.sql
rename to tests/queries/0_stateless/01076_array_join_prewhere_const_folding.sql
diff --git a/dbms/tests/queries/0_stateless/01076_cache_dictionary_datarace_exception_ptr.reference b/tests/queries/0_stateless/01076_cache_dictionary_datarace_exception_ptr.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01076_cache_dictionary_datarace_exception_ptr.reference
rename to tests/queries/0_stateless/01076_cache_dictionary_datarace_exception_ptr.reference
diff --git a/dbms/tests/queries/0_stateless/01076_cache_dictionary_datarace_exception_ptr.sh b/tests/queries/0_stateless/01076_cache_dictionary_datarace_exception_ptr.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01076_cache_dictionary_datarace_exception_ptr.sh
rename to tests/queries/0_stateless/01076_cache_dictionary_datarace_exception_ptr.sh
diff --git a/dbms/tests/queries/0_stateless/01076_json_each_row_array.reference b/tests/queries/0_stateless/01076_json_each_row_array.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01076_json_each_row_array.reference
rename to tests/queries/0_stateless/01076_json_each_row_array.reference
diff --git a/dbms/tests/queries/0_stateless/01076_json_each_row_array.sh b/tests/queries/0_stateless/01076_json_each_row_array.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01076_json_each_row_array.sh
rename to tests/queries/0_stateless/01076_json_each_row_array.sh
diff --git a/dbms/tests/queries/0_stateless/01076_parallel_alter_replicated_zookeeper.reference b/tests/queries/0_stateless/01076_parallel_alter_replicated_zookeeper.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01076_parallel_alter_replicated_zookeeper.reference
rename to tests/queries/0_stateless/01076_parallel_alter_replicated_zookeeper.reference
diff --git a/dbms/tests/queries/0_stateless/01076_parallel_alter_replicated_zookeeper.sh b/tests/queries/0_stateless/01076_parallel_alter_replicated_zookeeper.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01076_parallel_alter_replicated_zookeeper.sh
rename to tests/queries/0_stateless/01076_parallel_alter_replicated_zookeeper.sh
diff --git a/dbms/tests/queries/0_stateless/01076_predicate_optimizer_with_view.reference b/tests/queries/0_stateless/01076_predicate_optimizer_with_view.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01076_predicate_optimizer_with_view.reference
rename to tests/queries/0_stateless/01076_predicate_optimizer_with_view.reference
diff --git a/dbms/tests/queries/0_stateless/01076_predicate_optimizer_with_view.sql b/tests/queries/0_stateless/01076_predicate_optimizer_with_view.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01076_predicate_optimizer_with_view.sql
rename to tests/queries/0_stateless/01076_predicate_optimizer_with_view.sql
diff --git a/dbms/tests/queries/0_stateless/01076_range_reader_segfault.reference b/tests/queries/0_stateless/01076_range_reader_segfault.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01076_range_reader_segfault.reference
rename to tests/queries/0_stateless/01076_range_reader_segfault.reference
diff --git a/dbms/tests/queries/0_stateless/01076_range_reader_segfault.sql b/tests/queries/0_stateless/01076_range_reader_segfault.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01076_range_reader_segfault.sql
rename to tests/queries/0_stateless/01076_range_reader_segfault.sql
diff --git a/dbms/tests/queries/0_stateless/01077_mutations_index_consistency.reference b/tests/queries/0_stateless/01077_mutations_index_consistency.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01077_mutations_index_consistency.reference
rename to tests/queries/0_stateless/01077_mutations_index_consistency.reference
diff --git a/dbms/tests/queries/0_stateless/01077_mutations_index_consistency.sh b/tests/queries/0_stateless/01077_mutations_index_consistency.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01077_mutations_index_consistency.sh
rename to tests/queries/0_stateless/01077_mutations_index_consistency.sh
diff --git a/dbms/tests/queries/0_stateless/01077_yet_another_prewhere_test.reference b/tests/queries/0_stateless/01077_yet_another_prewhere_test.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01077_yet_another_prewhere_test.reference
rename to tests/queries/0_stateless/01077_yet_another_prewhere_test.reference
diff --git a/dbms/tests/queries/0_stateless/01077_yet_another_prewhere_test.sql b/tests/queries/0_stateless/01077_yet_another_prewhere_test.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01077_yet_another_prewhere_test.sql
rename to tests/queries/0_stateless/01077_yet_another_prewhere_test.sql
diff --git a/dbms/tests/queries/0_stateless/01078_bloom_filter_operator_not_has.reference b/tests/queries/0_stateless/01078_bloom_filter_operator_not_has.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01078_bloom_filter_operator_not_has.reference
rename to tests/queries/0_stateless/01078_bloom_filter_operator_not_has.reference
diff --git a/dbms/tests/queries/0_stateless/01078_bloom_filter_operator_not_has.sql b/tests/queries/0_stateless/01078_bloom_filter_operator_not_has.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01078_bloom_filter_operator_not_has.sql
rename to tests/queries/0_stateless/01078_bloom_filter_operator_not_has.sql
diff --git a/dbms/tests/queries/0_stateless/01078_merge_tree_read_one_thread.reference b/tests/queries/0_stateless/01078_merge_tree_read_one_thread.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01078_merge_tree_read_one_thread.reference
rename to tests/queries/0_stateless/01078_merge_tree_read_one_thread.reference
diff --git a/dbms/tests/queries/0_stateless/01078_merge_tree_read_one_thread.sql b/tests/queries/0_stateless/01078_merge_tree_read_one_thread.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01078_merge_tree_read_one_thread.sql
rename to tests/queries/0_stateless/01078_merge_tree_read_one_thread.sql
diff --git a/dbms/tests/queries/0_stateless/01079_alter_default_zookeeper.reference b/tests/queries/0_stateless/01079_alter_default_zookeeper.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01079_alter_default_zookeeper.reference
rename to tests/queries/0_stateless/01079_alter_default_zookeeper.reference
diff --git a/dbms/tests/queries/0_stateless/01079_alter_default_zookeeper.sql b/tests/queries/0_stateless/01079_alter_default_zookeeper.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01079_alter_default_zookeeper.sql
rename to tests/queries/0_stateless/01079_alter_default_zookeeper.sql
diff --git a/dbms/tests/queries/0_stateless/01079_bad_alters_zookeeper.reference b/tests/queries/0_stateless/01079_bad_alters_zookeeper.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01079_bad_alters_zookeeper.reference
rename to tests/queries/0_stateless/01079_bad_alters_zookeeper.reference
diff --git a/dbms/tests/queries/0_stateless/01079_bad_alters_zookeeper.sh b/tests/queries/0_stateless/01079_bad_alters_zookeeper.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01079_bad_alters_zookeeper.sh
rename to tests/queries/0_stateless/01079_bad_alters_zookeeper.sh
diff --git a/dbms/tests/queries/0_stateless/01079_bit_operations_using_bitset.reference b/tests/queries/0_stateless/01079_bit_operations_using_bitset.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01079_bit_operations_using_bitset.reference
rename to tests/queries/0_stateless/01079_bit_operations_using_bitset.reference
diff --git a/dbms/tests/queries/0_stateless/01079_bit_operations_using_bitset.sql b/tests/queries/0_stateless/01079_bit_operations_using_bitset.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01079_bit_operations_using_bitset.sql
rename to tests/queries/0_stateless/01079_bit_operations_using_bitset.sql
diff --git a/dbms/tests/queries/0_stateless/01079_new_range_reader_segfault.reference b/tests/queries/0_stateless/01079_new_range_reader_segfault.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01079_new_range_reader_segfault.reference
rename to tests/queries/0_stateless/01079_new_range_reader_segfault.reference
diff --git a/dbms/tests/queries/0_stateless/01079_new_range_reader_segfault.sql b/tests/queries/0_stateless/01079_new_range_reader_segfault.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01079_new_range_reader_segfault.sql
rename to tests/queries/0_stateless/01079_new_range_reader_segfault.sql
diff --git a/dbms/tests/queries/0_stateless/01079_order_by_pk.reference b/tests/queries/0_stateless/01079_order_by_pk.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01079_order_by_pk.reference
rename to tests/queries/0_stateless/01079_order_by_pk.reference
diff --git a/dbms/tests/queries/0_stateless/01079_order_by_pk.sql b/tests/queries/0_stateless/01079_order_by_pk.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01079_order_by_pk.sql
rename to tests/queries/0_stateless/01079_order_by_pk.sql
diff --git a/dbms/tests/queries/0_stateless/01079_parallel_alter_add_drop_column_zookeeper.reference b/tests/queries/0_stateless/01079_parallel_alter_add_drop_column_zookeeper.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01079_parallel_alter_add_drop_column_zookeeper.reference
rename to tests/queries/0_stateless/01079_parallel_alter_add_drop_column_zookeeper.reference
diff --git a/dbms/tests/queries/0_stateless/01079_parallel_alter_add_drop_column_zookeeper.sh b/tests/queries/0_stateless/01079_parallel_alter_add_drop_column_zookeeper.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01079_parallel_alter_add_drop_column_zookeeper.sh
rename to tests/queries/0_stateless/01079_parallel_alter_add_drop_column_zookeeper.sh
diff --git a/dbms/tests/queries/0_stateless/01079_parallel_alter_detach_table_zookeeper.reference b/tests/queries/0_stateless/01079_parallel_alter_detach_table_zookeeper.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01079_parallel_alter_detach_table_zookeeper.reference
rename to tests/queries/0_stateless/01079_parallel_alter_detach_table_zookeeper.reference
diff --git a/dbms/tests/queries/0_stateless/01079_parallel_alter_detach_table_zookeeper.sh b/tests/queries/0_stateless/01079_parallel_alter_detach_table_zookeeper.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01079_parallel_alter_detach_table_zookeeper.sh
rename to tests/queries/0_stateless/01079_parallel_alter_detach_table_zookeeper.sh
diff --git a/dbms/tests/queries/0_stateless/01079_parallel_alter_modify_zookeeper.referece b/tests/queries/0_stateless/01079_parallel_alter_modify_zookeeper.referece
similarity index 100%
rename from dbms/tests/queries/0_stateless/01079_parallel_alter_modify_zookeeper.referece
rename to tests/queries/0_stateless/01079_parallel_alter_modify_zookeeper.referece
diff --git a/dbms/tests/queries/0_stateless/01079_parallel_alter_modify_zookeeper.reference b/tests/queries/0_stateless/01079_parallel_alter_modify_zookeeper.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01079_parallel_alter_modify_zookeeper.reference
rename to tests/queries/0_stateless/01079_parallel_alter_modify_zookeeper.reference
diff --git a/dbms/tests/queries/0_stateless/01079_parallel_alter_modify_zookeeper.sh b/tests/queries/0_stateless/01079_parallel_alter_modify_zookeeper.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01079_parallel_alter_modify_zookeeper.sh
rename to tests/queries/0_stateless/01079_parallel_alter_modify_zookeeper.sh
diff --git a/dbms/tests/queries/0_stateless/01079_reinterpret_as_fixed_string.reference b/tests/queries/0_stateless/01079_reinterpret_as_fixed_string.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01079_reinterpret_as_fixed_string.reference
rename to tests/queries/0_stateless/01079_reinterpret_as_fixed_string.reference
diff --git a/dbms/tests/queries/0_stateless/01079_reinterpret_as_fixed_string.sql b/tests/queries/0_stateless/01079_reinterpret_as_fixed_string.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01079_reinterpret_as_fixed_string.sql
rename to tests/queries/0_stateless/01079_reinterpret_as_fixed_string.sql
diff --git a/dbms/tests/queries/0_stateless/01080_check_for_error_incorrect_size_of_nested_column.reference b/tests/queries/0_stateless/01080_check_for_error_incorrect_size_of_nested_column.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01080_check_for_error_incorrect_size_of_nested_column.reference
rename to tests/queries/0_stateless/01080_check_for_error_incorrect_size_of_nested_column.reference
diff --git a/dbms/tests/queries/0_stateless/01080_check_for_error_incorrect_size_of_nested_column.sql b/tests/queries/0_stateless/01080_check_for_error_incorrect_size_of_nested_column.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01080_check_for_error_incorrect_size_of_nested_column.sql
rename to tests/queries/0_stateless/01080_check_for_error_incorrect_size_of_nested_column.sql
diff --git a/dbms/tests/queries/0_stateless/01080_engine_merge_prewhere_tupleelement_error.reference b/tests/queries/0_stateless/01080_engine_merge_prewhere_tupleelement_error.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01080_engine_merge_prewhere_tupleelement_error.reference
rename to tests/queries/0_stateless/01080_engine_merge_prewhere_tupleelement_error.reference
diff --git a/dbms/tests/queries/0_stateless/01080_engine_merge_prewhere_tupleelement_error.sql b/tests/queries/0_stateless/01080_engine_merge_prewhere_tupleelement_error.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01080_engine_merge_prewhere_tupleelement_error.sql
rename to tests/queries/0_stateless/01080_engine_merge_prewhere_tupleelement_error.sql
diff --git a/dbms/tests/queries/0_stateless/01080_join_get_null.reference b/tests/queries/0_stateless/01080_join_get_null.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01080_join_get_null.reference
rename to tests/queries/0_stateless/01080_join_get_null.reference
diff --git a/dbms/tests/queries/0_stateless/01080_join_get_null.sql b/tests/queries/0_stateless/01080_join_get_null.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01080_join_get_null.sql
rename to tests/queries/0_stateless/01080_join_get_null.sql
diff --git a/dbms/tests/queries/0_stateless/01081_PartialSortingTransform_full_column.reference b/tests/queries/0_stateless/01081_PartialSortingTransform_full_column.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01081_PartialSortingTransform_full_column.reference
rename to tests/queries/0_stateless/01081_PartialSortingTransform_full_column.reference
diff --git a/dbms/tests/queries/0_stateless/01081_PartialSortingTransform_full_column.sql b/tests/queries/0_stateless/01081_PartialSortingTransform_full_column.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01081_PartialSortingTransform_full_column.sql
rename to tests/queries/0_stateless/01081_PartialSortingTransform_full_column.sql
diff --git a/dbms/tests/queries/0_stateless/01081_demangle.reference b/tests/queries/0_stateless/01081_demangle.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01081_demangle.reference
rename to tests/queries/0_stateless/01081_demangle.reference
diff --git a/dbms/tests/queries/0_stateless/01081_demangle.sql b/tests/queries/0_stateless/01081_demangle.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01081_demangle.sql
rename to tests/queries/0_stateless/01081_demangle.sql
diff --git a/dbms/tests/queries/0_stateless/01081_keywords_formatting.reference b/tests/queries/0_stateless/01081_keywords_formatting.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01081_keywords_formatting.reference
rename to tests/queries/0_stateless/01081_keywords_formatting.reference
diff --git a/dbms/tests/queries/0_stateless/01081_keywords_formatting.sql b/tests/queries/0_stateless/01081_keywords_formatting.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01081_keywords_formatting.sql
rename to tests/queries/0_stateless/01081_keywords_formatting.sql
diff --git a/dbms/tests/queries/0_stateless/01082_bit_test_out_of_bound.reference b/tests/queries/0_stateless/01082_bit_test_out_of_bound.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01082_bit_test_out_of_bound.reference
rename to tests/queries/0_stateless/01082_bit_test_out_of_bound.reference
diff --git a/dbms/tests/queries/0_stateless/01082_bit_test_out_of_bound.sql b/tests/queries/0_stateless/01082_bit_test_out_of_bound.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01082_bit_test_out_of_bound.sql
rename to tests/queries/0_stateless/01082_bit_test_out_of_bound.sql
diff --git a/dbms/tests/queries/0_stateless/01083_aggregation_memory_efficient_bug.reference b/tests/queries/0_stateless/01083_aggregation_memory_efficient_bug.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01083_aggregation_memory_efficient_bug.reference
rename to tests/queries/0_stateless/01083_aggregation_memory_efficient_bug.reference
diff --git a/dbms/tests/queries/0_stateless/01083_aggregation_memory_efficient_bug.sql b/tests/queries/0_stateless/01083_aggregation_memory_efficient_bug.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01083_aggregation_memory_efficient_bug.sql
rename to tests/queries/0_stateless/01083_aggregation_memory_efficient_bug.sql
diff --git a/dbms/tests/queries/0_stateless/01083_cross_to_inner_with_in_bug.reference b/tests/queries/0_stateless/01083_cross_to_inner_with_in_bug.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01083_cross_to_inner_with_in_bug.reference
rename to tests/queries/0_stateless/01083_cross_to_inner_with_in_bug.reference
diff --git a/dbms/tests/queries/0_stateless/01083_cross_to_inner_with_in_bug.sql b/tests/queries/0_stateless/01083_cross_to_inner_with_in_bug.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01083_cross_to_inner_with_in_bug.sql
rename to tests/queries/0_stateless/01083_cross_to_inner_with_in_bug.sql
diff --git a/dbms/tests/queries/0_stateless/01083_cross_to_inner_with_like.reference b/tests/queries/0_stateless/01083_cross_to_inner_with_like.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01083_cross_to_inner_with_like.reference
rename to tests/queries/0_stateless/01083_cross_to_inner_with_like.reference
diff --git a/dbms/tests/queries/0_stateless/01083_cross_to_inner_with_like.sql b/tests/queries/0_stateless/01083_cross_to_inner_with_like.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01083_cross_to_inner_with_like.sql
rename to tests/queries/0_stateless/01083_cross_to_inner_with_like.sql
diff --git a/dbms/tests/queries/0_stateless/01083_expressions_in_engine_arguments.reference b/tests/queries/0_stateless/01083_expressions_in_engine_arguments.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01083_expressions_in_engine_arguments.reference
rename to tests/queries/0_stateless/01083_expressions_in_engine_arguments.reference
diff --git a/dbms/tests/queries/0_stateless/01083_expressions_in_engine_arguments.sql b/tests/queries/0_stateless/01083_expressions_in_engine_arguments.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01083_expressions_in_engine_arguments.sql
rename to tests/queries/0_stateless/01083_expressions_in_engine_arguments.sql
diff --git a/dbms/tests/queries/0_stateless/01083_functional_index_in_mergetree.reference b/tests/queries/0_stateless/01083_functional_index_in_mergetree.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01083_functional_index_in_mergetree.reference
rename to tests/queries/0_stateless/01083_functional_index_in_mergetree.reference
diff --git a/dbms/tests/queries/0_stateless/01083_functional_index_in_mergetree.sql b/tests/queries/0_stateless/01083_functional_index_in_mergetree.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01083_functional_index_in_mergetree.sql
rename to tests/queries/0_stateless/01083_functional_index_in_mergetree.sql
diff --git a/dbms/tests/queries/0_stateless/01083_log_family_disk_memory.reference b/tests/queries/0_stateless/01083_log_family_disk_memory.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01083_log_family_disk_memory.reference
rename to tests/queries/0_stateless/01083_log_family_disk_memory.reference
diff --git a/dbms/tests/queries/0_stateless/01083_log_family_disk_memory.sql b/tests/queries/0_stateless/01083_log_family_disk_memory.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01083_log_family_disk_memory.sql
rename to tests/queries/0_stateless/01083_log_family_disk_memory.sql
diff --git a/dbms/tests/queries/0_stateless/01083_log_first_column_alias.reference b/tests/queries/0_stateless/01083_log_first_column_alias.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01083_log_first_column_alias.reference
rename to tests/queries/0_stateless/01083_log_first_column_alias.reference
diff --git a/dbms/tests/queries/0_stateless/01083_log_first_column_alias.sql b/tests/queries/0_stateless/01083_log_first_column_alias.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01083_log_first_column_alias.sql
rename to tests/queries/0_stateless/01083_log_first_column_alias.sql
diff --git a/dbms/tests/queries/0_stateless/01083_match_zero_byte.reference b/tests/queries/0_stateless/01083_match_zero_byte.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01083_match_zero_byte.reference
rename to tests/queries/0_stateless/01083_match_zero_byte.reference
diff --git a/dbms/tests/queries/0_stateless/01083_match_zero_byte.sql b/tests/queries/0_stateless/01083_match_zero_byte.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01083_match_zero_byte.sql
rename to tests/queries/0_stateless/01083_match_zero_byte.sql
diff --git a/dbms/tests/queries/0_stateless/01084_defaults_on_aliases.reference b/tests/queries/0_stateless/01084_defaults_on_aliases.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01084_defaults_on_aliases.reference
rename to tests/queries/0_stateless/01084_defaults_on_aliases.reference
diff --git a/dbms/tests/queries/0_stateless/01084_defaults_on_aliases.sql b/tests/queries/0_stateless/01084_defaults_on_aliases.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01084_defaults_on_aliases.sql
rename to tests/queries/0_stateless/01084_defaults_on_aliases.sql
diff --git a/dbms/tests/queries/0_stateless/01084_regexp_empty.reference b/tests/queries/0_stateless/01084_regexp_empty.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01084_regexp_empty.reference
rename to tests/queries/0_stateless/01084_regexp_empty.reference
diff --git a/dbms/tests/queries/0_stateless/01084_regexp_empty.sql b/tests/queries/0_stateless/01084_regexp_empty.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01084_regexp_empty.sql
rename to tests/queries/0_stateless/01084_regexp_empty.sql
diff --git a/dbms/tests/queries/0_stateless/01085_datetime_arithmetic_preserve_timezone.reference b/tests/queries/0_stateless/01085_datetime_arithmetic_preserve_timezone.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01085_datetime_arithmetic_preserve_timezone.reference
rename to tests/queries/0_stateless/01085_datetime_arithmetic_preserve_timezone.reference
diff --git a/dbms/tests/queries/0_stateless/01085_datetime_arithmetic_preserve_timezone.sql b/tests/queries/0_stateless/01085_datetime_arithmetic_preserve_timezone.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01085_datetime_arithmetic_preserve_timezone.sql
rename to tests/queries/0_stateless/01085_datetime_arithmetic_preserve_timezone.sql
diff --git a/dbms/tests/queries/0_stateless/01085_extract_all_empty.reference b/tests/queries/0_stateless/01085_extract_all_empty.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01085_extract_all_empty.reference
rename to tests/queries/0_stateless/01085_extract_all_empty.reference
diff --git a/dbms/tests/queries/0_stateless/01085_extract_all_empty.sql b/tests/queries/0_stateless/01085_extract_all_empty.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01085_extract_all_empty.sql
rename to tests/queries/0_stateless/01085_extract_all_empty.sql
diff --git a/dbms/tests/queries/0_stateless/01085_max_distributed_connections.reference b/tests/queries/0_stateless/01085_max_distributed_connections.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01085_max_distributed_connections.reference
rename to tests/queries/0_stateless/01085_max_distributed_connections.reference
diff --git a/dbms/tests/queries/0_stateless/01085_max_distributed_connections.sh b/tests/queries/0_stateless/01085_max_distributed_connections.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01085_max_distributed_connections.sh
rename to tests/queries/0_stateless/01085_max_distributed_connections.sh
diff --git a/dbms/tests/queries/0_stateless/01085_max_distributed_connections_http.reference b/tests/queries/0_stateless/01085_max_distributed_connections_http.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01085_max_distributed_connections_http.reference
rename to tests/queries/0_stateless/01085_max_distributed_connections_http.reference
diff --git a/dbms/tests/queries/0_stateless/01085_max_distributed_connections_http.sh b/tests/queries/0_stateless/01085_max_distributed_connections_http.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01085_max_distributed_connections_http.sh
rename to tests/queries/0_stateless/01085_max_distributed_connections_http.sh
diff --git a/dbms/tests/queries/0_stateless/01085_regexp_input_format.reference b/tests/queries/0_stateless/01085_regexp_input_format.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01085_regexp_input_format.reference
rename to tests/queries/0_stateless/01085_regexp_input_format.reference
diff --git a/dbms/tests/queries/0_stateless/01085_regexp_input_format.sh b/tests/queries/0_stateless/01085_regexp_input_format.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01085_regexp_input_format.sh
rename to tests/queries/0_stateless/01085_regexp_input_format.sh
diff --git a/dbms/tests/queries/0_stateless/01085_simdjson_uint64.reference b/tests/queries/0_stateless/01085_simdjson_uint64.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01085_simdjson_uint64.reference
rename to tests/queries/0_stateless/01085_simdjson_uint64.reference
diff --git a/dbms/tests/queries/0_stateless/01085_simdjson_uint64.sql b/tests/queries/0_stateless/01085_simdjson_uint64.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01085_simdjson_uint64.sql
rename to tests/queries/0_stateless/01085_simdjson_uint64.sql
diff --git a/dbms/tests/queries/0_stateless/01086_modulo_or_zero.reference b/tests/queries/0_stateless/01086_modulo_or_zero.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01086_modulo_or_zero.reference
rename to tests/queries/0_stateless/01086_modulo_or_zero.reference
diff --git a/dbms/tests/queries/0_stateless/01086_modulo_or_zero.sql b/tests/queries/0_stateless/01086_modulo_or_zero.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01086_modulo_or_zero.sql
rename to tests/queries/0_stateless/01086_modulo_or_zero.sql
diff --git a/dbms/tests/queries/0_stateless/01086_odbc_roundtrip.reference b/tests/queries/0_stateless/01086_odbc_roundtrip.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01086_odbc_roundtrip.reference
rename to tests/queries/0_stateless/01086_odbc_roundtrip.reference
diff --git a/dbms/tests/queries/0_stateless/01086_odbc_roundtrip.sql b/tests/queries/0_stateless/01086_odbc_roundtrip.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01086_odbc_roundtrip.sql
rename to tests/queries/0_stateless/01086_odbc_roundtrip.sql
diff --git a/dbms/tests/queries/0_stateless/01086_regexp_input_format_skip_unmatched.reference b/tests/queries/0_stateless/01086_regexp_input_format_skip_unmatched.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01086_regexp_input_format_skip_unmatched.reference
rename to tests/queries/0_stateless/01086_regexp_input_format_skip_unmatched.reference
diff --git a/dbms/tests/queries/0_stateless/01086_regexp_input_format_skip_unmatched.sh b/tests/queries/0_stateless/01086_regexp_input_format_skip_unmatched.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01086_regexp_input_format_skip_unmatched.sh
rename to tests/queries/0_stateless/01086_regexp_input_format_skip_unmatched.sh
diff --git a/dbms/tests/queries/0_stateless/01087_index_set_ubsan.reference b/tests/queries/0_stateless/01087_index_set_ubsan.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01087_index_set_ubsan.reference
rename to tests/queries/0_stateless/01087_index_set_ubsan.reference
diff --git a/dbms/tests/queries/0_stateless/01087_index_set_ubsan.sql b/tests/queries/0_stateless/01087_index_set_ubsan.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01087_index_set_ubsan.sql
rename to tests/queries/0_stateless/01087_index_set_ubsan.sql
diff --git a/dbms/tests/queries/0_stateless/01087_storage_generate.reference b/tests/queries/0_stateless/01087_storage_generate.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01087_storage_generate.reference
rename to tests/queries/0_stateless/01087_storage_generate.reference
diff --git a/dbms/tests/queries/0_stateless/01087_storage_generate.sql b/tests/queries/0_stateless/01087_storage_generate.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01087_storage_generate.sql
rename to tests/queries/0_stateless/01087_storage_generate.sql
diff --git a/dbms/tests/queries/0_stateless/01087_table_function_generate.reference b/tests/queries/0_stateless/01087_table_function_generate.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01087_table_function_generate.reference
rename to tests/queries/0_stateless/01087_table_function_generate.reference
diff --git a/dbms/tests/queries/0_stateless/01087_table_function_generate.sql b/tests/queries/0_stateless/01087_table_function_generate.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01087_table_function_generate.sql
rename to tests/queries/0_stateless/01087_table_function_generate.sql
diff --git a/dbms/tests/queries/0_stateless/01088_array_slice_of_aggregate_functions.reference b/tests/queries/0_stateless/01088_array_slice_of_aggregate_functions.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01088_array_slice_of_aggregate_functions.reference
rename to tests/queries/0_stateless/01088_array_slice_of_aggregate_functions.reference
diff --git a/dbms/tests/queries/0_stateless/01088_array_slice_of_aggregate_functions.sql b/tests/queries/0_stateless/01088_array_slice_of_aggregate_functions.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01088_array_slice_of_aggregate_functions.sql
rename to tests/queries/0_stateless/01088_array_slice_of_aggregate_functions.sql
diff --git a/dbms/tests/queries/0_stateless/01088_benchmark_query_id.reference b/tests/queries/0_stateless/01088_benchmark_query_id.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01088_benchmark_query_id.reference
rename to tests/queries/0_stateless/01088_benchmark_query_id.reference
diff --git a/dbms/tests/queries/0_stateless/01088_benchmark_query_id.sh b/tests/queries/0_stateless/01088_benchmark_query_id.sh
similarity index 100%
rename from dbms/tests/queries/0_stateless/01088_benchmark_query_id.sh
rename to tests/queries/0_stateless/01088_benchmark_query_id.sh
diff --git a/dbms/tests/queries/0_stateless/01089_alter_settings_old_format.reference b/tests/queries/0_stateless/01089_alter_settings_old_format.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01089_alter_settings_old_format.reference
rename to tests/queries/0_stateless/01089_alter_settings_old_format.reference
diff --git a/dbms/tests/queries/0_stateless/01089_alter_settings_old_format.sql b/tests/queries/0_stateless/01089_alter_settings_old_format.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01089_alter_settings_old_format.sql
rename to tests/queries/0_stateless/01089_alter_settings_old_format.sql
diff --git a/dbms/tests/queries/0_stateless/01090_fixed_string_bit_ops.reference b/tests/queries/0_stateless/01090_fixed_string_bit_ops.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01090_fixed_string_bit_ops.reference
rename to tests/queries/0_stateless/01090_fixed_string_bit_ops.reference
diff --git a/dbms/tests/queries/0_stateless/01090_fixed_string_bit_ops.sql b/tests/queries/0_stateless/01090_fixed_string_bit_ops.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01090_fixed_string_bit_ops.sql
rename to tests/queries/0_stateless/01090_fixed_string_bit_ops.sql
diff --git a/dbms/tests/queries/0_stateless/01090_zookeeper_mutations_and_insert_quorum.reference b/tests/queries/0_stateless/01090_zookeeper_mutations_and_insert_quorum.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01090_zookeeper_mutations_and_insert_quorum.reference
rename to tests/queries/0_stateless/01090_zookeeper_mutations_and_insert_quorum.reference
diff --git a/dbms/tests/queries/0_stateless/01090_zookeeper_mutations_and_insert_quorum.sql b/tests/queries/0_stateless/01090_zookeeper_mutations_and_insert_quorum.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01090_zookeeper_mutations_and_insert_quorum.sql
rename to tests/queries/0_stateless/01090_zookeeper_mutations_and_insert_quorum.sql
diff --git a/dbms/tests/queries/0_stateless/01091_insert_with_default_json.reference b/tests/queries/0_stateless/01091_insert_with_default_json.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01091_insert_with_default_json.reference
rename to tests/queries/0_stateless/01091_insert_with_default_json.reference
diff --git a/dbms/tests/queries/0_stateless/01091_insert_with_default_json.sql b/tests/queries/0_stateless/01091_insert_with_default_json.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01091_insert_with_default_json.sql
rename to tests/queries/0_stateless/01091_insert_with_default_json.sql
diff --git a/dbms/tests/queries/0_stateless/01091_num_threads.reference b/tests/queries/0_stateless/01091_num_threads.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01091_num_threads.reference
rename to tests/queries/0_stateless/01091_num_threads.reference
diff --git a/dbms/tests/queries/0_stateless/01091_num_threads.sql b/tests/queries/0_stateless/01091_num_threads.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01091_num_threads.sql
rename to tests/queries/0_stateless/01091_num_threads.sql
diff --git a/dbms/tests/queries/0_stateless/01091_query_profiler_does_not_hang.reference b/tests/queries/0_stateless/01091_query_profiler_does_not_hang.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01091_query_profiler_does_not_hang.reference
rename to tests/queries/0_stateless/01091_query_profiler_does_not_hang.reference
diff --git a/dbms/tests/queries/0_stateless/01091_query_profiler_does_not_hang.sql b/tests/queries/0_stateless/01091_query_profiler_does_not_hang.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01091_query_profiler_does_not_hang.sql
rename to tests/queries/0_stateless/01091_query_profiler_does_not_hang.sql
diff --git a/dbms/tests/queries/0_stateless/01092_base64.reference b/tests/queries/0_stateless/01092_base64.reference
similarity index 100%
rename from dbms/tests/queries/0_stateless/01092_base64.reference
rename to tests/queries/0_stateless/01092_base64.reference
diff --git a/dbms/tests/queries/0_stateless/01092_base64.sql b/tests/queries/0_stateless/01092_base64.sql
similarity index 100%
rename from dbms/tests/queries/0_stateless/01092_base64.sql
rename to tests/queries/0_stateless/01092_base64.sql
diff --git a/dbms/tests/queries/0_stateless/01092_memory_profiler.reference
b/tests/queries/0_stateless/01092_memory_profiler.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01092_memory_profiler.reference rename to tests/queries/0_stateless/01092_memory_profiler.reference diff --git a/dbms/tests/queries/0_stateless/01092_memory_profiler.sql b/tests/queries/0_stateless/01092_memory_profiler.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01092_memory_profiler.sql rename to tests/queries/0_stateless/01092_memory_profiler.sql diff --git a/dbms/tests/queries/0_stateless/01093_cyclic_defaults_filimonov.reference b/tests/queries/0_stateless/01093_cyclic_defaults_filimonov.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01093_cyclic_defaults_filimonov.reference rename to tests/queries/0_stateless/01093_cyclic_defaults_filimonov.reference diff --git a/dbms/tests/queries/0_stateless/01093_cyclic_defaults_filimonov.sql b/tests/queries/0_stateless/01093_cyclic_defaults_filimonov.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01093_cyclic_defaults_filimonov.sql rename to tests/queries/0_stateless/01093_cyclic_defaults_filimonov.sql diff --git a/dbms/tests/queries/0_stateless/01095_tpch_like_smoke.reference b/tests/queries/0_stateless/01095_tpch_like_smoke.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01095_tpch_like_smoke.reference rename to tests/queries/0_stateless/01095_tpch_like_smoke.reference diff --git a/dbms/tests/queries/0_stateless/01095_tpch_like_smoke.sql b/tests/queries/0_stateless/01095_tpch_like_smoke.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01095_tpch_like_smoke.sql rename to tests/queries/0_stateless/01095_tpch_like_smoke.sql diff --git a/dbms/tests/queries/0_stateless/01096_array_reduce_in_ranges.reference b/tests/queries/0_stateless/01096_array_reduce_in_ranges.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01096_array_reduce_in_ranges.reference rename to tests/queries/0_stateless/01096_array_reduce_in_ranges.reference diff --git a/dbms/tests/queries/0_stateless/01096_array_reduce_in_ranges.sql b/tests/queries/0_stateless/01096_array_reduce_in_ranges.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01096_array_reduce_in_ranges.sql rename to tests/queries/0_stateless/01096_array_reduce_in_ranges.sql diff --git a/dbms/tests/queries/0_stateless/01096_block_serialized_state.reference b/tests/queries/0_stateless/01096_block_serialized_state.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01096_block_serialized_state.reference rename to tests/queries/0_stateless/01096_block_serialized_state.reference diff --git a/dbms/tests/queries/0_stateless/01096_block_serialized_state.sql b/tests/queries/0_stateless/01096_block_serialized_state.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01096_block_serialized_state.sql rename to tests/queries/0_stateless/01096_block_serialized_state.sql diff --git a/dbms/tests/queries/0_stateless/01096_zeros.reference b/tests/queries/0_stateless/01096_zeros.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01096_zeros.reference rename to tests/queries/0_stateless/01096_zeros.reference diff --git a/dbms/tests/queries/0_stateless/01096_zeros.sql b/tests/queries/0_stateless/01096_zeros.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01096_zeros.sql rename to tests/queries/0_stateless/01096_zeros.sql diff --git 
a/dbms/tests/queries/0_stateless/01097_cyclic_defaults.reference b/tests/queries/0_stateless/01097_cyclic_defaults.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01097_cyclic_defaults.reference rename to tests/queries/0_stateless/01097_cyclic_defaults.reference diff --git a/dbms/tests/queries/0_stateless/01097_cyclic_defaults.sql b/tests/queries/0_stateless/01097_cyclic_defaults.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01097_cyclic_defaults.sql rename to tests/queries/0_stateless/01097_cyclic_defaults.sql diff --git a/dbms/tests/queries/0_stateless/01097_one_more_range_reader_test.reference b/tests/queries/0_stateless/01097_one_more_range_reader_test.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01097_one_more_range_reader_test.reference rename to tests/queries/0_stateless/01097_one_more_range_reader_test.reference diff --git a/dbms/tests/queries/0_stateless/01097_one_more_range_reader_test.sql b/tests/queries/0_stateless/01097_one_more_range_reader_test.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01097_one_more_range_reader_test.sql rename to tests/queries/0_stateless/01097_one_more_range_reader_test.sql diff --git a/dbms/tests/queries/0_stateless/01097_pre_limit.reference b/tests/queries/0_stateless/01097_pre_limit.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01097_pre_limit.reference rename to tests/queries/0_stateless/01097_pre_limit.reference diff --git a/dbms/tests/queries/0_stateless/01097_pre_limit.sql b/tests/queries/0_stateless/01097_pre_limit.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01097_pre_limit.sql rename to tests/queries/0_stateless/01097_pre_limit.sql diff --git a/dbms/tests/queries/0_stateless/01098_sum.reference b/tests/queries/0_stateless/01098_sum.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01098_sum.reference rename to tests/queries/0_stateless/01098_sum.reference diff --git a/dbms/tests/queries/0_stateless/01098_sum.sql b/tests/queries/0_stateless/01098_sum.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01098_sum.sql rename to tests/queries/0_stateless/01098_sum.sql diff --git a/dbms/tests/queries/0_stateless/01098_temporary_and_external_tables.reference b/tests/queries/0_stateless/01098_temporary_and_external_tables.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01098_temporary_and_external_tables.reference rename to tests/queries/0_stateless/01098_temporary_and_external_tables.reference diff --git a/dbms/tests/queries/0_stateless/01098_temporary_and_external_tables.sh b/tests/queries/0_stateless/01098_temporary_and_external_tables.sh similarity index 100% rename from dbms/tests/queries/0_stateless/01098_temporary_and_external_tables.sh rename to tests/queries/0_stateless/01098_temporary_and_external_tables.sh diff --git a/dbms/tests/queries/0_stateless/01099_operators_date_and_timestamp.reference b/tests/queries/0_stateless/01099_operators_date_and_timestamp.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01099_operators_date_and_timestamp.reference rename to tests/queries/0_stateless/01099_operators_date_and_timestamp.reference diff --git a/dbms/tests/queries/0_stateless/01099_operators_date_and_timestamp.sql b/tests/queries/0_stateless/01099_operators_date_and_timestamp.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01099_operators_date_and_timestamp.sql rename to 
tests/queries/0_stateless/01099_operators_date_and_timestamp.sql diff --git a/dbms/tests/queries/0_stateless/01099_parallel_distributed_insert_select.reference b/tests/queries/0_stateless/01099_parallel_distributed_insert_select.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01099_parallel_distributed_insert_select.reference rename to tests/queries/0_stateless/01099_parallel_distributed_insert_select.reference diff --git a/dbms/tests/queries/0_stateless/01099_parallel_distributed_insert_select.sql b/tests/queries/0_stateless/01099_parallel_distributed_insert_select.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01099_parallel_distributed_insert_select.sql rename to tests/queries/0_stateless/01099_parallel_distributed_insert_select.sql diff --git a/dbms/tests/queries/0_stateless/01100_split_by_string.reference b/tests/queries/0_stateless/01100_split_by_string.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01100_split_by_string.reference rename to tests/queries/0_stateless/01100_split_by_string.reference diff --git a/dbms/tests/queries/0_stateless/01100_split_by_string.sql b/tests/queries/0_stateless/01100_split_by_string.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01100_split_by_string.sql rename to tests/queries/0_stateless/01100_split_by_string.sql diff --git a/dbms/tests/queries/0_stateless/01101_prewhere_after_alter.reference b/tests/queries/0_stateless/01101_prewhere_after_alter.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01101_prewhere_after_alter.reference rename to tests/queries/0_stateless/01101_prewhere_after_alter.reference diff --git a/dbms/tests/queries/0_stateless/01101_prewhere_after_alter.sql b/tests/queries/0_stateless/01101_prewhere_after_alter.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01101_prewhere_after_alter.sql rename to tests/queries/0_stateless/01101_prewhere_after_alter.sql diff --git a/dbms/tests/queries/0_stateless/01102_distributed_local_in_bug.reference b/tests/queries/0_stateless/01102_distributed_local_in_bug.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01102_distributed_local_in_bug.reference rename to tests/queries/0_stateless/01102_distributed_local_in_bug.reference diff --git a/dbms/tests/queries/0_stateless/01102_distributed_local_in_bug.sql b/tests/queries/0_stateless/01102_distributed_local_in_bug.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01102_distributed_local_in_bug.sql rename to tests/queries/0_stateless/01102_distributed_local_in_bug.sql diff --git a/dbms/tests/queries/0_stateless/01103_check_cpu_instructions_at_startup.reference b/tests/queries/0_stateless/01103_check_cpu_instructions_at_startup.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01103_check_cpu_instructions_at_startup.reference rename to tests/queries/0_stateless/01103_check_cpu_instructions_at_startup.reference diff --git a/dbms/tests/queries/0_stateless/01103_check_cpu_instructions_at_startup.sh b/tests/queries/0_stateless/01103_check_cpu_instructions_at_startup.sh similarity index 100% rename from dbms/tests/queries/0_stateless/01103_check_cpu_instructions_at_startup.sh rename to tests/queries/0_stateless/01103_check_cpu_instructions_at_startup.sh diff --git a/dbms/tests/queries/0_stateless/01103_distributed_product_mode_local_column_renames.reference b/tests/queries/0_stateless/01103_distributed_product_mode_local_column_renames.reference 
similarity index 100% rename from dbms/tests/queries/0_stateless/01103_distributed_product_mode_local_column_renames.reference rename to tests/queries/0_stateless/01103_distributed_product_mode_local_column_renames.reference diff --git a/dbms/tests/queries/0_stateless/01103_distributed_product_mode_local_column_renames.sql b/tests/queries/0_stateless/01103_distributed_product_mode_local_column_renames.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01103_distributed_product_mode_local_column_renames.sql rename to tests/queries/0_stateless/01103_distributed_product_mode_local_column_renames.sql diff --git a/dbms/tests/queries/0_stateless/01103_optimize_drop_race_zookeeper.reference b/tests/queries/0_stateless/01103_optimize_drop_race_zookeeper.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01103_optimize_drop_race_zookeeper.reference rename to tests/queries/0_stateless/01103_optimize_drop_race_zookeeper.reference diff --git a/dbms/tests/queries/0_stateless/01103_optimize_drop_race_zookeeper.sh b/tests/queries/0_stateless/01103_optimize_drop_race_zookeeper.sh similarity index 100% rename from dbms/tests/queries/0_stateless/01103_optimize_drop_race_zookeeper.sh rename to tests/queries/0_stateless/01103_optimize_drop_race_zookeeper.sh diff --git a/dbms/tests/queries/0_stateless/01104_distributed_numbers_test.reference b/tests/queries/0_stateless/01104_distributed_numbers_test.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01104_distributed_numbers_test.reference rename to tests/queries/0_stateless/01104_distributed_numbers_test.reference diff --git a/dbms/tests/queries/0_stateless/01104_distributed_numbers_test.sql b/tests/queries/0_stateless/01104_distributed_numbers_test.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01104_distributed_numbers_test.sql rename to tests/queries/0_stateless/01104_distributed_numbers_test.sql diff --git a/dbms/tests/queries/0_stateless/01104_distributed_one_test.reference b/tests/queries/0_stateless/01104_distributed_one_test.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01104_distributed_one_test.reference rename to tests/queries/0_stateless/01104_distributed_one_test.reference diff --git a/dbms/tests/queries/0_stateless/01104_distributed_one_test.sql b/tests/queries/0_stateless/01104_distributed_one_test.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01104_distributed_one_test.sql rename to tests/queries/0_stateless/01104_distributed_one_test.sql diff --git a/dbms/tests/queries/0_stateless/01104_fixed_string_like.reference b/tests/queries/0_stateless/01104_fixed_string_like.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01104_fixed_string_like.reference rename to tests/queries/0_stateless/01104_fixed_string_like.reference diff --git a/dbms/tests/queries/0_stateless/01104_fixed_string_like.sql b/tests/queries/0_stateless/01104_fixed_string_like.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01104_fixed_string_like.sql rename to tests/queries/0_stateless/01104_fixed_string_like.sql diff --git a/dbms/tests/queries/0_stateless/01105_string_like.reference b/tests/queries/0_stateless/01105_string_like.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01105_string_like.reference rename to tests/queries/0_stateless/01105_string_like.reference diff --git a/dbms/tests/queries/0_stateless/01105_string_like.sql 
b/tests/queries/0_stateless/01105_string_like.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01105_string_like.sql rename to tests/queries/0_stateless/01105_string_like.sql diff --git a/dbms/tests/queries/0_stateless/01106_const_fixed_string_like.reference b/tests/queries/0_stateless/01106_const_fixed_string_like.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01106_const_fixed_string_like.reference rename to tests/queries/0_stateless/01106_const_fixed_string_like.reference diff --git a/dbms/tests/queries/0_stateless/01106_const_fixed_string_like.sql b/tests/queries/0_stateless/01106_const_fixed_string_like.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01106_const_fixed_string_like.sql rename to tests/queries/0_stateless/01106_const_fixed_string_like.sql diff --git a/dbms/tests/queries/0_stateless/01200_mutations_memory_consumption.reference b/tests/queries/0_stateless/01200_mutations_memory_consumption.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01200_mutations_memory_consumption.reference rename to tests/queries/0_stateless/01200_mutations_memory_consumption.reference diff --git a/dbms/tests/queries/0_stateless/01200_mutations_memory_consumption.sql b/tests/queries/0_stateless/01200_mutations_memory_consumption.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01200_mutations_memory_consumption.sql rename to tests/queries/0_stateless/01200_mutations_memory_consumption.sql diff --git a/dbms/tests/queries/0_stateless/01201_drop_column_compact_part_replicated.reference b/tests/queries/0_stateless/01201_drop_column_compact_part_replicated.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01201_drop_column_compact_part_replicated.reference rename to tests/queries/0_stateless/01201_drop_column_compact_part_replicated.reference diff --git a/dbms/tests/queries/0_stateless/01201_drop_column_compact_part_replicated.sql b/tests/queries/0_stateless/01201_drop_column_compact_part_replicated.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01201_drop_column_compact_part_replicated.sql rename to tests/queries/0_stateless/01201_drop_column_compact_part_replicated.sql diff --git a/dbms/tests/queries/0_stateless/01202_array_auc_special.reference b/tests/queries/0_stateless/01202_array_auc_special.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01202_array_auc_special.reference rename to tests/queries/0_stateless/01202_array_auc_special.reference diff --git a/dbms/tests/queries/0_stateless/01202_array_auc_special.sql b/tests/queries/0_stateless/01202_array_auc_special.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01202_array_auc_special.sql rename to tests/queries/0_stateless/01202_array_auc_special.sql diff --git a/dbms/tests/queries/0_stateless/01210_drop_view.reference b/tests/queries/0_stateless/01210_drop_view.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01210_drop_view.reference rename to tests/queries/0_stateless/01210_drop_view.reference diff --git a/dbms/tests/queries/0_stateless/01210_drop_view.sql b/tests/queries/0_stateless/01210_drop_view.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01210_drop_view.sql rename to tests/queries/0_stateless/01210_drop_view.sql diff --git a/dbms/tests/queries/0_stateless/01211_optimize_skip_unused_shards_type_mismatch.reference 
b/tests/queries/0_stateless/01211_optimize_skip_unused_shards_type_mismatch.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01211_optimize_skip_unused_shards_type_mismatch.reference rename to tests/queries/0_stateless/01211_optimize_skip_unused_shards_type_mismatch.reference diff --git a/dbms/tests/queries/0_stateless/01211_optimize_skip_unused_shards_type_mismatch.sql b/tests/queries/0_stateless/01211_optimize_skip_unused_shards_type_mismatch.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01211_optimize_skip_unused_shards_type_mismatch.sql rename to tests/queries/0_stateless/01211_optimize_skip_unused_shards_type_mismatch.sql diff --git a/dbms/tests/queries/0_stateless/01212_empty_join_and_totals.reference b/tests/queries/0_stateless/01212_empty_join_and_totals.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01212_empty_join_and_totals.reference rename to tests/queries/0_stateless/01212_empty_join_and_totals.reference diff --git a/dbms/tests/queries/0_stateless/01212_empty_join_and_totals.sql b/tests/queries/0_stateless/01212_empty_join_and_totals.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01212_empty_join_and_totals.sql rename to tests/queries/0_stateless/01212_empty_join_and_totals.sql diff --git a/dbms/tests/queries/0_stateless/01213_optimize_skip_unused_shards_DISTINCT.reference b/tests/queries/0_stateless/01213_optimize_skip_unused_shards_DISTINCT.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01213_optimize_skip_unused_shards_DISTINCT.reference rename to tests/queries/0_stateless/01213_optimize_skip_unused_shards_DISTINCT.reference diff --git a/dbms/tests/queries/0_stateless/01213_optimize_skip_unused_shards_DISTINCT.sql b/tests/queries/0_stateless/01213_optimize_skip_unused_shards_DISTINCT.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01213_optimize_skip_unused_shards_DISTINCT.sql rename to tests/queries/0_stateless/01213_optimize_skip_unused_shards_DISTINCT.sql diff --git a/dbms/tests/queries/0_stateless/01213_point_in_Myanmar.reference b/tests/queries/0_stateless/01213_point_in_Myanmar.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01213_point_in_Myanmar.reference rename to tests/queries/0_stateless/01213_point_in_Myanmar.reference diff --git a/dbms/tests/queries/0_stateless/01213_point_in_Myanmar.sql b/tests/queries/0_stateless/01213_point_in_Myanmar.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01213_point_in_Myanmar.sql rename to tests/queries/0_stateless/01213_point_in_Myanmar.sql diff --git a/dbms/tests/queries/0_stateless/01214_point_in_Mecca.reference b/tests/queries/0_stateless/01214_point_in_Mecca.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01214_point_in_Mecca.reference rename to tests/queries/0_stateless/01214_point_in_Mecca.reference diff --git a/dbms/tests/queries/0_stateless/01214_point_in_Mecca.sql b/tests/queries/0_stateless/01214_point_in_Mecca.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01214_point_in_Mecca.sql rename to tests/queries/0_stateless/01214_point_in_Mecca.sql diff --git a/dbms/tests/queries/0_stateless/01220_scalar_optimization_in_alter.reference b/tests/queries/0_stateless/01220_scalar_optimization_in_alter.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01220_scalar_optimization_in_alter.reference rename to 
tests/queries/0_stateless/01220_scalar_optimization_in_alter.reference diff --git a/dbms/tests/queries/0_stateless/01220_scalar_optimization_in_alter.sql b/tests/queries/0_stateless/01220_scalar_optimization_in_alter.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01220_scalar_optimization_in_alter.sql rename to tests/queries/0_stateless/01220_scalar_optimization_in_alter.sql diff --git a/dbms/tests/queries/0_stateless/01221_system_settings.reference b/tests/queries/0_stateless/01221_system_settings.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01221_system_settings.reference rename to tests/queries/0_stateless/01221_system_settings.reference diff --git a/dbms/tests/queries/0_stateless/01221_system_settings.sql b/tests/queries/0_stateless/01221_system_settings.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01221_system_settings.sql rename to tests/queries/0_stateless/01221_system_settings.sql diff --git a/dbms/tests/queries/0_stateless/01230_join_get_truncate.reference b/tests/queries/0_stateless/01230_join_get_truncate.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01230_join_get_truncate.reference rename to tests/queries/0_stateless/01230_join_get_truncate.reference diff --git a/dbms/tests/queries/0_stateless/01230_join_get_truncate.sql b/tests/queries/0_stateless/01230_join_get_truncate.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01230_join_get_truncate.sql rename to tests/queries/0_stateless/01230_join_get_truncate.sql diff --git a/dbms/tests/queries/0_stateless/data_avro/complex.avro b/tests/queries/0_stateless/data_avro/complex.avro similarity index 100% rename from dbms/tests/queries/0_stateless/data_avro/complex.avro rename to tests/queries/0_stateless/data_avro/complex.avro diff --git a/dbms/tests/queries/0_stateless/data_avro/complex.avsc b/tests/queries/0_stateless/data_avro/complex.avsc similarity index 100% rename from dbms/tests/queries/0_stateless/data_avro/complex.avsc rename to tests/queries/0_stateless/data_avro/complex.avsc diff --git a/dbms/tests/queries/0_stateless/data_avro/complex.json b/tests/queries/0_stateless/data_avro/complex.json similarity index 100% rename from dbms/tests/queries/0_stateless/data_avro/complex.json rename to tests/queries/0_stateless/data_avro/complex.json diff --git a/dbms/tests/queries/0_stateless/data_avro/empty.avro b/tests/queries/0_stateless/data_avro/empty.avro similarity index 100% rename from dbms/tests/queries/0_stateless/data_avro/empty.avro rename to tests/queries/0_stateless/data_avro/empty.avro diff --git a/dbms/tests/queries/0_stateless/data_avro/empty.avsc b/tests/queries/0_stateless/data_avro/empty.avsc similarity index 100% rename from dbms/tests/queries/0_stateless/data_avro/empty.avsc rename to tests/queries/0_stateless/data_avro/empty.avsc diff --git a/dbms/tests/queries/0_stateless/data_avro/empty.json b/tests/queries/0_stateless/data_avro/empty.json similarity index 100% rename from dbms/tests/queries/0_stateless/data_avro/empty.json rename to tests/queries/0_stateless/data_avro/empty.json diff --git a/dbms/tests/queries/0_stateless/data_avro/generate_avro.sh b/tests/queries/0_stateless/data_avro/generate_avro.sh similarity index 100% rename from dbms/tests/queries/0_stateless/data_avro/generate_avro.sh rename to tests/queries/0_stateless/data_avro/generate_avro.sh diff --git a/dbms/tests/queries/0_stateless/data_avro/logical_types.avro b/tests/queries/0_stateless/data_avro/logical_types.avro 
similarity index 100% rename from dbms/tests/queries/0_stateless/data_avro/logical_types.avro rename to tests/queries/0_stateless/data_avro/logical_types.avro diff --git a/dbms/tests/queries/0_stateless/data_avro/logical_types.avsc b/tests/queries/0_stateless/data_avro/logical_types.avsc similarity index 100% rename from dbms/tests/queries/0_stateless/data_avro/logical_types.avsc rename to tests/queries/0_stateless/data_avro/logical_types.avsc diff --git a/dbms/tests/queries/0_stateless/data_avro/logical_types.json b/tests/queries/0_stateless/data_avro/logical_types.json similarity index 100% rename from dbms/tests/queries/0_stateless/data_avro/logical_types.json rename to tests/queries/0_stateless/data_avro/logical_types.json diff --git a/dbms/tests/queries/0_stateless/data_avro/primitive.avro b/tests/queries/0_stateless/data_avro/primitive.avro similarity index 100% rename from dbms/tests/queries/0_stateless/data_avro/primitive.avro rename to tests/queries/0_stateless/data_avro/primitive.avro diff --git a/dbms/tests/queries/0_stateless/data_avro/primitive.avsc b/tests/queries/0_stateless/data_avro/primitive.avsc similarity index 100% rename from dbms/tests/queries/0_stateless/data_avro/primitive.avsc rename to tests/queries/0_stateless/data_avro/primitive.avsc diff --git a/dbms/tests/queries/0_stateless/data_avro/primitive.json b/tests/queries/0_stateless/data_avro/primitive.json similarity index 100% rename from dbms/tests/queries/0_stateless/data_avro/primitive.json rename to tests/queries/0_stateless/data_avro/primitive.json diff --git a/dbms/tests/queries/0_stateless/data_avro/references.avro b/tests/queries/0_stateless/data_avro/references.avro similarity index 100% rename from dbms/tests/queries/0_stateless/data_avro/references.avro rename to tests/queries/0_stateless/data_avro/references.avro diff --git a/dbms/tests/queries/0_stateless/data_avro/references.avsc b/tests/queries/0_stateless/data_avro/references.avsc similarity index 100% rename from dbms/tests/queries/0_stateless/data_avro/references.avsc rename to tests/queries/0_stateless/data_avro/references.avsc diff --git a/dbms/tests/queries/0_stateless/data_avro/references.json b/tests/queries/0_stateless/data_avro/references.json similarity index 100% rename from dbms/tests/queries/0_stateless/data_avro/references.json rename to tests/queries/0_stateless/data_avro/references.json diff --git a/dbms/tests/queries/0_stateless/data_avro/simple.avsc b/tests/queries/0_stateless/data_avro/simple.avsc similarity index 100% rename from dbms/tests/queries/0_stateless/data_avro/simple.avsc rename to tests/queries/0_stateless/data_avro/simple.avsc diff --git a/dbms/tests/queries/0_stateless/data_avro/simple.deflate.avro b/tests/queries/0_stateless/data_avro/simple.deflate.avro similarity index 100% rename from dbms/tests/queries/0_stateless/data_avro/simple.deflate.avro rename to tests/queries/0_stateless/data_avro/simple.deflate.avro diff --git a/dbms/tests/queries/0_stateless/data_avro/simple.json b/tests/queries/0_stateless/data_avro/simple.json similarity index 100% rename from dbms/tests/queries/0_stateless/data_avro/simple.json rename to tests/queries/0_stateless/data_avro/simple.json diff --git a/dbms/tests/queries/0_stateless/data_avro/simple.null.avro b/tests/queries/0_stateless/data_avro/simple.null.avro similarity index 100% rename from dbms/tests/queries/0_stateless/data_avro/simple.null.avro rename to tests/queries/0_stateless/data_avro/simple.null.avro diff --git 
a/dbms/tests/queries/0_stateless/data_avro/simple.snappy.avro b/tests/queries/0_stateless/data_avro/simple.snappy.avro similarity index 100% rename from dbms/tests/queries/0_stateless/data_avro/simple.snappy.avro rename to tests/queries/0_stateless/data_avro/simple.snappy.avro diff --git a/dbms/tests/queries/0_stateless/data_orc/test.orc b/tests/queries/0_stateless/data_orc/test.orc similarity index 100% rename from dbms/tests/queries/0_stateless/data_orc/test.orc rename to tests/queries/0_stateless/data_orc/test.orc diff --git a/dbms/tests/queries/0_stateless/data_parquet/alltypes_dictionary.parquet b/tests/queries/0_stateless/data_parquet/alltypes_dictionary.parquet similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/alltypes_dictionary.parquet rename to tests/queries/0_stateless/data_parquet/alltypes_dictionary.parquet diff --git a/dbms/tests/queries/0_stateless/data_parquet/alltypes_dictionary.parquet.columns b/tests/queries/0_stateless/data_parquet/alltypes_dictionary.parquet.columns similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/alltypes_dictionary.parquet.columns rename to tests/queries/0_stateless/data_parquet/alltypes_dictionary.parquet.columns diff --git a/dbms/tests/queries/0_stateless/data_parquet/alltypes_plain.parquet b/tests/queries/0_stateless/data_parquet/alltypes_plain.parquet similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/alltypes_plain.parquet rename to tests/queries/0_stateless/data_parquet/alltypes_plain.parquet diff --git a/dbms/tests/queries/0_stateless/data_parquet/alltypes_plain.parquet.columns b/tests/queries/0_stateless/data_parquet/alltypes_plain.parquet.columns similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/alltypes_plain.parquet.columns rename to tests/queries/0_stateless/data_parquet/alltypes_plain.parquet.columns diff --git a/dbms/tests/queries/0_stateless/data_parquet/alltypes_plain.snappy.parquet b/tests/queries/0_stateless/data_parquet/alltypes_plain.snappy.parquet similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/alltypes_plain.snappy.parquet rename to tests/queries/0_stateless/data_parquet/alltypes_plain.snappy.parquet diff --git a/dbms/tests/queries/0_stateless/data_parquet/alltypes_plain.snappy.parquet.columns b/tests/queries/0_stateless/data_parquet/alltypes_plain.snappy.parquet.columns similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/alltypes_plain.snappy.parquet.columns rename to tests/queries/0_stateless/data_parquet/alltypes_plain.snappy.parquet.columns diff --git a/dbms/tests/queries/0_stateless/data_parquet/byte_array_decimal.parquet b/tests/queries/0_stateless/data_parquet/byte_array_decimal.parquet similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/byte_array_decimal.parquet rename to tests/queries/0_stateless/data_parquet/byte_array_decimal.parquet diff --git a/dbms/tests/queries/0_stateless/data_parquet/byte_array_decimal.parquet.columns b/tests/queries/0_stateless/data_parquet/byte_array_decimal.parquet.columns similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/byte_array_decimal.parquet.columns rename to tests/queries/0_stateless/data_parquet/byte_array_decimal.parquet.columns diff --git a/dbms/tests/queries/0_stateless/data_parquet/datapage_v2.snappy.parquet b/tests/queries/0_stateless/data_parquet/datapage_v2.snappy.parquet similarity index 100% rename from 
dbms/tests/queries/0_stateless/data_parquet/datapage_v2.snappy.parquet rename to tests/queries/0_stateless/data_parquet/datapage_v2.snappy.parquet diff --git a/dbms/tests/queries/0_stateless/data_parquet/datapage_v2.snappy.parquet.columns b/tests/queries/0_stateless/data_parquet/datapage_v2.snappy.parquet.columns similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/datapage_v2.snappy.parquet.columns rename to tests/queries/0_stateless/data_parquet/datapage_v2.snappy.parquet.columns diff --git a/dbms/tests/queries/0_stateless/data_parquet/fixed_length_decimal_1.parquet b/tests/queries/0_stateless/data_parquet/fixed_length_decimal_1.parquet similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/fixed_length_decimal_1.parquet rename to tests/queries/0_stateless/data_parquet/fixed_length_decimal_1.parquet diff --git a/dbms/tests/queries/0_stateless/data_parquet/fixed_length_decimal_1.parquet.columns b/tests/queries/0_stateless/data_parquet/fixed_length_decimal_1.parquet.columns similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/fixed_length_decimal_1.parquet.columns rename to tests/queries/0_stateless/data_parquet/fixed_length_decimal_1.parquet.columns diff --git a/dbms/tests/queries/0_stateless/data_parquet/fixed_length_decimal_legacy.parquet b/tests/queries/0_stateless/data_parquet/fixed_length_decimal_legacy.parquet similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/fixed_length_decimal_legacy.parquet rename to tests/queries/0_stateless/data_parquet/fixed_length_decimal_legacy.parquet diff --git a/dbms/tests/queries/0_stateless/data_parquet/fixed_length_decimal_legacy.parquet.columns b/tests/queries/0_stateless/data_parquet/fixed_length_decimal_legacy.parquet.columns similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/fixed_length_decimal_legacy.parquet.columns rename to tests/queries/0_stateless/data_parquet/fixed_length_decimal_legacy.parquet.columns diff --git a/dbms/tests/queries/0_stateless/data_parquet/int32_decimal.parquet b/tests/queries/0_stateless/data_parquet/int32_decimal.parquet similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/int32_decimal.parquet rename to tests/queries/0_stateless/data_parquet/int32_decimal.parquet diff --git a/dbms/tests/queries/0_stateless/data_parquet/int32_decimal.parquet.columns b/tests/queries/0_stateless/data_parquet/int32_decimal.parquet.columns similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/int32_decimal.parquet.columns rename to tests/queries/0_stateless/data_parquet/int32_decimal.parquet.columns diff --git a/dbms/tests/queries/0_stateless/data_parquet/int64_decimal.parquet b/tests/queries/0_stateless/data_parquet/int64_decimal.parquet similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/int64_decimal.parquet rename to tests/queries/0_stateless/data_parquet/int64_decimal.parquet diff --git a/dbms/tests/queries/0_stateless/data_parquet/int64_decimal.parquet.columns b/tests/queries/0_stateless/data_parquet/int64_decimal.parquet.columns similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/int64_decimal.parquet.columns rename to tests/queries/0_stateless/data_parquet/int64_decimal.parquet.columns diff --git a/dbms/tests/queries/0_stateless/data_parquet/nation.dict-malformed.parquet b/tests/queries/0_stateless/data_parquet/nation.dict-malformed.parquet similarity index 100% rename from 
dbms/tests/queries/0_stateless/data_parquet/nation.dict-malformed.parquet rename to tests/queries/0_stateless/data_parquet/nation.dict-malformed.parquet diff --git a/dbms/tests/queries/0_stateless/data_parquet/nation.dict-malformed.parquet.columns b/tests/queries/0_stateless/data_parquet/nation.dict-malformed.parquet.columns similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/nation.dict-malformed.parquet.columns rename to tests/queries/0_stateless/data_parquet/nation.dict-malformed.parquet.columns diff --git a/dbms/tests/queries/0_stateless/data_parquet/nested_lists.snappy.parquet b/tests/queries/0_stateless/data_parquet/nested_lists.snappy.parquet similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/nested_lists.snappy.parquet rename to tests/queries/0_stateless/data_parquet/nested_lists.snappy.parquet diff --git a/dbms/tests/queries/0_stateless/data_parquet/nested_lists.snappy.parquet.columns b/tests/queries/0_stateless/data_parquet/nested_lists.snappy.parquet.columns similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/nested_lists.snappy.parquet.columns rename to tests/queries/0_stateless/data_parquet/nested_lists.snappy.parquet.columns diff --git a/dbms/tests/queries/0_stateless/data_parquet/nested_maps.snappy.parquet b/tests/queries/0_stateless/data_parquet/nested_maps.snappy.parquet similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/nested_maps.snappy.parquet rename to tests/queries/0_stateless/data_parquet/nested_maps.snappy.parquet diff --git a/dbms/tests/queries/0_stateless/data_parquet/nested_maps.snappy.parquet.columns b/tests/queries/0_stateless/data_parquet/nested_maps.snappy.parquet.columns similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/nested_maps.snappy.parquet.columns rename to tests/queries/0_stateless/data_parquet/nested_maps.snappy.parquet.columns diff --git a/dbms/tests/queries/0_stateless/data_parquet/nonnullable.impala.parquet b/tests/queries/0_stateless/data_parquet/nonnullable.impala.parquet similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/nonnullable.impala.parquet rename to tests/queries/0_stateless/data_parquet/nonnullable.impala.parquet diff --git a/dbms/tests/queries/0_stateless/data_parquet/nonnullable.impala.parquet.columns b/tests/queries/0_stateless/data_parquet/nonnullable.impala.parquet.columns similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/nonnullable.impala.parquet.columns rename to tests/queries/0_stateless/data_parquet/nonnullable.impala.parquet.columns diff --git a/dbms/tests/queries/0_stateless/data_parquet/nullable.impala.parquet b/tests/queries/0_stateless/data_parquet/nullable.impala.parquet similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/nullable.impala.parquet rename to tests/queries/0_stateless/data_parquet/nullable.impala.parquet diff --git a/dbms/tests/queries/0_stateless/data_parquet/nullable.impala.parquet.columns b/tests/queries/0_stateless/data_parquet/nullable.impala.parquet.columns similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/nullable.impala.parquet.columns rename to tests/queries/0_stateless/data_parquet/nullable.impala.parquet.columns diff --git a/dbms/tests/queries/0_stateless/data_parquet/nulls.snappy.parquet b/tests/queries/0_stateless/data_parquet/nulls.snappy.parquet similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/nulls.snappy.parquet rename to 
tests/queries/0_stateless/data_parquet/nulls.snappy.parquet diff --git a/dbms/tests/queries/0_stateless/data_parquet/nulls.snappy.parquet.columns b/tests/queries/0_stateless/data_parquet/nulls.snappy.parquet.columns similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/nulls.snappy.parquet.columns rename to tests/queries/0_stateless/data_parquet/nulls.snappy.parquet.columns diff --git a/dbms/tests/queries/0_stateless/data_parquet/repeated_no_annotation.parquet b/tests/queries/0_stateless/data_parquet/repeated_no_annotation.parquet similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/repeated_no_annotation.parquet rename to tests/queries/0_stateless/data_parquet/repeated_no_annotation.parquet diff --git a/dbms/tests/queries/0_stateless/data_parquet/repeated_no_annotation.parquet.columns b/tests/queries/0_stateless/data_parquet/repeated_no_annotation.parquet.columns similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/repeated_no_annotation.parquet.columns rename to tests/queries/0_stateless/data_parquet/repeated_no_annotation.parquet.columns diff --git a/dbms/tests/queries/0_stateless/data_parquet/userdata1.parquet b/tests/queries/0_stateless/data_parquet/userdata1.parquet similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/userdata1.parquet rename to tests/queries/0_stateless/data_parquet/userdata1.parquet diff --git a/dbms/tests/queries/0_stateless/data_parquet/userdata1.parquet.columns b/tests/queries/0_stateless/data_parquet/userdata1.parquet.columns similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/userdata1.parquet.columns rename to tests/queries/0_stateless/data_parquet/userdata1.parquet.columns diff --git a/dbms/tests/queries/0_stateless/data_parquet/userdata2.parquet b/tests/queries/0_stateless/data_parquet/userdata2.parquet similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/userdata2.parquet rename to tests/queries/0_stateless/data_parquet/userdata2.parquet diff --git a/dbms/tests/queries/0_stateless/data_parquet/userdata2.parquet.columns b/tests/queries/0_stateless/data_parquet/userdata2.parquet.columns similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/userdata2.parquet.columns rename to tests/queries/0_stateless/data_parquet/userdata2.parquet.columns diff --git a/dbms/tests/queries/0_stateless/data_parquet/userdata3.parquet b/tests/queries/0_stateless/data_parquet/userdata3.parquet similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/userdata3.parquet rename to tests/queries/0_stateless/data_parquet/userdata3.parquet diff --git a/dbms/tests/queries/0_stateless/data_parquet/userdata3.parquet.columns b/tests/queries/0_stateless/data_parquet/userdata3.parquet.columns similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/userdata3.parquet.columns rename to tests/queries/0_stateless/data_parquet/userdata3.parquet.columns diff --git a/dbms/tests/queries/0_stateless/data_parquet/userdata4.parquet b/tests/queries/0_stateless/data_parquet/userdata4.parquet similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/userdata4.parquet rename to tests/queries/0_stateless/data_parquet/userdata4.parquet diff --git a/dbms/tests/queries/0_stateless/data_parquet/userdata4.parquet.columns b/tests/queries/0_stateless/data_parquet/userdata4.parquet.columns similarity index 100% rename from 
dbms/tests/queries/0_stateless/data_parquet/userdata4.parquet.columns rename to tests/queries/0_stateless/data_parquet/userdata4.parquet.columns diff --git a/dbms/tests/queries/0_stateless/data_parquet/userdata5.parquet b/tests/queries/0_stateless/data_parquet/userdata5.parquet similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/userdata5.parquet rename to tests/queries/0_stateless/data_parquet/userdata5.parquet diff --git a/dbms/tests/queries/0_stateless/data_parquet/userdata5.parquet.columns b/tests/queries/0_stateless/data_parquet/userdata5.parquet.columns similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/userdata5.parquet.columns rename to tests/queries/0_stateless/data_parquet/userdata5.parquet.columns diff --git a/dbms/tests/queries/0_stateless/data_parquet/v0.7.1.all-named-index.parquet b/tests/queries/0_stateless/data_parquet/v0.7.1.all-named-index.parquet similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/v0.7.1.all-named-index.parquet rename to tests/queries/0_stateless/data_parquet/v0.7.1.all-named-index.parquet diff --git a/dbms/tests/queries/0_stateless/data_parquet/v0.7.1.all-named-index.parquet.columns b/tests/queries/0_stateless/data_parquet/v0.7.1.all-named-index.parquet.columns similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/v0.7.1.all-named-index.parquet.columns rename to tests/queries/0_stateless/data_parquet/v0.7.1.all-named-index.parquet.columns diff --git a/dbms/tests/queries/0_stateless/data_parquet/v0.7.1.column-metadata-handling.parquet b/tests/queries/0_stateless/data_parquet/v0.7.1.column-metadata-handling.parquet similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/v0.7.1.column-metadata-handling.parquet rename to tests/queries/0_stateless/data_parquet/v0.7.1.column-metadata-handling.parquet diff --git a/dbms/tests/queries/0_stateless/data_parquet/v0.7.1.column-metadata-handling.parquet.columns b/tests/queries/0_stateless/data_parquet/v0.7.1.column-metadata-handling.parquet.columns similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/v0.7.1.column-metadata-handling.parquet.columns rename to tests/queries/0_stateless/data_parquet/v0.7.1.column-metadata-handling.parquet.columns diff --git a/dbms/tests/queries/0_stateless/data_parquet/v0.7.1.parquet b/tests/queries/0_stateless/data_parquet/v0.7.1.parquet similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/v0.7.1.parquet rename to tests/queries/0_stateless/data_parquet/v0.7.1.parquet diff --git a/dbms/tests/queries/0_stateless/data_parquet/v0.7.1.parquet.columns b/tests/queries/0_stateless/data_parquet/v0.7.1.parquet.columns similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/v0.7.1.parquet.columns rename to tests/queries/0_stateless/data_parquet/v0.7.1.parquet.columns diff --git a/dbms/tests/queries/0_stateless/data_parquet/v0.7.1.some-named-index.parquet b/tests/queries/0_stateless/data_parquet/v0.7.1.some-named-index.parquet similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/v0.7.1.some-named-index.parquet rename to tests/queries/0_stateless/data_parquet/v0.7.1.some-named-index.parquet diff --git a/dbms/tests/queries/0_stateless/data_parquet/v0.7.1.some-named-index.parquet.columns b/tests/queries/0_stateless/data_parquet/v0.7.1.some-named-index.parquet.columns similarity index 100% rename from dbms/tests/queries/0_stateless/data_parquet/v0.7.1.some-named-index.parquet.columns 
rename to tests/queries/0_stateless/data_parquet/v0.7.1.some-named-index.parquet.columns diff --git a/dbms/tests/queries/0_stateless/helpers/client.py b/tests/queries/0_stateless/helpers/client.py similarity index 100% rename from dbms/tests/queries/0_stateless/helpers/client.py rename to tests/queries/0_stateless/helpers/client.py diff --git a/dbms/tests/queries/0_stateless/helpers/httpclient.py b/tests/queries/0_stateless/helpers/httpclient.py similarity index 100% rename from dbms/tests/queries/0_stateless/helpers/httpclient.py rename to tests/queries/0_stateless/helpers/httpclient.py diff --git a/dbms/tests/queries/0_stateless/helpers/httpexpect.py b/tests/queries/0_stateless/helpers/httpexpect.py similarity index 100% rename from dbms/tests/queries/0_stateless/helpers/httpexpect.py rename to tests/queries/0_stateless/helpers/httpexpect.py diff --git a/dbms/tests/queries/0_stateless/helpers/uexpect.py b/tests/queries/0_stateless/helpers/uexpect.py similarity index 100% rename from dbms/tests/queries/0_stateless/helpers/uexpect.py rename to tests/queries/0_stateless/helpers/uexpect.py diff --git a/dbms/tests/queries/0_stateless/mergetree_mutations.lib b/tests/queries/0_stateless/mergetree_mutations.lib similarity index 100% rename from dbms/tests/queries/0_stateless/mergetree_mutations.lib rename to tests/queries/0_stateless/mergetree_mutations.lib diff --git a/dbms/tests/queries/1_stateful/00001_count_hits.reference b/tests/queries/1_stateful/00001_count_hits.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00001_count_hits.reference rename to tests/queries/1_stateful/00001_count_hits.reference diff --git a/dbms/tests/queries/1_stateful/00001_count_hits.sql b/tests/queries/1_stateful/00001_count_hits.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00001_count_hits.sql rename to tests/queries/1_stateful/00001_count_hits.sql diff --git a/dbms/tests/queries/1_stateful/00002_count_visits.reference b/tests/queries/1_stateful/00002_count_visits.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00002_count_visits.reference rename to tests/queries/1_stateful/00002_count_visits.reference diff --git a/dbms/tests/queries/1_stateful/00002_count_visits.sql b/tests/queries/1_stateful/00002_count_visits.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00002_count_visits.sql rename to tests/queries/1_stateful/00002_count_visits.sql diff --git a/dbms/tests/queries/1_stateful/00004_top_counters.reference b/tests/queries/1_stateful/00004_top_counters.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00004_top_counters.reference rename to tests/queries/1_stateful/00004_top_counters.reference diff --git a/dbms/tests/queries/1_stateful/00004_top_counters.sql b/tests/queries/1_stateful/00004_top_counters.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00004_top_counters.sql rename to tests/queries/1_stateful/00004_top_counters.sql diff --git a/dbms/tests/queries/1_stateful/00005_filtering.reference b/tests/queries/1_stateful/00005_filtering.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00005_filtering.reference rename to tests/queries/1_stateful/00005_filtering.reference diff --git a/dbms/tests/queries/1_stateful/00005_filtering.sql b/tests/queries/1_stateful/00005_filtering.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00005_filtering.sql rename to tests/queries/1_stateful/00005_filtering.sql diff --git 
a/dbms/tests/queries/1_stateful/00006_agregates.reference b/tests/queries/1_stateful/00006_agregates.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00006_agregates.reference rename to tests/queries/1_stateful/00006_agregates.reference diff --git a/dbms/tests/queries/1_stateful/00006_agregates.sql b/tests/queries/1_stateful/00006_agregates.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00006_agregates.sql rename to tests/queries/1_stateful/00006_agregates.sql diff --git a/dbms/tests/queries/1_stateful/00007_uniq.reference b/tests/queries/1_stateful/00007_uniq.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00007_uniq.reference rename to tests/queries/1_stateful/00007_uniq.reference diff --git a/dbms/tests/queries/1_stateful/00007_uniq.sql b/tests/queries/1_stateful/00007_uniq.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00007_uniq.sql rename to tests/queries/1_stateful/00007_uniq.sql diff --git a/dbms/tests/queries/1_stateful/00008_uniq.reference b/tests/queries/1_stateful/00008_uniq.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00008_uniq.reference rename to tests/queries/1_stateful/00008_uniq.reference diff --git a/dbms/tests/queries/1_stateful/00008_uniq.sql b/tests/queries/1_stateful/00008_uniq.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00008_uniq.sql rename to tests/queries/1_stateful/00008_uniq.sql diff --git a/dbms/tests/queries/1_stateful/00009_uniq_distributed.reference b/tests/queries/1_stateful/00009_uniq_distributed.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00009_uniq_distributed.reference rename to tests/queries/1_stateful/00009_uniq_distributed.reference diff --git a/dbms/tests/queries/1_stateful/00009_uniq_distributed.sql b/tests/queries/1_stateful/00009_uniq_distributed.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00009_uniq_distributed.sql rename to tests/queries/1_stateful/00009_uniq_distributed.sql diff --git a/dbms/tests/queries/1_stateful/00010_quantiles_segfault.reference b/tests/queries/1_stateful/00010_quantiles_segfault.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00010_quantiles_segfault.reference rename to tests/queries/1_stateful/00010_quantiles_segfault.reference diff --git a/dbms/tests/queries/1_stateful/00010_quantiles_segfault.sql b/tests/queries/1_stateful/00010_quantiles_segfault.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00010_quantiles_segfault.sql rename to tests/queries/1_stateful/00010_quantiles_segfault.sql diff --git a/dbms/tests/queries/1_stateful/00011_sorting.reference b/tests/queries/1_stateful/00011_sorting.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00011_sorting.reference rename to tests/queries/1_stateful/00011_sorting.reference diff --git a/dbms/tests/queries/1_stateful/00011_sorting.sql b/tests/queries/1_stateful/00011_sorting.sql similarity index 100% rename from dbms/tests/queries/1_stateful/00011_sorting.sql rename to tests/queries/1_stateful/00011_sorting.sql diff --git a/dbms/tests/queries/1_stateful/00012_sorting_distributed.reference b/tests/queries/1_stateful/00012_sorting_distributed.reference similarity index 100% rename from dbms/tests/queries/1_stateful/00012_sorting_distributed.reference rename to tests/queries/1_stateful/00012_sorting_distributed.reference diff --git a/dbms/tests/queries/1_stateful/00012_sorting_distributed.sql 
diff --git a/dbms/tests/queries/1_stateful/00012_sorting_distributed.sql b/tests/queries/1_stateful/00012_sorting_distributed.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00012_sorting_distributed.sql
rename to tests/queries/1_stateful/00012_sorting_distributed.sql
diff --git a/dbms/tests/queries/1_stateful/00013_sorting_of_nested.reference b/tests/queries/1_stateful/00013_sorting_of_nested.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00013_sorting_of_nested.reference
rename to tests/queries/1_stateful/00013_sorting_of_nested.reference
diff --git a/dbms/tests/queries/1_stateful/00013_sorting_of_nested.sql b/tests/queries/1_stateful/00013_sorting_of_nested.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00013_sorting_of_nested.sql
rename to tests/queries/1_stateful/00013_sorting_of_nested.sql
diff --git a/dbms/tests/queries/1_stateful/00014_filtering_arrays.reference b/tests/queries/1_stateful/00014_filtering_arrays.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00014_filtering_arrays.reference
rename to tests/queries/1_stateful/00014_filtering_arrays.reference
diff --git a/dbms/tests/queries/1_stateful/00014_filtering_arrays.sql b/tests/queries/1_stateful/00014_filtering_arrays.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00014_filtering_arrays.sql
rename to tests/queries/1_stateful/00014_filtering_arrays.sql
diff --git a/dbms/tests/queries/1_stateful/00015_totals_and_no_aggregate_functions.reference b/tests/queries/1_stateful/00015_totals_and_no_aggregate_functions.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00015_totals_and_no_aggregate_functions.reference
rename to tests/queries/1_stateful/00015_totals_and_no_aggregate_functions.reference
diff --git a/dbms/tests/queries/1_stateful/00015_totals_and_no_aggregate_functions.sql b/tests/queries/1_stateful/00015_totals_and_no_aggregate_functions.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00015_totals_and_no_aggregate_functions.sql
rename to tests/queries/1_stateful/00015_totals_and_no_aggregate_functions.sql
diff --git a/dbms/tests/queries/1_stateful/00016_any_if_distributed_cond_always_false.reference b/tests/queries/1_stateful/00016_any_if_distributed_cond_always_false.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00016_any_if_distributed_cond_always_false.reference
rename to tests/queries/1_stateful/00016_any_if_distributed_cond_always_false.reference
diff --git a/dbms/tests/queries/1_stateful/00016_any_if_distributed_cond_always_false.sql b/tests/queries/1_stateful/00016_any_if_distributed_cond_always_false.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00016_any_if_distributed_cond_always_false.sql
rename to tests/queries/1_stateful/00016_any_if_distributed_cond_always_false.sql
diff --git a/dbms/tests/queries/1_stateful/00017_aggregation_uninitialized_memory.reference b/tests/queries/1_stateful/00017_aggregation_uninitialized_memory.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00017_aggregation_uninitialized_memory.reference
rename to tests/queries/1_stateful/00017_aggregation_uninitialized_memory.reference
diff --git a/dbms/tests/queries/1_stateful/00017_aggregation_uninitialized_memory.sql b/tests/queries/1_stateful/00017_aggregation_uninitialized_memory.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00017_aggregation_uninitialized_memory.sql
rename to tests/queries/1_stateful/00017_aggregation_uninitialized_memory.sql
diff --git a/dbms/tests/queries/1_stateful/00020_distinct_order_by_distributed.reference b/tests/queries/1_stateful/00020_distinct_order_by_distributed.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00020_distinct_order_by_distributed.reference
rename to tests/queries/1_stateful/00020_distinct_order_by_distributed.reference
diff --git a/dbms/tests/queries/1_stateful/00020_distinct_order_by_distributed.sql b/tests/queries/1_stateful/00020_distinct_order_by_distributed.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00020_distinct_order_by_distributed.sql
rename to tests/queries/1_stateful/00020_distinct_order_by_distributed.sql
diff --git a/dbms/tests/queries/1_stateful/00021_1_select_with_in.reference b/tests/queries/1_stateful/00021_1_select_with_in.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00021_1_select_with_in.reference
rename to tests/queries/1_stateful/00021_1_select_with_in.reference
diff --git a/dbms/tests/queries/1_stateful/00021_1_select_with_in.sql b/tests/queries/1_stateful/00021_1_select_with_in.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00021_1_select_with_in.sql
rename to tests/queries/1_stateful/00021_1_select_with_in.sql
diff --git a/dbms/tests/queries/1_stateful/00021_2_select_with_in.reference b/tests/queries/1_stateful/00021_2_select_with_in.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00021_2_select_with_in.reference
rename to tests/queries/1_stateful/00021_2_select_with_in.reference
diff --git a/dbms/tests/queries/1_stateful/00021_2_select_with_in.sql b/tests/queries/1_stateful/00021_2_select_with_in.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00021_2_select_with_in.sql
rename to tests/queries/1_stateful/00021_2_select_with_in.sql
diff --git a/dbms/tests/queries/1_stateful/00021_3_select_with_in.reference b/tests/queries/1_stateful/00021_3_select_with_in.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00021_3_select_with_in.reference
rename to tests/queries/1_stateful/00021_3_select_with_in.reference
diff --git a/dbms/tests/queries/1_stateful/00021_3_select_with_in.sql b/tests/queries/1_stateful/00021_3_select_with_in.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00021_3_select_with_in.sql
rename to tests/queries/1_stateful/00021_3_select_with_in.sql
diff --git a/dbms/tests/queries/1_stateful/00022_merge_prewhere.reference b/tests/queries/1_stateful/00022_merge_prewhere.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00022_merge_prewhere.reference
rename to tests/queries/1_stateful/00022_merge_prewhere.reference
diff --git a/dbms/tests/queries/1_stateful/00022_merge_prewhere.sql b/tests/queries/1_stateful/00022_merge_prewhere.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00022_merge_prewhere.sql
rename to tests/queries/1_stateful/00022_merge_prewhere.sql
diff --git a/dbms/tests/queries/1_stateful/00023_totals_limit.reference b/tests/queries/1_stateful/00023_totals_limit.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00023_totals_limit.reference
rename to tests/queries/1_stateful/00023_totals_limit.reference
diff --git a/dbms/tests/queries/1_stateful/00023_totals_limit.sql b/tests/queries/1_stateful/00023_totals_limit.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00023_totals_limit.sql
rename to tests/queries/1_stateful/00023_totals_limit.sql
diff --git a/dbms/tests/queries/1_stateful/00024_random_counters.reference b/tests/queries/1_stateful/00024_random_counters.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00024_random_counters.reference
rename to tests/queries/1_stateful/00024_random_counters.reference
diff --git a/dbms/tests/queries/1_stateful/00024_random_counters.sql b/tests/queries/1_stateful/00024_random_counters.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00024_random_counters.sql
rename to tests/queries/1_stateful/00024_random_counters.sql
diff --git a/dbms/tests/queries/1_stateful/00030_array_enumerate_uniq.reference b/tests/queries/1_stateful/00030_array_enumerate_uniq.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00030_array_enumerate_uniq.reference
rename to tests/queries/1_stateful/00030_array_enumerate_uniq.reference
diff --git a/dbms/tests/queries/1_stateful/00030_array_enumerate_uniq.sql b/tests/queries/1_stateful/00030_array_enumerate_uniq.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00030_array_enumerate_uniq.sql
rename to tests/queries/1_stateful/00030_array_enumerate_uniq.sql
diff --git a/dbms/tests/queries/1_stateful/00031_array_enumerate_uniq.reference b/tests/queries/1_stateful/00031_array_enumerate_uniq.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00031_array_enumerate_uniq.reference
rename to tests/queries/1_stateful/00031_array_enumerate_uniq.reference
diff --git a/dbms/tests/queries/1_stateful/00031_array_enumerate_uniq.sql b/tests/queries/1_stateful/00031_array_enumerate_uniq.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00031_array_enumerate_uniq.sql
rename to tests/queries/1_stateful/00031_array_enumerate_uniq.sql
diff --git a/dbms/tests/queries/1_stateful/00032_aggregate_key64.reference b/tests/queries/1_stateful/00032_aggregate_key64.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00032_aggregate_key64.reference
rename to tests/queries/1_stateful/00032_aggregate_key64.reference
diff --git a/dbms/tests/queries/1_stateful/00032_aggregate_key64.sql b/tests/queries/1_stateful/00032_aggregate_key64.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00032_aggregate_key64.sql
rename to tests/queries/1_stateful/00032_aggregate_key64.sql
diff --git a/dbms/tests/queries/1_stateful/00033_aggregate_key_string.reference b/tests/queries/1_stateful/00033_aggregate_key_string.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00033_aggregate_key_string.reference
rename to tests/queries/1_stateful/00033_aggregate_key_string.reference
diff --git a/dbms/tests/queries/1_stateful/00033_aggregate_key_string.sql b/tests/queries/1_stateful/00033_aggregate_key_string.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00033_aggregate_key_string.sql
rename to tests/queries/1_stateful/00033_aggregate_key_string.sql
diff --git a/dbms/tests/queries/1_stateful/00034_aggregate_key_fixed_string.reference b/tests/queries/1_stateful/00034_aggregate_key_fixed_string.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00034_aggregate_key_fixed_string.reference
rename to tests/queries/1_stateful/00034_aggregate_key_fixed_string.reference
diff --git a/dbms/tests/queries/1_stateful/00034_aggregate_key_fixed_string.sql b/tests/queries/1_stateful/00034_aggregate_key_fixed_string.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00034_aggregate_key_fixed_string.sql
rename to tests/queries/1_stateful/00034_aggregate_key_fixed_string.sql
diff --git a/dbms/tests/queries/1_stateful/00035_aggregate_keys128.reference b/tests/queries/1_stateful/00035_aggregate_keys128.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00035_aggregate_keys128.reference
rename to tests/queries/1_stateful/00035_aggregate_keys128.reference
diff --git a/dbms/tests/queries/1_stateful/00035_aggregate_keys128.sql b/tests/queries/1_stateful/00035_aggregate_keys128.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00035_aggregate_keys128.sql
rename to tests/queries/1_stateful/00035_aggregate_keys128.sql
diff --git a/dbms/tests/queries/1_stateful/00036_aggregate_hashed.reference b/tests/queries/1_stateful/00036_aggregate_hashed.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00036_aggregate_hashed.reference
rename to tests/queries/1_stateful/00036_aggregate_hashed.reference
diff --git a/dbms/tests/queries/1_stateful/00036_aggregate_hashed.sql b/tests/queries/1_stateful/00036_aggregate_hashed.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00036_aggregate_hashed.sql
rename to tests/queries/1_stateful/00036_aggregate_hashed.sql
diff --git a/dbms/tests/queries/1_stateful/00037_uniq_state_merge1.reference b/tests/queries/1_stateful/00037_uniq_state_merge1.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00037_uniq_state_merge1.reference
rename to tests/queries/1_stateful/00037_uniq_state_merge1.reference
diff --git a/dbms/tests/queries/1_stateful/00037_uniq_state_merge1.sql b/tests/queries/1_stateful/00037_uniq_state_merge1.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00037_uniq_state_merge1.sql
rename to tests/queries/1_stateful/00037_uniq_state_merge1.sql
diff --git a/dbms/tests/queries/1_stateful/00038_uniq_state_merge2.reference b/tests/queries/1_stateful/00038_uniq_state_merge2.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00038_uniq_state_merge2.reference
rename to tests/queries/1_stateful/00038_uniq_state_merge2.reference
diff --git a/dbms/tests/queries/1_stateful/00038_uniq_state_merge2.sql b/tests/queries/1_stateful/00038_uniq_state_merge2.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00038_uniq_state_merge2.sql
rename to tests/queries/1_stateful/00038_uniq_state_merge2.sql
diff --git a/dbms/tests/queries/1_stateful/00039_primary_key.reference b/tests/queries/1_stateful/00039_primary_key.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00039_primary_key.reference
rename to tests/queries/1_stateful/00039_primary_key.reference
diff --git a/dbms/tests/queries/1_stateful/00039_primary_key.sql b/tests/queries/1_stateful/00039_primary_key.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00039_primary_key.sql
rename to tests/queries/1_stateful/00039_primary_key.sql
diff --git a/dbms/tests/queries/1_stateful/00040_aggregating_materialized_view.reference b/tests/queries/1_stateful/00040_aggregating_materialized_view.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00040_aggregating_materialized_view.reference
rename to tests/queries/1_stateful/00040_aggregating_materialized_view.reference
diff --git a/dbms/tests/queries/1_stateful/00040_aggregating_materialized_view.sql b/tests/queries/1_stateful/00040_aggregating_materialized_view.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00040_aggregating_materialized_view.sql
rename to tests/queries/1_stateful/00040_aggregating_materialized_view.sql
diff --git a/dbms/tests/queries/1_stateful/00041_aggregating_materialized_view.reference b/tests/queries/1_stateful/00041_aggregating_materialized_view.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00041_aggregating_materialized_view.reference
rename to tests/queries/1_stateful/00041_aggregating_materialized_view.reference
diff --git a/dbms/tests/queries/1_stateful/00041_aggregating_materialized_view.sql b/tests/queries/1_stateful/00041_aggregating_materialized_view.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00041_aggregating_materialized_view.sql
rename to tests/queries/1_stateful/00041_aggregating_materialized_view.sql
diff --git a/dbms/tests/queries/1_stateful/00042_any_left_join.reference b/tests/queries/1_stateful/00042_any_left_join.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00042_any_left_join.reference
rename to tests/queries/1_stateful/00042_any_left_join.reference
diff --git a/dbms/tests/queries/1_stateful/00042_any_left_join.sql b/tests/queries/1_stateful/00042_any_left_join.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00042_any_left_join.sql
rename to tests/queries/1_stateful/00042_any_left_join.sql
diff --git a/dbms/tests/queries/1_stateful/00043_any_left_join.reference b/tests/queries/1_stateful/00043_any_left_join.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00043_any_left_join.reference
rename to tests/queries/1_stateful/00043_any_left_join.reference
diff --git a/dbms/tests/queries/1_stateful/00043_any_left_join.sql b/tests/queries/1_stateful/00043_any_left_join.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00043_any_left_join.sql
rename to tests/queries/1_stateful/00043_any_left_join.sql
diff --git a/dbms/tests/queries/1_stateful/00044_any_left_join_string.reference b/tests/queries/1_stateful/00044_any_left_join_string.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00044_any_left_join_string.reference
rename to tests/queries/1_stateful/00044_any_left_join_string.reference
diff --git a/dbms/tests/queries/1_stateful/00044_any_left_join_string.sql b/tests/queries/1_stateful/00044_any_left_join_string.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00044_any_left_join_string.sql
rename to tests/queries/1_stateful/00044_any_left_join_string.sql
diff --git a/dbms/tests/queries/1_stateful/00045_uniq_upto.reference b/tests/queries/1_stateful/00045_uniq_upto.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00045_uniq_upto.reference
rename to tests/queries/1_stateful/00045_uniq_upto.reference
diff --git a/dbms/tests/queries/1_stateful/00045_uniq_upto.sql b/tests/queries/1_stateful/00045_uniq_upto.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00045_uniq_upto.sql
rename to tests/queries/1_stateful/00045_uniq_upto.sql
diff --git a/dbms/tests/queries/1_stateful/00046_uniq_upto_distributed.reference b/tests/queries/1_stateful/00046_uniq_upto_distributed.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00046_uniq_upto_distributed.reference
rename to tests/queries/1_stateful/00046_uniq_upto_distributed.reference
diff --git a/dbms/tests/queries/1_stateful/00046_uniq_upto_distributed.sql b/tests/queries/1_stateful/00046_uniq_upto_distributed.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00046_uniq_upto_distributed.sql
rename to tests/queries/1_stateful/00046_uniq_upto_distributed.sql
diff --git a/dbms/tests/queries/1_stateful/00047_bar.reference b/tests/queries/1_stateful/00047_bar.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00047_bar.reference
rename to tests/queries/1_stateful/00047_bar.reference
diff --git a/dbms/tests/queries/1_stateful/00047_bar.sql b/tests/queries/1_stateful/00047_bar.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00047_bar.sql
rename to tests/queries/1_stateful/00047_bar.sql
diff --git a/dbms/tests/queries/1_stateful/00048_min_max.reference b/tests/queries/1_stateful/00048_min_max.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00048_min_max.reference
rename to tests/queries/1_stateful/00048_min_max.reference
diff --git a/dbms/tests/queries/1_stateful/00048_min_max.sql b/tests/queries/1_stateful/00048_min_max.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00048_min_max.sql
rename to tests/queries/1_stateful/00048_min_max.sql
diff --git a/dbms/tests/queries/1_stateful/00049_max_string_if.reference b/tests/queries/1_stateful/00049_max_string_if.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00049_max_string_if.reference
rename to tests/queries/1_stateful/00049_max_string_if.reference
diff --git a/dbms/tests/queries/1_stateful/00049_max_string_if.sql b/tests/queries/1_stateful/00049_max_string_if.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00049_max_string_if.sql
rename to tests/queries/1_stateful/00049_max_string_if.sql
diff --git a/dbms/tests/queries/1_stateful/00050_min_max.reference b/tests/queries/1_stateful/00050_min_max.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00050_min_max.reference
rename to tests/queries/1_stateful/00050_min_max.reference
diff --git a/dbms/tests/queries/1_stateful/00050_min_max.sql b/tests/queries/1_stateful/00050_min_max.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00050_min_max.sql
rename to tests/queries/1_stateful/00050_min_max.sql
diff --git a/dbms/tests/queries/1_stateful/00051_min_max_array.reference b/tests/queries/1_stateful/00051_min_max_array.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00051_min_max_array.reference
rename to tests/queries/1_stateful/00051_min_max_array.reference
diff --git a/dbms/tests/queries/1_stateful/00051_min_max_array.sql b/tests/queries/1_stateful/00051_min_max_array.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00051_min_max_array.sql
rename to tests/queries/1_stateful/00051_min_max_array.sql
diff --git a/dbms/tests/queries/1_stateful/00052_group_by_in.reference b/tests/queries/1_stateful/00052_group_by_in.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00052_group_by_in.reference
rename to tests/queries/1_stateful/00052_group_by_in.reference
diff --git a/dbms/tests/queries/1_stateful/00052_group_by_in.sql b/tests/queries/1_stateful/00052_group_by_in.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00052_group_by_in.sql
rename to tests/queries/1_stateful/00052_group_by_in.sql
diff --git a/dbms/tests/queries/1_stateful/00053_replicate_segfault.reference b/tests/queries/1_stateful/00053_replicate_segfault.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00053_replicate_segfault.reference
rename to tests/queries/1_stateful/00053_replicate_segfault.reference
diff --git a/dbms/tests/queries/1_stateful/00053_replicate_segfault.sql b/tests/queries/1_stateful/00053_replicate_segfault.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00053_replicate_segfault.sql
rename to tests/queries/1_stateful/00053_replicate_segfault.sql
diff --git a/dbms/tests/queries/1_stateful/00054_merge_tree_partitions.reference b/tests/queries/1_stateful/00054_merge_tree_partitions.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00054_merge_tree_partitions.reference
rename to tests/queries/1_stateful/00054_merge_tree_partitions.reference
diff --git a/dbms/tests/queries/1_stateful/00054_merge_tree_partitions.sql b/tests/queries/1_stateful/00054_merge_tree_partitions.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00054_merge_tree_partitions.sql
rename to tests/queries/1_stateful/00054_merge_tree_partitions.sql
diff --git a/dbms/tests/queries/1_stateful/00055_index_and_not.reference b/tests/queries/1_stateful/00055_index_and_not.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00055_index_and_not.reference
rename to tests/queries/1_stateful/00055_index_and_not.reference
diff --git a/dbms/tests/queries/1_stateful/00055_index_and_not.sql b/tests/queries/1_stateful/00055_index_and_not.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00055_index_and_not.sql
rename to tests/queries/1_stateful/00055_index_and_not.sql
diff --git a/dbms/tests/queries/1_stateful/00056_view.reference b/tests/queries/1_stateful/00056_view.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00056_view.reference
rename to tests/queries/1_stateful/00056_view.reference
diff --git a/dbms/tests/queries/1_stateful/00056_view.sql b/tests/queries/1_stateful/00056_view.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00056_view.sql
rename to tests/queries/1_stateful/00056_view.sql
diff --git a/dbms/tests/queries/1_stateful/00059_merge_sorting_empty_array_joined.reference b/tests/queries/1_stateful/00059_merge_sorting_empty_array_joined.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00059_merge_sorting_empty_array_joined.reference
rename to tests/queries/1_stateful/00059_merge_sorting_empty_array_joined.reference
diff --git a/dbms/tests/queries/1_stateful/00059_merge_sorting_empty_array_joined.sql b/tests/queries/1_stateful/00059_merge_sorting_empty_array_joined.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00059_merge_sorting_empty_array_joined.sql
rename to tests/queries/1_stateful/00059_merge_sorting_empty_array_joined.sql
diff --git a/dbms/tests/queries/1_stateful/00060_move_to_prewhere_and_sets.reference b/tests/queries/1_stateful/00060_move_to_prewhere_and_sets.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00060_move_to_prewhere_and_sets.reference
rename to tests/queries/1_stateful/00060_move_to_prewhere_and_sets.reference
diff --git a/dbms/tests/queries/1_stateful/00060_move_to_prewhere_and_sets.sql b/tests/queries/1_stateful/00060_move_to_prewhere_and_sets.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00060_move_to_prewhere_and_sets.sql
rename to tests/queries/1_stateful/00060_move_to_prewhere_and_sets.sql
diff --git a/dbms/tests/queries/1_stateful/00061_storage_buffer.reference b/tests/queries/1_stateful/00061_storage_buffer.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00061_storage_buffer.reference
rename to tests/queries/1_stateful/00061_storage_buffer.reference
diff --git a/dbms/tests/queries/1_stateful/00061_storage_buffer.sql b/tests/queries/1_stateful/00061_storage_buffer.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00061_storage_buffer.sql
rename to tests/queries/1_stateful/00061_storage_buffer.sql
diff --git a/dbms/tests/queries/1_stateful/00062_loyalty.reference b/tests/queries/1_stateful/00062_loyalty.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00062_loyalty.reference
rename to tests/queries/1_stateful/00062_loyalty.reference
diff --git a/dbms/tests/queries/1_stateful/00062_loyalty.sql b/tests/queries/1_stateful/00062_loyalty.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00062_loyalty.sql
rename to tests/queries/1_stateful/00062_loyalty.sql
diff --git a/dbms/tests/queries/1_stateful/00063_loyalty_joins.reference b/tests/queries/1_stateful/00063_loyalty_joins.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00063_loyalty_joins.reference
rename to tests/queries/1_stateful/00063_loyalty_joins.reference
diff --git a/dbms/tests/queries/1_stateful/00063_loyalty_joins.sql b/tests/queries/1_stateful/00063_loyalty_joins.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00063_loyalty_joins.sql
rename to tests/queries/1_stateful/00063_loyalty_joins.sql
diff --git a/dbms/tests/queries/1_stateful/00065_loyalty_with_storage_join.reference b/tests/queries/1_stateful/00065_loyalty_with_storage_join.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00065_loyalty_with_storage_join.reference
rename to tests/queries/1_stateful/00065_loyalty_with_storage_join.reference
diff --git a/dbms/tests/queries/1_stateful/00065_loyalty_with_storage_join.sql b/tests/queries/1_stateful/00065_loyalty_with_storage_join.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00065_loyalty_with_storage_join.sql
rename to tests/queries/1_stateful/00065_loyalty_with_storage_join.sql
diff --git a/dbms/tests/queries/1_stateful/00066_sorting_distributed_many_replicas.reference b/tests/queries/1_stateful/00066_sorting_distributed_many_replicas.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00066_sorting_distributed_many_replicas.reference
rename to tests/queries/1_stateful/00066_sorting_distributed_many_replicas.reference
diff --git a/dbms/tests/queries/1_stateful/00066_sorting_distributed_many_replicas.sql b/tests/queries/1_stateful/00066_sorting_distributed_many_replicas.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00066_sorting_distributed_many_replicas.sql
rename to tests/queries/1_stateful/00066_sorting_distributed_many_replicas.sql
diff --git a/dbms/tests/queries/1_stateful/00067_union_all.reference b/tests/queries/1_stateful/00067_union_all.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00067_union_all.reference
rename to tests/queries/1_stateful/00067_union_all.reference
diff --git a/dbms/tests/queries/1_stateful/00067_union_all.sql b/tests/queries/1_stateful/00067_union_all.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00067_union_all.sql
rename to tests/queries/1_stateful/00067_union_all.sql
diff --git a/dbms/tests/queries/1_stateful/00068_subquery_in_prewhere.reference b/tests/queries/1_stateful/00068_subquery_in_prewhere.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00068_subquery_in_prewhere.reference
rename to tests/queries/1_stateful/00068_subquery_in_prewhere.reference
diff --git a/dbms/tests/queries/1_stateful/00068_subquery_in_prewhere.sql b/tests/queries/1_stateful/00068_subquery_in_prewhere.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00068_subquery_in_prewhere.sql
rename to tests/queries/1_stateful/00068_subquery_in_prewhere.sql
diff --git a/dbms/tests/queries/1_stateful/00069_duplicate_aggregation_keys.reference b/tests/queries/1_stateful/00069_duplicate_aggregation_keys.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00069_duplicate_aggregation_keys.reference
rename to tests/queries/1_stateful/00069_duplicate_aggregation_keys.reference
diff --git a/dbms/tests/queries/1_stateful/00069_duplicate_aggregation_keys.sql b/tests/queries/1_stateful/00069_duplicate_aggregation_keys.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00069_duplicate_aggregation_keys.sql
rename to tests/queries/1_stateful/00069_duplicate_aggregation_keys.sql
diff --git a/dbms/tests/queries/1_stateful/00071_merge_tree_optimize_aio.reference b/tests/queries/1_stateful/00071_merge_tree_optimize_aio.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00071_merge_tree_optimize_aio.reference
rename to tests/queries/1_stateful/00071_merge_tree_optimize_aio.reference
diff --git a/dbms/tests/queries/1_stateful/00071_merge_tree_optimize_aio.sql b/tests/queries/1_stateful/00071_merge_tree_optimize_aio.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00071_merge_tree_optimize_aio.sql
rename to tests/queries/1_stateful/00071_merge_tree_optimize_aio.sql
diff --git a/dbms/tests/queries/1_stateful/00072_compare_date_and_string_index.reference b/tests/queries/1_stateful/00072_compare_date_and_string_index.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00072_compare_date_and_string_index.reference
rename to tests/queries/1_stateful/00072_compare_date_and_string_index.reference
diff --git a/dbms/tests/queries/1_stateful/00072_compare_date_and_string_index.sql b/tests/queries/1_stateful/00072_compare_date_and_string_index.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00072_compare_date_and_string_index.sql
rename to tests/queries/1_stateful/00072_compare_date_and_string_index.sql
diff --git a/dbms/tests/queries/1_stateful/00073_uniq_array.reference b/tests/queries/1_stateful/00073_uniq_array.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00073_uniq_array.reference
rename to tests/queries/1_stateful/00073_uniq_array.reference
diff --git a/dbms/tests/queries/1_stateful/00073_uniq_array.sql b/tests/queries/1_stateful/00073_uniq_array.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00073_uniq_array.sql
rename to tests/queries/1_stateful/00073_uniq_array.sql
diff --git a/dbms/tests/queries/1_stateful/00074_full_join.reference b/tests/queries/1_stateful/00074_full_join.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00074_full_join.reference
rename to tests/queries/1_stateful/00074_full_join.reference
diff --git a/dbms/tests/queries/1_stateful/00074_full_join.sql b/tests/queries/1_stateful/00074_full_join.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00074_full_join.sql
rename to tests/queries/1_stateful/00074_full_join.sql
diff --git a/dbms/tests/queries/1_stateful/00075_left_array_join.reference b/tests/queries/1_stateful/00075_left_array_join.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00075_left_array_join.reference
rename to tests/queries/1_stateful/00075_left_array_join.reference
diff --git a/dbms/tests/queries/1_stateful/00075_left_array_join.sql b/tests/queries/1_stateful/00075_left_array_join.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00075_left_array_join.sql
rename to tests/queries/1_stateful/00075_left_array_join.sql
diff --git a/dbms/tests/queries/1_stateful/00076_system_columns_bytes.reference b/tests/queries/1_stateful/00076_system_columns_bytes.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00076_system_columns_bytes.reference
rename to tests/queries/1_stateful/00076_system_columns_bytes.reference
diff --git a/dbms/tests/queries/1_stateful/00076_system_columns_bytes.sql b/tests/queries/1_stateful/00076_system_columns_bytes.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00076_system_columns_bytes.sql
rename to tests/queries/1_stateful/00076_system_columns_bytes.sql
diff --git a/dbms/tests/queries/1_stateful/00077_log_tinylog_stripelog.reference b/tests/queries/1_stateful/00077_log_tinylog_stripelog.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00077_log_tinylog_stripelog.reference
rename to tests/queries/1_stateful/00077_log_tinylog_stripelog.reference
diff --git a/dbms/tests/queries/1_stateful/00077_log_tinylog_stripelog.sql b/tests/queries/1_stateful/00077_log_tinylog_stripelog.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00077_log_tinylog_stripelog.sql
rename to tests/queries/1_stateful/00077_log_tinylog_stripelog.sql
diff --git a/dbms/tests/queries/1_stateful/00078_group_by_arrays.reference b/tests/queries/1_stateful/00078_group_by_arrays.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00078_group_by_arrays.reference
rename to tests/queries/1_stateful/00078_group_by_arrays.reference
diff --git a/dbms/tests/queries/1_stateful/00078_group_by_arrays.sql b/tests/queries/1_stateful/00078_group_by_arrays.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00078_group_by_arrays.sql
rename to tests/queries/1_stateful/00078_group_by_arrays.sql
diff --git a/dbms/tests/queries/1_stateful/00079_array_join_not_used_joined_column.reference b/tests/queries/1_stateful/00079_array_join_not_used_joined_column.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00079_array_join_not_used_joined_column.reference
rename to tests/queries/1_stateful/00079_array_join_not_used_joined_column.reference
diff --git a/dbms/tests/queries/1_stateful/00079_array_join_not_used_joined_column.sql b/tests/queries/1_stateful/00079_array_join_not_used_joined_column.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00079_array_join_not_used_joined_column.sql
rename to tests/queries/1_stateful/00079_array_join_not_used_joined_column.sql
diff --git a/dbms/tests/queries/1_stateful/00080_array_join_and_union.reference b/tests/queries/1_stateful/00080_array_join_and_union.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00080_array_join_and_union.reference
rename to tests/queries/1_stateful/00080_array_join_and_union.reference
diff --git a/dbms/tests/queries/1_stateful/00080_array_join_and_union.sql b/tests/queries/1_stateful/00080_array_join_and_union.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00080_array_join_and_union.sql
rename to tests/queries/1_stateful/00080_array_join_and_union.sql
diff --git a/dbms/tests/queries/1_stateful/00081_group_by_without_key_and_totals.reference b/tests/queries/1_stateful/00081_group_by_without_key_and_totals.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00081_group_by_without_key_and_totals.reference
rename to tests/queries/1_stateful/00081_group_by_without_key_and_totals.reference
diff --git a/dbms/tests/queries/1_stateful/00081_group_by_without_key_and_totals.sql b/tests/queries/1_stateful/00081_group_by_without_key_and_totals.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00081_group_by_without_key_and_totals.sql
rename to tests/queries/1_stateful/00081_group_by_without_key_and_totals.sql
diff --git a/dbms/tests/queries/1_stateful/00082_quantiles.reference b/tests/queries/1_stateful/00082_quantiles.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00082_quantiles.reference
rename to tests/queries/1_stateful/00082_quantiles.reference
diff --git a/dbms/tests/queries/1_stateful/00082_quantiles.sql b/tests/queries/1_stateful/00082_quantiles.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00082_quantiles.sql
rename to tests/queries/1_stateful/00082_quantiles.sql
diff --git a/dbms/tests/queries/1_stateful/00083_array_filter.reference b/tests/queries/1_stateful/00083_array_filter.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00083_array_filter.reference
rename to tests/queries/1_stateful/00083_array_filter.reference
diff --git a/dbms/tests/queries/1_stateful/00083_array_filter.sql b/tests/queries/1_stateful/00083_array_filter.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00083_array_filter.sql
rename to tests/queries/1_stateful/00083_array_filter.sql
diff --git a/dbms/tests/queries/1_stateful/00084_external_aggregation.reference b/tests/queries/1_stateful/00084_external_aggregation.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00084_external_aggregation.reference
rename to tests/queries/1_stateful/00084_external_aggregation.reference
diff --git a/dbms/tests/queries/1_stateful/00084_external_aggregation.sql b/tests/queries/1_stateful/00084_external_aggregation.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00084_external_aggregation.sql
rename to tests/queries/1_stateful/00084_external_aggregation.sql
diff --git a/dbms/tests/queries/1_stateful/00085_monotonic_evaluation_segfault.reference b/tests/queries/1_stateful/00085_monotonic_evaluation_segfault.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00085_monotonic_evaluation_segfault.reference
rename to tests/queries/1_stateful/00085_monotonic_evaluation_segfault.reference
diff --git a/dbms/tests/queries/1_stateful/00085_monotonic_evaluation_segfault.sql b/tests/queries/1_stateful/00085_monotonic_evaluation_segfault.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00085_monotonic_evaluation_segfault.sql
rename to tests/queries/1_stateful/00085_monotonic_evaluation_segfault.sql
diff --git a/dbms/tests/queries/1_stateful/00086_array_reduce.reference b/tests/queries/1_stateful/00086_array_reduce.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00086_array_reduce.reference
rename to tests/queries/1_stateful/00086_array_reduce.reference
diff --git a/dbms/tests/queries/1_stateful/00086_array_reduce.sql b/tests/queries/1_stateful/00086_array_reduce.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00086_array_reduce.sql
rename to tests/queries/1_stateful/00086_array_reduce.sql
diff --git a/dbms/tests/queries/1_stateful/00087_where_0.reference b/tests/queries/1_stateful/00087_where_0.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00087_where_0.reference
rename to tests/queries/1_stateful/00087_where_0.reference
diff --git a/dbms/tests/queries/1_stateful/00087_where_0.sql b/tests/queries/1_stateful/00087_where_0.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00087_where_0.sql
rename to tests/queries/1_stateful/00087_where_0.sql
diff --git a/dbms/tests/queries/1_stateful/00088_global_in_one_shard_and_rows_before_limit.reference b/tests/queries/1_stateful/00088_global_in_one_shard_and_rows_before_limit.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00088_global_in_one_shard_and_rows_before_limit.reference
rename to tests/queries/1_stateful/00088_global_in_one_shard_and_rows_before_limit.reference
diff --git a/dbms/tests/queries/1_stateful/00088_global_in_one_shard_and_rows_before_limit.sql b/tests/queries/1_stateful/00088_global_in_one_shard_and_rows_before_limit.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00088_global_in_one_shard_and_rows_before_limit.sql
rename to tests/queries/1_stateful/00088_global_in_one_shard_and_rows_before_limit.sql
diff --git a/dbms/tests/queries/1_stateful/00089_position_functions_with_non_constant_arg.reference b/tests/queries/1_stateful/00089_position_functions_with_non_constant_arg.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00089_position_functions_with_non_constant_arg.reference
rename to tests/queries/1_stateful/00089_position_functions_with_non_constant_arg.reference
diff --git a/dbms/tests/queries/1_stateful/00089_position_functions_with_non_constant_arg.sql b/tests/queries/1_stateful/00089_position_functions_with_non_constant_arg.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00089_position_functions_with_non_constant_arg.sql
rename to tests/queries/1_stateful/00089_position_functions_with_non_constant_arg.sql
diff --git a/dbms/tests/queries/1_stateful/00090_thread_pool_deadlock.reference b/tests/queries/1_stateful/00090_thread_pool_deadlock.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00090_thread_pool_deadlock.reference
rename to tests/queries/1_stateful/00090_thread_pool_deadlock.reference
diff --git a/dbms/tests/queries/1_stateful/00090_thread_pool_deadlock.sh b/tests/queries/1_stateful/00090_thread_pool_deadlock.sh
similarity index 100%
rename from dbms/tests/queries/1_stateful/00090_thread_pool_deadlock.sh
rename to tests/queries/1_stateful/00090_thread_pool_deadlock.sh
diff --git a/dbms/tests/queries/1_stateful/00091_prewhere_two_conditions.reference b/tests/queries/1_stateful/00091_prewhere_two_conditions.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00091_prewhere_two_conditions.reference
rename to tests/queries/1_stateful/00091_prewhere_two_conditions.reference
diff --git a/dbms/tests/queries/1_stateful/00091_prewhere_two_conditions.sql b/tests/queries/1_stateful/00091_prewhere_two_conditions.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00091_prewhere_two_conditions.sql
rename to tests/queries/1_stateful/00091_prewhere_two_conditions.sql
diff --git a/dbms/tests/queries/1_stateful/00092_obfuscator.reference b/tests/queries/1_stateful/00092_obfuscator.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00092_obfuscator.reference
rename to tests/queries/1_stateful/00092_obfuscator.reference
diff --git a/dbms/tests/queries/1_stateful/00092_obfuscator.sh b/tests/queries/1_stateful/00092_obfuscator.sh
similarity index 100%
rename from dbms/tests/queries/1_stateful/00092_obfuscator.sh
rename to tests/queries/1_stateful/00092_obfuscator.sh
diff --git a/dbms/tests/queries/1_stateful/00139_like.reference b/tests/queries/1_stateful/00139_like.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00139_like.reference
rename to tests/queries/1_stateful/00139_like.reference
diff --git a/dbms/tests/queries/1_stateful/00139_like.sql b/tests/queries/1_stateful/00139_like.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00139_like.sql
rename to tests/queries/1_stateful/00139_like.sql
diff --git a/dbms/tests/queries/1_stateful/00140_rename.reference b/tests/queries/1_stateful/00140_rename.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00140_rename.reference
rename to tests/queries/1_stateful/00140_rename.reference
diff --git a/dbms/tests/queries/1_stateful/00140_rename.sql b/tests/queries/1_stateful/00140_rename.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00140_rename.sql
rename to tests/queries/1_stateful/00140_rename.sql
diff --git a/dbms/tests/queries/1_stateful/00141_transform.reference b/tests/queries/1_stateful/00141_transform.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00141_transform.reference
rename to tests/queries/1_stateful/00141_transform.reference
diff --git a/dbms/tests/queries/1_stateful/00141_transform.sql b/tests/queries/1_stateful/00141_transform.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00141_transform.sql
rename to tests/queries/1_stateful/00141_transform.sql
diff --git a/dbms/tests/queries/1_stateful/00142_system_columns.reference b/tests/queries/1_stateful/00142_system_columns.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00142_system_columns.reference
rename to tests/queries/1_stateful/00142_system_columns.reference
diff --git a/dbms/tests/queries/1_stateful/00142_system_columns.sql b/tests/queries/1_stateful/00142_system_columns.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00142_system_columns.sql
rename to tests/queries/1_stateful/00142_system_columns.sql
diff --git a/dbms/tests/queries/1_stateful/00143_transform_non_const_default.reference b/tests/queries/1_stateful/00143_transform_non_const_default.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00143_transform_non_const_default.reference
rename to tests/queries/1_stateful/00143_transform_non_const_default.reference
diff --git a/dbms/tests/queries/1_stateful/00143_transform_non_const_default.sql b/tests/queries/1_stateful/00143_transform_non_const_default.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00143_transform_non_const_default.sql
rename to tests/queries/1_stateful/00143_transform_non_const_default.sql
diff --git a/dbms/tests/queries/1_stateful/00144_functions_of_aggregation_states.reference b/tests/queries/1_stateful/00144_functions_of_aggregation_states.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00144_functions_of_aggregation_states.reference
rename to tests/queries/1_stateful/00144_functions_of_aggregation_states.reference
diff --git a/dbms/tests/queries/1_stateful/00144_functions_of_aggregation_states.sql b/tests/queries/1_stateful/00144_functions_of_aggregation_states.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00144_functions_of_aggregation_states.sql
rename to tests/queries/1_stateful/00144_functions_of_aggregation_states.sql
diff --git a/dbms/tests/queries/1_stateful/00145_aggregate_functions_statistics.reference b/tests/queries/1_stateful/00145_aggregate_functions_statistics.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00145_aggregate_functions_statistics.reference
rename to tests/queries/1_stateful/00145_aggregate_functions_statistics.reference
diff --git a/dbms/tests/queries/1_stateful/00145_aggregate_functions_statistics.sql b/tests/queries/1_stateful/00145_aggregate_functions_statistics.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00145_aggregate_functions_statistics.sql
rename to tests/queries/1_stateful/00145_aggregate_functions_statistics.sql
diff --git a/dbms/tests/queries/1_stateful/00146_aggregate_function_uniq.reference b/tests/queries/1_stateful/00146_aggregate_function_uniq.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00146_aggregate_function_uniq.reference
rename to tests/queries/1_stateful/00146_aggregate_function_uniq.reference
diff --git a/dbms/tests/queries/1_stateful/00146_aggregate_function_uniq.sql b/tests/queries/1_stateful/00146_aggregate_function_uniq.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00146_aggregate_function_uniq.sql
rename to tests/queries/1_stateful/00146_aggregate_function_uniq.sql
diff --git a/dbms/tests/queries/1_stateful/00147_global_in_aggregate_function.reference b/tests/queries/1_stateful/00147_global_in_aggregate_function.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00147_global_in_aggregate_function.reference
rename to tests/queries/1_stateful/00147_global_in_aggregate_function.reference
diff --git a/dbms/tests/queries/1_stateful/00147_global_in_aggregate_function.sql b/tests/queries/1_stateful/00147_global_in_aggregate_function.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00147_global_in_aggregate_function.sql
rename to tests/queries/1_stateful/00147_global_in_aggregate_function.sql
diff --git a/dbms/tests/queries/1_stateful/00148_monotonic_functions_and_index.reference b/tests/queries/1_stateful/00148_monotonic_functions_and_index.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00148_monotonic_functions_and_index.reference
rename to tests/queries/1_stateful/00148_monotonic_functions_and_index.reference
diff --git a/dbms/tests/queries/1_stateful/00148_monotonic_functions_and_index.sql b/tests/queries/1_stateful/00148_monotonic_functions_and_index.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00148_monotonic_functions_and_index.sql
rename to tests/queries/1_stateful/00148_monotonic_functions_and_index.sql
diff --git a/dbms/tests/queries/1_stateful/00149_quantiles_timing_distributed.reference b/tests/queries/1_stateful/00149_quantiles_timing_distributed.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00149_quantiles_timing_distributed.reference
rename to tests/queries/1_stateful/00149_quantiles_timing_distributed.reference
diff --git a/dbms/tests/queries/1_stateful/00149_quantiles_timing_distributed.sql b/tests/queries/1_stateful/00149_quantiles_timing_distributed.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00149_quantiles_timing_distributed.sql
rename to tests/queries/1_stateful/00149_quantiles_timing_distributed.sql
diff --git a/dbms/tests/queries/1_stateful/00150_quantiles_timing_precision.reference b/tests/queries/1_stateful/00150_quantiles_timing_precision.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00150_quantiles_timing_precision.reference
rename to tests/queries/1_stateful/00150_quantiles_timing_precision.reference
diff --git a/dbms/tests/queries/1_stateful/00150_quantiles_timing_precision.sql b/tests/queries/1_stateful/00150_quantiles_timing_precision.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00150_quantiles_timing_precision.sql
rename to tests/queries/1_stateful/00150_quantiles_timing_precision.sql
diff --git a/dbms/tests/queries/1_stateful/00151_order_by_read_in_order.reference b/tests/queries/1_stateful/00151_order_by_read_in_order.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00151_order_by_read_in_order.reference
rename to tests/queries/1_stateful/00151_order_by_read_in_order.reference
diff --git a/dbms/tests/queries/1_stateful/00151_order_by_read_in_order.sql b/tests/queries/1_stateful/00151_order_by_read_in_order.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00151_order_by_read_in_order.sql
rename to tests/queries/1_stateful/00151_order_by_read_in_order.sql
diff --git a/dbms/tests/queries/1_stateful/00151_replace_partition_with_different_granularity.reference b/tests/queries/1_stateful/00151_replace_partition_with_different_granularity.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00151_replace_partition_with_different_granularity.reference
rename to tests/queries/1_stateful/00151_replace_partition_with_different_granularity.reference
diff --git a/dbms/tests/queries/1_stateful/00151_replace_partition_with_different_granularity.sql b/tests/queries/1_stateful/00151_replace_partition_with_different_granularity.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00151_replace_partition_with_different_granularity.sql
rename to tests/queries/1_stateful/00151_replace_partition_with_different_granularity.sql
diff --git a/dbms/tests/queries/1_stateful/00152_insert_different_granularity.reference b/tests/queries/1_stateful/00152_insert_different_granularity.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00152_insert_different_granularity.reference
rename to tests/queries/1_stateful/00152_insert_different_granularity.reference
diff --git a/dbms/tests/queries/1_stateful/00152_insert_different_granularity.sql b/tests/queries/1_stateful/00152_insert_different_granularity.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00152_insert_different_granularity.sql
rename to tests/queries/1_stateful/00152_insert_different_granularity.sql
diff --git a/dbms/tests/queries/1_stateful/00153_aggregate_arena_race.reference b/tests/queries/1_stateful/00153_aggregate_arena_race.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00153_aggregate_arena_race.reference
rename to tests/queries/1_stateful/00153_aggregate_arena_race.reference
diff --git a/dbms/tests/queries/1_stateful/00153_aggregate_arena_race.sql b/tests/queries/1_stateful/00153_aggregate_arena_race.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00153_aggregate_arena_race.sql
rename to tests/queries/1_stateful/00153_aggregate_arena_race.sql
diff --git a/dbms/tests/queries/1_stateful/00154_avro.reference b/tests/queries/1_stateful/00154_avro.reference
similarity index 100%
rename from dbms/tests/queries/1_stateful/00154_avro.reference
rename to tests/queries/1_stateful/00154_avro.reference
diff --git a/dbms/tests/queries/1_stateful/00154_avro.sql b/tests/queries/1_stateful/00154_avro.sql
similarity index 100%
rename from dbms/tests/queries/1_stateful/00154_avro.sql
rename to tests/queries/1_stateful/00154_avro.sql
diff --git a/dbms/tests/queries/__init__.py b/tests/queries/__init__.py
similarity index 100%
rename from dbms/tests/queries/__init__.py
rename to tests/queries/__init__.py
diff --git a/dbms/tests/queries/bugs/00938_client_suggestions.sh b/tests/queries/bugs/00938_client_suggestions.sh
similarity index 100%
rename from dbms/tests/queries/bugs/00938_client_suggestions.sh
rename to tests/queries/bugs/00938_client_suggestions.sh
diff --git a/dbms/tests/queries/bugs/01060_defaults_all_columns.reference b/tests/queries/bugs/01060_defaults_all_columns.reference
similarity index 100%
rename from dbms/tests/queries/bugs/01060_defaults_all_columns.reference
rename to tests/queries/bugs/01060_defaults_all_columns.reference
diff --git a/dbms/tests/queries/bugs/default_prewhere.sql b/tests/queries/bugs/default_prewhere.sql
similarity index 100%
rename from dbms/tests/queries/bugs/default_prewhere.sql
rename to tests/queries/bugs/default_prewhere.sql
diff --git a/dbms/tests/queries/bugs/low_cardinality_remove.sql b/tests/queries/bugs/low_cardinality_remove.sql
similarity index 100%
rename from dbms/tests/queries/bugs/low_cardinality_remove.sql
rename to tests/queries/bugs/low_cardinality_remove.sql
diff --git a/dbms/tests/queries/bugs/missing_scalar_subquery_removal.sql b/tests/queries/bugs/missing_scalar_subquery_removal.sql
similarity index 100%
rename from dbms/tests/queries/bugs/missing_scalar_subquery_removal.sql
rename to tests/queries/bugs/missing_scalar_subquery_removal.sql
diff --git a/dbms/tests/queries/bugs/position_case_insensitive_utf8.sql b/tests/queries/bugs/position_case_insensitive_utf8.sql
similarity index 100%
rename from dbms/tests/queries/bugs/position_case_insensitive_utf8.sql
rename to tests/queries/bugs/position_case_insensitive_utf8.sql
diff --git a/dbms/tests/queries/bugs/remote_scalar_subquery.sql b/tests/queries/bugs/remote_scalar_subquery.sql
similarity index 100%
rename from dbms/tests/queries/bugs/remote_scalar_subquery.sql
rename to tests/queries/bugs/remote_scalar_subquery.sql
diff --git a/dbms/tests/queries/bugs/totals_rollup_having_block_header.sql b/tests/queries/bugs/totals_rollup_having_block_header.sql
similarity index 100%
rename from dbms/tests/queries/bugs/totals_rollup_having_block_header.sql
rename to tests/queries/bugs/totals_rollup_having_block_header.sql
diff --git a/dbms/tests/queries/bugs/view_bad_types.sql b/tests/queries/bugs/view_bad_types.sql
similarity index 100%
rename from dbms/tests/queries/bugs/view_bad_types.sql
rename to tests/queries/bugs/view_bad_types.sql
diff --git a/dbms/tests/queries/conftest.py b/tests/queries/conftest.py
similarity index 100%
rename from dbms/tests/queries/conftest.py
rename to tests/queries/conftest.py
diff --git a/dbms/tests/queries/query_test.py b/tests/queries/query_test.py
similarity index 100%
rename from dbms/tests/queries/query_test.py
rename to tests/queries/query_test.py
diff --git a/dbms/tests/queries/server.py b/tests/queries/server.py
similarity index 100%
rename from dbms/tests/queries/server.py
rename to tests/queries/server.py
diff --git a/dbms/tests/queries/shell_config.sh b/tests/queries/shell_config.sh
similarity index 100%
rename from dbms/tests/queries/shell_config.sh
rename to tests/queries/shell_config.sh
diff --git a/dbms/tests/server-test.xml b/tests/server-test.xml
similarity index 100%
rename from dbms/tests/server-test.xml
rename to tests/server-test.xml
diff --git a/dbms/tests/stress b/tests/stress
similarity index 100%
rename from dbms/tests/stress
rename to tests/stress
diff --git a/dbms/tests/strings_dictionary.xml b/tests/strings_dictionary.xml
similarity index 100%
rename from dbms/tests/strings_dictionary.xml
rename to tests/strings_dictionary.xml
diff --git a/dbms/tests/tsan_suppressions.txt b/tests/tsan_suppressions.txt
similarity index 100%
rename from dbms/tests/tsan_suppressions.txt
rename to tests/tsan_suppressions.txt
diff --git a/dbms/tests/users.d/readonly.xml b/tests/users.d/readonly.xml
similarity index 100%
rename from dbms/tests/users.d/readonly.xml
rename to tests/users.d/readonly.xml
diff --git a/dbms/tests/users.xml b/tests/users.xml
similarity index 100%
rename from dbms/tests/users.xml
rename to tests/users.xml
diff --git a/utils/build/build_debian.sh b/utils/build/build_debian.sh
index 0767dc53450..0c194fe53db 100755
--- a/utils/build/build_debian.sh
+++ b/utils/build/build_debian.sh
@@ -29,7 +29,7 @@ cmake --build .
 cd ..
 
 # Run server:
-# build/dbms/programs/clickhouse-server --config-file=ClickHouse/dbms/programs/server/config.xml &
+# build/programs/clickhouse-server --config-file=ClickHouse/programs/server/config.xml &
 
 # Run client:
-# build/dbms/programs/clickhouse-client
+# build/programs/clickhouse-client
diff --git a/utils/build/build_freebsd.sh b/utils/build/build_freebsd.sh
index 5f2b9735501..b2bf7243f12 100755
--- a/utils/build/build_freebsd.sh
+++ b/utils/build/build_freebsd.sh
@@ -44,7 +44,7 @@ cmake --build .
 cd ..
 
 # Run server:
-# build/dbms/programs/clickhouse-server --config-file=ClickHouse/dbms/programs/server/config.xml &
+# build/programs/clickhouse-server --config-file=ClickHouse/programs/server/config.xml &
 
 # Run client:
-# build/dbms/programs/clickhouse-client
+# build/programs/clickhouse-client
diff --git a/utils/build/build_macos.sh b/utils/build/build_macos.sh
index 0e9bed37aa2..d27c8cf4367 100755
--- a/utils/build/build_macos.sh
+++ b/utils/build/build_macos.sh
@@ -43,10 +43,10 @@ cmake --build .
 cd ..
 
 # Run server:
-# build/dbms/programs/clickhouse-server --config-file=ClickHouse/dbms/programs/server/config.xml &
+# build/programs/clickhouse-server --config-file=ClickHouse/programs/server/config.xml &
 
 # Run client:
-# build/dbms/programs/clickhouse-client
+# build/programs/clickhouse-client
 
 ## Caveats
 
diff --git a/utils/check-style/check-include b/utils/check-style/check-include
index 3ced75d19d5..9cdcbbaea7b 100755
--- a/utils/check-style/check-include
+++ b/utils/check-style/check-include
@@ -59,8 +59,8 @@ inc="-I. \
\ -I./contrib/lz4/lib \ -I./contrib/hyperscan/src \ -I./contrib/simdjson/include \ --I./dbms/src \ --I${BUILD_DIR}/dbms/src" +-I./dbms \ +-I${BUILD_DIR}/dbms" if [ -z $1 ]; then cd ${ROOT_DIR=${CUR_DIR}../..} diff --git a/utils/ci/run-clickhouse-from-binaries.sh b/utils/ci/run-clickhouse-from-binaries.sh index f16d840316a..9b1a3f86b1f 100755 --- a/utils/ci/run-clickhouse-from-binaries.sh +++ b/utils/ci/run-clickhouse-from-binaries.sh @@ -6,8 +6,8 @@ set -e -x source default-config -SERVER_BIN="${WORKSPACE}/build/dbms/src/Server/clickhouse" -SERVER_CONF="${WORKSPACE}/sources/dbms/src/Server/config.xml" +SERVER_BIN="${WORKSPACE}/build/dbms/Server/clickhouse" +SERVER_CONF="${WORKSPACE}/sources/dbms/Server/config.xml" SERVER_DATADIR="${WORKSPACE}/clickhouse" [[ -x "$SERVER_BIN" ]] || die "Run build-normal.sh first" diff --git a/utils/package/arch/PKGBUILD.in b/utils/package/arch/PKGBUILD.in index 20de555f8a7..b5c63a3edea 100644 --- a/utils/package/arch/PKGBUILD.in +++ b/utils/package/arch/PKGBUILD.in @@ -20,11 +20,11 @@ package() { mkdir -p $pkgdir/usr/bin/ mkdir -p $pkgdir/usr/lib/systemd/system ln -s clickhouse-client $pkgdir/usr/bin/clickhouse-server - cp $SRC/dbms/programs/server/config.xml $SRC/dbms/programs/server/users.xml $pkgdir/etc/clickhouse-server/ - cp $BIN/dbms/programs/clickhouse $pkgdir/usr/bin/clickhouse-client + cp $SRC/programs/server/config.xml $SRC/programs/server/users.xml $pkgdir/etc/clickhouse-server/ + cp $BIN/programs/clickhouse $pkgdir/usr/bin/clickhouse-client patchelf --remove-rpath $pkgdir/usr/bin/clickhouse-client patchelf --replace-needed libz.so.1 libz-ng.so.1 $pkgdir/usr/bin/clickhouse-client - cp $SRC/dbms/programs/client/clickhouse-client.xml $pkgdir/etc/clickhouse-client/config.xml + cp $SRC/programs/client/clickhouse-client.xml $pkgdir/etc/clickhouse-client/config.xml compiler="libclickhouse-compiler.so" if ! 
pacman -Q clang | grep '^clang 7'; then compiler="" diff --git a/utils/release/release_lib.sh b/utils/release/release_lib.sh index ab395c9ad37..14e26631db9 100644 --- a/utils/release/release_lib.sh +++ b/utils/release/release_lib.sh @@ -101,8 +101,8 @@ function gen_revision_author { gen_changelog "$VERSION_STRING" "" "$AUTHOR" "" gen_dockerfiles "$VERSION_STRING" - dbms/src/Storages/System/StorageSystemContributors.sh ||: - git commit -m "$auto_message [$VERSION_STRING] [$VERSION_REVISION]" dbms/cmake/version.cmake debian/changelog docker/*/Dockerfile dbms/src/Storages/System/StorageSystemContributors.generated.cpp + dbms/Storages/System/StorageSystemContributors.sh ||: + git commit -m "$auto_message [$VERSION_STRING] [$VERSION_REVISION]" dbms/cmake/version.cmake debian/changelog docker/*/Dockerfile dbms/Storages/System/StorageSystemContributors.generated.cpp if [ -z $NO_PUSH ]; then git push fi diff --git a/utils/test-data-generator/CMakeLists.txt b/utils/test-data-generator/CMakeLists.txt index 2e11b537873..c46853229d7 100644 --- a/utils/test-data-generator/CMakeLists.txt +++ b/utils/test-data-generator/CMakeLists.txt @@ -1,9 +1,9 @@ if(USE_PROTOBUF) - protobuf_generate_cpp(ProtobufDelimitedMessagesSerializer_Srcs ProtobufDelimitedMessagesSerializer_Hdrs ${CMAKE_CURRENT_SOURCE_DIR}/../../dbms/tests/queries/0_stateless/00825_protobuf_format.proto) - protobuf_generate_cpp(ProtobufDelimitedMessagesSerializer_Srcs2 ProtobufDelimitedMessagesSerializer_Hdrs2 ${CMAKE_CURRENT_SOURCE_DIR}/../../dbms/tests/queries/0_stateless/00825_protobuf_format_syntax2.proto) + protobuf_generate_cpp(ProtobufDelimitedMessagesSerializer_Srcs ProtobufDelimitedMessagesSerializer_Hdrs ${CMAKE_CURRENT_SOURCE_DIR}/../../tests/queries/0_stateless/00825_protobuf_format.proto) + protobuf_generate_cpp(ProtobufDelimitedMessagesSerializer_Srcs2 ProtobufDelimitedMessagesSerializer_Hdrs2 ${CMAKE_CURRENT_SOURCE_DIR}/../../tests/queries/0_stateless/00825_protobuf_format_syntax2.proto) add_executable (ProtobufDelimitedMessagesSerializer ProtobufDelimitedMessagesSerializer.cpp ${ProtobufDelimitedMessagesSerializer_Srcs} ${ProtobufDelimitedMessagesSerializer_Hdrs} ${ProtobufDelimitedMessagesSerializer_Srcs2} ${ProtobufDelimitedMessagesSerializer_Hdrs2}) target_include_directories (ProtobufDelimitedMessagesSerializer SYSTEM BEFORE PRIVATE ${Protobuf_INCLUDE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) target_link_libraries (ProtobufDelimitedMessagesSerializer PRIVATE ${Protobuf_LIBRARY} ${Boost_PROGRAM_OPTIONS_LIBRARY}) - get_filename_component(ProtobufDelimitedMessagesSerializer_OutputDir "${CMAKE_CURRENT_LIST_DIR}/../../dbms/tests/queries/0_stateless" REALPATH) + get_filename_component(ProtobufDelimitedMessagesSerializer_OutputDir "${CMAKE_CURRENT_LIST_DIR}/../../tests/queries/0_stateless" REALPATH) target_compile_definitions(ProtobufDelimitedMessagesSerializer PRIVATE OUTPUT_DIR="${ProtobufDelimitedMessagesSerializer_OutputDir}") endif() diff --git a/utils/test-data-generator/ProtobufDelimitedMessagesSerializer.cpp b/utils/test-data-generator/ProtobufDelimitedMessagesSerializer.cpp index c956dea8712..d16df83d12f 100644 --- a/utils/test-data-generator/ProtobufDelimitedMessagesSerializer.cpp +++ b/utils/test-data-generator/ProtobufDelimitedMessagesSerializer.cpp @@ -1,5 +1,5 @@ // Generator of protobuf delimited messages used in the protobuf IO tests -// dbms/tests/queries/0_stateless/00825_protobuf_format* +// tests/queries/0_stateless/00825_protobuf_format* #include #include From c0a595355a5022edb9be36ea1313cded82b673a7 Mon Sep 17 00:00:00 
2001 From: Ivan Lezhankin Date: Thu, 2 Apr 2020 03:32:39 +0300 Subject: [PATCH 041/484] Move more dbms/tests to top-level --- {dbms/tests => tests}/integration/test_random_inserts/__init__.py | 0 {dbms/tests => tests}/performance/distributed_aggregation.xml | 0 .../queries/0_stateless/01107_join_right_table_totals.reference | 0 .../queries/0_stateless/01107_join_right_table_totals.sql | 0 .../0_stateless/01107_tuples_arrays_parsing_exceptions.reference | 0 .../queries/0_stateless/01107_tuples_arrays_parsing_exceptions.sh | 0 .../0_stateless/01108_restart_replicas_rename_deadlock.reference | 0 .../queries/0_stateless/01108_restart_replicas_rename_deadlock.sh | 0 .../0_stateless/01201_read_single_thread_in_order.reference | 0 .../queries/0_stateless/01201_read_single_thread_in_order.sql | 0 .../queries/0_stateless/01223_dist_on_dist.reference | 0 {dbms/tests => tests}/queries/0_stateless/01223_dist_on_dist.sql | 0 .../queries/bugs/01224_dist_on_dist_global_in.reference | 0 .../tests => tests}/queries/bugs/01224_dist_on_dist_global_in.sql | 0 14 files changed, 0 insertions(+), 0 deletions(-) rename {dbms/tests => tests}/integration/test_random_inserts/__init__.py (100%) rename {dbms/tests => tests}/performance/distributed_aggregation.xml (100%) rename {dbms/tests => tests}/queries/0_stateless/01107_join_right_table_totals.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01107_join_right_table_totals.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01107_tuples_arrays_parsing_exceptions.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01107_tuples_arrays_parsing_exceptions.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/01108_restart_replicas_rename_deadlock.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01108_restart_replicas_rename_deadlock.sh (100%) rename {dbms/tests => tests}/queries/0_stateless/01201_read_single_thread_in_order.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01201_read_single_thread_in_order.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01223_dist_on_dist.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01223_dist_on_dist.sql (100%) rename {dbms/tests => tests}/queries/bugs/01224_dist_on_dist_global_in.reference (100%) rename {dbms/tests => tests}/queries/bugs/01224_dist_on_dist_global_in.sql (100%) diff --git a/dbms/tests/integration/test_random_inserts/__init__.py b/tests/integration/test_random_inserts/__init__.py similarity index 100% rename from dbms/tests/integration/test_random_inserts/__init__.py rename to tests/integration/test_random_inserts/__init__.py diff --git a/dbms/tests/performance/distributed_aggregation.xml b/tests/performance/distributed_aggregation.xml similarity index 100% rename from dbms/tests/performance/distributed_aggregation.xml rename to tests/performance/distributed_aggregation.xml diff --git a/dbms/tests/queries/0_stateless/01107_join_right_table_totals.reference b/tests/queries/0_stateless/01107_join_right_table_totals.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01107_join_right_table_totals.reference rename to tests/queries/0_stateless/01107_join_right_table_totals.reference diff --git a/dbms/tests/queries/0_stateless/01107_join_right_table_totals.sql b/tests/queries/0_stateless/01107_join_right_table_totals.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01107_join_right_table_totals.sql rename to tests/queries/0_stateless/01107_join_right_table_totals.sql diff 
--git a/dbms/tests/queries/0_stateless/01107_tuples_arrays_parsing_exceptions.reference b/tests/queries/0_stateless/01107_tuples_arrays_parsing_exceptions.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01107_tuples_arrays_parsing_exceptions.reference rename to tests/queries/0_stateless/01107_tuples_arrays_parsing_exceptions.reference diff --git a/dbms/tests/queries/0_stateless/01107_tuples_arrays_parsing_exceptions.sh b/tests/queries/0_stateless/01107_tuples_arrays_parsing_exceptions.sh similarity index 100% rename from dbms/tests/queries/0_stateless/01107_tuples_arrays_parsing_exceptions.sh rename to tests/queries/0_stateless/01107_tuples_arrays_parsing_exceptions.sh diff --git a/dbms/tests/queries/0_stateless/01108_restart_replicas_rename_deadlock.reference b/tests/queries/0_stateless/01108_restart_replicas_rename_deadlock.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01108_restart_replicas_rename_deadlock.reference rename to tests/queries/0_stateless/01108_restart_replicas_rename_deadlock.reference diff --git a/dbms/tests/queries/0_stateless/01108_restart_replicas_rename_deadlock.sh b/tests/queries/0_stateless/01108_restart_replicas_rename_deadlock.sh similarity index 100% rename from dbms/tests/queries/0_stateless/01108_restart_replicas_rename_deadlock.sh rename to tests/queries/0_stateless/01108_restart_replicas_rename_deadlock.sh diff --git a/dbms/tests/queries/0_stateless/01201_read_single_thread_in_order.reference b/tests/queries/0_stateless/01201_read_single_thread_in_order.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01201_read_single_thread_in_order.reference rename to tests/queries/0_stateless/01201_read_single_thread_in_order.reference diff --git a/dbms/tests/queries/0_stateless/01201_read_single_thread_in_order.sql b/tests/queries/0_stateless/01201_read_single_thread_in_order.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01201_read_single_thread_in_order.sql rename to tests/queries/0_stateless/01201_read_single_thread_in_order.sql diff --git a/dbms/tests/queries/0_stateless/01223_dist_on_dist.reference b/tests/queries/0_stateless/01223_dist_on_dist.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01223_dist_on_dist.reference rename to tests/queries/0_stateless/01223_dist_on_dist.reference diff --git a/dbms/tests/queries/0_stateless/01223_dist_on_dist.sql b/tests/queries/0_stateless/01223_dist_on_dist.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01223_dist_on_dist.sql rename to tests/queries/0_stateless/01223_dist_on_dist.sql diff --git a/dbms/tests/queries/bugs/01224_dist_on_dist_global_in.reference b/tests/queries/bugs/01224_dist_on_dist_global_in.reference similarity index 100% rename from dbms/tests/queries/bugs/01224_dist_on_dist_global_in.reference rename to tests/queries/bugs/01224_dist_on_dist_global_in.reference diff --git a/dbms/tests/queries/bugs/01224_dist_on_dist_global_in.sql b/tests/queries/bugs/01224_dist_on_dist_global_in.sql similarity index 100% rename from dbms/tests/queries/bugs/01224_dist_on_dist_global_in.sql rename to tests/queries/bugs/01224_dist_on_dist_global_in.sql From 4d1749b34b21a6dc497ff195a63dbe91b2e506b1 Mon Sep 17 00:00:00 2001 From: Ivan Lezhankin Date: Thu, 2 Apr 2020 03:36:53 +0300 Subject: [PATCH 042/484] Also move dbms/cmake/version.cmake to top-level --- {dbms/cmake => cmake}/version.cmake | 0 dbms/CMakeLists.txt | 2 +- utils/package/arch/CMakeLists.txt | 2 +- 
utils/release/release_lib.sh | 12 ++++++------ 4 files changed, 8 insertions(+), 8 deletions(-) rename {dbms/cmake => cmake}/version.cmake (100%) diff --git a/dbms/cmake/version.cmake b/cmake/version.cmake similarity index 100% rename from dbms/cmake/version.cmake rename to cmake/version.cmake diff --git a/dbms/CMakeLists.txt b/dbms/CMakeLists.txt index 1c891353aaa..f3f18d35f3c 100644 --- a/dbms/CMakeLists.txt +++ b/dbms/CMakeLists.txt @@ -21,7 +21,7 @@ include(../cmake/limit_jobs.cmake) set (CONFIG_VERSION ${CMAKE_CURRENT_BINARY_DIR}/Common/config_version.h) set (CONFIG_COMMON ${CMAKE_CURRENT_BINARY_DIR}/Common/config.h) -include (cmake/version.cmake) +include (../cmake/version.cmake) message (STATUS "Will build ${VERSION_FULL} revision ${VERSION_REVISION} ${VERSION_OFFICIAL}") configure_file (Common/config.h.in ${CONFIG_COMMON}) configure_file (Common/config_version.h.in ${CONFIG_VERSION}) diff --git a/utils/package/arch/CMakeLists.txt b/utils/package/arch/CMakeLists.txt index 07489cf9b19..e77819f6d98 100644 --- a/utils/package/arch/CMakeLists.txt +++ b/utils/package/arch/CMakeLists.txt @@ -1,2 +1,2 @@ -include (${ClickHouse_SOURCE_DIR}/dbms/cmake/version.cmake) +include (${ClickHouse_SOURCE_DIR}/cmake/version.cmake) configure_file (PKGBUILD.in PKGBUILD) diff --git a/utils/release/release_lib.sh b/utils/release/release_lib.sh index 14e26631db9..7e2608aac87 100644 --- a/utils/release/release_lib.sh +++ b/utils/release/release_lib.sh @@ -12,10 +12,10 @@ function gen_version_string { function get_version { if [ -z "$VERSION_MAJOR" ] && [ -z "$VERSION_MINOR" ] && [ -z "$VERSION_PATCH" ]; then BASEDIR=$(dirname "${BASH_SOURCE[0]}")/../../ - VERSION_REVISION=`grep "set(VERSION_REVISION" ${BASEDIR}/dbms/cmake/version.cmake | sed 's/^.*VERSION_REVISION \(.*\)$/\1/' | sed 's/[) ].*//'` - VERSION_MAJOR=`grep "set(VERSION_MAJOR" ${BASEDIR}/dbms/cmake/version.cmake | sed 's/^.*VERSION_MAJOR \(.*\)/\1/' | sed 's/[) ].*//'` - VERSION_MINOR=`grep "set(VERSION_MINOR" ${BASEDIR}/dbms/cmake/version.cmake | sed 's/^.*VERSION_MINOR \(.*\)/\1/' | sed 's/[) ].*//'` - VERSION_PATCH=`grep "set(VERSION_PATCH" ${BASEDIR}/dbms/cmake/version.cmake | sed 's/^.*VERSION_PATCH \(.*\)/\1/' | sed 's/[) ].*//'` + VERSION_REVISION=`grep "set(VERSION_REVISION" ${BASEDIR}/cmake/version.cmake | sed 's/^.*VERSION_REVISION \(.*\)$/\1/' | sed 's/[) ].*//'` + VERSION_MAJOR=`grep "set(VERSION_MAJOR" ${BASEDIR}/cmake/version.cmake | sed 's/^.*VERSION_MAJOR \(.*\)/\1/' | sed 's/[) ].*//'` + VERSION_MINOR=`grep "set(VERSION_MINOR" ${BASEDIR}/cmake/version.cmake | sed 's/^.*VERSION_MINOR \(.*\)/\1/' | sed 's/[) ].*//'` + VERSION_PATCH=`grep "set(VERSION_PATCH" ${BASEDIR}/cmake/version.cmake | sed 's/^.*VERSION_PATCH \(.*\)/\1/' | sed 's/[) ].*//'` fi VERSION_PREFIX="${VERSION_PREFIX:-v}" VERSION_POSTFIX_TAG="${VERSION_POSTFIX:--testing}" @@ -97,12 +97,12 @@ function gen_revision_author { -e "s/set(VERSION_MINOR [^) ]*/set(VERSION_MINOR $VERSION_MINOR/g;" \ -e "s/set(VERSION_PATCH [^) ]*/set(VERSION_PATCH $VERSION_PATCH/g;" \ -e "s/set(VERSION_STRING [^) ]*/set(VERSION_STRING $VERSION_STRING/g;" \ - dbms/cmake/version.cmake + cmake/version.cmake gen_changelog "$VERSION_STRING" "" "$AUTHOR" "" gen_dockerfiles "$VERSION_STRING" dbms/Storages/System/StorageSystemContributors.sh ||: - git commit -m "$auto_message [$VERSION_STRING] [$VERSION_REVISION]" dbms/cmake/version.cmake debian/changelog docker/*/Dockerfile dbms/Storages/System/StorageSystemContributors.generated.cpp + git commit -m "$auto_message [$VERSION_STRING] 
[$VERSION_REVISION]" cmake/version.cmake debian/changelog docker/*/Dockerfile dbms/Storages/System/StorageSystemContributors.generated.cpp if [ -z $NO_PUSH ]; then git push fi From 0dbbc64e063edb236cc3dfdb491a7a9bf1641594 Mon Sep 17 00:00:00 2001 From: Ivan Lezhankin Date: Thu, 2 Apr 2020 06:47:04 +0300 Subject: [PATCH 043/484] Fix build --- dbms/Functions/FunctionsStringSearch.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/dbms/Functions/FunctionsStringSearch.cpp b/dbms/Functions/FunctionsStringSearch.cpp index 8279ded5f81..a7e4cf4f8b1 100644 --- a/dbms/Functions/FunctionsStringSearch.cpp +++ b/dbms/Functions/FunctionsStringSearch.cpp @@ -7,6 +7,8 @@ #include #include #include +#include +#include #include #include #include From 159e72ea7c01fec84fffa00f0721de27ded5dc6c Mon Sep 17 00:00:00 2001 From: Ivan Lezhankin Date: Thu, 2 Apr 2020 10:52:46 +0300 Subject: [PATCH 044/484] Trigger Docker image rebuild --- docker/packager/binary/Dockerfile | 1 + docs/fr/development/build.md | 2 +- docs/fr/development/developer_instruction.md | 14 +++++++------- docs/fr/getting_started/install.md | 4 ++-- docs/fr/operations/backup.md | 2 +- docs/fr/operations/performance_test.md | 8 ++++---- docs/fr/query_language/create.md | 2 +- 7 files changed, 17 insertions(+), 16 deletions(-) diff --git a/docker/packager/binary/Dockerfile b/docker/packager/binary/Dockerfile index a8faf3ceb01..60ac34167b9 100644 --- a/docker/packager/binary/Dockerfile +++ b/docker/packager/binary/Dockerfile @@ -1,3 +1,4 @@ +# Trigger new image build # docker build -t yandex/clickhouse-binary-builder . FROM ubuntu:19.10 diff --git a/docs/fr/development/build.md b/docs/fr/development/build.md index 7fa1bb066ea..5bf9e439849 100644 --- a/docs/fr/development/build.md +++ b/docs/fr/development/build.md @@ -64,7 +64,7 @@ $ cd .. ``` Pour créer un exécutable, exécutez `ninja clickhouse`. -Cela va créer de l' `dbms/programs/clickhouse` exécutable, qui peut être utilisé avec `client` ou `server` argument. +Cela va créer de l' `programs/clickhouse` exécutable, qui peut être utilisé avec `client` ou `server` argument. # Comment Construire ClickHouse sur N'importe quel Linux {#how-to-build-clickhouse-on-any-linux} diff --git a/docs/fr/development/developer_instruction.md b/docs/fr/development/developer_instruction.md index fedec292b2b..fc725d8afb4 100644 --- a/docs/fr/development/developer_instruction.md +++ b/docs/fr/development/developer_instruction.md @@ -197,19 +197,19 @@ Après le démarrage réussi du processus de construction, vous verrez la progre Lors de la construction de messages sur les fichiers protobuf dans la bibliothèque libhdfs2 comme `libprotobuf WARNING` peuvent apparaître. Ils touchent rien et sont sûrs d'être ignoré. 
-Lors de la construction, vous obtenez un fichier exécutable `ClickHouse//dbms/programs/clickhouse`: +Lors de la construction, vous obtenez un fichier exécutable `ClickHouse//programs/clickhouse`: - ls -l dbms/programs/clickhouse + ls -l programs/clickhouse # Exécution de L'exécutable construit de ClickHouse {#running-the-built-executable-of-clickhouse} -Pour exécuter le serveur sous l'utilisateur actuel vous devez naviguer vers `ClickHouse/dbms/programs/server/` (situé à l'extérieur de `build` et les exécuter: +Pour exécuter le serveur sous l'utilisateur actuel vous devez naviguer vers `ClickHouse/programs/server/` (situé à l'extérieur de `build` et les exécuter: - ../../../build/dbms/programs/clickhouse server + ../../../build/programs/clickhouse server Dans ce cas, ClickHouse utilisera les fichiers de configuration situés dans le répertoire courant. Vous pouvez l'exécuter `clickhouse server` depuis n'importe quel répertoire spécifiant le chemin d'accès à un fichier de configuration en tant que paramètre de ligne de commande `--config-file`. -Pour vous connecter à ClickHouse avec clickhouse-client dans un autre terminal, accédez à `ClickHouse/build/dbms/programs/` et exécuter `clickhouse client`. +Pour vous connecter à ClickHouse avec clickhouse-client dans un autre terminal, accédez à `ClickHouse/build/programs/` et exécuter `clickhouse client`. Si vous obtenez `Connection refused` message sur Mac OS X ou FreeBSD, essayez de spécifier l'adresse hôte 127.0.0.1: @@ -218,7 +218,7 @@ Si vous obtenez `Connection refused` message sur Mac OS X ou FreeBSD, essayez de Vous pouvez remplacer la version de production de clickhouse binary installée dans votre système par votre clickhouse Binary sur mesure. Pour ce faire, installez ClickHouse sur votre machine en suivant les instructions du site officiel. Ensuite, exécutez ce qui suit: sudo service clickhouse-server stop - sudo cp ClickHouse/build/dbms/programs/clickhouse /usr/bin/ + sudo cp ClickHouse/build/programs/clickhouse /usr/bin/ sudo service clickhouse-server start Notez que `clickhouse-client`, `clickhouse-server` et d'autres sont des liens symboliques à la commune `clickhouse` binaire. @@ -226,7 +226,7 @@ Notez que `clickhouse-client`, `clickhouse-server` et d'autres sont des liens sy Vous pouvez également exécuter votre binaire ClickHouse personnalisé avec le fichier de configuration du package clickhouse installé sur votre système: sudo service clickhouse-server stop - sudo -u clickhouse ClickHouse/build/dbms/programs/clickhouse server --config-file /etc/clickhouse-server/config.xml + sudo -u clickhouse ClickHouse/build/programs/clickhouse server --config-file /etc/clickhouse-server/config.xml # IDE (environnement de développement intégré) {#ide-integrated-development-environment} diff --git a/docs/fr/getting_started/install.md b/docs/fr/getting_started/install.md index 4ceb4deb796..f158d9ce629 100644 --- a/docs/fr/getting_started/install.md +++ b/docs/fr/getting_started/install.md @@ -108,8 +108,8 @@ Pour compiler manuellement ClickHouse, suivez les instructions pour [Linux](../d Vous pouvez compiler des paquets et les installer ou utiliser des programmes sans installer de paquets. En outre, en construisant manuellement, vous pouvez désactiver L'exigence SSE 4.2 ou construire pour les processeurs AArch64. 
- Client: dbms/programs/clickhouse-client - Server: dbms/programs/clickhouse-server + Client: programs/clickhouse-client + Server: programs/clickhouse-server Vous devrez créer un dossier de données et de métadonnées et `chown` pour l'utilisateur souhaité. Leurs chemins peuvent être modifiés dans la configuration du serveur (src / SGBD / programs / server / config.xml), par défaut, ils sont: diff --git a/docs/fr/operations/backup.md b/docs/fr/operations/backup.md index 2245cd29817..74d6a90afb1 100644 --- a/docs/fr/operations/backup.md +++ b/docs/fr/operations/backup.md @@ -4,7 +4,7 @@ machine_translated: true # La Sauvegarde Des Données {#data-backup} -Alors [réplication](table_engines/replication.md) provides protection from hardware failures, it does not protect against human errors: accidental deletion of data, deletion of the wrong table or a table on the wrong cluster, and software bugs that result in incorrect data processing or data corruption. In many cases mistakes like these will affect all replicas. ClickHouse has built-in safeguards to prevent some types of mistakes — for example, by default [vous ne pouvez pas simplement supprimer des tables avec un moteur de type MergeTree contenant plus de 50 Go de données](https://github.com/ClickHouse/ClickHouse/blob/v18.14.18-stable/dbms/programs/server/config.xml#L322-L330). Toutefois, ces garanties ne couvrent pas tous les cas possibles et peuvent être contournés. +Alors [réplication](table_engines/replication.md) provides protection from hardware failures, it does not protect against human errors: accidental deletion of data, deletion of the wrong table or a table on the wrong cluster, and software bugs that result in incorrect data processing or data corruption. In many cases mistakes like these will affect all replicas. ClickHouse has built-in safeguards to prevent some types of mistakes — for example, by default [vous ne pouvez pas simplement supprimer des tables avec un moteur de type MergeTree contenant plus de 50 Go de données](https://github.com/ClickHouse/ClickHouse/blob/v18.14.18-stable/programs/server/config.xml#L322-L330). Toutefois, ces garanties ne couvrent pas tous les cas possibles et peuvent être contournés. Afin d'atténuer efficacement les erreurs humaines possibles, vous devez préparer soigneusement une stratégie de sauvegarde et de restauration de vos données **préalablement**. 
diff --git a/docs/fr/operations/performance_test.md b/docs/fr/operations/performance_test.md index dc9fc7f0e4a..cecf091c7cc 100644 --- a/docs/fr/operations/performance_test.md +++ b/docs/fr/operations/performance_test.md @@ -27,11 +27,11 @@ Avec cette instruction, vous pouvez exécuter le test de performance clickhouse - wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/dbms/programs/server/config.xml - wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/dbms/programs/server/users.xml + wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.xml + wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/users.xml mkdir config.d - wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/dbms/programs/server/config.d/path.xml -O config.d/path.xml - wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/dbms/programs/server/config.d/log_to_console.xml -O config.d/log_to_console.xml + wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.d/path.xml -O config.d/path.xml + wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.d/log_to_console.xml -O config.d/log_to_console.xml 1. Télécharger des fichiers de référence: diff --git a/docs/fr/query_language/create.md b/docs/fr/query_language/create.md index b4a6dea29fb..ffe2125146b 100644 --- a/docs/fr/query_language/create.md +++ b/docs/fr/query_language/create.md @@ -151,7 +151,7 @@ ENGINE = Si un codec est spécifié, le codec par défaut ne s'applique pas. Les Codecs peuvent être combinés dans un pipeline, par exemple, `CODEC(Delta, ZSTD)`. Pour sélectionner la meilleure combinaison de codecs pour votre projet, passez des benchmarks similaires à ceux décrits dans Altinity [Nouveaux encodages pour améliorer L'efficacité du ClickHouse](https://www.altinity.com/blog/2019/7/new-encodings-to-improve-clickhouse) article. !!! warning "Avertissement" - Vous ne pouvez pas décompresser les fichiers de base de données ClickHouse avec des utilitaires externes tels que `lz4`. Au lieu de cela, utilisez le spécial [clickhouse-compresseur](https://github.com/ClickHouse/ClickHouse/tree/master/dbms/programs/compressor) utilitaire. + Vous ne pouvez pas décompresser les fichiers de base de données ClickHouse avec des utilitaires externes tels que `lz4`. Au lieu de cela, utilisez le spécial [clickhouse-compresseur](https://github.com/ClickHouse/ClickHouse/tree/master/programs/compressor) utilitaire. 
La Compression est prise en charge pour les moteurs de tableau suivants: From 918e145ea57a7ee03083d5b9a922911d98e6fe15 Mon Sep 17 00:00:00 2001 From: alesapin Date: Thu, 2 Apr 2020 11:51:21 +0300 Subject: [PATCH 045/484] Fix integration test --- tests/integration/test_odbc_interaction/test.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/test_odbc_interaction/test.py b/tests/integration/test_odbc_interaction/test.py index 1e876e507f4..41f54ddd0e6 100644 --- a/tests/integration/test_odbc_interaction/test.py +++ b/tests/integration/test_odbc_interaction/test.py @@ -91,8 +91,8 @@ def test_mysql_simple_select_works(started_cluster): with conn.cursor() as cursor: cursor.execute("INSERT INTO clickhouse.{} VALUES(50, 'null-guy', 127, 255, NULL), (100, 'non-null-guy', 127, 255, 511);".format(table_name)) conn.commit() - assert node1.query("SELECT column_x FROM odbc('DSN={}', '{}') SETTINGS external_table_functions_use_nulls=1".format(mysql_setup["DSN"], table_name)) == '\\N\n511\n' - assert node1.query("SELECT column_x FROM odbc('DSN={}', '{}') SETTINGS external_table_functions_use_nulls=0".format(mysql_setup["DSN"], table_name)) == '0\n511\n' + assert node1.query("SELECT column_x FROM odbc('DSN={}', '{}')".format(mysql_setup["DSN"], table_name), settings={"external_table_functions_use_nulls": "1"}) == '\\N\n511\n' + assert node1.query("SELECT column_x FROM odbc('DSN={}', '{}')".format(mysql_setup["DSN"], table_name), settings={"external_table_functions_use_nulls": "0"}) == '0\n511\n' node1.query(''' CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32, column_x Nullable(UInt32)) ENGINE = MySQL('mysql1:3306', 'clickhouse', '{}', 'root', 'clickhouse'); From 3082e7ddf8e2b2edb00d9e637fd358acebc7e11d Mon Sep 17 00:00:00 2001 From: Pavel Kovalenko Date: Thu, 2 Apr 2020 11:58:29 +0300 Subject: [PATCH 046/484] S3 URI parsing improvements. --- dbms/IO/S3Common.cpp | 28 ++++++++++++++++++++-------- dbms/src/IO/tests/gtest_s3_uri.cpp | 1 + 2 files changed, 21 insertions(+), 8 deletions(-) diff --git a/dbms/IO/S3Common.cpp b/dbms/IO/S3Common.cpp index e1952f5eafd..d434c41d3cc 100644 --- a/dbms/IO/S3Common.cpp +++ b/dbms/IO/S3Common.cpp @@ -105,37 +105,49 @@ namespace S3 URI::URI(const Poco::URI & uri_) { + /// Case when bucket name represented in domain name of S3 URL. + /// E.g. (https://bucket-name.s3.Region.amazonaws.com/key) + /// https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#virtual-hosted-style-access static const RE2 virtual_hosted_style_pattern("(.+\\.)?s3[.-][a-z0-9-.]+"); + /// Case when bucket name and key represented in path of S3 URL. + /// E.g. (https://s3.Region.amazonaws.com/bucket-name/key) + /// https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#path-style-access static const RE2 path_style_pattern("([^/]+)/(.*)"); uri = uri_; if (uri.getHost().empty()) - throw Exception("Invalid S3 URI host: " + uri.toString(), ErrorCodes::BAD_ARGUMENTS); + throw Exception("Host is empty in S3 URI: " + uri.toString(), ErrorCodes::BAD_ARGUMENTS); endpoint = uri.getScheme() + "://" + uri.getAuthority(); if (re2::RE2::FullMatch(uri.getAuthority(), virtual_hosted_style_pattern, &bucket)) { if (!bucket.empty()) - bucket = bucket.substr(0, bucket.length() - 1); - if (bucket.length() < 3 || bucket.length() > 63) - throw Exception("Invalid S3 URI bucket: " + uri.toString(), ErrorCodes::BAD_ARGUMENTS); + bucket.pop_back(); /// Remove '.' character from the end of the bucket name. 
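To make the two addressing schemes concrete, here is a minimal standalone sketch of the same parsing logic. It is not the committed code: the helper name parseS3URI is invented for illustration, it takes the authority and path as plain strings rather than a Poco::URI, and it assumes only the RE2 library that the code above already uses.

    #include <iostream>
    #include <string>
    #include <re2/re2.h>

    /// Splits an S3 URL's authority and path into bucket and key using the same
    /// two patterns as above: virtual-hosted style keeps the bucket in the host
    /// name, path style keeps it as the first path component.
    /// Assumes `path` starts with '/'.
    static bool parseS3URI(const std::string & authority, const std::string & path,
                           std::string & bucket, std::string & key)
    {
        static const RE2 virtual_hosted_style("(.+\\.)?s3[.-][a-z0-9-.]+");
        static const RE2 path_style("([^/]+)/(.*)");

        if (RE2::FullMatch(authority, virtual_hosted_style, &bucket))
        {
            if (!bucket.empty())
                bucket.pop_back();   /// drop the trailing '.'
            key = path.substr(1);    /// drop the leading '/'
        }
        else if (!RE2::PartialMatch(path, path_style, &bucket, &key))
            return false;

        /// Both styles share the same sanity checks (3..63 chars, non-empty key).
        return bucket.length() >= 3 && bucket.length() <= 63 && !key.empty() && key != "/";
    }

    int main()
    {
        std::string bucket, key;
        /// Virtual-hosted style: https://jokserfn.s3.yandexcloud.net/some/key
        if (parseS3URI("jokserfn.s3.yandexcloud.net", "/some/key", bucket, key))
            std::cout << bucket << " | " << key << '\n';   /// jokserfn | some/key
        /// Path style, e.g. a non-S3 host such as a MinIO endpoint (assumed name):
        /// https://storage.example.com/jokserfn/some/key
        if (parseS3URI("storage.example.com", "/jokserfn/some/key", bucket, key))
            std::cout << bucket << " | " << key << '\n';   /// jokserfn | some/key
    }

Note that a host like s3.yandexcloud.net still matches the virtual-hosted pattern with an empty bucket and is then rejected by the length check, which is exactly why the test list below includes such URIs.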
+ /// S3 specification requires at least 3 and at most 63 characters in bucket name. + /// https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-s3-bucket-naming-requirements.html + if (bucket.length() < 3 || bucket.length() > 63) + throw Exception("Bucket name length our of bounds in S3 URI: " + bucket + " (" + uri.toString() + ")", ErrorCodes::BAD_ARGUMENTS); + + /// Remove leading '/' from path to extract key. key = uri.getPath().substr(1); if (key.empty() || key == "/") - throw Exception("Invalid S3 URI key: " + uri.toString(), ErrorCodes::BAD_ARGUMENTS); + throw Exception("Key name is empty in S3 URI: " + key + " (" + uri.toString() + ")", ErrorCodes::BAD_ARGUMENTS); } else if (re2::RE2::PartialMatch(uri.getPath(), path_style_pattern, &bucket, &key)) { + /// S3 specification requires at least 3 and at most 63 characters in bucket name. + /// https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-s3-bucket-naming-requirements.html if (bucket.length() < 3 || bucket.length() > 63) - throw Exception("Invalid S3 URI bucket: " + uri.toString(), ErrorCodes::BAD_ARGUMENTS); + throw Exception("Bucket name length our of bounds in S3 URI: " + bucket + " (" + uri.toString() + ")", ErrorCodes::BAD_ARGUMENTS); if (key.empty() || key == "/") - throw Exception("Invalid S3 URI key: " + uri.toString(), ErrorCodes::BAD_ARGUMENTS); + throw Exception("Key name is empty in S3 URI: " + key + " (" + uri.toString() + ")", ErrorCodes::BAD_ARGUMENTS); } else - throw Exception("Invalid S3 URI bucket or key: " + uri.toString(), ErrorCodes::BAD_ARGUMENTS); + throw Exception("Bucket or key name are invalid in S3 URI: " + uri.toString(), ErrorCodes::BAD_ARGUMENTS); } } diff --git a/dbms/src/IO/tests/gtest_s3_uri.cpp b/dbms/src/IO/tests/gtest_s3_uri.cpp index c1714703615..accb81a6306 100644 --- a/dbms/src/IO/tests/gtest_s3_uri.cpp +++ b/dbms/src/IO/tests/gtest_s3_uri.cpp @@ -42,6 +42,7 @@ INSTANTIATE_TEST_SUITE_P( "https://jokserfn.s3.yandexcloud.net/", "https://.s3.yandexcloud.net/key", "https://s3.yandexcloud.net/key", + "https://jokserfn.s3yandexcloud.net/key", "https://s3.yandexcloud.net/key/", "https://s3.yandexcloud.net//", "https://yandexcloud.net/", From 4ac4c361c4196fd5b5208705db7e9c7c1e8207bd Mon Sep 17 00:00:00 2001 From: Pavel Kovalenko Date: Thu, 2 Apr 2020 11:59:10 +0300 Subject: [PATCH 047/484] Code style issues. --- dbms/IO/S3Common.cpp | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/dbms/IO/S3Common.cpp b/dbms/IO/S3Common.cpp index d434c41d3cc..c5cd1055111 100644 --- a/dbms/IO/S3Common.cpp +++ b/dbms/IO/S3Common.cpp @@ -129,7 +129,8 @@ namespace S3 /// S3 specification requires at least 3 and at most 63 characters in bucket name. /// https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-s3-bucket-naming-requirements.html if (bucket.length() < 3 || bucket.length() > 63) - throw Exception("Bucket name length our of bounds in S3 URI: " + bucket + " (" + uri.toString() + ")", ErrorCodes::BAD_ARGUMENTS); + throw Exception( + "Bucket name length our of bounds in S3 URI: " + bucket + " (" + uri.toString() + ")", ErrorCodes::BAD_ARGUMENTS); /// Remove leading '/' from path to extract key. key = uri.getPath().substr(1); @@ -141,7 +142,8 @@ namespace S3 /// S3 specification requires at least 3 and at most 63 characters in bucket name. 
/// https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-s3-bucket-naming-requirements.html if (bucket.length() < 3 || bucket.length() > 63) - throw Exception("Bucket name length our of bounds in S3 URI: " + bucket + " (" + uri.toString() + ")", ErrorCodes::BAD_ARGUMENTS); + throw Exception( + "Bucket name length our of bounds in S3 URI: " + bucket + " (" + uri.toString() + ")", ErrorCodes::BAD_ARGUMENTS); if (key.empty() || key == "/") throw Exception("Key name is empty in S3 URI: " + key + " (" + uri.toString() + ")", ErrorCodes::BAD_ARGUMENTS); From 19c1eb03fa09f045026218fab4c18a3dc44fd7f8 Mon Sep 17 00:00:00 2001 From: Pavel Kovalenko Date: Thu, 2 Apr 2020 12:00:03 +0300 Subject: [PATCH 048/484] S3 URI exception message minor fix. --- dbms/IO/S3Common.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dbms/IO/S3Common.cpp b/dbms/IO/S3Common.cpp index c5cd1055111..1de48dc03c5 100644 --- a/dbms/IO/S3Common.cpp +++ b/dbms/IO/S3Common.cpp @@ -130,7 +130,7 @@ namespace S3 /// https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-s3-bucket-naming-requirements.html if (bucket.length() < 3 || bucket.length() > 63) throw Exception( - "Bucket name length our of bounds in S3 URI: " + bucket + " (" + uri.toString() + ")", ErrorCodes::BAD_ARGUMENTS); + "Bucket name length out of bounds in S3 URI: " + bucket + " (" + uri.toString() + ")", ErrorCodes::BAD_ARGUMENTS); /// Remove leading '/' from path to extract key. key = uri.getPath().substr(1); @@ -143,7 +143,7 @@ namespace S3 /// https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-s3-bucket-naming-requirements.html if (bucket.length() < 3 || bucket.length() > 63) throw Exception( - "Bucket name length our of bounds in S3 URI: " + bucket + " (" + uri.toString() + ")", ErrorCodes::BAD_ARGUMENTS); + "Bucket name length out of bounds in S3 URI: " + bucket + " (" + uri.toString() + ")", ErrorCodes::BAD_ARGUMENTS); if (key.empty() || key == "/") throw Exception("Key name is empty in S3 URI: " + key + " (" + uri.toString() + ")", ErrorCodes::BAD_ARGUMENTS); From 3b716b07bad7cd0457008716980a7f68d229cda0 Mon Sep 17 00:00:00 2001 From: Ivan Lezhankin Date: Thu, 2 Apr 2020 12:10:09 +0300 Subject: [PATCH 049/484] Fix broken links --- tests/decimals_dictionary.xml | 2 +- tests/instructions/developer_instruction_en.md | 2 +- tests/instructions/developer_instruction_ru.md | 2 +- tests/ints_dictionary.xml | 2 +- tests/strings_dictionary.xml | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/decimals_dictionary.xml b/tests/decimals_dictionary.xml index a6661ee64e5..15f5b3800b3 120000 --- a/tests/decimals_dictionary.xml +++ b/tests/decimals_dictionary.xml @@ -1 +1 @@ -../../dbms/tests/config/decimals_dictionary.xml \ No newline at end of file +config/decimals_dictionary.xml \ No newline at end of file diff --git a/tests/instructions/developer_instruction_en.md b/tests/instructions/developer_instruction_en.md index 81ffff89507..7ce5ac9b690 120000 --- a/tests/instructions/developer_instruction_en.md +++ b/tests/instructions/developer_instruction_en.md @@ -1 +1 @@ -../../../docs/en/development/developer_instruction.md \ No newline at end of file +../../docs/en/development/developer_instruction.md \ No newline at end of file diff --git a/tests/instructions/developer_instruction_ru.md b/tests/instructions/developer_instruction_ru.md index 9f912ebfec9..3beb31f0d28 120000 --- a/tests/instructions/developer_instruction_ru.md +++ b/tests/instructions/developer_instruction_ru.md 
@@ -1 +1 @@ -../../../docs/ru/development/developer_instruction.md \ No newline at end of file +../../docs/ru/development/developer_instruction.md \ No newline at end of file diff --git a/tests/ints_dictionary.xml b/tests/ints_dictionary.xml index bdbf0690125..1daa76b8267 120000 --- a/tests/ints_dictionary.xml +++ b/tests/ints_dictionary.xml @@ -1 +1 @@ -../../dbms/tests/config/ints_dictionary.xml \ No newline at end of file +config/ints_dictionary.xml \ No newline at end of file diff --git a/tests/strings_dictionary.xml b/tests/strings_dictionary.xml index 603d99ef4e8..be66c1da224 120000 --- a/tests/strings_dictionary.xml +++ b/tests/strings_dictionary.xml @@ -1 +1 @@ -../../dbms/tests/config/strings_dictionary.xml \ No newline at end of file +config/strings_dictionary.xml \ No newline at end of file From d0b90abeee32c73a930585ca3ef93cf242935af8 Mon Sep 17 00:00:00 2001 From: Vladimir Goncharov Date: Thu, 2 Apr 2020 12:30:52 +0300 Subject: [PATCH 050/484] path must be terminated with `/`. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- docs/ru/operations/table_engines/mergetree.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/ru/operations/table_engines/mergetree.md b/docs/ru/operations/table_engines/mergetree.md index 764472605ba..f8bbc983b74 100644 --- a/docs/ru/operations/table_engines/mergetree.md +++ b/docs/ru/operations/table_engines/mergetree.md @@ -494,14 +494,14 @@ ALTER TABLE example_table - <path>/mnt/fast_ssd/clickhouse</path> + <path>/mnt/fast_ssd/clickhouse/</path> - <path>/mnt/hdd1/clickhouse</path> + <path>/mnt/hdd1/clickhouse/</path> <keep_free_space_bytes>10485760</keep_free_space_bytes> - <path>/mnt/hdd2/clickhouse</path> + <path>/mnt/hdd2/clickhouse/</path> <keep_free_space_bytes>10485760</keep_free_space_bytes> From 93907f09856e9de72ad6cc7e33b016aad1353f7f Mon Sep 17 00:00:00 2001 From: alesapin Date: Thu, 2 Apr 2020 13:09:44 +0300 Subject: [PATCH 051/484] Fix reference and move files --- .../queries/0_stateless/01213_alter_rename_column.reference | 0 .../queries/0_stateless/01213_alter_rename_column.sql | 0 .../0_stateless/01213_alter_rename_column_zookeeper.reference | 4 ++-- .../0_stateless/01213_alter_rename_column_zookeeper.sql | 0 .../queries/0_stateless/01213_alter_rename_nested.reference | 0 .../queries/0_stateless/01213_alter_rename_nested.sql | 0 .../01213_alter_rename_primary_key_zookeeper.reference | 0 .../0_stateless/01213_alter_rename_primary_key_zookeeper.sql | 0 .../0_stateless/01213_alter_table_rename_nested.reference | 0 .../queries/0_stateless/01213_alter_table_rename_nested.sql | 0 10 files changed, 2 insertions(+), 2 deletions(-) rename {dbms/tests => tests}/queries/0_stateless/01213_alter_rename_column.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01213_alter_rename_column.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01213_alter_rename_column_zookeeper.reference (70%) rename {dbms/tests => tests}/queries/0_stateless/01213_alter_rename_column_zookeeper.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01213_alter_rename_nested.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01213_alter_rename_nested.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01213_alter_rename_primary_key_zookeeper.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01213_alter_rename_primary_key_zookeeper.sql (100%) rename {dbms/tests => tests}/queries/0_stateless/01213_alter_table_rename_nested.reference (100%) rename
{dbms/tests => tests}/queries/0_stateless/01213_alter_table_rename_nested.sql (100%) diff --git a/dbms/tests/queries/0_stateless/01213_alter_rename_column.reference b/tests/queries/0_stateless/01213_alter_rename_column.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01213_alter_rename_column.reference rename to tests/queries/0_stateless/01213_alter_rename_column.reference diff --git a/dbms/tests/queries/0_stateless/01213_alter_rename_column.sql b/tests/queries/0_stateless/01213_alter_rename_column.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01213_alter_rename_column.sql rename to tests/queries/0_stateless/01213_alter_rename_column.sql diff --git a/dbms/tests/queries/0_stateless/01213_alter_rename_column_zookeeper.reference b/tests/queries/0_stateless/01213_alter_rename_column_zookeeper.reference similarity index 70% rename from dbms/tests/queries/0_stateless/01213_alter_rename_column_zookeeper.reference rename to tests/queries/0_stateless/01213_alter_rename_column_zookeeper.reference index a6c50f985c2..36be6811171 100644 --- a/dbms/tests/queries/0_stateless/01213_alter_rename_column_zookeeper.reference +++ b/tests/queries/0_stateless/01213_alter_rename_column_zookeeper.reference @@ -1,6 +1,6 @@ 1 -CREATE TABLE default.table_for_rename_replicated (`date` Date, `key` UInt64, `value1` String, `value2` String, `value3` String) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/table_for_rename_replicaed\', \'1\') PARTITION BY date ORDER BY key SETTINGS index_granularity = 8192 -CREATE TABLE default.table_for_rename_replicated (`date` Date, `key` UInt64, `renamed_value1` String, `value2` String, `value3` String) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/table_for_rename_replicaed\', \'1\') PARTITION BY date ORDER BY key SETTINGS index_granularity = 8192 +CREATE TABLE default.table_for_rename_replicated (`date` Date, `key` UInt64, `value1` String, `value2` String, `value3` String) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/table_for_rename_replicated\', \'1\') PARTITION BY date ORDER BY key SETTINGS index_granularity = 8192 +CREATE TABLE default.table_for_rename_replicated (`date` Date, `key` UInt64, `renamed_value1` String, `value2` String, `value3` String) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/table_for_rename_replicated\', \'1\') PARTITION BY date ORDER BY key SETTINGS index_granularity = 8192 1 date key renamed_value1 value2 value3 2019-10-02 1 1 1 1 diff --git a/dbms/tests/queries/0_stateless/01213_alter_rename_column_zookeeper.sql b/tests/queries/0_stateless/01213_alter_rename_column_zookeeper.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01213_alter_rename_column_zookeeper.sql rename to tests/queries/0_stateless/01213_alter_rename_column_zookeeper.sql diff --git a/dbms/tests/queries/0_stateless/01213_alter_rename_nested.reference b/tests/queries/0_stateless/01213_alter_rename_nested.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01213_alter_rename_nested.reference rename to tests/queries/0_stateless/01213_alter_rename_nested.reference diff --git a/dbms/tests/queries/0_stateless/01213_alter_rename_nested.sql b/tests/queries/0_stateless/01213_alter_rename_nested.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01213_alter_rename_nested.sql rename to tests/queries/0_stateless/01213_alter_rename_nested.sql diff --git a/dbms/tests/queries/0_stateless/01213_alter_rename_primary_key_zookeeper.reference 
b/tests/queries/0_stateless/01213_alter_rename_primary_key_zookeeper.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01213_alter_rename_primary_key_zookeeper.reference rename to tests/queries/0_stateless/01213_alter_rename_primary_key_zookeeper.reference diff --git a/dbms/tests/queries/0_stateless/01213_alter_rename_primary_key_zookeeper.sql b/tests/queries/0_stateless/01213_alter_rename_primary_key_zookeeper.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01213_alter_rename_primary_key_zookeeper.sql rename to tests/queries/0_stateless/01213_alter_rename_primary_key_zookeeper.sql diff --git a/dbms/tests/queries/0_stateless/01213_alter_table_rename_nested.reference b/tests/queries/0_stateless/01213_alter_table_rename_nested.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01213_alter_table_rename_nested.reference rename to tests/queries/0_stateless/01213_alter_table_rename_nested.reference diff --git a/dbms/tests/queries/0_stateless/01213_alter_table_rename_nested.sql b/tests/queries/0_stateless/01213_alter_table_rename_nested.sql similarity index 100% rename from dbms/tests/queries/0_stateless/01213_alter_table_rename_nested.sql rename to tests/queries/0_stateless/01213_alter_table_rename_nested.sql From 3cacef6b4478860639728b7d5cd64d16ea394bcc Mon Sep 17 00:00:00 2001 From: Vasily Nemkov Date: Thu, 2 Apr 2020 13:53:13 +0300 Subject: [PATCH 052/484] Fixed final garbage token. Also, added getTrailingZeroBitsUnsafe(), updated comments and cleaned up test cases --- dbms/src/Common/BitHelpers.h | 15 +++++--- .../MergeTree/MergeTreeIndexFullText.cpp | 34 ++++++++++++------- .../tests/gtest_SplitTokenExtractor.cpp | 24 +++++++++---- 3 files changed, 49 insertions(+), 24 deletions(-) diff --git a/dbms/src/Common/BitHelpers.h b/dbms/src/Common/BitHelpers.h index ba6a4c60a49..bc6d7413def 100644 --- a/dbms/src/Common/BitHelpers.h +++ b/dbms/src/Common/BitHelpers.h @@ -53,12 +53,10 @@ inline size_t getLeadingZeroBits(T x) } } +// Unsafe since the __builtin_ctz() family explicitly states that the result is undefined on x == 0 template -inline size_t getTrailingZeroBits(T x) +inline size_t getTrailingZeroBitsUnsafe(T x) { - if (!x) - return sizeof(x) * 8; - if constexpr (sizeof(T) <= sizeof(unsigned int)) { return __builtin_ctz(x); @@ -73,6 +71,15 @@ inline size_t getTrailingZeroBits(T x) } } +template +inline size_t getTrailingZeroBits(T x) +{ + if (!x) + return sizeof(x) * 8; + + return getTrailingZeroBitsUnsafe(x); +} + /** Returns a mask that has '1' for `bits` LSB set: * maskLowBits(3) => 00000111 */ diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexFullText.cpp b/dbms/src/Storages/MergeTree/MergeTreeIndexFullText.cpp index 68d67a0c787..af979010dc0 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeIndexFullText.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeIndexFullText.cpp @@ -614,7 +614,7 @@ bool SplitTokenExtractor::next(const char * data, size_t len, size_t * pos, size while (*pos < len) { #if defined(__SSE2__) - // NOTE: we assume that `data` string is padded from the right with 15 zero-bytes. + // NOTE: we assume that `data` string is padded from the right with 15 bytes.
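The BitHelpers split above is worth spelling out: __builtin_ctz() has undefined behaviour for a zero argument, so the guarded getTrailingZeroBits() keeps the zero check, while getTrailingZeroBitsUnsafe() serves call sites that have already proven the argument non-zero and can skip the branch. A minimal compilable sketch of the same idea (assuming GCC/Clang builtins; the non-template trailingZeroBits() helper is invented for illustration):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    /// Guarded trailing-zero count: __builtin_ctz() is undefined when its
    /// argument is 0, so that case must be handled before calling the builtin.
    inline size_t trailingZeroBits(uint32_t x)
    {
        if (!x)
            return sizeof(x) * 8;   /// by convention, a zero word has 32 "trailing zeros"
        return __builtin_ctz(x);    /// safe here: x is known to be non-zero
    }

    int main()
    {
        printf("%zu\n", trailingZeroBits(0b1000u));  /// prints 3
        printf("%zu\n", trailingZeroBits(0));        /// prints 32 instead of undefined behaviour
    }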
const __m128i haystack = _mm_loadu_si128(reinterpret_cast<const __m128i *>(data + *pos)); const size_t haystack_length = 16; @@ -632,40 +632,40 @@ bool SplitTokenExtractor::next(const char * data, size_t len, size_t * pos, size static const auto alpha_lower_end = _mm_set1_epi8('z' + 1); static const auto alpha_upper_begin = _mm_set1_epi8('A' - 1); static const auto alpha_upper_end = _mm_set1_epi8('Z' + 1); - static const auto zero = _mm_set1_epi8(0); + static const auto zero = _mm_set1_epi8(0); // every bit represents if `haystack` character `c` satisfies condition: // (c < 0) || (c > '0' - 1 && c < '9' + 1) || (c > 'a' - 1 && c < 'z' + 1) || (c > 'A' - 1 && c < 'Z' + 1) - // < 0 since _mm_cmplt_epi8 treats chars as SIGNED, and hence all chars > 0x80 are negative. + // < 0 since _mm_cmplt_epi8 treats chars as SIGNED, and so all chars > 0x80 are negative. const int result_bitmask = _mm_movemask_epi8(_mm_or_si128(_mm_or_si128(_mm_or_si128( _mm_cmplt_epi8(haystack, zero), _mm_and_si128(_mm_cmpgt_epi8(haystack, number_begin), _mm_cmplt_epi8(haystack, number_end))), _mm_and_si128(_mm_cmpgt_epi8(haystack, alpha_lower_begin), _mm_cmplt_epi8(haystack, alpha_lower_end))), _mm_and_si128(_mm_cmpgt_epi8(haystack, alpha_upper_begin), _mm_cmplt_epi8(haystack, alpha_upper_end)))); #endif - // NOTE: __builtin_ctz family explicitly state that result is UNDEFINED if argument is 0 if (result_bitmask == 0) { - // end of token started on previous haystack if (*token_len != 0) + // end of token started on previous haystack return true; *pos += haystack_length; continue; } - const auto start = getTrailingZeroBits(result_bitmask); + const auto token_start_pos_in_current_haystack = getTrailingZeroBitsUnsafe(result_bitmask); if (*token_len == 0) - *token_start = *pos + start; - else if (start != 0) - // token is not continued in this haystack + // new token + *token_start = *pos + token_start_pos_in_current_haystack; + else if (token_start_pos_in_current_haystack != 0) + // end of token starting in one of previous haystacks return true; - const auto l = getTrailingZeroBits(~(result_bitmask >> start)); - *token_len += l; + const auto token_bytes_in_current_haystack = getTrailingZeroBitsUnsafe(~(result_bitmask >> token_start_pos_in_current_haystack)); + *token_len += token_bytes_in_current_haystack; - *pos += start + l; - if (start + l == haystack_length) + *pos += token_start_pos_in_current_haystack + token_bytes_in_current_haystack; + if (token_start_pos_in_current_haystack + token_bytes_in_current_haystack == haystack_length) // check if there are leftovers in next `haystack` continue; @@ -686,6 +686,14 @@ bool SplitTokenExtractor::next(const char * data, size_t len, size_t * pos, size } #endif }
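The renamed variables make the bookkeeping clearer, but the SIMD form still hides the idea: classify all 16 bytes at once, collapse the comparison results into a bitmask with _mm_movemask_epi8, then use trailing-zero counts to find where a token starts and how long its run is. A rough scalar model of a single 16-byte step (illustrative only; scanChunk is not the committed code):

    #include <cctype>
    #include <cstddef>
    #include <cstdint>

    /// Scalar model of one SSE2 step: build a bitmask in which bit i is set
    /// when byte i may belong to a token (alphanumeric or >= 0x80), then use
    /// trailing-zero counts to locate the token's start and its run length.
    static bool scanChunk(const unsigned char * chunk, size_t chunk_len,
                          size_t & start, size_t & run)
    {
        uint32_t mask = 0;
        for (size_t i = 0; i < chunk_len; ++i)
            if (chunk[i] >= 0x80 || std::isalnum(chunk[i]))
                mask |= uint32_t(1) << i;

        if (mask == 0)
            return false;                       /// no token bytes in this chunk

        start = __builtin_ctz(mask);            /// index of the first token byte
        run = __builtin_ctz(~(mask >> start));  /// length of the run of set bits
        return true;
    }

    /// The real code accumulates the run length across chunks, so a token that
    /// touches the chunk boundary (start + run == 16) continues into the next one.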
+ if (*token_start > len) + return false; + *token_len = len - *token_start; +#endif + return *token_len > 0; } diff --git a/dbms/src/Storages/tests/gtest_SplitTokenExtractor.cpp b/dbms/src/Storages/tests/gtest_SplitTokenExtractor.cpp index 2cd20a70821..b8686f962bc 100644 --- a/dbms/src/Storages/tests/gtest_SplitTokenExtractor.cpp +++ b/dbms/src/Storages/tests/gtest_SplitTokenExtractor.cpp @@ -55,13 +55,16 @@ TEST_P(SplitTokenExtractorTest, next) for (const auto & expected_token : param.tokens) { SCOPED_TRACE(++i); - EXPECT_TRUE(token_extractor.next(data->data(), data->size(), &pos, &token_start, &token_len)); + ASSERT_TRUE(token_extractor.next(data->data(), data->size(), &pos, &token_start, &token_len)); EXPECT_EQ(expected_token, param.source.substr(token_start, token_len)) << " token_start:" << token_start << " token_len: " << token_len; } + + ASSERT_FALSE(token_extractor.next(data->data(), data->size(), &pos, &token_start, &token_len)); } -#define BINARY_STRING(str) std::string{str, sizeof(str)-1} +// Helper to allow strings with embedded '\0' chars. +#define BINARY_STRING(str) std::string{str, sizeof(str) - 1} INSTANTIATE_TEST_SUITE_P(ShortSingleToken, SplitTokenExtractorTest, @@ -89,13 +92,13 @@ INSTANTIATE_TEST_SUITE_P(UTF8, ::testing::ValuesIn(std::initializer_list{ { "Single token with mixed ASCII and UTF-8 chars", - BINARY_STRING("abc\u0442" "123\u0447XYZ\u043A"), + "abc\u0442" "123\u0447XYZ\u043A", {"abc\u0442" "123\u0447XYZ\u043A"} }, { "Multiple UTF-8 tokens", - BINARY_STRING("\u043F\u0440\u0438\u0432\u0435\u0442, u043C\u0438\u0440!"), - {"\u043F\u0440\u0438\u0432\u0435\u0442", "u043C\u0438\u0440"} + "\u043F\u0440\u0438\u0432\u0435\u0442, \u043C\u0438\u0440!", + {"\u043F\u0440\u0438\u0432\u0435\u0442", "\u043C\u0438\u0440"} }, }) ); @@ -105,7 +108,7 @@ INSTANTIATE_TEST_SUITE_P(MultipleTokens, ::testing::ValuesIn(std::initializer_list{ { "Multiple tokens separated by whitespace", - BINARY_STRING("\nabc 123\tXYZ\r"), + "\nabc 123\tXYZ\r", { "abc", "123", "XYZ" } @@ -130,7 +133,14 @@ INSTANTIATE_TEST_SUITE_P(MultipleTokens, "\xdc\xdd\xde\xdf\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0" "\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"), { - "0123456789", "ABCDEFGHIJKLMNOPQRSTUVWXYZ", "abcdefghijklmnopqrstuvwxyz" + "0123456789", "ABCDEFGHIJKLMNOPQRSTUVWXYZ", "abcdefghijklmnopqrstuvwxyz", + "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c" + "\x9d\x9e\x9f\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf\xb0\xb1" + "\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0\xc1\xc2\xc3\xc4\xc5\xc6" + "\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb" + "\xdc\xdd\xde\xdf\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0" + "\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff" } } }) From 266cfc52f76b9fd98206b330e2c1b82fbf39cb1c Mon Sep 17 00:00:00 2001 From: Ivan Lezhankin Date: Thu, 2 Apr 2020 14:44:09 +0300 Subject: [PATCH 053/484] Fix splitted build on master --- dbms/Functions/FunctionsStringSearch.cpp | 709 ----------------------- 1 file changed, 709 deletions(-) delete mode 100644 dbms/Functions/FunctionsStringSearch.cpp diff --git a/dbms/Functions/FunctionsStringSearch.cpp b/dbms/Functions/FunctionsStringSearch.cpp deleted file mode 100644 index a7e4cf4f8b1..00000000000 --- a/dbms/Functions/FunctionsStringSearch.cpp +++ /dev/null @@ -1,709 +0,0 @@ -#include "FunctionsStringSearch.h" - 
-#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace DB -{ -namespace ErrorCodes -{ - extern const int ILLEGAL_COLUMN; -} -/** Implementation details for functions of 'position' family depending on ASCII/UTF8 and case sensitiveness. - */ -struct PositionCaseSensitiveASCII -{ - /// For searching single substring inside big-enough contiguous chunk of data. Coluld have slightly expensive initialization. - using SearcherInBigHaystack = Volnitsky; - - /// For search many substrings in one string - using MultiSearcherInBigHaystack = MultiVolnitsky; - - /// For searching single substring, that is different each time. This object is created for each row of data. It must have cheap initialization. - using SearcherInSmallHaystack = LibCASCIICaseSensitiveStringSearcher; - - static SearcherInBigHaystack createSearcherInBigHaystack(const char * needle_data, size_t needle_size, size_t haystack_size_hint) - { - return SearcherInBigHaystack(needle_data, needle_size, haystack_size_hint); - } - - static SearcherInSmallHaystack createSearcherInSmallHaystack(const char * needle_data, size_t needle_size) - { - return SearcherInSmallHaystack(needle_data, needle_size); - } - - static MultiSearcherInBigHaystack createMultiSearcherInBigHaystack(const std::vector & needles) - { - return MultiSearcherInBigHaystack(needles); - } - - /// Number of code points between 'begin' and 'end' (this has different behaviour for ASCII and UTF-8). - static size_t countChars(const char * begin, const char * end) { return end - begin; } - - /// Convert string to lowercase. Only for case-insensitive search. - /// Implementation is permitted to be inefficient because it is called for single string. - static void toLowerIfNeed(std::string &) { } -}; - -struct PositionCaseInsensitiveASCII -{ - /// `Volnitsky` is not used here, because one person has measured that this is better. It will be good if you question it. 
- using SearcherInBigHaystack = ASCIICaseInsensitiveStringSearcher; - using MultiSearcherInBigHaystack = MultiVolnitskyCaseInsensitive; - using SearcherInSmallHaystack = LibCASCIICaseInsensitiveStringSearcher; - - static SearcherInBigHaystack createSearcherInBigHaystack(const char * needle_data, size_t needle_size, size_t /*haystack_size_hint*/) - { - return SearcherInBigHaystack(needle_data, needle_size); - } - - static SearcherInSmallHaystack createSearcherInSmallHaystack(const char * needle_data, size_t needle_size) - { - return SearcherInSmallHaystack(needle_data, needle_size); - } - - static MultiSearcherInBigHaystack createMultiSearcherInBigHaystack(const std::vector & needles) - { - return MultiSearcherInBigHaystack(needles); - } - - static size_t countChars(const char * begin, const char * end) { return end - begin; } - - static void toLowerIfNeed(std::string & s) { std::transform(std::begin(s), std::end(s), std::begin(s), tolower); } -}; - -struct PositionCaseSensitiveUTF8 -{ - using SearcherInBigHaystack = VolnitskyUTF8; - using MultiSearcherInBigHaystack = MultiVolnitskyUTF8; - using SearcherInSmallHaystack = LibCASCIICaseSensitiveStringSearcher; - - static SearcherInBigHaystack createSearcherInBigHaystack(const char * needle_data, size_t needle_size, size_t haystack_size_hint) - { - return SearcherInBigHaystack(needle_data, needle_size, haystack_size_hint); - } - - static SearcherInSmallHaystack createSearcherInSmallHaystack(const char * needle_data, size_t needle_size) - { - return SearcherInSmallHaystack(needle_data, needle_size); - } - - static MultiSearcherInBigHaystack createMultiSearcherInBigHaystack(const std::vector & needles) - { - return MultiSearcherInBigHaystack(needles); - } - - static size_t countChars(const char * begin, const char * end) - { - size_t res = 0; - for (auto it = begin; it != end; ++it) - if (!UTF8::isContinuationOctet(static_cast(*it))) - ++res; - return res; - } - - static void toLowerIfNeed(std::string &) { } -}; - -struct PositionCaseInsensitiveUTF8 -{ - using SearcherInBigHaystack = VolnitskyCaseInsensitiveUTF8; - using MultiSearcherInBigHaystack = MultiVolnitskyCaseInsensitiveUTF8; - using SearcherInSmallHaystack = UTF8CaseInsensitiveStringSearcher; /// TODO Very suboptimal. - - static SearcherInBigHaystack createSearcherInBigHaystack(const char * needle_data, size_t needle_size, size_t haystack_size_hint) - { - return SearcherInBigHaystack(needle_data, needle_size, haystack_size_hint); - } - - static SearcherInSmallHaystack createSearcherInSmallHaystack(const char * needle_data, size_t needle_size) - { - return SearcherInSmallHaystack(needle_data, needle_size); - } - - static MultiSearcherInBigHaystack createMultiSearcherInBigHaystack(const std::vector & needles) - { - return MultiSearcherInBigHaystack(needles); - } - - static size_t countChars(const char * begin, const char * end) - { - size_t res = 0; - for (auto it = begin; it != end; ++it) - if (!UTF8::isContinuationOctet(static_cast(*it))) - ++res; - return res; - } - - static void toLowerIfNeed(std::string & s) { Poco::UTF8::toLowerInPlace(s); } -}; - -template -struct PositionImpl -{ - static constexpr bool use_default_implementation_for_constants = false; - - using ResultType = UInt64; - - /// Find one substring in many strings. 
- static void vectorConstant( - const ColumnString::Chars & data, const ColumnString::Offsets & offsets, const std::string & needle, PaddedPODArray & res) - { - const UInt8 * begin = data.data(); - const UInt8 * pos = begin; - const UInt8 * end = pos + data.size(); - - /// Current index in the array of strings. - size_t i = 0; - - typename Impl::SearcherInBigHaystack searcher = Impl::createSearcherInBigHaystack(needle.data(), needle.size(), end - pos); - - /// We will search for the next occurrence in all strings at once. - while (pos < end && end != (pos = searcher.search(pos, end - pos))) - { - /// Determine which index it refers to. - while (begin + offsets[i] <= pos) - { - res[i] = 0; - ++i; - } - - /// We check that the entry does not pass through the boundaries of strings. - if (pos + needle.size() < begin + offsets[i]) - res[i] = 1 + Impl::countChars(reinterpret_cast(begin + offsets[i - 1]), reinterpret_cast(pos)); - else - res[i] = 0; - - pos = begin + offsets[i]; - ++i; - } - - if (i < res.size()) - memset(&res[i], 0, (res.size() - i) * sizeof(res[0])); - } - - /// Search for substring in string. - static void constantConstant(std::string data, std::string needle, UInt64 & res) - { - Impl::toLowerIfNeed(data); - Impl::toLowerIfNeed(needle); - - res = data.find(needle); - if (res == std::string::npos) - res = 0; - else - res = 1 + Impl::countChars(data.data(), data.data() + res); - } - - /// Search each time for a different single substring inside each time different string. - static void vectorVector( - const ColumnString::Chars & haystack_data, - const ColumnString::Offsets & haystack_offsets, - const ColumnString::Chars & needle_data, - const ColumnString::Offsets & needle_offsets, - PaddedPODArray & res) - { - ColumnString::Offset prev_haystack_offset = 0; - ColumnString::Offset prev_needle_offset = 0; - - size_t size = haystack_offsets.size(); - - for (size_t i = 0; i < size; ++i) - { - size_t needle_size = needle_offsets[i] - prev_needle_offset - 1; - size_t haystack_size = haystack_offsets[i] - prev_haystack_offset - 1; - - if (0 == needle_size) - { - /// An empty string is always at the very beginning of `haystack`. - res[i] = 1; - } - else - { - /// It is assumed that the StringSearcher is not very difficult to initialize. - typename Impl::SearcherInSmallHaystack searcher = Impl::createSearcherInSmallHaystack( - reinterpret_cast(&needle_data[prev_needle_offset]), - needle_offsets[i] - prev_needle_offset - 1); /// zero byte at the end - - /// searcher returns a pointer to the found substring or to the end of `haystack`. - size_t pos = searcher.search(&haystack_data[prev_haystack_offset], &haystack_data[haystack_offsets[i] - 1]) - - &haystack_data[prev_haystack_offset]; - - if (pos != haystack_size) - { - res[i] = 1 - + Impl::countChars( - reinterpret_cast(&haystack_data[prev_haystack_offset]), - reinterpret_cast(&haystack_data[prev_haystack_offset + pos])); - } - else - res[i] = 0; - } - - prev_haystack_offset = haystack_offsets[i]; - prev_needle_offset = needle_offsets[i]; - } - } - - /// Find many substrings in single string. - static void constantVector( - const String & haystack, - const ColumnString::Chars & needle_data, - const ColumnString::Offsets & needle_offsets, - PaddedPODArray & res) - { - // NOTE You could use haystack indexing. But this is a rare case. 
- - ColumnString::Offset prev_needle_offset = 0; - - size_t size = needle_offsets.size(); - - for (size_t i = 0; i < size; ++i) - { - size_t needle_size = needle_offsets[i] - prev_needle_offset - 1; - - if (0 == needle_size) - { - res[i] = 1; - } - else - { - typename Impl::SearcherInSmallHaystack searcher = Impl::createSearcherInSmallHaystack( - reinterpret_cast(&needle_data[prev_needle_offset]), needle_offsets[i] - prev_needle_offset - 1); - - size_t pos = searcher.search( - reinterpret_cast(haystack.data()), - reinterpret_cast(haystack.data()) + haystack.size()) - - reinterpret_cast(haystack.data()); - - if (pos != haystack.size()) - { - res[i] = 1 + Impl::countChars(haystack.data(), haystack.data() + pos); - } - else - res[i] = 0; - } - - prev_needle_offset = needle_offsets[i]; - } - } - - template - static void vectorFixedConstant(Args &&...) - { - throw Exception("Functions 'position' don't support FixedString haystack argument", ErrorCodes::ILLEGAL_COLUMN); - } -}; - -template -struct MultiSearchAllPositionsImpl -{ - using ResultType = UInt64; - - static void vectorConstant( - const ColumnString::Chars & haystack_data, - const ColumnString::Offsets & haystack_offsets, - const std::vector & needles, - PaddedPODArray & res) - { - auto res_callback = [](const UInt8 * start, const UInt8 * end) -> UInt64 - { - return 1 + Impl::countChars(reinterpret_cast(start), reinterpret_cast(end)); - }; - - auto searcher = Impl::createMultiSearcherInBigHaystack(needles); - - const size_t haystack_string_size = haystack_offsets.size(); - const size_t needles_size = needles.size(); - - /// Something can be uninitialized after the search itself - std::fill(res.begin(), res.end(), 0); - - while (searcher.hasMoreToSearch()) - { - size_t prev_offset = 0; - for (size_t j = 0, from = 0; j < haystack_string_size; ++j, from += needles_size) - { - const auto * haystack = &haystack_data[prev_offset]; - const auto * haystack_end = haystack + haystack_offsets[j] - prev_offset - 1; - searcher.searchOneAll(haystack, haystack_end, res.data() + from, res_callback); - prev_offset = haystack_offsets[j]; - } - } - } -}; - -template -struct MultiSearchImpl -{ - using ResultType = UInt8; - static constexpr bool is_using_hyperscan = false; - /// Variable for understanding, if we used offsets for the output, most - /// likely to determine whether the function returns ColumnVector of ColumnArray. 
- static constexpr bool is_column_array = false; - static auto getReturnType() { return std::make_shared>(); } - - static void vectorConstant( - const ColumnString::Chars & haystack_data, - const ColumnString::Offsets & haystack_offsets, - const std::vector & needles, - PaddedPODArray & res, - [[maybe_unused]] PaddedPODArray & offsets) - { - auto searcher = Impl::createMultiSearcherInBigHaystack(needles); - const size_t haystack_string_size = haystack_offsets.size(); - res.resize(haystack_string_size); - size_t iteration = 0; - while (searcher.hasMoreToSearch()) - { - size_t prev_offset = 0; - for (size_t j = 0; j < haystack_string_size; ++j) - { - const auto * haystack = &haystack_data[prev_offset]; - const auto * haystack_end = haystack + haystack_offsets[j] - prev_offset - 1; - if (iteration == 0 || !res[j]) - res[j] = searcher.searchOne(haystack, haystack_end); - prev_offset = haystack_offsets[j]; - } - ++iteration; - } - } -}; - -template -struct MultiSearchFirstPositionImpl -{ - using ResultType = UInt64; - static constexpr bool is_using_hyperscan = false; - /// Variable for understanding, if we used offsets for the output, most - /// likely to determine whether the function returns ColumnVector of ColumnArray. - static constexpr bool is_column_array = false; - static auto getReturnType() { return std::make_shared>(); } - - static void vectorConstant( - const ColumnString::Chars & haystack_data, - const ColumnString::Offsets & haystack_offsets, - const std::vector & needles, - PaddedPODArray & res, - [[maybe_unused]] PaddedPODArray & offsets) - { - auto res_callback = [](const UInt8 * start, const UInt8 * end) -> UInt64 - { - return 1 + Impl::countChars(reinterpret_cast(start), reinterpret_cast(end)); - }; - auto searcher = Impl::createMultiSearcherInBigHaystack(needles); - const size_t haystack_string_size = haystack_offsets.size(); - res.resize(haystack_string_size); - size_t iteration = 0; - while (searcher.hasMoreToSearch()) - { - size_t prev_offset = 0; - for (size_t j = 0; j < haystack_string_size; ++j) - { - const auto * haystack = &haystack_data[prev_offset]; - const auto * haystack_end = haystack + haystack_offsets[j] - prev_offset - 1; - if (iteration == 0 || res[j] == 0) - res[j] = searcher.searchOneFirstPosition(haystack, haystack_end, res_callback); - else - { - UInt64 result = searcher.searchOneFirstPosition(haystack, haystack_end, res_callback); - if (result != 0) - res[j] = std::min(result, res[j]); - } - prev_offset = haystack_offsets[j]; - } - ++iteration; - } - } -}; - -template -struct MultiSearchFirstIndexImpl -{ - using ResultType = UInt64; - static constexpr bool is_using_hyperscan = false; - /// Variable for understanding, if we used offsets for the output, most - /// likely to determine whether the function returns ColumnVector of ColumnArray. 
- static constexpr bool is_column_array = false; - static auto getReturnType() { return std::make_shared>(); } - - static void vectorConstant( - const ColumnString::Chars & haystack_data, - const ColumnString::Offsets & haystack_offsets, - const std::vector & needles, - PaddedPODArray & res, - [[maybe_unused]] PaddedPODArray & offsets) - { - auto searcher = Impl::createMultiSearcherInBigHaystack(needles); - const size_t haystack_string_size = haystack_offsets.size(); - res.resize(haystack_string_size); - size_t iteration = 0; - while (searcher.hasMoreToSearch()) - { - size_t prev_offset = 0; - for (size_t j = 0; j < haystack_string_size; ++j) - { - const auto * haystack = &haystack_data[prev_offset]; - const auto * haystack_end = haystack + haystack_offsets[j] - prev_offset - 1; - /// hasMoreToSearch traverse needles in increasing order - if (iteration == 0 || res[j] == 0) - res[j] = searcher.searchOneFirstIndex(haystack, haystack_end); - prev_offset = haystack_offsets[j]; - } - ++iteration; - } - } -}; - -/** Token search the string, means that needle must be surrounded by some separator chars, like whitespace or puctuation. - */ -template -struct HasTokenImpl -{ - using ResultType = UInt8; - - static constexpr bool use_default_implementation_for_constants = true; - - static void vectorConstant( - const ColumnString::Chars & data, const ColumnString::Offsets & offsets, const std::string & pattern, PaddedPODArray & res) - { - if (offsets.empty()) - return; - - const UInt8 * begin = data.data(); - const UInt8 * pos = begin; - const UInt8 * end = pos + data.size(); - - /// The current index in the array of strings. - size_t i = 0; - - TokenSearcher searcher(pattern.data(), pattern.size(), end - pos); - - /// We will search for the next occurrence in all rows at once. - while (pos < end && end != (pos = searcher.search(pos, end - pos))) - { - /// Let's determine which index it refers to. - while (begin + offsets[i] <= pos) - { - res[i] = negate_result; - ++i; - } - - /// We check that the entry does not pass through the boundaries of strings. - if (pos + pattern.size() < begin + offsets[i]) - res[i] = !negate_result; - else - res[i] = negate_result; - - pos = begin + offsets[i]; - ++i; - } - - /// Tail, in which there can be no substring. - if (i < res.size()) - memset(&res[i], negate_result, (res.size() - i) * sizeof(res[0])); - } - - template - static void vectorVector(Args &&...) - { - throw Exception("Function 'hasToken' does not support non-constant needle argument", ErrorCodes::ILLEGAL_COLUMN); - } - - /// Search different needles in single haystack. - template - static void constantVector(Args &&...) - { - throw Exception("Function 'hasToken' does not support non-constant needle argument", ErrorCodes::ILLEGAL_COLUMN); - } - - template - static void vectorFixedConstant(Args &&...) 
- { - throw Exception("Functions 'hasToken' don't support FixedString haystack argument", ErrorCodes::ILLEGAL_COLUMN); - } -}; - - -struct NamePosition -{ - static constexpr auto name = "position"; -}; -struct NamePositionUTF8 -{ - static constexpr auto name = "positionUTF8"; -}; -struct NamePositionCaseInsensitive -{ - static constexpr auto name = "positionCaseInsensitive"; -}; -struct NamePositionCaseInsensitiveUTF8 -{ - static constexpr auto name = "positionCaseInsensitiveUTF8"; -}; -struct NameMultiSearchAllPositions -{ - static constexpr auto name = "multiSearchAllPositions"; -}; -struct NameMultiSearchAllPositionsUTF8 -{ - static constexpr auto name = "multiSearchAllPositionsUTF8"; -}; -struct NameMultiSearchAllPositionsCaseInsensitive -{ - static constexpr auto name = "multiSearchAllPositionsCaseInsensitive"; -}; -struct NameMultiSearchAllPositionsCaseInsensitiveUTF8 -{ - static constexpr auto name = "multiSearchAllPositionsCaseInsensitiveUTF8"; -}; -struct NameMultiSearchAny -{ - static constexpr auto name = "multiSearchAny"; -}; -struct NameMultiSearchAnyUTF8 -{ - static constexpr auto name = "multiSearchAnyUTF8"; -}; -struct NameMultiSearchAnyCaseInsensitive -{ - static constexpr auto name = "multiSearchAnyCaseInsensitive"; -}; -struct NameMultiSearchAnyCaseInsensitiveUTF8 -{ - static constexpr auto name = "multiSearchAnyCaseInsensitiveUTF8"; -}; -struct NameMultiSearchFirstIndex -{ - static constexpr auto name = "multiSearchFirstIndex"; -}; -struct NameMultiSearchFirstIndexUTF8 -{ - static constexpr auto name = "multiSearchFirstIndexUTF8"; -}; -struct NameMultiSearchFirstIndexCaseInsensitive -{ - static constexpr auto name = "multiSearchFirstIndexCaseInsensitive"; -}; -struct NameMultiSearchFirstIndexCaseInsensitiveUTF8 -{ - static constexpr auto name = "multiSearchFirstIndexCaseInsensitiveUTF8"; -}; -struct NameMultiSearchFirstPosition -{ - static constexpr auto name = "multiSearchFirstPosition"; -}; -struct NameMultiSearchFirstPositionUTF8 -{ - static constexpr auto name = "multiSearchFirstPositionUTF8"; -}; -struct NameMultiSearchFirstPositionCaseInsensitive -{ - static constexpr auto name = "multiSearchFirstPositionCaseInsensitive"; -}; -struct NameMultiSearchFirstPositionCaseInsensitiveUTF8 -{ - static constexpr auto name = "multiSearchFirstPositionCaseInsensitiveUTF8"; -}; - -struct NameHasToken -{ - static constexpr auto name = "hasToken"; -}; - -struct NameHasTokenCaseInsensitive -{ - static constexpr auto name = "hasTokenCaseInsensitive"; -}; - - -using FunctionPosition = FunctionsStringSearch, NamePosition>; -using FunctionPositionUTF8 = FunctionsStringSearch, NamePositionUTF8>; -using FunctionPositionCaseInsensitive = FunctionsStringSearch, NamePositionCaseInsensitive>; -using FunctionPositionCaseInsensitiveUTF8 - = FunctionsStringSearch, NamePositionCaseInsensitiveUTF8>; - -using FunctionMultiSearchAllPositions - = FunctionsMultiStringPosition, NameMultiSearchAllPositions>; -using FunctionMultiSearchAllPositionsUTF8 - = FunctionsMultiStringPosition, NameMultiSearchAllPositionsUTF8>; -using FunctionMultiSearchAllPositionsCaseInsensitive - = FunctionsMultiStringPosition, NameMultiSearchAllPositionsCaseInsensitive>; -using FunctionMultiSearchAllPositionsCaseInsensitiveUTF8 = FunctionsMultiStringPosition< - MultiSearchAllPositionsImpl, - NameMultiSearchAllPositionsCaseInsensitiveUTF8>; - -using FunctionMultiSearch = FunctionsMultiStringSearch, NameMultiSearchAny>; -using FunctionMultiSearchUTF8 = FunctionsMultiStringSearch, NameMultiSearchAnyUTF8>; -using 
FunctionMultiSearchCaseInsensitive - = FunctionsMultiStringSearch, NameMultiSearchAnyCaseInsensitive>; -using FunctionMultiSearchCaseInsensitiveUTF8 - = FunctionsMultiStringSearch, NameMultiSearchAnyCaseInsensitiveUTF8>; - -using FunctionMultiSearchFirstIndex - = FunctionsMultiStringSearch, NameMultiSearchFirstIndex>; -using FunctionMultiSearchFirstIndexUTF8 - = FunctionsMultiStringSearch, NameMultiSearchFirstIndexUTF8>; -using FunctionMultiSearchFirstIndexCaseInsensitive - = FunctionsMultiStringSearch, NameMultiSearchFirstIndexCaseInsensitive>; -using FunctionMultiSearchFirstIndexCaseInsensitiveUTF8 - = FunctionsMultiStringSearch, NameMultiSearchFirstIndexCaseInsensitiveUTF8>; - -using FunctionMultiSearchFirstPosition - = FunctionsMultiStringSearch, NameMultiSearchFirstPosition>; -using FunctionMultiSearchFirstPositionUTF8 - = FunctionsMultiStringSearch, NameMultiSearchFirstPositionUTF8>; -using FunctionMultiSearchFirstPositionCaseInsensitive - = FunctionsMultiStringSearch, NameMultiSearchFirstPositionCaseInsensitive>; -using FunctionMultiSearchFirstPositionCaseInsensitiveUTF8 = FunctionsMultiStringSearch< - MultiSearchFirstPositionImpl, - NameMultiSearchFirstPositionCaseInsensitiveUTF8>; - -using FunctionHasToken = FunctionsStringSearch, NameHasToken>; -using FunctionHasTokenCaseInsensitive - = FunctionsStringSearch, NameHasTokenCaseInsensitive>; - -void registerFunctionsStringSearch(FunctionFactory & factory) -{ - factory.registerFunction(FunctionFactory::CaseInsensitive); - factory.registerFunction(); - factory.registerFunction(); - factory.registerFunction(); - - factory.registerFunction(); - factory.registerFunction(); - factory.registerFunction(); - factory.registerFunction(); - - factory.registerFunction(); - factory.registerFunction(); - factory.registerFunction(); - factory.registerFunction(); - - factory.registerFunction(); - factory.registerFunction(); - factory.registerFunction(); - factory.registerFunction(); - - factory.registerFunction(); - factory.registerFunction(); - factory.registerFunction(); - factory.registerFunction(); - - factory.registerFunction(); - factory.registerFunction(); - - factory.registerAlias("locate", NamePosition::name, FunctionFactory::CaseInsensitive); -} -} From 1ff7c2ac432c4d5414303012ab7b2d754676bd7e Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Wed, 1 Apr 2020 23:16:50 +0300 Subject: [PATCH 054/484] Fix parallel distributed INSERT SELECT for remote table. 
--- dbms/Interpreters/InterpreterInsertQuery.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dbms/Interpreters/InterpreterInsertQuery.cpp b/dbms/Interpreters/InterpreterInsertQuery.cpp index b4280ee20e6..39b99b10c0d 100644 --- a/dbms/Interpreters/InterpreterInsertQuery.cpp +++ b/dbms/Interpreters/InterpreterInsertQuery.cpp @@ -176,7 +176,7 @@ BlockIO InterpreterInsertQuery::execute() "Expected exactly one connection for shard " + toString(shard_info.shard_num), ErrorCodes::LOGICAL_ERROR); /// INSERT SELECT query returns empty block - auto in_stream = std::make_shared(*connections.front(), new_query_str, Block{}, context); + auto in_stream = std::make_shared(std::move(connections), new_query_str, Block{}, context); in_streams.push_back(in_stream); } out_streams.push_back(std::make_shared(Block())); From 5b133dd1ce7dcdd457a50219f4f76947933bd698 Mon Sep 17 00:00:00 2001 From: alesapin Date: Thu, 2 Apr 2020 19:11:10 +0300 Subject: [PATCH 055/484] Add default columns alter --- .../Interpreters/RenameColumnVisitor.cpp | 0 .../Interpreters/RenameColumnVisitor.h | 0 dbms/Storages/AlterCommands.cpp | 14 +++++ dbms/Storages/MergeTree/MergeTreeData.cpp | 13 ++-- dbms/Storages/MergeTree/MergeTreeData.h | 5 +- dbms/Storages/StorageMergeTree.cpp | 2 +- dbms/Storages/StorageReplicatedMergeTree.cpp | 2 +- ...er_rename_with_default_zookeeper.reference | 17 +++++ ...13_alter_rename_with_default_zookeeper.sql | 63 +++++++++++++++++++ 9 files changed, 107 insertions(+), 9 deletions(-) rename dbms/{src => }/Interpreters/RenameColumnVisitor.cpp (100%) rename dbms/{src => }/Interpreters/RenameColumnVisitor.h (100%) create mode 100644 tests/queries/0_stateless/01213_alter_rename_with_default_zookeeper.reference create mode 100644 tests/queries/0_stateless/01213_alter_rename_with_default_zookeeper.sql diff --git a/dbms/src/Interpreters/RenameColumnVisitor.cpp b/dbms/Interpreters/RenameColumnVisitor.cpp similarity index 100% rename from dbms/src/Interpreters/RenameColumnVisitor.cpp rename to dbms/Interpreters/RenameColumnVisitor.cpp diff --git a/dbms/src/Interpreters/RenameColumnVisitor.h b/dbms/Interpreters/RenameColumnVisitor.h similarity index 100% rename from dbms/src/Interpreters/RenameColumnVisitor.h rename to dbms/Interpreters/RenameColumnVisitor.h diff --git a/dbms/Storages/AlterCommands.cpp b/dbms/Storages/AlterCommands.cpp index 1b135563095..16585190701 100644 --- a/dbms/Storages/AlterCommands.cpp +++ b/dbms/Storages/AlterCommands.cpp @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -451,6 +452,19 @@ void AlterCommand::apply(StorageInMemoryMetadata & metadata) const else if (type == RENAME_COLUMN) { metadata.columns.rename(column_name, rename_to); + RenameColumnData rename_data{column_name, rename_to}; + RenameColumnVisitor rename_visitor(rename_data); + for (auto & column : metadata.columns) + { + metadata.columns.modify(column.name, [&](ColumnDescription & column_to_modify) { + if (column_to_modify.default_desc.expression) + rename_visitor.visit(column_to_modify.default_desc.expression); + if (column_to_modify.ttl) + rename_visitor.visit(column_to_modify.ttl); + }); + } + if (metadata.ttl_for_table_ast) + rename_visitor.visit(metadata.ttl_for_table_ast); } else throw Exception("Wrong parameter type in ALTER query", ErrorCodes::LOGICAL_ERROR); diff --git a/dbms/Storages/MergeTree/MergeTreeData.cpp b/dbms/Storages/MergeTree/MergeTreeData.cpp index ab99c955701..bf0eedc4264 100644 --- a/dbms/Storages/MergeTree/MergeTreeData.cpp +++ 
b/dbms/Storages/MergeTree/MergeTreeData.cpp @@ -192,7 +192,7 @@ MergeTreeData::MergeTreeData( min_format_version = MERGE_TREE_DATA_MIN_FORMAT_VERSION_WITH_CUSTOM_PARTITIONING; } - setTTLExpressions(metadata.columns.getColumnTTLs(), metadata.ttl_for_table_ast); + setTTLExpressions(metadata.columns, metadata.ttl_for_table_ast); /// format_file always contained on any data path PathWithDisk version_file; @@ -610,14 +610,17 @@ void checkTTLExpression(const ExpressionActionsPtr & ttl_expression, const Strin } -void MergeTreeData::setTTLExpressions(const ColumnsDescription::ColumnTTLs & new_column_ttls, +void MergeTreeData::setTTLExpressions(const ColumnsDescription & columns, const ASTPtr & new_ttl_table_ast, bool only_check) { - auto create_ttl_entry = [this](ASTPtr ttl_ast) + + auto new_column_ttls = columns.getColumnTTLs(); + + auto create_ttl_entry = [this, &columns](ASTPtr ttl_ast) { TTLEntry result; - auto syntax_result = SyntaxAnalyzer(global_context).analyze(ttl_ast, getColumns().getAllPhysical()); + auto syntax_result = SyntaxAnalyzer(global_context).analyze(ttl_ast, columns.getAllPhysical()); result.expression = ExpressionAnalyzer(ttl_ast, syntax_result, global_context).getActions(false); result.destination_type = PartDestinationType::DELETE; result.result_column = ttl_ast->getColumnName(); @@ -1500,7 +1503,7 @@ void MergeTreeData::checkAlterIsPossible(const AlterCommands & commands, const S setProperties(metadata, /* only_check = */ true); - setTTLExpressions(metadata.columns.getColumnTTLs(), metadata.ttl_for_table_ast, /* only_check = */ true); + setTTLExpressions(metadata.columns, metadata.ttl_for_table_ast, /* only_check = */ true); if (settings_ast) { diff --git a/dbms/Storages/MergeTree/MergeTreeData.h b/dbms/Storages/MergeTree/MergeTreeData.h index 5a9a8a61376..120ebd2e467 100644 --- a/dbms/Storages/MergeTree/MergeTreeData.h +++ b/dbms/Storages/MergeTree/MergeTreeData.h @@ -868,8 +868,9 @@ protected: void initPartitionKey(); - void setTTLExpressions(const ColumnsDescription::ColumnTTLs & new_column_ttls, - const ASTPtr & new_ttl_table_ast, bool only_check = false); + void setTTLExpressions(const ColumnsDescription & columns, + const ASTPtr & new_ttl_table_ast, bool only_check = false); + void checkStoragePolicy(const StoragePolicyPtr & new_storage_policy); void setStoragePolicy(const String & new_storage_policy_name, bool only_check = false); diff --git a/dbms/Storages/StorageMergeTree.cpp b/dbms/Storages/StorageMergeTree.cpp index 059f5c671e9..a0ca6964cbf 100644 --- a/dbms/Storages/StorageMergeTree.cpp +++ b/dbms/Storages/StorageMergeTree.cpp @@ -237,7 +237,7 @@ void StorageMergeTree::alter( /// Reinitialize primary key because primary key column types might have changed. setProperties(metadata); - setTTLExpressions(metadata.columns.getColumnTTLs(), metadata.ttl_for_table_ast); + setTTLExpressions(metadata.columns, metadata.ttl_for_table_ast); DatabaseCatalog::instance().getDatabase(table_id.database_name)->alterTable(context, table_id.table_name, metadata); diff --git a/dbms/Storages/StorageReplicatedMergeTree.cpp b/dbms/Storages/StorageReplicatedMergeTree.cpp index c739070ef54..0d8755ed99a 100644 --- a/dbms/Storages/StorageReplicatedMergeTree.cpp +++ b/dbms/Storages/StorageReplicatedMergeTree.cpp @@ -496,7 +496,7 @@ void StorageReplicatedMergeTree::setTableStructure(ColumnsDescription new_column /// Even if the primary/sorting keys didn't change we must reinitialize it /// because primary key column types might have changed. 
setProperties(metadata); - setTTLExpressions(new_columns.getColumnTTLs(), metadata.ttl_for_table_ast); + setTTLExpressions(new_columns, metadata.ttl_for_table_ast); } diff --git a/tests/queries/0_stateless/01213_alter_rename_with_default_zookeeper.reference b/tests/queries/0_stateless/01213_alter_rename_with_default_zookeeper.reference new file mode 100644 index 00000000000..06f136d6dbc --- /dev/null +++ b/tests/queries/0_stateless/01213_alter_rename_with_default_zookeeper.reference @@ -0,0 +1,17 @@ +date key value1 value2 +2019-10-02 1 1 Hello 1 +CREATE TABLE default.table_rename_with_default (`date` Date, `key` UInt64, `value1` String, `value2` String DEFAULT concat(\'Hello \', value1), `value3` String ALIAS concat(\'Word \', value1)) ENGINE = MergeTree() PARTITION BY date ORDER BY key SETTINGS index_granularity = 8192 +date key renamed_value1 value2 +2019-10-02 1 1 Hello 1 +CREATE TABLE default.table_rename_with_default (`date` Date, `key` UInt64, `renamed_value1` String, `value2` String DEFAULT concat(\'Hello \', renamed_value1), `value3` String ALIAS concat(\'Word \', renamed_value1)) ENGINE = MergeTree() PARTITION BY date ORDER BY key SETTINGS index_granularity = 8192 +Hello 1 +Word 1 +date1 date2 value1 value2 +2019-10-02 2018-10-02 1 1 +CREATE TABLE default.table_rename_with_ttl (`date1` Date, `date2` Date, `value1` String, `value2` String TTL date1 + toIntervalMonth(10000)) ENGINE = ReplicatedMergeTree(\'/clickhouse/test/table_rename_with_ttl\', \'1\') ORDER BY tuple() TTL date2 + toIntervalMonth(10000) SETTINGS index_granularity = 8192 +renamed_date1 date2 value1 value2 +2019-10-02 2018-10-02 1 1 +CREATE TABLE default.table_rename_with_ttl (`renamed_date1` Date, `date2` Date, `value1` String, `value2` String TTL renamed_date1 + toIntervalMonth(10000)) ENGINE = ReplicatedMergeTree(\'/clickhouse/test/table_rename_with_ttl\', \'1\') ORDER BY tuple() TTL date2 + toIntervalMonth(10000) SETTINGS index_granularity = 8192 +renamed_date1 renamed_date2 value1 value2 +2019-10-02 2018-10-02 1 1 +CREATE TABLE default.table_rename_with_ttl (`renamed_date1` Date, `renamed_date2` Date, `value1` String, `value2` String TTL renamed_date1 + toIntervalMonth(10000)) ENGINE = ReplicatedMergeTree(\'/clickhouse/test/table_rename_with_ttl\', \'1\') ORDER BY tuple() TTL renamed_date2 + toIntervalMonth(10000) SETTINGS index_granularity = 8192 diff --git a/tests/queries/0_stateless/01213_alter_rename_with_default_zookeeper.sql b/tests/queries/0_stateless/01213_alter_rename_with_default_zookeeper.sql new file mode 100644 index 00000000000..fa4c02aa58c --- /dev/null +++ b/tests/queries/0_stateless/01213_alter_rename_with_default_zookeeper.sql @@ -0,0 +1,63 @@ +DROP TABLE IF EXISTS table_rename_with_default; + +CREATE TABLE table_rename_with_default +( + date Date, + key UInt64, + value1 String, + value2 String DEFAULT concat('Hello ', value1), + value3 String ALIAS concat('Word ', value1) +) +ENGINE = MergeTree() +PARTITION BY date +ORDER BY key; + +INSERT INTO table_rename_with_default (date, key, value1) SELECT toDate('2019-10-01') + number % 3, number, toString(number) from numbers(9); + +SELECT * FROM table_rename_with_default WHERE key = 1 FORMAT TSVWithNames; + +SHOW CREATE TABLE table_rename_with_default; + +ALTER TABLE table_rename_with_default RENAME COLUMN value1 TO renamed_value1; + +SELECT * FROM table_rename_with_default WHERE key = 1 FORMAT TSVWithNames; + +SHOW CREATE TABLE table_rename_with_default; + +SELECT value2 FROM table_rename_with_default WHERE key = 1; +SELECT value3 FROM 
table_rename_with_default WHERE key = 1; + +DROP TABLE IF EXISTS table_rename_with_default; + +DROP TABLE IF EXISTS table_rename_with_ttl; + +CREATE TABLE table_rename_with_ttl +( + date1 Date, + date2 Date, + value1 String, + value2 String TTL date1 + INTERVAL 10000 MONTH +) +ENGINE = ReplicatedMergeTree('/clickhouse/test/table_rename_with_ttl', '1') +ORDER BY tuple() +TTL date2 + INTERVAL 10000 MONTH; + +INSERT INTO table_rename_with_ttl SELECT toDate('2019-10-01') + number % 3, toDate('2018-10-01') + number % 3, toString(number), toString(number) from numbers(9); + +SELECT * FROM table_rename_with_ttl WHERE value1 = '1' FORMAT TSVWithNames; + +SHOW CREATE TABLE table_rename_with_ttl; + +ALTER TABLE table_rename_with_ttl RENAME COLUMN date1 TO renamed_date1; + +SELECT * FROM table_rename_with_ttl WHERE value1 = '1' FORMAT TSVWithNames; + +SHOW CREATE TABLE table_rename_with_ttl; + +ALTER TABLE table_rename_with_ttl RENAME COLUMN date2 TO renamed_date2; + +SELECT * FROM table_rename_with_ttl WHERE value1 = '1' FORMAT TSVWithNames; + +SHOW CREATE TABLE table_rename_with_ttl; + +DROP TABLE IF EXISTS table_rename_with_ttl; From cd76ba3c199b8594f1149e0a92b46522def6273e Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Thu, 2 Apr 2020 20:19:57 +0300 Subject: [PATCH 056/484] Support trailing zero bytes in string hash map. --- dbms/Common/HashTable/StringHashMap.h | 20 +++++++++++++------ dbms/Common/HashTable/StringHashTable.h | 12 ++++++++++- .../HashTable/TwoLevelStringHashTable.h | 12 ++++++++++- ...0rp10_string_hash_map_zero_bytes.reference | 1 + ...109_sc0rp10_string_hash_map_zero_bytes.sql | 15 ++++++++++++++ 5 files changed, 52 insertions(+), 8 deletions(-) create mode 100644 tests/queries/0_stateless/01109_sc0rp10_string_hash_map_zero_bytes.reference create mode 100644 tests/queries/0_stateless/01109_sc0rp10_string_hash_map_zero_bytes.sql diff --git a/dbms/Common/HashTable/StringHashMap.h b/dbms/Common/HashTable/StringHashMap.h index 3ee59c89a36..fe4eab16092 100644 --- a/dbms/Common/HashTable/StringHashMap.h +++ b/dbms/Common/HashTable/StringHashMap.h @@ -25,9 +25,13 @@ struct StringHashMapCell : public HashMapCellvalue.first, state); } - // Assuming String does not contain zero bytes. NOTE: Cannot be used in serialized method - static bool isZero(const StringKey16 & key, const HashTableNoState & /*state*/) { return key.low == 0; } - void setZero() { this->value.first.low = 0; } + + // Zero means unoccupied cells in hash table. Use key with last word = 0 as + // zero keys, because such keys are unrepresentable (no way to encode length). + static bool isZero(const StringKey16 & key, const HashTableNoState &) + { return key.high == 0; } + void setZero() { this->value.first.high = 0; } + // external const StringRef getKey() const { return toStringRef(this->value.first); } // internal @@ -42,9 +46,13 @@ struct StringHashMapCell : public HashMapCellvalue.first, state); } - // Assuming String does not contain zero bytes. NOTE: Cannot be used in serialized method - static bool isZero(const StringKey24 & key, const HashTableNoState & /*state*/) { return key.a == 0; } - void setZero() { this->value.first.a = 0; } + + // Zero means unoccupied cells in hash table. Use key with last word = 0 as + // zero keys, because such keys are unrepresentable (no way to encode length). 
+ static bool isZero(const StringKey24 & key, const HashTableNoState &) + { return key.c == 0; } + void setZero() { this->value.first.c = 0; } + // external const StringRef getKey() const { return toStringRef(this->value.first); } // internal diff --git a/dbms/Common/HashTable/StringHashTable.h b/dbms/Common/HashTable/StringHashTable.h index d80b26c6a7c..101327ed809 100644 --- a/dbms/Common/HashTable/StringHashTable.h +++ b/dbms/Common/HashTable/StringHashTable.h @@ -18,14 +18,17 @@ struct StringKey24 inline StringRef ALWAYS_INLINE toStringRef(const StringKey8 & n) { + assert(n != 0); return {reinterpret_cast(&n), 8ul - (__builtin_clzll(n) >> 3)}; } inline StringRef ALWAYS_INLINE toStringRef(const StringKey16 & n) { + assert(n.high != 0); return {reinterpret_cast(&n), 16ul - (__builtin_clzll(n.high) >> 3)}; } inline StringRef ALWAYS_INLINE toStringRef(const StringKey24 & n) { + assert(n.c != 0); return {reinterpret_cast(&n), 24ul - (__builtin_clzll(n.c) >> 3)}; } @@ -229,6 +232,7 @@ public: template static auto ALWAYS_INLINE dispatch(Self & self, KeyHolder && key_holder, Func && func) { + StringHashTableHash hash; const StringRef & x = keyHolderGetKey(key_holder); const size_t sz = x.size; if (sz == 0) @@ -237,6 +241,13 @@ public: return func(self.m0, VoidKey{}, 0); } + if (x.data[sz - 1] == 0) + { + // Strings with trailing zeros are not representable as fixed-size + // string keys. Put them to the generic table. + return func(self.ms, std::forward(key_holder), hash(x)); + } + const char * p = x.data; // pending bits that needs to be shifted out const char s = (-sz & 7) * 8; @@ -247,7 +258,6 @@ public: StringKey24 k24; UInt64 n[3]; }; - StringHashTableHash hash; switch ((sz - 1) >> 3) { case 0: // 1..8 bytes diff --git a/dbms/Common/HashTable/TwoLevelStringHashTable.h b/dbms/Common/HashTable/TwoLevelStringHashTable.h index 88241c6c5fe..93bbcb2835d 100644 --- a/dbms/Common/HashTable/TwoLevelStringHashTable.h +++ b/dbms/Common/HashTable/TwoLevelStringHashTable.h @@ -77,6 +77,7 @@ public: template static auto ALWAYS_INLINE dispatch(Self & self, KeyHolder && key_holder, Func && func) { + StringHashTableHash hash; const StringRef & x = keyHolderGetKey(key_holder); const size_t sz = x.size; if (sz == 0) @@ -85,6 +86,16 @@ public: return func(self.impls[0].m0, VoidKey{}, 0); } + if (x.data[x.size - 1] == 0) + { + // Strings with trailing zeros are not representable as fixed-size + // string keys. Put them to the generic table. 
+ auto res = hash(x); + auto buck = getBucketFromHash(res); + return func(self.impls[buck].ms, std::forward(key_holder), + res); + } + const char * p = x.data; // pending bits that needs to be shifted out const char s = (-sz & 7) * 8; @@ -95,7 +106,6 @@ public: StringKey24 k24; UInt64 n[3]; }; - StringHashTableHash hash; switch ((sz - 1) >> 3) { case 0: diff --git a/tests/queries/0_stateless/01109_sc0rp10_string_hash_map_zero_bytes.reference b/tests/queries/0_stateless/01109_sc0rp10_string_hash_map_zero_bytes.reference new file mode 100644 index 00000000000..d00491fd7e5 --- /dev/null +++ b/tests/queries/0_stateless/01109_sc0rp10_string_hash_map_zero_bytes.reference @@ -0,0 +1 @@ +1 diff --git a/tests/queries/0_stateless/01109_sc0rp10_string_hash_map_zero_bytes.sql b/tests/queries/0_stateless/01109_sc0rp10_string_hash_map_zero_bytes.sql new file mode 100644 index 00000000000..b7ac6f1641f --- /dev/null +++ b/tests/queries/0_stateless/01109_sc0rp10_string_hash_map_zero_bytes.sql @@ -0,0 +1,15 @@ +-- Test that the string hash map works properly with keys containing zero +-- bytes. +-- Keys with no central '1' are mostly duplicates. The unique keys +-- in this group are '', '\0', ...., '\0 x 34', to a total of 35. All other +-- keys are unique. +select count(*) = 18 * 18 * 17 + 35 +from ( + select key + from ( + with 18 as n + select repeat('\0', number % n) + || repeat('1', intDiv(number, n) % n) + || repeat('\0', intDiv(number, n * n) % n) key + from numbers(18 * 18 * 18)) + group by key); From 4075f26583f847df0e7f5368375ff7060b59b83d Mon Sep 17 00:00:00 2001 From: BayoNet Date: Thu, 2 Apr 2020 20:55:11 +0300 Subject: [PATCH 057/484] DOCS-19: parseDateTimeBestEffort (#9994) Added the parseDateTimeBestEffort description in English and Russian language. --- .../functions/type_conversion_functions.md | 122 +++++++++++++++++- .../functions/type_conversion_functions.md | 118 ++++++++++++++++- 2 files changed, 233 insertions(+), 7 deletions(-) diff --git a/docs/en/query_language/functions/type_conversion_functions.md b/docs/en/query_language/functions/type_conversion_functions.md index d898bb517b9..40e0e3cc446 100644 --- a/docs/en/query_language/functions/type_conversion_functions.md +++ b/docs/en/query_language/functions/type_conversion_functions.md @@ -402,18 +402,128 @@ SELECT └───────────────────────────┴──────────────────────────────┘ ``` -## parseDateTimeBestEffort {#type_conversion_functions-parsedatetimebesteffort} +## parseDateTimeBestEffort {#parsedatetimebesteffort} -Parse a number type argument to a Date or DateTime type. -different from toDate and toDateTime, parseDateTimeBestEffort can progress more complex date format. -For more information, see the link: [Complex Date Format](https://xkcd.com/1179/) +Converts a date and time in the [String](../../data_types/string.md) representation to [DateTime](../../data_types/datetime.md#data_type-datetime) data type. + +The function parses [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601), [RFC 1123 - 5.2.14 RFC-822 Date and Time Specification](https://tools.ietf.org/html/rfc1123#page-55), ClickHouse's and some other date and time formats. + + +**Syntax** + +```sql +parseDateTimeBestEffort(time_string [, time_zone]); +``` + +**Parameters** + +- `time_string` — String containing a date and time to convert. [String](../../data_types/string.md). +- `time_zone` — Time zone. The function parses `time_string` according to the time zone. [String](../../data_types/string.md). 
+ + +**Supported non-standard formats** + +- A string containing 9..10 digit [unix timestamp](https://en.wikipedia.org/wiki/Unix_time). +- A string with a date and a time component: `YYYYMMDDhhmmss`, `DD/MM/YYYY hh:mm:ss`, `DD-MM-YY hh:mm`, `YYYY-MM-DD hh:mm:ss`, etc. +- A string with a date, but no time component: `YYYY`, `YYYYMM`, `YYYY*MM`, `DD/MM/YYYY`, `DD-MM-YY` etc. +- A string with a day and time: `DD`, `DD hh`, `DD hh:mm`. In this case `YYYY-MM` are substituted as `2000-01`. +- A string that includes the date and time along with time zone offset information: `YYYY-MM-DD hh:mm:ss ±h:mm`, etc. For example, `2020-12-12 17:36:00 -5:00`. + +For all of the formats with separator the function parses months names expressed by their full name or by the first three letters of a month name. Examples: `24/DEC/18`, `24-Dec-18`, `01-September-2018`. + +**Returned value** + +- `time_string` converted to the `DateTime` data type. + +**Examples** + +Query: + +```sql +SELECT parseDateTimeBestEffort('12/12/2020 12:12:57') +AS parseDateTimeBestEffort; +``` + +Result: + +```text +┌─parseDateTimeBestEffort─┐ +│ 2020-12-12 12:12:57 │ +└─────────────────────────┘ +``` + +Query: + +```sql +SELECT parseDateTimeBestEffort('Sat, 18 Aug 2018 07:22:16 GMT', 'Europe/Moscow') +AS parseDateTimeBestEffort +``` + +Result: + +```text +┌─parseDateTimeBestEffort─┐ +│ 2018-08-18 10:22:16 │ +└─────────────────────────┘ +``` + +Query: + +```sql +SELECT parseDateTimeBestEffort('1284101485') +AS parseDateTimeBestEffort +``` + +Result: + +```text +┌─parseDateTimeBestEffort─┐ +│ 2015-07-07 12:04:41 │ +└─────────────────────────┘ +``` + +Query: + +```sql +SELECT parseDateTimeBestEffort('2018-12-12 10:12:12') +AS parseDateTimeBestEffort +``` + +Result: + +```text +┌─parseDateTimeBestEffort─┐ +│ 2018-12-12 10:12:12 │ +└─────────────────────────┘ +``` + +Query: + +```sql +SELECT parseDateTimeBestEffort('10 20:19') +``` + +Result: + +```text +┌─parseDateTimeBestEffort('10 20:19')─┐ +│ 2000-01-10 20:19:00 │ +└─────────────────────────────────────┘ +``` + +**See Also** + +- [ISO 8601 announcement by @xkcd](https://xkcd.com/1179/) +- [RFC 1123](https://tools.ietf.org/html/rfc1123) +- [toDate](#todate) +- [toDateTime](#todatetime) ## parseDateTimeBestEffortOrNull {#parsedatetimebesteffortornull} -Same as for [parseDateTimeBestEffort](#type_conversion_functions-parsedatetimebesteffort) except that it returns null when it encounters a date format that cannot be processed. +Same as for [parseDateTimeBestEffort](#parsedatetimebesteffort) except that it returns null when it encounters a date format that cannot be processed. ## parseDateTimeBestEffortOrZero {#parsedatetimebesteffortorzero} -Same as for [parseDateTimeBestEffort](#type_conversion_functions-parsedatetimebesteffort) except that it returns zero date or zero date time when it encounters a date format that cannot be processed. +Same as for [parseDateTimeBestEffort](#parsedatetimebesteffort) except that it returns zero date or zero date time when it encounters a date format that cannot be processed. 
[Original article](https://clickhouse.tech/docs/en/query_language/functions/type_conversion_functions/) diff --git a/docs/ru/query_language/functions/type_conversion_functions.md b/docs/ru/query_language/functions/type_conversion_functions.md index 9cb478a2a02..00582cd61cb 100644 --- a/docs/ru/query_language/functions/type_conversion_functions.md +++ b/docs/ru/query_language/functions/type_conversion_functions.md @@ -360,7 +360,7 @@ SELECT toTypeName(CAST(x, 'Nullable(UInt16)')) FROM t_null Приводит аргумент из числового типа данных к типу данных [IntervalType](../../data_types/special_data_types/interval.md). -**Синтксис** +**Синтаксис** ``` sql toIntervalSecond(number) @@ -399,4 +399,120 @@ SELECT └───────────────────────────┴──────────────────────────────┘ ``` +## parseDateTimeBestEffort {#parsedatetimebesteffort} + +Преобразует дату и время в [строковом](../../data_types/string.md) представлении к типу данных [DateTime](../../data_types/datetime.md#data_type-datetime). + +Функция распознаёт форматы [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601), [RFC 1123 - 5.2.14 RFC-822 Date and Time Specification](https://tools.ietf.org/html/rfc1123#page-55), формат даты времени ClickHouse's а также некоторые другие форматы. + +**Синтаксис** + +```sql +parseDateTimeBestEffort(time_string[, time_zone]); +``` + +**Параметры** + +- `time_string` — строка, содержащая дату и время для преобразования. [String](../../data_types/string.md). +- `time_zone` — часовой пояс. Функция анализирует `time_string` в соответствии с заданным часовым поясом. [String](../../data_types/string.md). + +**Поддерживаемые нестандартные форматы** + +- [Unix timestamp](https://ru.wikipedia.org/wiki/Unix-время) в строковом представлении. 9 или 10 символов. +- Строка с датой и временем: `YYYYMMDDhhmmss`, `DD/MM/YYYY hh:mm:ss`, `DD-MM-YY hh:mm`, `YYYY-MM-DD hh:mm:ss`, etc. +- Строка с датой, но без времени: `YYYY`, `YYYYMM`, `YYYY*MM`, `DD/MM/YYYY`, `DD-MM-YY` и т.д. +- Строка с временем, и с днём: `DD`, `DD hh`, `DD hh:mm`. В этом случае `YYYY-MM` принимается равным `2000-01`. +- Строка, содержащая дату и время вместе с информацией о часовом поясе: `YYYY-MM-DD hh:mm:ss ±h:mm`, и т.д. Например, `2020-12-12 17:36:00 -5:00`. + +Для всех форматов с разделителями функция распознаёт названия месяцев, выраженных в виде полного англоязычного имени месяца или в виде первых трёх символов имени месяца. Примеры: `24/DEC/18`, `24-Dec-18`, `01-September-2018`. + + +**Возвращаемое значение** + +- `time_string` преобразованная к типу данных `DateTime`. 
+ +**Примеры** + +Запрос: + +```sql +SELECT parseDateTimeBestEffort('12/12/2020 12:12:57') +AS parseDateTimeBestEffort; +``` + +Результат: + +```text +┌─parseDateTimeBestEffort─┐ +│ 2020-12-12 12:12:57 │ +└─────────────────────────┘ +``` + +Запрос: + +```sql +SELECT parseDateTimeBestEffort('Sat, 18 Aug 2018 07:22:16 GMT', 'Europe/Moscow') +AS parseDateTimeBestEffort +``` + +Результат: + +```text +┌─parseDateTimeBestEffort─┐ +│ 2018-08-18 10:22:16 │ +└─────────────────────────┘ +``` + +Запрос: + +```sql +SELECT parseDateTimeBestEffort('1284101485') +AS parseDateTimeBestEffort +``` + +Результат: + +```text +┌─parseDateTimeBestEffort─┐ +│ 2015-07-07 12:04:41 │ +└─────────────────────────┘ +``` + +Запрос: + +```sql +SELECT parseDateTimeBestEffort('2018-12-12 10:12:12') +AS parseDateTimeBestEffort +``` + +Результат: + +```text +┌─parseDateTimeBestEffort─┐ +│ 2018-12-12 10:12:12 │ +└─────────────────────────┘ +``` + +Запрос: + +```sql +SELECT parseDateTimeBestEffort('10 20:19') +``` + +Результат: + +```text +┌─parseDateTimeBestEffort('10 20:19')─┐ +│ 2000-01-10 20:19:00 │ +└─────────────────────────────────────┘ +``` + +**См. также** + +- [Информация о формате ISO 8601 от @xkcd](https://xkcd.com/1179/) +- [RFC 1123](https://tools.ietf.org/html/rfc1123) +- [toDate](#todate) +- [toDateTime](#todatetime) + + [Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/functions/type_conversion_functions/) From 0606b7e4df76dbbf8530a43e7401a50ef5f40d95 Mon Sep 17 00:00:00 2001 From: alesapin Date: Thu, 2 Apr 2020 21:24:11 +0300 Subject: [PATCH 058/484] Renames for compact parts --- dbms/Storages/MergeTree/MergeTreeData.cpp | 5 --- .../MergeTree/MergeTreeDataMergerMutator.cpp | 17 +++++++++- .../MergeTree/MergeTreeReaderCompact.cpp | 23 +++++++++++-- .../MergeTree/StorageFromMergeTreeDataPart.h | 2 -- dbms/Storages/StorageMergeTree.cpp | 11 +++++++ dbms/Storages/StorageMergeTree.h | 2 +- .../01213_alter_rename_compact_part.reference | 7 ++++ .../01213_alter_rename_compact_part.sql | 33 +++++++++++++++++++ 8 files changed, 88 insertions(+), 12 deletions(-) create mode 100644 tests/queries/0_stateless/01213_alter_rename_compact_part.reference create mode 100644 tests/queries/0_stateless/01213_alter_rename_compact_part.sql diff --git a/dbms/Storages/MergeTree/MergeTreeData.cpp b/dbms/Storages/MergeTree/MergeTreeData.cpp index bf0eedc4264..2e00dc864f8 100644 --- a/dbms/Storages/MergeTree/MergeTreeData.cpp +++ b/dbms/Storages/MergeTree/MergeTreeData.cpp @@ -3608,13 +3608,8 @@ MergeTreeData::AlterConversions MergeTreeData::getAlterConversionsForPart(const AlterConversions result{}; for (const auto & command : commands) - { if (command.type == MutationCommand::Type::RENAME_COLUMN) - { result.rename_map[command.rename_to] = command.column_name; - LOG_DEBUG(log, "Add to rename map:" << command.column_name); - } - } return result; } diff --git a/dbms/Storages/MergeTree/MergeTreeDataMergerMutator.cpp b/dbms/Storages/MergeTree/MergeTreeDataMergerMutator.cpp index d1dd2861202..2b732d879b0 100644 --- a/dbms/Storages/MergeTree/MergeTreeDataMergerMutator.cpp +++ b/dbms/Storages/MergeTree/MergeTreeDataMergerMutator.cpp @@ -1261,6 +1261,20 @@ void MergeTreeDataMergerMutator::splitMutationCommands( { removed_columns_from_compact_part.emplace(command.column_name); } + else if (command.type == MutationCommand::Type::RENAME_COLUMN) + { + if (is_compact_part) + { + for_interpreter.push_back( + { + .type = MutationCommand::Type::READ_COLUMN, + .column_name = command.rename_to, + }); + 
already_changed_columns.emplace(command.column_name); + } + else + for_file_renames.push_back(command); + } else { for_file_renames.push_back(command); @@ -1273,7 +1287,8 @@ void MergeTreeDataMergerMutator::splitMutationCommands( /// we just don't read dropped columns for (const auto & column : part->getColumns()) { - if (!removed_columns_from_compact_part.count(column.name) && !already_changed_columns.count(column.name)) + if (!removed_columns_from_compact_part.count(column.name) + && !already_changed_columns.count(column.name)) { for_interpreter.emplace_back(MutationCommand { diff --git a/dbms/Storages/MergeTree/MergeTreeReaderCompact.cpp b/dbms/Storages/MergeTree/MergeTreeReaderCompact.cpp index 496371b6e4b..a59a87386a5 100644 --- a/dbms/Storages/MergeTree/MergeTreeReaderCompact.cpp +++ b/dbms/Storages/MergeTree/MergeTreeReaderCompact.cpp @@ -81,14 +81,23 @@ MergeTreeReaderCompact::MergeTreeReaderCompact( const auto & [name, type] = *name_and_type; auto position = data_part->getColumnPosition(name); - /// If array of Nested column is missing in part, - /// we have to read it's offsets if they exists. + if (!position) + { + auto renamed_it = alter_conversions.rename_map.find(name); + + if (renamed_it != alter_conversions.rename_map.end()) + position = data_part->getColumnPosition(renamed_it->second); + } + if (!position && typeid_cast(type.get())) { + /// If array of Nested column is missing in part, + /// we have to read it's offsets if they exists. position = findColumnForOffsets(name); read_only_offsets[i] = (position != std::nullopt); } + column_positions[i] = std::move(position); } @@ -125,7 +134,15 @@ size_t MergeTreeReaderCompact::readRows(size_t from_mark, bool continue_reading, if (!res_columns[pos]) continue; - const auto & [name, type] = *name_and_type; + auto [name, type] = *name_and_type; + + if (alter_conversions.rename_map.count(name)) + { + String original_name = alter_conversions.rename_map[name]; + if (!data_part->getColumnPosition(name) && data_part->getColumnPosition(original_name)) + name = original_name; + } + auto & column = mutable_columns[pos]; try diff --git a/dbms/Storages/MergeTree/StorageFromMergeTreeDataPart.h b/dbms/Storages/MergeTree/StorageFromMergeTreeDataPart.h index 0e93301e124..4d799522920 100644 --- a/dbms/Storages/MergeTree/StorageFromMergeTreeDataPart.h +++ b/dbms/Storages/MergeTree/StorageFromMergeTreeDataPart.h @@ -52,12 +52,10 @@ public: return part->storage.getInMemoryMetadata(); } - bool hasSortingKey() const { return part->storage.hasSortingKey(); } Names getSortingKeyColumns() const override { return part->storage.getSortingKeyColumns(); } - protected: StorageFromMergeTreeDataPart(const MergeTreeData::DataPartPtr & part_) : IStorage(getIDFromPart(part_), part_->storage.getVirtuals()) diff --git a/dbms/Storages/StorageMergeTree.cpp b/dbms/Storages/StorageMergeTree.cpp index a0ca6964cbf..5742b3a811b 100644 --- a/dbms/Storages/StorageMergeTree.cpp +++ b/dbms/Storages/StorageMergeTree.cpp @@ -1260,4 +1260,15 @@ CheckResults StorageMergeTree::checkData(const ASTPtr & query, const Context & c return results; } + +MutationCommands StorageMergeTree::getFirtsAlterMutationCommandsForPart(const DataPartPtr & part) const +{ + std::lock_guard lock(currently_processing_in_background_mutex); + + auto it = current_mutations_by_version.upper_bound(part->info.getDataVersion()); + if (it == current_mutations_by_version.end()) + return {}; + return it->second.commands; +} + } diff --git a/dbms/Storages/StorageMergeTree.h b/dbms/Storages/StorageMergeTree.h index 
d6b8f98bff8..44ebce03802 100644 --- a/dbms/Storages/StorageMergeTree.h +++ b/dbms/Storages/StorageMergeTree.h @@ -166,7 +166,7 @@ protected: std::unique_ptr settings_, bool has_force_restore_data_flag); - MutationCommands getFirtsAlterMutationCommandsForPart(const DataPartPtr & /* part */) const override { return {}; } + MutationCommands getFirtsAlterMutationCommandsForPart(const DataPartPtr & part) const override; }; } diff --git a/tests/queries/0_stateless/01213_alter_rename_compact_part.reference b/tests/queries/0_stateless/01213_alter_rename_compact_part.reference new file mode 100644 index 00000000000..a5e642f56ad --- /dev/null +++ b/tests/queries/0_stateless/01213_alter_rename_compact_part.reference @@ -0,0 +1,7 @@ +1 +1 +date key renamed_value1 value2 value3 +2019-10-02 1 1 1 1 +7 7 +date key renamed_value1 renamed_value2 renamed_value3 +2019-10-02 7 7 7 7 diff --git a/tests/queries/0_stateless/01213_alter_rename_compact_part.sql b/tests/queries/0_stateless/01213_alter_rename_compact_part.sql new file mode 100644 index 00000000000..07188ece519 --- /dev/null +++ b/tests/queries/0_stateless/01213_alter_rename_compact_part.sql @@ -0,0 +1,33 @@ +DROP TABLE IF EXISTS table_with_compact_parts; + +CREATE TABLE table_with_compact_parts +( + date Date, + key UInt64, + value1 String, + value2 String, + value3 String +) +ENGINE = MergeTree() +PARTITION BY date +ORDER BY key +settings index_granularity = 8, +min_rows_for_wide_part = 10; + +INSERT INTO table_with_compact_parts SELECT toDate('2019-10-01') + number % 3, number, toString(number), toString(number), toString(number) from numbers(9); + +SELECT value1 FROM table_with_compact_parts WHERE key = 1; + +ALTER TABLE table_with_compact_parts RENAME COLUMN value1 to renamed_value1; + +SELECT renamed_value1 FROM table_with_compact_parts WHERE key = 1; + +SELECT * FROM table_with_compact_parts WHERE key = 1 FORMAT TSVWithNames; + +ALTER TABLE table_with_compact_parts RENAME COLUMN value2 TO renamed_value2, RENAME COLUMN value3 TO renamed_value3; + +SELECT renamed_value2, renamed_value3 FROM table_with_compact_parts WHERE key = 7; + +SELECT * FROM table_with_compact_parts WHERE key = 7 FORMAT TSVWithNames; + +DROP TABLE IF EXISTS table_with_compact_parts; From 9e49d7c8cb4a8eeed215bf969dbf90b29d9ab7b1 Mon Sep 17 00:00:00 2001 From: alesapin Date: Thu, 2 Apr 2020 21:31:27 +0300 Subject: [PATCH 059/484] One more test for indices --- .../0_stateless/01213_alter_rename_primary_key_zookeeper.sql | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/01213_alter_rename_primary_key_zookeeper.sql b/tests/queries/0_stateless/01213_alter_rename_primary_key_zookeeper.sql index 360a2f3745d..e03ef67212e 100644 --- a/tests/queries/0_stateless/01213_alter_rename_primary_key_zookeeper.sql +++ b/tests/queries/0_stateless/01213_alter_rename_primary_key_zookeeper.sql @@ -34,7 +34,8 @@ CREATE TABLE table_for_rename_with_primary_key key2 UInt64, key3 UInt64, value1 String, - value2 String + value2 String, + INDEX idx (value1) TYPE set(1) GRANULARITY 1 ) ENGINE = ReplicatedMergeTree('/clickhouse/tables/table_for_rename_pk', '1') PARTITION BY date @@ -49,5 +50,6 @@ ALTER TABLE table_for_rename_with_primary_key RENAME COLUMN key2 TO renamed_key2 ALTER TABLE table_for_rename_with_primary_key RENAME COLUMN key3 TO renamed_key3; --{serverError 44} +ALTER TABLE table_for_rename_with_primary_key RENAME COLUMN value1 TO renamed_value1; --{serverError 44} DROP TABLE IF EXISTS table_for_rename_with_primary_key; From 
f741a583e5b12fb8e56376e2c0b11cd39e4b20a8 Mon Sep 17 00:00:00 2001 From: alesapin Date: Thu, 2 Apr 2020 21:33:17 +0300 Subject: [PATCH 060/484] Fix style --- dbms/Storages/AlterCommands.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/dbms/Storages/AlterCommands.cpp b/dbms/Storages/AlterCommands.cpp index 16585190701..ba9d64983e6 100644 --- a/dbms/Storages/AlterCommands.cpp +++ b/dbms/Storages/AlterCommands.cpp @@ -456,7 +456,8 @@ void AlterCommand::apply(StorageInMemoryMetadata & metadata) const RenameColumnVisitor rename_visitor(rename_data); for (auto & column : metadata.columns) { - metadata.columns.modify(column.name, [&](ColumnDescription & column_to_modify) { + metadata.columns.modify(column.name, [&](ColumnDescription & column_to_modify) + { if (column_to_modify.default_desc.expression) rename_visitor.visit(column_to_modify.default_desc.expression); if (column_to_modify.ttl) From b7f9d033551784f4ae69c6fc38ed4c0f75873946 Mon Sep 17 00:00:00 2001 From: alesapin Date: Thu, 2 Apr 2020 21:34:35 +0300 Subject: [PATCH 061/484] Remove unused method --- dbms/Storages/MergeTree/IMergeTreeReader.cpp | 5 ----- dbms/Storages/MergeTree/IMergeTreeReader.h | 3 --- 2 files changed, 8 deletions(-) diff --git a/dbms/Storages/MergeTree/IMergeTreeReader.cpp b/dbms/Storages/MergeTree/IMergeTreeReader.cpp index 7ccbe71938c..4978aeaaa58 100644 --- a/dbms/Storages/MergeTree/IMergeTreeReader.cpp +++ b/dbms/Storages/MergeTree/IMergeTreeReader.cpp @@ -63,11 +63,6 @@ static bool arrayHasNoElementsRead(const IColumn & column) } -//void IMergeTreeReader::renameColumnsWithAlters(Columns & res_columns) -//{ -// -//} - void IMergeTreeReader::fillMissingColumns(Columns & res_columns, bool & should_evaluate_missing_defaults, size_t num_rows) { try diff --git a/dbms/Storages/MergeTree/IMergeTreeReader.h b/dbms/Storages/MergeTree/IMergeTreeReader.h index f74530f2d5f..6f588276855 100644 --- a/dbms/Storages/MergeTree/IMergeTreeReader.h +++ b/dbms/Storages/MergeTree/IMergeTreeReader.h @@ -48,9 +48,6 @@ public: /// try to perform conversions of columns. void performRequiredConversions(Columns & res_columns); - /// TODO(alesap) - void renameColumnsWithAlters(Columns & res_columns); - const NamesAndTypesList & getColumns() const { return columns; } size_t numColumnsInResult() const { return columns.size(); } From cd88b5380c886e6d4f84d0b29d9a27e1a12d2cbd Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Thu, 2 Apr 2020 21:44:58 +0300 Subject: [PATCH 062/484] Performance comparison fixes --- docker/test/performance-comparison/compare.sh | 24 +- .../test/performance-comparison/entrypoint.sh | 12 +- docker/test/performance-comparison/report.py | 385 ++++++++++-------- tests/performance/analyze_array_tuples.xml | 12 +- tests/performance/array_fill.xml | 20 +- tests/performance/concat_hits.xml | 39 +- 6 files changed, 270 insertions(+), 222 deletions(-) diff --git a/docker/test/performance-comparison/compare.sh b/docker/test/performance-comparison/compare.sh index 119d90665aa..71f69202326 100755 --- a/docker/test/performance-comparison/compare.sh +++ b/docker/test/performance-comparison/compare.sh @@ -97,10 +97,6 @@ function run_tests touch "$x" done - # FIXME remove some broken long tests - rm "$test_prefix"/{IPv4,IPv6,modulo,parse_engine_file,number_formatting_formats,select_format}.xml ||: - - test_files=$(ls "$test_prefix"/*.xml) # FIXME a quick crutch to bring the run time down for the unstable tests -- # if some performance tests xmls were changed in a PR, run only these ones. 
@@ -126,6 +122,17 @@ function run_tests test_files=$(ls "$test_prefix"/$CHPC_TEST_GLOB.xml) fi + if [ "$test_files" == "" ] + then + # FIXME remove some broken long tests + for test_name in {IPv4,IPv6,modulo,parse_engine_file,number_formatting_formats,select_format,arithmetic,cryptographic_hashes,logical_functions_{medium,small}} + do + printf "$test_name\tMarked as broken (see compare.sh)" >> skipped-tests.tsv + rm "$test_prefix/$test_name.xml" ||: + done + test_files=$(ls "$test_prefix"/*.xml) + fi + # Run the tests. test_name="" for test in $test_files @@ -275,9 +282,11 @@ create table test_times_tsv engine File(TSV, 'test-times.tsv') as from test_time join wall_clock using test order by avg_real_per_query desc; -create table all_queries_tsv engine File(TSV, 'all-queries.tsv') as - select left, right, diff, rd, test, query - from queries order by rd[3] desc; +create table all_tests_tsv engine File(TSV, 'all-queries.tsv') as + select left, right, diff, + floor(left > right ? left / right : right / left, 3), + rd, test, query + from queries order by test, query; " 2> >(head -2 >> report-errors.rep) ||: for version in {right,left} @@ -429,6 +438,7 @@ case "$stage" in "report") time report ||: + time "$script_dir/report.py" --report=all-queries > all-queries.html 2> >(head -2 >> report-errors.rep) ||: time "$script_dir/report.py" > report.html ;& esac diff --git a/docker/test/performance-comparison/entrypoint.sh b/docker/test/performance-comparison/entrypoint.sh index 1aba59e982a..dc0480715d0 100755 --- a/docker/test/performance-comparison/entrypoint.sh +++ b/docker/test/performance-comparison/entrypoint.sh @@ -90,17 +90,23 @@ export PYTHONIOENCODING=utf-8 # Use a default number of runs if not told otherwise export CHPC_RUNS=${CHPC_RUNS:-7} +# By default, use the main comparison script from the tested package, so that we +# can change it in PRs. +script_path="right/scripts" +if [ -v CHPC_LOCAL_SCRIPT ] +then + script_path=".." +fi + # Even if we have some errors, try our best to save the logs. set +e -# Use main comparison script from the tested package, so that we can change it -# in PRs. 
# Older version use 'kill 0', so put the script into a separate process group # FIXME remove set +m in April 2020 set +m { \ time ../download.sh "$REF_PR" "$REF_SHA" "$PR_TO_TEST" "$SHA_TO_TEST" && \ - time stage=configure right/scripts/compare.sh ; \ + time stage=configure "$script_path"/compare.sh ; \ } 2>&1 | ts "$(printf '%%Y-%%m-%%d %%H:%%M:%%S\t')" | tee compare.log set -m diff --git a/docker/test/performance-comparison/report.py b/docker/test/performance-comparison/report.py index c8ce51e158a..84b0239ccda 100755 --- a/docker/test/performance-comparison/report.py +++ b/docker/test/performance-comparison/report.py @@ -1,5 +1,6 @@ #!/usr/bin/python3 +import argparse import ast import collections import csv @@ -8,6 +9,11 @@ import os import sys import traceback +parser = argparse.ArgumentParser(description='Create performance test report') +parser.add_argument('--report', default='main', choices=['main', 'all-queries'], + help='Which report to build') +args = parser.parse_args() + report_errors = [] error_tests = 0 slow_average_tests = 0 @@ -16,7 +22,7 @@ slower_queries = 0 unstable_queries = 0 very_unstable_queries = 0 -print(""" +header_template = """ - {% endif %} - - {% endblock %} - {% for path in extra_css %} - - {% endfor %} - {% block extrahead %}{% endblock %} - - {% set direction = config.theme.direction %} - {% if palette.primary or palette.accent %} - {% set primary = palette.primary | replace(" ", "-") | lower %} - {% set accent = palette.accent | replace(" ", "-") | lower %} - - {% else %} - - {% endif %} - - - {% set platform = config.extra.repo_icon or config.repo_url %} - {% if "github" in platform %} - {% include "assets/images/icons/github.a4034fb1.svg" %} - {% elif "gitlab" in platform %} - {% include "assets/images/icons/gitlab.d80e5efc.svg" %} - {% elif "bitbucket" in platform %} - {% include "assets/images/icons/bitbucket.4ebea66e.svg" %} - {% endif %} - - - - - - {% block header %} - {% include "partials/header.html" %} - {% endblock %} -
- {% block hero %} - {% if page and page.meta and page.meta.hero %} - {% include "partials/hero.html" with context %} - {% endif %} - {% endblock %} - {% if feature.tabs %} - {% include "partials/tabs.html" %} - {% endif %} -
-
- {% block site_nav %} - {% if nav %} -
-
-
- {% if not config.extra.single_page %} - {% include "partials/nav.html" %} - {% else %} - {% include "partials/toc.html" %} - {% endif %} -
-
-
- {% endif %} - {% if page.toc %} -
-
-
- {% if not config.extra.single_page %} - {% include "partials/toc.html" %} - {% else %} - {% include "partials/nav.html" %} - {% endif %} -
-
-
- {% endif %} - {% endblock %} -
-
- {% block content %} - {% if config.extra.single_page %} - - {% else %} - {% if page.edit_url %} - - {% endif %} - {% endif %} - {% if not "\x3ch1" in page.content %} -

{{ page.title | default(config.site_name, true)}}

- {% endif %} -
- {% if not config.extra.single_page %} - {{ page.content }} - {% endif %} -
- {% block source %} - {% if page and page.meta and page.meta.source %} -

{{ lang.t("meta.source") }}

- {% set path = page.meta.path | default([""]) %} - {% set file = page.meta.source %} - - {{ file }} - - {% endif %} - {% endblock %} - {% endblock %} - {% block disqus %} - {% if config.extra.disqus and not page.is_homepage %} -

{{ lang.t("meta.comments") }}

- {% include "partials/integrations/disqus.html" %} - {% endif %} - {% endblock %} -
-
-
-
- {% block footer %} - {% include "partials/footer.html" %} - {% endblock %} -
- {% block scripts %} - {% block libs %} - - {% endblock %} - - {% if lang.t("search.language") != "en" %} - {% set languages = lang.t("search.language").split(",") %} - {% if languages | length and languages[0] != "" %} - {% set path = base_url + "/assets/javascripts/lunr" %} - - {% for language in languages | map("trim") %} - {% if language != "en" %} - {% if language == "jp" %} - - {% endif %} - - {% endif %} - {% endfor %} - {% if languages | length > 0 %} - - {% endif %} - {% endif %} - {% endif %} - {% for path in extra_javascript %} - - {% endfor %} - {% endblock %} - {% block analytics %} - {% if config.google_analytics %} - {% include "partials/integrations/analytics.html" %} - {% endif %} - {% endblock %} - - - - - - - - - - - diff --git a/docs/tools/mkdocs-material-theme/main.html b/docs/tools/mkdocs-material-theme/main.html deleted file mode 100644 index 94d9808cc76..00000000000 --- a/docs/tools/mkdocs-material-theme/main.html +++ /dev/null @@ -1 +0,0 @@ -{% extends "base.html" %} diff --git a/docs/tools/mkdocs-material-theme/mkdocs_theme.yml b/docs/tools/mkdocs-material-theme/mkdocs_theme.yml deleted file mode 100644 index c7e3e28c3f1..00000000000 --- a/docs/tools/mkdocs-material-theme/mkdocs_theme.yml +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright (c) 2016-2017 Martin Donath - -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to -# deal in the Software without restriction, including without limitation the -# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -# sell copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. - -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -# IN THE SOFTWARE. 
- -# Language for theme localization -language: en - -# Feature flags for functionality that alters behavior significantly, and thus -# may be a matter of taste -feature: - - # Another layer on top of the main navigation for larger screens in the form - # of tabs, especially useful for larger documentation projects - tabs: false - -# Sets the primary and accent color palettes as defined in the Material Design -# documentation - possible values can be looked up in the getting started guide -palette: - - # Primary color used for header, sidebar and links, default: indigo - primary: - - # Accent color for highlighting user interaction, default: indigo - accent: - -# Fonts used by Material, automatically loaded from Google Fonts - see the site -# for a list of available fonts -font: - - # Default font for text - text: Roboto - - # Fixed-width font for code listings - code: Roboto Mono - -# Favicon to be rendered -favicon: assets/images/favicon.png - -# The logo of the documentation shown in the header and navigation can either -# be a Material Icon ligature (see https://material.io/icons/) or an image URL -logo: - icon: "\uE80C" - -# Material includes the search in the header as a partial, not as a separate -# template, so it's correct that search.html is missing -include_search_page: false - -# Material doesn't use MkDocs search functionality but provides its own. For -# this reason, only the search index needs to be built -search_index_only: true - -# Static pages to build -static_templates: - - 404.html diff --git a/docs/tools/mkdocs-material-theme/partials/flags.html b/docs/tools/mkdocs-material-theme/partials/flags.html deleted file mode 100644 index c7b06fbc4d0..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/flags.html +++ /dev/null @@ -1,8 +0,0 @@ -{% set alt_langs = [['en', 'English'], ['ru', 'Russian'], ['zh', 'Chinese'], ['ja', 'Japanese'], ['fa', 'Farsi']] %} -{% for alt_lang, alt_title in alt_langs %} - - {% include "assets/flags/" + alt_lang + ".svg" %} - -{% endfor %} diff --git a/docs/tools/mkdocs-material-theme/partials/footer.html b/docs/tools/mkdocs-material-theme/partials/footer.html deleted file mode 100644 index b6cff19e6eb..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/footer.html +++ /dev/null @@ -1,63 +0,0 @@ -{% import "partials/language.html" as lang with context %} -
- {% if page.previous_page or page.next_page %} - - {% endif %} - -
diff --git a/docs/tools/mkdocs-material-theme/partials/header.html b/docs/tools/mkdocs-material-theme/partials/header.html deleted file mode 100644 index c122cf4ee59..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/header.html +++ /dev/null @@ -1,50 +0,0 @@ -
- - -
diff --git a/docs/tools/mkdocs-material-theme/partials/hero.html b/docs/tools/mkdocs-material-theme/partials/hero.html deleted file mode 100644 index d0c534fe229..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/hero.html +++ /dev/null @@ -1,9 +0,0 @@ -{% set class = "md-hero" %} -{% if not feature.tabs %} - {% set class = "md-hero md-hero--expand" %} -{% endif %} -
-
- {{ page.meta.hero }} -
-
diff --git a/docs/tools/mkdocs-material-theme/partials/integrations/analytics.html b/docs/tools/mkdocs-material-theme/partials/integrations/analytics.html deleted file mode 100644 index 2b0fcdfdc40..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/integrations/analytics.html +++ /dev/null @@ -1 +0,0 @@ - diff --git a/docs/tools/mkdocs-material-theme/partials/integrations/disqus.html b/docs/tools/mkdocs-material-theme/partials/integrations/disqus.html deleted file mode 100644 index 5f003ca41d9..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/integrations/disqus.html +++ /dev/null @@ -1,14 +0,0 @@ -
- diff --git a/docs/tools/mkdocs-material-theme/partials/language.html b/docs/tools/mkdocs-material-theme/partials/language.html deleted file mode 100644 index 278339b74a8..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/language.html +++ /dev/null @@ -1,9 +0,0 @@ -{% import "partials/language/" + config.theme.language + ".html" as lang %} -{% macro t(key) %}{{ { - "search.language": ( - config.extra.search | default({}) - ).language | default(config.theme.language, true), - "search.tokenizer": ( - config.extra.search | default({}) - ).tokenizer | default("", true), -}[key] or lang.t(key) }}{% endmacro %} diff --git a/docs/tools/mkdocs-material-theme/partials/language/da.html b/docs/tools/mkdocs-material-theme/partials/language/da.html deleted file mode 100644 index e123b499860..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/language/da.html +++ /dev/null @@ -1,18 +0,0 @@ -{% macro t(key) %}{{ { - "language": "da", - "clipboard.copy": "Kopiér til udklipsholderen", - "clipboard.copied": "Kopieret til udklipsholderen", - "edit.link.title": "Redigér denne side", - "footer.previous": "Forrige", - "footer.next": "Næste", - "meta.comments": "Kommentarer", - "meta.source": "Kilde", - "search.placeholder": "Søg", - "search.result.placeholder": "Indtask søgeord", - "search.result.none": "Ingen resultater fundet", - "search.result.one": "1 resultat", - "search.result.other": "# resultater", - "search.tokenizer": "[\s\-]+", - "source.link.title": "Åbn arkiv", - "toc.title": "Indholdsfortegnelse" -}[key] }}{% endmacro %} diff --git a/docs/tools/mkdocs-material-theme/partials/language/de.html b/docs/tools/mkdocs-material-theme/partials/language/de.html deleted file mode 100644 index e5bbe53dfd0..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/language/de.html +++ /dev/null @@ -1,18 +0,0 @@ -{% macro t(key) %}{{ { - "language": "de", - "clipboard.copy": "In Zwischenablage kopieren", - "clipboard.copied": "In Zwischenablage kopiert", - "edit.link.title": "Seite editieren", - "footer.previous": "Vorherige Seite", - "footer.next": "Nächste Seite", - "meta.comments": "Kommentare", - "meta.source": "Quellcode", - "search.placeholder": "Suche", - "search.result.placeholder": "Suchbegriff eingeben", - "search.result.none": "Keine Suchergebnisse", - "search.result.one": "1 Suchergebnis", - "search.result.other": "# Suchergebnisse", - "search.tokenizer": "[\s\-]+", - "source.link.title": "Quellcode", - "toc.title": "Inhaltsverzeichnis" -}[key] }}{% endmacro %} diff --git a/docs/tools/mkdocs-material-theme/partials/language/en.html b/docs/tools/mkdocs-material-theme/partials/language/en.html deleted file mode 100644 index 47e40c800bd..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/language/en.html +++ /dev/null @@ -1,24 +0,0 @@ -{% macro t(key) %}{{ { - "language": "en", - "clipboard.copy": "Copy to clipboard", - "clipboard.copied": "Copied to clipboard", - "edit.link.title": "Edit this page", - "footer.previous": "Previous", - "footer.next": "Next", - "meta.comments": "Comments", - "meta.source": "Source", - "nav.latest": "master", - "nav.multi_page": "Multi page version", - "nav.pdf": "PDF version", - "nav.release": "Release", - "nav.single_page": "Single page version", - "nav.source": "ClickHouse source code", - "search.placeholder": "Search", - "search.result.placeholder": "Type to start searching", - "search.result.none": "No matching documents", - "search.result.one": "1 matching document", - "search.result.other": "# matching documents", - 
"search.tokenizer": "[\s\-]+", - "source.link.title": "Go to repository", - "toc.title": "Table of contents" -}[key] }}{% endmacro %} diff --git a/docs/tools/mkdocs-material-theme/partials/language/es.html b/docs/tools/mkdocs-material-theme/partials/language/es.html deleted file mode 100644 index 1e2dbf68fa4..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/language/es.html +++ /dev/null @@ -1,18 +0,0 @@ -{% macro t(key) %}{{ { - "language": "es", - "clipboard.copy": "Copiar al portapapeles", - "clipboard.copied": "Copiado al portapapeles", - "edit.link.title": "Editar esta página", - "footer.previous": "Anterior", - "footer.next": "Siguiente", - "meta.comments": "Comentarios", - "meta.source": "Fuente", - "search.placeholder": "Búsqueda", - "search.result.placeholder": "Teclee para comenzar búsqueda", - "search.result.none": "No se encontraron documentos", - "search.result.one": "1 documento encontrado", - "search.result.other": "# documentos encontrados", - "search.tokenizer": "[\s\-]+", - "source.link.title": "Ir al repositorio", - "toc.title": "Tabla de contenidos" -}[key] }}{% endmacro %} diff --git a/docs/tools/mkdocs-material-theme/partials/language/fa.html b/docs/tools/mkdocs-material-theme/partials/language/fa.html deleted file mode 100644 index b321e1319b8..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/language/fa.html +++ /dev/null @@ -1,28 +0,0 @@ -{% macro t(key) %}{{ { - "language": "fa", - "direction": "rtl", - "clipboard.copy": "کپی کردن", - "clipboard.copied": "کپی شد", - "edit.link.title": "این صفحه را ویرایش کنید", - "footer.previous": "قبلی", - "footer.next": "بعدی", - "meta.comments": "نظرات", - "meta.source": "منبع", - "nav.latest": "آخرین", - "nav.multi_page": "نسخه چند صفحه ای", - "nav.pdf": "نسخه PDF", - "nav.release": "رهایی", - "nav.single_page": "نسخه تک صفحه", - "nav.source": "کد منبع کلیک", - "search.language": "", - "search.pipeline.stopwords": false, - "search.pipeline.trimmer": false, - "search.placeholder": "جستجو", - "search.result.placeholder": "برای شروع جستجو تایپ کنید", - "search.result.none": "سندی یافت نشد", - "search.result.one": "1 سند یافت شد", - "search.result.other": "# سند یافت شد", - "skip.link.title": "پرش به محتویات", - "source.link.title": "رفتن به مخزن", - "toc.title": "فهرست موضوعات" -}[key] }}{% endmacro %} diff --git a/docs/tools/mkdocs-material-theme/partials/language/fr.html b/docs/tools/mkdocs-material-theme/partials/language/fr.html deleted file mode 100644 index 87d7faa99fd..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/language/fr.html +++ /dev/null @@ -1,18 +0,0 @@ -{% macro t(key) %}{{ { - "language": "fr", - "clipboard.copy": "Copier dans le presse-papier", - "clipboard.copied": "Copié dans le presse-papier", - "edit.link.title": "Editer cette page", - "footer.previous": "Précédent", - "footer.next": "Suivant", - "meta.comments": "Commentaires", - "meta.source": "Source", - "search.placeholder": "Rechercher", - "search.result.placeholder": "Taper pour démarrer la recherche", - "search.result.none": "Aucun document trouvé", - "search.result.one": "1 document trouvé", - "search.result.other": "# documents trouvés", - "search.tokenizer": "[\s\-]+", - "source.link.title": "Aller au dépôt", - "toc.title": "Table des matières" -}[key] }}{% endmacro %} diff --git a/docs/tools/mkdocs-material-theme/partials/language/it.html b/docs/tools/mkdocs-material-theme/partials/language/it.html deleted file mode 100644 index d9fe6fe745b..00000000000 --- 
a/docs/tools/mkdocs-material-theme/partials/language/it.html +++ /dev/null @@ -1,18 +0,0 @@ -{% macro t(key) %}{{ { - "language": "it", - "clipboard.copy": "Copia", - "clipboard.copied": "Copiato", - "edit.link.title": "Modifica", - "footer.previous": "Precedente", - "footer.next": "Prossimo", - "meta.comments": "Commenti", - "meta.source": "Sorgente", - "search.placeholder": "Cerca", - "search.result.placeholder": "Scrivi per iniziare a cercare", - "search.result.none": "Nessun documento trovato", - "search.result.one": "1 documento trovato", - "search.result.other": "# documenti trovati", - "search.tokenizer": "[\s\-]+", - "source.link.title": "Apri repository", - "toc.title": "Indice" -}[key] }}{% endmacro %} diff --git a/docs/tools/mkdocs-material-theme/partials/language/ja.html b/docs/tools/mkdocs-material-theme/partials/language/ja.html deleted file mode 100644 index 47341ab06ee..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/language/ja.html +++ /dev/null @@ -1,24 +0,0 @@ -{% macro t(key) %}{{ { - "language": "ja", - "clipboard.copy": "クリップボードへコピー", - "clipboard.copied": "コピーしました", - "edit.link.title": "編集", - "footer.previous": "前", - "footer.next": "次", - "meta.comments": "コメント", - "meta.source": "ソース", - "nav.latest": "master", - "nav.multi_page": "マルチページ版", - "nav.pdf": "PDF版", - "nav.release": "リリース", - "nav.single_page": "シングルページ版", - "nav.source": "ClickHouseソースコード", - "search.placeholder": "検索", - "search.result.placeholder": "検索キーワードを入力してください", - "search.result.none": "何も見つかりませんでした", - "search.result.one": "1件見つかりました", - "search.result.other": "#件見つかりました", - "search.tokenizer": "[\s\- 、。,.]+", - "source.link.title": "リポジトリへ", - "toc.title": "目次" -}[key] }}{% endmacro %} diff --git a/docs/tools/mkdocs-material-theme/partials/language/kr.html b/docs/tools/mkdocs-material-theme/partials/language/kr.html deleted file mode 100644 index 27163eb0bd4..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/language/kr.html +++ /dev/null @@ -1,18 +0,0 @@ -{% macro t(key) %}{{ { - "language": "kr", - "clipboard.copy": "클립보드로 복사", - "clipboard.copied": "클립보드에 복사됨", - "edit.link.title": "이 페이지를 편집", - "footer.previous": "이전", - "footer.next": "다음", - "meta.comments": "댓글", - "meta.source": "출처", - "search.placeholder": "검색", - "search.result.placeholder": "검색어를 입력하세요", - "search.result.none": "검색어와 일치하는 문서가 없습니다", - "search.result.one": "1개의 일치하는 문서", - "search.result.other": "#개의 일치하는 문서", - "search.tokenizer": "[\s\-]+", - "source.link.title": "저장소로 이동", - "toc.title": "목차" -}[key] }}{% endmacro %} diff --git a/docs/tools/mkdocs-material-theme/partials/language/no.html b/docs/tools/mkdocs-material-theme/partials/language/no.html deleted file mode 100644 index 63484a9726b..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/language/no.html +++ /dev/null @@ -1,18 +0,0 @@ -{% macro t(key) %}{{ { - "language": "no", - "clipboard.copy": "Kopier til utklippstavlen", - "clipboard.copied": "Kopiert til utklippstavlen", - "edit.link.title": "Rediger denne siden", - "footer.previous": "Forrige", - "footer.next": "Neste", - "meta.comments": "Kommentarer", - "meta.source": "Kilde", - "search.placeholder": "Søk", - "search.result.placeholder": "Skriv søkeord", - "search.result.none": "Ingen treff", - "search.result.one": "1 treff", - "search.result.other": "# treff", - "search.tokenizer": "[\s\-]+", - "source.link.title": "Gå til kilde", - "toc.title": "Innholdsfortegnelse" -}[key] }}{% endmacro %} diff --git a/docs/tools/mkdocs-material-theme/partials/language/pl.html 
b/docs/tools/mkdocs-material-theme/partials/language/pl.html deleted file mode 100644 index 54889e5c35e..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/language/pl.html +++ /dev/null @@ -1 +0,0 @@ -{% macro t(key) %}{{ { "language": "pl", "clipboard.copy": "Kopiuj do schowka", "clipboard.copied": "Skopiowane", "edit.link.title": "Edytuj tę stronę", "footer.previous": "Poprzednia strona", "footer.next": "Następna strona", "meta.comments": "Komentarze", "meta.source": "Kod źródłowy", "search.placeholder": "Szukaj", "search.result.placeholder": "Zacznij pisać, aby szukać", "search.result.none": "Brak wyników wyszukiwania", "search.result.one": "Wyniki wyszukiwania: 1", "search.result.other": "Wyniki wyszukiwania: #", "search.tokenizer": "[\s\-]+", "source.link.title": "Idź do repozytorium", "toc.title": "Spis treści" }[key] }}{% endmacro %} diff --git a/docs/tools/mkdocs-material-theme/partials/language/pt.html b/docs/tools/mkdocs-material-theme/partials/language/pt.html deleted file mode 100644 index 2e43fc9ed71..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/language/pt.html +++ /dev/null @@ -1,18 +0,0 @@ -{% macro t(key) %}{{ { - "language": "pt", - "clipboard.copy": "Copiar para área de transferência", - "clipboard.copied": "Copiado para área de transferência", - "edit.link.title": "Editar esta página", - "footer.previous": "Anterior", - "footer.next": "Próximo", - "meta.comments": "Comentários", - "meta.source": "Fonte", - "search.placeholder": "Buscar", - "search.result.placeholder": "Digite para iniciar a busca", - "search.result.none": "Nenhum resultado encontrado", - "search.result.one": "1 resultado encontrado", - "search.result.other": "# resultados encontrados", - "search.tokenizer": "[\s\-]+", - "source.link.title": "Ir ao repositório", - "toc.title": "Índice" -}[key] }}{% endmacro %} diff --git a/docs/tools/mkdocs-material-theme/partials/language/ru.html b/docs/tools/mkdocs-material-theme/partials/language/ru.html deleted file mode 100644 index eb8a31e86a4..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/language/ru.html +++ /dev/null @@ -1,24 +0,0 @@ -{% macro t(key) %}{{ { - "language": "ru", - "clipboard.copy": "Копировать в буфер", - "clipboard.copied": "Скопировано в буфер", - "edit.link.title": "Редактировать страницу", - "footer.previous": "Назад", - "footer.next": "Вперед", - "meta.comments": "Комментарии", - "meta.source": "Исходный код", - "nav.latest": "последний", - "nav.multi_page": "Многостраничная версия", - "nav.pdf": "PDF версия", - "nav.release": "Релиз", - "nav.single_page": "Одностраничная версия", - "nav.source": "Исходный код ClickHouse", - "search.placeholder": "Поиск", - "search.result.placeholder": "Начните печатать для поиска", - "search.result.none": "Совпадений не найдено", - "search.result.one": "Найдено 1 совпадение", - "search.result.other": "Найдено # совпадений", - "search.tokenizer": "[\s\-]+", - "source.link.title": "Перейти к репозиторию", - "toc.title": "Содержание" -}[key] }}{% endmacro %} diff --git a/docs/tools/mkdocs-material-theme/partials/language/sv.html b/docs/tools/mkdocs-material-theme/partials/language/sv.html deleted file mode 100644 index 1d164713eba..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/language/sv.html +++ /dev/null @@ -1,18 +0,0 @@ -{% macro t(key) %}{{ { - "language": "sv", - "clipboard.copy": "Kopiera till urklipp", - "clipboard.copied": "Kopierat till urklipp", - "edit.link.title": "Redigera sidan", - "footer.previous": "Föregående", - "footer.next": "Nästa", - 
"meta.comments": "Kommentarer", - "meta.source": "Källa", - "search.placeholder": "Sök", - "search.result.placeholder": "Skriv sökord", - "search.result.none": "Inga sökresultat", - "search.result.one": "1 sökresultat", - "search.result.other": "# sökresultat", - "search.tokenizer": "[\s\-]+", - "source.link.title": "Gå till datakatalog", - "toc.title": "Innehållsförteckning" -}[key] }}{% endmacro %} diff --git a/docs/tools/mkdocs-material-theme/partials/language/zh-Hant.html b/docs/tools/mkdocs-material-theme/partials/language/zh-Hant.html deleted file mode 100644 index f13eca9be8e..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/language/zh-Hant.html +++ /dev/null @@ -1,18 +0,0 @@ -{% macro t(key) %}{{ { - "language": "zh-Hant", - "clipboard.copy": "拷貝", - "clipboard.copied": "已拷貝", - "edit.link.title": "編輯此頁", - "footer.previous": "前進", - "footer.next": "後退", - "meta.comments": "評論", - "meta.source": "來源", - "search.placeholder": "搜尋", - "search.result.placeholder": "鍵入以開始檢索", - "search.result.none": "沒有找到符合條件的結果", - "search.result.one": "找到 1 个符合條件的結果", - "search.result.other": "# 個符合條件的結果", - "search.tokenizer": "[\,\。]+", - "source.link.title": "前往 Github 倉庫", - "toc.title": "目錄" -}[key] }}{% endmacro %} diff --git a/docs/tools/mkdocs-material-theme/partials/language/zh.html b/docs/tools/mkdocs-material-theme/partials/language/zh.html deleted file mode 100644 index 36f681c8a0e..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/language/zh.html +++ /dev/null @@ -1,24 +0,0 @@ -{% macro t(key) %}{{ { - "language": "zh", - "clipboard.copy": "复制", - "clipboard.copied": "已复制", - "edit.link.title": "编辑此页", - "footer.previous": "后退", - "footer.next": "前进", - "meta.comments": "评论", - "meta.source": "来源", - "nav.latest": "最新", - "nav.multi_page": "多页版本", - "nav.pdf": "PDF版本", - "nav.release": "发布", - "nav.single_page": "单页版本", - "nav.source": "ClickHouse源代码", - "search.placeholder": "搜索", - "search.result.placeholder": "键入以开始搜索", - "search.result.none": "没有找到符合条件的结果", - "search.result.one": "找到 1 个符合条件的结果", - "search.result.other": "# 个符合条件的结果", - "search.tokenizer": "[\,\。]+", - "source.link.title": "前往 Github 仓库", - "toc.title": "目录" -}[key] }}{% endmacro %} diff --git a/docs/tools/mkdocs-material-theme/partials/nav-item.html b/docs/tools/mkdocs-material-theme/partials/nav-item.html deleted file mode 100644 index 4b31a079912..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/nav-item.html +++ /dev/null @@ -1,56 +0,0 @@ -{% if nav_item.title != "hidden" %} -{% set class = "md-nav__item" %} -{% if nav_item.active %} - {% set class = "md-nav__item md-nav__item--active" %} -{% endif %} -{% if nav_item.children %} -
  • - {% if nav_item.active %} - - {% else %} - - {% endif %} - - -
  • -{% elif nav_item == page %} -
  • - {% set toc_ = page.toc %} - - {% if toc_ | first is defined and "\x3ch1 id=" in page.content %} - {% set toc_ = (toc_ | first).children %} - {% endif %} - {% if toc_ | first is defined %} - - {% endif %} - - {{ nav_item.title }} - - {% if toc_ | first is defined %} - {% include "partials/toc.html" %} - {% endif %} -
  • -{% else %} -
  • - - {{ nav_item.title }} - -
  • -{% endif %} -{% endif %} diff --git a/docs/tools/mkdocs-material-theme/partials/nav.html b/docs/tools/mkdocs-material-theme/partials/nav.html deleted file mode 100644 index 9bf1076c349..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/nav.html +++ /dev/null @@ -1,55 +0,0 @@ - diff --git a/docs/tools/mkdocs-material-theme/partials/search.html b/docs/tools/mkdocs-material-theme/partials/search.html deleted file mode 100644 index 84428bdf69c..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/search.html +++ /dev/null @@ -1,21 +0,0 @@ -{% import "partials/language.html" as lang with context %} - diff --git a/docs/tools/mkdocs-material-theme/partials/social.html b/docs/tools/mkdocs-material-theme/partials/social.html deleted file mode 100644 index b990921bb8a..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/social.html +++ /dev/null @@ -1,3 +0,0 @@ - diff --git a/docs/tools/mkdocs-material-theme/partials/source.html b/docs/tools/mkdocs-material-theme/partials/source.html deleted file mode 100644 index 48d4eb1aaff..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/source.html +++ /dev/null @@ -1,25 +0,0 @@ -{% import "partials/language.html" as lang with context %} -{% set platform = config.extra.repo_icon or config.repo_url %} -{% if "github" in platform %} - {% set repo_type = "github" %} -{% elif "gitlab" in platform %} - {% set repo_type = "gitlab" %} -{% elif "bitbucket" in platform %} - {% set repo_type = "bitbucket" %} -{% else %} - {% set repo_type = "" %} -{% endif %} -{% block repo %} - - {% if repo_type %} -
    - - - -
    - {% endif %} -
    - {{ config.repo_name }} -
    -
    -{% endblock %} diff --git a/docs/tools/mkdocs-material-theme/partials/tabs-item.html b/docs/tools/mkdocs-material-theme/partials/tabs-item.html deleted file mode 100644 index 686b5a59b92..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/tabs-item.html +++ /dev/null @@ -1,31 +0,0 @@ -{% if nav_item.is_homepage %} -
  • - {% if not page.ancestors | length and nav | selectattr("url", page.url) %} - - {{ nav_item.title }} - - {% else %} - - {{ nav_item.title }} - - {% endif %} -
  • -{% elif nav_item.children and nav_item.children | length > 0 %} - {% set title = title | default(nav_item.title) %} - {% if (nav_item.children | first).children | length > 0 %} - {% set nav_item = nav_item.children | first %} - {% include "partials/tabs-item.html" %} - {% else %} -
  • - {% if nav_item.active %} - - {{ title }} - - {% else %} - - {{ title }} - - {% endif %} -
  • - {% endif %} -{% endif %} diff --git a/docs/tools/mkdocs-material-theme/partials/tabs.html b/docs/tools/mkdocs-material-theme/partials/tabs.html deleted file mode 100644 index e040436bf10..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/tabs.html +++ /dev/null @@ -1,13 +0,0 @@ -{% set class = "md-tabs" %} -{% if page.ancestors | length > 0 %} - {% set class = "md-tabs md-tabs--active" %} -{% endif %} - diff --git a/docs/tools/mkdocs-material-theme/partials/toc-item.html b/docs/tools/mkdocs-material-theme/partials/toc-item.html deleted file mode 100644 index 3b4f4d76cee..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/toc-item.html +++ /dev/null @@ -1,14 +0,0 @@ -
  • - - {{ toc_item.title }} - - {% if toc_item.children %} - - {% endif %} -
  • diff --git a/docs/tools/mkdocs-material-theme/partials/toc.html b/docs/tools/mkdocs-material-theme/partials/toc.html deleted file mode 100644 index f268ac0c6cf..00000000000 --- a/docs/tools/mkdocs-material-theme/partials/toc.html +++ /dev/null @@ -1,29 +0,0 @@ -{% import "partials/language.html" as lang with context %} - diff --git a/docs/tools/translate/add_meta_flag.py b/docs/tools/translate/add_meta_flag.py new file mode 100755 index 00000000000..d87aa044faf --- /dev/null +++ b/docs/tools/translate/add_meta_flag.py @@ -0,0 +1,12 @@ +#!/usr/bin/env python3 + +import sys + +import util + +if __name__ == '__main__': + flag_name = sys.argv[1] + path = sys.argv[2] + meta, content = util.read_md_file(path) + meta[flag_name] = True + util.write_md_file(path, meta, content) diff --git a/docs/tools/translate/filter.py b/docs/tools/translate/filter.py index c56685226e8..2e0b624f398 100755 --- a/docs/tools/translate/filter.py +++ b/docs/tools/translate/filter.py @@ -8,10 +8,13 @@ import pandocfilters import slugify import translate +import util is_debug = os.environ.get('DEBUG') is not None +filename = os.getenv('INPUT') + def debug(*args): if is_debug: @@ -41,7 +44,7 @@ def process_buffer(buffer, new_value, item=None, is_header=False): if text.endswith(' ') and not translated_text.endswith(' '): translated_text = translated_text + ' ' - title_case = is_header and translate.default_target_language == 'en' and text[0].isupper() + title_case = False # is_header and translate.default_target_language == 'en' and text[0].isupper() title_case_whitelist = {'a', 'an', 'the', 'and', 'or'} for token in translated_text.split(' '): if title_case and not token.isupper(): @@ -113,7 +116,7 @@ def translate_filter(key, value, _format, _): else: remaining_para_value.append(item) - break_value = [pandocfilters.LineBreak(),pandocfilters.Str(' ' * 4)] + break_value = [pandocfilters.LineBreak(), pandocfilters.Str(' ' * 4)] if admonition_value[-1].get('t') == 'Quoted': text = process_sentence(admonition_value[-1]['c'][-1]) text[0]['c'] = '"' + text[0]['c'] @@ -139,7 +142,24 @@ def translate_filter(key, value, _format, _): return pandocfilters.Str(value[2][0]) except IndexError: pass + value[1] = process_sentence(value[1]) + href = value[2][0] + if not (href.startswith('http') or href.startswith('#')): + anchor = None + attempts = 10 + if '#' in href: + href, anchor = href.split('#', 1) + + if filename: + while attempts and not os.path.exists(href): + href = f'../{href}' + attempts -= 1 + if anchor: + href = f'{href}#{anchor}' + + if attempts: + value[2][0] = href return cls(*value) elif key == 'Header': if value[1][0].islower() and '_' not in value[1][0]: # Preserve some manually specified anchors @@ -155,4 +175,9 @@ def translate_filter(key, value, _format, _): if __name__ == "__main__": - pandocfilters.toJSONFilter(translate_filter) + pwd = os.path.dirname(filename or '.') + if pwd: + with util.cd(pwd): + pandocfilters.toJSONFilter(translate_filter) + else: + pandocfilters.toJSONFilter(translate_filter) diff --git a/docs/tools/translate/normalize-markdown.sh b/docs/tools/translate/normalize-markdown.sh index d25c3ee65b2..7850fa34b1d 100755 --- a/docs/tools/translate/normalize-markdown.sh +++ b/docs/tools/translate/normalize-markdown.sh @@ -7,6 +7,7 @@ trap 'rm -f -- "${TEMP_FILE}"' INT TERM HUP EXIT INPUT="$1" if [[ ! 
-L "${INPUT}" ]] then + export INPUT cat "${INPUT}" > "${TEMP_FILE}" "${BASE_DIR}/translate.sh" "en" "${TEMP_FILE}" "${INPUT}" fi diff --git a/docs/tools/translate/replace-with-translation.sh b/docs/tools/translate/replace-with-translation.sh index 6106b1e1e06..453dd86ba9e 100755 --- a/docs/tools/translate/replace-with-translation.sh +++ b/docs/tools/translate/replace-with-translation.sh @@ -5,7 +5,7 @@ BASE_DIR=$(dirname $(readlink -f $0)) TEMP_FILE=$(mktemp) trap 'rm -f -- "${TEMP_FILE}"' INT TERM HUP EXIT TARGET_LANGUAGE="$1" -INPUT="$2" +export INPUT="$2" cat "${INPUT}" > "${TEMP_FILE}" if [[ ! -z $SLEEP ]] then diff --git a/docs/tools/translate/split_meta.py b/docs/tools/translate/split_meta.py new file mode 100755 index 00000000000..021fe273790 --- /dev/null +++ b/docs/tools/translate/split_meta.py @@ -0,0 +1,26 @@ +#!/usr/bin/env python3 +import os +import subprocess +import sys + +import util + + +if __name__ == '__main__': + path = sys.argv[1] + content_path = f'{path}.content' + meta_path = f'{path}.meta' + meta, content = util.read_md_file(path) + + target_language = os.getenv('TARGET_LANGUAGE') + if target_language is not None and target_language != 'en': + rev = subprocess.check_output( + 'git rev-parse HEAD', shell=True + ).decode('utf-8').strip() + meta['machine_translated'] = True + meta['machine_translated_rev'] = rev + + with open(content_path, 'w') as f: + print(content, file=f) + + util.write_md_file(meta_path, meta, '') diff --git a/docs/tools/translate/translate.sh b/docs/tools/translate/translate.sh index 89225e0cfcd..d9f8501184f 100755 --- a/docs/tools/translate/translate.sh +++ b/docs/tools/translate/translate.sh @@ -6,9 +6,16 @@ OUTPUT=${3:-/dev/stdout} export TARGET_LANGUAGE="$1" export DEBUG TEMP_FILE=$(mktemp) -trap 'rm -f -- "${TEMP_FILE}"' INT TERM HUP EXIT +export INPUT_PATH="$2" +INPUT_META="${INPUT_PATH}.meta" +INPUT_CONTENT="${INPUT_PATH}.content" + +trap 'rm -f -- "${TEMP_FILE}" "${INPUT_META}" "${INPUT_CONTENT}"' INT TERM HUP EXIT source "${BASE_DIR}/venv/bin/activate" -pandoc "$2" --filter "${BASE_DIR}/filter.py" -o "${TEMP_FILE}" \ + +${BASE_DIR}/split_meta.py "${INPUT_PATH}" + +pandoc "${INPUT_CONTENT}" --filter "${BASE_DIR}/filter.py" -o "${TEMP_FILE}" \ -f "markdown-space_in_atx_header" -t "markdown_strict+pipe_tables+markdown_attribute+all_symbols_escapable+backtick_code_blocks+autolink_bare_uris-link_attributes+markdown_attribute+mmd_link_attributes-raw_attribute+header_attributes-grid_tables" \ --atx-headers --wrap=none --columns=99999 --tab-stop=4 perl -pi -e 's/{\\#\\#/{##/g' "${TEMP_FILE}" @@ -19,4 +26,4 @@ then perl -pi -e 's/“/«/gg' "${TEMP_FILE}" perl -pi -e 's/”/»/gg' "${TEMP_FILE}" fi -cat "${TEMP_FILE}" > "${OUTPUT}" +cat "${INPUT_META}" "${TEMP_FILE}" > "${OUTPUT}" diff --git a/docs/tools/translate/util.py b/docs/tools/translate/util.py new file mode 120000 index 00000000000..7f16d68497e --- /dev/null +++ b/docs/tools/translate/util.py @@ -0,0 +1 @@ +../util.py \ No newline at end of file diff --git a/docs/tools/util.py b/docs/tools/util.py index 3dc58807612..a5a751020f0 100644 --- a/docs/tools/util.py +++ b/docs/tools/util.py @@ -1,3 +1,4 @@ +import collections import contextlib import multiprocessing import os @@ -7,6 +8,8 @@ import socket import tempfile import threading +import yaml + @contextlib.contextmanager def temp_dir(): @@ -57,3 +60,54 @@ def run_function_in_parallel(func, args_list, threads=False): exit_code = process.exitcode if exit_code: sys.exit(exit_code) + + +def read_md_file(path): + in_meta = False + meta = {} + 
meta_text = [] + content = [] + if os.path.exists(path): + with open(path, 'r') as f: + for line in f: + if line.startswith('---'): + if in_meta: + in_meta = False + meta = yaml.full_load(''.join(meta_text)) + else: + in_meta = True + else: + if in_meta: + meta_text.append(line) + else: + content.append(line) + return meta, ''.join(content) + + +def write_md_file(path, meta, content): + dirname = os.path.dirname(path) + if not os.path.exists(dirname): + os.makedirs(dirname) + + with open(path, 'w') as f: + if meta: + print('---', file=f) + yaml.dump(meta, f) + print('---', file=f) + if not content.startswith('\n'): + print('', file=f) + f.write(content) + + +def represent_ordereddict(dumper, data): + value = [] + for item_key, item_value in data.items(): + node_key = dumper.represent_data(item_key) + node_value = dumper.represent_data(item_value) + + value.append((node_key, node_value)) + + return yaml.nodes.MappingNode(u'tag:yaml.org,2002:map', value) + + +yaml.add_representer(collections.OrderedDict, represent_ordereddict) diff --git a/docs/zh/changelog/2017.md b/docs/zh/changelog/2017.md deleted file mode 120000 index bf4fe14279d..00000000000 --- a/docs/zh/changelog/2017.md +++ /dev/null @@ -1 +0,0 @@ -../../en/changelog/2017.md \ No newline at end of file diff --git a/docs/zh/changelog/2017.md b/docs/zh/changelog/2017.md new file mode 100644 index 00000000000..95156754100 --- /dev/null +++ b/docs/zh/changelog/2017.md @@ -0,0 +1,265 @@ +--- +en_copy: true +--- + +### ClickHouse release 1.1.54327, 2017-12-21 {#clickhouse-release-1-1-54327-2017-12-21} + +This release contains bug fixes for the previous release 1.1.54318: + +- Fixed bug with possible race condition in replication that could lead to data loss. This issue affects versions 1.1.54310 and 1.1.54318. If you use one of these versions with Replicated tables, the update is strongly recommended. This issue shows in logs in Warning messages like `Part ... from own log doesn't exist.` The issue is relevant even if you don’t see these messages in logs. + +### ClickHouse release 1.1.54318, 2017-11-30 {#clickhouse-release-1-1-54318-2017-11-30} + +This release contains bug fixes for the previous release 1.1.54310: + +- Fixed incorrect row deletions during merges in the SummingMergeTree engine +- Fixed a memory leak in unreplicated MergeTree engines +- Fixed performance degradation with frequent inserts in MergeTree engines +- Fixed an issue that was causing the replication queue to stop running +- Fixed rotation and archiving of server logs + +### ClickHouse release 1.1.54310, 2017-11-01 {#clickhouse-release-1-1-54310-2017-11-01} + +#### New features: {#new-features} + +- Custom partitioning key for the MergeTree family of table engines. +- [Kafka](https://clickhouse.yandex/docs/en/operations/table_engines/kafka/) table engine. +- Added support for loading [CatBoost](https://catboost.yandex/) models and applying them to data stored in ClickHouse. +- Added support for time zones with non-integer offsets from UTC. +- Added support for arithmetic operations with time intervals. +- The range of values for the Date and DateTime types is extended to the year 2105. +- Added the `CREATE MATERIALIZED VIEW x TO y` query (specifies an existing table for storing the data of a materialized view). +- Added the `ATTACH TABLE` query without arguments. +- The processing logic for Nested columns with names ending in -Map in a SummingMergeTree table was extracted to the sumMap aggregate function. You can now specify such columns explicitly. 
+- Max size of the IP trie dictionary is increased to 128M entries. +- Added the getSizeOfEnumType function. +- Added the sumWithOverflow aggregate function. +- Added support for the Cap’n Proto input format. +- You can now customize compression level when using the zstd algorithm. + +#### Backward incompatible changes: {#backward-incompatible-changes} + +- Creation of temporary tables with an engine other than Memory is not allowed. +- Explicit creation of tables with the View or MaterializedView engine is not allowed. +- During table creation, a new check verifies that the sampling key expression is included in the primary key. + +#### Bug fixes: {#bug-fixes} + +- Fixed hangups when synchronously inserting into a Distributed table. +- Fixed nonatomic adding and removing of parts in Replicated tables. +- Data inserted into a materialized view is not subjected to unnecessary deduplication. +- Executing a query to a Distributed table for which the local replica is lagging and remote replicas are unavailable does not result in an error anymore. +- Users don’t need access permissions to the `default` database to create temporary tables anymore. +- Fixed crashing when specifying the Array type without arguments. +- Fixed hangups when the disk volume containing server logs is full. +- Fixed an overflow in the toRelativeWeekNum function for the first week of the Unix epoch. + +#### Build improvements: {#build-improvements} + +- Several third-party libraries (notably Poco) were updated and converted to git submodules. + +### ClickHouse release 1.1.54304, 2017-10-19 {#clickhouse-release-1-1-54304-2017-10-19} + +#### New features: {#new-features-1} + +- TLS support in the native protocol (to enable, set `tcp_ssl_port` in `config.xml` ). + +#### Bug fixes: {#bug-fixes-1} + +- `ALTER` for replicated tables now tries to start running as soon as possible. +- Fixed crashing when reading data with the setting `preferred_block_size_bytes=0.` +- Fixed crashes of `clickhouse-client` when pressing `Page Down` +- Correct interpretation of certain complex queries with `GLOBAL IN` and `UNION ALL` +- `FREEZE PARTITION` always works atomically now. +- Empty POST requests now return a response with code 411. +- Fixed interpretation errors for expressions like `CAST(1 AS Nullable(UInt8)).` +- Fixed an error when reading `Array(Nullable(String))` columns from `MergeTree` tables. +- Fixed crashing when parsing queries like `SELECT dummy AS dummy, dummy AS b` +- Users are updated correctly with invalid `users.xml` +- Correct handling when an executable dictionary returns a non-zero response code. + +### ClickHouse release 1.1.54292, 2017-09-20 {#clickhouse-release-1-1-54292-2017-09-20} + +#### New features: {#new-features-2} + +- Added the `pointInPolygon` function for working with coordinates on a coordinate plane. +- Added the `sumMap` aggregate function for calculating the sum of arrays, similar to `SummingMergeTree`. +- Added the `trunc` function. Improved performance of the rounding functions (`round`, `floor`, `ceil`, `roundToExp2`) and corrected the logic of how they work. Changed the logic of the `roundToExp2` function for fractions and negative numbers. +- The ClickHouse executable file is now less dependent on the libc version. The same ClickHouse executable file can run on a wide variety of Linux systems. There is still a dependency when using compiled queries (with the setting `compile = 1` , which is not used by default). +- Reduced the time needed for dynamic compilation of queries. 
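A minimal sketch of the 1.1.54292 additions above (`sumMap`, `pointInPolygon`); the table `visits_example`, its columns, and the polygon literal are invented for illustration and are not part of the release notes:

```sql
-- Hypothetical table: per-visit metric arrays plus a point on a plane.
CREATE TABLE visits_example
(
    date Date,
    metric_ids Array(UInt16),
    metric_values Array(UInt64),
    x Float64,
    y Float64
) ENGINE = MergeTree(date, date, 8192);

-- sumMap sums metric_values per matching key in metric_ids;
-- pointInPolygon tests (x, y) against a polygon given as an array of vertices.
SELECT
    sumMap(metric_ids, metric_values) AS totals,
    sum(pointInPolygon((x, y), [(0., 0.), (10., 0.), (10., 10.), (0., 10.)])) AS points_in_square
FROM visits_example;
```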
+
+#### Bug fixes: {#bug-fixes-2}
+
+- Fixed an error that sometimes produced `part ... intersects previous part` messages and weakened replica consistency.
+- Fixed an error that caused the server to lock up if ZooKeeper was unavailable during shutdown.
+- Removed excessive logging when restoring replicas.
+- Fixed an error in the UNION ALL implementation.
+- Fixed an error in the concat function that occurred if the first column in a block has the Array type.
+- Progress is now displayed correctly in the system.merges table.
+
+### ClickHouse release 1.1.54289, 2017-09-13 {#clickhouse-release-1-1-54289-2017-09-13}
+
+#### New features: {#new-features-3}
+
+- `SYSTEM` queries for server administration: `SYSTEM RELOAD DICTIONARY`, `SYSTEM RELOAD DICTIONARIES`, `SYSTEM DROP DNS CACHE`, `SYSTEM SHUTDOWN`, `SYSTEM KILL`.
+- Added functions for working with arrays: `concat`, `arraySlice`, `arrayPushBack`, `arrayPushFront`, `arrayPopBack`, `arrayPopFront`.
+- Added `root` and `identity` parameters for the ZooKeeper configuration. This allows you to isolate individual users on the same ZooKeeper cluster.
+- Added aggregate functions `groupBitAnd`, `groupBitOr`, and `groupBitXor` (for compatibility, they are also available under the names `BIT_AND`, `BIT_OR`, and `BIT_XOR`).
+- External dictionaries can be loaded from MySQL by specifying a socket in the filesystem.
+- External dictionaries can be loaded from MySQL over SSL (`ssl_cert`, `ssl_key`, `ssl_ca` parameters).
+- Added the `max_network_bandwidth_for_user` setting to restrict the overall bandwidth use for queries per user.
+- Support for `DROP TABLE` for temporary tables.
+- Support for reading `DateTime` values in Unix timestamp format from the `CSV` and `JSONEachRow` formats.
+- Lagging replicas in distributed queries are now excluded by default (the default threshold is 5 minutes).
+- FIFO locking is used during ALTER: an ALTER query isn’t blocked indefinitely for continuously running queries.
+- Option to set `umask` in the config file.
+- Improved performance for queries with `DISTINCT`.
+
+#### Bug fixes: {#bug-fixes-3}
+
+- Improved the process for deleting old nodes in ZooKeeper. Previously, old nodes sometimes didn’t get deleted if there were very frequent inserts, which caused the server to be slow to shut down, among other things.
+- Fixed randomization when choosing hosts for the connection to ZooKeeper.
+- Fixed the exclusion of lagging replicas in distributed queries if the replica is localhost.
+- Fixed an error where a data part in a `ReplicatedMergeTree` table could be broken after running `ALTER MODIFY` on an element in a `Nested` structure.
+- Fixed an error that could cause SELECT queries to “hang”.
+- Improvements to distributed DDL queries.
+- Fixed the query `CREATE TABLE ... AS <materialized view>`.
+- Resolved the deadlock in the `ALTER ... CLEAR COLUMN IN PARTITION` query for `Buffer` tables.
+- Fixed the invalid default value for `Enum`s (0 instead of the minimum) when using the `JSONEachRow` and `TSKV` formats.
+- Resolved the appearance of zombie processes when using a dictionary with an `executable` source.
+- Fixed segfault for the HEAD query.
+
+#### Improved workflow for developing and assembling ClickHouse: {#improved-workflow-for-developing-and-assembling-clickhouse}
+
+- You can use `pbuilder` to build ClickHouse.
+- You can use `libc++` instead of `libstdc++` for builds on Linux.
+- Added instructions for using static code analysis tools: `Coverage`, `clang-tidy`, `cppcheck`.
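The array helpers and bitwise aggregates in the 1.1.54289 feature list above are easiest to see in a toy query. This is a minimal sketch with made-up literals, not something from the release notes:

```sql
-- Non-mutating array manipulation and a bitwise-OR fold over a column.
SELECT
    concat([1, 2], [3])            AS joined,  -- [1, 2, 3]
    arrayPushBack([1, 2], 3)       AS pushed,  -- [1, 2, 3]
    arrayPopFront([1, 2, 3])       AS popped,  -- [2, 3]
    arraySlice([1, 2, 3, 4], 2, 2) AS sliced;  -- [2, 3]

SELECT groupBitOr(flag) AS mask               -- 1 | 2 | 4 = 7
FROM (SELECT arrayJoin([1, 2, 4]) AS flag);
```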
+
+#### Please note when upgrading: {#please-note-when-upgrading}
+
+- There is now a higher default value for the MergeTree setting `max_bytes_to_merge_at_max_space_in_pool` (the maximum total size of data parts to merge, in bytes): it has increased from 100 GiB to 150 GiB. This might result in large merges running after the server upgrade, which could cause an increased load on the disk subsystem. If the free space available on the server is less than twice the total amount of the merges that are running, this will cause all other merges to stop running, including merges of small data parts. As a result, INSERT queries will fail with the message “Merges are processing significantly slower than inserts.” Use the `SELECT * FROM system.merges` query to monitor the situation. You can also check the `DiskSpaceReservedForMerge` metric in the `system.metrics` table, or in Graphite. You don’t need to do anything to fix this, since the issue will resolve itself once the large merges finish. If you find this unacceptable, you can restore the previous value for the `max_bytes_to_merge_at_max_space_in_pool` setting. To do this, go to the `<merge_tree>` section in config.xml, set `<max_bytes_to_merge_at_max_space_in_pool>107374182400</max_bytes_to_merge_at_max_space_in_pool>` and restart the server.
+
+### ClickHouse release 1.1.54284, 2017-08-29 {#clickhouse-release-1-1-54284-2017-08-29}
+
+- This is a bugfix release for the previous 1.1.54282 release. It fixes leaks in the parts directory in ZooKeeper.
+
+### ClickHouse release 1.1.54282, 2017-08-23 {#clickhouse-release-1-1-54282-2017-08-23}
+
+This release contains bug fixes for the previous release 1.1.54276:
+
+- Fixed `DB::Exception: Assertion violation: !_path.empty()` when inserting into a Distributed table.
+- Fixed parsing when inserting in RowBinary format if input data starts with ‘;’.
+- Errors during runtime compilation of certain aggregate functions (e.g. `groupArray()`).
+
+### ClickHouse release 1.1.54276, 2017-08-16 {#clickhouse-release-1-1-54276-2017-08-16}
+
+#### New features: {#new-features-4}
+
+- Added an optional WITH section for a SELECT query. Example query: `WITH 1+1 AS a SELECT a, a*a`
+- INSERT can be performed synchronously in a Distributed table: OK is returned only after all the data is saved on all the shards. This is activated by the setting insert\_distributed\_sync=1.
+- Added the UUID data type for working with 16-byte identifiers.
+- Added aliases of CHAR, FLOAT and other types for compatibility with Tableau.
+- Added the functions toYYYYMM, toYYYYMMDD, and toYYYYMMDDhhmmss for converting time into numbers.
+- You can use IP addresses (together with the hostname) to identify servers for clustered DDL queries.
+- Added support for non-constant arguments and negative offsets in the function `substring(str, pos, len)`.
+- Added the max\_size parameter for the `groupArray(max_size)(column)` aggregate function, and optimized its performance.
+
+#### Main changes: {#main-changes}
+
+- Security improvements: all server files are created with 0640 permissions (can be changed via config parameter).
+- Improved error messages for queries with invalid syntax.
+- Significantly reduced memory consumption and improved performance when merging large sections of MergeTree data.
+- Significantly increased the performance of data merges for the ReplacingMergeTree engine.
+- Improved performance for asynchronous inserts from a Distributed table by combining multiple source inserts. To enable this functionality, use the setting distributed\_directory\_monitor\_batch\_inserts=1.
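To make the 1.1.54276 additions above concrete, here is a minimal sketch combining the new WITH section, the toYYYYMM conversion, and the capped groupArray; the table `hits_example` and its columns are invented for illustration:

```sql
-- Keep at most 5 user ids per month.
WITH toYYYYMM(event_time) AS month
SELECT
    month,
    groupArray(5)(user_id) AS sample_users
FROM hits_example
GROUP BY month
ORDER BY month;
```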
+
+#### Backward incompatible changes: {#backward-incompatible-changes-1}
+
+- Changed the binary format of aggregate states of `groupArray(array_column)` functions for arrays.
+
+#### Complete list of changes: {#complete-list-of-changes}
+
+- Added the `output_format_json_quote_denormals` setting, which enables outputting nan and inf values in JSON format.
+- Optimized stream allocation when reading from a Distributed table.
+- Settings can be configured in readonly mode if the value doesn’t change.
+- Added the ability to retrieve non-integer granules of the MergeTree engine in order to meet restrictions on the block size specified in the preferred\_block\_size\_bytes setting. The purpose is to reduce the consumption of RAM and increase cache locality when processing queries from tables with large columns.
+- Efficient use of indexes that contain expressions like `toStartOfHour(x)` for conditions like `toStartOfHour(x) op constexpr`.
+- Added new settings for MergeTree engines (the merge\_tree section in config.xml):
+    - replicated\_deduplication\_window\_seconds sets the number of seconds allowed for deduplicating inserts in Replicated tables.
+    - cleanup\_delay\_period sets how often to start cleanup to remove outdated data.
+    - replicated\_can\_become\_leader can prevent a replica from becoming the leader (and assigning merges).
+- Accelerated cleanup to remove outdated data from ZooKeeper.
+- Multiple improvements and fixes for clustered DDL queries. Of particular interest is the new setting distributed\_ddl\_task\_timeout, which limits the time to wait for a response from the servers in the cluster. If a DDL request has not been performed on all hosts, a response will contain a timeout error and a request will be executed in an async mode.
+- Improved display of stack traces in the server logs.
+- Added the “none” value for the compression method.
+- You can use multiple dictionaries\_config sections in config.xml.
+- It is possible to connect to MySQL through a socket in the file system.
+- The system.parts table has a new column with information about the size of marks, in bytes.
+
+#### Bug fixes: {#bug-fixes-4}
+
+- Distributed tables using a Merge table now work correctly for a SELECT query with a condition on the `_table` field.
+- Fixed a rare race condition in ReplicatedMergeTree when checking data parts.
+- Fixed possible freezing on “leader election” when starting a server.
+- The max\_replica\_delay\_for\_distributed\_queries setting was ignored when using a local replica of the data source. This has been fixed.
+- Fixed incorrect behavior of `ALTER TABLE CLEAR COLUMN IN PARTITION` when attempting to clean a non-existing column.
+- Fixed an exception in the multiIf function when using empty arrays or strings.
+- Fixed excessive memory allocations when deserializing Native format.
+- Fixed incorrect auto-update of Trie dictionaries.
+- Fixed an exception when running queries with a GROUP BY clause from a Merge table when using SAMPLE.
+- Fixed a crash of GROUP BY when using distributed\_aggregation\_memory\_efficient=1.
+- Now you can specify the database.table in the right side of IN and JOIN.
+- Too many threads were used for parallel aggregation. This has been fixed.
+- Fixed how the “if” function works with FixedString arguments.
+- SELECT worked incorrectly from a Distributed table for shards with a weight of 0. This has been fixed.
+- Running `CREATE VIEW IF EXISTS` no longer causes crashes.
+- Fixed incorrect behavior when input\_format\_skip\_unknown\_fields=1 is set and there are negative numbers.
+- Fixed an infinite loop in the `dictGetHierarchy()` function if there is some invalid data in the dictionary.
+- Fixed `Syntax error: unexpected (...)` errors when running distributed queries with subqueries in an IN or JOIN clause and Merge tables.
+- Fixed an incorrect interpretation of a SELECT query from Dictionary tables.
+- Fixed the “Cannot mremap” error when using arrays in IN and JOIN clauses with more than 2 billion elements.
+- Fixed the failover for dictionaries with MySQL as the source.
+
+#### Improved workflow for developing and assembling ClickHouse: {#improved-workflow-for-developing-and-assembling-clickhouse-1}
+
+- Builds can be assembled in Arcadia.
+- You can use gcc 7 to compile ClickHouse.
+- Parallel builds using ccache+distcc are faster now.
+
+### ClickHouse release 1.1.54245, 2017-07-04 {#clickhouse-release-1-1-54245-2017-07-04}
+
+#### New features: {#new-features-5}
+
+- Distributed DDL (for example, `CREATE TABLE ON CLUSTER`).
+- The replicated query `ALTER TABLE CLEAR COLUMN IN PARTITION`.
+- The engine for Dictionary tables (access to dictionary data in the form of a table).
+- Dictionary database engine (this type of database automatically has Dictionary tables available for all the connected external dictionaries).
+- You can check for updates to the dictionary by sending a request to the source.
+- Qualified column names.
+- Quoting identifiers using double quotation marks.
+- Sessions in the HTTP interface.
+- The OPTIMIZE query for a Replicated table can run not only on the leader.
+
+#### Backward incompatible changes: {#backward-incompatible-changes-2}
+
+- Removed SET GLOBAL.
+
+#### Minor changes: {#minor-changes}
+
+- Now after an alert is triggered, the log prints the full stack trace.
+- Relaxed the verification of the number of damaged/extra data parts at startup (there were too many false positives).
+
+#### Bug fixes: {#bug-fixes-5}
+
+- Fixed a bad connection “sticking” when inserting into a Distributed table.
+- GLOBAL IN now works for a query from a Merge table that looks at a Distributed table.
+- The incorrect number of cores was detected on a Google Compute Engine virtual machine. This has been fixed.
+- Changes in how an executable source of cached external dictionaries works.
+- Fixed the comparison of strings containing null characters.
+- Fixed the comparison of Float32 primary key fields with constants.
+- Previously, an incorrect estimate of the size of a field could lead to overly large allocations.
+- Fixed a crash when querying a Nullable column added to a table using ALTER.
+- Fixed a crash when sorting by a Nullable column, if the number of rows is less than LIMIT.
+- Fixed an ORDER BY subquery consisting of only constant values.
+- Previously, a Replicated table could remain in the invalid state after a failed DROP TABLE.
+- Aliases for scalar subqueries with empty results are no longer lost.
+- Now a query that used compilation does not fail with an error if the .so file gets damaged.
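As an illustration of the distributed DDL feature from 1.1.54245 above, a clustered CREATE might look like the following sketch; the cluster name `example_cluster` and the table definition are assumptions, and the old-style MergeTree engine arguments match the era:

```sql
-- Issued once on any server; the statement is propagated to every host of
-- `example_cluster` as defined under remote_servers in the server config.
CREATE TABLE default.events ON CLUSTER example_cluster
(
    date Date,
    id UInt64
) ENGINE = MergeTree(date, (date, id), 8192);
```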
diff --git a/docs/zh/changelog/2018.md b/docs/zh/changelog/2018.md
deleted file mode 120000
index 20799251f43..00000000000
--- a/docs/zh/changelog/2018.md
+++ /dev/null
@@ -1 +0,0 @@
-../../en/changelog/2018.md
\ No newline at end of file
diff --git a/docs/zh/changelog/2018.md b/docs/zh/changelog/2018.md
new file mode 100644
index 00000000000..49bef18cbf3
--- /dev/null
+++ b/docs/zh/changelog/2018.md
@@ -0,0 +1,1060 @@
+---
+en_copy: true
+---
+
+## ClickHouse release 18.16 {#clickhouse-release-18-16}
+
+### ClickHouse release 18.16.1, 2018-12-21 {#clickhouse-release-18-16-1-2018-12-21}
+
+#### Bug fixes: {#bug-fixes}
+
+- Fixed an error that led to problems with updating dictionaries with the ODBC source. [\#3825](https://github.com/ClickHouse/ClickHouse/issues/3825), [\#3829](https://github.com/ClickHouse/ClickHouse/issues/3829)
+- JIT compilation of aggregate functions now works with LowCardinality columns. [\#3838](https://github.com/ClickHouse/ClickHouse/issues/3838)
+
+#### Improvements: {#improvements}
+
+- Added the `low_cardinality_allow_in_native_format` setting (enabled by default). When disabled, LowCardinality columns will be converted to ordinary columns for SELECT queries and ordinary columns will be expected for INSERT queries. [\#3879](https://github.com/ClickHouse/ClickHouse/pull/3879)
+
+#### Build improvements: {#build-improvements}
+
+- Fixes for builds on macOS and ARM.
+
+### ClickHouse release 18.16.0, 2018-12-14 {#clickhouse-release-18-16-0-2018-12-14}
+
+#### New features: {#new-features}
+
+- `DEFAULT` expressions are evaluated for missing fields when loading data in semi-structured input formats (`JSONEachRow`, `TSKV`). The feature is enabled with the `insert_sample_with_metadata` setting. [\#3555](https://github.com/ClickHouse/ClickHouse/pull/3555)
+- The `ALTER TABLE` query now has the `MODIFY ORDER BY` action for changing the sorting key when adding or removing a table column. This is useful for tables in the `MergeTree` family that perform additional tasks when merging based on this sorting key, such as `SummingMergeTree`, `AggregatingMergeTree`, and so on. [\#3581](https://github.com/ClickHouse/ClickHouse/pull/3581) [\#3755](https://github.com/ClickHouse/ClickHouse/pull/3755)
+- For tables in the `MergeTree` family, now you can specify a different sorting key (`ORDER BY`) and index (`PRIMARY KEY`). The sorting key can be longer than the index. [\#3581](https://github.com/ClickHouse/ClickHouse/pull/3581)
+- Added the `hdfs` table function and the `HDFS` table engine for importing and exporting data to HDFS. [chenxing-xc](https://github.com/ClickHouse/ClickHouse/pull/3617)
+- Added functions for working with base64: `base64Encode`, `base64Decode`, `tryBase64Decode`. [Alexander Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/3350)
+- Now you can use a parameter to configure the precision of the `uniqCombined` aggregate function (select the number of HyperLogLog cells). [\#3406](https://github.com/ClickHouse/ClickHouse/pull/3406)
+- Added the `system.contributors` table that contains the names of everyone who made commits in ClickHouse. [\#3452](https://github.com/ClickHouse/ClickHouse/pull/3452)
+- Added the ability to omit the partition for the `ALTER TABLE ... FREEZE` query in order to back up all partitions at once. [\#3514](https://github.com/ClickHouse/ClickHouse/pull/3514)
+- Added `dictGet` and `dictGetOrDefault` functions that don’t require specifying the type of return value. The type is determined automatically from the dictionary description. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3564) (See the example after this list.)
+- Now you can specify comments for a column in the table description and change it using `ALTER`. [\#3377](https://github.com/ClickHouse/ClickHouse/pull/3377)
+- Reading is supported for `Join` type tables with simple keys. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3728)
+- Now you can specify the options `join_use_nulls`, `max_rows_in_join`, `max_bytes_in_join`, and `join_overflow_mode` when creating a `Join` type table. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3728)
+- Added the `joinGet` function that allows you to use a `Join` type table like a dictionary. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3728) (See the example after this list.)
+- Added the `partition_key`, `sorting_key`, `primary_key`, and `sampling_key` columns to the `system.tables` table in order to provide information about table keys. [\#3609](https://github.com/ClickHouse/ClickHouse/pull/3609)
+- Added the `is_in_partition_key`, `is_in_sorting_key`, `is_in_primary_key`, and `is_in_sampling_key` columns to the `system.columns` table. [\#3609](https://github.com/ClickHouse/ClickHouse/pull/3609)
+- Added the `min_time` and `max_time` columns to the `system.parts` table. These columns are populated when the partitioning key is an expression consisting of `DateTime` columns. [Emmanuel Donin de Rosière](https://github.com/ClickHouse/ClickHouse/pull/3800)
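+
+A minimal sketch of the `dictGet` and `joinGet` additions above (the dictionary `my_dict`, the table `j`, and all values are hypothetical):
+
+```sql
+-- dictGet no longer needs a typed variant such as dictGetString;
+-- the attribute type is taken from the dictionary description.
+SELECT dictGet('my_dict', 'value', toUInt64(1));
+
+-- A Join table used as a lookup table via joinGet.
+CREATE TABLE j (`k` UInt32, `v` String) ENGINE = Join(ANY, LEFT, k);
+INSERT INTO j VALUES (1, 'one'), (2, 'two');
+SELECT joinGet('j', 'v', toUInt32(2)); -- returns 'two'
+```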
+
+#### Bug fixes: {#bug-fixes-1}
+
+- Fixes and performance improvements for the `LowCardinality` data type. `GROUP BY` using `LowCardinality(Nullable(...))`. Getting the values of `extremes`. Processing high-order functions. `LEFT ARRAY JOIN`. Distributed `GROUP BY`. Functions that return `Array`. Execution of `ORDER BY`. Writing to `Distributed` tables (nicelulu). Backward compatibility for `INSERT` queries from old clients that implement the `Native` protocol. Support for `LowCardinality` for `JOIN`. Improved performance when working in a single stream. [\#3823](https://github.com/ClickHouse/ClickHouse/pull/3823) [\#3803](https://github.com/ClickHouse/ClickHouse/pull/3803) [\#3799](https://github.com/ClickHouse/ClickHouse/pull/3799) [\#3769](https://github.com/ClickHouse/ClickHouse/pull/3769) [\#3744](https://github.com/ClickHouse/ClickHouse/pull/3744) [\#3681](https://github.com/ClickHouse/ClickHouse/pull/3681) [\#3651](https://github.com/ClickHouse/ClickHouse/pull/3651) [\#3649](https://github.com/ClickHouse/ClickHouse/pull/3649) [\#3641](https://github.com/ClickHouse/ClickHouse/pull/3641) [\#3632](https://github.com/ClickHouse/ClickHouse/pull/3632) [\#3568](https://github.com/ClickHouse/ClickHouse/pull/3568) [\#3523](https://github.com/ClickHouse/ClickHouse/pull/3523) [\#3518](https://github.com/ClickHouse/ClickHouse/pull/3518)
+- Fixed how the `select_sequential_consistency` option works. Previously, when this setting was enabled, an incomplete result was sometimes returned after beginning to write to a new partition. [\#2863](https://github.com/ClickHouse/ClickHouse/pull/2863)
+- Databases are correctly specified when executing DDL `ON CLUSTER` queries and `ALTER UPDATE/DELETE`. [\#3772](https://github.com/ClickHouse/ClickHouse/pull/3772) [\#3460](https://github.com/ClickHouse/ClickHouse/pull/3460)
+- Databases are correctly specified for subqueries inside a VIEW. [\#3521](https://github.com/ClickHouse/ClickHouse/pull/3521)
+- Fixed a bug in `PREWHERE` with `FINAL` for `VersionedCollapsingMergeTree`. [7167bfd7](https://github.com/ClickHouse/ClickHouse/commit/7167bfd7b365538f7a91c4307ad77e552ab4e8c1)
+- Now you can use `KILL QUERY` to cancel queries that have not started yet because they are waiting for the table to be locked. [\#3517](https://github.com/ClickHouse/ClickHouse/pull/3517) (See the example after this list.)
+- Corrected date and time calculations if the clocks were moved back at midnight (this happens in Iran, and happened in Moscow from 1981 to 1983). Previously, this led to the time being reset a day earlier than necessary, and also caused incorrect formatting of the date and time in text format. [\#3819](https://github.com/ClickHouse/ClickHouse/pull/3819)
+- Fixed bugs in some cases of `VIEW` and subqueries that omit the database. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3521)
+- Fixed a race condition when simultaneously reading from a `MATERIALIZED VIEW` and deleting a `MATERIALIZED VIEW` due to not locking the internal `MATERIALIZED VIEW`. [\#3404](https://github.com/ClickHouse/ClickHouse/pull/3404) [\#3694](https://github.com/ClickHouse/ClickHouse/pull/3694)
+- Fixed the error `Lock handler cannot be nullptr.` [\#3689](https://github.com/ClickHouse/ClickHouse/pull/3689)
+- Fixed query processing when the `compile_expressions` option is enabled (it’s enabled by default). Nondeterministic constant expressions like the `now` function are no longer unfolded. [\#3457](https://github.com/ClickHouse/ClickHouse/pull/3457)
+- Fixed a crash when specifying a non-constant scale argument in `toDecimal32/64/128` functions.
+- Fixed an error when trying to insert an array with `NULL` elements in the `Values` format into a column of type `Array` without `Nullable` (if `input_format_values_interpret_expressions` = 1). [\#3487](https://github.com/ClickHouse/ClickHouse/pull/3487) [\#3503](https://github.com/ClickHouse/ClickHouse/pull/3503)
+- Fixed continuous error logging in `DDLWorker` if ZooKeeper is not available. [8f50c620](https://github.com/ClickHouse/ClickHouse/commit/8f50c620334988b28018213ec0092fe6423847e2)
+- Fixed the return type for `quantile*` functions from `Date` and `DateTime` types of arguments. [\#3580](https://github.com/ClickHouse/ClickHouse/pull/3580)
+- Fixed the `WITH` clause if it specifies a simple alias without expressions. [\#3570](https://github.com/ClickHouse/ClickHouse/pull/3570)
+- Fixed processing of queries with named sub-queries and qualified column names when `enable_optimize_predicate_expression` is enabled. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3588)
+- Fixed the error `Attempt to attach to nullptr thread group` when working with materialized views. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3623)
+- Fixed a crash when passing certain incorrect arguments to the `arrayReverse` function. [73e3a7b6](https://github.com/ClickHouse/ClickHouse/commit/73e3a7b662161d6005e7727d8a711b930386b871)
+- Fixed the buffer overflow in the `extractURLParameter` function. Improved performance. Added correct processing of strings containing zero bytes. [141e9799](https://github.com/ClickHouse/ClickHouse/commit/141e9799e49201d84ea8e951d1bed4fb6d3dacb5)
+- Fixed buffer overflow in the `lowerUTF8` and `upperUTF8` functions. Removed the ability to execute these functions over `FixedString` type arguments. [\#3662](https://github.com/ClickHouse/ClickHouse/pull/3662)
+- Fixed a rare race condition when deleting `MergeTree` tables. [\#3680](https://github.com/ClickHouse/ClickHouse/pull/3680)
+- Fixed a race condition when reading from `Buffer` tables and simultaneously performing `ALTER` or `DROP` on the target tables. [\#3719](https://github.com/ClickHouse/ClickHouse/pull/3719)
+- Fixed a segfault if the `max_temporary_non_const_columns` limit was exceeded. [\#3788](https://github.com/ClickHouse/ClickHouse/pull/3788)
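+
+A sketch of the `KILL QUERY` fix mentioned above (the `query_id` value is a hypothetical placeholder):
+
+```sql
+-- Cancels matching queries, including ones still waiting for a table lock.
+KILL QUERY WHERE query_id = 'some-query-id' ASYNC;
+```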
+
+#### Improvements: {#improvements-1}
+
+- The server does not write the processed configuration files to the `/etc/clickhouse-server/` directory. Instead, it saves them in the `preprocessed_configs` directory inside `path`. This means that the `/etc/clickhouse-server/` directory doesn’t have write access for the `clickhouse` user, which improves security. [\#2443](https://github.com/ClickHouse/ClickHouse/pull/2443)
+- The `min_merge_bytes_to_use_direct_io` option is set to 10 GiB by default. A merge that forms large parts of tables from the MergeTree family will be performed in `O_DIRECT` mode, which prevents excessive page cache eviction. [\#3504](https://github.com/ClickHouse/ClickHouse/pull/3504)
+- Accelerated server start when there is a very large number of tables. [\#3398](https://github.com/ClickHouse/ClickHouse/pull/3398)
+- Added a connection pool and HTTP `Keep-Alive` for connections between replicas. [\#3594](https://github.com/ClickHouse/ClickHouse/pull/3594)
+- If the query syntax is invalid, the `400 Bad Request` code is returned in the `HTTP` interface (500 was returned previously). [31bc680a](https://github.com/ClickHouse/ClickHouse/commit/31bc680ac5f4bb1d0360a8ba4696fa84bb47d6ab)
+- The `join_default_strictness` option is set to `ALL` by default for compatibility. [120e2cbe](https://github.com/ClickHouse/ClickHouse/commit/120e2cbe2ff4fbad626c28042d9b28781c805afe)
+- Removed logging to `stderr` from the `re2` library for invalid or complex regular expressions. [\#3723](https://github.com/ClickHouse/ClickHouse/pull/3723)
+- For the `Kafka` table engine: added checks for subscriptions before beginning to read from Kafka, and the kafka\_max\_block\_size setting for the table. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3396)
+- The `cityHash64`, `farmHash64`, `metroHash64`, `sipHash64`, `halfMD5`, `murmurHash2_32`, `murmurHash2_64`, `murmurHash3_32`, and `murmurHash3_64` functions now work for any number of arguments and for arguments in the form of tuples. [\#3451](https://github.com/ClickHouse/ClickHouse/pull/3451) [\#3519](https://github.com/ClickHouse/ClickHouse/pull/3519) (See the example after this list.)
+- The `arrayReverse` function now works with any types of arrays. [73e3a7b6](https://github.com/ClickHouse/ClickHouse/commit/73e3a7b662161d6005e7727d8a711b930386b871)
+- Added an optional parameter: the slot size for the `timeSlots` function. [Kirill Shvakov](https://github.com/ClickHouse/ClickHouse/pull/3724)
+- For `FULL` and `RIGHT JOIN`, the `max_block_size` setting is used for a stream of non-joined data from the right table. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3699)
+- Added the `--secure` command line parameter in `clickhouse-benchmark` and `clickhouse-performance-test` to enable TLS. [\#3688](https://github.com/ClickHouse/ClickHouse/pull/3688) [\#3690](https://github.com/ClickHouse/ClickHouse/pull/3690)
+- Type conversion when the structure of a `Buffer` type table does not match the structure of the destination table. [Vitaly Baranov](https://github.com/ClickHouse/ClickHouse/pull/3603)
+- Added the `tcp_keep_alive_timeout` option to enable keep-alive packets after inactivity for the specified time interval. [\#3441](https://github.com/ClickHouse/ClickHouse/pull/3441)
+- Removed unnecessary quoting of values for the partition key in the `system.parts` table if it consists of a single column. [\#3652](https://github.com/ClickHouse/ClickHouse/pull/3652)
+- The modulo function works for `Date` and `DateTime` data types. [\#3385](https://github.com/ClickHouse/ClickHouse/pull/3385)
+- Added synonyms for the `POWER`, `LN`, `LCASE`, `UCASE`, `REPLACE`, `LOCATE`, `SUBSTR`, and `MID` functions. [\#3774](https://github.com/ClickHouse/ClickHouse/pull/3774) [\#3763](https://github.com/ClickHouse/ClickHouse/pull/3763) Some function names are case-insensitive for compatibility with the SQL standard. Added syntactic sugar `SUBSTRING(expr FROM start FOR length)` for compatibility with SQL. [\#3804](https://github.com/ClickHouse/ClickHouse/pull/3804) (See the example after this list.)
+- Added the ability to `mlock` memory pages corresponding to `clickhouse-server` executable code to prevent it from being forced out of memory. This feature is disabled by default. [\#3553](https://github.com/ClickHouse/ClickHouse/pull/3553)
+- Improved performance when reading from `O_DIRECT` (with the `min_bytes_to_use_direct_io` option enabled). [\#3405](https://github.com/ClickHouse/ClickHouse/pull/3405)
+- Improved performance of the `dictGet...OrDefault` function for a constant key argument and a non-constant default argument. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3563)
+- The `firstSignificantSubdomain` function now processes the domains `gov`, `mil`, and `edu`. [Igor Hatarist](https://github.com/ClickHouse/ClickHouse/pull/3601) Improved performance. [\#3628](https://github.com/ClickHouse/ClickHouse/pull/3628)
+- Ability to specify custom environment variables for starting `clickhouse-server` using the `SYS-V init.d` script by defining `CLICKHOUSE_PROGRAM_ENV` in `/etc/default/clickhouse`. [Pavlo Bashynskyi](https://github.com/ClickHouse/ClickHouse/pull/3612)
+- Correct return code for the clickhouse-server init script. [\#3516](https://github.com/ClickHouse/ClickHouse/pull/3516)
+- The `system.metrics` table now has the `VersionInteger` metric, and `system.build_options` has the added line `VERSION_INTEGER`, which contains the numeric form of the ClickHouse version, such as `18016000`. [\#3644](https://github.com/ClickHouse/ClickHouse/pull/3644)
+- Removed the ability to compare the `Date` type with a number to avoid potential errors like `date = 2018-12-17`, where quotes around the date are omitted by mistake. [\#3687](https://github.com/ClickHouse/ClickHouse/pull/3687)
+- Fixed the behavior of stateful functions like `rowNumberInAllBlocks`. They previously output a result that was one number larger due to starting during query analysis. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3729)
+- If the `force_restore_data` file can’t be deleted, an error message is displayed. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3794)
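+
+Two of the improvements above, sketched (all argument values are arbitrary):
+
+```sql
+-- Hash functions now take any number of arguments, including tuples.
+SELECT cityHash64('2018-12-14', 42), sipHash64((1, 'abc'));
+
+-- SQL-compatible substring syntax.
+SELECT SUBSTRING('ClickHouse' FROM 1 FOR 5); -- 'Click'
+```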
+
+#### Build improvements: {#build-improvements-1}
+
+- Updated the `jemalloc` library, which fixes a potential memory leak. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3557)
+- Profiling with `jemalloc` is enabled by default in debug builds. [2cc82f5c](https://github.com/ClickHouse/ClickHouse/commit/2cc82f5cbe266421cd4c1165286c2c47e5ffcb15)
+- Added the ability to run integration tests when only `Docker` is installed on the system. [\#3650](https://github.com/ClickHouse/ClickHouse/pull/3650)
+- Added the fuzz expression test in SELECT queries. [\#3442](https://github.com/ClickHouse/ClickHouse/pull/3442)
+- Added a stress test for commits, which performs functional tests in parallel and in random order to detect more race conditions. [\#3438](https://github.com/ClickHouse/ClickHouse/pull/3438)
+- Improved the method for starting clickhouse-server in a Docker image. [Elghazal Ahmed](https://github.com/ClickHouse/ClickHouse/pull/3663)
+- For a Docker image, added support for initializing databases using files in the `/docker-entrypoint-initdb.d` directory. [Konstantin Lebedev](https://github.com/ClickHouse/ClickHouse/pull/3695)
+- Fixes for builds on ARM. [\#3709](https://github.com/ClickHouse/ClickHouse/pull/3709)
+
+#### Backward incompatible changes: {#backward-incompatible-changes}
+
+- Removed the ability to compare the `Date` type with a number. Instead of `toDate('2018-12-18') = 17883`, you must use explicit type conversion `= toDate(17883)`. [\#3687](https://github.com/ClickHouse/ClickHouse/pull/3687)
+
+## ClickHouse release 18.14 {#clickhouse-release-18-14}
+
+### ClickHouse release 18.14.19, 2018-12-19 {#clickhouse-release-18-14-19-2018-12-19}
+
+#### Bug fixes: {#bug-fixes-2}
+
+- Fixed an error that led to problems with updating dictionaries with the ODBC source. [\#3825](https://github.com/ClickHouse/ClickHouse/issues/3825), [\#3829](https://github.com/ClickHouse/ClickHouse/issues/3829)
+- Databases are correctly specified when executing DDL `ON CLUSTER` queries. [\#3460](https://github.com/ClickHouse/ClickHouse/pull/3460)
+- Fixed a segfault if the `max_temporary_non_const_columns` limit was exceeded. [\#3788](https://github.com/ClickHouse/ClickHouse/pull/3788)
+
+#### Build improvements: {#build-improvements-2}
+
+- Fixes for builds on ARM.
+
+### ClickHouse release 18.14.18, 2018-12-04 {#clickhouse-release-18-14-18-2018-12-04}
+
+#### Bug fixes: {#bug-fixes-3}
+
+- Fixed an error in the `dictGet...` function for dictionaries of type `range`, if one of the arguments is constant and the other is not. [\#3751](https://github.com/ClickHouse/ClickHouse/pull/3751)
+- Fixed an error that caused messages `netlink: '...': attribute type 1 has an invalid length` to be printed in the Linux kernel log, which happened only on sufficiently recent Linux kernel versions. [\#3749](https://github.com/ClickHouse/ClickHouse/pull/3749)
+- Fixed a segfault in the `empty` function for an argument of `FixedString` type. [Daniel, Dao Quang Minh](https://github.com/ClickHouse/ClickHouse/pull/3703)
+- Fixed excessive memory allocation when using a large value of the `max_query_size` setting (a memory chunk of `max_query_size` bytes was preallocated at once). [\#3720](https://github.com/ClickHouse/ClickHouse/pull/3720)
+
+#### Build changes: {#build-changes}
+
+- Fixed build with LLVM/Clang libraries of version 7 from the OS packages (these libraries are used for runtime query compilation). [\#3582](https://github.com/ClickHouse/ClickHouse/pull/3582)
+
+### ClickHouse release 18.14.17, 2018-11-30 {#clickhouse-release-18-14-17-2018-11-30}
+
+#### Bug fixes: {#bug-fixes-4}
+
+- Fixed cases when the ODBC bridge process did not terminate with the main server process. [\#3642](https://github.com/ClickHouse/ClickHouse/pull/3642)
+- Fixed synchronous insertion into the `Distributed` table with a columns list that differs from the column list of the remote table. [\#3673](https://github.com/ClickHouse/ClickHouse/pull/3673)
+- Fixed a rare race condition that can lead to a crash when dropping a MergeTree table. [\#3643](https://github.com/ClickHouse/ClickHouse/pull/3643)
+- Fixed a query deadlock when query thread creation fails with the `Resource temporarily unavailable` error. [\#3643](https://github.com/ClickHouse/ClickHouse/pull/3643)
+- Fixed parsing of the `ENGINE` clause when the `CREATE AS table` syntax was used and the `ENGINE` clause was specified before the `AS table` (the error resulted in ignoring the specified engine). [\#3692](https://github.com/ClickHouse/ClickHouse/pull/3692)
+
+### ClickHouse release 18.14.15, 2018-11-21 {#clickhouse-release-18-14-15-2018-11-21}
+
+#### Bug fixes: {#bug-fixes-5}
+
+- The size of a memory chunk was overestimated while deserializing columns of type `Array(String)`, which led to “Memory limit exceeded” errors. The issue appeared in version 18.12.13. [\#3589](https://github.com/ClickHouse/ClickHouse/issues/3589)
+
+### ClickHouse release 18.14.14, 2018-11-20 {#clickhouse-release-18-14-14-2018-11-20}
+
+#### Bug fixes: {#bug-fixes-6}
+
+- Fixed `ON CLUSTER` queries when the cluster is configured as secure (the `<secure>` flag). [\#3599](https://github.com/ClickHouse/ClickHouse/pull/3599)
+
+#### Build changes: {#build-changes-1}
+
+- Fixed build problems (llvm-7 from the system, macOS). [\#3582](https://github.com/ClickHouse/ClickHouse/pull/3582)
+
+### ClickHouse release 18.14.13, 2018-11-08 {#clickhouse-release-18-14-13-2018-11-08}
+
+#### Bug fixes: {#bug-fixes-7}
+
+- Fixed the `Block structure mismatch in MergingSorted stream` error. [\#3162](https://github.com/ClickHouse/ClickHouse/issues/3162)
+- Fixed `ON CLUSTER` queries in the case when secure connections were turned on in the cluster config (the `<secure>` flag). [\#3465](https://github.com/ClickHouse/ClickHouse/pull/3465)
+- Fixed an error in queries that used `SAMPLE`, `PREWHERE` and alias columns. [\#3543](https://github.com/ClickHouse/ClickHouse/pull/3543)
+- Fixed a rare `unknown compression method` error when the `min_bytes_to_use_direct_io` setting was enabled. [\#3544](https://github.com/ClickHouse/ClickHouse/pull/3544)
+
+#### Performance improvements: {#performance-improvements}
+
+- Fixed performance regression of queries with `GROUP BY` of columns of UInt16 or Date type when executing on AMD EPYC processors. [Igor Lapko](https://github.com/ClickHouse/ClickHouse/pull/3512)
+- Fixed performance regression of queries that process long strings. [\#3530](https://github.com/ClickHouse/ClickHouse/pull/3530)
+
+#### Build improvements: {#build-improvements-3}
+
+- Improvements for simplifying the Arcadia build. [\#3475](https://github.com/ClickHouse/ClickHouse/pull/3475), [\#3535](https://github.com/ClickHouse/ClickHouse/pull/3535)
+
+### ClickHouse release 18.14.12, 2018-11-02 {#clickhouse-release-18-14-12-2018-11-02}
+
+#### Bug fixes: {#bug-fixes-8}
+
+- Fixed a crash on joining two unnamed subqueries. [\#3505](https://github.com/ClickHouse/ClickHouse/pull/3505)
+- Fixed generating incorrect queries (with an empty `WHERE` clause) when querying external databases. [hotid](https://github.com/ClickHouse/ClickHouse/pull/3477)
+- Fixed using an incorrect timeout value in ODBC dictionaries. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3511)
+
+### ClickHouse release 18.14.11, 2018-10-29 {#clickhouse-release-18-14-11-2018-10-29}
+
+#### Bug fixes: {#bug-fixes-9}
+
+- Fixed the error `Block structure mismatch in UNION stream: different number of columns` in LIMIT queries. [\#2156](https://github.com/ClickHouse/ClickHouse/issues/2156)
+- Fixed errors when merging data in tables containing arrays inside Nested structures. [\#3397](https://github.com/ClickHouse/ClickHouse/pull/3397)
+- Fixed incorrect query results if the `merge_tree_uniform_read_distribution` setting is disabled (it is enabled by default). [\#3429](https://github.com/ClickHouse/ClickHouse/pull/3429)
+- Fixed an error on inserts to a Distributed table in Native format. [\#3411](https://github.com/ClickHouse/ClickHouse/issues/3411)
+
+### ClickHouse release 18.14.10, 2018-10-23 {#clickhouse-release-18-14-10-2018-10-23}
+
+- The `compile_expressions` setting (JIT compilation of expressions) is disabled by default. [\#3410](https://github.com/ClickHouse/ClickHouse/pull/3410)
+- The `enable_optimize_predicate_expression` setting is disabled by default.
+
+### ClickHouse release 18.14.9, 2018-10-16 {#clickhouse-release-18-14-9-2018-10-16}
+
+#### New features: {#new-features-1}
+
+- The `WITH CUBE` modifier for `GROUP BY` (the alternative syntax `GROUP BY CUBE(...)` is also available). [\#3172](https://github.com/ClickHouse/ClickHouse/pull/3172) (See the example after this list.)
+- Added the `formatDateTime` function. [Alexandr Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/2770) (See the example after this list.)
+- Added the `JDBC` table engine and `jdbc` table function (requires installing clickhouse-jdbc-bridge). [Alexandr Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/3210)
+- Added functions for working with the ISO week number: `toISOWeek`, `toISOYear`, `toStartOfISOYear`, and `toDayOfYear`. [\#3146](https://github.com/ClickHouse/ClickHouse/pull/3146)
+- Now you can use `Nullable` columns for `MySQL` and `ODBC` tables. [\#3362](https://github.com/ClickHouse/ClickHouse/pull/3362)
+- Nested data structures can be read as nested objects in `JSONEachRow` format. Added the `input_format_import_nested_json` setting. [Veloman Yunkan](https://github.com/ClickHouse/ClickHouse/pull/3144)
+- Parallel processing is available for many `MATERIALIZED VIEW`s when inserting data. See the `parallel_view_processing` setting. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3208)
+- Added the `SYSTEM FLUSH LOGS` query (forced log flushes to system tables such as `query_log`). [\#3321](https://github.com/ClickHouse/ClickHouse/pull/3321)
+- Now you can use pre-defined `database` and `table` macros when declaring `Replicated` tables. [\#3251](https://github.com/ClickHouse/ClickHouse/pull/3251)
+- Added the ability to read `Decimal` type values in engineering notation (indicating powers of ten). [\#3153](https://github.com/ClickHouse/ClickHouse/pull/3153)
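+
+A brief sketch of two of the new features above (the table `visits` and its columns are hypothetical):
+
+```sql
+-- Subtotals for every combination of the grouping keys.
+SELECT domain, device, count() AS hits
+FROM visits
+GROUP BY domain, device WITH CUBE;
+
+-- MySQL-style date/time formatting.
+SELECT formatDateTime(now(), '%Y-%m-%d %H:%M:%S');
+```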
+
+#### Experimental features: {#experimental-features}
+
+- Optimization of the GROUP BY clause for `LowCardinality` data types. [\#3138](https://github.com/ClickHouse/ClickHouse/pull/3138)
+- Optimized calculation of expressions for `LowCardinality` data types. [\#3200](https://github.com/ClickHouse/ClickHouse/pull/3200)
+
+#### Improvements: {#improvements-2}
+
+- Significantly reduced memory consumption for queries with `ORDER BY` and `LIMIT`. See the `max_bytes_before_remerge_sort` setting. [\#3205](https://github.com/ClickHouse/ClickHouse/pull/3205)
+- In the absence of `JOIN` (`LEFT`, `INNER`, …), `INNER JOIN` is assumed. [\#3147](https://github.com/ClickHouse/ClickHouse/pull/3147)
+- Qualified asterisks work correctly in queries with `JOIN`. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3202)
+- The `ODBC` table engine correctly chooses the method for quoting identifiers in the SQL dialect of a remote database. [Alexandr Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/3210)
+- The `compile_expressions` setting (JIT compilation of expressions) is enabled by default.
+- Fixed behavior for simultaneous DROP DATABASE/TABLE IF EXISTS and CREATE DATABASE/TABLE IF NOT EXISTS. Previously, a `CREATE DATABASE ... IF NOT EXISTS` query could return the error message “File … already exists”, and the `CREATE TABLE ... IF NOT EXISTS` and `DROP TABLE IF EXISTS` queries could return `Table ... is creating or attaching right now`. [\#3101](https://github.com/ClickHouse/ClickHouse/pull/3101)
+- LIKE and IN expressions with a constant right half are passed to the remote server when querying from MySQL or ODBC tables. [\#3182](https://github.com/ClickHouse/ClickHouse/pull/3182)
+- Comparisons with constant expressions in a WHERE clause are passed to the remote server when querying from MySQL and ODBC tables. Previously, only comparisons with constants were passed. [\#3182](https://github.com/ClickHouse/ClickHouse/pull/3182)
+- Correct calculation of row width in the terminal for `Pretty` formats, including strings with hieroglyphs. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3257).
+- `ON CLUSTER` can be specified for `ALTER UPDATE` queries.
+- Improved performance for reading data in `JSONEachRow` format. [\#3332](https://github.com/ClickHouse/ClickHouse/pull/3332)
+- Added synonyms for the `LENGTH` and `CHARACTER_LENGTH` functions for compatibility. The `CONCAT` function is no longer case-sensitive. [\#3306](https://github.com/ClickHouse/ClickHouse/pull/3306)
+- Added the `TIMESTAMP` synonym for the `DateTime` type. [\#3390](https://github.com/ClickHouse/ClickHouse/pull/3390)
+- There is always space reserved for query\_id in the server logs, even if the log line is not related to a query. This makes it easier to parse server text logs with third-party tools.
+- Memory consumption by a query is logged when it exceeds the next level of an integer number of gigabytes. [\#3205](https://github.com/ClickHouse/ClickHouse/pull/3205)
+- Added compatibility mode for the case when the client library that uses the Native protocol sends fewer columns by mistake than the server expects for the INSERT query. This scenario was possible when using the clickhouse-cpp library. Previously, this scenario caused the server to crash. [\#3171](https://github.com/ClickHouse/ClickHouse/pull/3171)
+- In a user-defined WHERE expression in `clickhouse-copier`, you can now use a `partition_key` alias (for additional filtering by source table partition). This is useful if the partitioning scheme changes during copying, but only changes slightly. [\#3166](https://github.com/ClickHouse/ClickHouse/pull/3166)
+- The workflow of the `Kafka` engine has been moved to a background thread pool in order to automatically reduce the speed of data reading at high loads. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3215).
+- Support for reading `Tuple` and `Nested` values of structures like `struct` in the `Cap'n'Proto` format. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3216)
+- The list of top-level domains for the `firstSignificantSubdomain` function now includes the domain `biz`. [decaseal](https://github.com/ClickHouse/ClickHouse/pull/3219)
+- In the configuration of external dictionaries, `null_value` is interpreted as the value of the default data type. [\#3330](https://github.com/ClickHouse/ClickHouse/pull/3330)
+- Support for the `intDiv` and `intDivOrZero` functions for `Decimal`. [b48402e8](https://github.com/ClickHouse/ClickHouse/commit/b48402e8712e2b9b151e0eef8193811d433a1264) (See the example after this list.)
+- Support for the `Date`, `DateTime`, `UUID`, and `Decimal` types as a key for the `sumMap` aggregate function. [\#3281](https://github.com/ClickHouse/ClickHouse/pull/3281) (See the example after this list.)
+- Support for the `Decimal` data type in external dictionaries. [\#3324](https://github.com/ClickHouse/ClickHouse/pull/3324)
+- Support for the `Decimal` data type in `SummingMergeTree` tables. [\#3348](https://github.com/ClickHouse/ClickHouse/pull/3348)
+- Added specializations for `UUID` in `if`. [\#3366](https://github.com/ClickHouse/ClickHouse/pull/3366)
+- Reduced the number of `open` and `close` system calls when reading from a `MergeTree` table. [\#3283](https://github.com/ClickHouse/ClickHouse/pull/3283)
+- A `TRUNCATE TABLE` query can be executed on any replica (the query is passed to the leader replica). [Kirill Shvakov](https://github.com/ClickHouse/ClickHouse/pull/3375)
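+
+A small sketch of the `Decimal` and `sumMap` improvements above (values are arbitrary):
+
+```sql
+-- intDiv now accepts Decimal arguments.
+SELECT intDiv(toDecimal32(10.5, 1), 3);
+
+-- sumMap can now be keyed by Date (also DateTime, UUID, Decimal).
+SELECT sumMap([today(), today() + 1], [10, 20]);
+```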
+
+#### Bug fixes: {#bug-fixes-10}
+
+- Fixed an issue with `Dictionary` tables for `range_hashed` dictionaries. This error occurred in version 18.12.17. [\#1702](https://github.com/ClickHouse/ClickHouse/pull/1702)
+- Fixed an error when loading `range_hashed` dictionaries (the message `Unsupported type Nullable (...)`). This error occurred in version 18.12.17. [\#3362](https://github.com/ClickHouse/ClickHouse/pull/3362)
+- Fixed errors in the `pointInPolygon` function due to the accumulation of inaccurate calculations for polygons with a large number of vertices located close to each other. [\#3331](https://github.com/ClickHouse/ClickHouse/pull/3331) [\#3341](https://github.com/ClickHouse/ClickHouse/pull/3341)
+- If after merging data parts, the checksum for the resulting part differs from the result of the same merge in another replica, the result of the merge is deleted and the data part is downloaded from the other replica (this is the correct behavior). But after downloading the data part, it couldn’t be added to the working set because of an error that the part already exists (because the data part was deleted with some delay after the merge). This led to cyclical attempts to download the same data. [\#3194](https://github.com/ClickHouse/ClickHouse/pull/3194)
+- Fixed incorrect calculation of total memory consumption by queries (because of incorrect calculation, the `max_memory_usage_for_all_queries` setting worked incorrectly and the `MemoryTracking` metric had an incorrect value). This error occurred in version 18.12.13. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3344)
+- Fixed the functionality of `CREATE TABLE ... ON CLUSTER ... AS SELECT ...` This error occurred in version 18.12.13. [\#3247](https://github.com/ClickHouse/ClickHouse/pull/3247)
+- Fixed unnecessary preparation of data structures for `JOIN`s on the server that initiates the query if the `JOIN` is only performed on remote servers. [\#3340](https://github.com/ClickHouse/ClickHouse/pull/3340)
+- Fixed bugs in the `Kafka` engine: deadlocks after exceptions when starting to read data, and locks upon completion [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3215).
+- For `Kafka` tables, the optional `schema` parameter was not passed (the schema of the `Cap'n'Proto` format). [Vojtech Splichal](https://github.com/ClickHouse/ClickHouse/pull/3150)
+- If the ensemble of ZooKeeper servers has servers that accept the connection but then immediately close it instead of responding to the handshake, ClickHouse chooses to connect another server. Previously, this produced the error `Cannot read all data. Bytes read: 0. Bytes expected: 4.` and the server couldn’t start. [8218cf3a](https://github.com/ClickHouse/ClickHouse/commit/8218cf3a5f39a43401953769d6d12a0bb8d29da9)
+- If the ensemble of ZooKeeper servers contains servers for which the DNS query returns an error, these servers are ignored. [17b8e209](https://github.com/ClickHouse/ClickHouse/commit/17b8e209221061325ad7ba0539f03c6e65f87f29)
+- Fixed type conversion between `Date` and `DateTime` when inserting data in the `VALUES` format (if `input_format_values_interpret_expressions = 1`). Previously, the conversion was performed between the numerical value of the number of days in Unix Epoch time and the Unix timestamp, which led to unexpected results. [\#3229](https://github.com/ClickHouse/ClickHouse/pull/3229)
+- Corrected type conversion between `Decimal` and integer numbers. [\#3211](https://github.com/ClickHouse/ClickHouse/pull/3211)
+- Fixed errors in the `enable_optimize_predicate_expression` setting. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3231)
+- Fixed a parsing error in CSV format with floating-point numbers if a non-default CSV separator is used, such as `;` [\#3155](https://github.com/ClickHouse/ClickHouse/pull/3155)
+- Fixed the `arrayCumSumNonNegative` function (it does not accumulate negative values if the accumulator is less than zero). [Aleksey Studnev](https://github.com/ClickHouse/ClickHouse/pull/3163)
+- Fixed how `Merge` tables work on top of `Distributed` tables when using `PREWHERE`. [\#3165](https://github.com/ClickHouse/ClickHouse/pull/3165)
+- Bug fixes in the `ALTER UPDATE` query.
+- Fixed bugs in the `odbc` table function that appeared in version 18.12. [\#3197](https://github.com/ClickHouse/ClickHouse/pull/3197)
+- Fixed the operation of aggregate functions with `StateArray` combinators. [\#3188](https://github.com/ClickHouse/ClickHouse/pull/3188)
+- Fixed a crash when dividing a `Decimal` value by zero. [69dd6609](https://github.com/ClickHouse/ClickHouse/commit/69dd6609193beb4e7acd3e6ad216eca0ccfb8179)
+- Fixed output of types for operations using `Decimal` and integer arguments. [\#3224](https://github.com/ClickHouse/ClickHouse/pull/3224)
+- Fixed the segfault during `GROUP BY` on `Decimal128`. [3359ba06](https://github.com/ClickHouse/ClickHouse/commit/3359ba06c39fcd05bfdb87d6c64154819621e13a)
+- The `log_query_threads` setting (logging information about each thread of query execution) now takes effect only if the `log_queries` option (logging information about queries) is set to 1. Since the `log_query_threads` option is enabled by default, information about threads was previously logged even if query logging was disabled. [\#3241](https://github.com/ClickHouse/ClickHouse/pull/3241)
+- Fixed an error in the distributed operation of the quantiles aggregate function (the error message `Not found column quantile...`). [292a8855](https://github.com/ClickHouse/ClickHouse/commit/292a885533b8e3b41ce8993867069d14cbd5a664)
+- Fixed the compatibility problem when working on a cluster of version 18.12.17 servers and older servers at the same time. For distributed queries with GROUP BY keys of both fixed and non-fixed length, if there was a large amount of data to aggregate, the returned data was not always fully aggregated (two different rows contained the same aggregation keys). [\#3254](https://github.com/ClickHouse/ClickHouse/pull/3254)
+- Fixed handling of substitutions in `clickhouse-performance-test`, if the query contains only part of the substitutions declared in the test. [\#3263](https://github.com/ClickHouse/ClickHouse/pull/3263)
+- Fixed an error when using `FINAL` with `PREWHERE`. [\#3298](https://github.com/ClickHouse/ClickHouse/pull/3298)
+- Fixed an error when using `PREWHERE` over columns that were added during `ALTER`. [\#3298](https://github.com/ClickHouse/ClickHouse/pull/3298)
+- Added a check for the absence of `arrayJoin` for `DEFAULT` and `MATERIALIZED` expressions. Previously, `arrayJoin` led to an error when inserting data. [\#3337](https://github.com/ClickHouse/ClickHouse/pull/3337)
+- Added a check for the absence of `arrayJoin` in a `PREWHERE` clause. Previously, this led to messages like `Size ... doesn't match` or `Unknown compression method` when executing queries. [\#3357](https://github.com/ClickHouse/ClickHouse/pull/3357)
+- Fixed segfault that could occur in rare cases after optimization that replaced AND chains from equality evaluations with the corresponding IN expression. [liuyimin-bytedance](https://github.com/ClickHouse/ClickHouse/pull/3339)
+- Minor corrections to `clickhouse-benchmark`: previously, client information was not sent to the server; now the number of queries executed is calculated more accurately when shutting down and for limiting the number of iterations. [\#3351](https://github.com/ClickHouse/ClickHouse/pull/3351) [\#3352](https://github.com/ClickHouse/ClickHouse/pull/3352)
+
+#### Backward incompatible changes: {#backward-incompatible-changes-1}
+
+- Removed the `allow_experimental_decimal_type` option. The `Decimal` data type is available for default use. [\#3329](https://github.com/ClickHouse/ClickHouse/pull/3329)
+
+## ClickHouse release 18.12 {#clickhouse-release-18-12}
+
+### ClickHouse release 18.12.17, 2018-09-16 {#clickhouse-release-18-12-17-2018-09-16}
+
+#### New features: {#new-features-2}
+
+- `invalidate_query` (the ability to specify a query to check whether an external dictionary needs to be updated) is implemented for the `clickhouse` source. [\#3126](https://github.com/ClickHouse/ClickHouse/pull/3126)
+- Added the ability to use `UInt*`, `Int*`, and `DateTime` data types (along with the `Date` type) as a `range_hashed` external dictionary key that defines the boundaries of ranges. Now `NULL` can be used to designate an open range. [Vasily Nemkov](https://github.com/ClickHouse/ClickHouse/pull/3123)
+- The `Decimal` type now supports `var*` and `stddev*` aggregate functions. [\#3129](https://github.com/ClickHouse/ClickHouse/pull/3129) (See the example after this list.)
+- The `Decimal` type now supports mathematical functions (`exp`, `sin` and so on.) [\#3129](https://github.com/ClickHouse/ClickHouse/pull/3129) (See the example after this list.)
+- The `system.part_log` table now has the `partition_id` column. [\#3089](https://github.com/ClickHouse/ClickHouse/pull/3089)
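+
+A quick sketch of the new `Decimal` support above (the data is made up):
+
+```sql
+-- Aggregate functions over a Decimal expression.
+SELECT varSamp(toDecimal64(number, 2)), stddevPop(toDecimal64(number, 2))
+FROM numbers(10);
+
+-- Mathematical functions now accept Decimal arguments.
+SELECT exp(toDecimal32(1.5, 2)), sin(toDecimal32(0.5, 2));
+```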
+
+#### Bug fixes: {#bug-fixes-11}
+
+- `Merge` now works correctly on `Distributed` tables. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3159)
+- Fixed incompatibility (unnecessary dependency on the `glibc` version) that made it impossible to run ClickHouse on `Ubuntu Precise` and older versions. The incompatibility arose in version 18.12.13. [\#3130](https://github.com/ClickHouse/ClickHouse/pull/3130)
+- Fixed errors in the `enable_optimize_predicate_expression` setting. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3107)
+- Fixed a minor issue with backwards compatibility that appeared when working with a cluster of replicas on versions earlier than 18.12.13 and simultaneously creating a new replica of a table on a server with a newer version (shown in the message `Can not clone replica, because the ... updated to new ClickHouse version`, which is logical, but shouldn’t happen). [\#3122](https://github.com/ClickHouse/ClickHouse/pull/3122)
+
+#### Backward incompatible changes: {#backward-incompatible-changes-2}
+
+- The `enable_optimize_predicate_expression` option is enabled by default (which is rather optimistic). If query analysis errors occur that are related to searching for the column names, set `enable_optimize_predicate_expression` to 0. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3107)
+
+### ClickHouse release 18.12.14, 2018-09-13 {#clickhouse-release-18-12-14-2018-09-13}
+
+#### New features: {#new-features-3}
+
+- Added support for `ALTER UPDATE` queries. [\#3035](https://github.com/ClickHouse/ClickHouse/pull/3035)
+- Added the `allow_ddl` option, which restricts the user’s access to DDL queries. [\#3104](https://github.com/ClickHouse/ClickHouse/pull/3104)
+- Added the `min_merge_bytes_to_use_direct_io` option for `MergeTree` engines, which allows you to set a threshold for the total size of the merge (when above the threshold, data part files will be handled using O\_DIRECT). [\#3117](https://github.com/ClickHouse/ClickHouse/pull/3117)
+- The `system.merges` system table now contains the `partition_id` column. [\#3099](https://github.com/ClickHouse/ClickHouse/pull/3099)
+
+#### Improvements: {#improvements-3}
+
+- If a data part remains unchanged during mutation, it isn’t downloaded by replicas. [\#3103](https://github.com/ClickHouse/ClickHouse/pull/3103)
+- Autocomplete is available for names of settings when working with `clickhouse-client`. [\#3106](https://github.com/ClickHouse/ClickHouse/pull/3106)
+
+#### Bug fixes: {#bug-fixes-12}
+
+- Added a check for the sizes of arrays that are elements of `Nested` type fields when inserting. [\#3118](https://github.com/ClickHouse/ClickHouse/pull/3118)
+- Fixed an error updating external dictionaries with the `ODBC` source and `hashed` storage. This error occurred in version 18.12.13.
+- Fixed a crash when creating a temporary table from a query with an `IN` condition. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3098)
+- Fixed an error in aggregate functions for arrays that can have `NULL` elements. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3097)
+
+### ClickHouse release 18.12.13, 2018-09-10 {#clickhouse-release-18-12-13-2018-09-10}
+
+#### New features: {#new-features-4}
+
+- Added the `DECIMAL(digits, scale)` data type (`Decimal32(scale)`, `Decimal64(scale)`, `Decimal128(scale)`). To enable it, use the setting `allow_experimental_decimal_type`. [\#2846](https://github.com/ClickHouse/ClickHouse/pull/2846) [\#2970](https://github.com/ClickHouse/ClickHouse/pull/2970) [\#3008](https://github.com/ClickHouse/ClickHouse/pull/3008) [\#3047](https://github.com/ClickHouse/ClickHouse/pull/3047) (See the example after this list.)
+- New `WITH ROLLUP` modifier for `GROUP BY` (alternative syntax: `GROUP BY ROLLUP(...)`). [\#2948](https://github.com/ClickHouse/ClickHouse/pull/2948) (See the example after this list.)
+- In queries with JOIN, the star character expands to a list of columns in all tables, in compliance with the SQL standard. You can restore the old behavior by setting `asterisk_left_columns_only` to 1 on the user configuration level. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2787)
+- Added support for JOIN with table functions. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2907)
+- Autocomplete by pressing Tab in clickhouse-client. [Sergey Shcherbin](https://github.com/ClickHouse/ClickHouse/pull/2447)
+- Ctrl+C in clickhouse-client clears a query that was entered. [\#2877](https://github.com/ClickHouse/ClickHouse/pull/2877)
+- Added the `join_default_strictness` setting (values: `''`, `'any'`, `'all'`). This allows you to not specify `ANY` or `ALL` for `JOIN`. [\#2982](https://github.com/ClickHouse/ClickHouse/pull/2982)
+- Each line of the server log related to query processing shows the query ID. [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482)
+- Now you can get query execution logs in clickhouse-client (use the `send_logs_level` setting). With distributed query processing, logs are cascaded from all the servers. [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482)
+- The `system.query_log` and `system.processes` (`SHOW PROCESSLIST`) tables now have information about all changed settings when you run a query (the nested structure of the `Settings` data). Added the `log_query_settings` setting. [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482)
+- The `system.query_log` and `system.processes` tables now show information about the number of threads that are participating in query execution (see the `thread_numbers` column). [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482)
+- Added `ProfileEvents` counters that measure the time spent on reading and writing over the network and reading and writing to disk, the number of network errors, and the time spent waiting when network bandwidth is limited. [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482)
+- Added `ProfileEvents` counters that contain the system metrics from rusage (you can use them to get information about CPU usage in userspace and the kernel, page faults, and context switches), as well as taskstats metrics (use these to obtain information about I/O wait time, CPU wait time, and the amount of data read and recorded, both with and without page cache). [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482)
+- The `ProfileEvents` counters are applied globally and for each query, as well as for each query execution thread, which allows you to profile resource consumption by query in detail. [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482)
+- Added the `system.query_thread_log` table, which contains information about each query execution thread. Added the `log_query_threads` setting. [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482)
+- The `system.metrics` and `system.events` tables now have built-in documentation. [\#3016](https://github.com/ClickHouse/ClickHouse/pull/3016)
+- Added the `arrayEnumerateDense` function. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2975)
+- Added the `arrayCumSumNonNegative` and `arrayDifference` functions. [Aleksey Studnev](https://github.com/ClickHouse/ClickHouse/pull/2942)
+- Added the `retention` aggregate function. [Sundy Li](https://github.com/ClickHouse/ClickHouse/pull/2887)
+- Now you can add (merge) states of aggregate functions by using the plus operator, and multiply the states of aggregate functions by a nonnegative constant. [\#3062](https://github.com/ClickHouse/ClickHouse/pull/3062) [\#3034](https://github.com/ClickHouse/ClickHouse/pull/3034)
+- Tables in the MergeTree family now have the virtual column `_partition_id`. [\#3089](https://github.com/ClickHouse/ClickHouse/pull/3089)
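+
+A rough sketch of the `DECIMAL` and `WITH ROLLUP` features above (the table and values are made up; the setting is only required while the type is experimental):
+
+```sql
+SET allow_experimental_decimal_type = 1;
+
+CREATE TABLE sales (`region` String, `city` String, `amount` Decimal64(2)) ENGINE = Memory;
+INSERT INTO sales VALUES ('EU', 'Berlin', 10.50), ('EU', 'Paris', 7.25), ('US', 'NYC', 12.00);
+
+-- ROLLUP adds subtotal rows for each prefix of the grouping keys.
+SELECT region, city, sum(amount) FROM sales GROUP BY region, city WITH ROLLUP;
+```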
+
+#### Experimental features: {#experimental-features-1}
+
+- Added the `LowCardinality(T)` data type. This data type automatically creates a local dictionary of values and allows data processing without unpacking the dictionary. [\#2830](https://github.com/ClickHouse/ClickHouse/pull/2830)
+- Added a cache of JIT-compiled functions and a counter for the number of uses before compiling. To JIT compile expressions, enable the `compile_expressions` setting. [\#2990](https://github.com/ClickHouse/ClickHouse/pull/2990) [\#3077](https://github.com/ClickHouse/ClickHouse/pull/3077)
+
+#### Improvements: {#improvements-4}
+
+- Fixed the problem with unlimited accumulation of the replication log when there are abandoned replicas. Added an effective recovery mode for replicas with a long lag.
+- Improved performance of `GROUP BY` with multiple aggregation fields when one of them is string and the others are fixed length.
+- Improved performance when using `PREWHERE` and with implicit transfer of expressions in `PREWHERE`.
+- Improved parsing performance for text formats (`CSV`, `TSV`). [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2977) [\#2980](https://github.com/ClickHouse/ClickHouse/pull/2980)
+- Improved performance of reading strings and arrays in binary formats. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2955)
+- Increased performance and reduced memory consumption for queries to `system.tables` and `system.columns` when there is a very large number of tables on a single server. [\#2953](https://github.com/ClickHouse/ClickHouse/pull/2953)
+- Fixed a performance problem in the case of a large stream of queries that result in an error (the `_dl_addr` function is visible in `perf top`, but the server isn’t using much CPU). [\#2938](https://github.com/ClickHouse/ClickHouse/pull/2938)
+- Conditions are cast into the View (when `enable_optimize_predicate_expression` is enabled). [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2907)
+- Improvements to the functionality for the `UUID` data type. [\#3074](https://github.com/ClickHouse/ClickHouse/pull/3074) [\#2985](https://github.com/ClickHouse/ClickHouse/pull/2985)
+- The `UUID` data type is supported in external dictionaries. [The-Alchemist](https://github.com/ClickHouse/ClickHouse/pull/2822)
+- The `visitParamExtractRaw` function works correctly with nested structures. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2974)
+- When the `input_format_skip_unknown_fields` setting is enabled, object fields in `JSONEachRow` format are skipped correctly. [BlahGeek](https://github.com/ClickHouse/ClickHouse/pull/2958)
+- For a `CASE` expression with conditions, you can now omit `ELSE`, which is equivalent to `ELSE NULL`. [\#2920](https://github.com/ClickHouse/ClickHouse/pull/2920) (See the example after this list.)
+- The operation timeout can now be configured when working with ZooKeeper. [urykhy](https://github.com/ClickHouse/ClickHouse/pull/2971)
+- You can specify an offset for `LIMIT n, m` as `LIMIT n OFFSET m`. [\#2840](https://github.com/ClickHouse/ClickHouse/pull/2840) (See the example after this list.)
+- You can use the `SELECT TOP n` syntax as an alternative for `LIMIT`. [\#2840](https://github.com/ClickHouse/ClickHouse/pull/2840) (See the example after this list.)
+- Increased the size of the queue to write to system tables, so the `SystemLog parameter queue is full` error doesn’t happen as often.
+- The `windowFunnel` aggregate function now supports events that meet multiple conditions. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2801)
+- Duplicate columns can be used in a `USING` clause for `JOIN`. [\#3006](https://github.com/ClickHouse/ClickHouse/pull/3006)
+- `Pretty` formats now have a limit on column alignment by width. Use the `output_format_pretty_max_column_pad_width` setting. If a value is wider, it will still be displayed in its entirety, but the other cells in the table will not be too wide. [\#3003](https://github.com/ClickHouse/ClickHouse/pull/3003)
+- The `odbc` table function now allows you to specify the database/schema name. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2885)
+- Added the ability to use a username specified in the `clickhouse-client` config file. [Vladimir Kozbin](https://github.com/ClickHouse/ClickHouse/pull/2909)
+- The `ZooKeeperExceptions` counter has been split into three counters: `ZooKeeperUserExceptions`, `ZooKeeperHardwareExceptions`, and `ZooKeeperOtherExceptions`.
+- `ALTER DELETE` queries work for materialized views.
+- Added randomization when running the cleanup thread periodically for `ReplicatedMergeTree` tables in order to avoid periodic load spikes when there are a very large number of `ReplicatedMergeTree` tables.
+- Support for `ATTACH TABLE ... ON CLUSTER` queries. [\#3025](https://github.com/ClickHouse/ClickHouse/pull/3025)
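+
+Sketches of a few of the improvements above, using the built-in `system.numbers` table:
+
+```sql
+-- An omitted ELSE branch behaves as ELSE NULL.
+SELECT CASE WHEN number % 2 = 1 THEN 'odd' END FROM system.numbers LIMIT 4;
+
+-- OFFSET and TOP syntax.
+SELECT number FROM system.numbers LIMIT 5 OFFSET 10;
+SELECT TOP 3 number FROM system.numbers;
+```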
+
+#### Bug fixes: {#bug-fixes-13}
+
+- Fixed an issue with `Dictionary` tables (throws the `Size of offsets doesn't match size of column` or `Unknown compression method` exception). This bug appeared in version 18.10.3. [\#2913](https://github.com/ClickHouse/ClickHouse/issues/2913)
+- Fixed a bug when merging `CollapsingMergeTree` tables if one of the data parts is empty (these parts are formed during merge or `ALTER DELETE` if all data was deleted), and the `vertical` algorithm was used for the merge. [\#3049](https://github.com/ClickHouse/ClickHouse/pull/3049)
+- Fixed a race condition during `DROP` or `TRUNCATE` for `Memory` tables with a simultaneous `SELECT`, which could lead to server crashes. This bug appeared in version 1.1.54388. [\#3038](https://github.com/ClickHouse/ClickHouse/pull/3038)
+- Fixed the possibility of data loss when inserting in `Replicated` tables if the `Session is expired` error is returned (data loss can be detected by the `ReplicatedDataLoss` metric). This error occurred in version 1.1.54378. [\#2939](https://github.com/ClickHouse/ClickHouse/pull/2939) [\#2949](https://github.com/ClickHouse/ClickHouse/pull/2949) [\#2964](https://github.com/ClickHouse/ClickHouse/pull/2964)
+- Fixed a segfault during `JOIN ... ON`. [\#3000](https://github.com/ClickHouse/ClickHouse/pull/3000)
+- Fixed an error when searching for column names when the `WHERE` expression consists entirely of a qualified column name, such as `WHERE table.column`. [\#2994](https://github.com/ClickHouse/ClickHouse/pull/2994)
+- Fixed the “Not found column” error that occurred when executing distributed queries if a single column consisting of an IN expression with a subquery is requested from a remote server. [\#3087](https://github.com/ClickHouse/ClickHouse/pull/3087)
+- Fixed the `Block structure mismatch in UNION stream: different number of columns` error that occurred for distributed queries if one of the shards is local and the other is not, and optimization of the move to `PREWHERE` is triggered. [\#2226](https://github.com/ClickHouse/ClickHouse/pull/2226) [\#3037](https://github.com/ClickHouse/ClickHouse/pull/3037) [\#3055](https://github.com/ClickHouse/ClickHouse/pull/3055) [\#3065](https://github.com/ClickHouse/ClickHouse/pull/3065) [\#3073](https://github.com/ClickHouse/ClickHouse/pull/3073) [\#3090](https://github.com/ClickHouse/ClickHouse/pull/3090) [\#3093](https://github.com/ClickHouse/ClickHouse/pull/3093)
+- Fixed the `pointInPolygon` function for certain cases of non-convex polygons. [\#2910](https://github.com/ClickHouse/ClickHouse/pull/2910)
+- Fixed the incorrect result when comparing `nan` with integers. [\#3024](https://github.com/ClickHouse/ClickHouse/pull/3024)
+- Fixed an error in the `zlib-ng` library that could lead to segfault in rare cases. [\#2854](https://github.com/ClickHouse/ClickHouse/pull/2854)
+- Fixed a memory leak when inserting into a table with `AggregateFunction` columns, if the state of the aggregate function is not simple (allocates memory separately), and if a single insertion request results in multiple small blocks. [\#3084](https://github.com/ClickHouse/ClickHouse/pull/3084)
+- Fixed a race condition when creating and deleting the same `Buffer` or `MergeTree` table simultaneously.
+- Fixed the possibility of a segfault when comparing tuples made up of certain non-trivial types, such as tuples. [\#2989](https://github.com/ClickHouse/ClickHouse/pull/2989)
+- Fixed the possibility of a segfault when running certain `ON CLUSTER` queries. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2960)
+- Fixed an error in the `arrayDistinct` function for `Nullable` array elements. [\#2845](https://github.com/ClickHouse/ClickHouse/pull/2845) [\#2937](https://github.com/ClickHouse/ClickHouse/pull/2937)
+- The `enable_optimize_predicate_expression` option now correctly supports cases with `SELECT *`. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2929)
+- Fixed the segfault when re-initializing the ZooKeeper session. [\#2917](https://github.com/ClickHouse/ClickHouse/pull/2917)
+- Fixed potential blocking when working with ZooKeeper.
+- Fixed incorrect code for adding nested data structures in a `SummingMergeTree`.
+- When allocating memory for states of aggregate functions, alignment is correctly taken into account, which makes it possible to use operations that require alignment when implementing states of aggregate functions. [chenxing-xc](https://github.com/ClickHouse/ClickHouse/pull/2808)
+
+#### Security fix: {#security-fix}
+
+- Safe use of ODBC data sources. Interaction with ODBC drivers uses a separate `clickhouse-odbc-bridge` process. Errors in third-party ODBC drivers no longer cause problems with server stability or vulnerabilities. [\#2828](https://github.com/ClickHouse/ClickHouse/pull/2828) [\#2879](https://github.com/ClickHouse/ClickHouse/pull/2879) [\#2886](https://github.com/ClickHouse/ClickHouse/pull/2886) [\#2893](https://github.com/ClickHouse/ClickHouse/pull/2893) [\#2921](https://github.com/ClickHouse/ClickHouse/pull/2921)
[\#2828](https://github.com/ClickHouse/ClickHouse/pull/2828) [\#2879](https://github.com/ClickHouse/ClickHouse/pull/2879) [\#2886](https://github.com/ClickHouse/ClickHouse/pull/2886) [\#2893](https://github.com/ClickHouse/ClickHouse/pull/2893) [\#2921](https://github.com/ClickHouse/ClickHouse/pull/2921) +- Fixed incorrect validation of the file path in the `catBoostPool` table function. [\#2894](https://github.com/ClickHouse/ClickHouse/pull/2894) +- The contents of system tables (`tables`, `databases`, `parts`, `columns`, `parts_columns`, `merges`, `mutations`, `replicas`, and `replication_queue`) are filtered according to the user’s configured access to databases (`allow_databases`). [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2856) + +#### Backward incompatible changes: {#backward-incompatible-changes-3} + +- In queries with JOIN, the star character expands to a list of columns in all tables, in compliance with the SQL standard. You can restore the old behavior by setting `asterisk_left_columns_only` to 1 on the user configuration level. + +#### Build changes: {#build-changes-2} + +- Most integration tests can now be run by commit. +- Code style checks can also be run by commit. +- The `memcpy` implementation is chosen correctly when building on CentOS7/Fedora. [Etienne Champetier](https://github.com/ClickHouse/ClickHouse/pull/2912) +- When using clang to build, some warnings from `-Weverything` have been added, in addition to the regular `-Wall -Wextra -Werror`. [\#2957](https://github.com/ClickHouse/ClickHouse/pull/2957) +- Debug builds use the `jemalloc` debug option. +- The interface of the library for interacting with ZooKeeper is declared abstract. [\#2950](https://github.com/ClickHouse/ClickHouse/pull/2950) + +## ClickHouse release 18.10 {#clickhouse-release-18-10} + +### ClickHouse release 18.10.3, 2018-08-13 {#clickhouse-release-18-10-3-2018-08-13} + +#### New features: {#new-features-5} + +- HTTPS can be used for replication. [\#2760](https://github.com/ClickHouse/ClickHouse/pull/2760) +- Added the functions `murmurHash2_64`, `murmurHash3_32`, `murmurHash3_64`, and `murmurHash3_128` in addition to the existing `murmurHash2_32`. [\#2791](https://github.com/ClickHouse/ClickHouse/pull/2791) +- Support for Nullable types in the ClickHouse ODBC driver (`ODBCDriver2` output format). [\#2834](https://github.com/ClickHouse/ClickHouse/pull/2834) +- Support for `UUID` in the key columns. + +#### Improvements: {#improvements-5} + +- Clusters can be removed without restarting the server when they are deleted from the config files. [\#2777](https://github.com/ClickHouse/ClickHouse/pull/2777) +- External dictionaries can be removed without restarting the server when they are removed from config files. [\#2779](https://github.com/ClickHouse/ClickHouse/pull/2779) +- Added `SETTINGS` support for the `Kafka` table engine. [Alexander Marshalov](https://github.com/ClickHouse/ClickHouse/pull/2781) +- Improvements for the `UUID` data type (not yet complete). [\#2618](https://github.com/ClickHouse/ClickHouse/pull/2618) +- Support for empty parts after merges in the `SummingMergeTree`, `CollapsingMergeTree` and `VersionedCollapsingMergeTree` engines. [\#2815](https://github.com/ClickHouse/ClickHouse/pull/2815) +- Old records of completed mutations are deleted (`ALTER DELETE`). [\#2784](https://github.com/ClickHouse/ClickHouse/pull/2784) +- Added the `system.merge_tree_settings` table. 
[Kirill Shvakov](https://github.com/ClickHouse/ClickHouse/pull/2841) +- The `system.tables` table now has dependency columns: `dependencies_database` and `dependencies_table`. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2851) +- Added the `max_partition_size_to_drop` config option. [\#2782](https://github.com/ClickHouse/ClickHouse/pull/2782) +- Added the `output_format_json_escape_forward_slashes` option. [Alexander Bocharov](https://github.com/ClickHouse/ClickHouse/pull/2812) +- Added the `max_fetch_partition_retries_count` setting. [\#2831](https://github.com/ClickHouse/ClickHouse/pull/2831) +- Added the `prefer_localhost_replica` setting, which makes it possible to disable the preference for a local replica and going to the local replica without inter-process interaction. [\#2832](https://github.com/ClickHouse/ClickHouse/pull/2832) +- The `quantileExact` aggregate function returns `nan` in the case of aggregation on an empty `Float32` or `Float64` set. [Sundy Li](https://github.com/ClickHouse/ClickHouse/pull/2855) + +#### Bug fixes: {#bug-fixes-14} + +- Removed unnecessary escaping of the connection string parameters for ODBC, which made it impossible to establish a connection. This error occurred in version 18.6.0. +- Fixed the logic for processing `REPLACE PARTITION` commands in the replication queue. If there are two `REPLACE` commands for the same partition, the incorrect logic could cause one of them to remain in the replication queue and not be executed. [\#2814](https://github.com/ClickHouse/ClickHouse/pull/2814) +- Fixed a merge bug when all data parts were empty (parts that were formed from a merge or from `ALTER DELETE` if all data was deleted). This bug appeared in version 18.1.0. [\#2930](https://github.com/ClickHouse/ClickHouse/pull/2930) +- Fixed an error for concurrent `Set` or `Join`. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2823) +- Fixed the `Block structure mismatch in UNION stream: different number of columns` error that occurred for `UNION ALL` queries inside a sub-query if one of the `SELECT` queries contains duplicate column names. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2094) +- Fixed a memory leak if an exception occurred when connecting to a MySQL server. +- Fixed an incorrect `clickhouse-client` response code in the case of a query error. +- Fixed incorrect behavior of materialized views containing DISTINCT. [\#2795](https://github.com/ClickHouse/ClickHouse/issues/2795) + +#### Backward incompatible changes: {#backward-incompatible-changes-4} + +- Removed support for CHECK TABLE queries for Distributed tables. + +#### Build changes: {#build-changes-3} + +- The allocator has been replaced: `jemalloc` is now used instead of `tcmalloc`. In some scenarios, this increases speed by up to 20%. However, there are queries that have slowed down by up to 20%. Memory consumption has been reduced by approximately 10% in some scenarios, with improved stability. Under highly concurrent loads, CPU usage in userspace and in the kernel shows just a slight increase. [\#2773](https://github.com/ClickHouse/ClickHouse/pull/2773) +- Use of libressl from a submodule. [\#1983](https://github.com/ClickHouse/ClickHouse/pull/1983) [\#2807](https://github.com/ClickHouse/ClickHouse/pull/2807) +- Use of unixodbc from a submodule. [\#2789](https://github.com/ClickHouse/ClickHouse/pull/2789) +- Use of mariadb-connector-c from a submodule. 
[\#2785](https://github.com/ClickHouse/ClickHouse/pull/2785) +- Added functional test files to the repository that depend on the availability of test data (for the time being, without the test data itself). + +## ClickHouse release 18.6 {#clickhouse-release-18-6} + +### ClickHouse release 18.6.0, 2018-08-02 {#clickhouse-release-18-6-0-2018-08-02} + +#### New features: {#new-features-6} + +- Added support for ON expressions for the JOIN ON syntax: + `JOIN ON Expr([table.]column ...) = Expr([table.]column, ...) [AND Expr([table.]column, ...) = Expr([table.]column, ...) ...]` + The expression must be a chain of equalities joined by the AND operator. Each side of the equality can be an arbitrary expression over the columns of one of the tables. The use of fully qualified column names is supported (`table.name`, `database.table.name`, `table_alias.name`, `subquery_alias.name`) for the right table. [\#2742](https://github.com/ClickHouse/ClickHouse/pull/2742) +- HTTPS can be enabled for replication. [\#2760](https://github.com/ClickHouse/ClickHouse/pull/2760) + +#### Improvements: {#improvements-6} + +- The server passes the patch component of its version to the client. Data about the patch version component is in `system.processes` and `query_log`. [\#2646](https://github.com/ClickHouse/ClickHouse/pull/2646) + +## ClickHouse release 18.5 {#clickhouse-release-18-5} + +### ClickHouse release 18.5.1, 2018-07-31 {#clickhouse-release-18-5-1-2018-07-31} + +#### New features: {#new-features-7} + +- Added the hash function `murmurHash2_32` [\#2756](https://github.com/ClickHouse/ClickHouse/pull/2756). + +#### Improvements: {#improvements-7} + +- Now you can use the `from_env` [\#2741](https://github.com/ClickHouse/ClickHouse/pull/2741) attribute to set values in config files from environment variables. +- Added case-insensitive versions of the `coalesce`, `ifNull`, and `nullIf` functions [\#2752](https://github.com/ClickHouse/ClickHouse/pull/2752). + +#### Bug fixes: {#bug-fixes-15} + +- Fixed a possible bug when starting a replica [\#2759](https://github.com/ClickHouse/ClickHouse/pull/2759). + +## ClickHouse release 18.4 {#clickhouse-release-18-4} + +### ClickHouse release 18.4.0, 2018-07-28 {#clickhouse-release-18-4-0-2018-07-28} + +#### New features: {#new-features-8} + +- Added system tables: `formats`, `data_type_families`, `aggregate_function_combinators`, `table_functions`, `table_engines`, `collations` [\#2721](https://github.com/ClickHouse/ClickHouse/pull/2721). +- Added the ability to use a table function instead of a table as an argument of a `remote` or `cluster` table function [\#2708](https://github.com/ClickHouse/ClickHouse/pull/2708). +- Support for `HTTP Basic` authentication in the replication protocol [\#2727](https://github.com/ClickHouse/ClickHouse/pull/2727). +- The `has` function now allows searching for a numeric value in an array of `Enum` values [Maxim Khrisanfov](https://github.com/ClickHouse/ClickHouse/pull/2699). +- Support for adding arbitrary message separators when reading from `Kafka` [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2701). + +#### Improvements: {#improvements-8} + +- The `ALTER TABLE t DELETE WHERE` query does not rewrite data parts that were not affected by the WHERE condition [\#2694](https://github.com/ClickHouse/ClickHouse/pull/2694). +- The `use_minimalistic_checksums_in_zookeeper` option for `ReplicatedMergeTree` tables is enabled by default. This setting was added in version 1.1.54378, 2018-04-16. 
Versions that are older than 1.1.54378 can no longer be installed. +- Support for running `KILL` and `OPTIMIZE` queries that specify `ON CLUSTER` [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2689). + +#### Bug fixes: {#bug-fixes-16} + +- Fixed the error `Column ... is not under an aggregate function and not in GROUP BY` for aggregation with an IN expression. This bug appeared in version 18.1.0. ([bbdd780b](https://github.com/ClickHouse/ClickHouse/commit/bbdd780be0be06a0f336775941cdd536878dd2c2)) +- Fixed a bug in the `windowFunnel` aggregate function [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2735). +- Fixed a bug in the `anyHeavy` aggregate function ([a2101df2](https://github.com/ClickHouse/ClickHouse/commit/a2101df25a6a0fba99aa71f8793d762af2b801ee)). +- Fixed a server crash when using the `countArray()` aggregate function. + +#### Backward incompatible changes: {#backward-incompatible-changes-5} + +- Parameters for the `Kafka` engine were changed from `Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format[, kafka_schema, kafka_num_consumers])` to `Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format[, kafka_row_delimiter, kafka_schema, kafka_num_consumers])`. If your tables use the `kafka_schema` or `kafka_num_consumers` parameters, you have to manually edit the metadata files `path/metadata/database/table.sql` and add the `kafka_row_delimiter` parameter with the value `''`. + +## ClickHouse release 18.1 {#clickhouse-release-18-1} + +### ClickHouse release 18.1.0, 2018-07-23 {#clickhouse-release-18-1-0-2018-07-23} + +#### New features: {#new-features-9} + +- Support for the `ALTER TABLE t DELETE WHERE` query for non-replicated MergeTree tables ([\#2634](https://github.com/ClickHouse/ClickHouse/pull/2634)). +- Support for arbitrary types for the `uniq*` family of aggregate functions ([\#2010](https://github.com/ClickHouse/ClickHouse/issues/2010)). +- Support for arbitrary types in comparison operators ([\#2026](https://github.com/ClickHouse/ClickHouse/issues/2026)). +- The `users.xml` file allows setting a subnet mask in the format `10.0.0.1/255.255.255.0`. This is necessary for using masks for IPv6 networks with zeros in the middle ([\#2637](https://github.com/ClickHouse/ClickHouse/pull/2637)). +- Added the `arrayDistinct` function ([\#2670](https://github.com/ClickHouse/ClickHouse/pull/2670)). +- The SummingMergeTree engine can now work with AggregateFunction type columns ([Constantin S. Pan](https://github.com/ClickHouse/ClickHouse/pull/2566)). + +#### Improvements: {#improvements-9} + +- Changed the numbering scheme for release versions. Now the first part contains the year of release (A.D., Moscow timezone, minus 2000), the second part contains the number for major changes (increases for most releases), and the third part is the patch version. Releases are still backward compatible, unless otherwise stated in the changelog. +- Faster conversions of floating-point numbers to a string ([Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2664)). +- If some rows were skipped during an insert due to parsing errors (this is possible with the `input_allow_errors_num` and `input_allow_errors_ratio` settings enabled), the number of skipped rows is now written to the server log ([Leonardo Cecchi](https://github.com/ClickHouse/ClickHouse/pull/2669)). + +#### Bug fixes: {#bug-fixes-17} + +- Fixed the TRUNCATE command for temporary tables ([Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2624)). 
+- Fixed a rare deadlock in the ZooKeeper client library that occurred when there was a network error while reading the response ([c315200](https://github.com/ClickHouse/ClickHouse/commit/c315200e64b87e44bdf740707fc857d1fdf7e947)). +- Fixed an error during a CAST to Nullable types ([\#1322](https://github.com/ClickHouse/ClickHouse/issues/1322)). +- Fixed the incorrect result of the `maxIntersection()` function when the boundaries of intervals coincided ([Michael Furmur](https://github.com/ClickHouse/ClickHouse/pull/2657)). +- Fixed incorrect transformation of the OR expression chain in a function argument ([chenxing-xc](https://github.com/ClickHouse/ClickHouse/pull/2663)). +- Fixed performance degradation for queries containing `IN (subquery)` expressions inside another subquery ([\#2571](https://github.com/ClickHouse/ClickHouse/issues/2571)). +- Fixed incompatibility between servers with different versions in distributed queries that use a `CAST` function that isn’t in uppercase letters ([fe8c4d6](https://github.com/ClickHouse/ClickHouse/commit/fe8c4d64e434cacd4ceef34faa9005129f2190a5)). +- Added missing quoting of identifiers for queries to an external DBMS ([\#2635](https://github.com/ClickHouse/ClickHouse/issues/2635)). + +#### Backward incompatible changes: {#backward-incompatible-changes-6} + +- Converting a string containing the number zero to DateTime does not work. Example: `SELECT toDateTime('0')`. This is also the reason that `DateTime DEFAULT '0'` does not work in tables, as well as `0` in dictionaries. Solution: replace `0` with `0000-00-00 00:00:00`. + +## ClickHouse release 1.1 {#clickhouse-release-1-1} + +### ClickHouse release 1.1.54394, 2018-07-12 {#clickhouse-release-1-1-54394-2018-07-12} + +#### New features: {#new-features-10} + +- Added the `histogram` aggregate function ([Mikhail Surin](https://github.com/ClickHouse/ClickHouse/pull/2521)). +- Now `OPTIMIZE TABLE ... FINAL` can be used without specifying partitions for `ReplicatedMergeTree` ([Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2600)). + +#### Bug fixes: {#bug-fixes-18} + +- Fixed a problem with a very small timeout for sockets (one second) for reading and writing when sending and downloading replicated data, which made it impossible to download larger parts if there is a load on the network or disk (it resulted in cyclical attempts to download parts). This error occurred in version 1.1.54388. +- Fixed issues when using chroot in ZooKeeper if you inserted duplicate data blocks in the table. +- The `has` function now works correctly for an array with Nullable elements ([\#2115](https://github.com/ClickHouse/ClickHouse/issues/2115)). +- The `system.tables` table now works correctly when used in distributed queries. The `metadata_modification_time` and `engine_full` columns are now non-virtual. Fixed an error that occurred if only these columns were queried from the table. +- Fixed how an empty `TinyLog` table works after inserting an empty data block ([\#2563](https://github.com/ClickHouse/ClickHouse/issues/2563)). +- The `system.zookeeper` table works if the value of the node in ZooKeeper is NULL. + +### ClickHouse release 1.1.54390, 2018-07-06 {#clickhouse-release-1-1-54390-2018-07-06} + +#### New features: {#new-features-11} + +- Queries can be sent in `multipart/form-data` format (in the `query` field), which is useful if external data is also sent for query processing ([Olga Hvostikova](https://github.com/ClickHouse/ClickHouse/pull/2490)). 
+- Added the ability to enable or disable processing single or double quotes when reading data in CSV format. You can configure this in the `format_csv_allow_single_quotes` and `format_csv_allow_double_quotes` settings ([Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2574)). +- Now `OPTIMIZE TABLE ... FINAL` can be used without specifying the partition for non-replicated variants of `MergeTree` ([Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2599)). + +#### Improvements: {#improvements-10} + +- Improved performance, reduced memory consumption, and correct memory consumption tracking with use of the IN operator when a table index could be used ([\#2584](https://github.com/ClickHouse/ClickHouse/pull/2584)). +- Removed redundant checking of checksums when adding a data part. This is important when there are a large number of replicas, because in these cases the total number of checks was equal to N^2. +- Added support for `Array(Tuple(...))` arguments for the `arrayEnumerateUniq` function ([\#2573](https://github.com/ClickHouse/ClickHouse/pull/2573)). +- Added `Nullable` support for the `runningDifference` function ([\#2594](https://github.com/ClickHouse/ClickHouse/pull/2594)). +- Improved query analysis performance when there is a very large number of expressions ([\#2572](https://github.com/ClickHouse/ClickHouse/pull/2572)). +- Faster selection of data parts for merging in `ReplicatedMergeTree` tables. Faster recovery of the ZooKeeper session ([\#2597](https://github.com/ClickHouse/ClickHouse/pull/2597)). +- The `format_version.txt` file for `MergeTree` tables is re-created if it is missing, which makes sense if ClickHouse is launched after copying the directory structure without files ([Ciprian Hacman](https://github.com/ClickHouse/ClickHouse/pull/2593)). + +#### Bug fixes: {#bug-fixes-19} + +- Fixed a bug when working with ZooKeeper that could make it impossible to recover the session and readonly states of tables before restarting the server. +- Fixed a bug when working with ZooKeeper that could result in old nodes not being deleted if the session is interrupted. +- Fixed an error in the `quantileTDigest` function for Float arguments (this bug was introduced in version 1.1.54388) ([Mikhail Surin](https://github.com/ClickHouse/ClickHouse/pull/2553)). +- Fixed a bug in the index for MergeTree tables if the primary key column is located inside the function for converting types between signed and unsigned integers of the same size ([\#2603](https://github.com/ClickHouse/ClickHouse/pull/2603)). +- Fixed segfault if `macros` are used but they aren’t in the config file ([\#2570](https://github.com/ClickHouse/ClickHouse/pull/2570)). +- Fixed switching to the default database when reconnecting the client ([\#2583](https://github.com/ClickHouse/ClickHouse/pull/2583)). +- Fixed a bug that occurred when the `use_index_for_in_with_subqueries` setting was disabled. + +#### Security fix: {#security-fix-1} + +- Sending files is no longer possible when connected to MySQL (`LOAD DATA LOCAL INFILE`). + +### ClickHouse release 1.1.54388, 2018-06-28 {#clickhouse-release-1-1-54388-2018-06-28} + +#### New features: {#new-features-12} + +- Support for the `ALTER TABLE t DELETE WHERE` query for replicated tables. Added the `system.mutations` table to track progress of this type of queries. +- Support for the `ALTER TABLE t [REPLACE|ATTACH] PARTITION` query for \*MergeTree tables. 
+- Support for the `TRUNCATE TABLE` query ([Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2260)). +- Several new `SYSTEM` queries for replicated tables (`RESTART REPLICAS`, `SYNC REPLICA`, `[STOP|START] [MERGES|FETCHES|REPLICATED SENDS|REPLICATION QUEUES]`). +- Added the ability to write to a table with the MySQL engine and the corresponding table function ([sundy-li](https://github.com/ClickHouse/ClickHouse/pull/2294)). +- Added the `url()` table function and the `URL` table engine ([Alexander Sapin](https://github.com/ClickHouse/ClickHouse/pull/2501)). +- Added the `windowFunnel` aggregate function ([sundy-li](https://github.com/ClickHouse/ClickHouse/pull/2352)). +- New `startsWith` and `endsWith` functions for strings ([Vadim Plakhtinsky](https://github.com/ClickHouse/ClickHouse/pull/2429)). +- The `numbers()` table function now allows you to specify the offset ([Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2535)). +- The password to `clickhouse-client` can be entered interactively. +- Server logs can now be sent to syslog ([Alexander Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/2459)). +- Support for logging in dictionaries with a shared library source ([Alexander Sapin](https://github.com/ClickHouse/ClickHouse/pull/2472)). +- Support for custom CSV delimiters ([Ivan Zhukov](https://github.com/ClickHouse/ClickHouse/pull/2263)). +- Added the `date_time_input_format` setting. If you switch this setting to `'best_effort'`, DateTime values will be read in a wide range of formats. +- Added the `clickhouse-obfuscator` utility for data obfuscation. Usage example: publishing data used in performance tests. + +#### Experimental features: {#experimental-features-2} + +- Added the ability to calculate `and` arguments only where they are needed ([Anastasia Tsarkova](https://github.com/ClickHouse/ClickHouse/pull/2272)). +- JIT compilation to native code is now available for some expressions ([pyos](https://github.com/ClickHouse/ClickHouse/pull/2277)). + +#### Bug fixes: {#bug-fixes-20} + +- Duplicates no longer appear for a query with `DISTINCT` and `ORDER BY`. +- Queries with `ARRAY JOIN` and `arrayFilter` no longer return an incorrect result. +- Fixed an error when reading an array column from a Nested structure ([\#2066](https://github.com/ClickHouse/ClickHouse/issues/2066)). +- Fixed an error when analyzing queries with a HAVING clause like `HAVING tuple IN (...)`. +- Fixed an error when analyzing queries with recursive aliases. +- Fixed an error when reading from ReplacingMergeTree with a condition in PREWHERE that filters all rows ([\#2525](https://github.com/ClickHouse/ClickHouse/issues/2525)). +- User profile settings were not applied when using sessions in the HTTP interface. +- Fixed how settings are applied from the command line parameters in `clickhouse-local`. +- The ZooKeeper client library now uses the session timeout received from the server. +- Fixed a bug in the ZooKeeper client library when the client waited for the server response longer than the timeout. +- Fixed pruning of parts for queries with conditions on partition key columns ([\#2342](https://github.com/ClickHouse/ClickHouse/issues/2342)). +- Merges are now possible after `CLEAR COLUMN IN PARTITION` ([\#2315](https://github.com/ClickHouse/ClickHouse/issues/2315)). +- Type mapping in the ODBC table function has been fixed ([sundy-li](https://github.com/ClickHouse/ClickHouse/pull/2268)). 
+- Type comparisons have been fixed for `DateTime` with and without the time zone ([Alexander Bocharov](https://github.com/ClickHouse/ClickHouse/pull/2400)). +- Fixed syntactic parsing and formatting of the `CAST` operator. +- Fixed insertion into a materialized view for the Distributed table engine ([Babacar Diassé](https://github.com/ClickHouse/ClickHouse/pull/2411)). +- Fixed a race condition when writing data from the `Kafka` engine to materialized views ([Yangkuan Liu](https://github.com/ClickHouse/ClickHouse/pull/2448)). +- Fixed SSRF in the `remote()` table function. +- Fixed exit behavior of `clickhouse-client` in multiline mode ([\#2510](https://github.com/ClickHouse/ClickHouse/issues/2510)). + +#### Improvements: {#improvements-11} + +- Background tasks in replicated tables are now performed in a thread pool instead of in separate threads ([Silviu Caragea](https://github.com/ClickHouse/ClickHouse/pull/1722)). +- Improved LZ4 compression performance. +- Faster analysis for queries with a large number of JOINs and sub-queries. +- The DNS cache is now updated automatically when there are too many network errors. +- Table inserts no longer occur if the insert into one of the materialized views is not possible because it has too many parts. +- Corrected the discrepancy in the event counters `Query`, `SelectQuery`, and `InsertQuery`. +- Expressions like `tuple IN (SELECT tuple)` are allowed if the tuple types match. +- A server with replicated tables can start even if you haven’t configured ZooKeeper. +- When calculating the number of available CPU cores, limits on cgroups are now taken into account ([Atri Sharma](https://github.com/ClickHouse/ClickHouse/pull/2325)). +- Added chown for config directories in the systemd config file ([Mikhail Shiryaev](https://github.com/ClickHouse/ClickHouse/pull/2421)). + +#### Build changes: {#build-changes-4} + +- The gcc8 compiler can be used for builds. +- Added the ability to build llvm from a submodule. +- The version of the librdkafka library has been updated to v0.11.4. +- Added the ability to use the system libcpuid library. The library version has been updated to 0.4.0. +- Fixed the build using the vectorclass library ([Babacar Diassé](https://github.com/ClickHouse/ClickHouse/pull/2274)). +- CMake now generates files for ninja by default (like when using `-G Ninja`). +- Added the ability to use the libtinfo library instead of libtermcap ([Georgy Kondratiev](https://github.com/ClickHouse/ClickHouse/pull/2519)). +- Fixed a header file conflict in Fedora Rawhide ([\#2520](https://github.com/ClickHouse/ClickHouse/issues/2520)). + +#### Backward incompatible changes: {#backward-incompatible-changes-7} + +- Removed escaping in `Vertical` and `Pretty*` formats and deleted the `VerticalRaw` format. +- If servers with version 1.1.54388 (or newer) and servers with an older version are used simultaneously in a distributed query and the query has the `cast(x, 'Type')` expression without the `AS` keyword and doesn’t have the word `cast` in uppercase, an exception will be thrown with a message like `Not found column cast(0, 'UInt8') in block`. Solution: Update the server on the entire cluster. + +### ClickHouse release 1.1.54385, 2018-06-01 {#clickhouse-release-1-1-54385-2018-06-01} + +#### Bug fixes: {#bug-fixes-21} + +- Fixed an error that in some cases caused ZooKeeper operations to block. 
+ +### ClickHouse release 1.1.54383, 2018-05-22 {#clickhouse-release-1-1-54383-2018-05-22} + +#### Bug fixes: {#bug-fixes-22} + +- Fixed a slowdown of the replication queue if a table has many replicas. + +### ClickHouse release 1.1.54381, 2018-05-14 {#clickhouse-release-1-1-54381-2018-05-14} + +#### Bug fixes: {#bug-fixes-23} + +- Fixed a leak of nodes in ZooKeeper when ClickHouse loses the connection to the ZooKeeper server. + +### ClickHouse release 1.1.54380, 2018-04-21 {#clickhouse-release-1-1-54380-2018-04-21} + +#### New features: {#new-features-13} + +- Added the table function `file(path, format, structure)`. An example reading bytes from `/dev/urandom`: ``` ln -s /dev/urandom /var/lib/clickhouse/user_files/random; clickhouse-client -q "SELECT * FROM file('random', 'RowBinary', 'd UInt8') LIMIT 10" ```. + +#### Improvements: {#improvements-12} + +- Subqueries can be wrapped in `()` brackets to enhance query readability. For example: `(SELECT 1) UNION ALL (SELECT 1)`. +- Simple `SELECT` queries from the `system.processes` table are not included in the `max_concurrent_queries` limit. + +#### Bug fixes: {#bug-fixes-24} + +- Fixed incorrect behavior of the `IN` operator when selecting from a `MATERIALIZED VIEW`. +- Fixed incorrect filtering by partition index in expressions like `partition_key_column IN (...)`. +- Fixed inability to execute an `OPTIMIZE` query on a non-leader replica if `RENAME` was performed on the table. +- Fixed the authorization error when executing `OPTIMIZE` or `ALTER` queries on a non-leader replica. +- Fixed freezing of `KILL QUERY`. +- Fixed an error in the ZooKeeper client library which led to loss of watches, freezing of the distributed DDL queue, and slowdowns in the replication queue if a non-empty `chroot` prefix is used in the ZooKeeper configuration. + +#### Backward incompatible changes: {#backward-incompatible-changes-8} + +- Removed support for expressions like `(a, b) IN (SELECT (a, b))` (you can use the equivalent expression `(a, b) IN (SELECT a, b)`). In previous releases, these expressions led to nondeterministic `WHERE` filtering or caused errors. + +### ClickHouse release 1.1.54378, 2018-04-16 {#clickhouse-release-1-1-54378-2018-04-16} + +#### New features: {#new-features-14} + +- Logging level can be changed without restarting the server. +- Added the `SHOW CREATE DATABASE` query. +- The `query_id` can be passed to `clickhouse-client` (elBroom). +- New setting: `max_network_bandwidth_for_all_users`. +- Added support for `ALTER TABLE ... PARTITION ...` for `MATERIALIZED VIEW`. +- Added information about the size of data parts in uncompressed form in the system table. +- Server-to-server encryption support for distributed tables (`<secure>1</secure>` in the replica config in `<remote_servers>`). +- Table-level configuration for the `ReplicatedMergeTree` family in order to minimize the amount of data stored in ZooKeeper: `use_minimalistic_checksums_in_zookeeper = 1`. +- Configuration of the `clickhouse-client` prompt. By default, server names are now output to the prompt. The server’s display name can be changed. It’s also sent in the `X-ClickHouse-Display-Name` HTTP header (Kirill Shvakov). +- Multiple comma-separated `topics` can be specified for the `Kafka` engine (Tobias Adamson). +- When a query is stopped by `KILL QUERY` or `replace_running_query`, the client receives the `Query was canceled` exception instead of an incomplete result. + +#### Improvements: {#improvements-13} + +- `ALTER TABLE ... DROP/DETACH PARTITION` queries are run at the front of the replication queue. +- `SELECT ... 
FINAL` and `OPTIMIZE ... FINAL` can be used even when the table has a single data part. +- A `query_log` table is recreated on the fly if it was deleted manually (Kirill Shvakov). +- The `lengthUTF8` function runs faster (zhang2014). +- Improved performance of synchronous inserts in `Distributed` tables (`insert_distributed_sync = 1`) when there is a very large number of shards. +- The server accepts the `send_timeout` and `receive_timeout` settings from the client and applies them when connecting to the client (they are applied in reverse order: the server socket’s `send_timeout` is set to the `receive_timeout` value received from the client, and vice versa). +- More robust crash recovery for asynchronous insertion into `Distributed` tables. +- The return type of the `countEqual` function changed from `UInt32` to `UInt64` (谢磊). + +#### Bug fixes: {#bug-fixes-25} + +- Fixed an error with `IN` when the left side of the expression is `Nullable`. +- Correct results are now returned when using tuples with `IN` when some of the tuple components are in the table index. +- The `max_execution_time` limit now works correctly with distributed queries. +- Fixed errors when calculating the size of composite columns in the `system.columns` table. +- Fixed an error when creating a temporary table `CREATE TEMPORARY TABLE IF NOT EXISTS`. +- Fixed errors in `StorageKafka` (\#2075). +- Fixed server crashes from invalid arguments of certain aggregate functions. +- Fixed the error that prevented the `DETACH DATABASE` query from stopping background tasks for `ReplicatedMergeTree` tables. +- The `Too many parts` state is less likely to happen when inserting into aggregated materialized views (\#2084). +- Corrected recursive handling of substitutions in the config if a substitution must be followed by another substitution on the same level. +- Corrected the syntax in the metadata file when creating a `VIEW` that uses a query with `UNION ALL`. +- `SummingMergeTree` now works correctly for summation of nested data structures with a composite key. +- Fixed the possibility of a race condition when choosing the leader for `ReplicatedMergeTree` tables. + +#### Build changes: {#build-changes-5} + +- The build supports `ninja` instead of `make` and uses `ninja` by default for building releases. +- Renamed packages: `clickhouse-server-base` to `clickhouse-common-static`; `clickhouse-server-common` to `clickhouse-server`; `clickhouse-common-dbg` to `clickhouse-common-static-dbg`. To install, use `clickhouse-server clickhouse-client`. Packages with the old names are still provided in the repositories for backward compatibility. + +#### Backward incompatible changes: {#backward-incompatible-changes-9} + +- Removed the special interpretation of an IN expression if an array is specified on the left side. Previously, the expression `arr IN (set)` was interpreted as “at least one `arr` element belongs to the `set`”. To get the same behavior in the new version, write `arrayExists(x -> x IN (set), arr)`; see the sketch after this list. +- Disabled the incorrect use of the socket option `SO_REUSEPORT`, which was incorrectly enabled by default in the Poco library. Note that on Linux there is no longer any reason to simultaneously specify the addresses `::` and `0.0.0.0` for listen – use just `::`, which allows listening to the connection both over IPv4 and IPv6 (with the default kernel config settings). You can also revert to the behavior from previous versions by specifying `<listen_reuse_port>1</listen_reuse_port>` in the config. 
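+
+A minimal sketch of the migration, shown with a hypothetical table `events` that has an `Array` column `arr` (both names are illustrative, not from the release notes):
+
+``` sql
+-- Before this change, the following matched rows where ANY element of arr was in the set:
+--   SELECT count() FROM events WHERE arr IN (1, 2, 3);
+-- The equivalent query in the new version states the semantics explicitly:
+SELECT count()
+FROM events
+WHERE arrayExists(x -> x IN (1, 2, 3), arr);
+```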
+ +### ClickHouse release 1.1.54370, 2018-03-16 {#clickhouse-release-1-1-54370-2018-03-16} + +#### New features: {#new-features-15} + +- Added the `system.macros` table and auto updating of macros when the config file is changed. +- Added the `SYSTEM RELOAD CONFIG` query. +- Added the `maxIntersections(left_col, right_col)` aggregate function, which returns the maximum number of simultaneously intersecting intervals `[left; right]`. The `maxIntersectionsPosition(left, right)` function returns the beginning of the “maximum” interval. ([Michael Furmur](https://github.com/ClickHouse/ClickHouse/pull/2012)). + +#### Improvements: {#improvements-14} + +- When inserting data in a `Replicated` table, fewer requests are made to `ZooKeeper` (and most of the user-level errors have disappeared from the `ZooKeeper` log). +- Added the ability to create aliases for data sets. Example: `WITH (1, 2, 3) AS set SELECT number IN set FROM system.numbers LIMIT 10`. + +#### Bug fixes: {#bug-fixes-26} + +- Fixed the `Illegal PREWHERE` error when reading from Merge tables for `Distributed` tables. +- Added fixes that allow you to start `clickhouse-server` in IPv4-only Docker containers. +- Fixed a race condition when reading from the `system.parts_columns` table. +- Removed double buffering during a synchronous insert to a `Distributed` table, which could have caused the connection to time out. +- Fixed a bug that caused excessively long waits for an unavailable replica before beginning a `SELECT` query. +- Fixed incorrect dates in the `system.parts` table. +- Fixed a bug that made it impossible to insert data in a `Replicated` table if `chroot` was non-empty in the configuration of the `ZooKeeper` cluster. +- Fixed the vertical merging algorithm for an empty `ORDER BY` table. +- Restored the ability to use dictionaries in queries to remote tables, even if these dictionaries are not present on the requestor server. This functionality was lost in release 1.1.54362. +- Restored the behavior for queries like `SELECT * FROM remote('server2', default.table) WHERE col IN (SELECT col2 FROM default.table)` when the right side of the `IN` should use a remote `default.table` instead of a local one. This behavior was broken in version 1.1.54358. +- Removed extraneous error-level logging of `Not found column ... in block`. + +### ClickHouse release 1.1.54362, 2018-03-11 {#clickhouse-release-1-1-54362-2018-03-11} + +#### New features: {#new-features-16} + +- Aggregation without `GROUP BY` for an empty set (such as `SELECT count(*) FROM table WHERE 0`) now returns a result with one row with null values for aggregate functions, in compliance with the SQL standard. To restore the old behavior (return an empty result), set `empty_result_for_aggregation_by_empty_set` to 1. +- Added type conversion for `UNION ALL`. Different alias names are allowed in `SELECT` positions in `UNION ALL`, in compliance with the SQL standard. +- Arbitrary expressions are supported in `LIMIT BY` clauses. Previously, it was only possible to use columns resulting from `SELECT`. +- An index of `MergeTree` tables is used when `IN` is applied to a tuple of expressions from the columns of the primary key. Example: `WHERE (UserID, EventDate) IN ((123, '2000-01-01'), ...)` (Anastasiya Tsarkova). +- Added the `clickhouse-copier` tool for copying between clusters and resharding data (beta). +- Added consistent hashing functions: `yandexConsistentHash`, `jumpConsistentHash`, `sumburConsistentHash`. 
They can be used as a sharding key in order to reduce the amount of network traffic during subsequent reshardings. +- Added functions: `arrayAny`, `arrayAll`, `hasAny`, `hasAll`, `arrayIntersect`, `arrayResize`. +- Added the `arrayCumSum` function (Javi Santana). +- Added the `parseDateTimeBestEffort`, `parseDateTimeBestEffortOrZero`, and `parseDateTimeBestEffortOrNull` functions to read the DateTime from a string containing text in a wide variety of possible formats. +- Data can be partially reloaded from external dictionaries during updating (load just the records in which the value of the specified field is greater than in the previous download) (Arsen Hakobyan). +- Added the `cluster` table function. Example: `cluster(cluster_name, db, table)`. The `remote` table function can accept the cluster name as the first argument, if it is specified as an identifier. +- The `remote` and `cluster` table functions can be used in `INSERT` queries. +- Added the `create_table_query` and `engine_full` virtual columns to the `system.tables` table. The `metadata_modification_time` column is virtual. +- Added the `data_path` and `metadata_path` columns to the `system.tables` and `system.databases` tables, and added the `path` column to the `system.parts` and `system.parts_columns` tables. +- Added additional information about merges in the `system.part_log` table. +- An arbitrary partitioning key can be used for the `system.query_log` table (Kirill Shvakov). +- The `SHOW TABLES` query now also shows temporary tables. Added temporary tables and the `is_temporary` column to `system.tables` (zhang2014). +- Added `DROP TEMPORARY TABLE` and `EXISTS TEMPORARY TABLE` queries (zhang2014). +- Support for `SHOW CREATE TABLE` for temporary tables (zhang2014). +- Added the `system_profile` configuration parameter for the settings used by internal processes. +- Support for loading `object_id` as an attribute in `MongoDB` dictionaries (Pavel Litvinenko). +- Reading `null` as the default value when loading data for an external dictionary with the `MongoDB` source (Pavel Litvinenko). +- Reading `DateTime` values in the `Values` format from a Unix timestamp without single quotes. +- Failover is supported in the `remote` table function for cases when some of the replicas are missing the requested table. +- Configuration settings can be overridden in the command line when you run `clickhouse-server`. Example: `clickhouse-server -- --logger.level=information`. +- Implemented the `empty` function for a `FixedString` argument: the function returns 1 if the string consists entirely of null bytes (zhang2014). +- Added the `listen_try` configuration parameter for listening to at least one of the listen addresses without quitting, if some of the addresses can’t be listened to (useful for systems with disabled support for IPv4 or IPv6). +- Added the `VersionedCollapsingMergeTree` table engine. +- Support for rows and arbitrary numeric types for the `library` dictionary source. +- `MergeTree` tables can be used without a primary key (you need to specify `ORDER BY tuple()`; see the sketch after this list). +- A `Nullable` type can be `CAST` to a non-`Nullable` type if the argument is not `NULL`. +- `RENAME TABLE` can be performed for `VIEW`. +- Added the `throwIf` function. +- Added the `odbc_default_field_size` option, which allows you to extend the maximum size of the value loaded from an ODBC source (by default, it is 1024). +- The `system.processes` table and `SHOW PROCESSLIST` now have the `is_cancelled` and `peak_memory_usage` columns. 
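+
+A minimal sketch of such a table (the table and column names are illustrative, not from the release notes):
+
+``` sql
+-- ORDER BY tuple() declares an empty sort key, so the data is not sorted
+-- and no primary key index is created.
+CREATE TABLE unordered_events
+(
+    `event_time` DateTime,
+    `payload` String
+)
+ENGINE = MergeTree
+ORDER BY tuple()
+```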
+ +#### Improvements: {#improvements-15} + +- Limits and quotas on the result are no longer applied to intermediate data for `INSERT SELECT` queries or for `SELECT` subqueries. +- Fewer false triggers of `force_restore_data` when checking the status of `Replicated` tables when the server starts. +- Added the `allow_distributed_ddl` option. +- Nondeterministic functions are not allowed in expressions for `MergeTree` table keys. +- Files with substitutions from `config.d` directories are loaded in alphabetical order. +- Improved performance of the `arrayElement` function in the case of a constant multidimensional array with an empty array as one of the elements. Example: `[[1], []][x]`. +- The server starts faster now when using configuration files with very large substitutions (for instance, very large lists of IP networks). +- When running a query, table valued functions run once. Previously, `remote` and `mysql` table valued functions performed the same query twice to retrieve the table structure from a remote server. +- The `MkDocs` documentation generator is used. +- When you try to delete a table column that `DEFAULT`/`MATERIALIZED` expressions of other columns depend on, an exception is thrown (zhang2014). +- Added the ability to parse an empty line in text formats as the number 0 for `Float` data types. This feature was previously available but was lost in release 1.1.54342. +- `Enum` values can be used in `min`, `max`, `sum` and some other functions. In these cases, it uses the corresponding numeric values. This feature was previously available but was lost in the release 1.1.54337. +- Added `max_expanded_ast_elements` to restrict the size of the AST after recursively expanding aliases. + +#### Bug fixes: {#bug-fixes-27} + +- Fixed cases when unnecessary columns were removed from subqueries in error, or not removed from subqueries containing `UNION ALL`. +- Fixed a bug in merges for `ReplacingMergeTree` tables. +- Fixed synchronous insertions in `Distributed` tables (`insert_distributed_sync = 1`). +- Fixed segfault for certain uses of `FULL` and `RIGHT JOIN` with duplicate columns in subqueries. +- Fixed segfault for certain uses of `replace_running_query` and `KILL QUERY`. +- Fixed the order of the `source` and `last_exception` columns in the `system.dictionaries` table. +- Fixed a bug when the `DROP DATABASE` query did not delete the file with metadata. +- Fixed the `DROP DATABASE` query for `Dictionary` databases. +- Fixed the low precision of `uniqHLL12` and `uniqCombined` functions for cardinalities greater than 100 million items (Alex Bocharov). +- Fixed the calculation of implicit default values when necessary to simultaneously calculate default explicit expressions in `INSERT` queries (zhang2014). +- Fixed a rare case when a query to a `MergeTree` table couldn’t finish (chenxing-xc). +- Fixed a crash that occurred when running a `CHECK` query for `Distributed` tables if all shards are local (chenxing.xc). +- Fixed a slight performance regression with functions that use regular expressions. +- Fixed a performance regression when creating multidimensional arrays from complex expressions. +- Fixed a bug that could cause an extra `FORMAT` section to appear in an `.sql` file with metadata. +- Fixed a bug that caused the `max_table_size_to_drop` limit to apply when trying to delete a `MATERIALIZED VIEW` looking at an explicitly specified table. 
+- Fixed incompatibility with old clients (old clients were sometimes sent data with the `DateTime('timezone')` type, which they do not understand). +- Fixed a bug when reading `Nested` column elements of structures that were added using `ALTER` but that are empty for the old partitions, when the conditions for these columns moved to `PREWHERE`. +- Fixed a bug when filtering tables by virtual `_table` columns in queries to `Merge` tables. +- Fixed a bug when using `ALIAS` columns in `Distributed` tables. +- Fixed a bug that made dynamic compilation impossible for queries with aggregate functions from the `quantile` family. +- Fixed a race condition in the query execution pipeline that occurred in very rare cases when using `Merge` tables with a large number of tables, and when using `GLOBAL` subqueries. +- Fixed a crash when passing arrays of different sizes to an `arrayReduce` function when using aggregate functions from multiple arguments. +- Prohibited the use of queries with `UNION ALL` in a `MATERIALIZED VIEW`. +- Fixed an error during initialization of the `part_log` system table when the server starts (by default, `part_log` is disabled). + +#### Backward incompatible changes: {#backward-incompatible-changes-10} + +- Removed the `distributed_ddl_allow_replicated_alter` option. This behavior is enabled by default. +- Removed the `strict_insert_defaults` setting. If you were using this functionality, write to `clickhouse-feedback@yandex-team.com`. +- Removed the `UnsortedMergeTree` engine. + +### Clickhouse Release 1.1.54343, 2018-02-05 {#clickhouse-release-1-1-54343-2018-02-05} + +- Added macros support for defining cluster names in distributed DDL queries and constructors of Distributed tables: `CREATE TABLE distr ON CLUSTER '{cluster}' (...) ENGINE = Distributed('{cluster}', 'db', 'table')`. +- Now queries like `SELECT ... FROM table WHERE expr IN (subquery)` are processed using the `table` index. +- Improved processing of duplicates when inserting to Replicated tables, so they no longer slow down execution of the replication queue. + +### Clickhouse Release 1.1.54342, 2018-01-22 {#clickhouse-release-1-1-54342-2018-01-22} + +This release contains bug fixes for the previous release 1.1.54337: + +- Fixed a regression in 1.1.54337: if the default user has readonly access, then the server refuses to start up with the message `Cannot create database in readonly mode`. +- Fixed a regression in 1.1.54337: on systems with systemd, logs are always written to syslog regardless of the configuration; the watchdog script still uses init.d. +- Fixed a regression in 1.1.54337: wrong default configuration in the Docker image. +- Fixed nondeterministic behavior of GraphiteMergeTree (you can see it in log messages `Data after merge is not byte-identical to the data on another replicas`). +- Fixed a bug that may lead to inconsistent merges after OPTIMIZE query to Replicated tables (you may see it in log messages `Part ... intersects the previous part`). +- Buffer tables now work correctly when MATERIALIZED columns are present in the destination table (by zhang2014). +- Fixed a bug in implementation of NULL. + +### Clickhouse Release 1.1.54337, 2018-01-18 {#clickhouse-release-1-1-54337-2018-01-18} + +#### New features: {#new-features-17} + +- Added support for storage of multi-dimensional arrays and tuples (`Tuple` data type) in tables. +- Support for table functions for `DESCRIBE` and `INSERT` queries. Added support for subqueries in `DESCRIBE`. 
Examples: `DESC TABLE remote('host', default.hits)`; `DESC TABLE (SELECT 1)`; `INSERT INTO TABLE FUNCTION remote('host', default.hits)`. Support for `INSERT INTO TABLE` in addition to `INSERT INTO`. +- Improved support for time zones. The `DateTime` data type can be annotated with the timezone that is used for parsing and formatting in text formats. Example: `DateTime('Europe/Moscow')`. When timezones are specified in functions for `DateTime` arguments, the return type will track the timezone, and the value will be displayed as expected. +- Added the functions `toTimeZone`, `timeDiff`, `toQuarter`, `toRelativeQuarterNum`. The `toRelativeHour`/`Minute`/`Second` functions can take a value of type `Date` as an argument. The `now` function name is case-sensitive. +- Added the `toStartOfFifteenMinutes` function (Kirill Shvakov). +- Added the `clickhouse format` tool for formatting queries. +- Added the `format_schema_path` configuration parameter (Marek Vavruşa). It is used for specifying a schema in `Cap'n Proto` format. Schema files can be located only in the specified directory. +- Added support for config substitutions (`incl` and `conf.d`) for configuration of external dictionaries and models (Pavel Yakunin). +- Added a column with documentation for the `system.settings` table (Kirill Shvakov). +- Added the `system.parts_columns` table with information about column sizes in each data part of `MergeTree` tables. +- Added the `system.models` table with information about loaded `CatBoost` machine learning models. +- Added the `mysql` and `odbc` table functions and corresponding `MySQL` and `ODBC` table engines for accessing remote databases. This functionality is in the beta stage. +- Added the possibility to pass an argument of type `AggregateFunction` for the `groupArray` aggregate function (so you can create an array of states of some aggregate function). +- Removed restrictions on various combinations of aggregate function combinators. For example, you can use `avgForEachIf` as well as `avgIfForEach` aggregate functions, which have different behaviors. +- The `-ForEach` aggregate function combinator is extended for the case of aggregate functions of multiple arguments. +- Added support for aggregate functions of `Nullable` arguments even for cases when the function returns a non-`Nullable` result (added with the contribution of Silviu Caragea). Example: `groupArray`, `groupUniqArray`, `topK`. +- Added the `max_client_network_bandwidth` setting for `clickhouse-client` (Kirill Shvakov). +- Users with the `readonly = 2` setting are allowed to work with TEMPORARY tables (CREATE, DROP, INSERT…) (Kirill Shvakov). +- Added support for using multiple consumers with the `Kafka` engine. Extended configuration options for `Kafka` (Marek Vavruša). +- Added the `intExp3` and `intExp4` functions. +- Added the `sumKahan` aggregate function. +- Added the `to*Number*OrNull` functions, where `*Number*` is a numeric type. +- Added support for `WITH` clauses for an `INSERT SELECT` query (author: zhang2014). +- Added settings: `http_connection_timeout`, `http_send_timeout`, `http_receive_timeout`. In particular, these settings are used for downloading data parts for replication. Changing these settings allows for faster failover if the network is overloaded. +- Added support for `ALTER` for tables of type `Null` (Anastasiya Tsarkova). +- The `reinterpretAsString` function is extended for all data types that are stored contiguously in memory. +- Added the `--silent` option for the `clickhouse-local` tool. 
It suppresses printing query execution info to stderr. +- Added support for reading values of type `Date` from text in a format where the month and/or day of the month is specified using a single digit instead of two digits (Amos Bird). + +#### Performance optimizations: {#performance-optimizations} + +- Improved performance of aggregate functions `min`, `max`, `any`, `anyLast`, `anyHeavy`, `argMin`, `argMax` from string arguments. +- Improved performance of the functions `isInfinite`, `isFinite`, `isNaN`, `roundToExp2`. +- Improved performance of parsing and formatting `Date` and `DateTime` type values in text format. +- Improved performance and precision of parsing floating point numbers. +- Lowered memory usage for `JOIN` in the case when the left and right parts have columns with identical names that are not contained in `USING`. +- Improved performance of aggregate functions `varSamp`, `varPop`, `stddevSamp`, `stddevPop`, `covarSamp`, `covarPop`, `corr` by reducing computational stability. The old functions are available under the names `varSampStable`, `varPopStable`, `stddevSampStable`, `stddevPopStable`, `covarSampStable`, `covarPopStable`, `corrStable`. + +#### Bug fixes: {#bug-fixes-28} + +- Fixed data deduplication after running a `DROP` or `DETACH PARTITION` query. In the previous version, dropping a partition and inserting the same data again was not working because inserted blocks were considered duplicates. +- Fixed a bug that could lead to incorrect interpretation of the `WHERE` clause for `CREATE MATERIALIZED VIEW` queries with `POPULATE`. +- Fixed a bug in using the `root_path` parameter in the `zookeeper_servers` configuration. +- Fixed unexpected results of passing the `Date` argument to `toStartOfDay`. +- Fixed the `addMonths` and `subtractMonths` functions and the arithmetic for `INTERVAL n MONTH` in cases when the result has the previous year. +- Added missing support for the `UUID` data type for `DISTINCT`, `JOIN`, and `uniq` aggregate functions and external dictionaries (Evgeniy Ivanov). Support for `UUID` is still incomplete. +- Fixed `SummingMergeTree` behavior in cases when the rows summed to zero. +- Various fixes for the `Kafka` engine (Marek Vavruša). +- Fixed incorrect behavior of the `Join` table engine (Amos Bird). +- Fixed incorrect allocator behavior under FreeBSD and OS X. +- The `extractAll` function now supports empty matches. +- Fixed an error that blocked usage of `libressl` instead of `openssl`. +- Fixed the `CREATE TABLE AS SELECT` query from temporary tables. +- Fixed non-atomicity of updating the replication queue. This could lead to replicas being out of sync until the server restarts. +- Fixed possible overflow in `gcd`, `lcm`, and `modulo` (`%` operator) (Maks Skorokhod). +- `-preprocessed` files are now created after changing `umask` (`umask` can be changed in the config). +- Fixed a bug in the background check of parts (`MergeTreePartChecker`) when using a custom partition key. +- Fixed parsing of tuples (values of the `Tuple` data type) in text formats. +- Improved error messages about incompatible types passed to `multiIf`, `array`, and some other functions. +- Redesigned support for `Nullable` types. Fixed bugs that could lead to a server crash. Fixed almost all other bugs related to `NULL` support: incorrect type conversions in INSERT SELECT, insufficient support for Nullable in HAVING and PREWHERE, `join_use_nulls` mode, Nullable types as arguments of `OR` operator, etc. 
+- Fixed various bugs related to internal semantics of data types. Examples: unnecessary summing of `Enum` type fields in `SummingMergeTree`; alignment of `Enum` types in `Pretty` formats, etc. +- Stricter checks for allowed combinations of composite columns. +- Fixed the overflow when specifying a very large parameter for the `FixedString` data type. +- Fixed a bug in the `topK` aggregate function in a generic case. +- Added the missing check for equality of array sizes in arguments of n-ary variants of aggregate functions with an `-Array` combinator. +- Fixed a bug in `--pager` for `clickhouse-client` (author: ks1322). +- Fixed the precision of the `exp10` function. +- Fixed the behavior of the `visitParamExtract` function for better compliance with documentation. +- Fixed a crash when incorrect data types are specified. +- Fixed the behavior of `DISTINCT` in the case when all columns are constants. +- Fixed query formatting in the case of using the `tupleElement` function with a complex constant expression as the tuple element index. +- Fixed a bug in `Dictionary` tables for `range_hashed` dictionaries. +- Fixed a bug that led to excessive rows in the result of `FULL` and `RIGHT JOIN` (Amos Bird). +- Fixed a server crash when creating and removing temporary files in `config.d` directories during config reload. +- Fixed the `SYSTEM DROP DNS CACHE` query: the cache was flushed but addresses of cluster nodes were not updated. +- Fixed the behavior of `MATERIALIZED VIEW` after executing `DETACH TABLE` for the table under the view (Marek Vavruša). + +#### Build improvements: {#build-improvements-4} + +- The `pbuilder` tool is used for builds. The build process is almost completely independent of the build host environment. +- A single build is used for different OS versions. Packages and binaries have been made compatible with a wide range of Linux systems. +- Added the `clickhouse-test` package. It can be used to run functional tests. +- The source tarball can now be published to the repository. It can be used to reproduce the build without using GitHub. +- Added limited integration with Travis CI. Due to limits on build time in Travis, only the debug build is tested and a limited subset of tests are run. +- Added support for `Cap'n Proto` in the default build. +- Changed the format of documentation sources from `reStructuredText` to `Markdown`. +- Added support for `systemd` (Vladimir Smirnov). It is disabled by default due to incompatibility with some OS images and can be enabled manually. +- For dynamic code generation, `clang` and `lld` are embedded into the `clickhouse` binary. They can also be invoked as `clickhouse clang` and `clickhouse lld`. +- Removed usage of GNU extensions from the code. Enabled the `-Wextra` option. When building with `clang`, the default is `libc++` instead of `libstdc++`. +- Extracted `clickhouse_parsers` and `clickhouse_common_io` libraries to speed up builds of various tools. + +#### Backward incompatible changes: {#backward-incompatible-changes-11} + +- The format for marks in `Log` type tables that contain `Nullable` columns was changed in a backward incompatible way. If you have these tables, you should convert them to the `TinyLog` type before starting up the new server version. To do this, replace `ENGINE = Log` with `ENGINE = TinyLog` in the corresponding `.sql` file in the `metadata` directory. If your table doesn’t have `Nullable` columns or if the type of your table is not `Log`, then you don’t need to do anything. 
+- Removed the `experimental_allow_extended_storage_definition_syntax` setting. Now this feature is enabled by default. +- The `runningIncome` function was renamed to `runningDifferenceStartingWithFirstValue` to avoid confusion. +- Removed the `FROM ARRAY JOIN arr` syntax when ARRAY JOIN is specified directly after FROM with no table (Amos Bird). +- Removed the `BlockTabSeparated` format that was used solely for demonstration purposes. +- Changed the state format for aggregate functions `varSamp`, `varPop`, `stddevSamp`, `stddevPop`, `covarSamp`, `covarPop`, `corr`. If you have stored states of these aggregate functions in tables (using the `AggregateFunction` data type or materialized views with corresponding states), please write to clickhouse-feedback@yandex-team.com. +- In previous server versions there was an undocumented feature: if an aggregate function depends on parameters, you can still specify it without parameters in the AggregateFunction data type. Example: `AggregateFunction(quantiles, UInt64)` instead of `AggregateFunction(quantiles(0.5, 0.9), UInt64)`. This feature was lost. Although it was undocumented, we plan to support it again in future releases. +- Enum data types cannot be used in min/max aggregate functions. This ability will be returned in the next release. + +#### Please note when upgrading: {#please-note-when-upgrading} + +- When doing a rolling update on a cluster, at the point when some of the replicas are running the old version of ClickHouse and some are running the new version, replication is temporarily stopped and the message `unknown parameter 'shard'` appears in the log. Replication will continue after all replicas of the cluster are updated. +- If different versions of ClickHouse are running on the cluster servers, it is possible that distributed queries using the following functions will have incorrect results: `varSamp`, `varPop`, `stddevSamp`, `stddevPop`, `covarSamp`, `covarPop`, `corr`. You should update all cluster nodes. + +## [Changelog for 2017](https://github.com/ClickHouse/ClickHouse/blob/master/docs/en/changelog/2017.md) diff --git a/docs/zh/changelog/2019.md b/docs/zh/changelog/2019.md deleted file mode 120000 index 105ca144fca..00000000000 --- a/docs/zh/changelog/2019.md +++ /dev/null @@ -1 +0,0 @@ -../../en/changelog/2019.md \ No newline at end of file diff --git a/docs/zh/changelog/2019.md b/docs/zh/changelog/2019.md new file mode 100644 index 00000000000..01a0756af14 --- /dev/null +++ b/docs/zh/changelog/2019.md @@ -0,0 +1,2071 @@ +--- +en_copy: true +--- + +## ClickHouse release v19.17 {#clickhouse-release-v19-17} + +### ClickHouse release v19.17.6.36, 2019-12-27 {#clickhouse-release-v19-17-6-36-2019-12-27} + +#### Bug Fix {#bug-fix} + +- Fixed potential buffer overflow in decompress. A malicious user could pass fabricated compressed data that could cause a read past the end of the buffer. This issue was found by Eldar Zaitov from the Yandex information security team. [\#8404](https://github.com/ClickHouse/ClickHouse/pull/8404) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixed possible server crash (`std::terminate`) when the server cannot send or write data in JSON or XML format with values of String data type (that require UTF-8 validation) or when compressing result data with the Brotli algorithm or in some other rare cases.
[\#8384](https://github.com/ClickHouse/ClickHouse/pull/8384) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixed dictionaries with a source from a ClickHouse `VIEW`: now reading such dictionaries doesn’t cause the error `There is no query`. [\#8351](https://github.com/ClickHouse/ClickHouse/pull/8351) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +- Fixed checking whether a client host is allowed by host\_regexp specified in users.xml. [\#8241](https://github.com/ClickHouse/ClickHouse/pull/8241), [\#8342](https://github.com/ClickHouse/ClickHouse/pull/8342) ([Vitaly Baranov](https://github.com/vitlibar)) +- `RENAME TABLE` for a distributed table now renames the folder containing inserted data before sending to shards. This fixes an issue with successive renames `tableA->tableB`, `tableC->tableA`. [\#8306](https://github.com/ClickHouse/ClickHouse/pull/8306) ([tavplubix](https://github.com/tavplubix)) +- `range_hashed` external dictionaries created by DDL queries now allow ranges of arbitrary numeric types. [\#8275](https://github.com/ClickHouse/ClickHouse/pull/8275) ([alesapin](https://github.com/alesapin)) +- Fixed `INSERT INTO table SELECT ... FROM mysql(...)` table function. [\#8234](https://github.com/ClickHouse/ClickHouse/pull/8234) ([tavplubix](https://github.com/tavplubix)) +- Fixed segfault in `INSERT INTO TABLE FUNCTION file()` while inserting into a file that doesn’t exist. Now in this case the file will be created and the insert will then be processed. [\#8177](https://github.com/ClickHouse/ClickHouse/pull/8177) ([Olga Khvostikova](https://github.com/stavrolia)) +- Fixed bitmapAnd error when intersecting an aggregated bitmap and a scalar bitmap. [\#8082](https://github.com/ClickHouse/ClickHouse/pull/8082) ([Yue Huang](https://github.com/moon03432)) +- Fixed segfault when the `EXISTS` query was used without `TABLE` or `DICTIONARY` qualifier, just like `EXISTS t`. [\#8213](https://github.com/ClickHouse/ClickHouse/pull/8213) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixed return type for functions `rand` and `randConstant` in case of a nullable argument. Now the functions always return `UInt32` and never `Nullable(UInt32)`. [\#8204](https://github.com/ClickHouse/ClickHouse/pull/8204) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +- Fixed `DROP DICTIONARY IF EXISTS db.dict`: now it doesn’t throw an exception if `db` doesn’t exist. [\#8185](https://github.com/ClickHouse/ClickHouse/pull/8185) ([Vitaly Baranov](https://github.com/vitlibar)) +- If a table wasn’t completely dropped because of a server crash, the server will try to restore and load it [\#8176](https://github.com/ClickHouse/ClickHouse/pull/8176) ([tavplubix](https://github.com/tavplubix)) +- Fixed a trivial count query for a distributed table if there are more than two shards of the local table. [\#8164](https://github.com/ClickHouse/ClickHouse/pull/8164) ([小路](https://github.com/nicelulu)) +- Fixed a bug that led to a data race in DB::BlockStreamProfileInfo::calculateRowsBeforeLimit() [\#8143](https://github.com/ClickHouse/ClickHouse/pull/8143) ([Alexander Kazakov](https://github.com/Akazz)) +- Fixed `ALTER table MOVE part` executed immediately after merging the specified part, which could cause moving of the part that the specified part was merged into. Now it correctly moves the specified part. [\#8104](https://github.com/ClickHouse/ClickHouse/pull/8104) ([Vladimir Chebotarev](https://github.com/excitoon)) +- Expressions for dictionaries can be specified as strings now.
This is useful for calculation of attributes while extracting data from non-ClickHouse sources, because it allows using non-ClickHouse syntax for those expressions. [\#8098](https://github.com/ClickHouse/ClickHouse/pull/8098) ([alesapin](https://github.com/alesapin)) +- Fixed a very rare race in `clickhouse-copier` because of an overflow in ZXid. [\#8088](https://github.com/ClickHouse/ClickHouse/pull/8088) ([Ding Xiang Fei](https://github.com/dingxiangfei2009)) +- Fixed the bug where, after a query failed (due to “Too many simultaneous queries”, for example), it would not read external tables info, and the + next request would interpret this info as the beginning of the next query, causing an error like `Unknown packet from client`. [\#8084](https://github.com/ClickHouse/ClickHouse/pull/8084) ([Azat Khuzhin](https://github.com/azat)) +- Avoid null dereference after “Unknown packet X from server” [\#8071](https://github.com/ClickHouse/ClickHouse/pull/8071) ([Azat Khuzhin](https://github.com/azat)) +- Restore support of all ICU locales, add the ability to apply collations for constant expressions and add the language name to the system.collations table. [\#8051](https://github.com/ClickHouse/ClickHouse/pull/8051) ([alesapin](https://github.com/alesapin)) +- The number of streams for reading from `StorageFile` and `StorageHDFS` is now limited, to avoid exceeding the memory limit. [\#7981](https://github.com/ClickHouse/ClickHouse/pull/7981) ([alesapin](https://github.com/alesapin)) +- Fixed `CHECK TABLE` query for `*MergeTree` tables without a key. [\#7979](https://github.com/ClickHouse/ClickHouse/pull/7979) ([alesapin](https://github.com/alesapin)) +- Removed the mutation number from a part name in case there were no mutations. This removal improved compatibility with older versions. [\#8250](https://github.com/ClickHouse/ClickHouse/pull/8250) ([alesapin](https://github.com/alesapin)) +- Fixed the bug where mutations were skipped for some attached parts because their data\_version was larger than the table mutation version. [\#7812](https://github.com/ClickHouse/ClickHouse/pull/7812) ([Zhichang Yu](https://github.com/yuzhichang)) +- Allow starting the server with redundant copies of parts after moving them to another device. [\#7810](https://github.com/ClickHouse/ClickHouse/pull/7810) ([Vladimir Chebotarev](https://github.com/excitoon)) +- Fixed the error “Sizes of columns doesn’t match” that might appear when using aggregate function columns. [\#7790](https://github.com/ClickHouse/ClickHouse/pull/7790) ([Boris Granveaud](https://github.com/bgranvea)) +- Now an exception will be thrown in case of using WITH TIES alongside LIMIT BY. And now it’s possible to use TOP with LIMIT BY. [\#7637](https://github.com/ClickHouse/ClickHouse/pull/7637) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) +- Fix dictionary reload if it has `invalidate_query`, which stopped updates after some exception on previous update tries. [\#8029](https://github.com/ClickHouse/ClickHouse/pull/8029) ([alesapin](https://github.com/alesapin)) + +### ClickHouse release v19.17.4.11, 2019-11-22 {#clickhouse-release-v19-17-4-11-2019-11-22} + +#### Backward Incompatible Change {#backward-incompatible-change} + +- Using a column instead of AST to store scalar subquery results for better performance. Setting `enable_scalar_subquery_optimization` was added in 19.17 and it was enabled by default. It leads to errors like [this](https://github.com/ClickHouse/ClickHouse/issues/7851) during upgrade to 19.17.2 or 19.17.3 from previous versions.
This setting was disabled by default in 19.17.4, to make it possible to upgrade from 19.16 and older versions without errors. [\#7392](https://github.com/ClickHouse/ClickHouse/pull/7392) ([Amos Bird](https://github.com/amosbird)) + +#### New Feature {#new-feature} + +- Add the ability to create dictionaries with DDL queries. [\#7360](https://github.com/ClickHouse/ClickHouse/pull/7360) ([alesapin](https://github.com/alesapin)) +- Make the `bloom_filter` type of index support `LowCardinality` and `Nullable` [\#7363](https://github.com/ClickHouse/ClickHouse/issues/7363) [\#7561](https://github.com/ClickHouse/ClickHouse/pull/7561) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +- Add function `isValidJSON` to check that the passed string is valid JSON. [\#5910](https://github.com/ClickHouse/ClickHouse/issues/5910) [\#7293](https://github.com/ClickHouse/ClickHouse/pull/7293) ([Vdimir](https://github.com/Vdimir)) +- Implement `arrayCompact` function [\#7328](https://github.com/ClickHouse/ClickHouse/pull/7328) ([Memo](https://github.com/Joeywzr)) +- Created function `hex` for Decimal numbers. It works like `hex(reinterpretAsString())`, but doesn’t remove trailing zero bytes. [\#7355](https://github.com/ClickHouse/ClickHouse/pull/7355) ([Mikhail Korotov](https://github.com/millb)) +- Add `arrayFill` and `arrayReverseFill` functions, which replace elements with the elements in front of/behind them in the array. [\#7380](https://github.com/ClickHouse/ClickHouse/pull/7380) ([hcz](https://github.com/hczhcz)) +- Add `CRC32IEEE()`/`CRC64()` support [\#7480](https://github.com/ClickHouse/ClickHouse/pull/7480) ([Azat Khuzhin](https://github.com/azat)) +- Implement `char` function similar to the one in [mysql](https://dev.mysql.com/doc/refman/8.0/en/string-functions.html#function_char) [\#7486](https://github.com/ClickHouse/ClickHouse/pull/7486) ([sundyli](https://github.com/sundy-li)) +- Add `bitmapTransform` function. It transforms an array of values in a bitmap to another array of values; the result is a new bitmap [\#7598](https://github.com/ClickHouse/ClickHouse/pull/7598) ([Zhichang Yu](https://github.com/yuzhichang)) +- Implemented `javaHashUTF16LE()` function [\#7651](https://github.com/ClickHouse/ClickHouse/pull/7651) ([achimbab](https://github.com/achimbab)) +- Add `_shard_num` virtual column for the Distributed engine [\#7624](https://github.com/ClickHouse/ClickHouse/pull/7624) ([Azat Khuzhin](https://github.com/azat)) + +#### Experimental Feature {#experimental-feature} + +- Support for processors (new query execution pipeline) in `MergeTree`. [\#7181](https://github.com/ClickHouse/ClickHouse/pull/7181) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) + +#### Bug Fix {#bug-fix-1} + +- Fix incorrect float parsing in `Values` [\#7817](https://github.com/ClickHouse/ClickHouse/issues/7817) [\#7870](https://github.com/ClickHouse/ClickHouse/pull/7870) ([tavplubix](https://github.com/tavplubix)) +- Fix rare deadlock which can happen when trace\_log is enabled. [\#7838](https://github.com/ClickHouse/ClickHouse/pull/7838) ([filimonov](https://github.com/filimonov)) +- Prevent message duplication when producing into a Kafka table that has any MVs selecting from it [\#7265](https://github.com/ClickHouse/ClickHouse/pull/7265) ([Ivan](https://github.com/abyss7)) +- Support for `Array(LowCardinality(Nullable(String)))` in `IN`.
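+  An illustrative query of the kind this fixes (a sketch; `t` is a hypothetical table with a column `arr Array(LowCardinality(Nullable(String)))`): +  ```sql +  SELECT count() FROM t WHERE arr IN (SELECT arr FROM t); +  ```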
Resolves [\#7364](https://github.com/ClickHouse/ClickHouse/issues/7364) [\#7366](https://github.com/ClickHouse/ClickHouse/pull/7366) ([achimbab](https://github.com/achimbab)) +- Add handling of `SQL_TINYINT` and `SQL_BIGINT`, and fix handling of `SQL_FLOAT` data source types in ODBC Bridge. [\#7491](https://github.com/ClickHouse/ClickHouse/pull/7491) ([Denis Glazachev](https://github.com/traceon)) +- Fix aggregation (`avg` and quantiles) over empty decimal columns [\#7431](https://github.com/ClickHouse/ClickHouse/pull/7431) ([Andrey Konyaev](https://github.com/akonyaev90)) +- Fix `INSERT` into Distributed with `MATERIALIZED` columns [\#7377](https://github.com/ClickHouse/ClickHouse/pull/7377) ([Azat Khuzhin](https://github.com/azat)) +- Make `MOVE PARTITION` work if some parts of the partition are already on the destination disk or volume [\#7434](https://github.com/ClickHouse/ClickHouse/pull/7434) ([Vladimir Chebotarev](https://github.com/excitoon)) +- Fixed a bug with hardlinks failing to be created during mutations in `ReplicatedMergeTree` in multi-disk configurations. [\#7558](https://github.com/ClickHouse/ClickHouse/pull/7558) ([Vladimir Chebotarev](https://github.com/excitoon)) +- Fixed a bug with a mutation on a MergeTree when the whole part remains unchanged and the best space is found on another disk [\#7602](https://github.com/ClickHouse/ClickHouse/pull/7602) ([Vladimir Chebotarev](https://github.com/excitoon)) +- Fixed a bug with `keep_free_space_ratio` not being read from the disks configuration [\#7645](https://github.com/ClickHouse/ClickHouse/pull/7645) ([Vladimir Chebotarev](https://github.com/excitoon)) +- Fix a bug with tables that contain only `Tuple` columns or columns with complex paths. Fixes [7541](https://github.com/ClickHouse/ClickHouse/issues/7541). [\#7545](https://github.com/ClickHouse/ClickHouse/pull/7545) ([alesapin](https://github.com/alesapin)) +- Do not account memory for the Buffer engine in the max\_memory\_usage limit [\#7552](https://github.com/ClickHouse/ClickHouse/pull/7552) ([Azat Khuzhin](https://github.com/azat)) +- Fix final mark usage in `MergeTree` tables ordered by `tuple()`. In rare cases it could lead to a `Can't adjust last granule` error during `SELECT`. [\#7639](https://github.com/ClickHouse/ClickHouse/pull/7639) ([Anton Popov](https://github.com/CurtizJ)) +- Fix a bug in mutations that have a predicate with actions that require context (for example, functions for JSON), which could lead to crashes or strange exceptions. [\#7664](https://github.com/ClickHouse/ClickHouse/pull/7664) ([alesapin](https://github.com/alesapin)) +- Fix mismatch of database and table names escaping in `data/` and `shadow/` directories [\#7575](https://github.com/ClickHouse/ClickHouse/pull/7575) ([Alexander Burmak](https://github.com/Alex-Burmak)) +- Support duplicated keys in RIGHT\|FULL JOINs, e.g. `ON t.x = u.x AND t.x = u.y`. Fix crash in this case. [\#7586](https://github.com/ClickHouse/ClickHouse/pull/7586) ([Artem Zuikov](https://github.com/4ertus2)) +- Fix `Not found column in block` when joining on an expression with RIGHT or FULL JOIN. [\#7641](https://github.com/ClickHouse/ClickHouse/pull/7641) ([Artem Zuikov](https://github.com/4ertus2)) +- One more attempt to fix the infinite loop in `PrettySpace` format [\#7591](https://github.com/ClickHouse/ClickHouse/pull/7591) ([Olga Khvostikova](https://github.com/stavrolia)) +- Fix a bug in the `concat` function when all arguments were `FixedString` of the same size.
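+  A minimal sketch of the previously affected case (the values are arbitrary): +  ```sql +  SELECT concat(toFixedString('ab', 2), toFixedString('cd', 2)); -- 'abcd' +  ```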
[\#7635](https://github.com/ClickHouse/ClickHouse/pull/7635) ([alesapin](https://github.com/alesapin)) +- Fixed an exception in case of using a single argument while defining S3, URL and HDFS storages. [\#7618](https://github.com/ClickHouse/ClickHouse/pull/7618) ([Vladimir Chebotarev](https://github.com/excitoon)) +- Fix scope of the InterpreterSelectQuery for views with a query [\#7601](https://github.com/ClickHouse/ClickHouse/pull/7601) ([Azat Khuzhin](https://github.com/azat)) + +#### Improvement {#improvement} + +- `Nullable` columns recognized and NULL-values handled correctly by ODBC-bridge [\#7402](https://github.com/ClickHouse/ClickHouse/pull/7402) ([Vasily Nemkov](https://github.com/Enmk)) +- Write current batch for distributed send atomically [\#7600](https://github.com/ClickHouse/ClickHouse/pull/7600) ([Azat Khuzhin](https://github.com/azat)) +- Throw an exception if we cannot detect the table for a column name in a query. [\#7358](https://github.com/ClickHouse/ClickHouse/pull/7358) ([Artem Zuikov](https://github.com/4ertus2)) +- Add `merge_max_block_size` setting to `MergeTreeSettings` [\#7412](https://github.com/ClickHouse/ClickHouse/pull/7412) ([Artem Zuikov](https://github.com/4ertus2)) +- Queries with `HAVING` and without `GROUP BY` assume group by constant. So, `SELECT 1 HAVING 1` now returns a result. [\#7496](https://github.com/ClickHouse/ClickHouse/pull/7496) ([Amos Bird](https://github.com/amosbird)) +- Support parsing `(X,)` as a tuple, similar to Python. [\#7501](https://github.com/ClickHouse/ClickHouse/pull/7501), [\#7562](https://github.com/ClickHouse/ClickHouse/pull/7562) ([Amos Bird](https://github.com/amosbird)) +- Make the `range` function behave almost like the Python one. [\#7518](https://github.com/ClickHouse/ClickHouse/pull/7518) ([sundyli](https://github.com/sundy-li)) +- Add `constraints` columns to table `system.settings` [\#7553](https://github.com/ClickHouse/ClickHouse/pull/7553) ([Vitaly Baranov](https://github.com/vitlibar)) +- Better Null format for the TCP handler, so that it’s possible to use `select ignore() from table format Null` for perf measurement via clickhouse-client [\#7606](https://github.com/ClickHouse/ClickHouse/pull/7606) ([Amos Bird](https://github.com/amosbird)) +- Queries like `CREATE TABLE ... AS (SELECT (1, 2))` are parsed correctly [\#7542](https://github.com/ClickHouse/ClickHouse/pull/7542) ([hcz](https://github.com/hczhcz)) + +#### Performance Improvement {#performance-improvement} + +- The performance of aggregation over short string keys is improved. [\#6243](https://github.com/ClickHouse/ClickHouse/pull/6243) ([Alexander Kuzmenkov](https://github.com/akuzm), [Amos Bird](https://github.com/amosbird)) +- Run another pass of syntax/expression analysis to get potential optimizations after constant predicates are folded. [\#7497](https://github.com/ClickHouse/ClickHouse/pull/7497) ([Amos Bird](https://github.com/amosbird)) +- Use storage meta info to evaluate trivial `SELECT count() FROM table;` [\#7510](https://github.com/ClickHouse/ClickHouse/pull/7510) ([Amos Bird](https://github.com/amosbird), [alexey-milovidov](https://github.com/alexey-milovidov)) +- Vectorize processing `arrayReduce` similar to Aggregator `addBatch`.
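+  For reference, `arrayReduce` applies an aggregate function to array elements, e.g.: +  ```sql +  SELECT arrayReduce('max', [1, 2, 3]); -- returns 3 +  ```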
[\#7608](https://github.com/ClickHouse/ClickHouse/pull/7608) ([Amos Bird](https://github.com/amosbird)) +- Minor improvements in performance of `Kafka` consumption [\#7475](https://github.com/ClickHouse/ClickHouse/pull/7475) ([Ivan](https://github.com/abyss7)) + +#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement} + +- Add support for cross-compiling to the CPU architecture AARCH64. Refactor packager script. [\#7370](https://github.com/ClickHouse/ClickHouse/pull/7370) [\#7539](https://github.com/ClickHouse/ClickHouse/pull/7539) ([Ivan](https://github.com/abyss7)) +- Unpack darwin-x86\_64 and linux-aarch64 toolchains into a mounted Docker volume when building packages [\#7534](https://github.com/ClickHouse/ClickHouse/pull/7534) ([Ivan](https://github.com/abyss7)) +- Update Docker Image for Binary Packager [\#7474](https://github.com/ClickHouse/ClickHouse/pull/7474) ([Ivan](https://github.com/abyss7)) +- Fixed compile errors on macOS Catalina [\#7585](https://github.com/ClickHouse/ClickHouse/pull/7585) ([Ernest Poletaev](https://github.com/ernestp)) +- Some refactoring in query analysis logic: split a complex class into several simple ones. [\#7454](https://github.com/ClickHouse/ClickHouse/pull/7454) ([Artem Zuikov](https://github.com/4ertus2)) +- Fix build without submodules [\#7295](https://github.com/ClickHouse/ClickHouse/pull/7295) ([proller](https://github.com/proller)) +- Better `add_globs` in CMake files [\#7418](https://github.com/ClickHouse/ClickHouse/pull/7418) ([Amos Bird](https://github.com/amosbird)) +- Remove hardcoded paths in `unwind` target [\#7460](https://github.com/ClickHouse/ClickHouse/pull/7460) ([Konstantin Podshumok](https://github.com/podshumok)) +- Allow using the MySQL format without SSL [\#7524](https://github.com/ClickHouse/ClickHouse/pull/7524) ([proller](https://github.com/proller)) + +#### Other {#other} + +- Added ANTLR4 grammar for ClickHouse SQL dialect [\#7595](https://github.com/ClickHouse/ClickHouse/issues/7595) [\#7596](https://github.com/ClickHouse/ClickHouse/pull/7596) ([alexey-milovidov](https://github.com/alexey-milovidov)) + +## ClickHouse release v19.16 {#clickhouse-release-v19-16} + +#### ClickHouse release v19.16.14.65, 2020-03-25 + +* Fixed up a bug in batched calculations of ternary logical operations on multiple arguments (more than 10). [#8718](https://github.com/ClickHouse/ClickHouse/pull/8718) ([Alexander Kazakov](https://github.com/Akazz)) This bugfix was backported to version 19.16 by a special request from Altinity. + +#### ClickHouse release v19.16.14.65, 2020-03-05 {#clickhouse-release-v19-16-14-65-2020-03-05} + +- Fix distributed subqueries incompatibility with older ClickHouse versions. Fixes [\#7851](https://github.com/ClickHouse/ClickHouse/issues/7851) + [(tavplubix)](https://github.com/tavplubix) +- When executing `CREATE` query, fold constant expressions in storage engine arguments. Replace empty database name with the current database. Fixes [\#6508](https://github.com/ClickHouse/ClickHouse/issues/6508), [\#3492](https://github.com/ClickHouse/ClickHouse/issues/3492). Also fix check for local address in `ClickHouseDictionarySource`. + [\#9262](https://github.com/ClickHouse/ClickHouse/pull/9262) [(tavplubix)](https://github.com/tavplubix) +- Now background merges in the `*MergeTree` table engines family preserve the storage policy volume order more accurately.
+ [\#8549](https://github.com/ClickHouse/ClickHouse/pull/8549) ([Vladimir Chebotarev](https://github.com/excitoon)) +- Prevent losing data in `Kafka` in rare cases when an exception happens after reading the suffix but before commit. Fixes [\#9378](https://github.com/ClickHouse/ClickHouse/issues/9378). Related: [\#7175](https://github.com/ClickHouse/ClickHouse/issues/7175) + [\#9507](https://github.com/ClickHouse/ClickHouse/pull/9507) [(filimonov)](https://github.com/filimonov) +- Fix a bug leading to server termination when trying to use / drop a `Kafka` table created with wrong parameters. Fixes [\#9494](https://github.com/ClickHouse/ClickHouse/issues/9494). Incorporates [\#9507](https://github.com/ClickHouse/ClickHouse/issues/9507). + [\#9513](https://github.com/ClickHouse/ClickHouse/pull/9513) [(filimonov)](https://github.com/filimonov) +- Allow using `MaterializedView` with subqueries above `Kafka` tables. + [\#8197](https://github.com/ClickHouse/ClickHouse/pull/8197) ([filimonov](https://github.com/filimonov)) + +#### New Feature {#new-feature-1} + +- Add `deduplicate_blocks_in_dependent_materialized_views` option to control the behaviour of idempotent inserts into tables with materialized views. This new feature was added to the bugfix release by a special request from Altinity. + [\#9070](https://github.com/ClickHouse/ClickHouse/pull/9070) [(urykhy)](https://github.com/urykhy) + +### ClickHouse release v19.16.2.2, 2019-10-30 {#clickhouse-release-v19-16-2-2-2019-10-30} + +#### Backward Incompatible Change {#backward-incompatible-change-1} + +- Add missing arity validation for count/countIf. + [\#7095](https://github.com/ClickHouse/ClickHouse/issues/7095) + [\#7298](https://github.com/ClickHouse/ClickHouse/pull/7298) ([Vdimir](https://github.com/Vdimir)) +- Remove legacy `asterisk_left_columns_only` setting (it was disabled by default). + [\#7335](https://github.com/ClickHouse/ClickHouse/pull/7335) ([Artem + Zuikov](https://github.com/4ertus2)) +- Format strings for Template data format are now specified in files. + [\#7118](https://github.com/ClickHouse/ClickHouse/pull/7118) + ([tavplubix](https://github.com/tavplubix)) + +#### New Feature {#new-feature-2} + +- Introduce uniqCombined64() to calculate cardinality greater than UINT\_MAX. + [\#7213](https://github.com/ClickHouse/ClickHouse/pull/7213), + [\#7222](https://github.com/ClickHouse/ClickHouse/pull/7222) ([Azat + Khuzhin](https://github.com/azat)) +- Support Bloom filter indexes on Array columns. + [\#6984](https://github.com/ClickHouse/ClickHouse/pull/6984) + ([achimbab](https://github.com/achimbab)) +- Add a function `getMacro(name)` that returns a String with the value of the corresponding `<macros>` + from server configuration. [\#7240](https://github.com/ClickHouse/ClickHouse/pull/7240) + ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Set two configuration options for a dictionary based on an HTTP source: `credentials` and + `http-headers`. [\#7092](https://github.com/ClickHouse/ClickHouse/pull/7092) ([Guillaume + Tassery](https://github.com/YiuRULE)) +- Add a new ProfileEvent `Merge` that counts the number of launched background merges. + [\#7093](https://github.com/ClickHouse/ClickHouse/pull/7093) ([Mikhail + Korotov](https://github.com/millb)) +- Add `fullHostName` function that returns a fully qualified domain name.
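+  For instance (the result depends on the host; the output shown is illustrative): +  ```sql +  SELECT fullHostName(); -- e.g. 'server01.example.com' +  ```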
+ [\#7263](https://github.com/ClickHouse/ClickHouse/issues/7263) + [\#7291](https://github.com/ClickHouse/ClickHouse/pull/7291) ([sundyli](https://github.com/sundy-li)) +- Add functions `arraySplit` and `arrayReverseSplit`, which split an array by “cut off” + conditions. They are useful in time sequence handling. + [\#7294](https://github.com/ClickHouse/ClickHouse/pull/7294) ([hcz](https://github.com/hczhcz)) +- Add new functions that return the Array of all matched indices in the multiMatch family of functions. + [\#7299](https://github.com/ClickHouse/ClickHouse/pull/7299) ([Danila + Kutenin](https://github.com/danlark1)) +- Add a new database engine `Lazy` that is optimized for storing a large number of small -Log + tables. [\#7171](https://github.com/ClickHouse/ClickHouse/pull/7171) ([Nikita + Vasilev](https://github.com/nikvas0)) +- Add aggregate functions groupBitmapAnd, -Or, -Xor for bitmap columns. [\#7109](https://github.com/ClickHouse/ClickHouse/pull/7109) ([Zhichang + Yu](https://github.com/yuzhichang)) +- Add aggregate function combinators -OrNull and -OrDefault, which return null + or default values when there is nothing to aggregate. + [\#7331](https://github.com/ClickHouse/ClickHouse/pull/7331) + ([hcz](https://github.com/hczhcz)) +- Introduce CustomSeparated data format that supports custom escaping and + delimiter rules. [\#7118](https://github.com/ClickHouse/ClickHouse/pull/7118) + ([tavplubix](https://github.com/tavplubix)) +- Support Redis as a source of external dictionary. [\#4361](https://github.com/ClickHouse/ClickHouse/pull/4361) [\#6962](https://github.com/ClickHouse/ClickHouse/pull/6962) ([comunodi](https://github.com/comunodi), [Anton + Popov](https://github.com/CurtizJ)) + +#### Bug Fix {#bug-fix-2} + +- Fix wrong query result if it has a `WHERE IN (SELECT ...)` section and `optimize_read_in_order` is + used. [\#7371](https://github.com/ClickHouse/ClickHouse/pull/7371) ([Anton + Popov](https://github.com/CurtizJ)) +- Disabled MariaDB authentication plugin, which depends on files outside of the project. + [\#7140](https://github.com/ClickHouse/ClickHouse/pull/7140) ([Yuriy + Baranov](https://github.com/yurriy)) +- Fix exception `Cannot convert column ... because it is constant but values of constants are different in source and result` which could rarely happen when functions `now()`, `today()`, + `yesterday()`, `randConstant()` are used. + [\#7156](https://github.com/ClickHouse/ClickHouse/pull/7156) ([Nikolai + Kochetov](https://github.com/KochetovNicolai)) +- Fixed issue of using HTTP keep-alive timeout instead of TCP keep-alive timeout. + [\#7351](https://github.com/ClickHouse/ClickHouse/pull/7351) ([Vasily + Nemkov](https://github.com/Enmk)) +- Fixed a segmentation fault in groupBitmapOr (issue [\#7109](https://github.com/ClickHouse/ClickHouse/issues/7109)). + [\#7289](https://github.com/ClickHouse/ClickHouse/pull/7289) ([Zhichang + Yu](https://github.com/yuzhichang)) +- For materialized views the commit for Kafka is called after all data were written. + [\#7175](https://github.com/ClickHouse/ClickHouse/pull/7175) ([Ivan](https://github.com/abyss7)) +- Fixed wrong `duration_ms` value in `system.part_log` table. It was ten times off. + [\#7172](https://github.com/ClickHouse/ClickHouse/pull/7172) ([Vladimir + Chebotarev](https://github.com/excitoon)) +- A quick fix to resolve the crash in the LIVE VIEW table and re-enable all LIVE VIEW tests.
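+  For context, a minimal `LIVE VIEW` usage sketch (experimental at the time; `t` is a hypothetical table): +  ```sql +  SET allow_experimental_live_view = 1; +  CREATE LIVE VIEW lv AS SELECT count() FROM t; +  WATCH lv; +  ```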
+ [\#7201](https://github.com/ClickHouse/ClickHouse/pull/7201) + ([vzakaznikov](https://github.com/vzakaznikov)) +- Serialize NULL values correctly in min/max indexes of MergeTree parts. + [\#7234](https://github.com/ClickHouse/ClickHouse/pull/7234) ([Alexander + Kuzmenkov](https://github.com/akuzm)) +- Don’t put virtual columns into .sql metadata when the table is created as `CREATE TABLE AS`. + [\#7183](https://github.com/ClickHouse/ClickHouse/pull/7183) ([Ivan](https://github.com/abyss7)) +- Fix segmentation fault in `ATTACH PART` query. + [\#7185](https://github.com/ClickHouse/ClickHouse/pull/7185) + ([alesapin](https://github.com/alesapin)) +- Fix wrong result for some queries given by the optimization of empty IN subqueries and empty + INNER/RIGHT JOIN. [\#7284](https://github.com/ClickHouse/ClickHouse/pull/7284) ([Nikolai + Kochetov](https://github.com/KochetovNicolai)) +- Fixed AddressSanitizer error in the LIVE VIEW getHeader() method. + [\#7271](https://github.com/ClickHouse/ClickHouse/pull/7271) + ([vzakaznikov](https://github.com/vzakaznikov)) + +#### Improvement {#improvement-1} + +- Add a message in case a queue\_wait\_max\_ms wait takes place. + [\#7390](https://github.com/ClickHouse/ClickHouse/pull/7390) ([Azat + Khuzhin](https://github.com/azat)) +- Made setting `s3_min_upload_part_size` table-level. + [\#7059](https://github.com/ClickHouse/ClickHouse/pull/7059) ([Vladimir + Chebotarev](https://github.com/excitoon)) +- Check TTL in StorageFactory. [\#7304](https://github.com/ClickHouse/ClickHouse/pull/7304) + ([sundyli](https://github.com/sundy-li)) +- Squash left-hand blocks in partial merge join (optimization). + [\#7122](https://github.com/ClickHouse/ClickHouse/pull/7122) ([Artem + Zuikov](https://github.com/4ertus2)) +- Do not allow non-deterministic functions in mutations of Replicated table engines, because this + can introduce inconsistencies between replicas. + [\#7247](https://github.com/ClickHouse/ClickHouse/pull/7247) ([Alexander + Kazakov](https://github.com/Akazz)) +- Disable the memory tracker while converting an exception stack trace to a string. It can prevent the loss + of error messages of type `Memory limit exceeded` on the server, which caused the `Attempt to read after eof` exception on the client. [\#7264](https://github.com/ClickHouse/ClickHouse/pull/7264) + ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +- Miscellaneous format improvements. Resolves + [\#6033](https://github.com/ClickHouse/ClickHouse/issues/6033), + [\#2633](https://github.com/ClickHouse/ClickHouse/issues/2633), + [\#6611](https://github.com/ClickHouse/ClickHouse/issues/6611), + [\#6742](https://github.com/ClickHouse/ClickHouse/issues/6742) + [\#7215](https://github.com/ClickHouse/ClickHouse/pull/7215) + ([tavplubix](https://github.com/tavplubix)) +- ClickHouse ignores values on the right side of the IN operator that are not convertible to the left + side type. Make it work properly for compound types – Array and Tuple. + [\#7283](https://github.com/ClickHouse/ClickHouse/pull/7283) ([Alexander + Kuzmenkov](https://github.com/akuzm)) +- Support missing inequalities for ASOF JOIN. It’s possible to join the less-or-equal variant and the strict + greater and less variants for the ASOF column in ON syntax. + [\#7282](https://github.com/ClickHouse/ClickHouse/pull/7282) ([Artem + Zuikov](https://github.com/4ertus2)) +- Optimize partial merge join.
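+  The partial merge join is enabled via the `partial_merge_join` setting (see the 19.15 notes below); an illustrative use with hypothetical tables `a` and `b`: +  ```sql +  SET partial_merge_join = 1; +  SELECT a.k, b.v FROM a INNER JOIN b ON a.k = b.k; +  ```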
[\#7070](https://github.com/ClickHouse/ClickHouse/pull/7070) + ([Artem Zuikov](https://github.com/4ertus2)) +- Do not use more than 98K of memory in uniqCombined functions. + [\#7236](https://github.com/ClickHouse/ClickHouse/pull/7236), + [\#7270](https://github.com/ClickHouse/ClickHouse/pull/7270) ([Azat + Khuzhin](https://github.com/azat)) +- Flush parts of the right-hand joining table to disk in PartialMergeJoin (if there is not enough + memory). Load data back when needed. [\#7186](https://github.com/ClickHouse/ClickHouse/pull/7186) + ([Artem Zuikov](https://github.com/4ertus2)) + +#### Performance Improvement {#performance-improvement-1} + +- Speed up joinGet with const arguments by avoiding data duplication. + [\#7359](https://github.com/ClickHouse/ClickHouse/pull/7359) ([Amos + Bird](https://github.com/amosbird)) +- Return early if the subquery is empty. + [\#7007](https://github.com/ClickHouse/ClickHouse/pull/7007) ([小路](https://github.com/nicelulu)) +- Optimize parsing of SQL expressions in Values. + [\#6781](https://github.com/ClickHouse/ClickHouse/pull/6781) + ([tavplubix](https://github.com/tavplubix)) + +#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-1} + +- Disable some contribs for cross-compilation to Mac OS. + [\#7101](https://github.com/ClickHouse/ClickHouse/pull/7101) ([Ivan](https://github.com/abyss7)) +- Add missing linking with PocoXML for clickhouse\_common\_io. + [\#7200](https://github.com/ClickHouse/ClickHouse/pull/7200) ([Azat + Khuzhin](https://github.com/azat)) +- Accept multiple test filter arguments in clickhouse-test. + [\#7226](https://github.com/ClickHouse/ClickHouse/pull/7226) ([Alexander + Kuzmenkov](https://github.com/akuzm)) +- Enable musl and jemalloc for ARM. [\#7300](https://github.com/ClickHouse/ClickHouse/pull/7300) + ([Amos Bird](https://github.com/amosbird)) +- Added `--client-option` parameter to `clickhouse-test` to pass additional parameters to the client. + [\#7277](https://github.com/ClickHouse/ClickHouse/pull/7277) ([Nikolai + Kochetov](https://github.com/KochetovNicolai)) +- Preserve existing configs on rpm package upgrade. + [\#7103](https://github.com/ClickHouse/ClickHouse/pull/7103) + ([filimonov](https://github.com/filimonov)) +- Fix errors detected by PVS. [\#7153](https://github.com/ClickHouse/ClickHouse/pull/7153) ([Artem + Zuikov](https://github.com/4ertus2)) +- Fix build for Darwin. [\#7149](https://github.com/ClickHouse/ClickHouse/pull/7149) + ([Ivan](https://github.com/abyss7)) +- glibc 2.29 compatibility. [\#7142](https://github.com/ClickHouse/ClickHouse/pull/7142) ([Amos + Bird](https://github.com/amosbird)) +- Make sure dh\_clean does not touch potential source files. + [\#7205](https://github.com/ClickHouse/ClickHouse/pull/7205) ([Amos + Bird](https://github.com/amosbird)) +- Attempt to avoid conflict when updating from altinity rpm - it has a config file packaged separately + in clickhouse-server-common. [\#7073](https://github.com/ClickHouse/ClickHouse/pull/7073) + ([filimonov](https://github.com/filimonov)) +- Optimize some header files for faster rebuilds. + [\#7212](https://github.com/ClickHouse/ClickHouse/pull/7212), + [\#7231](https://github.com/ClickHouse/ClickHouse/pull/7231) ([Alexander + Kuzmenkov](https://github.com/akuzm)) +- Add performance tests for Date and DateTime. [\#7332](https://github.com/ClickHouse/ClickHouse/pull/7332) ([Vasily + Nemkov](https://github.com/Enmk)) +- Fix some tests that contained non-deterministic mutations.
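+  An example of the kind of non-deterministic mutation meant here (hypothetical table `t`; per the improvement above, such mutations are rejected on Replicated engines): +  ```sql +  ALTER TABLE t UPDATE v = rand() WHERE 1; -- result differs between runs/replicas +  ```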
+ [\#7132](https://github.com/ClickHouse/ClickHouse/pull/7132) ([Alexander + Kazakov](https://github.com/Akazz)) +- Add build with MemorySanitizer to CI. [\#7066](https://github.com/ClickHouse/ClickHouse/pull/7066) + ([Alexander Kuzmenkov](https://github.com/akuzm)) +- Avoid use of uninitialized values in MetricsTransmitter. + [\#7158](https://github.com/ClickHouse/ClickHouse/pull/7158) ([Azat + Khuzhin](https://github.com/azat)) +- Fix some issues in Fields found by MemorySanitizer. + [\#7135](https://github.com/ClickHouse/ClickHouse/pull/7135), + [\#7179](https://github.com/ClickHouse/ClickHouse/pull/7179) ([Alexander + Kuzmenkov](https://github.com/akuzm)), [\#7376](https://github.com/ClickHouse/ClickHouse/pull/7376) + ([Amos Bird](https://github.com/amosbird)) +- Fix undefined behavior in murmurhash32. [\#7388](https://github.com/ClickHouse/ClickHouse/pull/7388) ([Amos + Bird](https://github.com/amosbird)) +- Fix undefined behavior in StoragesInfoStream. [\#7384](https://github.com/ClickHouse/ClickHouse/pull/7384) + ([tavplubix](https://github.com/tavplubix)) +- Fixed constant expression folding for external database engines (MySQL, ODBC, JDBC). In previous + versions it wasn’t working for multiple constant expressions and was not working at all for Date, + DateTime and UUID. This fixes [\#7245](https://github.com/ClickHouse/ClickHouse/issues/7245) + [\#7252](https://github.com/ClickHouse/ClickHouse/pull/7252) + ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixed ThreadSanitizer data race error in the LIVE VIEW when accessing the no\_users\_thread variable. + [\#7353](https://github.com/ClickHouse/ClickHouse/pull/7353) + ([vzakaznikov](https://github.com/vzakaznikov)) +- Get rid of malloc symbols in libcommon + [\#7134](https://github.com/ClickHouse/ClickHouse/pull/7134), + [\#7065](https://github.com/ClickHouse/ClickHouse/pull/7065) ([Amos + Bird](https://github.com/amosbird)) +- Add global flag ENABLE\_LIBRARIES for disabling all libraries. + [\#7063](https://github.com/ClickHouse/ClickHouse/pull/7063) + ([proller](https://github.com/proller)) + +#### Code cleanup {#code-cleanup} + +- Generalize configuration repository to prepare for DDL for Dictionaries. [\#7155](https://github.com/ClickHouse/ClickHouse/pull/7155) + ([alesapin](https://github.com/alesapin)) +- Parser for dictionaries DDL without any semantics. + [\#7209](https://github.com/ClickHouse/ClickHouse/pull/7209) + ([alesapin](https://github.com/alesapin)) +- Split ParserCreateQuery into different smaller parsers. + [\#7253](https://github.com/ClickHouse/ClickHouse/pull/7253) + ([alesapin](https://github.com/alesapin)) +- Small refactoring and renaming near external dictionaries. + [\#7111](https://github.com/ClickHouse/ClickHouse/pull/7111) + ([alesapin](https://github.com/alesapin)) +- Refactor some code to prepare for role-based access control. [\#7235](https://github.com/ClickHouse/ClickHouse/pull/7235) ([Vitaly + Baranov](https://github.com/vitlibar)) +- Some improvements in DatabaseOrdinary code. + [\#7086](https://github.com/ClickHouse/ClickHouse/pull/7086) ([Nikita + Vasilev](https://github.com/nikvas0)) +- Do not use iterators in find() and emplace() methods of hash tables. + [\#7026](https://github.com/ClickHouse/ClickHouse/pull/7026) ([Alexander + Kuzmenkov](https://github.com/akuzm)) +- Fix getMultipleValuesFromConfig in the case when the parameter root is not empty.
[\#7374](https://github.com/ClickHouse/ClickHouse/pull/7374) + ([Mikhail Korotov](https://github.com/millb)) +- Remove some copy-paste (TemporaryFile and TemporaryFileStream) + [\#7166](https://github.com/ClickHouse/ClickHouse/pull/7166) ([Artem + Zuikov](https://github.com/4ertus2)) +- Improved code readability a little bit (`MergeTreeData::getActiveContainingPart`). + [\#7361](https://github.com/ClickHouse/ClickHouse/pull/7361) ([Vladimir + Chebotarev](https://github.com/excitoon)) +- Wait for all scheduled jobs, which are using local objects, if `ThreadPool::schedule(...)` throws + an exception. Rename `ThreadPool::schedule(...)` to `ThreadPool::scheduleOrThrowOnError(...)` and + fix comments to make it obvious that it may throw. + [\#7350](https://github.com/ClickHouse/ClickHouse/pull/7350) + ([tavplubix](https://github.com/tavplubix)) + +## ClickHouse release 19.15 {#clickhouse-release-19-15} + +### ClickHouse release 19.15.4.10, 2019-10-31 {#clickhouse-release-19-15-4-10-2019-10-31} + +#### Bug Fix {#bug-fix-3} + +- Added handling of SQL\_TINYINT and SQL\_BIGINT, and fixed handling of SQL\_FLOAT data source types in ODBC Bridge. + [\#7491](https://github.com/ClickHouse/ClickHouse/pull/7491) ([Denis Glazachev](https://github.com/traceon)) +- Allowed having some parts on the destination disk or volume in MOVE PARTITION. + [\#7434](https://github.com/ClickHouse/ClickHouse/pull/7434) ([Vladimir Chebotarev](https://github.com/excitoon)) +- Fixed NULL-values in nullable columns through ODBC-bridge. + [\#7402](https://github.com/ClickHouse/ClickHouse/pull/7402) ([Vasily Nemkov](https://github.com/Enmk)) +- Fixed INSERT into Distributed non-local node with MATERIALIZED columns. + [\#7377](https://github.com/ClickHouse/ClickHouse/pull/7377) ([Azat Khuzhin](https://github.com/azat)) +- Fixed function getMultipleValuesFromConfig. + [\#7374](https://github.com/ClickHouse/ClickHouse/pull/7374) ([Mikhail Korotov](https://github.com/millb)) +- Fixed issue of using HTTP keep-alive timeout instead of TCP keep-alive timeout. + [\#7351](https://github.com/ClickHouse/ClickHouse/pull/7351) ([Vasily Nemkov](https://github.com/Enmk)) +- Wait for all jobs to finish on exception (fixes rare segfaults). + [\#7350](https://github.com/ClickHouse/ClickHouse/pull/7350) ([tavplubix](https://github.com/tavplubix)) +- Don’t push to MVs when inserting into a Kafka table. + [\#7265](https://github.com/ClickHouse/ClickHouse/pull/7265) ([Ivan](https://github.com/abyss7)) +- Disable memory tracker for exception stack. + [\#7264](https://github.com/ClickHouse/ClickHouse/pull/7264) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +- Fixed bad code in transforming query for external database. + [\#7252](https://github.com/ClickHouse/ClickHouse/pull/7252) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Avoid use of uninitialized values in MetricsTransmitter. + [\#7158](https://github.com/ClickHouse/ClickHouse/pull/7158) ([Azat Khuzhin](https://github.com/azat)) +- Added example config with macros for tests ([alexey-milovidov](https://github.com/alexey-milovidov)) + +### ClickHouse release 19.15.3.6, 2019-10-09 {#clickhouse-release-19-15-3-6-2019-10-09} + +#### Bug Fix {#bug-fix-4} + +- Fixed bad\_variant in hashed dictionary. + ([alesapin](https://github.com/alesapin)) +- Fixed a bug with segmentation fault in ATTACH PART query. + ([alesapin](https://github.com/alesapin)) +- Fixed time calculation in `MergeTreeData`.
+ ([Vladimir Chebotarev](https://github.com/excitoon)) +- Commit to Kafka explicitly after the writing is finalized. + [\#7175](https://github.com/ClickHouse/ClickHouse/pull/7175) ([Ivan](https://github.com/abyss7)) +- Serialize NULL values correctly in min/max indexes of MergeTree parts. + [\#7234](https://github.com/ClickHouse/ClickHouse/pull/7234) ([Alexander Kuzmenkov](https://github.com/akuzm)) + +### ClickHouse release 19.15.2.2, 2019-10-01 {#clickhouse-release-19-15-2-2-2019-10-01} + +#### New Feature {#new-feature-3} + +- Tiered storage: support for using multiple storage volumes for tables with the MergeTree engine. It’s possible to store fresh data on SSD and automatically move old data to HDD. ([example](https://clickhouse.github.io/clickhouse-presentations/meetup30/new_features/#12)). [\#4918](https://github.com/ClickHouse/ClickHouse/pull/4918) ([Igr](https://github.com/ObjatieGroba)) [\#6489](https://github.com/ClickHouse/ClickHouse/pull/6489) ([alesapin](https://github.com/alesapin)) +- Add table function `input` for reading incoming data in `INSERT SELECT` query. [\#5450](https://github.com/ClickHouse/ClickHouse/pull/5450) ([palasonic1](https://github.com/palasonic1)) [\#6832](https://github.com/ClickHouse/ClickHouse/pull/6832) ([Anton Popov](https://github.com/CurtizJ)) +- Add a `sparse_hashed` dictionary layout that is functionally equivalent to the `hashed` layout, but is more memory efficient. It uses about half as much memory at the cost of slower value retrieval. [\#6894](https://github.com/ClickHouse/ClickHouse/pull/6894) ([Azat Khuzhin](https://github.com/azat)) +- Implement ability to define a list of users for access to dictionaries. Applies only to the currently connected database. [\#6907](https://github.com/ClickHouse/ClickHouse/pull/6907) ([Guillaume Tassery](https://github.com/YiuRULE)) +- Add `LIMIT` option to `SHOW` query. [\#6944](https://github.com/ClickHouse/ClickHouse/pull/6944) ([Philipp Malkovsky](https://github.com/malkfilipp)) +- Add `bitmapSubsetLimit(bitmap, range_start, limit)` function, which returns a subset of at most `limit` values from the set, each no smaller than `range_start`. [\#6957](https://github.com/ClickHouse/ClickHouse/pull/6957) ([Zhichang Yu](https://github.com/yuzhichang)) +- Add `bitmapMin` and `bitmapMax` functions. [\#6970](https://github.com/ClickHouse/ClickHouse/pull/6970) ([Zhichang Yu](https://github.com/yuzhichang)) +- Add function `repeat` related to [issue-6648](https://github.com/ClickHouse/ClickHouse/issues/6648) [\#6999](https://github.com/ClickHouse/ClickHouse/pull/6999) ([flynn](https://github.com/ucasFL)) + +#### Experimental Feature {#experimental-feature-1} + +- Implement (in memory) Merge Join variant that does not change the current pipeline. The result is partially sorted by merge key. Set `partial_merge_join = 1` to use this feature. The Merge Join is still in development. [\#6940](https://github.com/ClickHouse/ClickHouse/pull/6940) ([Artem Zuikov](https://github.com/4ertus2)) +- Add `S3` engine and table function. It is still in development (no authentication support yet). [\#5596](https://github.com/ClickHouse/ClickHouse/pull/5596) ([Vladimir Chebotarev](https://github.com/excitoon)) + +#### Improvement {#improvement-2} + +- Every message read from Kafka is inserted atomically. This resolves almost all known issues with the Kafka engine. [\#6950](https://github.com/ClickHouse/ClickHouse/pull/6950) ([Ivan](https://github.com/abyss7)) +- Improvements for failover of Distributed queries.
Shortened recovery time; it is now configurable and can be seen in `system.clusters`. [\#6399](https://github.com/ClickHouse/ClickHouse/pull/6399) ([Vasily Nemkov](https://github.com/Enmk)) +- Support numeric values for Enums directly in the `IN` section. \#6766 [\#6941](https://github.com/ClickHouse/ClickHouse/pull/6941) ([dimarub2000](https://github.com/dimarub2000)) +- Support (optional, disabled by default) redirects on URL storage. [\#6914](https://github.com/ClickHouse/ClickHouse/pull/6914) ([maqroll](https://github.com/maqroll)) +- Add an information message when a client with an older version connects to a server. [\#6893](https://github.com/ClickHouse/ClickHouse/pull/6893) ([Philipp Malkovsky](https://github.com/malkfilipp)) +- Remove maximum backoff sleep time limit for sending data in Distributed tables [\#6895](https://github.com/ClickHouse/ClickHouse/pull/6895) ([Azat Khuzhin](https://github.com/azat)) +- Add ability to send profile events (counters) with cumulative values to graphite. It can be enabled under `<events_cumulative>` in server `config.xml`. [\#6969](https://github.com/ClickHouse/ClickHouse/pull/6969) ([Azat Khuzhin](https://github.com/azat)) +- Automatically cast type `T` to `LowCardinality(T)` while inserting data into a column of type `LowCardinality(T)` in Native format via HTTP. [\#6891](https://github.com/ClickHouse/ClickHouse/pull/6891) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +- Add ability to use function `hex` without using `reinterpretAsString` for `Float32`, `Float64`. [\#7024](https://github.com/ClickHouse/ClickHouse/pull/7024) ([Mikhail Korotov](https://github.com/millb)) + +#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-2} + +- Add gdb-index to clickhouse binary with debug info. It will speed up startup time of `gdb`. [\#6947](https://github.com/ClickHouse/ClickHouse/pull/6947) ([alesapin](https://github.com/alesapin)) +- Speed up deb packaging with patched dpkg-deb which uses `pigz`. [\#6960](https://github.com/ClickHouse/ClickHouse/pull/6960) ([alesapin](https://github.com/alesapin)) +- Set `enable_fuzzing = 1` to enable libfuzzer instrumentation of all the project code. [\#7042](https://github.com/ClickHouse/ClickHouse/pull/7042) ([kyprizel](https://github.com/kyprizel)) +- Add split build smoke test in CI. [\#7061](https://github.com/ClickHouse/ClickHouse/pull/7061) ([alesapin](https://github.com/alesapin)) +- Add build with MemorySanitizer to CI. [\#7066](https://github.com/ClickHouse/ClickHouse/pull/7066) ([Alexander Kuzmenkov](https://github.com/akuzm)) +- Replace `libsparsehash` with `sparsehash-c11` [\#6965](https://github.com/ClickHouse/ClickHouse/pull/6965) ([Azat Khuzhin](https://github.com/azat)) + +#### Bug Fix {#bug-fix-5} + +- Fixed performance degradation of index analysis on complex keys on large tables. This fixes \#6924. [\#7075](https://github.com/ClickHouse/ClickHouse/pull/7075) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fix logical error causing segfaults when selecting from an empty Kafka topic. [\#6909](https://github.com/ClickHouse/ClickHouse/pull/6909) ([Ivan](https://github.com/abyss7)) +- Fix too early MySQL connection close in `MySQLBlockInputStream.cpp`.
[\#6882](https://github.com/ClickHouse/ClickHouse/pull/6882) ([Clément Rodriguez](https://github.com/clemrodriguez)) +- Returned support for very old Linux kernels (fix [\#6841](https://github.com/ClickHouse/ClickHouse/issues/6841)) [\#6853](https://github.com/ClickHouse/ClickHouse/pull/6853) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fix possible data loss in `insert select` query in case of an empty block in the input stream. \#6834 \#6862 [\#6911](https://github.com/ClickHouse/ClickHouse/pull/6911) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +- Fix for function `arrayEnumerateUniqRanked` with empty arrays in params [\#6928](https://github.com/ClickHouse/ClickHouse/pull/6928) ([proller](https://github.com/proller)) +- Fix complex queries with array joins and global subqueries. [\#6934](https://github.com/ClickHouse/ClickHouse/pull/6934) ([Ivan](https://github.com/abyss7)) +- Fix `Unknown identifier` error in ORDER BY and GROUP BY with multiple JOINs [\#7022](https://github.com/ClickHouse/ClickHouse/pull/7022) ([Artem Zuikov](https://github.com/4ertus2)) +- Fixed `MSan` warning while executing function with `LowCardinality` argument. [\#7062](https://github.com/ClickHouse/ClickHouse/pull/7062) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) + +#### Backward Incompatible Change {#backward-incompatible-change-2} + +- Changed serialization format of bitmap\* aggregate function states to improve performance. Serialized states of bitmap\* from previous versions cannot be read. [\#6908](https://github.com/ClickHouse/ClickHouse/pull/6908) ([Zhichang Yu](https://github.com/yuzhichang)) + +## ClickHouse release 19.14 {#clickhouse-release-19-14} + +### ClickHouse release 19.14.7.15, 2019-10-02 {#clickhouse-release-19-14-7-15-2019-10-02} + +#### Bug Fix {#bug-fix-6} + +- This release also contains all bug fixes from 19.11.12.69. +- Fixed compatibility for distributed queries between 19.14 and earlier versions. This fixes [\#7068](https://github.com/ClickHouse/ClickHouse/issues/7068). [\#7069](https://github.com/ClickHouse/ClickHouse/pull/7069) ([alexey-milovidov](https://github.com/alexey-milovidov)) + +### ClickHouse release 19.14.6.12, 2019-09-19 {#clickhouse-release-19-14-6-12-2019-09-19} + +#### Bug Fix {#bug-fix-7} + +- Fix for function `arrayEnumerateUniqRanked` with empty arrays in params. [\#6928](https://github.com/ClickHouse/ClickHouse/pull/6928) ([proller](https://github.com/proller)) +- Fixed subquery name in queries with `ARRAY JOIN` and `GLOBAL IN subquery` with alias. Use the subquery alias for the external table name if it is specified. [\#6934](https://github.com/ClickHouse/ClickHouse/pull/6934) ([Ivan](https://github.com/abyss7)) + +#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-3} + +- Fix [flapping](https://clickhouse-test-reports.s3.yandex.net/6944/aab95fd5175a513413c7395a73a82044bdafb906/functional_stateless_tests_(debug).html) test `00715_fetch_merged_or_mutated_part_zookeeper` by rewriting it to a shell script, because it needs to wait for mutations to apply. [\#6977](https://github.com/ClickHouse/ClickHouse/pull/6977) ([Alexander Kazakov](https://github.com/Akazz)) +- Fixed UBSan and MemSan failure in function `groupUniqArray` with empty array argument. It was caused by placing an empty `PaddedPODArray` into the hash table’s zero cell because the constructor for the zero cell value was not called.
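+  A minimal reproduction sketch of the previously affected call (illustrative): +  ```sql +  SELECT groupUniqArray(arr) FROM (SELECT emptyArrayString() AS arr); +  ```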
[\#6937](https://github.com/ClickHouse/ClickHouse/pull/6937) ([Amos Bird](https://github.com/amosbird)) + +### ClickHouse release 19.14.3.3, 2019-09-10 {#clickhouse-release-19-14-3-3-2019-09-10} + +#### New Feature {#new-feature-4} + +- `WITH FILL` modifier for `ORDER BY`. (continuation of [\#5069](https://github.com/ClickHouse/ClickHouse/issues/5069)) [\#6610](https://github.com/ClickHouse/ClickHouse/pull/6610) ([Anton Popov](https://github.com/CurtizJ)) +- `WITH TIES` modifier for `LIMIT`. (continuation of [\#5069](https://github.com/ClickHouse/ClickHouse/issues/5069)) [\#6610](https://github.com/ClickHouse/ClickHouse/pull/6610) ([Anton Popov](https://github.com/CurtizJ)) +- Parse unquoted `NULL` literal as NULL (if setting `format_csv_unquoted_null_literal_as_null=1`). Initialize null fields with default values if the data type of the field is not nullable (if setting `input_format_null_as_default=1`). [\#5990](https://github.com/ClickHouse/ClickHouse/issues/5990) [\#6055](https://github.com/ClickHouse/ClickHouse/pull/6055) ([tavplubix](https://github.com/tavplubix)) +- Support for wildcards in paths of table functions `file` and `hdfs`. If the path contains wildcards, the table will be readonly. Example of usage: `select * from hdfs('hdfs://hdfs1:9000/some_dir/another_dir/*/file{0..9}{0..9}')` and `select * from file('some_dir/{some_file,another_file,yet_another}.tsv', 'TSV', 'value UInt32')`. [\#6092](https://github.com/ClickHouse/ClickHouse/pull/6092) ([Olga Khvostikova](https://github.com/stavrolia)) +- New `system.metric_log` table which stores values of `system.events` and `system.metrics` with a specified time interval. [\#6363](https://github.com/ClickHouse/ClickHouse/issues/6363) [\#6467](https://github.com/ClickHouse/ClickHouse/pull/6467) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) [\#6530](https://github.com/ClickHouse/ClickHouse/pull/6530) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Allow writing ClickHouse text logs to the `system.text_log` table. [\#6037](https://github.com/ClickHouse/ClickHouse/issues/6037) [\#6103](https://github.com/ClickHouse/ClickHouse/pull/6103) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) [\#6164](https://github.com/ClickHouse/ClickHouse/pull/6164) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Show private symbols in stack traces (this is done via parsing symbol tables of ELF files). Added information about file and line number in stack traces if debug info is present. Sped up symbol name lookup by indexing symbols present in the program. Added new SQL functions for introspection: `demangle` and `addressToLine`. Renamed function `symbolizeAddress` to `addressToSymbol` for consistency. Function `addressToSymbol` will return a mangled name for performance reasons and you have to apply `demangle`. Added setting `allow_introspection_functions` which is turned off by default. [\#6201](https://github.com/ClickHouse/ClickHouse/pull/6201) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Table function `values` (the name is case-insensitive). It allows reading from the `VALUES` list proposed in [\#5984](https://github.com/ClickHouse/ClickHouse/issues/5984). Example: `SELECT * FROM VALUES('a UInt64, s String', (1, 'one'), (2, 'two'), (3, 'three'))`. [\#6217](https://github.com/ClickHouse/ClickHouse/issues/6217). [\#6209](https://github.com/ClickHouse/ClickHouse/pull/6209) ([dimarub2000](https://github.com/dimarub2000)) +- Added an ability to alter storage settings. Syntax: `ALTER TABLE <table> MODIFY SETTING <setting> = <value>`.
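+  For instance, with the `merge_max_block_size` MergeTree setting mentioned earlier (`hits` is a hypothetical table): +  ```sql +  ALTER TABLE hits MODIFY SETTING merge_max_block_size = 16384; +  ```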
[\#6366](https://github.com/ClickHouse/ClickHouse/pull/6366) [\#6669](https://github.com/ClickHouse/ClickHouse/pull/6669) [\#6685](https://github.com/ClickHouse/ClickHouse/pull/6685) ([alesapin](https://github.com/alesapin)) +- Support for removing detached parts. Syntax: `ALTER TABLE <table_name> DROP DETACHED PART '<part_id>'` (see the DDL examples after this list). [\#6158](https://github.com/ClickHouse/ClickHouse/pull/6158) ([tavplubix](https://github.com/tavplubix)) +- Table constraints. Allows adding constraints to a table definition which will be checked at insert (see the example after this list). [\#5273](https://github.com/ClickHouse/ClickHouse/pull/5273) ([Gleb Novikov](https://github.com/NanoBjorn)) [\#6652](https://github.com/ClickHouse/ClickHouse/pull/6652) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Support for cascaded materialized views. [\#6324](https://github.com/ClickHouse/ClickHouse/pull/6324) ([Amos Bird](https://github.com/amosbird)) +- Turn on query profiler by default to sample every query execution thread once a second. [\#6283](https://github.com/ClickHouse/ClickHouse/pull/6283) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Input format `ORC`. [\#6454](https://github.com/ClickHouse/ClickHouse/pull/6454) [\#6703](https://github.com/ClickHouse/ClickHouse/pull/6703) ([akonyaev90](https://github.com/akonyaev90)) +- Added two new functions: `sigmoid` and `tanh` (that are useful for machine learning applications). [\#6254](https://github.com/ClickHouse/ClickHouse/pull/6254) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Functions `hasToken(haystack, token)` and `hasTokenCaseInsensitive(haystack, token)` to check if the given token is in the haystack (see the example after this list). A token is a maximal-length substring between two non-alphanumeric ASCII characters (or the boundaries of the haystack). The token must be a constant string. Supported by the tokenbf\_v1 index specialization. [\#6596](https://github.com/ClickHouse/ClickHouse/pull/6596), [\#6662](https://github.com/ClickHouse/ClickHouse/pull/6662) ([Vasily Nemkov](https://github.com/Enmk)) +- New function `neighbor(value, offset[, default_value])`. Allows reaching the previous/next value within a column in a block of data (see the example after this list). [\#5925](https://github.com/ClickHouse/ClickHouse/pull/5925) ([Alex Krash](https://github.com/alex-krash)) [6685365ab8c5b74f9650492c88a012596eb1b0c6](https://github.com/ClickHouse/ClickHouse/commit/6685365ab8c5b74f9650492c88a012596eb1b0c6) [341e2e4587a18065c2da1ca888c73389f48ce36c](https://github.com/ClickHouse/ClickHouse/commit/341e2e4587a18065c2da1ca888c73389f48ce36c) [Alexey Milovidov](https://github.com/alexey-milovidov) +- Created a function `currentUser()`, returning the login of the authorized user. Added alias `user()` for compatibility with MySQL. [\#6470](https://github.com/ClickHouse/ClickHouse/pull/6470) ([Alex Krash](https://github.com/alex-krash)) +- New aggregate functions `quantilesExactInclusive` and `quantilesExactExclusive` which were proposed in [\#5885](https://github.com/ClickHouse/ClickHouse/issues/5885). [\#6477](https://github.com/ClickHouse/ClickHouse/pull/6477) ([dimarub2000](https://github.com/dimarub2000)) +- Function `bitmapRange(bitmap, range_begin, range_end)` which returns a new set within the specified range (not including `range_end`). [\#6314](https://github.com/ClickHouse/ClickHouse/pull/6314) ([Zhichang Yu](https://github.com/yuzhichang)) +- Function `geohashesInBox(longitude_min, latitude_min, longitude_max, latitude_max, precision)` which creates an array of precision-long strings of geohash boxes covering the provided area. [\#6127](https://github.com/ClickHouse/ClickHouse/pull/6127) ([Vasily Nemkov](https://github.com/Enmk)) +- Implement support for INSERT query with `Kafka` tables (see the sketch after this list). [\#6012](https://github.com/ClickHouse/ClickHouse/pull/6012) ([Ivan](https://github.com/abyss7)) +- Added support for `_partition` and `_timestamp` virtual columns to the Kafka engine. [\#6400](https://github.com/ClickHouse/ClickHouse/pull/6400) ([Ivan](https://github.com/abyss7)) +- Possibility to remove sensitive data from `query_log`, server logs, and the process list with regexp-based rules. [\#5710](https://github.com/ClickHouse/ClickHouse/pull/5710) ([filimonov](https://github.com/filimonov))
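As referenced in the list above, a minimal sketch of the new `ORDER BY ... WITH FILL` and `LIMIT ... WITH TIES` modifiers (the data is illustrative):

```sql
-- WITH FILL inserts the missing rows into the sorted result: 0,2,4 becomes 0..5.
SELECT number * 2 AS n
FROM numbers(3)
ORDER BY n WITH FILL FROM 0 TO 6;

-- WITH TIES also returns the rows that tie with the last row selected by LIMIT.
SELECT number % 3 AS k
FROM numbers(9)
ORDER BY k
LIMIT 4 WITH TIES; -- 6 rows: all rows with k = 0 and all rows with k = 1
```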
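A hedged sketch of the new introspection functions applied to stack traces collected by the query profiler (this assumes the server writes `system.trace_log`, which depends on the server configuration):

```sql
SET allow_introspection_functions = 1; -- turned off by default

-- addressToSymbol returns the mangled name for performance reasons,
-- so demangle has to be applied on top of it.
SELECT demangle(addressToSymbol(arrayJoin(trace))) AS symbol
FROM system.trace_log
LIMIT 10;
```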
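A sketch of the two new `ALTER` forms; the table name, setting and part name are illustrative, and dropping a detached part may additionally require enabling `allow_drop_detached` (an assumption here, not stated in the entries above):

```sql
-- Change a MergeTree storage setting without recreating the table.
ALTER TABLE hits MODIFY SETTING merge_with_ttl_timeout = 3600;

-- Remove a part that sits in the detached/ directory.
ALTER TABLE hits DROP DETACHED PART '20190801_1_1_0';
```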
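A minimal sketch of a table constraint; the insert below is rejected because constraints are checked at insert time:

```sql
CREATE TABLE purchases
(
    amount UInt64,
    CONSTRAINT amount_is_positive CHECK amount > 0
)
ENGINE = MergeTree
ORDER BY amount;

INSERT INTO purchases VALUES (0); -- throws: the constraint is violated
```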
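Minimal examples of `hasToken*` and `neighbor`; note that the default value passed to `neighbor` has to be compatible with the column type, hence the explicit cast:

```sql
SELECT hasToken('Hello, world of data', 'world');        -- 1
SELECT hasTokenCaseInsensitive('Hello, WORLD', 'world'); -- 1

-- neighbor reaches the next value within the current block of data;
-- the last row gets the supplied default.
SELECT number, neighbor(number, 1, toUInt64(999)) AS next
FROM numbers(3); -- next = 1, 2, 999
```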
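A hedged sketch of the Kafka additions; the table definition, broker address and topic are illustrative and assume a reachable Kafka broker:

```sql
CREATE TABLE queue (id UInt64, s String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'localhost:9092',
         kafka_topic_list = 'events',
         kafka_group_name = 'ch-group',
         kafka_format = 'JSONEachRow';

-- INSERT into Kafka tables is now supported.
INSERT INTO queue VALUES (1, 'hello');

-- The new virtual columns expose message metadata.
SELECT _partition, _timestamp, id, s FROM queue;
```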
+ +#### Experimental Feature {#experimental-feature-2} + +- Input and output data format `Template`. It allows specifying a custom format string for input and output. [\#4354](https://github.com/ClickHouse/ClickHouse/issues/4354) [\#6727](https://github.com/ClickHouse/ClickHouse/pull/6727) ([tavplubix](https://github.com/tavplubix)) +- Implementation of `LIVE VIEW` tables that were originally proposed in [\#2898](https://github.com/ClickHouse/ClickHouse/pull/2898), prepared in [\#3925](https://github.com/ClickHouse/ClickHouse/issues/3925), and then updated in [\#5541](https://github.com/ClickHouse/ClickHouse/issues/5541). See [\#5541](https://github.com/ClickHouse/ClickHouse/issues/5541) for a detailed description (and the sketch below). [\#5541](https://github.com/ClickHouse/ClickHouse/issues/5541) ([vzakaznikov](https://github.com/vzakaznikov)) [\#6425](https://github.com/ClickHouse/ClickHouse/pull/6425) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) [\#6656](https://github.com/ClickHouse/ClickHouse/pull/6656) ([vzakaznikov](https://github.com/vzakaznikov)) Note that the `LIVE VIEW` feature may be removed in future versions.
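A minimal sketch of the experimental `LIVE VIEW` flow (the source table `events` is illustrative):

```sql
SET allow_experimental_live_view = 1;

CREATE LIVE VIEW lv AS SELECT count() FROM events;

-- WATCH streams an updated result every time the data in `events` changes.
WATCH lv;
```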
+ +#### Bug Fix {#bug-fix-8} + +- This release also contains all bug fixes from 19.13 and 19.11. +- Fix segmentation fault when the table has skip indices and vertical merge happens. [\#6723](https://github.com/ClickHouse/ClickHouse/pull/6723) ([alesapin](https://github.com/alesapin)) +- Fix per-column TTL with non-trivial column defaults. Previously, in case of a force TTL merge with an `OPTIMIZE ... FINAL` query, expired values were replaced by type defaults instead of user-specified column defaults. [\#6796](https://github.com/ClickHouse/ClickHouse/pull/6796) ([Anton Popov](https://github.com/CurtizJ)) +- Fix Kafka message duplication problem on normal server restart. [\#6597](https://github.com/ClickHouse/ClickHouse/pull/6597) ([Ivan](https://github.com/abyss7)) +- Fixed infinite loop when reading Kafka messages. Do not pause/resume consumer on subscription at all - otherwise it may get paused indefinitely in some scenarios. [\#6354](https://github.com/ClickHouse/ClickHouse/pull/6354) ([Ivan](https://github.com/abyss7)) +- Fix `Key expression contains comparison between inconvertible types` exception in `bitmapContains` function. [\#6136](https://github.com/ClickHouse/ClickHouse/issues/6136) [\#6146](https://github.com/ClickHouse/ClickHouse/issues/6146) [\#6156](https://github.com/ClickHouse/ClickHouse/pull/6156) ([dimarub2000](https://github.com/dimarub2000)) +- Fix segfault with enabled `optimize_skip_unused_shards` and missing sharding key. [\#6384](https://github.com/ClickHouse/ClickHouse/pull/6384) ([Anton Popov](https://github.com/CurtizJ)) +- Fixed wrong code in mutations that may lead to memory corruption. Fixed segfault with read of address `0x14c0` that may happen due to concurrent `DROP TABLE` and `SELECT` from `system.parts` or `system.parts_columns`. Fixed race condition in preparation of mutation queries. Fixed deadlock caused by `OPTIMIZE` of Replicated tables and concurrent modification operations like ALTERs. [\#6514](https://github.com/ClickHouse/ClickHouse/pull/6514) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Removed extra verbose logging in MySQL interface [\#6389](https://github.com/ClickHouse/ClickHouse/pull/6389) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Return the ability to parse boolean settings from ‘true’ and ‘false’ in the configuration file. [\#6278](https://github.com/ClickHouse/ClickHouse/pull/6278) ([alesapin](https://github.com/alesapin)) +- Fix crash in `quantile` and `median` functions over `Nullable(Decimal128)`. [\#6378](https://github.com/ClickHouse/ClickHouse/pull/6378) ([Artem Zuikov](https://github.com/4ertus2)) +- Fixed possible incomplete result returned by a `SELECT` query with a `WHERE` condition on the primary key that contained a conversion to Float type. It was caused by incorrect checking of monotonicity in the `toFloat` function. [\#6248](https://github.com/ClickHouse/ClickHouse/issues/6248) [\#6374](https://github.com/ClickHouse/ClickHouse/pull/6374) ([dimarub2000](https://github.com/dimarub2000)) +- Check `max_expanded_ast_elements` setting for mutations. Clear mutations after `TRUNCATE TABLE`. [\#6205](https://github.com/ClickHouse/ClickHouse/pull/6205) ([Winter Zhang](https://github.com/zhang2014)) +- Fix JOIN results for key columns when used with `join_use_nulls`. Attach Nulls instead of column defaults. [\#6249](https://github.com/ClickHouse/ClickHouse/pull/6249) ([Artem Zuikov](https://github.com/4ertus2)) +- Fix for skip indices with vertical merge and alter. Fix for `Bad size of marks file` exception. [\#6594](https://github.com/ClickHouse/ClickHouse/issues/6594) [\#6713](https://github.com/ClickHouse/ClickHouse/pull/6713) ([alesapin](https://github.com/alesapin)) +- Fix rare crash in `ALTER MODIFY COLUMN` and vertical merge when one of the merged/altered parts is empty (0 rows) [\#6746](https://github.com/ClickHouse/ClickHouse/issues/6746) [\#6780](https://github.com/ClickHouse/ClickHouse/pull/6780) ([alesapin](https://github.com/alesapin)) +- Fixed bug in conversion of `LowCardinality` types in `AggregateFunctionFactory`. This fixes [\#6257](https://github.com/ClickHouse/ClickHouse/issues/6257). [\#6281](https://github.com/ClickHouse/ClickHouse/pull/6281) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +- Fix wrong behavior and possible segfaults in `topK` and `topKWeighted` aggregate functions. [\#6404](https://github.com/ClickHouse/ClickHouse/pull/6404) ([Anton Popov](https://github.com/CurtizJ)) +- Fixed unsafe code around the `getIdentifier` function. [\#6401](https://github.com/ClickHouse/ClickHouse/issues/6401) [\#6409](https://github.com/ClickHouse/ClickHouse/pull/6409) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixed bug in the MySQL wire protocol (used while connecting to ClickHouse from a MySQL client). Caused by a heap buffer overflow in `PacketPayloadWriteBuffer`. [\#6212](https://github.com/ClickHouse/ClickHouse/pull/6212) ([Yuriy Baranov](https://github.com/yurriy)) +- Fixed memory leak in `bitmapSubsetInRange` function. [\#6819](https://github.com/ClickHouse/ClickHouse/pull/6819) ([Zhichang Yu](https://github.com/yuzhichang)) +- Fix rare bug when a mutation is executed after a granularity change.
[\#6816](https://github.com/ClickHouse/ClickHouse/pull/6816) ([alesapin](https://github.com/alesapin)) +- Allow protobuf message with all fields by default. [\#6132](https://github.com/ClickHouse/ClickHouse/pull/6132) ([Vitaly Baranov](https://github.com/vitlibar)) +- Resolve a bug with the `nullIf` function when we send a `NULL` argument as the second argument. [\#6446](https://github.com/ClickHouse/ClickHouse/pull/6446) ([Guillaume Tassery](https://github.com/YiuRULE)) +- Fix rare bug with wrong memory allocation/deallocation in complex key cache dictionaries with string fields which leads to infinite memory consumption (looks like a memory leak). The bug reproduces when the string size is a power of two starting from eight (8, 16, 32, etc). [\#6447](https://github.com/ClickHouse/ClickHouse/pull/6447) ([alesapin](https://github.com/alesapin)) +- Fixed Gorilla encoding on small sequences which caused exception `Cannot write after end of buffer`. [\#6398](https://github.com/ClickHouse/ClickHouse/issues/6398) [\#6444](https://github.com/ClickHouse/ClickHouse/pull/6444) ([Vasily Nemkov](https://github.com/Enmk)) +- Allow using non-nullable types in JOINs with `join_use_nulls` enabled. [\#6705](https://github.com/ClickHouse/ClickHouse/pull/6705) ([Artem Zuikov](https://github.com/4ertus2)) +- Disable `Poco::AbstractConfiguration` substitutions in query in `clickhouse-client`. [\#6706](https://github.com/ClickHouse/ClickHouse/pull/6706) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Avoid deadlock in `REPLACE PARTITION`. [\#6677](https://github.com/ClickHouse/ClickHouse/pull/6677) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Using `arrayReduce` for constant arguments could lead to a segfault. [\#6242](https://github.com/ClickHouse/ClickHouse/issues/6242) [\#6326](https://github.com/ClickHouse/ClickHouse/pull/6326) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fix inconsistent parts which can appear if a replica was restored after `DROP PARTITION`. [\#6522](https://github.com/ClickHouse/ClickHouse/issues/6522) [\#6523](https://github.com/ClickHouse/ClickHouse/pull/6523) ([tavplubix](https://github.com/tavplubix)) +- Fixed hang in `JSONExtractRaw` function. [\#6195](https://github.com/ClickHouse/ClickHouse/issues/6195) [\#6198](https://github.com/ClickHouse/ClickHouse/pull/6198) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fix bug with incorrect skip index serialization and aggregation with adaptive granularity. [\#6594](https://github.com/ClickHouse/ClickHouse/issues/6594). [\#6748](https://github.com/ClickHouse/ClickHouse/pull/6748) ([alesapin](https://github.com/alesapin)) +- Fix `WITH ROLLUP` and `WITH CUBE` modifiers of `GROUP BY` with two-level aggregation. [\#6225](https://github.com/ClickHouse/ClickHouse/pull/6225) ([Anton Popov](https://github.com/CurtizJ)) +- Fix bug with writing secondary index marks with adaptive granularity. [\#6126](https://github.com/ClickHouse/ClickHouse/pull/6126) ([alesapin](https://github.com/alesapin)) +- Fix initialization order during server startup. Since `StorageMergeTree::background_task_handle` is initialized in `startup()`, `MergeTreeBlockOutputStream::write()` may try to use it before initialization. Just check if it is initialized. [\#6080](https://github.com/ClickHouse/ClickHouse/pull/6080) ([Ivan](https://github.com/abyss7)) +- Clearing the data buffer from the previous read operation that was completed with an error.
[\#6026](https://github.com/ClickHouse/ClickHouse/pull/6026) ([Nikolay](https://github.com/bopohaa)) +- Fix bug with enabling adaptive granularity when creating a new replica for a Replicated\*MergeTree table. [\#6394](https://github.com/ClickHouse/ClickHouse/issues/6394) [\#6452](https://github.com/ClickHouse/ClickHouse/pull/6452) ([alesapin](https://github.com/alesapin)) +- Fixed possible crash during server startup in case an exception happened in `libunwind` during an exception at access to the uninitialized `ThreadStatus` structure. [\#6456](https://github.com/ClickHouse/ClickHouse/pull/6456) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) +- Fix crash in `yandexConsistentHash` function. Found by fuzz test. [\#6304](https://github.com/ClickHouse/ClickHouse/issues/6304) [\#6305](https://github.com/ClickHouse/ClickHouse/pull/6305) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixed the possibility of hanging queries when the server is overloaded and the global thread pool becomes nearly full. This has a higher chance of happening on clusters with a large number of shards (hundreds), because distributed queries allocate a thread per connection to each shard. For example, this issue may reproduce if a cluster of 330 shards is processing 30 concurrent distributed queries. This issue affects all versions starting from 19.2. [\#6301](https://github.com/ClickHouse/ClickHouse/pull/6301) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixed logic of `arrayEnumerateUniqRanked` function. [\#6423](https://github.com/ClickHouse/ClickHouse/pull/6423) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fix segfault when decoding symbol table. [\#6603](https://github.com/ClickHouse/ClickHouse/pull/6603) ([Amos Bird](https://github.com/amosbird)) +- Fixed irrelevant exception in cast of `LowCardinality(Nullable)` to a non-Nullable column in case it doesn’t contain Nulls (e.g. in a query like `SELECT CAST(CAST('Hello' AS LowCardinality(Nullable(String))) AS String)`). [\#6094](https://github.com/ClickHouse/ClickHouse/issues/6094) [\#6119](https://github.com/ClickHouse/ClickHouse/pull/6119) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +- Removed extra quoting of description in `system.settings` table. [\#6696](https://github.com/ClickHouse/ClickHouse/issues/6696) [\#6699](https://github.com/ClickHouse/ClickHouse/pull/6699) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Avoid possible deadlock in `TRUNCATE` of a Replicated table. [\#6695](https://github.com/ClickHouse/ClickHouse/pull/6695) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fix reading in order of sorting key. [\#6189](https://github.com/ClickHouse/ClickHouse/pull/6189) ([Anton Popov](https://github.com/CurtizJ)) +- Fix `ALTER TABLE ... UPDATE` query for tables with `enable_mixed_granularity_parts=1`. [\#6543](https://github.com/ClickHouse/ClickHouse/pull/6543) ([alesapin](https://github.com/alesapin)) +- Fix bug opened by [\#4405](https://github.com/ClickHouse/ClickHouse/pull/4405) (since 19.4.0). Reproduces in queries to Distributed tables over MergeTree tables when we don’t query any columns (`SELECT 1`). [\#6236](https://github.com/ClickHouse/ClickHouse/pull/6236) ([alesapin](https://github.com/alesapin)) +- Fixed overflow in integer division of a signed type by an unsigned type. The behaviour was exactly as in the C or C++ language (integer promotion rules), which may be surprising.
Please note that the overflow is still possible when dividing a large signed number by a large unsigned number or vice versa (but that case is less usual). The issue existed in all server versions. [\#6214](https://github.com/ClickHouse/ClickHouse/issues/6214) [\#6233](https://github.com/ClickHouse/ClickHouse/pull/6233) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Limit maximum sleep time for throttling when `max_execution_speed` or `max_execution_speed_bytes` is set. Fixed false errors like `Estimated query execution time (inf seconds) is too long`. [\#5547](https://github.com/ClickHouse/ClickHouse/issues/5547) [\#6232](https://github.com/ClickHouse/ClickHouse/pull/6232) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixed issues about using `MATERIALIZED` columns and aliases in `MaterializedView`. [\#448](https://github.com/ClickHouse/ClickHouse/issues/448) [\#3484](https://github.com/ClickHouse/ClickHouse/issues/3484) [\#3450](https://github.com/ClickHouse/ClickHouse/issues/3450) [\#2878](https://github.com/ClickHouse/ClickHouse/issues/2878) [\#2285](https://github.com/ClickHouse/ClickHouse/issues/2285) [\#3796](https://github.com/ClickHouse/ClickHouse/pull/3796) ([Amos Bird](https://github.com/amosbird)) [\#6316](https://github.com/ClickHouse/ClickHouse/pull/6316) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fix `FormatFactory` behaviour for input streams which are not implemented as a processor. [\#6495](https://github.com/ClickHouse/ClickHouse/pull/6495) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +- Fixed typo. [\#6631](https://github.com/ClickHouse/ClickHouse/pull/6631) ([Alex Ryndin](https://github.com/alexryndin)) +- Fixed a typo in the error message (is -\> are). [\#6839](https://github.com/ClickHouse/ClickHouse/pull/6839) ([Denis Zhuravlev](https://github.com/den-crane)) +- Fixed an error while parsing the column list from a string if a type contained a comma (this issue was relevant for the `File`, `URL`, and `HDFS` storages) [\#6217](https://github.com/ClickHouse/ClickHouse/issues/6217). [\#6209](https://github.com/ClickHouse/ClickHouse/pull/6209) ([dimarub2000](https://github.com/dimarub2000)) + +#### Security Fix {#security-fix} + +- This release also contains all security fixes from 19.13 and 19.11. +- Fixed the possibility of a fabricated query causing a server crash due to stack overflow in the SQL parser. Fixed the possibility of stack overflow in Merge and Distributed tables, materialized views and conditions for row-level security that involve subqueries. [\#6433](https://github.com/ClickHouse/ClickHouse/pull/6433) ([alexey-milovidov](https://github.com/alexey-milovidov)) + +#### Improvement {#improvement-3} + +- Correct implementation of ternary logic for `AND/OR`. [\#6048](https://github.com/ClickHouse/ClickHouse/pull/6048) ([Alexander Kazakov](https://github.com/Akazz)) +- Now values and rows with expired TTL will be removed after an `OPTIMIZE ... FINAL` query from old parts without TTL infos or with outdated TTL infos, e.g. after an `ALTER ... MODIFY TTL` query. Added queries `SYSTEM STOP/START TTL MERGES` to disallow/allow assigning merges with TTL and filtering expired values in all merges (see the TTL example after this list). [\#6274](https://github.com/ClickHouse/ClickHouse/pull/6274) ([Anton Popov](https://github.com/CurtizJ)) +- Possibility to change the location of the ClickHouse history file for the client using the `CLICKHOUSE_HISTORY_FILE` env variable.
[\#6840](https://github.com/ClickHouse/ClickHouse/pull/6840) ([filimonov](https://github.com/filimonov)) +- Remove `dry_run` flag from `InterpreterSelectQuery`. [\#6375](https://github.com/ClickHouse/ClickHouse/pull/6375) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +- Support `ASOF JOIN` with `ON` section. [\#6211](https://github.com/ClickHouse/ClickHouse/pull/6211) ([Artem Zuikov](https://github.com/4ertus2)) +- Better support of skip indexes for mutations and replication. Support for `MATERIALIZE/CLEAR INDEX ... IN PARTITION` query. `UPDATE x = x` recalculates all indices that use column `x`. [\#5053](https://github.com/ClickHouse/ClickHouse/pull/5053) ([Nikita Vasilev](https://github.com/nikvas0)) +- Allow to `ATTACH` live views (for example, at the server startup) regardless of the `allow_experimental_live_view` setting. [\#6754](https://github.com/ClickHouse/ClickHouse/pull/6754) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- For stack traces gathered by the query profiler, do not include stack frames generated by the query profiler itself. [\#6250](https://github.com/ClickHouse/ClickHouse/pull/6250) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Now table functions `values`, `file`, `url`, `hdfs` have support for ALIAS columns. [\#6255](https://github.com/ClickHouse/ClickHouse/pull/6255) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Throw an exception if a `config.d` file doesn’t have the corresponding root element as the config file. [\#6123](https://github.com/ClickHouse/ClickHouse/pull/6123) ([dimarub2000](https://github.com/dimarub2000)) +- Print extra info in the exception message for `no space left on device`. [\#6182](https://github.com/ClickHouse/ClickHouse/issues/6182), [\#6252](https://github.com/ClickHouse/ClickHouse/issues/6252) [\#6352](https://github.com/ClickHouse/ClickHouse/pull/6352) ([tavplubix](https://github.com/tavplubix)) +- When determining shards of a `Distributed` table to be covered by a read query (for `optimize_skip_unused_shards` = 1) ClickHouse now checks conditions from both the `prewhere` and `where` clauses of the select statement. [\#6521](https://github.com/ClickHouse/ClickHouse/pull/6521) ([Alexander Kazakov](https://github.com/Akazz)) +- Enabled `SIMDJSON` for machines without AVX2 but with the SSE 4.2 and PCLMUL instruction set. [\#6285](https://github.com/ClickHouse/ClickHouse/issues/6285) [\#6320](https://github.com/ClickHouse/ClickHouse/pull/6320) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- ClickHouse can work on filesystems without `O_DIRECT` support (such as ZFS and BtrFS) without additional tuning. [\#4449](https://github.com/ClickHouse/ClickHouse/issues/4449) [\#6730](https://github.com/ClickHouse/ClickHouse/pull/6730) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Support predicate push-down for the final subquery. [\#6120](https://github.com/ClickHouse/ClickHouse/pull/6120) ([TCeason](https://github.com/TCeason)) [\#6162](https://github.com/ClickHouse/ClickHouse/pull/6162) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Better `JOIN ON` keys extraction [\#6131](https://github.com/ClickHouse/ClickHouse/pull/6131) ([Artem Zuikov](https://github.com/4ertus2)) +- Updated `SIMDJSON`. [\#6285](https://github.com/ClickHouse/ClickHouse/issues/6285). [\#6306](https://github.com/ClickHouse/ClickHouse/pull/6306) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Optimize selecting the smallest column for `SELECT count()` queries.
[\#6344](https://github.com/ClickHouse/ClickHouse/pull/6344) ([Amos Bird](https://github.com/amosbird)) +- Added `strict` parameter in `windowFunnel()`. When `strict` is set, `windowFunnel()` applies conditions only to unique values (see the example after this list). [\#6548](https://github.com/ClickHouse/ClickHouse/pull/6548) ([achimbab](https://github.com/achimbab)) +- Safer interface of `mysqlxx::Pool`. [\#6150](https://github.com/ClickHouse/ClickHouse/pull/6150) ([avasiliev](https://github.com/avasiliev)) +- The line width of the options when executing with the `--help` option now corresponds to the terminal size. [\#6590](https://github.com/ClickHouse/ClickHouse/pull/6590) ([dimarub2000](https://github.com/dimarub2000)) +- Disable “read in order” optimization for aggregation without keys. [\#6599](https://github.com/ClickHouse/ClickHouse/pull/6599) ([Anton Popov](https://github.com/CurtizJ)) +- HTTP status code for `INCORRECT_DATA` and `TYPE_MISMATCH` error codes was changed from default `500 Internal Server Error` to `400 Bad Request`. [\#6271](https://github.com/ClickHouse/ClickHouse/pull/6271) ([Alexander Rodin](https://github.com/a-rodin)) +- Move the Join object from `ExpressionAction` into `AnalyzedJoin`. `ExpressionAnalyzer` and `ExpressionAction` do not know about the `Join` class anymore. Its logic is hidden behind the `AnalyzedJoin` interface. [\#6801](https://github.com/ClickHouse/ClickHouse/pull/6801) ([Artem Zuikov](https://github.com/4ertus2)) +- Fixed possible deadlock of distributed queries when one of the shards is localhost but the query is sent via a network connection. [\#6759](https://github.com/ClickHouse/ClickHouse/pull/6759) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Changed the semantics of multiple-table `RENAME` to avoid possible deadlocks. [\#6757](https://github.com/ClickHouse/ClickHouse/issues/6757). [\#6756](https://github.com/ClickHouse/ClickHouse/pull/6756) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Rewritten MySQL compatibility server to prevent loading the full packet payload in memory. Decreased memory consumption for each connection to approximately `2 * DBMS_DEFAULT_BUFFER_SIZE` (read/write buffers). [\#5811](https://github.com/ClickHouse/ClickHouse/pull/5811) ([Yuriy Baranov](https://github.com/yurriy)) +- Move AST alias interpreting logic out of the parser, which doesn’t have to know anything about query semantics. [\#6108](https://github.com/ClickHouse/ClickHouse/pull/6108) ([Artem Zuikov](https://github.com/4ertus2)) +- Slightly safer parsing of `NamesAndTypesList`. [\#6408](https://github.com/ClickHouse/ClickHouse/issues/6408). [\#6410](https://github.com/ClickHouse/ClickHouse/pull/6410) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- `clickhouse-copier`: Allow using `where_condition` from the config with the `partition_key` alias in queries for checking partition existence (earlier it was used only in data-reading queries). [\#6577](https://github.com/ClickHouse/ClickHouse/pull/6577) ([proller](https://github.com/proller)) +- Added an optional message argument in `throwIf` (see the example after this list). ([\#5772](https://github.com/ClickHouse/ClickHouse/issues/5772)) [\#6329](https://github.com/ClickHouse/ClickHouse/pull/6329) ([Vdimir](https://github.com/Vdimir)) +- A server exception raised while sending insertion data is now processed in the client as well.
[\#5891](https://github.com/ClickHouse/ClickHouse/issues/5891) [\#6711](https://github.com/ClickHouse/ClickHouse/pull/6711) ([dimarub2000](https://github.com/dimarub2000)) +- Added a metric `DistributedFilesToInsert` that shows the total number of files in the filesystem that are selected to send to remote servers by Distributed tables. The number is summed across all shards. [\#6600](https://github.com/ClickHouse/ClickHouse/pull/6600) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Move most of the JOIN preparation logic from `ExpressionAction/ExpressionAnalyzer` to `AnalyzedJoin`. [\#6785](https://github.com/ClickHouse/ClickHouse/pull/6785) ([Artem Zuikov](https://github.com/4ertus2)) +- Fix TSan [warning](https://clickhouse-test-reports.s3.yandex.net/6399/c1c1d1daa98e199e620766f1bd06a5921050a00d/functional_stateful_tests_(thread).html) ‘lock-order-inversion’. [\#6740](https://github.com/ClickHouse/ClickHouse/pull/6740) ([Vasily Nemkov](https://github.com/Enmk)) +- Better information messages about lack of Linux capabilities. Log fatal errors with the “fatal” level, which will make them easier to find in `system.text_log`. [\#6441](https://github.com/ClickHouse/ClickHouse/pull/6441) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- When dumping temporary data to disk to restrict memory usage during `GROUP BY` or `ORDER BY` is enabled, the free disk space was not checked. The fix adds a new setting `min_free_disk_space`: when the free disk space is smaller than this threshold, the query stops and throws `ErrorCodes::NOT_ENOUGH_SPACE`. [\#6678](https://github.com/ClickHouse/ClickHouse/pull/6678) ([Weiqing Xu](https://github.com/weiqxu)) [\#6691](https://github.com/ClickHouse/ClickHouse/pull/6691) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Removed recursive rwlock by thread. It made no sense, because threads are reused between queries. A `SELECT` query may acquire a lock in one thread, hold a lock from another thread and exit from the first thread. At the same time, the first thread can be reused by a `DROP` query. This would lead to false “Attempt to acquire exclusive lock recursively” messages. [\#6771](https://github.com/ClickHouse/ClickHouse/pull/6771) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Split `ExpressionAnalyzer.appendJoin()`. Prepare a place in `ExpressionAnalyzer` for `MergeJoin`. [\#6524](https://github.com/ClickHouse/ClickHouse/pull/6524) ([Artem Zuikov](https://github.com/4ertus2)) +- Added `mysql_native_password` authentication plugin to MySQL compatibility server. [\#6194](https://github.com/ClickHouse/ClickHouse/pull/6194) ([Yuriy Baranov](https://github.com/yurriy)) +- Fewer `clock_gettime` calls; fixed ABI compatibility between debug/release in `Allocator` (insignificant issue). [\#6197](https://github.com/ClickHouse/ClickHouse/pull/6197) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Move `collectUsedColumns` from `ExpressionAnalyzer` to `SyntaxAnalyzer`. `SyntaxAnalyzer` makes `required_source_columns` itself now. [\#6416](https://github.com/ClickHouse/ClickHouse/pull/6416) ([Artem Zuikov](https://github.com/4ertus2)) +- Add setting `joined_subquery_requires_alias` to require aliases for subselects and table functions in `FROM` when more than one table is present (i.e. queries with JOINs). [\#6733](https://github.com/ClickHouse/ClickHouse/pull/6733) ([Artem Zuikov](https://github.com/4ertus2)) +- Extract `GetAggregatesVisitor` class from `ExpressionAnalyzer`.
[\#6458](https://github.com/ClickHouse/ClickHouse/pull/6458) ([Artem Zuikov](https://github.com/4ertus2)) +- `system.query_log`: change data type of `type` column to `Enum`. [\#6265](https://github.com/ClickHouse/ClickHouse/pull/6265) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) +- Static linking of `sha256_password` authentication plugin. [\#6512](https://github.com/ClickHouse/ClickHouse/pull/6512) ([Yuriy Baranov](https://github.com/yurriy)) +- Avoid extra dependency for the setting `compile` to work. In previous versions, the user may get errors like `cannot open crti.o`, `unable to find library -lc` etc. [\#6309](https://github.com/ClickHouse/ClickHouse/pull/6309) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- More validation of the input that may come from a malicious replica. [\#6303](https://github.com/ClickHouse/ClickHouse/pull/6303) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Now the `clickhouse-obfuscator` file is available in the `clickhouse-client` package. In previous versions it was available as `clickhouse obfuscator` (with whitespace). [\#5816](https://github.com/ClickHouse/ClickHouse/issues/5816) [\#6609](https://github.com/ClickHouse/ClickHouse/pull/6609) ([dimarub2000](https://github.com/dimarub2000)) +- Fixed deadlock when we have at least two queries that read at least two tables in different order and another query that performs a DDL operation on one of the tables. Fixed another very rare deadlock. [\#6764](https://github.com/ClickHouse/ClickHouse/pull/6764) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Added `os_thread_ids` column to `system.processes` and `system.query_log` for better debugging possibilities. [\#6763](https://github.com/ClickHouse/ClickHouse/pull/6763) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- A workaround for PHP mysqlnd extension bugs which occur when `sha256_password` is used as a default authentication plugin (described in [\#6031](https://github.com/ClickHouse/ClickHouse/issues/6031)). [\#6113](https://github.com/ClickHouse/ClickHouse/pull/6113) ([Yuriy Baranov](https://github.com/yurriy)) +- Remove unneeded place with changed nullability columns. [\#6693](https://github.com/ClickHouse/ClickHouse/pull/6693) ([Artem Zuikov](https://github.com/4ertus2)) +- Set default value of `queue_max_wait_ms` to zero, because the current value (five seconds) makes no sense. There are rare circumstances when this setting has any use. Added settings `replace_running_query_max_wait_ms`, `kafka_max_wait_ms` and `connection_pool_max_wait_ms` for disambiguation. [\#6692](https://github.com/ClickHouse/ClickHouse/pull/6692) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Extract `SelectQueryExpressionAnalyzer` from `ExpressionAnalyzer`. Keep the last one for non-select queries. [\#6499](https://github.com/ClickHouse/ClickHouse/pull/6499) ([Artem Zuikov](https://github.com/4ertus2)) +- Removed duplicating input and output formats. [\#6239](https://github.com/ClickHouse/ClickHouse/pull/6239) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +- Allow the user to override `poll_interval` and `idle_connection_timeout` settings on connection. [\#6230](https://github.com/ClickHouse/ClickHouse/pull/6230) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- `MergeTree` now has an additional option `ttl_only_drop_parts` (disabled by default) to avoid partial pruning of parts, so that they are dropped completely when all rows in a part are expired (see the TTL example after this list). [\#6191](https://github.com/ClickHouse/ClickHouse/pull/6191) ([Sergi Vladykin](https://github.com/svladykin)) +- Type checks for set index functions. Throw an exception if a function got a wrong type. This fixes the fuzz test with UBSan. [\#6511](https://github.com/ClickHouse/ClickHouse/pull/6511) ([Nikita Vasilev](https://github.com/nikvas0))
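A combined sketch for the TTL-related items above, `SYSTEM STOP/START TTL MERGES` and the `ttl_only_drop_parts` option (table and column names are illustrative):

```sql
CREATE TABLE events
(
    d DateTime,
    x UInt64
)
ENGINE = MergeTree
ORDER BY d
TTL d + INTERVAL 1 MONTH
SETTINGS ttl_only_drop_parts = 1; -- drop whole expired parts instead of pruning rows

SYSTEM STOP TTL MERGES;  -- temporarily disallow merges that filter expired values
SYSTEM START TTL MERGES;
```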
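A hedged sketch of the new `strict` mode of `windowFunnel()`; the `events` table and its columns are assumptions:

```sql
-- With 'strict', repeated identical events do not advance the funnel.
SELECT
    uid,
    windowFunnel(3600, 'strict')(ts, event = 'view', event = 'cart', event = 'buy') AS level
FROM events
GROUP BY uid;
```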
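The optional message argument of `throwIf` in its minimal form:

```sql
SELECT throwIf(1 = 1, 'Custom error text'); -- fails with the supplied message
```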
+ +#### Performance Improvement {#performance-improvement-2} + +- Optimize queries with an `ORDER BY expressions` clause, where `expressions` have a coinciding prefix with the sorting key in `MergeTree` tables. This optimization is controlled by the `optimize_read_in_order` setting (see the example below). [\#6054](https://github.com/ClickHouse/ClickHouse/pull/6054) [\#6629](https://github.com/ClickHouse/ClickHouse/pull/6629) ([Anton Popov](https://github.com/CurtizJ)) +- Allow using multiple threads during parts loading and removal. [\#6372](https://github.com/ClickHouse/ClickHouse/issues/6372) [\#6074](https://github.com/ClickHouse/ClickHouse/issues/6074) [\#6438](https://github.com/ClickHouse/ClickHouse/pull/6438) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Implemented a batch variant of updating aggregate function states. It may lead to performance benefits. [\#6435](https://github.com/ClickHouse/ClickHouse/pull/6435) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Using the `FastOps` library for functions `exp`, `log`, `sigmoid`, `tanh`. FastOps is a fast vector math library from Michael Parakhin (Yandex CTO). Improved performance of `exp` and `log` functions more than 6 times. The functions `exp` and `log` from a `Float32` argument will return `Float32` (in previous versions they always returned `Float64`). Now `exp(nan)` may return `inf`. The result of the `exp` and `log` functions may not be the nearest machine representable number to the true answer. [\#6254](https://github.com/ClickHouse/ClickHouse/pull/6254) ([alexey-milovidov](https://github.com/alexey-milovidov)) Using Danila Kutenin’s variant to make fastops work [\#6317](https://github.com/ClickHouse/ClickHouse/pull/6317) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Disable consecutive key optimization for `UInt8/16`. [\#6298](https://github.com/ClickHouse/ClickHouse/pull/6298) [\#6701](https://github.com/ClickHouse/ClickHouse/pull/6701) ([akuzm](https://github.com/akuzm)) +- Improved performance of the `simdjson` library by getting rid of dynamic allocation in `ParsedJson::Iterator`. [\#6479](https://github.com/ClickHouse/ClickHouse/pull/6479) ([Vitaly Baranov](https://github.com/vitlibar)) +- Pre-fault pages when allocating memory with `mmap()`. [\#6667](https://github.com/ClickHouse/ClickHouse/pull/6667) ([akuzm](https://github.com/akuzm)) +- Fix performance bug in `Decimal` comparison. [\#6380](https://github.com/ClickHouse/ClickHouse/pull/6380) ([Artem Zuikov](https://github.com/4ertus2))
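A sketch of the read-in-order optimization; `hits` is an illustrative table whose sorting key is assumed to start with `(CounterID, EventDate)`:

```sql
-- The ORDER BY shares a prefix with the table's sorting key, so the result
-- can be streamed in storage order instead of being fully sorted.
SELECT CounterID, EventDate
FROM hits
ORDER BY CounterID, EventDate
LIMIT 10
SETTINGS optimize_read_in_order = 1;
```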
+ +#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-4} + +- Remove Compiler (runtime template instantiation) because we’ve won over its performance. [\#6646](https://github.com/ClickHouse/ClickHouse/pull/6646) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Added a performance test to show degradation of performance in gcc-9 in a more isolated way. [\#6302](https://github.com/ClickHouse/ClickHouse/pull/6302) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Added table function `numbers_mt`, which is a multithreaded version of `numbers`. Updated performance tests with hash functions. [\#6554](https://github.com/ClickHouse/ClickHouse/pull/6554) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +- Comparison mode in `clickhouse-benchmark` [\#6220](https://github.com/ClickHouse/ClickHouse/issues/6220) [\#6343](https://github.com/ClickHouse/ClickHouse/pull/6343) ([dimarub2000](https://github.com/dimarub2000)) +- Best effort for printing stack traces. Also added `SIGPROF` as a debugging signal to print the stack trace of a running thread. [\#6529](https://github.com/ClickHouse/ClickHouse/pull/6529) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Every function in its own file, part 10. [\#6321](https://github.com/ClickHouse/ClickHouse/pull/6321) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Remove doubled const `TABLE_IS_READ_ONLY`. [\#6566](https://github.com/ClickHouse/ClickHouse/pull/6566) ([filimonov](https://github.com/filimonov)) +- Formatting changes for `StringHashMap` PR [\#5417](https://github.com/ClickHouse/ClickHouse/issues/5417). [\#6700](https://github.com/ClickHouse/ClickHouse/pull/6700) ([akuzm](https://github.com/akuzm)) +- Better subquery for join creation in `ExpressionAnalyzer`. [\#6824](https://github.com/ClickHouse/ClickHouse/pull/6824) ([Artem Zuikov](https://github.com/4ertus2)) +- Remove a redundant condition (found by PVS Studio). [\#6775](https://github.com/ClickHouse/ClickHouse/pull/6775) ([akuzm](https://github.com/akuzm)) +- Separate the hash table interface for `ReverseIndex`. [\#6672](https://github.com/ClickHouse/ClickHouse/pull/6672) ([akuzm](https://github.com/akuzm)) +- Refactoring of settings. [\#6689](https://github.com/ClickHouse/ClickHouse/pull/6689) ([alesapin](https://github.com/alesapin)) +- Add comments for `set` index functions. [\#6319](https://github.com/ClickHouse/ClickHouse/pull/6319) ([Nikita Vasilev](https://github.com/nikvas0)) +- Increase OOM score in debug version on Linux. [\#6152](https://github.com/ClickHouse/ClickHouse/pull/6152) ([akuzm](https://github.com/akuzm)) +- HDFS HA now works in debug build. [\#6650](https://github.com/ClickHouse/ClickHouse/pull/6650) ([Weiqing Xu](https://github.com/weiqxu)) +- Added a test to `transform_query_for_external_database`. [\#6388](https://github.com/ClickHouse/ClickHouse/pull/6388) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Add a test for multiple materialized views for a Kafka table. [\#6509](https://github.com/ClickHouse/ClickHouse/pull/6509) ([Ivan](https://github.com/abyss7)) +- Make a better build scheme. [\#6500](https://github.com/ClickHouse/ClickHouse/pull/6500) ([Ivan](https://github.com/abyss7)) +- Fixed the `test_external_dictionaries` integration test in case it was executed under a non-root user. [\#6507](https://github.com/ClickHouse/ClickHouse/pull/6507) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +- The bug reproduces when the total size of written packets exceeds `DBMS_DEFAULT_BUFFER_SIZE`. [\#6204](https://github.com/ClickHouse/ClickHouse/pull/6204) ([Yuriy Baranov](https://github.com/yurriy)) +- Added a test for the `RENAME` table race condition [\#6752](https://github.com/ClickHouse/ClickHouse/pull/6752) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Avoid data race on Settings in `KILL QUERY`. [\#6753](https://github.com/ClickHouse/ClickHouse/pull/6753) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Add an integration test for handling errors by a cache dictionary.
[\#6755](https://github.com/ClickHouse/ClickHouse/pull/6755) ([Vitaly Baranov](https://github.com/vitlibar)) +- Disable parsing of ELF object files on Mac OS, because it makes no sense. [\#6578](https://github.com/ClickHouse/ClickHouse/pull/6578) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Attempt to make the changelog generator better. [\#6327](https://github.com/ClickHouse/ClickHouse/pull/6327) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Added the `-Wshadow` switch to GCC. [\#6325](https://github.com/ClickHouse/ClickHouse/pull/6325) ([kreuzerkrieg](https://github.com/kreuzerkrieg)) +- Removed obsolete code for `mimalloc` support. [\#6715](https://github.com/ClickHouse/ClickHouse/pull/6715) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- `zlib-ng` determines x86 capabilities and saves this info to global variables. This is done in the `deflateInit` call, which may be made by different threads simultaneously. To avoid multithreaded writes, do it on library startup. [\#6141](https://github.com/ClickHouse/ClickHouse/pull/6141) ([akuzm](https://github.com/akuzm)) +- Regression test for a bug in JOIN which was fixed in [\#5192](https://github.com/ClickHouse/ClickHouse/issues/5192). [\#6147](https://github.com/ClickHouse/ClickHouse/pull/6147) ([Bakhtiyor Ruziev](https://github.com/theruziev)) +- Fixed MSan report. [\#6144](https://github.com/ClickHouse/ClickHouse/pull/6144) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fix flapping TTL test. [\#6782](https://github.com/ClickHouse/ClickHouse/pull/6782) ([Anton Popov](https://github.com/CurtizJ)) +- Fixed false data race in `MergeTreeDataPart::is_frozen` field. [\#6583](https://github.com/ClickHouse/ClickHouse/pull/6583) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixed timeouts in fuzz test. In the previous version, it managed to find a false hangup in query `SELECT * FROM numbers_mt(gccMurmurHash(''))`. [\#6582](https://github.com/ClickHouse/ClickHouse/pull/6582) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Added debug checks to `static_cast` of columns. [\#6581](https://github.com/ClickHouse/ClickHouse/pull/6581) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Support for Oracle Linux in official RPM packages. [\#6356](https://github.com/ClickHouse/ClickHouse/issues/6356) [\#6585](https://github.com/ClickHouse/ClickHouse/pull/6585) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Changed json perftests from `once` to `loop` type. [\#6536](https://github.com/ClickHouse/ClickHouse/pull/6536) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +- `odbc-bridge.cpp` defines `main()` so it should not be included in `clickhouse-lib`. [\#6538](https://github.com/ClickHouse/ClickHouse/pull/6538) ([Orivej Desh](https://github.com/orivej)) +- Test for crash in `FULL|RIGHT JOIN` with nulls in the right table’s keys. [\#6362](https://github.com/ClickHouse/ClickHouse/pull/6362) ([Artem Zuikov](https://github.com/4ertus2)) +- Added a test for the limit on expansion of aliases just in case. [\#6442](https://github.com/ClickHouse/ClickHouse/pull/6442) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Switched from `boost::filesystem` to `std::filesystem` where appropriate. [\#6253](https://github.com/ClickHouse/ClickHouse/pull/6253) [\#6385](https://github.com/ClickHouse/ClickHouse/pull/6385) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Added RPM packages to the website.
[\#6251](https://github.com/ClickHouse/ClickHouse/pull/6251) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Add a test for the fixed `Unknown identifier` exception in `IN` section. [\#6708](https://github.com/ClickHouse/ClickHouse/pull/6708) ([Artem Zuikov](https://github.com/4ertus2)) +- Simplify `shared_ptr_helper` because people were facing difficulties understanding it. [\#6675](https://github.com/ClickHouse/ClickHouse/pull/6675) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Added performance tests for the fixed Gorilla and DoubleDelta codecs. [\#6179](https://github.com/ClickHouse/ClickHouse/pull/6179) ([Vasily Nemkov](https://github.com/Enmk)) +- Split the integration test `test_dictionaries` into 4 separate tests. [\#6776](https://github.com/ClickHouse/ClickHouse/pull/6776) ([Vitaly Baranov](https://github.com/vitlibar)) +- Fix PVS-Studio warning in `PipelineExecutor`. [\#6777](https://github.com/ClickHouse/ClickHouse/pull/6777) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +- Allow using the `library` dictionary source with ASan. [\#6482](https://github.com/ClickHouse/ClickHouse/pull/6482) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Added an option to generate a changelog from a list of PRs. [\#6350](https://github.com/ClickHouse/ClickHouse/pull/6350) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Lock the `TinyLog` storage when reading. [\#6226](https://github.com/ClickHouse/ClickHouse/pull/6226) ([akuzm](https://github.com/akuzm)) +- Check for broken symlinks in CI. [\#6634](https://github.com/ClickHouse/ClickHouse/pull/6634) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Increase timeout for the “stack overflow” test because it may take a long time in debug build. [\#6637](https://github.com/ClickHouse/ClickHouse/pull/6637) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Added a check for double whitespaces. [\#6643](https://github.com/ClickHouse/ClickHouse/pull/6643) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fix `new/delete` memory tracking when building with sanitizers. Tracking is not clear. It only prevents memory limit exceptions in tests. [\#6450](https://github.com/ClickHouse/ClickHouse/pull/6450) ([Artem Zuikov](https://github.com/4ertus2)) +- Enabled back the check of undefined symbols while linking. [\#6453](https://github.com/ClickHouse/ClickHouse/pull/6453) ([Ivan](https://github.com/abyss7)) +- Avoid rebuilding `hyperscan` every day. [\#6307](https://github.com/ClickHouse/ClickHouse/pull/6307) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixed UBSan report in `ProtobufWriter`. [\#6163](https://github.com/ClickHouse/ClickHouse/pull/6163) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Don’t allow using the query profiler with sanitizers because it is not compatible. [\#6769](https://github.com/ClickHouse/ClickHouse/pull/6769) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Add a test for reloading a dictionary after a failure, by timer. [\#6114](https://github.com/ClickHouse/ClickHouse/pull/6114) ([Vitaly Baranov](https://github.com/vitlibar)) +- Fix inconsistency in `PipelineExecutor::prepareProcessor` argument type. [\#6494](https://github.com/ClickHouse/ClickHouse/pull/6494) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +- Added a test for bad URIs. [\#6493](https://github.com/ClickHouse/ClickHouse/pull/6493) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Added more checks to the `CAST` function.
This should yield more information about the segmentation fault in the fuzz test. [\#6346](https://github.com/ClickHouse/ClickHouse/pull/6346) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +- Added `gcc-9` support to the `docker/builder` container that builds the image locally. [\#6333](https://github.com/ClickHouse/ClickHouse/pull/6333) ([Gleb Novikov](https://github.com/NanoBjorn)) +- Test for primary key with `LowCardinality(String)`. [\#5044](https://github.com/ClickHouse/ClickHouse/issues/5044) [\#6219](https://github.com/ClickHouse/ClickHouse/pull/6219) ([dimarub2000](https://github.com/dimarub2000)) +- Fixed tests affected by slow stack trace printing. [\#6315](https://github.com/ClickHouse/ClickHouse/pull/6315) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Add a test case for the crash in `groupUniqArray` fixed in [\#6029](https://github.com/ClickHouse/ClickHouse/pull/6029). [\#4402](https://github.com/ClickHouse/ClickHouse/issues/4402) [\#6129](https://github.com/ClickHouse/ClickHouse/pull/6129) ([akuzm](https://github.com/akuzm)) +- Fixed index mutations tests. [\#6645](https://github.com/ClickHouse/ClickHouse/pull/6645) ([Nikita Vasilev](https://github.com/nikvas0)) +- In performance test, do not read the query log for queries we didn’t run. [\#6427](https://github.com/ClickHouse/ClickHouse/pull/6427) ([akuzm](https://github.com/akuzm)) +- Materialized views now can be created with any LowCardinality types regardless of the setting about suspicious LowCardinality types. [\#6428](https://github.com/ClickHouse/ClickHouse/pull/6428) ([Olga Khvostikova](https://github.com/stavrolia)) +- Updated tests for the `send_logs_level` setting. [\#6207](https://github.com/ClickHouse/ClickHouse/pull/6207) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +- Fix build under gcc-8.2. [\#6196](https://github.com/ClickHouse/ClickHouse/pull/6196) ([Max Akhmedov](https://github.com/zlobober)) +- Fix build with internal libc++. [\#6724](https://github.com/ClickHouse/ClickHouse/pull/6724) ([Ivan](https://github.com/abyss7)) +- Fix shared build with the `rdkafka` library [\#6101](https://github.com/ClickHouse/ClickHouse/pull/6101) ([Ivan](https://github.com/abyss7)) +- Fixes for Mac OS build (incomplete). [\#6390](https://github.com/ClickHouse/ClickHouse/pull/6390) ([alexey-milovidov](https://github.com/alexey-milovidov)) [\#6429](https://github.com/ClickHouse/ClickHouse/pull/6429) ([alex-zaitsev](https://github.com/alex-zaitsev)) +- Fix “splitted” build. [\#6618](https://github.com/ClickHouse/ClickHouse/pull/6618) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Other build fixes: [\#6186](https://github.com/ClickHouse/ClickHouse/pull/6186) ([Amos Bird](https://github.com/amosbird)) [\#6486](https://github.com/ClickHouse/ClickHouse/pull/6486) [\#6348](https://github.com/ClickHouse/ClickHouse/pull/6348) ([vxider](https://github.com/Vxider)) [\#6744](https://github.com/ClickHouse/ClickHouse/pull/6744) ([Ivan](https://github.com/abyss7)) [\#6016](https://github.com/ClickHouse/ClickHouse/pull/6016) [\#6421](https://github.com/ClickHouse/ClickHouse/pull/6421) [\#6491](https://github.com/ClickHouse/ClickHouse/pull/6491) ([proller](https://github.com/proller)) + +#### Backward Incompatible Change {#backward-incompatible-change-3} + +- Removed the rarely used table function `catBoostPool` and storage `CatBoostPool`. If you have used this table function, please write an email to `clickhouse-feedback@yandex-team.com`. Note that CatBoost integration remains and will be supported.
[\#6279](https://github.com/ClickHouse/ClickHouse/pull/6279) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Disable `ANY RIGHT JOIN` and `ANY FULL JOIN` by default. Set the `any_join_distinct_right_table_keys` setting to enable them. [\#5126](https://github.com/ClickHouse/ClickHouse/issues/5126) [\#6351](https://github.com/ClickHouse/ClickHouse/pull/6351) ([Artem Zuikov](https://github.com/4ertus2)) + +## ClickHouse release 19.13 {#clickhouse-release-19-13} + +### ClickHouse release 19.13.6.51, 2019-10-02 {#clickhouse-release-19-13-6-51-2019-10-02} + +#### Bug Fix {#bug-fix-9} + +- This release also contains all bug fixes from 19.11.12.69. + +### ClickHouse release 19.13.5.44, 2019-09-20 {#clickhouse-release-19-13-5-44-2019-09-20} + +#### Bug Fix {#bug-fix-10} + +- This release also contains all bug fixes from 19.14.6.12. +- Fixed possible inconsistent state of a table while executing a `DROP` query for a replicated table while ZooKeeper is not accessible. [\#6045](https://github.com/ClickHouse/ClickHouse/issues/6045) [\#6413](https://github.com/ClickHouse/ClickHouse/pull/6413) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) +- Fix for data race in StorageMerge [\#6717](https://github.com/ClickHouse/ClickHouse/pull/6717) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fix a bug introduced in the query profiler which leads to an endless recv from a socket. [\#6386](https://github.com/ClickHouse/ClickHouse/pull/6386) ([alesapin](https://github.com/alesapin)) +- Fix excessive CPU usage while executing the `JSONExtractRaw` function over a boolean value. [\#6208](https://github.com/ClickHouse/ClickHouse/pull/6208) ([Vitaly Baranov](https://github.com/vitlibar)) +- Fixes the regression while pushing to a materialized view. [\#6415](https://github.com/ClickHouse/ClickHouse/pull/6415) ([Ivan](https://github.com/abyss7)) +- Table function `url` had a vulnerability that allowed the attacker to inject arbitrary HTTP headers in the request. This issue was found by [Nikita Tikhomirov](https://github.com/NSTikhomirov). [\#6466](https://github.com/ClickHouse/ClickHouse/pull/6466) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fix useless `AST` check in Set index. [\#6510](https://github.com/ClickHouse/ClickHouse/issues/6510) [\#6651](https://github.com/ClickHouse/ClickHouse/pull/6651) ([Nikita Vasilev](https://github.com/nikvas0)) +- Fixed parsing of `AggregateFunction` values embedded in a query. [\#6575](https://github.com/ClickHouse/ClickHouse/issues/6575) [\#6773](https://github.com/ClickHouse/ClickHouse/pull/6773) ([Zhichang Yu](https://github.com/yuzhichang)) +- Fixed wrong behaviour of the `trim` family of functions. [\#6647](https://github.com/ClickHouse/ClickHouse/pull/6647) ([alexey-milovidov](https://github.com/alexey-milovidov)) + +### ClickHouse release 19.13.4.32, 2019-09-10 {#clickhouse-release-19-13-4-32-2019-09-10} + +#### Bug Fix {#bug-fix-11} + +- This release also contains all security fixes from 19.11.9.52 and 19.11.10.54. +- Fixed data race in `system.parts` table and `ALTER` query. [\#6245](https://github.com/ClickHouse/ClickHouse/issues/6245) [\#6513](https://github.com/ClickHouse/ClickHouse/pull/6513) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixed mismatched headers in streams that happened in case of reading from an empty distributed table with sample and prewhere.
[\#6167](https://github.com/ClickHouse/ClickHouse/issues/6167) ([Lixiang Qian](https://github.com/fancyqlx)) [\#6823](https://github.com/ClickHouse/ClickHouse/pull/6823) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +- Fixed crash when using `IN` clause with a subquery with a tuple. [\#6125](https://github.com/ClickHouse/ClickHouse/issues/6125) [\#6550](https://github.com/ClickHouse/ClickHouse/pull/6550) ([tavplubix](https://github.com/tavplubix)) +- Fix case with same column names in `GLOBAL JOIN ON` section. [\#6181](https://github.com/ClickHouse/ClickHouse/pull/6181) ([Artem Zuikov](https://github.com/4ertus2)) +- Fix crash when casting types to `Decimal` that do not support it. Throw an exception instead. [\#6297](https://github.com/ClickHouse/ClickHouse/pull/6297) ([Artem Zuikov](https://github.com/4ertus2)) +- Fixed crash in `extractAll()` function. [\#6644](https://github.com/ClickHouse/ClickHouse/pull/6644) ([Artem Zuikov](https://github.com/4ertus2)) +- Query transformation for `MySQL`, `ODBC`, `JDBC` table functions now works properly for `SELECT WHERE` queries with multiple `AND` expressions. [\#6381](https://github.com/ClickHouse/ClickHouse/issues/6381) [\#6676](https://github.com/ClickHouse/ClickHouse/pull/6676) ([dimarub2000](https://github.com/dimarub2000)) +- Added previous declaration checks for MySQL 8 integration. [\#6569](https://github.com/ClickHouse/ClickHouse/pull/6569) ([Rafael David Tinoco](https://github.com/rafaeldtinoco)) + +#### Security Fix {#security-fix-1} + +- Fix two vulnerabilities in codecs in the decompression phase (a malicious user can fabricate compressed data that will lead to a buffer overflow in decompression). [\#6670](https://github.com/ClickHouse/ClickHouse/pull/6670) ([Artem Zuikov](https://github.com/4ertus2)) + +### ClickHouse release 19.13.3.26, 2019-08-22 {#clickhouse-release-19-13-3-26-2019-08-22} + +#### Bug Fix {#bug-fix-12} + +- Fix `ALTER TABLE ... UPDATE` query for tables with `enable_mixed_granularity_parts=1`. [\#6543](https://github.com/ClickHouse/ClickHouse/pull/6543) ([alesapin](https://github.com/alesapin)) +- Fix NPE when using IN clause with a subquery with a tuple. [\#6125](https://github.com/ClickHouse/ClickHouse/issues/6125) [\#6550](https://github.com/ClickHouse/ClickHouse/pull/6550) ([tavplubix](https://github.com/tavplubix)) +- Fixed an issue that if a stale replica becomes alive, it may still have data parts that were removed by DROP PARTITION. [\#6522](https://github.com/ClickHouse/ClickHouse/issues/6522) [\#6523](https://github.com/ClickHouse/ClickHouse/pull/6523) ([tavplubix](https://github.com/tavplubix)) +- Fixed an issue with parsing CSV [\#6426](https://github.com/ClickHouse/ClickHouse/issues/6426) [\#6559](https://github.com/ClickHouse/ClickHouse/pull/6559) ([tavplubix](https://github.com/tavplubix)) +- Fixed data race in system.parts table and ALTER query. This fixes [\#6245](https://github.com/ClickHouse/ClickHouse/issues/6245). [\#6513](https://github.com/ClickHouse/ClickHouse/pull/6513) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixed wrong code in mutations that may lead to memory corruption. Fixed segfault with read of address `0x14c0` that may happen due to concurrent `DROP TABLE` and `SELECT` from `system.parts` or `system.parts_columns`. Fixed race condition in preparation of mutation queries. Fixed deadlock caused by `OPTIMIZE` of Replicated tables and concurrent modification operations like ALTERs.
[\#6514](https://github.com/ClickHouse/ClickHouse/pull/6514) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed possible data loss after `ALTER DELETE` query on table with skipping index. [\#6224](https://github.com/ClickHouse/ClickHouse/issues/6224) [\#6282](https://github.com/ClickHouse/ClickHouse/pull/6282) ([Nikita Vasilev](https://github.com/nikvas0))
+
+#### Security Fix {#security-fix-2}
+
+- If an attacker has write access to ZooKeeper and is able to run a custom server available from the network where ClickHouse runs, they can create a custom-built malicious server that will act as a ClickHouse replica and register it in ZooKeeper. When another replica fetches a data part from the malicious replica, it can force clickhouse-server to write to an arbitrary path on the filesystem. Found by Eldar Zaitov, information security team at Yandex. [\#6247](https://github.com/ClickHouse/ClickHouse/pull/6247) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse release 19.13.2.19, 2019-08-14 {#clickhouse-release-19-13-2-19-2019-08-14}
+
+#### New Feature {#new-feature-5}
+
+- Sampling profiler on query level. [Example](https://gist.github.com/alexey-milovidov/92758583dd41c24c360fdb8d6a4da194). [\#4247](https://github.com/ClickHouse/ClickHouse/issues/4247) ([laplab](https://github.com/laplab)) [\#6124](https://github.com/ClickHouse/ClickHouse/pull/6124) ([alexey-milovidov](https://github.com/alexey-milovidov)) [\#6250](https://github.com/ClickHouse/ClickHouse/pull/6250) [\#6283](https://github.com/ClickHouse/ClickHouse/pull/6283) [\#6386](https://github.com/ClickHouse/ClickHouse/pull/6386)
+- Allow to specify a list of columns with `COLUMNS('regexp')` expression that works like a more sophisticated variant of the `*` asterisk (see the sketch after this list). [\#5951](https://github.com/ClickHouse/ClickHouse/pull/5951) ([mfridental](https://github.com/mfridental)), ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `CREATE TABLE AS table_function()` is now possible [\#6057](https://github.com/ClickHouse/ClickHouse/pull/6057) ([dimarub2000](https://github.com/dimarub2000))
+- Adam optimizer for stochastic gradient descent is used by default in `stochasticLinearRegression()` and `stochasticLogisticRegression()` aggregate functions, because it shows good quality with almost no tuning. [\#6000](https://github.com/ClickHouse/ClickHouse/pull/6000) ([Quid37](https://github.com/Quid37))
+- Added functions for working with custom week numbers [\#5212](https://github.com/ClickHouse/ClickHouse/pull/5212) ([Andy Yang](https://github.com/andyyzh))
+- `RENAME` queries now work with all storages. [\#5953](https://github.com/ClickHouse/ClickHouse/pull/5953) ([Ivan](https://github.com/abyss7))
+- Now the client receives logs from the server at any desired level by setting `send_logs_level`, regardless of the log level specified in the server settings. [\#5964](https://github.com/ClickHouse/ClickHouse/pull/5964) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
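+
+A minimal sketch of the `COLUMNS('regexp')` expression mentioned above (the table and column names are hypothetical):
+
+```sql
+CREATE TABLE quotes (id UInt64, price_open Float64, price_close Float64)
+ENGINE = MergeTree ORDER BY id;
+
+-- Selects every column whose name matches the regexp,
+-- here price_open and price_close, without listing them by hand.
+SELECT COLUMNS('^price') FROM quotes;
+```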
+
+#### Backward Incompatible Change {#backward-incompatible-change-4}
+
+- The setting `input_format_defaults_for_omitted_fields` is enabled by default. Inserts in Distributed tables need this setting to be the same on cluster (you need to set it before rolling update). It enables calculation of complex default expressions for omitted fields in `JSONEachRow` and `CSV*` formats. It should be the expected behavior but may lead to negligible performance difference. [\#6043](https://github.com/ClickHouse/ClickHouse/pull/6043) ([Artem Zuikov](https://github.com/4ertus2)), [\#5625](https://github.com/ClickHouse/ClickHouse/pull/5625) ([akuzm](https://github.com/akuzm))
+
+#### Experimental features {#experimental-features}
+
+- New query processing pipeline. Use `experimental_use_processors=1` option to enable it. Use it at your own risk. [\#4914](https://github.com/ClickHouse/ClickHouse/pull/4914) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+
+#### Bug Fix {#bug-fix-13}
+
+- Kafka integration has been fixed in this version.
+- Fixed `DoubleDelta` encoding of `Int64` for large `DoubleDelta` values, improved `DoubleDelta` encoding for random data for `Int32`. [\#5998](https://github.com/ClickHouse/ClickHouse/pull/5998) ([Vasily Nemkov](https://github.com/Enmk))
+- Fixed overestimation of `max_rows_to_read` if the setting `merge_tree_uniform_read_distribution` is set to 0. [\#6019](https://github.com/ClickHouse/ClickHouse/pull/6019) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Improvement {#improvement-4}
+
+- Throw an exception if a `config.d` file doesn’t have the same root element as the config file [\#6123](https://github.com/ClickHouse/ClickHouse/pull/6123) ([dimarub2000](https://github.com/dimarub2000))
+
+#### Performance Improvement {#performance-improvement-3}
+
+- Optimize `count()`. Now it uses the smallest column (if possible). [\#6028](https://github.com/ClickHouse/ClickHouse/pull/6028) ([Amos Bird](https://github.com/amosbird))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-5}
+
+- Report memory usage in performance tests. [\#5899](https://github.com/ClickHouse/ClickHouse/pull/5899) ([akuzm](https://github.com/akuzm))
+- Fix build with external `libcxx` [\#6010](https://github.com/ClickHouse/ClickHouse/pull/6010) ([Ivan](https://github.com/abyss7))
+- Fix shared build with `rdkafka` library [\#6101](https://github.com/ClickHouse/ClickHouse/pull/6101) ([Ivan](https://github.com/abyss7))
+
+## ClickHouse release 19.11 {#clickhouse-release-19-11}
+
+### ClickHouse release 19.11.13.74, 2019-11-01 {#clickhouse-release-19-11-13-74-2019-11-01}
+
+#### Bug Fix {#bug-fix-14}
+
+- Fixed rare crash in `ALTER MODIFY COLUMN` and vertical merge when one of merged/altered parts is empty (0 rows). [\#6780](https://github.com/ClickHouse/ClickHouse/pull/6780) ([alesapin](https://github.com/alesapin))
+- Manual update of `SIMDJSON`. This fixes possible flooding of stderr files with bogus json diagnostic messages. [\#7548](https://github.com/ClickHouse/ClickHouse/pull/7548) ([Alexander Kazakov](https://github.com/Akazz))
+- Fixed bug with `mrk` file extension for mutations ([alesapin](https://github.com/alesapin))
+
+### ClickHouse release 19.11.12.69, 2019-10-02 {#clickhouse-release-19-11-12-69-2019-10-02}
+
+#### Bug Fix {#bug-fix-15}
+
+- Fixed performance degradation of index analysis on complex keys on large tables. This fixes [\#6924](https://github.com/ClickHouse/ClickHouse/issues/6924). [\#7075](https://github.com/ClickHouse/ClickHouse/pull/7075) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Avoid rare SIGSEGV while sending data in tables with Distributed engine (`Failed to send batch: file with index XXXXX is absent`). [\#7032](https://github.com/ClickHouse/ClickHouse/pull/7032) ([Azat Khuzhin](https://github.com/azat))
+- Fix `Unknown identifier` with multiple joins. This fixes [\#5254](https://github.com/ClickHouse/ClickHouse/issues/5254).
[\#7022](https://github.com/ClickHouse/ClickHouse/pull/7022) ([Artem Zuikov](https://github.com/4ertus2))
+
+### ClickHouse release 19.11.11.57, 2019-09-13 {#clickhouse-release-19-11-11-57-2019-09-13}
+
+- Fix logical error causing segfaults when selecting from an empty Kafka topic. [\#6902](https://github.com/ClickHouse/ClickHouse/issues/6902) [\#6909](https://github.com/ClickHouse/ClickHouse/pull/6909) ([Ivan](https://github.com/abyss7))
+- Fix for function `arrayEnumerateUniqRanked` with empty arrays in parameters. [\#6928](https://github.com/ClickHouse/ClickHouse/pull/6928) ([proller](https://github.com/proller))
+
+### ClickHouse release 19.11.10.54, 2019-09-10 {#clickhouse-release-19-11-10-54-2019-09-10}
+
+#### Bug Fix {#bug-fix-16}
+
+- Store offsets for Kafka messages manually to be able to commit them all at once for all partitions. Fixes potential duplication in the “one consumer - many partitions” scenario. [\#6872](https://github.com/ClickHouse/ClickHouse/pull/6872) ([Ivan](https://github.com/abyss7))
+
+### ClickHouse release 19.11.9.52, 2019-09-6 {#clickhouse-release-19-11-9-52-2019-09-6}
+
+- Improve error handling in cache dictionaries. [\#6737](https://github.com/ClickHouse/ClickHouse/pull/6737) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fixed bug in function `arrayEnumerateUniqRanked`. [\#6779](https://github.com/ClickHouse/ClickHouse/pull/6779) ([proller](https://github.com/proller))
+- Fix `JSONExtract` function while extracting a `Tuple` from JSON. [\#6718](https://github.com/ClickHouse/ClickHouse/pull/6718) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fixed possible data loss after `ALTER DELETE` query on table with skipping index. [\#6224](https://github.com/ClickHouse/ClickHouse/issues/6224) [\#6282](https://github.com/ClickHouse/ClickHouse/pull/6282) ([Nikita Vasilev](https://github.com/nikvas0))
+- Fixed performance test. [\#6392](https://github.com/ClickHouse/ClickHouse/pull/6392) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Parquet: Fix reading boolean columns. [\#6579](https://github.com/ClickHouse/ClickHouse/pull/6579) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed wrong behaviour of `nullIf` function for constant arguments. [\#6518](https://github.com/ClickHouse/ClickHouse/pull/6518) ([Guillaume Tassery](https://github.com/YiuRULE)) [\#6580](https://github.com/ClickHouse/ClickHouse/pull/6580) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix Kafka messages duplication problem on normal server restart. [\#6597](https://github.com/ClickHouse/ClickHouse/pull/6597) ([Ivan](https://github.com/abyss7))
+- Fixed an issue when a long `ALTER UPDATE` or `ALTER DELETE` may prevent regular merges from running. Prevent mutations from executing if there are not enough free threads available. [\#6502](https://github.com/ClickHouse/ClickHouse/issues/6502) [\#6617](https://github.com/ClickHouse/ClickHouse/pull/6617) ([tavplubix](https://github.com/tavplubix))
+- Fixed error with processing “timezone” in server configuration file. [\#6709](https://github.com/ClickHouse/ClickHouse/pull/6709) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix Kafka tests.
[\#6805](https://github.com/ClickHouse/ClickHouse/pull/6805) ([Ivan](https://github.com/abyss7))
+
+#### Security Fix {#security-fix-3}
+
+- If an attacker has write access to ZooKeeper and is able to run a custom server available from the network where ClickHouse runs, they can create a custom-built malicious server that will act as a ClickHouse replica and register it in ZooKeeper. When another replica fetches a data part from the malicious replica, it can force clickhouse-server to write to an arbitrary path on the filesystem. Found by Eldar Zaitov, information security team at Yandex. [\#6247](https://github.com/ClickHouse/ClickHouse/pull/6247) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse release 19.11.8.46, 2019-08-22 {#clickhouse-release-19-11-8-46-2019-08-22}
+
+#### Bug Fix {#bug-fix-17}
+
+- Fix `ALTER TABLE ... UPDATE` query for tables with `enable_mixed_granularity_parts=1`. [\#6543](https://github.com/ClickHouse/ClickHouse/pull/6543) ([alesapin](https://github.com/alesapin))
+- Fix NPE when using IN clause with a subquery with a tuple. [\#6125](https://github.com/ClickHouse/ClickHouse/issues/6125) [\#6550](https://github.com/ClickHouse/ClickHouse/pull/6550) ([tavplubix](https://github.com/tavplubix))
+- Fixed an issue where a stale replica that becomes alive may still have data parts that were removed by DROP PARTITION. [\#6522](https://github.com/ClickHouse/ClickHouse/issues/6522) [\#6523](https://github.com/ClickHouse/ClickHouse/pull/6523) ([tavplubix](https://github.com/tavplubix))
+- Fixed issue with parsing CSV [\#6426](https://github.com/ClickHouse/ClickHouse/issues/6426) [\#6559](https://github.com/ClickHouse/ClickHouse/pull/6559) ([tavplubix](https://github.com/tavplubix))
+- Fixed data race in system.parts table and ALTER query. This fixes [\#6245](https://github.com/ClickHouse/ClickHouse/issues/6245). [\#6513](https://github.com/ClickHouse/ClickHouse/pull/6513) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed wrong code in mutations that may lead to memory corruption. Fixed segfault with read of address `0x14c0` that may happen due to concurrent `DROP TABLE` and `SELECT` from `system.parts` or `system.parts_columns`. Fixed race condition in preparation of mutation queries. Fixed deadlock caused by `OPTIMIZE` of Replicated tables and concurrent modification operations like ALTERs. [\#6514](https://github.com/ClickHouse/ClickHouse/pull/6514) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse release 19.11.7.40, 2019-08-14 {#clickhouse-release-19-11-7-40-2019-08-14}
+
+#### Bug fix {#bug-fix-18}
+
+- Kafka integration has been fixed in this version.
+- Fix segfault when using `arrayReduce` for constant arguments. [\#6326](https://github.com/ClickHouse/ClickHouse/pull/6326) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed `toFloat()` monotonicity. [\#6374](https://github.com/ClickHouse/ClickHouse/pull/6374) ([dimarub2000](https://github.com/dimarub2000))
+- Fix segfault with enabled `optimize_skip_unused_shards` and missing sharding key. [\#6384](https://github.com/ClickHouse/ClickHouse/pull/6384) ([CurtizJ](https://github.com/CurtizJ))
+- Fixed logic of `arrayEnumerateUniqRanked` function. [\#6423](https://github.com/ClickHouse/ClickHouse/pull/6423) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Removed extra verbose logging from MySQL handler.
[\#6389](https://github.com/ClickHouse/ClickHouse/pull/6389) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix wrong behavior and possible segfaults in `topK` and `topKWeighted` aggregate functions. [\#6404](https://github.com/ClickHouse/ClickHouse/pull/6404) ([CurtizJ](https://github.com/CurtizJ))
+- Do not expose virtual columns in `system.columns` table. This is required for backward compatibility. [\#6406](https://github.com/ClickHouse/ClickHouse/pull/6406) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix bug with memory allocation for string fields in complex key cache dictionary. [\#6447](https://github.com/ClickHouse/ClickHouse/pull/6447) ([alesapin](https://github.com/alesapin))
+- Fix bug with enabling adaptive granularity when creating new replica for `Replicated*MergeTree` table. [\#6452](https://github.com/ClickHouse/ClickHouse/pull/6452) ([alesapin](https://github.com/alesapin))
+- Fix infinite loop when reading Kafka messages. [\#6354](https://github.com/ClickHouse/ClickHouse/pull/6354) ([abyss7](https://github.com/abyss7))
+- Fixed the possibility that a fabricated query could cause a server crash due to stack overflow in the SQL parser, and the possibility of stack overflow in `Merge` and `Distributed` tables [\#6433](https://github.com/ClickHouse/ClickHouse/pull/6433) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed Gorilla encoding error on small sequences. [\#6444](https://github.com/ClickHouse/ClickHouse/pull/6444) ([Enmk](https://github.com/Enmk))
+
+#### Improvement {#improvement-5}
+
+- Allow user to override `poll_interval` and `idle_connection_timeout` settings on connection. [\#6230](https://github.com/ClickHouse/ClickHouse/pull/6230) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse release 19.11.5.28, 2019-08-05 {#clickhouse-release-19-11-5-28-2019-08-05}
+
+#### Bug fix {#bug-fix-19}
+
+- Fixed the possibility of hanging queries when server is overloaded. [\#6301](https://github.com/ClickHouse/ClickHouse/pull/6301) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix FPE in yandexConsistentHash function. This fixes [\#6304](https://github.com/ClickHouse/ClickHouse/issues/6304). [\#6126](https://github.com/ClickHouse/ClickHouse/pull/6126) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed bug in conversion of `LowCardinality` types in `AggregateFunctionFactory`. This fixes [\#6257](https://github.com/ClickHouse/ClickHouse/issues/6257). [\#6281](https://github.com/ClickHouse/ClickHouse/pull/6281) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix parsing of `bool` settings from `true` and `false` strings in configuration files. [\#6278](https://github.com/ClickHouse/ClickHouse/pull/6278) ([alesapin](https://github.com/alesapin))
+- Fix rare bug with incompatible stream headers in queries to `Distributed` table over `MergeTree` table when part of `WHERE` moves to `PREWHERE`. [\#6236](https://github.com/ClickHouse/ClickHouse/pull/6236) ([alesapin](https://github.com/alesapin))
+- Fixed overflow in integer division of signed type to unsigned type. This fixes [\#6214](https://github.com/ClickHouse/ClickHouse/issues/6214). [\#6233](https://github.com/ClickHouse/ClickHouse/pull/6233) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Backward Incompatible Change {#backward-incompatible-change-5}
+
+- `Kafka` is still broken.
+
+### ClickHouse release 19.11.4.24, 2019-08-01 {#clickhouse-release-19-11-4-24-2019-08-01}
+
+#### Bug Fix {#bug-fix-20}
+
+- Fix bug with writing secondary indices marks with adaptive granularity. [\#6126](https://github.com/ClickHouse/ClickHouse/pull/6126) ([alesapin](https://github.com/alesapin))
+- Fix `WITH ROLLUP` and `WITH CUBE` modifiers of `GROUP BY` with two-level aggregation. [\#6225](https://github.com/ClickHouse/ClickHouse/pull/6225) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed hang in `JSONExtractRaw` function. Fixed [\#6195](https://github.com/ClickHouse/ClickHouse/issues/6195) [\#6198](https://github.com/ClickHouse/ClickHouse/pull/6198) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix segfault in ExternalLoader::reloadOutdated(). [\#6082](https://github.com/ClickHouse/ClickHouse/pull/6082) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fixed the case when the server may close listening sockets but not shut down, and continue serving remaining queries. You may end up with two running clickhouse-server processes. Sometimes, the server may return an error `bad_function_call` for remaining queries. [\#6231](https://github.com/ClickHouse/ClickHouse/pull/6231) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed useless and incorrect condition on update field for initial loading of external dictionaries via ODBC, MySQL, ClickHouse and HTTP. This fixes [\#6069](https://github.com/ClickHouse/ClickHouse/issues/6069) [\#6083](https://github.com/ClickHouse/ClickHouse/pull/6083) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed irrelevant exception in cast of `LowCardinality(Nullable)` to not-Nullable column in case it doesn’t contain Nulls (e.g. in a query like `SELECT CAST(CAST('Hello' AS LowCardinality(Nullable(String))) AS String)`). [\#6094](https://github.com/ClickHouse/ClickHouse/issues/6094) [\#6119](https://github.com/ClickHouse/ClickHouse/pull/6119) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix non-deterministic result of “uniq” aggregate function in extremely rare cases. The bug was present in all ClickHouse versions. [\#6058](https://github.com/ClickHouse/ClickHouse/pull/6058) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a segfault when a slightly too high CIDR is set on the function `IPv6CIDRToRange`. [\#6068](https://github.com/ClickHouse/ClickHouse/pull/6068) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Fixed small memory leak when the server throws many exceptions from many different contexts. [\#6144](https://github.com/ClickHouse/ClickHouse/pull/6144) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix the situation when the consumer got paused before subscription and was not resumed afterwards. [\#6075](https://github.com/ClickHouse/ClickHouse/pull/6075) ([Ivan](https://github.com/abyss7)) Note that Kafka is broken in this version.
+- Clear the Kafka data buffer from the previous read operation that was completed with an error [\#6026](https://github.com/ClickHouse/ClickHouse/pull/6026) ([Nikolay](https://github.com/bopohaa)) Note that Kafka is broken in this version.
+- Since `StorageMergeTree::background_task_handle` is initialized in `startup()`, the `MergeTreeBlockOutputStream::write()` may try to use it before initialization. Just check if it is initialized.
[\#6080](https://github.com/ClickHouse/ClickHouse/pull/6080) ([Ivan](https://github.com/abyss7))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-6}
+
+- Added official `rpm` packages. [\#5740](https://github.com/ClickHouse/ClickHouse/pull/5740) ([proller](https://github.com/proller)) ([alesapin](https://github.com/alesapin))
+- Add an ability to build `.rpm` and `.tgz` packages with `packager` script. [\#5769](https://github.com/ClickHouse/ClickHouse/pull/5769) ([alesapin](https://github.com/alesapin))
+- Fixes for “Arcadia” build system. [\#6223](https://github.com/ClickHouse/ClickHouse/pull/6223) ([proller](https://github.com/proller))
+
+#### Backward Incompatible Change {#backward-incompatible-change-6}
+
+- `Kafka` is broken in this version.
+
+### ClickHouse release 19.11.3.11, 2019-07-18 {#clickhouse-release-19-11-3-11-2019-07-18}
+
+#### New Feature {#new-feature-6}
+
+- Added support for prepared statements. [\#5331](https://github.com/ClickHouse/ClickHouse/pull/5331/) ([Alexander](https://github.com/sanych73)) [\#5630](https://github.com/ClickHouse/ClickHouse/pull/5630) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `DoubleDelta` and `Gorilla` column codecs [\#5600](https://github.com/ClickHouse/ClickHouse/pull/5600) ([Vasily Nemkov](https://github.com/Enmk))
+- Added `os_thread_priority` setting that allows to control the “nice” value of query processing threads that is used by OS to adjust dynamic scheduling priority. It requires `CAP_SYS_NICE` capabilities to work. This implements [\#5858](https://github.com/ClickHouse/ClickHouse/issues/5858) [\#5909](https://github.com/ClickHouse/ClickHouse/pull/5909) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Implement `_topic`, `_offset`, `_key` columns for Kafka engine [\#5382](https://github.com/ClickHouse/ClickHouse/pull/5382) ([Ivan](https://github.com/abyss7)) Note that Kafka is broken in this version.
+- Add aggregate function combinator `-Resample` [\#5590](https://github.com/ClickHouse/ClickHouse/pull/5590) ([hcz](https://github.com/hczhcz))
+- Aggregate functions `groupArrayMovingSum(win_size)(x)` and `groupArrayMovingAvg(win_size)(x)`, which calculate moving sum/avg with or without window-size limitation (see the sketch after this list). [\#5595](https://github.com/ClickHouse/ClickHouse/pull/5595) ([inv2004](https://github.com/inv2004))
+- Add synonym `arrayFlatten` \<-\> `flatten` [\#5764](https://github.com/ClickHouse/ClickHouse/pull/5764) ([hcz](https://github.com/hczhcz))
+- Integrate H3 function `geoToH3` from Uber. [\#4724](https://github.com/ClickHouse/ClickHouse/pull/4724) ([Remen Ivan](https://github.com/BHYCHIK)) [\#5805](https://github.com/ClickHouse/ClickHouse/pull/5805) ([alexey-milovidov](https://github.com/alexey-milovidov))
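+
+A minimal sketch of the moving-window aggregates mentioned above (the input values are hypothetical):
+
+```sql
+SELECT
+    groupArrayMovingSum(3)(x) AS moving_sum,  -- window of 3 rows
+    groupArrayMovingAvg(3)(x) AS moving_avg
+FROM (SELECT arrayJoin([1, 2, 3, 4, 5]) AS x);
+```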
+
+#### Bug Fix {#bug-fix-21}
+
+- Implement DNS cache with asynchronous update. Separate thread resolves all hosts and updates DNS cache with period (setting `dns_cache_update_period`). It should help when IPs of hosts change frequently. [\#5857](https://github.com/ClickHouse/ClickHouse/pull/5857) ([Anton Popov](https://github.com/CurtizJ))
+- Fix segfault in `Delta` codec which affects columns with values smaller than 32 bits. The bug led to random memory corruption. [\#5786](https://github.com/ClickHouse/ClickHouse/pull/5786) ([alesapin](https://github.com/alesapin))
+- Fix segfault in TTL merge with non-physical columns in block. [\#5819](https://github.com/ClickHouse/ClickHouse/pull/5819) ([Anton Popov](https://github.com/CurtizJ))
+- Fix rare bug in checking of part with `LowCardinality` column. Previously `checkDataPart` always failed for part with `LowCardinality` column. [\#5832](https://github.com/ClickHouse/ClickHouse/pull/5832) ([alesapin](https://github.com/alesapin))
+- Avoid hanging connections when server thread pool is full. It is important for connections from `remote` table function or connections to a shard without replicas when there is a long connection timeout. This fixes [\#5878](https://github.com/ClickHouse/ClickHouse/issues/5878) [\#5881](https://github.com/ClickHouse/ClickHouse/pull/5881) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Support for constant arguments to `evalMLModel` function. This fixes [\#5817](https://github.com/ClickHouse/ClickHouse/issues/5817) [\#5820](https://github.com/ClickHouse/ClickHouse/pull/5820) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed the issue when ClickHouse determined the default time zone as `UCT` instead of `UTC`. This fixes [\#5804](https://github.com/ClickHouse/ClickHouse/issues/5804). [\#5828](https://github.com/ClickHouse/ClickHouse/pull/5828) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed buffer underflow in `visitParamExtractRaw`. This fixes [\#5901](https://github.com/ClickHouse/ClickHouse/issues/5901) [\#5902](https://github.com/ClickHouse/ClickHouse/pull/5902) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Now distributed `DROP/ALTER/TRUNCATE/OPTIMIZE ON CLUSTER` queries will be executed directly on leader replica. [\#5757](https://github.com/ClickHouse/ClickHouse/pull/5757) ([alesapin](https://github.com/alesapin))
+- Fix `coalesce` for `ColumnConst` with `ColumnNullable` + related changes. [\#5755](https://github.com/ClickHouse/ClickHouse/pull/5755) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix the `ReadBufferFromKafkaConsumer` so that it keeps reading new messages after `commit()` even if it was stalled before [\#5852](https://github.com/ClickHouse/ClickHouse/pull/5852) ([Ivan](https://github.com/abyss7))
+- Fix `FULL` and `RIGHT` JOIN results when joining on `Nullable` keys in right table. [\#5859](https://github.com/ClickHouse/ClickHouse/pull/5859) ([Artem Zuikov](https://github.com/4ertus2))
+- Possible fix of infinite sleeping of low-priority queries. [\#5842](https://github.com/ClickHouse/ClickHouse/pull/5842) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix race condition which caused some queries not to appear in query\_log after `SYSTEM FLUSH LOGS` query. [\#5456](https://github.com/ClickHouse/ClickHouse/issues/5456) [\#5685](https://github.com/ClickHouse/ClickHouse/pull/5685) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed `heap-use-after-free` ASan warning in ClusterCopier caused by a watch that tried to use an already removed copier object. [\#5871](https://github.com/ClickHouse/ClickHouse/pull/5871) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed wrong `StringRef` pointer returned by some implementations of `IColumn::deserializeAndInsertFromArena`. This bug affected only unit-tests. [\#5973](https://github.com/ClickHouse/ClickHouse/pull/5973) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Prevent source and intermediate array join columns from masking columns with the same name.
[\#5941](https://github.com/ClickHouse/ClickHouse/pull/5941) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix insert and select query to MySQL engine with MySQL style identifier quoting. [\#5704](https://github.com/ClickHouse/ClickHouse/pull/5704) ([Winter Zhang](https://github.com/zhang2014))
+- Now `CHECK TABLE` query can work with MergeTree engine family. It returns check status and message if any for each part (or file in case of simpler engines). Also, fix bug in fetch of a broken part. [\#5865](https://github.com/ClickHouse/ClickHouse/pull/5865) ([alesapin](https://github.com/alesapin))
+- Fix SPLIT\_SHARED\_LIBRARIES runtime [\#5793](https://github.com/ClickHouse/ClickHouse/pull/5793) ([Danila Kutenin](https://github.com/danlark1))
+- Fixed time zone initialization when `/etc/localtime` is a relative symlink like `../usr/share/zoneinfo/Europe/Moscow` [\#5922](https://github.com/ClickHouse/ClickHouse/pull/5922) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- clickhouse-copier: Fix use-after-free on shutdown [\#5752](https://github.com/ClickHouse/ClickHouse/pull/5752) ([proller](https://github.com/proller))
+- Updated `simdjson`. Fixed the issue that some invalid JSONs with zero bytes parsed successfully. [\#5938](https://github.com/ClickHouse/ClickHouse/pull/5938) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix shutdown of SystemLogs [\#5802](https://github.com/ClickHouse/ClickHouse/pull/5802) ([Anton Popov](https://github.com/CurtizJ))
+- Fix hanging when condition in invalidate\_query depends on a dictionary. [\#6011](https://github.com/ClickHouse/ClickHouse/pull/6011) ([Vitaly Baranov](https://github.com/vitlibar))
+
+#### Improvement {#improvement-6}
+
+- Allow unresolvable addresses in cluster configuration. They will be considered unavailable and tried to resolve at every connection attempt. This is especially useful for Kubernetes. This fixes [\#5714](https://github.com/ClickHouse/ClickHouse/issues/5714) [\#5924](https://github.com/ClickHouse/ClickHouse/pull/5924) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Close idle TCP connections (with one hour timeout by default). This is especially important for large clusters with multiple distributed tables on every server, because every server can possibly keep a connection pool to every other server, and after peak query concurrency, connections will stall. This fixes [\#5879](https://github.com/ClickHouse/ClickHouse/issues/5879) [\#5880](https://github.com/ClickHouse/ClickHouse/pull/5880) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Better quality of `topK` function. Changed the SpaceSaving set behavior to remove the last element if the new element has a bigger weight. [\#5833](https://github.com/ClickHouse/ClickHouse/issues/5833) [\#5850](https://github.com/ClickHouse/ClickHouse/pull/5850) ([Guillaume Tassery](https://github.com/YiuRULE))
+- URL functions that work with domains now work for incomplete URLs without a scheme [\#5725](https://github.com/ClickHouse/ClickHouse/pull/5725) ([alesapin](https://github.com/alesapin))
+- Checksums added to the `system.parts_columns` table. [\#5874](https://github.com/ClickHouse/ClickHouse/pull/5874) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Added `Enum` data type as a synonym for `Enum8` or `Enum16`. [\#5886](https://github.com/ClickHouse/ClickHouse/pull/5886) ([dimarub2000](https://github.com/dimarub2000))
+- Full bit transpose variant for `T64` codec. Could lead to better compression with `zstd`.
[\#5742](https://github.com/ClickHouse/ClickHouse/pull/5742) ([Artem Zuikov](https://github.com/4ertus2))
+- Condition on `startsWith` function can now use the primary key (see the sketch after this list). This fixes [\#5310](https://github.com/ClickHouse/ClickHouse/issues/5310) and [\#5882](https://github.com/ClickHouse/ClickHouse/issues/5882) [\#5919](https://github.com/ClickHouse/ClickHouse/pull/5919) ([dimarub2000](https://github.com/dimarub2000))
+- Allow to use `clickhouse-copier` with cross-replication cluster topology by permitting empty database name. [\#5745](https://github.com/ClickHouse/ClickHouse/pull/5745) ([nvartolomei](https://github.com/nvartolomei))
+- Use `UTC` as default timezone on a system without `tzdata` (e.g. bare Docker container). Before this patch, error message `Could not determine local time zone` was printed and server or client refused to start. [\#5827](https://github.com/ClickHouse/ClickHouse/pull/5827) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Returned back support for floating point argument in function `quantileTiming` for backward compatibility. [\#5911](https://github.com/ClickHouse/ClickHouse/pull/5911) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Show which table is missing a column in error messages. [\#5768](https://github.com/ClickHouse/ClickHouse/pull/5768) ([Ivan](https://github.com/abyss7))
+- Disallow running queries with the same query\_id by different users [\#5430](https://github.com/ClickHouse/ClickHouse/pull/5430) ([proller](https://github.com/proller))
+- More robust code for sending metrics to Graphite. It will work even during multiple long `RENAME TABLE` operations. [\#5875](https://github.com/ClickHouse/ClickHouse/pull/5875) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- More informative error messages will be displayed when ThreadPool cannot schedule a task for execution. This fixes [\#5305](https://github.com/ClickHouse/ClickHouse/issues/5305) [\#5801](https://github.com/ClickHouse/ClickHouse/pull/5801) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Inverting ngramSearch to be more intuitive [\#5807](https://github.com/ClickHouse/ClickHouse/pull/5807) ([Danila Kutenin](https://github.com/danlark1))
+- Add user parsing in HDFS engine builder [\#5946](https://github.com/ClickHouse/ClickHouse/pull/5946) ([akonyaev90](https://github.com/akonyaev90))
+- Update default value of `max_ast_elements` parameter [\#5933](https://github.com/ClickHouse/ClickHouse/pull/5933) ([Artem Konovalov](https://github.com/izebit))
+- Added a notion of obsolete settings. The obsolete setting `allow_experimental_low_cardinality_type` can be used with no effect. [0f15c01c6802f7ce1a1494c12c846be8c98944cd](https://github.com/ClickHouse/ClickHouse/commit/0f15c01c6802f7ce1a1494c12c846be8c98944cd) [Alexey Milovidov](https://github.com/alexey-milovidov)
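+
+A sketch of the `startsWith` primary-key condition mentioned above (the table is hypothetical; the point is that the prefix check can be served by the primary key index instead of a full scan):
+
+```sql
+CREATE TABLE urls (url String, hits UInt64)
+ENGINE = MergeTree ORDER BY url;
+
+-- The prefix condition can now be evaluated using the primary key on `url`.
+SELECT count() FROM urls WHERE startsWith(url, 'https://');
+```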
+
+#### Performance Improvement {#performance-improvement-4}
+
+- Increase number of streams to SELECT from Merge table for more uniform distribution of threads. Added setting `max_streams_multiplier_for_merge_tables`. This fixes [\#5797](https://github.com/ClickHouse/ClickHouse/issues/5797) [\#5915](https://github.com/ClickHouse/ClickHouse/pull/5915) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-7}
+
+- Add a backward compatibility test for client-server interaction with different versions of clickhouse. [\#5868](https://github.com/ClickHouse/ClickHouse/pull/5868) ([alesapin](https://github.com/alesapin))
+- Test coverage information in every commit and pull request. [\#5896](https://github.com/ClickHouse/ClickHouse/pull/5896) ([alesapin](https://github.com/alesapin))
+- Cooperate with address sanitizer to support our custom allocators (`Arena` and `ArenaWithFreeLists`) for better debugging of “use-after-free” errors. [\#5728](https://github.com/ClickHouse/ClickHouse/pull/5728) ([akuzm](https://github.com/akuzm))
+- Switch to [LLVM libunwind implementation](https://github.com/llvm-mirror/libunwind) for C++ exception handling and for printing stack traces [\#4828](https://github.com/ClickHouse/ClickHouse/pull/4828) ([Nikita Lapkov](https://github.com/laplab))
+- Add two more warnings from -Weverything [\#5923](https://github.com/ClickHouse/ClickHouse/pull/5923) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Allow to build ClickHouse with Memory Sanitizer. [\#3949](https://github.com/ClickHouse/ClickHouse/pull/3949) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed ubsan report about `bitTest` function in fuzz test. [\#5943](https://github.com/ClickHouse/ClickHouse/pull/5943) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Docker: added possibility to init a ClickHouse instance which requires authentication. [\#5727](https://github.com/ClickHouse/ClickHouse/pull/5727) ([Korviakov Andrey](https://github.com/shurshun))
+- Update librdkafka to version 1.1.0 [\#5872](https://github.com/ClickHouse/ClickHouse/pull/5872) ([Ivan](https://github.com/abyss7))
+- Add global timeout for integration tests and disable some of them in tests code. [\#5741](https://github.com/ClickHouse/ClickHouse/pull/5741) ([alesapin](https://github.com/alesapin))
+- Fix some ThreadSanitizer failures. [\#5854](https://github.com/ClickHouse/ClickHouse/pull/5854) ([akuzm](https://github.com/akuzm))
+- The `--no-undefined` option forces the linker to check all external names for existence while linking. It’s very useful to track real dependencies between libraries in the split build mode. [\#5855](https://github.com/ClickHouse/ClickHouse/pull/5855) ([Ivan](https://github.com/abyss7))
+- Added performance test for [\#5797](https://github.com/ClickHouse/ClickHouse/issues/5797) [\#5914](https://github.com/ClickHouse/ClickHouse/pull/5914) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed compatibility with gcc-7. [\#5840](https://github.com/ClickHouse/ClickHouse/pull/5840) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added support for gcc-9. This fixes [\#5717](https://github.com/ClickHouse/ClickHouse/issues/5717) [\#5774](https://github.com/ClickHouse/ClickHouse/pull/5774) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed error when libunwind could be linked incorrectly. [\#5948](https://github.com/ClickHouse/ClickHouse/pull/5948) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a few warnings found by PVS-Studio. [\#5921](https://github.com/ClickHouse/ClickHouse/pull/5921) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added initial support for `clang-tidy` static analyzer.
[\#5806](https://github.com/ClickHouse/ClickHouse/pull/5806) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Convert BSD/Linux endian macros (`be64toh` and `htobe64`) to the Mac OS X equivalents [\#5785](https://github.com/ClickHouse/ClickHouse/pull/5785) ([Fu Chen](https://github.com/fredchenbj))
+- Improved integration tests guide. [\#5796](https://github.com/ClickHouse/ClickHouse/pull/5796) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fix build on macOS + gcc9 [\#5822](https://github.com/ClickHouse/ClickHouse/pull/5822) ([filimonov](https://github.com/filimonov))
+- Fix a hard-to-spot typo: aggreAGte -\> aggregate. [\#5753](https://github.com/ClickHouse/ClickHouse/pull/5753) ([akuzm](https://github.com/akuzm))
+- Fix FreeBSD build [\#5760](https://github.com/ClickHouse/ClickHouse/pull/5760) ([proller](https://github.com/proller))
+- Add link to experimental YouTube channel to website [\#5845](https://github.com/ClickHouse/ClickHouse/pull/5845) ([Ivan Blinkov](https://github.com/blinkov))
+- CMake: add option for coverage flags: WITH\_COVERAGE [\#5776](https://github.com/ClickHouse/ClickHouse/pull/5776) ([proller](https://github.com/proller))
+- Fix initial size of some inline PODArrays. [\#5787](https://github.com/ClickHouse/ClickHouse/pull/5787) ([akuzm](https://github.com/akuzm))
+- clickhouse-server.postinst: fix os detection for centos 6 [\#5788](https://github.com/ClickHouse/ClickHouse/pull/5788) ([proller](https://github.com/proller))
+- Added Arch Linux package generation. [\#5719](https://github.com/ClickHouse/ClickHouse/pull/5719) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Split Common/config.h by libs (dbms) [\#5715](https://github.com/ClickHouse/ClickHouse/pull/5715) ([proller](https://github.com/proller))
+- Fixes for “Arcadia” build platform [\#5795](https://github.com/ClickHouse/ClickHouse/pull/5795) ([proller](https://github.com/proller))
+- Fixes for unconventional build (gcc9, no submodules) [\#5792](https://github.com/ClickHouse/ClickHouse/pull/5792) ([proller](https://github.com/proller))
+- Require explicit type in unalignedStore because it was proven to be bug-prone [\#5791](https://github.com/ClickHouse/ClickHouse/pull/5791) ([akuzm](https://github.com/akuzm))
+- Fix macOS build [\#5830](https://github.com/ClickHouse/ClickHouse/pull/5830) ([filimonov](https://github.com/filimonov))
+- Performance test concerning the new JIT feature with bigger dataset, as requested here [\#5263](https://github.com/ClickHouse/ClickHouse/issues/5263) [\#5887](https://github.com/ClickHouse/ClickHouse/pull/5887) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Run stateful tests in stress test [12693e568722f11e19859742f56428455501fd2a](https://github.com/ClickHouse/ClickHouse/commit/12693e568722f11e19859742f56428455501fd2a) ([alesapin](https://github.com/alesapin))
+
+#### Backward Incompatible Change {#backward-incompatible-change-7}
+
+- `Kafka` is broken in this version.
+- Enable `adaptive_index_granularity` = 10MB by default for new `MergeTree` tables. If you created new MergeTree tables on version 19.11+, downgrading to versions prior to 19.6 will be impossible. [\#5628](https://github.com/ClickHouse/ClickHouse/pull/5628) ([alesapin](https://github.com/alesapin))
+- Removed obsolete undocumented embedded dictionaries that were used by Yandex.Metrica. The functions `OSIn`, `SEIn`, `OSToRoot`, `SEToRoot`, `OSHierarchy`, `SEHierarchy` are no longer available.
If you are using these functions, write an email to clickhouse-feedback@yandex-team.com. Note: at the last moment we decided to keep these functions for a while. [\#5780](https://github.com/ClickHouse/ClickHouse/pull/5780) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+## ClickHouse release 19.10 {#clickhouse-release-19-10}
+
+### ClickHouse release 19.10.1.5, 2019-07-12 {#clickhouse-release-19-10-1-5-2019-07-12}
+
+#### New Feature {#new-feature-7}
+
+- Add new column codec: `T64`. Made for (U)IntX/EnumX/Date(Time)/DecimalX columns. It should be good for columns with constant or small range values. The codec itself allows enlarging or shrinking the data type without re-compression (see the sketch after this list). [\#5557](https://github.com/ClickHouse/ClickHouse/pull/5557) ([Artem Zuikov](https://github.com/4ertus2))
+- Add database engine `MySQL` that allows viewing all the tables in a remote MySQL server [\#5599](https://github.com/ClickHouse/ClickHouse/pull/5599) ([Winter Zhang](https://github.com/zhang2014))
+- `bitmapContains` implementation. It’s 2x faster than `bitmapHasAny` if the second bitmap contains one element. [\#5535](https://github.com/ClickHouse/ClickHouse/pull/5535) ([Zhichang Yu](https://github.com/yuzhichang))
+- Support for `crc32` function (with behaviour exactly as in MySQL or PHP). Do not use it if you need a hash function. [\#5661](https://github.com/ClickHouse/ClickHouse/pull/5661) ([Remen Ivan](https://github.com/BHYCHIK))
+- Implemented `SYSTEM START/STOP DISTRIBUTED SENDS` queries to control asynchronous inserts into `Distributed` tables. [\#4935](https://github.com/ClickHouse/ClickHouse/pull/4935) ([Winter Zhang](https://github.com/zhang2014))
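+
+A sketch of the `T64` codec mentioned at the top of this list (the table is hypothetical):
+
+```sql
+CREATE TABLE metrics
+(
+    ts  DateTime,
+    -- T64 stores 64-value blocks bit-transposed; values with a small
+    -- range compress well, here combined with LZ4.
+    val UInt32 CODEC(T64, LZ4)
+)
+ENGINE = MergeTree ORDER BY ts;
+```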
+
+#### Bug Fix {#bug-fix-22}
+
+- Ignore query execution limits and max parts size for merge limits while executing mutations. [\#5659](https://github.com/ClickHouse/ClickHouse/pull/5659) ([Anton Popov](https://github.com/CurtizJ))
+- Fix bug which may lead to deduplication of normal blocks (extremely rare) and insertion of duplicate blocks (more often). [\#5549](https://github.com/ClickHouse/ClickHouse/pull/5549) ([alesapin](https://github.com/alesapin))
+- Fix of function `arrayEnumerateUniqRanked` for arguments with empty arrays [\#5559](https://github.com/ClickHouse/ClickHouse/pull/5559) ([proller](https://github.com/proller))
+- Don’t subscribe to Kafka topics without intent to poll any messages. [\#5698](https://github.com/ClickHouse/ClickHouse/pull/5698) ([Ivan](https://github.com/abyss7))
+- Make setting `join_use_nulls` have no effect for types that cannot be inside Nullable [\#5700](https://github.com/ClickHouse/ClickHouse/pull/5700) ([Olga Khvostikova](https://github.com/stavrolia))
+- Fixed `Incorrect size of index granularity` errors [\#5720](https://github.com/ClickHouse/ClickHouse/pull/5720) ([coraxster](https://github.com/coraxster))
+- Fix Float to Decimal conversion overflow [\#5607](https://github.com/ClickHouse/ClickHouse/pull/5607) ([coraxster](https://github.com/coraxster))
+- Flush buffer when `WriteBufferFromHDFS`’s destructor is called. This fixes writing into `HDFS`. [\#5684](https://github.com/ClickHouse/ClickHouse/pull/5684) ([Xindong Peng](https://github.com/eejoin))
+
+#### Improvement {#improvement-7}
+
+- Treat empty cells in `CSV` as default values when the setting `input_format_defaults_for_omitted_fields` is enabled. [\#5625](https://github.com/ClickHouse/ClickHouse/pull/5625) ([akuzm](https://github.com/akuzm))
+- Non-blocking loading of external dictionaries. [\#5567](https://github.com/ClickHouse/ClickHouse/pull/5567) ([Vitaly Baranov](https://github.com/vitlibar))
+- Network timeouts can be dynamically changed for already established connections according to the settings. [\#4558](https://github.com/ClickHouse/ClickHouse/pull/4558) ([Konstantin Podshumok](https://github.com/podshumok))
+- Using “public\_suffix\_list” for functions `firstSignificantSubdomain`, `cutToFirstSignificantSubdomain` (see the example after this list). It’s using a perfect hash table generated by `gperf` with a list generated from the file: https://publicsuffix.org/list/public\_suffix\_list.dat. (for example, now we recognize the domain `ac.uk` as non-significant). [\#5030](https://github.com/ClickHouse/ClickHouse/pull/5030) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Adopted `IPv6` data type in system tables; unified client info columns in `system.processes` and `system.query_log` [\#5640](https://github.com/ClickHouse/ClickHouse/pull/5640) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Using sessions for connections with MySQL compatibility protocol. \#5476 [\#5646](https://github.com/ClickHouse/ClickHouse/pull/5646) ([Yuriy Baranov](https://github.com/yurriy))
+- Support more `ALTER` queries `ON CLUSTER`. [\#5593](https://github.com/ClickHouse/ClickHouse/pull/5593) [\#5613](https://github.com/ClickHouse/ClickHouse/pull/5613) ([sundyli](https://github.com/sundy-li))
+- Support the `<logger>` section in `clickhouse-local` config file. [\#5540](https://github.com/ClickHouse/ClickHouse/pull/5540) ([proller](https://github.com/proller))
+- Allow running queries with the `remote` table function in `clickhouse-local` [\#5627](https://github.com/ClickHouse/ClickHouse/pull/5627) ([proller](https://github.com/proller))
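+
+An illustration of the public suffix list behaviour described above (the URLs are hypothetical; the first result follows the documented example for this change):
+
+```sql
+-- 'com.tr' is on the public suffix list, so the first significant
+-- subdomain is 'yandex', not 'com'.
+SELECT firstSignificantSubdomain('https://news.yandex.com.tr/');
+
+-- 'ac.uk' is likewise recognized as non-significant.
+SELECT cutToFirstSignificantSubdomain('https://news.example.ac.uk/');  -- 'example.ac.uk'
+```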
+
+#### Performance Improvement {#performance-improvement-5}
+
+- Add the possibility to write the final mark at the end of MergeTree columns. It allows to avoid useless reads for keys that are out of table data range. It is enabled only if adaptive index granularity is in use. [\#5624](https://github.com/ClickHouse/ClickHouse/pull/5624) ([alesapin](https://github.com/alesapin))
+- Improved performance of MergeTree tables on very slow filesystems by reducing number of `stat` syscalls. [\#5648](https://github.com/ClickHouse/ClickHouse/pull/5648) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed performance degradation in reading from MergeTree tables that was introduced in version 19.6. Fixes \#5631. [\#5633](https://github.com/ClickHouse/ClickHouse/pull/5633) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-8}
+
+- Implemented `TestKeeper` as an implementation of ZooKeeper interface used for testing [\#5643](https://github.com/ClickHouse/ClickHouse/pull/5643) ([alexey-milovidov](https://github.com/alexey-milovidov)) ([levushkin aleksej](https://github.com/alexey-milovidov))
+- From now on `.sql` tests can be run isolated by server, in parallel, with random database. It allows to run them faster, add new tests with custom server configurations, and be sure that different tests don’t affect each other. [\#5554](https://github.com/ClickHouse/ClickHouse/pull/5554) ([Ivan](https://github.com/abyss7))
+- Remove `` and `` from performance tests [\#5672](https://github.com/ClickHouse/ClickHouse/pull/5672) ([Olga Khvostikova](https://github.com/stavrolia))
+- Fixed “select\_format” performance test for `Pretty` formats [\#5642](https://github.com/ClickHouse/ClickHouse/pull/5642) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+## ClickHouse release 19.9 {#clickhouse-release-19-9}
+
+### ClickHouse release 19.9.3.31, 2019-07-05 {#clickhouse-release-19-9-3-31-2019-07-05}
+
+#### Bug Fix {#bug-fix-23}
+
+- Fix segfault in Delta codec which affects columns with values smaller than 32 bits. The bug led to random memory corruption. [\#5786](https://github.com/ClickHouse/ClickHouse/pull/5786) ([alesapin](https://github.com/alesapin))
+- Fix rare bug in checking of part with LowCardinality column. [\#5832](https://github.com/ClickHouse/ClickHouse/pull/5832) ([alesapin](https://github.com/alesapin))
+- Fix segfault in TTL merge with non-physical columns in block. [\#5819](https://github.com/ClickHouse/ClickHouse/pull/5819) ([Anton Popov](https://github.com/CurtizJ))
+- Fix potential infinite sleeping of low-priority queries. [\#5842](https://github.com/ClickHouse/ClickHouse/pull/5842) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix ClickHouse determining the default time zone as UCT instead of UTC. [\#5828](https://github.com/ClickHouse/ClickHouse/pull/5828) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix bug about executing distributed DROP/ALTER/TRUNCATE/OPTIMIZE ON CLUSTER queries on follower replica before leader replica. Now they will be executed directly on leader replica. [\#5757](https://github.com/ClickHouse/ClickHouse/pull/5757) ([alesapin](https://github.com/alesapin))
+- Fix race condition which caused some queries not to appear in query\_log instantly after SYSTEM FLUSH LOGS query. [\#5685](https://github.com/ClickHouse/ClickHouse/pull/5685) ([Anton Popov](https://github.com/CurtizJ))
+- Added missing support for constant arguments to `evalMLModel` function. [\#5820](https://github.com/ClickHouse/ClickHouse/pull/5820) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse release 19.9.2.4, 2019-06-24 {#clickhouse-release-19-9-2-4-2019-06-24}
+
+#### New Feature {#new-feature-8}
+
+- Print information about frozen parts in `system.parts` table. [\#5471](https://github.com/ClickHouse/ClickHouse/pull/5471) ([proller](https://github.com/proller))
+- Ask for the client password on clickhouse-client start on a TTY if it is not set in the arguments [\#5092](https://github.com/ClickHouse/ClickHouse/pull/5092) ([proller](https://github.com/proller))
+- Implement `dictGet` and `dictGetOrDefault` functions for Decimal types (see the sketch after this list). [\#5394](https://github.com/ClickHouse/ClickHouse/pull/5394) ([Artem Zuikov](https://github.com/4ertus2))
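+
+A sketch of the Decimal-typed dictionary functions mentioned above (`price_dict` is a hypothetical dictionary with a `Decimal64(2)` attribute named `price`):
+
+```sql
+SELECT
+    dictGet('price_dict', 'price', toUInt64(42)) AS price,
+    -- Returns the supplied Decimal default when the key is missing.
+    dictGetOrDefault('price_dict', 'price', toUInt64(0), toDecimal64(0, 2)) AS price_or_default;
+```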
+
+#### Improvement {#improvement-8}
+
+- Debian init: Add service stop timeout [\#5522](https://github.com/ClickHouse/ClickHouse/pull/5522) ([proller](https://github.com/proller))
+- Add setting that forbids (by default) creating tables with suspicious types for LowCardinality [\#5448](https://github.com/ClickHouse/ClickHouse/pull/5448) ([Olga Khvostikova](https://github.com/stavrolia))
+- Regression functions return model weights when not used as State in function `evalMLMethod`. [\#5411](https://github.com/ClickHouse/ClickHouse/pull/5411) ([Quid37](https://github.com/Quid37))
+- Rename and improve regression methods. [\#5492](https://github.com/ClickHouse/ClickHouse/pull/5492) ([Quid37](https://github.com/Quid37))
+- Clearer interfaces of string searchers. [\#5586](https://github.com/ClickHouse/ClickHouse/pull/5586) ([Danila Kutenin](https://github.com/danlark1))
+
+#### Bug Fix {#bug-fix-24}
+
+- Fix potential data loss in Kafka [\#5445](https://github.com/ClickHouse/ClickHouse/pull/5445) ([Ivan](https://github.com/abyss7))
+- Fix potential infinite loop in `PrettySpace` format when called with zero columns [\#5560](https://github.com/ClickHouse/ClickHouse/pull/5560) ([Olga Khvostikova](https://github.com/stavrolia))
+- Fixed UInt32 overflow bug in linear models. Allow evaluating an ML model for a non-constant model argument. [\#5516](https://github.com/ClickHouse/ClickHouse/pull/5516) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- `ALTER TABLE ... DROP INDEX IF EXISTS ...` should not raise an exception if the provided index does not exist [\#5524](https://github.com/ClickHouse/ClickHouse/pull/5524) ([Gleb Novikov](https://github.com/NanoBjorn))
+- Fix segfault with `bitmapHasAny` in scalar subquery [\#5528](https://github.com/ClickHouse/ClickHouse/pull/5528) ([Zhichang Yu](https://github.com/yuzhichang))
+- Fixed error when replication connection pool doesn’t retry resolving a host, even when the DNS cache was dropped. [\#5534](https://github.com/ClickHouse/ClickHouse/pull/5534) ([alesapin](https://github.com/alesapin))
+- Fixed `ALTER ... MODIFY TTL` on ReplicatedMergeTree. [\#5539](https://github.com/ClickHouse/ClickHouse/pull/5539) ([Anton Popov](https://github.com/CurtizJ))
+- Fix INSERT into Distributed table with MATERIALIZED column [\#5429](https://github.com/ClickHouse/ClickHouse/pull/5429) ([Azat Khuzhin](https://github.com/azat))
+- Fix bad alloc when truncating Join storage [\#5437](https://github.com/ClickHouse/ClickHouse/pull/5437) ([TCeason](https://github.com/TCeason))
+- In recent versions of the tzdata package, some files are now symlinks. The current mechanism for detecting the default timezone gets broken and gives wrong names for some timezones. Now at least we force the timezone name to the contents of TZ if provided. [\#5443](https://github.com/ClickHouse/ClickHouse/pull/5443) ([Ivan](https://github.com/abyss7))
+- Fix some extremely rare cases with MultiVolnitsky searcher when the constant needles in total are at least 16KB long. The algorithm missed or overwrote the previous results which can lead to the incorrect result of `multiSearchAny`. [\#5588](https://github.com/ClickHouse/ClickHouse/pull/5588) ([Danila Kutenin](https://github.com/danlark1))
+- Fix the issue when settings for ExternalData requests couldn’t use ClickHouse settings. Also, for now, settings `date_time_input_format` and `low_cardinality_allow_in_native_format` cannot be used because of the ambiguity of names (in external data it can be interpreted as table format and in the query it can be a setting). [\#5455](https://github.com/ClickHouse/ClickHouse/pull/5455) ([Danila Kutenin](https://github.com/danlark1))
+- Fix bug when parts were removed only from FS without dropping them from ZooKeeper.
[\#5520](https://github.com/ClickHouse/ClickHouse/pull/5520) ([alesapin](https://github.com/alesapin))
+- Remove debug logging from MySQL protocol [\#5478](https://github.com/ClickHouse/ClickHouse/pull/5478) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Skip ZNONODE during DDL query processing [\#5489](https://github.com/ClickHouse/ClickHouse/pull/5489) ([Azat Khuzhin](https://github.com/azat))
+- Fix mixing of `UNION ALL` result column types. There were cases with inconsistent data and column types of resulting columns. [\#5503](https://github.com/ClickHouse/ClickHouse/pull/5503) ([Artem Zuikov](https://github.com/4ertus2))
+- Throw an exception on wrong integers in `dictGetT` functions instead of crashing. [\#5446](https://github.com/ClickHouse/ClickHouse/pull/5446) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix wrong element\_count and load\_factor for hashed dictionary in `system.dictionaries` table. [\#5440](https://github.com/ClickHouse/ClickHouse/pull/5440) ([Azat Khuzhin](https://github.com/azat))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-9}
+
+- Fixed build without `Brotli` HTTP compression support (`ENABLE_BROTLI=OFF` cmake variable). [\#5521](https://github.com/ClickHouse/ClickHouse/pull/5521) ([Anton Yuzhaninov](https://github.com/citrin))
+- Include roaring.h as roaring/roaring.h [\#5523](https://github.com/ClickHouse/ClickHouse/pull/5523) ([Orivej Desh](https://github.com/orivej))
+- Fix gcc9 warnings in hyperscan (\#line directive is evil!) [\#5546](https://github.com/ClickHouse/ClickHouse/pull/5546) ([Danila Kutenin](https://github.com/danlark1))
+- Fix all warnings when compiling with gcc-9. Fix some contrib issues. Fix gcc9 ICE and submit it to bugzilla. [\#5498](https://github.com/ClickHouse/ClickHouse/pull/5498) ([Danila Kutenin](https://github.com/danlark1))
+- Fixed linking with lld [\#5477](https://github.com/ClickHouse/ClickHouse/pull/5477) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Remove unused specializations in dictionaries [\#5452](https://github.com/ClickHouse/ClickHouse/pull/5452) ([Artem Zuikov](https://github.com/4ertus2))
+- Improve performance tests for formatting and parsing tables for different types of files [\#5497](https://github.com/ClickHouse/ClickHouse/pull/5497) ([Olga Khvostikova](https://github.com/stavrolia))
+- Fixes for parallel test run [\#5506](https://github.com/ClickHouse/ClickHouse/pull/5506) ([proller](https://github.com/proller))
+- Docker: use configs from clickhouse-test [\#5531](https://github.com/ClickHouse/ClickHouse/pull/5531) ([proller](https://github.com/proller))
+- Fix compile for FreeBSD [\#5447](https://github.com/ClickHouse/ClickHouse/pull/5447) ([proller](https://github.com/proller))
+- Upgrade boost to 1.70 [\#5570](https://github.com/ClickHouse/ClickHouse/pull/5570) ([proller](https://github.com/proller))
+- Fix building clickhouse as a submodule [\#5574](https://github.com/ClickHouse/ClickHouse/pull/5574) ([proller](https://github.com/proller))
+- Improve JSONExtract performance tests [\#5444](https://github.com/ClickHouse/ClickHouse/pull/5444) ([Vitaly Baranov](https://github.com/vitlibar))
+
+## ClickHouse release 19.8 {#clickhouse-release-19-8}
+
+### ClickHouse release 19.8.3.8, 2019-06-11 {#clickhouse-release-19-8-3-8-2019-06-11}
+
+#### New Features {#new-features}
+
+- Added functions to work with JSON [\#4686](https://github.com/ClickHouse/ClickHouse/pull/4686) ([hcz](https://github.com/hczhcz))
[\#5124](https://github.com/ClickHouse/ClickHouse/pull/5124). ([Vitaly Baranov](https://github.com/vitlibar))
+- Added a function `basename` that behaves similarly to the basename function found in many languages (`os.path.basename` in Python, `basename` in PHP, etc.). It works with both UNIX-like and Windows paths. [\#5136](https://github.com/ClickHouse/ClickHouse/pull/5136) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Added `LIMIT n, m BY` or `LIMIT m OFFSET n BY` syntax to set an offset of n for the LIMIT BY clause (see the example after this list). [\#5138](https://github.com/ClickHouse/ClickHouse/pull/5138) ([Anton Popov](https://github.com/CurtizJ))
+- Added new data type `SimpleAggregateFunction`, which allows having columns with light aggregation in an `AggregatingMergeTree`. It can only be used with simple functions like `any`, `anyLast`, `sum`, `min`, `max`. [\#4629](https://github.com/ClickHouse/ClickHouse/pull/4629) ([Boris Granveaud](https://github.com/bgranvea))
+- Added support for non-constant arguments in function `ngramDistance` [\#5198](https://github.com/ClickHouse/ClickHouse/pull/5198) ([Danila Kutenin](https://github.com/danlark1))
+- Added functions `skewPop`, `skewSamp`, `kurtPop` and `kurtSamp` to compute sequence skewness, sample skewness, kurtosis and sample kurtosis respectively. [\#5200](https://github.com/ClickHouse/ClickHouse/pull/5200) ([hcz](https://github.com/hczhcz))
+- Support rename operation for `MaterializeView` storage. [\#5209](https://github.com/ClickHouse/ClickHouse/pull/5209) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Added a server which allows connecting to ClickHouse using the MySQL client. [\#4715](https://github.com/ClickHouse/ClickHouse/pull/4715) ([Yuriy Baranov](https://github.com/yurriy))
+- Add `toDecimal*OrZero` and `toDecimal*OrNull` functions. [\#5291](https://github.com/ClickHouse/ClickHouse/pull/5291) ([Artem Zuikov](https://github.com/4ertus2))
+- Support Decimal types in functions: `quantile`, `quantiles`, `median`, `quantileExactWeighted`, `quantilesExactWeighted`, `medianExactWeighted`. [\#5304](https://github.com/ClickHouse/ClickHouse/pull/5304) ([Artem Zuikov](https://github.com/4ertus2))
+- Added `toValidUTF8` function, which replaces all invalid UTF-8 characters by the replacement character � (U+FFFD). [\#5322](https://github.com/ClickHouse/ClickHouse/pull/5322) ([Danila Kutenin](https://github.com/danlark1))
+- Added `format` function. It formats a constant pattern (a simplified Python format pattern) with the strings listed in the arguments. [\#5330](https://github.com/ClickHouse/ClickHouse/pull/5330) ([Danila Kutenin](https://github.com/danlark1))
+- Added `system.detached_parts` table containing information about detached parts of `MergeTree` tables. [\#5353](https://github.com/ClickHouse/ClickHouse/pull/5353) ([akuzm](https://github.com/akuzm))
+- Added `ngramSearch` function to calculate the non-symmetric difference between needle and haystack. [\#5418](https://github.com/ClickHouse/ClickHouse/pull/5418)[\#5422](https://github.com/ClickHouse/ClickHouse/pull/5422) ([Danila Kutenin](https://github.com/danlark1))
+- Implementation of basic machine learning methods (stochastic linear regression and logistic regression) using the aggregate functions interface. Has different strategies for updating model weights (simple gradient descent, momentum method, Nesterov method). Also supports mini-batches of custom size.
[\#4943](https://github.com/ClickHouse/ClickHouse/pull/4943) ([Quid37](https://github.com/Quid37))
+- Implementation of `geohashEncode` and `geohashDecode` functions. [\#5003](https://github.com/ClickHouse/ClickHouse/pull/5003) ([Vasily Nemkov](https://github.com/Enmk))
+- Added aggregate function `timeSeriesGroupSum`, which can aggregate different time series whose sample timestamps are not aligned. It uses linear interpolation between two sample timestamps and then sums the time series together. Added aggregate function `timeSeriesGroupRateSum`, which calculates the rate of time series and then sums the rates together. [\#4542](https://github.com/ClickHouse/ClickHouse/pull/4542) ([Yangkuan Liu](https://github.com/LiuYangkuan))
+- Added functions `IPv4CIDRtoIPv4Range` and `IPv6CIDRtoIPv6Range` to calculate the lower and upper bounds for an IP in a subnet specified by a CIDR. [\#5095](https://github.com/ClickHouse/ClickHouse/pull/5095) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Add an X-ClickHouse-Summary header when sending a query over HTTP with the setting `send_progress_in_http_headers` enabled. It returns the usual information of X-ClickHouse-Progress, with additional information such as how many rows and bytes were inserted by the query. [\#5116](https://github.com/ClickHouse/ClickHouse/pull/5116) ([Guillaume Tassery](https://github.com/YiuRULE))
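+
+As an illustration of the new `LIMIT BY` offset syntax and two of the new string functions, here is a minimal sketch (the table `site_hits` and its columns are hypothetical):
+
+``` sql
+-- Skip the top row per domain and return the next two (offset 1, limit 2 per group).
+SELECT domain, hits
+FROM site_hits
+ORDER BY domain, hits DESC
+LIMIT 1, 2 BY domain;
+-- Equivalent spelling: LIMIT 2 OFFSET 1 BY domain
+
+SELECT format('{} rows were inserted in {} seconds', '100', '0.1');
+SELECT toValidUTF8('a\xF0b'); -- the stray lead byte is replaced by � (U+FFFD)
+```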
+
+#### Improvements {#improvements}
+
+- Added `max_parts_in_total` setting for the MergeTree family of tables (default: 100 000) that prevents unsafe specification of the partition key \#5166. [\#5171](https://github.com/ClickHouse/ClickHouse/pull/5171) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `clickhouse-obfuscator`: derive the seed for individual columns by combining the initial seed with the column name, not the column position. This is intended to transform datasets with multiple related tables, so that tables will remain JOINable after transformation. [\#5178](https://github.com/ClickHouse/ClickHouse/pull/5178) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added functions `JSONExtractRaw`, `JSONExtractKeyAndValues`. Renamed functions `jsonExtract` to `JSONExtract`. When something goes wrong these functions return the corresponding values, not `NULL`. Modified function `JSONExtract`, now it gets the return type from its last parameter and doesn't inject nullables. Implemented fallback to RapidJSON in case AVX2 instructions are not available. Simdjson library updated to a new version. [\#5235](https://github.com/ClickHouse/ClickHouse/pull/5235) ([Vitaly Baranov](https://github.com/vitlibar))
+- Now `if` and `multiIf` functions don't rely on the condition's `Nullable`, but rely on the branches for SQL compatibility. [\#5238](https://github.com/ClickHouse/ClickHouse/pull/5238) ([Jian Wu](https://github.com/janplus))
+- `In` predicate now generates `Null` result from `Null` input like the `Equal` function. [\#5152](https://github.com/ClickHouse/ClickHouse/pull/5152) ([Jian Wu](https://github.com/janplus))
+- Check the time limit every (flush\_interval / poll\_timeout) number of rows from Kafka. This allows breaking the reading from the Kafka consumer more frequently and checking the time limits for the top-level streams [\#5249](https://github.com/ClickHouse/ClickHouse/pull/5249) ([Ivan](https://github.com/abyss7))
+- Link rdkafka with bundled SASL. It should allow using SASL SCRAM authentication [\#5253](https://github.com/ClickHouse/ClickHouse/pull/5253) ([Ivan](https://github.com/abyss7))
+- Batched version of RowRefList for ALL JOINS. [\#5267](https://github.com/ClickHouse/ClickHouse/pull/5267) ([Artem Zuikov](https://github.com/4ertus2))
+- clickhouse-server: more informative listen error messages. [\#5268](https://github.com/ClickHouse/ClickHouse/pull/5268) ([proller](https://github.com/proller))
+- Support dictionaries in clickhouse-copier for functions in `` [\#5270](https://github.com/ClickHouse/ClickHouse/pull/5270) ([proller](https://github.com/proller))
+- Add new setting `kafka_commit_every_batch` to regulate the Kafka committing policy. It allows setting the commit mode: after every batch of messages is handled, or after the whole block is written to the storage. It's a trade-off between losing some messages or reading them twice in some extreme situations (see the sketch after this list). [\#5308](https://github.com/ClickHouse/ClickHouse/pull/5308) ([Ivan](https://github.com/abyss7))
+- Make `windowFunnel` support other Unsigned Integer Types. [\#5320](https://github.com/ClickHouse/ClickHouse/pull/5320) ([sundyli](https://github.com/sundy-li))
+- Allow to shadow virtual column `_table` in Merge engine. [\#5325](https://github.com/ClickHouse/ClickHouse/pull/5325) ([Ivan](https://github.com/abyss7))
+- Make `sequenceMatch` aggregate functions support other unsigned Integer types [\#5339](https://github.com/ClickHouse/ClickHouse/pull/5339) ([sundyli](https://github.com/sundy-li))
+- Better error messages if a checksum mismatch is most likely caused by hardware failures. [\#5355](https://github.com/ClickHouse/ClickHouse/pull/5355) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Check that underlying tables support sampling for `StorageMerge` [\#5366](https://github.com/ClickHouse/ClickHouse/pull/5366) ([Ivan](https://github.com/abyss7))
+- Close MySQL connections after their usage in external dictionaries. It is related to issue \#893. [\#5395](https://github.com/ClickHouse/ClickHouse/pull/5395) ([Clément Rodriguez](https://github.com/clemrodriguez))
+- Improvements of the MySQL Wire Protocol. Changed the name of the format to MySQLWire. Using RAII for calling RSA\_free. Disabling SSL if the context cannot be created. [\#5419](https://github.com/ClickHouse/ClickHouse/pull/5419) ([Yuriy Baranov](https://github.com/yurriy))
+- clickhouse-client: allow running with an inaccessible history file (read-only, no disk space, file is a directory, …). [\#5431](https://github.com/ClickHouse/ClickHouse/pull/5431) ([proller](https://github.com/proller))
+- Respect query settings in asynchronous INSERTs into Distributed tables. [\#4936](https://github.com/ClickHouse/ClickHouse/pull/4936) ([TCeason](https://github.com/TCeason))
+- Renamed functions `leastSqr` to `simpleLinearRegression`, `LinearRegression` to `linearRegression`, `LogisticRegression` to `logisticRegression`. [\#5391](https://github.com/ClickHouse/ClickHouse/pull/5391) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
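+
+A sketch of the new commit setting in a Kafka table definition (the broker address, topic and column names are made up for illustration; the other `kafka_*` settings shown are the usual required ones):
+
+``` sql
+CREATE TABLE queue
+(
+    ts DateTime,
+    message String
+)
+ENGINE = Kafka
+SETTINGS kafka_broker_list = 'kafka:9092',
+         kafka_topic_list = 'events',
+         kafka_group_name = 'clickhouse-consumer',
+         kafka_format = 'JSONEachRow',
+         kafka_commit_every_batch = 1; -- commit after every handled batch instead of after a whole block
+```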
+
+#### Performance Improvements {#performance-improvements}
+
+- Parallelize processing of parts of non-replicated MergeTree tables in ALTER MODIFY query. [\#4639](https://github.com/ClickHouse/ClickHouse/pull/4639) ([Ivan Kush](https://github.com/IvanKush))
+- Optimizations in regular expressions extraction. [\#5193](https://github.com/ClickHouse/ClickHouse/pull/5193) [\#5191](https://github.com/ClickHouse/ClickHouse/pull/5191) ([Danila Kutenin](https://github.com/danlark1))
+- Do not add the right join key column to the join result if it's used only in the join on section. [\#5260](https://github.com/ClickHouse/ClickHouse/pull/5260) ([Artem Zuikov](https://github.com/4ertus2))
+- Freeze the Kafka buffer after the first empty response. It avoids multiple invocations of `ReadBuffer::next()` for an empty result in some row-parsing streams. [\#5283](https://github.com/ClickHouse/ClickHouse/pull/5283) ([Ivan](https://github.com/abyss7))
+- `concat` function optimization for multiple arguments. [\#5357](https://github.com/ClickHouse/ClickHouse/pull/5357) ([Danila Kutenin](https://github.com/danlark1))
+- Query optimisation. Allow pushing down IN statements while rewriting comma/cross joins into inner ones. [\#5396](https://github.com/ClickHouse/ClickHouse/pull/5396) ([Artem Zuikov](https://github.com/4ertus2))
+- Upgraded our LZ4 implementation to the reference one to have faster decompression. [\#5070](https://github.com/ClickHouse/ClickHouse/pull/5070) ([Danila Kutenin](https://github.com/danlark1))
+- Implemented MSD radix sort (based on kxsort), and partial sorting. [\#5129](https://github.com/ClickHouse/ClickHouse/pull/5129) ([Evgenii Pravda](https://github.com/kvinty))
+
+#### Bug Fixes {#bug-fixes}
+
+- Fix pushing required columns with join [\#5192](https://github.com/ClickHouse/ClickHouse/pull/5192) ([Winter Zhang](https://github.com/zhang2014))
+- Fixed a bug where, when ClickHouse is run by systemd, the command `sudo service clickhouse-server forcerestart` was not working as expected. [\#5204](https://github.com/ClickHouse/ClickHouse/pull/5204) ([proller](https://github.com/proller))
+- Fix http error codes in DataPartsExchange (the interserver http server on port 9009 always returned code 200, even on errors). [\#5216](https://github.com/ClickHouse/ClickHouse/pull/5216) ([proller](https://github.com/proller))
+- Fix SimpleAggregateFunction for String longer than MAX\_SMALL\_STRING\_SIZE [\#5311](https://github.com/ClickHouse/ClickHouse/pull/5311) ([Azat Khuzhin](https://github.com/azat))
+- Fix error for `Decimal` to `Nullable(Decimal)` conversion in IN. Support other Decimal to Decimal conversions (including different scales). [\#5350](https://github.com/ClickHouse/ClickHouse/pull/5350) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed FPU clobbering in the simdjson library that led to wrong calculation of the `uniqHLL` and `uniqCombined` aggregate functions and math functions such as `log`. [\#5354](https://github.com/ClickHouse/ClickHouse/pull/5354) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed handling of mixed const/nonconst cases in JSON functions. [\#5435](https://github.com/ClickHouse/ClickHouse/pull/5435) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fix `retention` function. Now all conditions that are satisfied in a row of data are added to the data state. [\#5119](https://github.com/ClickHouse/ClickHouse/pull/5119) ([小路](https://github.com/nicelulu))
+- Fix result type for `quantileExact` with Decimals. [\#5304](https://github.com/ClickHouse/ClickHouse/pull/5304) ([Artem Zuikov](https://github.com/4ertus2))
+
+#### Documentation {#documentation}
+
+- Translate documentation for `CollapsingMergeTree` to Chinese. [\#5168](https://github.com/ClickHouse/ClickHouse/pull/5168) ([张风啸](https://github.com/AlexZFX))
+- Translate some documentation about table engines to Chinese.
+  [\#5134](https://github.com/ClickHouse/ClickHouse/pull/5134) [\#5328](https://github.com/ClickHouse/ClickHouse/pull/5328) ([never lee](https://github.com/neverlee))
+
+#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements}
+
+- Fix some sanitizer reports that show probable use-after-free. [\#5139](https://github.com/ClickHouse/ClickHouse/pull/5139) [\#5143](https://github.com/ClickHouse/ClickHouse/pull/5143) [\#5393](https://github.com/ClickHouse/ClickHouse/pull/5393) ([Ivan](https://github.com/abyss7))
+- Move performance tests out of separate directories for convenience. [\#5158](https://github.com/ClickHouse/ClickHouse/pull/5158) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix incorrect performance tests. [\#5255](https://github.com/ClickHouse/ClickHouse/pull/5255) ([alesapin](https://github.com/alesapin))
+- Added a tool to calculate checksums caused by bit flips to debug hardware issues. [\#5334](https://github.com/ClickHouse/ClickHouse/pull/5334) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Make the runner script more usable. [\#5340](https://github.com/ClickHouse/ClickHouse/pull/5340)[\#5360](https://github.com/ClickHouse/ClickHouse/pull/5360) ([filimonov](https://github.com/filimonov))
+- Add a small instruction on how to write performance tests. [\#5408](https://github.com/ClickHouse/ClickHouse/pull/5408) ([alesapin](https://github.com/alesapin))
+- Add the ability to make substitutions in create, fill and drop queries in performance tests [\#5367](https://github.com/ClickHouse/ClickHouse/pull/5367) ([Olga Khvostikova](https://github.com/stavrolia))
+
+## ClickHouse release 19.7 {#clickhouse-release-19-7}
+
+### ClickHouse release 19.7.5.29, 2019-07-05 {#clickhouse-release-19-7-5-29-2019-07-05}
+
+#### Bug Fix {#bug-fix-25}
+
+- Fix performance regression in some queries with JOIN. [\#5192](https://github.com/ClickHouse/ClickHouse/pull/5192) ([Winter Zhang](https://github.com/zhang2014))
+
+### ClickHouse release 19.7.5.27, 2019-06-09 {#clickhouse-release-19-7-5-27-2019-06-09}
+
+#### New features {#new-features-1}
+
+- Added bitmap related functions `bitmapHasAny` and `bitmapHasAll` analogous to the `hasAny` and `hasAll` functions for arrays (see the example below). [\#5279](https://github.com/ClickHouse/ClickHouse/pull/5279) ([Sergi Vladykin](https://github.com/svladykin))
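+
+A minimal sketch of the two new functions on ad-hoc bitmaps:
+
+``` sql
+SELECT
+    bitmapHasAny(bitmapBuild([1, 2, 3]), bitmapBuild([3, 4, 5])) AS any_shared, -- 1: both bitmaps contain 3
+    bitmapHasAll(bitmapBuild([1, 2, 3]), bitmapBuild([3, 4, 5])) AS has_all;    -- 0: 4 and 5 are missing from the first bitmap
+```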
+
+#### Bug Fixes {#bug-fixes-1}
+
+- Fix segfault on `minmax` INDEX with Null value. [\#5246](https://github.com/ClickHouse/ClickHouse/pull/5246) ([Nikita Vasilev](https://github.com/nikvas0))
+- Mark all input columns in LIMIT BY as required output. It fixes the 'Not found column' error in some distributed queries. [\#5407](https://github.com/ClickHouse/ClickHouse/pull/5407) ([Constantin S. Pan](https://github.com/kvap))
+- Fix "Column '0' already exists" error in `SELECT .. PREWHERE` on column with DEFAULT [\#5397](https://github.com/ClickHouse/ClickHouse/pull/5397) ([proller](https://github.com/proller))
+- Fix `ALTER MODIFY TTL` query on `ReplicatedMergeTree`. [\#5539](https://github.com/ClickHouse/ClickHouse/pull/5539/commits) ([Anton Popov](https://github.com/CurtizJ))
+- Don't crash the server when Kafka consumers have failed to start. [\#5285](https://github.com/ClickHouse/ClickHouse/pull/5285) ([Ivan](https://github.com/abyss7))
+- Fixed bitmap functions producing wrong results. [\#5359](https://github.com/ClickHouse/ClickHouse/pull/5359) ([Andy Yang](https://github.com/andyyzh))
+- Fix element\_count for hashed dictionary (do not include duplicates) [\#5440](https://github.com/ClickHouse/ClickHouse/pull/5440) ([Azat Khuzhin](https://github.com/azat))
+- Use the contents of the environment variable TZ as the name for the timezone. It helps to correctly detect the default timezone in some cases. [\#5443](https://github.com/ClickHouse/ClickHouse/pull/5443) ([Ivan](https://github.com/abyss7))
+- Do not try to convert integers in `dictGetT` functions, because it doesn't work correctly. Throw an exception instead. [\#5446](https://github.com/ClickHouse/ClickHouse/pull/5446) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix settings in ExternalData HTTP request. [\#5455](https://github.com/ClickHouse/ClickHouse/pull/5455) ([Danila Kutenin](https://github.com/danlark1))
+- Fix bug when parts were removed only from FS without dropping them from Zookeeper. [\#5520](https://github.com/ClickHouse/ClickHouse/pull/5520) ([alesapin](https://github.com/alesapin))
+- Fix segmentation fault in `bitmapHasAny` function. [\#5528](https://github.com/ClickHouse/ClickHouse/pull/5528) ([Zhichang Yu](https://github.com/yuzhichang))
+- Fixed an error where the replication connection pool didn't retry resolving the host, even when the DNS cache was dropped. [\#5534](https://github.com/ClickHouse/ClickHouse/pull/5534) ([alesapin](https://github.com/alesapin))
+- Fixed `DROP INDEX IF EXISTS` query. Now the `ALTER TABLE ... DROP INDEX IF EXISTS ...` query doesn't raise an exception if the provided index does not exist. [\#5524](https://github.com/ClickHouse/ClickHouse/pull/5524) ([Gleb Novikov](https://github.com/NanoBjorn))
+- Fix union all supertype column. There were cases with inconsistent data and column types of resulting columns. [\#5503](https://github.com/ClickHouse/ClickHouse/pull/5503) ([Artem Zuikov](https://github.com/4ertus2))
+- Skip ZNONODE during DDL query processing. Previously, if another node removed the znode in the task queue, the node that did not process it but had already gotten the list of children would terminate the DDLWorker thread. [\#5489](https://github.com/ClickHouse/ClickHouse/pull/5489) ([Azat Khuzhin](https://github.com/azat))
+- Fix INSERT into Distributed() table with MATERIALIZED column. [\#5429](https://github.com/ClickHouse/ClickHouse/pull/5429) ([Azat Khuzhin](https://github.com/azat))
+
+### ClickHouse release 19.7.3.9, 2019-05-30 {#clickhouse-release-19-7-3-9-2019-05-30}
+
+#### New Features {#new-features-2}
+
+- Allow limiting the range of a setting that can be specified by the user. These constraints can be set up in the user settings profile. [\#4931](https://github.com/ClickHouse/ClickHouse/pull/4931) ([Vitaly Baranov](https://github.com/vitlibar))
+- Add a second version of the function `groupUniqArray` with an optional `max_size` parameter that limits the size of the resulting array (see the example after this list). This behavior is similar to the `groupArray(max_size)(x)` function. [\#5026](https://github.com/ClickHouse/ClickHouse/pull/5026) ([Guillaume Tassery](https://github.com/YiuRULE))
+- For TSVWithNames/CSVWithNames input file formats, the column order can now be determined from the file header. This is controlled by the `input_format_with_names_use_header` parameter. [\#5081](https://github.com/ClickHouse/ClickHouse/pull/5081) ([Alexander](https://github.com/Akazz))
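+
+A minimal sketch of the bounded variant of `groupUniqArray`:
+
+``` sql
+-- Collect at most 3 distinct values of number % 10 across 100 rows.
+SELECT groupUniqArray(3)(number % 10) FROM numbers(100);
+```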
+
+#### Bug Fixes {#bug-fixes-2}
+
+- Crash with uncompressed\_cache + JOIN during merge (\#5197) [\#5133](https://github.com/ClickHouse/ClickHouse/pull/5133) ([Danila Kutenin](https://github.com/danlark1))
+- Segmentation fault on a clickhouse-client query to system tables. \#5066 [\#5127](https://github.com/ClickHouse/ClickHouse/pull/5127) ([Ivan](https://github.com/abyss7))
+- Data loss on heavy load via KafkaEngine (\#4736) [\#5080](https://github.com/ClickHouse/ClickHouse/pull/5080) ([Ivan](https://github.com/abyss7))
+- Fixed a very rare data race condition that could happen when executing a query with UNION ALL involving at least two SELECTs from system.columns, system.tables, system.parts, system.parts\_tables or tables of the Merge family and performing ALTER of columns of the related tables concurrently. [\#5189](https://github.com/ClickHouse/ClickHouse/pull/5189) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Performance Improvements {#performance-improvements-1}
+
+- Use radix sort for sorting by a single numeric column in `ORDER BY` without `LIMIT`. [\#5106](https://github.com/ClickHouse/ClickHouse/pull/5106), [\#4439](https://github.com/ClickHouse/ClickHouse/pull/4439) ([Evgenii Pravda](https://github.com/kvinty), [alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Documentation {#documentation-1}
+
+- Translate documentation for some table engines to Chinese. [\#5107](https://github.com/ClickHouse/ClickHouse/pull/5107), [\#5094](https://github.com/ClickHouse/ClickHouse/pull/5094), [\#5087](https://github.com/ClickHouse/ClickHouse/pull/5087) ([张风啸](https://github.com/AlexZFX)), [\#5068](https://github.com/ClickHouse/ClickHouse/pull/5068) ([never lee](https://github.com/neverlee))
+
+#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-1}
+
+- Print UTF-8 characters properly in `clickhouse-test`. [\#5084](https://github.com/ClickHouse/ClickHouse/pull/5084) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add a command line parameter for clickhouse-client to always load suggestion data. [\#5102](https://github.com/ClickHouse/ClickHouse/pull/5102) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Resolve some of the PVS-Studio warnings. [\#5082](https://github.com/ClickHouse/ClickHouse/pull/5082) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Update LZ4 [\#5040](https://github.com/ClickHouse/ClickHouse/pull/5040) ([Danila Kutenin](https://github.com/danlark1))
+- Add gperf to build requirements for upcoming pull request \#5030. [\#5110](https://github.com/ClickHouse/ClickHouse/pull/5110) ([proller](https://github.com/proller))
+
+## ClickHouse release 19.6 {#clickhouse-release-19-6}
+
+### ClickHouse release 19.6.3.18, 2019-06-13 {#clickhouse-release-19-6-3-18-2019-06-13}
+
+#### Bug Fixes {#bug-fixes-3}
+
+- Fixed IN condition pushdown for queries from table functions `mysql` and `odbc` and corresponding table engines. This fixes \#3540 and \#2384. [\#5313](https://github.com/ClickHouse/ClickHouse/pull/5313) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix deadlock in Zookeeper. [\#5297](https://github.com/ClickHouse/ClickHouse/pull/5297) ([github1youlc](https://github.com/github1youlc))
+- Allow quoted decimals in CSV.
[\#5284](https://github.com/ClickHouse/ClickHouse/pull/5284) ([Artem Zuikov](https://github.com/4ertus2))
+- Disallow conversion from float Inf/NaN into Decimals (throw exception). [\#5282](https://github.com/ClickHouse/ClickHouse/pull/5282) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix data race in rename query. [\#5247](https://github.com/ClickHouse/ClickHouse/pull/5247) ([Winter Zhang](https://github.com/zhang2014))
+- Temporarily disable LFAlloc. Usage of LFAlloc might lead to a lot of MAP\_FAILED in allocating UncompressedCache and as a result to crashes of queries on highly loaded servers. [cfdba93](https://github.com/ClickHouse/ClickHouse/commit/cfdba938ce22f16efeec504f7f90206a515b1280) ([Danila Kutenin](https://github.com/danlark1))
+
+### ClickHouse release 19.6.2.11, 2019-05-13 {#clickhouse-release-19-6-2-11-2019-05-13}
+
+#### New Features {#new-features-3}
+
+- TTL expressions for columns and tables (see the sketch after this list). [\#4212](https://github.com/ClickHouse/ClickHouse/pull/4212) ([Anton Popov](https://github.com/CurtizJ))
+- Added support for `brotli` compression for HTTP responses (Accept-Encoding: br) [\#4388](https://github.com/ClickHouse/ClickHouse/pull/4388) ([Mikhail](https://github.com/fandyushin))
+- Added new function `isValidUTF8` for checking whether a set of bytes is correctly utf-8 encoded. [\#4934](https://github.com/ClickHouse/ClickHouse/pull/4934) ([Danila Kutenin](https://github.com/danlark1))
+- Add new load balancing policy `first_or_random` which sends queries to the first specified host and, if it's inaccessible, sends queries to random hosts of the shard. Useful for cross-replication topology setups. [\#5012](https://github.com/ClickHouse/ClickHouse/pull/5012) ([nvartolomei](https://github.com/nvartolomei))
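+
+A minimal sketch of column-level and table-level TTL (the table and columns are made up for illustration):
+
+``` sql
+CREATE TABLE events
+(
+    d Date,
+    message String TTL d + INTERVAL 30 DAY  -- the column value expires before the row does
+)
+ENGINE = MergeTree
+ORDER BY d
+TTL d + INTERVAL 90 DAY;                    -- whole rows are removed after 90 days
+```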
+
+#### Experimental Features {#experimental-features-1}
+
+- Add setting `index_granularity_bytes` (adaptive index granularity) for the MergeTree\* tables family (see the sketch below). [\#4826](https://github.com/ClickHouse/ClickHouse/pull/4826) ([alesapin](https://github.com/alesapin))
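+
+A sketch of enabling adaptive granularity on a new table (the table and column names are made up):
+
+``` sql
+CREATE TABLE adaptive
+(
+    key UInt64,
+    payload String
+)
+ENGINE = MergeTree
+ORDER BY key
+SETTINGS index_granularity_bytes = 10485760; -- cap a granule at ~10 MiB instead of a fixed row count
+```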
+
+#### Improvements {#improvements-1}
+
+- Added support for non-constant and negative size and length arguments for function `substringUTF8`. [\#4989](https://github.com/ClickHouse/ClickHouse/pull/4989) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Disable push-down to the right table in left join, the left table in right join, and both tables in full join. This fixes wrong JOIN results in some cases. [\#4846](https://github.com/ClickHouse/ClickHouse/pull/4846) ([Ivan](https://github.com/abyss7))
+- `clickhouse-copier`: auto upload task configuration from the `--task-file` option [\#4876](https://github.com/ClickHouse/ClickHouse/pull/4876) ([proller](https://github.com/proller))
+- Added typos handler for the storage factory and table functions factory. [\#4891](https://github.com/ClickHouse/ClickHouse/pull/4891) ([Danila Kutenin](https://github.com/danlark1))
+- Support asterisks and qualified asterisks for multiple joins without subqueries [\#4898](https://github.com/ClickHouse/ClickHouse/pull/4898) ([Artem Zuikov](https://github.com/4ertus2))
+- Make the missing column error message more user friendly. [\#4915](https://github.com/ClickHouse/ClickHouse/pull/4915) ([Artem Zuikov](https://github.com/4ertus2))
+
+#### Performance Improvements {#performance-improvements-2}
+
+- Significant speedup of ASOF JOIN [\#4924](https://github.com/ClickHouse/ClickHouse/pull/4924) ([Martijn Bakker](https://github.com/Gladdy))
+
+#### Backward Incompatible Changes {#backward-incompatible-changes}
+
+- HTTP header `Query-Id` was renamed to `X-ClickHouse-Query-Id` for consistency. [\#4972](https://github.com/ClickHouse/ClickHouse/pull/4972) ([Mikhail](https://github.com/fandyushin))
+
+#### Bug Fixes {#bug-fixes-4}
+
+- Fixed potential null pointer dereference in `clickhouse-copier`. [\#4900](https://github.com/ClickHouse/ClickHouse/pull/4900) ([proller](https://github.com/proller))
+- Fixed error on query with JOIN + ARRAY JOIN [\#4938](https://github.com/ClickHouse/ClickHouse/pull/4938) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed hanging on start of the server when a dictionary depends on another dictionary via a database with engine=Dictionary. [\#4962](https://github.com/ClickHouse/ClickHouse/pull/4962) ([Vitaly Baranov](https://github.com/vitlibar))
+- Partially fix distributed\_product\_mode = local. It's possible to allow columns of local tables in where/having/order by/… via table aliases. An exception is thrown if the table does not have an alias. It's not possible to access the columns without table aliases yet. [\#4986](https://github.com/ClickHouse/ClickHouse/pull/4986) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix potentially wrong result for `SELECT DISTINCT` with `JOIN` [\#5001](https://github.com/ClickHouse/ClickHouse/pull/5001) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed a very rare data race condition that could happen when executing a query with UNION ALL involving at least two SELECTs from system.columns, system.tables, system.parts, system.parts\_tables or tables of the Merge family and performing ALTER of columns of the related tables concurrently. [\#5189](https://github.com/ClickHouse/ClickHouse/pull/5189) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-2}
+
+- Fixed test failures when running clickhouse-server on a different host [\#4713](https://github.com/ClickHouse/ClickHouse/pull/4713) ([Vasily Nemkov](https://github.com/Enmk))
+- clickhouse-test: Disable color control sequences in non-tty environments. [\#4937](https://github.com/ClickHouse/ClickHouse/pull/4937) ([alesapin](https://github.com/alesapin))
+- clickhouse-test: Allow using any test database (remove the `test.` qualification where possible) [\#5008](https://github.com/ClickHouse/ClickHouse/pull/5008) ([proller](https://github.com/proller))
+- Fix ubsan errors [\#5037](https://github.com/ClickHouse/ClickHouse/pull/5037) ([Vitaly Baranov](https://github.com/vitlibar))
+- Yandex LFAlloc was added to ClickHouse to allocate MarkCache and UncompressedCache data in different ways to catch segfaults more reliably [\#4995](https://github.com/ClickHouse/ClickHouse/pull/4995) ([Danila Kutenin](https://github.com/danlark1))
+- Python util to help with backports and changelogs. [\#4949](https://github.com/ClickHouse/ClickHouse/pull/4949) ([Ivan](https://github.com/abyss7))
+
+## ClickHouse release 19.5 {#clickhouse-release-19-5}
+
+### ClickHouse release 19.5.4.22, 2019-05-13 {#clickhouse-release-19-5-4-22-2019-05-13}
+
+#### Bug fixes {#bug-fixes-5}
+
+- Fixed possible crash in bitmap\* functions [\#5220](https://github.com/ClickHouse/ClickHouse/pull/5220) [\#5228](https://github.com/ClickHouse/ClickHouse/pull/5228) ([Andy Yang](https://github.com/andyyzh))
+- Fixed a very rare data race condition that could happen when executing a query with UNION ALL involving at least two SELECTs from system.columns, system.tables, system.parts, system.parts\_tables or tables of the Merge family and performing ALTER of columns of the related tables concurrently. [\#5189](https://github.com/ClickHouse/ClickHouse/pull/5189) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed error `Set for IN is not created yet in case of using single LowCardinality column in the left part of IN`. This error happened if a LowCardinality column was part of the primary key. \#5031 [\#5154](https://github.com/ClickHouse/ClickHouse/pull/5154) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Modification of the retention function: previously, if a row satisfied both the first and the Nth condition, only the first satisfied condition was added to the data state. Now all conditions that are satisfied in a row of data are added to the data state. [\#5119](https://github.com/ClickHouse/ClickHouse/pull/5119) ([小路](https://github.com/nicelulu))
+
+### ClickHouse release 19.5.3.8, 2019-04-18 {#clickhouse-release-19-5-3-8-2019-04-18}
+
+#### Bug fixes {#bug-fixes-6}
+
+- Fixed type of setting `max_partitions_per_insert_block` from boolean to UInt64. [\#5028](https://github.com/ClickHouse/ClickHouse/pull/5028) ([Mohammad Hossein Sekhavat](https://github.com/mhsekhavat))
+
+### ClickHouse release 19.5.2.6, 2019-04-15 {#clickhouse-release-19-5-2-6-2019-04-15}
+
+#### New Features {#new-features-4}
+
+- [Hyperscan](https://github.com/intel/hyperscan) multiple regular expression matching was added (functions `multiMatchAny`, `multiMatchAnyIndex`, `multiFuzzyMatchAny`, `multiFuzzyMatchAnyIndex`). [\#4780](https://github.com/ClickHouse/ClickHouse/pull/4780), [\#4841](https://github.com/ClickHouse/ClickHouse/pull/4841) ([Danila Kutenin](https://github.com/danlark1))
+- `multiSearchFirstPosition` function was added. [\#4780](https://github.com/ClickHouse/ClickHouse/pull/4780) ([Danila Kutenin](https://github.com/danlark1))
+- Implement the predefined expression filter per row for tables. [\#4792](https://github.com/ClickHouse/ClickHouse/pull/4792) ([Ivan](https://github.com/abyss7))
+- A new type of data skipping indices based on bloom filters (can be used for the `equal`, `in` and `like` functions). [\#4499](https://github.com/ClickHouse/ClickHouse/pull/4499) ([Nikita Vasilev](https://github.com/nikvas0))
+- Added `ASOF JOIN` which allows running queries that join to the most recent value known (see the sketch after this list). [\#4774](https://github.com/ClickHouse/ClickHouse/pull/4774) [\#4867](https://github.com/ClickHouse/ClickHouse/pull/4867) [\#4863](https://github.com/ClickHouse/ClickHouse/pull/4863) [\#4875](https://github.com/ClickHouse/ClickHouse/pull/4875) ([Martijn Bakker](https://github.com/Gladdy), [Artem Zuikov](https://github.com/4ertus2))
+- Rewrite multiple `COMMA JOIN` to `CROSS JOIN`. Then rewrite them to `INNER JOIN` if possible. [\#4661](https://github.com/ClickHouse/ClickHouse/pull/4661) ([Artem Zuikov](https://github.com/4ertus2))
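+
+A minimal sketch of `ASOF JOIN` in its `USING` form, where the last column listed is the inequality key (the `trades`/`quotes` tables are hypothetical):
+
+``` sql
+-- For every trade, pick the quote with the latest ts not exceeding the trade's ts.
+SELECT t.symbol, t.ts, q.price
+FROM trades AS t
+ASOF JOIN quotes AS q USING (symbol, ts);
+```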
+
+#### Improvement {#improvement-9}
+
+- `topK` and `topKWeighted` now support a custom `loadFactor` (fixes issue [\#4252](https://github.com/ClickHouse/ClickHouse/issues/4252)). [\#4634](https://github.com/ClickHouse/ClickHouse/pull/4634) ([Kirill Danshin](https://github.com/kirillDanshin))
+- Allow using `parallel_replicas_count > 1` even for tables without sampling (the setting is simply ignored for them). In previous versions it led to an exception. [\#4637](https://github.com/ClickHouse/ClickHouse/pull/4637) ([Alexey Elymanov](https://github.com/digitalist))
+- Support for `CREATE OR REPLACE VIEW`. Allows creating a view or setting a new definition in a single statement (see the example after this list). [\#4654](https://github.com/ClickHouse/ClickHouse/pull/4654) ([Boris Granveaud](https://github.com/bgranvea))
+- `Buffer` table engine now supports `PREWHERE`. [\#4671](https://github.com/ClickHouse/ClickHouse/pull/4671) ([Yangkuan Liu](https://github.com/LiuYangkuan))
+- Add the ability to start a replicated table without metadata in zookeeper in `readonly` mode. [\#4691](https://github.com/ClickHouse/ClickHouse/pull/4691) ([alesapin](https://github.com/alesapin))
+- Fixed flicker of the progress bar in clickhouse-client. The issue was most noticeable when using `FORMAT Null` with streaming queries. [\#4811](https://github.com/ClickHouse/ClickHouse/pull/4811) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Allow disabling functions with the `hyperscan` library on a per-user basis to limit potentially excessive and uncontrolled resource usage. [\#4816](https://github.com/ClickHouse/ClickHouse/pull/4816) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add version number logging in all errors. [\#4824](https://github.com/ClickHouse/ClickHouse/pull/4824) ([proller](https://github.com/proller))
+- Added a restriction to the `multiMatch` functions which requires the string size to fit into `unsigned int`. Also added a number-of-arguments limit to the `multiSearch` functions. [\#4834](https://github.com/ClickHouse/ClickHouse/pull/4834) ([Danila Kutenin](https://github.com/danlark1))
+- Improved usage of scratch space and error handling in Hyperscan. [\#4866](https://github.com/ClickHouse/ClickHouse/pull/4866) ([Danila Kutenin](https://github.com/danlark1))
+- Fill `system.graphite_detentions` from a table config of `*GraphiteMergeTree` engine tables. [\#4584](https://github.com/ClickHouse/ClickHouse/pull/4584) ([Mikhail f. Shiryaev](https://github.com/Felixoid))
+- Rename `trigramDistance` function to `ngramDistance` and add more functions with `CaseInsensitive` and `UTF`. [\#4602](https://github.com/ClickHouse/ClickHouse/pull/4602) ([Danila Kutenin](https://github.com/danlark1))
+- Improved data skipping indices calculation. [\#4640](https://github.com/ClickHouse/ClickHouse/pull/4640) ([Nikita Vasilev](https://github.com/nikvas0))
+- Keep ordinary, `DEFAULT`, `MATERIALIZED` and `ALIAS` columns in a single list (fixes issue [\#2867](https://github.com/ClickHouse/ClickHouse/issues/2867)). [\#4707](https://github.com/ClickHouse/ClickHouse/pull/4707) ([Alex Zatelepin](https://github.com/ztlpn))
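+
+A small example of the new view statement (the view name and queries are made up):
+
+``` sql
+CREATE VIEW v AS SELECT 1 AS x;
+-- Later, change the definition without an explicit DROP:
+CREATE OR REPLACE VIEW v AS SELECT 2 AS x;
+```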
+
+#### Bug Fix {#bug-fix-26}
+
+- Avoid `std::terminate` in case of memory allocation failure. Now the `std::bad_alloc` exception is thrown as expected. [\#4665](https://github.com/ClickHouse/ClickHouse/pull/4665) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixes capnproto reading from buffer. Sometimes files weren't loaded successfully over HTTP. [\#4674](https://github.com/ClickHouse/ClickHouse/pull/4674) ([Vladislav](https://github.com/smirnov-vs))
+- Fix error `Unknown log entry type: 0` after an `OPTIMIZE TABLE FINAL` query. [\#4683](https://github.com/ClickHouse/ClickHouse/pull/4683) ([Amos Bird](https://github.com/amosbird))
+- Wrong arguments to the `hasAny` or `hasAll` functions could lead to a segfault. [\#4698](https://github.com/ClickHouse/ClickHouse/pull/4698) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- A deadlock could happen while executing a `DROP DATABASE dictionary` query. [\#4701](https://github.com/ClickHouse/ClickHouse/pull/4701) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix undefined behavior in the `median` and `quantile` functions. [\#4702](https://github.com/ClickHouse/ClickHouse/pull/4702) ([hcz](https://github.com/hczhcz))
+- Fix compression level detection when `network_compression_method` is in lowercase. Broken in v19.1. [\#4706](https://github.com/ClickHouse/ClickHouse/pull/4706) ([proller](https://github.com/proller))
+- Fixed ignoring of the `UTC` setting (fixes issue [\#4658](https://github.com/ClickHouse/ClickHouse/issues/4658)). [\#4718](https://github.com/ClickHouse/ClickHouse/pull/4718) ([proller](https://github.com/proller))
+- Fix `histogram` function behaviour with `Distributed` tables. [\#4741](https://github.com/ClickHouse/ClickHouse/pull/4741) ([olegkv](https://github.com/olegkv))
+- Fixed tsan report `destroy of a locked mutex`. [\#4742](https://github.com/ClickHouse/ClickHouse/pull/4742) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed TSan report on shutdown due to a race condition in system logs usage. Fixed potential use-after-free on shutdown when part\_log is enabled. [\#4758](https://github.com/ClickHouse/ClickHouse/pull/4758) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix rechecking parts in `ReplicatedMergeTreeAlterThread` in case of error. [\#4772](https://github.com/ClickHouse/ClickHouse/pull/4772) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Arithmetic operations on intermediate aggregate function states were not working for constant arguments (such as subquery results). [\#4776](https://github.com/ClickHouse/ClickHouse/pull/4776) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Always backquote column names in metadata. Otherwise it's impossible to create a table with a column named `index` (the server won't restart due to a malformed `ATTACH` query in metadata). [\#4782](https://github.com/ClickHouse/ClickHouse/pull/4782) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix crash in `ALTER ... MODIFY ORDER BY` on a `Distributed` table. [\#4790](https://github.com/ClickHouse/ClickHouse/pull/4790) ([TCeason](https://github.com/TCeason))
+- Fix segfault in `JOIN ON` with enabled `enable_optimize_predicate_expression`. [\#4794](https://github.com/ClickHouse/ClickHouse/pull/4794) ([Winter Zhang](https://github.com/zhang2014))
+- Fix bug with adding an extraneous row after consuming a protobuf message from Kafka. [\#4808](https://github.com/ClickHouse/ClickHouse/pull/4808) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fix crash of `JOIN` on not-nullable vs nullable column. Fix `NULLs` in right keys in `ANY JOIN` + `join_use_nulls`. [\#4815](https://github.com/ClickHouse/ClickHouse/pull/4815) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix segmentation fault in `clickhouse-copier`.
[\#4835](https://github.com/ClickHouse/ClickHouse/pull/4835) ([proller](https://github.com/proller))
+- Fixed race condition in `SELECT` from `system.tables` if the table is renamed or altered concurrently. [\#4836](https://github.com/ClickHouse/ClickHouse/pull/4836) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed data race when fetching a data part that is already obsolete. [\#4839](https://github.com/ClickHouse/ClickHouse/pull/4839) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed rare data race that can happen during `RENAME` of a MergeTree family table. [\#4844](https://github.com/ClickHouse/ClickHouse/pull/4844) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed segmentation fault in function `arrayIntersect`. A segmentation fault could happen if the function was called with mixed constant and ordinary arguments. [\#4847](https://github.com/ClickHouse/ClickHouse/pull/4847) ([Lixiang Qian](https://github.com/fancyqlx))
+- Fixed reading from `Array(LowCardinality)` column in a rare case when the column contained a long sequence of empty arrays. [\#4850](https://github.com/ClickHouse/ClickHouse/pull/4850) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix crash in `FULL/RIGHT JOIN` when joining on nullable vs not nullable. [\#4855](https://github.com/ClickHouse/ClickHouse/pull/4855) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix `No message received` exception while fetching parts between replicas. [\#4856](https://github.com/ClickHouse/ClickHouse/pull/4856) ([alesapin](https://github.com/alesapin))
+- Fixed `arrayIntersect` function wrong result in case of several repeated values in a single array. [\#4871](https://github.com/ClickHouse/ClickHouse/pull/4871) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix a race condition during concurrent `ALTER COLUMN` queries that could lead to a server crash (fixes issue [\#3421](https://github.com/ClickHouse/ClickHouse/issues/3421)). [\#4592](https://github.com/ClickHouse/ClickHouse/pull/4592) ([Alex Zatelepin](https://github.com/ztlpn))
+- Fix incorrect result in `FULL/RIGHT JOIN` with const column. [\#4723](https://github.com/ClickHouse/ClickHouse/pull/4723) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix duplicates in `GLOBAL JOIN` with asterisk. [\#4705](https://github.com/ClickHouse/ClickHouse/pull/4705) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix parameter deduction in `ALTER MODIFY` of column `CODEC` when the column type is not specified. [\#4883](https://github.com/ClickHouse/ClickHouse/pull/4883) ([alesapin](https://github.com/alesapin))
+- Functions `cutQueryStringAndFragment()` and `queryStringAndFragment()` now work correctly when `URL` contains a fragment and no query. [\#4894](https://github.com/ClickHouse/ClickHouse/pull/4894) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fix rare bug when setting `min_bytes_to_use_direct_io` is greater than zero, which occurs when a thread has to seek backward in a column file. [\#4897](https://github.com/ClickHouse/ClickHouse/pull/4897) ([alesapin](https://github.com/alesapin))
+- Fix wrong argument types for aggregate functions with `LowCardinality` arguments (fixes issue [\#4919](https://github.com/ClickHouse/ClickHouse/issues/4919)). [\#4922](https://github.com/ClickHouse/ClickHouse/pull/4922) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix wrong name qualification in `GLOBAL JOIN`.
[\#4969](https://github.com/ClickHouse/ClickHouse/pull/4969) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix function `toISOWeek` result for year 1970. [\#4988](https://github.com/ClickHouse/ClickHouse/pull/4988) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix `DROP`, `TRUNCATE` and `OPTIMIZE` queries duplication, when executed `ON CLUSTER` for the `ReplicatedMergeTree*` tables family. [\#4991](https://github.com/ClickHouse/ClickHouse/pull/4991) ([alesapin](https://github.com/alesapin))
+
+#### Backward Incompatible Change {#backward-incompatible-change-8}
+
+- Renamed setting `insert_sample_with_metadata` to setting `input_format_defaults_for_omitted_fields`. [\#4771](https://github.com/ClickHouse/ClickHouse/pull/4771) ([Artem Zuikov](https://github.com/4ertus2))
+- Added setting `max_partitions_per_insert_block` (with value 100 by default). If an inserted block contains a larger number of partitions, an exception is thrown. Set it to 0 if you want to remove the limit (not recommended; see the sketch after this list). [\#4845](https://github.com/ClickHouse/ClickHouse/pull/4845) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Multi-search functions were renamed (`multiPosition` to `multiSearchAllPositions`, `multiSearch` to `multiSearchAny`, `firstMatch` to `multiSearchFirstIndex`). [\#4780](https://github.com/ClickHouse/ClickHouse/pull/4780) ([Danila Kutenin](https://github.com/danlark1))
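+
+A small sketch of adjusting the new partition limit per session (the table names are hypothetical):
+
+``` sql
+-- Raise the threshold for a one-off backfill, or set 0 to remove the limit entirely.
+SET max_partitions_per_insert_block = 500;
+INSERT INTO backfill_target SELECT * FROM staging;
+```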
+
+#### Performance Improvement {#performance-improvement-6}
+
+- Optimize Volnitsky searcher by inlining, giving about 5-10% search improvement for queries with many needles or many similar bigrams. [\#4862](https://github.com/ClickHouse/ClickHouse/pull/4862) ([Danila Kutenin](https://github.com/danlark1))
+- Fix performance issue when setting `use_uncompressed_cache` is greater than zero, which appeared when all read data was contained in the cache. [\#4913](https://github.com/ClickHouse/ClickHouse/pull/4913) ([alesapin](https://github.com/alesapin))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-10}
+
+- Hardening debug build: more granular memory mappings and ASLR; add memory protection for mark cache and index. This allows finding more memory stomping bugs in case ASan and MSan cannot do it. [\#4632](https://github.com/ClickHouse/ClickHouse/pull/4632) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add support for cmake variables `ENABLE_PROTOBUF`, `ENABLE_PARQUET` and `ENABLE_BROTLI` which allow enabling/disabling the above features (same as we can do for librdkafka, mysql, etc). [\#4669](https://github.com/ClickHouse/ClickHouse/pull/4669) ([Silviu Caragea](https://github.com/silviucpp))
+- Add the ability to print the process list and stacktraces of all threads if some queries are hung after a test run. [\#4675](https://github.com/ClickHouse/ClickHouse/pull/4675) ([alesapin](https://github.com/alesapin))
+- Add retries on `Connection loss` error in `clickhouse-test`. [\#4682](https://github.com/ClickHouse/ClickHouse/pull/4682) ([alesapin](https://github.com/alesapin))
+- Add freebsd build with vagrant and build with thread sanitizer to the packager script. [\#4712](https://github.com/ClickHouse/ClickHouse/pull/4712) [\#4748](https://github.com/ClickHouse/ClickHouse/pull/4748) ([alesapin](https://github.com/alesapin))
+- Now the user is asked for the password for user `'default'` during installation. [\#4725](https://github.com/ClickHouse/ClickHouse/pull/4725) ([proller](https://github.com/proller))
+- Suppress warning in `rdkafka` library. [\#4740](https://github.com/ClickHouse/ClickHouse/pull/4740) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Allow building without SSL. [\#4750](https://github.com/ClickHouse/ClickHouse/pull/4750) ([proller](https://github.com/proller))
+- Add a way to launch the clickhouse-server image from a custom user. [\#4753](https://github.com/ClickHouse/ClickHouse/pull/4753) ([Mikhail f. Shiryaev](https://github.com/Felixoid))
+- Upgrade contrib boost to 1.69. [\#4793](https://github.com/ClickHouse/ClickHouse/pull/4793) ([proller](https://github.com/proller))
+- Disable usage of `mremap` when compiled with Thread Sanitizer. Surprisingly enough, TSan does not intercept `mremap` (though it does intercept `mmap`, `munmap`), which leads to false positives. Fixed TSan report in stateful tests. [\#4859](https://github.com/ClickHouse/ClickHouse/pull/4859) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add test checking using format schema via HTTP interface. [\#4864](https://github.com/ClickHouse/ClickHouse/pull/4864) ([Vitaly Baranov](https://github.com/vitlibar))
+
+## ClickHouse release 19.4 {#clickhouse-release-19-4}
+
+### ClickHouse release 19.4.4.33, 2019-04-17 {#clickhouse-release-19-4-4-33-2019-04-17}
+
+#### Bug Fixes {#bug-fixes-7}
+
+- Avoid `std::terminate` in case of memory allocation failure. Now the `std::bad_alloc` exception is thrown as expected. [\#4665](https://github.com/ClickHouse/ClickHouse/pull/4665) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixes capnproto reading from buffer. Sometimes files weren't loaded successfully over HTTP. [\#4674](https://github.com/ClickHouse/ClickHouse/pull/4674) ([Vladislav](https://github.com/smirnov-vs))
+- Fix error `Unknown log entry type: 0` after an `OPTIMIZE TABLE FINAL` query. [\#4683](https://github.com/ClickHouse/ClickHouse/pull/4683) ([Amos Bird](https://github.com/amosbird))
+- Wrong arguments to the `hasAny` or `hasAll` functions could lead to a segfault. [\#4698](https://github.com/ClickHouse/ClickHouse/pull/4698) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- A deadlock could happen while executing a `DROP DATABASE dictionary` query. [\#4701](https://github.com/ClickHouse/ClickHouse/pull/4701) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix undefined behavior in the `median` and `quantile` functions. [\#4702](https://github.com/ClickHouse/ClickHouse/pull/4702) ([hcz](https://github.com/hczhcz))
+- Fix compression level detection when `network_compression_method` is in lowercase. Broken in v19.1. [\#4706](https://github.com/ClickHouse/ClickHouse/pull/4706) ([proller](https://github.com/proller))
+- Fixed ignoring of the `UTC` setting (fixes issue [\#4658](https://github.com/ClickHouse/ClickHouse/issues/4658)). [\#4718](https://github.com/ClickHouse/ClickHouse/pull/4718) ([proller](https://github.com/proller))
+- Fix `histogram` function behaviour with `Distributed` tables. [\#4741](https://github.com/ClickHouse/ClickHouse/pull/4741) ([olegkv](https://github.com/olegkv))
+- Fixed tsan report `destroy of a locked mutex`. [\#4742](https://github.com/ClickHouse/ClickHouse/pull/4742) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed TSan report on shutdown due to a race condition in system logs usage. Fixed potential use-after-free on shutdown when part\_log is enabled. [\#4758](https://github.com/ClickHouse/ClickHouse/pull/4758) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix rechecking parts in `ReplicatedMergeTreeAlterThread` in case of error.
[\#4772](https://github.com/ClickHouse/ClickHouse/pull/4772) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Arithmetic operations on intermediate aggregate function states were not working for constant arguments (such as subquery results). [\#4776](https://github.com/ClickHouse/ClickHouse/pull/4776) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Always backquote column names in metadata. Otherwise it's impossible to create a table with a column named `index` (the server won't restart due to a malformed `ATTACH` query in metadata). [\#4782](https://github.com/ClickHouse/ClickHouse/pull/4782) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix crash in `ALTER ... MODIFY ORDER BY` on a `Distributed` table. [\#4790](https://github.com/ClickHouse/ClickHouse/pull/4790) ([TCeason](https://github.com/TCeason))
+- Fix segfault in `JOIN ON` with enabled `enable_optimize_predicate_expression`. [\#4794](https://github.com/ClickHouse/ClickHouse/pull/4794) ([Winter Zhang](https://github.com/zhang2014))
+- Fix bug with adding an extraneous row after consuming a protobuf message from Kafka. [\#4808](https://github.com/ClickHouse/ClickHouse/pull/4808) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fix segmentation fault in `clickhouse-copier`. [\#4835](https://github.com/ClickHouse/ClickHouse/pull/4835) ([proller](https://github.com/proller))
+- Fixed race condition in `SELECT` from `system.tables` if the table is renamed or altered concurrently. [\#4836](https://github.com/ClickHouse/ClickHouse/pull/4836) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed data race when fetching a data part that is already obsolete. [\#4839](https://github.com/ClickHouse/ClickHouse/pull/4839) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed rare data race that can happen during `RENAME` of a MergeTree family table. [\#4844](https://github.com/ClickHouse/ClickHouse/pull/4844) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed segmentation fault in function `arrayIntersect`. A segmentation fault could happen if the function was called with mixed constant and ordinary arguments. [\#4847](https://github.com/ClickHouse/ClickHouse/pull/4847) ([Lixiang Qian](https://github.com/fancyqlx))
+- Fixed reading from `Array(LowCardinality)` column in a rare case when the column contained a long sequence of empty arrays. [\#4850](https://github.com/ClickHouse/ClickHouse/pull/4850) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix `No message received` exception while fetching parts between replicas. [\#4856](https://github.com/ClickHouse/ClickHouse/pull/4856) ([alesapin](https://github.com/alesapin))
+- Fixed `arrayIntersect` function wrong result in case of several repeated values in a single array. [\#4871](https://github.com/ClickHouse/ClickHouse/pull/4871) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix a race condition during concurrent `ALTER COLUMN` queries that could lead to a server crash (fixes issue [\#3421](https://github.com/ClickHouse/ClickHouse/issues/3421)). [\#4592](https://github.com/ClickHouse/ClickHouse/pull/4592) ([Alex Zatelepin](https://github.com/ztlpn))
+- Fix parameter deduction in `ALTER MODIFY` of column `CODEC` when the column type is not specified. [\#4883](https://github.com/ClickHouse/ClickHouse/pull/4883) ([alesapin](https://github.com/alesapin))
+- Functions `cutQueryStringAndFragment()` and `queryStringAndFragment()` now work correctly when `URL` contains a fragment and no query.
[\#4894](https://github.com/ClickHouse/ClickHouse/pull/4894) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fix rare bug when setting `min_bytes_to_use_direct_io` is greater than zero, which occurs when a thread has to seek backward in a column file. [\#4897](https://github.com/ClickHouse/ClickHouse/pull/4897) ([alesapin](https://github.com/alesapin))
+- Fix wrong argument types for aggregate functions with `LowCardinality` arguments (fixes issue [\#4919](https://github.com/ClickHouse/ClickHouse/issues/4919)). [\#4922](https://github.com/ClickHouse/ClickHouse/pull/4922) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix function `toISOWeek` result for year 1970. [\#4988](https://github.com/ClickHouse/ClickHouse/pull/4988) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix `DROP`, `TRUNCATE` and `OPTIMIZE` queries duplication, when executed `ON CLUSTER` for the `ReplicatedMergeTree*` tables family. [\#4991](https://github.com/ClickHouse/ClickHouse/pull/4991) ([alesapin](https://github.com/alesapin))
+
+#### Improvements {#improvements-2}
+
+- Keep ordinary, `DEFAULT`, `MATERIALIZED` and `ALIAS` columns in a single list (fixes issue [\#2867](https://github.com/ClickHouse/ClickHouse/issues/2867)). [\#4707](https://github.com/ClickHouse/ClickHouse/pull/4707) ([Alex Zatelepin](https://github.com/ztlpn))
+
+### ClickHouse release 19.4.3.11, 2019-04-02 {#clickhouse-release-19-4-3-11-2019-04-02}
+
+#### Bug Fixes {#bug-fixes-8}
+
+- Fix crash in `FULL/RIGHT JOIN` when joining on nullable vs not nullable. [\#4855](https://github.com/ClickHouse/ClickHouse/pull/4855) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix segmentation fault in `clickhouse-copier`. [\#4835](https://github.com/ClickHouse/ClickHouse/pull/4835) ([proller](https://github.com/proller))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-11}
+
+- Add a way to launch the clickhouse-server image from a custom user. [\#4753](https://github.com/ClickHouse/ClickHouse/pull/4753) ([Mikhail f. Shiryaev](https://github.com/Felixoid))
+
+### ClickHouse release 19.4.2.7, 2019-03-30 {#clickhouse-release-19-4-2-7-2019-03-30}
+
+#### Bug Fixes {#bug-fixes-9}
+
+- Fixed reading from `Array(LowCardinality)` column in a rare case when the column contained a long sequence of empty arrays. [\#4850](https://github.com/ClickHouse/ClickHouse/pull/4850) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+
+### ClickHouse release 19.4.1.3, 2019-03-19 {#clickhouse-release-19-4-1-3-2019-03-19}
+
+#### Bug Fixes {#bug-fixes-10}
+
+- Fixed remote queries which contain both `LIMIT BY` and `LIMIT`. Previously, if `LIMIT BY` and `LIMIT` were used for a remote query, `LIMIT` could happen before `LIMIT BY`, which led to a too heavily filtered result. [\#4708](https://github.com/ClickHouse/ClickHouse/pull/4708) ([Constantin S. Pan](https://github.com/kvap))
+
+### ClickHouse release 19.4.0.49, 2019-03-09 {#clickhouse-release-19-4-0-49-2019-03-09}
+
+#### New Features {#new-features-5}
+
+- Added full support for `Protobuf` format (input and output, nested data structures). [\#4174](https://github.com/ClickHouse/ClickHouse/pull/4174) [\#4493](https://github.com/ClickHouse/ClickHouse/pull/4493) ([Vitaly Baranov](https://github.com/vitlibar))
+- Added bitmap functions with Roaring Bitmaps.
+- Added bitmap functions with Roaring Bitmaps. [\#4207](https://github.com/ClickHouse/ClickHouse/pull/4207) ([Andy Yang](https://github.com/andyyzh)) [\#4568](https://github.com/ClickHouse/ClickHouse/pull/4568) ([Vitaly Baranov](https://github.com/vitlibar))
+- Parquet format support. [\#4448](https://github.com/ClickHouse/ClickHouse/pull/4448) ([proller](https://github.com/proller))
+- N-gram distance was added for fuzzy string comparison. It is similar to q-gram metrics in the R language. [\#4466](https://github.com/ClickHouse/ClickHouse/pull/4466) ([Danila Kutenin](https://github.com/danlark1))
+- Combine rules for graphite rollup from dedicated aggregation and retention patterns. [\#4426](https://github.com/ClickHouse/ClickHouse/pull/4426) ([Mikhail f. Shiryaev](https://github.com/Felixoid))
+- Added `max_execution_speed` and `max_execution_speed_bytes` to limit resource usage. Added the `min_execution_speed_bytes` setting to complement `min_execution_speed`. [\#4430](https://github.com/ClickHouse/ClickHouse/pull/4430) ([Winter Zhang](https://github.com/zhang2014))
+- Implemented function `flatten`. [\#4555](https://github.com/ClickHouse/ClickHouse/pull/4555) [\#4409](https://github.com/ClickHouse/ClickHouse/pull/4409) ([alexey-milovidov](https://github.com/alexey-milovidov), [kzon](https://github.com/kzon))
+- Added functions `arrayEnumerateDenseRanked` and `arrayEnumerateUniqRanked` (like `arrayEnumerateUniq`, but allowing to fine-tune the array depth to look inside multidimensional arrays). [\#4475](https://github.com/ClickHouse/ClickHouse/pull/4475) ([proller](https://github.com/proller)) [\#4601](https://github.com/ClickHouse/ClickHouse/pull/4601) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Multiple JOINs with some restrictions: no asterisks, no complex aliases in ON/WHERE/GROUP BY/… [\#4462](https://github.com/ClickHouse/ClickHouse/pull/4462) ([Artem Zuikov](https://github.com/4ertus2))
+
+#### Bug Fixes {#bug-fixes-11}
+
+- This release also contains all bug fixes from 19.3 and 19.1.
+- Fixed a bug in data skipping indices: the order of granules after INSERT was incorrect. [\#4407](https://github.com/ClickHouse/ClickHouse/pull/4407) ([Nikita Vasilev](https://github.com/nikvas0))
+- Fixed `set` index for `Nullable` and `LowCardinality` columns. Previously, a `set` index with a `Nullable` or `LowCardinality` column led to the error `Data type must be deserialized with multiple streams` while selecting. [\#4594](https://github.com/ClickHouse/ClickHouse/pull/4594) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Correctly set update\_time on full `executable` dictionary update. [\#4551](https://github.com/ClickHouse/ClickHouse/pull/4551) ([Tema Novikov](https://github.com/temoon))
+- Fix the broken progress bar in 19.3. [\#4627](https://github.com/ClickHouse/ClickHouse/pull/4627) ([filimonov](https://github.com/filimonov))
+- Fixed inconsistent values of MemoryTracker when a memory region was shrunk, in certain cases. [\#4619](https://github.com/ClickHouse/ClickHouse/pull/4619) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed undefined behaviour in ThreadPool. [\#4612](https://github.com/ClickHouse/ClickHouse/pull/4612) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a very rare crash with the message `mutex lock failed: Invalid argument` that could happen when a MergeTree table was dropped concurrently with a SELECT. [\#4608](https://github.com/ClickHouse/ClickHouse/pull/4608) ([Alex Zatelepin](https://github.com/ztlpn))
+- ODBC driver compatibility with `LowCardinality` data type. [\#4381](https://github.com/ClickHouse/ClickHouse/pull/4381) ([proller](https://github.com/proller))
+- FreeBSD: Fix for the `AIOcontextPool: Found io_event with unknown id 0` error. [\#4438](https://github.com/ClickHouse/ClickHouse/pull/4438) ([urgordeadbeef](https://github.com/urgordeadbeef))
+- `system.part_log` table was created regardless of configuration. [\#4483](https://github.com/ClickHouse/ClickHouse/pull/4483) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix undefined behaviour in the `dictIsIn` function for cache dictionaries. [\#4515](https://github.com/ClickHouse/ClickHouse/pull/4515) ([alesapin](https://github.com/alesapin))
+- Fixed a deadlock when a SELECT query locks the same table multiple times (e.g. from different threads or when executing multiple subqueries) and there is a concurrent DDL query. [\#4535](https://github.com/ClickHouse/ClickHouse/pull/4535) ([Alex Zatelepin](https://github.com/ztlpn))
+- Disable compile\_expressions by default until we get our own `llvm` contrib and can test it with `clang` and `asan`. [\#4579](https://github.com/ClickHouse/ClickHouse/pull/4579) ([alesapin](https://github.com/alesapin))
+- Prevent `std::terminate` when `invalidate_query` for a `clickhouse` external dictionary source has returned a wrong resultset (empty, or more than one row, or more than one column). Fixed an issue when `invalidate_query` was performed every five seconds regardless of the `lifetime`. [\#4583](https://github.com/ClickHouse/ClickHouse/pull/4583) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Avoid deadlock when the `invalidate_query` for a dictionary with `clickhouse` source involved the `system.dictionaries` table or the `Dictionaries` database (a rare case). [\#4599](https://github.com/ClickHouse/ClickHouse/pull/4599) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixes for CROSS JOIN with empty WHERE. [\#4598](https://github.com/ClickHouse/ClickHouse/pull/4598) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed segfault in function `replicate` when a constant argument is passed. [\#4603](https://github.com/ClickHouse/ClickHouse/pull/4603) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix lambda function with predicate optimizer. [\#4408](https://github.com/ClickHouse/ClickHouse/pull/4408) ([Winter Zhang](https://github.com/zhang2014))
+- Multiple fixes for multiple JOINs. [\#4595](https://github.com/ClickHouse/ClickHouse/pull/4595) ([Artem Zuikov](https://github.com/4ertus2))
+
+#### Improvements {#improvements-3}
+
+- Support aliases in the JOIN ON section for right table columns. [\#4412](https://github.com/ClickHouse/ClickHouse/pull/4412) ([Artem Zuikov](https://github.com/4ertus2))
+- Results of multiple JOINs need correct result names to be used in subselects. Flat aliases are replaced with source names in the result. [\#4474](https://github.com/ClickHouse/ClickHouse/pull/4474) ([Artem Zuikov](https://github.com/4ertus2))
+- Improve push-down logic for joined statements. [\#4387](https://github.com/ClickHouse/ClickHouse/pull/4387) ([Ivan](https://github.com/abyss7))
+
+#### Performance Improvements {#performance-improvements-3}
+
+- Improved heuristics of the “move to PREWHERE” optimization. [\#4405](https://github.com/ClickHouse/ClickHouse/pull/4405) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Use proper lookup tables that use HashTable’s API for 8-bit and 16-bit keys. [\#4536](https://github.com/ClickHouse/ClickHouse/pull/4536) ([Amos Bird](https://github.com/amosbird))
+- Improved performance of string comparison. [\#4564](https://github.com/ClickHouse/ClickHouse/pull/4564) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Clean up the distributed DDL queue in a separate thread so that it doesn’t slow down the main loop that processes distributed DDL tasks. [\#4502](https://github.com/ClickHouse/ClickHouse/pull/4502) ([Alex Zatelepin](https://github.com/ztlpn))
+- When `min_bytes_to_use_direct_io` is set to 1, not every file was opened in O\_DIRECT mode because the data size to read was sometimes underestimated by the size of one compressed block. [\#4526](https://github.com/ClickHouse/ClickHouse/pull/4526) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-12}
+
+- Added support for clang-9. [\#4604](https://github.com/ClickHouse/ClickHouse/pull/4604) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix wrong `__asm__` instructions (again). [\#4621](https://github.com/ClickHouse/ClickHouse/pull/4621) ([Konstantin Podshumok](https://github.com/podshumok))
+- Add ability to specify settings for `clickhouse-performance-test` from the command line. [\#4437](https://github.com/ClickHouse/ClickHouse/pull/4437) ([alesapin](https://github.com/alesapin))
+- Add dictionary tests to integration tests. [\#4477](https://github.com/ClickHouse/ClickHouse/pull/4477) ([alesapin](https://github.com/alesapin))
+- Added queries from the benchmark on the website to automated performance tests. [\#4496](https://github.com/ClickHouse/ClickHouse/pull/4496) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `xxhash.h` does not exist in external lz4 because it is an implementation detail and its symbols are namespaced with the `XXH_NAMESPACE` macro. When lz4 is external, xxHash has to be external too, and the dependents have to link to it. [\#4495](https://github.com/ClickHouse/ClickHouse/pull/4495) ([Orivej Desh](https://github.com/orivej))
+- Fixed a case when the `quantileTiming` aggregate function could be called with a negative or floating-point argument (this fixes a fuzz test with the undefined behaviour sanitizer). [\#4506](https://github.com/ClickHouse/ClickHouse/pull/4506) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Spelling error correction. [\#4531](https://github.com/ClickHouse/ClickHouse/pull/4531) ([sdk2](https://github.com/sdk2))
+- Fix compilation on Mac. [\#4371](https://github.com/ClickHouse/ClickHouse/pull/4371) ([Vitaly Baranov](https://github.com/vitlibar))
+- Build fixes for FreeBSD and various unusual build configurations. [\#4444](https://github.com/ClickHouse/ClickHouse/pull/4444) ([proller](https://github.com/proller))
+
+## ClickHouse release 19.3 {#clickhouse-release-19-3}
+
+### ClickHouse release 19.3.9.1, 2019-04-02 {#clickhouse-release-19-3-9-1-2019-04-02}
+
+#### Bug Fixes {#bug-fixes-12}
+
+- Fix crash in `FULL/RIGHT JOIN` when joining on a nullable vs a non-nullable column. [\#4855](https://github.com/ClickHouse/ClickHouse/pull/4855) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix segmentation fault in `clickhouse-copier`. [\#4835](https://github.com/ClickHouse/ClickHouse/pull/4835) ([proller](https://github.com/proller))
+- Fixed reading from `Array(LowCardinality)` column in a rare case when the column contained a long sequence of empty arrays. [\#4850](https://github.com/ClickHouse/ClickHouse/pull/4850) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-13}
+
+- Add a way to launch the clickhouse-server image as a custom user. [\#4753](https://github.com/ClickHouse/ClickHouse/pull/4753) ([Mikhail f. Shiryaev](https://github.com/Felixoid))
+
+### ClickHouse release 19.3.7, 2019-03-12 {#clickhouse-release-19-3-7-2019-03-12}
+
+#### Bug fixes {#bug-fixes-13}
+
+- Fixed the error in \#3920. This error manifests itself as random cache corruption (messages `Unknown codec family code`, `Cannot seek through file`) and segfaults. This bug first appeared in version 19.1 and is present in versions up to 19.1.10 and 19.3.6. [\#4623](https://github.com/ClickHouse/ClickHouse/pull/4623) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse release 19.3.6, 2019-03-02 {#clickhouse-release-19-3-6-2019-03-02}
+
+#### Bug fixes {#bug-fixes-14}
+
+- When there are more than 1000 threads in a thread pool, `std::terminate` may happen on thread exit. [Azat Khuzhin](https://github.com/azat) [\#4485](https://github.com/ClickHouse/ClickHouse/pull/4485) [\#4505](https://github.com/ClickHouse/ClickHouse/pull/4505) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Now it’s possible to create `ReplicatedMergeTree*` tables with comments on columns without defaults, and tables with column codecs without comments and defaults. Also fixed comparison of codecs. [\#4523](https://github.com/ClickHouse/ClickHouse/pull/4523) ([alesapin](https://github.com/alesapin))
+- Fixed crash on JOIN with an array or tuple. [\#4552](https://github.com/ClickHouse/ClickHouse/pull/4552) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed crash in clickhouse-copier with the message `ThreadStatus not created`. [\#4540](https://github.com/ClickHouse/ClickHouse/pull/4540) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed hangup on server shutdown if distributed DDLs were used. [\#4472](https://github.com/ClickHouse/ClickHouse/pull/4472) ([Alex Zatelepin](https://github.com/ztlpn))
+- Incorrect column numbers were printed in the error message about text format parsing for columns with numbers greater than 10. [\#4484](https://github.com/ClickHouse/ClickHouse/pull/4484) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-3}
+
+- Fixed build with AVX enabled. [\#4527](https://github.com/ClickHouse/ClickHouse/pull/4527) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Enable extended accounting and IO accounting based on a known-good kernel version instead of the kernel under which it is compiled. [\#4541](https://github.com/ClickHouse/ClickHouse/pull/4541) ([nvartolomei](https://github.com/nvartolomei))
+- Allow skipping the setting of core\_dump.size\_limit, with a warning instead of an exception if setting the limit fails. [\#4473](https://github.com/ClickHouse/ClickHouse/pull/4473) ([proller](https://github.com/proller))
+- Removed the `inline` tags of `void readBinary(...)` in `Field.cpp`. Also merged redundant `namespace DB` blocks. [\#4530](https://github.com/ClickHouse/ClickHouse/pull/4530) ([hcz](https://github.com/hczhcz))
+
+### ClickHouse release 19.3.5, 2019-02-21 {#clickhouse-release-19-3-5-2019-02-21}
+
+#### Bug fixes {#bug-fixes-15}
+
+- Fixed a bug in processing of large HTTP insert queries. [\#4454](https://github.com/ClickHouse/ClickHouse/pull/4454) ([alesapin](https://github.com/alesapin))
+- Fixed backward incompatibility with old versions due to wrong implementation of the `send_logs_level` setting. [\#4445](https://github.com/ClickHouse/ClickHouse/pull/4445) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed backward incompatibility of table function `remote` introduced with column comments. [\#4446](https://github.com/ClickHouse/ClickHouse/pull/4446) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse release 19.3.4, 2019-02-16 {#clickhouse-release-19-3-4-2019-02-16}
+
+#### Improvements {#improvements-4}
+
+- Table index size is not accounted against memory limits when doing an `ATTACH TABLE` query. This avoids the possibility that a table cannot be attached after being detached. [\#4396](https://github.com/ClickHouse/ClickHouse/pull/4396) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Slightly raised the limit on max string and array size received from ZooKeeper. This allows continuing to work with an increased size of `CLIENT_JVMFLAGS=-Djute.maxbuffer=...` on ZooKeeper. [\#4398](https://github.com/ClickHouse/ClickHouse/pull/4398) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Allow repairing an abandoned replica even if it already has a huge number of nodes in its queue. [\#4399](https://github.com/ClickHouse/ClickHouse/pull/4399) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add one required argument to the `SET` index (the maximum number of stored rows). [\#4386](https://github.com/ClickHouse/ClickHouse/pull/4386) ([Nikita Vasilev](https://github.com/nikvas0))
+
+#### Bug Fixes {#bug-fixes-16}
+
+- Fixed `WITH ROLLUP` result for GROUP BY with a single `LowCardinality` key. [\#4384](https://github.com/ClickHouse/ClickHouse/pull/4384) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed a bug in the set index (dropping a granule if it contains more than `max_rows` rows). [\#4386](https://github.com/ClickHouse/ClickHouse/pull/4386) ([Nikita Vasilev](https://github.com/nikvas0))
+- A lot of FreeBSD build fixes. [\#4397](https://github.com/ClickHouse/ClickHouse/pull/4397) ([proller](https://github.com/proller))
+- Fixed alias substitution in queries with a subquery containing the same alias (issue [\#4110](https://github.com/ClickHouse/ClickHouse/issues/4110)). [\#4351](https://github.com/ClickHouse/ClickHouse/pull/4351) ([Artem Zuikov](https://github.com/4ertus2))
+
+#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-4}
+
+- Add ability to run `clickhouse-server` for stateless tests in a docker image. [\#4347](https://github.com/ClickHouse/ClickHouse/pull/4347) ([Vasily Nemkov](https://github.com/Enmk))
+
+### ClickHouse release 19.3.3, 2019-02-13 {#clickhouse-release-19-3-3-2019-02-13}
+
+#### New Features {#new-features-6}
+
+- Added the `KILL MUTATION` statement that allows removing mutations that are for some reason stuck. Added `latest_failed_part`, `latest_fail_time`, `latest_fail_reason` fields to the `system.mutations` table for easier troubleshooting. [\#4287](https://github.com/ClickHouse/ClickHouse/pull/4287) ([Alex Zatelepin](https://github.com/ztlpn))
+- Added aggregate function `entropy` which computes Shannon entropy. [\#4238](https://github.com/ClickHouse/ClickHouse/pull/4238) ([Quid37](https://github.com/Quid37))
+- Added ability to send queries `INSERT INTO tbl VALUES (....` to the server without splitting into `query` and `data` parts. [\#4301](https://github.com/ClickHouse/ClickHouse/pull/4301) ([alesapin](https://github.com/alesapin))
+- A generic implementation of the `arrayWithConstant` function was added. [\#4322](https://github.com/ClickHouse/ClickHouse/pull/4322) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Implemented the `NOT BETWEEN` comparison operator. [\#4228](https://github.com/ClickHouse/ClickHouse/pull/4228) ([Dmitry Naumov](https://github.com/nezed))
+- Implement `sumMapFiltered` in order to be able to limit the number of keys for which values will be summed by `sumMap`. [\#4129](https://github.com/ClickHouse/ClickHouse/pull/4129) ([Léo Ercolanelli](https://github.com/ercolanelli-leo))
+- Added support of `Nullable` types in the `mysql` table function. [\#4198](https://github.com/ClickHouse/ClickHouse/pull/4198) ([Emmanuel Donin de Rosière](https://github.com/edonin))
+- Support for arbitrary constant expressions in the `LIMIT` clause. [\#4246](https://github.com/ClickHouse/ClickHouse/pull/4246) ([k3box](https://github.com/k3box))
+- Added the `topKWeighted` aggregate function that takes an additional argument with an (unsigned integer) weight. [\#4245](https://github.com/ClickHouse/ClickHouse/pull/4245) ([Andrew Golman](https://github.com/andrewgolman))
+- `StorageJoin` now supports the `join_any_take_last_row` setting that allows overwriting existing values of the same key. [\#3973](https://github.com/ClickHouse/ClickHouse/pull/3973) ([Amos Bird](https://github.com/amosbird))
+- Added function `toStartOfInterval`. [\#4304](https://github.com/ClickHouse/ClickHouse/pull/4304) ([Vitaly Baranov](https://github.com/vitlibar))
+- Added `RowBinaryWithNamesAndTypes` format. [\#4200](https://github.com/ClickHouse/ClickHouse/pull/4200) ([Oleg V. Kozlyuk](https://github.com/DarkWanderer))
+- Added `IPv4` and `IPv6` data types. More efficient implementations of `IPv*` functions. [\#3669](https://github.com/ClickHouse/ClickHouse/pull/3669) ([Vasily Nemkov](https://github.com/Enmk))
+- Added function `toStartOfTenMinutes()`. [\#4298](https://github.com/ClickHouse/ClickHouse/pull/4298) ([Vitaly Baranov](https://github.com/vitlibar))
+- Added `Protobuf` output format. [\#4005](https://github.com/ClickHouse/ClickHouse/pull/4005) [\#4158](https://github.com/ClickHouse/ClickHouse/pull/4158) ([Vitaly Baranov](https://github.com/vitlibar))
+- Added brotli support in the HTTP interface for data import (INSERTs). [\#4235](https://github.com/ClickHouse/ClickHouse/pull/4235) ([Mikhail](https://github.com/fandyushin))
+- Added hints when a user makes a typo in a function name or type in the command-line client. [\#4239](https://github.com/ClickHouse/ClickHouse/pull/4239) ([Danila Kutenin](https://github.com/danlark1))
+- Added `Query-Id` to the Server’s HTTP Response header. [\#4231](https://github.com/ClickHouse/ClickHouse/pull/4231) ([Mikhail](https://github.com/fandyushin))
+
+#### Experimental features {#experimental-features-2}
+
+- Added `minmax` and `set` data skipping indices for the MergeTree table engine family. [\#4143](https://github.com/ClickHouse/ClickHouse/pull/4143) ([Nikita Vasilev](https://github.com/nikvas0))
+- Added conversion of `CROSS JOIN` to `INNER JOIN` if possible. [\#4221](https://github.com/ClickHouse/ClickHouse/pull/4221) [\#4266](https://github.com/ClickHouse/ClickHouse/pull/4266) ([Artem Zuikov](https://github.com/4ertus2))
+
+#### Bug Fixes {#bug-fixes-17}
+
+- Fixed `Not found column` for duplicate columns in the `JOIN ON` section. [\#4279](https://github.com/ClickHouse/ClickHouse/pull/4279) ([Artem Zuikov](https://github.com/4ertus2))
+- Make the `START REPLICATED SENDS` command start replicated sends. [\#4229](https://github.com/ClickHouse/ClickHouse/pull/4229) ([nvartolomei](https://github.com/nvartolomei))
+- Fixed aggregate function execution with `Array(LowCardinality)` arguments. [\#4055](https://github.com/ClickHouse/ClickHouse/pull/4055) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Fixed wrong behaviour when doing an `INSERT ... SELECT ... FROM file(...)` query where the file has `CSVWithNames` or `TSVWithNames` format and the first data row is missing. [\#4297](https://github.com/ClickHouse/ClickHouse/pull/4297) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed crash on dictionary reload if the dictionary is not available. This bug appeared in 19.1.6. [\#4188](https://github.com/ClickHouse/ClickHouse/pull/4188) ([proller](https://github.com/proller))
+- Fixed `ALL JOIN` with duplicates in the right table. [\#4184](https://github.com/ClickHouse/ClickHouse/pull/4184) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed segmentation fault with `use_uncompressed_cache=1` and an exception with wrong uncompressed size. This bug appeared in 19.1.6. [\#4186](https://github.com/ClickHouse/ClickHouse/pull/4186) ([alesapin](https://github.com/alesapin))
+- Fixed a `compile_expressions` bug with comparison of big (more than int16) dates. [\#4341](https://github.com/ClickHouse/ClickHouse/pull/4341) ([alesapin](https://github.com/alesapin))
+- Fixed infinite loop when selecting from table function `numbers(0)`. [\#4280](https://github.com/ClickHouse/ClickHouse/pull/4280) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Temporarily disable predicate optimization for `ORDER BY`. [\#3890](https://github.com/ClickHouse/ClickHouse/pull/3890) ([Winter Zhang](https://github.com/zhang2014))
+- Fixed an `Illegal instruction` error when using base64 functions on old CPUs. This error was reproduced only when ClickHouse was compiled with gcc-8. [\#4275](https://github.com/ClickHouse/ClickHouse/pull/4275) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a `No message received` error when interacting with the PostgreSQL ODBC Driver through a TLS connection. Also fixes a segfault when using the MySQL ODBC Driver. [\#4170](https://github.com/ClickHouse/ClickHouse/pull/4170) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed incorrect result when `Date` and `DateTime` arguments are used in branches of the conditional operator (function `if`). Added a generic case for function `if`. [\#4243](https://github.com/ClickHouse/ClickHouse/pull/4243) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- ClickHouse dictionaries now load within the `clickhouse` process. [\#4166](https://github.com/ClickHouse/ClickHouse/pull/4166) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a deadlock when a `SELECT` from a table with `File` engine was retried after a `No such file or directory` error. [\#4161](https://github.com/ClickHouse/ClickHouse/pull/4161) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a race condition when selecting from `system.tables` could give a `table doesn't exist` error. [\#4313](https://github.com/ClickHouse/ClickHouse/pull/4313) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `clickhouse-client` can segfault on exit while loading data for command-line suggestions if it was run in interactive mode. [\#4317](https://github.com/ClickHouse/ClickHouse/pull/4317) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a bug when the execution of mutations containing `IN` operators was producing incorrect results. [\#4099](https://github.com/ClickHouse/ClickHouse/pull/4099) ([Alex Zatelepin](https://github.com/ztlpn))
+- Fixed an error: if there is a database with `Dictionary` engine, all dictionaries are forced to load at server startup, and if there is a dictionary with ClickHouse source from localhost, the dictionary cannot load. [\#4255](https://github.com/ClickHouse/ClickHouse/pull/4255) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed an error when system logs were attempted to be created again at server shutdown. [\#4254](https://github.com/ClickHouse/ClickHouse/pull/4254) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Correctly return the right type and properly handle locks in the `joinGet` function. [\#4153](https://github.com/ClickHouse/ClickHouse/pull/4153) ([Amos Bird](https://github.com/amosbird))
+- Added `sumMapWithOverflow` function. [\#4151](https://github.com/ClickHouse/ClickHouse/pull/4151) ([Léo Ercolanelli](https://github.com/ercolanelli-leo))
+- Fixed segfault with `allow_experimental_multiple_joins_emulation`. [52de2c](https://github.com/ClickHouse/ClickHouse/commit/52de2cd927f7b5257dd67e175f0a5560a48840d0) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed a bug with incorrect `Date` and `DateTime` comparison. [\#4237](https://github.com/ClickHouse/ClickHouse/pull/4237) ([valexey](https://github.com/valexey))
+- Fixed a fuzz test under the undefined behavior sanitizer: added a parameter type check for the `quantile*Weighted` family of functions. [\#4145](https://github.com/ClickHouse/ClickHouse/pull/4145) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a rare race condition when removal of old data parts could fail with a `File not found` error. [\#4378](https://github.com/ClickHouse/ClickHouse/pull/4378) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix package installation with missing /etc/clickhouse-server/config.xml. [\#4343](https://github.com/ClickHouse/ClickHouse/pull/4343) ([proller](https://github.com/proller))
+
+#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-5}
+
+- Debian package: correct the /etc/clickhouse-server/preprocessed link according to config. [\#4205](https://github.com/ClickHouse/ClickHouse/pull/4205) ([proller](https://github.com/proller))
+- Various build fixes for FreeBSD. [\#4225](https://github.com/ClickHouse/ClickHouse/pull/4225) ([proller](https://github.com/proller))
+- Added ability to create, fill and drop tables in perftest. [\#4220](https://github.com/ClickHouse/ClickHouse/pull/4220) ([alesapin](https://github.com/alesapin))
+- Added a script to check for duplicate includes. [\#4326](https://github.com/ClickHouse/ClickHouse/pull/4326) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added ability to run queries by index in performance tests. [\#4264](https://github.com/ClickHouse/ClickHouse/pull/4264) ([alesapin](https://github.com/alesapin))
+- A package with debug symbols is suggested to be installed. [\#4274](https://github.com/ClickHouse/ClickHouse/pull/4274) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Refactoring of performance-test. Better logging and signal handling. [\#4171](https://github.com/ClickHouse/ClickHouse/pull/4171) ([alesapin](https://github.com/alesapin))
+- Added docs for anonymized Yandex.Metrika datasets. [\#4164](https://github.com/ClickHouse/ClickHouse/pull/4164) ([alesapin](https://github.com/alesapin))
+- Added a tool for converting an old month-partitioned part to the custom-partitioned format. [\#4195](https://github.com/ClickHouse/ClickHouse/pull/4195) ([Alex Zatelepin](https://github.com/ztlpn))
+- Added docs about two datasets in s3. [\#4144](https://github.com/ClickHouse/ClickHouse/pull/4144) ([alesapin](https://github.com/alesapin))
+- Added a script that creates a changelog from pull request descriptions. [\#4169](https://github.com/ClickHouse/ClickHouse/pull/4169) [\#4173](https://github.com/ClickHouse/ClickHouse/pull/4173) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Added a puppet module for ClickHouse. [\#4182](https://github.com/ClickHouse/ClickHouse/pull/4182) ([Maxim Fedotov](https://github.com/MaxFedotov))
+- Added docs for a group of undocumented functions. [\#4168](https://github.com/ClickHouse/ClickHouse/pull/4168) ([Winter Zhang](https://github.com/zhang2014))
+- ARM build fixes. [\#4210](https://github.com/ClickHouse/ClickHouse/pull/4210) [\#4306](https://github.com/ClickHouse/ClickHouse/pull/4306) [\#4291](https://github.com/ClickHouse/ClickHouse/pull/4291) ([proller](https://github.com/proller))
+- Dictionary tests are now able to run from `ctest`. [\#4189](https://github.com/ClickHouse/ClickHouse/pull/4189) ([proller](https://github.com/proller))
+- Now `/etc/ssl` is used as the default directory with SSL certificates. [\#4167](https://github.com/ClickHouse/ClickHouse/pull/4167) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added a check of SSE and AVX instructions at startup. [\#4234](https://github.com/ClickHouse/ClickHouse/pull/4234) ([Igr](https://github.com/igron99))
+- The init script will wait for the server to start. [\#4281](https://github.com/ClickHouse/ClickHouse/pull/4281) ([proller](https://github.com/proller))
+
+#### Backward Incompatible Changes {#backward-incompatible-changes-1}
+
+- Removed the `allow_experimental_low_cardinality_type` setting. `LowCardinality` data types are production ready. [\#4323](https://github.com/ClickHouse/ClickHouse/pull/4323) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Reduce mark cache size and uncompressed cache size according to the available amount of memory. [\#4240](https://github.com/ClickHouse/ClickHouse/pull/4240) ([Lopatin Konstantin](https://github.com/k-lopatin))
+- Added keyword `INDEX` in the `CREATE TABLE` query. A column with name `index` must be quoted with backticks or double quotes: `` `index` ``. [\#4143](https://github.com/ClickHouse/ClickHouse/pull/4143) ([Nikita Vasilev](https://github.com/nikvas0))
+- `sumMap` now promotes the result type instead of overflowing. The old `sumMap` behavior can be obtained by using the `sumMapWithOverflow` function. [\#4151](https://github.com/ClickHouse/ClickHouse/pull/4151) ([Léo Ercolanelli](https://github.com/ercolanelli-leo))
+
+#### Performance Improvements {#performance-improvements-4}
+
+- `std::sort` replaced by `pdqsort` for queries without `LIMIT`. [\#4236](https://github.com/ClickHouse/ClickHouse/pull/4236) ([Evgenii Pravda](https://github.com/kvinty))
+- Now the server reuses threads from the global thread pool. This affects performance in some corner cases. [\#4150](https://github.com/ClickHouse/ClickHouse/pull/4150) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Improvements {#improvements-5}
+
+- Implemented AIO support for FreeBSD. [\#4305](https://github.com/ClickHouse/ClickHouse/pull/4305) ([urgordeadbeef](https://github.com/urgordeadbeef))
+- `SELECT * FROM a JOIN b USING a, b` now returns the `a` and `b` columns only from the left table. [\#4141](https://github.com/ClickHouse/ClickHouse/pull/4141) ([Artem Zuikov](https://github.com/4ertus2))
+- Allow the client’s `-C` option to work like the `-c` option. [\#4232](https://github.com/ClickHouse/ClickHouse/pull/4232) ([syominsergey](https://github.com/syominsergey))
+- Now the `--password` option used without a value requires the password from stdin. [\#4230](https://github.com/ClickHouse/ClickHouse/pull/4230) ([BSD\_Conqueror](https://github.com/bsd-conqueror))
+- Added highlighting of unescaped metacharacters in string literals that contain `LIKE` expressions or regexps. [\#4327](https://github.com/ClickHouse/ClickHouse/pull/4327) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added cancelling of HTTP read-only queries if the client socket goes away. [\#4213](https://github.com/ClickHouse/ClickHouse/pull/4213) ([nvartolomei](https://github.com/nvartolomei))
+- Now the server reports progress to keep client connections alive. [\#4215](https://github.com/ClickHouse/ClickHouse/pull/4215) ([Ivan](https://github.com/abyss7))
+- Slightly better message with the reason for an OPTIMIZE query when the `optimize_throw_if_noop` setting is enabled. [\#4294](https://github.com/ClickHouse/ClickHouse/pull/4294) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added support of the `--version` option for the ClickHouse server. [\#4251](https://github.com/ClickHouse/ClickHouse/pull/4251) ([Lopatin Konstantin](https://github.com/k-lopatin))
+- Added the `--help/-h` option to `clickhouse-server`. [\#4233](https://github.com/ClickHouse/ClickHouse/pull/4233) ([Yuriy Baranov](https://github.com/yurriy))
+- Added support for scalar subqueries with an aggregate function state result. [\#4348](https://github.com/ClickHouse/ClickHouse/pull/4348) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Improved server shutdown time and ALTERs waiting time. [\#4372](https://github.com/ClickHouse/ClickHouse/pull/4372) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added info about the replicated\_can\_become\_leader setting to system.replicas and added logging if the replica won’t try to become the leader. [\#4379](https://github.com/ClickHouse/ClickHouse/pull/4379) ([Alex Zatelepin](https://github.com/ztlpn))
+
+## ClickHouse release 19.1 {#clickhouse-release-19-1}
+
+### ClickHouse release 19.1.14, 2019-03-14 {#clickhouse-release-19-1-14-2019-03-14}
+
+- Fixed error `Column ... queried more than once` that could happen if the setting `asterisk_left_columns_only` is set to 1 in case of using `GLOBAL JOIN` with `SELECT *` (a rare case). The issue does not exist in 19.3 and newer. [6bac7d8d](https://github.com/ClickHouse/ClickHouse/pull/4692/commits/6bac7d8d11a9b0d6de0b32b53c47eb2f6f8e7062) ([Artem Zuikov](https://github.com/4ertus2))
+
+### ClickHouse release 19.1.13, 2019-03-12 {#clickhouse-release-19-1-13-2019-03-12}
+
+This release contains exactly the same set of patches as 19.3.7.
+
+### ClickHouse release 19.1.10, 2019-03-03 {#clickhouse-release-19-1-10-2019-03-03}
+
+This release contains exactly the same set of patches as 19.3.6.
+
+## ClickHouse release 19.1 {#clickhouse-release-19-1-1}
+
+### ClickHouse release 19.1.9, 2019-02-21 {#clickhouse-release-19-1-9-2019-02-21}
+
+#### Bug fixes {#bug-fixes-18}
+
+- Fixed backward incompatibility with old versions due to wrong implementation of the `send_logs_level` setting. [\#4445](https://github.com/ClickHouse/ClickHouse/pull/4445) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed backward incompatibility of table function `remote` introduced with column comments. [\#4446](https://github.com/ClickHouse/ClickHouse/pull/4446) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse release 19.1.8, 2019-02-16 {#clickhouse-release-19-1-8-2019-02-16}
+
+#### Bug Fixes {#bug-fixes-19}
+
+- Fix package installation with missing /etc/clickhouse-server/config.xml. [\#4343](https://github.com/ClickHouse/ClickHouse/pull/4343) ([proller](https://github.com/proller))
+
+## ClickHouse release 19.1 {#clickhouse-release-19-1-2}
+
+### ClickHouse release 19.1.7, 2019-02-15 {#clickhouse-release-19-1-7-2019-02-15}
+
+#### Bug Fixes {#bug-fixes-20}
+
+- Correctly return the right type and properly handle locks in the `joinGet` function. [\#4153](https://github.com/ClickHouse/ClickHouse/pull/4153) ([Amos Bird](https://github.com/amosbird))
+- Fixed an error when system logs were attempted to be created again at server shutdown. [\#4254](https://github.com/ClickHouse/ClickHouse/pull/4254) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed an error: if there is a database with `Dictionary` engine, all dictionaries are forced to load at server startup, and if there is a dictionary with ClickHouse source from localhost, the dictionary cannot load. [\#4255](https://github.com/ClickHouse/ClickHouse/pull/4255) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a bug when the execution of mutations containing `IN` operators was producing incorrect results. [\#4099](https://github.com/ClickHouse/ClickHouse/pull/4099) ([Alex Zatelepin](https://github.com/ztlpn))
+- `clickhouse-client` can segfault on exit while loading data for command-line suggestions if it was run in interactive mode. [\#4317](https://github.com/ClickHouse/ClickHouse/pull/4317) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a race condition when selecting from `system.tables` could give a `table doesn't exist` error. [\#4313](https://github.com/ClickHouse/ClickHouse/pull/4313) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a deadlock when a `SELECT` from a table with `File` engine was retried after a `No such file or directory` error. [\#4161](https://github.com/ClickHouse/ClickHouse/pull/4161) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed an issue: local ClickHouse dictionaries are loaded via TCP, but should load within the process. [\#4166](https://github.com/ClickHouse/ClickHouse/pull/4166) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a `No message received` error when interacting with the PostgreSQL ODBC Driver through a TLS connection. Also fixes a segfault when using the MySQL ODBC Driver. [\#4170](https://github.com/ClickHouse/ClickHouse/pull/4170) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Temporarily disable predicate optimization for `ORDER BY`. [\#3890](https://github.com/ClickHouse/ClickHouse/pull/3890) ([Winter Zhang](https://github.com/zhang2014))
+- Fixed infinite loop when selecting from table function `numbers(0)`. [\#4280](https://github.com/ClickHouse/ClickHouse/pull/4280) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a `compile_expressions` bug with comparison of big (more than int16) dates. [\#4341](https://github.com/ClickHouse/ClickHouse/pull/4341) ([alesapin](https://github.com/alesapin))
+- Fixed segmentation fault with `uncompressed_cache=1` and an exception with wrong uncompressed size. [\#4186](https://github.com/ClickHouse/ClickHouse/pull/4186) ([alesapin](https://github.com/alesapin))
+- Fixed `ALL JOIN` with duplicates in the right table. [\#4184](https://github.com/ClickHouse/ClickHouse/pull/4184) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed wrong behaviour when doing an `INSERT ... SELECT ... FROM file(...)` query where the file has `CSVWithNames` or `TSVWithNames` format and the first data row is missing. [\#4297](https://github.com/ClickHouse/ClickHouse/pull/4297) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed aggregate function execution with `Array(LowCardinality)` arguments. [\#4055](https://github.com/ClickHouse/ClickHouse/pull/4055) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Debian package: correct the /etc/clickhouse-server/preprocessed link according to config. [\#4205](https://github.com/ClickHouse/ClickHouse/pull/4205) ([proller](https://github.com/proller))
+- Fixed a fuzz test under the undefined behavior sanitizer: added a parameter type check for the `quantile*Weighted` family of functions. [\#4145](https://github.com/ClickHouse/ClickHouse/pull/4145) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Make the `START REPLICATED SENDS` command start replicated sends. [\#4229](https://github.com/ClickHouse/ClickHouse/pull/4229) ([nvartolomei](https://github.com/nvartolomei))
+- Fixed `Not found column` for duplicate columns in the JOIN ON section. [\#4279](https://github.com/ClickHouse/ClickHouse/pull/4279) ([Artem Zuikov](https://github.com/4ertus2))
+- Now `/etc/ssl` is used as the default directory with SSL certificates. [\#4167](https://github.com/ClickHouse/ClickHouse/pull/4167) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed crash on dictionary reload if the dictionary is not available. [\#4188](https://github.com/ClickHouse/ClickHouse/pull/4188) ([proller](https://github.com/proller))
+- Fixed a bug with incorrect `Date` and `DateTime` comparison. [\#4237](https://github.com/ClickHouse/ClickHouse/pull/4237) ([valexey](https://github.com/valexey))
+- Fixed incorrect result when `Date` and `DateTime` arguments are used in branches of the conditional operator (function `if`). Added a generic case for function `if`. [\#4243](https://github.com/ClickHouse/ClickHouse/pull/4243) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse release 19.1.6, 2019-01-24 {#clickhouse-release-19-1-6-2019-01-24}
+
+#### New Features {#new-features-7}
+
+- Custom per-column compression codecs for tables. [\#3899](https://github.com/ClickHouse/ClickHouse/pull/3899) [\#4111](https://github.com/ClickHouse/ClickHouse/pull/4111) ([alesapin](https://github.com/alesapin), [Winter Zhang](https://github.com/zhang2014), [Anatoly](https://github.com/Sindbag))
+- Added compression codec `Delta`. [\#4052](https://github.com/ClickHouse/ClickHouse/pull/4052) ([alesapin](https://github.com/alesapin))
+- Allow `ALTER` of compression codecs. [\#4054](https://github.com/ClickHouse/ClickHouse/pull/4054) ([alesapin](https://github.com/alesapin))
+- Added functions `left`, `right`, `trim`, `ltrim`, `rtrim`, `timestampadd`, `timestampsub` for SQL standard compatibility. [\#3826](https://github.com/ClickHouse/ClickHouse/pull/3826) ([Ivan Blinkov](https://github.com/blinkov))
+- Support for writing to `HDFS` tables and the `hdfs` table function. [\#4084](https://github.com/ClickHouse/ClickHouse/pull/4084) ([alesapin](https://github.com/alesapin))
+- Added functions to search for multiple constant strings in a big haystack: `multiPosition`, `multiSearch`, `firstMatch`, also with `-UTF8`, `-CaseInsensitive`, and `-CaseInsensitiveUTF8` variants. [\#4053](https://github.com/ClickHouse/ClickHouse/pull/4053) ([Danila Kutenin](https://github.com/danlark1))
+- Pruning of unused shards if the `SELECT` query filters by the sharding key (setting `optimize_skip_unused_shards`). [\#3851](https://github.com/ClickHouse/ClickHouse/pull/3851) ([Gleb Kanterov](https://github.com/kanterov), [Ivan](https://github.com/abyss7))
+- Allow the `Kafka` engine to ignore some number of parsing errors per block. [\#4094](https://github.com/ClickHouse/ClickHouse/pull/4094) ([Ivan](https://github.com/abyss7))
+- Added support for `CatBoost` multiclass model evaluation. Function `modelEvaluate` returns a tuple with per-class raw predictions for multiclass models. `libcatboostmodel.so` should be built with [\#607](https://github.com/catboost/catboost/pull/607). [\#3959](https://github.com/ClickHouse/ClickHouse/pull/3959) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Added functions `filesystemAvailable`, `filesystemFree`, `filesystemCapacity`. [\#4097](https://github.com/ClickHouse/ClickHouse/pull/4097) ([Boris Granveaud](https://github.com/bgranvea))
+- Added hashing functions `xxHash64` and `xxHash32`. [\#3905](https://github.com/ClickHouse/ClickHouse/pull/3905) ([filimonov](https://github.com/filimonov))
+- Added the `gccMurmurHash` hashing function (GCC flavoured Murmur hash) which uses the same hash seed as [gcc](https://github.com/gcc-mirror/gcc/blob/41d6b10e96a1de98e90a7c0378437c3255814b16/libstdc%2B%2B-v3/include/bits/functional_hash.h#L191). [\#4000](https://github.com/ClickHouse/ClickHouse/pull/4000) ([sundyli](https://github.com/sundy-li))
+- Added hashing functions `javaHash`, `hiveHash`. [\#3811](https://github.com/ClickHouse/ClickHouse/pull/3811) ([shangshujie365](https://github.com/shangshujie365))
+- Added table function `remoteSecure`. The function works like `remote` but uses a secure connection. [\#4088](https://github.com/ClickHouse/ClickHouse/pull/4088) ([proller](https://github.com/proller))
+
+#### Experimental features {#experimental-features-3}
+
+- Added multiple JOINs emulation (the `allow_experimental_multiple_joins_emulation` setting). [\#3946](https://github.com/ClickHouse/ClickHouse/pull/3946) ([Artem Zuikov](https://github.com/4ertus2))
+
+#### Bug Fixes {#bug-fixes-21}
+
+- Made the `compiled_expression_cache_size` setting limited by default to lower memory consumption. [\#4041](https://github.com/ClickHouse/ClickHouse/pull/4041) ([alesapin](https://github.com/alesapin))
+- Fix a bug that led to hangups in threads that perform ALTERs of Replicated tables and in the thread that updates configuration from ZooKeeper. [\#2947](https://github.com/ClickHouse/ClickHouse/issues/2947) [\#3891](https://github.com/ClickHouse/ClickHouse/issues/3891) [\#3934](https://github.com/ClickHouse/ClickHouse/pull/3934) ([Alex Zatelepin](https://github.com/ztlpn))
+- Fixed a race condition when executing a distributed ALTER task. The race condition led to more than one replica trying to execute the task and all replicas except one failing with a ZooKeeper error. [\#3904](https://github.com/ClickHouse/ClickHouse/pull/3904) ([Alex Zatelepin](https://github.com/ztlpn))
+- Fix a bug when `from_zk` config elements weren’t refreshed after a request to ZooKeeper timed out. [\#2947](https://github.com/ClickHouse/ClickHouse/issues/2947) [\#3947](https://github.com/ClickHouse/ClickHouse/pull/3947) ([Alex Zatelepin](https://github.com/ztlpn))
+- Fix a bug with a wrong prefix for IPv4 subnet masks. [\#3945](https://github.com/ClickHouse/ClickHouse/pull/3945) ([alesapin](https://github.com/alesapin))
+- Fixed crash (`std::terminate`) in rare cases when a new thread cannot be created due to exhausted resources. [\#3956](https://github.com/ClickHouse/ClickHouse/pull/3956) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix a bug in `remote` table function execution when wrong restrictions were used in `getStructureOfRemoteTable`. [\#4009](https://github.com/ClickHouse/ClickHouse/pull/4009) ([alesapin](https://github.com/alesapin))
+- Fix a leak of netlink sockets. They were placed in a pool where they were never deleted and new sockets were created at the start of a new thread when all current sockets were in use. [\#4017](https://github.com/ClickHouse/ClickHouse/pull/4017) ([Alex Zatelepin](https://github.com/ztlpn))
+- Fix a bug with closing the `/proc/self/fd` directory before all fds were read from `/proc` after forking the `odbc-bridge` subprocess. [\#4120](https://github.com/ClickHouse/ClickHouse/pull/4120) ([alesapin](https://github.com/alesapin))
+- Fixed String to UInt monotonic conversion in case of usage of String in the primary key. [\#3870](https://github.com/ClickHouse/ClickHouse/pull/3870) ([Winter Zhang](https://github.com/zhang2014))
+- Fixed an error in the calculation of integer conversion function monotonicity. [\#3921](https://github.com/ClickHouse/ClickHouse/pull/3921) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed segfault in `arrayEnumerateUniq`, `arrayEnumerateDense` functions in case of some invalid arguments. [\#3909](https://github.com/ClickHouse/ClickHouse/pull/3909) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix UB in StorageMerge. [\#3910](https://github.com/ClickHouse/ClickHouse/pull/3910) ([Amos Bird](https://github.com/amosbird))
+- Fixed segfault in functions `addDays`, `subtractDays`. [\#3913](https://github.com/ClickHouse/ClickHouse/pull/3913) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed an error: functions `round`, `floor`, `trunc`, `ceil` may return a bogus result when executed on an integer argument with a large negative scale. [\#3914](https://github.com/ClickHouse/ClickHouse/pull/3914) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a bug induced by ‘kill query sync’ that leads to a core dump. [\#3916](https://github.com/ClickHouse/ClickHouse/pull/3916) ([muVulDeePecker](https://github.com/fancyqlx))
+- Fix a bug with a long delay after an empty replication queue. [\#3928](https://github.com/ClickHouse/ClickHouse/pull/3928) [\#3932](https://github.com/ClickHouse/ClickHouse/pull/3932) ([alesapin](https://github.com/alesapin))
+- Fixed excessive memory usage in case of inserting into a table with a `LowCardinality` primary key. [\#3955](https://github.com/ClickHouse/ClickHouse/pull/3955) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Fixed `LowCardinality` serialization for `Native` format in case of empty arrays. [\#3907](https://github.com/ClickHouse/ClickHouse/issues/3907) [\#4011](https://github.com/ClickHouse/ClickHouse/pull/4011) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Fixed incorrect result while using DISTINCT by a single LowCardinality numeric column. [\#3895](https://github.com/ClickHouse/ClickHouse/issues/3895) [\#4012](https://github.com/ClickHouse/ClickHouse/pull/4012) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Fixed specialized aggregation with a LowCardinality key (in case the `compile` setting is enabled). [\#3886](https://github.com/ClickHouse/ClickHouse/pull/3886) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Fix user and password forwarding for replicated table queries. [\#3957](https://github.com/ClickHouse/ClickHouse/pull/3957) ([alesapin](https://github.com/alesapin)) ([小路](https://github.com/nicelulu))
+- Fixed a very rare race condition that can happen when listing tables in the Dictionary database while reloading dictionaries. [\#3970](https://github.com/ClickHouse/ClickHouse/pull/3970) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed incorrect result when HAVING was used with ROLLUP or CUBE. [\#3756](https://github.com/ClickHouse/ClickHouse/issues/3756) [\#3837](https://github.com/ClickHouse/ClickHouse/pull/3837) ([Sam Chou](https://github.com/reflection))
+- Fixed column aliases for queries with `JOIN ON` syntax and distributed tables. [\#3980](https://github.com/ClickHouse/ClickHouse/pull/3980) ([Winter Zhang](https://github.com/zhang2014))
+- Fixed an error in the internal implementation of `quantileTDigest` (found by Artem Vakhrushev). This error never happens in ClickHouse and was relevant only for those who use the ClickHouse codebase as a library directly. [\#3935](https://github.com/ClickHouse/ClickHouse/pull/3935) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Improvements {#improvements-6}
+
+- Support for `IF NOT EXISTS` in `ALTER TABLE ADD COLUMN` statements along with `IF EXISTS` in `DROP/MODIFY/CLEAR/COMMENT COLUMN`. [\#3900](https://github.com/ClickHouse/ClickHouse/pull/3900) ([Boris Granveaud](https://github.com/bgranvea))
+- Function `parseDateTimeBestEffort`: support for formats `DD.MM.YYYY`, `DD.MM.YY`, `DD-MM-YYYY`, `DD-Mon-YYYY`, `DD/Month/YYYY` and similar. [\#3922](https://github.com/ClickHouse/ClickHouse/pull/3922) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `CapnProtoInputStream` now supports jagged structures. [\#4063](https://github.com/ClickHouse/ClickHouse/pull/4063) ([Odin Hultgren Van Der Horst](https://github.com/Miniwoffer))
+- Usability improvement: added a check that the server process is started from the data directory’s owner. Do not allow starting the server from root if the data belongs to a non-root user. [\#3785](https://github.com/ClickHouse/ClickHouse/pull/3785) ([sergey-v-galtsev](https://github.com/sergey-v-galtsev))
+- Better logic of checking required columns during analysis of queries with JOINs. [\#3930](https://github.com/ClickHouse/ClickHouse/pull/3930) ([Artem Zuikov](https://github.com/4ertus2))
+- Decreased the number of connections in case of a large number of Distributed tables in a single server. [\#3726](https://github.com/ClickHouse/ClickHouse/pull/3726) ([Winter Zhang](https://github.com/zhang2014))
+- Supported the totals row for a `WITH TOTALS` query for the ODBC driver. [\#3836](https://github.com/ClickHouse/ClickHouse/pull/3836) ([Maksim Koritckiy](https://github.com/nightweb))
+- Allowed using `Enum`s as integers inside the `if` function. [\#3875](https://github.com/ClickHouse/ClickHouse/pull/3875) ([Ivan](https://github.com/abyss7))
+- Added the `low_cardinality_allow_in_native_format` setting. If disabled, do not use the `LowCardinality` type in the `Native` format. [\#3879](https://github.com/ClickHouse/ClickHouse/pull/3879) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Removed some redundant objects from the compiled expressions cache to lower memory usage. [\#4042](https://github.com/ClickHouse/ClickHouse/pull/4042) ([alesapin](https://github.com/alesapin))
+- Add a check that the `SET send_logs_level = 'value'` query accepts an appropriate value. [\#3873](https://github.com/ClickHouse/ClickHouse/pull/3873) ([Sabyanin Maxim](https://github.com/s-mx))
+- Fixed the data type check in type conversion functions. [\#3896](https://github.com/ClickHouse/ClickHouse/pull/3896) ([Winter Zhang](https://github.com/zhang2014))
+
+#### Performance Improvements {#performance-improvements-5}
+
+- Add a MergeTree setting `use_minimalistic_part_header_in_zookeeper`. If enabled, Replicated tables will store compact part metadata in a single part znode. This can dramatically reduce ZooKeeper snapshot size (especially if the tables have a lot of columns). Note that after enabling this setting you will not be able to downgrade to a version that doesn’t support it. [\#3960](https://github.com/ClickHouse/ClickHouse/pull/3960) ([Alex Zatelepin](https://github.com/ztlpn))
+- Add a DFA-based implementation for functions `sequenceMatch` and `sequenceCount` in case the pattern doesn’t contain time. [\#4004](https://github.com/ClickHouse/ClickHouse/pull/4004) ([Léo Ercolanelli](https://github.com/ercolanelli-leo))
+- Performance improvement for integer number serialization. [\#3968](https://github.com/ClickHouse/ClickHouse/pull/3968) ([Amos Bird](https://github.com/amosbird))
+- Zero left padding of PODArray so that the -1 element is always valid and zeroed. It’s used for branchless calculation of offsets. [\#3920](https://github.com/ClickHouse/ClickHouse/pull/3920) ([Amos Bird](https://github.com/amosbird))
+- Reverted the `jemalloc` version which led to performance degradation. [\#4018](https://github.com/ClickHouse/ClickHouse/pull/4018) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Backward Incompatible Changes {#backward-incompatible-changes-2}
+
+- Removed the undocumented feature `ALTER MODIFY PRIMARY KEY` because it was superseded by the `ALTER MODIFY ORDER BY` command. [\#3887](https://github.com/ClickHouse/ClickHouse/pull/3887) ([Alex Zatelepin](https://github.com/ztlpn))
+- Removed function `shardByHash`. [\#3833](https://github.com/ClickHouse/ClickHouse/pull/3833) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Forbid using scalar subqueries with a result of type `AggregateFunction`. [\#3865](https://github.com/ClickHouse/ClickHouse/pull/3865) ([Ivan](https://github.com/abyss7))
+
+#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-6}
+
+- Added support for PowerPC (`ppc64le`) build. [\#4132](https://github.com/ClickHouse/ClickHouse/pull/4132) ([Danila Kutenin](https://github.com/danlark1))
+- Stateful functional tests are run on a publicly available dataset. [\#3969](https://github.com/ClickHouse/ClickHouse/pull/3969) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed an error when the server could not start with the `bash: /usr/bin/clickhouse-extract-from-config: Operation not permitted` message within Docker or systemd-nspawn. [\#4136](https://github.com/ClickHouse/ClickHouse/pull/4136) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Updated the `rdkafka` library to v1.0.0-RC5. Used cppkafka instead of the raw C interface. [\#4025](https://github.com/ClickHouse/ClickHouse/pull/4025) ([Ivan](https://github.com/abyss7))
+- Updated the `mariadb-client` library. Fixed one of the issues found by UBSan. [\#3924](https://github.com/ClickHouse/ClickHouse/pull/3924) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Some fixes for UBSan builds. [\#3926](https://github.com/ClickHouse/ClickHouse/pull/3926) [\#3021](https://github.com/ClickHouse/ClickHouse/pull/3021) [\#3948](https://github.com/ClickHouse/ClickHouse/pull/3948) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added per-commit runs of tests with UBSan build.
+- Added per-commit runs of the PVS-Studio static analyzer.
+- Fixed bugs found by PVS-Studio. [\#4013](https://github.com/ClickHouse/ClickHouse/pull/4013) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed glibc compatibility issues. [\#4100](https://github.com/ClickHouse/ClickHouse/pull/4100) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Move Docker images to 18.10 and add a compatibility file for glibc \>= 2.28. [\#3965](https://github.com/ClickHouse/ClickHouse/pull/3965) ([alesapin](https://github.com/alesapin))
+- Add an env variable if the user doesn’t want to chown directories in the server Docker image. [\#3967](https://github.com/ClickHouse/ClickHouse/pull/3967) ([alesapin](https://github.com/alesapin))
+- Enabled most of the warnings from `-Weverything` in clang. Enabled `-Wpedantic`. [\#3986](https://github.com/ClickHouse/ClickHouse/pull/3986) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added a few more warnings that are available only in clang 8. [\#3993](https://github.com/ClickHouse/ClickHouse/pull/3993) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Link to `libLLVM` rather than to individual LLVM libs when using shared linking. [\#3989](https://github.com/ClickHouse/ClickHouse/pull/3989) ([Orivej Desh](https://github.com/orivej))
+- Added sanitizer variables for test images. [\#4072](https://github.com/ClickHouse/ClickHouse/pull/4072) ([alesapin](https://github.com/alesapin))
+- The `clickhouse-server` debian package will recommend the `libcap2-bin` package to use the `setcap` tool for setting capabilities. This is optional. [\#4093](https://github.com/ClickHouse/ClickHouse/pull/4093) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Improved compilation time, fixed includes. [\#3898](https://github.com/ClickHouse/ClickHouse/pull/3898) ([proller](https://github.com/proller))
[\#3918](https://github.com/ClickHouse/ClickHouse/pull/3918) ([filimonov](https://github.com/filimonov)) +- Fixed cyclic library dependencies. [\#3958](https://github.com/ClickHouse/ClickHouse/pull/3958) ([proller](https://github.com/proller)) +- Improved compilation with low available memory. [\#4030](https://github.com/ClickHouse/ClickHouse/pull/4030) ([proller](https://github.com/proller)) +- Added a test script to reproduce performance degradation in `jemalloc`. [\#4036](https://github.com/ClickHouse/ClickHouse/pull/4036) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixed misspellings in comments and string literals under `dbms`. [\#4122](https://github.com/ClickHouse/ClickHouse/pull/4122) ([maiha](https://github.com/maiha)) +- Fixed typos in comments. [\#4089](https://github.com/ClickHouse/ClickHouse/pull/4089) ([Evgenii Pravda](https://github.com/kvinty)) + +## [Changelog for 2018](https://github.com/ClickHouse/ClickHouse/blob/master/docs/en/changelog/2018.md) diff --git a/docs/zh/changelog/index.md b/docs/zh/changelog/index.md deleted file mode 120000 index 79b747aee1b..00000000000 --- a/docs/zh/changelog/index.md +++ /dev/null @@ -1 +0,0 @@ -../../../CHANGELOG.md \ No newline at end of file diff --git a/docs/zh/changelog/index.md b/docs/zh/changelog/index.md new file mode 100644 index 00000000000..1a89e03c333 --- /dev/null +++ b/docs/zh/changelog/index.md @@ -0,0 +1,5 @@ +--- +en_copy: true +--- + +../../../CHANGELOG.md \ No newline at end of file diff --git a/docs/zh/commercial/cloud.md b/docs/zh/commercial/cloud.md deleted file mode 120000 index eb58e4a90be..00000000000 --- a/docs/zh/commercial/cloud.md +++ /dev/null @@ -1 +0,0 @@ -../../en/commercial/cloud.md \ No newline at end of file diff --git a/docs/zh/commercial/cloud.md b/docs/zh/commercial/cloud.md new file mode 100644 index 00000000000..f096bdb92cf --- /dev/null +++ b/docs/zh/commercial/cloud.md @@ -0,0 +1,20 @@ +--- +en_copy: true +--- + +# ClickHouse Cloud Service Providers {#clickhouse-cloud-service-providers} + +!!! info "Info" + If you have launched a public cloud with a managed ClickHouse service, feel free to [open a pull-request](https://github.com/ClickHouse/ClickHouse/edit/master/docs/en/commercial/cloud.md) adding it to the following list.
+ +## Yandex Cloud {#yandex-cloud} + +[Yandex Managed Service for ClickHouse](https://cloud.yandex.com/services/managed-clickhouse?utm_source=referrals&utm_medium=clickhouseofficialsite&utm_campaign=link3) provides the following key features: + +- Fully managed ZooKeeper service for [ClickHouse replication](../operations/table_engines/replication.md) +- Multiple storage type choices +- Replicas in different availability zones +- Encryption and isolation +- Automated maintenance + +{## [Original article](https://clickhouse.tech/docs/en/commercial/cloud/) ##} diff --git a/docs/zh/data_types/datetime64.md b/docs/zh/data_types/datetime64.md deleted file mode 120000 index e59b41ea1ae..00000000000 --- a/docs/zh/data_types/datetime64.md +++ /dev/null @@ -1 +0,0 @@ -../../en/data_types/datetime64.md \ No newline at end of file diff --git a/docs/zh/data_types/datetime64.md b/docs/zh/data_types/datetime64.md new file mode 100644 index 00000000000..e28390bbdd4 --- /dev/null +++ b/docs/zh/data_types/datetime64.md @@ -0,0 +1,101 @@ +--- +en_copy: true +--- + +# DateTime64 {#data_type-datetime64} + +Allows storing an instant in time that can be expressed as a calendar date and a time of day, with defined sub-second precision. + +Tick size (precision): 10^(-precision) seconds. + +Syntax: + +``` sql +DateTime64(precision, [timezone]) +``` + +Internally, stores data as a number of ‘ticks’ since epoch start (1970-01-01 00:00:00 UTC) as Int64. The tick resolution is determined by the precision parameter. Additionally, the `DateTime64` type can store a time zone that is the same for the entire column; it affects how `DateTime64` values are displayed in text format and how values specified as strings are parsed (‘2020-01-01 05:00:01.000’). The time zone is not stored in the rows of the table (or in the resultset), but is stored in the column metadata. See details in [DateTime](datetime.md). + +## Examples {#examples} + +**1.** Creating a table with a `DateTime64`-type column and inserting data into it: + +``` sql +CREATE TABLE dt +( + `timestamp` DateTime64(3, 'Europe/Moscow'), + `event_id` UInt8 +) +ENGINE = TinyLog +``` + +``` sql +INSERT INTO dt Values (1546300800000, 1), ('2019-01-01 00:00:00', 2) +``` + +``` sql +SELECT * FROM dt +``` + +``` text +┌───────────────timestamp─┬─event_id─┐ +│ 2019-01-01 03:00:00.000 │ 1 │ +│ 2019-01-01 00:00:00.000 │ 2 │ +└─────────────────────────┴──────────┘ +``` + +- When inserting a datetime as an integer, it is treated as an appropriately scaled Unix Timestamp (UTC). `1546300800000` (with precision 3) represents `'2019-01-01 00:00:00'` UTC. However, as the `timestamp` column has the `Europe/Moscow` (UTC+3) timezone specified, when outputting as a string the value will be shown as `'2019-01-01 03:00:00'`. +- When inserting a string value as a datetime, it is treated as being in the column timezone. `'2019-01-01 00:00:00'` will be treated as being in the `Europe/Moscow` timezone and stored as `1546290000000`.
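To see the tick-size rule in action, here is a sketch that is not part of the original examples; the literal below and the `toUnixTimestamp64Milli` helper are illustrative and may require a reasonably recent ClickHouse version:

``` sql
-- A sketch: with precision 3, one tick is 10^-3 s, so extra fractional
-- digits in the input are cut off at the millisecond boundary.
SELECT toDateTime64('2019-01-01 00:00:00.123456', 3, 'UTC') AS t,
       toUnixTimestamp64Milli(t) AS ticks
```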
+ +**2.** Filtering on `DateTime64` values + +``` sql +SELECT * FROM dt WHERE timestamp = toDateTime64('2019-01-01 00:00:00', 3, 'Europe/Moscow') +``` + +``` text +┌───────────────timestamp─┬─event_id─┐ +│ 2019-01-01 00:00:00.000 │ 2 │ +└─────────────────────────┴──────────┘ +``` + +Unlike `DateTime`, `DateTime64` values are not converted from `String` automatically + +**3.** Getting a time zone for a `DateTime64`-type value: + +``` sql +SELECT toDateTime64(now(), 3, 'Europe/Moscow') AS column, toTypeName(column) AS x +``` + +``` text +┌──────────────────column─┬─x──────────────────────────────┐ +│ 2019-10-16 04:12:04.000 │ DateTime64(3, 'Europe/Moscow') │ +└─────────────────────────┴────────────────────────────────┘ +``` + +**4.** Timezone conversion + +``` sql +SELECT +toDateTime64(timestamp, 3, 'Europe/London') as lon_time, +toDateTime64(timestamp, 3, 'Europe/Moscow') as mos_time +FROM dt +``` + +``` text +┌───────────────lon_time──┬────────────────mos_time─┐ +│ 2019-01-01 00:00:00.000 │ 2019-01-01 03:00:00.000 │ +│ 2018-12-31 21:00:00.000 │ 2019-01-01 00:00:00.000 │ +└─────────────────────────┴─────────────────────────┘ +``` + +## See Also {#see-also} + +- [Type conversion functions](../query_language/functions/type_conversion_functions.md) +- [Functions for working with dates and times](../query_language/functions/date_time_functions.md) +- [Functions for working with arrays](../query_language/functions/array_functions.md) +- [The `date_time_input_format` setting](../operations/settings/settings.md#settings-date_time_input_format) +- [The `timezone` server configuration parameter](../operations/server_settings/settings.md#server_settings-timezone) +- [Operators for working with dates and times](../query_language/operators.md#operators-datetime) +- [`Date` data type](date.md) +- [`DateTime` data type](datetime.md) diff --git a/docs/zh/data_types/special_data_types/interval.md b/docs/zh/data_types/special_data_types/interval.md deleted file mode 120000 index 6829f5ced00..00000000000 --- a/docs/zh/data_types/special_data_types/interval.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/data_types/special_data_types/interval.md \ No newline at end of file diff --git a/docs/zh/data_types/special_data_types/interval.md b/docs/zh/data_types/special_data_types/interval.md new file mode 100644 index 00000000000..8a37476579c --- /dev/null +++ b/docs/zh/data_types/special_data_types/interval.md @@ -0,0 +1,82 @@ +--- +en_copy: true +--- + +# Interval {#data-type-interval} + +The family of data types representing time and date intervals. The resulting types of the [INTERVAL](../../query_language/operators.md#operator-interval) operator. + +!!! warning "Warning" + `Interval` data type values can’t be stored in tables. + +Structure: + +- Time interval as an unsigned integer value. +- Type of an interval. + +Supported interval types: + +- `SECOND` +- `MINUTE` +- `HOUR` +- `DAY` +- `WEEK` +- `MONTH` +- `QUARTER` +- `YEAR` + +For each interval type, there is a separate data type. For example, the `DAY` interval corresponds to the `IntervalDay` data type: + +``` sql +SELECT toTypeName(INTERVAL 4 DAY) +``` + +``` text +┌─toTypeName(toIntervalDay(4))─┐ +│ IntervalDay │ +└──────────────────────────────┘ +``` + +## Usage Remarks {#data-type-interval-usage-remarks} + +You can use `Interval`-type values in arithmetical operations with [Date](../../data_types/date.md) and [DateTime](../../data_types/datetime.md)-type values. 
For example, you can add 4 days to the current time: + +``` sql +SELECT now() as current_date_time, current_date_time + INTERVAL 4 DAY +``` + +``` text +┌───current_date_time─┬─plus(now(), toIntervalDay(4))─┐ +│ 2019-10-23 10:58:45 │ 2019-10-27 10:58:45 │ +└─────────────────────┴───────────────────────────────┘ +``` + +Intervals with different types can’t be combined. You can’t use intervals like `4 DAY 1 HOUR`. Specify intervals in units that are smaller than or equal to the smallest unit of the interval; for example, an interval of `1 day and an hour` can be expressed as `25 HOUR` or `90000 SECOND`. + +You can’t perform arithmetical operations with `Interval`-type values, but you can add intervals of different types consecutively to values in `Date` or `DateTime` data types. For example: + +``` sql +SELECT now() AS current_date_time, current_date_time + INTERVAL 4 DAY + INTERVAL 3 HOUR +``` + +``` text +┌───current_date_time─┬─plus(plus(now(), toIntervalDay(4)), toIntervalHour(3))─┐ +│ 2019-10-23 11:16:28 │ 2019-10-27 14:16:28 │ +└─────────────────────┴────────────────────────────────────────────────────────┘ +``` + +The following query causes an exception: + +``` sql +select now() AS current_date_time, current_date_time + (INTERVAL 4 DAY + INTERVAL 3 HOUR) +``` + +``` text +Received exception from server (version 19.14.1): +Code: 43. DB::Exception: Received from localhost:9000. DB::Exception: Wrong argument types for function plus: if one argument is Interval, then another must be Date or DateTime.. +``` + +## See Also {#see-also} + +- [INTERVAL](../../query_language/operators.md#operator-interval) operator +- [toInterval](../../query_language/functions/type_conversion_functions.md#function-tointerval) type conversion functions diff --git a/docs/zh/data_types/uuid.md b/docs/zh/data_types/uuid.md deleted file mode 120000 index aba05e889ac..00000000000 --- a/docs/zh/data_types/uuid.md +++ /dev/null @@ -1 +0,0 @@ -../../en/data_types/uuid.md \ No newline at end of file diff --git a/docs/zh/data_types/uuid.md b/docs/zh/data_types/uuid.md new file mode 100644 index 00000000000..4546be19371 --- /dev/null +++ b/docs/zh/data_types/uuid.md @@ -0,0 +1,74 @@ +--- +en_copy: true +--- + +# UUID {#uuid-data-type} + +A universally unique identifier (UUID) is a 16-byte number used to identify records. For detailed information about the UUID, see [Wikipedia](https://en.wikipedia.org/wiki/Universally_unique_identifier). + +An example UUID value is shown below: + +``` text +61f0c404-5cb3-11e7-907b-a6006ad3dba0 +``` + +If you do not specify the UUID column value when inserting a new record, the UUID value is filled with zero: + +``` text +00000000-0000-0000-0000-000000000000 +``` + +## How to generate {#how-to-generate} + +To generate a UUID value, ClickHouse provides the [generateUUIDv4](../query_language/functions/uuid_functions.md) function. + +## Usage example {#usage-example} + +**Example 1** + +This example demonstrates creating a table with a UUID-type column and inserting a value into the table. + +``` sql +CREATE TABLE t_uuid (x UUID, y String) ENGINE=TinyLog +``` + +``` sql +INSERT INTO t_uuid SELECT generateUUIDv4(), 'Example 1' +``` + +``` sql +SELECT * FROM t_uuid +``` + +``` text +┌────────────────────────────────────x─┬─y─────────┐ +│ 417ddc5d-e556-4d27-95dd-a34d84e46a50 │ Example 1 │ +└──────────────────────────────────────┴───────────┘ +``` + +**Example 2** + +In this example, the UUID column value is not specified when inserting a new record.
+ +``` sql +INSERT INTO t_uuid (y) VALUES ('Example 2') +``` + +``` sql +SELECT * FROM t_uuid +``` + +``` text +┌────────────────────────────────────x─┬─y─────────┐ +│ 417ddc5d-e556-4d27-95dd-a34d84e46a50 │ Example 1 │ +│ 00000000-0000-0000-0000-000000000000 │ Example 2 │ +└──────────────────────────────────────┴───────────┘ +``` + +## Restrictions {#restrictions} + +The UUID data type only supports functions that the [String](string.md) data type also supports (for example, [min](../query_language/agg_functions/reference.md#agg_function-min), [max](../query_language/agg_functions/reference.md#agg_function-max), and [count](../query_language/agg_functions/reference.md#agg_function-count)). + +The UUID data type is not supported by arithmetic operations (for example, [abs](../query_language/functions/arithmetic_functions.md#arithm_func-abs)) or aggregate functions, such as [sum](../query_language/agg_functions/reference.md#agg_function-sum) and [avg](../query_language/agg_functions/reference.md#agg_function-avg). + +[Original article](https://clickhouse.tech/docs/en/data_types/uuid/) diff --git a/docs/zh/database_engines/lazy.md b/docs/zh/database_engines/lazy.md deleted file mode 120000 index 66830dcdb2f..00000000000 --- a/docs/zh/database_engines/lazy.md +++ /dev/null @@ -1 +0,0 @@ -../../en/database_engines/lazy.md \ No newline at end of file diff --git a/docs/zh/database_engines/lazy.md b/docs/zh/database_engines/lazy.md new file mode 100644 index 00000000000..45c5fd602d7 --- /dev/null +++ b/docs/zh/database_engines/lazy.md @@ -0,0 +1,15 @@ +--- +en_copy: true +--- + +# Lazy {#lazy} + +Keeps tables in RAM for only `expiration_time_in_seconds` seconds after the last access. Can be used only with \*Log tables. + +It’s optimized for storing many small \*Log tables, for which there is a long time interval between accesses. + +## Creating a Database {#creating-a-database} + + CREATE DATABASE testlazy ENGINE = Lazy(expiration_time_in_seconds); + +[Original article](https://clickhouse.tech/docs/en/database_engines/lazy/) diff --git a/docs/zh/development/browse_code.md b/docs/zh/development/browse_code.md deleted file mode 120000 index 8c08c622129..00000000000 --- a/docs/zh/development/browse_code.md +++ /dev/null @@ -1 +0,0 @@ -../../en/development/browse_code.md \ No newline at end of file diff --git a/docs/zh/development/browse_code.md b/docs/zh/development/browse_code.md new file mode 100644 index 00000000000..c3016d5e1dc --- /dev/null +++ b/docs/zh/development/browse_code.md @@ -0,0 +1,11 @@ +--- +en_copy: true +--- + +# Browse ClickHouse Source Code {#browse-clickhouse-source-code} + +You can use the **Woboq** online code browser available [here](https://clickhouse-test-reports.s3.yandex.net/codebrowser/html_report///ClickHouse/dbms/index.html). It provides code navigation and semantic highlighting, search, and indexing. The code snapshot is updated daily. + +Also, you can browse sources on [GitHub](https://github.com/ClickHouse/ClickHouse) as usual. + +If you’re wondering which IDE to use, we recommend CLion, QT Creator, VS Code, and KDevelop (with caveats). You can use any IDE you like. Vim and Emacs also count.
diff --git a/docs/zh/development/build_cross_arm.md b/docs/zh/development/build_cross_arm.md deleted file mode 120000 index 983a9872dc1..00000000000 --- a/docs/zh/development/build_cross_arm.md +++ /dev/null @@ -1 +0,0 @@ -../../en/development/build_cross_arm.md \ No newline at end of file diff --git a/docs/zh/development/build_cross_arm.md b/docs/zh/development/build_cross_arm.md new file mode 100644 index 00000000000..0936a3133b2 --- /dev/null +++ b/docs/zh/development/build_cross_arm.md @@ -0,0 +1,40 @@ +--- +en_copy: true +--- + +# How to Build ClickHouse on Linux for the AARCH64 (ARM64) architecture {#how-to-build-clickhouse-on-linux-for-aarch64-arm64-architecture} + +This is for the case when you have a Linux machine and want to use it to build a `clickhouse` binary that will run on another Linux machine with the AARCH64 CPU architecture. This is intended for continuous integration checks that run on Linux servers. + +The cross-build for AARCH64 is based on the [Build instructions](build.md); follow them first. + +# Install Clang-8 {#install-clang-8} + +Follow the instructions from https://apt.llvm.org/ for your Ubuntu or Debian setup. +For example, in Ubuntu Bionic you can use the following commands: + +``` bash +echo "deb [trusted=yes] http://apt.llvm.org/bionic/ llvm-toolchain-bionic-8 main" | sudo tee /etc/apt/sources.list.d/llvm.list +sudo apt-get update +sudo apt-get install clang-8 +``` + +# Install Cross-Compilation Toolset {#install-cross-compilation-toolset} + +``` bash +cd ClickHouse +mkdir -p build-aarch64/cmake/toolchain/linux-aarch64 +wget 'https://developer.arm.com/-/media/Files/downloads/gnu-a/8.3-2019.03/binrel/gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz?revision=2e88a73f-d233-4f96-b1f4-d8b36e9bb0b9&la=en' -O gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz +tar xJf gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz -C build-aarch64/cmake/toolchain/linux-aarch64 --strip-components=1 +``` + +# Build ClickHouse {#build-clickhouse} + +``` bash +cd ClickHouse +mkdir build-arm64 +CC=clang-8 CXX=clang++-8 cmake . -Bbuild-arm64 -DCMAKE_TOOLCHAIN_FILE=cmake/linux/toolchain-aarch64.cmake +ninja -C build-arm64 +``` + +The resulting binary will run only on Linux with the AARCH64 CPU architecture. diff --git a/docs/zh/getting_started/example_datasets/index.md b/docs/zh/getting_started/example_datasets/index.md deleted file mode 120000 index c891314f915..00000000000 --- a/docs/zh/getting_started/example_datasets/index.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/getting_started/example_datasets/index.md \ No newline at end of file diff --git a/docs/zh/getting_started/example_datasets/index.md b/docs/zh/getting_started/example_datasets/index.md new file mode 100644 index 00000000000..a07ff8b0010 --- /dev/null +++ b/docs/zh/getting_started/example_datasets/index.md @@ -0,0 +1,18 @@ +--- +en_copy: true +--- + +# Example Datasets + +This section describes how to obtain example datasets and import them into ClickHouse. +For some datasets, example queries are also available.
+ +* [Anonymized Yandex.Metrica Dataset](metrica.md) +* [Star Schema Benchmark](star_schema.md) +* [WikiStat](wikistat.md) +* [Terabyte of Click Logs from Criteo](criteo.md) +* [AMPLab Big Data Benchmark](amplab_benchmark.md) +* [New York Taxi Data](nyc_taxi.md) +* [OnTime](ontime.md) + +[Original article](https://clickhouse.tech/docs/en/getting_started/example_datasets) diff --git a/docs/zh/getting_started/example_datasets/metrica.md b/docs/zh/getting_started/example_datasets/metrica.md deleted file mode 120000 index 984023973eb..00000000000 --- a/docs/zh/getting_started/example_datasets/metrica.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/getting_started/example_datasets/metrica.md \ No newline at end of file diff --git a/docs/zh/getting_started/example_datasets/metrica.md b/docs/zh/getting_started/example_datasets/metrica.md new file mode 100644 index 00000000000..d2a91a02a89 --- /dev/null +++ b/docs/zh/getting_started/example_datasets/metrica.md @@ -0,0 +1,67 @@ +--- +en_copy: true +--- + +# Anonymized Yandex.Metrica Data {#anonymized-yandex-metrica-data} + +The dataset consists of two tables containing anonymized data about hits (`hits_v1`) and visits (`visits_v1`) of Yandex.Metrica. You can read more about Yandex.Metrica in the [ClickHouse history](../../introduction/history.md) section. + +Either of the two tables can be downloaded as a compressed `tsv.xz` file or as prepared partitions. In addition to that, an extended version of the `hits` table containing 100 million rows is available as TSV at https://clickhouse-datasets.s3.yandex.net/hits/tsv/hits_100m_obfuscated_v1.tsv.xz and as prepared partitions at https://clickhouse-datasets.s3.yandex.net/hits/partitions/hits_100m_obfuscated_v1.tar.xz. + +## Obtaining Tables from Prepared Partitions {#obtaining-tables-from-prepared-partitions} + +Download and import the hits table: + +``` bash +curl -O https://clickhouse-datasets.s3.yandex.net/hits/partitions/hits_v1.tar +tar xvf hits_v1.tar -C /var/lib/clickhouse # path to ClickHouse data directory +# check permissions on unpacked data, fix if required +sudo service clickhouse-server restart +clickhouse-client --query "SELECT COUNT(*) FROM datasets.hits_v1" +``` + +Download and import visits: + +``` bash +curl -O https://clickhouse-datasets.s3.yandex.net/visits/partitions/visits_v1.tar +tar xvf visits_v1.tar -C /var/lib/clickhouse # path to ClickHouse data directory +# check permissions on unpacked data, fix if required +sudo service clickhouse-server restart +clickhouse-client --query "SELECT COUNT(*) FROM datasets.visits_v1" +``` + +## Obtaining Tables from Compressed TSV File {#obtaining-tables-from-compressed-tsv-file} + +Download and import hits from the compressed TSV file: + +``` bash +curl https://clickhouse-datasets.s3.yandex.net/hits/tsv/hits_v1.tsv.xz | unxz --threads=`nproc` > hits_v1.tsv +# now create table +clickhouse-client --query "CREATE DATABASE IF NOT EXISTS datasets" +clickhouse-client --query "CREATE TABLE datasets.hits_v1 ( WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16, EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RegionID UInt32, UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, URLDomain String, RefererDomain String, Refresh UInt8, IsRobot UInt8, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8, FlashMajor
UInt8, FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16, UserAgentMinor FixedString(2), CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8, MobilePhoneModel String, Params String, IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, WindowClientWidth UInt16, WindowClientHeight UInt16, ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, SilverlightVersion2 UInt8, SilverlightVersion3 UInt32, SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8, FUniqID UInt64, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, IsParameter UInt8, DontCountHits UInt8, WithHash UInt8, HitColor FixedString(1), UTCEventTime DateTime, Age UInt8, Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), RemoteIP UInt32, RemoteIP6 FixedString(16), WindowName Int32, OpenerName Int32, HistoryLength Int16, BrowserLanguage FixedString(2), BrowserCountry FixedString(2), SocialNetwork String, SocialAction String, HTTPError UInt16, SendTiming Int32, DNSTiming Int32, ConnectTiming Int32, ResponseStartTiming Int32, ResponseEndTiming Int32, FetchTiming Int32, RedirectTiming Int32, DOMInteractiveTiming Int32, DOMContentLoadedTiming Int32, DOMCompleteTiming Int32, LoadEventStartTiming Int32, LoadEventEndTiming Int32, NSToDOMContentLoadedTiming Int32, FirstPaintTiming Int32, RedirectCount Int8, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String, ParamCurrency FixedString(3), ParamCurrencyID UInt16, GoalsReached Array(UInt32), OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64, URLHash UInt64, CLID UInt32, YCLID UInt64, ShareService String, ShareURL String, ShareTitle String, ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), IslandID FixedString(16), RequestNum UInt32, RequestTry UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192" +# import data +cat hits_v1.tsv | clickhouse-client --query "INSERT INTO datasets.hits_v1 FORMAT TSV" --max_insert_block_size=100000 +# optionally you can optimize table +clickhouse-client --query "OPTIMIZE TABLE datasets.hits_v1 FINAL" +clickhouse-client --query "SELECT COUNT(*) FROM datasets.hits_v1" +``` + +Download and import visits from compressed tsv-file: + +``` bash +curl https://clickhouse-datasets.s3.yandex.net/visits/tsv/visits_v1.tsv.xz | unxz --threads=`nproc` > visits_v1.tsv +# now create table +clickhouse-client --query "CREATE DATABASE IF NOT EXISTS datasets" +clickhouse-client --query "CREATE TABLE datasets.visits_v1 ( CounterID UInt32, StartDate Date, Sign Int8, IsNew UInt8, VisitID UInt64, UserID UInt64, StartTime DateTime, Duration UInt32, UTCStartTime DateTime, PageViews Int32, Hits Int32, IsBounce UInt8, Referer String, StartURL String, RefererDomain String, StartURLDomain String, EndURL String, LinkURL String, IsDownload UInt8, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, PlaceID Int32, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), 
RefererRegions Array(UInt32), IsYandex UInt8, GoalReachesDepth Int32, GoalReachesURL Int32, GoalReachesAny Int32, SocialSourceNetworkID UInt8, SocialSourcePage String, MobilePhoneModel String, ClientEventTime DateTime, RegionID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RemoteIP UInt32, RemoteIP6 FixedString(16), IPNetworkID UInt32, SilverlightVersion3 UInt32, CodeVersion UInt32, ResolutionWidth UInt16, ResolutionHeight UInt16, UserAgentMajor UInt16, UserAgentMinor UInt16, WindowClientWidth UInt16, WindowClientHeight UInt16, SilverlightVersion2 UInt8, SilverlightVersion4 UInt16, FlashVersion3 UInt16, FlashVersion4 UInt16, ClientTimeZone Int16, OS UInt8, UserAgent UInt8, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, NetMajor UInt8, NetMinor UInt8, MobilePhone UInt8, SilverlightVersion1 UInt8, Age UInt8, Sex UInt8, Income UInt8, JavaEnable UInt8, CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, BrowserLanguage UInt16, BrowserCountry UInt16, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), Params Array(String), Goals Nested(ID UInt32, Serial UInt32, EventTime DateTime, Price Int64, OrderID String, CurrencyID UInt32), WatchIDs Array(UInt64), ParamSumPrice Int64, ParamCurrency FixedString(3), ParamCurrencyID UInt16, ClickLogID UInt64, ClickEventID Int32, ClickGoodEvent Int32, ClickEventTime DateTime, ClickPriorityID Int32, ClickPhraseID Int32, ClickPageID Int32, ClickPlaceID Int32, ClickTypeID Int32, ClickResourceID Int32, ClickCost UInt32, ClickClientIP UInt32, ClickDomainID UInt32, ClickURL String, ClickAttempt UInt8, ClickOrderID UInt32, ClickBannerID UInt32, ClickMarketCategoryID UInt32, ClickMarketPP UInt32, ClickMarketCategoryName String, ClickMarketPPName String, ClickAWAPSCampaignName String, ClickPageName String, ClickTargetType UInt16, ClickTargetPhraseID UInt64, ClickContextType UInt8, ClickSelectType Int8, ClickOptions String, ClickGroupBannerID Int32, OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, FirstVisit DateTime, PredLastVisit Date, LastVisit Date, TotalVisits UInt32, TraficSource Nested(ID Int8, SearchEngineID UInt16, AdvEngineID UInt8, PlaceID UInt16, SocialSourceNetworkID UInt8, Domain String, SearchPhrase String, SocialSourcePage String), Attendance FixedString(16), CLID UInt32, YCLID UInt64, NormalizedRefererHash UInt64, SearchPhraseHash UInt64, RefererDomainHash UInt64, NormalizedStartURLHash UInt64, StartURLDomainHash UInt64, NormalizedEndURLHash UInt64, TopLevelDomain UInt64, URLScheme UInt64, OpenstatServiceNameHash UInt64, OpenstatCampaignIDHash UInt64, OpenstatAdIDHash UInt64, OpenstatSourceIDHash UInt64, UTMSourceHash UInt64, UTMMediumHash UInt64, UTMCampaignHash UInt64, UTMContentHash UInt64, UTMTermHash UInt64, FromHash UInt64, WebVisorEnabled UInt8, WebVisorActivity UInt32, ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), Market Nested(Type UInt8, GoalID UInt32, OrderID String, OrderPrice Int64, PP UInt32, DirectPlaceID UInt32, DirectOrderID UInt32, DirectBannerID UInt32, GoodID String, GoodName String, GoodQuantity Int32, GoodPrice Int64), IslandID FixedString(16)) ENGINE = CollapsingMergeTree(Sign) PARTITION BY toYYYYMM(StartDate) ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192" +# import data +cat 
visits_v1.tsv | clickhouse-client --query "INSERT INTO datasets.visits_v1 FORMAT TSV" --max_insert_block_size=100000 +# optionally you can optimize table +clickhouse-client --query "OPTIMIZE TABLE datasets.visits_v1 FINAL" +clickhouse-client --query "SELECT COUNT(*) FROM datasets.visits_v1" +``` + +## Example Queries {#example-queries} + +The [ClickHouse tutorial](../../getting_started/tutorial.md) is based on the Yandex.Metrica dataset, and the recommended way to get started with this dataset is simply to go through the tutorial. + +Additional examples of queries to these tables can be found among the [stateful tests](https://github.com/ClickHouse/ClickHouse/tree/master/tests/queries/1_stateful) of ClickHouse (they are named `test.hits` and `test.visits` there). diff --git a/docs/zh/getting_started/playground.md b/docs/zh/getting_started/playground.md deleted file mode 120000 index de5b41f453e..00000000000 --- a/docs/zh/getting_started/playground.md +++ /dev/null @@ -1 +0,0 @@ -../../en/getting_started/playground.md \ No newline at end of file diff --git a/docs/zh/getting_started/playground.md b/docs/zh/getting_started/playground.md new file mode 100644 index 00000000000..186cb9030c2 --- /dev/null +++ b/docs/zh/getting_started/playground.md @@ -0,0 +1,45 @@ +--- +en_copy: true +--- + +# ClickHouse Playground {#clickhouse-playground} + +[ClickHouse Playground](https://play.clickhouse.tech?file=welcome) allows people to experiment with ClickHouse by running queries instantly, without setting up their own server or cluster. +Several example datasets are available in the Playground, as well as sample queries that show ClickHouse features. + +The queries are executed as a read-only user. It implies some limitations: + +- DDL queries are not allowed +- INSERT queries are not allowed + +The following settings are also enforced: +- [`max_result_bytes=10485760`](../operations/settings/query_complexity/#max-result-bytes) +- [`max_result_rows=2000`](../operations/settings/query_complexity/#setting-max_result_rows) +- [`result_overflow_mode=break`](../operations/settings/query_complexity/#result-overflow-mode) +- [`max_execution_time=60000`](../operations/settings/query_complexity/#max-execution-time) + +ClickHouse Playground gives the experience of an m2.small +[Managed Service for ClickHouse](https://cloud.yandex.com/services/managed-clickhouse) +instance hosted in [Yandex.Cloud](https://cloud.yandex.com/). +More information about [cloud providers](../commercial/cloud.md). + +The ClickHouse Playground web interface makes requests via the ClickHouse [HTTP API](../interfaces/http.md). +The Playground backend is just a ClickHouse cluster without any additional server-side application. +The ClickHouse HTTPS endpoint is also available as a part of the Playground. + +You can make queries to the Playground using any HTTP client, for example [curl](https://curl.haxx.se) or [wget](https://www.gnu.org/software/wget/), or set up a connection using the [JDBC](../interfaces/jdbc.md) or [ODBC](../interfaces/odbc.md) drivers. +More information about software products that support ClickHouse is available [here](../interfaces/index.md). + +| Parameter | Value | |:----------|:--------------------------------------| | Endpoint | https://play-api.clickhouse.tech:8443 | | User | `playground` | | Password | `clickhouse` | + +Note that this endpoint requires a secure connection.
+ +Example: + +``` bash +curl "https://play-api.clickhouse.tech:8443/?query=SELECT+'Play+ClickHouse!';&user=playground&password=clickhouse&database=datasets" +``` diff --git a/docs/zh/getting_started/tutorial.md b/docs/zh/getting_started/tutorial.md deleted file mode 120000 index 8bc40816ab2..00000000000 --- a/docs/zh/getting_started/tutorial.md +++ /dev/null @@ -1 +0,0 @@ -../../en/getting_started/tutorial.md \ No newline at end of file diff --git a/docs/zh/getting_started/tutorial.md b/docs/zh/getting_started/tutorial.md new file mode 100644 index 00000000000..4f23dbe756d --- /dev/null +++ b/docs/zh/getting_started/tutorial.md @@ -0,0 +1,668 @@ +--- +en_copy: true +--- + +# ClickHouse Tutorial {#clickhouse-tutorial} + +## What to Expect from This Tutorial? {#what-to-expect-from-this-tutorial} + +By going through this tutorial, you’ll learn how to set up a simple ClickHouse cluster. It’ll be small, but fault-tolerant and scalable. Then we will use one of the example datasets to fill it with data and execute some demo queries. + +## Single Node Setup {#single-node-setup} + +To postpone the complexities of a distributed environment, we’ll start with deploying ClickHouse on a single server or virtual machine. ClickHouse is usually installed from [deb](index.md#install-from-deb-packages) or [rpm](index.md#from-rpm-packages) packages, but there are [alternatives](index.md#from-docker-image) for the operating systems that do not support them. + +For example, suppose you have chosen `deb` packages and executed: + +``` bash +sudo apt-get install dirmngr +sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv E0C56BD4 + +echo "deb http://repo.clickhouse.tech/deb/stable/ main/" | sudo tee /etc/apt/sources.list.d/clickhouse.list +sudo apt-get update + +sudo apt-get install -y clickhouse-server clickhouse-client +``` + +Here’s what the installed packages contain: + +- The `clickhouse-client` package contains the [clickhouse-client](../interfaces/cli.md) application, an interactive ClickHouse console client. +- The `clickhouse-common` package contains a ClickHouse executable file. +- The `clickhouse-server` package contains configuration files to run ClickHouse as a server. + +Server config files are located in `/etc/clickhouse-server/`. Before going further, please notice the `<path>` element in `config.xml`. This path determines the location for data storage, so it should be located on a volume with large disk capacity; the default value is `/var/lib/clickhouse/`. If you want to adjust the configuration, it’s not handy to edit the `config.xml` file directly, considering it might get rewritten on future package updates. The recommended way to override the config elements is to create [files in the config.d directory](../operations/configuration_files.md) which serve as “patches” to config.xml. + +As you might have noticed, `clickhouse-server` is not launched automatically after package installation. It won’t be automatically restarted after updates, either. The way you start the server depends on your init system; usually, it is: + +``` bash +sudo service clickhouse-server start +``` + +or + +``` bash +sudo /etc/init.d/clickhouse-server start +``` + +The default location for server logs is `/var/log/clickhouse-server/`. The server is ready to handle client connections once it logs the `Ready for connections` message. + +Once the `clickhouse-server` is up and running, we can use `clickhouse-client` to connect to the server and run some test queries like `SELECT "Hello, world!";`.
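For instance, a minimal smoke test (a sketch; any trivial query works here):

``` sql
-- Confirms the server answers and shows which version is running
SELECT version() AS server_version, now() AS server_time
```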
    + +Quick tips for clickhouse-client +Interactive mode: + +``` bash +clickhouse-client +clickhouse-client --host=... --port=... --user=... --password=... +``` + +Enable multiline queries: + +``` bash +clickhouse-client -m +clickhouse-client --multiline +``` + +Run queries in batch-mode: + +``` bash +clickhouse-client --query='SELECT 1' +echo 'SELECT 1' | clickhouse-client +clickhouse-client <<< 'SELECT 1' +``` + +Insert data from a file in specified format: + +``` bash +clickhouse-client --query='INSERT INTO table VALUES' < data.txt +clickhouse-client --query='INSERT INTO table FORMAT TabSeparated' < data.tsv +``` + +
    + +## Import Sample Dataset {#import-sample-dataset} + +Now it’s time to fill our ClickHouse server with some sample data. In this tutorial, we’ll use the anonymized data of Yandex.Metrica, the first service that runs ClickHouse in production way before it became open-source (more on that in [history section](../introduction/history.md)). There are [multiple ways to import Yandex.Metrica dataset](example_datasets/metrica.md), and for the sake of the tutorial, we’ll go with the most realistic one. + +### Download and Extract Table Data {#download-and-extract-table-data} + +``` bash +curl https://clickhouse-datasets.s3.yandex.net/hits/tsv/hits_v1.tsv.xz | unxz --threads=`nproc` > hits_v1.tsv +curl https://clickhouse-datasets.s3.yandex.net/visits/tsv/visits_v1.tsv.xz | unxz --threads=`nproc` > visits_v1.tsv +``` + +The extracted files are about 10GB in size. + +### Create Tables {#create-tables} + +As in most databases management systems, ClickHouse logically groups tables into “databases”. There’s a `default` database, but we’ll create a new one named `tutorial`: + +``` bash +clickhouse-client --query "CREATE DATABASE IF NOT EXISTS tutorial" +``` + +Syntax for creating tables is way more complicated compared to databases (see [reference](../query_language/create.md). In general `CREATE TABLE` statement has to specify three key things: + +1. Name of table to create. +2. Table schema, i.e. list of columns and their [data types](../data_types/index.md). +3. [Table engine](../operations/table_engines/index.md) and it’s settings, which determines all the details on how queries to this table will be physically executed. + +Yandex.Metrica is a web analytics service, and sample dataset doesn’t cover its full functionality, so there are only two tables to create: + +- `hits` is a table with each action done by all users on all websites covered by the service. +- `visits` is a table that contains pre-built sessions instead of individual actions. 
+ +Let’s see and execute the real create table queries for these tables: + +``` sql +CREATE TABLE tutorial.hits_v1 +( + `WatchID` UInt64, + `JavaEnable` UInt8, + `Title` String, + `GoodEvent` Int16, + `EventTime` DateTime, + `EventDate` Date, + `CounterID` UInt32, + `ClientIP` UInt32, + `ClientIP6` FixedString(16), + `RegionID` UInt32, + `UserID` UInt64, + `CounterClass` Int8, + `OS` UInt8, + `UserAgent` UInt8, + `URL` String, + `Referer` String, + `URLDomain` String, + `RefererDomain` String, + `Refresh` UInt8, + `IsRobot` UInt8, + `RefererCategories` Array(UInt16), + `URLCategories` Array(UInt16), + `URLRegions` Array(UInt32), + `RefererRegions` Array(UInt32), + `ResolutionWidth` UInt16, + `ResolutionHeight` UInt16, + `ResolutionDepth` UInt8, + `FlashMajor` UInt8, + `FlashMinor` UInt8, + `FlashMinor2` String, + `NetMajor` UInt8, + `NetMinor` UInt8, + `UserAgentMajor` UInt16, + `UserAgentMinor` FixedString(2), + `CookieEnable` UInt8, + `JavascriptEnable` UInt8, + `IsMobile` UInt8, + `MobilePhone` UInt8, + `MobilePhoneModel` String, + `Params` String, + `IPNetworkID` UInt32, + `TraficSourceID` Int8, + `SearchEngineID` UInt16, + `SearchPhrase` String, + `AdvEngineID` UInt8, + `IsArtifical` UInt8, + `WindowClientWidth` UInt16, + `WindowClientHeight` UInt16, + `ClientTimeZone` Int16, + `ClientEventTime` DateTime, + `SilverlightVersion1` UInt8, + `SilverlightVersion2` UInt8, + `SilverlightVersion3` UInt32, + `SilverlightVersion4` UInt16, + `PageCharset` String, + `CodeVersion` UInt32, + `IsLink` UInt8, + `IsDownload` UInt8, + `IsNotBounce` UInt8, + `FUniqID` UInt64, + `HID` UInt32, + `IsOldCounter` UInt8, + `IsEvent` UInt8, + `IsParameter` UInt8, + `DontCountHits` UInt8, + `WithHash` UInt8, + `HitColor` FixedString(1), + `UTCEventTime` DateTime, + `Age` UInt8, + `Sex` UInt8, + `Income` UInt8, + `Interests` UInt16, + `Robotness` UInt8, + `GeneralInterests` Array(UInt16), + `RemoteIP` UInt32, + `RemoteIP6` FixedString(16), + `WindowName` Int32, + `OpenerName` Int32, + `HistoryLength` Int16, + `BrowserLanguage` FixedString(2), + `BrowserCountry` FixedString(2), + `SocialNetwork` String, + `SocialAction` String, + `HTTPError` UInt16, + `SendTiming` Int32, + `DNSTiming` Int32, + `ConnectTiming` Int32, + `ResponseStartTiming` Int32, + `ResponseEndTiming` Int32, + `FetchTiming` Int32, + `RedirectTiming` Int32, + `DOMInteractiveTiming` Int32, + `DOMContentLoadedTiming` Int32, + `DOMCompleteTiming` Int32, + `LoadEventStartTiming` Int32, + `LoadEventEndTiming` Int32, + `NSToDOMContentLoadedTiming` Int32, + `FirstPaintTiming` Int32, + `RedirectCount` Int8, + `SocialSourceNetworkID` UInt8, + `SocialSourcePage` String, + `ParamPrice` Int64, + `ParamOrderID` String, + `ParamCurrency` FixedString(3), + `ParamCurrencyID` UInt16, + `GoalsReached` Array(UInt32), + `OpenstatServiceName` String, + `OpenstatCampaignID` String, + `OpenstatAdID` String, + `OpenstatSourceID` String, + `UTMSource` String, + `UTMMedium` String, + `UTMCampaign` String, + `UTMContent` String, + `UTMTerm` String, + `FromTag` String, + `HasGCLID` UInt8, + `RefererHash` UInt64, + `URLHash` UInt64, + `CLID` UInt32, + `YCLID` UInt64, + `ShareService` String, + `ShareURL` String, + `ShareTitle` String, + `ParsedParams` Nested( + Key1 String, + Key2 String, + Key3 String, + Key4 String, + Key5 String, + ValueDouble Float64), + `IslandID` FixedString(16), + `RequestNum` UInt32, + `RequestTry` UInt8 +) +ENGINE = MergeTree() +PARTITION BY toYYYYMM(EventDate) +ORDER BY (CounterID, EventDate, intHash32(UserID)) +SAMPLE BY intHash32(UserID) +SETTINGS 
index_granularity = 8192 +``` + +``` sql +CREATE TABLE tutorial.visits_v1 +( + `CounterID` UInt32, + `StartDate` Date, + `Sign` Int8, + `IsNew` UInt8, + `VisitID` UInt64, + `UserID` UInt64, + `StartTime` DateTime, + `Duration` UInt32, + `UTCStartTime` DateTime, + `PageViews` Int32, + `Hits` Int32, + `IsBounce` UInt8, + `Referer` String, + `StartURL` String, + `RefererDomain` String, + `StartURLDomain` String, + `EndURL` String, + `LinkURL` String, + `IsDownload` UInt8, + `TraficSourceID` Int8, + `SearchEngineID` UInt16, + `SearchPhrase` String, + `AdvEngineID` UInt8, + `PlaceID` Int32, + `RefererCategories` Array(UInt16), + `URLCategories` Array(UInt16), + `URLRegions` Array(UInt32), + `RefererRegions` Array(UInt32), + `IsYandex` UInt8, + `GoalReachesDepth` Int32, + `GoalReachesURL` Int32, + `GoalReachesAny` Int32, + `SocialSourceNetworkID` UInt8, + `SocialSourcePage` String, + `MobilePhoneModel` String, + `ClientEventTime` DateTime, + `RegionID` UInt32, + `ClientIP` UInt32, + `ClientIP6` FixedString(16), + `RemoteIP` UInt32, + `RemoteIP6` FixedString(16), + `IPNetworkID` UInt32, + `SilverlightVersion3` UInt32, + `CodeVersion` UInt32, + `ResolutionWidth` UInt16, + `ResolutionHeight` UInt16, + `UserAgentMajor` UInt16, + `UserAgentMinor` UInt16, + `WindowClientWidth` UInt16, + `WindowClientHeight` UInt16, + `SilverlightVersion2` UInt8, + `SilverlightVersion4` UInt16, + `FlashVersion3` UInt16, + `FlashVersion4` UInt16, + `ClientTimeZone` Int16, + `OS` UInt8, + `UserAgent` UInt8, + `ResolutionDepth` UInt8, + `FlashMajor` UInt8, + `FlashMinor` UInt8, + `NetMajor` UInt8, + `NetMinor` UInt8, + `MobilePhone` UInt8, + `SilverlightVersion1` UInt8, + `Age` UInt8, + `Sex` UInt8, + `Income` UInt8, + `JavaEnable` UInt8, + `CookieEnable` UInt8, + `JavascriptEnable` UInt8, + `IsMobile` UInt8, + `BrowserLanguage` UInt16, + `BrowserCountry` UInt16, + `Interests` UInt16, + `Robotness` UInt8, + `GeneralInterests` Array(UInt16), + `Params` Array(String), + `Goals` Nested( + ID UInt32, + Serial UInt32, + EventTime DateTime, + Price Int64, + OrderID String, + CurrencyID UInt32), + `WatchIDs` Array(UInt64), + `ParamSumPrice` Int64, + `ParamCurrency` FixedString(3), + `ParamCurrencyID` UInt16, + `ClickLogID` UInt64, + `ClickEventID` Int32, + `ClickGoodEvent` Int32, + `ClickEventTime` DateTime, + `ClickPriorityID` Int32, + `ClickPhraseID` Int32, + `ClickPageID` Int32, + `ClickPlaceID` Int32, + `ClickTypeID` Int32, + `ClickResourceID` Int32, + `ClickCost` UInt32, + `ClickClientIP` UInt32, + `ClickDomainID` UInt32, + `ClickURL` String, + `ClickAttempt` UInt8, + `ClickOrderID` UInt32, + `ClickBannerID` UInt32, + `ClickMarketCategoryID` UInt32, + `ClickMarketPP` UInt32, + `ClickMarketCategoryName` String, + `ClickMarketPPName` String, + `ClickAWAPSCampaignName` String, + `ClickPageName` String, + `ClickTargetType` UInt16, + `ClickTargetPhraseID` UInt64, + `ClickContextType` UInt8, + `ClickSelectType` Int8, + `ClickOptions` String, + `ClickGroupBannerID` Int32, + `OpenstatServiceName` String, + `OpenstatCampaignID` String, + `OpenstatAdID` String, + `OpenstatSourceID` String, + `UTMSource` String, + `UTMMedium` String, + `UTMCampaign` String, + `UTMContent` String, + `UTMTerm` String, + `FromTag` String, + `HasGCLID` UInt8, + `FirstVisit` DateTime, + `PredLastVisit` Date, + `LastVisit` Date, + `TotalVisits` UInt32, + `TraficSource` Nested( + ID Int8, + SearchEngineID UInt16, + AdvEngineID UInt8, + PlaceID UInt16, + SocialSourceNetworkID UInt8, + Domain String, + SearchPhrase String, + SocialSourcePage String), + 
`Attendance` FixedString(16), + `CLID` UInt32, + `YCLID` UInt64, + `NormalizedRefererHash` UInt64, + `SearchPhraseHash` UInt64, + `RefererDomainHash` UInt64, + `NormalizedStartURLHash` UInt64, + `StartURLDomainHash` UInt64, + `NormalizedEndURLHash` UInt64, + `TopLevelDomain` UInt64, + `URLScheme` UInt64, + `OpenstatServiceNameHash` UInt64, + `OpenstatCampaignIDHash` UInt64, + `OpenstatAdIDHash` UInt64, + `OpenstatSourceIDHash` UInt64, + `UTMSourceHash` UInt64, + `UTMMediumHash` UInt64, + `UTMCampaignHash` UInt64, + `UTMContentHash` UInt64, + `UTMTermHash` UInt64, + `FromHash` UInt64, + `WebVisorEnabled` UInt8, + `WebVisorActivity` UInt32, + `ParsedParams` Nested( + Key1 String, + Key2 String, + Key3 String, + Key4 String, + Key5 String, + ValueDouble Float64), + `Market` Nested( + Type UInt8, + GoalID UInt32, + OrderID String, + OrderPrice Int64, + PP UInt32, + DirectPlaceID UInt32, + DirectOrderID UInt32, + DirectBannerID UInt32, + GoodID String, + GoodName String, + GoodQuantity Int32, + GoodPrice Int64), + `IslandID` FixedString(16) +) +ENGINE = CollapsingMergeTree(Sign) +PARTITION BY toYYYYMM(StartDate) +ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID) +SAMPLE BY intHash32(UserID) +SETTINGS index_granularity = 8192 +``` + +You can execute those queries using the interactive mode of `clickhouse-client` (just launch it in a terminal without specifying a query in advance) or try some [alternative interface](../interfaces/index.md) if you want. + +As we can see, `hits_v1` uses the [basic MergeTree engine](../operations/table_engines/mergetree.md), while the `visits_v1` uses the [Collapsing](../operations/table_engines/collapsingmergetree.md) variant. + +### Import Data {#import-data} + +Data import to ClickHouse is done via [INSERT INTO](../query_language/insert_into.md) query like in many other SQL databases. However, data is usually provided in one of the [supported serialization formats](../interfaces/formats.md) instead of `VALUES` clause (which is also supported). + +The files we downloaded earlier are in tab-separated format, so here’s how to import them via console client: + +``` bash +clickhouse-client --query "INSERT INTO tutorial.hits_v1 FORMAT TSV" --max_insert_block_size=100000 < hits_v1.tsv +clickhouse-client --query "INSERT INTO tutorial.visits_v1 FORMAT TSV" --max_insert_block_size=100000 < visits_v1.tsv +``` + +ClickHouse has a lot of [settings to tune](../operations/settings/index.md) and one way to specify them in console client is via arguments, as we can see with `--max_insert_block_size`. The easiest way to figure out what settings are available, what do they mean and what the defaults are is to query the `system.settings` table: + +``` sql +SELECT name, value, changed, description +FROM system.settings +WHERE name LIKE '%max_insert_b%' +FORMAT TSV + +max_insert_block_size 1048576 0 "The maximum block size for insertion, if we control the creation of blocks for insertion." +``` + +Optionally you can [OPTIMIZE](../query_language/misc/#misc_operations-optimize) the tables after import. Tables that are configured with an engine from MergeTree-family always do merges of data parts in the background to optimize data storage (or at least check if it makes sense). 
These queries force the table engine to do storage optimization right now instead of some time later: + +``` bash +clickhouse-client --query "OPTIMIZE TABLE tutorial.hits_v1 FINAL" +clickhouse-client --query "OPTIMIZE TABLE tutorial.visits_v1 FINAL" +``` + +These queries start an I/O- and CPU-intensive operation, so if the table consistently receives new data, it’s better to leave it alone and let merges run in the background. + +Now we can check if the table import was successful: + +``` bash +clickhouse-client --query "SELECT COUNT(*) FROM tutorial.hits_v1" +clickhouse-client --query "SELECT COUNT(*) FROM tutorial.visits_v1" +``` + +## Example Queries {#example-queries} + +``` sql +SELECT + StartURL AS URL, + AVG(Duration) AS AvgDuration +FROM tutorial.visits_v1 +WHERE StartDate BETWEEN '2014-03-23' AND '2014-03-30' +GROUP BY URL +ORDER BY AvgDuration DESC +LIMIT 10 +``` + +``` sql +SELECT + sum(Sign) AS visits, + sumIf(Sign, has(Goals.ID, 1105530)) AS goal_visits, + (100. * goal_visits) / visits AS goal_percent +FROM tutorial.visits_v1 +WHERE (CounterID = 912887) AND (toYYYYMM(StartDate) = 201403) AND (domain(StartURL) = 'yandex.ru') +``` + +## Cluster Deployment {#cluster-deployment} + +A ClickHouse cluster is a homogeneous cluster. Steps to set up: + +1. Install ClickHouse server on all machines of the cluster +2. Set up cluster configs in configuration files +3. Create local tables on each instance +4. Create a [Distributed table](../operations/table_engines/distributed.md) + +A [Distributed table](../operations/table_engines/distributed.md) is actually a kind of “view” to the local tables of a ClickHouse cluster. A SELECT query from a distributed table executes using the resources of all the cluster’s shards. You may specify configs for multiple clusters and create multiple distributed tables providing views to different clusters. + +Example config for a cluster with three shards, one replica each: + +``` xml +<remote_servers> + <perftest_3shards_1replicas> + <shard> + <replica> + <host>example-perftest01j.yandex.ru</host> + <port>9000</port> + </replica> + </shard> + <shard> + <replica> + <host>example-perftest02j.yandex.ru</host> + <port>9000</port> + </replica> + </shard> + <shard> + <replica> + <host>example-perftest03j.yandex.ru</host> + <port>9000</port> + </replica> + </shard> + </perftest_3shards_1replicas> +</remote_servers> +``` + +For further demonstration, let’s create a new local table with the same `CREATE TABLE` query that we used for `hits_v1`, but a different table name: + +``` sql +CREATE TABLE tutorial.hits_local (...) ENGINE = MergeTree() ... +``` + +Creating a distributed table providing a view into the local tables of the cluster: + +``` sql +CREATE TABLE tutorial.hits_all AS tutorial.hits_local +ENGINE = Distributed(perftest_3shards_1replicas, tutorial, hits_local, rand()); +``` + +A common practice is to create similar Distributed tables on all machines of the cluster. This allows running distributed queries on any machine of the cluster. There’s also an alternative option: creating a temporary distributed table for a given SELECT query using the [remote](../query_language/table_functions/remote.md) table function. + +Let’s run [INSERT SELECT](../query_language/insert_into.md) into the Distributed table to spread the table to multiple servers. + +``` sql +INSERT INTO tutorial.hits_all SELECT * FROM tutorial.hits_v1; +``` + +!!! warning "Notice" + This approach is not suitable for the sharding of large tables. There’s a separate tool, [clickhouse-copier](../operations/utils/clickhouse-copier.md), that can re-shard arbitrarily large tables. + +As you would expect, computationally heavy queries run N times faster if they utilize 3 servers instead of one. + +In this case, we have used a cluster with 3 shards, and each contains a single replica.
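A quick way to see the fan-out (a sketch, assuming the tables created above and that the distributed INSERT has finished):

``` sql
-- hits_local counts only the rows stored on the current server,
-- while hits_all aggregates the counts from all three shards
SELECT count() FROM tutorial.hits_local;
SELECT count() FROM tutorial.hits_all;
```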
+ +To provide resilience in a production environment, we recommend that each shard should contain 2-3 replicas spread between multiple availability zones or datacenters (or at least racks). Note that ClickHouse supports an unlimited number of replicas. + +Example config for a cluster of one shard containing three replicas: + +``` xml +<remote_servers> + ... + <perftest_1shards_3replicas> + <shard> + <replica> + <host>example-perftest01j.yandex.ru</host> + <port>9000</port> + </replica> + <replica> + <host>example-perftest02j.yandex.ru</host> + <port>9000</port> + </replica> + <replica> + <host>example-perftest03j.yandex.ru</host> + <port>9000</port> + </replica> + </shard> + </perftest_1shards_3replicas> +</remote_servers> +``` + +To enable native replication, [ZooKeeper](http://zookeeper.apache.org/) is required. ClickHouse takes care of data consistency on all replicas and runs the restore procedure after a failure automatically. It’s recommended to deploy the ZooKeeper cluster on separate servers (where no other processes, including ClickHouse, are running). + +!!! note "Note" + ZooKeeper is not a strict requirement: in some simple cases, you can duplicate the data by writing it into all the replicas from your application code. This approach is **not** recommended; in this case, ClickHouse won’t be able to guarantee data consistency on all replicas. Thus it becomes the responsibility of your application. + +ZooKeeper locations are specified in the configuration file: + +``` xml +<zookeeper> + <node> + <host>zoo01.yandex.ru</host> + <port>2181</port> + </node> + <node> + <host>zoo02.yandex.ru</host> + <port>2181</port> + </node> + <node> + <host>zoo03.yandex.ru</host> + <port>2181</port> + </node> +</zookeeper> +``` + +Also, we need to set macros for identifying each shard and replica which are used on table creation: + +``` xml +<macros> + <shard>01</shard> + <replica>01</replica> +</macros> +``` + +If no replicas exist at the moment a replicated table is created, a new first replica is instantiated. If there are already live replicas, the new replica clones data from the existing ones. You have the option to create all replicated tables first, and then insert data into them. Another option is to create some replicas and add the others after or during data insertion. + +``` sql +CREATE TABLE tutorial.hits_replica (...) +ENGINE = ReplicatedMergeTree( + '/clickhouse_perftest/tables/{shard}/hits', + '{replica}' +) +... +``` + +Here we use the [ReplicatedMergeTree](../operations/table_engines/replication.md) table engine. In the parameters, we specify the ZooKeeper path containing the shard and replica identifiers. + +``` sql +INSERT INTO tutorial.hits_replica SELECT * FROM tutorial.hits_local; +``` + +Replication operates in multi-master mode. Data can be loaded into any replica, and the system then syncs it with other instances automatically. Replication is asynchronous, so at a given moment not all replicas may contain recently inserted data. At least one replica should be up to allow data ingestion. Others will sync up data and repair consistency once they become active again. Note that this approach allows for a small possibility of losing recently inserted data.
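Once the replicated table exists, its health can be monitored through the `system.replicas` table (a sketch; the exact column set varies between ClickHouse versions):

``` sql
-- One row per replicated table on this server; active_replicas below
-- total_replicas indicates that some replicas are currently offline
SELECT database, table, is_leader, total_replicas, active_replicas
FROM system.replicas
WHERE table = 'hits_replica'
```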
+ +[Original article](https://clickhouse.tech/docs/en/getting_started/tutorial/) diff --git a/docs/zh/guides/apply_catboost_model.md b/docs/zh/guides/apply_catboost_model.md deleted file mode 120000 index dd36e885974..00000000000 --- a/docs/zh/guides/apply_catboost_model.md +++ /dev/null @@ -1 +0,0 @@ -../../en/guides/apply_catboost_model.md \ No newline at end of file diff --git a/docs/zh/guides/apply_catboost_model.md b/docs/zh/guides/apply_catboost_model.md new file mode 100644 index 00000000000..62eb386147f --- /dev/null +++ b/docs/zh/guides/apply_catboost_model.md @@ -0,0 +1,236 @@ +--- +en_copy: true +--- + +# Applying a Catboost Model in ClickHouse {#applying-catboost-model-in-clickhouse} + +[CatBoost](https://catboost.ai) is a free and open-source gradient boosting library developed at [Yandex](https://yandex.com/company/) for machine learning. + +With this instruction, you will learn to apply pre-trained models in ClickHouse by running model inference from SQL. + +To apply a CatBoost model in ClickHouse: + +1. [Create a Table](#create-table). +2. [Insert the Data to the Table](#insert-data-to-table). +3. [Integrate CatBoost into ClickHouse](#integrate-catboost-into-clickhouse) (Optional step). +4. [Run the Model Inference from SQL](#run-model-inference). + +For more information about training CatBoost models, see [Training and applying models](https://catboost.ai/docs/features/training.html#training). + +## Prerequisites {#prerequisites} + +If you don’t have the [Docker](https://docs.docker.com/install/) yet, install it. + +!!! note "Note" + [Docker](https://www.docker.com) is a software platform that allows you to create containers that isolate a CatBoost and ClickHouse installation from the rest of the system. + +Before applying a CatBoost model: + +**1.** Pull the [Docker image](https://hub.docker.com/r/yandex/tutorial-catboost-clickhouse) from the registry: + +``` bash +$ docker pull yandex/tutorial-catboost-clickhouse +``` + +This Docker image contains everything you need to run CatBoost and ClickHouse: code, runtime, libraries, environment variables, and configuration files. + +**2.** Make sure the Docker image has been successfully pulled: + +``` bash +$ docker image ls +REPOSITORY TAG IMAGE ID CREATED SIZE +yandex/tutorial-catboost-clickhouse latest 622e4d17945b 22 hours ago 1.37GB +``` + +**3.** Start a Docker container based on this image: + +``` bash +$ docker run -it -p 8888:8888 yandex/tutorial-catboost-clickhouse +``` + +## 1. Create a Table {#create-table} + +To create a ClickHouse table for the training sample: + +**1.** Start ClickHouse console client in the interactive mode: + +``` bash +$ clickhouse client +``` + +!!! note "Note" + The ClickHouse server is already running inside the Docker container. + +**2.** Create the table using the command: + +``` sql +:) CREATE TABLE amazon_train +( + date Date MATERIALIZED today(), + ACTION UInt8, + RESOURCE UInt32, + MGR_ID UInt32, + ROLE_ROLLUP_1 UInt32, + ROLE_ROLLUP_2 UInt32, + ROLE_DEPTNAME UInt32, + ROLE_TITLE UInt32, + ROLE_FAMILY_DESC UInt32, + ROLE_FAMILY UInt32, + ROLE_CODE UInt32 +) +ENGINE = MergeTree ORDER BY date +``` + +**3.** Exit from ClickHouse console client: + +``` sql +:) exit +``` + +## 2. 
+
+To insert the data:
+
+**1.** Run the following command:
+
+``` bash
+$ clickhouse client --host 127.0.0.1 --query 'INSERT INTO amazon_train FORMAT CSVWithNames' < ~/amazon/train.csv
+```
+
+**2.** Start the ClickHouse console client in interactive mode:
+
+``` bash
+$ clickhouse client
+```
+
+**3.** Make sure the data has been uploaded:
+
+``` sql
+:) SELECT count() FROM amazon_train
+
+SELECT count()
+FROM amazon_train
+
++-count()-+
+|   65538 |
++---------+
+```
+
+## 3. Integrate CatBoost into ClickHouse {#integrate-catboost-into-clickhouse}
+
+!!! note "Note"
+    **Optional step.** The Docker image contains everything you need to run CatBoost and ClickHouse.
+
+To integrate CatBoost into ClickHouse:
+
+**1.** Build the evaluation library.
+
+The fastest way to evaluate a CatBoost model is to compile the `libcatboostmodel.so` library. For more information about how to build the library, see [CatBoost documentation](https://catboost.ai/docs/concepts/c-plus-plus-api_dynamic-c-pluplus-wrapper.html).
+
+**2.** Create a new directory anywhere and with any name, for example, `data`, and put the created library in it. The Docker image already contains the library `data/libcatboostmodel.so`.
+
+**3.** Create a new directory for the model configuration anywhere and with any name, for example, `models`.
+
+**4.** Create a model configuration file with any name, for example, `models/amazon_model.xml`.
+
+**5.** Describe the model configuration:
+
+``` xml
+<models>
+    <model>
+        <type>catboost</type>
+        <name>amazon</name>
+        <path>/home/catboost/tutorial/catboost_model.bin</path>
+        <lifetime>0</lifetime>
+    </model>
+</models>
+```
+
+**6.** Add the path to CatBoost and the model configuration to the ClickHouse configuration:
+
+``` xml
+<catboost_dynamic_library_path>/home/catboost/data/libcatboostmodel.so</catboost_dynamic_library_path>
+<models_config>/home/catboost/models/*_model.xml</models_config>
+```
+
+## 4. Run the Model Inference from SQL {#run-model-inference}
+
+To test the model, run the ClickHouse client: `$ clickhouse client`.
+
+Let’s make sure that the model is working:
+
+``` sql
+:) SELECT
+    modelEvaluate('amazon',
+                  RESOURCE,
+                  MGR_ID,
+                  ROLE_ROLLUP_1,
+                  ROLE_ROLLUP_2,
+                  ROLE_DEPTNAME,
+                  ROLE_TITLE,
+                  ROLE_FAMILY_DESC,
+                  ROLE_FAMILY,
+                  ROLE_CODE) > 0 AS prediction,
+    ACTION AS target
+FROM amazon_train
+LIMIT 10
+```
+
+!!! note "Note"
+    The [modelEvaluate](../query_language/functions/other_functions.md#function-modelevaluate) function returns a tuple with per-class raw predictions for multiclass models.
+
+Let’s predict the probability:
+
+``` sql
+:) SELECT
+    modelEvaluate('amazon',
+                  RESOURCE,
+                  MGR_ID,
+                  ROLE_ROLLUP_1,
+                  ROLE_ROLLUP_2,
+                  ROLE_DEPTNAME,
+                  ROLE_TITLE,
+                  ROLE_FAMILY_DESC,
+                  ROLE_FAMILY,
+                  ROLE_CODE) AS prediction,
+    1. / (1 + exp(-prediction)) AS probability,
+    ACTION AS target
+FROM amazon_train
+LIMIT 10
+```
+
+!!! note "Note"
+    More info about the [exp()](../query_language/functions/math_functions.md) function.
+
+Let’s calculate LogLoss on the sample:
+
+``` sql
+:) SELECT -avg(tg * log(prob) + (1 - tg) * log(1 - prob)) AS logloss
+FROM
+(
+    SELECT
+        modelEvaluate('amazon',
+                      RESOURCE,
+                      MGR_ID,
+                      ROLE_ROLLUP_1,
+                      ROLE_ROLLUP_2,
+                      ROLE_DEPTNAME,
+                      ROLE_TITLE,
+                      ROLE_FAMILY_DESC,
+                      ROLE_FAMILY,
+                      ROLE_CODE) AS prediction,
+        1. / (1. + exp(-prediction)) AS prob,
+        ACTION AS tg
+    FROM amazon_train
+)
+```
+
+!!! note "Note"
+    More info about the [avg()](../query_language/agg_functions/reference.md#agg_function-avg) and [log()](../query_language/functions/math_functions.md) functions.
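+
+As a follow-up, a quick accuracy check can be computed the same way. This query is not part of the original guide, just a minimal sketch reusing the same columns and the same `> 0` decision threshold:
+
+``` sql
+:) SELECT avg(prediction = target) AS accuracy
+FROM
+(
+    SELECT
+        modelEvaluate('amazon',
+                      RESOURCE,
+                      MGR_ID,
+                      ROLE_ROLLUP_1,
+                      ROLE_ROLLUP_2,
+                      ROLE_DEPTNAME,
+                      ROLE_TITLE,
+                      ROLE_FAMILY_DESC,
+                      ROLE_FAMILY,
+                      ROLE_CODE) > 0 AS prediction,
+        ACTION AS target
+    FROM amazon_train
+)
+```
+
+Here `prediction = target` evaluates to 0 or 1 per row, so its average is the fraction of correctly classified rows.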
+
+[Original article](https://clickhouse.tech/docs/en/guides/apply_catboost_model/)
diff --git a/docs/zh/guides/index.md b/docs/zh/guides/index.md
deleted file mode 120000
index 162dcbc3b8f..00000000000
--- a/docs/zh/guides/index.md
+++ /dev/null
@@ -1 +0,0 @@
-../../en/guides/index.md
\ No newline at end of file
diff --git a/docs/zh/guides/index.md b/docs/zh/guides/index.md
new file mode 100644
index 00000000000..c1968730961
--- /dev/null
+++ b/docs/zh/guides/index.md
@@ -0,0 +1,12 @@
+---
+en_copy: true
+---
+
+# ClickHouse Guides {#clickhouse-guides}
+
+List of detailed step-by-step instructions that help to solve various tasks using ClickHouse:
+
+- [Tutorial on simple cluster set-up](../getting_started/tutorial.md)
+- [Applying a CatBoost model in ClickHouse](apply_catboost_model.md)
+
+[Original article](https://clickhouse.tech/docs/en/guides/)
diff --git a/docs/zh/interfaces/mysql.md b/docs/zh/interfaces/mysql.md
deleted file mode 120000
index df728b35f80..00000000000
--- a/docs/zh/interfaces/mysql.md
+++ /dev/null
@@ -1 +0,0 @@
-../../en/interfaces/mysql.md
\ No newline at end of file
diff --git a/docs/zh/interfaces/mysql.md b/docs/zh/interfaces/mysql.md
new file mode 100644
index 00000000000..668c0b7b9c3
--- /dev/null
+++ b/docs/zh/interfaces/mysql.md
@@ -0,0 +1,46 @@
+---
+en_copy: true
+---
+
+# MySQL interface {#mysql-interface}
+
+ClickHouse supports the MySQL wire protocol. It can be enabled by the [mysql\_port](../operations/server_settings/settings.md#server_settings-mysql_port) setting in the configuration file:
+
+``` xml
+<mysql_port>9004</mysql_port>
+```
+
+Example of connecting using the command-line tool `mysql`:
+
+``` bash
+$ mysql --protocol tcp -u default -P 9004
+```
+
+Output if the connection succeeds:
+
+``` text
+Welcome to the MySQL monitor.  Commands end with ; or \g.
+Your MySQL connection id is 4
+Server version: 20.2.1.1-ClickHouse
+
+Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
+
+Oracle is a registered trademark of Oracle Corporation and/or its
+affiliates. Other names may be trademarks of their respective
+owners.
+
+Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
+
+mysql>
+```
+
+For compatibility with all MySQL clients, it is recommended to specify the user password with [double SHA1](../operations/settings/settings_users.md#password_double_sha1_hex) in the configuration file.
+If the user password is specified using [SHA256](../operations/settings/settings_users.md#password_sha256_hex), some clients won’t be able to authenticate (mysqljs and old versions of the command-line tool mysql).
+
+Restrictions:
+
+- prepared queries are not supported
+
+- some data types are sent as strings
+
+[Original article](https://clickhouse.tech/docs/en/interfaces/mysql/)
diff --git a/docs/zh/introduction/adopters.md b/docs/zh/introduction/adopters.md
deleted file mode 120000
index 659153d5f6c..00000000000
--- a/docs/zh/introduction/adopters.md
+++ /dev/null
@@ -1 +0,0 @@
-../../en/introduction/adopters.md
\ No newline at end of file
diff --git a/docs/zh/introduction/adopters.md b/docs/zh/introduction/adopters.md
new file mode 100644
index 00000000000..ef841b2fa05
--- /dev/null
+++ b/docs/zh/introduction/adopters.md
@@ -0,0 +1,79 @@
+---
+en_copy: true
+---
+
+# ClickHouse Adopters {#clickhouse-adopters}
+
+!!! warning "Disclaimer"
+    The following list of companies using ClickHouse and their success stories is assembled from public sources, and thus might differ from current reality. 
We’d appreciate it if you share the story of adopting ClickHouse in your company and [add it to the list](https://github.com/ClickHouse/ClickHouse/edit/master/docs/en/introduction/adopters.md), but please make sure you won’t have any NDA issues by doing so. Providing updates with publications from other companies is also useful. + +| Company | Industry | Usecase | Cluster Size | (Un)Compressed Data Size\* | Reference | +|-----------------------------------------------------------------------------|---------------------------------|-----------------------|------------------------------------------------------------|------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| [2gis](https://2gis.ru) | Maps | Monitoring | — | — | [Talk in Russian, July 2019](https://youtu.be/58sPkXfq6nw) | +| [Aloha Browser](https://alohabrowser.com/) | Mobile App | Browser backend | — | — | [Slides in Russian, May 2019](https://github.com/yandex/clickhouse-presentations/blob/master/meetup22/aloha.pdf) | +| [Amadeus](https://amadeus.com/) | Travel | Analytics | — | — | [Press Release, April 2018](https://www.altinity.com/blog/2018/4/5/amadeus-technologies-launches-investment-and-insights-tool-based-on-machine-learning-and-strategy-algorithms) | +| [Appsflyer](https://www.appsflyer.com) | Mobile analytics | Main product | — | — | [Talk in Russian, July 2019](https://www.youtube.com/watch?v=M3wbRlcpBbY) | +| [ArenaData](https://arenadata.tech/) | Data Platform | Main product | — | — | [Slides in Russian, December 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup38/indexes.pdf) | +| [Badoo](https://badoo.com) | Dating | Timeseries | — | — | [Slides in Russian, December 2019](https://presentations.clickhouse.tech/meetup38/forecast.pdf) | +| [Benocs](https://www.benocs.com/) | Network Telemetry and Analytics | Main Product | — | — | [Slides in English, October 2017](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup9/lpm.pdf) | +| [Bloomberg](https://www.bloomberg.com/) | Finance, Media | Monitoring | 102 servers | — | [Slides, May 2018](https://www.slideshare.net/Altinity/http-analytics-for-6m-requests-per-second-using-clickhouse-by-alexander-bocharov) | +| [Bloxy](https://bloxy.info) | Blockchain | Analytics | — | — | [Slides in Russian, August 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/4_bloxy.pptx) | +| `Dataliance/UltraPower` | Telecom | Analytics | — | — | [Slides in Chinese, January 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup12/telecom.pdf) | +| [CARTO](https://carto.com/) | Business Intelligence | Geo analytics | — | — | [Geospatial processing with Clickhouse](https://carto.com/blog/geospatial-processing-with-clickhouse/) | +| [CERN](http://public.web.cern.ch/public/) | Research | Experiment | — | — | [Press release, April 2012](https://www.yandex.com/company/press_center/press_releases/2012/2012-04-10/) | +| [Cisco](http://cisco.com/) | Networking | Traffic analysis | — | — | [Lightning talk, October 2019](https://youtu.be/-hI1vDR2oPY?t=5057) | +| [Citadel Securities](https://www.citadelsecurities.com/) | Finance | — | — | — | [Contribution, March 2019](https://github.com/ClickHouse/ClickHouse/pull/4774) | +| [Citymobil](https://city-mobil.ru) 
| Taxi | Analytics | — | — | [Blog Post in Russian, March 2020](https://habr.com/en/company/citymobil/blog/490660/) | +| [ContentSquare](https://contentsquare.com) | Web analytics | Main product | — | — | [Blog post in French, November 2018](http://souslecapot.net/2018/11/21/patrick-chatain-vp-engineering-chez-contentsquare-penser-davantage-amelioration-continue-que-revolution-constante/) | +| [Cloudflare](https://cloudflare.com) | CDN | Traffic analysis | 36 servers | — | [Blog post, May 2017](https://blog.cloudflare.com/how-cloudflare-analyzes-1m-dns-queries-per-second/), [Blog post, March 2018](https://blog.cloudflare.com/http-analytics-for-6m-requests-per-second-using-clickhouse/) | +| [Corunet](https://coru.net/) | Analytics | Main product | — | — | [Slides in English, April 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup21/predictive_models.pdf) | +| [CraiditX 氪信](https://creditx.com) | Finance AI | Analysis | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/udf.pptx) | +| [Criteo/Storetail](https://www.criteo.com/) | Retail | Main product | — | — | [Slides in English, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup18/3_storetail.pptx) | +| [Deutsche Bank](https://db.com) | Finance | BI Analytics | — | — | [Slides in English, October 2019](https://bigdatadays.ru/wp-content/uploads/2019/10/D2-H3-3_Yakunin-Goihburg.pdf) | +| [Diva-e](https://www.diva-e.com) | Digital consulting | Main Product | — | — | [Slides in English, September 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup29/ClickHouse-MeetUp-Unusual-Applications-sd-2019-09-17.pdf) | +| [Exness](https://www.exness.com) | Trading | Metrics, Logging | — | — | [Talk in Russian, May 2019](https://youtu.be/_rpU-TvSfZ8?t=3215) | +| [Geniee](https://geniee.co.jp) | Ad network | Main product | — | — | [Blog post in Japanese, July 2017](https://tech.geniee.co.jp/entry/2017/07/20/160100) | +| [HUYA](https://www.huya.com/) | Video Streaming | Analytics | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/7.%20ClickHouse万亿数据分析实践%20李本旺(sundy-li)%20虎牙.pdf) | +| [Idealista](https://www.idealista.com) | Real Estate | Analytics | — | — | [Blog Post in English, April 2019](https://clickhouse.yandex/blog/en/clickhouse-meetup-in-madrid-on-april-2-2019) | +| [Infovista](https://www.infovista.com/) | Networks | Analytics | — | — | [Slides in English, October 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup30/infovista.pdf) | +| [InnoGames](https://www.innogames.com) | Games | Metrics, Logging | — | — | [Slides in Russian, September 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/graphite_and_clickHouse.pdf) | +| [Integros](https://integros.com) | Platform for video services | Analytics | — | — | [Slides in Russian, May 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup22/strategies.pdf) | +| [Kodiak Data](https://www.kodiakdata.com/) | Clouds | Main product | — | — | [Slides in Engish, April 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup13/kodiak_data.pdf) | +| [Kontur](https://kontur.ru) | Software Development | Metrics | — | — | [Talk in Russian, November 2018](https://www.youtube.com/watch?v=U4u4Bd0FtrY) | +| [LifeStreet](https://lifestreet.com/) | Ad network | Main product | 75 servers 
(3 replicas) | 5.27 PiB | [Blog post in Russian, February 2017](https://habr.com/en/post/322620/) | +| [Mail.ru Cloud Solutions](https://mcs.mail.ru/) | Cloud services | Main product | — | — | [Running ClickHouse Instance, in Russian](https://mcs.mail.ru/help/db-create/clickhouse#) | +| [MessageBird](https://www.messagebird.com) | Telecommunications | Statistics | — | — | [Slides in English, November 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup20/messagebird.pdf) | +| [MGID](https://www.mgid.com/) | Ad network | Web-analytics | — | — | [Our experience in implementing analytical DBMS ClickHouse, in Russian](http://gs-studio.com/news-about-it/32777----clickhouse---c) | +| [OneAPM](https://www.oneapm.com/) | Monitorings and Data Analysis | Main product | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/8.%20clickhouse在OneAPM的应用%20杜龙.pdf) | +| [Pragma Innovation](http://www.pragma-innovation.fr/) | Telemetry and Big Data Analysis | Main product | — | — | [Slides in English, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup18/4_pragma_innovation.pdf) | +| [QINGCLOUD](https://www.qingcloud.com/) | Cloud services | Main product | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/4.%20Cloud%20%2B%20TSDB%20for%20ClickHouse%20张健%20QingCloud.pdf) | +| [Qrator](https://qrator.net) | DDoS protection | Main product | — | — | [Blog Post, March 2019](https://blog.qrator.net/en/clickhouse-ddos-mitigation_37/) | +| [Beijing PERCENT Information Technology Co., Ltd.](https://www.percent.cn/) | Analytics | Main Product | — | — | [Slides in Chinese, June 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup24/4.%20ClickHouse万亿数据双中心的设计与实践%20.pdf) | +| [Rambler](https://rambler.ru) | Internet services | Analytics | — | — | [Talk in Russian, April 2018](https://medium.com/@ramblertop/разработка-api-clickhouse-для-рамблер-топ-100-f4c7e56f3141) | +| [Tencent](https://www.tencent.com) | Messaging | Logging | — | — | [Talk in Chinese, November 2019](https://youtu.be/T-iVQRuw-QY?t=5050) | +| [Traffic Stars](https://trafficstars.com/) | AD network | — | — | — | [Slides in Russian, May 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup15/lightning/ninja.pdf) | +| [S7 Airlines](https://www.s7.ru) | Airlines | Metrics, Logging | — | — | [Talk in Russian, March 2019](https://www.youtube.com/watch?v=nwG68klRpPg&t=15s) | +| [SEMrush](https://www.semrush.com/) | Marketing | Main product | — | — | [Slides in Russian, August 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/5_semrush.pdf) | +| [scireum GmbH](https://www.scireum.de/) | e-Commerce | Main product | — | — | [Talk in German, February 2020](https://www.youtube.com/watch?v=7QWAn5RbyR4) | +| [Sentry](https://sentry.io/) | Software developer | Backend for product | — | — | [Blog Post in English, May 2019](https://blog.sentry.io/2019/05/16/introducing-snuba-sentrys-new-search-infrastructure) | +| [SGK](http://www.sgk.gov.tr/wps/portal/sgk/tr) | Goverment Social Security | Analytics | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup35/ClickHouse%20Meetup-Ramazan%20POLAT.pdf) | +| [seo.do](https://seo.do/) | Analytics | Main product | — | — | [Slides in English, November 
2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup35/CH%20Presentation-%20Metehan%20Çetinkaya.pdf) | +| [Sina](http://english.sina.com/index.html) | News | — | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/6.%20ClickHouse最佳实践%20高鹏_新浪.pdf) | +| [SMI2](https://smi2.ru/) | News | Analytics | — | — | [Blog Post in Russian, November 2017](https://habr.com/ru/company/smi2/blog/314558/) | +| [Splunk](https://www.splunk.com/) | Business Analytics | Main product | — | — | [Slides in English, January 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup12/splunk.pdf) | +| [Spotify](https://www.spotify.com) | Music | Experimentation | — | — | [Slides, July 2018](https://www.slideshare.net/glebus/using-clickhouse-for-experimentation-104247173) | +| [Tencent](https://www.tencent.com) | Big Data | Data processing | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/5.%20ClickHouse大数据集群应用_李俊飞腾讯网媒事业部.pdf) | +| [Uber](https://www.uber.com) | Taxi | Logging | — | — | [Slides, February 2020](https://presentations.clickhouse.tech/meetup40/uber.pdf) | +| [VKontakte](https://vk.com) | Social Network | Statistics, Logging | — | — | [Slides in Russian, August 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/3_vk.pdf) | +| [Wisebits](https://wisebits.com/) | IT Solutions | Analytics | — | — | [Slides in Russian, May 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup22/strategies.pdf) | +| [Xiaoxin Tech.](https://www.xiaoheiban.cn/) | Education | Common purpose | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/sync-clickhouse-with-mysql-mongodb.pptx) | +| [Ximalaya](https://www.ximalaya.com/) | Audio sharing | OLAP | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/ximalaya.pdf) | +| [Yandex Cloud](https://cloud.yandex.ru/services/managed-clickhouse) | Public Cloud | Main product | — | — | [Talk in Russian, December 2019](https://www.youtube.com/watch?v=pgnak9e_E0o) | +| [Yandex DataLens](https://cloud.yandex.ru/services/datalens) | Business Intelligence | Main product | — | — | [Slides in Russian, December 2019](https://presentations.clickhouse.tech/meetup38/datalens.pdf) | +| [Yandex Market](https://market.yandex.ru/) | e-Commerce | Metrics, Logging | — | — | [Talk in Russian, January 2019](https://youtu.be/_l1qP0DyBcA?t=478) | +| [Yandex Metrica](https://metrica.yandex.com) | Web analytics | Main product | 360 servers in one cluster, 1862 servers in one department | 66.41 PiB / 5.68 PiB | [Slides, February 2020](https://presentations.clickhouse.tech/meetup40/introduction/#13) | +| [ЦВТ](https://htc-cs.ru/) | Software Development | Metrics, Logging | — | — | [Blog Post, March 2019, in Russian](https://vc.ru/dev/62715-kak-my-stroili-monitoring-na-prometheus-clickhouse-i-elk) | +| [МКБ](https://mkb.ru/) | Bank | Web-system monitoring | — | — | [Slides in Russian, September 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/mkb.pdf) | +| [金数据](https://jinshuju.net) | BI Analytics | Main product | — | — | [Slides in Chinese, October 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup24/3.%20金数据数据架构调整方案Public.pdf) | + +[Original 
article](https://clickhouse.tech/docs/en/introduction/adopters/)
diff --git a/docs/zh/operations/backup.md b/docs/zh/operations/backup.md
deleted file mode 120000
index 1003fb30e61..00000000000
--- a/docs/zh/operations/backup.md
+++ /dev/null
@@ -1 +0,0 @@
-../../en/operations/backup.md
\ No newline at end of file
diff --git a/docs/zh/operations/backup.md b/docs/zh/operations/backup.md
new file mode 100644
index 00000000000..90efb613098
--- /dev/null
+++ b/docs/zh/operations/backup.md
@@ -0,0 +1,38 @@
+---
+en_copy: true
+---
+
+# Data Backup {#data-backup}
+
+While [replication](table_engines/replication.md) provides protection from hardware failures, it does not protect against human errors: accidental deletion of data, deletion of the wrong table or a table on the wrong cluster, and software bugs that result in incorrect data processing or data corruption. In many cases mistakes like these will affect all replicas. ClickHouse has built-in safeguards to prevent some types of mistakes — for example, by default [you can’t just drop tables with a MergeTree-like engine containing more than 50 Gb of data](https://github.com/ClickHouse/ClickHouse/blob/v18.14.18-stable/programs/server/config.xml#L322-L330). However, these safeguards don’t cover all possible cases and can be circumvented.
+
+In order to effectively mitigate possible human errors, you should carefully prepare a strategy for backing up and restoring your data **in advance**.
+
+Each company has different available resources and business requirements, so there’s no universal solution for ClickHouse backups and restores that will fit every situation. What works for one gigabyte of data likely won’t work for tens of petabytes. There are a variety of possible approaches with their own pros and cons, which will be discussed below. It is a good idea to use several approaches instead of just one in order to compensate for their various shortcomings.
+
+!!! note "Note"
+    Keep in mind that if you backed something up and never tried to restore it, chances are that the restore will not work properly when you actually need it (or at least it will take longer than the business can tolerate). So whatever backup approach you choose, make sure to automate the restore process as well, and practice it on a spare ClickHouse cluster regularly.
+
+## Duplicating Source Data Somewhere Else {#duplicating-source-data-somewhere-else}
+
+Often data that is ingested into ClickHouse is delivered through some sort of persistent queue, such as [Apache Kafka](https://kafka.apache.org). In this case it is possible to configure an additional set of subscribers that will read the same data stream while it is being written to ClickHouse and store it in cold storage somewhere. Most companies already have some default recommended cold storage, which could be an object store or a distributed filesystem like [HDFS](https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html).
+
+## Filesystem Snapshots {#filesystem-snapshots}
+
+Some local filesystems provide snapshot functionality (for example, [ZFS](https://en.wikipedia.org/wiki/ZFS)), but they might not be the best choice for serving live queries. A possible solution is to create additional replicas with this kind of filesystem and exclude them from the [Distributed](table_engines/distributed.md) tables that are used for `SELECT` queries. Snapshots on such replicas will be out of reach of any queries that modify data, as in the sketch below.
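+
+A minimal sketch of the exclusion idea (the cluster and table names here are hypothetical, not from this document): the `Distributed` table used for `SELECT` queries points at a cluster definition that simply does not list the snapshot replicas.
+
+``` sql
+-- Hypothetical: 'serving_cluster' is defined in the server config and
+-- lists only the query-serving replicas; snapshot replicas are omitted,
+-- so SELECTs routed through this table never touch them.
+CREATE TABLE db.hits_all AS db.hits_local
+ENGINE = Distributed(serving_cluster, db, hits_local)
+```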
+As a bonus, these replicas might have special hardware configurations with more disks attached per server, which would be cost-effective.
+
+## clickhouse-copier {#clickhouse-copier}
+
+[clickhouse-copier](utils/clickhouse-copier.md) is a versatile tool that was initially created to re-shard petabyte-sized tables. It can also be used for backup and restore purposes because it reliably copies data between ClickHouse tables and clusters.
+
+For smaller volumes of data, a simple `INSERT INTO ... SELECT ...` to remote tables might work as well.
+
+## Manipulations with Parts {#manipulations-with-parts}
+
+ClickHouse allows using the `ALTER TABLE ... FREEZE PARTITION ...` query to create a local copy of table partitions. This is implemented using hardlinks to the `/var/lib/clickhouse/shadow/` folder, so it usually does not consume extra disk space for old data. The created copies of files are not handled by the ClickHouse server, so you can just leave them there: you will have a simple backup that doesn’t require any additional external system, but it will still be prone to hardware issues. For this reason, it’s better to remotely copy them to another location and then remove the local copies. Distributed filesystems and object stores are still good options for this, but normal attached file servers with a large enough capacity might work as well (in this case the transfer will occur via the network filesystem or maybe [rsync](https://en.wikipedia.org/wiki/Rsync)).
+
+For more information about queries related to partition manipulations, see the [ALTER documentation](../query_language/alter.md#alter_manipulations-with-partitions).
+
+A third-party tool is available to automate this approach: [clickhouse-backup](https://github.com/AlexAkulov/clickhouse-backup).
+
+[Original article](https://clickhouse.tech/docs/en/operations/backup/)
diff --git a/docs/zh/operations/performance/sampling_query_profiler.md b/docs/zh/operations/performance/sampling_query_profiler.md
deleted file mode 120000
index c55c58684ba..00000000000
--- a/docs/zh/operations/performance/sampling_query_profiler.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/operations/performance/sampling_query_profiler.md
\ No newline at end of file
diff --git a/docs/zh/operations/performance/sampling_query_profiler.md b/docs/zh/operations/performance/sampling_query_profiler.md
new file mode 100644
index 00000000000..25368fcd883
--- /dev/null
+++ b/docs/zh/operations/performance/sampling_query_profiler.md
@@ -0,0 +1,61 @@
+---
+en_copy: true
+---
+
+# Sampling Query Profiler {#sampling-query-profiler}
+
+ClickHouse runs a sampling profiler that allows analyzing query execution. Using the profiler you can find source code routines that are used most frequently during query execution. You can trace CPU time and wall-clock time spent, including idle time.
+
+To use the profiler:
+
+- Set up the [trace\_log](../server_settings/settings.md#server_settings-trace_log) section of the server configuration.
+
+    This section configures the [trace\_log](../system_tables.md#system_tables-trace_log) system table containing the results of the profiler functioning. It is configured by default. Remember that data in this table is valid only for a running server. After a server restart, ClickHouse doesn’t clean up the table, and all the stored virtual memory addresses may become invalid.
+
+- Set up the [query\_profiler\_cpu\_time\_period\_ns](../settings/settings.md#query_profiler_cpu_time_period_ns) or [query\_profiler\_real\_time\_period\_ns](../settings/settings.md#query_profiler_real_time_period_ns) settings. Both settings can be used simultaneously.
+
+    These settings allow you to configure the profiler timers. As these are session settings, you can get different sampling frequencies for the whole server, for individual users or user profiles, for your interactive session, and for each individual query.
+
+The default sampling frequency is one sample per second, and both the CPU and real timers are enabled. This frequency allows collecting enough information about a ClickHouse cluster. At the same time, working with this frequency, the profiler doesn’t affect the ClickHouse server’s performance. If you need to profile each individual query, try a higher sampling frequency.
+
+To analyze the `trace_log` system table:
+
+- Install the `clickhouse-common-static-dbg` package. See [Install from DEB Packages](../../getting_started/install.md#install-from-deb-packages).
+
+- Allow introspection functions with the [allow\_introspection\_functions](../settings/settings.md#settings-allow_introspection_functions) setting.
+
+    For security reasons, introspection functions are disabled by default.
+
+- Use the `addressToLine`, `addressToSymbol` and `demangle` [introspection functions](../../query_language/functions/introspection.md) to get function names and their positions in ClickHouse code. To get a profile for some query, you need to aggregate data from the `trace_log` table. You can aggregate data by individual functions or by whole stack traces.
+
+If you need to visualize `trace_log` info, try [flamegraph](../../interfaces/third-party/gui/#clickhouse-flamegraph) and [speedscope](https://github.com/laplab/clickhouse-speedscope).
+
+## Example {#example}
+
+In this example we:
+
+- Filter `trace_log` data by a query identifier and the current date.
+
+- Aggregate by stack trace.
+
+- Use introspection functions to get a report of:
+
+    - Names of symbols and corresponding source code functions.
+    - Source code locations of these functions.
+ + + +``` sql +SELECT + count(), + arrayStringConcat(arrayMap(x -> concat(demangle(addressToSymbol(x)), '\n ', addressToLine(x)), trace), '\n') AS sym +FROM system.trace_log +WHERE (query_id = 'ebca3574-ad0a-400a-9cbc-dca382f5998c') AND (event_date = today()) +GROUP BY trace +ORDER BY count() DESC +LIMIT 10 +``` + +``` text +{% include "operations/performance/sampling_query_profiler_example_result.txt" %} +``` diff --git a/docs/zh/operations/performance/sampling_query_profiler_example_result.txt b/docs/zh/operations/performance/sampling_query_profiler_example_result.txt deleted file mode 120000 index 58c5abe7122..00000000000 --- a/docs/zh/operations/performance/sampling_query_profiler_example_result.txt +++ /dev/null @@ -1 +0,0 @@ -../../../en/operations/performance/sampling_query_profiler_example_result.txt \ No newline at end of file diff --git a/docs/zh/operations/performance/sampling_query_profiler_example_result.txt b/docs/zh/operations/performance/sampling_query_profiler_example_result.txt new file mode 100644 index 00000000000..a5f6d71ca95 --- /dev/null +++ b/docs/zh/operations/performance/sampling_query_profiler_example_result.txt @@ -0,0 +1,560 @@ +--- +en_copy: true +--- + +Row 1: +────── +count(): 6344 +sym: StackTrace::StackTrace(ucontext_t const&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 +DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 + + +read + +DB::ReadBufferFromFileDescriptor::nextImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/ReadBufferFromFileDescriptor.cpp:56 +DB::CompressedReadBufferBase::readCompressedData(unsigned long&, unsigned long&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/ReadBuffer.h:54 +DB::CompressedReadBufferFromFile::nextImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/Compression/CompressedReadBufferFromFile.cpp:22 +DB::CompressedReadBufferFromFile::seek(unsigned long, unsigned long) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Compression/CompressedReadBufferFromFile.cpp:63 +DB::MergeTreeReaderStream::seekToMark(unsigned long) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReaderStream.cpp:200 +std::_Function_handler > const&), DB::MergeTreeReader::readData(std::__cxx11::basic_string, std::allocator > const&, DB::IDataType const&, DB::IColumn&, unsigned long, bool, unsigned long, bool)::{lambda(bool)#1}::operator()(bool) const::{lambda(std::vector > const&)#1}>::_M_invoke(std::_Any_data const&, std::vector > const&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:212 +DB::IDataType::deserializeBinaryBulkWithMultipleStreams(DB::IColumn&, unsigned long, DB::IDataType::DeserializeBinaryBulkSettings&, std::shared_ptr&) const + /usr/local/include/c++/9.1.0/bits/std_function.h:690 +DB::MergeTreeReader::readData(std::__cxx11::basic_string, std::allocator > const&, DB::IDataType const&, DB::IColumn&, unsigned long, bool, unsigned long, bool) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:232 +DB::MergeTreeReader::readRows(unsigned long, bool, unsigned long, DB::Block&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:111 +DB::MergeTreeRangeReader::DelayedStream::finalize(DB::Block&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:35 
+DB::MergeTreeRangeReader::continueReadingChain(DB::MergeTreeRangeReader::ReadResult&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:219 +DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:487 +DB::MergeTreeBaseSelectBlockInputStream::readFromPartImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 +DB::MergeTreeBaseSelectBlockInputStream::readImpl() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::ExpressionBlockInputStream::readImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::PartialSortingBlockInputStream::readImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::ParallelInputsProcessor::loop(unsigned long) + /usr/local/include/c++/9.1.0/bits/atomic_base.h:419 +DB::ParallelInputsProcessor::thread(std::shared_ptr, unsigned long) + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215 +ThreadFromGlobalPool::ThreadFromGlobalPool::*)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*, std::shared_ptr, unsigned long&>(void (DB::ParallelInputsProcessor::*&&)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*&&, std::shared_ptr&&, unsigned long&)::{lambda()#1}::operator()() const + /usr/local/include/c++/9.1.0/bits/shared_ptr_base.h:729 +ThreadPoolImpl::worker(std::_List_iterator) + /usr/local/include/c++/9.1.0/bits/unique_lock.h:69 +execute_native_thread_routine + /home/milovidov/ClickHouse/ci/workspace/gcc/gcc-build/x86_64-pc-linux-gnu/libstdc++-v3/include/bits/unique_ptr.h:81 +start_thread + +__clone + + +Row 2: +────── +count(): 3295 +sym: StackTrace::StackTrace(ucontext_t const&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 +DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 + + +__pthread_cond_wait + +std::condition_variable::wait(std::unique_lock&) + /home/milovidov/ClickHouse/ci/workspace/gcc/gcc-build/x86_64-pc-linux-gnu/libstdc++-v3/src/c++11/../../../../../gcc-9.1.0/libstdc++-v3/src/c++11/condition_variable.cc:55 +Poco::Semaphore::wait() + /home/milovidov/ClickHouse/build_gcc9/../contrib/poco/Foundation/src/Semaphore.cpp:61 +DB::UnionBlockInputStream::readImpl() + /usr/local/include/c++/9.1.0/x86_64-pc-linux-gnu/bits/gthr-default.h:748 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::MergeSortingBlockInputStream::readImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/Core/Block.h:90 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::ExpressionBlockInputStream::readImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::LimitBlockInputStream::readImpl() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 
+DB::AsynchronousBlockInputStream::calculate() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +std::_Function_handler::_M_invoke(std::_Any_data const&) + /usr/local/include/c++/9.1.0/bits/atomic_base.h:551 +ThreadPoolImpl::worker(std::_List_iterator) + /usr/local/include/c++/9.1.0/x86_64-pc-linux-gnu/bits/gthr-default.h:748 +ThreadFromGlobalPool::ThreadFromGlobalPool::scheduleImpl(std::function, int, std::optional)::{lambda()#3}>(ThreadPoolImpl::scheduleImpl(std::function, int, std::optional)::{lambda()#3}&&)::{lambda()#1}::operator()() const + /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/ThreadPool.h:146 +ThreadPoolImpl::worker(std::_List_iterator) + /usr/local/include/c++/9.1.0/bits/unique_lock.h:69 +execute_native_thread_routine + /home/milovidov/ClickHouse/ci/workspace/gcc/gcc-build/x86_64-pc-linux-gnu/libstdc++-v3/include/bits/unique_ptr.h:81 +start_thread + +__clone + + +Row 3: +────── +count(): 1978 +sym: StackTrace::StackTrace(ucontext_t const&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 +DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 + + +DB::VolnitskyBase >::search(unsigned char const*, unsigned long) const + /opt/milovidov/ClickHouse/build_gcc9/programs/clickhouse +DB::MatchImpl::vector_constant(DB::PODArray, 15ul, 16ul> const&, DB::PODArray, 15ul, 16ul> const&, std::__cxx11::basic_string, std::allocator > const&, DB::PODArray, 15ul, 16ul>&) + /opt/milovidov/ClickHouse/build_gcc9/programs/clickhouse +DB::FunctionsStringSearch, DB::NameLike>::executeImpl(DB::Block&, std::vector > const&, unsigned long, unsigned long) + /opt/milovidov/ClickHouse/build_gcc9/programs/clickhouse +DB::PreparedFunctionImpl::execute(DB::Block&, std::vector > const&, unsigned long, unsigned long, bool) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Functions/IFunction.cpp:464 +DB::ExpressionAction::execute(DB::Block&, bool) const + /usr/local/include/c++/9.1.0/bits/stl_vector.h:677 +DB::ExpressionActions::execute(DB::Block&, bool) const + /home/milovidov/ClickHouse/build_gcc9/../dbms/Interpreters/ExpressionActions.cpp:739 +DB::MergeTreeRangeReader::executePrewhereActionsAndFilterColumns(DB::MergeTreeRangeReader::ReadResult&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:660 +DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:546 +DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::MergeTreeBaseSelectBlockInputStream::readFromPartImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 +DB::MergeTreeBaseSelectBlockInputStream::readImpl() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::ExpressionBlockInputStream::readImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::PartialSortingBlockInputStream::readImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::ParallelInputsProcessor::loop(unsigned long) + 
/usr/local/include/c++/9.1.0/bits/atomic_base.h:419 +DB::ParallelInputsProcessor::thread(std::shared_ptr, unsigned long) + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215 +ThreadFromGlobalPool::ThreadFromGlobalPool::*)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*, std::shared_ptr, unsigned long&>(void (DB::ParallelInputsProcessor::*&&)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*&&, std::shared_ptr&&, unsigned long&)::{lambda()#1}::operator()() const + /usr/local/include/c++/9.1.0/bits/shared_ptr_base.h:729 +ThreadPoolImpl::worker(std::_List_iterator) + /usr/local/include/c++/9.1.0/bits/unique_lock.h:69 +execute_native_thread_routine + /home/milovidov/ClickHouse/ci/workspace/gcc/gcc-build/x86_64-pc-linux-gnu/libstdc++-v3/include/bits/unique_ptr.h:81 +start_thread + +__clone + + +Row 4: +────── +count(): 1913 +sym: StackTrace::StackTrace(ucontext_t const&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 +DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 + + +DB::VolnitskyBase >::search(unsigned char const*, unsigned long) const + /opt/milovidov/ClickHouse/build_gcc9/programs/clickhouse +DB::MatchImpl::vector_constant(DB::PODArray, 15ul, 16ul> const&, DB::PODArray, 15ul, 16ul> const&, std::__cxx11::basic_string, std::allocator > const&, DB::PODArray, 15ul, 16ul>&) + /opt/milovidov/ClickHouse/build_gcc9/programs/clickhouse +DB::FunctionsStringSearch, DB::NameLike>::executeImpl(DB::Block&, std::vector > const&, unsigned long, unsigned long) + /opt/milovidov/ClickHouse/build_gcc9/programs/clickhouse +DB::PreparedFunctionImpl::execute(DB::Block&, std::vector > const&, unsigned long, unsigned long, bool) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Functions/IFunction.cpp:464 +DB::ExpressionAction::execute(DB::Block&, bool) const + /usr/local/include/c++/9.1.0/bits/stl_vector.h:677 +DB::ExpressionActions::execute(DB::Block&, bool) const + /home/milovidov/ClickHouse/build_gcc9/../dbms/Interpreters/ExpressionActions.cpp:739 +DB::MergeTreeRangeReader::executePrewhereActionsAndFilterColumns(DB::MergeTreeRangeReader::ReadResult&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:660 +DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:546 +DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::MergeTreeBaseSelectBlockInputStream::readFromPartImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 +DB::MergeTreeBaseSelectBlockInputStream::readImpl() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::ExpressionBlockInputStream::readImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::PartialSortingBlockInputStream::readImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::ParallelInputsProcessor::loop(unsigned long) + 
/usr/local/include/c++/9.1.0/bits/atomic_base.h:419 +DB::ParallelInputsProcessor::thread(std::shared_ptr, unsigned long) + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215 +ThreadFromGlobalPool::ThreadFromGlobalPool::*)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*, std::shared_ptr, unsigned long&>(void (DB::ParallelInputsProcessor::*&&)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*&&, std::shared_ptr&&, unsigned long&)::{lambda()#1}::operator()() const + /usr/local/include/c++/9.1.0/bits/shared_ptr_base.h:729 +ThreadPoolImpl::worker(std::_List_iterator) + /usr/local/include/c++/9.1.0/bits/unique_lock.h:69 +execute_native_thread_routine + /home/milovidov/ClickHouse/ci/workspace/gcc/gcc-build/x86_64-pc-linux-gnu/libstdc++-v3/include/bits/unique_ptr.h:81 +start_thread + +__clone + + +Row 5: +────── +count(): 1672 +sym: StackTrace::StackTrace(ucontext_t const&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 +DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 + + +DB::VolnitskyBase >::search(unsigned char const*, unsigned long) const + /opt/milovidov/ClickHouse/build_gcc9/programs/clickhouse +DB::MatchImpl::vector_constant(DB::PODArray, 15ul, 16ul> const&, DB::PODArray, 15ul, 16ul> const&, std::__cxx11::basic_string, std::allocator > const&, DB::PODArray, 15ul, 16ul>&) + /opt/milovidov/ClickHouse/build_gcc9/programs/clickhouse +DB::FunctionsStringSearch, DB::NameLike>::executeImpl(DB::Block&, std::vector > const&, unsigned long, unsigned long) + /opt/milovidov/ClickHouse/build_gcc9/programs/clickhouse +DB::PreparedFunctionImpl::execute(DB::Block&, std::vector > const&, unsigned long, unsigned long, bool) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Functions/IFunction.cpp:464 +DB::ExpressionAction::execute(DB::Block&, bool) const + /usr/local/include/c++/9.1.0/bits/stl_vector.h:677 +DB::ExpressionActions::execute(DB::Block&, bool) const + /home/milovidov/ClickHouse/build_gcc9/../dbms/Interpreters/ExpressionActions.cpp:739 +DB::MergeTreeRangeReader::executePrewhereActionsAndFilterColumns(DB::MergeTreeRangeReader::ReadResult&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:660 +DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:546 +DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::MergeTreeBaseSelectBlockInputStream::readFromPartImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 +DB::MergeTreeBaseSelectBlockInputStream::readImpl() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::ExpressionBlockInputStream::readImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::PartialSortingBlockInputStream::readImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::ParallelInputsProcessor::loop(unsigned long) + 
/usr/local/include/c++/9.1.0/bits/atomic_base.h:419 +DB::ParallelInputsProcessor::thread(std::shared_ptr, unsigned long) + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215 +ThreadFromGlobalPool::ThreadFromGlobalPool::*)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*, std::shared_ptr, unsigned long&>(void (DB::ParallelInputsProcessor::*&&)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*&&, std::shared_ptr&&, unsigned long&)::{lambda()#1}::operator()() const + /usr/local/include/c++/9.1.0/bits/shared_ptr_base.h:729 +ThreadPoolImpl::worker(std::_List_iterator) + /usr/local/include/c++/9.1.0/bits/unique_lock.h:69 +execute_native_thread_routine + /home/milovidov/ClickHouse/ci/workspace/gcc/gcc-build/x86_64-pc-linux-gnu/libstdc++-v3/include/bits/unique_ptr.h:81 +start_thread + +__clone + + +Row 6: +────── +count(): 1531 +sym: StackTrace::StackTrace(ucontext_t const&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 +DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 + + +read + +DB::ReadBufferFromFileDescriptor::nextImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/ReadBufferFromFileDescriptor.cpp:56 +DB::CompressedReadBufferBase::readCompressedData(unsigned long&, unsigned long&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/ReadBuffer.h:54 +DB::CompressedReadBufferFromFile::nextImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/Compression/CompressedReadBufferFromFile.cpp:22 +void DB::deserializeBinarySSE2<4>(DB::PODArray, 15ul, 16ul>&, DB::PODArray, 15ul, 16ul>&, DB::ReadBuffer&, unsigned long) + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/ReadBuffer.h:53 +DB::DataTypeString::deserializeBinaryBulk(DB::IColumn&, DB::ReadBuffer&, unsigned long, double) const + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataTypes/DataTypeString.cpp:202 +DB::MergeTreeReader::readData(std::__cxx11::basic_string, std::allocator > const&, DB::IDataType const&, DB::IColumn&, unsigned long, bool, unsigned long, bool) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:232 +DB::MergeTreeReader::readRows(unsigned long, bool, unsigned long, DB::Block&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:111 +DB::MergeTreeRangeReader::DelayedStream::finalize(DB::Block&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:35 +DB::MergeTreeRangeReader::startReadingChain(unsigned long, std::vector >&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:219 +DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::MergeTreeBaseSelectBlockInputStream::readFromPartImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 +DB::MergeTreeBaseSelectBlockInputStream::readImpl() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::ExpressionBlockInputStream::readImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34 +DB::IBlockInputStream::read() + 
/usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::PartialSortingBlockInputStream::readImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::ParallelInputsProcessor::loop(unsigned long) + /usr/local/include/c++/9.1.0/bits/atomic_base.h:419 +DB::ParallelInputsProcessor::thread(std::shared_ptr, unsigned long) + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215 +ThreadFromGlobalPool::ThreadFromGlobalPool::*)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*, std::shared_ptr, unsigned long&>(void (DB::ParallelInputsProcessor::*&&)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*&&, std::shared_ptr&&, unsigned long&)::{lambda()#1}::operator()() const + /usr/local/include/c++/9.1.0/bits/shared_ptr_base.h:729 +ThreadPoolImpl::worker(std::_List_iterator) + /usr/local/include/c++/9.1.0/bits/unique_lock.h:69 +execute_native_thread_routine + /home/milovidov/ClickHouse/ci/workspace/gcc/gcc-build/x86_64-pc-linux-gnu/libstdc++-v3/include/bits/unique_ptr.h:81 +start_thread + +__clone + + +Row 7: +────── +count(): 1034 +sym: StackTrace::StackTrace(ucontext_t const&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 +DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 + + +DB::VolnitskyBase >::search(unsigned char const*, unsigned long) const + /opt/milovidov/ClickHouse/build_gcc9/programs/clickhouse +DB::MatchImpl::vector_constant(DB::PODArray, 15ul, 16ul> const&, DB::PODArray, 15ul, 16ul> const&, std::__cxx11::basic_string, std::allocator > const&, DB::PODArray, 15ul, 16ul>&) + /opt/milovidov/ClickHouse/build_gcc9/programs/clickhouse +DB::FunctionsStringSearch, DB::NameLike>::executeImpl(DB::Block&, std::vector > const&, unsigned long, unsigned long) + /opt/milovidov/ClickHouse/build_gcc9/programs/clickhouse +DB::PreparedFunctionImpl::execute(DB::Block&, std::vector > const&, unsigned long, unsigned long, bool) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Functions/IFunction.cpp:464 +DB::ExpressionAction::execute(DB::Block&, bool) const + /usr/local/include/c++/9.1.0/bits/stl_vector.h:677 +DB::ExpressionActions::execute(DB::Block&, bool) const + /home/milovidov/ClickHouse/build_gcc9/../dbms/Interpreters/ExpressionActions.cpp:739 +DB::MergeTreeRangeReader::executePrewhereActionsAndFilterColumns(DB::MergeTreeRangeReader::ReadResult&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:660 +DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:546 +DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::MergeTreeBaseSelectBlockInputStream::readFromPartImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 +DB::MergeTreeBaseSelectBlockInputStream::readImpl() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::ExpressionBlockInputStream::readImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34 +DB::IBlockInputStream::read() + 
/usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::PartialSortingBlockInputStream::readImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::ParallelInputsProcessor::loop(unsigned long) + /usr/local/include/c++/9.1.0/bits/atomic_base.h:419 +DB::ParallelInputsProcessor::thread(std::shared_ptr, unsigned long) + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215 +ThreadFromGlobalPool::ThreadFromGlobalPool::*)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*, std::shared_ptr, unsigned long&>(void (DB::ParallelInputsProcessor::*&&)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*&&, std::shared_ptr&&, unsigned long&)::{lambda()#1}::operator()() const + /usr/local/include/c++/9.1.0/bits/shared_ptr_base.h:729 +ThreadPoolImpl::worker(std::_List_iterator) + /usr/local/include/c++/9.1.0/bits/unique_lock.h:69 +execute_native_thread_routine + /home/milovidov/ClickHouse/ci/workspace/gcc/gcc-build/x86_64-pc-linux-gnu/libstdc++-v3/include/bits/unique_ptr.h:81 +start_thread + +__clone + + +Row 8: +────── +count(): 989 +sym: StackTrace::StackTrace(ucontext_t const&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 +DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 + + +__lll_lock_wait + +pthread_mutex_lock + +DB::MergeTreeReaderStream::loadMarks() + /usr/local/include/c++/9.1.0/bits/std_mutex.h:103 +DB::MergeTreeReaderStream::MergeTreeReaderStream(std::__cxx11::basic_string, std::allocator > const&, std::__cxx11::basic_string, std::allocator > const&, unsigned long, std::vector > const&, DB::MarkCache*, bool, DB::UncompressedCache*, unsigned long, unsigned long, unsigned long, DB::MergeTreeIndexGranularityInfo const*, std::function const&, int) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReaderStream.cpp:107 +std::_Function_handler > const&), DB::MergeTreeReader::addStreams(std::__cxx11::basic_string, std::allocator > const&, DB::IDataType const&, std::function const&, int)::{lambda(std::vector > const&)#1}>::_M_invoke(std::_Any_data const&, std::vector > const&) + /usr/local/include/c++/9.1.0/bits/unique_ptr.h:147 +DB::MergeTreeReader::addStreams(std::__cxx11::basic_string, std::allocator > const&, DB::IDataType const&, std::function const&, int) + /usr/local/include/c++/9.1.0/bits/stl_vector.h:677 +DB::MergeTreeReader::MergeTreeReader(std::__cxx11::basic_string, std::allocator > const&, std::shared_ptr const&, DB::NamesAndTypesList const&, DB::UncompressedCache*, DB::MarkCache*, bool, DB::MergeTreeData const&, std::vector > const&, unsigned long, unsigned long, std::map, std::allocator >, double, std::less, std::allocator > >, std::allocator, std::allocator > const, double> > > const&, std::function const&, int) + /usr/local/include/c++/9.1.0/bits/stl_list.h:303 +DB::MergeTreeThreadSelectBlockInputStream::getNewTask() + /usr/local/include/c++/9.1.0/bits/std_function.h:259 +DB::MergeTreeBaseSelectBlockInputStream::readImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:54 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::ExpressionBlockInputStream::readImpl() + 
/home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::PartialSortingBlockInputStream::readImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::ParallelInputsProcessor::loop(unsigned long) + /usr/local/include/c++/9.1.0/bits/atomic_base.h:419 +DB::ParallelInputsProcessor::thread(std::shared_ptr, unsigned long) + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215 +ThreadFromGlobalPool::ThreadFromGlobalPool::*)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*, std::shared_ptr, unsigned long&>(void (DB::ParallelInputsProcessor::*&&)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*&&, std::shared_ptr&&, unsigned long&)::{lambda()#1}::operator()() const + /usr/local/include/c++/9.1.0/bits/shared_ptr_base.h:729 +ThreadPoolImpl::worker(std::_List_iterator) + /usr/local/include/c++/9.1.0/bits/unique_lock.h:69 +execute_native_thread_routine + /home/milovidov/ClickHouse/ci/workspace/gcc/gcc-build/x86_64-pc-linux-gnu/libstdc++-v3/include/bits/unique_ptr.h:81 +start_thread + +__clone + + +Row 9: +─────── +count(): 779 +sym: StackTrace::StackTrace(ucontext_t const&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 +DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 + + +void DB::deserializeBinarySSE2<4>(DB::PODArray, 15ul, 16ul>&, DB::PODArray, 15ul, 16ul>&, DB::ReadBuffer&, unsigned long) + /usr/local/lib/gcc/x86_64-pc-linux-gnu/9.1.0/include/emmintrin.h:727 +DB::DataTypeString::deserializeBinaryBulk(DB::IColumn&, DB::ReadBuffer&, unsigned long, double) const + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataTypes/DataTypeString.cpp:202 +DB::MergeTreeReader::readData(std::__cxx11::basic_string, std::allocator > const&, DB::IDataType const&, DB::IColumn&, unsigned long, bool, unsigned long, bool) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:232 +DB::MergeTreeReader::readRows(unsigned long, bool, unsigned long, DB::Block&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:111 +DB::MergeTreeRangeReader::DelayedStream::finalize(DB::Block&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:35 +DB::MergeTreeRangeReader::startReadingChain(unsigned long, std::vector >&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:219 +DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::MergeTreeBaseSelectBlockInputStream::readFromPartImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 +DB::MergeTreeBaseSelectBlockInputStream::readImpl() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::ExpressionBlockInputStream::readImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34 +DB::IBlockInputStream::read() + 
/usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::PartialSortingBlockInputStream::readImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::ParallelInputsProcessor::loop(unsigned long) + /usr/local/include/c++/9.1.0/bits/atomic_base.h:419 +DB::ParallelInputsProcessor::thread(std::shared_ptr, unsigned long) + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215 +ThreadFromGlobalPool::ThreadFromGlobalPool::*)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*, std::shared_ptr, unsigned long&>(void (DB::ParallelInputsProcessor::*&&)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*&&, std::shared_ptr&&, unsigned long&)::{lambda()#1}::operator()() const + /usr/local/include/c++/9.1.0/bits/shared_ptr_base.h:729 +ThreadPoolImpl::worker(std::_List_iterator) + /usr/local/include/c++/9.1.0/bits/unique_lock.h:69 +execute_native_thread_routine + /home/milovidov/ClickHouse/ci/workspace/gcc/gcc-build/x86_64-pc-linux-gnu/libstdc++-v3/include/bits/unique_ptr.h:81 +start_thread + +__clone + + +Row 10: +─────── +count(): 666 +sym: StackTrace::StackTrace(ucontext_t const&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 +DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] + /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 + + +void DB::deserializeBinarySSE2<4>(DB::PODArray, 15ul, 16ul>&, DB::PODArray, 15ul, 16ul>&, DB::ReadBuffer&, unsigned long) + /usr/local/lib/gcc/x86_64-pc-linux-gnu/9.1.0/include/emmintrin.h:727 +DB::DataTypeString::deserializeBinaryBulk(DB::IColumn&, DB::ReadBuffer&, unsigned long, double) const + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataTypes/DataTypeString.cpp:202 +DB::MergeTreeReader::readData(std::__cxx11::basic_string, std::allocator > const&, DB::IDataType const&, DB::IColumn&, unsigned long, bool, unsigned long, bool) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:232 +DB::MergeTreeReader::readRows(unsigned long, bool, unsigned long, DB::Block&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:111 +DB::MergeTreeRangeReader::DelayedStream::finalize(DB::Block&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:35 +DB::MergeTreeRangeReader::startReadingChain(unsigned long, std::vector >&) + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:219 +DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::MergeTreeBaseSelectBlockInputStream::readFromPartImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 +DB::MergeTreeBaseSelectBlockInputStream::readImpl() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::ExpressionBlockInputStream::readImpl() + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::PartialSortingBlockInputStream::readImpl() + 
/home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13 +DB::IBlockInputStream::read() + /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 +DB::ParallelInputsProcessor::loop(unsigned long) + /usr/local/include/c++/9.1.0/bits/atomic_base.h:419 +DB::ParallelInputsProcessor::thread(std::shared_ptr, unsigned long) + /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215 +ThreadFromGlobalPool::ThreadFromGlobalPool::*)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*, std::shared_ptr, unsigned long&>(void (DB::ParallelInputsProcessor::*&&)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*&&, std::shared_ptr&&, unsigned long&)::{lambda()#1}::operator()() const + /usr/local/include/c++/9.1.0/bits/shared_ptr_base.h:729 +ThreadPoolImpl::worker(std::_List_iterator) + /usr/local/include/c++/9.1.0/bits/unique_lock.h:69 +execute_native_thread_routine + /home/milovidov/ClickHouse/ci/workspace/gcc/gcc-build/x86_64-pc-linux-gnu/libstdc++-v3/include/bits/unique_ptr.h:81 +start_thread + +__clone diff --git a/docs/zh/operations/performance_test.md b/docs/zh/operations/performance_test.md deleted file mode 120000 index a74c126c63f..00000000000 --- a/docs/zh/operations/performance_test.md +++ /dev/null @@ -1 +0,0 @@ -../../en/operations/performance_test.md \ No newline at end of file diff --git a/docs/zh/operations/performance_test.md b/docs/zh/operations/performance_test.md new file mode 100644 index 00000000000..ae4c5752703 --- /dev/null +++ b/docs/zh/operations/performance_test.md @@ -0,0 +1,79 @@ +--- +en_copy: true +--- + +# How To Test Your Hardware With ClickHouse {#how-to-test-your-hardware-with-clickhouse} + +With this instruction you can run basic ClickHouse performance test on any server without installation of ClickHouse packages. + +1. Go to “commits” page: https://github.com/ClickHouse/ClickHouse/commits/master + +2. Click on the first green check mark or red cross with green “ClickHouse Build Check” and click on the “Details” link near “ClickHouse Build Check”. + +3. Copy the link to “clickhouse” binary for amd64 or aarch64. + +4. ssh to the server and download it with wget: + + + + # For amd64: + wget https://clickhouse-builds.s3.yandex.net/0/00ba767f5d2a929394ea3be193b1f79074a1c4bc/1578163263_binary/clickhouse + # For aarch64: + wget https://clickhouse-builds.s3.yandex.net/0/00ba767f5d2a929394ea3be193b1f79074a1c4bc/1578161264_binary/clickhouse + # Then do: + chmod a+x clickhouse + +1. Download configs: + + + + wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.xml + wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/users.xml + mkdir config.d + wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.d/path.xml -O config.d/path.xml + wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.d/log_to_console.xml -O config.d/log_to_console.xml + +1. Download benchmark files: + + + + wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/clickhouse/benchmark-new.sh + chmod a+x benchmark-new.sh + wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/clickhouse/queries.sql + +1. Download test data according to the [Yandex.Metrica dataset](../getting_started/example_datasets/metrica.md) instruction (“hits” table containing 100 million rows). 
+ + + + wget https://clickhouse-datasets.s3.yandex.net/hits/partitions/hits_100m_obfuscated_v1.tar.xz + tar xvf hits_100m_obfuscated_v1.tar.xz -C . + mv hits_100m_obfuscated_v1/* . + +1. Run the server: + + + + ./clickhouse server + +1. Check the data: ssh to the server in another terminal + + + + ./clickhouse client --query "SELECT count() FROM hits_100m_obfuscated" + 100000000 + +1. Edit the benchmark-new.sh, change “clickhouse-client” to “./clickhouse client” and add “–max\_memory\_usage 100000000000” parameter. + + + + mcedit benchmark-new.sh + +1. Run the benchmark: + + + + ./benchmark-new.sh hits_100m_obfuscated + +1. Send the numbers and the info about your hardware configuration to clickhouse-feedback@yandex-team.com + +All the results are published here: https://clickhouse.tech/benchmark\_hardware.html diff --git a/docs/zh/operations/requirements.md b/docs/zh/operations/requirements.md deleted file mode 120000 index a71283af25c..00000000000 --- a/docs/zh/operations/requirements.md +++ /dev/null @@ -1 +0,0 @@ -../../en/operations/requirements.md \ No newline at end of file diff --git a/docs/zh/operations/requirements.md b/docs/zh/operations/requirements.md new file mode 100644 index 00000000000..9dd5553a241 --- /dev/null +++ b/docs/zh/operations/requirements.md @@ -0,0 +1,58 @@ +--- +en_copy: true +--- + +# Requirements {#requirements} + +## CPU {#cpu} + +For installation from prebuilt deb packages, use a CPU with x86\_64 architecture and support for SSE 4.2 instructions. To run ClickHouse with processors that do not support SSE 4.2 or have AArch64 or PowerPC64LE architecture, you should build ClickHouse from sources. + +ClickHouse implements parallel data processing and uses all the hardware resources available. When choosing a processor, take into account that ClickHouse works more efficiently at configurations with a large number of cores but a lower clock rate than at configurations with fewer cores and a higher clock rate. For example, 16 cores with 2600 MHz is preferable to 8 cores with 3600 MHz. + +Use of **Turbo Boost** and **hyper-threading** technologies is recommended. It significantly improves performance with a typical load. + +## RAM {#ram} + +We recommend to use a minimum of 4GB of RAM in order to perform non-trivial queries. The ClickHouse server can run with a much smaller amount of RAM, but it requires memory for processing queries. + +The required volume of RAM depends on: + +- The complexity of queries. +- The amount of data that is processed in queries. + +To calculate the required volume of RAM, you should estimate the size of temporary data for [GROUP BY](../query_language/select.md#select-group-by-clause), [DISTINCT](../query_language/select.md#select-distinct), [JOIN](../query_language/select.md#select-join) and other operations you use. + +ClickHouse can use external memory for temporary data. See [GROUP BY in External Memory](../query_language/select.md#select-group-by-in-external-memory) for details. + +## Swap File {#swap-file} + +Disable the swap file for production environments. + +## Storage Subsystem {#storage-subsystem} + +You need to have 2GB of free disk space to install ClickHouse. + +The volume of storage required for your data should be calculated separately. Assessment should include: + +- Estimation of the data volume. + + You can take a sample of the data and get the average size of a row from it. Then multiply the value by the number of rows you plan to store. + +- The data compression coefficient. 
+ + To estimate the data compression coefficient, load a sample of your data into ClickHouse and compare the actual size of the data with the size of the table stored. For example, clickstream data is usually compressed by 6-10 times. + +To calculate the final volume of data to be stored, apply the compression coefficient to the estimated data volume. If you plan to store data in several replicas, then multiply the estimated volume by the number of replicas. + +## Network {#network} + +If possible, use networks of 10G or higher class. + +The network bandwidth is critical for processing distributed queries with a large amount of intermediate data. In addition, network speed affects replication processes. + +## Software {#software} + +ClickHouse is developed for the Linux family of operating systems. The recommended Linux distribution is Ubuntu. The `tzdata` package should be installed in the system. + +ClickHouse can also work in other operating system families. See details in the [Getting started](../getting_started/index.md) section of the documentation. diff --git a/docs/zh/operations/server_settings/settings.md b/docs/zh/operations/server_settings/settings.md deleted file mode 120000 index 19cd2e82ce7..00000000000 --- a/docs/zh/operations/server_settings/settings.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/operations/server_settings/settings.md \ No newline at end of file diff --git a/docs/zh/operations/server_settings/settings.md b/docs/zh/operations/server_settings/settings.md new file mode 100644 index 00000000000..bfc1aca7217 --- /dev/null +++ b/docs/zh/operations/server_settings/settings.md @@ -0,0 +1,869 @@ +--- +en_copy: true +--- + +# Server Settings {#server-settings} + +## builtin\_dictionaries\_reload\_interval {#builtin-dictionaries-reload-interval} + +The interval in seconds before reloading built-in dictionaries. + +ClickHouse reloads built-in dictionaries every x seconds. This makes it possible to edit dictionaries “on the fly” without restarting the server. + +Default value: 3600. + +**Example** + +``` xml +3600 +``` + +## compression {#server-settings-compression} + +Data compression settings for [MergeTree](../table_engines/mergetree.md)-engine tables. + +!!! warning "Warning" + Don’t use it if you have just started using ClickHouse. + +Configuration template: + +``` xml + + + ... + ... + ... + + ... + +``` + +`` fields: + +- `min_part_size` – The minimum size of a data part. +- `min_part_size_ratio` – The ratio of the data part size to the table size. +- `method` – Compression method. Acceptable values: `lz4` or `zstd`. + +You can configure multiple `` sections. + +Actions when conditions are met: + +- If a data part matches a condition set, ClickHouse uses the specified compression method. +- If a data part matches multiple condition sets, ClickHouse uses the first matched condition set. + +If no conditions met for a data part, ClickHouse uses the `lz4` compression. + +**Example** + +``` xml + + + 10000000000 + 0.01 + zstd + + +``` + +## default\_database {#default-database} + +The default database. + +To get a list of databases, use the [SHOW DATABASES](../../query_language/show.md#show-databases) query. + +**Example** + +``` xml +default +``` + +## default\_profile {#default-profile} + +Default settings profile. + +Settings profiles are located in the file specified in the parameter `user_config`. + +**Example** + +``` xml +default +``` + +## dictionaries\_config {#server_settings-dictionaries_config} + +The path to the config file for external dictionaries. 
+ +Path: + +- Specify the absolute path or the path relative to the server config file. +- The path can contain wildcards \* and ?. + +See also “[External dictionaries](../../query_language/dicts/external_dicts.md)”. + +**Example** + +``` xml +*_dictionary.xml +``` + +## dictionaries\_lazy\_load {#server_settings-dictionaries_lazy_load} + +Lazy loading of dictionaries. + +If `true`, then each dictionary is created on first use. If dictionary creation failed, the function that was using the dictionary throws an exception. + +If `false`, all dictionaries are created when the server starts, and if there is an error, the server shuts down. + +The default is `true`. + +**Example** + +``` xml +true +``` + +## format\_schema\_path {#server_settings-format_schema_path} + +The path to the directory with the schemes for the input data, such as schemas for the [CapnProto](../../interfaces/formats.md#capnproto) format. + +**Example** + +``` xml + + format_schemas/ +``` + +## graphite {#server_settings-graphite} + +Sending data to [Graphite](https://github.com/graphite-project). + +Settings: + +- host – The Graphite server. +- port – The port on the Graphite server. +- interval – The interval for sending, in seconds. +- timeout – The timeout for sending data, in seconds. +- root\_path – Prefix for keys. +- metrics – Sending data from the [system.metrics](../system_tables.md#system_tables-metrics) table. +- events – Sending deltas data accumulated for the time period from the [system.events](../system_tables.md#system_tables-events) table. +- events\_cumulative – Sending cumulative data from the [system.events](../system_tables.md#system_tables-events) table. +- asynchronous\_metrics – Sending data from the [system.asynchronous\_metrics](../system_tables.md#system_tables-asynchronous_metrics) table. + +You can configure multiple `` clauses. For instance, you can use this for sending different data at different intervals. + +**Example** + +``` xml + + localhost + 42000 + 0.1 + 60 + one_min + true + true + false + true + +``` + +## graphite\_rollup {#server_settings-graphite-rollup} + +Settings for thinning data for Graphite. + +For more details, see [GraphiteMergeTree](../table_engines/graphitemergetree.md). + +**Example** + +``` xml + + + max + + 0 + 60 + + + 3600 + 300 + + + 86400 + 3600 + + + +``` + +## http\_port/https\_port {#http-porthttps-port} + +The port for connecting to the server over HTTP(s). + +If `https_port` is specified, [openSSL](#server_settings-openssl) must be configured. + +If `http_port` is specified, the OpenSSL configuration is ignored even if it is set. + +**Example** + +``` xml +0000 +``` + +## http\_server\_default\_response {#server_settings-http_server_default_response} + +The page that is shown by default when you access the ClickHouse HTTP(s) server. +The default value is “Ok.” (with a line feed at the end) + +**Example** + +Opens `https://tabix.io/` when accessing `http://localhost: http_port`. + +``` xml + +
+<http_server_default_response>
+    <![CDATA[<html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>]]>
+</http_server_default_response>
    +``` + +## include\_from {#server_settings-include_from} + +The path to the file with substitutions. + +For more information, see the section “[Configuration files](../configuration_files.md#configuration_files)”. + +**Example** + +``` xml +/etc/metrica.xml +``` + +## interserver\_http\_port {#interserver-http-port} + +Port for exchanging data between ClickHouse servers. + +**Example** + +``` xml +9009 +``` + +## interserver\_http\_host {#interserver-http-host} + +The hostname that can be used by other servers to access this server. + +If omitted, it is defined in the same way as the `hostname-f` command. + +Useful for breaking away from a specific network interface. + +**Example** + +``` xml +example.yandex.ru +``` + +## interserver\_http\_credentials {#server-settings-interserver-http-credentials} + +The username and password used to authenticate during [replication](../table_engines/replication.md) with the Replicated\* engines. These credentials are used only for communication between replicas and are unrelated to credentials for ClickHouse clients. The server is checking these credentials for connecting replicas and use the same credentials when connecting to other replicas. So, these credentials should be set the same for all replicas in a cluster. +By default, the authentication is not used. + +This section contains the following parameters: + +- `user` — username. +- `password` — password. + +**Example** + +``` xml + + admin + 222 + +``` + +## keep\_alive\_timeout {#keep-alive-timeout} + +The number of seconds that ClickHouse waits for incoming requests before closing the connection. Defaults to 3 seconds. + +**Example** + +``` xml +3 +``` + +## listen\_host {#server_settings-listen_host} + +Restriction on hosts that requests can come from. If you want the server to answer all of them, specify `::`. + +Examples: + +``` xml +::1 +127.0.0.1 +``` + +## logger {#server_settings-logger} + +Logging settings. + +Keys: + +- level – Logging level. Acceptable values: `trace`, `debug`, `information`, `warning`, `error`. +- log – The log file. Contains all the entries according to `level`. +- errorlog – Error log file. +- size – Size of the file. Applies to `log`and`errorlog`. Once the file reaches `size`, ClickHouse archives and renames it, and creates a new log file in its place. +- count – The number of archived log files that ClickHouse stores. + +**Example** + +``` xml + + trace + /var/log/clickhouse-server/clickhouse-server.log + /var/log/clickhouse-server/clickhouse-server.err.log + 1000M + 10 + +``` + +Writing to the syslog is also supported. Config example: + +``` xml + + 1 + +
+        <address>syslog.remote:10514</address>
+        <hostname>myhost.local</hostname>
+        <facility>LOG_LOCAL6</facility>
+        <format>syslog</format>
+    </syslog>
+</logger>
    +``` + +Keys: + +- use\_syslog — Required setting if you want to write to the syslog. +- address — The host\[:port\] of syslogd. If omitted, the local daemon is used. +- hostname — Optional. The name of the host that logs are sent from. +- facility — [The syslog facility keyword](https://en.wikipedia.org/wiki/Syslog#Facility) in uppercase letters with the “LOG\_” prefix: (`LOG_USER`, `LOG_DAEMON`, `LOG_LOCAL3`, and so on). + Default value: `LOG_USER` if `address` is specified, `LOG_DAEMON otherwise.` +- format – Message format. Possible values: `bsd` and `syslog.` + +## macros {#macros} + +Parameter substitutions for replicated tables. + +Can be omitted if replicated tables are not used. + +For more information, see the section “[Creating replicated tables](../../operations/table_engines/replication.md)”. + +**Example** + +``` xml + +``` + +## mark\_cache\_size {#server-mark-cache-size} + +Approximate size (in bytes) of the cache of marks used by table engines of the [MergeTree](../table_engines/mergetree.md) family. + +The cache is shared for the server and memory is allocated as needed. The cache size must be at least 5368709120. + +**Example** + +``` xml +5368709120 +``` + +## max\_concurrent\_queries {#max-concurrent-queries} + +The maximum number of simultaneously processed requests. + +**Example** + +``` xml +100 +``` + +## max\_connections {#max-connections} + +The maximum number of inbound connections. + +**Example** + +``` xml +4096 +``` + +## max\_open\_files {#max-open-files} + +The maximum number of open files. + +By default: `maximum`. + +We recommend using this option in Mac OS X since the `getrlimit()` function returns an incorrect value. + +**Example** + +``` xml +262144 +``` + +## max\_table\_size\_to\_drop {#max-table-size-to-drop} + +Restriction on deleting tables. + +If the size of a [MergeTree](../table_engines/mergetree.md) table exceeds `max_table_size_to_drop` (in bytes), you can’t delete it using a DROP query. + +If you still need to delete the table without restarting the ClickHouse server, create the `/flags/force_drop_table` file and run the DROP query. + +Default value: 50 GB. + +The value 0 means that you can delete all tables without any restrictions. + +**Example** + +``` xml +0 +``` + +## merge\_tree {#server_settings-merge_tree} + +Fine tuning for tables in the [MergeTree](../table_engines/mergetree.md). + +For more information, see the MergeTreeSettings.h header file. + +**Example** + +``` xml + + 5 + +``` + +## openSSL {#server_settings-openssl} + +SSL client/server configuration. + +Support for SSL is provided by the `libpoco` library. The interface is described in the file [SSLManager.h](https://github.com/ClickHouse-Extras/poco/blob/master/NetSSL_OpenSSL/include/Poco/Net/SSLManager.h) + +Keys for server/client settings: + +- privateKeyFile – The path to the file with the secret key of the PEM certificate. The file may contain a key and certificate at the same time. +- certificateFile – The path to the client/server certificate file in PEM format. You can omit it if `privateKeyFile` contains the certificate. +- caConfig – The path to the file or directory that contains trusted root certificates. +- verificationMode – The method for checking the node’s certificates. Details are in the description of the [Context](https://github.com/ClickHouse-Extras/poco/blob/master/NetSSL_OpenSSL/include/Poco/Net/Context.h) class. Possible values: `none`, `relaxed`, `strict`, `once`. +- verificationDepth – The maximum length of the verification chain. 
Verification will fail if the certificate chain length exceeds the set value. +- loadDefaultCAFile – Indicates that built-in CA certificates for OpenSSL will be used. Acceptable values: `true`, `false`. \| +- cipherList – Supported OpenSSL encryptions. For example: `ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH`. +- cacheSessions – Enables or disables caching sessions. Must be used in combination with `sessionIdContext`. Acceptable values: `true`, `false`. +- sessionIdContext – A unique set of random characters that the server appends to each generated identifier. The length of the string must not exceed `SSL_MAX_SSL_SESSION_ID_LENGTH`. This parameter is always recommended since it helps avoid problems both if the server caches the session and if the client requested caching. Default value: `${application.name}`. +- sessionCacheSize – The maximum number of sessions that the server caches. Default value: 1024\*20. 0 – Unlimited sessions. +- sessionTimeout – Time for caching the session on the server. +- extendedVerification – Automatically extended verification of certificates after the session ends. Acceptable values: `true`, `false`. +- requireTLSv1 – Require a TLSv1 connection. Acceptable values: `true`, `false`. +- requireTLSv1\_1 – Require a TLSv1.1 connection. Acceptable values: `true`, `false`. +- requireTLSv1 – Require a TLSv1.2 connection. Acceptable values: `true`, `false`. +- fips – Activates OpenSSL FIPS mode. Supported if the library’s OpenSSL version supports FIPS. +- privateKeyPassphraseHandler – Class (PrivateKeyPassphraseHandler subclass) that requests the passphrase for accessing the private key. For example: ``, `KeyFileHandler`, `test`, ``. +- invalidCertificateHandler – Class (a subclass of CertificateHandler) for verifying invalid certificates. For example: ` ConsoleCertificateHandler ` . +- disableProtocols – Protocols that are not allowed to use. +- preferServerCiphers – Preferred server ciphers on the client. + +**Example of settings:** + +``` xml + + + + /etc/clickhouse-server/server.crt + /etc/clickhouse-server/server.key + + /etc/clickhouse-server/dhparam.pem + none + true + true + sslv2,sslv3 + true + + + true + true + sslv2,sslv3 + true + + + + RejectCertificateHandler + + + +``` + +## part\_log {#server_settings-part-log} + +Logging events that are associated with [MergeTree](../table_engines/mergetree.md). For instance, adding or merging data. You can use the log to simulate merge algorithms and compare their characteristics. You can visualize the merge process. + +Queries are logged in the [system.part\_log](../system_tables.md#system_tables-part-log) table, not in a separate file. You can configure the name of this table in the `table` parameter (see below). + +Use the following parameters to configure logging: + +- `database` – Name of the database. +- `table` – Name of the system table. +- `partition_by` – Sets a [custom partitioning key](../../operations/table_engines/custom_partitioning_key.md). +- `flush_interval_milliseconds` – Interval for flushing data from the buffer in memory to the table. + +**Example** + +``` xml + + system +
+    <table>part_log</table>
    + toMonday(event_date) + 7500 + +``` + +## path {#server_settings-path} + +The path to the directory containing data. + +!!! note "Note" + The trailing slash is mandatory. + +**Example** + +``` xml +/var/lib/clickhouse/ +``` + +## query\_log {#server_settings-query-log} + +Setting for logging queries received with the [log\_queries=1](../settings/settings.md) setting. + +Queries are logged in the [system.query\_log](../system_tables.md#system_tables-query_log) table, not in a separate file. You can change the name of the table in the `table` parameter (see below). + +Use the following parameters to configure logging: + +- `database` – Name of the database. +- `table` – Name of the system table the queries will be logged in. +- `partition_by` – Sets a [custom partitioning key](../../operations/table_engines/custom_partitioning_key.md) for a table. +- `flush_interval_milliseconds` – Interval for flushing data from the buffer in memory to the table. + +If the table doesn’t exist, ClickHouse will create it. If the structure of the query log changed when the ClickHouse server was updated, the table with the old structure is renamed, and a new table is created automatically. + +**Example** + +``` xml + + system + query_log
+    <partition_by>toMonday(event_date)</partition_by>
+    <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+</query_log>
    +``` + +## query\_thread\_log {#server_settings-query-thread-log} + +Setting for logging threads of queries received with the [log\_query\_threads=1](../settings/settings.md#settings-log-query-threads) setting. + +Queries are logged in the [system.query\_thread\_log](../system_tables.md#system_tables-query-thread-log) table, not in a separate file. You can change the name of the table in the `table` parameter (see below). + +Use the following parameters to configure logging: + +- `database` – Name of the database. +- `table` – Name of the system table the queries will be logged in. +- `partition_by` – Sets a [custom partitioning key](../../operations/table_engines/custom_partitioning_key.md) for a system table. +- `flush_interval_milliseconds` – Interval for flushing data from the buffer in memory to the table. + +If the table doesn’t exist, ClickHouse will create it. If the structure of the query thread log changed when the ClickHouse server was updated, the table with the old structure is renamed, and a new table is created automatically. + +**Example** + +``` xml + + system + query_thread_log
+    <partition_by>toMonday(event_date)</partition_by>
+    <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+</query_thread_log>
    +``` + +## trace\_log {#server_settings-trace_log} + +Settings for the [trace\_log](../system_tables.md#system_tables-trace_log) system table operation. + +Parameters: + +- `database` — Database for storing a table. +- `table` — Table name. +- `partition_by` — [Custom partitioning key](../../operations/table_engines/custom_partitioning_key.md) for a system table. +- `flush_interval_milliseconds` — Interval for flushing data from the buffer in memory to the table. + +The default server configuration file `config.xml` contains the following settings section: + +``` xml + + system + trace_log
+    <partition_by>toYYYYMM(event_date)</partition_by>
+    <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+</trace_log>
    +``` + +## query\_masking\_rules {#query-masking-rules} + +Regexp-based rules, which will be applied to queries as well as all log messages before storing them in server logs, +`system.query_log`, `system.text_log`, `system.processes` table, and in logs sent to the client. That allows preventing +sensitive data leakage from SQL queries (like names, emails, personal +identifiers or credit card numbers) to logs. + +**Example** + +``` xml + + + hide SSN + (^|\D)\d{3}-\d{2}-\d{4}($|\D) + 000-00-0000 + + +``` + +Config fields: +- `name` - name for the rule (optional) +- `regexp` - RE2 compatible regular expression (mandatory) +- `replace` - substitution string for sensitive data (optional, by default - six asterisks) + +The masking rules are applied to the whole query (to prevent leaks of sensitive data from malformed / non-parsable queries). + +`system.events` table have counter `QueryMaskingRulesMatch` which have an overall number of query masking rules matches. + +For distributed queries each server have to be configured separately, otherwise, subqueries passed to other +nodes will be stored without masking. + +## remote\_servers {#server-settings-remote-servers} + +Configuration of clusters used by the [Distributed](../../operations/table_engines/distributed.md) table engine and by the `cluster` table function. + +**Example** + +``` xml + +``` + +For the value of the `incl` attribute, see the section “[Configuration files](../configuration_files.md#configuration_files)”. + +**See Also** + +- [skip\_unavailable\_shards](../settings/settings.md#settings-skip_unavailable_shards) + +## timezone {#server_settings-timezone} + +The server’s time zone. + +Specified as an IANA identifier for the UTC timezone or geographic location (for example, Africa/Abidjan). + +The time zone is necessary for conversions between String and DateTime formats when DateTime fields are output to text format (printed on the screen or in a file), and when getting DateTime from a string. Besides, the time zone is used in functions that work with the time and date if they didn’t receive the time zone in the input parameters. + +**Example** + +``` xml +Europe/Moscow +``` + +## tcp\_port {#server_settings-tcp_port} + +Port for communicating with clients over the TCP protocol. + +**Example** + +``` xml +9000 +``` + +## tcp\_port\_secure {#server_settings-tcp_port-secure} + +TCP port for secure communication with clients. Use it with [OpenSSL](#server_settings-openssl) settings. + +**Possible values** + +Positive integer. + +**Default value** + +``` xml +9440 +``` + +## mysql\_port {#server_settings-mysql_port} + +Port for communicating with clients over MySQL protocol. + +**Possible values** + +Positive integer. + +Example + +``` xml +9004 +``` + +## tmp\_path {#server-settings-tmp_path} + +Path to temporary data for processing large queries. + +!!! note "Note" + The trailing slash is mandatory. + +**Example** + +``` xml +/var/lib/clickhouse/tmp/ +``` + +## tmp\_policy {#server-settings-tmp-policy} + +Policy from [`storage_configuration`](../table_engines/mergetree.md#table_engine-mergetree-multiple-volumes) to store temporary files. +If not set [`tmp_path`](#server-settings-tmp_path) is used, otherwise it is ignored. + +!!! 
note "Note" + - `move_factor` is ignored +- `keep_free_space_bytes` is ignored +- `max_data_part_size_bytes` is ignored +- you must have exactly one volume in that policy + +## uncompressed\_cache\_size {#server-settings-uncompressed_cache_size} + +Cache size (in bytes) for uncompressed data used by table engines from the [MergeTree](../table_engines/mergetree.md). + +There is one shared cache for the server. Memory is allocated on demand. The cache is used if the option [use\_uncompressed\_cache](../settings/settings.md#setting-use_uncompressed_cache) is enabled. + +The uncompressed cache is advantageous for very short queries in individual cases. + +**Example** + +``` xml +8589934592 +``` + +## user\_files\_path {#server_settings-user_files_path} + +The directory with user files. Used in the table function [file()](../../query_language/table_functions/file.md). + +**Example** + +``` xml +/var/lib/clickhouse/user_files/ +``` + +## users\_config {#users-config} + +Path to the file that contains: + +- User configurations. +- Access rights. +- Settings profiles. +- Quota settings. + +**Example** + +``` xml +users.xml +``` + +## zookeeper {#server-settings_zookeeper} + +Contains settings that allow ClickHouse to interact with a [ZooKeeper](http://zookeeper.apache.org/) cluster. + +ClickHouse uses ZooKeeper for storing metadata of replicas when using replicated tables. If replicated tables are not used, this section of parameters can be omitted. + +This section contains the following parameters: + +- `node` — ZooKeeper endpoint. You can set multiple endpoints. + + For example: + + + +``` xml + + example_host + 2181 + +``` + + The `index` attribute specifies the node order when trying to connect to the ZooKeeper cluster. + +- `session_timeout` — Maximum timeout for the client session in milliseconds. +- `root` — The [znode](http://zookeeper.apache.org/doc/r3.5.5/zookeeperOver.html#Nodes+and+ephemeral+nodes) that is used as the root for znodes used by the ClickHouse server. Optional. +- `identity` — User and password, that can be required by ZooKeeper to give access to requested znodes. Optional. + +**Example configuration** + +``` xml + + + example1 + 2181 + + + example2 + 2181 + + 30000 + 10000 + + /path/to/zookeeper/node + + user:password + +``` + +**See Also** + +- [Replication](../../operations/table_engines/replication.md) +- [ZooKeeper Programmer’s Guide](http://zookeeper.apache.org/doc/current/zookeeperProgrammers.html) + +## use\_minimalistic\_part\_header\_in\_zookeeper {#server-settings-use_minimalistic_part_header_in_zookeeper} + +Storage method for data part headers in ZooKeeper. + +This setting only applies to the `MergeTree` family. It can be specified: + +- Globally in the [merge\_tree](#server_settings-merge_tree) section of the `config.xml` file. + + ClickHouse uses the setting for all the tables on the server. You can change the setting at any time. Existing tables change their behaviour when the setting changes. + +- For each table. + + When creating a table, specify the corresponding [engine setting](../table_engines/mergetree.md#table_engine-mergetree-creating-a-table). The behaviour of an existing table with this setting does not change, even if the global setting changes. + +**Possible values** + +- 0 — Functionality is turned off. +- 1 — Functionality is turned on. + +If `use_minimalistic_part_header_in_zookeeper = 1`, then [replicated](../table_engines/replication.md) tables store the headers of the data parts compactly using a single `znode`. 
If the table contains many columns, this storage method significantly reduces the volume of the data stored in Zookeeper. + +!!! attention "Attention" + After applying `use_minimalistic_part_header_in_zookeeper = 1`, you can’t downgrade the ClickHouse server to a version that doesn’t support this setting. Be careful when upgrading ClickHouse on servers in a cluster. Don’t upgrade all the servers at once. It is safer to test new versions of ClickHouse in a test environment, or on just a few servers of a cluster. + + Data part headers already stored with this setting can't be restored to their previous (non-compact) representation. + +**Default value:** 0. + +## disable\_internal\_dns\_cache {#server-settings-disable-internal-dns-cache} + +Disables the internal DNS cache. Recommended for operating ClickHouse in systems +with frequently changing infrastructure such as Kubernetes. + +**Default value:** 0. + +## dns\_cache\_update\_period {#server-settings-dns-cache-update-period} + +The period of updating IP addresses stored in the ClickHouse internal DNS cache (in seconds). +The update is performed asynchronously, in a separate system thread. + +**Default value**: 15. + +[Original article](https://clickhouse.tech/docs/en/operations/server_settings/settings/) diff --git a/docs/zh/operations/settings/constraints_on_settings.md b/docs/zh/operations/settings/constraints_on_settings.md deleted file mode 120000 index 4dacf908662..00000000000 --- a/docs/zh/operations/settings/constraints_on_settings.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/operations/settings/constraints_on_settings.md \ No newline at end of file diff --git a/docs/zh/operations/settings/constraints_on_settings.md b/docs/zh/operations/settings/constraints_on_settings.md new file mode 100644 index 00000000000..b0037813199 --- /dev/null +++ b/docs/zh/operations/settings/constraints_on_settings.md @@ -0,0 +1,72 @@ +--- +en_copy: true +--- + +# Constraints on Settings {#constraints-on-settings} + +The constraints on settings can be defined in the `profiles` section of the `user.xml` configuration file and prohibit users from changing some of the settings with the `SET` query. +The constraints are defined as the following: + +``` xml + + + + + lower_boundary + + + upper_boundary + + + lower_boundary + upper_boundary + + + + + + + +``` + +If the user tries to violate the constraints an exception is thrown and the setting isn’t changed. +There are supported three types of constraints: `min`, `max`, `readonly`. The `min` and `max` constraints specify upper and lower boundaries for a numeric setting and can be used in combination. The `readonly` constraint specifies that the user cannot change the corresponding setting at all. + +**Example:** Let `users.xml` includes lines: + +``` xml + + + 10000000000 + 0 + ... + + + 5000000000 + 20000000000 + + + + + + + +``` + +The following queries all throw exceptions: + +``` sql +SET max_memory_usage=20000000001; +SET max_memory_usage=4999999999; +SET force_index_by_date=1; +``` + +``` text +Code: 452, e.displayText() = DB::Exception: Setting max_memory_usage should not be greater than 20000000000. +Code: 452, e.displayText() = DB::Exception: Setting max_memory_usage should not be less than 5000000000. +Code: 452, e.displayText() = DB::Exception: Setting force_index_by_date should not be changed. 
+``` + +**Note:** the `default` profile has special handling: all the constraints defined for the `default` profile become the default constraints, so they restrict all the users until they’re overridden explicitly for these users. + +[Original article](https://clickhouse.tech/docs/en/operations/settings/constraints_on_settings/) diff --git a/docs/zh/operations/settings/permissions_for_queries.md b/docs/zh/operations/settings/permissions_for_queries.md deleted file mode 120000 index ce8473bf01c..00000000000 --- a/docs/zh/operations/settings/permissions_for_queries.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/operations/settings/permissions_for_queries.md \ No newline at end of file diff --git a/docs/zh/operations/settings/permissions_for_queries.md b/docs/zh/operations/settings/permissions_for_queries.md new file mode 100644 index 00000000000..60942e1926b --- /dev/null +++ b/docs/zh/operations/settings/permissions_for_queries.md @@ -0,0 +1,58 @@ +--- +en_copy: true +--- + +# Permissions for Queries {#permissions_for_queries} + +Queries in ClickHouse can be divided into several types: + +1. Read data queries: `SELECT`, `SHOW`, `DESCRIBE`, `EXISTS`. +2. Write data queries: `INSERT`, `OPTIMIZE`. +3. Change settings query: `SET`, `USE`. +4. [DDL](https://en.wikipedia.org/wiki/Data_definition_language) queries: `CREATE`, `ALTER`, `RENAME`, `ATTACH`, `DETACH`, `DROP` `TRUNCATE`. +5. `KILL QUERY`. + +The following settings regulate user permissions by the type of query: + +- [readonly](#settings_readonly) — Restricts permissions for all types of queries except DDL queries. +- [allow\_ddl](#settings_allow_ddl) — Restricts permissions for DDL queries. + +`KILL QUERY` can be performed with any settings. + +## readonly {#settings_readonly} + +Restricts permissions for reading data, write data and change settings queries. + +See how the queries are divided into types [above](#permissions_for_queries). + +Possible values: + +- 0 — All queries are allowed. +- 1 — Only read data queries are allowed. +- 2 — Read data and change settings queries are allowed. + +After setting `readonly = 1`, the user can’t change `readonly` and `allow_ddl` settings in the current session. + +When using the `GET` method in the [HTTP interface](../../interfaces/http.md), `readonly = 1` is set automatically. To modify data, use the `POST` method. + +Setting `readonly = 1` prohibit the user from changing all the settings. There is a way to prohibit the user +from changing only specific settings, for details see [constraints on settings](constraints_on_settings.md). + +Default value: 0 + +## allow\_ddl {#settings_allow_ddl} + +Allows or denies [DDL](https://en.wikipedia.org/wiki/Data_definition_language) queries. + +See how the queries are divided into types [above](#permissions_for_queries). + +Possible values: + +- 0 — DDL queries are not allowed. +- 1 — DDL queries are allowed. + +You can’t execute `SET allow_ddl = 1` if `allow_ddl = 0` for the current session. 
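+
+A minimal illustration of this restriction (hypothetical session; the exact error text may vary between versions):
+
+``` sql
+SET allow_ddl = 0;
+CREATE TABLE t (x UInt8) ENGINE = Memory; -- throws: DDL queries are not allowed
+SET allow_ddl = 1;                        -- also throws: cannot be re-enabled in this session
+```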
+ +Default value: 1 + +[Original article](https://clickhouse.tech/docs/en/operations/settings/permissions_for_queries/) diff --git a/docs/zh/operations/settings/settings.md b/docs/zh/operations/settings/settings.md deleted file mode 120000 index 0c8df3cfc90..00000000000 --- a/docs/zh/operations/settings/settings.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/operations/settings/settings.md \ No newline at end of file diff --git a/docs/zh/operations/settings/settings.md b/docs/zh/operations/settings/settings.md new file mode 100644 index 00000000000..0475642124a --- /dev/null +++ b/docs/zh/operations/settings/settings.md @@ -0,0 +1,1199 @@ +--- +en_copy: true +--- + +# Settings {#settings} + +## distributed\_product\_mode {#distributed-product-mode} + +Changes the behavior of [distributed subqueries](../../query_language/select.md). + +ClickHouse applies this setting when the query contains the product of distributed tables, i.e. when the query for a distributed table contains a non-GLOBAL subquery for the distributed table. + +Restrictions: + +- Only applied for IN and JOIN subqueries. +- Only if the FROM section uses a distributed table containing more than one shard. +- If the subquery concerns a distributed table containing more than one shard. +- Not used for a table-valued [remote](../../query_language/table_functions/remote.md) function. + +Possible values: + +- `deny` — Default value. Prohibits using these types of subqueries (returns the “Double-distributed in/JOIN subqueries is denied” exception). +- `local` — Replaces the database and table in the subquery with local ones for the destination server (shard), leaving the normal `IN`/`JOIN.` +- `global` — Replaces the `IN`/`JOIN` query with `GLOBAL IN`/`GLOBAL JOIN.` +- `allow` — Allows the use of these types of subqueries. + +## enable\_optimize\_predicate\_expression {#enable-optimize-predicate-expression} + +Turns on predicate pushdown in `SELECT` queries. + +Predicate pushdown may significantly reduce network traffic for distributed queries. + +Possible values: + +- 0 — Disabled. +- 1 — Enabled. + +Default value: 1. + +Usage + +Consider the following queries: + +1. `SELECT count() FROM test_table WHERE date = '2018-10-10'` +2. `SELECT count() FROM (SELECT * FROM test_table) WHERE date = '2018-10-10'` + +If `enable_optimize_predicate_expression = 1`, then the execution time of these queries is equal because ClickHouse applies `WHERE` to the subquery when processing it. + +If `enable_optimize_predicate_expression = 0`, then the execution time of the second query is much longer, because the `WHERE` clause applies to all the data after the subquery finishes. + +## fallback\_to\_stale\_replicas\_for\_distributed\_queries {#settings-fallback_to_stale_replicas_for_distributed_queries} + +Forces a query to an out-of-date replica if updated data is not available. See [Replication](../table_engines/replication.md). + +ClickHouse selects the most relevant from the outdated replicas of the table. + +Used when performing `SELECT` from a distributed table that points to replicated tables. + +By default, 1 (enabled). + +## force\_index\_by\_date {#settings-force_index_by_date} + +Disables query execution if the index can’t be used by date. + +Works with tables in the MergeTree family. + +If `force_index_by_date=1`, ClickHouse checks whether the query has a date key condition that can be used for restricting data ranges. If there is no suitable condition, it throws an exception. 
However, it does not check whether the condition reduces the amount of data to read. For example, the condition `Date != ' 2000-01-01 '` is acceptable even when it matches all the data in the table (i.e., running the query requires a full scan). For more information about ranges of data in MergeTree tables, see [MergeTree](../table_engines/mergetree.md). + +## force\_primary\_key {#force-primary-key} + +Disables query execution if indexing by the primary key is not possible. + +Works with tables in the MergeTree family. + +If `force_primary_key=1`, ClickHouse checks to see if the query has a primary key condition that can be used for restricting data ranges. If there is no suitable condition, it throws an exception. However, it does not check whether the condition reduces the amount of data to read. For more information about data ranges in MergeTree tables, see [MergeTree](../table_engines/mergetree.md). + +## format\_schema {#format-schema} + +This parameter is useful when you are using formats that require a schema definition, such as [Cap’n Proto](https://capnproto.org/) or [Protobuf](https://developers.google.com/protocol-buffers/). The value depends on the format. + +## fsync\_metadata {#fsync-metadata} + +Enables or disables [fsync](http://pubs.opengroup.org/onlinepubs/9699919799/functions/fsync.html) when writing `.sql` files. Enabled by default. + +It makes sense to disable it if the server has millions of tiny tables that are constantly being created and destroyed. + +## enable\_http\_compression {#settings-enable_http_compression} + +Enables or disables data compression in the response to an HTTP request. + +For more information, read the [HTTP interface description](../../interfaces/http.md). + +Possible values: + +- 0 — Disabled. +- 1 — Enabled. + +Default value: 0. + +## http\_zlib\_compression\_level {#settings-http_zlib_compression_level} + +Sets the level of data compression in the response to an HTTP request if [enable\_http\_compression = 1](#settings-enable_http_compression). + +Possible values: Numbers from 1 to 9. + +Default value: 3. + +## http\_native\_compression\_disable\_checksumming\_on\_decompress {#settings-http_native_compression_disable_checksumming_on_decompress} + +Enables or disables checksum verification when decompressing the HTTP POST data from the client. Used only for ClickHouse native compression format (not used with `gzip` or `deflate`). + +For more information, read the [HTTP interface description](../../interfaces/http.md). + +Possible values: + +- 0 — Disabled. +- 1 — Enabled. + +Default value: 0. + +## send\_progress\_in\_http\_headers {#settings-send_progress_in_http_headers} + +Enables or disables `X-ClickHouse-Progress` HTTP response headers in `clickhouse-server` responses. + +For more information, read the [HTTP interface description](../../interfaces/http.md). + +Possible values: + +- 0 — Disabled. +- 1 — Enabled. + +Default value: 0. + +## max\_http\_get\_redirects {#setting-max_http_get_redirects} + +Limits the maximum number of HTTP GET redirect hops for [URL](../table_engines/url.md)-engine tables. The setting applies to both types of tables: those created by the [CREATE TABLE](../../query_language/create/#create-table-query) query and by the [url](../../query_language/table_functions/url.md) table function. + +Possible values: + +- Any positive integer number of hops. +- 0 — No hops allowed. + +Default value: 0. 
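+
+For instance, to allow a single redirect hop when reading through the `url` table function (a sketch; the URL and column structure are placeholders):
+
+``` sql
+SET max_http_get_redirects = 1;
+SELECT * FROM url('https://example.com/data.csv', CSV, 'a String, b UInt32');
+```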
+ +## input\_format\_allow\_errors\_num {#settings-input_format_allow_errors_num} + +Sets the maximum number of acceptable errors when reading from text formats (CSV, TSV, etc.). + +The default value is 0. + +Always pair it with `input_format_allow_errors_ratio`. + +If an error occurred while reading rows but the error counter is still less than `input_format_allow_errors_num`, ClickHouse ignores the row and moves on to the next one. + +If both `input_format_allow_errors_num` and `input_format_allow_errors_ratio` are exceeded, ClickHouse throws an exception. + +## input\_format\_allow\_errors\_ratio {#settings-input_format_allow_errors_ratio} + +Sets the maximum percentage of errors allowed when reading from text formats (CSV, TSV, etc.). +The percentage of errors is set as a floating-point number between 0 and 1. + +The default value is 0. + +Always pair it with `input_format_allow_errors_num`. + +If an error occurred while reading rows but the error counter is still less than `input_format_allow_errors_ratio`, ClickHouse ignores the row and moves on to the next one. + +If both `input_format_allow_errors_num` and `input_format_allow_errors_ratio` are exceeded, ClickHouse throws an exception. + +## input\_format\_values\_interpret\_expressions {#settings-input_format_values_interpret_expressions} + +Enables or disables the full SQL parser if the fast stream parser can’t parse the data. This setting is used only for the [Values](../../interfaces/formats.md#data-format-values) format at the data insertion. For more information about syntax parsing, see the [Syntax](../../query_language/syntax.md) section. + +Possible values: + +- 0 — Disabled. + + In this case, you must provide formatted data. See the [Formats](../../interfaces/formats.md) section. + +- 1 — Enabled. + + In this case, you can use an SQL expression as a value, but data insertion is much slower this way. If you insert only formatted data, then ClickHouse behaves as if the setting value is 0. + +Default value: 1. + +Example of Use + +Insert the [DateTime](../../data_types/datetime.md) type value with the different settings. + +``` sql +SET input_format_values_interpret_expressions = 0; +INSERT INTO datetime_t VALUES (now()) +``` + +``` text +Exception on client: +Code: 27. DB::Exception: Cannot parse input: expected ) before: now()): (at row 1) +``` + +``` sql +SET input_format_values_interpret_expressions = 1; +INSERT INTO datetime_t VALUES (now()) +``` + +``` text +Ok. +``` + +The last query is equivalent to the following: + +``` sql +SET input_format_values_interpret_expressions = 0; +INSERT INTO datetime_t SELECT now() +``` + +``` text +Ok. +``` + +## input\_format\_values\_deduce\_templates\_of\_expressions {#settings-input_format_values_deduce_templates_of_expressions} + +Enables or disables template deduction for an SQL expressions in [Values](../../interfaces/formats.md#data-format-values) format. It allows to parse and interpret expressions in `Values` much faster if expressions in consecutive rows have the same structure. ClickHouse will try to deduce template of an expression, parse the following rows using this template and evaluate the expression on a batch of successfully parsed rows. For the following query: + +``` sql +INSERT INTO test VALUES (lower('Hello')), (lower('world')), (lower('INSERT')), (upper('Values')), ... 
+``` + +- if `input_format_values_interpret_expressions=1` and `format_values_deduce_templates_of_expressions=0` expressions will be interpreted separately for each row (this is very slow for large number of rows) +- if `input_format_values_interpret_expressions=0` and `format_values_deduce_templates_of_expressions=1` expressions in the first, second and third rows will be parsed using template `lower(String)` and interpreted together, expression is the forth row will be parsed with another template (`upper(String)`) +- if `input_format_values_interpret_expressions=1` and `format_values_deduce_templates_of_expressions=1` - the same as in previous case, but also allows fallback to interpreting expressions separately if it’s not possible to deduce template. + +Enabled by default. + +## input\_format\_values\_accurate\_types\_of\_literals {#settings-input-format-values-accurate-types-of-literals} + +This setting is used only when `input_format_values_deduce_templates_of_expressions = 1`. It can happen, that expressions for some column have the same structure, but contain numeric literals of different types, e.g + +``` sql +(..., abs(0), ...), -- UInt64 literal +(..., abs(3.141592654), ...), -- Float64 literal +(..., abs(-1), ...), -- Int64 literal +``` + +When this setting is enabled, ClickHouse will check the actual type of literal and will use an expression template of the corresponding type. In some cases, it may significantly slow down expression evaluation in `Values`. +When disabled, ClickHouse may use more general type for some literals (e.g. `Float64` or `Int64` instead of `UInt64` for `42`), but it may cause overflow and precision issues. +Enabled by default. + +## input\_format\_defaults\_for\_omitted\_fields {#session_settings-input_format_defaults_for_omitted_fields} + +When performing `INSERT` queries, replace omitted input column values with default values of the respective columns. This option only applies to [JSONEachRow](../../interfaces/formats.md#jsoneachrow), [CSV](../../interfaces/formats.md#csv) and [TabSeparated](../../interfaces/formats.md#tabseparated) formats. + +!!! note "Note" + When this option is enabled, extended table metadata are sent from server to client. It consumes additional computing resources on the server and can reduce performance. + +Possible values: + +- 0 — Disabled. +- 1 — Enabled. + +Default value: 1. + +## input\_format\_tsv\_empty\_as\_default {#settings-input-format-tsv-empty-as-default} + +When enabled, replace empty input fields in TSV with default values. For complex default expressions `input_format_defaults_for_omitted_fields` must be enabled too. + +Disabled by default. + +## input\_format\_null\_as\_default {#settings-input-format-null-as-default} + +Enables or disables using default values if input data contain `NULL`, but data type of the corresponding column in not `Nullable(T)` (for text input formats). + +## input\_format\_skip\_unknown\_fields {#settings-input-format-skip-unknown-fields} + +Enables or disables skipping insertion of extra data. + +When writing data, ClickHouse throws an exception if input data contain columns that do not exist in the target table. If skipping is enabled, ClickHouse doesn’t insert extra data and doesn’t throw an exception. 
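+
+For instance (a hypothetical sketch; table `t` has only columns `a` and `b`, so the extra field `c` is silently skipped):
+
+``` sql
+SET input_format_skip_unknown_fields = 1;
+INSERT INTO t FORMAT JSONEachRow {"a": 1, "b": 2, "c": 3}
+```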
+
+Supported formats:
+
+- [JSONEachRow](../../interfaces/formats.md#jsoneachrow)
+- [CSVWithNames](../../interfaces/formats.md#csvwithnames)
+- [TabSeparatedWithNames](../../interfaces/formats.md#tabseparatedwithnames)
+- [TSKV](../../interfaces/formats.md#tskv)
+
+Possible values:
+
+- 0 — Disabled.
+- 1 — Enabled.
+
+Default value: 0.
+
+## input\_format\_import\_nested\_json {#settings-input_format_import_nested_json}
+
+Enables or disables the insertion of JSON data with nested objects.
+
+Supported formats:
+
+- [JSONEachRow](../../interfaces/formats.md#jsoneachrow)
+
+Possible values:
+
+- 0 — Disabled.
+- 1 — Enabled.
+
+Default value: 0.
+
+See also:
+
+- [Usage of Nested Structures](../../interfaces/formats.md#jsoneachrow-nested) with the `JSONEachRow` format.
+
+## input\_format\_with\_names\_use\_header {#settings-input-format-with-names-use-header}
+
+Enables or disables checking the column order when inserting data.
+
+To improve insert performance, we recommend disabling this check if you are sure that the column order of the input data is the same as in the target table.
+
+Supported formats:
+
+- [CSVWithNames](../../interfaces/formats.md#csvwithnames)
+- [TabSeparatedWithNames](../../interfaces/formats.md#tabseparatedwithnames)
+
+Possible values:
+
+- 0 — Disabled.
+- 1 — Enabled.
+
+Default value: 1.
+
+## date\_time\_input\_format {#settings-date_time_input_format}
+
+Allows choosing a parser of the text representation of date and time.
+
+The setting doesn’t apply to [date and time functions](../../query_language/functions/date_time_functions.md).
+
+Possible values:
+
+- `'best_effort'` — Enables extended parsing.
+
+    ClickHouse can parse the basic `YYYY-MM-DD HH:MM:SS` format and all [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) date and time formats. For example, `'2018-06-08T01:02:03.000Z'`.
+
+- `'basic'` — Use the basic parser.
+
+    ClickHouse can parse only the basic `YYYY-MM-DD HH:MM:SS` format. For example, `'2019-08-20 10:18:56'`.
+
+Default value: `'basic'`.
+
+See also:
+
+- [DateTime data type.](../../data_types/datetime.md)
+- [Functions for working with dates and times.](../../query_language/functions/date_time_functions.md)
+
+## join\_default\_strictness {#settings-join_default_strictness}
+
+Sets the default strictness for [JOIN clauses](../../query_language/select.md#select-join).
+
+Possible values:
+
+- `ALL` — If the right table has several matching rows, ClickHouse creates a [Cartesian product](https://en.wikipedia.org/wiki/Cartesian_product) from matching rows. This is the normal `JOIN` behaviour from standard SQL.
+- `ANY` — If the right table has several matching rows, only the first one found is joined. If the right table has only one matching row, the results of `ANY` and `ALL` are the same.
+- `ASOF` — For joining sequences with an uncertain match.
+- `Empty string` — If `ALL` or `ANY` is not specified in the query, ClickHouse throws an exception.
+
+Default value: `ALL`.
+
+## join\_any\_take\_last\_row {#settings-join_any_take_last_row}
+
+Changes the behaviour of join operations with `ANY` strictness.
+
+!!! warning "Attention"
+    This setting applies only to `JOIN` operations with [Join](../table_engines/join.md) engine tables.
+
+Possible values:
+
+- 0 — If the right table has more than one matching row, only the first one found is joined.
+- 1 — If the right table has more than one matching row, only the last one found is joined.
+
+Default value: 0.
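+
+For example, with a [Join](../table_engines/join.md) engine table, the setting selects which of two rows with the same key survives. A minimal sketch (the table name and values are illustrative, not from the documentation):
+
+``` sql
+SET join_any_take_last_row = 1; -- keep the last matching row instead of the first one
+CREATE TABLE id_val_join (`id` UInt32, `val` UInt8) ENGINE = Join(ANY, LEFT, id);
+INSERT INTO id_val_join VALUES (1, 11), (1, 12); -- with the setting enabled, (1, 12) wins
+```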
+
+See also:
+
+- [JOIN clause](../../query_language/select.md#select-join)
+- [Join table engine](../table_engines/join.md)
+- [join\_default\_strictness](#settings-join_default_strictness)
+
+## join\_use\_nulls {#join_use_nulls}
+
+Sets the type of [JOIN](../../query_language/select.md) behavior. When merging tables, empty cells may appear. ClickHouse fills them differently based on this setting.
+
+Possible values:
+
+- 0 — The empty cells are filled with the default value of the corresponding field type.
+- 1 — `JOIN` behaves the same way as in standard SQL. The type of the corresponding field is converted to [Nullable](../../data_types/nullable.md#data_type-nullable), and empty cells are filled with [NULL](../../query_language/syntax.md).
+
+Default value: 0.
+
+## max\_block\_size {#setting-max_block_size}
+
+In ClickHouse, data is processed by blocks (sets of column parts). The internal processing cycles for a single block are efficient enough, but there are noticeable expenditures on each block. The `max_block_size` setting is a recommendation for what size of the block (in a count of rows) to load from tables. The block size shouldn’t be too small, otherwise the expenditures on each block become noticeable; and it shouldn’t be too large, so that a query with LIMIT that completes after the first block is processed quickly. The goal is to avoid consuming too much memory when extracting a large number of columns in multiple threads and to preserve at least some cache locality.
+
+Default value: 65,536.
+
+Blocks of the `max_block_size` size are not always loaded from the table. If it is obvious that less data needs to be retrieved, a smaller block is processed.
+
+## preferred\_block\_size\_bytes {#preferred-block-size-bytes}
+
+Used for the same purpose as `max_block_size`, but it sets the recommended block size in bytes by adapting it to the number of rows in the block.
+However, the block size cannot be more than `max_block_size` rows.
+By default: 1,000,000. It only works when reading from MergeTree engines.
+
+## merge\_tree\_min\_rows\_for\_concurrent\_read {#setting-merge-tree-min-rows-for-concurrent-read}
+
+If the number of rows to be read from a file of a [MergeTree](../table_engines/mergetree.md) table exceeds `merge_tree_min_rows_for_concurrent_read`, then ClickHouse tries to perform a concurrent reading from this file on several threads.
+
+Possible values:
+
+- Any positive integer.
+
+Default value: 163840.
+
+## merge\_tree\_min\_bytes\_for\_concurrent\_read {#setting-merge-tree-min-bytes-for-concurrent-read}
+
+If the number of bytes to read from one file of a [MergeTree](../table_engines/mergetree.md)-engine table exceeds `merge_tree_min_bytes_for_concurrent_read`, then ClickHouse tries to concurrently read from this file in several threads.
+
+Possible values:
+
+- Any positive integer.
+
+Default value: 251658240.
+
+## merge\_tree\_min\_rows\_for\_seek {#setting-merge-tree-min-rows-for-seek}
+
+If the distance between two data blocks to be read in one file is less than `merge_tree_min_rows_for_seek` rows, then ClickHouse does not seek through the file but reads the data sequentially.
+
+Possible values:
+
+- Any positive integer.
+
+Default value: 0.
+
+## merge\_tree\_min\_bytes\_for\_seek {#setting-merge-tree-min-bytes-for-seek}
+
+If the distance between two data blocks to be read in one file is less than `merge_tree_min_bytes_for_seek` bytes, then ClickHouse sequentially reads a range of the file that contains both blocks, thus avoiding the extra seek.
+
+Possible values:
+
+- Any positive integer.
+
+Default value: 0.
+
+## merge\_tree\_coarse\_index\_granularity {#setting-merge-tree-coarse-index-granularity}
+
+When searching for data, ClickHouse checks the data marks in the index file. If ClickHouse finds that required keys are in some range, it divides this range into `merge_tree_coarse_index_granularity` subranges and searches the required keys there recursively.
+
+Possible values:
+
+- Any positive even integer.
+
+Default value: 8.
+
+## merge\_tree\_max\_rows\_to\_use\_cache {#setting-merge-tree-max-rows-to-use-cache}
+
+If ClickHouse should read more than `merge_tree_max_rows_to_use_cache` rows in one query, it doesn’t use the cache of uncompressed blocks.
+
+The cache of uncompressed blocks stores data extracted for queries. ClickHouse uses this cache to speed up responses to repeated small queries. This setting protects the cache from thrashing by queries that read a large amount of data. The [uncompressed\_cache\_size](../server_settings/settings.md#server-settings-uncompressed_cache_size) server setting defines the size of the cache of uncompressed blocks.
+
+Possible values:
+
+- Any positive integer.
+
+Default value: 128 ✕ 8192.
+
+## merge\_tree\_max\_bytes\_to\_use\_cache {#setting-merge-tree-max-bytes-to-use-cache}
+
+If ClickHouse should read more than `merge_tree_max_bytes_to_use_cache` bytes in one query, it doesn’t use the cache of uncompressed blocks.
+
+The cache of uncompressed blocks stores data extracted for queries. ClickHouse uses this cache to speed up responses to repeated small queries. This setting protects the cache from thrashing by queries that read a large amount of data. The [uncompressed\_cache\_size](../server_settings/settings.md#server-settings-uncompressed_cache_size) server setting defines the size of the cache of uncompressed blocks.
+
+Possible values:
+
+- Any positive integer.
+
+Default value: 2013265920.
+
+## min\_bytes\_to\_use\_direct\_io {#settings-min-bytes-to-use-direct-io}
+
+The minimum data volume required for using direct I/O access to the storage disk.
+
+ClickHouse uses this setting when reading data from tables. If the total storage volume of all the data to be read exceeds `min_bytes_to_use_direct_io` bytes, then ClickHouse reads the data from the storage disk with the `O_DIRECT` option.
+
+Possible values:
+
+- 0 — Direct I/O is disabled.
+- Positive integer.
+
+Default value: 0.
+
+## log\_queries {#settings-log-queries}
+
+Setting up query logging.
+
+Queries sent to ClickHouse with this setup are logged according to the rules in the [query\_log](../server_settings/settings.md#server_settings-query-log) server configuration parameter.
+
+Example:
+
+``` text
+log_queries=1
+```
+
+## log\_query\_threads {#settings-log-query-threads}
+
+Setting up query threads logging.
+
+Query threads run by ClickHouse with this setup are logged according to the rules in the [query\_thread\_log](../server_settings/settings.md#server_settings-query-thread-log) server configuration parameter.
+
+Example:
+
+``` text
+log_query_threads=1
+```
+
+## max\_insert\_block\_size {#settings-max_insert_block_size}
+
+The size of blocks to form for insertion into a table.
+This setting only applies in cases when the server forms the blocks.
+For example, for an INSERT via the HTTP interface, the server parses the data format and forms blocks of the specified size.
+But when using clickhouse-client, the client parses the data itself, and the ‘max\_insert\_block\_size’ setting on the server doesn’t affect the size of the inserted blocks.
+The setting also doesn’t have a purpose when using INSERT SELECT, since data is inserted using the same blocks that are formed after SELECT.
+
+Default value: 1,048,576.
+
+The default is slightly more than `max_block_size`. The reason for this is that certain table engines (`*MergeTree`) form a data part on the disk for each inserted block, which is a fairly large entity. Similarly, `*MergeTree` tables sort data during insertion, and a large enough block size allows sorting more data in RAM.
+
+## max\_replica\_delay\_for\_distributed\_queries {#settings-max_replica_delay_for_distributed_queries}
+
+Disables lagging replicas for distributed queries. See [Replication](../../operations/table_engines/replication.md).
+
+Sets the time in seconds. If a replica lags more than the set value, this replica is not used.
+
+Default value: 300.
+
+Used when performing `SELECT` from a distributed table that points to replicated tables.
+
+## max\_threads {#settings-max_threads}
+
+The maximum number of query processing threads, excluding threads for retrieving data from remote servers (see the ‘max\_distributed\_connections’ parameter).
+
+This parameter applies to threads that perform the same stages of the query processing pipeline in parallel.
+For example, when reading from a table, if it is possible to evaluate expressions with functions, filter with WHERE and pre-aggregate for GROUP BY in parallel using at least ‘max\_threads’ number of threads, then ‘max\_threads’ are used.
+
+Default value: the number of physical CPU cores.
+
+If less than one SELECT query is normally run on a server at a time, set this parameter to a value slightly less than the actual number of processor cores.
+
+For queries that are completed quickly because of a LIMIT, you can set a lower ‘max\_threads’. For example, if the necessary number of entries are located in every block and max\_threads = 8, then 8 blocks are retrieved, although it would have been enough to read just one.
+
+The smaller the `max_threads` value, the less memory is consumed.
+
+## max\_insert\_threads {#settings-max-insert-threads}
+
+The maximum number of threads to execute the `INSERT SELECT` query.
+
+Possible values:
+
+- 0 (or 1) — no parallel execution of `INSERT SELECT`.
+- Positive integer greater than 1.
+
+Default value: 0.
+
+Parallel `INSERT SELECT` has an effect only if the `SELECT` part is executed in parallel, see the [max\_threads](#settings-max_threads) setting.
+Higher values will lead to higher memory usage.
+
+## max\_compress\_block\_size {#max-compress-block-size}
+
+The maximum size of blocks of uncompressed data before compressing for writing to a table. By default, 1,048,576 (1 MiB). If the size is reduced, the compression rate is significantly reduced, the compression and decompression speed increases slightly due to cache locality, and memory consumption is reduced. There usually isn’t any reason to change this setting.
+
+Don’t confuse blocks for compression (a chunk of memory consisting of bytes) with blocks for query processing (a set of rows from a table).
+
+## min\_compress\_block\_size {#min-compress-block-size}
+
+For [MergeTree](../table_engines/mergetree.md) tables. In order to reduce latency when processing queries, a block is compressed when writing the next mark if its size is at least ‘min\_compress\_block\_size’. By default, 65,536.
+
+The actual size of the block, if the uncompressed data is less than ‘max\_compress\_block\_size’, is no less than this value and no less than the volume of data for one mark.
+
+Let’s look at an example. Assume that ‘index\_granularity’ was set to 8192 during table creation.
+
+We are writing a UInt32-type column (4 bytes per value). When writing 8192 rows, the total will be 32 KB of data. Since min\_compress\_block\_size = 65,536, a compressed block will be formed for every two marks.
+
+We are writing a URL column with the String type (average size of 60 bytes per value). When writing 8192 rows, the average will be slightly less than 500 KB of data. Since this is more than 65,536, a compressed block will be formed for each mark. In this case, when reading data from the disk in the range of a single mark, extra data won’t be decompressed.
+
+There usually isn’t any reason to change this setting.
+
+## max\_query\_size {#settings-max_query_size}
+
+The maximum part of a query that can be taken to RAM for parsing with the SQL parser.
+The INSERT query also contains data for INSERT that is processed by a separate stream parser (that consumes O(1) RAM), which is not included in this restriction.
+
+Default value: 256 KiB.
+
+## interactive\_delay {#interactive-delay}
+
+The interval in microseconds for checking whether request execution has been cancelled and sending the progress.
+
+Default value: 100,000 (checks for cancelling and sends the progress ten times per second).
+
+## connect\_timeout, receive\_timeout, send\_timeout {#connect-timeout-receive-timeout-send-timeout}
+
+Timeouts in seconds on the socket used for communicating with the client.
+
+Default value: 10, 300, 300.
+
+## cancel\_http\_readonly\_queries\_on\_client\_close {#cancel-http-readonly-queries-on-client-close}
+
+Cancels HTTP read-only queries (e.g. SELECT) when a client closes the connection without waiting for the response.
+
+Default value: 0.
+
+## poll\_interval {#poll-interval}
+
+Lock in a wait loop for the specified number of seconds.
+
+Default value: 10.
+
+## max\_distributed\_connections {#max-distributed-connections}
+
+The maximum number of simultaneous connections with remote servers for distributed processing of a single query to a single Distributed table. We recommend setting a value no less than the number of servers in the cluster.
+
+Default value: 1024.
+
+The following parameters are only used when creating Distributed tables (and when launching a server), so there is no reason to change them at runtime.
+
+## distributed\_connections\_pool\_size {#distributed-connections-pool-size}
+
+The maximum number of simultaneous connections with remote servers for distributed processing of all queries to a single Distributed table. We recommend setting a value no less than the number of servers in the cluster.
+
+Default value: 1024.
+
+## connect\_timeout\_with\_failover\_ms {#connect-timeout-with-failover-ms}
+
+The timeout in milliseconds for connecting to a remote server for a Distributed table engine, if the ‘shard’ and ‘replica’ sections are used in the cluster definition.
+If unsuccessful, several attempts are made to connect to various replicas.
+
+Default value: 50.
+
+## connections\_with\_failover\_max\_tries {#connections-with-failover-max-tries}
+
+The maximum number of connection attempts with each replica for the Distributed table engine.
+
+Default value: 3.
+
+## extremes {#extremes}
+
+Whether to count extreme values (the minimums and maximums in columns of a query result). Accepts 0 or 1. By default, 0 (disabled).
+For more information, see the section “Extreme values”.
+
+## use\_uncompressed\_cache {#setting-use_uncompressed_cache}
+
+Whether to use a cache of uncompressed blocks. Accepts 0 or 1. By default, 0 (disabled).
+Using the uncompressed cache (only for tables in the MergeTree family) can significantly reduce latency and increase throughput when working with a large number of short queries. Enable this setting for users who send frequent short requests. Also pay attention to the [uncompressed\_cache\_size](../server_settings/settings.md#server-settings-uncompressed_cache_size) configuration parameter (only set in the config file) – the size of uncompressed cache blocks. By default, it is 8 GiB. The uncompressed cache is filled in as needed and the least-used data is automatically deleted.
+
+For queries that read at least a somewhat large volume of data (one million rows or more), the uncompressed cache is disabled automatically to save space for truly small queries. This means that you can keep the ‘use\_uncompressed\_cache’ setting always set to 1.
+
+## replace\_running\_query {#replace-running-query}
+
+When using the HTTP interface, the ‘query\_id’ parameter can be passed. This is any string that serves as the query identifier.
+If a query from the same user with the same ‘query\_id’ already exists at this time, the behaviour depends on the ‘replace\_running\_query’ parameter.
+
+`0` (default) – Throw an exception (don’t allow the query to run if a query with the same ‘query\_id’ is already running).
+
+`1` – Cancel the old query and start running the new one.
+
+Yandex.Metrica uses this parameter set to 1 for implementing suggestions for segmentation conditions. After entering the next character, if the old query hasn’t finished yet, it should be cancelled.
+
+## stream\_flush\_interval\_ms {#stream-flush-interval-ms}
+
+Works for tables with streaming in the case of a timeout, or when a thread generates [max\_insert\_block\_size](#settings-max_insert_block_size) rows.
+
+The default value is 7500.
+
+The smaller the value, the more often data is flushed into the table. Setting the value too low leads to poor performance.
+
+## load\_balancing {#settings-load_balancing}
+
+Specifies the algorithm of replica selection that is used for distributed query processing.
+
+ClickHouse supports the following algorithms of choosing replicas:
+
+- [Random](#load_balancing-random) (by default)
+- [Nearest hostname](#load_balancing-nearest_hostname)
+- [In order](#load_balancing-in_order)
+- [First or random](#load_balancing-first_or_random)
+
+### Random (by default) {#load_balancing-random}
+
+``` sql
+load_balancing = random
+```
+
+The number of errors is counted for each replica. The query is sent to the replica with the fewest errors, and if there are several of these, to any one of them.
+Disadvantages: Server proximity is not accounted for; if the replicas have different data, you will also get different data.
+
+### Nearest Hostname {#load_balancing-nearest_hostname}
+
+``` sql
+load_balancing = nearest_hostname
+```
+
+The number of errors is counted for each replica. Every 5 minutes, the number of errors is divided by 2 (using integer division). Thus, the number of errors is calculated for a recent time with exponential smoothing. If there is one replica with a minimal number of errors (i.e. errors occurred recently on the other replicas), the query is sent to it. If there are multiple replicas with the same minimal number of errors, the query is sent to the replica with a hostname that is most similar to the server’s hostname in the config file (for the number of different characters in identical positions, up to the minimum length of both hostnames).
+
+For instance, example01-01-1 and example01-01-2.yandex.ru are different in one position, while example01-01-1 and example01-02-2 differ in two places.
+This method might seem primitive, but it doesn’t require external data about network topology, and it doesn’t compare IP addresses, which would be complicated for our IPv6 addresses.
+
+Thus, if there are equivalent replicas, the closest one by name is preferred.
+We can also assume that when sending a query to the same server, in the absence of failures, a distributed query will also go to the same servers. So even if different data is placed on the replicas, the query will return mostly the same results.
+
+### In Order {#load_balancing-in_order}
+
+``` sql
+load_balancing = in_order
+```
+
+Replicas with the same number of errors are accessed in the same order as they are specified in the configuration.
+This method is appropriate when you know exactly which replica is preferable.
+
+### First or Random {#load_balancing-first_or_random}
+
+``` sql
+load_balancing = first_or_random
+```
+
+This algorithm chooses the first replica in the set or a random replica if the first is unavailable. It’s effective in cross-replication topology setups, but useless in other configurations.
+
+The `first_or_random` algorithm solves the problem of the `in_order` algorithm. With `in_order`, if one replica goes down, the next one gets a double load while the remaining replicas handle the usual amount of traffic. When using the `first_or_random` algorithm, the load is evenly distributed among replicas that are still available.
+
+## prefer\_localhost\_replica {#settings-prefer-localhost-replica}
+
+Enables/disables preferential use of the localhost replica when processing distributed queries.
+
+Possible values:
+
+- 1 — ClickHouse always sends a query to the localhost replica if it exists.
+- 0 — ClickHouse uses the balancing strategy specified by the [load\_balancing](#settings-load_balancing) setting.
+
+Default value: 1.
+
+!!! warning "Warning"
+    Disable this setting if you use [max\_parallel\_replicas](#settings-max_parallel_replicas).
+
+## totals\_mode {#totals-mode}
+
+How to calculate TOTALS when HAVING is present, as well as when max\_rows\_to\_group\_by and group\_by\_overflow\_mode = ‘any’ are present.
+See the section “WITH TOTALS modifier”.
+
+## totals\_auto\_threshold {#totals-auto-threshold}
+
+The threshold for `totals_mode = 'auto'`.
+See the section “WITH TOTALS modifier”.
+
+## max\_parallel\_replicas {#settings-max_parallel_replicas}
+
+The maximum number of replicas for each shard when executing a query.
+For consistency (to get different parts of the same data split), this option only works when the sampling key is set.
+Replica lag is not controlled.
+
+## compile {#compile}
+
+Enable compilation of queries. By default, 0 (disabled).
+
+The compilation is only used for part of the query-processing pipeline: for the first stage of aggregation (GROUP BY).
+If this portion of the pipeline was compiled, the query may run faster due to the unrolling of short loops and the inlining of aggregate function calls. The maximum performance improvement (up to four times faster in rare cases) is seen for queries with multiple simple aggregate functions. Typically, the performance gain is insignificant. In very rare cases, it may slow down query execution.
+
+## min\_count\_to\_compile {#min-count-to-compile}
+
+How many times to potentially use a compiled chunk of code before running compilation. By default, 3.
+For testing, the value can be set to 0: compilation runs synchronously and the query waits for the end of the compilation process before continuing execution. For all other cases, use values starting with 1. Compilation normally takes about 5-10 seconds.
+If the value is 1 or more, compilation occurs asynchronously in a separate thread. The result will be used as soon as it is ready, including for queries that are currently running.
+
+Compiled code is required for each different combination of aggregate functions used in the query and the type of keys in the GROUP BY clause.
+The results of the compilation are saved in the build directory in the form of .so files. There is no restriction on the number of compilation results since they don’t use very much space. Old results will be used after server restarts, except in the case of a server upgrade – in this case, the old results are deleted.
+
+## output\_format\_json\_quote\_64bit\_integers {#session_settings-output_format_json_quote_64bit_integers}
+
+If the value is true, Int64 and UInt64 integers appear in quotes when using JSON\* formats (for compatibility with most JavaScript implementations); otherwise, integers are output without the quotes.
+
+## format\_csv\_delimiter {#settings-format_csv_delimiter}
+
+The character interpreted as a delimiter in the CSV data. By default, the delimiter is `,`.
+
+## input\_format\_csv\_unquoted\_null\_literal\_as\_null {#settings-input_format_csv_unquoted_null_literal_as_null}
+
+For the CSV input format, enables or disables parsing of unquoted `NULL` as a literal (synonym for `\N`).
+
+## output\_format\_csv\_crlf\_end\_of\_line {#settings-output-format-csv-crlf-end-of-line}
+
+Use DOS/Windows-style line separator (CRLF) in CSV instead of Unix style (LF).
+
+## output\_format\_tsv\_crlf\_end\_of\_line {#settings-output-format-tsv-crlf-end-of-line}
+
+Use DOS/Windows-style line separator (CRLF) in TSV instead of Unix style (LF).
+
+## insert\_quorum {#settings-insert_quorum}
+
+Enables quorum writes.
+
+- If `insert_quorum < 2`, the quorum writes are disabled.
+- If `insert_quorum >= 2`, the quorum writes are enabled.
+
+Default value: 0.
+
+Quorum writes
+
+`INSERT` succeeds only when ClickHouse manages to correctly write data to the `insert_quorum` of replicas during the `insert_quorum_timeout`. If for any reason the number of replicas with successful writes does not reach the `insert_quorum`, the write is considered failed and ClickHouse will delete the inserted block from all the replicas where data has already been written.
+
+All the replicas in the quorum are consistent, i.e., they contain data from all previous `INSERT` queries. The `INSERT` sequence is linearized.
+
+When reading the data written from the `insert_quorum`, you can use the [select\_sequential\_consistency](#settings-select_sequential_consistency) option.
+
+ClickHouse generates an exception:
+
+- If the number of available replicas at the time of the query is less than the `insert_quorum`.
+- At an attempt to write data when the previous block has not yet been inserted in the `insert_quorum` of replicas. This situation may occur if the user tries to perform an `INSERT` before the previous one with the `insert_quorum` is completed.
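+
+For example, a session-level sketch of a quorum write (the table name `test_repl` is illustrative; it is assumed to be a Replicated\* table with at least two replicas):
+
+``` sql
+SET insert_quorum = 2;          -- the INSERT must be acknowledged by 2 replicas
+SET insert_quorum_timeout = 60; -- wait up to 60 seconds for the quorum
+INSERT INTO test_repl VALUES (1, 'a'); -- succeeds only after both replicas confirm the write
+```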
+
+See also:
+
+- [insert\_quorum\_timeout](#settings-insert_quorum_timeout)
+- [select\_sequential\_consistency](#settings-select_sequential_consistency)
+
+## insert\_quorum\_timeout {#settings-insert_quorum_timeout}
+
+Write to quorum timeout in seconds. If the timeout has passed and no write has taken place yet, ClickHouse generates an exception and the client must repeat the query to write the same block to the same or any other replica.
+
+Default value: 60 seconds.
+
+See also:
+
+- [insert\_quorum](#settings-insert_quorum)
+- [select\_sequential\_consistency](#settings-select_sequential_consistency)
+
+## select\_sequential\_consistency {#settings-select_sequential_consistency}
+
+Enables or disables sequential consistency for `SELECT` queries.
+
+Possible values:
+
+- 0 — Disabled.
+- 1 — Enabled.
+
+Default value: 0.
+
+Usage
+
+When sequential consistency is enabled, ClickHouse allows the client to execute the `SELECT` query only for those replicas that contain data from all previous `INSERT` queries executed with `insert_quorum`. If the client refers to a partial replica, ClickHouse will generate an exception. The SELECT query will not include data that has not yet been written to the quorum of replicas.
+
+See also:
+
+- [insert\_quorum](#settings-insert_quorum)
+- [insert\_quorum\_timeout](#settings-insert_quorum_timeout)
+
+## insert\_deduplicate {#settings-insert-deduplicate}
+
+Enables or disables block deduplication of `INSERT` (for Replicated\* tables).
+
+Possible values:
+
+- 0 — Disabled.
+- 1 — Enabled.
+
+Default value: 1.
+
+By default, blocks inserted into replicated tables by the `INSERT` statement are deduplicated (see [Data Replication](../table_engines/replication.md)).
+
+## deduplicate\_blocks\_in\_dependent\_materialized\_views {#settings-deduplicate-blocks-in-dependent-materialized-views}
+
+Enables or disables the deduplication check for materialized views that receive data from Replicated\* tables.
+
+Possible values:
+
+- 0 — Disabled.
+- 1 — Enabled.
+
+Default value: 0.
+
+Usage
+
+By default, deduplication is not performed for materialized views but is done upstream, in the source table.
+If an INSERTed block is skipped due to deduplication in the source table, there will be no insertion into attached materialized views. This behaviour exists to enable insertion of highly aggregated data into materialized views, for cases where inserted blocks are the same after materialized view aggregation but derived from different INSERTs into the source table.
+At the same time, this behaviour “breaks” `INSERT` idempotency. If an `INSERT` into the main table was successful and `INSERT` into a materialized view failed (e.g. because of communication failure with Zookeeper), a client will get an error and can retry the operation. However, the materialized view won’t receive the second insert because it will be discarded by deduplication in the main (source) table. The setting `deduplicate_blocks_in_dependent_materialized_views` allows for changing this behaviour. On retry, a materialized view will receive the repeat insert and will perform the deduplication check by itself, ignoring the check result for the source table, and will insert the rows lost because of the first failure.
+
+## max\_network\_bytes {#settings-max-network-bytes}
+
+Limits the data volume (in bytes) that is received or transmitted over the network when executing a query. This setting applies to every individual query.
+
+Possible values:
+
+- Positive integer.
+- 0 — Data volume control is disabled.
+
+Default value: 0.
+
+## max\_network\_bandwidth {#settings-max-network-bandwidth}
+
+Limits the speed of the data exchange over the network in bytes per second. This setting applies to every query.
+
+Possible values:
+
+- Positive integer.
+- 0 — Bandwidth control is disabled.
+
+Default value: 0.
+
+## max\_network\_bandwidth\_for\_user {#settings-max-network-bandwidth-for-user}
+
+Limits the speed of the data exchange over the network in bytes per second. This setting applies to all concurrently running queries performed by a single user.
+
+Possible values:
+
+- Positive integer.
+- 0 — Control of the data speed is disabled.
+
+Default value: 0.
+
+## max\_network\_bandwidth\_for\_all\_users {#settings-max-network-bandwidth-for-all-users}
+
+Limits the speed that data is exchanged at over the network in bytes per second. This setting applies to all concurrently running queries on the server.
+
+Possible values:
+
+- Positive integer.
+- 0 — Control of the data speed is disabled.
+
+Default value: 0.
+
+## count\_distinct\_implementation {#settings-count_distinct_implementation}
+
+Specifies which of the `uniq*` functions should be used to perform the [COUNT(DISTINCT …)](../../query_language/agg_functions/reference.md#agg_function-count) construction.
+
+Possible values:
+
+- [uniq](../../query_language/agg_functions/reference.md#agg_function-uniq)
+- [uniqCombined](../../query_language/agg_functions/reference.md#agg_function-uniqcombined)
+- [uniqCombined64](../../query_language/agg_functions/reference.md#agg_function-uniqcombined64)
+- [uniqHLL12](../../query_language/agg_functions/reference.md#agg_function-uniqhll12)
+- [uniqExact](../../query_language/agg_functions/reference.md#agg_function-uniqexact)
+
+Default value: `uniqExact`.
+
+## skip\_unavailable\_shards {#settings-skip_unavailable_shards}
+
+Enables or disables silent skipping of unavailable shards.
+
+A shard is considered unavailable if all its replicas are unavailable. A replica is unavailable in the following cases:
+
+- ClickHouse can’t connect to the replica for any reason.
+
+    When connecting to a replica, ClickHouse performs several attempts. If all these attempts fail, the replica is considered unavailable.
+
+- The replica can’t be resolved through DNS.
+
+    If the replica’s hostname can’t be resolved through DNS, it can indicate the following situations:
+
+    - The replica’s host has no DNS record. It can occur in systems with dynamic DNS, for example, [Kubernetes](https://kubernetes.io), where nodes can be unresolvable during downtime, and this is not an error.
+
+    - Configuration error. The ClickHouse configuration file contains a wrong hostname.
+
+Possible values:
+
+- 1 — skipping enabled.
+
+    If a shard is unavailable, ClickHouse returns a result based on partial data and doesn’t report node availability issues.
+
+- 0 — skipping disabled.
+
+    If a shard is unavailable, ClickHouse throws an exception.
+
+Default value: 0.
+
+## optimize\_skip\_unused\_shards {#settings-optimize_skip_unused_shards}
+
+Enables or disables skipping of unused shards for SELECT queries that have a sharding key condition in PREWHERE/WHERE (this assumes that the data is distributed by the sharding key; otherwise the setting does nothing).
+
+Default value: 0.
+
+## force\_optimize\_skip\_unused\_shards {#settings-force_optimize_skip_unused_shards}
+
+Enables or disables query execution if [`optimize_skip_unused_shards`](#settings-optimize_skip_unused_shards) is enabled and skipping of unused shards is not possible. If skipping is not possible and the setting is enabled, an exception will be thrown.
+
+Possible values:
+
+- 0 — Disabled (does not throw).
+- 1 — Disable query execution only if the table has a sharding key.
+- 2 — Disable query execution regardless of whether a sharding key is defined for the table.
+
+Default value: 0.
+
+## force\_optimize\_skip\_unused\_shards\_no\_nested {#settings-force_optimize_skip_unused_shards_no_nested}
+
+Resets [`optimize_skip_unused_shards`](#settings-optimize_skip_unused_shards) for nested `Distributed` tables.
+
+Possible values:
+
+- 1 — Enabled.
+- 0 — Disabled.
+
+Default value: 0.
+
+## optimize\_throw\_if\_noop {#setting-optimize_throw_if_noop}
+
+Enables or disables throwing an exception if an [OPTIMIZE](../../query_language/misc.md#misc_operations-optimize) query didn’t perform a merge.
+
+By default, `OPTIMIZE` returns successfully even if it didn’t do anything. This setting lets you differentiate these situations and get the reason in an exception message.
+
+Possible values:
+
+- 1 — Throwing an exception is enabled.
+- 0 — Throwing an exception is disabled.
+
+Default value: 0.
+
+## distributed\_replica\_error\_half\_life {#settings-distributed_replica_error_half_life}
+
+- Type: seconds
+- Default value: 60 seconds
+
+Controls how fast errors in distributed tables are zeroed. If a replica is unavailable for some time, accumulates 5 errors, and distributed\_replica\_error\_half\_life is set to 1 second, then the replica is considered normal 3 seconds after the last error.
+
+See also:
+
+- [Table engine Distributed](../../operations/table_engines/distributed.md)
+- [distributed\_replica\_error\_cap](#settings-distributed_replica_error_cap)
+
+## distributed\_replica\_error\_cap {#settings-distributed_replica_error_cap}
+
+- Type: unsigned int
+- Default value: 1000
+
+The error count of each replica is capped at this value, preventing a single replica from accumulating too many errors.
+
+See also:
+
+- [Table engine Distributed](../../operations/table_engines/distributed.md)
+- [distributed\_replica\_error\_half\_life](#settings-distributed_replica_error_half_life)
+
+## distributed\_directory\_monitor\_sleep\_time\_ms {#distributed_directory_monitor_sleep_time_ms}
+
+Base interval for the [Distributed](../table_engines/distributed.md) table engine to send data. The actual interval grows exponentially in the event of errors.
+
+Possible values:
+
+- A positive integer number of milliseconds.
+
+Default value: 100 milliseconds.
+
+## distributed\_directory\_monitor\_max\_sleep\_time\_ms {#distributed_directory_monitor_max_sleep_time_ms}
+
+Maximum interval for the [Distributed](../table_engines/distributed.md) table engine to send data. Limits exponential growth of the interval set in the [distributed\_directory\_monitor\_sleep\_time\_ms](#distributed_directory_monitor_sleep_time_ms) setting.
+
+Possible values:
+
+- A positive integer number of milliseconds.
+
+Default value: 30000 milliseconds (30 seconds).
+
+## distributed\_directory\_monitor\_batch\_inserts {#distributed_directory_monitor_batch_inserts}
+
+Enables/disables sending of inserted data in batches.
+
+When batch sending is enabled, the [Distributed](../table_engines/distributed.md) table engine tries to send multiple files of inserted data in one operation instead of sending them separately. Batch sending improves cluster performance by better utilizing server and network resources. The three send-related settings can be tuned together, as sketched below.
+
+Possible values:
+
+- 1 — Enabled.
+- 0 — Disabled.
+
+Default value: 0.
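+
+A minimal session-level sketch combining the three Distributed send settings (the first two values shown are the documented defaults; batching is switched on):
+
+``` sql
+SET distributed_directory_monitor_sleep_time_ms = 100;        -- base interval between send attempts
+SET distributed_directory_monitor_max_sleep_time_ms = 30000;  -- cap for the exponential backoff
+SET distributed_directory_monitor_batch_inserts = 1;          -- send accumulated files in batches
+```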
+
+## os\_thread\_priority {#setting-os-thread-priority}
+
+Sets the priority ([nice](https://en.wikipedia.org/wiki/Nice_(Unix))) for threads that execute queries. The OS scheduler considers this priority when choosing the next thread to run on each available CPU core.
+
+!!! warning "Warning"
+    To use this setting, you need to set the `CAP_SYS_NICE` capability. The `clickhouse-server` package sets it up during installation. Some virtual environments don’t allow you to set the `CAP_SYS_NICE` capability. In this case, `clickhouse-server` shows a message about it at the start.
+
+Possible values:
+
+- You can set values in the range `[-20, 19]`.
+
+Lower values mean higher priority. Threads with low `nice` priority values are executed more frequently than threads with high values. High values are preferable for long-running non-interactive queries because this allows them to quickly give up resources in favour of short interactive queries when they arrive.
+
+Default value: 0.
+
+## query\_profiler\_real\_time\_period\_ns {#query_profiler_real_time_period_ns}
+
+Sets the period for a real clock timer of the [query profiler](../../operations/performance/sampling_query_profiler.md). The real clock timer counts wall-clock time.
+
+Possible values:
+
+- Positive integer number, in nanoseconds.
+
+    Recommended values:
+
+    - 10000000 (100 times a second) nanoseconds and less for single queries.
+    - 1000000000 (once a second) for cluster-wide profiling.
+
+- 0 for turning off the timer.
+
+Type: [UInt64](../../data_types/int_uint.md).
+
+Default value: 1000000000 nanoseconds (once a second).
+
+See also:
+
+- System table [trace\_log](../system_tables.md#system_tables-trace_log)
+
+## query\_profiler\_cpu\_time\_period\_ns {#query_profiler_cpu_time_period_ns}
+
+Sets the period for a CPU clock timer of the [query profiler](../../operations/performance/sampling_query_profiler.md). This timer counts only CPU time.
+
+Possible values:
+
+- A positive integer number of nanoseconds.
+
+    Recommended values:
+
+    - 10000000 (100 times a second) nanoseconds and more for single queries.
+    - 1000000000 (once a second) for cluster-wide profiling.
+
+- 0 for turning off the timer.
+
+Type: [UInt64](../../data_types/int_uint.md).
+
+Default value: 1000000000 nanoseconds.
+
+See also:
+
+- System table [trace\_log](../system_tables.md#system_tables-trace_log)
+
+## allow\_introspection\_functions {#settings-allow_introspection_functions}
+
+Enables or disables [introspection functions](../../query_language/functions/introspection.md) for query profiling.
+
+Possible values:
+
+- 1 — Introspection functions enabled.
+- 0 — Introspection functions disabled.
+
+Default value: 0.
+
+**See Also**
+
+- [Sampling Query Profiler](../performance/sampling_query_profiler.md)
+- System table [trace\_log](../system_tables.md#system_tables-trace_log)
+
+## input\_format\_parallel\_parsing {#input-format-parallel-parsing}
+
+- Type: bool
+- Default value: True
+
+Enable order-preserving parallel parsing of data formats. Supported only for the TSV, TSKV, CSV and JSONEachRow formats.
+
+## min\_chunk\_bytes\_for\_parallel\_parsing {#min-chunk-bytes-for-parallel-parsing}
+
+- Type: unsigned int
+- Default value: 1 MiB
+
+The minimum chunk size in bytes, which each thread will parse in parallel.
+
+## output\_format\_avro\_codec {#settings-output_format_avro_codec}
+
+Sets the compression codec used for the output Avro file.
+
+Type: string
+
+Possible values:
+
+- `null` — No compression
+- `deflate` — Compress with Deflate (zlib)
+- `snappy` — Compress with [Snappy](https://google.github.io/snappy/)
+
+Default value: `snappy` (if available) or `deflate`.
+
+## output\_format\_avro\_sync\_interval {#settings-output_format_avro_sync_interval}
+
+Sets the minimum data size (in bytes) between synchronization markers for the output Avro file.
+
+Type: unsigned int
+
+Possible values: 32 (32 bytes) - 1073741824 (1 GiB)
+
+Default value: 32768 (32 KiB)
+
+## format\_avro\_schema\_registry\_url {#settings-format_avro_schema_registry_url}
+
+Sets the Confluent Schema Registry URL to use with the [AvroConfluent](../../interfaces/formats.md#data-format-avro-confluent) format.
+
+Type: URL
+
+Default value: Empty
+
+[Original article](https://clickhouse.tech/docs/en/operations/settings/settings/)
diff --git a/docs/zh/operations/settings/settings_profiles.md b/docs/zh/operations/settings/settings_profiles.md
deleted file mode 120000
index 35d9747ad56..00000000000
--- a/docs/zh/operations/settings/settings_profiles.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/operations/settings/settings_profiles.md
\ No newline at end of file
diff --git a/docs/zh/operations/settings/settings_profiles.md b/docs/zh/operations/settings/settings_profiles.md
new file mode 100644
index 00000000000..3c694c0889e
--- /dev/null
+++ b/docs/zh/operations/settings/settings_profiles.md
@@ -0,0 +1,68 @@
+---
+en_copy: true
+---
+
+# Settings Profiles {#settings-profiles}
+
+A settings profile is a collection of settings grouped under the same name. Each ClickHouse user has a profile.
+To apply all the settings in a profile, set the `profile` setting.
+
+Example:
+
+Set the `web` profile.
+
+``` sql
+SET profile = 'web'
+```
+
+Settings profiles are declared in the user config file. This is usually `users.xml`.
+
+Example:
+
+``` xml
+<!-- Settings profiles -->
+<profiles>
+    <!-- Default settings -->
+    <default>
+        <!-- The maximum number of threads when running a single query. -->
+        <max_threads>8</max_threads>
+    </default>
+
+    <!-- Settings for queries from the user interface -->
+    <web>
+        <max_rows_to_read>1000000000</max_rows_to_read>
+        <max_bytes_to_read>100000000000</max_bytes_to_read>
+
+        <max_rows_to_group_by>1000000</max_rows_to_group_by>
+        <group_by_overflow_mode>any</group_by_overflow_mode>
+
+        <max_rows_to_sort>1000000</max_rows_to_sort>
+        <max_bytes_to_sort>1000000000</max_bytes_to_sort>
+
+        <max_result_rows>100000</max_result_rows>
+        <max_result_bytes>100000000</max_result_bytes>
+        <result_overflow_mode>break</result_overflow_mode>
+
+        <max_execution_time>600</max_execution_time>
+        <min_execution_speed>1000000</min_execution_speed>
+        <timeout_before_checking_execution_speed>15</timeout_before_checking_execution_speed>
+
+        <max_columns_to_read>25</max_columns_to_read>
+        <max_temporary_columns>100</max_temporary_columns>
+        <max_temporary_non_const_columns>50</max_temporary_non_const_columns>
+
+        <max_subquery_depth>2</max_subquery_depth>
+        <max_pipeline_depth>25</max_pipeline_depth>
+        <max_ast_depth>50</max_ast_depth>
+        <max_ast_elements>100</max_ast_elements>
+
+        <readonly>1</readonly>
+    </web>
+</profiles>
+```
+
+The example specifies two profiles: `default` and `web`. The `default` profile has a special purpose: it must always be present and is applied when starting the server. In other words, the `default` profile contains default settings. The `web` profile is a regular profile that can be set using the `SET` query or using a URL parameter in an HTTP query.
+
+Settings profiles can inherit from each other. To use inheritance, indicate one or multiple `profile` settings before the other settings that are listed in the profile. If the same setting is defined in different profiles, the latest definition is used.
+
+[Original article](https://clickhouse.tech/docs/en/operations/settings/settings_profiles/)
diff --git a/docs/zh/operations/settings/settings_users.md b/docs/zh/operations/settings/settings_users.md
deleted file mode 120000
index 3a6a7cf6948..00000000000
--- a/docs/zh/operations/settings/settings_users.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/operations/settings/settings_users.md
\ No newline at end of file
diff --git a/docs/zh/operations/settings/settings_users.md b/docs/zh/operations/settings/settings_users.md
new file mode 100644
index 00000000000..8b852380f5b
--- /dev/null
+++ b/docs/zh/operations/settings/settings_users.md
@@ -0,0 +1,145 @@
+---
+en_copy: true
+---
+
+# User Settings {#user-settings}
+
+The `users` section of the `users.xml` configuration file contains user settings.
+
+Structure of the `users` section:
+
+``` xml
+<users>
+    <!-- If user name was not specified, 'default' user is used. -->
+    <user_name>
+        <password></password>
+        <!-- Or -->
+        <password_sha256_hex></password_sha256_hex>
+
+        <networks incl="networks" replace="replace">
+        </networks>
+
+        <profile>profile_name</profile>
+
+        <quota>default</quota>
+
+        <databases>
+            <database_name>
+                <table_name>
+                    <filter>expression</filter>
+                </table_name>
+            </database_name>
+        </databases>
+    </user_name>
+    <!-- Other users settings -->
+</users>
+```
+
+### user\_name/password {#user-namepassword}
+
+Password can be specified in plaintext or in SHA256 (hex format).
+
+- To assign a password in plaintext (**not recommended**), place it in a `password` element.
+
+    For example, `qwerty`. The password can be left blank.
+
+
+
+- To assign a password using its SHA256 hash, place it in a `password_sha256_hex` element.
+
+    For example, `65e84be33532fb784c48129675f9eff3a682b27168c0ea744b2cf58ee02337c5`.
+
+    Example of how to generate a password from shell:
+
+        PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha256sum | tr -d '-'
+
+    The first line of the result is the password. The second line is the corresponding SHA256 hash.
+
+
+
+- For compatibility with MySQL clients, a password can be specified as a double SHA1 hash. Place it in a `password_double_sha1_hex` element.
+
+    For example, `08b4a0f1de6ad37da17359e592c8d74788a83eb0`.
+
+    Example of how to generate a password from shell:
+
+        PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha1sum | tr -d '-' | xxd -r -p | sha1sum | tr -d '-'
+
+    The first line of the result is the password. The second line is the corresponding double SHA1 hash.
+
+### user\_name/networks {#user-namenetworks}
+
+List of networks from which the user can connect to the ClickHouse server.
+
+Each element of the list can have one of the following forms:
+
+- `<ip>` — IP address or network mask.
+
+    Examples: `213.180.204.3`, `10.0.0.1/8`, `10.0.0.1/255.255.255.0`, `2a02:6b8::3`, `2a02:6b8::3/64`, `2a02:6b8::3/ffff:ffff:ffff:ffff::`.
+
+- `<host>` — Hostname.
+
+    Example: `example01.host.ru`.
+
+    To check access, a DNS query is performed, and all returned IP addresses are compared to the peer address.
+
+- `<host_regexp>` — Regular expression for hostnames.
+
+    For example, `^example\d\d-\d\d-\d\.host\.ru$`.
+
+    To check access, a [DNS PTR query](https://en.wikipedia.org/wiki/Reverse_DNS_lookup) is performed for the peer address and then the specified regexp is applied. Then, another DNS query is performed for the results of the PTR query and all the received addresses are compared to the peer address. We strongly recommend that the regexp ends with `$`.
+
+All results of DNS requests are cached until the server restarts.
+
+**Examples**
+
+To open access for the user from any network, specify:
+
+``` xml
+<ip>::/0</ip>
+```
+
+!!! warning "Warning"
+    It’s insecure to open access from any network unless you have a firewall properly configured or the server is not directly connected to the Internet.
+
+To open access only from localhost, specify:
+
+``` xml
+<ip>::1</ip>
+<ip>127.0.0.1</ip>
+```
+
+### user\_name/profile {#user-nameprofile}
+
+You can assign a settings profile for the user. Settings profiles are configured in a separate section of the `users.xml` file. For more information, see [Profiles of Settings](settings_profiles.md).
+
+### user\_name/quota {#user-namequota}
+
+Quotas allow you to track or limit resource usage over a period of time. Quotas are configured in the `quotas`
+section of the `users.xml` configuration file.
+
+You can assign a quota set for the user. For a detailed description of quotas configuration, see [Quotas](../quotas.md#quotas).
+
+### user\_name/databases {#user-namedatabases}
+
+In this section, you can limit the rows that are returned by ClickHouse for `SELECT` queries made by the current user, thus implementing basic row-level security.
+
+**Example**
+
+The following configuration ensures that the user `user1` can only see rows of `table1` where the value of the `id` field is 1000 as the result of `SELECT` queries.
+
+``` xml
+<user1>
+    <databases>
+        <database_name>
+            <table1>
+                <filter>id = 1000</filter>
+            </table1>
+        </database_name>
+    </databases>
+</user1>
+```
+
+The `filter` can be any expression resulting in a [UInt8](../../data_types/int_uint.md)-type value. It usually contains comparisons and logical operators. Rows from `database_name.table1` where the filter evaluates to 0 are not returned for this user. The filtering is incompatible with `PREWHERE` operations and disables `WHERE→PREWHERE` optimization.
+
+[Original article](https://clickhouse.tech/docs/en/operations/settings/settings_users/)
diff --git a/docs/zh/operations/system_tables.md b/docs/zh/operations/system_tables.md
deleted file mode 120000
index c5701190dca..00000000000
--- a/docs/zh/operations/system_tables.md
+++ /dev/null
@@ -1 +0,0 @@
-../../en/operations/system_tables.md
\ No newline at end of file
diff --git a/docs/zh/operations/system_tables.md b/docs/zh/operations/system_tables.md
new file mode 100644
index 00000000000..af47b99222a
--- /dev/null
+++ b/docs/zh/operations/system_tables.md
@@ -0,0 +1,1078 @@
+---
+en_copy: true
+---
+
+# System tables {#system-tables}
+
+System tables are used for implementing part of the system’s functionality, and for providing access to information about how the system is working.
+You can’t delete a system table (but you can perform DETACH).
+System tables don’t have files with data on the disk or files with metadata. The server creates all the system tables when it starts.
+System tables are read-only.
+They are located in the ‘system’ database.
+
+## system.asynchronous\_metrics {#system_tables-asynchronous_metrics}
+
+Contains metrics that are calculated periodically in the background. For example, the amount of RAM in use.
+
+Columns:
+
+- `metric` ([String](../data_types/string.md)) — Metric name.
+- `value` ([Float64](../data_types/float.md)) — Metric value.
+
+**Example**
+
+``` sql
+SELECT * FROM system.asynchronous_metrics LIMIT 10
+```
+
+``` text
+┌─metric──────────────────────────────────┬──────value─┐
+│ jemalloc.background_thread.run_interval │          0 │
+│ jemalloc.background_thread.num_runs     │          0 │
+│ jemalloc.background_thread.num_threads  │          0 │
+│ jemalloc.retained                       │  422551552 │
+│ jemalloc.mapped                         │ 1682989056 │
+│ jemalloc.resident                       │ 1656446976 │
+│ jemalloc.metadata_thp                   │          0 │
+│ jemalloc.metadata                       │   10226856 │
+│ UncompressedCacheCells                  │          0 │
+│ MarkCacheFiles                          │          0 │
+└─────────────────────────────────────────┴────────────┘
+```
+
+**See Also**
+
+- [Monitoring](monitoring.md) — Base concepts of ClickHouse monitoring.
+- [system.metrics](#system_tables-metrics) — Contains instantly calculated metrics.
+- [system.events](#system_tables-events) — Contains a number of events that have occurred.
+- [system.metric\_log](#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` and `system.events`.
+
+## system.clusters {#system-clusters}
+
+Contains information about clusters available in the config file and the servers in them.
+
+Columns:
+
+- `cluster` (String) — The cluster name.
+- `shard_num` (UInt32) — The shard number in the cluster, starting from 1.
+- `shard_weight` (UInt32) — The relative weight of the shard when writing data.
+- `replica_num` (UInt32) — The replica number in the shard, starting from 1.
+- `host_name` (String) — The host name, as specified in the config.
+- `host_address` (String) — The host IP address obtained from DNS.
+- `port` (UInt16) — The port to use for connecting to the server.
+- `user` (String) — The name of the user for connecting to the server.
+- `errors_count` (UInt32) — The number of times this host failed to reach the replica.
+- `estimated_recovery_time` (UInt32) — Seconds left until the replica error count is zeroed and it is considered to be back to normal.
+
+Please note that `errors_count` is updated once per query to the cluster, but `estimated_recovery_time` is recalculated on demand. So there could be a case of non-zero `errors_count` and zero `estimated_recovery_time`; the next query will then zero `errors_count` and try to use the replica as if it had no errors.
+
+**See also**
+
+- [Table engine Distributed](table_engines/distributed.md)
+- [distributed\_replica\_error\_cap setting](settings/settings.md#settings-distributed_replica_error_cap)
+- [distributed\_replica\_error\_half\_life setting](settings/settings.md#settings-distributed_replica_error_half_life)
+
+## system.columns {#system-columns}
+
+Contains information about columns in all the tables.
+
+You can use this table to get information similar to the [DESCRIBE TABLE](../query_language/misc.md#misc-describe-table) query, but for multiple tables at once.
+
+The `system.columns` table contains the following columns (the column type is shown in brackets):
+
+- `database` (String) — Database name.
+- `table` (String) — Table name.
+- `name` (String) — Column name.
+- `type` (String) — Column type.
+- `default_kind` (String) — Expression type (`DEFAULT`, `MATERIALIZED`, `ALIAS`) for the default value, or an empty string if it is not defined.
+- `default_expression` (String) — Expression for the default value, or an empty string if it is not defined.
+- `data_compressed_bytes` (UInt64) — The size of compressed data, in bytes.
+- `data_uncompressed_bytes` (UInt64) — The size of decompressed data, in bytes.
+- `marks_bytes` (UInt64) — The size of marks, in bytes.
+- `comment` (String) — Comment on the column, or an empty string if it is not defined.
+- `is_in_partition_key` (UInt8) — Flag that indicates whether the column is in the partition expression.
+- `is_in_sorting_key` (UInt8) — Flag that indicates whether the column is in the sorting key expression.
+- `is_in_primary_key` (UInt8) — Flag that indicates whether the column is in the primary key expression.
+- `is_in_sampling_key` (UInt8) — Flag that indicates whether the column is in the sampling key expression.
+
+## system.contributors {#system-contributors}
+
+Contains information about contributors. All contributors are listed in random order. The order is random at query execution time.
+
+Columns:
+
+- `name` (String) — Contributor (author) name from git log.
+
+**Example**
+
+``` sql
+SELECT * FROM system.contributors LIMIT 10
+```
+
+``` text
+┌─name─────────────┐
+│ Olga Khvostikova │
+│ Max Vetrov       │
+│ LiuYangkuan      │
+│ svladykin        │
+│ zamulla          │
+│ Šimon Podlipský  │
+│ BayoNet          │
+│ Ilya Khomutov    │
+│ Amy Krishnevsky  │
+│ Loud_Scream      │
+└──────────────────┘
+```
+
+To find yourself in the table, use this query:
+
+``` sql
+SELECT * FROM system.contributors WHERE name='Olga Khvostikova'
+```
+
+``` text
+┌─name─────────────┐
+│ Olga Khvostikova │
+└──────────────────┘
+```
+
+## system.databases {#system-databases}
+
+This table contains a single String column called ‘name’ – the name of a database.
+Each database that the server knows about has a corresponding entry in the table.
+This system table is used for implementing the `SHOW DATABASES` query.
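+
+For example, to list all databases except `system` (a minimal sketch):
+
+``` sql
+SELECT name FROM system.databases WHERE name != 'system'
+```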
+
+## system.detached\_parts {#system_tables-detached_parts}
+
+Contains information about detached parts of [MergeTree](table_engines/mergetree.md) tables. The `reason` column specifies why the part was detached. For user-detached parts, the reason is empty. Such parts can be attached with the [ALTER TABLE ATTACH PARTITION\|PART](../query_language/alter.md#alter_attach-partition) command. For the description of other columns, see [system.parts](#system_tables-parts). If the part name is invalid, values of some columns may be `NULL`. Such parts can be deleted with [ALTER TABLE DROP DETACHED PART](../query_language/alter.md#alter_drop-detached).
+
+## system.dictionaries {#system-dictionaries}
+
+Contains information about external dictionaries.
+
+Columns:
+
+- `name` (String) — Dictionary name.
+- `type` (String) — Dictionary type: Flat, Hashed, Cache.
+- `origin` (String) — Path to the configuration file that describes the dictionary.
+- `attribute.names` (Array(String)) — Array of attribute names provided by the dictionary.
+- `attribute.types` (Array(String)) — Corresponding array of attribute types that are provided by the dictionary.
+- `has_hierarchy` (UInt8) — Whether the dictionary is hierarchical.
+- `bytes_allocated` (UInt64) — The amount of RAM the dictionary uses.
+- `hit_rate` (Float64) — For cache dictionaries, the percentage of uses for which the value was in the cache.
+- `element_count` (UInt64) — The number of items stored in the dictionary.
+- `load_factor` (Float64) — The percentage filled in the dictionary (for a hashed dictionary, the percentage filled in the hash table).
+- `creation_time` (DateTime) — The time when the dictionary was created or last successfully reloaded.
+- `last_exception` (String) — Text of the error that occurs when creating or reloading the dictionary if the dictionary couldn’t be created.
+- `source` (String) — Text describing the data source for the dictionary.
+
+Note that the amount of memory used by the dictionary is not proportional to the number of items stored in it. So for flat and cached dictionaries, all the memory cells are pre-assigned, regardless of how full the dictionary actually is.
+
+## system.events {#system_tables-events}
+
+Contains information about the number of events that have occurred in the system. For example, in the table, you can find how many `SELECT` queries were processed since the ClickHouse server started.
+
+Columns:
+
+- `event` ([String](../data_types/string.md)) — Event name.
+- `value` ([UInt64](../data_types/int_uint.md)) — Number of events occurred.
+- `description` ([String](../data_types/string.md)) — Event description.
+
+**Example**
+
+``` sql
+SELECT * FROM system.events LIMIT 5
+```
+
+``` text
+┌─event─────────────────────────────────┬─value─┬─description───────────────────────────────────────────────────────────────────────────────────────────┐
+│ Query                                 │    12 │ Number of queries to be interpreted and potentially executed. Does not include queries that failed to parse or were rejected due to AST size limits, quota limits or limits on the number of simultaneously running queries. May include internal queries initiated by ClickHouse itself. Does not count subqueries. │
+│ SelectQuery                           │     8 │ Same as Query, but only for SELECT queries.                                                            │
+│ FileOpen                              │    73 │ Number of files opened.                                                                                │
+│ ReadBufferFromFileDescriptorRead      │   155 │ Number of reads (read/pread) from a file descriptor. Does not include sockets.                         │
+│ ReadBufferFromFileDescriptorReadBytes │  9931 │ Number of bytes read from file descriptors. If the file is compressed, this will show the compressed data size. │
+└───────────────────────────────────────┴───────┴────────────────────────────────────────────────────────────────────────────────────────────────────────┘
+```
+
+**See Also**
+
+- [system.asynchronous\_metrics](#system_tables-asynchronous_metrics) — Contains periodically calculated metrics.
+- [system.metrics](#system_tables-metrics) — Contains instantly calculated metrics.
+- [system.metric\_log](#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` and `system.events`.
+- [Monitoring](monitoring.md) — Base concepts of ClickHouse monitoring.
+
+## system.functions {#system-functions}
+
+Contains information about normal and aggregate functions.
+
+Columns:
+
+- `name`(`String`) – The name of the function.
+- `is_aggregate`(`UInt8`) — Whether the function is aggregate.
+
+## system.graphite\_retentions {#system-graphite-retentions}
+
+Contains information about parameters [graphite\_rollup](server_settings/settings.md#server_settings-graphite_rollup) which are used in tables with [\*GraphiteMergeTree](table_engines/graphitemergetree.md) engines.
+
+Columns:
+
+- `config_name` (String) - `graphite_rollup` parameter name.
+- `regexp` (String) - A pattern for the metric name.
+- `function` (String) - The name of the aggregating function.
+- `age` (UInt64) - The minimum age of the data in seconds.
+- `precision` (UInt64) - How precisely to define the age of the data in seconds.
+- `priority` (UInt16) - Pattern priority.
+- `is_default` (UInt8) - Whether the pattern is the default.
+- `Tables.database` (Array(String)) - Array of names of database tables that use the `config_name` parameter.
+- `Tables.table` (Array(String)) - Array of table names that use the `config_name` parameter.
+
+## system.merges {#system-merges}
+
+Contains information about merges and part mutations currently in process for tables in the MergeTree family.
+
+Columns:
+
+- `database` (String) — The name of the database the table is in.
+- `table` (String) — Table name.
+- `elapsed` (Float64) — The time elapsed (in seconds) since the merge started.
+- `progress` (Float64) — The percentage of completed work from 0 to 1.
+- `num_parts` (UInt64) — The number of pieces to be merged.
+- `result_part_name` (String) — The name of the part that will be formed as the result of merging.
+- `is_mutation` (UInt8) - 1 if this process is a part mutation.
+- `total_size_bytes_compressed` (UInt64) — The total size of the compressed data in the merged chunks.
+- `total_size_marks` (UInt64) — The total number of marks in the merged parts.
+- `bytes_read_uncompressed` (UInt64) — Number of bytes read, uncompressed.
+- `rows_read` (UInt64) — Number of rows read.
+- `bytes_written_uncompressed` (UInt64) — Number of bytes written, uncompressed.
+- `rows_written` (UInt64) — Number of rows written.
+
+## system.metrics {#system_tables-metrics}
+
+Contains metrics which can be calculated instantly, or have a current value. For example, the number of simultaneously processed queries or the current replica delay. This table is always up to date.
+
+Columns:
+
+- `metric` ([String](../data_types/string.md)) — Metric name.
+- `value` ([Int64](../data_types/int_uint.md)) — Metric value.
+- `description` ([String](../data_types/string.md)) — Metric description.
+
+You can find the list of supported metrics in the [dbms/Common/CurrentMetrics.cpp](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/Common/CurrentMetrics.cpp) source file of ClickHouse.
+
+**Example**
+
+``` sql
+SELECT * FROM system.metrics LIMIT 10
+```
+
+``` text
+┌─metric─────────────────────┬─value─┬─description──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐
+│ Query │ 1 │ Number of executing queries │
+│ Merge │ 0 │ Number of executing background merges │
+│ PartMutation │ 0 │ Number of mutations (ALTER DELETE/UPDATE) │
+│ ReplicatedFetch │ 0 │ Number of data parts being fetched from replicas │
+│ ReplicatedSend │ 0 │ Number of data parts being sent to replicas │
+│ ReplicatedChecks │ 0 │ Number of data parts checking for consistency │
+│ BackgroundPoolTask │ 0 │ Number of active tasks in BackgroundProcessingPool (merges, mutations, fetches, or replication queue bookkeeping) │
+│ BackgroundSchedulePoolTask │ 0 │ Number of active tasks in BackgroundSchedulePool. This pool is used for periodic ReplicatedMergeTree tasks, like cleaning old data parts, altering data parts, replica re-initialization, etc. │
+│ DiskSpaceReservedForMerge │ 0 │ Disk space reserved for currently running background merges. It is slightly more than the total size of currently merging parts. │
+│ DistributedSend │ 0 │ Number of connections to remote servers sending data that was INSERTed into Distributed tables. Both synchronous and asynchronous mode. │
+└────────────────────────────┴───────┴──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
+```
+
+**See Also**
+
+- [system.asynchronous\_metrics](#system_tables-asynchronous_metrics) — Contains periodically calculated metrics.
+- [system.events](#system_tables-events) — Contains a number of events that occurred.
+- [system.metric\_log](#system_tables-metric_log) — Contains a history of metrics values from the tables `system.metrics` and `system.events`.
+- [Monitoring](monitoring.md) — Base concepts of ClickHouse monitoring.
+
+## system.metric\_log {#system_tables-metric_log}
+
+Contains history of metrics values from tables `system.metrics` and `system.events`, periodically flushed to disk.
+To turn on metrics history collection on `system.metric_log`, create `/etc/clickhouse-server/config.d/metric_log.xml` with the following content:
+
+``` xml
+<yandex>
+    <metric_log>
+        <database>system</database>
+        <table>metric_log</table>
+        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+        <collect_interval_milliseconds>1000</collect_interval_milliseconds>
+    </metric_log>
+</yandex>
    +``` + +**Example** + +``` sql +SELECT * FROM system.metric_log LIMIT 1 FORMAT Vertical; +``` + +``` text +Row 1: +────── +event_date: 2020-02-18 +event_time: 2020-02-18 07:15:33 +milliseconds: 554 +ProfileEvent_Query: 0 +ProfileEvent_SelectQuery: 0 +ProfileEvent_InsertQuery: 0 +ProfileEvent_FileOpen: 0 +ProfileEvent_Seek: 0 +ProfileEvent_ReadBufferFromFileDescriptorRead: 1 +ProfileEvent_ReadBufferFromFileDescriptorReadFailed: 0 +ProfileEvent_ReadBufferFromFileDescriptorReadBytes: 0 +ProfileEvent_WriteBufferFromFileDescriptorWrite: 1 +ProfileEvent_WriteBufferFromFileDescriptorWriteFailed: 0 +ProfileEvent_WriteBufferFromFileDescriptorWriteBytes: 56 +... +CurrentMetric_Query: 0 +CurrentMetric_Merge: 0 +CurrentMetric_PartMutation: 0 +CurrentMetric_ReplicatedFetch: 0 +CurrentMetric_ReplicatedSend: 0 +CurrentMetric_ReplicatedChecks: 0 +... +``` + +**See also** + +- [system.asynchronous\_metrics](#system_tables-asynchronous_metrics) — Contains periodically calculated metrics. +- [system.events](#system_tables-events) — Contains a number of events that occurred. +- [system.metrics](#system_tables-metrics) — Contains instantly calculated metrics. +- [Monitoring](monitoring.md) — Base concepts of ClickHouse monitoring. + +## system.numbers {#system-numbers} + +This table contains a single UInt64 column named ‘number’ that contains almost all the natural numbers starting from zero. +You can use this table for tests, or if you need to do a brute force search. +Reads from this table are not parallelized. + +## system.numbers\_mt {#system-numbers-mt} + +The same as ‘system.numbers’ but reads are parallelized. The numbers can be returned in any order. +Used for tests. + +## system.one {#system-one} + +This table contains a single row with a single ‘dummy’ UInt8 column containing the value 0. +This table is used if a SELECT query doesn’t specify the FROM clause. +This is similar to the DUAL table found in other DBMSs. + +## system.parts {#system_tables-parts} + +Contains information about parts of [MergeTree](table_engines/mergetree.md) tables. + +Each row describes one data part. + +Columns: + +- `partition` (String) – The partition name. To learn what a partition is, see the description of the [ALTER](../query_language/alter.md#query_language_queries_alter) query. + + Formats: + + - `YYYYMM` for automatic partitioning by month. + - `any_string` when partitioning manually. + +- `name` (`String`) – Name of the data part. + +- `active` (`UInt8`) – Flag that indicates whether the data part is active. If a data part is active, it’s used in a table. Otherwise, it’s deleted. Inactive data parts remain after merging. + +- `marks` (`UInt64`) – The number of marks. To get the approximate number of rows in a data part, multiply `marks` by the index granularity (usually 8192) (this hint doesn’t work for adaptive granularity). + +- `rows` (`UInt64`) – The number of rows. + +- `bytes_on_disk` (`UInt64`) – Total size of all the data part files in bytes. + +- `data_compressed_bytes` (`UInt64`) – Total size of compressed data in the data part. All the auxiliary files (for example, files with marks) are not included. + +- `data_uncompressed_bytes` (`UInt64`) – Total size of uncompressed data in the data part. All the auxiliary files (for example, files with marks) are not included. + +- `marks_bytes` (`UInt64`) – The size of the file with marks. + +- `modification_time` (`DateTime`) – The time the directory with the data part was modified. 
This usually corresponds to the time of data part creation.
+
+- `remove_time` (`DateTime`) – The time when the data part became inactive.
+
+- `refcount` (`UInt32`) – The number of places where the data part is used. A value greater than 2 indicates that the data part is used in queries or merges.
+
+- `min_date` (`Date`) – The minimum value of the date key in the data part.
+
+- `max_date` (`Date`) – The maximum value of the date key in the data part.
+
+- `min_time` (`DateTime`) – The minimum value of the date and time key in the data part.
+
+- `max_time` (`DateTime`) – The maximum value of the date and time key in the data part.
+
+- `partition_id` (`String`) – ID of the partition.
+
+- `min_block_number` (`UInt64`) – The minimum number of data parts that make up the current part after merging.
+
+- `max_block_number` (`UInt64`) – The maximum number of data parts that make up the current part after merging.
+
+- `level` (`UInt32`) – Depth of the merge tree. Zero means that the current part was created by insert rather than by merging other parts.
+
+- `data_version` (`UInt64`) – Number that is used to determine which mutations should be applied to the data part (mutations with a version higher than `data_version`).
+
+- `primary_key_bytes_in_memory` (`UInt64`) – The amount of memory (in bytes) used by primary key values.
+
+- `primary_key_bytes_in_memory_allocated` (`UInt64`) – The amount of memory (in bytes) reserved for primary key values.
+
+- `is_frozen` (`UInt8`) – Flag that shows whether a partition data backup exists: 1 means the backup exists, 0 means it doesn’t. For more details, see [FREEZE PARTITION](../query_language/alter.md#alter_freeze-partition).
+
+- `database` (`String`) – Name of the database.
+
+- `table` (`String`) – Name of the table.
+
+- `engine` (`String`) – Name of the table engine without parameters.
+
+- `path` (`String`) – Absolute path to the folder with data part files.
+
+- `disk` (`String`) – Name of a disk that stores the data part.
+
+- `hash_of_all_files` (`String`) – [sipHash128](../query_language/functions/hash_functions.md#hash_functions-siphash128) of compressed files.
+
+- `hash_of_uncompressed_files` (`String`) – [sipHash128](../query_language/functions/hash_functions.md#hash_functions-siphash128) of uncompressed files (files with marks, index file etc.).
+
+- `uncompressed_hash_of_compressed_files` (`String`) – [sipHash128](../query_language/functions/hash_functions.md#hash_functions-siphash128) of data in the compressed files as if they were uncompressed.
+
+- `bytes` (`UInt64`) – Alias for `bytes_on_disk`.
+
+- `marks_size` (`UInt64`) – Alias for `marks_bytes`.
+
+## system.part\_log {#system_tables-part-log}
+
+The `system.part_log` table is created only if the [part\_log](server_settings/settings.md#server_settings-part-log) server setting is specified.
+
+This table contains information about events that occurred with [data parts](table_engines/custom_partitioning_key.md) in the [MergeTree](table_engines/mergetree.md) family tables, such as adding or merging data.
+
+The `system.part_log` table contains the following columns:
+
+- `event_type` (Enum) — Type of the event that occurred with the data part. Can have one of the following values:
+    - `NEW_PART` — Inserting of a new data part.
+    - `MERGE_PARTS` — Merging of data parts.
+    - `DOWNLOAD_PART` — Downloading a data part.
+    - `REMOVE_PART` — Removing or detaching a data part using [DETACH PARTITION](../query_language/alter.md#alter_detach-partition).
+    - `MUTATE_PART` — Mutating of a data part.
+    - `MOVE_PART` — Moving the data part from one disk to another.
+- `event_date` (Date) — Event date.
+- `event_time` (DateTime) — Event time.
+- `duration_ms` (UInt64) — Duration.
+- `database` (String) — Name of the database the data part is in.
+- `table` (String) — Name of the table the data part is in.
+- `part_name` (String) — Name of the data part.
+- `partition_id` (String) — ID of the partition that the data part was inserted to. The column takes the ‘all’ value if the partitioning is by `tuple()`.
+- `rows` (UInt64) — The number of rows in the data part.
+- `size_in_bytes` (UInt64) — Size of the data part in bytes.
+- `merged_from` (Array(String)) — An array of names of the parts from which the current part was made up (after the merge).
+- `bytes_uncompressed` (UInt64) — Uncompressed size in bytes.
+- `read_rows` (UInt64) — The number of rows read during the merge.
+- `read_bytes` (UInt64) — The number of bytes read during the merge.
+- `error` (UInt16) — The code of the error that occurred.
+- `exception` (String) — Text message of the error that occurred.
+
+The `system.part_log` table is created after the first insertion of data into a `MergeTree` table.
+
+## system.processes {#system_tables-processes}
+
+This system table is used for implementing the `SHOW PROCESSLIST` query.
+
+Columns:
+
+- `user` (String) – The user who made the query. Keep in mind that for distributed processing, queries are sent to remote servers under the `default` user. The field contains the username for a specific query, not for a query that this query initiated.
+- `address` (String) – The IP address the request was made from. The same for distributed processing. To track where a distributed query was originally made from, look at `system.processes` on the query requestor server.
+- `elapsed` (Float64) – The time in seconds since request execution started.
+- `rows_read` (UInt64) – The number of rows read from the table. For distributed processing, on the requestor server, this is the total for all remote servers.
+- `bytes_read` (UInt64) – The number of uncompressed bytes read from the table. For distributed processing, on the requestor server, this is the total for all remote servers.
+- `total_rows_approx` (UInt64) – The approximation of the total number of rows that should be read. For distributed processing, on the requestor server, this is the total for all remote servers. It can be updated during request processing, when new sources to process become known.
+- `memory_usage` (UInt64) – Amount of RAM the request uses. It might not include some types of dedicated memory. See the [max\_memory\_usage](../operations/settings/query_complexity.md#settings_max_memory_usage) setting.
+- `query` (String) – The query text. For `INSERT`, it doesn’t include the data to insert.
+- `query_id` (String) – Query ID, if defined.
+
+## system.text\_log {#system-tables-text-log}
+
+Contains logging entries. The logging level of entries that go to this table can be limited with the `text_log.level` server setting.
+
+Columns:
+
+- `event_date` (`Date`) - Date of the entry.
+- `event_time` (`DateTime`) - Time of the entry.
+- `microseconds` (`UInt32`) - Microseconds of the entry.
+- `thread_name` (String) — Name of the thread from which the logging was done.
+- `thread_id` (UInt64) — OS thread ID.
+- `level` (`Enum8`) - Entry level.
+    - `'Fatal' = 1`
+    - `'Critical' = 2`
+    - `'Error' = 3`
+    - `'Warning' = 4`
+    - `'Notice' = 5`
+    - `'Information' = 6`
+    - `'Debug' = 7`
+    - `'Trace' = 8`
+- `query_id` (`String`) - ID of the query.
+- `logger_name` (`LowCardinality(String)`) - Name of the logger (e.g. `DDLWorker`).
+- `message` (`String`) - The message itself.
+- `revision` (`UInt32`) - ClickHouse revision.
+- `source_file` (`LowCardinality(String)`) - Source file from which the logging was done.
+- `source_line` (`UInt64`) - Source line from which the logging was done.
+
+## system.query\_log {#system_tables-query_log}
+
+Contains information about the execution of queries. For each query, you can see the processing start time, duration of processing, error messages and other information.
+
+!!! note "Note"
+    The table doesn’t contain input data for `INSERT` queries.
+
+ClickHouse creates this table only if the [query\_log](server_settings/settings.md#server_settings-query-log) server parameter is specified. This parameter sets the logging rules, such as the logging interval or the name of the table the queries will be logged in.
+
+To enable query logging, set the [log\_queries](settings/settings.md#settings-log-queries) parameter to 1. For details, see the [Settings](settings/settings.md) section.
+
+The `system.query_log` table registers two kinds of queries:
+
+1. Initial queries that were run directly by the client.
+2. Child queries that were initiated by other queries (for distributed query execution). For these types of queries, information about the parent queries is shown in the `initial_*` columns.
+
+Columns:
+
+- `type` (`Enum8`) — Type of event that occurred when executing the query. Values:
+    - `'QueryStart' = 1` — Successful start of query execution.
+    - `'QueryFinish' = 2` — Successful end of query execution.
+    - `'ExceptionBeforeStart' = 3` — Exception before the start of query execution.
+    - `'ExceptionWhileProcessing' = 4` — Exception during the query execution.
+- `event_date` (Date) — Query starting date.
+- `event_time` (DateTime) — Query starting time.
+- `query_start_time` (DateTime) — Start time of query execution.
+- `query_duration_ms` (UInt64) — Duration of query execution.
+- `read_rows` (UInt64) — Number of read rows.
+- `read_bytes` (UInt64) — Number of read bytes.
+- `written_rows` (UInt64) — For `INSERT` queries, the number of written rows. For other queries, the column value is 0.
+- `written_bytes` (UInt64) — For `INSERT` queries, the number of written bytes. For other queries, the column value is 0.
+- `result_rows` (UInt64) — Number of rows in the result.
+- `result_bytes` (UInt64) — Number of bytes in the result.
+- `memory_usage` (UInt64) — Memory consumption by the query.
+- `query` (String) — Query string.
+- `exception` (String) — Exception message.
+- `stack_trace` (String) — Stack trace (a list of methods called before the error occurred). An empty string if the query completed successfully.
+- `is_initial_query` (UInt8) — Query type. Possible values:
+    - 1 — Query was initiated by the client.
+    - 0 — Query was initiated by another query for distributed query execution.
+- `user` (String) — Name of the user who initiated the current query.
+- `query_id` (String) — ID of the query.
+- `address` (IPv6) — IP address that was used to make the query.
+- `port` (UInt16) — The client port that was used to make the query.
+- `initial_user` (String) — Name of the user who ran the initial query (for distributed query execution).
+- `initial_query_id` (String) — ID of the initial query (for distributed query execution).
+- `initial_address` (IPv6) — IP address that the parent query was launched from.
+- `initial_port` (UInt16) — The client port that was used to make the parent query.
+- `interface` (UInt8) — Interface that the query was initiated from. Possible values:
+    - 1 — TCP.
+    - 2 — HTTP.
+- `os_user` (String) — The username of the OS user who runs [clickhouse-client](../interfaces/cli.md).
+- `client_hostname` (String) — Hostname of the client machine where the [clickhouse-client](../interfaces/cli.md) or another TCP client is run.
+- `client_name` (String) — The [clickhouse-client](../interfaces/cli.md) or another TCP client name.
+- `client_revision` (UInt32) — Revision of the [clickhouse-client](../interfaces/cli.md) or another TCP client.
+- `client_version_major` (UInt32) — Major version of the [clickhouse-client](../interfaces/cli.md) or another TCP client.
+- `client_version_minor` (UInt32) — Minor version of the [clickhouse-client](../interfaces/cli.md) or another TCP client.
+- `client_version_patch` (UInt32) — Patch component of the [clickhouse-client](../interfaces/cli.md) or another TCP client version.
+- `http_method` (UInt8) — HTTP method that initiated the query. Possible values:
+    - 0 — The query was launched from the TCP interface.
+    - 1 — `GET` method was used.
+    - 2 — `POST` method was used.
+- `http_user_agent` (String) — The `UserAgent` header passed in the HTTP request.
+- `quota_key` (String) — The “quota key” specified in the [quotas](quotas.md) setting (see `keyed`).
+- `revision` (UInt32) — ClickHouse revision.
+- `thread_numbers` (Array(UInt32)) — Numbers of the threads that are participating in query execution.
+- `ProfileEvents.Names` (Array(String)) — Counters that measure different metrics. Their descriptions can be found in the [system.events](#system_tables-events) table.
+- `ProfileEvents.Values` (Array(UInt64)) — Values of metrics that are listed in the `ProfileEvents.Names` column.
+- `Settings.Names` (Array(String)) — Names of settings that were changed when the client ran the query. To enable logging changes to settings, set the `log_query_settings` parameter to 1.
+- `Settings.Values` (Array(String)) — Values of settings that are listed in the `Settings.Names` column.
+
+Each query creates one or two rows in the `query_log` table, depending on the status of the query:
+
+1. If the query execution is successful, two events with types 1 and 2 are created (see the `type` column).
+2. If an error occurred during query processing, two events with types 1 and 4 are created.
+3. If an error occurred before launching the query, a single event with type 3 is created.
+
+By default, logs are added to the table at intervals of 7.5 seconds. You can set this interval in the [query\_log](server_settings/settings.md#server_settings-query-log) server setting (see the `flush_interval_milliseconds` parameter). To flush the logs forcibly from the memory buffer into the table, use the `SYSTEM FLUSH LOGS` query.
+
+When the table is deleted manually, it will be automatically created on the fly. Note that all the previous logs will be deleted.
+
+!!! note "Note"
+    The storage period for logs is unlimited. Logs aren’t automatically deleted from the table. You need to organize the removal of outdated logs yourself.
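+
+As an illustration, a sketch of a typical query over this table (not part of the table definition; adjust the filter to your needs) that finds today’s five slowest completed queries:
+
+``` sql
+SELECT query, query_duration_ms
+FROM system.query_log
+WHERE type = 'QueryFinish' AND event_date = today()
+ORDER BY query_duration_ms DESC
+LIMIT 5
+```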
+
+You can specify an arbitrary partitioning key for the `system.query_log` table in the [query\_log](server_settings/settings.md#server_settings-query-log) server setting (see the `partition_by` parameter).
+
+## system.query\_thread\_log {#system_tables-query-thread-log}
+
+The table contains information about each query execution thread.
+
+ClickHouse creates this table only if the [query\_thread\_log](server_settings/settings.md#server_settings-query-thread-log) server parameter is specified. This parameter sets the logging rules, such as the logging interval or the name of the table the queries will be logged in.
+
+To enable query logging, set the [log\_query\_threads](settings/settings.md#settings-log-query-threads) parameter to 1. For details, see the [Settings](settings/settings.md) section.
+
+Columns:
+
+- `event_date` (Date) — The date when the thread finished execution of the query.
+- `event_time` (DateTime) — The date and time when the thread finished execution of the query.
+- `query_start_time` (DateTime) — Start time of query execution.
+- `query_duration_ms` (UInt64) — Duration of query execution.
+- `read_rows` (UInt64) — Number of read rows.
+- `read_bytes` (UInt64) — Number of read bytes.
+- `written_rows` (UInt64) — For `INSERT` queries, the number of written rows. For other queries, the column value is 0.
+- `written_bytes` (UInt64) — For `INSERT` queries, the number of written bytes. For other queries, the column value is 0.
+- `memory_usage` (Int64) — The difference between the amount of allocated and freed memory in the context of this thread.
+- `peak_memory_usage` (Int64) — The maximum difference between the amount of allocated and freed memory in the context of this thread.
+- `thread_name` (String) — Name of the thread.
+- `thread_number` (UInt32) — Internal thread ID.
+- `os_thread_id` (Int32) — OS thread ID.
+- `master_thread_id` (UInt64) — OS thread ID of the initial thread.
+- `query` (String) — Query string.
+- `is_initial_query` (UInt8) — Query type. Possible values:
+    - 1 — Query was initiated by the client.
+    - 0 — Query was initiated by another query for distributed query execution.
+- `user` (String) — Name of the user who initiated the current query.
+- `query_id` (String) — ID of the query.
+- `address` (IPv6) — IP address that was used to make the query.
+- `port` (UInt16) — The client port that was used to make the query.
+- `initial_user` (String) — Name of the user who ran the initial query (for distributed query execution).
+- `initial_query_id` (String) — ID of the initial query (for distributed query execution).
+- `initial_address` (IPv6) — IP address that the parent query was launched from.
+- `initial_port` (UInt16) — The client port that was used to make the parent query.
+- `interface` (UInt8) — Interface that the query was initiated from. Possible values:
+    - 1 — TCP.
+    - 2 — HTTP.
+- `os_user` (String) — The username of the OS user who runs [clickhouse-client](../interfaces/cli.md).
+- `client_hostname` (String) — Hostname of the client machine where the [clickhouse-client](../interfaces/cli.md) or another TCP client is run.
+- `client_name` (String) — The [clickhouse-client](../interfaces/cli.md) or another TCP client name.
+- `client_revision` (UInt32) — Revision of the [clickhouse-client](../interfaces/cli.md) or another TCP client.
+- `client_version_major` (UInt32) — Major version of the [clickhouse-client](../interfaces/cli.md) or another TCP client.
+- `client_version_minor` (UInt32) — Minor version of the [clickhouse-client](../interfaces/cli.md) or another TCP client.
+- `client_version_patch` (UInt32) — Patch component of the [clickhouse-client](../interfaces/cli.md) or another TCP client version.
+- `http_method` (UInt8) — HTTP method that initiated the query. Possible values:
+    - 0 — The query was launched from the TCP interface.
+    - 1 — `GET` method was used.
+    - 2 — `POST` method was used.
+- `http_user_agent` (String) — The `UserAgent` header passed in the HTTP request.
+- `quota_key` (String) — The “quota key” specified in the [quotas](quotas.md) setting (see `keyed`).
+- `revision` (UInt32) — ClickHouse revision.
+- `ProfileEvents.Names` (Array(String)) — Counters that measure different metrics for this thread. Their descriptions can be found in the [system.events](#system_tables-events) table.
+- `ProfileEvents.Values` (Array(UInt64)) — Values of metrics for this thread that are listed in the `ProfileEvents.Names` column.
+
+By default, logs are added to the table at intervals of 7.5 seconds. You can set this interval in the [query\_thread\_log](server_settings/settings.md#server_settings-query-thread-log) server setting (see the `flush_interval_milliseconds` parameter). To flush the logs forcibly from the memory buffer into the table, use the `SYSTEM FLUSH LOGS` query.
+
+When the table is deleted manually, it will be automatically created on the fly. Note that all the previous logs will be deleted.
+
+!!! note "Note"
+    The storage period for logs is unlimited. Logs aren’t automatically deleted from the table. You need to organize the removal of outdated logs yourself.
+
+You can specify an arbitrary partitioning key for the `system.query_thread_log` table in the [query\_thread\_log](server_settings/settings.md#server_settings-query-thread-log) server setting (see the `partition_by` parameter).
+
+## system.trace\_log {#system_tables-trace_log}
+
+Contains stack traces collected by the sampling query profiler.
+
+ClickHouse creates this table when the [trace\_log](server_settings/settings.md#server_settings-trace_log) server configuration section is set. The [query\_profiler\_real\_time\_period\_ns](settings/settings.md#query_profiler_real_time_period_ns) and [query\_profiler\_cpu\_time\_period\_ns](settings/settings.md#query_profiler_cpu_time_period_ns) settings should also be set.
+
+To analyze logs, use the `addressToLine`, `addressToSymbol` and `demangle` introspection functions.
+
+Columns:
+
+- `event_date` ([Date](../data_types/date.md)) — Date of the sampling moment.
+
+- `event_time` ([DateTime](../data_types/datetime.md)) — Timestamp of the sampling moment.
+
+- `revision` ([UInt32](../data_types/int_uint.md)) — ClickHouse server build revision.
+
+    When connecting to the server with `clickhouse-client`, you see a string similar to `Connected to ClickHouse server version 19.18.1 revision 54429.`. This field contains the `revision`, but not the `version` of the server.
+
+- `timer_type` ([Enum8](../data_types/enum.md)) — Timer type:
+
+    - `Real` represents wall-clock time.
+    - `CPU` represents CPU time.
+
+- `thread_number` ([UInt32](../data_types/int_uint.md)) — Thread identifier.
+
+- `query_id` ([String](../data_types/string.md)) — Query identifier that can be used to get details about a query that was running from the [query\_log](#system_tables-query_log) system table.
+
+- `trace` ([Array(UInt64)](../data_types/array.md)) — Stack trace at the moment of sampling.
Each element is a virtual memory address inside the ClickHouse server process.
+
+**Example**
+
+``` sql
+SELECT * FROM system.trace_log LIMIT 1 \G
+```
+
+``` text
+Row 1:
+──────
+event_date: 2019-11-15
+event_time: 2019-11-15 15:09:38
+revision: 54428
+timer_type: Real
+thread_number: 48
+query_id: acc4d61f-5bd1-4a3e-bc91-2180be37c915
+trace: [94222141367858,94222152240175,94222152325351,94222152329944,94222152330796,94222151449980,94222144088167,94222151682763,94222144088167,94222151682763,94222144088167,94222144058283,94222144059248,94222091840750,94222091842302,94222091831228,94222189631488,140509950166747,140509942945935]
+```
+
+## system.replicas {#system_tables-replicas}
+
+Contains information and status for replicated tables residing on the local server.
+This table can be used for monitoring. The table contains a row for every Replicated\* table.
+
+Example:
+
+``` sql
+SELECT *
+FROM system.replicas
+WHERE table = 'visits'
+FORMAT Vertical
+```
+
+``` text
+Row 1:
+──────
+database: merge
+table: visits
+engine: ReplicatedCollapsingMergeTree
+is_leader: 1
+can_become_leader: 1
+is_readonly: 0
+is_session_expired: 0
+future_parts: 1
+parts_to_check: 0
+zookeeper_path: /clickhouse/tables/01-06/visits
+replica_name: example01-06-1.yandex.ru
+replica_path: /clickhouse/tables/01-06/visits/replicas/example01-06-1.yandex.ru
+columns_version: 9
+queue_size: 1
+inserts_in_queue: 0
+merges_in_queue: 1
+part_mutations_in_queue: 0
+queue_oldest_time: 2020-02-20 08:34:30
+inserts_oldest_time: 0000-00-00 00:00:00
+merges_oldest_time: 2020-02-20 08:34:30
+part_mutations_oldest_time: 0000-00-00 00:00:00
+oldest_part_to_get:
+oldest_part_to_merge_to: 20200220_20284_20840_7
+oldest_part_to_mutate_to:
+log_max_index: 596273
+log_pointer: 596274
+last_queue_update: 2020-02-20 08:34:32
+absolute_delay: 0
+total_replicas: 2
+active_replicas: 2
+```
+
+Columns:
+
+- `database` (`String`) - Database name
+- `table` (`String`) - Table name
+- `engine` (`String`) - Table engine name
+- `is_leader` (`UInt8`) - Whether the replica is the leader.
+    Only one replica at a time can be the leader. The leader is responsible for selecting background merges to perform.
+    Note that writes can be performed to any replica that is available and has a session in ZK, regardless of whether it is a leader.
+- `can_become_leader` (`UInt8`) - Whether the replica can be elected as a leader.
+- `is_readonly` (`UInt8`) - Whether the replica is in read-only mode.
+    This mode is turned on if the config doesn’t have sections with ZooKeeper, if an unknown error occurred when reinitializing sessions in ZooKeeper, and during session reinitialization in ZooKeeper.
+- `is_session_expired` (`UInt8`) - Whether the session with ZooKeeper has expired. Basically the same as `is_readonly`.
+- `future_parts` (`UInt32`) - The number of data parts that will appear as the result of INSERTs or merges that haven’t been done yet.
+- `parts_to_check` (`UInt32`) - The number of data parts in the queue for verification. A part is put in the verification queue if there is suspicion that it might be damaged.
+- `zookeeper_path` (`String`) - Path to table data in ZooKeeper.
+- `replica_name` (`String`) - Replica name in ZooKeeper. Different replicas of the same table have different names.
+- `replica_path` (`String`) - Path to replica data in ZooKeeper. The same as concatenating ‘zookeeper\_path/replicas/replica\_path’.
+- `columns_version` (`Int32`) - Version number of the table structure. Indicates how many times ALTER was performed.
If replicas have different versions, it means some replicas haven’t made all of the ALTERs yet.
+- `queue_size` (`UInt32`) - Size of the queue for operations waiting to be performed. Operations include inserting blocks of data, merges, and certain other actions. It usually coincides with `future_parts`.
+- `inserts_in_queue` (`UInt32`) - Number of inserts of blocks of data that need to be made. Insertions are usually replicated fairly quickly. If this number is large, it means something is wrong.
+- `merges_in_queue` (`UInt32`) - The number of merges waiting to be made. Sometimes merges are lengthy, so this value may be greater than zero for a long time.
+- `part_mutations_in_queue` (`UInt32`) - The number of mutations waiting to be made.
+- `queue_oldest_time` (`DateTime`) - If `queue_size` is greater than 0, shows when the oldest operation was added to the queue.
+- `inserts_oldest_time` (`DateTime`) - See `queue_oldest_time`
+- `merges_oldest_time` (`DateTime`) - See `queue_oldest_time`
+- `part_mutations_oldest_time` (`DateTime`) - See `queue_oldest_time`
+
+The next 4 columns have a non-zero value only if there is an active session with ZooKeeper.
+
+- `log_max_index` (`UInt64`) - Maximum entry number in the log of general activity.
+- `log_pointer` (`UInt64`) - Maximum entry number in the log of general activity that the replica copied to its execution queue, plus one. If `log_pointer` is much smaller than `log_max_index`, something is wrong.
+- `last_queue_update` (`DateTime`) - When the queue was updated the last time.
+- `absolute_delay` (`UInt64`) - The current replica’s lag, in seconds.
+- `total_replicas` (`UInt8`) - The total number of known replicas of this table.
+- `active_replicas` (`UInt8`) - The number of replicas of this table that have a session in ZooKeeper (i.e., the number of functioning replicas).
+
+If you request all the columns, the table may work a bit slowly, since several reads from ZooKeeper are made for each row.
+If you don’t request the last 4 columns (log\_max\_index, log\_pointer, total\_replicas, active\_replicas), the table works quickly.
+
+For example, you can check that everything is working correctly like this:
+
+``` sql
+SELECT
+    database,
+    table,
+    is_leader,
+    is_readonly,
+    is_session_expired,
+    future_parts,
+    parts_to_check,
+    columns_version,
+    queue_size,
+    inserts_in_queue,
+    merges_in_queue,
+    log_max_index,
+    log_pointer,
+    total_replicas,
+    active_replicas
+FROM system.replicas
+WHERE
+    is_readonly
+    OR is_session_expired
+    OR future_parts > 20
+    OR parts_to_check > 10
+    OR queue_size > 20
+    OR inserts_in_queue > 10
+    OR log_max_index - log_pointer > 10
+    OR total_replicas < 2
+    OR active_replicas < total_replicas
+```
+
+If this query doesn’t return anything, it means that everything is fine.
+
+## system.settings {#system-settings}
+
+Contains information about settings that are currently in use, i.e. the settings used for executing the query that you are using to read from the `system.settings` table.
+
+Columns:
+
+- `name` (String) — Setting name.
+- `value` (String) — Setting value.
+- `description` (String) — Setting description.
+- `type` (String) — Setting type (implementation specific string value).
+- `changed` (UInt8) — Whether the setting was explicitly defined in the config or explicitly changed.
+- `min` (Nullable(String)) — The minimum allowed value (if set via [constraints](settings/constraints_on_settings.md#constraints-on-settings)).
+- `max` (Nullable(String)) — The maximum allowed value (if set via [constraints](settings/constraints_on_settings.md#constraints-on-settings)).
+- `readonly` (UInt8) — Whether the user can change this setting (for more info, look into [constraints](settings/constraints_on_settings.md#constraints-on-settings)).
+
+Example:
+
+``` sql
+SELECT name, value
+FROM system.settings
+WHERE changed
+```
+
+``` text
+┌─name───────────────────┬─value───────┐
+│ max_threads            │ 8           │
+│ use_uncompressed_cache │ 0           │
+│ load_balancing         │ random      │
+│ max_memory_usage       │ 10000000000 │
+└────────────────────────┴─────────────┘
+```
+
+## system.merge\_tree\_settings {#system-merge_tree_settings}
+
+Contains information about settings for `MergeTree` tables.
+
+Columns:
+
+- `name` (String) — Setting name.
+- `value` (String) — Setting value.
+- `description` (String) — Setting description.
+- `type` (String) — Setting type (implementation specific string value).
+- `changed` (UInt8) — Whether the setting was explicitly defined in the config or explicitly changed.
+
+## system.table\_engines {#system-table-engines}
+
+Contains descriptions of the table engines supported by the server and their feature support information.
+
+This table contains the following columns (the column type is shown in brackets):
+
+- `name` (String) — The name of the table engine.
+- `supports_settings` (UInt8) — Flag that indicates if the table engine supports the `SETTINGS` clause.
+- `supports_skipping_indices` (UInt8) — Flag that indicates if the table engine supports [skipping indices](table_engines/mergetree/#table_engine-mergetree-data_skipping-indexes).
+- `supports_ttl` (UInt8) — Flag that indicates if the table engine supports [TTL](table_engines/mergetree/#table_engine-mergetree-ttl).
+- `supports_sort_order` (UInt8) — Flag that indicates if the table engine supports the clauses `PARTITION_BY`, `PRIMARY_KEY`, `ORDER_BY` and `SAMPLE_BY`.
+- `supports_replication` (UInt8) — Flag that indicates if the table engine supports [data replication](table_engines/replication/).
+- `supports_deduplication` (UInt8) — Flag that indicates if the table engine supports data deduplication.
+
+Example:
+
+``` sql
+SELECT *
+FROM system.table_engines
+WHERE name in ('Kafka', 'MergeTree', 'ReplicatedCollapsingMergeTree')
+```
+
+``` text
+┌─name──────────────────────────┬─supports_settings─┬─supports_skipping_indices─┬─supports_sort_order─┬─supports_ttl─┬─supports_replication─┬─supports_deduplication─┐
+│ Kafka                         │                 1 │                         0 │                   0 │            0 │                    0 │                      0 │
+│ MergeTree                     │                 1 │                         1 │                   1 │            1 │                    0 │                      0 │
+│ ReplicatedCollapsingMergeTree │                 1 │                         1 │                   1 │            1 │                    1 │                      1 │
+└───────────────────────────────┴───────────────────┴───────────────────────────┴─────────────────────┴──────────────┴──────────────────────┴────────────────────────┘
+```
+
+**See also**
+
+- MergeTree family [query clauses](table_engines/mergetree.md#mergetree-query-clauses)
+- Kafka [settings](table_engines/kafka.md#table_engine-kafka-creating-a-table)
+- Join [settings](table_engines/join.md#join-limitations-and-settings)
+
+## system.tables {#system-tables}
+
+Contains metadata of each table that the server knows about. Detached tables are not shown in `system.tables`.
+
+This table contains the following columns (the column type is shown in brackets):
+
+- `database` (String) — The name of the database the table is in.
+- `name` (String) — Table name.
+- `engine` (String) — Table engine name (without parameters).
+- `is_temporary` (UInt8) - Flag that indicates whether the table is temporary.
+- `data_path` (String) - Path to the table data in the file system.
+- `metadata_path` (String) - Path to the table metadata in the file system.
+- `metadata_modification_time` (DateTime) - Time of the latest modification of the table metadata.
+- `dependencies_database` (Array(String)) - Database dependencies.
+- `dependencies_table` (Array(String)) - Table dependencies ([MaterializedView](table_engines/materializedview.md) tables based on the current table).
+- `create_table_query` (String) - The query that was used to create the table.
+- `engine_full` (String) - Parameters of the table engine.
+- `partition_key` (String) - The partition key expression specified in the table.
+- `sorting_key` (String) - The sorting key expression specified in the table.
+- `primary_key` (String) - The primary key expression specified in the table.
+- `sampling_key` (String) - The sampling key expression specified in the table.
+- `storage_policy` (String) - The storage policy:
+
+    - [MergeTree](table_engines/mergetree.md#table_engine-mergetree-multiple-volumes)
+    - [Distributed](table_engines/distributed.md#distributed)
+
+- `total_rows` (Nullable(UInt64)) - Total number of rows, if it is possible to quickly determine the exact number of rows in the table, otherwise `Null` (including the underlying `Buffer` table).
+- `total_bytes` (Nullable(UInt64)) - Total number of bytes, if it is possible to quickly determine the exact number of bytes for the table on storage, otherwise `Null` (**does not** include any underlying storage).
+
+    - If the table stores data on disk, returns the used space on disk (i.e. compressed).
+    - If the table stores data in memory, returns the approximate number of used bytes in memory.
+
+The `system.tables` table is used in the `SHOW TABLES` query implementation.
+
+## system.zookeeper {#system-zookeeper}
+
+The table does not exist if ZooKeeper is not configured. Allows reading data from the ZooKeeper cluster defined in the config.
+The query must have a ‘path’ equality condition in the WHERE clause. This is the path in ZooKeeper for the children that you want to get data for.
+
+The query `SELECT * FROM system.zookeeper WHERE path = '/clickhouse'` outputs data for all children on the `/clickhouse` node.
+To output data for all root nodes, write path = ‘/’.
+If the path specified in ‘path’ doesn’t exist, an exception will be thrown.
+
+Columns:
+
+- `name` (String) — The name of the node.
+- `path` (String) — The path to the node.
+- `value` (String) — Node value.
+- `dataLength` (Int32) — Size of the value.
+- `numChildren` (Int32) — Number of descendants.
+- `czxid` (Int64) — ID of the transaction that created the node.
+- `mzxid` (Int64) — ID of the transaction that last changed the node.
+- `pzxid` (Int64) — ID of the transaction that last deleted or added descendants.
+- `ctime` (DateTime) — Time of node creation.
+- `mtime` (DateTime) — Time of the last modification of the node.
+- `version` (Int32) — Node version: the number of times the node was changed.
+- `cversion` (Int32) — Number of added or removed descendants.
+- `aversion` (Int32) — Number of changes to the ACL.
+- `ephemeralOwner` (Int64) — For ephemeral nodes, the ID of the session that owns this node.
+ +Example: + +``` sql +SELECT * +FROM system.zookeeper +WHERE path = '/clickhouse/tables/01-08/visits/replicas' +FORMAT Vertical +``` + +``` text +Row 1: +────── +name: example01-08-1.yandex.ru +value: +czxid: 932998691229 +mzxid: 932998691229 +ctime: 2015-03-27 16:49:51 +mtime: 2015-03-27 16:49:51 +version: 0 +cversion: 47 +aversion: 0 +ephemeralOwner: 0 +dataLength: 0 +numChildren: 7 +pzxid: 987021031383 +path: /clickhouse/tables/01-08/visits/replicas + +Row 2: +────── +name: example01-08-2.yandex.ru +value: +czxid: 933002738135 +mzxid: 933002738135 +ctime: 2015-03-27 16:57:01 +mtime: 2015-03-27 16:57:01 +version: 0 +cversion: 37 +aversion: 0 +ephemeralOwner: 0 +dataLength: 0 +numChildren: 7 +pzxid: 987021252247 +path: /clickhouse/tables/01-08/visits/replicas +``` + +## system.mutations {#system_tables-mutations} + +The table contains information about [mutations](../query_language/alter.md#alter-mutations) of MergeTree tables and their progress. Each mutation command is represented by a single row. The table has the following columns: + +**database**, **table** - The name of the database and table to which the mutation was applied. + +**mutation\_id** - The ID of the mutation. For replicated tables these IDs correspond to znode names in the `/mutations/` directory in ZooKeeper. For unreplicated tables the IDs correspond to file names in the data directory of the table. + +**command** - The mutation command string (the part of the query after `ALTER TABLE [db.]table`). + +**create\_time** - When this mutation command was submitted for execution. + +**block\_numbers.partition\_id**, **block\_numbers.number** - A nested column. For mutations of replicated tables, it contains one record for each partition: the partition ID and the block number that was acquired by the mutation (in each partition, only parts that contain blocks with numbers less than the block number acquired by the mutation in that partition will be mutated). In non-replicated tables, block numbers in all partitions form a single sequence. This means that for mutations of non-replicated tables, the column will contain one record with a single block number acquired by the mutation. + +**parts\_to\_do** - The number of data parts that need to be mutated for the mutation to finish. + +**is\_done** - Is the mutation done? Note that even if `parts_to_do = 0` it is possible that a mutation of a replicated table is not done yet because of a long-running INSERT that will create a new data part that will need to be mutated. + +If there were problems with mutating some parts, the following columns contain additional information: + +**latest\_failed\_part** - The name of the most recent part that could not be mutated. + +**latest\_fail\_time** - The time of the most recent part mutation failure. + +**latest\_fail\_reason** - The exception message that caused the most recent part mutation failure. + +## system.disks {#system_tables-disks} + +Contains information about disks defined in the [server configuration](table_engines/mergetree.md#table_engine-mergetree-multiple-volumes_configure). + +Columns: + +- `name` ([String](../data_types/string.md)) — Name of a disk in the server configuration. +- `path` ([String](../data_types/string.md)) — Path to the mount point in the file system. +- `free_space` ([UInt64](../data_types/int_uint.md)) — Free space on disk in bytes. +- `total_space` ([UInt64](../data_types/int_uint.md)) — Disk volume in bytes. 
+- `keep_free_space` ([UInt64](../data_types/int_uint.md)) — Amount of disk space that should stay free on the disk, in bytes. Defined in the `keep_free_space_bytes` parameter of the disk configuration.
+
+## system.storage\_policies {#system_tables-storage_policies}
+
+Contains information about storage policies and volumes defined in the [server configuration](table_engines/mergetree.md#table_engine-mergetree-multiple-volumes_configure).
+
+Columns:
+
+- `policy_name` ([String](../data_types/string.md)) — Name of the storage policy.
+- `volume_name` ([String](../data_types/string.md)) — Volume name defined in the storage policy.
+- `volume_priority` ([UInt64](../data_types/int_uint.md)) — Volume order number in the configuration.
+- `disks` ([Array(String)](../data_types/array.md)) — Disk names, defined in the storage policy.
+- `max_data_part_size` ([UInt64](../data_types/int_uint.md)) — Maximum size of a data part that can be stored on volume disks (0 — no limit).
+- `move_factor` ([Float64](../data_types/float.md)) — Ratio of free disk space. When the ratio exceeds the value of this configuration parameter, ClickHouse starts to move data to the next volume in order.
+
+If the storage policy contains more than one volume, then information for each volume is stored in an individual row of the table.
+
+[Original article](https://clickhouse.tech/docs/en/operations/system_tables/)
diff --git a/docs/zh/operations/table_engines/generate.md b/docs/zh/operations/table_engines/generate.md
deleted file mode 120000
index 28cd09533e5..00000000000
--- a/docs/zh/operations/table_engines/generate.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/operations/table_engines/generate.md
\ No newline at end of file
diff --git a/docs/zh/operations/table_engines/generate.md b/docs/zh/operations/table_engines/generate.md
new file mode 100644
index 00000000000..051369d2e1c
--- /dev/null
+++ b/docs/zh/operations/table_engines/generate.md
@@ -0,0 +1,58 @@
+---
+en_copy: true
+---
+
+# GenerateRandom {#table_engines-generate}
+
+The GenerateRandom table engine produces random data for a given table schema.
+
+Usage examples:
+
+- Use in tests to populate a large table reproducibly.
+- Generate random input for fuzzing tests.
+
+## Usage in ClickHouse Server {#usage-in-clickhouse-server}
+
+``` sql
+ENGINE = GenerateRandom(random_seed, max_string_length, max_array_length)
+```
+
+The `max_array_length` and `max_string_length` parameters specify the maximum length of all
+array columns and strings correspondingly in the generated data.
+
+The Generate table engine supports only `SELECT` queries.
+
+It supports all [DataTypes](../../data_types/index.md) that can be stored in a table except `LowCardinality` and `AggregateFunction`.
+
+**Example:**
+
+**1.** Set up the `generate_engine_table` table:
+
+``` sql
+CREATE TABLE generate_engine_table (name String, value UInt32) ENGINE = GenerateRandom(1, 5, 3)
+```
+
+**2.** Query the data:
+
+``` sql
+SELECT * FROM generate_engine_table LIMIT 3
+```
+
+``` text
+┌─name─┬──────value─┐
+│ c4xJ │ 1412771199 │
+│ r    │ 1791099446 │
+│ 7#$  │  124312908 │
+└──────┴────────────┘
+```
+
+## Details of Implementation {#details-of-implementation}
+
+- Not supported:
+    - `ALTER`
    - `SELECT ... SAMPLE`
+    - `INSERT`
+    - Indices
+    - Replication
+
+[Original article](https://clickhouse.tech/docs/en/operations/table_engines/generate/)
diff --git a/docs/zh/operations/table_engines/graphitemergetree.md b/docs/zh/operations/table_engines/graphitemergetree.md
deleted file mode 120000
index 654425d050a..00000000000
--- a/docs/zh/operations/table_engines/graphitemergetree.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/operations/table_engines/graphitemergetree.md
\ No newline at end of file
diff --git a/docs/zh/operations/table_engines/graphitemergetree.md b/docs/zh/operations/table_engines/graphitemergetree.md
new file mode 100644
index 00000000000..6916441acd0
--- /dev/null
+++ b/docs/zh/operations/table_engines/graphitemergetree.md
@@ -0,0 +1,171 @@
+---
+en_copy: true
+---
+
+# GraphiteMergeTree {#graphitemergetree}
+
+This engine is designed for thinning and aggregating/averaging (rollup) [Graphite](http://graphite.readthedocs.io/en/latest/index.html) data. It may be helpful to developers who want to use ClickHouse as a data store for Graphite.
+
+You can use any ClickHouse table engine to store the Graphite data if you don’t need rollup, but if you need rollup, use `GraphiteMergeTree`. The engine reduces the volume of storage and increases the efficiency of queries from Graphite.
+
+The engine inherits properties from [MergeTree](mergetree.md).
+
+## Creating a Table {#creating-table}
+
+``` sql
+CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
+(
+    Path String,
+    Time DateTime,
+    Value <Numeric_type>,
+    Version <Numeric_type>
+    ...
+) ENGINE = GraphiteMergeTree(config_section)
+[PARTITION BY expr]
+[ORDER BY expr]
+[SAMPLE BY expr]
+[SETTINGS name=value, ...]
+```
+
+See a detailed description of the [CREATE TABLE](../../query_language/create.md#create-table-query) query.
+
+A table for Graphite data should have columns for the following data:
+
+- Metric name (Graphite sensor). Data type: `String`.
+
+- Time of measuring the metric. Data type: `DateTime`.
+
+- Value of the metric. Data type: any numeric.
+
+- Version of the metric. Data type: any numeric.
+
+    ClickHouse saves the rows with the highest version or the last written if versions are the same. Other rows are deleted during the merge of data parts.
+
+The names of these columns should be set in the rollup configuration.
+
+**GraphiteMergeTree parameters**
+
+- `config_section` — Name of the section in the configuration file where the rollup rules are set.
+
+**Query clauses**
+
+When creating a `GraphiteMergeTree` table, the same [clauses](mergetree.md#table_engine-mergetree-creating-a-table) are required as when creating a `MergeTree` table.
+
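+As an illustration, a table using the current syntax might look like this (a sketch: the table name is hypothetical, and the `graphite_rollup` section must exist in your server configuration):
+
+``` sql
+CREATE TABLE graphite_data
+(
+    Path String,
+    Time DateTime,
+    Value Float64,
+    Version UInt64
+) ENGINE = GraphiteMergeTree('graphite_rollup')
+PARTITION BY toYYYYMM(Time)
+ORDER BY (Path, Time)
+```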
+
+**Deprecated Method for Creating a Table**
+
+!!! attention "Attention"
+    Do not use this method in new projects and, if possible, switch old projects to the method described above.
+
+``` sql
+CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
+(
+    EventDate Date,
+    Path String,
+    Time DateTime,
+    Value <Numeric_type>,
+    Version <Numeric_type>
+    ...
+) ENGINE [=] GraphiteMergeTree(date-column [, sampling_expression], (primary, key), index_granularity, config_section)
+```
+
+All of the parameters except `config_section` have the same meaning as in `MergeTree`.
+
+- `config_section` — Name of the section in the configuration file where the rollup rules are set.
+
+
+## Rollup configuration {#rollup-configuration}
+
+The settings for rollup are defined by the [graphite\_rollup](../server_settings/settings.md#server_settings-graphite_rollup) parameter in the server configuration. The parameter can have any name. You can create several configurations and use them for different tables.
+
+Rollup configuration structure:
+
+      required-columns
+      patterns
+
+### Required Columns {#required-columns}
+
+- `path_column_name` — The name of the column storing the metric name (Graphite sensor). Default value: `Path`.
+- `time_column_name` — The name of the column storing the time of measuring the metric. Default value: `Time`.
+- `value_column_name` — The name of the column storing the value of the metric at the time set in `time_column_name`. Default value: `Value`.
+- `version_column_name` — The name of the column storing the version of the metric. Default value: `Timestamp`.
+
+### Patterns {#patterns}
+
+Structure of the `patterns` section:
+
+``` text
+pattern
+    regexp
+    function
+pattern
+    regexp
+    age + precision
+    ...
+pattern
+    regexp
+    function
+    age + precision
+    ...
+pattern
+    ...
+default
+    function
+    age + precision
+    ...
+```
+
+!!! warning "Attention"
+    Patterns must be strictly ordered:
+
+    1. Patterns without `function` or `retention`.
+    1. Patterns with both `function` and `retention`.
+    1. Pattern `default`.
+
+When processing a row, ClickHouse checks the rules in the `pattern` sections. Each `pattern` section (including `default`) can contain a `function` parameter for aggregation, `retention` parameters, or both. If the metric name matches the `regexp`, the rules from the `pattern` section (or sections) are applied; otherwise, the rules from the `default` section are used.
+
+Fields for `pattern` and `default` sections:
+
+- `regexp` – A pattern for the metric name.
+- `age` – The minimum age of the data in seconds.
+- `precision` – How precisely to define the age of the data in seconds. Should be a divisor of 86400 (seconds in a day).
+- `function` – The name of the aggregating function to apply to data whose age falls within the range `[age, age + precision]`.
+
+### Configuration Example {#configuration-example}
+
+``` xml
+<graphite_rollup>
+    <version_column_name>Version</version_column_name>
+    <pattern>
+        <regexp>click_cost</regexp>
+        <function>any</function>
+        <retention>
+            <age>0</age>
+            <precision>5</precision>
+        </retention>
+        <retention>
+            <age>86400</age>
+            <precision>60</precision>
+        </retention>
+    </pattern>
+    <default>
+        <function>max</function>
+        <retention>
+            <age>0</age>
+            <precision>60</precision>
+        </retention>
+        <retention>
+            <age>3600</age>
+            <precision>300</precision>
+        </retention>
+        <retention>
+            <age>86400</age>
+            <precision>3600</precision>
+        </retention>
+    </default>
+</graphite_rollup>
+```
+
+[Original article](https://clickhouse.tech/docs/en/operations/table_engines/graphitemergetree/)
diff --git a/docs/zh/operations/table_engines/hdfs.md b/docs/zh/operations/table_engines/hdfs.md
deleted file mode 120000
index d4dbfa46e68..00000000000
--- a/docs/zh/operations/table_engines/hdfs.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/operations/table_engines/hdfs.md
\ No newline at end of file
diff --git a/docs/zh/operations/table_engines/hdfs.md b/docs/zh/operations/table_engines/hdfs.md
new file mode 100644
index 00000000000..07bd0800aa5
--- /dev/null
+++ b/docs/zh/operations/table_engines/hdfs.md
@@ -0,0 +1,120 @@
+---
+en_copy: true
+---
+
+# HDFS {#table_engines-hdfs}
+
+This engine provides integration with the [Apache Hadoop](https://en.wikipedia.org/wiki/Apache_Hadoop) ecosystem by allowing data on [HDFS](https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html) to be managed via ClickHouse. This engine is similar
+to the [File](file.md) and [URL](url.md) engines, but provides Hadoop-specific features.
+
+## Usage {#usage}
+
+``` sql
+ENGINE = HDFS(URI, format)
+```
+
+The `URI` parameter is the whole file URI in HDFS.
+The `format` parameter specifies one of the available file formats. To perform
+`SELECT` queries, the format must be supported for input, and to perform
+`INSERT` queries – for output. The available formats are listed in the
+[Formats](../../interfaces/formats.md#formats) section.
+The path part of `URI` may contain globs. In this case, the table is read-only.
+
+**Example:**
+
+**1.** Set up the `hdfs_engine_table` table:
+
+``` sql
+CREATE TABLE hdfs_engine_table (name String, value UInt32) ENGINE=HDFS('hdfs://hdfs1:9000/other_storage', 'TSV')
+```
+
+**2.** Fill the file:
+
+``` sql
+INSERT INTO hdfs_engine_table VALUES ('one', 1), ('two', 2), ('three', 3)
+```
+
+**3.** Query the data:
+
+``` sql
+SELECT * FROM hdfs_engine_table LIMIT 2
+```
+
+``` text
+┌─name─┬─value─┐
+│ one  │     1 │
+│ two  │     2 │
+└──────┴───────┘
+```
+
+## Implementation Details {#implementation-details}
+
+- Reads and writes can be parallel.
+- Not supported:
+    - `ALTER` and `SELECT...SAMPLE` operations.
+    - Indexes.
+    - Replication.
+
+**Globs in path**
+
+Multiple path components can have globs. To be processed, a file should exist and match the whole path pattern. The listing of files is determined during `SELECT` (not at `CREATE` time).
+
+- `*` — Substitutes any number of any characters except `/`, including the empty string.
+- `?` — Substitutes any single character.
+- `{some_string,another_string,yet_another_one}` — Substitutes any of the strings `'some_string', 'another_string', 'yet_another_one'`.
+- `{N..M}` — Substitutes any number in the range from N to M, including both borders.
+
+Constructions with `{}` are similar to the [remote](../../query_language/table_functions/remote.md) table function.
+
+**Example**
+
+1. Suppose we have several files in TSV format with the following URIs on HDFS:
+
+- ‘hdfs://hdfs1:9000/some\_dir/some\_file\_1’
+- ‘hdfs://hdfs1:9000/some\_dir/some\_file\_2’
+- ‘hdfs://hdfs1:9000/some\_dir/some\_file\_3’
+- ‘hdfs://hdfs1:9000/another\_dir/some\_file\_1’
+- ‘hdfs://hdfs1:9000/another\_dir/some\_file\_2’
+- ‘hdfs://hdfs1:9000/another\_dir/some\_file\_3’
+
+2. There are several ways to make a table consisting of all six files:
+
+``` sql
+CREATE TABLE table_with_range (name String, value UInt32) ENGINE = HDFS('hdfs://hdfs1:9000/{some,another}_dir/some_file_{1..3}', 'TSV')
+```
+
+Another way:
+
+``` sql
+CREATE TABLE table_with_question_mark (name String, value UInt32) ENGINE = HDFS('hdfs://hdfs1:9000/{some,another}_dir/some_file_?', 'TSV')
+```
+
+The table consists of all the files in both directories (all files should match the format and schema described in the query):
+
+``` sql
+CREATE TABLE table_with_asterisk (name String, value UInt32) ENGINE = HDFS('hdfs://hdfs1:9000/{some,another}_dir/*', 'TSV')
+```
+
+!!! warning "Warning"
+    If the listing of files contains number ranges with leading zeros, use the construction with braces for each digit separately or use `?`.
+
+**Example**
+
+Create a table with files named `file000`, `file001`, … , `file999`:
+
+``` sql
+CREATE TABLE big_table (name String, value UInt32) ENGINE = HDFS('hdfs://hdfs1:9000/big_dir/file{0..9}{0..9}{0..9}', 'CSV')
+```
+
+## Virtual Columns {#virtual-columns}
+
+- `_path` — Path to the file.
+- `_file` — Name of the file.
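+
+For instance, to see which file each row came from, you can select the virtual columns together with the data (a sketch reusing the `table_with_asterisk` table from above):
+
+``` sql
+SELECT _path, _file, name, value
+FROM table_with_asterisk
+LIMIT 2
+```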
+ +**See Also** + +- [Virtual columns](https://clickhouse.tech/docs/en/operations/table_engines/#table_engines-virtual_columns) + +[Original article](https://clickhouse.tech/docs/en/operations/table_engines/hdfs/) diff --git a/docs/zh/operations/table_engines/jdbc.md b/docs/zh/operations/table_engines/jdbc.md deleted file mode 120000 index 5165d704b9a..00000000000 --- a/docs/zh/operations/table_engines/jdbc.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/operations/table_engines/jdbc.md \ No newline at end of file diff --git a/docs/zh/operations/table_engines/jdbc.md b/docs/zh/operations/table_engines/jdbc.md new file mode 100644 index 00000000000..576c7182907 --- /dev/null +++ b/docs/zh/operations/table_engines/jdbc.md @@ -0,0 +1,87 @@ +--- +en_copy: true +--- + +# JDBC {#table-engine-jdbc} + +Allows ClickHouse to connect to external databases via [JDBC](https://en.wikipedia.org/wiki/Java_Database_Connectivity). + +To implement the JDBC connection, ClickHouse uses the separate program [clickhouse-jdbc-bridge](https://github.com/alex-krash/clickhouse-jdbc-bridge) that should run as a daemon. + +This engine supports the [Nullable](../../data_types/nullable.md) data type. + +## Creating a Table {#creating-a-table} + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name +( + columns list... +) +ENGINE = JDBC(dbms_uri, external_database, external_table) +``` + +**Engine Parameters** + +- `dbms_uri` — URI of an external DBMS. + + Format: `jdbc:://:/?user=&password=`. + Example for MySQL: `jdbc:mysql://localhost:3306/?user=root&password=root`. + +- `external_database` — Database in an external DBMS. + +- `external_table` — Name of the table in `external_database`. + +## Usage Example {#usage-example} + +Creating a table in MySQL server by connecting directly with it’s console client: + +``` text +mysql> CREATE TABLE `test`.`test` ( + -> `int_id` INT NOT NULL AUTO_INCREMENT, + -> `int_nullable` INT NULL DEFAULT NULL, + -> `float` FLOAT NOT NULL, + -> `float_nullable` FLOAT NULL DEFAULT NULL, + -> PRIMARY KEY (`int_id`)); +Query OK, 0 rows affected (0,09 sec) + +mysql> insert into test (`int_id`, `float`) VALUES (1,2); +Query OK, 1 row affected (0,00 sec) + +mysql> select * from test; ++--------+--------------+-------+----------------+ +| int_id | int_nullable | float | float_nullable | ++--------+--------------+-------+----------------+ +| 1 | NULL | 2 | NULL | ++--------+--------------+-------+----------------+ +1 row in set (0,00 sec) +``` + +Creating a table in ClickHouse server and selecting data from it: + +``` sql +CREATE TABLE jdbc_table +( + `int_id` Int32, + `int_nullable` Nullable(Int32), + `float` Float32, + `float_nullable` Nullable(Float32) +) +ENGINE JDBC('jdbc:mysql://localhost:3306/?user=root&password=root', 'test', 'test') +``` + +``` sql +SELECT * +FROM jdbc_table +``` + +``` text +┌─int_id─┬─int_nullable─┬─float─┬─float_nullable─┐ +│ 1 │ ᴺᵁᴸᴸ │ 2 │ ᴺᵁᴸᴸ │ +└────────┴──────────────┴───────┴────────────────┘ +``` + +## See Also {#see-also} + +- [JDBC table function](../../query_language/table_functions/jdbc.md). 
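+
+For ad-hoc access without creating a table, the `jdbc` table function linked above takes the same parameters. A sketch reusing the connection string from this example (it assumes the `clickhouse-jdbc-bridge` daemon is running):
+
+``` sql
+SELECT *
+FROM jdbc('jdbc:mysql://localhost:3306/?user=root&password=root', 'test', 'test')
+```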
+ +[Original article](https://clickhouse.tech/docs/en/operations/table_engines/jdbc/) diff --git a/docs/zh/operations/table_engines/odbc.md b/docs/zh/operations/table_engines/odbc.md deleted file mode 120000 index 06091fd5377..00000000000 --- a/docs/zh/operations/table_engines/odbc.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/operations/table_engines/odbc.md \ No newline at end of file diff --git a/docs/zh/operations/table_engines/odbc.md b/docs/zh/operations/table_engines/odbc.md new file mode 100644 index 00000000000..69003623e0a --- /dev/null +++ b/docs/zh/operations/table_engines/odbc.md @@ -0,0 +1,129 @@ +--- +en_copy: true +--- + +# ODBC {#table-engine-odbc} + +Allows ClickHouse to connect to external databases via [ODBC](https://en.wikipedia.org/wiki/Open_Database_Connectivity). + +To safely implement ODBC connections, ClickHouse uses a separate program `clickhouse-odbc-bridge`. If the ODBC driver is loaded directly from `clickhouse-server`, driver problems can crash the ClickHouse server. ClickHouse automatically starts `clickhouse-odbc-bridge` when it is required. The ODBC bridge program is installed from the same package as the `clickhouse-server`. + +This engine supports the [Nullable](../../data_types/nullable.md) data type. + +## Creating a Table {#creating-a-table} + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] +( + name1 [type1], + name2 [type2], + ... +) +ENGINE = ODBC(connection_settings, external_database, external_table) +``` + +See a detailed description of the [CREATE TABLE](../../query_language/create.md#create-table-query) query. + +The table structure can differ from the source table structure: + +- Column names should be the same as in the source table, but you can use just some of these columns and in any order. +- Column types may differ from those in the source table. ClickHouse tries to [cast](../../query_language/functions/type_conversion_functions.md#type_conversion_function-cast) values to the ClickHouse data types. + +**Engine Parameters** + +- `connection_settings` — Name of the section with connection settings in the `odbc.ini` file. +- `external_database` — Name of a database in an external DBMS. +- `external_table` — Name of a table in the `external_database`. + +## Usage Example {#usage-example} + +**Retrieving data from the local MySQL installation via ODBC** + +This example is checked for Ubuntu Linux 18.04 and MySQL server 5.7. + +Ensure that unixODBC and MySQL Connector are installed. + +By default (if installed from packages), ClickHouse starts as user `clickhouse`. Thus, you need to create and configure this user in the MySQL server. + +``` bash +$ sudo mysql +``` + +``` sql +mysql> CREATE USER 'clickhouse'@'localhost' IDENTIFIED BY 'clickhouse'; +mysql> GRANT ALL PRIVILEGES ON *.* TO 'clickhouse'@'clickhouse' WITH GRANT OPTION; +``` + +Then configure the connection in `/etc/odbc.ini`. + +``` bash +$ cat /etc/odbc.ini +[mysqlconn] +DRIVER = /usr/local/lib/libmyodbc5w.so +SERVER = 127.0.0.1 +PORT = 3306 +DATABASE = test +USERNAME = clickhouse +PASSWORD = clickhouse +``` + +You can check the connection using the `isql` utility from the unixODBC installation. + +``` bash +$ isql -v mysqlconn ++---------------------------------------+ +| Connected! | +| | +... 
+``` + +Table in MySQL: + +``` text +mysql> CREATE TABLE `test`.`test` ( + -> `int_id` INT NOT NULL AUTO_INCREMENT, + -> `int_nullable` INT NULL DEFAULT NULL, + -> `float` FLOAT NOT NULL, + -> `float_nullable` FLOAT NULL DEFAULT NULL, + -> PRIMARY KEY (`int_id`)); +Query OK, 0 rows affected (0,09 sec) + +mysql> insert into test (`int_id`, `float`) VALUES (1,2); +Query OK, 1 row affected (0,00 sec) + +mysql> select * from test; ++--------+--------------+-------+----------------+ +| int_id | int_nullable | float | float_nullable | ++--------+--------------+-------+----------------+ +| 1 | NULL | 2 | NULL | ++--------+--------------+-------+----------------+ +1 row in set (0,00 sec) +``` + +Table in ClickHouse, retrieving data from the MySQL table: + +``` sql +CREATE TABLE odbc_t +( + `int_id` Int32, + `float_nullable` Nullable(Float32) +) +ENGINE = ODBC('DSN=mysqlconn', 'test', 'test') +``` + +``` sql +SELECT * FROM odbc_t +``` + +``` text +┌─int_id─┬─float_nullable─┐ +│ 1 │ ᴺᵁᴸᴸ │ +└────────┴────────────────┘ +``` + +## See Also {#see-also} + +- [ODBC external dictionaries](../../query_language/dicts/external_dicts_dict_sources.md#dicts-external_dicts_dict_sources-odbc) +- [ODBC table function](../../query_language/table_functions/odbc.md) + +[Original article](https://clickhouse.tech/docs/en/operations/table_engines/odbc/) diff --git a/docs/zh/operations/table_engines/versionedcollapsingmergetree.md b/docs/zh/operations/table_engines/versionedcollapsingmergetree.md deleted file mode 120000 index 5843fba70b8..00000000000 --- a/docs/zh/operations/table_engines/versionedcollapsingmergetree.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/operations/table_engines/versionedcollapsingmergetree.md \ No newline at end of file diff --git a/docs/zh/operations/table_engines/versionedcollapsingmergetree.md b/docs/zh/operations/table_engines/versionedcollapsingmergetree.md new file mode 100644 index 00000000000..29f6d44d748 --- /dev/null +++ b/docs/zh/operations/table_engines/versionedcollapsingmergetree.md @@ -0,0 +1,235 @@ +--- +en_copy: true +--- + +# VersionedCollapsingMergeTree {#versionedcollapsingmergetree} + +This engine: + +- Allows quick writing of object states that are continually changing. +- Deletes old object states in the background. This significantly reduces the volume of storage. + +See the section [Collapsing](#table_engines_versionedcollapsingmergetree) for details. + +The engine inherits from [MergeTree](mergetree.md#table_engines-mergetree) and adds the logic for collapsing rows to the algorithm for merging data parts. `VersionedCollapsingMergeTree` serves the same purpose as [CollapsingMergeTree](collapsingmergetree.md) but uses a different collapsing algorithm that allows inserting the data in any order with multiple threads. In particular, the `Version` column helps to collapse the rows properly even if they are inserted in the wrong order. In contrast, `CollapsingMergeTree` allows only strictly consecutive insertion. + +## Creating a Table {#creating-a-table} + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] +( + name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], + name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], + ... +) ENGINE = VersionedCollapsingMergeTree(sign, version) +[PARTITION BY expr] +[ORDER BY expr] +[SAMPLE BY expr] +[SETTINGS name=value, ...] +``` + +For a description of query parameters, see the [query description](../../query_language/create.md). 
+ +**Engine Parameters** + +``` sql +VersionedCollapsingMergeTree(sign, version) +``` + +- `sign` — Name of the column with the type of row: `1` is a “state” row, `-1` is a “cancel” row. + + The column data type should be `Int8`. + +- `version` — Name of the column with the version of the object state. + + The column data type should be `UInt*`. + +**Query Clauses** + +When creating a `VersionedCollapsingMergeTree` table, the same [clauses](mergetree.md) are required as when creating a `MergeTree` table. + +
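+For example, a minimal sketch with typical clauses (the table and column names here are illustrative, not part of the original document):
+
+``` sql
+CREATE TABLE IF NOT EXISTS page_visits
+(
+    EventDate Date,
+    UserID UInt64,
+    PageViews UInt8,
+    Duration UInt8,
+    Sign Int8,
+    Version UInt8
+)
+ENGINE = VersionedCollapsingMergeTree(Sign, Version)
+PARTITION BY toYYYYMM(EventDate)
+ORDER BY (UserID, EventDate)
+```
+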
    + +Deprecated Method for Creating a Table + +!!! attention "Attention" + Do not use this method in new projects. If possible, switch the old projects to the method described above. + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] +( + name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], + name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], + ... +) ENGINE [=] VersionedCollapsingMergeTree(date-column [, sampling_expression], (primary, key), index_granularity, sign, version) +``` + +All of the parameters except `sign` and `version` have the same meaning as in `MergeTree`. + +- `sign` — Name of the column with the type of row: `1` is a “state” row, `-1` is a “cancel” row. + + Column Data Type — `Int8`. + +- `version` — Name of the column with the version of the object state. + + The column data type should be `UInt*`. + +
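+A hypothetical instantiation of this deprecated syntax (the table and column names are illustrative):
+
+``` sql
+CREATE TABLE page_visits_old
+(
+    EventDate Date,
+    UserID UInt64,
+    Sign Int8,
+    Version UInt8
+) ENGINE = VersionedCollapsingMergeTree(EventDate, (UserID, EventDate), 8192, Sign, Version)
+```
+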
+
+## Collapsing {#table_engines-versionedcollapsingmergetree}
+
+### Data {#data}
+
+Consider a situation where you need to save continually changing data for some object. It is reasonable to have one row for an object and update the row whenever there are changes. However, the update operation is expensive and slow for a DBMS because it requires rewriting the data in the storage. Update is not acceptable if you need to write data quickly, but you can write the changes to an object sequentially as follows.
+
+Use the `Sign` column when writing the row. If `Sign = 1` it means that the row is a state of an object (let’s call it the “state” row). If `Sign = -1` it indicates the cancellation of the state of an object with the same attributes (let’s call it the “cancel” row). Also use the `Version` column, which should identify each state of an object with a separate number.
+
+For example, we want to calculate how many pages users visited on some site and how long they were there. At some point in time we write the following row with the state of user activity:
+
+``` text
+┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐
+│ 4324182021466249494 │         5 │      146 │    1 │       1 |
+└─────────────────────┴───────────┴──────────┴──────┴─────────┘
+```
+
+At some point later we register the change of user activity and write it with the following two rows.
+
+``` text
+┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐
+│ 4324182021466249494 │         5 │      146 │   -1 │       1 |
+│ 4324182021466249494 │         6 │      185 │    1 │       2 |
+└─────────────────────┴───────────┴──────────┴──────┴─────────┘
+```
+
+The first row cancels the previous state of the object (user). It should copy all of the fields of the canceled state except `Sign`.
+
+The second row contains the current state.
+
+Because we need only the last state of user activity, the rows
+
+``` text
+┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐
+│ 4324182021466249494 │         5 │      146 │    1 │       1 |
+│ 4324182021466249494 │         5 │      146 │   -1 │       1 |
+└─────────────────────┴───────────┴──────────┴──────┴─────────┘
+```
+
+can be deleted, collapsing the invalid (old) state of the object. `VersionedCollapsingMergeTree` does this while merging the data parts.
+
+To find out why we need two rows for each change, see [Algorithm](#table_engines-versionedcollapsingmergetree-algorithm).
+
+**Notes on Usage**
+
+1. The program that writes the data should remember the state of an object in order to cancel it. The “cancel” row should be a copy of the “state” row with the opposite `Sign`. This increases the initial size of storage but allows you to write the data quickly.
+2. Long growing arrays in columns reduce the efficiency of the engine due to the write load. The more straightforward the data, the better the efficiency.
+3. `SELECT` results depend strongly on the consistency of the history of object changes. Be accurate when preparing data for inserting. You can get unpredictable results with inconsistent data, such as negative values for non-negative metrics like session depth.
+
+### Algorithm {#table_engines-versionedcollapsingmergetree-algorithm}
+
+When ClickHouse merges data parts, it deletes each pair of rows that have the same primary key and version and different `Sign`. The order of rows does not matter.
+
+When ClickHouse inserts data, it orders rows by the primary key. If the `Version` column is not in the primary key, ClickHouse adds it to the primary key implicitly as the last field and uses it for ordering.
+ +## Selecting Data {#selecting-data} + +ClickHouse doesn’t guarantee that all of the rows with the same primary key will be in the same resulting data part or even on the same physical server. This is true both for writing the data and for subsequent merging of the data parts. In addition, ClickHouse processes `SELECT` queries with multiple threads, and it cannot predict the order of rows in the result. This means that aggregation is required if there is a need to get completely “collapsed” data from a `VersionedCollapsingMergeTree` table. + +To finalize collapsing, write a query with a `GROUP BY` clause and aggregate functions that account for the sign. For example, to calculate quantity, use `sum(Sign)` instead of `count()`. To calculate the sum of something, use `sum(Sign * x)` instead of `sum(x)`, and add `HAVING sum(Sign) > 0`. + +The aggregates `count`, `sum` and `avg` can be calculated this way. The aggregate `uniq` can be calculated if an object has at least one non-collapsed state. The aggregates `min` and `max` can’t be calculated because `VersionedCollapsingMergeTree` does not save the history of values of collapsed states. + +If you need to extract the data with “collapsing” but without aggregation (for example, to check whether rows are present whose newest values match certain conditions), you can use the `FINAL` modifier for the `FROM` clause. This approach is inefficient and should not be used with large tables. + +## Example of Use {#example-of-use} + +Example data: + +``` text +┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐ +│ 4324182021466249494 │ 5 │ 146 │ 1 │ 1 | +│ 4324182021466249494 │ 5 │ 146 │ -1 │ 1 | +│ 4324182021466249494 │ 6 │ 185 │ 1 │ 2 | +└─────────────────────┴───────────┴──────────┴──────┴─────────┘ +``` + +Creating the table: + +``` sql +CREATE TABLE UAct +( + UserID UInt64, + PageViews UInt8, + Duration UInt8, + Sign Int8, + Version UInt8 +) +ENGINE = VersionedCollapsingMergeTree(Sign, Version) +ORDER BY UserID +``` + +Inserting the data: + +``` sql +INSERT INTO UAct VALUES (4324182021466249494, 5, 146, 1, 1) +``` + +``` sql +INSERT INTO UAct VALUES (4324182021466249494, 5, 146, -1, 1),(4324182021466249494, 6, 185, 1, 2) +``` + +We use two `INSERT` queries to create two different data parts. If we insert the data with a single query, ClickHouse creates one data part and will never perform any merge. + +Getting the data: + +``` sql +SELECT * FROM UAct +``` + +``` text +┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐ +│ 4324182021466249494 │ 5 │ 146 │ 1 │ 1 │ +└─────────────────────┴───────────┴──────────┴──────┴─────────┘ +┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐ +│ 4324182021466249494 │ 5 │ 146 │ -1 │ 1 │ +│ 4324182021466249494 │ 6 │ 185 │ 1 │ 2 │ +└─────────────────────┴───────────┴──────────┴──────┴─────────┘ +``` + +What do we see here and where are the collapsed parts? +We created two data parts using two `INSERT` queries. The `SELECT` query was performed in two threads, and the result is a random order of rows. +Collapsing did not occur because the data parts have not been merged yet. ClickHouse merges data parts at an unknown point in time which we cannot predict. 
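+
+As an aside, a merge can be triggered manually for testing purposes, although you should not rely on this in production (a sketch; `OPTIMIZE ... FINAL` is a heavyweight operation):
+
+``` sql
+OPTIMIZE TABLE UAct FINAL
+```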
+ +This is why we need aggregation: + +``` sql +SELECT + UserID, + sum(PageViews * Sign) AS PageViews, + sum(Duration * Sign) AS Duration, + Version +FROM UAct +GROUP BY UserID, Version +HAVING sum(Sign) > 0 +``` + +``` text +┌──────────────UserID─┬─PageViews─┬─Duration─┬─Version─┐ +│ 4324182021466249494 │ 6 │ 185 │ 2 │ +└─────────────────────┴───────────┴──────────┴─────────┘ +``` + +If we don’t need aggregation and want to force collapsing, we can use the `FINAL` modifier for the `FROM` clause. + +``` sql +SELECT * FROM UAct FINAL +``` + +``` text +┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐ +│ 4324182021466249494 │ 6 │ 185 │ 1 │ 2 │ +└─────────────────────┴───────────┴──────────┴──────┴─────────┘ +``` + +This is a very inefficient way to select data. Don’t use it for large tables. + +[Original article](https://clickhouse.tech/docs/en/operations/table_engines/versionedcollapsingmergetree/) diff --git a/docs/zh/operations/troubleshooting.md b/docs/zh/operations/troubleshooting.md deleted file mode 120000 index 84f0ff34f41..00000000000 --- a/docs/zh/operations/troubleshooting.md +++ /dev/null @@ -1 +0,0 @@ -../../en/operations/troubleshooting.md \ No newline at end of file diff --git a/docs/zh/operations/troubleshooting.md b/docs/zh/operations/troubleshooting.md new file mode 100644 index 00000000000..d48e2b4b7f6 --- /dev/null +++ b/docs/zh/operations/troubleshooting.md @@ -0,0 +1,143 @@ +--- +en_copy: true +--- + +# Troubleshooting {#troubleshooting} + +- [Installation](#troubleshooting-installation-errors) +- [Connecting to the server](#troubleshooting-accepts-no-connections) +- [Query processing](#troubleshooting-does-not-process-queries) +- [Efficiency of query processing](#troubleshooting-too-slow) + +## Installation {#troubleshooting-installation-errors} + +### You Cannot Get Deb Packages from ClickHouse Repository With apt-get {#you-cannot-get-deb-packages-from-clickhouse-repository-with-apt-get} + +- Check firewall settings. +- If you cannot access the repository for any reason, download packages as described in the [Getting started](../getting_started/index.md) article and install them manually using the `sudo dpkg -i ` command. You will also need the `tzdata` package. + +## Connecting to the Server {#troubleshooting-accepts-no-connections} + +Possible issues: + +- The server is not running. +- Unexpected or wrong configuration parameters. + +### Server Is Not Running {#server-is-not-running} + +**Check if server is runnnig** + +Command: + +``` bash +$ sudo service clickhouse-server status +``` + +If the server is not running, start it with the command: + +``` bash +$ sudo service clickhouse-server start +``` + +**Check logs** + +The main log of `clickhouse-server` is in `/var/log/clickhouse-server/clickhouse-server.log` by default. + +If the server started successfully, you should see the strings: + +- ` Application: starting up.` — Server started. +- ` Application: Ready for connections.` — Server is running and ready for connections. + +If `clickhouse-server` start failed with a configuration error, you should see the `` string with an error description. For example: + +``` text +2019.01.11 15:23:25.549505 [ 45 ] {} ExternalDictionaries: Failed reloading 'event2id' external dictionary: Poco::Exception. Code: 1000, e.code() = 111, e.displayText() = Connection refused, e.what() = Connection refused +``` + +If you don’t see an error at the end of the file, look through the entire file starting from the string: + +``` text + Application: starting up. 
+``` + +If you try to start a second instance of `clickhouse-server` on the server, you see the following log: + +``` text +2019.01.11 15:25:11.151730 [ 1 ] {} : Starting ClickHouse 19.1.0 with revision 54413 +2019.01.11 15:25:11.154578 [ 1 ] {} Application: starting up +2019.01.11 15:25:11.156361 [ 1 ] {} StatusFile: Status file ./status already exists - unclean restart. Contents: +PID: 8510 +Started at: 2019-01-11 15:24:23 +Revision: 54413 + +2019.01.11 15:25:11.156673 [ 1 ] {} Application: DB::Exception: Cannot lock file ./status. Another server instance in same directory is already running. +2019.01.11 15:25:11.156682 [ 1 ] {} Application: shutting down +2019.01.11 15:25:11.156686 [ 1 ] {} Application: Uninitializing subsystem: Logging Subsystem +2019.01.11 15:25:11.156716 [ 2 ] {} BaseDaemon: Stop SignalListener thread +``` + +**See system.d logs** + +If you don’t find any useful information in `clickhouse-server` logs or there aren’t any logs, you can view `system.d` logs using the command: + +``` bash +$ sudo journalctl -u clickhouse-server +``` + +**Start clickhouse-server in interactive mode** + +``` bash +$ sudo -u clickhouse /usr/bin/clickhouse-server --config-file /etc/clickhouse-server/config.xml +``` + +This command starts the server as an interactive app with standard parameters of the autostart script. In this mode `clickhouse-server` prints all the event messages in the console. + +### Configuration Parameters {#configuration-parameters} + +Check: + +- Docker settings. + + If you run ClickHouse in Docker in an IPv6 network, make sure that `network=host` is set. + +- Endpoint settings. + + Check [listen\_host](server_settings/settings.md#server_settings-listen_host) and [tcp\_port](server_settings/settings.md#server_settings-tcp_port) settings. + + ClickHouse server accepts localhost connections only by default. + +- HTTP protocol settings. + + Check protocol settings for the HTTP API. + +- Secure connection settings. + + Check: + + - The [tcp\_port\_secure](server_settings/settings.md#server_settings-tcp_port_secure) setting. + - Settings for [SSL sertificates](server_settings/settings.md#server_settings-openssl). + + Use proper parameters while connecting. For example, use the `port_secure` parameter with `clickhouse_client`. + +- User settings. + + You might be using the wrong user name or password. + +## Query Processing {#troubleshooting-does-not-process-queries} + +If ClickHouse is not able to process the query, it sends an error description to the client. In the `clickhouse-client` you get a description of the error in the console. If you are using the HTTP interface, ClickHouse sends the error description in the response body. For example: + +``` bash +$ curl 'http://localhost:8123/' --data-binary "SELECT a" +Code: 47, e.displayText() = DB::Exception: Unknown identifier: a. Note that there are no tables (FROM clause) in your query, context: required_names: 'a' source_tables: table_aliases: private_aliases: column_aliases: public_columns: 'a' masked_columns: array_join_columns: source_columns: , e.what() = DB::Exception +``` + +If you start `clickhouse-client` with the `stack-trace` parameter, ClickHouse returns the server stack trace with the description of an error. + +You might see a message about a broken connection. In this case, you can repeat the query. If the connection breaks every time you perform the query, check the server logs for errors. 
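+
+You can also inspect recent failed queries on the server side through the `system.query_log` table (a sketch; it assumes query logging is enabled via the `query_log` server setting):
+
+``` sql
+SELECT event_time, query, exception
+FROM system.query_log
+WHERE exception != ''
+ORDER BY event_time DESC
+LIMIT 10
+```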
+
+## Efficiency of Query Processing {#troubleshooting-too-slow}
+
+If you see that ClickHouse is working too slowly, you need to profile the load on the server resources and network for your queries.
+
+You can use the clickhouse-benchmark utility to profile queries. It shows the number of queries processed per second, the number of rows processed per second, and percentiles of query processing times.
diff --git a/docs/zh/operations/update.md b/docs/zh/operations/update.md
deleted file mode 120000
index 88a092c0dff..00000000000
--- a/docs/zh/operations/update.md
+++ /dev/null
@@ -1 +0,0 @@
-../../en/operations/update.md
\ No newline at end of file
diff --git a/docs/zh/operations/update.md b/docs/zh/operations/update.md
new file mode 100644
index 00000000000..b09eb707e77
--- /dev/null
+++ b/docs/zh/operations/update.md
@@ -0,0 +1,17 @@
+---
+en_copy: true
+---
+
+# ClickHouse Update {#clickhouse-update}
+
+If ClickHouse was installed from deb packages, execute the following commands on the server:
+
+``` bash
+$ sudo apt-get update
+$ sudo apt-get install clickhouse-client clickhouse-server
+$ sudo service clickhouse-server restart
+```
+
+If you installed ClickHouse using something other than the recommended deb packages, use the appropriate update method.
+
+ClickHouse does not support a distributed update. The operation should be performed consecutively on each separate server. Do not update all the servers on a cluster simultaneously, or the cluster will be unavailable for some time.
diff --git a/docs/zh/operations/utils/clickhouse-benchmark.md b/docs/zh/operations/utils/clickhouse-benchmark.md
deleted file mode 120000
index 133b4d2e511..00000000000
--- a/docs/zh/operations/utils/clickhouse-benchmark.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/operations/utils/clickhouse-benchmark.md
\ No newline at end of file
diff --git a/docs/zh/operations/utils/clickhouse-benchmark.md b/docs/zh/operations/utils/clickhouse-benchmark.md
new file mode 100644
index 00000000000..1d8ac3dec46
--- /dev/null
+++ b/docs/zh/operations/utils/clickhouse-benchmark.md
@@ -0,0 +1,153 @@
+---
+en_copy: true
+---
+
+# clickhouse-benchmark {#clickhouse-benchmark}
+
+Connects to a ClickHouse server and repeatedly sends specified queries.
+
+Syntax:
+
+``` bash
+$ echo "single query" | clickhouse-benchmark [keys]
+```
+
+or
+
+``` bash
+$ clickhouse-benchmark [keys] <<< "single query"
+```
+
+If you want to send a set of queries, create a text file and place each query on an individual line in this file. For example:
+
+``` sql
+SELECT * FROM system.numbers LIMIT 10000000
+SELECT 1
+```
+
+Then pass this file to the standard input of `clickhouse-benchmark`.
+
+``` bash
+clickhouse-benchmark [keys] < queries_file
+```
+
+## Keys {#clickhouse-benchmark-keys}
+
+- `-c N`, `--concurrency=N` — Number of queries that `clickhouse-benchmark` sends simultaneously. Default value: 1.
+- `-d N`, `--delay=N` — Interval in seconds between intermediate reports (set 0 to disable reports). Default value: 1.
+- `-h WORD`, `--host=WORD` — Server host. Default value: `localhost`. For the [comparison mode](#clickhouse-benchmark-comparison-mode) you can use multiple `-h` keys.
+- `-p N`, `--port=N` — Server port. Default value: 9000. For the [comparison mode](#clickhouse-benchmark-comparison-mode) you can use multiple `-p` keys.
+- `-i N`, `--iterations=N` — Total number of queries. Default value: 0.
+- `-r`, `--randomize` — Random order of query execution if there is more than one input query.
+- `-s`, `--secure` — Use a TLS connection.
+- `-t N`, `--timelimit=N` — Time limit in seconds. `clickhouse-benchmark` stops sending queries when the specified time limit is reached. Default value: 0 (time limit disabled).
+- `--confidence=N` — Level of confidence for T-test. Possible values: 0 (80%), 1 (90%), 2 (95%), 3 (98%), 4 (99%), 5 (99.5%). Default value: 5. In the [comparison mode](#clickhouse-benchmark-comparison-mode) `clickhouse-benchmark` performs the [Independent two-sample Student’s t-test](https://en.wikipedia.org/wiki/Student%27s_t-test#Independent_two-sample_t-test) to determine whether the two distributions aren’t different with the selected level of confidence.
+- `--cumulative` — Printing cumulative data instead of data per interval.
+- `--database=DATABASE_NAME` — ClickHouse database name. Default value: `default`.
+- `--json=FILEPATH` — JSON output. When the key is set, `clickhouse-benchmark` outputs a report to the specified JSON file.
+- `--user=USERNAME` — ClickHouse user name. Default value: `default`.
+- `--password=PSWD` — ClickHouse user password. Default value: empty string.
+- `--stacktrace` — Stack traces output. When the key is set, `clickhouse-benchmark` outputs stack traces of exceptions.
+- `--stage=WORD` — Query processing stage at server. ClickHouse stops query processing and returns an answer to `clickhouse-benchmark` at the specified stage. Possible values: `complete`, `fetch_columns`, `with_mergeable_state`. Default value: `complete`.
+- `--help` — Shows the help message.
+
+If you want to apply some [settings](../../operations/settings/index.md) for queries, pass them as a key `--<session setting name>=SETTING_VALUE`. For example, `--max_memory_usage=1048576`.
+
+## Output {#clickhouse-benchmark-output}
+
+By default, `clickhouse-benchmark` reports for each `--delay` interval.
+
+Example of the report:
+
+``` text
+Queries executed: 10.
+
+localhost:9000, queries 10, QPS: 6.772, RPS: 67904487.440, MiB/s: 518.070, result RPS: 67721584.984, result MiB/s: 516.675.
+
+0.000%      0.145 sec.
+10.000%     0.146 sec.
+20.000%     0.146 sec.
+30.000%     0.146 sec.
+40.000%     0.147 sec.
+50.000%     0.148 sec.
+60.000%     0.148 sec.
+70.000%     0.148 sec.
+80.000%     0.149 sec.
+90.000%     0.150 sec.
+95.000%     0.150 sec.
+99.000%     0.150 sec.
+99.900%     0.150 sec.
+99.990%     0.150 sec.
+```
+
+In the report you can find:
+
+- Number of queries in the `Queries executed:` field.
+
+- Status string containing (in order):
+
+    - Endpoint of ClickHouse server.
+    - Number of processed queries.
+    - QPS: How many queries the server performed per second during a period specified in the `--delay` argument.
+    - RPS: How many rows the server read per second during a period specified in the `--delay` argument.
+    - MiB/s: How many mebibytes the server read per second during a period specified in the `--delay` argument.
+    - result RPS: How many rows the server placed in the result of a query per second during a period specified in the `--delay` argument.
+    - result MiB/s: How many mebibytes the server placed in the result of a query per second during a period specified in the `--delay` argument.
+
+- Percentiles of query execution time.
+
+## Comparison mode {#clickhouse-benchmark-comparison-mode}
+
+`clickhouse-benchmark` can compare the performance of two running ClickHouse servers.
+
+To use the comparison mode, specify endpoints of both servers by two pairs of `--host`, `--port` keys. Keys are matched together by position in the argument list: the first `--host` is matched with the first `--port`, and so on. `clickhouse-benchmark` establishes connections to both servers, then sends queries. Each query is addressed to a randomly selected server. The results are shown for each server separately.
+
+## Example {#clickhouse-benchmark-example}
+
+``` bash
+$ echo "SELECT * FROM system.numbers LIMIT 10000000 OFFSET 10000000" | clickhouse-benchmark -i 10
+```
+
+``` text
+Loaded 1 queries.
+
+Queries executed: 6.
+
+localhost:9000, queries 6, QPS: 6.153, RPS: 123398340.957, MiB/s: 941.455, result RPS: 61532982.200, result MiB/s: 469.459.
+
+0.000%      0.159 sec.
+10.000%     0.159 sec.
+20.000%     0.159 sec.
+30.000%     0.160 sec.
+40.000%     0.160 sec.
+50.000%     0.162 sec.
+60.000%     0.164 sec.
+70.000%     0.165 sec.
+80.000%     0.166 sec.
+90.000%     0.166 sec.
+95.000%     0.167 sec.
+99.000%     0.167 sec.
+99.900%     0.167 sec.
+99.990%     0.167 sec.
+
+
+
+Queries executed: 10.
+
+localhost:9000, queries 10, QPS: 6.082, RPS: 121959604.568, MiB/s: 930.478, result RPS: 60815551.642, result MiB/s: 463.986.
+
+0.000%      0.159 sec.
+10.000%     0.159 sec.
+20.000%     0.160 sec.
+30.000%     0.163 sec.
+40.000%     0.164 sec.
+50.000%     0.165 sec.
+60.000%     0.166 sec.
+70.000%     0.166 sec.
+80.000%     0.167 sec.
+90.000%     0.167 sec.
+95.000%     0.170 sec.
+99.000%     0.172 sec.
+99.900%     0.172 sec.
+99.990%     0.172 sec.
+```
diff --git a/docs/zh/query_language/agg_functions/combinators.md b/docs/zh/query_language/agg_functions/combinators.md
deleted file mode 120000
index 2b914cebd15..00000000000
--- a/docs/zh/query_language/agg_functions/combinators.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/query_language/agg_functions/combinators.md
\ No newline at end of file
diff --git a/docs/zh/query_language/agg_functions/combinators.md b/docs/zh/query_language/agg_functions/combinators.md
new file mode 100644
index 00000000000..a173e56fbea
--- /dev/null
+++ b/docs/zh/query_language/agg_functions/combinators.md
@@ -0,0 +1,163 @@
+---
+en_copy: true
+---
+
+# Aggregate function combinators {#aggregate_functions_combinators}
+
+The name of an aggregate function can have a suffix appended to it. This changes the way the aggregate function works.
+
+## -If {#agg-functions-combinator-if}
+
+The suffix -If can be appended to the name of any aggregate function. In this case, the aggregate function accepts an extra argument – a condition (UInt8 type). The aggregate function processes only the rows that trigger the condition. If the condition was not triggered even once, it returns a default value (usually zeros or empty strings).
+
+Examples: `sumIf(column, cond)`, `countIf(cond)`, `avgIf(x, cond)`, `quantilesTimingIf(level1, level2)(x, cond)`, `argMinIf(arg, val, cond)` and so on.
+
+With conditional aggregate functions, you can calculate aggregates for several conditions at once, without using subqueries and `JOIN`s. For example, in Yandex.Metrica, conditional aggregate functions are used to implement the segment comparison functionality.
+
+## -Array {#agg-functions-combinator-array}
+
+The -Array suffix can be appended to any aggregate function. In this case, the aggregate function takes arguments of the ‘Array(T)’ type (arrays) instead of ‘T’ type arguments. If the aggregate function accepts multiple arguments, these must be arrays of equal length. When processing arrays, the aggregate function works like the original aggregate function across all array elements.
+
+Example 1: `sumArray(arr)` - Totals all the elements of all ‘arr’ arrays. In this example, it could have been written more simply: `sum(arraySum(arr))`.
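+
+A quick way to check that equivalence on inline test data (a sketch):
+
+``` sql
+SELECT sumArray(arr) AS total_1, sum(arraySum(arr)) AS total_2
+FROM
+(
+    SELECT [1, 2, 3] AS arr
+    UNION ALL
+    SELECT [4, 5] AS arr
+)
+-- both columns return 15
+```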
+ +Example 2: `uniqArray(arr)` – Counts the number of unique elements in all ‘arr’ arrays. This could be done an easier way: `uniq(arrayJoin(arr))`, but it’s not always possible to add ‘arrayJoin’ to a query. + +-If and -Array can be combined. However, ‘Array’ must come first, then ‘If’. Examples: `uniqArrayIf(arr, cond)`, `quantilesTimingArrayIf(level1, level2)(arr, cond)`. Due to this order, the ‘cond’ argument won’t be an array. + +## -State {#agg-functions-combinator-state} + +If you apply this combinator, the aggregate function doesn’t return the resulting value (such as the number of unique values for the [uniq](reference.md#agg_function-uniq) function), but an intermediate state of the aggregation (for `uniq`, this is the hash table for calculating the number of unique values). This is an `AggregateFunction(...)` that can be used for further processing or stored in a table to finish aggregating later. + +To work with these states, use: + +- [AggregatingMergeTree](../../operations/table_engines/aggregatingmergetree.md) table engine. +- [finalizeAggregation](../functions/other_functions.md#function-finalizeaggregation) function. +- [runningAccumulate](../functions/other_functions.md#function-runningaccumulate) function. +- [-Merge](#aggregate_functions_combinators_merge) combinator. +- [-MergeState](#aggregate_functions_combinators_mergestate) combinator. + +## -Merge {#aggregate_functions_combinators-merge} + +If you apply this combinator, the aggregate function takes the intermediate aggregation state as an argument, combines the states to finish aggregation, and returns the resulting value. + +## -MergeState {#aggregate_functions_combinators-mergestate} + +Merges the intermediate aggregation states in the same way as the -Merge combinator. However, it doesn’t return the resulting value, but an intermediate aggregation state, similar to the -State combinator. + +## -ForEach {#agg-functions-combinator-foreach} + +Converts an aggregate function for tables into an aggregate function for arrays that aggregates the corresponding array items and returns an array of results. For example, `sumForEach` for the arrays `[1, 2]`, `[3, 4, 5]`and`[6, 7]`returns the result `[10, 13, 5]` after adding together the corresponding array items. + +## -OrDefault {#agg-functions-combinator-ordefault} + +Fills the default value of the aggregate function’s return type if there is nothing to aggregate. + +``` sql +SELECT avg(number), avgOrDefault(number) FROM numbers(0) +``` + +``` text +┌─avg(number)─┬─avgOrDefault(number)─┐ +│ nan │ 0 │ +└─────────────┴──────────────────────┘ +``` + +## -OrNull {#agg-functions-combinator-ornull} + +Fills `null` if there is nothing to aggregate. The return column will be nullable. + +``` sql +SELECT avg(number), avgOrNull(number) FROM numbers(0) +``` + +``` text +┌─avg(number)─┬─avgOrNull(number)─┐ +│ nan │ ᴺᵁᴸᴸ │ +└─────────────┴───────────────────┘ +``` + +-OrDefault and -OrNull can be combined with other combinators. It is useful when the aggregate function does not accept the empty input. + +``` sql +SELECT avgOrNullIf(x, x > 10) +FROM +( + SELECT toDecimal32(1.23, 2) AS x +) +``` + +``` text +┌─avgOrNullIf(x, greater(x, 10))─┐ +│ ᴺᵁᴸᴸ │ +└────────────────────────────────┘ +``` + +## -Resample {#agg-functions-combinator-resample} + +Lets you divide data into groups, and then separately aggregates the data in those groups. Groups are created by splitting the values from one column into intervals. 
+
+``` sql
+<aggFunction>Resample(start, stop, step)(<aggFunction_params>, resampling_key)
+```
+
+**Parameters**
+
+- `start` — Starting value of the whole required interval for `resampling_key` values.
+- `stop` — Ending value of the whole required interval for `resampling_key` values. The whole interval doesn’t include the `stop` value: `[start, stop)`.
+- `step` — Step for separating the whole interval into subintervals. The `aggFunction` is executed over each of those subintervals independently.
+- `resampling_key` — Column whose values are used for separating data into intervals.
+- `aggFunction_params` — `aggFunction` parameters.
+
+**Returned values**
+
+- Array of `aggFunction` results for each subinterval.
+
+**Example**
+
+Consider the `people` table with the following data:
+
+``` text
+┌─name───┬─age─┬─wage─┐
+│ John   │  16 │   10 │
+│ Alice  │  30 │   15 │
+│ Mary   │  35 │    8 │
+│ Evelyn │  48 │ 11.5 │
+│ David  │  62 │  9.9 │
+│ Brian  │  60 │   16 │
+└────────┴─────┴──────┘
+```
+
+Let’s get the names of the people whose age lies in the intervals of `[30,60)` and `[60,75)`. Since we use integer representation for age, we get ages in the `[30, 59]` and `[60,74]` intervals.
+
+To aggregate names in an array, we use the [groupArray](reference.md#agg_function-grouparray) aggregate function. It takes one argument. In our case, it’s the `name` column. The `groupArrayResample` function should use the `age` column to aggregate names by age. To define the required intervals, we pass the `30, 75, 30` arguments into the `groupArrayResample` function.
+
+``` sql
+SELECT groupArrayResample(30, 75, 30)(name, age) FROM people
+```
+
+``` text
+┌─groupArrayResample(30, 75, 30)(name, age)─────┐
+│ [['Alice','Mary','Evelyn'],['David','Brian']] │
+└───────────────────────────────────────────────┘
+```
+
+Consider the results.
+
+`John` is out of the sample because he’s too young. Other people are distributed according to the specified age intervals.
+
+Now let’s count the total number of people and their average wage in the specified age intervals.
+
+``` sql
+SELECT
+    countResample(30, 75, 30)(name, age) AS amount,
+    avgResample(30, 75, 30)(wage, age) AS avg_wage
+FROM people
+```
+
+``` text
+┌─amount─┬─avg_wage──────────────────┐
+│ [3,2]  │ [11.5,12.949999809265137] │
+└────────┴───────────────────────────┘
+```
+
+[Original article](https://clickhouse.tech/docs/en/query_language/agg_functions/combinators/)
diff --git a/docs/zh/query_language/agg_functions/index.md b/docs/zh/query_language/agg_functions/index.md
deleted file mode 120000
index 2fcf67abdeb..00000000000
--- a/docs/zh/query_language/agg_functions/index.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/query_language/agg_functions/index.md
\ No newline at end of file
diff --git a/docs/zh/query_language/agg_functions/index.md b/docs/zh/query_language/agg_functions/index.md
new file mode 100644
index 00000000000..c439ddb1e6a
--- /dev/null
+++ b/docs/zh/query_language/agg_functions/index.md
@@ -0,0 +1,58 @@
+---
+en_copy: true
+---
+
+# Aggregate functions {#aggregate-functions}
+
+Aggregate functions work in the [normal](http://www.sql-tutorial.com/sql-aggregate-functions-sql-tutorial) way as expected by database experts.
+
+ClickHouse also supports:
+
+- [Parametric aggregate functions](parametric_functions.md#aggregate_functions_parametric), which accept other parameters in addition to columns.
+- [Combinators](combinators.md#aggregate_functions_combinators), which change the behavior of aggregate functions.
+ +## NULL processing {#null-processing} + +During aggregation, all `NULL`s are skipped. + +**Examples:** + +Consider this table: + +``` text +┌─x─┬────y─┐ +│ 1 │ 2 │ +│ 2 │ ᴺᵁᴸᴸ │ +│ 3 │ 2 │ +│ 3 │ 3 │ +│ 3 │ ᴺᵁᴸᴸ │ +└───┴──────┘ +``` + +Let’s say you need to total the values in the `y` column: + +``` sql +SELECT sum(y) FROM t_null_big +``` + + ┌─sum(y)─┐ + │ 7 │ + └────────┘ + +The `sum` function interprets `NULL` as `0`. In particular, this means that if the function receives input of a selection where all the values are `NULL`, then the result will be `0`, not `NULL`. + +Now you can use the `groupArray` function to create an array from the `y` column: + +``` sql +SELECT groupArray(y) FROM t_null_big +``` + +``` text +┌─groupArray(y)─┐ +│ [2,2,3] │ +└───────────────┘ +``` + +`groupArray` does not include `NULL` in the resulting array. + +[Original article](https://clickhouse.tech/docs/en/query_language/agg_functions/) diff --git a/docs/zh/query_language/agg_functions/parametric_functions.md b/docs/zh/query_language/agg_functions/parametric_functions.md deleted file mode 120000 index fd3ffafcc5b..00000000000 --- a/docs/zh/query_language/agg_functions/parametric_functions.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/query_language/agg_functions/parametric_functions.md \ No newline at end of file diff --git a/docs/zh/query_language/agg_functions/parametric_functions.md b/docs/zh/query_language/agg_functions/parametric_functions.md new file mode 100644 index 00000000000..d4e29feff0e --- /dev/null +++ b/docs/zh/query_language/agg_functions/parametric_functions.md @@ -0,0 +1,496 @@ +--- +en_copy: true +--- + +# Parametric aggregate functions {#aggregate_functions_parametric} + +Some aggregate functions can accept not only argument columns (used for compression), but a set of parameters – constants for initialization. The syntax is two pairs of brackets instead of one. The first is for parameters, and the second is for arguments. + +## histogram {#histogram} + +Calculates an adaptive histogram. It doesn’t guarantee precise results. + +``` sql +histogram(number_of_bins)(values) +``` + +The functions uses [A Streaming Parallel Decision Tree Algorithm](http://jmlr.org/papers/volume11/ben-haim10a/ben-haim10a.pdf). The borders of histogram bins are adjusted as new data enters a function. In common case, the widths of bins are not equal. + +**Parameters** + +`number_of_bins` — Upper limit for the number of bins in the histogram. The function automatically calculates the number of bins. It tries to reach the specified number of bins, but if it fails, it uses fewer bins. +`values` — [Expression](../syntax.md#syntax-expressions) resulting in input values. + +**Returned values** + +- [Array](../../data_types/array.md) of [Tuples](../../data_types/tuple.md) of the following format: + + ``` + [(lower_1, upper_1, height_1), ... (lower_N, upper_N, height_N)] + ``` + + - `lower` — Lower bound of the bin. + - `upper` — Upper bound of the bin. + - `height` — Calculated height of the bin. 
+ +**Example** + +``` sql +SELECT histogram(5)(number + 1) +FROM ( + SELECT * + FROM system.numbers + LIMIT 20 +) +``` + +``` text +┌─histogram(5)(plus(number, 1))───────────────────────────────────────────┐ +│ [(1,4.5,4),(4.5,8.5,4),(8.5,12.75,4.125),(12.75,17,4.625),(17,20,3.25)] │ +└─────────────────────────────────────────────────────────────────────────┘ +``` + +You can visualize a histogram with the [bar](../functions/other_functions.md#function-bar) function, for example: + +``` sql +WITH histogram(5)(rand() % 100) AS hist +SELECT + arrayJoin(hist).3 AS height, + bar(height, 0, 6, 5) AS bar +FROM +( + SELECT * + FROM system.numbers + LIMIT 20 +) +``` + +``` text +┌─height─┬─bar───┐ +│ 2.125 │ █▋ │ +│ 3.25 │ ██▌ │ +│ 5.625 │ ████▏ │ +│ 5.625 │ ████▏ │ +│ 3.375 │ ██▌ │ +└────────┴───────┘ +``` + +In this case, you should remember that you don’t know the histogram bin borders. + +## sequenceMatch(pattern)(timestamp, cond1, cond2, …) {#function-sequencematch} + +Checks whether the sequence contains an event chain that matches the pattern. + +``` sql +sequenceMatch(pattern)(timestamp, cond1, cond2, ...) +``` + +!!! warning "Warning" + Events that occur at the same second may lay in the sequence in an undefined order affecting the result. + +**Parameters** + +- `pattern` — Pattern string. See [Pattern syntax](#sequence-function-pattern-syntax). + +- `timestamp` — Column considered to contain time data. Typical data types are `Date` and `DateTime`. You can also use any of the supported [UInt](../../data_types/int_uint.md) data types. + +- `cond1`, `cond2` — Conditions that describe the chain of events. Data type: `UInt8`. You can pass up to 32 condition arguments. The function takes only the events described in these conditions into account. If the sequence contains data that isn’t described in a condition, the function skips them. + +**Returned values** + +- 1, if the pattern is matched. +- 0, if the pattern isn’t matched. + +Type: `UInt8`. + + +**Pattern syntax** + +- `(?N)` — Matches the condition argument at position `N`. Conditions are numbered in the `[1, 32]` range. For example, `(?1)` matches the argument passed to the `cond1` parameter. + +- `.*` — Matches any number of events. You don’t need conditional arguments to match this element of the pattern. + +- `(?t operator value)` — Sets the time in seconds that should separate two events. For example, pattern `(?1)(?t>1800)(?2)` matches events that occur more than 1800 seconds from each other. An arbitrary number of any events can lay between these events. You can use the `>=`, `>`, `<`, `<=` operators. + +**Examples** + +Consider data in the `t` table: + +``` text +┌─time─┬─number─┐ +│ 1 │ 1 │ +│ 2 │ 3 │ +│ 3 │ 2 │ +└──────┴────────┘ +``` + +Perform the query: + +``` sql +SELECT sequenceMatch('(?1)(?2)')(time, number = 1, number = 2) FROM t +``` + +``` text +┌─sequenceMatch('(?1)(?2)')(time, equals(number, 1), equals(number, 2))─┐ +│ 1 │ +└───────────────────────────────────────────────────────────────────────┘ +``` + +The function found the event chain where number 2 follows number 1. It skipped number 3 between them, because the number is not described as an event. If we want to take this number into account when searching for the event chain given in the example, we should make a condition for it. 
+ +``` sql +SELECT sequenceMatch('(?1)(?2)')(time, number = 1, number = 2, number = 3) FROM t +``` + +``` text +┌─sequenceMatch('(?1)(?2)')(time, equals(number, 1), equals(number, 2), equals(number, 3))─┐ +│ 0 │ +└──────────────────────────────────────────────────────────────────────────────────────────┘ +``` + +In this case, the function couldn’t find the event chain matching the pattern, because the event for number 3 occured between 1 and 2. If in the same case we checked the condition for number 4, the sequence would match the pattern. + +``` sql +SELECT sequenceMatch('(?1)(?2)')(time, number = 1, number = 2, number = 4) FROM t +``` + +``` text +┌─sequenceMatch('(?1)(?2)')(time, equals(number, 1), equals(number, 2), equals(number, 4))─┐ +│ 1 │ +└──────────────────────────────────────────────────────────────────────────────────────────┘ +``` + +**See Also** + +- [sequenceCount](#function-sequencecount) + +## sequenceCount(pattern)(time, cond1, cond2, …) {#function-sequencecount} + +Counts the number of event chains that matched the pattern. The function searches event chains that don’t overlap. It starts to search for the next chain after the current chain is matched. + +!!! warning "Warning" + Events that occur at the same second may lay in the sequence in an undefined order affecting the result. + +``` sql +sequenceCount(pattern)(timestamp, cond1, cond2, ...) +``` + +**Parameters** + +- `pattern` — Pattern string. See [Pattern syntax](#sequence-function-pattern-syntax). + +- `timestamp` — Column considered to contain time data. Typical data types are `Date` and `DateTime`. You can also use any of the supported [UInt](../../data_types/int_uint.md) data types. + +- `cond1`, `cond2` — Conditions that describe the chain of events. Data type: `UInt8`. You can pass up to 32 condition arguments. The function takes only the events described in these conditions into account. If the sequence contains data that isn’t described in a condition, the function skips them. + +**Returned values** + +- Number of non-overlapping event chains that are matched. + +Type: `UInt64`. + +**Example** + +Consider data in the `t` table: + +``` text +┌─time─┬─number─┐ +│ 1 │ 1 │ +│ 2 │ 3 │ +│ 3 │ 2 │ +│ 4 │ 1 │ +│ 5 │ 3 │ +│ 6 │ 2 │ +└──────┴────────┘ +``` + +Count how many times the number 2 occurs after the number 1 with any amount of other numbers between them: + +``` sql +SELECT sequenceCount('(?1).*(?2)')(time, number = 1, number = 2) FROM t +``` + +``` text +┌─sequenceCount('(?1).*(?2)')(time, equals(number, 1), equals(number, 2))─┐ +│ 2 │ +└─────────────────────────────────────────────────────────────────────────┘ +``` + +**See Also** + +- [sequenceMatch](#function-sequencematch) + +## windowFunnel {#windowfunnel} + +Searches for event chains in a sliding time window and calculates the maximum number of events that occurred from the chain. + +The function works according to the algorithm: + +- The function searches for data that triggers the first condition in the chain and sets the event counter to 1. This is the moment when the sliding window starts. + +- If events from the chain occur sequentially within the window, the counter is incremented. If the sequence of events is disrupted, the counter isn’t incremented. + +- If the data has multiple event chains at varying points of completion, the function will only output the size of the longest chain. 
+
+**Syntax**
+
+``` sql
+windowFunnel(window, [mode])(timestamp, cond1, cond2, ..., condN)
+```
+
+**Parameters**
+
+- `window` — Length of the sliding window in seconds.
+- `mode` — An optional argument.
+    - `'strict'` — When `'strict'` is set, `windowFunnel()` applies the conditions only to unique values.
+- `timestamp` — Name of the column containing the timestamp. Data types supported: [Date](../../data_types/date.md), [DateTime](../../data_types/datetime.md#data_type-datetime) and other unsigned integer types (note that even though timestamp supports the `UInt64` type, its value can’t exceed the Int64 maximum, which is 2^63 - 1).
+- `cond` — Conditions or data describing the chain of events. [UInt8](../../data_types/int_uint.md).
+
+**Returned value**
+
+The maximum number of consecutive triggered conditions from the chain within the sliding time window.
+All the chains in the selection are analyzed.
+
+Type: `Integer`.
+
+**Example**
+
+Determine if a set period of time is enough for the user to select a phone and purchase it twice in the online store.
+
+Set the following chain of events:
+
+1. The user logged in to their account on the store (`eventID = 1003`).
+2. The user searches for a phone (`eventID = 1007, product = 'phone'`).
+3. The user placed an order (`eventID = 1009`).
+4. The user made the order again (`eventID = 1010`).
+
+Input table:
+
+``` text
+┌─event_date─┬─user_id─┬───────────timestamp─┬─eventID─┬─product─┐
+│ 2019-01-28 │       1 │ 2019-01-29 10:00:00 │    1003 │ phone   │
+└────────────┴─────────┴─────────────────────┴─────────┴─────────┘
+┌─event_date─┬─user_id─┬───────────timestamp─┬─eventID─┬─product─┐
+│ 2019-01-31 │       1 │ 2019-01-31 09:00:00 │    1007 │ phone   │
+└────────────┴─────────┴─────────────────────┴─────────┴─────────┘
+┌─event_date─┬─user_id─┬───────────timestamp─┬─eventID─┬─product─┐
+│ 2019-01-30 │       1 │ 2019-01-30 08:00:00 │    1009 │ phone   │
+└────────────┴─────────┴─────────────────────┴─────────┴─────────┘
+┌─event_date─┬─user_id─┬───────────timestamp─┬─eventID─┬─product─┐
+│ 2019-02-01 │       1 │ 2019-02-01 08:00:00 │    1010 │ phone   │
+└────────────┴─────────┴─────────────────────┴─────────┴─────────┘
+```
+
+Find out how far the user `user_id` could get through the chain in a period in January-February of 2019.
+
+Query:
+
+``` sql
+SELECT
+    level,
+    count() AS c
+FROM
+(
+    SELECT
+        user_id,
+        windowFunnel(6048000000000000)(timestamp, eventID = 1003, eventID = 1009, eventID = 1007, eventID = 1010) AS level
+    FROM trend
+    WHERE (event_date >= '2019-01-01') AND (event_date <= '2019-02-02')
+    GROUP BY user_id
+)
+GROUP BY level
+ORDER BY level ASC
+```
+
+Result:
+
+``` text
+┌─level─┬─c─┐
+│     4 │ 1 │
+└───────┴───┘
+```
+
+## retention {#retention}
+
+The function takes a set of 1 to 32 conditions of type `UInt8` as arguments, indicating whether a certain condition was met for the event.
+Any condition can be specified as an argument (as in [WHERE](../../query_language/select.md#select-where)).
+
+The conditions, except the first, apply in pairs: the result of the second will be true if the first and second are true, the result of the third if the first and third are true, etc.
+
+**Syntax**
+
+``` sql
+retention(cond1, cond2, ..., cond32);
+```
+
+**Parameters**
+
+- `cond` — an expression that returns a `UInt8` result (1 or 0).
+
+**Returned value**
+
+The array of 1 or 0.
+
+- 1 — condition was met for the event.
+- 0 — condition wasn’t met for the event.
+
+Type: `UInt8`.
+
+**Example**
+
+Let’s consider an example of calculating the `retention` function to determine site traffic.
+
+**1.** Create a table to illustrate the example.
+
+``` sql
+CREATE TABLE retention_test(date Date, uid Int32) ENGINE = Memory;
+
+INSERT INTO retention_test SELECT '2020-01-01', number FROM numbers(5);
+INSERT INTO retention_test SELECT '2020-01-02', number FROM numbers(10);
+INSERT INTO retention_test SELECT '2020-01-03', number FROM numbers(15);
+```
+
+Query:
+
+``` sql
+SELECT * FROM retention_test
+```
+
+Result:
+
+``` text
+┌───────date─┬─uid─┐
+│ 2020-01-01 │ 0 │
+│ 2020-01-01 │ 1 │
+│ 2020-01-01 │ 2 │
+│ 2020-01-01 │ 3 │
+│ 2020-01-01 │ 4 │
+└────────────┴─────┘
+┌───────date─┬─uid─┐
+│ 2020-01-02 │ 0 │
+│ 2020-01-02 │ 1 │
+│ 2020-01-02 │ 2 │
+│ 2020-01-02 │ 3 │
+│ 2020-01-02 │ 4 │
+│ 2020-01-02 │ 5 │
+│ 2020-01-02 │ 6 │
+│ 2020-01-02 │ 7 │
+│ 2020-01-02 │ 8 │
+│ 2020-01-02 │ 9 │
+└────────────┴─────┘
+┌───────date─┬─uid─┐
+│ 2020-01-03 │ 0 │
+│ 2020-01-03 │ 1 │
+│ 2020-01-03 │ 2 │
+│ 2020-01-03 │ 3 │
+│ 2020-01-03 │ 4 │
+│ 2020-01-03 │ 5 │
+│ 2020-01-03 │ 6 │
+│ 2020-01-03 │ 7 │
+│ 2020-01-03 │ 8 │
+│ 2020-01-03 │ 9 │
+│ 2020-01-03 │ 10 │
+│ 2020-01-03 │ 11 │
+│ 2020-01-03 │ 12 │
+│ 2020-01-03 │ 13 │
+│ 2020-01-03 │ 14 │
+└────────────┴─────┘
+```
+
+**2.** Group users by unique ID `uid` using the `retention` function.
+
+Query:
+
+``` sql
+SELECT
+    uid,
+    retention(date = '2020-01-01', date = '2020-01-02', date = '2020-01-03') AS r
+FROM retention_test
+WHERE date IN ('2020-01-01', '2020-01-02', '2020-01-03')
+GROUP BY uid
+ORDER BY uid ASC
+```
+
+Result:
+
+``` text
+┌─uid─┬─r───────┐
+│ 0 │ [1,1,1] │
+│ 1 │ [1,1,1] │
+│ 2 │ [1,1,1] │
+│ 3 │ [1,1,1] │
+│ 4 │ [1,1,1] │
+│ 5 │ [0,0,0] │
+│ 6 │ [0,0,0] │
+│ 7 │ [0,0,0] │
+│ 8 │ [0,0,0] │
+│ 9 │ [0,0,0] │
+│ 10 │ [0,0,0] │
+│ 11 │ [0,0,0] │
+│ 12 │ [0,0,0] │
+│ 13 │ [0,0,0] │
+│ 14 │ [0,0,0] │
+└─────┴─────────┘
+```
+
+**3.** Calculate the total number of site visits per day.
+
+Query:
+
+``` sql
+SELECT
+    sum(r[1]) AS r1,
+    sum(r[2]) AS r2,
+    sum(r[3]) AS r3
+FROM
+(
+    SELECT
+        uid,
+        retention(date = '2020-01-01', date = '2020-01-02', date = '2020-01-03') AS r
+    FROM retention_test
+    WHERE date IN ('2020-01-01', '2020-01-02', '2020-01-03')
+    GROUP BY uid
+)
+```
+
+Result:
+
+``` text
+┌─r1─┬─r2─┬─r3─┐
+│ 5 │ 5 │ 5 │
+└────┴────┴────┘
+```
+
+Where:
+
+- `r1` — the number of unique visitors who visited the site during 2020-01-01 (the `cond1` condition).
+- `r2` — the number of unique visitors who visited the site during a specific time period between 2020-01-01 and 2020-01-02 (`cond1` and `cond2` conditions).
+- `r3` — the number of unique visitors who visited the site during a specific time period between 2020-01-01 and 2020-01-03 (`cond1` and `cond3` conditions).
+
+## uniqUpTo(N)(x) {#uniquptonx}
+
+Calculates the number of different argument values if it is less than or equal to N. If the number of different argument values is greater than N, it returns N + 1.
+
+Recommended for use with small Ns, up to 10. The maximum value of N is 100.
+
+For the state of an aggregate function, it uses an amount of memory equal to 1 + N \* (size of one value) bytes.
+For strings, it stores a non-cryptographic hash of 8 bytes. That is, the calculation is approximate for strings.
+
+The function also works for several arguments.
+
+It works as fast as possible, except for cases when a large N value is used and the number of unique values is slightly less than N.
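+
+In SQL terms, the usage example below might look like this (a sketch assuming a hypothetical `hits` table with `SearchPhrase` and `UserID` columns):
+
+``` sql
+-- uniqUpTo(4)(UserID) returns at most 5, so the HAVING clause keeps only
+-- the search phrases that produced at least 5 unique users.
+SELECT SearchPhrase
+FROM hits
+GROUP BY SearchPhrase
+HAVING uniqUpTo(4)(UserID) >= 5
+```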
+ +Usage example: + +``` text +Problem: Generate a report that shows only keywords that produced at least 5 unique users. +Solution: Write in the GROUP BY query SearchPhrase HAVING uniqUpTo(4)(UserID) >= 5 +``` + +[Original article](https://clickhouse.tech/docs/en/query_language/agg_functions/parametric_functions/) + +## sumMapFiltered(keys\_to\_keep)(keys, values) {#summapfilteredkeys-to-keepkeys-values} + +Same behavior as [sumMap](reference.md#agg_functions-summap) except that an array of keys is passed as a parameter. This can be especially useful when working with a high cardinality of keys. diff --git a/docs/zh/query_language/agg_functions/reference.md b/docs/zh/query_language/agg_functions/reference.md deleted file mode 120000 index c5651cb0793..00000000000 --- a/docs/zh/query_language/agg_functions/reference.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/query_language/agg_functions/reference.md \ No newline at end of file diff --git a/docs/zh/query_language/agg_functions/reference.md b/docs/zh/query_language/agg_functions/reference.md new file mode 100644 index 00000000000..31de8bf1226 --- /dev/null +++ b/docs/zh/query_language/agg_functions/reference.md @@ -0,0 +1,1834 @@ +--- +en_copy: true +--- + +# Function Reference {#function-reference} + +## count {#agg_function-count} + +Counts the number of rows or not-NULL values. + +ClickHouse supports the following syntaxes for `count`: +- `count(expr)` or `COUNT(DISTINCT expr)`. +- `count()` or `COUNT(*)`. The `count()` syntax is ClickHouse-specific. + +**Parameters** + +The function can take: + +- Zero parameters. +- One [expression](../syntax.md#syntax-expressions). + +**Returned value** + +- If the function is called without parameters it counts the number of rows. +- If the [expression](../syntax.md#syntax-expressions) is passed, then the function counts how many times this expression returned not null. If the expression returns a [Nullable](../../data_types/nullable.md)-type value, then the result of `count` stays not `Nullable`. The function returns 0 if the expression returned `NULL` for all the rows. + +In both cases the type of the returned value is [UInt64](../../data_types/int_uint.md). + +**Details** + +ClickHouse supports the `COUNT(DISTINCT ...)` syntax. The behavior of this construction depends on the [count\_distinct\_implementation](../../operations/settings/settings.md#settings-count_distinct_implementation) setting. It defines which of the [uniq\*](#agg_function-uniq) functions is used to perform the operation. The default is the [uniqExact](#agg_function-uniqexact) function. + +The `SELECT count() FROM table` query is not optimized, because the number of entries in the table is not stored separately. It chooses a small column from the table and counts the number of values in it. + +**Examples** + +Example 1: + +``` sql +SELECT count() FROM t +``` + +``` text +┌─count()─┐ +│ 5 │ +└─────────┘ +``` + +Example 2: + +``` sql +SELECT name, value FROM system.settings WHERE name = 'count_distinct_implementation' +``` + +``` text +┌─name──────────────────────────┬─value─────┐ +│ count_distinct_implementation │ uniqExact │ +└───────────────────────────────┴───────────┘ +``` + +``` sql +SELECT count(DISTINCT num) FROM t +``` + +``` text +┌─uniqExact(num)─┐ +│ 3 │ +└────────────────┘ +``` + +This example shows that `count(DISTINCT num)` is performed by the `uniqExact` function according to the `count_distinct_implementation` setting value. + +## any(x) {#agg_function-any} + +Selects the first encountered value. 
+The query can be executed in any order and even in a different order each time, so the result of this function is indeterminate.
+To get a determinate result, you can use the ‘min’ or ‘max’ function instead of ‘any’.
+
+In some cases, you can rely on the order of execution. This applies to cases when SELECT comes from a subquery that uses ORDER BY.
+
+When a `SELECT` query has the `GROUP BY` clause or at least one aggregate function, ClickHouse (in contrast to MySQL) requires that all expressions in the `SELECT`, `HAVING`, and `ORDER BY` clauses be calculated from keys or from aggregate functions. In other words, each column selected from the table must be used either in keys or inside aggregate functions. To get behavior like in MySQL, you can put the other columns in the `any` aggregate function.
+
+## anyHeavy(x) {#anyheavyx}
+
+Selects a frequently occurring value using the [heavy hitters](http://www.cs.umd.edu/~samir/498/karp.pdf) algorithm. If there is a value that occurs in more than half of the cases in each of the query’s execution threads, this value is returned. Normally, the result is nondeterministic.
+
+``` sql
+anyHeavy(column)
+```
+
+**Arguments**
+
+- `column` – The column name.
+
+**Example**
+
+Take the [OnTime](../../getting_started/example_datasets/ontime.md) data set and select any frequently occurring value in the `AirlineID` column.
+
+``` sql
+SELECT anyHeavy(AirlineID) AS res
+FROM ontime
+```
+
+``` text
+┌───res─┐
+│ 19690 │
+└───────┘
+```
+
+## anyLast(x) {#anylastx}
+
+Selects the last value encountered.
+The result is just as indeterminate as for the `any` function.
+
+## groupBitAnd {#groupbitand}
+
+Applies the bitwise `AND` to a series of numbers.
+
+``` sql
+groupBitAnd(expr)
+```
+
+**Parameters**
+
+`expr` – An expression that results in `UInt*` type.
+
+**Return value**
+
+Value of the `UInt*` type.
+
+**Example**
+
+Test data:
+
+``` text
+binary decimal
+00101100 = 44
+00011100 = 28
+00001101 = 13
+01010101 = 85
+```
+
+Query:
+
+``` sql
+SELECT groupBitAnd(num) FROM t
+```
+
+Where `num` is the column with the test data.
+
+Result:
+
+``` text
+binary decimal
+00000100 = 4
+```
+
+## groupBitOr {#groupbitor}
+
+Applies the bitwise `OR` to a series of numbers.
+
+``` sql
+groupBitOr(expr)
+```
+
+**Parameters**
+
+`expr` – An expression that results in `UInt*` type.
+
+**Return value**
+
+Value of the `UInt*` type.
+
+**Example**
+
+Test data:
+
+``` text
+binary decimal
+00101100 = 44
+00011100 = 28
+00001101 = 13
+01010101 = 85
+```
+
+Query:
+
+``` sql
+SELECT groupBitOr(num) FROM t
+```
+
+Where `num` is the column with the test data.
+
+Result:
+
+``` text
+binary decimal
+01111101 = 125
+```
+
+## groupBitXor {#groupbitxor}
+
+Applies the bitwise `XOR` to a series of numbers.
+
+``` sql
+groupBitXor(expr)
+```
+
+**Parameters**
+
+`expr` – An expression that results in `UInt*` type.
+
+**Return value**
+
+Value of the `UInt*` type.
+
+**Example**
+
+Test data:
+
+``` text
+binary decimal
+00101100 = 44
+00011100 = 28
+00001101 = 13
+01010101 = 85
+```
+
+Query:
+
+``` sql
+SELECT groupBitXor(num) FROM t
+```
+
+Where `num` is the column with the test data.
+
+Result:
+
+``` text
+binary decimal
+01101000 = 104
+```
+
+## groupBitmap {#groupbitmap}
+
+Performs bitmap (aggregate) calculations on an unsigned integer column and returns the cardinality as `UInt64`. If the `-State` suffix is added, it returns a [bitmap object](../functions/bitmap_functions.md).
+
+``` sql
+groupBitmap(expr)
+```
+
+**Parameters**
+
+`expr` – An expression that results in `UInt*` type.
+ +**Return value** + +Value of the `UInt64` type. + +**Example** + +Test data: + +``` text +UserID +1 +1 +2 +3 +``` + +Query: + +``` sql +SELECT groupBitmap(UserID) as num FROM t +``` + +Result: + +``` text +num +3 +``` + +## min(x) {#agg_function-min} + +Calculates the minimum. + +## max(x) {#agg_function-max} + +Calculates the maximum. + +## argMin(arg, val) {#agg-function-argmin} + +Calculates the ‘arg’ value for a minimal ‘val’ value. If there are several different values of ‘arg’ for minimal values of ‘val’, the first of these values encountered is output. + +**Example:** + +``` text +┌─user─────┬─salary─┐ +│ director │ 5000 │ +│ manager │ 3000 │ +│ worker │ 1000 │ +└──────────┴────────┘ +``` + +``` sql +SELECT argMin(user, salary) FROM salary +``` + +``` text +┌─argMin(user, salary)─┐ +│ worker │ +└──────────────────────┘ +``` + +## argMax(arg, val) {#agg-function-argmax} + +Calculates the ‘arg’ value for a maximum ‘val’ value. If there are several different values of ‘arg’ for maximum values of ‘val’, the first of these values encountered is output. + +## sum(x) {#agg_function-sum} + +Calculates the sum. +Only works for numbers. + +## sumWithOverflow(x) {#sumwithoverflowx} + +Computes the sum of the numbers, using the same data type for the result as for the input parameters. If the sum exceeds the maximum value for this data type, the function returns an error. + +Only works for numbers. + +## sumMap(key, value) {#agg_functions-summap} + +Totals the ‘value’ array according to the keys specified in the ‘key’ array. +The number of elements in ‘key’ and ‘value’ must be the same for each row that is totaled. +Returns a tuple of two arrays: keys in sorted order, and values ​​summed for the corresponding keys. + +Example: + +``` sql +CREATE TABLE sum_map( + date Date, + timeslot DateTime, + statusMap Nested( + status UInt16, + requests UInt64 + ) +) ENGINE = Log; +INSERT INTO sum_map VALUES + ('2000-01-01', '2000-01-01 00:00:00', [1, 2, 3], [10, 10, 10]), + ('2000-01-01', '2000-01-01 00:00:00', [3, 4, 5], [10, 10, 10]), + ('2000-01-01', '2000-01-01 00:01:00', [4, 5, 6], [10, 10, 10]), + ('2000-01-01', '2000-01-01 00:01:00', [6, 7, 8], [10, 10, 10]); +SELECT + timeslot, + sumMap(statusMap.status, statusMap.requests) +FROM sum_map +GROUP BY timeslot +``` + +``` text +┌────────────timeslot─┬─sumMap(statusMap.status, statusMap.requests)─┐ +│ 2000-01-01 00:00:00 │ ([1,2,3,4,5],[10,10,20,10,10]) │ +│ 2000-01-01 00:01:00 │ ([4,5,6,7,8],[10,10,20,10,10]) │ +└─────────────────────┴──────────────────────────────────────────────┘ +``` + +## skewPop {#skewpop} + +Computes the [skewness](https://en.wikipedia.org/wiki/Skewness) of a sequence. + +``` sql +skewPop(expr) +``` + +**Parameters** + +`expr` — [Expression](../syntax.md#syntax-expressions) returning a number. + +**Returned value** + +The skewness of the given distribution. Type — [Float64](../../data_types/float.md) + +**Example** + +``` sql +SELECT skewPop(value) FROM series_with_value_column +``` + +## skewSamp {#skewsamp} + +Computes the [sample skewness](https://en.wikipedia.org/wiki/Skewness) of a sequence. + +It represents an unbiased estimate of the skewness of a random variable if passed values form its sample. + +``` sql +skewSamp(expr) +``` + +**Parameters** + +`expr` — [Expression](../syntax.md#syntax-expressions) returning a number. + +**Returned value** + +The skewness of the given distribution. Type — [Float64](../../data_types/float.md). If `n <= 1` (`n` is the size of the sample), then the function returns `nan`. 
+
+**Example**
+
+``` sql
+SELECT skewSamp(value) FROM series_with_value_column
+```
+
+## kurtPop {#kurtpop}
+
+Computes the [kurtosis](https://en.wikipedia.org/wiki/Kurtosis) of a sequence.
+
+``` sql
+kurtPop(expr)
+```
+
+**Parameters**
+
+`expr` — [Expression](../syntax.md#syntax-expressions) returning a number.
+
+**Returned value**
+
+The kurtosis of the given distribution. Type — [Float64](../../data_types/float.md)
+
+**Example**
+
+``` sql
+SELECT kurtPop(value) FROM series_with_value_column
+```
+
+## kurtSamp {#kurtsamp}
+
+Computes the [sample kurtosis](https://en.wikipedia.org/wiki/Kurtosis) of a sequence.
+
+It represents an unbiased estimate of the kurtosis of a random variable if passed values form its sample.
+
+``` sql
+kurtSamp(expr)
+```
+
+**Parameters**
+
+`expr` — [Expression](../syntax.md#syntax-expressions) returning a number.
+
+**Returned value**
+
+The kurtosis of the given distribution. Type — [Float64](../../data_types/float.md). If `n <= 1` (`n` is the size of the sample), then the function returns `nan`.
+
+**Example**
+
+``` sql
+SELECT kurtSamp(value) FROM series_with_value_column
+```
+
+## timeSeriesGroupSum(uid, timestamp, value) {#agg-function-timeseriesgroupsum}
+
+`timeSeriesGroupSum` can aggregate different time series whose sample timestamps are not aligned.
+It uses linear interpolation between two neighboring sample timestamps and then sums the time series together.
+
+- `uid` is the time series unique id, `UInt64`.
+- `timestamp` is Int64 type in order to support millisecond or microsecond resolution.
+- `value` is the metric.
+
+The function returns an array of tuples with `(timestamp, aggregated_value)` pairs.
+
+Before using this function make sure `timestamp` is in ascending order.
+
+Example:
+
+``` text
+┌─uid─┬─timestamp─┬─value─┐
+│ 1 │ 2 │ 0.2 │
+│ 1 │ 7 │ 0.7 │
+│ 1 │ 12 │ 1.2 │
+│ 1 │ 17 │ 1.7 │
+│ 1 │ 25 │ 2.5 │
+│ 2 │ 3 │ 0.6 │
+│ 2 │ 8 │ 1.6 │
+│ 2 │ 12 │ 2.4 │
+│ 2 │ 18 │ 3.6 │
+│ 2 │ 24 │ 4.8 │
+└─────┴───────────┴───────┘
+```
+
+``` sql
+CREATE TABLE time_series(
+    uid UInt64,
+    timestamp Int64,
+    value Float64
+) ENGINE = Memory;
+INSERT INTO time_series VALUES
+    (1,2,0.2),(1,7,0.7),(1,12,1.2),(1,17,1.7),(1,25,2.5),
+    (2,3,0.6),(2,8,1.6),(2,12,2.4),(2,18,3.6),(2,24,4.8);
+
+SELECT timeSeriesGroupSum(uid, timestamp, value)
+FROM (
+    SELECT * FROM time_series ORDER BY timestamp ASC
+);
+```
+
+And the result will be:
+
+``` text
+[(2,0.2),(3,0.9),(7,2.1),(8,2.4),(12,3.6),(17,5.1),(18,5.4),(24,7.2),(25,2.5)]
+```
+
+## timeSeriesGroupRateSum(uid, ts, val) {#agg-function-timeseriesgroupratesum}
+
+Similarly to `timeSeriesGroupSum`, `timeSeriesGroupRateSum` calculates the rate of the time series and then sums the rates together.
+Also, the timestamps should be in ascending order before using this function.
+
+Applied to the data from the `timeSeriesGroupSum` example above, this function produces the following result:
+
+``` text
+[(2,0),(3,0.1),(7,0.3),(8,0.3),(12,0.3),(17,0.3),(18,0.3),(24,0.3),(25,0.1)]
+```
+
+## avg(x) {#agg_function-avg}
+
+Calculates the average.
+Only works for numbers.
+The result is always Float64.
+
+## uniq {#agg_function-uniq}
+
+Calculates the approximate number of different values of the argument.
+
+``` sql
+uniq(x[, ...])
+```
+
+**Parameters**
+
+The function takes a variable number of parameters. Parameters can be `Tuple`, `Array`, `Date`, `DateTime`, `String`, or numeric types.
+
+**Returned value**
+
+- A [UInt64](../../data_types/int_uint.md)-type number.
+
+**Implementation details**
+
+Function:
+
+- Calculates a hash for all parameters in the aggregate, then uses it in calculations.
+
+- Uses an adaptive sampling algorithm. For the calculation state, the function uses a sample of element hash values up to 65536.
+
+    This algorithm is very accurate and very efficient on the CPU. When the query contains several of these functions, using `uniq` is almost as fast as using other aggregate functions.
+
+- Provides the result deterministically (it doesn’t depend on the query processing order).
+
+We recommend using this function in almost all scenarios.
+
+**See Also**
+
+- [uniqCombined](#agg_function-uniqcombined)
+- [uniqCombined64](#agg_function-uniqcombined64)
+- [uniqHLL12](#agg_function-uniqhll12)
+- [uniqExact](#agg_function-uniqexact)
+
+## uniqCombined {#agg_function-uniqcombined}
+
+Calculates the approximate number of different argument values.
+
+``` sql
+uniqCombined(HLL_precision)(x[, ...])
+```
+
+The `uniqCombined` function is a good choice for calculating the number of different values.
+
+**Parameters**
+
+The function takes a variable number of parameters. Parameters can be `Tuple`, `Array`, `Date`, `DateTime`, `String`, or numeric types.
+
+`HLL_precision` is the base-2 logarithm of the number of cells in [HyperLogLog](https://en.wikipedia.org/wiki/HyperLogLog). Optional, you can use the function as `uniqCombined(x[, ...])`. The default value for `HLL_precision` is 17, which is effectively 96 KiB of space (2^17 cells, 6 bits each).
+
+**Returned value**
+
+- A [UInt64](../../data_types/int_uint.md)-type number.
+
+**Implementation details**
+
+Function:
+
+- Calculates a hash (64-bit hash for `String` and 32-bit otherwise) for all parameters in the aggregate, then uses it in calculations.
+
+- Uses a combination of three algorithms: array, hash table, and HyperLogLog with an error correction table.
+
+    For a small number of distinct elements, an array is used. When the set size is larger, a hash table is used. For a larger number of elements, HyperLogLog is used, which will occupy a fixed amount of memory.
+
+- Provides the result deterministically (it doesn’t depend on the query processing order).
+
+!!! note "Note"
+    Since it uses a 32-bit hash for non-`String` types, the result will have very high error for cardinalities significantly larger than `UINT_MAX` (the error will rise quickly after a few tens of billions of distinct values), hence in this case you should use [uniqCombined64](#agg_function-uniqcombined64)
+
+Compared to the [uniq](#agg_function-uniq) function, the `uniqCombined`:
+
+- Consumes several times less memory.
+- Calculates with several times higher accuracy.
+- Usually has slightly lower performance. In some scenarios, `uniqCombined` can perform better than `uniq`, for example, with distributed queries that transmit a large number of aggregation states over the network.
+
+**See Also**
+
+- [uniq](#agg_function-uniq)
+- [uniqCombined64](#agg_function-uniqcombined64)
+- [uniqHLL12](#agg_function-uniqhll12)
+- [uniqExact](#agg_function-uniqexact)
+
+## uniqCombined64 {#agg_function-uniqcombined64}
+
+Same as [uniqCombined](#agg_function-uniqcombined), but uses a 64-bit hash for all data types.
+
+## uniqHLL12 {#agg_function-uniqhll12}
+
+Calculates the approximate number of different argument values, using the [HyperLogLog](https://en.wikipedia.org/wiki/HyperLogLog) algorithm.
+
+``` sql
+uniqHLL12(x[, ...])
+```
+
+**Parameters**
+
+The function takes a variable number of parameters. Parameters can be `Tuple`, `Array`, `Date`, `DateTime`, `String`, or numeric types.
+
+**Returned value**
+
+- A [UInt64](../../data_types/int_uint.md)-type number.
+
+**Implementation details**
+
+Function:
+
+- Calculates a hash for all parameters in the aggregate, then uses it in calculations.
+
+- Uses the HyperLogLog algorithm to approximate the number of different argument values.
+
+    2^12 five-bit cells are used. The size of the state is slightly more than 2.5 KB. The result is not very accurate (up to ~10% error) for small data sets (<10K elements). However, the result is fairly accurate for high-cardinality data sets (10K-100M), with a maximum error of ~1.6%. Starting from 100M, the estimation error increases, and the function will return very inaccurate results for data sets with extremely high cardinality (1B+ elements).
+
+- Provides a deterministic result (it doesn’t depend on the query processing order).
+
+We don’t recommend using this function. In most cases, use the [uniq](#agg_function-uniq) or [uniqCombined](#agg_function-uniqcombined) function.
+
+**See Also**
+
+- [uniq](#agg_function-uniq)
+- [uniqCombined](#agg_function-uniqcombined)
+- [uniqExact](#agg_function-uniqexact)
+
+## uniqExact {#agg_function-uniqexact}
+
+Calculates the exact number of different argument values.
+
+``` sql
+uniqExact(x[, ...])
+```
+
+Use the `uniqExact` function if you absolutely need an exact result. Otherwise use the [uniq](#agg_function-uniq) function.
+
+The `uniqExact` function uses more memory than `uniq`, because the size of the state grows unboundedly as the number of different values increases.
+
+**Parameters**
+
+The function takes a variable number of parameters. Parameters can be `Tuple`, `Array`, `Date`, `DateTime`, `String`, or numeric types.
+
+**See Also**
+
+- [uniq](#agg_function-uniq)
+- [uniqCombined](#agg_function-uniqcombined)
+- [uniqHLL12](#agg_function-uniqhll12)
+
+## groupArray(x), groupArray(max\_size)(x) {#agg_function-grouparray}
+
+Creates an array of argument values.
+Values can be added to the array in any (indeterminate) order.
+
+The second version (with the `max_size` parameter) limits the size of the resulting array to `max_size` elements.
+For example, `groupArray(1)(x)` is equivalent to `[any(x)]`.
+
+In some cases, you can still rely on the order of execution. This applies to cases when `SELECT` comes from a subquery that uses `ORDER BY`.
+
+## groupArrayInsertAt(value, position) {#grouparrayinsertatvalue-position}
+
+Inserts a value into the array at the specified position.
+
+!!! note "Note"
+    This function uses zero-based positions, contrary to the conventional one-based positions for SQL arrays.
+
+Accepts the value and position as input. If several values are inserted into the same position, any of them might end up in the resulting array (the first one will be used in the case of single-threaded execution). If no value is inserted into a position, the position is assigned the default value.
+
+Optional parameters:
+
+- The default value for substituting in empty positions.
+- The length of the resulting array. This allows you to receive arrays of the same size for all the aggregate keys. When using this parameter, the default value must be specified.
+
+## groupArrayMovingSum {#agg_function-grouparraymovingsum}
+
+Calculates the moving sum of input values.
+
+``` sql
+groupArrayMovingSum(numbers_for_summing)
+groupArrayMovingSum(window_size)(numbers_for_summing)
+```
+
+The function can take the window size as a parameter. If left unspecified, the function takes the window size equal to the number of rows in the column.
+ +**Parameters** + +- `numbers_for_summing` — [Expression](../syntax.md#syntax-expressions) resulting in a numeric data type value. +- `window_size` — Size of the calculation window. + +**Returned values** + +- Array of the same size and type as the input data. + +**Example** + +The sample table: + +``` sql +CREATE TABLE t +( + `int` UInt8, + `float` Float32, + `dec` Decimal32(2) +) +ENGINE = TinyLog +``` + +``` text +┌─int─┬─float─┬──dec─┐ +│ 1 │ 1.1 │ 1.10 │ +│ 2 │ 2.2 │ 2.20 │ +│ 4 │ 4.4 │ 4.40 │ +│ 7 │ 7.77 │ 7.77 │ +└─────┴───────┴──────┘ +``` + +The queries: + +``` sql +SELECT + groupArrayMovingSum(int) AS I, + groupArrayMovingSum(float) AS F, + groupArrayMovingSum(dec) AS D +FROM t +``` + +``` text +┌─I──────────┬─F───────────────────────────────┬─D──────────────────────┐ +│ [1,3,7,14] │ [1.1,3.3000002,7.7000003,15.47] │ [1.10,3.30,7.70,15.47] │ +└────────────┴─────────────────────────────────┴────────────────────────┘ +``` + +``` sql +SELECT + groupArrayMovingSum(2)(int) AS I, + groupArrayMovingSum(2)(float) AS F, + groupArrayMovingSum(2)(dec) AS D +FROM t +``` + +``` text +┌─I──────────┬─F───────────────────────────────┬─D──────────────────────┐ +│ [1,3,6,11] │ [1.1,3.3000002,6.6000004,12.17] │ [1.10,3.30,6.60,12.17] │ +└────────────┴─────────────────────────────────┴────────────────────────┘ +``` + +## groupArrayMovingAvg {#agg_function-grouparraymovingavg} + +Calculates the moving average of input values. + +``` sql +groupArrayMovingAvg(numbers_for_summing) +groupArrayMovingAvg(window_size)(numbers_for_summing) +``` + +The function can take the window size as a parameter. If left unspecified, the function takes the window size equal to the number of rows in the column. + +**Parameters** + +- `numbers_for_summing` — [Expression](../syntax.md#syntax-expressions) resulting in a numeric data type value. +- `window_size` — Size of the calculation window. + +**Returned values** + +- Array of the same size and type as the input data. + +The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero). It truncates the decimal places insignificant for the resulting data type. + +**Example** + +The sample table `b`: + +``` sql +CREATE TABLE t +( + `int` UInt8, + `float` Float32, + `dec` Decimal32(2) +) +ENGINE = TinyLog +``` + +``` text +┌─int─┬─float─┬──dec─┐ +│ 1 │ 1.1 │ 1.10 │ +│ 2 │ 2.2 │ 2.20 │ +│ 4 │ 4.4 │ 4.40 │ +│ 7 │ 7.77 │ 7.77 │ +└─────┴───────┴──────┘ +``` + +The queries: + +``` sql +SELECT + groupArrayMovingAvg(int) AS I, + groupArrayMovingAvg(float) AS F, + groupArrayMovingAvg(dec) AS D +FROM t +``` + +``` text +┌─I─────────┬─F───────────────────────────────────┬─D─────────────────────┐ +│ [0,0,1,3] │ [0.275,0.82500005,1.9250001,3.8675] │ [0.27,0.82,1.92,3.86] │ +└───────────┴─────────────────────────────────────┴───────────────────────┘ +``` + +``` sql +SELECT + groupArrayMovingAvg(2)(int) AS I, + groupArrayMovingAvg(2)(float) AS F, + groupArrayMovingAvg(2)(dec) AS D +FROM t +``` + +``` text +┌─I─────────┬─F────────────────────────────────┬─D─────────────────────┐ +│ [0,1,3,5] │ [0.55,1.6500001,3.3000002,6.085] │ [0.55,1.65,3.30,6.08] │ +└───────────┴──────────────────────────────────┴───────────────────────┘ +``` + +## groupUniqArray(x), groupUniqArray(max\_size)(x) {#groupuniqarrayx-groupuniqarraymax-sizex} + +Creates an array from different argument values. Memory consumption is the same as for the `uniqExact` function. 
+ +The second version (with the `max_size` parameter) limits the size of the resulting array to `max_size` elements. +For example, `groupUniqArray(1)(x)` is equivalent to `[any(x)]`. + +## quantile {#quantile} + +Computes an approximate [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence. + +This function applies [reservoir sampling](https://en.wikipedia.org/wiki/Reservoir_sampling) with a reservoir size up to 8192 and a random number generator for sampling. The result is non-deterministic. To get an exact quantile, use the [quantileExact](#quantileexact) function. + +When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](#quantiles) function. + +**Syntax** + +``` sql +quantile(level)(expr) +``` + +Alias: `median`. + +**Parameters** + +- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` value in the range of `[0.01, 0.99]`. Default value: 0.5. At `level=0.5` the function calculates [median](https://en.wikipedia.org/wiki/Median). +- `expr` — Expression over the column values resulting in numeric [data types](../../data_types/index.md#data_types), [Date](../../data_types/date.md) or [DateTime](../../data_types/datetime.md). + +**Returned value** + +- Approximate quantile of the specified level. + +Type: + +- [Float64](../../data_types/float.md) for numeric data type input. +- [Date](../../data_types/date.md) if input values have the `Date` type. +- [DateTime](../../data_types/datetime.md) if input values have the `DateTime` type. + +**Example** + +Input table: + +``` text +┌─val─┐ +│ 1 │ +│ 1 │ +│ 2 │ +│ 3 │ +└─────┘ +``` + +Query: + +``` sql +SELECT quantile(val) FROM t +``` + +Result: + +``` text +┌─quantile(val)─┐ +│ 1.5 │ +└───────────────┘ +``` + +**See Also** + +- [median](#median) +- [quantiles](#quantiles) + +## quantileDeterministic {#quantiledeterministic} + +Computes an approximate [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence. + +This function applies [reservoir sampling](https://en.wikipedia.org/wiki/Reservoir_sampling) with a reservoir size up to 8192 and deterministic algorithm of sampling. The result is deterministic. To get an exact quantile, use the [quantileExact](#quantileexact) function. + +When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](#quantiles) function. + +**Syntax** + +``` sql +quantileDeterministic(level)(expr, determinator) +``` + +Alias: `medianDeterministic`. + +**Parameters** + +- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` value in the range of `[0.01, 0.99]`. Default value: 0.5. At `level=0.5` the function calculates [median](https://en.wikipedia.org/wiki/Median). +- `expr` — Expression over the column values resulting in numeric [data types](../../data_types/index.md#data_types), [Date](../../data_types/date.md) or [DateTime](../../data_types/datetime.md). +- `determinator` — Number whose hash is used instead of a random number generator in the reservoir sampling algorithm to make the result of sampling deterministic. As a determinator you can use any deterministic positive number, for example, a user id or an event id. 
If the same determinator value occurs too often, the function works incorrectly.
+
+**Returned value**
+
+- Approximate quantile of the specified level.
+
+Type:
+
+- [Float64](../../data_types/float.md) for numeric data type input.
+- [Date](../../data_types/date.md) if input values have the `Date` type.
+- [DateTime](../../data_types/datetime.md) if input values have the `DateTime` type.
+
+**Example**
+
+Input table:
+
+``` text
+┌─val─┐
+│ 1 │
+│ 1 │
+│ 2 │
+│ 3 │
+└─────┘
+```
+
+Query:
+
+``` sql
+SELECT quantileDeterministic(val, 1) FROM t
+```
+
+Result:
+
+``` text
+┌─quantileDeterministic(val, 1)─┐
+│ 1.5 │
+└───────────────────────────────┘
+```
+
+**See Also**
+
+- [median](#median)
+- [quantiles](#quantiles)
+
+## quantileExact {#quantileexact}
+
+Exactly computes the [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence.
+
+To get the exact value, all the passed values are combined into an array, which is then partially sorted. Therefore, the function consumes `O(n)` memory, where `n` is the number of values that were passed. However, for a small number of values, the function is very effective.
+
+When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](#quantiles) function.
+
+**Syntax**
+
+``` sql
+quantileExact(level)(expr)
+```
+
+Alias: `medianExact`.
+
+**Parameters**
+
+- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` value in the range of `[0.01, 0.99]`. Default value: 0.5. At `level=0.5` the function calculates [median](https://en.wikipedia.org/wiki/Median).
+- `expr` — Expression over the column values resulting in numeric [data types](../../data_types/index.md#data_types), [Date](../../data_types/date.md) or [DateTime](../../data_types/datetime.md).
+
+**Returned value**
+
+- Quantile of the specified level.
+
+Type:
+
+- [Float64](../../data_types/float.md) for numeric data type input.
+- [Date](../../data_types/date.md) if input values have the `Date` type.
+- [DateTime](../../data_types/datetime.md) if input values have the `DateTime` type.
+
+**Example**
+
+Query:
+
+``` sql
+SELECT quantileExact(number) FROM numbers(10)
+```
+
+Result:
+
+``` text
+┌─quantileExact(number)─┐
+│ 5 │
+└───────────────────────┘
+```
+
+**See Also**
+
+- [median](#median)
+- [quantiles](#quantiles)
+
+## quantileExactWeighted {#quantileexactweighted}
+
+Exactly computes the [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence, taking into account the weight of each element.
+
+To get the exact value, all the passed values are combined into an array, which is then partially sorted. Each value is counted with its weight, as if it is present `weight` times. A hash table is used in the algorithm. Because of this, if the passed values are frequently repeated, the function consumes less RAM than [quantileExact](#quantileexact). You can use this function instead of `quantileExact` and specify the weight 1.
+
+When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](#quantiles) function.
+
+**Syntax**
+
+``` sql
+quantileExactWeighted(level)(expr, weight)
+```
+
+Alias: `medianExactWeighted`.
+
+**Parameters**
+
+- `level` — Level of quantile. Optional parameter. 
Constant floating-point number from 0 to 1. We recommend using a `level` value in the range of `[0.01, 0.99]`. Default value: 0.5. At `level=0.5` the function calculates [median](https://en.wikipedia.org/wiki/Median). +- `expr` — Expression over the column values resulting in numeric [data types](../../data_types/index.md#data_types), [Date](../../data_types/date.md) or [DateTime](../../data_types/datetime.md). +- `weight` — Column with weights of sequence members. Weight is a number of value occurrences. + +**Returned value** + +- Quantile of the specified level. + +Type: + +- [Float64](../../data_types/float.md) for numeric data type input. +- [Date](../../data_types/date.md) if input values have the `Date` type. +- [DateTime](../../data_types/datetime.md) if input values have the `DateTime` type. + +**Example** + +Input table: + +``` text +┌─n─┬─val─┐ +│ 0 │ 3 │ +│ 1 │ 2 │ +│ 2 │ 1 │ +│ 5 │ 4 │ +└───┴─────┘ +``` + +Query: + +``` sql +SELECT quantileExactWeighted(n, val) FROM t +``` + +Result: + +``` text +┌─quantileExactWeighted(n, val)─┐ +│ 1 │ +└───────────────────────────────┘ +``` + +**See Also** + +- [median](#median) +- [quantiles](#quantiles) + +## quantileTiming {#quantiletiming} + +With the determined precision computes the [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence. + +The result is deterministic (it doesn’t depend on the query processing order). The function is optimized for working with sequences which describe distributions like loading web pages times or backend response times. + +When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](#quantiles) function. + +**Syntax** + +``` sql +quantileTiming(level)(expr) +``` + +Alias: `medianTiming`. + +**Parameters** + +- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` value in the range of `[0.01, 0.99]`. Default value: 0.5. At `level=0.5` the function calculates [median](https://en.wikipedia.org/wiki/Median). + +- `expr` — [Expression](../syntax.md#syntax-expressions) over a column values returning a [Float\*](../../data_types/float.md)-type number. + + - If negative values are passed to the function, the behavior is undefined. + - If the value is greater than 30,000 (a page loading time of more than 30 seconds), it is assumed to be 30,000. + +**Accuracy** + +The calculation is accurate if: + +- Total number of values doesn’t exceed 5670. +- Total number of values exceeds 5670, but the page loading time is less than 1024ms. + +Otherwise, the result of the calculation is rounded to the nearest multiple of 16 ms. + +!!! note "Note" + For calculating page loading time quantiles, this function is more effective and accurate than [quantile](#quantile). + +**Returned value** + +- Quantile of the specified level. + +Type: `Float32`. + +!!! note "Note" + If no values are passed to the function (when using `quantileTimingIf`), [NaN](../../data_types/float.md#data_type-float-nan-inf) is returned. The purpose of this is to differentiate these cases from cases that result in zero. See [ORDER BY clause](../select.md#select-order-by) for notes on sorting `NaN` values. 
+ +**Example** + +Input table: + +``` text +┌─response_time─┐ +│ 72 │ +│ 112 │ +│ 126 │ +│ 145 │ +│ 104 │ +│ 242 │ +│ 313 │ +│ 168 │ +│ 108 │ +└───────────────┘ +``` + +Query: + +``` sql +SELECT quantileTiming(response_time) FROM t +``` + +Result: + +``` text +┌─quantileTiming(response_time)─┐ +│ 126 │ +└───────────────────────────────┘ +``` + +**See Also** + +- [median](#median) +- [quantiles](#quantiles) + +## quantileTimingWeighted {#quantiletimingweighted} + +With the determined precision computes the [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence according to the weight of each sequence member. + +The result is deterministic (it doesn’t depend on the query processing order). The function is optimized for working with sequences which describe distributions like loading web pages times or backend response times. + +When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](#quantiles) function. + +**Syntax** + +``` sql +quantileTimingWeighted(level)(expr, weight) +``` + +Alias: `medianTimingWeighted`. + +**Parameters** + +- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` value in the range of `[0.01, 0.99]`. Default value: 0.5. At `level=0.5` the function calculates [median](https://en.wikipedia.org/wiki/Median). + +- `expr` — [Expression](../syntax.md#syntax-expressions) over a column values returning a [Float\*](../../data_types/float.md)-type number. + + - If negative values are passed to the function, the behavior is undefined. + - If the value is greater than 30,000 (a page loading time of more than 30 seconds), it is assumed to be 30,000. + +- `weight` — Column with weights of sequence elements. Weight is a number of value occurrences. + +**Accuracy** + +The calculation is accurate if: + +- Total number of values doesn’t exceed 5670. +- Total number of values exceeds 5670, but the page loading time is less than 1024ms. + +Otherwise, the result of the calculation is rounded to the nearest multiple of 16 ms. + +!!! note "Note" + For calculating page loading time quantiles, this function is more effective and accurate than [quantile](#quantile). + +**Returned value** + +- Quantile of the specified level. + +Type: `Float32`. + +!!! note "Note" + If no values are passed to the function (when using `quantileTimingIf`), [NaN](../../data_types/float.md#data_type-float-nan-inf) is returned. The purpose of this is to differentiate these cases from cases that result in zero. See [ORDER BY clause](../select.md#select-order-by) for notes on sorting `NaN` values. + +**Example** + +Input table: + +``` text +┌─response_time─┬─weight─┐ +│ 68 │ 1 │ +│ 104 │ 2 │ +│ 112 │ 3 │ +│ 126 │ 2 │ +│ 138 │ 1 │ +│ 162 │ 1 │ +└───────────────┴────────┘ +``` + +Query: + +``` sql +SELECT quantileTimingWeighted(response_time, weight) FROM t +``` + +Result: + +``` text +┌─quantileTimingWeighted(response_time, weight)─┐ +│ 112 │ +└───────────────────────────────────────────────┘ +``` + +**See Also** + +- [median](#median) +- [quantiles](#quantiles) + +## quantileTDigest {#quantiletdigest} + +Computes an approximate [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence using the [t-digest](https://github.com/tdunning/t-digest/blob/master/docs/t-digest-paper/histo.pdf) algorithm. + +The maximum error is 1%. 
Memory consumption is `log(n)`, where `n` is the number of values. The result depends on the order of running the query, and is nondeterministic.
+
+The performance of the function is lower than the performance of [quantile](#quantile) or [quantileTiming](#quantiletiming). In terms of the ratio of state size to precision, this function is much better than `quantile`.
+
+When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](#quantiles) function.
+
+**Syntax**
+
+``` sql
+quantileTDigest(level)(expr)
+```
+
+Alias: `medianTDigest`.
+
+**Parameters**
+
+- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` value in the range of `[0.01, 0.99]`. Default value: 0.5. At `level=0.5` the function calculates [median](https://en.wikipedia.org/wiki/Median).
+- `expr` — Expression over the column values resulting in numeric [data types](../../data_types/index.md#data_types), [Date](../../data_types/date.md) or [DateTime](../../data_types/datetime.md).
+
+**Returned value**
+
+- Approximate quantile of the specified level.
+
+Type:
+
+- [Float64](../../data_types/float.md) for numeric data type input.
+- [Date](../../data_types/date.md) if input values have the `Date` type.
+- [DateTime](../../data_types/datetime.md) if input values have the `DateTime` type.
+
+**Example**
+
+Query:
+
+``` sql
+SELECT quantileTDigest(number) FROM numbers(10)
+```
+
+Result:
+
+``` text
+┌─quantileTDigest(number)─┐
+│ 4.5 │
+└─────────────────────────┘
+```
+
+**See Also**
+
+- [median](#median)
+- [quantiles](#quantiles)
+
+## quantileTDigestWeighted {#quantiletdigestweighted}
+
+Computes an approximate [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence using the [t-digest](https://github.com/tdunning/t-digest/blob/master/docs/t-digest-paper/histo.pdf) algorithm. The function takes into account the weight of each sequence member. The maximum error is 1%. Memory consumption is `log(n)`, where `n` is the number of values.
+
+The performance of the function is lower than the performance of [quantile](#quantile) or [quantileTiming](#quantiletiming). In terms of the ratio of state size to precision, this function is much better than `quantile`.
+
+The result depends on the order of running the query, and is nondeterministic.
+
+When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](#quantiles) function.
+
+**Syntax**
+
+``` sql
+quantileTDigestWeighted(level)(expr, weight)
+```
+
+Alias: `medianTDigestWeighted`.
+
+**Parameters**
+
+- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` value in the range of `[0.01, 0.99]`. Default value: 0.5. At `level=0.5` the function calculates [median](https://en.wikipedia.org/wiki/Median).
+- `expr` — Expression over the column values resulting in numeric [data types](../../data_types/index.md#data_types), [Date](../../data_types/date.md) or [DateTime](../../data_types/datetime.md).
+- `weight` — Column with weights of sequence elements. Weight is a number of value occurrences.
+
+**Returned value**
+
+- Approximate quantile of the specified level.
+
+Type:
+
+- [Float64](../../data_types/float.md) for numeric data type input. 
+- [Date](../../data_types/date.md) if input values have the `Date` type. +- [DateTime](../../data_types/datetime.md) if input values have the `DateTime` type. + +**Example** + +Query: + +``` sql +SELECT quantileTDigestWeighted(number, 1) FROM numbers(10) +``` + +Result: + +``` text +┌─quantileTDigestWeighted(number, 1)─┐ +│ 4.5 │ +└────────────────────────────────────┘ +``` + +**See Also** + +- [median](#median) +- [quantiles](#quantiles) + +## median {#median} + +The `median*` functions are the aliases for the corresponding `quantile*` functions. They calculate median of a numeric data sample. + +Functions: + +- `median` — Alias for [quantile](#quantile). +- `medianDeterministic` — Alias for [quantileDeterministic](#quantiledeterministic). +- `medianExact` — Alias for [quantileExact](#quantileexact). +- `medianExactWeighted` — Alias for [quantileExactWeighted](#quantileexactweighted). +- `medianTiming` — Alias for [quantileTiming](#quantiletiming). +- `medianTimingWeighted` — Alias for [quantileTimingWeighted](#quantiletimingweighted). +- `medianTDigest` — Alias for [quantileTDigest](#quantiletdigest). +- `medianTDigestWeighted` — Alias for [quantileTDigestWeighted](#quantiletdigestweighted). + +**Example** + +Input table: + +``` text +┌─val─┐ +│ 1 │ +│ 1 │ +│ 2 │ +│ 3 │ +└─────┘ +``` + +Query: + +``` sql +SELECT medianDeterministic(val, 1) FROM t +``` + +Result: + +``` text +┌─medianDeterministic(val, 1)─┐ +│ 1.5 │ +└─────────────────────────────┘ +``` + +## quantiles(level1, level2, …)(x) {#quantiles} + +All the quantile functions also have corresponding quantiles functions: `quantiles`, `quantilesDeterministic`, `quantilesTiming`, `quantilesTimingWeighted`, `quantilesExact`, `quantilesExactWeighted`, `quantilesTDigest`. These functions calculate all the quantiles of the listed levels in one pass, and return an array of the resulting values. + +## varSamp(x) {#varsampx} + +Calculates the amount `Σ((x - x̅)^2) / (n - 1)`, where `n` is the sample size and `x̅`is the average value of `x`. + +It represents an unbiased estimate of the variance of a random variable if passed values form its sample. + +Returns `Float64`. When `n <= 1`, returns `+∞`. + +## varPop(x) {#varpopx} + +Calculates the amount `Σ((x - x̅)^2) / n`, where `n` is the sample size and `x̅`is the average value of `x`. + +In other words, dispersion for a set of values. Returns `Float64`. + +## stddevSamp(x) {#stddevsampx} + +The result is equal to the square root of `varSamp(x)`. + +## stddevPop(x) {#stddevpopx} + +The result is equal to the square root of `varPop(x)`. + +## topK(N)(x) {#topknx} + +Returns an array of the approximately most frequent values in the specified column. The resulting array is sorted in descending order of approximate frequency of values (not by the values themselves). + +Implements the [Filtered Space-Saving](http://www.l2f.inesc-id.pt/~fmmb/wiki/uploads/Work/misnis.ref0a.pdf) algorithm for analyzing TopK, based on the reduce-and-combine algorithm from [Parallel Space Saving](https://arxiv.org/pdf/1401.0702.pdf). + +``` sql +topK(N)(column) +``` + +This function doesn’t provide a guaranteed result. In certain situations, errors might occur and it might return frequent values that aren’t the most frequent values. + +We recommend using the `N < 10` value; performance is reduced with large `N` values. Maximum value of `N = 65536`. + +**Parameters** + +- ‘N’ is the number of elements to return. + +If the parameter is omitted, default value 10 is used. 
+
+**Arguments**
+
+- `x` – The value to calculate frequency for.
+
+**Example**
+
+Take the [OnTime](../../getting_started/example_datasets/ontime.md) data set and select the three most frequently occurring values in the `AirlineID` column.
+
+``` sql
+SELECT topK(3)(AirlineID) AS res
+FROM ontime
+```
+
+``` text
+┌─res─────────────────┐
+│ [19393,19790,19805] │
+└─────────────────────┘
+```
+
+## topKWeighted {#topkweighted}
+
+Similar to `topK` but takes one additional argument of integer type - `weight`. Every value is accounted `weight` times for the frequency calculation.
+
+**Syntax**
+
+``` sql
+topKWeighted(N)(x, weight)
+```
+
+**Parameters**
+
+- `N` — The number of elements to return.
+
+**Arguments**
+
+- `x` – The value.
+- `weight` — The weight. [UInt8](../../data_types/int_uint.md).
+
+**Returned value**
+
+Returns an array of the values with maximum approximate sum of weights.
+
+**Example**
+
+Query:
+
+``` sql
+SELECT topKWeighted(10)(number, number) FROM numbers(1000)
+```
+
+Result:
+
+``` text
+┌─topKWeighted(10)(number, number)──────────┐
+│ [999,998,997,996,995,994,993,992,991,990] │
+└───────────────────────────────────────────┘
+```
+
+## covarSamp(x, y) {#covarsampx-y}
+
+Calculates the value of `Σ((x - x̅)(y - y̅)) / (n - 1)`.
+
+Returns Float64. When `n <= 1`, returns +∞.
+
+## covarPop(x, y) {#covarpopx-y}
+
+Calculates the value of `Σ((x - x̅)(y - y̅)) / n`.
+
+## corr(x, y) {#corrx-y}
+
+Calculates the Pearson correlation coefficient: `Σ((x - x̅)(y - y̅)) / sqrt(Σ((x - x̅)^2) * Σ((y - y̅)^2))`.
+
+## categoricalInformationValue {#categoricalinformationvalue}
+
+Calculates the value of `(P(tag = 1) - P(tag = 0))(log(P(tag = 1)) - log(P(tag = 0)))` for each category.
+
+``` sql
+categoricalInformationValue(category1, category2, ..., tag)
+```
+
+The result indicates how a discrete (categorical) feature `[category1, category2, ...]` contributes to a learning model that predicts the value of `tag`.
+
+## simpleLinearRegression {#simplelinearregression}
+
+Performs simple (unidimensional) linear regression.
+
+``` sql
+simpleLinearRegression(x, y)
+```
+
+Parameters:
+
+- `x` — Column with explanatory variable values.
+- `y` — Column with dependent variable values.
+
+Returned values:
+
+Constants `(a, b)` of the resulting line `y = a*x + b`.
+
+**Examples**
+
+``` sql
+SELECT arrayReduce('simpleLinearRegression', [0, 1, 2, 3], [0, 1, 2, 3])
+```
+
+``` text
+┌─arrayReduce('simpleLinearRegression', [0, 1, 2, 3], [0, 1, 2, 3])─┐
+│ (1,0) │
+└───────────────────────────────────────────────────────────────────┘
+```
+
+``` sql
+SELECT arrayReduce('simpleLinearRegression', [0, 1, 2, 3], [3, 4, 5, 6])
+```
+
+``` text
+┌─arrayReduce('simpleLinearRegression', [0, 1, 2, 3], [3, 4, 5, 6])─┐
+│ (1,3) │
+└───────────────────────────────────────────────────────────────────┘
+```
+
+## stochasticLinearRegression {#agg_functions-stochasticlinearregression}
+
+This function implements stochastic linear regression. It supports custom parameters for the learning rate, the L2 regularization coefficient and the mini-batch size, and has several methods for updating weights ([Adam](https://en.wikipedia.org/wiki/Stochastic_gradient_descent#Adam) (used by default), [simple SGD](https://en.wikipedia.org/wiki/Stochastic_gradient_descent), [Momentum](https://en.wikipedia.org/wiki/Stochastic_gradient_descent#Momentum), [Nesterov](https://mipt.ru/upload/medialibrary/d7e/41-91.pdf)).
+
+### Parameters {#agg_functions-stochasticlinearregression-parameters}
+
+There are 4 customizable parameters. 
They are passed to the function sequentially, but there is no need to pass all four; default values will be used. However, a good model requires some parameter tuning.
+
+``` text
+stochasticLinearRegression(1.0, 1.0, 10, 'SGD')
+```
+
+1. `learning rate` is the coefficient on the step length when the gradient descent step is performed. A learning rate that is too big may cause infinite weights of the model. Default is `0.00001`.
+2. `l2 regularization coefficient`, which may help to prevent overfitting. Default is `0.1`.
+3. `mini-batch size` sets the number of elements whose gradients will be computed and summed to perform one step of gradient descent. Pure stochastic descent uses one element; however, small batches (about 10 elements) make the gradient steps more stable. Default is `15`.
+4. `method for updating weights`: `Adam` (by default), `SGD`, `Momentum`, `Nesterov`. `Momentum` and `Nesterov` require a little more computation and memory, but they tend to be useful in terms of the convergence speed and stability of stochastic gradient methods.
+
+### Usage {#agg_functions-stochasticlinearregression-usage}
+
+`stochasticLinearRegression` is used in two steps: fitting the model and predicting on new data. In order to fit the model and save its state for later usage, we use the `-State` combinator, which saves the state (the model weights, etc).
+To predict, we use the function [evalMLMethod](../functions/machine_learning_functions.md#machine_learning_methods-evalmlmethod), which takes a state as an argument as well as features to predict on.
+
+
+
+**1.** Fitting
+
+The following query may be used.
+
+``` sql
+CREATE TABLE IF NOT EXISTS train_data
+(
+    param1 Float64,
+    param2 Float64,
+    target Float64
+) ENGINE = Memory;
+
+CREATE TABLE your_model ENGINE = Memory AS SELECT
+stochasticLinearRegressionState(0.1, 0.0, 5, 'SGD')(target, param1, param2)
+AS state FROM train_data;
+```
+
+Here we also need to insert data into the `train_data` table. The number of parameters is not fixed; it depends only on the number of arguments passed into `stochasticLinearRegressionState`. They all must be numeric values.
+Note that the column with the target value (which we would like to learn to predict) is inserted as the first argument.
+
+**2.** Predicting
+
+After saving a state into the table, we may use it multiple times for prediction, or even merge it with other states and create new, even better models.
+
+``` sql
+WITH (SELECT state FROM your_model) AS model SELECT
+evalMLMethod(model, param1, param2) FROM test_data
+```
+
+The query will return a column of predicted values. Note that the first argument of `evalMLMethod` is an `AggregateFunctionState` object; the next arguments are columns of features.
+
+`test_data` is a table like `train_data`, but it may not contain the target value.
+
+### Notes {#agg_functions-stochasticlinearregression-notes}
+
+1. To merge two models, the user may create a query such as `SELECT state1 + state2 FROM your_models`,
+    where the `your_models` table contains both models. This query will return a new `AggregateFunctionState` object.
+
+2. The user may fetch the weights of the created model for their own purposes without saving the model if no `-State` combinator is used:
+    `SELECT stochasticLinearRegression(0.01)(target, param1, param2) FROM train_data`.
+    Such a query will fit the model and return its weights: first come the weights that correspond to the parameters of the model, and the last one is the bias. So in the example above the query will return a column with 3 values.
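+
+Putting the two steps together, a minimal end-to-end sketch (the table and column names follow the Usage section above; the inserted values are toy data invented for illustration):
+
+``` sql
+-- Toy training data where target = param1 + 2 * param2.
+INSERT INTO train_data VALUES (1.0, 2.0, 5.0), (2.0, 3.0, 8.0), (3.0, 4.0, 11.0);
+
+CREATE TABLE your_model ENGINE = Memory AS SELECT
+stochasticLinearRegressionState(0.1, 0.0, 5, 'SGD')(target, param1, param2)
+AS state FROM train_data;
+
+-- Predicting on the training rows themselves, purely for illustration;
+-- in practice you would predict on a separate test_data table.
+WITH (SELECT state FROM your_model) AS model SELECT
+evalMLMethod(model, param1, param2) AS prediction FROM train_data;
+```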
+
+**See Also**
+
+- [stochasticLogisticRegression](#agg_functions-stochasticlogisticregression)
+- [Difference between linear and logistic regressions](https://stackoverflow.com/questions/12146914/what-is-the-difference-between-linear-regression-and-logistic-regression)
+
+## stochasticLogisticRegression {#agg_functions-stochasticlogisticregression}
+
+This function implements stochastic logistic regression. It can be used for binary classification problems, supports the same custom parameters as stochasticLinearRegression and works the same way.
+
+### Parameters {#agg_functions-stochasticlogisticregression-parameters}
+
+Parameters are exactly the same as in stochasticLinearRegression:
+`learning rate`, `l2 regularization coefficient`, `mini-batch size`, `method for updating weights`.
+For more information see [parameters](#agg_functions-stochasticlinearregression-parameters).
+
+``` text
+stochasticLogisticRegression(1.0, 1.0, 10, 'SGD')
+```
+
+1. Fitting
+
+
+
+    See the `Fitting` section in the [stochasticLinearRegression](#stochasticlinearregression-usage-fitting) description.
+
+    Predicted labels have to be in \[-1, 1\].
+
+1. Predicting
+
+
+
+    Using the saved state, we can predict the probability of an object having the label `1`.
+
+    ``` sql
+    WITH (SELECT state FROM your_model) AS model SELECT
+    evalMLMethod(model, param1, param2) FROM test_data
+    ```
+
+    The query will return a column of probabilities. Note that the first argument of `evalMLMethod` is an `AggregateFunctionState` object; the next arguments are columns of features.
+
+    We can also set a probability bound that assigns elements to different labels.
+
+    ``` sql
+    SELECT ans < 1.1 AND ans > 0.5 FROM
+    (WITH (SELECT state FROM your_model) AS model SELECT
+    evalMLMethod(model, param1, param2) AS ans FROM test_data)
+    ```
+
+    The result will then be labels.
+
+    `test_data` is a table like `train_data`, but it may not contain the target value.
+
+**See Also**
+
+- [stochasticLinearRegression](#agg_functions-stochasticlinearregression)
+- [Difference between linear and logistic regressions.](https://stackoverflow.com/questions/12146914/what-is-the-difference-between-linear-regression-and-logistic-regression)
+
+## groupBitmapAnd {#groupbitmapand}
+
+Calculates the AND of a bitmap column and returns the cardinality as `UInt64`. If the `-State` suffix is added, it returns a [bitmap object](../functions/bitmap_functions.md).
+
+``` sql
+groupBitmapAnd(expr)
+```
+
+**Parameters**
+
+`expr` – An expression that results in `AggregateFunction(groupBitmap, UInt*)` type.
+
+**Return value**
+
+Value of the `UInt64` type.
+
+**Example**
+
+``` sql
+DROP TABLE IF EXISTS bitmap_column_expr_test2;
+CREATE TABLE bitmap_column_expr_test2
+(
+    tag_id String,
+    z AggregateFunction(groupBitmap, UInt32)
+)
+ENGINE = MergeTree
+ORDER BY tag_id;
+
+INSERT INTO bitmap_column_expr_test2 VALUES ('tag1', bitmapBuild(cast([1,2,3,4,5,6,7,8,9,10] as Array(UInt32))));
+INSERT INTO bitmap_column_expr_test2 VALUES ('tag2', bitmapBuild(cast([6,7,8,9,10,11,12,13,14,15] as Array(UInt32))));
+INSERT INTO bitmap_column_expr_test2 VALUES ('tag3', bitmapBuild(cast([2,4,6,8,10,12] as Array(UInt32))));
+
+SELECT groupBitmapAnd(z) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%');
+┌─groupBitmapAnd(z)─┐
+│                 3 │
+└───────────────────┘
+
+SELECT arraySort(bitmapToArray(groupBitmapAndState(z))) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%');
+┌─arraySort(bitmapToArray(groupBitmapAndState(z)))─┐
+│ [6,8,10]                                         │
+└──────────────────────────────────────────────────┘
+```
+
+## groupBitmapOr {#groupbitmapor}
+
+Calculates the OR of a bitmap column and returns the cardinality as a value of type `UInt64`; with the `-State` suffix it returns a [bitmap object](../functions/bitmap_functions.md). This is equivalent to `groupBitmapMerge`.
+
+``` sql
+groupBitmapOr(expr)
+```
+
+**Parameters**
+
+`expr` – An expression that results in `AggregateFunction(groupBitmap, UInt*)` type.
+
+**Return value**
+
+Value of the `UInt64` type.
+
+**Example**
+
+``` sql
+DROP TABLE IF EXISTS bitmap_column_expr_test2;
+CREATE TABLE bitmap_column_expr_test2
+(
+    tag_id String,
+    z AggregateFunction(groupBitmap, UInt32)
+)
+ENGINE = MergeTree
+ORDER BY tag_id;
+
+INSERT INTO bitmap_column_expr_test2 VALUES ('tag1', bitmapBuild(cast([1,2,3,4,5,6,7,8,9,10] as Array(UInt32))));
+INSERT INTO bitmap_column_expr_test2 VALUES ('tag2', bitmapBuild(cast([6,7,8,9,10,11,12,13,14,15] as Array(UInt32))));
+INSERT INTO bitmap_column_expr_test2 VALUES ('tag3', bitmapBuild(cast([2,4,6,8,10,12] as Array(UInt32))));
+
+SELECT groupBitmapOr(z) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%');
+┌─groupBitmapOr(z)─┐
+│               15 │
+└──────────────────┘
+
+SELECT arraySort(bitmapToArray(groupBitmapOrState(z))) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%');
+┌─arraySort(bitmapToArray(groupBitmapOrState(z)))─┐
+│ [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]           │
+└─────────────────────────────────────────────────┘
+```
+
+## groupBitmapXor {#groupbitmapxor}
+
+Calculates the XOR of a bitmap column and returns the cardinality as a value of type `UInt64`; with the `-State` suffix it returns a [bitmap object](../functions/bitmap_functions.md).
+
+``` sql
+groupBitmapXor(expr)
+```
+
+**Parameters**
+
+`expr` – An expression that results in `AggregateFunction(groupBitmap, UInt*)` type.
+
+**Return value**
+
+Value of the `UInt64` type.
+
+**Example**
+
+``` sql
+DROP TABLE IF EXISTS bitmap_column_expr_test2;
+CREATE TABLE bitmap_column_expr_test2
+(
+    tag_id String,
+    z AggregateFunction(groupBitmap, UInt32)
+)
+ENGINE = MergeTree
+ORDER BY tag_id;
+
+INSERT INTO bitmap_column_expr_test2 VALUES ('tag1', bitmapBuild(cast([1,2,3,4,5,6,7,8,9,10] as Array(UInt32))));
+INSERT INTO bitmap_column_expr_test2 VALUES ('tag2', bitmapBuild(cast([6,7,8,9,10,11,12,13,14,15] as Array(UInt32))));
+INSERT INTO bitmap_column_expr_test2 VALUES ('tag3', bitmapBuild(cast([2,4,6,8,10,12] as Array(UInt32))));
+
+SELECT groupBitmapXor(z) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%');
+┌─groupBitmapXor(z)─┐
+│                10 │
+└───────────────────┘
+
+SELECT arraySort(bitmapToArray(groupBitmapXorState(z))) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%');
+┌─arraySort(bitmapToArray(groupBitmapXorState(z)))─┐
+│ [1,3,5,6,8,10,11,13,14,15]                       │
+└──────────────────────────────────────────────────┘
+```
+
+[Original article](https://clickhouse.tech/docs/en/query_language/agg_functions/reference/)
diff --git a/docs/zh/query_language/alter.md b/docs/zh/query_language/alter.md
deleted file mode 120000
index 44f4ecf9737..00000000000
--- a/docs/zh/query_language/alter.md
+++ /dev/null
@@ -1 +0,0 @@
-../../en/query_language/alter.md
\ No newline at end of file
diff --git a/docs/zh/query_language/alter.md b/docs/zh/query_language/alter.md
new file mode 100644
index 00000000000..a2b05037315
--- /dev/null
+++ b/docs/zh/query_language/alter.md
@@ -0,0 +1,502 @@
+---
+en_copy: true
+---
+
+## ALTER {#query_language_queries_alter}
+
+The `ALTER` query is only supported for `*MergeTree` tables, as well as `Merge` and `Distributed`. The query has several variations.
+
+### Column Manipulations {#column-manipulations}
+
+Changing the table structure.
+
+``` sql
+ALTER TABLE [db].name [ON CLUSTER cluster] ADD|DROP|CLEAR|COMMENT|MODIFY COLUMN ...
+```
+
+In the query, specify a list of one or more comma-separated actions.
+Each action is an operation on a column.
+
+The following actions are supported:
+
+- [ADD COLUMN](#alter_add-column) — Adds a new column to the table.
+- [DROP COLUMN](#alter_drop-column) — Deletes the column.
+- [CLEAR COLUMN](#alter_clear-column) — Resets column values.
+- [COMMENT COLUMN](#alter_comment-column) — Adds a text comment to the column.
+- [MODIFY COLUMN](#alter_modify-column) — Changes the column’s type, default expression and TTL.
+
+These actions are described in detail below.
+
+#### ADD COLUMN {#alter_add-column}
+
+``` sql
+ADD COLUMN [IF NOT EXISTS] name [type] [default_expr] [codec] [AFTER name_after]
+```
+
+Adds a new column to the table with the specified `name`, `type`, [`codec`](create.md#codecs) and `default_expr` (see the section [Default expressions](create.md#create-default-values)).
+
+If the `IF NOT EXISTS` clause is included, the query won’t return an error if the column already exists. If you specify `AFTER name_after` (the name of another column), the column is added after the specified one in the list of table columns. Otherwise, the column is added to the end of the table. Note that there is no way to add a column to the beginning of a table. For a chain of actions, `name_after` can be the name of a column that is added in one of the previous actions.
+
+Adding a column just changes the table structure, without performing any actions with data. The data doesn’t appear on the disk after `ALTER`.
+If the data is missing for a column when reading from the table, it is filled in with default values (by evaluating the default expression if there is one, or using zeros or empty strings). The column appears on the disk after merging data parts (see [MergeTree](../operations/table_engines/mergetree.md)).
+
+This approach allows us to complete the `ALTER` query instantly, without increasing the volume of old data.
+
+Example:
+
+``` sql
+ALTER TABLE visits ADD COLUMN browser String AFTER user_id
+```
+
+#### DROP COLUMN {#alter_drop-column}
+
+``` sql
+DROP COLUMN [IF EXISTS] name
+```
+
+Deletes the column with the name `name`. If the `IF EXISTS` clause is specified, the query won’t return an error if the column doesn’t exist.
+
+Deletes data from the file system. Since this deletes entire files, the query is completed almost instantly.
+
+Example:
+
+``` sql
+ALTER TABLE visits DROP COLUMN browser
+```
+
+#### CLEAR COLUMN {#alter_clear-column}
+
+``` sql
+CLEAR COLUMN [IF EXISTS] name IN PARTITION partition_name
+```
+
+Resets all data in a column for a specified partition. Read more about setting the partition name in the section [How to specify the partition expression](#alter-how-to-specify-part-expr).
+
+If the `IF EXISTS` clause is specified, the query won’t return an error if the column doesn’t exist.
+
+Example:
+
+``` sql
+ALTER TABLE visits CLEAR COLUMN browser IN PARTITION tuple()
+```
+
+#### COMMENT COLUMN {#alter_comment-column}
+
+``` sql
+COMMENT COLUMN [IF EXISTS] name 'comment'
+```
+
+Adds a comment to the column. If the `IF EXISTS` clause is specified, the query won’t return an error if the column doesn’t exist.
+
+Each column can have one comment. If a comment already exists for the column, a new comment overwrites the previous comment.
+
+Comments are stored in the `comment_expression` column returned by the [DESCRIBE TABLE](misc.md#misc-describe-table) query.
+
+Example:
+
+``` sql
+ALTER TABLE visits COMMENT COLUMN browser 'The table shows the browser used for accessing the site.'
+```
+
+#### MODIFY COLUMN {#alter_modify-column}
+
+``` sql
+MODIFY COLUMN [IF EXISTS] name [type] [default_expr] [TTL]
+```
+
+This query changes the `name` column properties:
+
+- Type
+
+- Default expression
+
+- TTL
+
+    For examples of modifying the column TTL, see [Column TTL](../operations/table_engines/mergetree.md#mergetree-column-ttl).
+
+If the `IF EXISTS` clause is specified, the query won’t return an error if the column doesn’t exist.
+
+When changing the type, values are converted as if the [toType](functions/type_conversion_functions.md) functions were applied to them. If only the default expression is changed, the query doesn’t do anything complex, and is completed almost instantly.
+
+Example:
+
+``` sql
+ALTER TABLE visits MODIFY COLUMN browser Array(String)
+```
+
+Changing the column type is the only complex action – it changes the contents of files with data. For large tables, this may take a long time.
+
+There are several processing stages:
+
+- Preparing temporary (new) files with modified data.
+- Renaming old files.
+- Renaming the temporary (new) files to the old names.
+- Deleting the old files.
+
+Only the first stage takes time. If there is a failure at this stage, the data is not changed.
+If there is a failure during one of the successive stages, data can be restored manually. The exception is if the old files were deleted from the file system but the data for the new files did not get written to the disk and was lost.
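+
+As an illustration of the difference in cost, the two statements below are not equal (a sketch against the hypothetical `visits` table from the examples above): the first one only changes the default expression, so it is metadata-only and completes almost instantly, while the second one changes the type and therefore rewrites the column files.
+
+``` sql
+-- metadata-only: completes almost instantly
+ALTER TABLE visits MODIFY COLUMN browser String DEFAULT 'unknown';
+
+-- rewrites the files with column data: may take a long time on large tables
+ALTER TABLE visits MODIFY COLUMN browser Array(String);
+```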
+
+The `ALTER` query for changing columns is replicated. The instructions are saved in ZooKeeper, then each replica applies them. All `ALTER` queries are run in the same order. The query waits for the appropriate actions to be completed on the other replicas. However, a query to change columns in a replicated table can be interrupted, and all actions will be performed asynchronously.
+
+#### ALTER Query Limitations {#alter-query-limitations}
+
+The `ALTER` query lets you create and delete separate elements (columns) in nested data structures, but not whole nested data structures. To add a nested data structure, you can add columns with a name like `name.nested_name` and the type `Array(T)`. A nested data structure is equivalent to multiple array columns with a name that has the same prefix before the dot.
+
+There is no support for deleting columns in the primary key or the sampling key (columns that are used in the `ENGINE` expression). Changing the type for columns that are included in the primary key is only possible if this change does not cause the data to be modified (for example, you are allowed to add values to an Enum or to change a type from `DateTime` to `UInt32`).
+
+If the `ALTER` query is not sufficient to make the table changes you need, you can create a new table, copy the data to it using the [INSERT SELECT](insert_into.md#insert_query_insert-select) query, then switch the tables using the [RENAME](misc.md#misc_operations-rename) query and delete the old table. You can use [clickhouse-copier](../operations/utils/clickhouse-copier.md) as an alternative to the `INSERT SELECT` query.
+
+The `ALTER` query blocks all reads and writes for the table. In other words, if a long `SELECT` is running at the time of the `ALTER` query, the `ALTER` query will wait for it to complete. At the same time, all new queries to the same table will wait while this `ALTER` is running.
+
+For tables that don’t store data themselves (such as `Merge` and `Distributed`), `ALTER` just changes the table structure, and does not change the structure of subordinate tables. For example, when running `ALTER` for a `Distributed` table, you will also need to run `ALTER` for the tables on all remote servers.
+
+### Manipulations With Key Expressions {#manipulations-with-key-expressions}
+
+The following command is supported:
+
+``` sql
+MODIFY ORDER BY new_expression
+```
+
+It only works for tables in the [`MergeTree`](../operations/table_engines/mergetree.md) family (including
+[replicated](../operations/table_engines/replication.md) tables). The command changes the
+[sorting key](../operations/table_engines/mergetree.md) of the table
+to `new_expression` (an expression or a tuple of expressions). The primary key remains the same.
+
+The command is lightweight in the sense that it only changes metadata. To keep the property that data part
+rows are ordered by the sorting key expression, you cannot add expressions containing existing columns
+to the sorting key (only columns added by the `ADD COLUMN` command in the same `ALTER` query).
+
+### Manipulations With Data Skipping Indices {#manipulations-with-data-skipping-indices}
+
+It only works for tables in the [`*MergeTree`](../operations/table_engines/mergetree.md) family (including
+[replicated](../operations/table_engines/replication.md) tables). The following operations
+are available:
+
+- `ALTER TABLE [db].name ADD INDEX name expression TYPE type GRANULARITY value AFTER name [AFTER name2]` - Adds the index description to the table’s metadata.
+
+- `ALTER TABLE [db].name DROP INDEX name` - Removes the index description from the table’s metadata and deletes the index files from disk.
+
+These commands are lightweight in the sense that they only change metadata or remove files.
+Also, they are replicated (syncing indices metadata through ZooKeeper).
+
+### Manipulations with constraints {#manipulations-with-constraints}
+
+See more on [constraints](create.md#constraints).
+
+Constraints can be added or deleted using the following syntax:
+
+``` sql
+ALTER TABLE [db].name ADD CONSTRAINT constraint_name CHECK expression;
+ALTER TABLE [db].name DROP CONSTRAINT constraint_name;
+```
+
+Queries add or remove metadata about constraints from the table, so they are processed immediately.
+
+The constraint check *will not be executed* on existing data when a constraint is added.
+
+All changes on replicated tables are broadcast to ZooKeeper, so they will be applied on the other replicas.
+
+### Manipulations With Partitions and Parts {#alter_manipulations-with-partitions}
+
+The following operations with [partitions](../operations/table_engines/custom_partitioning_key.md) are available:
+
+- [DETACH PARTITION](#alter_detach-partition) – Moves a partition to the `detached` directory and forgets it.
+- [DROP PARTITION](#alter_drop-partition) – Deletes a partition.
+- [ATTACH PART\|PARTITION](#alter_attach-partition) – Adds a part or partition from the `detached` directory to the table.
+- [ATTACH PARTITION FROM](#alter_attach-partition-from) – Copies the data partition from one table to another and adds it.
+- [REPLACE PARTITION](#alter_replace-partition) - Copies the data partition from one table to another and replaces the existing partition.
+- [MOVE PARTITION TO TABLE](#alter_move_to_table-partition) - Moves the data partition from one table to another.
+- [CLEAR COLUMN IN PARTITION](#alter_clear-column-partition) - Resets the value of a specified column in a partition.
+- [CLEAR INDEX IN PARTITION](#alter_clear-index-partition) - Resets the specified secondary index in a partition.
+- [FREEZE PARTITION](#alter_freeze-partition) – Creates a backup of a partition.
+- [FETCH PARTITION](#alter_fetch-partition) – Downloads a partition from another server.
+- [MOVE PARTITION\|PART](#alter_move-partition) – Moves a partition/data part to another disk or volume.
+
+#### DETACH PARTITION {#alter_detach-partition}
+
+``` sql
+ALTER TABLE table_name DETACH PARTITION partition_expr
+```
+
+Moves all data for the specified partition to the `detached` directory. The server forgets about the detached data partition as if it does not exist. The server will not know about this data until you make the [ATTACH](#alter_attach-partition) query.
+
+Example:
+
+``` sql
+ALTER TABLE visits DETACH PARTITION 201901
+```
+
+Read about setting the partition expression in the section [How to specify the partition expression](#alter-how-to-specify-part-expr).
+
+After the query is executed, you can do whatever you want with the data in the `detached` directory — delete it from the file system, or just leave it.
+
+This query is replicated – it moves the data to the `detached` directory on all replicas. Note that you can execute this query only on a leader replica. To find out if a replica is a leader, perform the `SELECT` query to the [system.replicas](../operations/system_tables.md#system_tables-replicas) table.
+Alternatively, it is easier to make a `DETACH` query on all replicas - all the replicas throw an exception, except the leader replica.
+
+#### DROP PARTITION {#alter_drop-partition}
+
+``` sql
+ALTER TABLE table_name DROP PARTITION partition_expr
+```
+
+Deletes the specified partition from the table. This query tags the partition as inactive and deletes the data completely, in approximately 10 minutes.
+
+Read about setting the partition expression in the section [How to specify the partition expression](#alter-how-to-specify-part-expr).
+
+The query is replicated – it deletes data on all replicas.
+
+#### DROP DETACHED PARTITION\|PART {#alter_drop-detached}
+
+``` sql
+ALTER TABLE table_name DROP DETACHED PARTITION|PART partition_expr
+```
+
+Removes the specified part or all parts of the specified partition from `detached`.
+Read more about setting the partition expression in the section [How to specify the partition expression](#alter-how-to-specify-part-expr).
+
+#### ATTACH PARTITION\|PART {#alter_attach-partition}
+
+``` sql
+ALTER TABLE table_name ATTACH PARTITION|PART partition_expr
+```
+
+Adds data to the table from the `detached` directory. It is possible to add data for an entire partition or for a separate part. Examples:
+
+``` sql
+ALTER TABLE visits ATTACH PARTITION 201901;
+ALTER TABLE visits ATTACH PART 201901_2_2_0;
+```
+
+Read more about setting the partition expression in the section [How to specify the partition expression](#alter-how-to-specify-part-expr).
+
+This query is replicated. The replica-initiator checks whether there is data in the `detached` directory. If data exists, the query checks its integrity. If everything is correct, the query adds the data to the table. All other replicas download the data from the replica-initiator.
+
+So you can put data into the `detached` directory on one replica, and use the `ALTER ... ATTACH` query to add it to the table on all replicas.
+
+#### ATTACH PARTITION FROM {#alter_attach-partition-from}
+
+``` sql
+ALTER TABLE table2 ATTACH PARTITION partition_expr FROM table1
+```
+
+This query copies the data partition from `table1` to `table2` and adds it to the existing data in `table2`. Note that data won’t be deleted from `table1`.
+
+For the query to run successfully, the following conditions must be met:
+
+- Both tables must have the same structure.
+- Both tables must have the same partition key.
+
+#### REPLACE PARTITION {#alter_replace-partition}
+
+``` sql
+ALTER TABLE table2 REPLACE PARTITION partition_expr FROM table1
+```
+
+This query copies the data partition from `table1` to `table2` and replaces the existing partition in `table2`. Note that data won’t be deleted from `table1`.
+
+For the query to run successfully, the following conditions must be met:
+
+- Both tables must have the same structure.
+- Both tables must have the same partition key.
+
+#### MOVE PARTITION TO TABLE {#alter_move_to_table-partition}
+
+``` sql
+ALTER TABLE table_source MOVE PARTITION partition_expr TO TABLE table_dest
+```
+
+This query moves the data partition from `table_source` to `table_dest`, deleting the data from `table_source`.
+
+For the query to run successfully, the following conditions must be met:
+
+- Both tables must have the same structure.
+- Both tables must have the same partition key.
+- Both tables must be of the same engine family (replicated or non-replicated).
+- Both tables must have the same storage policy.
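+
+A sketch combining the three partition-copying commands above (hypothetical tables `visits`, `visits_copy` and `visits_archive` that satisfy the listed conditions):
+
+``` sql
+-- copy partition 201902 from visits into visits_copy, keeping existing data in visits_copy
+ALTER TABLE visits_copy ATTACH PARTITION 201902 FROM visits;
+
+-- copy it again, replacing whatever visits_copy already holds in that partition
+ALTER TABLE visits_copy REPLACE PARTITION 201902 FROM visits;
+
+-- move it out of visits_copy; the data is deleted from visits_copy
+ALTER TABLE visits_copy MOVE PARTITION 201902 TO TABLE visits_archive;
+```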
+
+#### CLEAR COLUMN IN PARTITION {#alter_clear-column-partition}
+
+``` sql
+ALTER TABLE table_name CLEAR COLUMN column_name IN PARTITION partition_expr
+```
+
+Resets all values in the specified column in a partition. If the `DEFAULT` clause was specified when creating the table, this query sets the column value to the specified default value.
+
+Example:
+
+``` sql
+ALTER TABLE visits CLEAR COLUMN hour in PARTITION 201902
+```
+
+#### FREEZE PARTITION {#alter_freeze-partition}
+
+``` sql
+ALTER TABLE table_name FREEZE [PARTITION partition_expr]
+```
+
+This query creates a local backup of a specified partition. If the `PARTITION` clause is omitted, the query creates the backup of all partitions at once.
+
+!!! note "Note"
+    The entire backup process is performed without stopping the server.
+
+Note that for old-style tables you can specify the prefix of the partition name (for example, ‘2019’) - then the query creates the backup for all the corresponding partitions. Read about setting the partition expression in the section [How to specify the partition expression](#alter-how-to-specify-part-expr).
+
+At the time of execution, for a data snapshot, the query creates hardlinks to the table data. Hardlinks are placed in the directory `/var/lib/clickhouse/shadow/N/...`, where:
+
+- `/var/lib/clickhouse/` is the working ClickHouse directory specified in the config.
+- `N` is the incremental number of the backup.
+
+!!! note "Note"
+    If you use [a set of disks for data storage in a table](../operations/table_engines/mergetree.md#table_engine-mergetree-multiple-volumes), the `shadow/N` directory appears on every disk, storing the data parts that are matched by the `PARTITION` expression.
+
+The same structure of directories is created inside the backup as inside `/var/lib/clickhouse/`. The query performs ‘chmod’ for all files, forbidding writing into them.
+
+After creating the backup, you can copy the data from `/var/lib/clickhouse/shadow/` to the remote server and then delete it from the local server. Note that the `ALTER t FREEZE PARTITION` query is not replicated. It creates a local backup only on the local server.
+
+The query creates the backup almost instantly (but first it waits for the current queries to the corresponding table to finish running).
+
+`ALTER TABLE t FREEZE PARTITION` copies only the data, not the table metadata. To make a backup of table metadata, copy the file `/var/lib/clickhouse/metadata/database/table.sql`
+
+To restore data from a backup, do the following:
+
+1. Create the table if it does not exist. To view the query, use the .sql file (replace `ATTACH` in it with `CREATE`).
+2. Copy the data from the `data/database/table/` directory inside the backup to the `/var/lib/clickhouse/data/database/table/detached/` directory.
+3. Run `ALTER TABLE t ATTACH PARTITION` queries to add the data to a table.
+
+Restoring from a backup doesn’t require stopping the server.
+
+For more information about backups and restoring data, see the [Data Backup](../operations/backup.md) section.
+
+#### CLEAR INDEX IN PARTITION {#alter_clear-index-partition}
+
+``` sql
+ALTER TABLE table_name CLEAR INDEX index_name IN PARTITION partition_expr
+```
+
+The query works similarly to `CLEAR COLUMN`, but it resets an index instead of column data.
+
+#### FETCH PARTITION {#alter_fetch-partition}
+
+``` sql
+ALTER TABLE table_name FETCH PARTITION partition_expr FROM 'path-in-zookeeper'
+```
+
+Downloads a partition from another server. This query only works for replicated tables.
+
+The query does the following:
+
+1. Downloads the partition from the specified shard. In ‘path-in-zookeeper’ you must specify a path to the shard in ZooKeeper.
+2. Then the query puts the downloaded data to the `detached` directory of the `table_name` table. Use the [ATTACH PARTITION\|PART](#alter_attach-partition) query to add the data to the table.
+
+For example:
+
+``` sql
+ALTER TABLE users FETCH PARTITION 201902 FROM '/clickhouse/tables/01-01/visits';
+ALTER TABLE users ATTACH PARTITION 201902;
+```
+
+Note that:
+
+- The `ALTER ... FETCH PARTITION` query isn’t replicated. It places the partition in the `detached` directory only on the local server.
+- The `ALTER TABLE ... ATTACH` query is replicated. It adds the data to all replicas. The data is added to one of the replicas from the `detached` directory, and to the others - from neighboring replicas.
+
+Before downloading, the system checks if the partition exists and the table structure matches. The most appropriate replica is selected automatically from the healthy replicas.
+
+Although the query is called `ALTER TABLE`, it does not change the table structure and does not immediately change the data available in the table.
+
+#### MOVE PARTITION\|PART {#alter_move-partition}
+
+Moves partitions or data parts to another volume or disk for `MergeTree`-engine tables. See [Using Multiple Block Devices for Data Storage](../operations/table_engines/mergetree.md#table_engine-mergetree-multiple-volumes).
+
+``` sql
+ALTER TABLE table_name MOVE PARTITION|PART partition_expr TO DISK|VOLUME 'disk_name'
+```
+
+The `ALTER TABLE t MOVE` query:
+
+- Is not replicated, because different replicas can have different storage policies.
+- Returns an error if the specified disk or volume is not configured. The query also returns an error if the data-moving conditions specified in the storage policy can’t be applied.
+- Can return an error when the data to be moved has already been moved by a background process, by a concurrent `ALTER TABLE t MOVE` query, or as a result of background data merging. The user shouldn’t perform any additional actions in this case.
+
+Example:
+
+``` sql
+ALTER TABLE hits MOVE PART '20190301_14343_16206_438' TO VOLUME 'slow'
+ALTER TABLE hits MOVE PARTITION '2019-09-01' TO DISK 'fast_ssd'
+```
+
+#### How To Specify the Partition Expression {#alter-how-to-specify-part-expr}
+
+You can specify the partition expression in `ALTER ... PARTITION` queries in different ways:
+
+- As a value from the `partition` column of the `system.parts` table. For example, `ALTER TABLE visits DETACH PARTITION 201901`.
+- As an expression from the table column. Constants and constant expressions are supported. For example, `ALTER TABLE visits DETACH PARTITION toYYYYMM(toDate('2019-01-25'))`.
+- Using the partition ID. The partition ID is a string identifier of the partition (human-readable, if possible) that is used as the name of partitions in the file system and in ZooKeeper. The partition ID must be specified in the `PARTITION ID` clause, in single quotes. For example, `ALTER TABLE visits DETACH PARTITION ID '201901'`.
+- In the [ALTER ATTACH PART](#alter_attach-partition) and [DROP DETACHED PART](#alter_drop-detached) queries, to specify the name of a part, use a string literal with a value from the `name` column of the [system.detached\_parts](../operations/system_tables.md#system_tables-detached_parts) table. For example, `ALTER TABLE visits ATTACH PART '201901_1_1_0'`.
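+
+For instance, all of the following refer to the same January 2019 partition of a hypothetical `visits` table partitioned by `toYYYYMM(date)`:
+
+``` sql
+ALTER TABLE visits DETACH PARTITION 201901;
+ALTER TABLE visits ATTACH PARTITION toYYYYMM(toDate('2019-01-25'));
+ALTER TABLE visits DROP PARTITION ID '201901';
+```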
+
+Usage of quotes when specifying the partition depends on the type of partition expression. For example, for the `String` type, you have to specify its name in quotes (`'`). For the `Date` and `Int*` types no quotes are needed.
+
+For old-style tables, you can specify the partition either as a number `201901` or as a string `'201901'`. The syntax for the new-style tables is stricter with types (similar to the parser for the VALUES input format).
+
+All the rules above are also true for the [OPTIMIZE](misc.md#misc_operations-optimize) query. If you need to specify the only partition when optimizing a non-partitioned table, set the expression `PARTITION tuple()`. For example:
+
+``` sql
+OPTIMIZE TABLE table_not_partitioned PARTITION tuple() FINAL;
+```
+
+The examples of `ALTER ... PARTITION` queries are demonstrated in the tests [`00502_custom_partitioning_local`](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00502_custom_partitioning_local.sql) and [`00502_custom_partitioning_replicated_zookeeper`](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper.sql).
+
+### Manipulations with Table TTL {#manipulations-with-table-ttl}
+
+You can change the [table TTL](../operations/table_engines/mergetree.md#mergetree-table-ttl) with a request of the following form:
+
+``` sql
+ALTER TABLE table-name MODIFY TTL ttl-expression
+```
+
+### Synchronicity of ALTER Queries {#synchronicity-of-alter-queries}
+
+For non-replicated tables, all `ALTER` queries are performed synchronously. For replicated tables, the query just adds instructions for the appropriate actions to `ZooKeeper`, and the actions themselves are performed as soon as possible. However, the query can wait for these actions to be completed on all the replicas.
+
+For `ALTER ... ATTACH|DETACH|DROP` queries, you can use the `replication_alter_partitions_sync` setting to set up waiting.
+Possible values: `0` – do not wait; `1` – only wait for own execution (default); `2` – wait for all.
+
+### Mutations {#alter-mutations}
+
+Mutations are an `ALTER` query variant that allows changing or deleting rows in a table. In contrast to standard `UPDATE` and `DELETE` queries that are intended for point data changes, mutations are intended for heavy operations that change a lot of rows in a table. Supported for the `MergeTree` family of table engines, including the engines with replication support.
+
+Existing tables are ready for mutations as-is (no conversion necessary), but after the first mutation is applied to a table, its metadata format becomes incompatible with previous server versions and falling back to a previous version becomes impossible.
+
+Currently available commands:
+
+``` sql
+ALTER TABLE [db.]table DELETE WHERE filter_expr
+```
+
+The `filter_expr` must be of type `UInt8`. The query deletes rows in the table for which this expression takes a non-zero value.
+
+``` sql
+ALTER TABLE [db.]table UPDATE column1 = expr1 [, ...] WHERE filter_expr
+```
+
+The `filter_expr` must be of type `UInt8`. This query updates values of specified columns to the values of corresponding expressions in rows for which the `filter_expr` takes a non-zero value. Values are cast to the column type using the `CAST` operator. Updating columns that are used in the calculation of the primary or the partition key is not supported.
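+
+For example, a pair of mutations against a hypothetical `hits` table might look like this:
+
+``` sql
+-- delete all rows matching the filter
+ALTER TABLE hits DELETE WHERE EventDate < '2018-01-01';
+
+-- rewrite a column value in matching rows
+ALTER TABLE hits UPDATE Referer = '' WHERE length(Referer) > 1000;
+```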
+
+``` sql
+ALTER TABLE [db.]table MATERIALIZE INDEX name IN PARTITION partition_name
+```
+
+The query rebuilds the secondary index `name` in the partition `partition_name`.
+
+One query can contain several commands separated by commas.
+
+For `*MergeTree` tables, mutations are executed by rewriting whole data parts. There is no atomicity: mutated parts are substituted for the original parts as soon as they are ready, and a `SELECT` query that started executing during a mutation will see data from parts that have already been mutated along with data from parts that have not been mutated yet.
+
+Mutations are totally ordered by their creation order and are applied to each part in that order. Mutations are also partially ordered with INSERTs: data that was inserted into the table before the mutation was submitted will be mutated, and data that was inserted after that will not be mutated. Note that mutations do not block INSERTs in any way.
+
+A mutation query returns immediately after the mutation entry is added (to ZooKeeper in the case of replicated tables, to the filesystem for non-replicated tables). The mutation itself executes asynchronously using the system profile settings. To track the progress of mutations you can use the [`system.mutations`](../operations/system_tables.md#system_tables-mutations) table. A mutation that was successfully submitted will continue to execute even if ClickHouse servers are restarted. There is no way to roll back the mutation once it is submitted, but if the mutation is stuck for some reason it can be cancelled with the [`KILL MUTATION`](misc.md#kill-mutation) query.
+
+Entries for finished mutations are not deleted right away (the number of preserved entries is determined by the `finished_mutations_to_keep` storage engine parameter). Older mutation entries are deleted.
+
+[Original article](https://clickhouse.tech/docs/en/query_language/alter/)
diff --git a/docs/zh/query_language/dicts/external_dicts.md b/docs/zh/query_language/dicts/external_dicts.md
deleted file mode 120000
index 491b94bffe6..00000000000
--- a/docs/zh/query_language/dicts/external_dicts.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/query_language/dicts/external_dicts.md
\ No newline at end of file
diff --git a/docs/zh/query_language/dicts/external_dicts.md b/docs/zh/query_language/dicts/external_dicts.md
new file mode 100644
index 00000000000..ef41a48f95f
--- /dev/null
+++ b/docs/zh/query_language/dicts/external_dicts.md
@@ -0,0 +1,53 @@
+---
+en_copy: true
+---
+
+# External Dictionaries {#dicts-external-dicts}
+
+You can add your own dictionaries from various data sources. The data source for a dictionary can be a local text or executable file, an HTTP(s) resource, or another DBMS. For more information, see “[Sources for external dictionaries](external_dicts_dict_sources.md)”.
+
+ClickHouse:
+
+- Fully or partially stores dictionaries in RAM.
+- Periodically updates dictionaries and dynamically loads missing values. In other words, dictionaries can be loaded dynamically.
+- Allows creating external dictionaries with XML files or [DDL queries](../create.md#create-dictionary-query).
+
+The configuration of external dictionaries can be located in one or more XML files. The path to the configuration is specified in the [dictionaries\_config](../../operations/server_settings/settings.md#server_settings-dictionaries_config) parameter.
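+
+Once dictionaries are configured, a quick way to check what the server has actually loaded is the `system.dictionaries` table (a sketch; the exact set of columns depends on the server version):
+
+``` sql
+SELECT name, type, source FROM system.dictionaries;
+```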
+
+Dictionaries can be loaded at server startup or at first use, depending on the [dictionaries\_lazy\_load](../../operations/server_settings/settings.md#server_settings-dictionaries_lazy_load) setting.
+
+The dictionary configuration file has the following format:
+
+``` xml
+<yandex>
+    <comment>An optional element with any content. Ignored by the ClickHouse server.</comment>
+
+    <!-- Optional element. File name with substitutions -->
+    <include_from>/etc/metrika.xml</include_from>
+
+    <dictionary>
+        <!-- Dictionary configuration -->
+    </dictionary>
+
+    ...
+
+    <dictionary>
+        <!-- Dictionary configuration -->
+    </dictionary>
+</yandex>
+```
+
+You can [configure](external_dicts_dict.md) any number of dictionaries in the same file.
+
+[DDL queries for dictionaries](../create.md#create-dictionary-query) don’t require any additional records in the server configuration. They allow working with dictionaries as first-class entities, like tables or views.
+
+!!! attention "Attention"
+    You can convert values for a small dictionary by describing it in a `SELECT` query (see the [transform](../functions/other_functions.md) function). This functionality is not related to external dictionaries.
+
+## See also {#ext-dicts-see-also}
+
+- [Configuring an External Dictionary](external_dicts_dict.md)
+- [Storing Dictionaries in Memory](external_dicts_dict_layout.md)
+- [Dictionary Updates](external_dicts_dict_lifetime.md)
+- [Sources of External Dictionaries](external_dicts_dict_sources.md)
+- [Dictionary Key and Fields](external_dicts_dict_structure.md)
+- [Functions for Working with External Dictionaries](../functions/ext_dict_functions.md)
+
+[Original article](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts/)
diff --git a/docs/zh/query_language/dicts/external_dicts_dict.md b/docs/zh/query_language/dicts/external_dicts_dict.md
deleted file mode 120000
index e27820fee60..00000000000
--- a/docs/zh/query_language/dicts/external_dicts_dict.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/query_language/dicts/external_dicts_dict.md
\ No newline at end of file
diff --git a/docs/zh/query_language/dicts/external_dicts_dict.md b/docs/zh/query_language/dicts/external_dicts_dict.md
new file mode 100644
index 00000000000..0519cd381f4
--- /dev/null
+++ b/docs/zh/query_language/dicts/external_dicts_dict.md
@@ -0,0 +1,50 @@
+---
+en_copy: true
+---
+
+# Configuring an External Dictionary {#dicts-external-dicts-dict}
+
+If a dictionary is configured using an XML file, then the dictionary configuration has the following structure:
+
+``` xml
+<dictionary>
+    <name>dict_name</name>
+
+    <structure>
+      <!-- Complex key configuration -->
+    </structure>
+
+    <source>
+      <!-- Source configuration -->
+    </source>
+
+    <layout>
+      <!-- Memory layout configuration -->
+    </layout>
+
+    <lifetime>
+      <!-- Lifetime of dictionary in memory -->
+    </lifetime>
+</dictionary>
+```
+
+The corresponding [DDL query](../create.md#create-dictionary-query) has the following structure:
+
+``` sql
+CREATE DICTIONARY dict_name
+(
+    ... -- attributes
+)
+PRIMARY KEY ... -- complex or single key configuration
+SOURCE(...) -- Source configuration
+LAYOUT(...) -- Memory layout configuration
+LIFETIME(...) -- Lifetime of dictionary in memory
+```
+
+- `name` – The identifier that can be used to access the dictionary. Use the characters `[a-zA-Z0-9_\-]`.
+- [source](external_dicts_dict_sources.md) — Source of the dictionary.
+- [layout](external_dicts_dict_layout.md) — Dictionary layout in memory.
+- [structure](external_dicts_dict_structure.md) — Structure of the dictionary. A key and attributes that can be retrieved by this key.
+- [lifetime](external_dicts_dict_lifetime.md) — Frequency of dictionary updates.
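+
+Putting the pieces together, a complete DDL definition might look like this (a minimal sketch; the dictionary name, columns, file path and lifetime are assumptions for illustration):
+
+``` sql
+CREATE DICTIONARY os_names
+(
+    id UInt64,
+    name String DEFAULT 'unknown'
+)
+PRIMARY KEY id
+SOURCE(FILE(path '/opt/dictionaries/os.tsv' format 'TabSeparated'))
+LAYOUT(FLAT())
+LIFETIME(MIN 300 MAX 360)
+```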
+
+[Original article](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict/)
diff --git a/docs/zh/query_language/dicts/external_dicts_dict_hierarchical.md b/docs/zh/query_language/dicts/external_dicts_dict_hierarchical.md
deleted file mode 120000
index 3f244dc84de..00000000000
--- a/docs/zh/query_language/dicts/external_dicts_dict_hierarchical.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/query_language/dicts/external_dicts_dict_hierarchical.md
\ No newline at end of file
diff --git a/docs/zh/query_language/dicts/external_dicts_dict_hierarchical.md b/docs/zh/query_language/dicts/external_dicts_dict_hierarchical.md
new file mode 100644
index 00000000000..1a1232f95cd
--- /dev/null
+++ b/docs/zh/query_language/dicts/external_dicts_dict_hierarchical.md
@@ -0,0 +1,67 @@
+---
+en_copy: true
+---
+
+# Hierarchical Dictionaries {#hierarchical-dictionaries}
+
+ClickHouse supports hierarchical dictionaries with a [numeric key](external_dicts_dict_structure.md#ext_dict-numeric-key).
+
+Look at the following hierarchical structure:
+
+``` text
+0 (Common parent)
+│
+├── 1 (Russia)
+│   │
+│   └── 2 (Moscow)
+│       │
+│       └── 3 (Center)
+│
+└── 4 (Great Britain)
+    │
+    └── 5 (London)
+```
+
+This hierarchy can be expressed as the following dictionary table.
+
+| region\_id | parent\_region | region\_name  |
+|------------|----------------|---------------|
+| 1          | 0              | Russia        |
+| 2          | 1              | Moscow        |
+| 3          | 2              | Center        |
+| 4          | 0              | Great Britain |
+| 5          | 4              | London        |
+
+This table contains a column `parent_region` that contains the key of the nearest parent for the element.
+
+ClickHouse supports the [hierarchical](external_dicts_dict_structure.md#hierarchical-dict-attr) property for [external dictionary](index.md) attributes. This property allows you to configure a hierarchical dictionary similar to the one described above.
+
+The [dictGetHierarchy](../functions/ext_dict_functions.md#dictgethierarchy) function allows you to get the parent chain of an element.
+
+For our example, the structure of the dictionary can be the following:
+
+``` xml
+<dictionary>
+    <structure>
+        <id>
+            <name>region_id</name>
+        </id>
+
+        <attribute>
+            <name>parent_region</name>
+            <type>UInt64</type>
+            <null_value>0</null_value>
+            <hierarchical>true</hierarchical>
+        </attribute>
+
+        <attribute>
+            <name>region_name</name>
+            <type>String</type>
+            <null_value></null_value>
+        </attribute>
+
+    </structure>
+</dictionary>
+```
+
+[Original article](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_hierarchical/)
diff --git a/docs/zh/query_language/dicts/external_dicts_dict_layout.md b/docs/zh/query_language/dicts/external_dicts_dict_layout.md
deleted file mode 120000
index e391c5be723..00000000000
--- a/docs/zh/query_language/dicts/external_dicts_dict_layout.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/query_language/dicts/external_dicts_dict_layout.md
\ No newline at end of file
diff --git a/docs/zh/query_language/dicts/external_dicts_dict_layout.md b/docs/zh/query_language/dicts/external_dicts_dict_layout.md
new file mode 100644
index 00000000000..c6aa101da46
--- /dev/null
+++ b/docs/zh/query_language/dicts/external_dicts_dict_layout.md
@@ -0,0 +1,370 @@
+---
+en_copy: true
+---
+
+# Storing Dictionaries in Memory {#dicts-external-dicts-dict-layout}
+
+There are a variety of ways to store dictionaries in memory.
+
+We recommend [flat](#flat), [hashed](#dicts-external_dicts_dict_layout-hashed) and [complex\_key\_hashed](#complex-key-hashed), which provide optimal processing speed.
+
+Caching is not recommended because of potentially poor performance and difficulties in selecting optimal parameters. Read more in the section “[cache](#cache)”.
+
+There are several ways to improve dictionary performance:
+
+- Call the function for working with the dictionary after `GROUP BY`.
+- Mark attributes to extract as injective. An attribute is called injective if different attribute values correspond to different keys. So when `GROUP BY` uses a function that fetches an attribute value by the key, this function is automatically taken out of `GROUP BY`.
+
+ClickHouse generates an exception for errors with dictionaries. Examples of errors:
+
+- The dictionary being accessed could not be loaded.
+- Error querying a `cached` dictionary.
+
+You can view the list of external dictionaries and their statuses in the `system.dictionaries` table.
+
+The configuration looks like this:
+
+``` xml
+<yandex>
+    <dictionary>
+        ...
+        <layout>
+            <layout_type>
+                <!-- layout settings -->
+            </layout_type>
+        </layout>
+        ...
+    </dictionary>
+</yandex>
+```
+
+Corresponding [DDL-query](../create.md#create-dictionary-query):
+
+``` sql
+CREATE DICTIONARY (...)
+...
+LAYOUT(LAYOUT_TYPE(param value)) -- layout settings
+...
+```
+
+## Ways to Store Dictionaries in Memory {#ways-to-store-dictionaries-in-memory}
+
+- [flat](#flat)
+- [hashed](#dicts-external_dicts_dict_layout-hashed)
+- [sparse\_hashed](#dicts-external_dicts_dict_layout-sparse_hashed)
+- [cache](#cache)
+- [range\_hashed](#range-hashed)
+- [complex\_key\_hashed](#complex-key-hashed)
+- [complex\_key\_cache](#complex-key-cache)
+- [ip\_trie](#ip-trie)
+
+### flat {#flat}
+
+The dictionary is completely stored in memory in the form of flat arrays. The amount of memory used is proportional to the size of the largest key (in space used).
+
+The dictionary key has the `UInt64` type and the value is limited to 500,000. If a larger key is discovered when creating the dictionary, ClickHouse throws an exception and does not create the dictionary.
+
+All types of sources are supported. When updating, data (from a file or from a table) is read in its entirety.
+
+This method provides the best performance among all available methods of storing the dictionary.
+
+Configuration example:
+
+``` xml
+<layout>
+  <flat />
+</layout>
+```
+
+or
+
+``` sql
+LAYOUT(FLAT())
+```
+
+### hashed {#dicts-external_dicts_dict_layout-hashed}
+
+The dictionary is completely stored in memory in the form of a hash table. The dictionary can contain any number of elements with any identifiers. In practice, the number of keys can reach tens of millions of items.
+
+All types of sources are supported. When updating, data (from a file or from a table) is read in its entirety.
+
+Configuration example:
+
+``` xml
+<layout>
+  <hashed />
+</layout>
+```
+
+or
+
+``` sql
+LAYOUT(HASHED())
+```
+
+### sparse\_hashed {#dicts-external_dicts_dict_layout-sparse_hashed}
+
+Similar to `hashed`, but uses less memory in favor of more CPU usage.
+
+Configuration example:
+
+``` xml
+<layout>
+  <sparse_hashed />
+</layout>
+```
+
+``` sql
+LAYOUT(SPARSE_HASHED())
+```
+
+### complex\_key\_hashed {#complex-key-hashed}
+
+This type of storage is for use with composite [keys](external_dicts_dict_structure.md). Similar to `hashed`.
+
+Configuration example:
+
+``` xml
+<layout>
+  <complex_key_hashed />
+</layout>
+```
+
+``` sql
+LAYOUT(COMPLEX_KEY_HASHED())
+```
+
+### range\_hashed {#range-hashed}
+
+The dictionary is stored in memory in the form of a hash table with an ordered array of ranges and their corresponding values.
+
+This storage method works the same way as hashed and allows using date/time (arbitrary numeric type) ranges in addition to the key.
+
+Example: The table contains discounts for each advertiser in the format:
+
+``` text
++---------------|---------------------|-------------------|--------+
+| advertiser id | discount start date | discount end date | amount |
++===============+=====================+===================+========+
+| 123           | 2015-01-01          | 2015-01-15        | 0.15   |
++---------------|---------------------|-------------------|--------+
+| 123           | 2015-01-16          | 2015-01-31        | 0.25   |
++---------------|---------------------|-------------------|--------+
+| 456           | 2015-01-01          | 2015-01-15        | 0.05   |
++---------------|---------------------|-------------------|--------+
+```
+
+To use a sample for date ranges, define the `range_min` and `range_max` elements in the [structure](external_dicts_dict_structure.md). These elements must contain the elements `name` and `type` (if `type` is not specified, the default type Date is used). `type` can be any numeric type (Date / DateTime / UInt64 / Int32 / others).
+
+Example:
+
+``` xml
+<structure>
+    <id>
+        <name>Id</name>
+    </id>
+    <range_min>
+        <name>first</name>
+        <type>Date</type>
+    </range_min>
+    <range_max>
+        <name>last</name>
+        <type>Date</type>
+    </range_max>
+    ...
+```
+
+or
+
+``` sql
+CREATE DICTIONARY somedict (
+    id UInt64,
+    first Date,
+    last Date
+)
+PRIMARY KEY id
+LAYOUT(RANGE_HASHED())
+RANGE(MIN first MAX last)
+```
+
+To work with these dictionaries, you need to pass an additional argument to the `dictGetT` function, for which a range is selected:
+
+``` sql
+dictGetT('dict_name', 'attr_name', id, date)
+```
+
+This function returns the value for the specified `id`s and the date range that includes the passed date.
+
+Details of the algorithm:
+
+- If the `id` is not found or a range is not found for the `id`, it returns the default value for the dictionary.
+- If there are overlapping ranges, any of them may be used.
+- If the range delimiter is `NULL` or an invalid date (such as 1900-01-01 or 2039-01-01), the range is left open. The range can be open on both sides.
+
+Configuration example:
+
+``` xml
+<yandex>
+    <dictionary>
+
+        ...
+
+        <layout>
+            <range_hashed />
+        </layout>
+
+        <structure>
+            <id>
+                <name>Abcdef</name>
+            </id>
+            <range_min>
+                <name>StartTimeStamp</name>
+                <type>UInt64</type>
+            </range_min>
+            <range_max>
+                <name>EndTimeStamp</name>
+                <type>UInt64</type>
+            </range_max>
+            <attribute>
+                <name>XXXType</name>
+                <type>String</type>
+                <null_value />
+            </attribute>
+        </structure>
+
+    </dictionary>
+</yandex>
+```
+
+or
+
+``` sql
+CREATE DICTIONARY somedict(
+    Abcdef UInt64,
+    StartTimeStamp UInt64,
+    EndTimeStamp UInt64,
+    XXXType String DEFAULT ''
+)
+PRIMARY KEY Abcdef
+RANGE(MIN StartTimeStamp MAX EndTimeStamp)
+```
+
+### cache {#cache}
+
+The dictionary is stored in a cache that has a fixed number of cells. These cells contain frequently used elements.
+
+When searching for a dictionary, the cache is searched first. For each block of data, all keys that are not found in the cache or are outdated are requested from the source using `SELECT attrs... FROM db.table WHERE id IN (k1, k2, ...)`. The received data is then written to the cache.
+
+For cache dictionaries, the expiration [lifetime](external_dicts_dict_lifetime.md) of data in the cache can be set. If more time than `lifetime` has passed since loading the data in a cell, the cell’s value is not used, and it is re-requested the next time it needs to be used.
+This is the least effective of all the ways to store dictionaries. The speed of the cache depends strongly on correct settings and the usage scenario. A cache type dictionary performs well only when the hit rates are high enough (recommended 99% and higher). You can view the average hit rate in the `system.dictionaries` table.
+
+To improve cache performance, use a subquery with `LIMIT`, and call the function with the dictionary externally.
+
+Supported [sources](external_dicts_dict_sources.md): MySQL, ClickHouse, executable, HTTP.
+
+Example of settings:
+
+``` xml
+<layout>
+    <cache>
+        <!-- The size of the cache, in number of cells. Rounded up to a power of two. -->
+        <size_in_cells>1000000000</size_in_cells>
+    </cache>
+</layout>
+```
+
+or
+
+``` sql
+LAYOUT(CACHE(SIZE_IN_CELLS 1000000000))
+```
+
+Set a large enough cache size. You need to experiment to select the number of cells:
+
+1. Set some value.
+2. Run queries until the cache is completely full.
+3. Assess memory consumption using the `system.dictionaries` table.
+4. Increase or decrease the number of cells until the required memory consumption is reached.
+
+!!! warning "Warning"
+    Do not use ClickHouse as a source, because it is slow to process queries with random reads.
+
+### complex\_key\_cache {#complex-key-cache}
+
+This type of storage is for use with composite [keys](external_dicts_dict_structure.md). Similar to `cache`.
+
+### ip\_trie {#ip-trie}
+
+This type of storage is for mapping network prefixes (IP addresses) to metadata such as ASN.
+
+Example: The table contains network prefixes and their corresponding AS number and country code:
+
+``` text
+  +-----------------|-------|--------+
+  | prefix          | asn   | cca2   |
+  +=================+=======+========+
+  | 202.79.32.0/20  | 17501 | NP     |
+  +-----------------|-------|--------+
+  | 2620:0:870::/48 | 3856  | US     |
+  +-----------------|-------|--------+
+  | 2a02:6b8:1::/48 | 13238 | RU     |
+  +-----------------|-------|--------+
+  | 2001:db8::/32   | 65536 | ZZ     |
+  +-----------------|-------|--------+
+```
+
+When using this type of layout, the structure must have a composite key.
+
+Example:
+
+``` xml
+<structure>
+    <key>
+        <attribute>
+            <name>prefix</name>
+            <type>String</type>
+        </attribute>
+    </key>
+    <attribute>
+            <name>asn</name>
+            <type>UInt32</type>
+            <null_value />
+    </attribute>
+    <attribute>
+            <name>cca2</name>
+            <type>String</type>
+            <null_value>??</null_value>
+    </attribute>
+    ...
+```
+
+or
+
+``` sql
+CREATE DICTIONARY somedict (
+    prefix String,
+    asn UInt32,
+    cca2 String DEFAULT '??'
+)
+PRIMARY KEY prefix
+```
+
+The key must have only one `String` type attribute that contains an allowed IP prefix. Other types are not supported yet.
+
+For queries, you must use the same functions (`dictGetT` with a tuple) as for dictionaries with composite keys:
+
+``` sql
+dictGetT('dict_name', 'attr_name', tuple(ip))
+```
+
+The function takes either `UInt32` for IPv4, or `FixedString(16)` for IPv6:
+
+``` sql
+dictGetString('prefix', 'asn', tuple(IPv6StringToNum('2001:db8::1')))
+```
+
+Other types are not supported yet. The function returns the attribute for the prefix that corresponds to this IP address. If there are overlapping prefixes, the most specific one is returned.
+
+Data is stored in a `trie`. It must completely fit into RAM.
+
+[Original article](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_layout/)
diff --git a/docs/zh/query_language/dicts/external_dicts_dict_lifetime.md b/docs/zh/query_language/dicts/external_dicts_dict_lifetime.md
deleted file mode 120000
index 03b53c09077..00000000000
--- a/docs/zh/query_language/dicts/external_dicts_dict_lifetime.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/query_language/dicts/external_dicts_dict_lifetime.md
\ No newline at end of file
diff --git a/docs/zh/query_language/dicts/external_dicts_dict_lifetime.md b/docs/zh/query_language/dicts/external_dicts_dict_lifetime.md
new file mode 100644
index 00000000000..3a90e437681
--- /dev/null
+++ b/docs/zh/query_language/dicts/external_dicts_dict_lifetime.md
@@ -0,0 +1,83 @@
+---
+en_copy: true
+---
+
+# Dictionary Updates {#dictionary-updates}
+
+ClickHouse periodically updates the dictionaries. The update interval for fully downloaded dictionaries and the invalidation interval for cached dictionaries are defined in the `<lifetime>` tag in seconds.
+
+Dictionary updates (other than loading for first use) do not block queries.
+During updates, the old version of a dictionary is used. If an error occurs during an update, the error is written to the server log, and queries continue using the old version of dictionaries.
+
+Example of settings:
+
+``` xml
+<dictionary>
+    ...
+    <lifetime>300</lifetime>
+    ...
+</dictionary>
+```
+
+``` sql
+CREATE DICTIONARY (...)
+...
+LIFETIME(300)
+...
+```
+
+Setting `0` (`LIFETIME(0)`) prevents dictionaries from updating.
+
+You can set a time interval for updates, and ClickHouse will choose a uniformly random time within this range. This is necessary in order to distribute the load on the dictionary source when updating on a large number of servers.
+
+Example of settings:
+
+``` xml
+<dictionary>
+    ...
+    <lifetime>
+        <min>300</min>
+        <max>360</max>
+    </lifetime>
+    ...
+</dictionary>
+```
+
+or
+
+``` sql
+LIFETIME(MIN 300 MAX 360)
+```
+
+When updating the dictionaries, the ClickHouse server applies different logic depending on the type of [source](external_dicts_dict_sources.md):
+
+- For a text file, it checks the time of modification. If the time differs from the previously recorded time, the dictionary is updated.
+- For MyISAM tables, the time of modification is checked using a `SHOW TABLE STATUS` query.
+- Dictionaries from other sources are updated every time by default.
+
+For MySQL (InnoDB), ODBC and ClickHouse sources, you can set up a query that will update the dictionaries only if they really changed, rather than each time. To do this, follow these steps:
+
+- The dictionary table must have a field that always changes when the source data is updated.
+- The settings of the source must specify a query that retrieves the changing field. The ClickHouse server interprets the query result as a row, and if this row has changed relative to its previous state, the dictionary is updated. Specify the query in the `<invalidate_query>` field in the settings for the [source](external_dicts_dict_sources.md).
+
+Example of settings:
+
+``` xml
+<dictionary>
+    ...
+    <odbc>
+      ...
+      <invalidate_query>SELECT update_time FROM dictionary_source where id = 1</invalidate_query>
+    </odbc>
+    ...
+</dictionary>
+```
+
+or
+
+``` sql
+...
+SOURCE(ODBC(... invalidate_query 'SELECT update_time FROM dictionary_source where id = 1'))
+...
+```
+
+[Original article](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_lifetime/)
diff --git a/docs/zh/query_language/dicts/external_dicts_dict_sources.md b/docs/zh/query_language/dicts/external_dicts_dict_sources.md
deleted file mode 120000
index d4f4bf8ef3e..00000000000
--- a/docs/zh/query_language/dicts/external_dicts_dict_sources.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/query_language/dicts/external_dicts_dict_sources.md
\ No newline at end of file
diff --git a/docs/zh/query_language/dicts/external_dicts_dict_sources.md b/docs/zh/query_language/dicts/external_dicts_dict_sources.md
new file mode 100644
index 00000000000..37d050a8e72
--- /dev/null
+++ b/docs/zh/query_language/dicts/external_dicts_dict_sources.md
@@ -0,0 +1,605 @@
+---
+en_copy: true
+---
+
+# Sources of External Dictionaries {#dicts-external-dicts-dict-sources}
+
+An external dictionary can be connected from many different sources.
+
+If a dictionary is configured using an XML file, the configuration looks like this:
+
+``` xml
+<yandex>
+  <dictionary>
+    ...
+    <source>
+      <source_type>
+        <!-- Source configuration -->
+      </source_type>
+    </source>
+    ...
+  </dictionary>
+  ...
+</yandex>
+```
+
+In the case of a [DDL query](../create.md#create-dictionary-query), the equivalent configuration looks like this:
+
+``` sql
+CREATE DICTIONARY dict_name (...)
+...
+SOURCE(SOURCE_TYPE(param1 val1 ... paramN valN)) -- Source configuration
+...
+```
+
+The source is configured in the `source` section.
+ +Types of sources (`source_type`): + +- [Local file](#dicts-external_dicts_dict_sources-local_file) +- [Executable file](#dicts-external_dicts_dict_sources-executable) +- [HTTP(s)](#dicts-external_dicts_dict_sources-http) +- DBMS + - [ODBC](#dicts-external_dicts_dict_sources-odbc) + - [MySQL](#dicts-external_dicts_dict_sources-mysql) + - [ClickHouse](#dicts-external_dicts_dict_sources-clickhouse) + - [MongoDB](#dicts-external_dicts_dict_sources-mongodb) + - [Redis](#dicts-external_dicts_dict_sources-redis) + +## Local File {#dicts-external_dicts_dict_sources-local_file} + +Example of settings: + +``` xml + + + /opt/dictionaries/os.tsv + TabSeparated + + +``` + +or + +``` sql +SOURCE(FILE(path '/opt/dictionaries/os.tsv' format 'TabSeparated')) +``` + +Setting fields: + +- `path` – The absolute path to the file. +- `format` – The file format. All the formats described in “[Formats](../../interfaces/formats.md#formats)” are supported. + +## Executable File {#dicts-external_dicts_dict_sources-executable} + +Working with executable files depends on [how the dictionary is stored in memory](external_dicts_dict_layout.md). If the dictionary is stored using `cache` and `complex_key_cache`, ClickHouse requests the necessary keys by sending a request to the executable file’s STDIN. Otherwise, ClickHouse starts executable file and treats its output as dictionary data. + +Example of settings: + +``` xml + + + cat /opt/dictionaries/os.tsv + TabSeparated + + +``` + +or + +``` sql +SOURCE(EXECUTABLE(command 'cat /opt/dictionaries/os.tsv' format 'TabSeparated')) +``` + +Setting fields: + +- `command` – The absolute path to the executable file, or the file name (if the program directory is written to `PATH`). +- `format` – The file format. All the formats described in “[Formats](../../interfaces/formats.md#formats)” are supported. + +## HTTP(s) {#dicts-external_dicts_dict_sources-http} + +Working with an HTTP(s) server depends on [how the dictionary is stored in memory](external_dicts_dict_layout.md). If the dictionary is stored using `cache` and `complex_key_cache`, ClickHouse requests the necessary keys by sending a request via the `POST` method. + +Example of settings: + +``` xml + + + http://[::1]/os.tsv + TabSeparated + + user + password + + +
    + API-KEY + key +
    +
    +
    + +``` + +or + +``` sql +SOURCE(HTTP( + url 'http://[::1]/os.tsv' + format 'TabSeparated' + credentials(user 'user' password 'password') + headers(header(name 'API-KEY' value 'key')) +)) +``` + +In order for ClickHouse to access an HTTPS resource, you must [configure openSSL](../../operations/server_settings/settings.md#server_settings-openssl) in the server configuration. + +Setting fields: + +- `url` – The source URL. +- `format` – The file format. All the formats described in “[Formats](../../interfaces/formats.md#formats)” are supported. +- `credentials` – Basic HTTP authentication. Optional parameter. + - `user` – Username required for the authentication. + - `password` – Password required for the authentication. +- `headers` – All custom HTTP headers entries used for the HTTP request. Optional parameter. + - `header` – Single HTTP header entry. + - `name` – Identifiant name used for the header send on the request. + - `value` – Value set for a specific identifiant name. + +## ODBC {#dicts-external_dicts_dict_sources-odbc} + +You can use this method to connect any database that has an ODBC driver. + +Example of settings: + +``` xml + + + DatabaseName + ShemaName.TableName
+        <connection_string>DSN=some_parameters</connection_string>
+        <invalidate_query>SQL_QUERY</invalidate_query>
+    </odbc>
+</source>
    + +``` + +or + +``` sql +SOURCE(ODBC( + db 'DatabaseName' + table 'SchemaName.TableName' + connection_string 'DSN=some_parameters' + invalidate_query 'SQL_QUERY' +)) +``` + +Setting fields: + +- `db` – Name of the database. Omit it if the database name is set in the `` parameters. +- `table` – Name of the table and schema if exists. +- `connection_string` – Connection string. +- `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [Updating dictionaries](external_dicts_dict_lifetime.md). + +ClickHouse receives quoting symbols from ODBC-driver and quote all settings in queries to driver, so it’s necessary to set table name accordingly to table name case in database. + +If you have a problems with encodings when using Oracle, see the corresponding [FAQ](../../faq/general.md#oracle-odbc-encodings) article. + +### Known vulnerability of the ODBC dictionary functionality {#known-vulnerability-of-the-odbc-dictionary-functionality} + +!!! attention "Attention" + When connecting to the database through the ODBC driver connection parameter `Servername` can be substituted. In this case values of `USERNAME` and `PASSWORD` from `odbc.ini` are sent to the remote server and can be compromised. + +**Example of insecure use** + +Let’s configure unixODBC for PostgreSQL. Content of `/etc/odbc.ini`: + +``` text +[gregtest] +Driver = /usr/lib/psqlodbca.so +Servername = localhost +PORT = 5432 +DATABASE = test_db +#OPTION = 3 +USERNAME = test +PASSWORD = test +``` + +If you then make a query such as + +``` sql +SELECT * FROM odbc('DSN=gregtest;Servername=some-server.com', 'test_db'); +``` + +ODBC driver will send values of `USERNAME` and `PASSWORD` from `odbc.ini` to `some-server.com`. + +### Example of Connecting PostgreSQL {#example-of-connecting-postgresql} + +Ubuntu OS. + +Installing unixODBC and the ODBC driver for PostgreSQL: + +``` bash +$ sudo apt-get install -y unixodbc odbcinst odbc-postgresql +``` + +Configuring `/etc/odbc.ini` (or `~/.odbc.ini`): + +``` text + [DEFAULT] + Driver = myconnection + + [myconnection] + Description = PostgreSQL connection to my_db + Driver = PostgreSQL Unicode + Database = my_db + Servername = 127.0.0.1 + UserName = username + Password = password + Port = 5432 + Protocol = 9.3 + ReadOnly = No + RowVersioning = No + ShowSystemTables = No + ConnSettings = +``` + +The dictionary configuration in ClickHouse: + +``` xml + + + table_name + + + + + DSN=myconnection + postgresql_table
+            </odbc>
+        </source>
+
+        <lifetime>
+            <min>300</min>
+            <max>360</max>
+        </lifetime>
+
+        <layout>
+            <hashed/>
+        </layout>
+
+        <structure>
+            <id>
+                <name>id</name>
+            </id>
+            <attribute>
+                <name>some_column</name>
+                <type>UInt64</type>
+                <null_value>0</null_value>
+            </attribute>
+        </structure>
+    </dictionary>
+</yandex>
    +``` + +or + +``` sql +CREATE DICTIONARY table_name ( + id UInt64, + some_column UInt64 DEFAULT 0 +) +PRIMARY KEY id +SOURCE(ODBC(connection_string 'DSN=myconnection' table 'postgresql_table')) +LAYOUT(HASHED()) +LIFETIME(MIN 300 MAX 360) +``` + +You may need to edit `odbc.ini` to specify the full path to the library with the driver `DRIVER=/usr/local/lib/psqlodbcw.so`. + +### Example of Connecting MS SQL Server {#example-of-connecting-ms-sql-server} + +Ubuntu OS. + +Installing the driver: : + +``` bash +$ sudo apt-get install tdsodbc freetds-bin sqsh +``` + +Configuring the driver: + +``` bash + $ cat /etc/freetds/freetds.conf + ... + + [MSSQL] + host = 192.168.56.101 + port = 1433 + tds version = 7.0 + client charset = UTF-8 + + $ cat /etc/odbcinst.ini + ... + + [FreeTDS] + Description = FreeTDS + Driver = /usr/lib/x86_64-linux-gnu/odbc/libtdsodbc.so + Setup = /usr/lib/x86_64-linux-gnu/odbc/libtdsS.so + FileUsage = 1 + UsageCount = 5 + + $ cat ~/.odbc.ini + ... + + [MSSQL] + Description = FreeTDS + Driver = FreeTDS + Servername = MSSQL + Database = test + UID = test + PWD = test + Port = 1433 +``` + +Configuring the dictionary in ClickHouse: + +``` xml + + + test + + + dict
+                <connection_string>DSN=MSSQL;UID=test;PWD=test</connection_string>
+            </odbc>
+        </source>
+
+        <lifetime>
+            <min>300</min>
+            <max>360</max>
+        </lifetime>
+
+        <layout>
+            <flat/>
+        </layout>
+
+        <structure>
+            <id>
+                <name>k</name>
+            </id>
+            <attribute>
+                <name>s</name>
+                <type>String</type>
+                <null_value></null_value>
+            </attribute>
+        </structure>
+    </dictionary>
+</yandex>
    +``` + +or + +``` sql +CREATE DICTIONARY test ( + k UInt64, + s String DEFAULT '' +) +PRIMARY KEY k +SOURCE(ODBC(table 'dict' connection_string 'DSN=MSSQL;UID=test;PWD=test')) +LAYOUT(FLAT()) +LIFETIME(MIN 300 MAX 360) +``` + +## DBMS {#dbms} + +### MySQL {#dicts-external_dicts_dict_sources-mysql} + +Example of settings: + +``` xml + + + 3306 + clickhouse + qwerty + + example01-1 + 1 + + + example01-2 + 1 + + db_name + table_name
+        <where>id=10</where>
+        <invalidate_query>SQL_QUERY</invalidate_query>
+    </mysql>
+</source>
    + +``` + +or + +``` sql +SOURCE(MYSQL( + port 3306 + user 'clickhouse' + password 'qwerty' + replica(host 'example01-1' priority 1) + replica(host 'example01-2' priority 1) + db 'db_name' + table 'table_name' + where 'id=10' + invalidate_query 'SQL_QUERY' +)) +``` + +Setting fields: + +- `port` – The port on the MySQL server. You can specify it for all replicas, or for each one individually (inside ``). + +- `user` – Name of the MySQL user. You can specify it for all replicas, or for each one individually (inside ``). + +- `password` – Password of the MySQL user. You can specify it for all replicas, or for each one individually (inside ``). + +- `replica` – Section of replica configurations. There can be multiple sections. + + - `replica/host` – The MySQL host. + - `replica/priority` – The replica priority. When attempting to connect, ClickHouse traverses the replicas in order of priority. The lower the number, the higher the priority. + +- `db` – Name of the database. + +- `table` – Name of the table. + +- `where` – The selection criteria. The syntax for conditions is the same as for `WHERE` clause in MySQL, for example, `id > 10 AND id < 20`. Optional parameter. + +- `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [Updating dictionaries](external_dicts_dict_lifetime.md). + +MySQL can be connected on a local host via sockets. To do this, set `host` and `socket`. + +Example of settings: + +``` xml + + + localhost + /path/to/socket/file.sock + clickhouse + qwerty + db_name + table_name
+        <where>id=10</where>
+        <invalidate_query>SQL_QUERY</invalidate_query>
+    </mysql>
+</source>
    + +``` + +or + +``` sql +SOURCE(MYSQL( + host 'localhost' + socket '/path/to/socket/file.sock' + user 'clickhouse' + password 'qwerty' + db 'db_name' + table 'table_name' + where 'id=10' + invalidate_query 'SQL_QUERY' +)) +``` + +### ClickHouse {#dicts-external_dicts_dict_sources-clickhouse} + +Example of settings: + +``` xml + + + example01-01-1 + 9000 + default + + default + ids
+        <where>id=10</where>
+    </clickhouse>
+</source>
    + +``` + +or + +``` sql +SOURCE(CLICKHOUSE( + host 'example01-01-1' + port 9000 + user 'default' + password '' + db 'default' + table 'ids' + where 'id=10' +)) +``` + +Setting fields: + +- `host` – The ClickHouse host. If it is a local host, the query is processed without any network activity. To improve fault tolerance, you can create a [Distributed](../../operations/table_engines/distributed.md) table and enter it in subsequent configurations. +- `port` – The port on the ClickHouse server. +- `user` – Name of the ClickHouse user. +- `password` – Password of the ClickHouse user. +- `db` – Name of the database. +- `table` – Name of the table. +- `where` – The selection criteria. May be omitted. +- `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [Updating dictionaries](external_dicts_dict_lifetime.md). + +### MongoDB {#dicts-external_dicts_dict_sources-mongodb} + +Example of settings: + +``` xml + + + localhost + 27017 + + + test + dictionary_source + + +``` + +or + +``` sql +SOURCE(MONGO( + host 'localhost' + port 27017 + user '' + password '' + db 'test' + collection 'dictionary_source' +)) +``` + +Setting fields: + +- `host` – The MongoDB host. +- `port` – The port on the MongoDB server. +- `user` – Name of the MongoDB user. +- `password` – Password of the MongoDB user. +- `db` – Name of the database. +- `collection` – Name of the collection. + +### Redis {#dicts-external_dicts_dict_sources-redis} + +Example of settings: + +``` xml + + + localhost + 6379 + simple + 0 + + +``` + +or + +``` sql +SOURCE(REDIS( + host 'localhost' + port 6379 + storage_type 'simple' + db_index 0 +)) +``` + +Setting fields: + +- `host` – The Redis host. +- `port` – The port on the Redis server. +- `storage_type` – The structure of internal Redis storage using for work with keys. `simple` is for simple sources and for hashed single key sources, `hash_map` is for hashed sources with two keys. Ranged sources and cache sources with complex key are unsupported. May be omitted, default value is `simple`. +- `db_index` – The specific numeric index of Redis logical database. May be omitted, default value is 0. + +[Original article](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_sources/) diff --git a/docs/zh/query_language/dicts/external_dicts_dict_structure.md b/docs/zh/query_language/dicts/external_dicts_dict_structure.md deleted file mode 120000 index 69ff759caea..00000000000 --- a/docs/zh/query_language/dicts/external_dicts_dict_structure.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/query_language/dicts/external_dicts_dict_structure.md \ No newline at end of file diff --git a/docs/zh/query_language/dicts/external_dicts_dict_structure.md b/docs/zh/query_language/dicts/external_dicts_dict_structure.md new file mode 100644 index 00000000000..acb0ce36875 --- /dev/null +++ b/docs/zh/query_language/dicts/external_dicts_dict_structure.md @@ -0,0 +1,172 @@ +--- +en_copy: true +--- + +# Dictionary Key and Fields {#dictionary-key-and-fields} + +The `` clause describes the dictionary key and fields available for queries. + +XML description: + +``` xml + + + + Id + + + + + + + ... + + + +``` + +Attributes are described in the elements: + +- `` — [Key column](external_dicts_dict_structure.md#ext_dict_structure-key). +- `` — [Data column](external_dicts_dict_structure.md#ext_dict_structure-attributes). There can be a multiple number of attributes. 
+ +DDL query: + +``` sql +CREATE DICTIONARY dict_name ( + Id UInt64, + -- attributes +) +PRIMARY KEY Id +... +``` + +Attributes are described in the query body: + +- `PRIMARY KEY` — [Key column](external_dicts_dict_structure.md#ext_dict_structure-key) +- `AttrName AttrType` — [Data column](external_dicts_dict_structure.md#ext_dict_structure-attributes). There can be a multiple number of attributes. + +## Key {#ext_dict_structure-key} + +ClickHouse supports the following types of keys: + +- Numeric key. `UInt64`. Defined in the `` tag or using `PRIMARY KEY` keyword. +- Composite key. Set of values of different types. Defined in the tag `` or `PRIMARY KEY` keyword. + +An xml structure can contain either `` or ``. DDL-query must contain single `PRIMARY KEY`. + +!!! warning "Warning" + You must not describe key as an attribute. + +### Numeric Key {#ext_dict-numeric-key} + +Type: `UInt64`. + +Configuration example: + +``` xml + + Id + +``` + +Configuration fields: + +- `name` – The name of the column with keys. + +For DDL-query: + +``` sql +CREATE DICTIONARY ( + Id UInt64, + ... +) +PRIMARY KEY Id +... +``` + +- `PRIMARY KEY` – The name of the column with keys. + +### Composite Key {#composite-key} + +The key can be a `tuple` from any types of fields. The [layout](external_dicts_dict_layout.md) in this case must be `complex_key_hashed` or `complex_key_cache`. + +!!! tip "Tip" + A composite key can consist of a single element. This makes it possible to use a string as the key, for instance. + +The key structure is set in the element ``. Key fields are specified in the same format as the dictionary [attributes](external_dicts_dict_structure.md). Example: + +``` xml + + + + field1 + String + + + field2 + UInt32 + + ... + +... +``` + +or + +``` sql +CREATE DICTIONARY ( + field1 String, + field2 String + ... +) +PRIMARY KEY field1, field2 +... +``` + +For a query to the `dictGet*` function, a tuple is passed as the key. Example: `dictGetString('dict_name', 'attr_name', tuple('string for field1', num_for_field2))`. + +## Attributes {#ext_dict_structure-attributes} + +Configuration example: + +``` xml + + ... + + Name + ClickHouseDataType + + rand64() + true + true + true + + +``` + +or + +``` sql +CREATE DICTIONARY somename ( + Name ClickHouseDataType DEFAULT '' EXPRESSION rand64() HIERARCHICAL INJECTIVE IS_OBJECT_ID +) +``` + +Configuration fields: + +| Tag | Description | Required | +|------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------| +| `name` | Column name. | Yes | +| `type` | ClickHouse data type.
    ClickHouse tries to cast the value from the dictionary to the specified data type. For example, a MySQL source field might be `TEXT`, `VARCHAR`, or `BLOB`, but it can be uploaded as `String` in ClickHouse.
    [Nullable](../../data_types/nullable.md) is not supported. | Yes | +| `null_value` | Default value for a non-existing element.
    In the example, it is an empty string. You cannot use `NULL` in this field. | Yes | +| `expression` | [Expression](../syntax.md#syntax-expressions) that ClickHouse executes on the value.
    The expression can be a column name in the remote SQL database. Thus, you can use it to create an alias for the remote column.

    Default value: no expression. | No | +| `hierarchical` | If `true`, the attribute contains the value of a parent key for the current key. See [Hierarchical Dictionaries](external_dicts_dict_hierarchical.md).

    Default value: `false`. | No | +| `injective` | Flag that shows whether the `id -> attribute` image is [injective](https://en.wikipedia.org/wiki/Injective_function).
    If `true`, ClickHouse can automatically move requests to injective dictionaries after the `GROUP BY` clause, which usually reduces the number of such requests significantly.

    Default value: `false`. | No | +| `is_object_id` | Flag that shows whether the query is executed for a MongoDB document by `ObjectID`.

    Default value: `false`. | No | + +## See Also {#see-also} + +- [Functions for working with external dictionaries](../functions/ext_dict_functions.md). + +[Original article](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_structure/) diff --git a/docs/zh/query_language/dicts/index.md b/docs/zh/query_language/dicts/index.md deleted file mode 120000 index fdc188ca2a2..00000000000 --- a/docs/zh/query_language/dicts/index.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/query_language/dicts/index.md \ No newline at end of file diff --git a/docs/zh/query_language/dicts/index.md b/docs/zh/query_language/dicts/index.md new file mode 100644 index 00000000000..9c7883cf7a1 --- /dev/null +++ b/docs/zh/query_language/dicts/index.md @@ -0,0 +1,18 @@ +--- +en_copy: true +--- + +# Dictionaries {#dictionaries} + +A dictionary is a mapping (`key -> attributes`) that is convenient for various types of reference lists. + +ClickHouse supports special functions for working with dictionaries that can be used in queries. It is easier and more efficient to use dictionaries with functions than a `JOIN` with reference tables. + +[NULL](../syntax.md#null) values can’t be stored in a dictionary. + +ClickHouse supports: + +- [Built-in dictionaries](internal_dicts.md#internal_dicts) with a specific [set of functions](../functions/ym_dict_functions.md). +- [Plug-in (external) dictionaries](external_dicts.md) with a [set of functions](../functions/ext_dict_functions.md). + +[Original article](https://clickhouse.tech/docs/en/query_language/dicts/) diff --git a/docs/zh/query_language/dicts/internal_dicts.md b/docs/zh/query_language/dicts/internal_dicts.md deleted file mode 120000 index 3f9408dcd45..00000000000 --- a/docs/zh/query_language/dicts/internal_dicts.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/query_language/dicts/internal_dicts.md \ No newline at end of file diff --git a/docs/zh/query_language/dicts/internal_dicts.md b/docs/zh/query_language/dicts/internal_dicts.md new file mode 100644 index 00000000000..a7ac9fe7d8c --- /dev/null +++ b/docs/zh/query_language/dicts/internal_dicts.md @@ -0,0 +1,52 @@ +--- +en_copy: true +--- + +# Internal dictionaries {#internal_dicts} + +ClickHouse contains a built-in feature for working with a geobase. + +This allows you to: + +- Use a region’s ID to get its name in the desired language. +- Use a region’s ID to get the ID of a city, area, federal district, country, or continent. +- Check whether a region is part of another region. +- Get a chain of parent regions. + +All the functions support “translocality,” the ability to simultaneously use different perspectives on region ownership. For more information, see the section “Functions for working with Yandex.Metrica dictionaries”. + +The internal dictionaries are disabled in the default package. +To enable them, uncomment the parameters `path_to_regions_hierarchy_file` and `path_to_regions_names_files` in the server configuration file. + +The geobase is loaded from text files. + +Place the `regions_hierarchy*.txt` files into the `path_to_regions_hierarchy_file` directory. This configuration parameter must contain the path to the `regions_hierarchy.txt` file (the default regional hierarchy), and the other files (`regions_hierarchy_ua.txt`) must be located in the same directory. + +Put the `regions_names_*.txt` files in the `path_to_regions_names_files` directory. + +You can also create these files yourself. 
The file format is as follows: + +`regions_hierarchy*.txt`: TabSeparated (no header), columns: + +- region ID (`UInt32`) +- parent region ID (`UInt32`) +- region type (`UInt8`): 1 - continent, 3 - country, 4 - federal district, 5 - region, 6 - city; other types don’t have values +- population (`UInt32`) — optional column + +`regions_names_*.txt`: TabSeparated (no header), columns: + +- region ID (`UInt32`) +- region name (`String`) — Can’t contain tabs or line feeds, even escaped ones. + +A flat array is used for storing in RAM. For this reason, IDs shouldn’t be more than a million. + +Dictionaries can be updated without restarting the server. However, the set of available dictionaries is not updated. +For updates, the file modification times are checked. If a file has changed, the dictionary is updated. +The interval to check for changes is configured in the `builtin_dictionaries_reload_interval` parameter. +Dictionary updates (other than loading at first use) do not block queries. During updates, queries use the old versions of dictionaries. If an error occurs during an update, the error is written to the server log, and queries continue using the old version of dictionaries. + +We recommend periodically updating the dictionaries with the geobase. During an update, generate new files and write them to a separate location. When everything is ready, rename them to the files used by the server. + +There are also functions for working with OS identifiers and Yandex.Metrica search engines, but they shouldn’t be used. + +[Original article](https://clickhouse.tech/docs/en/query_language/dicts/internal_dicts/) diff --git a/docs/zh/query_language/functions/introspection.md b/docs/zh/query_language/functions/introspection.md deleted file mode 120000 index b1a487e9c77..00000000000 --- a/docs/zh/query_language/functions/introspection.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/query_language/functions/introspection.md \ No newline at end of file diff --git a/docs/zh/query_language/functions/introspection.md b/docs/zh/query_language/functions/introspection.md new file mode 100644 index 00000000000..bb1d884d15b --- /dev/null +++ b/docs/zh/query_language/functions/introspection.md @@ -0,0 +1,307 @@ +--- +en_copy: true +--- + +# Introspection Functions {#introspection-functions} + +You can use functions described in this chapter to introspect [ELF](https://en.wikipedia.org/wiki/Executable_and_Linkable_Format) and [DWARF](https://en.wikipedia.org/wiki/DWARF) for query profiling. + +!!! warning "Warning" + These functions are slow and may impose security considerations. + +For proper operation of introspection functions: + +- Install the `clickhouse-common-static-dbg` package. + +- Set the [allow\_introspection\_functions](../../operations/settings/settings.md#settings-allow_introspection_functions) setting to 1. + + For security reasons introspection functions are disabled by default. + +ClickHouse saves profiler reports to the [trace\_log](../../operations/system_tables.md#system_tables-trace_log) system table. Make sure the table and profiler are configured properly. + +## addressToLine {#addresstoline} + +Converts virtual memory address inside ClickHouse server process to the filename and the line number in ClickHouse source code. + +If you use official ClickHouse packages, you need to install the `clickhouse-common-static-dbg` package. 
+ +**Syntax** + +``` sql +addressToLine(address_of_binary_instruction) +``` + +**Parameters** + +- `address_of_binary_instruction` ([UInt64](../../data_types/int_uint.md)) — Address of instruction in a running process. + +**Returned value** + +- Source code filename and the line number in this file delimited by colon. + + For example, `/build/obj-x86_64-linux-gnu/../dbms/Common/ThreadPool.cpp:199`, where `199` is a line number. + +- Name of a binary, if the function couldn’t find the debug information. + +- Empty string, if the address is not valid. + +Type: [String](../../data_types/string.md). + +**Example** + +Enabling introspection functions: + +``` sql +SET allow_introspection_functions=1 +``` + +Selecting the first string from the `trace_log` system table: + +``` sql +SELECT * FROM system.trace_log LIMIT 1 \G +``` + +``` text +Row 1: +────── +event_date: 2019-11-19 +event_time: 2019-11-19 18:57:23 +revision: 54429 +timer_type: Real +thread_number: 48 +query_id: 421b6855-1858-45a5-8f37-f383409d6d72 +trace: [140658411141617,94784174532828,94784076370703,94784076372094,94784076361020,94784175007680,140658411116251,140658403895439] +``` + +The `trace` field contains the stack trace at the moment of sampling. + +Getting the source code filename and the line number for a single address: + +``` sql +SELECT addressToLine(94784076370703) \G +``` + +``` text +Row 1: +────── +addressToLine(94784076370703): /build/obj-x86_64-linux-gnu/../dbms/Common/ThreadPool.cpp:199 +``` + +Applying the function to the whole stack trace: + +``` sql +SELECT + arrayStringConcat(arrayMap(x -> addressToLine(x), trace), '\n') AS trace_source_code_lines +FROM system.trace_log +LIMIT 1 +\G +``` + +The [arrayMap](higher_order_functions.md#higher_order_functions-array-map) function allows to process each individual element of the `trace` array by the `addressToLine` function. The result of this processing you see in the `trace_source_code_lines` column of output. + +``` text +Row 1: +────── +trace_source_code_lines: /lib/x86_64-linux-gnu/libpthread-2.27.so +/usr/lib/debug/usr/bin/clickhouse +/build/obj-x86_64-linux-gnu/../dbms/Common/ThreadPool.cpp:199 +/build/obj-x86_64-linux-gnu/../dbms/Common/ThreadPool.h:155 +/usr/include/c++/9/bits/atomic_base.h:551 +/usr/lib/debug/usr/bin/clickhouse +/lib/x86_64-linux-gnu/libpthread-2.27.so +/build/glibc-OTsEL5/glibc-2.27/misc/../sysdeps/unix/sysv/linux/x86_64/clone.S:97 +``` + +## addressToSymbol {#addresstosymbol} + +Converts virtual memory address inside ClickHouse server process to the symbol from ClickHouse object files. + +**Syntax** + +``` sql +addressToSymbol(address_of_binary_instruction) +``` + +**Parameters** + +- `address_of_binary_instruction` ([UInt64](../../data_types/int_uint.md)) — Address of instruction in a running process. + +**Returned value** + +- Symbol from ClickHouse object files. +- Empty string, if the address is not valid. + +Type: [String](../../data_types/string.md). 
+ +**Example** + +Enabling introspection functions: + +``` sql +SET allow_introspection_functions=1 +``` + +Selecting the first string from the `trace_log` system table: + +``` sql +SELECT * FROM system.trace_log LIMIT 1 \G +``` + +``` text +Row 1: +────── +event_date: 2019-11-20 +event_time: 2019-11-20 16:57:59 +revision: 54429 +timer_type: Real +thread_number: 48 +query_id: 724028bf-f550-45aa-910d-2af6212b94ac +trace: [94138803686098,94138815010911,94138815096522,94138815101224,94138815102091,94138814222988,94138806823642,94138814457211,94138806823642,94138814457211,94138806823642,94138806795179,94138806796144,94138753770094,94138753771646,94138753760572,94138852407232,140399185266395,140399178045583] +``` + +The `trace` field contains the stack trace at the moment of sampling. + +Getting a symbol for a single address: + +``` sql +SELECT addressToSymbol(94138803686098) \G +``` + +``` text +Row 1: +────── +addressToSymbol(94138803686098): _ZNK2DB24IAggregateFunctionHelperINS_20AggregateFunctionSumImmNS_24AggregateFunctionSumDataImEEEEE19addBatchSinglePlaceEmPcPPKNS_7IColumnEPNS_5ArenaE +``` + +Applying the function to the whole stack trace: + +``` sql +SELECT + arrayStringConcat(arrayMap(x -> addressToSymbol(x), trace), '\n') AS trace_symbols +FROM system.trace_log +LIMIT 1 +\G +``` + +The [arrayMap](higher_order_functions.md#higher_order_functions-array-map) function allows to process each individual element of the `trace` array by the `addressToSymbols` function. The result of this processing you see in the `trace_symbols` column of output. + +``` text +Row 1: +────── +trace_symbols: _ZNK2DB24IAggregateFunctionHelperINS_20AggregateFunctionSumImmNS_24AggregateFunctionSumDataImEEEEE19addBatchSinglePlaceEmPcPPKNS_7IColumnEPNS_5ArenaE +_ZNK2DB10Aggregator21executeWithoutKeyImplERPcmPNS0_28AggregateFunctionInstructionEPNS_5ArenaE +_ZN2DB10Aggregator14executeOnBlockESt6vectorIN3COWINS_7IColumnEE13immutable_ptrIS3_EESaIS6_EEmRNS_22AggregatedDataVariantsERS1_IPKS3_SaISC_EERS1_ISE_SaISE_EERb +_ZN2DB10Aggregator14executeOnBlockERKNS_5BlockERNS_22AggregatedDataVariantsERSt6vectorIPKNS_7IColumnESaIS9_EERS6_ISB_SaISB_EERb +_ZN2DB10Aggregator7executeERKSt10shared_ptrINS_17IBlockInputStreamEERNS_22AggregatedDataVariantsE +_ZN2DB27AggregatingBlockInputStream8readImplEv +_ZN2DB17IBlockInputStream4readEv +_ZN2DB26ExpressionBlockInputStream8readImplEv +_ZN2DB17IBlockInputStream4readEv +_ZN2DB26ExpressionBlockInputStream8readImplEv +_ZN2DB17IBlockInputStream4readEv +_ZN2DB28AsynchronousBlockInputStream9calculateEv +_ZNSt17_Function_handlerIFvvEZN2DB28AsynchronousBlockInputStream4nextEvEUlvE_E9_M_invokeERKSt9_Any_data +_ZN14ThreadPoolImplI20ThreadFromGlobalPoolE6workerESt14_List_iteratorIS0_E +_ZZN20ThreadFromGlobalPoolC4IZN14ThreadPoolImplIS_E12scheduleImplIvEET_St8functionIFvvEEiSt8optionalImEEUlvE1_JEEEOS4_DpOT0_ENKUlvE_clEv +_ZN14ThreadPoolImplISt6threadE6workerESt14_List_iteratorIS0_E +execute_native_thread_routine +start_thread +clone +``` + +## demangle {#demangle} + +Converts a symbol that you can get using the [addressToSymbol](#addresstosymbol) function to the C++ function name. + +**Syntax** + +``` sql +demangle(symbol) +``` + +**Parameters** + +- `symbol` ([String](../../data_types/string.md)) — Symbol from an object file. + +**Returned value** + +- Name of the C++ function. +- Empty string if a symbol is not valid. + +Type: [String](../../data_types/string.md). 
+ +**Example** + +Enabling introspection functions: + +``` sql +SET allow_introspection_functions=1 +``` + +Selecting the first string from the `trace_log` system table: + +``` sql +SELECT * FROM system.trace_log LIMIT 1 \G +``` + +``` text +Row 1: +────── +event_date: 2019-11-20 +event_time: 2019-11-20 16:57:59 +revision: 54429 +timer_type: Real +thread_number: 48 +query_id: 724028bf-f550-45aa-910d-2af6212b94ac +trace: [94138803686098,94138815010911,94138815096522,94138815101224,94138815102091,94138814222988,94138806823642,94138814457211,94138806823642,94138814457211,94138806823642,94138806795179,94138806796144,94138753770094,94138753771646,94138753760572,94138852407232,140399185266395,140399178045583] +``` + +The `trace` field contains the stack trace at the moment of sampling. + +Getting a function name for a single address: + +``` sql +SELECT demangle(addressToSymbol(94138803686098)) \G +``` + +``` text +Row 1: +────── +demangle(addressToSymbol(94138803686098)): DB::IAggregateFunctionHelper > >::addBatchSinglePlace(unsigned long, char*, DB::IColumn const**, DB::Arena*) const +``` + +Applying the function to the whole stack trace: + +``` sql +SELECT + arrayStringConcat(arrayMap(x -> demangle(addressToSymbol(x)), trace), '\n') AS trace_functions +FROM system.trace_log +LIMIT 1 +\G +``` + +The [arrayMap](higher_order_functions.md#higher_order_functions-array-map) function allows to process each individual element of the `trace` array by the `demangle` function. The result of this processing you see in the `trace_functions` column of output. + +``` text +Row 1: +────── +trace_functions: DB::IAggregateFunctionHelper > >::addBatchSinglePlace(unsigned long, char*, DB::IColumn const**, DB::Arena*) const +DB::Aggregator::executeWithoutKeyImpl(char*&, unsigned long, DB::Aggregator::AggregateFunctionInstruction*, DB::Arena*) const +DB::Aggregator::executeOnBlock(std::vector::immutable_ptr, std::allocator::immutable_ptr > >, unsigned long, DB::AggregatedDataVariants&, std::vector >&, std::vector >, std::allocator > > >&, bool&) +DB::Aggregator::executeOnBlock(DB::Block const&, DB::AggregatedDataVariants&, std::vector >&, std::vector >, std::allocator > > >&, bool&) +DB::Aggregator::execute(std::shared_ptr const&, DB::AggregatedDataVariants&) +DB::AggregatingBlockInputStream::readImpl() +DB::IBlockInputStream::read() +DB::ExpressionBlockInputStream::readImpl() +DB::IBlockInputStream::read() +DB::ExpressionBlockInputStream::readImpl() +DB::IBlockInputStream::read() +DB::AsynchronousBlockInputStream::calculate() +std::_Function_handler::_M_invoke(std::_Any_data const&) +ThreadPoolImpl::worker(std::_List_iterator) +ThreadFromGlobalPool::ThreadFromGlobalPool::scheduleImpl(std::function, int, std::optional)::{lambda()#3}>(ThreadPoolImpl::scheduleImpl(std::function, int, std::optional)::{lambda()#3}&&)::{lambda()#1}::operator()() const +ThreadPoolImpl::worker(std::_List_iterator) +execute_native_thread_routine +start_thread +clone +``` diff --git a/docs/zh/query_language/index.md b/docs/zh/query_language/index.md deleted file mode 120000 index 44dfff9bb18..00000000000 --- a/docs/zh/query_language/index.md +++ /dev/null @@ -1 +0,0 @@ -../../en/query_language/index.md \ No newline at end of file diff --git a/docs/zh/query_language/index.md b/docs/zh/query_language/index.md new file mode 100644 index 00000000000..07950fb56a6 --- /dev/null +++ b/docs/zh/query_language/index.md @@ -0,0 +1,13 @@ +--- +en_copy: true +--- + +# SQL Reference {#sql-reference} + +- [SELECT](select.md) +- [INSERT 
INTO](insert_into.md) +- [CREATE](create.md) +- [ALTER](alter.md#query_language_queries_alter) +- [Other types of queries](misc.md) + +[Original article](https://clickhouse.tech/docs/en/query_language/) diff --git a/docs/zh/query_language/misc.md b/docs/zh/query_language/misc.md deleted file mode 120000 index 3bd814f3568..00000000000 --- a/docs/zh/query_language/misc.md +++ /dev/null @@ -1 +0,0 @@ -../../en/query_language/misc.md \ No newline at end of file diff --git a/docs/zh/query_language/misc.md b/docs/zh/query_language/misc.md new file mode 100644 index 00000000000..152dc0dd3b4 --- /dev/null +++ b/docs/zh/query_language/misc.md @@ -0,0 +1,249 @@ +--- +en_copy: true +--- + +# Miscellaneous Queries {#miscellaneous-queries} + +## ATTACH {#attach} + +This query is exactly the same as `CREATE`, but + +- Instead of the word `CREATE` it uses the word `ATTACH`. +- The query does not create data on the disk, but assumes that data is already in the appropriate places, and just adds information about the table to the server. + After executing an ATTACH query, the server will know about the existence of the table. + +If the table was previously detached (`DETACH`), meaning that its structure is known, you can use shorthand without defining the structure. + +``` sql +ATTACH TABLE [IF NOT EXISTS] [db.]name [ON CLUSTER cluster] +``` + +This query is used when starting the server. The server stores table metadata as files with `ATTACH` queries, which it simply runs at launch (with the exception of system tables, which are explicitly created on the server). + +## CHECK TABLE {#check-table} + +Checks if the data in the table is corrupted. + +``` sql +CHECK TABLE [db.]name +``` + +The `CHECK TABLE` query compares actual file sizes with the expected values which are stored on the server. If the file sizes do not match the stored values, it means the data is corrupted. This can be caused, for example, by a system crash during query execution. + +The query response contains the `result` column with a single row. The row has a value of +[Boolean](../data_types/boolean.md) type: + +- 0 - The data in the table is corrupted. +- 1 - The data maintains integrity. + +The `CHECK TABLE` query supports the following table engines: + +- [Log](../operations/table_engines/log.md) +- [TinyLog](../operations/table_engines/tinylog.md) +- [StripeLog](../operations/table_engines/stripelog.md) +- [MergeTree family](../operations/table_engines/mergetree.md) + +Performed over the tables with another table engines causes an exception. + +Engines from the `*Log` family don’t provide automatic data recovery on failure. Use the `CHECK TABLE` query to track data loss in a timely manner. + +For `MergeTree` family engines, the `CHECK TABLE` query shows a check status for every individual data part of a table on the local server. + +**If the data is corrupted** + +If the table is corrupted, you can copy the non-corrupted data to another table. To do this: + +1. Create a new table with the same structure as damaged table. To do this execute the query `CREATE TABLE AS `. +2. Set the [max\_threads](../operations/settings/settings.md#settings-max_threads) value to 1 to process the next query in a single thread. To do this run the query `SET max_threads = 1`. +3. Execute the query `INSERT INTO SELECT * FROM `. This request copies the non-corrupted data from the damaged table to another table. Only the data before the corrupted part will be copied. +4. Restart the `clickhouse-client` to reset the `max_threads` value. 
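+
+Taken together, steps 1–3 might look like the following sketch, where `damaged_table` and `recovered_table` are placeholder names:
+
+``` sql
+-- 1. A new table with the same structure as the damaged one
+CREATE TABLE recovered_table AS damaged_table;
+-- 2. Process the copy in a single thread
+SET max_threads = 1;
+-- 3. Copy the non-corrupted data
+INSERT INTO recovered_table SELECT * FROM damaged_table;
+```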
+ +## DESCRIBE TABLE {#misc-describe-table} + +``` sql +DESC|DESCRIBE TABLE [db.]table [INTO OUTFILE filename] [FORMAT format] +``` + +Returns the following `String` type columns: + +- `name` — Column name. +- `type`— Column type. +- `default_type` — Clause that is used in [default expression](create.md#create-default-values) (`DEFAULT`, `MATERIALIZED` or `ALIAS`). Column contains an empty string, if the default expression isn’t specified. +- `default_expression` — Value specified in the `DEFAULT` clause. +- `comment_expression` — Comment text. + +Nested data structures are output in “expanded” format. Each column is shown separately, with the name after a dot. + +## DETACH {#detach} + +Deletes information about the ‘name’ table from the server. The server stops knowing about the table’s existence. + +``` sql +DETACH TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster] +``` + +This does not delete the table’s data or metadata. On the next server launch, the server will read the metadata and find out about the table again. +Similarly, a “detached” table can be re-attached using the `ATTACH` query (with the exception of system tables, which do not have metadata stored for them). + +There is no `DETACH DATABASE` query. + +## DROP {#drop} + +This query has two types: `DROP DATABASE` and `DROP TABLE`. + +``` sql +DROP DATABASE [IF EXISTS] db [ON CLUSTER cluster] +``` + +Deletes all tables inside the ‘db’ database, then deletes the ‘db’ database itself. +If `IF EXISTS` is specified, it doesn’t return an error if the database doesn’t exist. + +``` sql +DROP [TEMPORARY] TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster] +``` + +Deletes the table. +If `IF EXISTS` is specified, it doesn’t return an error if the table doesn’t exist or the database doesn’t exist. + + DROP DICTIONARY [IF EXISTS] [db.]name + +Delets the dictionary. +If `IF EXISTS` is specified, it doesn’t return an error if the table doesn’t exist or the database doesn’t exist. + +## EXISTS {#exists} + +``` sql +EXISTS [TEMPORARY] [TABLE|DICTIONARY] [db.]name [INTO OUTFILE filename] [FORMAT format] +``` + +Returns a single `UInt8`-type column, which contains the single value `0` if the table or database doesn’t exist, or `1` if the table exists in the specified database. + +## KILL QUERY {#kill-query} + +``` sql +KILL QUERY [ON CLUSTER cluster] + WHERE + [SYNC|ASYNC|TEST] + [FORMAT format] +``` + +Attempts to forcibly terminate the currently running queries. +The queries to terminate are selected from the system.processes table using the criteria defined in the `WHERE` clause of the `KILL` query. + +Examples: + +``` sql +-- Forcibly terminates all queries with the specified query_id: +KILL QUERY WHERE query_id='2-857d-4a57-9ee0-327da5d60a90' + +-- Synchronously terminates all queries run by 'username': +KILL QUERY WHERE user='username' SYNC +``` + +Read-only users can only stop their own queries. + +By default, the asynchronous version of queries is used (`ASYNC`), which doesn’t wait for confirmation that queries have stopped. + +The synchronous version (`SYNC`) waits for all queries to stop and displays information about each process as it stops. +The response contains the `kill_status` column, which can take the following values: + +1. ‘finished’ – The query was terminated successfully. +2. ‘waiting’ – Waiting for the query to end after sending it a signal to terminate. +3. The other values ​​explain why the query can’t be stopped. + +A test query (`TEST`) only checks the user’s rights and displays a list of queries to stop. 
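+
+For example, to preview which queries run by `username` would be terminated, without actually stopping them:
+
+``` sql
+KILL QUERY WHERE user='username' TEST
+```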
+ +## KILL MUTATION {#kill-mutation} + +``` sql +KILL MUTATION [ON CLUSTER cluster] + WHERE + [TEST] + [FORMAT format] +``` + +Tries to cancel and remove [mutations](alter.md#alter-mutations) that are currently executing. Mutations to cancel are selected from the [`system.mutations`](../operations/system_tables.md#system_tables-mutations) table using the filter specified by the `WHERE` clause of the `KILL` query. + +A test query (`TEST`) only checks the user’s rights and displays a list of queries to stop. + +Examples: + +``` sql +-- Cancel and remove all mutations of the single table: +KILL MUTATION WHERE database = 'default' AND table = 'table' + +-- Cancel the specific mutation: +KILL MUTATION WHERE database = 'default' AND table = 'table' AND mutation_id = 'mutation_3.txt' +``` + +The query is useful when a mutation is stuck and cannot finish (e.g. if some function in the mutation query throws an exception when applied to the data contained in the table). + +Changes already made by the mutation are not rolled back. + +## OPTIMIZE {#misc_operations-optimize} + +``` sql +OPTIMIZE TABLE [db.]name [ON CLUSTER cluster] [PARTITION partition | PARTITION ID 'partition_id'] [FINAL] [DEDUPLICATE] +``` + +This query tries to initialize an unscheduled merge of data parts for tables with a table engine from the [MergeTree](../operations/table_engines/mergetree.md) family. + +The `OPTMIZE` query is also supported for the [MaterializedView](../operations/table_engines/materializedview.md) and the [Buffer](../operations/table_engines/buffer.md) engines. Other table engines aren’t supported. + +When `OPTIMIZE` is used with the [ReplicatedMergeTree](../operations/table_engines/replication.md) family of table engines, ClickHouse creates a task for merging and waits for execution on all nodes (if the `replication_alter_partitions_sync` setting is enabled). + +- If `OPTIMIZE` doesn’t perform a merge for any reason, it doesn’t notify the client. To enable notifications, use the [optimize\_throw\_if\_noop](../operations/settings/settings.md#setting-optimize_throw_if_noop) setting. +- If you specify a `PARTITION`, only the specified partition is optimized. [How to set partition expression](alter.md#alter-how-to-specify-part-expr). +- If you specify `FINAL`, optimization is performed even when all the data is already in one part. +- If you specify `DEDUPLICATE`, then completely identical rows will be deduplicated (all columns are compared), it makes sense only for the MergeTree engine. + +!!! warning "Warning" + `OPTIMIZE` can’t fix the “Too many parts” error. + +## RENAME {#misc_operations-rename} + +Renames one or more tables. + +``` sql +RENAME TABLE [db11.]name11 TO [db12.]name12, [db21.]name21 TO [db22.]name22, ... [ON CLUSTER cluster] +``` + +All tables are renamed under global locking. Renaming tables is a light operation. If you indicated another database after TO, the table will be moved to this database. However, the directories with databases must reside in the same file system (otherwise, an error is returned). + +## SET {#query-set} + +``` sql +SET param = value +``` + +Assigns `value` to the `param` [setting](../operations/settings/index.md) for the current session. You cannot change [server settings](../operations/server_settings/index.md) this way. + +You can also set all the values from the specified settings profile in a single query. + +``` sql +SET profile = 'profile-name-from-the-settings-file' +``` + +For more information, see [Settings](../operations/settings/settings.md). 
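+
+To confirm that a setting took effect for the current session, you can read it back from the `system.settings` table. This is a sketch; the column names are those of recent ClickHouse versions:
+
+``` sql
+SET max_threads = 8;
+
+SELECT name, value, changed
+FROM system.settings
+WHERE name = 'max_threads';
+```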
+ +## TRUNCATE {#truncate} + +``` sql +TRUNCATE TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster] +``` + +Removes all data from a table. When the clause `IF EXISTS` is omitted, the query returns an error if the table does not exist. + +The `TRUNCATE` query is not supported for [View](../operations/table_engines/view.md), [File](../operations/table_engines/file.md), [URL](../operations/table_engines/url.md) and [Null](../operations/table_engines/null.md) table engines. + +## USE {#use} + +``` sql +USE db +``` + +Lets you set the current database for the session. +The current database is used for searching for tables if the database is not explicitly defined in the query with a dot before the table name. +This query can’t be made when using the HTTP protocol, since there is no concept of a session. + +[Original article](https://clickhouse.tech/docs/en/query_language/misc/) diff --git a/docs/zh/query_language/show.md b/docs/zh/query_language/show.md deleted file mode 120000 index 4c2f4cf2c4f..00000000000 --- a/docs/zh/query_language/show.md +++ /dev/null @@ -1 +0,0 @@ -../../en/query_language/show.md \ No newline at end of file diff --git a/docs/zh/query_language/show.md b/docs/zh/query_language/show.md new file mode 100644 index 00000000000..840a2fc9766 --- /dev/null +++ b/docs/zh/query_language/show.md @@ -0,0 +1,102 @@ +--- +en_copy: true +--- + +# SHOW Queries {#show-queries} + +## SHOW CREATE TABLE {#show-create-table} + +``` sql +SHOW CREATE [TEMPORARY] [TABLE|DICTIONARY] [db.]table [INTO OUTFILE filename] [FORMAT format] +``` + +Returns a single `String`-type ‘statement’ column, which contains a single value – the `CREATE` query used for creating the specified object. + +## SHOW DATABASES {#show-databases} + +``` sql +SHOW DATABASES [INTO OUTFILE filename] [FORMAT format] +``` + +Prints a list of all databases. +This query is identical to `SELECT name FROM system.databases [INTO OUTFILE filename] [FORMAT format]`. + +## SHOW PROCESSLIST {#show-processlist} + +``` sql +SHOW PROCESSLIST [INTO OUTFILE filename] [FORMAT format] +``` + +Outputs the content of the [system.processes](../operations/system_tables.md#system_tables-processes) table, that contains a list of queries that is being processed at the moment, excepting `SHOW PROCESSLIST` queries. + +The `SELECT * FROM system.processes` query returns data about all the current queries. + +Tip (execute in the console): + +``` bash +$ watch -n1 "clickhouse-client --query='SHOW PROCESSLIST'" +``` + +## SHOW TABLES {#show-tables} + +Displays a list of tables. + +``` sql +SHOW [TEMPORARY] TABLES [{FROM | IN} ] [LIKE '' | WHERE expr] [LIMIT ] [INTO OUTFILE ] [FORMAT ] +``` + +If the `FROM` clause is not specified, the query returns the list of tables from the current database. + +You can get the same results as the `SHOW TABLES` query in the following way: + +``` sql +SELECT name FROM system.tables WHERE database = [AND name LIKE ] [LIMIT ] [INTO OUTFILE ] [FORMAT ] +``` + +**Example** + +The following query selects the first two rows from the list of tables in the `system` database, whose names contain `co`. + +``` sql +SHOW TABLES FROM system LIKE '%co%' LIMIT 2 +``` + +``` text +┌─name───────────────────────────┐ +│ aggregate_function_combinators │ +│ collations │ +└────────────────────────────────┘ +``` + +## SHOW DICTIONARIES {#show-dictionaries} + +Displays a list of [external dictionaries](dicts/external_dicts.md). 
+ +``` sql +SHOW DICTIONARIES [FROM ] [LIKE ''] [LIMIT ] [INTO OUTFILE ] [FORMAT ] +``` + +If the `FROM` clause is not specified, the query returns the list of dictionaries from the current database. + +You can get the same results as the `SHOW DICTIONARIES` query in the following way: + +``` sql +SELECT name FROM system.dictionaries WHERE database = [AND name LIKE ] [LIMIT ] [INTO OUTFILE ] [FORMAT ] +``` + +**Example** + +The following query selects the first two rows from the list of tables in the `system` database, whose names contain `reg`. + +``` sql +SHOW DICTIONARIES FROM db LIKE '%reg%' LIMIT 2 +``` + +``` text +┌─name─────────┐ +│ regions │ +│ region_names │ +└──────────────┘ +``` + +[Original article](https://clickhouse.tech/docs/en/query_language/show/) diff --git a/docs/zh/query_language/syntax.md b/docs/zh/query_language/syntax.md deleted file mode 120000 index 5307fd51ae8..00000000000 --- a/docs/zh/query_language/syntax.md +++ /dev/null @@ -1 +0,0 @@ -../../en/query_language/syntax.md \ No newline at end of file diff --git a/docs/zh/query_language/syntax.md b/docs/zh/query_language/syntax.md new file mode 100644 index 00000000000..fb86f56e7bd --- /dev/null +++ b/docs/zh/query_language/syntax.md @@ -0,0 +1,184 @@ +--- +en_copy: true +--- + +# Syntax {#syntax} + +There are two types of parsers in the system: the full SQL parser (a recursive descent parser), and the data format parser (a fast stream parser). +In all cases except the `INSERT` query, only the full SQL parser is used. +The `INSERT` query uses both parsers: + +``` sql +INSERT INTO t VALUES (1, 'Hello, world'), (2, 'abc'), (3, 'def') +``` + +The `INSERT INTO t VALUES` fragment is parsed by the full parser, and the data `(1, 'Hello, world'), (2, 'abc'), (3, 'def')` is parsed by the fast stream parser. You can also turn on the full parser for the data by using the [input\_format\_values\_interpret\_expressions](../operations/settings/settings.md#settings-input_format_values_interpret_expressions) setting. When `input_format_values_interpret_expressions = 1`, ClickHouse first tries to parse values with the fast stream parser. If it fails, ClickHouse tries to use the full parser for the data, treating it like an SQL [expression](#syntax-expressions). + +Data can have any format. When a query is received, the server calculates no more than [max\_query\_size](../operations/settings/settings.md#settings-max_query_size) bytes of the request in RAM (by default, 1 MB), and the rest is stream parsed. +This means the system doesn’t have problems with large `INSERT` queries, like MySQL does. + +When using the `Values` format in an `INSERT` query, it may seem that data is parsed the same as expressions in a `SELECT` query, but this is not true. The `Values` format is much more limited. + +Next we will cover the full parser. For more information about format parsers, see the [Formats](../interfaces/formats.md) section. + +## Spaces {#spaces} + +There may be any number of space symbols between syntactical constructions (including the beginning and end of a query). Space symbols include the space, tab, line feed, CR, and form feed. + +## Comments {#comments} + +SQL-style and C-style comments are supported. +SQL-style comments: from `--` to the end of the line. The space after `--` can be omitted. +Comments in C-style: from `/*` to `*/`. These comments can be multiline. Spaces are not required here, either. + +## Keywords {#syntax-keywords} + +Keywords are case-insensitive when they correspond to: + +- SQL standard. 
For example, `SELECT`, `select` and `SeLeCt` are all valid. +- Implementation in some popular DBMS (MySQL or Postgres). For example, `DateTime` is same as `datetime`. + +Whether data type name is case-sensitive can be checked in the `system.data_type_families` table. + +In contrast to standard SQL all other keywords (including functions names) are **case-sensitive**. + +Keywords are not reserved (they are just parsed as keywords in the corresponding context). If you use [identifiers](#syntax-identifiers) the same as the keywords, enclose them into quotes. For example, the query `SELECT "FROM" FROM table_name` is valid if the table `table_name` has column with the name `"FROM"`. + +## Identifiers {#syntax-identifiers} + +Identifiers are: + +- Cluster, database, table, partition and column names. +- Functions. +- Data types. +- [Expression aliases](#syntax-expression_aliases). + +Identifiers can be quoted or non-quoted. It is recommended to use non-quoted identifiers. + +Non-quoted identifiers must match the regex `^[a-zA-Z_][0-9a-zA-Z_]*$` and can not be equal to [keywords](#syntax-keywords). Examples: `x, _1, X_y__Z123_.` + +If you want to use identifiers the same as keywords or you want to use other symbols in identifiers, quote it using double quotes or backticks, for example, `"id"`, `` `id` ``. + +## Literals {#literals} + +There are: numeric, string, compound and `NULL` literals. + +### Numeric {#numeric} + +A numeric literal tries to be parsed: + +- First as a 64-bit signed number, using the [strtoull](https://en.cppreference.com/w/cpp/string/byte/strtoul) function. +- If unsuccessful, as a 64-bit unsigned number, using the [strtoll](https://en.cppreference.com/w/cpp/string/byte/strtol) function. +- If unsuccessful, as a floating-point number using the [strtod](https://en.cppreference.com/w/cpp/string/byte/strtof) function. +- Otherwise, an error is returned. + +The corresponding value will have the smallest type that the value fits in. +For example, 1 is parsed as `UInt8`, but 256 is parsed as `UInt16`. For more information, see [Data types](../data_types/index.md). + +Examples: `1`, `18446744073709551615`, `0xDEADBEEF`, `01`, `0.1`, `1e100`, `-1e-100`, `inf`, `nan`. + +### String {#syntax-string-literal} + +Only string literals in single quotes are supported. The enclosed characters can be backslash-escaped. The following escape sequences have a corresponding special value: `\b`, `\f`, `\r`, `\n`, `\t`, `\0`, `\a`, `\v`, `\xHH`. In all other cases, escape sequences in the format `\c`, where `c` is any character, are converted to `c`. This means that you can use the sequences `\'`and`\\`. The value will have the [String](../data_types/string.md) type. + +The minimum set of characters that you need to escape in string literals: `'` and `\`. Single quote can be escaped with the single quote, literals `'It\'s'` and `'It''s'` are equal. + +### Compound {#compound} + +Constructions are supported for arrays: `[1, 2, 3]` and tuples: `(1, 'Hello, world!', 2)`.. +Actually, these are not literals, but expressions with the array creation operator and the tuple creation operator, respectively. +An array must consist of at least one item, and a tuple must have at least two items. +Tuples have a special purpose for use in the `IN` clause of a `SELECT` query. Tuples can be obtained as the result of a query, but they can’t be saved to a database (with the exception of [Memory](../operations/table_engines/memory.md) tables). + +### NULL {#null-literal} + +Indicates that the value is missing. 
+ +In order to store `NULL` in a table field, it must be of the [Nullable](../data_types/nullable.md) type. + +Depending on the data format (input or output), `NULL` may have a different representation. For more information, see the documentation for [data formats](../interfaces/formats.md#formats). + +There are many nuances to processing `NULL`. For example, if at least one of the arguments of a comparison operation is `NULL`, the result of this operation will also be `NULL`. The same is true for multiplication, addition, and other operations. For more information, read the documentation for each operation. + +In queries, you can check `NULL` using the [IS NULL](operators.md#operator-is-null) and [IS NOT NULL](operators.md) operators and the related functions `isNull` and `isNotNull`. + +## Functions {#functions} + +Functions are written like an identifier with a list of arguments (possibly empty) in brackets. In contrast to standard SQL, the brackets are required, even for an empty arguments list. Example: `now()`. +There are regular and aggregate functions (see the section “Aggregate functions”). Some aggregate functions can contain two lists of arguments in brackets. Example: `quantile (0.9) (x)`. These aggregate functions are called “parametric” functions, and the arguments in the first list are called “parameters”. The syntax of aggregate functions without parameters is the same as for regular functions. + +## Operators {#operators} + +Operators are converted to their corresponding functions during query parsing, taking their priority and associativity into account. +For example, the expression `1 + 2 * 3 + 4` is transformed to `plus(plus(1, multiply(2, 3)), 4)`. + +## Data Types and Database Table Engines {#data_types-and-database-table-engines} + +Data types and table engines in the `CREATE` query are written the same way as identifiers or functions. In other words, they may or may not contain an arguments list in brackets. For more information, see the sections “Data types,” “Table engines,” and “CREATE”. + +## Expression Aliases {#syntax-expression_aliases} + +An alias is a user-defined name for an expression in a query. + +``` sql +expr AS alias +``` + +- `AS` — The keyword for defining aliases. You can define the alias for a table name or a column name in a `SELECT` clause without using the `AS` keyword. + + For example, `SELECT table_name_alias.column_name FROM table_name table_name_alias`. + + In the [CAST](functions/type_conversion_functions.md#type_conversion_function-cast) function, the `AS` keyword has another meaning. See the description of the function. + +- `expr` — Any expression supported by ClickHouse. + + For example, `SELECT column_name * 2 AS double FROM some_table`. + +- `alias` — Name for `expr`. Aliases should comply with the [identifiers](#syntax-identifiers) syntax. + + For example, `SELECT "table t".column_name FROM table_name AS "table t"`. + +### Notes on Usage {#notes-on-usage} + +Aliases are global for a query or subquery and you can define an alias in any part of a query for any expression. For example, `SELECT (1 AS n) + 2, n`. + +Aliases are not visible in subqueries and between subqueries. For example, while executing the query `SELECT (SELECT sum(b.a) + num FROM b) - a.a AS num FROM a` ClickHouse generates the exception `Unknown identifier: num`. + +If an alias is defined for the result columns in the `SELECT` clause of a subquery, these columns are visible in the outer query. For example, `SELECT n + m FROM (SELECT 1 AS n, 2 AS m)`. 
+ +Be careful with aliases that are the same as column or table names. Let’s consider the following example: + +``` sql +CREATE TABLE t +( + a Int, + b Int +) +ENGINE = TinyLog() +``` + +``` sql +SELECT + argMax(a, b), + sum(b) AS b +FROM t +``` + +``` text +Received exception from server (version 18.14.17): +Code: 184. DB::Exception: Received from localhost:9000, 127.0.0.1. DB::Exception: Aggregate function sum(b) is found inside another aggregate function in query. +``` + +In this example, we declared table `t` with column `b`. Then, when selecting data, we defined the `sum(b) AS b` alias. As aliases are global, ClickHouse substituted the literal `b` in the expression `argMax(a, b)` with the expression `sum(b)`. This substitution caused the exception. + +## Asterisk {#asterisk} + +In a `SELECT` query, an asterisk can replace the expression. For more information, see the section “SELECT”. + +## Expressions {#syntax-expressions} + +An expression is a function, identifier, literal, application of an operator, expression in brackets, subquery, or asterisk. It can also contain an alias. +A list of expressions is one or more expressions separated by commas. +Functions and operators, in turn, can have expressions as arguments. + +[Original article](https://clickhouse.tech/docs/en/query_language/syntax/) diff --git a/docs/zh/query_language/system.md b/docs/zh/query_language/system.md deleted file mode 120000 index 6061858c3f2..00000000000 --- a/docs/zh/query_language/system.md +++ /dev/null @@ -1 +0,0 @@ -../../en/query_language/system.md \ No newline at end of file diff --git a/docs/zh/query_language/system.md b/docs/zh/query_language/system.md new file mode 100644 index 00000000000..a6b72d63ead --- /dev/null +++ b/docs/zh/query_language/system.md @@ -0,0 +1,110 @@ +--- +en_copy: true +--- + +# SYSTEM Queries {#query-language-system} + +- [RELOAD DICTIONARIES](#query_language-system-reload-dictionaries) +- [RELOAD DICTIONARY](#query_language-system-reload-dictionary) +- [DROP DNS CACHE](#query_language-system-drop-dns-cache) +- [DROP MARK CACHE](#query_language-system-drop-mark-cache) +- [FLUSH LOGS](#query_language-system-flush_logs) +- [RELOAD CONFIG](#query_language-system-reload-config) +- [SHUTDOWN](#query_language-system-shutdown) +- [KILL](#query_language-system-kill) +- [STOP DISTRIBUTED SENDS](#query_language-system-stop-distributed-sends) +- [FLUSH DISTRIBUTED](#query_language-system-flush-distributed) +- [START DISTRIBUTED SENDS](#query_language-system-start-distributed-sends) +- [STOP MERGES](#query_language-system-stop-merges) +- [START MERGES](#query_language-system-start-merges) + +## RELOAD DICTIONARIES {#query_language-system-reload-dictionaries} + +Reloads all dictionaries that have been successfully loaded before. +By default, dictionaries are loaded lazily (see [dictionaries\_lazy\_load](../operations/server_settings/settings.md#server_settings-dictionaries_lazy_load)), so instead of being loaded automatically at startup, they are initialized on first access through dictGet function or SELECT from tables with ENGINE = Dictionary. The `SYSTEM RELOAD DICTIONARIES` query reloads such dictionaries (LOADED). +Always returns `Ok.` regardless of the result of the dictionary update. + +## RELOAD DICTIONARY dictionary\_name {#query_language-system-reload-dictionary} + +Completely reloads a dictionary `dictionary_name`, regardless of the state of the dictionary (LOADED / NOT\_LOADED / FAILED). +Always returns `Ok.` regardless of the result of updating the dictionary. 
+The status of the dictionary can be checked by querying the `system.dictionaries` table.
+
+``` sql
+SELECT name, status FROM system.dictionaries;
+```
+
+## DROP DNS CACHE {#query_language-system-drop-dns-cache}
+
+Resets ClickHouse’s internal DNS cache. Sometimes (for old ClickHouse versions) it is necessary to use this command when changing the infrastructure (changing the IP address of another ClickHouse server or the server used by dictionaries).
+
+For more convenient (automatic) cache management, see the disable\_internal\_dns\_cache and dns\_cache\_update\_period parameters.
+
+## DROP MARK CACHE {#query_language-system-drop-mark-cache}
+
+Resets the mark cache. Used in development of ClickHouse and performance tests.
+
+## FLUSH LOGS {#query_language-system-flush_logs}
+
+Flushes buffers of log messages to system tables (e.g. system.query\_log), so you don’t have to wait 7.5 seconds when debugging.
+
+## RELOAD CONFIG {#query_language-system-reload-config}
+
+Reloads the ClickHouse configuration. Used when the configuration is stored in ZooKeeper.
+
+## SHUTDOWN {#query_language-system-shutdown}
+
+Normally shuts down ClickHouse (like `service clickhouse-server stop` / `kill {$pid_clickhouse-server}`).
+
+## KILL {#query_language-system-kill}
+
+Aborts the ClickHouse process (like `kill -9 {$pid_clickhouse-server}`).
+
+## Managing Distributed Tables {#query-language-system-distributed}
+
+ClickHouse can manage [distributed](../operations/table_engines/distributed.md) tables. When a user inserts data into these tables, ClickHouse first creates a queue of the data that should be sent to cluster nodes, then asynchronously sends it. You can manage queue processing with the [STOP DISTRIBUTED SENDS](#query_language-system-stop-distributed-sends), [FLUSH DISTRIBUTED](#query_language-system-flush-distributed), and [START DISTRIBUTED SENDS](#query_language-system-start-distributed-sends) queries. You can also synchronously insert distributed data with the `insert_distributed_sync` setting.
+
+### STOP DISTRIBUTED SENDS {#query_language-system-stop-distributed-sends}
+
+Disables background data distribution when inserting data into distributed tables.
+
+``` sql
+SYSTEM STOP DISTRIBUTED SENDS [db.]<distributed_table_name>
+```
+
+### FLUSH DISTRIBUTED {#query_language-system-flush-distributed}
+
+Forces ClickHouse to send data to cluster nodes synchronously. If any nodes are unavailable, ClickHouse throws an exception and stops query execution. You can retry the query until it succeeds, which will happen when all nodes are back online.
+
+``` sql
+SYSTEM FLUSH DISTRIBUTED [db.]<distributed_table_name>
+```
+
+### START DISTRIBUTED SENDS {#query_language-system-start-distributed-sends}
+
+Enables background data distribution when inserting data into distributed tables.
+
+``` sql
+SYSTEM START DISTRIBUTED SENDS [db.]<distributed_table_name>
+```
+
+### STOP MERGES {#query_language-system-stop-merges}
+
+Allows stopping background merges for tables in the MergeTree family:
+
+``` sql
+SYSTEM STOP MERGES [[db.]merge_tree_family_table_name]
+```
+
+!!! note "Note"
+    `DETACH / ATTACH` of a table starts background merges for that table even if merges were previously stopped for all MergeTree tables.
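+
+For example, a typical maintenance sequence looks like this (a sketch; `db.my_table` is an illustrative MergeTree table name):
+
+``` sql
+-- Suspend background merges for one table, e.g. during debugging or bulk loading:
+SYSTEM STOP MERGES db.my_table;
+-- ...do the work, then re-enable merges with the START MERGES query described below.
+```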
+
+### START MERGES {#query_language-system-start-merges}
+
+Allows starting background merges for tables in the MergeTree family:
+
+``` sql
+SYSTEM START MERGES [[db.]merge_tree_family_table_name]
+```
+
+[Original article](https://clickhouse.tech/docs/en/query_language/system/)
diff --git a/docs/zh/query_language/table_functions/file.md b/docs/zh/query_language/table_functions/file.md
deleted file mode 120000
index a514547109a..00000000000
--- a/docs/zh/query_language/table_functions/file.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/query_language/table_functions/file.md
\ No newline at end of file
diff --git a/docs/zh/query_language/table_functions/file.md b/docs/zh/query_language/table_functions/file.md
new file mode 100644
index 00000000000..95c3a9378bc
--- /dev/null
+++ b/docs/zh/query_language/table_functions/file.md
@@ -0,0 +1,118 @@
+---
+en_copy: true
+---
+
+# file {#file}
+
+Creates a table from a file. This table function is similar to the [url](url.md) and [hdfs](hdfs.md) table functions.
+
+``` sql
+file(path, format, structure)
+```
+
+**Input parameters**
+
+- `path` — The relative path to the file from [user\_files\_path](../../operations/server_settings/settings.md#server_settings-user_files_path). The path to the file supports the following globs in read-only mode: `*`, `?`, `{abc,def}` and `{N..M}`, where `N` and `M` are numbers and `'abc'`, `'def'` are strings.
+- `format` — The [format](../../interfaces/formats.md#formats) of the file.
+- `structure` — Structure of the table. Format: `'column1_name column1_type, column2_name column2_type, ...'`.
+
+**Returned value**
+
+A table with the specified structure for reading or writing data in the specified file.
+
+**Example**
+
+Setting `user_files_path` and the contents of the file `test.csv`:
+
+``` bash
+$ grep user_files_path /etc/clickhouse-server/config.xml
+    <user_files_path>/var/lib/clickhouse/user_files/</user_files_path>
+
+$ cat /var/lib/clickhouse/user_files/test.csv
+    1,2,3
+    3,2,1
+    78,43,45
+```
+
+Creating a table from `test.csv` and selecting the first two rows from it:
+
+``` sql
+SELECT *
+FROM file('test.csv', 'CSV', 'column1 UInt32, column2 UInt32, column3 UInt32')
+LIMIT 2
+```
+
+``` text
+┌─column1─┬─column2─┬─column3─┐
+│       1 │       2 │       3 │
+│       3 │       2 │       1 │
+└─────────┴─────────┴─────────┘
+```
+
+``` sql
+-- getting the first 10 lines of a table that contains 3 columns of UInt32 type from a CSV file
+SELECT * FROM file('test.csv', 'CSV', 'column1 UInt32, column2 UInt32, column3 UInt32') LIMIT 10
+```
+
+**Globs in path**
+
+Multiple path components can have globs. To be processed, a file must exist and match the whole path pattern (not only its suffix or prefix).
+
+- `*` — Substitutes any number of any characters except `/`, including the empty string.
+- `?` — Substitutes any single character.
+- `{some_string,another_string,yet_another_one}` — Substitutes any of the strings `'some_string', 'another_string', 'yet_another_one'`.
+- `{N..M}` — Substitutes any number in the range from N to M, including both endpoints.
+
+Constructions with `{}` are similar to the [remote table function](../../query_language/table_functions/remote.md).
+
+**Example**
+
+1. Suppose we have several files with the following relative paths:
+
+- ‘some\_dir/some\_file\_1’
+- ‘some\_dir/some\_file\_2’
+- ‘some\_dir/some\_file\_3’
+- ‘another\_dir/some\_file\_1’
+- ‘another\_dir/some\_file\_2’
+- ‘another\_dir/some\_file\_3’
+
+1. Query the number of rows in these files:
+
+``` sql
+SELECT count(*)
+FROM file('{some,another}_dir/some_file_{1..3}', 'TSV', 'name String, value UInt32')
+```
+
+1. Query the number of rows in all files of these two directories:
+
+``` sql
+SELECT count(*)
+FROM file('{some,another}_dir/*', 'TSV', 'name String, value UInt32')
+```
+
+!!! warning "Warning"
+    If your listing of files contains number ranges with leading zeros, use the construction with braces for each digit separately or use `?`.
+
+**Example**
+
+Query the data from files named `file000`, `file001`, … , `file999`:
+
+``` sql
+SELECT count(*)
+FROM file('big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name String, value UInt32')
+```
+
+## Virtual Columns {#virtual-columns}
+
+- `_path` — Path to the file.
+- `_file` — Name of the file.
+
+**See Also**
+
+- [Virtual columns](https://clickhouse.tech/docs/en/operations/table_engines/#table_engines-virtual_columns)
+
+[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/file/)
diff --git a/docs/zh/query_language/table_functions/generate.md b/docs/zh/query_language/table_functions/generate.md
deleted file mode 120000
index 141c05da1e3..00000000000
--- a/docs/zh/query_language/table_functions/generate.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/query_language/table_functions/generate.md
\ No newline at end of file
diff --git a/docs/zh/query_language/table_functions/generate.md b/docs/zh/query_language/table_functions/generate.md
new file mode 100644
index 00000000000..273b5bd7e23
--- /dev/null
+++ b/docs/zh/query_language/table_functions/generate.md
@@ -0,0 +1,42 @@
+---
+en_copy: true
+---
+
+# generateRandom {#generaterandom}
+
+Generates random data with the given schema.
+Allows populating test tables with data.
+Supports all data types that can be stored in a table except `LowCardinality` and `AggregateFunction`.
+
+``` sql
+generateRandom('name TypeName[, name TypeName]...'[, 'random_seed'[, 'max_string_length'[, 'max_array_length']]]);
+```
+
+**Parameters**
+
+- `name` — Name of the corresponding column.
+- `TypeName` — Type of the corresponding column.
+- `limit` — Number of rows to generate.
+- `max_array_length` — Maximum array length for all generated arrays. Defaults to `10`.
+- `max_string_length` — Maximum string length for all generated strings. Defaults to `10`.
+- `random_seed` — Specify the random seed manually to produce stable results. If NULL, the seed is randomly generated.
+
+**Returned Value**
+
+A table object with the requested schema.
+
+## Usage Example {#usage-example}
+
+``` sql
+SELECT * FROM generateRandom('a Array(Int8), d Decimal32(4), c Tuple(DateTime64(3), UUID)', 1, 10, 2);
+```
+
+``` text
+┌─a────────┬────────────d─┬─c──────────────────────────────────────────────────────────────────┐
+│ [77]     │ -124167.6723 │ ('2061-04-17 21:59:44.573','3f72f405-ec3e-13c8-44ca-66ef335f7835') │
+│ [32,110] │ -141397.7312 │ ('1979-02-09 03:43:48.526','982486d1-5a5d-a308-e525-7bd8b80ffa73') │
+│ [68]     │  -67417.0770 │ ('2080-03-12 14:17:31.269','110425e5-413f-10a6-05ba-fa6b3e929f15') │
+└──────────┴──────────────┴────────────────────────────────────────────────────────────────────┘
+```
+
+[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/generate/)
diff --git a/docs/zh/query_language/table_functions/hdfs.md b/docs/zh/query_language/table_functions/hdfs.md
deleted file mode 120000
index 2616e737eb6..00000000000
--- a/docs/zh/query_language/table_functions/hdfs.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/query_language/table_functions/hdfs.md
\ No newline at end of file
diff --git a/docs/zh/query_language/table_functions/hdfs.md b/docs/zh/query_language/table_functions/hdfs.md
new file mode 100644
index 00000000000..f636b7d19bb
--- /dev/null
+++ b/docs/zh/query_language/table_functions/hdfs.md
@@ -0,0 +1,101 @@
+---
+en_copy: true
+---
+
+# hdfs {#hdfs}
+
+Creates a table from files in HDFS. This table function is similar to the [url](url.md) and [file](file.md) table functions.
+
+``` sql
+hdfs(URI, format, structure)
+```
+
+**Input parameters**
+
+- `URI` — The URI of the file in HDFS. The path part of the URI supports the following globs in read-only mode: `*`, `?`, `{abc,def}` and `{N..M}`, where `N` and `M` are numbers and `'abc'`, `'def'` are strings.
+- `format` — The [format](../../interfaces/formats.md#formats) of the file.
+- `structure` — Structure of the table. Format: `'column1_name column1_type, column2_name column2_type, ...'`.
+
+**Returned value**
+
+A table with the specified structure for reading or writing data in the specified file.
+
+**Example**
+
+Creating a table from `hdfs://hdfs1:9000/test` and selecting the first two rows from it:
+
+``` sql
+SELECT *
+FROM hdfs('hdfs://hdfs1:9000/test', 'TSV', 'column1 UInt32, column2 UInt32, column3 UInt32')
+LIMIT 2
+```
+
+``` text
+┌─column1─┬─column2─┬─column3─┐
+│       1 │       2 │       3 │
+│       3 │       2 │       1 │
+└─────────┴─────────┴─────────┘
+```
+
+**Globs in path**
+
+Multiple path components can have globs. To be processed, a file must exist and match the whole path pattern (not only its suffix or prefix).
+
+- `*` — Substitutes any number of any characters except `/`, including the empty string.
+- `?` — Substitutes any single character.
+- `{some_string,another_string,yet_another_one}` — Substitutes any of the strings `'some_string', 'another_string', 'yet_another_one'`.
+- `{N..M}` — Substitutes any number in the range from N to M, including both endpoints.
+
+Constructions with `{}` are similar to the [remote table function](../../query_language/table_functions/remote.md).
+
+**Example**
+
+1. Suppose that we have several files with the following URIs on HDFS:
+
+- ‘hdfs://hdfs1:9000/some\_dir/some\_file\_1’
+- ‘hdfs://hdfs1:9000/some\_dir/some\_file\_2’
+- ‘hdfs://hdfs1:9000/some\_dir/some\_file\_3’
+- ‘hdfs://hdfs1:9000/another\_dir/some\_file\_1’
+- ‘hdfs://hdfs1:9000/another\_dir/some\_file\_2’
+- ‘hdfs://hdfs1:9000/another\_dir/some\_file\_3’
+
+1. Query the number of rows in these files:
+
+``` sql
+SELECT count(*)
+FROM hdfs('hdfs://hdfs1:9000/{some,another}_dir/some_file_{1..3}', 'TSV', 'name String, value UInt32')
+```
+
+1. Query the number of rows in all files of these two directories:
+
+``` sql
+SELECT count(*)
+FROM hdfs('hdfs://hdfs1:9000/{some,another}_dir/*', 'TSV', 'name String, value UInt32')
+```
+
+!!! warning "Warning"
+    If your listing of files contains number ranges with leading zeros, use the construction with braces for each digit separately or use `?`.
+
+**Example**
+
+Query the data from files named `file000`, `file001`, … , `file999`:
+
+``` sql
+SELECT count(*)
+FROM hdfs('hdfs://hdfs1:9000/big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name String, value UInt32')
+```
+
+## Virtual Columns {#virtual-columns}
+
+- `_path` — Path to the file.
+- `_file` — Name of the file.
+
+**See Also**
+
+- [Virtual columns](https://clickhouse.tech/docs/en/operations/table_engines/#table_engines-virtual_columns)
+
+[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/hdfs/)
diff --git a/docs/zh/query_language/table_functions/index.md b/docs/zh/query_language/table_functions/index.md
deleted file mode 120000
index 89b22522859..00000000000
--- a/docs/zh/query_language/table_functions/index.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/query_language/table_functions/index.md
\ No newline at end of file
diff --git a/docs/zh/query_language/table_functions/index.md b/docs/zh/query_language/table_functions/index.md
new file mode 100644
index 00000000000..ba231a6eeea
--- /dev/null
+++ b/docs/zh/query_language/table_functions/index.md
@@ -0,0 +1,34 @@
+---
+en_copy: true
+---
+
+# Table Functions {#table-functions}
+
+Table functions are methods for constructing tables.
+
+You can use table functions in:
+
+- The [FROM](../select.md#select-from) clause of the `SELECT` query.
+
+   This method creates a temporary table that is available only in the current query. The table is deleted when the query finishes.
+
+- The [CREATE TABLE AS \<table\_function()\>](../create.md#create-table-query) query.
+
+   It is one of the methods of creating a table.
+
+!!! warning "Warning"
+    You can’t use table functions if the [allow\_ddl](../../operations/settings/permissions_for_queries.md#settings_allow_ddl) setting is disabled.
+
+| Function              | Description                                                                                                                        |
+|-----------------------|------------------------------------------------------------------------------------------------------------------------------------|
+| [file](file.md)       | Creates a [File](../../operations/table_engines/file.md)-engine table.                                                             |
+| [merge](merge.md)     | Creates a [Merge](../../operations/table_engines/merge.md)-engine table.                                                           |
+| [numbers](numbers.md) | Creates a table with a single column filled with integer numbers.                                                                  |
+| [remote](remote.md)   | Allows you to access remote servers without creating a [Distributed](../../operations/table_engines/distributed.md)-engine table.  |
+| [url](url.md)         | Creates a [URL](../../operations/table_engines/url.md)-engine table.                                                               |
+| [mysql](mysql.md)     | Creates a [MySQL](../../operations/table_engines/mysql.md)-engine table.                                                           |
+| [jdbc](jdbc.md)       | Creates a [JDBC](../../operations/table_engines/jdbc.md)-engine table.                                                             |
+| [odbc](odbc.md)       | Creates an [ODBC](../../operations/table_engines/odbc.md)-engine table.                                                            |
+| [hdfs](hdfs.md)       | Creates an [HDFS](../../operations/table_engines/hdfs.md)-engine table.                                                            |
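+
+For instance, a table function is called directly in the `FROM` clause. A minimal sketch using `numbers`, which needs no external resources:
+
+``` sql
+-- Builds a 5-row temporary table on the fly; it is dropped when the query finishes.
+SELECT number * 2 AS even FROM numbers(5);
+```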
+
+[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/)
diff --git a/docs/zh/query_language/table_functions/input.md b/docs/zh/query_language/table_functions/input.md
deleted file mode 120000
index f23cc8ee673..00000000000
--- a/docs/zh/query_language/table_functions/input.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/query_language/table_functions/input.md
\ No newline at end of file
diff --git a/docs/zh/query_language/table_functions/input.md b/docs/zh/query_language/table_functions/input.md
new file mode 100644
index 00000000000..7536a9bffc2
--- /dev/null
+++ b/docs/zh/query_language/table_functions/input.md
@@ -0,0 +1,44 @@
+---
+en_copy: true
+---
+
+# input {#input}
+
+`input(structure)` — a table function that allows you to effectively convert data sent to the
+server with one structure and insert it into a table with another structure.
+
+`structure` — the structure of the data sent to the server, in the following format: `'column1_name column1_type, column2_name column2_type, ...'`.
+For example, `'id UInt32, name String'`.
+
+This function can be used only in an `INSERT SELECT` query, and only once, but it otherwise behaves like an ordinary table function
+(for example, it can be used in a subquery).
+
+Data can be sent in any way, as for an ordinary `INSERT` query, in any available [format](../../interfaces/formats.md#formats)
+that must be specified at the end of the query (unlike an ordinary `INSERT SELECT`).
+
+The main feature of this function is that when the server receives data from the client, it simultaneously converts the data
+according to the list of expressions in the `SELECT` clause and inserts it into the target table. A temporary table
+with all the transferred data is not created.
+
+**Examples**
+
+- Let the `test` table have the following structure `(a String, b String)`,
+  and let the data in `data.csv` have a different structure `(col1 String, col2 Date, col3 Int32)`. The query that inserts
+  data from `data.csv` into the `test` table with simultaneous conversion looks like this:
+
+``` bash
+$ cat data.csv | clickhouse-client --query="INSERT INTO test SELECT lower(col1), col3 * col3 FROM input('col1 String, col2 Date, col3 Int32') FORMAT CSV";
+```
+
+- If `data.csv` contains data of the same structure `test_structure` as the table `test`, then these two queries are equivalent:
+
+``` bash
+$ cat data.csv | clickhouse-client --query="INSERT INTO test FORMAT CSV"
+$ cat data.csv | clickhouse-client --query="INSERT INTO test SELECT * FROM input('test_structure') FORMAT CSV"
+```
+
+[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/input/)
diff --git a/docs/zh/query_language/table_functions/jdbc.md b/docs/zh/query_language/table_functions/jdbc.md
deleted file mode 120000
index 73bec80ca58..00000000000
--- a/docs/zh/query_language/table_functions/jdbc.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/query_language/table_functions/jdbc.md
\ No newline at end of file
diff --git a/docs/zh/query_language/table_functions/jdbc.md b/docs/zh/query_language/table_functions/jdbc.md
new file mode 100644
index 00000000000..e1ba7b362bd
--- /dev/null
+++ b/docs/zh/query_language/table_functions/jdbc.md
@@ -0,0 +1,26 @@
+---
+en_copy: true
+---
+
+# jdbc {#table-function-jdbc}
+
+`jdbc(jdbc_connection_uri, schema, table)` — returns a table that is connected via a JDBC driver.
+
+This table function requires the separate `clickhouse-jdbc-bridge` program to be running.
+It supports Nullable types (based on the DDL of the remote table that is queried).
+ +**Examples** + +``` sql +SELECT * FROM jdbc('jdbc:mysql://localhost:3306/?user=root&password=root', 'schema', 'table') +``` + +``` sql +SELECT * FROM jdbc('mysql://localhost:3306/?user=root&password=root', 'schema', 'table') +``` + +``` sql +SELECT * FROM jdbc('datasource://mysql-local', 'schema', 'table') +``` + +[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/jdbc/) diff --git a/docs/zh/query_language/table_functions/merge.md b/docs/zh/query_language/table_functions/merge.md deleted file mode 120000 index 383f6c88331..00000000000 --- a/docs/zh/query_language/table_functions/merge.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/query_language/table_functions/merge.md \ No newline at end of file diff --git a/docs/zh/query_language/table_functions/merge.md b/docs/zh/query_language/table_functions/merge.md new file mode 100644 index 00000000000..3638fad418d --- /dev/null +++ b/docs/zh/query_language/table_functions/merge.md @@ -0,0 +1,11 @@ +--- +en_copy: true +--- + +# merge {#merge} + +`merge(db_name, 'tables_regexp')` – Creates a temporary Merge table. For more information, see the section “Table engines, Merge”. + +The table structure is taken from the first table encountered that matches the regular expression. + +[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/merge/) diff --git a/docs/zh/query_language/table_functions/mysql.md b/docs/zh/query_language/table_functions/mysql.md deleted file mode 120000 index 75c032cc63f..00000000000 --- a/docs/zh/query_language/table_functions/mysql.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/query_language/table_functions/mysql.md \ No newline at end of file diff --git a/docs/zh/query_language/table_functions/mysql.md b/docs/zh/query_language/table_functions/mysql.md new file mode 100644 index 00000000000..5a8e8d4fd96 --- /dev/null +++ b/docs/zh/query_language/table_functions/mysql.md @@ -0,0 +1,83 @@ +--- +en_copy: true +--- + +# mysql {#mysql} + +Allows `SELECT` queries to be performed on data that is stored on a remote MySQL server. + +``` sql +mysql('host:port', 'database', 'table', 'user', 'password'[, replace_query, 'on_duplicate_clause']); +``` + +**Parameters** + +- `host:port` — MySQL server address. + +- `database` — Remote database name. + +- `table` — Remote table name. + +- `user` — MySQL user. + +- `password` — User password. + +- `replace_query` — Flag that converts `INSERT INTO` queries to `REPLACE INTO`. If `replace_query=1`, the query is replaced. + +- `on_duplicate_clause` — The `ON DUPLICATE KEY on_duplicate_clause` expression that is added to the `INSERT` query. + + Example: `INSERT INTO t (c1,c2) VALUES ('a', 2) ON DUPLICATE KEY UPDATE c2 = c2 + 1`, where `on_duplicate_clause` is `UPDATE c2 = c2 + 1`. See the MySQL documentation to find which `on_duplicate_clause` you can use with the `ON DUPLICATE KEY` clause. + + To specify `on_duplicate_clause` you need to pass `0` to the `replace_query` parameter. If you simultaneously pass `replace_query = 1` and `on_duplicate_clause`, ClickHouse generates an exception. + +Simple `WHERE` clauses such as `=, !=, >, >=, <, <=` are currently executed on the MySQL server. + +The rest of the conditions and the `LIMIT` sampling constraint are executed in ClickHouse only after the query to MySQL finishes. + +**Returned Value** + +A table object with the same columns as the original MySQL table. 
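+
+To make the `WHERE` split described above concrete, here is a minimal sketch (the server address, credentials, and column names are illustrative):
+
+``` sql
+-- `id > 10` is a simple condition and is executed on the MySQL server;
+-- `lower(name) LIKE '%abc%'` and `LIMIT 5` are applied by ClickHouse
+-- after the result arrives from MySQL.
+SELECT *
+FROM mysql('localhost:3306', 'db', 'table', 'user', 'password')
+WHERE id > 10 AND lower(name) LIKE '%abc%'
+LIMIT 5
+```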
+
+## Usage Example {#usage-example}
+
+Table in MySQL:
+
+``` text
+mysql> CREATE TABLE `test`.`test` (
+    ->   `int_id` INT NOT NULL AUTO_INCREMENT,
+    ->   `int_nullable` INT NULL DEFAULT NULL,
+    ->   `float` FLOAT NOT NULL,
+    ->   `float_nullable` FLOAT NULL DEFAULT NULL,
+    ->   PRIMARY KEY (`int_id`));
+Query OK, 0 rows affected (0,09 sec)
+
+mysql> insert into test (`int_id`, `float`) VALUES (1,2);
+Query OK, 1 row affected (0,00 sec)
+
+mysql> select * from test;
++--------+--------------+-------+----------------+
+| int_id | int_nullable | float | float_nullable |
++--------+--------------+-------+----------------+
+|      1 |         NULL |     2 |           NULL |
++--------+--------------+-------+----------------+
+1 row in set (0,00 sec)
+```
+
+Selecting data from ClickHouse:
+
+``` sql
+SELECT * FROM mysql('localhost:3306', 'test', 'test', 'bayonet', '123')
+```
+
+``` text
+┌─int_id─┬─int_nullable─┬─float─┬─float_nullable─┐
+│      1 │         ᴺᵁᴸᴸ │     2 │           ᴺᵁᴸᴸ │
+└────────┴──────────────┴───────┴────────────────┘
+```
+
+## See Also {#see-also}
+
+- [The ‘MySQL’ table engine](../../operations/table_engines/mysql.md)
+- [Using MySQL as a source of external dictionary](../dicts/external_dicts_dict_sources.md#dicts-external_dicts_dict_sources-mysql)
+
+[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/mysql/)
diff --git a/docs/zh/query_language/table_functions/numbers.md b/docs/zh/query_language/table_functions/numbers.md
deleted file mode 120000
index a679b915669..00000000000
--- a/docs/zh/query_language/table_functions/numbers.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/query_language/table_functions/numbers.md
\ No newline at end of file
diff --git a/docs/zh/query_language/table_functions/numbers.md b/docs/zh/query_language/table_functions/numbers.md
new file mode 100644
index 00000000000..5aec0b3c96b
--- /dev/null
+++ b/docs/zh/query_language/table_functions/numbers.md
@@ -0,0 +1,27 @@
+---
+en_copy: true
+---
+
+# numbers {#numbers}
+
+`numbers(N)` – Returns a table with the single ‘number’ column (UInt64) that contains integers from 0 to N-1.
+`numbers(N, M)` – Returns a table with the single ‘number’ column (UInt64) that contains integers from N to (N + M - 1).
+
+Similar to the `system.numbers` table, it can be used for testing and generating successive values; `numbers(N, M)` is more efficient than `system.numbers`.
+
+The following queries are equivalent:
+
+``` sql
+SELECT * FROM numbers(10);
+SELECT * FROM numbers(0, 10);
+SELECT * FROM system.numbers LIMIT 10;
+```
+
+Examples:
+
+``` sql
+-- Generate a sequence of dates from 2010-01-01 to 2010-12-31
+SELECT toDate('2010-01-01') + number AS d FROM numbers(365);
+```
+
+[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/numbers/)
diff --git a/docs/zh/query_language/table_functions/odbc.md b/docs/zh/query_language/table_functions/odbc.md
deleted file mode 120000
index 7620f920494..00000000000
--- a/docs/zh/query_language/table_functions/odbc.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/query_language/table_functions/odbc.md
\ No newline at end of file
diff --git a/docs/zh/query_language/table_functions/odbc.md b/docs/zh/query_language/table_functions/odbc.md
new file mode 100644
index 00000000000..8c972b1f93a
--- /dev/null
+++ b/docs/zh/query_language/table_functions/odbc.md
@@ -0,0 +1,105 @@
+---
+en_copy: true
+---
+
+# odbc {#table-functions-odbc}
+
+Returns a table that is connected via [ODBC](https://en.wikipedia.org/wiki/Open_Database_Connectivity).
+ +``` sql +odbc(connection_settings, external_database, external_table) +``` + +Parameters: + +- `connection_settings` — Name of the section with connection settings in the `odbc.ini` file. +- `external_database` — Name of a database in an external DBMS. +- `external_table` — Name of a table in the `external_database`. + +To safely implement ODBC connections, ClickHouse uses a separate program `clickhouse-odbc-bridge`. If the ODBC driver is loaded directly from `clickhouse-server`, driver problems can crash the ClickHouse server. ClickHouse automatically starts `clickhouse-odbc-bridge` when it is required. The ODBC bridge program is installed from the same package as the `clickhouse-server`. + +The fields with the `NULL` values from the external table are converted into the default values for the base data type. For example, if a remote MySQL table field has the `INT NULL` type it is converted to 0 (the default value for ClickHouse `Int32` data type). + +## Usage example {#usage-example} + +**Getting data from the local MySQL installation via ODBC** + +This example is checked for Ubuntu Linux 18.04 and MySQL server 5.7. + +Ensure that unixODBC and MySQL Connector are installed. + +By default (if installed from packages), ClickHouse starts as user `clickhouse`. Thus you need to create and configure this user in the MySQL server. + +``` bash +$ sudo mysql +``` + +``` sql +mysql> CREATE USER 'clickhouse'@'localhost' IDENTIFIED BY 'clickhouse'; +mysql> GRANT ALL PRIVILEGES ON *.* TO 'clickhouse'@'clickhouse' WITH GRANT OPTION; +``` + +Then configure the connection in `/etc/odbc.ini`. + +``` bash +$ cat /etc/odbc.ini +[mysqlconn] +DRIVER = /usr/local/lib/libmyodbc5w.so +SERVER = 127.0.0.1 +PORT = 3306 +DATABASE = test +USERNAME = clickhouse +PASSWORD = clickhouse +``` + +You can check the connection using the `isql` utility from the unixODBC installation. + +``` bash +$ isql -v mysqlconn ++---------------------------------------+ +| Connected! | +| | +... +``` + +Table in MySQL: + +``` text +mysql> CREATE TABLE `test`.`test` ( + -> `int_id` INT NOT NULL AUTO_INCREMENT, + -> `int_nullable` INT NULL DEFAULT NULL, + -> `float` FLOAT NOT NULL, + -> `float_nullable` FLOAT NULL DEFAULT NULL, + -> PRIMARY KEY (`int_id`)); +Query OK, 0 rows affected (0,09 sec) + +mysql> insert into test (`int_id`, `float`) VALUES (1,2); +Query OK, 1 row affected (0,00 sec) + +mysql> select * from test; ++--------+--------------+-------+----------------+ +| int_id | int_nullable | float | float_nullable | ++--------+--------------+-------+----------------+ +| 1 | NULL | 2 | NULL | ++--------+--------------+-------+----------------+ +1 row in set (0,00 sec) +``` + +Retrieving data from the MySQL table in ClickHouse: + +``` sql +SELECT * FROM odbc('DSN=mysqlconn', 'test', 'test') +``` + +``` text +┌─int_id─┬─int_nullable─┬─float─┬─float_nullable─┐ +│ 1 │ 0 │ 2 │ 0 │ +└────────┴──────────────┴───────┴────────────────┘ +``` + +## See Also {#see-also} + +- [ODBC external dictionaries](../../query_language/dicts/external_dicts_dict_sources.md#dicts-external_dicts_dict_sources-odbc) +- [ODBC table engine](../../operations/table_engines/odbc.md). 
+
+[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/odbc/)
diff --git a/docs/zh/query_language/table_functions/remote.md b/docs/zh/query_language/table_functions/remote.md
deleted file mode 120000
index b157c4076d3..00000000000
--- a/docs/zh/query_language/table_functions/remote.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/query_language/table_functions/remote.md
\ No newline at end of file
diff --git a/docs/zh/query_language/table_functions/remote.md b/docs/zh/query_language/table_functions/remote.md
new file mode 100644
index 00000000000..e8c751af7e2
--- /dev/null
+++ b/docs/zh/query_language/table_functions/remote.md
@@ -0,0 +1,80 @@
+---
+en_copy: true
+---
+
+# remote, remoteSecure {#remote-remotesecure}
+
+Allows you to access remote servers without creating a `Distributed` table.
+
+Signatures:
+
+``` sql
+remote('addresses_expr', db, table[, 'user'[, 'password']])
+remote('addresses_expr', db.table[, 'user'[, 'password']])
+```
+
+`addresses_expr` – An expression that generates addresses of remote servers. This may be just one server address. The server address is `host:port`, or just `host`. The host can be specified as the server name, or as the IPv4 or IPv6 address. An IPv6 address is specified in square brackets. The port is the TCP port on the remote server. If the port is omitted, it uses `tcp_port` from the server’s config file (by default, 9000).
+
+!!! important "Important"
+    The port is required for an IPv6 address.
+
+Examples:
+
+``` text
+example01-01-1
+example01-01-1:9000
+localhost
+127.0.0.1
+[::]:9000
+[2a02:6b8:0:1111::11]:9000
+```
+
+Multiple addresses can be comma-separated. In this case, ClickHouse will use distributed processing, so it will send the query to all specified addresses (like shards with different data).
+
+Example:
+
+``` text
+example01-01-1,example01-02-1
+```
+
+Part of the expression can be specified in curly brackets. The previous example can be written as follows:
+
+``` text
+example01-0{1,2}-1
+```
+
+Curly brackets can contain a range of numbers separated by two dots (non-negative integers). In this case, the range is expanded to a set of values that generate shard addresses. If the first number starts with zero, the values are formed with the same zero alignment. The previous example can be written as follows:
+
+``` text
+example01-{01..02}-1
+```
+
+If you have multiple pairs of curly brackets, it generates the direct product of the corresponding sets.
+
+Addresses and parts of addresses in curly brackets can be separated by the pipe symbol (\|). In this case, the corresponding sets of addresses are interpreted as replicas, and the query will be sent to the first healthy replica. However, the replicas are iterated in the order currently set in the [load\_balancing](../../operations/settings/settings.md) setting.
+
+Example:
+
+``` text
+example01-{01..02}-{1|2}
+```
+
+This example specifies two shards that each have two replicas.
+
+The number of addresses generated is limited by a constant. Right now this is 1000 addresses.
+
+Using the `remote` table function is less optimal than creating a `Distributed` table, because in this case, the server connection is re-established for every request. In addition, if host names are set, the names are resolved, and errors are not counted when working with various replicas. When processing a large number of queries, always create the `Distributed` table ahead of time, and don’t use the `remote` table function.
+ +The `remote` table function can be useful in the following cases: + +- Accessing a specific server for data comparison, debugging, and testing. +- Queries between various ClickHouse clusters for research purposes. +- Infrequent distributed requests that are made manually. +- Distributed requests where the set of servers is re-defined each time. + +If the user is not specified, `default` is used. +If the password is not specified, an empty password is used. + +`remoteSecure` - same as `remote` but with secured connection. Default port — [tcp\_port\_secure](../../operations/server_settings/settings.md#server_settings-tcp_port_secure) from config or 9440. + +[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/remote/) diff --git a/docs/zh/query_language/table_functions/url.md b/docs/zh/query_language/table_functions/url.md deleted file mode 120000 index 038e08f7ba9..00000000000 --- a/docs/zh/query_language/table_functions/url.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/query_language/table_functions/url.md \ No newline at end of file diff --git a/docs/zh/query_language/table_functions/url.md b/docs/zh/query_language/table_functions/url.md new file mode 100644 index 00000000000..e1250b438ab --- /dev/null +++ b/docs/zh/query_language/table_functions/url.md @@ -0,0 +1,23 @@ +--- +en_copy: true +--- + +# url {#url} + +`url(URL, format, structure)` - returns a table created from the `URL` with given +`format` and `structure`. + +URL - HTTP or HTTPS server address, which can accept `GET` and/or `POST` requests. + +format - [format](../../interfaces/formats.md#formats) of the data. + +structure - table structure in `'UserID UInt64, Name String'` format. Determines column names and types. + +**Example** + +``` sql +-- getting the first 3 lines of a table that contains columns of String and UInt32 type from HTTP-server which answers in CSV format. +SELECT * FROM url('http://127.0.0.1:12345/', CSV, 'column1 String, column2 UInt32') LIMIT 3 +``` + +[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/url/) diff --git a/website/Dockerfile b/website/Dockerfile deleted file mode 100644 index 64eb0ce5e33..00000000000 --- a/website/Dockerfile +++ /dev/null @@ -1,4 +0,0 @@ -FROM nginx:mainline -COPY . 
/usr/share/nginx/html -COPY nginx/nginx.conf /etc/nginx/nginx.conf -COPY nginx/default.conf /etc/nginx/conf.d/default.conf diff --git a/website/js/base.js b/website/js/base.js index 8dfd426d6ed..d8fc8eaf639 100644 --- a/website/js/base.js +++ b/website/js/base.js @@ -62,12 +62,14 @@ d.addEventListener("DOMContentLoaded", f, false); } else { f(); } })(document, window, "yandex_metrika_callbacks2"); + var beforePrint = function() { var details = document.getElementsByTagName("details"); for (var i = 0; i < details.length; ++i) { details[i].open = 1; } }; + if (window.matchMedia) { window.matchMedia('print').addListener(function(q) { if (q.matches) { diff --git a/website/js/docs.js b/website/js/docs.js index d54644cd040..3d0ff4364a3 100644 --- a/website/js/docs.js +++ b/website/js/docs.js @@ -85,7 +85,11 @@ $(document).ready(function () { advancedSyntax: true, clickAnalytics: true, hitsPerPage: 25, - 'facetFilters': ["lang:" + $('html').attr('lang')] + 'facetFilters': [ + 'lang:' + $('html').attr('lang'), + 'version': $('html').attr('data-version'), + 'single-page': $('html').attr('single-page'), + ] }, debug: true }); diff --git a/website/templates/docs/sidebar.html b/website/templates/docs/sidebar.html index 96715fbb6c4..f3ce0b5be7e 100644 --- a/website/templates/docs/sidebar.html +++ b/website/templates/docs/sidebar.html @@ -32,7 +32,7 @@ {% set level = 1 %} {% include "templates/docs/sidebar-item.html" %} {% endfor %} - + {{ _('PDF version') }}
    {{ _('PDF version') }}
    From 36f950c13e3f5493b8327b739ffa8949fcbe92aa Mon Sep 17 00:00:00 2001 From: Ivan Blinkov Date: Fri, 3 Apr 2020 17:14:33 +0300 Subject: [PATCH 081/484] fix mistype in docs.js --- website/js/docs.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/js/docs.js b/website/js/docs.js index 3d0ff4364a3..7438f5a5872 100644 --- a/website/js/docs.js +++ b/website/js/docs.js @@ -87,8 +87,8 @@ $(document).ready(function () { hitsPerPage: 25, 'facetFilters': [ 'lang:' + $('html').attr('lang'), - 'version': $('html').attr('data-version'), - 'single-page': $('html').attr('single-page'), + 'version' + $('html').attr('data-version'), + 'single-page' + $('html').attr('single-page'), ] }, debug: true From 3e185d24c9fe772c7cf03d5475247fb829a21dfa Mon Sep 17 00:00:00 2001 From: Ivan Blinkov Date: Fri, 3 Apr 2020 17:15:22 +0300 Subject: [PATCH 082/484] Update docs.js --- website/js/docs.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/js/docs.js b/website/js/docs.js index 7438f5a5872..879fbdbca3c 100644 --- a/website/js/docs.js +++ b/website/js/docs.js @@ -87,8 +87,8 @@ $(document).ready(function () { hitsPerPage: 25, 'facetFilters': [ 'lang:' + $('html').attr('lang'), - 'version' + $('html').attr('data-version'), - 'single-page' + $('html').attr('single-page'), + 'version:' + $('html').attr('data-version'), + 'single-page:' + $('html').attr('single-page'), ] }, debug: true From 38a50f44d34d65580f47d4bf678a7fc9ab163ac6 Mon Sep 17 00:00:00 2001 From: alesapin Date: Fri, 3 Apr 2020 17:21:24 +0300 Subject: [PATCH 083/484] Remove empty line --- dbms/Storages/StorageReplicatedMergeTree.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/dbms/Storages/StorageReplicatedMergeTree.cpp b/dbms/Storages/StorageReplicatedMergeTree.cpp index 0d8755ed99a..7107328e4ff 100644 --- a/dbms/Storages/StorageReplicatedMergeTree.cpp +++ b/dbms/Storages/StorageReplicatedMergeTree.cpp @@ -5292,7 +5292,6 @@ bool StorageReplicatedMergeTree::canUseAdaptiveGranularity() const } - MutationCommands StorageReplicatedMergeTree::getFirtsAlterMutationCommandsForPart(const DataPartPtr & part) const { return queue.getFirstAlterMutationCommandsForPart(part); From 06446b4f08a142d6f1bc30664c47ded88ab51782 Mon Sep 17 00:00:00 2001 From: Ivan Lezhankin Date: Fri, 3 Apr 2020 18:14:31 +0300 Subject: [PATCH 084/484] =?UTF-8?q?dbms/=20=E2=86=92=20src/?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .gitignore | 200 ++++++++--------- .gitlab-ci.yml | 4 +- CMakeLists.txt | 2 +- cmake/analysis.cmake | 2 +- cmake/lib_name.cmake | 2 +- dbms/Client/CMakeLists.txt | 8 - docker/builder/README.md | 2 +- docker/packager/binary/build.sh | 2 +- docker/test/performance-comparison/compare.sh | 2 +- docs/en/development/browse_code.md | 2 +- docs/en/development/tests.md | 2 +- docs/en/interfaces/tcp.md | 2 +- ...sampling_query_profiler_example_result.txt | 212 +++++++++--------- .../operations/settings/query_complexity.md | 4 +- docs/en/operations/system_tables.md | 2 +- .../query_language/functions/introspection.md | 8 +- docs/es/changelog/index.md | 2 +- docs/es/development/browse_code.md | 2 +- docs/es/development/tests.md | 2 +- docs/es/interfaces/tcp.md | 2 +- ...sampling_query_profiler_example_result.txt | 212 +++++++++--------- .../operations/settings/query_complexity.md | 4 +- docs/es/operations/system_tables.md | 2 +- .../query_language/functions/introspection.md | 8 +- docs/fa/getting_started/install.md | 2 +- docs/fa/interfaces/tcp.md | 2 +- 
docs/fr/changelog/index.md | 2 +- docs/fr/development/browse_code.md | 2 +- docs/fr/development/developer_instruction.md | 2 +- docs/fr/development/tests.md | 22 +- .../example_datasets/metrica.md | 2 +- docs/fr/interfaces/tcp.md | 2 +- docs/fr/operations/performance_test.md | 4 +- .../operations/settings/query_complexity.md | 4 +- docs/fr/operations/system_tables.md | 2 +- docs/fr/query_language/alter.md | 2 +- .../query_language/functions/introspection.md | 8 +- docs/fr/query_language/operators.md | 2 +- docs/ru/development/browse_code.md | 2 +- docs/ru/interfaces/tcp.md | 2 +- .../operations/settings/query_complexity.md | 4 +- docs/ru/operations/system_tables.md | 2 +- .../query_language/functions/introspection.md | 8 +- docs/zh/development/tests.md | 2 +- docs/zh/getting_started/install.md | 2 +- docs/zh/interfaces/tcp.md | 2 +- .../operations/settings/query_complexity.md | 4 +- docs/zh/operations/table_engines/mergetree.md | 2 +- {dbms => src}/Access/AccessControlManager.cpp | 0 {dbms => src}/Access/AccessControlManager.h | 0 {dbms => src}/Access/AccessFlags.h | 0 {dbms => src}/Access/AccessRights.cpp | 0 {dbms => src}/Access/AccessRights.h | 0 {dbms => src}/Access/AccessRightsElement.cpp | 0 {dbms => src}/Access/AccessRightsElement.h | 0 {dbms => src}/Access/AccessType.h | 0 {dbms => src}/Access/AllowedClientHosts.cpp | 0 {dbms => src}/Access/AllowedClientHosts.h | 0 {dbms => src}/Access/Authentication.cpp | 0 {dbms => src}/Access/Authentication.h | 0 {dbms => src}/Access/CMakeLists.txt | 0 {dbms => src}/Access/ContextAccess.cpp | 0 {dbms => src}/Access/ContextAccess.h | 0 {dbms => src}/Access/DiskAccessStorage.cpp | 0 {dbms => src}/Access/DiskAccessStorage.h | 0 {dbms => src}/Access/EnabledQuota.cpp | 0 {dbms => src}/Access/EnabledQuota.h | 0 {dbms => src}/Access/EnabledRoles.cpp | 0 {dbms => src}/Access/EnabledRoles.h | 0 {dbms => src}/Access/EnabledRolesInfo.cpp | 0 {dbms => src}/Access/EnabledRolesInfo.h | 0 {dbms => src}/Access/EnabledRowPolicies.cpp | 0 {dbms => src}/Access/EnabledRowPolicies.h | 0 {dbms => src}/Access/EnabledSettings.cpp | 0 {dbms => src}/Access/EnabledSettings.h | 0 {dbms => src}/Access/ExtendedRoleSet.cpp | 0 {dbms => src}/Access/ExtendedRoleSet.h | 0 {dbms => src}/Access/IAccessEntity.cpp | 0 {dbms => src}/Access/IAccessEntity.h | 0 {dbms => src}/Access/IAccessStorage.cpp | 0 {dbms => src}/Access/IAccessStorage.h | 0 {dbms => src}/Access/MemoryAccessStorage.cpp | 0 {dbms => src}/Access/MemoryAccessStorage.h | 0 .../Access/MultipleAccessStorage.cpp | 0 {dbms => src}/Access/MultipleAccessStorage.h | 0 {dbms => src}/Access/Quota.cpp | 0 {dbms => src}/Access/Quota.h | 0 {dbms => src}/Access/QuotaCache.cpp | 0 {dbms => src}/Access/QuotaCache.h | 0 {dbms => src}/Access/QuotaUsageInfo.cpp | 0 {dbms => src}/Access/QuotaUsageInfo.h | 0 {dbms => src}/Access/Role.cpp | 0 {dbms => src}/Access/Role.h | 0 {dbms => src}/Access/RoleCache.cpp | 0 {dbms => src}/Access/RoleCache.h | 0 {dbms => src}/Access/RowPolicy.cpp | 0 {dbms => src}/Access/RowPolicy.h | 0 {dbms => src}/Access/RowPolicyCache.cpp | 0 {dbms => src}/Access/RowPolicyCache.h | 0 {dbms => src}/Access/SettingsConstraints.cpp | 0 {dbms => src}/Access/SettingsConstraints.h | 0 {dbms => src}/Access/SettingsProfile.cpp | 0 {dbms => src}/Access/SettingsProfile.h | 0 .../Access/SettingsProfileElement.cpp | 0 {dbms => src}/Access/SettingsProfileElement.h | 0 .../Access/SettingsProfilesCache.cpp | 0 {dbms => src}/Access/SettingsProfilesCache.h | 0 {dbms => src}/Access/User.cpp | 0 {dbms => src}/Access/User.h | 0 
.../Access/UsersConfigAccessStorage.cpp | 0 .../Access/UsersConfigAccessStorage.h | 0 .../AggregateFunctionAggThrow.cpp | 0 .../AggregateFunctionArgMinMax.h | 0 .../AggregateFunctionArray.cpp | 0 .../AggregateFunctionArray.h | 0 .../AggregateFunctionAvg.cpp | 0 .../AggregateFunctions/AggregateFunctionAvg.h | 0 .../AggregateFunctionAvgWeighted.cpp | 0 .../AggregateFunctionAvgWeighted.h | 0 .../AggregateFunctionBitwise.cpp | 0 .../AggregateFunctionBitwise.h | 0 .../AggregateFunctionBoundingRatio.cpp | 0 .../AggregateFunctionBoundingRatio.h | 0 ...ateFunctionCategoricalInformationValue.cpp | 0 ...egateFunctionCategoricalInformationValue.h | 0 .../AggregateFunctionCombinatorFactory.cpp | 0 .../AggregateFunctionCombinatorFactory.h | 0 .../AggregateFunctionCount.cpp | 0 .../AggregateFunctionCount.h | 0 .../AggregateFunctionEntropy.cpp | 0 .../AggregateFunctionEntropy.h | 0 .../AggregateFunctionFactory.cpp | 0 .../AggregateFunctionFactory.h | 0 .../AggregateFunctionForEach.cpp | 0 .../AggregateFunctionForEach.h | 0 .../AggregateFunctionGroupArray.cpp | 0 .../AggregateFunctionGroupArray.h | 0 .../AggregateFunctionGroupArrayInsertAt.cpp | 0 .../AggregateFunctionGroupArrayInsertAt.h | 0 .../AggregateFunctionGroupArrayMoving.cpp | 0 .../AggregateFunctionGroupArrayMoving.h | 0 .../AggregateFunctionGroupBitmap.cpp | 0 .../AggregateFunctionGroupBitmap.h | 0 .../AggregateFunctionGroupBitmapData.h | 0 .../AggregateFunctionGroupUniqArray.cpp | 0 .../AggregateFunctionGroupUniqArray.h | 0 .../AggregateFunctionHistogram.cpp | 0 .../AggregateFunctionHistogram.h | 0 .../AggregateFunctionIf.cpp | 0 .../AggregateFunctions/AggregateFunctionIf.h | 0 .../AggregateFunctionMLMethod.cpp | 0 .../AggregateFunctionMLMethod.h | 0 .../AggregateFunctionMaxIntersections.cpp | 0 .../AggregateFunctionMaxIntersections.h | 0 .../AggregateFunctionMerge.cpp | 0 .../AggregateFunctionMerge.h | 0 .../AggregateFunctionMinMaxAny.cpp | 0 .../AggregateFunctionMinMaxAny.h | 0 .../AggregateFunctionNothing.h | 0 .../AggregateFunctionNull.cpp | 0 .../AggregateFunctionNull.h | 0 .../AggregateFunctionOrFill.cpp | 0 .../AggregateFunctionOrFill.h | 0 .../AggregateFunctionQuantile.cpp | 0 .../AggregateFunctionQuantile.h | 0 .../AggregateFunctionResample.cpp | 0 .../AggregateFunctionResample.h | 0 .../AggregateFunctionRetention.cpp | 0 .../AggregateFunctionRetention.h | 0 .../AggregateFunctionSequenceMatch.cpp | 0 .../AggregateFunctionSequenceMatch.h | 0 ...ggregateFunctionSimpleLinearRegression.cpp | 0 .../AggregateFunctionSimpleLinearRegression.h | 0 .../AggregateFunctionState.cpp | 0 .../AggregateFunctionState.h | 0 .../AggregateFunctionStatistics.cpp | 0 .../AggregateFunctionStatistics.h | 0 .../AggregateFunctionStatisticsSimple.cpp | 0 .../AggregateFunctionStatisticsSimple.h | 0 .../AggregateFunctionSum.cpp | 0 .../AggregateFunctions/AggregateFunctionSum.h | 0 .../AggregateFunctionSumMap.cpp | 0 .../AggregateFunctionSumMap.h | 0 .../AggregateFunctionTimeSeriesGroupSum.cpp | 0 .../AggregateFunctionTimeSeriesGroupSum.h | 0 .../AggregateFunctionTopK.cpp | 0 .../AggregateFunctionTopK.h | 0 .../AggregateFunctionUniq.cpp | 0 .../AggregateFunctionUniq.h | 0 .../AggregateFunctionUniqCombined.cpp | 0 .../AggregateFunctionUniqCombined.h | 0 .../AggregateFunctionUniqUpTo.cpp | 0 .../AggregateFunctionUniqUpTo.h | 0 .../AggregateFunctionWindowFunnel.cpp | 0 .../AggregateFunctionWindowFunnel.h | 0 .../AggregateFunctions/CMakeLists.txt | 0 .../AggregateFunctions/FactoryHelpers.h | 0 {dbms => src}/AggregateFunctions/Helpers.h | 0 
.../AggregateFunctions/HelpersMinMaxAny.h | 0 .../AggregateFunctions/IAggregateFunction.h | 0 .../IAggregateFunctionCombinator.h | 0 .../AggregateFunctions/QuantileExact.h | 0 .../QuantileExactWeighted.h | 0 .../QuantileReservoirSampler.h | 0 .../QuantileReservoirSamplerDeterministic.h | 0 .../AggregateFunctions/QuantileTDigest.h | 0 .../AggregateFunctions/QuantileTiming.h | 0 .../AggregateFunctions/QuantilesCommon.h | 0 .../AggregateFunctions/ReservoirSampler.h | 0 .../ReservoirSamplerDeterministic.h | 0 .../UniqCombinedBiasData.cpp | 0 .../AggregateFunctions/UniqCombinedBiasData.h | 4 +- .../AggregateFunctions/UniqVariadicHash.cpp | 0 .../AggregateFunctions/UniqVariadicHash.h | 0 .../AggregateFunctions/UniquesHashSet.h | 0 .../parseAggregateFunctionParameters.cpp | 0 .../parseAggregateFunctionParameters.h | 0 .../registerAggregateFunctions.cpp | 0 .../registerAggregateFunctions.h | 0 .../AggregateFunctions/tests/CMakeLists.txt | 0 .../tests/quantile-t-digest.cpp | 0 {dbms => src}/CMakeLists.txt | 4 +- src/Client/CMakeLists.txt | 1 + {dbms => src}/Client/Connection.cpp | 0 {dbms => src}/Client/Connection.h | 0 {dbms => src}/Client/ConnectionPool.h | 0 .../Client/ConnectionPoolWithFailover.cpp | 0 .../Client/ConnectionPoolWithFailover.h | 0 .../Client/MultiplexedConnections.cpp | 0 {dbms => src}/Client/MultiplexedConnections.h | 0 {dbms => src}/Client/TimeoutSetter.cpp | 0 {dbms => src}/Client/TimeoutSetter.h | 0 {dbms => src}/Client/tests/CMakeLists.txt | 0 {dbms => src}/Client/tests/test_connect.cpp | 0 {dbms => src}/Columns/CMakeLists.txt | 0 {dbms => src}/Columns/Collator.cpp | 0 {dbms => src}/Columns/Collator.h | 0 .../Columns/ColumnAggregateFunction.cpp | 0 .../Columns/ColumnAggregateFunction.h | 0 {dbms => src}/Columns/ColumnArray.cpp | 0 {dbms => src}/Columns/ColumnArray.h | 0 {dbms => src}/Columns/ColumnConst.cpp | 0 {dbms => src}/Columns/ColumnConst.h | 0 {dbms => src}/Columns/ColumnDecimal.cpp | 0 {dbms => src}/Columns/ColumnDecimal.h | 0 {dbms => src}/Columns/ColumnFixedString.cpp | 0 {dbms => src}/Columns/ColumnFixedString.h | 0 {dbms => src}/Columns/ColumnFunction.cpp | 0 {dbms => src}/Columns/ColumnFunction.h | 0 .../Columns/ColumnLowCardinality.cpp | 0 {dbms => src}/Columns/ColumnLowCardinality.h | 0 {dbms => src}/Columns/ColumnNothing.h | 0 {dbms => src}/Columns/ColumnNullable.cpp | 0 {dbms => src}/Columns/ColumnNullable.h | 0 {dbms => src}/Columns/ColumnSet.h | 0 {dbms => src}/Columns/ColumnString.cpp | 0 {dbms => src}/Columns/ColumnString.h | 0 {dbms => src}/Columns/ColumnTuple.cpp | 0 {dbms => src}/Columns/ColumnTuple.h | 0 {dbms => src}/Columns/ColumnUnique.h | 0 {dbms => src}/Columns/ColumnVector.cpp | 0 {dbms => src}/Columns/ColumnVector.h | 0 {dbms => src}/Columns/ColumnVectorHelper.h | 0 {dbms => src}/Columns/ColumnsCommon.cpp | 0 {dbms => src}/Columns/ColumnsCommon.h | 0 {dbms => src}/Columns/ColumnsNumber.h | 0 {dbms => src}/Columns/FilterDescription.cpp | 0 {dbms => src}/Columns/FilterDescription.h | 0 {dbms => src}/Columns/IColumn.cpp | 0 {dbms => src}/Columns/IColumn.h | 0 {dbms => src}/Columns/IColumnDummy.h | 0 {dbms => src}/Columns/IColumnImpl.h | 0 {dbms => src}/Columns/IColumnUnique.h | 0 {dbms => src}/Columns/ReverseIndex.h | 0 {dbms => src}/Columns/getLeastSuperColumn.cpp | 0 {dbms => src}/Columns/getLeastSuperColumn.h | 0 {dbms => src}/Columns/tests/CMakeLists.txt | 0 .../Columns/tests/gtest_column_unique.cpp | 0 .../Columns/tests/gtest_weak_hash_32.cpp | 0 {dbms => src}/Common/ActionBlocker.h | 0 {dbms => src}/Common/ActionLock.cpp | 0 {dbms => 
src}/Common/ActionLock.h | 0
 [diffstat continues: several hundred {dbms => src} renames with no content changes ("| 0"), covering Common/ (incl. Config/, HashTable/, StringUtils/, ZooKeeper/, tests/), Compression/, Core/, DataStreams/, DataTypes/, Databases/, Dictionaries/ (incl. Embedded/), Disks/, Formats/, Functions/ (incl. GatherUtils/, URL/, array/), IO/ and Interpreters/; the only entries in this range with content changes are:]
 .../Dictionaries/MongoDBDictionarySource.cpp | 2 +-
 {dbms => src}/Functions/bitBoolMaskAnd.cpp | 2 +-
 {dbms => src}/Functions/bitBoolMaskOr.cpp | 2 +-
 {dbms => src}/Functions/bitSwapLastTwo.cpp | 2 +-
 {dbms => src}/Functions/bitWrapperFunc.cpp | 2 +-
.../tests/hash_map_string_small.cpp | 0 .../tests/in_join_subqueries_preprocessor.cpp | 0 .../Interpreters/tests/internal_iotop.cpp | 0 .../tests/logical_expressions_optimizer.cpp | 0 .../Interpreters/tests/select_query.cpp | 0 .../Interpreters/tests/string_hash_map.cpp | 0 .../Interpreters/tests/two_level_hash_map.cpp | 0 {dbms => src}/Interpreters/tests/users.cpp | 0 {dbms => src}/NOTICE | 0 {dbms => src}/Parsers/ASTAlterQuery.cpp | 0 {dbms => src}/Parsers/ASTAlterQuery.h | 0 {dbms => src}/Parsers/ASTAssignment.h | 0 {dbms => src}/Parsers/ASTAsterisk.cpp | 0 {dbms => src}/Parsers/ASTAsterisk.h | 0 {dbms => src}/Parsers/ASTCheckQuery.h | 0 .../Parsers/ASTColumnDeclaration.cpp | 0 {dbms => src}/Parsers/ASTColumnDeclaration.h | 0 {dbms => src}/Parsers/ASTColumnsMatcher.cpp | 0 {dbms => src}/Parsers/ASTColumnsMatcher.h | 0 .../Parsers/ASTConstraintDeclaration.cpp | 0 .../Parsers/ASTConstraintDeclaration.h | 0 {dbms => src}/Parsers/ASTCreateQuery.cpp | 0 {dbms => src}/Parsers/ASTCreateQuery.h | 0 {dbms => src}/Parsers/ASTCreateQuotaQuery.cpp | 0 {dbms => src}/Parsers/ASTCreateQuotaQuery.h | 0 {dbms => src}/Parsers/ASTCreateRoleQuery.cpp | 0 {dbms => src}/Parsers/ASTCreateRoleQuery.h | 0 .../Parsers/ASTCreateRowPolicyQuery.cpp | 0 .../Parsers/ASTCreateRowPolicyQuery.h | 0 .../Parsers/ASTCreateSettingsProfileQuery.cpp | 0 .../Parsers/ASTCreateSettingsProfileQuery.h | 0 {dbms => src}/Parsers/ASTCreateUserQuery.cpp | 0 {dbms => src}/Parsers/ASTCreateUserQuery.h | 0 {dbms => src}/Parsers/ASTDictionary.cpp | 0 {dbms => src}/Parsers/ASTDictionary.h | 0 .../ASTDictionaryAttributeDeclaration.cpp | 0 .../ASTDictionaryAttributeDeclaration.h | 0 .../Parsers/ASTDropAccessEntityQuery.cpp | 0 .../Parsers/ASTDropAccessEntityQuery.h | 0 {dbms => src}/Parsers/ASTDropQuery.cpp | 0 {dbms => src}/Parsers/ASTDropQuery.h | 0 {dbms => src}/Parsers/ASTEnumElement.h | 0 {dbms => src}/Parsers/ASTExplainQuery.h | 0 {dbms => src}/Parsers/ASTExpressionList.cpp | 0 {dbms => src}/Parsers/ASTExpressionList.h | 0 {dbms => src}/Parsers/ASTExtendedRoleSet.cpp | 0 {dbms => src}/Parsers/ASTExtendedRoleSet.h | 0 {dbms => src}/Parsers/ASTFunction.cpp | 0 {dbms => src}/Parsers/ASTFunction.h | 0 .../ASTFunctionWithKeyValueArguments.cpp | 0 .../ASTFunctionWithKeyValueArguments.h | 0 {dbms => src}/Parsers/ASTGrantQuery.cpp | 0 {dbms => src}/Parsers/ASTGrantQuery.h | 0 {dbms => src}/Parsers/ASTIdentifier.cpp | 0 {dbms => src}/Parsers/ASTIdentifier.h | 0 {dbms => src}/Parsers/ASTIndexDeclaration.h | 0 {dbms => src}/Parsers/ASTInsertQuery.cpp | 0 {dbms => src}/Parsers/ASTInsertQuery.h | 0 {dbms => src}/Parsers/ASTKillQueryQuery.cpp | 0 {dbms => src}/Parsers/ASTKillQueryQuery.h | 0 {dbms => src}/Parsers/ASTLiteral.cpp | 0 {dbms => src}/Parsers/ASTLiteral.h | 0 {dbms => src}/Parsers/ASTNameTypePair.h | 0 {dbms => src}/Parsers/ASTOptimizeQuery.cpp | 0 {dbms => src}/Parsers/ASTOptimizeQuery.h | 0 {dbms => src}/Parsers/ASTOrderByElement.cpp | 0 {dbms => src}/Parsers/ASTOrderByElement.h | 0 {dbms => src}/Parsers/ASTPartition.cpp | 0 {dbms => src}/Parsers/ASTPartition.h | 0 .../Parsers/ASTQualifiedAsterisk.cpp | 0 {dbms => src}/Parsers/ASTQualifiedAsterisk.h | 0 {dbms => src}/Parsers/ASTQueryParameter.cpp | 0 {dbms => src}/Parsers/ASTQueryParameter.h | 0 .../Parsers/ASTQueryWithOnCluster.cpp | 0 {dbms => src}/Parsers/ASTQueryWithOnCluster.h | 0 {dbms => src}/Parsers/ASTQueryWithOutput.cpp | 0 {dbms => src}/Parsers/ASTQueryWithOutput.h | 0 .../Parsers/ASTQueryWithTableAndOutput.cpp | 0 .../Parsers/ASTQueryWithTableAndOutput.h | 0 {dbms => 
src}/Parsers/ASTRenameQuery.h | 0 {dbms => src}/Parsers/ASTSampleRatio.cpp | 0 {dbms => src}/Parsers/ASTSampleRatio.h | 0 {dbms => src}/Parsers/ASTSelectQuery.cpp | 0 {dbms => src}/Parsers/ASTSelectQuery.h | 0 .../Parsers/ASTSelectWithUnionQuery.cpp | 0 .../Parsers/ASTSelectWithUnionQuery.h | 0 {dbms => src}/Parsers/ASTSetQuery.h | 0 {dbms => src}/Parsers/ASTSetRoleQuery.cpp | 0 {dbms => src}/Parsers/ASTSetRoleQuery.h | 0 .../Parsers/ASTSettingsProfileElement.cpp | 0 .../Parsers/ASTSettingsProfileElement.h | 0 .../ASTShowCreateAccessEntityQuery.cpp | 0 .../Parsers/ASTShowCreateAccessEntityQuery.h | 0 {dbms => src}/Parsers/ASTShowGrantsQuery.cpp | 0 {dbms => src}/Parsers/ASTShowGrantsQuery.h | 0 .../Parsers/ASTShowProcesslistQuery.h | 0 {dbms => src}/Parsers/ASTShowQuotasQuery.cpp | 0 {dbms => src}/Parsers/ASTShowQuotasQuery.h | 0 .../Parsers/ASTShowRowPoliciesQuery.cpp | 0 .../Parsers/ASTShowRowPoliciesQuery.h | 0 {dbms => src}/Parsers/ASTShowTablesQuery.cpp | 0 {dbms => src}/Parsers/ASTShowTablesQuery.h | 0 {dbms => src}/Parsers/ASTSubquery.cpp | 0 {dbms => src}/Parsers/ASTSubquery.h | 0 {dbms => src}/Parsers/ASTSystemQuery.cpp | 0 {dbms => src}/Parsers/ASTSystemQuery.h | 0 {dbms => src}/Parsers/ASTTTLElement.cpp | 0 {dbms => src}/Parsers/ASTTTLElement.h | 0 .../Parsers/ASTTablesInSelectQuery.cpp | 0 .../Parsers/ASTTablesInSelectQuery.h | 0 {dbms => src}/Parsers/ASTUseQuery.h | 0 {dbms => src}/Parsers/ASTWatchQuery.h | 0 {dbms => src}/Parsers/ASTWithAlias.cpp | 0 {dbms => src}/Parsers/ASTWithAlias.h | 0 {dbms => src}/Parsers/CMakeLists.txt | 2 +- {dbms => src}/Parsers/CommonParsers.cpp | 0 {dbms => src}/Parsers/CommonParsers.h | 0 {dbms => src}/Parsers/DumpASTNode.h | 0 .../Parsers/ExpressionElementParsers.cpp | 0 .../Parsers/ExpressionElementParsers.h | 0 .../Parsers/ExpressionListParsers.cpp | 0 {dbms => src}/Parsers/ExpressionListParsers.h | 0 {dbms => src}/Parsers/IAST.cpp | 0 {dbms => src}/Parsers/IAST.h | 0 {dbms => src}/Parsers/IAST_fwd.h | 0 {dbms => src}/Parsers/IParser.h | 0 {dbms => src}/Parsers/IParserBase.cpp | 0 {dbms => src}/Parsers/IParserBase.h | 0 .../Parsers/IdentifierQuotingStyle.h | 0 {dbms => src}/Parsers/Lexer.cpp | 0 {dbms => src}/Parsers/Lexer.h | 0 {dbms => src}/Parsers/ParserAlterQuery.cpp | 0 {dbms => src}/Parsers/ParserAlterQuery.h | 0 {dbms => src}/Parsers/ParserCase.cpp | 0 {dbms => src}/Parsers/ParserCase.h | 0 {dbms => src}/Parsers/ParserCheckQuery.cpp | 0 {dbms => src}/Parsers/ParserCheckQuery.h | 0 {dbms => src}/Parsers/ParserCreateQuery.cpp | 0 {dbms => src}/Parsers/ParserCreateQuery.h | 0 .../Parsers/ParserCreateQuotaQuery.cpp | 0 .../Parsers/ParserCreateQuotaQuery.h | 0 .../Parsers/ParserCreateRoleQuery.cpp | 0 {dbms => src}/Parsers/ParserCreateRoleQuery.h | 0 .../Parsers/ParserCreateRowPolicyQuery.cpp | 0 .../Parsers/ParserCreateRowPolicyQuery.h | 0 .../ParserCreateSettingsProfileQuery.cpp | 0 .../ParserCreateSettingsProfileQuery.h | 0 .../Parsers/ParserCreateUserQuery.cpp | 0 {dbms => src}/Parsers/ParserCreateUserQuery.h | 0 .../Parsers/ParserDescribeTableQuery.cpp | 0 .../Parsers/ParserDescribeTableQuery.h | 0 {dbms => src}/Parsers/ParserDictionary.cpp | 0 {dbms => src}/Parsers/ParserDictionary.h | 0 .../ParserDictionaryAttributeDeclaration.cpp | 0 .../ParserDictionaryAttributeDeclaration.h | 0 .../Parsers/ParserDropAccessEntityQuery.cpp | 0 .../Parsers/ParserDropAccessEntityQuery.h | 0 {dbms => src}/Parsers/ParserDropQuery.cpp | 0 {dbms => src}/Parsers/ParserDropQuery.h | 0 .../Parsers/ParserExtendedRoleSet.cpp | 0 {dbms => 
src}/Parsers/ParserExtendedRoleSet.h | 0 {dbms => src}/Parsers/ParserGrantQuery.cpp | 0 {dbms => src}/Parsers/ParserGrantQuery.h | 0 {dbms => src}/Parsers/ParserInsertQuery.cpp | 0 {dbms => src}/Parsers/ParserInsertQuery.h | 0 .../Parsers/ParserKillQueryQuery.cpp | 0 {dbms => src}/Parsers/ParserKillQueryQuery.h | 0 {dbms => src}/Parsers/ParserOptimizeQuery.cpp | 0 {dbms => src}/Parsers/ParserOptimizeQuery.h | 0 {dbms => src}/Parsers/ParserPartition.cpp | 0 {dbms => src}/Parsers/ParserPartition.h | 0 {dbms => src}/Parsers/ParserQuery.cpp | 0 {dbms => src}/Parsers/ParserQuery.h | 0 .../Parsers/ParserQueryWithOutput.cpp | 0 {dbms => src}/Parsers/ParserQueryWithOutput.h | 0 {dbms => src}/Parsers/ParserRenameQuery.cpp | 0 {dbms => src}/Parsers/ParserRenameQuery.h | 0 {dbms => src}/Parsers/ParserSampleRatio.cpp | 0 {dbms => src}/Parsers/ParserSampleRatio.h | 0 {dbms => src}/Parsers/ParserSelectQuery.cpp | 0 {dbms => src}/Parsers/ParserSelectQuery.h | 0 .../Parsers/ParserSelectWithUnionQuery.cpp | 0 .../Parsers/ParserSelectWithUnionQuery.h | 0 {dbms => src}/Parsers/ParserSetQuery.cpp | 0 {dbms => src}/Parsers/ParserSetQuery.h | 0 {dbms => src}/Parsers/ParserSetRoleQuery.cpp | 0 {dbms => src}/Parsers/ParserSetRoleQuery.h | 0 .../Parsers/ParserSettingsProfileElement.cpp | 0 .../Parsers/ParserSettingsProfileElement.h | 0 .../ParserShowCreateAccessEntityQuery.cpp | 0 .../ParserShowCreateAccessEntityQuery.h | 0 .../Parsers/ParserShowGrantsQuery.cpp | 0 {dbms => src}/Parsers/ParserShowGrantsQuery.h | 0 .../Parsers/ParserShowProcesslistQuery.h | 0 .../Parsers/ParserShowQuotasQuery.cpp | 0 {dbms => src}/Parsers/ParserShowQuotasQuery.h | 0 .../Parsers/ParserShowRowPoliciesQuery.cpp | 0 .../Parsers/ParserShowRowPoliciesQuery.h | 0 .../Parsers/ParserShowTablesQuery.cpp | 0 {dbms => src}/Parsers/ParserShowTablesQuery.h | 0 {dbms => src}/Parsers/ParserSystemQuery.cpp | 0 {dbms => src}/Parsers/ParserSystemQuery.h | 0 .../Parsers/ParserTablePropertiesQuery.cpp | 0 .../Parsers/ParserTablePropertiesQuery.h | 0 .../Parsers/ParserTablesInSelectQuery.cpp | 0 .../Parsers/ParserTablesInSelectQuery.h | 0 .../Parsers/ParserUnionQueryElement.cpp | 0 .../Parsers/ParserUnionQueryElement.h | 0 {dbms => src}/Parsers/ParserUseQuery.cpp | 0 {dbms => src}/Parsers/ParserUseQuery.h | 0 {dbms => src}/Parsers/ParserWatchQuery.cpp | 0 {dbms => src}/Parsers/ParserWatchQuery.h | 0 {dbms => src}/Parsers/StringRange.h | 0 .../Parsers/TablePropertiesQueriesASTs.h | 0 {dbms => src}/Parsers/TokenIterator.cpp | 0 {dbms => src}/Parsers/TokenIterator.h | 0 {dbms => src}/Parsers/formatAST.cpp | 0 {dbms => src}/Parsers/formatAST.h | 0 .../Parsers/iostream_debug_helpers.cpp | 0 .../Parsers/iostream_debug_helpers.h | 0 .../Parsers/parseDatabaseAndTableName.cpp | 0 .../Parsers/parseDatabaseAndTableName.h | 0 .../parseIdentifierOrStringLiteral.cpp | 0 .../Parsers/parseIdentifierOrStringLiteral.h | 0 {dbms => src}/Parsers/parseIntervalKind.cpp | 0 {dbms => src}/Parsers/parseIntervalKind.h | 0 {dbms => src}/Parsers/parseQuery.cpp | 0 {dbms => src}/Parsers/parseQuery.h | 0 {dbms => src}/Parsers/parseUserName.cpp | 0 {dbms => src}/Parsers/parseUserName.h | 0 {dbms => src}/Parsers/queryToString.cpp | 0 {dbms => src}/Parsers/queryToString.h | 0 {dbms => src}/Parsers/tests/CMakeLists.txt | 0 {dbms => src}/Parsers/tests/create_parser.cpp | 0 .../Parsers/tests/gtest_dictionary_parser.cpp | 0 {dbms => src}/Parsers/tests/lexer.cpp | 0 {dbms => src}/Parsers/tests/select_parser.cpp | 0 {dbms => src}/Processors/CMakeLists.txt | 0 {dbms => 
src}/Processors/Chunk.cpp | 0 {dbms => src}/Processors/Chunk.h | 0 {dbms => src}/Processors/ConcatProcessor.cpp | 0 {dbms => src}/Processors/ConcatProcessor.h | 0 .../Processors/DelayedPortsProcessor.cpp | 0 .../Processors/DelayedPortsProcessor.h | 0 .../Executors/ParallelPipelineExecutor.cpp | 0 .../Executors/ParallelPipelineExecutor.h | 0 .../Processors/Executors/PipelineExecutor.cpp | 0 .../Processors/Executors/PipelineExecutor.h | 0 .../Executors/SequentialPipelineExecutor.cpp | 0 .../Executors/SequentialPipelineExecutor.h | 0 .../Processors/Executors/ThreadsQueue.h | 0 .../TreeExecutorBlockInputStream.cpp | 0 .../Executors/TreeExecutorBlockInputStream.h | 0 {dbms => src}/Processors/Executors/traverse.h | 0 {dbms => src}/Processors/ForkProcessor.cpp | 0 {dbms => src}/Processors/ForkProcessor.h | 0 .../Processors/Formats/IInputFormat.cpp | 0 .../Processors/Formats/IInputFormat.h | 0 .../Processors/Formats/IOutputFormat.cpp | 0 .../Processors/Formats/IOutputFormat.h | 0 .../Processors/Formats/IRowInputFormat.cpp | 0 .../Processors/Formats/IRowInputFormat.h | 0 .../Processors/Formats/IRowOutputFormat.cpp | 0 .../Processors/Formats/IRowOutputFormat.h | 0 .../Formats/Impl/ArrowColumnToCHColumn.cpp | 0 .../Formats/Impl/ArrowColumnToCHColumn.h | 0 .../Formats/Impl/AvroRowInputFormat.cpp | 0 .../Formats/Impl/AvroRowInputFormat.h | 0 .../Formats/Impl/AvroRowOutputFormat.cpp | 0 .../Formats/Impl/AvroRowOutputFormat.h | 0 .../Formats/Impl/BinaryRowInputFormat.cpp | 0 .../Formats/Impl/BinaryRowInputFormat.h | 0 .../Formats/Impl/BinaryRowOutputFormat.cpp | 0 .../Formats/Impl/BinaryRowOutputFormat.h | 0 .../Processors/Formats/Impl/CMakeLists.txt | 0 .../Formats/Impl/CSVRowInputFormat.cpp | 0 .../Formats/Impl/CSVRowInputFormat.h | 0 .../Formats/Impl/CSVRowOutputFormat.cpp | 0 .../Formats/Impl/CSVRowOutputFormat.h | 0 .../Formats/Impl/CapnProtoRowInputFormat.cpp | 0 .../Formats/Impl/CapnProtoRowInputFormat.h | 0 .../Impl/ConstantExpressionTemplate.cpp | 0 .../Formats/Impl/ConstantExpressionTemplate.h | 0 .../Impl/JSONCompactEachRowRowInputFormat.cpp | 0 .../Impl/JSONCompactEachRowRowInputFormat.h | 0 .../JSONCompactEachRowRowOutputFormat.cpp | 0 .../Impl/JSONCompactEachRowRowOutputFormat.h | 0 .../Impl/JSONCompactRowOutputFormat.cpp | 0 .../Formats/Impl/JSONCompactRowOutputFormat.h | 0 .../Impl/JSONEachRowRowInputFormat.cpp | 0 .../Formats/Impl/JSONEachRowRowInputFormat.h | 0 .../Impl/JSONEachRowRowOutputFormat.cpp | 0 .../Formats/Impl/JSONEachRowRowOutputFormat.h | 0 ...JSONEachRowWithProgressRowOutputFormat.cpp | 0 .../JSONEachRowWithProgressRowOutputFormat.h | 0 .../Formats/Impl/JSONRowOutputFormat.cpp | 0 .../Formats/Impl/JSONRowOutputFormat.h | 0 .../Formats/Impl/MySQLOutputFormat.cpp | 0 .../Formats/Impl/MySQLOutputFormat.h | 0 .../Processors/Formats/Impl/NativeFormat.cpp | 0 .../Processors/Formats/Impl/NullFormat.cpp | 0 .../Impl/ODBCDriver2BlockOutputFormat.cpp | 0 .../Impl/ODBCDriver2BlockOutputFormat.h | 0 .../Impl/ODBCDriverBlockOutputFormat.cpp | 0 .../Impl/ODBCDriverBlockOutputFormat.h | 0 .../Formats/Impl/ORCBlockInputFormat.cpp | 0 .../Formats/Impl/ORCBlockInputFormat.h | 0 .../Formats/Impl/ParquetBlockInputFormat.cpp | 0 .../Formats/Impl/ParquetBlockInputFormat.h | 0 .../Formats/Impl/ParquetBlockOutputFormat.cpp | 0 .../Formats/Impl/ParquetBlockOutputFormat.h | 0 .../Formats/Impl/PrettyBlockOutputFormat.cpp | 0 .../Formats/Impl/PrettyBlockOutputFormat.h | 0 .../Impl/PrettyCompactBlockOutputFormat.cpp | 0 .../Impl/PrettyCompactBlockOutputFormat.h | 0 
.../Impl/PrettySpaceBlockOutputFormat.cpp | 0 .../Impl/PrettySpaceBlockOutputFormat.h | 0 .../Formats/Impl/ProtobufRowInputFormat.cpp | 0 .../Formats/Impl/ProtobufRowInputFormat.h | 0 .../Formats/Impl/ProtobufRowOutputFormat.cpp | 0 .../Formats/Impl/ProtobufRowOutputFormat.h | 0 .../Formats/Impl/RegexpRowInputFormat.cpp | 0 .../Formats/Impl/RegexpRowInputFormat.h | 0 .../Formats/Impl/TSKVRowInputFormat.cpp | 0 .../Formats/Impl/TSKVRowInputFormat.h | 0 .../Formats/Impl/TSKVRowOutputFormat.cpp | 0 .../Formats/Impl/TSKVRowOutputFormat.h | 0 .../Impl/TabSeparatedRawRowOutputFormat.h | 0 .../Impl/TabSeparatedRowInputFormat.cpp | 0 .../Formats/Impl/TabSeparatedRowInputFormat.h | 0 .../Impl/TabSeparatedRowOutputFormat.cpp | 0 .../Impl/TabSeparatedRowOutputFormat.h | 0 .../Impl/TemplateBlockOutputFormat.cpp | 0 .../Formats/Impl/TemplateBlockOutputFormat.h | 0 .../Formats/Impl/TemplateRowInputFormat.cpp | 0 .../Formats/Impl/TemplateRowInputFormat.h | 0 .../Formats/Impl/ValuesBlockInputFormat.cpp | 0 .../Formats/Impl/ValuesBlockInputFormat.h | 0 .../Formats/Impl/ValuesRowOutputFormat.cpp | 0 .../Formats/Impl/ValuesRowOutputFormat.h | 0 .../Formats/Impl/VerticalRowOutputFormat.cpp | 0 .../Formats/Impl/VerticalRowOutputFormat.h | 0 .../Formats/Impl/XMLRowOutputFormat.cpp | 0 .../Formats/Impl/XMLRowOutputFormat.h | 0 .../Formats/InputStreamFromInputFormat.h | 0 .../Processors/Formats/LazyOutputFormat.cpp | 0 .../Processors/Formats/LazyOutputFormat.h | 0 .../Formats/OutputStreamToOutputFormat.cpp | 0 .../Formats/OutputStreamToOutputFormat.h | 0 .../RowInputFormatWithDiagnosticInfo.cpp | 0 .../RowInputFormatWithDiagnosticInfo.h | 0 .../Processors/IAccumulatingTransform.cpp | 0 .../Processors/IAccumulatingTransform.h | 0 .../Processors/IInflatingTransform.cpp | 0 .../Processors/IInflatingTransform.h | 0 {dbms => src}/Processors/IProcessor.cpp | 0 {dbms => src}/Processors/IProcessor.h | 0 {dbms => src}/Processors/ISimpleTransform.cpp | 0 {dbms => src}/Processors/ISimpleTransform.h | 0 {dbms => src}/Processors/ISink.cpp | 0 {dbms => src}/Processors/ISink.h | 0 {dbms => src}/Processors/ISource.cpp | 0 {dbms => src}/Processors/ISource.h | 0 {dbms => src}/Processors/LimitTransform.cpp | 0 {dbms => src}/Processors/LimitTransform.h | 0 {dbms => src}/Processors/NullSink.h | 0 {dbms => src}/Processors/Pipe.cpp | 0 {dbms => src}/Processors/Pipe.h | 0 {dbms => src}/Processors/Port.cpp | 0 {dbms => src}/Processors/Port.h | 0 {dbms => src}/Processors/QueryPipeline.cpp | 0 {dbms => src}/Processors/QueryPipeline.h | 0 {dbms => src}/Processors/QueueBuffer.h | 0 {dbms => src}/Processors/ResizeProcessor.cpp | 0 {dbms => src}/Processors/ResizeProcessor.h | 0 .../Processors/RowsBeforeLimitCounter.h | 0 {dbms => src}/Processors/Sources/NullSource.h | 0 .../Processors/Sources/SinkToOutputStream.cpp | 0 .../Processors/Sources/SinkToOutputStream.h | 0 .../Sources/SourceFromInputStream.cpp | 0 .../Sources/SourceFromInputStream.h | 0 .../Sources/SourceFromSingleChunk.h | 0 .../Processors/Sources/SourceWithProgress.cpp | 0 .../Processors/Sources/SourceWithProgress.h | 0 .../Transforms/AddingConstColumnTransform.h | 0 .../Transforms/AddingMissedTransform.cpp | 0 .../Transforms/AddingMissedTransform.h | 0 .../Transforms/AggregatingTransform.cpp | 0 .../Transforms/AggregatingTransform.h | 0 .../Transforms/ConvertingTransform.cpp | 0 .../Transforms/ConvertingTransform.h | 0 .../Transforms/CreatingSetsTransform.cpp | 0 .../Transforms/CreatingSetsTransform.h | 0 .../Processors/Transforms/CubeTransform.cpp | 0 
.../Processors/Transforms/CubeTransform.h | 0 .../Transforms/DistinctTransform.cpp | 0 .../Processors/Transforms/DistinctTransform.h | 0 .../Transforms/ExpressionTransform.cpp | 0 .../Transforms/ExpressionTransform.h | 0 .../Transforms/ExtremesTransform.cpp | 0 .../Processors/Transforms/ExtremesTransform.h | 0 .../Transforms/FillingTransform.cpp | 0 .../Processors/Transforms/FillingTransform.h | 0 .../Processors/Transforms/FilterTransform.cpp | 0 .../Processors/Transforms/FilterTransform.h | 0 .../Transforms/FinishSortingTransform.cpp | 0 .../Transforms/FinishSortingTransform.h | 0 .../InflatingExpressionTransform.cpp | 0 .../Transforms/InflatingExpressionTransform.h | 0 .../Transforms/LimitByTransform.cpp | 0 .../Processors/Transforms/LimitByTransform.h | 0 .../Transforms/LimitsCheckingTransform.cpp | 0 .../Transforms/LimitsCheckingTransform.h | 0 .../Transforms/MaterializingTransform.cpp | 0 .../Transforms/MaterializingTransform.h | 0 .../Transforms/MergeSortingTransform.cpp | 0 .../Transforms/MergeSortingTransform.h | 0 ...gingAggregatedMemoryEfficientTransform.cpp | 0 ...ergingAggregatedMemoryEfficientTransform.h | 0 .../Transforms/MergingAggregatedTransform.cpp | 0 .../Transforms/MergingAggregatedTransform.h | 0 .../Transforms/MergingSortedTransform.cpp | 0 .../Transforms/MergingSortedTransform.h | 0 .../Transforms/PartialSortingTransform.cpp | 0 .../Transforms/PartialSortingTransform.h | 0 .../Transforms/ReverseTransform.cpp | 0 .../Processors/Transforms/ReverseTransform.h | 0 .../Processors/Transforms/RollupTransform.cpp | 0 .../Processors/Transforms/RollupTransform.h | 0 .../Transforms/SortingTransform.cpp | 0 .../Processors/Transforms/SortingTransform.h | 0 .../Transforms/TotalsHavingTransform.cpp | 0 .../Transforms/TotalsHavingTransform.h | 0 {dbms => src}/Processors/printPipeline.h | 0 {dbms => src}/Processors/tests/CMakeLists.txt | 0 .../gtest_exception_on_incorrect_pipeline.cpp | 0 .../Processors/tests/processors_test.cpp | 0 .../tests/processors_test_aggregation.cpp | 0 .../tests/processors_test_chain.cpp | 0 .../tests/processors_test_expand_pipeline.cpp | 0 .../tests/processors_test_merge.cpp | 0 ...rocessors_test_merge_sorting_transform.cpp | 0 ...ocessors_test_merging_sorted_transform.cpp | 0 {dbms => src}/Storages/AlterCommands.cpp | 0 {dbms => src}/Storages/AlterCommands.h | 0 {dbms => src}/Storages/CMakeLists.txt | 0 {dbms => src}/Storages/CheckResults.h | 0 {dbms => src}/Storages/ColumnCodec.h | 0 {dbms => src}/Storages/ColumnDefault.cpp | 0 {dbms => src}/Storages/ColumnDefault.h | 0 {dbms => src}/Storages/ColumnDependency.h | 0 {dbms => src}/Storages/ColumnsDescription.cpp | 0 {dbms => src}/Storages/ColumnsDescription.h | 0 .../Storages/CompressionCodecSelector.h | 0 .../Storages/ConstraintsDescription.cpp | 0 .../Storages/ConstraintsDescription.h | 0 .../Storages/Distributed/DirectoryMonitor.cpp | 0 .../Storages/Distributed/DirectoryMonitor.h | 0 .../DistributedBlockOutputStream.cpp | 0 .../DistributedBlockOutputStream.h | 0 {dbms => src}/Storages/IStorage.cpp | 0 {dbms => src}/Storages/IStorage.h | 0 {dbms => src}/Storages/IStorage_fwd.h | 0 {dbms => src}/Storages/IndicesDescription.cpp | 0 {dbms => src}/Storages/IndicesDescription.h | 0 {dbms => src}/Storages/Kafka/Buffer_fwd.h | 0 .../Storages/Kafka/KafkaBlockInputStream.cpp | 0 .../Storages/Kafka/KafkaBlockInputStream.h | 0 .../Storages/Kafka/KafkaBlockOutputStream.cpp | 0 .../Storages/Kafka/KafkaBlockOutputStream.h | 0 .../Storages/Kafka/KafkaSettings.cpp | 0 {dbms => src}/Storages/Kafka/KafkaSettings.h | 0 
.../Kafka/ReadBufferFromKafkaConsumer.cpp | 0 .../Kafka/ReadBufferFromKafkaConsumer.h | 0 {dbms => src}/Storages/Kafka/StorageKafka.cpp | 0 {dbms => src}/Storages/Kafka/StorageKafka.h | 0 .../Kafka/WriteBufferToKafkaProducer.cpp | 0 .../Kafka/WriteBufferToKafkaProducer.h | 0 .../LiveView/LiveViewBlockInputStream.h | 0 .../LiveView/LiveViewBlockOutputStream.h | 0 .../Storages/LiveView/LiveViewCommands.h | 0 .../LiveView/LiveViewEventsBlockInputStream.h | 0 .../Storages/LiveView/StorageBlocks.h | 0 .../Storages/LiveView/StorageLiveView.cpp | 0 .../Storages/LiveView/StorageLiveView.h | 0 {dbms => src}/Storages/MarkCache.h | 0 .../Storages/MergeTree/ActiveDataPartSet.cpp | 0 .../Storages/MergeTree/ActiveDataPartSet.h | 0 .../Storages/MergeTree/AllMergeSelector.cpp | 0 .../Storages/MergeTree/AllMergeSelector.h | 0 .../MergeTree/BackgroundProcessingPool.cpp | 0 .../MergeTree/BackgroundProcessingPool.h | 0 {dbms => src}/Storages/MergeTree/BoolMask.cpp | 0 {dbms => src}/Storages/MergeTree/BoolMask.h | 0 .../Storages/MergeTree/DataPartsExchange.cpp | 0 .../Storages/MergeTree/DataPartsExchange.h | 0 .../MergeTree/EphemeralLockInZooKeeper.cpp | 0 .../MergeTree/EphemeralLockInZooKeeper.h | 0 .../Storages/MergeTree/IMergeTreeDataPart.cpp | 0 .../Storages/MergeTree/IMergeTreeDataPart.h | 0 .../MergeTree/IMergeTreeDataPartWriter.cpp | 0 .../MergeTree/IMergeTreeDataPartWriter.h | 0 .../Storages/MergeTree/IMergeTreeReader.cpp | 0 .../Storages/MergeTree/IMergeTreeReader.h | 0 .../MergeTree/IMergedBlockOutputStream.cpp | 0 .../MergeTree/IMergedBlockOutputStream.h | 0 .../Storages/MergeTree/KeyCondition.cpp | 0 .../Storages/MergeTree/KeyCondition.h | 0 .../Storages/MergeTree/LevelMergeSelector.cpp | 0 .../Storages/MergeTree/LevelMergeSelector.h | 0 {dbms => src}/Storages/MergeTree/MarkRange.h | 0 .../Storages/MergeTree/MergeList.cpp | 0 {dbms => src}/Storages/MergeTree/MergeList.h | 0 .../Storages/MergeTree/MergeSelector.h | 0 .../MergeTreeBaseSelectProcessor.cpp | 0 .../MergeTree/MergeTreeBaseSelectProcessor.h | 0 .../MergeTree/MergeTreeBlockOutputStream.cpp | 0 .../MergeTree/MergeTreeBlockOutputStream.h | 0 .../MergeTree/MergeTreeBlockReadUtils.cpp | 0 .../MergeTree/MergeTreeBlockReadUtils.h | 0 .../Storages/MergeTree/MergeTreeData.cpp | 0 .../Storages/MergeTree/MergeTreeData.h | 0 .../MergeTree/MergeTreeDataFormatVersion.h | 0 .../MergeTree/MergeTreeDataMergerMutator.cpp | 0 .../MergeTree/MergeTreeDataMergerMutator.h | 0 .../MergeTree/MergeTreeDataPartChecksum.cpp | 0 .../MergeTree/MergeTreeDataPartChecksum.h | 0 .../MergeTree/MergeTreeDataPartCompact.cpp | 0 .../MergeTree/MergeTreeDataPartCompact.h | 0 .../MergeTree/MergeTreeDataPartTTLInfo.cpp | 0 .../MergeTree/MergeTreeDataPartTTLInfo.h | 0 .../MergeTree/MergeTreeDataPartType.cpp | 0 .../MergeTree/MergeTreeDataPartType.h | 0 .../MergeTree/MergeTreeDataPartWide.cpp | 0 .../MergeTree/MergeTreeDataPartWide.h | 0 .../MergeTreeDataPartWriterCompact.cpp | 0 .../MergeTreeDataPartWriterCompact.h | 0 .../MergeTree/MergeTreeDataPartWriterWide.cpp | 0 .../MergeTree/MergeTreeDataPartWriterWide.h | 0 .../MergeTree/MergeTreeDataSelectExecutor.cpp | 0 .../MergeTree/MergeTreeDataSelectExecutor.h | 0 .../MergeTree/MergeTreeDataWriter.cpp | 0 .../Storages/MergeTree/MergeTreeDataWriter.h | 0 .../Storages/MergeTree/MergeTreeIOSettings.h | 0 .../MergeTreeIndexAggregatorBloomFilter.cpp | 0 .../MergeTreeIndexAggregatorBloomFilter.h | 0 .../MergeTree/MergeTreeIndexBloomFilter.cpp | 0 .../MergeTree/MergeTreeIndexBloomFilter.h | 0 .../MergeTreeIndexConditionBloomFilter.cpp | 0 
.../MergeTreeIndexConditionBloomFilter.h | 0 .../MergeTree/MergeTreeIndexFullText.cpp | 0 .../MergeTree/MergeTreeIndexFullText.h | 0 .../MergeTree/MergeTreeIndexGranularity.cpp | 0 .../MergeTree/MergeTreeIndexGranularity.h | 0 .../MergeTreeIndexGranularityInfo.cpp | 0 .../MergeTree/MergeTreeIndexGranularityInfo.h | 0 .../MergeTreeIndexGranularityInfo.h.gch | Bin .../MergeTreeIndexGranuleBloomFilter.cpp | 0 .../MergeTreeIndexGranuleBloomFilter.h | 0 .../MergeTree/MergeTreeIndexMinMax.cpp | 0 .../Storages/MergeTree/MergeTreeIndexMinMax.h | 0 .../MergeTree/MergeTreeIndexReader.cpp | 0 .../Storages/MergeTree/MergeTreeIndexReader.h | 0 .../Storages/MergeTree/MergeTreeIndexSet.cpp | 2 +- .../Storages/MergeTree/MergeTreeIndexSet.h | 0 .../Storages/MergeTree/MergeTreeIndices.cpp | 0 .../Storages/MergeTree/MergeTreeIndices.h | 0 .../MergeTree/MergeTreeMarksLoader.cpp | 0 .../Storages/MergeTree/MergeTreeMarksLoader.h | 0 .../MergeTree/MergeTreeMutationEntry.cpp | 0 .../MergeTree/MergeTreeMutationEntry.h | 0 .../MergeTree/MergeTreeMutationStatus.h | 0 .../Storages/MergeTree/MergeTreePartInfo.cpp | 0 .../Storages/MergeTree/MergeTreePartInfo.h | 0 .../Storages/MergeTree/MergeTreePartition.cpp | 0 .../Storages/MergeTree/MergeTreePartition.h | 0 .../MergeTree/MergeTreePartsMover.cpp | 0 .../Storages/MergeTree/MergeTreePartsMover.h | 0 .../MergeTree/MergeTreeRangeReader.cpp | 0 .../Storages/MergeTree/MergeTreeRangeReader.h | 0 .../Storages/MergeTree/MergeTreeReadPool.cpp | 0 .../Storages/MergeTree/MergeTreeReadPool.h | 0 .../MergeTree/MergeTreeReaderCompact.cpp | 0 .../MergeTree/MergeTreeReaderCompact.h | 0 .../MergeTree/MergeTreeReaderStream.cpp | 0 .../MergeTree/MergeTreeReaderStream.h | 0 .../MergeTree/MergeTreeReaderWide.cpp | 0 .../Storages/MergeTree/MergeTreeReaderWide.h | 0 .../MergeTreeReverseSelectProcessor.cpp | 0 .../MergeTreeReverseSelectProcessor.h | 0 .../MergeTree/MergeTreeSelectProcessor.cpp | 0 .../MergeTree/MergeTreeSelectProcessor.h | 0 .../MergeTreeSequentialBlockInputStream.cpp | 0 .../MergeTreeSequentialBlockInputStream.h | 0 .../Storages/MergeTree/MergeTreeSettings.cpp | 0 .../Storages/MergeTree/MergeTreeSettings.h | 0 ...rgeTreeThreadSelectBlockInputProcessor.cpp | 0 ...MergeTreeThreadSelectBlockInputProcessor.h | 0 .../MergeTree/MergeTreeWhereOptimizer.cpp | 0 .../MergeTree/MergeTreeWhereOptimizer.h | 0 .../MergeTree/MergedBlockOutputStream.cpp | 0 .../MergeTree/MergedBlockOutputStream.h | 0 .../MergedColumnOnlyOutputStream.cpp | 0 .../MergeTree/MergedColumnOnlyOutputStream.h | 0 .../Storages/MergeTree/PartDestinationType.h | 0 {dbms => src}/Storages/MergeTree/RPNBuilder.h | 0 .../Storages/MergeTree/RangesInDataPart.h | 0 .../MergeTree/ReplicatedMergeTreeAddress.cpp | 0 .../MergeTree/ReplicatedMergeTreeAddress.h | 0 .../ReplicatedMergeTreeAltersSequence.cpp | 0 .../ReplicatedMergeTreeAltersSequence.h | 0 .../ReplicatedMergeTreeBlockOutputStream.cpp | 0 .../ReplicatedMergeTreeBlockOutputStream.h | 0 .../ReplicatedMergeTreeCleanupThread.cpp | 0 .../ReplicatedMergeTreeCleanupThread.h | 0 .../MergeTree/ReplicatedMergeTreeLogEntry.cpp | 0 .../MergeTree/ReplicatedMergeTreeLogEntry.h | 0 .../ReplicatedMergeTreeMutationEntry.cpp | 0 .../ReplicatedMergeTreeMutationEntry.h | 0 .../ReplicatedMergeTreePartCheckThread.cpp | 0 .../ReplicatedMergeTreePartCheckThread.h | 0 .../ReplicatedMergeTreePartHeader.cpp | 0 .../MergeTree/ReplicatedMergeTreePartHeader.h | 0 .../MergeTree/ReplicatedMergeTreeQueue.cpp | 0 .../MergeTree/ReplicatedMergeTreeQueue.h | 0 .../ReplicatedMergeTreeQuorumAddedParts.h | 
0 .../ReplicatedMergeTreeQuorumEntry.h | 0 .../ReplicatedMergeTreeRestartingThread.cpp | 0 .../ReplicatedMergeTreeRestartingThread.h | 0 .../ReplicatedMergeTreeTableMetadata.cpp | 0 .../ReplicatedMergeTreeTableMetadata.h | 0 .../MergeTree/SimpleMergeSelector.cpp | 0 .../Storages/MergeTree/SimpleMergeSelector.h | 0 .../MergeTree/StorageFromMergeTreeDataPart.h | 0 .../Storages/MergeTree/TTLMergeSelector.cpp | 0 .../Storages/MergeTree/TTLMergeSelector.h | 0 .../Storages/MergeTree/checkDataPart.cpp | 0 .../Storages/MergeTree/checkDataPart.h | 0 .../Storages/MergeTree/localBackup.cpp | 0 .../Storages/MergeTree/localBackup.h | 0 .../MergeTree/registerStorageMergeTree.cpp | 2 +- {dbms => src}/Storages/MutationCommands.cpp | 0 {dbms => src}/Storages/MutationCommands.h | 0 {dbms => src}/Storages/PartitionCommands.cpp | 0 {dbms => src}/Storages/PartitionCommands.h | 0 .../Storages/ReadInOrderOptimizer.cpp | 0 {dbms => src}/Storages/ReadInOrderOptimizer.h | 0 {dbms => src}/Storages/SelectQueryInfo.h | 0 {dbms => src}/Storages/StorageBuffer.cpp | 0 {dbms => src}/Storages/StorageBuffer.h | 0 {dbms => src}/Storages/StorageDictionary.cpp | 0 {dbms => src}/Storages/StorageDictionary.h | 0 {dbms => src}/Storages/StorageDistributed.cpp | 0 {dbms => src}/Storages/StorageDistributed.h | 0 {dbms => src}/Storages/StorageFactory.cpp | 0 {dbms => src}/Storages/StorageFactory.h | 0 {dbms => src}/Storages/StorageFile.cpp | 0 {dbms => src}/Storages/StorageFile.h | 0 .../Storages/StorageGenerateRandom.cpp | 0 .../Storages/StorageGenerateRandom.h | 0 {dbms => src}/Storages/StorageHDFS.cpp | 0 {dbms => src}/Storages/StorageHDFS.h | 0 .../Storages/StorageInMemoryMetadata.cpp | 0 .../Storages/StorageInMemoryMetadata.h | 0 {dbms => src}/Storages/StorageInput.cpp | 0 {dbms => src}/Storages/StorageInput.h | 0 {dbms => src}/Storages/StorageJoin.cpp | 0 {dbms => src}/Storages/StorageJoin.h | 0 {dbms => src}/Storages/StorageLog.cpp | 0 {dbms => src}/Storages/StorageLog.h | 0 {dbms => src}/Storages/StorageLogSettings.cpp | 0 {dbms => src}/Storages/StorageLogSettings.h | 0 .../Storages/StorageMaterializedView.cpp | 0 .../Storages/StorageMaterializedView.h | 0 {dbms => src}/Storages/StorageMemory.cpp | 0 {dbms => src}/Storages/StorageMemory.h | 0 {dbms => src}/Storages/StorageMerge.cpp | 0 {dbms => src}/Storages/StorageMerge.h | 0 {dbms => src}/Storages/StorageMergeTree.cpp | 0 {dbms => src}/Storages/StorageMergeTree.h | 0 {dbms => src}/Storages/StorageMySQL.cpp | 0 {dbms => src}/Storages/StorageMySQL.h | 0 {dbms => src}/Storages/StorageNull.cpp | 0 {dbms => src}/Storages/StorageNull.h | 0 .../Storages/StorageReplicatedMergeTree.cpp | 0 .../Storages/StorageReplicatedMergeTree.h | 0 {dbms => src}/Storages/StorageS3.cpp | 0 {dbms => src}/Storages/StorageS3.h | 0 {dbms => src}/Storages/StorageSet.cpp | 0 {dbms => src}/Storages/StorageSet.h | 0 {dbms => src}/Storages/StorageStripeLog.cpp | 0 {dbms => src}/Storages/StorageStripeLog.h | 0 {dbms => src}/Storages/StorageTinyLog.cpp | 0 {dbms => src}/Storages/StorageTinyLog.h | 0 {dbms => src}/Storages/StorageURL.cpp | 0 {dbms => src}/Storages/StorageURL.h | 0 {dbms => src}/Storages/StorageValues.cpp | 0 {dbms => src}/Storages/StorageValues.h | 0 {dbms => src}/Storages/StorageView.cpp | 0 {dbms => src}/Storages/StorageView.h | 0 {dbms => src}/Storages/StorageXDBC.cpp | 0 {dbms => src}/Storages/StorageXDBC.h | 0 {dbms => src}/Storages/System/CMakeLists.txt | 0 .../Storages/System/IStorageSystemOneBlock.h | 0 ...rageSystemAggregateFunctionCombinators.cpp | 0 
...torageSystemAggregateFunctionCombinators.h | 0 .../StorageSystemAsynchronousMetrics.cpp | 0 .../System/StorageSystemAsynchronousMetrics.h | 0 .../System/StorageSystemBuildOptions.cpp | 0 ...StorageSystemBuildOptions.generated.cpp.in | 0 .../System/StorageSystemBuildOptions.h | 0 .../Storages/System/StorageSystemClusters.cpp | 0 .../Storages/System/StorageSystemClusters.h | 0 .../System/StorageSystemCollations.cpp | 0 .../Storages/System/StorageSystemCollations.h | 0 .../Storages/System/StorageSystemColumns.cpp | 0 .../Storages/System/StorageSystemColumns.h | 0 .../System/StorageSystemContributors.cpp | 0 .../StorageSystemContributors.generated.cpp | 0 .../System/StorageSystemContributors.h | 0 .../System/StorageSystemContributors.sh | 0 .../System/StorageSystemDataTypeFamilies.cpp | 0 .../System/StorageSystemDataTypeFamilies.h | 0 .../System/StorageSystemDatabases.cpp | 0 .../Storages/System/StorageSystemDatabases.h | 0 .../System/StorageSystemDetachedParts.cpp | 0 .../System/StorageSystemDetachedParts.h | 0 .../System/StorageSystemDictionaries.cpp | 0 .../System/StorageSystemDictionaries.h | 0 .../Storages/System/StorageSystemDisks.cpp | 0 .../Storages/System/StorageSystemDisks.h | 0 .../Storages/System/StorageSystemEvents.cpp | 0 .../Storages/System/StorageSystemEvents.h | 0 .../Storages/System/StorageSystemFormats.cpp | 0 .../Storages/System/StorageSystemFormats.h | 0 .../System/StorageSystemFunctions.cpp | 0 .../Storages/System/StorageSystemFunctions.h | 0 .../Storages/System/StorageSystemGraphite.cpp | 0 .../Storages/System/StorageSystemGraphite.h | 0 .../Storages/System/StorageSystemMacros.cpp | 0 .../Storages/System/StorageSystemMacros.h | 0 .../System/StorageSystemMergeTreeSettings.cpp | 0 .../System/StorageSystemMergeTreeSettings.h | 0 .../Storages/System/StorageSystemMerges.cpp | 0 .../Storages/System/StorageSystemMerges.h | 0 .../Storages/System/StorageSystemMetrics.cpp | 0 .../Storages/System/StorageSystemMetrics.h | 0 .../Storages/System/StorageSystemModels.cpp | 0 .../Storages/System/StorageSystemModels.h | 0 .../System/StorageSystemMutations.cpp | 0 .../Storages/System/StorageSystemMutations.h | 0 .../Storages/System/StorageSystemNumbers.cpp | 0 .../Storages/System/StorageSystemNumbers.h | 0 .../Storages/System/StorageSystemOne.cpp | 0 .../Storages/System/StorageSystemOne.h | 0 .../Storages/System/StorageSystemParts.cpp | 0 .../Storages/System/StorageSystemParts.h | 0 .../System/StorageSystemPartsBase.cpp | 0 .../Storages/System/StorageSystemPartsBase.h | 0 .../System/StorageSystemPartsColumns.cpp | 0 .../System/StorageSystemPartsColumns.h | 0 .../System/StorageSystemProcesses.cpp | 0 .../Storages/System/StorageSystemProcesses.h | 0 .../System/StorageSystemQuotaUsage.cpp | 0 .../Storages/System/StorageSystemQuotaUsage.h | 0 .../Storages/System/StorageSystemQuotas.cpp | 0 .../Storages/System/StorageSystemQuotas.h | 0 .../Storages/System/StorageSystemReplicas.cpp | 0 .../Storages/System/StorageSystemReplicas.h | 0 .../System/StorageSystemReplicationQueue.cpp | 0 .../System/StorageSystemReplicationQueue.h | 0 .../System/StorageSystemRowPolicies.cpp | 0 .../System/StorageSystemRowPolicies.h | 0 .../Storages/System/StorageSystemSettings.cpp | 0 .../Storages/System/StorageSystemSettings.h | 0 .../System/StorageSystemStackTrace.cpp | 0 .../Storages/System/StorageSystemStackTrace.h | 0 .../System/StorageSystemStoragePolicies.cpp | 0 .../System/StorageSystemStoragePolicies.h | 0 .../System/StorageSystemTableEngines.cpp | 0 .../System/StorageSystemTableEngines.h | 0 
.../System/StorageSystemTableFunctions.cpp | 0 .../System/StorageSystemTableFunctions.h | 0 .../Storages/System/StorageSystemTables.cpp | 0 .../Storages/System/StorageSystemTables.h | 0 .../Storages/System/StorageSystemZeros.cpp | 0 .../Storages/System/StorageSystemZeros.h | 0 .../System/StorageSystemZooKeeper.cpp | 0 .../Storages/System/StorageSystemZooKeeper.h | 0 .../Storages/System/attachSystemTables.cpp | 0 .../Storages/System/attachSystemTables.h | 0 .../Storages/TableStructureLockHolder.h | 0 {dbms => src}/Storages/VirtualColumnUtils.cpp | 0 {dbms => src}/Storages/VirtualColumnUtils.h | 0 .../Storages/getStructureOfRemoteTable.cpp | 0 .../Storages/getStructureOfRemoteTable.h | 0 {dbms => src}/Storages/registerStorages.cpp | 0 {dbms => src}/Storages/registerStorages.h | 0 {dbms => src}/Storages/tests/CMakeLists.txt | 0 {dbms => src}/Storages/tests/active_parts.py | 0 ...get_abandonable_lock_in_all_partitions.cpp | 0 .../get_current_inserts_in_replicated.cpp | 0 ...est_aux_funcs_for_adaptive_granularity.cpp | 0 .../tests/gtest_row_source_bits_test.cpp | 0 .../Storages/tests/gtest_storage_log.cpp | 0 ..._transform_query_for_external_database.cpp | 0 .../Storages/tests/merge_selector.cpp | 0 .../Storages/tests/merge_selector2.cpp | 0 {dbms => src}/Storages/tests/part_name.cpp | 0 .../tests/remove_symlink_directory.cpp | 0 {dbms => src}/Storages/tests/storage_log.cpp | 0 .../Storages/tests/system_numbers.cpp | 0 .../Storages/tests/test_alter_distributed.sql | 0 .../Storages/tests/test_alter_merge.sql | 0 .../Storages/tests/test_alter_merge_tree.sql | 0 .../tests/transform_part_zk_nodes.cpp | 0 .../transformQueryForExternalDatabase.cpp | 0 .../transformQueryForExternalDatabase.h | 0 {dbms => src}/TableFunctions/CMakeLists.txt | 0 .../TableFunctions/ITableFunction.cpp | 0 {dbms => src}/TableFunctions/ITableFunction.h | 0 .../TableFunctions/ITableFunctionFileLike.cpp | 0 .../TableFunctions/ITableFunctionFileLike.h | 0 .../TableFunctions/ITableFunctionXDBC.cpp | 0 .../TableFunctions/ITableFunctionXDBC.h | 0 .../TableFunctions/TableFunctionFactory.cpp | 0 .../TableFunctions/TableFunctionFactory.h | 0 .../TableFunctions/TableFunctionFile.cpp | 0 .../TableFunctions/TableFunctionFile.h | 0 .../TableFunctionGenerateRandom.cpp | 0 .../TableFunctionGenerateRandom.h | 0 .../TableFunctions/TableFunctionHDFS.cpp | 0 .../TableFunctions/TableFunctionHDFS.h | 0 .../TableFunctions/TableFunctionInput.cpp | 0 .../TableFunctions/TableFunctionInput.h | 0 .../TableFunctions/TableFunctionMerge.cpp | 0 .../TableFunctions/TableFunctionMerge.h | 0 .../TableFunctions/TableFunctionMySQL.cpp | 0 .../TableFunctions/TableFunctionMySQL.h | 0 .../TableFunctions/TableFunctionNumbers.cpp | 0 .../TableFunctions/TableFunctionNumbers.h | 0 .../TableFunctions/TableFunctionRemote.cpp | 0 .../TableFunctions/TableFunctionRemote.h | 0 .../TableFunctions/TableFunctionS3.cpp | 0 .../TableFunctions/TableFunctionS3.h | 0 .../TableFunctions/TableFunctionURL.cpp | 0 .../TableFunctions/TableFunctionURL.h | 0 .../TableFunctions/TableFunctionValues.cpp | 0 .../TableFunctions/TableFunctionValues.h | 0 .../TableFunctions/TableFunctionZeros.cpp | 0 .../TableFunctions/TableFunctionZeros.h | 0 .../parseColumnsListForTableFunction.cpp | 0 .../parseColumnsListForTableFunction.h | 0 .../TableFunctions/registerTableFunctions.cpp | 0 .../TableFunctions/registerTableFunctions.h | 0 utils/ci/run-clickhouse-from-binaries.sh | 4 +- utils/release/release_lib.sh | 4 +- 2836 files changed, 402 insertions(+), 409 deletions(-) delete mode 100644 
dbms/Client/CMakeLists.txt rename {dbms => src}/Access/AccessControlManager.cpp (100%) rename {dbms => src}/Access/AccessControlManager.h (100%) rename {dbms => src}/Access/AccessFlags.h (100%) rename {dbms => src}/Access/AccessRights.cpp (100%) rename {dbms => src}/Access/AccessRights.h (100%) rename {dbms => src}/Access/AccessRightsElement.cpp (100%) rename {dbms => src}/Access/AccessRightsElement.h (100%) rename {dbms => src}/Access/AccessType.h (100%) rename {dbms => src}/Access/AllowedClientHosts.cpp (100%) rename {dbms => src}/Access/AllowedClientHosts.h (100%) rename {dbms => src}/Access/Authentication.cpp (100%) rename {dbms => src}/Access/Authentication.h (100%) rename {dbms => src}/Access/CMakeLists.txt (100%) rename {dbms => src}/Access/ContextAccess.cpp (100%) rename {dbms => src}/Access/ContextAccess.h (100%) rename {dbms => src}/Access/DiskAccessStorage.cpp (100%) rename {dbms => src}/Access/DiskAccessStorage.h (100%) rename {dbms => src}/Access/EnabledQuota.cpp (100%) rename {dbms => src}/Access/EnabledQuota.h (100%) rename {dbms => src}/Access/EnabledRoles.cpp (100%) rename {dbms => src}/Access/EnabledRoles.h (100%) rename {dbms => src}/Access/EnabledRolesInfo.cpp (100%) rename {dbms => src}/Access/EnabledRolesInfo.h (100%) rename {dbms => src}/Access/EnabledRowPolicies.cpp (100%) rename {dbms => src}/Access/EnabledRowPolicies.h (100%) rename {dbms => src}/Access/EnabledSettings.cpp (100%) rename {dbms => src}/Access/EnabledSettings.h (100%) rename {dbms => src}/Access/ExtendedRoleSet.cpp (100%) rename {dbms => src}/Access/ExtendedRoleSet.h (100%) rename {dbms => src}/Access/IAccessEntity.cpp (100%) rename {dbms => src}/Access/IAccessEntity.h (100%) rename {dbms => src}/Access/IAccessStorage.cpp (100%) rename {dbms => src}/Access/IAccessStorage.h (100%) rename {dbms => src}/Access/MemoryAccessStorage.cpp (100%) rename {dbms => src}/Access/MemoryAccessStorage.h (100%) rename {dbms => src}/Access/MultipleAccessStorage.cpp (100%) rename {dbms => src}/Access/MultipleAccessStorage.h (100%) rename {dbms => src}/Access/Quota.cpp (100%) rename {dbms => src}/Access/Quota.h (100%) rename {dbms => src}/Access/QuotaCache.cpp (100%) rename {dbms => src}/Access/QuotaCache.h (100%) rename {dbms => src}/Access/QuotaUsageInfo.cpp (100%) rename {dbms => src}/Access/QuotaUsageInfo.h (100%) rename {dbms => src}/Access/Role.cpp (100%) rename {dbms => src}/Access/Role.h (100%) rename {dbms => src}/Access/RoleCache.cpp (100%) rename {dbms => src}/Access/RoleCache.h (100%) rename {dbms => src}/Access/RowPolicy.cpp (100%) rename {dbms => src}/Access/RowPolicy.h (100%) rename {dbms => src}/Access/RowPolicyCache.cpp (100%) rename {dbms => src}/Access/RowPolicyCache.h (100%) rename {dbms => src}/Access/SettingsConstraints.cpp (100%) rename {dbms => src}/Access/SettingsConstraints.h (100%) rename {dbms => src}/Access/SettingsProfile.cpp (100%) rename {dbms => src}/Access/SettingsProfile.h (100%) rename {dbms => src}/Access/SettingsProfileElement.cpp (100%) rename {dbms => src}/Access/SettingsProfileElement.h (100%) rename {dbms => src}/Access/SettingsProfilesCache.cpp (100%) rename {dbms => src}/Access/SettingsProfilesCache.h (100%) rename {dbms => src}/Access/User.cpp (100%) rename {dbms => src}/Access/User.h (100%) rename {dbms => src}/Access/UsersConfigAccessStorage.cpp (100%) rename {dbms => src}/Access/UsersConfigAccessStorage.h (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionAggThrow.cpp (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionArgMinMax.h (100%) rename 
{dbms => src}/AggregateFunctions/AggregateFunctionArray.cpp (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionArray.h (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionAvg.cpp (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionAvg.h (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionAvgWeighted.cpp (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionAvgWeighted.h (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionBitwise.cpp (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionBitwise.h (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionBoundingRatio.cpp (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionBoundingRatio.h (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionCategoricalInformationValue.cpp (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionCategoricalInformationValue.h (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionCombinatorFactory.cpp (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionCombinatorFactory.h (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionCount.cpp (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionCount.h (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionEntropy.cpp (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionEntropy.h (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionFactory.cpp (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionFactory.h (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionForEach.cpp (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionForEach.h (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionGroupArray.cpp (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionGroupArray.h (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionGroupArrayInsertAt.cpp (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionGroupArrayInsertAt.h (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionGroupArrayMoving.cpp (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionGroupArrayMoving.h (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionGroupBitmap.cpp (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionGroupBitmap.h (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionGroupBitmapData.h (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionGroupUniqArray.cpp (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionGroupUniqArray.h (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionHistogram.cpp (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionHistogram.h (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionIf.cpp (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionIf.h (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionMLMethod.cpp (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionMLMethod.h (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionMaxIntersections.cpp (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionMaxIntersections.h (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionMerge.cpp (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionMerge.h (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionMinMaxAny.cpp (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionMinMaxAny.h (100%) rename {dbms => 
src}/AggregateFunctions/AggregateFunctionNothing.h (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionNull.cpp (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionNull.h (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionOrFill.cpp (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionOrFill.h (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionQuantile.cpp (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionQuantile.h (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionResample.cpp (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionResample.h (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionRetention.cpp (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionRetention.h (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionSequenceMatch.cpp (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionSequenceMatch.h (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionSimpleLinearRegression.cpp (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionSimpleLinearRegression.h (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionState.cpp (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionState.h (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionStatistics.cpp (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionStatistics.h (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionStatisticsSimple.cpp (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionStatisticsSimple.h (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionSum.cpp (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionSum.h (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionSumMap.cpp (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionSumMap.h (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionTimeSeriesGroupSum.cpp (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionTimeSeriesGroupSum.h (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionTopK.cpp (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionTopK.h (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionUniq.cpp (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionUniq.h (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionUniqCombined.cpp (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionUniqCombined.h (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionUniqUpTo.cpp (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionUniqUpTo.h (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionWindowFunnel.cpp (100%) rename {dbms => src}/AggregateFunctions/AggregateFunctionWindowFunnel.h (100%) rename {dbms => src}/AggregateFunctions/CMakeLists.txt (100%) rename {dbms => src}/AggregateFunctions/FactoryHelpers.h (100%) rename {dbms => src}/AggregateFunctions/Helpers.h (100%) rename {dbms => src}/AggregateFunctions/HelpersMinMaxAny.h (100%) rename {dbms => src}/AggregateFunctions/IAggregateFunction.h (100%) rename {dbms => src}/AggregateFunctions/IAggregateFunctionCombinator.h (100%) rename {dbms => src}/AggregateFunctions/QuantileExact.h (100%) rename {dbms => src}/AggregateFunctions/QuantileExactWeighted.h (100%) rename {dbms => src}/AggregateFunctions/QuantileReservoirSampler.h (100%) rename {dbms => src}/AggregateFunctions/QuantileReservoirSamplerDeterministic.h (100%) rename {dbms => 
src}/AggregateFunctions/QuantileTDigest.h (100%) rename {dbms => src}/AggregateFunctions/QuantileTiming.h (100%) rename {dbms => src}/AggregateFunctions/QuantilesCommon.h (100%) rename {dbms => src}/AggregateFunctions/ReservoirSampler.h (100%) rename {dbms => src}/AggregateFunctions/ReservoirSamplerDeterministic.h (100%) rename {dbms => src}/AggregateFunctions/UniqCombinedBiasData.cpp (100%) rename {dbms => src}/AggregateFunctions/UniqCombinedBiasData.h (89%) rename {dbms => src}/AggregateFunctions/UniqVariadicHash.cpp (100%) rename {dbms => src}/AggregateFunctions/UniqVariadicHash.h (100%) rename {dbms => src}/AggregateFunctions/UniquesHashSet.h (100%) rename {dbms => src}/AggregateFunctions/parseAggregateFunctionParameters.cpp (100%) rename {dbms => src}/AggregateFunctions/parseAggregateFunctionParameters.h (100%) rename {dbms => src}/AggregateFunctions/registerAggregateFunctions.cpp (100%) rename {dbms => src}/AggregateFunctions/registerAggregateFunctions.h (100%) rename {dbms => src}/AggregateFunctions/tests/CMakeLists.txt (100%) rename {dbms => src}/AggregateFunctions/tests/quantile-t-digest.cpp (100%) rename {dbms => src}/CMakeLists.txt (99%) create mode 100644 src/Client/CMakeLists.txt rename {dbms => src}/Client/Connection.cpp (100%) rename {dbms => src}/Client/Connection.h (100%) rename {dbms => src}/Client/ConnectionPool.h (100%) rename {dbms => src}/Client/ConnectionPoolWithFailover.cpp (100%) rename {dbms => src}/Client/ConnectionPoolWithFailover.h (100%) rename {dbms => src}/Client/MultiplexedConnections.cpp (100%) rename {dbms => src}/Client/MultiplexedConnections.h (100%) rename {dbms => src}/Client/TimeoutSetter.cpp (100%) rename {dbms => src}/Client/TimeoutSetter.h (100%) rename {dbms => src}/Client/tests/CMakeLists.txt (100%) rename {dbms => src}/Client/tests/test_connect.cpp (100%) rename {dbms => src}/Columns/CMakeLists.txt (100%) rename {dbms => src}/Columns/Collator.cpp (100%) rename {dbms => src}/Columns/Collator.h (100%) rename {dbms => src}/Columns/ColumnAggregateFunction.cpp (100%) rename {dbms => src}/Columns/ColumnAggregateFunction.h (100%) rename {dbms => src}/Columns/ColumnArray.cpp (100%) rename {dbms => src}/Columns/ColumnArray.h (100%) rename {dbms => src}/Columns/ColumnConst.cpp (100%) rename {dbms => src}/Columns/ColumnConst.h (100%) rename {dbms => src}/Columns/ColumnDecimal.cpp (100%) rename {dbms => src}/Columns/ColumnDecimal.h (100%) rename {dbms => src}/Columns/ColumnFixedString.cpp (100%) rename {dbms => src}/Columns/ColumnFixedString.h (100%) rename {dbms => src}/Columns/ColumnFunction.cpp (100%) rename {dbms => src}/Columns/ColumnFunction.h (100%) rename {dbms => src}/Columns/ColumnLowCardinality.cpp (100%) rename {dbms => src}/Columns/ColumnLowCardinality.h (100%) rename {dbms => src}/Columns/ColumnNothing.h (100%) rename {dbms => src}/Columns/ColumnNullable.cpp (100%) rename {dbms => src}/Columns/ColumnNullable.h (100%) rename {dbms => src}/Columns/ColumnSet.h (100%) rename {dbms => src}/Columns/ColumnString.cpp (100%) rename {dbms => src}/Columns/ColumnString.h (100%) rename {dbms => src}/Columns/ColumnTuple.cpp (100%) rename {dbms => src}/Columns/ColumnTuple.h (100%) rename {dbms => src}/Columns/ColumnUnique.h (100%) rename {dbms => src}/Columns/ColumnVector.cpp (100%) rename {dbms => src}/Columns/ColumnVector.h (100%) rename {dbms => src}/Columns/ColumnVectorHelper.h (100%) rename {dbms => src}/Columns/ColumnsCommon.cpp (100%) rename {dbms => src}/Columns/ColumnsCommon.h (100%) rename {dbms => src}/Columns/ColumnsNumber.h (100%) rename {dbms 
=> src}/Columns/FilterDescription.cpp (100%) rename {dbms => src}/Columns/FilterDescription.h (100%) rename {dbms => src}/Columns/IColumn.cpp (100%) rename {dbms => src}/Columns/IColumn.h (100%) rename {dbms => src}/Columns/IColumnDummy.h (100%) rename {dbms => src}/Columns/IColumnImpl.h (100%) rename {dbms => src}/Columns/IColumnUnique.h (100%) rename {dbms => src}/Columns/ReverseIndex.h (100%) rename {dbms => src}/Columns/getLeastSuperColumn.cpp (100%) rename {dbms => src}/Columns/getLeastSuperColumn.h (100%) rename {dbms => src}/Columns/tests/CMakeLists.txt (100%) rename {dbms => src}/Columns/tests/gtest_column_unique.cpp (100%) rename {dbms => src}/Columns/tests/gtest_weak_hash_32.cpp (100%) rename {dbms => src}/Common/ActionBlocker.h (100%) rename {dbms => src}/Common/ActionLock.cpp (100%) rename {dbms => src}/Common/ActionLock.h (100%) rename {dbms => src}/Common/AlignedBuffer.cpp (100%) rename {dbms => src}/Common/AlignedBuffer.h (100%) rename {dbms => src}/Common/Allocator.h (100%) rename {dbms => src}/Common/Allocator_fwd.h (100%) rename {dbms => src}/Common/Arena.h (100%) rename {dbms => src}/Common/ArenaAllocator.h (100%) rename {dbms => src}/Common/ArenaWithFreeLists.h (100%) rename {dbms => src}/Common/ArrayCache.h (100%) rename {dbms => src}/Common/AutoArray.h (100%) rename {dbms => src}/Common/BitHelpers.h (100%) rename {dbms => src}/Common/CMakeLists.txt (100%) rename {dbms => src}/Common/COW.h (100%) rename {dbms => src}/Common/ClickHouseRevision.cpp (100%) rename {dbms => src}/Common/ClickHouseRevision.h (100%) rename {dbms => src}/Common/ColumnsHashing.h (100%) rename {dbms => src}/Common/ColumnsHashingImpl.h (100%) rename {dbms => src}/Common/CombinedCardinalityEstimator.h (100%) rename {dbms => src}/Common/CompactArray.h (100%) rename {dbms => src}/Common/ConcurrentBoundedQueue.h (100%) rename {dbms => src}/Common/Config/AbstractConfigurationComparison.cpp (100%) rename {dbms => src}/Common/Config/AbstractConfigurationComparison.h (100%) rename {dbms => src}/Common/Config/CMakeLists.txt (100%) rename {dbms => src}/Common/Config/ConfigProcessor.cpp (100%) rename {dbms => src}/Common/Config/ConfigProcessor.h (100%) rename {dbms => src}/Common/Config/ConfigReloader.cpp (100%) rename {dbms => src}/Common/Config/ConfigReloader.h (100%) rename {dbms => src}/Common/Config/configReadClient.cpp (100%) rename {dbms => src}/Common/Config/configReadClient.h (100%) rename {dbms => src}/Common/CounterInFile.h (100%) rename {dbms => src}/Common/CpuId.h (100%) rename {dbms => src}/Common/CurrentMetrics.cpp (100%) rename {dbms => src}/Common/CurrentMetrics.h (100%) rename {dbms => src}/Common/CurrentThread.cpp (100%) rename {dbms => src}/Common/CurrentThread.h (100%) rename {dbms => src}/Common/DNSResolver.cpp (100%) rename {dbms => src}/Common/DNSResolver.h (100%) rename {dbms => src}/Common/Dwarf.cpp (100%) rename {dbms => src}/Common/Dwarf.h (100%) rename {dbms => src}/Common/Elf.cpp (100%) rename {dbms => src}/Common/Elf.h (100%) rename {dbms => src}/Common/ErrorCodes.cpp (100%) rename {dbms => src}/Common/EventCounter.h (100%) rename {dbms => src}/Common/Exception.cpp (100%) rename {dbms => src}/Common/Exception.h (100%) rename {dbms => src}/Common/ExternalLoaderStatus.cpp (100%) rename {dbms => src}/Common/ExternalLoaderStatus.h (100%) rename {dbms => src}/Common/FieldVisitors.cpp (100%) rename {dbms => src}/Common/FieldVisitors.h (100%) rename {dbms => src}/Common/FileChecker.cpp (100%) rename {dbms => src}/Common/FileChecker.h (100%) rename {dbms => 
src}/Common/FileUpdatesTracker.h (100%) rename {dbms => src}/Common/HTMLForm.h (100%) rename {dbms => src}/Common/HashTable/ClearableHashMap.h (100%) rename {dbms => src}/Common/HashTable/ClearableHashSet.h (100%) rename {dbms => src}/Common/HashTable/FixedClearableHashMap.h (100%) rename {dbms => src}/Common/HashTable/FixedClearableHashSet.h (100%) rename {dbms => src}/Common/HashTable/FixedHashMap.h (100%) rename {dbms => src}/Common/HashTable/FixedHashSet.h (100%) rename {dbms => src}/Common/HashTable/FixedHashTable.h (100%) rename {dbms => src}/Common/HashTable/Hash.h (100%) rename {dbms => src}/Common/HashTable/HashMap.h (100%) rename {dbms => src}/Common/HashTable/HashSet.h (100%) rename {dbms => src}/Common/HashTable/HashTable.h (100%) rename {dbms => src}/Common/HashTable/HashTableAllocator.h (100%) rename {dbms => src}/Common/HashTable/HashTableKeyHolder.h (100%) rename {dbms => src}/Common/HashTable/SmallTable.h (100%) rename {dbms => src}/Common/HashTable/StringHashMap.h (100%) rename {dbms => src}/Common/HashTable/StringHashTable.h (100%) rename {dbms => src}/Common/HashTable/TwoLevelHashMap.h (100%) rename {dbms => src}/Common/HashTable/TwoLevelHashTable.h (100%) rename {dbms => src}/Common/HashTable/TwoLevelStringHashMap.h (100%) rename {dbms => src}/Common/HashTable/TwoLevelStringHashTable.h (100%) rename {dbms => src}/Common/HyperLogLogBiasEstimator.h (100%) rename {dbms => src}/Common/HyperLogLogCounter.h (100%) rename {dbms => src}/Common/HyperLogLogWithSmallSetOptimization.h (100%) rename {dbms => src}/Common/IFactoryWithAliases.h (100%) rename {dbms => src}/Common/IPv6ToBinary.cpp (100%) rename {dbms => src}/Common/IPv6ToBinary.h (100%) rename {dbms => src}/Common/Increment.h (100%) rename {dbms => src}/Common/InterruptListener.h (100%) rename {dbms => src}/Common/IntervalKind.cpp (100%) rename {dbms => src}/Common/IntervalKind.h (100%) rename {dbms => src}/Common/LRUCache.h (100%) rename {dbms => src}/Common/Macros.cpp (100%) rename {dbms => src}/Common/Macros.h (100%) rename {dbms => src}/Common/MemorySanitizer.h (100%) rename {dbms => src}/Common/MemoryTracker.cpp (100%) rename {dbms => src}/Common/MemoryTracker.h (100%) rename {dbms => src}/Common/MultiVersion.h (100%) rename {dbms => src}/Common/NaNUtils.h (100%) rename {dbms => src}/Common/NamePrompter.h (100%) rename {dbms => src}/Common/NetException.h (100%) rename {dbms => src}/Common/ObjectPool.h (100%) rename {dbms => src}/Common/OpenSSLHelpers.cpp (100%) rename {dbms => src}/Common/OpenSSLHelpers.h (100%) rename {dbms => src}/Common/OptimizedRegularExpression.cpp (100%) rename {dbms => src}/Common/OptimizedRegularExpression.h (100%) rename {dbms => src}/Common/PODArray.cpp (100%) rename {dbms => src}/Common/PODArray.h (100%) rename {dbms => src}/Common/PODArray_fwd.h (100%) rename {dbms => src}/Common/PipeFDs.cpp (100%) rename {dbms => src}/Common/PipeFDs.h (100%) rename {dbms => src}/Common/PoolBase.h (100%) rename {dbms => src}/Common/PoolWithFailoverBase.h (100%) rename {dbms => src}/Common/ProfileEvents.cpp (100%) rename {dbms => src}/Common/ProfileEvents.h (100%) rename {dbms => src}/Common/ProfilingScopedRWLock.h (100%) rename {dbms => src}/Common/QueryProfiler.cpp (100%) rename {dbms => src}/Common/QueryProfiler.h (100%) rename {dbms => src}/Common/RWLock.cpp (100%) rename {dbms => src}/Common/RWLock.h (100%) rename {dbms => src}/Common/RadixSort.h (100%) rename {dbms => src}/Common/RemoteHostFilter.cpp (100%) rename {dbms => src}/Common/RemoteHostFilter.h (100%) rename {dbms => 
src}/Common/SensitiveDataMasker.cpp (100%) rename {dbms => src}/Common/SensitiveDataMasker.h (100%) rename {dbms => src}/Common/SettingsChanges.h (100%) rename {dbms => src}/Common/SharedBlockRowRef.h (100%) rename {dbms => src}/Common/SharedLibrary.cpp (100%) rename {dbms => src}/Common/SharedLibrary.h (100%) rename {dbms => src}/Common/ShellCommand.cpp (100%) rename {dbms => src}/Common/ShellCommand.h (100%) rename {dbms => src}/Common/SimpleActionBlocker.h (100%) rename {dbms => src}/Common/SimpleIncrement.h (100%) rename {dbms => src}/Common/SipHash.h (100%) rename {dbms => src}/Common/SmallObjectPool.h (100%) rename {dbms => src}/Common/SpaceSaving.h (100%) rename {dbms => src}/Common/StackTrace.cpp (100%) rename {dbms => src}/Common/StackTrace.h (100%) rename {dbms => src}/Common/StatusFile.cpp (100%) rename {dbms => src}/Common/StatusFile.h (100%) rename {dbms => src}/Common/StatusInfo.cpp (100%) rename {dbms => src}/Common/StatusInfo.h (100%) rename {dbms => src}/Common/Stopwatch.cpp (100%) rename {dbms => src}/Common/Stopwatch.h (100%) rename {dbms => src}/Common/StringSearcher.h (100%) rename {dbms => src}/Common/StringUtils/CMakeLists.txt (100%) rename {dbms => src}/Common/StringUtils/StringUtils.cpp (100%) rename {dbms => src}/Common/StringUtils/StringUtils.h (100%) rename {dbms => src}/Common/StudentTTest.cpp (100%) rename {dbms => src}/Common/StudentTTest.h (100%) rename {dbms => src}/Common/SymbolIndex.cpp (100%) rename {dbms => src}/Common/SymbolIndex.h (100%) rename {dbms => src}/Common/TaskStatsInfoGetter.cpp (100%) rename {dbms => src}/Common/TaskStatsInfoGetter.h (100%) rename {dbms => src}/Common/TerminalSize.cpp (100%) rename {dbms => src}/Common/TerminalSize.h (100%) rename {dbms => src}/Common/ThreadFuzzer.cpp (100%) rename {dbms => src}/Common/ThreadFuzzer.h (100%) rename {dbms => src}/Common/ThreadPool.cpp (100%) rename {dbms => src}/Common/ThreadPool.h (100%) rename {dbms => src}/Common/ThreadProfileEvents.h (100%) rename {dbms => src}/Common/ThreadStatus.cpp (100%) rename {dbms => src}/Common/ThreadStatus.h (100%) rename {dbms => src}/Common/Throttler.h (100%) rename {dbms => src}/Common/TraceCollector.cpp (100%) rename {dbms => src}/Common/TraceCollector.h (100%) rename {dbms => src}/Common/TypeList.h (100%) rename {dbms => src}/Common/TypePromotion.h (100%) rename {dbms => src}/Common/UInt128.h (100%) rename {dbms => src}/Common/UTF8Helpers.cpp (100%) rename {dbms => src}/Common/UTF8Helpers.h (100%) rename {dbms => src}/Common/UnicodeBar.h (100%) rename {dbms => src}/Common/VariableContext.h (100%) rename {dbms => src}/Common/Visitor.h (100%) rename {dbms => src}/Common/Volnitsky.h (100%) rename {dbms => src}/Common/WeakHash.cpp (100%) rename {dbms => src}/Common/WeakHash.h (100%) rename {dbms => src}/Common/XDBCBridgeHelper.h (100%) rename {dbms => src}/Common/ZooKeeper/CMakeLists.txt (100%) rename {dbms => src}/Common/ZooKeeper/Common.h (100%) rename {dbms => src}/Common/ZooKeeper/IKeeper.cpp (100%) rename {dbms => src}/Common/ZooKeeper/IKeeper.h (100%) rename {dbms => src}/Common/ZooKeeper/Increment.h (100%) rename {dbms => src}/Common/ZooKeeper/KeeperException.h (100%) rename {dbms => src}/Common/ZooKeeper/LeaderElection.h (100%) rename {dbms => src}/Common/ZooKeeper/Lock.cpp (100%) rename {dbms => src}/Common/ZooKeeper/Lock.h (100%) rename {dbms => src}/Common/ZooKeeper/TestKeeper.cpp (100%) rename {dbms => src}/Common/ZooKeeper/TestKeeper.h (100%) rename {dbms => src}/Common/ZooKeeper/Types.h (100%) rename {dbms => src}/Common/ZooKeeper/ZooKeeper.cpp 
(100%) rename {dbms => src}/Common/ZooKeeper/ZooKeeper.h (100%) rename {dbms => src}/Common/ZooKeeper/ZooKeeperHolder.cpp (100%) rename {dbms => src}/Common/ZooKeeper/ZooKeeperHolder.h (100%) rename {dbms => src}/Common/ZooKeeper/ZooKeeperImpl.cpp (100%) rename {dbms => src}/Common/ZooKeeper/ZooKeeperImpl.h (100%) rename {dbms => src}/Common/ZooKeeper/ZooKeeperNodeCache.cpp (100%) rename {dbms => src}/Common/ZooKeeper/ZooKeeperNodeCache.h (100%) rename {dbms => src}/Common/ZooKeeper/tests/CMakeLists.txt (100%) rename {dbms => src}/Common/ZooKeeper/tests/gtest_zkutil_test_multi_exception.cpp (100%) rename {dbms => src}/Common/ZooKeeper/tests/nozk.sh (100%) rename {dbms => src}/Common/ZooKeeper/tests/yeszk.sh (100%) rename {dbms => src}/Common/ZooKeeper/tests/zk_many_watches_reconnect.cpp (100%) rename {dbms => src}/Common/ZooKeeper/tests/zkutil_expiration_test.cpp (100%) rename {dbms => src}/Common/ZooKeeper/tests/zkutil_test_async.cpp (100%) rename {dbms => src}/Common/ZooKeeper/tests/zkutil_test_commands.cpp (100%) rename {dbms => src}/Common/ZooKeeper/tests/zkutil_test_commands_new_lib.cpp (100%) rename {dbms => src}/Common/ZooKeeper/tests/zkutil_test_lock.cpp (100%) rename {dbms => src}/Common/ZooKeeper/tests/zkutil_zookeeper_holder.cpp (100%) rename {dbms => src}/Common/ZooKeeper/tests/zookeeper_impl.cpp (100%) rename {dbms => src}/Common/assert_cast.h (100%) rename {dbms => src}/Common/checkStackSize.cpp (100%) rename {dbms => src}/Common/checkStackSize.h (100%) rename {dbms => src}/Common/config.h.in (100%) rename {dbms => src}/Common/config_version.h.in (100%) rename {dbms => src}/Common/createHardLink.cpp (100%) rename {dbms => src}/Common/createHardLink.h (100%) rename {dbms => src}/Common/escapeForFileName.cpp (100%) rename {dbms => src}/Common/escapeForFileName.h (100%) rename {dbms => src}/Common/filesystemHelpers.cpp (100%) rename {dbms => src}/Common/filesystemHelpers.h (100%) rename {dbms => src}/Common/formatIPv6.cpp (100%) rename {dbms => src}/Common/formatIPv6.h (100%) rename {dbms => src}/Common/formatReadable.cpp (100%) rename {dbms => src}/Common/formatReadable.h (100%) rename {dbms => src}/Common/getExecutablePath.cpp (100%) rename {dbms => src}/Common/getExecutablePath.h (100%) rename {dbms => src}/Common/getMultipleKeysFromConfig.cpp (100%) rename {dbms => src}/Common/getMultipleKeysFromConfig.h (100%) rename {dbms => src}/Common/getNumberOfPhysicalCPUCores.cpp (100%) rename {dbms => src}/Common/getNumberOfPhysicalCPUCores.h (100%) rename {dbms => src}/Common/hasLinuxCapability.cpp (100%) rename {dbms => src}/Common/hasLinuxCapability.h (100%) rename {dbms => src}/Common/hex.cpp (100%) rename {dbms => src}/Common/hex.h (100%) rename {dbms => src}/Common/intExp.h (100%) rename {dbms => src}/Common/interpolate.h (100%) rename {dbms => src}/Common/isLocalAddress.cpp (100%) rename {dbms => src}/Common/isLocalAddress.h (100%) rename {dbms => src}/Common/malloc.cpp (100%) rename {dbms => src}/Common/memcmpSmall.h (100%) rename {dbms => src}/Common/memcpySmall.h (100%) rename {dbms => src}/Common/new_delete.cpp (100%) rename {dbms => src}/Common/parseAddress.cpp (100%) rename {dbms => src}/Common/parseAddress.h (100%) rename {dbms => src}/Common/parseGlobs.cpp (100%) rename {dbms => src}/Common/parseGlobs.h (100%) rename {dbms => src}/Common/parseRemoteDescription.cpp (100%) rename {dbms => src}/Common/parseRemoteDescription.h (100%) rename {dbms => src}/Common/quoteString.cpp (100%) rename {dbms => src}/Common/quoteString.h (100%) rename {dbms => 
src}/Common/randomSeed.cpp (100%) rename {dbms => src}/Common/randomSeed.h (100%) rename {dbms => src}/Common/setThreadName.cpp (100%) rename {dbms => src}/Common/setThreadName.h (100%) rename {dbms => src}/Common/tests/CMakeLists.txt (100%) rename {dbms => src}/Common/tests/arena_with_free_lists.cpp (100%) rename {dbms => src}/Common/tests/array_cache.cpp (100%) rename {dbms => src}/Common/tests/auto_array.cpp (100%) rename {dbms => src}/Common/tests/chaos_sanitizer.cpp (100%) rename {dbms => src}/Common/tests/compact_array.cpp (100%) rename {dbms => src}/Common/tests/cow_columns.cpp (100%) rename {dbms => src}/Common/tests/cow_compositions.cpp (100%) rename {dbms => src}/Common/tests/gtest_getMultipleValuesFromConfig.cpp (100%) rename {dbms => src}/Common/tests/gtest_global_context.h (100%) rename {dbms => src}/Common/tests/gtest_makeRegexpPatternFromGlobs.cpp (100%) rename {dbms => src}/Common/tests/gtest_pod_array.cpp (100%) rename {dbms => src}/Common/tests/gtest_rw_lock.cpp (100%) rename {dbms => src}/Common/tests/gtest_sensitive_data_masker.cpp (100%) rename {dbms => src}/Common/tests/gtest_shell_command.cpp (100%) rename {dbms => src}/Common/tests/gtest_thread_pool_concurrent_wait.cpp (100%) rename {dbms => src}/Common/tests/gtest_thread_pool_global_full.cpp (100%) rename {dbms => src}/Common/tests/gtest_thread_pool_limit.cpp (100%) rename {dbms => src}/Common/tests/gtest_thread_pool_loop.cpp (100%) rename {dbms => src}/Common/tests/gtest_thread_pool_schedule_exception.cpp (100%) rename {dbms => src}/Common/tests/gtest_unescapeForFileName.cpp (100%) rename {dbms => src}/Common/tests/hash_table.cpp (100%) rename {dbms => src}/Common/tests/hashes_test.cpp (100%) rename {dbms => src}/Common/tests/int_hashes_perf.cpp (100%) rename {dbms => src}/Common/tests/integer_hash_tables_and_hashes.cpp (100%) rename {dbms => src}/Common/tests/parallel_aggregation.cpp (100%) rename {dbms => src}/Common/tests/parallel_aggregation2.cpp (100%) rename {dbms => src}/Common/tests/pod_array.cpp (100%) rename {dbms => src}/Common/tests/radix_sort.cpp (100%) rename {dbms => src}/Common/tests/simple_cache.cpp (100%) rename {dbms => src}/Common/tests/sip_hash_perf.cpp (100%) rename {dbms => src}/Common/tests/small_table.cpp (100%) rename {dbms => src}/Common/tests/space_saving.cpp (100%) rename {dbms => src}/Common/tests/stopwatch.cpp (100%) rename {dbms => src}/Common/tests/symbol_index.cpp (100%) rename {dbms => src}/Common/tests/thread_creation_latency.cpp (100%) rename {dbms => src}/Common/thread_local_rng.cpp (100%) rename {dbms => src}/Common/thread_local_rng.h (100%) rename {dbms => src}/Common/typeid_cast.h (100%) rename {dbms => src}/Compression/CMakeLists.txt (100%) rename {dbms => src}/Compression/CachedCompressedReadBuffer.cpp (100%) rename {dbms => src}/Compression/CachedCompressedReadBuffer.h (100%) rename {dbms => src}/Compression/CompressedReadBuffer.cpp (100%) rename {dbms => src}/Compression/CompressedReadBuffer.h (100%) rename {dbms => src}/Compression/CompressedReadBufferBase.cpp (100%) rename {dbms => src}/Compression/CompressedReadBufferBase.h (100%) rename {dbms => src}/Compression/CompressedReadBufferFromFile.cpp (100%) rename {dbms => src}/Compression/CompressedReadBufferFromFile.h (100%) rename {dbms => src}/Compression/CompressedWriteBuffer.cpp (100%) rename {dbms => src}/Compression/CompressedWriteBuffer.h (100%) rename {dbms => src}/Compression/CompressionCodecDelta.cpp (100%) rename {dbms => src}/Compression/CompressionCodecDelta.h (100%) rename {dbms => 
src}/Compression/CompressionCodecDoubleDelta.cpp (100%) rename {dbms => src}/Compression/CompressionCodecDoubleDelta.h (100%) rename {dbms => src}/Compression/CompressionCodecGorilla.cpp (100%) rename {dbms => src}/Compression/CompressionCodecGorilla.h (100%) rename {dbms => src}/Compression/CompressionCodecLZ4.cpp (100%) rename {dbms => src}/Compression/CompressionCodecLZ4.h (100%) rename {dbms => src}/Compression/CompressionCodecMultiple.cpp (100%) rename {dbms => src}/Compression/CompressionCodecMultiple.h (100%) rename {dbms => src}/Compression/CompressionCodecNone.cpp (100%) rename {dbms => src}/Compression/CompressionCodecNone.h (100%) rename {dbms => src}/Compression/CompressionCodecT64.cpp (100%) rename {dbms => src}/Compression/CompressionCodecT64.h (100%) rename {dbms => src}/Compression/CompressionCodecZSTD.cpp (100%) rename {dbms => src}/Compression/CompressionCodecZSTD.h (100%) rename {dbms => src}/Compression/CompressionFactory.cpp (100%) rename {dbms => src}/Compression/CompressionFactory.h (100%) rename {dbms => src}/Compression/CompressionInfo.h (100%) rename {dbms => src}/Compression/ICompressionCodec.cpp (100%) rename {dbms => src}/Compression/ICompressionCodec.h (100%) rename {dbms => src}/Compression/LZ4_decompress_faster.cpp (100%) rename {dbms => src}/Compression/LZ4_decompress_faster.h (100%) rename {dbms => src}/Compression/tests/CMakeLists.txt (100%) rename {dbms => src}/Compression/tests/cached_compressed_read_buffer.cpp (100%) rename {dbms => src}/Compression/tests/compressed_buffer.cpp (100%) rename {dbms => src}/Compression/tests/compressed_buffer_fuzz.cpp (100%) rename {dbms => src}/Compression/tests/gtest_compressionCodec.cpp (100%) rename {dbms => src}/Core/AccurateComparison.h (100%) rename {dbms => src}/Core/BackgroundSchedulePool.cpp (100%) rename {dbms => src}/Core/BackgroundSchedulePool.h (100%) rename {dbms => src}/Core/Block.cpp (100%) rename {dbms => src}/Core/Block.h (100%) rename {dbms => src}/Core/BlockInfo.cpp (100%) rename {dbms => src}/Core/BlockInfo.h (100%) rename {dbms => src}/Core/CMakeLists.txt (100%) rename {dbms => src}/Core/ColumnNumbers.h (100%) rename {dbms => src}/Core/ColumnWithTypeAndName.cpp (100%) rename {dbms => src}/Core/ColumnWithTypeAndName.h (100%) rename {dbms => src}/Core/ColumnsWithTypeAndName.h (100%) rename {dbms => src}/Core/DecimalComparison.h (100%) rename {dbms => src}/Core/DecimalFunctions.h (100%) rename {dbms => src}/Core/Defines.h (100%) rename {dbms => src}/Core/ExternalResultDescription.cpp (100%) rename {dbms => src}/Core/ExternalResultDescription.h (100%) rename {dbms => src}/Core/ExternalTable.cpp (100%) rename {dbms => src}/Core/ExternalTable.h (100%) rename {dbms => src}/Core/Field.cpp (100%) rename {dbms => src}/Core/Field.h (100%) rename {dbms => src}/Core/MySQLProtocol.cpp (100%) rename {dbms => src}/Core/MySQLProtocol.h (100%) rename {dbms => src}/Core/Names.h (100%) rename {dbms => src}/Core/NamesAndTypes.cpp (100%) rename {dbms => src}/Core/NamesAndTypes.h (100%) rename {dbms => src}/Core/Protocol.h (100%) rename {dbms => src}/Core/QualifiedTableName.h (100%) rename {dbms => src}/Core/QueryProcessingStage.h (100%) rename {dbms => src}/Core/Row.h (100%) rename {dbms => src}/Core/Settings.cpp (100%) rename {dbms => src}/Core/Settings.h (100%) rename {dbms => src}/Core/SettingsCollection.cpp (100%) rename {dbms => src}/Core/SettingsCollection.h (100%) rename {dbms => src}/Core/SettingsCollectionImpl.h (100%) rename {dbms => src}/Core/SortCursor.h (100%) rename {dbms => src}/Core/SortDescription.h 
(100%) rename {dbms => src}/Core/TypeListNumber.h (100%) rename {dbms => src}/Core/Types.h (100%) rename {dbms => src}/Core/UUID.h (100%) rename {dbms => src}/Core/callOnTypeIndex.h (100%) rename {dbms => src}/Core/config_core.h.in (100%) rename {dbms => src}/Core/iostream_debug_helpers.cpp (100%) rename {dbms => src}/Core/iostream_debug_helpers.h (100%) rename {dbms => src}/Core/tests/CMakeLists.txt (100%) rename {dbms => src}/Core/tests/field.cpp (100%) rename {dbms => src}/Core/tests/gtest_DecimalFunctions.cpp (100%) rename {dbms => src}/Core/tests/move_field.cpp (100%) rename {dbms => src}/Core/tests/string_pool.cpp (100%) rename {dbms => src}/Core/tests/string_ref_hash.cpp (100%) rename {dbms => src}/DataStreams/AddingConstColumnBlockInputStream.h (100%) rename {dbms => src}/DataStreams/AddingDefaultBlockOutputStream.cpp (100%) rename {dbms => src}/DataStreams/AddingDefaultBlockOutputStream.h (100%) rename {dbms => src}/DataStreams/AddingDefaultsBlockInputStream.cpp (100%) rename {dbms => src}/DataStreams/AddingDefaultsBlockInputStream.h (100%) rename {dbms => src}/DataStreams/AggregatingBlockInputStream.cpp (100%) rename {dbms => src}/DataStreams/AggregatingBlockInputStream.h (100%) rename {dbms => src}/DataStreams/AggregatingSortedBlockInputStream.cpp (100%) rename {dbms => src}/DataStreams/AggregatingSortedBlockInputStream.h (100%) rename {dbms => src}/DataStreams/AsynchronousBlockInputStream.cpp (100%) rename {dbms => src}/DataStreams/AsynchronousBlockInputStream.h (100%) rename {dbms => src}/DataStreams/BlockIO.cpp (100%) rename {dbms => src}/DataStreams/BlockIO.h (100%) rename {dbms => src}/DataStreams/BlockStreamProfileInfo.cpp (100%) rename {dbms => src}/DataStreams/BlockStreamProfileInfo.h (100%) rename {dbms => src}/DataStreams/BlocksBlockInputStream.h (100%) rename {dbms => src}/DataStreams/BlocksListBlockInputStream.h (100%) rename {dbms => src}/DataStreams/CMakeLists.txt (100%) rename {dbms => src}/DataStreams/CheckConstraintsBlockOutputStream.cpp (100%) rename {dbms => src}/DataStreams/CheckConstraintsBlockOutputStream.h (100%) rename {dbms => src}/DataStreams/CheckSortedBlockInputStream.cpp (100%) rename {dbms => src}/DataStreams/CheckSortedBlockInputStream.h (100%) rename {dbms => src}/DataStreams/CollapsingFinalBlockInputStream.cpp (100%) rename {dbms => src}/DataStreams/CollapsingFinalBlockInputStream.h (100%) rename {dbms => src}/DataStreams/CollapsingSortedBlockInputStream.cpp (100%) rename {dbms => src}/DataStreams/CollapsingSortedBlockInputStream.h (100%) rename {dbms => src}/DataStreams/ColumnGathererStream.cpp (100%) rename {dbms => src}/DataStreams/ColumnGathererStream.h (100%) rename {dbms => src}/DataStreams/ConcatBlockInputStream.h (100%) rename {dbms => src}/DataStreams/ConvertColumnLowCardinalityToFullBlockInputStream.h (100%) rename {dbms => src}/DataStreams/ConvertingBlockInputStream.cpp (100%) rename {dbms => src}/DataStreams/ConvertingBlockInputStream.h (100%) rename {dbms => src}/DataStreams/CountingBlockOutputStream.cpp (100%) rename {dbms => src}/DataStreams/CountingBlockOutputStream.h (100%) rename {dbms => src}/DataStreams/CreatingSetsBlockInputStream.cpp (100%) rename {dbms => src}/DataStreams/CreatingSetsBlockInputStream.h (100%) rename {dbms => src}/DataStreams/CubeBlockInputStream.cpp (100%) rename {dbms => src}/DataStreams/CubeBlockInputStream.h (100%) rename {dbms => src}/DataStreams/DistinctBlockInputStream.cpp (100%) rename {dbms => src}/DataStreams/DistinctBlockInputStream.h (100%) rename {dbms => 
src}/DataStreams/DistinctSortedBlockInputStream.cpp (100%) rename {dbms => src}/DataStreams/DistinctSortedBlockInputStream.h (100%) rename {dbms => src}/DataStreams/ExecutionSpeedLimits.cpp (100%) rename {dbms => src}/DataStreams/ExecutionSpeedLimits.h (100%) rename {dbms => src}/DataStreams/ExpressionBlockInputStream.cpp (100%) rename {dbms => src}/DataStreams/ExpressionBlockInputStream.h (100%) rename {dbms => src}/DataStreams/FillingBlockInputStream.cpp (100%) rename {dbms => src}/DataStreams/FillingBlockInputStream.h (100%) rename {dbms => src}/DataStreams/FilterBlockInputStream.cpp (100%) rename {dbms => src}/DataStreams/FilterBlockInputStream.h (100%) rename {dbms => src}/DataStreams/FilterColumnsBlockInputStream.cpp (100%) rename {dbms => src}/DataStreams/FilterColumnsBlockInputStream.h (100%) rename {dbms => src}/DataStreams/FinishSortingBlockInputStream.cpp (100%) rename {dbms => src}/DataStreams/FinishSortingBlockInputStream.h (100%) rename {dbms => src}/DataStreams/GraphiteRollupSortedBlockInputStream.cpp (100%) rename {dbms => src}/DataStreams/GraphiteRollupSortedBlockInputStream.h (100%) rename {dbms => src}/DataStreams/IBlockInputStream.cpp (100%) rename {dbms => src}/DataStreams/IBlockInputStream.h (100%) rename {dbms => src}/DataStreams/IBlockOutputStream.h (100%) rename {dbms => src}/DataStreams/IBlockStream_fwd.h (100%) rename {dbms => src}/DataStreams/InputStreamFromASTInsertQuery.cpp (100%) rename {dbms => src}/DataStreams/InputStreamFromASTInsertQuery.h (100%) rename {dbms => src}/DataStreams/InternalTextLogsRowOutputStream.cpp (100%) rename {dbms => src}/DataStreams/InternalTextLogsRowOutputStream.h (100%) rename {dbms => src}/DataStreams/LazyBlockInputStream.h (100%) rename {dbms => src}/DataStreams/LimitBlockInputStream.cpp (100%) rename {dbms => src}/DataStreams/LimitBlockInputStream.h (100%) rename {dbms => src}/DataStreams/LimitByBlockInputStream.cpp (100%) rename {dbms => src}/DataStreams/LimitByBlockInputStream.h (100%) rename {dbms => src}/DataStreams/MarkInCompressedFile.h (100%) rename {dbms => src}/DataStreams/MaterializingBlockInputStream.cpp (100%) rename {dbms => src}/DataStreams/MaterializingBlockInputStream.h (100%) rename {dbms => src}/DataStreams/MaterializingBlockOutputStream.h (100%) rename {dbms => src}/DataStreams/MergeSortingBlockInputStream.cpp (100%) rename {dbms => src}/DataStreams/MergeSortingBlockInputStream.h (100%) rename {dbms => src}/DataStreams/MergingAggregatedBlockInputStream.cpp (100%) rename {dbms => src}/DataStreams/MergingAggregatedBlockInputStream.h (100%) rename {dbms => src}/DataStreams/MergingAggregatedMemoryEfficientBlockInputStream.cpp (100%) rename {dbms => src}/DataStreams/MergingAggregatedMemoryEfficientBlockInputStream.h (100%) rename {dbms => src}/DataStreams/MergingSortedBlockInputStream.cpp (100%) rename {dbms => src}/DataStreams/MergingSortedBlockInputStream.h (100%) rename {dbms => src}/DataStreams/NativeBlockInputStream.cpp (100%) rename {dbms => src}/DataStreams/NativeBlockInputStream.h (100%) rename {dbms => src}/DataStreams/NativeBlockOutputStream.cpp (100%) rename {dbms => src}/DataStreams/NativeBlockOutputStream.h (100%) rename {dbms => src}/DataStreams/NullAndDoCopyBlockInputStream.h (100%) rename {dbms => src}/DataStreams/NullBlockInputStream.h (100%) rename {dbms => src}/DataStreams/NullBlockOutputStream.h (100%) rename {dbms => src}/DataStreams/OneBlockInputStream.h (100%) rename {dbms => src}/DataStreams/OwningBlockInputStream.h (100%) rename {dbms => 
src}/DataStreams/ParallelAggregatingBlockInputStream.cpp (100%) rename {dbms => src}/DataStreams/ParallelAggregatingBlockInputStream.h (100%) rename {dbms => src}/DataStreams/ParallelInputsProcessor.h (100%) rename {dbms => src}/DataStreams/ParallelParsingBlockInputStream.cpp (100%) rename {dbms => src}/DataStreams/ParallelParsingBlockInputStream.h (100%) rename {dbms => src}/DataStreams/PartialSortingBlockInputStream.cpp (100%) rename {dbms => src}/DataStreams/PartialSortingBlockInputStream.h (100%) rename {dbms => src}/DataStreams/PushingToViewsBlockOutputStream.cpp (100%) rename {dbms => src}/DataStreams/PushingToViewsBlockOutputStream.h (100%) rename {dbms => src}/DataStreams/RemoteBlockInputStream.cpp (100%) rename {dbms => src}/DataStreams/RemoteBlockInputStream.h (100%) rename {dbms => src}/DataStreams/RemoteBlockOutputStream.cpp (100%) rename {dbms => src}/DataStreams/RemoteBlockOutputStream.h (100%) rename {dbms => src}/DataStreams/ReplacingSortedBlockInputStream.cpp (100%) rename {dbms => src}/DataStreams/ReplacingSortedBlockInputStream.h (100%) rename {dbms => src}/DataStreams/ReverseBlockInputStream.cpp (100%) rename {dbms => src}/DataStreams/ReverseBlockInputStream.h (100%) rename {dbms => src}/DataStreams/RollupBlockInputStream.cpp (100%) rename {dbms => src}/DataStreams/RollupBlockInputStream.h (100%) rename {dbms => src}/DataStreams/SizeLimits.cpp (100%) rename {dbms => src}/DataStreams/SizeLimits.h (100%) rename {dbms => src}/DataStreams/SquashingBlockInputStream.cpp (100%) rename {dbms => src}/DataStreams/SquashingBlockInputStream.h (100%) rename {dbms => src}/DataStreams/SquashingBlockOutputStream.cpp (100%) rename {dbms => src}/DataStreams/SquashingBlockOutputStream.h (100%) rename {dbms => src}/DataStreams/SquashingTransform.cpp (100%) rename {dbms => src}/DataStreams/SquashingTransform.h (100%) rename {dbms => src}/DataStreams/SummingSortedBlockInputStream.cpp (100%) rename {dbms => src}/DataStreams/SummingSortedBlockInputStream.h (100%) rename {dbms => src}/DataStreams/TTLBlockInputStream.cpp (100%) rename {dbms => src}/DataStreams/TTLBlockInputStream.h (100%) rename {dbms => src}/DataStreams/TemporaryFileStream.h (100%) rename {dbms => src}/DataStreams/TotalsHavingBlockInputStream.cpp (100%) rename {dbms => src}/DataStreams/TotalsHavingBlockInputStream.h (100%) rename {dbms => src}/DataStreams/UnionBlockInputStream.h (100%) rename {dbms => src}/DataStreams/VersionedCollapsingSortedBlockInputStream.cpp (100%) rename {dbms => src}/DataStreams/VersionedCollapsingSortedBlockInputStream.h (100%) rename {dbms => src}/DataStreams/copyData.cpp (100%) rename {dbms => src}/DataStreams/copyData.h (100%) rename {dbms => src}/DataStreams/finalizeBlock.cpp (100%) rename {dbms => src}/DataStreams/finalizeBlock.h (100%) rename {dbms => src}/DataStreams/materializeBlock.cpp (100%) rename {dbms => src}/DataStreams/materializeBlock.h (100%) rename {dbms => src}/DataStreams/narrowBlockInputStreams.cpp (100%) rename {dbms => src}/DataStreams/narrowBlockInputStreams.h (100%) rename {dbms => src}/DataStreams/processConstants.cpp (100%) rename {dbms => src}/DataStreams/processConstants.h (100%) rename {dbms => src}/DataStreams/tests/CMakeLists.txt (100%) rename {dbms => src}/DataStreams/tests/collapsing_sorted_stream.cpp (100%) rename {dbms => src}/DataStreams/tests/expression_stream.cpp (100%) rename {dbms => src}/DataStreams/tests/filter_stream.cpp (100%) rename {dbms => src}/DataStreams/tests/finish_sorting_stream.cpp (100%) rename {dbms => 
src}/DataStreams/tests/gtest_blocks_size_merging_streams.cpp (100%) rename {dbms => src}/DataStreams/tests/gtest_check_sorted_stream.cpp (100%) rename {dbms => src}/DataStreams/tests/union_stream2.cpp (100%) rename {dbms => src}/DataTypes/CMakeLists.txt (100%) rename {dbms => src}/DataTypes/DataTypeAggregateFunction.cpp (100%) rename {dbms => src}/DataTypes/DataTypeAggregateFunction.h (100%) rename {dbms => src}/DataTypes/DataTypeArray.cpp (100%) rename {dbms => src}/DataTypes/DataTypeArray.h (100%) rename {dbms => src}/DataTypes/DataTypeCustom.h (100%) rename {dbms => src}/DataTypes/DataTypeCustomIPv4AndIPv6.cpp (100%) rename {dbms => src}/DataTypes/DataTypeCustomSimpleAggregateFunction.cpp (100%) rename {dbms => src}/DataTypes/DataTypeCustomSimpleAggregateFunction.h (100%) rename {dbms => src}/DataTypes/DataTypeCustomSimpleTextSerialization.cpp (100%) rename {dbms => src}/DataTypes/DataTypeCustomSimpleTextSerialization.h (100%) rename {dbms => src}/DataTypes/DataTypeDate.cpp (100%) rename {dbms => src}/DataTypes/DataTypeDate.h (100%) rename {dbms => src}/DataTypes/DataTypeDateTime.cpp (100%) rename {dbms => src}/DataTypes/DataTypeDateTime.h (100%) rename {dbms => src}/DataTypes/DataTypeDateTime64.cpp (100%) rename {dbms => src}/DataTypes/DataTypeDateTime64.h (100%) rename {dbms => src}/DataTypes/DataTypeDecimalBase.cpp (100%) rename {dbms => src}/DataTypes/DataTypeDecimalBase.h (100%) rename {dbms => src}/DataTypes/DataTypeEnum.cpp (100%) rename {dbms => src}/DataTypes/DataTypeEnum.h (100%) rename {dbms => src}/DataTypes/DataTypeFactory.cpp (100%) rename {dbms => src}/DataTypes/DataTypeFactory.h (100%) rename {dbms => src}/DataTypes/DataTypeFixedString.cpp (100%) rename {dbms => src}/DataTypes/DataTypeFixedString.h (100%) rename {dbms => src}/DataTypes/DataTypeFunction.cpp (100%) rename {dbms => src}/DataTypes/DataTypeFunction.h (100%) rename {dbms => src}/DataTypes/DataTypeInterval.cpp (100%) rename {dbms => src}/DataTypes/DataTypeInterval.h (100%) rename {dbms => src}/DataTypes/DataTypeLowCardinality.cpp (100%) rename {dbms => src}/DataTypes/DataTypeLowCardinality.h (100%) rename {dbms => src}/DataTypes/DataTypeLowCardinalityHelpers.cpp (100%) rename {dbms => src}/DataTypes/DataTypeNothing.cpp (100%) rename {dbms => src}/DataTypes/DataTypeNothing.h (100%) rename {dbms => src}/DataTypes/DataTypeNullable.cpp (100%) rename {dbms => src}/DataTypes/DataTypeNullable.h (100%) rename {dbms => src}/DataTypes/DataTypeNumberBase.cpp (100%) rename {dbms => src}/DataTypes/DataTypeNumberBase.h (100%) rename {dbms => src}/DataTypes/DataTypeSet.h (100%) rename {dbms => src}/DataTypes/DataTypeString.cpp (100%) rename {dbms => src}/DataTypes/DataTypeString.h (100%) rename {dbms => src}/DataTypes/DataTypeTuple.cpp (100%) rename {dbms => src}/DataTypes/DataTypeTuple.h (100%) rename {dbms => src}/DataTypes/DataTypeUUID.cpp (100%) rename {dbms => src}/DataTypes/DataTypeUUID.h (100%) rename {dbms => src}/DataTypes/DataTypeWithSimpleSerialization.h (100%) rename {dbms => src}/DataTypes/DataTypesDecimal.cpp (100%) rename {dbms => src}/DataTypes/DataTypesDecimal.h (100%) rename {dbms => src}/DataTypes/DataTypesNumber.cpp (100%) rename {dbms => src}/DataTypes/DataTypesNumber.h (100%) rename {dbms => src}/DataTypes/FieldToDataType.cpp (100%) rename {dbms => src}/DataTypes/FieldToDataType.h (100%) rename {dbms => src}/DataTypes/IDataType.cpp (100%) rename {dbms => src}/DataTypes/IDataType.h (100%) rename {dbms => src}/DataTypes/IDataTypeDummy.h (100%) rename {dbms => src}/DataTypes/Native.h (100%) rename {dbms => 
src}/DataTypes/NestedUtils.cpp (100%) rename {dbms => src}/DataTypes/NestedUtils.h (100%) rename {dbms => src}/DataTypes/NumberTraits.h (100%) rename {dbms => src}/DataTypes/convertMySQLDataType.cpp (100%) rename {dbms => src}/DataTypes/convertMySQLDataType.h (100%) rename {dbms => src}/DataTypes/getLeastSupertype.cpp (100%) rename {dbms => src}/DataTypes/getLeastSupertype.h (100%) rename {dbms => src}/DataTypes/getMostSubtype.cpp (100%) rename {dbms => src}/DataTypes/getMostSubtype.h (100%) rename {dbms => src}/DataTypes/tests/CMakeLists.txt (100%) rename {dbms => src}/DataTypes/tests/data_type_string.cpp (100%) rename {dbms => src}/DataTypes/tests/data_types_number_fixed.cpp (100%) rename {dbms => src}/DataTypes/tests/gtest_data_type_get_common_type.cpp (100%) rename {dbms => src}/Databases/DatabaseDictionary.cpp (100%) rename {dbms => src}/Databases/DatabaseDictionary.h (100%) rename {dbms => src}/Databases/DatabaseFactory.cpp (100%) rename {dbms => src}/Databases/DatabaseFactory.h (100%) rename {dbms => src}/Databases/DatabaseLazy.cpp (100%) rename {dbms => src}/Databases/DatabaseLazy.h (100%) rename {dbms => src}/Databases/DatabaseMemory.cpp (100%) rename {dbms => src}/Databases/DatabaseMemory.h (100%) rename {dbms => src}/Databases/DatabaseMySQL.cpp (100%) rename {dbms => src}/Databases/DatabaseMySQL.h (100%) rename {dbms => src}/Databases/DatabaseOnDisk.cpp (100%) rename {dbms => src}/Databases/DatabaseOnDisk.h (100%) rename {dbms => src}/Databases/DatabaseOrdinary.cpp (100%) rename {dbms => src}/Databases/DatabaseOrdinary.h (100%) rename {dbms => src}/Databases/DatabaseWithDictionaries.cpp (100%) rename {dbms => src}/Databases/DatabaseWithDictionaries.h (100%) rename {dbms => src}/Databases/DatabasesCommon.cpp (100%) rename {dbms => src}/Databases/DatabasesCommon.h (100%) rename {dbms => src}/Databases/IDatabase.h (100%) rename {dbms => src}/Dictionaries/CMakeLists.txt (100%) rename {dbms => src}/Dictionaries/CacheDictionary.cpp (100%) rename {dbms => src}/Dictionaries/CacheDictionary.h (100%) rename {dbms => src}/Dictionaries/CacheDictionary.inc.h (100%) rename {dbms => src}/Dictionaries/CacheDictionary_generate1.cpp.in (100%) rename {dbms => src}/Dictionaries/CacheDictionary_generate2.cpp.in (100%) rename {dbms => src}/Dictionaries/CacheDictionary_generate3.cpp.in (100%) rename {dbms => src}/Dictionaries/ClickHouseDictionarySource.cpp (100%) rename {dbms => src}/Dictionaries/ClickHouseDictionarySource.h (100%) rename {dbms => src}/Dictionaries/ComplexKeyCacheDictionary.cpp (100%) rename {dbms => src}/Dictionaries/ComplexKeyCacheDictionary.h (100%) rename {dbms => src}/Dictionaries/ComplexKeyCacheDictionary_createAttributeWithType.cpp (100%) rename {dbms => src}/Dictionaries/ComplexKeyCacheDictionary_generate1.cpp.in (100%) rename {dbms => src}/Dictionaries/ComplexKeyCacheDictionary_generate2.cpp.in (100%) rename {dbms => src}/Dictionaries/ComplexKeyCacheDictionary_generate3.cpp.in (100%) rename {dbms => src}/Dictionaries/ComplexKeyCacheDictionary_setAttributeValue.cpp (100%) rename {dbms => src}/Dictionaries/ComplexKeyCacheDictionary_setDefaultAttributeValue.cpp (100%) rename {dbms => src}/Dictionaries/ComplexKeyHashedDictionary.cpp (100%) rename {dbms => src}/Dictionaries/ComplexKeyHashedDictionary.h (100%) rename {dbms => src}/Dictionaries/DictionaryBlockInputStream.h (100%) rename {dbms => src}/Dictionaries/DictionaryBlockInputStreamBase.cpp (100%) rename {dbms => src}/Dictionaries/DictionaryBlockInputStreamBase.h (100%) rename {dbms => src}/Dictionaries/DictionaryFactory.cpp 
(100%) rename {dbms => src}/Dictionaries/DictionaryFactory.h (100%) rename {dbms => src}/Dictionaries/DictionarySourceFactory.cpp (100%) rename {dbms => src}/Dictionaries/DictionarySourceFactory.h (100%) rename {dbms => src}/Dictionaries/DictionarySourceHelpers.cpp (100%) rename {dbms => src}/Dictionaries/DictionarySourceHelpers.h (100%) rename {dbms => src}/Dictionaries/DictionaryStructure.cpp (100%) rename {dbms => src}/Dictionaries/DictionaryStructure.h (100%) rename {dbms => src}/Dictionaries/Embedded/CMakeLists.txt (100%) rename {dbms => src}/Dictionaries/Embedded/GeoDictionariesLoader.cpp (100%) rename {dbms => src}/Dictionaries/Embedded/GeoDictionariesLoader.h (100%) rename {dbms => src}/Dictionaries/Embedded/GeodataProviders/Entries.h (100%) rename {dbms => src}/Dictionaries/Embedded/GeodataProviders/HierarchiesProvider.cpp (100%) rename {dbms => src}/Dictionaries/Embedded/GeodataProviders/HierarchiesProvider.h (100%) rename {dbms => src}/Dictionaries/Embedded/GeodataProviders/HierarchyFormatReader.cpp (100%) rename {dbms => src}/Dictionaries/Embedded/GeodataProviders/HierarchyFormatReader.h (100%) rename {dbms => src}/Dictionaries/Embedded/GeodataProviders/IHierarchiesProvider.h (100%) rename {dbms => src}/Dictionaries/Embedded/GeodataProviders/INamesProvider.h (100%) rename {dbms => src}/Dictionaries/Embedded/GeodataProviders/NamesFormatReader.cpp (100%) rename {dbms => src}/Dictionaries/Embedded/GeodataProviders/NamesFormatReader.h (100%) rename {dbms => src}/Dictionaries/Embedded/GeodataProviders/NamesProvider.cpp (100%) rename {dbms => src}/Dictionaries/Embedded/GeodataProviders/NamesProvider.h (100%) rename {dbms => src}/Dictionaries/Embedded/GeodataProviders/Types.h (100%) rename {dbms => src}/Dictionaries/Embedded/RegionsHierarchies.cpp (100%) rename {dbms => src}/Dictionaries/Embedded/RegionsHierarchies.h (100%) rename {dbms => src}/Dictionaries/Embedded/RegionsHierarchy.cpp (100%) rename {dbms => src}/Dictionaries/Embedded/RegionsHierarchy.h (100%) rename {dbms => src}/Dictionaries/Embedded/RegionsNames.cpp (100%) rename {dbms => src}/Dictionaries/Embedded/RegionsNames.h (100%) rename {dbms => src}/Dictionaries/ExecutableDictionarySource.cpp (100%) rename {dbms => src}/Dictionaries/ExecutableDictionarySource.h (100%) rename {dbms => src}/Dictionaries/ExternalQueryBuilder.cpp (100%) rename {dbms => src}/Dictionaries/ExternalQueryBuilder.h (100%) rename {dbms => src}/Dictionaries/FileDictionarySource.cpp (100%) rename {dbms => src}/Dictionaries/FileDictionarySource.h (100%) rename {dbms => src}/Dictionaries/FlatDictionary.cpp (100%) rename {dbms => src}/Dictionaries/FlatDictionary.h (100%) rename {dbms => src}/Dictionaries/HTTPDictionarySource.cpp (100%) rename {dbms => src}/Dictionaries/HTTPDictionarySource.h (100%) rename {dbms => src}/Dictionaries/HashedDictionary.cpp (100%) rename {dbms => src}/Dictionaries/HashedDictionary.h (100%) rename {dbms => src}/Dictionaries/IDictionary.h (100%) rename {dbms => src}/Dictionaries/IDictionarySource.h (100%) rename {dbms => src}/Dictionaries/LibraryDictionarySource.cpp (100%) rename {dbms => src}/Dictionaries/LibraryDictionarySource.h (100%) rename {dbms => src}/Dictionaries/LibraryDictionarySourceExternal.cpp (100%) rename {dbms => src}/Dictionaries/LibraryDictionarySourceExternal.h (100%) rename {dbms => src}/Dictionaries/MongoDBBlockInputStream.cpp (100%) rename {dbms => src}/Dictionaries/MongoDBBlockInputStream.h (100%) rename {dbms => src}/Dictionaries/MongoDBDictionarySource.cpp (99%) rename {dbms => 
src}/Dictionaries/MongoDBDictionarySource.h (100%) rename {dbms => src}/Dictionaries/MySQLDictionarySource.cpp (100%) rename {dbms => src}/Dictionaries/MySQLDictionarySource.h (100%) rename {dbms => src}/Dictionaries/PolygonDictionary.cpp (100%) rename {dbms => src}/Dictionaries/PolygonDictionary.h (100%) rename {dbms => src}/Dictionaries/RangeDictionaryBlockInputStream.h (100%) rename {dbms => src}/Dictionaries/RangeHashedDictionary.cpp (100%) rename {dbms => src}/Dictionaries/RangeHashedDictionary.h (100%) rename {dbms => src}/Dictionaries/RedisBlockInputStream.cpp (100%) rename {dbms => src}/Dictionaries/RedisBlockInputStream.h (100%) rename {dbms => src}/Dictionaries/RedisDictionarySource.cpp (100%) rename {dbms => src}/Dictionaries/RedisDictionarySource.h (100%) rename {dbms => src}/Dictionaries/TrieDictionary.cpp (100%) rename {dbms => src}/Dictionaries/TrieDictionary.h (100%) rename {dbms => src}/Dictionaries/XDBCDictionarySource.cpp (100%) rename {dbms => src}/Dictionaries/XDBCDictionarySource.h (100%) rename {dbms => src}/Dictionaries/getDictionaryConfigurationFromAST.cpp (100%) rename {dbms => src}/Dictionaries/getDictionaryConfigurationFromAST.h (100%) rename {dbms => src}/Dictionaries/readInvalidateQuery.cpp (100%) rename {dbms => src}/Dictionaries/readInvalidateQuery.h (100%) rename {dbms => src}/Dictionaries/registerDictionaries.cpp (100%) rename {dbms => src}/Dictionaries/registerDictionaries.h (100%) rename {dbms => src}/Dictionaries/tests/CMakeLists.txt (100%) rename {dbms => src}/Dictionaries/tests/gtest_dictionary_configuration.cpp (100%) rename {dbms => src}/Dictionaries/writeParenthesisedString.cpp (100%) rename {dbms => src}/Dictionaries/writeParenthesisedString.h (100%) rename {dbms => src}/Disks/CMakeLists.txt (100%) rename {dbms => src}/Disks/DiskFactory.cpp (100%) rename {dbms => src}/Disks/DiskFactory.h (100%) rename {dbms => src}/Disks/DiskLocal.cpp (100%) rename {dbms => src}/Disks/DiskLocal.h (100%) rename {dbms => src}/Disks/DiskMemory.cpp (100%) rename {dbms => src}/Disks/DiskMemory.h (100%) rename {dbms => src}/Disks/DiskS3.cpp (100%) rename {dbms => src}/Disks/DiskS3.h (100%) rename {dbms => src}/Disks/DiskSpaceMonitor.cpp (100%) rename {dbms => src}/Disks/DiskSpaceMonitor.h (100%) rename {dbms => src}/Disks/IDisk.cpp (100%) rename {dbms => src}/Disks/IDisk.h (100%) rename {dbms => src}/Disks/registerDisks.cpp (100%) rename {dbms => src}/Disks/registerDisks.h (100%) rename {dbms => src}/Disks/tests/CMakeLists.txt (100%) rename {dbms => src}/Disks/tests/gtest_disk.cpp (100%) rename {dbms => src}/Disks/tests/gtest_disk.h (100%) rename {dbms => src}/Disks/tests/gtest_path_functions.cpp (100%) rename {dbms => src}/Formats/CMakeLists.txt (100%) rename {dbms => src}/Formats/FormatFactory.cpp (100%) rename {dbms => src}/Formats/FormatFactory.h (100%) rename {dbms => src}/Formats/FormatSchemaInfo.cpp (100%) rename {dbms => src}/Formats/FormatSchemaInfo.h (100%) rename {dbms => src}/Formats/FormatSettings.h (100%) rename {dbms => src}/Formats/IRowInputStream.cpp (100%) rename {dbms => src}/Formats/IRowInputStream.h (100%) rename {dbms => src}/Formats/IRowOutputStream.cpp (100%) rename {dbms => src}/Formats/IRowOutputStream.h (100%) rename {dbms => src}/Formats/MySQLBlockInputStream.cpp (100%) rename {dbms => src}/Formats/MySQLBlockInputStream.h (100%) rename {dbms => src}/Formats/NativeFormat.cpp (100%) rename {dbms => src}/Formats/NullFormat.cpp (100%) rename {dbms => src}/Formats/ParsedTemplateFormatString.cpp (100%) rename {dbms => 
src}/Formats/ParsedTemplateFormatString.h (100%) rename {dbms => src}/Formats/ProtobufColumnMatcher.cpp (100%) rename {dbms => src}/Formats/ProtobufColumnMatcher.h (100%) rename {dbms => src}/Formats/ProtobufReader.cpp (100%) rename {dbms => src}/Formats/ProtobufReader.h (100%) rename {dbms => src}/Formats/ProtobufSchemas.cpp (100%) rename {dbms => src}/Formats/ProtobufSchemas.h (100%) rename {dbms => src}/Formats/ProtobufWriter.cpp (100%) rename {dbms => src}/Formats/ProtobufWriter.h (100%) rename {dbms => src}/Formats/config_formats.h.in (100%) rename {dbms => src}/Formats/tests/CMakeLists.txt (100%) rename {dbms => src}/Formats/tests/tab_separated_streams.cpp (100%) rename {dbms => src}/Formats/verbosePrintString.cpp (100%) rename {dbms => src}/Formats/verbosePrintString.h (100%) rename {dbms => src}/Functions/CMakeLists.txt (100%) rename {dbms => src}/Functions/CRC.cpp (100%) rename {dbms => src}/Functions/CustomWeekTransforms.h (100%) rename {dbms => src}/Functions/DateTimeTransforms.h (100%) rename {dbms => src}/Functions/DivisionUtils.h (100%) rename {dbms => src}/Functions/DummyJSONParser.h (100%) rename {dbms => src}/Functions/EmptyImpl.h (100%) rename {dbms => src}/Functions/FunctionBase64Conversion.h (100%) rename {dbms => src}/Functions/FunctionBinaryArithmetic.h (100%) rename {dbms => src}/Functions/FunctionBitTestMany.h (100%) rename {dbms => src}/Functions/FunctionCustomWeekToSomething.h (100%) rename {dbms => src}/Functions/FunctionDateOrDateTimeAddInterval.h (100%) rename {dbms => src}/Functions/FunctionDateOrDateTimeToSomething.h (100%) rename {dbms => src}/Functions/FunctionFQDN.cpp (100%) rename {dbms => src}/Functions/FunctionFactory.cpp (100%) rename {dbms => src}/Functions/FunctionFactory.h (100%) rename {dbms => src}/Functions/FunctionHelpers.cpp (100%) rename {dbms => src}/Functions/FunctionHelpers.h (100%) rename {dbms => src}/Functions/FunctionIfBase.h (100%) rename {dbms => src}/Functions/FunctionJoinGet.cpp (100%) rename {dbms => src}/Functions/FunctionJoinGet.h (100%) rename {dbms => src}/Functions/FunctionMathBinaryFloat64.h (100%) rename {dbms => src}/Functions/FunctionMathConstFloat64.h (100%) rename {dbms => src}/Functions/FunctionMathUnary.h (100%) rename {dbms => src}/Functions/FunctionNumericPredicate.h (100%) rename {dbms => src}/Functions/FunctionStartsEndsWith.h (100%) rename {dbms => src}/Functions/FunctionStringOrArrayToT.h (100%) rename {dbms => src}/Functions/FunctionStringToString.h (100%) rename {dbms => src}/Functions/FunctionUnaryArithmetic.h (100%) rename {dbms => src}/Functions/FunctionsBitmap.cpp (100%) rename {dbms => src}/Functions/FunctionsBitmap.h (100%) rename {dbms => src}/Functions/FunctionsCoding.cpp (100%) rename {dbms => src}/Functions/FunctionsCoding.h (100%) rename {dbms => src}/Functions/FunctionsComparison.h (100%) rename {dbms => src}/Functions/FunctionsConsistentHashing.h (100%) rename {dbms => src}/Functions/FunctionsConversion.cpp (100%) rename {dbms => src}/Functions/FunctionsConversion.h (100%) rename {dbms => src}/Functions/FunctionsEmbeddedDictionaries.cpp (100%) rename {dbms => src}/Functions/FunctionsEmbeddedDictionaries.h (100%) rename {dbms => src}/Functions/FunctionsExternalDictionaries.cpp (100%) rename {dbms => src}/Functions/FunctionsExternalDictionaries.h (100%) rename {dbms => src}/Functions/FunctionsExternalModels.cpp (100%) rename {dbms => src}/Functions/FunctionsExternalModels.h (100%) rename {dbms => src}/Functions/FunctionsFormatting.cpp (100%) rename {dbms => src}/Functions/FunctionsFormatting.h (100%) 
rename {dbms => src}/Functions/FunctionsHashing.cpp (100%) rename {dbms => src}/Functions/FunctionsHashing.h (100%) rename {dbms => src}/Functions/FunctionsJSON.cpp (100%) rename {dbms => src}/Functions/FunctionsJSON.h (100%) rename {dbms => src}/Functions/FunctionsLogical.cpp (100%) rename {dbms => src}/Functions/FunctionsLogical.h (100%) rename {dbms => src}/Functions/FunctionsMiscellaneous.h (100%) rename {dbms => src}/Functions/FunctionsMultiStringPosition.h (100%) rename {dbms => src}/Functions/FunctionsMultiStringSearch.h (100%) rename {dbms => src}/Functions/FunctionsRandom.cpp (100%) rename {dbms => src}/Functions/FunctionsRandom.h (100%) rename {dbms => src}/Functions/FunctionsRound.cpp (100%) rename {dbms => src}/Functions/FunctionsRound.h (100%) rename {dbms => src}/Functions/FunctionsStringArray.cpp (100%) rename {dbms => src}/Functions/FunctionsStringArray.h (100%) rename {dbms => src}/Functions/FunctionsStringRegex.cpp (100%) rename {dbms => src}/Functions/FunctionsStringRegex.h (100%) rename {dbms => src}/Functions/FunctionsStringSearch.h (100%) rename {dbms => src}/Functions/FunctionsStringSearchToString.h (100%) rename {dbms => src}/Functions/FunctionsStringSimilarity.cpp (100%) rename {dbms => src}/Functions/FunctionsStringSimilarity.h (100%) rename {dbms => src}/Functions/FunctionsVisitParam.h (100%) rename {dbms => src}/Functions/GatherUtils/Algorithms.h (100%) rename {dbms => src}/Functions/GatherUtils/ArraySinkVisitor.h (100%) rename {dbms => src}/Functions/GatherUtils/ArraySourceVisitor.h (100%) rename {dbms => src}/Functions/GatherUtils/CMakeLists.txt (100%) rename {dbms => src}/Functions/GatherUtils/GatherUtils.h (100%) rename {dbms => src}/Functions/GatherUtils/IArraySink.h (100%) rename {dbms => src}/Functions/GatherUtils/IArraySource.h (100%) rename {dbms => src}/Functions/GatherUtils/IValueSource.h (100%) rename {dbms => src}/Functions/GatherUtils/Selectors.h (100%) rename {dbms => src}/Functions/GatherUtils/Sinks.h (100%) rename {dbms => src}/Functions/GatherUtils/Slices.h (100%) rename {dbms => src}/Functions/GatherUtils/Sources.h (100%) rename {dbms => src}/Functions/GatherUtils/ValueSourceVisitor.h (100%) rename {dbms => src}/Functions/GatherUtils/concat.cpp (100%) rename {dbms => src}/Functions/GatherUtils/createArraySink.cpp (100%) rename {dbms => src}/Functions/GatherUtils/createArraySource.cpp (100%) rename {dbms => src}/Functions/GatherUtils/createValueSource.cpp (100%) rename {dbms => src}/Functions/GatherUtils/has.cpp (100%) rename {dbms => src}/Functions/GatherUtils/push.cpp (100%) rename {dbms => src}/Functions/GatherUtils/resizeConstantSize.cpp (100%) rename {dbms => src}/Functions/GatherUtils/resizeDynamicSize.cpp (100%) rename {dbms => src}/Functions/GatherUtils/sliceDynamicOffsetBounded.cpp (100%) rename {dbms => src}/Functions/GatherUtils/sliceDynamicOffsetUnbounded.cpp (100%) rename {dbms => src}/Functions/GatherUtils/sliceFromLeftConstantOffsetBounded.cpp (100%) rename {dbms => src}/Functions/GatherUtils/sliceFromLeftConstantOffsetUnbounded.cpp (100%) rename {dbms => src}/Functions/GatherUtils/sliceFromRightConstantOffsetBounded.cpp (100%) rename {dbms => src}/Functions/GatherUtils/sliceFromRightConstantOffsetUnbounded.cpp (100%) rename {dbms => src}/Functions/GeoHash.cpp (100%) rename {dbms => src}/Functions/GeoHash.h (100%) rename {dbms => src}/Functions/HasTokenImpl.h (100%) rename {dbms => src}/Functions/IFunction.cpp (100%) rename {dbms => src}/Functions/IFunction.h (100%) rename {dbms => src}/Functions/IFunctionAdaptors.h (100%) rename 
{dbms => src}/Functions/IFunctionImpl.h (100%) rename {dbms => src}/Functions/LowerUpperImpl.h (100%) rename {dbms => src}/Functions/LowerUpperUTF8Impl.h (100%) rename {dbms => src}/Functions/MultiSearchAllPositionsImpl.h (100%) rename {dbms => src}/Functions/MultiSearchFirstIndexImpl.h (100%) rename {dbms => src}/Functions/MultiSearchFirstPositionImpl.h (100%) rename {dbms => src}/Functions/MultiSearchImpl.h (100%) rename {dbms => src}/Functions/PolygonUtils.h (100%) rename {dbms => src}/Functions/PositionImpl.h (100%) rename {dbms => src}/Functions/RapidJSONParser.h (100%) rename {dbms => src}/Functions/Regexps.h (100%) rename {dbms => src}/Functions/SimdJSONParser.h (100%) rename {dbms => src}/Functions/URL/CMakeLists.txt (100%) rename {dbms => src}/Functions/URL/FunctionsURL.h (100%) rename {dbms => src}/Functions/URL/URLHierarchy.cpp (100%) rename {dbms => src}/Functions/URL/URLPathHierarchy.cpp (100%) rename {dbms => src}/Functions/URL/basename.cpp (100%) rename {dbms => src}/Functions/URL/config_functions_url.h.in (100%) rename {dbms => src}/Functions/URL/cutFragment.cpp (100%) rename {dbms => src}/Functions/URL/cutQueryString.cpp (100%) rename {dbms => src}/Functions/URL/cutQueryStringAndFragment.cpp (100%) rename {dbms => src}/Functions/URL/cutToFirstSignificantSubdomain.cpp (100%) rename {dbms => src}/Functions/URL/cutURLParameter.cpp (100%) rename {dbms => src}/Functions/URL/cutWWW.cpp (100%) rename {dbms => src}/Functions/URL/decodeURLComponent.cpp (100%) rename {dbms => src}/Functions/URL/domain.cpp (100%) rename {dbms => src}/Functions/URL/domain.h (100%) rename {dbms => src}/Functions/URL/domainWithoutWWW.cpp (100%) rename {dbms => src}/Functions/URL/extractURLParameter.cpp (100%) rename {dbms => src}/Functions/URL/extractURLParameterNames.cpp (100%) rename {dbms => src}/Functions/URL/extractURLParameters.cpp (100%) rename {dbms => src}/Functions/URL/firstSignificantSubdomain.cpp (100%) rename {dbms => src}/Functions/URL/firstSignificantSubdomain.h (100%) rename {dbms => src}/Functions/URL/fragment.cpp (100%) rename {dbms => src}/Functions/URL/fragment.h (100%) rename {dbms => src}/Functions/URL/path.cpp (100%) rename {dbms => src}/Functions/URL/pathFull.cpp (100%) rename {dbms => src}/Functions/URL/protocol.cpp (100%) rename {dbms => src}/Functions/URL/protocol.h (100%) rename {dbms => src}/Functions/URL/queryString.cpp (100%) rename {dbms => src}/Functions/URL/queryString.h (100%) rename {dbms => src}/Functions/URL/queryStringAndFragment.cpp (100%) rename {dbms => src}/Functions/URL/queryStringAndFragment.h (100%) rename {dbms => src}/Functions/URL/registerFunctionsURL.cpp (100%) rename {dbms => src}/Functions/URL/tldLookup.generated.cpp (100%) rename {dbms => src}/Functions/URL/tldLookup.gperf (100%) rename {dbms => src}/Functions/URL/tldLookup.h (100%) rename {dbms => src}/Functions/URL/tldLookup.sh (100%) rename {dbms => src}/Functions/URL/topLevelDomain.cpp (100%) rename {dbms => src}/Functions/abs.cpp (100%) rename {dbms => src}/Functions/acos.cpp (100%) rename {dbms => src}/Functions/addDays.cpp (100%) rename {dbms => src}/Functions/addHours.cpp (100%) rename {dbms => src}/Functions/addMinutes.cpp (100%) rename {dbms => src}/Functions/addMonths.cpp (100%) rename {dbms => src}/Functions/addQuarters.cpp (100%) rename {dbms => src}/Functions/addSeconds.cpp (100%) rename {dbms => src}/Functions/addWeeks.cpp (100%) rename {dbms => src}/Functions/addYears.cpp (100%) rename {dbms => src}/Functions/addressToLine.cpp (100%) rename {dbms => src}/Functions/addressToSymbol.cpp 
(100%) rename {dbms => src}/Functions/appendTrailingCharIfAbsent.cpp (100%) rename {dbms => src}/Functions/array/CMakeLists.txt (100%) rename {dbms => src}/Functions/array/FunctionArrayMapped.h (100%) rename {dbms => src}/Functions/array/array.cpp (100%) rename {dbms => src}/Functions/array/arrayAUC.cpp (100%) rename {dbms => src}/Functions/array/arrayAll.cpp (100%) rename {dbms => src}/Functions/array/arrayCompact.cpp (100%) rename {dbms => src}/Functions/array/arrayConcat.cpp (100%) rename {dbms => src}/Functions/array/arrayCount.cpp (100%) rename {dbms => src}/Functions/array/arrayCumSum.cpp (100%) rename {dbms => src}/Functions/array/arrayCumSumNonNegative.cpp (100%) rename {dbms => src}/Functions/array/arrayDifference.cpp (100%) rename {dbms => src}/Functions/array/arrayDistinct.cpp (100%) rename {dbms => src}/Functions/array/arrayElement.cpp (100%) rename {dbms => src}/Functions/array/arrayEnumerate.cpp (100%) rename {dbms => src}/Functions/array/arrayEnumerateDense.cpp (100%) rename {dbms => src}/Functions/array/arrayEnumerateDenseRanked.cpp (100%) rename {dbms => src}/Functions/array/arrayEnumerateExtended.h (100%) rename {dbms => src}/Functions/array/arrayEnumerateRanked.cpp (100%) rename {dbms => src}/Functions/array/arrayEnumerateRanked.h (100%) rename {dbms => src}/Functions/array/arrayEnumerateUniq.cpp (100%) rename {dbms => src}/Functions/array/arrayEnumerateUniqRanked.cpp (100%) rename {dbms => src}/Functions/array/arrayExists.cpp (100%) rename {dbms => src}/Functions/array/arrayFill.cpp (100%) rename {dbms => src}/Functions/array/arrayFilter.cpp (100%) rename {dbms => src}/Functions/array/arrayFirst.cpp (100%) rename {dbms => src}/Functions/array/arrayFirstIndex.cpp (100%) rename {dbms => src}/Functions/array/arrayFlatten.cpp (100%) rename {dbms => src}/Functions/array/arrayIndex.h (100%) rename {dbms => src}/Functions/array/arrayIntersect.cpp (100%) rename {dbms => src}/Functions/array/arrayJoin.cpp (100%) rename {dbms => src}/Functions/array/arrayMap.cpp (100%) rename {dbms => src}/Functions/array/arrayPop.h (100%) rename {dbms => src}/Functions/array/arrayPopBack.cpp (100%) rename {dbms => src}/Functions/array/arrayPopFront.cpp (100%) rename {dbms => src}/Functions/array/arrayPush.h (100%) rename {dbms => src}/Functions/array/arrayPushBack.cpp (100%) rename {dbms => src}/Functions/array/arrayPushFront.cpp (100%) rename {dbms => src}/Functions/array/arrayReduce.cpp (100%) rename {dbms => src}/Functions/array/arrayReduceInRanges.cpp (100%) rename {dbms => src}/Functions/array/arrayResize.cpp (100%) rename {dbms => src}/Functions/array/arrayReverse.cpp (100%) rename {dbms => src}/Functions/array/arrayScalarProduct.h (100%) rename {dbms => src}/Functions/array/arraySlice.cpp (100%) rename {dbms => src}/Functions/array/arraySort.cpp (100%) rename {dbms => src}/Functions/array/arraySplit.cpp (100%) rename {dbms => src}/Functions/array/arraySum.cpp (100%) rename {dbms => src}/Functions/array/arrayUniq.cpp (100%) rename {dbms => src}/Functions/array/arrayWithConstant.cpp (100%) rename {dbms => src}/Functions/array/arrayZip.cpp (100%) rename {dbms => src}/Functions/array/countEqual.cpp (100%) rename {dbms => src}/Functions/array/emptyArray.cpp (100%) rename {dbms => src}/Functions/array/emptyArrayToSingle.cpp (100%) rename {dbms => src}/Functions/array/has.cpp (100%) rename {dbms => src}/Functions/array/hasAll.cpp (100%) rename {dbms => src}/Functions/array/hasAllAny.h (100%) rename {dbms => src}/Functions/array/hasAny.cpp (100%) rename {dbms => src}/Functions/array/indexOf.cpp 
 rename {dbms => src}/Functions/array/length.cpp (100%)
 rename {dbms => src}/Functions/array/range.cpp (100%)
 rename {dbms => src}/Functions/array/registerFunctionsArray.cpp (100%)
 rename {dbms => src}/Functions/asin.cpp (100%)
 rename {dbms => src}/Functions/assumeNotNull.cpp (100%)
 rename {dbms => src}/Functions/atan.cpp (100%)
 rename {dbms => src}/Functions/bar.cpp (100%)
 rename {dbms => src}/Functions/base64Decode.cpp (100%)
 rename {dbms => src}/Functions/base64Encode.cpp (100%)
 rename {dbms => src}/Functions/bitAnd.cpp (100%)
 rename {dbms => src}/Functions/bitBoolMaskAnd.cpp (96%)
 rename {dbms => src}/Functions/bitBoolMaskOr.cpp (96%)
 rename {dbms => src}/Functions/bitCount.cpp (100%)
 rename {dbms => src}/Functions/bitNot.cpp (100%)
 rename {dbms => src}/Functions/bitOr.cpp (100%)
 rename {dbms => src}/Functions/bitRotateLeft.cpp (100%)
 rename {dbms => src}/Functions/bitRotateRight.cpp (100%)
 rename {dbms => src}/Functions/bitShiftLeft.cpp (100%)
 rename {dbms => src}/Functions/bitShiftRight.cpp (100%)
 rename {dbms => src}/Functions/bitSwapLastTwo.cpp (97%)
 rename {dbms => src}/Functions/bitTest.cpp (100%)
 rename {dbms => src}/Functions/bitTestAll.cpp (100%)
 rename {dbms => src}/Functions/bitTestAny.cpp (100%)
 rename {dbms => src}/Functions/bitWrapperFunc.cpp (96%)
 rename {dbms => src}/Functions/bitXor.cpp (100%)
 rename {dbms => src}/Functions/blockNumber.cpp (100%)
 rename {dbms => src}/Functions/blockSerializedSize.cpp (100%)
 rename {dbms => src}/Functions/blockSize.cpp (100%)
 rename {dbms => src}/Functions/caseWithExpression.cpp (100%)
 rename {dbms => src}/Functions/castTypeToEither.h (100%)
 rename {dbms => src}/Functions/cbrt.cpp (100%)
 rename {dbms => src}/Functions/coalesce.cpp (100%)
 rename {dbms => src}/Functions/concat.cpp (100%)
 rename {dbms => src}/Functions/config_functions.h.in (100%)
 rename {dbms => src}/Functions/convertCharset.cpp (100%)
 rename {dbms => src}/Functions/cos.cpp (100%)
 rename {dbms => src}/Functions/currentDatabase.cpp (100%)
 rename {dbms => src}/Functions/currentQuota.cpp (100%)
 rename {dbms => src}/Functions/currentRowPolicies.cpp (100%)
 rename {dbms => src}/Functions/currentUser.cpp (100%)
 rename {dbms => src}/Functions/dateDiff.cpp (100%)
 rename {dbms => src}/Functions/defaultValueOfArgumentType.cpp (100%)
 rename {dbms => src}/Functions/demange.cpp (100%)
 rename {dbms => src}/Functions/divide.cpp (100%)
 rename {dbms => src}/Functions/dumpColumnStructure.cpp (100%)
 rename {dbms => src}/Functions/e.cpp (100%)
 rename {dbms => src}/Functions/empty.cpp (100%)
 rename {dbms => src}/Functions/endsWith.cpp (100%)
 rename {dbms => src}/Functions/equals.cpp (100%)
 rename {dbms => src}/Functions/erf.cpp (100%)
 rename {dbms => src}/Functions/erfc.cpp (100%)
 rename {dbms => src}/Functions/evalMLMethod.cpp (100%)
 rename {dbms => src}/Functions/exp.cpp (100%)
 rename {dbms => src}/Functions/exp10.cpp (100%)
 rename {dbms => src}/Functions/exp2.cpp (100%)
 rename {dbms => src}/Functions/extractTimeZoneFromFunctionArguments.cpp (100%)
 rename {dbms => src}/Functions/extractTimeZoneFromFunctionArguments.h (100%)
 rename {dbms => src}/Functions/filesystem.cpp (100%)
 rename {dbms => src}/Functions/finalizeAggregation.cpp (100%)
 rename {dbms => src}/Functions/formatDateTime.cpp (100%)
 rename {dbms => src}/Functions/formatString.cpp (100%)
 rename {dbms => src}/Functions/formatString.h (100%)
 rename {dbms => src}/Functions/gcd.cpp (100%)
 rename {dbms => src}/Functions/generateUUIDv4.cpp (100%)
 rename {dbms => src}/Functions/geoToH3.cpp (100%)
 rename {dbms => src}/Functions/geohashDecode.cpp (100%)
 rename {dbms => src}/Functions/geohashEncode.cpp (100%)
 rename {dbms => src}/Functions/geohashesInBox.cpp (100%)
 rename {dbms => src}/Functions/getMacro.cpp (100%)
 rename {dbms => src}/Functions/getScalar.cpp (100%)
 rename {dbms => src}/Functions/getSizeOfEnumType.cpp (100%)
 rename {dbms => src}/Functions/greatCircleDistance.cpp (100%)
 rename {dbms => src}/Functions/greater.cpp (100%)
 rename {dbms => src}/Functions/greaterOrEquals.cpp (100%)
 rename {dbms => src}/Functions/greatest.cpp (100%)
 rename {dbms => src}/Functions/h3EdgeAngle.cpp (100%)
 rename {dbms => src}/Functions/h3EdgeLengthM.cpp (100%)
 rename {dbms => src}/Functions/h3GetBaseCell.cpp (100%)
 rename {dbms => src}/Functions/h3GetResolution.cpp (100%)
 rename {dbms => src}/Functions/h3HexAreaM2.cpp (100%)
 rename {dbms => src}/Functions/h3IndexesAreNeighbors.cpp (100%)
 rename {dbms => src}/Functions/h3IsValid.cpp (100%)
 rename {dbms => src}/Functions/h3ToChildren.cpp (100%)
 rename {dbms => src}/Functions/h3ToParent.cpp (100%)
 rename {dbms => src}/Functions/h3ToString.cpp (100%)
 rename {dbms => src}/Functions/h3kRing.cpp (100%)
 rename {dbms => src}/Functions/hasColumnInTable.cpp (100%)
 rename {dbms => src}/Functions/hasToken.cpp (100%)
 rename {dbms => src}/Functions/hasTokenCaseInsensitive.cpp (100%)
 rename {dbms => src}/Functions/hostName.cpp (100%)
 rename {dbms => src}/Functions/identity.cpp (100%)
 rename {dbms => src}/Functions/if.cpp (100%)
 rename {dbms => src}/Functions/ifNotFinite.cpp (100%)
 rename {dbms => src}/Functions/ifNull.cpp (100%)
 rename {dbms => src}/Functions/ignore.cpp (100%)
 rename {dbms => src}/Functions/ignoreExceptNull.cpp (100%)
 rename {dbms => src}/Functions/in.cpp (100%)
 rename {dbms => src}/Functions/intDiv.cpp (100%)
 rename {dbms => src}/Functions/intDivOrZero.cpp (100%)
 rename {dbms => src}/Functions/intExp10.cpp (100%)
 rename {dbms => src}/Functions/intExp2.cpp (100%)
 rename {dbms => src}/Functions/isFinite.cpp (100%)
 rename {dbms => src}/Functions/isInfinite.cpp (100%)
 rename {dbms => src}/Functions/isNaN.cpp (100%)
 rename {dbms => src}/Functions/isNotNull.cpp (100%)
 rename {dbms => src}/Functions/isNull.cpp (100%)
 rename {dbms => src}/Functions/isValidUTF8.cpp (100%)
 rename {dbms => src}/Functions/jumpConsistentHash.cpp (100%)
 rename {dbms => src}/Functions/lcm.cpp (100%)
 rename {dbms => src}/Functions/least.cpp (100%)
 rename {dbms => src}/Functions/lengthUTF8.cpp (100%)
 rename {dbms => src}/Functions/less.cpp (100%)
 rename {dbms => src}/Functions/lessOrEquals.cpp (100%)
 rename {dbms => src}/Functions/lgamma.cpp (100%)
 rename {dbms => src}/Functions/likePatternToRegexp.h (100%)
 rename {dbms => src}/Functions/log.cpp (100%)
 rename {dbms => src}/Functions/log10.cpp (100%)
 rename {dbms => src}/Functions/log2.cpp (100%)
 rename {dbms => src}/Functions/lowCardinalityIndices.cpp (100%)
 rename {dbms => src}/Functions/lowCardinalityKeys.cpp (100%)
 rename {dbms => src}/Functions/lower.cpp (100%)
 rename {dbms => src}/Functions/lowerUTF8.cpp (100%)
 rename {dbms => src}/Functions/materialize.cpp (100%)
 rename {dbms => src}/Functions/minus.cpp (100%)
 rename {dbms => src}/Functions/modulo.cpp (100%)
 rename {dbms => src}/Functions/moduloOrZero.cpp (100%)
 rename {dbms => src}/Functions/multiIf.cpp (100%)
 rename {dbms => src}/Functions/multiSearchAllPositions.cpp (100%)
 rename {dbms => src}/Functions/multiSearchAllPositionsCaseInsensitive.cpp (100%)
 rename {dbms => src}/Functions/multiSearchAllPositionsCaseInsensitiveUTF8.cpp (100%)
 rename {dbms => src}/Functions/multiSearchAllPositionsUTF8.cpp (100%)
 rename {dbms => src}/Functions/multiSearchAny.cpp (100%)
 rename {dbms => src}/Functions/multiSearchAnyCaseInsensitive.cpp (100%)
 rename {dbms => src}/Functions/multiSearchAnyCaseInsensitiveUTF8.cpp (100%)
 rename {dbms => src}/Functions/multiSearchAnyUTF8.cpp (100%)
 rename {dbms => src}/Functions/multiSearchFirstIndex.cpp (100%)
 rename {dbms => src}/Functions/multiSearchFirstIndexCaseInsensitive.cpp (100%)
 rename {dbms => src}/Functions/multiSearchFirstIndexCaseInsensitiveUTF8.cpp (100%)
 rename {dbms => src}/Functions/multiSearchFirstIndexUTF8.cpp (100%)
 rename {dbms => src}/Functions/multiSearchFirstPosition.cpp (100%)
 rename {dbms => src}/Functions/multiSearchFirstPositionCaseInsensitive.cpp (100%)
 rename {dbms => src}/Functions/multiSearchFirstPositionCaseInsensitiveUTF8.cpp (100%)
 rename {dbms => src}/Functions/multiSearchFirstPositionUTF8.cpp (100%)
 rename {dbms => src}/Functions/multiply.cpp (100%)
 rename {dbms => src}/Functions/negate.cpp (100%)
 rename {dbms => src}/Functions/neighbor.cpp (100%)
 rename {dbms => src}/Functions/notEmpty.cpp (100%)
 rename {dbms => src}/Functions/notEquals.cpp (100%)
 rename {dbms => src}/Functions/now.cpp (100%)
 rename {dbms => src}/Functions/now64.cpp (100%)
 rename {dbms => src}/Functions/nullIf.cpp (100%)
 rename {dbms => src}/Functions/pi.cpp (100%)
 rename {dbms => src}/Functions/plus.cpp (100%)
 rename {dbms => src}/Functions/pointInEllipses.cpp (100%)
 rename {dbms => src}/Functions/pointInPolygon.cpp (100%)
 rename {dbms => src}/Functions/position.cpp (100%)
 rename {dbms => src}/Functions/positionCaseInsensitive.cpp (100%)
 rename {dbms => src}/Functions/positionCaseInsensitiveUTF8.cpp (100%)
 rename {dbms => src}/Functions/positionUTF8.cpp (100%)
 rename {dbms => src}/Functions/pow.cpp (100%)
 rename {dbms => src}/Functions/rand.cpp (100%)
 rename {dbms => src}/Functions/rand64.cpp (100%)
 rename {dbms => src}/Functions/randConstant.cpp (100%)
 rename {dbms => src}/Functions/randomPrintableASCII.cpp (100%)
 rename {dbms => src}/Functions/regexpQuoteMeta.cpp (100%)
 rename {dbms => src}/Functions/registerFunctions.cpp (100%)
 rename {dbms => src}/Functions/registerFunctions.h (100%)
 rename {dbms => src}/Functions/registerFunctionsArithmetic.cpp (100%)
 rename {dbms => src}/Functions/registerFunctionsComparison.cpp (100%)
 rename {dbms => src}/Functions/registerFunctionsConditional.cpp (100%)
 rename {dbms => src}/Functions/registerFunctionsConsistentHashing.cpp (100%)
 rename {dbms => src}/Functions/registerFunctionsDateTime.cpp (100%)
 rename {dbms => src}/Functions/registerFunctionsGeo.cpp (100%)
 rename {dbms => src}/Functions/registerFunctionsHigherOrder.cpp (100%)
 rename {dbms => src}/Functions/registerFunctionsIntrospection.cpp (100%)
 rename {dbms => src}/Functions/registerFunctionsMath.cpp (100%)
 rename {dbms => src}/Functions/registerFunctionsMiscellaneous.cpp (100%)
 rename {dbms => src}/Functions/registerFunctionsNull.cpp (100%)
 rename {dbms => src}/Functions/registerFunctionsRandom.cpp (100%)
 rename {dbms => src}/Functions/registerFunctionsReinterpret.cpp (100%)
 rename {dbms => src}/Functions/registerFunctionsString.cpp (100%)
 rename {dbms => src}/Functions/registerFunctionsStringSearch.cpp (100%)
 rename {dbms => src}/Functions/registerFunctionsTuple.cpp (100%)
 rename {dbms => src}/Functions/registerFunctionsVisitParam.cpp (100%)
 rename {dbms => src}/Functions/reinterpretAsFixedString.cpp (100%)
 rename {dbms => src}/Functions/reinterpretAsString.cpp (100%)
 rename {dbms => src}/Functions/reinterpretStringAs.cpp (100%)
 rename {dbms => src}/Functions/repeat.cpp (100%)
 rename {dbms => src}/Functions/replicate.cpp (100%)
 rename {dbms => src}/Functions/reverse.cpp (100%)
 rename {dbms => src}/Functions/reverseUTF8.cpp (100%)
 rename {dbms => src}/Functions/roundAge.cpp (100%)
 rename {dbms => src}/Functions/roundDuration.cpp (100%)
 rename {dbms => src}/Functions/roundToExp2.cpp (100%)
 rename {dbms => src}/Functions/rowNumberInAllBlocks.cpp (100%)
 rename {dbms => src}/Functions/rowNumberInBlock.cpp (100%)
 rename {dbms => src}/Functions/runningAccumulate.cpp (100%)
 rename {dbms => src}/Functions/runningDifference.cpp (100%)
 rename {dbms => src}/Functions/runningDifference.h (100%)
 rename {dbms => src}/Functions/runningDifferenceStartingWithFirstValue.cpp (100%)
 rename {dbms => src}/Functions/sigmoid.cpp (100%)
 rename {dbms => src}/Functions/sin.cpp (100%)
 rename {dbms => src}/Functions/sleep.cpp (100%)
 rename {dbms => src}/Functions/sleep.h (100%)
 rename {dbms => src}/Functions/sleepEachRow.cpp (100%)
 rename {dbms => src}/Functions/sqrt.cpp (100%)
 rename {dbms => src}/Functions/startsWith.cpp (100%)
 rename {dbms => src}/Functions/stringToH3.cpp (100%)
 rename {dbms => src}/Functions/substring.cpp (100%)
 rename {dbms => src}/Functions/subtractDays.cpp (100%)
 rename {dbms => src}/Functions/subtractHours.cpp (100%)
 rename {dbms => src}/Functions/subtractMinutes.cpp (100%)
 rename {dbms => src}/Functions/subtractMonths.cpp (100%)
 rename {dbms => src}/Functions/subtractQuarters.cpp (100%)
 rename {dbms => src}/Functions/subtractSeconds.cpp (100%)
 rename {dbms => src}/Functions/subtractWeeks.cpp (100%)
 rename {dbms => src}/Functions/subtractYears.cpp (100%)
 rename {dbms => src}/Functions/sumburConsistentHash.cpp (100%)
 rename {dbms => src}/Functions/tan.cpp (100%)
 rename {dbms => src}/Functions/tanh.cpp (100%)
 rename {dbms => src}/Functions/tests/CMakeLists.txt (100%)
 rename {dbms => src}/Functions/tests/number_traits.cpp (100%)
 rename {dbms => src}/Functions/tgamma.cpp (100%)
 rename {dbms => src}/Functions/throwIf.cpp (100%)
 rename {dbms => src}/Functions/timeSlot.cpp (100%)
 rename {dbms => src}/Functions/timeSlots.cpp (100%)
 rename {dbms => src}/Functions/timezone.cpp (100%)
 rename {dbms => src}/Functions/toColumnTypeName.cpp (100%)
 rename {dbms => src}/Functions/toCustomWeek.cpp (100%)
 rename {dbms => src}/Functions/toDayOfMonth.cpp (100%)
 rename {dbms => src}/Functions/toDayOfWeek.cpp (100%)
 rename {dbms => src}/Functions/toDayOfYear.cpp (100%)
 rename {dbms => src}/Functions/toHour.cpp (100%)
 rename {dbms => src}/Functions/toISOWeek.cpp (100%)
 rename {dbms => src}/Functions/toISOYear.cpp (100%)
 rename {dbms => src}/Functions/toLowCardinality.cpp (100%)
 rename {dbms => src}/Functions/toMinute.cpp (100%)
 rename {dbms => src}/Functions/toMonday.cpp (100%)
 rename {dbms => src}/Functions/toMonth.cpp (100%)
 rename {dbms => src}/Functions/toNullable.cpp (100%)
 rename {dbms => src}/Functions/toQuarter.cpp (100%)
 rename {dbms => src}/Functions/toRelativeDayNum.cpp (100%)
 rename {dbms => src}/Functions/toRelativeHourNum.cpp (100%)
 rename {dbms => src}/Functions/toRelativeMinuteNum.cpp (100%)
 rename {dbms => src}/Functions/toRelativeMonthNum.cpp (100%)
 rename {dbms => src}/Functions/toRelativeQuarterNum.cpp (100%)
 rename {dbms => src}/Functions/toRelativeSecondNum.cpp (100%)
 rename {dbms => src}/Functions/toRelativeWeekNum.cpp (100%)
 rename {dbms => src}/Functions/toRelativeYearNum.cpp (100%)
 rename {dbms => src}/Functions/toSecond.cpp (100%)
 rename {dbms => src}/Functions/toStartOfDay.cpp (100%)
 rename {dbms => src}/Functions/toStartOfFifteenMinutes.cpp (100%)
 rename {dbms => src}/Functions/toStartOfFiveMinute.cpp (100%)
 rename {dbms => src}/Functions/toStartOfHour.cpp (100%)
 rename {dbms => src}/Functions/toStartOfISOYear.cpp (100%)
 rename {dbms => src}/Functions/toStartOfInterval.cpp (100%)
 rename {dbms => src}/Functions/toStartOfMinute.cpp (100%)
 rename {dbms => src}/Functions/toStartOfMonth.cpp (100%)
 rename {dbms => src}/Functions/toStartOfQuarter.cpp (100%)
 rename {dbms => src}/Functions/toStartOfTenMinutes.cpp (100%)
 rename {dbms => src}/Functions/toStartOfYear.cpp (100%)
 rename {dbms => src}/Functions/toTime.cpp (100%)
 rename {dbms => src}/Functions/toTimeZone.cpp (100%)
 rename {dbms => src}/Functions/toTypeName.cpp (100%)
 rename {dbms => src}/Functions/toValidUTF8.cpp (100%)
 rename {dbms => src}/Functions/toYYYYMM.cpp (100%)
 rename {dbms => src}/Functions/toYYYYMMDD.cpp (100%)
 rename {dbms => src}/Functions/toYYYYMMDDhhmmss.cpp (100%)
 rename {dbms => src}/Functions/toYear.cpp (100%)
 rename {dbms => src}/Functions/today.cpp (100%)
 rename {dbms => src}/Functions/transform.cpp (100%)
 rename {dbms => src}/Functions/trap.cpp (100%)
 rename {dbms => src}/Functions/trim.cpp (100%)
 rename {dbms => src}/Functions/tryBase64Decode.cpp (100%)
 rename {dbms => src}/Functions/tuple.cpp (100%)
 rename {dbms => src}/Functions/tupleElement.cpp (100%)
 rename {dbms => src}/Functions/upper.cpp (100%)
 rename {dbms => src}/Functions/upperUTF8.cpp (100%)
 rename {dbms => src}/Functions/uptime.cpp (100%)
 rename {dbms => src}/Functions/version.cpp (100%)
 rename {dbms => src}/Functions/visibleWidth.cpp (100%)
 rename {dbms => src}/Functions/visitParamExtractBool.cpp (100%)
 rename {dbms => src}/Functions/visitParamExtractFloat.cpp (100%)
 rename {dbms => src}/Functions/visitParamExtractInt.cpp (100%)
 rename {dbms => src}/Functions/visitParamExtractRaw.cpp (100%)
 rename {dbms => src}/Functions/visitParamExtractString.cpp (100%)
 rename {dbms => src}/Functions/visitParamExtractUInt.cpp (100%)
 rename {dbms => src}/Functions/visitParamHas.cpp (100%)
 rename {dbms => src}/Functions/yandexConsistentHash.cpp (100%)
 rename {dbms => src}/Functions/yesterday.cpp (100%)
 rename {dbms => src}/IO/AIO.cpp (100%)
 rename {dbms => src}/IO/AIO.h (100%)
 rename {dbms => src}/IO/AIOContextPool.cpp (100%)
 rename {dbms => src}/IO/AIOContextPool.h (100%)
 rename {dbms => src}/IO/AsynchronousWriteBuffer.h (100%)
 rename {dbms => src}/IO/BitHelpers.h (100%)
 rename {dbms => src}/IO/BrotliReadBuffer.cpp (100%)
 rename {dbms => src}/IO/BrotliReadBuffer.h (100%)
 rename {dbms => src}/IO/BrotliWriteBuffer.cpp (100%)
 rename {dbms => src}/IO/BrotliWriteBuffer.h (100%)
 rename {dbms => src}/IO/BufferBase.h (100%)
 rename {dbms => src}/IO/BufferWithOwnMemory.h (100%)
 rename {dbms => src}/IO/CMakeLists.txt (100%)
 rename {dbms => src}/IO/CascadeWriteBuffer.cpp (100%)
 rename {dbms => src}/IO/CascadeWriteBuffer.h (100%)
 rename {dbms => src}/IO/CompressionMethod.cpp (100%)
 rename {dbms => src}/IO/CompressionMethod.h (100%)
 rename {dbms => src}/IO/ConcatReadBuffer.h (100%)
 rename {dbms => src}/IO/ConnectionTimeouts.h (100%)
 rename {dbms => src}/IO/DoubleConverter.cpp (100%)
 rename {dbms => src}/IO/DoubleConverter.h (100%)
 rename {dbms => src}/IO/HDFSCommon.cpp (100%)
 rename {dbms => src}/IO/HDFSCommon.h (100%)
 rename {dbms => src}/IO/HTTPCommon.cpp (100%)
 rename {dbms => src}/IO/HTTPCommon.h (100%)
 rename {dbms => src}/IO/HashingReadBuffer.h (100%)
 rename {dbms => src}/IO/HashingWriteBuffer.cpp (100%)
 rename {dbms => src}/IO/HashingWriteBuffer.h (100%)
 rename {dbms => src}/IO/HexWriteBuffer.cpp (100%)
 rename {dbms => src}/IO/HexWriteBuffer.h (100%)
 rename {dbms => src}/IO/IReadableWriteBuffer.h (100%)
 rename {dbms => src}/IO/LimitReadBuffer.cpp (100%)
 rename {dbms => src}/IO/LimitReadBuffer.h (100%)
 rename {dbms => src}/IO/MMapReadBufferFromFile.cpp (100%)
 rename {dbms => src}/IO/MMapReadBufferFromFile.h (100%)
 rename {dbms => src}/IO/MMapReadBufferFromFileDescriptor.cpp (100%)
 rename {dbms => src}/IO/MMapReadBufferFromFileDescriptor.h (100%)
 rename {dbms => src}/IO/MemoryReadWriteBuffer.cpp (100%)
 rename {dbms => src}/IO/MemoryReadWriteBuffer.h (100%)
 rename {dbms => src}/IO/NullWriteBuffer.cpp (100%)
 rename {dbms => src}/IO/NullWriteBuffer.h (100%)
 rename {dbms => src}/IO/Operators.h (100%)
 rename {dbms => src}/IO/PeekableReadBuffer.cpp (100%)
 rename {dbms => src}/IO/PeekableReadBuffer.h (100%)
 rename {dbms => src}/IO/Progress.cpp (100%)
 rename {dbms => src}/IO/Progress.h (100%)
 rename {dbms => src}/IO/ReadBuffer.h (100%)
 rename {dbms => src}/IO/ReadBufferAIO.cpp (100%)
 rename {dbms => src}/IO/ReadBufferAIO.h (100%)
 rename {dbms => src}/IO/ReadBufferFromFile.cpp (100%)
 rename {dbms => src}/IO/ReadBufferFromFile.h (100%)
 rename {dbms => src}/IO/ReadBufferFromFileBase.cpp (100%)
 rename {dbms => src}/IO/ReadBufferFromFileBase.h (100%)
 rename {dbms => src}/IO/ReadBufferFromFileDescriptor.cpp (100%)
 rename {dbms => src}/IO/ReadBufferFromFileDescriptor.h (100%)
 rename {dbms => src}/IO/ReadBufferFromHDFS.cpp (100%)
 rename {dbms => src}/IO/ReadBufferFromHDFS.h (100%)
 rename {dbms => src}/IO/ReadBufferFromIStream.cpp (100%)
 rename {dbms => src}/IO/ReadBufferFromIStream.h (100%)
 rename {dbms => src}/IO/ReadBufferFromMemory.cpp (100%)
 rename {dbms => src}/IO/ReadBufferFromMemory.h (100%)
 rename {dbms => src}/IO/ReadBufferFromPocoSocket.cpp (100%)
 rename {dbms => src}/IO/ReadBufferFromPocoSocket.h (100%)
 rename {dbms => src}/IO/ReadBufferFromS3.cpp (100%)
 rename {dbms => src}/IO/ReadBufferFromS3.h (100%)
 rename {dbms => src}/IO/ReadBufferFromString.h (100%)
 rename {dbms => src}/IO/ReadHelpers.cpp (100%)
 rename {dbms => src}/IO/ReadHelpers.h (100%)
 rename {dbms => src}/IO/ReadWriteBufferFromHTTP.cpp (100%)
 rename {dbms => src}/IO/ReadWriteBufferFromHTTP.h (100%)
 rename {dbms => src}/IO/S3Common.cpp (100%)
 rename {dbms => src}/IO/S3Common.h (100%)
 rename {dbms => src}/IO/SeekableReadBuffer.h (100%)
 rename {dbms => src}/IO/UncompressedCache.h (100%)
 rename {dbms => src}/IO/UseSSL.cpp (100%)
 rename {dbms => src}/IO/UseSSL.h (100%)
 rename {dbms => src}/IO/VarInt.h (100%)
 rename {dbms => src}/IO/WriteBuffer.h (100%)
 rename {dbms => src}/IO/WriteBufferAIO.cpp (100%)
 rename {dbms => src}/IO/WriteBufferAIO.h (100%)
 rename {dbms => src}/IO/WriteBufferFromArena.h (100%)
 rename {dbms => src}/IO/WriteBufferFromFile.cpp (100%)
 rename {dbms => src}/IO/WriteBufferFromFile.h (100%)
 rename {dbms => src}/IO/WriteBufferFromFileBase.cpp (100%)
 rename {dbms => src}/IO/WriteBufferFromFileBase.h (100%)
 rename {dbms => src}/IO/WriteBufferFromFileDescriptor.cpp (100%)
 rename {dbms => src}/IO/WriteBufferFromFileDescriptor.h (100%)
 rename {dbms => src}/IO/WriteBufferFromFileDescriptorDiscardOnFailure.cpp (100%)
 rename {dbms => src}/IO/WriteBufferFromFileDescriptorDiscardOnFailure.h (100%)
 rename {dbms => src}/IO/WriteBufferFromHDFS.cpp (100%)
 rename {dbms => src}/IO/WriteBufferFromHDFS.h (100%)
 rename {dbms => src}/IO/WriteBufferFromHTTP.cpp (100%)
 rename {dbms => src}/IO/WriteBufferFromHTTP.h (100%)
 rename {dbms => src}/IO/WriteBufferFromHTTPServerResponse.cpp (100%)
 rename {dbms => src}/IO/WriteBufferFromHTTPServerResponse.h (100%)
 rename {dbms => src}/IO/WriteBufferFromOStream.cpp (100%)
 rename {dbms => src}/IO/WriteBufferFromOStream.h (100%)
 rename {dbms => src}/IO/WriteBufferFromPocoSocket.cpp (100%)
 rename {dbms => src}/IO/WriteBufferFromPocoSocket.h (100%)
 rename {dbms => src}/IO/WriteBufferFromS3.cpp (100%)
 rename {dbms => src}/IO/WriteBufferFromS3.h (100%)
 rename {dbms => src}/IO/WriteBufferFromString.h (100%)
 rename {dbms => src}/IO/WriteBufferFromTemporaryFile.cpp (100%)
 rename {dbms => src}/IO/WriteBufferFromTemporaryFile.h (100%)
 rename {dbms => src}/IO/WriteBufferFromVector.h (100%)
 rename {dbms => src}/IO/WriteBufferValidUTF8.cpp (100%)
 rename {dbms => src}/IO/WriteBufferValidUTF8.h (100%)
 rename {dbms => src}/IO/WriteHelpers.cpp (100%)
 rename {dbms => src}/IO/WriteHelpers.h (100%)
 rename {dbms => src}/IO/WriteIntText.h (100%)
 rename {dbms => src}/IO/ZlibDeflatingWriteBuffer.cpp (100%)
 rename {dbms => src}/IO/ZlibDeflatingWriteBuffer.h (100%)
 rename {dbms => src}/IO/ZlibInflatingReadBuffer.cpp (100%)
 rename {dbms => src}/IO/ZlibInflatingReadBuffer.h (100%)
 rename {dbms => src}/IO/copyData.cpp (100%)
 rename {dbms => src}/IO/copyData.h (100%)
 rename {dbms => src}/IO/createReadBufferFromFileBase.cpp (100%)
 rename {dbms => src}/IO/createReadBufferFromFileBase.h (100%)
 rename {dbms => src}/IO/createWriteBufferFromFileBase.cpp (100%)
 rename {dbms => src}/IO/createWriteBufferFromFileBase.h (100%)
 rename {dbms => src}/IO/parseDateTimeBestEffort.cpp (100%)
 rename {dbms => src}/IO/parseDateTimeBestEffort.h (100%)
 rename {dbms => src}/IO/readDecimalText.h (100%)
 rename {dbms => src}/IO/readFloatText.cpp (100%)
 rename {dbms => src}/IO/readFloatText.h (100%)
 rename {dbms => src}/IO/tests/CMakeLists.txt (100%)
 rename {dbms => src}/IO/tests/async_write.cpp (100%)
 rename {dbms => src}/IO/tests/gtest_DateTime64_parsing_and_writing.cpp (100%)
 rename {dbms => src}/IO/tests/gtest_DateTimeToString.cpp (100%)
 rename {dbms => src}/IO/tests/gtest_aio_seek_back_after_eof.cpp (100%)
 rename {dbms => src}/IO/tests/gtest_bit_io.cpp (100%)
 rename {dbms => src}/IO/tests/gtest_cascade_and_memory_write_buffer.cpp (100%)
 rename {dbms => src}/IO/tests/gtest_peekable_read_buffer.cpp (100%)
 rename {dbms => src}/IO/tests/hashing_buffer.h (100%)
 rename {dbms => src}/IO/tests/hashing_read_buffer.cpp (100%)
 rename {dbms => src}/IO/tests/hashing_write_buffer.cpp (100%)
 rename {dbms => src}/IO/tests/io_operators.cpp (100%)
 rename {dbms => src}/IO/tests/limit_read_buffer.cpp (100%)
 rename {dbms => src}/IO/tests/limit_read_buffer.reference (100%)
 rename {dbms => src}/IO/tests/limit_read_buffer.sh (100%)
 rename {dbms => src}/IO/tests/limit_read_buffer2.cpp (100%)
 rename {dbms => src}/IO/tests/mempbrk.cpp (100%)
 rename {dbms => src}/IO/tests/o_direct_and_dirty_pages.cpp (100%)
 rename {dbms => src}/IO/tests/parse_date_time_best_effort.cpp (100%)
 rename {dbms => src}/IO/tests/parse_int_perf.cpp (100%)
 rename {dbms => src}/IO/tests/parse_int_perf2.cpp (100%)
 rename {dbms => src}/IO/tests/read_buffer.cpp (100%)
 rename {dbms => src}/IO/tests/read_buffer_aio.cpp (100%)
 rename {dbms => src}/IO/tests/read_buffer_perf.cpp (100%)
 rename {dbms => src}/IO/tests/read_escaped_string.cpp (100%)
 rename {dbms => src}/IO/tests/read_float_perf.cpp (100%)
 rename {dbms => src}/IO/tests/read_write_int.cpp (100%)
 rename {dbms => src}/IO/tests/ryu_test.cpp (100%)
 rename {dbms => src}/IO/tests/valid_utf8.cpp (100%)
 rename {dbms => src}/IO/tests/valid_utf8_perf.cpp (100%)
 rename {dbms => src}/IO/tests/var_uint.cpp (100%)
 rename {dbms => src}/IO/tests/write_buffer.cpp (100%)
 rename {dbms => src}/IO/tests/write_buffer_aio.cpp (100%)
 rename {dbms => src}/IO/tests/write_buffer_perf.cpp (100%)
 rename {dbms => src}/IO/tests/write_int.cpp (100%)
 rename {dbms => src}/IO/tests/zlib_buffers.cpp (100%)
 rename {dbms => src}/IO/tests/zlib_ng_bug.cpp (100%)
 rename {dbms => src}/Interpreters/ActionLocksManager.cpp (100%)
 rename {dbms => src}/Interpreters/ActionLocksManager.h (100%)
 rename {dbms => src}/Interpreters/ActionsVisitor.cpp (100%)
 rename {dbms => src}/Interpreters/ActionsVisitor.h (100%)
 rename {dbms => src}/Interpreters/AddDefaultDatabaseVisitor.h (100%)
 rename {dbms => src}/Interpreters/AggregateDescription.h (100%)
 rename {dbms => src}/Interpreters/AggregationCommon.h (100%)
 rename {dbms => src}/Interpreters/Aggregator.cpp (100%)
 rename {dbms => src}/Interpreters/Aggregator.h (100%)
 rename {dbms => src}/Interpreters/Aliases.h (100%)
 rename {dbms => src}/Interpreters/AnalyzedJoin.cpp (100%)
 rename {dbms => src}/Interpreters/AnalyzedJoin.h (100%)
 rename {dbms => src}/Interpreters/ArrayJoinAction.cpp (100%)
 rename {dbms => src}/Interpreters/ArrayJoinAction.h (100%)
 rename {dbms => src}/Interpreters/ArrayJoinedColumnsVisitor.h (100%)
 rename {dbms => src}/Interpreters/AsteriskSemantic.h (100%)
 rename {dbms => src}/Interpreters/AsynchronousMetrics.cpp (100%)
 rename {dbms => src}/Interpreters/AsynchronousMetrics.h (100%)
 rename {dbms => src}/Interpreters/BloomFilter.cpp (100%)
 rename {dbms => src}/Interpreters/BloomFilter.h (100%)
 rename {dbms => src}/Interpreters/BloomFilterHash.h (100%)
 rename {dbms => src}/Interpreters/CMakeLists.txt (100%)
 rename {dbms => src}/Interpreters/CancellationCode.h (100%)
 rename {dbms => src}/Interpreters/CatBoostModel.cpp (100%)
 rename {dbms => src}/Interpreters/CatBoostModel.h (100%)
 rename {dbms => src}/Interpreters/ClientInfo.cpp (100%)
 rename {dbms => src}/Interpreters/ClientInfo.h (100%)
 rename {dbms => src}/Interpreters/Cluster.cpp (100%)
 rename {dbms => src}/Interpreters/Cluster.h (100%)
 rename {dbms => src}/Interpreters/ClusterProxy/IStreamFactory.h (100%)
 rename {dbms => src}/Interpreters/ClusterProxy/SelectStreamFactory.cpp (100%)
 rename {dbms => src}/Interpreters/ClusterProxy/SelectStreamFactory.h (100%)
 rename {dbms => src}/Interpreters/ClusterProxy/executeQuery.cpp (100%)
 rename {dbms => src}/Interpreters/ClusterProxy/executeQuery.h (100%)
 rename {dbms => src}/Interpreters/CollectJoinOnKeysVisitor.cpp (100%)
 rename {dbms => src}/Interpreters/CollectJoinOnKeysVisitor.h (100%)
 rename {dbms => src}/Interpreters/ColumnNamesContext.cpp (100%)
 rename {dbms => src}/Interpreters/ColumnNamesContext.h (100%)
 rename {dbms => src}/Interpreters/Context.cpp (100%)
 rename {dbms => src}/Interpreters/Context.h (100%)
 rename {dbms => src}/Interpreters/CrossToInnerJoinVisitor.cpp (100%)
 rename {dbms => src}/Interpreters/CrossToInnerJoinVisitor.h (100%)
 rename {dbms => src}/Interpreters/DDLWorker.cpp (100%)
 rename {dbms => src}/Interpreters/DDLWorker.h (100%)
 rename {dbms => src}/Interpreters/DNSCacheUpdater.cpp (100%)
 rename {dbms => src}/Interpreters/DNSCacheUpdater.h (100%)
 rename {dbms => src}/Interpreters/DatabaseAndTableWithAlias.cpp (100%)
 rename {dbms => src}/Interpreters/DatabaseAndTableWithAlias.h (100%)
 rename {dbms => src}/Interpreters/DatabaseCatalog.cpp (100%)
 rename {dbms => src}/Interpreters/DatabaseCatalog.h (100%)
 rename {dbms => src}/Interpreters/EmbeddedDictionaries.cpp (100%)
 rename {dbms => src}/Interpreters/EmbeddedDictionaries.h (100%)
 rename {dbms => src}/Interpreters/ExecuteScalarSubqueriesVisitor.cpp (100%)
 rename {dbms => src}/Interpreters/ExecuteScalarSubqueriesVisitor.h (100%)
 rename {dbms => src}/Interpreters/ExpressionActions.cpp (100%)
 rename {dbms => src}/Interpreters/ExpressionActions.h (100%)
 rename {dbms => src}/Interpreters/ExpressionAnalyzer.cpp (100%)
 rename {dbms => src}/Interpreters/ExpressionAnalyzer.h (100%)
 rename {dbms => src}/Interpreters/ExpressionJIT.cpp (100%)
 rename {dbms => src}/Interpreters/ExpressionJIT.h (100%)
 rename {dbms => src}/Interpreters/ExternalDictionariesLoader.cpp (100%)
 rename {dbms => src}/Interpreters/ExternalDictionariesLoader.h (100%)
 rename {dbms => src}/Interpreters/ExternalLoader.cpp (100%)
 rename {dbms => src}/Interpreters/ExternalLoader.h (100%)
 rename {dbms => src}/Interpreters/ExternalLoaderDatabaseConfigRepository.cpp (100%)
 rename {dbms => src}/Interpreters/ExternalLoaderDatabaseConfigRepository.h (100%)
 rename {dbms => src}/Interpreters/ExternalLoaderTempConfigRepository.cpp (100%)
 rename {dbms => src}/Interpreters/ExternalLoaderTempConfigRepository.h (100%)
 rename {dbms => src}/Interpreters/ExternalLoaderXMLConfigRepository.cpp (100%)
 rename {dbms => src}/Interpreters/ExternalLoaderXMLConfigRepository.h (100%)
 rename {dbms => src}/Interpreters/ExternalModelsLoader.cpp (100%)
 rename {dbms => src}/Interpreters/ExternalModelsLoader.h (100%)
 rename {dbms => src}/Interpreters/ExtractExpressionInfoVisitor.cpp (100%)
 rename {dbms => src}/Interpreters/ExtractExpressionInfoVisitor.h (100%)
 rename {dbms => src}/Interpreters/FillingRow.cpp (100%)
 rename {dbms => src}/Interpreters/FillingRow.h (100%)
 rename {dbms => src}/Interpreters/GetAggregatesVisitor.h (100%)
 rename {dbms => src}/Interpreters/GlobalSubqueriesVisitor.h (100%)
 rename {dbms => src}/Interpreters/IExternalLoadable.cpp (100%)
 rename {dbms => src}/Interpreters/IExternalLoadable.h (100%)
 rename {dbms => src}/Interpreters/IExternalLoaderConfigRepository.h (100%)
 rename {dbms => src}/Interpreters/IInterpreter.h (100%)
 rename {dbms => src}/Interpreters/IJoin.h (100%)
 rename {dbms => src}/Interpreters/IdentifierSemantic.cpp (100%)
 rename {dbms => src}/Interpreters/IdentifierSemantic.h (100%)
 rename {dbms => src}/Interpreters/InDepthNodeVisitor.h (100%)
 rename {dbms => src}/Interpreters/InJoinSubqueriesPreprocessor.cpp (100%)
 rename {dbms => src}/Interpreters/InJoinSubqueriesPreprocessor.h (100%)
 rename {dbms => src}/Interpreters/InternalTextLogsQueue.cpp (100%)
 rename {dbms => src}/Interpreters/InternalTextLogsQueue.h (100%)
 rename {dbms => src}/Interpreters/InterpreterAlterQuery.cpp (100%)
 rename {dbms => src}/Interpreters/InterpreterAlterQuery.h (100%)
 rename {dbms => src}/Interpreters/InterpreterCheckQuery.cpp (100%)
 rename {dbms => src}/Interpreters/InterpreterCheckQuery.h (100%)
 rename {dbms => src}/Interpreters/InterpreterCreateQuery.cpp (100%)
 rename {dbms => src}/Interpreters/InterpreterCreateQuery.h (100%)
 rename {dbms => src}/Interpreters/InterpreterCreateQuotaQuery.cpp (100%)
 rename {dbms => src}/Interpreters/InterpreterCreateQuotaQuery.h (100%)
 rename {dbms => src}/Interpreters/InterpreterCreateRoleQuery.cpp (100%)
 rename {dbms => src}/Interpreters/InterpreterCreateRoleQuery.h (100%)
 rename {dbms => src}/Interpreters/InterpreterCreateRowPolicyQuery.cpp (100%)
 rename {dbms => src}/Interpreters/InterpreterCreateRowPolicyQuery.h (100%)
 rename {dbms => src}/Interpreters/InterpreterCreateSettingsProfileQuery.cpp (100%)
 rename {dbms => src}/Interpreters/InterpreterCreateSettingsProfileQuery.h (100%)
 rename {dbms => src}/Interpreters/InterpreterCreateUserQuery.cpp (100%)
 rename {dbms => src}/Interpreters/InterpreterCreateUserQuery.h (100%)
 rename {dbms => src}/Interpreters/InterpreterDescribeQuery.cpp (100%)
 rename {dbms => src}/Interpreters/InterpreterDescribeQuery.h (100%)
 rename {dbms => src}/Interpreters/InterpreterDropAccessEntityQuery.cpp (100%)
 rename {dbms => src}/Interpreters/InterpreterDropAccessEntityQuery.h (100%)
 rename {dbms => src}/Interpreters/InterpreterDropQuery.cpp (100%)
 rename {dbms => src}/Interpreters/InterpreterDropQuery.h (100%)
 rename {dbms => src}/Interpreters/InterpreterExistsQuery.cpp (100%)
 rename {dbms => src}/Interpreters/InterpreterExistsQuery.h (100%)
 rename {dbms => src}/Interpreters/InterpreterExplainQuery.cpp (100%)
 rename {dbms => src}/Interpreters/InterpreterExplainQuery.h (100%)
 rename {dbms => src}/Interpreters/InterpreterFactory.cpp (100%)
 rename {dbms => src}/Interpreters/InterpreterFactory.h (100%)
 rename {dbms => src}/Interpreters/InterpreterGrantQuery.cpp (100%)
 rename {dbms => src}/Interpreters/InterpreterGrantQuery.h (100%)
 rename {dbms => src}/Interpreters/InterpreterInsertQuery.cpp (100%)
 rename {dbms => src}/Interpreters/InterpreterInsertQuery.h (100%)
 rename {dbms => src}/Interpreters/InterpreterKillQueryQuery.cpp (100%)
 rename {dbms => src}/Interpreters/InterpreterKillQueryQuery.h (100%)
 rename {dbms => src}/Interpreters/InterpreterOptimizeQuery.cpp (100%)
 rename {dbms => src}/Interpreters/InterpreterOptimizeQuery.h (100%)
 rename {dbms => src}/Interpreters/InterpreterRenameQuery.cpp (100%)
 rename {dbms => src}/Interpreters/InterpreterRenameQuery.h (100%)
 rename {dbms => src}/Interpreters/InterpreterSelectQuery.cpp (100%)
 rename {dbms => src}/Interpreters/InterpreterSelectQuery.h (100%)
 rename {dbms => src}/Interpreters/InterpreterSelectWithUnionQuery.cpp (100%)
 rename {dbms => src}/Interpreters/InterpreterSelectWithUnionQuery.h (100%)
 rename {dbms => src}/Interpreters/InterpreterSetQuery.cpp (100%)
 rename {dbms => src}/Interpreters/InterpreterSetQuery.h (100%)
 rename {dbms => src}/Interpreters/InterpreterSetRoleQuery.cpp (100%)
 rename {dbms => src}/Interpreters/InterpreterSetRoleQuery.h (100%)
 rename {dbms => src}/Interpreters/InterpreterShowCreateAccessEntityQuery.cpp (100%)
 rename {dbms => src}/Interpreters/InterpreterShowCreateAccessEntityQuery.h (100%)
 rename {dbms => src}/Interpreters/InterpreterShowCreateQuery.cpp (100%)
 rename {dbms => src}/Interpreters/InterpreterShowCreateQuery.h (100%)
 rename {dbms => src}/Interpreters/InterpreterShowGrantsQuery.cpp (100%)
 rename {dbms => src}/Interpreters/InterpreterShowGrantsQuery.h (100%)
 rename {dbms => src}/Interpreters/InterpreterShowProcesslistQuery.cpp (100%)
 rename {dbms => src}/Interpreters/InterpreterShowProcesslistQuery.h (100%)
 rename {dbms => src}/Interpreters/InterpreterShowQuotasQuery.cpp (100%)
 rename {dbms => src}/Interpreters/InterpreterShowQuotasQuery.h (100%)
 rename {dbms => src}/Interpreters/InterpreterShowRowPoliciesQuery.cpp (100%)
 rename {dbms => src}/Interpreters/InterpreterShowRowPoliciesQuery.h (100%)
 rename {dbms => src}/Interpreters/InterpreterShowTablesQuery.cpp (100%)
 rename {dbms => src}/Interpreters/InterpreterShowTablesQuery.h (100%)
 rename {dbms => src}/Interpreters/InterpreterSystemQuery.cpp (100%)
 rename {dbms => src}/Interpreters/InterpreterSystemQuery.h (100%)
 rename {dbms => src}/Interpreters/InterpreterUseQuery.cpp (100%)
 rename {dbms => src}/Interpreters/InterpreterUseQuery.h (100%)
 rename {dbms => src}/Interpreters/InterpreterWatchQuery.cpp (100%)
 rename {dbms => src}/Interpreters/InterpreterWatchQuery.h (100%)
 rename {dbms => src}/Interpreters/InterserverIOHandler.h (100%)
 rename {dbms => src}/Interpreters/Join.cpp (100%)
 rename {dbms => src}/Interpreters/Join.h (100%)
 rename {dbms => src}/Interpreters/JoinSwitcher.cpp (100%)
 rename {dbms => src}/Interpreters/JoinSwitcher.h (100%)
 rename {dbms => src}/Interpreters/JoinToSubqueryTransformVisitor.cpp (100%)
 rename {dbms => src}/Interpreters/JoinToSubqueryTransformVisitor.h (100%)
 rename {dbms => src}/Interpreters/JoinedTables.cpp (100%)
 rename {dbms => src}/Interpreters/JoinedTables.h (100%)
 rename {dbms => src}/Interpreters/LogicalExpressionsOptimizer.cpp (100%)
 rename {dbms => src}/Interpreters/LogicalExpressionsOptimizer.h (100%)
 rename {dbms => src}/Interpreters/MarkTableIdentifiersVisitor.cpp (100%)
 rename {dbms => src}/Interpreters/MarkTableIdentifiersVisitor.h (100%)
 rename {dbms => src}/Interpreters/MergeJoin.cpp (100%)
 rename {dbms => src}/Interpreters/MergeJoin.h (100%)
 rename {dbms => src}/Interpreters/MetricLog.cpp (100%)
 rename {dbms => src}/Interpreters/MetricLog.h (100%)
 rename {dbms => src}/Interpreters/MutationsInterpreter.cpp (100%)
 rename {dbms => src}/Interpreters/MutationsInterpreter.h (100%)
 rename {dbms => src}/Interpreters/NullableUtils.cpp (100%)
 rename {dbms => src}/Interpreters/NullableUtils.h (100%)
 rename {dbms => src}/Interpreters/OptimizeIfChains.cpp (100%)
 rename {dbms => src}/Interpreters/OptimizeIfChains.h (100%)
 rename {dbms => src}/Interpreters/OptimizeIfWithConstantConditionVisitor.cpp (100%)
 rename {dbms => src}/Interpreters/OptimizeIfWithConstantConditionVisitor.h (100%)
 rename {dbms => src}/Interpreters/PartLog.cpp (100%)
 rename {dbms => src}/Interpreters/PartLog.h (100%)
 rename {dbms => src}/Interpreters/PredicateExpressionsOptimizer.cpp (100%)
 rename {dbms => src}/Interpreters/PredicateExpressionsOptimizer.h (100%)
 rename {dbms => src}/Interpreters/PredicateRewriteVisitor.cpp (100%)
 rename {dbms => src}/Interpreters/PredicateRewriteVisitor.h (100%)
 rename {dbms => src}/Interpreters/PreparedSets.h (100%)
 rename {dbms => src}/Interpreters/ProcessList.cpp (100%)
 rename {dbms => src}/Interpreters/ProcessList.h (100%)
 rename {dbms => src}/Interpreters/ProfileEventsExt.cpp (100%)
 rename {dbms => src}/Interpreters/ProfileEventsExt.h (100%)
 rename {dbms => src}/Interpreters/QueryAliasesVisitor.cpp (100%)
 rename {dbms => src}/Interpreters/QueryAliasesVisitor.h (100%)
 rename {dbms => src}/Interpreters/QueryLog.cpp (100%)
 rename {dbms => src}/Interpreters/QueryLog.h (100%)
 rename {dbms => src}/Interpreters/QueryNormalizer.cpp (100%)
 rename {dbms => src}/Interpreters/QueryNormalizer.h (100%)
 rename {dbms => src}/Interpreters/QueryPriorities.h (100%)
 rename {dbms => src}/Interpreters/QueryThreadLog.cpp (100%)
 rename {dbms => src}/Interpreters/QueryThreadLog.h (100%)
 rename {dbms => src}/Interpreters/ReplaceQueryParameterVisitor.cpp (100%)
 rename {dbms => src}/Interpreters/ReplaceQueryParameterVisitor.h (100%)
 rename {dbms => src}/Interpreters/RequiredSourceColumnsVisitor.cpp (100%)
 rename {dbms => src}/Interpreters/RequiredSourceColumnsVisitor.h (100%)
 rename {dbms => src}/Interpreters/RowRefs.cpp (100%)
 rename {dbms => src}/Interpreters/RowRefs.h (100%)
 rename {dbms => src}/Interpreters/SelectQueryOptions.h (100%)
 rename {dbms => src}/Interpreters/Set.cpp (100%)
 rename {dbms => src}/Interpreters/Set.h (100%)
 rename {dbms => src}/Interpreters/SetVariants.cpp (100%)
 rename {dbms => src}/Interpreters/SetVariants.h (100%)
 rename {dbms => src}/Interpreters/StorageID.cpp (100%)
 rename {dbms => src}/Interpreters/StorageID.h (100%)
 rename {dbms => src}/Interpreters/SubqueryForSet.cpp (100%)
 rename {dbms => src}/Interpreters/SubqueryForSet.h (100%)
 rename {dbms => src}/Interpreters/SyntaxAnalyzer.cpp (100%)
 rename {dbms => src}/Interpreters/SyntaxAnalyzer.h (100%)
 rename {dbms => src}/Interpreters/SystemLog.cpp (100%)
 rename {dbms => src}/Interpreters/SystemLog.h (100%)
 rename {dbms => src}/Interpreters/TablesStatus.cpp (100%)
 rename {dbms => src}/Interpreters/TablesStatus.h (100%)
 rename {dbms => src}/Interpreters/TextLog.cpp (100%)
 rename {dbms => src}/Interpreters/TextLog.h (100%)
 rename {dbms => src}/Interpreters/ThreadStatusExt.cpp (100%)
 rename {dbms => src}/Interpreters/TraceLog.cpp (100%)
 rename {dbms => src}/Interpreters/TraceLog.h (100%)
 rename {dbms => src}/Interpreters/TranslateQualifiedNamesVisitor.cpp (100%)
 rename {dbms => src}/Interpreters/TranslateQualifiedNamesVisitor.h (100%)
 rename {dbms => src}/Interpreters/addMissingDefaults.cpp (100%)
 rename {dbms => src}/Interpreters/addMissingDefaults.h (100%)
 rename {dbms => src}/Interpreters/addTypeConversionToAST.cpp (100%)
 rename {dbms => src}/Interpreters/addTypeConversionToAST.h (100%)
 rename {dbms => src}/Interpreters/asof.h (100%)
 rename {dbms => src}/Interpreters/castColumn.cpp (100%)
 rename {dbms => src}/Interpreters/castColumn.h (100%)
 rename {dbms => src}/Interpreters/convertFieldToType.cpp (100%)
 rename {dbms => src}/Interpreters/convertFieldToType.h (100%)
 rename {dbms => src}/Interpreters/createBlockSelector.cpp (100%)
 rename {dbms => src}/Interpreters/createBlockSelector.h (100%)
 rename {dbms => src}/Interpreters/evaluateConstantExpression.cpp (100%)
 rename {dbms => src}/Interpreters/evaluateConstantExpression.h (100%)
 rename {dbms => src}/Interpreters/executeQuery.cpp (100%)
 rename {dbms => src}/Interpreters/executeQuery.h (100%)
 rename {dbms => src}/Interpreters/getClusterName.cpp (100%)
 rename {dbms => src}/Interpreters/getClusterName.h (100%)
 rename {dbms => src}/Interpreters/getTableExpressions.cpp (100%)
 rename {dbms => src}/Interpreters/getTableExpressions.h (100%)
 rename {dbms => src}/Interpreters/inplaceBlockConversions.cpp (100%)
 rename {dbms => src}/Interpreters/inplaceBlockConversions.h (100%)
 rename {dbms => src}/Interpreters/interpretSubquery.cpp (100%)
 rename {dbms => src}/Interpreters/interpretSubquery.h (100%)
 rename {dbms => src}/Interpreters/joinDispatch.h (100%)
 rename {dbms => src}/Interpreters/join_common.cpp (100%)
 rename {dbms => src}/Interpreters/join_common.h (100%)
 rename {dbms => src}/Interpreters/loadMetadata.cpp (100%)
 rename {dbms => src}/Interpreters/loadMetadata.h (100%)
 rename {dbms => src}/Interpreters/misc.h (100%)
 rename {dbms => src}/Interpreters/sortBlock.cpp (100%)
 rename {dbms => src}/Interpreters/sortBlock.h (100%)
 rename {dbms => src}/Interpreters/tests/CMakeLists.txt (100%)
 rename {dbms => src}/Interpreters/tests/aggregate.cpp (100%)
 rename {dbms => src}/Interpreters/tests/create_query.cpp (100%)
 rename {dbms => src}/Interpreters/tests/expression.cpp (100%)
 rename {dbms => src}/Interpreters/tests/expression_analyzer.cpp (100%)
 rename {dbms => src}/Interpreters/tests/gtest_cycle_aliases.cpp (100%)
 rename {dbms => src}/Interpreters/tests/gtest_merge_tree_set_index.cpp (100%)
 rename {dbms => src}/Interpreters/tests/hash_map.cpp (100%)
 rename {dbms => src}/Interpreters/tests/hash_map3.cpp (100%)
 rename {dbms => src}/Interpreters/tests/hash_map_lookup.cpp (100%)
 rename {dbms => src}/Interpreters/tests/hash_map_string.cpp (100%)
 rename {dbms => src}/Interpreters/tests/hash_map_string_2.cpp (100%)
 rename {dbms => src}/Interpreters/tests/hash_map_string_3.cpp (100%)
 rename {dbms => src}/Interpreters/tests/hash_map_string_small.cpp (100%)
 rename {dbms => src}/Interpreters/tests/in_join_subqueries_preprocessor.cpp (100%)
 rename {dbms => src}/Interpreters/tests/internal_iotop.cpp (100%)
 rename {dbms => src}/Interpreters/tests/logical_expressions_optimizer.cpp (100%)
 rename {dbms => src}/Interpreters/tests/select_query.cpp (100%)
 rename {dbms => src}/Interpreters/tests/string_hash_map.cpp (100%)
 rename {dbms => src}/Interpreters/tests/two_level_hash_map.cpp (100%)
 rename {dbms => src}/Interpreters/tests/users.cpp (100%)
 rename {dbms => src}/NOTICE (100%)
 rename {dbms => src}/Parsers/ASTAlterQuery.cpp (100%)
 rename {dbms => src}/Parsers/ASTAlterQuery.h (100%)
 rename {dbms => src}/Parsers/ASTAssignment.h (100%)
 rename {dbms => src}/Parsers/ASTAsterisk.cpp (100%)
 rename {dbms => src}/Parsers/ASTAsterisk.h (100%)
 rename {dbms => src}/Parsers/ASTCheckQuery.h (100%)
 rename {dbms => src}/Parsers/ASTColumnDeclaration.cpp (100%)
 rename {dbms => src}/Parsers/ASTColumnDeclaration.h (100%)
 rename {dbms => src}/Parsers/ASTColumnsMatcher.cpp (100%)
 rename {dbms => src}/Parsers/ASTColumnsMatcher.h (100%)
 rename {dbms => src}/Parsers/ASTConstraintDeclaration.cpp (100%)
 rename {dbms => src}/Parsers/ASTConstraintDeclaration.h (100%)
 rename {dbms => src}/Parsers/ASTCreateQuery.cpp (100%)
 rename {dbms => src}/Parsers/ASTCreateQuery.h (100%)
 rename {dbms => src}/Parsers/ASTCreateQuotaQuery.cpp (100%)
 rename {dbms => src}/Parsers/ASTCreateQuotaQuery.h (100%)
 rename {dbms => src}/Parsers/ASTCreateRoleQuery.cpp (100%)
 rename {dbms => src}/Parsers/ASTCreateRoleQuery.h (100%)
 rename {dbms => src}/Parsers/ASTCreateRowPolicyQuery.cpp (100%)
 rename {dbms => src}/Parsers/ASTCreateRowPolicyQuery.h (100%)
 rename {dbms => src}/Parsers/ASTCreateSettingsProfileQuery.cpp (100%)
 rename {dbms => src}/Parsers/ASTCreateSettingsProfileQuery.h (100%)
 rename {dbms => src}/Parsers/ASTCreateUserQuery.cpp (100%)
 rename {dbms => src}/Parsers/ASTCreateUserQuery.h (100%)
 rename {dbms => src}/Parsers/ASTDictionary.cpp (100%)
 rename {dbms => src}/Parsers/ASTDictionary.h (100%)
 rename {dbms => src}/Parsers/ASTDictionaryAttributeDeclaration.cpp (100%)
 rename {dbms => src}/Parsers/ASTDictionaryAttributeDeclaration.h (100%)
 rename {dbms => src}/Parsers/ASTDropAccessEntityQuery.cpp (100%)
 rename {dbms => src}/Parsers/ASTDropAccessEntityQuery.h (100%)
 rename {dbms => src}/Parsers/ASTDropQuery.cpp (100%)
 rename {dbms => src}/Parsers/ASTDropQuery.h (100%)
 rename {dbms => src}/Parsers/ASTEnumElement.h (100%)
 rename {dbms => src}/Parsers/ASTExplainQuery.h (100%)
 rename {dbms => src}/Parsers/ASTExpressionList.cpp (100%)
 rename {dbms => src}/Parsers/ASTExpressionList.h (100%)
 rename {dbms => src}/Parsers/ASTExtendedRoleSet.cpp (100%)
 rename {dbms => src}/Parsers/ASTExtendedRoleSet.h (100%)
 rename {dbms => src}/Parsers/ASTFunction.cpp (100%)
 rename {dbms => src}/Parsers/ASTFunction.h (100%)
 rename {dbms => src}/Parsers/ASTFunctionWithKeyValueArguments.cpp (100%)
 rename {dbms => src}/Parsers/ASTFunctionWithKeyValueArguments.h (100%)
 rename {dbms => src}/Parsers/ASTGrantQuery.cpp (100%)
 rename {dbms => src}/Parsers/ASTGrantQuery.h (100%)
 rename {dbms => src}/Parsers/ASTIdentifier.cpp (100%)
 rename {dbms => src}/Parsers/ASTIdentifier.h (100%)
 rename {dbms => src}/Parsers/ASTIndexDeclaration.h (100%)
 rename {dbms => src}/Parsers/ASTInsertQuery.cpp (100%)
 rename {dbms => src}/Parsers/ASTInsertQuery.h (100%)
 rename {dbms => src}/Parsers/ASTKillQueryQuery.cpp (100%)
 rename {dbms => src}/Parsers/ASTKillQueryQuery.h (100%)
 rename {dbms => src}/Parsers/ASTLiteral.cpp (100%)
 rename {dbms => src}/Parsers/ASTLiteral.h (100%)
 rename {dbms => src}/Parsers/ASTNameTypePair.h (100%)
 rename {dbms => src}/Parsers/ASTOptimizeQuery.cpp (100%)
 rename {dbms => src}/Parsers/ASTOptimizeQuery.h (100%)
 rename {dbms => src}/Parsers/ASTOrderByElement.cpp (100%)
 rename {dbms => src}/Parsers/ASTOrderByElement.h (100%)
 rename {dbms => src}/Parsers/ASTPartition.cpp (100%)
 rename {dbms => src}/Parsers/ASTPartition.h (100%)
 rename {dbms => src}/Parsers/ASTQualifiedAsterisk.cpp (100%)
 rename {dbms => src}/Parsers/ASTQualifiedAsterisk.h (100%)
 rename {dbms => src}/Parsers/ASTQueryParameter.cpp (100%)
 rename {dbms => src}/Parsers/ASTQueryParameter.h (100%)
 rename {dbms => src}/Parsers/ASTQueryWithOnCluster.cpp (100%)
 rename {dbms => src}/Parsers/ASTQueryWithOnCluster.h (100%)
 rename {dbms => src}/Parsers/ASTQueryWithOutput.cpp (100%)
 rename {dbms => src}/Parsers/ASTQueryWithOutput.h (100%)
 rename {dbms => src}/Parsers/ASTQueryWithTableAndOutput.cpp (100%)
 rename {dbms => src}/Parsers/ASTQueryWithTableAndOutput.h (100%)
 rename {dbms => src}/Parsers/ASTRenameQuery.h (100%)
 rename {dbms => src}/Parsers/ASTSampleRatio.cpp (100%)
 rename {dbms => src}/Parsers/ASTSampleRatio.h (100%)
 rename {dbms => src}/Parsers/ASTSelectQuery.cpp (100%)
 rename {dbms => src}/Parsers/ASTSelectQuery.h (100%)
 rename {dbms => src}/Parsers/ASTSelectWithUnionQuery.cpp (100%)
 rename {dbms => src}/Parsers/ASTSelectWithUnionQuery.h (100%)
 rename {dbms => src}/Parsers/ASTSetQuery.h (100%)
 rename {dbms => src}/Parsers/ASTSetRoleQuery.cpp (100%)
 rename {dbms => src}/Parsers/ASTSetRoleQuery.h (100%)
 rename {dbms => src}/Parsers/ASTSettingsProfileElement.cpp (100%)
 rename {dbms => src}/Parsers/ASTSettingsProfileElement.h (100%)
 rename {dbms => src}/Parsers/ASTShowCreateAccessEntityQuery.cpp (100%)
 rename {dbms => src}/Parsers/ASTShowCreateAccessEntityQuery.h (100%)
 rename {dbms => src}/Parsers/ASTShowGrantsQuery.cpp (100%)
 rename {dbms => src}/Parsers/ASTShowGrantsQuery.h (100%)
 rename {dbms => src}/Parsers/ASTShowProcesslistQuery.h (100%)
 rename {dbms => src}/Parsers/ASTShowQuotasQuery.cpp (100%)
 rename {dbms => src}/Parsers/ASTShowQuotasQuery.h (100%)
 rename {dbms => src}/Parsers/ASTShowRowPoliciesQuery.cpp (100%)
 rename {dbms => src}/Parsers/ASTShowRowPoliciesQuery.h (100%)
 rename {dbms => src}/Parsers/ASTShowTablesQuery.cpp (100%)
 rename {dbms => src}/Parsers/ASTShowTablesQuery.h (100%)
 rename {dbms => src}/Parsers/ASTSubquery.cpp (100%)
 rename {dbms => src}/Parsers/ASTSubquery.h (100%)
 rename {dbms => src}/Parsers/ASTSystemQuery.cpp (100%)
 rename {dbms => src}/Parsers/ASTSystemQuery.h (100%)
 rename {dbms => src}/Parsers/ASTTTLElement.cpp (100%)
 rename {dbms => src}/Parsers/ASTTTLElement.h (100%)
 rename {dbms => src}/Parsers/ASTTablesInSelectQuery.cpp (100%)
 rename {dbms => src}/Parsers/ASTTablesInSelectQuery.h (100%)
 rename {dbms => src}/Parsers/ASTUseQuery.h (100%)
 rename {dbms => src}/Parsers/ASTWatchQuery.h (100%)
 rename {dbms => src}/Parsers/ASTWithAlias.cpp (100%)
 rename {dbms => src}/Parsers/ASTWithAlias.h (100%)
 rename {dbms => src}/Parsers/CMakeLists.txt (88%)
 rename {dbms => src}/Parsers/CommonParsers.cpp (100%)
 rename {dbms => src}/Parsers/CommonParsers.h (100%)
 rename {dbms => src}/Parsers/DumpASTNode.h (100%)
 rename {dbms => src}/Parsers/ExpressionElementParsers.cpp (100%)
 rename {dbms => src}/Parsers/ExpressionElementParsers.h (100%)
 rename {dbms => src}/Parsers/ExpressionListParsers.cpp (100%)
 rename {dbms => src}/Parsers/ExpressionListParsers.h (100%)
 rename {dbms => src}/Parsers/IAST.cpp (100%)
 rename {dbms => src}/Parsers/IAST.h (100%)
 rename {dbms => src}/Parsers/IAST_fwd.h (100%)
 rename {dbms => src}/Parsers/IParser.h (100%)
 rename {dbms => src}/Parsers/IParserBase.cpp (100%)
 rename {dbms => src}/Parsers/IParserBase.h (100%)
 rename {dbms => src}/Parsers/IdentifierQuotingStyle.h (100%)
 rename {dbms => src}/Parsers/Lexer.cpp (100%)
 rename {dbms => src}/Parsers/Lexer.h (100%)
 rename {dbms => src}/Parsers/ParserAlterQuery.cpp (100%)
 rename {dbms => src}/Parsers/ParserAlterQuery.h (100%)
 rename {dbms => src}/Parsers/ParserCase.cpp (100%)
 rename {dbms => src}/Parsers/ParserCase.h (100%)
 rename {dbms => src}/Parsers/ParserCheckQuery.cpp (100%)
 rename {dbms => src}/Parsers/ParserCheckQuery.h (100%)
 rename {dbms => src}/Parsers/ParserCreateQuery.cpp (100%)
 rename {dbms => src}/Parsers/ParserCreateQuery.h (100%)
 rename {dbms => src}/Parsers/ParserCreateQuotaQuery.cpp (100%)
 rename {dbms => src}/Parsers/ParserCreateQuotaQuery.h (100%)
 rename {dbms => src}/Parsers/ParserCreateRoleQuery.cpp (100%)
 rename {dbms => src}/Parsers/ParserCreateRoleQuery.h (100%)
 rename {dbms => src}/Parsers/ParserCreateRowPolicyQuery.cpp (100%)
 rename {dbms => src}/Parsers/ParserCreateRowPolicyQuery.h (100%)
 rename {dbms => src}/Parsers/ParserCreateSettingsProfileQuery.cpp (100%)
 rename {dbms => src}/Parsers/ParserCreateSettingsProfileQuery.h (100%)
 rename {dbms => src}/Parsers/ParserCreateUserQuery.cpp (100%)
 rename {dbms => src}/Parsers/ParserCreateUserQuery.h (100%)
 rename {dbms => src}/Parsers/ParserDescribeTableQuery.cpp (100%)
 rename {dbms => src}/Parsers/ParserDescribeTableQuery.h (100%)
 rename {dbms => src}/Parsers/ParserDictionary.cpp (100%)
 rename {dbms => src}/Parsers/ParserDictionary.h (100%)
 rename {dbms => src}/Parsers/ParserDictionaryAttributeDeclaration.cpp (100%)
 rename {dbms => src}/Parsers/ParserDictionaryAttributeDeclaration.h (100%)
 rename {dbms => src}/Parsers/ParserDropAccessEntityQuery.cpp (100%)
 rename {dbms => src}/Parsers/ParserDropAccessEntityQuery.h (100%)
 rename {dbms => src}/Parsers/ParserDropQuery.cpp (100%)
 rename {dbms => src}/Parsers/ParserDropQuery.h (100%)
 rename {dbms => src}/Parsers/ParserExtendedRoleSet.cpp (100%)
 rename {dbms => src}/Parsers/ParserExtendedRoleSet.h (100%)
 rename {dbms => src}/Parsers/ParserGrantQuery.cpp (100%)
 rename {dbms => src}/Parsers/ParserGrantQuery.h (100%)
 rename {dbms => src}/Parsers/ParserInsertQuery.cpp (100%)
 rename {dbms => src}/Parsers/ParserInsertQuery.h (100%)
 rename {dbms => src}/Parsers/ParserKillQueryQuery.cpp (100%)
 rename {dbms => src}/Parsers/ParserKillQueryQuery.h (100%)
 rename {dbms => src}/Parsers/ParserOptimizeQuery.cpp (100%)
 rename {dbms => src}/Parsers/ParserOptimizeQuery.h (100%)
 rename {dbms => src}/Parsers/ParserPartition.cpp (100%)
 rename {dbms => src}/Parsers/ParserPartition.h (100%)
 rename {dbms => src}/Parsers/ParserQuery.cpp (100%)
 rename {dbms => src}/Parsers/ParserQuery.h (100%)
 rename {dbms => src}/Parsers/ParserQueryWithOutput.cpp (100%)
 rename {dbms => src}/Parsers/ParserQueryWithOutput.h (100%)
 rename {dbms => src}/Parsers/ParserRenameQuery.cpp (100%)
 rename {dbms => src}/Parsers/ParserRenameQuery.h (100%)
 rename {dbms => src}/Parsers/ParserSampleRatio.cpp (100%)
 rename {dbms => src}/Parsers/ParserSampleRatio.h (100%)
 rename {dbms => src}/Parsers/ParserSelectQuery.cpp (100%)
 rename {dbms => src}/Parsers/ParserSelectQuery.h (100%)
 rename {dbms => src}/Parsers/ParserSelectWithUnionQuery.cpp (100%)
 rename {dbms => src}/Parsers/ParserSelectWithUnionQuery.h (100%)
 rename {dbms => src}/Parsers/ParserSetQuery.cpp (100%)
 rename {dbms => src}/Parsers/ParserSetQuery.h (100%)
 rename {dbms => src}/Parsers/ParserSetRoleQuery.cpp (100%)
 rename {dbms => src}/Parsers/ParserSetRoleQuery.h (100%)
 rename {dbms => src}/Parsers/ParserSettingsProfileElement.cpp (100%)
 rename {dbms => src}/Parsers/ParserSettingsProfileElement.h (100%)
 rename {dbms => src}/Parsers/ParserShowCreateAccessEntityQuery.cpp (100%)
 rename {dbms => src}/Parsers/ParserShowCreateAccessEntityQuery.h (100%)
 rename {dbms => src}/Parsers/ParserShowGrantsQuery.cpp (100%)
 rename {dbms => src}/Parsers/ParserShowGrantsQuery.h (100%)
 rename {dbms => src}/Parsers/ParserShowProcesslistQuery.h (100%)
 rename {dbms => src}/Parsers/ParserShowQuotasQuery.cpp (100%)
 rename {dbms => src}/Parsers/ParserShowQuotasQuery.h (100%)
 rename {dbms => src}/Parsers/ParserShowRowPoliciesQuery.cpp (100%)
 rename {dbms => src}/Parsers/ParserShowRowPoliciesQuery.h (100%)
 rename {dbms => src}/Parsers/ParserShowTablesQuery.cpp (100%)
 rename {dbms => src}/Parsers/ParserShowTablesQuery.h (100%)
 rename {dbms => src}/Parsers/ParserSystemQuery.cpp (100%)
 rename {dbms => src}/Parsers/ParserSystemQuery.h (100%)
 rename {dbms => src}/Parsers/ParserTablePropertiesQuery.cpp (100%)
 rename {dbms => src}/Parsers/ParserTablePropertiesQuery.h (100%)
 rename {dbms => src}/Parsers/ParserTablesInSelectQuery.cpp (100%)
 rename {dbms => src}/Parsers/ParserTablesInSelectQuery.h (100%)
 rename {dbms => src}/Parsers/ParserUnionQueryElement.cpp (100%)
 rename {dbms => src}/Parsers/ParserUnionQueryElement.h (100%)
 rename {dbms => src}/Parsers/ParserUseQuery.cpp (100%)
 rename {dbms => src}/Parsers/ParserUseQuery.h (100%)
 rename {dbms => src}/Parsers/ParserWatchQuery.cpp (100%)
 rename {dbms => src}/Parsers/ParserWatchQuery.h (100%)
 rename {dbms => src}/Parsers/StringRange.h (100%)
 rename {dbms => src}/Parsers/TablePropertiesQueriesASTs.h (100%)
 rename {dbms => src}/Parsers/TokenIterator.cpp (100%)
 rename {dbms => src}/Parsers/TokenIterator.h (100%)
 rename {dbms => src}/Parsers/formatAST.cpp (100%)
 rename {dbms => src}/Parsers/formatAST.h (100%)
 rename {dbms => src}/Parsers/iostream_debug_helpers.cpp (100%)
 rename {dbms => src}/Parsers/iostream_debug_helpers.h (100%)
 rename {dbms => src}/Parsers/parseDatabaseAndTableName.cpp (100%)
 rename {dbms => src}/Parsers/parseDatabaseAndTableName.h (100%)
 rename {dbms => src}/Parsers/parseIdentifierOrStringLiteral.cpp (100%)
 rename {dbms => src}/Parsers/parseIdentifierOrStringLiteral.h (100%)
 rename {dbms => src}/Parsers/parseIntervalKind.cpp (100%)
 rename {dbms => src}/Parsers/parseIntervalKind.h (100%)
 rename {dbms => src}/Parsers/parseQuery.cpp (100%)
 rename {dbms => src}/Parsers/parseQuery.h (100%)
 rename {dbms => src}/Parsers/parseUserName.cpp (100%)
 rename {dbms => src}/Parsers/parseUserName.h (100%)
 rename {dbms => src}/Parsers/queryToString.cpp (100%)
 rename {dbms => src}/Parsers/queryToString.h (100%)
 rename {dbms => src}/Parsers/tests/CMakeLists.txt (100%)
 rename {dbms => src}/Parsers/tests/create_parser.cpp (100%)
 rename {dbms => src}/Parsers/tests/gtest_dictionary_parser.cpp (100%)
 rename {dbms => src}/Parsers/tests/lexer.cpp (100%)
 rename {dbms => src}/Parsers/tests/select_parser.cpp (100%)
 rename {dbms => src}/Processors/CMakeLists.txt (100%)
 rename {dbms => src}/Processors/Chunk.cpp (100%)
 rename {dbms => src}/Processors/Chunk.h (100%)
 rename {dbms => src}/Processors/ConcatProcessor.cpp (100%)
 rename {dbms => src}/Processors/ConcatProcessor.h (100%)
 rename {dbms => src}/Processors/DelayedPortsProcessor.cpp (100%)
 rename {dbms => src}/Processors/DelayedPortsProcessor.h (100%)
 rename {dbms => src}/Processors/Executors/ParallelPipelineExecutor.cpp (100%)
 rename {dbms => src}/Processors/Executors/ParallelPipelineExecutor.h (100%)
 rename {dbms => src}/Processors/Executors/PipelineExecutor.cpp (100%)
 rename {dbms => src}/Processors/Executors/PipelineExecutor.h (100%)
 rename {dbms => src}/Processors/Executors/SequentialPipelineExecutor.cpp (100%)
 rename {dbms => src}/Processors/Executors/SequentialPipelineExecutor.h (100%)
 rename {dbms => src}/Processors/Executors/ThreadsQueue.h (100%)
 rename {dbms => src}/Processors/Executors/TreeExecutorBlockInputStream.cpp (100%)
 rename {dbms => src}/Processors/Executors/TreeExecutorBlockInputStream.h (100%)
 rename {dbms => src}/Processors/Executors/traverse.h (100%)
 rename {dbms => src}/Processors/ForkProcessor.cpp (100%)
 rename {dbms => src}/Processors/ForkProcessor.h (100%)
 rename {dbms => src}/Processors/Formats/IInputFormat.cpp (100%)
 rename {dbms => src}/Processors/Formats/IInputFormat.h (100%)
 rename {dbms => src}/Processors/Formats/IOutputFormat.cpp (100%)
 rename {dbms => src}/Processors/Formats/IOutputFormat.h (100%)
 rename {dbms => src}/Processors/Formats/IRowInputFormat.cpp (100%)
 rename {dbms => src}/Processors/Formats/IRowInputFormat.h (100%)
 rename {dbms => src}/Processors/Formats/IRowOutputFormat.cpp (100%)
 rename {dbms => src}/Processors/Formats/IRowOutputFormat.h (100%)
 rename {dbms => src}/Processors/Formats/Impl/ArrowColumnToCHColumn.cpp (100%)
 rename {dbms => src}/Processors/Formats/Impl/ArrowColumnToCHColumn.h (100%)
 rename {dbms => src}/Processors/Formats/Impl/AvroRowInputFormat.cpp (100%)
 rename {dbms => src}/Processors/Formats/Impl/AvroRowInputFormat.h (100%)
 rename {dbms => src}/Processors/Formats/Impl/AvroRowOutputFormat.cpp (100%)
 rename {dbms => src}/Processors/Formats/Impl/AvroRowOutputFormat.h (100%)
 rename {dbms => src}/Processors/Formats/Impl/BinaryRowInputFormat.cpp (100%)
 rename {dbms => src}/Processors/Formats/Impl/BinaryRowInputFormat.h (100%)
 rename {dbms => src}/Processors/Formats/Impl/BinaryRowOutputFormat.cpp (100%)
 rename {dbms => src}/Processors/Formats/Impl/BinaryRowOutputFormat.h (100%)
 rename {dbms => src}/Processors/Formats/Impl/CMakeLists.txt (100%)
 rename {dbms => src}/Processors/Formats/Impl/CSVRowInputFormat.cpp (100%)
 rename {dbms => src}/Processors/Formats/Impl/CSVRowInputFormat.h (100%)
 rename {dbms => src}/Processors/Formats/Impl/CSVRowOutputFormat.cpp (100%)
 rename {dbms => src}/Processors/Formats/Impl/CSVRowOutputFormat.h (100%)
 rename {dbms => src}/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp (100%)
 rename {dbms => src}/Processors/Formats/Impl/CapnProtoRowInputFormat.h (100%)
 rename {dbms => src}/Processors/Formats/Impl/ConstantExpressionTemplate.cpp (100%)
 rename {dbms => src}/Processors/Formats/Impl/ConstantExpressionTemplate.h (100%)
 rename {dbms => src}/Processors/Formats/Impl/JSONCompactEachRowRowInputFormat.cpp (100%)
 rename {dbms => src}/Processors/Formats/Impl/JSONCompactEachRowRowInputFormat.h (100%)
 rename {dbms => src}/Processors/Formats/Impl/JSONCompactEachRowRowOutputFormat.cpp (100%)
 rename {dbms => src}/Processors/Formats/Impl/JSONCompactEachRowRowOutputFormat.h (100%)
 rename {dbms => src}/Processors/Formats/Impl/JSONCompactRowOutputFormat.cpp (100%)
 rename {dbms => src}/Processors/Formats/Impl/JSONCompactRowOutputFormat.h (100%)
{dbms => src}/Processors/Formats/Impl/JSONCompactRowOutputFormat.h (100%) rename {dbms => src}/Processors/Formats/Impl/JSONEachRowRowInputFormat.cpp (100%) rename {dbms => src}/Processors/Formats/Impl/JSONEachRowRowInputFormat.h (100%) rename {dbms => src}/Processors/Formats/Impl/JSONEachRowRowOutputFormat.cpp (100%) rename {dbms => src}/Processors/Formats/Impl/JSONEachRowRowOutputFormat.h (100%) rename {dbms => src}/Processors/Formats/Impl/JSONEachRowWithProgressRowOutputFormat.cpp (100%) rename {dbms => src}/Processors/Formats/Impl/JSONEachRowWithProgressRowOutputFormat.h (100%) rename {dbms => src}/Processors/Formats/Impl/JSONRowOutputFormat.cpp (100%) rename {dbms => src}/Processors/Formats/Impl/JSONRowOutputFormat.h (100%) rename {dbms => src}/Processors/Formats/Impl/MySQLOutputFormat.cpp (100%) rename {dbms => src}/Processors/Formats/Impl/MySQLOutputFormat.h (100%) rename {dbms => src}/Processors/Formats/Impl/NativeFormat.cpp (100%) rename {dbms => src}/Processors/Formats/Impl/NullFormat.cpp (100%) rename {dbms => src}/Processors/Formats/Impl/ODBCDriver2BlockOutputFormat.cpp (100%) rename {dbms => src}/Processors/Formats/Impl/ODBCDriver2BlockOutputFormat.h (100%) rename {dbms => src}/Processors/Formats/Impl/ODBCDriverBlockOutputFormat.cpp (100%) rename {dbms => src}/Processors/Formats/Impl/ODBCDriverBlockOutputFormat.h (100%) rename {dbms => src}/Processors/Formats/Impl/ORCBlockInputFormat.cpp (100%) rename {dbms => src}/Processors/Formats/Impl/ORCBlockInputFormat.h (100%) rename {dbms => src}/Processors/Formats/Impl/ParquetBlockInputFormat.cpp (100%) rename {dbms => src}/Processors/Formats/Impl/ParquetBlockInputFormat.h (100%) rename {dbms => src}/Processors/Formats/Impl/ParquetBlockOutputFormat.cpp (100%) rename {dbms => src}/Processors/Formats/Impl/ParquetBlockOutputFormat.h (100%) rename {dbms => src}/Processors/Formats/Impl/PrettyBlockOutputFormat.cpp (100%) rename {dbms => src}/Processors/Formats/Impl/PrettyBlockOutputFormat.h (100%) rename {dbms => src}/Processors/Formats/Impl/PrettyCompactBlockOutputFormat.cpp (100%) rename {dbms => src}/Processors/Formats/Impl/PrettyCompactBlockOutputFormat.h (100%) rename {dbms => src}/Processors/Formats/Impl/PrettySpaceBlockOutputFormat.cpp (100%) rename {dbms => src}/Processors/Formats/Impl/PrettySpaceBlockOutputFormat.h (100%) rename {dbms => src}/Processors/Formats/Impl/ProtobufRowInputFormat.cpp (100%) rename {dbms => src}/Processors/Formats/Impl/ProtobufRowInputFormat.h (100%) rename {dbms => src}/Processors/Formats/Impl/ProtobufRowOutputFormat.cpp (100%) rename {dbms => src}/Processors/Formats/Impl/ProtobufRowOutputFormat.h (100%) rename {dbms => src}/Processors/Formats/Impl/RegexpRowInputFormat.cpp (100%) rename {dbms => src}/Processors/Formats/Impl/RegexpRowInputFormat.h (100%) rename {dbms => src}/Processors/Formats/Impl/TSKVRowInputFormat.cpp (100%) rename {dbms => src}/Processors/Formats/Impl/TSKVRowInputFormat.h (100%) rename {dbms => src}/Processors/Formats/Impl/TSKVRowOutputFormat.cpp (100%) rename {dbms => src}/Processors/Formats/Impl/TSKVRowOutputFormat.h (100%) rename {dbms => src}/Processors/Formats/Impl/TabSeparatedRawRowOutputFormat.h (100%) rename {dbms => src}/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp (100%) rename {dbms => src}/Processors/Formats/Impl/TabSeparatedRowInputFormat.h (100%) rename {dbms => src}/Processors/Formats/Impl/TabSeparatedRowOutputFormat.cpp (100%) rename {dbms => src}/Processors/Formats/Impl/TabSeparatedRowOutputFormat.h (100%) rename {dbms => 
src}/Processors/Formats/Impl/TemplateBlockOutputFormat.cpp (100%) rename {dbms => src}/Processors/Formats/Impl/TemplateBlockOutputFormat.h (100%) rename {dbms => src}/Processors/Formats/Impl/TemplateRowInputFormat.cpp (100%) rename {dbms => src}/Processors/Formats/Impl/TemplateRowInputFormat.h (100%) rename {dbms => src}/Processors/Formats/Impl/ValuesBlockInputFormat.cpp (100%) rename {dbms => src}/Processors/Formats/Impl/ValuesBlockInputFormat.h (100%) rename {dbms => src}/Processors/Formats/Impl/ValuesRowOutputFormat.cpp (100%) rename {dbms => src}/Processors/Formats/Impl/ValuesRowOutputFormat.h (100%) rename {dbms => src}/Processors/Formats/Impl/VerticalRowOutputFormat.cpp (100%) rename {dbms => src}/Processors/Formats/Impl/VerticalRowOutputFormat.h (100%) rename {dbms => src}/Processors/Formats/Impl/XMLRowOutputFormat.cpp (100%) rename {dbms => src}/Processors/Formats/Impl/XMLRowOutputFormat.h (100%) rename {dbms => src}/Processors/Formats/InputStreamFromInputFormat.h (100%) rename {dbms => src}/Processors/Formats/LazyOutputFormat.cpp (100%) rename {dbms => src}/Processors/Formats/LazyOutputFormat.h (100%) rename {dbms => src}/Processors/Formats/OutputStreamToOutputFormat.cpp (100%) rename {dbms => src}/Processors/Formats/OutputStreamToOutputFormat.h (100%) rename {dbms => src}/Processors/Formats/RowInputFormatWithDiagnosticInfo.cpp (100%) rename {dbms => src}/Processors/Formats/RowInputFormatWithDiagnosticInfo.h (100%) rename {dbms => src}/Processors/IAccumulatingTransform.cpp (100%) rename {dbms => src}/Processors/IAccumulatingTransform.h (100%) rename {dbms => src}/Processors/IInflatingTransform.cpp (100%) rename {dbms => src}/Processors/IInflatingTransform.h (100%) rename {dbms => src}/Processors/IProcessor.cpp (100%) rename {dbms => src}/Processors/IProcessor.h (100%) rename {dbms => src}/Processors/ISimpleTransform.cpp (100%) rename {dbms => src}/Processors/ISimpleTransform.h (100%) rename {dbms => src}/Processors/ISink.cpp (100%) rename {dbms => src}/Processors/ISink.h (100%) rename {dbms => src}/Processors/ISource.cpp (100%) rename {dbms => src}/Processors/ISource.h (100%) rename {dbms => src}/Processors/LimitTransform.cpp (100%) rename {dbms => src}/Processors/LimitTransform.h (100%) rename {dbms => src}/Processors/NullSink.h (100%) rename {dbms => src}/Processors/Pipe.cpp (100%) rename {dbms => src}/Processors/Pipe.h (100%) rename {dbms => src}/Processors/Port.cpp (100%) rename {dbms => src}/Processors/Port.h (100%) rename {dbms => src}/Processors/QueryPipeline.cpp (100%) rename {dbms => src}/Processors/QueryPipeline.h (100%) rename {dbms => src}/Processors/QueueBuffer.h (100%) rename {dbms => src}/Processors/ResizeProcessor.cpp (100%) rename {dbms => src}/Processors/ResizeProcessor.h (100%) rename {dbms => src}/Processors/RowsBeforeLimitCounter.h (100%) rename {dbms => src}/Processors/Sources/NullSource.h (100%) rename {dbms => src}/Processors/Sources/SinkToOutputStream.cpp (100%) rename {dbms => src}/Processors/Sources/SinkToOutputStream.h (100%) rename {dbms => src}/Processors/Sources/SourceFromInputStream.cpp (100%) rename {dbms => src}/Processors/Sources/SourceFromInputStream.h (100%) rename {dbms => src}/Processors/Sources/SourceFromSingleChunk.h (100%) rename {dbms => src}/Processors/Sources/SourceWithProgress.cpp (100%) rename {dbms => src}/Processors/Sources/SourceWithProgress.h (100%) rename {dbms => src}/Processors/Transforms/AddingConstColumnTransform.h (100%) rename {dbms => src}/Processors/Transforms/AddingMissedTransform.cpp (100%) rename {dbms => 
src}/Processors/Transforms/AddingMissedTransform.h (100%) rename {dbms => src}/Processors/Transforms/AggregatingTransform.cpp (100%) rename {dbms => src}/Processors/Transforms/AggregatingTransform.h (100%) rename {dbms => src}/Processors/Transforms/ConvertingTransform.cpp (100%) rename {dbms => src}/Processors/Transforms/ConvertingTransform.h (100%) rename {dbms => src}/Processors/Transforms/CreatingSetsTransform.cpp (100%) rename {dbms => src}/Processors/Transforms/CreatingSetsTransform.h (100%) rename {dbms => src}/Processors/Transforms/CubeTransform.cpp (100%) rename {dbms => src}/Processors/Transforms/CubeTransform.h (100%) rename {dbms => src}/Processors/Transforms/DistinctTransform.cpp (100%) rename {dbms => src}/Processors/Transforms/DistinctTransform.h (100%) rename {dbms => src}/Processors/Transforms/ExpressionTransform.cpp (100%) rename {dbms => src}/Processors/Transforms/ExpressionTransform.h (100%) rename {dbms => src}/Processors/Transforms/ExtremesTransform.cpp (100%) rename {dbms => src}/Processors/Transforms/ExtremesTransform.h (100%) rename {dbms => src}/Processors/Transforms/FillingTransform.cpp (100%) rename {dbms => src}/Processors/Transforms/FillingTransform.h (100%) rename {dbms => src}/Processors/Transforms/FilterTransform.cpp (100%) rename {dbms => src}/Processors/Transforms/FilterTransform.h (100%) rename {dbms => src}/Processors/Transforms/FinishSortingTransform.cpp (100%) rename {dbms => src}/Processors/Transforms/FinishSortingTransform.h (100%) rename {dbms => src}/Processors/Transforms/InflatingExpressionTransform.cpp (100%) rename {dbms => src}/Processors/Transforms/InflatingExpressionTransform.h (100%) rename {dbms => src}/Processors/Transforms/LimitByTransform.cpp (100%) rename {dbms => src}/Processors/Transforms/LimitByTransform.h (100%) rename {dbms => src}/Processors/Transforms/LimitsCheckingTransform.cpp (100%) rename {dbms => src}/Processors/Transforms/LimitsCheckingTransform.h (100%) rename {dbms => src}/Processors/Transforms/MaterializingTransform.cpp (100%) rename {dbms => src}/Processors/Transforms/MaterializingTransform.h (100%) rename {dbms => src}/Processors/Transforms/MergeSortingTransform.cpp (100%) rename {dbms => src}/Processors/Transforms/MergeSortingTransform.h (100%) rename {dbms => src}/Processors/Transforms/MergingAggregatedMemoryEfficientTransform.cpp (100%) rename {dbms => src}/Processors/Transforms/MergingAggregatedMemoryEfficientTransform.h (100%) rename {dbms => src}/Processors/Transforms/MergingAggregatedTransform.cpp (100%) rename {dbms => src}/Processors/Transforms/MergingAggregatedTransform.h (100%) rename {dbms => src}/Processors/Transforms/MergingSortedTransform.cpp (100%) rename {dbms => src}/Processors/Transforms/MergingSortedTransform.h (100%) rename {dbms => src}/Processors/Transforms/PartialSortingTransform.cpp (100%) rename {dbms => src}/Processors/Transforms/PartialSortingTransform.h (100%) rename {dbms => src}/Processors/Transforms/ReverseTransform.cpp (100%) rename {dbms => src}/Processors/Transforms/ReverseTransform.h (100%) rename {dbms => src}/Processors/Transforms/RollupTransform.cpp (100%) rename {dbms => src}/Processors/Transforms/RollupTransform.h (100%) rename {dbms => src}/Processors/Transforms/SortingTransform.cpp (100%) rename {dbms => src}/Processors/Transforms/SortingTransform.h (100%) rename {dbms => src}/Processors/Transforms/TotalsHavingTransform.cpp (100%) rename {dbms => src}/Processors/Transforms/TotalsHavingTransform.h (100%) rename {dbms => src}/Processors/printPipeline.h (100%) rename {dbms => 
src}/Processors/tests/CMakeLists.txt (100%) rename {dbms => src}/Processors/tests/gtest_exception_on_incorrect_pipeline.cpp (100%) rename {dbms => src}/Processors/tests/processors_test.cpp (100%) rename {dbms => src}/Processors/tests/processors_test_aggregation.cpp (100%) rename {dbms => src}/Processors/tests/processors_test_chain.cpp (100%) rename {dbms => src}/Processors/tests/processors_test_expand_pipeline.cpp (100%) rename {dbms => src}/Processors/tests/processors_test_merge.cpp (100%) rename {dbms => src}/Processors/tests/processors_test_merge_sorting_transform.cpp (100%) rename {dbms => src}/Processors/tests/processors_test_merging_sorted_transform.cpp (100%) rename {dbms => src}/Storages/AlterCommands.cpp (100%) rename {dbms => src}/Storages/AlterCommands.h (100%) rename {dbms => src}/Storages/CMakeLists.txt (100%) rename {dbms => src}/Storages/CheckResults.h (100%) rename {dbms => src}/Storages/ColumnCodec.h (100%) rename {dbms => src}/Storages/ColumnDefault.cpp (100%) rename {dbms => src}/Storages/ColumnDefault.h (100%) rename {dbms => src}/Storages/ColumnDependency.h (100%) rename {dbms => src}/Storages/ColumnsDescription.cpp (100%) rename {dbms => src}/Storages/ColumnsDescription.h (100%) rename {dbms => src}/Storages/CompressionCodecSelector.h (100%) rename {dbms => src}/Storages/ConstraintsDescription.cpp (100%) rename {dbms => src}/Storages/ConstraintsDescription.h (100%) rename {dbms => src}/Storages/Distributed/DirectoryMonitor.cpp (100%) rename {dbms => src}/Storages/Distributed/DirectoryMonitor.h (100%) rename {dbms => src}/Storages/Distributed/DistributedBlockOutputStream.cpp (100%) rename {dbms => src}/Storages/Distributed/DistributedBlockOutputStream.h (100%) rename {dbms => src}/Storages/IStorage.cpp (100%) rename {dbms => src}/Storages/IStorage.h (100%) rename {dbms => src}/Storages/IStorage_fwd.h (100%) rename {dbms => src}/Storages/IndicesDescription.cpp (100%) rename {dbms => src}/Storages/IndicesDescription.h (100%) rename {dbms => src}/Storages/Kafka/Buffer_fwd.h (100%) rename {dbms => src}/Storages/Kafka/KafkaBlockInputStream.cpp (100%) rename {dbms => src}/Storages/Kafka/KafkaBlockInputStream.h (100%) rename {dbms => src}/Storages/Kafka/KafkaBlockOutputStream.cpp (100%) rename {dbms => src}/Storages/Kafka/KafkaBlockOutputStream.h (100%) rename {dbms => src}/Storages/Kafka/KafkaSettings.cpp (100%) rename {dbms => src}/Storages/Kafka/KafkaSettings.h (100%) rename {dbms => src}/Storages/Kafka/ReadBufferFromKafkaConsumer.cpp (100%) rename {dbms => src}/Storages/Kafka/ReadBufferFromKafkaConsumer.h (100%) rename {dbms => src}/Storages/Kafka/StorageKafka.cpp (100%) rename {dbms => src}/Storages/Kafka/StorageKafka.h (100%) rename {dbms => src}/Storages/Kafka/WriteBufferToKafkaProducer.cpp (100%) rename {dbms => src}/Storages/Kafka/WriteBufferToKafkaProducer.h (100%) rename {dbms => src}/Storages/LiveView/LiveViewBlockInputStream.h (100%) rename {dbms => src}/Storages/LiveView/LiveViewBlockOutputStream.h (100%) rename {dbms => src}/Storages/LiveView/LiveViewCommands.h (100%) rename {dbms => src}/Storages/LiveView/LiveViewEventsBlockInputStream.h (100%) rename {dbms => src}/Storages/LiveView/StorageBlocks.h (100%) rename {dbms => src}/Storages/LiveView/StorageLiveView.cpp (100%) rename {dbms => src}/Storages/LiveView/StorageLiveView.h (100%) rename {dbms => src}/Storages/MarkCache.h (100%) rename {dbms => src}/Storages/MergeTree/ActiveDataPartSet.cpp (100%) rename {dbms => src}/Storages/MergeTree/ActiveDataPartSet.h (100%) rename {dbms => 
src}/Storages/MergeTree/AllMergeSelector.cpp (100%) rename {dbms => src}/Storages/MergeTree/AllMergeSelector.h (100%) rename {dbms => src}/Storages/MergeTree/BackgroundProcessingPool.cpp (100%) rename {dbms => src}/Storages/MergeTree/BackgroundProcessingPool.h (100%) rename {dbms => src}/Storages/MergeTree/BoolMask.cpp (100%) rename {dbms => src}/Storages/MergeTree/BoolMask.h (100%) rename {dbms => src}/Storages/MergeTree/DataPartsExchange.cpp (100%) rename {dbms => src}/Storages/MergeTree/DataPartsExchange.h (100%) rename {dbms => src}/Storages/MergeTree/EphemeralLockInZooKeeper.cpp (100%) rename {dbms => src}/Storages/MergeTree/EphemeralLockInZooKeeper.h (100%) rename {dbms => src}/Storages/MergeTree/IMergeTreeDataPart.cpp (100%) rename {dbms => src}/Storages/MergeTree/IMergeTreeDataPart.h (100%) rename {dbms => src}/Storages/MergeTree/IMergeTreeDataPartWriter.cpp (100%) rename {dbms => src}/Storages/MergeTree/IMergeTreeDataPartWriter.h (100%) rename {dbms => src}/Storages/MergeTree/IMergeTreeReader.cpp (100%) rename {dbms => src}/Storages/MergeTree/IMergeTreeReader.h (100%) rename {dbms => src}/Storages/MergeTree/IMergedBlockOutputStream.cpp (100%) rename {dbms => src}/Storages/MergeTree/IMergedBlockOutputStream.h (100%) rename {dbms => src}/Storages/MergeTree/KeyCondition.cpp (100%) rename {dbms => src}/Storages/MergeTree/KeyCondition.h (100%) rename {dbms => src}/Storages/MergeTree/LevelMergeSelector.cpp (100%) rename {dbms => src}/Storages/MergeTree/LevelMergeSelector.h (100%) rename {dbms => src}/Storages/MergeTree/MarkRange.h (100%) rename {dbms => src}/Storages/MergeTree/MergeList.cpp (100%) rename {dbms => src}/Storages/MergeTree/MergeList.h (100%) rename {dbms => src}/Storages/MergeTree/MergeSelector.h (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeBaseSelectProcessor.h (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeBlockOutputStream.cpp (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeBlockOutputStream.h (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeBlockReadUtils.cpp (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeBlockReadUtils.h (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeData.cpp (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeData.h (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeDataFormatVersion.h (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeDataMergerMutator.cpp (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeDataMergerMutator.h (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeDataPartChecksum.cpp (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeDataPartChecksum.h (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeDataPartCompact.cpp (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeDataPartCompact.h (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeDataPartTTLInfo.cpp (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeDataPartTTLInfo.h (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeDataPartType.cpp (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeDataPartType.h (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeDataPartWide.cpp (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeDataPartWide.h (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeDataPartWriterCompact.h (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp (100%) 
rename {dbms => src}/Storages/MergeTree/MergeTreeDataPartWriterWide.h (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeDataSelectExecutor.h (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeDataWriter.cpp (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeDataWriter.h (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeIOSettings.h (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeIndexAggregatorBloomFilter.cpp (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeIndexAggregatorBloomFilter.h (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeIndexBloomFilter.h (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.cpp (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.h (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeIndexFullText.cpp (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeIndexFullText.h (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeIndexGranularity.cpp (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeIndexGranularity.h (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeIndexGranularityInfo.cpp (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeIndexGranularityInfo.h (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeIndexGranularityInfo.h.gch (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeIndexGranuleBloomFilter.cpp (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeIndexGranuleBloomFilter.h (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeIndexMinMax.cpp (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeIndexMinMax.h (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeIndexReader.cpp (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeIndexReader.h (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeIndexSet.cpp (99%) rename {dbms => src}/Storages/MergeTree/MergeTreeIndexSet.h (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeIndices.cpp (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeIndices.h (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeMarksLoader.cpp (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeMarksLoader.h (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeMutationEntry.cpp (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeMutationEntry.h (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeMutationStatus.h (100%) rename {dbms => src}/Storages/MergeTree/MergeTreePartInfo.cpp (100%) rename {dbms => src}/Storages/MergeTree/MergeTreePartInfo.h (100%) rename {dbms => src}/Storages/MergeTree/MergeTreePartition.cpp (100%) rename {dbms => src}/Storages/MergeTree/MergeTreePartition.h (100%) rename {dbms => src}/Storages/MergeTree/MergeTreePartsMover.cpp (100%) rename {dbms => src}/Storages/MergeTree/MergeTreePartsMover.h (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeRangeReader.cpp (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeRangeReader.h (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeReadPool.cpp (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeReadPool.h (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeReaderCompact.cpp (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeReaderCompact.h (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeReaderStream.cpp (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeReaderStream.h (100%) rename {dbms => 
src}/Storages/MergeTree/MergeTreeReaderWide.cpp (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeReaderWide.h (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeReverseSelectProcessor.cpp (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeReverseSelectProcessor.h (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeSelectProcessor.cpp (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeSelectProcessor.h (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeSequentialBlockInputStream.cpp (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeSequentialBlockInputStream.h (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeSettings.cpp (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeSettings.h (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeThreadSelectBlockInputProcessor.cpp (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeThreadSelectBlockInputProcessor.h (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeWhereOptimizer.cpp (100%) rename {dbms => src}/Storages/MergeTree/MergeTreeWhereOptimizer.h (100%) rename {dbms => src}/Storages/MergeTree/MergedBlockOutputStream.cpp (100%) rename {dbms => src}/Storages/MergeTree/MergedBlockOutputStream.h (100%) rename {dbms => src}/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp (100%) rename {dbms => src}/Storages/MergeTree/MergedColumnOnlyOutputStream.h (100%) rename {dbms => src}/Storages/MergeTree/PartDestinationType.h (100%) rename {dbms => src}/Storages/MergeTree/RPNBuilder.h (100%) rename {dbms => src}/Storages/MergeTree/RangesInDataPart.h (100%) rename {dbms => src}/Storages/MergeTree/ReplicatedMergeTreeAddress.cpp (100%) rename {dbms => src}/Storages/MergeTree/ReplicatedMergeTreeAddress.h (100%) rename {dbms => src}/Storages/MergeTree/ReplicatedMergeTreeAltersSequence.cpp (100%) rename {dbms => src}/Storages/MergeTree/ReplicatedMergeTreeAltersSequence.h (100%) rename {dbms => src}/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.cpp (100%) rename {dbms => src}/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.h (100%) rename {dbms => src}/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp (100%) rename {dbms => src}/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.h (100%) rename {dbms => src}/Storages/MergeTree/ReplicatedMergeTreeLogEntry.cpp (100%) rename {dbms => src}/Storages/MergeTree/ReplicatedMergeTreeLogEntry.h (100%) rename {dbms => src}/Storages/MergeTree/ReplicatedMergeTreeMutationEntry.cpp (100%) rename {dbms => src}/Storages/MergeTree/ReplicatedMergeTreeMutationEntry.h (100%) rename {dbms => src}/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp (100%) rename {dbms => src}/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.h (100%) rename {dbms => src}/Storages/MergeTree/ReplicatedMergeTreePartHeader.cpp (100%) rename {dbms => src}/Storages/MergeTree/ReplicatedMergeTreePartHeader.h (100%) rename {dbms => src}/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp (100%) rename {dbms => src}/Storages/MergeTree/ReplicatedMergeTreeQueue.h (100%) rename {dbms => src}/Storages/MergeTree/ReplicatedMergeTreeQuorumAddedParts.h (100%) rename {dbms => src}/Storages/MergeTree/ReplicatedMergeTreeQuorumEntry.h (100%) rename {dbms => src}/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp (100%) rename {dbms => src}/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.h (100%) rename {dbms => src}/Storages/MergeTree/ReplicatedMergeTreeTableMetadata.cpp (100%) rename {dbms => src}/Storages/MergeTree/ReplicatedMergeTreeTableMetadata.h (100%) rename 
{dbms => src}/Storages/MergeTree/SimpleMergeSelector.cpp (100%) rename {dbms => src}/Storages/MergeTree/SimpleMergeSelector.h (100%) rename {dbms => src}/Storages/MergeTree/StorageFromMergeTreeDataPart.h (100%) rename {dbms => src}/Storages/MergeTree/TTLMergeSelector.cpp (100%) rename {dbms => src}/Storages/MergeTree/TTLMergeSelector.h (100%) rename {dbms => src}/Storages/MergeTree/checkDataPart.cpp (100%) rename {dbms => src}/Storages/MergeTree/checkDataPart.h (100%) rename {dbms => src}/Storages/MergeTree/localBackup.cpp (100%) rename {dbms => src}/Storages/MergeTree/localBackup.h (100%) rename {dbms => src}/Storages/MergeTree/registerStorageMergeTree.cpp (99%) rename {dbms => src}/Storages/MutationCommands.cpp (100%) rename {dbms => src}/Storages/MutationCommands.h (100%) rename {dbms => src}/Storages/PartitionCommands.cpp (100%) rename {dbms => src}/Storages/PartitionCommands.h (100%) rename {dbms => src}/Storages/ReadInOrderOptimizer.cpp (100%) rename {dbms => src}/Storages/ReadInOrderOptimizer.h (100%) rename {dbms => src}/Storages/SelectQueryInfo.h (100%) rename {dbms => src}/Storages/StorageBuffer.cpp (100%) rename {dbms => src}/Storages/StorageBuffer.h (100%) rename {dbms => src}/Storages/StorageDictionary.cpp (100%) rename {dbms => src}/Storages/StorageDictionary.h (100%) rename {dbms => src}/Storages/StorageDistributed.cpp (100%) rename {dbms => src}/Storages/StorageDistributed.h (100%) rename {dbms => src}/Storages/StorageFactory.cpp (100%) rename {dbms => src}/Storages/StorageFactory.h (100%) rename {dbms => src}/Storages/StorageFile.cpp (100%) rename {dbms => src}/Storages/StorageFile.h (100%) rename {dbms => src}/Storages/StorageGenerateRandom.cpp (100%) rename {dbms => src}/Storages/StorageGenerateRandom.h (100%) rename {dbms => src}/Storages/StorageHDFS.cpp (100%) rename {dbms => src}/Storages/StorageHDFS.h (100%) rename {dbms => src}/Storages/StorageInMemoryMetadata.cpp (100%) rename {dbms => src}/Storages/StorageInMemoryMetadata.h (100%) rename {dbms => src}/Storages/StorageInput.cpp (100%) rename {dbms => src}/Storages/StorageInput.h (100%) rename {dbms => src}/Storages/StorageJoin.cpp (100%) rename {dbms => src}/Storages/StorageJoin.h (100%) rename {dbms => src}/Storages/StorageLog.cpp (100%) rename {dbms => src}/Storages/StorageLog.h (100%) rename {dbms => src}/Storages/StorageLogSettings.cpp (100%) rename {dbms => src}/Storages/StorageLogSettings.h (100%) rename {dbms => src}/Storages/StorageMaterializedView.cpp (100%) rename {dbms => src}/Storages/StorageMaterializedView.h (100%) rename {dbms => src}/Storages/StorageMemory.cpp (100%) rename {dbms => src}/Storages/StorageMemory.h (100%) rename {dbms => src}/Storages/StorageMerge.cpp (100%) rename {dbms => src}/Storages/StorageMerge.h (100%) rename {dbms => src}/Storages/StorageMergeTree.cpp (100%) rename {dbms => src}/Storages/StorageMergeTree.h (100%) rename {dbms => src}/Storages/StorageMySQL.cpp (100%) rename {dbms => src}/Storages/StorageMySQL.h (100%) rename {dbms => src}/Storages/StorageNull.cpp (100%) rename {dbms => src}/Storages/StorageNull.h (100%) rename {dbms => src}/Storages/StorageReplicatedMergeTree.cpp (100%) rename {dbms => src}/Storages/StorageReplicatedMergeTree.h (100%) rename {dbms => src}/Storages/StorageS3.cpp (100%) rename {dbms => src}/Storages/StorageS3.h (100%) rename {dbms => src}/Storages/StorageSet.cpp (100%) rename {dbms => src}/Storages/StorageSet.h (100%) rename {dbms => src}/Storages/StorageStripeLog.cpp (100%) rename {dbms => src}/Storages/StorageStripeLog.h (100%) rename {dbms => 
src}/Storages/StorageTinyLog.cpp (100%) rename {dbms => src}/Storages/StorageTinyLog.h (100%) rename {dbms => src}/Storages/StorageURL.cpp (100%) rename {dbms => src}/Storages/StorageURL.h (100%) rename {dbms => src}/Storages/StorageValues.cpp (100%) rename {dbms => src}/Storages/StorageValues.h (100%) rename {dbms => src}/Storages/StorageView.cpp (100%) rename {dbms => src}/Storages/StorageView.h (100%) rename {dbms => src}/Storages/StorageXDBC.cpp (100%) rename {dbms => src}/Storages/StorageXDBC.h (100%) rename {dbms => src}/Storages/System/CMakeLists.txt (100%) rename {dbms => src}/Storages/System/IStorageSystemOneBlock.h (100%) rename {dbms => src}/Storages/System/StorageSystemAggregateFunctionCombinators.cpp (100%) rename {dbms => src}/Storages/System/StorageSystemAggregateFunctionCombinators.h (100%) rename {dbms => src}/Storages/System/StorageSystemAsynchronousMetrics.cpp (100%) rename {dbms => src}/Storages/System/StorageSystemAsynchronousMetrics.h (100%) rename {dbms => src}/Storages/System/StorageSystemBuildOptions.cpp (100%) rename {dbms => src}/Storages/System/StorageSystemBuildOptions.generated.cpp.in (100%) rename {dbms => src}/Storages/System/StorageSystemBuildOptions.h (100%) rename {dbms => src}/Storages/System/StorageSystemClusters.cpp (100%) rename {dbms => src}/Storages/System/StorageSystemClusters.h (100%) rename {dbms => src}/Storages/System/StorageSystemCollations.cpp (100%) rename {dbms => src}/Storages/System/StorageSystemCollations.h (100%) rename {dbms => src}/Storages/System/StorageSystemColumns.cpp (100%) rename {dbms => src}/Storages/System/StorageSystemColumns.h (100%) rename {dbms => src}/Storages/System/StorageSystemContributors.cpp (100%) rename {dbms => src}/Storages/System/StorageSystemContributors.generated.cpp (100%) rename {dbms => src}/Storages/System/StorageSystemContributors.h (100%) rename {dbms => src}/Storages/System/StorageSystemContributors.sh (100%) rename {dbms => src}/Storages/System/StorageSystemDataTypeFamilies.cpp (100%) rename {dbms => src}/Storages/System/StorageSystemDataTypeFamilies.h (100%) rename {dbms => src}/Storages/System/StorageSystemDatabases.cpp (100%) rename {dbms => src}/Storages/System/StorageSystemDatabases.h (100%) rename {dbms => src}/Storages/System/StorageSystemDetachedParts.cpp (100%) rename {dbms => src}/Storages/System/StorageSystemDetachedParts.h (100%) rename {dbms => src}/Storages/System/StorageSystemDictionaries.cpp (100%) rename {dbms => src}/Storages/System/StorageSystemDictionaries.h (100%) rename {dbms => src}/Storages/System/StorageSystemDisks.cpp (100%) rename {dbms => src}/Storages/System/StorageSystemDisks.h (100%) rename {dbms => src}/Storages/System/StorageSystemEvents.cpp (100%) rename {dbms => src}/Storages/System/StorageSystemEvents.h (100%) rename {dbms => src}/Storages/System/StorageSystemFormats.cpp (100%) rename {dbms => src}/Storages/System/StorageSystemFormats.h (100%) rename {dbms => src}/Storages/System/StorageSystemFunctions.cpp (100%) rename {dbms => src}/Storages/System/StorageSystemFunctions.h (100%) rename {dbms => src}/Storages/System/StorageSystemGraphite.cpp (100%) rename {dbms => src}/Storages/System/StorageSystemGraphite.h (100%) rename {dbms => src}/Storages/System/StorageSystemMacros.cpp (100%) rename {dbms => src}/Storages/System/StorageSystemMacros.h (100%) rename {dbms => src}/Storages/System/StorageSystemMergeTreeSettings.cpp (100%) rename {dbms => src}/Storages/System/StorageSystemMergeTreeSettings.h (100%) rename {dbms => src}/Storages/System/StorageSystemMerges.cpp 
(100%) rename {dbms => src}/Storages/System/StorageSystemMerges.h (100%) rename {dbms => src}/Storages/System/StorageSystemMetrics.cpp (100%) rename {dbms => src}/Storages/System/StorageSystemMetrics.h (100%) rename {dbms => src}/Storages/System/StorageSystemModels.cpp (100%) rename {dbms => src}/Storages/System/StorageSystemModels.h (100%) rename {dbms => src}/Storages/System/StorageSystemMutations.cpp (100%) rename {dbms => src}/Storages/System/StorageSystemMutations.h (100%) rename {dbms => src}/Storages/System/StorageSystemNumbers.cpp (100%) rename {dbms => src}/Storages/System/StorageSystemNumbers.h (100%) rename {dbms => src}/Storages/System/StorageSystemOne.cpp (100%) rename {dbms => src}/Storages/System/StorageSystemOne.h (100%) rename {dbms => src}/Storages/System/StorageSystemParts.cpp (100%) rename {dbms => src}/Storages/System/StorageSystemParts.h (100%) rename {dbms => src}/Storages/System/StorageSystemPartsBase.cpp (100%) rename {dbms => src}/Storages/System/StorageSystemPartsBase.h (100%) rename {dbms => src}/Storages/System/StorageSystemPartsColumns.cpp (100%) rename {dbms => src}/Storages/System/StorageSystemPartsColumns.h (100%) rename {dbms => src}/Storages/System/StorageSystemProcesses.cpp (100%) rename {dbms => src}/Storages/System/StorageSystemProcesses.h (100%) rename {dbms => src}/Storages/System/StorageSystemQuotaUsage.cpp (100%) rename {dbms => src}/Storages/System/StorageSystemQuotaUsage.h (100%) rename {dbms => src}/Storages/System/StorageSystemQuotas.cpp (100%) rename {dbms => src}/Storages/System/StorageSystemQuotas.h (100%) rename {dbms => src}/Storages/System/StorageSystemReplicas.cpp (100%) rename {dbms => src}/Storages/System/StorageSystemReplicas.h (100%) rename {dbms => src}/Storages/System/StorageSystemReplicationQueue.cpp (100%) rename {dbms => src}/Storages/System/StorageSystemReplicationQueue.h (100%) rename {dbms => src}/Storages/System/StorageSystemRowPolicies.cpp (100%) rename {dbms => src}/Storages/System/StorageSystemRowPolicies.h (100%) rename {dbms => src}/Storages/System/StorageSystemSettings.cpp (100%) rename {dbms => src}/Storages/System/StorageSystemSettings.h (100%) rename {dbms => src}/Storages/System/StorageSystemStackTrace.cpp (100%) rename {dbms => src}/Storages/System/StorageSystemStackTrace.h (100%) rename {dbms => src}/Storages/System/StorageSystemStoragePolicies.cpp (100%) rename {dbms => src}/Storages/System/StorageSystemStoragePolicies.h (100%) rename {dbms => src}/Storages/System/StorageSystemTableEngines.cpp (100%) rename {dbms => src}/Storages/System/StorageSystemTableEngines.h (100%) rename {dbms => src}/Storages/System/StorageSystemTableFunctions.cpp (100%) rename {dbms => src}/Storages/System/StorageSystemTableFunctions.h (100%) rename {dbms => src}/Storages/System/StorageSystemTables.cpp (100%) rename {dbms => src}/Storages/System/StorageSystemTables.h (100%) rename {dbms => src}/Storages/System/StorageSystemZeros.cpp (100%) rename {dbms => src}/Storages/System/StorageSystemZeros.h (100%) rename {dbms => src}/Storages/System/StorageSystemZooKeeper.cpp (100%) rename {dbms => src}/Storages/System/StorageSystemZooKeeper.h (100%) rename {dbms => src}/Storages/System/attachSystemTables.cpp (100%) rename {dbms => src}/Storages/System/attachSystemTables.h (100%) rename {dbms => src}/Storages/TableStructureLockHolder.h (100%) rename {dbms => src}/Storages/VirtualColumnUtils.cpp (100%) rename {dbms => src}/Storages/VirtualColumnUtils.h (100%) rename {dbms => src}/Storages/getStructureOfRemoteTable.cpp (100%) rename {dbms => 
src}/Storages/getStructureOfRemoteTable.h (100%) rename {dbms => src}/Storages/registerStorages.cpp (100%) rename {dbms => src}/Storages/registerStorages.h (100%) rename {dbms => src}/Storages/tests/CMakeLists.txt (100%) rename {dbms => src}/Storages/tests/active_parts.py (100%) rename {dbms => src}/Storages/tests/get_abandonable_lock_in_all_partitions.cpp (100%) rename {dbms => src}/Storages/tests/get_current_inserts_in_replicated.cpp (100%) rename {dbms => src}/Storages/tests/gtest_aux_funcs_for_adaptive_granularity.cpp (100%) rename {dbms => src}/Storages/tests/gtest_row_source_bits_test.cpp (100%) rename {dbms => src}/Storages/tests/gtest_storage_log.cpp (100%) rename {dbms => src}/Storages/tests/gtest_transform_query_for_external_database.cpp (100%) rename {dbms => src}/Storages/tests/merge_selector.cpp (100%) rename {dbms => src}/Storages/tests/merge_selector2.cpp (100%) rename {dbms => src}/Storages/tests/part_name.cpp (100%) rename {dbms => src}/Storages/tests/remove_symlink_directory.cpp (100%) rename {dbms => src}/Storages/tests/storage_log.cpp (100%) rename {dbms => src}/Storages/tests/system_numbers.cpp (100%) rename {dbms => src}/Storages/tests/test_alter_distributed.sql (100%) rename {dbms => src}/Storages/tests/test_alter_merge.sql (100%) rename {dbms => src}/Storages/tests/test_alter_merge_tree.sql (100%) rename {dbms => src}/Storages/tests/transform_part_zk_nodes.cpp (100%) rename {dbms => src}/Storages/transformQueryForExternalDatabase.cpp (100%) rename {dbms => src}/Storages/transformQueryForExternalDatabase.h (100%) rename {dbms => src}/TableFunctions/CMakeLists.txt (100%) rename {dbms => src}/TableFunctions/ITableFunction.cpp (100%) rename {dbms => src}/TableFunctions/ITableFunction.h (100%) rename {dbms => src}/TableFunctions/ITableFunctionFileLike.cpp (100%) rename {dbms => src}/TableFunctions/ITableFunctionFileLike.h (100%) rename {dbms => src}/TableFunctions/ITableFunctionXDBC.cpp (100%) rename {dbms => src}/TableFunctions/ITableFunctionXDBC.h (100%) rename {dbms => src}/TableFunctions/TableFunctionFactory.cpp (100%) rename {dbms => src}/TableFunctions/TableFunctionFactory.h (100%) rename {dbms => src}/TableFunctions/TableFunctionFile.cpp (100%) rename {dbms => src}/TableFunctions/TableFunctionFile.h (100%) rename {dbms => src}/TableFunctions/TableFunctionGenerateRandom.cpp (100%) rename {dbms => src}/TableFunctions/TableFunctionGenerateRandom.h (100%) rename {dbms => src}/TableFunctions/TableFunctionHDFS.cpp (100%) rename {dbms => src}/TableFunctions/TableFunctionHDFS.h (100%) rename {dbms => src}/TableFunctions/TableFunctionInput.cpp (100%) rename {dbms => src}/TableFunctions/TableFunctionInput.h (100%) rename {dbms => src}/TableFunctions/TableFunctionMerge.cpp (100%) rename {dbms => src}/TableFunctions/TableFunctionMerge.h (100%) rename {dbms => src}/TableFunctions/TableFunctionMySQL.cpp (100%) rename {dbms => src}/TableFunctions/TableFunctionMySQL.h (100%) rename {dbms => src}/TableFunctions/TableFunctionNumbers.cpp (100%) rename {dbms => src}/TableFunctions/TableFunctionNumbers.h (100%) rename {dbms => src}/TableFunctions/TableFunctionRemote.cpp (100%) rename {dbms => src}/TableFunctions/TableFunctionRemote.h (100%) rename {dbms => src}/TableFunctions/TableFunctionS3.cpp (100%) rename {dbms => src}/TableFunctions/TableFunctionS3.h (100%) rename {dbms => src}/TableFunctions/TableFunctionURL.cpp (100%) rename {dbms => src}/TableFunctions/TableFunctionURL.h (100%) rename {dbms => src}/TableFunctions/TableFunctionValues.cpp (100%) rename {dbms => 
src}/TableFunctions/TableFunctionValues.h (100%) rename {dbms => src}/TableFunctions/TableFunctionZeros.cpp (100%) rename {dbms => src}/TableFunctions/TableFunctionZeros.h (100%) rename {dbms => src}/TableFunctions/parseColumnsListForTableFunction.cpp (100%) rename {dbms => src}/TableFunctions/parseColumnsListForTableFunction.h (100%) rename {dbms => src}/TableFunctions/registerTableFunctions.cpp (100%) rename {dbms => src}/TableFunctions/registerTableFunctions.h (100%) diff --git a/.gitignore b/.gitignore index 703306f9232..8e2c1d21ede 100644 --- a/.gitignore +++ b/.gitignore @@ -73,100 +73,100 @@ contrib/libpoco/Poco/ contrib/libpoco/bin/ contrib/libpoco/cmake_uninstall.cmake contrib/libre2/re2_st/ -dbms/Client/clickhouse-benchmark -dbms/Client/clickhouse-client -dbms/Client/tests/test-connect -dbms/Common/tests/arena_with_free_lists -dbms/Common/tests/auto_array -dbms/Common/tests/compact_array -dbms/Common/tests/hash_table -dbms/Common/tests/hashes_test -dbms/Common/tests/int_hashes_perf -dbms/Common/tests/lru_cache -dbms/Common/tests/parallel_aggregation -dbms/Common/tests/parallel_aggregation2 -dbms/Common/tests/radix_sort -dbms/Common/tests/shell_command_test -dbms/Common/tests/simple_cache -dbms/Common/tests/sip_hash -dbms/Common/tests/sip_hash_perf -dbms/Common/tests/small_table -dbms/Core/tests/exception -dbms/Core/tests/field -dbms/Core/tests/rvo_test -dbms/Core/tests/string_pool -dbms/DataStreams/tests/aggregating_stream -dbms/DataStreams/tests/block_tab_separated_streams -dbms/DataStreams/tests/collapsing_sorted_stream -dbms/DataStreams/tests/expression_stream -dbms/DataStreams/tests/filter_stream -dbms/DataStreams/tests/filter_stream_hitlog -dbms/DataStreams/tests/fork_streams -dbms/DataStreams/tests/glue_streams -dbms/DataStreams/tests/json_streams -dbms/DataStreams/tests/native_streams -dbms/DataStreams/tests/sorting_stream -dbms/DataStreams/tests/tab_separated_streams -dbms/DataStreams/tests/union_stream -dbms/DataStreams/tests/union_stream2 -dbms/DataTypes/tests/data_type_string -dbms/DataTypes/tests/data_types_number_fixed -dbms/Functions/tests/functions_arithmetic -dbms/Functions/tests/logical_functions_performance -dbms/Functions/tests/number_traits -dbms/IO/tests/async_write -dbms/IO/tests/cached_compressed_read_buffer -dbms/IO/tests/compressed_buffer -dbms/IO/tests/hashing_read_buffer -dbms/IO/tests/hashing_write_buffer -dbms/IO/tests/io_and_exceptions -dbms/IO/tests/io_operators -dbms/IO/tests/mempbrk -dbms/IO/tests/o_direct_and_dirty_pages -dbms/IO/tests/parse_int_perf -dbms/IO/tests/parse_int_perf2 -dbms/IO/tests/read_buffer -dbms/IO/tests/read_buffer_aio -dbms/IO/tests/read_buffer_perf -dbms/IO/tests/read_escaped_string -dbms/IO/tests/read_float_perf -dbms/IO/tests/read_write_int -dbms/IO/tests/valid_utf8 -dbms/IO/tests/valid_utf8_perf -dbms/IO/tests/var_uint -dbms/IO/tests/write_buffer -dbms/IO/tests/write_buffer_aio -dbms/IO/tests/write_buffer_perf -dbms/Interpreters/tests/address_patterns -dbms/Interpreters/tests/aggregate -dbms/Interpreters/tests/compiler_test -dbms/Interpreters/tests/create_query -dbms/Interpreters/tests/expression -dbms/Interpreters/tests/expression_analyzer -dbms/Interpreters/tests/hash_map -dbms/Interpreters/tests/hash_map2 -dbms/Interpreters/tests/hash_map3 -dbms/Interpreters/tests/hash_map_string -dbms/Interpreters/tests/hash_map_string_2 -dbms/Interpreters/tests/hash_map_string_3 -dbms/Interpreters/tests/hash_map_string_small -dbms/Interpreters/tests/in_join_subqueries_preprocessor -dbms/Interpreters/tests/logical_expressions_optimizer 
-dbms/Interpreters/tests/select_query -dbms/Interpreters/tests/two_level_hash_map -dbms/Interpreters/tests/users -dbms/Parsers/tests/create_parser -dbms/Parsers/tests/select_parser -dbms/Server/clickhouse-server -dbms/Server/clickhouse-server.init -dbms/Storages/tests/hit_log -dbms/Storages/tests/merge_tree -dbms/Storages/tests/part_checker -dbms/Storages/tests/part_name -dbms/Storages/tests/pk_condition -dbms/Storages/tests/seek_speed_test -dbms/Storages/tests/storage_log -dbms/Storages/tests/system_numbers +src/Client/clickhouse-benchmark +src/Client/clickhouse-client +src/Client/tests/test-connect +src/Common/tests/arena_with_free_lists +src/Common/tests/auto_array +src/Common/tests/compact_array +src/Common/tests/hash_table +src/Common/tests/hashes_test +src/Common/tests/int_hashes_perf +src/Common/tests/lru_cache +src/Common/tests/parallel_aggregation +src/Common/tests/parallel_aggregation2 +src/Common/tests/radix_sort +src/Common/tests/shell_command_test +src/Common/tests/simple_cache +src/Common/tests/sip_hash +src/Common/tests/sip_hash_perf +src/Common/tests/small_table +src/Core/tests/exception +src/Core/tests/field +src/Core/tests/rvo_test +src/Core/tests/string_pool +src/DataStreams/tests/aggregating_stream +src/DataStreams/tests/block_tab_separated_streams +src/DataStreams/tests/collapsing_sorted_stream +src/DataStreams/tests/expression_stream +src/DataStreams/tests/filter_stream +src/DataStreams/tests/filter_stream_hitlog +src/DataStreams/tests/fork_streams +src/DataStreams/tests/glue_streams +src/DataStreams/tests/json_streams +src/DataStreams/tests/native_streams +src/DataStreams/tests/sorting_stream +src/DataStreams/tests/tab_separated_streams +src/DataStreams/tests/union_stream +src/DataStreams/tests/union_stream2 +src/DataTypes/tests/data_type_string +src/DataTypes/tests/data_types_number_fixed +src/Functions/tests/functions_arithmetic +src/Functions/tests/logical_functions_performance +src/Functions/tests/number_traits +src/IO/tests/async_write +src/IO/tests/cached_compressed_read_buffer +src/IO/tests/compressed_buffer +src/IO/tests/hashing_read_buffer +src/IO/tests/hashing_write_buffer +src/IO/tests/io_and_exceptions +src/IO/tests/io_operators +src/IO/tests/mempbrk +src/IO/tests/o_direct_and_dirty_pages +src/IO/tests/parse_int_perf +src/IO/tests/parse_int_perf2 +src/IO/tests/read_buffer +src/IO/tests/read_buffer_aio +src/IO/tests/read_buffer_perf +src/IO/tests/read_escaped_string +src/IO/tests/read_float_perf +src/IO/tests/read_write_int +src/IO/tests/valid_utf8 +src/IO/tests/valid_utf8_perf +src/IO/tests/var_uint +src/IO/tests/write_buffer +src/IO/tests/write_buffer_aio +src/IO/tests/write_buffer_perf +src/Interpreters/tests/address_patterns +src/Interpreters/tests/aggregate +src/Interpreters/tests/compiler_test +src/Interpreters/tests/create_query +src/Interpreters/tests/expression +src/Interpreters/tests/expression_analyzer +src/Interpreters/tests/hash_map +src/Interpreters/tests/hash_map2 +src/Interpreters/tests/hash_map3 +src/Interpreters/tests/hash_map_string +src/Interpreters/tests/hash_map_string_2 +src/Interpreters/tests/hash_map_string_3 +src/Interpreters/tests/hash_map_string_small +src/Interpreters/tests/in_join_subqueries_preprocessor +src/Interpreters/tests/logical_expressions_optimizer +src/Interpreters/tests/select_query +src/Interpreters/tests/two_level_hash_map +src/Interpreters/tests/users +src/Parsers/tests/create_parser +src/Parsers/tests/select_parser +src/Server/clickhouse-server +src/Server/clickhouse-server.init +src/Storages/tests/hit_log 
+src/Storages/tests/merge_tree +src/Storages/tests/part_checker +src/Storages/tests/part_name +src/Storages/tests/pk_condition +src/Storages/tests/seek_speed_test +src/Storages/tests/storage_log +src/Storages/tests/system_numbers libs/libcommon/src/revision.h libs/libcommon/src/tests/date_lut2 libs/libcommon/src/tests/date_lut3 @@ -184,15 +184,15 @@ libs/libzkutil/src/tests/zkutil_zookeeper_holder utils/zookeeper-create-entry-to-download-part/zookeeper-create-entry-to-download-part utils/zookeeper-dump-tree/zookeeper-dump-tree utils/zookeeper-remove-by-list/zookeeper-remove-by-list -dbms/Storages/tests/remove_symlink_directory +src/Storages/tests/remove_symlink_directory libs/libcommon/src/tests/json_test utils/compressor/zstd_test utils/wikistat-loader/wikistat-loader -dbms/Common/tests/pod_array +src/Common/tests/pod_array -dbms/Server/data/* -dbms/Server/metadata/* -dbms/Server/status +src/Server/data/* +src/Server/metadata/* +src/Server/status config-9001.xml *-preprocessed.xml @@ -242,7 +242,7 @@ website/package-lock.json */.DS_Store # Ignore files for locally disabled tests -/dbms/queries/**/*.disabled +/src/queries/**/*.disabled # cquery cache /.cquery-cache diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 972edf11384..12afadc55a6 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -31,11 +31,11 @@ build: - docker pull $CI_REGISTRY/yandex/clickhouse-builder - docker run --rm --volumes-from "${HOSTNAME}-build" --workdir "${CI_PROJECT_DIR}" --env CI_PROJECT_DIR=${CI_PROJECT_DIR} $CI_REGISTRY/yandex/clickhouse-builder /build_gitlab_ci.sh # You can upload your binary to nexus - - curl -v --keepalive-time 60 --keepalive --user "$NEXUS_USER:$NEXUS_PASSWORD" -XPUT "http://$NEXUS_HOST/repository/binaries/$CI_PROJECT_NAME" --upload-file ./dbms/Server/clickhouse + - curl -v --keepalive-time 60 --keepalive --user "$NEXUS_USER:$NEXUS_PASSWORD" -XPUT "http://$NEXUS_HOST/repository/binaries/$CI_PROJECT_NAME" --upload-file ./src/Server/clickhouse # Or download artifacts from gitlab artifacts: paths: - - ./dbms/Server/clickhouse + - ./src/Server/clickhouse expire_in: 1 day tags: - docker diff --git a/CMakeLists.txt b/CMakeLists.txt index c1342a5ca97..00033d28475 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -384,8 +384,8 @@ set(ConfigIncludePath ${CMAKE_CURRENT_BINARY_DIR}/includes/configs CACHE INTERNA include_directories(${ConfigIncludePath}) add_subdirectory (base) -add_subdirectory (dbms) add_subdirectory (programs) +add_subdirectory (src) add_subdirectory (tests) add_subdirectory (utils) diff --git a/cmake/analysis.cmake b/cmake/analysis.cmake index a6a93774817..287c36a8de7 100644 --- a/cmake/analysis.cmake +++ b/cmake/analysis.cmake @@ -10,7 +10,7 @@ if (ENABLE_CLANG_TIDY) if (CLANG_TIDY_PATH) message(STATUS "Using clang-tidy: ${CLANG_TIDY_PATH}. The checks will be run during build process. See the .clang-tidy file at the root directory to configure the checks.") set (USE_CLANG_TIDY 1) - # The variable CMAKE_CXX_CLANG_TIDY will be set inside dbms and base directories with non third-party code. + # The variable CMAKE_CXX_CLANG_TIDY will be set inside src and base directories with non third-party code. # set (CMAKE_CXX_CLANG_TIDY "${CLANG_TIDY_PATH}") else () message(STATUS "clang-tidy is not found. 
This is normal - the tool is used only for static code analysis and not essential for build.")
diff --git a/cmake/lib_name.cmake b/cmake/lib_name.cmake
index 9f53c7bc6d8..8f5bebf4abe 100644
--- a/cmake/lib_name.cmake
+++ b/cmake/lib_name.cmake
@@ -1,5 +1,5 @@
 set(DIVIDE_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/libdivide)
-set(DBMS_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/dbms ${ClickHouse_BINARY_DIR}/dbms)
+set(DBMS_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/src ${ClickHouse_BINARY_DIR}/src)
 set(DOUBLE_CONVERSION_CONTRIB_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/double-conversion)
 set(METROHASH_CONTRIB_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/libmetrohash/src)
 set(PCG_RANDOM_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/libpcg-random/include)
diff --git a/dbms/Client/CMakeLists.txt b/dbms/Client/CMakeLists.txt
deleted file mode 100644
index 9b9ec442a3c..00000000000
--- a/dbms/Client/CMakeLists.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-# TODO: make separate lib datastream, block, ...
-#include(${ClickHouse_SOURCE_DIR}/cmake/dbms_glob_sources.cmake)
-#add_headers_and_sources(clickhouse_client .)
-#add_library(clickhouse_client ${clickhouse_client_headers} ${clickhouse_client_sources})
-#target_link_libraries (clickhouse_client clickhouse_common_io ${Poco_Net_LIBRARY})
-#target_include_directories (clickhouse_client PRIVATE ${DBMS_INCLUDE_DIR})
-
-add_subdirectory(tests)
diff --git a/docker/builder/README.md b/docker/builder/README.md
index 5ae9a95a646..cb9fb7d1b77 100644
--- a/docker/builder/README.md
+++ b/docker/builder/README.md
@@ -23,7 +23,7 @@ It builds only binaries, not packages.
 For example, run server:
 ```
-cd $(git rev-parse --show-toplevel)/dbms/Server
+cd $(git rev-parse --show-toplevel)/src/Server
 $(git rev-parse --show-toplevel)/docker/builder/programs/clickhouse server --config-file $(git rev-parse --show-toplevel)/programs/server/config.xml
 ```
diff --git a/docker/packager/binary/build.sh b/docker/packager/binary/build.sh
index e254fde8c52..bb9a3fcab5f 100755
--- a/docker/packager/binary/build.sh
+++ b/docker/packager/binary/build.sh
@@ -17,7 +17,7 @@ cmake .. -LA -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DSANITIZE=$SANITIZER $CMAKE_FLAGS
 ninja
 ccache --show-stats ||:
 mv ./programs/clickhouse* /output
-mv ./dbms/unit_tests_dbms /output
+mv ./src/unit_tests_dbms /output
 find . -name '*.so' -print -exec mv '{}' /output \;
 find . -name '*.so.*' -print -exec mv '{}' /output \;
diff --git a/docker/test/performance-comparison/compare.sh b/docker/test/performance-comparison/compare.sh
index 83801130556..99d1045e401 100755
--- a/docker/test/performance-comparison/compare.sh
+++ b/docker/test/performance-comparison/compare.sh
@@ -107,7 +107,7 @@ function run_tests
 # and not always correct (e.g. when the reference SHA is really old and
 # has some other differences to the tested SHA, besides the one introduced
 # by the PR).
- test_files_override=$(sed "s/dbms\/tests\/performance/${test_prefix//\//\\/}/" changed-tests.txt)
+ test_files_override=$(sed "s/tests\/performance/${test_prefix//\//\\/}/" changed-tests.txt)
 if [ "$test_files_override" != "" ]
 then
 test_files=$test_files_override
diff --git a/docs/en/development/browse_code.md b/docs/en/development/browse_code.md
index d6994c293ac..11e6a909b6a 100644
--- a/docs/en/development/browse_code.md
+++ b/docs/en/development/browse_code.md
@@ -1,6 +1,6 @@
 # Browse ClickHouse Source Code {#browse-clickhouse-source-code}
-You can use **Woboq** online code browser available [here](https://clickhouse-test-reports.s3.yandex.net/codebrowser/html_report///ClickHouse/dbms/index.html). It provides code navigation and semantic highlighting, search and indexing. The code snapshot is updated daily.
+You can use the **Woboq** online code browser, available [here](https://clickhouse-test-reports.s3.yandex.net/codebrowser/html_report///ClickHouse/src/index.html). It provides code navigation, semantic highlighting, search and indexing. The code snapshot is updated daily.
 Also, you can browse sources on [GitHub](https://github.com/ClickHouse/ClickHouse) as usual.
diff --git a/docs/en/development/tests.md b/docs/en/development/tests.md
index ac0051fed70..537553dd660 100644
--- a/docs/en/development/tests.md
+++ b/docs/en/development/tests.md
@@ -55,7 +55,7 @@ If you want to improve performance of ClickHouse in some scenario, and if improv
 ## Test Tools And Scripts {#test-tools-and-scripts}
-Some programs in `tests` directory are not prepared tests, but are test tools. For example, for `Lexer` there is a tool `dbms/Parsers/tests/lexer` that just do tokenization of stdin and writes colorized result to stdout. You can use these kind of tools as a code examples and for exploration and manual testing.
+Some programs in the `tests` directory are not prepared tests but test tools. For example, for `Lexer` there is a tool `src/Parsers/tests/lexer` that just does tokenization of stdin and writes the colorized result to stdout. You can use these kinds of tools as code examples and for exploration and manual testing.
 You can also place pair of files `.sh` and `.reference` along with the tool to run it on some predefined input - then script result can be compared to `.reference` file. These kind of tests are not automated.
diff --git a/docs/en/interfaces/tcp.md b/docs/en/interfaces/tcp.md
index 876ccf12f8a..3600dfbf1d8 100644
--- a/docs/en/interfaces/tcp.md
+++ b/docs/en/interfaces/tcp.md
@@ -1,5 +1,5 @@
 # Native Interface (TCP) {#native-interface-tcp}
-The native protocol is used in the [command-line client](cli.md), for inter-server communication during distributed query processing, and also in other C++ programs. Unfortunately, native ClickHouse protocol does not have formal specification yet, but it can be reverse-engineered from ClickHouse source code (starting [around here](https://github.com/ClickHouse/ClickHouse/tree/master/dbms/Client)) and/or by intercepting and analyzing TCP traffic.
+The native protocol is used in the [command-line client](cli.md), for inter-server communication during distributed query processing, and also in other C++ programs. Unfortunately, the native ClickHouse protocol does not have a formal specification yet, but it can be reverse-engineered from the ClickHouse source code (starting [around here](https://github.com/ClickHouse/ClickHouse/tree/master/src/Client)) and/or by intercepting and analyzing TCP traffic.
[Original article](https://clickhouse.tech/docs/en/interfaces/tcp/) diff --git a/docs/en/operations/performance/sampling_query_profiler_example_result.txt b/docs/en/operations/performance/sampling_query_profiler_example_result.txt index 56c2fdf9c65..df33da2c9aa 100644 --- a/docs/en/operations/performance/sampling_query_profiler_example_result.txt +++ b/docs/en/operations/performance/sampling_query_profiler_example_result.txt @@ -2,55 +2,55 @@ Row 1: ────── count(): 6344 sym: StackTrace::StackTrace(ucontext_t const&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 + /home/milovidov/ClickHouse/build_gcc9/../src/Common/StackTrace.cpp:208 DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] - /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 + /home/milovidov/ClickHouse/build_gcc9/../src/IO/BufferBase.h:99 read DB::ReadBufferFromFileDescriptor::nextImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/ReadBufferFromFileDescriptor.cpp:56 + /home/milovidov/ClickHouse/build_gcc9/../src/IO/ReadBufferFromFileDescriptor.cpp:56 DB::CompressedReadBufferBase::readCompressedData(unsigned long&, unsigned long&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/ReadBuffer.h:54 + /home/milovidov/ClickHouse/build_gcc9/../src/IO/ReadBuffer.h:54 DB::CompressedReadBufferFromFile::nextImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/Compression/CompressedReadBufferFromFile.cpp:22 + /home/milovidov/ClickHouse/build_gcc9/../src/Compression/CompressedReadBufferFromFile.cpp:22 DB::CompressedReadBufferFromFile::seek(unsigned long, unsigned long) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Compression/CompressedReadBufferFromFile.cpp:63 + /home/milovidov/ClickHouse/build_gcc9/../src/Compression/CompressedReadBufferFromFile.cpp:63 DB::MergeTreeReaderStream::seekToMark(unsigned long) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReaderStream.cpp:200 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeReaderStream.cpp:200 std::_Function_handler > const&), DB::MergeTreeReader::readData(std::__cxx11::basic_string, std::allocator > const&, DB::IDataType const&, DB::IColumn&, unsigned long, bool, unsigned long, bool)::{lambda(bool)#1}::operator()(bool) const::{lambda(std::vector > const&)#1}>::_M_invoke(std::_Any_data const&, std::vector > const&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:212 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeReader.cpp:212 DB::IDataType::deserializeBinaryBulkWithMultipleStreams(DB::IColumn&, unsigned long, DB::IDataType::DeserializeBinaryBulkSettings&, std::shared_ptr&) const /usr/local/include/c++/9.1.0/bits/std_function.h:690 DB::MergeTreeReader::readData(std::__cxx11::basic_string, std::allocator > const&, DB::IDataType const&, DB::IColumn&, unsigned long, bool, unsigned long, bool) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:232 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeReader.cpp:232 DB::MergeTreeReader::readRows(unsigned long, bool, unsigned long, DB::Block&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:111 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeReader.cpp:111 DB::MergeTreeRangeReader::DelayedStream::finalize(DB::Block&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:35 
+ /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeRangeReader.cpp:35 DB::MergeTreeRangeReader::continueReadingChain(DB::MergeTreeRangeReader::ReadResult&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:219 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeRangeReader.cpp:219 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:487 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeRangeReader.cpp:487 DB::MergeTreeBaseSelectBlockInputStream::readFromPartImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 DB::MergeTreeBaseSelectBlockInputStream::readImpl() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ExpressionBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34 + /home/milovidov/ClickHouse/build_gcc9/../src/DataStreams/ExpressionBlockInputStream.cpp:34 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::PartialSortingBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13 + /home/milovidov/ClickHouse/build_gcc9/../src/DataStreams/PartialSortingBlockInputStream.cpp:13 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ParallelInputsProcessor::loop(unsigned long) /usr/local/include/c++/9.1.0/bits/atomic_base.h:419 DB::ParallelInputsProcessor::thread(std::shared_ptr, unsigned long) - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215 + /home/milovidov/ClickHouse/build_gcc9/../src/DataStreams/ParallelInputsProcessor.h:215 ThreadFromGlobalPool::ThreadFromGlobalPool::*)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*, std::shared_ptr, unsigned long&>(void (DB::ParallelInputsProcessor::*&&)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*&&, std::shared_ptr&&, unsigned long&)::{lambda()#1}::operator()() const /usr/local/include/c++/9.1.0/bits/shared_ptr_base.h:729 ThreadPoolImpl::worker(std::_List_iterator) @@ -66,9 +66,9 @@ Row 2: ────── count(): 3295 sym: StackTrace::StackTrace(ucontext_t const&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 + /home/milovidov/ClickHouse/build_gcc9/../src/Common/StackTrace.cpp:208 DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] - /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 + /home/milovidov/ClickHouse/build_gcc9/../src/IO/BufferBase.h:99 __pthread_cond_wait @@ -82,11 +82,11 @@ DB::UnionBlockInputStream::readImpl() DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::MergeSortingBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/Core/Block.h:90 + /home/milovidov/ClickHouse/build_gcc9/../src/Core/Block.h:90 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ExpressionBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34 + 
/home/milovidov/ClickHouse/build_gcc9/../src/DataStreams/ExpressionBlockInputStream.cpp:34 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::LimitBlockInputStream::readImpl() @@ -100,7 +100,7 @@ std::_Function_handler::worker(std::_List_iterator) /usr/local/include/c++/9.1.0/x86_64-pc-linux-gnu/bits/gthr-default.h:748 ThreadFromGlobalPool::ThreadFromGlobalPool::scheduleImpl(std::function, int, std::optional)::{lambda()#3}>(ThreadPoolImpl::scheduleImpl(std::function, int, std::optional)::{lambda()#3}&&)::{lambda()#1}::operator()() const - /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/ThreadPool.h:146 + /home/milovidov/ClickHouse/build_gcc9/../src/Common/ThreadPool.h:146 ThreadPoolImpl::worker(std::_List_iterator) /usr/local/include/c++/9.1.0/bits/unique_lock.h:69 execute_native_thread_routine @@ -114,9 +114,9 @@ Row 3: ────── count(): 1978 sym: StackTrace::StackTrace(ucontext_t const&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 + /home/milovidov/ClickHouse/build_gcc9/../src/Common/StackTrace.cpp:208 DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] - /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 + /home/milovidov/ClickHouse/build_gcc9/../src/IO/BufferBase.h:99 DB::VolnitskyBase >::search(unsigned char const*, unsigned long) const @@ -126,35 +126,35 @@ DB::MatchImpl::vector_constant(DB::PODArray, DB::NameLike>::executeImpl(DB::Block&, std::vector > const&, unsigned long, unsigned long) /opt/milovidov/ClickHouse/build_gcc9/programs/clickhouse DB::PreparedFunctionImpl::execute(DB::Block&, std::vector > const&, unsigned long, unsigned long, bool) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Functions/IFunction.cpp:464 + /home/milovidov/ClickHouse/build_gcc9/../src/Functions/IFunction.cpp:464 DB::ExpressionAction::execute(DB::Block&, bool) const /usr/local/include/c++/9.1.0/bits/stl_vector.h:677 DB::ExpressionActions::execute(DB::Block&, bool) const - /home/milovidov/ClickHouse/build_gcc9/../dbms/Interpreters/ExpressionActions.cpp:739 + /home/milovidov/ClickHouse/build_gcc9/../src/Interpreters/ExpressionActions.cpp:739 DB::MergeTreeRangeReader::executePrewhereActionsAndFilterColumns(DB::MergeTreeRangeReader::ReadResult&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:660 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeRangeReader.cpp:660 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:546 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeRangeReader.cpp:546 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::MergeTreeBaseSelectBlockInputStream::readFromPartImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 DB::MergeTreeBaseSelectBlockInputStream::readImpl() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ExpressionBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34 + /home/milovidov/ClickHouse/build_gcc9/../src/DataStreams/ExpressionBlockInputStream.cpp:34 
DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::PartialSortingBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13 + /home/milovidov/ClickHouse/build_gcc9/../src/DataStreams/PartialSortingBlockInputStream.cpp:13 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ParallelInputsProcessor::loop(unsigned long) /usr/local/include/c++/9.1.0/bits/atomic_base.h:419 DB::ParallelInputsProcessor::thread(std::shared_ptr, unsigned long) - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215 + /home/milovidov/ClickHouse/build_gcc9/../src/DataStreams/ParallelInputsProcessor.h:215 ThreadFromGlobalPool::ThreadFromGlobalPool::*)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*, std::shared_ptr, unsigned long&>(void (DB::ParallelInputsProcessor::*&&)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*&&, std::shared_ptr&&, unsigned long&)::{lambda()#1}::operator()() const /usr/local/include/c++/9.1.0/bits/shared_ptr_base.h:729 ThreadPoolImpl::worker(std::_List_iterator) @@ -170,9 +170,9 @@ Row 4: ────── count(): 1913 sym: StackTrace::StackTrace(ucontext_t const&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 + /home/milovidov/ClickHouse/build_gcc9/../src/Common/StackTrace.cpp:208 DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] - /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 + /home/milovidov/ClickHouse/build_gcc9/../src/IO/BufferBase.h:99 DB::VolnitskyBase >::search(unsigned char const*, unsigned long) const @@ -182,35 +182,35 @@ DB::MatchImpl::vector_constant(DB::PODArray, DB::NameLike>::executeImpl(DB::Block&, std::vector > const&, unsigned long, unsigned long) /opt/milovidov/ClickHouse/build_gcc9/programs/clickhouse DB::PreparedFunctionImpl::execute(DB::Block&, std::vector > const&, unsigned long, unsigned long, bool) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Functions/IFunction.cpp:464 + /home/milovidov/ClickHouse/build_gcc9/../src/Functions/IFunction.cpp:464 DB::ExpressionAction::execute(DB::Block&, bool) const /usr/local/include/c++/9.1.0/bits/stl_vector.h:677 DB::ExpressionActions::execute(DB::Block&, bool) const - /home/milovidov/ClickHouse/build_gcc9/../dbms/Interpreters/ExpressionActions.cpp:739 + /home/milovidov/ClickHouse/build_gcc9/../src/Interpreters/ExpressionActions.cpp:739 DB::MergeTreeRangeReader::executePrewhereActionsAndFilterColumns(DB::MergeTreeRangeReader::ReadResult&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:660 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeRangeReader.cpp:660 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:546 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeRangeReader.cpp:546 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::MergeTreeBaseSelectBlockInputStream::readFromPartImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 DB::MergeTreeBaseSelectBlockInputStream::readImpl() 
/usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ExpressionBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34 + /home/milovidov/ClickHouse/build_gcc9/../src/DataStreams/ExpressionBlockInputStream.cpp:34 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::PartialSortingBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13 + /home/milovidov/ClickHouse/build_gcc9/../src/DataStreams/PartialSortingBlockInputStream.cpp:13 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ParallelInputsProcessor::loop(unsigned long) /usr/local/include/c++/9.1.0/bits/atomic_base.h:419 DB::ParallelInputsProcessor::thread(std::shared_ptr, unsigned long) - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215 + /home/milovidov/ClickHouse/build_gcc9/../src/DataStreams/ParallelInputsProcessor.h:215 ThreadFromGlobalPool::ThreadFromGlobalPool::*)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*, std::shared_ptr, unsigned long&>(void (DB::ParallelInputsProcessor::*&&)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*&&, std::shared_ptr&&, unsigned long&)::{lambda()#1}::operator()() const /usr/local/include/c++/9.1.0/bits/shared_ptr_base.h:729 ThreadPoolImpl::worker(std::_List_iterator) @@ -226,9 +226,9 @@ Row 5: ────── count(): 1672 sym: StackTrace::StackTrace(ucontext_t const&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 + /home/milovidov/ClickHouse/build_gcc9/../src/Common/StackTrace.cpp:208 DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] - /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 + /home/milovidov/ClickHouse/build_gcc9/../src/IO/BufferBase.h:99 DB::VolnitskyBase >::search(unsigned char const*, unsigned long) const @@ -238,35 +238,35 @@ DB::MatchImpl::vector_constant(DB::PODArray, DB::NameLike>::executeImpl(DB::Block&, std::vector > const&, unsigned long, unsigned long) /opt/milovidov/ClickHouse/build_gcc9/programs/clickhouse DB::PreparedFunctionImpl::execute(DB::Block&, std::vector > const&, unsigned long, unsigned long, bool) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Functions/IFunction.cpp:464 + /home/milovidov/ClickHouse/build_gcc9/../src/Functions/IFunction.cpp:464 DB::ExpressionAction::execute(DB::Block&, bool) const /usr/local/include/c++/9.1.0/bits/stl_vector.h:677 DB::ExpressionActions::execute(DB::Block&, bool) const - /home/milovidov/ClickHouse/build_gcc9/../dbms/Interpreters/ExpressionActions.cpp:739 + /home/milovidov/ClickHouse/build_gcc9/../src/Interpreters/ExpressionActions.cpp:739 DB::MergeTreeRangeReader::executePrewhereActionsAndFilterColumns(DB::MergeTreeRangeReader::ReadResult&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:660 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeRangeReader.cpp:660 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:546 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeRangeReader.cpp:546 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 
DB::MergeTreeBaseSelectBlockInputStream::readFromPartImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 DB::MergeTreeBaseSelectBlockInputStream::readImpl() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ExpressionBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34 + /home/milovidov/ClickHouse/build_gcc9/../src/DataStreams/ExpressionBlockInputStream.cpp:34 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::PartialSortingBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13 + /home/milovidov/ClickHouse/build_gcc9/../src/DataStreams/PartialSortingBlockInputStream.cpp:13 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ParallelInputsProcessor::loop(unsigned long) /usr/local/include/c++/9.1.0/bits/atomic_base.h:419 DB::ParallelInputsProcessor::thread(std::shared_ptr, unsigned long) - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215 + /home/milovidov/ClickHouse/build_gcc9/../src/DataStreams/ParallelInputsProcessor.h:215 ThreadFromGlobalPool::ThreadFromGlobalPool::*)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*, std::shared_ptr, unsigned long&>(void (DB::ParallelInputsProcessor::*&&)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*&&, std::shared_ptr&&, unsigned long&)::{lambda()#1}::operator()() const /usr/local/include/c++/9.1.0/bits/shared_ptr_base.h:729 ThreadPoolImpl::worker(std::_List_iterator) @@ -282,53 +282,53 @@ Row 6: ────── count(): 1531 sym: StackTrace::StackTrace(ucontext_t const&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 + /home/milovidov/ClickHouse/build_gcc9/../src/Common/StackTrace.cpp:208 DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] - /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 + /home/milovidov/ClickHouse/build_gcc9/../src/IO/BufferBase.h:99 read DB::ReadBufferFromFileDescriptor::nextImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/ReadBufferFromFileDescriptor.cpp:56 + /home/milovidov/ClickHouse/build_gcc9/../src/IO/ReadBufferFromFileDescriptor.cpp:56 DB::CompressedReadBufferBase::readCompressedData(unsigned long&, unsigned long&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/ReadBuffer.h:54 + /home/milovidov/ClickHouse/build_gcc9/../src/IO/ReadBuffer.h:54 DB::CompressedReadBufferFromFile::nextImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/Compression/CompressedReadBufferFromFile.cpp:22 + /home/milovidov/ClickHouse/build_gcc9/../src/Compression/CompressedReadBufferFromFile.cpp:22 void DB::deserializeBinarySSE2<4>(DB::PODArray, 15ul, 16ul>&, DB::PODArray, 15ul, 16ul>&, DB::ReadBuffer&, unsigned long) - /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/ReadBuffer.h:53 + /home/milovidov/ClickHouse/build_gcc9/../src/IO/ReadBuffer.h:53 DB::DataTypeString::deserializeBinaryBulk(DB::IColumn&, DB::ReadBuffer&, unsigned long, double) const - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataTypes/DataTypeString.cpp:202 + /home/milovidov/ClickHouse/build_gcc9/../src/DataTypes/DataTypeString.cpp:202 
DB::MergeTreeReader::readData(std::__cxx11::basic_string, std::allocator > const&, DB::IDataType const&, DB::IColumn&, unsigned long, bool, unsigned long, bool) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:232 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeReader.cpp:232 DB::MergeTreeReader::readRows(unsigned long, bool, unsigned long, DB::Block&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:111 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeReader.cpp:111 DB::MergeTreeRangeReader::DelayedStream::finalize(DB::Block&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:35 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeRangeReader.cpp:35 DB::MergeTreeRangeReader::startReadingChain(unsigned long, std::vector >&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:219 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeRangeReader.cpp:219 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::MergeTreeBaseSelectBlockInputStream::readFromPartImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 DB::MergeTreeBaseSelectBlockInputStream::readImpl() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ExpressionBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34 + /home/milovidov/ClickHouse/build_gcc9/../src/DataStreams/ExpressionBlockInputStream.cpp:34 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::PartialSortingBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13 + /home/milovidov/ClickHouse/build_gcc9/../src/DataStreams/PartialSortingBlockInputStream.cpp:13 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ParallelInputsProcessor::loop(unsigned long) /usr/local/include/c++/9.1.0/bits/atomic_base.h:419 DB::ParallelInputsProcessor::thread(std::shared_ptr, unsigned long) - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215 + /home/milovidov/ClickHouse/build_gcc9/../src/DataStreams/ParallelInputsProcessor.h:215 ThreadFromGlobalPool::ThreadFromGlobalPool::*)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*, std::shared_ptr, unsigned long&>(void (DB::ParallelInputsProcessor::*&&)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*&&, std::shared_ptr&&, unsigned long&)::{lambda()#1}::operator()() const /usr/local/include/c++/9.1.0/bits/shared_ptr_base.h:729 ThreadPoolImpl::worker(std::_List_iterator) @@ -344,9 +344,9 @@ Row 7: ────── count(): 1034 sym: StackTrace::StackTrace(ucontext_t const&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 + /home/milovidov/ClickHouse/build_gcc9/../src/Common/StackTrace.cpp:208 DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] - 
/home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 + /home/milovidov/ClickHouse/build_gcc9/../src/IO/BufferBase.h:99 DB::VolnitskyBase >::search(unsigned char const*, unsigned long) const @@ -356,35 +356,35 @@ DB::MatchImpl::vector_constant(DB::PODArray, DB::NameLike>::executeImpl(DB::Block&, std::vector > const&, unsigned long, unsigned long) /opt/milovidov/ClickHouse/build_gcc9/programs/clickhouse DB::PreparedFunctionImpl::execute(DB::Block&, std::vector > const&, unsigned long, unsigned long, bool) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Functions/IFunction.cpp:464 + /home/milovidov/ClickHouse/build_gcc9/../src/Functions/IFunction.cpp:464 DB::ExpressionAction::execute(DB::Block&, bool) const /usr/local/include/c++/9.1.0/bits/stl_vector.h:677 DB::ExpressionActions::execute(DB::Block&, bool) const - /home/milovidov/ClickHouse/build_gcc9/../dbms/Interpreters/ExpressionActions.cpp:739 + /home/milovidov/ClickHouse/build_gcc9/../src/Interpreters/ExpressionActions.cpp:739 DB::MergeTreeRangeReader::executePrewhereActionsAndFilterColumns(DB::MergeTreeRangeReader::ReadResult&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:660 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeRangeReader.cpp:660 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:546 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeRangeReader.cpp:546 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::MergeTreeBaseSelectBlockInputStream::readFromPartImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 DB::MergeTreeBaseSelectBlockInputStream::readImpl() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ExpressionBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34 + /home/milovidov/ClickHouse/build_gcc9/../src/DataStreams/ExpressionBlockInputStream.cpp:34 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::PartialSortingBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13 + /home/milovidov/ClickHouse/build_gcc9/../src/DataStreams/PartialSortingBlockInputStream.cpp:13 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ParallelInputsProcessor::loop(unsigned long) /usr/local/include/c++/9.1.0/bits/atomic_base.h:419 DB::ParallelInputsProcessor::thread(std::shared_ptr, unsigned long) - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215 + /home/milovidov/ClickHouse/build_gcc9/../src/DataStreams/ParallelInputsProcessor.h:215 ThreadFromGlobalPool::ThreadFromGlobalPool::*)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*, std::shared_ptr, unsigned long&>(void (DB::ParallelInputsProcessor::*&&)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*&&, std::shared_ptr&&, unsigned long&)::{lambda()#1}::operator()() const /usr/local/include/c++/9.1.0/bits/shared_ptr_base.h:729 ThreadPoolImpl::worker(std::_List_iterator) @@ -400,9 +400,9 @@ Row 
8: ────── count(): 989 sym: StackTrace::StackTrace(ucontext_t const&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 + /home/milovidov/ClickHouse/build_gcc9/../src/Common/StackTrace.cpp:208 DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] - /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 + /home/milovidov/ClickHouse/build_gcc9/../src/IO/BufferBase.h:99 __lll_lock_wait @@ -412,7 +412,7 @@ pthread_mutex_lock DB::MergeTreeReaderStream::loadMarks() /usr/local/include/c++/9.1.0/bits/std_mutex.h:103 DB::MergeTreeReaderStream::MergeTreeReaderStream(std::__cxx11::basic_string, std::allocator > const&, std::__cxx11::basic_string, std::allocator > const&, unsigned long, std::vector > const&, DB::MarkCache*, bool, DB::UncompressedCache*, unsigned long, unsigned long, unsigned long, DB::MergeTreeIndexGranularityInfo const*, std::function const&, int) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReaderStream.cpp:107 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeReaderStream.cpp:107 std::_Function_handler > const&), DB::MergeTreeReader::addStreams(std::__cxx11::basic_string, std::allocator > const&, DB::IDataType const&, std::function const&, int)::{lambda(std::vector > const&)#1}>::_M_invoke(std::_Any_data const&, std::vector > const&) /usr/local/include/c++/9.1.0/bits/unique_ptr.h:147 DB::MergeTreeReader::addStreams(std::__cxx11::basic_string, std::allocator > const&, DB::IDataType const&, std::function const&, int) @@ -422,21 +422,21 @@ DB::MergeTreeReader::MergeTreeReader(std::__cxx11::basic_string::loop(unsigned long) /usr/local/include/c++/9.1.0/bits/atomic_base.h:419 DB::ParallelInputsProcessor::thread(std::shared_ptr, unsigned long) - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215 + /home/milovidov/ClickHouse/build_gcc9/../src/DataStreams/ParallelInputsProcessor.h:215 ThreadFromGlobalPool::ThreadFromGlobalPool::*)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*, std::shared_ptr, unsigned long&>(void (DB::ParallelInputsProcessor::*&&)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*&&, std::shared_ptr&&, unsigned long&)::{lambda()#1}::operator()() const /usr/local/include/c++/9.1.0/bits/shared_ptr_base.h:729 ThreadPoolImpl::worker(std::_List_iterator) @@ -452,45 +452,45 @@ Row 9: ─────── count(): 779 sym: StackTrace::StackTrace(ucontext_t const&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 + /home/milovidov/ClickHouse/build_gcc9/../src/Common/StackTrace.cpp:208 DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] - /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 + /home/milovidov/ClickHouse/build_gcc9/../src/IO/BufferBase.h:99 void DB::deserializeBinarySSE2<4>(DB::PODArray, 15ul, 16ul>&, DB::PODArray, 15ul, 16ul>&, DB::ReadBuffer&, unsigned long) /usr/local/lib/gcc/x86_64-pc-linux-gnu/9.1.0/include/emmintrin.h:727 DB::DataTypeString::deserializeBinaryBulk(DB::IColumn&, DB::ReadBuffer&, unsigned long, double) const - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataTypes/DataTypeString.cpp:202 + /home/milovidov/ClickHouse/build_gcc9/../src/DataTypes/DataTypeString.cpp:202 DB::MergeTreeReader::readData(std::__cxx11::basic_string, std::allocator > const&, DB::IDataType const&, DB::IColumn&, unsigned long, bool, unsigned long, bool) - 
/home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:232 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeReader.cpp:232 DB::MergeTreeReader::readRows(unsigned long, bool, unsigned long, DB::Block&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:111 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeReader.cpp:111 DB::MergeTreeRangeReader::DelayedStream::finalize(DB::Block&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:35 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeRangeReader.cpp:35 DB::MergeTreeRangeReader::startReadingChain(unsigned long, std::vector >&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:219 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeRangeReader.cpp:219 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::MergeTreeBaseSelectBlockInputStream::readFromPartImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 DB::MergeTreeBaseSelectBlockInputStream::readImpl() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ExpressionBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34 + /home/milovidov/ClickHouse/build_gcc9/../src/DataStreams/ExpressionBlockInputStream.cpp:34 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::PartialSortingBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13 + /home/milovidov/ClickHouse/build_gcc9/../src/DataStreams/PartialSortingBlockInputStream.cpp:13 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ParallelInputsProcessor::loop(unsigned long) /usr/local/include/c++/9.1.0/bits/atomic_base.h:419 DB::ParallelInputsProcessor::thread(std::shared_ptr, unsigned long) - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215 + /home/milovidov/ClickHouse/build_gcc9/../src/DataStreams/ParallelInputsProcessor.h:215 ThreadFromGlobalPool::ThreadFromGlobalPool::*)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*, std::shared_ptr, unsigned long&>(void (DB::ParallelInputsProcessor::*&&)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*&&, std::shared_ptr&&, unsigned long&)::{lambda()#1}::operator()() const /usr/local/include/c++/9.1.0/bits/shared_ptr_base.h:729 ThreadPoolImpl::worker(std::_List_iterator) @@ -506,45 +506,45 @@ Row 10: ─────── count(): 666 sym: StackTrace::StackTrace(ucontext_t const&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 + /home/milovidov/ClickHouse/build_gcc9/../src/Common/StackTrace.cpp:208 DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] - /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 + /home/milovidov/ClickHouse/build_gcc9/../src/IO/BufferBase.h:99 void 
DB::deserializeBinarySSE2<4>(DB::PODArray, 15ul, 16ul>&, DB::PODArray, 15ul, 16ul>&, DB::ReadBuffer&, unsigned long) /usr/local/lib/gcc/x86_64-pc-linux-gnu/9.1.0/include/emmintrin.h:727 DB::DataTypeString::deserializeBinaryBulk(DB::IColumn&, DB::ReadBuffer&, unsigned long, double) const - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataTypes/DataTypeString.cpp:202 + /home/milovidov/ClickHouse/build_gcc9/../src/DataTypes/DataTypeString.cpp:202 DB::MergeTreeReader::readData(std::__cxx11::basic_string, std::allocator > const&, DB::IDataType const&, DB::IColumn&, unsigned long, bool, unsigned long, bool) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:232 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeReader.cpp:232 DB::MergeTreeReader::readRows(unsigned long, bool, unsigned long, DB::Block&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:111 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeReader.cpp:111 DB::MergeTreeRangeReader::DelayedStream::finalize(DB::Block&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:35 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeRangeReader.cpp:35 DB::MergeTreeRangeReader::startReadingChain(unsigned long, std::vector >&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:219 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeRangeReader.cpp:219 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::MergeTreeBaseSelectBlockInputStream::readFromPartImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 DB::MergeTreeBaseSelectBlockInputStream::readImpl() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ExpressionBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34 + /home/milovidov/ClickHouse/build_gcc9/../src/DataStreams/ExpressionBlockInputStream.cpp:34 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::PartialSortingBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13 + /home/milovidov/ClickHouse/build_gcc9/../src/DataStreams/PartialSortingBlockInputStream.cpp:13 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ParallelInputsProcessor::loop(unsigned long) /usr/local/include/c++/9.1.0/bits/atomic_base.h:419 DB::ParallelInputsProcessor::thread(std::shared_ptr, unsigned long) - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215 + /home/milovidov/ClickHouse/build_gcc9/../src/DataStreams/ParallelInputsProcessor.h:215 ThreadFromGlobalPool::ThreadFromGlobalPool::*)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*, std::shared_ptr, unsigned long&>(void (DB::ParallelInputsProcessor::*&&)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*&&, std::shared_ptr&&, unsigned long&)::{lambda()#1}::operator()() 
const /usr/local/include/c++/9.1.0/bits/shared_ptr_base.h:729 ThreadPoolImpl::worker(std::_List_iterator) diff --git a/docs/en/operations/settings/query_complexity.md b/docs/en/operations/settings/query_complexity.md index f62fa3b0008..f4c9830ce52 100644 --- a/docs/en/operations/settings/query_complexity.md +++ b/docs/en/operations/settings/query_complexity.md @@ -37,7 +37,7 @@ Memory consumption is also restricted by the parameters `max_memory_usage_for_us The maximum amount of RAM to use for running a user’s queries on a single server. -Default values are defined in [Settings.h](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/Core/Settings.h#L288). By default, the amount is not restricted (`max_memory_usage_for_user = 0`). +Default values are defined in [Settings.h](https://github.com/ClickHouse/ClickHouse/blob/master/src/Core/Settings.h#L288). By default, the amount is not restricted (`max_memory_usage_for_user = 0`). See also the description of [max\_memory\_usage](#settings_max_memory_usage). @@ -45,7 +45,7 @@ See also the description of [max\_memory\_usage](#settings_max_memory_usage). The maximum amount of RAM to use for running all queries on a single server. -Default values are defined in [Settings.h](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/Core/Settings.h#L289). By default, the amount is not restricted (`max_memory_usage_for_all_queries = 0`). +Default values are defined in [Settings.h](https://github.com/ClickHouse/ClickHouse/blob/master/src/Core/Settings.h#L289). By default, the amount is not restricted (`max_memory_usage_for_all_queries = 0`). See also the description of [max\_memory\_usage](#settings_max_memory_usage). diff --git a/docs/en/operations/system_tables.md b/docs/en/operations/system_tables.md index 4bd58052196..57275c5b80f 100644 --- a/docs/en/operations/system_tables.md +++ b/docs/en/operations/system_tables.md @@ -252,7 +252,7 @@ Columns: - `value` ([Int64](../data_types/int_uint.md)) — Metric value. - `description` ([String](../data_types/string.md)) — Metric description. -The list of supported metrics you can find in the [dbms/Common/CurrentMetrics.cpp](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/Common/CurrentMetrics.cpp) source file of ClickHouse. +The list of supported metrics you can find in the [src/Common/CurrentMetrics.cpp](https://github.com/ClickHouse/ClickHouse/blob/master/src/Common/CurrentMetrics.cpp) source file of ClickHouse. **Example** diff --git a/docs/en/query_language/functions/introspection.md b/docs/en/query_language/functions/introspection.md index 5193d3a85a7..c31faf2cb4f 100644 --- a/docs/en/query_language/functions/introspection.md +++ b/docs/en/query_language/functions/introspection.md @@ -35,7 +35,7 @@ addressToLine(address_of_binary_instruction) - Source code filename and the line number in this file delimited by colon. - For example, `/build/obj-x86_64-linux-gnu/../dbms/Common/ThreadPool.cpp:199`, where `199` is a line number. + For example, `/build/obj-x86_64-linux-gnu/../src/Common/ThreadPool.cpp:199`, where `199` is a line number. - Name of a binary, if the function couldn’t find the debug information. 
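(Editorial aside, not part of the patch: the `max_memory_usage_for_user` / `max_memory_usage_for_all_queries` hunks above only relink Settings.h, but it may help to see how these limits are exercised in practice. A minimal sketch in ClickHouse SQL, assuming a session whose settings profile permits changing these values; the byte counts are illustrative, not recommendations:)

``` sql
-- Per-query ceiling: a query allocating past this typically fails with a
-- MEMORY_LIMIT_EXCEEDED (code 241) exception.
SET max_memory_usage = 5000000000;            -- ~5 GB

-- Ceiling across all concurrently running queries of the current user.
SET max_memory_usage_for_user = 10000000000;  -- ~10 GB

-- 0 restores the documented default of "not restricted".
SET max_memory_usage_for_user = 0;
```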
@@ -80,7 +80,7 @@ SELECT addressToLine(94784076370703) \G ``` text Row 1: ────── -addressToLine(94784076370703): /build/obj-x86_64-linux-gnu/../dbms/Common/ThreadPool.cpp:199 +addressToLine(94784076370703): /build/obj-x86_64-linux-gnu/../src/Common/ThreadPool.cpp:199 ``` Applying the function to the whole stack trace: @@ -100,8 +100,8 @@ Row 1: ────── trace_source_code_lines: /lib/x86_64-linux-gnu/libpthread-2.27.so /usr/lib/debug/usr/bin/clickhouse -/build/obj-x86_64-linux-gnu/../dbms/Common/ThreadPool.cpp:199 -/build/obj-x86_64-linux-gnu/../dbms/Common/ThreadPool.h:155 +/build/obj-x86_64-linux-gnu/../src/Common/ThreadPool.cpp:199 +/build/obj-x86_64-linux-gnu/../src/Common/ThreadPool.h:155 /usr/include/c++/9/bits/atomic_base.h:551 /usr/lib/debug/usr/bin/clickhouse /lib/x86_64-linux-gnu/libpthread-2.27.so diff --git a/docs/es/changelog/index.md b/docs/es/changelog/index.md index 26a516494f3..96e1b6f687c 100644 --- a/docs/es/changelog/index.md +++ b/docs/es/changelog/index.md @@ -241,7 +241,7 @@ machine_translated: true - Comprobación actualizada de consultas colgadas en el script de prueba de clickhouse [\#8858](https://github.com/ClickHouse/ClickHouse/pull/8858) ([Alejandro Kazakov](https://github.com/Akazz)) - Se eliminaron algunos archivos inútiles del repositorio. [\#8843](https://github.com/ClickHouse/ClickHouse/pull/8843) ([alexey-milovidov](https://github.com/alexey-milovidov)) - Tipo cambiado de perftests matemáticos de `once` a `loop`. [\#8783](https://github.com/ClickHouse/ClickHouse/pull/8783) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -- Agregue una imagen acoplable que permite construir un informe HTML del navegador de código interactivo para nuestra base de código. [\#8781](https://github.com/ClickHouse/ClickHouse/pull/8781) ([alesapin](https://github.com/alesapin)) Ver [Navegador de código Woboq](https://clickhouse-test-reports.s3.yandex.net/codebrowser/html_report///ClickHouse/dbms/index.html) +- Agregue una imagen acoplable que permite construir un informe HTML del navegador de código interactivo para nuestra base de código. [\#8781](https://github.com/ClickHouse/ClickHouse/pull/8781) ([alesapin](https://github.com/alesapin)) Ver [Navegador de código Woboq](https://clickhouse-test-reports.s3.yandex.net/codebrowser/html_report///ClickHouse/src/index.html) - Suprima algunas fallas de prueba bajo MSan. [\#8780](https://github.com/ClickHouse/ClickHouse/pull/8780) ([Alejandro Kuzmenkov](https://github.com/akuzm)) - Aceleración “exception while insert” prueba. Esta prueba a menudo se agota en la compilación de depuración con cobertura. [\#8711](https://github.com/ClickHouse/ClickHouse/pull/8711) ([alexey-milovidov](https://github.com/alexey-milovidov)) - Actualizar `libcxx` y `libcxxabi` dominar. En preparación para [\#9304](https://github.com/ClickHouse/ClickHouse/issues/9304) [\#9308](https://github.com/ClickHouse/ClickHouse/pull/9308) ([alexey-milovidov](https://github.com/alexey-milovidov)) diff --git a/docs/es/development/browse_code.md b/docs/es/development/browse_code.md index 05af0a77d99..96a1c4bb939 100644 --- a/docs/es/development/browse_code.md +++ b/docs/es/development/browse_code.md @@ -4,7 +4,7 @@ machine_translated: true # Examinar el código fuente de ClickHouse {#browse-clickhouse-source-code} -Usted puede utilizar **Woboq** navegador de código en línea disponible [aqui](https://clickhouse-test-reports.s3.yandex.net/codebrowser/html_report///ClickHouse/dbms/index.html). Proporciona navegación de código y resaltado semántico, búsqueda e indexación. 
La instantánea de código se actualiza diariamente. +Usted puede utilizar **Woboq** navegador de código en línea disponible [aqui](https://clickhouse-test-reports.s3.yandex.net/codebrowser/html_report///ClickHouse/src/index.html). Proporciona navegación de código y resaltado semántico, búsqueda e indexación. La instantánea de código se actualiza diariamente. Además, puede navegar por las fuentes en [GitHub](https://github.com/ClickHouse/ClickHouse) como de costumbre. diff --git a/docs/es/development/tests.md b/docs/es/development/tests.md index b7d0c182e6d..1126cead128 100644 --- a/docs/es/development/tests.md +++ b/docs/es/development/tests.md @@ -59,7 +59,7 @@ Si desea mejorar el rendimiento de ClickHouse en algún escenario, y si se puede ## Herramientas de prueba y secuencias de comandos {#test-tools-and-scripts} -Algunos programas en `tests` directorio no son pruebas preparadas, pero son herramientas de prueba. Por ejemplo, párr `Lexer` hay una herramienta `dbms/Parsers/tests/lexer` que solo hacen la tokenización de stdin y escriben el resultado coloreado en stdout. Puede usar este tipo de herramientas como ejemplos de código y para exploración y pruebas manuales. +Algunos programas en `tests` directorio no son pruebas preparadas, pero son herramientas de prueba. Por ejemplo, párr `Lexer` hay una herramienta `src/Parsers/tests/lexer` que solo hacen la tokenización de stdin y escriben el resultado coloreado en stdout. Puede usar este tipo de herramientas como ejemplos de código y para exploración y pruebas manuales. También puede colocar un par de archivos `.sh` y `.reference` junto con la herramienta para ejecutarlo en alguna entrada predefinida, entonces el resultado del script se puede comparar con `.reference` file. Este tipo de pruebas no están automatizadas. diff --git a/docs/es/interfaces/tcp.md b/docs/es/interfaces/tcp.md index 868ec644104..6a67fe09c99 100644 --- a/docs/es/interfaces/tcp.md +++ b/docs/es/interfaces/tcp.md @@ -4,6 +4,6 @@ machine_translated: true # Interfaz nativa (TCP) {#native-interface-tcp} -El protocolo nativo se utiliza en el [cliente de línea de comandos](cli.md), para la comunicación entre servidores durante el procesamiento de consultas distribuidas, y también en otros programas de C, Desafortunadamente, el protocolo nativo de ClickHouse aún no tiene especificaciones formales, pero puede ser diseñado de manera inversa desde el código fuente de ClickHouse (comenzando [por aquí](https://github.com/ClickHouse/ClickHouse/tree/master/dbms/Client)) y/o mediante la interceptación y el análisis del tráfico TCP. +El protocolo nativo se utiliza en el [cliente de línea de comandos](cli.md), para la comunicación entre servidores durante el procesamiento de consultas distribuidas, y también en otros programas de C, Desafortunadamente, el protocolo nativo de ClickHouse aún no tiene especificaciones formales, pero puede ser diseñado de manera inversa desde el código fuente de ClickHouse (comenzando [por aquí](https://github.com/ClickHouse/ClickHouse/tree/master/src/Client)) y/o mediante la interceptación y el análisis del tráfico TCP. 
[Artículo Original](https://clickhouse.tech/docs/es/interfaces/tcp/) diff --git a/docs/es/operations/performance/sampling_query_profiler_example_result.txt b/docs/es/operations/performance/sampling_query_profiler_example_result.txt index 56c2fdf9c65..df33da2c9aa 100644 --- a/docs/es/operations/performance/sampling_query_profiler_example_result.txt +++ b/docs/es/operations/performance/sampling_query_profiler_example_result.txt @@ -2,55 +2,55 @@ Row 1: ────── count(): 6344 sym: StackTrace::StackTrace(ucontext_t const&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 + /home/milovidov/ClickHouse/build_gcc9/../src/Common/StackTrace.cpp:208 DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] - /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 + /home/milovidov/ClickHouse/build_gcc9/../src/IO/BufferBase.h:99 read DB::ReadBufferFromFileDescriptor::nextImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/ReadBufferFromFileDescriptor.cpp:56 + /home/milovidov/ClickHouse/build_gcc9/../src/IO/ReadBufferFromFileDescriptor.cpp:56 DB::CompressedReadBufferBase::readCompressedData(unsigned long&, unsigned long&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/ReadBuffer.h:54 + /home/milovidov/ClickHouse/build_gcc9/../src/IO/ReadBuffer.h:54 DB::CompressedReadBufferFromFile::nextImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/Compression/CompressedReadBufferFromFile.cpp:22 + /home/milovidov/ClickHouse/build_gcc9/../src/Compression/CompressedReadBufferFromFile.cpp:22 DB::CompressedReadBufferFromFile::seek(unsigned long, unsigned long) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Compression/CompressedReadBufferFromFile.cpp:63 + /home/milovidov/ClickHouse/build_gcc9/../src/Compression/CompressedReadBufferFromFile.cpp:63 DB::MergeTreeReaderStream::seekToMark(unsigned long) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReaderStream.cpp:200 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeReaderStream.cpp:200 std::_Function_handler > const&), DB::MergeTreeReader::readData(std::__cxx11::basic_string, std::allocator > const&, DB::IDataType const&, DB::IColumn&, unsigned long, bool, unsigned long, bool)::{lambda(bool)#1}::operator()(bool) const::{lambda(std::vector > const&)#1}>::_M_invoke(std::_Any_data const&, std::vector > const&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:212 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeReader.cpp:212 DB::IDataType::deserializeBinaryBulkWithMultipleStreams(DB::IColumn&, unsigned long, DB::IDataType::DeserializeBinaryBulkSettings&, std::shared_ptr&) const /usr/local/include/c++/9.1.0/bits/std_function.h:690 DB::MergeTreeReader::readData(std::__cxx11::basic_string, std::allocator > const&, DB::IDataType const&, DB::IColumn&, unsigned long, bool, unsigned long, bool) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:232 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeReader.cpp:232 DB::MergeTreeReader::readRows(unsigned long, bool, unsigned long, DB::Block&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:111 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeReader.cpp:111 DB::MergeTreeRangeReader::DelayedStream::finalize(DB::Block&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:35 
+ /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeRangeReader.cpp:35 DB::MergeTreeRangeReader::continueReadingChain(DB::MergeTreeRangeReader::ReadResult&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:219 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeRangeReader.cpp:219 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:487 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeRangeReader.cpp:487 DB::MergeTreeBaseSelectBlockInputStream::readFromPartImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 DB::MergeTreeBaseSelectBlockInputStream::readImpl() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ExpressionBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34 + /home/milovidov/ClickHouse/build_gcc9/../src/DataStreams/ExpressionBlockInputStream.cpp:34 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::PartialSortingBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13 + /home/milovidov/ClickHouse/build_gcc9/../src/DataStreams/PartialSortingBlockInputStream.cpp:13 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ParallelInputsProcessor::loop(unsigned long) /usr/local/include/c++/9.1.0/bits/atomic_base.h:419 DB::ParallelInputsProcessor::thread(std::shared_ptr, unsigned long) - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215 + /home/milovidov/ClickHouse/build_gcc9/../src/DataStreams/ParallelInputsProcessor.h:215 ThreadFromGlobalPool::ThreadFromGlobalPool::*)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*, std::shared_ptr, unsigned long&>(void (DB::ParallelInputsProcessor::*&&)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*&&, std::shared_ptr&&, unsigned long&)::{lambda()#1}::operator()() const /usr/local/include/c++/9.1.0/bits/shared_ptr_base.h:729 ThreadPoolImpl::worker(std::_List_iterator) @@ -66,9 +66,9 @@ Row 2: ────── count(): 3295 sym: StackTrace::StackTrace(ucontext_t const&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 + /home/milovidov/ClickHouse/build_gcc9/../src/Common/StackTrace.cpp:208 DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] - /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 + /home/milovidov/ClickHouse/build_gcc9/../src/IO/BufferBase.h:99 __pthread_cond_wait @@ -82,11 +82,11 @@ DB::UnionBlockInputStream::readImpl() DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::MergeSortingBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/Core/Block.h:90 + /home/milovidov/ClickHouse/build_gcc9/../src/Core/Block.h:90 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ExpressionBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34 + 
/home/milovidov/ClickHouse/build_gcc9/../src/DataStreams/ExpressionBlockInputStream.cpp:34 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::LimitBlockInputStream::readImpl() @@ -100,7 +100,7 @@ std::_Function_handler::worker(std::_List_iterator) /usr/local/include/c++/9.1.0/x86_64-pc-linux-gnu/bits/gthr-default.h:748 ThreadFromGlobalPool::ThreadFromGlobalPool::scheduleImpl(std::function, int, std::optional)::{lambda()#3}>(ThreadPoolImpl::scheduleImpl(std::function, int, std::optional)::{lambda()#3}&&)::{lambda()#1}::operator()() const - /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/ThreadPool.h:146 + /home/milovidov/ClickHouse/build_gcc9/../src/Common/ThreadPool.h:146 ThreadPoolImpl::worker(std::_List_iterator) /usr/local/include/c++/9.1.0/bits/unique_lock.h:69 execute_native_thread_routine @@ -114,9 +114,9 @@ Row 3: ────── count(): 1978 sym: StackTrace::StackTrace(ucontext_t const&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 + /home/milovidov/ClickHouse/build_gcc9/../src/Common/StackTrace.cpp:208 DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] - /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 + /home/milovidov/ClickHouse/build_gcc9/../src/IO/BufferBase.h:99 DB::VolnitskyBase >::search(unsigned char const*, unsigned long) const @@ -126,35 +126,35 @@ DB::MatchImpl::vector_constant(DB::PODArray, DB::NameLike>::executeImpl(DB::Block&, std::vector > const&, unsigned long, unsigned long) /opt/milovidov/ClickHouse/build_gcc9/programs/clickhouse DB::PreparedFunctionImpl::execute(DB::Block&, std::vector > const&, unsigned long, unsigned long, bool) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Functions/IFunction.cpp:464 + /home/milovidov/ClickHouse/build_gcc9/../src/Functions/IFunction.cpp:464 DB::ExpressionAction::execute(DB::Block&, bool) const /usr/local/include/c++/9.1.0/bits/stl_vector.h:677 DB::ExpressionActions::execute(DB::Block&, bool) const - /home/milovidov/ClickHouse/build_gcc9/../dbms/Interpreters/ExpressionActions.cpp:739 + /home/milovidov/ClickHouse/build_gcc9/../src/Interpreters/ExpressionActions.cpp:739 DB::MergeTreeRangeReader::executePrewhereActionsAndFilterColumns(DB::MergeTreeRangeReader::ReadResult&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:660 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeRangeReader.cpp:660 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:546 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeRangeReader.cpp:546 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::MergeTreeBaseSelectBlockInputStream::readFromPartImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 DB::MergeTreeBaseSelectBlockInputStream::readImpl() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ExpressionBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34 + /home/milovidov/ClickHouse/build_gcc9/../src/DataStreams/ExpressionBlockInputStream.cpp:34 
DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::PartialSortingBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13 + /home/milovidov/ClickHouse/build_gcc9/../src/DataStreams/PartialSortingBlockInputStream.cpp:13 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ParallelInputsProcessor::loop(unsigned long) /usr/local/include/c++/9.1.0/bits/atomic_base.h:419 DB::ParallelInputsProcessor::thread(std::shared_ptr, unsigned long) - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215 + /home/milovidov/ClickHouse/build_gcc9/../src/DataStreams/ParallelInputsProcessor.h:215 ThreadFromGlobalPool::ThreadFromGlobalPool::*)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*, std::shared_ptr, unsigned long&>(void (DB::ParallelInputsProcessor::*&&)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*&&, std::shared_ptr&&, unsigned long&)::{lambda()#1}::operator()() const /usr/local/include/c++/9.1.0/bits/shared_ptr_base.h:729 ThreadPoolImpl::worker(std::_List_iterator) @@ -170,9 +170,9 @@ Row 4: ────── count(): 1913 sym: StackTrace::StackTrace(ucontext_t const&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 + /home/milovidov/ClickHouse/build_gcc9/../src/Common/StackTrace.cpp:208 DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] - /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 + /home/milovidov/ClickHouse/build_gcc9/../src/IO/BufferBase.h:99 DB::VolnitskyBase >::search(unsigned char const*, unsigned long) const @@ -182,35 +182,35 @@ DB::MatchImpl::vector_constant(DB::PODArray, DB::NameLike>::executeImpl(DB::Block&, std::vector > const&, unsigned long, unsigned long) /opt/milovidov/ClickHouse/build_gcc9/programs/clickhouse DB::PreparedFunctionImpl::execute(DB::Block&, std::vector > const&, unsigned long, unsigned long, bool) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Functions/IFunction.cpp:464 + /home/milovidov/ClickHouse/build_gcc9/../src/Functions/IFunction.cpp:464 DB::ExpressionAction::execute(DB::Block&, bool) const /usr/local/include/c++/9.1.0/bits/stl_vector.h:677 DB::ExpressionActions::execute(DB::Block&, bool) const - /home/milovidov/ClickHouse/build_gcc9/../dbms/Interpreters/ExpressionActions.cpp:739 + /home/milovidov/ClickHouse/build_gcc9/../src/Interpreters/ExpressionActions.cpp:739 DB::MergeTreeRangeReader::executePrewhereActionsAndFilterColumns(DB::MergeTreeRangeReader::ReadResult&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:660 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeRangeReader.cpp:660 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:546 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeRangeReader.cpp:546 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::MergeTreeBaseSelectBlockInputStream::readFromPartImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 DB::MergeTreeBaseSelectBlockInputStream::readImpl() 
/usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ExpressionBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34 + /home/milovidov/ClickHouse/build_gcc9/../src/DataStreams/ExpressionBlockInputStream.cpp:34 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::PartialSortingBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13 + /home/milovidov/ClickHouse/build_gcc9/../src/DataStreams/PartialSortingBlockInputStream.cpp:13 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ParallelInputsProcessor::loop(unsigned long) /usr/local/include/c++/9.1.0/bits/atomic_base.h:419 DB::ParallelInputsProcessor::thread(std::shared_ptr, unsigned long) - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215 + /home/milovidov/ClickHouse/build_gcc9/../src/DataStreams/ParallelInputsProcessor.h:215 ThreadFromGlobalPool::ThreadFromGlobalPool::*)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*, std::shared_ptr, unsigned long&>(void (DB::ParallelInputsProcessor::*&&)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*&&, std::shared_ptr&&, unsigned long&)::{lambda()#1}::operator()() const /usr/local/include/c++/9.1.0/bits/shared_ptr_base.h:729 ThreadPoolImpl::worker(std::_List_iterator) @@ -226,9 +226,9 @@ Row 5: ────── count(): 1672 sym: StackTrace::StackTrace(ucontext_t const&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 + /home/milovidov/ClickHouse/build_gcc9/../src/Common/StackTrace.cpp:208 DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] - /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 + /home/milovidov/ClickHouse/build_gcc9/../src/IO/BufferBase.h:99 DB::VolnitskyBase >::search(unsigned char const*, unsigned long) const @@ -238,35 +238,35 @@ DB::MatchImpl::vector_constant(DB::PODArray, DB::NameLike>::executeImpl(DB::Block&, std::vector > const&, unsigned long, unsigned long) /opt/milovidov/ClickHouse/build_gcc9/programs/clickhouse DB::PreparedFunctionImpl::execute(DB::Block&, std::vector > const&, unsigned long, unsigned long, bool) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Functions/IFunction.cpp:464 + /home/milovidov/ClickHouse/build_gcc9/../src/Functions/IFunction.cpp:464 DB::ExpressionAction::execute(DB::Block&, bool) const /usr/local/include/c++/9.1.0/bits/stl_vector.h:677 DB::ExpressionActions::execute(DB::Block&, bool) const - /home/milovidov/ClickHouse/build_gcc9/../dbms/Interpreters/ExpressionActions.cpp:739 + /home/milovidov/ClickHouse/build_gcc9/../src/Interpreters/ExpressionActions.cpp:739 DB::MergeTreeRangeReader::executePrewhereActionsAndFilterColumns(DB::MergeTreeRangeReader::ReadResult&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:660 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeRangeReader.cpp:660 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:546 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeRangeReader.cpp:546 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 
DB::MergeTreeBaseSelectBlockInputStream::readFromPartImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 DB::MergeTreeBaseSelectBlockInputStream::readImpl() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ExpressionBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34 + /home/milovidov/ClickHouse/build_gcc9/../src/DataStreams/ExpressionBlockInputStream.cpp:34 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::PartialSortingBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13 + /home/milovidov/ClickHouse/build_gcc9/../src/DataStreams/PartialSortingBlockInputStream.cpp:13 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ParallelInputsProcessor::loop(unsigned long) /usr/local/include/c++/9.1.0/bits/atomic_base.h:419 DB::ParallelInputsProcessor::thread(std::shared_ptr, unsigned long) - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215 + /home/milovidov/ClickHouse/build_gcc9/../src/DataStreams/ParallelInputsProcessor.h:215 ThreadFromGlobalPool::ThreadFromGlobalPool::*)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*, std::shared_ptr, unsigned long&>(void (DB::ParallelInputsProcessor::*&&)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*&&, std::shared_ptr&&, unsigned long&)::{lambda()#1}::operator()() const /usr/local/include/c++/9.1.0/bits/shared_ptr_base.h:729 ThreadPoolImpl::worker(std::_List_iterator) @@ -282,53 +282,53 @@ Row 6: ────── count(): 1531 sym: StackTrace::StackTrace(ucontext_t const&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 + /home/milovidov/ClickHouse/build_gcc9/../src/Common/StackTrace.cpp:208 DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] - /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 + /home/milovidov/ClickHouse/build_gcc9/../src/IO/BufferBase.h:99 read DB::ReadBufferFromFileDescriptor::nextImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/ReadBufferFromFileDescriptor.cpp:56 + /home/milovidov/ClickHouse/build_gcc9/../src/IO/ReadBufferFromFileDescriptor.cpp:56 DB::CompressedReadBufferBase::readCompressedData(unsigned long&, unsigned long&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/ReadBuffer.h:54 + /home/milovidov/ClickHouse/build_gcc9/../src/IO/ReadBuffer.h:54 DB::CompressedReadBufferFromFile::nextImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/Compression/CompressedReadBufferFromFile.cpp:22 + /home/milovidov/ClickHouse/build_gcc9/../src/Compression/CompressedReadBufferFromFile.cpp:22 void DB::deserializeBinarySSE2<4>(DB::PODArray, 15ul, 16ul>&, DB::PODArray, 15ul, 16ul>&, DB::ReadBuffer&, unsigned long) - /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/ReadBuffer.h:53 + /home/milovidov/ClickHouse/build_gcc9/../src/IO/ReadBuffer.h:53 DB::DataTypeString::deserializeBinaryBulk(DB::IColumn&, DB::ReadBuffer&, unsigned long, double) const - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataTypes/DataTypeString.cpp:202 + /home/milovidov/ClickHouse/build_gcc9/../src/DataTypes/DataTypeString.cpp:202 
DB::MergeTreeReader::readData(std::__cxx11::basic_string, std::allocator > const&, DB::IDataType const&, DB::IColumn&, unsigned long, bool, unsigned long, bool) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:232 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeReader.cpp:232 DB::MergeTreeReader::readRows(unsigned long, bool, unsigned long, DB::Block&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:111 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeReader.cpp:111 DB::MergeTreeRangeReader::DelayedStream::finalize(DB::Block&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:35 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeRangeReader.cpp:35 DB::MergeTreeRangeReader::startReadingChain(unsigned long, std::vector >&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:219 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeRangeReader.cpp:219 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::MergeTreeBaseSelectBlockInputStream::readFromPartImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 DB::MergeTreeBaseSelectBlockInputStream::readImpl() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ExpressionBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34 + /home/milovidov/ClickHouse/build_gcc9/../src/DataStreams/ExpressionBlockInputStream.cpp:34 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::PartialSortingBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13 + /home/milovidov/ClickHouse/build_gcc9/../src/DataStreams/PartialSortingBlockInputStream.cpp:13 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ParallelInputsProcessor::loop(unsigned long) /usr/local/include/c++/9.1.0/bits/atomic_base.h:419 DB::ParallelInputsProcessor::thread(std::shared_ptr, unsigned long) - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215 + /home/milovidov/ClickHouse/build_gcc9/../src/DataStreams/ParallelInputsProcessor.h:215 ThreadFromGlobalPool::ThreadFromGlobalPool::*)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*, std::shared_ptr, unsigned long&>(void (DB::ParallelInputsProcessor::*&&)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*&&, std::shared_ptr&&, unsigned long&)::{lambda()#1}::operator()() const /usr/local/include/c++/9.1.0/bits/shared_ptr_base.h:729 ThreadPoolImpl::worker(std::_List_iterator) @@ -344,9 +344,9 @@ Row 7: ────── count(): 1034 sym: StackTrace::StackTrace(ucontext_t const&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 + /home/milovidov/ClickHouse/build_gcc9/../src/Common/StackTrace.cpp:208 DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] - 
/home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 + /home/milovidov/ClickHouse/build_gcc9/../src/IO/BufferBase.h:99 DB::VolnitskyBase >::search(unsigned char const*, unsigned long) const @@ -356,35 +356,35 @@ DB::MatchImpl::vector_constant(DB::PODArray, DB::NameLike>::executeImpl(DB::Block&, std::vector > const&, unsigned long, unsigned long) /opt/milovidov/ClickHouse/build_gcc9/programs/clickhouse DB::PreparedFunctionImpl::execute(DB::Block&, std::vector > const&, unsigned long, unsigned long, bool) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Functions/IFunction.cpp:464 + /home/milovidov/ClickHouse/build_gcc9/../src/Functions/IFunction.cpp:464 DB::ExpressionAction::execute(DB::Block&, bool) const /usr/local/include/c++/9.1.0/bits/stl_vector.h:677 DB::ExpressionActions::execute(DB::Block&, bool) const - /home/milovidov/ClickHouse/build_gcc9/../dbms/Interpreters/ExpressionActions.cpp:739 + /home/milovidov/ClickHouse/build_gcc9/../src/Interpreters/ExpressionActions.cpp:739 DB::MergeTreeRangeReader::executePrewhereActionsAndFilterColumns(DB::MergeTreeRangeReader::ReadResult&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:660 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeRangeReader.cpp:660 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:546 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeRangeReader.cpp:546 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::MergeTreeBaseSelectBlockInputStream::readFromPartImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 DB::MergeTreeBaseSelectBlockInputStream::readImpl() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ExpressionBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34 + /home/milovidov/ClickHouse/build_gcc9/../src/DataStreams/ExpressionBlockInputStream.cpp:34 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::PartialSortingBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13 + /home/milovidov/ClickHouse/build_gcc9/../src/DataStreams/PartialSortingBlockInputStream.cpp:13 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ParallelInputsProcessor::loop(unsigned long) /usr/local/include/c++/9.1.0/bits/atomic_base.h:419 DB::ParallelInputsProcessor::thread(std::shared_ptr, unsigned long) - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215 + /home/milovidov/ClickHouse/build_gcc9/../src/DataStreams/ParallelInputsProcessor.h:215 ThreadFromGlobalPool::ThreadFromGlobalPool::*)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*, std::shared_ptr, unsigned long&>(void (DB::ParallelInputsProcessor::*&&)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*&&, std::shared_ptr&&, unsigned long&)::{lambda()#1}::operator()() const /usr/local/include/c++/9.1.0/bits/shared_ptr_base.h:729 ThreadPoolImpl::worker(std::_List_iterator) @@ -400,9 +400,9 @@ Row 
8: ────── count(): 989 sym: StackTrace::StackTrace(ucontext_t const&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 + /home/milovidov/ClickHouse/build_gcc9/../src/Common/StackTrace.cpp:208 DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] - /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 + /home/milovidov/ClickHouse/build_gcc9/../src/IO/BufferBase.h:99 __lll_lock_wait @@ -412,7 +412,7 @@ pthread_mutex_lock DB::MergeTreeReaderStream::loadMarks() /usr/local/include/c++/9.1.0/bits/std_mutex.h:103 DB::MergeTreeReaderStream::MergeTreeReaderStream(std::__cxx11::basic_string, std::allocator > const&, std::__cxx11::basic_string, std::allocator > const&, unsigned long, std::vector > const&, DB::MarkCache*, bool, DB::UncompressedCache*, unsigned long, unsigned long, unsigned long, DB::MergeTreeIndexGranularityInfo const*, std::function const&, int) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReaderStream.cpp:107 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeReaderStream.cpp:107 std::_Function_handler > const&), DB::MergeTreeReader::addStreams(std::__cxx11::basic_string, std::allocator > const&, DB::IDataType const&, std::function const&, int)::{lambda(std::vector > const&)#1}>::_M_invoke(std::_Any_data const&, std::vector > const&) /usr/local/include/c++/9.1.0/bits/unique_ptr.h:147 DB::MergeTreeReader::addStreams(std::__cxx11::basic_string, std::allocator > const&, DB::IDataType const&, std::function const&, int) @@ -422,21 +422,21 @@ DB::MergeTreeReader::MergeTreeReader(std::__cxx11::basic_string::loop(unsigned long) /usr/local/include/c++/9.1.0/bits/atomic_base.h:419 DB::ParallelInputsProcessor::thread(std::shared_ptr, unsigned long) - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215 + /home/milovidov/ClickHouse/build_gcc9/../src/DataStreams/ParallelInputsProcessor.h:215 ThreadFromGlobalPool::ThreadFromGlobalPool::*)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*, std::shared_ptr, unsigned long&>(void (DB::ParallelInputsProcessor::*&&)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*&&, std::shared_ptr&&, unsigned long&)::{lambda()#1}::operator()() const /usr/local/include/c++/9.1.0/bits/shared_ptr_base.h:729 ThreadPoolImpl::worker(std::_List_iterator) @@ -452,45 +452,45 @@ Row 9: ─────── count(): 779 sym: StackTrace::StackTrace(ucontext_t const&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 + /home/milovidov/ClickHouse/build_gcc9/../src/Common/StackTrace.cpp:208 DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] - /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 + /home/milovidov/ClickHouse/build_gcc9/../src/IO/BufferBase.h:99 void DB::deserializeBinarySSE2<4>(DB::PODArray, 15ul, 16ul>&, DB::PODArray, 15ul, 16ul>&, DB::ReadBuffer&, unsigned long) /usr/local/lib/gcc/x86_64-pc-linux-gnu/9.1.0/include/emmintrin.h:727 DB::DataTypeString::deserializeBinaryBulk(DB::IColumn&, DB::ReadBuffer&, unsigned long, double) const - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataTypes/DataTypeString.cpp:202 + /home/milovidov/ClickHouse/build_gcc9/../src/DataTypes/DataTypeString.cpp:202 DB::MergeTreeReader::readData(std::__cxx11::basic_string, std::allocator > const&, DB::IDataType const&, DB::IColumn&, unsigned long, bool, unsigned long, bool) - 
/home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:232 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeReader.cpp:232 DB::MergeTreeReader::readRows(unsigned long, bool, unsigned long, DB::Block&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:111 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeReader.cpp:111 DB::MergeTreeRangeReader::DelayedStream::finalize(DB::Block&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:35 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeRangeReader.cpp:35 DB::MergeTreeRangeReader::startReadingChain(unsigned long, std::vector >&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:219 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeRangeReader.cpp:219 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::MergeTreeBaseSelectBlockInputStream::readFromPartImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 DB::MergeTreeBaseSelectBlockInputStream::readImpl() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ExpressionBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34 + /home/milovidov/ClickHouse/build_gcc9/../src/DataStreams/ExpressionBlockInputStream.cpp:34 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::PartialSortingBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13 + /home/milovidov/ClickHouse/build_gcc9/../src/DataStreams/PartialSortingBlockInputStream.cpp:13 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ParallelInputsProcessor::loop(unsigned long) /usr/local/include/c++/9.1.0/bits/atomic_base.h:419 DB::ParallelInputsProcessor::thread(std::shared_ptr, unsigned long) - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215 + /home/milovidov/ClickHouse/build_gcc9/../src/DataStreams/ParallelInputsProcessor.h:215 ThreadFromGlobalPool::ThreadFromGlobalPool::*)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*, std::shared_ptr, unsigned long&>(void (DB::ParallelInputsProcessor::*&&)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*&&, std::shared_ptr&&, unsigned long&)::{lambda()#1}::operator()() const /usr/local/include/c++/9.1.0/bits/shared_ptr_base.h:729 ThreadPoolImpl::worker(std::_List_iterator) @@ -506,45 +506,45 @@ Row 10: ─────── count(): 666 sym: StackTrace::StackTrace(ucontext_t const&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Common/StackTrace.cpp:208 + /home/milovidov/ClickHouse/build_gcc9/../src/Common/StackTrace.cpp:208 DB::(anonymous namespace)::writeTraceInfo(DB::TimerType, int, siginfo_t*, void*) [clone .isra.0] - /home/milovidov/ClickHouse/build_gcc9/../dbms/IO/BufferBase.h:99 + /home/milovidov/ClickHouse/build_gcc9/../src/IO/BufferBase.h:99 void 
DB::deserializeBinarySSE2<4>(DB::PODArray, 15ul, 16ul>&, DB::PODArray, 15ul, 16ul>&, DB::ReadBuffer&, unsigned long) /usr/local/lib/gcc/x86_64-pc-linux-gnu/9.1.0/include/emmintrin.h:727 DB::DataTypeString::deserializeBinaryBulk(DB::IColumn&, DB::ReadBuffer&, unsigned long, double) const - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataTypes/DataTypeString.cpp:202 + /home/milovidov/ClickHouse/build_gcc9/../src/DataTypes/DataTypeString.cpp:202 DB::MergeTreeReader::readData(std::__cxx11::basic_string, std::allocator > const&, DB::IDataType const&, DB::IColumn&, unsigned long, bool, unsigned long, bool) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:232 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeReader.cpp:232 DB::MergeTreeReader::readRows(unsigned long, bool, unsigned long, DB::Block&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:111 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeReader.cpp:111 DB::MergeTreeRangeReader::DelayedStream::finalize(DB::Block&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:35 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeRangeReader.cpp:35 DB::MergeTreeRangeReader::startReadingChain(unsigned long, std::vector >&) - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:219 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeRangeReader.cpp:219 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::MergeTreeRangeReader::read(unsigned long, std::vector >&) /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::MergeTreeBaseSelectBlockInputStream::readFromPartImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 + /home/milovidov/ClickHouse/build_gcc9/../src/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158 DB::MergeTreeBaseSelectBlockInputStream::readImpl() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ExpressionBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34 + /home/milovidov/ClickHouse/build_gcc9/../src/DataStreams/ExpressionBlockInputStream.cpp:34 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::PartialSortingBlockInputStream::readImpl() - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13 + /home/milovidov/ClickHouse/build_gcc9/../src/DataStreams/PartialSortingBlockInputStream.cpp:13 DB::IBlockInputStream::read() /usr/local/include/c++/9.1.0/bits/stl_vector.h:108 DB::ParallelInputsProcessor::loop(unsigned long) /usr/local/include/c++/9.1.0/bits/atomic_base.h:419 DB::ParallelInputsProcessor::thread(std::shared_ptr, unsigned long) - /home/milovidov/ClickHouse/build_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215 + /home/milovidov/ClickHouse/build_gcc9/../src/DataStreams/ParallelInputsProcessor.h:215 ThreadFromGlobalPool::ThreadFromGlobalPool::*)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*, std::shared_ptr, unsigned long&>(void (DB::ParallelInputsProcessor::*&&)(std::shared_ptr, unsigned long), DB::ParallelInputsProcessor*&&, std::shared_ptr&&, unsigned long&)::{lambda()#1}::operator()() 
const /usr/local/include/c++/9.1.0/bits/shared_ptr_base.h:729 ThreadPoolImpl::worker(std::_List_iterator) diff --git a/docs/es/operations/settings/query_complexity.md b/docs/es/operations/settings/query_complexity.md index 4fe64302213..cfe667b5cf6 100644 --- a/docs/es/operations/settings/query_complexity.md +++ b/docs/es/operations/settings/query_complexity.md @@ -41,7 +41,7 @@ El consumo de memoria también está restringido por los parámetros `max_memory La cantidad máxima de RAM que se utilizará para ejecutar las consultas de un usuario en un único servidor. -Los valores predeterminados se definen en [Configuración.h](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/Core/Settings.h#L288). De forma predeterminada, el importe no está restringido (`max_memory_usage_for_user = 0`). +Los valores predeterminados se definen en [Configuración.h](https://github.com/ClickHouse/ClickHouse/blob/master/src/Core/Settings.h#L288). De forma predeterminada, el importe no está restringido (`max_memory_usage_for_user = 0`). Ver también la descripción de [Método de codificación de datos:](#settings_max_memory_usage). @@ -49,7 +49,7 @@ Ver también la descripción de [Método de codificación de datos:](#settings_m La cantidad máxima de RAM que se utilizará para ejecutar todas las consultas en un único servidor. -Los valores predeterminados se definen en [Configuración.h](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/Core/Settings.h#L289). De forma predeterminada, el importe no está restringido (`max_memory_usage_for_all_queries = 0`). +Los valores predeterminados se definen en [Configuración.h](https://github.com/ClickHouse/ClickHouse/blob/master/src/Core/Settings.h#L289). De forma predeterminada, el importe no está restringido (`max_memory_usage_for_all_queries = 0`). Ver también la descripción de [Método de codificación de datos:](#settings_max_memory_usage). diff --git a/docs/es/operations/system_tables.md b/docs/es/operations/system_tables.md index 5129abfb1ba..d2f1bf8b1b0 100644 --- a/docs/es/operations/system_tables.md +++ b/docs/es/operations/system_tables.md @@ -256,7 +256,7 @@ Columna: - `value` ([Int64](../data_types/int_uint.md)) — Valor métrico. - `description` ([Cadena](../data_types/string.md)) — Descripción métrica. -La lista de métricas admitidas que puede encontrar en el [dbms/Common/CurrentMetrics.cpp](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/Common/CurrentMetrics.cpp) archivo fuente de ClickHouse. +La lista de métricas admitidas que puede encontrar en el [src/Common/CurrentMetrics.cpp](https://github.com/ClickHouse/ClickHouse/blob/master/src/Common/CurrentMetrics.cpp) archivo fuente de ClickHouse. **Ejemplo** diff --git a/docs/es/query_language/functions/introspection.md b/docs/es/query_language/functions/introspection.md index 5169da4e5a2..7dae8a35254 100644 --- a/docs/es/query_language/functions/introspection.md +++ b/docs/es/query_language/functions/introspection.md @@ -39,7 +39,7 @@ addressToLine(address_of_binary_instruction) - Nombre de archivo del código fuente y el número de línea en este archivo delimitado por dos puntos. - For example, `/build/obj-x86_64-linux-gnu/../dbms/Common/ThreadPool.cpp:199`, where `199` is a line number. + For example, `/build/obj-x86_64-linux-gnu/../src/Common/ThreadPool.cpp:199`, where `199` is a line number. - Nombre de un binario, si la función no pudo encontrar la información de depuración. 
@@ -84,7 +84,7 @@ SELECT addressToLine(94784076370703) \G ``` text Row 1: ────── -addressToLine(94784076370703): /build/obj-x86_64-linux-gnu/../dbms/Common/ThreadPool.cpp:199 +addressToLine(94784076370703): /build/obj-x86_64-linux-gnu/../src/Common/ThreadPool.cpp:199 ``` Aplicando la función a todo el seguimiento de la pila: @@ -104,8 +104,8 @@ Row 1: ────── trace_source_code_lines: /lib/x86_64-linux-gnu/libpthread-2.27.so /usr/lib/debug/usr/bin/clickhouse -/build/obj-x86_64-linux-gnu/../dbms/Common/ThreadPool.cpp:199 -/build/obj-x86_64-linux-gnu/../dbms/Common/ThreadPool.h:155 +/build/obj-x86_64-linux-gnu/../src/Common/ThreadPool.cpp:199 +/build/obj-x86_64-linux-gnu/../src/Common/ThreadPool.h:155 /usr/include/c++/9/bits/atomic_base.h:551 /usr/lib/debug/usr/bin/clickhouse /lib/x86_64-linux-gnu/libpthread-2.27.so diff --git a/docs/fa/getting_started/install.md b/docs/fa/getting_started/install.md index 805d6b7e480..75b4131239d 100644 --- a/docs/fa/getting_started/install.md +++ b/docs/fa/getting_started/install.md @@ -165,7 +165,7 @@ clickhouse-client --host=example.com
    ``` bash -milovidov@hostname:~/work/metrica/src/dbms/Client$ ./clickhouse-client +milovidov@hostname:~/work/metrica/src/src/Client$ ./clickhouse-client ClickHouse client version 0.0.18749. Connecting to localhost:9000. Connected to ClickHouse server version 0.0.18749. diff --git a/docs/fa/interfaces/tcp.md b/docs/fa/interfaces/tcp.md index 00a069189db..efd041886f9 100644 --- a/docs/fa/interfaces/tcp.md +++ b/docs/fa/interfaces/tcp.md @@ -2,7 +2,7 @@ # رابط بومی (TCP) {#rbt-bwmy-tcp} -پروتکل بومی در \[خط فرمان خط\] (cli.md)، برای برقراری ارتباط بین سرور در طی پردازش پرس و جو توزیع شده، و همچنین در سایر برنامه های C ++ استفاده می شود. متاسفانه، پروتکل ClickHouse بومی هنوز مشخصات رسمی ندارد، اما می توان آن را از کد منبع ClickHouse (شروع [از اینجا](https://github.com/ClickHouse/ClickHouse/tree/master/dbms/Client)) و / یا با رهگیری و تجزیه و تحلیل ترافیک TCP. +پروتکل بومی در \[خط فرمان خط\] (cli.md)، برای برقراری ارتباط بین سرور در طی پردازش پرس و جو توزیع شده، و همچنین در سایر برنامه های C ++ استفاده می شود. متاسفانه، پروتکل ClickHouse بومی هنوز مشخصات رسمی ندارد، اما می توان آن را از کد منبع ClickHouse (شروع [از اینجا](https://github.com/ClickHouse/ClickHouse/tree/master/src/Client)) و / یا با رهگیری و تجزیه و تحلیل ترافیک TCP.
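An aside on the hunks above: the interface docs touched here note, in several languages, that the native TCP protocol has no formal specification and is easiest to use through an existing client implementation rather than by hand. A minimal sketch, assuming the third-party `clickhouse-driver` Python package (which speaks the native protocol on port 9000) and a local server with default credentials; this example is illustrative only and is not part of the patch:

``` python
# Minimal sketch: talk to ClickHouse over the native TCP protocol (port 9000)
# via the third-party `clickhouse-driver` package, rather than reimplementing
# the (unspecified) wire format. Assumes a local server with default credentials.
from clickhouse_driver import Client

client = Client(host='localhost', port=9000)  # native protocol, not HTTP (8123)

# The driver handles the handshake, data blocks, and compression internally.
rows = client.execute('SELECT version(), currentDatabase()')
print(rows)
```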
    diff --git a/docs/fr/changelog/index.md b/docs/fr/changelog/index.md index 710f11ba1d5..f6bcc4593ff 100644 --- a/docs/fr/changelog/index.md +++ b/docs/fr/changelog/index.md @@ -245,7 +245,7 @@ machine_translated: true - Mise à jour de la vérification des requêtes suspendues dans le script clickhouse-test [\#8858](https://github.com/ClickHouse/ClickHouse/pull/8858) ([Alexander Kazakov](https://github.com/Akazz)) - Suppression de certains fichiers inutiles du référentiel. [\#8843](https://github.com/ClickHouse/ClickHouse/pull/8843) ([alexeï-milovidov](https://github.com/alexey-milovidov)) - Changement de type de math perftests de `once` de `loop`. [\#8783](https://github.com/ClickHouse/ClickHouse/pull/8783) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -- Ajouter une image docker qui permet de créer un rapport HTML interactif du navigateur de code pour notre base de code. [\#8781](https://github.com/ClickHouse/ClickHouse/pull/8781) ([alésapine](https://github.com/alesapin)) Voir [Navigateur De Code Woboq](https://clickhouse-test-reports.s3.yandex.net/codebrowser/html_report///ClickHouse/dbms/src/index.html) +- Ajouter une image docker qui permet de créer un rapport HTML interactif du navigateur de code pour notre base de code. [\#8781](https://github.com/ClickHouse/ClickHouse/pull/8781) ([alésapine](https://github.com/alesapin)) Voir [Navigateur De Code Woboq](https://clickhouse-test-reports.s3.yandex.net/codebrowser/html_report///ClickHouse/src/src/index.html) - Supprimer certains échecs de test sous MSan. [\#8780](https://github.com/ClickHouse/ClickHouse/pull/8780) ([Alexander Kuzmenkov](https://github.com/akuzm)) - SpeedUp “exception while insert” test. Ce test expire souvent dans la construction debug-with-coverage. [\#8711](https://github.com/ClickHouse/ClickHouse/pull/8711) ([alexeï-milovidov](https://github.com/alexey-milovidov)) - Mettre `libcxx` et `libcxxabi` maîtriser. En préparation à [\#9304](https://github.com/ClickHouse/ClickHouse/issues/9304) [\#9308](https://github.com/ClickHouse/ClickHouse/pull/9308) ([alexeï-milovidov](https://github.com/alexey-milovidov)) diff --git a/docs/fr/development/browse_code.md b/docs/fr/development/browse_code.md index 49f49dcb26e..664f0dc9d48 100644 --- a/docs/fr/development/browse_code.md +++ b/docs/fr/development/browse_code.md @@ -4,7 +4,7 @@ machine_translated: true # Parcourir Le Code Source De ClickHouse {#browse-clickhouse-source-code} -Vous pouvez utiliser **Woboq** navigateur de code en ligne disponible [ici](https://clickhouse-test-reports.s3.yandex.net/codebrowser/html_report///ClickHouse/dbms/src/index.html). Il fournit la navigation de code et la mise en évidence sémantique, la recherche et l'indexation. L'instantané de code est mis à jour quotidiennement. +Vous pouvez utiliser **Woboq** navigateur de code en ligne disponible [ici](https://clickhouse-test-reports.s3.yandex.net/codebrowser/html_report///ClickHouse/src/src/index.html). Il fournit la navigation de code et la mise en évidence sémantique, la recherche et l'indexation. L'instantané de code est mis à jour quotidiennement. Aussi, vous pouvez parcourir les sources sur [GitHub](https://github.com/ClickHouse/ClickHouse) comme à l'habitude. 
diff --git a/docs/fr/development/developer_instruction.md b/docs/fr/development/developer_instruction.md index fc725d8afb4..d5082869b9b 100644 --- a/docs/fr/development/developer_instruction.md +++ b/docs/fr/development/developer_instruction.md @@ -246,7 +246,7 @@ Le code Style Guide: https://clickhouse.tech/docs/fr/développement/style/ Rédaction de tests: https://clickhouse.tech/docs/fr/développement/tests/ -Liste des tâches: https://github.com/ClickHouse/ClickHouse/blob/master/dbms/tests/instructions/easy\_tasks\_sorted\_en.md +Liste des tâches: https://github.com/ClickHouse/ClickHouse/blob/master/src/tests/instructions/easy\_tasks\_sorted\_en.md # Des Données De Test {#test-data} diff --git a/docs/fr/development/tests.md b/docs/fr/development/tests.md index 2060223c80f..b4f746e46e0 100644 --- a/docs/fr/development/tests.md +++ b/docs/fr/development/tests.md @@ -10,15 +10,15 @@ Les tests fonctionnels sont les plus simples et pratiques à utiliser. La plupar Chaque test fonctionnel envoie une ou plusieurs requêtes au serveur clickhouse en cours d'exécution et compare le résultat avec la référence. -Les Tests sont situés dans `dbms/tests/queries` répertoire. Il y a deux sous-répertoires: `stateless` et `stateful`. Les tests sans état exécutent des requêtes sans données de test préchargées - ils créent souvent de petits ensembles de données synthétiques à la volée, dans le test lui-même. Les tests avec État nécessitent des données de test préchargées de Yandex.Metrica et non disponible pour le grand public. Nous avons tendance à utiliser uniquement `stateless` tests et éviter d'ajouter de nouveaux `stateful` test. +Les Tests sont situés dans `src/tests/queries` répertoire. Il y a deux sous-répertoires: `stateless` et `stateful`. Les tests sans état exécutent des requêtes sans données de test préchargées - ils créent souvent de petits ensembles de données synthétiques à la volée, dans le test lui-même. Les tests avec État nécessitent des données de test préchargées de Yandex.Metrica et non disponible pour le grand public. Nous avons tendance à utiliser uniquement `stateless` tests et éviter d'ajouter de nouveaux `stateful` test. Chaque test peut être de deux types: `.sql` et `.sh`. `.sql` test est le script SQL simple qui est canalisé vers `clickhouse-client --multiquery --testmode`. `.sh` test est un script qui est exécuté par lui-même. -Pour exécuter tous les tests, utilisez `dbms/tests/clickhouse-test` outil. Regarder `--help` pour la liste des options possibles. Vous pouvez simplement exécuter tous les tests ou exécuter un sous ensemble de tests filtrés par sous chaîne dans le nom du test: `./clickhouse-test substring`. +Pour exécuter tous les tests, utilisez `src/tests/clickhouse-test` outil. Regarder `--help` pour la liste des options possibles. Vous pouvez simplement exécuter tous les tests ou exécuter un sous ensemble de tests filtrés par sous chaîne dans le nom du test: `./clickhouse-test substring`. Le moyen le plus simple d'invoquer des tests fonctionnels est de copier `clickhouse-client` de `/usr/bin/`, exécuter `clickhouse-server` et puis exécutez `./clickhouse-test` à partir de son propre répertoire. -Pour ajouter un nouveau test, créez un `.sql` ou `.sh` fichier dans `dbms/tests/queries/0_stateless` répertoire, vérifiez-le manuellement, puis générez `.reference` fichier de la façon suivante: `clickhouse-client -n --testmode < 00000_test.sql > 00000_test.reference` ou `./00000_test.sh > ./00000_test.reference`. 
+Pour ajouter un nouveau test, créez un `.sql` ou `.sh` fichier dans `src/tests/queries/0_stateless` répertoire, vérifiez-le manuellement, puis générez `.reference` fichier de la façon suivante: `clickhouse-client -n --testmode < 00000_test.sql > 00000_test.reference` ou `./00000_test.sh > ./00000_test.reference`. Les Tests doivent utiliser (create, drop, etc) uniquement des tables dans `test` base de données supposée être créée au préalable; les tests peuvent également utiliser des tables temporaires. @@ -33,13 +33,13 @@ désactivez ces groupes de tests en utilisant `--no-zookeeper`, `--no-shard` et ## Bugs connus {#known-bugs} -Si nous connaissons des bugs qui peuvent être facilement reproduits par des tests fonctionnels, nous plaçons des tests fonctionnels préparés dans `dbms/tests/queries/bugs` répertoire. Ces tests seront déplacés à `dbms/tests/queries/0_stateless` quand les bugs sont corrigés. +Si nous connaissons des bugs qui peuvent être facilement reproduits par des tests fonctionnels, nous plaçons des tests fonctionnels préparés dans `src/tests/queries/bugs` répertoire. Ces tests seront déplacés à `src/tests/queries/0_stateless` quand les bugs sont corrigés. ## Les Tests D'Intégration {#integration-tests} Les tests d'intégration permettent de tester ClickHouse en configuration cluster et clickhouse interaction avec D'autres serveurs comme MySQL, Postgres, MongoDB. Ils sont utiles pour émuler les splits réseau, les chutes de paquets, etc. Ces tests sont exécutés sous Docker et créent plusieurs conteneurs avec divers logiciels. -Voir `dbms/tests/integration/README.md` sur la façon d'exécuter ces tests. +Voir `src/tests/integration/README.md` sur la façon d'exécuter ces tests. Notez que l'intégration de ClickHouse avec des pilotes tiers n'est pas testée. De plus, nous n'avons actuellement pas de tests d'intégration avec nos pilotes JDBC et ODBC. @@ -51,7 +51,7 @@ Ce n'est pas nécessairement d'avoir des tests unitaires si le code est déjà c ## Tests De Performance {#performance-tests} -Les tests de Performance permettent de mesurer et de comparer les performances d'une partie isolée de ClickHouse sur des requêtes synthétiques. Les Tests sont situés à `dbms/tests/performance`. Chaque test est représenté par `.xml` fichier avec description du cas de test. Les Tests sont exécutés avec `clickhouse performance-test` outil (qui est incorporé dans `clickhouse` binaire). Voir `--help` pour l'invocation. +Les tests de Performance permettent de mesurer et de comparer les performances d'une partie isolée de ClickHouse sur des requêtes synthétiques. Les Tests sont situés à `src/tests/performance`. Chaque test est représenté par `.xml` fichier avec description du cas de test. Les Tests sont exécutés avec `clickhouse performance-test` outil (qui est incorporé dans `clickhouse` binaire). Voir `--help` pour l'invocation. Chaque essai un ou miltiple requêtes (éventuellement avec des combinaisons de paramètres) dans une boucle avec certaines conditions pour l'arrêt (comme “maximum execution speed is not changing in three seconds”) et mesurer certaines mesures sur les performances de la requête (comme “maximum execution speed”). Certains tests peuvent contenir des conditions préalables sur un ensemble de données de test préchargé. @@ -59,13 +59,13 @@ Si vous souhaitez améliorer les performances de ClickHouse dans certains scéna ## Outils Et Scripts De Test {#test-tools-and-scripts} -Certains programmes dans `tests` directory ne sont pas des tests préparés, mais sont des outils de test. 
Par exemple, pour `Lexer` il est un outil `dbms/src/Parsers/tests/lexer` Cela fait juste la tokenisation de stdin et écrit le résultat colorisé dans stdout. Vous pouvez utiliser ce genre d'outils comme exemples de code et pour l'exploration et les tests manuels. +Certains programmes dans `tests` directory ne sont pas des tests préparés, mais sont des outils de test. Par exemple, pour `Lexer` il est un outil `src/src/Parsers/tests/lexer` Cela fait juste la tokenisation de stdin et écrit le résultat colorisé dans stdout. Vous pouvez utiliser ce genre d'outils comme exemples de code et pour l'exploration et les tests manuels. Vous pouvez également placer une paire de fichiers `.sh` et `.reference` avec l'outil pour l'exécuter sur une entrée prédéfinie - alors le résultat du script peut être comparé à `.reference` fichier. Ce genre de tests ne sont pas automatisés. ## Tests Divers {#miscellanous-tests} -Il existe des tests pour les dictionnaires externes situés à `dbms/tests/external_dictionaries` et pour machine appris modèles dans `dbms/tests/external_models`. Ces tests ne sont pas mis à jour et doivent être transférés aux tests d'intégration. +Il existe des tests pour les dictionnaires externes situés à `src/tests/external_dictionaries` et pour machine appris modèles dans `src/tests/external_models`. Ces tests ne sont pas mis à jour et doivent être transférés aux tests d'intégration. Il y a un test séparé pour les inserts de quorum. Ce test exécute le cluster ClickHouse sur des serveurs séparés et émule divers cas d'échec: scission réseau, chute de paquets (entre les nœuds ClickHouse, entre Clickhouse et ZooKeeper, entre le serveur ClickHouse et le client, etc.), `kill -9`, `kill -STOP` et `kill -CONT` , comme [Jepsen](https://aphyr.com/tags/Jepsen). Ensuite, le test vérifie que toutes les insertions reconnues ont été écrites et que toutes les insertions rejetées ne l'ont pas été. @@ -75,9 +75,9 @@ Le test de Quorum a été écrit par une équipe distincte avant que ClickHouse Lorsque vous développez une nouvelle fonctionnalité, il est raisonnable de tester également manuellement. Vous pouvez le faire avec les étapes suivantes: -Construire ClickHouse. Exécuter ClickHouse à partir du terminal: changer le répertoire à `dbms/src/programs/clickhouse-server` et de l'exécuter avec `./clickhouse-server`. Il utilisera la configuration (`config.xml`, `users.xml` et les fichiers à l'intérieur `config.d` et `users.d` répertoires) à partir du répertoire courant par défaut. Pour vous connecter au serveur ClickHouse, exécutez `dbms/src/programs/clickhouse-client/clickhouse-client`. +Construire ClickHouse. Exécuter ClickHouse à partir du terminal: changer le répertoire à `src/src/programs/clickhouse-server` et de l'exécuter avec `./clickhouse-server`. Il utilisera la configuration (`config.xml`, `users.xml` et les fichiers à l'intérieur `config.d` et `users.d` répertoires) à partir du répertoire courant par défaut. Pour vous connecter au serveur ClickHouse, exécutez `src/src/programs/clickhouse-client/clickhouse-client`. -Notez que tous les outils clickhouse (serveur, client, etc.) ne sont que des liens symboliques vers un seul binaire nommé `clickhouse`. Vous pouvez trouver ce binaire à `dbms/src/programs/clickhouse`. Tous les outils peuvent également être invoquée comme `clickhouse tool` plutôt `clickhouse-tool`. +Notez que tous les outils clickhouse (serveur, client, etc.) ne sont que des liens symboliques vers un seul binaire nommé `clickhouse`. 
Vous pouvez trouver ce binaire à `src/src/programs/clickhouse`. Tous les outils peuvent également être invoquée comme `clickhouse tool` plutôt `clickhouse-tool`. Alternativement, vous pouvez installer le paquet ClickHouse: soit une version stable du référentiel Yandex, soit vous pouvez créer un paquet pour vous-même avec `./release` dans les sources de ClickHouse racine. Puis démarrez le serveur avec `sudo service clickhouse-server start` (ou stop pour arrêter le serveur). Rechercher des journaux à `/etc/clickhouse-server/clickhouse-server.log`. @@ -206,7 +206,7 @@ Les gens du Département Cloud de Yandex font un aperçu de base des capacités ## Analyseurs Statiques {#static-analyzers} -Nous courons `PVS-Studio` par commettre base. Nous avons évalué `clang-tidy`, `Coverity`, `cppcheck`, `PVS-Studio`, `tscancode`. Vous trouverez des instructions pour l'utilisation dans `dbms/tests/instructions/` répertoire. Aussi, vous pouvez lire [l'article en russe](https://habr.com/company/yandex/blog/342018/). +Nous courons `PVS-Studio` par commettre base. Nous avons évalué `clang-tidy`, `Coverity`, `cppcheck`, `PVS-Studio`, `tscancode`. Vous trouverez des instructions pour l'utilisation dans `src/tests/instructions/` répertoire. Aussi, vous pouvez lire [l'article en russe](https://habr.com/company/yandex/blog/342018/). Si vous utilisez `CLion` en tant QU'IDE, vous pouvez tirer parti de certains `clang-tidy` contrôles de la boîte. diff --git a/docs/fr/getting_started/example_datasets/metrica.md b/docs/fr/getting_started/example_datasets/metrica.md index 7ddca6462eb..01af9b4fdd6 100644 --- a/docs/fr/getting_started/example_datasets/metrica.md +++ b/docs/fr/getting_started/example_datasets/metrica.md @@ -64,4 +64,4 @@ clickhouse-client --query "SELECT COUNT(*) FROM datasets.visits_v1" [Tutoriel ClickHouse](../../getting_started/tutorial.md) est basé sur Yandex.Metrica dataset et la façon recommandée pour commencer avec cet ensemble de données est de simplement passer par tutoriel. -D'autres exemples de requêtes pour ces tables peuvent être trouvés parmi [tests avec État](https://github.com/ClickHouse/ClickHouse/tree/master/dbms/tests/queries/1_stateful) de ClickHouse (ils sont nommés `test.hists` et `test.visits` y). +D'autres exemples de requêtes pour ces tables peuvent être trouvés parmi [tests avec État](https://github.com/ClickHouse/ClickHouse/tree/master/src/tests/queries/1_stateful) de ClickHouse (ils sont nommés `test.hists` et `test.visits` y). diff --git a/docs/fr/interfaces/tcp.md b/docs/fr/interfaces/tcp.md index a2566ae6a43..7678cdc2878 100644 --- a/docs/fr/interfaces/tcp.md +++ b/docs/fr/interfaces/tcp.md @@ -4,6 +4,6 @@ machine_translated: true # Interface Native (TCP) {#native-interface-tcp} -Le protocole natif est utilisé dans le [client de ligne de commande](cli.md), pour la communication inter-serveur pendant le traitement de requête distribué, et également dans d'autres programmes C++. Malheureusement, le protocole clickhouse natif n'a pas encore de spécification formelle, mais il peut être rétro-conçu à partir du code source ClickHouse (démarrage [ici](https://github.com/ClickHouse/ClickHouse/tree/master/dbms/src/Client)) et/ou en interceptant et en analysant le trafic TCP. +Le protocole natif est utilisé dans le [client de ligne de commande](cli.md), pour la communication inter-serveur pendant le traitement de requête distribué, et également dans d'autres programmes C++. 
Malheureusement, le protocole clickhouse natif n'a pas encore de spécification formelle, mais il peut être rétro-conçu à partir du code source ClickHouse (démarrage [ici](https://github.com/ClickHouse/ClickHouse/tree/master/src/src/Client)) et/ou en interceptant et en analysant le trafic TCP. [Article Original](https://clickhouse.tech/docs/en/interfaces/tcp/) diff --git a/docs/fr/operations/performance_test.md b/docs/fr/operations/performance_test.md index cecf091c7cc..9c0424b4c22 100644 --- a/docs/fr/operations/performance_test.md +++ b/docs/fr/operations/performance_test.md @@ -37,9 +37,9 @@ Avec cette instruction, vous pouvez exécuter le test de performance clickhouse - wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/dbms/benchmark/clickhouse/benchmark-new.sh + wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/src/benchmark/clickhouse/benchmark-new.sh chmod a+x benchmark-new.sh - wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/dbms/benchmark/clickhouse/queries.sql + wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/src/benchmark/clickhouse/queries.sql 1. Télécharger les données de test selon le [Yandex.Metrica dataset](../getting_started/example_datasets/metrica.md) instruction (“hits” tableau contenant 100 millions de lignes). diff --git a/docs/fr/operations/settings/query_complexity.md b/docs/fr/operations/settings/query_complexity.md index bf3e7d2bba8..fc9ce0b522c 100644 --- a/docs/fr/operations/settings/query_complexity.md +++ b/docs/fr/operations/settings/query_complexity.md @@ -41,7 +41,7 @@ La consommation de mémoire est également limitée par les paramètres `max_mem Quantité maximale de RAM à utiliser pour exécuter les requêtes d'un utilisateur sur un seul serveur. -Les valeurs par défaut sont définies dans [Paramètre.h](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/src/Core/Settings.h#L288). Par défaut, le montant n'est pas limité (`max_memory_usage_for_user = 0`). +Les valeurs par défaut sont définies dans [Paramètre.h](https://github.com/ClickHouse/ClickHouse/blob/master/src/src/Core/Settings.h#L288). Par défaut, le montant n'est pas limité (`max_memory_usage_for_user = 0`). Voir aussi la description de [max\_memory\_usage](#settings_max_memory_usage). @@ -49,7 +49,7 @@ Voir aussi la description de [max\_memory\_usage](#settings_max_memory_usage). La quantité maximale de RAM à utiliser pour exécuter toutes les requêtes sur un seul serveur. -Les valeurs par défaut sont définies dans [Paramètre.h](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/src/Core/Settings.h#L289). Par défaut, le montant n'est pas limité (`max_memory_usage_for_all_queries = 0`). +Les valeurs par défaut sont définies dans [Paramètre.h](https://github.com/ClickHouse/ClickHouse/blob/master/src/src/Core/Settings.h#L289). Par défaut, le montant n'est pas limité (`max_memory_usage_for_all_queries = 0`). Voir aussi la description de [max\_memory\_usage](#settings_max_memory_usage). diff --git a/docs/fr/operations/system_tables.md b/docs/fr/operations/system_tables.md index 469a0c710bd..85fbf31b3d1 100644 --- a/docs/fr/operations/system_tables.md +++ b/docs/fr/operations/system_tables.md @@ -256,7 +256,7 @@ Colonne: - `value` ([Int64](../data_types/int_uint.md)) — Metric value. - `description` ([Chaîne](../data_types/string.md)) — Metric description. 
-La liste des mesures que vous pouvez trouver dans le [SGBD / src / Common / CurrentMetrics.rpc](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/src/Common/CurrentMetrics.cpp) fichier source de ClickHouse. +La liste des mesures que vous pouvez trouver dans le [SGBD / src / Common / CurrentMetrics.rpc](https://github.com/ClickHouse/ClickHouse/blob/master/src/src/Common/CurrentMetrics.cpp) fichier source de ClickHouse. **Exemple** diff --git a/docs/fr/query_language/alter.md b/docs/fr/query_language/alter.md index 13c57c0ec0c..8e0a435207d 100644 --- a/docs/fr/query_language/alter.md +++ b/docs/fr/query_language/alter.md @@ -446,7 +446,7 @@ Toutes les règles ci-dessus sont aussi valables pour la [OPTIMIZE](misc.md#misc OPTIMIZE TABLE table_not_partitioned PARTITION tuple() FINAL; ``` -Les exemples de `ALTER ... PARTITION` les requêtes sont démontrées dans les tests [`00502_custom_partitioning_local`](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/tests/queries/0_stateless/00502_custom_partitioning_local.sql) et [`00502_custom_partitioning_replicated_zookeeper`](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper.sql). +Les exemples de `ALTER ... PARTITION` les requêtes sont démontrées dans les tests [`00502_custom_partitioning_local`](https://github.com/ClickHouse/ClickHouse/blob/master/src/tests/queries/0_stateless/00502_custom_partitioning_local.sql) et [`00502_custom_partitioning_replicated_zookeeper`](https://github.com/ClickHouse/ClickHouse/blob/master/src/tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper.sql). ### Manipulations avec Table TTL {#manipulations-with-table-ttl} diff --git a/docs/fr/query_language/functions/introspection.md b/docs/fr/query_language/functions/introspection.md index e724c155d51..722c76fb7e1 100644 --- a/docs/fr/query_language/functions/introspection.md +++ b/docs/fr/query_language/functions/introspection.md @@ -39,7 +39,7 @@ addressToLine(address_of_binary_instruction) - Nom de fichier du code Source et le numéro de ligne dans ce fichier délimité par deux-points. - For example, `/build/obj-x86_64-linux-gnu/../dbms/src/Common/ThreadPool.cpp:199`, where `199` is a line number. + For example, `/build/obj-x86_64-linux-gnu/../src/src/Common/ThreadPool.cpp:199`, where `199` is a line number. - Nom d'un binaire, si la fonction n'a pas pu trouver les informations de débogage. 
@@ -84,7 +84,7 @@ SELECT addressToLine(94784076370703) \G ``` text Row 1: ────── -addressToLine(94784076370703): /build/obj-x86_64-linux-gnu/../dbms/src/Common/ThreadPool.cpp:199 +addressToLine(94784076370703): /build/obj-x86_64-linux-gnu/../src/src/Common/ThreadPool.cpp:199 ``` Application de la fonction à la trace de la pile entière: @@ -104,8 +104,8 @@ Row 1: ────── trace_source_code_lines: /lib/x86_64-linux-gnu/libpthread-2.27.so /usr/lib/debug/usr/bin/clickhouse -/build/obj-x86_64-linux-gnu/../dbms/src/Common/ThreadPool.cpp:199 -/build/obj-x86_64-linux-gnu/../dbms/src/Common/ThreadPool.h:155 +/build/obj-x86_64-linux-gnu/../src/src/Common/ThreadPool.cpp:199 +/build/obj-x86_64-linux-gnu/../src/src/Common/ThreadPool.h:155 /usr/include/c++/9/bits/atomic_base.h:551 /usr/lib/debug/usr/bin/clickhouse /lib/x86_64-linux-gnu/libpthread-2.27.so diff --git a/docs/fr/query_language/operators.md b/docs/fr/query_language/operators.md index 76477cfae4a..29dd4813e29 100644 --- a/docs/fr/query_language/operators.md +++ b/docs/fr/query_language/operators.md @@ -133,7 +133,7 @@ FROM test.Orders; └───────────┴────────────┴──────────┴───────────┴─────────────┴─────────────┘ ``` -Vous pouvez voir plus d'exemples de [test](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/tests/queries/0_stateless/00619_extract.sql). +Vous pouvez voir plus d'exemples de [test](https://github.com/ClickHouse/ClickHouse/blob/master/src/tests/queries/0_stateless/00619_extract.sql). ### INTERVAL {#operator-interval} diff --git a/docs/ru/development/browse_code.md b/docs/ru/development/browse_code.md index d5f38bf6984..f87e3be7f4b 100644 --- a/docs/ru/development/browse_code.md +++ b/docs/ru/development/browse_code.md @@ -1,6 +1,6 @@ # Навигация по коду ClickHouse {#navigatsiia-po-kodu-clickhouse} -Для навигации по коду онлайн доступен **Woboq**, он расположен [здесь](https://clickhouse-test-reports.s3.yandex.net/codebrowser/html_report///ClickHouse/dbms/index.html). В нём реализовано удобное перемещение между исходными файлами, семантическая подсветка, подсказки, индексация и поиск. Слепок кода обновляется ежедневно. +Для навигации по коду онлайн доступен **Woboq**, он расположен [здесь](https://clickhouse-test-reports.s3.yandex.net/codebrowser/html_report///ClickHouse/src/index.html). В нём реализовано удобное перемещение между исходными файлами, семантическая подсветка, подсказки, индексация и поиск. Слепок кода обновляется ежедневно. Также вы можете просматривать исходники на [GitHub](https://github.com/ClickHouse/ClickHouse). diff --git a/docs/ru/interfaces/tcp.md b/docs/ru/interfaces/tcp.md index 194f54ce6c7..6bdfb286ac2 100644 --- a/docs/ru/interfaces/tcp.md +++ b/docs/ru/interfaces/tcp.md @@ -1,5 +1,5 @@ # Родной интерфейс (TCP) {#rodnoi-interfeis-tcp} -Нативный протокол используется в [клиенте командной строки](cli.md), для взаимодействия между серверами во время обработки распределенных запросов, а также в других программах на C++. К сожалению, у родного протокола ClickHouse пока нет формальной спецификации, но в нем можно разобраться с использованием исходного кода ClickHouse (начиная с [примерно этого места](https://github.com/ClickHouse/ClickHouse/tree/master/dbms/Client)) и/или путем перехвата и анализа TCP трафика. +Нативный протокол используется в [клиенте командной строки](cli.md), для взаимодействия между серверами во время обработки распределенных запросов, а также в других программах на C++. 
Unfortunately, the native ClickHouse protocol does not yet have a formal specification, but it can be understood from the ClickHouse source code (starting [from around here](https://github.com/ClickHouse/ClickHouse/tree/master/src/Client)) and/or by intercepting and analyzing TCP traffic.
 
 [Original article](https://clickhouse.tech/docs/ru/interfaces/tcp/)
 
diff --git a/docs/ru/operations/settings/query_complexity.md b/docs/ru/operations/settings/query_complexity.md
index 4dbc2aed1d3..94791d79420 100644
--- a/docs/ru/operations/settings/query_complexity.md
+++ b/docs/ru/operations/settings/query_complexity.md
@@ -38,7 +38,7 @@
 
 The maximum possible amount of RAM for a user's queries on a single server.
 
-Default values are defined in the [Settings.h](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/Core/Settings.h#L288) file. By default, the amount is not restricted (`max_memory_usage_for_user = 0`).
+Default values are defined in the [Settings.h](https://github.com/ClickHouse/ClickHouse/blob/master/src/Core/Settings.h#L288) file. By default, the amount is not restricted (`max_memory_usage_for_user = 0`).
 
 See also the description of the [max\_memory\_usage](#settings_max_memory_usage) setting.
 
@@ -46,7 +46,7 @@
 
 The maximum possible amount of RAM for all queries on a single server.
 
-Default values are defined in the [Settings.h](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/Core/Settings.h#L289) file. By default, the amount is not restricted (`max_memory_usage_for_all_queries = 0`).
+Default values are defined in the [Settings.h](https://github.com/ClickHouse/ClickHouse/blob/master/src/Core/Settings.h#L289) file. By default, the amount is not restricted (`max_memory_usage_for_all_queries = 0`).
 
 See also the description of the [max\_memory\_usage](#settings_max_memory_usage) setting.
 
diff --git a/docs/ru/operations/system_tables.md b/docs/ru/operations/system_tables.md
index 3afb5ff740c..fc4ed0446e6 100644
--- a/docs/ru/operations/system_tables.md
+++ b/docs/ru/operations/system_tables.md
@@ -245,7 +245,7 @@ SELECT * FROM system.events LIMIT 5
 - `value` ([Int64](../data_types/int_uint.md)) — the metric value.
 - `description` ([String](../data_types/string.md)) — the metric description.
 
-For the list of supported metrics, see the file [dbms/Common/CurrentMetrics.cpp](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/Common/CurrentMetrics.cpp).
+For the list of supported metrics, see the file [src/Common/CurrentMetrics.cpp](https://github.com/ClickHouse/ClickHouse/blob/master/src/Common/CurrentMetrics.cpp).
 
 **Example**
 
diff --git a/docs/ru/query_language/functions/introspection.md b/docs/ru/query_language/functions/introspection.md
index 7cd994840e2..50b4cbb44bf 100644
--- a/docs/ru/query_language/functions/introspection.md
+++ b/docs/ru/query_language/functions/introspection.md
@@ -35,7 +35,7 @@ addressToLine(address_of_binary_instruction)
 
 -   The source code file name and the line number in that file, separated by a colon.
 
-    For example, `/build/obj-x86_64-linux-gnu/../dbms/Common/ThreadPool.cpp:199`, where `199` is the line number.
+    For example, `/build/obj-x86_64-linux-gnu/../src/Common/ThreadPool.cpp:199`, where `199` is the line number.
 
 -   The name of a binary file, if the function cannot find the debug information.
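As a quick illustration of the `system.metrics` table documented in the system_tables.md excerpt above (a sketch only; the column set `metric`, `value`, `description` follows those docs):

``` sql
-- Show a few of the currently tracked metrics declared in CurrentMetrics.cpp.
SELECT metric, value, description
FROM system.metrics
LIMIT 5;
```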
@@ -80,7 +80,7 @@ SELECT addressToLine(94784076370703) \G
 ``` text
 Row 1:
 ──────
-addressToLine(94784076370703): /build/obj-x86_64-linux-gnu/../dbms/Common/ThreadPool.cpp:199
+addressToLine(94784076370703): /build/obj-x86_64-linux-gnu/../src/Common/ThreadPool.cpp:199
 ```
 
 Applying the function to the whole stack trace:
 
@@ -100,8 +100,8 @@ Row 1:
 ──────
 trace_source_code_lines: /lib/x86_64-linux-gnu/libpthread-2.27.so
 /usr/lib/debug/usr/bin/clickhouse
-/build/obj-x86_64-linux-gnu/../dbms/Common/ThreadPool.cpp:199
-/build/obj-x86_64-linux-gnu/../dbms/Common/ThreadPool.h:155
+/build/obj-x86_64-linux-gnu/../src/Common/ThreadPool.cpp:199
+/build/obj-x86_64-linux-gnu/../src/Common/ThreadPool.h:155
 /usr/include/c++/9/bits/atomic_base.h:551
 /usr/lib/debug/usr/bin/clickhouse
 /lib/x86_64-linux-gnu/libpthread-2.27.so
diff --git a/docs/zh/development/tests.md b/docs/zh/development/tests.md
index bdc7d5d4e79..b3a3468e31c 100644
--- a/docs/zh/development/tests.md
+++ b/docs/zh/development/tests.md
@@ -52,7 +52,7 @@
 ## Test tools and scripts {#ce-shi-gong-ju-he-jiao-ben}
 
-Some programs in the `tests` directory are not prepared tests but test tools. For example, for `Lexer` there is a tool `dbms/Parsers/tests/lexer` that writes its result to standard output. You can use these tools as code examples and for exploration and manual testing.
+Some programs in the `tests` directory are not prepared tests but test tools. For example, for `Lexer` there is a tool `src/Parsers/tests/lexer` that writes its result to standard output. You can use these tools as code examples and for exploration and manual testing.
 
 You can also place a pair of files `.sh` and `.reference` together with the tool to run it on some predefined input; the script result can then be compared with the `.reference` file. These tests are not automated.
 
diff --git a/docs/zh/getting_started/install.md b/docs/zh/getting_started/install.md
index 6f26c82608b..55aeff892a2 100644
--- a/docs/zh/getting_started/install.md
+++ b/docs/zh/getting_started/install.md
@@ -127,7 +127,7 @@ clickhouse-client --host=example.com
 To check that the system is working:
 
 ``` bash
-milovidov@hostname:~/work/metrica/src/dbms/Client$ ./clickhouse-client
+milovidov@hostname:~/work/metrica/src/src/Client$ ./clickhouse-client
 ClickHouse client version 0.0.18749.
 Connecting to localhost:9000.
 Connected to ClickHouse server version 0.0.18749.
 
diff --git a/docs/zh/interfaces/tcp.md b/docs/zh/interfaces/tcp.md
index 08a52ce1185..b783a8c3959 100644
--- a/docs/zh/interfaces/tcp.md
+++ b/docs/zh/interfaces/tcp.md
@@ -1,5 +1,5 @@
 # Native client interface (TCP) {#yuan-sheng-ke-hu-duan-jie-kou-tcp}
 
-The native protocol is used in the [command-line client](cli.md), for inter-server communication during distributed query processing, and in other C++ programs. Unfortunately, the native ClickHouse protocol does not yet have a formal specification, but it can be reverse-engineered from the ClickHouse source code (starting [from here](https://github.com/ClickHouse/ClickHouse/tree/master/dbms/Client)) and/or by intercepting and analyzing TCP traffic.
+The native protocol is used in the [command-line client](cli.md), for inter-server communication during distributed query processing, and in other C++ programs. Unfortunately, the native ClickHouse protocol does not yet have a formal specification, but it can be reverse-engineered from the ClickHouse source code (starting [from here](https://github.com/ClickHouse/ClickHouse/tree/master/src/Client)) and/or by intercepting and analyzing TCP traffic.
 
 [Original article](https://clickhouse.tech/docs/zh/interfaces/tcp/)
 
diff --git a/docs/zh/operations/settings/query_complexity.md b/docs/zh/operations/settings/query_complexity.md
index 64520f55a0b..ccb8acd3da5 100644
--- a/docs/zh/operations/settings/query_complexity.md
+++ b/docs/zh/operations/settings/query_complexity.md
@@ -45,7 +45,7 @@ Memory consumption is also restricted by the parameters `max_memory_usage_for_us
 
 The maximum amount of RAM to use for running a user’s queries on a single server.
 
-Default values are defined in [Settings.h](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/Interpreters/Settings.h#L244). By default, the amount is not restricted (`max_memory_usage_for_user = 0`).
+Default values are defined in [Settings.h](https://github.com/ClickHouse/ClickHouse/blob/master/src/Interpreters/Settings.h#L244). By default, the amount is not restricted (`max_memory_usage_for_user = 0`).
 
 See also the description of [max\_memory\_usage](#settings_max_memory_usage).
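A minimal sketch of how the memory limits described above are typically applied in a session (the byte values are illustrative only, not recommendations):

``` sql
-- Cap a single query at ~10 GB and the sum of this user's queries at ~20 GB.
SET max_memory_usage = 10000000000;
SET max_memory_usage_for_user = 20000000000;
```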
@@ -53,7 +53,7 @@ See also the description of [max\_memory\_usage](#settings_max_memory_usage).
 
 The maximum amount of RAM to use for running all queries on a single server.
 
-Default values are defined in [Settings.h](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/Interpreters/Settings.h#L245). By default, the amount is not restricted (`max_memory_usage_for_all_queries = 0`).
+Default values are defined in [Settings.h](https://github.com/ClickHouse/ClickHouse/blob/master/src/Interpreters/Settings.h#L245). By default, the amount is not restricted (`max_memory_usage_for_all_queries = 0`).
 
 See also the description of [max\_memory\_usage](#settings_max_memory_usage).
 
diff --git a/docs/zh/operations/table_engines/mergetree.md b/docs/zh/operations/table_engines/mergetree.md
index cb1a77e11cb..61d36fea9fa 100644
--- a/docs/zh/operations/table_engines/mergetree.md
+++ b/docs/zh/operations/table_engines/mergetree.md
@@ -69,7 +69,7 @@ The most powerful table engine in ClickHouse is the `MergeTree` engine and
 
 - `SETTINGS` — additional parameters that affect `MergeTree` performance:
 
-    - `index_granularity` — the index granularity, i.e. the number of data rows between adjacent "marks" in the index. Default: 8192. All parameters available in this list can be found in [MergeTreeSettings.h](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/Storages/MergeTree/MergeTreeSettings.h).
+    - `index_granularity` — the index granularity, i.e. the number of data rows between adjacent "marks" in the index. Default: 8192. All parameters available in this list can be found in [MergeTreeSettings.h](https://github.com/ClickHouse/ClickHouse/blob/master/src/Storages/MergeTree/MergeTreeSettings.h).
     - `index_granularity_bytes` — the index granularity in bytes. Default: 10Mb. To restrict the index granularity only by the number of rows, set this to 0 (not recommended).
     - `enable_mixed_granularity_parts` — enables or disables controlling the index granularity via `index_granularity_bytes`. Before version 19.11, only the `index_granularity` setting could be used to restrict the index granularity. The `index_granularity_bytes` setting improves ClickHouse performance when selecting data from tables with big rows (tens or hundreds of megabytes). If your tables contain a lot of data, you can enable this setting to improve the performance of `SELECT` queries.
     - `use_minimalistic_part_header_in_zookeeper` — how data part headers are stored in ZooKeeper. If `use_minimalistic_part_header_in_zookeeper=1` is set, ZooKeeper stores less data. For more information, see the [setting description](../server_settings/settings.md#server-settings-use_minimalistic_part_header_in_zookeeper) in the "Server configuration parameters" chapter.
 
diff --git a/dbms/Access/AccessControlManager.cpp b/src/Access/AccessControlManager.cpp similarity index 100% rename from dbms/Access/AccessControlManager.cpp rename to src/Access/AccessControlManager.cpp diff --git a/dbms/Access/AccessControlManager.h b/src/Access/AccessControlManager.h similarity index 100% rename from dbms/Access/AccessControlManager.h rename to src/Access/AccessControlManager.h diff --git a/dbms/Access/AccessFlags.h b/src/Access/AccessFlags.h similarity index 100% rename from dbms/Access/AccessFlags.h rename to src/Access/AccessFlags.h diff --git a/dbms/Access/AccessRights.cpp b/src/Access/AccessRights.cpp similarity index 100% rename from dbms/Access/AccessRights.cpp rename to src/Access/AccessRights.cpp diff --git a/dbms/Access/AccessRights.h b/src/Access/AccessRights.h similarity index 100% rename from dbms/Access/AccessRights.h rename to src/Access/AccessRights.h diff --git a/dbms/Access/AccessRightsElement.cpp b/src/Access/AccessRightsElement.cpp similarity index 100% rename from dbms/Access/AccessRightsElement.cpp rename to src/Access/AccessRightsElement.cpp diff --git a/dbms/Access/AccessRightsElement.h b/src/Access/AccessRightsElement.h similarity index 100% rename from dbms/Access/AccessRightsElement.h rename to src/Access/AccessRightsElement.h diff --git a/dbms/Access/AccessType.h b/src/Access/AccessType.h similarity index 100% rename from dbms/Access/AccessType.h rename to src/Access/AccessType.h diff --git a/dbms/Access/AllowedClientHosts.cpp b/src/Access/AllowedClientHosts.cpp similarity
index 100% rename from dbms/Access/AllowedClientHosts.cpp rename to src/Access/AllowedClientHosts.cpp diff --git a/dbms/Access/AllowedClientHosts.h b/src/Access/AllowedClientHosts.h similarity index 100% rename from dbms/Access/AllowedClientHosts.h rename to src/Access/AllowedClientHosts.h diff --git a/dbms/Access/Authentication.cpp b/src/Access/Authentication.cpp similarity index 100% rename from dbms/Access/Authentication.cpp rename to src/Access/Authentication.cpp diff --git a/dbms/Access/Authentication.h b/src/Access/Authentication.h similarity index 100% rename from dbms/Access/Authentication.h rename to src/Access/Authentication.h diff --git a/dbms/Access/CMakeLists.txt b/src/Access/CMakeLists.txt similarity index 100% rename from dbms/Access/CMakeLists.txt rename to src/Access/CMakeLists.txt diff --git a/dbms/Access/ContextAccess.cpp b/src/Access/ContextAccess.cpp similarity index 100% rename from dbms/Access/ContextAccess.cpp rename to src/Access/ContextAccess.cpp diff --git a/dbms/Access/ContextAccess.h b/src/Access/ContextAccess.h similarity index 100% rename from dbms/Access/ContextAccess.h rename to src/Access/ContextAccess.h diff --git a/dbms/Access/DiskAccessStorage.cpp b/src/Access/DiskAccessStorage.cpp similarity index 100% rename from dbms/Access/DiskAccessStorage.cpp rename to src/Access/DiskAccessStorage.cpp diff --git a/dbms/Access/DiskAccessStorage.h b/src/Access/DiskAccessStorage.h similarity index 100% rename from dbms/Access/DiskAccessStorage.h rename to src/Access/DiskAccessStorage.h diff --git a/dbms/Access/EnabledQuota.cpp b/src/Access/EnabledQuota.cpp similarity index 100% rename from dbms/Access/EnabledQuota.cpp rename to src/Access/EnabledQuota.cpp diff --git a/dbms/Access/EnabledQuota.h b/src/Access/EnabledQuota.h similarity index 100% rename from dbms/Access/EnabledQuota.h rename to src/Access/EnabledQuota.h diff --git a/dbms/Access/EnabledRoles.cpp b/src/Access/EnabledRoles.cpp similarity index 100% rename from dbms/Access/EnabledRoles.cpp rename to src/Access/EnabledRoles.cpp diff --git a/dbms/Access/EnabledRoles.h b/src/Access/EnabledRoles.h similarity index 100% rename from dbms/Access/EnabledRoles.h rename to src/Access/EnabledRoles.h diff --git a/dbms/Access/EnabledRolesInfo.cpp b/src/Access/EnabledRolesInfo.cpp similarity index 100% rename from dbms/Access/EnabledRolesInfo.cpp rename to src/Access/EnabledRolesInfo.cpp diff --git a/dbms/Access/EnabledRolesInfo.h b/src/Access/EnabledRolesInfo.h similarity index 100% rename from dbms/Access/EnabledRolesInfo.h rename to src/Access/EnabledRolesInfo.h diff --git a/dbms/Access/EnabledRowPolicies.cpp b/src/Access/EnabledRowPolicies.cpp similarity index 100% rename from dbms/Access/EnabledRowPolicies.cpp rename to src/Access/EnabledRowPolicies.cpp diff --git a/dbms/Access/EnabledRowPolicies.h b/src/Access/EnabledRowPolicies.h similarity index 100% rename from dbms/Access/EnabledRowPolicies.h rename to src/Access/EnabledRowPolicies.h diff --git a/dbms/Access/EnabledSettings.cpp b/src/Access/EnabledSettings.cpp similarity index 100% rename from dbms/Access/EnabledSettings.cpp rename to src/Access/EnabledSettings.cpp diff --git a/dbms/Access/EnabledSettings.h b/src/Access/EnabledSettings.h similarity index 100% rename from dbms/Access/EnabledSettings.h rename to src/Access/EnabledSettings.h diff --git a/dbms/Access/ExtendedRoleSet.cpp b/src/Access/ExtendedRoleSet.cpp similarity index 100% rename from dbms/Access/ExtendedRoleSet.cpp rename to src/Access/ExtendedRoleSet.cpp diff --git 
a/dbms/Access/ExtendedRoleSet.h b/src/Access/ExtendedRoleSet.h similarity index 100% rename from dbms/Access/ExtendedRoleSet.h rename to src/Access/ExtendedRoleSet.h diff --git a/dbms/Access/IAccessEntity.cpp b/src/Access/IAccessEntity.cpp similarity index 100% rename from dbms/Access/IAccessEntity.cpp rename to src/Access/IAccessEntity.cpp diff --git a/dbms/Access/IAccessEntity.h b/src/Access/IAccessEntity.h similarity index 100% rename from dbms/Access/IAccessEntity.h rename to src/Access/IAccessEntity.h diff --git a/dbms/Access/IAccessStorage.cpp b/src/Access/IAccessStorage.cpp similarity index 100% rename from dbms/Access/IAccessStorage.cpp rename to src/Access/IAccessStorage.cpp diff --git a/dbms/Access/IAccessStorage.h b/src/Access/IAccessStorage.h similarity index 100% rename from dbms/Access/IAccessStorage.h rename to src/Access/IAccessStorage.h diff --git a/dbms/Access/MemoryAccessStorage.cpp b/src/Access/MemoryAccessStorage.cpp similarity index 100% rename from dbms/Access/MemoryAccessStorage.cpp rename to src/Access/MemoryAccessStorage.cpp diff --git a/dbms/Access/MemoryAccessStorage.h b/src/Access/MemoryAccessStorage.h similarity index 100% rename from dbms/Access/MemoryAccessStorage.h rename to src/Access/MemoryAccessStorage.h diff --git a/dbms/Access/MultipleAccessStorage.cpp b/src/Access/MultipleAccessStorage.cpp similarity index 100% rename from dbms/Access/MultipleAccessStorage.cpp rename to src/Access/MultipleAccessStorage.cpp diff --git a/dbms/Access/MultipleAccessStorage.h b/src/Access/MultipleAccessStorage.h similarity index 100% rename from dbms/Access/MultipleAccessStorage.h rename to src/Access/MultipleAccessStorage.h diff --git a/dbms/Access/Quota.cpp b/src/Access/Quota.cpp similarity index 100% rename from dbms/Access/Quota.cpp rename to src/Access/Quota.cpp diff --git a/dbms/Access/Quota.h b/src/Access/Quota.h similarity index 100% rename from dbms/Access/Quota.h rename to src/Access/Quota.h diff --git a/dbms/Access/QuotaCache.cpp b/src/Access/QuotaCache.cpp similarity index 100% rename from dbms/Access/QuotaCache.cpp rename to src/Access/QuotaCache.cpp diff --git a/dbms/Access/QuotaCache.h b/src/Access/QuotaCache.h similarity index 100% rename from dbms/Access/QuotaCache.h rename to src/Access/QuotaCache.h diff --git a/dbms/Access/QuotaUsageInfo.cpp b/src/Access/QuotaUsageInfo.cpp similarity index 100% rename from dbms/Access/QuotaUsageInfo.cpp rename to src/Access/QuotaUsageInfo.cpp diff --git a/dbms/Access/QuotaUsageInfo.h b/src/Access/QuotaUsageInfo.h similarity index 100% rename from dbms/Access/QuotaUsageInfo.h rename to src/Access/QuotaUsageInfo.h diff --git a/dbms/Access/Role.cpp b/src/Access/Role.cpp similarity index 100% rename from dbms/Access/Role.cpp rename to src/Access/Role.cpp diff --git a/dbms/Access/Role.h b/src/Access/Role.h similarity index 100% rename from dbms/Access/Role.h rename to src/Access/Role.h diff --git a/dbms/Access/RoleCache.cpp b/src/Access/RoleCache.cpp similarity index 100% rename from dbms/Access/RoleCache.cpp rename to src/Access/RoleCache.cpp diff --git a/dbms/Access/RoleCache.h b/src/Access/RoleCache.h similarity index 100% rename from dbms/Access/RoleCache.h rename to src/Access/RoleCache.h diff --git a/dbms/Access/RowPolicy.cpp b/src/Access/RowPolicy.cpp similarity index 100% rename from dbms/Access/RowPolicy.cpp rename to src/Access/RowPolicy.cpp diff --git a/dbms/Access/RowPolicy.h b/src/Access/RowPolicy.h similarity index 100% rename from dbms/Access/RowPolicy.h rename to src/Access/RowPolicy.h diff --git 
a/dbms/Access/RowPolicyCache.cpp b/src/Access/RowPolicyCache.cpp similarity index 100% rename from dbms/Access/RowPolicyCache.cpp rename to src/Access/RowPolicyCache.cpp diff --git a/dbms/Access/RowPolicyCache.h b/src/Access/RowPolicyCache.h similarity index 100% rename from dbms/Access/RowPolicyCache.h rename to src/Access/RowPolicyCache.h diff --git a/dbms/Access/SettingsConstraints.cpp b/src/Access/SettingsConstraints.cpp similarity index 100% rename from dbms/Access/SettingsConstraints.cpp rename to src/Access/SettingsConstraints.cpp diff --git a/dbms/Access/SettingsConstraints.h b/src/Access/SettingsConstraints.h similarity index 100% rename from dbms/Access/SettingsConstraints.h rename to src/Access/SettingsConstraints.h diff --git a/dbms/Access/SettingsProfile.cpp b/src/Access/SettingsProfile.cpp similarity index 100% rename from dbms/Access/SettingsProfile.cpp rename to src/Access/SettingsProfile.cpp diff --git a/dbms/Access/SettingsProfile.h b/src/Access/SettingsProfile.h similarity index 100% rename from dbms/Access/SettingsProfile.h rename to src/Access/SettingsProfile.h diff --git a/dbms/Access/SettingsProfileElement.cpp b/src/Access/SettingsProfileElement.cpp similarity index 100% rename from dbms/Access/SettingsProfileElement.cpp rename to src/Access/SettingsProfileElement.cpp diff --git a/dbms/Access/SettingsProfileElement.h b/src/Access/SettingsProfileElement.h similarity index 100% rename from dbms/Access/SettingsProfileElement.h rename to src/Access/SettingsProfileElement.h diff --git a/dbms/Access/SettingsProfilesCache.cpp b/src/Access/SettingsProfilesCache.cpp similarity index 100% rename from dbms/Access/SettingsProfilesCache.cpp rename to src/Access/SettingsProfilesCache.cpp diff --git a/dbms/Access/SettingsProfilesCache.h b/src/Access/SettingsProfilesCache.h similarity index 100% rename from dbms/Access/SettingsProfilesCache.h rename to src/Access/SettingsProfilesCache.h diff --git a/dbms/Access/User.cpp b/src/Access/User.cpp similarity index 100% rename from dbms/Access/User.cpp rename to src/Access/User.cpp diff --git a/dbms/Access/User.h b/src/Access/User.h similarity index 100% rename from dbms/Access/User.h rename to src/Access/User.h diff --git a/dbms/Access/UsersConfigAccessStorage.cpp b/src/Access/UsersConfigAccessStorage.cpp similarity index 100% rename from dbms/Access/UsersConfigAccessStorage.cpp rename to src/Access/UsersConfigAccessStorage.cpp diff --git a/dbms/Access/UsersConfigAccessStorage.h b/src/Access/UsersConfigAccessStorage.h similarity index 100% rename from dbms/Access/UsersConfigAccessStorage.h rename to src/Access/UsersConfigAccessStorage.h diff --git a/dbms/AggregateFunctions/AggregateFunctionAggThrow.cpp b/src/AggregateFunctions/AggregateFunctionAggThrow.cpp similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionAggThrow.cpp rename to src/AggregateFunctions/AggregateFunctionAggThrow.cpp diff --git a/dbms/AggregateFunctions/AggregateFunctionArgMinMax.h b/src/AggregateFunctions/AggregateFunctionArgMinMax.h similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionArgMinMax.h rename to src/AggregateFunctions/AggregateFunctionArgMinMax.h diff --git a/dbms/AggregateFunctions/AggregateFunctionArray.cpp b/src/AggregateFunctions/AggregateFunctionArray.cpp similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionArray.cpp rename to src/AggregateFunctions/AggregateFunctionArray.cpp diff --git a/dbms/AggregateFunctions/AggregateFunctionArray.h b/src/AggregateFunctions/AggregateFunctionArray.h 
similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionArray.h rename to src/AggregateFunctions/AggregateFunctionArray.h diff --git a/dbms/AggregateFunctions/AggregateFunctionAvg.cpp b/src/AggregateFunctions/AggregateFunctionAvg.cpp similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionAvg.cpp rename to src/AggregateFunctions/AggregateFunctionAvg.cpp diff --git a/dbms/AggregateFunctions/AggregateFunctionAvg.h b/src/AggregateFunctions/AggregateFunctionAvg.h similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionAvg.h rename to src/AggregateFunctions/AggregateFunctionAvg.h diff --git a/dbms/AggregateFunctions/AggregateFunctionAvgWeighted.cpp b/src/AggregateFunctions/AggregateFunctionAvgWeighted.cpp similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionAvgWeighted.cpp rename to src/AggregateFunctions/AggregateFunctionAvgWeighted.cpp diff --git a/dbms/AggregateFunctions/AggregateFunctionAvgWeighted.h b/src/AggregateFunctions/AggregateFunctionAvgWeighted.h similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionAvgWeighted.h rename to src/AggregateFunctions/AggregateFunctionAvgWeighted.h diff --git a/dbms/AggregateFunctions/AggregateFunctionBitwise.cpp b/src/AggregateFunctions/AggregateFunctionBitwise.cpp similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionBitwise.cpp rename to src/AggregateFunctions/AggregateFunctionBitwise.cpp diff --git a/dbms/AggregateFunctions/AggregateFunctionBitwise.h b/src/AggregateFunctions/AggregateFunctionBitwise.h similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionBitwise.h rename to src/AggregateFunctions/AggregateFunctionBitwise.h diff --git a/dbms/AggregateFunctions/AggregateFunctionBoundingRatio.cpp b/src/AggregateFunctions/AggregateFunctionBoundingRatio.cpp similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionBoundingRatio.cpp rename to src/AggregateFunctions/AggregateFunctionBoundingRatio.cpp diff --git a/dbms/AggregateFunctions/AggregateFunctionBoundingRatio.h b/src/AggregateFunctions/AggregateFunctionBoundingRatio.h similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionBoundingRatio.h rename to src/AggregateFunctions/AggregateFunctionBoundingRatio.h diff --git a/dbms/AggregateFunctions/AggregateFunctionCategoricalInformationValue.cpp b/src/AggregateFunctions/AggregateFunctionCategoricalInformationValue.cpp similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionCategoricalInformationValue.cpp rename to src/AggregateFunctions/AggregateFunctionCategoricalInformationValue.cpp diff --git a/dbms/AggregateFunctions/AggregateFunctionCategoricalInformationValue.h b/src/AggregateFunctions/AggregateFunctionCategoricalInformationValue.h similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionCategoricalInformationValue.h rename to src/AggregateFunctions/AggregateFunctionCategoricalInformationValue.h diff --git a/dbms/AggregateFunctions/AggregateFunctionCombinatorFactory.cpp b/src/AggregateFunctions/AggregateFunctionCombinatorFactory.cpp similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionCombinatorFactory.cpp rename to src/AggregateFunctions/AggregateFunctionCombinatorFactory.cpp diff --git a/dbms/AggregateFunctions/AggregateFunctionCombinatorFactory.h b/src/AggregateFunctions/AggregateFunctionCombinatorFactory.h similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionCombinatorFactory.h rename to 
src/AggregateFunctions/AggregateFunctionCombinatorFactory.h diff --git a/dbms/AggregateFunctions/AggregateFunctionCount.cpp b/src/AggregateFunctions/AggregateFunctionCount.cpp similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionCount.cpp rename to src/AggregateFunctions/AggregateFunctionCount.cpp diff --git a/dbms/AggregateFunctions/AggregateFunctionCount.h b/src/AggregateFunctions/AggregateFunctionCount.h similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionCount.h rename to src/AggregateFunctions/AggregateFunctionCount.h diff --git a/dbms/AggregateFunctions/AggregateFunctionEntropy.cpp b/src/AggregateFunctions/AggregateFunctionEntropy.cpp similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionEntropy.cpp rename to src/AggregateFunctions/AggregateFunctionEntropy.cpp diff --git a/dbms/AggregateFunctions/AggregateFunctionEntropy.h b/src/AggregateFunctions/AggregateFunctionEntropy.h similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionEntropy.h rename to src/AggregateFunctions/AggregateFunctionEntropy.h diff --git a/dbms/AggregateFunctions/AggregateFunctionFactory.cpp b/src/AggregateFunctions/AggregateFunctionFactory.cpp similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionFactory.cpp rename to src/AggregateFunctions/AggregateFunctionFactory.cpp diff --git a/dbms/AggregateFunctions/AggregateFunctionFactory.h b/src/AggregateFunctions/AggregateFunctionFactory.h similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionFactory.h rename to src/AggregateFunctions/AggregateFunctionFactory.h diff --git a/dbms/AggregateFunctions/AggregateFunctionForEach.cpp b/src/AggregateFunctions/AggregateFunctionForEach.cpp similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionForEach.cpp rename to src/AggregateFunctions/AggregateFunctionForEach.cpp diff --git a/dbms/AggregateFunctions/AggregateFunctionForEach.h b/src/AggregateFunctions/AggregateFunctionForEach.h similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionForEach.h rename to src/AggregateFunctions/AggregateFunctionForEach.h diff --git a/dbms/AggregateFunctions/AggregateFunctionGroupArray.cpp b/src/AggregateFunctions/AggregateFunctionGroupArray.cpp similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionGroupArray.cpp rename to src/AggregateFunctions/AggregateFunctionGroupArray.cpp diff --git a/dbms/AggregateFunctions/AggregateFunctionGroupArray.h b/src/AggregateFunctions/AggregateFunctionGroupArray.h similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionGroupArray.h rename to src/AggregateFunctions/AggregateFunctionGroupArray.h diff --git a/dbms/AggregateFunctions/AggregateFunctionGroupArrayInsertAt.cpp b/src/AggregateFunctions/AggregateFunctionGroupArrayInsertAt.cpp similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionGroupArrayInsertAt.cpp rename to src/AggregateFunctions/AggregateFunctionGroupArrayInsertAt.cpp diff --git a/dbms/AggregateFunctions/AggregateFunctionGroupArrayInsertAt.h b/src/AggregateFunctions/AggregateFunctionGroupArrayInsertAt.h similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionGroupArrayInsertAt.h rename to src/AggregateFunctions/AggregateFunctionGroupArrayInsertAt.h diff --git a/dbms/AggregateFunctions/AggregateFunctionGroupArrayMoving.cpp b/src/AggregateFunctions/AggregateFunctionGroupArrayMoving.cpp similarity index 100% rename from 
dbms/AggregateFunctions/AggregateFunctionGroupArrayMoving.cpp rename to src/AggregateFunctions/AggregateFunctionGroupArrayMoving.cpp diff --git a/dbms/AggregateFunctions/AggregateFunctionGroupArrayMoving.h b/src/AggregateFunctions/AggregateFunctionGroupArrayMoving.h similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionGroupArrayMoving.h rename to src/AggregateFunctions/AggregateFunctionGroupArrayMoving.h diff --git a/dbms/AggregateFunctions/AggregateFunctionGroupBitmap.cpp b/src/AggregateFunctions/AggregateFunctionGroupBitmap.cpp similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionGroupBitmap.cpp rename to src/AggregateFunctions/AggregateFunctionGroupBitmap.cpp diff --git a/dbms/AggregateFunctions/AggregateFunctionGroupBitmap.h b/src/AggregateFunctions/AggregateFunctionGroupBitmap.h similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionGroupBitmap.h rename to src/AggregateFunctions/AggregateFunctionGroupBitmap.h diff --git a/dbms/AggregateFunctions/AggregateFunctionGroupBitmapData.h b/src/AggregateFunctions/AggregateFunctionGroupBitmapData.h similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionGroupBitmapData.h rename to src/AggregateFunctions/AggregateFunctionGroupBitmapData.h diff --git a/dbms/AggregateFunctions/AggregateFunctionGroupUniqArray.cpp b/src/AggregateFunctions/AggregateFunctionGroupUniqArray.cpp similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionGroupUniqArray.cpp rename to src/AggregateFunctions/AggregateFunctionGroupUniqArray.cpp diff --git a/dbms/AggregateFunctions/AggregateFunctionGroupUniqArray.h b/src/AggregateFunctions/AggregateFunctionGroupUniqArray.h similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionGroupUniqArray.h rename to src/AggregateFunctions/AggregateFunctionGroupUniqArray.h diff --git a/dbms/AggregateFunctions/AggregateFunctionHistogram.cpp b/src/AggregateFunctions/AggregateFunctionHistogram.cpp similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionHistogram.cpp rename to src/AggregateFunctions/AggregateFunctionHistogram.cpp diff --git a/dbms/AggregateFunctions/AggregateFunctionHistogram.h b/src/AggregateFunctions/AggregateFunctionHistogram.h similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionHistogram.h rename to src/AggregateFunctions/AggregateFunctionHistogram.h diff --git a/dbms/AggregateFunctions/AggregateFunctionIf.cpp b/src/AggregateFunctions/AggregateFunctionIf.cpp similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionIf.cpp rename to src/AggregateFunctions/AggregateFunctionIf.cpp diff --git a/dbms/AggregateFunctions/AggregateFunctionIf.h b/src/AggregateFunctions/AggregateFunctionIf.h similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionIf.h rename to src/AggregateFunctions/AggregateFunctionIf.h diff --git a/dbms/AggregateFunctions/AggregateFunctionMLMethod.cpp b/src/AggregateFunctions/AggregateFunctionMLMethod.cpp similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionMLMethod.cpp rename to src/AggregateFunctions/AggregateFunctionMLMethod.cpp diff --git a/dbms/AggregateFunctions/AggregateFunctionMLMethod.h b/src/AggregateFunctions/AggregateFunctionMLMethod.h similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionMLMethod.h rename to src/AggregateFunctions/AggregateFunctionMLMethod.h diff --git a/dbms/AggregateFunctions/AggregateFunctionMaxIntersections.cpp 
b/src/AggregateFunctions/AggregateFunctionMaxIntersections.cpp similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionMaxIntersections.cpp rename to src/AggregateFunctions/AggregateFunctionMaxIntersections.cpp diff --git a/dbms/AggregateFunctions/AggregateFunctionMaxIntersections.h b/src/AggregateFunctions/AggregateFunctionMaxIntersections.h similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionMaxIntersections.h rename to src/AggregateFunctions/AggregateFunctionMaxIntersections.h diff --git a/dbms/AggregateFunctions/AggregateFunctionMerge.cpp b/src/AggregateFunctions/AggregateFunctionMerge.cpp similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionMerge.cpp rename to src/AggregateFunctions/AggregateFunctionMerge.cpp diff --git a/dbms/AggregateFunctions/AggregateFunctionMerge.h b/src/AggregateFunctions/AggregateFunctionMerge.h similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionMerge.h rename to src/AggregateFunctions/AggregateFunctionMerge.h diff --git a/dbms/AggregateFunctions/AggregateFunctionMinMaxAny.cpp b/src/AggregateFunctions/AggregateFunctionMinMaxAny.cpp similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionMinMaxAny.cpp rename to src/AggregateFunctions/AggregateFunctionMinMaxAny.cpp diff --git a/dbms/AggregateFunctions/AggregateFunctionMinMaxAny.h b/src/AggregateFunctions/AggregateFunctionMinMaxAny.h similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionMinMaxAny.h rename to src/AggregateFunctions/AggregateFunctionMinMaxAny.h diff --git a/dbms/AggregateFunctions/AggregateFunctionNothing.h b/src/AggregateFunctions/AggregateFunctionNothing.h similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionNothing.h rename to src/AggregateFunctions/AggregateFunctionNothing.h diff --git a/dbms/AggregateFunctions/AggregateFunctionNull.cpp b/src/AggregateFunctions/AggregateFunctionNull.cpp similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionNull.cpp rename to src/AggregateFunctions/AggregateFunctionNull.cpp diff --git a/dbms/AggregateFunctions/AggregateFunctionNull.h b/src/AggregateFunctions/AggregateFunctionNull.h similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionNull.h rename to src/AggregateFunctions/AggregateFunctionNull.h diff --git a/dbms/AggregateFunctions/AggregateFunctionOrFill.cpp b/src/AggregateFunctions/AggregateFunctionOrFill.cpp similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionOrFill.cpp rename to src/AggregateFunctions/AggregateFunctionOrFill.cpp diff --git a/dbms/AggregateFunctions/AggregateFunctionOrFill.h b/src/AggregateFunctions/AggregateFunctionOrFill.h similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionOrFill.h rename to src/AggregateFunctions/AggregateFunctionOrFill.h diff --git a/dbms/AggregateFunctions/AggregateFunctionQuantile.cpp b/src/AggregateFunctions/AggregateFunctionQuantile.cpp similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionQuantile.cpp rename to src/AggregateFunctions/AggregateFunctionQuantile.cpp diff --git a/dbms/AggregateFunctions/AggregateFunctionQuantile.h b/src/AggregateFunctions/AggregateFunctionQuantile.h similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionQuantile.h rename to src/AggregateFunctions/AggregateFunctionQuantile.h diff --git a/dbms/AggregateFunctions/AggregateFunctionResample.cpp b/src/AggregateFunctions/AggregateFunctionResample.cpp similarity index 
100% rename from dbms/AggregateFunctions/AggregateFunctionResample.cpp rename to src/AggregateFunctions/AggregateFunctionResample.cpp diff --git a/dbms/AggregateFunctions/AggregateFunctionResample.h b/src/AggregateFunctions/AggregateFunctionResample.h similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionResample.h rename to src/AggregateFunctions/AggregateFunctionResample.h diff --git a/dbms/AggregateFunctions/AggregateFunctionRetention.cpp b/src/AggregateFunctions/AggregateFunctionRetention.cpp similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionRetention.cpp rename to src/AggregateFunctions/AggregateFunctionRetention.cpp diff --git a/dbms/AggregateFunctions/AggregateFunctionRetention.h b/src/AggregateFunctions/AggregateFunctionRetention.h similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionRetention.h rename to src/AggregateFunctions/AggregateFunctionRetention.h diff --git a/dbms/AggregateFunctions/AggregateFunctionSequenceMatch.cpp b/src/AggregateFunctions/AggregateFunctionSequenceMatch.cpp similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionSequenceMatch.cpp rename to src/AggregateFunctions/AggregateFunctionSequenceMatch.cpp diff --git a/dbms/AggregateFunctions/AggregateFunctionSequenceMatch.h b/src/AggregateFunctions/AggregateFunctionSequenceMatch.h similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionSequenceMatch.h rename to src/AggregateFunctions/AggregateFunctionSequenceMatch.h diff --git a/dbms/AggregateFunctions/AggregateFunctionSimpleLinearRegression.cpp b/src/AggregateFunctions/AggregateFunctionSimpleLinearRegression.cpp similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionSimpleLinearRegression.cpp rename to src/AggregateFunctions/AggregateFunctionSimpleLinearRegression.cpp diff --git a/dbms/AggregateFunctions/AggregateFunctionSimpleLinearRegression.h b/src/AggregateFunctions/AggregateFunctionSimpleLinearRegression.h similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionSimpleLinearRegression.h rename to src/AggregateFunctions/AggregateFunctionSimpleLinearRegression.h diff --git a/dbms/AggregateFunctions/AggregateFunctionState.cpp b/src/AggregateFunctions/AggregateFunctionState.cpp similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionState.cpp rename to src/AggregateFunctions/AggregateFunctionState.cpp diff --git a/dbms/AggregateFunctions/AggregateFunctionState.h b/src/AggregateFunctions/AggregateFunctionState.h similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionState.h rename to src/AggregateFunctions/AggregateFunctionState.h diff --git a/dbms/AggregateFunctions/AggregateFunctionStatistics.cpp b/src/AggregateFunctions/AggregateFunctionStatistics.cpp similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionStatistics.cpp rename to src/AggregateFunctions/AggregateFunctionStatistics.cpp diff --git a/dbms/AggregateFunctions/AggregateFunctionStatistics.h b/src/AggregateFunctions/AggregateFunctionStatistics.h similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionStatistics.h rename to src/AggregateFunctions/AggregateFunctionStatistics.h diff --git a/dbms/AggregateFunctions/AggregateFunctionStatisticsSimple.cpp b/src/AggregateFunctions/AggregateFunctionStatisticsSimple.cpp similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionStatisticsSimple.cpp rename to src/AggregateFunctions/AggregateFunctionStatisticsSimple.cpp diff 
--git a/dbms/AggregateFunctions/AggregateFunctionStatisticsSimple.h b/src/AggregateFunctions/AggregateFunctionStatisticsSimple.h similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionStatisticsSimple.h rename to src/AggregateFunctions/AggregateFunctionStatisticsSimple.h diff --git a/dbms/AggregateFunctions/AggregateFunctionSum.cpp b/src/AggregateFunctions/AggregateFunctionSum.cpp similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionSum.cpp rename to src/AggregateFunctions/AggregateFunctionSum.cpp diff --git a/dbms/AggregateFunctions/AggregateFunctionSum.h b/src/AggregateFunctions/AggregateFunctionSum.h similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionSum.h rename to src/AggregateFunctions/AggregateFunctionSum.h diff --git a/dbms/AggregateFunctions/AggregateFunctionSumMap.cpp b/src/AggregateFunctions/AggregateFunctionSumMap.cpp similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionSumMap.cpp rename to src/AggregateFunctions/AggregateFunctionSumMap.cpp diff --git a/dbms/AggregateFunctions/AggregateFunctionSumMap.h b/src/AggregateFunctions/AggregateFunctionSumMap.h similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionSumMap.h rename to src/AggregateFunctions/AggregateFunctionSumMap.h diff --git a/dbms/AggregateFunctions/AggregateFunctionTimeSeriesGroupSum.cpp b/src/AggregateFunctions/AggregateFunctionTimeSeriesGroupSum.cpp similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionTimeSeriesGroupSum.cpp rename to src/AggregateFunctions/AggregateFunctionTimeSeriesGroupSum.cpp diff --git a/dbms/AggregateFunctions/AggregateFunctionTimeSeriesGroupSum.h b/src/AggregateFunctions/AggregateFunctionTimeSeriesGroupSum.h similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionTimeSeriesGroupSum.h rename to src/AggregateFunctions/AggregateFunctionTimeSeriesGroupSum.h diff --git a/dbms/AggregateFunctions/AggregateFunctionTopK.cpp b/src/AggregateFunctions/AggregateFunctionTopK.cpp similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionTopK.cpp rename to src/AggregateFunctions/AggregateFunctionTopK.cpp diff --git a/dbms/AggregateFunctions/AggregateFunctionTopK.h b/src/AggregateFunctions/AggregateFunctionTopK.h similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionTopK.h rename to src/AggregateFunctions/AggregateFunctionTopK.h diff --git a/dbms/AggregateFunctions/AggregateFunctionUniq.cpp b/src/AggregateFunctions/AggregateFunctionUniq.cpp similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionUniq.cpp rename to src/AggregateFunctions/AggregateFunctionUniq.cpp diff --git a/dbms/AggregateFunctions/AggregateFunctionUniq.h b/src/AggregateFunctions/AggregateFunctionUniq.h similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionUniq.h rename to src/AggregateFunctions/AggregateFunctionUniq.h diff --git a/dbms/AggregateFunctions/AggregateFunctionUniqCombined.cpp b/src/AggregateFunctions/AggregateFunctionUniqCombined.cpp similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionUniqCombined.cpp rename to src/AggregateFunctions/AggregateFunctionUniqCombined.cpp diff --git a/dbms/AggregateFunctions/AggregateFunctionUniqCombined.h b/src/AggregateFunctions/AggregateFunctionUniqCombined.h similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionUniqCombined.h rename to src/AggregateFunctions/AggregateFunctionUniqCombined.h diff --git 
a/dbms/AggregateFunctions/AggregateFunctionUniqUpTo.cpp b/src/AggregateFunctions/AggregateFunctionUniqUpTo.cpp similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionUniqUpTo.cpp rename to src/AggregateFunctions/AggregateFunctionUniqUpTo.cpp diff --git a/dbms/AggregateFunctions/AggregateFunctionUniqUpTo.h b/src/AggregateFunctions/AggregateFunctionUniqUpTo.h similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionUniqUpTo.h rename to src/AggregateFunctions/AggregateFunctionUniqUpTo.h diff --git a/dbms/AggregateFunctions/AggregateFunctionWindowFunnel.cpp b/src/AggregateFunctions/AggregateFunctionWindowFunnel.cpp similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionWindowFunnel.cpp rename to src/AggregateFunctions/AggregateFunctionWindowFunnel.cpp diff --git a/dbms/AggregateFunctions/AggregateFunctionWindowFunnel.h b/src/AggregateFunctions/AggregateFunctionWindowFunnel.h similarity index 100% rename from dbms/AggregateFunctions/AggregateFunctionWindowFunnel.h rename to src/AggregateFunctions/AggregateFunctionWindowFunnel.h diff --git a/dbms/AggregateFunctions/CMakeLists.txt b/src/AggregateFunctions/CMakeLists.txt similarity index 100% rename from dbms/AggregateFunctions/CMakeLists.txt rename to src/AggregateFunctions/CMakeLists.txt diff --git a/dbms/AggregateFunctions/FactoryHelpers.h b/src/AggregateFunctions/FactoryHelpers.h similarity index 100% rename from dbms/AggregateFunctions/FactoryHelpers.h rename to src/AggregateFunctions/FactoryHelpers.h diff --git a/dbms/AggregateFunctions/Helpers.h b/src/AggregateFunctions/Helpers.h similarity index 100% rename from dbms/AggregateFunctions/Helpers.h rename to src/AggregateFunctions/Helpers.h diff --git a/dbms/AggregateFunctions/HelpersMinMaxAny.h b/src/AggregateFunctions/HelpersMinMaxAny.h similarity index 100% rename from dbms/AggregateFunctions/HelpersMinMaxAny.h rename to src/AggregateFunctions/HelpersMinMaxAny.h diff --git a/dbms/AggregateFunctions/IAggregateFunction.h b/src/AggregateFunctions/IAggregateFunction.h similarity index 100% rename from dbms/AggregateFunctions/IAggregateFunction.h rename to src/AggregateFunctions/IAggregateFunction.h diff --git a/dbms/AggregateFunctions/IAggregateFunctionCombinator.h b/src/AggregateFunctions/IAggregateFunctionCombinator.h similarity index 100% rename from dbms/AggregateFunctions/IAggregateFunctionCombinator.h rename to src/AggregateFunctions/IAggregateFunctionCombinator.h diff --git a/dbms/AggregateFunctions/QuantileExact.h b/src/AggregateFunctions/QuantileExact.h similarity index 100% rename from dbms/AggregateFunctions/QuantileExact.h rename to src/AggregateFunctions/QuantileExact.h diff --git a/dbms/AggregateFunctions/QuantileExactWeighted.h b/src/AggregateFunctions/QuantileExactWeighted.h similarity index 100% rename from dbms/AggregateFunctions/QuantileExactWeighted.h rename to src/AggregateFunctions/QuantileExactWeighted.h diff --git a/dbms/AggregateFunctions/QuantileReservoirSampler.h b/src/AggregateFunctions/QuantileReservoirSampler.h similarity index 100% rename from dbms/AggregateFunctions/QuantileReservoirSampler.h rename to src/AggregateFunctions/QuantileReservoirSampler.h diff --git a/dbms/AggregateFunctions/QuantileReservoirSamplerDeterministic.h b/src/AggregateFunctions/QuantileReservoirSamplerDeterministic.h similarity index 100% rename from dbms/AggregateFunctions/QuantileReservoirSamplerDeterministic.h rename to src/AggregateFunctions/QuantileReservoirSamplerDeterministic.h diff --git 
a/dbms/AggregateFunctions/QuantileTDigest.h b/src/AggregateFunctions/QuantileTDigest.h similarity index 100% rename from dbms/AggregateFunctions/QuantileTDigest.h rename to src/AggregateFunctions/QuantileTDigest.h diff --git a/dbms/AggregateFunctions/QuantileTiming.h b/src/AggregateFunctions/QuantileTiming.h similarity index 100% rename from dbms/AggregateFunctions/QuantileTiming.h rename to src/AggregateFunctions/QuantileTiming.h diff --git a/dbms/AggregateFunctions/QuantilesCommon.h b/src/AggregateFunctions/QuantilesCommon.h similarity index 100% rename from dbms/AggregateFunctions/QuantilesCommon.h rename to src/AggregateFunctions/QuantilesCommon.h diff --git a/dbms/AggregateFunctions/ReservoirSampler.h b/src/AggregateFunctions/ReservoirSampler.h similarity index 100% rename from dbms/AggregateFunctions/ReservoirSampler.h rename to src/AggregateFunctions/ReservoirSampler.h diff --git a/dbms/AggregateFunctions/ReservoirSamplerDeterministic.h b/src/AggregateFunctions/ReservoirSamplerDeterministic.h similarity index 100% rename from dbms/AggregateFunctions/ReservoirSamplerDeterministic.h rename to src/AggregateFunctions/ReservoirSamplerDeterministic.h diff --git a/dbms/AggregateFunctions/UniqCombinedBiasData.cpp b/src/AggregateFunctions/UniqCombinedBiasData.cpp similarity index 100% rename from dbms/AggregateFunctions/UniqCombinedBiasData.cpp rename to src/AggregateFunctions/UniqCombinedBiasData.cpp diff --git a/dbms/AggregateFunctions/UniqCombinedBiasData.h b/src/AggregateFunctions/UniqCombinedBiasData.h similarity index 89% rename from dbms/AggregateFunctions/UniqCombinedBiasData.h rename to src/AggregateFunctions/UniqCombinedBiasData.h index 0a69a211206..2b009bfdfd8 100644
--- a/dbms/AggregateFunctions/UniqCombinedBiasData.h
+++ b/src/AggregateFunctions/UniqCombinedBiasData.h
@@ -8,11 +8,11 @@ namespace DB
 /** Data for HyperLogLogBiasEstimator in the uniqCombined function.
   * The development plan is as follows:
   * 1. Assemble ClickHouse.
-  * 2. Run the script src/dbms/scripts/gen-bias-data.py, which returns one array for getRawEstimates()
+  * 2. Run the script src/src/scripts/gen-bias-data.py, which returns one array for getRawEstimates()
   *    and another array for getBiases().
   * 3. Update `raw_estimates` and `biases` arrays. Also update the size of arrays in InterpolatedData.
   * 4. Assemble ClickHouse.
-  * 5. Run the script src/dbms/scripts/linear-counting-threshold.py, which creates 3 files:
+  * 5. Run the script src/src/scripts/linear-counting-threshold.py, which creates 3 files:
   *    - raw_graph.txt (1st column: the present number of unique values;
   *      2nd column: relative error in the case of HyperLogLog without applying any corrections)
   *    - linear_counting_graph.txt (1st column: the present number of unique values;
diff --git a/dbms/AggregateFunctions/UniqVariadicHash.cpp b/src/AggregateFunctions/UniqVariadicHash.cpp similarity index 100% rename from dbms/AggregateFunctions/UniqVariadicHash.cpp rename to src/AggregateFunctions/UniqVariadicHash.cpp diff --git a/dbms/AggregateFunctions/UniqVariadicHash.h b/src/AggregateFunctions/UniqVariadicHash.h similarity index 100% rename from dbms/AggregateFunctions/UniqVariadicHash.h rename to src/AggregateFunctions/UniqVariadicHash.h diff --git a/dbms/AggregateFunctions/UniquesHashSet.h b/src/AggregateFunctions/UniquesHashSet.h similarity index 100% rename from dbms/AggregateFunctions/UniquesHashSet.h rename to src/AggregateFunctions/UniquesHashSet.h diff --git a/dbms/AggregateFunctions/parseAggregateFunctionParameters.cpp b/src/AggregateFunctions/parseAggregateFunctionParameters.cpp similarity index 100% rename from dbms/AggregateFunctions/parseAggregateFunctionParameters.cpp rename to src/AggregateFunctions/parseAggregateFunctionParameters.cpp diff --git a/dbms/AggregateFunctions/parseAggregateFunctionParameters.h b/src/AggregateFunctions/parseAggregateFunctionParameters.h similarity index 100% rename from dbms/AggregateFunctions/parseAggregateFunctionParameters.h rename to src/AggregateFunctions/parseAggregateFunctionParameters.h diff --git a/dbms/AggregateFunctions/registerAggregateFunctions.cpp b/src/AggregateFunctions/registerAggregateFunctions.cpp similarity index 100% rename from dbms/AggregateFunctions/registerAggregateFunctions.cpp rename to src/AggregateFunctions/registerAggregateFunctions.cpp diff --git a/dbms/AggregateFunctions/registerAggregateFunctions.h b/src/AggregateFunctions/registerAggregateFunctions.h similarity index 100% rename from dbms/AggregateFunctions/registerAggregateFunctions.h rename to src/AggregateFunctions/registerAggregateFunctions.h diff --git a/dbms/AggregateFunctions/tests/CMakeLists.txt b/src/AggregateFunctions/tests/CMakeLists.txt similarity index 100% rename from dbms/AggregateFunctions/tests/CMakeLists.txt rename to src/AggregateFunctions/tests/CMakeLists.txt diff --git a/dbms/AggregateFunctions/tests/quantile-t-digest.cpp b/src/AggregateFunctions/tests/quantile-t-digest.cpp similarity index 100% rename from dbms/AggregateFunctions/tests/quantile-t-digest.cpp rename to src/AggregateFunctions/tests/quantile-t-digest.cpp diff --git a/dbms/CMakeLists.txt b/src/CMakeLists.txt similarity index 99% rename from dbms/CMakeLists.txt rename to src/CMakeLists.txt index f3f18d35f3c..154d7c911cf 100644
--- a/dbms/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -32,7 +32,7 @@ if (NOT MSVC)
 endif ()
 
 if (USE_DEBUG_HELPERS)
-    set (INCLUDE_DEBUG_HELPERS "-I${ClickHouse_SOURCE_DIR}/base -include ${ClickHouse_SOURCE_DIR}/dbms/Core/iostream_debug_helpers.h")
+    set (INCLUDE_DEBUG_HELPERS "-I${ClickHouse_SOURCE_DIR}/base -include ${ClickHouse_SOURCE_DIR}/src/Core/iostream_debug_helpers.h")
     set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${INCLUDE_DEBUG_HELPERS}")
 endif ()
 
@@ -196,7 +196,7 @@ if (COMPILER_GCC)
     #    (gdb) bt
     #0  memcpy (destination=0x7faa6e9f1638, source=0x7faa81d9e9a8, size=16) at ../libs/libmemcpy/memcpy.h:11
     #1  0x0000000005341c5f in _mm_storeu_si128 (__B=..., __P=<optimized out>) at /usr/lib/gcc/x86_64-linux-gnu/7/include/emmintrin.h:720
-    #2  memcpySmallAllowReadWriteOverflow15Impl (n=<optimized out>, src=<optimized out>, dst=<optimized out>) at ../dbms/Common/memcpySmall.h:37
+    #2  memcpySmallAllowReadWriteOverflow15Impl (n=<optimized out>, src=<optimized out>, dst=<optimized out>) at ../src/Common/memcpySmall.h:37
     add_definitions ("-fno-tree-loop-distribute-patterns")
 endif ()
diff --git a/src/Client/CMakeLists.txt b/src/Client/CMakeLists.txt
new file mode 100644
index 00000000000..88c05163602
--- /dev/null
+++ b/src/Client/CMakeLists.txt
@@ -0,0 +1 @@
+add_subdirectory(tests)
diff --git a/dbms/Client/Connection.cpp b/src/Client/Connection.cpp similarity index 100% rename from dbms/Client/Connection.cpp rename to src/Client/Connection.cpp diff --git a/dbms/Client/Connection.h b/src/Client/Connection.h similarity index 100% rename from dbms/Client/Connection.h rename to src/Client/Connection.h diff --git a/dbms/Client/ConnectionPool.h b/src/Client/ConnectionPool.h similarity index 100% rename from dbms/Client/ConnectionPool.h rename to src/Client/ConnectionPool.h diff --git a/dbms/Client/ConnectionPoolWithFailover.cpp b/src/Client/ConnectionPoolWithFailover.cpp similarity index 100% rename from dbms/Client/ConnectionPoolWithFailover.cpp rename to src/Client/ConnectionPoolWithFailover.cpp diff --git a/dbms/Client/ConnectionPoolWithFailover.h b/src/Client/ConnectionPoolWithFailover.h similarity index 100% rename from dbms/Client/ConnectionPoolWithFailover.h rename to src/Client/ConnectionPoolWithFailover.h diff --git a/dbms/Client/MultiplexedConnections.cpp b/src/Client/MultiplexedConnections.cpp similarity index 100% rename from dbms/Client/MultiplexedConnections.cpp rename to src/Client/MultiplexedConnections.cpp diff --git a/dbms/Client/MultiplexedConnections.h b/src/Client/MultiplexedConnections.h similarity index 100% rename from dbms/Client/MultiplexedConnections.h rename to src/Client/MultiplexedConnections.h diff --git a/dbms/Client/TimeoutSetter.cpp b/src/Client/TimeoutSetter.cpp similarity index 100% rename from dbms/Client/TimeoutSetter.cpp rename to src/Client/TimeoutSetter.cpp diff --git a/dbms/Client/TimeoutSetter.h b/src/Client/TimeoutSetter.h similarity index 100% rename from dbms/Client/TimeoutSetter.h rename to src/Client/TimeoutSetter.h diff --git a/dbms/Client/tests/CMakeLists.txt b/src/Client/tests/CMakeLists.txt similarity index 100% rename from dbms/Client/tests/CMakeLists.txt rename to src/Client/tests/CMakeLists.txt diff --git a/dbms/Client/tests/test_connect.cpp b/src/Client/tests/test_connect.cpp similarity index 100% rename from dbms/Client/tests/test_connect.cpp rename to src/Client/tests/test_connect.cpp diff --git a/dbms/Columns/CMakeLists.txt b/src/Columns/CMakeLists.txt similarity index 100% rename from dbms/Columns/CMakeLists.txt rename to src/Columns/CMakeLists.txt diff --git a/dbms/Columns/Collator.cpp b/src/Columns/Collator.cpp similarity index 100% rename from dbms/Columns/Collator.cpp rename to src/Columns/Collator.cpp diff --git a/dbms/Columns/Collator.h b/src/Columns/Collator.h similarity index 100% rename from dbms/Columns/Collator.h rename to src/Columns/Collator.h diff --git a/dbms/Columns/ColumnAggregateFunction.cpp b/src/Columns/ColumnAggregateFunction.cpp similarity index 100% rename from dbms/Columns/ColumnAggregateFunction.cpp rename to src/Columns/ColumnAggregateFunction.cpp diff --git a/dbms/Columns/ColumnAggregateFunction.h b/src/Columns/ColumnAggregateFunction.h similarity index 100% rename from dbms/Columns/ColumnAggregateFunction.h rename to src/Columns/ColumnAggregateFunction.h diff --git a/dbms/Columns/ColumnArray.cpp b/src/Columns/ColumnArray.cpp similarity
index 100% rename from dbms/Columns/ColumnArray.cpp rename to src/Columns/ColumnArray.cpp diff --git a/dbms/Columns/ColumnArray.h b/src/Columns/ColumnArray.h similarity index 100% rename from dbms/Columns/ColumnArray.h rename to src/Columns/ColumnArray.h diff --git a/dbms/Columns/ColumnConst.cpp b/src/Columns/ColumnConst.cpp similarity index 100% rename from dbms/Columns/ColumnConst.cpp rename to src/Columns/ColumnConst.cpp diff --git a/dbms/Columns/ColumnConst.h b/src/Columns/ColumnConst.h similarity index 100% rename from dbms/Columns/ColumnConst.h rename to src/Columns/ColumnConst.h diff --git a/dbms/Columns/ColumnDecimal.cpp b/src/Columns/ColumnDecimal.cpp similarity index 100% rename from dbms/Columns/ColumnDecimal.cpp rename to src/Columns/ColumnDecimal.cpp diff --git a/dbms/Columns/ColumnDecimal.h b/src/Columns/ColumnDecimal.h similarity index 100% rename from dbms/Columns/ColumnDecimal.h rename to src/Columns/ColumnDecimal.h diff --git a/dbms/Columns/ColumnFixedString.cpp b/src/Columns/ColumnFixedString.cpp similarity index 100% rename from dbms/Columns/ColumnFixedString.cpp rename to src/Columns/ColumnFixedString.cpp diff --git a/dbms/Columns/ColumnFixedString.h b/src/Columns/ColumnFixedString.h similarity index 100% rename from dbms/Columns/ColumnFixedString.h rename to src/Columns/ColumnFixedString.h diff --git a/dbms/Columns/ColumnFunction.cpp b/src/Columns/ColumnFunction.cpp similarity index 100% rename from dbms/Columns/ColumnFunction.cpp rename to src/Columns/ColumnFunction.cpp diff --git a/dbms/Columns/ColumnFunction.h b/src/Columns/ColumnFunction.h similarity index 100% rename from dbms/Columns/ColumnFunction.h rename to src/Columns/ColumnFunction.h diff --git a/dbms/Columns/ColumnLowCardinality.cpp b/src/Columns/ColumnLowCardinality.cpp similarity index 100% rename from dbms/Columns/ColumnLowCardinality.cpp rename to src/Columns/ColumnLowCardinality.cpp diff --git a/dbms/Columns/ColumnLowCardinality.h b/src/Columns/ColumnLowCardinality.h similarity index 100% rename from dbms/Columns/ColumnLowCardinality.h rename to src/Columns/ColumnLowCardinality.h diff --git a/dbms/Columns/ColumnNothing.h b/src/Columns/ColumnNothing.h similarity index 100% rename from dbms/Columns/ColumnNothing.h rename to src/Columns/ColumnNothing.h diff --git a/dbms/Columns/ColumnNullable.cpp b/src/Columns/ColumnNullable.cpp similarity index 100% rename from dbms/Columns/ColumnNullable.cpp rename to src/Columns/ColumnNullable.cpp diff --git a/dbms/Columns/ColumnNullable.h b/src/Columns/ColumnNullable.h similarity index 100% rename from dbms/Columns/ColumnNullable.h rename to src/Columns/ColumnNullable.h diff --git a/dbms/Columns/ColumnSet.h b/src/Columns/ColumnSet.h similarity index 100% rename from dbms/Columns/ColumnSet.h rename to src/Columns/ColumnSet.h diff --git a/dbms/Columns/ColumnString.cpp b/src/Columns/ColumnString.cpp similarity index 100% rename from dbms/Columns/ColumnString.cpp rename to src/Columns/ColumnString.cpp diff --git a/dbms/Columns/ColumnString.h b/src/Columns/ColumnString.h similarity index 100% rename from dbms/Columns/ColumnString.h rename to src/Columns/ColumnString.h diff --git a/dbms/Columns/ColumnTuple.cpp b/src/Columns/ColumnTuple.cpp similarity index 100% rename from dbms/Columns/ColumnTuple.cpp rename to src/Columns/ColumnTuple.cpp diff --git a/dbms/Columns/ColumnTuple.h b/src/Columns/ColumnTuple.h similarity index 100% rename from dbms/Columns/ColumnTuple.h rename to src/Columns/ColumnTuple.h diff --git a/dbms/Columns/ColumnUnique.h b/src/Columns/ColumnUnique.h 
diff --git a/dbms/Columns/ColumnVector.cpp b/src/Columns/ColumnVector.cpp
similarity index 100%
rename from dbms/Columns/ColumnVector.cpp
rename to src/Columns/ColumnVector.cpp
diff --git a/dbms/Columns/ColumnVector.h b/src/Columns/ColumnVector.h
similarity index 100%
rename from dbms/Columns/ColumnVector.h
rename to src/Columns/ColumnVector.h
diff --git a/dbms/Columns/ColumnVectorHelper.h b/src/Columns/ColumnVectorHelper.h
similarity index 100%
rename from dbms/Columns/ColumnVectorHelper.h
rename to src/Columns/ColumnVectorHelper.h
diff --git a/dbms/Columns/ColumnsCommon.cpp b/src/Columns/ColumnsCommon.cpp
similarity index 100%
rename from dbms/Columns/ColumnsCommon.cpp
rename to src/Columns/ColumnsCommon.cpp
diff --git a/dbms/Columns/ColumnsCommon.h b/src/Columns/ColumnsCommon.h
similarity index 100%
rename from dbms/Columns/ColumnsCommon.h
rename to src/Columns/ColumnsCommon.h
diff --git a/dbms/Columns/ColumnsNumber.h b/src/Columns/ColumnsNumber.h
similarity index 100%
rename from dbms/Columns/ColumnsNumber.h
rename to src/Columns/ColumnsNumber.h
diff --git a/dbms/Columns/FilterDescription.cpp b/src/Columns/FilterDescription.cpp
similarity index 100%
rename from dbms/Columns/FilterDescription.cpp
rename to src/Columns/FilterDescription.cpp
diff --git a/dbms/Columns/FilterDescription.h b/src/Columns/FilterDescription.h
similarity index 100%
rename from dbms/Columns/FilterDescription.h
rename to src/Columns/FilterDescription.h
diff --git a/dbms/Columns/IColumn.cpp b/src/Columns/IColumn.cpp
similarity index 100%
rename from dbms/Columns/IColumn.cpp
rename to src/Columns/IColumn.cpp
diff --git a/dbms/Columns/IColumn.h b/src/Columns/IColumn.h
similarity index 100%
rename from dbms/Columns/IColumn.h
rename to src/Columns/IColumn.h
diff --git a/dbms/Columns/IColumnDummy.h b/src/Columns/IColumnDummy.h
similarity index 100%
rename from dbms/Columns/IColumnDummy.h
rename to src/Columns/IColumnDummy.h
diff --git a/dbms/Columns/IColumnImpl.h b/src/Columns/IColumnImpl.h
similarity index 100%
rename from dbms/Columns/IColumnImpl.h
rename to src/Columns/IColumnImpl.h
diff --git a/dbms/Columns/IColumnUnique.h b/src/Columns/IColumnUnique.h
similarity index 100%
rename from dbms/Columns/IColumnUnique.h
rename to src/Columns/IColumnUnique.h
diff --git a/dbms/Columns/ReverseIndex.h b/src/Columns/ReverseIndex.h
similarity index 100%
rename from dbms/Columns/ReverseIndex.h
rename to src/Columns/ReverseIndex.h
diff --git a/dbms/Columns/getLeastSuperColumn.cpp b/src/Columns/getLeastSuperColumn.cpp
similarity index 100%
rename from dbms/Columns/getLeastSuperColumn.cpp
rename to src/Columns/getLeastSuperColumn.cpp
diff --git a/dbms/Columns/getLeastSuperColumn.h b/src/Columns/getLeastSuperColumn.h
similarity index 100%
rename from dbms/Columns/getLeastSuperColumn.h
rename to src/Columns/getLeastSuperColumn.h
diff --git a/dbms/Columns/tests/CMakeLists.txt b/src/Columns/tests/CMakeLists.txt
similarity index 100%
rename from dbms/Columns/tests/CMakeLists.txt
rename to src/Columns/tests/CMakeLists.txt
diff --git a/dbms/Columns/tests/gtest_column_unique.cpp b/src/Columns/tests/gtest_column_unique.cpp
similarity index 100%
rename from dbms/Columns/tests/gtest_column_unique.cpp
rename to src/Columns/tests/gtest_column_unique.cpp
diff --git a/dbms/Columns/tests/gtest_weak_hash_32.cpp b/src/Columns/tests/gtest_weak_hash_32.cpp
similarity index 100%
rename from dbms/Columns/tests/gtest_weak_hash_32.cpp
rename to src/Columns/tests/gtest_weak_hash_32.cpp
diff --git a/dbms/Common/ActionBlocker.h b/src/Common/ActionBlocker.h
similarity index 100%
rename from dbms/Common/ActionBlocker.h
rename to src/Common/ActionBlocker.h
diff --git a/dbms/Common/ActionLock.cpp b/src/Common/ActionLock.cpp
similarity index 100%
rename from dbms/Common/ActionLock.cpp
rename to src/Common/ActionLock.cpp
diff --git a/dbms/Common/ActionLock.h b/src/Common/ActionLock.h
similarity index 100%
rename from dbms/Common/ActionLock.h
rename to src/Common/ActionLock.h
diff --git a/dbms/Common/AlignedBuffer.cpp b/src/Common/AlignedBuffer.cpp
similarity index 100%
rename from dbms/Common/AlignedBuffer.cpp
rename to src/Common/AlignedBuffer.cpp
diff --git a/dbms/Common/AlignedBuffer.h b/src/Common/AlignedBuffer.h
similarity index 100%
rename from dbms/Common/AlignedBuffer.h
rename to src/Common/AlignedBuffer.h
diff --git a/dbms/Common/Allocator.h b/src/Common/Allocator.h
similarity index 100%
rename from dbms/Common/Allocator.h
rename to src/Common/Allocator.h
diff --git a/dbms/Common/Allocator_fwd.h b/src/Common/Allocator_fwd.h
similarity index 100%
rename from dbms/Common/Allocator_fwd.h
rename to src/Common/Allocator_fwd.h
diff --git a/dbms/Common/Arena.h b/src/Common/Arena.h
similarity index 100%
rename from dbms/Common/Arena.h
rename to src/Common/Arena.h
diff --git a/dbms/Common/ArenaAllocator.h b/src/Common/ArenaAllocator.h
similarity index 100%
rename from dbms/Common/ArenaAllocator.h
rename to src/Common/ArenaAllocator.h
diff --git a/dbms/Common/ArenaWithFreeLists.h b/src/Common/ArenaWithFreeLists.h
similarity index 100%
rename from dbms/Common/ArenaWithFreeLists.h
rename to src/Common/ArenaWithFreeLists.h
diff --git a/dbms/Common/ArrayCache.h b/src/Common/ArrayCache.h
similarity index 100%
rename from dbms/Common/ArrayCache.h
rename to src/Common/ArrayCache.h
diff --git a/dbms/Common/AutoArray.h b/src/Common/AutoArray.h
similarity index 100%
rename from dbms/Common/AutoArray.h
rename to src/Common/AutoArray.h
diff --git a/dbms/Common/BitHelpers.h b/src/Common/BitHelpers.h
similarity index 100%
rename from dbms/Common/BitHelpers.h
rename to src/Common/BitHelpers.h
diff --git a/dbms/Common/CMakeLists.txt b/src/Common/CMakeLists.txt
similarity index 100%
rename from dbms/Common/CMakeLists.txt
rename to src/Common/CMakeLists.txt
diff --git a/dbms/Common/COW.h b/src/Common/COW.h
similarity index 100%
rename from dbms/Common/COW.h
rename to src/Common/COW.h
diff --git a/dbms/Common/ClickHouseRevision.cpp b/src/Common/ClickHouseRevision.cpp
similarity index 100%
rename from dbms/Common/ClickHouseRevision.cpp
rename to src/Common/ClickHouseRevision.cpp
diff --git a/dbms/Common/ClickHouseRevision.h b/src/Common/ClickHouseRevision.h
similarity index 100%
rename from dbms/Common/ClickHouseRevision.h
rename to src/Common/ClickHouseRevision.h
diff --git a/dbms/Common/ColumnsHashing.h b/src/Common/ColumnsHashing.h
similarity index 100%
rename from dbms/Common/ColumnsHashing.h
rename to src/Common/ColumnsHashing.h
diff --git a/dbms/Common/ColumnsHashingImpl.h b/src/Common/ColumnsHashingImpl.h
similarity index 100%
rename from dbms/Common/ColumnsHashingImpl.h
rename to src/Common/ColumnsHashingImpl.h
diff --git a/dbms/Common/CombinedCardinalityEstimator.h b/src/Common/CombinedCardinalityEstimator.h
similarity index 100%
rename from dbms/Common/CombinedCardinalityEstimator.h
rename to src/Common/CombinedCardinalityEstimator.h
diff --git a/dbms/Common/CompactArray.h b/src/Common/CompactArray.h
similarity index 100%
rename from dbms/Common/CompactArray.h
rename to src/Common/CompactArray.h
diff --git a/dbms/Common/ConcurrentBoundedQueue.h b/src/Common/ConcurrentBoundedQueue.h
similarity index 100%
rename from dbms/Common/ConcurrentBoundedQueue.h
rename to src/Common/ConcurrentBoundedQueue.h
diff --git a/dbms/Common/Config/AbstractConfigurationComparison.cpp b/src/Common/Config/AbstractConfigurationComparison.cpp
similarity index 100%
rename from dbms/Common/Config/AbstractConfigurationComparison.cpp
rename to src/Common/Config/AbstractConfigurationComparison.cpp
diff --git a/dbms/Common/Config/AbstractConfigurationComparison.h b/src/Common/Config/AbstractConfigurationComparison.h
similarity index 100%
rename from dbms/Common/Config/AbstractConfigurationComparison.h
rename to src/Common/Config/AbstractConfigurationComparison.h
diff --git a/dbms/Common/Config/CMakeLists.txt b/src/Common/Config/CMakeLists.txt
similarity index 100%
rename from dbms/Common/Config/CMakeLists.txt
rename to src/Common/Config/CMakeLists.txt
diff --git a/dbms/Common/Config/ConfigProcessor.cpp b/src/Common/Config/ConfigProcessor.cpp
similarity index 100%
rename from dbms/Common/Config/ConfigProcessor.cpp
rename to src/Common/Config/ConfigProcessor.cpp
diff --git a/dbms/Common/Config/ConfigProcessor.h b/src/Common/Config/ConfigProcessor.h
similarity index 100%
rename from dbms/Common/Config/ConfigProcessor.h
rename to src/Common/Config/ConfigProcessor.h
diff --git a/dbms/Common/Config/ConfigReloader.cpp b/src/Common/Config/ConfigReloader.cpp
similarity index 100%
rename from dbms/Common/Config/ConfigReloader.cpp
rename to src/Common/Config/ConfigReloader.cpp
diff --git a/dbms/Common/Config/ConfigReloader.h b/src/Common/Config/ConfigReloader.h
similarity index 100%
rename from dbms/Common/Config/ConfigReloader.h
rename to src/Common/Config/ConfigReloader.h
diff --git a/dbms/Common/Config/configReadClient.cpp b/src/Common/Config/configReadClient.cpp
similarity index 100%
rename from dbms/Common/Config/configReadClient.cpp
rename to src/Common/Config/configReadClient.cpp
diff --git a/dbms/Common/Config/configReadClient.h b/src/Common/Config/configReadClient.h
similarity index 100%
rename from dbms/Common/Config/configReadClient.h
rename to src/Common/Config/configReadClient.h
diff --git a/dbms/Common/CounterInFile.h b/src/Common/CounterInFile.h
similarity index 100%
rename from dbms/Common/CounterInFile.h
rename to src/Common/CounterInFile.h
diff --git a/dbms/Common/CpuId.h b/src/Common/CpuId.h
similarity index 100%
rename from dbms/Common/CpuId.h
rename to src/Common/CpuId.h
diff --git a/dbms/Common/CurrentMetrics.cpp b/src/Common/CurrentMetrics.cpp
similarity index 100%
rename from dbms/Common/CurrentMetrics.cpp
rename to src/Common/CurrentMetrics.cpp
diff --git a/dbms/Common/CurrentMetrics.h b/src/Common/CurrentMetrics.h
similarity index 100%
rename from dbms/Common/CurrentMetrics.h
rename to src/Common/CurrentMetrics.h
diff --git a/dbms/Common/CurrentThread.cpp b/src/Common/CurrentThread.cpp
similarity index 100%
rename from dbms/Common/CurrentThread.cpp
rename to src/Common/CurrentThread.cpp
diff --git a/dbms/Common/CurrentThread.h b/src/Common/CurrentThread.h
similarity index 100%
rename from dbms/Common/CurrentThread.h
rename to src/Common/CurrentThread.h
diff --git a/dbms/Common/DNSResolver.cpp b/src/Common/DNSResolver.cpp
similarity index 100%
rename from dbms/Common/DNSResolver.cpp
rename to src/Common/DNSResolver.cpp
diff --git a/dbms/Common/DNSResolver.h b/src/Common/DNSResolver.h
similarity index 100%
rename from dbms/Common/DNSResolver.h
rename to src/Common/DNSResolver.h
diff --git a/dbms/Common/Dwarf.cpp b/src/Common/Dwarf.cpp
similarity index 100%
rename from dbms/Common/Dwarf.cpp
rename to src/Common/Dwarf.cpp
diff --git a/dbms/Common/Dwarf.h b/src/Common/Dwarf.h
similarity index 100%
rename from dbms/Common/Dwarf.h
rename to src/Common/Dwarf.h
diff --git a/dbms/Common/Elf.cpp b/src/Common/Elf.cpp
similarity index 100%
rename from dbms/Common/Elf.cpp
rename to src/Common/Elf.cpp
diff --git a/dbms/Common/Elf.h b/src/Common/Elf.h
similarity index 100%
rename from dbms/Common/Elf.h
rename to src/Common/Elf.h
diff --git a/dbms/Common/ErrorCodes.cpp b/src/Common/ErrorCodes.cpp
similarity index 100%
rename from dbms/Common/ErrorCodes.cpp
rename to src/Common/ErrorCodes.cpp
diff --git a/dbms/Common/EventCounter.h b/src/Common/EventCounter.h
similarity index 100%
rename from dbms/Common/EventCounter.h
rename to src/Common/EventCounter.h
diff --git a/dbms/Common/Exception.cpp b/src/Common/Exception.cpp
similarity index 100%
rename from dbms/Common/Exception.cpp
rename to src/Common/Exception.cpp
diff --git a/dbms/Common/Exception.h b/src/Common/Exception.h
similarity index 100%
rename from dbms/Common/Exception.h
rename to src/Common/Exception.h
diff --git a/dbms/Common/ExternalLoaderStatus.cpp b/src/Common/ExternalLoaderStatus.cpp
similarity index 100%
rename from dbms/Common/ExternalLoaderStatus.cpp
rename to src/Common/ExternalLoaderStatus.cpp
diff --git a/dbms/Common/ExternalLoaderStatus.h b/src/Common/ExternalLoaderStatus.h
similarity index 100%
rename from dbms/Common/ExternalLoaderStatus.h
rename to src/Common/ExternalLoaderStatus.h
diff --git a/dbms/Common/FieldVisitors.cpp b/src/Common/FieldVisitors.cpp
similarity index 100%
rename from dbms/Common/FieldVisitors.cpp
rename to src/Common/FieldVisitors.cpp
diff --git a/dbms/Common/FieldVisitors.h b/src/Common/FieldVisitors.h
similarity index 100%
rename from dbms/Common/FieldVisitors.h
rename to src/Common/FieldVisitors.h
diff --git a/dbms/Common/FileChecker.cpp b/src/Common/FileChecker.cpp
similarity index 100%
rename from dbms/Common/FileChecker.cpp
rename to src/Common/FileChecker.cpp
diff --git a/dbms/Common/FileChecker.h b/src/Common/FileChecker.h
similarity index 100%
rename from dbms/Common/FileChecker.h
rename to src/Common/FileChecker.h
diff --git a/dbms/Common/FileUpdatesTracker.h b/src/Common/FileUpdatesTracker.h
similarity index 100%
rename from dbms/Common/FileUpdatesTracker.h
rename to src/Common/FileUpdatesTracker.h
diff --git a/dbms/Common/HTMLForm.h b/src/Common/HTMLForm.h
similarity index 100%
rename from dbms/Common/HTMLForm.h
rename to src/Common/HTMLForm.h
diff --git a/dbms/Common/HashTable/ClearableHashMap.h b/src/Common/HashTable/ClearableHashMap.h
similarity index 100%
rename from dbms/Common/HashTable/ClearableHashMap.h
rename to src/Common/HashTable/ClearableHashMap.h
diff --git a/dbms/Common/HashTable/ClearableHashSet.h b/src/Common/HashTable/ClearableHashSet.h
similarity index 100%
rename from dbms/Common/HashTable/ClearableHashSet.h
rename to src/Common/HashTable/ClearableHashSet.h
diff --git a/dbms/Common/HashTable/FixedClearableHashMap.h b/src/Common/HashTable/FixedClearableHashMap.h
similarity index 100%
rename from dbms/Common/HashTable/FixedClearableHashMap.h
rename to src/Common/HashTable/FixedClearableHashMap.h
diff --git a/dbms/Common/HashTable/FixedClearableHashSet.h b/src/Common/HashTable/FixedClearableHashSet.h
similarity index 100%
rename from dbms/Common/HashTable/FixedClearableHashSet.h
rename to src/Common/HashTable/FixedClearableHashSet.h
diff --git a/dbms/Common/HashTable/FixedHashMap.h b/src/Common/HashTable/FixedHashMap.h
similarity index 100%
rename from dbms/Common/HashTable/FixedHashMap.h
rename to src/Common/HashTable/FixedHashMap.h
diff --git a/dbms/Common/HashTable/FixedHashSet.h b/src/Common/HashTable/FixedHashSet.h
similarity index 100%
rename from dbms/Common/HashTable/FixedHashSet.h
rename to src/Common/HashTable/FixedHashSet.h
diff --git a/dbms/Common/HashTable/FixedHashTable.h b/src/Common/HashTable/FixedHashTable.h
similarity index 100%
rename from dbms/Common/HashTable/FixedHashTable.h
rename to src/Common/HashTable/FixedHashTable.h
diff --git a/dbms/Common/HashTable/Hash.h b/src/Common/HashTable/Hash.h
similarity index 100%
rename from dbms/Common/HashTable/Hash.h
rename to src/Common/HashTable/Hash.h
diff --git a/dbms/Common/HashTable/HashMap.h b/src/Common/HashTable/HashMap.h
similarity index 100%
rename from dbms/Common/HashTable/HashMap.h
rename to src/Common/HashTable/HashMap.h
diff --git a/dbms/Common/HashTable/HashSet.h b/src/Common/HashTable/HashSet.h
similarity index 100%
rename from dbms/Common/HashTable/HashSet.h
rename to src/Common/HashTable/HashSet.h
diff --git a/dbms/Common/HashTable/HashTable.h b/src/Common/HashTable/HashTable.h
similarity index 100%
rename from dbms/Common/HashTable/HashTable.h
rename to src/Common/HashTable/HashTable.h
diff --git a/dbms/Common/HashTable/HashTableAllocator.h b/src/Common/HashTable/HashTableAllocator.h
similarity index 100%
rename from dbms/Common/HashTable/HashTableAllocator.h
rename to src/Common/HashTable/HashTableAllocator.h
diff --git a/dbms/Common/HashTable/HashTableKeyHolder.h b/src/Common/HashTable/HashTableKeyHolder.h
similarity index 100%
rename from dbms/Common/HashTable/HashTableKeyHolder.h
rename to src/Common/HashTable/HashTableKeyHolder.h
diff --git a/dbms/Common/HashTable/SmallTable.h b/src/Common/HashTable/SmallTable.h
similarity index 100%
rename from dbms/Common/HashTable/SmallTable.h
rename to src/Common/HashTable/SmallTable.h
diff --git a/dbms/Common/HashTable/StringHashMap.h b/src/Common/HashTable/StringHashMap.h
similarity index 100%
rename from dbms/Common/HashTable/StringHashMap.h
rename to src/Common/HashTable/StringHashMap.h
diff --git a/dbms/Common/HashTable/StringHashTable.h b/src/Common/HashTable/StringHashTable.h
similarity index 100%
rename from dbms/Common/HashTable/StringHashTable.h
rename to src/Common/HashTable/StringHashTable.h
diff --git a/dbms/Common/HashTable/TwoLevelHashMap.h b/src/Common/HashTable/TwoLevelHashMap.h
similarity index 100%
rename from dbms/Common/HashTable/TwoLevelHashMap.h
rename to src/Common/HashTable/TwoLevelHashMap.h
diff --git a/dbms/Common/HashTable/TwoLevelHashTable.h b/src/Common/HashTable/TwoLevelHashTable.h
similarity index 100%
rename from dbms/Common/HashTable/TwoLevelHashTable.h
rename to src/Common/HashTable/TwoLevelHashTable.h
diff --git a/dbms/Common/HashTable/TwoLevelStringHashMap.h b/src/Common/HashTable/TwoLevelStringHashMap.h
similarity index 100%
rename from dbms/Common/HashTable/TwoLevelStringHashMap.h
rename to src/Common/HashTable/TwoLevelStringHashMap.h
diff --git a/dbms/Common/HashTable/TwoLevelStringHashTable.h b/src/Common/HashTable/TwoLevelStringHashTable.h
similarity index 100%
rename from dbms/Common/HashTable/TwoLevelStringHashTable.h
rename to src/Common/HashTable/TwoLevelStringHashTable.h
diff --git a/dbms/Common/HyperLogLogBiasEstimator.h b/src/Common/HyperLogLogBiasEstimator.h
similarity index 100%
rename from dbms/Common/HyperLogLogBiasEstimator.h
rename to src/Common/HyperLogLogBiasEstimator.h
diff --git a/dbms/Common/HyperLogLogCounter.h b/src/Common/HyperLogLogCounter.h
similarity index 100%
rename from dbms/Common/HyperLogLogCounter.h
rename to src/Common/HyperLogLogCounter.h
diff --git a/dbms/Common/HyperLogLogWithSmallSetOptimization.h b/src/Common/HyperLogLogWithSmallSetOptimization.h
similarity index 100%
rename from dbms/Common/HyperLogLogWithSmallSetOptimization.h
rename to src/Common/HyperLogLogWithSmallSetOptimization.h
diff --git a/dbms/Common/IFactoryWithAliases.h b/src/Common/IFactoryWithAliases.h
similarity index 100%
rename from dbms/Common/IFactoryWithAliases.h
rename to src/Common/IFactoryWithAliases.h
diff --git a/dbms/Common/IPv6ToBinary.cpp b/src/Common/IPv6ToBinary.cpp
similarity index 100%
rename from dbms/Common/IPv6ToBinary.cpp
rename to src/Common/IPv6ToBinary.cpp
diff --git a/dbms/Common/IPv6ToBinary.h b/src/Common/IPv6ToBinary.h
similarity index 100%
rename from dbms/Common/IPv6ToBinary.h
rename to src/Common/IPv6ToBinary.h
diff --git a/dbms/Common/Increment.h b/src/Common/Increment.h
similarity index 100%
rename from dbms/Common/Increment.h
rename to src/Common/Increment.h
diff --git a/dbms/Common/InterruptListener.h b/src/Common/InterruptListener.h
similarity index 100%
rename from dbms/Common/InterruptListener.h
rename to src/Common/InterruptListener.h
diff --git a/dbms/Common/IntervalKind.cpp b/src/Common/IntervalKind.cpp
similarity index 100%
rename from dbms/Common/IntervalKind.cpp
rename to src/Common/IntervalKind.cpp
diff --git a/dbms/Common/IntervalKind.h b/src/Common/IntervalKind.h
similarity index 100%
rename from dbms/Common/IntervalKind.h
rename to src/Common/IntervalKind.h
diff --git a/dbms/Common/LRUCache.h b/src/Common/LRUCache.h
similarity index 100%
rename from dbms/Common/LRUCache.h
rename to src/Common/LRUCache.h
diff --git a/dbms/Common/Macros.cpp b/src/Common/Macros.cpp
similarity index 100%
rename from dbms/Common/Macros.cpp
rename to src/Common/Macros.cpp
diff --git a/dbms/Common/Macros.h b/src/Common/Macros.h
similarity index 100%
rename from dbms/Common/Macros.h
rename to src/Common/Macros.h
diff --git a/dbms/Common/MemorySanitizer.h b/src/Common/MemorySanitizer.h
similarity index 100%
rename from dbms/Common/MemorySanitizer.h
rename to src/Common/MemorySanitizer.h
diff --git a/dbms/Common/MemoryTracker.cpp b/src/Common/MemoryTracker.cpp
similarity index 100%
rename from dbms/Common/MemoryTracker.cpp
rename to src/Common/MemoryTracker.cpp
diff --git a/dbms/Common/MemoryTracker.h b/src/Common/MemoryTracker.h
similarity index 100%
rename from dbms/Common/MemoryTracker.h
rename to src/Common/MemoryTracker.h
diff --git a/dbms/Common/MultiVersion.h b/src/Common/MultiVersion.h
similarity index 100%
rename from dbms/Common/MultiVersion.h
rename to src/Common/MultiVersion.h
diff --git a/dbms/Common/NaNUtils.h b/src/Common/NaNUtils.h
similarity index 100%
rename from dbms/Common/NaNUtils.h
rename to src/Common/NaNUtils.h
diff --git a/dbms/Common/NamePrompter.h b/src/Common/NamePrompter.h
similarity index 100%
rename from dbms/Common/NamePrompter.h
rename to src/Common/NamePrompter.h
diff --git a/dbms/Common/NetException.h b/src/Common/NetException.h
similarity index 100%
rename from dbms/Common/NetException.h
rename to src/Common/NetException.h
diff --git a/dbms/Common/ObjectPool.h b/src/Common/ObjectPool.h
similarity index 100%
rename from dbms/Common/ObjectPool.h
rename to src/Common/ObjectPool.h
diff --git a/dbms/Common/OpenSSLHelpers.cpp b/src/Common/OpenSSLHelpers.cpp
similarity index 100%
rename from dbms/Common/OpenSSLHelpers.cpp
rename to src/Common/OpenSSLHelpers.cpp
diff --git a/dbms/Common/OpenSSLHelpers.h b/src/Common/OpenSSLHelpers.h
similarity index 100%
rename from dbms/Common/OpenSSLHelpers.h
rename to src/Common/OpenSSLHelpers.h
diff --git a/dbms/Common/OptimizedRegularExpression.cpp b/src/Common/OptimizedRegularExpression.cpp
similarity index 100%
rename from dbms/Common/OptimizedRegularExpression.cpp
rename to src/Common/OptimizedRegularExpression.cpp
diff --git a/dbms/Common/OptimizedRegularExpression.h b/src/Common/OptimizedRegularExpression.h
similarity index 100%
rename from dbms/Common/OptimizedRegularExpression.h
rename to src/Common/OptimizedRegularExpression.h
diff --git a/dbms/Common/PODArray.cpp b/src/Common/PODArray.cpp
similarity index 100%
rename from dbms/Common/PODArray.cpp
rename to src/Common/PODArray.cpp
diff --git a/dbms/Common/PODArray.h b/src/Common/PODArray.h
similarity index 100%
rename from dbms/Common/PODArray.h
rename to src/Common/PODArray.h
diff --git a/dbms/Common/PODArray_fwd.h b/src/Common/PODArray_fwd.h
similarity index 100%
rename from dbms/Common/PODArray_fwd.h
rename to src/Common/PODArray_fwd.h
diff --git a/dbms/Common/PipeFDs.cpp b/src/Common/PipeFDs.cpp
similarity index 100%
rename from dbms/Common/PipeFDs.cpp
rename to src/Common/PipeFDs.cpp
diff --git a/dbms/Common/PipeFDs.h b/src/Common/PipeFDs.h
similarity index 100%
rename from dbms/Common/PipeFDs.h
rename to src/Common/PipeFDs.h
diff --git a/dbms/Common/PoolBase.h b/src/Common/PoolBase.h
similarity index 100%
rename from dbms/Common/PoolBase.h
rename to src/Common/PoolBase.h
diff --git a/dbms/Common/PoolWithFailoverBase.h b/src/Common/PoolWithFailoverBase.h
similarity index 100%
rename from dbms/Common/PoolWithFailoverBase.h
rename to src/Common/PoolWithFailoverBase.h
diff --git a/dbms/Common/ProfileEvents.cpp b/src/Common/ProfileEvents.cpp
similarity index 100%
rename from dbms/Common/ProfileEvents.cpp
rename to src/Common/ProfileEvents.cpp
diff --git a/dbms/Common/ProfileEvents.h b/src/Common/ProfileEvents.h
similarity index 100%
rename from dbms/Common/ProfileEvents.h
rename to src/Common/ProfileEvents.h
diff --git a/dbms/Common/ProfilingScopedRWLock.h b/src/Common/ProfilingScopedRWLock.h
similarity index 100%
rename from dbms/Common/ProfilingScopedRWLock.h
rename to src/Common/ProfilingScopedRWLock.h
diff --git a/dbms/Common/QueryProfiler.cpp b/src/Common/QueryProfiler.cpp
similarity index 100%
rename from dbms/Common/QueryProfiler.cpp
rename to src/Common/QueryProfiler.cpp
diff --git a/dbms/Common/QueryProfiler.h b/src/Common/QueryProfiler.h
similarity index 100%
rename from dbms/Common/QueryProfiler.h
rename to src/Common/QueryProfiler.h
diff --git a/dbms/Common/RWLock.cpp b/src/Common/RWLock.cpp
similarity index 100%
rename from dbms/Common/RWLock.cpp
rename to src/Common/RWLock.cpp
diff --git a/dbms/Common/RWLock.h b/src/Common/RWLock.h
similarity index 100%
rename from dbms/Common/RWLock.h
rename to src/Common/RWLock.h
diff --git a/dbms/Common/RadixSort.h b/src/Common/RadixSort.h
similarity index 100%
rename from dbms/Common/RadixSort.h
rename to src/Common/RadixSort.h
diff --git a/dbms/Common/RemoteHostFilter.cpp b/src/Common/RemoteHostFilter.cpp
similarity index 100%
rename from dbms/Common/RemoteHostFilter.cpp
rename to src/Common/RemoteHostFilter.cpp
diff --git a/dbms/Common/RemoteHostFilter.h b/src/Common/RemoteHostFilter.h
similarity index 100%
rename from dbms/Common/RemoteHostFilter.h
rename to src/Common/RemoteHostFilter.h
diff --git a/dbms/Common/SensitiveDataMasker.cpp b/src/Common/SensitiveDataMasker.cpp
similarity index 100%
rename from dbms/Common/SensitiveDataMasker.cpp
rename to src/Common/SensitiveDataMasker.cpp
diff --git a/dbms/Common/SensitiveDataMasker.h b/src/Common/SensitiveDataMasker.h
similarity index 100%
rename from dbms/Common/SensitiveDataMasker.h
rename to src/Common/SensitiveDataMasker.h
diff --git a/dbms/Common/SettingsChanges.h b/src/Common/SettingsChanges.h
similarity index 100%
rename from dbms/Common/SettingsChanges.h
rename to src/Common/SettingsChanges.h
diff --git a/dbms/Common/SharedBlockRowRef.h b/src/Common/SharedBlockRowRef.h
similarity index 100%
rename from dbms/Common/SharedBlockRowRef.h
rename to src/Common/SharedBlockRowRef.h
diff --git a/dbms/Common/SharedLibrary.cpp b/src/Common/SharedLibrary.cpp
similarity index 100%
rename from dbms/Common/SharedLibrary.cpp
rename to src/Common/SharedLibrary.cpp
diff --git a/dbms/Common/SharedLibrary.h b/src/Common/SharedLibrary.h
similarity index 100%
rename from dbms/Common/SharedLibrary.h
rename to src/Common/SharedLibrary.h
diff --git a/dbms/Common/ShellCommand.cpp b/src/Common/ShellCommand.cpp
similarity index 100%
rename from dbms/Common/ShellCommand.cpp
rename to src/Common/ShellCommand.cpp
diff --git a/dbms/Common/ShellCommand.h b/src/Common/ShellCommand.h
similarity index 100%
rename from dbms/Common/ShellCommand.h
rename to src/Common/ShellCommand.h
diff --git a/dbms/Common/SimpleActionBlocker.h b/src/Common/SimpleActionBlocker.h
similarity index 100%
rename from dbms/Common/SimpleActionBlocker.h
rename to src/Common/SimpleActionBlocker.h
diff --git a/dbms/Common/SimpleIncrement.h b/src/Common/SimpleIncrement.h
similarity index 100%
rename from dbms/Common/SimpleIncrement.h
rename to src/Common/SimpleIncrement.h
diff --git a/dbms/Common/SipHash.h b/src/Common/SipHash.h
similarity index 100%
rename from dbms/Common/SipHash.h
rename to src/Common/SipHash.h
diff --git a/dbms/Common/SmallObjectPool.h b/src/Common/SmallObjectPool.h
similarity index 100%
rename from dbms/Common/SmallObjectPool.h
rename to src/Common/SmallObjectPool.h
diff --git a/dbms/Common/SpaceSaving.h b/src/Common/SpaceSaving.h
similarity index 100%
rename from dbms/Common/SpaceSaving.h
rename to src/Common/SpaceSaving.h
diff --git a/dbms/Common/StackTrace.cpp b/src/Common/StackTrace.cpp
similarity index 100%
rename from dbms/Common/StackTrace.cpp
rename to src/Common/StackTrace.cpp
diff --git a/dbms/Common/StackTrace.h b/src/Common/StackTrace.h
similarity index 100%
rename from dbms/Common/StackTrace.h
rename to src/Common/StackTrace.h
diff --git a/dbms/Common/StatusFile.cpp b/src/Common/StatusFile.cpp
similarity index 100%
rename from dbms/Common/StatusFile.cpp
rename to src/Common/StatusFile.cpp
diff --git a/dbms/Common/StatusFile.h b/src/Common/StatusFile.h
similarity index 100%
rename from dbms/Common/StatusFile.h
rename to src/Common/StatusFile.h
diff --git a/dbms/Common/StatusInfo.cpp b/src/Common/StatusInfo.cpp
similarity index 100%
rename from dbms/Common/StatusInfo.cpp
rename to src/Common/StatusInfo.cpp
diff --git a/dbms/Common/StatusInfo.h b/src/Common/StatusInfo.h
similarity index 100%
rename from dbms/Common/StatusInfo.h
rename to src/Common/StatusInfo.h
diff --git a/dbms/Common/Stopwatch.cpp b/src/Common/Stopwatch.cpp
similarity index 100%
rename from dbms/Common/Stopwatch.cpp
rename to src/Common/Stopwatch.cpp
diff --git a/dbms/Common/Stopwatch.h b/src/Common/Stopwatch.h
similarity index 100%
rename from dbms/Common/Stopwatch.h
rename to src/Common/Stopwatch.h
diff --git a/dbms/Common/StringSearcher.h b/src/Common/StringSearcher.h
similarity index 100%
rename from dbms/Common/StringSearcher.h
rename to src/Common/StringSearcher.h
diff --git a/dbms/Common/StringUtils/CMakeLists.txt b/src/Common/StringUtils/CMakeLists.txt
similarity index 100%
rename from dbms/Common/StringUtils/CMakeLists.txt
rename to src/Common/StringUtils/CMakeLists.txt
diff --git a/dbms/Common/StringUtils/StringUtils.cpp b/src/Common/StringUtils/StringUtils.cpp
similarity index 100%
rename from dbms/Common/StringUtils/StringUtils.cpp
rename to src/Common/StringUtils/StringUtils.cpp
diff --git a/dbms/Common/StringUtils/StringUtils.h b/src/Common/StringUtils/StringUtils.h
similarity index 100%
rename from dbms/Common/StringUtils/StringUtils.h
rename to src/Common/StringUtils/StringUtils.h
diff --git a/dbms/Common/StudentTTest.cpp b/src/Common/StudentTTest.cpp
similarity index 100%
rename from dbms/Common/StudentTTest.cpp
rename to src/Common/StudentTTest.cpp
diff --git a/dbms/Common/StudentTTest.h b/src/Common/StudentTTest.h
similarity index 100%
rename from dbms/Common/StudentTTest.h
rename to src/Common/StudentTTest.h
diff --git a/dbms/Common/SymbolIndex.cpp b/src/Common/SymbolIndex.cpp
similarity index 100%
rename from dbms/Common/SymbolIndex.cpp
rename to src/Common/SymbolIndex.cpp
diff --git a/dbms/Common/SymbolIndex.h b/src/Common/SymbolIndex.h
similarity index 100%
rename from dbms/Common/SymbolIndex.h
rename to src/Common/SymbolIndex.h
diff --git a/dbms/Common/TaskStatsInfoGetter.cpp b/src/Common/TaskStatsInfoGetter.cpp
similarity index 100%
rename from dbms/Common/TaskStatsInfoGetter.cpp
rename to src/Common/TaskStatsInfoGetter.cpp
diff --git a/dbms/Common/TaskStatsInfoGetter.h b/src/Common/TaskStatsInfoGetter.h
similarity index 100%
rename from dbms/Common/TaskStatsInfoGetter.h
rename to src/Common/TaskStatsInfoGetter.h
diff --git a/dbms/Common/TerminalSize.cpp b/src/Common/TerminalSize.cpp
similarity index 100%
rename from dbms/Common/TerminalSize.cpp
rename to src/Common/TerminalSize.cpp
diff --git a/dbms/Common/TerminalSize.h b/src/Common/TerminalSize.h
similarity index 100%
rename from dbms/Common/TerminalSize.h
rename to src/Common/TerminalSize.h
diff --git a/dbms/Common/ThreadFuzzer.cpp b/src/Common/ThreadFuzzer.cpp
similarity index 100%
rename from dbms/Common/ThreadFuzzer.cpp
rename to src/Common/ThreadFuzzer.cpp
diff --git a/dbms/Common/ThreadFuzzer.h b/src/Common/ThreadFuzzer.h
similarity index 100%
rename from dbms/Common/ThreadFuzzer.h
rename to src/Common/ThreadFuzzer.h
diff --git a/dbms/Common/ThreadPool.cpp b/src/Common/ThreadPool.cpp
similarity index 100%
rename from dbms/Common/ThreadPool.cpp
rename to src/Common/ThreadPool.cpp
diff --git a/dbms/Common/ThreadPool.h b/src/Common/ThreadPool.h
similarity index 100%
rename from dbms/Common/ThreadPool.h
rename to src/Common/ThreadPool.h
diff --git a/dbms/Common/ThreadProfileEvents.h b/src/Common/ThreadProfileEvents.h
similarity index 100%
rename from dbms/Common/ThreadProfileEvents.h
rename to src/Common/ThreadProfileEvents.h
diff --git a/dbms/Common/ThreadStatus.cpp b/src/Common/ThreadStatus.cpp
similarity index 100%
rename from dbms/Common/ThreadStatus.cpp
rename to src/Common/ThreadStatus.cpp
diff --git a/dbms/Common/ThreadStatus.h b/src/Common/ThreadStatus.h
similarity index 100%
rename from dbms/Common/ThreadStatus.h
rename to src/Common/ThreadStatus.h
diff --git a/dbms/Common/Throttler.h b/src/Common/Throttler.h
similarity index 100%
rename from dbms/Common/Throttler.h
rename to src/Common/Throttler.h
diff --git a/dbms/Common/TraceCollector.cpp b/src/Common/TraceCollector.cpp
similarity index 100%
rename from dbms/Common/TraceCollector.cpp
rename to src/Common/TraceCollector.cpp
diff --git a/dbms/Common/TraceCollector.h b/src/Common/TraceCollector.h
similarity index 100%
rename from dbms/Common/TraceCollector.h
rename to src/Common/TraceCollector.h
diff --git a/dbms/Common/TypeList.h b/src/Common/TypeList.h
similarity index 100%
rename from dbms/Common/TypeList.h
rename to src/Common/TypeList.h
diff --git a/dbms/Common/TypePromotion.h b/src/Common/TypePromotion.h
similarity index 100%
rename from dbms/Common/TypePromotion.h
rename to src/Common/TypePromotion.h
diff --git a/dbms/Common/UInt128.h b/src/Common/UInt128.h
similarity index 100%
rename from dbms/Common/UInt128.h
rename to src/Common/UInt128.h
diff --git a/dbms/Common/UTF8Helpers.cpp b/src/Common/UTF8Helpers.cpp
similarity index 100%
rename from dbms/Common/UTF8Helpers.cpp
rename to src/Common/UTF8Helpers.cpp
diff --git a/dbms/Common/UTF8Helpers.h b/src/Common/UTF8Helpers.h
similarity index 100%
rename from dbms/Common/UTF8Helpers.h
rename to src/Common/UTF8Helpers.h
diff --git a/dbms/Common/UnicodeBar.h b/src/Common/UnicodeBar.h
similarity index 100%
rename from dbms/Common/UnicodeBar.h
rename to src/Common/UnicodeBar.h
diff --git a/dbms/Common/VariableContext.h b/src/Common/VariableContext.h
similarity index 100%
rename from dbms/Common/VariableContext.h
rename to src/Common/VariableContext.h
diff --git a/dbms/Common/Visitor.h b/src/Common/Visitor.h
similarity index 100%
rename from dbms/Common/Visitor.h
rename to src/Common/Visitor.h
diff --git a/dbms/Common/Volnitsky.h b/src/Common/Volnitsky.h
similarity index 100%
rename from dbms/Common/Volnitsky.h
rename to src/Common/Volnitsky.h
diff --git a/dbms/Common/WeakHash.cpp b/src/Common/WeakHash.cpp
similarity index 100%
rename from dbms/Common/WeakHash.cpp
rename to src/Common/WeakHash.cpp
diff --git a/dbms/Common/WeakHash.h b/src/Common/WeakHash.h
similarity index 100%
rename from dbms/Common/WeakHash.h
rename to src/Common/WeakHash.h
diff --git a/dbms/Common/XDBCBridgeHelper.h b/src/Common/XDBCBridgeHelper.h
similarity index 100%
rename from dbms/Common/XDBCBridgeHelper.h
rename to src/Common/XDBCBridgeHelper.h
diff --git a/dbms/Common/ZooKeeper/CMakeLists.txt b/src/Common/ZooKeeper/CMakeLists.txt
similarity index 100%
rename from dbms/Common/ZooKeeper/CMakeLists.txt
rename to src/Common/ZooKeeper/CMakeLists.txt
diff --git a/dbms/Common/ZooKeeper/Common.h b/src/Common/ZooKeeper/Common.h
similarity index 100%
rename from dbms/Common/ZooKeeper/Common.h
rename to src/Common/ZooKeeper/Common.h
diff --git a/dbms/Common/ZooKeeper/IKeeper.cpp b/src/Common/ZooKeeper/IKeeper.cpp
similarity index 100%
rename from dbms/Common/ZooKeeper/IKeeper.cpp
rename to src/Common/ZooKeeper/IKeeper.cpp
diff --git a/dbms/Common/ZooKeeper/IKeeper.h b/src/Common/ZooKeeper/IKeeper.h
similarity index 100%
rename from dbms/Common/ZooKeeper/IKeeper.h
rename to src/Common/ZooKeeper/IKeeper.h
diff --git a/dbms/Common/ZooKeeper/Increment.h b/src/Common/ZooKeeper/Increment.h
similarity index 100%
rename from dbms/Common/ZooKeeper/Increment.h
rename to src/Common/ZooKeeper/Increment.h
diff --git a/dbms/Common/ZooKeeper/KeeperException.h b/src/Common/ZooKeeper/KeeperException.h
similarity index 100%
rename from dbms/Common/ZooKeeper/KeeperException.h
rename to src/Common/ZooKeeper/KeeperException.h
diff --git a/dbms/Common/ZooKeeper/LeaderElection.h b/src/Common/ZooKeeper/LeaderElection.h
similarity index 100%
rename from dbms/Common/ZooKeeper/LeaderElection.h
rename to src/Common/ZooKeeper/LeaderElection.h
diff --git a/dbms/Common/ZooKeeper/Lock.cpp b/src/Common/ZooKeeper/Lock.cpp
similarity index 100%
rename from dbms/Common/ZooKeeper/Lock.cpp
rename to src/Common/ZooKeeper/Lock.cpp
diff --git a/dbms/Common/ZooKeeper/Lock.h b/src/Common/ZooKeeper/Lock.h
similarity index 100%
rename from dbms/Common/ZooKeeper/Lock.h
rename to src/Common/ZooKeeper/Lock.h
diff --git a/dbms/Common/ZooKeeper/TestKeeper.cpp b/src/Common/ZooKeeper/TestKeeper.cpp
similarity index 100%
rename from dbms/Common/ZooKeeper/TestKeeper.cpp
rename to src/Common/ZooKeeper/TestKeeper.cpp
diff --git a/dbms/Common/ZooKeeper/TestKeeper.h b/src/Common/ZooKeeper/TestKeeper.h
similarity index 100%
rename from dbms/Common/ZooKeeper/TestKeeper.h
rename to src/Common/ZooKeeper/TestKeeper.h
diff --git a/dbms/Common/ZooKeeper/Types.h b/src/Common/ZooKeeper/Types.h
similarity index 100%
rename from dbms/Common/ZooKeeper/Types.h
rename to src/Common/ZooKeeper/Types.h
diff --git a/dbms/Common/ZooKeeper/ZooKeeper.cpp b/src/Common/ZooKeeper/ZooKeeper.cpp
similarity index 100%
rename from dbms/Common/ZooKeeper/ZooKeeper.cpp
rename to src/Common/ZooKeeper/ZooKeeper.cpp
diff --git a/dbms/Common/ZooKeeper/ZooKeeper.h b/src/Common/ZooKeeper/ZooKeeper.h
similarity index 100%
rename from dbms/Common/ZooKeeper/ZooKeeper.h
rename to src/Common/ZooKeeper/ZooKeeper.h
diff --git a/dbms/Common/ZooKeeper/ZooKeeperHolder.cpp b/src/Common/ZooKeeper/ZooKeeperHolder.cpp
similarity index 100%
rename from dbms/Common/ZooKeeper/ZooKeeperHolder.cpp
rename to src/Common/ZooKeeper/ZooKeeperHolder.cpp
diff --git a/dbms/Common/ZooKeeper/ZooKeeperHolder.h b/src/Common/ZooKeeper/ZooKeeperHolder.h
similarity index 100%
rename from dbms/Common/ZooKeeper/ZooKeeperHolder.h
rename to src/Common/ZooKeeper/ZooKeeperHolder.h
diff --git a/dbms/Common/ZooKeeper/ZooKeeperImpl.cpp b/src/Common/ZooKeeper/ZooKeeperImpl.cpp
similarity index 100%
rename from dbms/Common/ZooKeeper/ZooKeeperImpl.cpp
rename to src/Common/ZooKeeper/ZooKeeperImpl.cpp
diff --git a/dbms/Common/ZooKeeper/ZooKeeperImpl.h b/src/Common/ZooKeeper/ZooKeeperImpl.h
similarity index 100%
rename from dbms/Common/ZooKeeper/ZooKeeperImpl.h
rename to src/Common/ZooKeeper/ZooKeeperImpl.h
diff --git a/dbms/Common/ZooKeeper/ZooKeeperNodeCache.cpp b/src/Common/ZooKeeper/ZooKeeperNodeCache.cpp
similarity index 100%
rename from dbms/Common/ZooKeeper/ZooKeeperNodeCache.cpp
rename to src/Common/ZooKeeper/ZooKeeperNodeCache.cpp
diff --git a/dbms/Common/ZooKeeper/ZooKeeperNodeCache.h b/src/Common/ZooKeeper/ZooKeeperNodeCache.h
similarity index 100%
rename from dbms/Common/ZooKeeper/ZooKeeperNodeCache.h
rename to src/Common/ZooKeeper/ZooKeeperNodeCache.h
diff --git a/dbms/Common/ZooKeeper/tests/CMakeLists.txt b/src/Common/ZooKeeper/tests/CMakeLists.txt
similarity index 100%
rename from dbms/Common/ZooKeeper/tests/CMakeLists.txt
rename to src/Common/ZooKeeper/tests/CMakeLists.txt
diff --git a/dbms/Common/ZooKeeper/tests/gtest_zkutil_test_multi_exception.cpp b/src/Common/ZooKeeper/tests/gtest_zkutil_test_multi_exception.cpp
similarity index 100%
rename from dbms/Common/ZooKeeper/tests/gtest_zkutil_test_multi_exception.cpp
rename to src/Common/ZooKeeper/tests/gtest_zkutil_test_multi_exception.cpp
diff --git a/dbms/Common/ZooKeeper/tests/nozk.sh b/src/Common/ZooKeeper/tests/nozk.sh
similarity index 100%
rename from dbms/Common/ZooKeeper/tests/nozk.sh
rename to src/Common/ZooKeeper/tests/nozk.sh
diff --git a/dbms/Common/ZooKeeper/tests/yeszk.sh b/src/Common/ZooKeeper/tests/yeszk.sh
similarity index 100%
rename from dbms/Common/ZooKeeper/tests/yeszk.sh
rename to src/Common/ZooKeeper/tests/yeszk.sh
diff --git a/dbms/Common/ZooKeeper/tests/zk_many_watches_reconnect.cpp b/src/Common/ZooKeeper/tests/zk_many_watches_reconnect.cpp
similarity index 100%
rename from dbms/Common/ZooKeeper/tests/zk_many_watches_reconnect.cpp
rename to src/Common/ZooKeeper/tests/zk_many_watches_reconnect.cpp
diff --git a/dbms/Common/ZooKeeper/tests/zkutil_expiration_test.cpp b/src/Common/ZooKeeper/tests/zkutil_expiration_test.cpp
similarity index 100%
rename from dbms/Common/ZooKeeper/tests/zkutil_expiration_test.cpp
rename to src/Common/ZooKeeper/tests/zkutil_expiration_test.cpp
diff --git a/dbms/Common/ZooKeeper/tests/zkutil_test_async.cpp b/src/Common/ZooKeeper/tests/zkutil_test_async.cpp
similarity index 100%
rename from dbms/Common/ZooKeeper/tests/zkutil_test_async.cpp
rename to src/Common/ZooKeeper/tests/zkutil_test_async.cpp
diff --git a/dbms/Common/ZooKeeper/tests/zkutil_test_commands.cpp b/src/Common/ZooKeeper/tests/zkutil_test_commands.cpp
similarity index 100%
rename from dbms/Common/ZooKeeper/tests/zkutil_test_commands.cpp
rename to src/Common/ZooKeeper/tests/zkutil_test_commands.cpp
diff --git a/dbms/Common/ZooKeeper/tests/zkutil_test_commands_new_lib.cpp b/src/Common/ZooKeeper/tests/zkutil_test_commands_new_lib.cpp
similarity index 100%
rename from dbms/Common/ZooKeeper/tests/zkutil_test_commands_new_lib.cpp
rename to src/Common/ZooKeeper/tests/zkutil_test_commands_new_lib.cpp
diff --git a/dbms/Common/ZooKeeper/tests/zkutil_test_lock.cpp b/src/Common/ZooKeeper/tests/zkutil_test_lock.cpp
similarity index 100%
rename from dbms/Common/ZooKeeper/tests/zkutil_test_lock.cpp
rename to src/Common/ZooKeeper/tests/zkutil_test_lock.cpp
diff --git a/dbms/Common/ZooKeeper/tests/zkutil_zookeeper_holder.cpp b/src/Common/ZooKeeper/tests/zkutil_zookeeper_holder.cpp
similarity index 100%
rename from dbms/Common/ZooKeeper/tests/zkutil_zookeeper_holder.cpp
rename to src/Common/ZooKeeper/tests/zkutil_zookeeper_holder.cpp
diff --git a/dbms/Common/ZooKeeper/tests/zookeeper_impl.cpp b/src/Common/ZooKeeper/tests/zookeeper_impl.cpp
similarity index 100%
rename from dbms/Common/ZooKeeper/tests/zookeeper_impl.cpp
rename to src/Common/ZooKeeper/tests/zookeeper_impl.cpp
diff --git a/dbms/Common/assert_cast.h b/src/Common/assert_cast.h
similarity index 100%
rename from dbms/Common/assert_cast.h
rename to src/Common/assert_cast.h
diff --git a/dbms/Common/checkStackSize.cpp b/src/Common/checkStackSize.cpp
similarity index 100%
rename from dbms/Common/checkStackSize.cpp
rename to src/Common/checkStackSize.cpp
diff --git a/dbms/Common/checkStackSize.h b/src/Common/checkStackSize.h
similarity index 100%
rename from dbms/Common/checkStackSize.h
rename to src/Common/checkStackSize.h
diff --git a/dbms/Common/config.h.in b/src/Common/config.h.in
similarity index 100%
rename from dbms/Common/config.h.in
rename to src/Common/config.h.in
diff --git a/dbms/Common/config_version.h.in b/src/Common/config_version.h.in
similarity index 100%
rename from dbms/Common/config_version.h.in
rename to src/Common/config_version.h.in
diff --git a/dbms/Common/createHardLink.cpp b/src/Common/createHardLink.cpp
similarity index 100%
rename from dbms/Common/createHardLink.cpp
rename to src/Common/createHardLink.cpp
diff --git a/dbms/Common/createHardLink.h b/src/Common/createHardLink.h
similarity index 100%
rename from dbms/Common/createHardLink.h
rename to src/Common/createHardLink.h
diff --git a/dbms/Common/escapeForFileName.cpp b/src/Common/escapeForFileName.cpp
similarity index 100%
rename from dbms/Common/escapeForFileName.cpp
rename to src/Common/escapeForFileName.cpp
diff --git a/dbms/Common/escapeForFileName.h b/src/Common/escapeForFileName.h
similarity index 100%
rename from dbms/Common/escapeForFileName.h
rename to src/Common/escapeForFileName.h
diff --git a/dbms/Common/filesystemHelpers.cpp b/src/Common/filesystemHelpers.cpp
similarity index 100%
rename from dbms/Common/filesystemHelpers.cpp
rename to src/Common/filesystemHelpers.cpp
diff --git a/dbms/Common/filesystemHelpers.h b/src/Common/filesystemHelpers.h
similarity index 100%
rename from dbms/Common/filesystemHelpers.h
rename to src/Common/filesystemHelpers.h
diff --git a/dbms/Common/formatIPv6.cpp b/src/Common/formatIPv6.cpp
similarity index 100%
rename from dbms/Common/formatIPv6.cpp
rename to src/Common/formatIPv6.cpp
diff --git a/dbms/Common/formatIPv6.h b/src/Common/formatIPv6.h
similarity index 100%
rename from dbms/Common/formatIPv6.h
rename to src/Common/formatIPv6.h
diff --git a/dbms/Common/formatReadable.cpp b/src/Common/formatReadable.cpp
similarity index 100%
rename from dbms/Common/formatReadable.cpp
rename to src/Common/formatReadable.cpp
diff --git a/dbms/Common/formatReadable.h b/src/Common/formatReadable.h
similarity index 100%
rename from dbms/Common/formatReadable.h
rename to src/Common/formatReadable.h
diff --git a/dbms/Common/getExecutablePath.cpp b/src/Common/getExecutablePath.cpp
similarity index 100%
rename from dbms/Common/getExecutablePath.cpp
rename to src/Common/getExecutablePath.cpp
diff --git a/dbms/Common/getExecutablePath.h b/src/Common/getExecutablePath.h
similarity index 100%
rename from dbms/Common/getExecutablePath.h
rename to src/Common/getExecutablePath.h
diff --git a/dbms/Common/getMultipleKeysFromConfig.cpp b/src/Common/getMultipleKeysFromConfig.cpp
similarity index 100%
rename from dbms/Common/getMultipleKeysFromConfig.cpp
rename to src/Common/getMultipleKeysFromConfig.cpp
diff --git a/dbms/Common/getMultipleKeysFromConfig.h b/src/Common/getMultipleKeysFromConfig.h
similarity index 100%
rename from dbms/Common/getMultipleKeysFromConfig.h
rename to src/Common/getMultipleKeysFromConfig.h
diff --git a/dbms/Common/getNumberOfPhysicalCPUCores.cpp b/src/Common/getNumberOfPhysicalCPUCores.cpp
similarity index 100%
rename from dbms/Common/getNumberOfPhysicalCPUCores.cpp
rename to src/Common/getNumberOfPhysicalCPUCores.cpp
diff --git a/dbms/Common/getNumberOfPhysicalCPUCores.h b/src/Common/getNumberOfPhysicalCPUCores.h
similarity index 100%
rename from dbms/Common/getNumberOfPhysicalCPUCores.h
rename to src/Common/getNumberOfPhysicalCPUCores.h
diff --git a/dbms/Common/hasLinuxCapability.cpp b/src/Common/hasLinuxCapability.cpp
similarity index 100%
rename from dbms/Common/hasLinuxCapability.cpp
rename to src/Common/hasLinuxCapability.cpp
diff --git a/dbms/Common/hasLinuxCapability.h b/src/Common/hasLinuxCapability.h
similarity index 100%
rename from dbms/Common/hasLinuxCapability.h
rename to src/Common/hasLinuxCapability.h
diff --git a/dbms/Common/hex.cpp b/src/Common/hex.cpp
similarity index 100%
rename from dbms/Common/hex.cpp
rename to src/Common/hex.cpp
diff --git a/dbms/Common/hex.h b/src/Common/hex.h
similarity index 100%
rename from dbms/Common/hex.h
rename to src/Common/hex.h
diff --git a/dbms/Common/intExp.h b/src/Common/intExp.h
similarity index 100%
rename from dbms/Common/intExp.h
rename to src/Common/intExp.h
diff --git a/dbms/Common/interpolate.h b/src/Common/interpolate.h
similarity index 100%
rename from dbms/Common/interpolate.h
rename to src/Common/interpolate.h
diff --git a/dbms/Common/isLocalAddress.cpp b/src/Common/isLocalAddress.cpp
similarity index 100%
rename from dbms/Common/isLocalAddress.cpp
rename to src/Common/isLocalAddress.cpp
diff --git a/dbms/Common/isLocalAddress.h b/src/Common/isLocalAddress.h
similarity index 100%
rename from dbms/Common/isLocalAddress.h
rename to src/Common/isLocalAddress.h
diff --git a/dbms/Common/malloc.cpp b/src/Common/malloc.cpp
similarity index 100%
rename from dbms/Common/malloc.cpp
rename to src/Common/malloc.cpp
diff --git a/dbms/Common/memcmpSmall.h b/src/Common/memcmpSmall.h
similarity index 100%
rename from dbms/Common/memcmpSmall.h
rename to src/Common/memcmpSmall.h
diff --git a/dbms/Common/memcpySmall.h b/src/Common/memcpySmall.h
similarity index 100%
rename from dbms/Common/memcpySmall.h
rename to src/Common/memcpySmall.h
diff --git a/dbms/Common/new_delete.cpp b/src/Common/new_delete.cpp
similarity index 100%
rename from dbms/Common/new_delete.cpp
rename to src/Common/new_delete.cpp
diff --git a/dbms/Common/parseAddress.cpp b/src/Common/parseAddress.cpp
similarity index 100%
rename from dbms/Common/parseAddress.cpp
rename to src/Common/parseAddress.cpp
diff --git a/dbms/Common/parseAddress.h b/src/Common/parseAddress.h
similarity index 100%
rename from dbms/Common/parseAddress.h
rename to src/Common/parseAddress.h
diff --git a/dbms/Common/parseGlobs.cpp b/src/Common/parseGlobs.cpp
similarity index 100%
rename from dbms/Common/parseGlobs.cpp
rename to src/Common/parseGlobs.cpp
diff --git a/dbms/Common/parseGlobs.h b/src/Common/parseGlobs.h
similarity index 100%
rename from dbms/Common/parseGlobs.h
rename to src/Common/parseGlobs.h
diff --git a/dbms/Common/parseRemoteDescription.cpp b/src/Common/parseRemoteDescription.cpp
similarity index 100%
rename from dbms/Common/parseRemoteDescription.cpp
rename to src/Common/parseRemoteDescription.cpp
diff --git a/dbms/Common/parseRemoteDescription.h b/src/Common/parseRemoteDescription.h
similarity index 100%
rename from dbms/Common/parseRemoteDescription.h
rename to src/Common/parseRemoteDescription.h
diff --git a/dbms/Common/quoteString.cpp b/src/Common/quoteString.cpp
similarity index 100%
rename from dbms/Common/quoteString.cpp
rename to src/Common/quoteString.cpp
diff --git a/dbms/Common/quoteString.h b/src/Common/quoteString.h
similarity index 100%
rename from dbms/Common/quoteString.h
rename to src/Common/quoteString.h
diff --git a/dbms/Common/randomSeed.cpp b/src/Common/randomSeed.cpp
similarity index 100%
rename from dbms/Common/randomSeed.cpp
rename to src/Common/randomSeed.cpp
diff --git a/dbms/Common/randomSeed.h b/src/Common/randomSeed.h
similarity index 100%
rename from dbms/Common/randomSeed.h
rename to src/Common/randomSeed.h
diff --git a/dbms/Common/setThreadName.cpp b/src/Common/setThreadName.cpp
similarity index 100%
rename from dbms/Common/setThreadName.cpp
rename to src/Common/setThreadName.cpp
diff --git a/dbms/Common/setThreadName.h b/src/Common/setThreadName.h
similarity index 100%
rename from dbms/Common/setThreadName.h
rename to src/Common/setThreadName.h
diff --git a/dbms/Common/tests/CMakeLists.txt b/src/Common/tests/CMakeLists.txt
similarity index 100%
rename from dbms/Common/tests/CMakeLists.txt
rename to src/Common/tests/CMakeLists.txt
diff --git a/dbms/Common/tests/arena_with_free_lists.cpp b/src/Common/tests/arena_with_free_lists.cpp
similarity index 100%
rename from dbms/Common/tests/arena_with_free_lists.cpp
rename to src/Common/tests/arena_with_free_lists.cpp
diff --git a/dbms/Common/tests/array_cache.cpp b/src/Common/tests/array_cache.cpp
similarity index 100%
rename from dbms/Common/tests/array_cache.cpp
rename to src/Common/tests/array_cache.cpp
diff --git a/dbms/Common/tests/auto_array.cpp b/src/Common/tests/auto_array.cpp
similarity index 100%
rename from dbms/Common/tests/auto_array.cpp
rename to src/Common/tests/auto_array.cpp
diff --git a/dbms/Common/tests/chaos_sanitizer.cpp b/src/Common/tests/chaos_sanitizer.cpp
similarity index 100%
rename from dbms/Common/tests/chaos_sanitizer.cpp
rename to src/Common/tests/chaos_sanitizer.cpp
diff --git a/dbms/Common/tests/compact_array.cpp b/src/Common/tests/compact_array.cpp
similarity index 100%
rename from dbms/Common/tests/compact_array.cpp
rename to src/Common/tests/compact_array.cpp
diff --git a/dbms/Common/tests/cow_columns.cpp b/src/Common/tests/cow_columns.cpp
similarity index 100%
rename from dbms/Common/tests/cow_columns.cpp
rename to src/Common/tests/cow_columns.cpp
diff --git a/dbms/Common/tests/cow_compositions.cpp b/src/Common/tests/cow_compositions.cpp
similarity index 100%
rename from dbms/Common/tests/cow_compositions.cpp
rename to src/Common/tests/cow_compositions.cpp
diff --git a/dbms/Common/tests/gtest_getMultipleValuesFromConfig.cpp b/src/Common/tests/gtest_getMultipleValuesFromConfig.cpp
similarity index 100%
rename from dbms/Common/tests/gtest_getMultipleValuesFromConfig.cpp
rename to src/Common/tests/gtest_getMultipleValuesFromConfig.cpp
diff --git a/dbms/Common/tests/gtest_global_context.h b/src/Common/tests/gtest_global_context.h
similarity index 100%
rename from dbms/Common/tests/gtest_global_context.h
rename to src/Common/tests/gtest_global_context.h
diff --git a/dbms/Common/tests/gtest_makeRegexpPatternFromGlobs.cpp b/src/Common/tests/gtest_makeRegexpPatternFromGlobs.cpp
similarity index 100%
rename from dbms/Common/tests/gtest_makeRegexpPatternFromGlobs.cpp
rename to src/Common/tests/gtest_makeRegexpPatternFromGlobs.cpp
diff --git a/dbms/Common/tests/gtest_pod_array.cpp b/src/Common/tests/gtest_pod_array.cpp
similarity index 100%
rename from dbms/Common/tests/gtest_pod_array.cpp
rename to src/Common/tests/gtest_pod_array.cpp
diff --git a/dbms/Common/tests/gtest_rw_lock.cpp b/src/Common/tests/gtest_rw_lock.cpp
similarity index 100%
rename from dbms/Common/tests/gtest_rw_lock.cpp
rename to src/Common/tests/gtest_rw_lock.cpp
diff --git a/dbms/Common/tests/gtest_sensitive_data_masker.cpp b/src/Common/tests/gtest_sensitive_data_masker.cpp
similarity index 100%
rename from dbms/Common/tests/gtest_sensitive_data_masker.cpp
rename to src/Common/tests/gtest_sensitive_data_masker.cpp
diff --git a/dbms/Common/tests/gtest_shell_command.cpp b/src/Common/tests/gtest_shell_command.cpp
similarity index 100%
rename from dbms/Common/tests/gtest_shell_command.cpp
rename to src/Common/tests/gtest_shell_command.cpp
diff --git a/dbms/Common/tests/gtest_thread_pool_concurrent_wait.cpp b/src/Common/tests/gtest_thread_pool_concurrent_wait.cpp
similarity index 100%
rename from dbms/Common/tests/gtest_thread_pool_concurrent_wait.cpp
rename to src/Common/tests/gtest_thread_pool_concurrent_wait.cpp
diff --git a/dbms/Common/tests/gtest_thread_pool_global_full.cpp b/src/Common/tests/gtest_thread_pool_global_full.cpp
similarity index 100%
rename from dbms/Common/tests/gtest_thread_pool_global_full.cpp
rename to src/Common/tests/gtest_thread_pool_global_full.cpp
diff --git a/dbms/Common/tests/gtest_thread_pool_limit.cpp b/src/Common/tests/gtest_thread_pool_limit.cpp
similarity index 100%
rename from dbms/Common/tests/gtest_thread_pool_limit.cpp
rename to src/Common/tests/gtest_thread_pool_limit.cpp
diff --git a/dbms/Common/tests/gtest_thread_pool_loop.cpp b/src/Common/tests/gtest_thread_pool_loop.cpp
similarity index 100%
rename from dbms/Common/tests/gtest_thread_pool_loop.cpp
rename to src/Common/tests/gtest_thread_pool_loop.cpp
diff --git a/dbms/Common/tests/gtest_thread_pool_schedule_exception.cpp b/src/Common/tests/gtest_thread_pool_schedule_exception.cpp
similarity index 100%
rename from dbms/Common/tests/gtest_thread_pool_schedule_exception.cpp
rename to src/Common/tests/gtest_thread_pool_schedule_exception.cpp
diff --git a/dbms/Common/tests/gtest_unescapeForFileName.cpp b/src/Common/tests/gtest_unescapeForFileName.cpp
similarity index 100%
rename from dbms/Common/tests/gtest_unescapeForFileName.cpp
rename to src/Common/tests/gtest_unescapeForFileName.cpp
diff --git a/dbms/Common/tests/hash_table.cpp b/src/Common/tests/hash_table.cpp
similarity index 100%
rename from dbms/Common/tests/hash_table.cpp
rename to src/Common/tests/hash_table.cpp
diff --git a/dbms/Common/tests/hashes_test.cpp b/src/Common/tests/hashes_test.cpp
similarity index 100%
rename from dbms/Common/tests/hashes_test.cpp
rename to src/Common/tests/hashes_test.cpp
diff --git a/dbms/Common/tests/int_hashes_perf.cpp b/src/Common/tests/int_hashes_perf.cpp
similarity index 100%
rename from dbms/Common/tests/int_hashes_perf.cpp
rename to src/Common/tests/int_hashes_perf.cpp
diff --git a/dbms/Common/tests/integer_hash_tables_and_hashes.cpp b/src/Common/tests/integer_hash_tables_and_hashes.cpp
similarity index 100%
rename from dbms/Common/tests/integer_hash_tables_and_hashes.cpp
rename to src/Common/tests/integer_hash_tables_and_hashes.cpp
diff --git a/dbms/Common/tests/parallel_aggregation.cpp b/src/Common/tests/parallel_aggregation.cpp
similarity index 100%
rename from dbms/Common/tests/parallel_aggregation.cpp
rename to src/Common/tests/parallel_aggregation.cpp
diff --git a/dbms/Common/tests/parallel_aggregation2.cpp b/src/Common/tests/parallel_aggregation2.cpp
similarity index 100%
rename from dbms/Common/tests/parallel_aggregation2.cpp
rename to src/Common/tests/parallel_aggregation2.cpp
diff --git a/dbms/Common/tests/pod_array.cpp b/src/Common/tests/pod_array.cpp
similarity index 100%
rename from dbms/Common/tests/pod_array.cpp
rename to src/Common/tests/pod_array.cpp
diff --git a/dbms/Common/tests/radix_sort.cpp b/src/Common/tests/radix_sort.cpp
similarity index 100%
rename from dbms/Common/tests/radix_sort.cpp
rename to src/Common/tests/radix_sort.cpp
diff --git a/dbms/Common/tests/simple_cache.cpp b/src/Common/tests/simple_cache.cpp
similarity index 100%
rename from dbms/Common/tests/simple_cache.cpp
rename to src/Common/tests/simple_cache.cpp
diff --git a/dbms/Common/tests/sip_hash_perf.cpp b/src/Common/tests/sip_hash_perf.cpp
similarity index 100%
rename from dbms/Common/tests/sip_hash_perf.cpp
rename to src/Common/tests/sip_hash_perf.cpp
diff --git a/dbms/Common/tests/small_table.cpp b/src/Common/tests/small_table.cpp
similarity index 100%
rename from dbms/Common/tests/small_table.cpp
rename to src/Common/tests/small_table.cpp
diff --git a/dbms/Common/tests/space_saving.cpp b/src/Common/tests/space_saving.cpp
similarity index 100%
rename from dbms/Common/tests/space_saving.cpp
rename to src/Common/tests/space_saving.cpp
diff --git a/dbms/Common/tests/stopwatch.cpp b/src/Common/tests/stopwatch.cpp
similarity index 100%
rename from dbms/Common/tests/stopwatch.cpp
rename to src/Common/tests/stopwatch.cpp
diff --git a/dbms/Common/tests/symbol_index.cpp b/src/Common/tests/symbol_index.cpp
similarity index 100%
rename from dbms/Common/tests/symbol_index.cpp
rename to src/Common/tests/symbol_index.cpp
diff --git a/dbms/Common/tests/thread_creation_latency.cpp b/src/Common/tests/thread_creation_latency.cpp
similarity index 100%
rename from dbms/Common/tests/thread_creation_latency.cpp
rename to src/Common/tests/thread_creation_latency.cpp
diff --git a/dbms/Common/thread_local_rng.cpp b/src/Common/thread_local_rng.cpp
similarity index 100%
rename from dbms/Common/thread_local_rng.cpp
rename to src/Common/thread_local_rng.cpp
diff --git a/dbms/Common/thread_local_rng.h b/src/Common/thread_local_rng.h
similarity index 100%
rename from dbms/Common/thread_local_rng.h
rename to src/Common/thread_local_rng.h
diff --git a/dbms/Common/typeid_cast.h b/src/Common/typeid_cast.h
similarity index 100%
rename from dbms/Common/typeid_cast.h
rename to src/Common/typeid_cast.h
diff --git a/dbms/Compression/CMakeLists.txt b/src/Compression/CMakeLists.txt
similarity index 100%
rename from dbms/Compression/CMakeLists.txt
rename to src/Compression/CMakeLists.txt
diff --git a/dbms/Compression/CachedCompressedReadBuffer.cpp b/src/Compression/CachedCompressedReadBuffer.cpp
similarity index 100%
rename from dbms/Compression/CachedCompressedReadBuffer.cpp
rename to src/Compression/CachedCompressedReadBuffer.cpp
diff --git a/dbms/Compression/CachedCompressedReadBuffer.h b/src/Compression/CachedCompressedReadBuffer.h
similarity index 100%
rename from dbms/Compression/CachedCompressedReadBuffer.h
rename to src/Compression/CachedCompressedReadBuffer.h
diff --git a/dbms/Compression/CompressedReadBuffer.cpp b/src/Compression/CompressedReadBuffer.cpp
similarity index 100%
rename from dbms/Compression/CompressedReadBuffer.cpp
rename to src/Compression/CompressedReadBuffer.cpp
diff --git a/dbms/Compression/CompressedReadBuffer.h b/src/Compression/CompressedReadBuffer.h
similarity index 100%
rename from dbms/Compression/CompressedReadBuffer.h
rename to src/Compression/CompressedReadBuffer.h
diff --git a/dbms/Compression/CompressedReadBufferBase.cpp b/src/Compression/CompressedReadBufferBase.cpp
similarity index 100%
rename from dbms/Compression/CompressedReadBufferBase.cpp
rename to src/Compression/CompressedReadBufferBase.cpp
diff --git a/dbms/Compression/CompressedReadBufferBase.h b/src/Compression/CompressedReadBufferBase.h
similarity index 100%
rename from dbms/Compression/CompressedReadBufferBase.h
rename to src/Compression/CompressedReadBufferBase.h
diff --git a/dbms/Compression/CompressedReadBufferFromFile.cpp b/src/Compression/CompressedReadBufferFromFile.cpp
similarity index 100%
rename from dbms/Compression/CompressedReadBufferFromFile.cpp
rename to src/Compression/CompressedReadBufferFromFile.cpp
diff --git a/dbms/Compression/CompressedReadBufferFromFile.h b/src/Compression/CompressedReadBufferFromFile.h
similarity index 100%
rename from dbms/Compression/CompressedReadBufferFromFile.h
rename to src/Compression/CompressedReadBufferFromFile.h
diff --git a/dbms/Compression/CompressedWriteBuffer.cpp b/src/Compression/CompressedWriteBuffer.cpp
similarity index 100%
rename from dbms/Compression/CompressedWriteBuffer.cpp
rename to src/Compression/CompressedWriteBuffer.cpp
diff --git a/dbms/Compression/CompressedWriteBuffer.h b/src/Compression/CompressedWriteBuffer.h
similarity index 100%
rename from dbms/Compression/CompressedWriteBuffer.h
rename to src/Compression/CompressedWriteBuffer.h
rename from dbms/Compression/CompressedWriteBuffer.h rename to src/Compression/CompressedWriteBuffer.h diff --git a/dbms/Compression/CompressionCodecDelta.cpp b/src/Compression/CompressionCodecDelta.cpp similarity index 100% rename from dbms/Compression/CompressionCodecDelta.cpp rename to src/Compression/CompressionCodecDelta.cpp diff --git a/dbms/Compression/CompressionCodecDelta.h b/src/Compression/CompressionCodecDelta.h similarity index 100% rename from dbms/Compression/CompressionCodecDelta.h rename to src/Compression/CompressionCodecDelta.h diff --git a/dbms/Compression/CompressionCodecDoubleDelta.cpp b/src/Compression/CompressionCodecDoubleDelta.cpp similarity index 100% rename from dbms/Compression/CompressionCodecDoubleDelta.cpp rename to src/Compression/CompressionCodecDoubleDelta.cpp diff --git a/dbms/Compression/CompressionCodecDoubleDelta.h b/src/Compression/CompressionCodecDoubleDelta.h similarity index 100% rename from dbms/Compression/CompressionCodecDoubleDelta.h rename to src/Compression/CompressionCodecDoubleDelta.h diff --git a/dbms/Compression/CompressionCodecGorilla.cpp b/src/Compression/CompressionCodecGorilla.cpp similarity index 100% rename from dbms/Compression/CompressionCodecGorilla.cpp rename to src/Compression/CompressionCodecGorilla.cpp diff --git a/dbms/Compression/CompressionCodecGorilla.h b/src/Compression/CompressionCodecGorilla.h similarity index 100% rename from dbms/Compression/CompressionCodecGorilla.h rename to src/Compression/CompressionCodecGorilla.h diff --git a/dbms/Compression/CompressionCodecLZ4.cpp b/src/Compression/CompressionCodecLZ4.cpp similarity index 100% rename from dbms/Compression/CompressionCodecLZ4.cpp rename to src/Compression/CompressionCodecLZ4.cpp diff --git a/dbms/Compression/CompressionCodecLZ4.h b/src/Compression/CompressionCodecLZ4.h similarity index 100% rename from dbms/Compression/CompressionCodecLZ4.h rename to src/Compression/CompressionCodecLZ4.h diff --git a/dbms/Compression/CompressionCodecMultiple.cpp b/src/Compression/CompressionCodecMultiple.cpp similarity index 100% rename from dbms/Compression/CompressionCodecMultiple.cpp rename to src/Compression/CompressionCodecMultiple.cpp diff --git a/dbms/Compression/CompressionCodecMultiple.h b/src/Compression/CompressionCodecMultiple.h similarity index 100% rename from dbms/Compression/CompressionCodecMultiple.h rename to src/Compression/CompressionCodecMultiple.h diff --git a/dbms/Compression/CompressionCodecNone.cpp b/src/Compression/CompressionCodecNone.cpp similarity index 100% rename from dbms/Compression/CompressionCodecNone.cpp rename to src/Compression/CompressionCodecNone.cpp diff --git a/dbms/Compression/CompressionCodecNone.h b/src/Compression/CompressionCodecNone.h similarity index 100% rename from dbms/Compression/CompressionCodecNone.h rename to src/Compression/CompressionCodecNone.h diff --git a/dbms/Compression/CompressionCodecT64.cpp b/src/Compression/CompressionCodecT64.cpp similarity index 100% rename from dbms/Compression/CompressionCodecT64.cpp rename to src/Compression/CompressionCodecT64.cpp diff --git a/dbms/Compression/CompressionCodecT64.h b/src/Compression/CompressionCodecT64.h similarity index 100% rename from dbms/Compression/CompressionCodecT64.h rename to src/Compression/CompressionCodecT64.h diff --git a/dbms/Compression/CompressionCodecZSTD.cpp b/src/Compression/CompressionCodecZSTD.cpp similarity index 100% rename from dbms/Compression/CompressionCodecZSTD.cpp rename to src/Compression/CompressionCodecZSTD.cpp diff --git 
a/dbms/Compression/CompressionCodecZSTD.h b/src/Compression/CompressionCodecZSTD.h similarity index 100% rename from dbms/Compression/CompressionCodecZSTD.h rename to src/Compression/CompressionCodecZSTD.h diff --git a/dbms/Compression/CompressionFactory.cpp b/src/Compression/CompressionFactory.cpp similarity index 100% rename from dbms/Compression/CompressionFactory.cpp rename to src/Compression/CompressionFactory.cpp diff --git a/dbms/Compression/CompressionFactory.h b/src/Compression/CompressionFactory.h similarity index 100% rename from dbms/Compression/CompressionFactory.h rename to src/Compression/CompressionFactory.h diff --git a/dbms/Compression/CompressionInfo.h b/src/Compression/CompressionInfo.h similarity index 100% rename from dbms/Compression/CompressionInfo.h rename to src/Compression/CompressionInfo.h diff --git a/dbms/Compression/ICompressionCodec.cpp b/src/Compression/ICompressionCodec.cpp similarity index 100% rename from dbms/Compression/ICompressionCodec.cpp rename to src/Compression/ICompressionCodec.cpp diff --git a/dbms/Compression/ICompressionCodec.h b/src/Compression/ICompressionCodec.h similarity index 100% rename from dbms/Compression/ICompressionCodec.h rename to src/Compression/ICompressionCodec.h diff --git a/dbms/Compression/LZ4_decompress_faster.cpp b/src/Compression/LZ4_decompress_faster.cpp similarity index 100% rename from dbms/Compression/LZ4_decompress_faster.cpp rename to src/Compression/LZ4_decompress_faster.cpp diff --git a/dbms/Compression/LZ4_decompress_faster.h b/src/Compression/LZ4_decompress_faster.h similarity index 100% rename from dbms/Compression/LZ4_decompress_faster.h rename to src/Compression/LZ4_decompress_faster.h diff --git a/dbms/Compression/tests/CMakeLists.txt b/src/Compression/tests/CMakeLists.txt similarity index 100% rename from dbms/Compression/tests/CMakeLists.txt rename to src/Compression/tests/CMakeLists.txt diff --git a/dbms/Compression/tests/cached_compressed_read_buffer.cpp b/src/Compression/tests/cached_compressed_read_buffer.cpp similarity index 100% rename from dbms/Compression/tests/cached_compressed_read_buffer.cpp rename to src/Compression/tests/cached_compressed_read_buffer.cpp diff --git a/dbms/Compression/tests/compressed_buffer.cpp b/src/Compression/tests/compressed_buffer.cpp similarity index 100% rename from dbms/Compression/tests/compressed_buffer.cpp rename to src/Compression/tests/compressed_buffer.cpp diff --git a/dbms/Compression/tests/compressed_buffer_fuzz.cpp b/src/Compression/tests/compressed_buffer_fuzz.cpp similarity index 100% rename from dbms/Compression/tests/compressed_buffer_fuzz.cpp rename to src/Compression/tests/compressed_buffer_fuzz.cpp diff --git a/dbms/Compression/tests/gtest_compressionCodec.cpp b/src/Compression/tests/gtest_compressionCodec.cpp similarity index 100% rename from dbms/Compression/tests/gtest_compressionCodec.cpp rename to src/Compression/tests/gtest_compressionCodec.cpp diff --git a/dbms/Core/AccurateComparison.h b/src/Core/AccurateComparison.h similarity index 100% rename from dbms/Core/AccurateComparison.h rename to src/Core/AccurateComparison.h diff --git a/dbms/Core/BackgroundSchedulePool.cpp b/src/Core/BackgroundSchedulePool.cpp similarity index 100% rename from dbms/Core/BackgroundSchedulePool.cpp rename to src/Core/BackgroundSchedulePool.cpp diff --git a/dbms/Core/BackgroundSchedulePool.h b/src/Core/BackgroundSchedulePool.h similarity index 100% rename from dbms/Core/BackgroundSchedulePool.h rename to src/Core/BackgroundSchedulePool.h diff --git 
a/dbms/Core/Block.cpp b/src/Core/Block.cpp similarity index 100% rename from dbms/Core/Block.cpp rename to src/Core/Block.cpp diff --git a/dbms/Core/Block.h b/src/Core/Block.h similarity index 100% rename from dbms/Core/Block.h rename to src/Core/Block.h diff --git a/dbms/Core/BlockInfo.cpp b/src/Core/BlockInfo.cpp similarity index 100% rename from dbms/Core/BlockInfo.cpp rename to src/Core/BlockInfo.cpp diff --git a/dbms/Core/BlockInfo.h b/src/Core/BlockInfo.h similarity index 100% rename from dbms/Core/BlockInfo.h rename to src/Core/BlockInfo.h diff --git a/dbms/Core/CMakeLists.txt b/src/Core/CMakeLists.txt similarity index 100% rename from dbms/Core/CMakeLists.txt rename to src/Core/CMakeLists.txt diff --git a/dbms/Core/ColumnNumbers.h b/src/Core/ColumnNumbers.h similarity index 100% rename from dbms/Core/ColumnNumbers.h rename to src/Core/ColumnNumbers.h diff --git a/dbms/Core/ColumnWithTypeAndName.cpp b/src/Core/ColumnWithTypeAndName.cpp similarity index 100% rename from dbms/Core/ColumnWithTypeAndName.cpp rename to src/Core/ColumnWithTypeAndName.cpp diff --git a/dbms/Core/ColumnWithTypeAndName.h b/src/Core/ColumnWithTypeAndName.h similarity index 100% rename from dbms/Core/ColumnWithTypeAndName.h rename to src/Core/ColumnWithTypeAndName.h diff --git a/dbms/Core/ColumnsWithTypeAndName.h b/src/Core/ColumnsWithTypeAndName.h similarity index 100% rename from dbms/Core/ColumnsWithTypeAndName.h rename to src/Core/ColumnsWithTypeAndName.h diff --git a/dbms/Core/DecimalComparison.h b/src/Core/DecimalComparison.h similarity index 100% rename from dbms/Core/DecimalComparison.h rename to src/Core/DecimalComparison.h diff --git a/dbms/Core/DecimalFunctions.h b/src/Core/DecimalFunctions.h similarity index 100% rename from dbms/Core/DecimalFunctions.h rename to src/Core/DecimalFunctions.h diff --git a/dbms/Core/Defines.h b/src/Core/Defines.h similarity index 100% rename from dbms/Core/Defines.h rename to src/Core/Defines.h diff --git a/dbms/Core/ExternalResultDescription.cpp b/src/Core/ExternalResultDescription.cpp similarity index 100% rename from dbms/Core/ExternalResultDescription.cpp rename to src/Core/ExternalResultDescription.cpp diff --git a/dbms/Core/ExternalResultDescription.h b/src/Core/ExternalResultDescription.h similarity index 100% rename from dbms/Core/ExternalResultDescription.h rename to src/Core/ExternalResultDescription.h diff --git a/dbms/Core/ExternalTable.cpp b/src/Core/ExternalTable.cpp similarity index 100% rename from dbms/Core/ExternalTable.cpp rename to src/Core/ExternalTable.cpp diff --git a/dbms/Core/ExternalTable.h b/src/Core/ExternalTable.h similarity index 100% rename from dbms/Core/ExternalTable.h rename to src/Core/ExternalTable.h diff --git a/dbms/Core/Field.cpp b/src/Core/Field.cpp similarity index 100% rename from dbms/Core/Field.cpp rename to src/Core/Field.cpp diff --git a/dbms/Core/Field.h b/src/Core/Field.h similarity index 100% rename from dbms/Core/Field.h rename to src/Core/Field.h diff --git a/dbms/Core/MySQLProtocol.cpp b/src/Core/MySQLProtocol.cpp similarity index 100% rename from dbms/Core/MySQLProtocol.cpp rename to src/Core/MySQLProtocol.cpp diff --git a/dbms/Core/MySQLProtocol.h b/src/Core/MySQLProtocol.h similarity index 100% rename from dbms/Core/MySQLProtocol.h rename to src/Core/MySQLProtocol.h diff --git a/dbms/Core/Names.h b/src/Core/Names.h similarity index 100% rename from dbms/Core/Names.h rename to src/Core/Names.h diff --git a/dbms/Core/NamesAndTypes.cpp b/src/Core/NamesAndTypes.cpp similarity index 100% rename from 
dbms/Core/NamesAndTypes.cpp rename to src/Core/NamesAndTypes.cpp diff --git a/dbms/Core/NamesAndTypes.h b/src/Core/NamesAndTypes.h similarity index 100% rename from dbms/Core/NamesAndTypes.h rename to src/Core/NamesAndTypes.h diff --git a/dbms/Core/Protocol.h b/src/Core/Protocol.h similarity index 100% rename from dbms/Core/Protocol.h rename to src/Core/Protocol.h diff --git a/dbms/Core/QualifiedTableName.h b/src/Core/QualifiedTableName.h similarity index 100% rename from dbms/Core/QualifiedTableName.h rename to src/Core/QualifiedTableName.h diff --git a/dbms/Core/QueryProcessingStage.h b/src/Core/QueryProcessingStage.h similarity index 100% rename from dbms/Core/QueryProcessingStage.h rename to src/Core/QueryProcessingStage.h diff --git a/dbms/Core/Row.h b/src/Core/Row.h similarity index 100% rename from dbms/Core/Row.h rename to src/Core/Row.h diff --git a/dbms/Core/Settings.cpp b/src/Core/Settings.cpp similarity index 100% rename from dbms/Core/Settings.cpp rename to src/Core/Settings.cpp diff --git a/dbms/Core/Settings.h b/src/Core/Settings.h similarity index 100% rename from dbms/Core/Settings.h rename to src/Core/Settings.h diff --git a/dbms/Core/SettingsCollection.cpp b/src/Core/SettingsCollection.cpp similarity index 100% rename from dbms/Core/SettingsCollection.cpp rename to src/Core/SettingsCollection.cpp diff --git a/dbms/Core/SettingsCollection.h b/src/Core/SettingsCollection.h similarity index 100% rename from dbms/Core/SettingsCollection.h rename to src/Core/SettingsCollection.h diff --git a/dbms/Core/SettingsCollectionImpl.h b/src/Core/SettingsCollectionImpl.h similarity index 100% rename from dbms/Core/SettingsCollectionImpl.h rename to src/Core/SettingsCollectionImpl.h diff --git a/dbms/Core/SortCursor.h b/src/Core/SortCursor.h similarity index 100% rename from dbms/Core/SortCursor.h rename to src/Core/SortCursor.h diff --git a/dbms/Core/SortDescription.h b/src/Core/SortDescription.h similarity index 100% rename from dbms/Core/SortDescription.h rename to src/Core/SortDescription.h diff --git a/dbms/Core/TypeListNumber.h b/src/Core/TypeListNumber.h similarity index 100% rename from dbms/Core/TypeListNumber.h rename to src/Core/TypeListNumber.h diff --git a/dbms/Core/Types.h b/src/Core/Types.h similarity index 100% rename from dbms/Core/Types.h rename to src/Core/Types.h diff --git a/dbms/Core/UUID.h b/src/Core/UUID.h similarity index 100% rename from dbms/Core/UUID.h rename to src/Core/UUID.h diff --git a/dbms/Core/callOnTypeIndex.h b/src/Core/callOnTypeIndex.h similarity index 100% rename from dbms/Core/callOnTypeIndex.h rename to src/Core/callOnTypeIndex.h diff --git a/dbms/Core/config_core.h.in b/src/Core/config_core.h.in similarity index 100% rename from dbms/Core/config_core.h.in rename to src/Core/config_core.h.in diff --git a/dbms/Core/iostream_debug_helpers.cpp b/src/Core/iostream_debug_helpers.cpp similarity index 100% rename from dbms/Core/iostream_debug_helpers.cpp rename to src/Core/iostream_debug_helpers.cpp diff --git a/dbms/Core/iostream_debug_helpers.h b/src/Core/iostream_debug_helpers.h similarity index 100% rename from dbms/Core/iostream_debug_helpers.h rename to src/Core/iostream_debug_helpers.h diff --git a/dbms/Core/tests/CMakeLists.txt b/src/Core/tests/CMakeLists.txt similarity index 100% rename from dbms/Core/tests/CMakeLists.txt rename to src/Core/tests/CMakeLists.txt diff --git a/dbms/Core/tests/field.cpp b/src/Core/tests/field.cpp similarity index 100% rename from dbms/Core/tests/field.cpp rename to src/Core/tests/field.cpp diff --git 
a/dbms/Core/tests/gtest_DecimalFunctions.cpp b/src/Core/tests/gtest_DecimalFunctions.cpp similarity index 100% rename from dbms/Core/tests/gtest_DecimalFunctions.cpp rename to src/Core/tests/gtest_DecimalFunctions.cpp diff --git a/dbms/Core/tests/move_field.cpp b/src/Core/tests/move_field.cpp similarity index 100% rename from dbms/Core/tests/move_field.cpp rename to src/Core/tests/move_field.cpp diff --git a/dbms/Core/tests/string_pool.cpp b/src/Core/tests/string_pool.cpp similarity index 100% rename from dbms/Core/tests/string_pool.cpp rename to src/Core/tests/string_pool.cpp diff --git a/dbms/Core/tests/string_ref_hash.cpp b/src/Core/tests/string_ref_hash.cpp similarity index 100% rename from dbms/Core/tests/string_ref_hash.cpp rename to src/Core/tests/string_ref_hash.cpp diff --git a/dbms/DataStreams/AddingConstColumnBlockInputStream.h b/src/DataStreams/AddingConstColumnBlockInputStream.h similarity index 100% rename from dbms/DataStreams/AddingConstColumnBlockInputStream.h rename to src/DataStreams/AddingConstColumnBlockInputStream.h diff --git a/dbms/DataStreams/AddingDefaultBlockOutputStream.cpp b/src/DataStreams/AddingDefaultBlockOutputStream.cpp similarity index 100% rename from dbms/DataStreams/AddingDefaultBlockOutputStream.cpp rename to src/DataStreams/AddingDefaultBlockOutputStream.cpp diff --git a/dbms/DataStreams/AddingDefaultBlockOutputStream.h b/src/DataStreams/AddingDefaultBlockOutputStream.h similarity index 100% rename from dbms/DataStreams/AddingDefaultBlockOutputStream.h rename to src/DataStreams/AddingDefaultBlockOutputStream.h diff --git a/dbms/DataStreams/AddingDefaultsBlockInputStream.cpp b/src/DataStreams/AddingDefaultsBlockInputStream.cpp similarity index 100% rename from dbms/DataStreams/AddingDefaultsBlockInputStream.cpp rename to src/DataStreams/AddingDefaultsBlockInputStream.cpp diff --git a/dbms/DataStreams/AddingDefaultsBlockInputStream.h b/src/DataStreams/AddingDefaultsBlockInputStream.h similarity index 100% rename from dbms/DataStreams/AddingDefaultsBlockInputStream.h rename to src/DataStreams/AddingDefaultsBlockInputStream.h diff --git a/dbms/DataStreams/AggregatingBlockInputStream.cpp b/src/DataStreams/AggregatingBlockInputStream.cpp similarity index 100% rename from dbms/DataStreams/AggregatingBlockInputStream.cpp rename to src/DataStreams/AggregatingBlockInputStream.cpp diff --git a/dbms/DataStreams/AggregatingBlockInputStream.h b/src/DataStreams/AggregatingBlockInputStream.h similarity index 100% rename from dbms/DataStreams/AggregatingBlockInputStream.h rename to src/DataStreams/AggregatingBlockInputStream.h diff --git a/dbms/DataStreams/AggregatingSortedBlockInputStream.cpp b/src/DataStreams/AggregatingSortedBlockInputStream.cpp similarity index 100% rename from dbms/DataStreams/AggregatingSortedBlockInputStream.cpp rename to src/DataStreams/AggregatingSortedBlockInputStream.cpp diff --git a/dbms/DataStreams/AggregatingSortedBlockInputStream.h b/src/DataStreams/AggregatingSortedBlockInputStream.h similarity index 100% rename from dbms/DataStreams/AggregatingSortedBlockInputStream.h rename to src/DataStreams/AggregatingSortedBlockInputStream.h diff --git a/dbms/DataStreams/AsynchronousBlockInputStream.cpp b/src/DataStreams/AsynchronousBlockInputStream.cpp similarity index 100% rename from dbms/DataStreams/AsynchronousBlockInputStream.cpp rename to src/DataStreams/AsynchronousBlockInputStream.cpp diff --git a/dbms/DataStreams/AsynchronousBlockInputStream.h b/src/DataStreams/AsynchronousBlockInputStream.h similarity index 100% rename from 
dbms/DataStreams/AsynchronousBlockInputStream.h rename to src/DataStreams/AsynchronousBlockInputStream.h diff --git a/dbms/DataStreams/BlockIO.cpp b/src/DataStreams/BlockIO.cpp similarity index 100% rename from dbms/DataStreams/BlockIO.cpp rename to src/DataStreams/BlockIO.cpp diff --git a/dbms/DataStreams/BlockIO.h b/src/DataStreams/BlockIO.h similarity index 100% rename from dbms/DataStreams/BlockIO.h rename to src/DataStreams/BlockIO.h diff --git a/dbms/DataStreams/BlockStreamProfileInfo.cpp b/src/DataStreams/BlockStreamProfileInfo.cpp similarity index 100% rename from dbms/DataStreams/BlockStreamProfileInfo.cpp rename to src/DataStreams/BlockStreamProfileInfo.cpp diff --git a/dbms/DataStreams/BlockStreamProfileInfo.h b/src/DataStreams/BlockStreamProfileInfo.h similarity index 100% rename from dbms/DataStreams/BlockStreamProfileInfo.h rename to src/DataStreams/BlockStreamProfileInfo.h diff --git a/dbms/DataStreams/BlocksBlockInputStream.h b/src/DataStreams/BlocksBlockInputStream.h similarity index 100% rename from dbms/DataStreams/BlocksBlockInputStream.h rename to src/DataStreams/BlocksBlockInputStream.h diff --git a/dbms/DataStreams/BlocksListBlockInputStream.h b/src/DataStreams/BlocksListBlockInputStream.h similarity index 100% rename from dbms/DataStreams/BlocksListBlockInputStream.h rename to src/DataStreams/BlocksListBlockInputStream.h diff --git a/dbms/DataStreams/CMakeLists.txt b/src/DataStreams/CMakeLists.txt similarity index 100% rename from dbms/DataStreams/CMakeLists.txt rename to src/DataStreams/CMakeLists.txt diff --git a/dbms/DataStreams/CheckConstraintsBlockOutputStream.cpp b/src/DataStreams/CheckConstraintsBlockOutputStream.cpp similarity index 100% rename from dbms/DataStreams/CheckConstraintsBlockOutputStream.cpp rename to src/DataStreams/CheckConstraintsBlockOutputStream.cpp diff --git a/dbms/DataStreams/CheckConstraintsBlockOutputStream.h b/src/DataStreams/CheckConstraintsBlockOutputStream.h similarity index 100% rename from dbms/DataStreams/CheckConstraintsBlockOutputStream.h rename to src/DataStreams/CheckConstraintsBlockOutputStream.h diff --git a/dbms/DataStreams/CheckSortedBlockInputStream.cpp b/src/DataStreams/CheckSortedBlockInputStream.cpp similarity index 100% rename from dbms/DataStreams/CheckSortedBlockInputStream.cpp rename to src/DataStreams/CheckSortedBlockInputStream.cpp diff --git a/dbms/DataStreams/CheckSortedBlockInputStream.h b/src/DataStreams/CheckSortedBlockInputStream.h similarity index 100% rename from dbms/DataStreams/CheckSortedBlockInputStream.h rename to src/DataStreams/CheckSortedBlockInputStream.h diff --git a/dbms/DataStreams/CollapsingFinalBlockInputStream.cpp b/src/DataStreams/CollapsingFinalBlockInputStream.cpp similarity index 100% rename from dbms/DataStreams/CollapsingFinalBlockInputStream.cpp rename to src/DataStreams/CollapsingFinalBlockInputStream.cpp diff --git a/dbms/DataStreams/CollapsingFinalBlockInputStream.h b/src/DataStreams/CollapsingFinalBlockInputStream.h similarity index 100% rename from dbms/DataStreams/CollapsingFinalBlockInputStream.h rename to src/DataStreams/CollapsingFinalBlockInputStream.h diff --git a/dbms/DataStreams/CollapsingSortedBlockInputStream.cpp b/src/DataStreams/CollapsingSortedBlockInputStream.cpp similarity index 100% rename from dbms/DataStreams/CollapsingSortedBlockInputStream.cpp rename to src/DataStreams/CollapsingSortedBlockInputStream.cpp diff --git a/dbms/DataStreams/CollapsingSortedBlockInputStream.h b/src/DataStreams/CollapsingSortedBlockInputStream.h similarity index 100% rename from 
dbms/DataStreams/CollapsingSortedBlockInputStream.h rename to src/DataStreams/CollapsingSortedBlockInputStream.h diff --git a/dbms/DataStreams/ColumnGathererStream.cpp b/src/DataStreams/ColumnGathererStream.cpp similarity index 100% rename from dbms/DataStreams/ColumnGathererStream.cpp rename to src/DataStreams/ColumnGathererStream.cpp diff --git a/dbms/DataStreams/ColumnGathererStream.h b/src/DataStreams/ColumnGathererStream.h similarity index 100% rename from dbms/DataStreams/ColumnGathererStream.h rename to src/DataStreams/ColumnGathererStream.h diff --git a/dbms/DataStreams/ConcatBlockInputStream.h b/src/DataStreams/ConcatBlockInputStream.h similarity index 100% rename from dbms/DataStreams/ConcatBlockInputStream.h rename to src/DataStreams/ConcatBlockInputStream.h diff --git a/dbms/DataStreams/ConvertColumnLowCardinalityToFullBlockInputStream.h b/src/DataStreams/ConvertColumnLowCardinalityToFullBlockInputStream.h similarity index 100% rename from dbms/DataStreams/ConvertColumnLowCardinalityToFullBlockInputStream.h rename to src/DataStreams/ConvertColumnLowCardinalityToFullBlockInputStream.h diff --git a/dbms/DataStreams/ConvertingBlockInputStream.cpp b/src/DataStreams/ConvertingBlockInputStream.cpp similarity index 100% rename from dbms/DataStreams/ConvertingBlockInputStream.cpp rename to src/DataStreams/ConvertingBlockInputStream.cpp diff --git a/dbms/DataStreams/ConvertingBlockInputStream.h b/src/DataStreams/ConvertingBlockInputStream.h similarity index 100% rename from dbms/DataStreams/ConvertingBlockInputStream.h rename to src/DataStreams/ConvertingBlockInputStream.h diff --git a/dbms/DataStreams/CountingBlockOutputStream.cpp b/src/DataStreams/CountingBlockOutputStream.cpp similarity index 100% rename from dbms/DataStreams/CountingBlockOutputStream.cpp rename to src/DataStreams/CountingBlockOutputStream.cpp diff --git a/dbms/DataStreams/CountingBlockOutputStream.h b/src/DataStreams/CountingBlockOutputStream.h similarity index 100% rename from dbms/DataStreams/CountingBlockOutputStream.h rename to src/DataStreams/CountingBlockOutputStream.h diff --git a/dbms/DataStreams/CreatingSetsBlockInputStream.cpp b/src/DataStreams/CreatingSetsBlockInputStream.cpp similarity index 100% rename from dbms/DataStreams/CreatingSetsBlockInputStream.cpp rename to src/DataStreams/CreatingSetsBlockInputStream.cpp diff --git a/dbms/DataStreams/CreatingSetsBlockInputStream.h b/src/DataStreams/CreatingSetsBlockInputStream.h similarity index 100% rename from dbms/DataStreams/CreatingSetsBlockInputStream.h rename to src/DataStreams/CreatingSetsBlockInputStream.h diff --git a/dbms/DataStreams/CubeBlockInputStream.cpp b/src/DataStreams/CubeBlockInputStream.cpp similarity index 100% rename from dbms/DataStreams/CubeBlockInputStream.cpp rename to src/DataStreams/CubeBlockInputStream.cpp diff --git a/dbms/DataStreams/CubeBlockInputStream.h b/src/DataStreams/CubeBlockInputStream.h similarity index 100% rename from dbms/DataStreams/CubeBlockInputStream.h rename to src/DataStreams/CubeBlockInputStream.h diff --git a/dbms/DataStreams/DistinctBlockInputStream.cpp b/src/DataStreams/DistinctBlockInputStream.cpp similarity index 100% rename from dbms/DataStreams/DistinctBlockInputStream.cpp rename to src/DataStreams/DistinctBlockInputStream.cpp diff --git a/dbms/DataStreams/DistinctBlockInputStream.h b/src/DataStreams/DistinctBlockInputStream.h similarity index 100% rename from dbms/DataStreams/DistinctBlockInputStream.h rename to src/DataStreams/DistinctBlockInputStream.h diff --git 
a/dbms/DataStreams/DistinctSortedBlockInputStream.cpp b/src/DataStreams/DistinctSortedBlockInputStream.cpp similarity index 100% rename from dbms/DataStreams/DistinctSortedBlockInputStream.cpp rename to src/DataStreams/DistinctSortedBlockInputStream.cpp diff --git a/dbms/DataStreams/DistinctSortedBlockInputStream.h b/src/DataStreams/DistinctSortedBlockInputStream.h similarity index 100% rename from dbms/DataStreams/DistinctSortedBlockInputStream.h rename to src/DataStreams/DistinctSortedBlockInputStream.h diff --git a/dbms/DataStreams/ExecutionSpeedLimits.cpp b/src/DataStreams/ExecutionSpeedLimits.cpp similarity index 100% rename from dbms/DataStreams/ExecutionSpeedLimits.cpp rename to src/DataStreams/ExecutionSpeedLimits.cpp diff --git a/dbms/DataStreams/ExecutionSpeedLimits.h b/src/DataStreams/ExecutionSpeedLimits.h similarity index 100% rename from dbms/DataStreams/ExecutionSpeedLimits.h rename to src/DataStreams/ExecutionSpeedLimits.h diff --git a/dbms/DataStreams/ExpressionBlockInputStream.cpp b/src/DataStreams/ExpressionBlockInputStream.cpp similarity index 100% rename from dbms/DataStreams/ExpressionBlockInputStream.cpp rename to src/DataStreams/ExpressionBlockInputStream.cpp diff --git a/dbms/DataStreams/ExpressionBlockInputStream.h b/src/DataStreams/ExpressionBlockInputStream.h similarity index 100% rename from dbms/DataStreams/ExpressionBlockInputStream.h rename to src/DataStreams/ExpressionBlockInputStream.h diff --git a/dbms/DataStreams/FillingBlockInputStream.cpp b/src/DataStreams/FillingBlockInputStream.cpp similarity index 100% rename from dbms/DataStreams/FillingBlockInputStream.cpp rename to src/DataStreams/FillingBlockInputStream.cpp diff --git a/dbms/DataStreams/FillingBlockInputStream.h b/src/DataStreams/FillingBlockInputStream.h similarity index 100% rename from dbms/DataStreams/FillingBlockInputStream.h rename to src/DataStreams/FillingBlockInputStream.h diff --git a/dbms/DataStreams/FilterBlockInputStream.cpp b/src/DataStreams/FilterBlockInputStream.cpp similarity index 100% rename from dbms/DataStreams/FilterBlockInputStream.cpp rename to src/DataStreams/FilterBlockInputStream.cpp diff --git a/dbms/DataStreams/FilterBlockInputStream.h b/src/DataStreams/FilterBlockInputStream.h similarity index 100% rename from dbms/DataStreams/FilterBlockInputStream.h rename to src/DataStreams/FilterBlockInputStream.h diff --git a/dbms/DataStreams/FilterColumnsBlockInputStream.cpp b/src/DataStreams/FilterColumnsBlockInputStream.cpp similarity index 100% rename from dbms/DataStreams/FilterColumnsBlockInputStream.cpp rename to src/DataStreams/FilterColumnsBlockInputStream.cpp diff --git a/dbms/DataStreams/FilterColumnsBlockInputStream.h b/src/DataStreams/FilterColumnsBlockInputStream.h similarity index 100% rename from dbms/DataStreams/FilterColumnsBlockInputStream.h rename to src/DataStreams/FilterColumnsBlockInputStream.h diff --git a/dbms/DataStreams/FinishSortingBlockInputStream.cpp b/src/DataStreams/FinishSortingBlockInputStream.cpp similarity index 100% rename from dbms/DataStreams/FinishSortingBlockInputStream.cpp rename to src/DataStreams/FinishSortingBlockInputStream.cpp diff --git a/dbms/DataStreams/FinishSortingBlockInputStream.h b/src/DataStreams/FinishSortingBlockInputStream.h similarity index 100% rename from dbms/DataStreams/FinishSortingBlockInputStream.h rename to src/DataStreams/FinishSortingBlockInputStream.h diff --git a/dbms/DataStreams/GraphiteRollupSortedBlockInputStream.cpp b/src/DataStreams/GraphiteRollupSortedBlockInputStream.cpp similarity index 100% rename 
from dbms/DataStreams/GraphiteRollupSortedBlockInputStream.cpp rename to src/DataStreams/GraphiteRollupSortedBlockInputStream.cpp diff --git a/dbms/DataStreams/GraphiteRollupSortedBlockInputStream.h b/src/DataStreams/GraphiteRollupSortedBlockInputStream.h similarity index 100% rename from dbms/DataStreams/GraphiteRollupSortedBlockInputStream.h rename to src/DataStreams/GraphiteRollupSortedBlockInputStream.h diff --git a/dbms/DataStreams/IBlockInputStream.cpp b/src/DataStreams/IBlockInputStream.cpp similarity index 100% rename from dbms/DataStreams/IBlockInputStream.cpp rename to src/DataStreams/IBlockInputStream.cpp diff --git a/dbms/DataStreams/IBlockInputStream.h b/src/DataStreams/IBlockInputStream.h similarity index 100% rename from dbms/DataStreams/IBlockInputStream.h rename to src/DataStreams/IBlockInputStream.h diff --git a/dbms/DataStreams/IBlockOutputStream.h b/src/DataStreams/IBlockOutputStream.h similarity index 100% rename from dbms/DataStreams/IBlockOutputStream.h rename to src/DataStreams/IBlockOutputStream.h diff --git a/dbms/DataStreams/IBlockStream_fwd.h b/src/DataStreams/IBlockStream_fwd.h similarity index 100% rename from dbms/DataStreams/IBlockStream_fwd.h rename to src/DataStreams/IBlockStream_fwd.h diff --git a/dbms/DataStreams/InputStreamFromASTInsertQuery.cpp b/src/DataStreams/InputStreamFromASTInsertQuery.cpp similarity index 100% rename from dbms/DataStreams/InputStreamFromASTInsertQuery.cpp rename to src/DataStreams/InputStreamFromASTInsertQuery.cpp diff --git a/dbms/DataStreams/InputStreamFromASTInsertQuery.h b/src/DataStreams/InputStreamFromASTInsertQuery.h similarity index 100% rename from dbms/DataStreams/InputStreamFromASTInsertQuery.h rename to src/DataStreams/InputStreamFromASTInsertQuery.h diff --git a/dbms/DataStreams/InternalTextLogsRowOutputStream.cpp b/src/DataStreams/InternalTextLogsRowOutputStream.cpp similarity index 100% rename from dbms/DataStreams/InternalTextLogsRowOutputStream.cpp rename to src/DataStreams/InternalTextLogsRowOutputStream.cpp diff --git a/dbms/DataStreams/InternalTextLogsRowOutputStream.h b/src/DataStreams/InternalTextLogsRowOutputStream.h similarity index 100% rename from dbms/DataStreams/InternalTextLogsRowOutputStream.h rename to src/DataStreams/InternalTextLogsRowOutputStream.h diff --git a/dbms/DataStreams/LazyBlockInputStream.h b/src/DataStreams/LazyBlockInputStream.h similarity index 100% rename from dbms/DataStreams/LazyBlockInputStream.h rename to src/DataStreams/LazyBlockInputStream.h diff --git a/dbms/DataStreams/LimitBlockInputStream.cpp b/src/DataStreams/LimitBlockInputStream.cpp similarity index 100% rename from dbms/DataStreams/LimitBlockInputStream.cpp rename to src/DataStreams/LimitBlockInputStream.cpp diff --git a/dbms/DataStreams/LimitBlockInputStream.h b/src/DataStreams/LimitBlockInputStream.h similarity index 100% rename from dbms/DataStreams/LimitBlockInputStream.h rename to src/DataStreams/LimitBlockInputStream.h diff --git a/dbms/DataStreams/LimitByBlockInputStream.cpp b/src/DataStreams/LimitByBlockInputStream.cpp similarity index 100% rename from dbms/DataStreams/LimitByBlockInputStream.cpp rename to src/DataStreams/LimitByBlockInputStream.cpp diff --git a/dbms/DataStreams/LimitByBlockInputStream.h b/src/DataStreams/LimitByBlockInputStream.h similarity index 100% rename from dbms/DataStreams/LimitByBlockInputStream.h rename to src/DataStreams/LimitByBlockInputStream.h diff --git a/dbms/DataStreams/MarkInCompressedFile.h b/src/DataStreams/MarkInCompressedFile.h similarity index 100% rename from 
dbms/DataStreams/MarkInCompressedFile.h rename to src/DataStreams/MarkInCompressedFile.h diff --git a/dbms/DataStreams/MaterializingBlockInputStream.cpp b/src/DataStreams/MaterializingBlockInputStream.cpp similarity index 100% rename from dbms/DataStreams/MaterializingBlockInputStream.cpp rename to src/DataStreams/MaterializingBlockInputStream.cpp diff --git a/dbms/DataStreams/MaterializingBlockInputStream.h b/src/DataStreams/MaterializingBlockInputStream.h similarity index 100% rename from dbms/DataStreams/MaterializingBlockInputStream.h rename to src/DataStreams/MaterializingBlockInputStream.h diff --git a/dbms/DataStreams/MaterializingBlockOutputStream.h b/src/DataStreams/MaterializingBlockOutputStream.h similarity index 100% rename from dbms/DataStreams/MaterializingBlockOutputStream.h rename to src/DataStreams/MaterializingBlockOutputStream.h diff --git a/dbms/DataStreams/MergeSortingBlockInputStream.cpp b/src/DataStreams/MergeSortingBlockInputStream.cpp similarity index 100% rename from dbms/DataStreams/MergeSortingBlockInputStream.cpp rename to src/DataStreams/MergeSortingBlockInputStream.cpp diff --git a/dbms/DataStreams/MergeSortingBlockInputStream.h b/src/DataStreams/MergeSortingBlockInputStream.h similarity index 100% rename from dbms/DataStreams/MergeSortingBlockInputStream.h rename to src/DataStreams/MergeSortingBlockInputStream.h diff --git a/dbms/DataStreams/MergingAggregatedBlockInputStream.cpp b/src/DataStreams/MergingAggregatedBlockInputStream.cpp similarity index 100% rename from dbms/DataStreams/MergingAggregatedBlockInputStream.cpp rename to src/DataStreams/MergingAggregatedBlockInputStream.cpp diff --git a/dbms/DataStreams/MergingAggregatedBlockInputStream.h b/src/DataStreams/MergingAggregatedBlockInputStream.h similarity index 100% rename from dbms/DataStreams/MergingAggregatedBlockInputStream.h rename to src/DataStreams/MergingAggregatedBlockInputStream.h diff --git a/dbms/DataStreams/MergingAggregatedMemoryEfficientBlockInputStream.cpp b/src/DataStreams/MergingAggregatedMemoryEfficientBlockInputStream.cpp similarity index 100% rename from dbms/DataStreams/MergingAggregatedMemoryEfficientBlockInputStream.cpp rename to src/DataStreams/MergingAggregatedMemoryEfficientBlockInputStream.cpp diff --git a/dbms/DataStreams/MergingAggregatedMemoryEfficientBlockInputStream.h b/src/DataStreams/MergingAggregatedMemoryEfficientBlockInputStream.h similarity index 100% rename from dbms/DataStreams/MergingAggregatedMemoryEfficientBlockInputStream.h rename to src/DataStreams/MergingAggregatedMemoryEfficientBlockInputStream.h diff --git a/dbms/DataStreams/MergingSortedBlockInputStream.cpp b/src/DataStreams/MergingSortedBlockInputStream.cpp similarity index 100% rename from dbms/DataStreams/MergingSortedBlockInputStream.cpp rename to src/DataStreams/MergingSortedBlockInputStream.cpp diff --git a/dbms/DataStreams/MergingSortedBlockInputStream.h b/src/DataStreams/MergingSortedBlockInputStream.h similarity index 100% rename from dbms/DataStreams/MergingSortedBlockInputStream.h rename to src/DataStreams/MergingSortedBlockInputStream.h diff --git a/dbms/DataStreams/NativeBlockInputStream.cpp b/src/DataStreams/NativeBlockInputStream.cpp similarity index 100% rename from dbms/DataStreams/NativeBlockInputStream.cpp rename to src/DataStreams/NativeBlockInputStream.cpp diff --git a/dbms/DataStreams/NativeBlockInputStream.h b/src/DataStreams/NativeBlockInputStream.h similarity index 100% rename from dbms/DataStreams/NativeBlockInputStream.h rename to src/DataStreams/NativeBlockInputStream.h diff 
--git a/dbms/DataStreams/NativeBlockOutputStream.cpp b/src/DataStreams/NativeBlockOutputStream.cpp similarity index 100% rename from dbms/DataStreams/NativeBlockOutputStream.cpp rename to src/DataStreams/NativeBlockOutputStream.cpp diff --git a/dbms/DataStreams/NativeBlockOutputStream.h b/src/DataStreams/NativeBlockOutputStream.h similarity index 100% rename from dbms/DataStreams/NativeBlockOutputStream.h rename to src/DataStreams/NativeBlockOutputStream.h diff --git a/dbms/DataStreams/NullAndDoCopyBlockInputStream.h b/src/DataStreams/NullAndDoCopyBlockInputStream.h similarity index 100% rename from dbms/DataStreams/NullAndDoCopyBlockInputStream.h rename to src/DataStreams/NullAndDoCopyBlockInputStream.h diff --git a/dbms/DataStreams/NullBlockInputStream.h b/src/DataStreams/NullBlockInputStream.h similarity index 100% rename from dbms/DataStreams/NullBlockInputStream.h rename to src/DataStreams/NullBlockInputStream.h diff --git a/dbms/DataStreams/NullBlockOutputStream.h b/src/DataStreams/NullBlockOutputStream.h similarity index 100% rename from dbms/DataStreams/NullBlockOutputStream.h rename to src/DataStreams/NullBlockOutputStream.h diff --git a/dbms/DataStreams/OneBlockInputStream.h b/src/DataStreams/OneBlockInputStream.h similarity index 100% rename from dbms/DataStreams/OneBlockInputStream.h rename to src/DataStreams/OneBlockInputStream.h diff --git a/dbms/DataStreams/OwningBlockInputStream.h b/src/DataStreams/OwningBlockInputStream.h similarity index 100% rename from dbms/DataStreams/OwningBlockInputStream.h rename to src/DataStreams/OwningBlockInputStream.h diff --git a/dbms/DataStreams/ParallelAggregatingBlockInputStream.cpp b/src/DataStreams/ParallelAggregatingBlockInputStream.cpp similarity index 100% rename from dbms/DataStreams/ParallelAggregatingBlockInputStream.cpp rename to src/DataStreams/ParallelAggregatingBlockInputStream.cpp diff --git a/dbms/DataStreams/ParallelAggregatingBlockInputStream.h b/src/DataStreams/ParallelAggregatingBlockInputStream.h similarity index 100% rename from dbms/DataStreams/ParallelAggregatingBlockInputStream.h rename to src/DataStreams/ParallelAggregatingBlockInputStream.h diff --git a/dbms/DataStreams/ParallelInputsProcessor.h b/src/DataStreams/ParallelInputsProcessor.h similarity index 100% rename from dbms/DataStreams/ParallelInputsProcessor.h rename to src/DataStreams/ParallelInputsProcessor.h diff --git a/dbms/DataStreams/ParallelParsingBlockInputStream.cpp b/src/DataStreams/ParallelParsingBlockInputStream.cpp similarity index 100% rename from dbms/DataStreams/ParallelParsingBlockInputStream.cpp rename to src/DataStreams/ParallelParsingBlockInputStream.cpp diff --git a/dbms/DataStreams/ParallelParsingBlockInputStream.h b/src/DataStreams/ParallelParsingBlockInputStream.h similarity index 100% rename from dbms/DataStreams/ParallelParsingBlockInputStream.h rename to src/DataStreams/ParallelParsingBlockInputStream.h diff --git a/dbms/DataStreams/PartialSortingBlockInputStream.cpp b/src/DataStreams/PartialSortingBlockInputStream.cpp similarity index 100% rename from dbms/DataStreams/PartialSortingBlockInputStream.cpp rename to src/DataStreams/PartialSortingBlockInputStream.cpp diff --git a/dbms/DataStreams/PartialSortingBlockInputStream.h b/src/DataStreams/PartialSortingBlockInputStream.h similarity index 100% rename from dbms/DataStreams/PartialSortingBlockInputStream.h rename to src/DataStreams/PartialSortingBlockInputStream.h diff --git a/dbms/DataStreams/PushingToViewsBlockOutputStream.cpp b/src/DataStreams/PushingToViewsBlockOutputStream.cpp 
similarity index 100% rename from dbms/DataStreams/PushingToViewsBlockOutputStream.cpp rename to src/DataStreams/PushingToViewsBlockOutputStream.cpp diff --git a/dbms/DataStreams/PushingToViewsBlockOutputStream.h b/src/DataStreams/PushingToViewsBlockOutputStream.h similarity index 100% rename from dbms/DataStreams/PushingToViewsBlockOutputStream.h rename to src/DataStreams/PushingToViewsBlockOutputStream.h diff --git a/dbms/DataStreams/RemoteBlockInputStream.cpp b/src/DataStreams/RemoteBlockInputStream.cpp similarity index 100% rename from dbms/DataStreams/RemoteBlockInputStream.cpp rename to src/DataStreams/RemoteBlockInputStream.cpp diff --git a/dbms/DataStreams/RemoteBlockInputStream.h b/src/DataStreams/RemoteBlockInputStream.h similarity index 100% rename from dbms/DataStreams/RemoteBlockInputStream.h rename to src/DataStreams/RemoteBlockInputStream.h diff --git a/dbms/DataStreams/RemoteBlockOutputStream.cpp b/src/DataStreams/RemoteBlockOutputStream.cpp similarity index 100% rename from dbms/DataStreams/RemoteBlockOutputStream.cpp rename to src/DataStreams/RemoteBlockOutputStream.cpp diff --git a/dbms/DataStreams/RemoteBlockOutputStream.h b/src/DataStreams/RemoteBlockOutputStream.h similarity index 100% rename from dbms/DataStreams/RemoteBlockOutputStream.h rename to src/DataStreams/RemoteBlockOutputStream.h diff --git a/dbms/DataStreams/ReplacingSortedBlockInputStream.cpp b/src/DataStreams/ReplacingSortedBlockInputStream.cpp similarity index 100% rename from dbms/DataStreams/ReplacingSortedBlockInputStream.cpp rename to src/DataStreams/ReplacingSortedBlockInputStream.cpp diff --git a/dbms/DataStreams/ReplacingSortedBlockInputStream.h b/src/DataStreams/ReplacingSortedBlockInputStream.h similarity index 100% rename from dbms/DataStreams/ReplacingSortedBlockInputStream.h rename to src/DataStreams/ReplacingSortedBlockInputStream.h diff --git a/dbms/DataStreams/ReverseBlockInputStream.cpp b/src/DataStreams/ReverseBlockInputStream.cpp similarity index 100% rename from dbms/DataStreams/ReverseBlockInputStream.cpp rename to src/DataStreams/ReverseBlockInputStream.cpp diff --git a/dbms/DataStreams/ReverseBlockInputStream.h b/src/DataStreams/ReverseBlockInputStream.h similarity index 100% rename from dbms/DataStreams/ReverseBlockInputStream.h rename to src/DataStreams/ReverseBlockInputStream.h diff --git a/dbms/DataStreams/RollupBlockInputStream.cpp b/src/DataStreams/RollupBlockInputStream.cpp similarity index 100% rename from dbms/DataStreams/RollupBlockInputStream.cpp rename to src/DataStreams/RollupBlockInputStream.cpp diff --git a/dbms/DataStreams/RollupBlockInputStream.h b/src/DataStreams/RollupBlockInputStream.h similarity index 100% rename from dbms/DataStreams/RollupBlockInputStream.h rename to src/DataStreams/RollupBlockInputStream.h diff --git a/dbms/DataStreams/SizeLimits.cpp b/src/DataStreams/SizeLimits.cpp similarity index 100% rename from dbms/DataStreams/SizeLimits.cpp rename to src/DataStreams/SizeLimits.cpp diff --git a/dbms/DataStreams/SizeLimits.h b/src/DataStreams/SizeLimits.h similarity index 100% rename from dbms/DataStreams/SizeLimits.h rename to src/DataStreams/SizeLimits.h diff --git a/dbms/DataStreams/SquashingBlockInputStream.cpp b/src/DataStreams/SquashingBlockInputStream.cpp similarity index 100% rename from dbms/DataStreams/SquashingBlockInputStream.cpp rename to src/DataStreams/SquashingBlockInputStream.cpp diff --git a/dbms/DataStreams/SquashingBlockInputStream.h b/src/DataStreams/SquashingBlockInputStream.h similarity index 100% rename from 
dbms/DataStreams/SquashingBlockInputStream.h rename to src/DataStreams/SquashingBlockInputStream.h diff --git a/dbms/DataStreams/SquashingBlockOutputStream.cpp b/src/DataStreams/SquashingBlockOutputStream.cpp similarity index 100% rename from dbms/DataStreams/SquashingBlockOutputStream.cpp rename to src/DataStreams/SquashingBlockOutputStream.cpp diff --git a/dbms/DataStreams/SquashingBlockOutputStream.h b/src/DataStreams/SquashingBlockOutputStream.h similarity index 100% rename from dbms/DataStreams/SquashingBlockOutputStream.h rename to src/DataStreams/SquashingBlockOutputStream.h diff --git a/dbms/DataStreams/SquashingTransform.cpp b/src/DataStreams/SquashingTransform.cpp similarity index 100% rename from dbms/DataStreams/SquashingTransform.cpp rename to src/DataStreams/SquashingTransform.cpp diff --git a/dbms/DataStreams/SquashingTransform.h b/src/DataStreams/SquashingTransform.h similarity index 100% rename from dbms/DataStreams/SquashingTransform.h rename to src/DataStreams/SquashingTransform.h diff --git a/dbms/DataStreams/SummingSortedBlockInputStream.cpp b/src/DataStreams/SummingSortedBlockInputStream.cpp similarity index 100% rename from dbms/DataStreams/SummingSortedBlockInputStream.cpp rename to src/DataStreams/SummingSortedBlockInputStream.cpp diff --git a/dbms/DataStreams/SummingSortedBlockInputStream.h b/src/DataStreams/SummingSortedBlockInputStream.h similarity index 100% rename from dbms/DataStreams/SummingSortedBlockInputStream.h rename to src/DataStreams/SummingSortedBlockInputStream.h diff --git a/dbms/DataStreams/TTLBlockInputStream.cpp b/src/DataStreams/TTLBlockInputStream.cpp similarity index 100% rename from dbms/DataStreams/TTLBlockInputStream.cpp rename to src/DataStreams/TTLBlockInputStream.cpp diff --git a/dbms/DataStreams/TTLBlockInputStream.h b/src/DataStreams/TTLBlockInputStream.h similarity index 100% rename from dbms/DataStreams/TTLBlockInputStream.h rename to src/DataStreams/TTLBlockInputStream.h diff --git a/dbms/DataStreams/TemporaryFileStream.h b/src/DataStreams/TemporaryFileStream.h similarity index 100% rename from dbms/DataStreams/TemporaryFileStream.h rename to src/DataStreams/TemporaryFileStream.h diff --git a/dbms/DataStreams/TotalsHavingBlockInputStream.cpp b/src/DataStreams/TotalsHavingBlockInputStream.cpp similarity index 100% rename from dbms/DataStreams/TotalsHavingBlockInputStream.cpp rename to src/DataStreams/TotalsHavingBlockInputStream.cpp diff --git a/dbms/DataStreams/TotalsHavingBlockInputStream.h b/src/DataStreams/TotalsHavingBlockInputStream.h similarity index 100% rename from dbms/DataStreams/TotalsHavingBlockInputStream.h rename to src/DataStreams/TotalsHavingBlockInputStream.h diff --git a/dbms/DataStreams/UnionBlockInputStream.h b/src/DataStreams/UnionBlockInputStream.h similarity index 100% rename from dbms/DataStreams/UnionBlockInputStream.h rename to src/DataStreams/UnionBlockInputStream.h diff --git a/dbms/DataStreams/VersionedCollapsingSortedBlockInputStream.cpp b/src/DataStreams/VersionedCollapsingSortedBlockInputStream.cpp similarity index 100% rename from dbms/DataStreams/VersionedCollapsingSortedBlockInputStream.cpp rename to src/DataStreams/VersionedCollapsingSortedBlockInputStream.cpp diff --git a/dbms/DataStreams/VersionedCollapsingSortedBlockInputStream.h b/src/DataStreams/VersionedCollapsingSortedBlockInputStream.h similarity index 100% rename from dbms/DataStreams/VersionedCollapsingSortedBlockInputStream.h rename to src/DataStreams/VersionedCollapsingSortedBlockInputStream.h diff --git 
a/dbms/DataStreams/copyData.cpp b/src/DataStreams/copyData.cpp similarity index 100% rename from dbms/DataStreams/copyData.cpp rename to src/DataStreams/copyData.cpp diff --git a/dbms/DataStreams/copyData.h b/src/DataStreams/copyData.h similarity index 100% rename from dbms/DataStreams/copyData.h rename to src/DataStreams/copyData.h diff --git a/dbms/DataStreams/finalizeBlock.cpp b/src/DataStreams/finalizeBlock.cpp similarity index 100% rename from dbms/DataStreams/finalizeBlock.cpp rename to src/DataStreams/finalizeBlock.cpp diff --git a/dbms/DataStreams/finalizeBlock.h b/src/DataStreams/finalizeBlock.h similarity index 100% rename from dbms/DataStreams/finalizeBlock.h rename to src/DataStreams/finalizeBlock.h diff --git a/dbms/DataStreams/materializeBlock.cpp b/src/DataStreams/materializeBlock.cpp similarity index 100% rename from dbms/DataStreams/materializeBlock.cpp rename to src/DataStreams/materializeBlock.cpp diff --git a/dbms/DataStreams/materializeBlock.h b/src/DataStreams/materializeBlock.h similarity index 100% rename from dbms/DataStreams/materializeBlock.h rename to src/DataStreams/materializeBlock.h diff --git a/dbms/DataStreams/narrowBlockInputStreams.cpp b/src/DataStreams/narrowBlockInputStreams.cpp similarity index 100% rename from dbms/DataStreams/narrowBlockInputStreams.cpp rename to src/DataStreams/narrowBlockInputStreams.cpp diff --git a/dbms/DataStreams/narrowBlockInputStreams.h b/src/DataStreams/narrowBlockInputStreams.h similarity index 100% rename from dbms/DataStreams/narrowBlockInputStreams.h rename to src/DataStreams/narrowBlockInputStreams.h diff --git a/dbms/DataStreams/processConstants.cpp b/src/DataStreams/processConstants.cpp similarity index 100% rename from dbms/DataStreams/processConstants.cpp rename to src/DataStreams/processConstants.cpp diff --git a/dbms/DataStreams/processConstants.h b/src/DataStreams/processConstants.h similarity index 100% rename from dbms/DataStreams/processConstants.h rename to src/DataStreams/processConstants.h diff --git a/dbms/DataStreams/tests/CMakeLists.txt b/src/DataStreams/tests/CMakeLists.txt similarity index 100% rename from dbms/DataStreams/tests/CMakeLists.txt rename to src/DataStreams/tests/CMakeLists.txt diff --git a/dbms/DataStreams/tests/collapsing_sorted_stream.cpp b/src/DataStreams/tests/collapsing_sorted_stream.cpp similarity index 100% rename from dbms/DataStreams/tests/collapsing_sorted_stream.cpp rename to src/DataStreams/tests/collapsing_sorted_stream.cpp diff --git a/dbms/DataStreams/tests/expression_stream.cpp b/src/DataStreams/tests/expression_stream.cpp similarity index 100% rename from dbms/DataStreams/tests/expression_stream.cpp rename to src/DataStreams/tests/expression_stream.cpp diff --git a/dbms/DataStreams/tests/filter_stream.cpp b/src/DataStreams/tests/filter_stream.cpp similarity index 100% rename from dbms/DataStreams/tests/filter_stream.cpp rename to src/DataStreams/tests/filter_stream.cpp diff --git a/dbms/DataStreams/tests/finish_sorting_stream.cpp b/src/DataStreams/tests/finish_sorting_stream.cpp similarity index 100% rename from dbms/DataStreams/tests/finish_sorting_stream.cpp rename to src/DataStreams/tests/finish_sorting_stream.cpp diff --git a/dbms/DataStreams/tests/gtest_blocks_size_merging_streams.cpp b/src/DataStreams/tests/gtest_blocks_size_merging_streams.cpp similarity index 100% rename from dbms/DataStreams/tests/gtest_blocks_size_merging_streams.cpp rename to src/DataStreams/tests/gtest_blocks_size_merging_streams.cpp diff --git 
a/dbms/DataStreams/tests/gtest_check_sorted_stream.cpp b/src/DataStreams/tests/gtest_check_sorted_stream.cpp similarity index 100% rename from dbms/DataStreams/tests/gtest_check_sorted_stream.cpp rename to src/DataStreams/tests/gtest_check_sorted_stream.cpp diff --git a/dbms/DataStreams/tests/union_stream2.cpp b/src/DataStreams/tests/union_stream2.cpp similarity index 100% rename from dbms/DataStreams/tests/union_stream2.cpp rename to src/DataStreams/tests/union_stream2.cpp diff --git a/dbms/DataTypes/CMakeLists.txt b/src/DataTypes/CMakeLists.txt similarity index 100% rename from dbms/DataTypes/CMakeLists.txt rename to src/DataTypes/CMakeLists.txt diff --git a/dbms/DataTypes/DataTypeAggregateFunction.cpp b/src/DataTypes/DataTypeAggregateFunction.cpp similarity index 100% rename from dbms/DataTypes/DataTypeAggregateFunction.cpp rename to src/DataTypes/DataTypeAggregateFunction.cpp diff --git a/dbms/DataTypes/DataTypeAggregateFunction.h b/src/DataTypes/DataTypeAggregateFunction.h similarity index 100% rename from dbms/DataTypes/DataTypeAggregateFunction.h rename to src/DataTypes/DataTypeAggregateFunction.h diff --git a/dbms/DataTypes/DataTypeArray.cpp b/src/DataTypes/DataTypeArray.cpp similarity index 100% rename from dbms/DataTypes/DataTypeArray.cpp rename to src/DataTypes/DataTypeArray.cpp diff --git a/dbms/DataTypes/DataTypeArray.h b/src/DataTypes/DataTypeArray.h similarity index 100% rename from dbms/DataTypes/DataTypeArray.h rename to src/DataTypes/DataTypeArray.h diff --git a/dbms/DataTypes/DataTypeCustom.h b/src/DataTypes/DataTypeCustom.h similarity index 100% rename from dbms/DataTypes/DataTypeCustom.h rename to src/DataTypes/DataTypeCustom.h diff --git a/dbms/DataTypes/DataTypeCustomIPv4AndIPv6.cpp b/src/DataTypes/DataTypeCustomIPv4AndIPv6.cpp similarity index 100% rename from dbms/DataTypes/DataTypeCustomIPv4AndIPv6.cpp rename to src/DataTypes/DataTypeCustomIPv4AndIPv6.cpp diff --git a/dbms/DataTypes/DataTypeCustomSimpleAggregateFunction.cpp b/src/DataTypes/DataTypeCustomSimpleAggregateFunction.cpp similarity index 100% rename from dbms/DataTypes/DataTypeCustomSimpleAggregateFunction.cpp rename to src/DataTypes/DataTypeCustomSimpleAggregateFunction.cpp diff --git a/dbms/DataTypes/DataTypeCustomSimpleAggregateFunction.h b/src/DataTypes/DataTypeCustomSimpleAggregateFunction.h similarity index 100% rename from dbms/DataTypes/DataTypeCustomSimpleAggregateFunction.h rename to src/DataTypes/DataTypeCustomSimpleAggregateFunction.h diff --git a/dbms/DataTypes/DataTypeCustomSimpleTextSerialization.cpp b/src/DataTypes/DataTypeCustomSimpleTextSerialization.cpp similarity index 100% rename from dbms/DataTypes/DataTypeCustomSimpleTextSerialization.cpp rename to src/DataTypes/DataTypeCustomSimpleTextSerialization.cpp diff --git a/dbms/DataTypes/DataTypeCustomSimpleTextSerialization.h b/src/DataTypes/DataTypeCustomSimpleTextSerialization.h similarity index 100% rename from dbms/DataTypes/DataTypeCustomSimpleTextSerialization.h rename to src/DataTypes/DataTypeCustomSimpleTextSerialization.h diff --git a/dbms/DataTypes/DataTypeDate.cpp b/src/DataTypes/DataTypeDate.cpp similarity index 100% rename from dbms/DataTypes/DataTypeDate.cpp rename to src/DataTypes/DataTypeDate.cpp diff --git a/dbms/DataTypes/DataTypeDate.h b/src/DataTypes/DataTypeDate.h similarity index 100% rename from dbms/DataTypes/DataTypeDate.h rename to src/DataTypes/DataTypeDate.h diff --git a/dbms/DataTypes/DataTypeDateTime.cpp b/src/DataTypes/DataTypeDateTime.cpp similarity index 100% rename from 
dbms/DataTypes/DataTypeDateTime.cpp
rename to src/DataTypes/DataTypeDateTime.cpp
diff --git a/dbms/DataTypes/DataTypeDateTime.h b/src/DataTypes/DataTypeDateTime.h
similarity index 100%
rename from dbms/DataTypes/DataTypeDateTime.h
rename to src/DataTypes/DataTypeDateTime.h
diff --git a/dbms/DataTypes/DataTypeDateTime64.cpp b/src/DataTypes/DataTypeDateTime64.cpp
similarity index 100%
rename from dbms/DataTypes/DataTypeDateTime64.cpp
rename to src/DataTypes/DataTypeDateTime64.cpp
diff --git a/dbms/DataTypes/DataTypeDateTime64.h b/src/DataTypes/DataTypeDateTime64.h
similarity index 100%
rename from dbms/DataTypes/DataTypeDateTime64.h
rename to src/DataTypes/DataTypeDateTime64.h
diff --git a/dbms/DataTypes/DataTypeDecimalBase.cpp b/src/DataTypes/DataTypeDecimalBase.cpp
similarity index 100%
rename from dbms/DataTypes/DataTypeDecimalBase.cpp
rename to src/DataTypes/DataTypeDecimalBase.cpp
diff --git a/dbms/DataTypes/DataTypeDecimalBase.h b/src/DataTypes/DataTypeDecimalBase.h
similarity index 100%
rename from dbms/DataTypes/DataTypeDecimalBase.h
rename to src/DataTypes/DataTypeDecimalBase.h
diff --git a/dbms/DataTypes/DataTypeEnum.cpp b/src/DataTypes/DataTypeEnum.cpp
similarity index 100%
rename from dbms/DataTypes/DataTypeEnum.cpp
rename to src/DataTypes/DataTypeEnum.cpp
diff --git a/dbms/DataTypes/DataTypeEnum.h b/src/DataTypes/DataTypeEnum.h
similarity index 100%
rename from dbms/DataTypes/DataTypeEnum.h
rename to src/DataTypes/DataTypeEnum.h
diff --git a/dbms/DataTypes/DataTypeFactory.cpp b/src/DataTypes/DataTypeFactory.cpp
similarity index 100%
rename from dbms/DataTypes/DataTypeFactory.cpp
rename to src/DataTypes/DataTypeFactory.cpp
diff --git a/dbms/DataTypes/DataTypeFactory.h b/src/DataTypes/DataTypeFactory.h
similarity index 100%
rename from dbms/DataTypes/DataTypeFactory.h
rename to src/DataTypes/DataTypeFactory.h
diff --git a/dbms/DataTypes/DataTypeFixedString.cpp b/src/DataTypes/DataTypeFixedString.cpp
similarity index 100%
rename from dbms/DataTypes/DataTypeFixedString.cpp
rename to src/DataTypes/DataTypeFixedString.cpp
diff --git a/dbms/DataTypes/DataTypeFixedString.h b/src/DataTypes/DataTypeFixedString.h
similarity index 100%
rename from dbms/DataTypes/DataTypeFixedString.h
rename to src/DataTypes/DataTypeFixedString.h
diff --git a/dbms/DataTypes/DataTypeFunction.cpp b/src/DataTypes/DataTypeFunction.cpp
similarity index 100%
rename from dbms/DataTypes/DataTypeFunction.cpp
rename to src/DataTypes/DataTypeFunction.cpp
diff --git a/dbms/DataTypes/DataTypeFunction.h b/src/DataTypes/DataTypeFunction.h
similarity index 100%
rename from dbms/DataTypes/DataTypeFunction.h
rename to src/DataTypes/DataTypeFunction.h
diff --git a/dbms/DataTypes/DataTypeInterval.cpp b/src/DataTypes/DataTypeInterval.cpp
similarity index 100%
rename from dbms/DataTypes/DataTypeInterval.cpp
rename to src/DataTypes/DataTypeInterval.cpp
diff --git a/dbms/DataTypes/DataTypeInterval.h b/src/DataTypes/DataTypeInterval.h
similarity index 100%
rename from dbms/DataTypes/DataTypeInterval.h
rename to src/DataTypes/DataTypeInterval.h
diff --git a/dbms/DataTypes/DataTypeLowCardinality.cpp b/src/DataTypes/DataTypeLowCardinality.cpp
similarity index 100%
rename from dbms/DataTypes/DataTypeLowCardinality.cpp
rename to src/DataTypes/DataTypeLowCardinality.cpp
diff --git a/dbms/DataTypes/DataTypeLowCardinality.h b/src/DataTypes/DataTypeLowCardinality.h
similarity index 100%
rename from dbms/DataTypes/DataTypeLowCardinality.h
rename to src/DataTypes/DataTypeLowCardinality.h
diff --git a/dbms/DataTypes/DataTypeLowCardinalityHelpers.cpp b/src/DataTypes/DataTypeLowCardinalityHelpers.cpp
similarity index 100%
rename from dbms/DataTypes/DataTypeLowCardinalityHelpers.cpp
rename to src/DataTypes/DataTypeLowCardinalityHelpers.cpp
diff --git a/dbms/DataTypes/DataTypeNothing.cpp b/src/DataTypes/DataTypeNothing.cpp
similarity index 100%
rename from dbms/DataTypes/DataTypeNothing.cpp
rename to src/DataTypes/DataTypeNothing.cpp
diff --git a/dbms/DataTypes/DataTypeNothing.h b/src/DataTypes/DataTypeNothing.h
similarity index 100%
rename from dbms/DataTypes/DataTypeNothing.h
rename to src/DataTypes/DataTypeNothing.h
diff --git a/dbms/DataTypes/DataTypeNullable.cpp b/src/DataTypes/DataTypeNullable.cpp
similarity index 100%
rename from dbms/DataTypes/DataTypeNullable.cpp
rename to src/DataTypes/DataTypeNullable.cpp
diff --git a/dbms/DataTypes/DataTypeNullable.h b/src/DataTypes/DataTypeNullable.h
similarity index 100%
rename from dbms/DataTypes/DataTypeNullable.h
rename to src/DataTypes/DataTypeNullable.h
diff --git a/dbms/DataTypes/DataTypeNumberBase.cpp b/src/DataTypes/DataTypeNumberBase.cpp
similarity index 100%
rename from dbms/DataTypes/DataTypeNumberBase.cpp
rename to src/DataTypes/DataTypeNumberBase.cpp
diff --git a/dbms/DataTypes/DataTypeNumberBase.h b/src/DataTypes/DataTypeNumberBase.h
similarity index 100%
rename from dbms/DataTypes/DataTypeNumberBase.h
rename to src/DataTypes/DataTypeNumberBase.h
diff --git a/dbms/DataTypes/DataTypeSet.h b/src/DataTypes/DataTypeSet.h
similarity index 100%
rename from dbms/DataTypes/DataTypeSet.h
rename to src/DataTypes/DataTypeSet.h
diff --git a/dbms/DataTypes/DataTypeString.cpp b/src/DataTypes/DataTypeString.cpp
similarity index 100%
rename from dbms/DataTypes/DataTypeString.cpp
rename to src/DataTypes/DataTypeString.cpp
diff --git a/dbms/DataTypes/DataTypeString.h b/src/DataTypes/DataTypeString.h
similarity index 100%
rename from dbms/DataTypes/DataTypeString.h
rename to src/DataTypes/DataTypeString.h
diff --git a/dbms/DataTypes/DataTypeTuple.cpp b/src/DataTypes/DataTypeTuple.cpp
similarity index 100%
rename from dbms/DataTypes/DataTypeTuple.cpp
rename to src/DataTypes/DataTypeTuple.cpp
diff --git a/dbms/DataTypes/DataTypeTuple.h b/src/DataTypes/DataTypeTuple.h
similarity index 100%
rename from dbms/DataTypes/DataTypeTuple.h
rename to src/DataTypes/DataTypeTuple.h
diff --git a/dbms/DataTypes/DataTypeUUID.cpp b/src/DataTypes/DataTypeUUID.cpp
similarity index 100%
rename from dbms/DataTypes/DataTypeUUID.cpp
rename to src/DataTypes/DataTypeUUID.cpp
diff --git a/dbms/DataTypes/DataTypeUUID.h b/src/DataTypes/DataTypeUUID.h
similarity index 100%
rename from dbms/DataTypes/DataTypeUUID.h
rename to src/DataTypes/DataTypeUUID.h
diff --git a/dbms/DataTypes/DataTypeWithSimpleSerialization.h b/src/DataTypes/DataTypeWithSimpleSerialization.h
similarity index 100%
rename from dbms/DataTypes/DataTypeWithSimpleSerialization.h
rename to src/DataTypes/DataTypeWithSimpleSerialization.h
diff --git a/dbms/DataTypes/DataTypesDecimal.cpp b/src/DataTypes/DataTypesDecimal.cpp
similarity index 100%
rename from dbms/DataTypes/DataTypesDecimal.cpp
rename to src/DataTypes/DataTypesDecimal.cpp
diff --git a/dbms/DataTypes/DataTypesDecimal.h b/src/DataTypes/DataTypesDecimal.h
similarity index 100%
rename from dbms/DataTypes/DataTypesDecimal.h
rename to src/DataTypes/DataTypesDecimal.h
diff --git a/dbms/DataTypes/DataTypesNumber.cpp b/src/DataTypes/DataTypesNumber.cpp
similarity index 100%
rename from dbms/DataTypes/DataTypesNumber.cpp
rename to src/DataTypes/DataTypesNumber.cpp
diff --git a/dbms/DataTypes/DataTypesNumber.h b/src/DataTypes/DataTypesNumber.h
similarity index 100%
rename from dbms/DataTypes/DataTypesNumber.h
rename to src/DataTypes/DataTypesNumber.h
diff --git a/dbms/DataTypes/FieldToDataType.cpp b/src/DataTypes/FieldToDataType.cpp
similarity index 100%
rename from dbms/DataTypes/FieldToDataType.cpp
rename to src/DataTypes/FieldToDataType.cpp
diff --git a/dbms/DataTypes/FieldToDataType.h b/src/DataTypes/FieldToDataType.h
similarity index 100%
rename from dbms/DataTypes/FieldToDataType.h
rename to src/DataTypes/FieldToDataType.h
diff --git a/dbms/DataTypes/IDataType.cpp b/src/DataTypes/IDataType.cpp
similarity index 100%
rename from dbms/DataTypes/IDataType.cpp
rename to src/DataTypes/IDataType.cpp
diff --git a/dbms/DataTypes/IDataType.h b/src/DataTypes/IDataType.h
similarity index 100%
rename from dbms/DataTypes/IDataType.h
rename to src/DataTypes/IDataType.h
diff --git a/dbms/DataTypes/IDataTypeDummy.h b/src/DataTypes/IDataTypeDummy.h
similarity index 100%
rename from dbms/DataTypes/IDataTypeDummy.h
rename to src/DataTypes/IDataTypeDummy.h
diff --git a/dbms/DataTypes/Native.h b/src/DataTypes/Native.h
similarity index 100%
rename from dbms/DataTypes/Native.h
rename to src/DataTypes/Native.h
diff --git a/dbms/DataTypes/NestedUtils.cpp b/src/DataTypes/NestedUtils.cpp
similarity index 100%
rename from dbms/DataTypes/NestedUtils.cpp
rename to src/DataTypes/NestedUtils.cpp
diff --git a/dbms/DataTypes/NestedUtils.h b/src/DataTypes/NestedUtils.h
similarity index 100%
rename from dbms/DataTypes/NestedUtils.h
rename to src/DataTypes/NestedUtils.h
diff --git a/dbms/DataTypes/NumberTraits.h b/src/DataTypes/NumberTraits.h
similarity index 100%
rename from dbms/DataTypes/NumberTraits.h
rename to src/DataTypes/NumberTraits.h
diff --git a/dbms/DataTypes/convertMySQLDataType.cpp b/src/DataTypes/convertMySQLDataType.cpp
similarity index 100%
rename from dbms/DataTypes/convertMySQLDataType.cpp
rename to src/DataTypes/convertMySQLDataType.cpp
diff --git a/dbms/DataTypes/convertMySQLDataType.h b/src/DataTypes/convertMySQLDataType.h
similarity index 100%
rename from dbms/DataTypes/convertMySQLDataType.h
rename to src/DataTypes/convertMySQLDataType.h
diff --git a/dbms/DataTypes/getLeastSupertype.cpp b/src/DataTypes/getLeastSupertype.cpp
similarity index 100%
rename from dbms/DataTypes/getLeastSupertype.cpp
rename to src/DataTypes/getLeastSupertype.cpp
diff --git a/dbms/DataTypes/getLeastSupertype.h b/src/DataTypes/getLeastSupertype.h
similarity index 100%
rename from dbms/DataTypes/getLeastSupertype.h
rename to src/DataTypes/getLeastSupertype.h
diff --git a/dbms/DataTypes/getMostSubtype.cpp b/src/DataTypes/getMostSubtype.cpp
similarity index 100%
rename from dbms/DataTypes/getMostSubtype.cpp
rename to src/DataTypes/getMostSubtype.cpp
diff --git a/dbms/DataTypes/getMostSubtype.h b/src/DataTypes/getMostSubtype.h
similarity index 100%
rename from dbms/DataTypes/getMostSubtype.h
rename to src/DataTypes/getMostSubtype.h
diff --git a/dbms/DataTypes/tests/CMakeLists.txt b/src/DataTypes/tests/CMakeLists.txt
similarity index 100%
rename from dbms/DataTypes/tests/CMakeLists.txt
rename to src/DataTypes/tests/CMakeLists.txt
diff --git a/dbms/DataTypes/tests/data_type_string.cpp b/src/DataTypes/tests/data_type_string.cpp
similarity index 100%
rename from dbms/DataTypes/tests/data_type_string.cpp
rename to src/DataTypes/tests/data_type_string.cpp
diff --git a/dbms/DataTypes/tests/data_types_number_fixed.cpp b/src/DataTypes/tests/data_types_number_fixed.cpp
similarity index 100%
rename from dbms/DataTypes/tests/data_types_number_fixed.cpp
rename to src/DataTypes/tests/data_types_number_fixed.cpp
diff --git a/dbms/DataTypes/tests/gtest_data_type_get_common_type.cpp b/src/DataTypes/tests/gtest_data_type_get_common_type.cpp
similarity index 100%
rename from dbms/DataTypes/tests/gtest_data_type_get_common_type.cpp
rename to src/DataTypes/tests/gtest_data_type_get_common_type.cpp
diff --git a/dbms/Databases/DatabaseDictionary.cpp b/src/Databases/DatabaseDictionary.cpp
similarity index 100%
rename from dbms/Databases/DatabaseDictionary.cpp
rename to src/Databases/DatabaseDictionary.cpp
diff --git a/dbms/Databases/DatabaseDictionary.h b/src/Databases/DatabaseDictionary.h
similarity index 100%
rename from dbms/Databases/DatabaseDictionary.h
rename to src/Databases/DatabaseDictionary.h
diff --git a/dbms/Databases/DatabaseFactory.cpp b/src/Databases/DatabaseFactory.cpp
similarity index 100%
rename from dbms/Databases/DatabaseFactory.cpp
rename to src/Databases/DatabaseFactory.cpp
diff --git a/dbms/Databases/DatabaseFactory.h b/src/Databases/DatabaseFactory.h
similarity index 100%
rename from dbms/Databases/DatabaseFactory.h
rename to src/Databases/DatabaseFactory.h
diff --git a/dbms/Databases/DatabaseLazy.cpp b/src/Databases/DatabaseLazy.cpp
similarity index 100%
rename from dbms/Databases/DatabaseLazy.cpp
rename to src/Databases/DatabaseLazy.cpp
diff --git a/dbms/Databases/DatabaseLazy.h b/src/Databases/DatabaseLazy.h
similarity index 100%
rename from dbms/Databases/DatabaseLazy.h
rename to src/Databases/DatabaseLazy.h
diff --git a/dbms/Databases/DatabaseMemory.cpp b/src/Databases/DatabaseMemory.cpp
similarity index 100%
rename from dbms/Databases/DatabaseMemory.cpp
rename to src/Databases/DatabaseMemory.cpp
diff --git a/dbms/Databases/DatabaseMemory.h b/src/Databases/DatabaseMemory.h
similarity index 100%
rename from dbms/Databases/DatabaseMemory.h
rename to src/Databases/DatabaseMemory.h
diff --git a/dbms/Databases/DatabaseMySQL.cpp b/src/Databases/DatabaseMySQL.cpp
similarity index 100%
rename from dbms/Databases/DatabaseMySQL.cpp
rename to src/Databases/DatabaseMySQL.cpp
diff --git a/dbms/Databases/DatabaseMySQL.h b/src/Databases/DatabaseMySQL.h
similarity index 100%
rename from dbms/Databases/DatabaseMySQL.h
rename to src/Databases/DatabaseMySQL.h
diff --git a/dbms/Databases/DatabaseOnDisk.cpp b/src/Databases/DatabaseOnDisk.cpp
similarity index 100%
rename from dbms/Databases/DatabaseOnDisk.cpp
rename to src/Databases/DatabaseOnDisk.cpp
diff --git a/dbms/Databases/DatabaseOnDisk.h b/src/Databases/DatabaseOnDisk.h
similarity index 100%
rename from dbms/Databases/DatabaseOnDisk.h
rename to src/Databases/DatabaseOnDisk.h
diff --git a/dbms/Databases/DatabaseOrdinary.cpp b/src/Databases/DatabaseOrdinary.cpp
similarity index 100%
rename from dbms/Databases/DatabaseOrdinary.cpp
rename to src/Databases/DatabaseOrdinary.cpp
diff --git a/dbms/Databases/DatabaseOrdinary.h b/src/Databases/DatabaseOrdinary.h
similarity index 100%
rename from dbms/Databases/DatabaseOrdinary.h
rename to src/Databases/DatabaseOrdinary.h
diff --git a/dbms/Databases/DatabaseWithDictionaries.cpp b/src/Databases/DatabaseWithDictionaries.cpp
similarity index 100%
rename from dbms/Databases/DatabaseWithDictionaries.cpp
rename to src/Databases/DatabaseWithDictionaries.cpp
diff --git a/dbms/Databases/DatabaseWithDictionaries.h b/src/Databases/DatabaseWithDictionaries.h
similarity index 100%
rename from dbms/Databases/DatabaseWithDictionaries.h
rename to src/Databases/DatabaseWithDictionaries.h
diff --git a/dbms/Databases/DatabasesCommon.cpp b/src/Databases/DatabasesCommon.cpp
similarity index 100%
rename from dbms/Databases/DatabasesCommon.cpp
rename to src/Databases/DatabasesCommon.cpp
diff --git a/dbms/Databases/DatabasesCommon.h b/src/Databases/DatabasesCommon.h
similarity index 100%
rename from dbms/Databases/DatabasesCommon.h
rename to src/Databases/DatabasesCommon.h
diff --git a/dbms/Databases/IDatabase.h b/src/Databases/IDatabase.h
similarity index 100%
rename from dbms/Databases/IDatabase.h
rename to src/Databases/IDatabase.h
diff --git a/dbms/Dictionaries/CMakeLists.txt b/src/Dictionaries/CMakeLists.txt
similarity index 100%
rename from dbms/Dictionaries/CMakeLists.txt
rename to src/Dictionaries/CMakeLists.txt
diff --git a/dbms/Dictionaries/CacheDictionary.cpp b/src/Dictionaries/CacheDictionary.cpp
similarity index 100%
rename from dbms/Dictionaries/CacheDictionary.cpp
rename to src/Dictionaries/CacheDictionary.cpp
diff --git a/dbms/Dictionaries/CacheDictionary.h b/src/Dictionaries/CacheDictionary.h
similarity index 100%
rename from dbms/Dictionaries/CacheDictionary.h
rename to src/Dictionaries/CacheDictionary.h
diff --git a/dbms/Dictionaries/CacheDictionary.inc.h b/src/Dictionaries/CacheDictionary.inc.h
similarity index 100%
rename from dbms/Dictionaries/CacheDictionary.inc.h
rename to src/Dictionaries/CacheDictionary.inc.h
diff --git a/dbms/Dictionaries/CacheDictionary_generate1.cpp.in b/src/Dictionaries/CacheDictionary_generate1.cpp.in
similarity index 100%
rename from dbms/Dictionaries/CacheDictionary_generate1.cpp.in
rename to src/Dictionaries/CacheDictionary_generate1.cpp.in
diff --git a/dbms/Dictionaries/CacheDictionary_generate2.cpp.in b/src/Dictionaries/CacheDictionary_generate2.cpp.in
similarity index 100%
rename from dbms/Dictionaries/CacheDictionary_generate2.cpp.in
rename to src/Dictionaries/CacheDictionary_generate2.cpp.in
diff --git a/dbms/Dictionaries/CacheDictionary_generate3.cpp.in b/src/Dictionaries/CacheDictionary_generate3.cpp.in
similarity index 100%
rename from dbms/Dictionaries/CacheDictionary_generate3.cpp.in
rename to src/Dictionaries/CacheDictionary_generate3.cpp.in
diff --git a/dbms/Dictionaries/ClickHouseDictionarySource.cpp b/src/Dictionaries/ClickHouseDictionarySource.cpp
similarity index 100%
rename from dbms/Dictionaries/ClickHouseDictionarySource.cpp
rename to src/Dictionaries/ClickHouseDictionarySource.cpp
diff --git a/dbms/Dictionaries/ClickHouseDictionarySource.h b/src/Dictionaries/ClickHouseDictionarySource.h
similarity index 100%
rename from dbms/Dictionaries/ClickHouseDictionarySource.h
rename to src/Dictionaries/ClickHouseDictionarySource.h
diff --git a/dbms/Dictionaries/ComplexKeyCacheDictionary.cpp b/src/Dictionaries/ComplexKeyCacheDictionary.cpp
similarity index 100%
rename from dbms/Dictionaries/ComplexKeyCacheDictionary.cpp
rename to src/Dictionaries/ComplexKeyCacheDictionary.cpp
diff --git a/dbms/Dictionaries/ComplexKeyCacheDictionary.h b/src/Dictionaries/ComplexKeyCacheDictionary.h
similarity index 100%
rename from dbms/Dictionaries/ComplexKeyCacheDictionary.h
rename to src/Dictionaries/ComplexKeyCacheDictionary.h
diff --git a/dbms/Dictionaries/ComplexKeyCacheDictionary_createAttributeWithType.cpp b/src/Dictionaries/ComplexKeyCacheDictionary_createAttributeWithType.cpp
similarity index 100%
rename from dbms/Dictionaries/ComplexKeyCacheDictionary_createAttributeWithType.cpp
rename to src/Dictionaries/ComplexKeyCacheDictionary_createAttributeWithType.cpp
diff --git a/dbms/Dictionaries/ComplexKeyCacheDictionary_generate1.cpp.in b/src/Dictionaries/ComplexKeyCacheDictionary_generate1.cpp.in
similarity index 100%
rename from dbms/Dictionaries/ComplexKeyCacheDictionary_generate1.cpp.in
rename to src/Dictionaries/ComplexKeyCacheDictionary_generate1.cpp.in
diff --git a/dbms/Dictionaries/ComplexKeyCacheDictionary_generate2.cpp.in b/src/Dictionaries/ComplexKeyCacheDictionary_generate2.cpp.in
similarity index 100%
rename from dbms/Dictionaries/ComplexKeyCacheDictionary_generate2.cpp.in
rename to src/Dictionaries/ComplexKeyCacheDictionary_generate2.cpp.in
diff --git a/dbms/Dictionaries/ComplexKeyCacheDictionary_generate3.cpp.in b/src/Dictionaries/ComplexKeyCacheDictionary_generate3.cpp.in
similarity index 100%
rename from dbms/Dictionaries/ComplexKeyCacheDictionary_generate3.cpp.in
rename to src/Dictionaries/ComplexKeyCacheDictionary_generate3.cpp.in
diff --git a/dbms/Dictionaries/ComplexKeyCacheDictionary_setAttributeValue.cpp b/src/Dictionaries/ComplexKeyCacheDictionary_setAttributeValue.cpp
similarity index 100%
rename from dbms/Dictionaries/ComplexKeyCacheDictionary_setAttributeValue.cpp
rename to src/Dictionaries/ComplexKeyCacheDictionary_setAttributeValue.cpp
diff --git a/dbms/Dictionaries/ComplexKeyCacheDictionary_setDefaultAttributeValue.cpp b/src/Dictionaries/ComplexKeyCacheDictionary_setDefaultAttributeValue.cpp
similarity index 100%
rename from dbms/Dictionaries/ComplexKeyCacheDictionary_setDefaultAttributeValue.cpp
rename to src/Dictionaries/ComplexKeyCacheDictionary_setDefaultAttributeValue.cpp
diff --git a/dbms/Dictionaries/ComplexKeyHashedDictionary.cpp b/src/Dictionaries/ComplexKeyHashedDictionary.cpp
similarity index 100%
rename from dbms/Dictionaries/ComplexKeyHashedDictionary.cpp
rename to src/Dictionaries/ComplexKeyHashedDictionary.cpp
diff --git a/dbms/Dictionaries/ComplexKeyHashedDictionary.h b/src/Dictionaries/ComplexKeyHashedDictionary.h
similarity index 100%
rename from dbms/Dictionaries/ComplexKeyHashedDictionary.h
rename to src/Dictionaries/ComplexKeyHashedDictionary.h
diff --git a/dbms/Dictionaries/DictionaryBlockInputStream.h b/src/Dictionaries/DictionaryBlockInputStream.h
similarity index 100%
rename from dbms/Dictionaries/DictionaryBlockInputStream.h
rename to src/Dictionaries/DictionaryBlockInputStream.h
diff --git a/dbms/Dictionaries/DictionaryBlockInputStreamBase.cpp b/src/Dictionaries/DictionaryBlockInputStreamBase.cpp
similarity index 100%
rename from dbms/Dictionaries/DictionaryBlockInputStreamBase.cpp
rename to src/Dictionaries/DictionaryBlockInputStreamBase.cpp
diff --git a/dbms/Dictionaries/DictionaryBlockInputStreamBase.h b/src/Dictionaries/DictionaryBlockInputStreamBase.h
similarity index 100%
rename from dbms/Dictionaries/DictionaryBlockInputStreamBase.h
rename to src/Dictionaries/DictionaryBlockInputStreamBase.h
diff --git a/dbms/Dictionaries/DictionaryFactory.cpp b/src/Dictionaries/DictionaryFactory.cpp
similarity index 100%
rename from dbms/Dictionaries/DictionaryFactory.cpp
rename to src/Dictionaries/DictionaryFactory.cpp
diff --git a/dbms/Dictionaries/DictionaryFactory.h b/src/Dictionaries/DictionaryFactory.h
similarity index 100%
rename from dbms/Dictionaries/DictionaryFactory.h
rename to src/Dictionaries/DictionaryFactory.h
diff --git a/dbms/Dictionaries/DictionarySourceFactory.cpp b/src/Dictionaries/DictionarySourceFactory.cpp
similarity index 100%
rename from dbms/Dictionaries/DictionarySourceFactory.cpp
rename to src/Dictionaries/DictionarySourceFactory.cpp
diff --git a/dbms/Dictionaries/DictionarySourceFactory.h b/src/Dictionaries/DictionarySourceFactory.h
similarity index 100%
rename from dbms/Dictionaries/DictionarySourceFactory.h
rename to src/Dictionaries/DictionarySourceFactory.h
diff --git a/dbms/Dictionaries/DictionarySourceHelpers.cpp b/src/Dictionaries/DictionarySourceHelpers.cpp
similarity index 100%
rename from dbms/Dictionaries/DictionarySourceHelpers.cpp
rename to src/Dictionaries/DictionarySourceHelpers.cpp
diff --git a/dbms/Dictionaries/DictionarySourceHelpers.h b/src/Dictionaries/DictionarySourceHelpers.h
similarity index 100%
rename from dbms/Dictionaries/DictionarySourceHelpers.h
rename to src/Dictionaries/DictionarySourceHelpers.h
diff --git a/dbms/Dictionaries/DictionaryStructure.cpp b/src/Dictionaries/DictionaryStructure.cpp
similarity index 100%
rename from dbms/Dictionaries/DictionaryStructure.cpp
rename to src/Dictionaries/DictionaryStructure.cpp
diff --git a/dbms/Dictionaries/DictionaryStructure.h b/src/Dictionaries/DictionaryStructure.h
similarity index 100%
rename from dbms/Dictionaries/DictionaryStructure.h
rename to src/Dictionaries/DictionaryStructure.h
diff --git a/dbms/Dictionaries/Embedded/CMakeLists.txt b/src/Dictionaries/Embedded/CMakeLists.txt
similarity index 100%
rename from dbms/Dictionaries/Embedded/CMakeLists.txt
rename to src/Dictionaries/Embedded/CMakeLists.txt
diff --git a/dbms/Dictionaries/Embedded/GeoDictionariesLoader.cpp b/src/Dictionaries/Embedded/GeoDictionariesLoader.cpp
similarity index 100%
rename from dbms/Dictionaries/Embedded/GeoDictionariesLoader.cpp
rename to src/Dictionaries/Embedded/GeoDictionariesLoader.cpp
diff --git a/dbms/Dictionaries/Embedded/GeoDictionariesLoader.h b/src/Dictionaries/Embedded/GeoDictionariesLoader.h
similarity index 100%
rename from dbms/Dictionaries/Embedded/GeoDictionariesLoader.h
rename to src/Dictionaries/Embedded/GeoDictionariesLoader.h
diff --git a/dbms/Dictionaries/Embedded/GeodataProviders/Entries.h b/src/Dictionaries/Embedded/GeodataProviders/Entries.h
similarity index 100%
rename from dbms/Dictionaries/Embedded/GeodataProviders/Entries.h
rename to src/Dictionaries/Embedded/GeodataProviders/Entries.h
diff --git a/dbms/Dictionaries/Embedded/GeodataProviders/HierarchiesProvider.cpp b/src/Dictionaries/Embedded/GeodataProviders/HierarchiesProvider.cpp
similarity index 100%
rename from dbms/Dictionaries/Embedded/GeodataProviders/HierarchiesProvider.cpp
rename to src/Dictionaries/Embedded/GeodataProviders/HierarchiesProvider.cpp
diff --git a/dbms/Dictionaries/Embedded/GeodataProviders/HierarchiesProvider.h b/src/Dictionaries/Embedded/GeodataProviders/HierarchiesProvider.h
similarity index 100%
rename from dbms/Dictionaries/Embedded/GeodataProviders/HierarchiesProvider.h
rename to src/Dictionaries/Embedded/GeodataProviders/HierarchiesProvider.h
diff --git a/dbms/Dictionaries/Embedded/GeodataProviders/HierarchyFormatReader.cpp b/src/Dictionaries/Embedded/GeodataProviders/HierarchyFormatReader.cpp
similarity index 100%
rename from dbms/Dictionaries/Embedded/GeodataProviders/HierarchyFormatReader.cpp
rename to src/Dictionaries/Embedded/GeodataProviders/HierarchyFormatReader.cpp
diff --git a/dbms/Dictionaries/Embedded/GeodataProviders/HierarchyFormatReader.h b/src/Dictionaries/Embedded/GeodataProviders/HierarchyFormatReader.h
similarity index 100%
rename from dbms/Dictionaries/Embedded/GeodataProviders/HierarchyFormatReader.h
rename to src/Dictionaries/Embedded/GeodataProviders/HierarchyFormatReader.h
diff --git a/dbms/Dictionaries/Embedded/GeodataProviders/IHierarchiesProvider.h b/src/Dictionaries/Embedded/GeodataProviders/IHierarchiesProvider.h
similarity index 100%
rename from dbms/Dictionaries/Embedded/GeodataProviders/IHierarchiesProvider.h
rename to src/Dictionaries/Embedded/GeodataProviders/IHierarchiesProvider.h
diff --git a/dbms/Dictionaries/Embedded/GeodataProviders/INamesProvider.h b/src/Dictionaries/Embedded/GeodataProviders/INamesProvider.h
similarity index 100%
rename from dbms/Dictionaries/Embedded/GeodataProviders/INamesProvider.h
rename to src/Dictionaries/Embedded/GeodataProviders/INamesProvider.h
diff --git a/dbms/Dictionaries/Embedded/GeodataProviders/NamesFormatReader.cpp b/src/Dictionaries/Embedded/GeodataProviders/NamesFormatReader.cpp
similarity index 100%
rename from dbms/Dictionaries/Embedded/GeodataProviders/NamesFormatReader.cpp
rename to src/Dictionaries/Embedded/GeodataProviders/NamesFormatReader.cpp
diff --git a/dbms/Dictionaries/Embedded/GeodataProviders/NamesFormatReader.h b/src/Dictionaries/Embedded/GeodataProviders/NamesFormatReader.h
similarity index 100%
rename from dbms/Dictionaries/Embedded/GeodataProviders/NamesFormatReader.h
rename to src/Dictionaries/Embedded/GeodataProviders/NamesFormatReader.h
diff --git a/dbms/Dictionaries/Embedded/GeodataProviders/NamesProvider.cpp b/src/Dictionaries/Embedded/GeodataProviders/NamesProvider.cpp
similarity index 100%
rename from dbms/Dictionaries/Embedded/GeodataProviders/NamesProvider.cpp
rename to src/Dictionaries/Embedded/GeodataProviders/NamesProvider.cpp
diff --git a/dbms/Dictionaries/Embedded/GeodataProviders/NamesProvider.h b/src/Dictionaries/Embedded/GeodataProviders/NamesProvider.h
similarity index 100%
rename from dbms/Dictionaries/Embedded/GeodataProviders/NamesProvider.h
rename to src/Dictionaries/Embedded/GeodataProviders/NamesProvider.h
diff --git a/dbms/Dictionaries/Embedded/GeodataProviders/Types.h b/src/Dictionaries/Embedded/GeodataProviders/Types.h
similarity index 100%
rename from dbms/Dictionaries/Embedded/GeodataProviders/Types.h
rename to src/Dictionaries/Embedded/GeodataProviders/Types.h
diff --git a/dbms/Dictionaries/Embedded/RegionsHierarchies.cpp b/src/Dictionaries/Embedded/RegionsHierarchies.cpp
similarity index 100%
rename from dbms/Dictionaries/Embedded/RegionsHierarchies.cpp
rename to src/Dictionaries/Embedded/RegionsHierarchies.cpp
diff --git a/dbms/Dictionaries/Embedded/RegionsHierarchies.h b/src/Dictionaries/Embedded/RegionsHierarchies.h
similarity index 100%
rename from dbms/Dictionaries/Embedded/RegionsHierarchies.h
rename to src/Dictionaries/Embedded/RegionsHierarchies.h
diff --git a/dbms/Dictionaries/Embedded/RegionsHierarchy.cpp b/src/Dictionaries/Embedded/RegionsHierarchy.cpp
similarity index 100%
rename from dbms/Dictionaries/Embedded/RegionsHierarchy.cpp
rename to src/Dictionaries/Embedded/RegionsHierarchy.cpp
diff --git a/dbms/Dictionaries/Embedded/RegionsHierarchy.h b/src/Dictionaries/Embedded/RegionsHierarchy.h
similarity index 100%
rename from dbms/Dictionaries/Embedded/RegionsHierarchy.h
rename to src/Dictionaries/Embedded/RegionsHierarchy.h
diff --git a/dbms/Dictionaries/Embedded/RegionsNames.cpp b/src/Dictionaries/Embedded/RegionsNames.cpp
similarity index 100%
rename from dbms/Dictionaries/Embedded/RegionsNames.cpp
rename to src/Dictionaries/Embedded/RegionsNames.cpp
diff --git a/dbms/Dictionaries/Embedded/RegionsNames.h b/src/Dictionaries/Embedded/RegionsNames.h
similarity index 100%
rename from dbms/Dictionaries/Embedded/RegionsNames.h
rename to src/Dictionaries/Embedded/RegionsNames.h
diff --git a/dbms/Dictionaries/ExecutableDictionarySource.cpp b/src/Dictionaries/ExecutableDictionarySource.cpp
similarity index 100%
rename from dbms/Dictionaries/ExecutableDictionarySource.cpp
rename to src/Dictionaries/ExecutableDictionarySource.cpp
diff --git a/dbms/Dictionaries/ExecutableDictionarySource.h b/src/Dictionaries/ExecutableDictionarySource.h
similarity index 100%
rename from dbms/Dictionaries/ExecutableDictionarySource.h
rename to src/Dictionaries/ExecutableDictionarySource.h
diff --git a/dbms/Dictionaries/ExternalQueryBuilder.cpp b/src/Dictionaries/ExternalQueryBuilder.cpp
similarity index 100%
rename from dbms/Dictionaries/ExternalQueryBuilder.cpp
rename to src/Dictionaries/ExternalQueryBuilder.cpp
diff --git a/dbms/Dictionaries/ExternalQueryBuilder.h b/src/Dictionaries/ExternalQueryBuilder.h
similarity index 100%
rename from dbms/Dictionaries/ExternalQueryBuilder.h
rename to src/Dictionaries/ExternalQueryBuilder.h
diff --git a/dbms/Dictionaries/FileDictionarySource.cpp b/src/Dictionaries/FileDictionarySource.cpp
similarity index 100%
rename from dbms/Dictionaries/FileDictionarySource.cpp
rename to src/Dictionaries/FileDictionarySource.cpp
diff --git a/dbms/Dictionaries/FileDictionarySource.h b/src/Dictionaries/FileDictionarySource.h
similarity index 100%
rename from dbms/Dictionaries/FileDictionarySource.h
rename to src/Dictionaries/FileDictionarySource.h
diff --git a/dbms/Dictionaries/FlatDictionary.cpp b/src/Dictionaries/FlatDictionary.cpp
similarity index 100%
rename from dbms/Dictionaries/FlatDictionary.cpp
rename to src/Dictionaries/FlatDictionary.cpp
diff --git a/dbms/Dictionaries/FlatDictionary.h b/src/Dictionaries/FlatDictionary.h
similarity index 100%
rename from dbms/Dictionaries/FlatDictionary.h
rename to src/Dictionaries/FlatDictionary.h
diff --git a/dbms/Dictionaries/HTTPDictionarySource.cpp b/src/Dictionaries/HTTPDictionarySource.cpp
similarity index 100%
rename from dbms/Dictionaries/HTTPDictionarySource.cpp
rename to src/Dictionaries/HTTPDictionarySource.cpp
diff --git a/dbms/Dictionaries/HTTPDictionarySource.h b/src/Dictionaries/HTTPDictionarySource.h
similarity index 100%
rename from dbms/Dictionaries/HTTPDictionarySource.h
rename to src/Dictionaries/HTTPDictionarySource.h
diff --git a/dbms/Dictionaries/HashedDictionary.cpp b/src/Dictionaries/HashedDictionary.cpp
similarity index 100%
rename from dbms/Dictionaries/HashedDictionary.cpp
rename to src/Dictionaries/HashedDictionary.cpp
diff --git a/dbms/Dictionaries/HashedDictionary.h b/src/Dictionaries/HashedDictionary.h
similarity index 100%
rename from dbms/Dictionaries/HashedDictionary.h
rename to src/Dictionaries/HashedDictionary.h
diff --git a/dbms/Dictionaries/IDictionary.h b/src/Dictionaries/IDictionary.h
similarity index 100%
rename from dbms/Dictionaries/IDictionary.h
rename to src/Dictionaries/IDictionary.h
diff --git a/dbms/Dictionaries/IDictionarySource.h b/src/Dictionaries/IDictionarySource.h
similarity index 100%
rename from dbms/Dictionaries/IDictionarySource.h
rename to src/Dictionaries/IDictionarySource.h
diff --git a/dbms/Dictionaries/LibraryDictionarySource.cpp b/src/Dictionaries/LibraryDictionarySource.cpp
similarity index 100%
rename from dbms/Dictionaries/LibraryDictionarySource.cpp
rename to src/Dictionaries/LibraryDictionarySource.cpp
diff --git a/dbms/Dictionaries/LibraryDictionarySource.h b/src/Dictionaries/LibraryDictionarySource.h
similarity index 100%
rename from dbms/Dictionaries/LibraryDictionarySource.h
rename to src/Dictionaries/LibraryDictionarySource.h
diff --git a/dbms/Dictionaries/LibraryDictionarySourceExternal.cpp b/src/Dictionaries/LibraryDictionarySourceExternal.cpp
similarity index 100%
rename from dbms/Dictionaries/LibraryDictionarySourceExternal.cpp
rename to src/Dictionaries/LibraryDictionarySourceExternal.cpp
diff --git a/dbms/Dictionaries/LibraryDictionarySourceExternal.h b/src/Dictionaries/LibraryDictionarySourceExternal.h
similarity index 100%
rename from dbms/Dictionaries/LibraryDictionarySourceExternal.h
rename to src/Dictionaries/LibraryDictionarySourceExternal.h
diff --git a/dbms/Dictionaries/MongoDBBlockInputStream.cpp b/src/Dictionaries/MongoDBBlockInputStream.cpp
similarity index 100%
rename from dbms/Dictionaries/MongoDBBlockInputStream.cpp
rename to src/Dictionaries/MongoDBBlockInputStream.cpp
diff --git a/dbms/Dictionaries/MongoDBBlockInputStream.h b/src/Dictionaries/MongoDBBlockInputStream.h
similarity index 100%
rename from dbms/Dictionaries/MongoDBBlockInputStream.h
rename to src/Dictionaries/MongoDBBlockInputStream.h
diff --git a/dbms/Dictionaries/MongoDBDictionarySource.cpp b/src/Dictionaries/MongoDBDictionarySource.cpp
similarity index 99%
rename from dbms/Dictionaries/MongoDBDictionarySource.cpp
rename to src/Dictionaries/MongoDBDictionarySource.cpp
index ba219d3c0fa..7247d8a4613 100644
--- a/dbms/Dictionaries/MongoDBDictionarySource.cpp
+++ b/src/Dictionaries/MongoDBDictionarySource.cpp
@@ -48,7 +48,7 @@ void registerDictionarySourceMongoDB(DictionarySourceFactory & factory)
 // only after poco
 // naming conflict:
 // Poco/MongoDB/BSONWriter.h:54: void writeCString(const std::string & value);
-// dbms/IO/WriteHelpers.h:146 #define writeCString(s, buf)
+// src/IO/WriteHelpers.h:146 #define writeCString(s, buf)
 # include
 # include
 # include
diff --git a/dbms/Dictionaries/MongoDBDictionarySource.h b/src/Dictionaries/MongoDBDictionarySource.h
similarity index 100%
rename from dbms/Dictionaries/MongoDBDictionarySource.h
rename to src/Dictionaries/MongoDBDictionarySource.h
diff --git a/dbms/Dictionaries/MySQLDictionarySource.cpp b/src/Dictionaries/MySQLDictionarySource.cpp
similarity index 100%
rename from dbms/Dictionaries/MySQLDictionarySource.cpp
rename to src/Dictionaries/MySQLDictionarySource.cpp
diff --git a/dbms/Dictionaries/MySQLDictionarySource.h b/src/Dictionaries/MySQLDictionarySource.h
similarity index 100%
rename from dbms/Dictionaries/MySQLDictionarySource.h
rename to src/Dictionaries/MySQLDictionarySource.h
diff --git a/dbms/Dictionaries/PolygonDictionary.cpp b/src/Dictionaries/PolygonDictionary.cpp
similarity index 100%
rename from dbms/Dictionaries/PolygonDictionary.cpp
rename to src/Dictionaries/PolygonDictionary.cpp
diff --git a/dbms/Dictionaries/PolygonDictionary.h b/src/Dictionaries/PolygonDictionary.h
similarity index 100%
rename from dbms/Dictionaries/PolygonDictionary.h
rename to src/Dictionaries/PolygonDictionary.h
diff --git a/dbms/Dictionaries/RangeDictionaryBlockInputStream.h b/src/Dictionaries/RangeDictionaryBlockInputStream.h
similarity index 100%
rename from dbms/Dictionaries/RangeDictionaryBlockInputStream.h
rename to src/Dictionaries/RangeDictionaryBlockInputStream.h
diff --git a/dbms/Dictionaries/RangeHashedDictionary.cpp b/src/Dictionaries/RangeHashedDictionary.cpp
similarity index 100%
rename from dbms/Dictionaries/RangeHashedDictionary.cpp
rename to src/Dictionaries/RangeHashedDictionary.cpp
diff --git a/dbms/Dictionaries/RangeHashedDictionary.h b/src/Dictionaries/RangeHashedDictionary.h
similarity index 100%
rename from dbms/Dictionaries/RangeHashedDictionary.h
rename to src/Dictionaries/RangeHashedDictionary.h
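A note on the comment updated in the MongoDBDictionarySource.cpp hunk above: IO/WriteHelpers.h defines writeCString as a function-like macro, so the Poco MongoDB headers must be seen by the preprocessor before that macro exists ("only after poco"). The following is a minimal illustrative sketch of the clash, not part of the patch; the names are simplified and writeCStringImpl is hypothetical, while the two writeCString signatures mirror the comments quoted above.

#include <string>

// Simplified stand-in for the macro at src/IO/WriteHelpers.h:146 (assumed body):
#define writeCString(s, buf) writeCStringImpl((s), (buf))

// Simplified stand-in for Poco/MongoDB/BSONWriter.h:54. With the macro already
// defined, the preprocessor tries to expand writeCString below; the macro
// expects two arguments but the declaration supplies only one, so translation
// fails before the compiler proper ever sees the class:
struct BSONWriter
{
    void writeCString(const std::string & value); // error: too few macro arguments
};

// Including the Poco headers first (as the patch's comment demands) lets the
// declaration compile, because the macro is not yet defined at that point.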
diff --git a/dbms/Dictionaries/RedisBlockInputStream.cpp b/src/Dictionaries/RedisBlockInputStream.cpp
similarity index 100%
rename from dbms/Dictionaries/RedisBlockInputStream.cpp
rename to src/Dictionaries/RedisBlockInputStream.cpp
diff --git a/dbms/Dictionaries/RedisBlockInputStream.h b/src/Dictionaries/RedisBlockInputStream.h
similarity index 100%
rename from dbms/Dictionaries/RedisBlockInputStream.h
rename to src/Dictionaries/RedisBlockInputStream.h
diff --git a/dbms/Dictionaries/RedisDictionarySource.cpp b/src/Dictionaries/RedisDictionarySource.cpp
similarity index 100%
rename from dbms/Dictionaries/RedisDictionarySource.cpp
rename to src/Dictionaries/RedisDictionarySource.cpp
diff --git a/dbms/Dictionaries/RedisDictionarySource.h b/src/Dictionaries/RedisDictionarySource.h
similarity index 100%
rename from dbms/Dictionaries/RedisDictionarySource.h
rename to src/Dictionaries/RedisDictionarySource.h
diff --git a/dbms/Dictionaries/TrieDictionary.cpp b/src/Dictionaries/TrieDictionary.cpp
similarity index 100%
rename from dbms/Dictionaries/TrieDictionary.cpp
rename to src/Dictionaries/TrieDictionary.cpp
diff --git a/dbms/Dictionaries/TrieDictionary.h b/src/Dictionaries/TrieDictionary.h
similarity index 100%
rename from dbms/Dictionaries/TrieDictionary.h
rename to src/Dictionaries/TrieDictionary.h
diff --git a/dbms/Dictionaries/XDBCDictionarySource.cpp b/src/Dictionaries/XDBCDictionarySource.cpp
similarity index 100%
rename from dbms/Dictionaries/XDBCDictionarySource.cpp
rename to src/Dictionaries/XDBCDictionarySource.cpp
diff --git a/dbms/Dictionaries/XDBCDictionarySource.h b/src/Dictionaries/XDBCDictionarySource.h
similarity index 100%
rename from dbms/Dictionaries/XDBCDictionarySource.h
rename to src/Dictionaries/XDBCDictionarySource.h
diff --git a/dbms/Dictionaries/getDictionaryConfigurationFromAST.cpp b/src/Dictionaries/getDictionaryConfigurationFromAST.cpp
similarity index 100%
rename from dbms/Dictionaries/getDictionaryConfigurationFromAST.cpp
rename to src/Dictionaries/getDictionaryConfigurationFromAST.cpp
diff --git a/dbms/Dictionaries/getDictionaryConfigurationFromAST.h b/src/Dictionaries/getDictionaryConfigurationFromAST.h
similarity index 100%
rename from dbms/Dictionaries/getDictionaryConfigurationFromAST.h
rename to src/Dictionaries/getDictionaryConfigurationFromAST.h
diff --git a/dbms/Dictionaries/readInvalidateQuery.cpp b/src/Dictionaries/readInvalidateQuery.cpp
similarity index 100%
rename from dbms/Dictionaries/readInvalidateQuery.cpp
rename to src/Dictionaries/readInvalidateQuery.cpp
diff --git a/dbms/Dictionaries/readInvalidateQuery.h b/src/Dictionaries/readInvalidateQuery.h
similarity index 100%
rename from dbms/Dictionaries/readInvalidateQuery.h
rename to src/Dictionaries/readInvalidateQuery.h
diff --git a/dbms/Dictionaries/registerDictionaries.cpp b/src/Dictionaries/registerDictionaries.cpp
similarity index 100%
rename from dbms/Dictionaries/registerDictionaries.cpp
rename to src/Dictionaries/registerDictionaries.cpp
diff --git a/dbms/Dictionaries/registerDictionaries.h b/src/Dictionaries/registerDictionaries.h
similarity index 100%
rename from dbms/Dictionaries/registerDictionaries.h
rename to src/Dictionaries/registerDictionaries.h
diff --git a/dbms/Dictionaries/tests/CMakeLists.txt b/src/Dictionaries/tests/CMakeLists.txt
similarity index 100%
rename from dbms/Dictionaries/tests/CMakeLists.txt
rename to src/Dictionaries/tests/CMakeLists.txt
diff --git a/dbms/Dictionaries/tests/gtest_dictionary_configuration.cpp b/src/Dictionaries/tests/gtest_dictionary_configuration.cpp
similarity index 100%
rename from dbms/Dictionaries/tests/gtest_dictionary_configuration.cpp
rename to src/Dictionaries/tests/gtest_dictionary_configuration.cpp
diff --git a/dbms/Dictionaries/writeParenthesisedString.cpp b/src/Dictionaries/writeParenthesisedString.cpp
similarity index 100%
rename from dbms/Dictionaries/writeParenthesisedString.cpp
rename to src/Dictionaries/writeParenthesisedString.cpp
diff --git a/dbms/Dictionaries/writeParenthesisedString.h b/src/Dictionaries/writeParenthesisedString.h
similarity index 100%
rename from dbms/Dictionaries/writeParenthesisedString.h
rename to src/Dictionaries/writeParenthesisedString.h
diff --git a/dbms/Disks/CMakeLists.txt b/src/Disks/CMakeLists.txt
similarity index 100%
rename from dbms/Disks/CMakeLists.txt
rename to src/Disks/CMakeLists.txt
diff --git a/dbms/Disks/DiskFactory.cpp b/src/Disks/DiskFactory.cpp
similarity index 100%
rename from dbms/Disks/DiskFactory.cpp
rename to src/Disks/DiskFactory.cpp
diff --git a/dbms/Disks/DiskFactory.h b/src/Disks/DiskFactory.h
similarity index 100%
rename from dbms/Disks/DiskFactory.h
rename to src/Disks/DiskFactory.h
diff --git a/dbms/Disks/DiskLocal.cpp b/src/Disks/DiskLocal.cpp
similarity index 100%
rename from dbms/Disks/DiskLocal.cpp
rename to src/Disks/DiskLocal.cpp
diff --git a/dbms/Disks/DiskLocal.h b/src/Disks/DiskLocal.h
similarity index 100%
rename from dbms/Disks/DiskLocal.h
rename to src/Disks/DiskLocal.h
diff --git a/dbms/Disks/DiskMemory.cpp b/src/Disks/DiskMemory.cpp
similarity index 100%
rename from dbms/Disks/DiskMemory.cpp
rename to src/Disks/DiskMemory.cpp
diff --git a/dbms/Disks/DiskMemory.h b/src/Disks/DiskMemory.h
similarity index 100%
rename from dbms/Disks/DiskMemory.h
rename to src/Disks/DiskMemory.h
diff --git a/dbms/Disks/DiskS3.cpp b/src/Disks/DiskS3.cpp
similarity index 100%
rename from dbms/Disks/DiskS3.cpp
rename to src/Disks/DiskS3.cpp
diff --git a/dbms/Disks/DiskS3.h b/src/Disks/DiskS3.h
similarity index 100%
rename from dbms/Disks/DiskS3.h
rename to src/Disks/DiskS3.h
diff --git a/dbms/Disks/DiskSpaceMonitor.cpp b/src/Disks/DiskSpaceMonitor.cpp
similarity index 100%
rename from dbms/Disks/DiskSpaceMonitor.cpp
rename to src/Disks/DiskSpaceMonitor.cpp
diff --git a/dbms/Disks/DiskSpaceMonitor.h b/src/Disks/DiskSpaceMonitor.h
similarity index 100%
rename from dbms/Disks/DiskSpaceMonitor.h
rename to src/Disks/DiskSpaceMonitor.h
diff --git a/dbms/Disks/IDisk.cpp b/src/Disks/IDisk.cpp
similarity index 100%
rename from dbms/Disks/IDisk.cpp
rename to src/Disks/IDisk.cpp
diff --git a/dbms/Disks/IDisk.h b/src/Disks/IDisk.h
similarity index 100%
rename from dbms/Disks/IDisk.h
rename to src/Disks/IDisk.h
diff --git a/dbms/Disks/registerDisks.cpp b/src/Disks/registerDisks.cpp
similarity index 100%
rename from dbms/Disks/registerDisks.cpp
rename to src/Disks/registerDisks.cpp
diff --git a/dbms/Disks/registerDisks.h b/src/Disks/registerDisks.h
similarity index 100%
rename from dbms/Disks/registerDisks.h
rename to src/Disks/registerDisks.h
diff --git a/dbms/Disks/tests/CMakeLists.txt b/src/Disks/tests/CMakeLists.txt
similarity index 100%
rename from dbms/Disks/tests/CMakeLists.txt
rename to src/Disks/tests/CMakeLists.txt
diff --git a/dbms/Disks/tests/gtest_disk.cpp b/src/Disks/tests/gtest_disk.cpp
similarity index 100%
rename from dbms/Disks/tests/gtest_disk.cpp
rename to src/Disks/tests/gtest_disk.cpp
diff --git a/dbms/Disks/tests/gtest_disk.h b/src/Disks/tests/gtest_disk.h
similarity index 100%
rename from dbms/Disks/tests/gtest_disk.h
rename to src/Disks/tests/gtest_disk.h
diff --git a/dbms/Disks/tests/gtest_path_functions.cpp b/src/Disks/tests/gtest_path_functions.cpp
similarity index 100%
rename from dbms/Disks/tests/gtest_path_functions.cpp
rename to src/Disks/tests/gtest_path_functions.cpp
diff --git a/dbms/Formats/CMakeLists.txt b/src/Formats/CMakeLists.txt
similarity index 100%
rename from dbms/Formats/CMakeLists.txt
rename to src/Formats/CMakeLists.txt
diff --git a/dbms/Formats/FormatFactory.cpp b/src/Formats/FormatFactory.cpp
similarity index 100%
rename from dbms/Formats/FormatFactory.cpp
rename to src/Formats/FormatFactory.cpp
diff --git a/dbms/Formats/FormatFactory.h b/src/Formats/FormatFactory.h
similarity index 100%
rename from dbms/Formats/FormatFactory.h
rename to src/Formats/FormatFactory.h
diff --git a/dbms/Formats/FormatSchemaInfo.cpp b/src/Formats/FormatSchemaInfo.cpp
similarity index 100%
rename from dbms/Formats/FormatSchemaInfo.cpp
rename to src/Formats/FormatSchemaInfo.cpp
diff --git a/dbms/Formats/FormatSchemaInfo.h b/src/Formats/FormatSchemaInfo.h
similarity index 100%
rename from dbms/Formats/FormatSchemaInfo.h
rename to src/Formats/FormatSchemaInfo.h
diff --git a/dbms/Formats/FormatSettings.h b/src/Formats/FormatSettings.h
similarity index 100%
rename from dbms/Formats/FormatSettings.h
rename to src/Formats/FormatSettings.h
diff --git a/dbms/Formats/IRowInputStream.cpp b/src/Formats/IRowInputStream.cpp
similarity index 100%
rename from dbms/Formats/IRowInputStream.cpp
rename to src/Formats/IRowInputStream.cpp
diff --git a/dbms/Formats/IRowInputStream.h b/src/Formats/IRowInputStream.h
similarity index 100%
rename from dbms/Formats/IRowInputStream.h
rename to src/Formats/IRowInputStream.h
diff --git a/dbms/Formats/IRowOutputStream.cpp b/src/Formats/IRowOutputStream.cpp
similarity index 100%
rename from dbms/Formats/IRowOutputStream.cpp
rename to src/Formats/IRowOutputStream.cpp
diff --git a/dbms/Formats/IRowOutputStream.h b/src/Formats/IRowOutputStream.h
similarity index 100%
rename from dbms/Formats/IRowOutputStream.h
rename to src/Formats/IRowOutputStream.h
diff --git a/dbms/Formats/MySQLBlockInputStream.cpp b/src/Formats/MySQLBlockInputStream.cpp
similarity index 100%
rename from dbms/Formats/MySQLBlockInputStream.cpp
rename to src/Formats/MySQLBlockInputStream.cpp
diff --git a/dbms/Formats/MySQLBlockInputStream.h b/src/Formats/MySQLBlockInputStream.h
similarity index 100%
rename from dbms/Formats/MySQLBlockInputStream.h
rename to src/Formats/MySQLBlockInputStream.h
diff --git a/dbms/Formats/NativeFormat.cpp b/src/Formats/NativeFormat.cpp
similarity index 100%
rename from dbms/Formats/NativeFormat.cpp
rename to src/Formats/NativeFormat.cpp
diff --git a/dbms/Formats/NullFormat.cpp b/src/Formats/NullFormat.cpp
similarity index 100%
rename from dbms/Formats/NullFormat.cpp
rename to src/Formats/NullFormat.cpp
diff --git a/dbms/Formats/ParsedTemplateFormatString.cpp b/src/Formats/ParsedTemplateFormatString.cpp
similarity index 100%
rename from dbms/Formats/ParsedTemplateFormatString.cpp
rename to src/Formats/ParsedTemplateFormatString.cpp
diff --git a/dbms/Formats/ParsedTemplateFormatString.h b/src/Formats/ParsedTemplateFormatString.h
similarity index 100%
rename from dbms/Formats/ParsedTemplateFormatString.h
rename to src/Formats/ParsedTemplateFormatString.h
diff --git a/dbms/Formats/ProtobufColumnMatcher.cpp b/src/Formats/ProtobufColumnMatcher.cpp
similarity index 100%
rename from dbms/Formats/ProtobufColumnMatcher.cpp
rename to src/Formats/ProtobufColumnMatcher.cpp
diff --git a/dbms/Formats/ProtobufColumnMatcher.h b/src/Formats/ProtobufColumnMatcher.h
similarity index 100%
rename from dbms/Formats/ProtobufColumnMatcher.h
rename to src/Formats/ProtobufColumnMatcher.h
diff --git a/dbms/Formats/ProtobufReader.cpp b/src/Formats/ProtobufReader.cpp
similarity index 100%
rename from dbms/Formats/ProtobufReader.cpp
rename to src/Formats/ProtobufReader.cpp
diff --git a/dbms/Formats/ProtobufReader.h b/src/Formats/ProtobufReader.h
similarity index 100%
rename from dbms/Formats/ProtobufReader.h
rename to src/Formats/ProtobufReader.h
diff --git a/dbms/Formats/ProtobufSchemas.cpp b/src/Formats/ProtobufSchemas.cpp
similarity index 100%
rename from dbms/Formats/ProtobufSchemas.cpp
rename to src/Formats/ProtobufSchemas.cpp
diff --git a/dbms/Formats/ProtobufSchemas.h b/src/Formats/ProtobufSchemas.h
similarity index 100%
rename from dbms/Formats/ProtobufSchemas.h
rename to src/Formats/ProtobufSchemas.h
diff --git a/dbms/Formats/ProtobufWriter.cpp b/src/Formats/ProtobufWriter.cpp
similarity index 100%
rename from dbms/Formats/ProtobufWriter.cpp
rename to src/Formats/ProtobufWriter.cpp
diff --git a/dbms/Formats/ProtobufWriter.h b/src/Formats/ProtobufWriter.h
similarity index 100%
rename from dbms/Formats/ProtobufWriter.h
rename to src/Formats/ProtobufWriter.h
diff --git a/dbms/Formats/config_formats.h.in b/src/Formats/config_formats.h.in
similarity index 100%
rename from dbms/Formats/config_formats.h.in
rename to src/Formats/config_formats.h.in
diff --git a/dbms/Formats/tests/CMakeLists.txt b/src/Formats/tests/CMakeLists.txt
similarity index 100%
rename from dbms/Formats/tests/CMakeLists.txt
rename to src/Formats/tests/CMakeLists.txt
diff --git a/dbms/Formats/tests/tab_separated_streams.cpp b/src/Formats/tests/tab_separated_streams.cpp
similarity index 100%
rename from dbms/Formats/tests/tab_separated_streams.cpp
rename to src/Formats/tests/tab_separated_streams.cpp
diff --git a/dbms/Formats/verbosePrintString.cpp b/src/Formats/verbosePrintString.cpp
similarity index 100%
rename from dbms/Formats/verbosePrintString.cpp
rename to src/Formats/verbosePrintString.cpp
diff --git a/dbms/Formats/verbosePrintString.h b/src/Formats/verbosePrintString.h
similarity index 100%
rename from dbms/Formats/verbosePrintString.h
rename to src/Formats/verbosePrintString.h
diff --git a/dbms/Functions/CMakeLists.txt b/src/Functions/CMakeLists.txt
similarity index 100%
rename from dbms/Functions/CMakeLists.txt
rename to src/Functions/CMakeLists.txt
diff --git a/dbms/Functions/CRC.cpp b/src/Functions/CRC.cpp
similarity index 100%
rename from dbms/Functions/CRC.cpp
rename to src/Functions/CRC.cpp
diff --git a/dbms/Functions/CustomWeekTransforms.h b/src/Functions/CustomWeekTransforms.h
similarity index 100%
rename from dbms/Functions/CustomWeekTransforms.h
rename to src/Functions/CustomWeekTransforms.h
diff --git a/dbms/Functions/DateTimeTransforms.h b/src/Functions/DateTimeTransforms.h
similarity index 100%
rename from dbms/Functions/DateTimeTransforms.h
rename to src/Functions/DateTimeTransforms.h
diff --git a/dbms/Functions/DivisionUtils.h b/src/Functions/DivisionUtils.h
similarity index 100%
rename from dbms/Functions/DivisionUtils.h
rename to src/Functions/DivisionUtils.h
diff --git a/dbms/Functions/DummyJSONParser.h b/src/Functions/DummyJSONParser.h
similarity index 100%
rename from dbms/Functions/DummyJSONParser.h
rename to src/Functions/DummyJSONParser.h
diff --git a/dbms/Functions/EmptyImpl.h b/src/Functions/EmptyImpl.h
similarity index 100%
rename from dbms/Functions/EmptyImpl.h
rename to src/Functions/EmptyImpl.h
diff --git a/dbms/Functions/FunctionBase64Conversion.h b/src/Functions/FunctionBase64Conversion.h
similarity index 100%
rename from dbms/Functions/FunctionBase64Conversion.h
rename to src/Functions/FunctionBase64Conversion.h
diff --git a/dbms/Functions/FunctionBinaryArithmetic.h b/src/Functions/FunctionBinaryArithmetic.h
similarity index 100%
rename from dbms/Functions/FunctionBinaryArithmetic.h
rename to src/Functions/FunctionBinaryArithmetic.h
diff --git a/dbms/Functions/FunctionBitTestMany.h b/src/Functions/FunctionBitTestMany.h
similarity index 100%
rename from dbms/Functions/FunctionBitTestMany.h
rename to src/Functions/FunctionBitTestMany.h
diff --git a/dbms/Functions/FunctionCustomWeekToSomething.h b/src/Functions/FunctionCustomWeekToSomething.h
similarity index 100%
rename from dbms/Functions/FunctionCustomWeekToSomething.h
rename to src/Functions/FunctionCustomWeekToSomething.h
diff --git a/dbms/Functions/FunctionDateOrDateTimeAddInterval.h b/src/Functions/FunctionDateOrDateTimeAddInterval.h
similarity index 100%
rename from dbms/Functions/FunctionDateOrDateTimeAddInterval.h
rename to src/Functions/FunctionDateOrDateTimeAddInterval.h
diff --git a/dbms/Functions/FunctionDateOrDateTimeToSomething.h b/src/Functions/FunctionDateOrDateTimeToSomething.h
similarity index 100%
rename from dbms/Functions/FunctionDateOrDateTimeToSomething.h
rename to src/Functions/FunctionDateOrDateTimeToSomething.h
diff --git a/dbms/Functions/FunctionFQDN.cpp b/src/Functions/FunctionFQDN.cpp
similarity index 100%
rename from dbms/Functions/FunctionFQDN.cpp
rename to src/Functions/FunctionFQDN.cpp
diff --git a/dbms/Functions/FunctionFactory.cpp b/src/Functions/FunctionFactory.cpp
similarity index 100%
rename from dbms/Functions/FunctionFactory.cpp
rename to src/Functions/FunctionFactory.cpp
diff --git a/dbms/Functions/FunctionFactory.h b/src/Functions/FunctionFactory.h
similarity index 100%
rename from dbms/Functions/FunctionFactory.h
rename to src/Functions/FunctionFactory.h
diff --git a/dbms/Functions/FunctionHelpers.cpp b/src/Functions/FunctionHelpers.cpp
similarity index 100%
rename from dbms/Functions/FunctionHelpers.cpp
rename to src/Functions/FunctionHelpers.cpp
diff --git a/dbms/Functions/FunctionHelpers.h b/src/Functions/FunctionHelpers.h
similarity index 100%
rename from dbms/Functions/FunctionHelpers.h
rename to src/Functions/FunctionHelpers.h
diff --git a/dbms/Functions/FunctionIfBase.h b/src/Functions/FunctionIfBase.h
similarity index 100%
rename from dbms/Functions/FunctionIfBase.h
rename to src/Functions/FunctionIfBase.h
diff --git a/dbms/Functions/FunctionJoinGet.cpp b/src/Functions/FunctionJoinGet.cpp
similarity index 100%
rename from dbms/Functions/FunctionJoinGet.cpp
rename to src/Functions/FunctionJoinGet.cpp
diff --git a/dbms/Functions/FunctionJoinGet.h b/src/Functions/FunctionJoinGet.h
similarity index 100%
rename from dbms/Functions/FunctionJoinGet.h
rename to src/Functions/FunctionJoinGet.h
diff --git a/dbms/Functions/FunctionMathBinaryFloat64.h b/src/Functions/FunctionMathBinaryFloat64.h
similarity index 100%
rename from dbms/Functions/FunctionMathBinaryFloat64.h
rename to src/Functions/FunctionMathBinaryFloat64.h
diff --git a/dbms/Functions/FunctionMathConstFloat64.h b/src/Functions/FunctionMathConstFloat64.h
similarity index 100%
rename from dbms/Functions/FunctionMathConstFloat64.h
rename to src/Functions/FunctionMathConstFloat64.h
diff --git a/dbms/Functions/FunctionMathUnary.h b/src/Functions/FunctionMathUnary.h
similarity index 100%
rename from dbms/Functions/FunctionMathUnary.h
rename to src/Functions/FunctionMathUnary.h
diff --git a/dbms/Functions/FunctionNumericPredicate.h b/src/Functions/FunctionNumericPredicate.h
similarity index 100%
rename from dbms/Functions/FunctionNumericPredicate.h
rename to src/Functions/FunctionNumericPredicate.h
diff --git a/dbms/Functions/FunctionStartsEndsWith.h b/src/Functions/FunctionStartsEndsWith.h
similarity index 100%
rename from dbms/Functions/FunctionStartsEndsWith.h
rename to src/Functions/FunctionStartsEndsWith.h
diff --git a/dbms/Functions/FunctionStringOrArrayToT.h b/src/Functions/FunctionStringOrArrayToT.h
similarity index 100%
rename from dbms/Functions/FunctionStringOrArrayToT.h
rename to src/Functions/FunctionStringOrArrayToT.h
diff --git a/dbms/Functions/FunctionStringToString.h b/src/Functions/FunctionStringToString.h
similarity index 100%
rename from dbms/Functions/FunctionStringToString.h
rename to src/Functions/FunctionStringToString.h
diff --git a/dbms/Functions/FunctionUnaryArithmetic.h b/src/Functions/FunctionUnaryArithmetic.h
similarity index 100%
rename from dbms/Functions/FunctionUnaryArithmetic.h
rename to src/Functions/FunctionUnaryArithmetic.h
diff --git a/dbms/Functions/FunctionsBitmap.cpp b/src/Functions/FunctionsBitmap.cpp
similarity index 100%
rename from dbms/Functions/FunctionsBitmap.cpp
rename to src/Functions/FunctionsBitmap.cpp
diff --git a/dbms/Functions/FunctionsBitmap.h b/src/Functions/FunctionsBitmap.h
similarity index 100%
rename from dbms/Functions/FunctionsBitmap.h
rename to src/Functions/FunctionsBitmap.h
diff --git a/dbms/Functions/FunctionsCoding.cpp b/src/Functions/FunctionsCoding.cpp
similarity index 100%
rename from dbms/Functions/FunctionsCoding.cpp
rename to src/Functions/FunctionsCoding.cpp
diff --git a/dbms/Functions/FunctionsCoding.h b/src/Functions/FunctionsCoding.h
similarity index 100%
rename from dbms/Functions/FunctionsCoding.h
rename to src/Functions/FunctionsCoding.h
diff --git a/dbms/Functions/FunctionsComparison.h b/src/Functions/FunctionsComparison.h
similarity index 100%
rename from dbms/Functions/FunctionsComparison.h
rename to src/Functions/FunctionsComparison.h
diff --git a/dbms/Functions/FunctionsConsistentHashing.h b/src/Functions/FunctionsConsistentHashing.h
similarity index 100%
rename from dbms/Functions/FunctionsConsistentHashing.h
rename to src/Functions/FunctionsConsistentHashing.h
diff --git a/dbms/Functions/FunctionsConversion.cpp b/src/Functions/FunctionsConversion.cpp
similarity index 100%
rename from dbms/Functions/FunctionsConversion.cpp
rename to src/Functions/FunctionsConversion.cpp
diff --git a/dbms/Functions/FunctionsConversion.h b/src/Functions/FunctionsConversion.h
similarity index 100%
rename from dbms/Functions/FunctionsConversion.h
rename to src/Functions/FunctionsConversion.h
diff --git a/dbms/Functions/FunctionsEmbeddedDictionaries.cpp b/src/Functions/FunctionsEmbeddedDictionaries.cpp
similarity index 100%
rename from dbms/Functions/FunctionsEmbeddedDictionaries.cpp
rename to src/Functions/FunctionsEmbeddedDictionaries.cpp
diff --git a/dbms/Functions/FunctionsEmbeddedDictionaries.h b/src/Functions/FunctionsEmbeddedDictionaries.h
similarity index 100%
rename from dbms/Functions/FunctionsEmbeddedDictionaries.h
rename to src/Functions/FunctionsEmbeddedDictionaries.h
diff --git a/dbms/Functions/FunctionsExternalDictionaries.cpp b/src/Functions/FunctionsExternalDictionaries.cpp
similarity index 100%
rename from dbms/Functions/FunctionsExternalDictionaries.cpp
rename to src/Functions/FunctionsExternalDictionaries.cpp
diff --git a/dbms/Functions/FunctionsExternalDictionaries.h b/src/Functions/FunctionsExternalDictionaries.h
similarity index 100%
rename from dbms/Functions/FunctionsExternalDictionaries.h
rename to src/Functions/FunctionsExternalDictionaries.h
diff --git a/dbms/Functions/FunctionsExternalModels.cpp b/src/Functions/FunctionsExternalModels.cpp
similarity index 100%
rename from dbms/Functions/FunctionsExternalModels.cpp
rename to src/Functions/FunctionsExternalModels.cpp
diff --git a/dbms/Functions/FunctionsExternalModels.h b/src/Functions/FunctionsExternalModels.h
similarity index 100%
rename from dbms/Functions/FunctionsExternalModels.h
rename to src/Functions/FunctionsExternalModels.h
diff --git a/dbms/Functions/FunctionsFormatting.cpp b/src/Functions/FunctionsFormatting.cpp
similarity index 100%
rename from dbms/Functions/FunctionsFormatting.cpp
rename to src/Functions/FunctionsFormatting.cpp
diff --git a/dbms/Functions/FunctionsFormatting.h b/src/Functions/FunctionsFormatting.h
similarity index 100%
rename from dbms/Functions/FunctionsFormatting.h
rename to src/Functions/FunctionsFormatting.h
diff --git a/dbms/Functions/FunctionsHashing.cpp b/src/Functions/FunctionsHashing.cpp
similarity index 100%
rename from dbms/Functions/FunctionsHashing.cpp
rename to src/Functions/FunctionsHashing.cpp
diff --git a/dbms/Functions/FunctionsHashing.h b/src/Functions/FunctionsHashing.h
similarity index 100%
rename from dbms/Functions/FunctionsHashing.h
rename to src/Functions/FunctionsHashing.h
diff --git a/dbms/Functions/FunctionsJSON.cpp b/src/Functions/FunctionsJSON.cpp
similarity index 100%
rename from dbms/Functions/FunctionsJSON.cpp
rename to src/Functions/FunctionsJSON.cpp
diff --git a/dbms/Functions/FunctionsJSON.h b/src/Functions/FunctionsJSON.h
similarity index 100%
rename from dbms/Functions/FunctionsJSON.h
rename to src/Functions/FunctionsJSON.h
diff --git a/dbms/Functions/FunctionsLogical.cpp b/src/Functions/FunctionsLogical.cpp
similarity index 100%
rename from dbms/Functions/FunctionsLogical.cpp
rename to src/Functions/FunctionsLogical.cpp
diff --git a/dbms/Functions/FunctionsLogical.h b/src/Functions/FunctionsLogical.h
similarity index 100%
rename from dbms/Functions/FunctionsLogical.h
rename to src/Functions/FunctionsLogical.h
diff --git a/dbms/Functions/FunctionsMiscellaneous.h b/src/Functions/FunctionsMiscellaneous.h
similarity index 100%
rename from dbms/Functions/FunctionsMiscellaneous.h
rename to src/Functions/FunctionsMiscellaneous.h
diff --git a/dbms/Functions/FunctionsMultiStringPosition.h b/src/Functions/FunctionsMultiStringPosition.h
similarity index 100%
rename from dbms/Functions/FunctionsMultiStringPosition.h
rename to src/Functions/FunctionsMultiStringPosition.h
diff --git a/dbms/Functions/FunctionsMultiStringSearch.h b/src/Functions/FunctionsMultiStringSearch.h
similarity index 100%
rename from dbms/Functions/FunctionsMultiStringSearch.h
rename to src/Functions/FunctionsMultiStringSearch.h
diff --git a/dbms/Functions/FunctionsRandom.cpp b/src/Functions/FunctionsRandom.cpp
similarity index 100%
rename from dbms/Functions/FunctionsRandom.cpp
rename to src/Functions/FunctionsRandom.cpp
diff --git a/dbms/Functions/FunctionsRandom.h b/src/Functions/FunctionsRandom.h
similarity index 100%
rename from dbms/Functions/FunctionsRandom.h
rename to src/Functions/FunctionsRandom.h
diff --git a/dbms/Functions/FunctionsRound.cpp b/src/Functions/FunctionsRound.cpp
similarity index 100%
rename from dbms/Functions/FunctionsRound.cpp
rename to src/Functions/FunctionsRound.cpp
diff --git a/dbms/Functions/FunctionsRound.h b/src/Functions/FunctionsRound.h
similarity index 100%
rename from dbms/Functions/FunctionsRound.h
rename to src/Functions/FunctionsRound.h
diff --git a/dbms/Functions/FunctionsStringArray.cpp b/src/Functions/FunctionsStringArray.cpp
similarity index 100%
rename from dbms/Functions/FunctionsStringArray.cpp
rename to src/Functions/FunctionsStringArray.cpp
diff --git a/dbms/Functions/FunctionsStringArray.h b/src/Functions/FunctionsStringArray.h
similarity index 100%
rename from dbms/Functions/FunctionsStringArray.h
rename to src/Functions/FunctionsStringArray.h
diff --git a/dbms/Functions/FunctionsStringRegex.cpp b/src/Functions/FunctionsStringRegex.cpp
similarity index 100%
rename from dbms/Functions/FunctionsStringRegex.cpp
rename to src/Functions/FunctionsStringRegex.cpp
diff --git a/dbms/Functions/FunctionsStringRegex.h b/src/Functions/FunctionsStringRegex.h
similarity index 100%
rename from dbms/Functions/FunctionsStringRegex.h
rename to src/Functions/FunctionsStringRegex.h
diff --git a/dbms/Functions/FunctionsStringSearch.h b/src/Functions/FunctionsStringSearch.h
similarity index 100%
rename from dbms/Functions/FunctionsStringSearch.h
rename to src/Functions/FunctionsStringSearch.h
diff --git a/dbms/Functions/FunctionsStringSearchToString.h b/src/Functions/FunctionsStringSearchToString.h
similarity index 100%
rename from dbms/Functions/FunctionsStringSearchToString.h
rename to src/Functions/FunctionsStringSearchToString.h
diff --git a/dbms/Functions/FunctionsStringSimilarity.cpp b/src/Functions/FunctionsStringSimilarity.cpp
similarity index 100%
rename from dbms/Functions/FunctionsStringSimilarity.cpp
rename to src/Functions/FunctionsStringSimilarity.cpp
diff --git a/dbms/Functions/FunctionsStringSimilarity.h b/src/Functions/FunctionsStringSimilarity.h
similarity index 100%
rename from dbms/Functions/FunctionsStringSimilarity.h
rename to src/Functions/FunctionsStringSimilarity.h
diff --git a/dbms/Functions/FunctionsVisitParam.h b/src/Functions/FunctionsVisitParam.h
similarity index 100%
rename from dbms/Functions/FunctionsVisitParam.h
rename to src/Functions/FunctionsVisitParam.h
diff --git a/dbms/Functions/GatherUtils/Algorithms.h b/src/Functions/GatherUtils/Algorithms.h
similarity index 100%
rename from dbms/Functions/GatherUtils/Algorithms.h
rename to src/Functions/GatherUtils/Algorithms.h
diff --git a/dbms/Functions/GatherUtils/ArraySinkVisitor.h b/src/Functions/GatherUtils/ArraySinkVisitor.h
similarity index 100%
rename from dbms/Functions/GatherUtils/ArraySinkVisitor.h
rename to src/Functions/GatherUtils/ArraySinkVisitor.h
diff --git a/dbms/Functions/GatherUtils/ArraySourceVisitor.h b/src/Functions/GatherUtils/ArraySourceVisitor.h
similarity index 100%
rename from dbms/Functions/GatherUtils/ArraySourceVisitor.h
rename to src/Functions/GatherUtils/ArraySourceVisitor.h
diff --git a/dbms/Functions/GatherUtils/CMakeLists.txt b/src/Functions/GatherUtils/CMakeLists.txt
similarity index 100%
rename from dbms/Functions/GatherUtils/CMakeLists.txt
rename to src/Functions/GatherUtils/CMakeLists.txt
diff --git a/dbms/Functions/GatherUtils/GatherUtils.h b/src/Functions/GatherUtils/GatherUtils.h
similarity index 100%
rename from dbms/Functions/GatherUtils/GatherUtils.h
rename to src/Functions/GatherUtils/GatherUtils.h
diff --git a/dbms/Functions/GatherUtils/IArraySink.h b/src/Functions/GatherUtils/IArraySink.h
similarity index 100%
rename from dbms/Functions/GatherUtils/IArraySink.h
rename to src/Functions/GatherUtils/IArraySink.h
diff --git a/dbms/Functions/GatherUtils/IArraySource.h b/src/Functions/GatherUtils/IArraySource.h
similarity index 100%
rename from dbms/Functions/GatherUtils/IArraySource.h
rename to src/Functions/GatherUtils/IArraySource.h
diff --git a/dbms/Functions/GatherUtils/IValueSource.h b/src/Functions/GatherUtils/IValueSource.h
similarity index 100%
rename from dbms/Functions/GatherUtils/IValueSource.h
rename to src/Functions/GatherUtils/IValueSource.h
diff --git a/dbms/Functions/GatherUtils/Selectors.h b/src/Functions/GatherUtils/Selectors.h
similarity index 100%
rename from dbms/Functions/GatherUtils/Selectors.h
rename to src/Functions/GatherUtils/Selectors.h
diff --git a/dbms/Functions/GatherUtils/Sinks.h b/src/Functions/GatherUtils/Sinks.h
similarity index 100%
rename from dbms/Functions/GatherUtils/Sinks.h
rename to src/Functions/GatherUtils/Sinks.h
diff --git a/dbms/Functions/GatherUtils/Slices.h b/src/Functions/GatherUtils/Slices.h
similarity index 100%
rename from dbms/Functions/GatherUtils/Slices.h
rename to src/Functions/GatherUtils/Slices.h
diff --git a/dbms/Functions/GatherUtils/Sources.h b/src/Functions/GatherUtils/Sources.h
similarity index 100%
rename from dbms/Functions/GatherUtils/Sources.h
rename to src/Functions/GatherUtils/Sources.h
diff --git a/dbms/Functions/GatherUtils/ValueSourceVisitor.h b/src/Functions/GatherUtils/ValueSourceVisitor.h
similarity index 100%
rename from dbms/Functions/GatherUtils/ValueSourceVisitor.h
rename to src/Functions/GatherUtils/ValueSourceVisitor.h
diff --git a/dbms/Functions/GatherUtils/concat.cpp b/src/Functions/GatherUtils/concat.cpp
similarity index 100%
rename from dbms/Functions/GatherUtils/concat.cpp
rename to src/Functions/GatherUtils/concat.cpp
diff --git a/dbms/Functions/GatherUtils/createArraySink.cpp b/src/Functions/GatherUtils/createArraySink.cpp
similarity index 100%
rename from dbms/Functions/GatherUtils/createArraySink.cpp
rename to src/Functions/GatherUtils/createArraySink.cpp
diff --git a/dbms/Functions/GatherUtils/createArraySource.cpp b/src/Functions/GatherUtils/createArraySource.cpp
similarity index 100%
rename from dbms/Functions/GatherUtils/createArraySource.cpp
rename to src/Functions/GatherUtils/createArraySource.cpp
diff --git a/dbms/Functions/GatherUtils/createValueSource.cpp b/src/Functions/GatherUtils/createValueSource.cpp
similarity index 100%
rename from dbms/Functions/GatherUtils/createValueSource.cpp
rename to src/Functions/GatherUtils/createValueSource.cpp
diff --git a/dbms/Functions/GatherUtils/has.cpp b/src/Functions/GatherUtils/has.cpp
similarity index 100%
rename from dbms/Functions/GatherUtils/has.cpp
rename to src/Functions/GatherUtils/has.cpp
diff --git a/dbms/Functions/GatherUtils/push.cpp b/src/Functions/GatherUtils/push.cpp
similarity index 100%
rename from dbms/Functions/GatherUtils/push.cpp
rename to src/Functions/GatherUtils/push.cpp
diff --git a/dbms/Functions/GatherUtils/resizeConstantSize.cpp b/src/Functions/GatherUtils/resizeConstantSize.cpp
similarity index 100%
rename from dbms/Functions/GatherUtils/resizeConstantSize.cpp
rename to src/Functions/GatherUtils/resizeConstantSize.cpp
diff --git a/dbms/Functions/GatherUtils/resizeDynamicSize.cpp b/src/Functions/GatherUtils/resizeDynamicSize.cpp
similarity index 100%
rename from dbms/Functions/GatherUtils/resizeDynamicSize.cpp
rename to src/Functions/GatherUtils/resizeDynamicSize.cpp
diff --git a/dbms/Functions/GatherUtils/sliceDynamicOffsetBounded.cpp b/src/Functions/GatherUtils/sliceDynamicOffsetBounded.cpp
similarity index 100%
rename from dbms/Functions/GatherUtils/sliceDynamicOffsetBounded.cpp
rename to src/Functions/GatherUtils/sliceDynamicOffsetBounded.cpp
diff --git a/dbms/Functions/GatherUtils/sliceDynamicOffsetUnbounded.cpp b/src/Functions/GatherUtils/sliceDynamicOffsetUnbounded.cpp
similarity index 100%
rename from dbms/Functions/GatherUtils/sliceDynamicOffsetUnbounded.cpp
rename to src/Functions/GatherUtils/sliceDynamicOffsetUnbounded.cpp
diff --git a/dbms/Functions/GatherUtils/sliceFromLeftConstantOffsetBounded.cpp b/src/Functions/GatherUtils/sliceFromLeftConstantOffsetBounded.cpp
similarity index 100%
rename from dbms/Functions/GatherUtils/sliceFromLeftConstantOffsetBounded.cpp
rename to src/Functions/GatherUtils/sliceFromLeftConstantOffsetBounded.cpp
diff --git a/dbms/Functions/GatherUtils/sliceFromLeftConstantOffsetUnbounded.cpp b/src/Functions/GatherUtils/sliceFromLeftConstantOffsetUnbounded.cpp
similarity index 100%
rename from dbms/Functions/GatherUtils/sliceFromLeftConstantOffsetUnbounded.cpp
rename to src/Functions/GatherUtils/sliceFromLeftConstantOffsetUnbounded.cpp
diff --git a/dbms/Functions/GatherUtils/sliceFromRightConstantOffsetBounded.cpp b/src/Functions/GatherUtils/sliceFromRightConstantOffsetBounded.cpp
similarity index 100%
rename from dbms/Functions/GatherUtils/sliceFromRightConstantOffsetBounded.cpp
rename to src/Functions/GatherUtils/sliceFromRightConstantOffsetBounded.cpp
diff --git a/dbms/Functions/GatherUtils/sliceFromRightConstantOffsetUnbounded.cpp b/src/Functions/GatherUtils/sliceFromRightConstantOffsetUnbounded.cpp
similarity index 100%
rename from dbms/Functions/GatherUtils/sliceFromRightConstantOffsetUnbounded.cpp
rename to src/Functions/GatherUtils/sliceFromRightConstantOffsetUnbounded.cpp
diff --git a/dbms/Functions/GeoHash.cpp b/src/Functions/GeoHash.cpp
similarity index 100%
rename from dbms/Functions/GeoHash.cpp
rename to src/Functions/GeoHash.cpp
diff --git a/dbms/Functions/GeoHash.h b/src/Functions/GeoHash.h
similarity index 100%
rename from dbms/Functions/GeoHash.h
rename to src/Functions/GeoHash.h
diff --git a/dbms/Functions/HasTokenImpl.h b/src/Functions/HasTokenImpl.h
similarity index 100%
rename from dbms/Functions/HasTokenImpl.h
rename to src/Functions/HasTokenImpl.h
diff --git a/dbms/Functions/IFunction.cpp b/src/Functions/IFunction.cpp
similarity index 100%
rename from dbms/Functions/IFunction.cpp
rename to src/Functions/IFunction.cpp
diff --git a/dbms/Functions/IFunction.h b/src/Functions/IFunction.h
similarity index 100%
rename from dbms/Functions/IFunction.h
rename to src/Functions/IFunction.h
diff --git a/dbms/Functions/IFunctionAdaptors.h b/src/Functions/IFunctionAdaptors.h
similarity index 100%
rename from dbms/Functions/IFunctionAdaptors.h
rename to src/Functions/IFunctionAdaptors.h
diff --git a/dbms/Functions/IFunctionImpl.h b/src/Functions/IFunctionImpl.h
similarity index 100%
rename from dbms/Functions/IFunctionImpl.h
rename to src/Functions/IFunctionImpl.h
diff --git a/dbms/Functions/LowerUpperImpl.h b/src/Functions/LowerUpperImpl.h
similarity index 100%
rename from dbms/Functions/LowerUpperImpl.h
rename to src/Functions/LowerUpperImpl.h
diff --git a/dbms/Functions/LowerUpperUTF8Impl.h b/src/Functions/LowerUpperUTF8Impl.h
similarity index 100%
rename from dbms/Functions/LowerUpperUTF8Impl.h
rename to src/Functions/LowerUpperUTF8Impl.h
diff --git a/dbms/Functions/MultiSearchAllPositionsImpl.h b/src/Functions/MultiSearchAllPositionsImpl.h
similarity index 100%
rename from
dbms/Functions/MultiSearchAllPositionsImpl.h rename to src/Functions/MultiSearchAllPositionsImpl.h diff --git a/dbms/Functions/MultiSearchFirstIndexImpl.h b/src/Functions/MultiSearchFirstIndexImpl.h similarity index 100% rename from dbms/Functions/MultiSearchFirstIndexImpl.h rename to src/Functions/MultiSearchFirstIndexImpl.h diff --git a/dbms/Functions/MultiSearchFirstPositionImpl.h b/src/Functions/MultiSearchFirstPositionImpl.h similarity index 100% rename from dbms/Functions/MultiSearchFirstPositionImpl.h rename to src/Functions/MultiSearchFirstPositionImpl.h diff --git a/dbms/Functions/MultiSearchImpl.h b/src/Functions/MultiSearchImpl.h similarity index 100% rename from dbms/Functions/MultiSearchImpl.h rename to src/Functions/MultiSearchImpl.h diff --git a/dbms/Functions/PolygonUtils.h b/src/Functions/PolygonUtils.h similarity index 100% rename from dbms/Functions/PolygonUtils.h rename to src/Functions/PolygonUtils.h diff --git a/dbms/Functions/PositionImpl.h b/src/Functions/PositionImpl.h similarity index 100% rename from dbms/Functions/PositionImpl.h rename to src/Functions/PositionImpl.h diff --git a/dbms/Functions/RapidJSONParser.h b/src/Functions/RapidJSONParser.h similarity index 100% rename from dbms/Functions/RapidJSONParser.h rename to src/Functions/RapidJSONParser.h diff --git a/dbms/Functions/Regexps.h b/src/Functions/Regexps.h similarity index 100% rename from dbms/Functions/Regexps.h rename to src/Functions/Regexps.h diff --git a/dbms/Functions/SimdJSONParser.h b/src/Functions/SimdJSONParser.h similarity index 100% rename from dbms/Functions/SimdJSONParser.h rename to src/Functions/SimdJSONParser.h diff --git a/dbms/Functions/URL/CMakeLists.txt b/src/Functions/URL/CMakeLists.txt similarity index 100% rename from dbms/Functions/URL/CMakeLists.txt rename to src/Functions/URL/CMakeLists.txt diff --git a/dbms/Functions/URL/FunctionsURL.h b/src/Functions/URL/FunctionsURL.h similarity index 100% rename from dbms/Functions/URL/FunctionsURL.h rename to src/Functions/URL/FunctionsURL.h diff --git a/dbms/Functions/URL/URLHierarchy.cpp b/src/Functions/URL/URLHierarchy.cpp similarity index 100% rename from dbms/Functions/URL/URLHierarchy.cpp rename to src/Functions/URL/URLHierarchy.cpp diff --git a/dbms/Functions/URL/URLPathHierarchy.cpp b/src/Functions/URL/URLPathHierarchy.cpp similarity index 100% rename from dbms/Functions/URL/URLPathHierarchy.cpp rename to src/Functions/URL/URLPathHierarchy.cpp diff --git a/dbms/Functions/URL/basename.cpp b/src/Functions/URL/basename.cpp similarity index 100% rename from dbms/Functions/URL/basename.cpp rename to src/Functions/URL/basename.cpp diff --git a/dbms/Functions/URL/config_functions_url.h.in b/src/Functions/URL/config_functions_url.h.in similarity index 100% rename from dbms/Functions/URL/config_functions_url.h.in rename to src/Functions/URL/config_functions_url.h.in diff --git a/dbms/Functions/URL/cutFragment.cpp b/src/Functions/URL/cutFragment.cpp similarity index 100% rename from dbms/Functions/URL/cutFragment.cpp rename to src/Functions/URL/cutFragment.cpp diff --git a/dbms/Functions/URL/cutQueryString.cpp b/src/Functions/URL/cutQueryString.cpp similarity index 100% rename from dbms/Functions/URL/cutQueryString.cpp rename to src/Functions/URL/cutQueryString.cpp diff --git a/dbms/Functions/URL/cutQueryStringAndFragment.cpp b/src/Functions/URL/cutQueryStringAndFragment.cpp similarity index 100% rename from dbms/Functions/URL/cutQueryStringAndFragment.cpp rename to src/Functions/URL/cutQueryStringAndFragment.cpp diff --git 
a/dbms/Functions/URL/cutToFirstSignificantSubdomain.cpp b/src/Functions/URL/cutToFirstSignificantSubdomain.cpp similarity index 100% rename from dbms/Functions/URL/cutToFirstSignificantSubdomain.cpp rename to src/Functions/URL/cutToFirstSignificantSubdomain.cpp diff --git a/dbms/Functions/URL/cutURLParameter.cpp b/src/Functions/URL/cutURLParameter.cpp similarity index 100% rename from dbms/Functions/URL/cutURLParameter.cpp rename to src/Functions/URL/cutURLParameter.cpp diff --git a/dbms/Functions/URL/cutWWW.cpp b/src/Functions/URL/cutWWW.cpp similarity index 100% rename from dbms/Functions/URL/cutWWW.cpp rename to src/Functions/URL/cutWWW.cpp diff --git a/dbms/Functions/URL/decodeURLComponent.cpp b/src/Functions/URL/decodeURLComponent.cpp similarity index 100% rename from dbms/Functions/URL/decodeURLComponent.cpp rename to src/Functions/URL/decodeURLComponent.cpp diff --git a/dbms/Functions/URL/domain.cpp b/src/Functions/URL/domain.cpp similarity index 100% rename from dbms/Functions/URL/domain.cpp rename to src/Functions/URL/domain.cpp diff --git a/dbms/Functions/URL/domain.h b/src/Functions/URL/domain.h similarity index 100% rename from dbms/Functions/URL/domain.h rename to src/Functions/URL/domain.h diff --git a/dbms/Functions/URL/domainWithoutWWW.cpp b/src/Functions/URL/domainWithoutWWW.cpp similarity index 100% rename from dbms/Functions/URL/domainWithoutWWW.cpp rename to src/Functions/URL/domainWithoutWWW.cpp diff --git a/dbms/Functions/URL/extractURLParameter.cpp b/src/Functions/URL/extractURLParameter.cpp similarity index 100% rename from dbms/Functions/URL/extractURLParameter.cpp rename to src/Functions/URL/extractURLParameter.cpp diff --git a/dbms/Functions/URL/extractURLParameterNames.cpp b/src/Functions/URL/extractURLParameterNames.cpp similarity index 100% rename from dbms/Functions/URL/extractURLParameterNames.cpp rename to src/Functions/URL/extractURLParameterNames.cpp diff --git a/dbms/Functions/URL/extractURLParameters.cpp b/src/Functions/URL/extractURLParameters.cpp similarity index 100% rename from dbms/Functions/URL/extractURLParameters.cpp rename to src/Functions/URL/extractURLParameters.cpp diff --git a/dbms/Functions/URL/firstSignificantSubdomain.cpp b/src/Functions/URL/firstSignificantSubdomain.cpp similarity index 100% rename from dbms/Functions/URL/firstSignificantSubdomain.cpp rename to src/Functions/URL/firstSignificantSubdomain.cpp diff --git a/dbms/Functions/URL/firstSignificantSubdomain.h b/src/Functions/URL/firstSignificantSubdomain.h similarity index 100% rename from dbms/Functions/URL/firstSignificantSubdomain.h rename to src/Functions/URL/firstSignificantSubdomain.h diff --git a/dbms/Functions/URL/fragment.cpp b/src/Functions/URL/fragment.cpp similarity index 100% rename from dbms/Functions/URL/fragment.cpp rename to src/Functions/URL/fragment.cpp diff --git a/dbms/Functions/URL/fragment.h b/src/Functions/URL/fragment.h similarity index 100% rename from dbms/Functions/URL/fragment.h rename to src/Functions/URL/fragment.h diff --git a/dbms/Functions/URL/path.cpp b/src/Functions/URL/path.cpp similarity index 100% rename from dbms/Functions/URL/path.cpp rename to src/Functions/URL/path.cpp diff --git a/dbms/Functions/URL/pathFull.cpp b/src/Functions/URL/pathFull.cpp similarity index 100% rename from dbms/Functions/URL/pathFull.cpp rename to src/Functions/URL/pathFull.cpp diff --git a/dbms/Functions/URL/protocol.cpp b/src/Functions/URL/protocol.cpp similarity index 100% rename from dbms/Functions/URL/protocol.cpp rename to src/Functions/URL/protocol.cpp diff 
--git a/dbms/Functions/URL/protocol.h b/src/Functions/URL/protocol.h similarity index 100% rename from dbms/Functions/URL/protocol.h rename to src/Functions/URL/protocol.h diff --git a/dbms/Functions/URL/queryString.cpp b/src/Functions/URL/queryString.cpp similarity index 100% rename from dbms/Functions/URL/queryString.cpp rename to src/Functions/URL/queryString.cpp diff --git a/dbms/Functions/URL/queryString.h b/src/Functions/URL/queryString.h similarity index 100% rename from dbms/Functions/URL/queryString.h rename to src/Functions/URL/queryString.h diff --git a/dbms/Functions/URL/queryStringAndFragment.cpp b/src/Functions/URL/queryStringAndFragment.cpp similarity index 100% rename from dbms/Functions/URL/queryStringAndFragment.cpp rename to src/Functions/URL/queryStringAndFragment.cpp diff --git a/dbms/Functions/URL/queryStringAndFragment.h b/src/Functions/URL/queryStringAndFragment.h similarity index 100% rename from dbms/Functions/URL/queryStringAndFragment.h rename to src/Functions/URL/queryStringAndFragment.h diff --git a/dbms/Functions/URL/registerFunctionsURL.cpp b/src/Functions/URL/registerFunctionsURL.cpp similarity index 100% rename from dbms/Functions/URL/registerFunctionsURL.cpp rename to src/Functions/URL/registerFunctionsURL.cpp diff --git a/dbms/Functions/URL/tldLookup.generated.cpp b/src/Functions/URL/tldLookup.generated.cpp similarity index 100% rename from dbms/Functions/URL/tldLookup.generated.cpp rename to src/Functions/URL/tldLookup.generated.cpp diff --git a/dbms/Functions/URL/tldLookup.gperf b/src/Functions/URL/tldLookup.gperf similarity index 100% rename from dbms/Functions/URL/tldLookup.gperf rename to src/Functions/URL/tldLookup.gperf diff --git a/dbms/Functions/URL/tldLookup.h b/src/Functions/URL/tldLookup.h similarity index 100% rename from dbms/Functions/URL/tldLookup.h rename to src/Functions/URL/tldLookup.h diff --git a/dbms/Functions/URL/tldLookup.sh b/src/Functions/URL/tldLookup.sh similarity index 100% rename from dbms/Functions/URL/tldLookup.sh rename to src/Functions/URL/tldLookup.sh diff --git a/dbms/Functions/URL/topLevelDomain.cpp b/src/Functions/URL/topLevelDomain.cpp similarity index 100% rename from dbms/Functions/URL/topLevelDomain.cpp rename to src/Functions/URL/topLevelDomain.cpp diff --git a/dbms/Functions/abs.cpp b/src/Functions/abs.cpp similarity index 100% rename from dbms/Functions/abs.cpp rename to src/Functions/abs.cpp diff --git a/dbms/Functions/acos.cpp b/src/Functions/acos.cpp similarity index 100% rename from dbms/Functions/acos.cpp rename to src/Functions/acos.cpp diff --git a/dbms/Functions/addDays.cpp b/src/Functions/addDays.cpp similarity index 100% rename from dbms/Functions/addDays.cpp rename to src/Functions/addDays.cpp diff --git a/dbms/Functions/addHours.cpp b/src/Functions/addHours.cpp similarity index 100% rename from dbms/Functions/addHours.cpp rename to src/Functions/addHours.cpp diff --git a/dbms/Functions/addMinutes.cpp b/src/Functions/addMinutes.cpp similarity index 100% rename from dbms/Functions/addMinutes.cpp rename to src/Functions/addMinutes.cpp diff --git a/dbms/Functions/addMonths.cpp b/src/Functions/addMonths.cpp similarity index 100% rename from dbms/Functions/addMonths.cpp rename to src/Functions/addMonths.cpp diff --git a/dbms/Functions/addQuarters.cpp b/src/Functions/addQuarters.cpp similarity index 100% rename from dbms/Functions/addQuarters.cpp rename to src/Functions/addQuarters.cpp diff --git a/dbms/Functions/addSeconds.cpp b/src/Functions/addSeconds.cpp similarity index 100% rename from 
dbms/Functions/addSeconds.cpp rename to src/Functions/addSeconds.cpp diff --git a/dbms/Functions/addWeeks.cpp b/src/Functions/addWeeks.cpp similarity index 100% rename from dbms/Functions/addWeeks.cpp rename to src/Functions/addWeeks.cpp diff --git a/dbms/Functions/addYears.cpp b/src/Functions/addYears.cpp similarity index 100% rename from dbms/Functions/addYears.cpp rename to src/Functions/addYears.cpp diff --git a/dbms/Functions/addressToLine.cpp b/src/Functions/addressToLine.cpp similarity index 100% rename from dbms/Functions/addressToLine.cpp rename to src/Functions/addressToLine.cpp diff --git a/dbms/Functions/addressToSymbol.cpp b/src/Functions/addressToSymbol.cpp similarity index 100% rename from dbms/Functions/addressToSymbol.cpp rename to src/Functions/addressToSymbol.cpp diff --git a/dbms/Functions/appendTrailingCharIfAbsent.cpp b/src/Functions/appendTrailingCharIfAbsent.cpp similarity index 100% rename from dbms/Functions/appendTrailingCharIfAbsent.cpp rename to src/Functions/appendTrailingCharIfAbsent.cpp diff --git a/dbms/Functions/array/CMakeLists.txt b/src/Functions/array/CMakeLists.txt similarity index 100% rename from dbms/Functions/array/CMakeLists.txt rename to src/Functions/array/CMakeLists.txt diff --git a/dbms/Functions/array/FunctionArrayMapped.h b/src/Functions/array/FunctionArrayMapped.h similarity index 100% rename from dbms/Functions/array/FunctionArrayMapped.h rename to src/Functions/array/FunctionArrayMapped.h diff --git a/dbms/Functions/array/array.cpp b/src/Functions/array/array.cpp similarity index 100% rename from dbms/Functions/array/array.cpp rename to src/Functions/array/array.cpp diff --git a/dbms/Functions/array/arrayAUC.cpp b/src/Functions/array/arrayAUC.cpp similarity index 100% rename from dbms/Functions/array/arrayAUC.cpp rename to src/Functions/array/arrayAUC.cpp diff --git a/dbms/Functions/array/arrayAll.cpp b/src/Functions/array/arrayAll.cpp similarity index 100% rename from dbms/Functions/array/arrayAll.cpp rename to src/Functions/array/arrayAll.cpp diff --git a/dbms/Functions/array/arrayCompact.cpp b/src/Functions/array/arrayCompact.cpp similarity index 100% rename from dbms/Functions/array/arrayCompact.cpp rename to src/Functions/array/arrayCompact.cpp diff --git a/dbms/Functions/array/arrayConcat.cpp b/src/Functions/array/arrayConcat.cpp similarity index 100% rename from dbms/Functions/array/arrayConcat.cpp rename to src/Functions/array/arrayConcat.cpp diff --git a/dbms/Functions/array/arrayCount.cpp b/src/Functions/array/arrayCount.cpp similarity index 100% rename from dbms/Functions/array/arrayCount.cpp rename to src/Functions/array/arrayCount.cpp diff --git a/dbms/Functions/array/arrayCumSum.cpp b/src/Functions/array/arrayCumSum.cpp similarity index 100% rename from dbms/Functions/array/arrayCumSum.cpp rename to src/Functions/array/arrayCumSum.cpp diff --git a/dbms/Functions/array/arrayCumSumNonNegative.cpp b/src/Functions/array/arrayCumSumNonNegative.cpp similarity index 100% rename from dbms/Functions/array/arrayCumSumNonNegative.cpp rename to src/Functions/array/arrayCumSumNonNegative.cpp diff --git a/dbms/Functions/array/arrayDifference.cpp b/src/Functions/array/arrayDifference.cpp similarity index 100% rename from dbms/Functions/array/arrayDifference.cpp rename to src/Functions/array/arrayDifference.cpp diff --git a/dbms/Functions/array/arrayDistinct.cpp b/src/Functions/array/arrayDistinct.cpp similarity index 100% rename from dbms/Functions/array/arrayDistinct.cpp rename to src/Functions/array/arrayDistinct.cpp diff --git 
a/dbms/Functions/array/arrayElement.cpp b/src/Functions/array/arrayElement.cpp similarity index 100% rename from dbms/Functions/array/arrayElement.cpp rename to src/Functions/array/arrayElement.cpp diff --git a/dbms/Functions/array/arrayEnumerate.cpp b/src/Functions/array/arrayEnumerate.cpp similarity index 100% rename from dbms/Functions/array/arrayEnumerate.cpp rename to src/Functions/array/arrayEnumerate.cpp diff --git a/dbms/Functions/array/arrayEnumerateDense.cpp b/src/Functions/array/arrayEnumerateDense.cpp similarity index 100% rename from dbms/Functions/array/arrayEnumerateDense.cpp rename to src/Functions/array/arrayEnumerateDense.cpp diff --git a/dbms/Functions/array/arrayEnumerateDenseRanked.cpp b/src/Functions/array/arrayEnumerateDenseRanked.cpp similarity index 100% rename from dbms/Functions/array/arrayEnumerateDenseRanked.cpp rename to src/Functions/array/arrayEnumerateDenseRanked.cpp diff --git a/dbms/Functions/array/arrayEnumerateExtended.h b/src/Functions/array/arrayEnumerateExtended.h similarity index 100% rename from dbms/Functions/array/arrayEnumerateExtended.h rename to src/Functions/array/arrayEnumerateExtended.h diff --git a/dbms/Functions/array/arrayEnumerateRanked.cpp b/src/Functions/array/arrayEnumerateRanked.cpp similarity index 100% rename from dbms/Functions/array/arrayEnumerateRanked.cpp rename to src/Functions/array/arrayEnumerateRanked.cpp diff --git a/dbms/Functions/array/arrayEnumerateRanked.h b/src/Functions/array/arrayEnumerateRanked.h similarity index 100% rename from dbms/Functions/array/arrayEnumerateRanked.h rename to src/Functions/array/arrayEnumerateRanked.h diff --git a/dbms/Functions/array/arrayEnumerateUniq.cpp b/src/Functions/array/arrayEnumerateUniq.cpp similarity index 100% rename from dbms/Functions/array/arrayEnumerateUniq.cpp rename to src/Functions/array/arrayEnumerateUniq.cpp diff --git a/dbms/Functions/array/arrayEnumerateUniqRanked.cpp b/src/Functions/array/arrayEnumerateUniqRanked.cpp similarity index 100% rename from dbms/Functions/array/arrayEnumerateUniqRanked.cpp rename to src/Functions/array/arrayEnumerateUniqRanked.cpp diff --git a/dbms/Functions/array/arrayExists.cpp b/src/Functions/array/arrayExists.cpp similarity index 100% rename from dbms/Functions/array/arrayExists.cpp rename to src/Functions/array/arrayExists.cpp diff --git a/dbms/Functions/array/arrayFill.cpp b/src/Functions/array/arrayFill.cpp similarity index 100% rename from dbms/Functions/array/arrayFill.cpp rename to src/Functions/array/arrayFill.cpp diff --git a/dbms/Functions/array/arrayFilter.cpp b/src/Functions/array/arrayFilter.cpp similarity index 100% rename from dbms/Functions/array/arrayFilter.cpp rename to src/Functions/array/arrayFilter.cpp diff --git a/dbms/Functions/array/arrayFirst.cpp b/src/Functions/array/arrayFirst.cpp similarity index 100% rename from dbms/Functions/array/arrayFirst.cpp rename to src/Functions/array/arrayFirst.cpp diff --git a/dbms/Functions/array/arrayFirstIndex.cpp b/src/Functions/array/arrayFirstIndex.cpp similarity index 100% rename from dbms/Functions/array/arrayFirstIndex.cpp rename to src/Functions/array/arrayFirstIndex.cpp diff --git a/dbms/Functions/array/arrayFlatten.cpp b/src/Functions/array/arrayFlatten.cpp similarity index 100% rename from dbms/Functions/array/arrayFlatten.cpp rename to src/Functions/array/arrayFlatten.cpp diff --git a/dbms/Functions/array/arrayIndex.h b/src/Functions/array/arrayIndex.h similarity index 100% rename from dbms/Functions/array/arrayIndex.h rename to src/Functions/array/arrayIndex.h diff 
--git a/dbms/Functions/array/arrayIntersect.cpp b/src/Functions/array/arrayIntersect.cpp similarity index 100% rename from dbms/Functions/array/arrayIntersect.cpp rename to src/Functions/array/arrayIntersect.cpp diff --git a/dbms/Functions/array/arrayJoin.cpp b/src/Functions/array/arrayJoin.cpp similarity index 100% rename from dbms/Functions/array/arrayJoin.cpp rename to src/Functions/array/arrayJoin.cpp diff --git a/dbms/Functions/array/arrayMap.cpp b/src/Functions/array/arrayMap.cpp similarity index 100% rename from dbms/Functions/array/arrayMap.cpp rename to src/Functions/array/arrayMap.cpp diff --git a/dbms/Functions/array/arrayPop.h b/src/Functions/array/arrayPop.h similarity index 100% rename from dbms/Functions/array/arrayPop.h rename to src/Functions/array/arrayPop.h diff --git a/dbms/Functions/array/arrayPopBack.cpp b/src/Functions/array/arrayPopBack.cpp similarity index 100% rename from dbms/Functions/array/arrayPopBack.cpp rename to src/Functions/array/arrayPopBack.cpp diff --git a/dbms/Functions/array/arrayPopFront.cpp b/src/Functions/array/arrayPopFront.cpp similarity index 100% rename from dbms/Functions/array/arrayPopFront.cpp rename to src/Functions/array/arrayPopFront.cpp diff --git a/dbms/Functions/array/arrayPush.h b/src/Functions/array/arrayPush.h similarity index 100% rename from dbms/Functions/array/arrayPush.h rename to src/Functions/array/arrayPush.h diff --git a/dbms/Functions/array/arrayPushBack.cpp b/src/Functions/array/arrayPushBack.cpp similarity index 100% rename from dbms/Functions/array/arrayPushBack.cpp rename to src/Functions/array/arrayPushBack.cpp diff --git a/dbms/Functions/array/arrayPushFront.cpp b/src/Functions/array/arrayPushFront.cpp similarity index 100% rename from dbms/Functions/array/arrayPushFront.cpp rename to src/Functions/array/arrayPushFront.cpp diff --git a/dbms/Functions/array/arrayReduce.cpp b/src/Functions/array/arrayReduce.cpp similarity index 100% rename from dbms/Functions/array/arrayReduce.cpp rename to src/Functions/array/arrayReduce.cpp diff --git a/dbms/Functions/array/arrayReduceInRanges.cpp b/src/Functions/array/arrayReduceInRanges.cpp similarity index 100% rename from dbms/Functions/array/arrayReduceInRanges.cpp rename to src/Functions/array/arrayReduceInRanges.cpp diff --git a/dbms/Functions/array/arrayResize.cpp b/src/Functions/array/arrayResize.cpp similarity index 100% rename from dbms/Functions/array/arrayResize.cpp rename to src/Functions/array/arrayResize.cpp diff --git a/dbms/Functions/array/arrayReverse.cpp b/src/Functions/array/arrayReverse.cpp similarity index 100% rename from dbms/Functions/array/arrayReverse.cpp rename to src/Functions/array/arrayReverse.cpp diff --git a/dbms/Functions/array/arrayScalarProduct.h b/src/Functions/array/arrayScalarProduct.h similarity index 100% rename from dbms/Functions/array/arrayScalarProduct.h rename to src/Functions/array/arrayScalarProduct.h diff --git a/dbms/Functions/array/arraySlice.cpp b/src/Functions/array/arraySlice.cpp similarity index 100% rename from dbms/Functions/array/arraySlice.cpp rename to src/Functions/array/arraySlice.cpp diff --git a/dbms/Functions/array/arraySort.cpp b/src/Functions/array/arraySort.cpp similarity index 100% rename from dbms/Functions/array/arraySort.cpp rename to src/Functions/array/arraySort.cpp diff --git a/dbms/Functions/array/arraySplit.cpp b/src/Functions/array/arraySplit.cpp similarity index 100% rename from dbms/Functions/array/arraySplit.cpp rename to src/Functions/array/arraySplit.cpp diff --git a/dbms/Functions/array/arraySum.cpp 
b/src/Functions/array/arraySum.cpp similarity index 100% rename from dbms/Functions/array/arraySum.cpp rename to src/Functions/array/arraySum.cpp diff --git a/dbms/Functions/array/arrayUniq.cpp b/src/Functions/array/arrayUniq.cpp similarity index 100% rename from dbms/Functions/array/arrayUniq.cpp rename to src/Functions/array/arrayUniq.cpp diff --git a/dbms/Functions/array/arrayWithConstant.cpp b/src/Functions/array/arrayWithConstant.cpp similarity index 100% rename from dbms/Functions/array/arrayWithConstant.cpp rename to src/Functions/array/arrayWithConstant.cpp diff --git a/dbms/Functions/array/arrayZip.cpp b/src/Functions/array/arrayZip.cpp similarity index 100% rename from dbms/Functions/array/arrayZip.cpp rename to src/Functions/array/arrayZip.cpp diff --git a/dbms/Functions/array/countEqual.cpp b/src/Functions/array/countEqual.cpp similarity index 100% rename from dbms/Functions/array/countEqual.cpp rename to src/Functions/array/countEqual.cpp diff --git a/dbms/Functions/array/emptyArray.cpp b/src/Functions/array/emptyArray.cpp similarity index 100% rename from dbms/Functions/array/emptyArray.cpp rename to src/Functions/array/emptyArray.cpp diff --git a/dbms/Functions/array/emptyArrayToSingle.cpp b/src/Functions/array/emptyArrayToSingle.cpp similarity index 100% rename from dbms/Functions/array/emptyArrayToSingle.cpp rename to src/Functions/array/emptyArrayToSingle.cpp diff --git a/dbms/Functions/array/has.cpp b/src/Functions/array/has.cpp similarity index 100% rename from dbms/Functions/array/has.cpp rename to src/Functions/array/has.cpp diff --git a/dbms/Functions/array/hasAll.cpp b/src/Functions/array/hasAll.cpp similarity index 100% rename from dbms/Functions/array/hasAll.cpp rename to src/Functions/array/hasAll.cpp diff --git a/dbms/Functions/array/hasAllAny.h b/src/Functions/array/hasAllAny.h similarity index 100% rename from dbms/Functions/array/hasAllAny.h rename to src/Functions/array/hasAllAny.h diff --git a/dbms/Functions/array/hasAny.cpp b/src/Functions/array/hasAny.cpp similarity index 100% rename from dbms/Functions/array/hasAny.cpp rename to src/Functions/array/hasAny.cpp diff --git a/dbms/Functions/array/indexOf.cpp b/src/Functions/array/indexOf.cpp similarity index 100% rename from dbms/Functions/array/indexOf.cpp rename to src/Functions/array/indexOf.cpp diff --git a/dbms/Functions/array/length.cpp b/src/Functions/array/length.cpp similarity index 100% rename from dbms/Functions/array/length.cpp rename to src/Functions/array/length.cpp diff --git a/dbms/Functions/array/range.cpp b/src/Functions/array/range.cpp similarity index 100% rename from dbms/Functions/array/range.cpp rename to src/Functions/array/range.cpp diff --git a/dbms/Functions/array/registerFunctionsArray.cpp b/src/Functions/array/registerFunctionsArray.cpp similarity index 100% rename from dbms/Functions/array/registerFunctionsArray.cpp rename to src/Functions/array/registerFunctionsArray.cpp diff --git a/dbms/Functions/asin.cpp b/src/Functions/asin.cpp similarity index 100% rename from dbms/Functions/asin.cpp rename to src/Functions/asin.cpp diff --git a/dbms/Functions/assumeNotNull.cpp b/src/Functions/assumeNotNull.cpp similarity index 100% rename from dbms/Functions/assumeNotNull.cpp rename to src/Functions/assumeNotNull.cpp diff --git a/dbms/Functions/atan.cpp b/src/Functions/atan.cpp similarity index 100% rename from dbms/Functions/atan.cpp rename to src/Functions/atan.cpp diff --git a/dbms/Functions/bar.cpp b/src/Functions/bar.cpp similarity index 100% rename from dbms/Functions/bar.cpp rename 
to src/Functions/bar.cpp
diff --git a/dbms/Functions/base64Decode.cpp b/src/Functions/base64Decode.cpp
similarity index 100%
rename from dbms/Functions/base64Decode.cpp
rename to src/Functions/base64Decode.cpp
diff --git a/dbms/Functions/base64Encode.cpp b/src/Functions/base64Encode.cpp
similarity index 100%
rename from dbms/Functions/base64Encode.cpp
rename to src/Functions/base64Encode.cpp
diff --git a/dbms/Functions/bitAnd.cpp b/src/Functions/bitAnd.cpp
similarity index 100%
rename from dbms/Functions/bitAnd.cpp
rename to src/Functions/bitAnd.cpp
diff --git a/dbms/Functions/bitBoolMaskAnd.cpp b/src/Functions/bitBoolMaskAnd.cpp
similarity index 96%
rename from dbms/Functions/bitBoolMaskAnd.cpp
rename to src/Functions/bitBoolMaskAnd.cpp
index 09d2122abde..2c55e39506c 100644
--- a/dbms/Functions/bitBoolMaskAnd.cpp
+++ b/src/Functions/bitBoolMaskAnd.cpp
@@ -10,7 +10,7 @@ namespace DB
         extern const int BAD_CAST;
     }
 
-    /// Working with UInt8: last bit = can be true, previous = can be false (Like dbms/Storages/MergeTree/BoolMask.h).
+    /// Working with UInt8: last bit = can be true, previous = can be false (Like src/Storages/MergeTree/BoolMask.h).
     /// This function provides "AND" operation for BoolMasks.
     /// Returns: "can be true" = A."can be true" AND B."can be true"
     /// "can be false" = A."can be false" OR B."can be false"
diff --git a/dbms/Functions/bitBoolMaskOr.cpp b/src/Functions/bitBoolMaskOr.cpp
similarity index 96%
rename from dbms/Functions/bitBoolMaskOr.cpp
rename to src/Functions/bitBoolMaskOr.cpp
index 02882c0bb3f..0b439165fca 100644
--- a/dbms/Functions/bitBoolMaskOr.cpp
+++ b/src/Functions/bitBoolMaskOr.cpp
@@ -10,7 +10,7 @@ namespace DB
         extern const int BAD_CAST;
     }
 
-    /// Working with UInt8: last bit = can be true, previous = can be false (Like dbms/Storages/MergeTree/BoolMask.h).
+    /// Working with UInt8: last bit = can be true, previous = can be false (Like src/Storages/MergeTree/BoolMask.h).
     /// This function provides "OR" operation for BoolMasks.
     /// Returns: "can be true" = A."can be true" OR B."can be true"
     /// "can be false" = A."can be false" AND B."can be false"
diff --git a/dbms/Functions/bitCount.cpp b/src/Functions/bitCount.cpp
similarity index 100%
rename from dbms/Functions/bitCount.cpp
rename to src/Functions/bitCount.cpp
diff --git a/dbms/Functions/bitNot.cpp b/src/Functions/bitNot.cpp
similarity index 100%
rename from dbms/Functions/bitNot.cpp
rename to src/Functions/bitNot.cpp
diff --git a/dbms/Functions/bitOr.cpp b/src/Functions/bitOr.cpp
similarity index 100%
rename from dbms/Functions/bitOr.cpp
rename to src/Functions/bitOr.cpp
diff --git a/dbms/Functions/bitRotateLeft.cpp b/src/Functions/bitRotateLeft.cpp
similarity index 100%
rename from dbms/Functions/bitRotateLeft.cpp
rename to src/Functions/bitRotateLeft.cpp
diff --git a/dbms/Functions/bitRotateRight.cpp b/src/Functions/bitRotateRight.cpp
similarity index 100%
rename from dbms/Functions/bitRotateRight.cpp
rename to src/Functions/bitRotateRight.cpp
diff --git a/dbms/Functions/bitShiftLeft.cpp b/src/Functions/bitShiftLeft.cpp
similarity index 100%
rename from dbms/Functions/bitShiftLeft.cpp
rename to src/Functions/bitShiftLeft.cpp
diff --git a/dbms/Functions/bitShiftRight.cpp b/src/Functions/bitShiftRight.cpp
similarity index 100%
rename from dbms/Functions/bitShiftRight.cpp
rename to src/Functions/bitShiftRight.cpp
diff --git a/dbms/Functions/bitSwapLastTwo.cpp b/src/Functions/bitSwapLastTwo.cpp
similarity index 97%
rename from dbms/Functions/bitSwapLastTwo.cpp
rename to src/Functions/bitSwapLastTwo.cpp
index 5356d98d791..d6fa9a39ec3 100644
--- a/dbms/Functions/bitSwapLastTwo.cpp
+++ b/src/Functions/bitSwapLastTwo.cpp
@@ -10,7 +10,7 @@ namespace DB
         extern const int BAD_CAST;
     }
 
-    /// Working with UInt8: last bit = can be true, previous = can be false (Like dbms/Storages/MergeTree/BoolMask.h).
+    /// Working with UInt8: last bit = can be true, previous = can be false (Like src/Storages/MergeTree/BoolMask.h).
     /// This function provides "NOT" operation for BoolMasks by swapping last two bits ("can be true" <-> "can be false").
     template <typename A>
     struct BitSwapLastTwoImpl
diff --git a/dbms/Functions/bitTest.cpp b/src/Functions/bitTest.cpp
similarity index 100%
rename from dbms/Functions/bitTest.cpp
rename to src/Functions/bitTest.cpp
diff --git a/dbms/Functions/bitTestAll.cpp b/src/Functions/bitTestAll.cpp
similarity index 100%
rename from dbms/Functions/bitTestAll.cpp
rename to src/Functions/bitTestAll.cpp
diff --git a/dbms/Functions/bitTestAny.cpp b/src/Functions/bitTestAny.cpp
similarity index 100%
rename from dbms/Functions/bitTestAny.cpp
rename to src/Functions/bitTestAny.cpp
diff --git a/dbms/Functions/bitWrapperFunc.cpp b/src/Functions/bitWrapperFunc.cpp
similarity index 96%
rename from dbms/Functions/bitWrapperFunc.cpp
rename to src/Functions/bitWrapperFunc.cpp
index 447f8a4f62b..9f7276fbf98 100644
--- a/dbms/Functions/bitWrapperFunc.cpp
+++ b/src/Functions/bitWrapperFunc.cpp
@@ -9,7 +9,7 @@ namespace DB
         extern const int BAD_CAST;
     }
 
-    /// Working with UInt8: last bit = can be true, previous = can be false (Like dbms/Storages/MergeTree/BoolMask.h).
+    /// Working with UInt8: last bit = can be true, previous = can be false (Like src/Storages/MergeTree/BoolMask.h).
     /// This function wraps bool atomic functions
    /// and transforms their boolean return value to the BoolMask ("can be false" and "can be true" bits).
     template <typename A>
diff --git a/dbms/Functions/bitXor.cpp b/src/Functions/bitXor.cpp
similarity index 100%
rename from dbms/Functions/bitXor.cpp
rename to src/Functions/bitXor.cpp
diff --git a/dbms/Functions/blockNumber.cpp b/src/Functions/blockNumber.cpp
similarity index 100%
rename from dbms/Functions/blockNumber.cpp
rename to src/Functions/blockNumber.cpp
diff --git a/dbms/Functions/blockSerializedSize.cpp b/src/Functions/blockSerializedSize.cpp
similarity index 100%
rename from dbms/Functions/blockSerializedSize.cpp
rename to src/Functions/blockSerializedSize.cpp
diff --git a/dbms/Functions/blockSize.cpp b/src/Functions/blockSize.cpp
similarity index 100%
rename from dbms/Functions/blockSize.cpp
rename to src/Functions/blockSize.cpp
diff --git a/dbms/Functions/caseWithExpression.cpp b/src/Functions/caseWithExpression.cpp
similarity index 100%
rename from dbms/Functions/caseWithExpression.cpp
rename to src/Functions/caseWithExpression.cpp
diff --git a/dbms/Functions/castTypeToEither.h b/src/Functions/castTypeToEither.h
similarity index 100%
rename from dbms/Functions/castTypeToEither.h
rename to src/Functions/castTypeToEither.h
diff --git a/dbms/Functions/cbrt.cpp b/src/Functions/cbrt.cpp
similarity index 100%
rename from dbms/Functions/cbrt.cpp
rename to src/Functions/cbrt.cpp
diff --git a/dbms/Functions/coalesce.cpp b/src/Functions/coalesce.cpp
similarity index 100%
rename from dbms/Functions/coalesce.cpp
rename to src/Functions/coalesce.cpp
diff --git a/dbms/Functions/concat.cpp b/src/Functions/concat.cpp
similarity index 100%
rename from dbms/Functions/concat.cpp
rename to src/Functions/concat.cpp
diff --git a/dbms/Functions/config_functions.h.in b/src/Functions/config_functions.h.in
similarity index 100%
rename from dbms/Functions/config_functions.h.in
rename to src/Functions/config_functions.h.in
diff --git a/dbms/Functions/convertCharset.cpp b/src/Functions/convertCharset.cpp
similarity index 100%
rename from dbms/Functions/convertCharset.cpp
rename to src/Functions/convertCharset.cpp
diff --git a/dbms/Functions/cos.cpp b/src/Functions/cos.cpp
similarity index 100%
rename from dbms/Functions/cos.cpp
rename to src/Functions/cos.cpp
diff --git a/dbms/Functions/currentDatabase.cpp b/src/Functions/currentDatabase.cpp
similarity index 100%
rename from dbms/Functions/currentDatabase.cpp
rename to src/Functions/currentDatabase.cpp
diff --git a/dbms/Functions/currentQuota.cpp b/src/Functions/currentQuota.cpp
similarity index 100%
rename from dbms/Functions/currentQuota.cpp
rename to src/Functions/currentQuota.cpp
diff --git a/dbms/Functions/currentRowPolicies.cpp b/src/Functions/currentRowPolicies.cpp
similarity index 100%
rename from dbms/Functions/currentRowPolicies.cpp
rename to src/Functions/currentRowPolicies.cpp
diff --git a/dbms/Functions/currentUser.cpp b/src/Functions/currentUser.cpp
similarity index 100%
rename from dbms/Functions/currentUser.cpp
rename to src/Functions/currentUser.cpp
diff --git a/dbms/Functions/dateDiff.cpp b/src/Functions/dateDiff.cpp
similarity index 100%
rename from dbms/Functions/dateDiff.cpp
rename to src/Functions/dateDiff.cpp
diff --git a/dbms/Functions/defaultValueOfArgumentType.cpp b/src/Functions/defaultValueOfArgumentType.cpp
similarity index 100%
rename from dbms/Functions/defaultValueOfArgumentType.cpp
rename to src/Functions/defaultValueOfArgumentType.cpp
diff --git a/dbms/Functions/demange.cpp b/src/Functions/demange.cpp
similarity index 100%
rename from dbms/Functions/demange.cpp
rename to src/Functions/demange.cpp
diff --git a/dbms/Functions/divide.cpp
b/src/Functions/divide.cpp similarity index 100% rename from dbms/Functions/divide.cpp rename to src/Functions/divide.cpp diff --git a/dbms/Functions/dumpColumnStructure.cpp b/src/Functions/dumpColumnStructure.cpp similarity index 100% rename from dbms/Functions/dumpColumnStructure.cpp rename to src/Functions/dumpColumnStructure.cpp diff --git a/dbms/Functions/e.cpp b/src/Functions/e.cpp similarity index 100% rename from dbms/Functions/e.cpp rename to src/Functions/e.cpp diff --git a/dbms/Functions/empty.cpp b/src/Functions/empty.cpp similarity index 100% rename from dbms/Functions/empty.cpp rename to src/Functions/empty.cpp diff --git a/dbms/Functions/endsWith.cpp b/src/Functions/endsWith.cpp similarity index 100% rename from dbms/Functions/endsWith.cpp rename to src/Functions/endsWith.cpp diff --git a/dbms/Functions/equals.cpp b/src/Functions/equals.cpp similarity index 100% rename from dbms/Functions/equals.cpp rename to src/Functions/equals.cpp diff --git a/dbms/Functions/erf.cpp b/src/Functions/erf.cpp similarity index 100% rename from dbms/Functions/erf.cpp rename to src/Functions/erf.cpp diff --git a/dbms/Functions/erfc.cpp b/src/Functions/erfc.cpp similarity index 100% rename from dbms/Functions/erfc.cpp rename to src/Functions/erfc.cpp diff --git a/dbms/Functions/evalMLMethod.cpp b/src/Functions/evalMLMethod.cpp similarity index 100% rename from dbms/Functions/evalMLMethod.cpp rename to src/Functions/evalMLMethod.cpp diff --git a/dbms/Functions/exp.cpp b/src/Functions/exp.cpp similarity index 100% rename from dbms/Functions/exp.cpp rename to src/Functions/exp.cpp diff --git a/dbms/Functions/exp10.cpp b/src/Functions/exp10.cpp similarity index 100% rename from dbms/Functions/exp10.cpp rename to src/Functions/exp10.cpp diff --git a/dbms/Functions/exp2.cpp b/src/Functions/exp2.cpp similarity index 100% rename from dbms/Functions/exp2.cpp rename to src/Functions/exp2.cpp diff --git a/dbms/Functions/extractTimeZoneFromFunctionArguments.cpp b/src/Functions/extractTimeZoneFromFunctionArguments.cpp similarity index 100% rename from dbms/Functions/extractTimeZoneFromFunctionArguments.cpp rename to src/Functions/extractTimeZoneFromFunctionArguments.cpp diff --git a/dbms/Functions/extractTimeZoneFromFunctionArguments.h b/src/Functions/extractTimeZoneFromFunctionArguments.h similarity index 100% rename from dbms/Functions/extractTimeZoneFromFunctionArguments.h rename to src/Functions/extractTimeZoneFromFunctionArguments.h diff --git a/dbms/Functions/filesystem.cpp b/src/Functions/filesystem.cpp similarity index 100% rename from dbms/Functions/filesystem.cpp rename to src/Functions/filesystem.cpp diff --git a/dbms/Functions/finalizeAggregation.cpp b/src/Functions/finalizeAggregation.cpp similarity index 100% rename from dbms/Functions/finalizeAggregation.cpp rename to src/Functions/finalizeAggregation.cpp diff --git a/dbms/Functions/formatDateTime.cpp b/src/Functions/formatDateTime.cpp similarity index 100% rename from dbms/Functions/formatDateTime.cpp rename to src/Functions/formatDateTime.cpp diff --git a/dbms/Functions/formatString.cpp b/src/Functions/formatString.cpp similarity index 100% rename from dbms/Functions/formatString.cpp rename to src/Functions/formatString.cpp diff --git a/dbms/Functions/formatString.h b/src/Functions/formatString.h similarity index 100% rename from dbms/Functions/formatString.h rename to src/Functions/formatString.h diff --git a/dbms/Functions/gcd.cpp b/src/Functions/gcd.cpp similarity index 100% rename from dbms/Functions/gcd.cpp rename to 
src/Functions/gcd.cpp diff --git a/dbms/Functions/generateUUIDv4.cpp b/src/Functions/generateUUIDv4.cpp similarity index 100% rename from dbms/Functions/generateUUIDv4.cpp rename to src/Functions/generateUUIDv4.cpp diff --git a/dbms/Functions/geoToH3.cpp b/src/Functions/geoToH3.cpp similarity index 100% rename from dbms/Functions/geoToH3.cpp rename to src/Functions/geoToH3.cpp diff --git a/dbms/Functions/geohashDecode.cpp b/src/Functions/geohashDecode.cpp similarity index 100% rename from dbms/Functions/geohashDecode.cpp rename to src/Functions/geohashDecode.cpp diff --git a/dbms/Functions/geohashEncode.cpp b/src/Functions/geohashEncode.cpp similarity index 100% rename from dbms/Functions/geohashEncode.cpp rename to src/Functions/geohashEncode.cpp diff --git a/dbms/Functions/geohashesInBox.cpp b/src/Functions/geohashesInBox.cpp similarity index 100% rename from dbms/Functions/geohashesInBox.cpp rename to src/Functions/geohashesInBox.cpp diff --git a/dbms/Functions/getMacro.cpp b/src/Functions/getMacro.cpp similarity index 100% rename from dbms/Functions/getMacro.cpp rename to src/Functions/getMacro.cpp diff --git a/dbms/Functions/getScalar.cpp b/src/Functions/getScalar.cpp similarity index 100% rename from dbms/Functions/getScalar.cpp rename to src/Functions/getScalar.cpp diff --git a/dbms/Functions/getSizeOfEnumType.cpp b/src/Functions/getSizeOfEnumType.cpp similarity index 100% rename from dbms/Functions/getSizeOfEnumType.cpp rename to src/Functions/getSizeOfEnumType.cpp diff --git a/dbms/Functions/greatCircleDistance.cpp b/src/Functions/greatCircleDistance.cpp similarity index 100% rename from dbms/Functions/greatCircleDistance.cpp rename to src/Functions/greatCircleDistance.cpp diff --git a/dbms/Functions/greater.cpp b/src/Functions/greater.cpp similarity index 100% rename from dbms/Functions/greater.cpp rename to src/Functions/greater.cpp diff --git a/dbms/Functions/greaterOrEquals.cpp b/src/Functions/greaterOrEquals.cpp similarity index 100% rename from dbms/Functions/greaterOrEquals.cpp rename to src/Functions/greaterOrEquals.cpp diff --git a/dbms/Functions/greatest.cpp b/src/Functions/greatest.cpp similarity index 100% rename from dbms/Functions/greatest.cpp rename to src/Functions/greatest.cpp diff --git a/dbms/Functions/h3EdgeAngle.cpp b/src/Functions/h3EdgeAngle.cpp similarity index 100% rename from dbms/Functions/h3EdgeAngle.cpp rename to src/Functions/h3EdgeAngle.cpp diff --git a/dbms/Functions/h3EdgeLengthM.cpp b/src/Functions/h3EdgeLengthM.cpp similarity index 100% rename from dbms/Functions/h3EdgeLengthM.cpp rename to src/Functions/h3EdgeLengthM.cpp diff --git a/dbms/Functions/h3GetBaseCell.cpp b/src/Functions/h3GetBaseCell.cpp similarity index 100% rename from dbms/Functions/h3GetBaseCell.cpp rename to src/Functions/h3GetBaseCell.cpp diff --git a/dbms/Functions/h3GetResolution.cpp b/src/Functions/h3GetResolution.cpp similarity index 100% rename from dbms/Functions/h3GetResolution.cpp rename to src/Functions/h3GetResolution.cpp diff --git a/dbms/Functions/h3HexAreaM2.cpp b/src/Functions/h3HexAreaM2.cpp similarity index 100% rename from dbms/Functions/h3HexAreaM2.cpp rename to src/Functions/h3HexAreaM2.cpp diff --git a/dbms/Functions/h3IndexesAreNeighbors.cpp b/src/Functions/h3IndexesAreNeighbors.cpp similarity index 100% rename from dbms/Functions/h3IndexesAreNeighbors.cpp rename to src/Functions/h3IndexesAreNeighbors.cpp diff --git a/dbms/Functions/h3IsValid.cpp b/src/Functions/h3IsValid.cpp similarity index 100% rename from dbms/Functions/h3IsValid.cpp rename to 
src/Functions/h3IsValid.cpp diff --git a/dbms/Functions/h3ToChildren.cpp b/src/Functions/h3ToChildren.cpp similarity index 100% rename from dbms/Functions/h3ToChildren.cpp rename to src/Functions/h3ToChildren.cpp diff --git a/dbms/Functions/h3ToParent.cpp b/src/Functions/h3ToParent.cpp similarity index 100% rename from dbms/Functions/h3ToParent.cpp rename to src/Functions/h3ToParent.cpp diff --git a/dbms/Functions/h3ToString.cpp b/src/Functions/h3ToString.cpp similarity index 100% rename from dbms/Functions/h3ToString.cpp rename to src/Functions/h3ToString.cpp diff --git a/dbms/Functions/h3kRing.cpp b/src/Functions/h3kRing.cpp similarity index 100% rename from dbms/Functions/h3kRing.cpp rename to src/Functions/h3kRing.cpp diff --git a/dbms/Functions/hasColumnInTable.cpp b/src/Functions/hasColumnInTable.cpp similarity index 100% rename from dbms/Functions/hasColumnInTable.cpp rename to src/Functions/hasColumnInTable.cpp diff --git a/dbms/Functions/hasToken.cpp b/src/Functions/hasToken.cpp similarity index 100% rename from dbms/Functions/hasToken.cpp rename to src/Functions/hasToken.cpp diff --git a/dbms/Functions/hasTokenCaseInsensitive.cpp b/src/Functions/hasTokenCaseInsensitive.cpp similarity index 100% rename from dbms/Functions/hasTokenCaseInsensitive.cpp rename to src/Functions/hasTokenCaseInsensitive.cpp diff --git a/dbms/Functions/hostName.cpp b/src/Functions/hostName.cpp similarity index 100% rename from dbms/Functions/hostName.cpp rename to src/Functions/hostName.cpp diff --git a/dbms/Functions/identity.cpp b/src/Functions/identity.cpp similarity index 100% rename from dbms/Functions/identity.cpp rename to src/Functions/identity.cpp diff --git a/dbms/Functions/if.cpp b/src/Functions/if.cpp similarity index 100% rename from dbms/Functions/if.cpp rename to src/Functions/if.cpp diff --git a/dbms/Functions/ifNotFinite.cpp b/src/Functions/ifNotFinite.cpp similarity index 100% rename from dbms/Functions/ifNotFinite.cpp rename to src/Functions/ifNotFinite.cpp diff --git a/dbms/Functions/ifNull.cpp b/src/Functions/ifNull.cpp similarity index 100% rename from dbms/Functions/ifNull.cpp rename to src/Functions/ifNull.cpp diff --git a/dbms/Functions/ignore.cpp b/src/Functions/ignore.cpp similarity index 100% rename from dbms/Functions/ignore.cpp rename to src/Functions/ignore.cpp diff --git a/dbms/Functions/ignoreExceptNull.cpp b/src/Functions/ignoreExceptNull.cpp similarity index 100% rename from dbms/Functions/ignoreExceptNull.cpp rename to src/Functions/ignoreExceptNull.cpp diff --git a/dbms/Functions/in.cpp b/src/Functions/in.cpp similarity index 100% rename from dbms/Functions/in.cpp rename to src/Functions/in.cpp diff --git a/dbms/Functions/intDiv.cpp b/src/Functions/intDiv.cpp similarity index 100% rename from dbms/Functions/intDiv.cpp rename to src/Functions/intDiv.cpp diff --git a/dbms/Functions/intDivOrZero.cpp b/src/Functions/intDivOrZero.cpp similarity index 100% rename from dbms/Functions/intDivOrZero.cpp rename to src/Functions/intDivOrZero.cpp diff --git a/dbms/Functions/intExp10.cpp b/src/Functions/intExp10.cpp similarity index 100% rename from dbms/Functions/intExp10.cpp rename to src/Functions/intExp10.cpp diff --git a/dbms/Functions/intExp2.cpp b/src/Functions/intExp2.cpp similarity index 100% rename from dbms/Functions/intExp2.cpp rename to src/Functions/intExp2.cpp diff --git a/dbms/Functions/isFinite.cpp b/src/Functions/isFinite.cpp similarity index 100% rename from dbms/Functions/isFinite.cpp rename to src/Functions/isFinite.cpp diff --git a/dbms/Functions/isInfinite.cpp 
b/src/Functions/isInfinite.cpp similarity index 100% rename from dbms/Functions/isInfinite.cpp rename to src/Functions/isInfinite.cpp diff --git a/dbms/Functions/isNaN.cpp b/src/Functions/isNaN.cpp similarity index 100% rename from dbms/Functions/isNaN.cpp rename to src/Functions/isNaN.cpp diff --git a/dbms/Functions/isNotNull.cpp b/src/Functions/isNotNull.cpp similarity index 100% rename from dbms/Functions/isNotNull.cpp rename to src/Functions/isNotNull.cpp diff --git a/dbms/Functions/isNull.cpp b/src/Functions/isNull.cpp similarity index 100% rename from dbms/Functions/isNull.cpp rename to src/Functions/isNull.cpp diff --git a/dbms/Functions/isValidUTF8.cpp b/src/Functions/isValidUTF8.cpp similarity index 100% rename from dbms/Functions/isValidUTF8.cpp rename to src/Functions/isValidUTF8.cpp diff --git a/dbms/Functions/jumpConsistentHash.cpp b/src/Functions/jumpConsistentHash.cpp similarity index 100% rename from dbms/Functions/jumpConsistentHash.cpp rename to src/Functions/jumpConsistentHash.cpp diff --git a/dbms/Functions/lcm.cpp b/src/Functions/lcm.cpp similarity index 100% rename from dbms/Functions/lcm.cpp rename to src/Functions/lcm.cpp diff --git a/dbms/Functions/least.cpp b/src/Functions/least.cpp similarity index 100% rename from dbms/Functions/least.cpp rename to src/Functions/least.cpp diff --git a/dbms/Functions/lengthUTF8.cpp b/src/Functions/lengthUTF8.cpp similarity index 100% rename from dbms/Functions/lengthUTF8.cpp rename to src/Functions/lengthUTF8.cpp diff --git a/dbms/Functions/less.cpp b/src/Functions/less.cpp similarity index 100% rename from dbms/Functions/less.cpp rename to src/Functions/less.cpp diff --git a/dbms/Functions/lessOrEquals.cpp b/src/Functions/lessOrEquals.cpp similarity index 100% rename from dbms/Functions/lessOrEquals.cpp rename to src/Functions/lessOrEquals.cpp diff --git a/dbms/Functions/lgamma.cpp b/src/Functions/lgamma.cpp similarity index 100% rename from dbms/Functions/lgamma.cpp rename to src/Functions/lgamma.cpp diff --git a/dbms/Functions/likePatternToRegexp.h b/src/Functions/likePatternToRegexp.h similarity index 100% rename from dbms/Functions/likePatternToRegexp.h rename to src/Functions/likePatternToRegexp.h diff --git a/dbms/Functions/log.cpp b/src/Functions/log.cpp similarity index 100% rename from dbms/Functions/log.cpp rename to src/Functions/log.cpp diff --git a/dbms/Functions/log10.cpp b/src/Functions/log10.cpp similarity index 100% rename from dbms/Functions/log10.cpp rename to src/Functions/log10.cpp diff --git a/dbms/Functions/log2.cpp b/src/Functions/log2.cpp similarity index 100% rename from dbms/Functions/log2.cpp rename to src/Functions/log2.cpp diff --git a/dbms/Functions/lowCardinalityIndices.cpp b/src/Functions/lowCardinalityIndices.cpp similarity index 100% rename from dbms/Functions/lowCardinalityIndices.cpp rename to src/Functions/lowCardinalityIndices.cpp diff --git a/dbms/Functions/lowCardinalityKeys.cpp b/src/Functions/lowCardinalityKeys.cpp similarity index 100% rename from dbms/Functions/lowCardinalityKeys.cpp rename to src/Functions/lowCardinalityKeys.cpp diff --git a/dbms/Functions/lower.cpp b/src/Functions/lower.cpp similarity index 100% rename from dbms/Functions/lower.cpp rename to src/Functions/lower.cpp diff --git a/dbms/Functions/lowerUTF8.cpp b/src/Functions/lowerUTF8.cpp similarity index 100% rename from dbms/Functions/lowerUTF8.cpp rename to src/Functions/lowerUTF8.cpp diff --git a/dbms/Functions/materialize.cpp b/src/Functions/materialize.cpp similarity index 100% rename from 
dbms/Functions/materialize.cpp rename to src/Functions/materialize.cpp diff --git a/dbms/Functions/minus.cpp b/src/Functions/minus.cpp similarity index 100% rename from dbms/Functions/minus.cpp rename to src/Functions/minus.cpp diff --git a/dbms/Functions/modulo.cpp b/src/Functions/modulo.cpp similarity index 100% rename from dbms/Functions/modulo.cpp rename to src/Functions/modulo.cpp diff --git a/dbms/Functions/moduloOrZero.cpp b/src/Functions/moduloOrZero.cpp similarity index 100% rename from dbms/Functions/moduloOrZero.cpp rename to src/Functions/moduloOrZero.cpp diff --git a/dbms/Functions/multiIf.cpp b/src/Functions/multiIf.cpp similarity index 100% rename from dbms/Functions/multiIf.cpp rename to src/Functions/multiIf.cpp diff --git a/dbms/Functions/multiSearchAllPositions.cpp b/src/Functions/multiSearchAllPositions.cpp similarity index 100% rename from dbms/Functions/multiSearchAllPositions.cpp rename to src/Functions/multiSearchAllPositions.cpp diff --git a/dbms/Functions/multiSearchAllPositionsCaseInsensitive.cpp b/src/Functions/multiSearchAllPositionsCaseInsensitive.cpp similarity index 100% rename from dbms/Functions/multiSearchAllPositionsCaseInsensitive.cpp rename to src/Functions/multiSearchAllPositionsCaseInsensitive.cpp diff --git a/dbms/Functions/multiSearchAllPositionsCaseInsensitiveUTF8.cpp b/src/Functions/multiSearchAllPositionsCaseInsensitiveUTF8.cpp similarity index 100% rename from dbms/Functions/multiSearchAllPositionsCaseInsensitiveUTF8.cpp rename to src/Functions/multiSearchAllPositionsCaseInsensitiveUTF8.cpp diff --git a/dbms/Functions/multiSearchAllPositionsUTF8.cpp b/src/Functions/multiSearchAllPositionsUTF8.cpp similarity index 100% rename from dbms/Functions/multiSearchAllPositionsUTF8.cpp rename to src/Functions/multiSearchAllPositionsUTF8.cpp diff --git a/dbms/Functions/multiSearchAny.cpp b/src/Functions/multiSearchAny.cpp similarity index 100% rename from dbms/Functions/multiSearchAny.cpp rename to src/Functions/multiSearchAny.cpp diff --git a/dbms/Functions/multiSearchAnyCaseInsensitive.cpp b/src/Functions/multiSearchAnyCaseInsensitive.cpp similarity index 100% rename from dbms/Functions/multiSearchAnyCaseInsensitive.cpp rename to src/Functions/multiSearchAnyCaseInsensitive.cpp diff --git a/dbms/Functions/multiSearchAnyCaseInsensitiveUTF8.cpp b/src/Functions/multiSearchAnyCaseInsensitiveUTF8.cpp similarity index 100% rename from dbms/Functions/multiSearchAnyCaseInsensitiveUTF8.cpp rename to src/Functions/multiSearchAnyCaseInsensitiveUTF8.cpp diff --git a/dbms/Functions/multiSearchAnyUTF8.cpp b/src/Functions/multiSearchAnyUTF8.cpp similarity index 100% rename from dbms/Functions/multiSearchAnyUTF8.cpp rename to src/Functions/multiSearchAnyUTF8.cpp diff --git a/dbms/Functions/multiSearchFirstIndex.cpp b/src/Functions/multiSearchFirstIndex.cpp similarity index 100% rename from dbms/Functions/multiSearchFirstIndex.cpp rename to src/Functions/multiSearchFirstIndex.cpp diff --git a/dbms/Functions/multiSearchFirstIndexCaseInsensitive.cpp b/src/Functions/multiSearchFirstIndexCaseInsensitive.cpp similarity index 100% rename from dbms/Functions/multiSearchFirstIndexCaseInsensitive.cpp rename to src/Functions/multiSearchFirstIndexCaseInsensitive.cpp diff --git a/dbms/Functions/multiSearchFirstIndexCaseInsensitiveUTF8.cpp b/src/Functions/multiSearchFirstIndexCaseInsensitiveUTF8.cpp similarity index 100% rename from dbms/Functions/multiSearchFirstIndexCaseInsensitiveUTF8.cpp rename to src/Functions/multiSearchFirstIndexCaseInsensitiveUTF8.cpp diff --git 
a/dbms/Functions/multiSearchFirstIndexUTF8.cpp b/src/Functions/multiSearchFirstIndexUTF8.cpp similarity index 100% rename from dbms/Functions/multiSearchFirstIndexUTF8.cpp rename to src/Functions/multiSearchFirstIndexUTF8.cpp diff --git a/dbms/Functions/multiSearchFirstPosition.cpp b/src/Functions/multiSearchFirstPosition.cpp similarity index 100% rename from dbms/Functions/multiSearchFirstPosition.cpp rename to src/Functions/multiSearchFirstPosition.cpp diff --git a/dbms/Functions/multiSearchFirstPositionCaseInsensitive.cpp b/src/Functions/multiSearchFirstPositionCaseInsensitive.cpp similarity index 100% rename from dbms/Functions/multiSearchFirstPositionCaseInsensitive.cpp rename to src/Functions/multiSearchFirstPositionCaseInsensitive.cpp diff --git a/dbms/Functions/multiSearchFirstPositionCaseInsensitiveUTF8.cpp b/src/Functions/multiSearchFirstPositionCaseInsensitiveUTF8.cpp similarity index 100% rename from dbms/Functions/multiSearchFirstPositionCaseInsensitiveUTF8.cpp rename to src/Functions/multiSearchFirstPositionCaseInsensitiveUTF8.cpp diff --git a/dbms/Functions/multiSearchFirstPositionUTF8.cpp b/src/Functions/multiSearchFirstPositionUTF8.cpp similarity index 100% rename from dbms/Functions/multiSearchFirstPositionUTF8.cpp rename to src/Functions/multiSearchFirstPositionUTF8.cpp diff --git a/dbms/Functions/multiply.cpp b/src/Functions/multiply.cpp similarity index 100% rename from dbms/Functions/multiply.cpp rename to src/Functions/multiply.cpp diff --git a/dbms/Functions/negate.cpp b/src/Functions/negate.cpp similarity index 100% rename from dbms/Functions/negate.cpp rename to src/Functions/negate.cpp diff --git a/dbms/Functions/neighbor.cpp b/src/Functions/neighbor.cpp similarity index 100% rename from dbms/Functions/neighbor.cpp rename to src/Functions/neighbor.cpp diff --git a/dbms/Functions/notEmpty.cpp b/src/Functions/notEmpty.cpp similarity index 100% rename from dbms/Functions/notEmpty.cpp rename to src/Functions/notEmpty.cpp diff --git a/dbms/Functions/notEquals.cpp b/src/Functions/notEquals.cpp similarity index 100% rename from dbms/Functions/notEquals.cpp rename to src/Functions/notEquals.cpp diff --git a/dbms/Functions/now.cpp b/src/Functions/now.cpp similarity index 100% rename from dbms/Functions/now.cpp rename to src/Functions/now.cpp diff --git a/dbms/Functions/now64.cpp b/src/Functions/now64.cpp similarity index 100% rename from dbms/Functions/now64.cpp rename to src/Functions/now64.cpp diff --git a/dbms/Functions/nullIf.cpp b/src/Functions/nullIf.cpp similarity index 100% rename from dbms/Functions/nullIf.cpp rename to src/Functions/nullIf.cpp diff --git a/dbms/Functions/pi.cpp b/src/Functions/pi.cpp similarity index 100% rename from dbms/Functions/pi.cpp rename to src/Functions/pi.cpp diff --git a/dbms/Functions/plus.cpp b/src/Functions/plus.cpp similarity index 100% rename from dbms/Functions/plus.cpp rename to src/Functions/plus.cpp diff --git a/dbms/Functions/pointInEllipses.cpp b/src/Functions/pointInEllipses.cpp similarity index 100% rename from dbms/Functions/pointInEllipses.cpp rename to src/Functions/pointInEllipses.cpp diff --git a/dbms/Functions/pointInPolygon.cpp b/src/Functions/pointInPolygon.cpp similarity index 100% rename from dbms/Functions/pointInPolygon.cpp rename to src/Functions/pointInPolygon.cpp diff --git a/dbms/Functions/position.cpp b/src/Functions/position.cpp similarity index 100% rename from dbms/Functions/position.cpp rename to src/Functions/position.cpp diff --git a/dbms/Functions/positionCaseInsensitive.cpp 
b/src/Functions/positionCaseInsensitive.cpp similarity index 100% rename from dbms/Functions/positionCaseInsensitive.cpp rename to src/Functions/positionCaseInsensitive.cpp diff --git a/dbms/Functions/positionCaseInsensitiveUTF8.cpp b/src/Functions/positionCaseInsensitiveUTF8.cpp similarity index 100% rename from dbms/Functions/positionCaseInsensitiveUTF8.cpp rename to src/Functions/positionCaseInsensitiveUTF8.cpp diff --git a/dbms/Functions/positionUTF8.cpp b/src/Functions/positionUTF8.cpp similarity index 100% rename from dbms/Functions/positionUTF8.cpp rename to src/Functions/positionUTF8.cpp diff --git a/dbms/Functions/pow.cpp b/src/Functions/pow.cpp similarity index 100% rename from dbms/Functions/pow.cpp rename to src/Functions/pow.cpp diff --git a/dbms/Functions/rand.cpp b/src/Functions/rand.cpp similarity index 100% rename from dbms/Functions/rand.cpp rename to src/Functions/rand.cpp diff --git a/dbms/Functions/rand64.cpp b/src/Functions/rand64.cpp similarity index 100% rename from dbms/Functions/rand64.cpp rename to src/Functions/rand64.cpp diff --git a/dbms/Functions/randConstant.cpp b/src/Functions/randConstant.cpp similarity index 100% rename from dbms/Functions/randConstant.cpp rename to src/Functions/randConstant.cpp diff --git a/dbms/Functions/randomPrintableASCII.cpp b/src/Functions/randomPrintableASCII.cpp similarity index 100% rename from dbms/Functions/randomPrintableASCII.cpp rename to src/Functions/randomPrintableASCII.cpp diff --git a/dbms/Functions/regexpQuoteMeta.cpp b/src/Functions/regexpQuoteMeta.cpp similarity index 100% rename from dbms/Functions/regexpQuoteMeta.cpp rename to src/Functions/regexpQuoteMeta.cpp diff --git a/dbms/Functions/registerFunctions.cpp b/src/Functions/registerFunctions.cpp similarity index 100% rename from dbms/Functions/registerFunctions.cpp rename to src/Functions/registerFunctions.cpp diff --git a/dbms/Functions/registerFunctions.h b/src/Functions/registerFunctions.h similarity index 100% rename from dbms/Functions/registerFunctions.h rename to src/Functions/registerFunctions.h diff --git a/dbms/Functions/registerFunctionsArithmetic.cpp b/src/Functions/registerFunctionsArithmetic.cpp similarity index 100% rename from dbms/Functions/registerFunctionsArithmetic.cpp rename to src/Functions/registerFunctionsArithmetic.cpp diff --git a/dbms/Functions/registerFunctionsComparison.cpp b/src/Functions/registerFunctionsComparison.cpp similarity index 100% rename from dbms/Functions/registerFunctionsComparison.cpp rename to src/Functions/registerFunctionsComparison.cpp diff --git a/dbms/Functions/registerFunctionsConditional.cpp b/src/Functions/registerFunctionsConditional.cpp similarity index 100% rename from dbms/Functions/registerFunctionsConditional.cpp rename to src/Functions/registerFunctionsConditional.cpp diff --git a/dbms/Functions/registerFunctionsConsistentHashing.cpp b/src/Functions/registerFunctionsConsistentHashing.cpp similarity index 100% rename from dbms/Functions/registerFunctionsConsistentHashing.cpp rename to src/Functions/registerFunctionsConsistentHashing.cpp diff --git a/dbms/Functions/registerFunctionsDateTime.cpp b/src/Functions/registerFunctionsDateTime.cpp similarity index 100% rename from dbms/Functions/registerFunctionsDateTime.cpp rename to src/Functions/registerFunctionsDateTime.cpp diff --git a/dbms/Functions/registerFunctionsGeo.cpp b/src/Functions/registerFunctionsGeo.cpp similarity index 100% rename from dbms/Functions/registerFunctionsGeo.cpp rename to src/Functions/registerFunctionsGeo.cpp diff --git 
a/dbms/Functions/registerFunctionsHigherOrder.cpp b/src/Functions/registerFunctionsHigherOrder.cpp similarity index 100% rename from dbms/Functions/registerFunctionsHigherOrder.cpp rename to src/Functions/registerFunctionsHigherOrder.cpp diff --git a/dbms/Functions/registerFunctionsIntrospection.cpp b/src/Functions/registerFunctionsIntrospection.cpp similarity index 100% rename from dbms/Functions/registerFunctionsIntrospection.cpp rename to src/Functions/registerFunctionsIntrospection.cpp diff --git a/dbms/Functions/registerFunctionsMath.cpp b/src/Functions/registerFunctionsMath.cpp similarity index 100% rename from dbms/Functions/registerFunctionsMath.cpp rename to src/Functions/registerFunctionsMath.cpp diff --git a/dbms/Functions/registerFunctionsMiscellaneous.cpp b/src/Functions/registerFunctionsMiscellaneous.cpp similarity index 100% rename from dbms/Functions/registerFunctionsMiscellaneous.cpp rename to src/Functions/registerFunctionsMiscellaneous.cpp diff --git a/dbms/Functions/registerFunctionsNull.cpp b/src/Functions/registerFunctionsNull.cpp similarity index 100% rename from dbms/Functions/registerFunctionsNull.cpp rename to src/Functions/registerFunctionsNull.cpp diff --git a/dbms/Functions/registerFunctionsRandom.cpp b/src/Functions/registerFunctionsRandom.cpp similarity index 100% rename from dbms/Functions/registerFunctionsRandom.cpp rename to src/Functions/registerFunctionsRandom.cpp diff --git a/dbms/Functions/registerFunctionsReinterpret.cpp b/src/Functions/registerFunctionsReinterpret.cpp similarity index 100% rename from dbms/Functions/registerFunctionsReinterpret.cpp rename to src/Functions/registerFunctionsReinterpret.cpp diff --git a/dbms/Functions/registerFunctionsString.cpp b/src/Functions/registerFunctionsString.cpp similarity index 100% rename from dbms/Functions/registerFunctionsString.cpp rename to src/Functions/registerFunctionsString.cpp diff --git a/dbms/Functions/registerFunctionsStringSearch.cpp b/src/Functions/registerFunctionsStringSearch.cpp similarity index 100% rename from dbms/Functions/registerFunctionsStringSearch.cpp rename to src/Functions/registerFunctionsStringSearch.cpp diff --git a/dbms/Functions/registerFunctionsTuple.cpp b/src/Functions/registerFunctionsTuple.cpp similarity index 100% rename from dbms/Functions/registerFunctionsTuple.cpp rename to src/Functions/registerFunctionsTuple.cpp diff --git a/dbms/Functions/registerFunctionsVisitParam.cpp b/src/Functions/registerFunctionsVisitParam.cpp similarity index 100% rename from dbms/Functions/registerFunctionsVisitParam.cpp rename to src/Functions/registerFunctionsVisitParam.cpp diff --git a/dbms/Functions/reinterpretAsFixedString.cpp b/src/Functions/reinterpretAsFixedString.cpp similarity index 100% rename from dbms/Functions/reinterpretAsFixedString.cpp rename to src/Functions/reinterpretAsFixedString.cpp diff --git a/dbms/Functions/reinterpretAsString.cpp b/src/Functions/reinterpretAsString.cpp similarity index 100% rename from dbms/Functions/reinterpretAsString.cpp rename to src/Functions/reinterpretAsString.cpp diff --git a/dbms/Functions/reinterpretStringAs.cpp b/src/Functions/reinterpretStringAs.cpp similarity index 100% rename from dbms/Functions/reinterpretStringAs.cpp rename to src/Functions/reinterpretStringAs.cpp diff --git a/dbms/Functions/repeat.cpp b/src/Functions/repeat.cpp similarity index 100% rename from dbms/Functions/repeat.cpp rename to src/Functions/repeat.cpp diff --git a/dbms/Functions/replicate.cpp b/src/Functions/replicate.cpp similarity index 100% rename from 
dbms/Functions/replicate.cpp rename to src/Functions/replicate.cpp diff --git a/dbms/Functions/reverse.cpp b/src/Functions/reverse.cpp similarity index 100% rename from dbms/Functions/reverse.cpp rename to src/Functions/reverse.cpp diff --git a/dbms/Functions/reverseUTF8.cpp b/src/Functions/reverseUTF8.cpp similarity index 100% rename from dbms/Functions/reverseUTF8.cpp rename to src/Functions/reverseUTF8.cpp diff --git a/dbms/Functions/roundAge.cpp b/src/Functions/roundAge.cpp similarity index 100% rename from dbms/Functions/roundAge.cpp rename to src/Functions/roundAge.cpp diff --git a/dbms/Functions/roundDuration.cpp b/src/Functions/roundDuration.cpp similarity index 100% rename from dbms/Functions/roundDuration.cpp rename to src/Functions/roundDuration.cpp diff --git a/dbms/Functions/roundToExp2.cpp b/src/Functions/roundToExp2.cpp similarity index 100% rename from dbms/Functions/roundToExp2.cpp rename to src/Functions/roundToExp2.cpp diff --git a/dbms/Functions/rowNumberInAllBlocks.cpp b/src/Functions/rowNumberInAllBlocks.cpp similarity index 100% rename from dbms/Functions/rowNumberInAllBlocks.cpp rename to src/Functions/rowNumberInAllBlocks.cpp diff --git a/dbms/Functions/rowNumberInBlock.cpp b/src/Functions/rowNumberInBlock.cpp similarity index 100% rename from dbms/Functions/rowNumberInBlock.cpp rename to src/Functions/rowNumberInBlock.cpp diff --git a/dbms/Functions/runningAccumulate.cpp b/src/Functions/runningAccumulate.cpp similarity index 100% rename from dbms/Functions/runningAccumulate.cpp rename to src/Functions/runningAccumulate.cpp diff --git a/dbms/Functions/runningDifference.cpp b/src/Functions/runningDifference.cpp similarity index 100% rename from dbms/Functions/runningDifference.cpp rename to src/Functions/runningDifference.cpp diff --git a/dbms/Functions/runningDifference.h b/src/Functions/runningDifference.h similarity index 100% rename from dbms/Functions/runningDifference.h rename to src/Functions/runningDifference.h diff --git a/dbms/Functions/runningDifferenceStartingWithFirstValue.cpp b/src/Functions/runningDifferenceStartingWithFirstValue.cpp similarity index 100% rename from dbms/Functions/runningDifferenceStartingWithFirstValue.cpp rename to src/Functions/runningDifferenceStartingWithFirstValue.cpp diff --git a/dbms/Functions/sigmoid.cpp b/src/Functions/sigmoid.cpp similarity index 100% rename from dbms/Functions/sigmoid.cpp rename to src/Functions/sigmoid.cpp diff --git a/dbms/Functions/sin.cpp b/src/Functions/sin.cpp similarity index 100% rename from dbms/Functions/sin.cpp rename to src/Functions/sin.cpp diff --git a/dbms/Functions/sleep.cpp b/src/Functions/sleep.cpp similarity index 100% rename from dbms/Functions/sleep.cpp rename to src/Functions/sleep.cpp diff --git a/dbms/Functions/sleep.h b/src/Functions/sleep.h similarity index 100% rename from dbms/Functions/sleep.h rename to src/Functions/sleep.h diff --git a/dbms/Functions/sleepEachRow.cpp b/src/Functions/sleepEachRow.cpp similarity index 100% rename from dbms/Functions/sleepEachRow.cpp rename to src/Functions/sleepEachRow.cpp diff --git a/dbms/Functions/sqrt.cpp b/src/Functions/sqrt.cpp similarity index 100% rename from dbms/Functions/sqrt.cpp rename to src/Functions/sqrt.cpp diff --git a/dbms/Functions/startsWith.cpp b/src/Functions/startsWith.cpp similarity index 100% rename from dbms/Functions/startsWith.cpp rename to src/Functions/startsWith.cpp diff --git a/dbms/Functions/stringToH3.cpp b/src/Functions/stringToH3.cpp similarity index 100% rename from dbms/Functions/stringToH3.cpp rename to 
src/Functions/stringToH3.cpp diff --git a/dbms/Functions/substring.cpp b/src/Functions/substring.cpp similarity index 100% rename from dbms/Functions/substring.cpp rename to src/Functions/substring.cpp diff --git a/dbms/Functions/subtractDays.cpp b/src/Functions/subtractDays.cpp similarity index 100% rename from dbms/Functions/subtractDays.cpp rename to src/Functions/subtractDays.cpp diff --git a/dbms/Functions/subtractHours.cpp b/src/Functions/subtractHours.cpp similarity index 100% rename from dbms/Functions/subtractHours.cpp rename to src/Functions/subtractHours.cpp diff --git a/dbms/Functions/subtractMinutes.cpp b/src/Functions/subtractMinutes.cpp similarity index 100% rename from dbms/Functions/subtractMinutes.cpp rename to src/Functions/subtractMinutes.cpp diff --git a/dbms/Functions/subtractMonths.cpp b/src/Functions/subtractMonths.cpp similarity index 100% rename from dbms/Functions/subtractMonths.cpp rename to src/Functions/subtractMonths.cpp diff --git a/dbms/Functions/subtractQuarters.cpp b/src/Functions/subtractQuarters.cpp similarity index 100% rename from dbms/Functions/subtractQuarters.cpp rename to src/Functions/subtractQuarters.cpp diff --git a/dbms/Functions/subtractSeconds.cpp b/src/Functions/subtractSeconds.cpp similarity index 100% rename from dbms/Functions/subtractSeconds.cpp rename to src/Functions/subtractSeconds.cpp diff --git a/dbms/Functions/subtractWeeks.cpp b/src/Functions/subtractWeeks.cpp similarity index 100% rename from dbms/Functions/subtractWeeks.cpp rename to src/Functions/subtractWeeks.cpp diff --git a/dbms/Functions/subtractYears.cpp b/src/Functions/subtractYears.cpp similarity index 100% rename from dbms/Functions/subtractYears.cpp rename to src/Functions/subtractYears.cpp diff --git a/dbms/Functions/sumburConsistentHash.cpp b/src/Functions/sumburConsistentHash.cpp similarity index 100% rename from dbms/Functions/sumburConsistentHash.cpp rename to src/Functions/sumburConsistentHash.cpp diff --git a/dbms/Functions/tan.cpp b/src/Functions/tan.cpp similarity index 100% rename from dbms/Functions/tan.cpp rename to src/Functions/tan.cpp diff --git a/dbms/Functions/tanh.cpp b/src/Functions/tanh.cpp similarity index 100% rename from dbms/Functions/tanh.cpp rename to src/Functions/tanh.cpp diff --git a/dbms/Functions/tests/CMakeLists.txt b/src/Functions/tests/CMakeLists.txt similarity index 100% rename from dbms/Functions/tests/CMakeLists.txt rename to src/Functions/tests/CMakeLists.txt diff --git a/dbms/Functions/tests/number_traits.cpp b/src/Functions/tests/number_traits.cpp similarity index 100% rename from dbms/Functions/tests/number_traits.cpp rename to src/Functions/tests/number_traits.cpp diff --git a/dbms/Functions/tgamma.cpp b/src/Functions/tgamma.cpp similarity index 100% rename from dbms/Functions/tgamma.cpp rename to src/Functions/tgamma.cpp diff --git a/dbms/Functions/throwIf.cpp b/src/Functions/throwIf.cpp similarity index 100% rename from dbms/Functions/throwIf.cpp rename to src/Functions/throwIf.cpp diff --git a/dbms/Functions/timeSlot.cpp b/src/Functions/timeSlot.cpp similarity index 100% rename from dbms/Functions/timeSlot.cpp rename to src/Functions/timeSlot.cpp diff --git a/dbms/Functions/timeSlots.cpp b/src/Functions/timeSlots.cpp similarity index 100% rename from dbms/Functions/timeSlots.cpp rename to src/Functions/timeSlots.cpp diff --git a/dbms/Functions/timezone.cpp b/src/Functions/timezone.cpp similarity index 100% rename from dbms/Functions/timezone.cpp rename to src/Functions/timezone.cpp diff --git 
a/dbms/Functions/toColumnTypeName.cpp b/src/Functions/toColumnTypeName.cpp similarity index 100% rename from dbms/Functions/toColumnTypeName.cpp rename to src/Functions/toColumnTypeName.cpp diff --git a/dbms/Functions/toCustomWeek.cpp b/src/Functions/toCustomWeek.cpp similarity index 100% rename from dbms/Functions/toCustomWeek.cpp rename to src/Functions/toCustomWeek.cpp diff --git a/dbms/Functions/toDayOfMonth.cpp b/src/Functions/toDayOfMonth.cpp similarity index 100% rename from dbms/Functions/toDayOfMonth.cpp rename to src/Functions/toDayOfMonth.cpp diff --git a/dbms/Functions/toDayOfWeek.cpp b/src/Functions/toDayOfWeek.cpp similarity index 100% rename from dbms/Functions/toDayOfWeek.cpp rename to src/Functions/toDayOfWeek.cpp diff --git a/dbms/Functions/toDayOfYear.cpp b/src/Functions/toDayOfYear.cpp similarity index 100% rename from dbms/Functions/toDayOfYear.cpp rename to src/Functions/toDayOfYear.cpp diff --git a/dbms/Functions/toHour.cpp b/src/Functions/toHour.cpp similarity index 100% rename from dbms/Functions/toHour.cpp rename to src/Functions/toHour.cpp diff --git a/dbms/Functions/toISOWeek.cpp b/src/Functions/toISOWeek.cpp similarity index 100% rename from dbms/Functions/toISOWeek.cpp rename to src/Functions/toISOWeek.cpp diff --git a/dbms/Functions/toISOYear.cpp b/src/Functions/toISOYear.cpp similarity index 100% rename from dbms/Functions/toISOYear.cpp rename to src/Functions/toISOYear.cpp diff --git a/dbms/Functions/toLowCardinality.cpp b/src/Functions/toLowCardinality.cpp similarity index 100% rename from dbms/Functions/toLowCardinality.cpp rename to src/Functions/toLowCardinality.cpp diff --git a/dbms/Functions/toMinute.cpp b/src/Functions/toMinute.cpp similarity index 100% rename from dbms/Functions/toMinute.cpp rename to src/Functions/toMinute.cpp diff --git a/dbms/Functions/toMonday.cpp b/src/Functions/toMonday.cpp similarity index 100% rename from dbms/Functions/toMonday.cpp rename to src/Functions/toMonday.cpp diff --git a/dbms/Functions/toMonth.cpp b/src/Functions/toMonth.cpp similarity index 100% rename from dbms/Functions/toMonth.cpp rename to src/Functions/toMonth.cpp diff --git a/dbms/Functions/toNullable.cpp b/src/Functions/toNullable.cpp similarity index 100% rename from dbms/Functions/toNullable.cpp rename to src/Functions/toNullable.cpp diff --git a/dbms/Functions/toQuarter.cpp b/src/Functions/toQuarter.cpp similarity index 100% rename from dbms/Functions/toQuarter.cpp rename to src/Functions/toQuarter.cpp diff --git a/dbms/Functions/toRelativeDayNum.cpp b/src/Functions/toRelativeDayNum.cpp similarity index 100% rename from dbms/Functions/toRelativeDayNum.cpp rename to src/Functions/toRelativeDayNum.cpp diff --git a/dbms/Functions/toRelativeHourNum.cpp b/src/Functions/toRelativeHourNum.cpp similarity index 100% rename from dbms/Functions/toRelativeHourNum.cpp rename to src/Functions/toRelativeHourNum.cpp diff --git a/dbms/Functions/toRelativeMinuteNum.cpp b/src/Functions/toRelativeMinuteNum.cpp similarity index 100% rename from dbms/Functions/toRelativeMinuteNum.cpp rename to src/Functions/toRelativeMinuteNum.cpp diff --git a/dbms/Functions/toRelativeMonthNum.cpp b/src/Functions/toRelativeMonthNum.cpp similarity index 100% rename from dbms/Functions/toRelativeMonthNum.cpp rename to src/Functions/toRelativeMonthNum.cpp diff --git a/dbms/Functions/toRelativeQuarterNum.cpp b/src/Functions/toRelativeQuarterNum.cpp similarity index 100% rename from dbms/Functions/toRelativeQuarterNum.cpp rename to src/Functions/toRelativeQuarterNum.cpp diff --git 
a/dbms/Functions/toRelativeSecondNum.cpp b/src/Functions/toRelativeSecondNum.cpp similarity index 100% rename from dbms/Functions/toRelativeSecondNum.cpp rename to src/Functions/toRelativeSecondNum.cpp diff --git a/dbms/Functions/toRelativeWeekNum.cpp b/src/Functions/toRelativeWeekNum.cpp similarity index 100% rename from dbms/Functions/toRelativeWeekNum.cpp rename to src/Functions/toRelativeWeekNum.cpp diff --git a/dbms/Functions/toRelativeYearNum.cpp b/src/Functions/toRelativeYearNum.cpp similarity index 100% rename from dbms/Functions/toRelativeYearNum.cpp rename to src/Functions/toRelativeYearNum.cpp diff --git a/dbms/Functions/toSecond.cpp b/src/Functions/toSecond.cpp similarity index 100% rename from dbms/Functions/toSecond.cpp rename to src/Functions/toSecond.cpp diff --git a/dbms/Functions/toStartOfDay.cpp b/src/Functions/toStartOfDay.cpp similarity index 100% rename from dbms/Functions/toStartOfDay.cpp rename to src/Functions/toStartOfDay.cpp diff --git a/dbms/Functions/toStartOfFifteenMinutes.cpp b/src/Functions/toStartOfFifteenMinutes.cpp similarity index 100% rename from dbms/Functions/toStartOfFifteenMinutes.cpp rename to src/Functions/toStartOfFifteenMinutes.cpp diff --git a/dbms/Functions/toStartOfFiveMinute.cpp b/src/Functions/toStartOfFiveMinute.cpp similarity index 100% rename from dbms/Functions/toStartOfFiveMinute.cpp rename to src/Functions/toStartOfFiveMinute.cpp diff --git a/dbms/Functions/toStartOfHour.cpp b/src/Functions/toStartOfHour.cpp similarity index 100% rename from dbms/Functions/toStartOfHour.cpp rename to src/Functions/toStartOfHour.cpp diff --git a/dbms/Functions/toStartOfISOYear.cpp b/src/Functions/toStartOfISOYear.cpp similarity index 100% rename from dbms/Functions/toStartOfISOYear.cpp rename to src/Functions/toStartOfISOYear.cpp diff --git a/dbms/Functions/toStartOfInterval.cpp b/src/Functions/toStartOfInterval.cpp similarity index 100% rename from dbms/Functions/toStartOfInterval.cpp rename to src/Functions/toStartOfInterval.cpp diff --git a/dbms/Functions/toStartOfMinute.cpp b/src/Functions/toStartOfMinute.cpp similarity index 100% rename from dbms/Functions/toStartOfMinute.cpp rename to src/Functions/toStartOfMinute.cpp diff --git a/dbms/Functions/toStartOfMonth.cpp b/src/Functions/toStartOfMonth.cpp similarity index 100% rename from dbms/Functions/toStartOfMonth.cpp rename to src/Functions/toStartOfMonth.cpp diff --git a/dbms/Functions/toStartOfQuarter.cpp b/src/Functions/toStartOfQuarter.cpp similarity index 100% rename from dbms/Functions/toStartOfQuarter.cpp rename to src/Functions/toStartOfQuarter.cpp diff --git a/dbms/Functions/toStartOfTenMinutes.cpp b/src/Functions/toStartOfTenMinutes.cpp similarity index 100% rename from dbms/Functions/toStartOfTenMinutes.cpp rename to src/Functions/toStartOfTenMinutes.cpp diff --git a/dbms/Functions/toStartOfYear.cpp b/src/Functions/toStartOfYear.cpp similarity index 100% rename from dbms/Functions/toStartOfYear.cpp rename to src/Functions/toStartOfYear.cpp diff --git a/dbms/Functions/toTime.cpp b/src/Functions/toTime.cpp similarity index 100% rename from dbms/Functions/toTime.cpp rename to src/Functions/toTime.cpp diff --git a/dbms/Functions/toTimeZone.cpp b/src/Functions/toTimeZone.cpp similarity index 100% rename from dbms/Functions/toTimeZone.cpp rename to src/Functions/toTimeZone.cpp diff --git a/dbms/Functions/toTypeName.cpp b/src/Functions/toTypeName.cpp similarity index 100% rename from dbms/Functions/toTypeName.cpp rename to src/Functions/toTypeName.cpp diff --git a/dbms/Functions/toValidUTF8.cpp 
b/src/Functions/toValidUTF8.cpp similarity index 100% rename from dbms/Functions/toValidUTF8.cpp rename to src/Functions/toValidUTF8.cpp diff --git a/dbms/Functions/toYYYYMM.cpp b/src/Functions/toYYYYMM.cpp similarity index 100% rename from dbms/Functions/toYYYYMM.cpp rename to src/Functions/toYYYYMM.cpp diff --git a/dbms/Functions/toYYYYMMDD.cpp b/src/Functions/toYYYYMMDD.cpp similarity index 100% rename from dbms/Functions/toYYYYMMDD.cpp rename to src/Functions/toYYYYMMDD.cpp diff --git a/dbms/Functions/toYYYYMMDDhhmmss.cpp b/src/Functions/toYYYYMMDDhhmmss.cpp similarity index 100% rename from dbms/Functions/toYYYYMMDDhhmmss.cpp rename to src/Functions/toYYYYMMDDhhmmss.cpp diff --git a/dbms/Functions/toYear.cpp b/src/Functions/toYear.cpp similarity index 100% rename from dbms/Functions/toYear.cpp rename to src/Functions/toYear.cpp diff --git a/dbms/Functions/today.cpp b/src/Functions/today.cpp similarity index 100% rename from dbms/Functions/today.cpp rename to src/Functions/today.cpp diff --git a/dbms/Functions/transform.cpp b/src/Functions/transform.cpp similarity index 100% rename from dbms/Functions/transform.cpp rename to src/Functions/transform.cpp diff --git a/dbms/Functions/trap.cpp b/src/Functions/trap.cpp similarity index 100% rename from dbms/Functions/trap.cpp rename to src/Functions/trap.cpp diff --git a/dbms/Functions/trim.cpp b/src/Functions/trim.cpp similarity index 100% rename from dbms/Functions/trim.cpp rename to src/Functions/trim.cpp diff --git a/dbms/Functions/tryBase64Decode.cpp b/src/Functions/tryBase64Decode.cpp similarity index 100% rename from dbms/Functions/tryBase64Decode.cpp rename to src/Functions/tryBase64Decode.cpp diff --git a/dbms/Functions/tuple.cpp b/src/Functions/tuple.cpp similarity index 100% rename from dbms/Functions/tuple.cpp rename to src/Functions/tuple.cpp diff --git a/dbms/Functions/tupleElement.cpp b/src/Functions/tupleElement.cpp similarity index 100% rename from dbms/Functions/tupleElement.cpp rename to src/Functions/tupleElement.cpp diff --git a/dbms/Functions/upper.cpp b/src/Functions/upper.cpp similarity index 100% rename from dbms/Functions/upper.cpp rename to src/Functions/upper.cpp diff --git a/dbms/Functions/upperUTF8.cpp b/src/Functions/upperUTF8.cpp similarity index 100% rename from dbms/Functions/upperUTF8.cpp rename to src/Functions/upperUTF8.cpp diff --git a/dbms/Functions/uptime.cpp b/src/Functions/uptime.cpp similarity index 100% rename from dbms/Functions/uptime.cpp rename to src/Functions/uptime.cpp diff --git a/dbms/Functions/version.cpp b/src/Functions/version.cpp similarity index 100% rename from dbms/Functions/version.cpp rename to src/Functions/version.cpp diff --git a/dbms/Functions/visibleWidth.cpp b/src/Functions/visibleWidth.cpp similarity index 100% rename from dbms/Functions/visibleWidth.cpp rename to src/Functions/visibleWidth.cpp diff --git a/dbms/Functions/visitParamExtractBool.cpp b/src/Functions/visitParamExtractBool.cpp similarity index 100% rename from dbms/Functions/visitParamExtractBool.cpp rename to src/Functions/visitParamExtractBool.cpp diff --git a/dbms/Functions/visitParamExtractFloat.cpp b/src/Functions/visitParamExtractFloat.cpp similarity index 100% rename from dbms/Functions/visitParamExtractFloat.cpp rename to src/Functions/visitParamExtractFloat.cpp diff --git a/dbms/Functions/visitParamExtractInt.cpp b/src/Functions/visitParamExtractInt.cpp similarity index 100% rename from dbms/Functions/visitParamExtractInt.cpp rename to src/Functions/visitParamExtractInt.cpp diff --git 
a/dbms/Functions/visitParamExtractRaw.cpp b/src/Functions/visitParamExtractRaw.cpp similarity index 100% rename from dbms/Functions/visitParamExtractRaw.cpp rename to src/Functions/visitParamExtractRaw.cpp diff --git a/dbms/Functions/visitParamExtractString.cpp b/src/Functions/visitParamExtractString.cpp similarity index 100% rename from dbms/Functions/visitParamExtractString.cpp rename to src/Functions/visitParamExtractString.cpp diff --git a/dbms/Functions/visitParamExtractUInt.cpp b/src/Functions/visitParamExtractUInt.cpp similarity index 100% rename from dbms/Functions/visitParamExtractUInt.cpp rename to src/Functions/visitParamExtractUInt.cpp diff --git a/dbms/Functions/visitParamHas.cpp b/src/Functions/visitParamHas.cpp similarity index 100% rename from dbms/Functions/visitParamHas.cpp rename to src/Functions/visitParamHas.cpp diff --git a/dbms/Functions/yandexConsistentHash.cpp b/src/Functions/yandexConsistentHash.cpp similarity index 100% rename from dbms/Functions/yandexConsistentHash.cpp rename to src/Functions/yandexConsistentHash.cpp diff --git a/dbms/Functions/yesterday.cpp b/src/Functions/yesterday.cpp similarity index 100% rename from dbms/Functions/yesterday.cpp rename to src/Functions/yesterday.cpp diff --git a/dbms/IO/AIO.cpp b/src/IO/AIO.cpp similarity index 100% rename from dbms/IO/AIO.cpp rename to src/IO/AIO.cpp diff --git a/dbms/IO/AIO.h b/src/IO/AIO.h similarity index 100% rename from dbms/IO/AIO.h rename to src/IO/AIO.h diff --git a/dbms/IO/AIOContextPool.cpp b/src/IO/AIOContextPool.cpp similarity index 100% rename from dbms/IO/AIOContextPool.cpp rename to src/IO/AIOContextPool.cpp diff --git a/dbms/IO/AIOContextPool.h b/src/IO/AIOContextPool.h similarity index 100% rename from dbms/IO/AIOContextPool.h rename to src/IO/AIOContextPool.h diff --git a/dbms/IO/AsynchronousWriteBuffer.h b/src/IO/AsynchronousWriteBuffer.h similarity index 100% rename from dbms/IO/AsynchronousWriteBuffer.h rename to src/IO/AsynchronousWriteBuffer.h diff --git a/dbms/IO/BitHelpers.h b/src/IO/BitHelpers.h similarity index 100% rename from dbms/IO/BitHelpers.h rename to src/IO/BitHelpers.h diff --git a/dbms/IO/BrotliReadBuffer.cpp b/src/IO/BrotliReadBuffer.cpp similarity index 100% rename from dbms/IO/BrotliReadBuffer.cpp rename to src/IO/BrotliReadBuffer.cpp diff --git a/dbms/IO/BrotliReadBuffer.h b/src/IO/BrotliReadBuffer.h similarity index 100% rename from dbms/IO/BrotliReadBuffer.h rename to src/IO/BrotliReadBuffer.h diff --git a/dbms/IO/BrotliWriteBuffer.cpp b/src/IO/BrotliWriteBuffer.cpp similarity index 100% rename from dbms/IO/BrotliWriteBuffer.cpp rename to src/IO/BrotliWriteBuffer.cpp diff --git a/dbms/IO/BrotliWriteBuffer.h b/src/IO/BrotliWriteBuffer.h similarity index 100% rename from dbms/IO/BrotliWriteBuffer.h rename to src/IO/BrotliWriteBuffer.h diff --git a/dbms/IO/BufferBase.h b/src/IO/BufferBase.h similarity index 100% rename from dbms/IO/BufferBase.h rename to src/IO/BufferBase.h diff --git a/dbms/IO/BufferWithOwnMemory.h b/src/IO/BufferWithOwnMemory.h similarity index 100% rename from dbms/IO/BufferWithOwnMemory.h rename to src/IO/BufferWithOwnMemory.h diff --git a/dbms/IO/CMakeLists.txt b/src/IO/CMakeLists.txt similarity index 100% rename from dbms/IO/CMakeLists.txt rename to src/IO/CMakeLists.txt diff --git a/dbms/IO/CascadeWriteBuffer.cpp b/src/IO/CascadeWriteBuffer.cpp similarity index 100% rename from dbms/IO/CascadeWriteBuffer.cpp rename to src/IO/CascadeWriteBuffer.cpp diff --git a/dbms/IO/CascadeWriteBuffer.h b/src/IO/CascadeWriteBuffer.h similarity index 100% 
rename from dbms/IO/CascadeWriteBuffer.h rename to src/IO/CascadeWriteBuffer.h diff --git a/dbms/IO/CompressionMethod.cpp b/src/IO/CompressionMethod.cpp similarity index 100% rename from dbms/IO/CompressionMethod.cpp rename to src/IO/CompressionMethod.cpp diff --git a/dbms/IO/CompressionMethod.h b/src/IO/CompressionMethod.h similarity index 100% rename from dbms/IO/CompressionMethod.h rename to src/IO/CompressionMethod.h diff --git a/dbms/IO/ConcatReadBuffer.h b/src/IO/ConcatReadBuffer.h similarity index 100% rename from dbms/IO/ConcatReadBuffer.h rename to src/IO/ConcatReadBuffer.h diff --git a/dbms/IO/ConnectionTimeouts.h b/src/IO/ConnectionTimeouts.h similarity index 100% rename from dbms/IO/ConnectionTimeouts.h rename to src/IO/ConnectionTimeouts.h diff --git a/dbms/IO/DoubleConverter.cpp b/src/IO/DoubleConverter.cpp similarity index 100% rename from dbms/IO/DoubleConverter.cpp rename to src/IO/DoubleConverter.cpp diff --git a/dbms/IO/DoubleConverter.h b/src/IO/DoubleConverter.h similarity index 100% rename from dbms/IO/DoubleConverter.h rename to src/IO/DoubleConverter.h diff --git a/dbms/IO/HDFSCommon.cpp b/src/IO/HDFSCommon.cpp similarity index 100% rename from dbms/IO/HDFSCommon.cpp rename to src/IO/HDFSCommon.cpp diff --git a/dbms/IO/HDFSCommon.h b/src/IO/HDFSCommon.h similarity index 100% rename from dbms/IO/HDFSCommon.h rename to src/IO/HDFSCommon.h diff --git a/dbms/IO/HTTPCommon.cpp b/src/IO/HTTPCommon.cpp similarity index 100% rename from dbms/IO/HTTPCommon.cpp rename to src/IO/HTTPCommon.cpp diff --git a/dbms/IO/HTTPCommon.h b/src/IO/HTTPCommon.h similarity index 100% rename from dbms/IO/HTTPCommon.h rename to src/IO/HTTPCommon.h diff --git a/dbms/IO/HashingReadBuffer.h b/src/IO/HashingReadBuffer.h similarity index 100% rename from dbms/IO/HashingReadBuffer.h rename to src/IO/HashingReadBuffer.h diff --git a/dbms/IO/HashingWriteBuffer.cpp b/src/IO/HashingWriteBuffer.cpp similarity index 100% rename from dbms/IO/HashingWriteBuffer.cpp rename to src/IO/HashingWriteBuffer.cpp diff --git a/dbms/IO/HashingWriteBuffer.h b/src/IO/HashingWriteBuffer.h similarity index 100% rename from dbms/IO/HashingWriteBuffer.h rename to src/IO/HashingWriteBuffer.h diff --git a/dbms/IO/HexWriteBuffer.cpp b/src/IO/HexWriteBuffer.cpp similarity index 100% rename from dbms/IO/HexWriteBuffer.cpp rename to src/IO/HexWriteBuffer.cpp diff --git a/dbms/IO/HexWriteBuffer.h b/src/IO/HexWriteBuffer.h similarity index 100% rename from dbms/IO/HexWriteBuffer.h rename to src/IO/HexWriteBuffer.h diff --git a/dbms/IO/IReadableWriteBuffer.h b/src/IO/IReadableWriteBuffer.h similarity index 100% rename from dbms/IO/IReadableWriteBuffer.h rename to src/IO/IReadableWriteBuffer.h diff --git a/dbms/IO/LimitReadBuffer.cpp b/src/IO/LimitReadBuffer.cpp similarity index 100% rename from dbms/IO/LimitReadBuffer.cpp rename to src/IO/LimitReadBuffer.cpp diff --git a/dbms/IO/LimitReadBuffer.h b/src/IO/LimitReadBuffer.h similarity index 100% rename from dbms/IO/LimitReadBuffer.h rename to src/IO/LimitReadBuffer.h diff --git a/dbms/IO/MMapReadBufferFromFile.cpp b/src/IO/MMapReadBufferFromFile.cpp similarity index 100% rename from dbms/IO/MMapReadBufferFromFile.cpp rename to src/IO/MMapReadBufferFromFile.cpp diff --git a/dbms/IO/MMapReadBufferFromFile.h b/src/IO/MMapReadBufferFromFile.h similarity index 100% rename from dbms/IO/MMapReadBufferFromFile.h rename to src/IO/MMapReadBufferFromFile.h diff --git a/dbms/IO/MMapReadBufferFromFileDescriptor.cpp b/src/IO/MMapReadBufferFromFileDescriptor.cpp similarity index 100% rename from 
dbms/IO/MMapReadBufferFromFileDescriptor.cpp rename to src/IO/MMapReadBufferFromFileDescriptor.cpp diff --git a/dbms/IO/MMapReadBufferFromFileDescriptor.h b/src/IO/MMapReadBufferFromFileDescriptor.h similarity index 100% rename from dbms/IO/MMapReadBufferFromFileDescriptor.h rename to src/IO/MMapReadBufferFromFileDescriptor.h diff --git a/dbms/IO/MemoryReadWriteBuffer.cpp b/src/IO/MemoryReadWriteBuffer.cpp similarity index 100% rename from dbms/IO/MemoryReadWriteBuffer.cpp rename to src/IO/MemoryReadWriteBuffer.cpp diff --git a/dbms/IO/MemoryReadWriteBuffer.h b/src/IO/MemoryReadWriteBuffer.h similarity index 100% rename from dbms/IO/MemoryReadWriteBuffer.h rename to src/IO/MemoryReadWriteBuffer.h diff --git a/dbms/IO/NullWriteBuffer.cpp b/src/IO/NullWriteBuffer.cpp similarity index 100% rename from dbms/IO/NullWriteBuffer.cpp rename to src/IO/NullWriteBuffer.cpp diff --git a/dbms/IO/NullWriteBuffer.h b/src/IO/NullWriteBuffer.h similarity index 100% rename from dbms/IO/NullWriteBuffer.h rename to src/IO/NullWriteBuffer.h diff --git a/dbms/IO/Operators.h b/src/IO/Operators.h similarity index 100% rename from dbms/IO/Operators.h rename to src/IO/Operators.h diff --git a/dbms/IO/PeekableReadBuffer.cpp b/src/IO/PeekableReadBuffer.cpp similarity index 100% rename from dbms/IO/PeekableReadBuffer.cpp rename to src/IO/PeekableReadBuffer.cpp diff --git a/dbms/IO/PeekableReadBuffer.h b/src/IO/PeekableReadBuffer.h similarity index 100% rename from dbms/IO/PeekableReadBuffer.h rename to src/IO/PeekableReadBuffer.h diff --git a/dbms/IO/Progress.cpp b/src/IO/Progress.cpp similarity index 100% rename from dbms/IO/Progress.cpp rename to src/IO/Progress.cpp diff --git a/dbms/IO/Progress.h b/src/IO/Progress.h similarity index 100% rename from dbms/IO/Progress.h rename to src/IO/Progress.h diff --git a/dbms/IO/ReadBuffer.h b/src/IO/ReadBuffer.h similarity index 100% rename from dbms/IO/ReadBuffer.h rename to src/IO/ReadBuffer.h diff --git a/dbms/IO/ReadBufferAIO.cpp b/src/IO/ReadBufferAIO.cpp similarity index 100% rename from dbms/IO/ReadBufferAIO.cpp rename to src/IO/ReadBufferAIO.cpp diff --git a/dbms/IO/ReadBufferAIO.h b/src/IO/ReadBufferAIO.h similarity index 100% rename from dbms/IO/ReadBufferAIO.h rename to src/IO/ReadBufferAIO.h diff --git a/dbms/IO/ReadBufferFromFile.cpp b/src/IO/ReadBufferFromFile.cpp similarity index 100% rename from dbms/IO/ReadBufferFromFile.cpp rename to src/IO/ReadBufferFromFile.cpp diff --git a/dbms/IO/ReadBufferFromFile.h b/src/IO/ReadBufferFromFile.h similarity index 100% rename from dbms/IO/ReadBufferFromFile.h rename to src/IO/ReadBufferFromFile.h diff --git a/dbms/IO/ReadBufferFromFileBase.cpp b/src/IO/ReadBufferFromFileBase.cpp similarity index 100% rename from dbms/IO/ReadBufferFromFileBase.cpp rename to src/IO/ReadBufferFromFileBase.cpp diff --git a/dbms/IO/ReadBufferFromFileBase.h b/src/IO/ReadBufferFromFileBase.h similarity index 100% rename from dbms/IO/ReadBufferFromFileBase.h rename to src/IO/ReadBufferFromFileBase.h diff --git a/dbms/IO/ReadBufferFromFileDescriptor.cpp b/src/IO/ReadBufferFromFileDescriptor.cpp similarity index 100% rename from dbms/IO/ReadBufferFromFileDescriptor.cpp rename to src/IO/ReadBufferFromFileDescriptor.cpp diff --git a/dbms/IO/ReadBufferFromFileDescriptor.h b/src/IO/ReadBufferFromFileDescriptor.h similarity index 100% rename from dbms/IO/ReadBufferFromFileDescriptor.h rename to src/IO/ReadBufferFromFileDescriptor.h diff --git a/dbms/IO/ReadBufferFromHDFS.cpp b/src/IO/ReadBufferFromHDFS.cpp similarity index 100% rename from 
dbms/IO/ReadBufferFromHDFS.cpp rename to src/IO/ReadBufferFromHDFS.cpp diff --git a/dbms/IO/ReadBufferFromHDFS.h b/src/IO/ReadBufferFromHDFS.h similarity index 100% rename from dbms/IO/ReadBufferFromHDFS.h rename to src/IO/ReadBufferFromHDFS.h diff --git a/dbms/IO/ReadBufferFromIStream.cpp b/src/IO/ReadBufferFromIStream.cpp similarity index 100% rename from dbms/IO/ReadBufferFromIStream.cpp rename to src/IO/ReadBufferFromIStream.cpp diff --git a/dbms/IO/ReadBufferFromIStream.h b/src/IO/ReadBufferFromIStream.h similarity index 100% rename from dbms/IO/ReadBufferFromIStream.h rename to src/IO/ReadBufferFromIStream.h diff --git a/dbms/IO/ReadBufferFromMemory.cpp b/src/IO/ReadBufferFromMemory.cpp similarity index 100% rename from dbms/IO/ReadBufferFromMemory.cpp rename to src/IO/ReadBufferFromMemory.cpp diff --git a/dbms/IO/ReadBufferFromMemory.h b/src/IO/ReadBufferFromMemory.h similarity index 100% rename from dbms/IO/ReadBufferFromMemory.h rename to src/IO/ReadBufferFromMemory.h diff --git a/dbms/IO/ReadBufferFromPocoSocket.cpp b/src/IO/ReadBufferFromPocoSocket.cpp similarity index 100% rename from dbms/IO/ReadBufferFromPocoSocket.cpp rename to src/IO/ReadBufferFromPocoSocket.cpp diff --git a/dbms/IO/ReadBufferFromPocoSocket.h b/src/IO/ReadBufferFromPocoSocket.h similarity index 100% rename from dbms/IO/ReadBufferFromPocoSocket.h rename to src/IO/ReadBufferFromPocoSocket.h diff --git a/dbms/IO/ReadBufferFromS3.cpp b/src/IO/ReadBufferFromS3.cpp similarity index 100% rename from dbms/IO/ReadBufferFromS3.cpp rename to src/IO/ReadBufferFromS3.cpp diff --git a/dbms/IO/ReadBufferFromS3.h b/src/IO/ReadBufferFromS3.h similarity index 100% rename from dbms/IO/ReadBufferFromS3.h rename to src/IO/ReadBufferFromS3.h diff --git a/dbms/IO/ReadBufferFromString.h b/src/IO/ReadBufferFromString.h similarity index 100% rename from dbms/IO/ReadBufferFromString.h rename to src/IO/ReadBufferFromString.h diff --git a/dbms/IO/ReadHelpers.cpp b/src/IO/ReadHelpers.cpp similarity index 100% rename from dbms/IO/ReadHelpers.cpp rename to src/IO/ReadHelpers.cpp diff --git a/dbms/IO/ReadHelpers.h b/src/IO/ReadHelpers.h similarity index 100% rename from dbms/IO/ReadHelpers.h rename to src/IO/ReadHelpers.h diff --git a/dbms/IO/ReadWriteBufferFromHTTP.cpp b/src/IO/ReadWriteBufferFromHTTP.cpp similarity index 100% rename from dbms/IO/ReadWriteBufferFromHTTP.cpp rename to src/IO/ReadWriteBufferFromHTTP.cpp diff --git a/dbms/IO/ReadWriteBufferFromHTTP.h b/src/IO/ReadWriteBufferFromHTTP.h similarity index 100% rename from dbms/IO/ReadWriteBufferFromHTTP.h rename to src/IO/ReadWriteBufferFromHTTP.h diff --git a/dbms/IO/S3Common.cpp b/src/IO/S3Common.cpp similarity index 100% rename from dbms/IO/S3Common.cpp rename to src/IO/S3Common.cpp diff --git a/dbms/IO/S3Common.h b/src/IO/S3Common.h similarity index 100% rename from dbms/IO/S3Common.h rename to src/IO/S3Common.h diff --git a/dbms/IO/SeekableReadBuffer.h b/src/IO/SeekableReadBuffer.h similarity index 100% rename from dbms/IO/SeekableReadBuffer.h rename to src/IO/SeekableReadBuffer.h diff --git a/dbms/IO/UncompressedCache.h b/src/IO/UncompressedCache.h similarity index 100% rename from dbms/IO/UncompressedCache.h rename to src/IO/UncompressedCache.h diff --git a/dbms/IO/UseSSL.cpp b/src/IO/UseSSL.cpp similarity index 100% rename from dbms/IO/UseSSL.cpp rename to src/IO/UseSSL.cpp diff --git a/dbms/IO/UseSSL.h b/src/IO/UseSSL.h similarity index 100% rename from dbms/IO/UseSSL.h rename to src/IO/UseSSL.h diff --git a/dbms/IO/VarInt.h b/src/IO/VarInt.h similarity index 100% 
rename from dbms/IO/VarInt.h rename to src/IO/VarInt.h diff --git a/dbms/IO/WriteBuffer.h b/src/IO/WriteBuffer.h similarity index 100% rename from dbms/IO/WriteBuffer.h rename to src/IO/WriteBuffer.h diff --git a/dbms/IO/WriteBufferAIO.cpp b/src/IO/WriteBufferAIO.cpp similarity index 100% rename from dbms/IO/WriteBufferAIO.cpp rename to src/IO/WriteBufferAIO.cpp diff --git a/dbms/IO/WriteBufferAIO.h b/src/IO/WriteBufferAIO.h similarity index 100% rename from dbms/IO/WriteBufferAIO.h rename to src/IO/WriteBufferAIO.h diff --git a/dbms/IO/WriteBufferFromArena.h b/src/IO/WriteBufferFromArena.h similarity index 100% rename from dbms/IO/WriteBufferFromArena.h rename to src/IO/WriteBufferFromArena.h diff --git a/dbms/IO/WriteBufferFromFile.cpp b/src/IO/WriteBufferFromFile.cpp similarity index 100% rename from dbms/IO/WriteBufferFromFile.cpp rename to src/IO/WriteBufferFromFile.cpp diff --git a/dbms/IO/WriteBufferFromFile.h b/src/IO/WriteBufferFromFile.h similarity index 100% rename from dbms/IO/WriteBufferFromFile.h rename to src/IO/WriteBufferFromFile.h diff --git a/dbms/IO/WriteBufferFromFileBase.cpp b/src/IO/WriteBufferFromFileBase.cpp similarity index 100% rename from dbms/IO/WriteBufferFromFileBase.cpp rename to src/IO/WriteBufferFromFileBase.cpp diff --git a/dbms/IO/WriteBufferFromFileBase.h b/src/IO/WriteBufferFromFileBase.h similarity index 100% rename from dbms/IO/WriteBufferFromFileBase.h rename to src/IO/WriteBufferFromFileBase.h diff --git a/dbms/IO/WriteBufferFromFileDescriptor.cpp b/src/IO/WriteBufferFromFileDescriptor.cpp similarity index 100% rename from dbms/IO/WriteBufferFromFileDescriptor.cpp rename to src/IO/WriteBufferFromFileDescriptor.cpp diff --git a/dbms/IO/WriteBufferFromFileDescriptor.h b/src/IO/WriteBufferFromFileDescriptor.h similarity index 100% rename from dbms/IO/WriteBufferFromFileDescriptor.h rename to src/IO/WriteBufferFromFileDescriptor.h diff --git a/dbms/IO/WriteBufferFromFileDescriptorDiscardOnFailure.cpp b/src/IO/WriteBufferFromFileDescriptorDiscardOnFailure.cpp similarity index 100% rename from dbms/IO/WriteBufferFromFileDescriptorDiscardOnFailure.cpp rename to src/IO/WriteBufferFromFileDescriptorDiscardOnFailure.cpp diff --git a/dbms/IO/WriteBufferFromFileDescriptorDiscardOnFailure.h b/src/IO/WriteBufferFromFileDescriptorDiscardOnFailure.h similarity index 100% rename from dbms/IO/WriteBufferFromFileDescriptorDiscardOnFailure.h rename to src/IO/WriteBufferFromFileDescriptorDiscardOnFailure.h diff --git a/dbms/IO/WriteBufferFromHDFS.cpp b/src/IO/WriteBufferFromHDFS.cpp similarity index 100% rename from dbms/IO/WriteBufferFromHDFS.cpp rename to src/IO/WriteBufferFromHDFS.cpp diff --git a/dbms/IO/WriteBufferFromHDFS.h b/src/IO/WriteBufferFromHDFS.h similarity index 100% rename from dbms/IO/WriteBufferFromHDFS.h rename to src/IO/WriteBufferFromHDFS.h diff --git a/dbms/IO/WriteBufferFromHTTP.cpp b/src/IO/WriteBufferFromHTTP.cpp similarity index 100% rename from dbms/IO/WriteBufferFromHTTP.cpp rename to src/IO/WriteBufferFromHTTP.cpp diff --git a/dbms/IO/WriteBufferFromHTTP.h b/src/IO/WriteBufferFromHTTP.h similarity index 100% rename from dbms/IO/WriteBufferFromHTTP.h rename to src/IO/WriteBufferFromHTTP.h diff --git a/dbms/IO/WriteBufferFromHTTPServerResponse.cpp b/src/IO/WriteBufferFromHTTPServerResponse.cpp similarity index 100% rename from dbms/IO/WriteBufferFromHTTPServerResponse.cpp rename to src/IO/WriteBufferFromHTTPServerResponse.cpp diff --git a/dbms/IO/WriteBufferFromHTTPServerResponse.h b/src/IO/WriteBufferFromHTTPServerResponse.h similarity index 
100% rename from dbms/IO/WriteBufferFromHTTPServerResponse.h rename to src/IO/WriteBufferFromHTTPServerResponse.h diff --git a/dbms/IO/WriteBufferFromOStream.cpp b/src/IO/WriteBufferFromOStream.cpp similarity index 100% rename from dbms/IO/WriteBufferFromOStream.cpp rename to src/IO/WriteBufferFromOStream.cpp diff --git a/dbms/IO/WriteBufferFromOStream.h b/src/IO/WriteBufferFromOStream.h similarity index 100% rename from dbms/IO/WriteBufferFromOStream.h rename to src/IO/WriteBufferFromOStream.h diff --git a/dbms/IO/WriteBufferFromPocoSocket.cpp b/src/IO/WriteBufferFromPocoSocket.cpp similarity index 100% rename from dbms/IO/WriteBufferFromPocoSocket.cpp rename to src/IO/WriteBufferFromPocoSocket.cpp diff --git a/dbms/IO/WriteBufferFromPocoSocket.h b/src/IO/WriteBufferFromPocoSocket.h similarity index 100% rename from dbms/IO/WriteBufferFromPocoSocket.h rename to src/IO/WriteBufferFromPocoSocket.h diff --git a/dbms/IO/WriteBufferFromS3.cpp b/src/IO/WriteBufferFromS3.cpp similarity index 100% rename from dbms/IO/WriteBufferFromS3.cpp rename to src/IO/WriteBufferFromS3.cpp diff --git a/dbms/IO/WriteBufferFromS3.h b/src/IO/WriteBufferFromS3.h similarity index 100% rename from dbms/IO/WriteBufferFromS3.h rename to src/IO/WriteBufferFromS3.h diff --git a/dbms/IO/WriteBufferFromString.h b/src/IO/WriteBufferFromString.h similarity index 100% rename from dbms/IO/WriteBufferFromString.h rename to src/IO/WriteBufferFromString.h diff --git a/dbms/IO/WriteBufferFromTemporaryFile.cpp b/src/IO/WriteBufferFromTemporaryFile.cpp similarity index 100% rename from dbms/IO/WriteBufferFromTemporaryFile.cpp rename to src/IO/WriteBufferFromTemporaryFile.cpp diff --git a/dbms/IO/WriteBufferFromTemporaryFile.h b/src/IO/WriteBufferFromTemporaryFile.h similarity index 100% rename from dbms/IO/WriteBufferFromTemporaryFile.h rename to src/IO/WriteBufferFromTemporaryFile.h diff --git a/dbms/IO/WriteBufferFromVector.h b/src/IO/WriteBufferFromVector.h similarity index 100% rename from dbms/IO/WriteBufferFromVector.h rename to src/IO/WriteBufferFromVector.h diff --git a/dbms/IO/WriteBufferValidUTF8.cpp b/src/IO/WriteBufferValidUTF8.cpp similarity index 100% rename from dbms/IO/WriteBufferValidUTF8.cpp rename to src/IO/WriteBufferValidUTF8.cpp diff --git a/dbms/IO/WriteBufferValidUTF8.h b/src/IO/WriteBufferValidUTF8.h similarity index 100% rename from dbms/IO/WriteBufferValidUTF8.h rename to src/IO/WriteBufferValidUTF8.h diff --git a/dbms/IO/WriteHelpers.cpp b/src/IO/WriteHelpers.cpp similarity index 100% rename from dbms/IO/WriteHelpers.cpp rename to src/IO/WriteHelpers.cpp diff --git a/dbms/IO/WriteHelpers.h b/src/IO/WriteHelpers.h similarity index 100% rename from dbms/IO/WriteHelpers.h rename to src/IO/WriteHelpers.h diff --git a/dbms/IO/WriteIntText.h b/src/IO/WriteIntText.h similarity index 100% rename from dbms/IO/WriteIntText.h rename to src/IO/WriteIntText.h diff --git a/dbms/IO/ZlibDeflatingWriteBuffer.cpp b/src/IO/ZlibDeflatingWriteBuffer.cpp similarity index 100% rename from dbms/IO/ZlibDeflatingWriteBuffer.cpp rename to src/IO/ZlibDeflatingWriteBuffer.cpp diff --git a/dbms/IO/ZlibDeflatingWriteBuffer.h b/src/IO/ZlibDeflatingWriteBuffer.h similarity index 100% rename from dbms/IO/ZlibDeflatingWriteBuffer.h rename to src/IO/ZlibDeflatingWriteBuffer.h diff --git a/dbms/IO/ZlibInflatingReadBuffer.cpp b/src/IO/ZlibInflatingReadBuffer.cpp similarity index 100% rename from dbms/IO/ZlibInflatingReadBuffer.cpp rename to src/IO/ZlibInflatingReadBuffer.cpp diff --git a/dbms/IO/ZlibInflatingReadBuffer.h 
b/src/IO/ZlibInflatingReadBuffer.h similarity index 100% rename from dbms/IO/ZlibInflatingReadBuffer.h rename to src/IO/ZlibInflatingReadBuffer.h diff --git a/dbms/IO/copyData.cpp b/src/IO/copyData.cpp similarity index 100% rename from dbms/IO/copyData.cpp rename to src/IO/copyData.cpp diff --git a/dbms/IO/copyData.h b/src/IO/copyData.h similarity index 100% rename from dbms/IO/copyData.h rename to src/IO/copyData.h diff --git a/dbms/IO/createReadBufferFromFileBase.cpp b/src/IO/createReadBufferFromFileBase.cpp similarity index 100% rename from dbms/IO/createReadBufferFromFileBase.cpp rename to src/IO/createReadBufferFromFileBase.cpp diff --git a/dbms/IO/createReadBufferFromFileBase.h b/src/IO/createReadBufferFromFileBase.h similarity index 100% rename from dbms/IO/createReadBufferFromFileBase.h rename to src/IO/createReadBufferFromFileBase.h diff --git a/dbms/IO/createWriteBufferFromFileBase.cpp b/src/IO/createWriteBufferFromFileBase.cpp similarity index 100% rename from dbms/IO/createWriteBufferFromFileBase.cpp rename to src/IO/createWriteBufferFromFileBase.cpp diff --git a/dbms/IO/createWriteBufferFromFileBase.h b/src/IO/createWriteBufferFromFileBase.h similarity index 100% rename from dbms/IO/createWriteBufferFromFileBase.h rename to src/IO/createWriteBufferFromFileBase.h diff --git a/dbms/IO/parseDateTimeBestEffort.cpp b/src/IO/parseDateTimeBestEffort.cpp similarity index 100% rename from dbms/IO/parseDateTimeBestEffort.cpp rename to src/IO/parseDateTimeBestEffort.cpp diff --git a/dbms/IO/parseDateTimeBestEffort.h b/src/IO/parseDateTimeBestEffort.h similarity index 100% rename from dbms/IO/parseDateTimeBestEffort.h rename to src/IO/parseDateTimeBestEffort.h diff --git a/dbms/IO/readDecimalText.h b/src/IO/readDecimalText.h similarity index 100% rename from dbms/IO/readDecimalText.h rename to src/IO/readDecimalText.h diff --git a/dbms/IO/readFloatText.cpp b/src/IO/readFloatText.cpp similarity index 100% rename from dbms/IO/readFloatText.cpp rename to src/IO/readFloatText.cpp diff --git a/dbms/IO/readFloatText.h b/src/IO/readFloatText.h similarity index 100% rename from dbms/IO/readFloatText.h rename to src/IO/readFloatText.h diff --git a/dbms/IO/tests/CMakeLists.txt b/src/IO/tests/CMakeLists.txt similarity index 100% rename from dbms/IO/tests/CMakeLists.txt rename to src/IO/tests/CMakeLists.txt diff --git a/dbms/IO/tests/async_write.cpp b/src/IO/tests/async_write.cpp similarity index 100% rename from dbms/IO/tests/async_write.cpp rename to src/IO/tests/async_write.cpp diff --git a/dbms/IO/tests/gtest_DateTime64_parsing_and_writing.cpp b/src/IO/tests/gtest_DateTime64_parsing_and_writing.cpp similarity index 100% rename from dbms/IO/tests/gtest_DateTime64_parsing_and_writing.cpp rename to src/IO/tests/gtest_DateTime64_parsing_and_writing.cpp diff --git a/dbms/IO/tests/gtest_DateTimeToString.cpp b/src/IO/tests/gtest_DateTimeToString.cpp similarity index 100% rename from dbms/IO/tests/gtest_DateTimeToString.cpp rename to src/IO/tests/gtest_DateTimeToString.cpp diff --git a/dbms/IO/tests/gtest_aio_seek_back_after_eof.cpp b/src/IO/tests/gtest_aio_seek_back_after_eof.cpp similarity index 100% rename from dbms/IO/tests/gtest_aio_seek_back_after_eof.cpp rename to src/IO/tests/gtest_aio_seek_back_after_eof.cpp diff --git a/dbms/IO/tests/gtest_bit_io.cpp b/src/IO/tests/gtest_bit_io.cpp similarity index 100% rename from dbms/IO/tests/gtest_bit_io.cpp rename to src/IO/tests/gtest_bit_io.cpp diff --git a/dbms/IO/tests/gtest_cascade_and_memory_write_buffer.cpp 
b/src/IO/tests/gtest_cascade_and_memory_write_buffer.cpp similarity index 100% rename from dbms/IO/tests/gtest_cascade_and_memory_write_buffer.cpp rename to src/IO/tests/gtest_cascade_and_memory_write_buffer.cpp diff --git a/dbms/IO/tests/gtest_peekable_read_buffer.cpp b/src/IO/tests/gtest_peekable_read_buffer.cpp similarity index 100% rename from dbms/IO/tests/gtest_peekable_read_buffer.cpp rename to src/IO/tests/gtest_peekable_read_buffer.cpp diff --git a/dbms/IO/tests/hashing_buffer.h b/src/IO/tests/hashing_buffer.h similarity index 100% rename from dbms/IO/tests/hashing_buffer.h rename to src/IO/tests/hashing_buffer.h diff --git a/dbms/IO/tests/hashing_read_buffer.cpp b/src/IO/tests/hashing_read_buffer.cpp similarity index 100% rename from dbms/IO/tests/hashing_read_buffer.cpp rename to src/IO/tests/hashing_read_buffer.cpp diff --git a/dbms/IO/tests/hashing_write_buffer.cpp b/src/IO/tests/hashing_write_buffer.cpp similarity index 100% rename from dbms/IO/tests/hashing_write_buffer.cpp rename to src/IO/tests/hashing_write_buffer.cpp diff --git a/dbms/IO/tests/io_operators.cpp b/src/IO/tests/io_operators.cpp similarity index 100% rename from dbms/IO/tests/io_operators.cpp rename to src/IO/tests/io_operators.cpp diff --git a/dbms/IO/tests/limit_read_buffer.cpp b/src/IO/tests/limit_read_buffer.cpp similarity index 100% rename from dbms/IO/tests/limit_read_buffer.cpp rename to src/IO/tests/limit_read_buffer.cpp diff --git a/dbms/IO/tests/limit_read_buffer.reference b/src/IO/tests/limit_read_buffer.reference similarity index 100% rename from dbms/IO/tests/limit_read_buffer.reference rename to src/IO/tests/limit_read_buffer.reference diff --git a/dbms/IO/tests/limit_read_buffer.sh b/src/IO/tests/limit_read_buffer.sh similarity index 100% rename from dbms/IO/tests/limit_read_buffer.sh rename to src/IO/tests/limit_read_buffer.sh diff --git a/dbms/IO/tests/limit_read_buffer2.cpp b/src/IO/tests/limit_read_buffer2.cpp similarity index 100% rename from dbms/IO/tests/limit_read_buffer2.cpp rename to src/IO/tests/limit_read_buffer2.cpp diff --git a/dbms/IO/tests/mempbrk.cpp b/src/IO/tests/mempbrk.cpp similarity index 100% rename from dbms/IO/tests/mempbrk.cpp rename to src/IO/tests/mempbrk.cpp diff --git a/dbms/IO/tests/o_direct_and_dirty_pages.cpp b/src/IO/tests/o_direct_and_dirty_pages.cpp similarity index 100% rename from dbms/IO/tests/o_direct_and_dirty_pages.cpp rename to src/IO/tests/o_direct_and_dirty_pages.cpp diff --git a/dbms/IO/tests/parse_date_time_best_effort.cpp b/src/IO/tests/parse_date_time_best_effort.cpp similarity index 100% rename from dbms/IO/tests/parse_date_time_best_effort.cpp rename to src/IO/tests/parse_date_time_best_effort.cpp diff --git a/dbms/IO/tests/parse_int_perf.cpp b/src/IO/tests/parse_int_perf.cpp similarity index 100% rename from dbms/IO/tests/parse_int_perf.cpp rename to src/IO/tests/parse_int_perf.cpp diff --git a/dbms/IO/tests/parse_int_perf2.cpp b/src/IO/tests/parse_int_perf2.cpp similarity index 100% rename from dbms/IO/tests/parse_int_perf2.cpp rename to src/IO/tests/parse_int_perf2.cpp diff --git a/dbms/IO/tests/read_buffer.cpp b/src/IO/tests/read_buffer.cpp similarity index 100% rename from dbms/IO/tests/read_buffer.cpp rename to src/IO/tests/read_buffer.cpp diff --git a/dbms/IO/tests/read_buffer_aio.cpp b/src/IO/tests/read_buffer_aio.cpp similarity index 100% rename from dbms/IO/tests/read_buffer_aio.cpp rename to src/IO/tests/read_buffer_aio.cpp diff --git a/dbms/IO/tests/read_buffer_perf.cpp b/src/IO/tests/read_buffer_perf.cpp similarity index 100% 
rename from dbms/IO/tests/read_buffer_perf.cpp rename to src/IO/tests/read_buffer_perf.cpp diff --git a/dbms/IO/tests/read_escaped_string.cpp b/src/IO/tests/read_escaped_string.cpp similarity index 100% rename from dbms/IO/tests/read_escaped_string.cpp rename to src/IO/tests/read_escaped_string.cpp diff --git a/dbms/IO/tests/read_float_perf.cpp b/src/IO/tests/read_float_perf.cpp similarity index 100% rename from dbms/IO/tests/read_float_perf.cpp rename to src/IO/tests/read_float_perf.cpp diff --git a/dbms/IO/tests/read_write_int.cpp b/src/IO/tests/read_write_int.cpp similarity index 100% rename from dbms/IO/tests/read_write_int.cpp rename to src/IO/tests/read_write_int.cpp diff --git a/dbms/IO/tests/ryu_test.cpp b/src/IO/tests/ryu_test.cpp similarity index 100% rename from dbms/IO/tests/ryu_test.cpp rename to src/IO/tests/ryu_test.cpp diff --git a/dbms/IO/tests/valid_utf8.cpp b/src/IO/tests/valid_utf8.cpp similarity index 100% rename from dbms/IO/tests/valid_utf8.cpp rename to src/IO/tests/valid_utf8.cpp diff --git a/dbms/IO/tests/valid_utf8_perf.cpp b/src/IO/tests/valid_utf8_perf.cpp similarity index 100% rename from dbms/IO/tests/valid_utf8_perf.cpp rename to src/IO/tests/valid_utf8_perf.cpp diff --git a/dbms/IO/tests/var_uint.cpp b/src/IO/tests/var_uint.cpp similarity index 100% rename from dbms/IO/tests/var_uint.cpp rename to src/IO/tests/var_uint.cpp diff --git a/dbms/IO/tests/write_buffer.cpp b/src/IO/tests/write_buffer.cpp similarity index 100% rename from dbms/IO/tests/write_buffer.cpp rename to src/IO/tests/write_buffer.cpp diff --git a/dbms/IO/tests/write_buffer_aio.cpp b/src/IO/tests/write_buffer_aio.cpp similarity index 100% rename from dbms/IO/tests/write_buffer_aio.cpp rename to src/IO/tests/write_buffer_aio.cpp diff --git a/dbms/IO/tests/write_buffer_perf.cpp b/src/IO/tests/write_buffer_perf.cpp similarity index 100% rename from dbms/IO/tests/write_buffer_perf.cpp rename to src/IO/tests/write_buffer_perf.cpp diff --git a/dbms/IO/tests/write_int.cpp b/src/IO/tests/write_int.cpp similarity index 100% rename from dbms/IO/tests/write_int.cpp rename to src/IO/tests/write_int.cpp diff --git a/dbms/IO/tests/zlib_buffers.cpp b/src/IO/tests/zlib_buffers.cpp similarity index 100% rename from dbms/IO/tests/zlib_buffers.cpp rename to src/IO/tests/zlib_buffers.cpp diff --git a/dbms/IO/tests/zlib_ng_bug.cpp b/src/IO/tests/zlib_ng_bug.cpp similarity index 100% rename from dbms/IO/tests/zlib_ng_bug.cpp rename to src/IO/tests/zlib_ng_bug.cpp diff --git a/dbms/Interpreters/ActionLocksManager.cpp b/src/Interpreters/ActionLocksManager.cpp similarity index 100% rename from dbms/Interpreters/ActionLocksManager.cpp rename to src/Interpreters/ActionLocksManager.cpp diff --git a/dbms/Interpreters/ActionLocksManager.h b/src/Interpreters/ActionLocksManager.h similarity index 100% rename from dbms/Interpreters/ActionLocksManager.h rename to src/Interpreters/ActionLocksManager.h diff --git a/dbms/Interpreters/ActionsVisitor.cpp b/src/Interpreters/ActionsVisitor.cpp similarity index 100% rename from dbms/Interpreters/ActionsVisitor.cpp rename to src/Interpreters/ActionsVisitor.cpp diff --git a/dbms/Interpreters/ActionsVisitor.h b/src/Interpreters/ActionsVisitor.h similarity index 100% rename from dbms/Interpreters/ActionsVisitor.h rename to src/Interpreters/ActionsVisitor.h diff --git a/dbms/Interpreters/AddDefaultDatabaseVisitor.h b/src/Interpreters/AddDefaultDatabaseVisitor.h similarity index 100% rename from dbms/Interpreters/AddDefaultDatabaseVisitor.h rename to 
src/Interpreters/AddDefaultDatabaseVisitor.h
diff --git a/dbms/Interpreters/AggregateDescription.h b/src/Interpreters/AggregateDescription.h
similarity index 100%
rename from dbms/Interpreters/AggregateDescription.h
rename to src/Interpreters/AggregateDescription.h
diff --git a/dbms/Interpreters/AggregationCommon.h b/src/Interpreters/AggregationCommon.h
similarity index 100%
rename from dbms/Interpreters/AggregationCommon.h
rename to src/Interpreters/AggregationCommon.h
diff --git a/dbms/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp
similarity index 100%
rename from dbms/Interpreters/Aggregator.cpp
rename to src/Interpreters/Aggregator.cpp
diff --git a/dbms/Interpreters/Aggregator.h b/src/Interpreters/Aggregator.h
similarity index 100%
rename from dbms/Interpreters/Aggregator.h
rename to src/Interpreters/Aggregator.h
diff --git a/dbms/Interpreters/Aliases.h b/src/Interpreters/Aliases.h
similarity index 100%
rename from dbms/Interpreters/Aliases.h
rename to src/Interpreters/Aliases.h
diff --git a/dbms/Interpreters/AnalyzedJoin.cpp b/src/Interpreters/AnalyzedJoin.cpp
similarity index 100%
rename from dbms/Interpreters/AnalyzedJoin.cpp
rename to src/Interpreters/AnalyzedJoin.cpp
diff --git a/dbms/Interpreters/AnalyzedJoin.h b/src/Interpreters/AnalyzedJoin.h
similarity index 100%
rename from dbms/Interpreters/AnalyzedJoin.h
rename to src/Interpreters/AnalyzedJoin.h
diff --git a/dbms/Interpreters/ArrayJoinAction.cpp b/src/Interpreters/ArrayJoinAction.cpp
similarity index 100%
rename from dbms/Interpreters/ArrayJoinAction.cpp
rename to src/Interpreters/ArrayJoinAction.cpp
diff --git a/dbms/Interpreters/ArrayJoinAction.h b/src/Interpreters/ArrayJoinAction.h
similarity index 100%
rename from dbms/Interpreters/ArrayJoinAction.h
rename to src/Interpreters/ArrayJoinAction.h
diff --git a/dbms/Interpreters/ArrayJoinedColumnsVisitor.h b/src/Interpreters/ArrayJoinedColumnsVisitor.h
similarity index 100%
rename from dbms/Interpreters/ArrayJoinedColumnsVisitor.h
rename to src/Interpreters/ArrayJoinedColumnsVisitor.h
diff --git a/dbms/Interpreters/AsteriskSemantic.h b/src/Interpreters/AsteriskSemantic.h
similarity index 100%
rename from dbms/Interpreters/AsteriskSemantic.h
rename to src/Interpreters/AsteriskSemantic.h
diff --git a/dbms/Interpreters/AsynchronousMetrics.cpp b/src/Interpreters/AsynchronousMetrics.cpp
similarity index 100%
rename from dbms/Interpreters/AsynchronousMetrics.cpp
rename to src/Interpreters/AsynchronousMetrics.cpp
diff --git a/dbms/Interpreters/AsynchronousMetrics.h b/src/Interpreters/AsynchronousMetrics.h
similarity index 100%
rename from dbms/Interpreters/AsynchronousMetrics.h
rename to src/Interpreters/AsynchronousMetrics.h
diff --git a/dbms/Interpreters/BloomFilter.cpp b/src/Interpreters/BloomFilter.cpp
similarity index 100%
rename from dbms/Interpreters/BloomFilter.cpp
rename to src/Interpreters/BloomFilter.cpp
diff --git a/dbms/Interpreters/BloomFilter.h b/src/Interpreters/BloomFilter.h
similarity index 100%
rename from dbms/Interpreters/BloomFilter.h
rename to src/Interpreters/BloomFilter.h
diff --git a/dbms/Interpreters/BloomFilterHash.h b/src/Interpreters/BloomFilterHash.h
similarity index 100%
rename from dbms/Interpreters/BloomFilterHash.h
rename to src/Interpreters/BloomFilterHash.h
diff --git a/dbms/Interpreters/CMakeLists.txt b/src/Interpreters/CMakeLists.txt
similarity index 100%
rename from dbms/Interpreters/CMakeLists.txt
rename to src/Interpreters/CMakeLists.txt
diff --git a/dbms/Interpreters/CancellationCode.h b/src/Interpreters/CancellationCode.h
similarity index 100%
rename from dbms/Interpreters/CancellationCode.h
rename to src/Interpreters/CancellationCode.h
diff --git a/dbms/Interpreters/CatBoostModel.cpp b/src/Interpreters/CatBoostModel.cpp
similarity index 100%
rename from dbms/Interpreters/CatBoostModel.cpp
rename to src/Interpreters/CatBoostModel.cpp
diff --git a/dbms/Interpreters/CatBoostModel.h b/src/Interpreters/CatBoostModel.h
similarity index 100%
rename from dbms/Interpreters/CatBoostModel.h
rename to src/Interpreters/CatBoostModel.h
diff --git a/dbms/Interpreters/ClientInfo.cpp b/src/Interpreters/ClientInfo.cpp
similarity index 100%
rename from dbms/Interpreters/ClientInfo.cpp
rename to src/Interpreters/ClientInfo.cpp
diff --git a/dbms/Interpreters/ClientInfo.h b/src/Interpreters/ClientInfo.h
similarity index 100%
rename from dbms/Interpreters/ClientInfo.h
rename to src/Interpreters/ClientInfo.h
diff --git a/dbms/Interpreters/Cluster.cpp b/src/Interpreters/Cluster.cpp
similarity index 100%
rename from dbms/Interpreters/Cluster.cpp
rename to src/Interpreters/Cluster.cpp
diff --git a/dbms/Interpreters/Cluster.h b/src/Interpreters/Cluster.h
similarity index 100%
rename from dbms/Interpreters/Cluster.h
rename to src/Interpreters/Cluster.h
diff --git a/dbms/Interpreters/ClusterProxy/IStreamFactory.h b/src/Interpreters/ClusterProxy/IStreamFactory.h
similarity index 100%
rename from dbms/Interpreters/ClusterProxy/IStreamFactory.h
rename to src/Interpreters/ClusterProxy/IStreamFactory.h
diff --git a/dbms/Interpreters/ClusterProxy/SelectStreamFactory.cpp b/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp
similarity index 100%
rename from dbms/Interpreters/ClusterProxy/SelectStreamFactory.cpp
rename to src/Interpreters/ClusterProxy/SelectStreamFactory.cpp
diff --git a/dbms/Interpreters/ClusterProxy/SelectStreamFactory.h b/src/Interpreters/ClusterProxy/SelectStreamFactory.h
similarity index 100%
rename from dbms/Interpreters/ClusterProxy/SelectStreamFactory.h
rename to src/Interpreters/ClusterProxy/SelectStreamFactory.h
diff --git a/dbms/Interpreters/ClusterProxy/executeQuery.cpp b/src/Interpreters/ClusterProxy/executeQuery.cpp
similarity index 100%
rename from dbms/Interpreters/ClusterProxy/executeQuery.cpp
rename to src/Interpreters/ClusterProxy/executeQuery.cpp
diff --git a/dbms/Interpreters/ClusterProxy/executeQuery.h b/src/Interpreters/ClusterProxy/executeQuery.h
similarity index 100%
rename from dbms/Interpreters/ClusterProxy/executeQuery.h
rename to src/Interpreters/ClusterProxy/executeQuery.h
diff --git a/dbms/Interpreters/CollectJoinOnKeysVisitor.cpp b/src/Interpreters/CollectJoinOnKeysVisitor.cpp
similarity index 100%
rename from dbms/Interpreters/CollectJoinOnKeysVisitor.cpp
rename to src/Interpreters/CollectJoinOnKeysVisitor.cpp
diff --git a/dbms/Interpreters/CollectJoinOnKeysVisitor.h b/src/Interpreters/CollectJoinOnKeysVisitor.h
similarity index 100%
rename from dbms/Interpreters/CollectJoinOnKeysVisitor.h
rename to src/Interpreters/CollectJoinOnKeysVisitor.h
diff --git a/dbms/Interpreters/ColumnNamesContext.cpp b/src/Interpreters/ColumnNamesContext.cpp
similarity index 100%
rename from dbms/Interpreters/ColumnNamesContext.cpp
rename to src/Interpreters/ColumnNamesContext.cpp
diff --git a/dbms/Interpreters/ColumnNamesContext.h b/src/Interpreters/ColumnNamesContext.h
similarity index 100%
rename from dbms/Interpreters/ColumnNamesContext.h
rename to src/Interpreters/ColumnNamesContext.h
diff --git a/dbms/Interpreters/Context.cpp b/src/Interpreters/Context.cpp
similarity index 100%
rename from dbms/Interpreters/Context.cpp
rename to src/Interpreters/Context.cpp
diff --git a/dbms/Interpreters/Context.h b/src/Interpreters/Context.h
similarity index 100%
rename from dbms/Interpreters/Context.h
rename to src/Interpreters/Context.h
diff --git a/dbms/Interpreters/CrossToInnerJoinVisitor.cpp b/src/Interpreters/CrossToInnerJoinVisitor.cpp
similarity index 100%
rename from dbms/Interpreters/CrossToInnerJoinVisitor.cpp
rename to src/Interpreters/CrossToInnerJoinVisitor.cpp
diff --git a/dbms/Interpreters/CrossToInnerJoinVisitor.h b/src/Interpreters/CrossToInnerJoinVisitor.h
similarity index 100%
rename from dbms/Interpreters/CrossToInnerJoinVisitor.h
rename to src/Interpreters/CrossToInnerJoinVisitor.h
diff --git a/dbms/Interpreters/DDLWorker.cpp b/src/Interpreters/DDLWorker.cpp
similarity index 100%
rename from dbms/Interpreters/DDLWorker.cpp
rename to src/Interpreters/DDLWorker.cpp
diff --git a/dbms/Interpreters/DDLWorker.h b/src/Interpreters/DDLWorker.h
similarity index 100%
rename from dbms/Interpreters/DDLWorker.h
rename to src/Interpreters/DDLWorker.h
diff --git a/dbms/Interpreters/DNSCacheUpdater.cpp b/src/Interpreters/DNSCacheUpdater.cpp
similarity index 100%
rename from dbms/Interpreters/DNSCacheUpdater.cpp
rename to src/Interpreters/DNSCacheUpdater.cpp
diff --git a/dbms/Interpreters/DNSCacheUpdater.h b/src/Interpreters/DNSCacheUpdater.h
similarity index 100%
rename from dbms/Interpreters/DNSCacheUpdater.h
rename to src/Interpreters/DNSCacheUpdater.h
diff --git a/dbms/Interpreters/DatabaseAndTableWithAlias.cpp b/src/Interpreters/DatabaseAndTableWithAlias.cpp
similarity index 100%
rename from dbms/Interpreters/DatabaseAndTableWithAlias.cpp
rename to src/Interpreters/DatabaseAndTableWithAlias.cpp
diff --git a/dbms/Interpreters/DatabaseAndTableWithAlias.h b/src/Interpreters/DatabaseAndTableWithAlias.h
similarity index 100%
rename from dbms/Interpreters/DatabaseAndTableWithAlias.h
rename to src/Interpreters/DatabaseAndTableWithAlias.h
diff --git a/dbms/Interpreters/DatabaseCatalog.cpp b/src/Interpreters/DatabaseCatalog.cpp
similarity index 100%
rename from dbms/Interpreters/DatabaseCatalog.cpp
rename to src/Interpreters/DatabaseCatalog.cpp
diff --git a/dbms/Interpreters/DatabaseCatalog.h b/src/Interpreters/DatabaseCatalog.h
similarity index 100%
rename from dbms/Interpreters/DatabaseCatalog.h
rename to src/Interpreters/DatabaseCatalog.h
diff --git a/dbms/Interpreters/EmbeddedDictionaries.cpp b/src/Interpreters/EmbeddedDictionaries.cpp
similarity index 100%
rename from dbms/Interpreters/EmbeddedDictionaries.cpp
rename to src/Interpreters/EmbeddedDictionaries.cpp
diff --git a/dbms/Interpreters/EmbeddedDictionaries.h b/src/Interpreters/EmbeddedDictionaries.h
similarity index 100%
rename from dbms/Interpreters/EmbeddedDictionaries.h
rename to src/Interpreters/EmbeddedDictionaries.h
diff --git a/dbms/Interpreters/ExecuteScalarSubqueriesVisitor.cpp b/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp
similarity index 100%
rename from dbms/Interpreters/ExecuteScalarSubqueriesVisitor.cpp
rename to src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp
diff --git a/dbms/Interpreters/ExecuteScalarSubqueriesVisitor.h b/src/Interpreters/ExecuteScalarSubqueriesVisitor.h
similarity index 100%
rename from dbms/Interpreters/ExecuteScalarSubqueriesVisitor.h
rename to src/Interpreters/ExecuteScalarSubqueriesVisitor.h
diff --git a/dbms/Interpreters/ExpressionActions.cpp b/src/Interpreters/ExpressionActions.cpp
similarity index 100%
rename from dbms/Interpreters/ExpressionActions.cpp
rename to src/Interpreters/ExpressionActions.cpp
diff --git a/dbms/Interpreters/ExpressionActions.h b/src/Interpreters/ExpressionActions.h
similarity index 100%
rename from dbms/Interpreters/ExpressionActions.h
rename to src/Interpreters/ExpressionActions.h
diff --git a/dbms/Interpreters/ExpressionAnalyzer.cpp b/src/Interpreters/ExpressionAnalyzer.cpp
similarity index 100%
rename from dbms/Interpreters/ExpressionAnalyzer.cpp
rename to src/Interpreters/ExpressionAnalyzer.cpp
diff --git a/dbms/Interpreters/ExpressionAnalyzer.h b/src/Interpreters/ExpressionAnalyzer.h
similarity index 100%
rename from dbms/Interpreters/ExpressionAnalyzer.h
rename to src/Interpreters/ExpressionAnalyzer.h
diff --git a/dbms/Interpreters/ExpressionJIT.cpp b/src/Interpreters/ExpressionJIT.cpp
similarity index 100%
rename from dbms/Interpreters/ExpressionJIT.cpp
rename to src/Interpreters/ExpressionJIT.cpp
diff --git a/dbms/Interpreters/ExpressionJIT.h b/src/Interpreters/ExpressionJIT.h
similarity index 100%
rename from dbms/Interpreters/ExpressionJIT.h
rename to src/Interpreters/ExpressionJIT.h
diff --git a/dbms/Interpreters/ExternalDictionariesLoader.cpp b/src/Interpreters/ExternalDictionariesLoader.cpp
similarity index 100%
rename from dbms/Interpreters/ExternalDictionariesLoader.cpp
rename to src/Interpreters/ExternalDictionariesLoader.cpp
diff --git a/dbms/Interpreters/ExternalDictionariesLoader.h b/src/Interpreters/ExternalDictionariesLoader.h
similarity index 100%
rename from dbms/Interpreters/ExternalDictionariesLoader.h
rename to src/Interpreters/ExternalDictionariesLoader.h
diff --git a/dbms/Interpreters/ExternalLoader.cpp b/src/Interpreters/ExternalLoader.cpp
similarity index 100%
rename from dbms/Interpreters/ExternalLoader.cpp
rename to src/Interpreters/ExternalLoader.cpp
diff --git a/dbms/Interpreters/ExternalLoader.h b/src/Interpreters/ExternalLoader.h
similarity index 100%
rename from dbms/Interpreters/ExternalLoader.h
rename to src/Interpreters/ExternalLoader.h
diff --git a/dbms/Interpreters/ExternalLoaderDatabaseConfigRepository.cpp b/src/Interpreters/ExternalLoaderDatabaseConfigRepository.cpp
similarity index 100%
rename from dbms/Interpreters/ExternalLoaderDatabaseConfigRepository.cpp
rename to src/Interpreters/ExternalLoaderDatabaseConfigRepository.cpp
diff --git a/dbms/Interpreters/ExternalLoaderDatabaseConfigRepository.h b/src/Interpreters/ExternalLoaderDatabaseConfigRepository.h
similarity index 100%
rename from dbms/Interpreters/ExternalLoaderDatabaseConfigRepository.h
rename to src/Interpreters/ExternalLoaderDatabaseConfigRepository.h
diff --git a/dbms/Interpreters/ExternalLoaderTempConfigRepository.cpp b/src/Interpreters/ExternalLoaderTempConfigRepository.cpp
similarity index 100%
rename from dbms/Interpreters/ExternalLoaderTempConfigRepository.cpp
rename to src/Interpreters/ExternalLoaderTempConfigRepository.cpp
diff --git a/dbms/Interpreters/ExternalLoaderTempConfigRepository.h b/src/Interpreters/ExternalLoaderTempConfigRepository.h
similarity index 100%
rename from dbms/Interpreters/ExternalLoaderTempConfigRepository.h
rename to src/Interpreters/ExternalLoaderTempConfigRepository.h
diff --git a/dbms/Interpreters/ExternalLoaderXMLConfigRepository.cpp b/src/Interpreters/ExternalLoaderXMLConfigRepository.cpp
similarity index 100%
rename from dbms/Interpreters/ExternalLoaderXMLConfigRepository.cpp
rename to src/Interpreters/ExternalLoaderXMLConfigRepository.cpp
diff --git a/dbms/Interpreters/ExternalLoaderXMLConfigRepository.h b/src/Interpreters/ExternalLoaderXMLConfigRepository.h
similarity index 100%
rename from dbms/Interpreters/ExternalLoaderXMLConfigRepository.h
rename to src/Interpreters/ExternalLoaderXMLConfigRepository.h
diff --git a/dbms/Interpreters/ExternalModelsLoader.cpp b/src/Interpreters/ExternalModelsLoader.cpp
similarity index 100%
rename from dbms/Interpreters/ExternalModelsLoader.cpp
rename to src/Interpreters/ExternalModelsLoader.cpp
diff --git a/dbms/Interpreters/ExternalModelsLoader.h b/src/Interpreters/ExternalModelsLoader.h
similarity index 100%
rename from dbms/Interpreters/ExternalModelsLoader.h
rename to src/Interpreters/ExternalModelsLoader.h
diff --git a/dbms/Interpreters/ExtractExpressionInfoVisitor.cpp b/src/Interpreters/ExtractExpressionInfoVisitor.cpp
similarity index 100%
rename from dbms/Interpreters/ExtractExpressionInfoVisitor.cpp
rename to src/Interpreters/ExtractExpressionInfoVisitor.cpp
diff --git a/dbms/Interpreters/ExtractExpressionInfoVisitor.h b/src/Interpreters/ExtractExpressionInfoVisitor.h
similarity index 100%
rename from dbms/Interpreters/ExtractExpressionInfoVisitor.h
rename to src/Interpreters/ExtractExpressionInfoVisitor.h
diff --git a/dbms/Interpreters/FillingRow.cpp b/src/Interpreters/FillingRow.cpp
similarity index 100%
rename from dbms/Interpreters/FillingRow.cpp
rename to src/Interpreters/FillingRow.cpp
diff --git a/dbms/Interpreters/FillingRow.h b/src/Interpreters/FillingRow.h
similarity index 100%
rename from dbms/Interpreters/FillingRow.h
rename to src/Interpreters/FillingRow.h
diff --git a/dbms/Interpreters/GetAggregatesVisitor.h b/src/Interpreters/GetAggregatesVisitor.h
similarity index 100%
rename from dbms/Interpreters/GetAggregatesVisitor.h
rename to src/Interpreters/GetAggregatesVisitor.h
diff --git a/dbms/Interpreters/GlobalSubqueriesVisitor.h b/src/Interpreters/GlobalSubqueriesVisitor.h
similarity index 100%
rename from dbms/Interpreters/GlobalSubqueriesVisitor.h
rename to src/Interpreters/GlobalSubqueriesVisitor.h
diff --git a/dbms/Interpreters/IExternalLoadable.cpp b/src/Interpreters/IExternalLoadable.cpp
similarity index 100%
rename from dbms/Interpreters/IExternalLoadable.cpp
rename to src/Interpreters/IExternalLoadable.cpp
diff --git a/dbms/Interpreters/IExternalLoadable.h b/src/Interpreters/IExternalLoadable.h
similarity index 100%
rename from dbms/Interpreters/IExternalLoadable.h
rename to src/Interpreters/IExternalLoadable.h
diff --git a/dbms/Interpreters/IExternalLoaderConfigRepository.h b/src/Interpreters/IExternalLoaderConfigRepository.h
similarity index 100%
rename from dbms/Interpreters/IExternalLoaderConfigRepository.h
rename to src/Interpreters/IExternalLoaderConfigRepository.h
diff --git a/dbms/Interpreters/IInterpreter.h b/src/Interpreters/IInterpreter.h
similarity index 100%
rename from dbms/Interpreters/IInterpreter.h
rename to src/Interpreters/IInterpreter.h
diff --git a/dbms/Interpreters/IJoin.h b/src/Interpreters/IJoin.h
similarity index 100%
rename from dbms/Interpreters/IJoin.h
rename to src/Interpreters/IJoin.h
diff --git a/dbms/Interpreters/IdentifierSemantic.cpp b/src/Interpreters/IdentifierSemantic.cpp
similarity index 100%
rename from dbms/Interpreters/IdentifierSemantic.cpp
rename to src/Interpreters/IdentifierSemantic.cpp
diff --git a/dbms/Interpreters/IdentifierSemantic.h b/src/Interpreters/IdentifierSemantic.h
similarity index 100%
rename from dbms/Interpreters/IdentifierSemantic.h
rename to src/Interpreters/IdentifierSemantic.h
diff --git a/dbms/Interpreters/InDepthNodeVisitor.h b/src/Interpreters/InDepthNodeVisitor.h
similarity index 100%
rename from dbms/Interpreters/InDepthNodeVisitor.h
rename to src/Interpreters/InDepthNodeVisitor.h
diff --git a/dbms/Interpreters/InJoinSubqueriesPreprocessor.cpp b/src/Interpreters/InJoinSubqueriesPreprocessor.cpp
similarity index 100%
rename from dbms/Interpreters/InJoinSubqueriesPreprocessor.cpp
rename to src/Interpreters/InJoinSubqueriesPreprocessor.cpp
diff --git a/dbms/Interpreters/InJoinSubqueriesPreprocessor.h b/src/Interpreters/InJoinSubqueriesPreprocessor.h
similarity index 100%
rename from dbms/Interpreters/InJoinSubqueriesPreprocessor.h
rename to src/Interpreters/InJoinSubqueriesPreprocessor.h
diff --git a/dbms/Interpreters/InternalTextLogsQueue.cpp b/src/Interpreters/InternalTextLogsQueue.cpp
similarity index 100%
rename from dbms/Interpreters/InternalTextLogsQueue.cpp
rename to src/Interpreters/InternalTextLogsQueue.cpp
diff --git a/dbms/Interpreters/InternalTextLogsQueue.h b/src/Interpreters/InternalTextLogsQueue.h
similarity index 100%
rename from dbms/Interpreters/InternalTextLogsQueue.h
rename to src/Interpreters/InternalTextLogsQueue.h
diff --git a/dbms/Interpreters/InterpreterAlterQuery.cpp b/src/Interpreters/InterpreterAlterQuery.cpp
similarity index 100%
rename from dbms/Interpreters/InterpreterAlterQuery.cpp
rename to src/Interpreters/InterpreterAlterQuery.cpp
diff --git a/dbms/Interpreters/InterpreterAlterQuery.h b/src/Interpreters/InterpreterAlterQuery.h
similarity index 100%
rename from dbms/Interpreters/InterpreterAlterQuery.h
rename to src/Interpreters/InterpreterAlterQuery.h
diff --git a/dbms/Interpreters/InterpreterCheckQuery.cpp b/src/Interpreters/InterpreterCheckQuery.cpp
similarity index 100%
rename from dbms/Interpreters/InterpreterCheckQuery.cpp
rename to src/Interpreters/InterpreterCheckQuery.cpp
diff --git a/dbms/Interpreters/InterpreterCheckQuery.h b/src/Interpreters/InterpreterCheckQuery.h
similarity index 100%
rename from dbms/Interpreters/InterpreterCheckQuery.h
rename to src/Interpreters/InterpreterCheckQuery.h
diff --git a/dbms/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp
similarity index 100%
rename from dbms/Interpreters/InterpreterCreateQuery.cpp
rename to src/Interpreters/InterpreterCreateQuery.cpp
diff --git a/dbms/Interpreters/InterpreterCreateQuery.h b/src/Interpreters/InterpreterCreateQuery.h
similarity index 100%
rename from dbms/Interpreters/InterpreterCreateQuery.h
rename to src/Interpreters/InterpreterCreateQuery.h
diff --git a/dbms/Interpreters/InterpreterCreateQuotaQuery.cpp b/src/Interpreters/InterpreterCreateQuotaQuery.cpp
similarity index 100%
rename from dbms/Interpreters/InterpreterCreateQuotaQuery.cpp
rename to src/Interpreters/InterpreterCreateQuotaQuery.cpp
diff --git a/dbms/Interpreters/InterpreterCreateQuotaQuery.h b/src/Interpreters/InterpreterCreateQuotaQuery.h
similarity index 100%
rename from dbms/Interpreters/InterpreterCreateQuotaQuery.h
rename to src/Interpreters/InterpreterCreateQuotaQuery.h
diff --git a/dbms/Interpreters/InterpreterCreateRoleQuery.cpp b/src/Interpreters/InterpreterCreateRoleQuery.cpp
similarity index 100%
rename from dbms/Interpreters/InterpreterCreateRoleQuery.cpp
rename to src/Interpreters/InterpreterCreateRoleQuery.cpp
diff --git a/dbms/Interpreters/InterpreterCreateRoleQuery.h b/src/Interpreters/InterpreterCreateRoleQuery.h
similarity index 100%
rename from dbms/Interpreters/InterpreterCreateRoleQuery.h
rename to src/Interpreters/InterpreterCreateRoleQuery.h
diff --git a/dbms/Interpreters/InterpreterCreateRowPolicyQuery.cpp b/src/Interpreters/InterpreterCreateRowPolicyQuery.cpp
similarity index 100%
rename from dbms/Interpreters/InterpreterCreateRowPolicyQuery.cpp
rename to src/Interpreters/InterpreterCreateRowPolicyQuery.cpp
diff --git a/dbms/Interpreters/InterpreterCreateRowPolicyQuery.h b/src/Interpreters/InterpreterCreateRowPolicyQuery.h
similarity index 100%
rename from dbms/Interpreters/InterpreterCreateRowPolicyQuery.h
rename to src/Interpreters/InterpreterCreateRowPolicyQuery.h
diff --git a/dbms/Interpreters/InterpreterCreateSettingsProfileQuery.cpp b/src/Interpreters/InterpreterCreateSettingsProfileQuery.cpp
similarity index 100%
rename from dbms/Interpreters/InterpreterCreateSettingsProfileQuery.cpp
rename to src/Interpreters/InterpreterCreateSettingsProfileQuery.cpp
diff --git a/dbms/Interpreters/InterpreterCreateSettingsProfileQuery.h b/src/Interpreters/InterpreterCreateSettingsProfileQuery.h
similarity index 100%
rename from dbms/Interpreters/InterpreterCreateSettingsProfileQuery.h
rename to src/Interpreters/InterpreterCreateSettingsProfileQuery.h
diff --git a/dbms/Interpreters/InterpreterCreateUserQuery.cpp b/src/Interpreters/InterpreterCreateUserQuery.cpp
similarity index 100%
rename from dbms/Interpreters/InterpreterCreateUserQuery.cpp
rename to src/Interpreters/InterpreterCreateUserQuery.cpp
diff --git a/dbms/Interpreters/InterpreterCreateUserQuery.h b/src/Interpreters/InterpreterCreateUserQuery.h
similarity index 100%
rename from dbms/Interpreters/InterpreterCreateUserQuery.h
rename to src/Interpreters/InterpreterCreateUserQuery.h
diff --git a/dbms/Interpreters/InterpreterDescribeQuery.cpp b/src/Interpreters/InterpreterDescribeQuery.cpp
similarity index 100%
rename from dbms/Interpreters/InterpreterDescribeQuery.cpp
rename to src/Interpreters/InterpreterDescribeQuery.cpp
diff --git a/dbms/Interpreters/InterpreterDescribeQuery.h b/src/Interpreters/InterpreterDescribeQuery.h
similarity index 100%
rename from dbms/Interpreters/InterpreterDescribeQuery.h
rename to src/Interpreters/InterpreterDescribeQuery.h
diff --git a/dbms/Interpreters/InterpreterDropAccessEntityQuery.cpp b/src/Interpreters/InterpreterDropAccessEntityQuery.cpp
similarity index 100%
rename from dbms/Interpreters/InterpreterDropAccessEntityQuery.cpp
rename to src/Interpreters/InterpreterDropAccessEntityQuery.cpp
diff --git a/dbms/Interpreters/InterpreterDropAccessEntityQuery.h b/src/Interpreters/InterpreterDropAccessEntityQuery.h
similarity index 100%
rename from dbms/Interpreters/InterpreterDropAccessEntityQuery.h
rename to src/Interpreters/InterpreterDropAccessEntityQuery.h
diff --git a/dbms/Interpreters/InterpreterDropQuery.cpp b/src/Interpreters/InterpreterDropQuery.cpp
similarity index 100%
rename from dbms/Interpreters/InterpreterDropQuery.cpp
rename to src/Interpreters/InterpreterDropQuery.cpp
diff --git a/dbms/Interpreters/InterpreterDropQuery.h b/src/Interpreters/InterpreterDropQuery.h
similarity index 100%
rename from dbms/Interpreters/InterpreterDropQuery.h
rename to src/Interpreters/InterpreterDropQuery.h
diff --git a/dbms/Interpreters/InterpreterExistsQuery.cpp b/src/Interpreters/InterpreterExistsQuery.cpp
similarity index 100%
rename from dbms/Interpreters/InterpreterExistsQuery.cpp
rename to src/Interpreters/InterpreterExistsQuery.cpp
diff --git a/dbms/Interpreters/InterpreterExistsQuery.h b/src/Interpreters/InterpreterExistsQuery.h
similarity index 100%
rename from dbms/Interpreters/InterpreterExistsQuery.h
rename to src/Interpreters/InterpreterExistsQuery.h
diff --git a/dbms/Interpreters/InterpreterExplainQuery.cpp b/src/Interpreters/InterpreterExplainQuery.cpp
similarity index 100%
rename from dbms/Interpreters/InterpreterExplainQuery.cpp
rename to src/Interpreters/InterpreterExplainQuery.cpp
diff --git a/dbms/Interpreters/InterpreterExplainQuery.h b/src/Interpreters/InterpreterExplainQuery.h
similarity index 100%
rename from dbms/Interpreters/InterpreterExplainQuery.h
rename to src/Interpreters/InterpreterExplainQuery.h
diff --git a/dbms/Interpreters/InterpreterFactory.cpp b/src/Interpreters/InterpreterFactory.cpp
similarity index 100%
rename from dbms/Interpreters/InterpreterFactory.cpp
rename to src/Interpreters/InterpreterFactory.cpp
diff --git a/dbms/Interpreters/InterpreterFactory.h b/src/Interpreters/InterpreterFactory.h
similarity index 100%
rename from dbms/Interpreters/InterpreterFactory.h
rename to src/Interpreters/InterpreterFactory.h
diff --git a/dbms/Interpreters/InterpreterGrantQuery.cpp b/src/Interpreters/InterpreterGrantQuery.cpp
similarity index 100%
rename from dbms/Interpreters/InterpreterGrantQuery.cpp
rename to src/Interpreters/InterpreterGrantQuery.cpp
diff --git a/dbms/Interpreters/InterpreterGrantQuery.h b/src/Interpreters/InterpreterGrantQuery.h
similarity index 100%
rename from dbms/Interpreters/InterpreterGrantQuery.h
rename to src/Interpreters/InterpreterGrantQuery.h
diff --git a/dbms/Interpreters/InterpreterInsertQuery.cpp b/src/Interpreters/InterpreterInsertQuery.cpp
similarity index 100%
rename from dbms/Interpreters/InterpreterInsertQuery.cpp
rename to src/Interpreters/InterpreterInsertQuery.cpp
diff --git a/dbms/Interpreters/InterpreterInsertQuery.h b/src/Interpreters/InterpreterInsertQuery.h
similarity index 100%
rename from dbms/Interpreters/InterpreterInsertQuery.h
rename to src/Interpreters/InterpreterInsertQuery.h
diff --git a/dbms/Interpreters/InterpreterKillQueryQuery.cpp b/src/Interpreters/InterpreterKillQueryQuery.cpp
similarity index 100%
rename from dbms/Interpreters/InterpreterKillQueryQuery.cpp
rename to src/Interpreters/InterpreterKillQueryQuery.cpp
diff --git a/dbms/Interpreters/InterpreterKillQueryQuery.h b/src/Interpreters/InterpreterKillQueryQuery.h
similarity index 100%
rename from dbms/Interpreters/InterpreterKillQueryQuery.h
rename to src/Interpreters/InterpreterKillQueryQuery.h
diff --git a/dbms/Interpreters/InterpreterOptimizeQuery.cpp b/src/Interpreters/InterpreterOptimizeQuery.cpp
similarity index 100%
rename from dbms/Interpreters/InterpreterOptimizeQuery.cpp
rename to src/Interpreters/InterpreterOptimizeQuery.cpp
diff --git a/dbms/Interpreters/InterpreterOptimizeQuery.h b/src/Interpreters/InterpreterOptimizeQuery.h
similarity index 100%
rename from dbms/Interpreters/InterpreterOptimizeQuery.h
rename to src/Interpreters/InterpreterOptimizeQuery.h
diff --git a/dbms/Interpreters/InterpreterRenameQuery.cpp b/src/Interpreters/InterpreterRenameQuery.cpp
similarity index 100%
rename from dbms/Interpreters/InterpreterRenameQuery.cpp
rename to src/Interpreters/InterpreterRenameQuery.cpp
diff --git a/dbms/Interpreters/InterpreterRenameQuery.h b/src/Interpreters/InterpreterRenameQuery.h
similarity index 100%
rename from dbms/Interpreters/InterpreterRenameQuery.h
rename to src/Interpreters/InterpreterRenameQuery.h
diff --git a/dbms/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp
similarity index 100%
rename from dbms/Interpreters/InterpreterSelectQuery.cpp
rename to src/Interpreters/InterpreterSelectQuery.cpp
diff --git a/dbms/Interpreters/InterpreterSelectQuery.h b/src/Interpreters/InterpreterSelectQuery.h
similarity index 100%
rename from dbms/Interpreters/InterpreterSelectQuery.h
rename to src/Interpreters/InterpreterSelectQuery.h
diff --git a/dbms/Interpreters/InterpreterSelectWithUnionQuery.cpp b/src/Interpreters/InterpreterSelectWithUnionQuery.cpp
similarity index 100%
rename from dbms/Interpreters/InterpreterSelectWithUnionQuery.cpp
rename to src/Interpreters/InterpreterSelectWithUnionQuery.cpp
diff --git a/dbms/Interpreters/InterpreterSelectWithUnionQuery.h b/src/Interpreters/InterpreterSelectWithUnionQuery.h
similarity index 100%
rename from dbms/Interpreters/InterpreterSelectWithUnionQuery.h
rename to src/Interpreters/InterpreterSelectWithUnionQuery.h
diff --git a/dbms/Interpreters/InterpreterSetQuery.cpp b/src/Interpreters/InterpreterSetQuery.cpp
similarity index 100%
rename from dbms/Interpreters/InterpreterSetQuery.cpp
rename to src/Interpreters/InterpreterSetQuery.cpp
diff --git a/dbms/Interpreters/InterpreterSetQuery.h b/src/Interpreters/InterpreterSetQuery.h
similarity index 100%
rename from dbms/Interpreters/InterpreterSetQuery.h
rename to src/Interpreters/InterpreterSetQuery.h
diff --git a/dbms/Interpreters/InterpreterSetRoleQuery.cpp b/src/Interpreters/InterpreterSetRoleQuery.cpp
similarity index 100%
rename from dbms/Interpreters/InterpreterSetRoleQuery.cpp
rename to src/Interpreters/InterpreterSetRoleQuery.cpp
diff --git a/dbms/Interpreters/InterpreterSetRoleQuery.h b/src/Interpreters/InterpreterSetRoleQuery.h
similarity index 100%
rename from dbms/Interpreters/InterpreterSetRoleQuery.h
rename to src/Interpreters/InterpreterSetRoleQuery.h
diff --git a/dbms/Interpreters/InterpreterShowCreateAccessEntityQuery.cpp b/src/Interpreters/InterpreterShowCreateAccessEntityQuery.cpp
similarity index 100%
rename from dbms/Interpreters/InterpreterShowCreateAccessEntityQuery.cpp
rename to src/Interpreters/InterpreterShowCreateAccessEntityQuery.cpp
diff --git a/dbms/Interpreters/InterpreterShowCreateAccessEntityQuery.h b/src/Interpreters/InterpreterShowCreateAccessEntityQuery.h
similarity index 100%
rename from dbms/Interpreters/InterpreterShowCreateAccessEntityQuery.h
rename to src/Interpreters/InterpreterShowCreateAccessEntityQuery.h
diff --git a/dbms/Interpreters/InterpreterShowCreateQuery.cpp b/src/Interpreters/InterpreterShowCreateQuery.cpp
similarity index 100%
rename from dbms/Interpreters/InterpreterShowCreateQuery.cpp
rename to src/Interpreters/InterpreterShowCreateQuery.cpp
diff --git a/dbms/Interpreters/InterpreterShowCreateQuery.h b/src/Interpreters/InterpreterShowCreateQuery.h
similarity index 100%
rename from dbms/Interpreters/InterpreterShowCreateQuery.h
rename to src/Interpreters/InterpreterShowCreateQuery.h
diff --git a/dbms/Interpreters/InterpreterShowGrantsQuery.cpp b/src/Interpreters/InterpreterShowGrantsQuery.cpp
similarity index 100%
rename from dbms/Interpreters/InterpreterShowGrantsQuery.cpp
rename to src/Interpreters/InterpreterShowGrantsQuery.cpp
diff --git a/dbms/Interpreters/InterpreterShowGrantsQuery.h b/src/Interpreters/InterpreterShowGrantsQuery.h
similarity index 100%
rename from dbms/Interpreters/InterpreterShowGrantsQuery.h
rename to src/Interpreters/InterpreterShowGrantsQuery.h
diff --git a/dbms/Interpreters/InterpreterShowProcesslistQuery.cpp b/src/Interpreters/InterpreterShowProcesslistQuery.cpp
similarity index 100%
rename from dbms/Interpreters/InterpreterShowProcesslistQuery.cpp
rename to src/Interpreters/InterpreterShowProcesslistQuery.cpp
diff --git a/dbms/Interpreters/InterpreterShowProcesslistQuery.h b/src/Interpreters/InterpreterShowProcesslistQuery.h
similarity index 100%
rename from dbms/Interpreters/InterpreterShowProcesslistQuery.h
rename to src/Interpreters/InterpreterShowProcesslistQuery.h
diff --git a/dbms/Interpreters/InterpreterShowQuotasQuery.cpp b/src/Interpreters/InterpreterShowQuotasQuery.cpp
similarity index 100%
rename from dbms/Interpreters/InterpreterShowQuotasQuery.cpp
rename to src/Interpreters/InterpreterShowQuotasQuery.cpp
diff --git a/dbms/Interpreters/InterpreterShowQuotasQuery.h b/src/Interpreters/InterpreterShowQuotasQuery.h
similarity index 100%
rename from dbms/Interpreters/InterpreterShowQuotasQuery.h
rename to src/Interpreters/InterpreterShowQuotasQuery.h
diff --git a/dbms/Interpreters/InterpreterShowRowPoliciesQuery.cpp b/src/Interpreters/InterpreterShowRowPoliciesQuery.cpp
similarity index 100%
rename from dbms/Interpreters/InterpreterShowRowPoliciesQuery.cpp
rename to src/Interpreters/InterpreterShowRowPoliciesQuery.cpp
diff --git a/dbms/Interpreters/InterpreterShowRowPoliciesQuery.h b/src/Interpreters/InterpreterShowRowPoliciesQuery.h
similarity index 100%
rename from dbms/Interpreters/InterpreterShowRowPoliciesQuery.h
rename to src/Interpreters/InterpreterShowRowPoliciesQuery.h
diff --git a/dbms/Interpreters/InterpreterShowTablesQuery.cpp b/src/Interpreters/InterpreterShowTablesQuery.cpp
similarity index 100%
rename from dbms/Interpreters/InterpreterShowTablesQuery.cpp
rename to src/Interpreters/InterpreterShowTablesQuery.cpp
diff --git a/dbms/Interpreters/InterpreterShowTablesQuery.h b/src/Interpreters/InterpreterShowTablesQuery.h
similarity index 100%
rename from dbms/Interpreters/InterpreterShowTablesQuery.h
rename to src/Interpreters/InterpreterShowTablesQuery.h
diff --git a/dbms/Interpreters/InterpreterSystemQuery.cpp b/src/Interpreters/InterpreterSystemQuery.cpp
similarity index 100%
rename from dbms/Interpreters/InterpreterSystemQuery.cpp
rename to src/Interpreters/InterpreterSystemQuery.cpp
diff --git a/dbms/Interpreters/InterpreterSystemQuery.h b/src/Interpreters/InterpreterSystemQuery.h
similarity index 100%
rename from dbms/Interpreters/InterpreterSystemQuery.h
rename to src/Interpreters/InterpreterSystemQuery.h
diff --git a/dbms/Interpreters/InterpreterUseQuery.cpp b/src/Interpreters/InterpreterUseQuery.cpp
similarity index 100%
rename from dbms/Interpreters/InterpreterUseQuery.cpp
rename to src/Interpreters/InterpreterUseQuery.cpp
diff --git a/dbms/Interpreters/InterpreterUseQuery.h b/src/Interpreters/InterpreterUseQuery.h
similarity index 100%
rename from dbms/Interpreters/InterpreterUseQuery.h
rename to src/Interpreters/InterpreterUseQuery.h
diff --git a/dbms/Interpreters/InterpreterWatchQuery.cpp b/src/Interpreters/InterpreterWatchQuery.cpp
similarity index 100%
rename from dbms/Interpreters/InterpreterWatchQuery.cpp
rename to src/Interpreters/InterpreterWatchQuery.cpp
diff --git a/dbms/Interpreters/InterpreterWatchQuery.h b/src/Interpreters/InterpreterWatchQuery.h
similarity index 100%
rename from dbms/Interpreters/InterpreterWatchQuery.h
rename to src/Interpreters/InterpreterWatchQuery.h
diff --git a/dbms/Interpreters/InterserverIOHandler.h b/src/Interpreters/InterserverIOHandler.h
similarity index 100%
rename from dbms/Interpreters/InterserverIOHandler.h
rename to src/Interpreters/InterserverIOHandler.h
diff --git a/dbms/Interpreters/Join.cpp b/src/Interpreters/Join.cpp
similarity index 100%
rename from dbms/Interpreters/Join.cpp
rename to src/Interpreters/Join.cpp
diff --git a/dbms/Interpreters/Join.h b/src/Interpreters/Join.h
similarity index 100%
rename from dbms/Interpreters/Join.h
rename to src/Interpreters/Join.h
diff --git a/dbms/Interpreters/JoinSwitcher.cpp b/src/Interpreters/JoinSwitcher.cpp
similarity index 100%
rename from dbms/Interpreters/JoinSwitcher.cpp
rename to src/Interpreters/JoinSwitcher.cpp
diff --git a/dbms/Interpreters/JoinSwitcher.h b/src/Interpreters/JoinSwitcher.h
similarity index 100%
rename from dbms/Interpreters/JoinSwitcher.h
rename to src/Interpreters/JoinSwitcher.h
diff --git a/dbms/Interpreters/JoinToSubqueryTransformVisitor.cpp b/src/Interpreters/JoinToSubqueryTransformVisitor.cpp
similarity index 100%
rename from dbms/Interpreters/JoinToSubqueryTransformVisitor.cpp
rename to src/Interpreters/JoinToSubqueryTransformVisitor.cpp
diff --git a/dbms/Interpreters/JoinToSubqueryTransformVisitor.h b/src/Interpreters/JoinToSubqueryTransformVisitor.h
similarity index 100%
rename from dbms/Interpreters/JoinToSubqueryTransformVisitor.h
rename to src/Interpreters/JoinToSubqueryTransformVisitor.h
diff --git a/dbms/Interpreters/JoinedTables.cpp b/src/Interpreters/JoinedTables.cpp
similarity index 100%
rename from dbms/Interpreters/JoinedTables.cpp
rename to src/Interpreters/JoinedTables.cpp
diff --git a/dbms/Interpreters/JoinedTables.h b/src/Interpreters/JoinedTables.h
similarity index 100%
rename from dbms/Interpreters/JoinedTables.h
rename to src/Interpreters/JoinedTables.h
diff --git a/dbms/Interpreters/LogicalExpressionsOptimizer.cpp b/src/Interpreters/LogicalExpressionsOptimizer.cpp
similarity index 100%
rename from dbms/Interpreters/LogicalExpressionsOptimizer.cpp
rename to src/Interpreters/LogicalExpressionsOptimizer.cpp
diff --git a/dbms/Interpreters/LogicalExpressionsOptimizer.h b/src/Interpreters/LogicalExpressionsOptimizer.h
similarity index 100%
rename from dbms/Interpreters/LogicalExpressionsOptimizer.h
rename to src/Interpreters/LogicalExpressionsOptimizer.h
diff --git a/dbms/Interpreters/MarkTableIdentifiersVisitor.cpp b/src/Interpreters/MarkTableIdentifiersVisitor.cpp
similarity index 100%
rename from dbms/Interpreters/MarkTableIdentifiersVisitor.cpp
rename to src/Interpreters/MarkTableIdentifiersVisitor.cpp
diff --git a/dbms/Interpreters/MarkTableIdentifiersVisitor.h b/src/Interpreters/MarkTableIdentifiersVisitor.h
similarity index 100%
rename from dbms/Interpreters/MarkTableIdentifiersVisitor.h
rename to src/Interpreters/MarkTableIdentifiersVisitor.h
diff --git a/dbms/Interpreters/MergeJoin.cpp b/src/Interpreters/MergeJoin.cpp
similarity index 100%
rename from dbms/Interpreters/MergeJoin.cpp
rename to src/Interpreters/MergeJoin.cpp
diff --git a/dbms/Interpreters/MergeJoin.h b/src/Interpreters/MergeJoin.h
similarity index 100%
rename from dbms/Interpreters/MergeJoin.h
rename to src/Interpreters/MergeJoin.h
diff --git a/dbms/Interpreters/MetricLog.cpp b/src/Interpreters/MetricLog.cpp
similarity index 100%
rename from dbms/Interpreters/MetricLog.cpp
rename to src/Interpreters/MetricLog.cpp
diff --git a/dbms/Interpreters/MetricLog.h b/src/Interpreters/MetricLog.h
similarity index 100%
rename from dbms/Interpreters/MetricLog.h
rename to src/Interpreters/MetricLog.h
diff --git a/dbms/Interpreters/MutationsInterpreter.cpp b/src/Interpreters/MutationsInterpreter.cpp
similarity index 100%
rename from dbms/Interpreters/MutationsInterpreter.cpp
rename to src/Interpreters/MutationsInterpreter.cpp
diff --git a/dbms/Interpreters/MutationsInterpreter.h b/src/Interpreters/MutationsInterpreter.h
similarity index 100%
rename from dbms/Interpreters/MutationsInterpreter.h
rename to src/Interpreters/MutationsInterpreter.h
diff --git a/dbms/Interpreters/NullableUtils.cpp b/src/Interpreters/NullableUtils.cpp
similarity index 100%
rename from dbms/Interpreters/NullableUtils.cpp
rename to src/Interpreters/NullableUtils.cpp
diff --git a/dbms/Interpreters/NullableUtils.h b/src/Interpreters/NullableUtils.h
similarity index 100%
rename from dbms/Interpreters/NullableUtils.h
rename to src/Interpreters/NullableUtils.h
diff --git a/dbms/Interpreters/OptimizeIfChains.cpp b/src/Interpreters/OptimizeIfChains.cpp
similarity index 100%
rename from dbms/Interpreters/OptimizeIfChains.cpp
rename to src/Interpreters/OptimizeIfChains.cpp
diff --git a/dbms/Interpreters/OptimizeIfChains.h b/src/Interpreters/OptimizeIfChains.h
similarity index 100%
rename from dbms/Interpreters/OptimizeIfChains.h
rename to src/Interpreters/OptimizeIfChains.h
diff --git a/dbms/Interpreters/OptimizeIfWithConstantConditionVisitor.cpp b/src/Interpreters/OptimizeIfWithConstantConditionVisitor.cpp
similarity index 100%
rename from dbms/Interpreters/OptimizeIfWithConstantConditionVisitor.cpp
rename to src/Interpreters/OptimizeIfWithConstantConditionVisitor.cpp
diff --git a/dbms/Interpreters/OptimizeIfWithConstantConditionVisitor.h b/src/Interpreters/OptimizeIfWithConstantConditionVisitor.h
similarity index 100%
rename from dbms/Interpreters/OptimizeIfWithConstantConditionVisitor.h
rename to src/Interpreters/OptimizeIfWithConstantConditionVisitor.h
diff --git a/dbms/Interpreters/PartLog.cpp b/src/Interpreters/PartLog.cpp
similarity index 100%
rename from dbms/Interpreters/PartLog.cpp
rename to src/Interpreters/PartLog.cpp
diff --git a/dbms/Interpreters/PartLog.h b/src/Interpreters/PartLog.h
similarity index 100%
rename from dbms/Interpreters/PartLog.h
rename to src/Interpreters/PartLog.h
diff --git a/dbms/Interpreters/PredicateExpressionsOptimizer.cpp b/src/Interpreters/PredicateExpressionsOptimizer.cpp
similarity index 100%
rename from dbms/Interpreters/PredicateExpressionsOptimizer.cpp
rename to src/Interpreters/PredicateExpressionsOptimizer.cpp
diff --git a/dbms/Interpreters/PredicateExpressionsOptimizer.h b/src/Interpreters/PredicateExpressionsOptimizer.h
similarity index 100%
rename from dbms/Interpreters/PredicateExpressionsOptimizer.h
rename to src/Interpreters/PredicateExpressionsOptimizer.h
diff --git a/dbms/Interpreters/PredicateRewriteVisitor.cpp b/src/Interpreters/PredicateRewriteVisitor.cpp
similarity index 100%
rename from dbms/Interpreters/PredicateRewriteVisitor.cpp
rename to src/Interpreters/PredicateRewriteVisitor.cpp
diff --git a/dbms/Interpreters/PredicateRewriteVisitor.h b/src/Interpreters/PredicateRewriteVisitor.h
similarity index 100%
rename from dbms/Interpreters/PredicateRewriteVisitor.h
rename to src/Interpreters/PredicateRewriteVisitor.h
diff --git a/dbms/Interpreters/PreparedSets.h b/src/Interpreters/PreparedSets.h
similarity index 100%
rename from dbms/Interpreters/PreparedSets.h
rename to src/Interpreters/PreparedSets.h
diff --git a/dbms/Interpreters/ProcessList.cpp b/src/Interpreters/ProcessList.cpp
similarity index 100%
rename from dbms/Interpreters/ProcessList.cpp
rename to src/Interpreters/ProcessList.cpp
diff --git a/dbms/Interpreters/ProcessList.h b/src/Interpreters/ProcessList.h
similarity index 100%
rename from dbms/Interpreters/ProcessList.h
rename to src/Interpreters/ProcessList.h
diff --git a/dbms/Interpreters/ProfileEventsExt.cpp b/src/Interpreters/ProfileEventsExt.cpp
similarity index 100%
rename from dbms/Interpreters/ProfileEventsExt.cpp
rename to src/Interpreters/ProfileEventsExt.cpp
diff --git a/dbms/Interpreters/ProfileEventsExt.h b/src/Interpreters/ProfileEventsExt.h
similarity index 100%
rename from dbms/Interpreters/ProfileEventsExt.h
rename to src/Interpreters/ProfileEventsExt.h
diff --git a/dbms/Interpreters/QueryAliasesVisitor.cpp b/src/Interpreters/QueryAliasesVisitor.cpp
similarity index 100%
rename from dbms/Interpreters/QueryAliasesVisitor.cpp
rename to src/Interpreters/QueryAliasesVisitor.cpp
diff --git a/dbms/Interpreters/QueryAliasesVisitor.h b/src/Interpreters/QueryAliasesVisitor.h
similarity index 100%
rename from dbms/Interpreters/QueryAliasesVisitor.h
rename to src/Interpreters/QueryAliasesVisitor.h
diff --git a/dbms/Interpreters/QueryLog.cpp b/src/Interpreters/QueryLog.cpp
similarity index 100%
rename from dbms/Interpreters/QueryLog.cpp
rename to src/Interpreters/QueryLog.cpp
diff --git a/dbms/Interpreters/QueryLog.h b/src/Interpreters/QueryLog.h
similarity index 100%
rename from dbms/Interpreters/QueryLog.h
rename to src/Interpreters/QueryLog.h
diff --git a/dbms/Interpreters/QueryNormalizer.cpp b/src/Interpreters/QueryNormalizer.cpp
similarity index 100%
rename from dbms/Interpreters/QueryNormalizer.cpp
rename to src/Interpreters/QueryNormalizer.cpp
diff --git a/dbms/Interpreters/QueryNormalizer.h b/src/Interpreters/QueryNormalizer.h
similarity index 100%
rename from dbms/Interpreters/QueryNormalizer.h
rename to src/Interpreters/QueryNormalizer.h
diff --git a/dbms/Interpreters/QueryPriorities.h b/src/Interpreters/QueryPriorities.h
similarity index 100%
rename from dbms/Interpreters/QueryPriorities.h
rename to src/Interpreters/QueryPriorities.h
diff --git a/dbms/Interpreters/QueryThreadLog.cpp b/src/Interpreters/QueryThreadLog.cpp
similarity index 100%
rename from dbms/Interpreters/QueryThreadLog.cpp
rename to src/Interpreters/QueryThreadLog.cpp
diff --git a/dbms/Interpreters/QueryThreadLog.h b/src/Interpreters/QueryThreadLog.h
similarity index 100%
rename from dbms/Interpreters/QueryThreadLog.h
rename to src/Interpreters/QueryThreadLog.h
diff --git a/dbms/Interpreters/ReplaceQueryParameterVisitor.cpp b/src/Interpreters/ReplaceQueryParameterVisitor.cpp
similarity index 100%
rename from dbms/Interpreters/ReplaceQueryParameterVisitor.cpp
rename to src/Interpreters/ReplaceQueryParameterVisitor.cpp
diff --git a/dbms/Interpreters/ReplaceQueryParameterVisitor.h b/src/Interpreters/ReplaceQueryParameterVisitor.h
similarity index 100%
rename from dbms/Interpreters/ReplaceQueryParameterVisitor.h
rename to src/Interpreters/ReplaceQueryParameterVisitor.h
diff --git a/dbms/Interpreters/RequiredSourceColumnsVisitor.cpp b/src/Interpreters/RequiredSourceColumnsVisitor.cpp
similarity index 100%
rename from dbms/Interpreters/RequiredSourceColumnsVisitor.cpp
rename to src/Interpreters/RequiredSourceColumnsVisitor.cpp
diff --git a/dbms/Interpreters/RequiredSourceColumnsVisitor.h b/src/Interpreters/RequiredSourceColumnsVisitor.h
similarity index 100%
rename from dbms/Interpreters/RequiredSourceColumnsVisitor.h
rename to src/Interpreters/RequiredSourceColumnsVisitor.h
diff --git a/dbms/Interpreters/RowRefs.cpp b/src/Interpreters/RowRefs.cpp
similarity index 100%
rename from dbms/Interpreters/RowRefs.cpp
rename to src/Interpreters/RowRefs.cpp
diff --git a/dbms/Interpreters/RowRefs.h b/src/Interpreters/RowRefs.h
similarity index 100%
rename from dbms/Interpreters/RowRefs.h
rename to src/Interpreters/RowRefs.h
diff --git a/dbms/Interpreters/SelectQueryOptions.h b/src/Interpreters/SelectQueryOptions.h
similarity index 100%
rename from dbms/Interpreters/SelectQueryOptions.h
rename to src/Interpreters/SelectQueryOptions.h
diff --git a/dbms/Interpreters/Set.cpp b/src/Interpreters/Set.cpp
similarity index 100%
rename from dbms/Interpreters/Set.cpp
rename to src/Interpreters/Set.cpp
diff --git a/dbms/Interpreters/Set.h b/src/Interpreters/Set.h
similarity index 100%
rename from dbms/Interpreters/Set.h
rename to src/Interpreters/Set.h
diff --git a/dbms/Interpreters/SetVariants.cpp b/src/Interpreters/SetVariants.cpp
similarity index 100%
rename from dbms/Interpreters/SetVariants.cpp
rename to src/Interpreters/SetVariants.cpp
diff --git a/dbms/Interpreters/SetVariants.h b/src/Interpreters/SetVariants.h
similarity index 100%
rename from dbms/Interpreters/SetVariants.h
rename to src/Interpreters/SetVariants.h
diff --git a/dbms/Interpreters/StorageID.cpp b/src/Interpreters/StorageID.cpp
similarity index 100%
rename from dbms/Interpreters/StorageID.cpp
rename to src/Interpreters/StorageID.cpp
diff --git a/dbms/Interpreters/StorageID.h b/src/Interpreters/StorageID.h
similarity index 100%
rename from dbms/Interpreters/StorageID.h
rename to src/Interpreters/StorageID.h
diff --git a/dbms/Interpreters/SubqueryForSet.cpp b/src/Interpreters/SubqueryForSet.cpp
similarity index 100%
rename from dbms/Interpreters/SubqueryForSet.cpp
rename to src/Interpreters/SubqueryForSet.cpp
diff --git a/dbms/Interpreters/SubqueryForSet.h b/src/Interpreters/SubqueryForSet.h
similarity index 100%
rename from dbms/Interpreters/SubqueryForSet.h
rename to src/Interpreters/SubqueryForSet.h
diff --git a/dbms/Interpreters/SyntaxAnalyzer.cpp b/src/Interpreters/SyntaxAnalyzer.cpp
similarity index 100%
rename from dbms/Interpreters/SyntaxAnalyzer.cpp
rename to src/Interpreters/SyntaxAnalyzer.cpp
diff --git a/dbms/Interpreters/SyntaxAnalyzer.h b/src/Interpreters/SyntaxAnalyzer.h
similarity index 100%
rename from dbms/Interpreters/SyntaxAnalyzer.h
rename to src/Interpreters/SyntaxAnalyzer.h
diff --git a/dbms/Interpreters/SystemLog.cpp b/src/Interpreters/SystemLog.cpp
similarity index 100%
rename from dbms/Interpreters/SystemLog.cpp
rename to src/Interpreters/SystemLog.cpp
diff --git a/dbms/Interpreters/SystemLog.h b/src/Interpreters/SystemLog.h
similarity index 100%
rename from dbms/Interpreters/SystemLog.h
rename to src/Interpreters/SystemLog.h
diff --git a/dbms/Interpreters/TablesStatus.cpp b/src/Interpreters/TablesStatus.cpp
similarity index 100%
rename from dbms/Interpreters/TablesStatus.cpp
rename to src/Interpreters/TablesStatus.cpp
diff --git a/dbms/Interpreters/TablesStatus.h b/src/Interpreters/TablesStatus.h
similarity index 100%
rename from dbms/Interpreters/TablesStatus.h
rename to src/Interpreters/TablesStatus.h
diff --git a/dbms/Interpreters/TextLog.cpp b/src/Interpreters/TextLog.cpp
similarity index 100%
rename from dbms/Interpreters/TextLog.cpp
rename to src/Interpreters/TextLog.cpp
diff --git a/dbms/Interpreters/TextLog.h b/src/Interpreters/TextLog.h
similarity index 100%
rename from dbms/Interpreters/TextLog.h
rename to src/Interpreters/TextLog.h
diff --git a/dbms/Interpreters/ThreadStatusExt.cpp b/src/Interpreters/ThreadStatusExt.cpp
similarity index 100%
rename from dbms/Interpreters/ThreadStatusExt.cpp
rename to src/Interpreters/ThreadStatusExt.cpp
diff --git a/dbms/Interpreters/TraceLog.cpp b/src/Interpreters/TraceLog.cpp
similarity index 100%
rename from dbms/Interpreters/TraceLog.cpp
rename to src/Interpreters/TraceLog.cpp
diff --git a/dbms/Interpreters/TraceLog.h b/src/Interpreters/TraceLog.h
similarity index 100%
rename from dbms/Interpreters/TraceLog.h
rename to src/Interpreters/TraceLog.h
diff --git a/dbms/Interpreters/TranslateQualifiedNamesVisitor.cpp b/src/Interpreters/TranslateQualifiedNamesVisitor.cpp
similarity index 100%
rename from dbms/Interpreters/TranslateQualifiedNamesVisitor.cpp
rename to src/Interpreters/TranslateQualifiedNamesVisitor.cpp
diff --git a/dbms/Interpreters/TranslateQualifiedNamesVisitor.h b/src/Interpreters/TranslateQualifiedNamesVisitor.h
similarity index 100%
rename from dbms/Interpreters/TranslateQualifiedNamesVisitor.h
rename to src/Interpreters/TranslateQualifiedNamesVisitor.h
diff --git a/dbms/Interpreters/addMissingDefaults.cpp b/src/Interpreters/addMissingDefaults.cpp
similarity index 100%
rename from dbms/Interpreters/addMissingDefaults.cpp
rename to src/Interpreters/addMissingDefaults.cpp
diff --git a/dbms/Interpreters/addMissingDefaults.h b/src/Interpreters/addMissingDefaults.h
similarity index 100%
rename from dbms/Interpreters/addMissingDefaults.h
rename to src/Interpreters/addMissingDefaults.h
diff --git a/dbms/Interpreters/addTypeConversionToAST.cpp b/src/Interpreters/addTypeConversionToAST.cpp
similarity index 100%
rename from dbms/Interpreters/addTypeConversionToAST.cpp
rename to src/Interpreters/addTypeConversionToAST.cpp
diff --git a/dbms/Interpreters/addTypeConversionToAST.h b/src/Interpreters/addTypeConversionToAST.h
similarity index 100%
rename from dbms/Interpreters/addTypeConversionToAST.h
rename to src/Interpreters/addTypeConversionToAST.h
diff --git a/dbms/Interpreters/asof.h b/src/Interpreters/asof.h
similarity index 100%
rename from dbms/Interpreters/asof.h
rename to src/Interpreters/asof.h
diff --git a/dbms/Interpreters/castColumn.cpp b/src/Interpreters/castColumn.cpp
similarity index 100%
rename from dbms/Interpreters/castColumn.cpp
rename to src/Interpreters/castColumn.cpp
diff --git a/dbms/Interpreters/castColumn.h b/src/Interpreters/castColumn.h
similarity index 100%
rename from dbms/Interpreters/castColumn.h
rename to src/Interpreters/castColumn.h
diff --git a/dbms/Interpreters/convertFieldToType.cpp b/src/Interpreters/convertFieldToType.cpp
similarity index 100%
rename from dbms/Interpreters/convertFieldToType.cpp
rename to src/Interpreters/convertFieldToType.cpp
diff --git a/dbms/Interpreters/convertFieldToType.h b/src/Interpreters/convertFieldToType.h
similarity index 100%
rename from dbms/Interpreters/convertFieldToType.h
rename to src/Interpreters/convertFieldToType.h
diff --git a/dbms/Interpreters/createBlockSelector.cpp b/src/Interpreters/createBlockSelector.cpp
similarity index 100%
rename from dbms/Interpreters/createBlockSelector.cpp
rename to src/Interpreters/createBlockSelector.cpp
diff --git a/dbms/Interpreters/createBlockSelector.h b/src/Interpreters/createBlockSelector.h
similarity index 100%
rename from dbms/Interpreters/createBlockSelector.h
rename to src/Interpreters/createBlockSelector.h
diff --git a/dbms/Interpreters/evaluateConstantExpression.cpp b/src/Interpreters/evaluateConstantExpression.cpp
similarity index 100%
rename from dbms/Interpreters/evaluateConstantExpression.cpp
rename to src/Interpreters/evaluateConstantExpression.cpp
diff --git a/dbms/Interpreters/evaluateConstantExpression.h b/src/Interpreters/evaluateConstantExpression.h
similarity index 100%
rename from dbms/Interpreters/evaluateConstantExpression.h
rename to src/Interpreters/evaluateConstantExpression.h
diff --git a/dbms/Interpreters/executeQuery.cpp b/src/Interpreters/executeQuery.cpp
similarity index 100%
rename from dbms/Interpreters/executeQuery.cpp
rename to src/Interpreters/executeQuery.cpp
diff --git a/dbms/Interpreters/executeQuery.h b/src/Interpreters/executeQuery.h
similarity index 100%
rename from dbms/Interpreters/executeQuery.h
rename to src/Interpreters/executeQuery.h
diff --git a/dbms/Interpreters/getClusterName.cpp b/src/Interpreters/getClusterName.cpp
similarity index 100%
rename from dbms/Interpreters/getClusterName.cpp
rename to src/Interpreters/getClusterName.cpp
diff --git a/dbms/Interpreters/getClusterName.h b/src/Interpreters/getClusterName.h
similarity index 100%
rename from dbms/Interpreters/getClusterName.h
rename to src/Interpreters/getClusterName.h
diff --git a/dbms/Interpreters/getTableExpressions.cpp b/src/Interpreters/getTableExpressions.cpp
similarity index 100%
rename from dbms/Interpreters/getTableExpressions.cpp
rename to src/Interpreters/getTableExpressions.cpp
diff --git a/dbms/Interpreters/getTableExpressions.h b/src/Interpreters/getTableExpressions.h
similarity index 100%
rename from dbms/Interpreters/getTableExpressions.h
rename to src/Interpreters/getTableExpressions.h
diff --git a/dbms/Interpreters/inplaceBlockConversions.cpp b/src/Interpreters/inplaceBlockConversions.cpp
similarity index 100%
rename from dbms/Interpreters/inplaceBlockConversions.cpp
rename to src/Interpreters/inplaceBlockConversions.cpp
diff --git a/dbms/Interpreters/inplaceBlockConversions.h b/src/Interpreters/inplaceBlockConversions.h
similarity index 100%
rename from dbms/Interpreters/inplaceBlockConversions.h
rename to src/Interpreters/inplaceBlockConversions.h
diff --git a/dbms/Interpreters/interpretSubquery.cpp b/src/Interpreters/interpretSubquery.cpp
similarity index 100%
rename from dbms/Interpreters/interpretSubquery.cpp
rename to src/Interpreters/interpretSubquery.cpp
diff --git a/dbms/Interpreters/interpretSubquery.h b/src/Interpreters/interpretSubquery.h
similarity index 100%
rename from dbms/Interpreters/interpretSubquery.h
rename to src/Interpreters/interpretSubquery.h
diff --git a/dbms/Interpreters/joinDispatch.h b/src/Interpreters/joinDispatch.h
similarity index 100%
rename from dbms/Interpreters/joinDispatch.h
rename to src/Interpreters/joinDispatch.h
diff --git a/dbms/Interpreters/join_common.cpp b/src/Interpreters/join_common.cpp
similarity index 100%
rename from dbms/Interpreters/join_common.cpp
rename to src/Interpreters/join_common.cpp
diff --git a/dbms/Interpreters/join_common.h b/src/Interpreters/join_common.h
similarity index 100%
rename from dbms/Interpreters/join_common.h
rename to src/Interpreters/join_common.h
diff --git a/dbms/Interpreters/loadMetadata.cpp b/src/Interpreters/loadMetadata.cpp
similarity index 100%
rename from dbms/Interpreters/loadMetadata.cpp
rename to src/Interpreters/loadMetadata.cpp
diff --git a/dbms/Interpreters/loadMetadata.h b/src/Interpreters/loadMetadata.h
similarity index 100%
rename from dbms/Interpreters/loadMetadata.h
rename to src/Interpreters/loadMetadata.h
diff --git a/dbms/Interpreters/misc.h b/src/Interpreters/misc.h
similarity index 100%
rename from dbms/Interpreters/misc.h
rename to src/Interpreters/misc.h
diff --git a/dbms/Interpreters/sortBlock.cpp b/src/Interpreters/sortBlock.cpp
similarity index 100%
rename from dbms/Interpreters/sortBlock.cpp
rename to src/Interpreters/sortBlock.cpp
diff --git a/dbms/Interpreters/sortBlock.h b/src/Interpreters/sortBlock.h
similarity index 100%
rename from dbms/Interpreters/sortBlock.h
rename to src/Interpreters/sortBlock.h
diff --git a/dbms/Interpreters/tests/CMakeLists.txt b/src/Interpreters/tests/CMakeLists.txt
similarity index 100%
rename from dbms/Interpreters/tests/CMakeLists.txt
rename to src/Interpreters/tests/CMakeLists.txt
diff --git a/dbms/Interpreters/tests/aggregate.cpp b/src/Interpreters/tests/aggregate.cpp
similarity index 100%
rename from dbms/Interpreters/tests/aggregate.cpp
rename to src/Interpreters/tests/aggregate.cpp
diff --git a/dbms/Interpreters/tests/create_query.cpp b/src/Interpreters/tests/create_query.cpp
similarity index 100%
rename from dbms/Interpreters/tests/create_query.cpp
rename to src/Interpreters/tests/create_query.cpp
diff --git a/dbms/Interpreters/tests/expression.cpp b/src/Interpreters/tests/expression.cpp
similarity index 100%
rename from dbms/Interpreters/tests/expression.cpp
rename to src/Interpreters/tests/expression.cpp
diff --git a/dbms/Interpreters/tests/expression_analyzer.cpp b/src/Interpreters/tests/expression_analyzer.cpp
similarity index 100%
rename from dbms/Interpreters/tests/expression_analyzer.cpp
rename to src/Interpreters/tests/expression_analyzer.cpp
diff --git a/dbms/Interpreters/tests/gtest_cycle_aliases.cpp b/src/Interpreters/tests/gtest_cycle_aliases.cpp
similarity index 100%
rename from dbms/Interpreters/tests/gtest_cycle_aliases.cpp
rename to src/Interpreters/tests/gtest_cycle_aliases.cpp
diff --git a/dbms/Interpreters/tests/gtest_merge_tree_set_index.cpp b/src/Interpreters/tests/gtest_merge_tree_set_index.cpp
similarity index 100%
rename from dbms/Interpreters/tests/gtest_merge_tree_set_index.cpp
rename to src/Interpreters/tests/gtest_merge_tree_set_index.cpp
diff --git a/dbms/Interpreters/tests/hash_map.cpp b/src/Interpreters/tests/hash_map.cpp
similarity index 100%
rename from dbms/Interpreters/tests/hash_map.cpp
rename to src/Interpreters/tests/hash_map.cpp
diff --git a/dbms/Interpreters/tests/hash_map3.cpp b/src/Interpreters/tests/hash_map3.cpp
similarity index 100%
rename from dbms/Interpreters/tests/hash_map3.cpp
rename to src/Interpreters/tests/hash_map3.cpp
diff --git a/dbms/Interpreters/tests/hash_map_lookup.cpp b/src/Interpreters/tests/hash_map_lookup.cpp
similarity index 100%
rename from dbms/Interpreters/tests/hash_map_lookup.cpp
rename to src/Interpreters/tests/hash_map_lookup.cpp
diff --git a/dbms/Interpreters/tests/hash_map_string.cpp b/src/Interpreters/tests/hash_map_string.cpp
similarity index 100%
rename from dbms/Interpreters/tests/hash_map_string.cpp
rename to src/Interpreters/tests/hash_map_string.cpp
diff --git a/dbms/Interpreters/tests/hash_map_string_2.cpp b/src/Interpreters/tests/hash_map_string_2.cpp
similarity index 100%
rename from dbms/Interpreters/tests/hash_map_string_2.cpp
rename to src/Interpreters/tests/hash_map_string_2.cpp
diff --git a/dbms/Interpreters/tests/hash_map_string_3.cpp b/src/Interpreters/tests/hash_map_string_3.cpp
similarity index 100%
rename from dbms/Interpreters/tests/hash_map_string_3.cpp
rename to src/Interpreters/tests/hash_map_string_3.cpp
diff --git a/dbms/Interpreters/tests/hash_map_string_small.cpp b/src/Interpreters/tests/hash_map_string_small.cpp
similarity index 100%
rename from dbms/Interpreters/tests/hash_map_string_small.cpp
rename to src/Interpreters/tests/hash_map_string_small.cpp
diff --git a/dbms/Interpreters/tests/in_join_subqueries_preprocessor.cpp b/src/Interpreters/tests/in_join_subqueries_preprocessor.cpp
similarity index 100%
rename from dbms/Interpreters/tests/in_join_subqueries_preprocessor.cpp
rename to src/Interpreters/tests/in_join_subqueries_preprocessor.cpp
diff --git a/dbms/Interpreters/tests/internal_iotop.cpp b/src/Interpreters/tests/internal_iotop.cpp
similarity index 100%
rename from dbms/Interpreters/tests/internal_iotop.cpp
rename to src/Interpreters/tests/internal_iotop.cpp
diff --git a/dbms/Interpreters/tests/logical_expressions_optimizer.cpp b/src/Interpreters/tests/logical_expressions_optimizer.cpp
similarity index 100%
rename from dbms/Interpreters/tests/logical_expressions_optimizer.cpp
rename to src/Interpreters/tests/logical_expressions_optimizer.cpp
diff --git a/dbms/Interpreters/tests/select_query.cpp b/src/Interpreters/tests/select_query.cpp
similarity index 100%
rename from dbms/Interpreters/tests/select_query.cpp
rename to src/Interpreters/tests/select_query.cpp
diff --git a/dbms/Interpreters/tests/string_hash_map.cpp b/src/Interpreters/tests/string_hash_map.cpp
similarity index 100%
rename from dbms/Interpreters/tests/string_hash_map.cpp
rename to src/Interpreters/tests/string_hash_map.cpp
diff --git a/dbms/Interpreters/tests/two_level_hash_map.cpp b/src/Interpreters/tests/two_level_hash_map.cpp
similarity index 100%
rename from dbms/Interpreters/tests/two_level_hash_map.cpp
rename to src/Interpreters/tests/two_level_hash_map.cpp
diff --git a/dbms/Interpreters/tests/users.cpp b/src/Interpreters/tests/users.cpp
similarity index 100%
rename from dbms/Interpreters/tests/users.cpp
rename to src/Interpreters/tests/users.cpp
diff --git a/dbms/NOTICE b/src/NOTICE
similarity index 100%
rename from dbms/NOTICE
rename to src/NOTICE
diff --git a/dbms/Parsers/ASTAlterQuery.cpp b/src/Parsers/ASTAlterQuery.cpp
similarity index 100%
rename from dbms/Parsers/ASTAlterQuery.cpp
rename to src/Parsers/ASTAlterQuery.cpp
diff --git a/dbms/Parsers/ASTAlterQuery.h b/src/Parsers/ASTAlterQuery.h
similarity index 100%
rename from dbms/Parsers/ASTAlterQuery.h
rename to src/Parsers/ASTAlterQuery.h
diff --git a/dbms/Parsers/ASTAssignment.h b/src/Parsers/ASTAssignment.h
similarity index 100%
rename from dbms/Parsers/ASTAssignment.h
rename to src/Parsers/ASTAssignment.h
diff --git a/dbms/Parsers/ASTAsterisk.cpp b/src/Parsers/ASTAsterisk.cpp
similarity index 100%
rename from dbms/Parsers/ASTAsterisk.cpp
rename to src/Parsers/ASTAsterisk.cpp
diff --git a/dbms/Parsers/ASTAsterisk.h b/src/Parsers/ASTAsterisk.h
similarity index 100%
rename from dbms/Parsers/ASTAsterisk.h
rename to src/Parsers/ASTAsterisk.h
diff --git a/dbms/Parsers/ASTCheckQuery.h b/src/Parsers/ASTCheckQuery.h
similarity index 100%
rename from dbms/Parsers/ASTCheckQuery.h
rename to src/Parsers/ASTCheckQuery.h
diff --git a/dbms/Parsers/ASTColumnDeclaration.cpp b/src/Parsers/ASTColumnDeclaration.cpp
similarity index 100%
rename from dbms/Parsers/ASTColumnDeclaration.cpp
rename to src/Parsers/ASTColumnDeclaration.cpp
diff --git a/dbms/Parsers/ASTColumnDeclaration.h b/src/Parsers/ASTColumnDeclaration.h
similarity index 100%
rename from dbms/Parsers/ASTColumnDeclaration.h
rename to src/Parsers/ASTColumnDeclaration.h
diff --git a/dbms/Parsers/ASTColumnsMatcher.cpp b/src/Parsers/ASTColumnsMatcher.cpp
similarity index 100%
rename from dbms/Parsers/ASTColumnsMatcher.cpp
rename to src/Parsers/ASTColumnsMatcher.cpp
diff --git a/dbms/Parsers/ASTColumnsMatcher.h b/src/Parsers/ASTColumnsMatcher.h
similarity index 100%
rename from dbms/Parsers/ASTColumnsMatcher.h
rename to src/Parsers/ASTColumnsMatcher.h
diff --git a/dbms/Parsers/ASTConstraintDeclaration.cpp b/src/Parsers/ASTConstraintDeclaration.cpp
similarity index 100%
rename from dbms/Parsers/ASTConstraintDeclaration.cpp
rename to src/Parsers/ASTConstraintDeclaration.cpp
diff --git a/dbms/Parsers/ASTConstraintDeclaration.h b/src/Parsers/ASTConstraintDeclaration.h
similarity index 100%
rename from dbms/Parsers/ASTConstraintDeclaration.h
rename to src/Parsers/ASTConstraintDeclaration.h
diff --git a/dbms/Parsers/ASTCreateQuery.cpp b/src/Parsers/ASTCreateQuery.cpp
similarity index 100%
rename from dbms/Parsers/ASTCreateQuery.cpp
rename to src/Parsers/ASTCreateQuery.cpp
diff --git a/dbms/Parsers/ASTCreateQuery.h b/src/Parsers/ASTCreateQuery.h
similarity index 100%
rename from dbms/Parsers/ASTCreateQuery.h
rename to src/Parsers/ASTCreateQuery.h
diff --git a/dbms/Parsers/ASTCreateQuotaQuery.cpp b/src/Parsers/ASTCreateQuotaQuery.cpp
similarity index 100%
rename from dbms/Parsers/ASTCreateQuotaQuery.cpp
rename to src/Parsers/ASTCreateQuotaQuery.cpp
diff --git a/dbms/Parsers/ASTCreateQuotaQuery.h b/src/Parsers/ASTCreateQuotaQuery.h
similarity index 100%
rename from dbms/Parsers/ASTCreateQuotaQuery.h
rename to src/Parsers/ASTCreateQuotaQuery.h
diff --git a/dbms/Parsers/ASTCreateRoleQuery.cpp b/src/Parsers/ASTCreateRoleQuery.cpp
similarity index 100%
rename from dbms/Parsers/ASTCreateRoleQuery.cpp
rename to src/Parsers/ASTCreateRoleQuery.cpp
diff --git a/dbms/Parsers/ASTCreateRoleQuery.h b/src/Parsers/ASTCreateRoleQuery.h
similarity index 100%
rename from dbms/Parsers/ASTCreateRoleQuery.h
rename to src/Parsers/ASTCreateRoleQuery.h
diff --git a/dbms/Parsers/ASTCreateRowPolicyQuery.cpp b/src/Parsers/ASTCreateRowPolicyQuery.cpp
similarity index 100%
rename from dbms/Parsers/ASTCreateRowPolicyQuery.cpp
rename to src/Parsers/ASTCreateRowPolicyQuery.cpp
diff --git a/dbms/Parsers/ASTCreateRowPolicyQuery.h b/src/Parsers/ASTCreateRowPolicyQuery.h
similarity index 100%
rename from dbms/Parsers/ASTCreateRowPolicyQuery.h
rename to src/Parsers/ASTCreateRowPolicyQuery.h
diff --git a/dbms/Parsers/ASTCreateSettingsProfileQuery.cpp b/src/Parsers/ASTCreateSettingsProfileQuery.cpp
similarity index 100%
rename from dbms/Parsers/ASTCreateSettingsProfileQuery.cpp
rename to src/Parsers/ASTCreateSettingsProfileQuery.cpp
diff --git a/dbms/Parsers/ASTCreateSettingsProfileQuery.h b/src/Parsers/ASTCreateSettingsProfileQuery.h
similarity index 100%
rename from dbms/Parsers/ASTCreateSettingsProfileQuery.h
rename to src/Parsers/ASTCreateSettingsProfileQuery.h
diff --git a/dbms/Parsers/ASTCreateUserQuery.cpp b/src/Parsers/ASTCreateUserQuery.cpp
similarity index 100%
rename from dbms/Parsers/ASTCreateUserQuery.cpp
rename to src/Parsers/ASTCreateUserQuery.cpp
diff --git a/dbms/Parsers/ASTCreateUserQuery.h b/src/Parsers/ASTCreateUserQuery.h
similarity index 100%
rename from dbms/Parsers/ASTCreateUserQuery.h
rename to src/Parsers/ASTCreateUserQuery.h
diff --git a/dbms/Parsers/ASTDictionary.cpp b/src/Parsers/ASTDictionary.cpp
similarity index 100%
rename from dbms/Parsers/ASTDictionary.cpp
rename to src/Parsers/ASTDictionary.cpp
diff --git a/dbms/Parsers/ASTDictionary.h b/src/Parsers/ASTDictionary.h
similarity index 100%
rename from dbms/Parsers/ASTDictionary.h
rename to src/Parsers/ASTDictionary.h
diff --git a/dbms/Parsers/ASTDictionaryAttributeDeclaration.cpp b/src/Parsers/ASTDictionaryAttributeDeclaration.cpp
similarity index 100%
rename from dbms/Parsers/ASTDictionaryAttributeDeclaration.cpp
rename to src/Parsers/ASTDictionaryAttributeDeclaration.cpp
diff --git a/dbms/Parsers/ASTDictionaryAttributeDeclaration.h b/src/Parsers/ASTDictionaryAttributeDeclaration.h
similarity index 100%
rename from dbms/Parsers/ASTDictionaryAttributeDeclaration.h
rename to src/Parsers/ASTDictionaryAttributeDeclaration.h
diff --git a/dbms/Parsers/ASTDropAccessEntityQuery.cpp
b/src/Parsers/ASTDropAccessEntityQuery.cpp similarity index 100% rename from dbms/Parsers/ASTDropAccessEntityQuery.cpp rename to src/Parsers/ASTDropAccessEntityQuery.cpp diff --git a/dbms/Parsers/ASTDropAccessEntityQuery.h b/src/Parsers/ASTDropAccessEntityQuery.h similarity index 100% rename from dbms/Parsers/ASTDropAccessEntityQuery.h rename to src/Parsers/ASTDropAccessEntityQuery.h diff --git a/dbms/Parsers/ASTDropQuery.cpp b/src/Parsers/ASTDropQuery.cpp similarity index 100% rename from dbms/Parsers/ASTDropQuery.cpp rename to src/Parsers/ASTDropQuery.cpp diff --git a/dbms/Parsers/ASTDropQuery.h b/src/Parsers/ASTDropQuery.h similarity index 100% rename from dbms/Parsers/ASTDropQuery.h rename to src/Parsers/ASTDropQuery.h diff --git a/dbms/Parsers/ASTEnumElement.h b/src/Parsers/ASTEnumElement.h similarity index 100% rename from dbms/Parsers/ASTEnumElement.h rename to src/Parsers/ASTEnumElement.h diff --git a/dbms/Parsers/ASTExplainQuery.h b/src/Parsers/ASTExplainQuery.h similarity index 100% rename from dbms/Parsers/ASTExplainQuery.h rename to src/Parsers/ASTExplainQuery.h diff --git a/dbms/Parsers/ASTExpressionList.cpp b/src/Parsers/ASTExpressionList.cpp similarity index 100% rename from dbms/Parsers/ASTExpressionList.cpp rename to src/Parsers/ASTExpressionList.cpp diff --git a/dbms/Parsers/ASTExpressionList.h b/src/Parsers/ASTExpressionList.h similarity index 100% rename from dbms/Parsers/ASTExpressionList.h rename to src/Parsers/ASTExpressionList.h diff --git a/dbms/Parsers/ASTExtendedRoleSet.cpp b/src/Parsers/ASTExtendedRoleSet.cpp similarity index 100% rename from dbms/Parsers/ASTExtendedRoleSet.cpp rename to src/Parsers/ASTExtendedRoleSet.cpp diff --git a/dbms/Parsers/ASTExtendedRoleSet.h b/src/Parsers/ASTExtendedRoleSet.h similarity index 100% rename from dbms/Parsers/ASTExtendedRoleSet.h rename to src/Parsers/ASTExtendedRoleSet.h diff --git a/dbms/Parsers/ASTFunction.cpp b/src/Parsers/ASTFunction.cpp similarity index 100% rename from dbms/Parsers/ASTFunction.cpp rename to src/Parsers/ASTFunction.cpp diff --git a/dbms/Parsers/ASTFunction.h b/src/Parsers/ASTFunction.h similarity index 100% rename from dbms/Parsers/ASTFunction.h rename to src/Parsers/ASTFunction.h diff --git a/dbms/Parsers/ASTFunctionWithKeyValueArguments.cpp b/src/Parsers/ASTFunctionWithKeyValueArguments.cpp similarity index 100% rename from dbms/Parsers/ASTFunctionWithKeyValueArguments.cpp rename to src/Parsers/ASTFunctionWithKeyValueArguments.cpp diff --git a/dbms/Parsers/ASTFunctionWithKeyValueArguments.h b/src/Parsers/ASTFunctionWithKeyValueArguments.h similarity index 100% rename from dbms/Parsers/ASTFunctionWithKeyValueArguments.h rename to src/Parsers/ASTFunctionWithKeyValueArguments.h diff --git a/dbms/Parsers/ASTGrantQuery.cpp b/src/Parsers/ASTGrantQuery.cpp similarity index 100% rename from dbms/Parsers/ASTGrantQuery.cpp rename to src/Parsers/ASTGrantQuery.cpp diff --git a/dbms/Parsers/ASTGrantQuery.h b/src/Parsers/ASTGrantQuery.h similarity index 100% rename from dbms/Parsers/ASTGrantQuery.h rename to src/Parsers/ASTGrantQuery.h diff --git a/dbms/Parsers/ASTIdentifier.cpp b/src/Parsers/ASTIdentifier.cpp similarity index 100% rename from dbms/Parsers/ASTIdentifier.cpp rename to src/Parsers/ASTIdentifier.cpp diff --git a/dbms/Parsers/ASTIdentifier.h b/src/Parsers/ASTIdentifier.h similarity index 100% rename from dbms/Parsers/ASTIdentifier.h rename to src/Parsers/ASTIdentifier.h diff --git a/dbms/Parsers/ASTIndexDeclaration.h b/src/Parsers/ASTIndexDeclaration.h similarity index 100% rename from 
dbms/Parsers/ASTIndexDeclaration.h rename to src/Parsers/ASTIndexDeclaration.h diff --git a/dbms/Parsers/ASTInsertQuery.cpp b/src/Parsers/ASTInsertQuery.cpp similarity index 100% rename from dbms/Parsers/ASTInsertQuery.cpp rename to src/Parsers/ASTInsertQuery.cpp diff --git a/dbms/Parsers/ASTInsertQuery.h b/src/Parsers/ASTInsertQuery.h similarity index 100% rename from dbms/Parsers/ASTInsertQuery.h rename to src/Parsers/ASTInsertQuery.h diff --git a/dbms/Parsers/ASTKillQueryQuery.cpp b/src/Parsers/ASTKillQueryQuery.cpp similarity index 100% rename from dbms/Parsers/ASTKillQueryQuery.cpp rename to src/Parsers/ASTKillQueryQuery.cpp diff --git a/dbms/Parsers/ASTKillQueryQuery.h b/src/Parsers/ASTKillQueryQuery.h similarity index 100% rename from dbms/Parsers/ASTKillQueryQuery.h rename to src/Parsers/ASTKillQueryQuery.h diff --git a/dbms/Parsers/ASTLiteral.cpp b/src/Parsers/ASTLiteral.cpp similarity index 100% rename from dbms/Parsers/ASTLiteral.cpp rename to src/Parsers/ASTLiteral.cpp diff --git a/dbms/Parsers/ASTLiteral.h b/src/Parsers/ASTLiteral.h similarity index 100% rename from dbms/Parsers/ASTLiteral.h rename to src/Parsers/ASTLiteral.h diff --git a/dbms/Parsers/ASTNameTypePair.h b/src/Parsers/ASTNameTypePair.h similarity index 100% rename from dbms/Parsers/ASTNameTypePair.h rename to src/Parsers/ASTNameTypePair.h diff --git a/dbms/Parsers/ASTOptimizeQuery.cpp b/src/Parsers/ASTOptimizeQuery.cpp similarity index 100% rename from dbms/Parsers/ASTOptimizeQuery.cpp rename to src/Parsers/ASTOptimizeQuery.cpp diff --git a/dbms/Parsers/ASTOptimizeQuery.h b/src/Parsers/ASTOptimizeQuery.h similarity index 100% rename from dbms/Parsers/ASTOptimizeQuery.h rename to src/Parsers/ASTOptimizeQuery.h diff --git a/dbms/Parsers/ASTOrderByElement.cpp b/src/Parsers/ASTOrderByElement.cpp similarity index 100% rename from dbms/Parsers/ASTOrderByElement.cpp rename to src/Parsers/ASTOrderByElement.cpp diff --git a/dbms/Parsers/ASTOrderByElement.h b/src/Parsers/ASTOrderByElement.h similarity index 100% rename from dbms/Parsers/ASTOrderByElement.h rename to src/Parsers/ASTOrderByElement.h diff --git a/dbms/Parsers/ASTPartition.cpp b/src/Parsers/ASTPartition.cpp similarity index 100% rename from dbms/Parsers/ASTPartition.cpp rename to src/Parsers/ASTPartition.cpp diff --git a/dbms/Parsers/ASTPartition.h b/src/Parsers/ASTPartition.h similarity index 100% rename from dbms/Parsers/ASTPartition.h rename to src/Parsers/ASTPartition.h diff --git a/dbms/Parsers/ASTQualifiedAsterisk.cpp b/src/Parsers/ASTQualifiedAsterisk.cpp similarity index 100% rename from dbms/Parsers/ASTQualifiedAsterisk.cpp rename to src/Parsers/ASTQualifiedAsterisk.cpp diff --git a/dbms/Parsers/ASTQualifiedAsterisk.h b/src/Parsers/ASTQualifiedAsterisk.h similarity index 100% rename from dbms/Parsers/ASTQualifiedAsterisk.h rename to src/Parsers/ASTQualifiedAsterisk.h diff --git a/dbms/Parsers/ASTQueryParameter.cpp b/src/Parsers/ASTQueryParameter.cpp similarity index 100% rename from dbms/Parsers/ASTQueryParameter.cpp rename to src/Parsers/ASTQueryParameter.cpp diff --git a/dbms/Parsers/ASTQueryParameter.h b/src/Parsers/ASTQueryParameter.h similarity index 100% rename from dbms/Parsers/ASTQueryParameter.h rename to src/Parsers/ASTQueryParameter.h diff --git a/dbms/Parsers/ASTQueryWithOnCluster.cpp b/src/Parsers/ASTQueryWithOnCluster.cpp similarity index 100% rename from dbms/Parsers/ASTQueryWithOnCluster.cpp rename to src/Parsers/ASTQueryWithOnCluster.cpp diff --git a/dbms/Parsers/ASTQueryWithOnCluster.h b/src/Parsers/ASTQueryWithOnCluster.h 
similarity index 100% rename from dbms/Parsers/ASTQueryWithOnCluster.h rename to src/Parsers/ASTQueryWithOnCluster.h diff --git a/dbms/Parsers/ASTQueryWithOutput.cpp b/src/Parsers/ASTQueryWithOutput.cpp similarity index 100% rename from dbms/Parsers/ASTQueryWithOutput.cpp rename to src/Parsers/ASTQueryWithOutput.cpp diff --git a/dbms/Parsers/ASTQueryWithOutput.h b/src/Parsers/ASTQueryWithOutput.h similarity index 100% rename from dbms/Parsers/ASTQueryWithOutput.h rename to src/Parsers/ASTQueryWithOutput.h diff --git a/dbms/Parsers/ASTQueryWithTableAndOutput.cpp b/src/Parsers/ASTQueryWithTableAndOutput.cpp similarity index 100% rename from dbms/Parsers/ASTQueryWithTableAndOutput.cpp rename to src/Parsers/ASTQueryWithTableAndOutput.cpp diff --git a/dbms/Parsers/ASTQueryWithTableAndOutput.h b/src/Parsers/ASTQueryWithTableAndOutput.h similarity index 100% rename from dbms/Parsers/ASTQueryWithTableAndOutput.h rename to src/Parsers/ASTQueryWithTableAndOutput.h diff --git a/dbms/Parsers/ASTRenameQuery.h b/src/Parsers/ASTRenameQuery.h similarity index 100% rename from dbms/Parsers/ASTRenameQuery.h rename to src/Parsers/ASTRenameQuery.h diff --git a/dbms/Parsers/ASTSampleRatio.cpp b/src/Parsers/ASTSampleRatio.cpp similarity index 100% rename from dbms/Parsers/ASTSampleRatio.cpp rename to src/Parsers/ASTSampleRatio.cpp diff --git a/dbms/Parsers/ASTSampleRatio.h b/src/Parsers/ASTSampleRatio.h similarity index 100% rename from dbms/Parsers/ASTSampleRatio.h rename to src/Parsers/ASTSampleRatio.h diff --git a/dbms/Parsers/ASTSelectQuery.cpp b/src/Parsers/ASTSelectQuery.cpp similarity index 100% rename from dbms/Parsers/ASTSelectQuery.cpp rename to src/Parsers/ASTSelectQuery.cpp diff --git a/dbms/Parsers/ASTSelectQuery.h b/src/Parsers/ASTSelectQuery.h similarity index 100% rename from dbms/Parsers/ASTSelectQuery.h rename to src/Parsers/ASTSelectQuery.h diff --git a/dbms/Parsers/ASTSelectWithUnionQuery.cpp b/src/Parsers/ASTSelectWithUnionQuery.cpp similarity index 100% rename from dbms/Parsers/ASTSelectWithUnionQuery.cpp rename to src/Parsers/ASTSelectWithUnionQuery.cpp diff --git a/dbms/Parsers/ASTSelectWithUnionQuery.h b/src/Parsers/ASTSelectWithUnionQuery.h similarity index 100% rename from dbms/Parsers/ASTSelectWithUnionQuery.h rename to src/Parsers/ASTSelectWithUnionQuery.h diff --git a/dbms/Parsers/ASTSetQuery.h b/src/Parsers/ASTSetQuery.h similarity index 100% rename from dbms/Parsers/ASTSetQuery.h rename to src/Parsers/ASTSetQuery.h diff --git a/dbms/Parsers/ASTSetRoleQuery.cpp b/src/Parsers/ASTSetRoleQuery.cpp similarity index 100% rename from dbms/Parsers/ASTSetRoleQuery.cpp rename to src/Parsers/ASTSetRoleQuery.cpp diff --git a/dbms/Parsers/ASTSetRoleQuery.h b/src/Parsers/ASTSetRoleQuery.h similarity index 100% rename from dbms/Parsers/ASTSetRoleQuery.h rename to src/Parsers/ASTSetRoleQuery.h diff --git a/dbms/Parsers/ASTSettingsProfileElement.cpp b/src/Parsers/ASTSettingsProfileElement.cpp similarity index 100% rename from dbms/Parsers/ASTSettingsProfileElement.cpp rename to src/Parsers/ASTSettingsProfileElement.cpp diff --git a/dbms/Parsers/ASTSettingsProfileElement.h b/src/Parsers/ASTSettingsProfileElement.h similarity index 100% rename from dbms/Parsers/ASTSettingsProfileElement.h rename to src/Parsers/ASTSettingsProfileElement.h diff --git a/dbms/Parsers/ASTShowCreateAccessEntityQuery.cpp b/src/Parsers/ASTShowCreateAccessEntityQuery.cpp similarity index 100% rename from dbms/Parsers/ASTShowCreateAccessEntityQuery.cpp rename to src/Parsers/ASTShowCreateAccessEntityQuery.cpp diff --git 
a/dbms/Parsers/ASTShowCreateAccessEntityQuery.h b/src/Parsers/ASTShowCreateAccessEntityQuery.h similarity index 100% rename from dbms/Parsers/ASTShowCreateAccessEntityQuery.h rename to src/Parsers/ASTShowCreateAccessEntityQuery.h diff --git a/dbms/Parsers/ASTShowGrantsQuery.cpp b/src/Parsers/ASTShowGrantsQuery.cpp similarity index 100% rename from dbms/Parsers/ASTShowGrantsQuery.cpp rename to src/Parsers/ASTShowGrantsQuery.cpp diff --git a/dbms/Parsers/ASTShowGrantsQuery.h b/src/Parsers/ASTShowGrantsQuery.h similarity index 100% rename from dbms/Parsers/ASTShowGrantsQuery.h rename to src/Parsers/ASTShowGrantsQuery.h diff --git a/dbms/Parsers/ASTShowProcesslistQuery.h b/src/Parsers/ASTShowProcesslistQuery.h similarity index 100% rename from dbms/Parsers/ASTShowProcesslistQuery.h rename to src/Parsers/ASTShowProcesslistQuery.h diff --git a/dbms/Parsers/ASTShowQuotasQuery.cpp b/src/Parsers/ASTShowQuotasQuery.cpp similarity index 100% rename from dbms/Parsers/ASTShowQuotasQuery.cpp rename to src/Parsers/ASTShowQuotasQuery.cpp diff --git a/dbms/Parsers/ASTShowQuotasQuery.h b/src/Parsers/ASTShowQuotasQuery.h similarity index 100% rename from dbms/Parsers/ASTShowQuotasQuery.h rename to src/Parsers/ASTShowQuotasQuery.h diff --git a/dbms/Parsers/ASTShowRowPoliciesQuery.cpp b/src/Parsers/ASTShowRowPoliciesQuery.cpp similarity index 100% rename from dbms/Parsers/ASTShowRowPoliciesQuery.cpp rename to src/Parsers/ASTShowRowPoliciesQuery.cpp diff --git a/dbms/Parsers/ASTShowRowPoliciesQuery.h b/src/Parsers/ASTShowRowPoliciesQuery.h similarity index 100% rename from dbms/Parsers/ASTShowRowPoliciesQuery.h rename to src/Parsers/ASTShowRowPoliciesQuery.h diff --git a/dbms/Parsers/ASTShowTablesQuery.cpp b/src/Parsers/ASTShowTablesQuery.cpp similarity index 100% rename from dbms/Parsers/ASTShowTablesQuery.cpp rename to src/Parsers/ASTShowTablesQuery.cpp diff --git a/dbms/Parsers/ASTShowTablesQuery.h b/src/Parsers/ASTShowTablesQuery.h similarity index 100% rename from dbms/Parsers/ASTShowTablesQuery.h rename to src/Parsers/ASTShowTablesQuery.h diff --git a/dbms/Parsers/ASTSubquery.cpp b/src/Parsers/ASTSubquery.cpp similarity index 100% rename from dbms/Parsers/ASTSubquery.cpp rename to src/Parsers/ASTSubquery.cpp diff --git a/dbms/Parsers/ASTSubquery.h b/src/Parsers/ASTSubquery.h similarity index 100% rename from dbms/Parsers/ASTSubquery.h rename to src/Parsers/ASTSubquery.h diff --git a/dbms/Parsers/ASTSystemQuery.cpp b/src/Parsers/ASTSystemQuery.cpp similarity index 100% rename from dbms/Parsers/ASTSystemQuery.cpp rename to src/Parsers/ASTSystemQuery.cpp diff --git a/dbms/Parsers/ASTSystemQuery.h b/src/Parsers/ASTSystemQuery.h similarity index 100% rename from dbms/Parsers/ASTSystemQuery.h rename to src/Parsers/ASTSystemQuery.h diff --git a/dbms/Parsers/ASTTTLElement.cpp b/src/Parsers/ASTTTLElement.cpp similarity index 100% rename from dbms/Parsers/ASTTTLElement.cpp rename to src/Parsers/ASTTTLElement.cpp diff --git a/dbms/Parsers/ASTTTLElement.h b/src/Parsers/ASTTTLElement.h similarity index 100% rename from dbms/Parsers/ASTTTLElement.h rename to src/Parsers/ASTTTLElement.h diff --git a/dbms/Parsers/ASTTablesInSelectQuery.cpp b/src/Parsers/ASTTablesInSelectQuery.cpp similarity index 100% rename from dbms/Parsers/ASTTablesInSelectQuery.cpp rename to src/Parsers/ASTTablesInSelectQuery.cpp diff --git a/dbms/Parsers/ASTTablesInSelectQuery.h b/src/Parsers/ASTTablesInSelectQuery.h similarity index 100% rename from dbms/Parsers/ASTTablesInSelectQuery.h rename to src/Parsers/ASTTablesInSelectQuery.h diff --git 
diff --git a/dbms/Parsers/ASTUseQuery.h b/src/Parsers/ASTUseQuery.h
similarity index 100%
rename from dbms/Parsers/ASTUseQuery.h
rename to src/Parsers/ASTUseQuery.h
diff --git a/dbms/Parsers/ASTWatchQuery.h b/src/Parsers/ASTWatchQuery.h
similarity index 100%
rename from dbms/Parsers/ASTWatchQuery.h
rename to src/Parsers/ASTWatchQuery.h
diff --git a/dbms/Parsers/ASTWithAlias.cpp b/src/Parsers/ASTWithAlias.cpp
similarity index 100%
rename from dbms/Parsers/ASTWithAlias.cpp
rename to src/Parsers/ASTWithAlias.cpp
diff --git a/dbms/Parsers/ASTWithAlias.h b/src/Parsers/ASTWithAlias.h
similarity index 100%
rename from dbms/Parsers/ASTWithAlias.h
rename to src/Parsers/ASTWithAlias.h
diff --git a/dbms/Parsers/CMakeLists.txt b/src/Parsers/CMakeLists.txt
similarity index 88%
rename from dbms/Parsers/CMakeLists.txt
rename to src/Parsers/CMakeLists.txt
index 6424cdfe9ea..6e3ab9decb7 100644
--- a/dbms/Parsers/CMakeLists.txt
+++ b/src/Parsers/CMakeLists.txt
@@ -5,7 +5,7 @@ target_link_libraries(clickhouse_parsers PUBLIC clickhouse_common_io)
 target_include_directories(clickhouse_parsers PUBLIC ${DBMS_INCLUDE_DIR})
 
 if (USE_DEBUG_HELPERS)
-    set (INCLUDE_DEBUG_HELPERS "-I${ClickHouse_SOURCE_DIR}/base -include ${ClickHouse_SOURCE_DIR}/dbms/Parsers/iostream_debug_helpers.h")
+    set (INCLUDE_DEBUG_HELPERS "-I${ClickHouse_SOURCE_DIR}/base -include ${ClickHouse_SOURCE_DIR}/src/Parsers/iostream_debug_helpers.h")
     set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${INCLUDE_DEBUG_HELPERS}")
 endif ()
 
diff --git a/dbms/Parsers/CommonParsers.cpp b/src/Parsers/CommonParsers.cpp
similarity index 100%
rename from dbms/Parsers/CommonParsers.cpp
rename to src/Parsers/CommonParsers.cpp
diff --git a/dbms/Parsers/CommonParsers.h b/src/Parsers/CommonParsers.h
similarity index 100%
rename from dbms/Parsers/CommonParsers.h
rename to src/Parsers/CommonParsers.h
diff --git a/dbms/Parsers/DumpASTNode.h b/src/Parsers/DumpASTNode.h
similarity index 100%
rename from dbms/Parsers/DumpASTNode.h
rename to src/Parsers/DumpASTNode.h
diff --git a/dbms/Parsers/ExpressionElementParsers.cpp b/src/Parsers/ExpressionElementParsers.cpp
similarity index 100%
rename from dbms/Parsers/ExpressionElementParsers.cpp
rename to src/Parsers/ExpressionElementParsers.cpp
diff --git a/dbms/Parsers/ExpressionElementParsers.h b/src/Parsers/ExpressionElementParsers.h
similarity index 100%
rename from dbms/Parsers/ExpressionElementParsers.h
rename to src/Parsers/ExpressionElementParsers.h
diff --git a/dbms/Parsers/ExpressionListParsers.cpp b/src/Parsers/ExpressionListParsers.cpp
similarity index 100%
rename from dbms/Parsers/ExpressionListParsers.cpp
rename to src/Parsers/ExpressionListParsers.cpp
diff --git a/dbms/Parsers/ExpressionListParsers.h b/src/Parsers/ExpressionListParsers.h
similarity index 100%
rename from dbms/Parsers/ExpressionListParsers.h
rename to src/Parsers/ExpressionListParsers.h
diff --git a/dbms/Parsers/IAST.cpp b/src/Parsers/IAST.cpp
similarity index 100%
rename from dbms/Parsers/IAST.cpp
rename to src/Parsers/IAST.cpp
diff --git a/dbms/Parsers/IAST.h b/src/Parsers/IAST.h
similarity index 100%
rename from dbms/Parsers/IAST.h
rename to src/Parsers/IAST.h
diff --git a/dbms/Parsers/IAST_fwd.h b/src/Parsers/IAST_fwd.h
similarity index 100%
rename from dbms/Parsers/IAST_fwd.h
rename to src/Parsers/IAST_fwd.h
diff --git a/dbms/Parsers/IParser.h b/src/Parsers/IParser.h
similarity index 100%
rename from dbms/Parsers/IParser.h
rename to src/Parsers/IParser.h
diff --git a/dbms/Parsers/IParserBase.cpp b/src/Parsers/IParserBase.cpp
similarity index 100%
rename from dbms/Parsers/IParserBase.cpp
rename to src/Parsers/IParserBase.cpp
diff --git a/dbms/Parsers/IParserBase.h b/src/Parsers/IParserBase.h
similarity index 100%
rename from dbms/Parsers/IParserBase.h
rename to src/Parsers/IParserBase.h
diff --git a/dbms/Parsers/IdentifierQuotingStyle.h b/src/Parsers/IdentifierQuotingStyle.h
similarity index 100%
rename from dbms/Parsers/IdentifierQuotingStyle.h
rename to src/Parsers/IdentifierQuotingStyle.h
diff --git a/dbms/Parsers/Lexer.cpp b/src/Parsers/Lexer.cpp
similarity index 100%
rename from dbms/Parsers/Lexer.cpp
rename to src/Parsers/Lexer.cpp
diff --git a/dbms/Parsers/Lexer.h b/src/Parsers/Lexer.h
similarity index 100%
rename from dbms/Parsers/Lexer.h
rename to src/Parsers/Lexer.h
diff --git a/dbms/Parsers/ParserAlterQuery.cpp b/src/Parsers/ParserAlterQuery.cpp
similarity index 100%
rename from dbms/Parsers/ParserAlterQuery.cpp
rename to src/Parsers/ParserAlterQuery.cpp
diff --git a/dbms/Parsers/ParserAlterQuery.h b/src/Parsers/ParserAlterQuery.h
similarity index 100%
rename from dbms/Parsers/ParserAlterQuery.h
rename to src/Parsers/ParserAlterQuery.h
diff --git a/dbms/Parsers/ParserCase.cpp b/src/Parsers/ParserCase.cpp
similarity index 100%
rename from dbms/Parsers/ParserCase.cpp
rename to src/Parsers/ParserCase.cpp
diff --git a/dbms/Parsers/ParserCase.h b/src/Parsers/ParserCase.h
similarity index 100%
rename from dbms/Parsers/ParserCase.h
rename to src/Parsers/ParserCase.h
diff --git a/dbms/Parsers/ParserCheckQuery.cpp b/src/Parsers/ParserCheckQuery.cpp
similarity index 100%
rename from dbms/Parsers/ParserCheckQuery.cpp
rename to src/Parsers/ParserCheckQuery.cpp
diff --git a/dbms/Parsers/ParserCheckQuery.h b/src/Parsers/ParserCheckQuery.h
similarity index 100%
rename from dbms/Parsers/ParserCheckQuery.h
rename to src/Parsers/ParserCheckQuery.h
diff --git a/dbms/Parsers/ParserCreateQuery.cpp b/src/Parsers/ParserCreateQuery.cpp
similarity index 100%
rename from dbms/Parsers/ParserCreateQuery.cpp
rename to src/Parsers/ParserCreateQuery.cpp
diff --git a/dbms/Parsers/ParserCreateQuery.h b/src/Parsers/ParserCreateQuery.h
similarity index 100%
rename from dbms/Parsers/ParserCreateQuery.h
rename to src/Parsers/ParserCreateQuery.h
diff --git a/dbms/Parsers/ParserCreateQuotaQuery.cpp b/src/Parsers/ParserCreateQuotaQuery.cpp
similarity index 100%
rename from dbms/Parsers/ParserCreateQuotaQuery.cpp
rename to src/Parsers/ParserCreateQuotaQuery.cpp
diff --git a/dbms/Parsers/ParserCreateQuotaQuery.h b/src/Parsers/ParserCreateQuotaQuery.h
similarity index 100%
rename from dbms/Parsers/ParserCreateQuotaQuery.h
rename to src/Parsers/ParserCreateQuotaQuery.h
diff --git a/dbms/Parsers/ParserCreateRoleQuery.cpp b/src/Parsers/ParserCreateRoleQuery.cpp
similarity index 100%
rename from dbms/Parsers/ParserCreateRoleQuery.cpp
rename to src/Parsers/ParserCreateRoleQuery.cpp
diff --git a/dbms/Parsers/ParserCreateRoleQuery.h b/src/Parsers/ParserCreateRoleQuery.h
similarity index 100%
rename from dbms/Parsers/ParserCreateRoleQuery.h
rename to src/Parsers/ParserCreateRoleQuery.h
diff --git a/dbms/Parsers/ParserCreateRowPolicyQuery.cpp b/src/Parsers/ParserCreateRowPolicyQuery.cpp
similarity index 100%
rename from dbms/Parsers/ParserCreateRowPolicyQuery.cpp
rename to src/Parsers/ParserCreateRowPolicyQuery.cpp
diff --git a/dbms/Parsers/ParserCreateRowPolicyQuery.h b/src/Parsers/ParserCreateRowPolicyQuery.h
similarity index 100%
rename from dbms/Parsers/ParserCreateRowPolicyQuery.h
rename to src/Parsers/ParserCreateRowPolicyQuery.h
diff --git a/dbms/Parsers/ParserCreateSettingsProfileQuery.cpp b/src/Parsers/ParserCreateSettingsProfileQuery.cpp
similarity index 100%
rename from dbms/Parsers/ParserCreateSettingsProfileQuery.cpp
rename to src/Parsers/ParserCreateSettingsProfileQuery.cpp
diff --git a/dbms/Parsers/ParserCreateSettingsProfileQuery.h b/src/Parsers/ParserCreateSettingsProfileQuery.h
similarity index 100%
rename from dbms/Parsers/ParserCreateSettingsProfileQuery.h
rename to src/Parsers/ParserCreateSettingsProfileQuery.h
diff --git a/dbms/Parsers/ParserCreateUserQuery.cpp b/src/Parsers/ParserCreateUserQuery.cpp
similarity index 100%
rename from dbms/Parsers/ParserCreateUserQuery.cpp
rename to src/Parsers/ParserCreateUserQuery.cpp
diff --git a/dbms/Parsers/ParserCreateUserQuery.h b/src/Parsers/ParserCreateUserQuery.h
similarity index 100%
rename from dbms/Parsers/ParserCreateUserQuery.h
rename to src/Parsers/ParserCreateUserQuery.h
diff --git a/dbms/Parsers/ParserDescribeTableQuery.cpp b/src/Parsers/ParserDescribeTableQuery.cpp
similarity index 100%
rename from dbms/Parsers/ParserDescribeTableQuery.cpp
rename to src/Parsers/ParserDescribeTableQuery.cpp
diff --git a/dbms/Parsers/ParserDescribeTableQuery.h b/src/Parsers/ParserDescribeTableQuery.h
similarity index 100%
rename from dbms/Parsers/ParserDescribeTableQuery.h
rename to src/Parsers/ParserDescribeTableQuery.h
diff --git a/dbms/Parsers/ParserDictionary.cpp b/src/Parsers/ParserDictionary.cpp
similarity index 100%
rename from dbms/Parsers/ParserDictionary.cpp
rename to src/Parsers/ParserDictionary.cpp
diff --git a/dbms/Parsers/ParserDictionary.h b/src/Parsers/ParserDictionary.h
similarity index 100%
rename from dbms/Parsers/ParserDictionary.h
rename to src/Parsers/ParserDictionary.h
diff --git a/dbms/Parsers/ParserDictionaryAttributeDeclaration.cpp b/src/Parsers/ParserDictionaryAttributeDeclaration.cpp
similarity index 100%
rename from dbms/Parsers/ParserDictionaryAttributeDeclaration.cpp
rename to src/Parsers/ParserDictionaryAttributeDeclaration.cpp
diff --git a/dbms/Parsers/ParserDictionaryAttributeDeclaration.h b/src/Parsers/ParserDictionaryAttributeDeclaration.h
similarity index 100%
rename from dbms/Parsers/ParserDictionaryAttributeDeclaration.h
rename to src/Parsers/ParserDictionaryAttributeDeclaration.h
diff --git a/dbms/Parsers/ParserDropAccessEntityQuery.cpp b/src/Parsers/ParserDropAccessEntityQuery.cpp
similarity index 100%
rename from dbms/Parsers/ParserDropAccessEntityQuery.cpp
rename to src/Parsers/ParserDropAccessEntityQuery.cpp
diff --git a/dbms/Parsers/ParserDropAccessEntityQuery.h b/src/Parsers/ParserDropAccessEntityQuery.h
similarity index 100%
rename from dbms/Parsers/ParserDropAccessEntityQuery.h
rename to src/Parsers/ParserDropAccessEntityQuery.h
diff --git a/dbms/Parsers/ParserDropQuery.cpp b/src/Parsers/ParserDropQuery.cpp
similarity index 100%
rename from dbms/Parsers/ParserDropQuery.cpp
rename to src/Parsers/ParserDropQuery.cpp
diff --git a/dbms/Parsers/ParserDropQuery.h b/src/Parsers/ParserDropQuery.h
similarity index 100%
rename from dbms/Parsers/ParserDropQuery.h
rename to src/Parsers/ParserDropQuery.h
diff --git a/dbms/Parsers/ParserExtendedRoleSet.cpp b/src/Parsers/ParserExtendedRoleSet.cpp
similarity index 100%
rename from dbms/Parsers/ParserExtendedRoleSet.cpp
rename to src/Parsers/ParserExtendedRoleSet.cpp
diff --git a/dbms/Parsers/ParserExtendedRoleSet.h b/src/Parsers/ParserExtendedRoleSet.h
similarity index 100%
rename from dbms/Parsers/ParserExtendedRoleSet.h
rename to src/Parsers/ParserExtendedRoleSet.h
diff --git a/dbms/Parsers/ParserGrantQuery.cpp b/src/Parsers/ParserGrantQuery.cpp
similarity index 100%
rename from dbms/Parsers/ParserGrantQuery.cpp
rename to src/Parsers/ParserGrantQuery.cpp
diff --git a/dbms/Parsers/ParserGrantQuery.h b/src/Parsers/ParserGrantQuery.h
similarity index 100%
rename from dbms/Parsers/ParserGrantQuery.h
rename to src/Parsers/ParserGrantQuery.h
diff --git a/dbms/Parsers/ParserInsertQuery.cpp b/src/Parsers/ParserInsertQuery.cpp
similarity index 100%
rename from dbms/Parsers/ParserInsertQuery.cpp
rename to src/Parsers/ParserInsertQuery.cpp
diff --git a/dbms/Parsers/ParserInsertQuery.h b/src/Parsers/ParserInsertQuery.h
similarity index 100%
rename from dbms/Parsers/ParserInsertQuery.h
rename to src/Parsers/ParserInsertQuery.h
diff --git a/dbms/Parsers/ParserKillQueryQuery.cpp b/src/Parsers/ParserKillQueryQuery.cpp
similarity index 100%
rename from dbms/Parsers/ParserKillQueryQuery.cpp
rename to src/Parsers/ParserKillQueryQuery.cpp
diff --git a/dbms/Parsers/ParserKillQueryQuery.h b/src/Parsers/ParserKillQueryQuery.h
similarity index 100%
rename from dbms/Parsers/ParserKillQueryQuery.h
rename to src/Parsers/ParserKillQueryQuery.h
diff --git a/dbms/Parsers/ParserOptimizeQuery.cpp b/src/Parsers/ParserOptimizeQuery.cpp
similarity index 100%
rename from dbms/Parsers/ParserOptimizeQuery.cpp
rename to src/Parsers/ParserOptimizeQuery.cpp
diff --git a/dbms/Parsers/ParserOptimizeQuery.h b/src/Parsers/ParserOptimizeQuery.h
similarity index 100%
rename from dbms/Parsers/ParserOptimizeQuery.h
rename to src/Parsers/ParserOptimizeQuery.h
diff --git a/dbms/Parsers/ParserPartition.cpp b/src/Parsers/ParserPartition.cpp
similarity index 100%
rename from dbms/Parsers/ParserPartition.cpp
rename to src/Parsers/ParserPartition.cpp
diff --git a/dbms/Parsers/ParserPartition.h b/src/Parsers/ParserPartition.h
similarity index 100%
rename from dbms/Parsers/ParserPartition.h
rename to src/Parsers/ParserPartition.h
diff --git a/dbms/Parsers/ParserQuery.cpp b/src/Parsers/ParserQuery.cpp
similarity index 100%
rename from dbms/Parsers/ParserQuery.cpp
rename to src/Parsers/ParserQuery.cpp
diff --git a/dbms/Parsers/ParserQuery.h b/src/Parsers/ParserQuery.h
similarity index 100%
rename from dbms/Parsers/ParserQuery.h
rename to src/Parsers/ParserQuery.h
diff --git a/dbms/Parsers/ParserQueryWithOutput.cpp b/src/Parsers/ParserQueryWithOutput.cpp
similarity index 100%
rename from dbms/Parsers/ParserQueryWithOutput.cpp
rename to src/Parsers/ParserQueryWithOutput.cpp
diff --git a/dbms/Parsers/ParserQueryWithOutput.h b/src/Parsers/ParserQueryWithOutput.h
similarity index 100%
rename from dbms/Parsers/ParserQueryWithOutput.h
rename to src/Parsers/ParserQueryWithOutput.h
diff --git a/dbms/Parsers/ParserRenameQuery.cpp b/src/Parsers/ParserRenameQuery.cpp
similarity index 100%
rename from dbms/Parsers/ParserRenameQuery.cpp
rename to src/Parsers/ParserRenameQuery.cpp
diff --git a/dbms/Parsers/ParserRenameQuery.h b/src/Parsers/ParserRenameQuery.h
similarity index 100%
rename from dbms/Parsers/ParserRenameQuery.h
rename to src/Parsers/ParserRenameQuery.h
diff --git a/dbms/Parsers/ParserSampleRatio.cpp b/src/Parsers/ParserSampleRatio.cpp
similarity index 100%
rename from dbms/Parsers/ParserSampleRatio.cpp
rename to src/Parsers/ParserSampleRatio.cpp
diff --git a/dbms/Parsers/ParserSampleRatio.h b/src/Parsers/ParserSampleRatio.h
similarity index 100%
rename from dbms/Parsers/ParserSampleRatio.h
rename to src/Parsers/ParserSampleRatio.h
diff --git a/dbms/Parsers/ParserSelectQuery.cpp b/src/Parsers/ParserSelectQuery.cpp
similarity index 100%
rename from dbms/Parsers/ParserSelectQuery.cpp
rename to src/Parsers/ParserSelectQuery.cpp
diff --git a/dbms/Parsers/ParserSelectQuery.h b/src/Parsers/ParserSelectQuery.h
similarity index 100%
rename from dbms/Parsers/ParserSelectQuery.h
rename to src/Parsers/ParserSelectQuery.h
diff --git a/dbms/Parsers/ParserSelectWithUnionQuery.cpp b/src/Parsers/ParserSelectWithUnionQuery.cpp
similarity index 100%
rename from dbms/Parsers/ParserSelectWithUnionQuery.cpp
rename to src/Parsers/ParserSelectWithUnionQuery.cpp
diff --git a/dbms/Parsers/ParserSelectWithUnionQuery.h b/src/Parsers/ParserSelectWithUnionQuery.h
similarity index 100%
rename from dbms/Parsers/ParserSelectWithUnionQuery.h
rename to src/Parsers/ParserSelectWithUnionQuery.h
diff --git a/dbms/Parsers/ParserSetQuery.cpp b/src/Parsers/ParserSetQuery.cpp
similarity index 100%
rename from dbms/Parsers/ParserSetQuery.cpp
rename to src/Parsers/ParserSetQuery.cpp
diff --git a/dbms/Parsers/ParserSetQuery.h b/src/Parsers/ParserSetQuery.h
similarity index 100%
rename from dbms/Parsers/ParserSetQuery.h
rename to src/Parsers/ParserSetQuery.h
diff --git a/dbms/Parsers/ParserSetRoleQuery.cpp b/src/Parsers/ParserSetRoleQuery.cpp
similarity index 100%
rename from dbms/Parsers/ParserSetRoleQuery.cpp
rename to src/Parsers/ParserSetRoleQuery.cpp
diff --git a/dbms/Parsers/ParserSetRoleQuery.h b/src/Parsers/ParserSetRoleQuery.h
similarity index 100%
rename from dbms/Parsers/ParserSetRoleQuery.h
rename to src/Parsers/ParserSetRoleQuery.h
diff --git a/dbms/Parsers/ParserSettingsProfileElement.cpp b/src/Parsers/ParserSettingsProfileElement.cpp
similarity index 100%
rename from dbms/Parsers/ParserSettingsProfileElement.cpp
rename to src/Parsers/ParserSettingsProfileElement.cpp
diff --git a/dbms/Parsers/ParserSettingsProfileElement.h b/src/Parsers/ParserSettingsProfileElement.h
similarity index 100%
rename from dbms/Parsers/ParserSettingsProfileElement.h
rename to src/Parsers/ParserSettingsProfileElement.h
diff --git a/dbms/Parsers/ParserShowCreateAccessEntityQuery.cpp b/src/Parsers/ParserShowCreateAccessEntityQuery.cpp
similarity index 100%
rename from dbms/Parsers/ParserShowCreateAccessEntityQuery.cpp
rename to src/Parsers/ParserShowCreateAccessEntityQuery.cpp
diff --git a/dbms/Parsers/ParserShowCreateAccessEntityQuery.h b/src/Parsers/ParserShowCreateAccessEntityQuery.h
similarity index 100%
rename from dbms/Parsers/ParserShowCreateAccessEntityQuery.h
rename to src/Parsers/ParserShowCreateAccessEntityQuery.h
diff --git a/dbms/Parsers/ParserShowGrantsQuery.cpp b/src/Parsers/ParserShowGrantsQuery.cpp
similarity index 100%
rename from dbms/Parsers/ParserShowGrantsQuery.cpp
rename to src/Parsers/ParserShowGrantsQuery.cpp
diff --git a/dbms/Parsers/ParserShowGrantsQuery.h b/src/Parsers/ParserShowGrantsQuery.h
similarity index 100%
rename from dbms/Parsers/ParserShowGrantsQuery.h
rename to src/Parsers/ParserShowGrantsQuery.h
diff --git a/dbms/Parsers/ParserShowProcesslistQuery.h b/src/Parsers/ParserShowProcesslistQuery.h
similarity index 100%
rename from dbms/Parsers/ParserShowProcesslistQuery.h
rename to src/Parsers/ParserShowProcesslistQuery.h
diff --git a/dbms/Parsers/ParserShowQuotasQuery.cpp b/src/Parsers/ParserShowQuotasQuery.cpp
similarity index 100%
rename from dbms/Parsers/ParserShowQuotasQuery.cpp
rename to src/Parsers/ParserShowQuotasQuery.cpp
diff --git a/dbms/Parsers/ParserShowQuotasQuery.h b/src/Parsers/ParserShowQuotasQuery.h
similarity index 100%
rename from dbms/Parsers/ParserShowQuotasQuery.h
rename to src/Parsers/ParserShowQuotasQuery.h
diff --git a/dbms/Parsers/ParserShowRowPoliciesQuery.cpp b/src/Parsers/ParserShowRowPoliciesQuery.cpp
similarity index 100%
rename from dbms/Parsers/ParserShowRowPoliciesQuery.cpp
rename to src/Parsers/ParserShowRowPoliciesQuery.cpp
diff --git a/dbms/Parsers/ParserShowRowPoliciesQuery.h b/src/Parsers/ParserShowRowPoliciesQuery.h
similarity index 100%
rename from dbms/Parsers/ParserShowRowPoliciesQuery.h
rename to src/Parsers/ParserShowRowPoliciesQuery.h
diff --git a/dbms/Parsers/ParserShowTablesQuery.cpp b/src/Parsers/ParserShowTablesQuery.cpp
similarity index 100%
rename from dbms/Parsers/ParserShowTablesQuery.cpp
rename to src/Parsers/ParserShowTablesQuery.cpp
diff --git a/dbms/Parsers/ParserShowTablesQuery.h b/src/Parsers/ParserShowTablesQuery.h
similarity index 100%
rename from dbms/Parsers/ParserShowTablesQuery.h
rename to src/Parsers/ParserShowTablesQuery.h
diff --git a/dbms/Parsers/ParserSystemQuery.cpp b/src/Parsers/ParserSystemQuery.cpp
similarity index 100%
rename from dbms/Parsers/ParserSystemQuery.cpp
rename to src/Parsers/ParserSystemQuery.cpp
diff --git a/dbms/Parsers/ParserSystemQuery.h b/src/Parsers/ParserSystemQuery.h
similarity index 100%
rename from dbms/Parsers/ParserSystemQuery.h
rename to src/Parsers/ParserSystemQuery.h
diff --git a/dbms/Parsers/ParserTablePropertiesQuery.cpp b/src/Parsers/ParserTablePropertiesQuery.cpp
similarity index 100%
rename from dbms/Parsers/ParserTablePropertiesQuery.cpp
rename to src/Parsers/ParserTablePropertiesQuery.cpp
diff --git a/dbms/Parsers/ParserTablePropertiesQuery.h b/src/Parsers/ParserTablePropertiesQuery.h
similarity index 100%
rename from dbms/Parsers/ParserTablePropertiesQuery.h
rename to src/Parsers/ParserTablePropertiesQuery.h
diff --git a/dbms/Parsers/ParserTablesInSelectQuery.cpp b/src/Parsers/ParserTablesInSelectQuery.cpp
similarity index 100%
rename from dbms/Parsers/ParserTablesInSelectQuery.cpp
rename to src/Parsers/ParserTablesInSelectQuery.cpp
diff --git a/dbms/Parsers/ParserTablesInSelectQuery.h b/src/Parsers/ParserTablesInSelectQuery.h
similarity index 100%
rename from dbms/Parsers/ParserTablesInSelectQuery.h
rename to src/Parsers/ParserTablesInSelectQuery.h
diff --git a/dbms/Parsers/ParserUnionQueryElement.cpp b/src/Parsers/ParserUnionQueryElement.cpp
similarity index 100%
rename from dbms/Parsers/ParserUnionQueryElement.cpp
rename to src/Parsers/ParserUnionQueryElement.cpp
diff --git a/dbms/Parsers/ParserUnionQueryElement.h b/src/Parsers/ParserUnionQueryElement.h
similarity index 100%
rename from dbms/Parsers/ParserUnionQueryElement.h
rename to src/Parsers/ParserUnionQueryElement.h
diff --git a/dbms/Parsers/ParserUseQuery.cpp b/src/Parsers/ParserUseQuery.cpp
similarity index 100%
rename from dbms/Parsers/ParserUseQuery.cpp
rename to src/Parsers/ParserUseQuery.cpp
diff --git a/dbms/Parsers/ParserUseQuery.h b/src/Parsers/ParserUseQuery.h
similarity index 100%
rename from dbms/Parsers/ParserUseQuery.h
rename to src/Parsers/ParserUseQuery.h
diff --git a/dbms/Parsers/ParserWatchQuery.cpp b/src/Parsers/ParserWatchQuery.cpp
similarity index 100%
rename from dbms/Parsers/ParserWatchQuery.cpp
rename to src/Parsers/ParserWatchQuery.cpp
diff --git a/dbms/Parsers/ParserWatchQuery.h b/src/Parsers/ParserWatchQuery.h
similarity index 100%
rename from dbms/Parsers/ParserWatchQuery.h
rename to src/Parsers/ParserWatchQuery.h
diff --git a/dbms/Parsers/StringRange.h b/src/Parsers/StringRange.h
similarity index 100%
rename from dbms/Parsers/StringRange.h
rename to src/Parsers/StringRange.h
diff --git a/dbms/Parsers/TablePropertiesQueriesASTs.h b/src/Parsers/TablePropertiesQueriesASTs.h
similarity index 100%
rename from dbms/Parsers/TablePropertiesQueriesASTs.h
rename to src/Parsers/TablePropertiesQueriesASTs.h
diff --git a/dbms/Parsers/TokenIterator.cpp b/src/Parsers/TokenIterator.cpp
similarity index 100%
rename from dbms/Parsers/TokenIterator.cpp
rename to src/Parsers/TokenIterator.cpp
diff --git a/dbms/Parsers/TokenIterator.h b/src/Parsers/TokenIterator.h
similarity index 100%
rename from dbms/Parsers/TokenIterator.h
rename to src/Parsers/TokenIterator.h
diff --git a/dbms/Parsers/formatAST.cpp b/src/Parsers/formatAST.cpp
similarity index 100%
rename from dbms/Parsers/formatAST.cpp
rename to src/Parsers/formatAST.cpp
diff --git a/dbms/Parsers/formatAST.h b/src/Parsers/formatAST.h
similarity index 100%
rename from dbms/Parsers/formatAST.h
rename to src/Parsers/formatAST.h
diff --git a/dbms/Parsers/iostream_debug_helpers.cpp b/src/Parsers/iostream_debug_helpers.cpp
similarity index 100%
rename from dbms/Parsers/iostream_debug_helpers.cpp
rename to src/Parsers/iostream_debug_helpers.cpp
diff --git a/dbms/Parsers/iostream_debug_helpers.h b/src/Parsers/iostream_debug_helpers.h
similarity index 100%
rename from dbms/Parsers/iostream_debug_helpers.h
rename to src/Parsers/iostream_debug_helpers.h
diff --git a/dbms/Parsers/parseDatabaseAndTableName.cpp b/src/Parsers/parseDatabaseAndTableName.cpp
similarity index 100%
rename from dbms/Parsers/parseDatabaseAndTableName.cpp
rename to src/Parsers/parseDatabaseAndTableName.cpp
diff --git a/dbms/Parsers/parseDatabaseAndTableName.h b/src/Parsers/parseDatabaseAndTableName.h
similarity index 100%
rename from dbms/Parsers/parseDatabaseAndTableName.h
rename to src/Parsers/parseDatabaseAndTableName.h
diff --git a/dbms/Parsers/parseIdentifierOrStringLiteral.cpp b/src/Parsers/parseIdentifierOrStringLiteral.cpp
similarity index 100%
rename from dbms/Parsers/parseIdentifierOrStringLiteral.cpp
rename to src/Parsers/parseIdentifierOrStringLiteral.cpp
diff --git a/dbms/Parsers/parseIdentifierOrStringLiteral.h b/src/Parsers/parseIdentifierOrStringLiteral.h
similarity index 100%
rename from dbms/Parsers/parseIdentifierOrStringLiteral.h
rename to src/Parsers/parseIdentifierOrStringLiteral.h
diff --git a/dbms/Parsers/parseIntervalKind.cpp b/src/Parsers/parseIntervalKind.cpp
similarity index 100%
rename from dbms/Parsers/parseIntervalKind.cpp
rename to src/Parsers/parseIntervalKind.cpp
diff --git a/dbms/Parsers/parseIntervalKind.h b/src/Parsers/parseIntervalKind.h
similarity index 100%
rename from dbms/Parsers/parseIntervalKind.h
rename to src/Parsers/parseIntervalKind.h
diff --git a/dbms/Parsers/parseQuery.cpp b/src/Parsers/parseQuery.cpp
similarity index 100%
rename from dbms/Parsers/parseQuery.cpp
rename to src/Parsers/parseQuery.cpp
diff --git a/dbms/Parsers/parseQuery.h b/src/Parsers/parseQuery.h
similarity index 100%
rename from dbms/Parsers/parseQuery.h
rename to src/Parsers/parseQuery.h
diff --git a/dbms/Parsers/parseUserName.cpp b/src/Parsers/parseUserName.cpp
similarity index 100%
rename from dbms/Parsers/parseUserName.cpp
rename to src/Parsers/parseUserName.cpp
diff --git a/dbms/Parsers/parseUserName.h b/src/Parsers/parseUserName.h
similarity index 100%
rename from dbms/Parsers/parseUserName.h
rename to src/Parsers/parseUserName.h
diff --git a/dbms/Parsers/queryToString.cpp b/src/Parsers/queryToString.cpp
similarity index 100%
rename from dbms/Parsers/queryToString.cpp
rename to src/Parsers/queryToString.cpp
diff --git a/dbms/Parsers/queryToString.h b/src/Parsers/queryToString.h
similarity index 100%
rename from dbms/Parsers/queryToString.h
rename to src/Parsers/queryToString.h
diff --git a/dbms/Parsers/tests/CMakeLists.txt b/src/Parsers/tests/CMakeLists.txt
similarity index 100%
rename from dbms/Parsers/tests/CMakeLists.txt
rename to src/Parsers/tests/CMakeLists.txt
diff --git a/dbms/Parsers/tests/create_parser.cpp b/src/Parsers/tests/create_parser.cpp
similarity index 100%
rename from dbms/Parsers/tests/create_parser.cpp
rename to src/Parsers/tests/create_parser.cpp
diff --git a/dbms/Parsers/tests/gtest_dictionary_parser.cpp b/src/Parsers/tests/gtest_dictionary_parser.cpp
similarity index 100%
rename from dbms/Parsers/tests/gtest_dictionary_parser.cpp
rename to src/Parsers/tests/gtest_dictionary_parser.cpp
diff --git a/dbms/Parsers/tests/lexer.cpp b/src/Parsers/tests/lexer.cpp
similarity index 100%
rename from dbms/Parsers/tests/lexer.cpp
rename to src/Parsers/tests/lexer.cpp
diff --git a/dbms/Parsers/tests/select_parser.cpp b/src/Parsers/tests/select_parser.cpp
similarity index 100%
rename from dbms/Parsers/tests/select_parser.cpp
rename to src/Parsers/tests/select_parser.cpp
diff --git a/dbms/Processors/CMakeLists.txt b/src/Processors/CMakeLists.txt
similarity index 100%
rename from dbms/Processors/CMakeLists.txt
rename to src/Processors/CMakeLists.txt
diff --git a/dbms/Processors/Chunk.cpp b/src/Processors/Chunk.cpp
similarity index 100%
rename from dbms/Processors/Chunk.cpp
rename to src/Processors/Chunk.cpp
diff --git a/dbms/Processors/Chunk.h b/src/Processors/Chunk.h
similarity index 100%
rename from dbms/Processors/Chunk.h
rename to src/Processors/Chunk.h
diff --git a/dbms/Processors/ConcatProcessor.cpp b/src/Processors/ConcatProcessor.cpp
similarity index 100%
rename from dbms/Processors/ConcatProcessor.cpp
rename to src/Processors/ConcatProcessor.cpp
diff --git a/dbms/Processors/ConcatProcessor.h b/src/Processors/ConcatProcessor.h
similarity index 100%
rename from dbms/Processors/ConcatProcessor.h
rename to src/Processors/ConcatProcessor.h
diff --git a/dbms/Processors/DelayedPortsProcessor.cpp b/src/Processors/DelayedPortsProcessor.cpp
similarity index 100%
rename from dbms/Processors/DelayedPortsProcessor.cpp
rename to src/Processors/DelayedPortsProcessor.cpp
diff --git a/dbms/Processors/DelayedPortsProcessor.h b/src/Processors/DelayedPortsProcessor.h
similarity index 100%
rename from dbms/Processors/DelayedPortsProcessor.h
rename to src/Processors/DelayedPortsProcessor.h
diff --git a/dbms/Processors/Executors/ParallelPipelineExecutor.cpp b/src/Processors/Executors/ParallelPipelineExecutor.cpp
similarity index 100%
rename from dbms/Processors/Executors/ParallelPipelineExecutor.cpp
rename to src/Processors/Executors/ParallelPipelineExecutor.cpp
diff --git a/dbms/Processors/Executors/ParallelPipelineExecutor.h b/src/Processors/Executors/ParallelPipelineExecutor.h
similarity index 100%
rename from dbms/Processors/Executors/ParallelPipelineExecutor.h
rename to src/Processors/Executors/ParallelPipelineExecutor.h
diff --git a/dbms/Processors/Executors/PipelineExecutor.cpp b/src/Processors/Executors/PipelineExecutor.cpp
similarity index 100%
rename from dbms/Processors/Executors/PipelineExecutor.cpp
rename to src/Processors/Executors/PipelineExecutor.cpp
diff --git a/dbms/Processors/Executors/PipelineExecutor.h b/src/Processors/Executors/PipelineExecutor.h
similarity index 100%
rename from dbms/Processors/Executors/PipelineExecutor.h
rename to src/Processors/Executors/PipelineExecutor.h
diff --git a/dbms/Processors/Executors/SequentialPipelineExecutor.cpp b/src/Processors/Executors/SequentialPipelineExecutor.cpp
similarity index 100%
rename from dbms/Processors/Executors/SequentialPipelineExecutor.cpp
rename to src/Processors/Executors/SequentialPipelineExecutor.cpp
diff --git a/dbms/Processors/Executors/SequentialPipelineExecutor.h b/src/Processors/Executors/SequentialPipelineExecutor.h
similarity index 100%
rename from dbms/Processors/Executors/SequentialPipelineExecutor.h
rename to src/Processors/Executors/SequentialPipelineExecutor.h
diff --git a/dbms/Processors/Executors/ThreadsQueue.h b/src/Processors/Executors/ThreadsQueue.h
similarity index 100%
rename from dbms/Processors/Executors/ThreadsQueue.h
rename to src/Processors/Executors/ThreadsQueue.h
diff --git a/dbms/Processors/Executors/TreeExecutorBlockInputStream.cpp b/src/Processors/Executors/TreeExecutorBlockInputStream.cpp
similarity index 100%
rename from dbms/Processors/Executors/TreeExecutorBlockInputStream.cpp
rename to src/Processors/Executors/TreeExecutorBlockInputStream.cpp
diff --git a/dbms/Processors/Executors/TreeExecutorBlockInputStream.h b/src/Processors/Executors/TreeExecutorBlockInputStream.h
similarity index 100%
rename from dbms/Processors/Executors/TreeExecutorBlockInputStream.h
rename to src/Processors/Executors/TreeExecutorBlockInputStream.h
diff --git a/dbms/Processors/Executors/traverse.h b/src/Processors/Executors/traverse.h
similarity index 100%
rename from dbms/Processors/Executors/traverse.h
rename to src/Processors/Executors/traverse.h
diff --git a/dbms/Processors/ForkProcessor.cpp b/src/Processors/ForkProcessor.cpp
similarity index 100%
rename from dbms/Processors/ForkProcessor.cpp
rename to src/Processors/ForkProcessor.cpp
diff --git a/dbms/Processors/ForkProcessor.h b/src/Processors/ForkProcessor.h
similarity index 100%
rename from dbms/Processors/ForkProcessor.h
rename to src/Processors/ForkProcessor.h
diff --git a/dbms/Processors/Formats/IInputFormat.cpp b/src/Processors/Formats/IInputFormat.cpp
similarity index 100%
rename from dbms/Processors/Formats/IInputFormat.cpp
rename to src/Processors/Formats/IInputFormat.cpp
diff --git a/dbms/Processors/Formats/IInputFormat.h b/src/Processors/Formats/IInputFormat.h
similarity index 100%
rename from dbms/Processors/Formats/IInputFormat.h
rename to src/Processors/Formats/IInputFormat.h
diff --git a/dbms/Processors/Formats/IOutputFormat.cpp b/src/Processors/Formats/IOutputFormat.cpp
similarity index 100%
rename from dbms/Processors/Formats/IOutputFormat.cpp
rename to src/Processors/Formats/IOutputFormat.cpp
diff --git a/dbms/Processors/Formats/IOutputFormat.h b/src/Processors/Formats/IOutputFormat.h
similarity index 100%
rename from dbms/Processors/Formats/IOutputFormat.h
rename to src/Processors/Formats/IOutputFormat.h
diff --git a/dbms/Processors/Formats/IRowInputFormat.cpp b/src/Processors/Formats/IRowInputFormat.cpp
similarity index 100%
rename from dbms/Processors/Formats/IRowInputFormat.cpp
rename to src/Processors/Formats/IRowInputFormat.cpp
diff --git a/dbms/Processors/Formats/IRowInputFormat.h b/src/Processors/Formats/IRowInputFormat.h
similarity index 100%
rename from dbms/Processors/Formats/IRowInputFormat.h
rename to src/Processors/Formats/IRowInputFormat.h
diff --git a/dbms/Processors/Formats/IRowOutputFormat.cpp b/src/Processors/Formats/IRowOutputFormat.cpp
similarity index 100%
rename from dbms/Processors/Formats/IRowOutputFormat.cpp
rename to src/Processors/Formats/IRowOutputFormat.cpp
diff --git a/dbms/Processors/Formats/IRowOutputFormat.h b/src/Processors/Formats/IRowOutputFormat.h
similarity index 100%
rename from dbms/Processors/Formats/IRowOutputFormat.h
rename to src/Processors/Formats/IRowOutputFormat.h
diff --git a/dbms/Processors/Formats/Impl/ArrowColumnToCHColumn.cpp b/src/Processors/Formats/Impl/ArrowColumnToCHColumn.cpp
similarity index 100%
rename from dbms/Processors/Formats/Impl/ArrowColumnToCHColumn.cpp
rename to src/Processors/Formats/Impl/ArrowColumnToCHColumn.cpp
diff --git a/dbms/Processors/Formats/Impl/ArrowColumnToCHColumn.h b/src/Processors/Formats/Impl/ArrowColumnToCHColumn.h
similarity index 100%
rename from dbms/Processors/Formats/Impl/ArrowColumnToCHColumn.h
rename to src/Processors/Formats/Impl/ArrowColumnToCHColumn.h
diff --git a/dbms/Processors/Formats/Impl/AvroRowInputFormat.cpp b/src/Processors/Formats/Impl/AvroRowInputFormat.cpp
similarity index 100%
rename from dbms/Processors/Formats/Impl/AvroRowInputFormat.cpp
rename to src/Processors/Formats/Impl/AvroRowInputFormat.cpp
diff --git a/dbms/Processors/Formats/Impl/AvroRowInputFormat.h b/src/Processors/Formats/Impl/AvroRowInputFormat.h
similarity index 100%
rename from dbms/Processors/Formats/Impl/AvroRowInputFormat.h
rename to src/Processors/Formats/Impl/AvroRowInputFormat.h
diff --git a/dbms/Processors/Formats/Impl/AvroRowOutputFormat.cpp b/src/Processors/Formats/Impl/AvroRowOutputFormat.cpp
similarity index 100%
rename from dbms/Processors/Formats/Impl/AvroRowOutputFormat.cpp
rename to src/Processors/Formats/Impl/AvroRowOutputFormat.cpp
diff --git a/dbms/Processors/Formats/Impl/AvroRowOutputFormat.h b/src/Processors/Formats/Impl/AvroRowOutputFormat.h
similarity index 100%
rename from dbms/Processors/Formats/Impl/AvroRowOutputFormat.h
rename to src/Processors/Formats/Impl/AvroRowOutputFormat.h
diff --git a/dbms/Processors/Formats/Impl/BinaryRowInputFormat.cpp b/src/Processors/Formats/Impl/BinaryRowInputFormat.cpp
similarity index 100%
rename from dbms/Processors/Formats/Impl/BinaryRowInputFormat.cpp
rename to src/Processors/Formats/Impl/BinaryRowInputFormat.cpp
diff --git a/dbms/Processors/Formats/Impl/BinaryRowInputFormat.h b/src/Processors/Formats/Impl/BinaryRowInputFormat.h
similarity index 100%
rename from dbms/Processors/Formats/Impl/BinaryRowInputFormat.h
rename to src/Processors/Formats/Impl/BinaryRowInputFormat.h
diff --git a/dbms/Processors/Formats/Impl/BinaryRowOutputFormat.cpp b/src/Processors/Formats/Impl/BinaryRowOutputFormat.cpp
similarity index 100%
rename from dbms/Processors/Formats/Impl/BinaryRowOutputFormat.cpp
rename to src/Processors/Formats/Impl/BinaryRowOutputFormat.cpp
diff --git a/dbms/Processors/Formats/Impl/BinaryRowOutputFormat.h b/src/Processors/Formats/Impl/BinaryRowOutputFormat.h
similarity index 100%
rename from dbms/Processors/Formats/Impl/BinaryRowOutputFormat.h
rename to src/Processors/Formats/Impl/BinaryRowOutputFormat.h
diff --git a/dbms/Processors/Formats/Impl/CMakeLists.txt b/src/Processors/Formats/Impl/CMakeLists.txt
similarity index 100%
rename from dbms/Processors/Formats/Impl/CMakeLists.txt
rename to src/Processors/Formats/Impl/CMakeLists.txt
diff --git a/dbms/Processors/Formats/Impl/CSVRowInputFormat.cpp b/src/Processors/Formats/Impl/CSVRowInputFormat.cpp
similarity index 100%
rename from dbms/Processors/Formats/Impl/CSVRowInputFormat.cpp
rename to src/Processors/Formats/Impl/CSVRowInputFormat.cpp
diff --git a/dbms/Processors/Formats/Impl/CSVRowInputFormat.h b/src/Processors/Formats/Impl/CSVRowInputFormat.h
similarity index 100%
rename from dbms/Processors/Formats/Impl/CSVRowInputFormat.h
rename to src/Processors/Formats/Impl/CSVRowInputFormat.h
diff --git a/dbms/Processors/Formats/Impl/CSVRowOutputFormat.cpp b/src/Processors/Formats/Impl/CSVRowOutputFormat.cpp
similarity index 100%
rename from dbms/Processors/Formats/Impl/CSVRowOutputFormat.cpp
rename to src/Processors/Formats/Impl/CSVRowOutputFormat.cpp
diff --git a/dbms/Processors/Formats/Impl/CSVRowOutputFormat.h b/src/Processors/Formats/Impl/CSVRowOutputFormat.h
similarity index 100%
rename from dbms/Processors/Formats/Impl/CSVRowOutputFormat.h
rename to src/Processors/Formats/Impl/CSVRowOutputFormat.h
diff --git a/dbms/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp b/src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp
similarity index 100%
rename from dbms/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp
rename to src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp
diff --git a/dbms/Processors/Formats/Impl/CapnProtoRowInputFormat.h b/src/Processors/Formats/Impl/CapnProtoRowInputFormat.h
similarity index 100%
rename from dbms/Processors/Formats/Impl/CapnProtoRowInputFormat.h
rename to src/Processors/Formats/Impl/CapnProtoRowInputFormat.h
diff --git a/dbms/Processors/Formats/Impl/ConstantExpressionTemplate.cpp b/src/Processors/Formats/Impl/ConstantExpressionTemplate.cpp
similarity index 100%
rename from dbms/Processors/Formats/Impl/ConstantExpressionTemplate.cpp
rename to src/Processors/Formats/Impl/ConstantExpressionTemplate.cpp
diff --git a/dbms/Processors/Formats/Impl/ConstantExpressionTemplate.h b/src/Processors/Formats/Impl/ConstantExpressionTemplate.h
similarity index 100%
rename from dbms/Processors/Formats/Impl/ConstantExpressionTemplate.h
rename to src/Processors/Formats/Impl/ConstantExpressionTemplate.h
diff --git a/dbms/Processors/Formats/Impl/JSONCompactEachRowRowInputFormat.cpp b/src/Processors/Formats/Impl/JSONCompactEachRowRowInputFormat.cpp
similarity index 100%
rename from dbms/Processors/Formats/Impl/JSONCompactEachRowRowInputFormat.cpp
rename to src/Processors/Formats/Impl/JSONCompactEachRowRowInputFormat.cpp
diff --git a/dbms/Processors/Formats/Impl/JSONCompactEachRowRowInputFormat.h b/src/Processors/Formats/Impl/JSONCompactEachRowRowInputFormat.h
similarity index 100%
rename from dbms/Processors/Formats/Impl/JSONCompactEachRowRowInputFormat.h
rename to src/Processors/Formats/Impl/JSONCompactEachRowRowInputFormat.h
diff --git a/dbms/Processors/Formats/Impl/JSONCompactEachRowRowOutputFormat.cpp b/src/Processors/Formats/Impl/JSONCompactEachRowRowOutputFormat.cpp
similarity index 100%
rename from dbms/Processors/Formats/Impl/JSONCompactEachRowRowOutputFormat.cpp
rename to src/Processors/Formats/Impl/JSONCompactEachRowRowOutputFormat.cpp
diff --git a/dbms/Processors/Formats/Impl/JSONCompactEachRowRowOutputFormat.h b/src/Processors/Formats/Impl/JSONCompactEachRowRowOutputFormat.h
similarity index 100%
rename from dbms/Processors/Formats/Impl/JSONCompactEachRowRowOutputFormat.h
rename to src/Processors/Formats/Impl/JSONCompactEachRowRowOutputFormat.h
diff --git a/dbms/Processors/Formats/Impl/JSONCompactRowOutputFormat.cpp b/src/Processors/Formats/Impl/JSONCompactRowOutputFormat.cpp
similarity index 100%
rename from dbms/Processors/Formats/Impl/JSONCompactRowOutputFormat.cpp
rename to src/Processors/Formats/Impl/JSONCompactRowOutputFormat.cpp
diff --git a/dbms/Processors/Formats/Impl/JSONCompactRowOutputFormat.h b/src/Processors/Formats/Impl/JSONCompactRowOutputFormat.h
similarity index 100%
rename from dbms/Processors/Formats/Impl/JSONCompactRowOutputFormat.h
rename to src/Processors/Formats/Impl/JSONCompactRowOutputFormat.h
diff --git a/dbms/Processors/Formats/Impl/JSONEachRowRowInputFormat.cpp b/src/Processors/Formats/Impl/JSONEachRowRowInputFormat.cpp
similarity index 100%
rename from dbms/Processors/Formats/Impl/JSONEachRowRowInputFormat.cpp
rename to src/Processors/Formats/Impl/JSONEachRowRowInputFormat.cpp
diff --git a/dbms/Processors/Formats/Impl/JSONEachRowRowInputFormat.h b/src/Processors/Formats/Impl/JSONEachRowRowInputFormat.h
similarity index 100%
rename from dbms/Processors/Formats/Impl/JSONEachRowRowInputFormat.h
rename to src/Processors/Formats/Impl/JSONEachRowRowInputFormat.h
diff --git a/dbms/Processors/Formats/Impl/JSONEachRowRowOutputFormat.cpp b/src/Processors/Formats/Impl/JSONEachRowRowOutputFormat.cpp
similarity index 100%
rename from dbms/Processors/Formats/Impl/JSONEachRowRowOutputFormat.cpp
rename to src/Processors/Formats/Impl/JSONEachRowRowOutputFormat.cpp
diff --git a/dbms/Processors/Formats/Impl/JSONEachRowRowOutputFormat.h b/src/Processors/Formats/Impl/JSONEachRowRowOutputFormat.h
similarity index 100%
rename from dbms/Processors/Formats/Impl/JSONEachRowRowOutputFormat.h
rename to src/Processors/Formats/Impl/JSONEachRowRowOutputFormat.h
diff --git a/dbms/Processors/Formats/Impl/JSONEachRowWithProgressRowOutputFormat.cpp b/src/Processors/Formats/Impl/JSONEachRowWithProgressRowOutputFormat.cpp
similarity index 100%
rename from dbms/Processors/Formats/Impl/JSONEachRowWithProgressRowOutputFormat.cpp
rename to src/Processors/Formats/Impl/JSONEachRowWithProgressRowOutputFormat.cpp
diff --git a/dbms/Processors/Formats/Impl/JSONEachRowWithProgressRowOutputFormat.h b/src/Processors/Formats/Impl/JSONEachRowWithProgressRowOutputFormat.h
similarity index 100%
rename from dbms/Processors/Formats/Impl/JSONEachRowWithProgressRowOutputFormat.h
rename to src/Processors/Formats/Impl/JSONEachRowWithProgressRowOutputFormat.h
diff --git a/dbms/Processors/Formats/Impl/JSONRowOutputFormat.cpp b/src/Processors/Formats/Impl/JSONRowOutputFormat.cpp
similarity index 100%
rename from dbms/Processors/Formats/Impl/JSONRowOutputFormat.cpp
rename to src/Processors/Formats/Impl/JSONRowOutputFormat.cpp
diff --git a/dbms/Processors/Formats/Impl/JSONRowOutputFormat.h b/src/Processors/Formats/Impl/JSONRowOutputFormat.h
similarity index 100%
rename from dbms/Processors/Formats/Impl/JSONRowOutputFormat.h
rename to src/Processors/Formats/Impl/JSONRowOutputFormat.h
diff --git a/dbms/Processors/Formats/Impl/MySQLOutputFormat.cpp b/src/Processors/Formats/Impl/MySQLOutputFormat.cpp
similarity index 100%
rename from dbms/Processors/Formats/Impl/MySQLOutputFormat.cpp
rename to src/Processors/Formats/Impl/MySQLOutputFormat.cpp
diff --git a/dbms/Processors/Formats/Impl/MySQLOutputFormat.h b/src/Processors/Formats/Impl/MySQLOutputFormat.h
similarity index 100%
rename from dbms/Processors/Formats/Impl/MySQLOutputFormat.h
rename to src/Processors/Formats/Impl/MySQLOutputFormat.h
diff --git a/dbms/Processors/Formats/Impl/NativeFormat.cpp b/src/Processors/Formats/Impl/NativeFormat.cpp
similarity index 100%
rename from dbms/Processors/Formats/Impl/NativeFormat.cpp
rename to src/Processors/Formats/Impl/NativeFormat.cpp
diff --git a/dbms/Processors/Formats/Impl/NullFormat.cpp b/src/Processors/Formats/Impl/NullFormat.cpp
similarity index 100%
rename from dbms/Processors/Formats/Impl/NullFormat.cpp
rename to src/Processors/Formats/Impl/NullFormat.cpp
diff --git a/dbms/Processors/Formats/Impl/ODBCDriver2BlockOutputFormat.cpp b/src/Processors/Formats/Impl/ODBCDriver2BlockOutputFormat.cpp
similarity index 100%
rename from dbms/Processors/Formats/Impl/ODBCDriver2BlockOutputFormat.cpp
rename to src/Processors/Formats/Impl/ODBCDriver2BlockOutputFormat.cpp
diff --git a/dbms/Processors/Formats/Impl/ODBCDriver2BlockOutputFormat.h b/src/Processors/Formats/Impl/ODBCDriver2BlockOutputFormat.h
similarity index 100%
rename from dbms/Processors/Formats/Impl/ODBCDriver2BlockOutputFormat.h
rename to src/Processors/Formats/Impl/ODBCDriver2BlockOutputFormat.h
diff --git a/dbms/Processors/Formats/Impl/ODBCDriverBlockOutputFormat.cpp b/src/Processors/Formats/Impl/ODBCDriverBlockOutputFormat.cpp
similarity index 100%
rename from dbms/Processors/Formats/Impl/ODBCDriverBlockOutputFormat.cpp
rename to src/Processors/Formats/Impl/ODBCDriverBlockOutputFormat.cpp
diff --git a/dbms/Processors/Formats/Impl/ODBCDriverBlockOutputFormat.h b/src/Processors/Formats/Impl/ODBCDriverBlockOutputFormat.h
similarity index 100%
rename from dbms/Processors/Formats/Impl/ODBCDriverBlockOutputFormat.h
rename to src/Processors/Formats/Impl/ODBCDriverBlockOutputFormat.h
diff --git a/dbms/Processors/Formats/Impl/ORCBlockInputFormat.cpp b/src/Processors/Formats/Impl/ORCBlockInputFormat.cpp
similarity index 100%
rename from dbms/Processors/Formats/Impl/ORCBlockInputFormat.cpp
rename to src/Processors/Formats/Impl/ORCBlockInputFormat.cpp
diff --git a/dbms/Processors/Formats/Impl/ORCBlockInputFormat.h b/src/Processors/Formats/Impl/ORCBlockInputFormat.h
similarity index 100%
rename from dbms/Processors/Formats/Impl/ORCBlockInputFormat.h
rename to src/Processors/Formats/Impl/ORCBlockInputFormat.h
diff --git a/dbms/Processors/Formats/Impl/ParquetBlockInputFormat.cpp b/src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp
similarity index 100%
rename from dbms/Processors/Formats/Impl/ParquetBlockInputFormat.cpp
rename to src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp
diff --git a/dbms/Processors/Formats/Impl/ParquetBlockInputFormat.h b/src/Processors/Formats/Impl/ParquetBlockInputFormat.h
similarity index 100%
rename from dbms/Processors/Formats/Impl/ParquetBlockInputFormat.h
rename to src/Processors/Formats/Impl/ParquetBlockInputFormat.h
diff --git a/dbms/Processors/Formats/Impl/ParquetBlockOutputFormat.cpp b/src/Processors/Formats/Impl/ParquetBlockOutputFormat.cpp
similarity index 100%
rename from dbms/Processors/Formats/Impl/ParquetBlockOutputFormat.cpp
rename to src/Processors/Formats/Impl/ParquetBlockOutputFormat.cpp
diff --git a/dbms/Processors/Formats/Impl/ParquetBlockOutputFormat.h b/src/Processors/Formats/Impl/ParquetBlockOutputFormat.h
similarity index 100%
rename from dbms/Processors/Formats/Impl/ParquetBlockOutputFormat.h
rename to src/Processors/Formats/Impl/ParquetBlockOutputFormat.h
diff --git a/dbms/Processors/Formats/Impl/PrettyBlockOutputFormat.cpp b/src/Processors/Formats/Impl/PrettyBlockOutputFormat.cpp
similarity index 100%
rename from dbms/Processors/Formats/Impl/PrettyBlockOutputFormat.cpp
rename to src/Processors/Formats/Impl/PrettyBlockOutputFormat.cpp
diff --git a/dbms/Processors/Formats/Impl/PrettyBlockOutputFormat.h b/src/Processors/Formats/Impl/PrettyBlockOutputFormat.h
similarity index 100%
rename from dbms/Processors/Formats/Impl/PrettyBlockOutputFormat.h
rename to src/Processors/Formats/Impl/PrettyBlockOutputFormat.h
diff --git a/dbms/Processors/Formats/Impl/PrettyCompactBlockOutputFormat.cpp b/src/Processors/Formats/Impl/PrettyCompactBlockOutputFormat.cpp
similarity index 100%
rename from dbms/Processors/Formats/Impl/PrettyCompactBlockOutputFormat.cpp
rename to src/Processors/Formats/Impl/PrettyCompactBlockOutputFormat.cpp
diff --git a/dbms/Processors/Formats/Impl/PrettyCompactBlockOutputFormat.h b/src/Processors/Formats/Impl/PrettyCompactBlockOutputFormat.h
similarity index 100%
rename from dbms/Processors/Formats/Impl/PrettyCompactBlockOutputFormat.h
rename to src/Processors/Formats/Impl/PrettyCompactBlockOutputFormat.h
diff --git a/dbms/Processors/Formats/Impl/PrettySpaceBlockOutputFormat.cpp b/src/Processors/Formats/Impl/PrettySpaceBlockOutputFormat.cpp
similarity index 100%
rename from dbms/Processors/Formats/Impl/PrettySpaceBlockOutputFormat.cpp
rename to src/Processors/Formats/Impl/PrettySpaceBlockOutputFormat.cpp
diff --git a/dbms/Processors/Formats/Impl/PrettySpaceBlockOutputFormat.h b/src/Processors/Formats/Impl/PrettySpaceBlockOutputFormat.h
similarity index 100%
rename from dbms/Processors/Formats/Impl/PrettySpaceBlockOutputFormat.h
rename to src/Processors/Formats/Impl/PrettySpaceBlockOutputFormat.h
diff --git a/dbms/Processors/Formats/Impl/ProtobufRowInputFormat.cpp b/src/Processors/Formats/Impl/ProtobufRowInputFormat.cpp
similarity index 100%
rename from dbms/Processors/Formats/Impl/ProtobufRowInputFormat.cpp
rename to src/Processors/Formats/Impl/ProtobufRowInputFormat.cpp
diff --git a/dbms/Processors/Formats/Impl/ProtobufRowInputFormat.h b/src/Processors/Formats/Impl/ProtobufRowInputFormat.h
similarity index 100%
rename from dbms/Processors/Formats/Impl/ProtobufRowInputFormat.h
rename to src/Processors/Formats/Impl/ProtobufRowInputFormat.h
diff --git a/dbms/Processors/Formats/Impl/ProtobufRowOutputFormat.cpp b/src/Processors/Formats/Impl/ProtobufRowOutputFormat.cpp
similarity index 100%
rename from dbms/Processors/Formats/Impl/ProtobufRowOutputFormat.cpp
rename to src/Processors/Formats/Impl/ProtobufRowOutputFormat.cpp
diff --git a/dbms/Processors/Formats/Impl/ProtobufRowOutputFormat.h b/src/Processors/Formats/Impl/ProtobufRowOutputFormat.h
similarity index 100%
rename from dbms/Processors/Formats/Impl/ProtobufRowOutputFormat.h
rename to src/Processors/Formats/Impl/ProtobufRowOutputFormat.h
diff --git a/dbms/Processors/Formats/Impl/RegexpRowInputFormat.cpp b/src/Processors/Formats/Impl/RegexpRowInputFormat.cpp
similarity index 100%
rename from dbms/Processors/Formats/Impl/RegexpRowInputFormat.cpp
rename to src/Processors/Formats/Impl/RegexpRowInputFormat.cpp
diff --git a/dbms/Processors/Formats/Impl/RegexpRowInputFormat.h b/src/Processors/Formats/Impl/RegexpRowInputFormat.h
similarity index 100%
rename from dbms/Processors/Formats/Impl/RegexpRowInputFormat.h
rename to src/Processors/Formats/Impl/RegexpRowInputFormat.h
diff --git a/dbms/Processors/Formats/Impl/TSKVRowInputFormat.cpp b/src/Processors/Formats/Impl/TSKVRowInputFormat.cpp
similarity index 100%
rename from dbms/Processors/Formats/Impl/TSKVRowInputFormat.cpp
rename to src/Processors/Formats/Impl/TSKVRowInputFormat.cpp
diff --git a/dbms/Processors/Formats/Impl/TSKVRowInputFormat.h b/src/Processors/Formats/Impl/TSKVRowInputFormat.h
similarity index 100%
rename from dbms/Processors/Formats/Impl/TSKVRowInputFormat.h
rename to src/Processors/Formats/Impl/TSKVRowInputFormat.h
diff --git a/dbms/Processors/Formats/Impl/TSKVRowOutputFormat.cpp b/src/Processors/Formats/Impl/TSKVRowOutputFormat.cpp
similarity index 100%
rename from dbms/Processors/Formats/Impl/TSKVRowOutputFormat.cpp
rename to src/Processors/Formats/Impl/TSKVRowOutputFormat.cpp
diff --git
a/dbms/Processors/Formats/Impl/TSKVRowOutputFormat.h b/src/Processors/Formats/Impl/TSKVRowOutputFormat.h similarity index 100% rename from dbms/Processors/Formats/Impl/TSKVRowOutputFormat.h rename to src/Processors/Formats/Impl/TSKVRowOutputFormat.h diff --git a/dbms/Processors/Formats/Impl/TabSeparatedRawRowOutputFormat.h b/src/Processors/Formats/Impl/TabSeparatedRawRowOutputFormat.h similarity index 100% rename from dbms/Processors/Formats/Impl/TabSeparatedRawRowOutputFormat.h rename to src/Processors/Formats/Impl/TabSeparatedRawRowOutputFormat.h diff --git a/dbms/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp b/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp similarity index 100% rename from dbms/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp rename to src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp diff --git a/dbms/Processors/Formats/Impl/TabSeparatedRowInputFormat.h b/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.h similarity index 100% rename from dbms/Processors/Formats/Impl/TabSeparatedRowInputFormat.h rename to src/Processors/Formats/Impl/TabSeparatedRowInputFormat.h diff --git a/dbms/Processors/Formats/Impl/TabSeparatedRowOutputFormat.cpp b/src/Processors/Formats/Impl/TabSeparatedRowOutputFormat.cpp similarity index 100% rename from dbms/Processors/Formats/Impl/TabSeparatedRowOutputFormat.cpp rename to src/Processors/Formats/Impl/TabSeparatedRowOutputFormat.cpp diff --git a/dbms/Processors/Formats/Impl/TabSeparatedRowOutputFormat.h b/src/Processors/Formats/Impl/TabSeparatedRowOutputFormat.h similarity index 100% rename from dbms/Processors/Formats/Impl/TabSeparatedRowOutputFormat.h rename to src/Processors/Formats/Impl/TabSeparatedRowOutputFormat.h diff --git a/dbms/Processors/Formats/Impl/TemplateBlockOutputFormat.cpp b/src/Processors/Formats/Impl/TemplateBlockOutputFormat.cpp similarity index 100% rename from dbms/Processors/Formats/Impl/TemplateBlockOutputFormat.cpp rename to src/Processors/Formats/Impl/TemplateBlockOutputFormat.cpp diff --git a/dbms/Processors/Formats/Impl/TemplateBlockOutputFormat.h b/src/Processors/Formats/Impl/TemplateBlockOutputFormat.h similarity index 100% rename from dbms/Processors/Formats/Impl/TemplateBlockOutputFormat.h rename to src/Processors/Formats/Impl/TemplateBlockOutputFormat.h diff --git a/dbms/Processors/Formats/Impl/TemplateRowInputFormat.cpp b/src/Processors/Formats/Impl/TemplateRowInputFormat.cpp similarity index 100% rename from dbms/Processors/Formats/Impl/TemplateRowInputFormat.cpp rename to src/Processors/Formats/Impl/TemplateRowInputFormat.cpp diff --git a/dbms/Processors/Formats/Impl/TemplateRowInputFormat.h b/src/Processors/Formats/Impl/TemplateRowInputFormat.h similarity index 100% rename from dbms/Processors/Formats/Impl/TemplateRowInputFormat.h rename to src/Processors/Formats/Impl/TemplateRowInputFormat.h diff --git a/dbms/Processors/Formats/Impl/ValuesBlockInputFormat.cpp b/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp similarity index 100% rename from dbms/Processors/Formats/Impl/ValuesBlockInputFormat.cpp rename to src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp diff --git a/dbms/Processors/Formats/Impl/ValuesBlockInputFormat.h b/src/Processors/Formats/Impl/ValuesBlockInputFormat.h similarity index 100% rename from dbms/Processors/Formats/Impl/ValuesBlockInputFormat.h rename to src/Processors/Formats/Impl/ValuesBlockInputFormat.h diff --git a/dbms/Processors/Formats/Impl/ValuesRowOutputFormat.cpp b/src/Processors/Formats/Impl/ValuesRowOutputFormat.cpp 
similarity index 100% rename from dbms/Processors/Formats/Impl/ValuesRowOutputFormat.cpp rename to src/Processors/Formats/Impl/ValuesRowOutputFormat.cpp diff --git a/dbms/Processors/Formats/Impl/ValuesRowOutputFormat.h b/src/Processors/Formats/Impl/ValuesRowOutputFormat.h similarity index 100% rename from dbms/Processors/Formats/Impl/ValuesRowOutputFormat.h rename to src/Processors/Formats/Impl/ValuesRowOutputFormat.h diff --git a/dbms/Processors/Formats/Impl/VerticalRowOutputFormat.cpp b/src/Processors/Formats/Impl/VerticalRowOutputFormat.cpp similarity index 100% rename from dbms/Processors/Formats/Impl/VerticalRowOutputFormat.cpp rename to src/Processors/Formats/Impl/VerticalRowOutputFormat.cpp diff --git a/dbms/Processors/Formats/Impl/VerticalRowOutputFormat.h b/src/Processors/Formats/Impl/VerticalRowOutputFormat.h similarity index 100% rename from dbms/Processors/Formats/Impl/VerticalRowOutputFormat.h rename to src/Processors/Formats/Impl/VerticalRowOutputFormat.h diff --git a/dbms/Processors/Formats/Impl/XMLRowOutputFormat.cpp b/src/Processors/Formats/Impl/XMLRowOutputFormat.cpp similarity index 100% rename from dbms/Processors/Formats/Impl/XMLRowOutputFormat.cpp rename to src/Processors/Formats/Impl/XMLRowOutputFormat.cpp diff --git a/dbms/Processors/Formats/Impl/XMLRowOutputFormat.h b/src/Processors/Formats/Impl/XMLRowOutputFormat.h similarity index 100% rename from dbms/Processors/Formats/Impl/XMLRowOutputFormat.h rename to src/Processors/Formats/Impl/XMLRowOutputFormat.h diff --git a/dbms/Processors/Formats/InputStreamFromInputFormat.h b/src/Processors/Formats/InputStreamFromInputFormat.h similarity index 100% rename from dbms/Processors/Formats/InputStreamFromInputFormat.h rename to src/Processors/Formats/InputStreamFromInputFormat.h diff --git a/dbms/Processors/Formats/LazyOutputFormat.cpp b/src/Processors/Formats/LazyOutputFormat.cpp similarity index 100% rename from dbms/Processors/Formats/LazyOutputFormat.cpp rename to src/Processors/Formats/LazyOutputFormat.cpp diff --git a/dbms/Processors/Formats/LazyOutputFormat.h b/src/Processors/Formats/LazyOutputFormat.h similarity index 100% rename from dbms/Processors/Formats/LazyOutputFormat.h rename to src/Processors/Formats/LazyOutputFormat.h diff --git a/dbms/Processors/Formats/OutputStreamToOutputFormat.cpp b/src/Processors/Formats/OutputStreamToOutputFormat.cpp similarity index 100% rename from dbms/Processors/Formats/OutputStreamToOutputFormat.cpp rename to src/Processors/Formats/OutputStreamToOutputFormat.cpp diff --git a/dbms/Processors/Formats/OutputStreamToOutputFormat.h b/src/Processors/Formats/OutputStreamToOutputFormat.h similarity index 100% rename from dbms/Processors/Formats/OutputStreamToOutputFormat.h rename to src/Processors/Formats/OutputStreamToOutputFormat.h diff --git a/dbms/Processors/Formats/RowInputFormatWithDiagnosticInfo.cpp b/src/Processors/Formats/RowInputFormatWithDiagnosticInfo.cpp similarity index 100% rename from dbms/Processors/Formats/RowInputFormatWithDiagnosticInfo.cpp rename to src/Processors/Formats/RowInputFormatWithDiagnosticInfo.cpp diff --git a/dbms/Processors/Formats/RowInputFormatWithDiagnosticInfo.h b/src/Processors/Formats/RowInputFormatWithDiagnosticInfo.h similarity index 100% rename from dbms/Processors/Formats/RowInputFormatWithDiagnosticInfo.h rename to src/Processors/Formats/RowInputFormatWithDiagnosticInfo.h diff --git a/dbms/Processors/IAccumulatingTransform.cpp b/src/Processors/IAccumulatingTransform.cpp similarity index 100% rename from 
dbms/Processors/IAccumulatingTransform.cpp rename to src/Processors/IAccumulatingTransform.cpp diff --git a/dbms/Processors/IAccumulatingTransform.h b/src/Processors/IAccumulatingTransform.h similarity index 100% rename from dbms/Processors/IAccumulatingTransform.h rename to src/Processors/IAccumulatingTransform.h diff --git a/dbms/Processors/IInflatingTransform.cpp b/src/Processors/IInflatingTransform.cpp similarity index 100% rename from dbms/Processors/IInflatingTransform.cpp rename to src/Processors/IInflatingTransform.cpp diff --git a/dbms/Processors/IInflatingTransform.h b/src/Processors/IInflatingTransform.h similarity index 100% rename from dbms/Processors/IInflatingTransform.h rename to src/Processors/IInflatingTransform.h diff --git a/dbms/Processors/IProcessor.cpp b/src/Processors/IProcessor.cpp similarity index 100% rename from dbms/Processors/IProcessor.cpp rename to src/Processors/IProcessor.cpp diff --git a/dbms/Processors/IProcessor.h b/src/Processors/IProcessor.h similarity index 100% rename from dbms/Processors/IProcessor.h rename to src/Processors/IProcessor.h diff --git a/dbms/Processors/ISimpleTransform.cpp b/src/Processors/ISimpleTransform.cpp similarity index 100% rename from dbms/Processors/ISimpleTransform.cpp rename to src/Processors/ISimpleTransform.cpp diff --git a/dbms/Processors/ISimpleTransform.h b/src/Processors/ISimpleTransform.h similarity index 100% rename from dbms/Processors/ISimpleTransform.h rename to src/Processors/ISimpleTransform.h diff --git a/dbms/Processors/ISink.cpp b/src/Processors/ISink.cpp similarity index 100% rename from dbms/Processors/ISink.cpp rename to src/Processors/ISink.cpp diff --git a/dbms/Processors/ISink.h b/src/Processors/ISink.h similarity index 100% rename from dbms/Processors/ISink.h rename to src/Processors/ISink.h diff --git a/dbms/Processors/ISource.cpp b/src/Processors/ISource.cpp similarity index 100% rename from dbms/Processors/ISource.cpp rename to src/Processors/ISource.cpp diff --git a/dbms/Processors/ISource.h b/src/Processors/ISource.h similarity index 100% rename from dbms/Processors/ISource.h rename to src/Processors/ISource.h diff --git a/dbms/Processors/LimitTransform.cpp b/src/Processors/LimitTransform.cpp similarity index 100% rename from dbms/Processors/LimitTransform.cpp rename to src/Processors/LimitTransform.cpp diff --git a/dbms/Processors/LimitTransform.h b/src/Processors/LimitTransform.h similarity index 100% rename from dbms/Processors/LimitTransform.h rename to src/Processors/LimitTransform.h diff --git a/dbms/Processors/NullSink.h b/src/Processors/NullSink.h similarity index 100% rename from dbms/Processors/NullSink.h rename to src/Processors/NullSink.h diff --git a/dbms/Processors/Pipe.cpp b/src/Processors/Pipe.cpp similarity index 100% rename from dbms/Processors/Pipe.cpp rename to src/Processors/Pipe.cpp diff --git a/dbms/Processors/Pipe.h b/src/Processors/Pipe.h similarity index 100% rename from dbms/Processors/Pipe.h rename to src/Processors/Pipe.h diff --git a/dbms/Processors/Port.cpp b/src/Processors/Port.cpp similarity index 100% rename from dbms/Processors/Port.cpp rename to src/Processors/Port.cpp diff --git a/dbms/Processors/Port.h b/src/Processors/Port.h similarity index 100% rename from dbms/Processors/Port.h rename to src/Processors/Port.h diff --git a/dbms/Processors/QueryPipeline.cpp b/src/Processors/QueryPipeline.cpp similarity index 100% rename from dbms/Processors/QueryPipeline.cpp rename to src/Processors/QueryPipeline.cpp diff --git a/dbms/Processors/QueryPipeline.h 
b/src/Processors/QueryPipeline.h similarity index 100% rename from dbms/Processors/QueryPipeline.h rename to src/Processors/QueryPipeline.h diff --git a/dbms/Processors/QueueBuffer.h b/src/Processors/QueueBuffer.h similarity index 100% rename from dbms/Processors/QueueBuffer.h rename to src/Processors/QueueBuffer.h diff --git a/dbms/Processors/ResizeProcessor.cpp b/src/Processors/ResizeProcessor.cpp similarity index 100% rename from dbms/Processors/ResizeProcessor.cpp rename to src/Processors/ResizeProcessor.cpp diff --git a/dbms/Processors/ResizeProcessor.h b/src/Processors/ResizeProcessor.h similarity index 100% rename from dbms/Processors/ResizeProcessor.h rename to src/Processors/ResizeProcessor.h diff --git a/dbms/Processors/RowsBeforeLimitCounter.h b/src/Processors/RowsBeforeLimitCounter.h similarity index 100% rename from dbms/Processors/RowsBeforeLimitCounter.h rename to src/Processors/RowsBeforeLimitCounter.h diff --git a/dbms/Processors/Sources/NullSource.h b/src/Processors/Sources/NullSource.h similarity index 100% rename from dbms/Processors/Sources/NullSource.h rename to src/Processors/Sources/NullSource.h diff --git a/dbms/Processors/Sources/SinkToOutputStream.cpp b/src/Processors/Sources/SinkToOutputStream.cpp similarity index 100% rename from dbms/Processors/Sources/SinkToOutputStream.cpp rename to src/Processors/Sources/SinkToOutputStream.cpp diff --git a/dbms/Processors/Sources/SinkToOutputStream.h b/src/Processors/Sources/SinkToOutputStream.h similarity index 100% rename from dbms/Processors/Sources/SinkToOutputStream.h rename to src/Processors/Sources/SinkToOutputStream.h diff --git a/dbms/Processors/Sources/SourceFromInputStream.cpp b/src/Processors/Sources/SourceFromInputStream.cpp similarity index 100% rename from dbms/Processors/Sources/SourceFromInputStream.cpp rename to src/Processors/Sources/SourceFromInputStream.cpp diff --git a/dbms/Processors/Sources/SourceFromInputStream.h b/src/Processors/Sources/SourceFromInputStream.h similarity index 100% rename from dbms/Processors/Sources/SourceFromInputStream.h rename to src/Processors/Sources/SourceFromInputStream.h diff --git a/dbms/Processors/Sources/SourceFromSingleChunk.h b/src/Processors/Sources/SourceFromSingleChunk.h similarity index 100% rename from dbms/Processors/Sources/SourceFromSingleChunk.h rename to src/Processors/Sources/SourceFromSingleChunk.h diff --git a/dbms/Processors/Sources/SourceWithProgress.cpp b/src/Processors/Sources/SourceWithProgress.cpp similarity index 100% rename from dbms/Processors/Sources/SourceWithProgress.cpp rename to src/Processors/Sources/SourceWithProgress.cpp diff --git a/dbms/Processors/Sources/SourceWithProgress.h b/src/Processors/Sources/SourceWithProgress.h similarity index 100% rename from dbms/Processors/Sources/SourceWithProgress.h rename to src/Processors/Sources/SourceWithProgress.h diff --git a/dbms/Processors/Transforms/AddingConstColumnTransform.h b/src/Processors/Transforms/AddingConstColumnTransform.h similarity index 100% rename from dbms/Processors/Transforms/AddingConstColumnTransform.h rename to src/Processors/Transforms/AddingConstColumnTransform.h diff --git a/dbms/Processors/Transforms/AddingMissedTransform.cpp b/src/Processors/Transforms/AddingMissedTransform.cpp similarity index 100% rename from dbms/Processors/Transforms/AddingMissedTransform.cpp rename to src/Processors/Transforms/AddingMissedTransform.cpp diff --git a/dbms/Processors/Transforms/AddingMissedTransform.h b/src/Processors/Transforms/AddingMissedTransform.h similarity index 100% rename 
from dbms/Processors/Transforms/AddingMissedTransform.h rename to src/Processors/Transforms/AddingMissedTransform.h diff --git a/dbms/Processors/Transforms/AggregatingTransform.cpp b/src/Processors/Transforms/AggregatingTransform.cpp similarity index 100% rename from dbms/Processors/Transforms/AggregatingTransform.cpp rename to src/Processors/Transforms/AggregatingTransform.cpp diff --git a/dbms/Processors/Transforms/AggregatingTransform.h b/src/Processors/Transforms/AggregatingTransform.h similarity index 100% rename from dbms/Processors/Transforms/AggregatingTransform.h rename to src/Processors/Transforms/AggregatingTransform.h diff --git a/dbms/Processors/Transforms/ConvertingTransform.cpp b/src/Processors/Transforms/ConvertingTransform.cpp similarity index 100% rename from dbms/Processors/Transforms/ConvertingTransform.cpp rename to src/Processors/Transforms/ConvertingTransform.cpp diff --git a/dbms/Processors/Transforms/ConvertingTransform.h b/src/Processors/Transforms/ConvertingTransform.h similarity index 100% rename from dbms/Processors/Transforms/ConvertingTransform.h rename to src/Processors/Transforms/ConvertingTransform.h diff --git a/dbms/Processors/Transforms/CreatingSetsTransform.cpp b/src/Processors/Transforms/CreatingSetsTransform.cpp similarity index 100% rename from dbms/Processors/Transforms/CreatingSetsTransform.cpp rename to src/Processors/Transforms/CreatingSetsTransform.cpp diff --git a/dbms/Processors/Transforms/CreatingSetsTransform.h b/src/Processors/Transforms/CreatingSetsTransform.h similarity index 100% rename from dbms/Processors/Transforms/CreatingSetsTransform.h rename to src/Processors/Transforms/CreatingSetsTransform.h diff --git a/dbms/Processors/Transforms/CubeTransform.cpp b/src/Processors/Transforms/CubeTransform.cpp similarity index 100% rename from dbms/Processors/Transforms/CubeTransform.cpp rename to src/Processors/Transforms/CubeTransform.cpp diff --git a/dbms/Processors/Transforms/CubeTransform.h b/src/Processors/Transforms/CubeTransform.h similarity index 100% rename from dbms/Processors/Transforms/CubeTransform.h rename to src/Processors/Transforms/CubeTransform.h diff --git a/dbms/Processors/Transforms/DistinctTransform.cpp b/src/Processors/Transforms/DistinctTransform.cpp similarity index 100% rename from dbms/Processors/Transforms/DistinctTransform.cpp rename to src/Processors/Transforms/DistinctTransform.cpp diff --git a/dbms/Processors/Transforms/DistinctTransform.h b/src/Processors/Transforms/DistinctTransform.h similarity index 100% rename from dbms/Processors/Transforms/DistinctTransform.h rename to src/Processors/Transforms/DistinctTransform.h diff --git a/dbms/Processors/Transforms/ExpressionTransform.cpp b/src/Processors/Transforms/ExpressionTransform.cpp similarity index 100% rename from dbms/Processors/Transforms/ExpressionTransform.cpp rename to src/Processors/Transforms/ExpressionTransform.cpp diff --git a/dbms/Processors/Transforms/ExpressionTransform.h b/src/Processors/Transforms/ExpressionTransform.h similarity index 100% rename from dbms/Processors/Transforms/ExpressionTransform.h rename to src/Processors/Transforms/ExpressionTransform.h diff --git a/dbms/Processors/Transforms/ExtremesTransform.cpp b/src/Processors/Transforms/ExtremesTransform.cpp similarity index 100% rename from dbms/Processors/Transforms/ExtremesTransform.cpp rename to src/Processors/Transforms/ExtremesTransform.cpp diff --git a/dbms/Processors/Transforms/ExtremesTransform.h b/src/Processors/Transforms/ExtremesTransform.h similarity index 100% rename from 
dbms/Processors/Transforms/ExtremesTransform.h rename to src/Processors/Transforms/ExtremesTransform.h diff --git a/dbms/Processors/Transforms/FillingTransform.cpp b/src/Processors/Transforms/FillingTransform.cpp similarity index 100% rename from dbms/Processors/Transforms/FillingTransform.cpp rename to src/Processors/Transforms/FillingTransform.cpp diff --git a/dbms/Processors/Transforms/FillingTransform.h b/src/Processors/Transforms/FillingTransform.h similarity index 100% rename from dbms/Processors/Transforms/FillingTransform.h rename to src/Processors/Transforms/FillingTransform.h diff --git a/dbms/Processors/Transforms/FilterTransform.cpp b/src/Processors/Transforms/FilterTransform.cpp similarity index 100% rename from dbms/Processors/Transforms/FilterTransform.cpp rename to src/Processors/Transforms/FilterTransform.cpp diff --git a/dbms/Processors/Transforms/FilterTransform.h b/src/Processors/Transforms/FilterTransform.h similarity index 100% rename from dbms/Processors/Transforms/FilterTransform.h rename to src/Processors/Transforms/FilterTransform.h diff --git a/dbms/Processors/Transforms/FinishSortingTransform.cpp b/src/Processors/Transforms/FinishSortingTransform.cpp similarity index 100% rename from dbms/Processors/Transforms/FinishSortingTransform.cpp rename to src/Processors/Transforms/FinishSortingTransform.cpp diff --git a/dbms/Processors/Transforms/FinishSortingTransform.h b/src/Processors/Transforms/FinishSortingTransform.h similarity index 100% rename from dbms/Processors/Transforms/FinishSortingTransform.h rename to src/Processors/Transforms/FinishSortingTransform.h diff --git a/dbms/Processors/Transforms/InflatingExpressionTransform.cpp b/src/Processors/Transforms/InflatingExpressionTransform.cpp similarity index 100% rename from dbms/Processors/Transforms/InflatingExpressionTransform.cpp rename to src/Processors/Transforms/InflatingExpressionTransform.cpp diff --git a/dbms/Processors/Transforms/InflatingExpressionTransform.h b/src/Processors/Transforms/InflatingExpressionTransform.h similarity index 100% rename from dbms/Processors/Transforms/InflatingExpressionTransform.h rename to src/Processors/Transforms/InflatingExpressionTransform.h diff --git a/dbms/Processors/Transforms/LimitByTransform.cpp b/src/Processors/Transforms/LimitByTransform.cpp similarity index 100% rename from dbms/Processors/Transforms/LimitByTransform.cpp rename to src/Processors/Transforms/LimitByTransform.cpp diff --git a/dbms/Processors/Transforms/LimitByTransform.h b/src/Processors/Transforms/LimitByTransform.h similarity index 100% rename from dbms/Processors/Transforms/LimitByTransform.h rename to src/Processors/Transforms/LimitByTransform.h diff --git a/dbms/Processors/Transforms/LimitsCheckingTransform.cpp b/src/Processors/Transforms/LimitsCheckingTransform.cpp similarity index 100% rename from dbms/Processors/Transforms/LimitsCheckingTransform.cpp rename to src/Processors/Transforms/LimitsCheckingTransform.cpp diff --git a/dbms/Processors/Transforms/LimitsCheckingTransform.h b/src/Processors/Transforms/LimitsCheckingTransform.h similarity index 100% rename from dbms/Processors/Transforms/LimitsCheckingTransform.h rename to src/Processors/Transforms/LimitsCheckingTransform.h diff --git a/dbms/Processors/Transforms/MaterializingTransform.cpp b/src/Processors/Transforms/MaterializingTransform.cpp similarity index 100% rename from dbms/Processors/Transforms/MaterializingTransform.cpp rename to src/Processors/Transforms/MaterializingTransform.cpp diff --git 
a/dbms/Processors/Transforms/MaterializingTransform.h b/src/Processors/Transforms/MaterializingTransform.h similarity index 100% rename from dbms/Processors/Transforms/MaterializingTransform.h rename to src/Processors/Transforms/MaterializingTransform.h diff --git a/dbms/Processors/Transforms/MergeSortingTransform.cpp b/src/Processors/Transforms/MergeSortingTransform.cpp similarity index 100% rename from dbms/Processors/Transforms/MergeSortingTransform.cpp rename to src/Processors/Transforms/MergeSortingTransform.cpp diff --git a/dbms/Processors/Transforms/MergeSortingTransform.h b/src/Processors/Transforms/MergeSortingTransform.h similarity index 100% rename from dbms/Processors/Transforms/MergeSortingTransform.h rename to src/Processors/Transforms/MergeSortingTransform.h diff --git a/dbms/Processors/Transforms/MergingAggregatedMemoryEfficientTransform.cpp b/src/Processors/Transforms/MergingAggregatedMemoryEfficientTransform.cpp similarity index 100% rename from dbms/Processors/Transforms/MergingAggregatedMemoryEfficientTransform.cpp rename to src/Processors/Transforms/MergingAggregatedMemoryEfficientTransform.cpp diff --git a/dbms/Processors/Transforms/MergingAggregatedMemoryEfficientTransform.h b/src/Processors/Transforms/MergingAggregatedMemoryEfficientTransform.h similarity index 100% rename from dbms/Processors/Transforms/MergingAggregatedMemoryEfficientTransform.h rename to src/Processors/Transforms/MergingAggregatedMemoryEfficientTransform.h diff --git a/dbms/Processors/Transforms/MergingAggregatedTransform.cpp b/src/Processors/Transforms/MergingAggregatedTransform.cpp similarity index 100% rename from dbms/Processors/Transforms/MergingAggregatedTransform.cpp rename to src/Processors/Transforms/MergingAggregatedTransform.cpp diff --git a/dbms/Processors/Transforms/MergingAggregatedTransform.h b/src/Processors/Transforms/MergingAggregatedTransform.h similarity index 100% rename from dbms/Processors/Transforms/MergingAggregatedTransform.h rename to src/Processors/Transforms/MergingAggregatedTransform.h diff --git a/dbms/Processors/Transforms/MergingSortedTransform.cpp b/src/Processors/Transforms/MergingSortedTransform.cpp similarity index 100% rename from dbms/Processors/Transforms/MergingSortedTransform.cpp rename to src/Processors/Transforms/MergingSortedTransform.cpp diff --git a/dbms/Processors/Transforms/MergingSortedTransform.h b/src/Processors/Transforms/MergingSortedTransform.h similarity index 100% rename from dbms/Processors/Transforms/MergingSortedTransform.h rename to src/Processors/Transforms/MergingSortedTransform.h diff --git a/dbms/Processors/Transforms/PartialSortingTransform.cpp b/src/Processors/Transforms/PartialSortingTransform.cpp similarity index 100% rename from dbms/Processors/Transforms/PartialSortingTransform.cpp rename to src/Processors/Transforms/PartialSortingTransform.cpp diff --git a/dbms/Processors/Transforms/PartialSortingTransform.h b/src/Processors/Transforms/PartialSortingTransform.h similarity index 100% rename from dbms/Processors/Transforms/PartialSortingTransform.h rename to src/Processors/Transforms/PartialSortingTransform.h diff --git a/dbms/Processors/Transforms/ReverseTransform.cpp b/src/Processors/Transforms/ReverseTransform.cpp similarity index 100% rename from dbms/Processors/Transforms/ReverseTransform.cpp rename to src/Processors/Transforms/ReverseTransform.cpp diff --git a/dbms/Processors/Transforms/ReverseTransform.h b/src/Processors/Transforms/ReverseTransform.h similarity index 100% rename from 
dbms/Processors/Transforms/ReverseTransform.h rename to src/Processors/Transforms/ReverseTransform.h diff --git a/dbms/Processors/Transforms/RollupTransform.cpp b/src/Processors/Transforms/RollupTransform.cpp similarity index 100% rename from dbms/Processors/Transforms/RollupTransform.cpp rename to src/Processors/Transforms/RollupTransform.cpp diff --git a/dbms/Processors/Transforms/RollupTransform.h b/src/Processors/Transforms/RollupTransform.h similarity index 100% rename from dbms/Processors/Transforms/RollupTransform.h rename to src/Processors/Transforms/RollupTransform.h diff --git a/dbms/Processors/Transforms/SortingTransform.cpp b/src/Processors/Transforms/SortingTransform.cpp similarity index 100% rename from dbms/Processors/Transforms/SortingTransform.cpp rename to src/Processors/Transforms/SortingTransform.cpp diff --git a/dbms/Processors/Transforms/SortingTransform.h b/src/Processors/Transforms/SortingTransform.h similarity index 100% rename from dbms/Processors/Transforms/SortingTransform.h rename to src/Processors/Transforms/SortingTransform.h diff --git a/dbms/Processors/Transforms/TotalsHavingTransform.cpp b/src/Processors/Transforms/TotalsHavingTransform.cpp similarity index 100% rename from dbms/Processors/Transforms/TotalsHavingTransform.cpp rename to src/Processors/Transforms/TotalsHavingTransform.cpp diff --git a/dbms/Processors/Transforms/TotalsHavingTransform.h b/src/Processors/Transforms/TotalsHavingTransform.h similarity index 100% rename from dbms/Processors/Transforms/TotalsHavingTransform.h rename to src/Processors/Transforms/TotalsHavingTransform.h diff --git a/dbms/Processors/printPipeline.h b/src/Processors/printPipeline.h similarity index 100% rename from dbms/Processors/printPipeline.h rename to src/Processors/printPipeline.h diff --git a/dbms/Processors/tests/CMakeLists.txt b/src/Processors/tests/CMakeLists.txt similarity index 100% rename from dbms/Processors/tests/CMakeLists.txt rename to src/Processors/tests/CMakeLists.txt diff --git a/dbms/Processors/tests/gtest_exception_on_incorrect_pipeline.cpp b/src/Processors/tests/gtest_exception_on_incorrect_pipeline.cpp similarity index 100% rename from dbms/Processors/tests/gtest_exception_on_incorrect_pipeline.cpp rename to src/Processors/tests/gtest_exception_on_incorrect_pipeline.cpp diff --git a/dbms/Processors/tests/processors_test.cpp b/src/Processors/tests/processors_test.cpp similarity index 100% rename from dbms/Processors/tests/processors_test.cpp rename to src/Processors/tests/processors_test.cpp diff --git a/dbms/Processors/tests/processors_test_aggregation.cpp b/src/Processors/tests/processors_test_aggregation.cpp similarity index 100% rename from dbms/Processors/tests/processors_test_aggregation.cpp rename to src/Processors/tests/processors_test_aggregation.cpp diff --git a/dbms/Processors/tests/processors_test_chain.cpp b/src/Processors/tests/processors_test_chain.cpp similarity index 100% rename from dbms/Processors/tests/processors_test_chain.cpp rename to src/Processors/tests/processors_test_chain.cpp diff --git a/dbms/Processors/tests/processors_test_expand_pipeline.cpp b/src/Processors/tests/processors_test_expand_pipeline.cpp similarity index 100% rename from dbms/Processors/tests/processors_test_expand_pipeline.cpp rename to src/Processors/tests/processors_test_expand_pipeline.cpp diff --git a/dbms/Processors/tests/processors_test_merge.cpp b/src/Processors/tests/processors_test_merge.cpp similarity index 100% rename from dbms/Processors/tests/processors_test_merge.cpp rename to 
src/Processors/tests/processors_test_merge.cpp diff --git a/dbms/Processors/tests/processors_test_merge_sorting_transform.cpp b/src/Processors/tests/processors_test_merge_sorting_transform.cpp similarity index 100% rename from dbms/Processors/tests/processors_test_merge_sorting_transform.cpp rename to src/Processors/tests/processors_test_merge_sorting_transform.cpp diff --git a/dbms/Processors/tests/processors_test_merging_sorted_transform.cpp b/src/Processors/tests/processors_test_merging_sorted_transform.cpp similarity index 100% rename from dbms/Processors/tests/processors_test_merging_sorted_transform.cpp rename to src/Processors/tests/processors_test_merging_sorted_transform.cpp diff --git a/dbms/Storages/AlterCommands.cpp b/src/Storages/AlterCommands.cpp similarity index 100% rename from dbms/Storages/AlterCommands.cpp rename to src/Storages/AlterCommands.cpp diff --git a/dbms/Storages/AlterCommands.h b/src/Storages/AlterCommands.h similarity index 100% rename from dbms/Storages/AlterCommands.h rename to src/Storages/AlterCommands.h diff --git a/dbms/Storages/CMakeLists.txt b/src/Storages/CMakeLists.txt similarity index 100% rename from dbms/Storages/CMakeLists.txt rename to src/Storages/CMakeLists.txt diff --git a/dbms/Storages/CheckResults.h b/src/Storages/CheckResults.h similarity index 100% rename from dbms/Storages/CheckResults.h rename to src/Storages/CheckResults.h diff --git a/dbms/Storages/ColumnCodec.h b/src/Storages/ColumnCodec.h similarity index 100% rename from dbms/Storages/ColumnCodec.h rename to src/Storages/ColumnCodec.h diff --git a/dbms/Storages/ColumnDefault.cpp b/src/Storages/ColumnDefault.cpp similarity index 100% rename from dbms/Storages/ColumnDefault.cpp rename to src/Storages/ColumnDefault.cpp diff --git a/dbms/Storages/ColumnDefault.h b/src/Storages/ColumnDefault.h similarity index 100% rename from dbms/Storages/ColumnDefault.h rename to src/Storages/ColumnDefault.h diff --git a/dbms/Storages/ColumnDependency.h b/src/Storages/ColumnDependency.h similarity index 100% rename from dbms/Storages/ColumnDependency.h rename to src/Storages/ColumnDependency.h diff --git a/dbms/Storages/ColumnsDescription.cpp b/src/Storages/ColumnsDescription.cpp similarity index 100% rename from dbms/Storages/ColumnsDescription.cpp rename to src/Storages/ColumnsDescription.cpp diff --git a/dbms/Storages/ColumnsDescription.h b/src/Storages/ColumnsDescription.h similarity index 100% rename from dbms/Storages/ColumnsDescription.h rename to src/Storages/ColumnsDescription.h diff --git a/dbms/Storages/CompressionCodecSelector.h b/src/Storages/CompressionCodecSelector.h similarity index 100% rename from dbms/Storages/CompressionCodecSelector.h rename to src/Storages/CompressionCodecSelector.h diff --git a/dbms/Storages/ConstraintsDescription.cpp b/src/Storages/ConstraintsDescription.cpp similarity index 100% rename from dbms/Storages/ConstraintsDescription.cpp rename to src/Storages/ConstraintsDescription.cpp diff --git a/dbms/Storages/ConstraintsDescription.h b/src/Storages/ConstraintsDescription.h similarity index 100% rename from dbms/Storages/ConstraintsDescription.h rename to src/Storages/ConstraintsDescription.h diff --git a/dbms/Storages/Distributed/DirectoryMonitor.cpp b/src/Storages/Distributed/DirectoryMonitor.cpp similarity index 100% rename from dbms/Storages/Distributed/DirectoryMonitor.cpp rename to src/Storages/Distributed/DirectoryMonitor.cpp diff --git a/dbms/Storages/Distributed/DirectoryMonitor.h b/src/Storages/Distributed/DirectoryMonitor.h similarity index 100% 
rename from dbms/Storages/Distributed/DirectoryMonitor.h rename to src/Storages/Distributed/DirectoryMonitor.h diff --git a/dbms/Storages/Distributed/DistributedBlockOutputStream.cpp b/src/Storages/Distributed/DistributedBlockOutputStream.cpp similarity index 100% rename from dbms/Storages/Distributed/DistributedBlockOutputStream.cpp rename to src/Storages/Distributed/DistributedBlockOutputStream.cpp diff --git a/dbms/Storages/Distributed/DistributedBlockOutputStream.h b/src/Storages/Distributed/DistributedBlockOutputStream.h similarity index 100% rename from dbms/Storages/Distributed/DistributedBlockOutputStream.h rename to src/Storages/Distributed/DistributedBlockOutputStream.h diff --git a/dbms/Storages/IStorage.cpp b/src/Storages/IStorage.cpp similarity index 100% rename from dbms/Storages/IStorage.cpp rename to src/Storages/IStorage.cpp diff --git a/dbms/Storages/IStorage.h b/src/Storages/IStorage.h similarity index 100% rename from dbms/Storages/IStorage.h rename to src/Storages/IStorage.h diff --git a/dbms/Storages/IStorage_fwd.h b/src/Storages/IStorage_fwd.h similarity index 100% rename from dbms/Storages/IStorage_fwd.h rename to src/Storages/IStorage_fwd.h diff --git a/dbms/Storages/IndicesDescription.cpp b/src/Storages/IndicesDescription.cpp similarity index 100% rename from dbms/Storages/IndicesDescription.cpp rename to src/Storages/IndicesDescription.cpp diff --git a/dbms/Storages/IndicesDescription.h b/src/Storages/IndicesDescription.h similarity index 100% rename from dbms/Storages/IndicesDescription.h rename to src/Storages/IndicesDescription.h diff --git a/dbms/Storages/Kafka/Buffer_fwd.h b/src/Storages/Kafka/Buffer_fwd.h similarity index 100% rename from dbms/Storages/Kafka/Buffer_fwd.h rename to src/Storages/Kafka/Buffer_fwd.h diff --git a/dbms/Storages/Kafka/KafkaBlockInputStream.cpp b/src/Storages/Kafka/KafkaBlockInputStream.cpp similarity index 100% rename from dbms/Storages/Kafka/KafkaBlockInputStream.cpp rename to src/Storages/Kafka/KafkaBlockInputStream.cpp diff --git a/dbms/Storages/Kafka/KafkaBlockInputStream.h b/src/Storages/Kafka/KafkaBlockInputStream.h similarity index 100% rename from dbms/Storages/Kafka/KafkaBlockInputStream.h rename to src/Storages/Kafka/KafkaBlockInputStream.h diff --git a/dbms/Storages/Kafka/KafkaBlockOutputStream.cpp b/src/Storages/Kafka/KafkaBlockOutputStream.cpp similarity index 100% rename from dbms/Storages/Kafka/KafkaBlockOutputStream.cpp rename to src/Storages/Kafka/KafkaBlockOutputStream.cpp diff --git a/dbms/Storages/Kafka/KafkaBlockOutputStream.h b/src/Storages/Kafka/KafkaBlockOutputStream.h similarity index 100% rename from dbms/Storages/Kafka/KafkaBlockOutputStream.h rename to src/Storages/Kafka/KafkaBlockOutputStream.h diff --git a/dbms/Storages/Kafka/KafkaSettings.cpp b/src/Storages/Kafka/KafkaSettings.cpp similarity index 100% rename from dbms/Storages/Kafka/KafkaSettings.cpp rename to src/Storages/Kafka/KafkaSettings.cpp diff --git a/dbms/Storages/Kafka/KafkaSettings.h b/src/Storages/Kafka/KafkaSettings.h similarity index 100% rename from dbms/Storages/Kafka/KafkaSettings.h rename to src/Storages/Kafka/KafkaSettings.h diff --git a/dbms/Storages/Kafka/ReadBufferFromKafkaConsumer.cpp b/src/Storages/Kafka/ReadBufferFromKafkaConsumer.cpp similarity index 100% rename from dbms/Storages/Kafka/ReadBufferFromKafkaConsumer.cpp rename to src/Storages/Kafka/ReadBufferFromKafkaConsumer.cpp diff --git a/dbms/Storages/Kafka/ReadBufferFromKafkaConsumer.h b/src/Storages/Kafka/ReadBufferFromKafkaConsumer.h similarity index 100% rename from 
dbms/Storages/Kafka/ReadBufferFromKafkaConsumer.h rename to src/Storages/Kafka/ReadBufferFromKafkaConsumer.h diff --git a/dbms/Storages/Kafka/StorageKafka.cpp b/src/Storages/Kafka/StorageKafka.cpp similarity index 100% rename from dbms/Storages/Kafka/StorageKafka.cpp rename to src/Storages/Kafka/StorageKafka.cpp diff --git a/dbms/Storages/Kafka/StorageKafka.h b/src/Storages/Kafka/StorageKafka.h similarity index 100% rename from dbms/Storages/Kafka/StorageKafka.h rename to src/Storages/Kafka/StorageKafka.h diff --git a/dbms/Storages/Kafka/WriteBufferToKafkaProducer.cpp b/src/Storages/Kafka/WriteBufferToKafkaProducer.cpp similarity index 100% rename from dbms/Storages/Kafka/WriteBufferToKafkaProducer.cpp rename to src/Storages/Kafka/WriteBufferToKafkaProducer.cpp diff --git a/dbms/Storages/Kafka/WriteBufferToKafkaProducer.h b/src/Storages/Kafka/WriteBufferToKafkaProducer.h similarity index 100% rename from dbms/Storages/Kafka/WriteBufferToKafkaProducer.h rename to src/Storages/Kafka/WriteBufferToKafkaProducer.h diff --git a/dbms/Storages/LiveView/LiveViewBlockInputStream.h b/src/Storages/LiveView/LiveViewBlockInputStream.h similarity index 100% rename from dbms/Storages/LiveView/LiveViewBlockInputStream.h rename to src/Storages/LiveView/LiveViewBlockInputStream.h diff --git a/dbms/Storages/LiveView/LiveViewBlockOutputStream.h b/src/Storages/LiveView/LiveViewBlockOutputStream.h similarity index 100% rename from dbms/Storages/LiveView/LiveViewBlockOutputStream.h rename to src/Storages/LiveView/LiveViewBlockOutputStream.h diff --git a/dbms/Storages/LiveView/LiveViewCommands.h b/src/Storages/LiveView/LiveViewCommands.h similarity index 100% rename from dbms/Storages/LiveView/LiveViewCommands.h rename to src/Storages/LiveView/LiveViewCommands.h diff --git a/dbms/Storages/LiveView/LiveViewEventsBlockInputStream.h b/src/Storages/LiveView/LiveViewEventsBlockInputStream.h similarity index 100% rename from dbms/Storages/LiveView/LiveViewEventsBlockInputStream.h rename to src/Storages/LiveView/LiveViewEventsBlockInputStream.h diff --git a/dbms/Storages/LiveView/StorageBlocks.h b/src/Storages/LiveView/StorageBlocks.h similarity index 100% rename from dbms/Storages/LiveView/StorageBlocks.h rename to src/Storages/LiveView/StorageBlocks.h diff --git a/dbms/Storages/LiveView/StorageLiveView.cpp b/src/Storages/LiveView/StorageLiveView.cpp similarity index 100% rename from dbms/Storages/LiveView/StorageLiveView.cpp rename to src/Storages/LiveView/StorageLiveView.cpp diff --git a/dbms/Storages/LiveView/StorageLiveView.h b/src/Storages/LiveView/StorageLiveView.h similarity index 100% rename from dbms/Storages/LiveView/StorageLiveView.h rename to src/Storages/LiveView/StorageLiveView.h diff --git a/dbms/Storages/MarkCache.h b/src/Storages/MarkCache.h similarity index 100% rename from dbms/Storages/MarkCache.h rename to src/Storages/MarkCache.h diff --git a/dbms/Storages/MergeTree/ActiveDataPartSet.cpp b/src/Storages/MergeTree/ActiveDataPartSet.cpp similarity index 100% rename from dbms/Storages/MergeTree/ActiveDataPartSet.cpp rename to src/Storages/MergeTree/ActiveDataPartSet.cpp diff --git a/dbms/Storages/MergeTree/ActiveDataPartSet.h b/src/Storages/MergeTree/ActiveDataPartSet.h similarity index 100% rename from dbms/Storages/MergeTree/ActiveDataPartSet.h rename to src/Storages/MergeTree/ActiveDataPartSet.h diff --git a/dbms/Storages/MergeTree/AllMergeSelector.cpp b/src/Storages/MergeTree/AllMergeSelector.cpp similarity index 100% rename from dbms/Storages/MergeTree/AllMergeSelector.cpp rename to 
src/Storages/MergeTree/AllMergeSelector.cpp diff --git a/dbms/Storages/MergeTree/AllMergeSelector.h b/src/Storages/MergeTree/AllMergeSelector.h similarity index 100% rename from dbms/Storages/MergeTree/AllMergeSelector.h rename to src/Storages/MergeTree/AllMergeSelector.h diff --git a/dbms/Storages/MergeTree/BackgroundProcessingPool.cpp b/src/Storages/MergeTree/BackgroundProcessingPool.cpp similarity index 100% rename from dbms/Storages/MergeTree/BackgroundProcessingPool.cpp rename to src/Storages/MergeTree/BackgroundProcessingPool.cpp diff --git a/dbms/Storages/MergeTree/BackgroundProcessingPool.h b/src/Storages/MergeTree/BackgroundProcessingPool.h similarity index 100% rename from dbms/Storages/MergeTree/BackgroundProcessingPool.h rename to src/Storages/MergeTree/BackgroundProcessingPool.h diff --git a/dbms/Storages/MergeTree/BoolMask.cpp b/src/Storages/MergeTree/BoolMask.cpp similarity index 100% rename from dbms/Storages/MergeTree/BoolMask.cpp rename to src/Storages/MergeTree/BoolMask.cpp diff --git a/dbms/Storages/MergeTree/BoolMask.h b/src/Storages/MergeTree/BoolMask.h similarity index 100% rename from dbms/Storages/MergeTree/BoolMask.h rename to src/Storages/MergeTree/BoolMask.h diff --git a/dbms/Storages/MergeTree/DataPartsExchange.cpp b/src/Storages/MergeTree/DataPartsExchange.cpp similarity index 100% rename from dbms/Storages/MergeTree/DataPartsExchange.cpp rename to src/Storages/MergeTree/DataPartsExchange.cpp diff --git a/dbms/Storages/MergeTree/DataPartsExchange.h b/src/Storages/MergeTree/DataPartsExchange.h similarity index 100% rename from dbms/Storages/MergeTree/DataPartsExchange.h rename to src/Storages/MergeTree/DataPartsExchange.h diff --git a/dbms/Storages/MergeTree/EphemeralLockInZooKeeper.cpp b/src/Storages/MergeTree/EphemeralLockInZooKeeper.cpp similarity index 100% rename from dbms/Storages/MergeTree/EphemeralLockInZooKeeper.cpp rename to src/Storages/MergeTree/EphemeralLockInZooKeeper.cpp diff --git a/dbms/Storages/MergeTree/EphemeralLockInZooKeeper.h b/src/Storages/MergeTree/EphemeralLockInZooKeeper.h similarity index 100% rename from dbms/Storages/MergeTree/EphemeralLockInZooKeeper.h rename to src/Storages/MergeTree/EphemeralLockInZooKeeper.h diff --git a/dbms/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp similarity index 100% rename from dbms/Storages/MergeTree/IMergeTreeDataPart.cpp rename to src/Storages/MergeTree/IMergeTreeDataPart.cpp diff --git a/dbms/Storages/MergeTree/IMergeTreeDataPart.h b/src/Storages/MergeTree/IMergeTreeDataPart.h similarity index 100% rename from dbms/Storages/MergeTree/IMergeTreeDataPart.h rename to src/Storages/MergeTree/IMergeTreeDataPart.h diff --git a/dbms/Storages/MergeTree/IMergeTreeDataPartWriter.cpp b/src/Storages/MergeTree/IMergeTreeDataPartWriter.cpp similarity index 100% rename from dbms/Storages/MergeTree/IMergeTreeDataPartWriter.cpp rename to src/Storages/MergeTree/IMergeTreeDataPartWriter.cpp diff --git a/dbms/Storages/MergeTree/IMergeTreeDataPartWriter.h b/src/Storages/MergeTree/IMergeTreeDataPartWriter.h similarity index 100% rename from dbms/Storages/MergeTree/IMergeTreeDataPartWriter.h rename to src/Storages/MergeTree/IMergeTreeDataPartWriter.h diff --git a/dbms/Storages/MergeTree/IMergeTreeReader.cpp b/src/Storages/MergeTree/IMergeTreeReader.cpp similarity index 100% rename from dbms/Storages/MergeTree/IMergeTreeReader.cpp rename to src/Storages/MergeTree/IMergeTreeReader.cpp diff --git a/dbms/Storages/MergeTree/IMergeTreeReader.h 
b/src/Storages/MergeTree/IMergeTreeReader.h similarity index 100% rename from dbms/Storages/MergeTree/IMergeTreeReader.h rename to src/Storages/MergeTree/IMergeTreeReader.h diff --git a/dbms/Storages/MergeTree/IMergedBlockOutputStream.cpp b/src/Storages/MergeTree/IMergedBlockOutputStream.cpp similarity index 100% rename from dbms/Storages/MergeTree/IMergedBlockOutputStream.cpp rename to src/Storages/MergeTree/IMergedBlockOutputStream.cpp diff --git a/dbms/Storages/MergeTree/IMergedBlockOutputStream.h b/src/Storages/MergeTree/IMergedBlockOutputStream.h similarity index 100% rename from dbms/Storages/MergeTree/IMergedBlockOutputStream.h rename to src/Storages/MergeTree/IMergedBlockOutputStream.h diff --git a/dbms/Storages/MergeTree/KeyCondition.cpp b/src/Storages/MergeTree/KeyCondition.cpp similarity index 100% rename from dbms/Storages/MergeTree/KeyCondition.cpp rename to src/Storages/MergeTree/KeyCondition.cpp diff --git a/dbms/Storages/MergeTree/KeyCondition.h b/src/Storages/MergeTree/KeyCondition.h similarity index 100% rename from dbms/Storages/MergeTree/KeyCondition.h rename to src/Storages/MergeTree/KeyCondition.h diff --git a/dbms/Storages/MergeTree/LevelMergeSelector.cpp b/src/Storages/MergeTree/LevelMergeSelector.cpp similarity index 100% rename from dbms/Storages/MergeTree/LevelMergeSelector.cpp rename to src/Storages/MergeTree/LevelMergeSelector.cpp diff --git a/dbms/Storages/MergeTree/LevelMergeSelector.h b/src/Storages/MergeTree/LevelMergeSelector.h similarity index 100% rename from dbms/Storages/MergeTree/LevelMergeSelector.h rename to src/Storages/MergeTree/LevelMergeSelector.h diff --git a/dbms/Storages/MergeTree/MarkRange.h b/src/Storages/MergeTree/MarkRange.h similarity index 100% rename from dbms/Storages/MergeTree/MarkRange.h rename to src/Storages/MergeTree/MarkRange.h diff --git a/dbms/Storages/MergeTree/MergeList.cpp b/src/Storages/MergeTree/MergeList.cpp similarity index 100% rename from dbms/Storages/MergeTree/MergeList.cpp rename to src/Storages/MergeTree/MergeList.cpp diff --git a/dbms/Storages/MergeTree/MergeList.h b/src/Storages/MergeTree/MergeList.h similarity index 100% rename from dbms/Storages/MergeTree/MergeList.h rename to src/Storages/MergeTree/MergeList.h diff --git a/dbms/Storages/MergeTree/MergeSelector.h b/src/Storages/MergeTree/MergeSelector.h similarity index 100% rename from dbms/Storages/MergeTree/MergeSelector.h rename to src/Storages/MergeTree/MergeSelector.h diff --git a/dbms/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp similarity index 100% rename from dbms/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp rename to src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp diff --git a/dbms/Storages/MergeTree/MergeTreeBaseSelectProcessor.h b/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.h similarity index 100% rename from dbms/Storages/MergeTree/MergeTreeBaseSelectProcessor.h rename to src/Storages/MergeTree/MergeTreeBaseSelectProcessor.h diff --git a/dbms/Storages/MergeTree/MergeTreeBlockOutputStream.cpp b/src/Storages/MergeTree/MergeTreeBlockOutputStream.cpp similarity index 100% rename from dbms/Storages/MergeTree/MergeTreeBlockOutputStream.cpp rename to src/Storages/MergeTree/MergeTreeBlockOutputStream.cpp diff --git a/dbms/Storages/MergeTree/MergeTreeBlockOutputStream.h b/src/Storages/MergeTree/MergeTreeBlockOutputStream.h similarity index 100% rename from dbms/Storages/MergeTree/MergeTreeBlockOutputStream.h rename to src/Storages/MergeTree/MergeTreeBlockOutputStream.h 
diff --git a/dbms/Storages/MergeTree/MergeTreeBlockReadUtils.cpp b/src/Storages/MergeTree/MergeTreeBlockReadUtils.cpp
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeBlockReadUtils.cpp
rename to src/Storages/MergeTree/MergeTreeBlockReadUtils.cpp
diff --git a/dbms/Storages/MergeTree/MergeTreeBlockReadUtils.h b/src/Storages/MergeTree/MergeTreeBlockReadUtils.h
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeBlockReadUtils.h
rename to src/Storages/MergeTree/MergeTreeBlockReadUtils.h
diff --git a/dbms/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeData.cpp
rename to src/Storages/MergeTree/MergeTreeData.cpp
diff --git a/dbms/Storages/MergeTree/MergeTreeData.h b/src/Storages/MergeTree/MergeTreeData.h
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeData.h
rename to src/Storages/MergeTree/MergeTreeData.h
diff --git a/dbms/Storages/MergeTree/MergeTreeDataFormatVersion.h b/src/Storages/MergeTree/MergeTreeDataFormatVersion.h
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeDataFormatVersion.h
rename to src/Storages/MergeTree/MergeTreeDataFormatVersion.h
diff --git a/dbms/Storages/MergeTree/MergeTreeDataMergerMutator.cpp b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeDataMergerMutator.cpp
rename to src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp
diff --git a/dbms/Storages/MergeTree/MergeTreeDataMergerMutator.h b/src/Storages/MergeTree/MergeTreeDataMergerMutator.h
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeDataMergerMutator.h
rename to src/Storages/MergeTree/MergeTreeDataMergerMutator.h
diff --git a/dbms/Storages/MergeTree/MergeTreeDataPartChecksum.cpp b/src/Storages/MergeTree/MergeTreeDataPartChecksum.cpp
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeDataPartChecksum.cpp
rename to src/Storages/MergeTree/MergeTreeDataPartChecksum.cpp
diff --git a/dbms/Storages/MergeTree/MergeTreeDataPartChecksum.h b/src/Storages/MergeTree/MergeTreeDataPartChecksum.h
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeDataPartChecksum.h
rename to src/Storages/MergeTree/MergeTreeDataPartChecksum.h
diff --git a/dbms/Storages/MergeTree/MergeTreeDataPartCompact.cpp b/src/Storages/MergeTree/MergeTreeDataPartCompact.cpp
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeDataPartCompact.cpp
rename to src/Storages/MergeTree/MergeTreeDataPartCompact.cpp
diff --git a/dbms/Storages/MergeTree/MergeTreeDataPartCompact.h b/src/Storages/MergeTree/MergeTreeDataPartCompact.h
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeDataPartCompact.h
rename to src/Storages/MergeTree/MergeTreeDataPartCompact.h
diff --git a/dbms/Storages/MergeTree/MergeTreeDataPartTTLInfo.cpp b/src/Storages/MergeTree/MergeTreeDataPartTTLInfo.cpp
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeDataPartTTLInfo.cpp
rename to src/Storages/MergeTree/MergeTreeDataPartTTLInfo.cpp
diff --git a/dbms/Storages/MergeTree/MergeTreeDataPartTTLInfo.h b/src/Storages/MergeTree/MergeTreeDataPartTTLInfo.h
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeDataPartTTLInfo.h
rename to src/Storages/MergeTree/MergeTreeDataPartTTLInfo.h
diff --git a/dbms/Storages/MergeTree/MergeTreeDataPartType.cpp b/src/Storages/MergeTree/MergeTreeDataPartType.cpp
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeDataPartType.cpp
rename to src/Storages/MergeTree/MergeTreeDataPartType.cpp
diff --git a/dbms/Storages/MergeTree/MergeTreeDataPartType.h b/src/Storages/MergeTree/MergeTreeDataPartType.h
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeDataPartType.h
rename to src/Storages/MergeTree/MergeTreeDataPartType.h
diff --git a/dbms/Storages/MergeTree/MergeTreeDataPartWide.cpp b/src/Storages/MergeTree/MergeTreeDataPartWide.cpp
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeDataPartWide.cpp
rename to src/Storages/MergeTree/MergeTreeDataPartWide.cpp
diff --git a/dbms/Storages/MergeTree/MergeTreeDataPartWide.h b/src/Storages/MergeTree/MergeTreeDataPartWide.h
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeDataPartWide.h
rename to src/Storages/MergeTree/MergeTreeDataPartWide.h
diff --git a/dbms/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp
rename to src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp
diff --git a/dbms/Storages/MergeTree/MergeTreeDataPartWriterCompact.h b/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.h
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeDataPartWriterCompact.h
rename to src/Storages/MergeTree/MergeTreeDataPartWriterCompact.h
diff --git a/dbms/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp
rename to src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp
diff --git a/dbms/Storages/MergeTree/MergeTreeDataPartWriterWide.h b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.h
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeDataPartWriterWide.h
rename to src/Storages/MergeTree/MergeTreeDataPartWriterWide.h
diff --git a/dbms/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp
rename to src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp
diff --git a/dbms/Storages/MergeTree/MergeTreeDataSelectExecutor.h b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeDataSelectExecutor.h
rename to src/Storages/MergeTree/MergeTreeDataSelectExecutor.h
diff --git a/dbms/Storages/MergeTree/MergeTreeDataWriter.cpp b/src/Storages/MergeTree/MergeTreeDataWriter.cpp
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeDataWriter.cpp
rename to src/Storages/MergeTree/MergeTreeDataWriter.cpp
diff --git a/dbms/Storages/MergeTree/MergeTreeDataWriter.h b/src/Storages/MergeTree/MergeTreeDataWriter.h
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeDataWriter.h
rename to src/Storages/MergeTree/MergeTreeDataWriter.h
diff --git a/dbms/Storages/MergeTree/MergeTreeIOSettings.h b/src/Storages/MergeTree/MergeTreeIOSettings.h
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeIOSettings.h
rename to src/Storages/MergeTree/MergeTreeIOSettings.h
diff --git a/dbms/Storages/MergeTree/MergeTreeIndexAggregatorBloomFilter.cpp b/src/Storages/MergeTree/MergeTreeIndexAggregatorBloomFilter.cpp
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeIndexAggregatorBloomFilter.cpp
rename to src/Storages/MergeTree/MergeTreeIndexAggregatorBloomFilter.cpp
diff --git a/dbms/Storages/MergeTree/MergeTreeIndexAggregatorBloomFilter.h b/src/Storages/MergeTree/MergeTreeIndexAggregatorBloomFilter.h
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeIndexAggregatorBloomFilter.h
rename to src/Storages/MergeTree/MergeTreeIndexAggregatorBloomFilter.h
diff --git a/dbms/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp b/src/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp
rename to src/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp
diff --git a/dbms/Storages/MergeTree/MergeTreeIndexBloomFilter.h b/src/Storages/MergeTree/MergeTreeIndexBloomFilter.h
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeIndexBloomFilter.h
rename to src/Storages/MergeTree/MergeTreeIndexBloomFilter.h
diff --git a/dbms/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.cpp b/src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.cpp
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.cpp
rename to src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.cpp
diff --git a/dbms/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.h b/src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.h
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.h
rename to src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.h
diff --git a/dbms/Storages/MergeTree/MergeTreeIndexFullText.cpp b/src/Storages/MergeTree/MergeTreeIndexFullText.cpp
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeIndexFullText.cpp
rename to src/Storages/MergeTree/MergeTreeIndexFullText.cpp
diff --git a/dbms/Storages/MergeTree/MergeTreeIndexFullText.h b/src/Storages/MergeTree/MergeTreeIndexFullText.h
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeIndexFullText.h
rename to src/Storages/MergeTree/MergeTreeIndexFullText.h
diff --git a/dbms/Storages/MergeTree/MergeTreeIndexGranularity.cpp b/src/Storages/MergeTree/MergeTreeIndexGranularity.cpp
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeIndexGranularity.cpp
rename to src/Storages/MergeTree/MergeTreeIndexGranularity.cpp
diff --git a/dbms/Storages/MergeTree/MergeTreeIndexGranularity.h b/src/Storages/MergeTree/MergeTreeIndexGranularity.h
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeIndexGranularity.h
rename to src/Storages/MergeTree/MergeTreeIndexGranularity.h
diff --git a/dbms/Storages/MergeTree/MergeTreeIndexGranularityInfo.cpp b/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.cpp
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeIndexGranularityInfo.cpp
rename to src/Storages/MergeTree/MergeTreeIndexGranularityInfo.cpp
diff --git a/dbms/Storages/MergeTree/MergeTreeIndexGranularityInfo.h b/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.h
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeIndexGranularityInfo.h
rename to src/Storages/MergeTree/MergeTreeIndexGranularityInfo.h
diff --git a/dbms/Storages/MergeTree/MergeTreeIndexGranularityInfo.h.gch b/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.h.gch
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeIndexGranularityInfo.h.gch
rename to src/Storages/MergeTree/MergeTreeIndexGranularityInfo.h.gch
diff --git a/dbms/Storages/MergeTree/MergeTreeIndexGranuleBloomFilter.cpp b/src/Storages/MergeTree/MergeTreeIndexGranuleBloomFilter.cpp
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeIndexGranuleBloomFilter.cpp
rename to src/Storages/MergeTree/MergeTreeIndexGranuleBloomFilter.cpp
diff --git a/dbms/Storages/MergeTree/MergeTreeIndexGranuleBloomFilter.h b/src/Storages/MergeTree/MergeTreeIndexGranuleBloomFilter.h
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeIndexGranuleBloomFilter.h
rename to src/Storages/MergeTree/MergeTreeIndexGranuleBloomFilter.h
diff --git a/dbms/Storages/MergeTree/MergeTreeIndexMinMax.cpp b/src/Storages/MergeTree/MergeTreeIndexMinMax.cpp
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeIndexMinMax.cpp
rename to src/Storages/MergeTree/MergeTreeIndexMinMax.cpp
diff --git a/dbms/Storages/MergeTree/MergeTreeIndexMinMax.h b/src/Storages/MergeTree/MergeTreeIndexMinMax.h
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeIndexMinMax.h
rename to src/Storages/MergeTree/MergeTreeIndexMinMax.h
diff --git a/dbms/Storages/MergeTree/MergeTreeIndexReader.cpp b/src/Storages/MergeTree/MergeTreeIndexReader.cpp
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeIndexReader.cpp
rename to src/Storages/MergeTree/MergeTreeIndexReader.cpp
diff --git a/dbms/Storages/MergeTree/MergeTreeIndexReader.h b/src/Storages/MergeTree/MergeTreeIndexReader.h
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeIndexReader.h
rename to src/Storages/MergeTree/MergeTreeIndexReader.h
diff --git a/dbms/Storages/MergeTree/MergeTreeIndexSet.cpp b/src/Storages/MergeTree/MergeTreeIndexSet.cpp
similarity index 99%
rename from dbms/Storages/MergeTree/MergeTreeIndexSet.cpp
rename to src/Storages/MergeTree/MergeTreeIndexSet.cpp
index e888fb38822..8cc67adb097 100644
--- a/dbms/Storages/MergeTree/MergeTreeIndexSet.cpp
+++ b/src/Storages/MergeTree/MergeTreeIndexSet.cpp
@@ -239,7 +239,7 @@ MergeTreeIndexConditionSet::MergeTreeIndexConditionSet(
         return;
 
     /// Replace logical functions with bit functions.
-    /// Working with UInt8: last bit = can be true, previous = can be false (Like dbms/Storages/MergeTree/BoolMask.h).
+    /// Working with UInt8: last bit = can be true, previous = can be false (Like src/Storages/MergeTree/BoolMask.h).
     traverseAST(expression_ast);
 
     auto syntax_analyzer_result = SyntaxAnalyzer(context).analyze(
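The comment rewritten by the hunk above describes the truth encoding used while the set index condition replaces logical functions with bit functions: each subexpression folds to a UInt8 whose last bit means "can be true" over a granule and whose previous bit means "can be false". A minimal standalone sketch of that encoding, assuming illustrative names (TruthMask, CAN_BE_TRUE, CAN_BE_FALSE and maskAnd are not the actual definitions in BoolMask.h):

#include <cstdint>

// Two-bit truth mask over a range of rows (illustrative, not BoolMask.h itself):
// last bit = "can be true", previous bit = "can be false".
using TruthMask = uint8_t;

constexpr TruthMask CAN_BE_TRUE  = 0b01;  // some row in the range may satisfy the condition
constexpr TruthMask CAN_BE_FALSE = 0b10;  // some row in the range may violate the condition

// AND of two subconditions: the result can be true only if both sides can be true;
// it can be false as soon as either side can be false.
constexpr TruthMask maskAnd(TruthMask a, TruthMask b)
{
    return static_cast<TruthMask>((a & b & CAN_BE_TRUE) | ((a | b) & CAN_BE_FALSE));
}

static_assert(maskAnd(CAN_BE_TRUE, CAN_BE_TRUE | CAN_BE_FALSE) == (CAN_BE_TRUE | CAN_BE_FALSE),
              "AND with an undecided side stays undecided");
static_assert(maskAnd(CAN_BE_FALSE, CAN_BE_TRUE) == CAN_BE_FALSE,
              "AND with a definitely-false side is definitely false");

Encoding both possibilities lets a granule-level filter stay conservative: a granule is skipped only when "can be true" is absent from the final mask.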
diff --git a/dbms/Storages/MergeTree/MergeTreeIndexSet.h b/src/Storages/MergeTree/MergeTreeIndexSet.h
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeIndexSet.h
rename to src/Storages/MergeTree/MergeTreeIndexSet.h
diff --git a/dbms/Storages/MergeTree/MergeTreeIndices.cpp b/src/Storages/MergeTree/MergeTreeIndices.cpp
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeIndices.cpp
rename to src/Storages/MergeTree/MergeTreeIndices.cpp
diff --git a/dbms/Storages/MergeTree/MergeTreeIndices.h b/src/Storages/MergeTree/MergeTreeIndices.h
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeIndices.h
rename to src/Storages/MergeTree/MergeTreeIndices.h
diff --git a/dbms/Storages/MergeTree/MergeTreeMarksLoader.cpp b/src/Storages/MergeTree/MergeTreeMarksLoader.cpp
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeMarksLoader.cpp
rename to src/Storages/MergeTree/MergeTreeMarksLoader.cpp
diff --git a/dbms/Storages/MergeTree/MergeTreeMarksLoader.h b/src/Storages/MergeTree/MergeTreeMarksLoader.h
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeMarksLoader.h
rename to src/Storages/MergeTree/MergeTreeMarksLoader.h
diff --git a/dbms/Storages/MergeTree/MergeTreeMutationEntry.cpp b/src/Storages/MergeTree/MergeTreeMutationEntry.cpp
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeMutationEntry.cpp
rename to src/Storages/MergeTree/MergeTreeMutationEntry.cpp
diff --git a/dbms/Storages/MergeTree/MergeTreeMutationEntry.h b/src/Storages/MergeTree/MergeTreeMutationEntry.h
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeMutationEntry.h
rename to src/Storages/MergeTree/MergeTreeMutationEntry.h
diff --git a/dbms/Storages/MergeTree/MergeTreeMutationStatus.h b/src/Storages/MergeTree/MergeTreeMutationStatus.h
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeMutationStatus.h
rename to src/Storages/MergeTree/MergeTreeMutationStatus.h
diff --git a/dbms/Storages/MergeTree/MergeTreePartInfo.cpp b/src/Storages/MergeTree/MergeTreePartInfo.cpp
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreePartInfo.cpp
rename to src/Storages/MergeTree/MergeTreePartInfo.cpp
diff --git a/dbms/Storages/MergeTree/MergeTreePartInfo.h b/src/Storages/MergeTree/MergeTreePartInfo.h
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreePartInfo.h
rename to src/Storages/MergeTree/MergeTreePartInfo.h
diff --git a/dbms/Storages/MergeTree/MergeTreePartition.cpp b/src/Storages/MergeTree/MergeTreePartition.cpp
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreePartition.cpp
rename to src/Storages/MergeTree/MergeTreePartition.cpp
diff --git a/dbms/Storages/MergeTree/MergeTreePartition.h b/src/Storages/MergeTree/MergeTreePartition.h
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreePartition.h
rename to src/Storages/MergeTree/MergeTreePartition.h
diff --git a/dbms/Storages/MergeTree/MergeTreePartsMover.cpp b/src/Storages/MergeTree/MergeTreePartsMover.cpp
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreePartsMover.cpp
rename to src/Storages/MergeTree/MergeTreePartsMover.cpp
diff --git a/dbms/Storages/MergeTree/MergeTreePartsMover.h b/src/Storages/MergeTree/MergeTreePartsMover.h
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreePartsMover.h
rename to src/Storages/MergeTree/MergeTreePartsMover.h
diff --git a/dbms/Storages/MergeTree/MergeTreeRangeReader.cpp b/src/Storages/MergeTree/MergeTreeRangeReader.cpp
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeRangeReader.cpp
rename to src/Storages/MergeTree/MergeTreeRangeReader.cpp
diff --git a/dbms/Storages/MergeTree/MergeTreeRangeReader.h b/src/Storages/MergeTree/MergeTreeRangeReader.h
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeRangeReader.h
rename to src/Storages/MergeTree/MergeTreeRangeReader.h
diff --git a/dbms/Storages/MergeTree/MergeTreeReadPool.cpp b/src/Storages/MergeTree/MergeTreeReadPool.cpp
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeReadPool.cpp
rename to src/Storages/MergeTree/MergeTreeReadPool.cpp
diff --git a/dbms/Storages/MergeTree/MergeTreeReadPool.h b/src/Storages/MergeTree/MergeTreeReadPool.h
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeReadPool.h
rename to src/Storages/MergeTree/MergeTreeReadPool.h
diff --git a/dbms/Storages/MergeTree/MergeTreeReaderCompact.cpp b/src/Storages/MergeTree/MergeTreeReaderCompact.cpp
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeReaderCompact.cpp
rename to src/Storages/MergeTree/MergeTreeReaderCompact.cpp
diff --git a/dbms/Storages/MergeTree/MergeTreeReaderCompact.h b/src/Storages/MergeTree/MergeTreeReaderCompact.h
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeReaderCompact.h
rename to src/Storages/MergeTree/MergeTreeReaderCompact.h
diff --git a/dbms/Storages/MergeTree/MergeTreeReaderStream.cpp b/src/Storages/MergeTree/MergeTreeReaderStream.cpp
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeReaderStream.cpp
rename to src/Storages/MergeTree/MergeTreeReaderStream.cpp
diff --git a/dbms/Storages/MergeTree/MergeTreeReaderStream.h b/src/Storages/MergeTree/MergeTreeReaderStream.h
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeReaderStream.h
rename to src/Storages/MergeTree/MergeTreeReaderStream.h
diff --git a/dbms/Storages/MergeTree/MergeTreeReaderWide.cpp b/src/Storages/MergeTree/MergeTreeReaderWide.cpp
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeReaderWide.cpp
rename to src/Storages/MergeTree/MergeTreeReaderWide.cpp
diff --git a/dbms/Storages/MergeTree/MergeTreeReaderWide.h b/src/Storages/MergeTree/MergeTreeReaderWide.h
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeReaderWide.h
rename to src/Storages/MergeTree/MergeTreeReaderWide.h
diff --git a/dbms/Storages/MergeTree/MergeTreeReverseSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeReverseSelectProcessor.cpp
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeReverseSelectProcessor.cpp
rename to src/Storages/MergeTree/MergeTreeReverseSelectProcessor.cpp
diff --git a/dbms/Storages/MergeTree/MergeTreeReverseSelectProcessor.h b/src/Storages/MergeTree/MergeTreeReverseSelectProcessor.h
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeReverseSelectProcessor.h
rename to src/Storages/MergeTree/MergeTreeReverseSelectProcessor.h
diff --git a/dbms/Storages/MergeTree/MergeTreeSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeSelectProcessor.cpp
rename to src/Storages/MergeTree/MergeTreeSelectProcessor.cpp
diff --git a/dbms/Storages/MergeTree/MergeTreeSelectProcessor.h b/src/Storages/MergeTree/MergeTreeSelectProcessor.h
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeSelectProcessor.h
rename to src/Storages/MergeTree/MergeTreeSelectProcessor.h
diff --git a/dbms/Storages/MergeTree/MergeTreeSequentialBlockInputStream.cpp b/src/Storages/MergeTree/MergeTreeSequentialBlockInputStream.cpp
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeSequentialBlockInputStream.cpp
rename to src/Storages/MergeTree/MergeTreeSequentialBlockInputStream.cpp
diff --git a/dbms/Storages/MergeTree/MergeTreeSequentialBlockInputStream.h b/src/Storages/MergeTree/MergeTreeSequentialBlockInputStream.h
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeSequentialBlockInputStream.h
rename to src/Storages/MergeTree/MergeTreeSequentialBlockInputStream.h
diff --git a/dbms/Storages/MergeTree/MergeTreeSettings.cpp b/src/Storages/MergeTree/MergeTreeSettings.cpp
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeSettings.cpp
rename to src/Storages/MergeTree/MergeTreeSettings.cpp
diff --git a/dbms/Storages/MergeTree/MergeTreeSettings.h b/src/Storages/MergeTree/MergeTreeSettings.h
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeSettings.h
rename to src/Storages/MergeTree/MergeTreeSettings.h
diff --git a/dbms/Storages/MergeTree/MergeTreeThreadSelectBlockInputProcessor.cpp b/src/Storages/MergeTree/MergeTreeThreadSelectBlockInputProcessor.cpp
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeThreadSelectBlockInputProcessor.cpp
rename to src/Storages/MergeTree/MergeTreeThreadSelectBlockInputProcessor.cpp
diff --git a/dbms/Storages/MergeTree/MergeTreeThreadSelectBlockInputProcessor.h b/src/Storages/MergeTree/MergeTreeThreadSelectBlockInputProcessor.h
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeThreadSelectBlockInputProcessor.h
rename to src/Storages/MergeTree/MergeTreeThreadSelectBlockInputProcessor.h
diff --git a/dbms/Storages/MergeTree/MergeTreeWhereOptimizer.cpp b/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeWhereOptimizer.cpp
rename to src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp
diff --git a/dbms/Storages/MergeTree/MergeTreeWhereOptimizer.h b/src/Storages/MergeTree/MergeTreeWhereOptimizer.h
similarity index 100%
rename from dbms/Storages/MergeTree/MergeTreeWhereOptimizer.h
rename to src/Storages/MergeTree/MergeTreeWhereOptimizer.h
diff --git a/dbms/Storages/MergeTree/MergedBlockOutputStream.cpp b/src/Storages/MergeTree/MergedBlockOutputStream.cpp
similarity index 100%
rename from dbms/Storages/MergeTree/MergedBlockOutputStream.cpp
rename to src/Storages/MergeTree/MergedBlockOutputStream.cpp
diff --git a/dbms/Storages/MergeTree/MergedBlockOutputStream.h b/src/Storages/MergeTree/MergedBlockOutputStream.h
similarity index 100%
rename from dbms/Storages/MergeTree/MergedBlockOutputStream.h
rename to src/Storages/MergeTree/MergedBlockOutputStream.h
diff --git a/dbms/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp b/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp
similarity index 100%
rename from dbms/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp
rename to src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp
diff --git a/dbms/Storages/MergeTree/MergedColumnOnlyOutputStream.h b/src/Storages/MergeTree/MergedColumnOnlyOutputStream.h
similarity index 100%
rename from dbms/Storages/MergeTree/MergedColumnOnlyOutputStream.h
rename to src/Storages/MergeTree/MergedColumnOnlyOutputStream.h
diff --git a/dbms/Storages/MergeTree/PartDestinationType.h b/src/Storages/MergeTree/PartDestinationType.h
similarity index 100%
rename from dbms/Storages/MergeTree/PartDestinationType.h
rename to src/Storages/MergeTree/PartDestinationType.h
diff --git a/dbms/Storages/MergeTree/RPNBuilder.h b/src/Storages/MergeTree/RPNBuilder.h
similarity index 100%
rename from dbms/Storages/MergeTree/RPNBuilder.h
rename to src/Storages/MergeTree/RPNBuilder.h
diff --git a/dbms/Storages/MergeTree/RangesInDataPart.h b/src/Storages/MergeTree/RangesInDataPart.h
similarity index 100%
rename from dbms/Storages/MergeTree/RangesInDataPart.h
rename to src/Storages/MergeTree/RangesInDataPart.h
diff --git a/dbms/Storages/MergeTree/ReplicatedMergeTreeAddress.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeAddress.cpp
similarity index 100%
rename from dbms/Storages/MergeTree/ReplicatedMergeTreeAddress.cpp
rename to src/Storages/MergeTree/ReplicatedMergeTreeAddress.cpp
diff --git a/dbms/Storages/MergeTree/ReplicatedMergeTreeAddress.h b/src/Storages/MergeTree/ReplicatedMergeTreeAddress.h
similarity index 100%
rename from dbms/Storages/MergeTree/ReplicatedMergeTreeAddress.h
rename to src/Storages/MergeTree/ReplicatedMergeTreeAddress.h
diff --git a/dbms/Storages/MergeTree/ReplicatedMergeTreeAltersSequence.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeAltersSequence.cpp
similarity index 100%
rename from dbms/Storages/MergeTree/ReplicatedMergeTreeAltersSequence.cpp
rename to src/Storages/MergeTree/ReplicatedMergeTreeAltersSequence.cpp
diff --git a/dbms/Storages/MergeTree/ReplicatedMergeTreeAltersSequence.h b/src/Storages/MergeTree/ReplicatedMergeTreeAltersSequence.h
similarity index 100%
rename from dbms/Storages/MergeTree/ReplicatedMergeTreeAltersSequence.h
rename to src/Storages/MergeTree/ReplicatedMergeTreeAltersSequence.h
diff --git a/dbms/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.cpp
similarity index 100%
rename from dbms/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.cpp
rename to src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.cpp
diff --git a/dbms/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.h b/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.h
similarity index 100%
rename from dbms/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.h
rename to src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.h
diff --git a/dbms/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp
similarity index 100%
rename from dbms/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp
rename to src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp
diff --git a/dbms/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.h b/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.h
similarity index 100%
rename from dbms/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.h
rename to src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.h
diff --git a/dbms/Storages/MergeTree/ReplicatedMergeTreeLogEntry.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeLogEntry.cpp
similarity index 100%
rename from dbms/Storages/MergeTree/ReplicatedMergeTreeLogEntry.cpp
rename to src/Storages/MergeTree/ReplicatedMergeTreeLogEntry.cpp
diff --git a/dbms/Storages/MergeTree/ReplicatedMergeTreeLogEntry.h b/src/Storages/MergeTree/ReplicatedMergeTreeLogEntry.h
similarity index 100%
rename from dbms/Storages/MergeTree/ReplicatedMergeTreeLogEntry.h
rename to src/Storages/MergeTree/ReplicatedMergeTreeLogEntry.h
diff --git a/dbms/Storages/MergeTree/ReplicatedMergeTreeMutationEntry.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeMutationEntry.cpp
similarity index 100%
rename from dbms/Storages/MergeTree/ReplicatedMergeTreeMutationEntry.cpp
rename to src/Storages/MergeTree/ReplicatedMergeTreeMutationEntry.cpp
diff --git a/dbms/Storages/MergeTree/ReplicatedMergeTreeMutationEntry.h b/src/Storages/MergeTree/ReplicatedMergeTreeMutationEntry.h
similarity index 100%
rename from dbms/Storages/MergeTree/ReplicatedMergeTreeMutationEntry.h
rename to src/Storages/MergeTree/ReplicatedMergeTreeMutationEntry.h
diff --git a/dbms/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp b/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp
similarity index 100%
rename from dbms/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp
rename to src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp
diff --git a/dbms/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.h b/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.h
similarity index 100%
rename from dbms/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.h
rename to src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.h
diff --git a/dbms/Storages/MergeTree/ReplicatedMergeTreePartHeader.cpp b/src/Storages/MergeTree/ReplicatedMergeTreePartHeader.cpp
similarity index 100%
rename from dbms/Storages/MergeTree/ReplicatedMergeTreePartHeader.cpp
rename to src/Storages/MergeTree/ReplicatedMergeTreePartHeader.cpp
diff --git a/dbms/Storages/MergeTree/ReplicatedMergeTreePartHeader.h b/src/Storages/MergeTree/ReplicatedMergeTreePartHeader.h
similarity index 100%
rename from dbms/Storages/MergeTree/ReplicatedMergeTreePartHeader.h
rename to src/Storages/MergeTree/ReplicatedMergeTreePartHeader.h
diff --git a/dbms/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp
similarity index 100%
rename from dbms/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp
rename to src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp
diff --git a/dbms/Storages/MergeTree/ReplicatedMergeTreeQueue.h b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h
similarity index 100%
rename from dbms/Storages/MergeTree/ReplicatedMergeTreeQueue.h
rename to src/Storages/MergeTree/ReplicatedMergeTreeQueue.h
diff --git a/dbms/Storages/MergeTree/ReplicatedMergeTreeQuorumAddedParts.h b/src/Storages/MergeTree/ReplicatedMergeTreeQuorumAddedParts.h
similarity index 100%
rename from dbms/Storages/MergeTree/ReplicatedMergeTreeQuorumAddedParts.h
rename to src/Storages/MergeTree/ReplicatedMergeTreeQuorumAddedParts.h
diff --git a/dbms/Storages/MergeTree/ReplicatedMergeTreeQuorumEntry.h b/src/Storages/MergeTree/ReplicatedMergeTreeQuorumEntry.h
similarity index 100%
rename from dbms/Storages/MergeTree/ReplicatedMergeTreeQuorumEntry.h
rename to src/Storages/MergeTree/ReplicatedMergeTreeQuorumEntry.h
diff --git a/dbms/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp
similarity index 100%
rename from dbms/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp
rename to src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp
diff --git a/dbms/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.h b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.h
similarity index 100%
rename from dbms/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.h
rename to src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.h
diff --git a/dbms/Storages/MergeTree/ReplicatedMergeTreeTableMetadata.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeTableMetadata.cpp
similarity index 100%
rename from dbms/Storages/MergeTree/ReplicatedMergeTreeTableMetadata.cpp
rename to src/Storages/MergeTree/ReplicatedMergeTreeTableMetadata.cpp
diff --git a/dbms/Storages/MergeTree/ReplicatedMergeTreeTableMetadata.h b/src/Storages/MergeTree/ReplicatedMergeTreeTableMetadata.h
similarity index 100%
rename from dbms/Storages/MergeTree/ReplicatedMergeTreeTableMetadata.h
rename to src/Storages/MergeTree/ReplicatedMergeTreeTableMetadata.h
diff --git a/dbms/Storages/MergeTree/SimpleMergeSelector.cpp b/src/Storages/MergeTree/SimpleMergeSelector.cpp
similarity index 100%
rename from dbms/Storages/MergeTree/SimpleMergeSelector.cpp
rename to src/Storages/MergeTree/SimpleMergeSelector.cpp
diff --git a/dbms/Storages/MergeTree/SimpleMergeSelector.h b/src/Storages/MergeTree/SimpleMergeSelector.h
similarity index 100%
rename from dbms/Storages/MergeTree/SimpleMergeSelector.h
rename to src/Storages/MergeTree/SimpleMergeSelector.h
diff --git a/dbms/Storages/MergeTree/StorageFromMergeTreeDataPart.h b/src/Storages/MergeTree/StorageFromMergeTreeDataPart.h
similarity index 100%
rename from dbms/Storages/MergeTree/StorageFromMergeTreeDataPart.h
rename to src/Storages/MergeTree/StorageFromMergeTreeDataPart.h
diff --git a/dbms/Storages/MergeTree/TTLMergeSelector.cpp b/src/Storages/MergeTree/TTLMergeSelector.cpp
similarity index 100%
rename from dbms/Storages/MergeTree/TTLMergeSelector.cpp
rename to src/Storages/MergeTree/TTLMergeSelector.cpp
diff --git a/dbms/Storages/MergeTree/TTLMergeSelector.h b/src/Storages/MergeTree/TTLMergeSelector.h
similarity index 100%
rename from dbms/Storages/MergeTree/TTLMergeSelector.h
rename to src/Storages/MergeTree/TTLMergeSelector.h
diff --git a/dbms/Storages/MergeTree/checkDataPart.cpp b/src/Storages/MergeTree/checkDataPart.cpp
similarity index 100%
rename from dbms/Storages/MergeTree/checkDataPart.cpp
rename to src/Storages/MergeTree/checkDataPart.cpp
diff --git a/dbms/Storages/MergeTree/checkDataPart.h b/src/Storages/MergeTree/checkDataPart.h
similarity index 100%
rename from dbms/Storages/MergeTree/checkDataPart.h
rename to src/Storages/MergeTree/checkDataPart.h
diff --git a/dbms/Storages/MergeTree/localBackup.cpp b/src/Storages/MergeTree/localBackup.cpp
similarity index 100%
rename from dbms/Storages/MergeTree/localBackup.cpp
rename to src/Storages/MergeTree/localBackup.cpp
diff --git a/dbms/Storages/MergeTree/localBackup.h b/src/Storages/MergeTree/localBackup.h
similarity index 100%
rename from dbms/Storages/MergeTree/localBackup.h
rename to src/Storages/MergeTree/localBackup.h
diff --git a/dbms/Storages/MergeTree/registerStorageMergeTree.cpp b/src/Storages/MergeTree/registerStorageMergeTree.cpp
similarity index 99%
rename from dbms/Storages/MergeTree/registerStorageMergeTree.cpp
rename to src/Storages/MergeTree/registerStorageMergeTree.cpp
index 79ac28eb145..b5d6bb9e975 100644
--- a/dbms/Storages/MergeTree/registerStorageMergeTree.cpp
+++ b/src/Storages/MergeTree/registerStorageMergeTree.cpp
@@ -280,7 +280,7 @@ Careful choice of the primary key is extremely important for processing short-ti
 Optional sampling expression can be specified in the SAMPLE BY clause. It is used to implement the SAMPLE clause in a SELECT query for approximate query execution.
 Sampling expression must be one of the elements of the primary key tuple. For example, if your primary key is (CounterID, EventDate, intHash64(UserID)), your sampling expression might be intHash64(UserID).
 
-Engine settings can be specified in the SETTINGS clause. Full list is in the source code in the 'dbms/Storages/MergeTree/MergeTreeSettings.h' file.
+Engine settings can be specified in the SETTINGS clause. Full list is in the source code in the 'src/Storages/MergeTree/MergeTreeSettings.h' file.
 E.g. you can specify the index (primary key) granularity with SETTINGS index_granularity = 8192.
 
 Examples:
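The help text in this hunk breaks off at "Examples:"; the examples themselves lie outside the hunk's context window and are not part of this patch. Purely for orientation, a definition exercising the clauses described above could look like the following sketch, built from the names used in the text (the table name hits is hypothetical, not one of the file's own examples):

CREATE TABLE hits
(
    CounterID UInt32,
    EventDate Date,
    UserID UInt64
)
ENGINE = MergeTree
ORDER BY (CounterID, EventDate, intHash64(UserID))
SAMPLE BY intHash64(UserID)
SETTINGS index_granularity = 8192

Note that the SAMPLE BY expression, intHash64(UserID), is an element of the primary key tuple, as the text above requires.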
diff --git a/dbms/Storages/MutationCommands.cpp b/src/Storages/MutationCommands.cpp
similarity index 100%
rename from dbms/Storages/MutationCommands.cpp
rename to src/Storages/MutationCommands.cpp
diff --git a/dbms/Storages/MutationCommands.h b/src/Storages/MutationCommands.h
similarity index 100%
rename from dbms/Storages/MutationCommands.h
rename to src/Storages/MutationCommands.h
diff --git a/dbms/Storages/PartitionCommands.cpp b/src/Storages/PartitionCommands.cpp
similarity index 100%
rename from dbms/Storages/PartitionCommands.cpp
rename to src/Storages/PartitionCommands.cpp
diff --git a/dbms/Storages/PartitionCommands.h b/src/Storages/PartitionCommands.h
similarity index 100%
rename from dbms/Storages/PartitionCommands.h
rename to src/Storages/PartitionCommands.h
diff --git a/dbms/Storages/ReadInOrderOptimizer.cpp b/src/Storages/ReadInOrderOptimizer.cpp
similarity index 100%
rename from dbms/Storages/ReadInOrderOptimizer.cpp
rename to src/Storages/ReadInOrderOptimizer.cpp
diff --git a/dbms/Storages/ReadInOrderOptimizer.h b/src/Storages/ReadInOrderOptimizer.h
similarity index 100%
rename from dbms/Storages/ReadInOrderOptimizer.h
rename to src/Storages/ReadInOrderOptimizer.h
diff --git a/dbms/Storages/SelectQueryInfo.h b/src/Storages/SelectQueryInfo.h
similarity index 100%
rename from dbms/Storages/SelectQueryInfo.h
rename to src/Storages/SelectQueryInfo.h
diff --git a/dbms/Storages/StorageBuffer.cpp b/src/Storages/StorageBuffer.cpp
similarity index 100%
rename from dbms/Storages/StorageBuffer.cpp
rename to src/Storages/StorageBuffer.cpp
diff --git a/dbms/Storages/StorageBuffer.h b/src/Storages/StorageBuffer.h
similarity index 100%
rename from dbms/Storages/StorageBuffer.h
rename to src/Storages/StorageBuffer.h
diff --git a/dbms/Storages/StorageDictionary.cpp b/src/Storages/StorageDictionary.cpp
similarity index 100%
rename from dbms/Storages/StorageDictionary.cpp
rename to src/Storages/StorageDictionary.cpp
diff --git a/dbms/Storages/StorageDictionary.h b/src/Storages/StorageDictionary.h
similarity index 100%
rename from dbms/Storages/StorageDictionary.h
rename to src/Storages/StorageDictionary.h
diff --git a/dbms/Storages/StorageDistributed.cpp b/src/Storages/StorageDistributed.cpp
similarity index 100%
rename from dbms/Storages/StorageDistributed.cpp
rename to src/Storages/StorageDistributed.cpp
diff --git a/dbms/Storages/StorageDistributed.h b/src/Storages/StorageDistributed.h
similarity index 100%
rename from dbms/Storages/StorageDistributed.h
rename to src/Storages/StorageDistributed.h
diff --git a/dbms/Storages/StorageFactory.cpp b/src/Storages/StorageFactory.cpp
similarity index 100%
rename from dbms/Storages/StorageFactory.cpp
rename to src/Storages/StorageFactory.cpp
diff --git a/dbms/Storages/StorageFactory.h b/src/Storages/StorageFactory.h
similarity index 100%
rename from dbms/Storages/StorageFactory.h
rename to src/Storages/StorageFactory.h
diff --git a/dbms/Storages/StorageFile.cpp b/src/Storages/StorageFile.cpp
similarity index 100%
rename from dbms/Storages/StorageFile.cpp
rename to src/Storages/StorageFile.cpp
diff --git a/dbms/Storages/StorageFile.h b/src/Storages/StorageFile.h
similarity index 100%
rename from dbms/Storages/StorageFile.h
rename to src/Storages/StorageFile.h
diff --git a/dbms/Storages/StorageGenerateRandom.cpp b/src/Storages/StorageGenerateRandom.cpp
similarity index 100%
rename from dbms/Storages/StorageGenerateRandom.cpp
rename to src/Storages/StorageGenerateRandom.cpp
diff --git a/dbms/Storages/StorageGenerateRandom.h b/src/Storages/StorageGenerateRandom.h
similarity index 100%
rename from dbms/Storages/StorageGenerateRandom.h
rename to src/Storages/StorageGenerateRandom.h
diff --git a/dbms/Storages/StorageHDFS.cpp b/src/Storages/StorageHDFS.cpp
similarity index 100%
rename from dbms/Storages/StorageHDFS.cpp
rename to src/Storages/StorageHDFS.cpp
diff --git a/dbms/Storages/StorageHDFS.h b/src/Storages/StorageHDFS.h
similarity index 100%
rename from dbms/Storages/StorageHDFS.h
rename to src/Storages/StorageHDFS.h
diff --git a/dbms/Storages/StorageInMemoryMetadata.cpp b/src/Storages/StorageInMemoryMetadata.cpp
similarity index 100%
rename from dbms/Storages/StorageInMemoryMetadata.cpp
rename to src/Storages/StorageInMemoryMetadata.cpp
diff --git a/dbms/Storages/StorageInMemoryMetadata.h b/src/Storages/StorageInMemoryMetadata.h
similarity index 100%
rename from dbms/Storages/StorageInMemoryMetadata.h
rename to src/Storages/StorageInMemoryMetadata.h
diff --git a/dbms/Storages/StorageInput.cpp b/src/Storages/StorageInput.cpp
similarity index 100%
rename from dbms/Storages/StorageInput.cpp
rename to src/Storages/StorageInput.cpp
diff --git a/dbms/Storages/StorageInput.h b/src/Storages/StorageInput.h
similarity index 100%
rename from dbms/Storages/StorageInput.h
rename to src/Storages/StorageInput.h
diff --git a/dbms/Storages/StorageJoin.cpp b/src/Storages/StorageJoin.cpp
similarity index 100%
rename from dbms/Storages/StorageJoin.cpp
rename to src/Storages/StorageJoin.cpp
diff --git a/dbms/Storages/StorageJoin.h b/src/Storages/StorageJoin.h
similarity index 100%
rename from dbms/Storages/StorageJoin.h
rename to src/Storages/StorageJoin.h
diff --git a/dbms/Storages/StorageLog.cpp b/src/Storages/StorageLog.cpp
similarity index 100%
rename from dbms/Storages/StorageLog.cpp
rename to src/Storages/StorageLog.cpp
diff --git a/dbms/Storages/StorageLog.h b/src/Storages/StorageLog.h
similarity index 100%
rename from dbms/Storages/StorageLog.h
rename to src/Storages/StorageLog.h
diff --git a/dbms/Storages/StorageLogSettings.cpp b/src/Storages/StorageLogSettings.cpp
similarity index 100%
rename from dbms/Storages/StorageLogSettings.cpp
rename to src/Storages/StorageLogSettings.cpp
diff --git a/dbms/Storages/StorageLogSettings.h b/src/Storages/StorageLogSettings.h
similarity index 100%
rename from dbms/Storages/StorageLogSettings.h
rename to src/Storages/StorageLogSettings.h
diff --git a/dbms/Storages/StorageMaterializedView.cpp b/src/Storages/StorageMaterializedView.cpp
similarity index 100%
rename from dbms/Storages/StorageMaterializedView.cpp
rename to src/Storages/StorageMaterializedView.cpp
diff --git a/dbms/Storages/StorageMaterializedView.h b/src/Storages/StorageMaterializedView.h
similarity index 100%
rename from dbms/Storages/StorageMaterializedView.h
rename to src/Storages/StorageMaterializedView.h
diff --git a/dbms/Storages/StorageMemory.cpp b/src/Storages/StorageMemory.cpp
similarity index 100%
rename from dbms/Storages/StorageMemory.cpp
rename to src/Storages/StorageMemory.cpp
diff --git a/dbms/Storages/StorageMemory.h b/src/Storages/StorageMemory.h
similarity index 100%
rename from dbms/Storages/StorageMemory.h
rename to src/Storages/StorageMemory.h
diff --git a/dbms/Storages/StorageMerge.cpp b/src/Storages/StorageMerge.cpp
similarity index 100%
rename from dbms/Storages/StorageMerge.cpp
rename to src/Storages/StorageMerge.cpp
diff --git a/dbms/Storages/StorageMerge.h b/src/Storages/StorageMerge.h
similarity index 100%
rename from dbms/Storages/StorageMerge.h
rename to src/Storages/StorageMerge.h
diff --git a/dbms/Storages/StorageMergeTree.cpp b/src/Storages/StorageMergeTree.cpp
similarity index 100%
rename from dbms/Storages/StorageMergeTree.cpp
rename to src/Storages/StorageMergeTree.cpp
diff --git a/dbms/Storages/StorageMergeTree.h b/src/Storages/StorageMergeTree.h
similarity index 100%
rename from dbms/Storages/StorageMergeTree.h
rename to src/Storages/StorageMergeTree.h
diff --git a/dbms/Storages/StorageMySQL.cpp b/src/Storages/StorageMySQL.cpp
similarity index 100%
rename from dbms/Storages/StorageMySQL.cpp
rename to src/Storages/StorageMySQL.cpp
diff --git a/dbms/Storages/StorageMySQL.h b/src/Storages/StorageMySQL.h
similarity index 100%
rename from dbms/Storages/StorageMySQL.h
rename to src/Storages/StorageMySQL.h
diff --git a/dbms/Storages/StorageNull.cpp b/src/Storages/StorageNull.cpp
similarity index 100%
rename from dbms/Storages/StorageNull.cpp
rename to src/Storages/StorageNull.cpp
diff --git a/dbms/Storages/StorageNull.h b/src/Storages/StorageNull.h
similarity index 100%
rename from dbms/Storages/StorageNull.h
rename to src/Storages/StorageNull.h
diff --git a/dbms/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp
similarity index 100%
rename from dbms/Storages/StorageReplicatedMergeTree.cpp
rename to src/Storages/StorageReplicatedMergeTree.cpp
diff --git a/dbms/Storages/StorageReplicatedMergeTree.h b/src/Storages/StorageReplicatedMergeTree.h
similarity index 100%
rename from dbms/Storages/StorageReplicatedMergeTree.h
rename to src/Storages/StorageReplicatedMergeTree.h
diff --git a/dbms/Storages/StorageS3.cpp b/src/Storages/StorageS3.cpp
similarity index 100%
rename from dbms/Storages/StorageS3.cpp
rename to src/Storages/StorageS3.cpp
diff --git a/dbms/Storages/StorageS3.h b/src/Storages/StorageS3.h
similarity index 100%
rename from dbms/Storages/StorageS3.h
rename to src/Storages/StorageS3.h
diff --git a/dbms/Storages/StorageSet.cpp b/src/Storages/StorageSet.cpp
similarity index 100%
rename from dbms/Storages/StorageSet.cpp
rename to src/Storages/StorageSet.cpp
diff --git a/dbms/Storages/StorageSet.h b/src/Storages/StorageSet.h
similarity index 100%
rename from dbms/Storages/StorageSet.h
rename to src/Storages/StorageSet.h
diff --git a/dbms/Storages/StorageStripeLog.cpp b/src/Storages/StorageStripeLog.cpp
similarity index 100%
rename from dbms/Storages/StorageStripeLog.cpp
rename to src/Storages/StorageStripeLog.cpp
diff --git a/dbms/Storages/StorageStripeLog.h b/src/Storages/StorageStripeLog.h
similarity index 100%
rename from dbms/Storages/StorageStripeLog.h
rename to src/Storages/StorageStripeLog.h
diff --git a/dbms/Storages/StorageTinyLog.cpp b/src/Storages/StorageTinyLog.cpp
similarity index 100%
rename from dbms/Storages/StorageTinyLog.cpp
rename to src/Storages/StorageTinyLog.cpp
diff --git a/dbms/Storages/StorageTinyLog.h b/src/Storages/StorageTinyLog.h
similarity index 100%
rename from dbms/Storages/StorageTinyLog.h
rename to src/Storages/StorageTinyLog.h
diff --git a/dbms/Storages/StorageURL.cpp b/src/Storages/StorageURL.cpp
similarity index 100%
rename from dbms/Storages/StorageURL.cpp
rename to src/Storages/StorageURL.cpp
diff --git a/dbms/Storages/StorageURL.h b/src/Storages/StorageURL.h
similarity index 100%
rename from dbms/Storages/StorageURL.h
rename to src/Storages/StorageURL.h
diff --git a/dbms/Storages/StorageValues.cpp b/src/Storages/StorageValues.cpp
similarity index 100%
rename from dbms/Storages/StorageValues.cpp
rename to src/Storages/StorageValues.cpp
diff --git a/dbms/Storages/StorageValues.h b/src/Storages/StorageValues.h
similarity index 100%
rename from dbms/Storages/StorageValues.h
rename to src/Storages/StorageValues.h
diff --git a/dbms/Storages/StorageView.cpp b/src/Storages/StorageView.cpp
similarity index 100%
rename from dbms/Storages/StorageView.cpp
rename to src/Storages/StorageView.cpp
diff --git a/dbms/Storages/StorageView.h b/src/Storages/StorageView.h
similarity index 100%
rename from dbms/Storages/StorageView.h
rename to src/Storages/StorageView.h
diff --git a/dbms/Storages/StorageXDBC.cpp b/src/Storages/StorageXDBC.cpp
similarity index 100%
rename from dbms/Storages/StorageXDBC.cpp
rename to src/Storages/StorageXDBC.cpp
diff --git a/dbms/Storages/StorageXDBC.h b/src/Storages/StorageXDBC.h
similarity index 100%
rename from dbms/Storages/StorageXDBC.h
rename to src/Storages/StorageXDBC.h
diff --git a/dbms/Storages/System/CMakeLists.txt b/src/Storages/System/CMakeLists.txt
similarity index 100%
rename from dbms/Storages/System/CMakeLists.txt
rename to src/Storages/System/CMakeLists.txt
diff --git a/dbms/Storages/System/IStorageSystemOneBlock.h b/src/Storages/System/IStorageSystemOneBlock.h
similarity index 100%
rename from dbms/Storages/System/IStorageSystemOneBlock.h
rename to src/Storages/System/IStorageSystemOneBlock.h
diff --git a/dbms/Storages/System/StorageSystemAggregateFunctionCombinators.cpp b/src/Storages/System/StorageSystemAggregateFunctionCombinators.cpp
similarity index 100%
rename from dbms/Storages/System/StorageSystemAggregateFunctionCombinators.cpp
rename to src/Storages/System/StorageSystemAggregateFunctionCombinators.cpp
diff --git a/dbms/Storages/System/StorageSystemAggregateFunctionCombinators.h b/src/Storages/System/StorageSystemAggregateFunctionCombinators.h
similarity index 100%
rename from dbms/Storages/System/StorageSystemAggregateFunctionCombinators.h
rename to src/Storages/System/StorageSystemAggregateFunctionCombinators.h
diff --git a/dbms/Storages/System/StorageSystemAsynchronousMetrics.cpp b/src/Storages/System/StorageSystemAsynchronousMetrics.cpp
similarity index 100%
rename from dbms/Storages/System/StorageSystemAsynchronousMetrics.cpp
rename to src/Storages/System/StorageSystemAsynchronousMetrics.cpp
diff --git a/dbms/Storages/System/StorageSystemAsynchronousMetrics.h b/src/Storages/System/StorageSystemAsynchronousMetrics.h
similarity index 100%
rename from dbms/Storages/System/StorageSystemAsynchronousMetrics.h
rename to src/Storages/System/StorageSystemAsynchronousMetrics.h
diff --git a/dbms/Storages/System/StorageSystemBuildOptions.cpp b/src/Storages/System/StorageSystemBuildOptions.cpp
similarity index 100%
rename from dbms/Storages/System/StorageSystemBuildOptions.cpp
rename to src/Storages/System/StorageSystemBuildOptions.cpp
diff --git a/dbms/Storages/System/StorageSystemBuildOptions.generated.cpp.in b/src/Storages/System/StorageSystemBuildOptions.generated.cpp.in
similarity index 100%
rename from dbms/Storages/System/StorageSystemBuildOptions.generated.cpp.in
rename to src/Storages/System/StorageSystemBuildOptions.generated.cpp.in
diff --git a/dbms/Storages/System/StorageSystemBuildOptions.h b/src/Storages/System/StorageSystemBuildOptions.h
similarity index 100%
rename from dbms/Storages/System/StorageSystemBuildOptions.h
rename to src/Storages/System/StorageSystemBuildOptions.h
diff --git a/dbms/Storages/System/StorageSystemClusters.cpp b/src/Storages/System/StorageSystemClusters.cpp
similarity index 100%
rename from dbms/Storages/System/StorageSystemClusters.cpp
rename to src/Storages/System/StorageSystemClusters.cpp
diff --git a/dbms/Storages/System/StorageSystemClusters.h b/src/Storages/System/StorageSystemClusters.h
similarity index 100%
rename from dbms/Storages/System/StorageSystemClusters.h
rename to src/Storages/System/StorageSystemClusters.h
diff --git a/dbms/Storages/System/StorageSystemCollations.cpp b/src/Storages/System/StorageSystemCollations.cpp
similarity index 100%
rename from dbms/Storages/System/StorageSystemCollations.cpp
rename to src/Storages/System/StorageSystemCollations.cpp
diff --git a/dbms/Storages/System/StorageSystemCollations.h b/src/Storages/System/StorageSystemCollations.h
similarity index 100%
rename from dbms/Storages/System/StorageSystemCollations.h
rename to src/Storages/System/StorageSystemCollations.h
diff --git a/dbms/Storages/System/StorageSystemColumns.cpp b/src/Storages/System/StorageSystemColumns.cpp
similarity index 100%
rename from dbms/Storages/System/StorageSystemColumns.cpp
rename to src/Storages/System/StorageSystemColumns.cpp
diff --git a/dbms/Storages/System/StorageSystemColumns.h b/src/Storages/System/StorageSystemColumns.h
similarity index 100%
rename from dbms/Storages/System/StorageSystemColumns.h
rename to src/Storages/System/StorageSystemColumns.h
diff --git a/dbms/Storages/System/StorageSystemContributors.cpp b/src/Storages/System/StorageSystemContributors.cpp
similarity index 100%
rename from dbms/Storages/System/StorageSystemContributors.cpp
rename to src/Storages/System/StorageSystemContributors.cpp
diff --git a/dbms/Storages/System/StorageSystemContributors.generated.cpp b/src/Storages/System/StorageSystemContributors.generated.cpp
similarity index 100%
rename from dbms/Storages/System/StorageSystemContributors.generated.cpp
rename to src/Storages/System/StorageSystemContributors.generated.cpp
diff --git a/dbms/Storages/System/StorageSystemContributors.h b/src/Storages/System/StorageSystemContributors.h
similarity index 100%
rename from dbms/Storages/System/StorageSystemContributors.h
rename to src/Storages/System/StorageSystemContributors.h
diff --git a/dbms/Storages/System/StorageSystemContributors.sh b/src/Storages/System/StorageSystemContributors.sh
similarity index 100%
rename from dbms/Storages/System/StorageSystemContributors.sh
rename to src/Storages/System/StorageSystemContributors.sh
diff --git a/dbms/Storages/System/StorageSystemDataTypeFamilies.cpp b/src/Storages/System/StorageSystemDataTypeFamilies.cpp
similarity index 100%
rename from dbms/Storages/System/StorageSystemDataTypeFamilies.cpp
rename to src/Storages/System/StorageSystemDataTypeFamilies.cpp
diff --git a/dbms/Storages/System/StorageSystemDataTypeFamilies.h b/src/Storages/System/StorageSystemDataTypeFamilies.h
similarity index 100%
rename from dbms/Storages/System/StorageSystemDataTypeFamilies.h
rename to src/Storages/System/StorageSystemDataTypeFamilies.h
diff --git a/dbms/Storages/System/StorageSystemDatabases.cpp b/src/Storages/System/StorageSystemDatabases.cpp
similarity index 100%
rename from dbms/Storages/System/StorageSystemDatabases.cpp
rename to src/Storages/System/StorageSystemDatabases.cpp
diff --git a/dbms/Storages/System/StorageSystemDatabases.h b/src/Storages/System/StorageSystemDatabases.h
similarity index 100%
rename from dbms/Storages/System/StorageSystemDatabases.h
rename to src/Storages/System/StorageSystemDatabases.h
diff --git a/dbms/Storages/System/StorageSystemDetachedParts.cpp b/src/Storages/System/StorageSystemDetachedParts.cpp
similarity index 100%
rename from dbms/Storages/System/StorageSystemDetachedParts.cpp
rename to src/Storages/System/StorageSystemDetachedParts.cpp
diff --git a/dbms/Storages/System/StorageSystemDetachedParts.h b/src/Storages/System/StorageSystemDetachedParts.h
similarity index 100%
rename from dbms/Storages/System/StorageSystemDetachedParts.h
rename to src/Storages/System/StorageSystemDetachedParts.h
diff --git a/dbms/Storages/System/StorageSystemDictionaries.cpp b/src/Storages/System/StorageSystemDictionaries.cpp
similarity index 100%
rename from dbms/Storages/System/StorageSystemDictionaries.cpp
rename to src/Storages/System/StorageSystemDictionaries.cpp
diff --git a/dbms/Storages/System/StorageSystemDictionaries.h b/src/Storages/System/StorageSystemDictionaries.h
similarity index 100%
rename from dbms/Storages/System/StorageSystemDictionaries.h
rename to src/Storages/System/StorageSystemDictionaries.h
diff --git a/dbms/Storages/System/StorageSystemDisks.cpp b/src/Storages/System/StorageSystemDisks.cpp
similarity index 100%
rename from dbms/Storages/System/StorageSystemDisks.cpp
rename to src/Storages/System/StorageSystemDisks.cpp
diff --git a/dbms/Storages/System/StorageSystemDisks.h b/src/Storages/System/StorageSystemDisks.h
similarity index 100%
rename from dbms/Storages/System/StorageSystemDisks.h
rename to src/Storages/System/StorageSystemDisks.h
diff --git a/dbms/Storages/System/StorageSystemEvents.cpp b/src/Storages/System/StorageSystemEvents.cpp
similarity index 100%
rename from dbms/Storages/System/StorageSystemEvents.cpp
rename to src/Storages/System/StorageSystemEvents.cpp
diff --git a/dbms/Storages/System/StorageSystemEvents.h b/src/Storages/System/StorageSystemEvents.h
similarity index 100%
rename from dbms/Storages/System/StorageSystemEvents.h
rename to src/Storages/System/StorageSystemEvents.h
diff --git a/dbms/Storages/System/StorageSystemFormats.cpp b/src/Storages/System/StorageSystemFormats.cpp
similarity index 100%
rename from dbms/Storages/System/StorageSystemFormats.cpp
rename to src/Storages/System/StorageSystemFormats.cpp
diff --git a/dbms/Storages/System/StorageSystemFormats.h b/src/Storages/System/StorageSystemFormats.h
similarity index 100%
rename from dbms/Storages/System/StorageSystemFormats.h
rename to src/Storages/System/StorageSystemFormats.h
diff --git a/dbms/Storages/System/StorageSystemFunctions.cpp b/src/Storages/System/StorageSystemFunctions.cpp
similarity index 100%
rename from dbms/Storages/System/StorageSystemFunctions.cpp
rename to src/Storages/System/StorageSystemFunctions.cpp
diff --git a/dbms/Storages/System/StorageSystemFunctions.h b/src/Storages/System/StorageSystemFunctions.h
similarity index 100%
rename from dbms/Storages/System/StorageSystemFunctions.h
rename to src/Storages/System/StorageSystemFunctions.h
diff --git a/dbms/Storages/System/StorageSystemGraphite.cpp b/src/Storages/System/StorageSystemGraphite.cpp
similarity index 100%
rename from dbms/Storages/System/StorageSystemGraphite.cpp
rename to src/Storages/System/StorageSystemGraphite.cpp
diff --git a/dbms/Storages/System/StorageSystemGraphite.h b/src/Storages/System/StorageSystemGraphite.h
similarity index 100%
rename from dbms/Storages/System/StorageSystemGraphite.h
rename to src/Storages/System/StorageSystemGraphite.h
diff --git a/dbms/Storages/System/StorageSystemMacros.cpp b/src/Storages/System/StorageSystemMacros.cpp
similarity index 100%
rename from dbms/Storages/System/StorageSystemMacros.cpp
rename to src/Storages/System/StorageSystemMacros.cpp
diff --git a/dbms/Storages/System/StorageSystemMacros.h b/src/Storages/System/StorageSystemMacros.h
similarity index 100%
rename from dbms/Storages/System/StorageSystemMacros.h
rename to src/Storages/System/StorageSystemMacros.h
diff --git a/dbms/Storages/System/StorageSystemMergeTreeSettings.cpp b/src/Storages/System/StorageSystemMergeTreeSettings.cpp
similarity index 100%
rename from dbms/Storages/System/StorageSystemMergeTreeSettings.cpp
rename to src/Storages/System/StorageSystemMergeTreeSettings.cpp
diff --git a/dbms/Storages/System/StorageSystemMergeTreeSettings.h b/src/Storages/System/StorageSystemMergeTreeSettings.h
similarity index 100%
rename from dbms/Storages/System/StorageSystemMergeTreeSettings.h
rename to src/Storages/System/StorageSystemMergeTreeSettings.h
diff --git a/dbms/Storages/System/StorageSystemMerges.cpp b/src/Storages/System/StorageSystemMerges.cpp
similarity index 100%
rename from dbms/Storages/System/StorageSystemMerges.cpp
rename to src/Storages/System/StorageSystemMerges.cpp
diff --git a/dbms/Storages/System/StorageSystemMerges.h b/src/Storages/System/StorageSystemMerges.h
similarity index 100%
rename from dbms/Storages/System/StorageSystemMerges.h
rename to src/Storages/System/StorageSystemMerges.h
diff --git a/dbms/Storages/System/StorageSystemMetrics.cpp b/src/Storages/System/StorageSystemMetrics.cpp
similarity index 100%
rename from dbms/Storages/System/StorageSystemMetrics.cpp
rename to src/Storages/System/StorageSystemMetrics.cpp
diff --git a/dbms/Storages/System/StorageSystemMetrics.h b/src/Storages/System/StorageSystemMetrics.h
similarity index 100%
rename from dbms/Storages/System/StorageSystemMetrics.h
rename to src/Storages/System/StorageSystemMetrics.h
diff --git a/dbms/Storages/System/StorageSystemModels.cpp b/src/Storages/System/StorageSystemModels.cpp
similarity index 100%
rename from dbms/Storages/System/StorageSystemModels.cpp
rename to src/Storages/System/StorageSystemModels.cpp
diff --git a/dbms/Storages/System/StorageSystemModels.h b/src/Storages/System/StorageSystemModels.h
similarity index 100%
rename from dbms/Storages/System/StorageSystemModels.h
rename to src/Storages/System/StorageSystemModels.h
diff --git a/dbms/Storages/System/StorageSystemMutations.cpp b/src/Storages/System/StorageSystemMutations.cpp
similarity index 100%
rename from dbms/Storages/System/StorageSystemMutations.cpp
rename to src/Storages/System/StorageSystemMutations.cpp
diff --git a/dbms/Storages/System/StorageSystemMutations.h b/src/Storages/System/StorageSystemMutations.h
similarity index 100%
rename from dbms/Storages/System/StorageSystemMutations.h
rename to src/Storages/System/StorageSystemMutations.h
diff --git a/dbms/Storages/System/StorageSystemNumbers.cpp b/src/Storages/System/StorageSystemNumbers.cpp
similarity index 100%
rename from dbms/Storages/System/StorageSystemNumbers.cpp
rename to src/Storages/System/StorageSystemNumbers.cpp
diff --git a/dbms/Storages/System/StorageSystemNumbers.h b/src/Storages/System/StorageSystemNumbers.h
similarity index 100%
rename from dbms/Storages/System/StorageSystemNumbers.h
rename to src/Storages/System/StorageSystemNumbers.h
diff --git a/dbms/Storages/System/StorageSystemOne.cpp b/src/Storages/System/StorageSystemOne.cpp
similarity index 100%
rename from dbms/Storages/System/StorageSystemOne.cpp
rename to src/Storages/System/StorageSystemOne.cpp
diff --git a/dbms/Storages/System/StorageSystemOne.h b/src/Storages/System/StorageSystemOne.h
similarity index 100%
rename from dbms/Storages/System/StorageSystemOne.h
rename to src/Storages/System/StorageSystemOne.h
diff --git a/dbms/Storages/System/StorageSystemParts.cpp b/src/Storages/System/StorageSystemParts.cpp
similarity index 100%
rename from dbms/Storages/System/StorageSystemParts.cpp
rename to src/Storages/System/StorageSystemParts.cpp
diff --git a/dbms/Storages/System/StorageSystemParts.h b/src/Storages/System/StorageSystemParts.h
similarity index 100%
rename from dbms/Storages/System/StorageSystemParts.h
rename to src/Storages/System/StorageSystemParts.h
diff --git a/dbms/Storages/System/StorageSystemPartsBase.cpp b/src/Storages/System/StorageSystemPartsBase.cpp
similarity index 100%
rename from dbms/Storages/System/StorageSystemPartsBase.cpp
rename to src/Storages/System/StorageSystemPartsBase.cpp
diff --git a/dbms/Storages/System/StorageSystemPartsBase.h b/src/Storages/System/StorageSystemPartsBase.h
similarity index 100%
rename from dbms/Storages/System/StorageSystemPartsBase.h
rename to src/Storages/System/StorageSystemPartsBase.h
diff --git a/dbms/Storages/System/StorageSystemPartsColumns.cpp b/src/Storages/System/StorageSystemPartsColumns.cpp
similarity index 100%
rename from dbms/Storages/System/StorageSystemPartsColumns.cpp
rename to src/Storages/System/StorageSystemPartsColumns.cpp
diff --git a/dbms/Storages/System/StorageSystemPartsColumns.h b/src/Storages/System/StorageSystemPartsColumns.h
similarity index 100%
rename from dbms/Storages/System/StorageSystemPartsColumns.h
rename to src/Storages/System/StorageSystemPartsColumns.h
diff --git a/dbms/Storages/System/StorageSystemProcesses.cpp b/src/Storages/System/StorageSystemProcesses.cpp
similarity index 100%
rename from dbms/Storages/System/StorageSystemProcesses.cpp
rename to src/Storages/System/StorageSystemProcesses.cpp
diff --git a/dbms/Storages/System/StorageSystemProcesses.h b/src/Storages/System/StorageSystemProcesses.h
similarity index 100%
rename from dbms/Storages/System/StorageSystemProcesses.h
rename to src/Storages/System/StorageSystemProcesses.h
diff --git a/dbms/Storages/System/StorageSystemQuotaUsage.cpp b/src/Storages/System/StorageSystemQuotaUsage.cpp
similarity index 100%
rename from dbms/Storages/System/StorageSystemQuotaUsage.cpp
rename to src/Storages/System/StorageSystemQuotaUsage.cpp
diff --git a/dbms/Storages/System/StorageSystemQuotaUsage.h b/src/Storages/System/StorageSystemQuotaUsage.h
similarity index 100%
rename from dbms/Storages/System/StorageSystemQuotaUsage.h
rename to src/Storages/System/StorageSystemQuotaUsage.h
diff --git a/dbms/Storages/System/StorageSystemQuotas.cpp b/src/Storages/System/StorageSystemQuotas.cpp
similarity index 100%
rename from dbms/Storages/System/StorageSystemQuotas.cpp
rename to src/Storages/System/StorageSystemQuotas.cpp
diff --git a/dbms/Storages/System/StorageSystemQuotas.h b/src/Storages/System/StorageSystemQuotas.h
similarity index 100%
rename from dbms/Storages/System/StorageSystemQuotas.h
rename to src/Storages/System/StorageSystemQuotas.h
diff --git a/dbms/Storages/System/StorageSystemReplicas.cpp b/src/Storages/System/StorageSystemReplicas.cpp
similarity index 100%
rename from dbms/Storages/System/StorageSystemReplicas.cpp
rename to src/Storages/System/StorageSystemReplicas.cpp
diff --git a/dbms/Storages/System/StorageSystemReplicas.h b/src/Storages/System/StorageSystemReplicas.h
similarity index 100%
rename from dbms/Storages/System/StorageSystemReplicas.h
rename to src/Storages/System/StorageSystemReplicas.h
diff --git a/dbms/Storages/System/StorageSystemReplicationQueue.cpp b/src/Storages/System/StorageSystemReplicationQueue.cpp
similarity index 100%
rename from dbms/Storages/System/StorageSystemReplicationQueue.cpp
rename to src/Storages/System/StorageSystemReplicationQueue.cpp
diff --git a/dbms/Storages/System/StorageSystemReplicationQueue.h b/src/Storages/System/StorageSystemReplicationQueue.h
similarity index 100%
rename from dbms/Storages/System/StorageSystemReplicationQueue.h
rename to src/Storages/System/StorageSystemReplicationQueue.h
diff --git a/dbms/Storages/System/StorageSystemRowPolicies.cpp b/src/Storages/System/StorageSystemRowPolicies.cpp
similarity index 100%
rename from dbms/Storages/System/StorageSystemRowPolicies.cpp
rename to src/Storages/System/StorageSystemRowPolicies.cpp
diff --git a/dbms/Storages/System/StorageSystemRowPolicies.h b/src/Storages/System/StorageSystemRowPolicies.h
similarity index 100%
rename from dbms/Storages/System/StorageSystemRowPolicies.h
rename to src/Storages/System/StorageSystemRowPolicies.h
diff --git a/dbms/Storages/System/StorageSystemSettings.cpp b/src/Storages/System/StorageSystemSettings.cpp
similarity index 100%
rename from dbms/Storages/System/StorageSystemSettings.cpp
rename to src/Storages/System/StorageSystemSettings.cpp
diff --git a/dbms/Storages/System/StorageSystemSettings.h b/src/Storages/System/StorageSystemSettings.h
similarity index 100%
rename from dbms/Storages/System/StorageSystemSettings.h
rename to src/Storages/System/StorageSystemSettings.h
diff --git a/dbms/Storages/System/StorageSystemStackTrace.cpp b/src/Storages/System/StorageSystemStackTrace.cpp
similarity index 100%
rename from dbms/Storages/System/StorageSystemStackTrace.cpp
rename to src/Storages/System/StorageSystemStackTrace.cpp
diff --git a/dbms/Storages/System/StorageSystemStackTrace.h b/src/Storages/System/StorageSystemStackTrace.h
similarity index 100%
rename from dbms/Storages/System/StorageSystemStackTrace.h
rename to src/Storages/System/StorageSystemStackTrace.h
diff --git a/dbms/Storages/System/StorageSystemStoragePolicies.cpp b/src/Storages/System/StorageSystemStoragePolicies.cpp
similarity index 100%
rename from dbms/Storages/System/StorageSystemStoragePolicies.cpp
rename to src/Storages/System/StorageSystemStoragePolicies.cpp
diff --git a/dbms/Storages/System/StorageSystemStoragePolicies.h b/src/Storages/System/StorageSystemStoragePolicies.h
similarity index 100%
rename from dbms/Storages/System/StorageSystemStoragePolicies.h
rename to src/Storages/System/StorageSystemStoragePolicies.h
diff --git a/dbms/Storages/System/StorageSystemTableEngines.cpp b/src/Storages/System/StorageSystemTableEngines.cpp
similarity index 100%
rename from dbms/Storages/System/StorageSystemTableEngines.cpp
rename to src/Storages/System/StorageSystemTableEngines.cpp
diff --git a/dbms/Storages/System/StorageSystemTableEngines.h b/src/Storages/System/StorageSystemTableEngines.h
similarity index 100%
rename from dbms/Storages/System/StorageSystemTableEngines.h
rename to src/Storages/System/StorageSystemTableEngines.h
diff --git a/dbms/Storages/System/StorageSystemTableFunctions.cpp b/src/Storages/System/StorageSystemTableFunctions.cpp
similarity index 100%
rename from dbms/Storages/System/StorageSystemTableFunctions.cpp
rename to src/Storages/System/StorageSystemTableFunctions.cpp
diff --git a/dbms/Storages/System/StorageSystemTableFunctions.h b/src/Storages/System/StorageSystemTableFunctions.h
similarity index 100%
rename from dbms/Storages/System/StorageSystemTableFunctions.h
rename to src/Storages/System/StorageSystemTableFunctions.h
diff --git a/dbms/Storages/System/StorageSystemTables.cpp b/src/Storages/System/StorageSystemTables.cpp
similarity index 100%
rename from dbms/Storages/System/StorageSystemTables.cpp
rename to src/Storages/System/StorageSystemTables.cpp
diff --git a/dbms/Storages/System/StorageSystemTables.h b/src/Storages/System/StorageSystemTables.h
similarity index 100%
rename from dbms/Storages/System/StorageSystemTables.h
rename to src/Storages/System/StorageSystemTables.h
diff --git a/dbms/Storages/System/StorageSystemZeros.cpp b/src/Storages/System/StorageSystemZeros.cpp
similarity index 100%
rename from dbms/Storages/System/StorageSystemZeros.cpp
rename to src/Storages/System/StorageSystemZeros.cpp
diff --git a/dbms/Storages/System/StorageSystemZeros.h b/src/Storages/System/StorageSystemZeros.h
similarity index 100%
rename from dbms/Storages/System/StorageSystemZeros.h
rename to src/Storages/System/StorageSystemZeros.h
diff --git a/dbms/Storages/System/StorageSystemZooKeeper.cpp b/src/Storages/System/StorageSystemZooKeeper.cpp
similarity index 100%
rename from dbms/Storages/System/StorageSystemZooKeeper.cpp
rename to src/Storages/System/StorageSystemZooKeeper.cpp
diff --git a/dbms/Storages/System/StorageSystemZooKeeper.h b/src/Storages/System/StorageSystemZooKeeper.h
similarity index 100%
rename from dbms/Storages/System/StorageSystemZooKeeper.h
rename to src/Storages/System/StorageSystemZooKeeper.h
diff --git a/dbms/Storages/System/attachSystemTables.cpp b/src/Storages/System/attachSystemTables.cpp
similarity index 100%
rename from dbms/Storages/System/attachSystemTables.cpp
rename to src/Storages/System/attachSystemTables.cpp
diff --git a/dbms/Storages/System/attachSystemTables.h b/src/Storages/System/attachSystemTables.h
similarity index 100%
rename from dbms/Storages/System/attachSystemTables.h
rename to src/Storages/System/attachSystemTables.h
diff --git a/dbms/Storages/TableStructureLockHolder.h b/src/Storages/TableStructureLockHolder.h
similarity index 100%
rename from dbms/Storages/TableStructureLockHolder.h
rename to src/Storages/TableStructureLockHolder.h
diff --git a/dbms/Storages/VirtualColumnUtils.cpp b/src/Storages/VirtualColumnUtils.cpp
similarity index 100%
rename from dbms/Storages/VirtualColumnUtils.cpp
rename to src/Storages/VirtualColumnUtils.cpp
diff --git a/dbms/Storages/VirtualColumnUtils.h b/src/Storages/VirtualColumnUtils.h
similarity index 100%
rename from dbms/Storages/VirtualColumnUtils.h
rename to src/Storages/VirtualColumnUtils.h
diff --git a/dbms/Storages/getStructureOfRemoteTable.cpp b/src/Storages/getStructureOfRemoteTable.cpp
similarity index 100%
rename from dbms/Storages/getStructureOfRemoteTable.cpp
rename to src/Storages/getStructureOfRemoteTable.cpp
diff --git a/dbms/Storages/getStructureOfRemoteTable.h b/src/Storages/getStructureOfRemoteTable.h
similarity index 100%
rename from dbms/Storages/getStructureOfRemoteTable.h
rename to src/Storages/getStructureOfRemoteTable.h
diff --git a/dbms/Storages/registerStorages.cpp b/src/Storages/registerStorages.cpp
similarity index 100%
rename from dbms/Storages/registerStorages.cpp
rename to src/Storages/registerStorages.cpp
diff --git a/dbms/Storages/registerStorages.h b/src/Storages/registerStorages.h
similarity index 100%
rename from dbms/Storages/registerStorages.h
rename
to src/Storages/registerStorages.h diff --git a/dbms/Storages/tests/CMakeLists.txt b/src/Storages/tests/CMakeLists.txt similarity index 100% rename from dbms/Storages/tests/CMakeLists.txt rename to src/Storages/tests/CMakeLists.txt diff --git a/dbms/Storages/tests/active_parts.py b/src/Storages/tests/active_parts.py similarity index 100% rename from dbms/Storages/tests/active_parts.py rename to src/Storages/tests/active_parts.py diff --git a/dbms/Storages/tests/get_abandonable_lock_in_all_partitions.cpp b/src/Storages/tests/get_abandonable_lock_in_all_partitions.cpp similarity index 100% rename from dbms/Storages/tests/get_abandonable_lock_in_all_partitions.cpp rename to src/Storages/tests/get_abandonable_lock_in_all_partitions.cpp diff --git a/dbms/Storages/tests/get_current_inserts_in_replicated.cpp b/src/Storages/tests/get_current_inserts_in_replicated.cpp similarity index 100% rename from dbms/Storages/tests/get_current_inserts_in_replicated.cpp rename to src/Storages/tests/get_current_inserts_in_replicated.cpp diff --git a/dbms/Storages/tests/gtest_aux_funcs_for_adaptive_granularity.cpp b/src/Storages/tests/gtest_aux_funcs_for_adaptive_granularity.cpp similarity index 100% rename from dbms/Storages/tests/gtest_aux_funcs_for_adaptive_granularity.cpp rename to src/Storages/tests/gtest_aux_funcs_for_adaptive_granularity.cpp diff --git a/dbms/Storages/tests/gtest_row_source_bits_test.cpp b/src/Storages/tests/gtest_row_source_bits_test.cpp similarity index 100% rename from dbms/Storages/tests/gtest_row_source_bits_test.cpp rename to src/Storages/tests/gtest_row_source_bits_test.cpp diff --git a/dbms/Storages/tests/gtest_storage_log.cpp b/src/Storages/tests/gtest_storage_log.cpp similarity index 100% rename from dbms/Storages/tests/gtest_storage_log.cpp rename to src/Storages/tests/gtest_storage_log.cpp diff --git a/dbms/Storages/tests/gtest_transform_query_for_external_database.cpp b/src/Storages/tests/gtest_transform_query_for_external_database.cpp similarity index 100% rename from dbms/Storages/tests/gtest_transform_query_for_external_database.cpp rename to src/Storages/tests/gtest_transform_query_for_external_database.cpp diff --git a/dbms/Storages/tests/merge_selector.cpp b/src/Storages/tests/merge_selector.cpp similarity index 100% rename from dbms/Storages/tests/merge_selector.cpp rename to src/Storages/tests/merge_selector.cpp diff --git a/dbms/Storages/tests/merge_selector2.cpp b/src/Storages/tests/merge_selector2.cpp similarity index 100% rename from dbms/Storages/tests/merge_selector2.cpp rename to src/Storages/tests/merge_selector2.cpp diff --git a/dbms/Storages/tests/part_name.cpp b/src/Storages/tests/part_name.cpp similarity index 100% rename from dbms/Storages/tests/part_name.cpp rename to src/Storages/tests/part_name.cpp diff --git a/dbms/Storages/tests/remove_symlink_directory.cpp b/src/Storages/tests/remove_symlink_directory.cpp similarity index 100% rename from dbms/Storages/tests/remove_symlink_directory.cpp rename to src/Storages/tests/remove_symlink_directory.cpp diff --git a/dbms/Storages/tests/storage_log.cpp b/src/Storages/tests/storage_log.cpp similarity index 100% rename from dbms/Storages/tests/storage_log.cpp rename to src/Storages/tests/storage_log.cpp diff --git a/dbms/Storages/tests/system_numbers.cpp b/src/Storages/tests/system_numbers.cpp similarity index 100% rename from dbms/Storages/tests/system_numbers.cpp rename to src/Storages/tests/system_numbers.cpp diff --git a/dbms/Storages/tests/test_alter_distributed.sql 
b/src/Storages/tests/test_alter_distributed.sql similarity index 100% rename from dbms/Storages/tests/test_alter_distributed.sql rename to src/Storages/tests/test_alter_distributed.sql diff --git a/dbms/Storages/tests/test_alter_merge.sql b/src/Storages/tests/test_alter_merge.sql similarity index 100% rename from dbms/Storages/tests/test_alter_merge.sql rename to src/Storages/tests/test_alter_merge.sql diff --git a/dbms/Storages/tests/test_alter_merge_tree.sql b/src/Storages/tests/test_alter_merge_tree.sql similarity index 100% rename from dbms/Storages/tests/test_alter_merge_tree.sql rename to src/Storages/tests/test_alter_merge_tree.sql diff --git a/dbms/Storages/tests/transform_part_zk_nodes.cpp b/src/Storages/tests/transform_part_zk_nodes.cpp similarity index 100% rename from dbms/Storages/tests/transform_part_zk_nodes.cpp rename to src/Storages/tests/transform_part_zk_nodes.cpp diff --git a/dbms/Storages/transformQueryForExternalDatabase.cpp b/src/Storages/transformQueryForExternalDatabase.cpp similarity index 100% rename from dbms/Storages/transformQueryForExternalDatabase.cpp rename to src/Storages/transformQueryForExternalDatabase.cpp diff --git a/dbms/Storages/transformQueryForExternalDatabase.h b/src/Storages/transformQueryForExternalDatabase.h similarity index 100% rename from dbms/Storages/transformQueryForExternalDatabase.h rename to src/Storages/transformQueryForExternalDatabase.h diff --git a/dbms/TableFunctions/CMakeLists.txt b/src/TableFunctions/CMakeLists.txt similarity index 100% rename from dbms/TableFunctions/CMakeLists.txt rename to src/TableFunctions/CMakeLists.txt diff --git a/dbms/TableFunctions/ITableFunction.cpp b/src/TableFunctions/ITableFunction.cpp similarity index 100% rename from dbms/TableFunctions/ITableFunction.cpp rename to src/TableFunctions/ITableFunction.cpp diff --git a/dbms/TableFunctions/ITableFunction.h b/src/TableFunctions/ITableFunction.h similarity index 100% rename from dbms/TableFunctions/ITableFunction.h rename to src/TableFunctions/ITableFunction.h diff --git a/dbms/TableFunctions/ITableFunctionFileLike.cpp b/src/TableFunctions/ITableFunctionFileLike.cpp similarity index 100% rename from dbms/TableFunctions/ITableFunctionFileLike.cpp rename to src/TableFunctions/ITableFunctionFileLike.cpp diff --git a/dbms/TableFunctions/ITableFunctionFileLike.h b/src/TableFunctions/ITableFunctionFileLike.h similarity index 100% rename from dbms/TableFunctions/ITableFunctionFileLike.h rename to src/TableFunctions/ITableFunctionFileLike.h diff --git a/dbms/TableFunctions/ITableFunctionXDBC.cpp b/src/TableFunctions/ITableFunctionXDBC.cpp similarity index 100% rename from dbms/TableFunctions/ITableFunctionXDBC.cpp rename to src/TableFunctions/ITableFunctionXDBC.cpp diff --git a/dbms/TableFunctions/ITableFunctionXDBC.h b/src/TableFunctions/ITableFunctionXDBC.h similarity index 100% rename from dbms/TableFunctions/ITableFunctionXDBC.h rename to src/TableFunctions/ITableFunctionXDBC.h diff --git a/dbms/TableFunctions/TableFunctionFactory.cpp b/src/TableFunctions/TableFunctionFactory.cpp similarity index 100% rename from dbms/TableFunctions/TableFunctionFactory.cpp rename to src/TableFunctions/TableFunctionFactory.cpp diff --git a/dbms/TableFunctions/TableFunctionFactory.h b/src/TableFunctions/TableFunctionFactory.h similarity index 100% rename from dbms/TableFunctions/TableFunctionFactory.h rename to src/TableFunctions/TableFunctionFactory.h diff --git a/dbms/TableFunctions/TableFunctionFile.cpp b/src/TableFunctions/TableFunctionFile.cpp similarity index 100% 
rename from dbms/TableFunctions/TableFunctionFile.cpp rename to src/TableFunctions/TableFunctionFile.cpp diff --git a/dbms/TableFunctions/TableFunctionFile.h b/src/TableFunctions/TableFunctionFile.h similarity index 100% rename from dbms/TableFunctions/TableFunctionFile.h rename to src/TableFunctions/TableFunctionFile.h diff --git a/dbms/TableFunctions/TableFunctionGenerateRandom.cpp b/src/TableFunctions/TableFunctionGenerateRandom.cpp similarity index 100% rename from dbms/TableFunctions/TableFunctionGenerateRandom.cpp rename to src/TableFunctions/TableFunctionGenerateRandom.cpp diff --git a/dbms/TableFunctions/TableFunctionGenerateRandom.h b/src/TableFunctions/TableFunctionGenerateRandom.h similarity index 100% rename from dbms/TableFunctions/TableFunctionGenerateRandom.h rename to src/TableFunctions/TableFunctionGenerateRandom.h diff --git a/dbms/TableFunctions/TableFunctionHDFS.cpp b/src/TableFunctions/TableFunctionHDFS.cpp similarity index 100% rename from dbms/TableFunctions/TableFunctionHDFS.cpp rename to src/TableFunctions/TableFunctionHDFS.cpp diff --git a/dbms/TableFunctions/TableFunctionHDFS.h b/src/TableFunctions/TableFunctionHDFS.h similarity index 100% rename from dbms/TableFunctions/TableFunctionHDFS.h rename to src/TableFunctions/TableFunctionHDFS.h diff --git a/dbms/TableFunctions/TableFunctionInput.cpp b/src/TableFunctions/TableFunctionInput.cpp similarity index 100% rename from dbms/TableFunctions/TableFunctionInput.cpp rename to src/TableFunctions/TableFunctionInput.cpp diff --git a/dbms/TableFunctions/TableFunctionInput.h b/src/TableFunctions/TableFunctionInput.h similarity index 100% rename from dbms/TableFunctions/TableFunctionInput.h rename to src/TableFunctions/TableFunctionInput.h diff --git a/dbms/TableFunctions/TableFunctionMerge.cpp b/src/TableFunctions/TableFunctionMerge.cpp similarity index 100% rename from dbms/TableFunctions/TableFunctionMerge.cpp rename to src/TableFunctions/TableFunctionMerge.cpp diff --git a/dbms/TableFunctions/TableFunctionMerge.h b/src/TableFunctions/TableFunctionMerge.h similarity index 100% rename from dbms/TableFunctions/TableFunctionMerge.h rename to src/TableFunctions/TableFunctionMerge.h diff --git a/dbms/TableFunctions/TableFunctionMySQL.cpp b/src/TableFunctions/TableFunctionMySQL.cpp similarity index 100% rename from dbms/TableFunctions/TableFunctionMySQL.cpp rename to src/TableFunctions/TableFunctionMySQL.cpp diff --git a/dbms/TableFunctions/TableFunctionMySQL.h b/src/TableFunctions/TableFunctionMySQL.h similarity index 100% rename from dbms/TableFunctions/TableFunctionMySQL.h rename to src/TableFunctions/TableFunctionMySQL.h diff --git a/dbms/TableFunctions/TableFunctionNumbers.cpp b/src/TableFunctions/TableFunctionNumbers.cpp similarity index 100% rename from dbms/TableFunctions/TableFunctionNumbers.cpp rename to src/TableFunctions/TableFunctionNumbers.cpp diff --git a/dbms/TableFunctions/TableFunctionNumbers.h b/src/TableFunctions/TableFunctionNumbers.h similarity index 100% rename from dbms/TableFunctions/TableFunctionNumbers.h rename to src/TableFunctions/TableFunctionNumbers.h diff --git a/dbms/TableFunctions/TableFunctionRemote.cpp b/src/TableFunctions/TableFunctionRemote.cpp similarity index 100% rename from dbms/TableFunctions/TableFunctionRemote.cpp rename to src/TableFunctions/TableFunctionRemote.cpp diff --git a/dbms/TableFunctions/TableFunctionRemote.h b/src/TableFunctions/TableFunctionRemote.h similarity index 100% rename from dbms/TableFunctions/TableFunctionRemote.h rename to 
src/TableFunctions/TableFunctionRemote.h diff --git a/dbms/TableFunctions/TableFunctionS3.cpp b/src/TableFunctions/TableFunctionS3.cpp similarity index 100% rename from dbms/TableFunctions/TableFunctionS3.cpp rename to src/TableFunctions/TableFunctionS3.cpp diff --git a/dbms/TableFunctions/TableFunctionS3.h b/src/TableFunctions/TableFunctionS3.h similarity index 100% rename from dbms/TableFunctions/TableFunctionS3.h rename to src/TableFunctions/TableFunctionS3.h diff --git a/dbms/TableFunctions/TableFunctionURL.cpp b/src/TableFunctions/TableFunctionURL.cpp similarity index 100% rename from dbms/TableFunctions/TableFunctionURL.cpp rename to src/TableFunctions/TableFunctionURL.cpp diff --git a/dbms/TableFunctions/TableFunctionURL.h b/src/TableFunctions/TableFunctionURL.h similarity index 100% rename from dbms/TableFunctions/TableFunctionURL.h rename to src/TableFunctions/TableFunctionURL.h diff --git a/dbms/TableFunctions/TableFunctionValues.cpp b/src/TableFunctions/TableFunctionValues.cpp similarity index 100% rename from dbms/TableFunctions/TableFunctionValues.cpp rename to src/TableFunctions/TableFunctionValues.cpp diff --git a/dbms/TableFunctions/TableFunctionValues.h b/src/TableFunctions/TableFunctionValues.h similarity index 100% rename from dbms/TableFunctions/TableFunctionValues.h rename to src/TableFunctions/TableFunctionValues.h diff --git a/dbms/TableFunctions/TableFunctionZeros.cpp b/src/TableFunctions/TableFunctionZeros.cpp similarity index 100% rename from dbms/TableFunctions/TableFunctionZeros.cpp rename to src/TableFunctions/TableFunctionZeros.cpp diff --git a/dbms/TableFunctions/TableFunctionZeros.h b/src/TableFunctions/TableFunctionZeros.h similarity index 100% rename from dbms/TableFunctions/TableFunctionZeros.h rename to src/TableFunctions/TableFunctionZeros.h diff --git a/dbms/TableFunctions/parseColumnsListForTableFunction.cpp b/src/TableFunctions/parseColumnsListForTableFunction.cpp similarity index 100% rename from dbms/TableFunctions/parseColumnsListForTableFunction.cpp rename to src/TableFunctions/parseColumnsListForTableFunction.cpp diff --git a/dbms/TableFunctions/parseColumnsListForTableFunction.h b/src/TableFunctions/parseColumnsListForTableFunction.h similarity index 100% rename from dbms/TableFunctions/parseColumnsListForTableFunction.h rename to src/TableFunctions/parseColumnsListForTableFunction.h diff --git a/dbms/TableFunctions/registerTableFunctions.cpp b/src/TableFunctions/registerTableFunctions.cpp similarity index 100% rename from dbms/TableFunctions/registerTableFunctions.cpp rename to src/TableFunctions/registerTableFunctions.cpp diff --git a/dbms/TableFunctions/registerTableFunctions.h b/src/TableFunctions/registerTableFunctions.h similarity index 100% rename from dbms/TableFunctions/registerTableFunctions.h rename to src/TableFunctions/registerTableFunctions.h diff --git a/utils/ci/run-clickhouse-from-binaries.sh b/utils/ci/run-clickhouse-from-binaries.sh index 9b1a3f86b1f..5e9dc35869a 100755 --- a/utils/ci/run-clickhouse-from-binaries.sh +++ b/utils/ci/run-clickhouse-from-binaries.sh @@ -6,8 +6,8 @@ set -e -x source default-config -SERVER_BIN="${WORKSPACE}/build/dbms/Server/clickhouse" -SERVER_CONF="${WORKSPACE}/sources/dbms/Server/config.xml" +SERVER_BIN="${WORKSPACE}/build/src/Server/clickhouse" +SERVER_CONF="${WORKSPACE}/sources/src/Server/config.xml" SERVER_DATADIR="${WORKSPACE}/clickhouse" [[ -x "$SERVER_BIN" ]] || die "Run build-normal.sh first" diff --git a/utils/release/release_lib.sh b/utils/release/release_lib.sh index 
7e2608aac87..148fe2c05df 100644
--- a/utils/release/release_lib.sh
+++ b/utils/release/release_lib.sh
@@ -101,8 +101,8 @@ function gen_revision_author {
     gen_changelog "$VERSION_STRING" "" "$AUTHOR" ""
     gen_dockerfiles "$VERSION_STRING"
-    dbms/Storages/System/StorageSystemContributors.sh ||:
-    git commit -m "$auto_message [$VERSION_STRING] [$VERSION_REVISION]" cmake/version.cmake debian/changelog docker/*/Dockerfile dbms/Storages/System/StorageSystemContributors.generated.cpp
+    src/Storages/System/StorageSystemContributors.sh ||:
+    git commit -m "$auto_message [$VERSION_STRING] [$VERSION_REVISION]" cmake/version.cmake debian/changelog docker/*/Dockerfile src/Storages/System/StorageSystemContributors.generated.cpp
     if [ -z $NO_PUSH ]; then
         git push
     fi

From 49bc33ed3431e503a0bd454e4e6f78e2bd0d1262 Mon Sep 17 00:00:00 2001
From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com>
Date: Fri, 3 Apr 2020 18:59:59 +0300
Subject: [PATCH 085/484] Update 01073_attach_if_not_exists.sql

---
 tests/queries/0_stateless/01073_attach_if_not_exists.sql | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/queries/0_stateless/01073_attach_if_not_exists.sql b/tests/queries/0_stateless/01073_attach_if_not_exists.sql
index ed32e023c24..8bb52556ccc 100644
--- a/tests/queries/0_stateless/01073_attach_if_not_exists.sql
+++ b/tests/queries/0_stateless/01073_attach_if_not_exists.sql
@@ -1,6 +1,6 @@
 CREATE TABLE aine (a Int) ENGINE = Log;
 ATTACH TABLE aine; -- { serverError 57 }
-ATTACH TABLE IF NOT EXISTS anie;
+ATTACH TABLE IF NOT EXISTS aine;
 DETACH TABLE aine;
 ATTACH TABLE IF NOT EXISTS aine;
 EXISTS TABLE aine;

From 7ab73e9083b7ab251229b998a8849ac582caf390 Mon Sep 17 00:00:00 2001
From: alesapin
Date: Fri, 3 Apr 2020 19:33:25 +0300
Subject: [PATCH 086/484] Fix bug

---
 dbms/Parsers/ParserAlterQuery.cpp | 2 +-
 dbms/Storages/StorageMergeTree.h  | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/dbms/Parsers/ParserAlterQuery.cpp b/dbms/Parsers/ParserAlterQuery.cpp
index 1375d1859a8..623bca440bb 100644
--- a/dbms/Parsers/ParserAlterQuery.cpp
+++ b/dbms/Parsers/ParserAlterQuery.cpp
@@ -126,7 +126,7 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected
     else if (s_rename_column.ignore(pos, expected))
     {
         if (s_if_exists.ignore(pos, expected))
-            command->if_not_exists = true;
+            command->if_exists = true;

         if (!parser_name.parse(pos, command->column, expected))
             return false;
diff --git a/dbms/Storages/StorageMergeTree.h b/dbms/Storages/StorageMergeTree.h
index fc65692e726..1762ba19824 100644
--- a/dbms/Storages/StorageMergeTree.h
+++ b/dbms/Storages/StorageMergeTree.h
@@ -124,7 +124,7 @@ private:
     /// and into in-memory structures. Wake up merge-mutation task.
     Int64 startMutation(const MutationCommands & commands, String & mutation_file_name);
     /// Wait until mutation with version will finish mutation for all parts
-    void waitForMutation(Int64 version, const String & filename);
+    void waitForMutation(Int64 version, const String & file_name);

     /// Try and find a single part to mutate and mutate it. If some part was successfully mutated, return true.
    bool tryMutatePart();

From 1d1721b37c505e82ac5fe00e3557c64fe910ae67 Mon Sep 17 00:00:00 2001
From: alesapin
Date: Fri, 3 Apr 2020 19:45:49 +0300
Subject: [PATCH 087/484] Fix if exists

---
 dbms/Storages/AlterCommands.cpp | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/dbms/Storages/AlterCommands.cpp b/dbms/Storages/AlterCommands.cpp
index ba9d64983e6..8eebaabf342 100644
--- a/dbms/Storages/AlterCommands.cpp
+++ b/dbms/Storages/AlterCommands.cpp
@@ -245,6 +245,7 @@ std::optional<AlterCommand> AlterCommand::parse(const ASTAlterCommand * command_
         command.type = AlterCommand::RENAME_COLUMN;
         command.column_name = command_ast->column->as<ASTIdentifier &>().name;
         command.rename_to = command_ast->rename_to->as<ASTIdentifier &>().name;
+        command.if_exists = command_ast->if_exists;
         return command;
     }
     else
@@ -704,7 +705,8 @@ void AlterCommands::prepare(const StorageInMemoryMetadata & metadata)
             command.ignore = true;
         }
         else if (command.type == AlterCommand::DROP_COLUMN
-            || command.type == AlterCommand::COMMENT_COLUMN)
+            || command.type == AlterCommand::COMMENT_COLUMN
+            || command.type == AlterCommand::RENAME_COLUMN)
        {
             if (!has_column && command.if_exists)
                 command.ignore = true;

From 9465f97832c447ac75f5c4c126971ed794056fe1 Mon Sep 17 00:00:00 2001
From: Alexander Kuzmenkov
Date: Fri, 3 Apr 2020 21:23:11 +0300
Subject: [PATCH 088/484] performance comparison

---
 docker/test/performance-comparison/compare.sh | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docker/test/performance-comparison/compare.sh b/docker/test/performance-comparison/compare.sh
index 83801130556..3044050d12b 100755
--- a/docker/test/performance-comparison/compare.sh
+++ b/docker/test/performance-comparison/compare.sh
@@ -107,7 +107,7 @@ function run_tests
     # and not always correct (e.g. when the reference SHA is really old and
     # has some other differences to the tested SHA, besides the one introduced
     # by the PR).
-    test_files_override=$(sed "s/dbms\/tests\/performance/${test_prefix//\//\\/}/" changed-tests.txt)
+    test_files_override=$(sed "s/tests\/performance/${test_prefix//\//\\/}/" changed-tests.txt)
     if [ "$test_files_override" != "" ]
     then
         test_files=$test_files_override
@@ -406,7 +406,7 @@ unset IFS

 # Remember that grep sets error code when nothing is found, hence the bayan
 # operator.
-grep -H -m2 '\(Exception\|Error\):[^:]' ./*-err.log | sed 's/:/\t/' > run-errors.tsv ||:
+grep -H -m2 -i '\(Exception\|Error\):[^:]' ./*-err.log | sed 's/:/\t/' > run-errors.tsv ||:
 }

 case "$stage" in

From 5fb5fd1884a7cf4f98992af11157ea1cd2f39d71 Mon Sep 17 00:00:00 2001
From: Sami Kerola
Date: Fri, 3 Apr 2020 11:25:59 +0100
Subject: [PATCH 089/484] [docs] tell the cutIPv6() is cutting bytes not bits

The IPv6CIDRToRange() appears to use bits while cutIPv6() will cut in bytes.
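Cutting 8 bytes with cutIPv6() therefore corresponds to masking 64 bits with IPv6CIDRToRange(), which the following comparison demonstrates: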
WITH IPv6StringToNum('FFFF:FFFF:FFFF:FFFF:AAAA:AAAA:AAAA:AAAA') AS ipv6
SELECT cutIPv6(ipv6, 8, 0), tupleElement(IPv6CIDRToRange(ipv6, 64), 1)
---
 docs/en/sql_reference/functions/ip_address_functions.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/en/sql_reference/functions/ip_address_functions.md b/docs/en/sql_reference/functions/ip_address_functions.md
index a7186a5d067..e412197d9ac 100644
--- a/docs/en/sql_reference/functions/ip_address_functions.md
+++ b/docs/en/sql_reference/functions/ip_address_functions.md
@@ -132,9 +132,9 @@ SELECT IPv6NumToString(IPv4ToIPv6(IPv4StringToNum('192.168.0.1'))) AS addr
 └────────────────────┘
 ```

-## cutIPv6(x, bitsToCutForIPv6, bitsToCutForIPv4) {#cutipv6x-bitstocutforipv6-bitstocutforipv4}
+## cutIPv6(x, bytesToCutForIPv6, bytesToCutForIPv4) {#cutipv6x-bytestocutforipv6-bytestocutforipv4}

-Accepts a FixedString(16) value containing the IPv6 address in binary format. Returns a string containing the address of the specified number of bits removed in text format. For example:
+Accepts a FixedString(16) value containing the IPv6 address in binary format. Returns a string containing the address of the specified number of bytes removed in text format. For example:

 ``` sql
 WITH

From 07e90a572578026f1bfaca8fafd4571059c5f83a Mon Sep 17 00:00:00 2001
From: Ivan Blinkov
Date: Fri, 3 Apr 2020 22:25:20 +0300
Subject: [PATCH 090/484] Update docs.js

---
 website/js/docs.js | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/website/js/docs.js b/website/js/docs.js
index 879fbdbca3c..084053f2c7d 100644
--- a/website/js/docs.js
+++ b/website/js/docs.js
@@ -87,11 +87,10 @@ $(document).ready(function () {
                 hitsPerPage: 25,
                 'facetFilters': [
                     'lang:' + $('html').attr('lang'),
-                    'version:' + $('html').attr('data-version'),
-                    'single-page:' + $('html').attr('single-page'),
+                    'version:' + $('html').attr('data-version')
                 ]
             },
-            debug: true
+            debug: false
         });
     }
 });

From b66837ba32df2a57e0e61b2cc5bd773c2c0f50b1 Mon Sep 17 00:00:00 2001
From: Denis Glazachev
Date: Fri, 3 Apr 2020 23:27:28 +0400
Subject: [PATCH 091/484] Fix typos (#10045)

---
 docs/en/operations/performance_test.md | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/docs/en/operations/performance_test.md b/docs/en/operations/performance_test.md
index ca285a64cf5..d836d9478a3 100644
--- a/docs/en/operations/performance_test.md
+++ b/docs/en/operations/performance_test.md
@@ -24,7 +24,7 @@ With this instruction you can run basic ClickHouse performance test on any serve
 # Then do:
 chmod a+x clickhouse

-1. Download configs:
+5. Download configs:
@@ -34,7 +34,7 @@ With this instruction you can run basic ClickHouse performance test on any serve
 wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.d/path.xml -O config.d/path.xml
 wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.d/log_to_console.xml -O config.d/log_to_console.xml

-1. Download benchmark files:
+6. Download benchmark files:
@@ -42,7 +42,7 @@ With this instruction you can run basic ClickHouse performance test on any serve
 chmod a+x benchmark-new.sh
 wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/clickhouse/queries.sql

-1. Download test data according to the [Yandex.Metrica dataset](../getting_started/example_datasets/metrica.md) instruction (“hits” table containing 100 million rows).
+7. Download test data according to the [Yandex.Metrica dataset](../getting_started/example_datasets/metrica.md) instruction (“hits” table containing 100 million rows).
@@ -50,31 +50,31 @@ With this instruction you can run basic ClickHouse performance test on any serve
 tar xvf hits_100m_obfuscated_v1.tar.xz -C .
 mv hits_100m_obfuscated_v1/* .

-1. Run the server:
+8. Run the server:

 ./clickhouse server

-1. Check the data: ssh to the server in another terminal
+9. Check the data: ssh to the server in another terminal

 ./clickhouse client --query "SELECT count() FROM hits_100m_obfuscated"
 100000000

-1. Edit the benchmark-new.sh, change “clickhouse-client” to “./clickhouse client” and add “–max\_memory\_usage 100000000000” parameter.
+10. Edit the benchmark-new.sh, change “clickhouse-client” to “./clickhouse client” and add “–max\_memory\_usage 100000000000” parameter.

 mcedit benchmark-new.sh

-1. Run the benchmark:
+11. Run the benchmark:

 ./benchmark-new.sh hits_100m_obfuscated

-1. Send the numbers and the info about your hardware configuration to clickhouse-feedback@yandex-team.com
+12. Send the numbers and the info about your hardware configuration to clickhouse-feedback@yandex-team.com

-All the results are published here: https://clickhouse.tech/benchmark\_hardware.html
+All the results are published here: https://clickhouse.tech/benchmark_hardware.html

From 815f7448d003c48515a42e9fbc4752f54cc3fefa Mon Sep 17 00:00:00 2001
From: Ivan Blinkov
Date: Fri, 3 Apr 2020 23:08:27 +0300
Subject: [PATCH 092/484] Update docs.js

---
 website/js/docs.js | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/website/js/docs.js b/website/js/docs.js
index 084053f2c7d..743ead3bb2e 100644
--- a/website/js/docs.js
+++ b/website/js/docs.js
@@ -87,7 +87,8 @@ $(document).ready(function () {
                 hitsPerPage: 25,
                 'facetFilters': [
                     'lang:' + $('html').attr('lang'),
-                    'version:' + $('html').attr('data-version')
+                    'version:' + $('html').attr('data-version'),
+                    'single-page:' + $('html').attr('data-single-page')
                 ]
             },
             debug: false

From 6ecfc03cbb9cddfadecc96d1817b7386eb418ecc Mon Sep 17 00:00:00 2001
From: Avogar
Date: Fri, 3 Apr 2020 23:44:13 +0300
Subject: [PATCH 093/484] Change parsing msgpack data.
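In outline, readObject() stops copying input into msgpack::unpacker's internal buffer and instead parses in place with msgpack::unpack() from a PeekableReadBuffer checkpoint: whenever the parser throws msgpack::insufficient_bytes, the buffer pulls more input, makes the region from the checkpoint contiguous, rolls back, and retries. A minimal standalone sketch of that parse-and-advance pattern, assuming only msgpack-c and a plain in-memory buffer in place of ClickHouse's PeekableReadBuffer:

    #include <msgpack.hpp>
    #include <iostream>
    #include <string>

    int main()
    {
        // Pack two objects back to back, the way MsgPackRowInputFormat sees them on the wire.
        msgpack::sbuffer buffer;
        msgpack::pack(buffer, 42);
        msgpack::pack(buffer, std::string("hello"));

        // msgpack::unpack() advances `offset` past each complete object and throws
        // msgpack::insufficient_bytes when the data ends mid-object -- the signal the
        // patched format uses to read more input and retry from its checkpoint.
        std::size_t offset = 0;
        while (offset < buffer.size())
        {
            try
            {
                msgpack::object_handle handle = msgpack::unpack(buffer.data(), buffer.size(), offset);
                std::cout << handle.get() << '\n';
            }
            catch (const msgpack::insufficient_bytes &)
            {
                break; // a real reader would fetch more bytes here and retry
            }
        }
        return 0;
    }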
---
 .../Formats/Impl/MsgPackRowInputFormat.cpp    | 37 ++++++++++++------
 .../Formats/Impl/MsgPackRowInputFormat.h      |  3 +-
 .../01098_msgpack_format.reference            |  2 +
 .../0_stateless/01098_msgpack_format.sh       | 31 +++++++++++++--
 .../0_stateless/data_msgpack/all_types.msgpk  | Bin 200 -> 0 bytes
 .../data_msgpack/nested_arrays.msgpk          |  1 -
 6 files changed, 56 insertions(+), 18 deletions(-)
 delete mode 100644 dbms/tests/queries/0_stateless/data_msgpack/all_types.msgpk
 delete mode 100644 dbms/tests/queries/0_stateless/data_msgpack/nested_arrays.msgpk

diff --git a/dbms/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp b/dbms/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp
index 0b3fb3d58ed..32f89c816c5 100644
--- a/dbms/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp
+++ b/dbms/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp
@@ -24,21 +24,34 @@ namespace ErrorCodes
 }

 MsgPackRowInputFormat::MsgPackRowInputFormat(const Block & header_, ReadBuffer & in_, Params params_)
-    : IRowInputFormat(header_, in_, std::move(params_)), data_types(header_.getDataTypes()) {}
+    : IRowInputFormat(header_, in_, std::move(params_)), buf(in_), data_types(header_.getDataTypes()) {}

 bool MsgPackRowInputFormat::readObject()
 {
-    if (in.eof() && unpacker.nonparsed_size() == 0)
+    if (buf.eof())
         return false;
-    while (!unpacker.next(object_handle))
+
+    PeekableReadBufferCheckpoint checkpoint{buf};
+    size_t offset;
+    bool need_more_data = true;
+    while (need_more_data)
     {
-        if (in.eof())
-            throw Exception("Unexpected end of file while parsing MsgPack object.", ErrorCodes::INCORRECT_DATA);
-        unpacker.reserve_buffer(in.available());
-        memcpy(unpacker.buffer(), in.position(), in.available());
-        unpacker.buffer_consumed(in.available());
-        in.position() += in.available();
+        offset = 0;
+        try
+        {
+            object_handle = msgpack::unpack(buf.position(), buf.buffer().end() - buf.position(), offset);
+            need_more_data = false;
+        }
+        catch (msgpack::insufficient_bytes &)
+        {
+            buf.position() = buf.buffer().end();
+            if (buf.eof())
+                throw Exception("Unexpected end of file while parsing msgpack object.", ErrorCodes::INCORRECT_DATA);
+            buf.position() = buf.buffer().end();
+            buf.makeContinuousMemoryFromCheckpointToPos();
+            buf.rollbackToCheckpoint();
+        }
     }
+    buf.position() += offset;
     return true;
 }
@@ -168,9 +181,9 @@ bool MsgPackRowInputFormat::readRow(MutableColumns & columns, RowReadExtension &
 void registerInputFormatProcessorMsgPack(FormatFactory & factory)
 {
     factory.registerInputFormatProcessor("MsgPack", [](
-            ReadBuffer &buf,
-            const Block &sample,
-            const RowInputFormatParams &params,
+            ReadBuffer & buf,
+            const Block & sample,
+            const RowInputFormatParams & params,
             const FormatSettings &)
     {
         return std::make_shared<MsgPackRowInputFormat>(sample, buf, params);
diff --git a/dbms/src/Processors/Formats/Impl/MsgPackRowInputFormat.h b/dbms/src/Processors/Formats/Impl/MsgPackRowInputFormat.h
index 7daac811374..8ed23a1e0f4 100644
--- a/dbms/src/Processors/Formats/Impl/MsgPackRowInputFormat.h
+++ b/dbms/src/Processors/Formats/Impl/MsgPackRowInputFormat.h
@@ -2,6 +2,7 @@

 #include <Processors/Formats/IRowInputFormat.h>
 #include <Formats/FormatFactory.h>
+#include <IO/PeekableReadBuffer.h>
 #include <msgpack.hpp>

 namespace DB
@@ -20,8 +21,8 @@ private:
     bool readObject();
     void insertObject(IColumn & column, DataTypePtr type, const msgpack::object & object);

+    PeekableReadBuffer buf;
     DataTypes data_types;
-    msgpack::unpacker unpacker;
     msgpack::object_handle object_handle;
 };

diff --git a/dbms/tests/queries/0_stateless/01098_msgpack_format.reference b/dbms/tests/queries/0_stateless/01098_msgpack_format.reference
index aab048208bc..8059526a38f 100644
--- a/dbms/tests/queries/0_stateless/01098_msgpack_format.reference
+++ b/dbms/tests/queries/0_stateless/01098_msgpack_format.reference
@@ -6,3 +6,5 @@
 42 42 42 42 42 42 42 42 42.42 42.42 42 1970-02-12 1970-01-01 03:00:42 1970-01-01 03:00:00.042 [42]
 [[1,2,3],[1001,2002],[3167]] [[['one'],['two']],[['three']],[['four'],['five']]]
 [[1,2,3],[1001,2002],[3167]] [[['one'],['two']],[['three']],[['four'],['five']]]
+[0,1,2,3,42,253,254,255]
+[255,254,253,42,3,2,1,0]
diff --git a/dbms/tests/queries/0_stateless/01098_msgpack_format.sh b/dbms/tests/queries/0_stateless/01098_msgpack_format.sh
index 2aaf2dfd527..afebd6de3dc 100755
--- a/dbms/tests/queries/0_stateless/01098_msgpack_format.sh
+++ b/dbms/tests/queries/0_stateless/01098_msgpack_format.sh
@@ -4,26 +4,49 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 . $CURDIR/../shell_config.sh

 $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS msgpack";
+
 $CLICKHOUSE_CLIENT --query="CREATE TABLE msgpack (uint8 UInt8, uint16 UInt16, uint32 UInt32, uint64 UInt64, int8 Int8, int16 Int16, int32 Int32, int64 Int64, float Float32, double Float64, string String, date Date, datetime DateTime, datetime64 DateTime64, array Array(UInt32)) ENGINE = Memory";

 $CLICKHOUSE_CLIENT --query="INSERT INTO msgpack VALUES (255, 65535, 4294967295, 100000000000, -128, -32768, -2147483648, -100000000000, 2.02, 10000.0000001, 'String', 18980, 1639872000, 1639872000000, [1,2,3,4,5]), (4, 1234, 3244467295, 500000000000, -1, -256, -14741221, -7000000000, 100.1, 14321.032141201, 'Another string', 20000, 1839882000, 1639872891123, [5,4,3,2,1]),(42, 42, 42, 42, 42, 42, 42, 42, 42.42, 42.42, '42', 42, 42, 42, [42])";

-$CLICKHOUSE_CLIENT --query="SELECT * FROM msgpack FORMAT MsgPack" > $CURDIR/data_msgpack/all_types.msgpk;
+$CLICKHOUSE_CLIENT --query="SELECT * FROM msgpack FORMAT MsgPack" > $CURDIR/tmp_msgpac_test_all_types.msgpk;

-cat $CURDIR/data_msgpack/all_types.msgpk | $CLICKHOUSE_CLIENT --query="INSERT INTO msgpack FORMAT MsgPack";
+cat $CURDIR/tmp_msgpac_test_all_types.msgpk | $CLICKHOUSE_CLIENT --query="INSERT INTO msgpack FORMAT MsgPack";
+
+rm $CURDIR/tmp_msgpac_test_all_types.msgpk

 $CLICKHOUSE_CLIENT --query="SELECT * FROM msgpack";

 $CLICKHOUSE_CLIENT --query="DROP TABLE msgpack";
+
 $CLICKHOUSE_CLIENT --query="CREATE TABLE msgpack (array1 Array(Array(UInt32)), array2 Array(Array(Array(String)))) ENGINE = Memory";

 $CLICKHOUSE_CLIENT --query="INSERT INTO msgpack VALUES ([[1,2,3], [1001, 2002], [3167]], [[['one'], ['two']], [['three']],[['four'], ['five']]])";

-$CLICKHOUSE_CLIENT --query="SELECT * FROM msgpack FORMAT MsgPack" > $CURDIR/data_msgpack/nested_arrays.msgpk;
+$CLICKHOUSE_CLIENT --query="SELECT * FROM msgpack FORMAT MsgPack" > $CURDIR/tmp_msgpack_test_nested_arrays.msgpk;

-cat $CURDIR/data_msgpack/nested_arrays.msgpk | $CLICKHOUSE_CLIENT --query="INSERT INTO msgpack FORMAT MsgPack";
+cat $CURDIR/tmp_msgpack_test_nested_arrays.msgpk | $CLICKHOUSE_CLIENT --query="INSERT INTO msgpack FORMAT MsgPack";
+rm $CURDIR/tmp_msgpack_test_nested_arrays.msgpk;
+
+$CLICKHOUSE_CLIENT --query="SELECT * FROM msgpack";
+
+$CLICKHOUSE_CLIENT --query="DROP TABLE msgpack";
+
+
+$CLICKHOUSE_CLIENT --query="CREATE TABLE msgpack (array Array(UInt8)) ENGINE = Memory";
+
+$CLICKHOUSE_CLIENT --query="INSERT INTO msgpack VALUES ([0, 1, 2, 3, 42, 253, 254, 255]), ([255, 254, 253, 42, 3, 2, 1, 0])";
+
+$CLICKHOUSE_CLIENT --query="SELECT * FROM msgpack FORMAT MsgPack" > $CURDIR/tmp_msgpack_type_conversion.msgpk;
+
+$CLICKHOUSE_CLIENT --query="DROP TABLE msgpack";
+
+$CLICKHOUSE_CLIENT --query="CREATE TABLE msgpack (array Array(Int64)) ENGINE = Memory";
+
+cat $CURDIR/tmp_msgpack_type_conversion.msgpk | $CLICKHOUSE_CLIENT --query="INSERT INTO msgpack FORMAT MsgPack";
+rm $CURDIR/tmp_msgpack_type_conversion.msgpk;

 $CLICKHOUSE_CLIENT --query="SELECT * FROM msgpack";
diff --git a/dbms/tests/queries/0_stateless/data_msgpack/all_types.msgpk b/dbms/tests/queries/0_stateless/data_msgpack/all_types.msgpk
deleted file mode 100644
index efefdf32a55f96112d8952e725c2023f9687cde3..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 200
zcmX@}|Lp(&=l%o1c?JdsagVYW3>O+MHZWXj0CFz_g$*9;swCwUf}BZ
jJe8G&nTb&g3Qjp`t`j-!;A(oPKStx*A`>H^_(UxL8(vuZ

diff --git a/dbms/tests/queries/0_stateless/data_msgpack/nested_arrays.msgpk b/dbms/tests/queries/0_stateless/data_msgpack/nested_arrays.msgpk
deleted file mode 100644
index 761ef1d5b6c..00000000000
--- a/dbms/tests/queries/0_stateless/data_msgpack/nested_arrays.msgpk
+++ /dev/null
@@ -1 +0,0 @@
-ґ _onetwothreefourfive
\ No newline at end of file

From 9b590a1edf285e15ec2dd777d18b47d030781af2 Mon Sep 17 00:00:00 2001
From: Ivan Lezhankin
Date: Sat, 4 Apr 2020 03:04:46 +0300
Subject: [PATCH 094/484] Trigger rebuild after rename

From e32c761e598cf38bd4c40074ec284d51202b0827 Mon Sep 17 00:00:00 2001
From: Ivan Blinkov
Date: Sat, 4 Apr 2020 08:30:40 +0300
Subject: [PATCH 095/484] Update docs.js

---
 website/js/docs.js | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/website/js/docs.js b/website/js/docs.js
index 743ead3bb2e..084053f2c7d 100644
--- a/website/js/docs.js
+++ b/website/js/docs.js
@@ -87,8 +87,7 @@ $(document).ready(function () {
                 hitsPerPage: 25,
                 'facetFilters': [
                     'lang:' + $('html').attr('lang'),
-                    'version:' + $('html').attr('data-version'),
-                    'single-page:' + $('html').attr('data-single-page')
+                    'version:' + $('html').attr('data-version')
                 ]
             },
             debug: false

From 6a2a29a579e6173bed4e71b2604c8765e3fd18ba Mon Sep 17 00:00:00 2001
From: Ivan Blinkov
Date: Sat, 4 Apr 2020 09:33:00 +0300
Subject: [PATCH 096/484] Update requirements.md

---
 docs/en/operations/requirements.md | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/docs/en/operations/requirements.md b/docs/en/operations/requirements.md
index fc67ff7f989..ce32a46a3e3 100644
--- a/docs/en/operations/requirements.md
+++ b/docs/en/operations/requirements.md
@@ -11,11 +11,11 @@ For installation from prebuilt deb packages, use a CPU with x86\_64 architecture

 ClickHouse implements parallel data processing and uses all the hardware resources available. When choosing a processor, take into account that ClickHouse works more efficiently at configurations with a large number of cores but a lower clock rate than at configurations with fewer cores and a higher clock rate. For example, 16 cores with 2600 MHz is preferable to 8 cores with 3600 MHz.

-Use of **Turbo Boost** and **hyper-threading** technologies is recommended. It significantly improves performance with a typical load.
+It is recommended to use **Turbo Boost** and **hyper-threading** technologies. It significantly improves performance with a typical workload.

 ## RAM {#ram}

-We recommend to use a minimum of 4GB of RAM in order to perform non-trivial queries. The ClickHouse server can run with a much smaller amount of RAM, but it requires memory for processing queries.
+We recommend using a minimum of 4GB of RAM to perform non-trivial queries. The ClickHouse server can run with a much smaller amount of RAM, but it requires memory for processing queries.
 The required volume of RAM depends on:
@@ -42,7 +42,7 @@ The volume of storage required for your data should be calculated separately. As

 - The data compression coefficient.

-    To estimate the data compression coefficient, load a sample of your data into ClickHouse and compare the actual size of the data with the size of the table stored. For example, clickstream data is usually compressed by 6-10 times.
+    To estimate the data compression coefficient, load a sample of your data into ClickHouse, and compare the actual size of the data with the size of the table stored. For example, clickstream data is usually compressed by 6-10 times.

 To calculate the final volume of data to be stored, apply the compression coefficient to the estimated data volume. If you plan to store data in several replicas, then multiply the estimated volume by the number of replicas.
@@ -50,10 +50,10 @@

 If possible, use networks of 10G or higher class.

-The network bandwidth is critical for processing distributed queries with a large amount of intermediate data. In addition, network speed affects replication processes.
+The network bandwidth is critical for processing distributed queries with a large amount of intermediate data. Besides, network speed affects replication processes.

 ## Software {#software}

-ClickHouse is developed for the Linux family of operating systems. The recommended Linux distribution is Ubuntu. The `tzdata` package should be installed in the system.
+ClickHouse is developed primarily for the Linux family of operating systems. The recommended Linux distribution is Ubuntu. The `tzdata` package should be installed in the system.

 ClickHouse can also work in other operating system families. See details in the [Getting started](../getting_started/index.md) section of the documentation.

From 65c317e4ca2183be64479a18b0e2d4ae082410ae Mon Sep 17 00:00:00 2001
From: Ivan Blinkov
Date: Sat, 4 Apr 2020 09:35:35 +0300
Subject: [PATCH 097/484] Update quotas.md

---
 docs/en/operations/quotas.md | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/docs/en/operations/quotas.md b/docs/en/operations/quotas.md
index aeef9f88c7c..deac786bc50 100644
--- a/docs/en/operations/quotas.md
+++ b/docs/en/operations/quotas.md
@@ -5,8 +5,8 @@ toc_title: Quotas

 # Quotas {#quotas}

-Quotas allow you to limit resource usage over a period of time, or simply track the use of resources.
-Quotas are set up in the user config. This is usually ‘users.xml’.
+Quotas allow you to limit resource usage over a period of time or track the use of resources.
+Quotas are set up in the user config, which is usually ‘users.xml’.

 The system also has a feature for limiting the complexity of a single query. See the section “Restrictions on query complexity”).
@@ -37,7 +37,7 @@ Let’s look at the section of the ‘users.xml’ file that defines quotas.
 ```

-By default, the quota just tracks resource consumption for each hour, without limiting usage.
+By default, the quota tracks resource consumption for each hour, without limiting usage.
 The resource consumption calculated for each interval is output to the server log after each request.

 ``` xml
@@ -66,7 +66,7 @@ The resource consumption calculated for each interval is output to the server lo
 ```

-For the ‘statbox’ quota, restrictions are set for every hour and for every 24 hours (86,400 seconds). The time interval is counted starting from an implementation-defined fixed moment in time. In other words, the 24-hour interval doesn’t necessarily begin at midnight.
+For the ‘statbox’ quota, restrictions are set for every hour and for every 24 hours (86,400 seconds). The time interval is counted, starting from an implementation-defined fixed moment in time. In other words, the 24-hour interval doesn’t necessarily begin at midnight.

 When the interval ends, all collected values are cleared. For the next hour, the quota calculation starts over.
@@ -76,15 +76,15 @@ Here are the amounts that can be restricted:

 `errors` – The number of queries that threw an exception.

-`result_rows` – The total number of rows given as the result.
+`result_rows` – The total number of rows given as a result.

-`read_rows` – The total number of source rows read from tables for running the query, on all remote servers.
+`read_rows` – The total number of source rows read from tables for running the query on all remote servers.

 `execution_time` – The total query execution time, in seconds (wall time).

 If the limit is exceeded for at least one time interval, an exception is thrown with a text about which restriction was exceeded, for which interval, and when the new interval begins (when queries can be sent again).

-Quotas can use the “quota key” feature in order to report on resources for multiple keys independently. Here is an example of this:
+Quotas can use the “quota key” feature to report on resources for multiple keys independently. Here is an example of this:

 ``` xml
         so the quota will be counted separately for each username.
         Using keys makes sense only if quota_key is transmitted by the program, not by a user.

-        You can also write <keyed_by_ip /> so the IP address is used as the quota key.
+        You can also write <keyed_by_ip />, so the IP address is used as the quota key.
         (But keep in mind that users can change the IPv6 address fairly easily.)
--> From 765dd7c495a15389ded04f92a0249ff82a5333e8 Mon Sep 17 00:00:00 2001 From: Ivan Blinkov Date: Sat, 4 Apr 2020 12:15:31 +0300 Subject: [PATCH 098/484] Update some docs translations (#10044) --- .gitignore | 2 +- docs/es/commercial/cloud.md | 7 +- docs/es/commercial/index.md | 8 + .../nested_data_structures/index.md | 7 - .../special_data_types/expression.md | 9 - .../es/data_types/special_data_types/index.md | 9 - docs/es/data_types/special_data_types/set.md | 9 - docs/es/database_engines/index.md | 17 - docs/es/development/architecture.md | 123 +- docs/es/development/browse_code.md | 3 + docs/es/development/build.md | 9 +- docs/es/development/build_cross_arm.md | 3 + docs/es/development/build_cross_osx.md | 5 +- docs/es/development/build_osx.md | 9 +- docs/es/development/contrib.md | 13 +- docs/es/development/developer_instruction.md | 35 +- docs/es/development/index.md | 7 +- docs/es/development/style.md | 53 +- docs/es/development/tests.md | 48 +- docs/es/engines/database_engines/index.md | 21 + .../es/{ => engines}/database_engines/lazy.md | 9 +- .../{ => engines}/database_engines/mysql.md | 57 +- docs/es/engines/index.md | 8 + docs/es/engines/table_engines/index.md | 85 + .../table_engines/integrations}/hdfs.md | 29 +- .../table_engines/integrations/index.md | 8 + .../table_engines/integrations}/jdbc.md | 23 +- .../table_engines/integrations}/kafka.md | 39 +- .../table_engines/integrations}/mysql.md | 39 +- .../table_engines/integrations}/odbc.md | 29 +- .../engines/table_engines/log_family/index.md | 8 + .../table_engines/log_family}/log.md | 5 +- .../table_engines/log_family}/log_family.md | 9 +- .../table_engines/log_family}/stripelog.md | 19 +- .../table_engines/log_family}/tinylog.md | 7 +- .../mergetree_family}/aggregatingmergetree.md | 15 +- .../mergetree_family}/collapsingmergetree.md | 37 +- .../custom_partitioning_key.md | 25 +- .../mergetree_family}/graphitemergetree.md | 33 +- .../table_engines/mergetree_family/index.md | 8 + .../mergetree_family}/mergetree.md | 171 +- .../mergetree_family}/replacingmergetree.md | 15 +- .../mergetree_family}/replication.md | 33 +- .../mergetree_family}/summingmergetree.md | 23 +- .../versionedcollapsingmergetree.md | 29 +- .../table_engines/special}/buffer.md | 25 +- .../table_engines/special}/dictionary.md | 9 +- .../table_engines/special}/distributed.md | 47 +- .../table_engines/special}/external_data.md | 17 +- .../table_engines/special}/file.md | 21 +- .../table_engines/special}/generate.md | 11 +- .../es/engines/table_engines/special/index.md | 8 + .../table_engines/special}/join.md | 35 +- .../table_engines/special/materializedview.md | 12 + .../table_engines/special}/memory.md | 5 +- .../table_engines/special}/merge.md | 19 +- .../table_engines/special}/null.md | 5 +- .../table_engines/special}/set.md | 5 +- .../table_engines/special}/url.md | 19 +- .../table_engines/special}/view.md | 5 +- docs/es/faq/general.md | 15 +- docs/es/faq/index.md | 8 + .../example_datasets/amplab_benchmark.md | 7 +- .../example_datasets/criteo.md | 5 +- .../getting_started/example_datasets/index.md | 23 +- .../example_datasets/metrica.md | 7 +- .../example_datasets/nyc_taxi.md | 17 +- .../example_datasets/ontime.md | 17 +- .../example_datasets/star_schema.md | 35 +- .../example_datasets/wikistat.md | 5 +- docs/es/getting_started/index.md | 7 +- docs/es/getting_started/install.md | 31 +- docs/es/getting_started/playground.md | 49 +- docs/es/getting_started/tutorial.md | 74 +- docs/es/guides/apply_catboost_model.md | 19 +- docs/es/guides/index.md | 9 
+- docs/es/index.md | 49 +- docs/es/interfaces/cli.md | 7 +- docs/es/interfaces/cpp.md | 5 +- docs/es/interfaces/formats.md | 145 +- docs/es/interfaces/http.md | 242 +- docs/es/interfaces/index.md | 6 +- docs/es/interfaces/jdbc.md | 5 +- docs/es/interfaces/mysql.md | 9 +- docs/es/interfaces/odbc.md | 5 +- docs/es/interfaces/tcp.md | 5 +- .../third-party/client_libraries.md | 17 +- docs/es/interfaces/third-party/gui.md | 9 +- docs/es/interfaces/third-party/index.md | 8 + .../es/interfaces/third-party/integrations.md | 17 +- docs/es/interfaces/third-party/proxy.md | 5 +- docs/es/introduction/adopters.md | 29 +- docs/es/introduction/distinctive_features.md | 29 +- .../features_considered_disadvantages.md | 5 +- docs/es/introduction/history.md | 31 +- docs/es/introduction/index.md | 8 + docs/es/introduction/performance.md | 13 +- docs/es/operations/access_rights.md | 15 +- docs/es/operations/backup.md | 15 +- docs/es/operations/configuration_files.md | 20 +- docs/es/operations/index.md | 10 +- docs/es/operations/monitoring.md | 17 +- .../optimizing_performance/index.md | 8 + .../sampling_query_profiler.md | 11 +- docs/es/operations/performance_test.md | 9 +- docs/es/operations/quotas.md | 19 +- docs/es/operations/requirements.md | 13 +- .../index.md | 6 +- .../settings.md | 237 +- .../settings/constraints_on_settings.md | 9 +- docs/es/operations/settings/index.md | 6 +- .../settings/permissions_for_queries.md | 25 +- .../operations/settings/query_complexity.md | 57 +- docs/es/operations/settings/settings.md | 296 +-- .../operations/settings/settings_profiles.md | 5 +- docs/es/operations/settings/settings_users.md | 23 +- docs/es/operations/system_tables.md | 700 +++--- docs/es/operations/table_engines/index.md | 81 - .../table_engines/materializedview.md | 9 - docs/es/operations/tips.md | 21 +- docs/es/operations/troubleshooting.md | 17 +- docs/es/operations/update.md | 3 + .../clickhouse-benchmark.md | 41 +- .../{utils => utilities}/clickhouse-copier.md | 17 +- .../{utils => utilities}/clickhouse-local.md | 29 +- docs/es/operations/utilities/index.md | 15 + docs/es/operations/utils/index.md | 11 - docs/es/query_language/dicts/index.md | 18 - .../functions/comparison_functions.md | 36 - .../functions/machine_learning_functions.md | 17 - .../functions/splitting_merging_functions.md | 71 - docs/es/query_language/index.md | 13 - docs/es/query_language/select.md | 1379 ----------- docs/es/query_language/system.md | 110 - .../query_language/table_functions/index.md | 34 - .../query_language/table_functions/merge.md | 11 - .../aggregate_functions}/combinators.md | 33 +- .../aggregate_functions}/index.md | 16 +- .../parametric_functions.md | 71 +- .../aggregate_functions}/reference.md | 91 +- .../data_types}/aggregatefunction.md | 15 +- .../{ => sql_reference}/data_types/array.md | 15 +- .../{ => sql_reference}/data_types/boolean.md | 5 +- .../es/{ => sql_reference}/data_types/date.md | 5 +- .../data_types/datetime.md | 33 +- .../data_types/datetime64.md | 25 +- .../{ => sql_reference}/data_types/decimal.md | 7 +- .../sql_reference/data_types/domains/index.md | 8 + .../data_types/domains/ipv4.md | 15 +- .../data_types/domains/ipv6.md | 15 +- .../data_types/domains/overview.md | 18 +- .../es/{ => sql_reference}/data_types/enum.md | 7 +- .../data_types/fixedstring.md | 11 +- .../{ => sql_reference}/data_types/float.md | 13 +- .../{ => sql_reference}/data_types/index.md | 6 +- .../data_types/int_uint.md | 5 +- .../nested_data_structures/index.md | 12 + .../nested_data_structures/nested.md | 15 +- 
.../data_types/nullable.md | 13 +- .../special_data_types/expression.md | 12 + .../data_types/special_data_types/index.md | 14 + .../data_types/special_data_types/interval.md | 15 +- .../data_types/special_data_types/nothing.md | 7 +- .../data_types/special_data_types/set.md | 12 + .../{ => sql_reference}/data_types/string.md | 5 +- .../{ => sql_reference}/data_types/tuple.md | 15 +- .../es/{ => sql_reference}/data_types/uuid.md | 11 +- .../external_dictionaries}/external_dicts.md | 17 +- .../external_dicts_dict.md | 17 +- .../external_dicts_dict_hierarchical.md | 17 +- .../external_dicts_dict_layout.md | 33 +- .../external_dicts_dict_lifetime.md | 5 +- .../external_dicts_dict_sources.md | 35 +- .../external_dicts_dict_structure.md | 17 +- .../external_dictionaries/index.md | 8 + docs/es/sql_reference/dictionaries/index.md | 22 + .../dictionaries}/internal_dicts.md | 7 +- .../functions/arithmetic_functions.md | 17 +- .../functions/array_functions.md | 103 +- .../functions/array_join.md | 5 +- .../functions/bit_functions.md | 77 +- .../functions/bitmap_functions.md | 59 +- .../functions/comparison_functions.md | 37 + .../functions/conditional_functions.md | 15 +- .../functions/date_time_functions.md | 107 +- .../functions/encoding_functions.md | 11 +- .../functions/ext_dict_functions.md | 51 +- .../functions/functions_for_nulls.md | 31 +- .../functions/geo.md | 81 +- .../functions/hash_functions.md | 77 +- .../functions/higher_order_functions.md | 39 +- .../functions/in_functions.md | 11 +- .../functions/index.md | 16 +- .../functions/introspection.md | 17 +- .../functions/ip_address_functions.md | 15 +- .../functions/json_functions.md | 31 +- .../functions/logical_functions.md | 5 +- .../functions/machine_learning_functions.md | 20 + .../functions/math_functions.md | 9 +- .../functions/other_functions.md | 99 +- .../functions/random_functions.md | 5 +- .../functions/rounding_functions.md | 29 +- .../functions/splitting_merging_functions.md | 116 + .../functions/string_functions.md | 45 +- .../functions/string_replace_functions.md | 11 +- .../functions/string_search_functions.md | 75 +- .../functions/type_conversion_functions.md | 191 +- .../functions/url_functions.md | 13 +- .../functions/uuid_functions.md | 12 +- .../functions/ym_dict_functions.md | 41 +- docs/es/sql_reference/index.md | 18 + .../operators.md | 119 +- .../statements}/alter.md | 169 +- .../statements}/create.md | 87 +- docs/es/sql_reference/statements/index.md | 8 + .../statements}/insert_into.md | 23 +- .../statements}/misc.md | 87 +- docs/es/sql_reference/statements/select.md | 610 +++++ .../statements}/show.md | 27 +- docs/es/sql_reference/statements/system.md | 113 + .../syntax.md | 35 +- .../table_functions/file.md | 31 +- .../table_functions/generate.md | 21 +- .../table_functions/hdfs.md | 31 +- .../es/sql_reference/table_functions/index.md | 38 + .../table_functions/input.md | 11 +- .../table_functions/jdbc.md | 5 +- .../es/sql_reference/table_functions/merge.md | 14 + .../table_functions/mysql.md | 29 +- .../table_functions/numbers.md | 9 +- .../table_functions/odbc.md | 23 +- .../table_functions/remote.md | 11 +- .../table_functions/url.md | 7 +- docs/es/{ => whats_new}/changelog/2017.md | 27 +- docs/es/{ => whats_new}/changelog/2018.md | 7 +- docs/es/{ => whats_new}/changelog/2019.md | 33 +- docs/es/{ => whats_new}/changelog/index.md | 91 +- docs/es/whats_new/index.md | 8 + docs/es/whats_new/roadmap.md | 19 + docs/es/{ => whats_new}/security_changelog.md | 27 +- docs/fa/changelog/2017.md | 265 --- 
 [diffstat condensed; the per-file listing is omitted. This commit restructures the
 docs/es, docs/fa, docs/fr and docs/ja translations: query_language/* => sql_reference/*
 (agg_functions => aggregate_functions, dicts => dictionaries/external_dictionaries,
 and the alter, create, insert_into, misc, select, show and system pages moved under
 sql_reference/statements/), operations/table_engines/* =>
 engines/table_engines/{integrations,log_family,mergetree_family,special}/*,
 operations/utils/* => operations/utilities/*, operations/server_settings/* =>
 operations/server_configuration_parameters/*, operations/performance/ =>
 operations/optimizing_performance/, and changelog/, roadmap.md and
 security_changelog.md moved under whats_new/. docs/toc_es.yml, docs/toc_fa.yml and
 docs/toc_fr.yml are deleted, and docs/tools/build.py,
 docs/tools/translate/replace-with-translation.sh and docs/tools/translate/split_meta.py
 receive small updates.]
 1046 files changed, 80189 insertions(+), 35711 deletions(-)
docs/fr/{operations => engines}/table_engines/index.md (51%) rename docs/fr/{operations/table_engines => engines/table_engines/integrations}/hdfs.md (90%) create mode 100644 docs/fr/engines/table_engines/integrations/index.md rename docs/fr/{operations/table_engines => engines/table_engines/integrations}/jdbc.md (86%) rename docs/fr/{operations/table_engines => engines/table_engines/integrations}/kafka.md (92%) rename docs/fr/{operations/table_engines => engines/table_engines/integrations}/mysql.md (81%) rename docs/fr/{operations/table_engines => engines/table_engines/integrations}/odbc.md (80%) create mode 100644 docs/fr/engines/table_engines/log_family/index.md rename docs/fr/{operations/table_engines => engines/table_engines/log_family}/log.md (92%) rename docs/fr/{operations/table_engines => engines/table_engines/log_family}/log_family.md (90%) rename docs/fr/{operations/table_engines => engines/table_engines/log_family}/stripelog.md (92%) rename docs/fr/{operations/table_engines => engines/table_engines/log_family}/tinylog.md (91%) rename docs/fr/{operations/table_engines => engines/table_engines/mergetree_family}/aggregatingmergetree.md (89%) rename docs/fr/{operations/table_engines => engines/table_engines/mergetree_family}/collapsingmergetree.md (97%) rename docs/fr/{operations/table_engines => engines/table_engines/mergetree_family}/custom_partitioning_key.md (89%) rename docs/fr/{operations/table_engines => engines/table_engines/mergetree_family}/graphitemergetree.md (90%) create mode 100644 docs/fr/engines/table_engines/mergetree_family/index.md rename docs/fr/{operations/table_engines => engines/table_engines/mergetree_family}/mergetree.md (82%) rename docs/fr/{operations/table_engines => engines/table_engines/mergetree_family}/replacingmergetree.md (92%) rename docs/fr/{operations/table_engines => engines/table_engines/mergetree_family}/replication.md (94%) rename docs/fr/{operations/table_engines => engines/table_engines/mergetree_family}/summingmergetree.md (87%) rename docs/fr/{operations/table_engines => engines/table_engines/mergetree_family}/versionedcollapsingmergetree.md (97%) rename docs/fr/{operations/table_engines => engines/table_engines/special}/buffer.md (98%) rename docs/fr/{operations/table_engines => engines/table_engines/special}/dictionary.md (85%) rename docs/fr/{operations/table_engines => engines/table_engines/special}/distributed.md (87%) rename docs/fr/{operations/table_engines => engines/table_engines/special}/external_data.md (97%) rename docs/fr/{operations/table_engines => engines/table_engines/special}/file.md (73%) rename docs/fr/{operations/table_engines => engines/table_engines/special}/generate.md (74%) create mode 100644 docs/fr/engines/table_engines/special/index.md rename docs/fr/{operations/table_engines => engines/table_engines/special}/join.md (66%) create mode 100644 docs/fr/engines/table_engines/special/materializedview.md rename docs/fr/{operations/table_engines => engines/table_engines/special}/memory.md (93%) rename docs/fr/{operations/table_engines => engines/table_engines/special}/merge.md (95%) rename docs/fr/{operations/table_engines => engines/table_engines/special}/null.md (80%) rename docs/fr/{operations/table_engines => engines/table_engines/special}/set.md (91%) rename docs/fr/{operations/table_engines => engines/table_engines/special}/url.md (84%) rename docs/fr/{operations/table_engines => engines/table_engines/special}/view.md (82%) create mode 100644 docs/fr/faq/index.md create mode 100644 
docs/fr/interfaces/third-party/index.md create mode 100644 docs/fr/introduction/index.md create mode 100644 docs/fr/operations/optimizing_performance/index.md rename docs/fr/operations/{performance => optimizing_performance}/sampling_query_profiler.md (74%) mode change 120000 => 100644 docs/fr/operations/performance/sampling_query_profiler_example_result.txt rename docs/fr/operations/{server_settings => server_configuration_parameters}/index.md (77%) rename docs/fr/operations/{server_settings => server_configuration_parameters}/settings.md (81%) delete mode 100644 docs/fr/operations/table_engines/materializedview.md rename docs/fr/operations/{utils => utilities}/clickhouse-benchmark.md (98%) rename docs/fr/operations/{utils => utilities}/clickhouse-copier.md (98%) rename docs/fr/operations/{utils => utilities}/clickhouse-local.md (94%) rename docs/fr/operations/{utils => utilities}/index.md (80%) delete mode 100644 docs/fr/query_language/functions/machine_learning_functions.md delete mode 100644 docs/fr/query_language/index.md delete mode 100644 docs/fr/query_language/select.md delete mode 100644 docs/fr/query_language/table_functions/index.md rename docs/fr/{query_language/agg_functions => sql_reference/aggregate_functions}/combinators.md (93%) rename docs/fr/{query_language/agg_functions => sql_reference/aggregate_functions}/index.md (92%) rename docs/fr/{query_language/agg_functions => sql_reference/aggregate_functions}/parametric_functions.md (94%) rename docs/fr/{query_language/agg_functions => sql_reference/aggregate_functions}/reference.md (92%) rename docs/fr/{data_types/nested_data_structures => sql_reference/data_types}/aggregatefunction.md (70%) rename docs/fr/{ => sql_reference}/data_types/array.md (85%) rename docs/fr/{ => sql_reference}/data_types/boolean.md (72%) rename docs/fr/{ => sql_reference}/data_types/date.md (85%) rename docs/fr/{ => sql_reference}/data_types/datetime.md (77%) rename docs/fr/{ => sql_reference}/data_types/datetime64.md (85%) rename docs/fr/{ => sql_reference}/data_types/decimal.md (97%) create mode 100644 docs/fr/sql_reference/data_types/domains/index.md rename docs/fr/{ => sql_reference}/data_types/domains/ipv4.md (97%) rename docs/fr/{ => sql_reference}/data_types/domains/ipv6.md (97%) rename docs/fr/{ => sql_reference}/data_types/domains/overview.md (93%) rename docs/fr/{ => sql_reference}/data_types/enum.md (97%) rename docs/fr/{ => sql_reference}/data_types/fixedstring.md (83%) rename docs/fr/{ => sql_reference}/data_types/float.md (93%) rename docs/fr/{ => sql_reference}/data_types/index.md (73%) rename docs/fr/{ => sql_reference}/data_types/int_uint.md (80%) rename docs/fr/{ => sql_reference}/data_types/nested_data_structures/index.md (54%) rename docs/fr/{ => sql_reference}/data_types/nested_data_structures/nested.md (92%) rename docs/fr/{ => sql_reference}/data_types/nullable.md (74%) rename docs/fr/{ => sql_reference}/data_types/special_data_types/expression.md (71%) rename docs/fr/{ => sql_reference}/data_types/special_data_types/index.md (73%) rename docs/fr/{ => sql_reference}/data_types/special_data_types/interval.md (82%) rename docs/fr/{ => sql_reference}/data_types/special_data_types/nothing.md (67%) create mode 100644 docs/fr/sql_reference/data_types/special_data_types/set.md rename docs/fr/{ => sql_reference}/data_types/string.md (92%) rename docs/fr/{ => sql_reference}/data_types/tuple.md (75%) rename docs/fr/{ => sql_reference}/data_types/uuid.md (74%) rename docs/fr/{query_language/dicts => 
sql_reference/dictionaries/external_dictionaries}/external_dicts.md (68%) rename docs/fr/{query_language/dicts => sql_reference/dictionaries/external_dictionaries}/external_dicts_dict.md (85%) rename docs/fr/{query_language/dicts => sql_reference/dictionaries/external_dictionaries}/external_dicts_dict_hierarchical.md (87%) rename docs/fr/{query_language/dicts => sql_reference/dictionaries/external_dictionaries}/external_dicts_dict_layout.md (94%) rename docs/fr/{query_language/dicts => sql_reference/dictionaries/external_dictionaries}/external_dicts_dict_lifetime.md (96%) rename docs/fr/{query_language/dicts => sql_reference/dictionaries/external_dictionaries}/external_dicts_dict_sources.md (92%) rename docs/fr/{query_language/dicts => sql_reference/dictionaries/external_dictionaries}/external_dicts_dict_structure.md (86%) create mode 100644 docs/fr/sql_reference/dictionaries/external_dictionaries/index.md rename docs/fr/{query_language/dicts => sql_reference/dictionaries}/index.md (65%) rename docs/fr/{query_language/dicts => sql_reference/dictionaries}/internal_dicts.md (96%) rename docs/fr/{query_language => sql_reference}/functions/arithmetic_functions.md (97%) rename docs/fr/{query_language => sql_reference}/functions/array_functions.md (91%) rename docs/fr/{query_language => sql_reference}/functions/array_join.md (93%) rename docs/fr/{query_language => sql_reference}/functions/bit_functions.md (93%) rename docs/fr/{query_language => sql_reference}/functions/bitmap_functions.md (96%) rename docs/fr/{query_language => sql_reference}/functions/comparison_functions.md (58%) rename docs/fr/{query_language => sql_reference}/functions/conditional_functions.md (98%) rename docs/fr/{query_language => sql_reference}/functions/date_time_functions.md (98%) rename docs/fr/{query_language => sql_reference}/functions/encoding_functions.md (90%) rename docs/fr/{query_language => sql_reference}/functions/ext_dict_functions.md (77%) rename docs/fr/{query_language => sql_reference}/functions/functions_for_nulls.md (95%) rename docs/fr/{query_language => sql_reference}/functions/geo.md (85%) rename docs/fr/{query_language => sql_reference}/functions/hash_functions.md (85%) rename docs/fr/{query_language => sql_reference}/functions/higher_order_functions.md (98%) rename docs/fr/{query_language => sql_reference}/functions/in_functions.md (77%) rename docs/fr/{query_language => sql_reference}/functions/index.md (97%) rename docs/fr/{query_language => sql_reference}/functions/introspection.md (91%) rename docs/fr/{query_language => sql_reference}/functions/ip_address_functions.md (95%) rename docs/fr/{query_language => sql_reference}/functions/json_functions.md (98%) rename docs/fr/{query_language => sql_reference}/functions/logical_functions.md (84%) create mode 100644 docs/fr/sql_reference/functions/machine_learning_functions.md rename docs/fr/{query_language => sql_reference}/functions/math_functions.md (96%) rename docs/fr/{query_language => sql_reference}/functions/other_functions.md (96%) rename docs/fr/{query_language => sql_reference}/functions/random_functions.md (89%) rename docs/fr/{query_language => sql_reference}/functions/rounding_functions.md (96%) rename docs/fr/{query_language => sql_reference}/functions/splitting_merging_functions.md (82%) rename docs/fr/{query_language => sql_reference}/functions/string_functions.md (95%) rename docs/fr/{query_language => sql_reference}/functions/string_replace_functions.md (97%) rename docs/fr/{query_language => 
sql_reference}/functions/string_search_functions.md (99%) rename docs/fr/{query_language => sql_reference}/functions/type_conversion_functions.md (74%) rename docs/fr/{query_language => sql_reference}/functions/url_functions.md (96%) rename docs/fr/{query_language => sql_reference}/functions/uuid_functions.md (86%) rename docs/fr/{query_language => sql_reference}/functions/ym_dict_functions.md (90%) create mode 100644 docs/fr/sql_reference/index.md rename docs/fr/{query_language => sql_reference}/operators.md (84%) rename docs/fr/{query_language => sql_reference/statements}/alter.md (90%) rename docs/fr/{query_language => sql_reference/statements}/create.md (88%) create mode 100644 docs/fr/sql_reference/statements/index.md rename docs/fr/{query_language => sql_reference/statements}/insert_into.md (86%) rename docs/fr/{query_language => sql_reference/statements}/misc.md (83%) create mode 100644 docs/fr/sql_reference/statements/select.md rename docs/fr/{query_language => sql_reference/statements}/show.md (87%) rename docs/fr/{query_language => sql_reference/statements}/system.md (78%) rename docs/fr/{query_language => sql_reference}/syntax.md (95%) rename docs/fr/{query_language => sql_reference}/table_functions/file.md (88%) rename docs/fr/{query_language => sql_reference}/table_functions/generate.md (95%) rename docs/fr/{query_language => sql_reference}/table_functions/hdfs.md (95%) create mode 100644 docs/fr/sql_reference/table_functions/index.md rename docs/fr/{query_language => sql_reference}/table_functions/input.md (95%) rename docs/fr/{query_language => sql_reference}/table_functions/jdbc.md (88%) rename docs/fr/{query_language => sql_reference}/table_functions/merge.md (79%) rename docs/fr/{query_language => sql_reference}/table_functions/mysql.md (86%) rename docs/fr/{query_language => sql_reference}/table_functions/numbers.md (89%) rename docs/fr/{query_language => sql_reference}/table_functions/odbc.md (87%) rename docs/fr/{query_language => sql_reference}/table_functions/remote.md (94%) rename docs/fr/{query_language => sql_reference}/table_functions/url.md (89%) rename docs/fr/{ => whats_new}/changelog/2017.md (99%) rename docs/fr/{ => whats_new}/changelog/2018.md (99%) create mode 100644 docs/fr/whats_new/changelog/2019.md rename docs/fr/{ => whats_new}/changelog/index.md (99%) create mode 100644 docs/fr/whats_new/index.md rename docs/fr/{ => whats_new}/roadmap.md (77%) rename docs/fr/{ => whats_new}/security_changelog.md (96%) mode change 120000 => 100644 docs/ja/commercial/cloud.md mode change 120000 => 100644 docs/ja/commercial/index.md mode change 120000 => 100644 docs/ja/development/architecture.md mode change 120000 => 100644 docs/ja/development/browse_code.md mode change 120000 => 100644 docs/ja/development/build.md mode change 120000 => 100644 docs/ja/development/build_cross_arm.md mode change 120000 => 100644 docs/ja/development/build_cross_osx.md mode change 120000 => 100644 docs/ja/development/build_osx.md mode change 120000 => 100644 docs/ja/development/contrib.md mode change 120000 => 100644 docs/ja/development/developer_instruction.md mode change 120000 => 100644 docs/ja/development/index.md mode change 120000 => 100644 docs/ja/development/style.md mode change 120000 => 100644 docs/ja/development/tests.md mode change 120000 => 100644 docs/ja/engines/database_engines/index.md mode change 120000 => 100644 docs/ja/engines/database_engines/lazy.md mode change 120000 => 100644 docs/ja/engines/database_engines/mysql.md mode change 120000 => 100644 
docs/ja/engines/index.md mode change 120000 => 100644 docs/ja/engines/table_engines/index.md mode change 120000 => 100644 docs/ja/engines/table_engines/integrations/hdfs.md mode change 120000 => 100644 docs/ja/engines/table_engines/integrations/index.md mode change 120000 => 100644 docs/ja/engines/table_engines/integrations/jdbc.md mode change 120000 => 100644 docs/ja/engines/table_engines/integrations/kafka.md mode change 120000 => 100644 docs/ja/engines/table_engines/integrations/mysql.md mode change 120000 => 100644 docs/ja/engines/table_engines/integrations/odbc.md mode change 120000 => 100644 docs/ja/engines/table_engines/log_family/index.md mode change 120000 => 100644 docs/ja/engines/table_engines/log_family/log.md mode change 120000 => 100644 docs/ja/engines/table_engines/log_family/log_family.md mode change 120000 => 100644 docs/ja/engines/table_engines/log_family/stripelog.md mode change 120000 => 100644 docs/ja/engines/table_engines/log_family/tinylog.md mode change 120000 => 100644 docs/ja/engines/table_engines/mergetree_family/aggregatingmergetree.md mode change 120000 => 100644 docs/ja/engines/table_engines/mergetree_family/collapsingmergetree.md mode change 120000 => 100644 docs/ja/engines/table_engines/mergetree_family/custom_partitioning_key.md mode change 120000 => 100644 docs/ja/engines/table_engines/mergetree_family/graphitemergetree.md mode change 120000 => 100644 docs/ja/engines/table_engines/mergetree_family/index.md mode change 120000 => 100644 docs/ja/engines/table_engines/mergetree_family/mergetree.md mode change 120000 => 100644 docs/ja/engines/table_engines/mergetree_family/replacingmergetree.md mode change 120000 => 100644 docs/ja/engines/table_engines/mergetree_family/replication.md mode change 120000 => 100644 docs/ja/engines/table_engines/mergetree_family/summingmergetree.md mode change 120000 => 100644 docs/ja/engines/table_engines/mergetree_family/versionedcollapsingmergetree.md mode change 120000 => 100644 docs/ja/engines/table_engines/special/buffer.md mode change 120000 => 100644 docs/ja/engines/table_engines/special/dictionary.md mode change 120000 => 100644 docs/ja/engines/table_engines/special/distributed.md mode change 120000 => 100644 docs/ja/engines/table_engines/special/external_data.md mode change 120000 => 100644 docs/ja/engines/table_engines/special/file.md mode change 120000 => 100644 docs/ja/engines/table_engines/special/generate.md mode change 120000 => 100644 docs/ja/engines/table_engines/special/index.md mode change 120000 => 100644 docs/ja/engines/table_engines/special/join.md mode change 120000 => 100644 docs/ja/engines/table_engines/special/materializedview.md mode change 120000 => 100644 docs/ja/engines/table_engines/special/memory.md mode change 120000 => 100644 docs/ja/engines/table_engines/special/merge.md mode change 120000 => 100644 docs/ja/engines/table_engines/special/null.md mode change 120000 => 100644 docs/ja/engines/table_engines/special/set.md mode change 120000 => 100644 docs/ja/engines/table_engines/special/url.md mode change 120000 => 100644 docs/ja/engines/table_engines/special/view.md mode change 120000 => 100644 docs/ja/faq/general.md mode change 120000 => 100644 docs/ja/faq/index.md mode change 120000 => 100644 docs/ja/getting_started/example_datasets/amplab_benchmark.md mode change 120000 => 100644 docs/ja/getting_started/example_datasets/criteo.md mode change 120000 => 100644 docs/ja/getting_started/example_datasets/index.md mode change 120000 => 100644 docs/ja/getting_started/example_datasets/metrica.md mode 
change 120000 => 100644 docs/ja/getting_started/example_datasets/nyc_taxi.md mode change 120000 => 100644 docs/ja/getting_started/example_datasets/ontime.md mode change 120000 => 100644 docs/ja/getting_started/example_datasets/star_schema.md mode change 120000 => 100644 docs/ja/getting_started/example_datasets/wikistat.md mode change 120000 => 100644 docs/ja/getting_started/index.md mode change 120000 => 100644 docs/ja/getting_started/install.md mode change 120000 => 100644 docs/ja/getting_started/playground.md mode change 120000 => 100644 docs/ja/getting_started/tutorial.md mode change 120000 => 100644 docs/ja/guides/apply_catboost_model.md mode change 120000 => 100644 docs/ja/guides/index.md mode change 120000 => 100644 docs/ja/interfaces/cli.md mode change 120000 => 100644 docs/ja/interfaces/cpp.md mode change 120000 => 100644 docs/ja/interfaces/formats.md mode change 120000 => 100644 docs/ja/interfaces/http.md mode change 120000 => 100644 docs/ja/interfaces/index.md mode change 120000 => 100644 docs/ja/interfaces/jdbc.md mode change 120000 => 100644 docs/ja/interfaces/mysql.md mode change 120000 => 100644 docs/ja/interfaces/odbc.md mode change 120000 => 100644 docs/ja/interfaces/tcp.md mode change 120000 => 100644 docs/ja/interfaces/third-party/client_libraries.md mode change 120000 => 100644 docs/ja/interfaces/third-party/gui.md mode change 120000 => 100644 docs/ja/interfaces/third-party/index.md mode change 120000 => 100644 docs/ja/interfaces/third-party/integrations.md mode change 120000 => 100644 docs/ja/interfaces/third-party/proxy.md mode change 120000 => 100644 docs/ja/introduction/adopters.md mode change 120000 => 100644 docs/ja/introduction/index.md mode change 120000 => 100644 docs/ja/operations/access_rights.md mode change 120000 => 100644 docs/ja/operations/backup.md mode change 120000 => 100644 docs/ja/operations/configuration_files.md mode change 120000 => 100644 docs/ja/operations/index.md mode change 120000 => 100644 docs/ja/operations/monitoring.md mode change 120000 => 100644 docs/ja/operations/optimizing_performance/index.md mode change 120000 => 100644 docs/ja/operations/optimizing_performance/sampling_query_profiler.md mode change 120000 => 100644 docs/ja/operations/performance/sampling_query_profiler_example_result.txt mode change 120000 => 100644 docs/ja/operations/performance_test.md mode change 120000 => 100644 docs/ja/operations/quotas.md mode change 120000 => 100644 docs/ja/operations/requirements.md mode change 120000 => 100644 docs/ja/operations/server_configuration_parameters/index.md mode change 120000 => 100644 docs/ja/operations/server_configuration_parameters/settings.md mode change 120000 => 100644 docs/ja/operations/settings/constraints_on_settings.md mode change 120000 => 100644 docs/ja/operations/settings/index.md mode change 120000 => 100644 docs/ja/operations/settings/permissions_for_queries.md mode change 120000 => 100644 docs/ja/operations/settings/query_complexity.md mode change 120000 => 100644 docs/ja/operations/settings/settings.md mode change 120000 => 100644 docs/ja/operations/settings/settings_profiles.md mode change 120000 => 100644 docs/ja/operations/settings/settings_users.md mode change 120000 => 100644 docs/ja/operations/system_tables.md mode change 120000 => 100644 docs/ja/operations/tips.md mode change 120000 => 100644 docs/ja/operations/troubleshooting.md mode change 120000 => 100644 docs/ja/operations/update.md mode change 120000 => 100644 docs/ja/operations/utilities/clickhouse-benchmark.md mode change 120000 => 100644 
docs/ja/operations/utilities/clickhouse-copier.md mode change 120000 => 100644 docs/ja/operations/utilities/clickhouse-local.md mode change 120000 => 100644 docs/ja/operations/utilities/index.md mode change 120000 => 100644 docs/ja/sql_reference/aggregate_functions/combinators.md mode change 120000 => 100644 docs/ja/sql_reference/aggregate_functions/index.md mode change 120000 => 100644 docs/ja/sql_reference/aggregate_functions/parametric_functions.md mode change 120000 => 100644 docs/ja/sql_reference/aggregate_functions/reference.md mode change 120000 => 100644 docs/ja/sql_reference/data_types/aggregatefunction.md mode change 120000 => 100644 docs/ja/sql_reference/data_types/array.md mode change 120000 => 100644 docs/ja/sql_reference/data_types/boolean.md mode change 120000 => 100644 docs/ja/sql_reference/data_types/date.md mode change 120000 => 100644 docs/ja/sql_reference/data_types/datetime.md mode change 120000 => 100644 docs/ja/sql_reference/data_types/datetime64.md mode change 120000 => 100644 docs/ja/sql_reference/data_types/decimal.md mode change 120000 => 100644 docs/ja/sql_reference/data_types/domains/index.md mode change 120000 => 100644 docs/ja/sql_reference/data_types/domains/ipv4.md mode change 120000 => 100644 docs/ja/sql_reference/data_types/domains/ipv6.md mode change 120000 => 100644 docs/ja/sql_reference/data_types/domains/overview.md mode change 120000 => 100644 docs/ja/sql_reference/data_types/enum.md mode change 120000 => 100644 docs/ja/sql_reference/data_types/fixedstring.md mode change 120000 => 100644 docs/ja/sql_reference/data_types/float.md mode change 120000 => 100644 docs/ja/sql_reference/data_types/index.md mode change 120000 => 100644 docs/ja/sql_reference/data_types/int_uint.md mode change 120000 => 100644 docs/ja/sql_reference/data_types/nested_data_structures/index.md mode change 120000 => 100644 docs/ja/sql_reference/data_types/nested_data_structures/nested.md mode change 120000 => 100644 docs/ja/sql_reference/data_types/nullable.md mode change 120000 => 100644 docs/ja/sql_reference/data_types/special_data_types/expression.md mode change 120000 => 100644 docs/ja/sql_reference/data_types/special_data_types/index.md mode change 120000 => 100644 docs/ja/sql_reference/data_types/special_data_types/interval.md mode change 120000 => 100644 docs/ja/sql_reference/data_types/special_data_types/nothing.md mode change 120000 => 100644 docs/ja/sql_reference/data_types/special_data_types/set.md mode change 120000 => 100644 docs/ja/sql_reference/data_types/string.md mode change 120000 => 100644 docs/ja/sql_reference/data_types/tuple.md mode change 120000 => 100644 docs/ja/sql_reference/data_types/uuid.md mode change 120000 => 100644 docs/ja/sql_reference/dictionaries/external_dictionaries/external_dicts.md mode change 120000 => 100644 docs/ja/sql_reference/dictionaries/external_dictionaries/external_dicts_dict.md mode change 120000 => 100644 docs/ja/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_hierarchical.md mode change 120000 => 100644 docs/ja/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_layout.md mode change 120000 => 100644 docs/ja/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_lifetime.md mode change 120000 => 100644 docs/ja/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md mode change 120000 => 100644 docs/ja/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md mode change 120000 => 100644 
docs/ja/sql_reference/dictionaries/external_dictionaries/index.md mode change 120000 => 100644 docs/ja/sql_reference/dictionaries/index.md mode change 120000 => 100644 docs/ja/sql_reference/dictionaries/internal_dicts.md mode change 120000 => 100644 docs/ja/sql_reference/functions/arithmetic_functions.md mode change 120000 => 100644 docs/ja/sql_reference/functions/array_functions.md mode change 120000 => 100644 docs/ja/sql_reference/functions/array_join.md mode change 120000 => 100644 docs/ja/sql_reference/functions/bit_functions.md mode change 120000 => 100644 docs/ja/sql_reference/functions/bitmap_functions.md mode change 120000 => 100644 docs/ja/sql_reference/functions/comparison_functions.md mode change 120000 => 100644 docs/ja/sql_reference/functions/conditional_functions.md mode change 120000 => 100644 docs/ja/sql_reference/functions/date_time_functions.md mode change 120000 => 100644 docs/ja/sql_reference/functions/encoding_functions.md mode change 120000 => 100644 docs/ja/sql_reference/functions/ext_dict_functions.md mode change 120000 => 100644 docs/ja/sql_reference/functions/functions_for_nulls.md mode change 120000 => 100644 docs/ja/sql_reference/functions/geo.md mode change 120000 => 100644 docs/ja/sql_reference/functions/hash_functions.md mode change 120000 => 100644 docs/ja/sql_reference/functions/higher_order_functions.md mode change 120000 => 100644 docs/ja/sql_reference/functions/in_functions.md mode change 120000 => 100644 docs/ja/sql_reference/functions/index.md mode change 120000 => 100644 docs/ja/sql_reference/functions/introspection.md mode change 120000 => 100644 docs/ja/sql_reference/functions/ip_address_functions.md mode change 120000 => 100644 docs/ja/sql_reference/functions/json_functions.md mode change 120000 => 100644 docs/ja/sql_reference/functions/logical_functions.md mode change 120000 => 100644 docs/ja/sql_reference/functions/machine_learning_functions.md mode change 120000 => 100644 docs/ja/sql_reference/functions/math_functions.md mode change 120000 => 100644 docs/ja/sql_reference/functions/other_functions.md mode change 120000 => 100644 docs/ja/sql_reference/functions/random_functions.md mode change 120000 => 100644 docs/ja/sql_reference/functions/rounding_functions.md mode change 120000 => 100644 docs/ja/sql_reference/functions/splitting_merging_functions.md mode change 120000 => 100644 docs/ja/sql_reference/functions/string_functions.md mode change 120000 => 100644 docs/ja/sql_reference/functions/string_replace_functions.md mode change 120000 => 100644 docs/ja/sql_reference/functions/string_search_functions.md mode change 120000 => 100644 docs/ja/sql_reference/functions/type_conversion_functions.md mode change 120000 => 100644 docs/ja/sql_reference/functions/url_functions.md mode change 120000 => 100644 docs/ja/sql_reference/functions/uuid_functions.md mode change 120000 => 100644 docs/ja/sql_reference/functions/ym_dict_functions.md mode change 120000 => 100644 docs/ja/sql_reference/index.md mode change 120000 => 100644 docs/ja/sql_reference/operators.md mode change 120000 => 100644 docs/ja/sql_reference/statements/alter.md mode change 120000 => 100644 docs/ja/sql_reference/statements/create.md mode change 120000 => 100644 docs/ja/sql_reference/statements/index.md mode change 120000 => 100644 docs/ja/sql_reference/statements/insert_into.md mode change 120000 => 100644 docs/ja/sql_reference/statements/misc.md mode change 120000 => 100644 docs/ja/sql_reference/statements/select.md mode change 120000 => 100644 docs/ja/sql_reference/statements/show.md mode 
change 120000 => 100644 docs/ja/sql_reference/statements/system.md mode change 120000 => 100644 docs/ja/sql_reference/syntax.md mode change 120000 => 100644 docs/ja/sql_reference/table_functions/file.md mode change 120000 => 100644 docs/ja/sql_reference/table_functions/generate.md mode change 120000 => 100644 docs/ja/sql_reference/table_functions/hdfs.md mode change 120000 => 100644 docs/ja/sql_reference/table_functions/index.md mode change 120000 => 100644 docs/ja/sql_reference/table_functions/input.md mode change 120000 => 100644 docs/ja/sql_reference/table_functions/jdbc.md mode change 120000 => 100644 docs/ja/sql_reference/table_functions/merge.md mode change 120000 => 100644 docs/ja/sql_reference/table_functions/mysql.md mode change 120000 => 100644 docs/ja/sql_reference/table_functions/numbers.md mode change 120000 => 100644 docs/ja/sql_reference/table_functions/odbc.md mode change 120000 => 100644 docs/ja/sql_reference/table_functions/remote.md mode change 120000 => 100644 docs/ja/sql_reference/table_functions/url.md mode change 120000 => 100644 docs/ja/whats_new/changelog/2017.md mode change 120000 => 100644 docs/ja/whats_new/changelog/2018.md mode change 120000 => 100644 docs/ja/whats_new/changelog/2019.md mode change 120000 => 100644 docs/ja/whats_new/changelog/index.md mode change 120000 => 100644 docs/ja/whats_new/index.md mode change 120000 => 100644 docs/ja/whats_new/roadmap.md mode change 120000 => 100644 docs/ja/whats_new/security_changelog.md delete mode 100644 docs/toc_es.yml delete mode 100644 docs/toc_fa.yml delete mode 100644 docs/toc_fr.yml diff --git a/.gitignore b/.gitignore index 8e2c1d21ede..f53598b4b4c 100644 --- a/.gitignore +++ b/.gitignore @@ -18,7 +18,7 @@ /docs/website /docs/venv/ /docs/tools/venv/ -/docs/tools/translate/venv/ +/docs/tools/translate/venv /docs/tools/translate/output.md /docs/en/single.md /docs/ru/single.md diff --git a/docs/es/commercial/cloud.md b/docs/es/commercial/cloud.md index f77f536341f..d8d21359f84 100644 --- a/docs/es/commercial/cloud.md +++ b/docs/es/commercial/cloud.md @@ -1,18 +1,21 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa --- # Proveedores de servicios en la nube de ClickHouse {#clickhouse-cloud-service-providers} !!! info "INFO" - Si ha lanzado una nube pública con el servicio ClickHouse administrado, no dude en [abrir una solicitud de extracción](https://github.com/ClickHouse/ClickHouse/edit/master/docs/es/commercial/cloud.md) añadiéndolo a la siguiente lista. + Si ha lanzado una nube pública con el servicio ClickHouse administrado, no dude en [abrir una solicitud de extracción](https://github.com/ClickHouse/ClickHouse/edit/master/docs/en/commercial/cloud.md) añadiéndolo a la siguiente lista. 
## Nube de Yandex {#yandex-cloud} [Servicio administrado de Yandex para ClickHouse](https://cloud.yandex.com/services/managed-clickhouse?utm_source=referrals&utm_medium=clickhouseofficialsite&utm_campaign=link3) proporciona las siguientes características clave: -- Servicio ZooKeeper totalmente gestionado para [Replicación de ClickHouse](../operations/table_engines/replication.md) +- Servicio ZooKeeper totalmente gestionado para [Replicación de ClickHouse](../engines/table_engines/mergetree_family/replication.md) - Múltiples opciones de tipo de almacenamiento - Réplicas en diferentes zonas de disponibilidad - Cifrado y aislamiento - Mantenimiento automatizado + +{## [Artículo Original](https://clickhouse.tech/docs/en/commercial/cloud/) ##} diff --git a/docs/es/commercial/index.md b/docs/es/commercial/index.md new file mode 100644 index 00000000000..80bb81700f5 --- /dev/null +++ b/docs/es/commercial/index.md @@ -0,0 +1,8 @@ +--- +machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_folder_title: Commercial +toc_priority: 70 +--- + + diff --git a/docs/es/data_types/nested_data_structures/index.md b/docs/es/data_types/nested_data_structures/index.md deleted file mode 100644 index 9e5e2ad5444..00000000000 --- a/docs/es/data_types/nested_data_structures/index.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -machine_translated: true ---- - -# Estructuras de datos anidados {#nested-data-structures} - -[Artículo Original](https://clickhouse.tech/docs/es/data_types/nested_data_structures/) diff --git a/docs/es/data_types/special_data_types/expression.md b/docs/es/data_types/special_data_types/expression.md deleted file mode 100644 index bfdbf3ed0b1..00000000000 --- a/docs/es/data_types/special_data_types/expression.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -machine_translated: true ---- - -# Expresion {#expression} - -Se utiliza para representar expresiones lambda en funciones de orden superior. - -[Artículo Original](https://clickhouse.tech/docs/es/data_types/special_data_types/expression/) diff --git a/docs/es/data_types/special_data_types/index.md b/docs/es/data_types/special_data_types/index.md deleted file mode 100644 index 5915b7b3d86..00000000000 --- a/docs/es/data_types/special_data_types/index.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -machine_translated: true ---- - -# Tipos de datos especiales {#special-data-types} - -Special data type values can’t be saved to a table or output in results, but can be used as the intermediate result when running a query. - -[Artículo Original](https://clickhouse.tech/docs/es/data_types/special_data_types/) diff --git a/docs/es/data_types/special_data_types/set.md b/docs/es/data_types/special_data_types/set.md deleted file mode 100644 index 5557b20dde2..00000000000 --- a/docs/es/data_types/special_data_types/set.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -machine_translated: true ---- - -# Establecer {#set} - -Utilizado para la mitad derecha de un [ES](../../query_language/select.md#select-in-operators) expresión. - -[Artículo Original](https://clickhouse.tech/docs/es/data_types/special_data_types/set/) diff --git a/docs/es/database_engines/index.md b/docs/es/database_engines/index.md deleted file mode 100644 index 55ae226a20d..00000000000 --- a/docs/es/database_engines/index.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -machine_translated: true ---- - -# Motores de base de datos {#database-engines} - -Los motores de bases de datos le permiten trabajar con tablas. 
- -De forma predeterminada, ClickHouse utiliza su motor de base de datos nativa, que proporciona [motores de mesa](../operations/table_engines/index.md) y una [Sistema abierto.](../query_language/syntax.md). - -También puede utilizar los siguientes motores de base de datos: - -- [MySQL](mysql.md) - -- [Perezoso](lazy.md) - -[Artículo Original](https://clickhouse.tech/docs/es/database_engines/) diff --git a/docs/es/development/architecture.md b/docs/es/development/architecture.md index a6f41493f26..c38dfeee9ae 100644 --- a/docs/es/development/architecture.md +++ b/docs/es/development/architecture.md @@ -1,50 +1,53 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 62 +toc_title: "Descripci\xF3n general de la arquitectura ClickHouse" --- # Descripción general de la arquitectura ClickHouse {#overview-of-clickhouse-architecture} -ClickHouse es un verdadero DBMS orientado a columnas. Los datos se almacenan por columnas y durante la ejecución de matrices (vectores o fragmentos de columnas). Siempre que sea posible, las operaciones se envían en matrices, en lugar de en valores individuales. Esto se llama “vectorized query execution,” y ayuda a reducir el costo del procesamiento de datos real. +ClickHouse es un verdadero DBMS orientado a columnas. Los datos se almacenan por columnas y durante la ejecución de matrices (vectores o fragmentos de columnas). Siempre que sea posible, las operaciones se envían en matrices, en lugar de en valores individuales. Se llama “vectorized query execution,” y ayuda a reducir el costo del procesamiento de datos real. > Esta idea no es nada nuevo. Se remonta a la `APL` lenguaje de programación y sus descendientes: `A +`, `J`, `K`, y `Q`. La programación de matrices se utiliza en el procesamiento de datos científicos. Tampoco es esta idea algo nuevo en las bases de datos relacionales: por ejemplo, se usa en el `Vectorwise` sistema. -Existen dos enfoques diferentes para acelerar el procesamiento de consultas: la ejecución de consultas vectorizadas y la generación de código en tiempo de ejecución. En este último, el código se genera para cada tipo de consulta sobre la marcha, eliminando toda la indirección y el despacho dinámico. Ninguno de estos enfoques es estrictamente mejor que el otro. La generación de código de tiempo de ejecución puede ser mejor cuando fusiona muchas operaciones, utilizando así las unidades de ejecución de la CPU y la canalización. La ejecución de consultas vectorizadas puede ser menos práctica porque implica vectores temporales que deben escribirse en la memoria caché y leerse. Si los datos temporales no caben en la memoria caché L2, esto se convierte en un problema. Pero la ejecución de consultas vectorizadas utiliza más fácilmente las capacidades SIMD de la CPU. Un [documento de investigación](http://15721.courses.cs.cmu.edu/spring2016/papers/p5-sompolski.pdf) escrito por nuestros amigos muestra que es mejor combinar ambos enfoques. ClickHouse utiliza la ejecución de consultas vectorizadas y tiene un soporte inicial limitado para la generación de código en tiempo de ejecución. +Existen dos enfoques diferentes para acelerar el procesamiento de consultas: la ejecución de consultas vectorizadas y la generación de código en tiempo de ejecución. Este último elimina toda la indirección y el despacho dinámico. Ninguno de estos enfoques es estrictamente mejor que el otro. 
La generación de código de tiempo de ejecución puede ser mejor cuando fusiona muchas operaciones, utilizando así las unidades de ejecución de la CPU y la canalización. La ejecución de consultas vectorizadas puede ser menos práctica porque implica vectores temporales que deben escribirse en la memoria caché y leerse. Si los datos temporales no caben en la memoria caché L2, esto se convierte en un problema. Pero la ejecución de consultas vectorizadas utiliza más fácilmente las capacidades SIMD de la CPU. Un [documento de investigación](http://15721.courses.cs.cmu.edu/spring2016/papers/p5-sompolski.pdf) escrito por nuestros amigos muestra que es mejor combinar ambos enfoques. ClickHouse utiliza la ejecución de consultas vectorizadas y tiene un soporte inicial limitado para la generación de código en tiempo de ejecución. ## Columna {#columns} -Para representar columnas en la memoria (en realidad, fragmentos de columnas), el `IColumn` se utiliza la interfaz. Esta interfaz proporciona métodos auxiliares para la implementación de varios operadores relacionales. Casi todas las operaciones son inmutables: no modifican la columna original, sino que crean una nueva modificada. Por ejemplo, el `IColumn :: filter` método acepta una máscara de bytes de filtro. Se utiliza para el `WHERE` y `HAVING` operadores relacionales. Ejemplos adicionales: el `IColumn :: permute` para apoyar `ORDER BY` el `IColumn :: cut` para apoyar `LIMIT` y así sucesivamente. +`IColumn` interfaz se utiliza para representar columnas en la memoria (en realidad, fragmentos de columnas). Esta interfaz proporciona métodos auxiliares para la implementación de varios operadores relacionales. Casi todas las operaciones son inmutables: no modifican la columna original, sino que crean una nueva modificada. Por ejemplo, el `IColumn :: filter` método acepta una máscara de bytes de filtro. Se utiliza para el `WHERE` y `HAVING` operadores relacionales. Ejemplos adicionales: el `IColumn :: permute` para apoyar `ORDER BY`, el `IColumn :: cut` para apoyar `LIMIT`. -Diversos `IColumn` aplicación (`ColumnUInt8`, `ColumnString` y así sucesivamente) son responsables del diseño de memoria de las columnas. El diseño de memoria suele ser una matriz contigua. Para el tipo entero de columnas, es solo una matriz contigua, como `std :: vector`. Para `String` y `Array` columnas, son dos vectores: uno para todos los elementos de la matriz, colocados contiguamente, y un segundo para los desplazamientos al comienzo de cada matriz. También hay `ColumnConst` que almacena solo un valor en la memoria, pero parece una columna. +Diversos `IColumn` aplicación (`ColumnUInt8`, `ColumnString`, y así sucesivamente) son responsables del diseño de memoria de las columnas. El diseño de memoria suele ser una matriz contigua. Para el tipo entero de columnas, es solo una matriz contigua, como `std :: vector`. Para `String` y `Array` columnas, son dos vectores: uno para todos los elementos de la matriz, colocados contiguamente, y un segundo para los desplazamientos al comienzo de cada matriz. También hay `ColumnConst` que almacena solo un valor en la memoria, pero parece una columna. ## Campo {#field} Sin embargo, también es posible trabajar con valores individuales. Para representar un valor individual, el `Field` se utiliza. `Field` es sólo una unión discriminada de `UInt64`, `Int64`, `Float64`, `String` y `Array`. `IColumn` tiene el `operator[]` para obtener el valor n-ésimo como un `Field` y el `insert` método para agregar un `Field` al final de una columna. 
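To make the `IColumn`/`Field` contract above concrete, here is a minimal self-contained C++ sketch (toy types invented for illustration, not ClickHouse's actual classes): a `Field` modeled as a discriminated union via `std::variant`, a column exposing the boxed `operator[]`/`insert` slow path, and an immutable `filter` in the spirit of `IColumn::filter`. The public `data` member stands in for the `getData`-style fast path discussed under leaky abstractions below.

```cpp
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <string>
#include <variant>
#include <vector>

// Toy stand-in for DB::Field: a discriminated union of a few scalar types.
using Field = std::variant<uint64_t, int64_t, double, std::string>;

// Toy stand-in for ColumnUInt64. Operations are immutable: filter() returns
// a new column instead of modifying this one, mirroring IColumn semantics.
struct ToyColumnUInt64
{
    std::vector<uint64_t> data;  // direct access = the "leaky" fast path

    Field operator[](size_t n) const { return Field{data[n]}; }  // boxed, slow path
    void insert(const Field & f) { data.push_back(std::get<uint64_t>(f)); }

    ToyColumnUInt64 filter(const std::vector<uint8_t> & mask) const
    {
        ToyColumnUInt64 result;
        for (size_t i = 0; i < data.size(); ++i)
            if (mask[i])
                result.data.push_back(data[i]);
        return result;
    }
};

int main()
{
    ToyColumnUInt64 col;
    for (uint64_t v : {10, 20, 30, 40})
        col.insert(Field{v});

    ToyColumnUInt64 filtered = col.filter({1, 0, 1, 0});  // keep rows 0 and 2
    for (size_t i = 0; i < filtered.data.size(); ++i)
        std::cout << std::get<uint64_t>(filtered[i]) << '\n';  // prints 10, 30
}
```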
Estos métodos no son muy eficientes, ya que requieren tratar con temporal `Field` objetos que representan un valor individual. Hay métodos más eficientes, tales como `insertFrom`, `insertRangeFrom` y así sucesivamente. -`Field` no tiene suficiente información sobre un tipo de datos específico para una tabla. Por ejemplo, `UInt8`, `UInt16`, `UInt32`, y `UInt64` todos están representados como `UInt64` es una `Field`. +`Field` no tiene suficiente información sobre un tipo de datos específico para una tabla. Por ejemplo, `UInt8`, `UInt16`, `UInt32`, y `UInt64` todos están representados como `UInt64` en una `Field`. ## Abstracciones con fugas {#leaky-abstractions} `IColumn` tiene métodos para transformaciones relacionales comunes de datos, pero no satisfacen todas las necesidades. Por ejemplo, `ColumnUInt64` no tiene un método para calcular la suma de dos columnas, y `ColumnString` no tiene un método para ejecutar una búsqueda de subcadena. Estas innumerables rutinas se implementan fuera de `IColumn`. -Varias funciones en columnas se pueden implementar de una manera genérica, no eficiente utilizando `IColumn` para extraer `Field` valores, o de una manera especializada utilizando el conocimiento del diseño de la memoria interna de los datos en un `IColumn` aplicación. Para hacer esto, las funciones se convierten en un `IColumn` escriba y trate con la representación interna directamente. Por ejemplo, `ColumnUInt64` tiene el `getData` método que devuelve una referencia a una matriz interna, luego una rutina separada lee o llena esa matriz directamente. De hecho, tenemos “leaky abstractions” para permitir especializaciones eficientes de varias rutinas. +Varias funciones en columnas se pueden implementar de una manera genérica, no eficiente utilizando `IColumn` para extraer `Field` valores, o de una manera especializada utilizando el conocimiento del diseño de la memoria interna de los datos en un `IColumn` aplicación. Se implementa mediante la conversión de funciones a un `IColumn` escriba y trate con la representación interna directamente. Por ejemplo, `ColumnUInt64` tiene el `getData` método que devuelve una referencia a una matriz interna, luego una rutina separada lee o llena esa matriz directamente. Tenemos “leaky abstractions” para permitir especializaciones eficientes de varias rutinas. ## Tipos de datos {#data_types} -`IDataType` es responsable de la serialización y deserialización: para leer y escribir fragmentos de columnas o valores individuales en formato binario o de texto. `IDataType` corresponde directamente a los tipos de datos en las tablas. Por ejemplo, heno `DataTypeUInt32`, `DataTypeDateTime`, `DataTypeString` y así sucesivamente. +`IDataType` es responsable de la serialización y deserialización: para leer y escribir fragmentos de columnas o valores individuales en formato binario o de texto. `IDataType` corresponde directamente a los tipos de datos en las tablas. Por ejemplo, hay `DataTypeUInt32`, `DataTypeDateTime`, `DataTypeString` y así sucesivamente. `IDataType` y `IColumn` están vagamente relacionados entre sí. Diferentes tipos de datos se pueden representar en la memoria por el mismo `IColumn` aplicación. Por ejemplo, `DataTypeUInt32` y `DataTypeDateTime` están representados por `ColumnUInt32` o `ColumnConstUInt32`. Además, el mismo tipo de datos se puede representar mediante `IColumn` aplicación. Por ejemplo, `DataTypeUInt8` puede ser representado por `ColumnUInt8` o `ColumnConstUInt8`. -`IDataType` sólo almacena metadatos. 
Por ejemplo, `DataTypeUInt8` no almacena nada en absoluto (excepto vptr) y `DataTypeFixedString` tiendas en solitario `N` (el tamaño de las cadenas de tamaño fijo). +`IDataType` sólo almacena metadatos. Por ejemplo, `DataTypeUInt8` no almacena nada en absoluto (excepto vptr) y `DataTypeFixedString` tiendas solo `N` (el tamaño de las cadenas de tamaño fijo). -`IDataType` tiene métodos auxiliares para varios formatos de datos. Los ejemplos son métodos para serializar un valor con posibles citas, para serializar un valor para JSON y para serializar un valor como parte del formato XML. No hay correspondencia directa con los formatos de datos. Por ejemplo, los diferentes formatos de datos `Pretty` y `TabSeparated` Puede utilizar el mismo `serializeTextEscaped` método de ayuda de la `IDataType` interfaz. +`IDataType` tiene métodos auxiliares para varios formatos de datos. Los ejemplos son métodos para serializar un valor con posibles citas, para serializar un valor para JSON y para serializar un valor como parte del formato XML. No hay correspondencia directa con los formatos de datos. Por ejemplo, los diferentes formatos de datos `Pretty` y `TabSeparated` puede utilizar el mismo `serializeTextEscaped` método de ayuda de la `IDataType` interfaz. ## Bloque {#block} -Naciones `Block` es un contenedor que representa un subconjunto (porción) de una tabla en la memoria. Es sólo un conjunto de triples: `(IColumn, IDataType, column name)`. Durante la ejecución de la consulta, los datos son procesados por `Block`s. Si tenemos un `Block`, tenemos datos (en el `IColumn` objeto), tenemos información sobre su tipo (en `IDataType`) que nos dice cómo lidiar con esa columna, y tenemos el nombre de la columna (ya sea el nombre de la columna original de la tabla o algún nombre artificial asignado para obtener resultados temporales de los cálculos). +A `Block` es un contenedor que representa un subconjunto (porción) de una tabla en la memoria. Es sólo un conjunto de triples: `(IColumn, IDataType, column name)`. Durante la ejecución de la consulta, los datos son procesados por `Block`s. Si tenemos un `Block`, tenemos datos (en el `IColumn` objeto), tenemos información sobre su tipo (en `IDataType`) que nos dice cómo lidiar con esa columna, y tenemos el nombre de la columna. Podría ser el nombre de columna original de la tabla o algún nombre artificial asignado para obtener resultados temporales de los cálculos. -Cuando calculamos alguna función sobre columnas en un bloque, agregamos otra columna con su resultado al bloque, y no tocamos columnas para argumentos de la función porque las operaciones son inmutables. Más tarde, las columnas innecesarias se pueden eliminar del bloque, pero no se pueden modificar. Esto es conveniente para la eliminación de subexpresiones comunes. +Cuando calculamos alguna función sobre columnas en un bloque, agregamos otra columna con su resultado al bloque, y no tocamos columnas para argumentos de la función porque las operaciones son inmutables. Más tarde, las columnas innecesarias se pueden eliminar del bloque, pero no se pueden modificar. Es conveniente para la eliminación de subexpresiones comunes. -Se crean bloques para cada fragmento de datos procesado. Tenga en cuenta que para el mismo tipo de cálculo, los nombres y tipos de columna siguen siendo los mismos para diferentes bloques y solo los cambios de datos de columna. 
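A rough self-contained sketch of the `Block` idea just described (toy stand-ins, assuming nothing about the real `IColumn`/`IDataType` headers): a block as a vector of `(column, type, name)` triples, where evaluating a function appends a result column and leaves the argument columns untouched.

```cpp
#include <cstdint>
#include <iostream>
#include <memory>
#include <string>
#include <vector>

// Toy stand-ins; the real types are DB::IColumn and DB::IDataType.
struct ToyColumn { std::vector<int64_t> data; };
using ToyColumnPtr = std::shared_ptr<const ToyColumn>;

struct ColumnWithTypeAndName
{
    ToyColumnPtr column;
    std::string type_name;  // stands in for an IDataType pointer
    std::string name;
};

// A block is just a set of such triples.
using ToyBlock = std::vector<ColumnWithTypeAndName>;

// Computing f(x) = x + 1 appends a result column; arguments stay untouched,
// since operations on columns are immutable.
ToyBlock addPlusOne(ToyBlock block, size_t arg_pos)
{
    auto result = std::make_shared<ToyColumn>();
    for (int64_t v : block[arg_pos].column->data)
        result->data.push_back(v + 1);
    block.push_back({result, "Int64", "plus(" + block[arg_pos].name + ", 1)"});
    return block;
}

int main()
{
    ToyBlock block{{std::make_shared<ToyColumn>(ToyColumn{{1, 2, 3}}), "Int64", "x"}};
    ToyBlock with_result = addPlusOne(block, 0);
    for (const auto & col : with_result)
        std::cout << col.name << " rows=" << col.column->data.size() << '\n';
}
```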
Es mejor dividir los datos del bloque desde el encabezado del bloque porque los tamaños de bloque pequeños tendrán una gran sobrecarga de cadenas temporales para copiar shared\_ptrs y nombres de columna. +Se crean bloques para cada fragmento de datos procesado. Tenga en cuenta que para el mismo tipo de cálculo, los nombres y tipos de columna siguen siendo los mismos para diferentes bloques y solo cambian los datos de columna. Es mejor dividir los datos del bloque desde el encabezado del bloque porque los tamaños de bloque pequeños tienen una gran sobrecarga de cadenas temporales para copiar shared\_ptrs y nombres de columna. ## Bloquear flujos {#block-streams} @@ -54,90 +57,90 @@ Los flujos son responsables de: 1. Leer o escribir en una mesa. La tabla solo devuelve una secuencia para leer o escribir bloques. 2. Implementación de formatos de datos. Por ejemplo, si desea enviar datos a un terminal en `Pretty` formato, crea un flujo de salida de bloque donde presiona bloques y los formatea. -3. Realización de transformaciones de datos. Digamos que tienes `IBlockInputStream` y desea crear una secuencia filtrada. Bienvenidos `FilterBlockInputStream` e inicializarlo con su transmisión. Luego, cuando tiras de un bloque de `FilterBlockInputStream`, extrae un bloque de su flujo, lo filtra y le devuelve el bloque filtrado. Las canalizaciones de ejecución de consultas se representan de esta manera. +3. Realización de transformaciones de datos. Digamos que tienes `IBlockInputStream` y desea crear una secuencia filtrada. Usted crea `FilterBlockInputStream` e inicializarlo con su transmisión. Luego, cuando tiras de un bloque de `FilterBlockInputStream`, extrae un bloque de su flujo, lo filtra y le devuelve el bloque filtrado. Las canalizaciones de ejecución de consultas se representan de esta manera. Hay transformaciones más sofisticadas. Por ejemplo, cuando tiras de `AggregatingBlockInputStream`, lee todos los datos de su origen, los agrega y, a continuación, devuelve un flujo de datos agregados para usted. Otro ejemplo: `UnionBlockInputStream` acepta muchas fuentes de entrada en el constructor y también una serie de subprocesos. Lanza múltiples hilos y lee de múltiples fuentes en paralelo. > Las secuencias de bloques usan el “pull” enfoque para controlar el flujo: cuando extrae un bloque de la primera secuencia, en consecuencia extrae los bloques requeridos de las secuencias anidadas, y toda la tubería de ejecución funcionará. Ni “pull” ni “push” es la mejor solución, porque el flujo de control está implícito y eso limita la implementación de varias características, como la ejecución simultánea de múltiples consultas (fusionando muchas tuberías). Esta limitación podría superarse con coroutines o simplemente ejecutando hilos adicionales que se esperan el uno al otro. Podemos tener más posibilidades si hacemos explícito el flujo de control: si localizamos la lógica para pasar datos de una unidad de cálculo a otra fuera de esas unidades de cálculo. Lea esto [artículo](http://journal.stuffwithstuff.com/2013/01/13/iteration-inside-and-out/) para más pensamientos. -Debemos tener en cuenta que la canalización de ejecución de consultas crea datos temporales en cada paso. Tratamos de mantener el tamaño del bloque lo suficientemente pequeño para que los datos temporales se ajusten a la memoria caché de la CPU. Con esa suposición, escribir y leer datos temporales es casi gratis en comparación con otros cálculos. 
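The pull-driven pipeline described above can be sketched with two toy stream classes (illustrative only; the real hierarchy is `IBlockInputStream` and friends): each `read()` pulls a chunk from the nested stream, transforms it, and hands it back, with an empty chunk signalling exhaustion.

```cpp
#include <cstdint>
#include <iostream>
#include <memory>
#include <vector>

using Chunk = std::vector<int64_t>;  // stand-in for a Block

struct IToyBlockInputStream
{
    virtual ~IToyBlockInputStream() = default;
    virtual Chunk read() = 0;  // empty chunk means the stream is exhausted
};

// Source: hands out the input in fixed-size chunks.
struct NumbersStream : IToyBlockInputStream
{
    int64_t next = 0, limit, chunk_size;
    NumbersStream(int64_t limit_, int64_t chunk_size_) : limit(limit_), chunk_size(chunk_size_) {}
    Chunk read() override
    {
        Chunk chunk;
        while (next < limit && chunk.size() < static_cast<size_t>(chunk_size))
            chunk.push_back(next++);
        return chunk;
    }
};

// Transform: pulls from its child and keeps only even values,
// in the spirit of FilterBlockInputStream.
struct FilterEvenStream : IToyBlockInputStream
{
    std::unique_ptr<IToyBlockInputStream> child;
    explicit FilterEvenStream(std::unique_ptr<IToyBlockInputStream> child_) : child(std::move(child_)) {}
    Chunk read() override
    {
        while (true)
        {
            Chunk chunk = child->read();
            if (chunk.empty())
                return {};  // child exhausted
            Chunk filtered;
            for (int64_t v : chunk)
                if (v % 2 == 0)
                    filtered.push_back(v);
            if (!filtered.empty())
                return filtered;  // otherwise pull another chunk
        }
    }
};

int main()
{
    FilterEvenStream pipeline(std::make_unique<NumbersStream>(10, 4));
    for (Chunk chunk = pipeline.read(); !chunk.empty(); chunk = pipeline.read())
        for (int64_t v : chunk)
            std::cout << v << ' ';  // prints 0 2 4 6 8
    std::cout << '\n';
}
```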
We should note that the query execution pipeline creates temporary data at each step. We try to keep the block size small enough that the temporary data fits in the CPU cache. With that assumption, writing and reading temporary data is almost free in comparison with other calculations. We could consider an alternative, which is to fuse many operations in the pipeline together. It could make the pipeline as short as possible and remove much of the temporary data, which could be an advantage, but it also has drawbacks. For example, a split pipeline makes it easy to implement caching of intermediate data, stealing intermediate data from similar queries running at the same time, and merging pipelines for similar queries.

## Formats {#formats}

Data formats are implemented with block streams. There are “presentational” formats suitable only for outputting data to the client, such as the `Pretty` format, which provides only an `IBlockOutputStream`. And there are input/output formats, such as `TabSeparated` or `JSONEachRow`.

There are also row streams: `IRowInputStream` and `IRowOutputStream`. They allow you to pull/push data by individual rows, not by blocks. They are only needed to simplify the implementation of row-oriented formats. The wrappers `BlockInputStreamFromRowInputStream` and `BlockOutputStreamFromRowOutputStream` allow you to convert row-oriented streams into regular block-oriented streams.

## I/O {#io}

For byte-oriented input/output, there are the `ReadBuffer` and `WriteBuffer` abstract classes. They are used instead of C++ `iostream`s. Don't worry: every mature C++ project uses something other than `iostream`s, for good reasons.

`ReadBuffer` and `WriteBuffer` are just a contiguous buffer and a cursor pointing to a position in that buffer. Implementations may or may not own the memory for the buffer.
There is a virtual method to fill the buffer with the following data (for `ReadBuffer`) or to flush the buffer somewhere (for `WriteBuffer`). The virtual methods are rarely called.

Implementations of `ReadBuffer`/`WriteBuffer` are used for working with files, file descriptors, and network sockets, for implementing compression (`CompressedWriteBuffer` is initialized with another WriteBuffer and performs compression before writing data to it), and for other purposes: the names `ConcatReadBuffer`, `LimitReadBuffer`, and `HashingWriteBuffer` speak for themselves.

Read/WriteBuffers only deal with bytes. There are functions in the `ReadHelpers` and `WriteHelpers` header files to help with formatting input/output. For example, there are helpers to write a number in decimal format.

Let's look at what happens when you want to write a result set in `JSON` format to stdout. You have a result set ready to be fetched from an `IBlockInputStream`. You create a `WriteBufferFromFileDescriptor(STDOUT_FILENO)` to write bytes to stdout. You create a `JSONRowOutputStream`, initialized with that `WriteBuffer`, to write rows in `JSON` to stdout. You create a `BlockOutputStreamFromRowOutputStream` on top of it, to represent it as an `IBlockOutputStream`. Then you call `copyData` to transfer data from the `IBlockInputStream` to the `IBlockOutputStream`, and everything works. Internally, `JSONRowOutputStream` writes various JSON delimiters and calls the `IDataType::serializeTextJSON` method with a reference to an `IColumn` and the row number as arguments. Consequently, `IDataType::serializeTextJSON` calls a method from `WriteHelpers.h`: for example, `writeText` for numeric types and `writeJSONString` for `DataTypeString`.
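The buffer-plus-cursor design can be illustrated with a minimal sketch. This is not the actual `WriteBuffer` interface, just the shape of the pattern, and it assumes POSIX `write` for the flush:

``` cpp
#include <cstddef>
#include <unistd.h>
#include <vector>

// A contiguous buffer plus a cursor; the virtual method is called only when the buffer is full.
class ToyWriteBuffer
{
public:
    explicit ToyWriteBuffer(size_t size) : memory(size) {}
    virtual ~ToyWriteBuffer() = default;

    void write(const char * data, size_t len)
    {
        for (size_t i = 0; i < len; ++i)
        {
            if (pos == memory.size())
                flush();                 // The rarely taken slow path.
            memory[pos++] = data[i];
        }
    }

    virtual void flush() = 0;            // Push buffered bytes somewhere and reset the cursor.

protected:
    std::vector<char> memory;
    size_t pos = 0;
};

// One possible implementation: flush to a POSIX file descriptor.
class ToyWriteBufferFromFileDescriptor : public ToyWriteBuffer
{
public:
    explicit ToyWriteBufferFromFileDescriptor(int fd_, size_t size = 4096)
        : ToyWriteBuffer(size), fd(fd_) {}
    ~ToyWriteBufferFromFileDescriptor() override { flush(); }

    void flush() override
    {
        if (pos > 0)
            (void)::write(fd, memory.data(), pos);  // Error handling omitted in this sketch.
        pos = 0;
    }

private:
    int fd;
};

int main()
{
    ToyWriteBufferFromFileDescriptor out(STDOUT_FILENO);
    const char message[] = "hello\n";
    out.write(message, sizeof(message) - 1);
}
```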
## Tables {#tables}

The `IStorage` interface represents tables. Different implementations of that interface are different table engines. Examples are `StorageMergeTree`, `StorageMemory`, and so on. Instances of these classes are just tables.

The key `IStorage` methods are `read` and `write`. There are also `alter`, `rename`, `drop`, and so on. The `read` method accepts the following arguments: the set of columns to read from a table, the `AST` query to consider, and the desired number of streams to return. It returns one or multiple `IBlockInputStream` objects, plus information about the stage of data processing that was completed inside the table engine during query execution.

In most cases, the read method is only responsible for reading the specified columns from a table, not for any further data processing. All further data processing is done by the query interpreter and is outside the responsibility of `IStorage`.

But there are notable exceptions:

- The query AST is passed to the `read` method, and the table engine can use it to derive index usage and to read less data from a table.
- Sometimes the table engine can process data itself to a specific stage. For example, `StorageDistributed` can send a query to remote servers, ask them to process data to a stage where data from different remote servers can be merged, and return that preprocessed data. The query interpreter then finishes processing the data.

The table's `read` method can return multiple `IBlockInputStream` objects to allow parallel data processing. These multiple block input streams can read from a table in parallel. You can then wrap these streams with various transformations (such as expression evaluation or filtering) that can be calculated independently, and create a `UnionBlockInputStream` on top of them, to read from multiple streams in parallel.
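In spirit, the interface looks something like this simplified sketch; the argument list is heavily trimmed (the real `read` also receives the query `AST`, a context, and reports a processing stage), and all names are stand-ins:

``` cpp
#include <iostream>
#include <memory>
#include <string>
#include <vector>

using Block = std::vector<int>;
struct BlockInputStream { Block block; };
using BlockInputStreams = std::vector<std::shared_ptr<BlockInputStream>>;

struct IStorageLike
{
    virtual ~IStorageLike() = default;
    virtual BlockInputStreams read(const std::vector<std::string> & column_names,
                                   unsigned num_streams) = 0;
};

struct ToyMemoryStorage : IStorageLike
{
    BlockInputStreams read(const std::vector<std::string> &, unsigned num_streams) override
    {
        // Hand back one independent stream per requested degree of parallelism.
        BlockInputStreams streams;
        for (unsigned i = 0; i < num_streams; ++i)
            streams.push_back(std::make_shared<BlockInputStream>(BlockInputStream{{int(i)}}));
        return streams;
    }
};

int main()
{
    ToyMemoryStorage table;
    auto streams = table.read({"a", "b"}, 4);
    std::cout << "reading with " << streams.size() << " parallel streams\n";
}
```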
There are also `TableFunction`s. These are functions that return a temporary `IStorage` object to use in the `FROM` clause of a query.

To get a quick idea of how to implement your own table engine, look at something simple, like `StorageMemory` or `StorageTinyLog`.

> As the result of the `read` method, `IStorage` returns `QueryProcessingStage`: information about which parts of the query were already calculated inside storage.

## Parsers {#parsers}

A hand-written recursive descent parser parses a query. For example, `ParserSelectQuery` just recursively calls the underlying parsers for the various parts of the query. Parsers create an `AST`. The `AST` is represented by nodes, which are instances of `IAST`.

> Parser generators are not used, for historical reasons.
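As a toy illustration of the recursive descent technique (a tiny expression grammar invented for this example, not ClickHouse's actual parsers):

``` cpp
#include <cctype>
#include <iostream>
#include <stdexcept>
#include <string>

// Toy grammar:  expr := term ('+' term)*   term := digit
// Each grammar rule is a function that recursively calls the rules below it.
struct Parser
{
    std::string text;
    size_t pos = 0;

    int parseTerm()
    {
        if (pos < text.size() && std::isdigit(static_cast<unsigned char>(text[pos])))
            return text[pos++] - '0';
        throw std::runtime_error("digit expected");
    }

    int parseExpr()
    {
        int value = parseTerm();
        while (pos < text.size() && text[pos] == '+')
        {
            ++pos;
            value += parseTerm();  // A real parser would build AST nodes here instead of evaluating.
        }
        return value;
    }
};

int main()
{
    Parser parser{"1+2+3"};
    std::cout << parser.parseExpr() << '\n';  // Prints 6.
}
```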
## Interpreters {#interpreters}

Interpreters are responsible for creating the query execution pipeline from an `AST`. There are simple interpreters, such as `InterpreterExistsQuery` and `InterpreterDropQuery`, and the more sophisticated `InterpreterSelectQuery`. The query execution pipeline is a combination of block input and output streams. For example, the result of interpreting a `SELECT` query is the `IBlockInputStream` to read the result set from; the result of an INSERT query is the `IBlockOutputStream` to write the data for insertion to; and the result of interpreting an `INSERT SELECT` query is the `IBlockInputStream` that returns an empty result set on the first read, but that copies data from `SELECT` to `INSERT` at the same time.

`InterpreterSelectQuery` uses the `ExpressionAnalyzer` and `ExpressionActions` machinery for query analysis and transformations. This is where most rule-based query optimizations are done. `ExpressionAnalyzer` is quite complicated and should be rewritten: various query transformations and optimizations should be extracted into separate classes to allow modular transformations of the query.

## Functions {#functions}

There are ordinary functions and aggregate functions. For aggregate functions, see the next section.

Ordinary functions do not change the number of rows: they work as if they were processing each row independently. In fact, functions are not called for individual rows but for `Block`s of data, to implement vectorized query execution.

There are some miscellaneous functions, like [blockSize](../sql_reference/functions/other_functions.md#function-blocksize), [rowNumberInBlock](../sql_reference/functions/other_functions.md#function-rownumberinblock), and [runningAccumulate](../sql_reference/functions/other_functions.md#function-runningaccumulate), that exploit block processing and violate the independence of rows.

ClickHouse has strong typing, so there is no implicit type conversion. If a function does not support a specific combination of types, it throws an exception. But functions can work (be overloaded) for many different combinations of types. For example, the `plus` function (implementing the `+` operator) works for any combination of numeric types: `UInt8` + `Float32`, `UInt16` + `Int8`, and so on. Also, some variadic functions can accept any number of arguments, such as the `concat` function.
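A schematic of what processing a `Block` instead of a row means for a function like `plus`; this is an illustrative stand-in, not the real dispatch machinery:

``` cpp
#include <cstdint>
#include <iostream>
#include <vector>

// A vectorized "plus": one call processes whole columns, producing a new result
// column and leaving the argument columns untouched. Equal sizes are assumed.
template <typename A, typename B>
auto plusColumns(const std::vector<A> & a, const std::vector<B> & b)
{
    std::vector<decltype(A{} + B{})> result(a.size());
    for (size_t i = 0; i < a.size(); ++i)   // A tight loop that compilers can auto-vectorize.
        result[i] = a[i] + b[i];
    return result;
}

int main()
{
    std::vector<uint8_t> a{1, 2, 3};
    std::vector<float> b{0.5f, 0.5f, 0.5f};
    auto c = plusColumns(a, b);  // One instantiation per type combination, e.g. UInt8 + Float32.
    for (float v : c)
        std::cout << v << '\n';
}
```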
Implementing a function may be slightly inconvenient, because a function explicitly dispatches over the supported data types and supported `IColumns`. For example, the `plus` function has code generated by instantiating a C++ template for each combination of numeric types and for constant or non-constant left and right arguments.

It is an excellent place to implement runtime code generation to avoid template code bloat. It also makes it possible to add fused functions, like fused multiply-add, or to make multiple comparisons in one loop iteration.

Due to vectorized query execution, functions are not short-circuited. For example, if you write `WHERE f(x) AND g(y)`, both sides are calculated, even for rows where `f(x)` is zero (except when `f(x)` is a zero constant expression). But if the selectivity of the `f(x)` condition is high, and the calculation of `f(x)` is much cheaper than `g(y)`, it is better to implement multi-pass calculation: first calculate `f(x)`, then filter the columns by the result, and then calculate `g(y)` only for the smaller, filtered chunks of data.

## Aggregate Functions {#aggregate-functions}

Aggregate functions are stateful functions. They accumulate passed values into some state and allow you to get results from that state. They are managed with the `IAggregateFunction` interface. States can be rather simple (the state for `AggregateFunctionCount` is just a single `UInt64` value) or quite complex (the state of `AggregateFunctionUniqCombined` is a combination of a linear array, a hash table, and a `HyperLogLog` probabilistic data structure).
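The state/add/merge/result contract can be sketched with a toy counterpart of a count-style aggregate; arenas, serialization, and the real `IAggregateFunction` interface are omitted:

``` cpp
#include <cstdint>
#include <iostream>

// A toy aggregate: the state is a single counter, as for a count() function.
struct ToyCountState { uint64_t count = 0; };

struct ToyAggregateFunctionCount
{
    void add(ToyCountState & state) const { ++state.count; }
    // Merging partial states is what makes parallel and distributed aggregation work.
    void merge(ToyCountState & lhs, const ToyCountState & rhs) const { lhs.count += rhs.count; }
    uint64_t result(const ToyCountState & state) const { return state.count; }
};

int main()
{
    ToyAggregateFunctionCount func;
    ToyCountState partial1, partial2;  // E.g., states built by two threads or two servers.
    for (int i = 0; i < 3; ++i) func.add(partial1);
    for (int i = 0; i < 5; ++i) func.add(partial2);
    func.merge(partial1, partial2);
    std::cout << func.result(partial1) << '\n';  // Prints 8.
}
```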
States are allocated in an `Arena` (a memory pool) to deal with multiple states while executing a high-cardinality `GROUP BY` query. States can have a non-trivial constructor and destructor: for example, complicated aggregation states can allocate additional memory themselves. This requires some attention to creating and destroying states and to properly passing their ownership and destruction order.

Aggregation states can be serialized and deserialized, to pass them over the network during distributed query execution or to write them to disk when there is not enough RAM. They can even be stored in a table with the `DataTypeAggregateFunction` data type to allow incremental aggregation of data.

> The serialized data format for aggregate function states is not versioned right now. This is ok if aggregate states are only stored temporarily. But we have the `AggregatingMergeTree` table engine for incremental aggregation, and people are already using it in production. This is why backward compatibility is required when changing the serialized format for any aggregate function in the future.

## Server {#server}

The server implements several different interfaces:

- An HTTP interface for any foreign clients.
- A TCP interface for the native ClickHouse client and for cross-server communication during distributed query execution.
- An interface for transferring data for replication.

Internally, it is just a primitive multithreaded server without coroutines or fibers. Since the server is not designed to process a high rate of simple queries, but to process a relatively low rate of complex queries, each of them can process a vast amount of data for analytics.
The server initializes the `Context` class with the necessary environment for query execution: the list of available databases, users and access rights, settings, clusters, the process list, the query log, and so on. Interpreters use this environment.

We maintain full backward and forward compatibility for the server TCP protocol: old clients can talk to new servers, and new clients can talk to old servers. But we do not want to maintain it eternally, and we remove support for old versions after about one year.

!!! note "Note"
    For most external applications, we recommend using the HTTP interface because it is simple and easy to use. The TCP protocol is more tightly linked to internal data structures: it uses an internal format for passing blocks of data, and it uses custom framing for compressed data. We have not released a C library for that protocol because it would require linking most of the ClickHouse codebase, which is not practical.

## Distributed Query Execution {#distributed-query-execution}

Servers in a cluster setup are mostly independent. You can create a `Distributed` table on one or all servers in a cluster. The `Distributed` table does not store data itself; it only provides a “view” of all the local tables on multiple nodes of a cluster. When you SELECT from a `Distributed` table, it rewrites that query, chooses remote nodes according to the load-balancing settings, and sends the query to them. The `Distributed` table asks remote servers to process a query just up to a stage where intermediate results from different servers can be merged.
Then it receives the intermediate results and merges them. The distributed table tries to distribute as much work as possible to the remote servers and does not send much intermediate data over the network.

Things become more complicated when you have subqueries in IN or JOIN clauses, and each of them uses a `Distributed` table. We have different strategies for the execution of these queries.

There is no global query plan for distributed query execution. Each node has its local query plan for its part of the job. We only have simple one-pass distributed query execution: we send queries to remote nodes and then merge the results. But this is not feasible for complicated queries with high-cardinality GROUP BYs or with a large amount of temporary data for JOIN. In such cases, we need to “reshuffle” data between servers, which requires additional coordination. ClickHouse does not support that kind of query execution, and we need to work on it.

## Merge Tree {#merge-tree}

`MergeTree` is a family of storage engines that supports indexing by primary key. The primary key can be an arbitrary tuple of columns or expressions. Data in a `MergeTree` table is stored in “parts”. Each part stores data in primary key order, so data is ordered lexicographically by the primary key tuple. All the table columns are stored in separate `column.bin` files in these parts. The files consist of compressed blocks. Each block is usually from 64 KB to 1 MB of uncompressed data, depending on the average value size. The blocks consist of column values placed contiguously, one after the other.
Column values are in the same order for each column (the primary key defines the order), so when you iterate over many columns, you get values for the corresponding rows.

The primary key itself is “sparse”. It does not address every single row, but only some ranges of data. A separate `primary.idx` file has the value of the primary key for each N-th row, where N is called `index_granularity` (usually, N = 8192). Also, for each column, we have `column.mrk` files with “marks”, which are offsets to each N-th row in the data file. Each mark is a pair: the offset in the file to the beginning of the compressed block, and the offset in the decompressed block to the beginning of the data. Usually, compressed blocks are aligned by marks, and the offset in the decompressed block is zero. The data for `primary.idx` always resides in memory, and the data for the `column.mrk` files is cached.

When we are going to read something from a part in `MergeTree`, we look at the `primary.idx` data and locate the ranges that could contain the requested data, then look at the `column.mrk` data and calculate the offsets for where to start reading those ranges. Because of the sparseness, excess data may be read. ClickHouse is not suitable for a high load of simple point queries, because the entire range with `index_granularity` rows must be read for each key, and the entire compressed block must be decompressed for each column. We made the index sparse because we must be able to maintain trillions of rows per single server without noticeable memory consumption for the index. Also, because the primary key is sparse, it is not unique: it cannot check the existence of a key in the table at INSERT time. You could have many rows with the same key in a table.
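The effect of the sparse index can be illustrated with a small self-contained sketch: a binary search over the keys of every N-th row yields whole granules that must then be read in full. (A real lookup handles ranges that span several granules; this sketch checks just one.)

``` cpp
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

int main()
{
    const size_t index_granularity = 4;  // The real default is 8192.

    // A sorted primary key column and its sparse index: the key of every N-th row.
    std::vector<int64_t> keys = {1, 3, 3, 5, 7, 8, 11, 14, 20, 22, 25, 30};
    std::vector<int64_t> sparse_index;
    for (size_t i = 0; i < keys.size(); i += index_granularity)
        sparse_index.push_back(keys[i]);  // {1, 7, 20}

    // Locate the granule that may contain key == 8; all of its rows must be read.
    int64_t needle = 8;
    auto it = std::upper_bound(sparse_index.begin(), sparse_index.end(), needle);
    size_t granule = (it == sparse_index.begin()) ? 0 : (it - sparse_index.begin() - 1);
    size_t first_row = granule * index_granularity;
    size_t last_row = std::min(first_row + index_granularity, keys.size());
    std::cout << "scan rows [" << first_row << ", " << last_row << ")\n";  // Prints [4, 8).
}
```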
When you `INSERT` a bunch of data into `MergeTree`, that bunch is sorted by primary key order and forms a new part. There are background threads that periodically select some parts and merge them into a single sorted part, to keep the number of parts relatively low. That is why it is called `MergeTree`. Of course, merging leads to “write amplification”. All parts are immutable: they are only created and deleted, not modified. When a SELECT is executed, it holds a snapshot of the table (a set of parts). After merging, we also keep the old parts for some time, to make recovery after failure easier: if we see that some merged part is probably broken, we can replace it with its source parts.

`MergeTree` is not an LSM tree because it does not contain a “memtable” and a “log”: inserted data is written directly to the filesystem. This makes it suitable only for INSERTing data in batches, not by individual rows and not very frequently: about once per second is ok, but a thousand times a second is not. We did it this way for simplicity's sake, and because we are already inserting data in batches in our applications.

> MergeTree tables can only have one (primary) index: there are no secondary indices. It would be nice to allow multiple physical representations under one logical table, for example, to store data in more than one physical order, or even to allow representations with pre-aggregated data along with the original data.

There are MergeTree engines that do additional work during background merges. Examples are `CollapsingMergeTree` and `AggregatingMergeTree`.
This could be treated as special support for updates. Keep in mind that these are not real updates, because users usually have no control over when background merges are executed, and data in a `MergeTree` table is almost always stored in more than one part, not in completely merged form.

## Replication {#replication}

Replication in ClickHouse can be configured on a per-table basis. You could have some replicated and some non-replicated tables on the same server. You could also have tables replicated in different ways, such as one table with two-factor replication and another with three-factor.

Replication is implemented in the `ReplicatedMergeTree` storage engine. The path in `ZooKeeper` is specified as a parameter for the storage engine. All tables with the same path in `ZooKeeper` become replicas of each other: they synchronize their data and maintain consistency. Replicas can be added and removed dynamically, simply by creating or dropping a table.

Replication uses an asynchronous multi-master scheme. You can insert data into any replica that has a session with `ZooKeeper`, and the data is replicated to all the other replicas asynchronously. Because ClickHouse does not support UPDATEs, replication is conflict-free. As there is no quorum acknowledgment of inserts, just-inserted data might be lost if one node fails.

Metadata for replication is stored in ZooKeeper. There is a replication log that lists the actions to perform. The actions are: get a part; merge parts; drop a partition; and so on. Each replica copies the replication log to its queue and then executes the actions from the queue. For example, on insertion, the “get the part” action is created in the log, and every replica downloads that part. Merges are coordinated between replicas to get byte-identical results. All parts are merged the same way on all replicas.
This is achieved by electing one replica as the leader; that replica initiates merges and writes “merge parts” actions to the log.

Replication is physical: only compressed parts are transferred between nodes, not queries. Merges are processed on each replica independently in most cases, to lower network costs by avoiding network amplification. Large merged parts are sent over the network only in cases of significant replication lag.

In addition, each replica stores its state in ZooKeeper as the set of parts and their checksums. When the state on the local filesystem diverges from the reference state in ZooKeeper, the replica restores its consistency by downloading missing and broken parts from the other replicas. When there is some unexpected or broken data in the local filesystem, ClickHouse does not remove it, but moves it to a separate directory and forgets about it.

!!! note "Note"
    The ClickHouse cluster consists of independent shards, and each shard consists of replicas. The cluster is **not elastic**, so after adding a new shard, data is not rebalanced between shards automatically. Instead, the cluster load is supposed to be adjusted to be uneven. This implementation gives you more control, and it is ok for relatively small clusters, such as tens of nodes. But for clusters with hundreds of nodes that we are using in production, this approach becomes a significant drawback. We should implement a table engine that spans across the cluster, with dynamically replicated regions that could be split and balanced between clusters automatically.
{## [Original article](https://clickhouse.tech/docs/en/development/architecture/) ##}

diff --git a/docs/es/development/browse_code.md b/docs/es/development/browse_code.md
index 96a1c4bb939..6bbc9d57729 100644
--- a/docs/es/development/browse_code.md
+++ b/docs/es/development/browse_code.md
@@ -1,5 +1,8 @@
 ---
 machine_translated: true
+machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa
+toc_priority: 63
+toc_title: "Examinar el c\xF3digo fuente de ClickHouse"
 ---
 
 # Examinar el código fuente de ClickHouse {#browse-clickhouse-source-code}
diff --git a/docs/es/development/build.md b/docs/es/development/build.md
index 0f93822d1e9..e7737dfaa8d 100644
--- a/docs/es/development/build.md
+++ b/docs/es/development/build.md
@@ -1,5 +1,8 @@
 ---
 machine_translated: true
+machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa
+toc_priority: 64
+toc_title: "C\xF3mo crear ClickHouse en Linux"
 ---
 
 # Cómo construir ClickHouse para el desarrollo {#how-to-build-clickhouse-for-development}
@@ -70,7 +73,7 @@ Esto creará el `programs/clickhouse` ejecutable, que se puede usar con `client`
 
 La compilación requiere los siguientes componentes:
 
-- Git (se usa solo para pagar las fuentes, no es necesario para la compilación)
+- Git (se usa solo para verificar las fuentes, no es necesario para la compilación)
 - CMake 3.10 o más reciente
 - Ninja (recomendado) o Hacer
 - Compilador de C ++: gcc 9 o clang 8 o más reciente
@@ -105,7 +108,7 @@ Ejemplo de Fedora Rawhide:
     cmake ../ClickHouse
     make -j $(nproc)
 
-# Usted no tiene que construir ClickHouse {#you-dont-have-to-build-clickhouse}
+# No tienes que construir ClickHouse {#you-dont-have-to-build-clickhouse}
 
 ClickHouse está disponible en binarios y paquetes preconstruidos. Los binarios son portátiles y se pueden ejecutar en cualquier tipo de Linux.
@@ -135,4 +138,4 @@
 $ cd ClickHouse
 $ ./release
 ```
 
-[Artículo Original](https://clickhouse.tech/docs/es/development/build/)
+[Artículo Original](https://clickhouse.tech/docs/en/development/build/)
diff --git a/docs/es/development/build_cross_arm.md b/docs/es/development/build_cross_arm.md
index c7e8da0d864..ac325f3718c 100644
--- a/docs/es/development/build_cross_arm.md
+++ b/docs/es/development/build_cross_arm.md
@@ -1,5 +1,8 @@
 ---
 machine_translated: true
+machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa
+toc_priority: 67
+toc_title: "C\xF3mo construir ClickHouse en Linux para AARCH64 (ARM64)"
 ---
 
 # Cómo construir ClickHouse en Linux para la arquitectura AARCH64 (ARM64 {#how-to-build-clickhouse-on-linux-for-aarch64-arm64-architecture}
diff --git a/docs/es/development/build_cross_osx.md b/docs/es/development/build_cross_osx.md
index 584b28caac7..b3679c2c794 100644
--- a/docs/es/development/build_cross_osx.md
+++ b/docs/es/development/build_cross_osx.md
@@ -1,5 +1,8 @@
 ---
 machine_translated: true
+machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa
+toc_priority: 66
+toc_title: "C\xF3mo construir ClickHouse en Linux para Mac OS X"
 ---
 
 # Cómo construir ClickHouse en Linux para Mac OS X {#how-to-build-clickhouse-on-linux-for-mac-os-x}
@@ -20,7 +23,7 @@
 sudo apt-get install clang-8
 
 # Instalar conjunto de herramientas de compilación cruzada {#install-cross-compilation-toolset}
 
-Recordemos la ruta donde instalamos `cctools` Como ${CCTOOLS}
+Recordemos la ruta donde instalamos `cctools` como ${CCTOOLS}
 
 ``` bash
 mkdir ${CCTOOLS}
diff --git a/docs/es/development/build_osx.md b/docs/es/development/build_osx.md
index d311411930e..1af1bbd0fac 100644
--- a/docs/es/development/build_osx.md
+++ b/docs/es/development/build_osx.md
@@ -1,5 +1,8 @@
 ---
 machine_translated: true
+machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa
+toc_priority: 65
+toc_title: "C\xF3mo crear ClickHouse en Mac OS X"
 ---
 
 # Cómo crear ClickHouse en Mac OS X {#how-to-build-clickhouse-on-mac-os-x}
@@ -47,7 +50,7 @@ $ cd ..
 
 Si tiene la intención de ejecutar clickhouse-server, asegúrese de aumentar la variable maxfiles del sistema.
 
 !!! info "Nota"
-    Tendrá que utilizar sudo.
+    Tendrás que usar sudo.
 
 Para ello, cree el siguiente archivo:
@@ -85,6 +88,6 @@ $ sudo chown root:wheel /Library/LaunchDaemons/limit.maxfiles.plist
 
 Reiniciar.
 
-Para comprobar si está funcionando, puede usar `ulimit -n` comando.
+Para verificar si está funcionando, puede usar `ulimit -n` comando.
-[Artículo Original](https://clickhouse.tech/docs/es/development/build_osx/)
+[Artículo Original](https://clickhouse.tech/docs/en/development/build_osx/)
diff --git a/docs/es/development/contrib.md b/docs/es/development/contrib.md
index d15168b93dd..387dbee9120 100644
--- a/docs/es/development/contrib.md
+++ b/docs/es/development/contrib.md
@@ -1,5 +1,8 @@
 ---
 machine_translated: true
+machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa
+toc_priority: 70
+toc_title: Bibliotecas de terceros utilizadas
 ---
 
 # Bibliotecas de terceros utilizadas {#third-party-libraries-used}
@@ -7,28 +10,28 @@ machine_translated: true
 | Biblioteca         | Licencia |
 |--------------------|----------|
 | base64             | [Licencia BSD de 2 cláusulas](https://github.com/aklomp/base64/blob/a27c565d1b6c676beaf297fe503c4518185666f7/LICENSE) |
-| Impulsar           | [Licencia de software Boost 1.0](https://github.com/ClickHouse-Extras/boost-extra/blob/6883b40449f378019aec792f9983ce3afc7ff16e/LICENSE_1_0.txt) |
+| impulsar           | [Licencia de software Boost 1.0](https://github.com/ClickHouse-Extras/boost-extra/blob/6883b40449f378019aec792f9983ce3afc7ff16e/LICENSE_1_0.txt) |
 | Bienvenido         | [MIT](https://github.com/google/brotli/blob/master/LICENSE) |
 | capnproto          | [MIT](https://github.com/capnproto/capnproto/blob/master/LICENSE) |
 | Cctz               | [Licencia Apache 2.0](https://github.com/google/cctz/blob/4f9776a310f4952454636363def82c2bf6641d5f/LICENSE.txt) |
 | doble conversión   | [Licencia de 3 cláusulas BSD](https://github.com/google/double-conversion/blob/cf2f0f3d547dc73b4612028a155b80536902ba02/LICENSE) |
 | FastMemcpy         | [MIT](https://github.com/ClickHouse/ClickHouse/blob/master/libs/libmemcpy/impl/LICENSE) |
 | Más información    | [Licencia de 3 cláusulas BSD](https://github.com/google/googletest/blob/master/LICENSE) |
-| Hombre             | [Licencia Apache 2.0](https://github.com/uber/h3/blob/master/LICENSE) |
+| H3                 | [Licencia Apache 2.0](https://github.com/uber/h3/blob/master/LICENSE) |
 | hyperscan          | [Licencia de 3 cláusulas BSD](https://github.com/intel/hyperscan/blob/master/LICENSE) |
 | libbtrie           | [Licencia BSD de 2 cláusulas](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/libbtrie/LICENSE) |
-| libcxxabi          | [Sistema abierto.](https://github.com/ClickHouse/ClickHouse/blob/master/libs/libglibc-compatibility/libcxxabi/LICENSE.TXT) |
+| libcxxabi          | [BSD + MIT](https://github.com/ClickHouse/ClickHouse/blob/master/libs/libglibc-compatibility/libcxxabi/LICENSE.TXT) |
 | libdivide          | [Licencia Zlib](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/libdivide/LICENSE.txt) |
 | libgsasl           | [Información adicional](https://github.com/ClickHouse-Extras/libgsasl/blob/3b8948a4042e34fb00b4fb987535dc9e02e39040/LICENSE) |
 | libhdfs3           | [Licencia Apache 2.0](https://github.com/ClickHouse-Extras/libhdfs3/blob/bd6505cbb0c130b0db695305b9a38546fa880e5a/LICENSE.txt) |
 | libmetrohash       | [Licencia Apache 2.0](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/libmetrohash/LICENSE) |
-| Libpcg-al azar     | [Licencia Apache 2.0](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/libpcg-random/LICENSE-APACHE.txt) |
+| libpcg-al azar     | [Licencia Apache 2.0](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/libpcg-random/LICENSE-APACHE.txt) |
 | Libressl           | [Licencia OpenSSL](https://github.com/ClickHouse-Extras/ssl/blob/master/COPYING) |
 | Librdkafka         | [Licencia BSD de 2 cláusulas](https://github.com/edenhill/librdkafka/blob/363dcad5a23dc29381cc626620e68ae418b3af19/LICENSE) |
 | libwidechar\_width | [CC0 1.0 Universal](https://github.com/ClickHouse/ClickHouse/blob/master/libs/libwidechar_width/LICENSE) |
 | llvm               | [Licencia de 3 cláusulas BSD](https://github.com/ClickHouse-Extras/llvm/blob/163def217817c90fb982a6daf384744d8472b92b/llvm/LICENSE.TXT) |
 | lz4                | [Licencia BSD de 2 cláusulas](https://github.com/lz4/lz4/blob/c10863b98e1503af90616ae99725ecd120265dfb/LICENSE) |
-| Mariadb-conector-c | [Información adicional](https://github.com/ClickHouse-Extras/mariadb-connector-c/blob/3.1/COPYING.LIB) |
+| mariadb-conector-c | [Información adicional](https://github.com/ClickHouse-Extras/mariadb-connector-c/blob/3.1/COPYING.LIB) |
 | murmurhash         | [Dominio público](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/murmurhash/LICENSE) |
 | pdqsort            | [Licencia Zlib](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/pdqsort/license.txt) |
 | Poco               | [Boost Software License - Versión 1.0](https://github.com/ClickHouse-Extras/poco/blob/fe5505e56c27b6ecb0dcbc40c49dc2caf4e9637f/LICENSE) |
diff --git a/docs/es/development/developer_instruction.md b/docs/es/development/developer_instruction.md
index f0c84485c55..a9572ae0eef 100644
--- a/docs/es/development/developer_instruction.md
+++ b/docs/es/development/developer_instruction.md
@@ -1,12 +1,15 @@
 ---
 machine_translated: true
+machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa
+toc_priority: 61
+toc_title: "La instrucci\xF3n para desarrolladores de ClickHouse para principiantes"
 ---
 
 La construcción de ClickHouse es compatible con Linux, FreeBSD y Mac OS X.
 
 # Si utiliza Windows {#if-you-use-windows}
 
-Si usa Windows, necesita crear una máquina virtual con Ubuntu. Para comenzar a trabajar con una máquina virtual, instale VirtualBox. Puede descargar Ubuntu desde el sitio web: https://www.ubuntu.com/\#download. Por favor, cree una máquina virtual a partir de la imagen descargada (debe reservar al menos 4 GB de RAM para ello). Para ejecutar un terminal de línea de comandos en Ubuntu, busque un programa que contenga la palabra “terminal” es su nombre (gnome-terminal, konsole, etc.) o simplemente presione Ctrl + Alt + T.
+Si usa Windows, necesita crear una máquina virtual con Ubuntu. Para comenzar a trabajar con una máquina virtual, instale VirtualBox. Puede descargar Ubuntu desde el sitio web: https://www.ubuntu.com/\#download. Por favor, cree una máquina virtual a partir de la imagen descargada (debe reservar al menos 4 GB de RAM para ello). Para ejecutar un terminal de línea de comandos en Ubuntu, busque un programa que contenga la palabra “terminal” en su nombre (gnome-terminal, konsole etc.) o simplemente presione Ctrl + Alt + T.
 
 # Si utiliza un sistema de 32 bits {#if-you-use-a-32-bit-system}
@@ -16,11 +19,11 @@ ClickHouse no puede funcionar ni construir en un sistema de 32 bits. Debe adquir
 
 Para comenzar a trabajar con el repositorio de ClickHouse, necesitará una cuenta de GitHub.
 
-Probablemente ya tenga uno, pero si no lo hace, regístrese en https://github.com. En caso de que no tenga claves SSH, debe generarlas y luego cargarlas en GitHub. Es necesario para enviar a través de sus parches. También es posible usar las mismas claves SSH que usa con cualquier otro servidor SSH, probablemente ya las tenga.
+Probablemente ya tenga uno, pero si no lo hace, regístrese en https://github.com . En caso de que no tenga claves SSH, debe generarlas y luego cargarlas en GitHub. Es necesario para enviar a través de sus parches. También es posible usar las mismas claves SSH que usa con cualquier otro servidor SSH, probablemente ya las tenga.
 Cree una bifurcación del repositorio ClickHouse. Para hacerlo por favor haga clic en el “fork” botón en la esquina superior derecha en https://github.com/ClickHouse/ClickHouse . Se bifurcará su propia copia de ClickHouse/ClickHouse a su cuenta.
 
-El proceso de desarrollo consiste en comprometer primero los cambios previstos en su bifurcación de ClickHouse y luego crear un “pull request” Para que estos cambios sean aceptados en el repositorio principal (ClickHouse / ClickHouse).
+El proceso de desarrollo consiste en comprometer primero los cambios previstos en su bifurcación de ClickHouse y luego crear un “pull request” para que estos cambios sean aceptados en el repositorio principal (ClickHouse / ClickHouse).
 
 Para trabajar con repositorios git, instale `git`.
 
 Para hacer eso en Ubuntu, ejecutaría en la terminal de línea de comandos:
 
     sudo apt install git
 
 Puede encontrar un breve manual sobre el uso de Git aquí: https://services.github.com/on-demand/downloads/github-git-cheat-sheet.pdf .
-Para obtener un manual detallado sobre Git, consulte https://git-scm.com/book/ru/v2 .
+Para obtener un manual detallado sobre Git, consulte https://git-scm.com/book/en/v2 .
 
 # Clonación de un repositorio en su máquina de desarrollo {#cloning-a-repository-to-your-development-machine}
@@ -47,7 +50,7 @@ Este comando creará un directorio `ClickHouse` que contiene la copia de trabajo
 
 Es importante que la ruta al directorio de trabajo no contenga espacios en blanco, ya que puede ocasionar problemas con la ejecución del sistema de compilación.
 
-Tenga en cuenta que el repositorio ClickHouse utiliza `submodules`. Así es como se llaman las referencias a repositorios adicionales (es decir, bibliotecas externas de las que depende el proyecto). Significa que al clonar el repositorio debe especificar el `--recursive` como en el ejemplo anterior. Si el repositorio se ha clonado sin submódulos, para descargarlos debe ejecutar lo siguiente:
+Tenga en cuenta que el repositorio ClickHouse utiliza `submodules`. That is what the references to additional repositories are called (i.e. external libraries on which the project depends). It means that when cloning the repository you need to specify the `--recursive` como en el ejemplo anterior. Si el repositorio se ha clonado sin submódulos, para descargarlos debe ejecutar lo siguiente:
 
     git submodule init
     git submodule update
@@ -138,15 +141,15 @@ Las compilaciones oficiales de Yandex actualmente usan GCC porque genera código
 
 Para instalar GCC en Ubuntu, ejecute: `sudo apt install gcc g++`
 
-Compruebe la versión de gcc: `gcc --version`. Si está por debajo de 9, siga las instrucciones aquí: https://clickhouse.tech/docs/es/development/build/\#install-gcc-9.
+Compruebe la versión de gcc: `gcc --version`. Si está por debajo de 9, siga las instrucciones aquí: https://clickhouse.tech/docs/en/development/build/\#install-gcc-9.
 
 La compilación de Mac OS X solo es compatible con Clang. Sólo tiene que ejecutar `brew install llvm`
 
-Si decide utilizar Clang, también puede instalar `libc++` y `lld` sabe lo que es. Utilizar `ccache` también se recomienda.
+Si decide utilizar Clang, también puede instalar `libc++` y `lld` si usted sabe lo que es. Utilizar `ccache` también se recomienda.
# El proceso de construcción {#the-building-process}

-Ahora que está listo para construir ClickHouse, le recomendamos que cree un directorio separado `build` Dentro `ClickHouse` que contendrá todos los de la generación de artefactos:
+Ahora que está listo para construir ClickHouse, le recomendamos que cree un directorio separado `build` dentro `ClickHouse` que contendrá todos los de la generación de artefactos:

mkdir build
cd build

@@ -171,7 +174,7 @@ Para una construcción más rápida, puede recurrir al `debug` tipo de compilaci

cmake -D CMAKE_BUILD_TYPE=Debug ..

-Puede cambiar el tipo de compilación ejecutando este comando en el `build` Directorio.
+Puede cambiar el tipo de compilación ejecutando este comando en el `build` directorio.

Ejecutar ninja para construir:

@@ -193,7 +196,7 @@ En máquinas con 4GB de RAM, se recomienda especificar 1, para 8GB de RAM `-j 2`

Si recibe el mensaje: `ninja: error: loading 'build.ninja': No such file or directory`, significa que la generación de una configuración de compilación ha fallado y necesita inspeccionar el mensaje anterior.

-Cuando se inicie correctamente el proceso de construcción, verá el progreso de construcción: el número de tareas procesadas y el número total de tareas.
+Cuando se inicie correctamente el proceso de construcción, verá el progreso de la compilación: el número de tareas procesadas y el número total de tareas.

Al crear mensajes sobre archivos protobuf en la biblioteca libhdfs2, como `libprotobuf WARNING` puede aparecer. Afectan a nada y son seguros para ser ignorado.

@@ -221,7 +224,7 @@ Puede reemplazar la versión de producción del binario ClickHouse instalado en

sudo cp ClickHouse/build/programs/clickhouse /usr/bin/
sudo service clickhouse-server start

-Tenga en cuenta que `clickhouse-client`, `clickhouse-server` y otros son enlaces simbólicos a los comúnmente compartidos `clickhouse` Binario.
+Tenga en cuenta que `clickhouse-client`, `clickhouse-server` y otros son enlaces simbólicos a los comúnmente compartidos `clickhouse` binario.

También puede ejecutar su binario ClickHouse personalizado con el archivo de configuración del paquete ClickHouse instalado en su sistema:

@@ -242,11 +245,11 @@ Por si acaso, vale la pena mencionar que CLion crea `build` por sí mismo, tambi

La descripción de la arquitectura ClickHouse se puede encontrar aquí: https://clickhouse.tech/docs/es/desarrollo/arquitectura/

-La Guía de estilo de código: https://clickhouse.tech/docs/es/development/style/
+La Guía de estilo de código: https://clickhouse.tech/docs/en/development/style/

-Pruebas de escritura: https://clickhouse.tech/docs/es/development/tests/
+Pruebas de escritura: https://clickhouse.tech/docs/en/development/tests/

-Lista de tareas: https://github.com/ClickHouse/ClickHouse/blob/master/tests/instructions/easy\_tasks\_sorted\_en.md
+Lista de tareas: https://github.com/ClickHouse/ClickHouse/blob/master/tests/instructions/easy\_tasks\_sorted\_en.md

# Datos de prueba {#test-data}

@@ -275,8 +278,8 @@ Navega a tu repositorio de fork en la interfaz de usuario de GitHub. Si ha estad

Se puede crear una solicitud de extracción incluso si el trabajo aún no se ha completado. En este caso, por favor ponga la palabra “WIP” (trabajo en curso) al comienzo del título, se puede cambiar más tarde. Esto es útil para la revisión cooperativa y la discusión de los cambios, así como para ejecutar todas las pruebas disponibles. Es importante que proporcione una breve descripción de sus cambios, que más tarde se utilizará para generar registros de cambios de lanzamiento.
-Las pruebas comenzarán tan pronto como los empleados de Yandex etiqueten su PR con una etiqueta “can be tested”. Los resultados de algunas primeras comprobaciones (por ejemplo, el estilo de código) llegarán en varios minutos. Los resultados de la comprobación de compilación llegarán dentro de media hora. Y el conjunto principal de pruebas se informará dentro de una hora. +Las pruebas comenzarán tan pronto como los empleados de Yandex etiqueten su PR con una etiqueta “can be tested”. The results of some first checks (e.g. code style) will come in within several minutes. Build check results will arrive within half an hour. And the main set of tests will report itself within an hour. -El sistema preparará compilaciones binarias ClickHouse para su solicitud de extracción individualmente. Para recuperar estas compilaciones, haga clic en “Details” junto al enlace “ClickHouse build check” en la lista de cheques. Allí encontrará enlaces directos a la construcción.deb paquetes de ClickHouse que puede implementar incluso en sus servidores de producción (si no tiene miedo). +El sistema preparará compilaciones binarias ClickHouse para su solicitud de extracción individualmente. Para recuperar estas compilaciones, haga clic en “Details” junto al link “ClickHouse build check” en la lista de cheques. Allí encontrará enlaces directos a la construcción.deb paquetes de ClickHouse que puede implementar incluso en sus servidores de producción (si no tiene miedo). Lo más probable es que algunas de las compilaciones fallen las primeras veces. Esto se debe al hecho de que verificamos las compilaciones tanto con gcc como con clang, con casi todas las advertencias existentes (siempre con el `-Werror` bandera) habilitado para sonido. En esa misma página, puede encontrar todos los registros de compilación para que no tenga que compilar ClickHouse de todas las formas posibles. diff --git a/docs/es/development/index.md b/docs/es/development/index.md index a905fc295b9..815a633071e 100644 --- a/docs/es/development/index.md +++ b/docs/es/development/index.md @@ -1,7 +1,12 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_folder_title: Development +toc_hidden: true +toc_priority: 58 +toc_title: oculto --- # Desarrollo de ClickHouse {#clickhouse-development} -[Artículo Original](https://clickhouse.tech/docs/es/development/) +[Artículo Original](https://clickhouse.tech/docs/en/development/) diff --git a/docs/es/development/style.md b/docs/es/development/style.md index 2ba57bc0f63..77815c0204d 100644 --- a/docs/es/development/style.md +++ b/docs/es/development/style.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 68 +toc_title: "C\xF3mo escribir c\xF3digo C ++" --- # Cómo escribir código C ++ {#how-to-write-c-code} @@ -54,7 +57,7 @@ memcpy(&buf[place_value], &x, sizeof(x)); for (size_t i = 0; i < rows; i += storage.index_granularity) ``` -**7.** Agregar espacios alrededor de los operadores binarios (`+`, `-`, `*`, `/`, `%`, …) y el operador ternario `?:`. +**7.** Agregar espacios alrededor de los operadores binarios (`+`, `-`, `*`, `/`, `%`, …) and the ternary operator `?:`. ``` cpp UInt16 year = (s[0] - '0') * 1000 + (s[1] - '0') * 100 + (s[2] - '0') * 10 + (s[3] - '0'); @@ -79,13 +82,13 @@ dst.ClickEventID = click.EventID; dst.ClickGoodEvent = click.GoodEvent; ``` -**10.** No utilice espacios alrededor de los operadores `.`, `->`. 
+**10.** No use espacios alrededor de los operadores `.`, `->`. Si es necesario, el operador se puede envolver a la siguiente línea. En este caso, el desplazamiento frente a él aumenta. -**11.** No utilice un espacio para separar los operadores unarios (`--`, `++`, `*`, `&`, …) del argumento. +**11.** No utilice un espacio para separar los operadores unarios (`--`, `++`, `*`, `&`, …) from the argument. -**12.** Pon un espacio después de una coma, pero no antes. La misma regla se aplica a un punto y coma dentro de un `for` expresión. +**12.** Pon un espacio después de una coma, pero no antes. La misma regla se aplica a un punto y coma dentro de un `for` expresion. **13.** No utilice espacios para separar el `[]` operador. @@ -97,7 +100,7 @@ struct AggregatedStatElement {} ``` -**15.** En clases y estructuras, escribe `public`, `private`, y `protected` es el mismo nivel que `class/struct`, y sangrar el resto del código. +**15.** En clases y estructuras, escribe `public`, `private`, y `protected` en el mismo nivel que `class/struct`, y sangrar el resto del código. ``` cpp template @@ -110,7 +113,7 @@ public: } ``` -**16.** Si el mismo `namespace` se utiliza para todo el archivo, y no hay nada más significativo, un desplazamiento no es necesario dentro `namespace`. +**16.** Si el mismo `namespace` se usa para todo el archivo, y no hay nada más significativo, no es necesario un desplazamiento dentro `namespace`. **17.** Si el bloque para un `if`, `for`, `while`, u otra expresión consiste en una sola `statement`, las llaves son opcionales. Coloque el `statement` en una línea separada, en su lugar. Esta regla también es válida para `if`, `for`, `while`, … @@ -194,7 +197,7 @@ std::cerr << static_cast(c) << std::endl; Lo mismo es cierto para los métodos pequeños en cualquier clase o estructura. -Para las clases y estructuras con plantillas, no separe las declaraciones de métodos de la implementación (porque de lo contrario deben definirse en la misma unidad de traducción). +Para clases y estructuras con plantillas, no separe las declaraciones de métodos de la implementación (porque de lo contrario deben definirse en la misma unidad de traducción). **31.** Puede ajustar líneas en 140 caracteres, en lugar de 80. @@ -295,7 +298,7 @@ Nota: Puede usar Doxygen para generar documentación a partir de estos comentari /// Why did you do this stuff? ``` -**16.** No hay necesidad de escribir un comentario al final de un bloque describiendo de qué se trataba. +**16.** No es necesario escribir un comentario al final de un bloque que describa de qué se trataba. ``` cpp /// for @@ -445,7 +448,7 @@ Utilice excepciones. En la mayoría de los casos, solo necesita lanzar una excep En las aplicaciones de procesamiento de datos fuera de línea, a menudo es aceptable no detectar excepciones. -En los servidores que manejan las solicitudes de los usuarios, suele ser suficiente para detectar excepciones en el nivel superior del controlador de conexión. +En los servidores que manejan las solicitudes de los usuarios, generalmente es suficiente detectar excepciones en el nivel superior del controlador de conexión. En las funciones de subproceso, debe capturar y mantener todas las excepciones para volver a lanzarlas en el subproceso principal después `join`. @@ -566,7 +569,7 @@ Utilizar `unsigned` si es necesario. Utilice los tipos `UInt8`, `UInt16`, `UInt32`, `UInt64`, `Int8`, `Int16`, `Int32`, y `Int64`, así como `size_t`, `ssize_t`, y `ptrdiff_t`. 
-No utilice estos tipos para los números: `signed/unsigned long`, `long long`, `short`, `signed/unsigned char`, `char`. +No use estos tipos para números: `signed/unsigned long`, `long long`, `short`, `signed/unsigned char`, `char`. **13.** Pasando argumentos. @@ -602,17 +605,17 @@ Las bibliotecas pequeñas tampoco necesitan esto. Para bibliotecas medianas a grandes, coloque todo en un `namespace`. -En la biblioteca `.h` Archivo, se puede utilizar `namespace detail` para ocultar los detalles de implementación no necesarios para el código de la aplicación. +En la biblioteca `.h` archivo, se puede utilizar `namespace detail` para ocultar los detalles de implementación no necesarios para el código de la aplicación. En un `.cpp` archivo, puede usar un `static` o espacio de nombres anónimo para ocultar símbolos. -Además, un `namespace` Puede ser utilizado para un `enum` para evitar que los nombres correspondientes caigan en un `namespace` (pero es mejor usar un `enum class`). +Además, un `namespace` puede ser utilizado para un `enum` para evitar que los nombres correspondientes caigan en un `namespace` (pero es mejor usar un `enum class`). **16.** Inicialización diferida. Si se requieren argumentos para la inicialización, normalmente no debe escribir un constructor predeterminado. -Si más adelante necesitará retrasar la inicialización, puede agregar un constructor predeterminado que creará un objeto no válido. O, para un pequeño número de objetos, puede usar `shared_ptr/unique_ptr`. +Si más adelante tendrá que retrasar la inicialización, puede agregar un constructor predeterminado que creará un objeto no válido. O, para un pequeño número de objetos, puede usar `shared_ptr/unique_ptr`. ``` cpp Loader(DB::Connection * connection_, const std::string & query, size_t max_block_size_); @@ -627,7 +630,7 @@ Si la clase no está destinada para uso polimórfico, no necesita hacer que las **18.** Codificación. -Usa UTF-8 en todas partes. Utilizar `std::string`y`char *`. No uso `std::wstring`y`wchar_t`. +Usa UTF-8 en todas partes. Utilizar `std::string`y`char *`. No use `std::wstring`y`wchar_t`. **19.** Tala. @@ -653,7 +656,7 @@ Utilice la codificación UTF-8 en el registro. En casos excepcionales, puede usa No utilice `iostreams` en ciclos internos que son críticos para el rendimiento de la aplicación (y nunca usan `stringstream`). -Descripción `DB/IO` biblioteca en su lugar. +Utilice el `DB/IO` biblioteca en su lugar. **21.** Fecha y hora. @@ -667,7 +670,7 @@ Utilice siempre `#pragma once` en lugar de incluir guardias. `using namespace` no se utiliza. Usted puede utilizar `using` con algo específico. Pero hazlo local dentro de una clase o función. -**24.** No uso `trailing return type` para funciones a menos que sea necesario. +**24.** No use `trailing return type` para funciones a menos que sea necesario. ``` cpp [auto f() -> void;]{.strike} @@ -684,7 +687,7 @@ std::string s{"Hello"}; auto s = std::string{"Hello"}; ``` -**26.** Para funciones virtuales, escriba `virtual` es la clase base, pero escribe `override` es lugar de `virtual` en las clases descendientes. +**26.** Para funciones virtuales, escriba `virtual` en la clase base, pero escribe `override` en lugar de `virtual` en las clases descendientes. ## Características no utilizadas de C ++ {#unused-features-of-c} @@ -698,7 +701,7 @@ auto s = std::string{"Hello"}; Pero en igualdad de condiciones, se prefiere el código multiplataforma o portátil. -**2.** Idioma: hacer ++ 17. +**2.** Idioma: C ++ 17. **3.** Compilación: `gcc`. 
En este momento (diciembre de 2017), el código se compila utilizando la versión 7.2. (También se puede compilar usando `clang 4`.) @@ -722,7 +725,7 @@ El conjunto de instrucciones de CPU es el conjunto mínimo admitido entre nuestr **2.** Para la depuración, use `gdb`, `valgrind` (`memcheck`), `strace`, `-fsanitize=...`, o `tcmalloc_minimal_debug`. -**3.** Para crear perfiles, uso `Linux Perf`, `valgrind` (`callgrind`Ciudad `strace -cf`. +**3.** Para crear perfiles, use `Linux Perf`, `valgrind` (`callgrind`), o `strace -cf`. **4.** Las fuentes están en Git. @@ -754,7 +757,7 @@ Si ya hay una buena solución disponible, úsela, incluso si eso significa que d (Pero prepárese para eliminar las bibliotecas incorrectas del código.) -**3.** Puede instalar una biblioteca que no está en los paquetes, si los paquetes no tienen lo que necesita o tienen una versión obsoleta o el tipo incorrecto de compilación. +**3.** Puede instalar una biblioteca que no esté en los paquetes, si los paquetes no tienen lo que necesita o tienen una versión obsoleta o el tipo de compilación incorrecto. **4.** Si la biblioteca es pequeña y no tiene su propio sistema de compilación complejo, coloque los archivos `contrib` carpeta. @@ -778,17 +781,17 @@ Si ya hay una buena solución disponible, úsela, incluso si eso significa que d **1.** Especificar explícitamente `std::` para tipos de `stddef.h` -no se recomienda. En otras palabras, recomendamos escribir `size_t` es su lugar `std::size_t` porque es más corto. +no se recomienda. En otras palabras, recomendamos escribir `size_t` en su lugar `std::size_t` porque es más corto. Es aceptable agregar `std::`. **2.** Especificar explícitamente `std::` para funciones de la biblioteca C estándar -no se recomienda. En otras palabras, escribir `memcpy` es lugar de `std::memcpy`. +no se recomienda. En otras palabras, escribir `memcpy` en lugar de `std::memcpy`. La razón es que hay funciones no estándar similares, tales como `memmem`. Utilizamos estas funciones en ocasiones. Estas funciones no existen en `namespace std`. -Si usted escribe `std::memcpy` es lugar de `memcpy` es todas partes, entonces `memmem` pecado `std::` se verá extraño. +Si usted escribe `std::memcpy` en lugar de `memcpy` en todas partes, entonces `memmem` sin `std::` se verá extraño. Sin embargo, todavía puedes usar `std::` si lo prefieres. @@ -796,7 +799,7 @@ Sin embargo, todavía puedes usar `std::` si lo prefieres. Esto es aceptable si es más eficiente. -Por ejemplo, uso `memcpy` es lugar de `std::copy` para copiar grandes trozos de memoria. +Por ejemplo, use `memcpy` en lugar de `std::copy` para copiar grandes trozos de memoria. **4.** Argumentos de función multilínea. @@ -835,4 +838,4 @@ function( size_t limit) ``` -[Artículo Original](https://clickhouse.tech/docs/es/development/style/) +[Artículo Original](https://clickhouse.tech/docs/en/development/style/) diff --git a/docs/es/development/tests.md b/docs/es/development/tests.md index 1126cead128..2d1996e0535 100644 --- a/docs/es/development/tests.md +++ b/docs/es/development/tests.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 69 +toc_title: "C\xF3mo ejecutar pruebas de ClickHouse" --- # Pruebas de ClickHouse {#clickhouse-testing} @@ -10,15 +13,15 @@ Las pruebas funcionales son las más simples y cómodas de usar. La mayoría de Cada prueba funcional envía una o varias consultas al servidor ClickHouse en ejecución y compara el resultado con la referencia. 
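A modo de orientación, una prueba funcional sin estado mínima podría tener este aspecto (esbozo hipotético: el nombre del archivo y de la tabla son supuestos y no forman parte del repositorio):

``` sql
-- 01234_ejemplo_suma.sql (nombre hipotético)
-- Crea una tabla en la base de datos `test`, inserta datos y comprueba un agregado.
DROP TABLE IF EXISTS test.ejemplo_suma;
CREATE TABLE test.ejemplo_suma (x UInt32) ENGINE = Memory;
INSERT INTO test.ejemplo_suma VALUES (1), (2), (3);
SELECT sum(x) FROM test.ejemplo_suma;
DROP TABLE test.ejemplo_suma;
```

La salida de canalizar este script a `clickhouse-client` se guardaría como el archivo `.reference` correspondiente, como se describe a continuación.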
-Las pruebas se encuentran en `tests/queries` Directorio. Hay dos subdirectorios: `stateless` y `stateful`. Las pruebas sin estado ejecutan consultas sin datos de prueba precargados: a menudo crean pequeños conjuntos de datos sintéticos sobre la marcha, dentro de la prueba misma. Las pruebas estatales requieren datos de prueba precargados de Yandex.Métrica y no está disponible para el público en general. Tendemos a usar sólo `stateless` pruebas y evitar la adición de nuevos `stateful` prueba.
+Las pruebas se encuentran en `tests/queries` directorio. Hay dos subdirectorios: `stateless` y `stateful`. Las pruebas sin estado ejecutan consultas sin datos de prueba precargados: a menudo crean pequeños conjuntos de datos sintéticos sobre la marcha, dentro de la prueba misma. Las pruebas estatales requieren datos de prueba precargados de Yandex.Métrica y no está disponible para el público en general. Tendemos a usar sólo `stateless` pruebas y evitar la adición de nuevos `stateful` prueba.

-Cada prueba puede ser de dos tipos: `.sql` y `.sh`. `.sql` prueba es el script SQL simple que se canaliza a `clickhouse-client --multiquery --testmode`. `.sh` test es un script que se ejecuta por sí mismo.
+Cada prueba puede ser de dos tipos: `.sql` y `.sh`. `.sql` test es el script SQL simple que se canaliza a `clickhouse-client --multiquery --testmode`. `.sh` test es un script que se ejecuta por sí mismo.

-Para ejecutar todas las pruebas, use `tests/clickhouse-test` herramienta. Mira `--help` para la lista de posibles opciones. Simplemente puede ejecutar todas las pruebas o ejecutar un subconjunto de pruebas filtradas por subcadena en el nombre de la prueba: `./clickhouse-test substring`.
+Para ejecutar todas las pruebas, use `tests/clickhouse-test` herramienta. Mira `--help` para la lista de posibles opciones. Simplemente puede ejecutar todas las pruebas o ejecutar un subconjunto de pruebas filtradas por subcadena en el nombre de la prueba: `./clickhouse-test substring`.

-La forma más sencilla de invocar pruebas funcionales es copiar `clickhouse-client` Naciones `/usr/bin/`, ejecutar `clickhouse-server` y luego ejecutar `./clickhouse-test` de su propio directorio.
+La forma más sencilla de invocar pruebas funcionales es copiar `clickhouse-client` a `/usr/bin/`, ejecutar `clickhouse-server` y luego ejecutar `./clickhouse-test` de su propio directorio.

-Atracciones cercanas al hotel `.sql` o `.sh` archivo en `tests/queries/0_stateless` directorio, compruébelo manualmente y luego genere `.reference` archivo de la siguiente manera: `clickhouse-client -n --testmode < 00000_test.sql > 00000_test.reference` o `./00000_test.sh > ./00000_test.reference`.
+Para agregar una nueva prueba, cree un `.sql` o `.sh` archivo en `tests/queries/0_stateless` directorio, compruébelo manualmente y luego genere `.reference` archivo de la siguiente manera: `clickhouse-client -n --testmode < 00000_test.sql > 00000_test.reference` o `./00000_test.sh > ./00000_test.reference`.

Las pruebas deben usar (crear, soltar, etc.) solo tablas en `test` base de datos que se supone que se crea de antemano; también las pruebas pueden usar tablas temporales.

@@ -26,20 +29,20 @@ Si desea utilizar consultas distribuidas en pruebas funcionales, puede aprovecha

Algunas pruebas están marcadas con `zookeeper`, `shard` o `long` en sus nombres. `zookeeper` es para pruebas que están usando ZooKeeper.
`shard` es para pruebas que
-Requiere servidor para escuchar `127.0.0.*`; `distributed` o `global` Tienen el mismo
+requiere servidor para escuchar `127.0.0.*`; `distributed` o `global` tienen el mismo
significado. `long` es para pruebas que duran un poco más de un segundo. Usted puede
-Deshabilitar estos grupos de pruebas utilizando `--no-zookeeper`, `--no-shard` y
+deshabilitar estos grupos de pruebas utilizando `--no-zookeeper`, `--no-shard` y
`--no-long` opciones, respectivamente.

-## Errores conocidos {#known-bugs}
+## Bugs conocidos {#known-bugs}

-Si conocemos algunos errores que se pueden reproducir fácilmente mediante pruebas funcionales, colocamos pruebas funcionales preparadas en `tests/queries/bugs` directorio. Estas pruebas se moverán a `tests/queries/0_stateless` cuando se corrigen errores.
+Si conocemos algunos errores que se pueden reproducir fácilmente mediante pruebas funcionales, colocamos pruebas funcionales preparadas en `tests/queries/bugs` directorio. Estas pruebas se moverán a `tests/queries/0_stateless` cuando se corrigen errores.

## Pruebas de integración {#integration-tests}

Las pruebas de integración permiten probar ClickHouse en la configuración agrupada y la interacción de ClickHouse con otros servidores como MySQL, Postgres, MongoDB. Son útiles para emular divisiones de red, caídas de paquetes, etc. Estas pruebas se ejecutan bajo Docker y crean múltiples contenedores con varios software.

-Ver `tests/integration/README.md` sobre cómo ejecutar estas pruebas.
+Ver `tests/integration/README.md` sobre cómo ejecutar estas pruebas.

Tenga en cuenta que la integración de ClickHouse con controladores de terceros no se ha probado. Además, actualmente no tenemos pruebas de integración con nuestros controladores JDBC y ODBC.

@@ -59,11 +62,11 @@ Si desea mejorar el rendimiento de ClickHouse en algún escenario, y si se puede

## Herramientas de prueba y secuencias de comandos {#test-tools-and-scripts}

-Algunos programas en `tests` directorio no son pruebas preparadas, pero son herramientas de prueba. Por ejemplo, párr `Lexer` hay una herramienta `src/Parsers/tests/lexer` que solo hacen la tokenización de stdin y escriben el resultado coloreado en stdout. Puede usar este tipo de herramientas como ejemplos de código y para exploración y pruebas manuales.
+Algunos programas en `tests` directorio no son pruebas preparadas, pero son herramientas de prueba. Por ejemplo, para `Lexer` hay una herramienta `src/Parsers/tests/lexer` que solo hacen la tokenización de stdin y escriben el resultado coloreado en stdout. Puede usar este tipo de herramientas como ejemplos de código y para exploración y pruebas manuales.

También puede colocar un par de archivos `.sh` y `.reference` junto con la herramienta para ejecutarlo en alguna entrada predefinida, entonces el resultado del script se puede comparar con `.reference` file. Este tipo de pruebas no están automatizadas.

-## Pruebas misceláneas {#miscellanous-tests}
+## Pruebas misceláneas {#miscellanous-tests}

Hay pruebas para diccionarios externos ubicados en `tests/external_dictionaries` y para modelos aprendidos a máquina en `tests/external_models`. Estas pruebas no se actualizan y deben transferirse a pruebas de integración.

@@ -77,7 +80,7 @@ Cuando desarrolla una nueva característica, es razonable probarla también manu

Construir ClickHouse. Ejecute ClickHouse desde el terminal: cambie el directorio a `programs/clickhouse-server` y ejecutarlo con `./clickhouse-server`.
Se utilizará la configuración (`config.xml`, `users.xml` y archivos dentro de `config.d` y `users.d` directorios) desde el directorio actual de forma predeterminada. Para conectarse al servidor ClickHouse, ejecute `programs/clickhouse-client/clickhouse-client`. -Tenga en cuenta que todas las herramientas de clickhouse (servidor, cliente, etc.) son solo enlaces simbólicos a un único binario llamado `clickhouse`. Puede encontrar este binario en `programs/clickhouse`. Todas las herramientas también se pueden invocar como `clickhouse tool` es lugar de `clickhouse-tool`. +Tenga en cuenta que todas las herramientas de clickhouse (servidor, cliente, etc.) son solo enlaces simbólicos a un único binario llamado `clickhouse`. Puede encontrar este binario en `programs/clickhouse`. Todas las herramientas también se pueden invocar como `clickhouse tool` en lugar de `clickhouse-tool`. Alternativamente, puede instalar el paquete ClickHouse: ya sea una versión estable del repositorio de Yandex o puede crear un paquete para usted con `./release` en la raíz de fuentes de ClickHouse. Luego inicie el servidor con `sudo service clickhouse-server start` (o detener para detener el servidor). Busque registros en `/etc/clickhouse-server/clickhouse-server.log`. @@ -154,9 +157,9 @@ Normalmente lanzamos y ejecutamos todas las pruebas en una sola variante de comp - construir en FreeBSD; - construir en Debian con bibliotecas de paquetes del sistema; -- Construir con enlaces compartidos de bibliotecas; +- construir con enlaces compartidos de bibliotecas; - construir en la plataforma AArch64; -- Construir en la plataforma PowerPc. +- construir en la plataforma PowerPc. Por ejemplo, construir con paquetes del sistema es una mala práctica, porque no podemos garantizar qué versión exacta de paquetes tendrá un sistema. Pero esto es realmente necesario para los mantenedores de Debian. Por esta razón, al menos tenemos que admitir esta variante de construcción. Otro ejemplo: la vinculación compartida es una fuente común de problemas, pero es necesaria para algunos entusiastas. @@ -168,7 +171,7 @@ Cuando ampliamos el protocolo de red ClickHouse, probamos manualmente que el ant ## Ayuda del compilador {#help-from-the-compiler} -Código principal de ClickHouse (que se encuentra en `dbms` Directorio) se construye con `-Wall -Wextra -Werror` y con algunas advertencias habilitadas adicionales. Aunque estas opciones no están habilitadas para bibliotecas de terceros. +Código principal de ClickHouse (que se encuentra en `dbms` directorio) se construye con `-Wall -Wextra -Werror` y con algunas advertencias habilitadas adicionales. Aunque estas opciones no están habilitadas para bibliotecas de terceros. Clang tiene advertencias aún más útiles: puedes buscarlas con `-Weverything` y elige algo para la compilación predeterminada. @@ -180,7 +183,7 @@ Para las compilaciones de producción, se usa gcc (todavía genera un código li Ejecutamos pruebas funcionales y de integración bajo ASan por compromiso. **Valgrind (Memcheck)**. -Realizamos pruebas funcionales bajo Valgrind durante la noche. Se tarda varias horas. Actualmente hay un falso positivo conocido en `re2` Biblioteca, ver [este artículo](https://research.swtch.com/sparse). +Realizamos pruebas funcionales bajo Valgrind durante la noche. Se tarda varias horas. Actualmente hay un falso positivo conocido en `re2` biblioteca, ver [este artículo](https://research.swtch.com/sparse). 
**Desinfectante de comportamiento indefinido.**
Ejecutamos pruebas funcionales y de integración bajo ASan por compromiso.

@@ -196,7 +199,7 @@ Versión de depuración de `jemalloc` se utiliza para la compilación de depurac

## Fuzzing {#fuzzing}

-Usamos una prueba de fuzz simple para generar consultas SQL aleatorias y para verificar que el servidor no muera. Las pruebas de pelusa se realizan con el desinfectante Address. Lo puedes encontrar en `00746_sql_fuzzy.pl`. Esta prueba debe ejecutarse de forma continua (de la noche a la mañana y más).
+Usamos una prueba de fuzz simple para generar consultas SQL aleatorias y verificar que el servidor no muera. Las pruebas de pelusa se realizan con el desinfectante Address. Lo puedes encontrar en `00746_sql_fuzzy.pl`. Esta prueba debe ejecutarse de forma continua (de la noche a la mañana y más).

A partir de diciembre de 2018, todavía no usamos pruebas de fuzz aisladas del código de la biblioteca.

@@ -208,7 +211,7 @@ La gente del departamento de Yandex Cloud hace una visión general básica de la

Corremos `PVS-Studio` por compromiso. Hemos evaluado `clang-tidy`, `Coverity`, `cppcheck`, `PVS-Studio`, `tscancode`. Encontrará instrucciones de uso en `tests/instructions/` directorio. También puedes leer [el artículo en ruso](https://habr.com/company/yandex/blog/342018/).

-Si usted estados unidos `CLion` Como IDE, puede aprovechar algunos `clang-tidy` comprueba fuera de la caja.
+Si usted usa `CLion` como IDE, puede aprovechar algunos `clang-tidy` comprueba fuera de la caja.

## Endurecer {#hardening}

@@ -216,7 +219,7 @@ Si usted estados unidos `CLion` Como IDE, puede aprovechar algunos `clang-tidy`

## Estilo de código {#code-style}

-Se describen las reglas de estilo de código [aqui](https://clickhouse.tech/docs/es/development/style/).
+Se describen las reglas de estilo de código [aqui](https://clickhouse.tech/docs/en/development/style/).

Para comprobar si hay algunas violaciones de estilo comunes, puede usar `utils/check-style` script.

@@ -234,7 +237,7 @@ Estas pruebas son automatizadas por un equipo separado. Debido a la gran cantida

## Cobertura de prueba {#test-coverage}

-A partir de julio de 2018 no realizamos un seguimiento de la cobertura de las pruebas.
+A partir de julio de 2018, no realizamos un seguimiento de la cobertura de las pruebas.

## Automatización de pruebas {#test-automation}

@@ -245,4 +248,4 @@ Los trabajos de compilación y las pruebas se ejecutan en Sandbox por confirmaci

No usamos Travis CI debido al límite de tiempo y potencia computacional.
No usamos Jenkins. Se usó antes y ahora estamos felices de no estar usando Jenkins.

-[Artículo Original](https://clickhouse.tech/docs/es/development/tests/)
+[Artículo Original](https://clickhouse.tech/docs/en/development/tests/)

diff --git a/docs/es/engines/database_engines/index.md b/docs/es/engines/database_engines/index.md
new file mode 100644
index 00000000000..6a06a9d6952
--- /dev/null
+++ b/docs/es/engines/database_engines/index.md
@@ -0,0 +1,21 @@
+---
+machine_translated: true
+machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa
+toc_folder_title: Database Engines
+toc_priority: 27
+toc_title: "Implantaci\xF3n"
+---
+
+# Motores de base de datos {#database-engines}
+
+Los motores de bases de datos le permiten trabajar con tablas.
+
+De forma predeterminada, ClickHouse utiliza su motor de base de datos nativa, que proporciona [motores de mesa](../../engines/table_engines/index.md) y una [Dialecto SQL](../../sql_reference/syntax.md).
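Como esbozo ilustrativo de esa distinción (los nombres `db_demo` y `t_demo` son hipotéticos): el motor de base de datos se elige al crear la base de datos, y cada tabla dentro de ella elige su propio motor de tabla.

``` sql
-- Base de datos con el motor nativo (no se indica ENGINE).
CREATE DATABASE db_demo;

-- La tabla elige su propio motor de tabla.
CREATE TABLE db_demo.t_demo (fecha Date, valor UInt32)
ENGINE = MergeTree()
ORDER BY fecha;
```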
+ +También puede utilizar los siguientes motores de base de datos: + +- [MySQL](mysql.md) + +- [Perezoso](lazy.md) + +[Artículo Original](https://clickhouse.tech/docs/en/database_engines/) diff --git a/docs/es/database_engines/lazy.md b/docs/es/engines/database_engines/lazy.md similarity index 53% rename from docs/es/database_engines/lazy.md rename to docs/es/engines/database_engines/lazy.md index 95f69695112..602a83df606 100644 --- a/docs/es/database_engines/lazy.md +++ b/docs/es/engines/database_engines/lazy.md @@ -1,15 +1,18 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 31 +toc_title: Perezoso --- # Perezoso {#lazy} Mantiene las tablas en RAM solamente `expiration_time_in_seconds` segundos después del último acceso. Solo se puede usar con tablas \*Log. -Está optimizado para almacenar muchas tablas pequeñas \* Log, para las cuales hay un intervalo de tiempo largo entre los accesos. +Está optimizado para almacenar muchas tablas pequeñas \* Log, para las cuales hay un largo intervalo de tiempo entre los accesos. ## Creación de una base de datos {#creating-a-database} -CREAR BASE DE DATOS testlazy ENGINE = Lazy(expiration\_time\_in\_seconds); + CREATE DATABASE testlazy ENGINE = Lazy(expiration_time_in_seconds); -[Artículo Original](https://clickhouse.tech/docs/es/database_engines/lazy/) +[Artículo Original](https://clickhouse.tech/docs/en/database_engines/lazy/) diff --git a/docs/es/database_engines/mysql.md b/docs/es/engines/database_engines/mysql.md similarity index 51% rename from docs/es/database_engines/mysql.md rename to docs/es/engines/database_engines/mysql.md index 927eae65b77..18e1752e4da 100644 --- a/docs/es/database_engines/mysql.md +++ b/docs/es/engines/database_engines/mysql.md @@ -1,10 +1,13 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 30 +toc_title: MySQL --- -# MySQL {#mysql} +# Mysql {#mysql} -Permite conectarse a bases de datos en un servidor MySQL remoto y realizar `INSERT` y `SELECT` Consultas para intercambiar datos entre ClickHouse y MySQL. +Permite conectarse a bases de datos en un servidor MySQL remoto y realizar `INSERT` y `SELECT` consultas para intercambiar datos entre ClickHouse y MySQL. El `MySQL` motor de base de datos traducir consultas al servidor MySQL para que pueda realizar operaciones tales como `SHOW TABLES` o `SHOW CREATE TABLE`. @@ -23,32 +26,32 @@ ENGINE = MySQL('host:port', 'database', 'user', 'password') **Parámetros del motor** -- `host:port` — Dirección del servidor MySQL. -- `database` — Nombre de base de datos remota. -- `user` — Usuario de MySQL. -- `password` — Contraseña de usuario. +- `host:port` — MySQL server address. +- `database` — Remote database name. +- `user` — MySQL user. +- `password` — User password. 
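Por ejemplo, un esbozo con host y credenciales hipotéticos:

``` sql
-- Conecta la base de datos remota `test` de un servidor MySQL hipotético.
CREATE DATABASE mysql_db
ENGINE = MySQL('localhost:3306', 'test', 'my_user', 'my_password');

-- Las consultas como SHOW TABLES se traducen al servidor MySQL remoto.
SHOW TABLES FROM mysql_db;
```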
## Soporte de tipos de datos {#data_types-support}

-| MySQL                             | Haga clic en Casa                           |
-|-----------------------------------|---------------------------------------------|
-| TINYINT NO FIRMADO                | [UInt8](../data_types/int_uint.md)          |
-| TINYINT                           | [Int8](../data_types/int_uint.md)           |
-| SMALLINT UNSIGNED                 | [UInt16](../data_types/int_uint.md)         |
-| SMALLINT                          | [Int16](../data_types/int_uint.md)          |
-| UNFIRED INT, MEDIUMINT NO FIRMADO | [UInt32](../data_types/int_uint.md)         |
-| INT, MEDIUMINT                    | [Int32](../data_types/int_uint.md)          |
-| BIGINT NO FIRMADO                 | [UInt64](../data_types/int_uint.md)         |
-| BIGINT                            | [Int64](../data_types/int_uint.md)          |
-| FLOTANTE                          | [Float32](../data_types/float.md)           |
-| DOBLE                             | [Float64](../data_types/float.md)           |
-| FECHA                             | [Fecha](../data_types/date.md)              |
-| DATETIME, TIMESTAMP               | [FechaHora](../data_types/datetime.md)      |
-| BINARIO                           | [Cadena fija](../data_types/fixedstring.md) |
+| MySQL                            | ClickHouse                                                    |
+|----------------------------------|---------------------------------------------------------------|
+| UNSIGNED TINYINT                 | [UInt8](../../sql_reference/data_types/int_uint.md)           |
+| TINYINT                          | [Int8](../../sql_reference/data_types/int_uint.md)            |
+| UNSIGNED SMALLINT                | [UInt16](../../sql_reference/data_types/int_uint.md)          |
+| SMALLINT                         | [Int16](../../sql_reference/data_types/int_uint.md)           |
+| UNSIGNED INT, UNSIGNED MEDIUMINT | [UInt32](../../sql_reference/data_types/int_uint.md)          |
+| INT, MEDIUMINT                   | [Int32](../../sql_reference/data_types/int_uint.md)           |
+| UNSIGNED BIGINT                  | [UInt64](../../sql_reference/data_types/int_uint.md)          |
+| BIGINT                           | [Int64](../../sql_reference/data_types/int_uint.md)           |
+| FLOAT                            | [Float32](../../sql_reference/data_types/float.md)            |
+| DOUBLE                           | [Float64](../../sql_reference/data_types/float.md)            |
+| DATE                             | [Fecha](../../sql_reference/data_types/date.md)               |
+| DATETIME, TIMESTAMP              | [FechaHora](../../sql_reference/data_types/datetime.md)       |
+| BINARY                           | [Cadena fija](../../sql_reference/data_types/fixedstring.md)  |

-Todos los demás tipos de datos MySQL se convierten en [Cadena](../data_types/string.md).
+Todos los demás tipos de datos MySQL se convierten en [Cadena](../../sql_reference/data_types/string.md).

-[NULO](../data_types/nullable.md) se admite.
+[NULL](../../sql_reference/data_types/nullable.md) se admite.
## Ejemplos de uso {#examples-of-use} @@ -68,11 +71,11 @@ mysql> insert into mysql_table (`int_id`, `float`) VALUES (1,2); Query OK, 1 row affected (0,00 sec) mysql> select * from mysql_table; -+--------+-------+ ++------+-----+ | int_id | value | -+--------+-------+ ++------+-----+ | 1 | 2 | -+--------+-------+ ++------+-----+ 1 row in set (0,00 sec) ``` @@ -129,4 +132,4 @@ SELECT * FROM mysql_db.mysql_table └────────┴───────┘ ``` -[Artículo Original](https://clickhouse.tech/docs/es/database_engines/mysql/) +[Artículo Original](https://clickhouse.tech/docs/en/database_engines/mysql/) diff --git a/docs/es/engines/index.md b/docs/es/engines/index.md new file mode 100644 index 00000000000..c2a7f33b49f --- /dev/null +++ b/docs/es/engines/index.md @@ -0,0 +1,8 @@ +--- +machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_folder_title: Engines +toc_priority: 25 +--- + + diff --git a/docs/es/engines/table_engines/index.md b/docs/es/engines/table_engines/index.md new file mode 100644 index 00000000000..46d769316ba --- /dev/null +++ b/docs/es/engines/table_engines/index.md @@ -0,0 +1,85 @@ +--- +machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_folder_title: Table Engines +toc_priority: 26 +toc_title: "Implantaci\xF3n" +--- + +# Motores de mesa {#table_engines} + +El motor de tabla (tipo de tabla) determina: + +- Cómo y dónde se almacenan los datos, dónde escribirlos y dónde leerlos. +- Qué consultas son compatibles y cómo. +- Acceso a datos simultáneos. +- Uso de índices, si está presente. +- Si es posible la ejecución de solicitudes multiproceso. +- Parámetros de replicación de datos. + +## Familias de motores {#engine-families} + +### Mergetree {#mergetree} + +Los motores de mesa más universales y funcionales para tareas de alta carga. La propiedad compartida por estos motores es la inserción rápida de datos con el posterior procesamiento de datos en segundo plano. `MergeTree` Los motores familiares admiten la replicación de datos (con [Replicado\*](mergetree_family/replication.md) versiones de motores), particionamiento y otras características no admitidas en otros motores. + +Motores en la familia: + +- [Método de codificación de datos:](mergetree_family/mergetree.md) +- [ReplacingMergeTree](mergetree_family/replacingmergetree.md) +- [SummingMergeTree](mergetree_family/summingmergetree.md) +- [AgregaciónMergeTree](mergetree_family/aggregatingmergetree.md) +- [ColapsarMergeTree](mergetree_family/collapsingmergetree.md) +- [VersionedCollapsingMergeTree](mergetree_family/versionedcollapsingmergetree.md) +- [GraphiteMergeTree](mergetree_family/graphitemergetree.md) + +### Registro {#log} + +Ligero [motor](log_family/index.md) con funcionalidad mínima. Son los más efectivos cuando necesita escribir rápidamente muchas tablas pequeñas (hasta aproximadamente 1 millón de filas) y leerlas más tarde como un todo. + +Motores en la familia: + +- [TinyLog](log_family/tinylog.md) +- [StripeLog](log_family/stripelog.md) +- [Registro](log_family/log.md) + +### Motores de integración {#integration-engines} + +Motores para comunicarse con otros sistemas de almacenamiento y procesamiento de datos. 
+ +Motores en la familia: + +- [Kafka](integrations/kafka.md) +- [MySQL](integrations/mysql.md) +- [ODBC](integrations/odbc.md) +- [JDBC](integrations/jdbc.md) +- [HDFS](integrations/hdfs.md) + +### Motores especiales {#special-engines} + +Motores en la familia: + +- [Distribuido](special/distributed.md) +- [Método de codificación de datos:](special/materializedview.md) +- [Diccionario](special/dictionary.md) +- [Fusionar](special/merge.md) +- [File](special/file.md) +- [Nulo](special/null.md) +- [Establecer](special/set.md) +- [Unir](special/join.md) +- [URL](special/url.md) +- [Vista](special/view.md) +- [Memoria](special/memory.md) +- [Búfer](special/buffer.md) + +## Virtual Columnas {#table_engines-virtual-columns} + +La columna virtual es un atributo de motor de tabla integral que se define en el código fuente del motor. + +No debe especificar columnas virtuales en el `CREATE TABLE` consulta y no puedes verlos en `SHOW CREATE TABLE` y `DESCRIBE TABLE` resultados de la consulta. Las columnas virtuales también son de solo lectura, por lo que no puede insertar datos en columnas virtuales. + +Para seleccionar datos de una columna virtual, debe especificar su nombre en el `SELECT` consulta. `SELECT *` no devuelve valores de columnas virtuales. + +Si crea una tabla con una columna que tiene el mismo nombre que una de las columnas virtuales de la tabla, la columna virtual se vuelve inaccesible. No recomendamos hacer esto. Para ayudar a evitar conflictos, los nombres de columna virtual suelen tener el prefijo de un guión bajo. + +[Artículo Original](https://clickhouse.tech/docs/en/operations/table_engines/) diff --git a/docs/es/operations/table_engines/hdfs.md b/docs/es/engines/table_engines/integrations/hdfs.md similarity index 75% rename from docs/es/operations/table_engines/hdfs.md rename to docs/es/engines/table_engines/integrations/hdfs.md index 48078611bb0..eb41cdbc91b 100644 --- a/docs/es/operations/table_engines/hdfs.md +++ b/docs/es/engines/table_engines/integrations/hdfs.md @@ -1,11 +1,14 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 36 +toc_title: HDFS --- # HDFS {#table_engines-hdfs} Este motor proporciona integración con [Acerca de nosotros](https://en.wikipedia.org/wiki/Apache_Hadoop) permitiendo gestionar datos sobre [HDFS](https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html)a través de ClickHouse. Este motor es similar -Angeles [File](file.md) y [URL](url.md) motores, pero proporciona características específicas de Hadoop. +a la [File](../special/file.md) y [URL](../special/url.md) motores, pero proporciona características específicas de Hadoop. ## Uso {#usage} @@ -16,8 +19,8 @@ ENGINE = HDFS(URI, format) El `URI` El parámetro es el URI del archivo completo en HDFS. El `format` parámetro especifica uno de los formatos de archivo disponibles. Realizar `SELECT` consultas, el formato debe ser compatible para la entrada, y para realizar -`INSERT` consultas – para la salida. Los formatos disponibles se enumeran en el -[Formato](../../interfaces/formats.md#formats) apartado. +`INSERT` queries – for output. The available formats are listed in the +[Formato](../../../interfaces/formats.md#formats) apartado. La parte de la ruta de `URI` puede contener globs. En este caso, la tabla sería de solo lectura. **Ejemplo:** @@ -59,12 +62,12 @@ SELECT * FROM hdfs_engine_table LIMIT 2 Múltiples componentes de ruta de acceso pueden tener globs. 
Para ser procesado, el archivo debe existir y coincidir con todo el patrón de ruta. Listado de archivos determina durante `SELECT` (no en `CREATE` momento).

-- `*` — Sustituye cualquier número de caracteres excepto `/` incluyendo cadena vacía.
-- `?` — Sustituye a cualquier carácter individual.
-- `{some_string,another_string,yet_another_one}` — Sustituye cualquiera de las cadenas `'some_string', 'another_string', 'yet_another_one'`.
-- `{N..M}` — Sustituye cualquier número en el intervalo de N a M, incluidas ambas fronteras.
+- `*` — Substitutes any number of any characters except `/` incluyendo cadena vacía.
+- `?` — Substitutes any single character.
+- `{some_string,another_string,yet_another_one}` — Substitutes any of strings `'some_string', 'another_string', 'yet_another_one'`.
+- `{N..M}` — Substitutes any number in range from N to M including both borders.

-Construcciones con `{}` hijo similares a la [remoto](../../query_language/table_functions/remote.md) función de la tabla.
+Construcciones con `{}` son similares a la [remoto](../../../sql_reference/table_functions/remote.md) función de la tabla.

**Ejemplo**

@@ -108,13 +111,13 @@ Crear tabla con archivos llamados `file000`, `file001`, … , `file999`:

``` sql
CREATE TABLE big_table (name String, value UInt32) ENGINE = HDFS('hdfs://hdfs1:9000/big_dir/file{0..9}{0..9}{0..9}', 'CSV')
```

-## Columnas virtuales {#virtual-columns}
+## Virtual Columnas {#virtual-columns}

-- `_path` — Ruta de acceso al archivo.
-- `_file` — Nombre del expediente.
+- `_path` — Path to the file.
+- `_file` — Name of the file.

**Ver también**

-- [Columnas virtuales](https://clickhouse.tech/docs/es/operations/table_engines/#table_engines-virtual_columns)
+- [Virtual columnas](../index.md#table_engines-virtual_columns)

-[Artículo Original](https://clickhouse.tech/docs/es/operations/table_engines/hdfs/)
+[Artículo Original](https://clickhouse.tech/docs/en/operations/table_engines/hdfs/)

diff --git a/docs/es/engines/table_engines/integrations/index.md b/docs/es/engines/table_engines/integrations/index.md
new file mode 100644
index 00000000000..4145d8b3d7e
--- /dev/null
+++ b/docs/es/engines/table_engines/integrations/index.md
@@ -0,0 +1,8 @@
+---
+machine_translated: true
+machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa
+toc_folder_title: Integrations
+toc_priority: 30
+---
+
+

diff --git a/docs/es/operations/table_engines/jdbc.md b/docs/es/engines/table_engines/integrations/jdbc.md
similarity index 74%
rename from docs/es/operations/table_engines/jdbc.md
rename to docs/es/engines/table_engines/integrations/jdbc.md
index 7d6d5649df6..ad37b26f357 100644
--- a/docs/es/operations/table_engines/jdbc.md
+++ b/docs/es/engines/table_engines/integrations/jdbc.md
@@ -1,5 +1,8 @@
---
machine_translated: true
+machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa
+toc_priority: 34
+toc_title: JDBC
---

# JDBC {#table-engine-jdbc}

Permite que ClickHouse se conecte a bases de datos externas a través de [JDBC](https://en.wikipedia.org/wiki/Java_Database_Connectivity).

Para implementar la conexión JDBC, ClickHouse utiliza el programa independiente [Sistema abierto.](https://github.com/alex-krash/clickhouse-jdbc-bridge) que debería ejecutarse como un demonio.

-Este motor soporta el [NULO](../../data_types/nullable.md) tipo de datos.
+Este motor soporta el [NULL](../../../sql_reference/data_types/nullable.md) tipo de datos.
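Un esbozo mínimo de uso (la URI, la base de datos y la tabla remotas son hipotéticas; la sintaxis exacta se detalla en la sección siguiente):

``` sql
-- Tabla ClickHouse respaldada por una tabla remota accesible vía JDBC;
-- requiere que el demonio clickhouse-jdbc-bridge esté en ejecución.
CREATE TABLE jdbc_demo (`int_id` Int32)
ENGINE = JDBC('jdbc:mysql://localhost:3306/?user=root&password=root', 'test', 'test');

SELECT * FROM jdbc_demo;
```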
## Creación de una tabla {#creating-a-table} @@ -22,18 +25,18 @@ ENGINE = JDBC(dbms_uri, external_database, external_table) **Parámetros del motor** -- `dbms_uri` — URI de un DBMS externo. +- `dbms_uri` — URI of an external DBMS. Formato: `jdbc:://:/?user=&password=`. Ejemplo para MySQL: `jdbc:mysql://localhost:3306/?user=root&password=root`. -- `external_database` — Base de datos en un DBMS externo. +- `external_database` — Database in an external DBMS. -- `external_table` — Nombre de la tabla en `external_database`. +- `external_table` — Name of the table in `external_database`. ## Ejemplo de uso {#usage-example} -Creación de una tabla en el servidor MySQL mediante la conexión directa con su cliente de consola: +Crear una tabla en el servidor MySQL conectándose directamente con su cliente de consola: ``` text mysql> CREATE TABLE `test`.`test` ( @@ -48,11 +51,11 @@ mysql> insert into test (`int_id`, `float`) VALUES (1,2); Query OK, 1 row affected (0,00 sec) mysql> select * from test; -+--------+--------------+-------+----------------+ ++------+----------+-----+----------+ | int_id | int_nullable | float | float_nullable | -+--------+--------------+-------+----------------+ ++------+----------+-----+----------+ | 1 | NULL | 2 | NULL | -+--------+--------------+-------+----------------+ ++------+----------+-----+----------+ 1 row in set (0,00 sec) ``` @@ -82,6 +85,6 @@ FROM jdbc_table ## Ver también {#see-also} -- [Función de la tabla de JDBC](../../query_language/table_functions/jdbc.md). +- [Función de la tabla de JDBC](../../../sql_reference/table_functions/jdbc.md). -[Artículo Original](https://clickhouse.tech/docs/es/operations/table_engines/jdbc/) +[Artículo Original](https://clickhouse.tech/docs/en/operations/table_engines/jdbc/) diff --git a/docs/es/operations/table_engines/kafka.md b/docs/es/engines/table_engines/integrations/kafka.md similarity index 71% rename from docs/es/operations/table_engines/kafka.md rename to docs/es/engines/table_engines/integrations/kafka.md index 40bd3ce2578..eda6e626ba4 100644 --- a/docs/es/operations/table_engines/kafka.md +++ b/docs/es/engines/table_engines/integrations/kafka.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 32 +toc_title: Kafka --- # Kafka {#kafka} @@ -34,17 +37,17 @@ SETTINGS Parámetros requeridos: -- `kafka_broker_list` – Una lista separada por comas de corredores (por ejemplo, `localhost:9092`). -- `kafka_topic_list` – Una lista de temas Kafka. -- `kafka_group_name` – Un grupo de consumidores Kafka. Los márgenes de lectura se rastrean para cada grupo por separado. Si no desea que los mensajes se dupliquen en el clúster, utilice el mismo nombre de grupo en todas partes. -- `kafka_format` – Formato de mensaje. Utiliza la misma notación que el SQL `FORMAT` función, tal como `JSONEachRow`. Para obtener más información, consulte [Formato](../../interfaces/formats.md) apartado. +- `kafka_broker_list` – A comma-separated list of brokers (for example, `localhost:9092`). +- `kafka_topic_list` – A list of Kafka topics. +- `kafka_group_name` – A group of Kafka consumers. Reading margins are tracked for each group separately. If you don't want messages to be duplicated in the cluster, use the same group name everywhere. +- `kafka_format` – Message format. Uses the same notation as the SQL `FORMAT` función, tal como `JSONEachRow`. Para obtener más información, consulte [Formato](../../../interfaces/formats.md) apartado. 
Parámetros opcionales:

-- `kafka_row_delimiter` – Carácter delimitador, que termina el mensaje.
-- `kafka_schema` – Parámetro que debe utilizarse si el formato requiere una definición de esquema. Por ejemplo, [Cap'n Proto](https://capnproto.org/) requiere la ruta de acceso al archivo de esquema y el nombre de la raíz `schema.capnp:Message` objeto.
-- `kafka_num_consumers` – El número de consumidores por tabla. Predeterminado: `1`. Especifique más consumidores si el rendimiento de un consumidor es insuficiente. El número total de consumidores no debe exceder el número de particiones en el tema, ya que solo se puede asignar un consumidor por partición.
-- `kafka_skip_broken_messages` – Tolerancia analizador de mensajes Kafka a los mensajes incompatibles con el esquema por bloque. Predeterminado: `0`. Si `kafka_skip_broken_messages = N` Entonces el motor salta *Y* Mensajes de Kafka que no se pueden analizar (un mensaje es igual a una fila de datos).
+- `kafka_row_delimiter` – Delimiter character, which ends the message.
+- `kafka_schema` – Parameter that must be used if the format requires a schema definition. For example, [Cap'n Proto](https://capnproto.org/) requiere la ruta de acceso al archivo de esquema y el nombre de la raíz `schema.capnp:Message` objeto.
+- `kafka_num_consumers` – The number of consumers per table. Default: `1`. Especifique más consumidores si el rendimiento de un consumidor es insuficiente. El número total de consumidores no debe exceder el número de particiones en el tema, ya que solo se puede asignar un consumidor por partición.
+- `kafka_skip_broken_messages` – Kafka message parser tolerance to schema-incompatible messages per block. Default: `0`. Si `kafka_skip_broken_messages = N` entonces el motor salta *N* Mensajes de Kafka que no se pueden analizar (un mensaje es igual a una fila de datos).

Ejemplos:

@@ -90,7 +93,7 @@ Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format

-## Descripción {#description}
+## Descripción {#description}

Los mensajes entregados se realizan un seguimiento automático, por lo que cada mensaje de un grupo solo se cuenta una vez. Si desea obtener los datos dos veces, cree una copia de la tabla con otro nombre de grupo.

Cuando `MATERIALIZED VIEW` se une al motor, comienza a recopilar datos en segundo plano. Esto le permite recibir continuamente mensajes de Kafka y convertirlos al formato requerido usando `SELECT`. Una tabla kafka puede tener tantas vistas materializadas como desee, no leen datos de la tabla kafka directamente, sino que reciben nuevos registros (en bloques), de esta manera puede escribir en varias tablas con diferentes niveles de detalle (con agrupación - agregación y sin).

Ejemplo:

@@ -127,7 +130,7 @@ SELECT level, sum(total) FROM daily GROUP BY level;
```

-Para mejorar el rendimiento, los mensajes recibidos se agrupan en bloques del tamaño de [Max\_insert\_block\_size](../settings/settings.md#settings-max_insert_block_size). Si el bloque no se formó dentro [Nombre de la red inalámbrica (SSID):](../settings/settings.md) milisegundos, los datos se vaciarán a la tabla independientemente de la integridad del bloque.
+Para mejorar el rendimiento, los mensajes recibidos se agrupan en bloques del tamaño de [Max\_insert\_block\_size](../../../operations/server_configuration_parameters/settings.md#settings-max_insert_block_size). Si el bloque no se formó dentro de [Nombre de la red inalámbrica (SSID):](../../../operations/server_configuration_parameters/settings.md) milisegundos, los datos se vaciarán a la tabla independientemente de la integridad del bloque.

Para detener la recepción de datos de tema o cambiar la lógica de conversión, desconecte la vista materializada:

@@ -158,16 +161,16 @@ Similar a GraphiteMergeTree, el motor Kafka admite una configuración extendida

Para obtener una lista de posibles opciones de configuración, consulte [referencia de configuración librdkafka](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md).
Usa el guión bajo (`_`) en lugar de un punto en la configuración de ClickHouse. Por ejemplo, `check.crcs=true` será `true`. -## Columnas virtuales {#virtual-columns} +## Virtual Columnas {#virtual-columns} -- `_topic` — El tema de Kafka. -- `_key` — Clave del mensaje. -- `_offset` — Desplazamiento del mensaje. -- `_timestamp` — Marca de tiempo del mensaje. -- `_partition` — Partición de Kafka tema. +- `_topic` — Kafka topic. +- `_key` — Key of the message. +- `_offset` — Offset of the message. +- `_timestamp` — Timestamp of the message. +- `_partition` — Partition of Kafka topic. **Ver también** -- [Columnas virtuales](index.md#table_engines-virtual_columns) +- [Virtual columnas](../index.md#table_engines-virtual_columns) -[Artículo Original](https://clickhouse.tech/docs/es/operations/table_engines/kafka/) +[Artículo Original](https://clickhouse.tech/docs/en/operations/table_engines/kafka/) diff --git a/docs/es/operations/table_engines/mysql.md b/docs/es/engines/table_engines/integrations/mysql.md similarity index 61% rename from docs/es/operations/table_engines/mysql.md rename to docs/es/engines/table_engines/integrations/mysql.md index bb7d3016221..6d78036fbdc 100644 --- a/docs/es/operations/table_engines/mysql.md +++ b/docs/es/engines/table_engines/integrations/mysql.md @@ -1,10 +1,13 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 33 +toc_title: MySQL --- -# MySQL {#mysql} +# Mysql {#mysql} -El motor MySQL le permite realizar `SELECT` Consultas sobre datos almacenados en un servidor MySQL remoto. +El motor MySQL le permite realizar `SELECT` consultas sobre datos almacenados en un servidor MySQL remoto. ## Creación de una tabla {#creating-a-table} @@ -17,32 +20,32 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] ) ENGINE = MySQL('host:port', 'database', 'table', 'user', 'password'[, replace_query, 'on_duplicate_clause']); ``` -Vea una descripción detallada del [CREAR TABLA](../../query_language/create.md#create-table-query) consulta. +Vea una descripción detallada del [CREATE TABLE](../../../sql_reference/statements/create.md#create-table-query) consulta. La estructura de la tabla puede diferir de la estructura de la tabla MySQL original: - Los nombres de columna deben ser los mismos que en la tabla MySQL original, pero puede usar solo algunas de estas columnas y en cualquier orden. -- Los tipos de columna pueden diferir de los de la tabla MySQL original. Haga clic en Casa intenta [elenco](../../query_language/functions/type_conversion_functions.md#type_conversion_function-cast) valores a los tipos de datos ClickHouse. +- Los tipos de columna pueden diferir de los de la tabla MySQL original. ClickHouse intenta [elenco](../../../sql_reference/functions/type_conversion_functions.md#type_conversion_function-cast) valores a los tipos de datos ClickHouse. **Parámetros del motor** -- `host:port` — Dirección del servidor MySQL. +- `host:port` — MySQL server address. -- `database` — Nombre de base de datos remota. +- `database` — Remote database name. -- `table` — Nombre de la tabla remota. +- `table` — Remote table name. -- `user` — Usuario de MySQL. +- `user` — MySQL user. -- `password` — Contraseña de usuario. +- `password` — User password. -- `replace_query` — Bandera que convierte `INSERT INTO` Consultas a `REPLACE INTO`. Si `replace_query=1`, la consulta se sustituye. +- `replace_query` — Flag that converts `INSERT INTO` consultas a `REPLACE INTO`. Si `replace_query=1`, la consulta se sustituye. 
-- `on_duplicate_clause` — El `ON DUPLICATE KEY on_duplicate_clause` expresión que se añade a la `INSERT` consulta. +- `on_duplicate_clause` — The `ON DUPLICATE KEY on_duplicate_clause` expresión que se añade a la `INSERT` consulta. Ejemplo: `INSERT INTO t (c1,c2) VALUES ('a', 2) ON DUPLICATE KEY UPDATE c2 = c2 + 1`, donde `on_duplicate_clause` es `UPDATE c2 = c2 + 1`. Ver el [Documentación de MySQL](https://dev.mysql.com/doc/refman/8.0/en/insert-on-duplicate.html) para encontrar qué `on_duplicate_clause` se puede utilizar con la cláusula `ON DUPLICATE KEY`. - Especificar `on_duplicate_clause` tienes que pasar `0` Angeles `replace_query` parámetro. Si pasa simultáneamente `replace_query = 1` y `on_duplicate_clause`, ClickHouse genera una excepción. + Para especificar `on_duplicate_clause`, tiene que pasar `0` al parámetro `replace_query`. Si pasa simultáneamente `replace_query = 1` y `on_duplicate_clause`, ClickHouse genera una excepción. Simple `WHERE` cláusulas tales como `=, !=, >, >=, <, <=` se ejecutan en el servidor MySQL. @@ -65,11 +68,11 @@ mysql> insert into test (`int_id`, `float`) VALUES (1,2); Query OK, 1 row affected (0,00 sec) mysql> select * from test; -+--------+--------------+-------+----------------+ ++------+----------+-----+----------+ | int_id | int_nullable | float | float_nullable | -+--------+--------------+-------+----------------+ ++------+----------+-----+----------+ | 1 | NULL | 2 | NULL | -+--------+--------------+-------+----------------+ ++------+----------+-----+----------+ 1 row in set (0,00 sec) ``` @@ -96,7 +99,7 @@ SELECT * FROM mysql_table ## Ver también {#see-also} -- [El ‘mysql’ función de la tabla](../../query_language/table_functions/mysql.md) -- [Uso de MySQL como fuente de diccionario externo](../../query_language/dicts/external_dicts_dict_sources.md#dicts-external_dicts_dict_sources-mysql) +- [El ‘mysql’ función de la tabla](../../../sql_reference/table_functions/mysql.md) +- [Uso de MySQL como fuente de diccionario externo](../../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md#dicts-external_dicts_dict_sources-mysql) -[Artículo Original](https://clickhouse.tech/docs/es/operations/table_engines/mysql/) +[Artículo Original](https://clickhouse.tech/docs/en/operations/table_engines/mysql/) diff --git a/docs/es/operations/table_engines/odbc.md b/docs/es/engines/table_engines/integrations/odbc.md similarity index 74% rename from docs/es/operations/table_engines/odbc.md rename to docs/es/engines/table_engines/integrations/odbc.md index 765d6b814aa..4b26cca9146 100644 --- a/docs/es/operations/table_engines/odbc.md +++ b/docs/es/engines/table_engines/integrations/odbc.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 35 +toc_title: ODBC --- # ODBC {#table-engine-odbc} @@ -8,7 +11,7 @@ Permite que ClickHouse se conecte a bases de datos externas a través de [ODBC]( Para implementar con seguridad conexiones ODBC, ClickHouse usa un programa separado `clickhouse-odbc-bridge`. Si el controlador ODBC se carga directamente desde `clickhouse-server`, problemas de controlador pueden bloquear el servidor ClickHouse. ClickHouse inicia automáticamente `clickhouse-odbc-bridge` cuando se requiere. El programa de puente ODBC se instala desde el mismo paquete que el `clickhouse-server`. -Este motor soporta el [NULO](../../data_types/nullable.md) tipo de datos. +Este motor soporta el [NULL](../../../sql_reference/data_types/nullable.md) tipo de datos.
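Before defining a permanent ODBC-backed table, the connection can be sanity-checked with the `odbc` table function. A minimal sketch, assuming a DSN named `mysqlconn` and a remote `test.test` table (both names are illustrative and mirror the usage example below):

``` sql
-- Runs through clickhouse-odbc-bridge; the DSN and table names are assumptions.
SELECT * FROM odbc('DSN=mysqlconn', 'test', 'test') LIMIT 1;
```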
## Creación de una tabla {#creating-a-table} @@ -22,18 +25,18 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] ENGINE = ODBC(connection_settings, external_database, external_table) ``` -Vea una descripción detallada del [CREAR TABLA](../../query_language/create.md#create-table-query) consulta. +Vea una descripción detallada del [CREATE TABLE](../../../sql_reference/statements/create.md#create-table-query) consulta. La estructura de la tabla puede diferir de la estructura de la tabla de origen: - Los nombres de columna deben ser los mismos que en la tabla de origen, pero puede usar solo algunas de estas columnas y en cualquier orden. -- Los tipos de columna pueden diferir de los de la tabla de origen. Haga clic en Casa intenta [elenco](../../query_language/functions/type_conversion_functions.md#type_conversion_function-cast) valores a los tipos de datos ClickHouse. +- Los tipos de columna pueden diferir de los de la tabla de origen. ClickHouse intenta [convertir](../../../sql_reference/functions/type_conversion_functions.md#type_conversion_function-cast) los valores a los tipos de datos ClickHouse. **Parámetros del motor** -- `connection_settings` — Nombre de la sección con ajustes de conexión en el `odbc.ini` file. -- `external_database` — Nombre de una base de datos en un DBMS externo. -- `external_table` — Nombre de una tabla en el `external_database`. +- `connection_settings` — Name of the section with connection settings in the `odbc.ini` file. +- `external_database` — Name of a database in an external DBMS. +- `external_table` — Name of a table in the `external_database`. ## Ejemplo de uso {#usage-example} @@ -71,7 +74,7 @@ Puede verificar la conexión usando el `isql` utilidad desde la instalación de ``` bash $ isql -v mysqlconn -+---------------------------------------+ ++-------------------------+ | Connected! | | | ...
@@ -92,11 +95,11 @@ mysql> insert into test (`int_id`, `float`) VALUES (1,2); Query OK, 1 row affected (0,00 sec) mysql> select * from test; -+--------+--------------+-------+----------------+ ++------+----------+-----+----------+ | int_id | int_nullable | float | float_nullable | -+--------+--------------+-------+----------------+ ++------+----------+-----+----------+ | 1 | NULL | 2 | NULL | -+--------+--------------+-------+----------------+ ++------+----------+-----+----------+ 1 row in set (0,00 sec) ``` @@ -123,7 +126,7 @@ SELECT * FROM odbc_t ## Ver también {#see-also} -- [Diccionarios externos ODBC](../../query_language/dicts/external_dicts_dict_sources.md#dicts-external_dicts_dict_sources-odbc) -- [Tabla ODBC función](../../query_language/table_functions/odbc.md) +- [Diccionarios externos ODBC](../../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md#dicts-external_dicts_dict_sources-odbc) +- [Tabla ODBC función](../../../sql_reference/table_functions/odbc.md) -[Artículo Original](https://clickhouse.tech/docs/es/operations/table_engines/odbc/) +[Artículo Original](https://clickhouse.tech/docs/en/operations/table_engines/odbc/) diff --git a/docs/es/engines/table_engines/log_family/index.md b/docs/es/engines/table_engines/log_family/index.md new file mode 100644 index 00000000000..53f85f95043 --- /dev/null +++ b/docs/es/engines/table_engines/log_family/index.md @@ -0,0 +1,8 @@ +--- +machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_folder_title: Log Family +toc_priority: 29 +--- + + diff --git a/docs/es/operations/table_engines/log.md b/docs/es/engines/table_engines/log_family/log.md similarity index 86% rename from docs/es/operations/table_engines/log.md rename to docs/es/engines/table_engines/log_family/log.md index 595e2532d45..60fafda1fb8 100644 --- a/docs/es/operations/table_engines/log.md +++ b/docs/es/engines/table_engines/log_family/log.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 33 +toc_title: Registro --- # Registro {#log} @@ -10,4 +13,4 @@ El registro difiere de [TinyLog](tinylog.md) en que un pequeño archivo de “ma Para el acceso a datos simultáneos, las operaciones de lectura se pueden realizar simultáneamente, mientras que las operaciones de escritura bloquean las lecturas entre sí. El motor de registro no admite índices. Del mismo modo, si la escritura en una tabla falla, la tabla se rompe y la lectura de ella devuelve un error. El motor de registro es adecuado para datos temporales, tablas de escritura única y para fines de prueba o demostración. 
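A minimal sketch of the write-once usage described above; the table and column names are illustrative and not part of the original article:

``` sql
CREATE TABLE log_example
(
    `ts` DateTime,
    `message` String
)
ENGINE = Log;

INSERT INTO log_example VALUES (now(), 'first message');

-- Concurrent reads are allowed; a concurrent INSERT would block them.
SELECT * FROM log_example;
```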
-[Artículo Original](https://clickhouse.tech/docs/es/operations/table_engines/log/) +[Artículo Original](https://clickhouse.tech/docs/en/operations/table_engines/log/) diff --git a/docs/es/operations/table_engines/log_family.md b/docs/es/engines/table_engines/log_family/log_family.md similarity index 75% rename from docs/es/operations/table_engines/log_family.md rename to docs/es/engines/table_engines/log_family/log_family.md index 1a2b3f2bb70..f55002b3c0d 100644 --- a/docs/es/operations/table_engines/log_family.md +++ b/docs/es/engines/table_engines/log_family/log_family.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 31 +toc_title: "Implantaci\xF3n" --- # Familia del motor de registro {#log-engine-family} @@ -24,7 +27,7 @@ Motor: Durante `INSERT` consultas, la tabla está bloqueada y otras consultas para leer y escribir datos esperan a que la tabla se desbloquee. Si no hay consultas de escritura de datos, se puede realizar cualquier número de consultas de lectura de datos simultáneamente. -- No apoyo [mutación](../../query_language/alter.md#alter-mutations) operación. +- No apoyo [mutación](../../../sql_reference/statements/alter.md#alter-mutations) operación. - No admite índices. @@ -36,8 +39,8 @@ Motor: ## Diferencia {#differences} -El `TinyLog` es el más simple de la familia y proporciona la funcionalidad más pobre y la eficiencia más baja. El `TinyLog` el motor no admite la lectura de datos paralelos por varios subprocesos. Lee datos más lentamente que otros motores de la familia que admiten lectura paralela y utiliza casi tantos descriptores como los `Log` motor porque almacena cada columna en un archivo separado. Úselo en escenarios simples de baja carga. +El `TinyLog` es el más simple de la familia y proporciona la funcionalidad más pobre y la eficiencia más baja. El `TinyLog` el motor no admite la lectura de datos paralelos por varios hilos. Lee datos más lentamente que otros motores de la familia que admiten lectura paralela y utiliza casi tantos descriptores como los `Log` motor porque almacena cada columna en un archivo separado. Úselo en escenarios simples de baja carga. El `Log` y `StripeLog` Los motores admiten lectura de datos paralela. Al leer datos, ClickHouse usa múltiples hilos. Cada subproceso procesa un bloque de datos separado. El `Log` utiliza un archivo separado para cada columna de la tabla. `StripeLog` almacena todos los datos en un archivo. Como resultado, el `StripeLog` el motor utiliza menos descriptores en el sistema operativo, pero el `Log` proporciona una mayor eficiencia al leer datos. -[Artículo Original](https://clickhouse.tech/docs/es/operations/table_engines/log_family/) +[Artículo Original](https://clickhouse.tech/docs/en/operations/table_engines/log_family/) diff --git a/docs/es/operations/table_engines/stripelog.md b/docs/es/engines/table_engines/log_family/stripelog.md similarity index 83% rename from docs/es/operations/table_engines/stripelog.md rename to docs/es/engines/table_engines/log_family/stripelog.md index a6131ae5d32..7e0d84837aa 100644 --- a/docs/es/operations/table_engines/stripelog.md +++ b/docs/es/engines/table_engines/log_family/stripelog.md @@ -1,8 +1,11 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 32 +toc_title: StripeLog --- -# StripeLog {#stripelog} +# StripeLog {#stripelog} Este motor pertenece a la familia de motores de registro.
Consulte las propiedades comunes de los motores de registro y sus diferencias en [Familia del motor de registro](log_family.md) artículo. @@ -19,22 +22,22 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] ) ENGINE = StripeLog ``` -Vea la descripción detallada del [CREAR TABLA](../../query_language/create.md#create-table-query) consulta. +Vea la descripción detallada del [CREATE TABLE](../../../sql_reference/statements/create.md#create-table-query) consulta. ## Escribir los datos {#table_engines-stripelog-writing-the-data} -El `StripeLog` el motor almacena todas las columnas en un archivo. Para cada `INSERT` ClickHouse agrega el bloque de datos al final de un archivo de tabla, escribiendo columnas una por una. +El `StripeLog` el motor almacena todas las columnas en un archivo. Para cada `INSERT` consulta, ClickHouse agrega el bloque de datos al final de un archivo de tabla, escribiendo columnas una por una. Para cada tabla, ClickHouse escribe los archivos: -- `data.bin` — Archivo de datos. -- `index.mrk` — Archivo con marcas. Las marcas contienen compensaciones para cada columna de cada bloque de datos insertado. +- `data.bin` — Data file. +- `index.mrk` — File with marks. Marks contain offsets for each column of each data block inserted. El `StripeLog` el motor no soporta el `ALTER UPDATE` y `ALTER DELETE` operación. ## Lectura de los datos {#table_engines-stripelog-reading-the-data} -El archivo con marcas permite ClickHouse paralelizar la lectura de datos. Esto significa que un `SELECT` consulta devuelve filas en un orden impredecible. Descripción `ORDER BY` cláusula para ordenar filas. +El archivo con marcas permite ClickHouse paralelizar la lectura de datos. Esto significa que un `SELECT` query devuelve filas en un orden impredecible. Utilice el `ORDER BY` cláusula para ordenar filas. ## Ejemplo de uso {#table_engines-stripelog-example-of-use} @@ -50,7 +53,7 @@ CREATE TABLE stripe_log_table ENGINE = StripeLog ``` -Inserte datos: +Insertar datos: ``` sql INSERT INTO stripe_log_table VALUES (now(),'REGULAR','The first regular message') @@ -89,4 +92,4 @@ SELECT * FROM stripe_log_table ORDER BY timestamp └─────────────────────┴──────────────┴────────────────────────────┘ ``` -[Artículo Original](https://clickhouse.tech/docs/es/operations/table_engines/stripelog/) +[Artículo Original](https://clickhouse.tech/docs/en/operations/table_engines/stripelog/) diff --git a/docs/es/operations/table_engines/tinylog.md b/docs/es/engines/table_engines/log_family/tinylog.md similarity index 66% rename from docs/es/operations/table_engines/tinylog.md rename to docs/es/engines/table_engines/log_family/tinylog.md index 1d9ac4c3b73..a082d47e272 100644 --- a/docs/es/operations/table_engines/tinylog.md +++ b/docs/es/engines/table_engines/log_family/tinylog.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 34 +toc_title: TinyLog --- # TinyLog {#tinylog} @@ -8,6 +11,6 @@ El motor pertenece a la familia de motores de registro. Ver [Familia del motor d Este motor de tablas se usa normalmente con el método write-once: escribir datos una vez, luego leerlos tantas veces como sea necesario. Por ejemplo, puede usar `TinyLog`-type tablas para datos intermedios que se procesan en pequeños lotes. Tenga en cuenta que el almacenamiento de datos en un gran número de tablas pequeñas es ineficiente. -Las consultas se ejecutan en una sola secuencia. 
En otras palabras, este motor está diseñado para tablas relativamente pequeñas (hasta aproximadamente 1,000,000 filas). Tiene sentido utilizar este motor de tablas si tiene muchas tablas pequeñas, ya que es más simple que el [Registro](log.md) motor (menos archivos necesitan ser abiertos. +Las consultas se ejecutan en una sola secuencia. En otras palabras, este motor está diseñado para tablas relativamente pequeñas (hasta aproximadamente 1,000,000 filas). Tiene sentido usar este motor de tablas si tiene muchas tablas pequeñas, ya que es más simple que el [Registro](log.md) motor (menos archivos necesitan ser abiertos). -[Artículo Original](https://clickhouse.tech/docs/es/operations/table_engines/tinylog/) +[Artículo Original](https://clickhouse.tech/docs/en/operations/table_engines/tinylog/) diff --git a/docs/es/operations/table_engines/aggregatingmergetree.md b/docs/es/engines/table_engines/mergetree_family/aggregatingmergetree.md similarity index 84% rename from docs/es/operations/table_engines/aggregatingmergetree.md rename to docs/es/engines/table_engines/mergetree_family/aggregatingmergetree.md index 9f49f3b76d3..5316c0d5752 100644 --- a/docs/es/operations/table_engines/aggregatingmergetree.md +++ b/docs/es/engines/table_engines/mergetree_family/aggregatingmergetree.md @@ -1,14 +1,17 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 35 +toc_title: "Agregaci\xF3nMergeTree" --- -# AgregaciónMergeTree {#aggregatingmergetree} +# AggregatingMergeTree {#aggregatingmergetree} El motor hereda de [Método de codificación de datos:](mergetree.md#table_engines-mergetree), alterando la lógica para la fusión de partes de datos. ClickHouse reemplaza todas las filas con la misma clave principal (o más exactamente, con la misma [clave de clasificación](mergetree.md)) con una sola fila (dentro de una parte de datos) que almacena una combinación de estados de funciones agregadas. Usted puede utilizar `AggregatingMergeTree` tablas para la agregación de datos incrementales, incluidas las vistas materializadas agregadas. -El motor procesa todas las columnas con [AggregateFunction](../../data_types/nested_data_structures/aggregatefunction.md) tipo. +El motor procesa todas las columnas con [AggregateFunction](../../../sql_reference/data_types/aggregatefunction.md) tipo. Es apropiado usar `AggregatingMergeTree` si reduce el número de filas en órdenes de magnitud. @@ -28,7 +31,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] [SETTINGS name=value, ...] ``` -Para obtener una descripción de los parámetros de solicitud, consulte [descripción de la solicitud](../../query_language/create.md). +Para obtener una descripción de los parámetros de solicitud, consulte [descripción de la solicitud](../../../sql_reference/statements/create.md). **Cláusulas de consulta** @@ -55,10 +58,10 @@ Todos los parámetros tienen el mismo significado que en `MergeTree`. ## SELECCIONAR e INSERTAR {#select-and-insert} -Para insertar datos, utilice [INSERTAR SELECCIONAR](../../query_language/insert_into.md) Consulta con funciones agregadas -State-. +Para insertar datos, utilice [INSERT SELECT](../../../sql_reference/statements/insert_into.md) consulta con funciones agregadas -State-. Al seleccionar datos de una tabla `AggregatingMergeTree`, use la cláusula `GROUP BY` y las mismas funciones agregadas que al insertar datos, pero usando el sufijo `-Merge`.
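A hedged sketch of the `-State` / `-Merge` pairing described above; the table names (`visits_raw`, `agg_visits`) and columns are assumptions for illustration only:

``` sql
CREATE TABLE agg_visits
(
    `StartDate` Date,
    `Duration` AggregateFunction(sum, UInt64),
    `Users` AggregateFunction(uniq, UInt32)
)
ENGINE = AggregatingMergeTree()
ORDER BY StartDate;

-- Writing: aggregate functions take the -State suffix.
-- Assumes a source table visits_raw(StartDate Date, Duration UInt64, UserID UInt32).
INSERT INTO agg_visits
SELECT StartDate, sumState(Duration), uniqState(UserID)
FROM visits_raw
GROUP BY StartDate;

-- Reading: GROUP BY plus the matching -Merge suffix.
SELECT StartDate, sumMerge(Duration) AS total_duration, uniqMerge(Users) AS users
FROM agg_visits
GROUP BY StartDate;
```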
-En los resultados de `SELECT` consulta, los valores de `AggregateFunction` tipo tiene representación binaria específica de la implementación para todos los formatos de salida de ClickHouse. Si volcar datos en, por ejemplo, `TabSeparated` Formato con `SELECT` Consulta entonces este volcado se puede cargar de nuevo usando `INSERT` consulta. +En los resultados de `SELECT` consulta, los valores de `AggregateFunction` tipo tiene representación binaria específica de la implementación para todos los formatos de salida de ClickHouse. Si volcar datos en, por ejemplo, `TabSeparated` formato con `SELECT` consulta entonces este volcado se puede cargar de nuevo usando `INSERT` consulta. ## Ejemplo de una vista materializada agregada {#example-of-an-aggregated-materialized-view} @@ -96,4 +99,4 @@ GROUP BY StartDate ORDER BY StartDate; ``` -[Artículo Original](https://clickhouse.tech/docs/es/operations/table_engines/aggregatingmergetree/) +[Artículo Original](https://clickhouse.tech/docs/en/operations/table_engines/aggregatingmergetree/) diff --git a/docs/es/operations/table_engines/collapsingmergetree.md b/docs/es/engines/table_engines/mergetree_family/collapsingmergetree.md similarity index 87% rename from docs/es/operations/table_engines/collapsingmergetree.md rename to docs/es/engines/table_engines/mergetree_family/collapsingmergetree.md index 9a05e0effb7..7c9b87cc814 100644 --- a/docs/es/operations/table_engines/collapsingmergetree.md +++ b/docs/es/engines/table_engines/mergetree_family/collapsingmergetree.md @@ -1,8 +1,11 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 36 +toc_title: ColapsarMergeTree --- -# ColapsarMergeTree {#table_engine-collapsingmergetree} +# CollapsingMergeTree {#table_engine-collapsingmergetree} El motor hereda de [Método de codificación de datos:](mergetree.md) y agrega la lógica de las filas que colapsan al algoritmo de fusión de partes de datos. @@ -25,13 +28,13 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] [SETTINGS name=value, ...] ``` -Para obtener una descripción de los parámetros de consulta, consulte [descripción de la consulta](../../query_language/create.md). +Para obtener una descripción de los parámetros de consulta, consulte [descripción de la consulta](../../../sql_reference/statements/create.md). **CollapsingMergeTree Parámetros** -- `sign` — Nombre de la columna con el tipo de fila: `1` es una “state” fila, `-1` es una “cancel” Fila. - Tipo de datos de columna — `Int8`. +- `sign` — Name of the column with the type of row: `1` es una “state” fila, `-1` es una “cancel” fila. + Column data type — `Int8`. **Cláusulas de consulta** @@ -55,9 +58,9 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] Todos los parámetros excepto `sign` el mismo significado que en `MergeTree`. -- `sign` — Nombre de la columna con el tipo de fila: `1` — “state” fila, `-1` — “cancel” Fila. - Tipo de datos de columna — `Int8`. +- `sign` — Name of the column with the type of row: `1` — “state” fila, `-1` — “cancel” fila. + Column Data Type — `Int8`. @@ -67,7 +70,7 @@ Todos los parámetros excepto `sign` el mismo significado que en `MergeTree`. Considere la situación en la que necesita guardar datos que cambian continuamente para algún objeto. Parece lógico tener una fila para un objeto y actualizarla en cualquier cambio, pero la operación de actualización es costosa y lenta para DBMS porque requiere la reescritura de los datos en el almacenamiento.
Si necesita escribir datos rápidamente, la actualización no es aceptable, pero puede escribir los cambios de un objeto secuencialmente de la siguiente manera. -Utilice la columna en particular `Sign`. Si `Sign = 1` significa que la fila es un estado de un objeto, llamémoslo “state” Fila. Si `Sign = -1` significa la cancelación del estado de un objeto con los mismos atributos, llamémoslo “cancel” Fila. +Utilice la columna en particular `Sign`. Si `Sign = 1` significa que la fila es un estado de un objeto, llamémoslo “state” fila. Si `Sign = -1` significa la cancelación del estado de un objeto con los mismos atributos, llamémoslo “cancel” fila. Por ejemplo, queremos calcular cuántas páginas revisaron los usuarios en algún sitio y cuánto tiempo estuvieron allí. En algún momento escribimos la siguiente fila con el estado de la actividad del usuario: @@ -115,22 +118,22 @@ Cuando ClickHouse combina partes de datos, cada grupo de filas consecutivas tien Para cada parte de datos resultante, ClickHouse guarda: -1. El primero “cancel” y el último “state” si el número de “state” y “cancel” y la última fila es una “state” Fila. +1. El primero “cancel” y el último “state” fila, si el número de filas “state” y “cancel” coincide y la última fila es una “state” fila. -2. El último “state” fila, si hay más “state” películas que “cancel” películas. +2. El último “state” fila, si hay más “state” filas que “cancel” filas. -3. El primero “cancel” fila, si hay más “cancel” películas que “state” películas. +3. El primero “cancel” fila, si hay más “cancel” filas que “state” filas. 4. Ninguna de las filas, en todos los demás casos. -También cuando hay al menos 2 más “state” películas que “cancel” filas, o al menos 2 más “cancel” películas entonces “state” fila, la fusión continúa, pero ClickHouse trata esta situación como un error lógico y la registra en el registro del servidor. Este error puede producirse si se insertan los mismos datos más de una vez. +También cuando hay al menos 2 más “state” filas que “cancel” filas, o al menos 2 más “cancel” filas que “state” filas, la fusión continúa, pero ClickHouse trata esta situación como un error lógico y la registra en el registro del servidor. Este error puede producirse si se insertan los mismos datos más de una vez. Por lo tanto, el colapso no debe cambiar los resultados del cálculo de las estadísticas. Los cambios colapsaron gradualmente para que al final solo quedara el último estado de casi todos los objetos. -El `Sign` se requiere porque el algoritmo de fusión no garantiza que todas las filas con la misma clave de ordenación estarán en la misma parte de datos resultante e incluso en el mismo servidor físico. Proceso de ClickHouse `SELECT` consultas con múltiples hilos, y no puede predecir el orden de las filas en el resultado. La agregación es necesaria si hay una necesidad de obtener completamente “collapsed” datos de `CollapsingMergeTree` tabla. +El `Sign` se requiere porque el algoritmo de fusión no garantiza que todas las filas con la misma clave de clasificación estén en la misma parte de datos resultante e incluso en el mismo servidor físico. Proceso de ClickHouse `SELECT` consultas con múltiples hilos, y no puede predecir el orden de las filas en el resultado. La agregación es necesaria si hay una necesidad de obtener completamente “collapsed” datos de `CollapsingMergeTree` tabla. -Para finalizar el colapso, escriba una consulta con `GROUP BY` cláusula y funciones agregadas que representan el signo.
Por ejemplo, para calcular la cantidad, use `sum(Sign)` es lugar de `count()`. Para calcular la suma de algo, use `sum(Sign * x)` es lugar de `sum(x)` y así sucesivamente, y también añadir `HAVING sum(Sign) > 0`. +Para finalizar el colapso, escriba una consulta con `GROUP BY` cláusula y funciones agregadas que representan el signo. Por ejemplo, para calcular la cantidad, use `sum(Sign)` en lugar de `count()`. Para calcular la suma de algo, use `sum(Sign * x)` en lugar de `sum(x)` y así sucesivamente, y también añadir `HAVING sum(Sign) > 0`. Los agregados `count`, `sum` y `avg` podría calcularse de esta manera. El agregado `uniq` podría calcularse si un objeto tiene al menos un estado no colapsado. Los agregados `min` y `max` no se pudo calcular porque `CollapsingMergeTree` no guarda el historial de valores de los estados colapsados. @@ -190,9 +193,9 @@ SELECT * FROM UAct └─────────────────────┴───────────┴──────────┴──────┘ ``` -¿Qué vemos y dónde está colapsando? +¿Qué vemos y dónde está colapsando? -Con dos `INSERT` Consultas, hemos creado 2 partes de datos. El `SELECT` la consulta se realizó en 2 hilos, y obtuvimos un orden aleatorio de filas. No se ha producido un colapso porque todavía no se había fusionado las partes de datos. ClickHouse fusiona parte de datos en un momento desconocido que no podemos predecir. +Con dos `INSERT` consultas, hemos creado 2 partes de datos. El `SELECT` la consulta se realizó en 2 hilos, y obtuvimos un orden aleatorio de filas. No se ha producido un colapso porque todavía no se había fusionado las partes de datos. ClickHouse fusiona parte de datos en un momento desconocido que no podemos predecir. Por lo tanto, necesitamos agregación: @@ -212,7 +215,7 @@ HAVING sum(Sign) > 0 └─────────────────────┴───────────┴──────────┘ ``` -Si no necesitamos agregación y queremos forzar el colapso, podemos usar `FINAL` Modificador para `FROM` clausula. +Si no necesitamos agregación y queremos forzar el colapso, podemos usar `FINAL` modificador para `FROM` cláusula. ``` sql SELECT * FROM UAct FINAL ``` @@ -303,4 +306,4 @@ select * FROM UAct └─────────────────────┴───────────┴──────────┴──────┘ ``` -[Artículo Original](https://clickhouse.tech/docs/es/operations/table_engines/collapsingmergetree/) +[Artículo Original](https://clickhouse.tech/docs/en/operations/table_engines/collapsingmergetree/) diff --git a/docs/es/operations/table_engines/custom_partitioning_key.md b/docs/es/engines/table_engines/mergetree_family/custom_partitioning_key.md similarity index 76% rename from docs/es/operations/table_engines/custom_partitioning_key.md rename to docs/es/engines/table_engines/mergetree_family/custom_partitioning_key.md index 71cfeb52411..e97e26545b9 100644 --- a/docs/es/operations/table_engines/custom_partitioning_key.md +++ b/docs/es/engines/table_engines/mergetree_family/custom_partitioning_key.md @@ -1,14 +1,17 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 32 +toc_title: "Clave de partici\xF3n personalizada" --- # Clave de partición personalizada {#custom-partitioning-key} -La partición está disponible para el [Método de codificación de datos:](mergetree.md) mesas familiares (incluyendo [repetición](replication.md) tabla). [Vistas materializadas](materializedview.md) basado en tablas MergeTree soporte de particionamiento, también. +La partición está disponible para el [Método de codificación de datos:](mergetree.md) mesas familiares (incluyendo [repetición](replication.md) tabla).
Las [vistas materializadas](../special/materializedview.md) basadas en tablas MergeTree también admiten el particionamiento. Una partición es una combinación lógica de registros en una tabla por un criterio especificado. Puede establecer una partición por un criterio arbitrario, como por mes, por día o por tipo de evento. Cada partición se almacena por separado para simplificar las manipulaciones de estos datos. Al acceder a los datos, ClickHouse utiliza el subconjunto más pequeño de particiones posible. -La partición se especifica en el `PARTITION BY expr` cláusula cuando [Creando una tabla](mergetree.md#table_engine-mergetree-creating-a-table). La clave de partición puede ser cualquier expresión de las columnas de la tabla. Por ejemplo, para especificar la partición por mes, utilice la expresión `toYYYYMM(date_column)`: +La partición se especifica en el `PARTITION BY expr` cláusula cuando [creando una tabla](mergetree.md#table_engine-mergetree-creating-a-table). La clave de partición puede ser cualquier expresión de las columnas de la tabla. Por ejemplo, para especificar la partición por mes, utilice la expresión `toYYYYMM(date_column)`: ``` sql CREATE TABLE visits ( @@ -22,7 +25,7 @@ PARTITION BY toYYYYMM(VisitDate) ORDER BY Hour; ``` -La clave de partición también puede ser una tupla de expresiones (similar a la [Clave primaria](mergetree.md#primary-keys-and-indexes-in-queries)). Por ejemplo: +La clave de partición también puede ser una tupla de expresiones (similar a la [clave primaria](mergetree.md#primary-keys-and-indexes-in-queries)). Por ejemplo: ``` sql ENGINE = ReplicatedCollapsingMergeTree('/clickhouse/tables/name', 'replica1', Sign) PARTITION BY (toMonday(StartDate), EventType) ORDER BY (CounterID, StartDate, intHash32(UserID)); ``` En este ejemplo, establecemos la partición por los tipos de eventos que se produjeron durante la semana actual. Al insertar datos nuevos en una tabla, estos datos se almacenan como una parte separada (porción) ordenada por la clave principal. En 10-15 minutos después de insertar, las partes de la misma partición se fusionan en toda la parte. !!! info "INFO" - Una combinación solo funciona para partes de datos que tienen el mismo valor para la expresión de partición. Esto significa **no debe hacer particiones demasiado granulares** (más de un millar de particiones). De lo contrario, el `SELECT` consulta funciona mal debido a un número excesivamente grande de archivos en el sistema de archivos y descriptores de archivos abiertos. + Una combinación solo funciona para partes de datos que tienen el mismo valor para la expresión de partición. Esto significa **no deberías hacer particiones demasiado granulares** (más de un millar de particiones). De lo contrario, el `SELECT` consulta funciona mal debido a un número excesivamente grande de archivos en el sistema de archivos y descriptores de archivos abiertos. -Descripción [sistema.parte](../system_tables.md#system_tables-parts) tabla para ver las partes y particiones de la tabla. Por ejemplo, supongamos que tenemos un `visits` tabla con partición por mes. Vamos a realizar el `SELECT` Consulta para el `system.parts` tabla: +Utilice la tabla [system.parts](../../../operations/system_tables.md#system_tables-parts) para ver las partes y particiones de la tabla. Por ejemplo, supongamos que tenemos un `visits` tabla con partición por mes. Vamos a realizar el `SELECT` consulta para el `system.parts` tabla: ``` sql SELECT @@ -62,7 +65,7 @@ WHERE table = 'visits' El `partition` columna contiene los nombres de las particiones. Hay dos particiones en este ejemplo: `201901` y `201902`.
Puede utilizar este valor de columna para especificar el nombre de partición en [ALTER … PARTITION](#alter_manipulations-with-partitions) consulta. -El `name` columna contiene los nombres de las partes de datos de partición. Puede utilizar esta columna para especificar el nombre de la pieza [ALTERAR PIEZA DE ADJUNTO](#alter_attach-partition) consulta. +El `name` columna contiene los nombres de las partes de datos de partición. Puede utilizar esta columna para especificar el nombre de la pieza [ALTER ATTACH PART](#alter_attach-partition) consulta. Vamos a desglosar el nombre de la primera parte: `201901_1_3_1`: @@ -76,7 +79,7 @@ Vamos a desglosar el nombre de la primera parte: `201901_1_3_1`: El `active` columna muestra el estado de la pieza. `1` está activo; `0` está inactivo. Las partes inactivas son, por ejemplo, las partes de origen que quedan después de fusionarse con una parte más grande. Las partes de datos dañadas también se indican como inactivas. -Como puede ver en el ejemplo, hay varias partes separadas de la misma partición (por ejemplo, `201901_1_3_1` y `201901_1_9_2`). Esto significa que estas partes aún no están fusionadas. ClickHouse combina las partes insertadas de datos periódicamente, aproximadamente 15 minutos después de la inserción. Además, puede realizar una fusión no programada utilizando el [OPTIMIZAR](../../query_language/misc.md#misc_operations-optimize) consulta. Ejemplo: +Como puede ver en el ejemplo, hay varias partes separadas de la misma partición (por ejemplo, `201901_1_3_1` y `201901_1_9_2`). Esto significa que estas partes aún no están fusionadas. ClickHouse combina las partes insertadas de datos periódicamente, aproximadamente 15 minutos después de la inserción. Además, puede realizar una fusión no programada utilizando el [OPTIMIZE](../../../sql_reference/statements/misc.md#misc_operations-optimize) consulta. Ejemplo: ``` sql OPTIMIZE TABLE visits PARTITION 201902; @@ -115,10 +118,10 @@ drwxr-xr-x 2 clickhouse clickhouse 4096 Feb 1 16:48 detached Carpeta ‘201901\_1\_1\_0’, ‘201901\_1\_7\_1’ y así sucesivamente son los directorios de las partes. Cada parte se relaciona con una partición correspondiente y contiene datos solo para un mes determinado (la tabla de este ejemplo tiene particiones por mes). -El `detached` el directorio contiene partes que se separaron de la tabla utilizando el [SEPARAR](#alter_detach-partition) consulta. Las partes dañadas también se mueven a este directorio, en lugar de eliminarse. El servidor no utiliza las piezas del `detached` directorio. Puede agregar, eliminar o modificar los datos de este directorio en cualquier momento; el servidor no lo sabrá hasta que ejecute el [CONECTAR](../../query_language/alter.md#alter_attach-partition) consulta. +El `detached` el directorio contiene partes que se separaron de la tabla utilizando el [DETACH](#alter_detach-partition) consulta. Las partes dañadas también se mueven a este directorio, en lugar de eliminarse. El servidor no utiliza las piezas del `detached` directory. You can add, delete, or modify the data in this directory at any time – the server will not know about this until you run the [ATTACH](../../../sql_reference/statements/alter.md#alter_attach-partition) consulta. -Tenga en cuenta que en el servidor operativo, no puede cambiar manualmente el conjunto de piezas o sus datos en el sistema de archivos, ya que el servidor no lo sabrá. Para tablas no replicadas, puede hacerlo cuando el servidor está detenido, pero no se recomienda. 
Para tablas replicadas, el conjunto de piezas no se puede cambiar en ningún caso. +Tenga en cuenta que en el servidor operativo, no puede cambiar manualmente el conjunto de piezas o sus datos en el sistema de archivos, ya que el servidor no lo sabrá. Para tablas no replicadas, puede hacer esto cuando se detiene el servidor, pero no se recomienda. Para tablas replicadas, el conjunto de piezas no se puede cambiar en ningún caso. -ClickHouse le permite realizar operaciones con las particiones: eliminarlas, copiar de una tabla a otra o crear una copia de seguridad. Consulte la lista de todas las operaciones en la sección [Manipulaciones con particiones y piezas](../../query_language/alter.md#alter_manipulations-with-partitions). +ClickHouse le permite realizar operaciones con las particiones: eliminarlas, copiar de una tabla a otra o crear una copia de seguridad. Consulte la lista de todas las operaciones en la sección [Manipulaciones con particiones y piezas](../../../sql_reference/statements/alter.md#alter_manipulations-with-partitions). -[Artículo Original](https://clickhouse.tech/docs/es/operations/table_engines/custom_partitioning_key/) +[Artículo Original](https://clickhouse.tech/docs/en/operations/table_engines/custom_partitioning_key/) diff --git a/docs/es/operations/table_engines/graphitemergetree.md b/docs/es/engines/table_engines/mergetree_family/graphitemergetree.md similarity index 68% rename from docs/es/operations/table_engines/graphitemergetree.md rename to docs/es/engines/table_engines/mergetree_family/graphitemergetree.md index 3db9f224807..f326b594d6a 100644 --- a/docs/es/operations/table_engines/graphitemergetree.md +++ b/docs/es/engines/table_engines/mergetree_family/graphitemergetree.md @@ -1,12 +1,15 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 38 +toc_title: GraphiteMergeTree --- -# GraphiteMergeTree {#graphitemergetree} +# GraphiteMergeTree {#graphitemergetree} Este motor está diseñado para el adelgazamiento y la agregación / promedio (rollup) [Grafito](http://graphite.readthedocs.io/en/latest/index.html) datos. Puede ser útil para los desarrolladores que desean usar ClickHouse como almacén de datos para Graphite. -Puede utilizar cualquier motor de tabla ClickHouse para almacenar los datos de grafito si no necesita un paquete acumulativo, pero si necesita un paquete acumulativo, use `GraphiteMergeTree`. El motor reduce el volumen de almacenamiento y aumenta la eficiencia de las consultas de Grafito. +Puede usar cualquier motor de tabla ClickHouse para almacenar los datos de Graphite si no necesita un paquete acumulativo, pero si necesita un paquete acumulativo, use `GraphiteMergeTree`. El motor reduce el volumen de almacenamiento y aumenta la eficiencia de las consultas de Graphite. El motor hereda propiedades de [Método de codificación de datos:](mergetree.md). @@ -27,7 +30,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] [SETTINGS name=value, ...] ``` -Vea una descripción detallada del [CREAR TABLA](../../query_language/create.md#create-table-query) consulta. +Vea una descripción detallada del [CREATE TABLE](../../../sql_reference/statements/create.md#create-table-query) consulta.
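For orientation, a minimal sketch of such a table, assuming the default column names and a rollup configuration section named `graphite_rollup` (both are discussed below):

``` sql
CREATE TABLE graphite_data
(
    `Path` String,
    `Time` DateTime,
    `Value` Float64,
    `Timestamp` UInt32
)
ENGINE = GraphiteMergeTree('graphite_rollup')
PARTITION BY toYYYYMM(Time)
ORDER BY (Path, Time);
```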
Una tabla para los datos de grafito debe tener las siguientes columnas para los siguientes datos: @@ -45,7 +48,7 @@ Los nombres de estas columnas deben establecerse en la configuración acumulativ **GraphiteMergeTree parámetros** -- `config_section` — Nombre de la sección en el archivo de configuración, donde se establecen las reglas de acumulación. +- `config_section` — Name of the section in the configuration file, where are the rules of rollup set. **Cláusulas de consulta** @@ -72,13 +75,13 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] Todos los parámetros excepto `config_section` el mismo significado que en `MergeTree`. -- `config_section` — Nombre de la sección en el archivo de configuración, donde se establecen las reglas de acumulación. +- `config_section` — Name of the section in the configuration file, where are the rules of rollup set. ## Configuración acumulativa {#rollup-configuration} -La configuración del paquete acumulativo está definida por [graphite\_rollup](../server_settings/settings.md#server_settings-graphite_rollup) parámetro en la configuración del servidor. El nombre del parámetro podría ser cualquiera. Puede crear varias configuraciones y usarlas para diferentes tablas. +La configuración del paquete acumulativo está definida por [graphite\_rollup](../../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-graphite_rollup) parámetro en la configuración del servidor. El nombre del parámetro podría ser cualquiera. Puede crear varias configuraciones y usarlas para diferentes tablas. Estructura de configuración Rollup: @@ -87,10 +90,10 @@ Estructura de configuración Rollup: ### Columnas requeridas {#required-columns} -- `path_column_name` — El nombre de la columna que almacena el nombre de la métrica (sensor de grafito). Valor predeterminado: `Path`. -- `time_column_name` — El nombre de la columna que almacena el tiempo de medición de la métrica. Valor predeterminado: `Time`. -- `value_column_name` — El nombre de la columna que almacena el valor de la métrica a la hora establecida en `time_column_name`. Valor predeterminado: `Value`. -- `version_column_name` — El nombre de la columna que almacena la versión de la métrica. Valor predeterminado: `Timestamp`. +- `path_column_name` — The name of the column storing the metric name (Graphite sensor). Default value: `Path`. +- `time_column_name` — The name of the column storing the time of measuring the metric. Default value: `Time`. +- `value_column_name` — The name of the column storing the value of the metric at the time set in `time_column_name`. Valor predeterminado: `Value`. +- `version_column_name` — The name of the column storing the version of the metric. Default value: `Timestamp`. ### Patrón {#patterns} @@ -128,10 +131,10 @@ Al procesar una fila, ClickHouse comprueba las reglas en el `pattern` apartado. Campos para `pattern` y `default` apartado: -- `regexp`– Un patrón para el nombre de la métrica. -- `age` – La edad mínima de los datos en segundos. -- `precision`– Cómo definir con precisión la edad de los datos en segundos. Debe ser un divisor para 86400 (segundos en un día). -- `function` – El nombre de la función de agregación que se aplicará a los datos cuya antigüedad se encuentra dentro del intervalo `[age, age + precision]`. +- `regexp`– A pattern for the metric name. +- `age` – The minimum age of the data in seconds. +- `precision`– How precisely to define the age of the data in seconds. Should be a divisor for 86400 (seconds in a day). 
+- `function` – The name of the aggregating function to apply to data whose age falls within the range `[age, age + precision]`. ### Ejemplo de configuración {#configuration-example} @@ -168,4 +171,4 @@ Campos para `pattern` y `default` apartado: ``` -[Artículo Original](https://clickhouse.tech/docs/es/operations/table_engines/graphitemergetree/) +[Artículo Original](https://clickhouse.tech/docs/en/operations/table_engines/graphitemergetree/) diff --git a/docs/es/engines/table_engines/mergetree_family/index.md b/docs/es/engines/table_engines/mergetree_family/index.md new file mode 100644 index 00000000000..339e1dd8eb3 --- /dev/null +++ b/docs/es/engines/table_engines/mergetree_family/index.md @@ -0,0 +1,8 @@ +--- +machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_folder_title: MergeTree Family +toc_priority: 28 +--- + + diff --git a/docs/es/operations/table_engines/mergetree.md b/docs/es/engines/table_engines/mergetree_family/mergetree.md similarity index 68% rename from docs/es/operations/table_engines/mergetree.md rename to docs/es/engines/table_engines/mergetree_family/mergetree.md index 53d89427ee0..a043effa333 100644 --- a/docs/es/operations/table_engines/mergetree.md +++ b/docs/es/engines/table_engines/mergetree_family/mergetree.md @@ -1,8 +1,11 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 30 +toc_title: "M\xE9todo de codificaci\xF3n de datos:" --- -# Método de codificación de datos: {#table_engines-mergetree} +# MergeTree {#table_engines-mergetree} El `MergeTree` motor y otros motores de esta familia (`*MergeTree`) son los motores de tabla ClickHouse más robustos. @@ -27,7 +30,7 @@ Principales características: Si es necesario, puede establecer el método de muestreo de datos en la tabla. !!! info "INFO" - El [Fusionar](merge.md) el motor no pertenece al `*MergeTree` Familia. + El [Fusionar](../special/merge.md) el motor no pertenece a la familia `*MergeTree`. ## Creación de una tabla {#table_engine-mergetree-creating-a-table} @@ -48,32 +51,32 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] [SETTINGS name=value, ...] ``` -Para obtener una descripción de los parámetros, consulte [Descripción de la consulta CREATE](../../query_language/create.md). +Para obtener una descripción de los parámetros, consulte [Descripción de la consulta CREATE](../../../sql_reference/statements/create.md). !!! note "Nota" `INDEX` es una característica experimental, ver [Índices de saltos de datos](#table_engine-mergetree-data_skipping-indexes). ### Cláusulas de consulta {#mergetree-query-clauses} -- `ENGINE` — Nombre y parámetros del motor. `ENGINE = MergeTree()`. El `MergeTree` el motor no tiene parámetros. +- `ENGINE` — Name and parameters of the engine. `ENGINE = MergeTree()`. El `MergeTree` el motor no tiene parámetros. -- `PARTITION BY` — El [clave de partición](custom_partitioning_key.md). +- `PARTITION BY` — The [clave de partición](custom_partitioning_key.md). - Para particionar por mes, utilice el `toYYYYMM(date_column)` expresión, donde `date_column` es una columna con una fecha del tipo [Fecha](../../data_types/date.md). Los nombres de partición aquí tienen el `"YYYYMM"` formato. + Para particionar por mes, utilice el `toYYYYMM(date_column)` expresión, donde `date_column` es una columna con una fecha del tipo [Fecha](../../../sql_reference/data_types/date.md). Los nombres de partición aquí tienen el `"YYYYMM"` formato. -- `ORDER BY` — La clave de clasificación.
+- `ORDER BY` — The sorting key. Una tupla de columnas o expresiones arbitrarias. Ejemplo: `ORDER BY (CounterID, EventDate)`. -- `PRIMARY KEY` — La clave principal si [difiere de la clave de clasificación](mergetree.md). +- `PRIMARY KEY` — The primary key if it [difiere de la clave de clasificación](mergetree.md). De forma predeterminada, la clave principal es la misma que la clave de ordenación (que se especifica `ORDER BY` clausula). Por lo tanto, en la mayoría de los casos no es necesario especificar un `PRIMARY KEY` clausula. -- `SAMPLE BY` — Una expresión para el muestreo. +- `SAMPLE BY` — An expression for sampling. Si se utiliza una expresión de muestreo, la clave principal debe contenerla. Ejemplo: `SAMPLE BY intHash32(UserID) ORDER BY (CounterID, EventDate, intHash32(UserID))`. -- `TTL` — Una lista de reglas que especifican la duración de almacenamiento de las filas y definen la lógica del movimiento automático de piezas [entre discos y volúmenes](#table_engine-mergetree-multiple-volumes). +- `TTL` — A list of rules specifying storage duration of rows and defining logic of automatic parts movement [entre discos y volúmenes](#table_engine-mergetree-multiple-volumes). La expresión debe tener una `Date` o `DateTime` columna como resultado. Ejemplo: `TTL date + INTERVAL 1 DAY` @@ -82,18 +85,18 @@ Para obtener una descripción de los parámetros, consulte [Descripción de la c Para obtener más información, consulte [TTL para columnas y tablas](#table_engine-mergetree-ttl) -- `SETTINGS` — Parámetros adicionales que controlan el comportamiento del `MergeTree`: +- `SETTINGS` — Additional parameters that control the behavior of the `MergeTree`: - - `index_granularity` — Número máximo de filas de datos entre las marcas de un índice. Valor predeterminado: 8192. Ver [Almacenamiento de datos](#mergetree-data-storage). - - `index_granularity_bytes` — Tamaño máximo de los gránulos de datos en bytes. Valor predeterminado: 10 MB. Para restringir el tamaño del gránulo solo por el número de filas, establezca en 0 (no recomendado). Ver [Almacenamiento de datos](#mergetree-data-storage). - - `enable_mixed_granularity_parts` — Habilita o deshabilita la transición para controlar el tamaño del gránulo `index_granularity_bytes` configuración. Antes de la versión 19.11, sólo existía el `index_granularity` ajuste para restringir el tamaño del gránulo. El `index_granularity_bytes` mejora el rendimiento de ClickHouse al seleccionar datos de tablas con filas grandes (decenas y cientos de megabytes). Si tiene tablas con filas grandes, puede habilitar esta configuración para que las tablas mejoren la eficiencia de `SELECT` consulta. - - `use_minimalistic_part_header_in_zookeeper` — Método de almacenamiento de los encabezados de partes de datos en ZooKeeper. Si `use_minimalistic_part_header_in_zookeeper=1`, entonces ZooKeeper almacena menos datos. Para obtener más información, consulte [descripción del ajuste](../server_settings/settings.md#server-settings-use_minimalistic_part_header_in_zookeeper) es “Server configuration parameters”. - - `min_merge_bytes_to_use_direct_io` — El volumen mínimo de datos para la operación de fusión que se necesita para utilizar el acceso directo de E/S al disco de almacenamiento. Al fusionar partes de datos, ClickHouse calcula el volumen total de almacenamiento de todos los datos que se van a fusionar. 
Si el volumen excede `min_merge_bytes_to_use_direct_io` bytes, ClickHouse lee y escribe los datos en el disco de almacenamiento utilizando la interfaz de E / S directa (`O_DIRECT` opción). Si `min_merge_bytes_to_use_direct_io = 0`, entonces la E/S directa está deshabilitada. Valor predeterminado: `10 * 1024 * 1024 * 1024` byte. + - `index_granularity` — Maximum number of data rows between the marks of an index. Default value: 8192. See [Almacenamiento de datos](#mergetree-data-storage). + - `index_granularity_bytes` — Maximum size of data granules in bytes. Default value: 10Mb. To restrict the granule size only by number of rows, set to 0 (not recommended). See [Almacenamiento de datos](#mergetree-data-storage). + - `enable_mixed_granularity_parts` — Enables or disables transitioning to control the granule size with the `index_granularity_bytes` configuración. Antes de la versión 19.11, sólo existía el `index_granularity` ajuste para restringir el tamaño del gránulo. El `index_granularity_bytes` mejora el rendimiento de ClickHouse al seleccionar datos de tablas con filas grandes (decenas y cientos de megabytes). Si tiene tablas con filas grandes, puede habilitar esta configuración para que las tablas mejoren la eficiencia de `SELECT` consulta. + - `use_minimalistic_part_header_in_zookeeper` — Storage method of the data parts headers in ZooKeeper. If `use_minimalistic_part_header_in_zookeeper=1`, entonces ZooKeeper almacena menos datos. Para obtener más información, consulte [descripción del ajuste](../../../operations/server_configuration_parameters/settings.md#server-settings-use_minimalistic_part_header_in_zookeeper) en “Server configuration parameters”. + - `min_merge_bytes_to_use_direct_io` — The minimum data volume for merge operation that is required for using direct I/O access to the storage disk. When merging data parts, ClickHouse calculates the total storage volume of all the data to be merged. If the volume exceeds `min_merge_bytes_to_use_direct_io` bytes, ClickHouse lee y escribe los datos en el disco de almacenamiento utilizando la interfaz de E / S directa (`O_DIRECT` opcion). Si `min_merge_bytes_to_use_direct_io = 0`, entonces la E/S directa está deshabilitada. Valor predeterminado: `10 * 1024 * 1024 * 1024` byte. - - `merge_with_ttl_timeout` — Retraso mínimo en segundos antes de repetir una fusión con TTL. Valor predeterminado: 86400 (1 día). - - `write_final_mark` — Habilita o deshabilita la escritura de la marca de índice final al final de la parte de datos (después del último byte). Valor predeterminado: 1. No lo apague. - - `merge_max_block_size` — Número máximo de filas en el bloque para operaciones de fusión. Valor predeterminado: 8192. - - `storage_policy` — Política de almacenamiento. Ver [Uso de varios dispositivos de bloque para el almacenamiento de datos](#table_engine-mergetree-multiple-volumes). + - `merge_with_ttl_timeout` — Minimum delay in seconds before repeating a merge with TTL. Default value: 86400 (1 day). + - `write_final_mark` — Enables or disables writing the final index mark at the end of data part (after the last byte). Default value: 1. Don't turn it off. + - `merge_max_block_size` — Maximum number of rows in block for merge operations. Default value: 8192. + - `storage_policy` — Storage policy. See [Uso de varios dispositivos de bloque para el almacenamiento de datos](#table_engine-mergetree-multiple-volumes). 
**Ejemplo de configuración de secciones** @@ -103,7 +106,7 @@ ENGINE MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDa En el ejemplo, configuramos la partición por mes. -También establecemos una expresión para el muestreo como un hash por el ID de usuario. Esto le permite pseudoaleatorizar los datos en la tabla para cada `CounterID` y `EventDate`. Si definir un [MUESTRA](../../query_language/select.md#select-sample-clause) cláusula al seleccionar los datos, ClickHouse devolverá una muestra de datos pseudoaleatoria uniforme para un subconjunto de usuarios. +También establecemos una expresión para el muestreo como un hash por el ID de usuario. Esto le permite pseudoaleatorizar los datos en la tabla para cada `CounterID` y `EventDate`. Si define un [SAMPLE](../../../sql_reference/statements/select.md#select-sample-clause) cláusula al seleccionar los datos, ClickHouse devolverá una muestra de datos pseudoaleatoria uniforme para un subconjunto de usuarios. El `index_granularity` se puede omitir porque 8192 es el valor predeterminado. @@ -125,10 +128,10 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] **Parámetros MergeTree()** -- `date-column` — El nombre de una columna del [Fecha](../../data_types/date.md) tipo. ClickHouse crea automáticamente particiones por mes en función de esta columna. Los nombres de partición están en el `"YYYYMM"` formato. -- `sampling_expression` — Una expresión para el muestreo. -- `(primary, key)` — Clave principal. Tipo: [Tupla()](../../data_types/tuple.md) -- `index_granularity` — La granularidad de un índice. El número de filas de datos entre “marks” de un índice. El valor 8192 es apropiado para la mayoría de las tareas. +- `date-column` — The name of a column of the [Fecha](../../../sql_reference/data_types/date.md) tipo. ClickHouse crea automáticamente particiones por mes en función de esta columna. Los nombres de partición están en el `"YYYYMM"` formato. +- `sampling_expression` — An expression for sampling. +- `(primary, key)` — Primary key. Type: [Tupla()](../../../sql_reference/data_types/tuple.md) +- `index_granularity` — The granularity of an index. The number of data rows between the “marks” de un índice. El valor 8192 es apropiado para la mayoría de las tareas. **Ejemplo** @@ -155,7 +158,7 @@ El tamaño del gránulo es restringido por `index_granularity` y `index_granular Tome el `(CounterID, Date)` clave primaria como ejemplo. En este caso, la clasificación y el índice se pueden ilustrar de la siguiente manera: - Whole data: [-------------------------------------------------------------------------] + Whole data: [---------------------------------------------] CounterID: [aaaaaaaaaaaaaaaaaabbbbcdeeeeeeeeeeeeefgggggggghhhhhhhhhiiiiiiiiikllllllll] Date: [1111111222222233331233211111222222333211111112122222223111112223311122333] Marks: | | | | | | | | | | | @@ -185,7 +188,7 @@ El número de columnas en la clave principal no está explícitamente limitado. Si la clave principal es `(a, b)`, a continuación, añadir otra columna `c` mejorará el rendimiento si se cumplen las siguientes condiciones: - Hay consultas con una condición en la columna `c`. - - Rangos de datos largos (varias veces más `index_granularity`) con valores idénticos para `(a, b)` hijo comunes. En otras palabras, al agregar otra columna le permite omitir rangos de datos bastante largos. + - Rangos de datos largos (varias veces más `index_granularity`) con valores idénticos para `(a, b)` son comunes. 
En otras palabras, al agregar otra columna le permite omitir rangos de datos bastante largos. - Mejorar la compresión de datos. @@ -206,7 +209,7 @@ Esta característica es útil cuando se [SummingMergeTree](summingmergetree.md) En este caso, tiene sentido dejar solo unas pocas columnas en la clave principal que proporcionarán análisis de rango eficientes y agregarán las columnas de dimensión restantes a la tupla de clave de clasificación. -[ALTERAR](../../query_language/alter.md) de la clave de ordenación es una operación ligera porque cuando se agrega una nueva columna simultáneamente a la tabla y a la clave de ordenación, no es necesario cambiar las partes de datos existentes. Dado que la clave de ordenación anterior es un prefijo de la nueva clave de ordenación y no hay datos en la columna recién agregada, los datos se ordenan tanto por las claves de ordenación antiguas como por las nuevas en el momento de la modificación de la tabla. +[ALTER](../../../sql_reference/statements/alter.md) de la clave de ordenación es una operación ligera porque cuando se agrega una nueva columna simultáneamente a la tabla y a la clave de ordenación, las partes de datos existentes no necesitan ser cambiadas. Dado que la clave de ordenación anterior es un prefijo de la nueva clave de ordenación y no hay datos en la columna recién agregada, los datos se ordenan tanto por las claves de ordenación antiguas como por las nuevas en el momento de la modificación de la tabla. ### Uso de índices y particiones en consultas {#use-of-indexes-and-partitions-in-queries} @@ -228,7 +231,7 @@ SELECT count() FROM table WHERE ((EventDate >= toDate('2014-01-01') AND EventDat ClickHouse utilizará el índice de clave principal para recortar datos incorrectos y la clave de partición mensual para recortar particiones que están en intervalos de fechas incorrectos. -Las consultas anteriores muestran que el índice se usa incluso para expresiones complejas. La lectura de la tabla está organizada de modo que el uso del índice no puede ser más lento que un análisis completo. +Las consultas anteriores muestran que el índice se usa incluso para expresiones complejas. La lectura de la tabla está organizada de modo que el uso del índice no puede ser más lento que un escaneo completo. En el siguiente ejemplo, el índice no se puede usar. ``` sql SELECT count() FROM table WHERE CounterID = 34 OR URL LIKE '%upyachka%' ``` -Para comprobar si ClickHouse puede usar el índice al ejecutar una consulta, use la configuración [Fecha de nacimiento](../settings/settings.md#settings-force_index_by_date) y [force\_primary\_key](../settings/settings.md). +Para comprobar si ClickHouse puede usar el índice al ejecutar una consulta, use la configuración [force\_index\_by\_date](../../../operations/settings/settings.md#settings-force_index_by_date) y [force\_primary\_key](../../../operations/settings/settings.md). La clave para particionar por mes permite leer solo aquellos bloques de datos que contienen fechas del rango adecuado. En este caso, el bloque de datos puede contener datos para muchas fechas (hasta un mes). Dentro de un bloque, los datos se ordenan por clave principal, que puede no contener la fecha como la primera columna. Debido a esto, el uso de una consulta con solo una condición de fecha que no especifica el prefijo de clave principal hará que se lean más datos que para una sola fecha.
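A hedged sketch of how the `force_index_by_date` and `force_primary_key` settings mentioned above can act as a guard; the table follows the examples in this section:

``` sql
-- Throws an exception instead of silently falling back to a wider read
-- when the date (partitioning) key cannot be applied to the query.
SELECT count()
FROM table
WHERE CounterID = 34
SETTINGS force_index_by_date = 1;
```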
@@ -290,28 +293,28 @@ SELECT count() FROM table WHERE u64 * i32 == 10 AND u64 * length(s) >= 1234 - `set(max_rows)` - Almacena valores únicos de la expresión especificada (no más de `max_rows` películas, `max_rows=0` medio “no limits”). Utiliza los valores para comprobar si `WHERE` expresión no es satisfactorio en un bloque de datos. + Almacena valores únicos de la expresión especificada (no más de `max_rows` filas, `max_rows=0` medio “no limits”). Utiliza los valores para comprobar si `WHERE` expresión no es satisfactorio en un bloque de datos. - `ngrambf_v1(n, size_of_bloom_filter_in_bytes, number_of_hash_functions, random_seed)` - Tiendas a [Filtro de floración](https://en.wikipedia.org/wiki/Bloom_filter) que contiene todos los ngrams de un bloque de datos. Funciona solo con cadenas. Puede ser utilizado para la optimización de `equals`, `like` y `in` expresiones. + Tiendas a [Bloom filter](https://en.wikipedia.org/wiki/Bloom_filter) que contiene todos los ngrams de un bloque de datos. Funciona solo con cadenas. Puede ser utilizado para la optimización de `equals`, `like` y `in` expresiones. - - `n` — tamaño del ngram, - - `size_of_bloom_filter_in_bytes` — Tamaño del filtro Bloom en bytes (puede usar valores grandes aquí, por ejemplo, 256 o 512, porque se puede comprimir bien). - - `number_of_hash_functions` — El número de funciones hash utilizadas en el filtro Bloom. - - `random_seed` — La semilla para las funciones hash de filtro Bloom. + - `n` — ngram size, + - `size_of_bloom_filter_in_bytes` — Bloom filter size in bytes (you can use large values here, for example, 256 or 512, because it can be compressed well). + - `number_of_hash_functions` — The number of hash functions used in the Bloom filter. + - `random_seed` — The seed for Bloom filter hash functions. - `tokenbf_v1(size_of_bloom_filter_in_bytes, number_of_hash_functions, random_seed)` Lo mismo que `ngrambf_v1`, pero almacena tokens en lugar de ngrams. Los tokens son secuencias separadas por caracteres no alfanuméricos. -- `bloom_filter([false_positive])` — Almacena un [Filtro de floración](https://en.wikipedia.org/wiki/Bloom_filter) para las columnas especificadas. +- `bloom_filter([false_positive])` — Stores a [Filtro de floración](https://en.wikipedia.org/wiki/Bloom_filter) para las columnas especificadas. Opcional `false_positive` parámetro es la probabilidad de recibir una respuesta falsa positiva del filtro. Valores posibles: (0, 1). Valor predeterminado: 0.025. Tipos de datos admitidos: `Int*`, `UInt*`, `Float*`, `Enum`, `Date`, `DateTime`, `String`, `FixedString`, `Array`, `LowCardinality`, `Nullable`. - Las siguientes funciones pueden usarlo: [igual](../../query_language/functions/comparison_functions.md), [notEquals](../../query_language/functions/comparison_functions.md), [es](../../query_language/functions/in_functions.md), [noEn](../../query_language/functions/in_functions.md), [Tener](../../query_language/functions/array_functions.md). + Las siguientes funciones pueden usarlo: [igual](../../../sql_reference/functions/comparison_functions.md), [notEquals](../../../sql_reference/functions/comparison_functions.md), [en](../../../sql_reference/functions/in_functions.md), [noEn](../../../sql_reference/functions/in_functions.md), [tener](../../../sql_reference/functions/array_functions.md). @@ -327,24 +330,24 @@ Condiciones en el `WHERE` cláusula contiene llamadas de las funciones que opera El `set` index se puede utilizar con todas las funciones. 
Subconjuntos de funciones para otros índices se muestran en la siguiente tabla. -| Función (operador) / Índice | Clave primaria | minmax | Descripción | Sistema abierto. | bloom\_filter | -|--------------------------------------------------------------------------------------------------------|----------------|--------|-------------|------------------|---------------| -| [¿Por qué?)](../../query_language/functions/comparison_functions.md#function-equals) | ✔ | ✔ | ✔ | ✔ | ✔ | -| [notEquals(!Número)](../../query_language/functions/comparison_functions.md#function-notequals) | ✔ | ✔ | ✔ | ✔ | ✔ | -| [como](../../query_language/functions/string_search_functions.md#function-like) | ✔ | ✔ | ✔ | ✗ | ✗ | -| [No como](../../query_language/functions/string_search_functions.md#function-notlike) | ✔ | ✔ | ✔ | ✗ | ✗ | -| [Comienza con](../../query_language/functions/string_functions.md#startswith) | ✔ | ✔ | ✔ | ✔ | ✗ | -| [Finaliza con](../../query_language/functions/string_functions.md#endswith) | ✗ | ✗ | ✔ | ✔ | ✗ | -| [multiSearchAny](../../query_language/functions/string_search_functions.md#function-multisearchany) | ✗ | ✗ | ✔ | ✗ | ✗ | -| [es](../../query_language/functions/in_functions.md#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | -| [noEn](../../query_language/functions/in_functions.md#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | -| [menos (\<)](../../query_language/functions/comparison_functions.md#function-less) | ✔ | ✔ | ✗ | ✗ | ✗ | -| [Alcalde (\>)](../../query_language/functions/comparison_functions.md#function-greater) | ✔ | ✔ | ✗ | ✗ | ✗ | -| [menosOrEquals (\<=)](../../query_language/functions/comparison_functions.md#function-lessorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | -| [mayorOrEquals (\>=)](../../query_language/functions/comparison_functions.md#function-greaterorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | -| [Vaciar](../../query_language/functions/array_functions.md#function-empty) | ✔ | ✔ | ✗ | ✗ | ✗ | -| [notEmpty](../../query_language/functions/array_functions.md#function-notempty) | ✔ | ✔ | ✗ | ✗ | ✗ | -| hasToken | ✗ | ✗ | ✗ | ✔ | ✗ | +| Función (operador) / Índice | clave primaria | minmax | Descripción | Sistema abierto. 
| bloom\_filter | +|----------------------------------------------------------------------------------------------------------|----------------|--------|-------------|------------------|---------------| +| [igual (=, ==)](../../../sql_reference/functions/comparison_functions.md#function-equals) | ✔ | ✔ | ✔ | ✔ | ✔ | +| [notEquals(!=, \<\>)](../../../sql_reference/functions/comparison_functions.md#function-notequals) | ✔ | ✔ | ✔ | ✔ | ✔ | +| [como](../../../sql_reference/functions/string_search_functions.md#function-like) | ✔ | ✔ | ✔ | ✗ | ✗ | +| [No como](../../../sql_reference/functions/string_search_functions.md#function-notlike) | ✔ | ✔ | ✔ | ✗ | ✗ | +| [Comienza con](../../../sql_reference/functions/string_functions.md#startswith) | ✔ | ✔ | ✔ | ✔ | ✗ | +| [Finaliza con](../../../sql_reference/functions/string_functions.md#endswith) | ✗ | ✗ | ✔ | ✔ | ✗ | +| [multiSearchAny](../../../sql_reference/functions/string_search_functions.md#function-multisearchany) | ✗ | ✗ | ✔ | ✗ | ✗ | +| [en](../../../sql_reference/functions/in_functions.md#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | +| [noEn](../../../sql_reference/functions/in_functions.md#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | +| [menos (\<)](../../../sql_reference/functions/comparison_functions.md#function-less) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [mayor (\>)](../../../sql_reference/functions/comparison_functions.md#function-greater) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [menosOrEquals (\<=)](../../../sql_reference/functions/comparison_functions.md#function-lessorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [mayorOrEquals (\>=)](../../../sql_reference/functions/comparison_functions.md#function-greaterorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [vaciar](../../../sql_reference/functions/array_functions.md#function-empty) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [notEmpty](../../../sql_reference/functions/array_functions.md#function-notempty) | ✔ | ✔ | ✗ | ✗ | ✗ | +| hasToken | ✗ | ✗ | ✗ | ✔ | ✗ | Las funciones con un argumento constante que es menor que el tamaño de ngram no pueden ser utilizadas por `ngrambf_v1` para la optimización de consultas. @@ -375,7 +378,7 @@ Determina la duración de los valores. El `TTL` se puede establecer para toda la tabla y para cada columna individual. TTL de nivel de tabla también puede especificar la lógica de movimiento automático de datos entre discos y volúmenes. -Las expresiones deben evaluar [Fecha](../../data_types/date.md) o [FechaHora](../../data_types/datetime.md) tipo de datos. +Las expresiones deben evaluar [Fecha](../../../sql_reference/data_types/date.md) o [FechaHora](../../../sql_reference/data_types/datetime.md) tipo de datos. Ejemplo: @@ -384,7 +387,7 @@ TTL time_column TTL time_column + interval ``` -Definir `interval`, utilizar [intervalo de tiempo](../../query_language/operators.md#operators-datetime) operador. +Definir `interval`, utilizar [intervalo de tiempo](../../../sql_reference/operators.md#operators-datetime) operador. ``` sql TTL date_time + INTERVAL 1 MONTH @@ -475,32 +478,30 @@ Los datos con un TTL caducado se eliminan cuando ClickHouse fusiona partes de da Cuando ClickHouse ve que los datos han caducado, realiza una combinación fuera de programación. Para controlar la frecuencia de tales fusiones, puede establecer [Método de codificación de datos:](#mergetree_setting-merge_with_ttl_timeout). Si el valor es demasiado bajo, realizará muchas fusiones fuera de horario que pueden consumir muchos recursos. -Si realiza el `SELECT` consulta entre fusiones, puede obtener datos caducados. 
Para evitarlo, use el [OPTIMIZAR](../../query_language/misc.md#misc_operations-optimize) Consulta antes `SELECT`. +Si realiza el `SELECT` consulta entre fusiones, puede obtener datos caducados. Para evitarlo, use el [OPTIMIZE](../../../sql_reference/statements/misc.md#misc_operations-optimize) consulta antes `SELECT`. -[Artículo Original](https://clickhouse.tech/docs/es/operations/table_engines/mergetree/) - -## Uso de varios dispositivos de bloque para el almacenamiento de datos {#table_engine-mergetree-multiple-volumes} +## Uso de múltiples dispositivos de bloque para el almacenamiento de datos {#table_engine-mergetree-multiple-volumes} ### Implantación {#introduction} `MergeTree` Los motores de tablas familiares pueden almacenar datos en múltiples dispositivos de bloque. Por ejemplo, puede ser útil cuando los datos de una determinada tabla se dividen implícitamente en “hot” y “cold”. Los datos más recientes se solicitan regularmente, pero solo requieren una pequeña cantidad de espacio. Por el contrario, los datos históricos de cola gorda se solicitan raramente. Si hay varios discos disponibles, el “hot” los datos pueden estar ubicados en discos rápidos (por ejemplo, SSD NVMe o en memoria), mientras que “cold” datos - en los relativamente lentos (por ejemplo, HDD). -La parte de datos es la unidad móvil mínima para `MergeTree`-mesas de motor. Los datos que pertenecen a una parte se almacenan en un disco. Las partes de datos se pueden mover entre discos en segundo plano (según la configuración del usuario) así como por medio de la [ALTERAR](../../query_language/alter.md#alter_move-partition) consulta. +La parte de datos es la unidad móvil mínima para `MergeTree`-mesas de motor. Los datos que pertenecen a una parte se almacenan en un disco. Las partes de datos se pueden mover entre discos en segundo plano (según la configuración del usuario) así como por medio de la [ALTER](../../../sql_reference/statements/alter.md#alter_move-partition) consulta. ### Plazo {#terms} -- Disco: bloquea el dispositivo montado en el sistema de archivos. -- Disco predeterminado: disco que almacena la ruta especificada en el [camino](../server_settings/settings.md#server_settings-path) configuración del servidor. -- Volumen — Conjunto ordenado de discos iguales (similar a [JBOD](https://en.wikipedia.org/wiki/Non-RAID_drive_architectures)). -- Política de almacenamiento: conjunto de volúmenes y reglas para mover datos entre ellos. +- Disk — Block device mounted to the filesystem. +- Default disk — Disk that stores the path specified in the [camino](../../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-path) configuración del servidor. +- Volume — Ordered set of equal disks (similar to [JBOD](https://en.wikipedia.org/wiki/Non-RAID_drive_architectures)). +- Storage policy — Set of volumes and the rules for moving data between them. -Los nombres dados a las entidades descritas se pueden encontrar en las tablas del sistema, [sistema.almacenamiento\_policies](../system_tables.md#system_tables-storage_policies) y [sistema.Discoteca](../system_tables.md#system_tables-disks). Para aplicar una de las directivas de almacenamiento configuradas para una tabla, `storage_policy` establecimiento de `MergeTree`-mesas de la familia del motor. 
+Los nombres dados a las entidades descritas se pueden encontrar en las tablas del sistema, [sistema.almacenamiento\_policies](../../../operations/system_tables.md#system_tables-storage_policies) y [sistema.disco](../../../operations/system_tables.md#system_tables-disks). Para aplicar una de las directivas de almacenamiento configuradas para una tabla, `storage_policy` establecimiento de `MergeTree`-motor de la familia de las tablas. ### Configuración {#table_engine-mergetree-multiple-volumes-configure} -Los discos, los volúmenes y las políticas de almacenamiento deben declararse `` etiqueta ya sea en el archivo principal `config.xml` o en un archivo distinto en el `config.d` Directorio. +Discos, volúmenes y políticas de almacenamiento deben ser declaradas dentro de la `` etiqueta ya sea en el archivo principal `config.xml` o en un archivo distinto en el `config.d` directorio. -Estructura de configuración: +Configuración de la estructura: ``` xml @@ -526,9 +527,9 @@ Estructura de configuración: Tags: -- `` — Nombre del disco. Los nombres deben ser diferentes para todos los discos. -- `path` — ruta bajo la cual un servidor almacenará datos (`data` y `shadow` carpetas), debe terminarse con ‘/’. -- `keep_free_space_bytes` — la cantidad de espacio libre en disco que debe reservarse. +- `` — Disk name. Names must be different for all disks. +- `path` — path under which a server will store data (`data` y `shadow` carpetas), debe terminarse con ‘/’. +- `keep_free_space_bytes` — the amount of free disk space to be reserved. El orden de la definición del disco no es importante. @@ -563,11 +564,11 @@ Marcado de configuración de directivas de almacenamiento: Tags: -- `policy_name_N` — Nombre de la póliza. Los nombres de directiva deben ser únicos. -- `volume_name_N` — Nombre del volumen. Los nombres de volumen deben ser únicos. -- `disk` — un disco dentro de un volumen. -- `max_data_part_size_bytes` — el tamaño máximo de una pieza que puede almacenarse en cualquiera de los discos del volumen. -- `move_factor` — cuando la cantidad de espacio disponible es inferior a este factor, los datos comienzan a moverse automáticamente en el siguiente volumen si los hay (por defecto, 0.1). +- `policy_name_N` — Policy name. Policy names must be unique. +- `volume_name_N` — Volume name. Volume names must be unique. +- `disk` — a disk within a volume. +- `max_data_part_size_bytes` — the maximum size of a part that can be stored on any of the volume's disks. +- `move_factor` — when the amount of available space gets lower than this factor, data automatically start to move on the next volume if any (by default, 0.1). Cofiguration ejemplos: @@ -601,9 +602,9 @@ Cofiguration ejemplos: ``` -En un ejemplo dado, el `hdd_in_order` la política implementa el [Ronda-robin](https://en.wikipedia.org/wiki/Round-robin_scheduling) enfoque. Por lo tanto, esta política define solo un volumen (`single`), las partes de datos se almacenan en todos sus discos en orden circular. Dicha política puede ser bastante útil si hay varios discos similares montados en el sistema, pero RAID no está configurado. Tenga en cuenta que cada unidad de disco individual no es confiable y es posible que desee compensarlo con un factor de replicación de 3 o más. +En un ejemplo dado, el `hdd_in_order` implementa la política de [Ronda-robin](https://en.wikipedia.org/wiki/Round-robin_scheduling) enfoque. Por lo tanto esta política define un sólo volumen (`single`), las partes de datos se almacenan en todos sus discos en orden circular. 
Dicha política puede ser bastante útil si hay varios discos similares montados en el sistema, pero RAID no está configurado. Tenga en cuenta que cada unidad de disco individual no es confiable y es posible que desee compensarlo con un factor de replicación de 3 o más. -Si hay diferentes tipos de discos disponibles en el sistema, `moving_from_ssd_to_hdd` política se puede utilizar en su lugar. Volumen `hot` consta de un disco SSD (`fast_ssd`), y el tamaño máximo de una pieza que se puede almacenar en este volumen es de 1 GB. Todas las piezas con el tamaño más grande que 1GB serán almacenadas directamente en `cold` volumen, que contiene un disco duro `disk1`. +Si hay diferentes tipos de discos disponibles en el sistema, `moving_from_ssd_to_hdd` la política puede ser utilizado en su lugar. Volumen `hot` consta de un disco SSD (`fast_ssd`), y el tamaño máximo de una parte que puede ser almacenado en este volumen es de 1GB. Todas las piezas con el tamaño de más de 1GB se almacenan directamente en el `cold` volumen, que contiene un disco duro `disk1`. Además, una vez que el disco `fast_ssd` se llena en más del 80%, los datos se transferirán al `disk1` por un proceso en segundo plano. El orden de enumeración de volúmenes dentro de una directiva de almacenamiento es importante. Una vez que un volumen está sobrellenado, los datos se mueven al siguiente. El orden de la enumeración del disco también es importante porque los datos se almacenan en ellos por turnos. @@ -629,9 +630,9 @@ El `default` política de almacenamiento implica el uso de un solo volumen, que En el caso de `MergeTree` tablas, los datos están llegando al disco de diferentes maneras: - Como resultado de un inserto (`INSERT` consulta). -- Durante las fusiones de fondo y [mutación](../../query_language/alter.md#alter-mutations). +- Durante las fusiones de fondo y [mutación](../../../sql_reference/statements/alter.md#alter-mutations). - Al descargar desde otra réplica. -- Como resultado de la congelación de particiones [ALTER TABLE … CONGELAR LA PARTICIÓN](../../query_language/alter.md#alter_freeze-partition). +- Como resultado de la congelación de particiones [ALTER TABLE … FREEZE PARTITION](../../../sql_reference/statements/alter.md#alter_freeze-partition). En todos estos casos, excepto las mutaciones y la congelación de particiones, una pieza se almacena en un volumen y un disco de acuerdo con la política de almacenamiento dada: @@ -641,13 +642,13 @@ En todos estos casos, excepto las mutaciones y la congelación de particiones, u Bajo el capó, las mutaciones y la congelación de particiones hacen uso de [enlaces duros](https://en.wikipedia.org/wiki/Hard_link). Los enlaces duros entre diferentes discos no son compatibles, por lo tanto, en tales casos las partes resultantes se almacenan en los mismos discos que los iniciales. En el fondo, las partes se mueven entre volúmenes en función de la cantidad de espacio libre (`move_factor` parámetro) según el orden en que se declaran los volúmenes en el archivo de configuración. -Los datos nunca se transfieren desde el último y al primero. Uno puede usar tablas del sistema [sistema.part\_log](../system_tables.md#system_tables-part-log) (campo `type = MOVE_PART`) y [sistema.parte](../system_tables.md#system_tables-parts) (campo `path` y `disk`) para monitorear movimientos de fondo. Además, la información detallada se puede encontrar en los registros del servidor. +Los datos nunca se transfieren desde el último y al primero. 
Uno puede usar tablas del sistema [sistema.part\_log](../../../operations/system_tables.md#system_tables-part-log) (campo `type = MOVE_PART`) y [sistema.parte](../../../operations/system_tables.md#system_tables-parts) (campo `path` y `disk`) para monitorear el fondo se mueve. Además, la información detallada se puede encontrar en los registros del servidor. -El usuario puede forzar el movimiento de una pieza o una partición de un volumen a otro mediante la consulta [ALTER TABLE … MUEVA PARTE\|PARTICIÓN … AL VOLUMEN\|DISCO …](../../query_language/alter.md#alter_move-partition), todas las restricciones para las operaciones en segundo plano se tienen en cuenta. La consulta inicia un movimiento por sí misma y no espera a que se completen las operaciones en segundo plano. El usuario recibirá un mensaje de error si no hay suficiente espacio libre disponible o si no se cumple alguna de las condiciones requeridas. +El usuario puede forzar el movimiento de una pieza o una partición de un volumen a otro mediante la consulta [ALTER TABLE … MOVE PART\|PARTITION … TO VOLUME\|DISK …](../../../sql_reference/statements/alter.md#alter_move-partition), todas las restricciones para las operaciones en segundo plano se tienen en cuenta. La consulta inicia un movimiento por sí misma y no espera a que se completen las operaciones en segundo plano. El usuario recibirá un mensaje de error si no hay suficiente espacio libre disponible o si no se cumple alguna de las condiciones requeridas. Mover datos no interfiere con la replicación de datos. Por lo tanto, se pueden especificar diferentes directivas de almacenamiento para la misma tabla en diferentes réplicas. Después de la finalización de las fusiones y mutaciones de fondo, las partes viejas se eliminan solo después de un cierto período de tiempo (`old_parts_lifetime`). -Durante este tiempo, no se mueven a otros volúmenes o discos. Por lo tanto, hasta que las partes finalmente se eliminen, aún se tienen en cuenta para la evaluación del espacio en disco ocupado. +Durante este tiempo, no se mueven a otros volúmenes o discos. Por lo tanto, hasta que finalmente se retira, se tomará en cuenta para la evaluación de los ocupados de espacio en disco. [Artículo Original](https://clickhouse.tech/docs/ru/operations/table_engines/mergetree/) diff --git a/docs/es/operations/table_engines/replacingmergetree.md b/docs/es/engines/table_engines/mergetree_family/replacingmergetree.md similarity index 72% rename from docs/es/operations/table_engines/replacingmergetree.md rename to docs/es/engines/table_engines/mergetree_family/replacingmergetree.md index 149c0211662..09aa311d9c6 100644 --- a/docs/es/operations/table_engines/replacingmergetree.md +++ b/docs/es/engines/table_engines/mergetree_family/replacingmergetree.md @@ -1,12 +1,15 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 33 +toc_title: ReplacingMergeTree --- -# ReplacingMergeTree {#replacingmergetree} +# Replacingmergetree {#replacingmergetree} -El motor difiere de [Método de codificación de datos:](mergetree.md#table_engines-mergetree) en que elimina las entradas duplicadas con el mismo valor de clave principal (o más exactamente, con el mismo [clave de clasificación](mergetree.md) de valor). +El motor difiere de [Método de codificación de datos:](mergetree.md#table_engines-mergetree) en que elimina las entradas duplicadas con el mismo valor de clave principal (o más exactamente, con el mismo [clave de clasificación](mergetree.md) valor). 
-La desduplicación de datos solo se produce durante una fusión. La fusión se produce en segundo plano en un momento desconocido, por lo que no se puede planificar para ello. Algunos de los datos pueden permanecer sin procesar. Aunque puede ejecutar una fusión no programada utilizando el `OPTIMIZE` Consulta, no cuente con su uso, porque el `OPTIMIZE` consulta leerá y escribirá una gran cantidad de datos. +La desduplicación de datos solo se produce durante una fusión. La fusión ocurre en segundo plano en un momento desconocido, por lo que no puede planificarla. Algunos de los datos pueden permanecer sin procesar. Aunque puede ejecutar una fusión no programada utilizando el `OPTIMIZE` consulta, no cuente con usarlo, porque el `OPTIMIZE` consulta leerá y escribirá una gran cantidad de datos. Así, `ReplacingMergeTree` es adecuado para borrar datos duplicados en segundo plano para ahorrar espacio, pero no garantiza la ausencia de duplicados. @@ -26,11 +29,11 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] [SETTINGS name=value, ...] ``` -Para obtener una descripción de los parámetros de solicitud, consulte [descripción de la solicitud](../../query_language/create.md). +Para obtener una descripción de los parámetros de solicitud, consulte [descripción de la solicitud](../../../sql_reference/statements/create.md). **ReplacingMergeTree Parámetros** -- `ver` — columna con versión. Tipo `UInt*`, `Date` o `DateTime`. Parámetro opcional. +- `ver` — column with version. Type `UInt*`, `Date` o `DateTime`. Parámetro opcional. Al fusionar, `ReplacingMergeTree` de todas las filas con la misma clave primaria deja solo una: @@ -63,4 +66,4 @@ Todos los parámetros excepto `ver` el mismo significado que en `MergeTree`. -[Artículo Original](https://clickhouse.tech/docs/es/operations/table_engines/replacingmergetree/) +[Artículo Original](https://clickhouse.tech/docs/en/operations/table_engines/replacingmergetree/) diff --git a/docs/es/operations/table_engines/replication.md b/docs/es/engines/table_engines/mergetree_family/replication.md similarity index 87% rename from docs/es/operations/table_engines/replication.md rename to docs/es/engines/table_engines/mergetree_family/replication.md index 36f0b877b81..c84014225d0 100644 --- a/docs/es/operations/table_engines/replication.md +++ b/docs/es/engines/table_engines/mergetree_family/replication.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 31 +toc_title: "Replicaci\xF3n de datos" --- # Replicación de datos {#table_engines-replication} @@ -18,7 +21,7 @@ La replicación funciona a nivel de una tabla individual, no de todo el servidor La replicación no depende de la fragmentación. Cada fragmento tiene su propia replicación independiente. -Datos comprimidos para `INSERT` y `ALTER` se replica (para obtener más información, consulte la documentación para [ALTERAR](../../query_language/alter.md#query_language_queries_alter)). +Datos comprimidos para `INSERT` y `ALTER` se replica (para obtener más información, consulte la documentación para [ALTER](../../../sql_reference/statements/alter.md#query_language_queries_alter)). `CREATE`, `DROP`, `ATTACH`, `DETACH` y `RENAME` las consultas se ejecutan en un único servidor y no se replican: @@ -28,7 +31,7 @@ Datos comprimidos para `INSERT` y `ALTER` se replica (para obtener más informac Uso de ClickHouse [Apache ZooKeeper](https://zookeeper.apache.org) para almacenar metainformación de réplicas. 
Utilice ZooKeeper versión 3.4.5 o posterior. -Para utilizar la replicación, establezca los parámetros [Zookeeper](../server_settings/settings.md#server-settings_zookeeper) sección de configuración del servidor. +Para utilizar la replicación, establezca los parámetros [Zookeeper](../../../operations/server_configuration_parameters/settings.md#server-settings_zookeeper) sección de configuración del servidor. !!! attention "Atención" No descuides la configuración de seguridad. ClickHouse soporta el `digest` [Esquema de ACL](https://zookeeper.apache.org/doc/current/zookeeperProgrammers.html#sc_ZooKeeperAccessControl) del subsistema de seguridad ZooKeeper. @@ -56,7 +59,7 @@ Puede especificar cualquier clúster ZooKeeper existente y el sistema utilizará Si ZooKeeper no está establecido en el archivo de configuración, no puede crear tablas replicadas y las tablas replicadas existentes serán de solo lectura. -ZooKeeper no se utiliza en `SELECT` consultas porque la replicación no afecta al rendimiento de `SELECT` y las consultas se ejecutan tan rápido como lo hacen para las tablas no replicadas. Al consultar tablas replicadas distribuidas, el comportamiento de ClickHouse se controla mediante la configuración [max\_replica\_delay\_for\_distributed\_queries](../settings/settings.md#settings-max_replica_delay_for_distributed_queries) y [fallback\_to\_stale\_replicas\_for\_distributed\_queries](../settings/settings.md#settings-fallback_to_stale_replicas_for_distributed_queries). +ZooKeeper no se utiliza en `SELECT` consultas porque la replicación no afecta al rendimiento de `SELECT` y las consultas se ejecutan tan rápido como lo hacen para las tablas no replicadas. Al consultar tablas replicadas distribuidas, el comportamiento de ClickHouse se controla mediante la configuración [max\_replica\_delay\_for\_distributed\_queries](../../../operations/settings/settings.md#settings-max_replica_delay_for_distributed_queries) y [fallback\_to\_stale\_replicas\_for\_distributed\_queries](../../../operations/settings/settings.md#settings-fallback_to_stale_replicas_for_distributed_queries). Para cada `INSERT` consulta, aproximadamente diez entradas se agregan a ZooKeeper a través de varias transacciones. (Para ser más precisos, esto es para cada bloque de datos insertado; una consulta INSERT contiene un bloque o un bloque por `max_insert_block_size = 1048576` filas.) Esto conduce a latencias ligeramente más largas para `INSERT` en comparación con las tablas no replicadas. Pero si sigue las recomendaciones para insertar datos en lotes de no más de uno `INSERT` por segundo, no crea ningún problema. Todo el clúster ClickHouse utilizado para coordinar un clúster ZooKeeper tiene un total de varios cientos `INSERTs` por segundo. El rendimiento en las inserciones de datos (el número de filas por segundo) es tan alto como para los datos no replicados. @@ -64,11 +67,11 @@ Para clústeres muy grandes, puede usar diferentes clústeres de ZooKeeper para La replicación es asíncrona y multi-master. `INSERT` consultas (así como `ALTER`) se puede enviar a cualquier servidor disponible. Los datos se insertan en el servidor donde se ejecuta la consulta y, a continuación, se copian a los demás servidores. Debido a que es asincrónico, los datos insertados recientemente aparecen en las otras réplicas con cierta latencia. Si parte de las réplicas no está disponible, los datos se escriben cuando estén disponibles. 
Si hay una réplica disponible, la latencia es la cantidad de tiempo que tarda en transferir el bloque de datos comprimidos a través de la red. -De forma predeterminada, una consulta INSERT espera la confirmación de la escritura de los datos de una sola réplica. Si los datos fue correctamente escrito a sólo una réplica y el servidor con esta réplica deja de existir, los datos almacenados se perderán. Para habilitar la confirmación de las escrituras de datos de varias réplicas, utilice `insert_quorum` opción. +De forma predeterminada, una consulta INSERT espera la confirmación de la escritura de los datos de una sola réplica. Si los datos fue correctamente escrito a sólo una réplica y el servidor con esta réplica deja de existir, los datos almacenados se perderán. Para habilitar la confirmación de las escrituras de datos de varias réplicas, utilice `insert_quorum` opcion. -Cada bloque de datos se escribe atómicamente. La consulta INSERT se divide en bloques hasta `max_insert_block_size = 1048576` películas. En otras palabras, si el `INSERT` consulta tiene menos de 1048576 filas, se hace atómicamente. +Cada bloque de datos se escribe atómicamente. La consulta INSERT se divide en bloques hasta `max_insert_block_size = 1048576` filas. En otras palabras, si el `INSERT` consulta tiene menos de 1048576 filas, se hace atómicamente. -Los bloques de datos se deduplican. Para varias escrituras del mismo bloque de datos (bloques de datos del mismo tamaño que contienen las mismas filas en el mismo orden), el bloque solo se escribe una vez. La razón de esto es en caso de errores de red cuando la aplicación cliente no sabe si los datos se escribieron en la base de datos, por lo que `INSERT` consulta simplemente se puede repetir. No importa a qué réplica se enviaron los INSERT con datos idénticos. `INSERTs` son idempotentes. Los parámetros de desduplicación son controlados por [merge\_tree](../server_settings/settings.md#server_settings-merge_tree) configuración del servidor. +Los bloques de datos se deduplican. Para varias escrituras del mismo bloque de datos (bloques de datos del mismo tamaño que contienen las mismas filas en el mismo orden), el bloque solo se escribe una vez. La razón de esto es en caso de fallas de red cuando la aplicación cliente no sabe si los datos se escribieron en la base de datos, por lo que `INSERT` consulta simplemente se puede repetir. No importa a qué réplica se enviaron los INSERT con datos idénticos. `INSERTs` son idempotentes. Los parámetros de desduplicación son controlados por [merge\_tree](../../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-merge_tree) configuración del servidor. Durante la replicación, sólo los datos de origen que se van a insertar se transfieren a través de la red. La transformación de datos adicional (fusión) se coordina y se realiza en todas las réplicas de la misma manera. Esto minimiza el uso de la red, lo que significa que la replicación funciona bien cuando las réplicas residen en centros de datos diferentes. (Tenga en cuenta que la duplicación de datos en diferentes centros de datos es el objetivo principal de la replicación.) @@ -82,8 +85,8 @@ El `Replicated` prefijo se agrega al nombre del motor de tabla. Por ejemplo:`Rep **Replicated\*MergeTree parámetros** -- `zoo_path` — El camino a la mesa en ZooKeeper. -- `replica_name` — El nombre de la réplica en ZooKeeper. +- `zoo_path` — The path to the table in ZooKeeper. +- `replica_name` — The replica name in ZooKeeper. 
Ejemplo: @@ -132,7 +135,7 @@ En este caso, la ruta consta de las siguientes partes: `{layer}-{shard}` es el identificador de fragmento. En este ejemplo consta de dos partes, ya que el Yandex.Metrica clúster utiliza sharding de dos niveles. Para la mayoría de las tareas, puede dejar solo la sustitución {shard}, que se expandirá al identificador de fragmento. `table_name` es el nombre del nodo de la tabla en ZooKeeper. Es una buena idea hacerlo igual que el nombre de la tabla. Se define explícitamente, porque a diferencia del nombre de la tabla, no cambia después de una consulta RENAME. -*SUGERENCIA*: podría agregar un nombre de base de datos delante de `table_name` También. Nivel de Cifrado WEP `db_name.table_name` +*HINT*: podría agregar un nombre de base de datos delante de `table_name` También. Nivel de Cifrado WEP `db_name.table_name` El nombre de réplica identifica diferentes réplicas de la misma tabla. Puede usar el nombre del servidor para esto, como en el ejemplo. El nombre solo tiene que ser único dentro de cada fragmento. @@ -144,7 +147,7 @@ Ejecute el `CREATE TABLE` consulta en cada réplica. Esta consulta crea una nuev Si agrega una nueva réplica después de que la tabla ya contenga algunos datos en otras réplicas, los datos se copiarán de las otras réplicas a la nueva después de ejecutar la consulta. En otras palabras, la nueva réplica se sincroniza con las demás. -Para eliminar una réplica, ejecute `DROP TABLE`. Sin embargo, solo se elimina una réplica, la que reside en el servidor donde se ejecuta la consulta. +Para eliminar una réplica, ejecute `DROP TABLE`. However, only one replica is deleted – the one that resides on the server where you run the query. ## Recuperación después de fallos {#recovery-after-failures} @@ -162,7 +165,7 @@ Cuando el servidor se inicia (o establece una nueva sesión con ZooKeeper), solo Si el conjunto local de datos difiere demasiado del esperado, se activa un mecanismo de seguridad. El servidor ingresa esto en el registro y se niega a iniciarse. La razón de esto es que este caso puede indicar un error de configuración, como si una réplica en un fragmento se configurara accidentalmente como una réplica en un fragmento diferente. Sin embargo, los umbrales para este mecanismo se establecen bastante bajos, y esta situación puede ocurrir durante la recuperación de falla normal. En este caso, los datos se restauran semiautomáticamente, mediante “pushing a button”. -Para iniciar la recuperación, cree el nodo `/path_to_table/replica_name/flags/force_restore_data` es ZooKeeper con cualquier contenido, o ejecute el comando para restaurar todas las tablas replicadas: +Para iniciar la recuperación, cree el nodo `/path_to_table/replica_name/flags/force_restore_data` en ZooKeeper con cualquier contenido, o ejecute el comando para restaurar todas las tablas replicadas: ``` bash sudo -u clickhouse touch /var/lib/clickhouse/flags/force_restore_data @@ -185,7 +188,7 @@ Una opción de recuperación alternativa es eliminar información sobre la répl No hay restricción en el ancho de banda de la red durante la recuperación. Tenga esto en cuenta si está restaurando muchas réplicas a la vez. -## La conversión de MergeTree a ReplicatedMergeTree {#converting-from-mergetree-to-replicatedmergetree} +## La Conversión De Mergetree A Replicatedmergetree {#converting-from-mergetree-to-replicatedmergetree} Usamos el término `MergeTree` para referirse a todos los motores de mesa en el `MergeTree family`, lo mismo que para `ReplicatedMergeTree`. 
@@ -197,7 +200,7 @@ Cambie el nombre de la tabla MergeTree existente y, a continuación, cree un `Re Mueva los datos de la tabla antigua a la `detached` subdirectorio dentro del directorio con los nuevos datos de la tabla (`/var/lib/clickhouse/data/db_name/table_name/`). Luego ejecuta `ALTER TABLE ATTACH PARTITION` en una de las réplicas para agregar estas partes de datos al conjunto de trabajo. -## La conversión de ReplicatedMergeTree a MergeTree {#converting-from-replicatedmergetree-to-mergetree} +## La Conversión De Replicatedmergetree A Mergetree {#converting-from-replicatedmergetree-to-mergetree} Cree una tabla MergeTree con un nombre diferente. Mueva todos los datos del directorio con el `ReplicatedMergeTree` datos de la tabla al directorio de datos de la nueva tabla. A continuación, elimine el `ReplicatedMergeTree` y reinicie el servidor. @@ -208,8 +211,8 @@ Si desea deshacerse de un `ReplicatedMergeTree` sin iniciar el servidor: Después de esto, puede iniciar el servidor, crear un `MergeTree` tabla, mueva los datos a su directorio y, a continuación, reinicie el servidor. -## Recuperación cuando los metadatos del clúster ZooKeeper se pierden o se dañan {#recovery-when-metadata-in-the-zookeeper-cluster-is-lost-or-damaged} +## Recuperación cuando los metadatos en el clúster de Zookeeper se pierden o se dañan {#recovery-when-metadata-in-the-zookeeper-cluster-is-lost-or-damaged} Si los datos de ZooKeeper se perdieron o se dañaron, puede guardar los datos moviéndolos a una tabla no duplicada como se describió anteriormente. -[Artículo Original](https://clickhouse.tech/docs/es/operations/table_engines/replication/) +[Artículo Original](https://clickhouse.tech/docs/en/operations/table_engines/replication/) diff --git a/docs/es/operations/table_engines/summingmergetree.md b/docs/es/engines/table_engines/mergetree_family/summingmergetree.md similarity index 78% rename from docs/es/operations/table_engines/summingmergetree.md rename to docs/es/engines/table_engines/mergetree_family/summingmergetree.md index dae6f366217..1e3241938f3 100644 --- a/docs/es/operations/table_engines/summingmergetree.md +++ b/docs/es/engines/table_engines/mergetree_family/summingmergetree.md @@ -1,8 +1,11 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 34 +toc_title: SummingMergeTree --- -# SummingMergeTree {#summingmergetree} +# Summingmergetree {#summingmergetree} El motor hereda de [Método de codificación de datos:](mergetree.md#table_engines-mergetree). La diferencia es que al fusionar partes de datos para `SummingMergeTree` ClickHouse reemplaza todas las filas con la misma clave primaria (o más exactamente, con la misma [clave de clasificación](mergetree.md)) con una fila que contiene valores resumidos para las columnas con el tipo de datos numérico. Si la clave de ordenación está compuesta de manera que un solo valor de clave corresponde a un gran número de filas, esto reduce significativamente el volumen de almacenamiento y acelera la selección de datos. @@ -23,7 +26,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] [SETTINGS name=value, ...] ``` -Para obtener una descripción de los parámetros de solicitud, consulte [descripción de la solicitud](../../query_language/create.md). +Para obtener una descripción de los parámetros de solicitud, consulte [descripción de la solicitud](../../../sql_reference/statements/create.md). 
**Parámetros de SummingMergeTree** @@ -54,7 +57,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] Todos los parámetros excepto `columns` el mismo significado que en `MergeTree`. -- `columns` — tupla con nombres de valores de columnas de los cuales se resumirán. Parámetro opcional. Para una descripción, vea el texto anterior. +- `columns` — tuple with names of columns values of which will be summarized. Optional parameter. For a description, see the text above. @@ -72,7 +75,7 @@ ENGINE = SummingMergeTree() ORDER BY key ``` -Inserte datos: +Insertar datos: ``` sql INSERT INTO summtt Values(1,1),(1,2),(2,1) @@ -95,7 +98,7 @@ SELECT key, sum(value) FROM summtt GROUP BY key Cuando los datos se insertan en una tabla, se guardan tal cual. Clickhouse fusiona las partes insertadas de datos periódicamente y esto es cuando las filas con la misma clave principal se suman y se reemplazan con una para cada parte resultante de los datos. -ClickHouse puede fusionar las partes de datos para que las diferentes partes resultantes del cat de datos consistan en filas con la misma clave principal, es decir, la suma estará incompleta. Pues (`SELECT`) una función agregada [resumir()](../../query_language/agg_functions/reference.md#agg_function-sum) y `GROUP BY` cláusula se debe utilizar en una consulta como se describe en el ejemplo anterior. +ClickHouse can merge the data parts so that different resulting parts of data cat consist rows with the same primary key, i.e. the summation will be incomplete. Therefore (`SELECT`) una función agregada [resumir()](../../../sql_reference/aggregate_functions/reference.md#agg_function-sum) y `GROUP BY` cláusula se debe utilizar en una consulta como se describe en el ejemplo anterior. ### Reglas comunes para la suma {#common-rules-for-summation} @@ -107,9 +110,9 @@ Si la columna no está en la clave principal y no se resume, se selecciona un va Los valores no se resumen para las columnas de la clave principal. -### La suma en las columnas AggregateFunction {#the-summation-in-the-aggregatefunction-columns} +### La suma en las columnas de función agregada {#the-summation-in-the-aggregatefunction-columns} -Para columnas de [Tipo AggregateFunction](../../data_types/nested_data_structures/aggregatefunction.md) ClickHouse se comporta como [AgregaciónMergeTree](aggregatingmergetree.md) agregación del motor según la función. +Para columnas de [Tipo AggregateFunction](../../../sql_reference/data_types/aggregatefunction.md) ClickHouse se comporta como [AgregaciónMergeTree](aggregatingmergetree.md) agregación del motor según la función. ### Estructuras anidadas {#nested-structures} @@ -117,7 +120,7 @@ La tabla puede tener estructuras de datos anidadas que se procesan de una manera Si el nombre de una tabla anidada termina con `Map` y contiene al menos dos columnas que cumplen los siguientes criterios: -- la primera columna es numérica `(*Int*, Date, DateTime)` O una cadena `(String, FixedString)`, vamos a llamarlo `key`, +- la primera columna es numérica `(*Int*, Date, DateTime)` o una cadena `(String, FixedString)`, vamos a llamarlo `key`, - las otras columnas son aritméticas `(*Int*, Float32/64)`, vamos a llamarlo `(values...)`, entonces esta tabla anidada se interpreta como una asignación de `key => (values...)`, y al fusionar sus filas, los elementos de dos conjuntos de datos se fusionan por `key` con una suma de los correspondientes `(values...)`. 
@@ -131,8 +134,8 @@ Ejemplos: [(1, 100), (2, 150)] + [(1, -100)] -> [(2, 150)] ``` -Al solicitar datos, utilice el [SumMap (clave, valor)](../../query_language/agg_functions/reference.md) función para la agregación de `Map`. +Al solicitar datos, utilice el [sumMap(clave, valor)](../../../sql_reference/aggregate_functions/reference.md) función para la agregación de `Map`. Para la estructura de datos anidados, no necesita especificar sus columnas en la tupla de columnas para la suma. -[Artículo Original](https://clickhouse.tech/docs/es/operations/table_engines/summingmergetree/) +[Artículo Original](https://clickhouse.tech/docs/en/operations/table_engines/summingmergetree/) diff --git a/docs/es/operations/table_engines/versionedcollapsingmergetree.md b/docs/es/engines/table_engines/mergetree_family/versionedcollapsingmergetree.md similarity index 85% rename from docs/es/operations/table_engines/versionedcollapsingmergetree.md rename to docs/es/engines/table_engines/mergetree_family/versionedcollapsingmergetree.md index c12c27cef88..e21ccc842f3 100644 --- a/docs/es/operations/table_engines/versionedcollapsingmergetree.md +++ b/docs/es/engines/table_engines/mergetree_family/versionedcollapsingmergetree.md @@ -1,8 +1,11 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 37 +toc_title: VersionedCollapsingMergeTree --- -# VersionedCollapsingMergeTree {#versionedcollapsingmergetree} +# Versionedcollapsingmergetree {#versionedcollapsingmergetree} Este motor: @@ -28,7 +31,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] [SETTINGS name=value, ...] ``` -Para obtener una descripción de los parámetros de consulta, consulte [descripción de la consulta](../../query_language/create.md). +Para obtener una descripción de los parámetros de consulta, consulte [descripción de la consulta](../../../sql_reference/statements/create.md). **Parámetros del motor** @@ -36,11 +39,11 @@ Para obtener una descripción de los parámetros de consulta, consulte [descripc VersionedCollapsingMergeTree(sign, version) ``` -- `sign` — Nombre de la columna con el tipo de fila: `1` es una “state” fila, `-1` es una “cancel” Fila. +- `sign` — Name of the column with the type of row: `1` es una “state” fila, `-1` es una “cancel” fila. El tipo de datos de columna debe ser `Int8`. -- `version` — Nombre de la columna con la versión del estado del objeto. +- `version` — Name of the column with the version of the object state. El tipo de datos de columna debe ser `UInt*`. @@ -66,11 +69,11 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] Todos los parámetros excepto `sign` y `version` el mismo significado que en `MergeTree`. -- `sign` — Nombre de la columna con el tipo de fila: `1` es una “state” fila, `-1` es una “cancel” Fila. +- `sign` — Name of the column with the type of row: `1` es una “state” fila, `-1` es una “cancel” fila. - Tipo de datos de columna — `Int8`. + Column Data Type — `Int8`. -- `version` — Nombre de la columna con la versión del estado del objeto. +- `version` — Name of the column with the version of the object state. El tipo de datos de columna debe ser `UInt*`. @@ -82,7 +85,7 @@ Todos los parámetros excepto `sign` y `version` el mismo significado que en `Me Considere una situación en la que necesite guardar datos que cambien continuamente para algún objeto. Es razonable tener una fila para un objeto y actualizar la fila siempre que haya cambios. 
Sin embargo, la operación de actualización es costosa y lenta para un DBMS porque requiere volver a escribir los datos en el almacenamiento. La actualización no es aceptable si necesita escribir datos rápidamente, pero puede escribir los cambios en un objeto secuencialmente de la siguiente manera. -Descripción `Sign` columna al escribir la fila. Si `Sign = 1` significa que la fila es un estado de un objeto (llamémoslo el “state” fila). Si `Sign = -1` indica la cancelación del estado de un objeto con los mismos atributos (llamémoslo el “cancel” fila). También use el `Version` columna, que debe identificar cada estado de un objeto con un número separado. +Utilice el `Sign` columna al escribir la fila. Si `Sign = 1` significa que la fila es un estado de un objeto (llamémoslo el “state” fila). Si `Sign = -1` indica la cancelación del estado de un objeto con los mismos atributos (llamémoslo el “cancel” fila). También use el `Version` columna, que debe identificar cada estado de un objeto con un número separado. Por ejemplo, queremos calcular cuántas páginas visitaron los usuarios en algún sitio y cuánto tiempo estuvieron allí. En algún momento escribimos la siguiente fila con el estado de la actividad del usuario: @@ -120,7 +123,7 @@ Para averiguar por qué necesitamos dos filas para cada cambio, vea [Algoritmo]( **Notas sobre el uso** -1. El programa que escribe los datos debe recordar el estado de un objeto para cancelarlo. El “cancel” Cadena debe ser una copia de la “state” con lo opuesto `Sign`. Esto aumenta el tamaño inicial de almacenamiento, pero permite escribir los datos rápidamente. +1. El programa que escribe los datos debe recordar el estado de un objeto para cancelarlo. El “cancel” cadena debe ser una copia de la “state” con lo opuesto `Sign`. Esto aumenta el tamaño inicial de almacenamiento, pero permite escribir los datos rápidamente. 2. Las matrices de largo crecimiento en columnas reducen la eficiencia del motor debido a la carga para escribir. Cuanto más sencillos sean los datos, mejor será la eficiencia. 3. `SELECT` Los resultados dependen en gran medida de la coherencia del historial de cambios de objetos. Sea preciso al preparar los datos para insertarlos. Puede obtener resultados impredecibles con datos incoherentes, como valores negativos para métricas no negativas, como la profundidad de la sesión. @@ -132,9 +135,9 @@ Cuando ClickHouse inserta datos, ordena filas por la clave principal. Si el `Ver ## Selección de datos {#selecting-data} -ClickHouse no garantiza que todas las filas con la misma clave principal estarán en la misma parte de datos resultante o incluso en el mismo servidor físico. Esto es cierto tanto para escribir los datos como para la posterior fusión de las partes de datos. Además, ClickHouse procesa `SELECT` consultas con múltiples subprocesos, y no puede predecir el orden de las filas en el resultado. Esto significa que la agregación es necesaria si hay una necesidad de obtener completamente “collapsed” datos de un `VersionedCollapsingMergeTree` tabla. +ClickHouse no garantiza que todas las filas con la misma clave principal estén en la misma parte de datos resultante o incluso en el mismo servidor físico. Esto es cierto tanto para escribir los datos como para la posterior fusión de las partes de datos. Además, ClickHouse procesa `SELECT` consultas con múltiples subprocesos, y no puede predecir el orden de las filas en el resultado. 
Esto significa que la agregación es necesaria si hay una necesidad de obtener completamente “collapsed” datos de un `VersionedCollapsingMergeTree` tabla. -Para finalizar el colapso, escriba una consulta con un `GROUP BY` cláusula y funciones agregadas que representan el signo. Por ejemplo, para calcular la cantidad, use `sum(Sign)` es lugar de `count()`. Para calcular la suma de algo, use `sum(Sign * x)` es lugar de `sum(x)` y agregar `HAVING sum(Sign) > 0`. +Para finalizar el colapso, escriba una consulta con un `GROUP BY` cláusula y funciones agregadas que representan el signo. Por ejemplo, para calcular la cantidad, use `sum(Sign)` en lugar de `count()`. Para calcular la suma de algo, use `sum(Sign * x)` en lugar de `sum(x)` y agregar `HAVING sum(Sign) > 0`. Los agregados `count`, `sum` y `avg` se puede calcular de esta manera. El agregado `uniq` se puede calcular si un objeto tiene al menos un estado no colapsado. Los agregados `min` y `max` no se puede calcular porque `VersionedCollapsingMergeTree` no guarda el historial de valores de estados colapsados. @@ -195,7 +198,7 @@ SELECT * FROM UAct └─────────────────────┴───────────┴──────────┴──────┴─────────┘ ``` -¿Qué vemos aquí y dónde están las partes colapsadas? +¿qué vemos aquí y dónde están las partes colapsadas? Creamos dos partes de datos usando dos `INSERT` consulta. El `SELECT` la consulta se realizó en dos subprocesos, y el resultado es un orden aleatorio de filas. No se produjo el colapso porque las partes de datos aún no se han fusionado. ClickHouse fusiona partes de datos en un punto desconocido en el tiempo que no podemos predecir. @@ -232,4 +235,4 @@ SELECT * FROM UAct FINAL Esta es una forma muy ineficiente de seleccionar datos. No lo use para mesas grandes. -[Artículo Original](https://clickhouse.tech/docs/es/operations/table_engines/versionedcollapsingmergetree/) +[Artículo Original](https://clickhouse.tech/docs/en/operations/table_engines/versionedcollapsingmergetree/) diff --git a/docs/es/operations/table_engines/buffer.md b/docs/es/engines/table_engines/special/buffer.md similarity index 74% rename from docs/es/operations/table_engines/buffer.md rename to docs/es/engines/table_engines/special/buffer.md index f05101a485e..0a869d556b8 100644 --- a/docs/es/operations/table_engines/buffer.md +++ b/docs/es/engines/table_engines/special/buffer.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 45 +toc_title: "B\xFAfer" --- # Búfer {#buffer} @@ -12,16 +15,16 @@ Buffer(database, table, num_layers, min_time, max_time, min_rows, max_rows, min_ Parámetros del motor: -- `database` – Nombre de la base de datos. En lugar del nombre de la base de datos, puede usar una expresión constante que devuelva una cadena. -- `table` – Tabla para eliminar los datos. -- `num_layers` – Capa de paralelismo. Físicamente, la tabla se representará como `num_layers` de búferes independientes. Valor recomendado: 16. -- `min_time`, `max_time`, `min_rows`, `max_rows`, `min_bytes`, y `max_bytes` – Condiciones para el lavado de datos del búfer. +- `database` – Database name. Instead of the database name, you can use a constant expression that returns a string. +- `table` – Table to flush data to. +- `num_layers` – Parallelism layer. Physically, the table will be represented as `num_layers` de búferes independientes. Valor recomendado: 16. +- `min_time`, `max_time`, `min_rows`, `max_rows`, `min_bytes`, y `max_bytes` – Conditions for flushing data from the buffer. 
Los datos se vacían del búfer y se escriben en la tabla de destino si `min*` condiciones o al menos una `max*` condición se cumplen. -- `min_time`, `max_time` – Condición para el tiempo en segundos desde el momento de la primera escritura en el búfer. -- `min_rows`, `max_rows` – Condición para el número de filas en el búfer. -- `min_bytes`, `max_bytes` – Condición para el número de bytes en el búfer. +- `min_time`, `max_time` – Condition for the time in seconds from the moment of the first write to the buffer. +- `min_rows`, `max_rows` – Condition for the number of rows in the buffer. +- `min_bytes`, `max_bytes` – Condition for the number of bytes in the buffer. Durante la operación de escritura, los datos se insertan en un `num_layers` número de búferes aleatorios. O bien, si la parte de datos para insertar es lo suficientemente grande (mayor que `max_rows` o `max_bytes`), se escribe directamente en la tabla de destino, omitiendo el búfer. @@ -42,7 +45,7 @@ Puede establecer cadenas vacías entre comillas simples para la base de datos y Al leer desde una tabla de búfer, los datos se procesan tanto desde el búfer como desde la tabla de destino (si hay uno). Tenga en cuenta que las tablas Buffer no admiten un índice. En otras palabras, los datos del búfer se analizan por completo, lo que puede ser lento para los búferes grandes. (Para los datos de una tabla subordinada, se utilizará el índice que admite.) -Si el conjunto de columnas de la tabla Buffer no coinciden con el conjunto de columnas de una tabla subordinada, se inserta un subconjunto de columnas que existen en ambas tablas. +Si el conjunto de columnas de la tabla Buffer no coincide con el conjunto de columnas de una tabla subordinada, se inserta un subconjunto de columnas que existen en ambas tablas. Si los tipos no coinciden con una de las columnas de la tabla Búfer y una tabla subordinada, se escribe un mensaje de error en el registro del servidor y se borra el búfer. Lo mismo sucede si la tabla subordinada no existe cuando se vacía el búfer. @@ -61,8 +64,8 @@ Si se replica la tabla de destino, se pierden algunas características esperadas Debido a estas desventajas, solo podemos recomendar el uso de una tabla Buffer en casos raros. -Una tabla de búfer se utiliza cuando se reciben demasiados INSERT de un gran número de servidores durante una unidad de tiempo y los datos no se pueden almacenar en búfer antes de la inserción, lo que significa que los INSERT no pueden ejecutarse lo suficientemente rápido. +Una tabla de búfer se usa cuando se reciben demasiados INSERT de un gran número de servidores durante una unidad de tiempo y los datos no se pueden almacenar en búfer antes de la inserción, lo que significa que los INSERT no pueden ejecutarse lo suficientemente rápido. -Tenga en cuenta que no tiene sentido insertar datos una fila a la vez, incluso para tablas de búfer. Esto solo producirá una velocidad de unos pocos miles de filas por segundo, mientras que la inserción de bloques de datos más grandes puede producir más de un millón de filas por segundo (consulte la sección “Performance”). +Tenga en cuenta que no tiene sentido insertar datos una fila a la vez, incluso para las tablas de búfer. Esto solo producirá una velocidad de unos pocos miles de filas por segundo, mientras que la inserción de bloques de datos más grandes puede producir más de un millón de filas por segundo (consulte la sección “Performance”). 
-[Artículo Original](https://clickhouse.tech/docs/es/operations/table_engines/buffer/) +[Artículo Original](https://clickhouse.tech/docs/en/operations/table_engines/buffer/) diff --git a/docs/es/operations/table_engines/dictionary.md b/docs/es/engines/table_engines/special/dictionary.md similarity index 83% rename from docs/es/operations/table_engines/dictionary.md rename to docs/es/engines/table_engines/special/dictionary.md index d9c76229517..13e091be699 100644 --- a/docs/es/operations/table_engines/dictionary.md +++ b/docs/es/engines/table_engines/special/dictionary.md @@ -1,10 +1,13 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 35 +toc_title: Diccionario --- # Diccionario {#dictionary} -El `Dictionary` el motor muestra el [Diccionario](../../query_language/dicts/external_dicts.md) datos como una tabla ClickHouse. +El motor `Dictionary` muestra los datos del [diccionario](../../../sql_reference/dictionaries/external_dictionaries/external_dicts.md) como una tabla de ClickHouse. Como ejemplo, considere un diccionario de `products` con la siguiente configuración: @@ -61,7 +64,7 @@ WHERE name = 'products' └──────────┴──────┴────────┴─────────────────┴─────────────────┴─────────────────┴───────────────┴─────────────────┘ ``` -Puede usar el [dictGet\*](../../query_language/functions/ext_dict_functions.md#ext_dict_functions) función para obtener los datos del diccionario en este formato. +Puede usar la función [dictGet\*](../../../sql_reference/functions/ext_dict_functions.md#ext_dict_functions) para obtener los datos del diccionario en este formato. Esta vista no es útil cuando necesita obtener datos sin procesar o cuando `JOIN` operación. Para estos casos, puede usar el `Dictionary` motor, que muestra los datos del diccionario en una tabla. @@ -91,4 +94,4 @@ select * from products limit 1; └───────────────┴─────────────────┘ ``` -[Artículo Original](https://clickhouse.tech/docs/es/operations/table_engines/dictionary/) +[Artículo Original](https://clickhouse.tech/docs/en/operations/table_engines/dictionary/) diff --git a/docs/es/operations/table_engines/distributed.md b/docs/es/engines/table_engines/special/distributed.md similarity index 65% rename from docs/es/operations/table_engines/distributed.md rename to docs/es/engines/table_engines/special/distributed.md index e47ceee011f..ae3ee5991d8 100644 --- a/docs/es/operations/table_engines/distributed.md +++ b/docs/es/engines/table_engines/special/distributed.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 33 +toc_title: Distribuido --- # Distribuido {#distributed} @@ -22,7 +25,7 @@ El motor distribuido acepta parámetros: Ver también: - `insert_distributed_sync` configuración - - [Método de codificación de datos:](mergetree.md#table_engine-mergetree-multiple-volumes) para los ejemplos + - [múltiples volúmenes para el almacenamiento de datos](../mergetree_family/mergetree.md#table_engine-mergetree-multiple-volumes) para los ejemplos Ejemplo: @@ -36,7 +39,7 @@ Por ejemplo, para una consulta con GROUP BY, los datos se agregarán en servidor En lugar del nombre de la base de datos, puede usar una expresión constante que devuelva una cadena. Por ejemplo: currentDatabase(). -logs: el nombre del clúster en el archivo de configuración del servidor. +logs – The cluster name in the server's config file.
Los clústeres se establecen así: @@ -80,15 +83,15 @@ Las réplicas están duplicando servidores (para leer todos los datos, puede acc Los nombres de clúster no deben contener puntos. -Los parámetros `host`, `port`, y opcionalmente `user`, `password`, `secure`, `compression` Se especifican para cada servidor: -- `host` – La dirección del servidor remoto. Puede utilizar el dominio o la dirección IPv4 o IPv6. Si especifica el dominio, el servidor realiza una solicitud DNS cuando se inicia y el resultado se almacena mientras el servidor se esté ejecutando. Si la solicitud DNS falla, el servidor no se inicia. Si cambia el registro DNS, reinicie el servidor. -- `port` – El puerto TCP para la actividad de mensajería (‘tcp\_port’ en la configuración, generalmente establecido en 9000). No lo confundas con http\_port. -- `user` – Nombre del usuario para conectarse a un servidor remoto. Valor predeterminado: predeterminado. Este usuario debe tener acceso para conectarse al servidor especificado. El acceso se configura en los usuarios.archivo xml. Para obtener más información, consulte la sección [Derechos de acceso](../../operations/access_rights.md). -- `password` – La contraseña para conectarse a un servidor remoto (no enmascarado). Valor predeterminado: cadena vacía. -- `secure` - Use ssl para la conexión, por lo general también debe definir `port` ¿Por qué? El servidor debe escuchar en Método de codificación de datos: y tener certificados correctos. +Los parámetros `host`, `port`, y opcionalmente `user`, `password`, `secure`, `compression` se especifican para cada servidor: +- `host` – The address of the remote server. You can use either the domain or the IPv4 or IPv6 address. If you specify the domain, the server makes a DNS request when it starts, and the result is stored as long as the server is running. If the DNS request fails, the server doesn't start. If you change the DNS record, restart the server. +- `port` – The TCP port for messenger activity (‘tcp\_port’ en la configuración, generalmente establecido en 9000). No lo confundas con http\_port. +- `user` – Name of the user for connecting to a remote server. Default value: default. This user must have access to connect to the specified server. Access is configured in the users.xml file. For more information, see the section [Derechos de acceso](../../../operations/access_rights.md). +- `password` – The password for connecting to a remote server (not masked). Default value: empty string. +- `secure` - Use ssl para la conexión, por lo general también debe definir `port` = 9440. El servidor debe escuchar en 9440 y tener certificados correctos. - `compression` - Utilice la compresión de datos. Valor predeterminado: true. -Al especificar réplicas, se seleccionará una de las réplicas disponibles para cada uno de los fragmentos al leer. Puede configurar el algoritmo para el equilibrio de carga (la preferencia para qué réplica acceder) [load\_balancing](../settings/settings.md#settings-load_balancing) configuración. +When specifying replicas, one of the available replicas will be selected for each of the shards when reading. You can configure the algorithm for load balancing (the preference for which replica to access) – see the [load\_balancing](../../../operations/settings/settings.md#settings-load_balancing) configuración. Si no se establece la conexión con el servidor, habrá un intento de conectarse con un breve tiempo de espera. Si la conexión falla, se seleccionará la siguiente réplica, y así sucesivamente para todas las réplicas. 
Si el intento de conexión falló para todas las réplicas, el intento se repetirá de la misma manera, varias veces. Esto funciona a favor de la resiliencia, pero no proporciona una tolerancia completa a errores: un servidor remoto podría aceptar la conexión, pero podría no funcionar o funcionar mal. @@ -100,9 +103,7 @@ Para ver los clústeres, utilice el ‘system.clusters’ tabla. El motor distribuido permite trabajar con un clúster como un servidor local. Sin embargo, el clúster es inextensible: debe escribir su configuración en el archivo de configuración del servidor (mejor aún, para todos los servidores del clúster). -No hay compatibilidad con las tablas distribuidas que miran a otras tablas distribuidas (excepto en los casos en que una tabla distribuida solo tiene un fragmento). Como alternativa, haga que la tabla Distribuida mire el “final” tabla. - -El motor distribuido requiere escribir clústeres en el archivo de configuración. Los clústeres del archivo de configuración se actualizan sobre la marcha, sin reiniciar el servidor. Si necesita enviar una consulta a un conjunto desconocido de fragmentos y réplicas cada vez, no necesita crear una tabla ‘remote’ función de tabla en su lugar. Vea la sección [Funciones de tabla](../../query_language/table_functions/index.md). +The Distributed engine requires writing clusters to the config file. Clusters from the config file are updated on the fly, without restarting the server. If you need to send a query to an unknown set of shards and replicas each time, you don't need to create a Distributed table – use the ‘remote’ función de tabla en su lugar. Vea la sección [Funciones de tabla](../../../sql_reference/table_functions/index.md). Hay dos métodos para escribir datos en un clúster: @@ -118,34 +119,34 @@ Si este parámetro se establece en ‘true’, la operación de escritura selecc Si se establece en ‘false’ (el valor predeterminado), los datos se escriben en todas las réplicas. En esencia, esto significa que la tabla distribuida replica los datos en sí. Esto es peor que usar tablas replicadas, porque no se verifica la consistencia de las réplicas y, con el tiempo, contendrán datos ligeramente diferentes. -Para seleccionar el fragmento al que se envía una fila de datos, se analiza la expresión de fragmentación y su resto se toma de dividirlo por el peso total de los fragmentos. La fila se envía al fragmento que corresponde al medio intervalo de los restos de ‘prev\_weight’ Naciones ‘prev\_weights + weight’, donde ‘prev\_weights’ es el peso total de los fragmentos con el número más pequeño, y ‘weight’ es el peso de este fragmento. Por ejemplo, si hay dos fragmentos, y el primero tiene un peso de 9 mientras que el segundo tiene un peso de 10, la fila se enviará al primer fragmento para los restos del rango \[0, 9), y al segundo para los restos del rango \[9, 19). +Para seleccionar el fragmento al que se envía una fila de datos, se analiza la expresión de fragmentación y su resto se toma de dividirlo por el peso total de los fragmentos. La fila se envía al fragmento que corresponde al medio intervalo de los restos de ‘prev\_weight’ a ‘prev\_weights + weight’, donde ‘prev\_weights’ es el peso total de los fragmentos con el número más pequeño, y ‘weight’ es el peso de este fragmento. Por ejemplo, si hay dos fragmentos, y el primero tiene un peso de 9 mientras que el segundo tiene un peso de 10, la fila se enviará al primer fragmento para los restos del rango \[0, 9), y al segundo para los restos del rango \[9, 19). 
-La expresión de fragmentación puede ser cualquier expresión de constantes y columnas de tabla que devuelva un entero. Por ejemplo, puede usar la expresión ‘rand()’ para la distribución aleatoria de datos, o ‘UserID’ para la distribución por el resto de dividir el ID del usuario (entonces los datos de un solo usuario residirán en un solo fragmento, lo que simplifica la ejecución de IN y JOIN por los usuarios). Si una de las columnas no se distribuye lo suficientemente uniformemente, puede envolverla en una función hash: intHash64(UserID) . +La expresión de fragmentación puede ser cualquier expresión de constantes y columnas de tabla que devuelva un entero. Por ejemplo, puede usar la expresión ‘rand()’ para la distribución aleatoria de datos, o ‘UserID’ para la distribución por el resto de dividir la ID del usuario (entonces los datos de un solo usuario residirán en un solo fragmento, lo que simplifica la ejecución de IN y JOIN por los usuarios). Si una de las columnas no se distribuye lo suficientemente uniformemente, puede envolverla en una función hash: intHash64(UserID) . -Un simple recordatorio de la división es una solución limitada para la fragmentación y no siempre es apropiado. Funciona para volúmenes medianos y grandes de datos (docenas de servidores), pero no para volúmenes muy grandes de datos (cientos de servidores o más). En este último caso, use el esquema de fragmentación requerido por el área asunto, en lugar de usar entradas en Tablas distribuidas. +Un simple resto de la división es una solución limitada para sharding y no siempre es apropiado. Funciona para volúmenes medianos y grandes de datos (docenas de servidores), pero no para volúmenes muy grandes de datos (cientos de servidores o más). En este último caso, use el esquema de fragmentación requerido por el área asunto, en lugar de usar entradas en Tablas distribuidas. -Las consultas SELECT se envían a todos los fragmentos y funcionan independientemente de cómo se distribuyen los datos entre los fragmentos (se pueden distribuir completamente aleatoriamente). Cuando agrega un nuevo fragmento, no tiene que transferirle los datos antiguos. Puede escribir nuevos datos con un peso más pesado: los datos se distribuirán de manera ligeramente desigual, pero las consultas funcionarán correcta y eficientemente. +SELECT queries are sent to all the shards and work regardless of how data is distributed across the shards (they can be distributed completely randomly). When you add a new shard, you don't have to transfer the old data to it. You can write new data with a heavier weight – the data will be distributed slightly unevenly, but queries will work correctly and efficiently. Debería preocuparse por el esquema de fragmentación en los siguientes casos: - Se utilizan consultas que requieren unir datos (IN o JOIN) mediante una clave específica. Si esta clave fragmenta datos, puede usar IN local o JOIN en lugar de GLOBAL IN o GLOBAL JOIN, que es mucho más eficiente. - Se usa una gran cantidad de servidores (cientos o más) con una gran cantidad de consultas pequeñas (consultas de clientes individuales: sitios web, anunciantes o socios). Para que las pequeñas consultas no afecten a todo el clúster, tiene sentido ubicar datos para un solo cliente en un solo fragmento. Alternativamente, como lo hemos hecho en Yandex.Metrica, puede configurar sharding de dos niveles: divida todo el clúster en “layers”, donde una capa puede consistir en varios fragmentos.
Los datos de un único cliente se encuentran en una sola capa, pero los fragmentos se pueden agregar a una capa según sea necesario y los datos se distribuyen aleatoriamente dentro de ellos. Las tablas distribuidas se crean para cada capa y se crea una única tabla distribuida compartida para consultas globales. -Los datos se escriben de forma asíncrona. Cuando se inserta en la tabla, el bloque de datos se acaba de escribir en el sistema de archivos local. Los datos se envían a los servidores remotos en segundo plano tan pronto como sea posible. El período de envío de datos está gestionado por el [Distributed\_directory\_monitor\_sleep\_time\_ms](../settings/settings.md#distributed_directory_monitor_sleep_time_ms) y [Distributed\_directory\_monitor\_max\_sleep\_time\_ms](../settings/settings.md#distributed_directory_monitor_max_sleep_time_ms) configuración. El `Distributed` el motor envía cada archivo con datos insertados por separado, pero puede habilitar el envío por lotes de archivos [distributed\_directory\_monitor\_batch\_inserts](../settings/settings.md#distributed_directory_monitor_batch_inserts) configuración. Esta configuración mejora el rendimiento del clúster al utilizar mejor los recursos de red y servidor local. Debe comprobar si los datos se envían correctamente comprobando la lista de archivos (datos en espera de ser enviados) en el directorio de la tabla: `/var/lib/clickhouse/data/database/table/`. +Los datos se escriben de forma asíncrona. Cuando se inserta en la tabla, el bloque de datos se acaba de escribir en el sistema de archivos local. Los datos se envían a los servidores remotos en segundo plano tan pronto como sea posible. El período de envío de datos está gestionado por el [Distributed\_directory\_monitor\_sleep\_time\_ms](../../../operations/settings/settings.md#distributed_directory_monitor_sleep_time_ms) y [Distributed\_directory\_monitor\_max\_sleep\_time\_ms](../../../operations/settings/settings.md#distributed_directory_monitor_max_sleep_time_ms) configuración. El `Distributed` el motor envía cada archivo con datos insertados por separado, pero puede habilitar el envío por lotes de archivos [distributed\_directory\_monitor\_batch\_inserts](../../../operations/settings/settings.md#distributed_directory_monitor_batch_inserts) configuración. Esta configuración mejora el rendimiento del clúster al utilizar mejor los recursos de red y servidor local. Debe comprobar si los datos se envían correctamente comprobando la lista de archivos (datos en espera de ser enviados) en el directorio de la tabla: `/var/lib/clickhouse/data/database/table/`. Si el servidor dejó de existir o tuvo un reinicio aproximado (por ejemplo, después de un error de dispositivo) después de un INSERT en una tabla distribuida, es posible que se pierdan los datos insertados. Si se detecta un elemento de datos dañado en el directorio de la tabla, se transfiere al ‘broken’ subdirectorio y ya no se utiliza. -Cuando la opción max\_parallel\_replicas está habilitada, el procesamiento de consultas se paralela en todas las réplicas dentro de un solo fragmento. Para obtener más información, consulte la sección [max\_parallel\_replicas](../settings/settings.md#settings-max_parallel_replicas). +Cuando la opción max\_parallel\_replicas está habilitada, el procesamiento de consultas se paralela en todas las réplicas dentro de un solo fragmento. Para obtener más información, consulte la sección [max\_parallel\_replicas](../../../operations/settings/settings.md#settings-max_parallel_replicas). 
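+Como boceto mínimo que reúne los parámetros y la expresión de fragmentación descritos arriba (el clúster `logs` y la tabla `default.hits` son supuestos ilustrativos):
+
+``` sql
+-- Cada fila se enruta a un fragmento del clúster 'logs' según el resto de rand()
+CREATE TABLE hits_all AS default.hits
+ENGINE = Distributed(logs, default, hits, rand())
+```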
-## Columnas virtuales {#virtual-columns} +## Columnas virtuales {#virtual-columns} -- `_shard_num` — Contiene el `shard_num` (de `system.clusters`). Tipo: [UInt32](../../data_types/int_uint.md). +- `_shard_num` — Contains the `shard_num` (de `system.clusters`). Tipo: [UInt32](../../../sql_reference/data_types/int_uint.md). !!! note "Nota" - Ya [`remote`](../../query_language/table_functions/remote.md)/`cluster` funciones de tabla crean internamente instancia temporal del mismo motor distribuido, `_shard_num` está disponible allí también. + Dado que las funciones de tabla [`remote`](../../../sql_reference/table_functions/remote.md)/`cluster` crean internamente una instancia temporal del mismo motor distribuido, `_shard_num` también está disponible allí. **Ver también** -- [Columnas virtuales](index.md#table_engines-virtual_columns) +- [Columnas virtuales](index.md#table_engines-virtual_columns) -[Artículo Original](https://clickhouse.tech/docs/es/operations/table_engines/distributed/) +[Artículo Original](https://clickhouse.tech/docs/en/operations/table_engines/distributed/) diff --git a/docs/es/operations/table_engines/external_data.md b/docs/es/engines/table_engines/special/external_data.md similarity index 79% rename from docs/es/operations/table_engines/external_data.md rename to docs/es/engines/table_engines/special/external_data.md index 1a88dac6729..ad15cf4e7d4 100644 --- a/docs/es/operations/table_engines/external_data.md +++ b/docs/es/engines/table_engines/special/external_data.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 34 +toc_title: Datos externos --- # Datos externos para el procesamiento de consultas {#external-data-for-query-processing} @@ -20,15 +23,15 @@ En el cliente de línea de comandos, puede especificar una sección de parámetr Puede tener varias secciones como esta, para el número de tablas que se transmiten. -**–externo** – Marca el comienzo de una cláusula. -**–file** – Ruta al archivo con el volcado de tabla, o -, que hace referencia a stdin. +**–external** – Marks the beginning of a clause. +**–file** – Path to the file with the table dump, or -, which refers to stdin. Solo se puede recuperar una sola tabla de stdin. -Los siguientes parámetros son opcionales: **–nombre**– Nombre de la tabla. Si se omite, se utiliza \_data. -**–formato** – Formato de datos en el archivo. Si se omite, se utiliza TabSeparated. +Los siguientes parámetros son opcionales: **–name**– Name of the table. If omitted, \_data is used. +**–format** – Data format in the file. If omitted, TabSeparated is used. -Se requiere uno de los siguientes parámetros:**–tipo** – Una lista de tipos de columnas separadas por comas. Por ejemplo: `UInt64,String`. Las columnas se llamarán \_1, \_2, … -**–estructura**– La estructura de la tabla en el formato`UserID UInt64`, `URL String`. Definir los nombres y tipos de columna. +Se requiere uno de los siguientes parámetros:**–types** – A list of comma-separated column types. For example: `UInt64,String`. The columns will be named \_1, \_2, … +**–structure**– The table structure in the format`UserID UInt64`, `URL String`. Define los nombres y tipos de columna. Los archivos especificados en ‘file’ se analizará mediante el formato especificado en ‘format’ utilizando los tipos de datos especificados en ‘types’ o ‘structure’. La mesa será cargado en el servidor y accesibles, como una tabla temporal con el nombre de ‘name’.
@@ -62,4 +65,4 @@ $ curl -F 'passwd=@passwd.tsv;' 'http://localhost:8123/?query=SELECT+shell,+coun Para el procesamiento de consultas distribuidas, las tablas temporales se envían a todos los servidores remotos. -[Artículo Original](https://clickhouse.tech/docs/es/operations/table_engines/external_data/) +[Artículo Original](https://clickhouse.tech/docs/en/operations/table_engines/external_data/) diff --git a/docs/es/operations/table_engines/file.md b/docs/es/engines/table_engines/special/file.md similarity index 67% rename from docs/es/operations/table_engines/file.md rename to docs/es/engines/table_engines/special/file.md index dbe9ee7d873..460e5ae40f5 100644 --- a/docs/es/operations/table_engines/file.md +++ b/docs/es/engines/table_engines/special/file.md @@ -1,11 +1,14 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 37 +toc_title: File --- # File {#table_engines-file} El motor de tabla de archivos mantiene los datos en un archivo en uno de los [file -Formato](../../interfaces/formats.md#formats) (TabSeparated, Native, etc.). +formato](../../../interfaces/formats.md#formats) (TabSeparated, Native, etc.). Ejemplos de uso: @@ -13,7 +16,7 @@ Ejemplos de uso: - Convertir datos de un formato a otro. - Actualización de datos en ClickHouse mediante la edición de un archivo en un disco. -## Uso en el servidor ClickHouse {#usage-in-clickhouse-server} +## Uso en el servidor ClickHouse {#usage-in-clickhouse-server} ``` sql File(Format) ``` El `Format` parámetro especifica uno de los formatos de archivo disponibles. Realizar `SELECT` consultas, el formato debe ser compatible para la entrada, y para realizar -`INSERT` consultas – para la salida. Los formatos disponibles se enumeran en el -[Formato](../../interfaces/formats.md#formats) apartado. +`INSERT` queries – for output. The available formats are listed in the +[Formato](../../../interfaces/formats.md#formats) apartado. -ClickHouse no permite especificar la ruta del sistema de archivos para`File`. Utilizará la carpeta definida por [camino](../server_settings/settings.md) configuración en la configuración del servidor. +ClickHouse no permite especificar la ruta del sistema de archivos para `File`. Utilizará la carpeta definida por la configuración [path](../../../operations/server_configuration_parameters/settings.md) en la configuración del servidor. Al crear una tabla usando `File(Format)` crea un subdirectorio vacío en esa carpeta. Cuando los datos se escriben en esa tabla, se colocan en `data.Format` en ese subdirectorio. -Puede crear manualmente esta subcarpeta y archivo en el sistema de archivos del servidor y luego [CONECTAR](../../query_language/misc.md) para mostrar información con el nombre coincidente, para que pueda consultar datos desde ese archivo. +Puede crear manualmente esta subcarpeta y archivo en el sistema de archivos del servidor y luego usar [ATTACH](../../../sql_reference/statements/misc.md) con el nombre de tabla coincidente, para que pueda consultar datos desde ese archivo. !!! warning "Advertencia" Tenga cuidado con esta funcionalidad, ya que ClickHouse no realiza un seguimiento de los cambios externos en dichos archivos. El resultado de las escrituras simultáneas a través de ClickHouse y fuera de ClickHouse no está definido.
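+Un posible boceto del flujo manual con `ATTACH` descrito arriba (la ruta y los nombres son hipotéticos y asumen la configuración `path` por defecto):
+
+``` sql
+-- Tras crear manualmente /var/lib/clickhouse/data/default/file_table/data.TabSeparated
+ATTACH TABLE file_table (name String, value UInt32) ENGINE = File(TabSeparated)
+```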
@@ -64,9 +67,9 @@ SELECT * FROM file_engine_table └──────┴───────┘ ``` -## Uso es Clickhouse-local {#usage-in-clickhouse-local} +## Uso en clickhouse-local {#usage-in-clickhouse-local} -En [Sistema abierto.](../utils/clickhouse-local.md) El motor de archivos acepta la ruta del archivo además de `Format`. Los flujos de entrada / salida predeterminados se pueden especificar utilizando nombres numéricos o legibles por humanos como `0` o `stdin`, `1` o `stdout`. +En [clickhouse-local](../../../operations/utilities/clickhouse-local.md) el motor de archivos acepta la ruta del archivo además de `Format`. Los flujos de entrada / salida predeterminados se pueden especificar utilizando nombres numéricos o legibles por humanos como `0` o `stdin`, `1` o `stdout`. **Ejemplo:** ``` bash @@ -84,4 +87,4 @@ $ echo -e "1,2\n3,4" | clickhouse-local -q "CREATE TABLE table (a Int64, b Int64 - Indice - Replicación -[Artículo Original](https://clickhouse.tech/docs/es/operations/table_engines/file/) +[Artículo Original](https://clickhouse.tech/docs/en/operations/table_engines/file/) diff --git a/docs/es/operations/table_engines/generate.md b/docs/es/engines/table_engines/special/generate.md similarity index 73% rename from docs/es/operations/table_engines/generate.md rename to docs/es/engines/table_engines/special/generate.md index 11993bcc353..662eada5f86 100644 --- a/docs/es/operations/table_engines/generate.md +++ b/docs/es/engines/table_engines/special/generate.md @@ -1,8 +1,11 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 46 +toc_title: GenerateRandom --- -# GenerateRandom {#table_engines-generate} +# GenerateRandom {#table_engines-generate} El motor de tabla GenerateRandom produce datos aleatorios para el esquema de tabla determinado. Ejemplos de uso: @@ -11,7 +14,7 @@ Ejemplos de uso: - Se usa en la prueba para poblar una tabla grande reproducible. - Generar entrada aleatoria para pruebas de fuzzing. -## Uso en el servidor ClickHouse {#usage-in-clickhouse-server} +## Uso en el servidor ClickHouse {#usage-in-clickhouse-server} ``` sql ENGINE = GenerateRandom(random_seed, max_string_length, max_array_length) ``` @@ -22,7 +25,7 @@ columnas y cadenas de matriz correspondientemente en los datos generados. Generar motor de tabla sólo admite `SELECT` consulta. -Es compatible con todos [Tipos de datos](../../data_types/index.md) que se pueden almacenar en una tabla excepto `LowCardinality` y `AggregateFunction`. +Es compatible con todos los [tipos de datos](../../../sql_reference/data_types/index.md) que se pueden almacenar en una tabla excepto `LowCardinality` y `AggregateFunction`.
**Ejemplo:** @@ -55,4 +58,4 @@ SELECT * FROM generate_engine_table LIMIT 3 - Indice - Replicación -[Artículo Original](https://clickhouse.tech/docs/es/operations/table_engines/generate/) +[Artículo Original](https://clickhouse.tech/docs/en/operations/table_engines/generate/) diff --git a/docs/es/engines/table_engines/special/index.md b/docs/es/engines/table_engines/special/index.md new file mode 100644 index 00000000000..9770e3fb6c5 --- /dev/null +++ b/docs/es/engines/table_engines/special/index.md @@ -0,0 +1,8 @@ +--- +machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_folder_title: Special +toc_priority: 31 +--- + + diff --git a/docs/es/operations/table_engines/join.md b/docs/es/engines/table_engines/special/join.md similarity index 56% rename from docs/es/operations/table_engines/join.md rename to docs/es/engines/table_engines/special/join.md index 65ec4b950a6..031305fc8ef 100644 --- a/docs/es/operations/table_engines/join.md +++ b/docs/es/engines/table_engines/special/join.md @@ -1,10 +1,13 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 40 +toc_title: Unir --- # Unir {#join} -Estructura de datos preparada para usar en [UNIR](../../query_language/select.md#select-join) operación. +Estructura de datos preparada para usarse en operaciones [JOIN](../../../sql_reference/statements/select.md#select-join). ## Creación de una tabla {#creating-a-table} ``` sql CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] ( ) ENGINE = Join(join_strictness, join_type, k1[, k2, ...]) ``` -Vea la descripción detallada del [CREAR TABLA](../../query_language/create.md#create-table-query) consulta. +Vea la descripción detallada de la consulta [CREATE TABLE](../../../sql_reference/statements/create.md#create-table-query). **Parámetros del motor** -- `join_strictness` – [ÚNETE a la rigurosidad](../../query_language/select.md#select-join-strictness). -- `join_type` – [Tipo de unión](../../query_language/select.md#select-join-types). -- `k1[, k2, ...]` – Columnas clave de la `USING` cláusula que el `JOIN` operación se hace con. +- `join_strictness` – [rigurosidad de JOIN](../../../sql_reference/statements/select.md#select-join-strictness). +- `join_type` – [Tipo de unión](../../../sql_reference/statements/select.md#select-join-types). +- `k1[, k2, ...]` – Key columns from the `USING` cláusula que el `JOIN` operación se hace con. -Entrar `join_strictness` y `join_type` parámetros sin comillas, por ejemplo, `Join(ANY, LEFT, col1)`. Deben coincidir con el `JOIN` operación para la que se utilizará la tabla. Si los parámetros no coinciden, ClickHouse no produce una excepción y puede devolver datos incorrectos. +Introduzca los parámetros `join_strictness` y `join_type` sin comillas, por ejemplo, `Join(ANY, LEFT, col1)`. Deben coincidir con la operación `JOIN` para la que se utilizará la tabla. Si los parámetros no coinciden, ClickHouse no lanza una excepción y puede devolver datos incorrectos. ## Uso de la tabla {#table-usage} @@ -78,26 +81,26 @@ SELECT joinGet('id_val_join', 'val', toUInt32(1)) ### Selección e inserción de datos {#selecting-and-inserting-data} -Usted puede utilizar `INSERT` Consultas para agregar datos al `Join`-mesas de motor. Si la tabla se creó con el `ANY` estricta, se ignoran los datos de las claves duplicadas. Con el `ALL` estricta, se agregan todas las filas. +Usted puede utilizar `INSERT` consultas para agregar datos a las tablas con el motor `Join`.
Si la tabla se creó con la rigurosidad `ANY`, se ignoran los datos de las claves duplicadas. Con la rigurosidad `ALL`, se agregan todas las filas. No se puede realizar una `SELECT` consulta directamente desde la tabla. En su lugar, use uno de los siguientes métodos: - Coloque la mesa hacia el lado derecho en un `JOIN` clausula. -- Llame al [joinGet](../../query_language/functions/other_functions.md#joinget) función, que le permite extraer datos de la tabla de la misma manera que de un diccionario. +- Llame a la función [joinGet](../../../sql_reference/functions/other_functions.md#joinget), que le permite extraer datos de la tabla de la misma manera que de un diccionario. ### Limitaciones y ajustes {#join-limitations-and-settings} Al crear una tabla, se aplican los siguientes valores: -- [Sistema abierto.](../settings/settings.md#join_use_nulls) -- [Método de codificación de datos:](../settings/query_complexity.md#settings-max_rows_in_join) -- [Método de codificación de datos:](../settings/query_complexity.md#settings-max_bytes_in_join) -- [join\_overflow\_mode](../settings/query_complexity.md#settings-join_overflow_mode) -- [join\_any\_take\_last\_row](../settings/settings.md#settings-join_any_take_last_row) +- [join\_use\_nulls](../../../operations/settings/settings.md#join_use_nulls) +- [max\_rows\_in\_join](../../../operations/settings/query_complexity.md#settings-max_rows_in_join) +- [max\_bytes\_in\_join](../../../operations/settings/query_complexity.md#settings-max_bytes_in_join) +- [join\_overflow\_mode](../../../operations/settings/query_complexity.md#settings-join_overflow_mode) +- [join\_any\_take\_last\_row](../../../operations/settings/settings.md#settings-join_any_take_last_row) -El `Join`-las tablas del motor no se pueden utilizar adentro `GLOBAL JOIN` operación. +Las tablas con el motor `Join` no se pueden usar en operaciones `GLOBAL JOIN`. -El `Join`-motor permite el uso [Sistema abierto.](../settings/settings.md#join_use_nulls) ajuste en el `CREATE TABLE` instrucción. Y [SELECCIONAR](../../query_language/select.md) Consulta permite el uso `join_use_nulls` demasiado. Si tienes diferentes `join_use_nulls` configuración, puede obtener un error al unirse a la tabla. Depende del tipo de JOIN. Cuando se utiliza [joinGet](../../query_language/functions/other_functions.md#joinget) función, usted tiene que utilizar el mismo `join_use_nulls` ajuste en `CRATE TABLE` y `SELECT` instrucción. +El motor `Join` permite usar el ajuste [join\_use\_nulls](../../../operations/settings/settings.md#join_use_nulls) en la instrucción `CREATE TABLE`. La consulta [SELECT](../../../sql_reference/statements/select.md) también permite usar `join_use_nulls`. Si tiene configuraciones de `join_use_nulls` diferentes, puede obtener un error al unirse a la tabla. Depende del tipo de JOIN. Cuando se utiliza la función [joinGet](../../../sql_reference/functions/other_functions.md#joinget), tiene que utilizar el mismo ajuste `join_use_nulls` en las instrucciones `CREATE TABLE` y `SELECT`. ## Almacenamiento de datos {#data-storage} @@ -105,4 +108,4 @@ El `Join`-motor permite el uso [Sistema abierto.](../settings/settings.md#join_u Si el servidor se reinicia incorrectamente, el bloque de datos en el disco puede perderse o dañarse. En este caso, es posible que deba eliminar manualmente el archivo con datos dañados.
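+Recapitulando lo anterior, un boceto mínimo (la tabla `id_val_join` es ilustrativa y coincide con la usada en el ejemplo de `joinGet` de más arriba):
+
+``` sql
+CREATE TABLE id_val_join (`id` UInt32, `val` UInt8)
+ENGINE = Join(ANY, LEFT, id);
+
+INSERT INTO id_val_join VALUES (1, 11), (2, 12), (3, 13);
+
+-- Con la rigurosidad ANY, las filas con claves duplicadas se ignorarían
+SELECT joinGet('id_val_join', 'val', toUInt32(1));
+```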
-[Artículo Original](https://clickhouse.tech/docs/es/operations/table_engines/join/) +[Artículo Original](https://clickhouse.tech/docs/en/operations/table_engines/join/) diff --git a/docs/es/engines/table_engines/special/materializedview.md b/docs/es/engines/table_engines/special/materializedview.md new file mode 100644 index 00000000000..4960ce2cd80 --- /dev/null +++ b/docs/es/engines/table_engines/special/materializedview.md @@ -0,0 +1,12 @@ +--- +machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 43 +toc_title: MaterializedView +--- + +# MaterializedView {#materializedview} + +Se utiliza para implementar vistas materializadas (para obtener más información, consulte [CREATE TABLE](../../../sql_reference/statements/create.md)). Para almacenar datos, utiliza otro motor, el que se especificó al crear la vista. Al leer desde una tabla, solo usa este motor. + +[Artículo Original](https://clickhouse.tech/docs/en/operations/table_engines/materializedview/) diff --git a/docs/es/operations/table_engines/memory.md b/docs/es/engines/table_engines/special/memory.md similarity index 88% rename from docs/es/operations/table_engines/memory.md rename to docs/es/engines/table_engines/special/memory.md index 66b1a79c4e4..f7a2f81ff94 100644 --- a/docs/es/operations/table_engines/memory.md +++ b/docs/es/engines/table_engines/special/memory.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 44 +toc_title: Memoria --- # Memoria {#memory} @@ -13,4 +16,4 @@ Normalmente, el uso de este motor de tabla no está justificado. Sin embargo, se El sistema utiliza el motor de memoria para tablas temporales con datos de consulta externos (consulte la sección “External data for processing a query”), y para la implementación de GLOBAL IN (véase la sección “IN operators”). -[Artículo Original](https://clickhouse.tech/docs/es/operations/table_engines/memory/) +[Artículo Original](https://clickhouse.tech/docs/en/operations/table_engines/memory/) diff --git a/docs/es/operations/table_engines/merge.md b/docs/es/engines/table_engines/special/merge.md similarity index 74% rename from docs/es/operations/table_engines/merge.md rename to docs/es/engines/table_engines/special/merge.md index 7147a55b975..15b67d2f721 100644 --- a/docs/es/operations/table_engines/merge.md +++ b/docs/es/engines/table_engines/special/merge.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 36 +toc_title: Fusionar --- # Fusionar {#merge} @@ -18,17 +21,17 @@ Los datos se leerán de las tablas en el `hits` base de datos que tienen nombres En lugar del nombre de la base de datos, puede usar una expresión constante que devuelva una cadena. Por ejemplo, `currentDatabase()`. -Expresiones regulares — [Re2](https://github.com/google/re2) (soporta un subconjunto de PCRE), sensible a mayúsculas y minúsculas. +Regular expressions — [Re2](https://github.com/google/re2) (soporta un subconjunto de PCRE), sensible a mayúsculas y minúsculas. Vea las notas sobre los símbolos de escape en expresiones regulares en el “match” apartado. Al seleccionar tablas para leer, el `Merge` no se seleccionará la tabla en sí, incluso si coincide con la expresión regular. Esto es para evitar bucles. -Es posible crear dos `Merge` tablas que intentarán sin cesar leer los datos de los demás, pero esta no es una buena idea.
+Es posible crear dos `Merge` tablas que intentarán interminablemente leer los datos de los demás, pero esta no es una buena idea. La forma típica de usar el `Merge` para trabajar con un gran número de `TinyLog` tablas como si con una sola tabla. Ejemplo 2: -Supongamos que tiene una tabla antigua (WatchLog\_old) y decidió cambiar la partición sin mover los datos a una nueva tabla (WatchLog\_new) y necesita ver los datos de ambas tablas. +Digamos que tiene una tabla antigua (WatchLog\_old) y decidió cambiar la partición sin mover datos a una nueva tabla (WatchLog\_new) y necesita ver datos de ambas tablas. ``` sql CREATE TABLE WatchLog_old(date Date, UserId Int64, EventType String, Cnt UInt64) @@ -54,14 +57,14 @@ FROM WatchLog └────────────┴────────┴───────────┴─────┘ ``` -## Columnas virtuales {#virtual-columns} +## Columnas virtuales {#virtual-columns} -- `_table` — Contiene el nombre de la tabla de la que se leyeron los datos. Tipo: [Cadena](../../data_types/string.md). +- `_table` — Contains the name of the table from which data was read. Type: [Cadena](../../../sql_reference/data_types/string.md). - Puede establecer las condiciones constantes en `_table` es el `WHERE/PREWHERE` cláusula (por ejemplo, `WHERE _table='xyz'`). En este caso, la operación de lectura se realiza sólo para las tablas donde la condición en `_table` está satisfecho, por lo que el `_table` columna actúa como un índice. + Puede establecer las condiciones constantes en `_table` en la cláusula `WHERE/PREWHERE` (por ejemplo, `WHERE _table='xyz'`). En este caso, la operación de lectura se realiza sólo para las tablas donde la condición en `_table` está satisfecho, por lo que la columna `_table` actúa como un índice. **Ver también** -- [Columnas virtuales](index.md#table_engines-virtual_columns) +- [Columnas virtuales](index.md#table_engines-virtual_columns) -[Artículo Original](https://clickhouse.tech/docs/es/operations/table_engines/merge/) +[Artículo Original](https://clickhouse.tech/docs/en/operations/table_engines/merge/) diff --git a/docs/es/operations/table_engines/null.md b/docs/es/engines/table_engines/special/null.md similarity index 66% rename from docs/es/operations/table_engines/null.md rename to docs/es/engines/table_engines/special/null.md index 7915af8e108..53abadefb31 100644 --- a/docs/es/operations/table_engines/null.md +++ b/docs/es/engines/table_engines/special/null.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 38 +toc_title: Nulo --- # Nulo {#null} @@ -8,4 +11,4 @@ Al escribir en una tabla Null, los datos se ignoran. Al leer desde una tabla Nul Sin embargo, puede crear una vista materializada en una tabla Null. Entonces los datos escritos en la tabla terminarán en la vista.
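+Un boceto mínimo de este patrón (los nombres `null_source` y `null_mv` son hipotéticos):
+
+``` sql
+CREATE TABLE null_source (x UInt64) ENGINE = Null;
+
+CREATE MATERIALIZED VIEW null_mv ENGINE = MergeTree() ORDER BY x
+AS SELECT x FROM null_source;
+
+INSERT INTO null_source VALUES (1), (2), (3);
+
+-- La vista conserva las 3 filas; null_source sigue devolviendo 0 filas
+SELECT count() FROM null_mv;
+```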
-[Artículo Original](https://clickhouse.tech/docs/es/operations/table_engines/null/) +[Artículo Original](https://clickhouse.tech/docs/en/operations/table_engines/null/) diff --git a/docs/es/operations/table_engines/set.md b/docs/es/engines/table_engines/special/set.md similarity index 85% rename from docs/es/operations/table_engines/set.md rename to docs/es/engines/table_engines/special/set.md index 4c21397db0f..4afc3fad85d 100644 --- a/docs/es/operations/table_engines/set.md +++ b/docs/es/engines/table_engines/special/set.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 39 +toc_title: Establecer --- # Establecer {#set} @@ -13,4 +16,4 @@ Los datos siempre se encuentran en la memoria RAM. Para INSERT, los bloques de d Para un reinicio aproximado del servidor, el bloque de datos en el disco puede perderse o dañarse. En este último caso, es posible que deba eliminar manualmente el archivo con datos dañados. -[Artículo Original](https://clickhouse.tech/docs/es/operations/table_engines/set/) +[Artículo Original](https://clickhouse.tech/docs/en/operations/table_engines/set/) diff --git a/docs/es/operations/table_engines/url.md b/docs/es/engines/table_engines/special/url.md similarity index 77% rename from docs/es/operations/table_engines/url.md rename to docs/es/engines/table_engines/special/url.md index d606de56083..37c796b15b8 100644 --- a/docs/es/operations/table_engines/url.md +++ b/docs/es/engines/table_engines/special/url.md @@ -1,27 +1,30 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 41 +toc_title: URL --- -# Nombre de la red inalámbrica (SSID):) {#table_engines-url} +# URL(URL, Formato) {#table_engines-url} Administra datos en un servidor HTTP/HTTPS remoto. Este motor es similar -Angeles [File](file.md) motor. +al motor [File](file.md). -## Uso del motor en el servidor ClickHouse {#using-the-engine-in-the-clickhouse-server} +## Uso del motor en el servidor ClickHouse {#using-the-engine-in-the-clickhouse-server} El `format` debe ser uno que ClickHouse pueda usar en `SELECT` consultas y, si es necesario, en `INSERTs`. Para obtener la lista completa de formatos admitidos, consulte -[Formato](../../interfaces/formats.md#formats). +[Formato](../../../interfaces/formats.md#formats). El `URL` debe ajustarse a la estructura de un localizador uniforme de recursos. La dirección URL especificada debe apuntar a un servidor que utiliza HTTP o HTTPS. Esto no requiere ningún encabezados adicionales para obtener una respuesta del servidor. `INSERT` y `SELECT` las consultas se transforman en `POST` y `GET` Peticiones, -Respectivamente. Para el procesamiento `POST` solicitudes, el servidor remoto debe admitir +`INSERT` y `SELECT` las consultas se transforman en `POST` y `GET` peticiones, +respectivamente. Para el procesamiento de solicitudes `POST`, el servidor remoto debe admitir [Codificación de transferencia fragmentada](https://en.wikipedia.org/wiki/Chunked_transfer_encoding). -Puede limitar el número máximo de saltos de redirección HTTP GET utilizando el [Nombre de la red inalámbrica (SSID):](../settings/settings.md#setting-max_http_get_redirects) configuración. +Puede limitar el número máximo de saltos de redirección HTTP GET utilizando la configuración [max\_http\_get\_redirects](../../../operations/settings/settings.md#setting-max_http_get_redirects). **Ejemplo:** @@ -76,4 +79,4 @@ SELECT * FROM url_engine_table - Índices. - Replicación.
-[Artículo Original](https://clickhouse.tech/docs/es/operations/table_engines/url/) +[Artículo Original](https://clickhouse.tech/docs/en/operations/table_engines/url/) diff --git a/docs/es/operations/table_engines/view.md b/docs/es/engines/table_engines/special/view.md similarity index 70% rename from docs/es/operations/table_engines/view.md rename to docs/es/engines/table_engines/special/view.md index 9ff0ec1836e..3dae42d1b06 100644 --- a/docs/es/operations/table_engines/view.md +++ b/docs/es/engines/table_engines/special/view.md @@ -1,9 +1,12 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 42 +toc_title: Vista --- # Vista {#table_engines-view} Se utiliza para implementar vistas (para obtener más información, consulte `CREATE VIEW query`). No almacena datos, pero solo almacena los datos especificados `SELECT` consulta. Al leer desde una tabla, ejecuta esta consulta (y elimina todas las columnas innecesarias de la consulta). -[Artículo Original](https://clickhouse.tech/docs/es/operations/table_engines/view/) +[Artículo Original](https://clickhouse.tech/docs/en/operations/table_engines/view/) diff --git a/docs/es/faq/general.md b/docs/es/faq/general.md index 351b0f18ec0..09c5a476af8 100644 --- a/docs/es/faq/general.md +++ b/docs/es/faq/general.md @@ -1,18 +1,21 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 78 +toc_title: Preguntas generales --- # Preguntas generales {#general-questions} -## ¿Por qué no usar algo como MapReduce? {#why-not-use-something-like-mapreduce} +## ¿Por qué no usar algo como MapReduce? {#why-not-use-something-like-mapreduce} Podemos referirnos a sistemas como MapReduce como sistemas informáticos distribuidos en los que la operación de reducción se basa en la clasificación distribuida. La solución de código abierto más común en esta clase es [Acerca de nosotros](http://hadoop.apache.org). Yandex utiliza su solución interna, YT. -Estos sistemas no son apropiados para consultas en línea debido a su alta latencia. En otras palabras, no se pueden usar como back-end para una interfaz web. Estos tipos de sistemas no son útiles para actualizaciones de datos en tiempo real. La clasificación distribuida no es la mejor manera de realizar operaciones de reducción si el resultado de la operación y todos los resultados intermedios (si los hay) se encuentran en la RAM de un único servidor, que suele ser el caso para las consultas en línea. En tal caso, una tabla hash es una forma óptima de realizar operaciones de reducción. Un enfoque común para optimizar las tareas de reducción de mapas es la preagregación (reducción parcial) utilizando una tabla hash en RAM. El usuario realiza esta optimización manualmente. La clasificación distribuida es una de las principales causas de un rendimiento reducido cuando se ejecutan tareas simples de reducción de mapas. +Estos sistemas no son apropiados para consultas en línea debido a su alta latencia. En otras palabras, no se pueden usar como back-end para una interfaz web. Estos tipos de sistemas no son útiles para actualizaciones de datos en tiempo real. La clasificación distribuida no es la mejor manera de realizar operaciones de reducción si el resultado de la operación y todos los resultados intermedios (si los hay) se encuentran en la RAM de un único servidor, que generalmente es el caso de las consultas en línea. En tal caso, una tabla hash es una forma óptima de realizar operaciones de reducción.
Un enfoque común para optimizar las tareas de reducción de mapas es la preagregación (reducción parcial) utilizando una tabla hash en RAM. El usuario realiza esta optimización manualmente. La clasificación distribuida es una de las principales causas de un rendimiento reducido cuando se ejecutan tareas simples de reducción de mapas. La mayoría de las implementaciones de MapReduce le permiten ejecutar código arbitrario en un clúster. Pero un lenguaje de consulta declarativo es más adecuado para OLAP para ejecutar experimentos rápidamente. Por ejemplo, Hadoop tiene Hive y Pig. También considere Cloudera Impala o Shark (obsoleto) para Spark, así como Spark SQL, Presto y Apache Drill. El rendimiento cuando se ejecutan tales tareas es muy subóptimo en comparación con los sistemas especializados, pero la latencia relativamente alta hace que sea poco realista utilizar estos sistemas como back-end para una interfaz web. -## ¿Qué sucede si tengo un problema con las codificaciones al usar Oracle a través de ODBC? {#oracle-odbc-encodings} +## ¿Qué sucede si tengo un problema con las codificaciones al usar Oracle a través de ODBC? {#oracle-odbc-encodings} Si utiliza Oracle a través del controlador ODBC como fuente de diccionarios externos, debe establecer el valor `NLS_LANG` variable de entorno en `/etc/default/clickhouse`. Para obtener más información, consulte [Oracle NLS\_LANG Preguntas frecuentes](https://www.oracle.com/technetwork/products/globalization/nls-lang-099431.html). @@ -34,7 +37,7 @@ Por ejemplo: SELECT * FROM table INTO OUTFILE 'file' ``` -De forma predeterminada, ClickHouse usa el [TabSeparated](../interfaces/formats.md#tabseparated) formato de datos de salida. Para seleccionar el [Formato de datos](../interfaces/formats.md), utilizar el [Cláusula FORMAT](../query_language/select/#format-clause). +De forma predeterminada, ClickHouse usa el formato de datos de salida [TabSeparated](../interfaces/formats.md#tabseparated). Para seleccionar el [formato de datos](../interfaces/formats.md), utilice la [cláusula FORMAT](../query_language/select/#format-clause). Por ejemplo: @@ -44,7 +47,7 @@ SELECT * FROM table INTO OUTFILE 'file' FORMAT CSV ### Uso de una tabla de motor de archivo {#using-a-file-engine-table} -Ver [File](../operations/table_engines/file.md). +Ver [File](../engines/table_engines/special/file.md). ### Uso de la redirección de línea de comandos {#using-command-line-redirection} @@ -54,4 +57,4 @@ $ clickhouse-client --query "SELECT * from table" --format FormatName > result.t Ver [Casa de clics-cliente](../interfaces/cli.md). -[Artículo Original](https://clickhouse.tech/docs/es/faq/general/) +{## [Artículo Original](https://clickhouse.tech/docs/en/faq/general/) ##} diff --git a/docs/es/faq/index.md b/docs/es/faq/index.md new file mode 100644 index 00000000000..05863c71f9a --- /dev/null +++ b/docs/es/faq/index.md @@ -0,0 +1,8 @@ +--- +machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_folder_title: F.A.Q.
toc_priority: 76 +--- + + diff --git a/docs/es/getting_started/example_datasets/amplab_benchmark.md b/docs/es/getting_started/example_datasets/amplab_benchmark.md index b59d3ff923d..79c2c6aea23 100644 --- a/docs/es/getting_started/example_datasets/amplab_benchmark.md +++ b/docs/es/getting_started/example_datasets/amplab_benchmark.md @@ -1,12 +1,15 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 17 +toc_title: Referencia de Big Data de AMPLab --- # Referencia de Big Data de AMPLab {#amplab-big-data-benchmark} Ver https://amplab.cs.berkeley.edu/benchmark/ -Regístrese para obtener una cuenta gratuita en https://aws.amazon.com. Necesitará una tarjeta de crédito, correo electrónico y número de teléfono. Obtenga una nueva clave de acceso en https://console.aws.amazon.com/iam/home?nc2=h\_m\_sc\#security\_credential +Regístrese para obtener una cuenta gratuita en https://aws.amazon.com. Requiere una tarjeta de crédito, correo electrónico y número de teléfono. Obtenga una nueva clave de acceso en https://console.aws.amazon.com/iam/home?nc2=h\_m\_sc\#security\_credential Ejecute lo siguiente en la consola: @@ -123,4 +126,4 @@ ORDER BY totalRevenue DESC LIMIT 1 ``` -[Artículo Original](https://clickhouse.tech/docs/es/getting_started/example_datasets/amplab_benchmark/) +[Artículo Original](https://clickhouse.tech/docs/en/getting_started/example_datasets/amplab_benchmark/) diff --git a/docs/es/getting_started/example_datasets/criteo.md b/docs/es/getting_started/example_datasets/criteo.md index 37f4a5589b6..c501599bcc0 100644 --- a/docs/es/getting_started/example_datasets/criteo.md +++ b/docs/es/getting_started/example_datasets/criteo.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 19 +toc_title: Registros de clics de Terabyte de Criteo --- # Terabyte de registros de clics de Criteo {#terabyte-of-click-logs-from-criteo} @@ -75,4 +78,4 @@ INSERT INTO criteo SELECT date, clicked, int1, int2, int3, int4, int5, int6, int DROP TABLE criteo_log; ``` -[Artículo Original](https://clickhouse.tech/docs/es/getting_started/example_datasets/criteo/) +[Artículo Original](https://clickhouse.tech/docs/en/getting_started/example_datasets/criteo/) diff --git a/docs/es/getting_started/example_datasets/index.md b/docs/es/getting_started/example_datasets/index.md deleted file mode 120000 index c891314f915..00000000000 --- a/docs/es/getting_started/example_datasets/index.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/getting_started/example_datasets/index.md \ No newline at end of file diff --git a/docs/es/getting_started/example_datasets/index.md b/docs/es/getting_started/example_datasets/index.md new file mode 100644 index 00000000000..7234cbbff39 --- /dev/null +++ b/docs/es/getting_started/example_datasets/index.md @@ -0,0 +1,22 @@ +--- +machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_folder_title: Example Datasets +toc_priority: 12 +toc_title: "Introducci\xF3n" +--- + +# Datos de ejemplo {#example-datasets} + +En esta sección se describe cómo obtener conjuntos de datos de ejemplo e importarlos a ClickHouse. +Para algunos conjuntos de datos también están disponibles consultas de ejemplo.
+ +- [Yandex anonimizado.Conjunto de datos de Metrica](metrica.md) +- [Estrella Schema Benchmark](star_schema.md) +- [WikiStat](wikistat.md) +- [Terabyte de registros de clics de Criteo](criteo.md) +- [Referencia de Big Data de AMPLab](amplab_benchmark.md) +- [Datos de taxis de Nueva York](nyc_taxi.md) +- [A tiempo](ontime.md) + +[Artículo Original](https://clickhouse.tech/docs/en/getting_started/example_datasets) diff --git a/docs/es/getting_started/example_datasets/metrica.md b/docs/es/getting_started/example_datasets/metrica.md index 4c74ebce034..de0e8f41489 100644 --- a/docs/es/getting_started/example_datasets/metrica.md +++ b/docs/es/getting_started/example_datasets/metrica.md @@ -1,12 +1,15 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 21 +toc_title: El Yandex.Metrica Datos --- # Yandex anonimizado.Metrica Datos {#anonymized-yandex-metrica-data} El conjunto de datos consta de dos tablas que contienen datos anónimos sobre los hits (`hits_v1`) y visitas (`visits_v1`) el Yandex.Métrica. Puedes leer más sobre Yandex.Metrica en [Historial de ClickHouse](../../introduction/history.md) apartado. -El conjunto de datos consta de dos tablas, cualquiera de ellas se puede descargar como `tsv.xz` o como particiones preparadas. Además, una versión extendida de la `hits` La tabla que contiene 100 millones de filas está disponible como TSV en https://clickhouse-datasets.s3.yandex.net/hits/tsv/hits_100m_obfuscated_v1.tsv.xz y como particiones preparadas en https://clickhouse-datasets.s3.yandex.net/hits/partitions/hits_100m_obfuscated\_v1.tar.xz. +El conjunto de datos consta de dos tablas, cualquiera de ellas se puede descargar como `tsv.xz` o como particiones preparadas. Además, una versión extendida de la tabla `hits`, que contiene 100 millones de filas, está disponible como TSV en https://clickhouse-datasets.s3.yandex.net/hits/tsv/hits\_100m\_obfuscated\_v1.tsv.xz y como particiones preparadas en https://clickhouse-datasets.s3.yandex.net/hits/partitions/hits\_100m\_obfuscated\_v1.tar.xz. ## Obtención de tablas a partir de particiones preparadas {#obtaining-tables-from-prepared-partitions} @@ -64,4 +67,4 @@ clickhouse-client --query "SELECT COUNT(*) FROM datasets.visits_v1" [Tutorial de ClickHouse](../../getting_started/tutorial.md) se basa en Yandex.El conjunto de datos de Metrica y la forma recomendada de comenzar con este conjunto de datos es simplemente pasar por el tutorial. -Se pueden encontrar ejemplos adicionales de consultas a estas tablas entre [pruebas estatales](https://github.com/ClickHouse/ClickHouse/tree/master/tests/queries/1_stateful) de ClickHouse (se nombran `test.hists` y `test.visits` Todos los derechos reservados. +Se pueden encontrar ejemplos adicionales de consultas a estas tablas entre [pruebas estatales](https://github.com/ClickHouse/ClickHouse/tree/master/tests/queries/1_stateful) de ClickHouse (se nombran `test.hits` y `test.visits` allí).
diff --git a/docs/es/getting_started/example_datasets/nyc_taxi.md b/docs/es/getting_started/example_datasets/nyc_taxi.md index a0fa9ba13cf..47d54519da6 100644 --- a/docs/es/getting_started/example_datasets/nyc_taxi.md +++ b/docs/es/getting_started/example_datasets/nyc_taxi.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 16 +toc_title: Datos de taxis de Nueva York --- # Datos de taxis de Nueva York {#new-york-taxi-data} @@ -279,7 +282,7 @@ SELECT formatReadableSize(sum(bytes)) FROM system.parts WHERE table = 'trips_mer Entre otras cosas, puede ejecutar la consulta OPTIMIZE en MergeTree. Pero no es necesario ya que todo estará bien sin él. -## Descarga de Particiones Preparadas {#download-of-prepared-partitions} +## Descarga de particiones preparadas {#download-of-prepared-partitions} ``` bash $ curl -O https://clickhouse-datasets.s3.yandex.net/trips_mergetree/partitions/trips_mergetree.tar @@ -378,10 +381,10 @@ Ejecutamos consultas utilizando un cliente ubicado en un centro de datos de Yand ## Resumen {#summary} -| servidor | Q1 | Q2 | Q3 | Q4 | -|-----------------|------------------|---------------------|-----------------|---------| -| Uno | 0.490 | Ciudad de México | Más información | 3.593 | -| Cómo hacer | Número de modelo | Código del artículo | 0.733 | Puertas | -| Más información | 0.028 | 0.043 | 0.051 | 0.072 | +| servidor | Q1 | Q2 | Q3 | Q4 | +|----------|-------|-------|-------|-------| +| 1 | 0.490 | 1.224 | 2.104 | 3.593 | +| 3 | 0.212 | 0.438 | 0.733 | 1.241 | +| 140 | 0.028 | 0.043 | 0.051 | 0.072 | -[Artículo Original](https://clickhouse.tech/docs/es/getting_started/example_datasets/nyc_taxi/) +[Artículo Original](https://clickhouse.tech/docs/en/getting_started/example_datasets/nyc_taxi/) diff --git a/docs/es/getting_started/example_datasets/ontime.md b/docs/es/getting_started/example_datasets/ontime.md index 671132dbed5..b471d784138 100644 --- a/docs/es/getting_started/example_datasets/ontime.md +++ b/docs/es/getting_started/example_datasets/ontime.md @@ -1,8 +1,11 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 15 +toc_title: A tiempo --- -# Un tiempo {#ontime} +# A tiempo {#ontime} Este conjunto de datos se puede obtener de dos maneras: @@ -150,7 +153,7 @@ Carga de datos: $ for i in *.zip; do echo $i; unzip -cq $i '*.csv' | sed 's/\.00//g' | clickhouse-client --host=example-perftest01j --query="INSERT INTO ontime FORMAT CSVWithNames"; done ``` -## Descarga de Particiones Preparadas {#download-of-prepared-partitions} +## Descarga de particiones preparadas {#download-of-prepared-partitions} ``` bash $ curl -O https://clickhouse-datasets.s3.yandex.net/ontime/partitions/ontime.tar @@ -254,7 +257,7 @@ GROUP BY Carrier ORDER BY c3 DESC ``` -¿Por qué? La solicitud anterior de una gama más amplia de años, 2000-2008 +Q6. La solicitud anterior de una gama más amplia de años, 2000-2008 ``` sql SELECT Carrier, c, c2, c*100/c2 as c3 @@ -323,7 +326,7 @@ GROUP BY Year ORDER BY Year; ``` -¿Por qué? Los destinos más populares por el número de ciudades conectadas directamente para varios rangos de año +Q8. Los destinos más populares por el número de ciudades conectadas directamente para varios rangos de año ``` sql SELECT DestCityName, uniqExact(OriginCityName) AS u @@ -333,7 +336,7 @@ GROUP BY DestCityName ORDER BY u DESC LIMIT 10; ``` -¿Por qué? +Q9. ``` sql SELECT Year, count(*) AS c1 FROM ontime GROUP BY Year; ``` -Preguntas frecuentes +Q10.
``` sql SELECT Year, count(*) AS c1 @@ -341,7 +344,7 @@ FROM ontime GROUP BY Year; ``` -Preguntas frecuentes +Q10. ``` sql SELECT @@ -406,4 +409,4 @@ Esta prueba de rendimiento fue creada por Vadim Tkachenko. Ver: - https://www.percona.com/blog/2016/01/07/apache-spark-with-air-ontime-performance-data/ - http://nickmakos.blogspot.ru/2012/08/analyzing-air-traffic-performance-with.html -[Artículo Original](https://clickhouse.tech/docs/es/getting_started/example_datasets/ontime/) +[Artículo Original](https://clickhouse.tech/docs/en/getting_started/example_datasets/ontime/) diff --git a/docs/es/getting_started/example_datasets/star_schema.md b/docs/es/getting_started/example_datasets/star_schema.md index 88339d02c6f..ebc18b94fd6 100644 --- a/docs/es/getting_started/example_datasets/star_schema.md +++ b/docs/es/getting_started/example_datasets/star_schema.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 20 +toc_title: Estrella Schema Benchmark --- # Estrella Schema Benchmark {#star-schema-benchmark} @@ -15,7 +18,7 @@ $ make Generación de datos: !!! warning "Atención" - Desventaja `-s 100` dbgen genera 600 millones de filas (67 GB), mientras que `-s 1000` genera 6 mil millones de filas (lo que lleva mucho tiempo) + Con `-s 100` dbgen genera 600 millones de filas (67 GB), mientras que `-s 1000` genera 6 mil millones de filas (lo que lleva mucho tiempo) ``` bash $ ./dbgen -s 1000 -T c @@ -90,7 +93,7 @@ CREATE TABLE supplier ENGINE = MergeTree ORDER BY S_SUPPKEY; ``` -Inserte datos: +Insertar datos: ``` bash $ clickhouse-client --query "INSERT INTO customer FORMAT CSV" < customer.tbl @@ -155,7 +158,7 @@ INNER JOIN part AS p ON p.P_PARTKEY = l.LO_PARTKEY; Las consultas: -Número de teléfono +Q1.1 ``` sql SELECT sum(LO_EXTENDEDPRICE * LO_DISCOUNT) AS revenue @@ -163,7 +166,7 @@ FROM lineorder_flat WHERE toYear(LO_ORDERDATE) = 1993 AND LO_DISCOUNT BETWEEN 1 AND 3 AND LO_QUANTITY < 25; ``` -¿Qué puedes encontrar en Neodigit +Q1.2 ``` sql SELECT sum(LO_EXTENDEDPRICE * LO_DISCOUNT) AS revenue @@ -171,7 +174,7 @@ FROM lineorder_flat WHERE toYYYYMM(LO_ORDERDATE) = 199401 AND LO_DISCOUNT BETWEEN 4 AND 6 AND LO_QUANTITY BETWEEN 26 AND 35; ``` -¿Qué puedes encontrar en Neodigit +Q1.3 ``` sql SELECT sum(LO_EXTENDEDPRICE * LO_DISCOUNT) AS revenue @@ -180,7 +183,7 @@ WHERE toISOWeek(LO_ORDERDATE) = 6 AND toYear(LO_ORDERDATE) = 1994 AND LO_DISCOUNT BETWEEN 5 AND 7 AND LO_QUANTITY BETWEEN 26 AND 35; ``` -Preguntas frecuentes +Q2.1 ``` sql SELECT @@ -197,7 +200,7 @@ ORDER BY P_BRAND; ``` -Preguntas frecuentes +Q2.2 ``` sql SELECT @@ -214,7 +217,7 @@ ORDER BY P_BRAND; ``` -Preguntas más frecuentes +Q2.3 ``` sql SELECT @@ -231,7 +234,7 @@ ORDER BY P_BRAND; ``` -¿Qué puedes encontrar en Neodigit +Q3.1 ``` sql SELECT @@ -250,7 +253,7 @@ ORDER BY revenue DESC; ``` -¿Qué puedes encontrar en Neodigit +Q3.2 ``` sql SELECT @@ -269,7 +272,7 @@ ORDER BY revenue DESC; ``` -¿Qué puedes encontrar en Neodigit +Q3.3 ``` sql SELECT @@ -288,7 +291,7 @@ ORDER BY revenue DESC; ``` -¿Qué puedes encontrar en Neodigit +Q3.4 ``` sql SELECT @@ -307,7 +310,7 @@ ORDER BY revenue DESC; ``` -Preguntas más frecuentes +Q4.1 ``` sql SELECT @@ -324,7 +327,7 @@ ORDER BY C_NATION ASC; ``` -Preguntas más frecuentes +Q4.2 ``` sql SELECT @@ -344,7 +347,7 @@ ORDER BY P_CATEGORY ASC; ``` -Preguntas más frecuentes +Q4.3 ``` sql SELECT @@ -364,4 +367,4 @@ ORDER BY P_BRAND ASC; ``` -[Artículo Original](https://clickhouse.tech/docs/es/getting_started/example_datasets/star_schema/) 
+[Artículo Original](https://clickhouse.tech/docs/en/getting_started/example_datasets/star_schema/) diff --git a/docs/es/getting_started/example_datasets/wikistat.md b/docs/es/getting_started/example_datasets/wikistat.md index c905dc9f742..064656320f6 100644 --- a/docs/es/getting_started/example_datasets/wikistat.md +++ b/docs/es/getting_started/example_datasets/wikistat.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 18 +toc_title: "Nombre de la red inal\xE1mbrica (SSID):" --- # Nombre de la red inalámbrica (SSID): {#wikistat} @@ -29,4 +32,4 @@ $ cat links.txt | while read link; do wget http://dumps.wikimedia.org/other/page $ ls -1 /opt/wikistat/ | grep gz | while read i; do echo $i; gzip -cd /opt/wikistat/$i | ./wikistat-loader --time="$(echo -n $i | sed -r 's/pagecounts-([0-9]{4})([0-9]{2})([0-9]{2})-([0-9]{2})([0-9]{2})([0-9]{2})\.gz/\1-\2-\3 \4-00-00/')" | clickhouse-client --query="INSERT INTO wikistat FORMAT TabSeparated"; done ``` -[Artículo Original](https://clickhouse.tech/docs/es/getting_started/example_datasets/wikistat/) +[Artículo Original](https://clickhouse.tech/docs/en/getting_started/example_datasets/wikistat/) diff --git a/docs/es/getting_started/index.md b/docs/es/getting_started/index.md index 11bc13f2bc6..c1d7973a39c 100644 --- a/docs/es/getting_started/index.md +++ b/docs/es/getting_started/index.md @@ -1,5 +1,10 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_folder_title: Getting Started +toc_hidden: true +toc_priority: 8 +toc_title: oculto --- # Primeros pasos {#getting-started} @@ -9,4 +14,4 @@ Si eres nuevo en ClickHouse y quieres tener una sensación práctica de su rendi - [Ir a través de tutorial detallado](tutorial.md) - [Experimente con conjuntos de datos de ejemplo](example_datasets/ontime.md) -[Artículo Original](https://clickhouse.tech/docs/es/getting_started/) +[Artículo Original](https://clickhouse.tech/docs/en/getting_started/) diff --git a/docs/es/getting_started/install.md b/docs/es/getting_started/install.md index e3997f8d1f5..30c9fd41407 100644 --- a/docs/es/getting_started/install.md +++ b/docs/es/getting_started/install.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 11 +toc_title: "Instalaci\xF3n" --- # Instalación {#installation} @@ -14,21 +17,21 @@ Los binarios oficiales preconstruidos generalmente se compilan para x86\_64 y ap $ grep -q sse4_2 /proc/cpuinfo && echo "SSE 4.2 supported" || echo "SSE 4.2 not supported" ``` -Para ejecutar ClickHouse en procesadores que no admiten SSE 4.2 o tienen arquitectura AArch64 o PowerPC64LE, debe [Construir ClickHouse a partir de fuentes](#from-sources) con los ajustes de configuración adecuados. +Para ejecutar ClickHouse en procesadores que no admiten SSE 4.2 o tienen arquitectura AArch64 o PowerPC64LE, debe [construir ClickHouse a partir de fuentes](#from-sources) con los ajustes de configuración adecuados. ## Opciones de instalación disponibles {#available-installation-options} ### De paquetes DEB {#install-from-deb-packages} -Se recomienda utilizar pre-compilado oficial `deb` " resultados de la búsqueda relacionados +Se recomienda utilizar pre-compilado oficial `deb` Paquetes para Debian o Ubuntu. 
Para instalar paquetes oficiales, agregue el repositorio de Yandex en `/etc/apt/sources.list` o en otra `/etc/apt/sources.list.d/clickhouse.list` file: deb http://repo.clickhouse.tech/deb/stable/ main/ -Si desea utilizar la versión más reciente, reemplace `stable` desventaja `testing` (esto se recomienda para sus entornos de prueba). +Si desea utilizar la versión más reciente, reemplace `stable` con `testing` (esto se recomienda para sus entornos de prueba). -A continuación, ejecute estos comandos para instalar realmente los paquetes: +A continuación, ejecute estos comandos para instalar paquetes: ``` bash sudo apt-get install dirmngr # optional @@ -42,13 +45,13 @@ También puede descargar e instalar paquetes manualmente desde aquí: https://re #### Paquete {#packages} - `clickhouse-common-static` — Installs ClickHouse compiled binary files. -- `clickhouse-server` — Creates a symbolic link for `clickhouse-server`. Instala la configuración del servidor. -- `clickhouse-client` — Creates a symbolic link for `clickhouse-client` y otras herramientas relacionadas con el cliente. Instala configuraciones de cliente. +- `clickhouse-server` — Creates a symbolic link for `clickhouse-server` e instala la configuración predeterminada del servidor. +- `clickhouse-client` — Creates a symbolic link for `clickhouse-client` y otras herramientas relacionadas con el cliente. e instala los archivos de configuración del cliente. - `clickhouse-common-static-dbg` — Installs ClickHouse compiled binary files with debug info. ### De paquetes RPM {#from-rpm-packages} -Se recomienda utilizar pre-compilado oficial `rpm` paquetes para CentOS, RedHat y todas las demás distribuciones de Linux basadas en rpm. +Se recomienda utilizar pre-compilado oficial `rpm` También puede utilizar los paquetes para CentOS, RedHat y todas las demás distribuciones de Linux basadas en rpm. Primero, necesitas agregar el repositorio oficial: @@ -58,7 +61,7 @@ sudo rpm --import https://repo.clickhouse.tech/CLICKHOUSE-KEY.GPG sudo yum-config-manager --add-repo https://repo.clickhouse.tech/rpm/stable/x86_64 ``` -Si desea utilizar la versión más reciente, reemplace `stable` desventaja `testing` (esto se recomienda para sus entornos de prueba). El `prestable` etiqueta a veces está disponible también. +Si desea utilizar la versión más reciente, reemplace `stable` con `testing` (esto se recomienda para sus entornos de prueba). El `prestable` etiqueta a veces está disponible también. A continuación, ejecute estos comandos para instalar paquetes: @@ -98,7 +101,7 @@ sudo clickhouse-client-$LATEST_VERSION/install/doinst.sh Para los entornos de producción, se recomienda utilizar las últimas `stable`-versión. Puede encontrar su número en la página de GitHub https://github.com/ClickHouse/ClickHouse/tags con postfix `-stable`. -### Imagen de Desde Docker {#from-docker-image} +### Desde Docker Image {#from-docker-image} Para ejecutar ClickHouse dentro de Docker, siga la guía en [Eje de acoplador](https://hub.docker.com/r/yandex/clickhouse-server/). Esas imágenes usan oficial `deb` paquetes dentro. @@ -111,7 +114,7 @@ Puede compilar paquetes e instalarlos o usar programas sin instalar paquetes. Ad Client: programs/clickhouse-client Server: programs/clickhouse-server -Tendrá que crear carpetas de datos y metadatos y `chown` para el usuario deseado. Sus rutas se pueden cambiar en la configuración del servidor (src/programs/server/config .xml), por defecto son: +Tendrá que crear carpetas de datos y metadatos y `chown` para el usuario deseado. 
Sus rutas se pueden cambiar en la configuración del servidor (src/programs/server/config.xml), por defecto son: /opt/clickhouse/data/default/ /opt/clickhouse/metadata/default/ @@ -132,7 +135,7 @@ Si no tienes `service` comando ejecutar como $ sudo /etc/init.d/clickhouse-server start ``` -Vea los registros en el `/var/log/clickhouse-server/` Directorio. +Vea los registros en el `/var/log/clickhouse-server/` directorio. Si el servidor no se inicia, compruebe las configuraciones en el archivo `/etc/clickhouse-server/config.xml`. @@ -145,7 +148,7 @@ $ clickhouse-server --config-file=/etc/clickhouse-server/config.xml En este caso, el registro se imprimirá en la consola, lo cual es conveniente durante el desarrollo. Si el archivo de configuración está en el directorio actual, no es necesario `--config-file` parámetro. De forma predeterminada, utiliza `./config.xml`. -ClickHouse admite la configuración de restricción de acceso. Están ubicados en el `users.xml` Archivo (junto a `config.xml`). +ClickHouse admite la configuración de restricción de acceso. Están ubicados en el `users.xml` archivo (junto a `config.xml`). De forma predeterminada, se permite el acceso desde cualquier lugar `default` usuario, sin una contraseña. Ver `user/default/networks`. Para obtener más información, consulte la sección [“Configuration Files”](../operations/configuration_files.md). @@ -155,7 +158,7 @@ Después de iniciar el servidor, puede usar el cliente de línea de comandos par $ clickhouse-client ``` -Por defecto, se conecta a `localhost:9000` es nombre del usuario `default` sin una contraseña. También se puede usar para conectarse a un servidor remoto usando `--host` argumento. +Por defecto, se conecta a `localhost:9000` en nombre del usuario `default` sin una contraseña. También se puede usar para conectarse a un servidor remoto usando `--host` argumento. El terminal debe usar codificación UTF-8. Para obtener más información, consulte la sección [“Command-line client”](../interfaces/cli.md). @@ -185,4 +188,4 @@ SELECT 1 Para continuar experimentando, puede descargar uno de los conjuntos de datos de prueba o pasar por [tutorial](https://clickhouse.tech/tutorial.html). -[Artículo Original](https://clickhouse.tech/docs/es/getting_started/install/) +[Artículo Original](https://clickhouse.tech/docs/en/getting_started/install/) diff --git a/docs/es/getting_started/playground.md b/docs/es/getting_started/playground.md deleted file mode 120000 index de5b41f453e..00000000000 --- a/docs/es/getting_started/playground.md +++ /dev/null @@ -1 +0,0 @@ -../../en/getting_started/playground.md \ No newline at end of file diff --git a/docs/es/getting_started/playground.md b/docs/es/getting_started/playground.md new file mode 100644 index 00000000000..1314ca679b5 --- /dev/null +++ b/docs/es/getting_started/playground.md @@ -0,0 +1,48 @@ +--- +machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 14 +toc_title: Infantil +--- + +# Zona de juegos ClickHouse {#clickhouse-playground} + +[Zona de juegos ClickHouse](https://play.clickhouse.tech?file=welcome) permite a las personas experimentar con ClickHouse ejecutando consultas al instante, sin configurar su servidor o clúster. +Varios conjuntos de datos de ejemplo están disponibles en Playground, así como consultas de ejemplo que muestran las características de ClickHouse. + +Las consultas se ejecutan como un usuario de sólo lectura. 
Implica algunas limitaciones: + +- No se permiten consultas DDL +- Las consultas INSERT no están permitidas + +También se aplican los siguientes valores: +- [`max_result_bytes=10485760`](../operations/settings/query_complexity/#max-result-bytes) +- [`max_result_rows=2000`](../operations/settings/query_complexity/#setting-max_result_rows) +- [`result_overflow_mode=break`](../operations/settings/query_complexity/#result-overflow-mode) +- [`max_execution_time=60000`](../operations/settings/query_complexity/#max-execution-time) + +ClickHouse Playground da la experiencia de m2.pequeño +[Servicio administrado para ClickHouse](https://cloud.yandex.com/services/managed-clickhouse) +instancia alojada en [El Yandex.Nube](https://cloud.yandex.com/). +Más información sobre [proveedores de la nube](../commercial/cloud.md). + +La interfaz web de ClickHouse Playground realiza solicitudes a través de ClickHouse [HTTP API](../interfaces/http.md). +El backend Playground es solo un clúster ClickHouse sin ninguna aplicación adicional del lado del servidor. +El punto final HTTPS de ClickHouse también está disponible como parte de Playground. + +Puede realizar consultas al patio de recreo utilizando cualquier cliente HTTP, por ejemplo [rizo](https://curl.haxx.se) o [wget](https://www.gnu.org/software/wget/), o configurar una conexión usando [JDBC](../interfaces/jdbc.md) o [ODBC](../interfaces/odbc.md) controlador. +Más información sobre los productos de software compatibles con ClickHouse está disponible [aqui](../interfaces/index.md). + +| Parámetro | Valor | +|:------------|:----------------------------------------------| +| Punto final | https://play-api.casa de clic.tecnología:8443 | +| Usuario | `playground` | +| Contraseña | `clickhouse` | + +Tenga en cuenta que este extremo requiere una conexión segura. + +Ejemplo: + +``` bash +curl "https://play-api.clickhouse.tech:8443/?query=SELECT+'Play+ClickHouse!';&user=playground&password=clickhouse&database=datasets" +``` diff --git a/docs/es/getting_started/tutorial.md b/docs/es/getting_started/tutorial.md index 3516b70ee8c..c9580e4d245 100644 --- a/docs/es/getting_started/tutorial.md +++ b/docs/es/getting_started/tutorial.md @@ -1,16 +1,19 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 12 +toc_title: Tutorial --- # Tutorial de ClickHouse {#clickhouse-tutorial} ## Qué Esperar de Este Tutorial? {#what-to-expect-from-this-tutorial} -Al pasar por este tutorial, aprenderá cómo configurar el clúster básico de ClickHouse, será pequeño, pero tolerante a fallos y escalable. Usaremos uno de los conjuntos de datos de ejemplo para llenarlo con datos y ejecutar algunas consultas de demostración. +Al pasar por este tutorial, aprenderá cómo configurar un clúster de ClickHouse simple. Será pequeño, pero tolerante a fallos y escalable. Luego usaremos uno de los conjuntos de datos de ejemplo para llenarlo con datos y ejecutar algunas consultas de demostración. ## Configuración de nodo único {#single-node-setup} -Para posponer las complejidades del entorno distribuido, comenzaremos con la implementación de ClickHouse en un único servidor o máquina virtual. ClickHouse generalmente se instala desde [deb](index.md#install-from-deb-packages) o [RPM](index.md#from-rpm-packages) paquetes, pero hay [alternativa](index.md#from-docker-image) para los sistemas operativos que no los admiten. 
+Para posponer las complejidades de un entorno distribuido, comenzaremos con la implementación de ClickHouse en un único servidor o máquina virtual. ClickHouse generalmente se instala desde [deb](index.md#install-from-deb-packages) o [RPM](index.md#from-rpm-packages) paquetes, pero hay [alternativa](index.md#from-docker-image) para los sistemas operativos que no los admiten. Por ejemplo, ha elegido `deb` paquetes y ejecutado: @@ -24,15 +27,15 @@ sudo apt-get update sudo apt-get install -y clickhouse-server clickhouse-client ``` -¿Qué tenemos en los paquetes que tengo instalados: +¿qué tenemos en los paquetes que tengo instalados: - `clickhouse-client` el paquete contiene [Casa de clics-cliente](../interfaces/cli.md) aplicación, cliente interactivo de la consola ClickHouse. - `clickhouse-common` El paquete contiene un archivo ejecutable ClickHouse. - `clickhouse-server` El paquete contiene archivos de configuración para ejecutar ClickHouse como servidor. -Los archivos de configuración del servidor se encuentran en `/etc/clickhouse-server/`. Antes de ir más lejos, tenga en cuenta el `` elemento en `config.xml`. La ruta determina la ubicación para el almacenamiento de datos, por lo que debe ubicarse en un volumen con gran capacidad de disco, el valor predeterminado es `/var/lib/clickhouse/`. Si desea ajustar la configuración, no es útil editar directamente `config.xml` archivo, teniendo en cuenta que podría ser reescrito en futuras actualizaciones de paquetes. La forma recomendada de anular los elementos de configuración es crear [archivos en config.directorio d](../operations/configuration_files.md) que sirven como “patches” de configuración.XML. +Los archivos de configuración del servidor se encuentran en `/etc/clickhouse-server/`. Antes de ir más lejos, tenga en cuenta el `` elemento en `config.xml`. La ruta determina la ubicación para el almacenamiento de datos, por lo que debe ubicarse en un volumen con gran capacidad de disco; el valor predeterminado es `/var/lib/clickhouse/`. Si desea ajustar la configuración, no es útil editar directamente `config.xml` archivo, teniendo en cuenta que podría ser reescrito en futuras actualizaciones de paquetes. La forma recomendada de anular los elementos de configuración es crear [archivos en config.directorio d](../operations/configuration_files.md) que sirven como “patches” de configuración.XML. -Como habrás notado, `clickhouse-server` no se inicia automáticamente después de la instalación del paquete. Tampoco se reiniciará automáticamente después de las actualizaciones. La forma en que inicia el servidor depende de su sistema de inicio, generalmente, es: +Como habrás notado, `clickhouse-server` no se inicia automáticamente después de la instalación del paquete. Tampoco se reiniciará automáticamente después de las actualizaciones. La forma en que inicia el servidor depende de su sistema de inicio, por lo general, es: ``` bash sudo service clickhouse-server start @@ -44,7 +47,7 @@ o sudo /etc/init.d/clickhouse-server start ``` -La ubicación predeterminada para los registros del servidor es `/var/log/clickhouse-server/`. El servidor estará listo para manejar las conexiones de cliente una vez `Ready for connections` se registró el mensaje. +La ubicación predeterminada para los registros del servidor es `/var/log/clickhouse-server/`. El servidor está listo para manejar las conexiones de cliente una vez que registra el `Ready for connections` mensaje. 
Una vez que el `clickhouse-server` está en funcionamiento, podemos usar `clickhouse-client` para conectarse al servidor y ejecutar algunas consultas de prueba como `SELECT "Hello, world!";`. @@ -84,7 +87,7 @@ clickhouse-client --query='INSERT INTO table FORMAT TabSeparated' < data.tsv ## Importar conjunto de datos de muestra {#import-sample-dataset} -Ahora es el momento de llenar nuestro servidor ClickHouse con algunos datos de muestra. En este tutorial, usaremos datos anónimos de Yandex.Metrica, el primer servicio que ejecuta ClickHouse en forma de producción antes de que se convirtiera en código abierto (más sobre eso en [sección de historia](../introduction/history.md)). Hay [múltiples formas de importar Yandex.Conjunto de datos de Metrica](example_datasets/metrica.md) y por el bien del tutorial, iremos con el más realista. +Ahora es el momento de llenar nuestro servidor ClickHouse con algunos datos de muestra. En este tutorial, usaremos los datos anónimos de Yandex.Metrica, el primer servicio que ejecuta ClickHouse en forma de producción antes de que se convirtiera en código abierto (más sobre eso en [sección de historia](../introduction/history.md)). Hay [múltiples formas de importar Yandex.Conjunto de datos de Metrica](example_datasets/metrica.md), y por el bien del tutorial, iremos con el más realista. ### Descargar y extraer datos de tabla {#download-and-extract-table-data} @@ -97,19 +100,19 @@ Los archivos extraídos tienen un tamaño de aproximadamente 10 GB. ### Crear tablas {#create-tables} -Las tablas se agrupan lógicamente en “databases”. Hay un `default` base de datos, pero crearemos una nueva llamada `tutorial`: +Como en la mayoría de los sistemas de gestión de bases de datos, ClickHouse agrupa lógicamente las tablas en “databases”. Hay un `default` base de datos, pero crearemos una nueva llamada `tutorial`: ``` bash clickhouse-client --query "CREATE DATABASE IF NOT EXISTS tutorial" ``` -La sintaxis para crear tablas es mucho más complicada en comparación con las bases de datos (ver [referencia](../query_language/create.md). En general `CREATE TABLE` declaración tiene que especificar tres cosas clave: +La sintaxis para crear tablas es mucho más complicada en comparación con las bases de datos (ver [referencia](../sql_reference/statements/create.md). En general `CREATE TABLE` declaración tiene que especificar tres cosas clave: 1. Nombre de la tabla que se va a crear. -2. Table schema, i.e. list of columns and their [tipos de datos](../data_types/index.md). -3. [Motor de tabla](../operations/table_engines/index.md) y su configuración, que determina todos los detalles sobre cómo se ejecutarán físicamente las consultas a esta tabla. +2. Table schema, i.e. list of columns and their [tipos de datos](../sql_reference/data_types/index.md). +3. [Motor de tabla](../engines/table_engines/index.md) y su configuración, que determina todos los detalles sobre cómo se ejecutarán físicamente las consultas a esta tabla. -El Yandex.Metrica es un servicio de análisis web y el conjunto de datos de muestra no cubre toda su funcionalidad, por lo que solo hay dos tablas para crear: +El Yandex.Metrica es un servicio de análisis web, y el conjunto de datos de muestra no cubre toda su funcionalidad, por lo que solo hay dos tablas para crear: - `hits` es una tabla con cada acción realizada por todos los usuarios en todos los sitios web cubiertos por el servicio. - `visits` es una tabla que contiene sesiones precompiladas en lugar de acciones individuales. 
@@ -459,11 +462,11 @@ SETTINGS index_granularity = 8192
 
 Puede ejecutar esas consultas utilizando el modo interactivo de `clickhouse-client` (simplemente ejecútelo en un terminal sin especificar una consulta por adelantado) o pruebe algunos [interfaz alternativa](../interfaces/index.md) Si quieres.
 
-Como podemos ver, `hits_v1` utiliza el [motor básico MergeTree](../operations/table_engines/mergetree.md), mientras que el `visits_v1` utiliza el [Derrumbar](../operations/table_engines/collapsingmergetree.md) variante.
+Como podemos ver, `hits_v1` utiliza el [motor básico MergeTree](../engines/table_engines/mergetree_family/mergetree.md), mientras que el `visits_v1` utiliza el [Derrumbar](../engines/table_engines/mergetree_family/collapsingmergetree.md) variante.
 
 ### Importar datos {#import-data}
 
-La importación de datos a ClickHouse se realiza a través de [INSERTAR EN](../query_language/insert_into.md) consulta como en muchas otras bases de datos SQL. Sin embargo, los datos generalmente se proporcionan en uno de los [Formatos soportados](../interfaces/formats.md) en lugar de `VALUES` cláusula (que también es compatible).
+La importación de datos a ClickHouse se realiza a través de [INSERT INTO](../sql_reference/statements/insert_into.md) consulta como en muchas otras bases de datos SQL. Sin embargo, los datos generalmente se proporcionan en uno de los [Formatos de serialización compatibles](../interfaces/formats.md) en lugar de `VALUES` cláusula (que también es compatible).
 
 Los archivos que descargamos anteriormente están en formato separado por tabuladores, así que aquí le mostramos cómo importarlos a través del cliente de la consola:
 
@@ -483,16 +486,16 @@ FORMAT TSV
 max_insert_block_size    1048576    0    "The maximum block size for insertion, if we control the creation of blocks for insertion."
 ```
 
-Opcionalmente se puede [OPTIMIZAR](../query_language/misc/#misc_operations-optimize) las tablas después de la importación. Las tablas que están configuradas con el motor de la familia MergeTree siempre fusionan partes de datos en segundo plano para optimizar el almacenamiento de datos (o al menos verificar si tiene sentido). Estas consultas solo obligarán al motor de tablas a realizar la optimización del almacenamiento en este momento en lugar de algún tiempo después:
+Opcionalmente se puede [OPTIMIZE](../query_language/misc/#misc_operations-optimize) las tablas después de la importación. Las tablas que están configuradas con un motor de la familia MergeTree siempre fusionan partes de datos en segundo plano para optimizar el almacenamiento de datos (o al menos verificar si tiene sentido). Estas consultas obligan al motor de tablas a realizar la optimización del almacenamiento en este momento en lugar de algún tiempo después:
 
 ``` bash
 clickhouse-client --query "OPTIMIZE TABLE tutorial.hits_v1 FINAL"
 clickhouse-client --query "OPTIMIZE TABLE tutorial.visits_v1 FINAL"
 ```
 
-Esta es una operación intensiva de E / S y CPU, por lo que si la tabla recibe constantemente datos nuevos, es mejor dejarlo solo y dejar que las fusiones se ejecuten en segundo plano.
+Estas consultas inician una operación intensiva de E / S y CPU, por lo que si la tabla recibe datos nuevos de manera consistente, es mejor dejarlos solos y dejar que las fusiones se ejecuten en segundo plano.
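The `OPTIMIZE ... FINAL` calls above force merges to run immediately, and their progress can be observed while they work. A sketch using the standard `system.merges` table (the `WHERE` filter is illustrative):

``` bash
# Watch the merges triggered by OPTIMIZE ... FINAL (sketch):
clickhouse-client --query "
SELECT database, table, elapsed, progress
FROM system.merges
WHERE database = 'tutorial'
"
```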
-Ahora podemos comprobar que las tablas se han importado correctamente:
+Ahora podemos comprobar si la importación de la tabla fue exitosa:
 
 ``` bash
 clickhouse-client --query "SELECT COUNT(*) FROM tutorial.hits_v1"
@@ -528,9 +531,9 @@ El clúster ClickHouse es un clúster homogéneo. Pasos para configurar:
 1. Instale el servidor ClickHouse en todas las máquinas del clúster
 2. Configurar configuraciones de clúster en archivos de configuración
 3. Crear tablas locales en cada instancia
-4. Crear un [Tabla distribuida](../operations/table_engines/distributed.md)
+4. Crear un [Tabla distribuida](../engines/table_engines/special/distributed.md)
 
-[Tabla distribuida](../operations/table_engines/distributed.md) es en realidad una especie de “view” a las tablas locales del clúster ClickHouse. La consulta SELECT de una tabla distribuida se ejecutará utilizando recursos de todos los fragmentos del clúster. Puede especificar configuraciones para varios clústeres y crear varias tablas distribuidas que proporcionen vistas a diferentes clústeres.
+[Tabla distribuida](../engines/table_engines/special/distributed.md) es en realidad una especie de “view” a las tablas locales del clúster ClickHouse. La consulta SELECT de una tabla distribuida se ejecuta utilizando recursos de todos los fragmentos del clúster. Puede especificar configuraciones para varios clústeres y crear varias tablas distribuidas que proporcionen vistas a diferentes clústeres.
 
 Ejemplo de configuración para un clúster con tres fragmentos, una réplica cada uno:
 
 ``` xml
 
@@ -559,7 +562,7 @@
 
 ```
 
-Para más demostraciones, creemos una nueva tabla local con la misma `CREATE TABLE` consulta que utilizamos para `hits_v1`, pero nombre de tabla diferente:
+Para más demostraciones, vamos a crear una nueva tabla local con la misma `CREATE TABLE` consulta que utilizamos para `hits_v1`, pero nombre de tabla diferente:
 
 ``` sql
 CREATE TABLE tutorial.hits_local (...) ENGINE = MergeTree() ...
@@ -572,22 +575,22 @@ CREATE TABLE tutorial.hits_all AS tutorial.hits_local
 ENGINE = Distributed(perftest_3shards_1replicas, tutorial, hits_local, rand());
 ```
 
-Una práctica común es crear tablas distribuidas similares en todas las máquinas del clúster. Esto permitiría ejecutar consultas distribuidas en cualquier máquina del clúster. También hay una opción alternativa para crear una tabla distribuida temporal para una consulta SELECT determinada usando [remoto](../query_language/table_functions/remote.md) función de la tabla.
+Una práctica común es crear tablas distribuidas similares en todas las máquinas del clúster. Permite ejecutar consultas distribuidas en cualquier máquina del clúster. También hay una opción alternativa para crear una tabla distribuida temporal para una consulta SELECT determinada usando [remoto](../sql_reference/table_functions/remote.md) función de la tabla.
 
-Vamos a correr [INSERTAR SELECCIONAR](../query_language/insert_into.md) en la tabla Distributed para extender la tabla a varios servidores.
+Vamos a correr [INSERT SELECT](../sql_reference/statements/insert_into.md) en la tabla Distributed para extender la tabla a varios servidores.
 
 ``` sql
 INSERT INTO tutorial.hits_all SELECT * FROM tutorial.hits_v1;
 ```
 
 !!! warning "Aviso"
-    Este enfoque no es adecuado para la fragmentación de tablas grandes. Hay una herramienta separada [Método de codificación de datos:](../operations/utils/clickhouse-copier.md) que puede volver a fragmentar tablas grandes arbitrarias.
+    Este enfoque no es adecuado para la fragmentación de tablas grandes. Hay una herramienta separada [Método de codificación de datos:](../operations/utilities/clickhouse-copier.md) que puede volver a fragmentar tablas grandes arbitrarias.
 
-Como era de esperar, las consultas computacionalmente pesadas se ejecutan N veces más rápido y se lanzan en 3 servidores en lugar de uno.
+Como era de esperar, las consultas computacionalmente pesadas se ejecutan N veces más rápido si utilizan 3 servidores en lugar de uno.
 
-En este caso, hemos utilizado un clúster con 3 fragmentos, cada uno contiene una única réplica.
+En este caso, hemos utilizado un clúster con 3 fragmentos, y cada uno contiene una sola réplica.
 
-Para proporcionar resiliencia en un entorno de producción, recomendamos que cada fragmento contenga 2-3 réplicas distribuidas entre varios centros de datos. Tenga en cuenta que ClickHouse admite un número ilimitado de réplicas.
+Para proporcionar resiliencia en un entorno de producción, se recomienda que cada fragmento contenga 2-3 réplicas distribuidas entre varias zonas de disponibilidad o centros de datos (o al menos racks). Tenga en cuenta que ClickHouse admite un número ilimitado de réplicas.
 
 Ejemplo de configuración para un clúster de un fragmento que contiene tres réplicas:
 
 ``` xml
 
@@ -613,13 +616,12 @@ Ejemplo de configuración para un clúster de un fragmento que contiene tres ré
 
 ```
 
-Para habilitar la replicación nativa ZooKeeper se requiere. ClickHouse se encargará de la coherencia de los datos en todas las réplicas y ejecutará el procedimiento de restauración después de la falla
-automática. Se recomienda implementar el clúster ZooKeeper en servidores separados.
+Para habilitar la replicación nativa [ZooKeeper](http://zookeeper.apache.org/) se requiere. ClickHouse se encarga de la coherencia de los datos en todas las réplicas y ejecuta el procedimiento de restauración después de la falla automáticamente. Se recomienda implementar el clúster ZooKeeper en servidores independientes (donde no se están ejecutando otros procesos, incluido ClickHouse).
 
-ZooKeeper no es un requisito estricto: en algunos casos simples, puede duplicar los datos escribiéndolos en todas las réplicas de su código de aplicación. Este enfoque es **ni** recomendado, en este caso, ClickHouse no podrá
-garantizar la coherencia de los datos en todas las réplicas. Esto sigue siendo responsabilidad de su aplicación.
+!!! note "Nota"
+    ZooKeeper no es un requisito estricto: en algunos casos simples, puede duplicar los datos escribiéndolos en todas las réplicas de su código de aplicación. Este enfoque es **no** recomendado, en este caso, ClickHouse no podrá garantizar la coherencia de los datos en todas las réplicas. Por lo tanto, se convierte en responsabilidad de su aplicación.
 
-Las ubicaciones de ZooKeeper deben especificarse en el archivo de configuración:
+Las ubicaciones de ZooKeeper se especifican en el archivo de configuración:
 
 ``` xml
 
@@ -638,7 +640,7 @@
 
 ```
 
-Además, necesitamos establecer macros para identificar cada fragmento y réplica, se usará en la creación de la tabla:
+Además, necesitamos establecer macros para identificar cada fragmento y réplica que se utilizan en la creación de tablas:
 
 ``` xml
 
@@ -647,7 +649,7 @@
 
 ```
 
-Si no hay réplicas en este momento en la creación de la tabla replicada, se creará una nueva primera réplica. Si ya hay réplicas activas, la nueva réplica clonará los datos de las existentes. Tiene la opción de crear primero todas las tablas replicadas e insertar datos en ella. Otra opción es crear algunas réplicas y agregar las otras después o durante la inserción de datos.
+Si no hay réplicas en este momento en la creación de la tabla replicada, se crea una instancia de una nueva primera réplica. Si ya hay réplicas activas, la nueva réplica clona los datos de las existentes. Tiene la opción de crear primero todas las tablas replicadas y, a continuación, insertar datos en ella. Otra opción es crear algunas réplicas y agregar las otras después o durante la inserción de datos.
 
 ``` sql
 CREATE TABLE tutorial.hits_replica (...)
ENGINE = ReplcatedMergeTree(
...
@@ -658,12 +660,12 @@
...
 ```
 
-Aquí usamos [ReplicatedMergeTree](../operations/table_engines/replication.md) motor de mesa. En los parámetros, especificamos la ruta ZooKeeper que contiene identificadores de fragmentos y réplicas.
+Aquí usamos [ReplicatedMergeTree](../engines/table_engines/mergetree_family/replication.md) motor de mesa. En los parámetros, especificamos la ruta ZooKeeper que contiene identificadores de fragmentos y réplicas.
 
 ``` sql
 INSERT INTO tutorial.hits_replica SELECT * FROM tutorial.hits_local;
 ```
 
-La replicación funciona en modo multi-master. Los datos se pueden cargar en cualquier réplica y se sincronizarán con otras instancias automáticamente. La replicación es asíncrona, por lo que en un momento dado, no todas las réplicas pueden contener datos insertados recientemente. Para permitir la inserción de datos, al menos una réplica debe estar activa. Otros sincronizarán los datos y repararán la coherencia una vez que vuelvan a activarse. Tenga en cuenta que tal enfoque permite la baja posibilidad de una pérdida de datos que acaba de agregar.
+La replicación funciona en modo multi-master. Los datos se pueden cargar en cualquier réplica y el sistema los sincroniza automáticamente con otras instancias. La replicación es asíncrona, por lo que en un momento dado, no todas las réplicas pueden contener datos insertados recientemente. Al menos una réplica debe estar disponible para permitir la ingestión de datos. Otros sincronizarán los datos y repararán la coherencia una vez que vuelvan a activarse. Tenga en cuenta que este enfoque permite la baja posibilidad de una pérdida de datos recientemente insertados.
 
-[Artículo Original](https://clickhouse.tech/docs/es/getting_started/tutorial/)
+[Artículo Original](https://clickhouse.tech/docs/en/getting_started/tutorial/)
diff --git a/docs/es/guides/apply_catboost_model.md b/docs/es/guides/apply_catboost_model.md
index 7c0e2677c82..416ad448da5 100644
--- a/docs/es/guides/apply_catboost_model.md
+++ b/docs/es/guides/apply_catboost_model.md
@@ -1,5 +1,8 @@
 ---
 machine_translated: true
+machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa
+toc_priority: 41
+toc_title: "Aplicaci\xF3n de modelos CatBoost"
 ---
 
 # Aplicación de un modelo Catboost en ClickHouse {#applying-catboost-model-in-clickhouse}
 
@@ -19,14 +22,14 @@ Para obtener más información sobre la formación de modelos CatBoost, consulte
 
 ## Requisito {#prerequisites}
 
-Si usted no tiene el [Acoplador](https://docs.docker.com/install/) sin embargo, instalarlo.
+Si no tienes el [Acoplador](https://docs.docker.com/install/) sin embargo, instalarlo.
 
 !!! note "Nota"
     [Acoplador](https://www.docker.com) es una plataforma de software que le permite crear contenedores que aíslan una instalación de CatBoost y ClickHouse del resto del sistema.
 
 Antes de aplicar un modelo CatBoost:
 
-**1.** Neumático de la [Imagen de acoplador](https://hub.docker.com/r/yandex/tutorial-catboost-clickhouse) del registro:
+**1.** Tire de la [Imagen de acoplador](https://hub.docker.com/r/yandex/tutorial-catboost-clickhouse) del registro:
 
 ``` bash
 $ docker pull yandex/tutorial-catboost-clickhouse
 ```
 
@@ -113,7 +116,7 @@ FROM amazon_train
 +-count()-+
 | 65538   |
-+---------+
++-------+
 ```
 
 ## 3. Integrar CatBoost en ClickHouse {#integrate-catboost-into-clickhouse}
 
@@ -162,7 +165,7 @@ La forma más rápida de evaluar un modelo CatBoost es compilar `libcatboostmode
 
 Para el modelo de prueba, ejecute el cliente ClickHouse `$ clickhouse client`.
 
-Vamos a asegurarnos de que el modelo está funcionando:
+Asegurémonos de que el modelo esté funcionando:
 
 ``` sql
 :) SELECT
@@ -182,7 +185,7 @@ LIMIT 10
 ```
 
 !!! note "Nota"
-    Función [modelEvaluar](../query_language/functions/other_functions.md#function-modelevaluate) devuelve tupla con predicciones sin procesar por clase para modelos multiclase.
+    Función [modelEvaluar](../sql_reference/functions/other_functions.md#function-modelevaluate) devuelve tupla con predicciones sin procesar por clase para modelos multiclase.
 
 Vamos a predecir la probabilidad:
 
@@ -205,7 +208,7 @@ LIMIT 10
 ```
 
 !!! note "Nota"
-    Más información sobre [exp()](../query_language/functions/math_functions.md) función.
+    Más información sobre [exp()](../sql_reference/functions/math_functions.md) función.
 
 Vamos a calcular LogLoss en la muestra:
 
@@ -231,6 +234,6 @@ FROM
 ```
 
 !!! note "Nota"
-    Más información sobre [avg()](../query_language/agg_functions/reference.md#agg_function-avg) y [Registro()](../query_language/functions/math_functions.md) función.
+    Más información sobre [avg()](../sql_reference/aggregate_functions/reference.md#agg_function-avg) y [registro()](../sql_reference/functions/math_functions.md) función.
 
-[Artículo Original](https://clickhouse.tech/docs/es/guides/apply_catboost_model/)
+[Artículo Original](https://clickhouse.tech/docs/en/guides/apply_catboost_model/)
diff --git a/docs/es/guides/index.md b/docs/es/guides/index.md
index 178e9562ab9..2b410f6db01 100644
--- a/docs/es/guides/index.md
+++ b/docs/es/guides/index.md
@@ -1,11 +1,16 @@
 ---
 machine_translated: true
+machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa
+toc_folder_title: Guides
+toc_priority: 38
+toc_title: "Descripci\xF3n"
 ---
 
 # Guías de ClickHouse {#clickhouse-guides}
 
-Instrucciones detalladas paso a paso que lo ayudarán a resolver varias tareas usando ClickHouse.
+Lista de instrucciones detalladas paso a paso que ayudan a resolver varias tareas usando ClickHouse:
 
+- [Tutorial sobre la configuración simple del clúster](../getting_started/tutorial.md)
 - [Aplicación de un modelo CatBoost en ClickHouse](apply_catboost_model.md)
 
-[Artículo Original](https://clickhouse.tech/docs/es/guides/)
+[Artículo Original](https://clickhouse.tech/docs/en/guides/)
diff --git a/docs/es/index.md b/docs/es/index.md
index 9f9165b446b..2cbe375e8ee 100644
--- a/docs/es/index.md
+++ b/docs/es/index.md
@@ -1,39 +1,42 @@
 ---
 machine_translated: true
+machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa
+toc_priority: 3
+toc_title: "Descripci\xF3n"
 ---
 
-# ¿Qué es ClickHouse? {#what-is-clickhouse}
+# ¿qué es clickhouse? {#what-is-clickhouse}
 
 ClickHouse es un sistema de gestión de bases de datos orientado a columnas (DBMS) para el procesamiento analítico en línea de consultas (OLAP).
 
 En un “normal” DBMS orientado a filas, los datos se almacenan en este orden:
 
-| Fila   | Argumento                          | JavaEnable | Titular                   | GoodEvent | EventTime                         |
-|--------|------------------------------------|------------|---------------------------|-----------|-----------------------------------|
-| \#0    | Sistema abierto.                   | Uno        | Relaciones con inversores | Uno       | ¿Qué puedes encontrar en Neodigit |
-| \#1    | Sistema abierto.                   | Cero       | Contáctenos               | Uno       | ¿Qué puedes encontrar en Neodigit |
-| Nombre | Método de codificación de datos:   | Uno        | Misión                    | Uno       | ¿Qué puedes encontrar en Neodigit |
-| \#Y    | …                                  | …          | …                         | …         | …                                 |
+| Fila | Argumento   | JavaEnable | Titular                   | GoodEvent | EventTime           |
+|------|-------------|------------|---------------------------|-----------|---------------------|
+| \#0  | 89354350662 | 1          | Relaciones con inversores | 1         | 2016-05-18 05:19:20 |
+| \#1  | 90329509958 | 0          | Contáctenos               | 1         | 2016-05-18 08:10:20 |
+| \#2  | 89953706054 | 1          | Mision                    | 1         | 2016-05-18 07:38:00 |
+| \#N  | …           | …          | …                         | …         | …                   |
 
 En otras palabras, todos los valores relacionados con una fila se almacenan físicamente uno junto al otro.
 
-Ejemplos de un DBMS orientado a filas son MySQL, Postgres y MS SQL Server. ¿Cómo?gris }
+Ejemplos de un DBMS orientado a filas son MySQL, Postgres y MS SQL Server.
 
 En un DBMS orientado a columnas, los datos se almacenan así:
 
-| Fila:       | \#0                               | \#1                               | Nombre                             | \#Y |
-|-------------|-----------------------------------|-----------------------------------|------------------------------------|-----|
-| Argumento:  | Sistema abierto.                  | Sistema abierto.                  | Método de codificación de datos:   | …   |
-| JavaEnable: | Uno                               | Cero                              | Uno                                | …   |
-| Titular:    | Relaciones con inversores         | Contáctenos                       | Misión                             | …   |
-| GoodEvent:  | Uno                               | Uno                               | Uno                                | …   |
-| EventTime:  | ¿Qué puedes encontrar en Neodigit | ¿Qué puedes encontrar en Neodigit | ¿Qué puedes encontrar en Neodigit  | …   |
+| Fila:       | \#0                       | \#1                 | \#2                 | \#N |
+|-------------|---------------------------|---------------------|---------------------|-----|
+| Argumento:  | 89354350662               | 90329509958         | 89953706054         | …   |
+| JavaEnable: | 1                         | 0                   | 1                   | …   |
+| Titular:    | Relaciones con inversores | Contáctenos         | Mision              | …   |
+| GoodEvent:  | 1                         | 1                   | 1                   | …   |
+| EventTime:  | 2016-05-18 05:19:20       | 2016-05-18 08:10:20 | 2016-05-18 07:38:00 | …   |
 
 Estos ejemplos solo muestran el orden en el que se organizan los datos. Los valores de diferentes columnas se almacenan por separado y los datos de la misma columna se almacenan juntos.
 
-Ejemplos de un DBMS orientado a columnas: Vertica, Paraccel (Actian Matrix y Amazon Redshift), Sybase IQ, Exasol, Infobright, InfiniDB, MonetDB (VectorWise y Actian Vector), LucidDB, SAP HANA, Google Dremel, Google PowerDrill, Druid y kdb+. ¿Cómo?gris }
+Ejemplos de un DBMS orientado a columnas: Vertica, Paraccel (Actian Matrix y Amazon Redshift), Sybase IQ, Exasol, Infobright, InfiniDB, MonetDB (VectorWise y Actian Vector), LucidDB, SAP HANA, Google Dremel, Google PowerDrill, Druid y kdb+.
 
-Los diferentes pedidos para almacenar datos se adaptan mejor a diferentes escenarios. El escenario de acceso a datos se refiere a qué consultas se realizan, con qué frecuencia y en qué proporción; cuántos datos se leen para cada tipo de consulta: filas, columnas y bytes; la relación entre la lectura y la actualización de datos; el tamaño de trabajo de los datos y cómo se utilizan localmente; si se utilizan las transacciones y qué tan aisladas están;
+Different orders for storing data are better suited to different scenarios. The data access scenario refers to what queries are made, how often, and in what proportion; how much data is read for each type of query – rows, columns, and bytes; the relationship between reading and updating data; the working size of the data and how locally it is used; whether transactions are used, and how isolated they are; requirements for data replication and logical integrity; requirements for latency and throughput for each type of query, and so on.
 
 Cuanto mayor sea la carga en el sistema, más importante es personalizar el sistema configurado para que coincida con los requisitos del escenario de uso, y más fino será esta personalización. No existe un sistema que sea igualmente adecuado para escenarios significativamente diferentes. Si un sistema es adaptable a un amplio conjunto de escenarios, bajo una carga alta, el sistema manejará todos los escenarios igualmente mal, o funcionará bien para solo uno o algunos de los escenarios posibles.
 
@@ -43,7 +46,7 @@ Cuanto mayor sea la carga en el sist
 - Los datos se actualizan en lotes bastante grandes (\> 1000 filas), no por filas individuales; o no se actualiza en absoluto.
 - Los datos se agregan a la base de datos pero no se modifican.
 - Para las lecturas, se extrae un número bastante grande de filas de la base de datos, pero solo un pequeño subconjunto de columnas.
-- Las tablas hijo “wide,” lo que significa que contienen un gran número de columnas.
+- Las tablas son “wide,” lo que significa que contienen un gran número de columnas.
 - Las consultas son relativamente raras (generalmente cientos de consultas por servidor o menos por segundo).
 - Para consultas simples, se permiten latencias de alrededor de 50 ms.
 - Los valores de columna son bastante pequeños: números y cadenas cortas (por ejemplo, 60 bytes por URL).
@@ -51,7 +54,7 @@ Cuanto mayor sea la carga en el sist
 - Las transacciones no son necesarias.
 - Bajos requisitos para la coherencia de los datos.
 - Hay una tabla grande por consulta. Todas las mesas son pequeñas, excepto una.
-- Un resultado de consulta es significativamente menor que los datos de origen. En otras palabras, los datos se filtran o se agregan, por lo que el resultado se ajusta a la memoria RAM de un solo servidor.
+- Un resultado de consulta es significativamente menor que los datos de origen. En otras palabras, los datos se filtran o se agregan, por lo que el resultado se ajusta a la RAM de un solo servidor.
 
 Es fácil ver que el escenario OLAP es muy diferente de otros escenarios populares (como el acceso OLTP o Key-Value). Por lo tanto, no tiene sentido intentar usar OLTP o una base de datos de valor clave para procesar consultas analíticas si desea obtener un rendimiento decente. Por ejemplo, si intenta usar MongoDB o Redis para análisis, obtendrá un rendimiento muy bajo en comparación con las bases de datos OLAP.
 
@@ -75,7 +78,7 @@ Ver la diferencia?
 2. Dado que los datos se leen en paquetes, es más fácil de comprimir. Los datos en columnas también son más fáciles de comprimir. Esto reduce aún más el volumen de E/S.
 3. Debido a la reducción de E / S, más datos se ajustan a la memoria caché del sistema.
 
-Por ejemplo, la consulta “count the number of records for each advertising platform” más caliente “advertising platform ID” columna, que ocupa 1 byte sin comprimir. Si la mayor parte del tráfico no proviene de plataformas publicitarias, puede esperar al menos una compresión de 10 veces de esta columna. Cuando se utiliza un algoritmo de compresión rápida, la descompresión de datos es posible a una velocidad de al menos varios gigabytes de datos sin comprimir por segundo. En otras palabras, esta consulta se puede procesar a una velocidad de aproximadamente varios miles de millones de filas por segundo en un único servidor. Esta velocidad se logra realmente en la práctica.
+Por ejemplo, la consulta “count the number of records for each advertising platform” requiere leer una columna “advertising platform ID”, que ocupa 1 byte sin comprimir. Si la mayor parte del tráfico no proviene de plataformas publicitarias, puede esperar al menos una compresión de 10 veces de esta columna. Cuando se utiliza un algoritmo de compresión rápida, la descompresión de datos es posible a una velocidad de al menos varios gigabytes de datos sin comprimir por segundo. En otras palabras, esta consulta se puede procesar a una velocidad de aproximadamente varios miles de millones de filas por segundo en un único servidor. Esta velocidad se logra realmente en la práctica.
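The arithmetic behind that paragraph can be made explicit. A back-of-envelope sketch — the numbers simply restate the paragraph's own assumptions, and the query shape matches the one in the hunk header below (the `tutorial.hits_v1` table name is an assumption):

``` bash
# ~1 byte/row uncompressed with ~10x compression => ~0.1 byte read per row;
# at a few GB/s of decompressed data, that is on the order of 1e9 rows/s per server,
# which is the paragraph's claim. The kind of query it refers to:
clickhouse-client --query "
SELECT CounterID, count() FROM tutorial.hits_v1 GROUP BY CounterID ORDER BY count() DESC LIMIT 20
"
```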
@@ -121,11 +124,11 @@ SELECT CounterID, count() FROM hits GROUP BY CounterID ORDER BY count() DESC LIM
 
 ### CPU {#cpu}
 
-Dado que la ejecución de una consulta requiere procesar un gran número de filas, ayuda enviar todas las operaciones para vectores completos en lugar de para filas separadas, o implementar el motor de consultas para que casi no haya costo de envío. Si no lo hace, con cualquier subsistema de disco medio decente, el intérprete de consultas inevitablemente detiene la CPU. Tiene sentido almacenar datos en columnas y procesarlos, cuando sea posible, por columnas.
+Dado que la ejecución de una consulta requiere procesar un gran número de filas, ayuda enviar todas las operaciones para vectores completos en lugar de para filas separadas, o implementar el motor de consultas para que casi no haya costo de envío. Si no hace esto, con cualquier subsistema de disco medio decente, el intérprete de consultas inevitablemente detiene la CPU. Tiene sentido almacenar datos en columnas y procesarlos, cuando sea posible, por columnas.
 
 Hay dos formas de hacer esto:
 
-1. Un vector motor. Todas las operaciones se escriben para vectores, en lugar de para valores separados. Esto significa que no es necesario llamar a las operaciones muy a menudo, y los costos de envío son insignificantes. El código de operación contiene un ciclo interno optimizado.
+1. Un vector motor. Todas las operaciones se escriben para vectores, en lugar de para valores separados. Esto significa que no necesita llamar a las operaciones con mucha frecuencia, y los costos de envío son insignificantes. El código de operación contiene un ciclo interno optimizado.
 
 2. Generación de código. El código generado para la consulta tiene todas las llamadas indirectas.
 
@@ -133,4 +136,4 @@ Esto no se hace en “normal” bases de datos, porque no tiene sentido cuando s
 
 Tenga en cuenta que para la eficiencia de la CPU, el lenguaje de consulta debe ser declarativo (SQL o MDX), o al menos un vector (J, K). La consulta solo debe contener bucles implícitos, lo que permite la optimización.
 
-[Artículo Original](https://clickhouse.tech/docs/es/)
+{## [Artículo Original](https://clickhouse.tech/docs/en/) ##}
diff --git a/docs/es/interfaces/cli.md b/docs/es/interfaces/cli.md
index a0dbd31824b..058985194e1 100644
--- a/docs/es/interfaces/cli.md
+++ b/docs/es/interfaces/cli.md
@@ -1,5 +1,8 @@
 ---
 machine_translated: true
+machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa
+toc_priority: 17
+toc_title: "Cliente de l\xEDnea de comandos"
 ---
 
 # Cliente de línea de comandos {#command-line-client}
 
@@ -86,7 +89,7 @@ Formatee una consulta como de costumbre, luego coloque los valores que desea pas
 ```
 
 - `name` — Placeholder identifier. In the console client it should be used in app parameters as `--param_<name> = value`.
-- `data type` — [Tipo de datos](../data_types/index.md) del valor del parámetro de la aplicación. Por ejemplo, una estructura de datos como `(integer, ('string', integer))` puede tener el `Tuple(UInt8, Tuple(String, UInt8))` tipo de datos (también puede usar otro [Entero](../data_types/int_uint.md) tipo).
+- `data type` — [Tipo de datos](../sql_reference/data_types/index.md) del valor del parámetro de la aplicación. Por ejemplo, una estructura de datos como `(integer, ('string', integer))` puede tener el `Tuple(UInt8, Tuple(String, UInt8))` tipo de datos (también puede usar otro [entero](../sql_reference/data_types/int_uint.md) tipo).
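The `{name:data type}` placeholder mechanism described above pairs with a `--param_<name>` option on the client. A minimal sketch (the table name `table` and the tuple value are placeholders):

``` bash
# The value passed via --param_tuple_in_tuple substitutes the typed placeholder in the query.
clickhouse-client --param_tuple_in_tuple="(10, ('dt', 10))" \
    --query "SELECT * FROM table WHERE val = {tuple_in_tuple:Tuple(UInt8, Tuple(String, UInt8))}"
```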
 #### Ejemplo {#example}
 
@@ -143,4 +146,4 @@ Ejemplo de un archivo de configuración:
 
 ```
 
-[Artículo Original](https://clickhouse.tech/docs/es/interfaces/cli/)
+[Artículo Original](https://clickhouse.tech/docs/en/interfaces/cli/)
diff --git a/docs/es/interfaces/cpp.md b/docs/es/interfaces/cpp.md
index 5fad7c315e5..c854513568a 100644
--- a/docs/es/interfaces/cpp.md
+++ b/docs/es/interfaces/cpp.md
@@ -1,9 +1,12 @@
 ---
 machine_translated: true
+machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa
+toc_priority: 24
+toc_title: Biblioteca de clientes de C++
 ---
 
 # Biblioteca de clientes de C++ {#c-client-library}
 
 Ver README en [Bienvenidos](https://github.com/ClickHouse/clickhouse-cpp) repositorio.
 
-[Artículo Original](https://clickhouse.tech/docs/es/interfaces/cpp/)
+[Artículo Original](https://clickhouse.tech/docs/en/interfaces/cpp/)
diff --git a/docs/es/interfaces/formats.md b/docs/es/interfaces/formats.md
index d78c81a2849..fcb700a8968 100644
--- a/docs/es/interfaces/formats.md
+++ b/docs/es/interfaces/formats.md
@@ -1,5 +1,8 @@
 ---
 machine_translated: true
+machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa
+toc_priority: 21
+toc_title: Formatos de entrada y salida
 ---
 
 # Formatos para datos de entrada y salida {#formats}
 
@@ -107,9 +110,9 @@ Solo se escapa un pequeño conjunto de símbolos. Puede tropezar fácilmente con
 
 Las matrices se escriben como una lista de valores separados por comas entre corchetes. Los elementos numéricos de la matriz tienen el formato normal. `Date` y `DateTime` están escritos entre comillas simples. Las cadenas se escriben entre comillas simples con las mismas reglas de escape que las anteriores.
 
-[NULL](../query_language/syntax.md) se formatea como `\N`.
+[NULL](../sql_reference/syntax.md) se formatea como `\N`.
 
-Cada elemento de [Anidar](../data_types/nested_data_structures/nested.md) estructuras se representa como una matriz.
+Cada elemento de [Anidar](../sql_reference/data_types/nested_data_structures/nested.md) estructuras se representa como una matriz.
 
 Por ejemplo:
 
@@ -314,7 +317,7 @@ format_template_resultset = '/some/path/resultset.format', format_template_row =
 
 ## TSKV {#tskv}
 
-Similar a TabSeparated , pero genera un valor en formato name=value . Los nombres se escapan de la misma manera que en el formato TabSeparated, y el símbolo = también se escapa.
+Similar a TabSeparated, pero genera un valor en formato nombre=valor. Los nombres se escapan de la misma manera que en el formato TabSeparated, y el símbolo = también se escapa.
 
 ``` text
 SearchPhrase=   count()=8267016
@@ -329,7 +332,7 @@ SearchPhrase=curtain designs    count()=1064
 SearchPhrase=baku       count()=1000
 ```
 
-[NULL](../query_language/syntax.md) se formatea como `\N`.
+[NULL](../sql_reference/syntax.md) se formatea como `\N`.
 
 ``` sql
 SELECT * FROM t_null FORMAT TSKV
 ```
 
@@ -461,7 +464,7 @@ Si la consulta contiene GROUP BY, rows\_before\_limit\_at\_least es el número e
 
 Este formato solo es apropiado para generar un resultado de consulta, pero no para analizar (recuperar datos para insertar en una tabla).
 
-Soporta ClickHouse [NULL](../query_language/syntax.md), que se muestra como `null` en la salida JSON.
+Soporta ClickHouse [NULL](../sql_reference/syntax.md), que se muestra como `null` en la salida JSON.
 
 Ver también el [JSONEachRow](#jsoneachrow) formato.
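The `NULL`-as-`null` rendering mentioned above is quick to observe against any running server. A one-line sketch:

``` bash
# JSON-family formats render a ClickHouse NULL as a JSON null:
clickhouse-client --query "SELECT 1 AS x, NULL AS y FORMAT JSONEachRow"
# -> {"x":1,"y":null}
```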
@@ -538,7 +541,7 @@ ClickHouse ignora los espacios entre los elementos y las comas después de los o **Procesamiento de valores omitidos** -ClickHouse sustituye los valores omitidos por los valores predeterminados para el [tipos de datos](../data_types/index.md). +ClickHouse sustituye los valores omitidos por los valores predeterminados para el [tipos de datos](../sql_reference/data_types/index.md). Si `DEFAULT expr` se especifica, ClickHouse utiliza diferentes reglas de sustitución dependiendo de la [Entrada\_format\_defaults\_for\_omitted\_fields](../operations/settings/settings.md#session_settings-input_format_defaults_for_omitted_fields) configuración. @@ -583,7 +586,7 @@ A diferencia de la [JSON](#json) formato, no hay sustitución de secuencias UTF- ### Uso de estructuras anidadas {#jsoneachrow-nested} -Si tienes una mesa con [Anidar](../data_types/nested_data_structures/nested.md) columnas de tipo de datos, puede insertar datos JSON con la misma estructura. Habilite esta función con el [Entrada\_format\_import\_nested\_json](../operations/settings/settings.md#settings-input_format_import_nested_json) configuración. +Si tienes una mesa con [Anidar](../sql_reference/data_types/nested_data_structures/nested.md) columnas de tipo de datos, puede insertar datos JSON con la misma estructura. Habilite esta función con el [Entrada\_format\_import\_nested\_json](../operations/settings/settings.md#settings-input_format_import_nested_json) configuración. Por ejemplo, considere la siguiente tabla: @@ -657,7 +660,7 @@ Salidas de datos como tablas de arte Unicode, también utilizando secuencias de Se dibuja una cuadrícula completa de la tabla, y cada fila ocupa dos líneas en la terminal. Cada bloque de resultados se muestra como una tabla separada. Esto es necesario para que los bloques se puedan generar sin resultados de almacenamiento en búfer (el almacenamiento en búfer sería necesario para calcular previamente el ancho visible de todos los valores). -[NULL](../query_language/syntax.md) se emite como `ᴺᵁᴸᴸ`. +[NULL](../sql_reference/syntax.md) se emite como `ᴺᵁᴸᴸ`. Ejemplo (mostrado para el [PrettyCompact](#prettycompact) formato): @@ -761,7 +764,7 @@ FixedString se representa simplemente como una secuencia de bytes. La matriz se representa como una longitud varint (sin signo [LEB128](https://en.wikipedia.org/wiki/LEB128)), seguido de elementos sucesivos de la matriz. -Para [NULL](../query_language/syntax.md#null-literal) soporte, se añade un byte adicional que contiene 1 o 0 antes de cada [NULL](../data_types/nullable.md) valor. Si 1, entonces el valor es `NULL` y este byte se interpreta como un valor separado. Si es 0, el valor después del byte no es `NULL`. +Para [NULL](../sql_reference/syntax.md#null-literal) soporte, se añade un byte adicional que contiene 1 o 0 antes de cada [NULL](../sql_reference/data_types/nullable.md) valor. Si 1, entonces el valor es `NULL` y este byte se interpreta como un valor separado. Si es 0, el valor después del byte no es `NULL`. ## RowBinaryWithNamesAndTypes {#rowbinarywithnamesandtypes} @@ -773,7 +776,7 @@ Similar a [RowBinary](#rowbinary), pero con encabezado añadido: ## Valor {#data-format-values} -Imprime cada fila entre paréntesis. Las filas están separadas por comas. No hay coma después de la última fila. Los valores dentro de los corchetes también están separados por comas. Los números se emiten en formato decimal sin comillas. Las matrices se emiten entre corchetes. Las cadenas, fechas y fechas con horas se generan entre comillas. 
Las reglas de escape y el análisis son similares a las [TabSeparated](#tabseparated) formato. Durante el formateo, los espacios adicionales no se insertan, pero durante el análisis, se permiten y omiten (excepto los espacios dentro de los valores de la matriz, que no están permitidos). [NULL](../query_language/syntax.md) se representa como `NULL`. +Imprime cada fila entre paréntesis. Las filas están separadas por comas. No hay coma después de la última fila. Los valores dentro de los corchetes también están separados por comas. Los números se emiten en formato decimal sin comillas. Las matrices se emiten entre corchetes. Las cadenas, fechas y fechas con horas se generan entre comillas. Las reglas de escape y el análisis son similares a las [TabSeparated](#tabseparated) formato. Durante el formateo, los espacios adicionales no se insertan, pero durante el análisis, se permiten y omiten (excepto los espacios dentro de los valores de la matriz, que no están permitidos). [NULL](../sql_reference/syntax.md) se representa como `NULL`. The minimum set of characters that you need to escape when passing data in Values ​​format: single quotes and backslashes. @@ -783,9 +786,9 @@ Ver también: [input\_format\_values\_interpret\_expressions](../operations/sett ## Vertical {#vertical} -Imprime cada valor en una línea independiente con el nombre de columna especificado. Este formato es conveniente para imprimir solo una o varias filas si cada fila consta de un gran número de columnas. +Imprime cada valor en una línea independiente con el nombre de la columna especificada. Este formato es conveniente para imprimir solo una o varias filas si cada fila consta de un gran número de columnas. -[NULL](../query_language/syntax.md) se emite como `ᴺᵁᴸᴸ`. +[NULL](../sql_reference/syntax.md) se emite como `ᴺᵁᴸᴸ`. Ejemplo: @@ -964,7 +967,7 @@ message MessageType { ``` ClickHouse intenta encontrar una columna llamada `x.y.z` (o `x_y_z` o `X.y_Z` y así sucesivamente). -Los mensajes anidados son adecuados para [estructuras de datos anidados](../data_types/nested_data_structures/nested.md). +Los mensajes anidados son adecuados para [estructuras de datos anidados](../sql_reference/data_types/nested_data_structures/nested.md). Valores predeterminados definidos en un esquema protobuf como este @@ -976,7 +979,7 @@ message MessageType { } ``` -no se aplican; el [valores predeterminados de la tabla](../query_language/create.md#create-default-values) se utilizan en lugar de ellos. +no se aplican; el [valores predeterminados de la tabla](../sql_reference/statements/create.md#create-default-values) se utilizan en lugar de ellos. ClickHouse entra y emite mensajes protobuf en el `length-delimited` formato. Significa que antes de cada mensaje debe escribirse su longitud como un [varint](https://developers.google.com/protocol-buffers/docs/encoding#varints). @@ -990,23 +993,23 @@ El formato ClickHouse Avro admite lectura y escritura [Archivos de datos Avro](h ### Coincidencia de tipos de datos {#data_types-matching} -La siguiente tabla muestra los tipos de datos admitidos y cómo coinciden con ClickHouse [tipos de datos](../data_types/index.md) en `INSERT` y `SELECT` consulta. +La siguiente tabla muestra los tipos de datos admitidos y cómo coinciden con ClickHouse [tipos de datos](../sql_reference/data_types/index.md) en `INSERT` y `SELECT` consulta. 
-| Tipo de datos Avro `INSERT` | Tipo de datos ClickHouse | Tipo de datos Avro `SELECT` |
-|---------------------------------------------|----------------------------------------------------------------------------------------------|------------------------------|
-| `boolean`, `int`, `long`, `float`, `double` | [¿Cómo funciona?)](../data_types/int_uint.md), [UInt(8\|16\|32)](../data_types/int_uint.md) | `int` |
-| `boolean`, `int`, `long`, `float`, `double` | [Int64](../data_types/int_uint.md), [UInt64](../data_types/int_uint.md) | `long` |
-| `boolean`, `int`, `long`, `float`, `double` | [Float32](../data_types/float.md) | `float` |
-| `boolean`, `int`, `long`, `float`, `double` | [Float64](../data_types/float.md) | `double` |
-| `bytes`, `string`, `fixed`, `enum` | [Cadena](../data_types/string.md) | `bytes` |
-| `bytes`, `string`, `fixed` | [Cadena fija (N)](../data_types/fixedstring.md) | `fixed(N)` |
-| `enum` | [Enum (8\|16)](../data_types/enum.md) | `enum` |
-| `array(T)` | [Matriz (T)](../data_types/array.md) | `array(T)` |
-| `union(null, T)`, `union(T, null)` | [Nivel de Cifrado WEP)](../data_types/date.md) | `union(null, T)` |
-| `null` | [Nullable (nada)](../data_types/special_data_types/nothing.md) | `null` |
-| `int (date)` \* | [Fecha](../data_types/date.md) | `int (date)` \* |
-| `long (timestamp-millis)` \* | [¿Qué puedes encontrar en Neodigit)](../data_types/datetime.md) | `long (timestamp-millis)` \* |
-| `long (timestamp-micros)` \* | [Cómo hacer esto?)](../data_types/datetime.md) | `long (timestamp-micros)` \* |
+| Tipo de datos Avro `INSERT` | Tipo de datos ClickHouse | Tipo de datos Avro `SELECT` |
+|---------------------------------------------|------------------------------------------------------------------------------------------------------------------------|------------------------------|
+| `boolean`, `int`, `long`, `float`, `double` | [Int(8\|16\|32)](../sql_reference/data_types/int_uint.md), [UInt(8\|16\|32)](../sql_reference/data_types/int_uint.md) | `int` |
+| `boolean`, `int`, `long`, `float`, `double` | [Int64](../sql_reference/data_types/int_uint.md), [UInt64](../sql_reference/data_types/int_uint.md) | `long` |
+| `boolean`, `int`, `long`, `float`, `double` | [Float32](../sql_reference/data_types/float.md) | `float` |
+| `boolean`, `int`, `long`, `float`, `double` | [Float64](../sql_reference/data_types/float.md) | `double` |
+| `bytes`, `string`, `fixed`, `enum` | [Cadena](../sql_reference/data_types/string.md) | `bytes` |
+| `bytes`, `string`, `fixed` | [Cadena fija (N)](../sql_reference/data_types/fixedstring.md) | `fixed(N)` |
+| `enum` | [Enum (8\|16)](../sql_reference/data_types/enum.md) | `enum` |
+| `array(T)` | [Matriz (T)](../sql_reference/data_types/array.md) | `array(T)` |
+| `union(null, T)`, `union(T, null)` | [Nullable(T)](../sql_reference/data_types/date.md) | `union(null, T)` |
+| `null` | [Nullable(Nothing)](../sql_reference/data_types/special_data_types/nothing.md) | `null` |
+| `int (date)` \* | [Fecha](../sql_reference/data_types/date.md) | `int (date)` \* |
+| `long (timestamp-millis)` \* | [DateTime64 (3)](../sql_reference/data_types/datetime.md) | `long (timestamp-millis)` \* |
+| `long (timestamp-micros)` \* | [DateTime64 (6)](../sql_reference/data_types/datetime.md) | `long (timestamp-micros)` \* |

\* [Tipos lógicos Avro](http://avro.apache.org/docs/current/spec.html#Logical+Types)

@@ -1060,7 +1063,7 @@ Lo mismo que [Avro](#data-format-avro)

### Uso {#usage}

-Para verificar rápidamente la
resolución del esquema, puede usar [Método de codificación de datos:](https://github.com/edenhill/kafkacat) con [Sistema abierto.](../operations/utils/clickhouse-local.md):
+Para verificar rápidamente la resolución del esquema, puede usar [kafkacat](https://github.com/edenhill/kafkacat) con [clickhouse-local](../operations/utilities/clickhouse-local.md):

``` bash
$ kafkacat -b kafka-broker -C -t topic1 -o beginning -f '%s' -c 3 | clickhouse-local --input-format AvroConfluent --format_avro_schema_registry_url 'http://schema-registry' -S "field1 Int64, field2 String" -q 'select * from table'
1 a
2 b
3 c
```

-Utilizar `AvroConfluent` con [Kafka](../operations/table_engines/kafka.md):
+Utilizar `AvroConfluent` con [Kafka](../engines/table_engines/integrations/kafka.md):

``` sql
CREATE TABLE topic1_stream
@@ -1098,25 +1101,25 @@ SELECT * FROM topic1_stream;

### Coincidencia de tipos de datos {#data_types-matching-2}

-La siguiente tabla muestra los tipos de datos admitidos y cómo coinciden con ClickHouse [tipos de datos](../data_types/index.md) en `INSERT` y `SELECT` consulta.
+La siguiente tabla muestra los tipos de datos admitidos y cómo coinciden con ClickHouse [tipos de datos](../sql_reference/data_types/index.md) en `INSERT` y `SELECT` consulta.

-| Tipo de datos de parquet (`INSERT`) | Tipo de datos ClickHouse | Tipo de datos de parquet (`SELECT`) |
-|-------------------------------------|---------------------------------------------|-------------------------------------|
-| `UINT8`, `BOOL` | [UInt8](../data_types/int_uint.md) | `UINT8` |
-| `INT8` | [Int8](../data_types/int_uint.md) | `INT8` |
-| `UINT16` | [UInt16](../data_types/int_uint.md) | `UINT16` |
-| `INT16` | [Int16](../data_types/int_uint.md) | `INT16` |
-| `UINT32` | [UInt32](../data_types/int_uint.md) | `UINT32` |
-| `INT32` | [Int32](../data_types/int_uint.md) | `INT32` |
-| `UINT64` | [UInt64](../data_types/int_uint.md) | `UINT64` |
-| `INT64` | [Int64](../data_types/int_uint.md) | `INT64` |
-| `FLOAT`, `HALF_FLOAT` | [Float32](../data_types/float.md) | `FLOAT` |
-| `DOUBLE` | [Float64](../data_types/float.md) | `DOUBLE` |
-| `DATE32` | [Fecha](../data_types/date.md) | `UINT16` |
-| `DATE64`, `TIMESTAMP` | [FechaHora](../data_types/datetime.md) | `UINT32` |
-| `STRING`, `BINARY` | [Cadena](../data_types/string.md) | `STRING` |
-| — | [Cadena fija](../data_types/fixedstring.md) | `STRING` |
-| `DECIMAL` | [Decimal](../data_types/decimal.md) | `DECIMAL` |
+| Tipo de datos de Parquet (`INSERT`) | Tipo de datos ClickHouse | Tipo de datos de Parquet (`SELECT`) |
+|-------------------------------------|-----------------------------------------------------------|-------------------------------------|
+| `UINT8`, `BOOL` | [UInt8](../sql_reference/data_types/int_uint.md) | `UINT8` |
+| `INT8` | [Int8](../sql_reference/data_types/int_uint.md) | `INT8` |
+| `UINT16` | [UInt16](../sql_reference/data_types/int_uint.md) | `UINT16` |
+| `INT16` | [Int16](../sql_reference/data_types/int_uint.md) | `INT16` |
+| `UINT32` | [UInt32](../sql_reference/data_types/int_uint.md) | `UINT32` |
+| `INT32` | [Int32](../sql_reference/data_types/int_uint.md) | `INT32` |
+| `UINT64` | [UInt64](../sql_reference/data_types/int_uint.md) | `UINT64` |
+| `INT64` | [Int64](../sql_reference/data_types/int_uint.md) | `INT64` |
+| `FLOAT`, `HALF_FLOAT` | [Float32](../sql_reference/data_types/float.md) | `FLOAT` |
+| `DOUBLE` | 
[Float64](../sql_reference/data_types/float.md) | `DOUBLE` | +| `DATE32` | [Fecha](../sql_reference/data_types/date.md) | `UINT16` | +| `DATE64`, `TIMESTAMP` | [FechaHora](../sql_reference/data_types/datetime.md) | `UINT32` | +| `STRING`, `BINARY` | [Cadena](../sql_reference/data_types/string.md) | `STRING` | +| — | [Cadena fija](../sql_reference/data_types/fixedstring.md) | `STRING` | +| `DECIMAL` | [Decimal](../sql_reference/data_types/decimal.md) | `DECIMAL` | ClickHouse admite una precisión configurable de `Decimal` tipo. El `INSERT` consulta trata el Parquet `DECIMAL` tipo como el ClickHouse `Decimal128` tipo. @@ -1138,7 +1141,7 @@ Puede seleccionar datos de una tabla ClickHouse y guardarlos en algún archivo e $ clickhouse-client --query="SELECT * FROM {some_table} FORMAT Parquet" > {some_file.pq} ``` -Para intercambiar datos con Hadoop, puede usar [Motor de mesa HDFS](../operations/table_engines/hdfs.md). +Para intercambiar datos con Hadoop, puede usar [Motor de mesa HDFS](../engines/table_engines/integrations/hdfs.md). ## ORC {#data-format-orc} @@ -1146,24 +1149,24 @@ Para intercambiar datos con Hadoop, puede usar [Motor de mesa HDFS](../operation ### Coincidencia de tipos de datos {#data_types-matching-3} -La siguiente tabla muestra los tipos de datos admitidos y cómo coinciden con ClickHouse [tipos de datos](../data_types/index.md) en `INSERT` consulta. +La siguiente tabla muestra los tipos de datos admitidos y cómo coinciden con ClickHouse [tipos de datos](../sql_reference/data_types/index.md) en `INSERT` consulta. -| Tipo de datos ORC (`INSERT`) | Tipo de datos ClickHouse | -|------------------------------|----------------------------------------| -| `UINT8`, `BOOL` | [UInt8](../data_types/int_uint.md) | -| `INT8` | [Int8](../data_types/int_uint.md) | -| `UINT16` | [UInt16](../data_types/int_uint.md) | -| `INT16` | [Int16](../data_types/int_uint.md) | -| `UINT32` | [UInt32](../data_types/int_uint.md) | -| `INT32` | [Int32](../data_types/int_uint.md) | -| `UINT64` | [UInt64](../data_types/int_uint.md) | -| `INT64` | [Int64](../data_types/int_uint.md) | -| `FLOAT`, `HALF_FLOAT` | [Float32](../data_types/float.md) | -| `DOUBLE` | [Float64](../data_types/float.md) | -| `DATE32` | [Fecha](../data_types/date.md) | -| `DATE64`, `TIMESTAMP` | [FechaHora](../data_types/datetime.md) | -| `STRING`, `BINARY` | [Cadena](../data_types/string.md) | -| `DECIMAL` | [Decimal](../data_types/decimal.md) | +| Tipo de datos ORC (`INSERT`) | Tipo de datos ClickHouse | +|------------------------------|------------------------------------------------------| +| `UINT8`, `BOOL` | [UInt8](../sql_reference/data_types/int_uint.md) | +| `INT8` | [Int8](../sql_reference/data_types/int_uint.md) | +| `UINT16` | [UInt16](../sql_reference/data_types/int_uint.md) | +| `INT16` | [Int16](../sql_reference/data_types/int_uint.md) | +| `UINT32` | [UInt32](../sql_reference/data_types/int_uint.md) | +| `INT32` | [Int32](../sql_reference/data_types/int_uint.md) | +| `UINT64` | [UInt64](../sql_reference/data_types/int_uint.md) | +| `INT64` | [Int64](../sql_reference/data_types/int_uint.md) | +| `FLOAT`, `HALF_FLOAT` | [Float32](../sql_reference/data_types/float.md) | +| `DOUBLE` | [Float64](../sql_reference/data_types/float.md) | +| `DATE32` | [Fecha](../sql_reference/data_types/date.md) | +| `DATE64`, `TIMESTAMP` | [FechaHora](../sql_reference/data_types/datetime.md) | +| `STRING`, `BINARY` | [Cadena](../sql_reference/data_types/string.md) | +| `DECIMAL` | [Decimal](../sql_reference/data_types/decimal.md) | ClickHouse 
soporta la precisión configurable del tipo `Decimal`. La consulta `INSERT` trata el tipo `DECIMAL` de ORC como el tipo `Decimal128` de ClickHouse.

@@ -1179,7 +1182,7 @@ Puede insertar datos ORC de un archivo en la tabla ClickHouse mediante el siguie
$ cat filename.orc | clickhouse-client --query="INSERT INTO some_table FORMAT ORC"
```

-Para intercambiar datos con Hadoop, puede usar [Motor de mesa HDFS](../operations/table_engines/hdfs.md).
+Para intercambiar datos con Hadoop, puede usar el [Motor de tabla HDFS](../engines/table_engines/integrations/hdfs.md).

## Esquema de formato {#formatschema}

@@ -1191,11 +1194,11 @@ Si el archivo tiene la extensión estándar para el formato (por ejemplo, `.prot
se puede omitir y en este caso, el esquema de formato se ve así `schemafile:MessageType`.
Si introduce o emite datos a través del [cliente](../interfaces/cli.md) en el [modo interactivo](../interfaces/cli.md#cli_usage), el nombre de archivo especificado en el esquema de formato
-puede contener una ruta absoluta o una ruta relativa al directorio actual en el cliente.
+puede contener una ruta de acceso absoluta o una ruta relativa al directorio actual en el cliente.
Si utiliza el cliente en el [modo por lotes](../interfaces/cli.md#cli_usage), la ruta de acceso al esquema debe ser relativa por razones de seguridad.

Si introduce o emite datos a través de la [Interfaz HTTP](../interfaces/http.md), el nombre de archivo especificado en el esquema de formato
-debe estar ubicado en el directorio especificado en [format\_schema\_path](../operations/server_settings/settings.md#server_settings-format_schema_path)
+debe estar ubicado en el directorio especificado en [format\_schema\_path](../operations/server_configuration_parameters/settings.md#server_configuration_parameters-format_schema_path)
en la configuración del servidor.

## Salto de errores {#skippingerrors}
diff --git a/docs/es/interfaces/http.md b/docs/es/interfaces/http.md
index 1097d787a51..a2d32d30acc 100644
--- a/docs/es/interfaces/http.md
+++ b/docs/es/interfaces/http.md
@@ -1,5 +1,8 @@
---
machine_translated: true
+machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa
+toc_priority: 19
+toc_title: Interfaz HTTP
---

# Interfaz HTTP {#http-interface}

@@ -8,7 +11,7 @@ La interfaz HTTP le permite usar ClickHouse en cualquier plataforma desde cualqu
De forma predeterminada, clickhouse-server escucha HTTP en el puerto 8123 (esto se puede cambiar en la configuración).

-Si realiza una solicitud GET / sin parámetros, devuelve 200 códigos de respuesta y la cadena que definió en [http\_server\_default\_response](../operations/server_settings/settings.md#server_settings-http_server_default_response) valor predeterminado “Ok.” (con un avance de línea al final)
+Si realiza una solicitud GET / sin parámetros, devuelve el código de respuesta 200 y la cadena que definió en [http\_server\_default\_response](../operations/server_configuration_parameters/settings.md#server_configuration_parameters-http_server_default_response), con el valor predeterminado “Ok.” (con un avance de línea al final)

``` bash
$ curl 'http://localhost:8123/'
@@ -149,7 +152,7 @@ Puede utilizar el formato interno de compresión ClickHouse al transmitir datos.
Si ha especificado `compress=1` en la URL, el servidor comprime los datos que le envía.
Si ha especificado `decompress=1` en la dirección URL, el servidor descomprime los datos que le pasa en el método `POST`.

-También puede optar por utilizar [Compresión HTTP](https://en.wikipedia.org/wiki/HTTP_compression).
Para enviar un `POST` solicitud, agregue el encabezado de solicitud `Content-Encoding: compression_method`. Para que ClickHouse comprima la respuesta, debe agregar `Accept-Encoding: compression_method`. Soporte ClickHouse `gzip`, `br`, y `deflate` [métodos de compresión](https://en.wikipedia.org/wiki/HTTP_compression#Content-Encoding_tokens). Para habilitar la compresión HTTP, debe usar ClickHouse [enable\_http\_compression](../operations/settings/settings.md#settings-enable_http_compression) configuración. Puede configurar el nivel de compresión de datos [http\_zlib\_compression\_level](#settings-http_zlib_compression_level) para todos los métodos de compresión. +También puede optar por utilizar [Compresión HTTP](https://en.wikipedia.org/wiki/HTTP_compression). Para enviar un `POST` solicitud, agregue el encabezado de solicitud `Content-Encoding: compression_method`. Para que ClickHouse comprima la respuesta, debe agregar `Accept-Encoding: compression_method`. Soporta ClickHouse `gzip`, `br`, y `deflate` [métodos de compresión](https://en.wikipedia.org/wiki/HTTP_compression#Content-Encoding_tokens). Para habilitar la compresión HTTP, debe usar ClickHouse [enable\_http\_compression](../operations/settings/settings.md#settings-enable_http_compression) configuración. Puede configurar el nivel de compresión de datos [http\_zlib\_compression\_level](#settings-http_zlib_compression_level) para todos los métodos de compresión. Puede usar esto para reducir el tráfico de red al transmitir una gran cantidad de datos o para crear volcados que se comprimen inmediatamente. @@ -243,11 +246,11 @@ X-ClickHouse-Progress: {"read_rows":"8783786","read_bytes":"819092887","total_ro Posibles campos de encabezado: -- `read_rows` — Número de filas leídas. -- `read_bytes` — Volumen de datos leídos en bytes. -- `total_rows_to_read` — Número total de filas a leer. -- `written_rows` — Número de filas escritas. -- `written_bytes` — Volumen de datos escritos en bytes. +- `read_rows` — Number of rows read. +- `read_bytes` — Volume of data read in bytes. +- `total_rows_to_read` — Total number of rows to be read. +- `written_rows` — Number of rows written. +- `written_bytes` — Volume of data written in bytes. Las solicitudes en ejecución no se detienen automáticamente si se pierde la conexión HTTP. El análisis y el formato de datos se realizan en el lado del servidor, y el uso de la red puede ser ineficaz. Opcional ‘query\_id’ parámetro se puede pasar como el ID de consulta (cualquier cadena). Para obtener más información, consulte la sección “Settings, replace\_running\_query”. @@ -282,4 +285,227 @@ Puede crear una consulta con parámetros y pasar valores para ellos desde los pa $ curl -sS "
?param_id=2&param_phrase=test" -d "SELECT * FROM table WHERE int_column = {id:UInt8} and string_column = {phrase:String}"
```

-[Artículo Original](https://clickhouse.tech/docs/es/interfaces/http_interface/)
+## Interfaz HTTP predefinida {#predefined_http_interface}
+
+ClickHouse admite consultas específicas a través de la interfaz HTTP. Por ejemplo, puede escribir datos en una tabla de la siguiente manera:
+
+``` bash
+$ echo '(4),(5),(6)' | curl 'http://localhost:8123/?query=INSERT%20INTO%20t%20VALUES' --data-binary @-
+```
+
+ClickHouse también es compatible con la interfaz HTTP predefinida, que puede facilitar la integración con herramientas de terceros como el [exportador de Prometheus](https://github.com/percona-lab/clickhouse_exporter).
+
+Ejemplo:
+
+- En primer lugar, agregue esta sección al archivo de configuración del servidor:
+
+
+``` xml
+
+
+/metrics
+GET
+
+SELECT * FROM system.metrics LIMIT 5 FORMAT Template SETTINGS format_template_resultset = 'prometheus_template_output_format_resultset', format_template_row = 'prometheus_template_output_format_row', format_template_rows_between_delimiter = '\n'
+
+
+
+```
+
+- Ahora puede solicitar la url directamente para los datos en el formato Prometheus:
+
+``` bash
+curl -vvv 'http://localhost:8123/metrics'
+* Trying ::1...
+* Connected to localhost (::1) port 8123 (#0)
+> GET /metrics HTTP/1.1
+> Host: localhost:8123
+> User-Agent: curl/7.47.0
+> Accept: */*
+>
+< HTTP/1.1 200 OK
+< Date: Wed, 27 Nov 2019 08:54:25 GMT
+< Connection: Keep-Alive
+< Content-Type: text/plain; charset=UTF-8
+< X-ClickHouse-Server-Display-Name: i-tl62qd0o
+< Transfer-Encoding: chunked
+< X-ClickHouse-Query-Id: f39235f6-6ed7-488c-ae07-c7ceafb960f6
+< Keep-Alive: timeout=3
+< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0"}
+<
+# HELP "Query" "Number of executing queries"
+# TYPE "Query" counter
+"Query" 1
+
+# HELP "Merge" "Number of executing background merges"
+# TYPE "Merge" counter
+"Merge" 0
+
+# HELP "PartMutation" "Number of mutations (ALTER DELETE/UPDATE)"
+# TYPE "PartMutation" counter
+"PartMutation" 0
+
+# HELP "ReplicatedFetch" "Number of data parts being fetched from replica"
+# TYPE "ReplicatedFetch" counter
+"ReplicatedFetch" 0
+
+# HELP "ReplicatedSend" "Number of data parts being sent to replicas"
+# TYPE "ReplicatedSend" counter
+"ReplicatedSend" 0
+
+* Connection #0 to host localhost left intact
+```
+
+Como puede ver en el ejemplo, si `<http_handlers>` está configurado en el archivo config.xml, ClickHouse hará coincidir las solicitudes HTTP recibidas con los tipos predefinidos en `<http_handlers>` y, si la coincidencia tiene éxito, ejecutará la consulta predefinida correspondiente.

+
+Ahora `<http_handlers>` puede configurar `<root_handler>`, `<ping_handler>`, `<replicas_status_handler>`, `<predefined_query_handler>` y `<dynamic_query_handler>`.
+
+## root\_handler {#root_handler}
+
+`<root_handler>` devuelve el contenido especificado para la solicitud de la ruta raíz. El contenido devuelto se configura mediante `http_server_default_response` en config.xml. Si no se especifica, devuelve **Ok.**
+
+Si `http_server_default_response` no está definido y se envía una solicitud HTTP a ClickHouse, el resultado es el siguiente:
+
+``` xml
+
+
+```
+
+    $ curl 'http://localhost:8123'
+    Ok.
+
+Si `http_server_default_response` está definido y se envía una solicitud HTTP a ClickHouse, el resultado es el siguiente:
+
+``` xml
+
    ]]>
    + + + + +``` + + $ curl 'http://localhost:8123' +
    %

+## ping\_handler {#ping_handler}
+
+`<ping_handler>` se puede utilizar para sondear el estado del servidor ClickHouse actual. Cuando el servidor HTTP de ClickHouse funciona con normalidad, acceder a ClickHouse a través de `<ping_handler>` devolverá **Ok.**.
+
+Ejemplo:
+
+``` xml
+
+/ping
+
+```
+
+``` bash
+$ curl 'http://localhost:8123/ping'
+Ok.
+```
+
+## replicas\_status\_handler {#replicas_status_handler}
+
+`<replicas_status_handler>` se utiliza para detectar el estado de la réplica y devuelve **Ok.** si el nodo de réplica no tiene retraso. Si hay retraso, devuelve el retraso específico. El valor de `<replicas_status_handler>` admite personalización. Si no especifica `<replicas_status_handler>`, la configuración predeterminada de ClickHouse es **/replicas\_status**.
+
+Ejemplo:
+
+``` xml
+
+/replicas_status
+
+```
+
+Ningún caso del retraso:
+
+``` bash
+$ curl 'http://localhost:8123/replicas_status'
+Ok.
+```
+
+Caso retrasado:
+
+``` bash
+$ curl 'http://localhost:8123/replicas_status'
+db.stats: Absolute delay: 22. Relative delay: 22.
+```
+
+## predefined\_query\_handler {#predefined_query_handler}
+
+Puede configurar `<method>`, `<headers>`, `<url>` y `<queries>` en `<predefined_query_handler>`.
+
+`<method>` es responsable de hacer coincidir la parte del método de la solicitud HTTP. `<method>` se ajusta plenamente a la definición de [método](https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods) en el protocolo HTTP. Es una configuración opcional. Si no está definido en el archivo de configuración, no coincide con la parte del método de la solicitud HTTP
+
+`<url>` es responsable de hacer coincidir la parte url de la solicitud HTTP. Es compatible con expresiones regulares [RE2](https://github.com/google/re2). Es una configuración opcional. Si no está definido en el archivo de configuración, no coincide con la parte url de la solicitud HTTP
+
+`<headers>` es responsable de hacer coincidir la parte del encabezado de la solicitud HTTP. Es compatible con las expresiones regulares de RE2. Es una configuración opcional. Si no está definido en el archivo de configuración, no coincide con la parte de encabezado de la solicitud HTTP
+
+El valor de `<queries>` es una consulta predefinida de `<predefined_query_handler>`, que ClickHouse ejecuta cuando se hace coincidir una solicitud HTTP, devolviendo el resultado de la consulta. Es una configuración imprescindible.
+
+`<predefined_query_handler>` admite la configuración de valores Settings y query\_params.
+
+En el ejemplo siguiente se definen los valores de las configuraciones `max_threads` y `max_alter_threads` y, a continuación, se consulta la tabla del sistema para comprobar si estos ajustes se han establecido correctamente.
+
+Ejemplo:
+
+``` xml
+
+
+GET
+
+TEST_HEADER_VALUE
+[^/]+)(/(?P[^/]+))?]]>
+
+[^/]+)(/(?P[^/]+))?]]>
+
+SELECT value FROM system.settings WHERE name = {name_1:String}
+SELECT name, value FROM system.settings WHERE name = {name_2:String}
+
+
+
+```
+
+``` bash
+$ curl -H 'XXX:TEST_HEADER_VALUE' -H 'PARAMS_XXX:max_threads' 'http://localhost:8123/query_param_with_url/1/max_threads/max_alter_threads?max_threads=1&max_alter_threads=2'
+1
+max_alter_threads 2
+```
+
+!!! note "Nota"
+    En un `<predefined_query_handler>` solo se admite una `<query>` de tipo insert.
+
+## dynamic\_query\_handler {#dynamic_query_handler}
+
+`<dynamic_query_handler>` amplía `<predefined_query_handler>`: en lugar de predefinir la consulta, ésta se pasa en la propia solicitud HTTP.
+
+ClickHouse extrae y ejecuta el valor correspondiente al parámetro `<query_param_name>` en la url de la petición HTTP.
+La configuración predeterminada de ClickHouse para `<query_param_name>` es `/query`. Es una configuración opcional. Si no hay una definición en el archivo de configuración, el parámetro no se pasa.
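Como referencia, con la configuración por defecto la interfaz HTTP estándar ya acepta la consulta directamente en el parámetro `query` de la URL; un boceto orientativo:

``` bash
# La consulta viaja como parámetro de la URL y el servidor devuelve el resultado.
$ curl 'http://localhost:8123/?query=SELECT%201'
1
```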
+
+Para experimentar con esta funcionalidad, el ejemplo define los valores de max\_threads y max\_alter\_threads y consulta si la configuración se estableció correctamente.
+La diferencia es que en `<predefined_query_handler>` la consulta se escribe en el archivo de configuración, mientras que en `<dynamic_query_handler>` la consulta se escribe en forma de parámetro de la solicitud HTTP.
+
+Ejemplo:
+
+``` xml
+
+
+
+TEST_HEADER_VALUE_DYNAMIC
+[^/]+)(/(?P[^/]+))?]]>
+
+query_param
+
+
+```
+
+``` bash
+$ curl -H 'XXX:TEST_HEADER_VALUE_DYNAMIC' -H 'PARAMS_XXX:max_threads' 'http://localhost:8123/?query_param=SELECT%20value%20FROM%20system.settings%20where%20name%20=%20%7Bname_1:String%7D%20OR%20name%20=%20%7Bname_2:String%7D&max_threads=1&max_alter_threads=2&param_name_2=max_alter_threads'
+1
+2
+```
+
+[Artículo Original](https://clickhouse.tech/docs/en/interfaces/http_interface/)
diff --git a/docs/es/interfaces/index.md b/docs/es/interfaces/index.md
index 2bc67a3425c..8a15889ab81 100644
--- a/docs/es/interfaces/index.md
+++ b/docs/es/interfaces/index.md
@@ -1,5 +1,9 @@
---
machine_translated: true
+machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa
+toc_folder_title: Interfaces
+toc_priority: 14
+toc_title: "Implantaci\xF3n"
---

# Interfaz {#interfaces}

@@ -22,4 +26,4 @@ También hay una amplia gama de bibliotecas de terceros para trabajar con ClickH
- [Integración](third-party/integrations.md)
- [Interfaces visuales](third-party/gui.md)

-[Artículo Original](https://clickhouse.tech/docs/es/interfaces/)
+[Artículo Original](https://clickhouse.tech/docs/en/interfaces/)
diff --git a/docs/es/interfaces/jdbc.md b/docs/es/interfaces/jdbc.md
index 2069c78dff2..fd3f26c946a 100644
--- a/docs/es/interfaces/jdbc.md
+++ b/docs/es/interfaces/jdbc.md
@@ -1,5 +1,8 @@
---
machine_translated: true
+machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa
+toc_priority: 22
+toc_title: Controlador JDBC
---

# Controlador JDBC {#jdbc-driver}

@@ -9,4 +12,4 @@ machine_translated: true
- [ClickHouse-Native-JDBC](https://github.com/housepower/ClickHouse-Native-JDBC)
- [clickhouse4j](https://github.com/blynkkk/clickhouse4j)

-[Artículo Original](https://clickhouse.tech/docs/es/interfaces/jdbc/)
+[Artículo Original](https://clickhouse.tech/docs/en/interfaces/jdbc/)
diff --git a/docs/es/interfaces/mysql.md b/docs/es/interfaces/mysql.md
index bd3e56cf61c..bd4545b46f8 100644
--- a/docs/es/interfaces/mysql.md
+++ b/docs/es/interfaces/mysql.md
@@ -1,10 +1,13 @@
---
machine_translated: true
+machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa
+toc_priority: 20
+toc_title: Interfaz MySQL
---

# Interfaz de MySQL {#mysql-interface}

-ClickHouse soporta el protocolo de cable MySQL. Puede ser habilitado por [mysql\_port](../operations/server_settings/settings.md#server_settings-mysql_port) configuración en el archivo de configuración:
+ClickHouse soporta el protocolo de cable de MySQL. Puede habilitarse mediante la configuración [mysql\_port](../operations/server_configuration_parameters/settings.md#server_configuration_parameters-mysql_port) en el archivo de configuración:

``` xml
<mysql_port>9004</mysql_port>
```

@@ -34,7 +37,7 @@ Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
mysql>
```

-Para la compatibilidad con todos los clientes MySQL, se recomienda especificar la contraseña de usuario con [Doble SHA1](../operations/settings/settings_users.md#password_double_sha1_hex) en el archivo de configuración.
+Para la compatibilidad con todos los clientes MySQL, se recomienda especificar la contraseña de usuario con [doble SHA1](../operations/settings/settings_users.md#password_double_sha1_hex) en el archivo de configuración.
Si la contraseña de usuario se especifica usando [SHA256](../operations/settings/settings_users.md#password_sha256_hex), algunos clientes no podrán autenticarse (mysqljs y versiones antiguas de la herramienta de línea de comandos mysql).

Restricción:

@@ -43,4 +46,4 @@ Restricción:

- algunos tipos de datos se envían como cadenas

-[Artículo Original](https://clickhouse.tech/docs/es/interfaces/mysql/)
+[Artículo Original](https://clickhouse.tech/docs/en/interfaces/mysql/)
diff --git a/docs/es/interfaces/odbc.md b/docs/es/interfaces/odbc.md
index e5ca03e61d2..9d1a755122b 100644
--- a/docs/es/interfaces/odbc.md
+++ b/docs/es/interfaces/odbc.md
@@ -1,9 +1,12 @@
---
machine_translated: true
+machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa
+toc_priority: 23
+toc_title: Controlador ODBC
---

# Controlador ODBC {#odbc-driver}

- [Controlador oficial](https://github.com/ClickHouse/clickhouse-odbc).

-[Artículo Original](https://clickhouse.tech/docs/es/interfaces/odbc/)
+[Artículo Original](https://clickhouse.tech/docs/en/interfaces/odbc/)
diff --git a/docs/es/interfaces/tcp.md b/docs/es/interfaces/tcp.md
index 6a67fe09c99..fb304f739da 100644
--- a/docs/es/interfaces/tcp.md
+++ b/docs/es/interfaces/tcp.md
@@ -1,9 +1,12 @@
---
machine_translated: true
+machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa
+toc_priority: 18
+toc_title: Interfaz nativa (TCP)
---

# Interfaz nativa (TCP) {#native-interface-tcp}

El protocolo nativo se utiliza en el [cliente de línea de comandos](cli.md), para la comunicación entre servidores durante el procesamiento de consultas distribuidas, y también en otros programas de C++. Desafortunadamente, el protocolo nativo de ClickHouse aún no tiene especificaciones formales, pero puede deducirse mediante ingeniería inversa a partir del código fuente de ClickHouse (comenzando [por aquí](https://github.com/ClickHouse/ClickHouse/tree/master/src/Client)) y/o mediante la interceptación y el análisis del tráfico TCP.
-[Artículo Original](https://clickhouse.tech/docs/es/interfaces/tcp/)
+[Artículo Original](https://clickhouse.tech/docs/en/interfaces/tcp/)
diff --git a/docs/es/interfaces/third-party/client_libraries.md b/docs/es/interfaces/third-party/client_libraries.md
index 96dd21d1f9d..28c3a9529f4 100644
--- a/docs/es/interfaces/third-party/client_libraries.md
+++ b/docs/es/interfaces/third-party/client_libraries.md
@@ -1,5 +1,8 @@
---
machine_translated: true
+machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa
+toc_priority: 26
+toc_title: Bibliotecas de clientes
---

# Bibliotecas de clientes de desarrolladores de terceros {#client-libraries-from-third-party-developers}

@@ -13,12 +16,12 @@ machine_translated: true
    - [clickhouse-client](https://github.com/yurial/clickhouse-client)
    - [aiochclient](https://github.com/maximdanilchenko/aiochclient)
- PHP
-    - [smi2/phpclickhouse](https://packagist.org/packages/smi2/phpClickHouse)
-    - [8bitov/clickhouse-php-client](https://packagist.org/packages/8bitov/clickhouse-php-client)
-    - [bozerkins/clickhouse-client](https://packagist.org/packages/bozerkins/clickhouse-client)
-    - [simpod/clickhouse-client](https://packagist.org/packages/simpod/clickhouse-client)
-    - [seva-code/php-click-house-client](https://packagist.org/packages/seva-code/php-click-house-client)
-    - [SeasClick C++ client](https://github.com/SeasX/SeasClick)
+    - [smi2/phpClickHouse](https://packagist.org/packages/smi2/phpClickHouse)
+    - [8bitov/clickhouse-php-client](https://packagist.org/packages/8bitov/clickhouse-php-client)
+    - [bozerkins/clickhouse-client](https://packagist.org/packages/bozerkins/clickhouse-client)
+    - [simpod/clickhouse-client](https://packagist.org/packages/simpod/clickhouse-client)
+    - [seva-code/php-click-house-client](https://packagist.org/packages/seva-code/php-click-house-client)
+    - [Cliente SeasClick C++](https://github.com/SeasX/SeasClick)
- Go
    - [clickhouse](https://github.com/kshvakov/clickhouse/)
    - [go-clickhouse](https://github.com/roistat/go-clickhouse)
@@ -52,4 +55,4 @@ machine_translated: true
- Nim
    - [nim-clickhouse](https://github.com/leonardoce/nim-clickhouse)

-[Artículo Original](https://clickhouse.tech/docs/es/interfaces/third-party/client_libraries/)
+[Artículo Original](https://clickhouse.tech/docs/en/interfaces/third-party/client_libraries/)
diff --git a/docs/es/interfaces/third-party/gui.md b/docs/es/interfaces/third-party/gui.md
index 909b078505d..9ad60291016 100644
--- a/docs/es/interfaces/third-party/gui.md
+++ b/docs/es/interfaces/third-party/gui.md
@@ -1,5 +1,8 @@
---
machine_translated: true
+machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa
+toc_priority: 28
+toc_title: Interfaces Visuales
---

# Interfaces visuales de desarrolladores de terceros {#visual-interfaces-from-third-party-developers}

@@ -88,7 +91,7 @@ Función:

### clickhouse-flamegraph {#clickhouse-flamegraph}

-[Sistema abierto.](https://github.com/Slach/clickhouse-flamegraph) es una herramienta especializada para visualizar el `system.trace_log` como [Gráfico de llamas](http://www.brendangregg.com/flamegraphs.html).
+[clickhouse-flamegraph](https://github.com/Slach/clickhouse-flamegraph) es una herramienta especializada para visualizar el `system.trace_log` como [Flamegraph](http://www.brendangregg.com/flamegraphs.html).
## Comercial {#commercial} @@ -123,7 +126,7 @@ Nivel de Cifrado WEP [disponible de forma gratuita](https://cloud.yandex.com/doc ### Software de Holística {#holistics-software} -[Holística](https://www.holistics.io/) Este es una plataforma de datos de pila completa y una herramienta de inteligencia de negocios. +[Holística](https://www.holistics.io/) es una plataforma de datos de pila completa y una herramienta de inteligencia de negocios. Función: @@ -146,4 +149,4 @@ Función: [Cómo configurar ClickHouse en Looker.](https://docs.looker.com/setup-and-management/database-config/clickhouse) -[Artículo Original](https://clickhouse.tech/docs/es/interfaces/third-party/gui/) +[Artículo Original](https://clickhouse.tech/docs/en/interfaces/third-party/gui/) diff --git a/docs/es/interfaces/third-party/index.md b/docs/es/interfaces/third-party/index.md new file mode 100644 index 00000000000..821867a3ca7 --- /dev/null +++ b/docs/es/interfaces/third-party/index.md @@ -0,0 +1,8 @@ +--- +machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_folder_title: Third-Party +toc_priority: 24 +--- + + diff --git a/docs/es/interfaces/third-party/integrations.md b/docs/es/interfaces/third-party/integrations.md index 2d6e525e0bf..00be86b187d 100644 --- a/docs/es/interfaces/third-party/integrations.md +++ b/docs/es/interfaces/third-party/integrations.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 27 +toc_title: "Integraci\xF3n" --- # Bibliotecas de integración de desarrolladores externos {#integration-libraries-from-third-party-developers} @@ -39,7 +42,7 @@ machine_translated: true - [graphouse](https://github.com/yandex/graphouse) - [de carbono-clickhouse](https://github.com/lomik/carbon-clickhouse) + - [Sistema abierto.](https://github.com/lomik/graphite-clickhouse) - - [Grafito-ch-optimizador](https://github.com/innogames/graphite-ch-optimizer) - optimiza las particiones [\*GraphiteMergeTree](../../operations/table_engines/graphitemergetree.md#graphitemergetree) reglas de [Configuración de rollup](../../operations/table_engines/graphitemergetree.md#rollup-configuration) podría ser aplicado + - [Grafito-ch-optimizador](https://github.com/innogames/graphite-ch-optimizer) - optimiza las particiones [\*GraphiteMergeTree](../../engines/table_engines/mergetree_family/graphitemergetree.md#graphitemergetree) reglas de [Configuración de rollup](../../engines/table_engines/mergetree_family/graphitemergetree.md#rollup-configuration) podría ser aplicado - [Grafana](https://grafana.com/) - [Bienvenidos al Portal de Licitación Electrónica de Licitación Electrónica](https://github.com/Vertamedia/clickhouse-grafana) - [Prometeo](https://prometheus.io/) @@ -48,7 +51,7 @@ machine_translated: true - [Sistema abierto.](https://github.com/hot-wifi/clickhouse_exporter) (utilizar [Ir cliente](https://github.com/kshvakov/clickhouse/)) - [Nagios](https://www.nagios.org/) - [Bienvenidos al Portal de Licitación Electrónica de Licitación Electrónica](https://github.com/exogroup/check_clickhouse/) - - [check\_clickhouse.py](https://github.com/innogames/igmonplugins/blob/master/src/check_clickhouse.py) + - [Inicio](https://github.com/innogames/igmonplugins/blob/master/src/check_clickhouse.py) - [Zabbix](https://www.zabbix.com) - [Sistema abierto.](https://github.com/Altinity/clickhouse-zabbix-template) - [Sematext](https://sematext.com/) @@ -57,7 +60,7 @@ machine_translated: true - [rsyslog](https://www.rsyslog.com/) - 
[Bienvenidos al Portal de Licitación Electrónica de Licitación Electrónica](https://www.rsyslog.com/doc/master/configuration/modules/omclickhouse.html) - [fluentd](https://www.fluentd.org) - - [Casa de campo](https://github.com/flant/loghouse) (para [Kubernetes](https://kubernetes.io)) + - [casa de campo](https://github.com/flant/loghouse) (para [Kubernetes](https://kubernetes.io)) - [Información](https://www.sematext.com/logagent) - [Sistema de tabiquería interior y exterior](https://sematext.com/docs/logagent/output-plugin-clickhouse/) - Geo @@ -70,18 +73,18 @@ machine_translated: true - [SQLAlchemy](https://www.sqlalchemy.org) - [sqlalchemy-clickhouse](https://github.com/cloudflare/sqlalchemy-clickhouse) (utilizar [InformaciónSistema abierto.](https://github.com/Infinidat/infi.clickhouse_orm)) - [pandas](https://pandas.pydata.org) - - [Nuestros Servicios](https://github.com/kszucs/pandahouse) + - [Pandahouse](https://github.com/kszucs/pandahouse) - R - [Dplyr](https://db.rstudio.com/dplyr/) - [Bienvenidos al Portal de Licitación Electrónica de Licitación Electrónica](https://github.com/IMSMWU/RClickhouse) (utilizar [Bienvenidos](https://github.com/artpaul/clickhouse-cpp)) - Java - [Hadoop](http://hadoop.apache.org) - - [Sistema abierto.](https://github.com/jaykelin/clickhouse-hdfs-loader) (utilizar [JDBC](../../query_language/table_functions/jdbc.md)) + - [Sistema abierto.](https://github.com/jaykelin/clickhouse-hdfs-loader) (utilizar [JDBC](../../sql_reference/table_functions/jdbc.md)) - Ciudad - [Akka](https://akka.io) - [Sistema abierto.](https://github.com/crobox/clickhouse-scala-client) - C\# - - [Nivel de Cifrado WEP](https://docs.microsoft.com/en-us/dotnet/framework/data/adonet/ado-net-overview) + - [ADO.NET](https://docs.microsoft.com/en-us/dotnet/framework/data/adonet/ado-net-overview) - [Sistema abierto.Ado](https://github.com/killwort/ClickHouse-Net) - [Sistema abierto.Cliente](https://github.com/DarkWanderer/ClickHouse.Client) - [Sistema abierto.](https://github.com/ilyabreev/ClickHouse.Net) @@ -90,4 +93,4 @@ machine_translated: true - [Ecto](https://github.com/elixir-ecto/ecto) - [Método de codificación de datos:](https://github.com/appodeal/clickhouse_ecto) -[Artículo Original](https://clickhouse.tech/docs/es/interfaces/third-party/integrations/) +[Artículo Original](https://clickhouse.tech/docs/en/interfaces/third-party/integrations/) diff --git a/docs/es/interfaces/third-party/proxy.md b/docs/es/interfaces/third-party/proxy.md index 29e0773e3d8..6c83d97a747 100644 --- a/docs/es/interfaces/third-party/proxy.md +++ b/docs/es/interfaces/third-party/proxy.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 29 +toc_title: Proxy --- # Servidores proxy de desarrolladores de terceros {#proxy-servers-from-third-party-developers} @@ -40,4 +43,4 @@ Función: Implementado en Go. -[Artículo Original](https://clickhouse.tech/docs/es/interfaces/third-party/proxy/) +[Artículo Original](https://clickhouse.tech/docs/en/interfaces/third-party/proxy/) diff --git a/docs/es/introduction/adopters.md b/docs/es/introduction/adopters.md index 5d23e3f1017..822946a421b 100644 --- a/docs/es/introduction/adopters.md +++ b/docs/es/introduction/adopters.md @@ -1,11 +1,14 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 8 +toc_title: Adoptante --- # Adoptadores de ClickHouse {#clickhouse-adopters} !!! 
warning "Descargo" - La siguiente lista de empresas que utilizan ClickHouse y sus historias de éxito se recopila a partir de fuentes públicas, por lo que podría diferir de la realidad actual. Realmente agradeceríamos que compartiera la historia de adoptar ClickHouse en su empresa y [agregarlo a la lista](https://github.com/ClickHouse/ClickHouse/edit/master/docs/es/introduction/adopters.md), pero por favor asegúrese de que usted no tendrá ningunos problemas de NDA haciendo así. Proporcionar actualizaciones con publicaciones de otras compañías también es útil. + La siguiente lista de empresas que utilizan ClickHouse y sus historias de éxito se recopila a partir de fuentes públicas, por lo que podría diferir de la realidad actual. Le agradeceríamos que compartiera la historia de adoptar ClickHouse en su empresa y [agregarlo a la lista](https://github.com/ClickHouse/ClickHouse/edit/master/docs/en/introduction/adopters.md), pero por favor asegúrese de que usted no tendrá ningunos problemas de NDA haciendo así. Proporcionar actualizaciones con publicaciones de otras compañías también es útil. | Empresa | Industria | Usecase | Tamaño de clúster | (Un)Tamaño de datos comprimidos\* | Referencia | |--------------------------------------------------------------------------------------------|------------------------------------|-----------------------------|------------------------------------------------------------------|-------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| @@ -21,9 +24,9 @@ machine_translated: true | `Dataliance/UltraPower` | Telecomunicaciones | Analítica | — | — | [Diapositivas en chino, enero 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup12/telecom.pdf) | | [CARTO](https://carto.com/) | Inteligencia de negocios | Análisis geográfico | — | — | [Procesamiento geoespacial con Clickhouse](https://carto.com/blog/geospatial-processing-with-clickhouse/) | | [CERN](http://public.web.cern.ch/public/) | Investigación | Experimento | — | — | [Comunicado de prensa, abril de 2012](https://www.yandex.com/company/press_center/press_releases/2012/2012-04-10/) | -| [Cisco](http://cisco.com/) | Rojo | Análisis de tráfico | — | — | [Charla relámpago, octubre 2019](https://youtu.be/-hI1vDR2oPY?t=5057) | +| [Cisco](http://cisco.com/) | Red | Análisis de tráfico | — | — | [Charla relámpago, octubre 2019](https://youtu.be/-hI1vDR2oPY?t=5057) | | [Valores de la ciudadela](https://www.citadelsecurities.com/) | Financiación | — | — | — | [Contribución, marzo 2019](https://github.com/ClickHouse/ClickHouse/pull/4774) | -| [Más información](https://city-mobil.ru) | Taxi | Analítica | — | — | [Entrada de Blog en ruso, marzo de 2020](https://habr.com/en/company/citymobil/blog/490660/) | +| [Más información](https://city-mobil.ru) | Taxi | Analítica | — | — | [Blog Post en ruso, marzo 2020](https://habr.com/en/company/citymobil/blog/490660/) | | [ContentSquare](https://contentsquare.com) | Análisis web | Producto principal | — | — | [Publicación de blog en francés, noviembre 2018](http://souslecapot.net/2018/11/21/patrick-chatain-vp-engineering-chez-contentsquare-penser-davantage-amelioration-continue-que-revolution-constante/) | | [Cloudflare](https://cloudflare.com) | CDN | Análisis de tráfico | 36 servidores | — | 
[Mensaje del blog, Mayo 2017](https://blog.cloudflare.com/how-cloudflare-analyzes-1m-dns-queries-per-second/), [Mensaje del blog, marzo 2018](https://blog.cloudflare.com/http-analytics-for-6m-requests-per-second-using-clickhouse/) | | [Corunet](https://coru.net/) | Analítica | Producto principal | — | — | [Diapositivas en español, Abril 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup21/predictive_models.pdf) | @@ -32,26 +35,26 @@ machine_translated: true | [Banco de Deutsche](https://db.com) | Financiación | BI Analytics | — | — | [Diapositivas en español, octubre 2019](https://bigdatadays.ru/wp-content/uploads/2019/10/D2-H3-3_Yakunin-Goihburg.pdf) | | [Diva-e](https://www.diva-e.com) | Consultoría digital | Producto principal | — | — | [Diapositivas en español, septiembre 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup29/ClickHouse-MeetUp-Unusual-Applications-sd-2019-09-17.pdf) | | [Exness](https://www.exness.com) | Comercio | Métricas, Registro | — | — | [Charla en ruso, mayo 2019](https://youtu.be/_rpU-TvSfZ8?t=3215) | -| [Sistema abierto.](https://geniee.co.jp) | Anuncio rojo | Producto principal | — | — | [Publicación de blog en japonés, julio 2017](https://tech.geniee.co.jp/entry/2017/07/20/160100) | +| [Sistema abierto.](https://geniee.co.jp) | Red Ad | Producto principal | — | — | [Publicación de blog en japonés, julio 2017](https://tech.geniee.co.jp/entry/2017/07/20/160100) | | [HUYA](https://www.huya.com/) | Video Streaming | Analítica | — | — | [Diapositivas en chino, octubre 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/7.%20ClickHouse万亿数据分析实践%20李本旺(sundy-li)%20虎牙.pdf) | | [Idealista](https://www.idealista.com) | Inmobiliario | Analítica | — | — | [Blog Post en Inglés, Abril 2019](https://clickhouse.yandex/blog/en/clickhouse-meetup-in-madrid-on-april-2-2019) | -| [Infovista](https://www.infovista.com/) | Rojo | Analítica | — | — | [Diapositivas en español, octubre 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup30/infovista.pdf) | +| [Infovista](https://www.infovista.com/) | Red | Analítica | — | — | [Diapositivas en español, octubre 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup30/infovista.pdf) | | [InnoGames](https://www.innogames.com) | Juego | Métricas, Registro | — | — | [Diapositivas en ruso, septiembre 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/graphite_and_clickHouse.pdf) | | [Integros](https://integros.com) | Plataforma para servicios de video | Analítica | — | — | [Diapositivas en ruso, mayo 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup22/strategies.pdf) | | [Datos de Kodiak](https://www.kodiakdata.com/) | Nube | Producto principal | — | — | [Diapositivas en Engish, Abril 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup13/kodiak_data.pdf) | | [Kontur](https://kontur.ru) | Desarrollo de software | Métricas | — | — | [Charla en ruso, noviembre 2018](https://www.youtube.com/watch?v=U4u4Bd0FtrY) | -| [Sistema abierto.](https://lifestreet.com/) | Anuncio rojo | Producto principal | 75 servidores (3 réplicas) | 5.27 PiB | [Publicación de blog en ruso, febrero 2017](https://habr.com/en/post/322620/) | +| [Sistema abierto.](https://lifestreet.com/) | Red Ad | Producto principal | 75 servidores (3 réplicas) | 5.27 PiB | [Publicación de blog en ruso, febrero 2017](https://habr.com/en/post/322620/) | | [Soluciones 
en la nube de Mail.ru](https://mcs.mail.ru/) | Servicios en la nube | Producto principal | — | — | [Ejecución de ClickHouse Instance, en ruso](https://mcs.mail.ru/help/db-create/clickhouse#) | | [Mensaje de pájaro](https://www.messagebird.com) | Telecomunicaciones | Estadísticas | — | — | [Diapositivas en español, noviembre 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup20/messagebird.pdf) | -| [MGID](https://www.mgid.com/) | Anuncio rojo | Analítica Web | — | — | [Nuestra experiencia en la implementación analítica DBMS ClickHouse, en ruso](http://gs-studio.com/news-about-it/32777----clickhouse---c) | +| [MGID](https://www.mgid.com/) | Red Ad | Analítica Web | — | — | [Nuestra experiencia en la implementación analítica DBMS ClickHouse, en ruso](http://gs-studio.com/news-about-it/32777----clickhouse---c) | | [UnoAPM](https://www.oneapm.com/) | Supervisión y análisis de datos | Producto principal | — | — | [Diapositivas en chino, octubre 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/8.%20clickhouse在OneAPM的应用%20杜龙.pdf) | | [Pragma Innovación](http://www.pragma-innovation.fr/) | Telemetría y Análisis de Big Data | Producto principal | — | — | [Diapositivas en español, octubre 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup18/4_pragma_innovation.pdf) | | [QINGCLOUD](https://www.qingcloud.com/) | Servicios en la nube | Producto principal | — | — | [Diapositivas en chino, octubre 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/4.%20Cloud%20%2B%20TSDB%20for%20ClickHouse%20张健%20QingCloud.pdf) | -| [Qrator](https://qrator.net) | Protección DDoS | Producto principal | — | — | [Mensaje del blog, marzo 2019](https://blog.qrator.net/en/clickhouse-ddos-mitigation_37/) | +| [Qrator](https://qrator.net) | Protección DDoS | Producto principal | — | — | [Blog Post, marzo 2019](https://blog.qrator.net/en/clickhouse-ddos-mitigation_37/) | | [Tecnología de la información del PORCIMIENTO de Pekín Co., Ltd.](https://www.percent.cn/) | Analítica | Producto principal | — | — | [Diapositivas en chino, junio 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup24/4.%20ClickHouse万亿数据双中心的设计与实践%20.pdf) | | [Rambler](https://rambler.ru) | Servicios de Internet | Analítica | — | — | [Charla en ruso, abril 2018](https://medium.com/@ramblertop/разработка-api-clickhouse-для-рамблер-топ-100-f4c7e56f3141) | | [Tencent](https://www.tencent.com) | Mensajería | Tala | — | — | [Charla en chino, noviembre 2019](https://youtu.be/T-iVQRuw-QY?t=5050) | -| [Estrellas de tráfico](https://trafficstars.com/) | Anuncio rojo | — | — | — | [Diapositivas en ruso, mayo 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup15/lightning/ninja.pdf) | +| [Estrellas de tráfico](https://trafficstars.com/) | Red AD | — | — | — | [Diapositivas en ruso, mayo 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup15/lightning/ninja.pdf) | | [S7 Aerolíneas](https://www.s7.ru) | Aérea | Métricas, Registro | — | — | [Charla en ruso, marzo 2019](https://www.youtube.com/watch?v=nwG68klRpPg&t=15s) | | [SEMrush](https://www.semrush.com/) | Marketing | Producto principal | — | — | [Diapositivas en ruso, agosto 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/5_semrush.pdf) | | [Inicio](https://www.scireum.de/) | Comercio electrónico | Producto principal | — | — | [Charla en alemán, febrero de 
2020](https://www.youtube.com/watch?v=7QWAn5RbyR4) | @@ -63,7 +66,7 @@ machine_translated: true | [Salto](https://www.splunk.com/) | Análisis de negocios | Producto principal | — | — | [Diapositivas en español, enero 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup12/splunk.pdf) | | [Spotify](https://www.spotify.com) | Sica | Experimentación | — | — | [Diapositivas, julio 2018](https://www.slideshare.net/glebus/using-clickhouse-for-experimentation-104247173) | | [Tencent](https://www.tencent.com) | Grandes Datos | Procesamiento de datos | — | — | [Diapositivas en chino, octubre 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/5.%20ClickHouse大数据集群应用_李俊飞腾讯网媒事业部.pdf) | -| [Más información](https://www.uber.com) | Taxi | Tala | — | — | [Diapositivas, febrero de 2020](https://presentations.clickhouse.tech/meetup40/ml.pdf) | +| [Más información](https://www.uber.com) | Taxi | Tala | — | — | [Diapositivas, febrero de 2020](https://presentations.clickhouse.tech/meetup40/uber.pdf) | | [VKontakte](https://vk.com) | Red social | Estadísticas, Registro | — | — | [Diapositivas en ruso, agosto 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/3_vk.pdf) | | [Método de codificación de datos:](https://wisebits.com/) | Soluciones de TI | Analítica | — | — | [Diapositivas en ruso, mayo 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup22/strategies.pdf) | | [Tecnología de Xiaoxin.](https://www.xiaoheiban.cn/) | Educación | Propósito común | — | — | [Diapositivas en español, noviembre 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/sync-clickhouse-with-mysql-mongodb.pptx) | @@ -72,8 +75,8 @@ machine_translated: true | [Yandex DataLens](https://cloud.yandex.ru/services/datalens) | Inteligencia de negocios | Producto principal | — | — | [Diapositivas en ruso, diciembre 2019](https://presentations.clickhouse.tech/meetup38/datalens.pdf) | | [Mercado de Yandex](https://market.yandex.ru/) | Comercio electrónico | Métricas, Registro | — | — | [Charla en ruso, enero 2019](https://youtu.be/_l1qP0DyBcA?t=478) | | [Yandex Metrica](https://metrica.yandex.com) | Análisis web | Producto principal | 360 servidores en un clúster, 1862 servidores en un departamento | 66.41 PiB / 5.68 PiB | [Diapositivas, febrero de 2020](https://presentations.clickhouse.tech/meetup40/introduction/#13) | -| [Método de codificación de datos:](https://htc-cs.ru/) | Desarrollo de software | Métricas, Registro | — | — | [Blog Post, marzo 2019, en ruso](https://vc.ru/dev/62715-kak-my-stroili-monitoring-na-prometheus-clickhouse-i-elk) | +| [ЦВТ](https://htc-cs.ru/) | Desarrollo de software | Métricas, Registro | — | — | [Blog Post, marzo 2019, en ruso](https://vc.ru/dev/62715-kak-my-stroili-monitoring-na-prometheus-clickhouse-i-elk) | | [МКБ](https://mkb.ru/) | Banco | Supervisión del sistema web | — | — | [Diapositivas en ruso, septiembre 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/mkb.pdf) | -| [Método de codificación de datos:](https://jinshuju.net) | BI Analytics | Producto principal | — | — | [Diapositivas en chino, octubre 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup24/3.%20金数据数据架构调整方案Public.pdf) | +| [金数据](https://jinshuju.net) | BI Analytics | Producto principal | — | — | [Diapositivas en chino, octubre 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup24/3.%20金数据数据架构调整方案Public.pdf) | 
-[Artículo Original](https://clickhouse.tech/docs/es/introduction/adopters/) +[Artículo Original](https://clickhouse.tech/docs/en/introduction/adopters/) diff --git a/docs/es/introduction/distinctive_features.md b/docs/es/introduction/distinctive_features.md index ed071705e8e..5117fcf9324 100644 --- a/docs/es/introduction/distinctive_features.md +++ b/docs/es/introduction/distinctive_features.md @@ -1,16 +1,19 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 4 +toc_title: "Caracter\xEDsticas distintivas" --- # Características distintivas de ClickHouse {#distinctive-features-of-clickhouse} ## DBMS orientado a columnas verdaderas {#true-column-oriented-dbms} -En un verdadero DBMS orientado a columnas, no se almacenan datos adicionales con los valores. Entre otras cosas, esto significa que los valores de longitud constante deben ser compatibles, para evitar almacenar su longitud “number” al lado de los valores. Como ejemplo, mil millones de valores de tipo UInt8 deberían consumir alrededor de 1 GB sin comprimir, o esto afectará fuertemente el uso de la CPU. Es muy importante almacenar los datos de forma compacta (sin “garbage”) incluso sin comprimir, ya que la velocidad de descompresión (uso de CPU) depende principalmente del volumen de datos sin comprimir. +En un verdadero DBMS orientado a columnas, no se almacenan datos adicionales con los valores. Entre otras cosas, esto significa que los valores de longitud constante deben admitirse, para evitar almacenar su longitud “number” al lado de los valores. Como ejemplo, mil millones de valores de tipo UInt8 deberían consumir alrededor de 1 GB sin comprimir, o esto afecta fuertemente el uso de la CPU. Es esencial almacenar los datos de forma compacta (sin “garbage”) incluso sin comprimir, ya que la velocidad de descompresión (uso de CPU) depende principalmente del volumen de datos sin comprimir. -Esto vale la pena señalar porque hay sistemas que pueden almacenar valores de diferentes columnas por separado, pero que no pueden procesar eficazmente las consultas analíticas debido a su optimización para otros escenarios. Los ejemplos son HBase, BigTable, Cassandra e HyperTable. En estos sistemas, obtendrá un rendimiento de alrededor de cien mil filas por segundo, pero no cientos de millones de filas por segundo. +Vale la pena señalarlo porque hay sistemas que pueden almacenar valores de diferentes columnas por separado, pero que no pueden procesar efectivamente las consultas analíticas debido a su optimización para otros escenarios. Los ejemplos son HBase, BigTable, Cassandra e HyperTable. En estos sistemas, obtendría un rendimiento de alrededor de cien mil filas por segundo, pero no cientos de millones de filas por segundo. -También vale la pena señalar que ClickHouse es un sistema de gestión de bases de datos, no una sola base de datos. ClickHouse permite crear tablas y bases de datos en tiempo de ejecución, cargar datos y ejecutar consultas sin volver a configurar y reiniciar el servidor. +También vale la pena señalar que ClickHouse es un sistema de administración de bases de datos, no una sola base de datos. ClickHouse permite crear tablas y bases de datos en tiempo de ejecución, cargar datos y ejecutar consultas sin volver a configurar y reiniciar el servidor.
## Compresión de datos {#data-compression} @@ -18,16 +21,16 @@ Algunos DBMS orientados a columnas (InfiniDB CE y MonetDB) no utilizan la compre ## Almacenamiento en disco de datos {#disk-storage-of-data} -Mantener los datos físicamente ordenados por clave principal permite extraer datos para sus valores específicos o rangos de valores con baja latencia, menos de pocas docenas de milisegundos. Algunos DBMS orientados a columnas (como SAP HANA y Google PowerDrill) solo pueden funcionar en RAM. Este enfoque fomenta la asignación de un presupuesto de hardware más grande que el realmente necesario para el análisis en tiempo real. ClickHouse está diseñado para funcionar en discos duros normales, lo que significa que el costo por GB de almacenamiento de datos es bajo, pero SSD y RAM adicional también se utilizan completamente si están disponibles. +Mantener los datos físicamente ordenados por clave principal permite extraer datos para sus valores específicos o rangos de valores con baja latencia, menos de unas pocas docenas de milisegundos. Algunos DBMS orientados a columnas (como SAP HANA y Google PowerDrill) solo pueden funcionar en RAM. Este enfoque fomenta la asignación de un presupuesto de hardware más grande que el necesario para el análisis en tiempo real. ClickHouse está diseñado para funcionar en discos duros normales, lo que significa que el costo por GB de almacenamiento de datos es bajo, pero SSD y RAM adicional también se utilizan completamente si están disponibles. ## Procesamiento paralelo en varios núcleos {#parallel-processing-on-multiple-cores} -Las consultas de gran tamaño se paralelizan de forma natural, tomando todos los recursos necesarios que estén disponibles en el servidor actual. +Las consultas grandes se paralelizan naturalmente, tomando todos los recursos necesarios disponibles en el servidor actual. ## Procesamiento distribuido en varios servidores {#distributed-processing-on-multiple-servers} Casi ninguno de los DBMS columnar mencionados anteriormente tiene soporte para el procesamiento de consultas distribuidas. -En ClickHouse, los datos pueden residir en diferentes fragmentos. Cada fragmento puede ser un grupo de réplicas que se utilizan para la tolerancia a errores. La consulta se procesa en todos los fragmentos en paralelo. Esto es transparente para el usuario. +En ClickHouse, los datos pueden residir en diferentes fragmentos. Cada fragmento puede ser un grupo de réplicas utilizadas para la tolerancia a errores. Todos los fragmentos se utilizan para ejecutar una consulta en paralelo, de forma transparente para el usuario. ## Soporte SQL {#sql-support} @@ -37,19 +40,19 @@ No se admiten subconsultas y funciones de ventana dependientes. ## Motor del vector {#vector-engine} -Los datos no solo se almacenan mediante columnas, sino que se procesan mediante vectores (partes de columnas). Esto nos permite lograr una alta eficiencia de CPU. +Los datos no solo se almacenan mediante columnas, sino que se procesan mediante vectores (partes de columnas), lo que permite lograr una alta eficiencia de CPU. ## Actualizaciones de datos en tiempo real {#real-time-data-updates} -ClickHouse admite tablas con una clave principal. Para realizar consultas rápidamente en el rango de la clave principal, los datos se ordenan incrementalmente utilizando el árbol de combinación. Debido a esto, los datos se pueden agregar continuamente a la tabla. No se toman bloqueos cuando se ingieren nuevos datos. +ClickHouse admite tablas con una clave principal. 
Para realizar consultas rápidamente en el rango de la clave principal, los datos se ordenan de forma incremental utilizando el árbol de combinación. Debido a esto, los datos se pueden agregar continuamente a la tabla. No se toman bloqueos cuando se ingieren nuevos datos. ## Índice {#index} -Tener un dato ordenado físicamente por clave principal hace posible extraer datos para sus valores específicos o rangos de valores con baja latencia, menos de pocas docenas de milisegundos. +Tener los datos ordenados físicamente por clave principal permite extraer datos para sus valores específicos o rangos de valores con baja latencia, menos de unas pocas docenas de milisegundos. ## Adecuado para consultas en línea {#suitable-for-online-queries} -La baja latencia significa que las consultas se pueden procesar sin demora y sin intentar preparar la respuesta con anticipación, justo en el mismo momento mientras se carga la página de la interfaz de usuario. En otras palabras, en línea. +La baja latencia significa que las consultas se pueden procesar sin demora y sin intentar preparar una respuesta por adelantado, justo en el mismo momento mientras se carga la página de la interfaz de usuario. En otras palabras, en línea. ## Soporte para cálculos aproximados {#support-for-approximated-calculations} @@ -61,8 +64,8 @@ ClickHouse proporciona varias formas de intercambiar precisión por rendimiento: ## Replicación de datos y soporte de integridad de datos {#data-replication-and-data-integrity-support} -Utiliza la replicación multimaster asincrónica. Después de escribir en cualquier réplica disponible, los datos se distribuyen a todas las réplicas restantes en segundo plano. El sistema mantiene datos idénticos en diferentes réplicas. La recuperación después de la mayoría de las fallas se realiza automáticamente, y en casos complejos, semiautomáticamente. +ClickHouse utiliza la replicación multi-maestro asincrónica. Después de escribir en cualquier réplica disponible, todas las réplicas restantes recuperan su copia en segundo plano. El sistema mantiene datos idénticos en diferentes réplicas. La recuperación después de la mayoría de las fallas se realiza automáticamente, o semiautomáticamente en casos complejos. -Para obtener más información, consulte la sección [Replicación de datos](../operations/table_engines/replication.md). +Para obtener más información, consulte la sección [Replicación de datos](../engines/table_engines/mergetree_family/replication.md). -[Artículo Original](https://clickhouse.tech/docs/es/introduction/distinctive_features/) +[Artículo Original](https://clickhouse.tech/docs/en/introduction/distinctive_features/) diff --git a/docs/es/introduction/features_considered_disadvantages.md b/docs/es/introduction/features_considered_disadvantages.md index 1e615f9caaf..60eabad3102 100644 --- a/docs/es/introduction/features_considered_disadvantages.md +++ b/docs/es/introduction/features_considered_disadvantages.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 5 +toc_title: "Caracter\xEDsticas de ClickHouse que pueden considerarse desventajas" --- # Características de ClickHouse que pueden considerarse desventajas {#clickhouse-features-that-can-be-considered-disadvantages} @@ -8,4 +11,4 @@ machine_translated: true 2. Falta de capacidad para modificar o eliminar datos ya insertados con alta tasa y baja latencia.
Hay eliminaciones y actualizaciones por lotes disponibles para limpiar o modificar datos, por ejemplo, para cumplir con [GDPR](https://gdpr-info.eu). 3. El índice disperso hace que ClickHouse no sea tan adecuado para consultas de puntos que recuperan filas individuales por sus claves. -[Artículo Original](https://clickhouse.tech/docs/es/introduction/features_considered_disadvantages/) +[Artículo Original](https://clickhouse.tech/docs/en/introduction/features_considered_disadvantages/) diff --git a/docs/es/introduction/history.md b/docs/es/introduction/history.md index bd50e867d29..d4a6b826822 100644 --- a/docs/es/introduction/history.md +++ b/docs/es/introduction/history.md @@ -1,21 +1,24 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 7 +toc_title: Historia --- # Historial de ClickHouse {#clickhouse-history} -ClickHouse fue desarrollado originalmente para alimentar [El Yandex.Métrica](https://metrica.yandex.com/), [la segunda plataforma de análisis web más grande del mundo](http://w3techs.com/technologies/overview/traffic_analysis/all), y sigue siendo el componente central de este sistema. Con más de 13 billones de registros en la base de datos y más de 20 mil millones de eventos diarios, ClickHouse permite generar informes personalizados sobre la marcha directamente a partir de datos no agregados. Este artículo cubre brevemente los objetivos de ClickHouse en las primeras etapas de su desarrollo. +ClickHouse se ha desarrollado inicialmente para alimentar [El Yandex.Métrica](https://metrica.yandex.com/), [la segunda plataforma de análisis web más grande del mundo](http://w3techs.com/technologies/overview/traffic_analysis/all), y sigue siendo el componente central de este sistema. Con más de 13 billones de registros en la base de datos y más de 20 mil millones de eventos diarios, ClickHouse permite generar informes personalizados sobre la marcha directamente a partir de datos no agregados. Este artículo cubre brevemente los objetivos de ClickHouse en las primeras etapas de su desarrollo. -El Yandex.Metrica construye informes personalizados sobre la marcha basados en hits y sesiones, con segmentos arbitrarios definidos por el usuario. Esto a menudo requiere la creación de agregados complejos, como el número de usuarios únicos. Los nuevos datos para crear un informe se reciben en tiempo real. +El Yandex.Metrica construye informes personalizados sobre la marcha basados en hits y sesiones, con segmentos arbitrarios definidos por el usuario. Hacerlo a menudo requiere construir agregados complejos, como el número de usuarios únicos. Los nuevos datos para crear un informe llegan en tiempo real. A partir de abril de 2014, Yandex.Metrica estaba rastreando alrededor de 12 mil millones de eventos (vistas de páginas y clics) diariamente. Todos estos eventos deben almacenarse para crear informes personalizados. Una sola consulta puede requerir escanear millones de filas en unos pocos cientos de milisegundos, o cientos de millones de filas en solo unos segundos. -## Uso en el Yandex.Metrica y otros servicios de Yandex {#usage-in-yandex-metrica-and-other-yandex-services} +## Uso en Yandex.Metrica y otros servicios de Yandex {#usage-in-yandex-metrica-and-other-yandex-services} -ClickHouse se utiliza para múltiples propósitos en Yandex.Métrica. -Su tarea principal es crear informes en modo en línea utilizando datos no agregados. Utiliza un clúster de 374 servidores, que almacenan más de 20,3 billones de filas en la base de datos. 
El volumen de datos comprimidos, sin contar la duplicación y la replicación, es de aproximadamente 2 PB. El volumen de datos sin comprimir (en formato TSV) sería de aproximadamente 17 PB. +ClickHouse sirve para múltiples propósitos en Yandex.Métrica. +Su tarea principal es crear informes en modo en línea utilizando datos no agregados. Utiliza un clúster de 374 servidores, que almacenan más de 20,3 billones de filas en la base de datos. El volumen de datos comprimidos es de aproximadamente 2 PB, sin tener en cuenta duplicados y réplicas. El volumen de datos sin comprimir (en formato TSV) sería de aproximadamente 17 PB. -ClickHouse también se utiliza para: +ClickHouse también juega un papel clave en los siguientes procesos: - Almacenamiento de datos para Session Replay de Yandex.Métrica. - Procesamiento de datos intermedios. @@ -23,17 +26,17 @@ ClickHouse también se utiliza para: - Ejecutar consultas para depurar el Yandex.Motor Metrica. - Análisis de registros desde la API y la interfaz de usuario. -ClickHouse tiene al menos una docena de instalaciones en otros servicios de Yandex: en verticales de búsqueda, Market, Direct, análisis de negocios, desarrollo móvil, AdFox, servicios personales y otros. +Hoy en día, hay varias docenas de instalaciones de ClickHouse en otros servicios y departamentos de Yandex: verticales de búsqueda, comercio electrónico, publicidad, análisis de negocios, desarrollo móvil, servicios personales y otros. ## Datos agregados y no agregados {#aggregated-and-non-aggregated-data} -Existe una opinión popular de que para calcular efectivamente las estadísticas, debe agregar datos ya que esto reduce el volumen de datos. +Existe una opinión generalizada de que para calcular las estadísticas de manera efectiva, debe agregar datos ya que esto reduce el volumen de datos. -Pero la agregación de datos es una solución muy limitada, por las siguientes razones: +Pero la agregación de datos viene con muchas limitaciones: -- Debe tener una lista predefinida de informes que el usuario necesitará. +- Debe tener una lista predefinida de los informes necesarios. - El usuario no puede hacer informes personalizados. -- Al agregar una gran cantidad de claves, el volumen de datos no se reduce y la agregación es inútil. +- Al agregar sobre un gran número de claves distintas, el volumen de datos apenas se reduce, por lo que la agregación es inútil. - Para un gran número de informes, hay demasiadas variaciones de agregación (explosión combinatoria). - Al agregar claves con alta cardinalidad (como las URL), el volumen de datos no se reduce en mucho (menos del doble). - Por esta razón, el volumen de datos con agregación podría crecer en lugar de reducirse. @@ -44,10 +47,10 @@ Si no agregamos nada y trabajamos con datos no agregados, esto podría reducir e Sin embargo, con la agregación, una parte significativa del trabajo se desconecta y se completa con relativa calma. Por el contrario, los cálculos en línea requieren calcular lo más rápido posible, ya que el usuario está esperando el resultado. -El Yandex.Metrica tiene un sistema especializado para agregar datos llamado Metrage, que se utiliza para la mayoría de los informes. +El Yandex.Metrica tiene un sistema especializado para agregar datos llamado Metrage, que se utilizó para la mayoría de los informes. A partir de 2009, Yandex.Metrica también utilizó una base de datos OLAP especializada para datos no agregados llamada OLAPServer, que anteriormente se usaba para el generador de informes. 
OLAPServer funcionó bien para datos no agregados, pero tenía muchas restricciones que no permitían que se utilizara para todos los informes según lo deseado. Estos incluyeron la falta de soporte para tipos de datos (solo números) y la incapacidad de actualizar datos de forma incremental en tiempo real (solo se podía hacer reescribiendo datos diariamente). OLAPServer no es un DBMS, sino una base de datos especializada. -Para eliminar las limitaciones de OLAPServer y resolver el problema de trabajar con datos no agregados para todos los informes, desarrollamos el DBMS ClickHouse. +El objetivo inicial de ClickHouse era eliminar las limitaciones de OLAPServer y resolver el problema de trabajar con datos no agregados para todos los informes, pero a lo largo de los años, se ha convertido en un sistema de gestión de bases de datos de propósito general adecuado para una amplia gama de tareas analíticas. -[Artículo Original](https://clickhouse.tech/docs/es/introduction/history/) +[Artículo Original](https://clickhouse.tech/docs/en/introduction/history/) diff --git a/docs/es/introduction/index.md b/docs/es/introduction/index.md new file mode 100644 index 00000000000..4488eeed6fc --- /dev/null +++ b/docs/es/introduction/index.md @@ -0,0 +1,8 @@ +--- +machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_folder_title: Introduction +toc_priority: 1 +--- + + diff --git a/docs/es/introduction/performance.md b/docs/es/introduction/performance.md index 4c2dfa971e0..b0b6cf0afa1 100644 --- a/docs/es/introduction/performance.md +++ b/docs/es/introduction/performance.md @@ -1,22 +1,25 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 6 +toc_title: Rendimiento --- # Rendimiento {#performance} De acuerdo con los resultados de las pruebas internas en Yandex, ClickHouse muestra el mejor rendimiento (tanto el mayor rendimiento para consultas largas como la menor latencia en consultas cortas) para escenarios operativos comparables entre los sistemas de su clase que estaban disponibles para pruebas. Puede ver los resultados de la prueba en un [página separada](https://clickhouse.tech/benchmark.html). -Esto también ha sido confirmado por numerosos puntos de referencia independientes. No son difíciles de encontrar mediante una búsqueda en Internet, o se puede ver [nuestra pequeña colección de enlaces relacionados](https://clickhouse.tech/#independent-benchmarks). +Numerosos puntos de referencia independientes llegaron a conclusiones similares. No son difíciles de encontrar mediante una búsqueda en Internet, o se puede ver [nuestra pequeña colección de enlaces relacionados](https://clickhouse.tech/#independent-benchmarks). ## Rendimiento para una única consulta grande {#throughput-for-a-single-large-query} -El rendimiento se puede medir en filas por segundo o en megabytes por segundo. Si los datos se colocan en la caché de la página, una consulta que no es demasiado compleja se procesa en hardware moderno a una velocidad de aproximadamente 2-10 GB / s de datos sin comprimir en un solo servidor (para los casos más simples, la velocidad puede alcanzar los 30 GB / s). Si los datos no se colocan en la memoria caché de la página, la velocidad depende del subsistema de disco y la velocidad de compresión de datos. Por ejemplo, si el subsistema de disco permite leer datos a 400 MB/s y la tasa de compresión de datos es de 3, la velocidad será de aproximadamente 1,2 GB/s. 
Para obtener la velocidad en filas por segundo, divida la velocidad en bytes por segundo por el tamaño total de las columnas utilizadas en la consulta. Por ejemplo, si se extraen 10 bytes de columnas, la velocidad será de alrededor de 100-200 millones de filas por segundo. +El rendimiento se puede medir en filas por segundo o megabytes por segundo. Si los datos se colocan en la caché de la página, una consulta que no es demasiado compleja se procesa en hardware moderno a una velocidad de aproximadamente 2-10 GB / s de datos sin comprimir en un solo servidor (para los casos más sencillos, la velocidad puede alcanzar 30 GB / s). Si los datos no se colocan en la memoria caché de la página, la velocidad depende del subsistema de disco y la velocidad de compresión de datos. Por ejemplo, si el subsistema de disco permite leer datos a 400 MB/s y la tasa de compresión de datos es 3, se espera que la velocidad sea de alrededor de 1,2 GB/s. Para obtener la velocidad en filas por segundo, divida la velocidad en bytes por segundo por el tamaño total de las columnas utilizadas en la consulta. Por ejemplo, si se extraen 10 bytes de columnas, se espera que la velocidad sea de alrededor de 100-200 millones de filas por segundo. La velocidad de procesamiento aumenta casi linealmente para el procesamiento distribuido, pero solo si el número de filas resultantes de la agregación o la clasificación no es demasiado grande. ## Latencia al procesar consultas cortas {#latency-when-processing-short-queries} -Si una consulta usa una clave principal y no selecciona demasiadas filas para procesar (cientos de miles) y no usa demasiadas columnas, podemos esperar menos de 50 milisegundos de latencia (dígitos individuales de milisegundos en el mejor de los casos) si los datos se colocan en la caché de la página. De lo contrario, la latencia se calcula a partir del número de búsquedas. Si utiliza unidades giratorias, para un sistema que no está sobrecargado, la latencia se calcula mediante esta fórmula: tiempo de búsqueda (10 ms) \* número de columnas consultadas \* número de partes de datos. +Si una consulta usa una clave principal y no selecciona demasiadas columnas y filas para procesar (cientos de miles), puede esperar menos de 50 milisegundos de latencia (dígitos individuales de milisegundos en el mejor de los casos) si los datos se colocan en la memoria caché de la página. De lo contrario, la latencia está dominada principalmente por el número de búsquedas. Si utiliza unidades de disco giratorias, para un sistema que no está sobrecargado, la latencia se puede estimar con esta fórmula: `seek time (10 ms) * count of columns queried * count of data parts`. ## Rendimiento al procesar una gran cantidad de consultas cortas {#throughput-when-processing-a-large-quantity-of-short-queries} @@ -24,6 +27,6 @@ En las mismas condiciones, ClickHouse puede manejar varios cientos de consultas ## Rendimiento al insertar datos {#performance-when-inserting-data} -Recomendamos insertar datos en paquetes de al menos 1000 filas o no más de una sola solicitud por segundo. Al insertar en una tabla MergeTree desde un volcado separado por tabuladores, la velocidad de inserción será de 50 a 200 MB / s. Si las filas insertadas tienen un tamaño de aproximadamente 1 Kb, la velocidad será de 50,000 a 200,000 filas por segundo. Si las filas son pequeñas, el rendimiento será mayor en filas por segundo (en los datos del sistema Banner -`>` 500.000 filas por segundo; en datos de grafito -`>` 1.000.000 de filas por segundo). 
Para mejorar el rendimiento, puede realizar varias consultas INSERT en paralelo, y el rendimiento aumentará linealmente. +Recomendamos insertar datos en paquetes de al menos 1000 filas o no más de una sola solicitud por segundo. Al insertar en una tabla MergeTree desde un volcado separado por tabuladores, la velocidad de inserción puede ser de 50 a 200 MB/s. Si las filas insertadas tienen alrededor de 1 Kb de tamaño, la velocidad será de 50,000 a 200,000 filas por segundo. Si las filas son pequeñas, el rendimiento puede ser mayor en filas por segundo (en los datos del sistema Banner -`>` 500.000 filas por segundo; en datos de grafito -`>` 1.000.000 de filas por segundo). Para mejorar el rendimiento, puede realizar varias consultas INSERT en paralelo, que se escala linealmente. -[Artículo Original](https://clickhouse.tech/docs/es/introduction/performance/) +[Artículo Original](https://clickhouse.tech/docs/en/introduction/performance/) diff --git a/docs/es/operations/access_rights.md b/docs/es/operations/access_rights.md index 9006ade40dd..2d909fdc750 100644 --- a/docs/es/operations/access_rights.md +++ b/docs/es/operations/access_rights.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 48 +toc_title: Derechos de acceso --- # Derechos de acceso {#access-rights} @@ -69,11 +72,11 @@ Los usuarios se registran en el `users` apartado. Aquí hay un fragmento de la ` Puede ver una declaración de dos usuarios: `default`y`web`. Hemos añadido el `web` usuario por separado. -El `default` usuario se elige en los casos en que no se pasa el nombre de usuario. El `default` usuario también se utiliza para el procesamiento de consultas distribuidas, si la configuración del servidor o clúster no `user` y `password` (véase la sección sobre el [Distribuido](../operations/table_engines/distributed.md) motor). +El `default` usuario se elige en los casos en que no se pasa el nombre de usuario. El `default` usuario también se utiliza para el procesamiento de consultas distribuidas, si la configuración del servidor o clúster no `user` y `password` (véase la sección sobre el [Distribuido](../engines/table_engines/special/distributed.md) motor). -El usuario que se utiliza para intercambiar información entre servidores combinados en un clúster no debe tener restricciones o cuotas sustanciales; de lo contrario, las consultas distribuidas fallarán. +The user that is used for exchanging information between servers combined in a cluster must not have substantial restrictions or quotas – otherwise, distributed queries will fail. -La contraseña se especifica en texto sin cifrar (no recomendado) o en SHA-256. El hash no es salado. En este sentido, no debe considerar estas contraseñas como proporcionar seguridad contra posibles ataques maliciosos. Más bien, son necesarios para la protección de los empleados. +La contraseña se especifica en texto sin cifrar (no recomendado) o en SHA-256. El hash no está salado. En este sentido, no debe considerar estas contraseñas como proporcionar seguridad contra posibles ataques maliciosos. Más bien, son necesarios para la protección de los empleados. Se especifica una lista de redes desde las que se permite el acceso. En este ejemplo, la lista de redes para ambos usuarios se carga desde un archivo independiente (`/etc/metrika.xml`) que contiene el `networks` sustitución. 
Aquí hay un fragmento de eso: @@ -95,7 +98,7 @@ La configuración incluye comentarios que explican cómo abrir el acceso desde t Para su uso en producción, sólo especifique elementos `ip` (direcciones IP y sus máscaras), ya que usar `host` y `host_regexp` podría causar latencia adicional. -A continuación se especifica el perfil de configuración de usuario (consulte la sección “[Perfiles de configuración](settings/settings_profiles.md)”. Puede especificar el perfil predeterminado, `default'`. El perfil puede tener cualquier nombre. Puede especificar el mismo perfil para diferentes usuarios. Lo más importante que puede escribir en el perfil de configuración es `readonly=1`, que asegura el acceso de sólo lectura. A continuación, especifique la cuota que se utilizará (consulte la sección “[Cuota](quotas.md#quotas)”). Puede especificar la cuota predeterminada: `default`. Se establece en la configuración de forma predeterminada para contar solo el uso de recursos, sin restringirlo. La cuota puede tener cualquier nombre. Puede especificar la misma cuota para diferentes usuarios; en este caso, el uso de recursos se calcula para cada usuario individualmente. +A continuación se especifica el perfil de configuración de usuario (consulte la sección “[Perfiles de configuración](settings/settings_profiles.md)”. Puede especificar el perfil predeterminado, `default`. El perfil puede tener cualquier nombre. Puede especificar el mismo perfil para diferentes usuarios. Lo más importante que puede escribir en el perfil de configuración es `readonly=1`, que asegura el acceso de sólo lectura. A continuación, especifique la cuota que se utilizará (consulte la sección “[Cuota](quotas.md#quotas)”). Puede especificar la cuota predeterminada: `default`. It is set in the config by default to only count resource usage, without restricting it. The quota can have any name. You can specify the same quota for different users – in this case, resource usage is calculated for each user individually. En el opcional `` sección, también puede especificar una lista de bases de datos a las que el usuario puede acceder. De forma predeterminada, todas las bases de datos están disponibles para el usuario. Puede especificar la base de datos `default`. En este caso, el usuario recibirá acceso a la base de datos de forma predeterminada. @@ -103,8 +106,8 @@ En el opcional `` sección, también puede especificar una l Acceso a la `system` base de datos siempre está permitida (ya que esta base de datos se utiliza para procesar consultas). -El usuario puede obtener una lista de todas las bases de datos y tablas en ellos mediante el uso de `SHOW` Consultas o tablas del sistema, incluso si no se permite el acceso a bases de datos individuales. +El usuario puede obtener una lista de todas las bases de datos y tablas en ellas mediante el uso de `SHOW` consultas o tablas del sistema, incluso si no se permite el acceso a bases de datos individuales. El acceso a la base de datos no está relacionado con la configuración [sólo lectura](settings/permissions_for_queries.md#settings_readonly). No puede conceder acceso completo a una base de datos y `readonly` acceso a otra.
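A modo de ilustración de los elementos descritos en esta sección, un boceto mínimo e hipotético de la declaración de un usuario en `users.xml` (los nombres y valores son inventados y no provienen del archivo real):

``` xml
<!-- Boceto hipotético: declaración de un usuario dentro de la sección <users> -->
<users>
    <web>
        <!-- Contraseña en SHA-256 (o <password> en texto sin cifrar, no recomendado) -->
        <password_sha256_hex>65e84be3...</password_sha256_hex>
        <!-- Lista de redes permitidas, cargada mediante la sustitución "networks" -->
        <networks incl="networks" />
        <!-- Perfil de configuración y cuota a utilizar -->
        <profile>web</profile>
        <quota>default</quota>
        <!-- Sección opcional con las bases de datos accesibles -->
        <allow_databases>
            <database>default</database>
        </allow_databases>
    </web>
</users>
```

Con una declaración así, las consultas del usuario `web` quedarían limitadas por el perfil `web` y contabilizadas bajo la cuota `default`.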
-[Artículo Original](https://clickhouse.tech/docs/es/operations/access_rights/) +[Artículo Original](https://clickhouse.tech/docs/en/operations/access_rights/) diff --git a/docs/es/operations/backup.md b/docs/es/operations/backup.md index 17a57d40487..b3e7aba307d 100644 --- a/docs/es/operations/backup.md +++ b/docs/es/operations/backup.md @@ -1,10 +1,13 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 49 +toc_title: Copia de seguridad de datos --- # Copia de seguridad de datos {#data-backup} -Mientras [replicación](table_engines/replication.md) proporciona protección contra fallas de hardware, no protege contra errores humanos: eliminación accidental de datos, eliminación de la tabla incorrecta o una tabla en el clúster incorrecto y errores de software que resultan en un procesamiento incorrecto de datos o daños en los datos. En muchos casos, errores como estos afectarán a todas las réplicas. ClickHouse tiene protecciones integradas para evitar algunos tipos de errores, por ejemplo, de forma predeterminada [no puede simplemente colocar tablas con un motor similar a MergeTree que contenga más de 50 Gb de datos](https://github.com/ClickHouse/ClickHouse/blob/v18.14.18-stable/programs/server/config.xml#L322-L330). Sin embargo, estas garantías no cubren todos los casos posibles y pueden eludirse. +Mientras [replicación](../engines/table_engines/mergetree_family/replication.md) provides protection from hardware failures, it does not protect against human errors: accidental deletion of data, deletion of the wrong table or a table on the wrong cluster, and software bugs that result in incorrect data processing or data corruption. In many cases mistakes like these will affect all replicas. ClickHouse has built-in safeguards to prevent some types of mistakes — for example, by default [no puede simplemente eliminar tablas con un motor similar a MergeTree que contenga más de 50 Gb de datos](https://github.com/ClickHouse/ClickHouse/blob/v18.14.18-stable/programs/server/config.xml#L322-L330). Sin embargo, estas garantías no cubren todos los casos posibles y pueden eludirse. Para mitigar eficazmente los posibles errores humanos, debe preparar cuidadosamente una estrategia para realizar copias de seguridad y restaurar sus datos **previamente**. @@ -19,20 +22,20 @@ A menudo, los datos que se ingieren en ClickHouse se entregan a través de algú ## Instantáneas del sistema de archivos {#filesystem-snapshots} -Algunos sistemas de archivos locales proporcionan funcionalidad de instantánea (por ejemplo, [ZFS](https://en.wikipedia.org/wiki/ZFS)), pero podrían no ser la mejor opción para servir consultas en vivo. Una posible solución es crear réplicas adicionales con este tipo de sistema de archivos y excluirlas del [Distribuido](table_engines/distributed.md) tablas que se utilizan para `SELECT` consulta. Las instantáneas en tales réplicas estarán fuera del alcance de cualquier consulta que modifique los datos. Como beneficio adicional, estas réplicas podrían tener configuraciones de hardware especiales con más discos conectados por servidor, lo que sería rentable. +Algunos sistemas de archivos locales proporcionan funcionalidad de instantánea (por ejemplo, [ZFS](https://en.wikipedia.org/wiki/ZFS)), pero podrían no ser la mejor opción para servir consultas en vivo. 
Una posible solución es crear réplicas adicionales con este tipo de sistema de archivos y excluirlas de las tablas [Distribuido](../engines/table_engines/special/distributed.md) que se utilizan para consultas `SELECT`. Las instantáneas en tales réplicas estarán fuera del alcance de cualquier consulta que modifique los datos. Como beneficio adicional, estas réplicas podrían tener configuraciones de hardware especiales con más discos conectados por servidor, lo que sería rentable. ## clickhouse-copier {#clickhouse-copier} -[Método de codificación de datos:](utils/clickhouse-copier.md) es una herramienta versátil que se creó inicialmente para volver a dividir tablas de tamaño petabyte. También se puede usar con fines de copia de seguridad y restauración porque copia datos de forma fiable entre tablas y clústeres de ClickHouse. +[clickhouse-copier](utilities/clickhouse-copier.md) es una herramienta versátil que se creó inicialmente para volver a dividir tablas de tamaño petabyte. También se puede usar con fines de copia de seguridad y restauración porque copia datos de forma fiable entre tablas y clústeres de ClickHouse. Para volúmenes de datos más pequeños, un simple `INSERT INTO ... SELECT ...` a tablas remotas podría funcionar también. ## Manipulaciones con piezas {#manipulations-with-parts} -Haz clickHouse permite usar el `ALTER TABLE ... FREEZE PARTITION ...` consulta para crear una copia local de particiones de tabla. Esto se implementa utilizando enlaces duros al `/var/lib/clickhouse/shadow/` carpeta, por lo que generalmente no consume espacio adicional en disco para datos antiguos. Las copias creadas de archivos no son manejadas por el servidor ClickHouse, por lo que puede dejarlos allí: tendrá una copia de seguridad simple que no requiere ningún sistema externo adicional, pero aún así será propenso a problemas de hardware. Por esta razón, es mejor copiarlos de forma remota en otra ubicación y luego eliminar las copias locales. Los sistemas de archivos distribuidos y los almacenes de objetos siguen siendo una buena opción para esto, pero los servidores de archivos conectados normales con una capacidad lo suficientemente grande podrían funcionar también (en este caso, la transferencia ocurrirá a través del sistema de archivos de red o tal vez [rsync](https://en.wikipedia.org/wiki/Rsync)). +ClickHouse permite usar la consulta `ALTER TABLE ... FREEZE PARTITION ...` para crear una copia local de particiones de tabla. Esto se implementa utilizando enlaces duros a la carpeta `/var/lib/clickhouse/shadow/`, por lo que generalmente no consume espacio adicional en disco para datos antiguos. Las copias creadas de archivos no son manejadas por el servidor ClickHouse, por lo que puede dejarlas allí: tendrá una copia de seguridad simple que no requiere ningún sistema externo adicional, pero seguirá siendo propenso a problemas de hardware. Por esta razón, es mejor copiarlos de forma remota en otra ubicación y luego eliminar las copias locales. Los sistemas de archivos distribuidos y los almacenes de objetos siguen siendo una buena opción para esto, pero los servidores de archivos conectados normales con una capacidad lo suficientemente grande podrían funcionar también (en este caso, la transferencia ocurrirá a través del sistema de archivos de red o tal vez [rsync](https://en.wikipedia.org/wiki/Rsync)).
-Para obtener más información sobre las consultas relacionadas con las manipulaciones de particiones, consulte [Documentación de ALTER](../query_language/alter.md#alter_manipulations-with-partitions). +Para obtener más información sobre las consultas relacionadas con las manipulaciones de particiones, consulte [Documentación de ALTER](../sql_reference/statements/alter.md#alter_manipulations-with-partitions). Una herramienta de terceros está disponible para automatizar este enfoque: [clickhouse-backup](https://github.com/AlexAkulov/clickhouse-backup). -[Artículo Original](https://clickhouse.tech/docs/es/operations/backup/) +[Artículo Original](https://clickhouse.tech/docs/en/operations/backup/) diff --git a/docs/es/operations/configuration_files.md b/docs/es/operations/configuration_files.md index af508217469..657fd61eb96 100644 --- a/docs/es/operations/configuration_files.md +++ b/docs/es/operations/configuration_files.md @@ -1,15 +1,18 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 50 +toc_title: "Archivos de configuraci\xF3n" --- # Archivos de configuración {#configuration_files} -ClickHouse admite la administración de configuración de varios archivos. El archivo de configuración del servidor principal es `/etc/clickhouse-server/config.xml`. Otros archivos deben estar en el `/etc/clickhouse-server/config.d` Directorio. +ClickHouse admite la administración de configuración de varios archivos. El archivo de configuración principal del servidor es `/etc/clickhouse-server/config.xml`. Otros archivos deben estar en el directorio `/etc/clickhouse-server/config.d`. !!! note "Nota" Todos los archivos de configuración deben estar en formato XML. Además, deben tener el mismo elemento raíz, generalmente ``. -Algunos valores especificados en el archivo de configuración principal se pueden anular en otros archivos de configuración. El `replace` o `remove` se pueden especificar atributos para los elementos de estos archivos de configuración. +Algunos valores especificados en el archivo de configuración principal se pueden anular en otros archivos de configuración. Los atributos `replace` o `remove` se pueden especificar para los elementos de estos archivos de configuración. Si no se especifica ninguno, combina el contenido de los elementos de forma recursiva, reemplazando los valores de los elementos secundarios duplicados. Si `replace` se especifica, reemplaza todo el elemento por el especificado. Si `remove` se especifica, elimina el elemento. -La configuración también puede definir “substitutions”. Si un elemento tiene el `incl` atributo, la sustitución correspondiente del archivo se utilizará como el valor. De forma predeterminada, la ruta al archivo con sustituciones es `/etc/metrika.xml`. Esto se puede cambiar en el [include\_from](server_settings/settings.md#server_settings-include_from) elemento en la configuración del servidor. Los valores de sustitución se especifican en `/yandex/substitution_name` elementos en este archivo. Si una sustitución especificada en `incl` No existe, se registra en el registro. Para evitar que ClickHouse registre las sustituciones que faltan, especifique `optional="true"` atributo (por ejemplo, ajustes para [macro](server_settings/settings.md)). +La configuración también puede definir “substitutions”. Si un elemento tiene el `incl` atributo, la sustitución correspondiente del archivo se utilizará como el valor.
De forma predeterminada, la ruta al archivo con sustituciones es `/etc/metrika.xml`. Esto se puede cambiar en el [include\_from](server_configuration_parameters/settings.md#server_configuration_parameters-include_from) elemento en la configuración del servidor. Los valores de sustitución se especifican en `/yandex/substitution_name` elementos en este archivo. Si una sustitución especificada en `incl` no existe, se registra en el registro. Para evitar que ClickHouse registre las sustituciones que faltan, especifique `optional="true"` atributo (por ejemplo, ajustes para [macro](server_configuration_parameters/settings.md)). -Las sustituciones también se pueden realizar desde ZooKeeper. Para hacer esto, especifique el atributo `from_zk = "/path/to/node"`. El valor del elemento se sustituye por el contenido del nodo en `/path/to/node` en ZooKeeper. También puede colocar un subárbol XML completo en el nodo ZooKeeper y se insertará completamente en el elemento de origen. +Las sustituciones también se pueden realizar desde ZooKeeper. Para ello, especifique el atributo `from_zk = "/path/to/node"`. El valor del elemento se sustituye por el contenido del nodo en `/path/to/node` en ZooKeeper. También puede colocar un subárbol XML completo en el nodo ZooKeeper y se insertará completamente en el elemento de origen. -El `config.xml` file puede especificar una configuración separada con configuraciones de usuario, perfiles y cuotas. La ruta relativa a esta configuración se establece en el ‘users\_config’ elemento. Por defecto, es `users.xml`. Si `users_config` se omite, la configuración de usuario, los perfiles y las cuotas se especifican directamente en `config.xml`. +El archivo `config.xml` puede especificar una configuración separada con configuraciones de usuario, perfiles y cuotas. La ruta relativa a esta configuración se establece en el `users_config` elemento. Por defecto, es `users.xml`. Si `users_config` se omite, la configuración de usuario, los perfiles y las cuotas se especifican directamente en `config.xml`. -Además, `users_config` puede tener anulaciones en los archivos `users_config.d` Directorio (por ejemplo, `users.d`) y sustituciones. Por ejemplo, puede tener un archivo de configuración separado para cada usuario como este: +La configuración de usuarios puede dividirse en archivos separados, de forma similar a `config.xml` y `config.d/`. +El nombre del directorio se define como el valor de `users_config` sin el sufijo `.xml`, concatenado con `.d`. +El directorio `users.d` se utiliza por defecto, ya que `users_config` es por defecto `users.xml`. +Por ejemplo, puede tener un archivo de configuración separado para cada usuario como este: ``` bash $ cat /etc/clickhouse-server/users.d/alice.xml @@ -48,4 +54,4 @@ Para cada archivo de configuración, el servidor también genera `file-preproces El servidor realiza un seguimiento de los cambios en los archivos de configuración, así como archivos y nodos ZooKeeper que se utilizaron al realizar sustituciones y anulaciones, y vuelve a cargar la configuración de los usuarios y clústeres sobre la marcha. Esto significa que puede modificar el clúster, los usuarios y su configuración sin reiniciar el servidor.
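Como ilustración de los atributos `replace` y `remove` descritos arriba, un boceto mínimo e hipotético de un archivo de anulación en `config.d/` (el nombre del archivo y los elementos concretos son inventados):

``` xml
<!-- Boceto hipotético: /etc/clickhouse-server/config.d/override.xml -->
<yandex>
    <!-- Con "replace", el elemento <logger> del config.xml principal se reemplaza por completo -->
    <logger replace="replace">
        <level>warning</level>
    </logger>
    <!-- Con "remove", el elemento <graphite> se elimina de la configuración combinada -->
    <graphite remove="remove" />
</yandex>
```

Sin estos atributos, el contenido de los elementos se combinaría de forma recursiva, como se describe arriba.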
-[Artículo Original](https://clickhouse.tech/docs/es/operations/configuration_files/) +[Artículo Original](https://clickhouse.tech/docs/en/operations/configuration_files/) diff --git a/docs/es/operations/index.md b/docs/es/operations/index.md index 78c89f786f3..98bd68c4d0b 100644 --- a/docs/es/operations/index.md +++ b/docs/es/operations/index.md @@ -1,5 +1,9 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_folder_title: Operations +toc_priority: 41 +toc_title: "Implantaci\xF3n" --- # Operación {#operations} @@ -16,9 +20,9 @@ El manual de operaciones de ClickHouse consta de las siguientes secciones princi - [Archivos de configuración](configuration_files.md) - [Cuota](quotas.md) - [Tablas del sistema](system_tables.md) -- [Parámetros de configuración del servidor](server_settings/index.md) +- [Parámetros de configuración del servidor](server_configuration_parameters/index.md) - [Cómo probar su hardware con ClickHouse](performance_test.md) - [Configuración](settings/index.md) -- [Utilidad](utils/index.md) +- [Utilidad](utilities/index.md) -[Artículo Original](https://clickhouse.tech/docs/es/operations/) +[Artículo Original](https://clickhouse.tech/docs/en/operations/) diff --git a/docs/es/operations/monitoring.md b/docs/es/operations/monitoring.md index 4729796dc83..eb8ddf816d9 100644 --- a/docs/es/operations/monitoring.md +++ b/docs/es/operations/monitoring.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 45 +toc_title: Monitoreo --- # Monitoreo {#monitoring} @@ -21,21 +24,21 @@ Se recomienda encarecidamente configurar la supervisión para: - Utilización del sistema de almacenamiento, RAM y red. -## Métricas del servidor ClickHouse {#clickhouse-server-metrics} +## Métricas del servidor de ClickHouse {#clickhouse-server-metrics} El servidor ClickHouse tiene instrumentos integrados para el monitoreo de estado propio. -Para realizar un seguimiento de los eventos del servidor, use los registros del servidor. Ver el [registrador](server_settings/settings.md#server_settings-logger) sección del archivo de configuración. +Para realizar un seguimiento de los eventos del servidor, use los registros del servidor. Ver la sección [registrador](server_configuration_parameters/settings.md#server_configuration_parameters-logger) del archivo de configuración. -Recoge de ClickHouse: +ClickHouse recoge: - Diferentes métricas de cómo el servidor utiliza recursos computacionales. -- Estadísticas comunes sobre el procesamiento de consultas. +- Estadísticas comunes sobre el procesamiento de consultas. -Puede encontrar métricas en el [sistema.métricas](system_tables.md#system_tables-metrics), [sistema.evento](system_tables.md#system_tables-events), y [sistema.asynchronous\_metrics](system_tables.md#system_tables-asynchronous_metrics) tabla. +Puede encontrar métricas en las tablas [sistema.métricas](../operations/system_tables.md#system_tables-metrics), [sistema.evento](../operations/system_tables.md#system_tables-events) y [sistema.asynchronous\_metrics](../operations/system_tables.md#system_tables-asynchronous_metrics). -Puede configurar ClickHouse para exportar métricas a [Grafito](https://github.com/graphite-project). Ver el [Sección de grafito](server_settings/settings.md#server_settings-graphite) en el archivo de configuración del servidor ClickHouse.
Antes de configurar la exportación de métricas, debe configurar Graphite siguiendo sus [guiar](https://graphite.readthedocs.io/en/latest/install.html). +Puede configurar ClickHouse para exportar métricas a [Grafito](https://github.com/graphite-project). Ver la [Sección de grafito](server_configuration_parameters/settings.md#server_configuration_parameters-graphite) en el archivo de configuración del servidor ClickHouse. Antes de configurar la exportación de métricas, debe configurar Graphite siguiendo su [guía](https://graphite.readthedocs.io/en/latest/install.html). -Además, puede supervisar la disponibilidad del servidor a través de la API HTTP. Enviar el `HTTP GET` Solicitud de `/ping`. Si el servidor está disponible, responde con `200 OK`. +Además, puede supervisar la disponibilidad del servidor a través de la API HTTP. Envíe la solicitud `HTTP GET` a `/ping`. Si el servidor está disponible, responde con `200 OK`. Para supervisar servidores en una configuración de clúster, debe establecer el parámetro [max\_replica\_delay\_for\_distributed\_queries](settings/settings.md#settings-max_replica_delay_for_distributed_queries) y utilizar el recurso HTTP `/replicas_status`. Una solicitud a `/replicas_status` devuelve `200 OK` si la réplica está disponible y no se retrasa detrás de las otras réplicas. Si una réplica se retrasa, devuelve `503 HTTP_SERVICE_UNAVAILABLE` con información sobre la brecha. diff --git a/docs/es/operations/optimizing_performance/index.md b/docs/es/operations/optimizing_performance/index.md new file mode 100644 index 00000000000..126096db767 --- /dev/null +++ b/docs/es/operations/optimizing_performance/index.md @@ -0,0 +1,8 @@ +--- +machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_folder_title: Optimizing Performance +toc_priority: 52 +--- + + diff --git a/docs/es/operations/performance/sampling_query_profiler.md b/docs/es/operations/optimizing_performance/sampling_query_profiler.md similarity index 67% rename from docs/es/operations/performance/sampling_query_profiler.md rename to docs/es/operations/optimizing_performance/sampling_query_profiler.md index cf12c2b2e5a..73676355499 100644 --- a/docs/es/operations/performance/sampling_query_profiler.md +++ b/docs/es/operations/optimizing_performance/sampling_query_profiler.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 54 +toc_title: "Generaci\xF3n de perfiles de consultas" --- # Analizador de consultas de muestreo {#sampling-query-profiler} @@ -8,9 +11,9 @@ ClickHouse ejecuta el generador de perfiles de muestreo que permite analizar la Para usar el generador de perfiles: -- Configurar el [trace\_log](../server_settings/settings.md#server_settings-trace_log) sección de la configuración del servidor. +- Configurar el [trace\_log](../server_configuration_parameters/settings.md#server_configuration_parameters-trace_log) sección de la configuración del servidor. - Esta sección configura la [trace\_log](../system_tables.md#system_tables-trace_log) tabla del sistema que contiene los resultados del funcionamiento del generador de perfiles. Está configurado de forma predeterminada. Recuerde que los datos de esta tabla solo son válidos para un servidor en ejecución. Después de reiniciar el servidor, ClickHouse no limpia la tabla y toda la dirección de memoria virtual almacenada puede dejar de ser válida.
+ Esta sección configura la [trace\_log](../../operations/system_tables.md#system_tables-trace_log) tabla del sistema que contiene los resultados del funcionamiento del generador de perfiles. Está configurado de forma predeterminada. Recuerde que los datos de esta tabla solo son válidos para un servidor en ejecución. Después de reiniciar el servidor, ClickHouse no limpia la tabla y toda la dirección de memoria virtual almacenada puede dejar de ser válida. - Configurar el [query\_profiler\_cpu\_time\_period\_ns](../settings/settings.md#query_profiler_cpu_time_period_ns) o [query\_profiler\_real\_time\_period\_ns](../settings/settings.md#query_profiler_real_time_period_ns) configuración. Ambos ajustes se pueden utilizar simultáneamente. @@ -26,9 +29,9 @@ Para analizar el `trace_log` tabla del sistema: Por razones de seguridad, las funciones de introspección están deshabilitadas de forma predeterminada. -- Descripción `addressToLine`, `addressToSymbol` y `demangle` [funciones de la introspección](../../query_language/functions/introspection.md) para obtener nombres de funciones y sus posiciones en el código ClickHouse. Para obtener un perfil para alguna consulta, debe agregar datos del `trace_log` tabla. Puede agregar datos por funciones individuales o por los seguimientos de pila completos. +- Utilice las funciones `addressToLine`, `addressToSymbol` y `demangle` de [introspección](../../sql_reference/functions/introspection.md) para obtener nombres de funciones y sus posiciones en el código ClickHouse. Para obtener un perfil para alguna consulta, debe agregar datos del `trace_log` tabla. Puede agregar datos por funciones individuales o por los seguimientos de pila completos. -Si necesita visualizar `trace_log` información, intente [Gráfico de llamas](../../interfaces/third-party/gui/#clickhouse-flamegraph) y [Nivel de Cifrado WEP](https://github.com/laplab/clickhouse-speedscope). +Si necesita visualizar `trace_log` información, intente [Flamegraph](../../interfaces/third-party/gui/#clickhouse-flamegraph) y [speedscope](https://github.com/laplab/clickhouse-speedscope). ## Ejemplo {#example} diff --git a/docs/es/operations/performance_test.md b/docs/es/operations/performance_test.md index 3e1448ffa87..3d982870c36 100644 --- a/docs/es/operations/performance_test.md +++ b/docs/es/operations/performance_test.md @@ -1,12 +1,15 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 54 +toc_title: Prueba de hardware --- # Cómo probar su hardware con ClickHouse {#how-to-test-your-hardware-with-clickhouse} Con esta instrucción, puede ejecutar una prueba de rendimiento básica de ClickHouse en cualquier servidor sin instalar paquetes de ClickHouse. -1. Ir a un “commits” página: https://github.com/ClickHouse/ClickHouse/commits/master +1. Vaya a la página “commits”: https://github.com/ClickHouse/ClickHouse/commits/master 2. Haga clic en la primera marca de verificación verde o cruz roja con verde “ClickHouse Build Check” y haga clic en el “Details” enlace cerca “ClickHouse Build Check”. @@ -23,7 +26,7 @@ Con esta instrucción, puede ejecutar una prueba de rendimiento básica de Click # Then do: chmod a+x clickhouse -1. Descargar configuraciones: +1. Descargar configs: @@ -62,7 +65,7 @@ Con esta instrucción, puede ejecutar una prueba de rendimiento básica de Click ./clickhouse client --query "SELECT count() FROM hits_100m_obfuscated" 100000000 -1.
Edite el benchmark-new.sh, cambie “clickhouse-client” Naciones “./clickhouse client” y añadir “–max\_memory\_usage 100000000000” parámetro. +1. Edite el benchmark-new.sh, cambie “clickhouse-client” a “./clickhouse client” y añada el parámetro “–max\_memory\_usage 100000000000”. diff --git a/docs/es/operations/quotas.md b/docs/es/operations/quotas.md index 4b53db1bbd0..af6d09a54ce 100644 --- a/docs/es/operations/quotas.md +++ b/docs/es/operations/quotas.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 51 +toc_title: Cuota --- # Cuota {#quotas} @@ -14,7 +17,7 @@ A diferencia de las restricciones de complejidad de consultas, las cuotas: - Coloque restricciones en un conjunto de consultas que se pueden ejecutar durante un período de tiempo, en lugar de limitar una sola consulta. - Tenga en cuenta los recursos gastados en todos los servidores remotos para el procesamiento de consultas distribuidas. -Echemos un vistazo a la sección de la ‘users.xml’ fichero que define las cuotas. +Veamos la sección del fichero ‘users.xml’ que define las cuotas. ``` xml @@ -36,7 +39,7 @@ ``` -De forma predeterminada, la cuota solo realiza un seguimiento del consumo de recursos para cada hora, sin limitar el uso. +De forma predeterminada, la cuota sólo realiza un seguimiento del consumo de recursos por cada hora, sin limitar el uso. El consumo de recursos calculado para cada intervalo se envía al registro del servidor después de cada solicitud. ``` xml @@ -71,15 +74,15 @@ Cuando finaliza el intervalo, se borran todos los valores recopilados. Para la s Estas son las cantidades que se pueden restringir: -`queries` – El número total de solicitudes. +`queries` – The total number of requests. -`errors` – El número de consultas que lanzaron una excepción. +`errors` – The number of queries that threw an exception. -`result_rows` – El número total de filas dadas como resultado. +`result_rows` – The total number of rows given as the result. -`read_rows` – El número total de filas de origen leídas de las tablas para ejecutar la consulta, en todos los servidores remotos. +`read_rows` – The total number of source rows read from tables for running the query, on all remote servers. -`execution_time` – El tiempo total de ejecución de la consulta, en segundos (tiempo de pared). +`execution_time` – The total query execution time, in seconds (wall time). Si se excede el límite durante al menos un intervalo de tiempo, se lanza una excepción con un texto sobre qué restricción se excedió, para qué intervalo y cuándo comienza el nuevo intervalo (cuando se pueden enviar consultas nuevamente). @@ -106,4 +109,4 @@ Para el procesamiento de consultas distribuidas, los importes acumulados se alma Cuando se reinicia el servidor, las cuotas se restablecen.
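Para ilustrar las cantidades enumeradas arriba, un boceto mínimo e hipotético de una cuota con límites en `users.xml` (el nombre `limited` y los valores son inventados):

``` xml
<!-- Boceto hipotético: una cuota con límites para un intervalo de una hora -->
<quotas>
    <limited>
        <interval>
            <duration>3600</duration>             <!-- longitud del intervalo, en segundos -->
            <queries>1000</queries>               <!-- número total de solicitudes -->
            <errors>100</errors>                  <!-- consultas que lanzaron una excepción -->
            <result_rows>1000000000</result_rows> <!-- filas dadas como resultado -->
            <read_rows>100000000000</read_rows>   <!-- filas de origen leídas -->
            <execution_time>900</execution_time>  <!-- tiempo total de ejecución, en segundos -->
        </interval>
    </limited>
</quotas>
```

Exceder cualquiera de estos límites dentro del intervalo provocaría la excepción descrita arriba.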
-[Artículo Original](https://clickhouse.tech/docs/es/operations/quotas/) +[Artículo Original](https://clickhouse.tech/docs/en/operations/quotas/) diff --git a/docs/es/operations/requirements.md b/docs/es/operations/requirements.md index 2aea6ba9a0c..3611cc9475d 100644 --- a/docs/es/operations/requirements.md +++ b/docs/es/operations/requirements.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 44 +toc_title: Requisito --- # Requisito {#requirements} @@ -10,7 +13,7 @@ Para la instalación desde paquetes deb precompilados, utilice una CPU con arqui ClickHouse implementa el procesamiento de datos paralelo y utiliza todos los recursos de hardware disponibles. Al elegir un procesador, tenga en cuenta que ClickHouse funciona de manera más eficiente en configuraciones con un gran número de núcleos pero con una velocidad de reloj más baja que en configuraciones con menos núcleos y una velocidad de reloj más alta. Por ejemplo, 16 núcleos con 2600 MHz es preferible a 8 núcleos con 3600 MHz. -Uso de **Impulso de Turbo** y **Hiper-threading** tecnologías se recomienda. Mejora significativamente el rendimiento con una carga típica. +Uso de **Impulso de Turbo** y **hiper-threading** tecnologías se recomienda. Mejora significativamente el rendimiento con una carga típica. ## RAM {#ram} @@ -21,9 +24,9 @@ El volumen requerido de RAM depende de: - La complejidad de las consultas. - La cantidad de datos que se procesan en las consultas. -Para calcular el volumen requerido de RAM, debe estimar el tamaño de los datos temporales para [GRUPO POR](../query_language/select.md#select-group-by-clause), [DISTINTO](../query_language/select.md#select-distinct), [UNIR](../query_language/select.md#select-join) y otras operaciones que utilice. +Para calcular el volumen requerido de RAM, debe estimar el tamaño de los datos temporales para [GROUP BY](../sql_reference/statements/select.md#select-group-by-clause), [DISTINCT](../sql_reference/statements/select.md#select-distinct), [JOIN](../sql_reference/statements/select.md#select-join) y otras operaciones que utilice. -ClickHouse puede usar memoria externa para datos temporales. Ver [GRUPO POR en memoria externa](../query_language/select.md#select-group-by-in-external-memory) para más detalles. +ClickHouse puede usar memoria externa para datos temporales. Ver [GROUP BY en memoria externa](../sql_reference/statements/select.md#select-group-by-in-external-memory) para más detalles. ## Archivo de intercambio {#swap-file} @@ -45,9 +48,9 @@ El volumen de almacenamiento requerido para sus datos debe calcularse por separa Para calcular el volumen final de datos que se almacenarán, aplique el coeficiente de compresión al volumen de datos estimado. Si planea almacenar datos en varias réplicas, multiplique el volumen estimado por el número de réplicas. -## Rojo {#network} +## Red {#network} -Si es posible, utilice redes de 10G o clase superior. +Si es posible, use redes de 10G o clase superior. El ancho de banda de la red es fundamental para procesar consultas distribuidas con una gran cantidad de datos intermedios. Además, la velocidad de la red afecta a los procesos de replicación. 
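Connecting the RAM guidance above to configuration: a hedged `users.xml` profile sketch that caps per-query memory and lets an oversized GROUP BY spill to external memory instead of failing; both byte thresholds are illustrative assumptions:

``` xml
<profiles>
    <default>
        <!-- Hard per-query RAM cap, in bytes (10 GB, illustrative). -->
        <max_memory_usage>10000000000</max_memory_usage>
        <!-- Spill GROUP BY state to disk once it reaches roughly half the cap. -->
        <max_bytes_before_external_group_by>5000000000</max_bytes_before_external_group_by>
    </default>
</profiles>
```

Setting the spill threshold to about half of `max_memory_usage` is the usual rule of thumb, so that the in-memory aggregation phase and the external merge phase each fit under the cap.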
diff --git a/docs/es/operations/server_settings/index.md b/docs/es/operations/server_configuration_parameters/index.md similarity index 69% rename from docs/es/operations/server_settings/index.md rename to docs/es/operations/server_configuration_parameters/index.md index f98b4c8b788..5a323fec41f 100644 --- a/docs/es/operations/server_settings/index.md +++ b/docs/es/operations/server_configuration_parameters/index.md @@ -1,5 +1,9 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_folder_title: Server Configuration Parameters +toc_priority: 54 +toc_title: "Implantaci\xF3n" --- # Parámetros de configuración del servidor {#server-settings} @@ -12,4 +16,4 @@ Otros ajustes se describen en el “[Configuración](../settings/index.md#settin Antes de estudiar la configuración, lea el [Archivos de configuración](../configuration_files.md#configuration_files) sección y tomar nota del uso de sustituciones (el `incl` y `optional` atributo). -[Artículo Original](https://clickhouse.tech/docs/es/operations/server_settings/) +[Artículo Original](https://clickhouse.tech/docs/en/operations/server_configuration_parameters/) diff --git a/docs/es/operations/server_settings/settings.md b/docs/es/operations/server_configuration_parameters/settings.md similarity index 62% rename from docs/es/operations/server_settings/settings.md rename to docs/es/operations/server_configuration_parameters/settings.md index 6c9c74592bd..3ec541c3acd 100644 --- a/docs/es/operations/server_settings/settings.md +++ b/docs/es/operations/server_configuration_parameters/settings.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 57 +toc_title: "Configuraci\xF3n del servidor" --- # Configuración del servidor {#server-settings} @@ -20,7 +23,7 @@ Valor predeterminado: 3600. ## compresión {#server-settings-compression} -Ajustes de compresión de datos para [Método de codificación de datos:](../table_engines/mergetree.md)-mesas de motor. +Ajustes de compresión de datos para [Método de codificación de datos:](../../engines/table_engines/mergetree_family/mergetree.md)-mesas de motor. !!! warning "Advertencia" No lo use si acaba de comenzar a usar ClickHouse. @@ -40,16 +43,16 @@ Plantilla de configuración: `` campo: -- `min_part_size` – El tamaño mínimo de una parte de datos. -- `min_part_size_ratio` – La relación entre el tamaño de la parte de datos y el tamaño de la tabla. -- `method` – Método de compresión. Valores aceptables: `lz4` o `zstd`. +- `min_part_size` – The minimum size of a data part. +- `min_part_size_ratio` – The ratio of the data part size to the table size. +- `method` – Compression method. Acceptable values: `lz4` o `zstd`. Puede configurar múltiples `` apartado. Acciones cuando se cumplen las condiciones: - Si un elemento de datos coincide con un conjunto de condiciones, ClickHouse utiliza el método de compresión especificado. -- Si un elemento de datos coinciden con varios conjuntos de condiciones, ClickHouse utiliza el primer conjunto de condiciones coincidente. +- Si un elemento de datos coincide con varios conjuntos de condiciones, ClickHouse utiliza el primer conjunto de condiciones coincidente. Si no se cumplen condiciones para un elemento de datos, ClickHouse utiliza el `lz4` compresión. @@ -69,7 +72,7 @@ Si no se cumplen condiciones para un elemento de datos, ClickHouse utiliza el `l La base de datos predeterminada. 
-Para obtener una lista de bases de datos, [MOSTRAR BASAS DE DATOS](../../query_language/show.md#show-databases) consulta. +Para obtener una lista de bases de datos, [SHOW DATABASES](../../sql_reference/statements/show.md#show-databases) consulta. **Ejemplo** @@ -89,7 +92,7 @@ Los perfiles de configuración se encuentran en el archivo especificado en el pa default ``` -## Diccionarios\_config {#server_settings-dictionaries_config} +## Diccionarios\_config {#server_configuration_parameters-dictionaries_config} La ruta de acceso al archivo de configuración para diccionarios externos. @@ -98,7 +101,7 @@ Camino: - Especifique la ruta absoluta o la ruta relativa al archivo de configuración del servidor. - La ruta puede contener comodines \* y ?. -Ver también “[Diccionarios externos](../../query_language/dicts/external_dicts.md)”. +Ver también “[Diccionarios externos](../../sql_reference/dictionaries/external_dictionaries/external_dicts.md)”. **Ejemplo** @@ -106,7 +109,7 @@ Ver también “[Diccionarios externos](../../query_language/dicts/external_dict *_dictionary.xml ``` -## Diccionarios\_lazy\_load {#server_settings-dictionaries_lazy_load} +## Diccionarios\_lazy\_load {#server_configuration_parameters-dictionaries_lazy_load} La carga perezosa de los diccionarios. @@ -122,7 +125,7 @@ El valor predeterminado es `true`. true ``` -## format\_schema\_path {#server_settings-format_schema_path} +## format\_schema\_path {#server_configuration_parameters-format_schema_path} La ruta de acceso al directorio con los esquemas para los datos de entrada, como los esquemas [CapnProto](../../interfaces/formats.md#capnproto) formato. @@ -133,21 +136,21 @@ La ruta de acceso al directorio con los esquemas para los datos de entrada, como format_schemas/ ``` -## Grafito {#server_settings-graphite} +## grafito {#server_configuration_parameters-graphite} Envío de datos a [Grafito](https://github.com/graphite-project). Configuración: -- host – El servidor de grafito. -- el puerto del servidor grafito. -- intervalo – El intervalo para el envío, en segundos. -- timeout – El tiempo de espera para el envío de datos, en segundos. -- root\_path – Prefijo para las claves. -- métricas – Envío de datos desde el [sistema.métricas](../system_tables.md#system_tables-metrics) tabla. -- eventos – Envío de datos deltas acumulados para el período de tiempo [sistema.evento](../system_tables.md#system_tables-events) tabla. -- events\_cumulative: envío de datos acumulativos desde el [sistema.evento](../system_tables.md#system_tables-events) tabla. -- asynchronous\_metrics – Envío de datos desde el [sistema.asynchronous\_metrics](../system_tables.md#system_tables-asynchronous_metrics) tabla. +- host – The Graphite server. +- port – The port on the Graphite server. +- interval – The interval for sending, in seconds. +- timeout – The timeout for sending data, in seconds. +- root\_path – Prefix for keys. +- metrics – Sending data from the [sistema.métricas](../../operations/system_tables.md#system_tables-metrics) tabla. +- events – Sending deltas data accumulated for the time period from the [sistema.evento](../../operations/system_tables.md#system_tables-events) tabla. +- events\_cumulative – Sending cumulative data from the [sistema.evento](../../operations/system_tables.md#system_tables-events) tabla. +- asynchronous\_metrics – Sending data from the [sistema.asynchronous\_metrics](../../operations/system_tables.md#system_tables-asynchronous_metrics) tabla. Puede configurar múltiples `` clausula. 
Por ejemplo, puede usar esto para enviar datos diferentes a intervalos diferentes. @@ -167,11 +170,11 @@ Puede configurar múltiples `` clausula. Por ejemplo, puede usar esto ``` -## graphite\_rollup {#server_settings-graphite-rollup} +## graphite\_rollup {#server_configuration_parameters-graphite-rollup} Ajustes para reducir los datos de grafito. -Para obtener más información, consulte [GraphiteMergeTree](../table_engines/graphitemergetree.md). +Para obtener más información, consulte [GraphiteMergeTree](../../engines/table_engines/mergetree_family/graphitemergetree.md). **Ejemplo** @@ -199,7 +202,7 @@ Para obtener más información, consulte [GraphiteMergeTree](../table_engines/gr El puerto para conectarse al servidor a través de HTTP(s). -Si `https_port` se especifica, [openSSL](#server_settings-openssl) debe ser configurado. +Si `https_port` se especifica, [openSSL](#server_configuration_parameters-openssl) debe ser configurado. Si `http_port` se especifica, la configuración de OpenSSL se ignora incluso si está establecida. @@ -209,7 +212,7 @@ Si `http_port` se especifica, la configuración de OpenSSL se ignora incluso si 0000 ``` -## http\_server\_default\_response {#server_settings-http_server_default_response} +## http\_server\_default\_response {#server_configuration_parameters-http_server_default_response} La página que se muestra de forma predeterminada al acceder al servidor HTTP de ClickHouse. El valor predeterminado es “Ok.” (con un avance de línea al final) @@ -224,7 +227,7 @@ Abrir `https://tabix.io/` al acceder `http://localhost: http_port`. ``` -## include\_from {#server_settings-include_from} +## include\_from {#server_configuration_parameters-include_from} La ruta al archivo con sustituciones. @@ -262,13 +265,13 @@ Si se omite, se define de la misma manera que el `hostname-f` comando. ## interserver\_http\_credentials {#server-settings-interserver-http-credentials} -El nombre de usuario y la contraseña utilizados para [replicación](../table_engines/replication.md) con los motores Replicated\*. Estas credenciales sólo se utilizan para la comunicación entre réplicas y no están relacionadas con las credenciales de los clientes de ClickHouse. El servidor está comprobando estas credenciales para conectar réplicas y utiliza las mismas credenciales cuando se conecta a otras réplicas. Por lo tanto, estas credenciales deben establecerse igual para todas las réplicas de un clúster. +El nombre de usuario y la contraseña utilizados para [replicación](../../engines/table_engines/mergetree_family/replication.md) con los motores Replicated\*. Estas credenciales sólo se utilizan para la comunicación entre réplicas y no están relacionadas con las credenciales de los clientes de ClickHouse. El servidor está comprobando estas credenciales para conectar réplicas y utiliza las mismas credenciales cuando se conecta a otras réplicas. Por lo tanto, estas credenciales deben establecerse igual para todas las réplicas de un clúster. De forma predeterminada, la autenticación no se utiliza. Esta sección contiene los siguientes parámetros: -- `user` — nombre de usuario. -- `password` — contraseña. +- `user` — username. +- `password` — password. **Ejemplo** @@ -289,7 +292,7 @@ El número de segundos que ClickHouse espera las solicitudes entrantes antes de 3 ``` -## listen\_host {#server_settings-listen_host} +## listen\_host {#server_configuration_parameters-listen_host} Restricción en hosts de los que pueden provenir las solicitudes. Si desea que el servidor responda a todos ellos, especifique `::`. 
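A short `config.xml` sketch contrasting the two common choices (the loopback entries mirror the documentation's own examples below; the wildcard line is the open-to-all variant):

``` xml
<!-- Accept connections only from the local machine. -->
<listen_host>::1</listen_host>
<listen_host>127.0.0.1</listen_host>

<!-- Or accept connections on every interface (use with care):
<listen_host>::</listen_host>
-->
```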
@@ -300,17 +303,17 @@ Ejemplos: 127.0.0.1 ``` -## registrador {#server_settings-logger} +## registrador {#server_configuration_parameters-logger} Configuración de registro. Claves: -- level – Nivel de registro. Valores aceptables: `trace`, `debug`, `information`, `warning`, `error`. -- log – El archivo de registro. Contiene todas las entradas según `level`. -- errorlog – Archivo de registro de errores. -- size – Tamaño del archivo. Se aplica a `log`y`errorlog`. Una vez que el archivo alcanza `size`, ClickHouse archiva y cambia el nombre, y crea un nuevo archivo de registro en su lugar. -- count: el número de archivos de registro archivados que almacena ClickHouse. +- level – Logging level. Acceptable values: `trace`, `debug`, `information`, `warning`, `error`. +- log – The log file. Contains all the entries according to `level`. +- errorlog – Error log file. +- size – Size of the file. Applies to `log`y`errorlog`. Una vez que el archivo alcanza `size`, ClickHouse archiva y cambia el nombre, y crea un nuevo archivo de registro en su lugar. +- count – The number of archived log files that ClickHouse stores. **Ejemplo** @@ -340,12 +343,12 @@ También se admite la escritura en el syslog. Config ejemplo: Claves: -- use\_syslog — Ajuste requerido si desea escribir en el syslog. -- address — El host\[:port\] de syslogd. Si se omite, se utiliza el daemon local. -- hostname — Opcional. El nombre del host desde el que se envían los registros. -- instalación — [La palabra clave syslog instalación](https://en.wikipedia.org/wiki/Syslog#Facility) en letras mayúsculas con el “LOG\_” prefijo: (`LOG_USER`, `LOG_DAEMON`, `LOG_LOCAL3` y así sucesivamente). +- use\_syslog — Required setting if you want to write to the syslog. +- address — The host\[:port\] of syslogd. If omitted, the local daemon is used. +- hostname — Optional. The name of the host that logs are sent from. +- facility — [La palabra clave syslog facility](https://en.wikipedia.org/wiki/Syslog#Facility) en letras mayúsculas con el “LOG\_” prefijo: (`LOG_USER`, `LOG_DAEMON`, `LOG_LOCAL3` y así sucesivamente). Valor predeterminado: `LOG_USER` si `address` se especifica, `LOG_DAEMON otherwise.` -- format – Formato de mensaje. Valores posibles: `bsd` y `syslog.` +- format – Message format. Possible values: `bsd` y `syslog.` ## macro {#macros} @@ -353,7 +356,7 @@ Sustituciones de parámetros para tablas replicadas. Se puede omitir si no se utilizan tablas replicadas. -Para obtener más información, consulte la sección “[Creación de tablas replicadas](../../operations/table_engines/replication.md)”. +Para obtener más información, consulte la sección “[Creación de tablas replicadas](../../engines/table_engines/mergetree_family/replication.md)”. **Ejemplo** @@ -363,7 +366,7 @@ Para obtener más información, consulte la sección “[Creación de tablas rep ## Método de codificación de datos: {#server-mark-cache-size} -Tamaño aproximado (en bytes) de la memoria caché de marcas utilizadas por los motores de [Método de codificación de datos:](../table_engines/mergetree.md) Familia. +Tamaño aproximado (en bytes) de la memoria caché de marcas utilizadas por los motores de [Método de codificación de datos:](../../engines/table_engines/mergetree_family/mergetree.md) familia. La memoria caché se comparte para el servidor y la memoria se asigna según sea necesario. El tamaño de la memoria caché debe ser al menos 5368709120. @@ -399,7 +402,7 @@ El número máximo de archivos abiertos. Predeterminada: `maximum`. 
-Recomendamos usar esta opción en Mac OS X desde el `getrlimit()` función devuelve un valor incorrecto. +Recomendamos el uso de esta opción en Mac OS X desde la `getrlimit()` la función devuelve un valor incorrecto. **Ejemplo** @@ -411,7 +414,7 @@ Recomendamos usar esta opción en Mac OS X desde el `getrlimit()` función devue Restricción en la eliminación de tablas. -Si el tamaño de un [Método de codificación de datos:](../table_engines/mergetree.md) más caliente `max_table_size_to_drop` (en bytes), no puede eliminarlo mediante una consulta DROP. +Si el tamaño de un [Método de codificación de datos:](../../engines/table_engines/mergetree_family/mergetree.md) tabla supera `max_table_size_to_drop` (en bytes), no puede eliminarlo usando una consulta DROP. Si aún necesita eliminar la tabla sin reiniciar el servidor ClickHouse, cree el `/flags/force_drop_table` y ejecute la consulta DROP. @@ -425,9 +428,9 @@ El valor 0 significa que puede eliminar todas las tablas sin restricciones. 0 ``` -## merge\_tree {#server_settings-merge_tree} +## merge\_tree {#server_configuration_parameters-merge_tree} -Ajuste fino para tablas en el [Método de codificación de datos:](../table_engines/mergetree.md). +Ajuste fino para tablas en el [Método de codificación de datos:](../../engines/table_engines/mergetree_family/mergetree.md). Para obtener más información, vea MergeTreeSettings.h archivo de encabezado. @@ -439,7 +442,7 @@ Para obtener más información, vea MergeTreeSettings.h archivo de encabezado. ``` -## openSSL {#server_settings-openssl} +## openSSL {#server_configuration_parameters-openssl} Configuración cliente/servidor SSL. @@ -447,26 +450,26 @@ El soporte para SSL es proporcionado por el `libpoco` biblioteca. La interfaz se Claves para la configuración del servidor/cliente: -- privateKeyFile: la ruta de acceso al archivo con la clave secreta del certificado PEM. El archivo puede contener una clave y un certificado al mismo tiempo. -- certificateFile: la ruta de acceso al archivo de certificado cliente/servidor en formato PEM. Puede omitirlo si `privateKeyFile` contiene el certificado. -- caConfig: la ruta de acceso al archivo o directorio que contiene certificados raíz de confianza. -- verificationMode: el método para verificar los certificados del nodo. Los detalles están en la descripción del [Contexto](https://github.com/ClickHouse-Extras/poco/blob/master/NetSSL_OpenSSL/include/Poco/Net/Context.h) Clase. Valores posibles: `none`, `relaxed`, `strict`, `once`. -- Profundidad de verificación: la longitud máxima de la cadena de verificación. La verificación fallará si la longitud de la cadena del certificado supera el valor establecido. -- loadDefaultCAFile: indica que se usarán certificados de CA integrados para OpenSSL. Valores aceptables: `true`, `false`. \| -- cipherList: encriptaciones compatibles con OpenSSL. Por ejemplo: `ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH`. -- cacheSessions: habilita o deshabilita las sesiones de almacenamiento en caché. Debe usarse en combinación con `sessionIdContext`. Valores aceptables: `true`, `false`. -- sessionIdContext: un conjunto único de caracteres aleatorios que el servidor agrega a cada identificador generado. La longitud de la cuerda no debe exceder `SSL_MAX_SSL_SESSION_ID_LENGTH`. Este parámetro siempre se recomienda ya que ayuda a evitar problemas tanto si el servidor almacena en caché la sesión como si el cliente solicita el almacenamiento en caché. Valor predeterminado: `${application.name}`. 
-- sessionCacheSize: el número máximo de sesiones que el servidor almacena en caché. Valor predeterminado: 1024\*20. 0 – Sesiones ilimitadas. -- sessionTimeout - Tiempo para almacenar en caché la sesión en el servidor. -- extendedVerification : la verificación extendida automáticamente de los certificados después de que finalice la sesión. Valores aceptables: `true`, `false`. -- requireTLSv1: requiere una conexión TLSv1. Valores aceptables: `true`, `false`. -- requireTLSv1\_1: requiere una conexión TLSv1.1. Valores aceptables: `true`, `false`. -- requireTLSv1: requiere una conexión TLSv1.2. Valores aceptables: `true`, `false`. -- fips: activa el modo FIPS OpenSSL. Se admite si la versión OpenSSL de la biblioteca admite FIPS. -- privateKeyPassphraseHandler: clase (subclase PrivateKeyPassphraseHandler) que solicita la frase de contraseña para acceder a la clave privada. Por ejemplo: ``, `KeyFileHandler`, `test`, ``. -- invalidCertificateHandler: clase (una subclase de CertificateHandler) para verificar certificados no válidos. Por ejemplo: ` ConsoleCertificateHandler ` . -- disableProtocols: protocolos que no pueden usarse. -- preferServerCiphers: cifras de servidor preferidas en el cliente. +- privateKeyFile – The path to the file with the secret key of the PEM certificate. The file may contain a key and certificate at the same time. +- certificateFile – The path to the client/server certificate file in PEM format. You can omit it if `privateKeyFile` contiene el certificado. +- caConfig – The path to the file or directory that contains trusted root certificates. +- verificationMode – The method for checking the node's certificates. Details are in the description of the [Contexto](https://github.com/ClickHouse-Extras/poco/blob/master/NetSSL_OpenSSL/include/Poco/Net/Context.h) clase. Valores posibles: `none`, `relaxed`, `strict`, `once`. +- verificationDepth – The maximum length of the verification chain. Verification will fail if the certificate chain length exceeds the set value. +- loadDefaultCAFile – Indicates that built-in CA certificates for OpenSSL will be used. Acceptable values: `true`, `false`. \| +- cipherList – Supported OpenSSL encryptions. For example: `ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH`. +- cacheSessions – Enables or disables caching sessions. Must be used in combination with `sessionIdContext`. Valores aceptables: `true`, `false`. +- sessionIdContext – A unique set of random characters that the server appends to each generated identifier. The length of the string must not exceed `SSL_MAX_SSL_SESSION_ID_LENGTH`. Este parámetro siempre se recomienda ya que ayuda a evitar problemas tanto si el servidor almacena en caché la sesión como si el cliente solicita el almacenamiento en caché. Valor predeterminado: `${application.name}`. +- sessionCacheSize – The maximum number of sessions that the server caches. Default value: 1024\*20. 0 – Unlimited sessions. +- sessionTimeout – Time for caching the session on the server. +- extendedVerification – Automatically extended verification of certificates after the session ends. Acceptable values: `true`, `false`. +- requireTLSv1 – Require a TLSv1 connection. Acceptable values: `true`, `false`. +- requireTLSv1\_1 – Require a TLSv1.1 connection. Acceptable values: `true`, `false`. +- requireTLSv1 – Require a TLSv1.2 connection. Acceptable values: `true`, `false`. +- fips – Activates OpenSSL FIPS mode. Supported if the library's OpenSSL version supports FIPS. 
+- privateKeyPassphraseHandler – Class (PrivateKeyPassphraseHandler subclass) that requests the passphrase for accessing the private key. For example: ``, `KeyFileHandler`, `test`, ``. +- invalidCertificateHandler – Class (a subclass of CertificateHandler) for verifying invalid certificates. For example: ` ConsoleCertificateHandler ` . +- disableProtocols – Protocols that are not allowed to use. +- preferServerCiphers – Preferred server ciphers on the client. **Ejemplo de configuración:** @@ -498,18 +501,18 @@ Claves para la configuración del servidor/cliente: ``` -## part\_log {#server_settings-part-log} +## part\_log {#server_configuration_parameters-part-log} -Registro de eventos asociados con [Método de codificación de datos:](../table_engines/mergetree.md). Por ejemplo, agregar o fusionar datos. Puede utilizar el registro para simular algoritmos de combinación y comparar sus características. Puede visualizar el proceso de fusión. +Registro de eventos asociados con [Método de codificación de datos:](../../engines/table_engines/mergetree_family/mergetree.md). Por ejemplo, agregar o fusionar datos. Puede utilizar el registro para simular algoritmos de combinación y comparar sus características. Puede visualizar el proceso de fusión. -Las consultas se registran en el [sistema.part\_log](../system_tables.md#system_tables-part-log) tabla, no en un archivo separado. Puede configurar el nombre de esta tabla en el `table` parámetro (ver más abajo). +Las consultas se registran en el [sistema.part\_log](../../operations/system_tables.md#system_tables-part-log) tabla, no en un archivo separado. Puede configurar el nombre de esta tabla en el `table` parámetro (ver más abajo). Utilice los siguientes parámetros para configurar el registro: -- `database` – Nombre de la base de datos. -- `table` – Nombre de la tabla del sistema. -- `partition_by` – Establece un [clave de partición personalizada](../../operations/table_engines/custom_partitioning_key.md). -- `flush_interval_milliseconds` – Intervalo para el vaciado de datos desde el búfer en la memoria a la tabla. +- `database` – Name of the database. +- `table` – Name of the system table. +- `partition_by` – Sets a [clave de partición personalizada](../../engines/table_engines/mergetree_family/custom_partitioning_key.md). +- `flush_interval_milliseconds` – Interval for flushing data from the buffer in memory to the table. **Ejemplo** @@ -522,7 +525,7 @@ Utilice los siguientes parámetros para configurar el registro: ``` -## camino {#server_settings-path} +## camino {#server_configuration_parameters-path} La ruta de acceso al directorio que contiene los datos. @@ -535,18 +538,18 @@ La ruta de acceso al directorio que contiene los datos. /var/lib/clickhouse/ ``` -## query\_log {#server_settings-query-log} +## query\_log {#server_configuration_parameters-query-log} Configuración de las consultas de registro recibidas con [log\_queries=1](../settings/settings.md) configuración. -Las consultas se registran en el [sistema.query\_log](../system_tables.md#system_tables-query_log) tabla, no en un archivo separado. Puede cambiar el nombre de la tabla en el `table` parámetro (ver más abajo). +Las consultas se registran en el [sistema.query\_log](../../operations/system_tables.md#system_tables-query_log) tabla, no en un archivo separado. Puede cambiar el nombre de la tabla en el `table` parámetro (ver más abajo). 
-Utilice los siguientes parámetros para configurar el registro: +Utilice los siguientes parámetros para configurar el inicio de sesión: -- `database` – Nombre de la base de datos. -- `table` – Nombre de la tabla del sistema en la que se registrarán las consultas. -- `partition_by` – Establece un [clave de partición personalizada](../../operations/table_engines/custom_partitioning_key.md) para una mesa. -- `flush_interval_milliseconds` – Intervalo para el vaciado de datos desde el búfer en la memoria a la tabla. +- `database` – Name of the database. +- `table` – Name of the system table the queries will be logged in. +- `partition_by` – Sets a [clave de partición personalizada](../../engines/table_engines/mergetree_family/custom_partitioning_key.md) para una mesa. +- `flush_interval_milliseconds` – Interval for flushing data from the buffer in memory to the table. Si la tabla no existe, ClickHouse la creará. Si la estructura del registro de consultas cambió cuando se actualizó el servidor ClickHouse, se cambia el nombre de la tabla con la estructura anterior y se crea una nueva tabla automáticamente. @@ -561,18 +564,18 @@ Si la tabla no existe, ClickHouse la creará. Si la estructura del registro de c ``` -## Sistema abierto. {#server_settings-query-thread-log} +## Sistema abierto. {#server_configuration_parameters-query-thread-log} Configuración de subprocesos de registro de consultas recibidas con [Log\_query\_threads = 1](../settings/settings.md#settings-log-query-threads) configuración. -Las consultas se registran en el [sistema.Sistema abierto.](../system_tables.md#system_tables-query-thread-log) tabla, no en un archivo separado. Puede cambiar el nombre de la tabla en el `table` parámetro (ver más abajo). +Las consultas se registran en el [sistema.Sistema abierto.](../../operations/system_tables.md#system_tables-query-thread-log) tabla, no en un archivo separado. Puede cambiar el nombre de la tabla en el `table` parámetro (ver más abajo). -Utilice los siguientes parámetros para configurar el registro: +Utilice los siguientes parámetros para configurar el inicio de sesión: -- `database` – Nombre de la base de datos. -- `table` – Nombre de la tabla del sistema en la que se registrarán las consultas. -- `partition_by` – Establece un [clave de partición personalizada](../../operations/table_engines/custom_partitioning_key.md) para una tabla del sistema. -- `flush_interval_milliseconds` – Intervalo para el vaciado de datos desde el búfer en la memoria a la tabla. +- `database` – Name of the database. +- `table` – Name of the system table the queries will be logged in. +- `partition_by` – Sets a [clave de partición personalizada](../../engines/table_engines/mergetree_family/custom_partitioning_key.md) para una tabla del sistema. +- `flush_interval_milliseconds` – Interval for flushing data from the buffer in memory to the table. Si la tabla no existe, ClickHouse la creará. Si la estructura del registro de subprocesos de consulta cambió cuando se actualizó el servidor ClickHouse, se cambia el nombre de la tabla con la estructura anterior y se crea una nueva tabla automáticamente. @@ -587,16 +590,16 @@ Si la tabla no existe, ClickHouse la creará. Si la estructura del registro de s ``` -## trace\_log {#server_settings-trace_log} +## trace\_log {#server_configuration_parameters-trace_log} -Ajustes para el [trace\_log](../system_tables.md#system_tables-trace_log) operación de la tabla del sistema. 
+La configuración para el [trace\_log](../../operations/system_tables.md#system_tables-trace_log) operación de la tabla del sistema. Parámetros: -- `database` — Base de datos para almacenar una tabla. -- `table` — Nombre de la tabla. -- `partition_by` — [Clave de partición personalizada](../../operations/table_engines/custom_partitioning_key.md) para una tabla del sistema. -- `flush_interval_milliseconds` — Intervalo para el vaciado de datos del búfer en la memoria a la tabla. +- `database` — Database for storing a table. +- `table` — Table name. +- `partition_by` — [Clave de partición personalizada](../../engines/table_engines/mergetree_family/custom_partitioning_key.md) para una tabla del sistema. +- `flush_interval_milliseconds` — Interval for flushing data from the buffer in memory to the table. El archivo de configuración del servidor predeterminado `config.xml` contiene la siguiente sección de configuración: @@ -642,7 +645,7 @@ los nodos se almacenarán sin enmascarar. ## remote\_servers {#server-settings-remote-servers} -Configuración de los clústeres utilizados por [Distribuido](../../operations/table_engines/distributed.md) motor de mesa y por el `cluster` función de la tabla. +Configuración de los clústeres utilizados por [Distribuido](../../engines/table_engines/special/distributed.md) motor de mesa y por el `cluster` función de la tabla. **Ejemplo** @@ -656,7 +659,7 @@ Para el valor de la `incl` atributo, consulte la sección “[Archivos de config - [skip\_unavailable\_shards](../settings/settings.md#settings-skip_unavailable_shards) -## Zona horaria {#server_settings-timezone} +## Zona horaria {#server_configuration_parameters-timezone} La zona horaria del servidor. @@ -670,7 +673,7 @@ La zona horaria es necesaria para las conversiones entre los formatos String y D Europe/Moscow ``` -## Tcp\_port {#server_settings-tcp_port} +## Tcp\_port {#server_configuration_parameters-tcp_port} Puerto para comunicarse con clientes a través del protocolo TCP. @@ -680,9 +683,9 @@ Puerto para comunicarse con clientes a través del protocolo TCP. 9000 ``` -## Tcp\_port\_secure {#server_settings-tcp_port-secure} +## Tcp\_port\_secure {#server_configuration_parameters-tcp_port-secure} -Puerto TCP para una comunicación segura con los clientes. Úselo con [OpenSSL](#server_settings-openssl) configuración. +Puerto TCP para una comunicación segura con los clientes. Úselo con [OpenSSL](#server_configuration_parameters-openssl) configuración. **Valores posibles** @@ -694,7 +697,7 @@ Entero positivo. 9440 ``` -## mysql\_port {#server_settings-mysql_port} +## mysql\_port {#server_configuration_parameters-mysql_port} Puerto para comunicarse con clientes a través del protocolo MySQL. @@ -723,7 +726,7 @@ Ruta de acceso a datos temporales para procesar consultas grandes. ## tmp\_policy {#server-settings-tmp-policy} -Política de [`storage_configuration`](../table_engines/mergetree.md#table_engine-mergetree-multiple-volumes) para almacenar archivos temporales. +Política de [`storage_configuration`](../../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-multiple-volumes) para almacenar archivos temporales. Si no se establece [`tmp_path`](#server-settings-tmp_path) se utiliza, de lo contrario se ignora. !!! 
note "Nota" @@ -734,7 +737,7 @@ Si no se establece [`tmp_path`](#server-settings-tmp_path) se utiliza, de lo con ## Uncompressed\_cache\_size {#server-settings-uncompressed_cache_size} -Tamaño de la memoria caché (en bytes) para los datos sin comprimir utilizados por los motores de [Método de codificación de datos:](../table_engines/mergetree.md). +Tamaño de la memoria caché (en bytes) para los datos sin comprimir utilizados por los motores de [Método de codificación de datos:](../../engines/table_engines/mergetree_family/mergetree.md). Hay una caché compartida para el servidor. La memoria se asigna a pedido. La caché se usa si la opción [Use\_uncompressed\_cache](../settings/settings.md#setting-use_uncompressed_cache) está habilitado. @@ -746,9 +749,9 @@ La caché sin comprimir es ventajosa para consultas muy cortas en casos individu 8589934592 ``` -## user\_files\_path {#server_settings-user_files_path} +## user\_files\_path {#server_configuration_parameters-user_files_path} -El directorio con archivos de usuario. Utilizado en la función de tabla [file()](../../query_language/table_functions/file.md). +El directorio con archivos de usuario. Utilizado en la función de tabla [file()](../../sql_reference/table_functions/file.md). **Ejemplo** @@ -779,7 +782,7 @@ ClickHouse utiliza ZooKeeper para almacenar metadatos de réplicas cuando se uti Esta sección contiene los siguientes parámetros: -- `node` — Punto final ZooKeeper. Puede establecer varios puntos finales. +- `node` — ZooKeeper endpoint. You can set multiple endpoints. Por ejemplo: @@ -794,9 +797,9 @@ Esta sección contiene los siguientes parámetros: The `index` attribute specifies the node order when trying to connect to the ZooKeeper cluster. -- `session_timeout` — Tiempo de espera máximo para la sesión del cliente en milisegundos. -- `root` — El [Znode](http://zookeeper.apache.org/doc/r3.5.5/zookeeperOver.html#Nodes+and+ephemeral+nodes) que se utiliza como la raíz de los znodes utilizados por el servidor ClickHouse. Opcional. -- `identity` — Usuario y contraseña, que puede ser requerido por ZooKeeper para dar acceso a los znodes solicitados. Opcional. +- `session_timeout` — Maximum timeout for the client session in milliseconds. +- `root` — The [Znode](http://zookeeper.apache.org/doc/r3.5.5/zookeeperOver.html#Nodes+and+ephemeral+nodes) que se utiliza como la raíz de los znodes utilizados por el servidor ClickHouse. Opcional. +- `identity` — User and password, that can be required by ZooKeeper to give access to requested znodes. Optional. **Ejemplo de configuración** @@ -821,29 +824,29 @@ Esta sección contiene los siguientes parámetros: **Ver también** -- [Replicación](../../operations/table_engines/replication.md) +- [Replicación](../../engines/table_engines/mergetree_family/replication.md) - [Guía del programador ZooKeeper](http://zookeeper.apache.org/doc/current/zookeeperProgrammers.html) ## use\_minimalistic\_part\_header\_in\_zookeeper {#server-settings-use_minimalistic_part_header_in_zookeeper} Método de almacenamiento para encabezados de parte de datos en ZooKeeper. -Esta configuración sólo se aplica a `MergeTree` Familia. Se puede especificar: +Esta configuración sólo se aplica a `MergeTree` familia. Se puede especificar: -- A nivel mundial en el [merge\_tree](#server_settings-merge_tree) sección de la `config.xml` file. +- A nivel mundial en el [merge\_tree](#server_configuration_parameters-merge_tree) sección de la `config.xml` file. ClickHouse utiliza la configuración para todas las tablas del servidor. 
Puede cambiar la configuración en cualquier momento. Las tablas existentes cambian su comportamiento cuando cambia la configuración. - Para cada tabla. - Al crear una tabla, especifique la correspondiente [ajuste del motor](../table_engines/mergetree.md#table_engine-mergetree-creating-a-table). El comportamiento de una tabla existente con esta configuración no cambia, incluso si la configuración global cambia. + Al crear una tabla, especifique la correspondiente [ajuste del motor](../../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-creating-a-table). El comportamiento de una tabla existente con esta configuración no cambia, incluso si la configuración global cambia. **Valores posibles** -- 0 — Funcionalidad está desactivada. -- 1 — Funcionalidad está activada. +- 0 — Functionality is turned off. +- 1 — Functionality is turned on. -Si `use_minimalistic_part_header_in_zookeeper = 1`, entonces [repetición](../table_engines/replication.md) las tablas almacenan los encabezados de las partes de datos de forma compacta `znode`. Si la tabla contiene muchas columnas, este método de almacenamiento reduce significativamente el volumen de los datos almacenados en Zookeeper. +Si `use_minimalistic_part_header_in_zookeeper = 1`, entonces [repetición](../../engines/table_engines/mergetree_family/replication.md) las tablas almacenan los encabezados de las partes de datos de forma compacta `znode`. Si la tabla contiene muchas columnas, este método de almacenamiento reduce significativamente el volumen de los datos almacenados en Zookeeper. !!! attention "Atención" Después de aplicar `use_minimalistic_part_header_in_zookeeper = 1`, no puede degradar el servidor ClickHouse a una versión que no admite esta configuración. Tenga cuidado al actualizar ClickHouse en servidores de un clúster. No actualice todos los servidores a la vez. Es más seguro probar nuevas versiones de ClickHouse en un entorno de prueba o solo en unos pocos servidores de un clúster. @@ -866,4 +869,4 @@ La actualización se realiza de forma asíncrona, en un subproceso del sistema s **Valor predeterminado**: 15. -[Artículo Original](https://clickhouse.tech/docs/es/operations/server_settings/settings/) +[Artículo Original](https://clickhouse.tech/docs/en/operations/server_configuration_parameters/settings/) diff --git a/docs/es/operations/settings/constraints_on_settings.md b/docs/es/operations/settings/constraints_on_settings.md index a67f6b38a54..498ce115ec5 100644 --- a/docs/es/operations/settings/constraints_on_settings.md +++ b/docs/es/operations/settings/constraints_on_settings.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 62 +toc_title: "Restricciones en la configuraci\xF3n" --- # Restricciones en la configuración {#constraints-on-settings} @@ -29,7 +32,7 @@ Las restricciones se definen como las siguientes: ``` -Si el usuario intenta violar las restricciones, se produce una excepción y no se cambia la configuración. +Si el usuario intenta violar las restricciones, se lanza una excepción y la configuración no se cambia. Se admiten tres tipos de restricciones: `min`, `max`, `readonly`. El `min` y `max` Las restricciones especifican los límites superior e inferior para una configuración numérica y se pueden usar en combinación. El `readonly` constraint especifica que el usuario no puede cambiar la configuración correspondiente en absoluto. 
**Ejemplo:** Dejar `users.xml` incluye líneas: @@ -67,6 +70,6 @@ Code: 452, e.displayText() = DB::Exception: Setting max_memory_usage should not Code: 452, e.displayText() = DB::Exception: Setting force_index_by_date should not be changed. ``` -**Nota:** el `default` perfil tiene un manejo especial: todas las restricciones definidas para el `default` perfil se convierten en las restricciones predeterminadas, por lo que restringen todos los usuarios hasta que se anulan explícitamente para estos usuarios. +**Nota:** el `default` perfil tiene un manejo especial: todas las restricciones definidas para el `default` profile se convierten en las restricciones predeterminadas, por lo que restringen a todos los usuarios hasta que se anulan explícitamente para estos usuarios. -[Artículo Original](https://clickhouse.tech/docs/es/operations/settings/constraints_on_settings/) +[Artículo Original](https://clickhouse.tech/docs/en/operations/settings/constraints_on_settings/) diff --git a/docs/es/operations/settings/index.md b/docs/es/operations/settings/index.md index a62525c786d..7f135801098 100644 --- a/docs/es/operations/settings/index.md +++ b/docs/es/operations/settings/index.md @@ -1,5 +1,9 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_folder_title: Settings +toc_priority: 55 +toc_title: "Implantaci\xF3n" --- # Configuración {#settings} @@ -25,4 +29,4 @@ Formas de configurar los ajustes, por orden de prioridad: Los ajustes que solo se pueden realizar en el archivo de configuración del servidor no se tratan en esta sección. -[Artículo Original](https://clickhouse.tech/docs/es/operations/settings/) +[Artículo Original](https://clickhouse.tech/docs/en/operations/settings/) diff --git a/docs/es/operations/settings/permissions_for_queries.md b/docs/es/operations/settings/permissions_for_queries.md index 5f777aacf16..8bb684bbe66 100644 --- a/docs/es/operations/settings/permissions_for_queries.md +++ b/docs/es/operations/settings/permissions_for_queries.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 58 +toc_title: Permisos para consultas --- # Permisos para consultas {#permissions_for_queries} @@ -9,13 +12,13 @@ Las consultas en ClickHouse se pueden dividir en varios tipos: 1. Leer consultas de datos: `SELECT`, `SHOW`, `DESCRIBE`, `EXISTS`. 2. Escribir consultas de datos: `INSERT`, `OPTIMIZE`. 3. Cambiar la consulta de configuración: `SET`, `USE`. -4. [DDL](https://en.wikipedia.org/wiki/Data_definition_language) Consulta: `CREATE`, `ALTER`, `RENAME`, `ATTACH`, `DETACH`, `DROP` `TRUNCATE`. +4. [DDL](https://en.wikipedia.org/wiki/Data_definition_language) consulta: `CREATE`, `ALTER`, `RENAME`, `ATTACH`, `DETACH`, `DROP` `TRUNCATE`. 5. `KILL QUERY`. La siguiente configuración regula los permisos de usuario según el tipo de consulta: -- [sólo lectura](#settings_readonly) — Restringe los permisos para todos los tipos de consultas excepto las consultas DDL. -- [Método de codificación de datos:](#settings_allow_ddl) — Restringe los permisos para consultas DDL. +- [sólo lectura](#settings_readonly) — Restricts permissions for all types of queries except DDL queries. +- [Método de codificación de datos:](#settings_allow_ddl) — Restricts permissions for DDL queries. `KILL QUERY` se puede realizar con cualquier configuración. 
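The two settings just listed are ordinarily pinned per profile in `users.xml`; a hedged sketch with a hypothetical profile name:

``` xml
<profiles>
    <!-- Hypothetical read-only profile: data can be queried, never changed. -->
    <readonly_analyst>
        <!-- 1 = only read-data queries are allowed. -->
        <readonly>1</readonly>
        <!-- 0 = DDL queries (CREATE, ALTER, DROP, ...) are denied. -->
        <allow_ddl>0</allow_ddl>
    </readonly_analyst>
</profiles>
```

`KILL QUERY` remains available regardless of these values, as noted above.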
@@ -23,13 +26,13 @@ La siguiente configuración regula los permisos de usuario según el tipo de con Restringe los permisos para leer datos, escribir datos y cambiar las consultas de configuración. -Vea cómo las consultas se dividen en tipos [Arriba](#permissions_for_queries). +Vea cómo las consultas se dividen en tipos [arriba](#permissions_for_queries). Valores posibles: -- 0 — Se permiten todas las consultas. -- 1 — Sólo se permiten consultas de datos de lectura. -- 2 — Leer datos y cambiar las consultas de configuración están permitidos. +- 0 — All queries are allowed. +- 1 — Only read data queries are allowed. +- 2 — Read data and change settings queries are allowed. Después de configurar `readonly = 1` el usuario no puede cambiar `readonly` y `allow_ddl` configuración en la sesión actual. @@ -44,15 +47,15 @@ Valor predeterminado: 0 Permite o niega [DDL](https://en.wikipedia.org/wiki/Data_definition_language) consulta. -Vea cómo las consultas se dividen en tipos [Arriba](#permissions_for_queries). +Vea cómo las consultas se dividen en tipos [arriba](#permissions_for_queries). Valores posibles: -- 0 — No se permiten consultas DDL. -- 1 — Se permiten consultas DDL. +- 0 — DDL queries are not allowed. +- 1 — DDL queries are allowed. No se puede ejecutar `SET allow_ddl = 1` si `allow_ddl = 0` para la sesión actual. Valor predeterminado: 1 -[Artículo Original](https://clickhouse.tech/docs/es/operations/settings/permissions_for_queries/) +[Artículo Original](https://clickhouse.tech/docs/en/operations/settings/permissions_for_queries/) diff --git a/docs/es/operations/settings/query_complexity.md b/docs/es/operations/settings/query_complexity.md index cfe667b5cf6..243ce5a0297 100644 --- a/docs/es/operations/settings/query_complexity.md +++ b/docs/es/operations/settings/query_complexity.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 59 +toc_title: Restricciones en la complejidad de consultas --- # Restricciones en la complejidad de consultas {#restrictions-on-query-complexity} @@ -14,11 +17,11 @@ Restricciones en el “maximum amount of something” puede tomar el valor 0, lo La mayoría de las restricciones también tienen un ‘overflow\_mode’ establecer, lo que significa qué hacer cuando se excede el límite. Puede tomar uno de dos valores: `throw` o `break`. Las restricciones en la agregación (group\_by\_overflow\_mode) también tienen el valor `any`. -`throw` – Lanzar una excepción (por defecto). +`throw` – Throw an exception (default). -`break` – Detener la ejecución de la consulta y devolver el resultado parcial, como si los datos de origen se agotaron. +`break` – Stop executing the query and return the partial result, as if the source data ran out. -`any (only for group_by_overflow_mode)` – Continuar la agregación de las claves que se metieron en el conjunto, pero no añadir nuevas claves al conjunto. +`any (only for group_by_overflow_mode)` – Continuing aggregation for the keys that got into the set, but don't add new keys to the set. ## Método de codificación de datos: {#settings_max_memory_usage} @@ -26,7 +29,7 @@ La cantidad máxima de RAM que se utiliza para ejecutar una consulta en un únic En el archivo de configuración predeterminado, el máximo es de 10 GB. -El ajuste no tiene en cuenta el volumen de memoria disponible o el volumen total de memoria en la máquina. +La configuración no tiene en cuenta el volumen de memoria disponible ni el volumen total de memoria en la máquina. 
La restricción se aplica a una sola consulta dentro de un único servidor. Usted puede utilizar `SHOW PROCESSLIST` para ver el consumo de memoria actual para cada consulta. Además, el consumo máximo de memoria se rastrea para cada consulta y se escribe en el registro. @@ -66,7 +69,7 @@ Un número máximo de bytes (datos sin comprimir) que se pueden leer de una tabl ## Método de codificación de datos: {#read-overflow-mode} -Qué hacer cuando el volumen de datos leídos excede uno de los límites: ‘throw’ o ‘break’. Por defecto, tirar. +Qué hacer cuando el volumen de datos leídos excede uno de los límites: ‘throw’ o ‘break’. Por defecto, throw. ## Método de codificación de datos: {#settings-max-rows-to-group-by} @@ -74,17 +77,17 @@ Un número máximo de claves únicas recibidas de la agregación. Esta configura ## Grupo\_by\_overflow\_mode {#group-by-overflow-mode} -Qué hacer cuando el número de claves únicas para la agregación excede el límite: ‘throw’, ‘break’, o ‘any’. Por defecto, tirar. +Qué hacer cuando el número de claves únicas para la agregación excede el límite: ‘throw’, ‘break’, o ‘any’. Por defecto, throw. Uso de la ‘any’ valor le permite ejecutar una aproximación de GROUP BY. La calidad de esta aproximación depende de la naturaleza estadística de los datos. ## max\_bytes\_before\_external\_group\_by {#settings-max_bytes_before_external_group_by} -Habilita o deshabilita la ejecución de `GROUP BY` en la memoria externa. Ver [GRUPO POR en memoria externa](../../query_language/select.md#select-group-by-in-external-memory). +Habilita o deshabilita la ejecución de `GROUP BY` en la memoria externa. Ver [GROUP BY en memoria externa](../../sql_reference/statements/select.md#select-group-by-in-external-memory). Valores posibles: -- Volumen máximo de RAM (en bytes) que puede ser utilizado por el único [GRUPO POR](../../query_language/select.md#select-group-by-clause) operación. -- Cero — `GROUP BY` en la memoria externa deshabilitada. +- Volumen máximo de RAM (en bytes) que puede ser utilizado por el único [GROUP BY](../../sql_reference/statements/select.md#select-group-by-clause) operación. +- 0 — `GROUP BY` en la memoria externa deshabilitada. Valor predeterminado: 0. @@ -98,7 +101,7 @@ Un número máximo de bytes antes de ordenar. ## sort\_overflow\_mode {#sort-overflow-mode} -Qué hacer si el número de filas recibidas antes de ordenar excede uno de los límites: ‘throw’ o ‘break’. Por defecto, tirar. +Qué hacer si el número de filas recibidas antes de ordenar excede uno de los límites: ‘throw’ o ‘break’. Por defecto, throw. ## max\_result\_rows {#setting-max_result_rows} @@ -110,9 +113,9 @@ Límite en el número de bytes en el resultado. Lo mismo que el ajuste anterior. ## result\_overflow\_mode {#result-overflow-mode} -Qué hacer si el volumen del resultado excede uno de los límites: ‘throw’ o ‘break’. Por defecto, tirar. +Qué hacer si el volumen del resultado excede uno de los límites: ‘throw’ o ‘break’. Por defecto, throw. -Utilizar ‘break’ es similar a un límite de usar. `Break` interrumpe la ejecución sólo en el nivel de bloque. Esto significa que la cantidad de filas devueltas es mayor que [max\_result\_rows](#setting-max_result_rows), múltiplo de [max\_block\_size](settings.md#setting-max_block_size) y depende de [max\_threads](settings.md#settings-max_threads). +Utilizar ‘break’ es similar a usar LIMIT. `Break` interrumpe la ejecución sólo en el nivel de bloque. 
Esto significa que la cantidad de filas devueltas es mayor que [max\_result\_rows](#setting-max_result_rows), múltiplo de [max\_block\_size](settings.md#setting-max_block_size) y depende de [max\_threads](settings.md#settings-max_threads). Ejemplo: @@ -138,7 +141,7 @@ En este momento, no se comprueba una de las etapas de clasificación, o al fusio ## timeout\_overflow\_mode {#timeout-overflow-mode} -Qué hacer si la consulta se ejecuta más de ‘max\_execution\_time’: ‘throw’ o ‘break’. Por defecto, tirar. +Qué hacer si la consulta se ejecuta más de ‘max\_execution\_time’: ‘throw’ o ‘break’. Por defecto, throw. ## Método de codificación de datos: {#min-execution-speed} @@ -184,7 +187,7 @@ Profundidad máxima de la tubería. Corresponde al número de transformaciones q ## max\_ast\_depth {#max-ast-depth} Profundidad máxima de anidamiento de un árbol sintáctico de consulta. Si se supera, se produce una excepción. -En este momento, no se comprueba durante el análisis, sino solo después de analizar la consulta. Es decir, se puede crear un árbol sintáctico demasiado profundo durante el análisis, pero la consulta fallará. Por defecto, 1000. +En este momento, no se verifica durante el análisis, sino solo después de analizar la consulta. Es decir, se puede crear un árbol sintáctico demasiado profundo durante el análisis, pero la consulta fallará. Por defecto, 1000. ## max\_ast\_elements {#max-ast-elements} @@ -201,7 +204,7 @@ Número máximo de bytes (datos sin comprimir) utilizados por un conjunto en la ## set\_overflow\_mode {#set-overflow-mode} -Qué hacer cuando la cantidad de datos excede uno de los límites: ‘throw’ o ‘break’. Por defecto, tirar. +Qué hacer cuando la cantidad de datos excede uno de los límites: ‘throw’ o ‘break’. Por defecto, throw. ## Método de codificación de datos: {#max-rows-in-distinct} @@ -213,7 +216,7 @@ Un número máximo de bytes utilizados por una tabla hash cuando se utiliza DIST ## distinct\_overflow\_mode {#distinct-overflow-mode} -Qué hacer cuando la cantidad de datos excede uno de los límites: ‘throw’ o ‘break’. Por defecto, tirar. +Qué hacer cuando la cantidad de datos excede uno de los límites: ‘throw’ o ‘break’. Por defecto, throw. ## max\_rows\_to\_transfer {#max-rows-to-transfer} @@ -225,13 +228,13 @@ Un número máximo de bytes (datos sin comprimir) que se pueden pasar a un servi ## transfer\_overflow\_mode {#transfer-overflow-mode} -Qué hacer cuando la cantidad de datos excede uno de los límites: ‘throw’ o ‘break’. Por defecto, tirar. +Qué hacer cuando la cantidad de datos excede uno de los límites: ‘throw’ o ‘break’. Por defecto, throw. ## Método de codificación de datos: {#settings-max_rows_in_join} Limita el número de filas de la tabla hash que se utiliza al unir tablas. -Esta configuración se aplica a [SELECCIONAR … UNIRSE](../../query_language/select.md#select-join) operaciones y la [Unir](../table_engines/join.md) motor de mesa. +Esta configuración se aplica a [SELECT … JOIN](../../sql_reference/statements/select.md#select-join) operaciones y la [Unir](../../engines/table_engines/special/join.md) motor de mesa. Si una consulta contiene varias combinaciones, ClickHouse comprueba esta configuración para cada resultado intermedio. @@ -240,7 +243,7 @@ ClickHouse puede proceder con diferentes acciones cuando se alcanza el límite. Valores posibles: - Entero positivo. -- 0 — Número ilimitado de filas. +- 0 — Unlimited number of rows. Valor predeterminado: 0. @@ -248,7 +251,7 @@ Valor predeterminado: 0. 
Limita el tamaño en bytes de la tabla hash utilizada al unir tablas. -Esta configuración se aplica a [SELECCIONAR … UNIRSE](../../query_language/select.md#select-join) operaciones y [Unirse al motor de tabla](../table_engines/join.md). +Esta configuración se aplica a [SELECT … JOIN](../../sql_reference/statements/select.md#select-join) operaciones y [Unirse al motor de tabla](../../engines/table_engines/special/join.md). Si la consulta contiene combinaciones, ClickHouse comprueba esta configuración para cada resultado intermedio. @@ -257,7 +260,7 @@ ClickHouse puede proceder con diferentes acciones cuando se alcanza el límite. Valores posibles: - Entero positivo. -- 0 — El control de memoria está desactivado. +- 0 — Memory control is disabled. Valor predeterminado: 0. @@ -270,22 +273,22 @@ Define qué acción realiza ClickHouse cuando se alcanza cualquiera de los sigui Valores posibles: -- `THROW` - ClickHouse lanza una excepción y rompe la operación. -- `BREAK` - ClickHouse interrumpe la operación y no lanza una excepción. +- `THROW` — ClickHouse throws an exception and breaks operation. +- `BREAK` — ClickHouse breaks operation and doesn't throw an exception. Valor predeterminado: `THROW`. **Ver también** -- [Cláusula JOIN](../../query_language/select.md#select-join) -- [Unirse al motor de tabla](../table_engines/join.md) +- [Cláusula JOIN](../../sql_reference/statements/select.md#select-join) +- [Unirse al motor de tabla](../../engines/table_engines/special/join.md) ## max\_partitions\_per\_insert\_block {#max-partitions-per-insert-block} Limita el número máximo de particiones en un único bloque insertado. - Entero positivo. -- 0 — número Ilimitado de particiones. +- 0 — Unlimited number of partitions. Valor predeterminado: 100. @@ -295,4 +298,4 @@ Al insertar datos, ClickHouse calcula el número de particiones en el bloque ins > “Too many partitions for single INSERT block (more than” ¿Cómo puedo hacerlo? “). The limit is controlled by ‘max\_partitions\_per\_insert\_block’ setting. A large number of partitions is a common misconception. It will lead to severe negative performance impact, including slow server startup, slow INSERT queries and slow SELECT queries. Recommended total number of partitions for a table is under 1000..10000. Please note, that partitioning is not intended to speed up SELECT queries (ORDER BY key is sufficient to make range queries fast). Partitions are intended for data manipulation (DROP PARTITION, etc).” -[Artículo Original](https://clickhouse.tech/docs/es/operations/settings/query_complexity/) +[Artículo Original](https://clickhouse.tech/docs/en/operations/settings/query_complexity/) diff --git a/docs/es/operations/settings/settings.md b/docs/es/operations/settings/settings.md index a28ef316d83..dd92922aec2 100644 --- a/docs/es/operations/settings/settings.md +++ b/docs/es/operations/settings/settings.md @@ -1,28 +1,31 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 60 +toc_title: "Configuraci\xF3n" --- # Configuración {#settings} ## distributed\_product\_mode {#distributed-product-mode} -Cambia el comportamiento de [subconsultas distribuidas](../../query_language/select.md). +Cambia el comportamiento de [subconsultas distribuidas](../../sql_reference/statements/select.md). -ClickHouse aplica esta configuración cuando la consulta contiene el producto de tablas distribuidas, es decir, cuando la consulta para una tabla distribuida contiene una subconsulta no GLOBAL para la tabla distribuida. 
+ClickHouse applies this setting when the query contains the product of distributed tables, i.e. when the query for a distributed table contains a non-GLOBAL subquery for the distributed table. Restricción: - Solo se aplica para las subconsultas IN y JOIN. - Solo si la sección FROM utiliza una tabla distribuida que contiene más de un fragmento. - Si la subconsulta se refiere a una tabla distribuida que contiene más de un fragmento. -- No se usa para un valor de tabla [remoto](../../query_language/table_functions/remote.md) función. +- No se usa para un valor de tabla [remoto](../../sql_reference/table_functions/remote.md) función. Valores posibles: -- `deny` — Valor predeterminado. Prohíbe el uso de estos tipos de subconsultas (devuelve el “Double-distributed in/JOIN subqueries is denied” Salvedad). -- `local` — Sustituye la base de datos y la tabla de la subconsulta por locales para el servidor de destino (fragmento), dejando `IN`/`JOIN.` -- `global` — Sustituye el `IN`/`JOIN` Consulta con `GLOBAL IN`/`GLOBAL JOIN.` -- `allow` — Permite la utilización de este tipo de subconsultas. +- `deny` — Default value. Prohibits using these types of subqueries (returns the “Double-distributed in/JOIN subqueries is denied” salvedad). +- `local` — Replaces the database and table in the subquery with local ones for the destination server (shard), leaving the normal `IN`/`JOIN.` +- `global` — Replaces the `IN`/`JOIN` consulta con `GLOBAL IN`/`GLOBAL JOIN.` +- `allow` — Allows the use of these types of subqueries. ## enable\_optimize\_predicate\_expression {#enable-optimize-predicate-expression} @@ -32,8 +35,8 @@ La extracción de predicados puede reducir significativamente el tráfico de red Valores posibles: -- 0 — Desactivado. -- 1 — Habilitado. +- 0 — Disabled. +- 1 — Enabled. Valor predeterminado: 1. @@ -50,7 +53,7 @@ Si `enable_optimize_predicate_expression = 0`, entonces el tiempo de ejecución ## fallback\_to\_stale\_replicas\_for\_distributed\_queries {#settings-fallback_to_stale_replicas_for_distributed_queries} -Fuerza una consulta a una réplica obsoleta si los datos actualizados no están disponibles. Ver [Replicación](../table_engines/replication.md). +Fuerza una consulta a una réplica obsoleta si los datos actualizados no están disponibles. Ver [Replicación](../../engines/table_engines/mergetree_family/replication.md). ClickHouse selecciona la más relevante de las réplicas obsoletas de la tabla. @@ -64,7 +67,7 @@ Deshabilita la ejecución de consultas si el índice no se puede usar por fecha. Funciona con tablas de la familia MergeTree. -Si `force_index_by_date=1`, ClickHouse comprueba si la consulta tiene una condición de clave de fecha que se puede usar para restringir intervalos de datos. Si no hay una condición adecuada, arroja una excepción. Sin embargo, no comprueba si la condición reduce la cantidad de datos a leer. Por ejemplo, la condición `Date != ' 2000-01-01 '` es aceptable incluso cuando coincide con todos los datos de la tabla (es decir, ejecutar la consulta requiere un escaneo completo). Para obtener más información acerca de los intervalos de datos en las tablas MergeTree, vea [Método de codificación de datos:](../table_engines/mergetree.md). +Si `force_index_by_date=1`, ClickHouse comprueba si la consulta tiene una condición de clave de fecha que se puede usar para restringir intervalos de datos. Si no hay una condición adecuada, arroja una excepción. Sin embargo, no comprueba si la condición reduce la cantidad de datos a leer. 
Por ejemplo, la condición `Date != ' 2000-01-01 '` es aceptable incluso cuando coincide con todos los datos de la tabla (es decir, ejecutar la consulta requiere un escaneo completo). Para obtener más información acerca de los intervalos de datos en las tablas MergeTree, vea [Método de codificación de datos:](../../engines/table_engines/mergetree_family/mergetree.md). ## force\_primary\_key {#force-primary-key} @@ -72,7 +75,7 @@ Deshabilita la ejecución de consultas si no es posible la indexación mediante Funciona con tablas de la familia MergeTree. -Si `force_primary_key=1`, ClickHouse comprueba si la consulta tiene una condición de clave principal que se puede usar para restringir rangos de datos. Si no hay una condición adecuada, arroja una excepción. Sin embargo, no comprueba si la condición reduce la cantidad de datos a leer. Para obtener más información acerca de los intervalos de datos en las tablas MergeTree, consulte [Método de codificación de datos:](../table_engines/mergetree.md). +Si `force_primary_key=1`, ClickHouse comprueba si la consulta tiene una condición de clave principal que se puede usar para restringir rangos de datos. Si no hay una condición adecuada, arroja una excepción. Sin embargo, no comprueba si la condición reduce la cantidad de datos a leer. Para obtener más información acerca de los intervalos de datos en las tablas MergeTree, consulte [Método de codificación de datos:](../../engines/table_engines/mergetree_family/mergetree.md). ## Formato\_esquema {#format-schema} @@ -92,8 +95,8 @@ Para obtener más información, lea el [Descripción de la interfaz HTTP](../../ Valores posibles: -- 0 — Desactivado. -- 1 — Habilitado. +- 0 — Disabled. +- 1 — Enabled. Valor predeterminado: 0. @@ -113,8 +116,8 @@ Para obtener más información, lea el [Descripción de la interfaz HTTP](../../ Valores posibles: -- 0 — Desactivado. -- 1 — Habilitado. +- 0 — Disabled. +- 1 — Enabled. Valor predeterminado: 0. @@ -126,19 +129,19 @@ Para obtener más información, lea el [Descripción de la interfaz HTTP](../../ Valores posibles: -- 0 — Desactivado. -- 1 — Habilitado. +- 0 — Disabled. +- 1 — Enabled. Valor predeterminado: 0. ## Nombre de la red inalámbrica (SSID): {#setting-max_http_get_redirects} -Limita el número máximo de saltos de redirección HTTP GET para [URL](../table_engines/url.md)-mesas de motor. La configuración se aplica a ambos tipos de tablas: las creadas por [CREAR TABLA](../../query_language/create/#create-table-query) Consulta y por el [URL](../../query_language/table_functions/url.md) función de la tabla. +Limita el número máximo de saltos de redirección HTTP GET para [URL](../../engines/table_engines/special/url.md)-mesas de motor. La configuración se aplica a ambos tipos de tablas: las creadas por [CREATE TABLE](../../query_language/create/#create-table-query) consulta y por el [URL](../../sql_reference/table_functions/url.md) función de la tabla. Valores posibles: - Cualquier número entero positivo de saltos. -- 0 — No se permiten saltos. +- 0 — No hops allowed. Valor predeterminado: 0. @@ -169,15 +172,15 @@ Si ambos `input_format_allow_errors_num` y `input_format_allow_errors_ratio` se ## input\_format\_values\_interpret\_expressions {#settings-input_format_values_interpret_expressions} -Habilita o deshabilita el analizador SQL completo si el analizador de flujo rápido no puede analizar los datos. Esta configuración sólo se utiliza para [Valor](../../interfaces/formats.md#data-format-values) formato en la inserción de datos. 
Para obtener más información sobre el análisis de sintaxis, consulte [Sintaxis](../../query_language/syntax.md) apartado. +Habilita o deshabilita el analizador SQL completo si el analizador de secuencias rápidas no puede analizar los datos. Esta configuración sólo se utiliza para [Valor](../../interfaces/formats.md#data-format-values) formato en la inserción de datos. Para obtener más información sobre el análisis de sintaxis, consulte [Sintaxis](../../sql_reference/syntax.md) apartado. Valores posibles: -- 0 — Desactivado. +- 0 — Disabled. En este caso, debe proporcionar datos con formato. Ver el [Formato](../../interfaces/formats.md) apartado. -- 1 — Habilitado. +- 1 — Enabled. En este caso, puede usar una expresión SQL como valor, pero la inserción de datos es mucho más lenta de esta manera. Si inserta solo datos con formato, ClickHouse se comporta como si el valor de configuración fuera 0. @@ -185,7 +188,7 @@ Valor predeterminado: 1. Ejemplo de uso -Inserte el [FechaHora](../../data_types/datetime.md) valor de tipo con los diferentes ajustes. +Inserte el [FechaHora](../../sql_reference/data_types/datetime.md) valor de tipo con los diferentes ajustes. ``` sql SET input_format_values_interpret_expressions = 0; @@ -227,7 +230,7 @@ INSERT INTO test VALUES (lower('Hello')), (lower('world')), (lower('INSERT')), ( - si `input_format_values_interpret_expressions=1` y `format_values_deduce_templates_of_expressions=0` Las expresiones se interpretarán por separado para cada fila (esto es muy lento para un gran número de filas) - si `input_format_values_interpret_expressions=0` y `format_values_deduce_templates_of_expressions=1` Las expresiones en la primera, segunda y tercera filas se analizarán usando la plantilla `lower(String)` e interpretados juntos, la expresión es la cuarta fila se analizará con otra plantilla (`upper(String)`) -- si `input_format_values_interpret_expressions=1` y `format_values_deduce_templates_of_expressions=1` - igual que en el caso anterior, pero también permite la alternativa a la interpretación de expresiones por separado si no es posible deducir la plantilla. +- si `input_format_values_interpret_expressions=1` y `format_values_deduce_templates_of_expressions=1` - lo mismo que en el caso anterior, pero también permite la alternativa a la interpretación de expresiones por separado si no es posible deducir la plantilla. Habilitado de forma predeterminada. @@ -242,7 +245,7 @@ Esta configuración sólo se utiliza cuando `input_format_values_deduce_template ``` Cuando esta configuración está habilitada, ClickHouse comprobará el tipo real de literal y utilizará una plantilla de expresión del tipo correspondiente. En algunos casos, puede ralentizar significativamente la evaluación de expresiones en `Values`. -Cuando está deshabilitado, ClickHouse puede usar un tipo más general para algunos literales (por ejemplo, `Float64` o `Int64` es lugar de `UInt64` para `42`), pero puede causar problemas de desbordamiento y precisión. +When disabled, ClickHouse may use more general type for some literals (e.g. `Float64` o `Int64` en lugar de `UInt64` para `42`), pero puede causar problemas de desbordamiento y precisión. Habilitado de forma predeterminada. ## Entrada\_format\_defaults\_for\_omitted\_fields {#session_settings-input_format_defaults_for_omitted_fields} @@ -254,8 +257,8 @@ Al realizar `INSERT` consultas, reemplace los valores de columna de entrada omit Valores posibles: -- 0 — Desactivado. -- 1 — Habilitado. +- 0 — Disabled. +- 1 — Enabled. Valor predeterminado: 1. 
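A minimal sketch of how this setting changes `INSERT` behaviour (the table and values are hypothetical):

``` sql
CREATE TABLE demo_defaults (a Int32, b Int32 DEFAULT a * 2) ENGINE = Memory;

SET input_format_defaults_for_omitted_fields = 1;
-- The omitted column b is calculated from its DEFAULT expression, giving (5, 10):
INSERT INTO demo_defaults FORMAT JSONEachRow {"a": 5}

SET input_format_defaults_for_omitted_fields = 0;
-- The omitted column b now gets the plain type default instead, giving (7, 0):
INSERT INTO demo_defaults FORMAT JSONEachRow {"a": 7}
```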
@@ -271,21 +274,21 @@ Habilita o deshabilita el uso de valores predeterminados si los datos de entrada ## input\_format\_skip\_unknown\_fields {#settings-input-format-skip-unknown-fields} -Habilita o deshabilita omitir la inserción de datos adicionales. +Habilita o deshabilita saltarse la inserción de datos adicionales. -Al escribir datos, ClickHouse produce una excepción si los datos de entrada contienen columnas que no existen en la tabla de destino. Si la omisión está habilitada, ClickHouse no inserta datos adicionales y no produce una excepción. +Al escribir datos, ClickHouse produce una excepción si los datos de entrada contienen columnas que no existen en la tabla de destino. Si la omisión está habilitada, ClickHouse no inserta datos adicionales y no lanza una excepción. Formatos soportados: - [JSONEachRow](../../interfaces/formats.md#jsoneachrow) - [CSVWithNames](../../interfaces/formats.md#csvwithnames) - [TabSeparatedWithNames](../../interfaces/formats.md#tabseparatedwithnames) -- [MOKOENA](../../interfaces/formats.md#tskv) +- [TSKV](../../interfaces/formats.md#tskv) Valores posibles: -- 0 — Desactivado. -- 1 — Habilitado. +- 0 — Disabled. +- 1 — Enabled. Valor predeterminado: 0. @@ -293,14 +296,14 @@ Valor predeterminado: 0. Habilita o deshabilita la inserción de datos JSON con objetos anidados. -Formatos soportados: +Formatos compatibles: - [JSONEachRow](../../interfaces/formats.md#jsoneachrow) Valores posibles: -- 0 — Desactivado. -- 1 — Habilitado. +- 0 — Disabled. +- 1 — Enabled. Valor predeterminado: 0. @@ -308,7 +311,7 @@ Ver también: - [Uso de estructuras anidadas](../../interfaces/formats.md#jsoneachrow-nested) con el `JSONEachRow` formato. -## Entrada\_format\_with\_names\_use\_header {#settings-input-format-with-names-use-header} +## input\_format\_with\_names\_use\_header {#settings-input-format-with-names-use-header} Habilita o deshabilita la comprobación del orden de las columnas al insertar datos. @@ -321,8 +324,8 @@ Formatos soportados: Valores posibles: -- 0 — Desactivado. -- 1 — Habilitado. +- 0 — Disabled. +- 1 — Enabled. Valor predeterminado: 1. @@ -330,15 +333,15 @@ Valor predeterminado: 1. Permite elegir un analizador de la representación de texto de fecha y hora. -La configuración no se aplica a [Funciones de fecha y hora](../../query_language/functions/date_time_functions.md). +La configuración no se aplica a [Funciones de fecha y hora](../../sql_reference/functions/date_time_functions.md). Valores posibles: -- `'best_effort'` — Permite el análisis extendido. +- `'best_effort'` — Enables extended parsing. - ClickHouse puede analizar el básico `YYYY-MM-DD HH:MM:SS` formato y todo [Descripción](https://en.wikipedia.org/wiki/ISO_8601) formatos de fecha y hora. Por ejemplo, `'2018-06-08T01:02:03.000Z'`. + ClickHouse puede analizar el básico `YYYY-MM-DD HH:MM:SS` formato y todo [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) formatos de fecha y hora. Por ejemplo, `'2018-06-08T01:02:03.000Z'`. -- `'basic'` — Utilice analizador básico. +- `'basic'` — Use basic parser. ClickHouse puede analizar solo lo básico `YYYY-MM-DD HH:MM:SS` formato. Por ejemplo, `'2019-08-20 10:18:56'`. @@ -346,19 +349,19 @@ Valor predeterminado: `'basic'`. 
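A rough illustration of the difference between the two parsers (the table is hypothetical):

``` sql
CREATE TABLE demo_datetime (d DateTime) ENGINE = Memory;

SET date_time_input_format = 'basic';
INSERT INTO demo_datetime VALUES ('2019-08-20 10:18:56'); -- only YYYY-MM-DD HH:MM:SS is accepted

SET date_time_input_format = 'best_effort';
INSERT INTO demo_datetime VALUES ('2018-06-08T01:02:03.000Z'); -- ISO 8601 variants are also accepted
```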
Ver también: -- [Tipo de datos DateTime.](../../data_types/datetime.md) -- [Funciones para trabajar con fechas y horas.](../../query_language/functions/date_time_functions.md) +- [Tipo de datos DateTime.](../../sql_reference/data_types/datetime.md) +- [Funciones para trabajar con fechas y horas.](../../sql_reference/functions/date_time_functions.md) ## Por favor, introduzca su dirección de correo electrónico {#settings-join_default_strictness} -Establece el rigor predeterminado para [Cláusulas JOIN](../../query_language/select.md#select-join). +Establece el rigor predeterminado para [Cláusulas JOIN](../../sql_reference/statements/select.md#select-join). Valores posibles: -- `ALL` — Si la tabla correcta tiene varias filas coincidentes, ClickHouse crea un [Producto cartesiano](https://en.wikipedia.org/wiki/Cartesian_product) de filas coincidentes. Esta es la normal `JOIN` comportamiento de SQL estándar. -- `ANY` — Si la tabla correcta tiene varias filas coincidentes, solo se une la primera encontrada. Si la tabla correcta solo tiene una fila coincidente, los resultados de `ANY` y `ALL` hijo de los mismos. -- `ASOF` — Para unir secuencias con una coincidencia incierta. -- `Empty string` — Si `ALL` o `ANY` no se especifica en la consulta, ClickHouse produce una excepción. +- `ALL` — If the right table has several matching rows, ClickHouse creates a [Producto cartesiano](https://en.wikipedia.org/wiki/Cartesian_product) de filas coincidentes. Esta es la normal `JOIN` comportamiento de SQL estándar. +- `ANY` — If the right table has several matching rows, only the first one found is joined. If the right table has only one matching row, the results of `ANY` y `ALL` son los mismos. +- `ASOF` — For joining sequences with an uncertain match. +- `Empty string` — If `ALL` o `ANY` no se especifica en la consulta, ClickHouse produce una excepción. Valor predeterminado: `ALL`. @@ -367,29 +370,29 @@ Valor predeterminado: `ALL`. Cambia el comportamiento de las operaciones de unión con `ANY` rigor. !!! warning "Atención" - Esta configuración sólo se aplica a `JOIN` operaciones con [Unir](../table_engines/join.md) mesas de motores. + Esta configuración sólo se aplica a `JOIN` operaciones con [Unir](../../engines/table_engines/special/join.md) mesas de motores. Valores posibles: -- 0 — Si la tabla correcta tiene más de una fila coincidente, solo se une la primera encontrada. -- 1 — Si la tabla correcta tiene más de una fila coincidente, solo se une la última encontrada. +- 0 — If the right table has more than one matching row, only the first one found is joined. +- 1 — If the right table has more than one matching row, only the last one found is joined. Valor predeterminado: 0. Ver también: -- [Cláusula JOIN](../../query_language/select.md#select-join) -- [Unirse al motor de tabla](../table_engines/join.md) +- [Cláusula JOIN](../../sql_reference/statements/select.md#select-join) +- [Unirse al motor de tabla](../../engines/table_engines/special/join.md) - [Por favor, introduzca su dirección de correo electrónico](#settings-join_default_strictness) ## Sistema abierto. {#join_use_nulls} -Establece el tipo de [UNIR](../../query_language/select.md) comportamiento. Al fusionar tablas, pueden aparecer celdas vacías. ClickHouse los rellena de manera diferente según esta configuración. +Establece el tipo de [JOIN](../../sql_reference/statements/select.md) comportamiento. Al fusionar tablas, pueden aparecer celdas vacías. ClickHouse los rellena de manera diferente según esta configuración. 
Valores posibles: -- 0 — Las celdas vacías se rellenan con el valor predeterminado del tipo de campo correspondiente. -- Uno — `JOIN` se comporta de la misma manera que en SQL estándar. El tipo del campo correspondiente se convierte en [NULO](../../data_types/nullable.md#data_type-nullable), y las celdas vacías se llenan con [NULO](../../query_language/syntax.md). +- 0 — The empty cells are filled with the default value of the corresponding field type. +- 1 — `JOIN` se comporta de la misma manera que en SQL estándar. El tipo del campo correspondiente se convierte en [NULL](../../sql_reference/data_types/nullable.md#data_type-nullable), y las celdas vacías se llenan con [NULL](../../sql_reference/syntax.md). Valor predeterminado: 0. @@ -404,12 +407,12 @@ Bloquea el tamaño de `max_block_size` no siempre se cargan desde la tabla. Si e ## preferred\_block\_size\_bytes {#preferred-block-size-bytes} Utilizado para el mismo propósito que `max_block_size`, pero establece el tamaño de bloque recomendado en bytes adaptándolo al número de filas en el bloque. -Sin embargo, el tamaño del bloque no puede ser más que `max_block_size` películas. -Por defecto: 1.000.000. Solo funciona cuando se lee desde los motores MergeTree. +Sin embargo, el tamaño del bloque no puede ser más que `max_block_size` filas. +Por defecto: 1,000,000. Solo funciona cuando se lee desde los motores MergeTree. ## merge\_tree\_min\_rows\_for\_concurrent\_read {#setting-merge-tree-min-rows-for-concurrent-read} -Si el número de filas que se leerán de un fichero [Método de codificación de datos:](../table_engines/mergetree.md) más caliente `merge_tree_min_rows_for_concurrent_read` luego ClickHouse intenta realizar una lectura simultánea de este archivo en varios hilos. +Si el número de filas que se leerán de un fichero [Método de codificación de datos:](../../engines/table_engines/mergetree_family/mergetree.md) tabla supera `merge_tree_min_rows_for_concurrent_read` luego ClickHouse intenta realizar una lectura simultánea de este archivo en varios hilos. Valores posibles: @@ -419,7 +422,7 @@ Valor predeterminado: 163840. ## merge\_tree\_min\_bytes\_for\_concurrent\_read {#setting-merge-tree-min-bytes-for-concurrent-read} -Si el número de bytes a leer de un archivo de un [Método de codificación de datos:](../table_engines/mergetree.md)-La tabla del motor excede `merge_tree_min_bytes_for_concurrent_read`, entonces ClickHouse intenta leer simultáneamente este archivo en varios subprocesos. +Si el número de bytes a leer de un archivo de un [Método de codificación de datos:](../../engines/table_engines/mergetree_family/mergetree.md)-La tabla del motor excede `merge_tree_min_bytes_for_concurrent_read`, entonces ClickHouse intenta leer simultáneamente este archivo en varios subprocesos. Valor posible: @@ -459,21 +462,21 @@ Valor predeterminado: 8. ## merge\_tree\_max\_rows\_to\_use\_cache {#setting-merge-tree-max-rows-to-use-cache} -Si ClickHouse debería leer más de `merge_tree_max_rows_to_use_cache` en una consulta, no utiliza la memoria caché de bloques sin comprimir. +Si ClickHouse debería leer más de `merge_tree_max_rows_to_use_cache` en una consulta, no usa la memoria caché de bloques sin comprimir. -La memoria caché de bloques sin comprimir almacena datos extraídos para consultas. ClickHouse utiliza esta memoria caché para acelerar las respuestas a pequeñas consultas repetidas. Esta configuración protege la memoria caché del deterioro de las consultas que leen una gran cantidad de datos. 
El [Uncompressed\_cache\_size](../server_settings/settings.md#server-settings-uncompressed_cache_size) configuración del servidor define el tamaño de la memoria caché de bloques sin comprimir. +La memoria caché de bloques sin comprimir almacena datos extraídos para consultas. ClickHouse utiliza esta memoria caché para acelerar las respuestas a pequeñas consultas repetidas. Esta configuración protege la memoria caché del deterioro de las consultas que leen una gran cantidad de datos. El [Uncompressed\_cache\_size](../server_configuration_parameters/settings.md#server-settings-uncompressed_cache_size) configuración del servidor define el tamaño de la memoria caché de bloques sin comprimir. Valores posibles: - Cualquier entero positivo. -Valor predeterminado: 128 ✕ 8192. +Default value: 128 ✕ 8192. ## merge\_tree\_max\_bytes\_to\_use\_cache {#setting-merge-tree-max-bytes-to-use-cache} -Si ClickHouse debería leer más de `merge_tree_max_bytes_to_use_cache` bytes en una consulta, no utiliza la memoria caché de bloques sin comprimir. +Si ClickHouse debería leer más de `merge_tree_max_bytes_to_use_cache` bytes en una consulta, no usa el caché de bloques sin comprimir. -La memoria caché de bloques sin comprimir almacena datos extraídos para consultas. ClickHouse utiliza esta memoria caché para acelerar las respuestas a pequeñas consultas repetidas. Esta configuración protege la memoria caché del deterioro de las consultas que leen una gran cantidad de datos. El [Uncompressed\_cache\_size](../server_settings/settings.md#server-settings-uncompressed_cache_size) configuración del servidor define el tamaño de la memoria caché de bloques sin comprimir. +La memoria caché de bloques sin comprimir almacena datos extraídos para consultas. ClickHouse utiliza esta memoria caché para acelerar las respuestas a pequeñas consultas repetidas. Esta configuración protege la memoria caché del deterioro de las consultas que leen una gran cantidad de datos. El [Uncompressed\_cache\_size](../server_configuration_parameters/settings.md#server-settings-uncompressed_cache_size) configuración del servidor define el tamaño de la memoria caché de bloques sin comprimir. Valor posible: @@ -485,11 +488,11 @@ Valor predeterminado: 2013265920. El volumen de datos mínimo necesario para utilizar el acceso directo de E/S al disco de almacenamiento. -ClickHouse usa esta configuración al leer datos de tablas. Si el volumen total de almacenamiento de todos los datos a leer excede `min_bytes_to_use_direct_io` luego ClickHouse lee los datos del disco de almacenamiento con el `O_DIRECT` opción. +ClickHouse usa esta configuración al leer datos de tablas. Si el volumen total de almacenamiento de todos los datos a leer excede `min_bytes_to_use_direct_io` luego ClickHouse lee los datos del disco de almacenamiento con el `O_DIRECT` opcion. Valores posibles: -- 0 — E/S directa está deshabilitada. +- 0 — Direct I/O is disabled. - Entero positivo. Valor predeterminado: 0. @@ -498,7 +501,7 @@ Valor predeterminado: 0. Configuración del registro de consultas. -Las consultas enviadas a ClickHouse con esta configuración se registran de acuerdo con las reglas [query\_log](../server_settings/settings.md#server_settings-query-log) parámetro de configuración del servidor. +Las consultas enviadas a ClickHouse con esta configuración se registran de acuerdo con las reglas [query\_log](../server_configuration_parameters/settings.md#server_configuration_parameters-query-log) parámetro de configuración del servidor. 
Ejemplo: @@ -510,7 +513,7 @@ log_queries=1 Configuración del registro de subprocesos de consulta. -Los subprocesos de consultas ejecutados por ClickHouse con esta configuración se registran de acuerdo con las reglas en el [Sistema abierto.](../server_settings/settings.md#server_settings-query-thread-log) parámetro de configuración del servidor. +Los subprocesos de consultas ejecutados por ClickHouse con esta configuración se registran de acuerdo con las reglas en el [Sistema abierto.](../server_configuration_parameters/settings.md#server_configuration_parameters-query-thread-log) parámetro de configuración del servidor. Ejemplo: @@ -524,7 +527,7 @@ El tamaño de los bloques a formar para su inserción en una tabla. Esta configuración solo se aplica en los casos en que el servidor forma los bloques. Por ejemplo, para un INSERT a través de la interfaz HTTP, el servidor analiza el formato de datos y forma bloques del tamaño especificado. Pero al usar clickhouse-client, el cliente analiza los datos en sí, y el ‘max\_insert\_block\_size’ configuración en el servidor no afecta el tamaño de los bloques insertados. -La configuración tampoco tiene un propósito cuando se usa INSERT SELECT, ya que los datos se insertan usando los mismos bloques que se forman después de SELECT. +La configuración tampoco tiene un propósito cuando se usa INSERT SELECT , ya que los datos se insertan usando los mismos bloques que se forman después de SELECT . Valor predeterminado: 1.048.576. @@ -532,7 +535,7 @@ El valor predeterminado es ligeramente más que `max_block_size`. La razón de e ## max\_replica\_delay\_for\_distributed\_queries {#settings-max_replica_delay_for_distributed_queries} -Deshabilita las réplicas rezagadas para consultas distribuidas. Ver [Replicación](../../operations/table_engines/replication.md). +Deshabilita las réplicas rezagadas para consultas distribuidas. Ver [Replicación](../../engines/table_engines/mergetree_family/replication.md). Establece el tiempo en segundos. Si una réplica tiene un retraso superior al valor establecido, no se utiliza esta réplica. @@ -553,7 +556,7 @@ Si normalmente se ejecuta menos de una consulta SELECT en un servidor a la vez, Para las consultas que se completan rápidamente debido a un LIMIT, puede establecer un ‘max\_threads’. Por ejemplo, si el número necesario de entradas se encuentra en cada bloque y max\_threads = 8, entonces se recuperan 8 bloques, aunque hubiera sido suficiente leer solo uno. -Cuanto menor sea el `max_threads` valor, menos memoria se consumen. +Cuanto menor sea el `max_threads` valor, menos memoria se consume. ## Método de codificación de datos: {#settings-max-insert-threads} @@ -561,7 +564,7 @@ El número máximo de subprocesos para ejecutar el `INSERT SELECT` consulta. Valores posibles: -- 0 (o 1) — `INSERT SELECT` sin ejecución paralela. +- 0 (or 1) — `INSERT SELECT` sin ejecución paralela. - Entero positivo. Más grande que 1. Valor predeterminado: 0. @@ -577,7 +580,7 @@ No confunda bloques para la compresión (un fragmento de memoria que consta de b ## Descripción del producto {#min-compress-block-size} -Para [Método de codificación de datos:](../table_engines/mergetree.md)" tabla. Para reducir la latencia al procesar consultas, un bloque se comprime al escribir la siguiente marca si su tamaño es al menos ‘min\_compress\_block\_size’. De forma predeterminada, 65.536. +Para [Método de codificación de datos:](../../engines/table_engines/mergetree_family/mergetree.md)" tabla. 
Para reducir la latencia al procesar consultas, un bloque se comprime al escribir la siguiente marca si su tamaño es al menos ‘min\_compress\_block\_size’. De forma predeterminada, 65.536. El tamaño real del bloque, si los datos sin comprimir son menores que ‘max\_compress\_block\_size’, no es menor que este valor y no menor que el volumen de datos para una marca. @@ -610,7 +613,7 @@ Valor predeterminado: 10, 300, 300. ## Cancel\_http\_readonly\_queries\_on\_client\_close {#cancel-http-readonly-queries-on-client-close} -Cancela las consultas de solo lectura HTTP (por ejemplo, SELECT) cuando un cliente cierra la conexión sin esperar la respuesta. +Cancels HTTP read-only queries (e.g. SELECT) when a client closes the connection without waiting for the response. Valor predeterminado: 0 @@ -655,7 +658,7 @@ Para obtener más información, consulte la sección “Extreme values”. ## Use\_uncompressed\_cache {#setting-use_uncompressed_cache} Si se debe usar una memoria caché de bloques sin comprimir. Acepta 0 o 1. De forma predeterminada, 0 (deshabilitado). -El uso de la memoria caché sin comprimir (solo para tablas de la familia MergeTree) puede reducir significativamente la latencia y aumentar el rendimiento cuando se trabaja con un gran número de consultas cortas. Habilite esta configuración para los usuarios que envían solicitudes cortas frecuentes. También preste atención al [Uncompressed\_cache\_size](../server_settings/settings.md#server-settings-uncompressed_cache_size) parámetro de configuración (solo establecido en el archivo de configuración): el tamaño de los bloques de caché sin comprimir. De forma predeterminada, es 8 GiB. La memoria caché sin comprimir se rellena según sea necesario y los datos menos utilizados se eliminan automáticamente. +El uso de la memoria caché sin comprimir (solo para tablas de la familia MergeTree) puede reducir significativamente la latencia y aumentar el rendimiento cuando se trabaja con un gran número de consultas cortas. Habilite esta configuración para los usuarios que envían solicitudes cortas frecuentes. También preste atención al [Uncompressed\_cache\_size](../server_configuration_parameters/settings.md#server-settings-uncompressed_cache_size) configuration parameter (only set in the config file) – the size of uncompressed cache blocks. By default, it is 8 GiB. The uncompressed cache is filled in as needed and the least-used data is automatically deleted. Para consultas que leen al menos un volumen algo grande de datos (un millón de filas o más), la memoria caché sin comprimir se desactiva automáticamente para ahorrar espacio para consultas realmente pequeñas. Esto significa que puede mantener el ‘use\_uncompressed\_cache’ ajuste siempre establecido en 1. @@ -664,15 +667,15 @@ Para consultas que leen al menos un volumen algo grande de datos (un millón de Cuando se utiliza la interfaz HTTP, el ‘query\_id’ parámetro puede ser pasado. Se trata de cualquier cadena que sirva como identificador de consulta. Si una consulta del mismo usuario ‘query\_id’ que ya existe en este momento, el comportamiento depende de la ‘replace\_running\_query’ parámetro. -`0` (predeterminado) – Lanzar una excepción (no permita que la consulta se ejecute si una consulta ‘query\_id’ ya se está ejecutando). +`0` (default) – Throw an exception (don't allow the query to run if a query with the same ‘query\_id’ ya se está ejecutando). -`1` – Cancelar la consulta anterior y empezar a ejecutar la nueva. +`1` – Cancel the old query and start running the new one. 
-El Yandex.Metrica utiliza este parámetro establecido en 1 para implementar sugerencias para las condiciones de segmentación. Después de ingresar el siguiente carácter, si la consulta anterior aún no ha terminado, debe cancelarse. +El Yandex.Metrica utiliza este parámetro establecido en 1 para implementar sugerencias para las condiciones de segmentación. Después de ingresar el siguiente carácter, si la consulta anterior aún no ha finalizado, debe cancelarse. ## Nombre de la red inalámbrica (SSID): {#stream-flush-interval-ms} -Funciona para tablas con streaming en el caso de un tiempo de espera, o cuando un subproceso genera [Max\_insert\_block\_size](#settings-max_insert_block_size) películas. +Funciona para tablas con streaming en el caso de un tiempo de espera, o cuando un subproceso genera [Max\_insert\_block\_size](#settings-max_insert_block_size) filas. El valor predeterminado es 7500. @@ -704,7 +707,7 @@ Desventajas: La proximidad del servidor no se tiene en cuenta; si las réplicas load_balancing = nearest_hostname ``` -El número de errores se cuenta para cada réplica. Cada 5 minutos, el número de errores se divide integralmente por 2. Por lo tanto, el número de errores se calcula para un tiempo reciente con suavizado exponencial. Si hay una réplica con un número mínimo de errores (es decir,errores ocurridos recientemente en las otras réplicas), la consulta se le envía. Si hay varias réplicas con el mismo número mínimo de errores, la consulta se envía a la réplica con un nombre de host que es más similar al nombre de host del servidor en el archivo de configuración (para el número de caracteres diferentes en posiciones idénticas, hasta la longitud mínima de ambos nombres de host). +The number of errors is counted for each replica. Every 5 minutes, the number of errors is integrally divided by 2. Thus, the number of errors is calculated for a recent time with exponential smoothing. If there is one replica with a minimal number of errors (i.e. errors occurred recently on the other replicas), the query is sent to it. If there are multiple replicas with the same minimal number of errors, the query is sent to the replica with a hostname that is most similar to the server's hostname in the config file (for the number of different characters in identical positions, up to the minimum length of both hostnames). Por ejemplo, example01-01-1 y example01-01-2.yandex.ru son diferentes en una posición, mientras que example01-01-1 y example01-02-2 difieren en dos lugares. Este método puede parecer primitivo, pero no requiere datos externos sobre la topología de red, y no compara las direcciones IP, lo que sería complicado para nuestras direcciones IPv6. @@ -727,9 +730,9 @@ Este método es apropiado cuando se sabe exactamente qué réplica es preferible load_balancing = first_or_random ``` -Este algoritmo elige la primera réplica del conjunto o una réplica aleatoria si la primera no está disponible. Es eficaz en configuraciones de topología de replicación cruzada, pero inútil en otras configuraciones. +Este algoritmo elige la primera réplica del conjunto o una réplica aleatoria si la primera no está disponible. Es efectivo en configuraciones de topología de replicación cruzada, pero inútil en otras configuraciones. -El `first_or_random` Resuelve el problema del algoritmo `in_order` algoritmo. Desventaja `in_order`, si una réplica se cae, la siguiente obtiene una carga doble mientras que las réplicas restantes manejan la cantidad habitual de tráfico. 
Cuando se utiliza el `first_or_random` algoritmo, la carga se distribuye uniformemente entre las réplicas que todavía están disponibles.
+El `first_or_random` resuelve el problema del algoritmo `in_order`. Con `in_order`, si una réplica se cae, la siguiente obtiene una carga doble mientras que las réplicas restantes manejan la cantidad habitual de tráfico. Cuando se utiliza el `first_or_random` algoritmo, la carga se distribuye uniformemente entre las réplicas que todavía están disponibles.

## prefer\_localhost\_replica {#settings-prefer-localhost-replica}

Habilita/deshabilita el uso preferible de la réplica localhost al procesar consultas distribuidas.

Valores posibles:

-- 1 - ClickHouse siempre envía una consulta a la réplica localhost si existe.
-- 0 — ClickHouse utiliza la estrategia de equilibrio especificada [load\_balancing](#settings-load_balancing) configuración.
+- 1 — ClickHouse always sends a query to the localhost replica if it exists.
+- 0 — ClickHouse uses the balancing strategy specified by the [load\_balancing](#settings-load_balancing) configuración.

Valor predeterminado: 1.

@@ -770,12 +773,12 @@ Si se compiló esta parte de la canalización, la consulta puede ejecutarse más

## min\_count\_to\_compile {#min-count-to-compile}

-¿Cuántas veces usar potencialmente un fragmento de código compilado antes de ejecutar la compilación? Por defecto, 3.
-Para las pruebas, el valor se puede establecer en 0: la compilación se ejecuta de forma sincrónica y la consulta espera al final del proceso de compilación antes de continuar con la ejecución. Para todos los demás casos, use valores comenzando con 1. La compilación normalmente toma alrededor de 5-10 segundos.
+¿Cuántas veces usar potencialmente un fragmento de código compilado antes de ejecutar la compilación? Por defecto, 3.
+For testing, the value can be set to 0: compilation runs synchronously and the query waits for the end of the compilation process before continuing execution. For all other cases, use values starting with 1. Compilation normally takes about 5-10 seconds.

Si el valor es 1 o más, la compilación se produce de forma asíncrona en un subproceso independiente. El resultado se utilizará tan pronto como esté listo, incluidas las consultas que se están ejecutando actualmente.

Se requiere código compilado para cada combinación diferente de funciones agregadas utilizadas en la consulta y el tipo de claves en la cláusula GROUP BY.
-Los resultados de la compilación se guardan en el directorio de compilación en forma de archivos .so. No hay ninguna restricción en el número de resultados de compilación, ya que no utilizan mucho espacio. Los resultados anteriores se usarán después de reiniciar el servidor, excepto en el caso de una actualización del servidor; en este caso, se eliminan los resultados anteriores.
+The results of the compilation are saved in the build directory in the form of .so files. There is no restriction on the number of compilation results since they don't use very much space. Old results will be used after server restarts, except in the case of a server upgrade – in this case, the old results are deleted.

## output\_format\_json\_quote\_64bit\_integers {#session_settings-output_format_json_quote_64bit_integers}

@@ -806,13 +809,13 @@ Habilita las escrituras de quórum.

Valor predeterminado: 0.

-Quórum escribe
+Quorum escribe

`INSERT` solo tiene éxito cuando ClickHouse logra escribir correctamente datos en el `insert_quorum` de réplicas durante el `insert_quorum_timeout`.
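As an illustration, a quorum write might look like the following sketch (the replicated table is hypothetical and the values are illustrative; it assumes at least two live replicas):

``` sql
SET insert_quorum = 2;
SET insert_quorum_timeout = 60000; -- illustrative timeout value
-- The INSERT returns only after two replicas confirm the write:
INSERT INTO replicated_demo VALUES (1, 'a');
```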
Si por alguna razón el número de réplicas con escrituras exitosas no alcanza el `insert_quorum`, la escritura se considera fallida y ClickHouse eliminará el bloque insertado de todas las réplicas donde los datos ya se han escrito.

Todas las réplicas del quórum son consistentes, es decir, contienen datos de todas las réplicas anteriores `INSERT` consulta. El `INSERT` la secuencia está linealizada.

-Al leer los datos escritos desde el `insert_quorum` Puede utilizar el [select\_sequential\_consistency](#settings-select_sequential_consistency) opción.
+Al leer los datos escritos desde el `insert_quorum` usted puede utilizar el [select\_sequential\_consistency](#settings-select_sequential_consistency) opción.

ClickHouse genera una excepción

@@ -837,18 +840,18 @@ Ver también:

## select\_sequential\_consistency {#settings-select_sequential_consistency}

-Habilita o deshabilita la coherencia secuencial para `SELECT` Consulta:
+Habilita o deshabilita la coherencia secuencial para `SELECT` consulta:

Valores posibles:

-- 0 — Desactivado.
-- 1 — Habilitado.
+- 0 — Disabled.
+- 1 — Enabled.

Valor predeterminado: 0.

Uso

-Cuando se habilita la coherencia secuencial, ClickHouse permite al cliente ejecutar el `SELECT` consulta sólo para aquellas réplicas que contienen datos de todas las `INSERT` Consultas ejecutadas con `insert_quorum`. Si el cliente hace referencia a una réplica parcial, ClickHouse generará una excepción. La consulta SELECT no incluirá datos que aún no se hayan escrito en el quórum de réplicas.
+Cuando se habilita la coherencia secuencial, ClickHouse permite al cliente ejecutar el `SELECT` consulta sólo para aquellas réplicas que contienen datos de todas las `INSERT` consultas ejecutadas con `insert_quorum`. Si el cliente hace referencia a una réplica parcial, ClickHouse generará una excepción. La consulta SELECT no incluirá datos que aún no se hayan escrito en el quórum de réplicas.

Ver también:

@@ -861,12 +864,12 @@ Habilita o deshabilita la desduplicación de bloques `INSERT` (para tablas repli

Valores posibles:

-- 0 — Desactivado.
-- 1 — Habilitado.
+- 0 — Disabled.
+- 1 — Enabled.

Valor predeterminado: 1.

-De forma predeterminada, los bloques insertados en tablas replicadas `INSERT` (consulte \[Replicación de datos\] (../ table\_engines/replication.md).
+De forma predeterminada, los bloques insertados en tablas replicadas con `INSERT` se deduplican (consulte [Replicación de datos](../../engines/table_engines/mergetree_family/replication.md)).

## deduplicate\_blocks\_in\_dependent\_materialized\_views {#settings-deduplicate-blocks-in-dependent-materialized-views}

@@ -883,7 +886,7 @@ Uso

De forma predeterminada, la desduplicación no se realiza para las vistas materializadas, sino que se realiza en sentido ascendente, en la tabla de origen.
Si se omite un bloque INSERTed debido a la desduplicación en la tabla de origen, no habrá inserción en las vistas materializadas adjuntas. Este comportamiento existe para permitir la inserción de datos altamente agregados en vistas materializadas, para los casos en que los bloques insertados son los mismos después de la agregación de vistas materializadas pero derivados de diferentes INSERT en la tabla de origen.
-Al mismo tiempo, este comportamiento “breaks” `INSERT` empotencia. Si una `INSERT` en la mesa principal fue exitoso y `INSERT` en una vista materializada falló (por ejemplo, debido a una falla de comunicación con Zookeeper), un cliente obtendrá un error y puede volver a intentar la operación.
Sin embargo, la vista materializada no recibirá la segunda inserción porque se descartará mediante deduplicación en la tabla principal (fuente). Configuración `deduplicate_blocks_in_dependent_materialized_views` permite cambiar este comportamiento. Al reintentar, una vista materializada recibirá la inserción de repetición y realizará la comprobación de desduplicación por sí misma, +Al mismo tiempo, este comportamiento “breaks” `INSERT` idempotencia. Si una `INSERT` en la mesa principal fue exitoso y `INSERT` into a materialized view failed (e.g. because of communication failure with Zookeeper) a client will get an error and can retry the operation. However, the materialized view won't receive the second insert because it will be discarded by deduplication in the main (source) table. The setting `deduplicate_blocks_in_dependent_materialized_views` permite cambiar este comportamiento. Al reintentar, una vista materializada recibirá la inserción de repetición y realizará la comprobación de desduplicación por sí misma, ignorando el resultado de la comprobación para la tabla de origen, e insertará filas perdidas debido a la primera falla. ## Método de codificación de datos: {#settings-max-network-bytes} @@ -893,7 +896,7 @@ Limita el volumen de datos (en bytes) que se recibe o se transmite a través de Valores posibles: - Entero positivo. -- 0 — El control de volumen de datos está desactivado. +- 0 — Data volume control is disabled. Valor predeterminado: 0. @@ -904,7 +907,7 @@ Limita la velocidad del intercambio de datos a través de la red en bytes por se Valores posibles: - Entero positivo. -- 0 — El control de ancho de banda está deshabilitado. +- 0 — Bandwidth control is disabled. Valor predeterminado: 0. @@ -915,7 +918,7 @@ Limita la velocidad del intercambio de datos a través de la red en bytes por se Valores posibles: - Entero positivo. -- 0 — El control de la velocidad de los datos está desactivado. +- 0 — Control of the data speed is disabled. Valor predeterminado: 0. @@ -926,21 +929,21 @@ Limita la velocidad a la que se intercambian datos a través de la red en bytes Valores posibles: - Entero positivo. -- 0 — El control de la velocidad de los datos está desactivado. +- 0 — Control of the data speed is disabled. Valor predeterminado: 0. ## count\_distinct\_implementation {#settings-count_distinct_implementation} -Especifica cuál de las `uniq*` se deben utilizar para realizar el [COUNT(DISTINCT …)](../../query_language/agg_functions/reference.md#agg_function-count) construcción. +Especifica cuál de las `uniq*` se deben utilizar para realizar el [COUNT(DISTINCT …)](../../sql_reference/aggregate_functions/reference.md#agg_function-count) construcción. 
Valores posibles: -- [uniq](../../query_language/agg_functions/reference.md#agg_function-uniq) -- [uniqCombined](../../query_language/agg_functions/reference.md#agg_function-uniqcombined) -- [UniqCombined64](../../query_language/agg_functions/reference.md#agg_function-uniqcombined64) -- [uniqHLL12](../../query_language/agg_functions/reference.md#agg_function-uniqhll12) -- [uniqExact](../../query_language/agg_functions/reference.md#agg_function-uniqexact) +- [uniq](../../sql_reference/aggregate_functions/reference.md#agg_function-uniq) +- [uniqCombined](../../sql_reference/aggregate_functions/reference.md#agg_function-uniqcombined) +- [UniqCombined64](../../sql_reference/aggregate_functions/reference.md#agg_function-uniqcombined64) +- [uniqHLL12](../../sql_reference/aggregate_functions/reference.md#agg_function-uniqhll12) +- [uniqExact](../../sql_reference/aggregate_functions/reference.md#agg_function-uniqexact) Valor predeterminado: `uniqExact`. @@ -950,25 +953,25 @@ Habilita o deshabilita la omisión silenciosa de fragmentos no disponibles. El fragmento se considera no disponible si todas sus réplicas no están disponibles. Una réplica no está disponible en los siguientes casos: -- ClickHouse no se puede conectar a la réplica por ningún motivo. +- ClickHouse no puede conectarse a la réplica por ningún motivo. Al conectarse a una réplica, ClickHouse realiza varios intentos. Si todos estos intentos fallan, la réplica se considera que no está disponible. -- Replica no se puede resolver a través de DNS. +- La réplica no se puede resolver a través de DNS. Si el nombre de host de la réplica no se puede resolver a través de DNS, puede indicar las siguientes situaciones: - - El host de réplica no tiene registro DNS. Puede ocurrir en sistemas con DNS dinámico, por ejemplo, [Kubernetes](https://kubernetes.io), donde los nodos pueden ser irresolubles durante el tiempo de inactividad, y esto no es un error. + - El host de Replica no tiene registro DNS. Puede ocurrir en sistemas con DNS dinámico, por ejemplo, [Kubernetes](https://kubernetes.io), donde los nodos pueden ser irresolubles durante el tiempo de inactividad, y esto no es un error. - Error de configuración. El archivo de configuración de ClickHouse contiene un nombre de host incorrecto. Valores posibles: -- 1 — omitir habilitado. +- 1 — skipping enabled. Si un fragmento no está disponible, ClickHouse devuelve un resultado basado en datos parciales y no informa de problemas de disponibilidad de nodos. -- 0 — omitiendo deshabilitado. +- 0 — skipping disabled. Si un fragmento no está disponible, ClickHouse produce una excepción. @@ -992,16 +995,27 @@ Valores posibles: Valor predeterminado: 0 +## force\_optimize\_skip\_unused\_shards\_no\_nested {#settings-force_optimize_skip_unused_shards_no_nested} + +Restablecer [`optimize_skip_unused_shards`](#settings-force_optimize_skip_unused_shards) para anidados `Distributed` tabla + +Valores posibles: + +- 1 — Enabled. +- 0 — Disabled. + +Valor predeterminado: 0. + ## Optize\_throw\_if\_noop {#setting-optimize_throw_if_noop} -Habilita o deshabilita el lanzamiento de una excepción [OPTIMIZAR](../../query_language/misc.md#misc_operations-optimize) la consulta no realizó una fusión. +Habilita o deshabilita el lanzamiento de una excepción [OPTIMIZE](../../sql_reference/statements/misc.md#misc_operations-optimize) la consulta no realizó una fusión. Predeterminada, `OPTIMIZE` devuelve con éxito incluso si no hizo nada. 
Esta configuración le permite diferenciar estas situaciones y obtener el motivo en un mensaje de excepción. Valores posibles: -- 1 — Lanzar una excepción está habilitada. -- 0 — Lanzar una excepción está deshabilitado. +- 1 — Throwing an exception is enabled. +- 0 — Throwing an exception is disabled. Valor predeterminado: 0. @@ -1014,7 +1028,7 @@ Controla la rapidez con la que se ponen a cero los errores en las tablas distrib Ver también: -- [Motor de tabla distribuido](../../operations/table_engines/distributed.md) +- [Motor de tabla distribuido](../../engines/table_engines/special/distributed.md) - [distributed\_replica\_error\_cap](#settings-distributed_replica_error_cap) ## distributed\_replica\_error\_cap {#settings-distributed_replica_error_cap} @@ -1026,12 +1040,12 @@ El recuento de errores de cada réplica está limitado a este valor, lo que impi Ver también: -- [Motor de tabla distribuido](../../operations/table_engines/distributed.md) +- [Motor de tabla distribuido](../../engines/table_engines/special/distributed.md) - [distributed\_replica\_error\_half\_life](#settings-distributed_replica_error_half_life) ## Distributed\_directory\_monitor\_sleep\_time\_ms {#distributed_directory_monitor_sleep_time_ms} -Intervalo base para el [Distribuido](../table_engines/distributed.md) motor de tabla para enviar datos. El intervalo real crece exponencialmente en caso de errores. +Intervalo base para el [Distribuido](../../engines/table_engines/special/distributed.md) motor de tabla para enviar datos. El intervalo real crece exponencialmente en caso de errores. Valores posibles: @@ -1041,7 +1055,7 @@ Valor predeterminado: 100 milisegundos. ## Distributed\_directory\_monitor\_max\_sleep\_time\_ms {#distributed_directory_monitor_max_sleep_time_ms} -Intervalo máximo para el [Distribuido](../table_engines/distributed.md) motor de tabla para enviar datos. Limita el crecimiento exponencial del intervalo establecido en el [Distributed\_directory\_monitor\_sleep\_time\_ms](#distributed_directory_monitor_sleep_time_ms) configuración. +Intervalo máximo para el [Distribuido](../../engines/table_engines/special/distributed.md) motor de tabla para enviar datos. Limita el crecimiento exponencial del intervalo establecido en el [Distributed\_directory\_monitor\_sleep\_time\_ms](#distributed_directory_monitor_sleep_time_ms) configuración. Valores posibles: @@ -1053,12 +1067,12 @@ Valor predeterminado: 30000 milisegundos (30 segundos). Habilita/deshabilita el envío de datos insertados en lotes. -Cuando el envío por lotes está habilitado, el [Distribuido](../table_engines/distributed.md) El motor de tabla intenta enviar varios archivos de datos insertados en una operación en lugar de enviarlos por separado. El envío por lotes mejora el rendimiento del clúster al utilizar mejor los recursos del servidor y de la red. +Cuando el envío por lotes está habilitado, el [Distribuido](../../engines/table_engines/special/distributed.md) El motor de tabla intenta enviar varios archivos de datos insertados en una operación en lugar de enviarlos por separado. El envío por lotes mejora el rendimiento del clúster al utilizar mejor los recursos del servidor y de la red. Valores posibles: -- 1 — Habilitado. -- 0 — Desactivado. +- 1 — Enabled. +- 0 — Disabled. Valor predeterminado: 0. @@ -1079,7 +1093,7 @@ Valor predeterminado: 0. 
## query\_profiler\_real\_time\_period\_ns {#query_profiler_real_time_period_ns} -Establece el período para un temporizador de reloj real del [perfilador de consultas](../../operations/performance/sampling_query_profiler.md). El temporizador de reloj real cuenta el tiempo del reloj de pared. +Establece el período para un temporizador de reloj real del [perfilador de consultas](../../operations/optimizing_performance/sampling_query_profiler.md). El temporizador de reloj real cuenta el tiempo del reloj de pared. Valores posibles: @@ -1092,17 +1106,17 @@ Valores posibles: - 0 para apagar el temporizador. -Tipo: [UInt64](../../data_types/int_uint.md). +Tipo: [UInt64](../../sql_reference/data_types/int_uint.md). Valor predeterminado: 1000000000 nanosegundos (una vez por segundo). Ver también: -- Tabla del sistema [trace\_log](../system_tables.md#system_tables-trace_log) +- Tabla del sistema [trace\_log](../../operations/system_tables.md#system_tables-trace_log) ## Los resultados de la prueba {#query_profiler_cpu_time_period_ns} -Establece el período para un temporizador de reloj de CPU [perfilador de consultas](../../operations/performance/sampling_query_profiler.md). Este temporizador solo cuenta el tiempo de CPU. +Establece el período para un temporizador de reloj de CPU [perfilador de consultas](../../operations/optimizing_performance/sampling_query_profiler.md). Este temporizador solo cuenta el tiempo de CPU. Valores posibles: @@ -1115,29 +1129,29 @@ Valores posibles: - 0 para apagar el temporizador. -Tipo: [UInt64](../../data_types/int_uint.md). +Tipo: [UInt64](../../sql_reference/data_types/int_uint.md). Valor predeterminado: 1000000000 nanosegundos. Ver también: -- Tabla del sistema [trace\_log](../system_tables.md#system_tables-trace_log) +- Tabla del sistema [trace\_log](../../operations/system_tables.md#system_tables-trace_log) ## allow\_introspection\_functions {#settings-allow_introspection_functions} -Habilita deshabilita [funciones de introspecciones](../../query_language/functions/introspection.md) para la creación de perfiles de consultas. +Habilita deshabilita [funciones de introspecciones](../../sql_reference/functions/introspection.md) para la creación de perfiles de consultas. Valores posibles: -- 1 — Funciones de introspección habilitadas. -- 0 — Funciones de introspección deshabilitadas. +- 1 — Introspection functions enabled. +- 0 — Introspection functions disabled. Valor predeterminado: 0. **Ver también** -- [Analizador de consultas de muestreo](../performance/sampling_query_profiler.md) -- Tabla del sistema [trace\_log](../system_tables.md#system_tables-trace_log) +- [Analizador de consultas de muestreo](../optimizing_performance/sampling_query_profiler.md) +- Tabla del sistema [trace\_log](../../operations/system_tables.md#system_tables-trace_log) ## input\_format\_parallel\_parsing {#input-format-parallel-parsing} @@ -1161,9 +1175,9 @@ Tipo: cadena Valores posibles: -- `null` — Sin compresión -- `deflate` — Comprimir con Deflate (zlib) -- `snappy` — Comprimir con [Rápido](https://google.github.io/snappy/) +- `null` — No compression +- `deflate` — Compress with Deflate (zlib) +- `snappy` — Compress with [Rápido](https://google.github.io/snappy/) Valor predeterminado: `snappy` (si está disponible) o `deflate`. @@ -1179,10 +1193,10 @@ Valor predeterminado: 32768 (32 KiB) ## Todos los derechos reservados. 
{#settings-format_avro_schema_registry_url} -Establece la URL del Registro de esquemas confluentes para usar con [AvroConfluent](../../interfaces/formats.md#data-format-avro-confluent) Formato +Establece la URL del Registro de esquemas confluentes para usar con [AvroConfluent](../../interfaces/formats.md#data-format-avro-confluent) formato Tipo: URL Valor predeterminado: Vacío -[Artículo Original](https://clickhouse.tech/docs/es/operations/settings/settings/) +[Artículo Original](https://clickhouse.tech/docs/en/operations/settings/settings/) diff --git a/docs/es/operations/settings/settings_profiles.md b/docs/es/operations/settings/settings_profiles.md index 3bc93b7dc02..21a73d6f9e1 100644 --- a/docs/es/operations/settings/settings_profiles.md +++ b/docs/es/operations/settings/settings_profiles.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 61 +toc_title: "Perfiles de configuraci\xF3n" --- # Perfiles de configuración {#settings-profiles} @@ -65,4 +68,4 @@ El ejemplo especifica dos perfiles: `default` y `web`. El `default` tiene un pro Los perfiles de configuración pueden heredar unos de otros. Para usar la herencia, indique una o varias `profile` configuraciones antes de las demás configuraciones que se enumeran en el perfil. En caso de que se defina una configuración en diferentes perfiles, se utiliza la última definida. -[Artículo Original](https://clickhouse.tech/docs/es/operations/settings/settings_profiles/) +[Artículo Original](https://clickhouse.tech/docs/en/operations/settings/settings_profiles/) diff --git a/docs/es/operations/settings/settings_users.md b/docs/es/operations/settings/settings_users.md index 32d3791b72f..ec03b34ff50 100644 --- a/docs/es/operations/settings/settings_users.md +++ b/docs/es/operations/settings/settings_users.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 63 +toc_title: "Configuraci\xF3n del usuario" --- # Configuración del usuario {#user-settings} @@ -67,23 +70,23 @@ La contraseña se puede especificar en texto sin formato o en SHA256 (formato he La primera línea del resultado es la contraseña. La segunda línea es el hash SHA1 doble correspondiente. -### Nombre\_usuario/redes {#user-namenetworks} +### user\_name/redes {#user-namenetworks} Lista de redes desde las que el usuario puede conectarse al servidor ClickHouse. Cada elemento de la lista puede tener una de las siguientes formas: -- `` — Dirección IP o máscara de red. +- `` — IP address or network mask. Ejemplos: `213.180.204.3`, `10.0.0.1/8`, `10.0.0.1/255.255.255.0`, `2a02:6b8::3`, `2a02:6b8::3/64`, `2a02:6b8::3/ffff:ffff:ffff:ffff::`. -- `` — Nombre de host. +- `` — Hostname. Ejemplo: `example01.host.ru`. Para comprobar el acceso, se realiza una consulta DNS y todas las direcciones IP devueltas se comparan con la dirección del mismo nivel. -- `` — Expresión regular para nombres de host. +- `` — Regular expression for hostnames. Ejemplo, `^example\d\d-\d\d-\d\.host\.ru$` @@ -100,7 +103,7 @@ Para abrir el acceso del usuario desde cualquier red, especifique: ``` !!! warning "Advertencia" - Es inseguro abrir el acceso desde cualquier red a menos que tenga un firewall configurado correctamente o el servidor no esté conectado directamente a Internet. + No es seguro abrir el acceso desde cualquier red a menos que tenga un firewall configurado correctamente o el servidor no esté conectado directamente a Internet. 
Para abrir el acceso solo desde localhost, especifique: @@ -109,11 +112,11 @@ Para abrir el acceso solo desde localhost, especifique: 127.0.0.1 ``` -### Nombre\_usuario/perfil {#user-nameprofile} +### user\_name/perfil {#user-nameprofile} Puede asignar un perfil de configuración para el usuario. Los perfiles de configuración se configuran en una sección separada del archivo `users.xml`. Para obtener más información, consulte [Perfiles de configuración](settings_profiles.md). -### Nombre\_usuario/cuota {#user-namequota} +### user\_name/cuota {#user-namequota} Las cuotas le permiten realizar un seguimiento o limitar el uso de recursos durante un período de tiempo. Las cuotas se configuran en la sección `quotas` del archivo de configuración `users.xml`. @@ -126,7 +129,7 @@ En esta sección, puede limitar las filas devueltas por ClickHouse para `SELECT` **Ejemplo** -La siguiente configuración obliga a que el usuario `user1` sólo puede ver las filas de `table1` como resultado de `SELECT` Consultas, donde el valor de la `id` campo es 1000. +La siguiente configuración hace que el usuario `user1` sólo pueda ver las filas de `table1` como resultado de consultas `SELECT` en las que el valor del campo `id` es 1000. ``` xml <database_name> <table1> <filter>id = 1000</filter> </table1> </database_name> ``` -El `filter` puede ser cualquier expresión que resulte en un [UInt8](../../data_types/int_uint.md)-tipo de valor. Por lo general, contiene comparaciones y operadores lógicos. Filas de `database_name.table1` donde los resultados del filtro a 0 no se devuelven para este usuario. El filtrado es incompatible con `PREWHERE` operaciones y desactiva `WHERE→PREWHERE` optimización. +El `filter` puede ser cualquier expresión que dé como resultado un valor de tipo [UInt8](../../sql_reference/data_types/int_uint.md). Por lo general, contiene comparaciones y operadores lógicos. Las filas de `database_name.table1` para las que el filtro da 0 no se devuelven a este usuario. El filtrado es incompatible con las operaciones `PREWHERE` y desactiva la optimización `WHERE→PREWHERE`. -[Artículo Original](https://clickhouse.tech/docs/es/operations/settings/settings_users/) +[Artículo Original](https://clickhouse.tech/docs/en/operations/settings/settings_users/) diff --git a/docs/es/operations/system_tables.md b/docs/es/operations/system_tables.md index d2f1bf8b1b0..590ff99bc13 100644 --- a/docs/es/operations/system_tables.md +++ b/docs/es/operations/system_tables.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 52 +toc_title: Tablas del sistema --- # Tablas del sistema {#system-tables} @@ -8,7 +11,7 @@ Las tablas del sistema se utilizan para implementar parte de la funcionalidad de No puede eliminar una tabla del sistema (pero puede realizar DETACH). Las tablas del sistema no tienen archivos con datos en el disco o archivos con metadatos. El servidor crea todas las tablas del sistema cuando se inicia. Las tablas del sistema son de solo lectura. -Están ubicados en el ‘system’ basar. +Están ubicadas en la base de datos ‘system’. ## sistema.asynchronous\_metrics {#system_tables-asynchronous_metrics} Contiene métricas que se calculan periódicamente en segundo plano. Por ejemplo, Columna: -- `metric` ([Cadena](../data_types/string.md)) — Nombre métrico. -- `value` ([Float64](../data_types/float.md)) — Valor métrico. +- `metric` ([Cadena](../sql_reference/data_types/string.md)) — Metric name. 
+- `value` ([Float64](../sql_reference/data_types/float.md)) — Metric value. **Ejemplo** @@ -42,10 +45,10 @@ SELECT * FROM system.asynchronous_metrics LIMIT 10 **Ver también** -- [Monitoreo](monitoring.md) — Conceptos básicos de monitoreo ClickHouse. -- [sistema.métricas](#system_tables-metrics) - Contiene métricas calculadas al instante. -- [sistema.evento](#system_tables-events) — Contiene una serie de eventos que han ocurrido. -- [sistema.metric\_log](#system_tables-metric_log) — Contiene un historial de valores de métricas de tablas `system.metrics` , . `system.events`. +- [Monitoreo](monitoring.md) — Base concepts of ClickHouse monitoring. +- [sistema.métricas](#system_tables-metrics) — Contains instantly calculated metrics. +- [sistema.evento](#system_tables-events) — Contains a number of events that have occurred. +- [sistema.metric\_log](#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` and `system.events`. ## sistema.Cluster {#system-clusters} Contiene información sobre los clústeres disponibles en el archivo de configur Columna: -- `cluster` (String) — El nombre del clúster. -- `shard_num` (UInt32) — El número de fragmento en el clúster, a partir de 1. -- `shard_weight` (UInt32) — El peso relativo del fragmento al escribir datos. -- `replica_num` (UInt32) — El número de réplica en el fragmento, a partir de 1. -- `host_name` (String) — El nombre de host, como se especifica en la configuración. -- `host_address` (String) — La dirección IP del host obtenida de DNS. -- `port` (UInt16): el puerto que se utiliza para conectarse al servidor. -- `user` (String) — El nombre del usuario para conectarse al servidor. +- `cluster` (String) — The cluster name. +- `shard_num` (UInt32) — The shard number in the cluster, starting from 1. +- `shard_weight` (UInt32) — The relative weight of the shard when writing data. +- `replica_num` (UInt32) — The replica number in the shard, starting from 1. +- `host_name` (String) — The host name, as specified in the config. +- `host_address` (String) — The host IP address obtained from DNS. +- `port` (UInt16) — The port to use for connecting to the server. +- `user` (String) — The name of the user for connecting to the server. - `errors_count` (UInt32): número de veces que este host no pudo alcanzar la réplica. - `estimated_recovery_time` (UInt32): quedan segundos hasta que el recuento de errores de réplica se ponga a cero y se considere que vuelve a la normalidad. @@ -68,7 +71,7 @@ Tenga en cuenta que `errors_count` se actualiza una vez por consulta al clúster **Ver también** -- [Motor de tabla distribuido](table_engines/distributed.md) +- [Motor de tabla distribuido](../engines/table_engines/special/distributed.md) - [distributed\_replica\_error\_cap configuración](settings/settings.md#settings-distributed_replica_error_cap) - [distributed\_replica\_error\_half\_life configuración](settings/settings.md#settings-distributed_replica_error_half_life) @@ -76,24 +79,24 @@ Tenga en cuenta que `errors_count` se actualiza una vez por consulta al clúster Contiene información sobre las columnas de todas las tablas. -Puede utilizar esta tabla para obtener información similar a la [TABLA DE DESCRIBE](../query_language/misc.md#misc-describe-table) Consulta, pero para varias tablas a la vez. +Puede utilizar esta tabla para obtener información similar a la consulta [DESCRIBE TABLE](../sql_reference/statements/misc.md#misc-describe-table), pero para varias tablas a la vez. 
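Como esbozo mínimo de uso (los nombres de base de datos y tabla `merge.visits` son hipotéticos), una consulta de este estilo lista las columnas de una tabla concreta:

``` sql
SELECT name, type, default_kind, is_in_primary_key
FROM system.columns
WHERE database = 'merge' AND table = 'visits' -- nombres hipotéticos
LIMIT 10
```
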
La tabla `system.columns` contiene las siguientes columnas (el tipo de columna se muestra entre corchetes): -- `database` (String) — Nombre de la base de datos. -- `table` (Cadena) — Nombre de tabla. -- `name` (Cadena) — Nombre de columna. -- `type` (Cadena) — Tipo de columna. -- `default_kind` (String) — Tipo de expresión (`DEFAULT`, `MATERIALIZED`, `ALIAS`) para el valor predeterminado, o una cadena vacía si no está definida. -- `default_expression` (String) — Expresión para el valor predeterminado, o una cadena vacía si no está definida. -- `data_compressed_bytes` (UInt64): el tamaño de los datos comprimidos, en bytes. -- `data_uncompressed_bytes` (UInt64): el tamaño de los datos descomprimidos, en bytes. -- `marks_bytes` (UInt64) — El tamaño de las marcas, en bytes. -- `comment` (Cadena): comenta la columna o una cadena vacía si no está definida. -- `is_in_partition_key` (UInt8): marca que indica si la columna está en la expresión de partición. -- `is_in_sorting_key` (UInt8): marca que indica si la columna está en la expresión de clave de ordenación. -- `is_in_primary_key` (UInt8): marca que indica si la columna está en la expresión de clave principal. -- `is_in_sampling_key` (UInt8): marca que indica si la columna está en la expresión de clave de muestreo. +- `database` (String) — Database name. +- `table` (String) — Table name. +- `name` (String) — Column name. +- `type` (String) — Column type. +- `default_kind` (String) — Expression type (`DEFAULT`, `MATERIALIZED`, `ALIAS`) para el valor predeterminado, o una cadena vacía si no está definida. +- `default_expression` (String) — Expression for the default value, or an empty string if it is not defined. +- `data_compressed_bytes` (UInt64) — The size of compressed data, in bytes. +- `data_uncompressed_bytes` (UInt64) — The size of decompressed data, in bytes. +- `marks_bytes` (UInt64) — The size of marks, in bytes. +- `comment` (String) — Comment on the column, or an empty string if it is not defined. +- `is_in_partition_key` (UInt8) — Flag that indicates whether the column is in the partition expression. +- `is_in_sorting_key` (UInt8) — Flag that indicates whether the column is in the sorting key expression. +- `is_in_primary_key` (UInt8) — Flag that indicates whether the column is in the primary key expression. +- `is_in_sampling_key` (UInt8) — Flag that indicates whether the column is in the sampling key expression. ## sistema.colaborador {#system-contributors} Contiene información sobre los colaboradores. Todos los constributores en orden Columna: -- `name` (Cadena) - Nombre del colaborador (autor) del git log. +- `name` (String) — Contributor (author) name from git log. **Ejemplo** @@ -136,35 +139,35 @@ SELECT * FROM system.contributors WHERE name='Olga Khvostikova' └──────────────────┘ ``` -## sistema.basar {#system-databases} +## sistema.base {#system-databases} -Esta tabla contiene una sola columna String llamada ‘name’ – el nombre de una base de datos. +Esta tabla contiene una sola columna String llamada ‘name’ – the name of a database. Cada base de datos que el servidor conoce tiene una entrada correspondiente en la tabla. Esta tabla del sistema se utiliza para implementar la consulta `SHOW DATABASES`. ## sistema.detached\_parts {#system_tables-detached_parts} -Contiene información sobre piezas separadas de [Método de codificación de datos:](table_engines/mergetree.md) tabla. El `reason` columna especifica por qué se separó la pieza. Para las piezas separadas por el usuario, el motivo está vacío. 
Tales partes se pueden unir con [ALTER TABLE ATTACH PARTITION\|PARTE](../query_language/query_language/alter/#alter_attach-partition) comando. Para obtener la descripción de otras columnas, consulte [sistema.parte](#system_tables-parts). Si el nombre de la pieza no es válido, los valores de algunas columnas pueden ser `NULL`. Tales partes se pueden eliminar con [ALTER MESA GOTA PARTE DESMONTADA](../query_language/query_language/alter/#alter_drop-detached). +Contiene información sobre las piezas separadas de tablas [MergeTree](../engines/table_engines/mergetree_family/mergetree.md). La columna `reason` especifica por qué se separó la pieza. Para las piezas separadas por el usuario, el motivo está vacío. Tales partes se pueden unir con el comando [ALTER TABLE ATTACH PARTITION\|PART](../query_language/query_language/alter/#alter_attach-partition). Para obtener la descripción de otras columnas, consulte [sistema.parte](#system_tables-parts). Si el nombre de la pieza no es válido, los valores de algunas columnas pueden ser `NULL`. Tales partes se pueden eliminar con [ALTER TABLE DROP DETACHED PART](../query_language/query_language/alter/#alter_drop-detached). -## sistema.Diccionario {#system-dictionaries} +## sistema.diccionario {#system-dictionaries} Contiene información sobre diccionarios externos. Columna: -- `name` (Cadena) — Nombre del diccionario. -- `type` (Cadena) - Tipo de diccionario: plano, hash, caché. -- `origin` (String) — Ruta de acceso al archivo de configuración que describe el diccionario. -- `attribute.names` (Array(String)) — Matriz de nombres de atributos proporcionados por el diccionario. -- `attribute.types` (Array(String)) — Matriz correspondiente de tipos de atributos que proporciona el diccionario. -- `has_hierarchy` (UInt8) - Si el diccionario es jerárquico. -- `bytes_allocated` (UInt64) - La cantidad de RAM que usa el diccionario. -- `hit_rate` (Float64): para los diccionarios de caché, el porcentaje de usos para los que el valor estaba en la caché. -- `element_count` (UInt64) — El número de elementos almacenados en el diccionario. -- `load_factor` (Float64): el porcentaje rellenado en el diccionario (para un diccionario hash, el porcentaje rellenado en la tabla hash). -- `creation_time` (DateTime): la hora en que se creó el diccionario o se recargó correctamente por última vez. -- `last_exception` (Cadena) — Texto del error que se produce al crear o volver a cargar el diccionario si no se pudo crear el diccionario. -- `source` (String) — Texto que describe el origen de datos para el diccionario. +- `name` (String) — Dictionary name. +- `type` (String) — Dictionary type: Flat, Hashed, Cache. +- `origin` (String) — Path to the configuration file that describes the dictionary. +- `attribute.names` (Array(String)) — Array of attribute names provided by the dictionary. +- `attribute.types` (Array(String)) — Corresponding array of attribute types that are provided by the dictionary. +- `has_hierarchy` (UInt8) — Whether the dictionary is hierarchical. +- `bytes_allocated` (UInt64) — The amount of RAM the dictionary uses. +- `hit_rate` (Float64) — For cache dictionaries, the percentage of uses for which the value was in the cache. +- `element_count` (UInt64) — The number of items stored in the dictionary. +- `load_factor` (Float64) — The percentage filled in the dictionary (for a hashed dictionary, the percentage filled in the hash table). +- `creation_time` (DateTime) — The time when the dictionary was created or last successfully reloaded. 
+- `last_exception` (String) — Text of the error that occurs when creating or reloading the dictionary if the dictionary couldn't be created. +- `source` (String) — Text describing the data source for the dictionary. Tenga en cuenta que la cantidad de memoria utilizada por el diccionario no es proporcional a la cantidad de elementos almacenados en él. Por lo tanto, para los diccionarios planos y en caché, todas las celdas de memoria se asignan previamente, independientemente de qué tan lleno esté realmente el diccionario. @@ -174,9 +177,9 @@ Contiene información sobre el número de eventos que se han producido en el sis Columna: -- `event` ([Cadena](../data_types/string.md)) — Nombre del evento. -- `value` ([UInt64](../data_types/int_uint.md)) — Número de eventos ocurridos. -- `description` ([Cadena](../data_types/string.md)) — Descripción del evento. +- `event` ([Cadena](../sql_reference/data_types/string.md)) — Event name. +- `value` ([UInt64](../sql_reference/data_types/int_uint.md)) — Number of events occurred. +- `description` ([Cadena](../sql_reference/data_types/string.md)) — Event description. **Ejemplo** @@ -196,10 +199,10 @@ SELECT * FROM system.events LIMIT 5 **Ver también** -- [sistema.asynchronous\_metrics](#system_tables-asynchronous_metrics) — Contiene métricas calculadas periódicamente. -- [sistema.métricas](#system_tables-metrics) - Contiene métricas calculadas al instante. -- [sistema.metric\_log](#system_tables-metric_log) — Contiene un historial de valores de métricas de tablas `system.metrics` , . `system.events`. -- [Monitoreo](monitoring.md) — Conceptos básicos de monitoreo ClickHouse. +- [sistema.asynchronous\_metrics](#system_tables-asynchronous_metrics) — Contains periodically calculated metrics. +- [sistema.métricas](#system_tables-metrics) — Contains instantly calculated metrics. +- [sistema.metric\_log](#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` and `system.events`. +- [Monitoreo](monitoring.md) — Base concepts of ClickHouse monitoring. ## sistema.función {#system-functions} Contiene información sobre funciones normales y agregadas. Columna: -- `name`(`String`) – El nombre de la función. -- `is_aggregate`(`UInt8`) — Si la función es agregada. +- `name`(`String`) – The name of the function. +- `is_aggregate`(`UInt8`) — Whether the function is aggregate. ## sistema.graphite\_retentions {#system-graphite-retentions} -Contiene información sobre los parámetros [graphite\_rollup](server_settings/settings.md#server_settings-graphite_rollup) que se utilizan en tablas con [\*GraphiteMergeTree](table_engines/graphitemergetree.md) motor. +Contiene información sobre los parámetros [graphite\_rollup](server_configuration_parameters/settings.md#server_configuration_parameters-graphite_rollup) que se utilizan en tablas con el motor [\*GraphiteMergeTree](../engines/table_engines/mergetree_family/graphitemergetree.md). Columna: @@ -226,25 +229,25 @@ Columna: - `Tables.database` (Array(String)) - Matriz de nombres de bases de datos de tablas que utilizan el parámetro `config_name`. - `Tables.table` (Array(String)) - Matriz de nombres de tablas que utilizan el parámetro `config_name`. -## sistema.Fusionar {#system-merges} +## sistema.fusionar {#system-merges} Contiene información sobre fusiones y mutaciones de piezas actualmente en proceso para tablas de la familia MergeTree. Columna: -- `database` (String) — El nombre de la base de datos en la que se encuentra la tabla. -- `table` (Cadena) — Nombre de tabla. 
-- `elapsed` (Float64) — El tiempo transcurrido (en segundos) desde que se inició la fusión. -- `progress` (Float64) — El porcentaje de trabajo completado de 0 a 1. -- `num_parts` (UInt64) — El número de piezas que se fusionarán. -- `result_part_name` (Cadena) — El nombre de la parte que se formará como resultado de la fusión. +- `database` (String) — The name of the database the table is in. +- `table` (String) — Table name. +- `elapsed` (Float64) — The time elapsed (in seconds) since the merge started. +- `progress` (Float64) — The percentage of completed work from 0 to 1. +- `num_parts` (UInt64) — The number of pieces to be merged. +- `result_part_name` (String) — The name of the part that will be formed as the result of merging. - `is_mutation` (UInt8) - 1 si este proceso es una mutación de una parte. -- `total_size_bytes_compressed` (UInt64): el tamaño total de los datos comprimidos en los fragmentos combinados. -- `total_size_marks` (UInt64) — Número total de marcas en las partes fusionadas. -- `bytes_read_uncompressed` (UInt64) — Número de bytes leídos, sin comprimir. -- `rows_read` (UInt64) — Número de filas leídas. -- `bytes_written_uncompressed` (UInt64) — Número de bytes escritos, sin comprimir. -- `rows_written` (UInt64) — Número de filas escritas. +- `total_size_bytes_compressed` (UInt64) — The total size of the compressed data in the merged chunks. +- `total_size_marks` (UInt64) — The total number of marks in the merged parts. +- `bytes_read_uncompressed` (UInt64) — Number of bytes read, uncompressed. +- `rows_read` (UInt64) — Number of rows read. +- `bytes_written_uncompressed` (UInt64) — Number of bytes written, uncompressed. +- `rows_written` (UInt64) — Number of rows written. ## sistema.métricas {#system_tables-metrics} Contiene métricas que pueden calcularse instantáneamente o tener un valor actu Columna: -- `metric` ([Cadena](../data_types/string.md)) — Nombre métrico. -- `value` ([Int64](../data_types/int_uint.md)) — Valor métrico. -- `description` ([Cadena](../data_types/string.md)) — Descripción métrica. +- `metric` ([Cadena](../sql_reference/data_types/string.md)) — Metric name. +- `value` ([Int64](../sql_reference/data_types/int_uint.md)) — Metric value. +- `description` ([Cadena](../sql_reference/data_types/string.md)) — Metric description. La lista de métricas admitidas se puede encontrar en el archivo fuente de ClickHouse [src/Common/CurrentMetrics.cpp](https://github.com/ClickHouse/ClickHouse/blob/master/src/Common/CurrentMetrics.cpp). **Ejemplo** @@ -281,10 +284,10 @@ SELECT * FROM system.metrics LIMIT 10 **Ver también** -- [sistema.asynchronous\_metrics](#system_tables-asynchronous_metrics) — Contiene métricas calculadas periódicamente. -- [sistema.evento](#system_tables-events) — Contiene una serie de eventos que ocurrieron. -- [sistema.metric\_log](#system_tables-metric_log) — Contiene un historial de valores de métricas de tablas `system.metrics` , . `system.events`. -- [Monitoreo](monitoring.md) — Conceptos básicos de monitoreo ClickHouse. +- [sistema.asynchronous\_metrics](#system_tables-asynchronous_metrics) — Contains periodically calculated metrics. +- [sistema.evento](#system_tables-events) — Contains a number of events that occurred. +- [sistema.metric\_log](#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` and `system.events`. +- [Monitoreo](monitoring.md) — Base concepts of ClickHouse monitoring. 
## sistema.metric\_log {#system_tables-metric_log} @@ -337,12 +340,12 @@ CurrentMetric_ReplicatedChecks: 0 **Ver también** -- [sistema.asynchronous\_metrics](#system_tables-asynchronous_metrics) — Contiene métricas calculadas periódicamente. -- [sistema.evento](#system_tables-events) — Contiene una serie de eventos que ocurrieron. -- [sistema.métricas](#system_tables-metrics) - Contiene métricas calculadas al instante. -- [Monitoreo](monitoring.md) — Conceptos básicos de monitoreo ClickHouse. +- [sistema.asynchronous\_metrics](#system_tables-asynchronous_metrics) — Contains periodically calculated metrics. +- [sistema.evento](#system_tables-events) — Contains a number of events that occurred. +- [sistema.métricas](#system_tables-metrics) — Contains instantly calculated metrics. +- [Monitoreo](monitoring.md) — Base concepts of ClickHouse monitoring. -## sistema.número {#system-numbers} +## sistema.numero {#system-numbers} Esta tabla contiene una única columna UInt64 llamada ‘number’ que contiene casi todos los números naturales a partir de cero. Puede usar esta tabla para pruebas, o si necesita hacer una búsqueda de fuerza bruta. @@ -361,133 +364,133 @@ Esto es similar a la tabla DUAL que se encuentra en otros DBMS. ## sistema.parte {#system_tables-parts} -Contiene información sobre partes de [Método de codificación de datos:](table_engines/mergetree.md) tabla. +Contiene información sobre las partes de tablas [MergeTree](../engines/table_engines/mergetree_family/mergetree.md). -Cada fila describe una parte de datos. +Cada fila describe una parte de los datos. Columna: -- `partition` (Cadena) – el nombre de La partición. Para saber qué es una partición, consulte la descripción del [ALTERAR](../query_language/alter.md#query_language_queries_alter) consulta. +- `partition` (String) – The partition name. To learn what a partition is, see the description of the [ALTER](../sql_reference/statements/alter.md#query_language_queries_alter) consulta. Formato: - `YYYYMM` para la partición automática por mes. - `any_string` al particionar manualmente. -- `name` (`String`) – Nombre de la parte de datos. +- `name` (`String`) – Name of the data part. -- `active` (`UInt8`) – Indicador que indica si la parte de datos está activa. Si un elemento de datos está activo, se utiliza en una tabla. De lo contrario, se elimina. Las partes de datos inactivas permanecen después de la fusión. +- `active` (`UInt8`) – Flag that indicates whether the data part is active. If a data part is active, it's used in a table. Otherwise, it's deleted. Inactive data parts remain after merging. -- `marks` (`UInt64`) – El número de puntos. Para obtener el número aproximado de filas en una parte de datos, multiplique `marks` por la granularidad del índice (generalmente 8192) (esta sugerencia no funciona para la granularidad adaptativa). +- `marks` (`UInt64`) – The number of marks. To get the approximate number of rows in a data part, multiply `marks` por la granularidad del índice (generalmente 8192) (esta sugerencia no funciona para la granularidad adaptativa). -- `rows` (`UInt64`) – El número de filas. +- `rows` (`UInt64`) – The number of rows. -- `bytes_on_disk` (`UInt64`) – Tamaño total de todos los archivos de parte de datos en bytes. +- `bytes_on_disk` (`UInt64`) – Total size of all the data part files in bytes. -- `data_compressed_bytes` (`UInt64`) – Tamaño total de los datos comprimidos en la parte de datos. Todos los archivos auxiliares (por ejemplo, archivos con marcas) no están incluidos. 
+- `data_compressed_bytes` (`UInt64`) – Total size of compressed data in the data part. All the auxiliary files (for example, files with marks) are not included. -- `data_uncompressed_bytes` (`UInt64`) – Tamaño total de los datos sin comprimir en la parte de datos. Todos los archivos auxiliares (por ejemplo, archivos con marcas) no están incluidos. +- `data_uncompressed_bytes` (`UInt64`) – Total size of uncompressed data in the data part. All the auxiliary files (for example, files with marks) are not included. -- `marks_bytes` (`UInt64`) – El tamaño del archivo con marcas. +- `marks_bytes` (`UInt64`) – The size of the file with marks. -- `modification_time` (`DateTime`) – La hora en que se modificó el directorio con la parte de datos. Esto normalmente corresponde a la hora de creación del elemento de datos.\| +- `modification_time` (`DateTime`) – The time the directory with the data part was modified. This usually corresponds to the time of data part creation.\| -- `remove_time` (`DateTime`) – El momento en que la parte de datos quedó inactiva. +- `remove_time` (`DateTime`) – The time when the data part became inactive. -- `refcount` (`UInt32`) – El número de lugares donde se utiliza la parte de datos. Un valor mayor que 2 indica que el elemento de datos se utiliza en consultas o fusiones. +- `refcount` (`UInt32`) – The number of places where the data part is used. A value greater than 2 indicates that the data part is used in queries or merges. -- `min_date` (`Date`) – El valor mínimo de la clave de fecha en la parte de datos. +- `min_date` (`Date`) – The minimum value of the date key in the data part. -- `max_date` (`Date`) – El valor máximo de la clave de fecha en la parte de datos. +- `max_date` (`Date`) – The maximum value of the date key in the data part. -- `min_time` (`DateTime`) – El valor mínimo de la clave de fecha y hora en la parte de datos. +- `min_time` (`DateTime`) – The minimum value of the date and time key in the data part. -- `max_time`(`DateTime`) – El valor máximo de la clave de fecha y hora en la parte de datos. +- `max_time`(`DateTime`) – The maximum value of the date and time key in the data part. -- `partition_id` (`String`) – ID de la partición. +- `partition_id` (`String`) – ID of the partition. -- `min_block_number` (`UInt64`) – El número mínimo de partes de datos que componen la parte actual después de la fusión. +- `min_block_number` (`UInt64`) – The minimum number of data parts that make up the current part after merging. -- `max_block_number` (`UInt64`) – El número máximo de partes de datos que componen la parte actual después de la fusión. +- `max_block_number` (`UInt64`) – The maximum number of data parts that make up the current part after merging. -- `level` (`UInt32`) – Profundidad del árbol de fusión. Cero significa que la parte actual se creó mediante inserción en lugar de fusionar otras partes. +- `level` (`UInt32`) – Depth of the merge tree. Zero means that the current part was created by insert rather than by merging other parts. -- `data_version` (`UInt64`) – Número que se utiliza para determinar qué mutaciones se deben aplicar a la parte de datos (mutaciones con una versión superior a `data_version`). +- `data_version` (`UInt64`) – Number that is used to determine which mutations should be applied to the data part (mutations with a version higher than `data_version`). -- `primary_key_bytes_in_memory` (`UInt64`) – La cantidad de memoria (en bytes) utilizada por los valores de clave primaria. 
+- `primary_key_bytes_in_memory` (`UInt64`) – The amount of memory (in bytes) used by primary key values. -- `primary_key_bytes_in_memory_allocated` (`UInt64`) – La cantidad de memoria (en bytes) reservada para los valores de clave primaria. +- `primary_key_bytes_in_memory_allocated` (`UInt64`) – The amount of memory (in bytes) reserved for primary key values. -- `is_frozen` (`UInt8`) – Indicador que muestra que existe una copia de seguridad de datos de partición. 1, la copia de seguridad existe. 0, la copia de seguridad no existe. Para obtener más información, consulte [CONGELAR PARTICIÓN](../query_language/alter.md#alter_freeze-partition) +- `is_frozen` (`UInt8`) – Flag that shows that a partition data backup exists. 1, the backup exists. 0, the backup doesn't exist. For more details, see [FREEZE PARTITION](../sql_reference/statements/alter.md#alter_freeze-partition) -- `database` (`String`) – Nombre de la base de datos. +- `database` (`String`) – Name of the database. -- `table` (`String`) – Nombre de la tabla. +- `table` (`String`) – Name of the table. -- `engine` (`String`) – Nombre del motor de tabla sin parámetros. +- `engine` (`String`) – Name of the table engine without parameters. -- `path` (`String`) – Ruta absoluta a la carpeta con archivos de parte de datos. +- `path` (`String`) – Absolute path to the folder with data part files. -- `disk` (`String`) – Nombre de un disco que almacena la parte de datos. +- `disk` (`String`) – Name of a disk that stores the data part. -- `hash_of_all_files` (`String`) – [sipHash128](../query_language/functions/hash_functions.md#hash_functions-siphash128) de archivos comprimidos. +- `hash_of_all_files` (`String`) – [sipHash128](../sql_reference/functions/hash_functions.md#hash_functions-siphash128) de archivos comprimidos. -- `hash_of_uncompressed_files` (`String`) – [sipHash128](../query_language/functions/hash_functions.md#hash_functions-siphash128) de archivos sin comprimir (archivos con marcas, archivo de índice, etc.). +- `hash_of_uncompressed_files` (`String`) – [sipHash128](../sql_reference/functions/hash_functions.md#hash_functions-siphash128) de archivos sin comprimir (archivos con marcas, archivo de índice, etc.). -- `uncompressed_hash_of_compressed_files` (`String`) – [sipHash128](../query_language/functions/hash_functions.md#hash_functions-siphash128) de datos en los archivos comprimidos como si estuvieran descomprimidos. +- `uncompressed_hash_of_compressed_files` (`String`) – [sipHash128](../sql_reference/functions/hash_functions.md#hash_functions-siphash128) de datos en los archivos comprimidos como si estuvieran descomprimidos. -- `bytes` (`UInt64`) – Alias para `bytes_on_disk`. +- `bytes` (`UInt64`) – Alias for `bytes_on_disk`. -- `marks_size` (`UInt64`) – Alias para `marks_bytes`. +- `marks_size` (`UInt64`) – Alias for `marks_bytes`. ## sistema.part\_log {#system_tables-part-log} -El `system.part_log` se crea sólo si el [part\_log](server_settings/settings.md#server_settings-part-log) se especifica la configuración del servidor. +La tabla `system.part_log` se crea sólo si se especifica la configuración del servidor [part\_log](server_configuration_parameters/settings.md#server_configuration_parameters-part-log). -Esta tabla contiene información sobre eventos que ocurrieron con [partes de datos](table_engines/custom_partitioning_key.md) es el [Método de codificación de datos:](table_engines/mergetree.md) tablas familiares, como agregar o fusionar datos. 
+Esta tabla contiene información sobre eventos que ocurrieron con [partes de datos](../engines/table_engines/mergetree_family/custom_partitioning_key.md) en las tablas de la familia [MergeTree](../engines/table_engines/mergetree_family/mergetree.md), como agregar o fusionar datos. El `system.part_log` contiene las siguientes columnas: -- `event_type` (Enum) — Tipo del evento que ocurrió con la parte de datos. Puede tener uno de los siguientes valores: - - `NEW_PART` — Inserción de una nueva parte de datos. - - `MERGE_PARTS` — Fusión de partes de datos. - - `DOWNLOAD_PART` — Descarga de una parte de datos. - - `REMOVE_PART` — Extracción o separación de una parte de datos mediante [DETACH PARTITION](../query_language/alter.md#alter_detach-partition). - - `MUTATE_PART` — Mutación de una parte de datos. - - `MOVE_PART` — Mover la parte de datos de un disco a otro. -- `event_date` (Fecha) — fecha del Evento. -- `event_time` (DateTime) — Hora del evento. -- `duration_ms` (UInt64) — Duración. -- `database` (String) — Nombre de la base de datos en la que se encuentra la parte de datos. -- `table` (String) — Nombre de la tabla en la que se encuentra la parte de datos. -- `part_name` (String) — Nombre de la parte de datos. -- `partition_id` (String) — ID de la partición en la que se insertó la parte de datos. La columna toma el ‘all’ valor si la partición es por `tuple()`. -- `rows` (UInt64): el número de filas en la parte de datos. -- `size_in_bytes` (UInt64) — Tamaño de la parte de datos en bytes. -- `merged_from` (Array(String)) - Una matriz de nombres de las partes de las que se componía la parte actual (después de la fusión). -- `bytes_uncompressed` (UInt64): tamaño de bytes sin comprimir. -- `read_rows` (UInt64): el número de filas que se leyó durante la fusión. -- `read_bytes` (UInt64): el número de bytes que se leyeron durante la fusión. -- `error` (UInt16) — El número de código del error ocurrido. -- `exception` (Cadena) — Mensaje de texto del error ocurrido. +- `event_type` (Enum) — Type of the event that occurred with the data part. Can have one of the following values: + - `NEW_PART` — Inserting of a new data part. + - `MERGE_PARTS` — Merging of data parts. + - `DOWNLOAD_PART` — Downloading a data part. + - `REMOVE_PART` — Removing or detaching a data part using [DETACH PARTITION](../sql_reference/statements/alter.md#alter_detach-partition). + - `MUTATE_PART` — Mutating of a data part. + - `MOVE_PART` — Moving the data part from the one disk to another one. +- `event_date` (Date) — Event date. +- `event_time` (DateTime) — Event time. +- `duration_ms` (UInt64) — Duration. +- `database` (String) — Name of the database the data part is in. +- `table` (String) — Name of the table the data part is in. +- `part_name` (String) — Name of the data part. +- `partition_id` (String) — ID of the partition that the data part was inserted to. The column takes the ‘all’ valor si la partición es por `tuple()`. +- `rows` (UInt64) — The number of rows in the data part. +- `size_in_bytes` (UInt64) — Size of the data part in bytes. +- `merged_from` (Array(String)) — An array of names of the parts which the current part was made up from (after the merge). +- `bytes_uncompressed` (UInt64) — Size of uncompressed bytes. +- `read_rows` (UInt64) — The number of rows was read during the merge. +- `read_bytes` (UInt64) — The number of bytes was read during the merge. +- `error` (UInt16) — The code number of the occurred error. +- `exception` (String) — Text message of the occurred error. 
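Como esbozo de uso (suponiendo que ya hay eventos registrados), una consulta de este estilo revisa las últimas fusiones con error:

``` sql
SELECT event_time, database, table, part_name, error, exception
FROM system.part_log
WHERE event_type = 'MERGE_PARTS' AND error != 0
ORDER BY event_time DESC
LIMIT 10
```
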
La tabla `system.part_log` se crea después de la primera inserción de datos en una tabla `MergeTree`. -## sistema.proceso {#system_tables-processes} +## sistema.procesa {#system_tables-processes} Esta tabla del sistema se utiliza para implementar la consulta `SHOW PROCESSLIST`. Columna: -- `user` (Cadena): el usuario que realizó la consulta. Tenga en cuenta que para el procesamiento distribuido, las consultas se envían a servidores remotos `default` usuario. El campo contiene el nombre de usuario para una consulta específica, no para una consulta que esta consulta inició. -- `address` (Cadena): la dirección IP desde la que se realizó la solicitud. Lo mismo para el procesamiento distribuido. Para realizar un seguimiento de dónde se hizo originalmente una consulta distribuida, mire `system.processes` en el servidor de solicitud de consulta. -- `elapsed` (Float64): el tiempo en segundos desde que se inició la ejecución de la solicitud. -- `rows_read` (UInt64): el número de filas leídas de la tabla. Para el procesamiento distribuido, en el solicitante servidor, este es el total para todos los servidores remotos. -- `bytes_read` (UInt64): el número de bytes sin comprimir leídos de la tabla. Para el procesamiento distribuido, en el solicitante servidor, este es el total para todos los servidores remotos. -- `total_rows_approx` (UInt64): la aproximación del número total de filas que se deben leer. Para el procesamiento distribuido, en el solicitante servidor, este es el total para todos los servidores remotos. Se puede actualizar durante el procesamiento de solicitudes, cuando se conozcan nuevas fuentes para procesar. -- `memory_usage` (UInt64): cantidad de RAM que usa la solicitud. Puede que no incluya algunos tipos de memoria dedicada. Ver el [Método de codificación de datos:](../operations/settings/query_complexity.md#settings_max_memory_usage) configuración. -- `query` (Cadena) – el texto de La consulta. Para `INSERT`, no incluye los datos para insertar. -- `query_id` (Cadena): ID de consulta, si se define. +- `user` (String) – The user who made the query. Keep in mind that for distributed processing, queries are sent to remote servers under the `default` usuario. El campo contiene el nombre de usuario para una consulta específica, no para una consulta que esta consulta inició. +- `address` (String) – The IP address the request was made from. The same for distributed processing. To track where a distributed query was originally made from, look at `system.processes` en el servidor de solicitud de consulta. +- `elapsed` (Float64) – The time in seconds since request execution started. +- `rows_read` (UInt64) – The number of rows read from the table. For distributed processing, on the requestor server, this is the total for all remote servers. +- `bytes_read` (UInt64) – The number of uncompressed bytes read from the table. For distributed processing, on the requestor server, this is the total for all remote servers. +- `total_rows_approx` (UInt64) – The approximation of the total number of rows that should be read. For distributed processing, on the requestor server, this is the total for all remote servers. It can be updated during request processing, when new sources to process become known. +- `memory_usage` (UInt64) – Amount of RAM the request uses. It might not include some types of dedicated memory. See the [max\_memory\_usage](../operations/settings/query_complexity.md#settings_max_memory_usage) configuración. +- `query` (String) – The query text. 
For `INSERT`, no incluye los datos para insertar. +- `query_id` (String) – Query ID, if defined. ## sistema.text\_log {#system-tables-text-log} Contiene entradas de registro. El nivel de registro que va a esta tabla se puede Columna: - `event_date` (`Date`) - Fecha de la entrada. -- `event_time` (`DateTime`) - Hora de la entrada. +- `event_time` (`DateTime`) - Tiempo de la entrada. - `microseconds` (`UInt32`) - Microsegundos de la entrada. -- `thread_name` (Cadena) — Nombre del subproceso desde el que se realizó el registro. -- `thread_id` (UInt64) - ID de subproceso del sistema operativo. +- `thread_name` (String) — Name of the thread from which the logging was done. +- `thread_id` (UInt64) — OS thread ID. - `level` (`Enum8`) - Nivel de entrada. - `'Fatal' = 1` - `'Critical' = 2` - `'Error' = 3` - `'Warning' = 4` - `'Notice' = 5` - `'Information' = 6` - `'Debug' = 7` - `'Trace' = 8` - `query_id` (`String`) - ID de la consulta. -- `logger_name` (`LowCardinality(String)`) - Nombre del registrador (es decir, `DDLWorker`) +- `logger_name` (`LowCardinality(String)`) - Name of the logger (i.e. `DDLWorker`) - `message` (`String`) - El mensaje en sí. - `revision` (`UInt32`) - Revisión de ClickHouse. - `source_file` (`LowCardinality(String)`) - Archivo de origen desde el que se realizó el registro. @@ -523,69 +526,69 @@ Contiene información sobre la ejecución de consultas. Para cada consulta, pued !!! note "Nota" La tabla no contiene datos de entrada para `INSERT` consulta. -ClickHouse crea esta tabla sólo si el [query\_log](server_settings/settings.md#server_settings-query-log) se especifica el parámetro server. Este parámetro establece las reglas de registro, como el intervalo de registro o el nombre de la tabla en la que se registrarán las consultas. +ClickHouse crea esta tabla sólo si se especifica el parámetro de servidor [query\_log](server_configuration_parameters/settings.md#server_configuration_parameters-query-log). Este parámetro establece las reglas de registro, como el intervalo de registro o el nombre de la tabla en la que se registrarán las consultas. Para habilitar el registro de consultas, establezca el parámetro [Log\_queries](settings/settings.md#settings-log-queries) en 1. Para obtener más información, consulte el apartado [Configuración](settings/settings.md). La tabla `system.query_log` registra dos tipos de consultas: 1. Consultas iniciales ejecutadas directamente por el cliente. -2. Consultas secundarias iniciadas por otras consultas (para la ejecución de consultas distribuidas). Para estos tipos de consultas, la información sobre las consultas principales se muestra en el `initial_*` columna. +2. Consultas secundarias iniciadas por otras consultas (para la ejecución distribuida de consultas). Para estos tipos de consultas, la información sobre las consultas principales se muestra en las columnas `initial_*`. Columna: -- `type` (`Enum8`) — Tipo de evento que se produjo al ejecutar la consulta. Valor: - - `'QueryStart' = 1` — Inicio exitoso de la ejecución de la consulta. - - `'QueryFinish' = 2` — Final exitoso de la ejecución de la consulta. - - `'ExceptionBeforeStart' = 3` — Excepción antes del inicio de la ejecución de la consulta. - - `'ExceptionWhileProcessing' = 4` — Excepción durante la ejecución de la consulta. -- `event_date` (Fecha) — Fecha de inicio de la consulta. -- `event_time` (DateTime) — Hora de inicio de la consulta. -- `query_start_time` (DateTime) — Hora de inicio de la ejecución de la consulta. -- `query_duration_ms` (UInt64) — Duración de la ejecución de la consulta. 
-- `read_rows` (UInt64) — Número de filas leídas. -- `read_bytes` (UInt64) — Número de bytes leídos. -- `written_rows` (UInt64) — Para `INSERT` consultas, el número de filas escritas. Para otras consultas, el valor de la columna es 0. -- `written_bytes` (UInt64) — Para `INSERT` consultas, el número de bytes escritos. Para otras consultas, el valor de la columna es 0. -- `result_rows` (UInt64) — Número de filas en el resultado. -- `result_bytes` (UInt64) — Número de bytes en el resultado. -- `memory_usage` (UInt64) — Consumo de memoria por la consulta. -- `query` (Cadena) — Cadena de consulta. -- `exception` (String) — Mensaje de excepción. -- `stack_trace` (String) - Rastreo de pila (una lista de métodos llamados antes de que ocurriera el error). Una cadena vacía, si la consulta se completa correctamente. -- `is_initial_query` (UInt8) — Tipo de consulta. Valores posibles: - - 1 — La consulta fue iniciada por el cliente. - - 0 — La consulta fue iniciada por otra consulta para la ejecución de consultas distribuidas. -- `user` (String) — Nombre del usuario que inició la consulta actual. -- `query_id` (String) — ID de la consulta. -- `address` (IPv6): dirección IP que se utilizó para realizar la consulta. -- `port` (UInt16): el puerto de cliente que se utilizó para realizar la consulta. -- `initial_user` (String) — Nombre del usuario que ejecutó la consulta inicial (para la ejecución de consultas distribuidas). -- `initial_query_id` (String) — ID de la consulta inicial (para la ejecución de consultas distribuidas). -- `initial_address` (IPv6): dirección IP desde la que se inició la consulta principal. -- `initial_port` (UInt16): el puerto de cliente que se utilizó para realizar la consulta principal. -- `interface` (UInt8): interfaz desde la que se inició la consulta. Valores posibles: +- `type` (`Enum8`) — Type of event that occurred when executing the query. Values: + - `'QueryStart' = 1` — Successful start of query execution. + - `'QueryFinish' = 2` — Successful end of query execution. + - `'ExceptionBeforeStart' = 3` — Exception before the start of query execution. + - `'ExceptionWhileProcessing' = 4` — Exception during the query execution. +- `event_date` (Date) — Query starting date. +- `event_time` (DateTime) — Query starting time. +- `query_start_time` (DateTime) — Start time of query execution. +- `query_duration_ms` (UInt64) — Duration of query execution. +- `read_rows` (UInt64) — Number of read rows. +- `read_bytes` (UInt64) — Number of read bytes. +- `written_rows` (UInt64) — For `INSERT` consultas, el número de filas escritas. Para otras consultas, el valor de la columna es 0. +- `written_bytes` (UInt64) — For `INSERT` consultas, el número de bytes escritos. Para otras consultas, el valor de la columna es 0. +- `result_rows` (UInt64) — Number of rows in the result. +- `result_bytes` (UInt64) — Number of bytes in the result. +- `memory_usage` (UInt64) — Memory consumption by the query. +- `query` (String) — Query string. +- `exception` (String) — Exception message. +- `stack_trace` (String) — Stack trace (a list of methods called before the error occurred). An empty string, if the query is completed successfully. +- `is_initial_query` (UInt8) — Query type. Possible values: + - 1 — Query was initiated by the client. + - 0 — Query was initiated by another query for distributed query execution. +- `user` (String) — Name of the user who initiated the current query. +- `query_id` (String) — ID of the query. +- `address` (IPv6) — IP address that was used to make the query. 
+- `port` (UInt16) — The client port that was used to make the query. +- `initial_user` (String) — Name of the user who ran the initial query (for distributed query execution). +- `initial_query_id` (String) — ID of the initial query (for distributed query execution). +- `initial_address` (IPv6) — IP address that the parent query was launched from. +- `initial_port` (UInt16) — The client port that was used to make the parent query. +- `interface` (UInt8) — Interface that the query was initiated from. Possible values: - 1 — TCP. - 2 — HTTP. +- `os_user` (String) — OS's username who runs [clickhouse-client](../interfaces/cli.md). +- `client_hostname` (String) — Hostname of the client machine where the [clickhouse-client](../interfaces/cli.md) o se ejecuta otro cliente TCP. +- `client_name` (String) — The [clickhouse-client](../interfaces/cli.md) o otro nombre de cliente TCP. +- `client_revision` (UInt32) — Revision of the [clickhouse-client](../interfaces/cli.md) o otro cliente TCP. +- `client_version_major` (UInt32) — Major version of the [clickhouse-client](../interfaces/cli.md) o otro cliente TCP. +- `client_version_minor` (UInt32) — Minor version of the [clickhouse-client](../interfaces/cli.md) o otro cliente TCP. +- `client_version_patch` (UInt32) — Patch component of the [clickhouse-client](../interfaces/cli.md) o otra versión de cliente TCP. +- `http_method` (UInt8) — HTTP method that initiated the query. 
Possible values: - 0 — The query was launched from the TCP interface. - 1 — `GET` se utilizó el método. - 2 — `POST` se utilizó el método. +- `http_user_agent` (String) — The `UserAgent` encabezado pasado en la solicitud HTTP. +- `quota_key` (String) — The “quota key” especificado en el [cuota](quotas.md) ajuste (ver `keyed`). +- `revision` (UInt32) — ClickHouse revision. +- `thread_numbers` (Array(UInt32)) — Number of threads that are participating in query execution. +- `ProfileEvents.Names` (Array(String)) — Counters that measure different metrics. The description of them could be found in the table [sistema.evento](#system_tables-events) +- `ProfileEvents.Values` (Array(UInt64)) — Values of metrics that are listed in the `ProfileEvents.Names` columna. +- `Settings.Names` (Array(String)) — Names of settings that were changed when the client ran the query. To enable logging changes to settings, set the `log_query_settings` parámetro a 1. +- `Settings.Values` (Array(String)) — Values of settings that are listed in the `Settings.Names` columna. Cada consulta crea una o dos filas en la tabla `query_log`, dependiendo del estado de la consulta: @@ -593,108 +596,108 @@ Cada consulta crea una o dos filas en el `query_log` tabla, dependiendo del esta 2. Si se produjo un error durante el procesamiento de la consulta, se crean dos eventos con los tipos 1 y 4. 3. Si se produjo un error antes de iniciar la consulta, se crea un solo evento con el tipo 3. -De forma predeterminada, los registros se agregan a la tabla a intervalos de 7,5 segundos. Puede establecer este intervalo en el [query\_log](server_settings/settings.md#server_settings-query-log) configuración del servidor (consulte el `flush_interval_milliseconds` parámetro). Para vaciar los registros a la fuerza desde el búfer de memoria a la tabla, utilice `SYSTEM FLUSH LOGS` consulta. +De forma predeterminada, los registros se agregan a la tabla a intervalos de 7,5 segundos. Puede establecer este intervalo en la configuración del servidor [query\_log](server_configuration_parameters/settings.md#server_configuration_parameters-query-log) (consulte el parámetro `flush_interval_milliseconds`). Para vaciar los registros a la fuerza desde el búfer de memoria a la tabla, utilice la consulta `SYSTEM FLUSH LOGS`. Cuando la tabla se elimina manualmente, se creará automáticamente sobre la marcha. Tenga en cuenta que se eliminarán todos los registros anteriores. !!! note "Nota" El período de almacenamiento para los registros es ilimitado. Los registros no se eliminan automáticamente de la tabla. Debe organizar la eliminación de registros obsoletos usted mismo. -Puede especificar una clave de partición arbitraria `system.query_log` mesa en el [query\_log](server_settings/settings.md#server_settings-query-log) configuración del servidor (consulte el `partition_by` parámetro). +Puede especificar una clave de partición arbitraria para la tabla `system.query_log` en la configuración del servidor [query\_log](server_configuration_parameters/settings.md#server_configuration_parameters-query-log) (consulte el parámetro `partition_by`). ## sistema.query\_thread\_log {#system_tables-query-thread-log} La tabla contiene información sobre cada subproceso de ejecución de consultas. -ClickHouse crea esta tabla sólo si el [Sistema abierto.](server_settings/settings.md#server_settings-query-thread-log) se especifica el parámetro server. Este parámetro establece las reglas de registro, como el intervalo de registro o el nombre de la tabla en la que se registrarán las consultas. 
+ClickHouse crea esta tabla sólo si se especifica el parámetro de servidor [query\_thread\_log](server_configuration_parameters/settings.md#server_configuration_parameters-query-thread-log). Este parámetro establece las reglas de registro, como el intervalo de registro o el nombre de la tabla en la que se registrarán las consultas. Para habilitar el registro de consultas, establezca el parámetro [Log\_query\_threads](settings/settings.md#settings-log-query-threads) en 1. Para obtener más información, consulte el apartado [Configuración](settings/settings.md). Columna: -- `event_date` (Fecha) — la fecha en que el subproceso ha finalizado la ejecución de la consulta. -- `event_time` (DateTime) — la fecha y hora en que el subproceso ha finalizado la ejecución de la consulta. -- `query_start_time` (DateTime) — Hora de inicio de la ejecución de la consulta. -- `query_duration_ms` (UInt64) — Duración de la ejecución de la consulta. -- `read_rows` (UInt64) — Número de filas leídas. -- `read_bytes` (UInt64) — Número de bytes leídos. -- `written_rows` (UInt64) — Para `INSERT` consultas, el número de filas escritas. Para otras consultas, el valor de la columna es 0. -- `written_bytes` (UInt64) — Para `INSERT` consultas, el número de bytes escritos. Para otras consultas, el valor de la columna es 0. -- `memory_usage` (Int64) - La diferencia entre la cantidad de memoria asignada y liberada en el contexto de este hilo. -- `peak_memory_usage` (Int64) - La diferencia máxima entre la cantidad de memoria asignada y liberada en el contexto de este hilo. -- `thread_name` (String) — Nombre del hilo. -- `thread_number` (UInt32) - ID de rosca interna. -- `os_thread_id` (Int32) - ID de subproceso del sistema operativo. -- `master_thread_id` (UInt64) - ID inicial del sistema operativo del hilo inicial. -- `query` (Cadena) — Cadena de consulta. -- `is_initial_query` (UInt8) — Tipo de consulta. Valores posibles: - - 1 — La consulta fue iniciada por el cliente. - - 0 — La consulta fue iniciada por otra consulta para la ejecución de consultas distribuidas. -- `user` (String) — Nombre del usuario que inició la consulta actual. -- `query_id` (String) — ID de la consulta. -- `address` (IPv6): dirección IP que se utilizó para realizar la consulta. -- `port` (UInt16): el puerto de cliente que se utilizó para realizar la consulta. -- `initial_user` (String) — Nombre del usuario que ejecutó la consulta inicial (para la ejecución de consultas distribuidas). -- `initial_query_id` (String) — ID de la consulta inicial (para la ejecución de consultas distribuidas). -- `initial_address` (IPv6): dirección IP desde la que se inició la consulta principal. -- `initial_port` (UInt16): el puerto de cliente que se utilizó para realizar la consulta principal. -- `interface` (UInt8): interfaz desde la que se inició la consulta. Valores posibles: +- `event_date` (Date) — the date when the thread has finished execution of the query. +- `event_time` (DateTime) — the date and time when the thread has finished execution of the query. +- `query_start_time` (DateTime) — Start time of query execution. +- `query_duration_ms` (UInt64) — Duration of query execution. +- `read_rows` (UInt64) — Number of read rows. +- `read_bytes` (UInt64) — Number of read bytes. +- `written_rows` (UInt64) — For `INSERT` consultas, el número de filas escritas. Para otras consultas, el valor de la columna es 0. +- `written_bytes` (UInt64) — For `INSERT` consultas, el número de bytes escritos. Para otras consultas, el valor de la columna es 0. 
+- `memory_usage` (Int64) — The difference between the amount of allocated and freed memory in context of this thread. +- `peak_memory_usage` (Int64) — The maximum difference between the amount of allocated and freed memory in context of this thread. +- `thread_name` (String) — Name of the thread. +- `thread_number` (UInt32) — Internal thread ID. +- `os_thread_id` (Int32) — OS thread ID. +- `master_thread_id` (UInt64) — OS initial ID of initial thread. +- `query` (String) — Query string. +- `is_initial_query` (UInt8) — Query type. Possible values: - 1 — Query was initiated by the client. - 0 — Query was initiated by another query for distributed query execution. +- `user` (String) — Name of the user who initiated the current query. +- `query_id` (String) — ID of the query. +- `address` (IPv6) — IP address that was used to make the query. +- `port` (UInt16) — The client port that was used to make the query. +- `initial_user` (String) — Name of the user who ran the initial query (for distributed query execution). +- `initial_query_id` (String) — ID of the initial query (for distributed query execution). +- `initial_address` (IPv6) — IP address that the parent query was launched from. +- `initial_port` (UInt16) — The client port that was used to make the parent query. +- `interface` (UInt8) — Interface that the query was initiated from. Possible values: - 1 — TCP. - 2 — HTTP. +- `os_user` (String) — OS's username who runs [clickhouse-client](../interfaces/cli.md). +- `client_hostname` (String) — Hostname of the client machine where the [clickhouse-client](../interfaces/cli.md) o se ejecuta otro cliente TCP. +- `client_name` (String) — The [clickhouse-client](../interfaces/cli.md) o otro nombre de cliente TCP. +- `client_revision` (UInt32) — Revision of the [clickhouse-client](../interfaces/cli.md) o otro cliente TCP. 
+- `client_version_major` (UInt32) — Major version of the [Casa de clics-cliente](../interfaces/cli.md) o otro cliente TCP. +- `client_version_minor` (UInt32) — Minor version of the [Casa de clics-cliente](../interfaces/cli.md) o otro cliente TCP. +- `client_version_patch` (UInt32) — Patch component of the [Casa de clics-cliente](../interfaces/cli.md) o otra versión de cliente TCP. +- `http_method` (UInt8) — HTTP method that initiated the query. Possible values: + - 0 — The query was launched from the TCP interface. + - 1 — `GET` se utilizó el método. + - 2 — `POST` se utilizó el método. +- `http_user_agent` (String) — The `UserAgent` encabezado pasado en la solicitud HTTP. +- `quota_key` (String) — The “quota key” especificado en el [cuota](quotas.md) ajuste (ver `keyed`). +- `revision` (UInt32) — ClickHouse revision. +- `ProfileEvents.Names` (Array(String)) — Counters that measure different metrics for this thread. The description of them could be found in the table [sistema.evento](#system_tables-events) +- `ProfileEvents.Values` (Array(UInt64)) — Values of metrics for this thread that are listed in the `ProfileEvents.Names` columna. -De forma predeterminada, los registros se agregan a la tabla a intervalos de 7,5 segundos. Puede establecer este intervalo en el [Sistema abierto.](server_settings/settings.md#server_settings-query-thread-log) configuración del servidor (consulte el `flush_interval_milliseconds` parámetro). Para vaciar los registros a la fuerza desde el búfer de memoria a la tabla, utilice `SYSTEM FLUSH LOGS` consulta. +De forma predeterminada, los registros se agregan a la tabla a intervalos de 7,5 segundos. Puede establecer este intervalo en el [Sistema abierto.](server_configuration_parameters/settings.md#server_configuration_parameters-query-thread-log) configuración del servidor (consulte el `flush_interval_milliseconds` parámetro). Para vaciar los registros a la fuerza desde el búfer de memoria a la tabla, utilice `SYSTEM FLUSH LOGS` consulta. Cuando la tabla se elimina manualmente, se creará automáticamente sobre la marcha. Tenga en cuenta que se eliminarán todos los registros anteriores. !!! note "Nota" El período de almacenamiento para los registros es ilimitado. Los registros no se eliminan automáticamente de la tabla. Debe organizar la eliminación de registros obsoletos usted mismo. -Puede especificar una clave de partición arbitraria `system.query_thread_log` mesa en el [Sistema abierto.](server_settings/settings.md#server_settings-query-thread-log) configuración del servidor (consulte el `partition_by` parámetro). +Puede especificar una clave de partición arbitraria `system.query_thread_log` mesa en el [Sistema abierto.](server_configuration_parameters/settings.md#server_configuration_parameters-query-thread-log) configuración del servidor (consulte el `partition_by` parámetro). ## sistema.trace\_log {#system_tables-trace_log} Contiene seguimientos de pila recopilados por el generador de perfiles de consultas de muestreo. -ClickHouse crea esta tabla cuando el [trace\_log](server_settings/settings.md#server_settings-trace_log) se establece la sección de configuración del servidor. También el [query\_profiler\_real\_time\_period\_ns](settings/settings.md#query_profiler_real_time_period_ns) y [Los resultados de la prueba](settings/settings.md#query_profiler_cpu_time_period_ns) los ajustes deben establecerse. 
+ClickHouse crea esta tabla cuando el [trace\_log](server_configuration_parameters/settings.md#server_configuration_parameters-trace_log) se establece la sección de configuración del servidor. También el [query\_profiler\_real\_time\_period\_ns](settings/settings.md#query_profiler_real_time_period_ns) y [Los resultados de la prueba](settings/settings.md#query_profiler_cpu_time_period_ns) los ajustes deben establecerse. Para analizar los registros, utilice el `addressToLine`, `addressToSymbol` y `demangle` funciones de inspección. Columna: -- `event_date`([Fecha](../data_types/date.md)) — Fecha del momento del muestreo. +- `event_date`([Fecha](../sql_reference/data_types/date.md)) — Date of sampling moment. -- `event_time`([FechaHora](../data_types/datetime.md)) — Marca de tiempo del momento de muestreo. +- `event_time`([FechaHora](../sql_reference/data_types/datetime.md)) — Timestamp of sampling moment. -- `revision`([UInt32](../data_types/int_uint.md)) — Revisión de compilación del servidor ClickHouse. +- `revision`([UInt32](../sql_reference/data_types/int_uint.md)) — ClickHouse server build revision. Cuando se conecta al servidor por `clickhouse-client`, ves la cadena similar a `Connected to ClickHouse server version 19.18.1 revision 54429.`. Este campo contiene el `revision`, pero no el `version` de un servidor. -- `timer_type`([Enum8](../data_types/enum.md)) — Tipo de temporizador: +- `timer_type`([Enum8](../sql_reference/data_types/enum.md)) — Timer type: - `Real` representa el tiempo del reloj de pared. - `CPU` representa el tiempo de CPU. -- `thread_number`([UInt32](../data_types/int_uint.md)) — Identificador del subproceso. +- `thread_number`([UInt32](../sql_reference/data_types/int_uint.md)) — Thread identifier. -- `query_id`([Cadena](../data_types/string.md)) — Identificador de consulta que se puede utilizar para obtener detalles sobre una consulta que se estaba ejecutando desde el [query\_log](#system_tables-query_log) tabla del sistema. +- `query_id`([Cadena](../sql_reference/data_types/string.md)) — Query identifier that can be used to get details about a query that was running from the [query\_log](#system_tables-query_log) tabla del sistema. -- `trace`([Matriz (UInt64)](../data_types/array.md)) — Rastro de apilamiento en el momento del muestreo. Cada elemento es una dirección de memoria virtual dentro del proceso del servidor ClickHouse. +- `trace`([Matriz (UInt64)](../sql_reference/data_types/array.md)) — Stack trace at the moment of sampling. Each element is a virtual memory address inside ClickHouse server process. **Ejemplo** @@ -780,7 +783,7 @@ Columna: - `zookeeper_path` (`String`) - Ruta de acceso a los datos de la tabla en ZooKeeper. - `replica_name` (`String`) - Nombre de réplica en ZooKeeper. Diferentes réplicas de la misma tabla tienen diferentes nombres. - `replica_path` (`String`) - Ruta de acceso a los datos de réplica en ZooKeeper. Lo mismo que concatenar ‘zookeeper\_path/replicas/replica\_path’. -- `columns_version` (`Int32`) - Número de versión de la estructura de la tabla. Indica cuántas veces se realizó ALTER. Si las réplicas tienen versiones diferentes, significa que algunas réplicas aún no han realizado todas las ALTER. +- `columns_version` (`Int32`) - Número de versión de la estructura de la tabla. Indica cuántas veces se realizó ALTER. Si las réplicas tienen versiones diferentes, significa que algunas réplicas aún no han hecho todas las ALTER. - `queue_size` (`UInt32`) - Tamaño de la cola para las operaciones en espera de ser realizadas. 
Las operaciones incluyen insertar bloques de datos, fusiones y otras acciones. Por lo general, coincide con `future_parts`. - `inserts_in_queue` (`UInt32`) - Número de inserciones de bloques de datos que deben realizarse. Las inserciones generalmente se replican con bastante rapidez. Si este número es grande, significa que algo anda mal. - `merges_in_queue` (`UInt32`) - El número de fusiones en espera de hacerse. A veces las fusiones son largas, por lo que este valor puede ser mayor que cero durante mucho tiempo. @@ -843,40 +846,57 @@ Es decir, se usa para ejecutar la consulta que está utilizando para leer del si Columna: -- `name` (Cadena) — Nombre de configuración. -- `value` (Cadena) — Valor de ajuste. -- `changed` (UInt8): si la configuración se definió explícitamente en la configuración o si se cambió explícitamente. +- `name` (String) — Setting name. +- `value` (String) — Setting value. +- `description` (String) — Setting description. +- `type` (String) — Setting type (implementation specific string value). +- `changed` (UInt8) — Whether the setting was explicitly defined in the config or explicitly changed. +- `min` (Nullable(String)) — Get minimum allowed value (if any is set via [limitación](settings/constraints_on_settings.md#constraints-on-settings)). +- `max` (Nullable(String)) — Get maximum allowed value (if any is set via [limitación](settings/constraints_on_settings.md#constraints-on-settings)). +- `readonly` (UInt8) — Can user change this setting (for more info, look into [limitación](settings/constraints_on_settings.md#constraints-on-settings)). Ejemplo: ``` sql -SELECT * +SELECT name, value FROM system.settings WHERE changed ``` ``` text -┌─name───────────────────┬─value───────┬─changed─┐ -│ max_threads │ 8 │ 1 │ -│ use_uncompressed_cache │ 0 │ 1 │ -│ load_balancing │ random │ 1 │ -│ max_memory_usage │ 10000000000 │ 1 │ -└────────────────────────┴─────────────┴─────────┘ +┌─name───────────────────┬─value───────┐ +│ max_threads │ 8 │ +│ use_uncompressed_cache │ 0 │ +│ load_balancing │ random │ +│ max_memory_usage │ 10000000000 │ +└────────────────────────┴─────────────┘ ``` +## sistema.merge\_tree\_settings {#system-merge_tree_settings} + +Contiene información sobre la configuración `MergeTree` tabla. + +Columna: + +- `name` (String) — Setting name. +- `value` (String) — Setting value. +- `description` (String) — Setting description. +- `type` (String) — Setting type (implementation specific string value). +- `changed` (UInt8) — Whether the setting was explicitly defined in the config or explicitly changed. + ## sistema.table\_engines {#system-table-engines} Contiene la descripción de los motores de tablas admitidos por el servidor y su información de soporte de características. Esta tabla contiene las siguientes columnas (el tipo de columna se muestra entre corchetes): -- `name` (Cadena) — El nombre del motor de tabla. -- `supports_settings` (UInt8): marca que indica si el motor de tabla admite `SETTINGS` clausula. -- `supports_skipping_indices` (UInt8): marca que indica si el motor de tabla admite [Índices de saltos](table_engines/mergetree/#table_engine-mergetree-data_skipping-indexes). -- `supports_ttl` (UInt8): marca que indica si el motor de tabla admite [TTL](table_engines/mergetree/#table_engine-mergetree-ttl). -- `supports_sort_order` (UInt8): marca que indica si el motor de tablas admite cláusulas `PARTITION_BY`, `PRIMARY_KEY`, `ORDER_BY` y `SAMPLE_BY`. 
-- `supports_replication` (UInt8): marca que indica si el motor de tabla admite [Replicación de datos](table_engines/replication/). -- `supports_duduplication` (UInt8): marca que indica si el motor de tablas admite la desduplicación de datos. +- `name` (String) — The name of table engine. +- `supports_settings` (UInt8) — Flag that indicates if table engine supports `SETTINGS` clausula. +- `supports_skipping_indices` (UInt8) — Flag that indicates if table engine supports [Índices de saltos](../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-data_skipping-indexes). +- `supports_ttl` (UInt8) — Flag that indicates if table engine supports [TTL](../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-ttl). +- `supports_sort_order` (UInt8) — Flag that indicates if table engine supports clauses `PARTITION_BY`, `PRIMARY_KEY`, `ORDER_BY` y `SAMPLE_BY`. +- `supports_replication` (UInt8) — Flag that indicates if table engine supports [Replicación de datos](../engines/table_engines/mergetree_family/replication.md). +- `supports_duduplication` (UInt8) — Flag that indicates if table engine supports data deduplication. Ejemplo: @@ -896,9 +916,9 @@ WHERE name in ('Kafka', 'MergeTree', 'ReplicatedCollapsingMergeTree') **Ver también** -- Familia MergeTree [cláusulas de consulta](table_engines/mergetree.md#mergetree-query-clauses) -- Kafka [configuración](table_engines/kafka.md#table_engine-kafka-creating-a-table) -- Unir [configuración](table_engines/join.md#join-limitations-and-settings) +- Familia MergeTree [cláusulas de consulta](../engines/table_engines/mergetree_family/mergetree.md#mergetree-query-clauses) +- Kafka [configuración](../engines/table_engines/integrations/kafka.md#table_engine-kafka-creating-a-table) +- Unir [configuración](../engines/table_engines/special/join.md#join-limitations-and-settings) ## sistema.tabla {#system-tables} @@ -906,49 +926,75 @@ Contiene metadatos de cada tabla que el servidor conoce. Las tablas separadas no Esta tabla contiene las siguientes columnas (el tipo de columna se muestra entre corchetes): -- `database` (String) — El nombre de la base de datos en la que se encuentra la tabla. -- `name` (Cadena) — Nombre de tabla. -- `engine` (Cadena) — Nombre del motor de tabla (sin parámetros). +- `database` (String) — The name of the database the table is in. + +- `name` (String) — Table name. + +- `engine` (String) — Table engine name (without parameters). + - `is_temporary` (UInt8): marca que indica si la tabla es temporal. + - `data_path` (String) - Ruta de acceso a los datos de la tabla en el sistema de archivos. + - `metadata_path` (String) - Ruta de acceso a los metadatos de la tabla en el sistema de archivos. + - `metadata_modification_time` (DateTime) - Hora de la última modificación de los metadatos de la tabla. + - `dependencies_database` (Array(String)) - Dependencias de base de datos. -- `dependencies_table` (Array(String)) - Dependencias de tabla ([Método de codificación de datos:](table_engines/materializedview.md) tablas basadas en la tabla real). + +- `dependencies_table` (Array(String)) - Dependencias de tabla ([Método de codificación de datos:](../engines/table_engines/special/materializedview.md) tablas basadas en la tabla actual). + - `create_table_query` (String) - La consulta que se utilizó para crear la tabla. + - `engine_full` (String) - Parámetros del motor de tabla. + - `partition_key` (String) - La expresión de clave de partición especificada en la tabla. 
-- `sorting_key` (String) - La expresión de clave de ordenación especificada en la tabla. + +- `sorting_key` (Cadena) - La clave de clasificación de la expresión especificada en la tabla. + - `primary_key` (String) - La expresión de clave principal especificada en la tabla. + - `sampling_key` (String) - La expresión de clave de muestreo especificada en la tabla. -El `system.tables` se utiliza en `SHOW TABLES` implementación de consultas. +- `storage_policy` (String) - La política de almacenamiento: + + - [Método de codificación de datos:](../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-multiple-volumes) + - [Distribuido](../engines/table_engines/special/distributed.md#distributed) + +- `total_rows` (Nullable(UInt64)) - Número total de filas, si es posible determinar rápidamente el número exacto de filas en la tabla, de lo contrario `Null` (incluyendo underying `Buffer` tabla). + +- `total_bytes` (Nullable(UInt64)) - Número total de bytes, si es posible determinar rápidamente el número exacto de bytes para la tabla en el almacenamiento, de lo contrario `Null` (**no** incluye cualquier almacenamiento subyacente). + + - If the table stores data on disk, returns used space on disk (i.e. compressed). + - Si la tabla almacena datos en la memoria, devuelve el número aproximado de bytes utilizados en la memoria. + +El `system.tables` se utiliza la tabla en `SHOW TABLES` implementación de consultas. ## sistema.Zookeeper {#system-zookeeper} La tabla no existe si ZooKeeper no está configurado. Permite leer datos del clúster ZooKeeper definido en la configuración. La consulta debe tener un ‘path’ condición de igualdad en la cláusula WHERE. Este es el camino en ZooKeeper para los niños para los que desea obtener datos. -Consulta `SELECT * FROM system.zookeeper WHERE path = '/clickhouse'` salidas de datos para todos los niños en el `/clickhouse` Nodo. +Consulta `SELECT * FROM system.zookeeper WHERE path = '/clickhouse'` salidas de datos para todos los niños en el `/clickhouse` nodo. Para generar datos para todos los nodos raíz, escriba path = ‘/’. Si la ruta especificada en ‘path’ no existe, se lanzará una excepción. Columna: -- `name` (String) — El nombre del nodo. -- `path` (String) — La ruta al nodo. -- `value` (Cadena) - el Valor de nodo. -- `dataLength` (Int32) — Tamaño del valor. -- `numChildren` (Int32) — Número de descendientes. -- `czxid` (Int64) — ID de la transacción que creó el nodo. -- `mzxid` (Int64) — ID de la transacción que cambió el nodo por última vez. -- `pzxid` (Int64) — ID de la transacción que eliminó o agregó descendientes por última vez. -- `ctime` (DateTime) — Hora de creación del nodo. -- `mtime` (DateTime) — Hora de la última modificación del nodo. -- `version` (Int32) — Versión del nodo: el número de veces que se cambió el nodo. -- `cversion` (Int32) — Número de descendientes añadidos o eliminados. -- `aversion` (Int32) — Número de cambios en la ACL. -- `ephemeralOwner` (Int64): para nodos efímeros, el ID de la sesión que posee este nodo. +- `name` (String) — The name of the node. +- `path` (String) — The path to the node. +- `value` (String) — Node value. +- `dataLength` (Int32) — Size of the value. +- `numChildren` (Int32) — Number of descendants. +- `czxid` (Int64) — ID of the transaction that created the node. +- `mzxid` (Int64) — ID of the transaction that last changed the node. +- `pzxid` (Int64) — ID of the transaction that last deleted or added descendants. +- `ctime` (DateTime) — Time of node creation. 
+- `mtime` (DateTime) — Time of the last modification of the node. +- `version` (Int32) — Node version: the number of times the node was changed. +- `cversion` (Int32) — Number of added or removed descendants. +- `aversion` (Int32) — Number of changes to the ACL. +- `ephemeralOwner` (Int64) — For ephemeral nodes, the ID of the session that owns this node. Ejemplo: @@ -997,17 +1043,17 @@ path: /clickhouse/tables/01-08/visits/replicas ## sistema.mutación {#system_tables-mutations} -La tabla contiene información sobre [mutación](../query_language/alter.md#alter-mutations) de las tablas MergeTree y su progreso. Cada comando de mutación está representado por una sola fila. La tabla tiene las siguientes columnas: +La tabla contiene información sobre [mutación](../sql_reference/statements/alter.md#alter-mutations) de las tablas MergeTree y su progreso. Cada comando de mutación está representado por una sola fila. La tabla tiene las siguientes columnas: -**basar**, **tabla** - El nombre de la base de datos y la tabla a la que se aplicó la mutación. +**base**, **tabla** - El nombre de la base de datos y la tabla a la que se aplicó la mutación. -**mutation\_id** - La identificación de la mutación. Para las tablas replicadas, estos identificadores corresponden a los nombres de znode `/mutations/` Directorio en ZooKeeper. Para las tablas no duplicadas, los ID corresponden a los nombres de archivo en el directorio de datos de la tabla. +**mutation\_id** - La identificación de la mutación. Para las tablas replicadas, estos identificadores corresponden a los nombres de znode `/mutations/` directorio en ZooKeeper. Para las tablas no duplicadas, los ID corresponden a los nombres de archivo en el directorio de datos de la tabla. **comando** - La cadena de comandos de mutación (la parte de la consulta después de `ALTER TABLE [db.]table`). **create\_time** - Cuando este comando de mutación fue enviado para su ejecución. -**block\_numbers.partition\_id**, **block\_numbers.número** - Una columna anidada. Para las mutaciones de tablas replicadas, contiene un registro para cada partición: el ID de partición y el número de bloque que fue adquirido por la mutación (en cada partición, solo se mutarán las partes que contienen bloques con números menores que el número de bloque adquirido por la mutación en esa partición). En tablas no replicadas, los números de bloque en todas las particiones forman una sola secuencia. Esto significa que para las mutaciones de tablas no replicadas, la columna contendrá un registro con un solo número de bloque adquirido por la mutación. +**block\_numbers.partition\_id**, **block\_numbers.numero** - Una columna anidada. Para las mutaciones de tablas replicadas, contiene un registro para cada partición: el ID de partición y el número de bloque que fue adquirido por la mutación (en cada partición, solo se mutarán las partes que contienen bloques con números menores que el número de bloque adquirido por la mutación en esa partición). En tablas no replicadas, los números de bloque en todas las particiones forman una sola secuencia. Esto significa que para las mutaciones de tablas no replicadas, la columna contendrá un registro con un solo número de bloque adquirido por la mutación. **partes\_a\_do** - El número de partes de datos que deben mutarse para que finalice la mutación. 
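A modo de ilustración de la sección `system.mutations`, un esbozo mínimo para vigilar las mutaciones aún pendientes; se supone, además de las columnas descritas arriba, la columna `is_done` (no visible en este fragmento):

``` sql
-- Esbozo ilustrativo: mutaciones sin terminar, las más recientes primero.
SELECT database, table, mutation_id, command, create_time, parts_to_do
FROM system.mutations
WHERE is_done = 0
ORDER BY create_time DESC
```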
@@ -1021,31 +1067,31 @@ Si hubo problemas con la mutación de algunas partes, las siguientes columnas co **Método de codificación de datos:** - El mensaje de excepción que causó el error de mutación de parte más reciente. -## sistema.Discoteca {#system_tables-disks} +## sistema.disco {#system_tables-disks} -Contiene información sobre los discos definidos en el [configuración del servidor](table_engines/mergetree.md#table_engine-mergetree-multiple-volumes_configure). +Contiene información sobre los discos definidos en el [configuración del servidor](../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-multiple-volumes_configure). Columna: -- `name` ([Cadena](../data_types/string.md)) — Nombre de un disco en la configuración del servidor. -- `path` ([Cadena](../data_types/string.md)) — Ruta de acceso al punto de montaje en el sistema de archivos. -- `free_space` ([UInt64](../data_types/int_uint.md)) — Espacio libre en el disco en bytes. -- `total_space` ([UInt64](../data_types/int_uint.md)) — Volumen del disco en bytes. -- `keep_free_space` ([UInt64](../data_types/int_uint.md)) — Cantidad de espacio en disco que debe permanecer libre en el disco en bytes. Definido en el `keep_free_space_bytes` parámetro de configuración del disco. +- `name` ([Cadena](../sql_reference/data_types/string.md)) — Name of a disk in the server configuration. +- `path` ([Cadena](../sql_reference/data_types/string.md)) — Path to the mount point in the file system. +- `free_space` ([UInt64](../sql_reference/data_types/int_uint.md)) — Free space on disk in bytes. +- `total_space` ([UInt64](../sql_reference/data_types/int_uint.md)) — Disk volume in bytes. +- `keep_free_space` ([UInt64](../sql_reference/data_types/int_uint.md)) — Amount of disk space that should stay free on disk in bytes. Defined in the `keep_free_space_bytes` parámetro de configuración del disco. ## sistema.almacenamiento\_policies {#system_tables-storage_policies} -Contiene información sobre las directivas de almacenamiento y los volúmenes [configuración del servidor](table_engines/mergetree.md#table_engine-mergetree-multiple-volumes_configure). +Contiene información sobre las directivas de almacenamiento y los volúmenes [configuración del servidor](../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-multiple-volumes_configure). Columna: -- `policy_name` ([Cadena](../data_types/string.md)) — Nombre de la política de almacenamiento. -- `volume_name` ([Cadena](../data_types/string.md)) — Nombre de volumen definido en la política de almacenamiento. -- `volume_priority` ([UInt64](../data_types/int_uint.md)) — Número de orden de volumen en la configuración. -- `disks` ([Matriz (Cadena)](../data_types/array.md)) — Nombres de disco, definidos en la directiva de almacenamiento. -- `max_data_part_size` ([UInt64](../data_types/int_uint.md)) — Tamaño máximo de una parte de datos que se puede almacenar en discos de volumen (0 — sin límite). -- `move_factor` ([Float64](../data_types/float.md)) — Relación de espacio libre en disco. Cuando la relación excede el valor del parámetro de configuración, ClickHouse comienza a mover los datos al siguiente volumen en orden. +- `policy_name` ([Cadena](../sql_reference/data_types/string.md)) — Name of the storage policy. +- `volume_name` ([Cadena](../sql_reference/data_types/string.md)) — Volume name defined in the storage policy. +- `volume_priority` ([UInt64](../sql_reference/data_types/int_uint.md)) — Volume order number in the configuration. 
+- `disks` ([Array(Cadena)](../sql_reference/data_types/array.md)) — Disk names, defined in the storage policy. +- `max_data_part_size` ([UInt64](../sql_reference/data_types/int_uint.md)) — Maximum size of a data part that can be stored on volume disks (0 — no limit). +- `move_factor` ([Float64](../sql_reference/data_types/float.md)) — Ratio of free disk space. When the ratio exceeds the value of configuration parameter, ClickHouse start to move data to the next volume in order. Si la directiva de almacenamiento contiene más de un volumen, la información de cada volumen se almacena en la fila individual de la tabla. -[Artículo Original](https://clickhouse.tech/docs/es/operations/system_tables/) +[Artículo Original](https://clickhouse.tech/docs/en/operations/system_tables/) diff --git a/docs/es/operations/table_engines/index.md b/docs/es/operations/table_engines/index.md deleted file mode 100644 index abb136e6347..00000000000 --- a/docs/es/operations/table_engines/index.md +++ /dev/null @@ -1,81 +0,0 @@ ---- -machine_translated: true ---- - -# Motores de mesa {#table_engines} - -El motor de tabla (tipo de tabla) determina: - -- Cómo y dónde se almacenan los datos, dónde escribirlos y dónde leerlos. -- Qué consultas son compatibles y cómo. -- Acceso a datos simultáneos. -- Uso de índices, si está presente. -- Si es posible la ejecución de solicitudes multiproceso. -- Parámetros de replicación de datos. - -## Familias de motores {#engine-families} - -### Método de codificación de datos: {#mergetree} - -Los motores de mesa más universales y funcionales para tareas de alta carga. La propiedad compartida por estos motores es la inserción rápida de datos con el posterior procesamiento de datos en segundo plano. `MergeTree` Los motores familiares admiten la replicación de datos (con [Replicado\*](replication.md) versiones de motores), particionamiento y otras características no admitidas en otros motores. - -Motores en la familia: - -- [Método de codificación de datos:](mergetree.md) -- [ReplacingMergeTree](replacingmergetree.md) -- [SummingMergeTree](summingmergetree.md) -- [AgregaciónMergeTree](aggregatingmergetree.md) -- [ColapsarMergeTree](collapsingmergetree.md) -- [VersionedCollapsingMergeTree](versionedcollapsingmergetree.md) -- [GraphiteMergeTree](graphitemergetree.md) - -### Registro {#log} - -Ligero [motor](log_family.md) con funcionalidad mínima. Son los más efectivos cuando necesita escribir rápidamente muchas tablas pequeñas (hasta aproximadamente 1 millón de filas) y leerlas más tarde en su conjunto. - -Motores en la familia: - -- [TinyLog](tinylog.md) -- [StripeLog](stripelog.md) -- [Registro](log.md) - -### Motores de integración {#integration-engines} - -Motores para comunicarse con otros sistemas de almacenamiento y procesamiento de datos. - -Motores en la familia: - -- [Kafka](kafka.md) -- [MySQL](mysql.md) -- [ODBC](odbc.md) -- [JDBC](jdbc.md) -- [HDFS](hdfs.md) - -### Motores especiales {#special-engines} - -Motores en la familia: - -- [Distribuido](distributed.md) -- [Método de codificación de datos:](materializedview.md) -- [Diccionario](dictionary.md) -- [Fusionar](merge.md) -- [File](file.md) -- [Nulo](null.md) -- [Establecer](set.md) -- [Unir](join.md) -- [URL](url.md) -- [Vista](view.md) -- [Memoria](memory.md) -- [Búfer](buffer.md) - -## Columnas virtuales {#table_engines-virtual-columns} - -La columna virtual es un atributo de motor de tabla integral que se define en el código fuente del motor. 
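Como esbozo de lo que se detalla justo después (las columnas virtuales sólo se leen si se nombran explícitamente y suelen llevar guión bajo inicial), una consulta hipotética sobre la columna virtual `_part` de una tabla MergeTree; el nombre de la tabla `visitas` es supuesto:

``` sql
-- Esbozo ilustrativo: _part no aparece en SELECT *, hay que pedirla por nombre.
SELECT _part, count() AS filas
FROM visitas
GROUP BY _part
```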
- -No debe especificar columnas virtuales en el `CREATE TABLE` consulta y no se puede ver en `SHOW CREATE TABLE` y `DESCRIBE TABLE` resultados de la consulta. Las columnas virtuales también son de solo lectura, por lo que no puede insertar datos en columnas virtuales. - -Para seleccionar datos de una columna virtual, debe especificar su nombre en el `SELECT` consulta. `SELECT *` no devuelve valores de columnas virtuales. - -Si crea una tabla con una columna que tiene el mismo nombre que una de las columnas virtuales de la tabla, la columna virtual se vuelve inaccesible. No recomendamos hacer esto. Para ayudar a evitar conflictos, los nombres de columna virtual suelen tener el prefijo de un guión bajo. - -[Artículo Original](https://clickhouse.tech/docs/es/operations/table_engines/) diff --git a/docs/es/operations/table_engines/materializedview.md b/docs/es/operations/table_engines/materializedview.md deleted file mode 100644 index d40213a4d18..00000000000 --- a/docs/es/operations/table_engines/materializedview.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -machine_translated: true ---- - -# Método de codificación de datos: {#materializedview} - -Se utiliza para implementar vistas materializadas (para obtener más información, consulte [CREAR TABLA](../../query_language/create.md)). Para almacenar datos, utiliza un motor diferente que se especificó al crear la vista. Al leer desde una tabla, solo usa este motor. - -[Artículo Original](https://clickhouse.tech/docs/es/operations/table_engines/materializedview/) diff --git a/docs/es/operations/tips.md b/docs/es/operations/tips.md index e07c29a5e5a..19f34f834fc 100644 --- a/docs/es/operations/tips.md +++ b/docs/es/operations/tips.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 58 +toc_title: Recomendaciones de uso --- # Recomendaciones de uso {#usage-recommendations} @@ -23,7 +26,7 @@ Para pequeñas cantidades de datos (hasta ~200 GB comprimidos), es mejor usar ta Para grandes cantidades de datos y al procesar consultas interactivas (en línea), debe usar una cantidad razonable de RAM (128 GB o más) para que el subconjunto de datos en caliente quepa en la memoria caché de páginas. Incluso para volúmenes de datos de ~ 50 TB por servidor, el uso de 128 GB de RAM mejora significativamente el rendimiento de las consultas en comparación con 64 GB. -No deshabilite el sobrecompromiso. Valor `cat /proc/sys/vm/overcommit_memory` Ser debe 0 la 1. Ejecutar +No deshabilite el sobrecompromiso. Valor `cat /proc/sys/vm/overcommit_memory` debe ser 0 o 1. Ejecutar ``` bash $ echo 0 | sudo tee /proc/sys/vm/overcommit_memory @@ -42,8 +45,8 @@ Las páginas enormes permanentes tampoco necesitan ser asignadas. ## Subsistema de almacenamiento {#storage-subsystem} -Si su presupuesto le permite usar SSD, utilizar SSD. -Si no, use HDD. Los discos duros SATA 7200 RPM servirán. +Si su presupuesto le permite usar SSD, use SSD. +Si no, utilice el disco duro. Discos Duros SATA de 7200 RPM va a hacer. Dar preferencia a una gran cantidad de servidores con discos duros locales sobre un número menor de servidores con estantes de discos conectados. Pero para almacenar archivos con consultas raras, los estantes funcionarán. @@ -81,9 +84,9 @@ La mayoría de los otros sistemas de archivos también deberían funcionar bien. ## Núcleo de Linux {#linux-kernel} -No utilice un kernel de Linux obsoleto. +No use un kernel de Linux obsoleto. 
-## Rojo {#network} +## Red {#network} Si está utilizando IPv6, aumente el tamaño de la caché de ruta. El kernel de Linux anterior a 3.2 tenía una multitud de problemas con la implementación de IPv6. @@ -92,9 +95,9 @@ Utilice al menos una red de 10 GB, si es posible. 1 Gb también funcionará, per ## ZooKeeper {#zookeeper} -Probablemente ya esté utilizando ZooKeeper para otros fines. Puede utilizar la misma instalación de ZooKeeper, si aún no está sobrecargado. +Probablemente ya esté utilizando ZooKeeper para otros fines. Puede usar la misma instalación de ZooKeeper, si aún no está sobrecargada. -Lo mejor es utilizar una nueva versión de ZooKeeper – 3.4.9 o posterior. La versión en distribuciones estables de Linux puede estar desactualizada. +It's best to use a fresh version of ZooKeeper – 3.4.9 or later. The version in stable Linux distributions may be outdated. Nunca debe usar scripts escritos manualmente para transferir datos entre diferentes clústeres de ZooKeeper, ya que el resultado será incorrecto para los nodos secuenciales. Nunca utilice el “zkcopy” utilidad por la misma razón: https://github.com/ksprojects/zkcopy/issues/15 @@ -102,7 +105,7 @@ Si desea dividir un clúster ZooKeeper existente en dos, la forma correcta es au No ejecute ZooKeeper en los mismos servidores que ClickHouse. Porque ZooKeeper es muy sensible a la latencia y ClickHouse puede utilizar todos los recursos del sistema disponibles. -Con la configuración predeterminada, ZooKeeper es una bomba de tiempo: +Con la configuración predeterminada, Cuidador es una bomba de tiempo: > El servidor ZooKeeper no eliminará archivos de instantáneas y registros antiguos cuando utilice la configuración predeterminada (consulte autopurge), y esto es responsabilidad del operador. @@ -245,4 +248,4 @@ script end script ``` -[Artículo Original](https://clickhouse.tech/docs/es/operations/tips/) +{## [Artículo Original](https://clickhouse.tech/docs/en/operations/tips/) ##} diff --git a/docs/es/operations/troubleshooting.md b/docs/es/operations/troubleshooting.md index d381ef8ffcf..70467a0cca4 100644 --- a/docs/es/operations/troubleshooting.md +++ b/docs/es/operations/troubleshooting.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 46 +toc_title: "Soluci\xF3n de problemas" --- # Solución de problemas {#troubleshooting} @@ -11,7 +14,7 @@ machine_translated: true ## Instalación {#troubleshooting-installation-errors} -### No puede obtener paquetes Deb del repositorio ClickHouse con apt-get {#you-cannot-get-deb-packages-from-clickhouse-repository-with-apt-get} +### No puede obtener paquetes Deb del repositorio de Clickhouse con Apt-get {#you-cannot-get-deb-packages-from-clickhouse-repository-with-apt-get} - Compruebe la configuración del firewall. - Si no puede acceder al repositorio por cualquier motivo, descargue los paquetes como se describe en el [Primeros pasos](../getting_started/index.md) artículo e instálelos manualmente usando el `sudo dpkg -i ` comando. También necesitará el `tzdata` paquete. @@ -45,8 +48,8 @@ El registro principal de `clickhouse-server` está en `/var/log/clickhouse-serve Si el servidor se inició correctamente, debería ver las cadenas: -- ` Application: starting up.` — Servidor iniciado. -- ` Application: Ready for connections.` — El servidor se está ejecutando y listo para las conexiones. +- ` Application: starting up.` — Server started. +- ` Application: Ready for connections.` — Server is running and ready for connections. 
Si `clickhouse-server` error de inicio con un error de configuración, debería ver el `` cadena con una descripción de error. Por ejemplo: @@ -78,7 +81,7 @@ Revision: 54413 **Ver sistema.d registros** -Si no encuentra ninguna información útil en `clickhouse-server` registros o no hay ningún registro, puede ver `system.d` comando el comando: +Si no encuentra ninguna información útil en `clickhouse-server` registros o no hay registros, puede ver `system.d` registros usando el comando: ``` bash $ sudo journalctl -u clickhouse-server @@ -102,7 +105,7 @@ Comprobar: - Configuración del punto final. - Comprobar [listen\_host](server_settings/settings.md#server_settings-listen_host) y [Tcp\_port](server_settings/settings.md#server_settings-tcp_port) configuración. + Comprobar [listen\_host](server_configuration_parameters/settings.md#server_configuration_parameters-listen_host) y [Tcp\_port](server_configuration_parameters/settings.md#server_configuration_parameters-tcp_port) configuración. El servidor ClickHouse acepta conexiones localhost solo de forma predeterminada. @@ -114,8 +117,8 @@ Comprobar: Comprobar: - - El [Tcp\_port\_secure](server_settings/settings.md#server_settings-tcp_port_secure) configuración. - - Ajustes para [Sertificados SSL](server_settings/settings.md#server_settings-openssl). + - El [Tcp\_port\_secure](server_configuration_parameters/settings.md#server_configuration_parameters-tcp_port_secure) configuración. + - Ajustes para [Sertificados SSL](server_configuration_parameters/settings.md#server_configuration_parameters-openssl). Utilice los parámetros adecuados mientras se conecta. Por ejemplo, utilice el `port_secure` parámetro con `clickhouse_client`. diff --git a/docs/es/operations/update.md b/docs/es/operations/update.md index ce93dab08dc..b391af9adc3 100644 --- a/docs/es/operations/update.md +++ b/docs/es/operations/update.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 47 +toc_title: "Actualizaci\xF3n de ClickHouse" --- # Actualización de ClickHouse {#clickhouse-update} diff --git a/docs/es/operations/utils/clickhouse-benchmark.md b/docs/es/operations/utilities/clickhouse-benchmark.md similarity index 59% rename from docs/es/operations/utils/clickhouse-benchmark.md rename to docs/es/operations/utilities/clickhouse-benchmark.md index 9c91d378b98..e1b59ac2255 100644 --- a/docs/es/operations/utils/clickhouse-benchmark.md +++ b/docs/es/operations/utilities/clickhouse-benchmark.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 61 +toc_title: Sistema abierto. --- # Sistema abierto. {#clickhouse-benchmark} @@ -33,29 +36,29 @@ clickhouse-benchmark [keys] < queries_file ## Claves {#clickhouse-benchmark-keys} -- `-c N`, `--concurrency=N` — Número de consultas que `clickhouse-benchmark` se envía simultáneamente. Valor predeterminado: 1. -- `-d N`, `--delay=N` — Intervalo en segundos entre informes intermedios (ajuste 0 para deshabilitar informes). Valor predeterminado: 1. -- `-h WORD`, `--host=WORD` — Host del servidor. Valor predeterminado: `localhost`. Para el [modo de comparación](#clickhouse-benchmark-comparison-mode) puedes usar múltiples `-h` claves. -- `-p N`, `--port=N` — Puerto del servidor. Valor predeterminado: 9000. Para el [modo de comparación](#clickhouse-benchmark-comparison-mode) puedes usar múltiples `-p` claves. -- `-i N`, `--iterations=N` — Número Total de consultas. Valor predeterminado: 0. 
-- `-r`, `--randomize` - Orden aleatorio de ejecución de consultas si hay más de una consulta de entrada. -- `-s`, `--secure` — Usando la conexión TLS. -- `-t N`, `--timelimit=N` — Límite de tiempo en segundos. `clickhouse-benchmark` detiene el envío de consultas cuando se alcanza el límite de tiempo especificado. Valor predeterminado: 0 (límite de tiempo desactivado). -- `--confidence=N` — Nivel de confianza para la prueba T. Valores posibles: 0 (80%), 1 (90%), 2 (95%), 3 (98%), 4 (99%), 5 (99.5%). Valor predeterminado: 5. En el [modo de comparación](#clickhouse-benchmark-comparison-mode) `clickhouse-benchmark` realiza el [Examen t independiente de dos muestras para estudiantes](https://en.wikipedia.org/wiki/Student%27s_t-test#Independent_two-sample_t-test) para determinar si las dos distribuciones no son diferentes con el nivel de confianza seleccionado. -- `--cumulative` — Impresión de datos acumulativos en lugar de datos por intervalo. -- `--database=DATABASE_NAME` — Nombre de base de datos ClickHouse. Valor predeterminado: `default`. -- `--json=FILEPATH` — Salida JSON. Cuando se establece la clave, `clickhouse-benchmark` " resultados de la búsqueda relacionados -- `--user=USERNAME` — Nombre de usuario de ClickHouse. Valor predeterminado: `default`. -- `--password=PSWD` — Contraseña de usuario de ClickHouse. Valor predeterminado: cadena vacía. -- `--stacktrace` — Pila trazas de salida. Cuando se establece la clave, `clickhouse-bencmark` las salidas acumulan rastros de excepciones. -- `--stage=WORD` - Etapa de procesamiento de consultas en el servidor. ClickHouse detiene el procesamiento de consultas y devuelve la respuesta a `clickhouse-benchmark` en la etapa especificada. Valores posibles: `complete`, `fetch_columns`, `with_mergeable_state`. Valor predeterminado: `complete`. -- `--help` — Muestra el mensaje de ayuda. +- `-c N`, `--concurrency=N` — Number of queries that `clickhouse-benchmark` se envía simultáneamente. Valor predeterminado: 1. +- `-d N`, `--delay=N` — Interval in seconds between intermediate reports (set 0 to disable reports). Default value: 1. +- `-h WORD`, `--host=WORD` — Server host. Default value: `localhost`. Para el [modo de comparación](#clickhouse-benchmark-comparison-mode) se pueden utilizar varios `-h` claves. +- `-p N`, `--port=N` — Server port. Default value: 9000. For the [modo de comparación](#clickhouse-benchmark-comparison-mode) puedes usar múltiples `-p` claves. +- `-i N`, `--iterations=N` — Total number of queries. Default value: 0. +- `-r`, `--randomize` — Random order of queries execution if there is more then one input query. +- `-s`, `--secure` — Using TLS connection. +- `-t N`, `--timelimit=N` — Time limit in seconds. `clickhouse-benchmark` detiene el envío de consultas cuando se alcanza el límite de tiempo especificado. Valor predeterminado: 0 (límite de tiempo desactivado). +- `--confidence=N` — Level of confidence for T-test. Possible values: 0 (80%), 1 (90%), 2 (95%), 3 (98%), 4 (99%), 5 (99.5%). Default value: 5. In the [modo de comparación](#clickhouse-benchmark-comparison-mode) `clickhouse-benchmark` realiza el [Prueba t independiente de dos muestras para estudiantes](https://en.wikipedia.org/wiki/Student%27s_t-test#Independent_two-sample_t-test) prueba para determinar si las dos distribuciones no son diferentes con el nivel de confianza seleccionado. +- `--cumulative` — Printing cumulative data instead of data per interval. +- `--database=DATABASE_NAME` — ClickHouse database name. Default value: `default`. 
+- `--json=FILEPATH` — JSON output. When the key is set, `clickhouse-benchmark` emite un informe al archivo JSON especificado. +- `--user=USERNAME` — ClickHouse user name. Default value: `default`. +- `--password=PSWD` — ClickHouse user password. Default value: empty string. +- `--stacktrace` — Stack traces output. When the key is set, `clickhouse-bencmark` las salidas acumulan rastros de excepciones. +- `--stage=WORD` — Query processing stage at server. ClickHouse stops query processing and returns answer to `clickhouse-benchmark` en la etapa especificada. Valores posibles: `complete`, `fetch_columns`, `with_mergeable_state`. Valor predeterminado: `complete`. +- `--help` — Shows the help message. Si desea aplicar alguna [configuración](../../operations/settings/index.md) para consultas, páselas como una clave `--= SETTING_VALUE`. Por ejemplo, `--max_memory_usage=1048576`. ## Salida {#clickhouse-benchmark-output} -Predeterminada, `clickhouse-benchmark` Informes para cada `--delay` intervalo. +Predeterminada, `clickhouse-benchmark` informes para cada `--delay` intervalo. Ejemplo del informe: @@ -88,7 +91,7 @@ En el informe puedes encontrar: - Punto final del servidor ClickHouse. - Número de consultas procesadas. - - QPS: QPS: ¿Cuántas consultas realizó el servidor por segundo durante un período `--delay` argumento. + - QPS: QPS: ¿cuántas consultas servidor realizó por segundo durante un período de tiempo especificado en el `--delay` argumento. - RPS: ¿Cuántas filas lee el servidor por segundo durante un período `--delay` argumento. - MiB/s: ¿Cuántos mebibytes servidor leído por segundo durante un período especificado en el `--delay` argumento. - resultado RPS: ¿Cuántas filas colocadas por el servidor al resultado de una consulta por segundo durante un período `--delay` argumento. diff --git a/docs/es/operations/utils/clickhouse-copier.md b/docs/es/operations/utilities/clickhouse-copier.md similarity index 86% rename from docs/es/operations/utils/clickhouse-copier.md rename to docs/es/operations/utilities/clickhouse-copier.md index caf282c3806..917c4d41939 100644 --- a/docs/es/operations/utils/clickhouse-copier.md +++ b/docs/es/operations/utilities/clickhouse-copier.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 59 +toc_title: "M\xE9todo de codificaci\xF3n de datos:" --- # Método de codificación de datos: {#clickhouse-copier} @@ -33,12 +36,12 @@ $ clickhouse-copier copier --daemon --config zookeeper.xml --task-path /task/pat Parámetros: -- `daemon` — Empezar `clickhouse-copier` en modo demonio. -- `config` — El camino hacia el `zookeeper.xml` con los parámetros para la conexión a ZooKeeper. -- `task-path` — La ruta al nodo ZooKeeper. Este nodo se utiliza para la sincronización `clickhouse-copier` procesos y tareas de almacenamiento. Las tareas se almacenan en `$task-path/description`. -- `task-file` - Ruta opcional al archivo con la configuración de la tarea para la carga inicial a ZooKeeper. -- `task-upload-force` — Fuerza de carga `task-file` incluso si el nodo ya existe. -- `base-dir` — La ruta a los registros y archivos auxiliares. Cuando comienza, `clickhouse-copier` crear `clickhouse-copier_YYYYMMHHSS_` subdirectorios en `$base-dir`. Si se omite este parámetro, los directorios se crean en el directorio donde `clickhouse-copier` se puso en marcha. +- `daemon` — Starts `clickhouse-copier` en modo daemon. +- `config` — The path to the `zookeeper.xml` con los parámetros para la conexión a ZooKeeper. 
+- `task-path` — The path to the ZooKeeper node. This node is used for syncing `clickhouse-copier` procesos y tareas de almacenamiento. Las tareas se almacenan en `$task-path/description`. +- `task-file` — Optional path to file with task configuration for initial upload to ZooKeeper. +- `task-upload-force` — Force upload `task-file` incluso si el nodo ya existe. +- `base-dir` — The path to logs and auxiliary files. When it starts, `clickhouse-copier` crear `clickhouse-copier_YYYYMMHHSS_` subdirectorios en `$base-dir`. Si se omite este parámetro, los directorios se crean en el directorio donde `clickhouse-copier` se puso en marcha. ## Formato de zookeeper.XML {#format-of-zookeeper-xml} @@ -170,4 +173,4 @@ Parámetros: `clickhouse-copier` seguimiento de los cambios en `/task/path/description` y los aplica sobre la marcha. Por ejemplo, si cambia el valor de `max_workers`, el número de procesos que ejecutan tareas también cambiará. -[Artículo Original](https://clickhouse.tech/docs/es/operations/utils/clickhouse-copier/) +[Artículo Original](https://clickhouse.tech/docs/en/operations/utils/clickhouse-copier/) diff --git a/docs/es/operations/utils/clickhouse-local.md b/docs/es/operations/utilities/clickhouse-local.md similarity index 70% rename from docs/es/operations/utils/clickhouse-local.md rename to docs/es/operations/utilities/clickhouse-local.md index faad161ba69..c469afbb770 100644 --- a/docs/es/operations/utils/clickhouse-local.md +++ b/docs/es/operations/utilities/clickhouse-local.md @@ -1,12 +1,15 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 60 +toc_title: Sistema abierto. --- # Sistema abierto. {#clickhouse-local} El `clickhouse-local` El programa le permite realizar un procesamiento rápido en archivos locales, sin tener que implementar y configurar el servidor ClickHouse. -Acepta datos que representan tablas y las consulta usando [Nombre de la red inalámbrica (SSID):](../../query_language/index.md). +Acepta datos que representan tablas y las consulta usando [Nombre de la red inalámbrica (SSID):](../../sql_reference/index.md). `clickhouse-local` utiliza el mismo núcleo que el servidor ClickHouse, por lo que es compatible con la mayoría de las características y el mismo conjunto de formatos y motores de tabla. @@ -25,17 +28,17 @@ $ clickhouse-local --structure "table_structure" --input-format "format_of_incom Argumento: -- `-S`, `--structure` — estructura de tabla para los datos de entrada. -- `-if`, `--input-format` — formato de entrada, `TSV` predeterminada. -- `-f`, `--file` — ruta de acceso a los datos, `stdin` predeterminada. -- `-q` `--query` — consultas para ejecutar con `;` como delimitador. -- `-N`, `--table` — nombre de la tabla dónde colocar los datos de salida, `table` predeterminada. -- `-of`, `--format`, `--output-format` — formato de salida, `TSV` predeterminada. -- `--stacktrace` — si volcar la salida de depuración en caso de excepción. -- `--verbose` — más detalles sobre la ejecución de la consulta. -- `-s` — desactivar `stderr` Tala. -- `--config-file` — ruta al archivo de configuración en el mismo formato que para el servidor ClickHouse, por defecto la configuración vacía. -- `--help` — argumentos referencias para `clickhouse-local`. +- `-S`, `--structure` — table structure for input data. +- `-if`, `--input-format` — input format, `TSV` predeterminada. +- `-f`, `--file` — path to data, `stdin` predeterminada. +- `-q` `--query` — queries to execute with `;` como delimitador. 
+- `-N`, `--table` — table name where to put output data, `table` predeterminada. +- `-of`, `--format`, `--output-format` — output format, `TSV` predeterminada. +- `--stacktrace` — whether to dump debug output in case of exception. +- `--verbose` — more details on query execution. +- `-s` — disables `stderr` tala. +- `--config-file` — path to configuration file in same format as for ClickHouse server, by default the configuration empty. +- `--help` — arguments references for `clickhouse-local`. También hay argumentos para cada variable de configuración de ClickHouse que se usan más comúnmente en lugar de `--config-file`. @@ -75,4 +78,4 @@ Read 186 rows, 4.15 KiB in 0.035 sec., 5302 rows/sec., 118.34 KiB/sec. ... ``` -[Artículo Original](https://clickhouse.tech/docs/es/operations/utils/clickhouse-local/) +[Artículo Original](https://clickhouse.tech/docs/en/operations/utils/clickhouse-local/) diff --git a/docs/es/operations/utilities/index.md b/docs/es/operations/utilities/index.md new file mode 100644 index 00000000000..221b043986a --- /dev/null +++ b/docs/es/operations/utilities/index.md @@ -0,0 +1,15 @@ +--- +machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_folder_title: Utilities +toc_priority: 56 +toc_title: "Descripci\xF3n" +--- + +# Utilidad ClickHouse {#clickhouse-utility} + +- [Sistema abierto.](clickhouse-local.md) — Allows running SQL queries on data without stopping the ClickHouse server, similar to how `awk` hace esto. +- [Método de codificación de datos:](clickhouse-copier.md) — Copies (and reshards) data from one cluster to another cluster. +- [Sistema abierto.](clickhouse-benchmark.md) — Loads server with the custom queries and settings. + +[Artículo Original](https://clickhouse.tech/docs/en/operations/utils/) diff --git a/docs/es/operations/utils/index.md b/docs/es/operations/utils/index.md deleted file mode 100644 index 2911d46f310..00000000000 --- a/docs/es/operations/utils/index.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -machine_translated: true ---- - -# Utilidad ClickHouse {#clickhouse-utility} - -- [Sistema abierto.](clickhouse-local.md) — Permite ejecutar consultas SQL en datos sin detener el servidor ClickHouse, similar a cómo `awk` hace esto. -- [Método de codificación de datos:](clickhouse-copier.md) — Copia (y vuelve a dividir) los datos de un clúster a otro. -- [Sistema abierto.](clickhouse-benchmark.md) — Carga el servidor con las consultas y configuraciones personalizadas. - -[Artículo Original](https://clickhouse.tech/docs/es/operations/utils/) diff --git a/docs/es/query_language/dicts/index.md b/docs/es/query_language/dicts/index.md deleted file mode 100644 index 6007869a9b1..00000000000 --- a/docs/es/query_language/dicts/index.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -machine_translated: true ---- - -# Diccionario {#dictionaries} - -Un diccionario es un mapeo (`key -> attributes`) que es conveniente para varios tipos de listas de referencia. - -ClickHouse admite funciones especiales para trabajar con diccionarios que se pueden usar en consultas. Es más fácil y más eficiente usar diccionarios con funciones que un `JOIN` con tablas de referencia. - -[NULO](../syntax.md#null) no se pueden almacenar en un diccionario. - -Soporte ClickHouse: - -- [Diccionarios incorporados](internal_dicts.md#internal_dicts) con una específica [conjunto de funciones](../functions/ym_dict_functions.md). -- [Diccionarios complementarios (externos)](external_dicts.md) con un [conjunto de funciones](../functions/ext_dict_functions.md). 
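Un esbozo mínimo de esa idea (el diccionario `paises`, su atributo `nombre` y la tabla `visitas` son hipotéticos), usando una función de diccionario en lugar de un `JOIN` con una tabla de referencia:

``` sql
-- Esbozo ilustrativo: búsqueda por clave numérica en un diccionario externo.
SELECT dictGetString('paises', 'nombre', toUInt64(id_pais)) AS pais, count()
FROM visitas
GROUP BY pais
```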
- -[Artículo Original](https://clickhouse.tech/docs/es/query_language/dicts/) diff --git a/docs/es/query_language/functions/comparison_functions.md b/docs/es/query_language/functions/comparison_functions.md deleted file mode 100644 index ebd799508a9..00000000000 --- a/docs/es/query_language/functions/comparison_functions.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -machine_translated: true ---- - -# Funciones de comparación {#comparison-functions} - -Las funciones de comparación siempre devuelven 0 o 1 (Uint8). - -Se pueden comparar los siguientes tipos: - -- número -- cuerdas y cuerdas fijas -- Fechas -- fechas con tiempos - -dentro de cada grupo, pero no entre diferentes grupos. - -Por ejemplo, no puede comparar una fecha con una cadena. Debe usar una función para convertir la cadena a una fecha, o viceversa. - -Las cadenas se comparan por bytes. Una cadena más corta es más pequeña que todas las cadenas que comienzan con ella y que contienen al menos un carácter más. - -Nota. Hasta la versión 1.1.54134, los números firmados y sin firmar se comparaban de la misma manera que en C ++. En otras palabras, podría obtener un resultado incorrecto en casos como SELECT 9223372036854775807 \> -1 . Este comportamiento cambió en la versión 1.1.54134 y ahora es matemáticamente correcto. - -## Por ejemplo: {#function-equals} - -## notEquals, un ! operador = b y a `<>` b {#function-notequals} - -## menos, `< operator` {#function-less} - -## alcalde, `> operator` {#function-greater} - -## lessOrEquals, `<= operator` {#function-lessorequals} - -## mayorOrEquals, `>= operator` {#function-greaterorequals} - -[Artículo Original](https://clickhouse.tech/docs/es/query_language/functions/comparison_functions/) diff --git a/docs/es/query_language/functions/machine_learning_functions.md b/docs/es/query_language/functions/machine_learning_functions.md deleted file mode 100644 index b1c80ee7ce0..00000000000 --- a/docs/es/query_language/functions/machine_learning_functions.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -machine_translated: true ---- - -# Funciones de aprendizaje automático {#machine-learning-functions} - -## evalMLMethod (predicción) {#machine_learning_methods-evalmlmethod} - -Predicción utilizando modelos de regresión ajustados utiliza `evalMLMethod` función. Ver enlace en `linearRegression`. - -### Regresión lineal estocástica {#stochastic-linear-regression} - -El [stochasticLinearRegression](../agg_functions/reference.md#agg_functions-stochasticlinearregression) la función agregada implementa el método de descenso de gradiente estocástico utilizando el modelo lineal y la función de pérdida MSE. Utilizar `evalMLMethod` para predecir sobre nuevos datos. - -### Regresión logística estocástica {#stochastic-logistic-regression} - -El [stochasticLogisticRegression](../agg_functions/reference.md#agg_functions-stochasticlogisticregression) la función de agregado implementa el método de descenso de gradiente estocástico para el problema de clasificación binaria. Utilizar `evalMLMethod` para predecir sobre nuevos datos. 
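El patrón de uso descrito en el archivo anterior puede esbozarse así (las tablas `train_data` y `test_data` son hipotéticas; los parámetros de `stochasticLinearRegressionState`, es decir tasa de aprendizaje, regularización L2, tamaño de mini-lote y método, son sólo un ejemplo):

``` sql
-- 1. Ajustar el modelo con la función agregada y guardar su estado.
CREATE TABLE modelo ENGINE = Memory AS
SELECT stochasticLinearRegressionState(0.1, 0.0, 5, 'SGD')(objetivo, param1, param2) AS state
FROM train_data;

-- 2. Predecir sobre datos nuevos con evalMLMethod.
WITH (SELECT state FROM modelo) AS model
SELECT evalMLMethod(model, param1, param2) AS prediccion
FROM test_data;
```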
diff --git a/docs/es/query_language/functions/splitting_merging_functions.md b/docs/es/query_language/functions/splitting_merging_functions.md deleted file mode 100644 index 9f257fe2f61..00000000000 --- a/docs/es/query_language/functions/splitting_merging_functions.md +++ /dev/null @@ -1,71 +0,0 @@ ---- -machine_translated: true ---- - -# Funciones para dividir y fusionar cadenas y matrices {#functions-for-splitting-and-merging-strings-and-arrays} - -## Por ejemplo:) {#splitbycharseparator-s} - -Divide una cadena en subcadenas separadas por ‘separator’.'separador' debe ser una constante de cadena que consta de exactamente un carácter. -Devuelve una matriz de subcadenas seleccionadas. Se pueden seleccionar subcadenas vacías si el separador aparece al principio o al final de la cadena, o si hay varios separadores consecutivos. - -**Ejemplo:** - -``` sql -SELECT splitByChar(',', '1,2,3,abcde') -``` - -``` text -┌─splitByChar(',', '1,2,3,abcde')─┐ -│ ['1','2','3','abcde'] │ -└─────────────────────────────────┘ -``` - -## Por ejemplo:) {#splitbystringseparator-s} - -Lo mismo que el anterior, pero usa una cadena de múltiples caracteres como separador. Si la cadena está vacía, dividirá la cadena en una matriz de caracteres individuales. - -**Ejemplo:** - -``` sql -SELECT splitByString(', ', '1, 2 3, 4,5, abcde') -``` - -``` text -┌─splitByString(', ', '1, 2 3, 4,5, abcde')─┐ -│ ['1','2 3','4,5','abcde'] │ -└───────────────────────────────────────────┘ -``` - -``` sql -SELECT splitByString('', 'abcde') -``` - -``` text -┌─splitByString('', 'abcde')─┐ -│ ['a','b','c','d','e'] │ -└────────────────────────────┘ -``` - -## Por ejemplo, se puede usar una matriz.\]) {#arraystringconcatarr-separator} - -Concatena las cadenas enumeradas en la matriz con el separador.'separador' es un parámetro opcional: una cadena constante, establecida en una cadena vacía por defecto. -Devuelve la cadena. - -## Sistema abierto.) {#alphatokenss} - -Selecciona subcadenas de bytes consecutivos de los rangos a-z y A-Z.Devuelve una matriz de subcadenas. - -**Ejemplo:** - -``` sql -SELECT alphaTokens('abca1abc') -``` - -``` text -┌─alphaTokens('abca1abc')─┐ -│ ['abca','abc'] │ -└─────────────────────────┘ -``` - -[Artículo Original](https://clickhouse.tech/docs/es/query_language/functions/splitting_merging_functions/) diff --git a/docs/es/query_language/index.md b/docs/es/query_language/index.md deleted file mode 100644 index fa981365e64..00000000000 --- a/docs/es/query_language/index.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -machine_translated: true ---- - -# Referencia SQL {#sql-reference} - -- [SELECCIONAR](select.md) -- [ES INSERTAR](insert_into.md) -- [CREAR](create.md) -- [ALTERAR](alter.md#query_language_queries_alter) -- [Otros tipos de consultas](misc.md) - -[Artículo Original](https://clickhouse.tech/docs/es/query_language/) diff --git a/docs/es/query_language/select.md b/docs/es/query_language/select.md deleted file mode 100644 index b3aaef02b79..00000000000 --- a/docs/es/query_language/select.md +++ /dev/null @@ -1,1379 +0,0 @@ ---- -machine_translated: true ---- - -# SELECCIONAR consultas Sintaxis {#select-queries-syntax} - -`SELECT` realiza la recuperación de datos. - -``` sql -[WITH expr_list|(subquery)] -SELECT [DISTINCT] expr_list -[FROM [db.]table | (subquery) | table_function] [FINAL] -[SAMPLE sample_coeff] -[ARRAY JOIN ...] 
-[GLOBAL] [ANY|ALL] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER] JOIN (subquery)|table USING columns_list
-[PREWHERE expr]
-[WHERE expr]
-[GROUP BY expr_list] [WITH TOTALS]
-[HAVING expr]
-[ORDER BY expr_list]
-[LIMIT [offset_value, ]n BY columns]
-[LIMIT [n, ]m]
-[UNION ALL ...]
-[INTO OUTFILE filename]
-[FORMAT format]
-```
-
-All clauses are optional, except for the required list of expressions immediately after SELECT.
-The clauses below are described in almost the same order as in the query execution conveyor.
-
-If the query omits the `DISTINCT`, `GROUP BY` and `ORDER BY` clauses and the `IN` and `JOIN` subqueries, the query will be completely stream processed, using O(1) amount of RAM.
-Otherwise, the query might consume a lot of RAM if the appropriate restrictions are not specified: `max_memory_usage`, `max_rows_to_group_by`, `max_rows_to_sort`, `max_rows_in_distinct`, `max_bytes_in_distinct`, `max_rows_in_set`, `max_bytes_in_set`, `max_rows_in_join`, `max_bytes_in_join`, `max_bytes_before_external_sort`, `max_bytes_before_external_group_by`. For more information, see the section «Settings». It is possible to use external sorting (saving temporary tables to a disk) and external aggregation. `The system does not have "merge join"`.
-
-### WITH Clause {#with-clause}
-
-This section provides support for Common Table Expressions ([CTE](https://en.wikipedia.org/wiki/Hierarchical_and_recursive_queries_in_SQL)), with some limitations:
-1. Recursive queries are not supported
-2. When a subquery is used inside the WITH section, its result should be scalar with exactly one row
-3. Expression results are not available in subqueries
-Results of WITH clause expressions can be used inside the SELECT clause.
-
-Example 1: Using a constant expression as a «variable»
-
-``` sql
-WITH '2019-08-01 15:23:00' as ts_upper_bound
-SELECT *
-FROM hits
-WHERE
-    EventDate = toDate(ts_upper_bound) AND
-    EventTime <= ts_upper_bound
-```
-
-Example 2: Evicting the sum(bytes) expression result from the SELECT clause column list
-
-``` sql
-WITH sum(bytes) as s
-SELECT
-    formatReadableSize(s),
-    table
-FROM system.parts
-GROUP BY table
-ORDER BY s
-```
-
-Example 3: Using results of a scalar subquery
-
-``` sql
-/* this example would return TOP 10 of most huge tables */
-WITH
-    (
-        SELECT sum(bytes)
-        FROM system.parts
-        WHERE active
-    ) AS total_disk_usage
-SELECT
-    (sum(bytes) / total_disk_usage) * 100 AS table_disk_usage,
-    table
-FROM system.parts
-GROUP BY table
-ORDER BY table_disk_usage DESC
-LIMIT 10
-```
-
-Example 4: Re-using an expression in a subquery
-As a workaround for the current limitation on expression usage in subqueries, you may duplicate it.
-
-``` sql
-WITH ['hello'] AS hello
-SELECT
-    hello,
-    *
-FROM
-(
-    WITH ['hello'] AS hello
-    SELECT hello
-)
-```
-
-``` text
-┌─hello─────┬─hello─────┐
-│ ['hello'] │ ['hello'] │
-└───────────┴───────────┘
-```
-
-### FROM Clause {#select-from}
-
-If the FROM clause is omitted, data will be read from the `system.one` table.
-The `system.one` table contains exactly one row (this table fulfills the same purpose as the DUAL table found in other DBMSs).
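-
-As a quick sketch, the two queries below are equivalent and return one row:
-
-``` sql
--- With FROM omitted, ClickHouse implicitly reads from system.one.
-SELECT 1 + 1;
-SELECT 1 + 1 FROM system.one;
-```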
-
-The `FROM` clause specifies the source to read data from:
-
-- Table
-- Subquery
-- [Table function](table_functions/index.md)
-
-`ARRAY JOIN` and the regular `JOIN` may also be included (see below).
-
-Instead of a table, a `SELECT` subquery may be specified in brackets.
-In contrast to standard SQL, a synonym does not need to be specified after a subquery.
-
-To execute a query, all the columns listed in the query are extracted from the appropriate table. Any columns not needed for the external query are thrown out of the subqueries.
-If a query does not list any columns (for example, `SELECT count() FROM t`), some column is extracted from the table anyway (the smallest one is preferred), in order to calculate the number of rows.
-
-#### FINAL Modifier {#select-from-final}
-
-Applicable when selecting data from tables of the [MergeTree](../operations/table_engines/mergetree.md)-engine family, other than `GraphiteMergeTree`. When `FINAL` is specified, ClickHouse fully merges the data before returning the result and thus performs all the data transformations that happen during merges for the given table engine.
-
-Also supported for:
-- [Replicated](../operations/table_engines/replication.md) versions of `MergeTree` engines.
-- [View](../operations/table_engines/view.md), [Buffer](../operations/table_engines/buffer.md), [Distributed](../operations/table_engines/distributed.md), and [MaterializedView](../operations/table_engines/materializedview.md) engines that operate over other engines, provided they were created over `MergeTree`-engine tables.
-
-Queries that use `FINAL` are executed not as fast as similar queries that don't, because:
-
-- The query is executed in a single thread and data is merged during query execution.
-- Queries with `FINAL` read primary key columns in addition to the columns specified in the query.
-
-In most cases, avoid using `FINAL`.
-
-### SAMPLE Clause {#select-sample-clause}
-
-The `SAMPLE` clause allows for approximated query processing.
-
-When data sampling is enabled, the query is not performed on all the data, but only on a certain fraction of data (sample). For example, if you need to calculate statistics for all the visits, it is enough to execute the query on the 1/10 fraction of all the visits and then multiply the result by 10.
-
-Approximated query processing can be useful in the following cases:
-
-- When you have strict timing requirements (like \<100ms) but you can't justify the cost of additional hardware resources to meet them.
-- When your raw data is not accurate, so approximation does not noticeably degrade the quality.
-- Business requirements target approximate results (for cost-effectiveness, or in order to market exact results to premium users).
-
-!!! note "Note"
-    You can only use sampling with the tables in the [MergeTree](../operations/table_engines/mergetree.md) family, and only if the sampling expression was specified during table creation (see [MergeTree engine](../operations/table_engines/mergetree.md#table_engine-mergetree-creating-a-table)).
-
-The features of data sampling are listed below:
-
-- Data sampling is a deterministic mechanism.
The result of the same `SELECT .. SAMPLE` query is always the same.
-- Sampling works consistently for different tables. For tables with a single sampling key, a sample with the same coefficient always selects the same subset of possible data. For example, a sample of user IDs takes rows with the same subset of all the possible user IDs from different tables. This means that you can use the sample in subqueries in the [IN](#select-in-operators) clause. Also, you can join samples using the [JOIN](#select-join) clause.
-- Sampling allows reading less data from a disk. Note that you must specify the sampling key correctly. For more information, see [Creating a MergeTree Table](../operations/table_engines/mergetree.md#table_engine-mergetree-creating-a-table).
-
-For the `SAMPLE` clause the following syntax is supported:
-
-| SAMPLE Clause Syntax | Description |
-|----------------------|-------------|
-| `SAMPLE k` | Here `k` is the number from 0 to 1. The query is executed on `k` fraction of data. For example, `SAMPLE 0.1` runs the query on 10% of data. [Read more](#select-sample-k) |
-| `SAMPLE n` | Here `n` is a sufficiently large integer. The query is executed on a sample of at least `n` rows (but not significantly more than this). For example, `SAMPLE 10000000` runs the query on a minimum of 10,000,000 rows. [Read more](#select-sample-n) |
-| `SAMPLE k OFFSET m` | Here `k` and `m` are numbers from 0 to 1. The query is executed on a sample of `k` fraction of the data. The data used for the sample is offset by `m` fraction. [Read more](#select-sample-offset) |
-
-#### SAMPLE k {#select-sample-k}
-
-Here `k` is the number from 0 to 1 (both fractional and decimal notations are supported). For example, `SAMPLE 1/2` or `SAMPLE 0.5`.
-
-In a `SAMPLE k` clause, the sample is taken from the `k` fraction of data. The example is shown below:
-
-``` sql
-SELECT
-    Title,
-    count() * 10 AS PageViews
-FROM hits_distributed
-SAMPLE 0.1
-WHERE
-    CounterID = 34
-GROUP BY Title
-ORDER BY PageViews DESC LIMIT 1000
-```
-
-In this example, the query is executed on a sample from 0.1 (10%) of data. Values of aggregate functions are not corrected automatically, so to get an approximate result, the value `count()` is manually multiplied by 10.
-
-#### SAMPLE n {#select-sample-n}
-
-Here `n` is a sufficiently large integer. For example, `SAMPLE 10000000`.
-
-In this case, the query is executed on a sample of at least `n` rows (but not significantly more than this). For example, `SAMPLE 10000000` runs the query on a minimum of 10,000,000 rows.
-
-Since the minimum unit for data reading is one granule (its size is set by the `index_granularity` setting), it makes sense to set a sample that is much larger than the size of the granule.
-
-When using the `SAMPLE n` clause, you don't know which relative percent of data was processed. So you don't know the coefficient the aggregate functions should be multiplied by. Use the `_sample_factor` virtual column to get the approximate result.
-
-The `_sample_factor` column contains relative coefficients that are calculated dynamically. This column is created automatically when you [create](../operations/table_engines/mergetree.md#table_engine-mergetree-creating-a-table) a table with the specified sampling key. The usage examples of the `_sample_factor` column are shown below.
-
-Let's consider the table `visits`, which contains the statistics about site visits. The first example shows how to calculate the number of page views:
-
-``` sql
-SELECT sum(PageViews * _sample_factor)
-FROM visits
-SAMPLE 10000000
-```
-
-The next example shows how to calculate the total number of visits:
-
-``` sql
-SELECT sum(_sample_factor)
-FROM visits
-SAMPLE 10000000
-```
-
-The example below shows how to calculate the average session duration. Note that you don't need to use the relative coefficient to calculate the average values.
-
-``` sql
-SELECT avg(Duration)
-FROM visits
-SAMPLE 10000000
-```
-
-#### SAMPLE k OFFSET m {#select-sample-offset}
-
-Here `k` and `m` are numbers from 0 to 1. Examples are shown below.
-
-**Example 1**
-
-``` sql
-SAMPLE 1/10
-```
-
-In this example, the sample is 1/10th of all data:
-
-`[++------------------]`
-
-**Example 2**
-
-``` sql
-SAMPLE 1/10 OFFSET 1/2
-```
-
-Here, a sample of 10% is taken from the second half of the data.
-
-`[----------++--------]`
-
-### ARRAY JOIN Clause {#select-array-join-clause}
-
-Allows executing `JOIN` with an array or nested data structure. The intent is similar to the [arrayJoin](functions/array_join.md#functions_arrayjoin) function, but its functionality is broader.
-
-``` sql
-SELECT <expr_list>
-FROM <left_subquery>
-[LEFT] ARRAY JOIN <array>
-[WHERE|PREWHERE <expr>]
-...
-```
-
-You can specify only a single `ARRAY JOIN` clause in a query.
-
-The query execution order is optimized when running `ARRAY JOIN`. Although `ARRAY JOIN` must always be specified before the `WHERE/PREWHERE` clause, it can be performed either before `WHERE/PREWHERE` (if its result is needed in this clause), or after completing it (to reduce the volume of calculations). The processing order is controlled by the query optimizer.
-
-Supported types of `ARRAY JOIN` are listed below:
-
-- `ARRAY JOIN` - In this case, empty arrays are not included in the result of `JOIN`.
-- `LEFT ARRAY JOIN` - The result of `JOIN` contains rows with empty arrays. The value for an empty array is set to the default value for the array element type (usually 0, empty string or NULL).
-
-The examples below demonstrate the usage of the `ARRAY JOIN` and `LEFT ARRAY JOIN` clauses. Let's create a table with an [Array](../data_types/array.md) type column and insert values into it:
-
-``` sql
-CREATE TABLE arrays_test
-(
-    s String,
-    arr Array(UInt8)
-) ENGINE = Memory;
-
-INSERT INTO arrays_test
-VALUES ('Hello', [1,2]), ('World', [3,4,5]), ('Goodbye', []);
-```
-
-``` text
-┌─s───────────┬─arr─────┐
-│ Hello       │ [1,2]   │
-│ World       │ [3,4,5] │
-│ Goodbye     │ []      │
-└─────────────┴─────────┘
-```
-
-The example below uses the `ARRAY JOIN` clause:
-
-``` sql
-SELECT s, arr
-FROM arrays_test
-ARRAY JOIN arr;
-```
-
-``` text
-┌─s─────┬─arr─┐
-│ Hello │   1 │
-│ Hello │   2 │
-│ World │   3 │
-│ World │   4 │
-│ World │   5 │
-└───────┴─────┘
-```
-
-The next example uses the `LEFT ARRAY JOIN` clause:
-
-``` sql
-SELECT s, arr
-FROM arrays_test
-LEFT ARRAY JOIN arr;
-```
-
-``` text
-┌─s───────────┬─arr─┐
-│ Hello       │   1 │
-│ Hello       │   2 │
-│ World       │   3 │
-│ World       │   4 │
-│ World       │   5 │
-│ Goodbye     │   0 │
-└─────────────┴─────┘
-```
-
-#### Using Aliases {#using-aliases}
-
-An alias can be specified for an array in the `ARRAY JOIN` clause. In this case, an array item can be accessed by this alias, but the array itself is accessed by the original name. Example:
-
-``` sql
-SELECT s, arr, a
-FROM arrays_test
-ARRAY JOIN arr AS a;
-```
-
-``` text
-┌─s─────┬─arr─────┬─a─┐
-│ Hello │ [1,2]   │ 1 │
-│ Hello │ [1,2]   │ 2 │
-│ World │ [3,4,5] │ 3 │
-│ World │ [3,4,5] │ 4 │
-│ World │ [3,4,5] │ 5 │
-└───────┴─────────┴───┘
-```
-
-Using aliases, you can perform `ARRAY JOIN` with an external array. For example:
-
-``` sql
-SELECT s, arr_external
-FROM arrays_test
-ARRAY JOIN [1, 2, 3] AS arr_external;
-```
-
-``` text
-┌─s───────────┬─arr_external─┐
-│ Hello       │            1 │
-│ Hello       │            2 │
-│ Hello       │            3 │
-│ World       │            1 │
-│ World       │            2 │
-│ World       │            3 │
-│ Goodbye     │            1 │
-│ Goodbye     │            2 │
-│ Goodbye     │            3 │
-└─────────────┴──────────────┘
-```
-
-Multiple arrays can be comma-separated in the `ARRAY JOIN` clause. In this case, `JOIN` is performed with them simultaneously (the direct sum, not the Cartesian product). Note that all the arrays must have the same size.
Example:
-
-``` sql
-SELECT s, arr, a, num, mapped
-FROM arrays_test
-ARRAY JOIN arr AS a, arrayEnumerate(arr) AS num, arrayMap(x -> x + 1, arr) AS mapped;
-```
-
-``` text
-┌─s─────┬─arr─────┬─a─┬─num─┬─mapped─┐
-│ Hello │ [1,2]   │ 1 │   1 │      2 │
-│ Hello │ [1,2]   │ 2 │   2 │      3 │
-│ World │ [3,4,5] │ 3 │   1 │      4 │
-│ World │ [3,4,5] │ 4 │   2 │      5 │
-│ World │ [3,4,5] │ 5 │   3 │      6 │
-└───────┴─────────┴───┴─────┴────────┘
-```
-
-The example below uses the [arrayEnumerate](functions/array_functions.md#array_functions-arrayenumerate) function:
-
-``` sql
-SELECT s, arr, a, num, arrayEnumerate(arr)
-FROM arrays_test
-ARRAY JOIN arr AS a, arrayEnumerate(arr) AS num;
-```
-
-``` text
-┌─s─────┬─arr─────┬─a─┬─num─┬─arrayEnumerate(arr)─┐
-│ Hello │ [1,2]   │ 1 │   1 │ [1,2]               │
-│ Hello │ [1,2]   │ 2 │   2 │ [1,2]               │
-│ World │ [3,4,5] │ 3 │   1 │ [1,2,3]             │
-│ World │ [3,4,5] │ 4 │   2 │ [1,2,3]             │
-│ World │ [3,4,5] │ 5 │   3 │ [1,2,3]             │
-└───────┴─────────┴───┴─────┴─────────────────────┘
-```
-
-#### ARRAY JOIN with Nested Data Structure {#array-join-with-nested-data-structure}
-
-`ARRAY JOIN` also works with [nested data structures](../data_types/nested_data_structures/nested.md). Example:
-
-``` sql
-CREATE TABLE nested_test
-(
-    s String,
-    nest Nested(
-    x UInt8,
-    y UInt32)
-) ENGINE = Memory;
-
-INSERT INTO nested_test
-VALUES ('Hello', [1,2], [10,20]), ('World', [3,4,5], [30,40,50]), ('Goodbye', [], []);
-```
-
-``` text
-┌─s───────┬─nest.x──┬─nest.y─────┐
-│ Hello   │ [1,2]   │ [10,20]    │
-│ World   │ [3,4,5] │ [30,40,50] │
-│ Goodbye │ []      │ []         │
-└─────────┴─────────┴────────────┘
-```
-
-``` sql
-SELECT s, `nest.x`, `nest.y`
-FROM nested_test
-ARRAY JOIN nest;
-```
-
-``` text
-┌─s─────┬─nest.x─┬─nest.y─┐
-│ Hello │      1 │     10 │
-│ Hello │      2 │     20 │
-│ World │      3 │     30 │
-│ World │      4 │     40 │
-│ World │      5 │     50 │
-└───────┴────────┴────────┘
-```
-
-When specifying names of nested data structures in `ARRAY JOIN`, the meaning is the same as `ARRAY JOIN` with all the array elements that it consists of. Examples are listed below:
-
-``` sql
-SELECT s, `nest.x`, `nest.y`
-FROM nested_test
-ARRAY JOIN `nest.x`, `nest.y`;
-```
-
-``` text
-┌─s─────┬─nest.x─┬─nest.y─┐
-│ Hello │      1 │     10 │
-│ Hello │      2 │     20 │
-│ World │      3 │     30 │
-│ World │      4 │     40 │
-│ World │      5 │     50 │
-└───────┴────────┴────────┘
-```
-
-This variation also makes sense:
-
-``` sql
-SELECT s, `nest.x`, `nest.y`
-FROM nested_test
-ARRAY JOIN `nest.x`;
-```
-
-``` text
-┌─s─────┬─nest.x─┬─nest.y─────┐
-│ Hello │      1 │ [10,20]    │
-│ Hello │      2 │ [10,20]    │
-│ World │      3 │ [30,40,50] │
-│ World │      4 │ [30,40,50] │
-│ World │      5 │ [30,40,50] │
-└───────┴────────┴────────────┘
-```
-
-An alias may be used for a nested data structure, in order to select either the `JOIN` result or the source array.
Example:
-
-``` sql
-SELECT s, `n.x`, `n.y`, `nest.x`, `nest.y`
-FROM nested_test
-ARRAY JOIN nest AS n;
-```
-
-``` text
-┌─s─────┬─n.x─┬─n.y─┬─nest.x──┬─nest.y─────┐
-│ Hello │   1 │  10 │ [1,2]   │ [10,20]    │
-│ Hello │   2 │  20 │ [1,2]   │ [10,20]    │
-│ World │   3 │  30 │ [3,4,5] │ [30,40,50] │
-│ World │   4 │  40 │ [3,4,5] │ [30,40,50] │
-│ World │   5 │  50 │ [3,4,5] │ [30,40,50] │
-└───────┴─────┴─────┴─────────┴────────────┘
-```
-
-Example of using the [arrayEnumerate](functions/array_functions.md#array_functions-arrayenumerate) function:
-
-``` sql
-SELECT s, `n.x`, `n.y`, `nest.x`, `nest.y`, num
-FROM nested_test
-ARRAY JOIN nest AS n, arrayEnumerate(`nest.x`) AS num;
-```
-
-``` text
-┌─s─────┬─n.x─┬─n.y─┬─nest.x──┬─nest.y─────┬─num─┐
-│ Hello │   1 │  10 │ [1,2]   │ [10,20]    │   1 │
-│ Hello │   2 │  20 │ [1,2]   │ [10,20]    │   2 │
-│ World │   3 │  30 │ [3,4,5] │ [30,40,50] │   1 │
-│ World │   4 │  40 │ [3,4,5] │ [30,40,50] │   2 │
-│ World │   5 │  50 │ [3,4,5] │ [30,40,50] │   3 │
-└───────┴─────┴─────┴─────────┴────────────┴─────┘
-```
-
-### JOIN Clause {#select-join}
-
-Joins the data in the normal [SQL JOIN](https://en.wikipedia.org/wiki/Join_(SQL)) sense.
-
-!!! info "Note"
-    Not related to [ARRAY JOIN](#select-array-join-clause).
-
-``` sql
-SELECT <expr_list>
-FROM <left_subquery>
-[GLOBAL] [ANY|ALL] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER] JOIN <right_subquery>
-(ON <expr_list>)|(USING <column_list>) ...
-```
-
-Table names can be specified instead of `<left_subquery>` and `<right_subquery>`. This is equivalent to the `SELECT * FROM table` subquery, except in a special case when the table has the [Join](../operations/table_engines/join.md) engine – an array prepared for joining.
-
-#### Supported Types of `JOIN` {#select-join-types}
-
-- `INNER JOIN` (or `JOIN`)
-- `LEFT JOIN` (or `LEFT OUTER JOIN`)
-- `RIGHT JOIN` (or `RIGHT OUTER JOIN`)
-- `FULL JOIN` (or `FULL OUTER JOIN`)
-- `CROSS JOIN` (or `,`)
-
-See the standard [SQL JOIN](https://en.wikipedia.org/wiki/Join_(SQL)) description.
-
-#### Multiple JOIN {#multiple-join}
-
-Performing queries, ClickHouse rewrites multi-table joins into the sequence of two-table joins. For example, if there are four tables to join, ClickHouse joins the first and the second, then joins the result with the third table, and at the last step it joins the fourth one.
-
-If a query contains the `WHERE` clause, ClickHouse tries to push down filters from this clause through the intermediate join. If it cannot apply the filter to each intermediate join, ClickHouse applies the filters after all joins are completed.
-
-We recommend the `JOIN ON` or `JOIN USING` syntax for creating queries. For example:
-
-``` sql
-SELECT * FROM t1 JOIN t2 ON t1.a = t2.a JOIN t3 ON t1.a = t3.a
-```
-
-You can use comma-separated lists of tables in the `FROM` clause. For example:
-
-``` sql
-SELECT * FROM t1, t2, t3 WHERE t1.a = t2.a AND t1.a = t3.a
-```
-
-Don't mix these syntaxes.
-
-ClickHouse doesn't directly support syntax with commas, so we don't recommend using it. The algorithm tries to rewrite the query in terms of `CROSS JOIN` and `INNER JOIN` clauses and then proceeds to query processing. When rewriting the query, ClickHouse tries to optimize performance and memory consumption. By default, ClickHouse treats commas as an `INNER JOIN` clause and converts `INNER JOIN` to `CROSS JOIN` when the algorithm cannot guarantee that `INNER JOIN` returns the required data.
-
-#### Strictness {#select-join-strictness}
-
-- `ALL` — If the right table has several matching rows, ClickHouse creates a [Cartesian product](https://en.wikipedia.org/wiki/Cartesian_product) from matching rows. This is the standard `JOIN` behavior in SQL.
-- `ANY` — If the right table has several matching rows, only the first one found is joined. If the right table has only one matching row, the results of queries with the `ANY` and `ALL` keywords are the same.
-- `ASOF` — For joining sequences with a non-exact match. `ASOF JOIN` usage is described below.
-
-**ASOF JOIN Usage**
-
-`ASOF JOIN` is useful when you need to join records that have no exact match.
-
-Tables for `ASOF JOIN` must have an ordered sequence column. This column cannot be alone in a table, and should be one of the data types: `UInt32`, `UInt64`, `Float32`, `Float64`, `Date`, and `DateTime`.
-
-Syntax `ASOF JOIN ... ON`:
-
-``` sql
-SELECT expressions_list
-FROM table_1
-ASOF LEFT JOIN table_2
-ON equi_cond AND closest_match_cond
-```
-
-You can use any number of equality conditions and exactly one closest match condition. For example, `SELECT count() FROM table_1 ASOF LEFT JOIN table_2 ON table_1.a == table_2.b AND table_2.t <= table_1.t`.
-
-Conditions supported for the closest match: `>`, `>=`, `<`, `<=`.
-
-Syntax `ASOF JOIN ... USING`:
-
-``` sql
-SELECT expressions_list
-FROM table_1
-ASOF JOIN table_2
-USING (equi_column1, ... equi_columnN, asof_column)
-```
-
-`ASOF JOIN` uses `equi_columnX` for joining on equality and `asof_column` for joining on the closest match with the `table_1.asof_column >= table_2.asof_column` condition. The `asof_column` column is always the last one in the `USING` clause.
-
-For example, consider the following tables:
-
-``` text
-     table_1                       table_2
-
-  event   | ev_time | user_id    event   | ev_time | user_id
-----------|---------|--------- ----------|---------|---------
-              ...                            ...
-event_1_1 |  12:00  |  42      event_2_1 |  11:59  |   42
-              ...              event_2_2 |  12:30  |   42
-event_1_2 |  13:00  |  42      event_2_3 |  13:00  |   42
-              ...                            ...
-```
-
-`ASOF JOIN` can take the timestamp of a user event from `table_1` and find an event in `table_2` where the timestamp is closest to the timestamp of the event from `table_1` corresponding to the closest match condition. Equal timestamp values are the closest, if available. Here, the `user_id` column can be used for joining on equality and the `ev_time` column can be used for joining on the closest match. In our example, `event_1_1` can be joined with `event_2_1` and `event_1_2` can be joined with `event_2_3`, but `event_2_2` can't be joined.
-
-!!! note "Note"
-    `ASOF` join is **not** supported in the [Join](../operations/table_engines/join.md) table engine.
-
-To set the default strictness value, use the session configuration parameter [join\_default\_strictness](../operations/settings/settings.md#settings-join_default_strictness).
-
-#### GLOBAL JOIN {#global-join}
-
-When using a normal `JOIN`, the query is sent to remote servers. Subqueries are run on each of them in order to make the right table, and the join is performed with this table. In other words, the right table is formed on each server separately.
-
-When using `GLOBAL ... JOIN`, first the requestor server runs a subquery to calculate the right table. This temporary table is passed to each remote server, and queries are run on them using the temporary data that was transmitted.
-
-Be careful when using `GLOBAL`. For more information, see the section [Distributed subqueries](#select-distributed-subqueries).
-
-#### Usage Recommendations {#usage-recommendations}
-
-When running a `JOIN`, there is no optimization of the order of execution in relation to other stages of the query. The join (a search in the right table) is run before filtering in `WHERE` and before aggregation. In order to explicitly set the processing order, we recommend running a `JOIN` subquery with a subquery.
-
-Example:
-
-``` sql
-SELECT
-    CounterID,
-    hits,
-    visits
-FROM
-(
-    SELECT
-        CounterID,
-        count() AS hits
-    FROM test.hits
-    GROUP BY CounterID
-) ANY LEFT JOIN
-(
-    SELECT
-        CounterID,
-        sum(Sign) AS visits
-    FROM test.visits
-    GROUP BY CounterID
-) USING CounterID
-ORDER BY hits DESC
-LIMIT 10
-```
-
-``` text
-┌─CounterID─┬───hits─┬─visits─┐
-│   1143050 │ 523264 │  13665 │
-│    731962 │ 475698 │ 102716 │
-│    722545 │ 337212 │ 108187 │
-│    722889 │ 252197 │  10547 │
-│   2237260 │ 196036 │   9522 │
-│  23057320 │ 147211 │   7689 │
-│    722818 │  90109 │  17847 │
-│     48221 │  85379 │   4652 │
-│  19762435 │  77807 │   7026 │
-│    722884 │  77492 │  11056 │
-└───────────┴────────┴────────┘
-```
-
-Subqueries don't allow you to set names or use them for referencing a column from a specific subquery.
-The columns specified in `USING` must have the same names in both subqueries, and the other columns must be named differently. You can use aliases to change the names of columns in subqueries (the example uses the aliases `hits` and `visits`).
-
-The `USING` clause specifies one or more columns to join, which establishes the equality of these columns. The list of columns is set without brackets. More complex join conditions are not supported.
-
-The right table (the subquery result) resides in RAM. If there isn't enough memory, you can't run a `JOIN`.
-
-Each time a query is run with the same `JOIN`, the subquery is run again because the result is not cached. To avoid this, use the special [Join](../operations/table_engines/join.md) table engine, which is a prepared array for joining that is always in RAM.
-
-In some cases, it is more efficient to use `IN` instead of `JOIN`.
-Among the various types of `JOIN`, the most efficient is `ANY LEFT JOIN`, then `ANY INNER JOIN`. The least efficient are `ALL LEFT JOIN` and `ALL INNER JOIN`.
-
-If you need a `JOIN` for joining with dimension tables (these are relatively small tables that contain dimension properties, such as names for advertising campaigns), a `JOIN` might not be very convenient due to the fact that the right table is re-accessed for every query. For such cases, there is an «external dictionaries» feature that you should use instead of `JOIN`. For more information, see the section [External dictionaries](dicts/external_dicts.md).
-
-**Memory Limitations**
-
-ClickHouse uses the [hash join](https://en.wikipedia.org/wiki/Hash_join) algorithm. ClickHouse takes the `<right_table>` and creates a hash table for it in RAM.
If you need to restrict join operation memory consumption, use the following settings:
-
-- [max\_rows\_in\_join](../operations/settings/query_complexity.md#settings-max_rows_in_join) — Limits the number of rows in the hash table.
-- [max\_bytes\_in\_join](../operations/settings/query_complexity.md#settings-max_bytes_in_join) — Limits the size of the hash table.
-
-When any of these limits is reached, ClickHouse acts as the [join\_overflow\_mode](../operations/settings/query_complexity.md#settings-join_overflow_mode) setting instructs.
-
-#### Processing of Empty or NULL Cells {#processing-of-empty-or-null-cells}
-
-While joining tables, empty cells may appear. The [join\_use\_nulls](../operations/settings/settings.md#join_use_nulls) setting defines how ClickHouse fills these cells.
-
-If the `JOIN` keys are [Nullable](../data_types/nullable.md) fields, the rows where at least one of the keys has the value [NULL](syntax.md#null-literal) are not joined.
-
-#### Syntax Limitations {#syntax-limitations}
-
-For multiple `JOIN` clauses in a single `SELECT` query:
-
-- Taking all the columns via `*` is available only if tables are joined, not subqueries.
-- The `PREWHERE` clause is not available.
-
-For `ON`, `WHERE`, and `GROUP BY` clauses:
-
-- Arbitrary expressions cannot be used in `ON`, `WHERE`, and `GROUP BY` clauses, but you can define an expression in a `SELECT` clause and then use it in these clauses via an alias.
-
-### WHERE Clause {#select-where}
-
-If there is a WHERE clause, it must contain an expression with the UInt8 type. This is usually an expression with comparison and logical operators.
-This expression will be used for filtering data before all other transformations.
-
-If indexes are supported by the database table engine, the expression is evaluated on the ability to use indexes.
-
-### PREWHERE Clause {#prewhere-clause}
-
-This clause has the same meaning as the WHERE clause. The difference is in which data is read from the table.
-When using PREWHERE, first only the columns necessary for executing PREWHERE are read. Then the other columns are read that are needed for running the query, but only those blocks where the PREWHERE expression is true.
-
-It makes sense to use PREWHERE if there are filtration conditions that are used by a minority of the columns in the query, but that provide strong data filtration. This reduces the volume of data to read.
-
-For example, it is useful to write PREWHERE for queries that extract a large number of columns, but that only have filtration for a few columns.
-
-PREWHERE is only supported by tables from the `*MergeTree` family.
-
-A query may simultaneously specify PREWHERE and WHERE. In this case, PREWHERE precedes WHERE.
-
-If the ‘optimize\_move\_to\_prewhere’ setting is set to 1 and PREWHERE is omitted, the system uses heuristics to automatically move parts of expressions from WHERE to PREWHERE.
-
-### GROUP BY Clause {#select-group-by-clause}
-
-This is one of the most important parts of a column-oriented DBMS.
-
-If there is a GROUP BY clause, it must contain a list of expressions. Each expression will be referred to here as a «key».
-All the expressions in the SELECT, HAVING, and ORDER BY clauses must be calculated from keys or from aggregate functions.
In other words, each column selected from the table must be used either in keys or inside aggregate functions.
-
-If a query contains only table columns inside aggregate functions, the GROUP BY clause can be omitted, and aggregation by an empty set of keys is assumed.
-
-Example:
-
-``` sql
-SELECT
-    count(),
-    median(FetchTiming > 60 ? 60 : FetchTiming),
-    count() - sum(Refresh)
-FROM hits
-```
-
-However, in contrast to standard SQL, if the table doesn't have any rows (either there aren't any at all, or there aren't any after using WHERE to filter), an empty result is returned, and not the result from one of the rows containing the initial values of aggregate functions.
-
-As opposed to MySQL (and conforming to standard SQL), you can't get some value of some column that is not in a key or aggregate function (except constant expressions). To work around this, you can use the ‘any’ aggregate function (get the first encountered value) or ‘min/max’.
-
-Example:
-
-``` sql
-SELECT
-    domainWithoutWWW(URL) AS domain,
-    count(),
-    any(Title) AS title -- getting the first occurred page header for each domain.
-FROM hits
-GROUP BY domain
-```
-
-For every different key value encountered, GROUP BY calculates a set of aggregate function values.
-
-GROUP BY is not supported for array columns.
-
-A constant can't be specified as arguments for aggregate functions. Example: sum(1). Instead of this, you can get rid of the constant. Example: `count()`.
-
-#### NULL Processing {#null-processing}
-
-For grouping, ClickHouse interprets [NULL](syntax.md) as a value, and `NULL=NULL`.
-
-Here's an example to show what this means.
-
-Assume you have this table:
-
-``` text
-┌─x─┬────y─┐
-│ 1 │    2 │
-│ 2 │ ᴺᵁᴸᴸ │
-│ 3 │    2 │
-│ 3 │    3 │
-│ 3 │ ᴺᵁᴸᴸ │
-└───┴──────┘
-```
-
-The query `SELECT sum(x), y FROM t_null_big GROUP BY y` results in:
-
-``` text
-┌─sum(x)─┬────y─┐
-│      4 │    2 │
-│      3 │    3 │
-│      5 │ ᴺᵁᴸᴸ │
-└────────┴──────┘
-```
-
-You can see that `GROUP BY` for `y = NULL` summed up `x`, as if `NULL` were this value.
-
-If you pass several keys to `GROUP BY`, the result will give you all the combinations of the selection, as if `NULL` were a specific value.
-
-#### WITH TOTALS Modifier {#with-totals-modifier}
-
-If the WITH TOTALS modifier is specified, another row will be calculated. This row will have key columns containing default values (zeros or empty lines), and columns of aggregate functions with the values calculated across all the rows (the «total» values).
-
-This extra row is output in JSON\*, TabSeparated\*, and Pretty\* formats, separately from the other rows. In the other formats, this row is not output.
-
-In JSON\* formats, this row is output as a separate ‘totals’ field. In TabSeparated\* formats, the row comes after the main result, preceded by an empty row (after the other data). In Pretty\* formats, the row is output as a separate table after the main result.
-
-`WITH TOTALS` can be run in different ways when HAVING is present. The behavior depends on the ‘totals\_mode’ setting.
-By default, `totals_mode = 'before_having'`. In this case, ‘totals’ is calculated across all rows, including the ones that don't pass through HAVING and ‘max\_rows\_to\_group\_by’.
-
-The other alternatives include only the rows that pass through HAVING in ‘totals’, and behave differently with the settings `max_rows_to_group_by` and `group_by_overflow_mode = 'any'`.
-
-`after_having_exclusive` – Don't include rows that didn't pass through `max_rows_to_group_by`. In other words, ‘totals’ will have less than or the same number of rows as it would if `max_rows_to_group_by` were omitted.
-
-`after_having_inclusive` – Include all the rows that didn't pass through ‘max\_rows\_to\_group\_by’ in ‘totals’. In other words, ‘totals’ will have more than or the same number of rows as it would if `max_rows_to_group_by` were omitted.
-
-`after_having_auto` – Count the number of rows that passed through HAVING. If it is more than a certain amount (by default, 50%), include all the rows that didn't pass through ‘max\_rows\_to\_group\_by’ in ‘totals’. Otherwise, do not include them.
-
-`totals_auto_threshold` – By default, 0.5. The coefficient for `after_having_auto`.
-
-If `max_rows_to_group_by` and `group_by_overflow_mode = 'any'` are not used, all variations of `after_having` are the same, and you can use any of them (for example, `after_having_auto`).
-
-You can use WITH TOTALS in subqueries, including subqueries in the JOIN clause (in this case, the respective total values are combined).
-
-#### GROUP BY in External Memory {#select-group-by-in-external-memory}
-
-You can enable dumping temporary data to the disk to restrict memory usage during `GROUP BY`.
-The [max\_bytes\_before\_external\_group\_by](../operations/settings/settings.md#settings-max_bytes_before_external_group_by) setting determines the threshold RAM consumption for dumping `GROUP BY` temporary data to the file system. If set to 0 (the default), it is disabled.
-
-When using `max_bytes_before_external_group_by`, we recommend that you set `max_memory_usage` about twice as high. This is necessary because there are two stages to aggregation: reading the data and forming intermediate data (1) and merging the intermediate data (2). Dumping data to the file system can only occur during stage 1. If the temporary data wasn't dumped, then stage 2 might require up to the same amount of memory as in stage 1.
-
-For example, if [max\_memory\_usage](../operations/settings/settings.md#settings_max_memory_usage) was set to 10000000000 and you want to use external aggregation, it makes sense to set `max_bytes_before_external_group_by` to 10000000000, and max\_memory\_usage to 20000000000. When external aggregation is triggered (if there was at least one dump of temporary data), maximum consumption of RAM is only slightly more than `max_bytes_before_external_group_by`.
-
-With distributed query processing, external aggregation is performed on remote servers. In order for the requester server to use only a small amount of RAM, set `distributed_aggregation_memory_efficient` to 1.
-
-When merging data flushed to the disk, as well as when merging results from remote servers when the `distributed_aggregation_memory_efficient` setting is enabled, ClickHouse consumes up to `1/256 * the_number_of_threads` of the total amount of RAM.
-
-When external aggregation is enabled, if there was less than `max_bytes_before_external_group_by` of data (i.e. data was not flushed), the query runs just as fast as without external aggregation.
If any temporary data was flushed, the run time will be several times longer (approximately three times).
-
-If you have an `ORDER BY` with a `LIMIT` after `GROUP BY`, then the amount of RAM used depends on the amount of data in `LIMIT`, not in the whole table. But if the `ORDER BY` doesn't have `LIMIT`, don't forget to enable external sorting (`max_bytes_before_external_sort`).
-
-### LIMIT BY Clause {#limit-by-clause}
-
-A query with the `LIMIT n BY expressions` clause selects the first `n` rows for each distinct value of `expressions`. The key for `LIMIT BY` can contain any number of [expressions](syntax.md#syntax-expressions).
-
-ClickHouse supports the following syntax:
-
-- `LIMIT [offset_value, ]n BY expressions`
-- `LIMIT n OFFSET offset_value BY expressions`
-
-During query processing, ClickHouse selects data ordered by sorting key. The sorting key is set explicitly using an [ORDER BY](#select-order-by) clause or implicitly as a property of the table engine. Then ClickHouse applies `LIMIT n BY expressions` and returns the first `n` rows for each distinct combination of `expressions`. If `OFFSET` is specified, then for each data block that belongs to a distinct combination of `expressions`, ClickHouse skips `offset_value` number of rows from the beginning of the block and returns a maximum of `n` rows as a result. If `offset_value` is bigger than the number of rows in the data block, ClickHouse returns zero rows from the block.
-
-`LIMIT BY` is not related to `LIMIT`. They can both be used in the same query.
-
-**Examples**
-
-Sample table:
-
-``` sql
-CREATE TABLE limit_by(id Int, val Int) ENGINE = Memory;
-INSERT INTO limit_by values(1, 10), (1, 11), (1, 12), (2, 20), (2, 21);
-```
-
-Queries:
-
-``` sql
-SELECT * FROM limit_by ORDER BY id, val LIMIT 2 BY id
-```
-
-``` text
-┌─id─┬─val─┐
-│  1 │  10 │
-│  1 │  11 │
-│  2 │  20 │
-│  2 │  21 │
-└────┴─────┘
-```
-
-``` sql
-SELECT * FROM limit_by ORDER BY id, val LIMIT 1, 2 BY id
-```
-
-``` text
-┌─id─┬─val─┐
-│  1 │  11 │
-│  1 │  12 │
-│  2 │  21 │
-└────┴─────┘
-```
-
-The `SELECT * FROM limit_by ORDER BY id, val LIMIT 2 OFFSET 1 BY id` query returns the same result.
-
-The following query returns the top 5 referrers for each `domain, device_type` pair with a maximum of 100 rows in total (`LIMIT n BY + LIMIT`).
-
-``` sql
-SELECT
-    domainWithoutWWW(URL) AS domain,
-    domainWithoutWWW(REFERRER_URL) AS referrer,
-    device_type,
-    count() cnt
-FROM hits
-GROUP BY domain, referrer, device_type
-ORDER BY cnt DESC
-LIMIT 5 BY domain, device_type
-LIMIT 100
-```
-
-### HAVING Clause {#having-clause}
-
-Allows filtering the result received after GROUP BY, similar to the WHERE clause.
-WHERE and HAVING differ in that WHERE is performed before aggregation (GROUP BY), while HAVING is performed after it.
-If aggregation is not performed, HAVING can't be used.
-
-### ORDER BY Clause {#select-order-by}
-
-The ORDER BY clause contains a list of expressions, which can each be assigned DESC or ASC (the sorting direction). If the direction is not specified, ASC is assumed. ASC is sorted in ascending order, and DESC in descending order. The sorting direction applies to a single expression, not to the entire list. Example: `ORDER BY Visits DESC, SearchPhrase`
-
-For sorting by String values, you can specify collation (comparison).
Example: `ORDER BY SearchPhrase COLLATE 'tr'` - for sorting by keyword in ascending order, using the Turkish alphabet, case insensitive, assuming that strings are UTF-8 encoded. COLLATE can be specified or not for each expression in ORDER BY independently. If ASC or DESC is specified, COLLATE is specified after it. When using COLLATE, sorting is always case-insensitive.
-
-We only recommend using COLLATE for final sorting of a small number of rows, since sorting with COLLATE is less efficient than normal sorting by bytes.
-
-Rows that have identical values for the list of sorting expressions are output in an arbitrary order, which can also be nondeterministic (different each time).
-If the ORDER BY clause is omitted, the order of the rows is also undefined, and may be nondeterministic as well.
-
-`NaN` and `NULL` sorting order:
-
-- With the modifier `NULLS FIRST` — First `NULL`, then `NaN`, then other values.
-- With the modifier `NULLS LAST` — First the values, then `NaN`, then `NULL`.
-- Default — The same as with the `NULLS LAST` modifier.
-
-Example:
-
-For the table
-
-``` text
-┌─x─┬────y─┐
-│ 1 │ ᴺᵁᴸᴸ │
-│ 2 │    2 │
-│ 1 │  nan │
-│ 2 │    2 │
-│ 3 │    4 │
-│ 5 │    6 │
-│ 6 │  nan │
-│ 7 │ ᴺᵁᴸᴸ │
-│ 6 │    7 │
-│ 8 │    9 │
-└───┴──────┘
-```
-
-Run the query `SELECT * FROM t_null_nan ORDER BY y NULLS FIRST` to get:
-
-``` text
-┌─x─┬────y─┐
-│ 1 │ ᴺᵁᴸᴸ │
-│ 7 │ ᴺᵁᴸᴸ │
-│ 1 │  nan │
-│ 6 │  nan │
-│ 2 │    2 │
-│ 2 │    2 │
-│ 3 │    4 │
-│ 5 │    6 │
-│ 6 │    7 │
-│ 8 │    9 │
-└───┴──────┘
-```
-
-When floating point numbers are sorted, NaNs are separate from the other values. Regardless of the sorting order, NaNs come at the end. In other words, for ascending sorting they are placed as if they are larger than all the other numbers, while for descending sorting they are placed as if they are smaller than the rest.
-
-Less RAM is used if a small enough LIMIT is specified in addition to ORDER BY. Otherwise, the amount of memory spent is proportional to the volume of data for sorting. For distributed query processing, if GROUP BY is omitted, sorting is partially done on remote servers, and the results are merged on the requestor server. This means that for distributed sorting, the volume of data to sort can be greater than the amount of memory on a single server.
-
-If there is not enough RAM, it is possible to perform sorting in external memory (creating temporary files on a disk). Use the setting `max_bytes_before_external_sort` for this purpose. If it is set to 0 (the default), external sorting is disabled. If it is enabled, when the volume of data to sort reaches the specified number of bytes, the collected data is sorted and dumped into a temporary file. After all data is read, all the sorted files are merged and the results are output. Files are written to the /var/lib/clickhouse/tmp/ directory in the config (by default, but you can use the ‘tmp\_path’ parameter to change this setting).
-
-Running a query may use more memory than ‘max\_bytes\_before\_external\_sort’. For this reason, this setting must have a value significantly smaller than ‘max\_memory\_usage’.
As an example, if your server has 128 GB of RAM and you need to run a single query, set ‘max\_memory\_usage’ to 100 GB, and ‘max\_bytes\_before\_external\_sort’ to 80 GB.
-
-External sorting works much less effectively than sorting in RAM.
-
-### SELECT Clause {#select-select}
-
-[Expressions](syntax.md#syntax-expressions) specified in the `SELECT` clause are calculated after all the operations in the clauses described above are finished. These expressions work as if they apply to separate rows in the result. If expressions in the `SELECT` clause contain aggregate functions, then ClickHouse processes aggregate functions and expressions used as their arguments during the [GROUP BY](#select-group-by-clause) aggregation.
-
-If you want to include all columns in the result, use the asterisk (`*`) symbol. For example, `SELECT * FROM ...`.
-
-To match some columns in the result with a [re2](https://en.wikipedia.org/wiki/RE2_(software)) regular expression, you can use the `COLUMNS` expression.
-
-``` sql
-COLUMNS('regexp')
-```
-
-For example, consider the table:
-
-``` sql
-CREATE TABLE default.col_names (aa Int8, ab Int8, bc Int8) ENGINE = TinyLog
-```
-
-The following query selects data from all the columns containing the `a` symbol in their name.
-
-``` sql
-SELECT COLUMNS('a') FROM col_names
-```
-
-``` text
-┌─aa─┬─ab─┐
-│  1 │  1 │
-└────┴────┘
-```
-
-The selected columns are returned not in the alphabetical order.
-
-You can use multiple `COLUMNS` expressions in a query and apply functions to them.
-
-For example:
-
-``` sql
-SELECT COLUMNS('a'), COLUMNS('c'), toTypeName(COLUMNS('c')) FROM col_names
-```
-
-``` text
-┌─aa─┬─ab─┬─bc─┬─toTypeName(bc)─┐
-│  1 │  1 │  1 │ Int8           │
-└────┴────┴────┴────────────────┘
-```
-
-Each column returned by the `COLUMNS` expression is passed to the function as a separate argument. You can also pass other arguments to the function if it supports them. Be careful when using functions. If a function doesn't support the number of arguments you have passed to it, ClickHouse throws an exception.
-
-For example:
-
-``` sql
-SELECT COLUMNS('a') + COLUMNS('c') FROM col_names
-```
-
-``` text
-Received exception from server (version 19.14.1):
-Code: 42. DB::Exception: Received from localhost:9000. DB::Exception: Number of arguments for function plus doesn't match: passed 3, should be 2.
-```
-
-In this example, `COLUMNS('a')` returns two columns: `aa` and `ab`. `COLUMNS('c')` returns the `bc` column. The `+` operator can't apply to 3 arguments, so ClickHouse throws an exception with the relevant message.
-
-Columns that matched the `COLUMNS` expression can have different data types. If `COLUMNS` doesn't match any columns and is the only expression in `SELECT`, ClickHouse throws an exception.
-
-### DISTINCT Clause {#select-distinct}
-
-If DISTINCT is specified, only a single row will remain out of all the sets of fully matching rows in the result.
-The result will be the same as if GROUP BY were specified across all the fields specified in SELECT without aggregate functions. But there are several differences from GROUP BY:
-
-- DISTINCT can be applied together with GROUP BY.
-- When ORDER BY is omitted and LIMIT is defined, the query stops running right after the required number of different rows has been read.
-- Data blocks are output as they are processed, without waiting for the entire query to finish running.
-
-DISTINCT is not supported if SELECT has at least one array column.
-
-`DISTINCT` works with [NULL](syntax.md) as if `NULL` were a specific value, and `NULL=NULL`. In other words, in the `DISTINCT` results, different combinations with `NULL` only occur once.
-
-ClickHouse supports using the `DISTINCT` and `ORDER BY` clauses for different columns in one query. The `DISTINCT` clause is executed before the `ORDER BY` clause.
-
-Example table:
-
-``` text
-┌─a─┬─b─┐
-│ 2 │ 1 │
-│ 1 │ 2 │
-│ 3 │ 3 │
-│ 2 │ 4 │
-└───┴───┘
-```
-
-When selecting data with the `SELECT DISTINCT a FROM t1 ORDER BY b ASC` query, we get the following result:
-
-``` text
-┌─a─┐
-│ 2 │
-│ 1 │
-│ 3 │
-└───┘
-```
-
-If we change the sorting direction `SELECT DISTINCT a FROM t1 ORDER BY b DESC`, we get the following result:
-
-``` text
-┌─a─┐
-│ 3 │
-│ 1 │
-│ 2 │
-└───┘
-```
-
-Row `2, 4` was cut before sorting.
-
-Take this implementation specificity into account when programming queries.
-
-### LIMIT Clause {#limit-clause}
-
-`LIMIT m` allows you to select the first `m` rows from the result.
-
-`LIMIT n, m` allows you to select the first `m` rows from the result after skipping the first `n` rows. The `LIMIT m OFFSET n` syntax is also supported.
-
-`n` and `m` must be non-negative integers.
-
-If there is no `ORDER BY` clause that explicitly sorts results, the result may be arbitrary and nondeterministic.
-
-### UNION ALL Clause {#union-all-clause}
-
-You can use UNION ALL to combine any number of queries. Example:
-
-``` sql
-SELECT CounterID, 1 AS table, toInt64(count()) AS c
-    FROM test.hits
-    GROUP BY CounterID
-
-UNION ALL
-
-SELECT CounterID, 2 AS table, sum(Sign) AS c
-    FROM test.visits
-    GROUP BY CounterID
-    HAVING c > 0
-```
-
-Only UNION ALL is supported. The regular UNION (UNION DISTINCT) is not supported. If you need UNION DISTINCT, you can write SELECT DISTINCT from a subquery containing UNION ALL.
-
-Queries that are parts of UNION ALL can be run simultaneously, and their results can be mixed together.
-
-The structure of results (the number and type of columns) must match for the queries. But the column names can differ. In this case, the column names for the final result will be taken from the first query. Type casting is performed for unions. For example, if two queries being combined have the same field with non-`Nullable` and `Nullable` types from a compatible type, the resulting `UNION ALL` has a `Nullable` type field.
-
-Queries that are parts of UNION ALL can't be enclosed in brackets. ORDER BY and LIMIT are applied to separate queries, not to the final result. If you need to apply a conversion to the final result, you can put all the queries with UNION ALL in a subquery in the FROM clause.
-
-### INTO OUTFILE Clause {#into-outfile-clause}
-
-Add the `INTO OUTFILE filename` clause (where filename is a string literal) to redirect query output to the specified file.
-In contrast to MySQL, the file is created on the client side. The query will fail if a file with the same filename already exists.
-This functionality is available in the command-line client and clickhouse-local (a query sent via HTTP interface will fail).
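-
-As a minimal sketch (reusing the `hits` table from the examples above), note that the clause order follows the syntax at the top of this page: `INTO OUTFILE` comes before `FORMAT`.
-
-``` sql
--- Dump aggregated results into a client-side TSV file.
-SELECT CounterID, count() AS views
-FROM hits
-GROUP BY CounterID
-INTO OUTFILE 'counters.tsv'
-FORMAT TabSeparated
-```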
-
-The default output format is TabSeparated (the same as in the command-line client batch mode).
-
-### FORMAT Clause {#format-clause}
-
-Specify ‘FORMAT format’ to get data in any specified format.
-You can use this for convenience, or for creating dumps.
-For more information, see the section «Formats».
-If the FORMAT clause is omitted, the default format is used, which depends on both the settings and the interface used for accessing the DB. For the HTTP interface and the command-line client in batch mode, the default format is TabSeparated. For the command-line client in interactive mode, the default format is PrettyCompact (it has attractive and compact tables).
-
-When using the command-line client, data is passed to the client in an internal efficient format. The client independently interprets the FORMAT clause of the query and formats the data itself (thus relieving the network and the server from the load).
-
-### IN Operators {#select-in-operators}
-
-The `IN`, `NOT IN`, `GLOBAL IN`, and `GLOBAL NOT IN` operators are covered separately, since their functionality is quite rich.
-
-The left side of the operator is either a single column or a tuple.
-
-Examples:
-
-``` sql
-SELECT UserID IN (123, 456) FROM ...
-SELECT (CounterID, UserID) IN ((34, 123), (101500, 456)) FROM ...
-```
-
-If the left side is a single column that is in the index, and the right side is a set of constants, the system uses the index for processing the query.
-
-Don't list too many values explicitly (i.e. millions). If a data set is large, put it in a temporary table (for example, see the section «External data for query processing»), then use a subquery.
-
-The right side of the operator can be a set of constant expressions, a set of tuples with constant expressions (shown in the examples above), or the name of a database table or SELECT subquery in brackets.
-
-If the right side of the operator is the name of a table (for example, `UserID IN users`), this is equivalent to the subquery `UserID IN (SELECT * FROM users)`. Use this when working with external data that is sent along with the query. For example, the query can be sent together with a set of user IDs loaded to the ‘users’ temporary table, which should be filtered.
-
-If the right side of the operator is a table name that has the Set engine (a prepared data set that is always in RAM), the data set will not be created over again for each query.
-
-The subquery may specify more than one column for filtering tuples.
-Example:
-
-``` sql
-SELECT (CounterID, UserID) IN (SELECT CounterID, UserID FROM ...) FROM ...
-```
-
-The columns to the left and right of the IN operator should have the same type.
-
-The IN operator and subquery may occur in any part of the query, including in aggregate functions and lambda functions.
-
-Example:
-
-``` sql
-SELECT
-    EventDate,
-    avg(UserID IN
-    (
-        SELECT UserID
-        FROM test.hits
-        WHERE EventDate = toDate('2014-03-17')
-    )) AS ratio
-FROM test.hits
-GROUP BY EventDate
-ORDER BY EventDate ASC
-```
-
-``` text
-┌──EventDate─┬────ratio─┐
-│ 2014-03-17 │        1 │
-│ 2014-03-18 │ 0.807696 │
-│ 2014-03-19 │ 0.755406 │
-│ 2014-03-20 │ 0.723218 │
-│ 2014-03-21 │ 0.697021 │
-│ 2014-03-22 │ 0.647851 │
-│ 2014-03-23 │ 0.648416 │
-└────────────┴──────────┘
-```
-
-For each day after March 17th, count the percentage of pageviews made by users who visited the site on March 17th.
-A subquery in the IN clause is always run just one time on a single server. There are no dependent subqueries.
-
-#### NULL Processing {#null-processing-1}
-
-During request processing, the IN operator assumes that the result of an operation with [NULL](syntax.md) is always equal to `0`, regardless of whether `NULL` is on the right or left side of the operator. `NULL` values are not included in any data set, do not correspond to each other, and cannot be compared.
-
-Here is an example with the `t_null` table:
-
-``` text
-┌─x─┬────y─┐
-│ 1 │ ᴺᵁᴸᴸ │
-│ 2 │    3 │
-└───┴──────┘
-```
-
-Running the query `SELECT x FROM t_null WHERE y IN (NULL,3)` gives you the following result:
-
-``` text
-┌─x─┐
-│ 2 │
-└───┘
-```
-
-You can see that the row in which `y = NULL` is thrown out of the query results. This is because ClickHouse can't decide whether `NULL` is included in the `(NULL,3)` set, returns `0` as the result of the operation, and `SELECT` excludes this row from the final output.
-
-``` sql
-SELECT y IN (NULL, 3)
-FROM t_null
-```
-
-``` text
-┌─in(y, tuple(NULL, 3))─┐
-│                     0 │
-│                     1 │
-└───────────────────────┘
-```
-
-#### Distributed Subqueries {#select-distributed-subqueries}
-
-There are two options for IN-s with subqueries (similar to JOINs): normal `IN` / `JOIN` and `GLOBAL IN` / `GLOBAL JOIN`. They differ in how they are run for distributed query processing.
-
-!!! attention "Attention"
-    Remember that the algorithms described below may work differently depending on the [settings](../operations/settings/settings.md) `distributed_product_mode` setting.
-
-When using the regular IN, the query is sent to remote servers, and each of them runs the subqueries in the `IN` or `JOIN` clause.
-
-When using `GLOBAL IN` / `GLOBAL JOINs`, first all the subqueries are run for `GLOBAL IN` / `GLOBAL JOINs`, and the results are collected in temporary tables. Then the temporary tables are sent to each remote server, where the queries are run using this temporary data.
-
-For a non-distributed query, use the regular `IN` / `JOIN`.
-
-Be careful when using subqueries in the `IN` / `JOIN` clauses for distributed query processing.
-
-Let's look at some examples. Assume that each server in the cluster has a normal **local\_table**. Each server also has a **distributed\_table** table with the **Distributed** type, which looks at all the servers in the cluster.
-
-For a query to the **distributed\_table**, the query will be sent to all the remote servers and run on them using the **local\_table**.
- -Por ejemplo, la consulta - -``` sql -SELECT uniq(UserID) FROM distributed_table -``` - -se enviará a todos los servidores remotos como - -``` sql -SELECT uniq(UserID) FROM local_table -``` - -y ejecutar en cada uno de ellos en paralelo, hasta que llegue a la etapa donde se pueden combinar resultados intermedios. Luego, los resultados intermedios se devolverán al servidor solicitante y se fusionarán en él, y el resultado final se enviará al cliente. - -Ahora vamos a examinar una consulta con IN: - -``` sql -SELECT uniq(UserID) FROM distributed_table WHERE CounterID = 101500 AND UserID IN (SELECT UserID FROM local_table WHERE CounterID = 34) -``` - -- Cálculo de la intersección de audiencias de dos sitios. - -Esta consulta se enviará a todos los servidores remotos como - -``` sql -SELECT uniq(UserID) FROM local_table WHERE CounterID = 101500 AND UserID IN (SELECT UserID FROM local_table WHERE CounterID = 34) -``` - -En otras palabras, los datos establecidos en la cláusula IN se recopilarán en cada servidor de forma independiente, solo a través de los datos que se almacenan localmente en cada uno de los servidores. - -Esto funcionará correctamente y de manera óptima si está preparado para este caso y ha distribuido datos en los servidores de clúster de modo que los datos de un único ID de usuario residen completamente en un único servidor. En este caso, todos los datos necesarios estarán disponibles localmente en cada servidor. De lo contrario, el resultado será inexacto. Nos referimos a esta variación de la consulta como «local IN». - -Para corregir cómo funciona la consulta cuando los datos se distribuyen aleatoriamente entre los servidores de clúster, puede especificar **distributed\_table** dentro de una subconsulta. La consulta se vería así: - -``` sql -SELECT uniq(UserID) FROM distributed_table WHERE CounterID = 101500 AND UserID IN (SELECT UserID FROM distributed_table WHERE CounterID = 34) -``` - -Esta consulta se enviará a todos los servidores remotos como - -``` sql -SELECT uniq(UserID) FROM local_table WHERE CounterID = 101500 AND UserID IN (SELECT UserID FROM distributed_table WHERE CounterID = 34) -``` - -La subconsulta comenzará a ejecutarse en cada servidor remoto. Dado que la subconsulta utiliza una tabla distribuida, la subconsulta que se encuentra en cada servidor remoto se reenviará a cada servidor remoto como - -``` sql -SELECT UserID FROM local_table WHERE CounterID = 34 -``` - -Por ejemplo, si tiene un clúster de 100 servidores, la ejecución de toda la consulta requerirá 10.000 solicitudes elementales, lo que generalmente se considera inaceptable. - -En tales casos, siempre debe usar GLOBAL IN en lugar de IN. Veamos cómo funciona para la consulta - -``` sql -SELECT uniq(UserID) FROM distributed_table WHERE CounterID = 101500 AND UserID GLOBAL IN (SELECT UserID FROM distributed_table WHERE CounterID = 34) -``` - -El servidor del solicitante ejecutará la subconsulta - -``` sql -SELECT UserID FROM distributed_table WHERE CounterID = 34 -``` - -y el resultado se colocará en una tabla temporal en la RAM. A continuación, la solicitud se enviará a cada servidor remoto como - -``` sql -SELECT uniq(UserID) FROM local_table WHERE CounterID = 101500 AND UserID GLOBAL IN _data1 -``` - -y la tabla temporal `_data1` se enviará a cada servidor remoto con la consulta (el nombre de la tabla temporal está definido por la implementación). - -Esto es más óptimo que usar el IN normal. Sin embargo, tenga en cuenta los siguientes puntos: - -1. 
Al crear una tabla temporal, los datos no se hacen únicos. Para reducir el volumen de datos transmitidos a través de la red, especifique DISTINCT en la subconsulta. (No necesita hacer esto para un IN normal.)
-2. La tabla temporal se enviará a todos los servidores remotos. La transmisión no tiene en cuenta la topología de red. Por ejemplo, si 10 servidores remotos residen en un centro de datos que es muy remoto en relación con el servidor solicitante, los datos se enviarán 10 veces a través del canal al centro de datos remoto. Intente evitar grandes conjuntos de datos cuando use GLOBAL IN.
-3. Al transmitir datos a servidores remotos, las restricciones en el ancho de banda de la red no son configurables. Puede sobrecargar la red.
-4. Intente distribuir datos entre servidores para que no necesite usar GLOBAL IN de forma regular.
-5. Si necesita utilizar GLOBAL IN con frecuencia, planifique la ubicación del clúster ClickHouse para que un único grupo de réplicas resida en no más de un centro de datos con una red rápida entre ellos, de modo que una consulta se pueda procesar completamente dentro de un único centro de datos.
-
-También tiene sentido especificar una tabla local en la cláusula `GLOBAL IN`, en caso de que esta tabla local solo esté disponible en el servidor solicitante y desee usar sus datos en servidores remotos.
-
-### Valores extremos {#extreme-values}
-
-Además de los resultados, también puede obtener los valores mínimos y máximos de las columnas de resultados. Para hacer esto, establezca el ajuste **extremes** en 1. Los mínimos y máximos se calculan para tipos numéricos, fechas y fechas con horas. Para otras columnas, se generan los valores predeterminados.
-
-Se calculan dos filas adicionales: los mínimos y los máximos, respectivamente. Estas dos filas adicionales se generan en los [formatos](../interfaces/formats.md) `JSON*`, `TabSeparated*` y `Pretty*`, separadas de las otras filas. No se emiten para otros formatos.
-
-En los formatos `JSON*`, los valores extremos se emiten en un campo ‘extremes’ separado. En los formatos `TabSeparated*`, la fila viene después del resultado principal, y después de ‘totals’ si está presente. Está precedida por una fila vacía (después de los otros datos). En los formatos `Pretty*`, la fila se muestra como una tabla separada después del resultado principal, y después de `totals` si está presente.
-
-Los valores extremos se calculan para las filas anteriores a `LIMIT`, pero posteriores a `LIMIT BY`. Sin embargo, cuando se usa `LIMIT offset, size`, las filas anteriores a `offset` se incluyen en `extremes`. En las solicitudes en streaming, el resultado también puede incluir un pequeño número de filas que pasaron por `LIMIT`.
-
-### Nota {#notes}
-
-Las cláusulas `GROUP BY` y `ORDER BY` no admiten argumentos posicionales. Esto contradice a MySQL, pero se ajusta al SQL estándar.
-Por ejemplo, `GROUP BY 1, 2` se interpretará como agrupación por constantes (es decir, agregación de todas las filas en una).
-
-Puede usar sinónimos (alias con `AS`) en cualquier parte de una consulta.
-
-Puede poner un asterisco en cualquier parte de una consulta en lugar de una expresión. Cuando se analiza la consulta, el asterisco se expande a una lista de todas las columnas de la tabla (excluyendo las columnas `MATERIALIZED` y `ALIAS`). Solo hay unos pocos casos en los que se justifica el uso de un asterisco:
-
-- Al crear un volcado de tabla.
-- Para tablas que contienen solo unas pocas columnas, como las tablas del sistema.
-- Para obtener información sobre qué columnas están en una tabla. En este caso, establezca `LIMIT 1`.
Pero es mejor usar la consulta `DESC TABLE`.
-- Cuando hay un filtrado fuerte sobre un pequeño número de columnas usando `PREWHERE`.
-- En subconsultas (ya que las columnas que no son necesarias para la consulta externa se excluyen de las subconsultas).
-
-En todos los demás casos, no recomendamos usar el asterisco, ya que solo le da los inconvenientes de un DBMS columnar en lugar de sus ventajas. En otras palabras, no se recomienda usar el asterisco.
-
-[Artículo Original](https://clickhouse.tech/docs/es/query_language/select/)
diff --git a/docs/es/query_language/system.md b/docs/es/query_language/system.md
deleted file mode 100644
index bd30ef013ed..00000000000
--- a/docs/es/query_language/system.md
+++ /dev/null
@@ -1,110 +0,0 @@
----
-machine_translated: true
----
-
-# Consultas del sistema {#query-language-system}
-
-- [RELOAD DICTIONARIES](#query_language-system-reload-dictionaries)
-- [RELOAD DICTIONARY](#query_language-system-reload-dictionary)
-- [DROP DNS CACHE](#query_language-system-drop-dns-cache)
-- [DROP MARK CACHE](#query_language-system-drop-mark-cache)
-- [FLUSH LOGS](#query_language-system-flush_logs)
-- [RELOAD CONFIG](#query_language-system-reload-config)
-- [SHUTDOWN](#query_language-system-shutdown)
-- [KILL](#query_language-system-kill)
-- [STOP DISTRIBUTED SENDS](#query_language-system-stop-distributed-sends)
-- [FLUSH DISTRIBUTED](#query_language-system-flush-distributed)
-- [START DISTRIBUTED SENDS](#query_language-system-start-distributed-sends)
-- [STOP MERGES](#query_language-system-stop-merges)
-- [START MERGES](#query_language-system-start-merges)
-
-## RELOAD DICTIONARIES {#query_language-system-reload-dictionaries}
-
-Vuelve a cargar todos los diccionarios que se han cargado correctamente antes.
-De forma predeterminada, los diccionarios se cargan de forma perezosa (ver [dictionaries\_lazy\_load](../operations/server_settings/settings.md#server_settings-dictionaries_lazy_load)), por lo que en lugar de cargarse automáticamente al inicio, se inicializan en el primer acceso a través de la función dictGet o de un SELECT desde tablas con ENGINE = Dictionary. La consulta `SYSTEM RELOAD DICTIONARIES` vuelve a cargar dichos diccionarios (LOADED).
-Siempre devuelve `Ok.` independientemente del resultado de la actualización del diccionario.
-
-## RELOAD DICTIONARY dictionary\_name {#query_language-system-reload-dictionary}
-
-Recarga completamente un diccionario `dictionary_name`, independientemente del estado del diccionario (LOADED / NOT\_LOADED / FAILED).
-Siempre devuelve `Ok.` independientemente del resultado de la actualización del diccionario.
-El estado del diccionario se puede comprobar consultando la tabla `system.dictionaries`.
-
-``` sql
-SELECT name, status FROM system.dictionaries;
-```
-
-## DROP DNS CACHE {#query_language-system-drop-dns-cache}
-
-Restablece la caché DNS interna de ClickHouse. A veces (para versiones anteriores de ClickHouse) es necesario usar este comando al cambiar la infraestructura (cambiar la dirección IP de otro servidor de ClickHouse o del servidor utilizado por los diccionarios).
-
-Para una administración de caché más conveniente (automática), consulte los parámetros disable\_internal\_dns\_cache y dns\_cache\_update\_period.
-
-## DROP MARK CACHE {#query_language-system-drop-mark-cache}
-
-Restablece la caché de marcas. Se utiliza en el desarrollo de ClickHouse y en pruebas de rendimiento.
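-
-Por ejemplo, ambas cachés pueden restablecerse con las consultas correspondientes (boceto ilustrativo, no forma parte del texto original):
-
-``` sql
-SYSTEM DROP DNS CACHE;
-SYSTEM DROP MARK CACHE;
-```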
-
-## FLUSH LOGS {#query_language-system-flush_logs}
-
-Vuelca los búferes de los mensajes de registro a las tablas del sistema (por ejemplo, system.query\_log). Le permite no esperar 7,5 segundos al depurar.
-
-## RELOAD CONFIG {#query_language-system-reload-config}
-
-Vuelve a cargar la configuración de ClickHouse. Se usa cuando la configuración se almacena en ZooKeeper.
-
-## SHUTDOWN {#query_language-system-shutdown}
-
-Apaga ClickHouse normalmente (como `service clickhouse-server stop` / `kill {$pid_clickhouse-server}`)
-
-## KILL {#query_language-system-kill}
-
-Aborta el proceso de ClickHouse (como `kill -9 {$ pid_clickhouse-server}`)
-
-## Administración de tablas distribuidas {#query-language-system-distributed}
-
-ClickHouse puede administrar tablas [distribuidas](../operations/table_engines/distributed.md). Cuando un usuario inserta datos en estas tablas, ClickHouse primero crea una cola de los datos que se deben enviar a los nodos del clúster y, a continuación, los envía de forma asincrónica. Puede administrar el procesamiento de la cola con las consultas [STOP DISTRIBUTED SENDS](#query_language-system-stop-distributed-sends), [FLUSH DISTRIBUTED](#query_language-system-flush-distributed) y [START DISTRIBUTED SENDS](#query_language-system-start-distributed-sends). También puede insertar datos distribuidos de forma sincrónica con el ajuste `insert_distributed_sync`.
-
-### STOP DISTRIBUTED SENDS {#query_language-system-stop-distributed-sends}
-
-Deshabilita la distribución de datos en segundo plano al insertar datos en tablas distribuidas.
-
-``` sql
-SYSTEM STOP DISTRIBUTED SENDS [db.]<distributed_table_name>
-```
-
-### FLUSH DISTRIBUTED {#query_language-system-flush-distributed}
-
-Obliga a ClickHouse a enviar datos a los nodos del clúster de forma sincrónica. Si algún nodo no está disponible, ClickHouse produce una excepción y detiene la ejecución de la consulta. Puede volver a intentar la consulta hasta que tenga éxito, lo que sucederá cuando todos los nodos estén nuevamente en línea.
-
-``` sql
-SYSTEM FLUSH DISTRIBUTED [db.]<distributed_table_name>
-```
-
-### START DISTRIBUTED SENDS {#query_language-system-start-distributed-sends}
-
-Habilita la distribución de datos en segundo plano al insertar datos en tablas distribuidas.
-
-``` sql
-SYSTEM START DISTRIBUTED SENDS [db.]<distributed_table_name>
-```
-
-### STOP MERGES {#query_language-system-stop-merges}
-
-Proporciona la posibilidad de detener las fusiones en segundo plano para las tablas de la familia MergeTree:
-
-``` sql
-SYSTEM STOP MERGES [[db.]merge_tree_family_table_name]
-```
-
-!!! note "Nota"
-    `DETACH / ATTACH` de la tabla iniciará las fusiones en segundo plano para esa tabla, incluso si las fusiones se habían detenido antes para todas las tablas MergeTree.
-
-### START MERGES {#query_language-system-start-merges}
-
-Proporciona la posibilidad de iniciar las fusiones en segundo plano para las tablas de la familia MergeTree:
-
-``` sql
-SYSTEM START MERGES [[db.]merge_tree_family_table_name]
-```
-
-[Artículo Original](https://clickhouse.tech/docs/es/query_language/system/)
diff --git a/docs/es/query_language/table_functions/index.md b/docs/es/query_language/table_functions/index.md
deleted file mode 100644
index 0f249638a0b..00000000000
--- a/docs/es/query_language/table_functions/index.md
+++ /dev/null
@@ -1,34 +0,0 @@
----
-machine_translated: true
----
-
-# Funciones de tabla {#table-functions}
-
-Las funciones de tabla son métodos para construir tablas.
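-
-Por ejemplo, un boceto mínimo con la función de tabla `numbers` (descrita en la tabla de abajo; el ejemplo no forma parte del texto original):
-
-``` sql
-SELECT * FROM numbers(3); -- una tabla con la columna `number`: 0, 1, 2
-```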
-
-Puede usar funciones de tabla en:
-
-- La cláusula [FROM](../select.md#select-from) de la consulta `SELECT`.
-
-    The method for creating a temporary table that is available only in the current query. The table is deleted when the query finishes.
-
-- La consulta [CREATE TABLE AS \<table\_function()\>](../create.md#create-table-query).
-
-    It's one of the methods of creating a table.
-
-!!! warning "Advertencia"
-    No puede utilizar funciones de tabla si el ajuste [allow\_ddl](../../operations/settings/permissions_for_queries.md#settings_allow_ddl) está deshabilitado.
-
-| Función               | Descripción                                                                                                                             |
-|-----------------------|-----------------------------------------------------------------------------------------------------------------------------------------|
-| [file](file.md)       | Crea una tabla con el motor [File](../../operations/table_engines/file.md).                                                               |
-| [merge](merge.md)     | Crea una tabla con el motor [Merge](../../operations/table_engines/merge.md).                                                             |
-| [numbers](numbers.md) | Crea una tabla con una sola columna llena de números enteros.                                                                             |
-| [remote](remote.md)   | Le permite acceder a servidores remotos sin crear una tabla con el motor [Distributed](../../operations/table_engines/distributed.md).    |
-| [url](url.md)         | Crea una tabla con el motor [URL](../../operations/table_engines/url.md).                                                                 |
-| [mysql](mysql.md)     | Crea una tabla con el motor [MySQL](../../operations/table_engines/mysql.md).                                                             |
-| [jdbc](jdbc.md)       | Crea una tabla con el motor [JDBC](../../operations/table_engines/jdbc.md).                                                               |
-| [odbc](odbc.md)       | Crea una tabla con el motor [ODBC](../../operations/table_engines/odbc.md).                                                               |
-| [hdfs](hdfs.md)       | Crea una tabla con el motor [HDFS](../../operations/table_engines/hdfs.md).                                                               |
-
-[Artículo Original](https://clickhouse.tech/docs/es/query_language/table_functions/)
diff --git a/docs/es/query_language/table_functions/merge.md b/docs/es/query_language/table_functions/merge.md
deleted file mode 100644
index 268ad0a84e0..00000000000
--- a/docs/es/query_language/table_functions/merge.md
+++ /dev/null
@@ -1,11 +0,0 @@
----
-machine_translated: true
----
-
-# Fusionar {#merge}
-
-`merge(db_name, 'tables_regexp')` – Crea una tabla Merge temporal. Para obtener más información, consulte la sección “Table engines, Merge”.
-
-La estructura de la tabla se toma de la primera tabla encontrada que coincide con la expresión regular.
-
-[Artículo Original](https://clickhouse.tech/docs/es/query_language/table_functions/merge/)
diff --git a/docs/es/query_language/agg_functions/combinators.md b/docs/es/sql_reference/aggregate_functions/combinators.md
similarity index 72%
rename from docs/es/query_language/agg_functions/combinators.md
rename to docs/es/sql_reference/aggregate_functions/combinators.md
index 3abe9eb0dcb..fb1ed37d427 100644
--- a/docs/es/query_language/agg_functions/combinators.md
+++ b/docs/es/sql_reference/aggregate_functions/combinators.md
@@ -1,14 +1,17 @@
 ---
 machine_translated: true
+machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa
+toc_priority: 37
+toc_title: Combinadores de funciones agregadas
 ---
 
 # Combinadores de funciones agregadas {#aggregate_functions_combinators}
 
 El nombre de una función agregada puede tener un sufijo anexado. Esto cambia la forma en que funciona la función de agregado.
 
-## Nuestros servicios {#agg-functions-combinator-if}
+## -Si {#agg-functions-combinator-if}
 
-El sufijo -If se puede anexar al nombre de cualquier función agregada. En este caso, la función de agregado acepta un argumento adicional: una condición (tipo Uint8).
La función de agregado procesa solo las filas que desencadenan la condición. Si la condición no se desencadenó ni una sola vez, devuelve un valor predeterminado (normalmente ceros o cadenas vacías).
+The suffix -If can be appended to the name of any aggregate function. In this case, the aggregate function accepts an extra argument – a condition (Uint8 type). The aggregate function processes only the rows that trigger the condition. If the condition was not triggered even once, it returns a default value (usually zeros or empty strings).
 
 Ejemplos: `sumIf(column, cond)`, `countIf(cond)`, `avgIf(x, cond)`, `quantilesTimingIf(level1, level2)(x, cond)`, `argMinIf(arg, val, cond)` y así sucesivamente.
 
@@ -20,19 +23,19 @@ El sufijo -Array se puede agregar a cualquier función agregada. En este caso, l
 
 Ejemplo 1: `sumArray(arr)` - Totales de todos los elementos de todos ‘arr’ matriz. En este ejemplo, podría haber sido escrito más simplemente: `sum(arraySum(arr))`.
 
-Ejemplo 2: `uniqArray(arr)` – Cuenta el número de elementos únicos ‘arr’ matriz. Esto podría hacerse de una manera más fácil: `uniq(arrayJoin(arr))` pero no siempre es posible añadir ‘arrayJoin’ a una consulta.
+Ejemplo 2: `uniqArray(arr)` – Counts the number of unique elements in all ‘arr’ matriz. Esto podría hacerse de una manera más fácil: `uniq(arrayJoin(arr))`, pero no siempre es posible agregar ‘arrayJoin’ a una consulta.
 
--Si y -Array se pueden combinar. Obstante, ‘Array’ debe venir primero, entonces ‘If’. Ejemplos: `uniqArrayIf(arr, cond)`, `quantilesTimingArrayIf(level1, level2)(arr, cond)`. Debido a este pedido, el ‘cond’ el argumento no será una matriz.
+-If y -Array se pueden combinar. No obstante, ‘Array’ debe venir primero y luego ‘If’. Ejemplos: `uniqArrayIf(arr, cond)`, `quantilesTimingArrayIf(level1, level2)(arr, cond)`. Debido a este orden, el ‘cond’ argumento no será una matriz.
 
 ## -Estado {#agg-functions-combinator-state}
 
-Si aplica este combinador, la función de agregado no devuelve el valor resultante (como el número de valores únicos [uniq](reference.md#agg_function-uniq) función), pero un estado intermedio de la agregación (para `uniq`, esta es la tabla hash para calcular el número de valores únicos). Este es un `AggregateFunction(...)` que puede ser utilizado para su posterior procesamiento o almacenado en una tabla para terminar de agregar más tarde.
+Si aplica este combinador, la función de agregado no devuelve el valor resultante (como el número de valores únicos para el [uniq](reference.md#agg_function-uniq) función), pero un estado intermedio de la agregación (para `uniq`, esta es la tabla hash para calcular el número de valores únicos). Este es un `AggregateFunction(...)` que puede ser utilizado para su posterior procesamiento o almacenado en una tabla para terminar de agregar más tarde.
 
 Para trabajar con estos estados, use:
 
-- [AgregaciónMergeTree](../../operations/table_engines/aggregatingmergetree.md) motor de mesa.
-- [finalizeAggregation](../functions/other_functions.md#function-finalizeaggregation) función.
-- [runningAccumulate](../functions/other_functions.md#function-runningaccumulate) función.
+- [AgregaciónMergeTree](../../engines/table_engines/mergetree_family/aggregatingmergetree.md) motor de tablas.
+- [finalizeAggregation](../../sql_reference/functions/other_functions.md#function-finalizeaggregation) función.
+- [runningAccumulate](../../sql_reference/functions/other_functions.md#function-runningaccumulate) función.
 - [-Fusionar](#aggregate_functions_combinators_merge) combinador.
 - [-MergeState](#aggregate_functions_combinators_mergestate) combinador.
 
@@ -102,10 +105,10 @@ Permite dividir los datos en grupos y, a continuación, agregar por separado los
 
 **Parámetros**
 
-- `start` — Valor inicial de todo el intervalo requerido para `resampling_key` valor.
-- `stop` — valor final de todo el intervalo requerido para `resampling_key` valor. Todo el intervalo no incluye el `stop` valor `[start, stop)`.
-- `step` — Paso para separar todo el intervalo en subintervalos. El `aggFunction` se ejecuta sobre cada uno de esos subintervalos de forma independiente.
-- `resampling_key` — Columna cuyos valores se utilizan para separar los datos en intervalos.
+- `start` — Starting value of the whole required interval for `resampling_key` valor.
+- `stop` — Ending value of the whole required interval for `resampling_key` valor. Todo el intervalo no incluye el `stop` valor `[start, stop)`.
+- `step` — Step for separating the whole interval into subintervals. The `aggFunction` se ejecuta sobre cada uno de esos subintervalos de forma independiente.
+- `resampling_key` — Column whose values are used for separating data into intervals.
 - `aggFunction_params` — `aggFunction` parámetros.
 
 **Valores devueltos**
 
@@ -127,7 +130,7 @@ Considere el `people` con los siguientes datos:
 └────────┴─────┴──────┘
 ```
 
-Vamos a obtener los nombres de las personas cuya edad se encuentra en los intervalos de `[30,60)` y `[60,75)`. Como usamos la representación entera para la edad, obtenemos edades en el `[30, 59]` y `[60,74]` intervalo.
+Obtengamos los nombres de las personas cuya edad se encuentra en los intervalos de `[30,60)` y `[60,75)`. Como usamos la representación entera para la edad, obtenemos edades en el `[30, 59]` y `[60,74]` intervalo.
 
 Para agregar nombres en una matriz, usamos el [Método de codificación de datos:](reference.md#agg_function-grouparray) función de agregado. Se necesita un argumento. En nuestro caso, es el `name` columna. El `groupArrayResample` función debe utilizar el `age` columna para agregar nombres por edad. Para definir los intervalos requeridos, pasamos el `30, 75, 30` discusiones sobre el `groupArrayResample` función.
 
@@ -143,7 +146,7 @@ SELECT groupArrayResample(30, 75, 30)(name, age) FROM people
 
 Considera los resultados.
 
-`Jonh` está fuera de la muestra porque es demasiado joven. Otras personas se distribuyen de acuerdo con los intervalos de edad especificados.
+`Jonh` está fuera de la muestra porque es demasiado joven. Otras personas se distribuyen de acuerdo con los intervalos de edad especificados.
 
 Ahora vamos a contar el número total de personas y su salario promedio en los intervalos de edad especificados.
@@ -160,4 +163,4 @@ FROM people └────────┴───────────────────────────┘ ``` -[Artículo Original](https://clickhouse.tech/docs/es/query_language/agg_functions/combinators/) +[Artículo Original](https://clickhouse.tech/docs/en/query_language/agg_functions/combinators/) diff --git a/docs/es/query_language/agg_functions/index.md b/docs/es/sql_reference/aggregate_functions/index.md similarity index 80% rename from docs/es/query_language/agg_functions/index.md rename to docs/es/sql_reference/aggregate_functions/index.md index 297cefd8d2d..6075dddb3e3 100644 --- a/docs/es/query_language/agg_functions/index.md +++ b/docs/es/sql_reference/aggregate_functions/index.md @@ -1,5 +1,9 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_folder_title: Aggregate Functions +toc_priority: 33 +toc_title: "Implantaci\xF3n" --- # Funciones agregadas {#aggregate-functions} @@ -29,17 +33,17 @@ Considere esta tabla: └───┴──────┘ ``` -Supongamos que necesita sumar los valores en el `y` columna: +Digamos que necesita sumar los valores en el `y` columna: ``` sql SELECT sum(y) FROM t_null_big ``` -Método de codificación de datos: -│ 7 │ -¿Qué puedes encontrar en Neodigit + ┌─sum(y)─┐ + │ 7 │ + └────────┘ -El `sum` función interpreta `NULL` como `0`. En particular, esto significa que si la función recibe la entrada de una selección donde todos los valores son `NULL`, entonces el resultado será `0`Nuestra `NULL`. +El `sum` función interpreta `NULL` como `0`. En particular, esto significa que si la función recibe la entrada de una selección donde todos los valores son `NULL`, entonces el resultado será `0`, ni `NULL`. Ahora puedes usar el `groupArray` función para crear una matriz a partir de la `y` columna: @@ -55,4 +59,4 @@ SELECT groupArray(y) FROM t_null_big `groupArray` no incluye `NULL` en la matriz resultante. -[Artículo Original](https://clickhouse.tech/docs/es/query_language/agg_functions/) +[Artículo Original](https://clickhouse.tech/docs/en/query_language/agg_functions/) diff --git a/docs/es/query_language/agg_functions/parametric_functions.md b/docs/es/sql_reference/aggregate_functions/parametric_functions.md similarity index 76% rename from docs/es/query_language/agg_functions/parametric_functions.md rename to docs/es/sql_reference/aggregate_functions/parametric_functions.md index 70703657548..f51168d1087 100644 --- a/docs/es/query_language/agg_functions/parametric_functions.md +++ b/docs/es/sql_reference/aggregate_functions/parametric_functions.md @@ -1,10 +1,13 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 38 +toc_title: "Funciones agregadas param\xE9tricas" --- # Funciones agregadas paramétricas {#aggregate_functions_parametric} -Algunas funciones agregadas pueden aceptar no solo columnas de argumentos (utilizadas para la compresión), sino un conjunto de parámetros: constantes para la inicialización. La sintaxis es de dos pares de corchetes en lugar de uno. El primero es para parámetros, y el segundo es para argumentos. +Some aggregate functions can accept not only argument columns (used for compression), but a set of parameters – constants for initialization. The syntax is two pairs of brackets instead of one. The first is for parameters, and the second is for arguments. 
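+
+Un boceto ilustrativo de esa sintaxis con la función paramétrica `quantiles` (el ejemplo no forma parte del texto original):
+
+``` sql
+SELECT quantiles(0.5, 0.9)(number) FROM numbers(100); -- primer par de paréntesis: parámetros; segundo par: argumentos
+```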
## histograma {#histogram} @@ -18,18 +21,20 @@ Las funciones utiliza [Un algoritmo de árbol de decisión paralelo de transmisi **Parámetros** -`number_of_bins` — Límite superior para el número de ubicaciones en el histograma. La función calcula automáticamente el número de contenedores. Intenta alcanzar el número especificado de ubicaciones, pero si falla, utiliza menos ubicaciones. +`number_of_bins` — Upper limit for the number of bins in the histogram. The function automatically calculates the number of bins. It tries to reach the specified number of bins, but if it fails, it uses fewer bins. `values` — [Expresion](../syntax.md#syntax-expressions) resultando en valores de entrada. **Valores devueltos** -- [Matriz](../../data_types/array.md) de [Tuples](../../data_types/tuple.md) del siguiente formato: +- [Matriz](../../sql_reference/data_types/array.md) de [Tuples](../../sql_reference/data_types/tuple.md) del siguiente formato: - [(lower_1, upper_1, height_1), ... (lower_N, upper_N, height_N)] + ``` + [(lower_1, upper_1, height_1), ... (lower_N, upper_N, height_N)] + ``` - - `lower` — Límite inferior del contenedor. - - `upper` — Límite superior del contenedor. - - `height` — Altura calculada del contenedor. + - `lower` — Lower bound of the bin. + - `upper` — Upper bound of the bin. + - `height` — Calculated height of the bin. **Ejemplo** @@ -48,7 +53,7 @@ FROM ( └─────────────────────────────────────────────────────────────────────────┘ ``` -Puede visualizar un histograma con el [Bar](../functions/other_functions.md#function-bar) función, por ejemplo: +Puede visualizar un histograma con el [Bar](../../sql_reference/functions/other_functions.md#function-bar) función, por ejemplo: ``` sql WITH histogram(5)(rand() % 100) AS hist @@ -75,7 +80,7 @@ FROM En este caso, debe recordar que no conoce los bordes del contenedor del histograma. -## Por ejemplo, esta función es la siguiente:, …) {#function-sequencematch} +## sequenceMatch(pattern)(timestamp, cond1, cond2, …) {#function-sequencematch} Comprueba si la secuencia contiene una cadena de eventos que coincida con el patrón. @@ -88,11 +93,11 @@ sequenceMatch(pattern)(timestamp, cond1, cond2, ...) **Parámetros** -- `pattern` — Patrón de cadena. Ver [Sintaxis de patrón](#sequence-function-pattern-syntax). +- `pattern` — Pattern string. See [Sintaxis de patrón](#sequence-function-pattern-syntax). -- `timestamp` — Columna que se considera que contiene datos de tiempo. Los tipos de datos típicos son `Date` y `DateTime`. También puede utilizar cualquiera de los [UInt](../../data_types/int_uint.md) tipos de datos. +- `timestamp` — Column considered to contain time data. Typical data types are `Date` y `DateTime`. También puede utilizar cualquiera de los [UInt](../../sql_reference/data_types/int_uint.md) tipos de datos. -- `cond1`, `cond2` — Condiciones que describen la cadena de eventos. Tipo de datos: `UInt8`. Puede pasar hasta 32 argumentos de condición. La función sólo tiene en cuenta los eventos descritos en estas condiciones. Si la secuencia contiene datos que no se describen en una condición, la función los salta. +- `cond1`, `cond2` — Conditions that describe the chain of events. Data type: `UInt8`. Puede pasar hasta 32 argumentos de condición. La función sólo tiene en cuenta los eventos descritos en estas condiciones. Si la secuencia contiene datos que no se describen en una condición, la función los omite. **Valores devueltos** @@ -104,11 +109,11 @@ Tipo: `UInt8`. 
**Sintaxis de patrón** -- `(?N)` — Hace coincidir el argumento de condición en la posición `N`. Las condiciones están numeradas en el `[1, 32]` Gama. Por ejemplo, `(?1)` coincide con el argumento pasado al `cond1` parámetro. +- `(?N)` — Matches the condition argument at position `N`. Las condiciones están numeradas en el `[1, 32]` gama. Por ejemplo, `(?1)` coincide con el argumento pasado al `cond1` parámetro. -- `.*` — Coincide con cualquier número de eventos. No necesita argumentos condicionales para hacer coincidir este elemento del patrón. +- `.*` — Matches any number of events. You don't need conditional arguments to match this element of the pattern. -- `(?t operator value)` — Establece el tiempo en segundos que debe separar dos eventos. Por ejemplo, patrón `(?1)(?t>1800)(?2)` coincide con los eventos que ocurren a más de 1800 segundos el uno del otro. Un número arbitrario de cualquier evento puede estar entre estos eventos. Puede usar el `>=`, `>`, `<`, `<=` operador. +- `(?t operator value)` — Sets the time in seconds that should separate two events. For example, pattern `(?1)(?t>1800)(?2)` coincide con los eventos que ocurren a más de 1800 segundos el uno del otro. Un número arbitrario de cualquier evento puede estar entre estos eventos. Puede usar el `>=`, `>`, `<`, `<=` operador. **Ejemplos** @@ -146,7 +151,7 @@ SELECT sequenceMatch('(?1)(?2)')(time, number = 1, number = 2, number = 3) FROM └──────────────────────────────────────────────────────────────────────────────────────────┘ ``` -En este caso, la función no pudo encontrar la cadena de eventos que coincidiera con el patrón, porque el evento para el número 3 ocurrió entre 1 y 2. Si en el mismo caso comprobamos la condición para el número 4, la secuencia coincidiría con el patrón. +En este caso, la función no pudo encontrar la cadena de eventos que coincida con el patrón, porque el evento para el número 3 ocurrió entre 1 y 2. Si en el mismo caso comprobamos la condición para el número 4, la secuencia coincidiría con el patrón. ``` sql SELECT sequenceMatch('(?1)(?2)')(time, number = 1, number = 2, number = 4) FROM t @@ -162,7 +167,7 @@ SELECT sequenceMatch('(?1)(?2)')(time, number = 1, number = 2, number = 4) FROM - [sequenceCount](#function-sequencecount) -## Por ejemplo, una secuencia de tiempo, …) {#function-sequencecount} +## sequenceCount(pattern)(time, cond1, cond2, …) {#function-sequencecount} Cuenta el número de cadenas de eventos que coinciden con el patrón. La función busca cadenas de eventos que no se superponen. Comienza a buscar la siguiente cadena después de que se haga coincidir la cadena actual. @@ -175,11 +180,11 @@ sequenceCount(pattern)(timestamp, cond1, cond2, ...) **Parámetros** -- `pattern` — Patrón de cadena. Ver [Sintaxis de patrón](#sequence-function-pattern-syntax). +- `pattern` — Pattern string. See [Sintaxis de patrón](#sequence-function-pattern-syntax). -- `timestamp` — Columna que se considera que contiene datos de tiempo. Los tipos de datos típicos son `Date` y `DateTime`. También puede utilizar cualquiera de los [UInt](../../data_types/int_uint.md) tipos de datos. +- `timestamp` — Column considered to contain time data. Typical data types are `Date` y `DateTime`. También puede utilizar cualquiera de los [UInt](../../sql_reference/data_types/int_uint.md) tipos de datos. -- `cond1`, `cond2` — Condiciones que describen la cadena de eventos. Tipo de datos: `UInt8`. Puede pasar hasta 32 argumentos de condición. La función sólo tiene en cuenta los eventos descritos en estas condiciones. 
Si la secuencia contiene datos que no se describen en una condición, la función los salta. +- `cond1`, `cond2` — Conditions that describe the chain of events. Data type: `UInt8`. Puede pasar hasta 32 argumentos de condición. La función sólo tiene en cuenta los eventos descritos en estas condiciones. Si la secuencia contiene datos que no se describen en una condición, la función los omite. **Valores devueltos** @@ -238,11 +243,11 @@ windowFunnel(window, [mode])(timestamp, cond1, cond2, ..., condN) **Parámetros** -- `window` — Longitud de la ventana corredera en segundos. +- `window` — Length of the sliding window in seconds. - `mode` - Es un argumento opcional. - `'strict'` - Cuando el `'strict'` se establece, windowFunnel() aplica condiciones solo para los valores únicos. -- `timestamp` — Nombre de la columna que contiene la marca de tiempo. Tipos de datos admitidos: [Fecha](../../data_types/date.md), [FechaHora](../../data_types/datetime.md#data_type-datetime) y otros tipos de enteros sin signo (tenga en cuenta que aunque timestamp admite el `UInt64` tipo, su valor no puede exceder el máximo Int64, que es 2 ^ 63 - 1). -- `cond` — Condiciones o datos que describan la cadena de eventos. [UInt8](../../data_types/int_uint.md). +- `timestamp` — Name of the column containing the timestamp. Data types supported: [Fecha](../../sql_reference/data_types/date.md), [FechaHora](../../sql_reference/data_types/datetime.md#data_type-datetime) y otros tipos de enteros sin signo (tenga en cuenta que aunque timestamp admite el `UInt64` tipo, su valor no puede exceder el máximo de Int64, que es 2 ^ 63 - 1). +- `cond` — Conditions or data describing the chain of events. [UInt8](../../sql_reference/data_types/int_uint.md). **Valor devuelto** @@ -311,7 +316,7 @@ Resultado: ## retención {#retention} La función toma como argumentos un conjunto de condiciones de 1 a 32 argumentos de tipo `UInt8` que indican si se cumplió una determinada condición para el evento. -Cualquier condición se puede especificar como un argumento (como en [DONDE](../../query_language/select.md#select-where)). +Cualquier condición se puede especificar como un argumento (como en [WHERE](../../sql_reference/statements/select.md#select-where)). Las condiciones, excepto la primera, se aplican en pares: el resultado del segundo será verdadero si el primero y el segundo son verdaderos, del tercero si el primero y el fird son verdaderos, etc. @@ -323,22 +328,22 @@ retention(cond1, cond2, ..., cond32); **Parámetros** -- `cond` — una expresión que devuelve un `UInt8` resultado (1 o 0). +- `cond` — an expression that returns a `UInt8` resultado (1 o 0). **Valor devuelto** La matriz de 1 o 0. -- 1 — se cumplió la condición para el evento. -- 0 - condición no se cumplió para el evento. +- 1 — condition was met for the event. +- 0 — condition wasn't met for the event. Tipo: `UInt8`. **Ejemplo** -Consideremos un ejemplo de cálculo de la `retention` función para determinar el tráfico del sitio. +Consideremos un ejemplo de cálculo del `retention` función para determinar el tráfico del sitio. -**1.** Сrear una tabla para ilustrar un ejemplo. +**1.** Сreate a table to illustrate an example. ``` sql CREATE TABLE retention_test(date Date, uid Int32) ENGINE = Memory; @@ -467,9 +472,9 @@ Donde: - `r2`- el número de visitantes únicos que visitaron el sitio durante un período de tiempo específico entre 2020-01-01 y 2020-01-02 (`cond1` y `cond2` condición). 
- `r3`- el número de visitantes únicos que visitaron el sitio durante un período de tiempo específico entre 2020-01-01 y 2020-01-03 (`cond1` y `cond3` condición). -## UniqUpTo (N) (x) {#uniquptonx} +## UniqUpTo(N)(x) {#uniquptonx} -Calcula el número de diferentes valores de argumento si es menor o igual a N. Si el número de diferentes valores de argumento es mayor que N, devuelve N + 1. +Calculates the number of different argument values ​​if it is less than or equal to N. If the number of different argument values is greater than N, it returns N + 1. Recomendado para usar con Ns pequeños, hasta 10. El valor máximo de N es 100. @@ -487,8 +492,8 @@ Problem: Generate a report that shows only keywords that produced at least 5 uni Solution: Write in the GROUP BY query SearchPhrase HAVING uniqUpTo(4)(UserID) >= 5 ``` -[Artículo Original](https://clickhouse.tech/docs/es/query_language/agg_functions/parametric_functions/) +[Artículo Original](https://clickhouse.tech/docs/en/query_language/agg_functions/parametric_functions/) -## Por ejemplo, en el caso de que el usuario no tenga ningún problema.) {#summapfilteredkeys-to-keepkeys-values} +## sumMapFiltered(keys\_to\_keep)(claves, valores) {#summapfilteredkeys-to-keepkeys-values} El mismo comportamiento que [sumMap](reference.md#agg_functions-summap) excepto que una matriz de claves se pasa como un parámetro. Esto puede ser especialmente útil cuando se trabaja con una alta cardinalidad de claves. diff --git a/docs/es/query_language/agg_functions/reference.md b/docs/es/sql_reference/aggregate_functions/reference.md similarity index 92% rename from docs/es/query_language/agg_functions/reference.md rename to docs/es/sql_reference/aggregate_functions/reference.md index 467d50ae93f..1688cbe0160 100644 --- a/docs/es/query_language/agg_functions/reference.md +++ b/docs/es/sql_reference/aggregate_functions/reference.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 36 +toc_title: Referencia --- # Referencia de la función {#function-reference} @@ -16,15 +19,15 @@ ClickHouse admite las siguientes sintaxis para `count`: La función puede tomar: -- Cero parámetros. +- Cero de los parámetros. - Una [expresion](../syntax.md#syntax-expressions). **Valor devuelto** - Si se llama a la función sin parámetros, cuenta el número de filas. -- Si el [expresion](../syntax.md#syntax-expressions) se pasa, entonces la función cuenta cuántas veces esta expresión devuelve no nula. Si la expresión devuelve un [NULL](../../data_types/nullable.md)-type valor, entonces el resultado de `count` no se queda `Nullable`. La función devuelve 0 si la expresión devuelta `NULL` para todas las filas. +- Si el [expresion](../syntax.md#syntax-expressions) se pasa, entonces la función cuenta cuántas veces esta expresión devuelve no nula. Si la expresión devuelve un [NULL](../../sql_reference/data_types/nullable.md)-type valor, entonces el resultado de `count` no se queda `Nullable`. La función devuelve 0 si la expresión devuelta `NULL` para todas las filas. -En ambos casos el tipo del valor devuelto es [UInt64](../../data_types/int_uint.md). +En ambos casos el tipo del valor devuelto es [UInt64](../../sql_reference/data_types/int_uint.md). 
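+
+Un boceto mínimo de ambos usos (el ejemplo no forma parte del texto original):
+
+``` sql
+SELECT count() FROM numbers(10);                            -- devuelve 10
+SELECT count(if(number % 2 = 0, 1, NULL)) FROM numbers(10); -- cuenta solo los valores no nulos: 5
+```
+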
**Detalles** @@ -243,7 +246,7 @@ binary decimal ## Método de codificación de datos: {#groupbitmap} -Mapa de bits o cálculos agregados de una columna entera sin signo, devuelve cardinalidad de tipo UInt64, si agrega el sufijo -State, luego devuelve [objeto de mapa de bits](../functions/bitmap_functions.md). +Mapa de bits o cálculos agregados de una columna entera sin signo, devuelve cardinalidad de tipo UInt64, si agrega el sufijo -State, luego devuelve [objeto de mapa de bits](../../sql_reference/functions/bitmap_functions.md). ``` sql groupBitmap(expr) @@ -379,7 +382,7 @@ skewPop(expr) **Valor devuelto** -The skewness of the given distribution. Type — [Float64](../../data_types/float.md) +The skewness of the given distribution. Type — [Float64](../../sql_reference/data_types/float.md) **Ejemplo** @@ -403,7 +406,7 @@ skewSamp(expr) **Valor devuelto** -The skewness of the given distribution. Type — [Float64](../../data_types/float.md). Si `n <= 1` (`n` es el tamaño de la muestra), luego la función devuelve `nan`. +The skewness of the given distribution. Type — [Float64](../../sql_reference/data_types/float.md). Si `n <= 1` (`n` es el tamaño de la muestra), luego la función devuelve `nan`. **Ejemplo** @@ -425,7 +428,7 @@ kurtPop(expr) **Valor devuelto** -The kurtosis of the given distribution. Type — [Float64](../../data_types/float.md) +The kurtosis of the given distribution. Type — [Float64](../../sql_reference/data_types/float.md) **Ejemplo** @@ -449,7 +452,7 @@ kurtSamp(expr) **Valor devuelto** -The kurtosis of the given distribution. Type — [Float64](../../data_types/float.md). Si `n <= 1` (`n` es un tamaño de la muestra), luego la función devuelve `nan`. +The kurtosis of the given distribution. Type — [Float64](../../sql_reference/data_types/float.md). Si `n <= 1` (`n` es un tamaño de la muestra), luego la función devuelve `nan`. **Ejemplo** @@ -540,7 +543,7 @@ La función toma un número variable de parámetros. Los parámetros pueden ser **Valor devuelto** -- A [UInt64](../../data_types/int_uint.md)-tipo número. +- A [UInt64](../../sql_reference/data_types/int_uint.md)-tipo número. **Detalles de implementación** @@ -581,7 +584,7 @@ La función toma un número variable de parámetros. Los parámetros pueden ser **Valor devuelto** -- Numero [UInt64](../../data_types/int_uint.md)-tipo número. +- Un número [UInt64](../../sql_reference/data_types/int_uint.md)-tipo número. **Detalles de implementación** @@ -629,7 +632,7 @@ La función toma un número variable de parámetros. Los parámetros pueden ser **Valor devuelto** -- A [UInt64](../../data_types/int_uint.md)-tipo número. +- A [UInt64](../../sql_reference/data_types/int_uint.md)-tipo número. **Detalles de implementación** @@ -871,7 +874,7 @@ Apodo: `median`. **Parámetros** - `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` valor en el rango de `[0.01, 0.99]`. Valor predeterminado: 0.5. En `level=0.5` la función calcula [mediana](https://en.wikipedia.org/wiki/Median). -- `expr` — Expression over the column values resulting in numeric [tipos de datos](../../data_types/index.md#data_types), [Fecha](../../data_types/date.md) o [FechaHora](../../data_types/datetime.md). +- `expr` — Expression over the column values resulting in numeric [tipos de datos](../../sql_reference/data_types/index.md#data_types), [Fecha](../../sql_reference/data_types/date.md) o [FechaHora](../../sql_reference/data_types/datetime.md). **Valor devuelto** @@ -879,9 +882,9 @@ Apodo: `median`. 
Tipo: -- [Float64](../../data_types/float.md) para la entrada de tipo de datos numéricos. -- [Fecha](../../data_types/date.md) si los valores de entrada tienen `Date` tipo. -- [FechaHora](../../data_types/datetime.md) si los valores de entrada tienen `DateTime` tipo. +- [Float64](../../sql_reference/data_types/float.md) para la entrada de tipo de datos numéricos. +- [Fecha](../../sql_reference/data_types/date.md) si los valores de entrada tienen `Date` tipo. +- [FechaHora](../../sql_reference/data_types/datetime.md) si los valores de entrada tienen `DateTime` tipo. **Ejemplo** @@ -934,18 +937,18 @@ Apodo: `medianDeterministic`. **Parámetros** - `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` valor en el rango de `[0.01, 0.99]`. Valor predeterminado: 0.5. En `level=0.5` la función calcula [mediana](https://en.wikipedia.org/wiki/Median). -- `expr` — Expression over the column values resulting in numeric [tipos de datos](../../data_types/index.md#data_types), [Fecha](../../data_types/date.md) o [FechaHora](../../data_types/datetime.md). +- `expr` — Expression over the column values resulting in numeric [tipos de datos](../../sql_reference/data_types/index.md#data_types), [Fecha](../../sql_reference/data_types/date.md) o [FechaHora](../../sql_reference/data_types/datetime.md). - `determinator` — Number whose hash is used instead of a random number generator in the reservoir sampling algorithm to make the result of sampling deterministic. As a determinator you can use any deterministic positive number, for example, a user id or an event id. If the same determinator value occures too often, the function works incorrectly. **Valor devuelto** -- Cuantil aproximado del nivel especificado. +- Aproximado cuantil del nivel especificado. Tipo: -- [Float64](../../data_types/float.md) para la entrada de tipo de datos numéricos. -- [Fecha](../../data_types/date.md) si los valores de entrada tienen `Date` tipo. -- [FechaHora](../../data_types/datetime.md) si los valores de entrada tienen `DateTime` tipo. +- [Float64](../../sql_reference/data_types/float.md) para el tipo de datos numérico de entrada. +- [Fecha](../../sql_reference/data_types/date.md) si los valores de entrada tienen `Date` tipo. +- [FechaHora](../../sql_reference/data_types/datetime.md) si los valores de entrada tienen `DateTime` tipo. **Ejemplo** @@ -998,7 +1001,7 @@ Apodo: `medianExact`. **Parámetros** - `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` valor en el rango de `[0.01, 0.99]`. Valor predeterminado: 0.5. En `level=0.5` la función calcula [mediana](https://en.wikipedia.org/wiki/Median). -- `expr` — Expression over the column values resulting in numeric [tipos de datos](../../data_types/index.md#data_types), [Fecha](../../data_types/date.md) o [FechaHora](../../data_types/datetime.md). +- `expr` — Expression over the column values resulting in numeric [tipos de datos](../../sql_reference/data_types/index.md#data_types), [Fecha](../../sql_reference/data_types/date.md) o [FechaHora](../../sql_reference/data_types/datetime.md). **Valor devuelto** @@ -1006,9 +1009,9 @@ Apodo: `medianExact`. Tipo: -- [Float64](../../data_types/float.md) para la entrada de tipo de datos numéricos. -- [Fecha](../../data_types/date.md) si los valores de entrada tienen `Date` tipo. -- [FechaHora](../../data_types/datetime.md) si los valores de entrada tienen `DateTime` tipo. 
+- [Float64](../../sql_reference/data_types/float.md) para el tipo de datos numérico de entrada. +- [Fecha](../../sql_reference/data_types/date.md) si los valores de entrada tienen `Date` tipo. +- [FechaHora](../../sql_reference/data_types/datetime.md) si los valores de entrada tienen `DateTime` tipo. **Ejemplo** @@ -1050,7 +1053,7 @@ Apodo: `medianExactWeighted`. **Parámetros** - `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` valor en el rango de `[0.01, 0.99]`. Valor predeterminado: 0.5. En `level=0.5` la función calcula [mediana](https://en.wikipedia.org/wiki/Median). -- `expr` — Expression over the column values resulting in numeric [tipos de datos](../../data_types/index.md#data_types), [Fecha](../../data_types/date.md) o [FechaHora](../../data_types/datetime.md). +- `expr` — Expression over the column values resulting in numeric [tipos de datos](../../sql_reference/data_types/index.md#data_types), [Fecha](../../sql_reference/data_types/date.md) o [FechaHora](../../sql_reference/data_types/datetime.md). - `weight` — Column with weights of sequence members. Weight is a number of value occurrences. **Valor devuelto** @@ -1059,9 +1062,9 @@ Apodo: `medianExactWeighted`. Tipo: -- [Float64](../../data_types/float.md) para la entrada de tipo de datos numéricos. -- [Fecha](../../data_types/date.md) si los valores de entrada tienen `Date` tipo. -- [FechaHora](../../data_types/datetime.md) si los valores de entrada tienen `DateTime` tipo. +- [Float64](../../sql_reference/data_types/float.md) para la entrada de tipo de datos numéricos. +- [Fecha](../../sql_reference/data_types/date.md) si los valores de entrada tienen `Date` tipo. +- [FechaHora](../../sql_reference/data_types/datetime.md) si los valores de entrada tienen `DateTime` tipo. **Ejemplo** @@ -1115,7 +1118,7 @@ Apodo: `medianTiming`. - `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` valor en el rango de `[0.01, 0.99]`. Valor predeterminado: 0.5. En `level=0.5` la función calcula [mediana](https://en.wikipedia.org/wiki/Median). -- `expr` — [Expresion](../syntax.md#syntax-expressions) sobre una columna valores que devuelven un [Flotante\*](../../data_types/float.md)-tipo número. +- `expr` — [Expresion](../syntax.md#syntax-expressions) sobre una columna valores que devuelven un [Flotante\*](../../sql_reference/data_types/float.md)-tipo número. - If negative values are passed to the function, the behavior is undefined. - If the value is greater than 30,000 (a page loading time of more than 30 seconds), it is assumed to be 30,000. @@ -1139,7 +1142,7 @@ De lo contrario, el resultado del cálculo se redondea al múltiplo más cercano Tipo: `Float32`. !!! note "Nota" - Si no se pasan valores a la función (cuando se `quantileTimingIf`), [NaN](../../data_types/float.md#data_type-float-nan-inf) se devuelve. El propósito de esto es diferenciar estos casos de los casos que resultan en cero. Ver [ORDER BY cláusula](../select.md#select-order-by) para notas sobre la clasificación `NaN` valor. + Si no se pasan valores a la función (cuando se `quantileTimingIf`), [NaN](../../sql_reference/data_types/float.md#data_type-float-nan-inf) se devuelve. El propósito de esto es diferenciar estos casos de los casos que resultan en cero. Ver [ORDER BY cláusula](../statements/select.md#select-order-by) para notas sobre la clasificación `NaN` valor. **Ejemplo** @@ -1198,7 +1201,7 @@ Apodo: `medianTimingWeighted`. 
- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` valor en el rango de `[0.01, 0.99]`. Valor predeterminado: 0.5. En `level=0.5` la función calcula [mediana](https://en.wikipedia.org/wiki/Median). -- `expr` — [Expresion](../syntax.md#syntax-expressions) sobre una columna valores que devuelven un [Flotante\*](../../data_types/float.md)-tipo número. +- `expr` — [Expresion](../syntax.md#syntax-expressions) sobre una columna valores que devuelven un [Flotante\*](../../sql_reference/data_types/float.md)-tipo número. - If negative values are passed to the function, the behavior is undefined. - If the value is greater than 30,000 (a page loading time of more than 30 seconds), it is assumed to be 30,000. @@ -1224,7 +1227,7 @@ De lo contrario, el resultado del cálculo se redondea al múltiplo más cercano Tipo: `Float32`. !!! note "Nota" - Si no se pasan valores a la función (cuando se `quantileTimingIf`), [NaN](../../data_types/float.md#data_type-float-nan-inf) se devuelve. El propósito de esto es diferenciar estos casos de los casos que resultan en cero. Ver [ORDER BY cláusula](../select.md#select-order-by) para notas sobre la clasificación `NaN` valor. + Si no se pasan valores a la función (cuando se `quantileTimingIf`), [NaN](../../sql_reference/data_types/float.md#data_type-float-nan-inf) se devuelve. El propósito de esto es diferenciar estos casos de los casos que resultan en cero. Ver [ORDER BY cláusula](../statements/select.md#select-order-by) para notas sobre la clasificación `NaN` valor. **Ejemplo** @@ -1281,7 +1284,7 @@ Apodo: `medianTDigest`. **Parámetros** - `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` valor en el rango de `[0.01, 0.99]`. Valor predeterminado: 0.5. En `level=0.5` la función calcula [mediana](https://en.wikipedia.org/wiki/Median). -- `expr` — Expression over the column values resulting in numeric [tipos de datos](../../data_types/index.md#data_types), [Fecha](../../data_types/date.md) o [FechaHora](../../data_types/datetime.md). +- `expr` — Expression over the column values resulting in numeric [tipos de datos](../../sql_reference/data_types/index.md#data_types), [Fecha](../../sql_reference/data_types/date.md) o [FechaHora](../../sql_reference/data_types/datetime.md). **Valor devuelto** @@ -1289,9 +1292,9 @@ Apodo: `medianTDigest`. Tipo: -- [Float64](../../data_types/float.md) para la entrada de tipo de datos numéricos. -- [Fecha](../../data_types/date.md) si los valores de entrada tienen `Date` tipo. -- [FechaHora](../../data_types/datetime.md) si los valores de entrada tienen `DateTime` tipo. +- [Float64](../../sql_reference/data_types/float.md) para la entrada de tipo de datos numéricos. +- [Fecha](../../sql_reference/data_types/date.md) si los valores de entrada tienen `Date` tipo. +- [FechaHora](../../sql_reference/data_types/datetime.md) si los valores de entrada tienen `DateTime` tipo. **Ejemplo** @@ -1335,7 +1338,7 @@ Apodo: `medianTDigest`. **Parámetros** - `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` valor en el rango de `[0.01, 0.99]`. Valor predeterminado: 0.5. En `level=0.5` la función calcula [mediana](https://en.wikipedia.org/wiki/Median). 
-- `expr` — Expression over the column values resulting in numeric [tipos de datos](../../data_types/index.md#data_types), [Fecha](../../data_types/date.md) o [FechaHora](../../data_types/datetime.md). +- `expr` — Expression over the column values resulting in numeric [tipos de datos](../../sql_reference/data_types/index.md#data_types), [Fecha](../../sql_reference/data_types/date.md) o [FechaHora](../../sql_reference/data_types/datetime.md). - `weight` — Column with weights of sequence elements. Weight is a number of value occurrences. **Valor devuelto** @@ -1344,9 +1347,9 @@ Apodo: `medianTDigest`. Tipo: -- [Float64](../../data_types/float.md) para la entrada de tipo de datos numéricos. -- [Fecha](../../data_types/date.md) si los valores de entrada tienen `Date` tipo. -- [FechaHora](../../data_types/datetime.md) si los valores de entrada tienen `DateTime` tipo. +- [Float64](../../sql_reference/data_types/float.md) para la entrada de tipo de datos numéricos. +- [Fecha](../../sql_reference/data_types/date.md) si los valores de entrada tienen `Date` tipo. +- [FechaHora](../../sql_reference/data_types/datetime.md) si los valores de entrada tienen `DateTime` tipo. **Ejemplo** @@ -1493,7 +1496,7 @@ topKWeighted(N)(x, weight) **Argumento** - `x` – The value. -- `weight` — The weight. [UInt8](../../data_types/int_uint.md). +- `weight` — The weight. [UInt8](../../sql_reference/data_types/int_uint.md). **Valor devuelto** @@ -1704,7 +1707,7 @@ stochasticLogisticRegression(1.0, 1.0, 10, 'SGD') ## Método de codificación de datos: {#groupbitmapand} -Calcula el AND de una columna de mapa de bits, devuelve la cardinalidad del tipo UInt64, si agrega el sufijo -State, luego devuelve [objeto de mapa de bits](../functions/bitmap_functions.md). +Calcula el AND de una columna de mapa de bits, devuelve la cardinalidad del tipo UInt64, si agrega el sufijo -State, luego devuelve [objeto de mapa de bits](../../sql_reference/functions/bitmap_functions.md). ``` sql groupBitmapAnd(expr) @@ -1747,7 +1750,7 @@ SELECT arraySort(bitmapToArray(groupBitmapAndState(z))) FROM bitmap_column_expr_ ## Método de codificación de datos: {#groupbitmapor} -Calcula el OR de una columna de mapa de bits, devuelve la cardinalidad del tipo UInt64, si agrega el sufijo -State, luego devuelve [objeto de mapa de bits](../functions/bitmap_functions.md). Esto es equivalente a `groupBitmapMerge`. +Calcula el OR de una columna de mapa de bits, devuelve la cardinalidad del tipo UInt64, si agrega el sufijo -State, luego devuelve [objeto de mapa de bits](../../sql_reference/functions/bitmap_functions.md). Esto es equivalente a `groupBitmapMerge`. ``` sql groupBitmapOr(expr) @@ -1790,7 +1793,7 @@ SELECT arraySort(bitmapToArray(groupBitmapOrState(z))) FROM bitmap_column_expr_t ## Método de codificación de datos: {#groupbitmapxor} -Calcula el XOR de una columna de mapa de bits, devuelve la cardinalidad del tipo UInt64, si agrega el sufijo -State, luego devuelve [objeto de mapa de bits](../functions/bitmap_functions.md). +Calcula el XOR de una columna de mapa de bits, devuelve la cardinalidad del tipo UInt64, si agrega el sufijo -State, luego devuelve [objeto de mapa de bits](../../sql_reference/functions/bitmap_functions.md). 
``` sql groupBitmapOr(expr) diff --git a/docs/es/data_types/nested_data_structures/aggregatefunction.md b/docs/es/sql_reference/data_types/aggregatefunction.md similarity index 55% rename from docs/es/data_types/nested_data_structures/aggregatefunction.md rename to docs/es/sql_reference/data_types/aggregatefunction.md index a5e2214853e..648a1bb7150 100644 --- a/docs/es/data_types/nested_data_structures/aggregatefunction.md +++ b/docs/es/sql_reference/data_types/aggregatefunction.md @@ -1,10 +1,13 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 52 +toc_title: "Agregar funci\xF3n (nombre, types_of_arguments)...)" --- # AggregateFunction(name, types\_of\_arguments…) {#data-type-aggregatefunction} -El estado intermedio de una función agregada. Para obtenerlo, use funciones agregadas con el `-State` sufijo. Para obtener datos agregados en el futuro, debe utilizar las mismas funciones agregadas con el `-Merge`sufijo. +Aggregate functions can have an implementation-defined intermediate state that can be serialized to an AggregateFunction(…) data type and stored in a table, usually, by means of [una vista materializada](../../sql_reference/statements/select.md#create-view). La forma común de producir un estado de función agregada es llamando a la función agregada con el `-State` sufijo. Para obtener el resultado final de la agregación en el futuro, debe utilizar la misma función de agregado con el `-Merge`sufijo. `AggregateFunction` — parametric data type. @@ -12,7 +15,7 @@ El estado intermedio de una función agregada. Para obtenerlo, use funciones agr - Nombre de la función de agregado. - If the function is parametric specify its parameters too. + If the function is parametric, specify its parameters too. - Tipos de los argumentos de la función agregada. @@ -27,7 +30,7 @@ CREATE TABLE t ) ENGINE = ... ``` -[uniq](../../query_language/agg_functions/reference.md#agg_function-uniq), anyIf ([cualquier](../../query_language/agg_functions/reference.md#agg_function-any)+[Si](../../query_language/agg_functions/combinators.md#agg-functions-combinator-if)) y [cantiles](../../query_language/agg_functions/reference.md) son las funciones agregadas admitidas en ClickHouse. +[uniq](../../sql_reference/aggregate_functions/reference.md#agg_function-uniq), anyIf ([cualquier](../../sql_reference/aggregate_functions/reference.md#agg_function-any)+[Si](../../sql_reference/aggregate_functions/combinators.md#agg-functions-combinator-if)) y [cantiles](../../sql_reference/aggregate_functions/reference.md) son las funciones agregadas admitidas en ClickHouse. ## Uso {#usage} @@ -44,7 +47,7 @@ quantilesState(0.5, 0.9)(SendTiming) En contraste con las funciones correspondientes `uniq` y `quantiles`, `-State`- funciones devuelven el estado, en lugar del valor final. En otras palabras, devuelven un valor de `AggregateFunction` tipo. -En los resultados de `SELECT` consulta, los valores de `AggregateFunction` tipo tiene representación binaria específica de la implementación para todos los formatos de salida de ClickHouse. Si volcar datos en, por ejemplo, `TabSeparated` Formato con `SELECT` Consulta entonces este volcado se puede cargar de nuevo usando `INSERT` consulta. +En los resultados de `SELECT` consulta, los valores de `AggregateFunction` tipo tiene representación binaria específica de la implementación para todos los formatos de salida de ClickHouse. 
Si volcar datos en, por ejemplo, `TabSeparated` formato con `SELECT` consulta, entonces este volcado se puede cargar de nuevo usando `INSERT` consulta. ### Selección de datos {#data-selection} @@ -62,6 +65,6 @@ SELECT uniqMerge(state) FROM (SELECT uniqState(UserID) AS state FROM table GROUP ## Ejemplo de uso {#usage-example} -Ver [AgregaciónMergeTree](../../operations/table_engines/aggregatingmergetree.md) Descripción del motor. +Ver [AgregaciónMergeTree](../../engines/table_engines/mergetree_family/aggregatingmergetree.md) Descripción del motor. -[Artículo Original](https://clickhouse.tech/docs/es/data_types/nested_data_structures/aggregatefunction/) +[Artículo Original](https://clickhouse.tech/docs/en/data_types/nested_data_structures/aggregatefunction/) diff --git a/docs/es/data_types/array.md b/docs/es/sql_reference/data_types/array.md similarity index 75% rename from docs/es/data_types/array.md rename to docs/es/sql_reference/data_types/array.md index bd6fd4f65a7..923e581ea8e 100644 --- a/docs/es/data_types/array.md +++ b/docs/es/sql_reference/data_types/array.md @@ -1,12 +1,13 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 51 +toc_title: Matriz (T) --- -# Matriz (T) {#data-type-array} +# Matriz (t) {#data-type-array} -Matriz de `T`-tipo de artículos. - -`T` puede ser cualquier cosa, incluida una matriz. +Una matriz de `T`-tipo de artículos. `T` puede ser cualquier tipo de datos, incluida una matriz. ## Creación de una matriz {#creating-an-array} @@ -46,9 +47,9 @@ SELECT [1, 2] AS x, toTypeName(x) ## Trabajar con tipos de datos {#working-with-data-types} -Al crear una matriz sobre la marcha, ClickHouse define automáticamente el tipo de argumento como el tipo de datos más estrecho que puede almacenar todos los argumentos enumerados. Si hay alguna [NULO](../query_language/syntax.md#null-literal) o [NULO](nullable.md#data_type-nullable) los argumentos de tipo, el tipo de elementos de la matriz es [NULO](nullable.md). +Al crear una matriz sobre la marcha, ClickHouse define automáticamente el tipo de argumento como el tipo de datos más estrecho que puede almacenar todos los argumentos enumerados. Si hay alguna [NULL](nullable.md#data_type-nullable) o literal [NULL](../../sql_reference/syntax.md#null-literal) valores, el tipo de un elemento de matriz también se convierte en [NULL](nullable.md). -Si ClickHouse no pudo determinar el tipo de datos, generará una excepción. Por ejemplo, esto sucederá al intentar crear una matriz con cadenas y números simultáneamente (`SELECT array(1, 'a')`). +Si ClickHouse no pudo determinar el tipo de datos, genera una excepción. Por ejemplo, esto sucede cuando se intenta crear una matriz con cadenas y números simultáneamente (`SELECT array(1, 'a')`). Ejemplos de detección automática de tipos de datos: @@ -73,4 +74,4 @@ Received exception from server (version 1.1.54388): Code: 386. DB::Exception: Received from localhost:9000, 127.0.0.1. DB::Exception: There is no supertype for types UInt8, String because some of them are String/FixedString and some of them are not. 
``` -[Artículo Original](https://clickhouse.tech/docs/es/data_types/array/) +[Artículo Original](https://clickhouse.tech/docs/en/data_types/array/) diff --git a/docs/es/data_types/boolean.md b/docs/es/sql_reference/data_types/boolean.md similarity index 55% rename from docs/es/data_types/boolean.md rename to docs/es/sql_reference/data_types/boolean.md index 7f7d839dcdb..04a16b108c7 100644 --- a/docs/es/data_types/boolean.md +++ b/docs/es/sql_reference/data_types/boolean.md @@ -1,9 +1,12 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 43 +toc_title: Booleana --- # Valores booleanos {#boolean-values} No hay un tipo separado para los valores booleanos. Utilice el tipo UInt8, restringido a los valores 0 o 1. -[Artículo Original](https://clickhouse.tech/docs/es/data_types/boolean/) +[Artículo Original](https://clickhouse.tech/docs/en/data_types/boolean/) diff --git a/docs/es/data_types/date.md b/docs/es/sql_reference/data_types/date.md similarity index 75% rename from docs/es/data_types/date.md rename to docs/es/sql_reference/data_types/date.md index 3a4f5cfb5d9..22f41f04a7f 100644 --- a/docs/es/data_types/date.md +++ b/docs/es/sql_reference/data_types/date.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 47 +toc_title: Fecha --- # Fecha {#date} @@ -9,4 +12,4 @@ El valor mínimo se emite como 0000-00-00. El valor de fecha se almacena sin la zona horaria. -[Artículo Original](https://clickhouse.tech/docs/es/data_types/date/) +[Artículo Original](https://clickhouse.tech/docs/en/data_types/date/) diff --git a/docs/es/data_types/datetime.md b/docs/es/sql_reference/data_types/datetime.md similarity index 68% rename from docs/es/data_types/datetime.md rename to docs/es/sql_reference/data_types/datetime.md index 0f8e35634ca..0109a442cae 100644 --- a/docs/es/data_types/datetime.md +++ b/docs/es/sql_reference/data_types/datetime.md @@ -1,8 +1,11 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 48 +toc_title: FechaHora --- -# FechaHora {#data_type-datetime} +# Datetime {#data_type-datetime} Permite almacenar un instante en el tiempo, que se puede expresar como una fecha del calendario y una hora de un día. @@ -20,15 +23,15 @@ Resolución: 1 segundo. El punto en el tiempo se guarda como un [Timestamp de Unix](https://en.wikipedia.org/wiki/Unix_time), independientemente de la zona horaria o el horario de verano. Además, el `DateTime` tipo puede almacenar zona horaria que es la misma para toda la columna, que afecta a cómo los valores de la `DateTime` Los valores de tipo se muestran en formato de texto y cómo se analizan los valores especificados como cadenas (‘2020-01-01 05:00:01’). La zona horaria no se almacena en las filas de la tabla (o en el conjunto de resultados), sino que se almacena en los metadatos de la columna. Se puede encontrar una lista de zonas horarias compatibles en el [Base de datos de zonas horarias de IANA](https://www.iana.org/time-zones). -El `tzdata` paquete, que contiene [Base de datos de zonas horarias de IANA](https://www.iana.org/time-zones), debe instalarse en el sistema. Descripción `timedatectl list-timezones` comando para listar zonas horarias conocidas por un sistema local. +El `tzdata` paquete, que contiene [Base de datos de zonas horarias de IANA](https://www.iana.org/time-zones), debe instalarse en el sistema. 
Utilice el `timedatectl list-timezones` comando para listar zonas horarias conocidas por un sistema local. -Puede establecer explícitamente una zona horaria para `DateTime`-type columnas al crear una tabla. Si la zona horaria no está establecida, ClickHouse usa el valor [Zona horaria](../operations/server_settings/settings.md#server_settings-timezone) parámetro en la configuración del servidor o la configuración del sistema operativo en el momento del inicio del servidor ClickHouse. +Puede establecer explícitamente una zona horaria para `DateTime`-type columnas al crear una tabla. Si la zona horaria no está establecida, ClickHouse usa el valor [Zona horaria](../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-timezone) parámetro en la configuración del servidor o la configuración del sistema operativo en el momento del inicio del servidor ClickHouse. -El [Casa de clics-cliente](../interfaces/cli.md) aplica la zona horaria del servidor de forma predeterminada si una zona horaria no se establece explícitamente al inicializar el tipo de datos. Para utilizar la zona horaria del cliente, ejecute `clickhouse-client` con el `--use_client_time_zone` parámetro. +El [Casa de clics-cliente](../../interfaces/cli.md) aplica la zona horaria del servidor de forma predeterminada si una zona horaria no se establece explícitamente al inicializar el tipo de datos. Para utilizar la zona horaria del cliente, ejecute `clickhouse-client` con el `--use_client_time_zone` parámetro. -ClickHouse genera valores en `YYYY-MM-DD hh:mm:ss` formato de texto por defecto. Puede cambiar la salida con el [formatDateTime](../query_language/functions/date_time_functions.md#formatdatetime) función. +ClickHouse genera valores en `YYYY-MM-DD hh:mm:ss` formato de texto por defecto. Puede cambiar la salida con el [formatDateTime](../../sql_reference/functions/date_time_functions.md#formatdatetime) función. -Al insertar datos en ClickHouse, puede usar diferentes formatos de cadenas de fecha y hora, dependiendo del valor de la [Date\_time\_input\_format](../operations/settings/settings.md#settings-date_time_input_format) configuración. +Al insertar datos en ClickHouse, puede usar diferentes formatos de cadenas de fecha y hora, dependiendo del valor de la [Date\_time\_input\_format](../../operations/settings/settings.md#settings-date_time_input_format) configuración. ## Ejemplos {#examples} @@ -58,7 +61,7 @@ SELECT * FROM dt; └─────────────────────┴──────────┘ ``` -- Al insertar datetime como un entero, se trata como Unix Timestamp (UTC). `1546300800` representante `'2019-01-01 00:00:00'` UTC. Sin embargo, como `timestamp` tetas grandes `Europe/Moscow` (UTC + 3) zona horaria especificada, al emitir como cadena, el valor se mostrará como `'2019-01-01 03:00:00'` +- Al insertar datetime como un entero, se trata como Unix Timestamp (UTC). `1546300800` representar `'2019-01-01 00:00:00'` UTC. Sin embargo, como `timestamp` columna tiene `Europe/Moscow` (UTC + 3) zona horaria especificada, al emitir como cadena, el valor se mostrará como `'2019-01-01 03:00:00'` - Al insertar el valor de cadena como fecha y hora, se trata como si estuviera en la zona horaria de la columna. `'2019-01-01 00:00:00'` será tratado como estar en `Europe/Moscow` zona horaria y guardado como `1546290000`. 
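+
+As a supplementary sketch (not part of the numbered example above), the same Unix timestamp can be rendered in different time zones by passing an explicit zone name to `toDateTime`; the zone names are assumptions chosen only for illustration:
+
+``` sql
+-- 1546300800 is '2019-01-01 00:00:00' UTC; the rendered text depends on the requested time zone.
+SELECT
+    toDateTime(1546300800, 'UTC') AS utc_time,
+    toDateTime(1546300800, 'Europe/Moscow') AS moscow_time;
+```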
**2.** Filtrado en `DateTime` valor @@ -115,12 +118,12 @@ FROM dt ## Ver también {#see-also} -- [Funciones de conversión de tipos](../query_language/functions/type_conversion_functions.md) -- [Funciones para trabajar con fechas y horas](../query_language/functions/date_time_functions.md) -- [Funciones para trabajar con matrices](../query_language/functions/array_functions.md) -- [El `date_time_input_format` configuración](../operations/settings/settings.md#settings-date_time_input_format) -- [El `timezone` parámetro de configuración del servidor](../operations/server_settings/settings.md#server_settings-timezone) -- [Operadores para trabajar con fechas y horas](../query_language/operators.md#operators-datetime) -- [El `Date` Tipo de datos](date.md) +- [Funciones de conversión de tipos](../../sql_reference/functions/type_conversion_functions.md) +- [Funciones para trabajar con fechas y horas](../../sql_reference/functions/date_time_functions.md) +- [Funciones para trabajar con matrices](../../sql_reference/functions/array_functions.md) +- [El `date_time_input_format` configuración](../../operations/settings/settings.md#settings-date_time_input_format) +- [El `timezone` parámetro de configuración del servidor](../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-timezone) +- [Operadores para trabajar con fechas y horas](../../sql_reference/operators.md#operators-datetime) +- [El `Date` tipo de datos](date.md) -[Artículo Original](https://clickhouse.tech/docs/es/data_types/datetime/) +[Artículo Original](https://clickhouse.tech/docs/en/data_types/datetime/) diff --git a/docs/es/data_types/datetime64.md b/docs/es/sql_reference/data_types/datetime64.md similarity index 80% rename from docs/es/data_types/datetime64.md rename to docs/es/sql_reference/data_types/datetime64.md index e9a5245b146..e7327ccb12d 100644 --- a/docs/es/data_types/datetime64.md +++ b/docs/es/sql_reference/data_types/datetime64.md @@ -1,8 +1,11 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 49 +toc_title: DateTime64 --- -# DateTime64 {#data_type-datetime64} +# Datetime64 {#data_type-datetime64} Permite almacenar un instante en el tiempo, que se puede expresar como una fecha de calendario y una hora de un día, con una precisión de subsegundo definida @@ -44,8 +47,8 @@ SELECT * FROM dt └─────────────────────────┴──────────┘ ``` -- Al insertar datetime como un entero, se trata como una marca de tiempo Unix (UTC) apropiadamente escalada. `1546300800000` (con precisión 3) representa `'2019-01-01 00:00:00'` UTC. Sin embargo, como `timestamp` tetas grandes `Europe/Moscow` (UTC + 3) zona horaria especificada, al emitir como una cadena, el valor se mostrará como `'2019-01-01 03:00:00'` -- Al insertar el valor de cadena como fecha y hora, se trata como si estuviera en la zona horaria de la columna. `'2019-01-01 00:00:00'` será tratado como estar en `Europe/Moscow` Zona horaria y se almacena como `1546290000000`. +- Al insertar datetime como un entero, se trata como una marca de tiempo Unix (UTC) apropiadamente escalada. `1546300800000` (con precisión 3) representa `'2019-01-01 00:00:00'` UTC. Sin embargo, como `timestamp` columna tiene `Europe/Moscow` (UTC + 3) zona horaria especificada, al emitir como una cadena, el valor se mostrará como `'2019-01-01 03:00:00'` +- Al insertar el valor de cadena como fecha y hora, se trata como si estuviera en la zona horaria de la columna. 
`'2019-01-01 00:00:00'` será tratado como estar en `Europe/Moscow` zona horaria y se almacena como `1546290000000`. **2.** Filtrado en `DateTime64` valor @@ -91,11 +94,11 @@ FROM dt ## Ver también {#see-also} -- [Funciones de conversión de tipos](../query_language/functions/type_conversion_functions.md) -- [Funciones para trabajar con fechas y horas](../query_language/functions/date_time_functions.md) -- [Funciones para trabajar con matrices](../query_language/functions/array_functions.md) -- [El `date_time_input_format` configuración](../operations/settings/settings.md#settings-date_time_input_format) -- [El `timezone` parámetro de configuración del servidor](../operations/server_settings/settings.md#server_settings-timezone) -- [Operadores para trabajar con fechas y horas](../query_language/operators.md#operators-datetime) -- [`Date` Tipo de datos](date.md) -- [`DateTime` Tipo de datos](datetime.md) +- [Funciones de conversión de tipos](../../sql_reference/functions/type_conversion_functions.md) +- [Funciones para trabajar con fechas y horas](../../sql_reference/functions/date_time_functions.md) +- [Funciones para trabajar con matrices](../../sql_reference/functions/array_functions.md) +- [El `date_time_input_format` configuración](../../operations/settings/settings.md#settings-date_time_input_format) +- [El `timezone` parámetro de configuración del servidor](../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-timezone) +- [Operadores para trabajar con fechas y horas](../../sql_reference/operators.md#operators-datetime) +- [`Date` tipo de datos](date.md) +- [`DateTime` tipo de datos](datetime.md) diff --git a/docs/es/data_types/decimal.md b/docs/es/sql_reference/data_types/decimal.md similarity index 95% rename from docs/es/data_types/decimal.md rename to docs/es/sql_reference/data_types/decimal.md index 9e84975443e..7b838dd70b5 100644 --- a/docs/es/data_types/decimal.md +++ b/docs/es/sql_reference/data_types/decimal.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 42 +toc_title: Decimal --- # Decimal(P, S), Decimal32(S), Decimal64(S), Decimal128(S) {#decimalp-s-decimal32s-decimal64s-decimal128s} @@ -40,7 +43,7 @@ Las operaciones binarias en Decimal dan como resultado un tipo de resultado más Reglas para la escala: -- Sumar, restar: S = max (S1, S2). +- Sumar, restar: S = max(S1, S2). - multuply: S = S1 + S2. - división: S = S1. @@ -103,4 +106,4 @@ SELECT toDecimal32(1, 8) < 100 DB::Exception: Can't compare. 
``` -[Artículo Original](https://clickhouse.tech/docs/es/data_types/decimal/) +[Artículo Original](https://clickhouse.tech/docs/en/data_types/decimal/) diff --git a/docs/es/sql_reference/data_types/domains/index.md b/docs/es/sql_reference/data_types/domains/index.md new file mode 100644 index 00000000000..2f281b956b9 --- /dev/null +++ b/docs/es/sql_reference/data_types/domains/index.md @@ -0,0 +1,8 @@ +--- +machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_folder_title: Domains +toc_priority: 56 +--- + + diff --git a/docs/es/data_types/domains/ipv4.md b/docs/es/sql_reference/data_types/domains/ipv4.md similarity index 81% rename from docs/es/data_types/domains/ipv4.md rename to docs/es/sql_reference/data_types/domains/ipv4.md index 9e3dcbd5167..d6cde398d01 100644 --- a/docs/es/data_types/domains/ipv4.md +++ b/docs/es/sql_reference/data_types/domains/ipv4.md @@ -1,10 +1,13 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 59 +toc_title: IPv4 --- ## IPv4 {#ipv4} -`IPv4` es un dominio basado en `UInt32` tipo y sirve como reemplazo tipo para almacenar valores IPv4. Proporciona un almacenamiento compacto con un formato de entrada-salida amigable para los humanos e información de tipo de columna sobre la inspección. +`IPv4` es un dominio basado en `UInt32` tipo y sirve como un reemplazo con tipo para almacenar valores IPv4. Proporciona un almacenamiento compacto con el formato de entrada-salida amigable para los humanos y la información sobre el tipo de columna en la inspección. ### Uso básico {#basic-usage} @@ -27,17 +30,17 @@ O puede usar el dominio IPv4 como clave: CREATE TABLE hits (url String, from IPv4) ENGINE = MergeTree() ORDER BY from; ``` -`IPv4` Todos los derechos reservados: +`IPv4` domain admite formato de entrada personalizado como cadenas IPv4: ``` sql -INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '116.253.40.133')('https://clickhouse.tech', '183.247.232.58')('https://clickhouse.yandex/docs/es/', '116.106.34.242'); +INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '116.253.40.133')('https://clickhouse.tech', '183.247.232.58')('https://clickhouse.yandex/docs/en/', '116.106.34.242'); SELECT * FROM hits; ``` ``` text ┌─url────────────────────────────────┬───────────from─┐ -│ https://clickhouse.tech/docs/es/ │ 116.106.34.242 │ +│ https://clickhouse.tech/docs/en/ │ 116.106.34.242 │ │ https://wikipedia.org │ 116.253.40.133 │ │ https://clickhouse.tech │ 183.247.232.58 │ └────────────────────────────────────┴────────────────┘ @@ -66,7 +69,7 @@ SELECT toTypeName(s), IPv4NumToString(from) as s FROM hits LIMIT 1; │ String │ 183.247.232.58 │ └───────────────────────────────────┴────────────────┘ -O echar una ONU `UInt32` valor: +O echar a un `UInt32` valor: ``` sql SELECT toTypeName(i), CAST(from as UInt32) as i FROM hits LIMIT 1; @@ -78,4 +81,4 @@ SELECT toTypeName(i), CAST(from as UInt32) as i FROM hits LIMIT 1; └──────────────────────────────────┴────────────┘ ``` -[Artículo Original](https://clickhouse.tech/docs/es/data_types/domains/ipv4) +[Artículo Original](https://clickhouse.tech/docs/en/data_types/domains/ipv4) diff --git a/docs/es/data_types/domains/ipv6.md b/docs/es/sql_reference/data_types/domains/ipv6.md similarity index 84% rename from docs/es/data_types/domains/ipv6.md rename to docs/es/sql_reference/data_types/domains/ipv6.md index fa9bd746b30..7a1f9be42ba 100644 --- a/docs/es/data_types/domains/ipv6.md +++ 
b/docs/es/sql_reference/data_types/domains/ipv6.md @@ -1,10 +1,13 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 60 +toc_title: IPv6 --- ## IPv6 {#ipv6} -`IPv6` es un dominio basado en `FixedString(16)` tipo y sirve como reemplazo tipo para almacenar valores IPv6. Proporciona un almacenamiento compacto con un formato de entrada-salida amigable para los humanos e información de tipo de columna sobre la inspección. +`IPv6` es un dominio basado en `FixedString(16)` tipo y sirve como un reemplazo con tipo para almacenar valores IPv6. Proporciona un almacenamiento compacto con el formato de entrada-salida amigable para los humanos y la información sobre el tipo de columna en la inspección. ### Uso básico {#basic-usage} @@ -27,10 +30,10 @@ O puedes usar `IPv6` dominio como clave: CREATE TABLE hits (url String, from IPv6) ENGINE = MergeTree() ORDER BY from; ``` -`IPv6` dominio admite entradas personalizadas como cadenas IPv6: +`IPv6` domain admite entradas personalizadas como cadenas IPv6: ``` sql -INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '2a02:aa08:e000:3100::2')('https://clickhouse.tech', '2001:44c8:129:2632:33:0:252:2')('https://clickhouse.yandex/docs/es/', '2a02:e980:1e::1'); +INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '2a02:aa08:e000:3100::2')('https://clickhouse.tech', '2001:44c8:129:2632:33:0:252:2')('https://clickhouse.yandex/docs/en/', '2a02:e980:1e::1'); SELECT * FROM hits; ``` @@ -38,7 +41,7 @@ SELECT * FROM hits; ``` text ┌─url────────────────────────────────┬─from──────────────────────────┐ │ https://clickhouse.tech │ 2001:44c8:129:2632:33:0:252:2 │ -│ https://clickhouse.tech/docs/es/ │ 2a02:e980:1e::1 │ +│ https://clickhouse.tech/docs/en/ │ 2a02:e980:1e::1 │ │ https://wikipedia.org │ 2a02:aa08:e000:3100::2 │ └────────────────────────────────────┴───────────────────────────────┘ ``` @@ -68,7 +71,7 @@ SELECT toTypeName(s), IPv6NumToString(from) as s FROM hits LIMIT 1; └───────────────────────────────────┴───────────────────────────────┘ ``` -O echar una ONU `FixedString(16)` valor: +O echar a un `FixedString(16)` valor: ``` sql SELECT toTypeName(i), CAST(from as FixedString(16)) as i FROM hits LIMIT 1; @@ -80,4 +83,4 @@ SELECT toTypeName(i), CAST(from as FixedString(16)) as i FROM hits LIMIT 1; └───────────────────────────────────────────┴─────────┘ ``` -[Artículo Original](https://clickhouse.tech/docs/es/data_types/domains/ipv6) +[Artículo Original](https://clickhouse.tech/docs/en/data_types/domains/ipv6) diff --git a/docs/es/data_types/domains/overview.md b/docs/es/sql_reference/data_types/domains/overview.md similarity index 54% rename from docs/es/data_types/domains/overview.md rename to docs/es/sql_reference/data_types/domains/overview.md index 2d662c4f62f..3207a52482d 100644 --- a/docs/es/data_types/domains/overview.md +++ b/docs/es/sql_reference/data_types/domains/overview.md @@ -1,30 +1,32 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 58 +toc_title: "Descripci\xF3n" --- # Dominio {#domains} -Los dominios son tipos de propósito especial, que agregan algunas características adicionales encima del tipo base existente, dejando intacto el formato en cable y en disco de la tabla subyacente. Por el momento, ClickHouse no admite dominios definidos por el usuario. 
+Los dominios son tipos de propósito especial que agregan algunas características adicionales encima del tipo base existente, pero dejando intacto el formato en cable y en disco del tipo de datos subyacente. Por el momento, ClickHouse no admite dominios definidos por el usuario. -Puede usar dominios en cualquier lugar que se pueda usar el tipo base correspondiente: +Puede usar dominios en cualquier lugar que se pueda usar el tipo base correspondiente, por ejemplo: -- Crear una columna de tipo de dominio +- Crear una columna de un tipo de dominio - Leer/escribir valores desde/a la columna de dominio -- Úselo como índice si el tipo base se puede usar como índice +- Úselo como un índice si un tipo base se puede usar como un índice - Funciones de llamada con valores de la columna de dominio -- sucesivamente. ### Características adicionales de los dominios {#extra-features-of-domains} - Nombre de tipo de columna explícito en `SHOW CREATE TABLE` o `DESCRIBE TABLE` - Entrada del formato humano-amistoso con `INSERT INTO domain_table(domain_column) VALUES(...)` - Salida al formato humano-amistoso para `SELECT domain_column FROM domain_table` -- Carga de datos desde una fuente externa en un formato amigable para los humanos: `INSERT INTO domain_table FORMAT CSV ...` +- Carga de datos desde una fuente externa en el formato de uso humano: `INSERT INTO domain_table FORMAT CSV ...` -### Limitación {#limitations} +### Limitacion {#limitations} - No se puede convertir la columna de índice del tipo base al tipo de dominio a través de `ALTER TABLE`. - No se pueden convertir implícitamente valores de cadena en valores de dominio al insertar datos de otra columna o tabla. - Domain no agrega restricciones en los valores almacenados. -[Artículo Original](https://clickhouse.tech/docs/es/data_types/domains/overview) +[Artículo Original](https://clickhouse.tech/docs/en/data_types/domains/overview) diff --git a/docs/es/data_types/enum.md b/docs/es/sql_reference/data_types/enum.md similarity index 95% rename from docs/es/data_types/enum.md rename to docs/es/sql_reference/data_types/enum.md index dd6ff216a52..d1065ccf863 100644 --- a/docs/es/data_types/enum.md +++ b/docs/es/sql_reference/data_types/enum.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 50 +toc_title: Enum --- # Enum {#enum} @@ -90,7 +93,7 @@ SELECT toTypeName(CAST('a', 'Enum(\'a\' = 1, \'b\' = 2)')) A cada uno de los valores se le asigna un número en el rango `-128 ... 127` para `Enum8` o en el rango `-32768 ... 32767` para `Enum16`. Todas las cadenas y números deben ser diferentes. Se permite una cadena vacía. Si se especifica este tipo (en una definición de tabla), los números pueden estar en un orden arbitrario. Sin embargo, el orden no importa. -Ni la cadena ni el valor numérico en un `Enum` puede ser [NULO](../query_language/syntax.md). +Ni la cadena ni el valor numérico en un `Enum` puede ser [NULL](../../sql_reference/syntax.md). Un `Enum` puede estar contenido en [NULL](nullable.md) tipo. Entonces, si crea una tabla usando la consulta @@ -126,4 +129,4 @@ El tipo Enum se puede cambiar sin costo usando ALTER, si solo se cambia el conju Usando ALTER, es posible cambiar un Enum8 a un Enum16 o viceversa, al igual que cambiar un Int8 a Int16. 
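+
+A minimal sketch of both kinds of ALTER, assuming a hypothetical table `t_enum` with a column `e Enum8('hello' = 1, 'world' = 2)`:
+
+``` sql
+-- Extending the set of values only changes metadata, so it is cheap.
+ALTER TABLE t_enum MODIFY COLUMN e Enum8('hello' = 1, 'world' = 2, 'test' = 3);
+
+-- Widening Enum8 to Enum16 works like changing Int8 to Int16.
+ALTER TABLE t_enum MODIFY COLUMN e Enum16('hello' = 1, 'world' = 2, 'test' = 3);
+```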
-[Artículo Original](https://clickhouse.tech/docs/es/data_types/enum/) +[Artículo Original](https://clickhouse.tech/docs/en/data_types/enum/) diff --git a/docs/es/data_types/fixedstring.md b/docs/es/sql_reference/data_types/fixedstring.md similarity index 78% rename from docs/es/data_types/fixedstring.md rename to docs/es/sql_reference/data_types/fixedstring.md index 51aa56239a5..d22b4e118b4 100644 --- a/docs/es/data_types/fixedstring.md +++ b/docs/es/sql_reference/data_types/fixedstring.md @@ -1,8 +1,11 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 45 +toc_title: Cadena fija (N) --- -# Cadena fija {#fixedstring} +# Cuerda fija {#fixedstring} Una cadena de longitud fija de `N` bytes (ni caracteres ni puntos de código). @@ -16,7 +19,7 @@ Donde `N` es un número natural. El `FixedString` tipo es eficiente cuando los datos tienen la longitud de `N` byte. En todos los demás casos, es probable que reduzca la eficiencia. -Ejemplos de los valores que se pueden almacenar eficientemente en `FixedString`Escrito columnas: +Ejemplos de los valores que se pueden almacenar eficientemente en `FixedString`escrito columnas: - La representación binaria de direcciones IP (`FixedString(16)` para IPv6). - Language codes (ru\_RU, en\_US … ). @@ -55,6 +58,6 @@ WHERE a = 'b\0' Este comportamiento difiere de MySQL para el `CHAR` tipo (donde las cadenas se rellenan con espacios y los espacios se eliminan para la salida). -Tenga en cuenta que la longitud del `FixedString(N)` el valor es constante. El [longitud](../query_language/functions/array_functions.md#array_functions-length) función devuelve `N` incluso si el `FixedString(N)` sólo se rellena con bytes nulos, pero el valor [Vaciar](../query_language/functions/string_functions.md#empty) función devuelve `1` en este caso. +Tenga en cuenta que la longitud del `FixedString(N)` el valor es constante. El [longitud](../../sql_reference/functions/array_functions.md#array_functions-length) función devuelve `N` incluso si el `FixedString(N)` sólo se rellena con bytes nulos, pero el valor [vaciar](../../sql_reference/functions/string_functions.md#empty) función devuelve `1` en este caso. -[Artículo Original](https://clickhouse.tech/docs/es/data_types/fixedstring/) +[Artículo Original](https://clickhouse.tech/docs/en/data_types/fixedstring/) diff --git a/docs/es/data_types/float.md b/docs/es/sql_reference/data_types/float.md similarity index 86% rename from docs/es/data_types/float.md rename to docs/es/sql_reference/data_types/float.md index 7ce233b612b..53a4a1112a8 100644 --- a/docs/es/data_types/float.md +++ b/docs/es/sql_reference/data_types/float.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 41 +toc_title: "Descripci\xF3n del producto" --- # Descripción del producto {#float32-float64} @@ -37,7 +40,7 @@ SELECT 1 - 0.9 A diferencia de SQL estándar, ClickHouse admite las siguientes categorías de números de punto flotante: -- `Inf` – Infinito. +- `Inf` – Infinity. @@ -51,7 +54,7 @@ SELECT 0.5 / 0 └────────────────┘ ``` -- `-Inf` – Infinito negativo. +- `-Inf` – Negative infinity. @@ -65,7 +68,7 @@ SELECT -0.5 / 0 └─────────────────┘ ``` -- `NaN` – No es un número. +- `NaN` – Not a number. @@ -79,6 +82,6 @@ SELECT 0 / 0 └──────────────┘ ``` - See the rules for `NaN` sorting in the section [ORDER BY clause](../query_language/select.md). 
+ See the rules for `NaN` sorting in the section [ORDER BY clause](../sql_reference/statements/select.md). -[Artículo Original](https://clickhouse.tech/docs/es/data_types/float/) +[Artículo Original](https://clickhouse.tech/docs/en/data_types/float/) diff --git a/docs/es/data_types/index.md b/docs/es/sql_reference/data_types/index.md similarity index 58% rename from docs/es/data_types/index.md rename to docs/es/sql_reference/data_types/index.md index f787476f864..f50f3461e18 100644 --- a/docs/es/data_types/index.md +++ b/docs/es/sql_reference/data_types/index.md @@ -1,5 +1,9 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_folder_title: Data Types +toc_priority: 37 +toc_title: "Implantaci\xF3n" --- # Tipos de datos {#data_types} @@ -8,4 +12,4 @@ ClickHouse puede almacenar varios tipos de datos en celdas de tabla. En esta sección se describen los tipos de datos admitidos y las consideraciones especiales para usarlos o implementarlos, si los hubiere. -[Artículo Original](https://clickhouse.tech/docs/es/data_types/) +[Artículo Original](https://clickhouse.tech/docs/en/data_types/) diff --git a/docs/es/data_types/int_uint.md b/docs/es/sql_reference/data_types/int_uint.md similarity index 72% rename from docs/es/data_types/int_uint.md rename to docs/es/sql_reference/data_types/int_uint.md index ee64697f2ef..4325bbd7a74 100644 --- a/docs/es/data_types/int_uint.md +++ b/docs/es/sql_reference/data_types/int_uint.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 40 +toc_title: UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64 --- # UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64 {#uint8-uint16-uint32-uint64-int8-int16-int32-int64} @@ -20,4 +23,4 @@ Enteros de longitud fija, con o sin signo. - UInt32 - \[0 : 4294967295\] - UInt64 - \[0 : 18446744073709551615\] -[Artículo Original](https://clickhouse.tech/docs/es/data_types/int_uint/) +[Artículo Original](https://clickhouse.tech/docs/en/data_types/int_uint/) diff --git a/docs/es/sql_reference/data_types/nested_data_structures/index.md b/docs/es/sql_reference/data_types/nested_data_structures/index.md new file mode 100644 index 00000000000..dc1bbb43826 --- /dev/null +++ b/docs/es/sql_reference/data_types/nested_data_structures/index.md @@ -0,0 +1,12 @@ +--- +machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_folder_title: Nested Data Structures +toc_hidden: true +toc_priority: 54 +toc_title: oculto +--- + +# Estructuras de datos anidados {#nested-data-structures} + +[Artículo Original](https://clickhouse.tech/docs/en/data_types/nested_data_structures/) diff --git a/docs/es/data_types/nested_data_structures/nested.md b/docs/es/sql_reference/data_types/nested_data_structures/nested.md similarity index 82% rename from docs/es/data_types/nested_data_structures/nested.md rename to docs/es/sql_reference/data_types/nested_data_structures/nested.md index b316a29584b..21e2479756f 100644 --- a/docs/es/data_types/nested_data_structures/nested.md +++ b/docs/es/sql_reference/data_types/nested_data_structures/nested.md @@ -1,10 +1,13 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 57 +toc_title: Anidado (Nombre1 Tipo1, Nombre2 Tipo2, ...) 
--- -# Nested(Name1 Type1, Name2 Type2, …) {#nestedname1-type1-name2-type2} +# Nested(name1 Type1, Name2 Type2, …) {#nestedname1-type1-name2-type2} -A nested data structure is like a nested table. The parameters of a nested data structure – the column names and types – are specified the same way as in a CREATE query. Each table row can correspond to any number of rows in a nested data structure. +A nested data structure is like a table inside a cell. The parameters of a nested data structure – the column names and types – are specified the same way as in a [CREATE TABLE](../../../sql_reference/statements/create.md) consulta. Cada fila de la tabla puede corresponder a cualquier número de filas en una estructura de datos anidada. Ejemplo: @@ -35,7 +38,7 @@ Este ejemplo declara la `Goals` estructura de datos anidada, que contiene datos Solo se admite un único nivel de anidamiento. Las columnas de estructuras anidadas que contienen matrices son equivalentes a matrices multidimensionales, por lo que tienen un soporte limitado (no hay soporte para almacenar estas columnas en tablas con el motor MergeTree). -En la mayoría de los casos, cuando se trabaja con una estructura de datos anidada, se especifican sus columnas individuales. Para hacer esto, los nombres de columna están separados por un punto. Estas columnas forman una matriz de tipos coincidentes. Todas las matrices de columnas de una sola estructura de datos anidados tienen la misma longitud. +En la mayoría de los casos, cuando se trabaja con una estructura de datos anidada, sus columnas se especifican con nombres de columna separados por un punto. Estas columnas forman una matriz de tipos coincidentes. Todas las matrices de columnas de una sola estructura de datos anidados tienen la misma longitud. Ejemplo: @@ -96,8 +99,8 @@ No puede realizar SELECT para toda una estructura de datos anidados. Solo puede Para una consulta INSERT, debe pasar todas las matrices de columnas de componentes de una estructura de datos anidada por separado (como si fueran matrices de columnas individuales). Durante la inserción, el sistema comprueba que tienen la misma longitud. -Para una consulta DESCRIBE, las columnas de una estructura de datos anidada se enumeran por separado de la misma manera. +Para DESCRIBIR consulta, las columnas anidadas estructura de datos se muestran por separado en la misma forma. -La consulta ALTER es muy limitada para los elementos de una estructura de datos anidada. +La consulta ALTER para elementos en una estructura de datos anidados tiene limitaciones. -[Artículo Original](https://clickhouse.tech/docs/es/data_types/nested_data_structures/nested/) +[Artículo Original](https://clickhouse.tech/docs/en/data_types/nested_data_structures/nested/) diff --git a/docs/es/data_types/nullable.md b/docs/es/sql_reference/data_types/nullable.md similarity index 67% rename from docs/es/data_types/nullable.md rename to docs/es/sql_reference/data_types/nullable.md index 9208ff90713..1bf2c919382 100644 --- a/docs/es/data_types/nullable.md +++ b/docs/es/sql_reference/data_types/nullable.md @@ -1,20 +1,23 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 54 +toc_title: 'NULL' --- -# Nombre de tipo) {#data_type-nullable} +# Nivel de Cifrado WEP) {#data_type-nullable} -Permite almacenar marcador especial ([NULO](../query_language/syntax.md)) que denota “missing value” con los valores normales permitidos por `TypeName`. 
Por ejemplo, un `Nullable(Int8)` Tipo columna puede almacenar `Int8` valores de tipo, y las filas que no tienen un valor almacenarán `NULL`. +Permite almacenar marcador especial ([NULL](../../sql_reference/syntax.md)) que denota “missing value” junto con los valores normales permitidos por `TypeName`. Por ejemplo, un `Nullable(Int8)` tipo columna puede almacenar `Int8` valores de tipo, y las filas que no tienen un valor almacenarán `NULL`. Para un `TypeName`, no puede usar tipos de datos compuestos [Matriz](array.md) y [Tupla](tuple.md). Los tipos de datos compuestos pueden contener `Nullable` valores de tipo, como `Array(Nullable(Int8))`. -Naciones `Nullable` no se puede incluir en los índices de tabla. +A `Nullable` no se puede incluir en los índices de tabla. `NULL` es el valor predeterminado para cualquier `Nullable` tipo, a menos que se especifique lo contrario en la configuración del servidor ClickHouse. ## Características de almacenamiento {#storage-features} -Almacenar `Nullable` es una columna de tabla, ClickHouse usa un archivo separado con `NULL` máscaras además del archivo normal con valores. Las entradas en el archivo de máscaras permiten ClickHouse distinguir entre `NULL` y un valor predeterminado del tipo de datos correspondiente para cada fila de la tabla. Debido a un archivo adicional, `Nullable` La columna consume espacio de almacenamiento adicional en comparación con una normal similar. +Almacenar `Nullable` en una columna de tabla, ClickHouse usa un archivo separado con `NULL` máscaras además del archivo normal con valores. Las entradas en el archivo de máscaras permiten ClickHouse distinguir entre `NULL` y un valor predeterminado del tipo de datos correspondiente para cada fila de la tabla. Debido a un archivo adicional, `Nullable` La columna consume espacio de almacenamiento adicional en comparación con una normal similar. !!! info "Nota" Utilizar `Nullable` casi siempre afecta negativamente al rendimiento, tenga esto en cuenta al diseñar sus bases de datos. @@ -40,4 +43,4 @@ SELECT x + y FROM t_null └────────────┘ ``` -[Artículo Original](https://clickhouse.tech/docs/es/data_types/nullable/) +[Artículo Original](https://clickhouse.tech/docs/en/data_types/nullable/) diff --git a/docs/es/sql_reference/data_types/special_data_types/expression.md b/docs/es/sql_reference/data_types/special_data_types/expression.md new file mode 100644 index 00000000000..2abed21856d --- /dev/null +++ b/docs/es/sql_reference/data_types/special_data_types/expression.md @@ -0,0 +1,12 @@ +--- +machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 58 +toc_title: Expresion +--- + +# Expresion {#expression} + +Las expresiones se utilizan para representar lambdas en funciones de orden superior. 
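+
+An `Expression` value cannot be selected or stored directly; it only appears as a lambda passed to a higher-order function. A minimal sketch:
+
+``` sql
+-- The lambda x -> x * 2 is an Expression consumed by arrayMap.
+SELECT arrayMap(x -> x * 2, [1, 2, 3]) AS doubled;
+```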
+ +[Artículo Original](https://clickhouse.tech/docs/en/data_types/special_data_types/expression/) diff --git a/docs/es/sql_reference/data_types/special_data_types/index.md b/docs/es/sql_reference/data_types/special_data_types/index.md new file mode 100644 index 00000000000..004d69df154 --- /dev/null +++ b/docs/es/sql_reference/data_types/special_data_types/index.md @@ -0,0 +1,14 @@ +--- +machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_folder_title: Special Data Types +toc_hidden: true +toc_priority: 55 +toc_title: oculto +--- + +# Tipos de datos especiales {#special-data-types} + +Los valores de tipo de datos especiales no se pueden serializar para guardar en una tabla o salida en los resultados de la consulta, pero se pueden usar como un resultado intermedio durante la ejecución de la consulta. + +[Artículo Original](https://clickhouse.tech/docs/en/data_types/special_data_types/) diff --git a/docs/es/data_types/special_data_types/interval.md b/docs/es/sql_reference/data_types/special_data_types/interval.md similarity index 72% rename from docs/es/data_types/special_data_types/interval.md rename to docs/es/sql_reference/data_types/special_data_types/interval.md index 099b3d064a5..9546c8c4b8e 100644 --- a/docs/es/data_types/special_data_types/interval.md +++ b/docs/es/sql_reference/data_types/special_data_types/interval.md @@ -1,10 +1,13 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 61 +toc_title: Intervalo --- # Intervalo {#data-type-interval} -La familia de tipos de datos que representan intervalos de fecha y hora. Los tipos resultantes del [INTERVALO](../../query_language/operators.md#operator-interval) operador. +La familia de tipos de datos que representan intervalos de fecha y hora. Los tipos resultantes del [INTERVAL](../../../sql_reference/operators.md#operator-interval) operador. !!! warning "Advertencia" `Interval` los valores de tipo de datos no se pueden almacenar en tablas. @@ -25,7 +28,7 @@ Tipos de intervalo admitidos: - `QUARTER` - `YEAR` -Para cada tipo de intervalo, hay un tipo de datos independiente. Por ejemplo, el `DAY` se expresa como el intervalo `IntervalDay` Tipo de datos: +Para cada tipo de intervalo, hay un tipo de datos independiente. Por ejemplo, el `DAY` el intervalo corresponde a la `IntervalDay` tipo de datos: ``` sql SELECT toTypeName(INTERVAL 4 DAY) @@ -39,7 +42,7 @@ SELECT toTypeName(INTERVAL 4 DAY) ## Observaciones de uso {#data-type-interval-usage-remarks} -Usted puede utilizar `Interval`-type valores en operaciones aritméticas con [Fecha](../../data_types/date.md) y [FechaHora](../../data_types/datetime.md)-type valores. Por ejemplo, puede agregar 4 días a la hora actual: +Usted puede utilizar `Interval`-type valores en operaciones aritméticas con [Fecha](../../../sql_reference/data_types/date.md) y [FechaHora](../../../sql_reference/data_types/datetime.md)-type valores. Por ejemplo, puede agregar 4 días a la hora actual: ``` sql SELECT now() as current_date_time, current_date_time + INTERVAL 4 DAY @@ -51,7 +54,7 @@ SELECT now() as current_date_time, current_date_time + INTERVAL 4 DAY └─────────────────────┴───────────────────────────────┘ ``` -Los intervalos con diferentes tipos no se pueden combinar. No puedes usar intervalos como `4 DAY 1 HOUR`. 
Exprese los intervalos en unidades que son más pequeñas o iguales que la unidad más pequeña del intervalo, por ejemplo, el intervalo `1 day and an hour` se puede expresar como `25 HOUR` o `90000 SECOND`. +Los intervalos con diferentes tipos no se pueden combinar. No puedes usar intervalos como `4 DAY 1 HOUR`. Especifique los intervalos en unidades que son más pequeñas o iguales que la unidad más pequeña del intervalo, por ejemplo, el intervalo `1 day and an hour` se puede expresar como `25 HOUR` o `90000 SECOND`. No puede realizar operaciones aritméticas con `Interval`-type valores, pero puede agregar intervalos de diferentes tipos en consecuencia a los valores en `Date` o `DateTime` tipos de datos. Por ejemplo: @@ -78,5 +81,5 @@ Code: 43. DB::Exception: Received from localhost:9000. DB::Exception: Wrong argu ## Ver también {#see-also} -- [INTERVALO](../../query_language/operators.md#operator-interval) operador -- [ToInterval](../../query_language/functions/type_conversion_functions.md#function-tointerval) funciones de conversión de tipo +- [INTERVAL](../../../sql_reference/operators.md#operator-interval) operador +- [ToInterval](../../../sql_reference/functions/type_conversion_functions.md#function-tointerval) funciones de conversión de tipo diff --git a/docs/es/data_types/special_data_types/nothing.md b/docs/es/sql_reference/data_types/special_data_types/nothing.md similarity index 59% rename from docs/es/data_types/special_data_types/nothing.md rename to docs/es/sql_reference/data_types/special_data_types/nothing.md index 2f8b44e2c79..2f5144f701d 100644 --- a/docs/es/data_types/special_data_types/nothing.md +++ b/docs/es/sql_reference/data_types/special_data_types/nothing.md @@ -1,12 +1,15 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 60 +toc_title: Nada --- # Nada {#nothing} El único propósito de este tipo de datos es representar casos en los que no se espera un valor. Entonces no puedes crear un `Nothing` valor de tipo. -Por ejemplo, literal [NULO](../../query_language/syntax.md#null-literal) tiene tipo de `Nullable(Nothing)`. Ver más sobre [NULO](../../data_types/nullable.md). +Por ejemplo, literal [NULL](../../../sql_reference/syntax.md#null-literal) tiene tipo de `Nullable(Nothing)`. Ver más sobre [NULL](../../../sql_reference/data_types/nullable.md). El `Nothing` tipo puede también se utiliza para denotar matrices vacías: @@ -20,4 +23,4 @@ SELECT toTypeName(array()) └─────────────────────┘ ``` -[Artículo Original](https://clickhouse.tech/docs/es/data_types/special_data_types/nothing/) +[Artículo Original](https://clickhouse.tech/docs/en/data_types/special_data_types/nothing/) diff --git a/docs/es/sql_reference/data_types/special_data_types/set.md b/docs/es/sql_reference/data_types/special_data_types/set.md new file mode 100644 index 00000000000..2b0fc5f5b13 --- /dev/null +++ b/docs/es/sql_reference/data_types/special_data_types/set.md @@ -0,0 +1,12 @@ +--- +machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 59 +toc_title: Establecer +--- + +# Establecer {#set} + +Utilizado para la mitad derecha de un [IN](../../../sql_reference/statements/select.md#select-in-operators) expresion. 
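+
+A minimal sketch: the right-hand side of `IN` below is materialized as a `Set` value during query execution, but such a value can never be stored in a table or returned to the client.
+
+``` sql
+SELECT number FROM system.numbers WHERE number IN (1, 3, 5) LIMIT 3;
+```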
+ +[Artículo Original](https://clickhouse.tech/docs/en/data_types/special_data_types/set/) diff --git a/docs/es/data_types/string.md b/docs/es/sql_reference/data_types/string.md similarity index 87% rename from docs/es/data_types/string.md rename to docs/es/sql_reference/data_types/string.md index 238de32fa22..15217cf5c58 100644 --- a/docs/es/data_types/string.md +++ b/docs/es/sql_reference/data_types/string.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 44 +toc_title: Cadena --- # Cadena {#string} @@ -14,4 +17,4 @@ Si necesita almacenar textos, le recomendamos que utilice la codificación UTF-8 Del mismo modo, ciertas funciones para trabajar con cadenas tienen variaciones separadas que funcionan bajo el supuesto de que la cadena contiene un conjunto de bytes que representan un texto codificado en UTF-8. Por ejemplo, el ‘length’ función calcula la longitud de cadena en bytes, mientras que la ‘lengthUTF8’ función calcula la longitud de la cadena en puntos de código Unicode, suponiendo que el valor está codificado UTF-8. -[Artículo Original](https://clickhouse.tech/docs/es/data_types/string/) +[Artículo Original](https://clickhouse.tech/docs/en/data_types/string/) diff --git a/docs/es/data_types/tuple.md b/docs/es/sql_reference/data_types/tuple.md similarity index 73% rename from docs/es/data_types/tuple.md rename to docs/es/sql_reference/data_types/tuple.md index 68af5e5a77d..70da0f1652d 100644 --- a/docs/es/data_types/tuple.md +++ b/docs/es/sql_reference/data_types/tuple.md @@ -1,16 +1,19 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 53 +toc_title: Tuple (T1, T2, ...) --- -# Tuple (T1, T2, …) {#tuplet1-t2} +# Tuple(t1, T2, …) {#tuplet1-t2} -Una tupla de elementos, cada uno con un individuo [Tipo](index.md#data_types). +Una tupla de elementos, cada uno con un individuo [tipo](index.md#data_types). -Las tuplas se utilizan para la agrupación temporal de columnas. Las columnas se pueden agrupar cuando se usa una expresión IN en una consulta y para especificar ciertos parámetros formales de las funciones lambda. Para obtener más información, consulte las secciones [IN operadores](../query_language/select.md) y [Funciones de orden superior](../query_language/functions/higher_order_functions.md). +Las tuplas se utilizan para la agrupación temporal de columnas. Las columnas se pueden agrupar cuando se usa una expresión IN en una consulta y para especificar ciertos parámetros formales de las funciones lambda. Para obtener más información, consulte las secciones [IN operadores](../../sql_reference/statements/select.md) y [Funciones de orden superior](../../sql_reference/functions/higher_order_functions.md). Las tuplas pueden ser el resultado de una consulta. En este caso, para formatos de texto distintos de JSON, los valores están separados por comas entre corchetes. En formatos JSON, las tuplas se generan como matrices (entre corchetes). -## Creando una tupla {#creating-a-tuple} +## Creación de una tupla {#creating-a-tuple} Puedes usar una función para crear una tupla: @@ -32,7 +35,7 @@ SELECT tuple(1,'a') AS x, toTypeName(x) ## Trabajar con tipos de datos {#working-with-data-types} -Al crear una tupla sobre la marcha, ClickHouse detecta automáticamente el tipo de cada argumento como el mínimo de los tipos que pueden almacenar el valor del argumento. 
Si el argumento es [NULO](../query_language/syntax.md#null-literal), el tipo del elemento de tupla es [NULO](nullable.md). +Al crear una tupla sobre la marcha, ClickHouse detecta automáticamente el tipo de cada argumento como el mínimo de los tipos que pueden almacenar el valor del argumento. Si el argumento es [NULL](../../sql_reference/syntax.md#null-literal), el tipo del elemento de tupla es [NULL](nullable.md). Ejemplo de detección automática de tipos de datos: @@ -46,4 +49,4 @@ SELECT tuple(1, NULL) AS x, toTypeName(x) └──────────┴─────────────────────────────────┘ ``` -[Artículo Original](https://clickhouse.tech/docs/es/data_types/tuple/) +[Artículo Original](https://clickhouse.tech/docs/en/data_types/tuple/) diff --git a/docs/es/data_types/uuid.md b/docs/es/sql_reference/data_types/uuid.md similarity index 72% rename from docs/es/data_types/uuid.md rename to docs/es/sql_reference/data_types/uuid.md index 1c1910edbf1..32182011548 100644 --- a/docs/es/data_types/uuid.md +++ b/docs/es/sql_reference/data_types/uuid.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 46 +toc_title: UUID --- # UUID {#uuid-data-type} @@ -20,7 +23,7 @@ Si no especifica el valor de la columna UUID al insertar un nuevo registro, el v ## Cómo generar {#how-to-generate} -Para generar el valor UUID, ClickHouse proporciona el [GenerateUUIDv4](../query_language/functions/uuid_functions.md) función. +Para generar el valor UUID, ClickHouse proporciona el [GenerateUUIDv4](../../sql_reference/functions/uuid_functions.md) función. ## Ejemplo de uso {#usage-example} @@ -67,8 +70,8 @@ SELECT * FROM t_uuid ## Restricción {#restrictions} -El tipo de datos UUID sólo admite funciones que [Cadena](string.md) tipo de datos también soporta (por ejemplo, [minuto](../query_language/agg_functions/reference.md#agg_function-min), [máximo](../query_language/agg_functions/reference.md#agg_function-max), y [contar](../query_language/agg_functions/reference.md#agg_function-count)). +El tipo de datos UUID sólo admite funciones que [Cadena](string.md) tipo de datos también soporta (por ejemplo, [minuto](../../sql_reference/aggregate_functions/reference.md#agg_function-min), [máximo](../../sql_reference/aggregate_functions/reference.md#agg_function-max), y [contar](../../sql_reference/aggregate_functions/reference.md#agg_function-count)). -El tipo de datos UUID no es compatible con operaciones aritméticas (por ejemplo, [abdominales](../query_language/functions/arithmetic_functions.md#arithm_func-abs)) o funciones agregadas, tales como [resumir](../query_language/agg_functions/reference.md#agg_function-sum) y [avg](../query_language/agg_functions/reference.md#agg_function-avg). +El tipo de datos UUID no es compatible con operaciones aritméticas (por ejemplo, [abdominales](../../sql_reference/functions/arithmetic_functions.md#arithm_func-abs)) o funciones agregadas, tales como [resumir](../../sql_reference/aggregate_functions/reference.md#agg_function-sum) y [avg](../../sql_reference/aggregate_functions/reference.md#agg_function-avg). 
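+
+A minimal sketch of this restriction (the subquery exists only for illustration): `count` and `min` accept UUID values, while an arithmetic aggregate such as `sum` would be rejected with a type error.
+
+``` sql
+SELECT count(u), min(u)
+FROM (SELECT generateUUIDv4() AS u FROM numbers(3));
+
+-- SELECT sum(u) FROM ... fails, because sum is not defined for UUID.
+```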
-[Artículo Original](https://clickhouse.tech/docs/es/data_types/uuid/) +[Artículo Original](https://clickhouse.tech/docs/en/data_types/uuid/) diff --git a/docs/es/query_language/dicts/external_dicts.md b/docs/es/sql_reference/dictionaries/external_dictionaries/external_dicts.md similarity index 65% rename from docs/es/query_language/dicts/external_dicts.md rename to docs/es/sql_reference/dictionaries/external_dictionaries/external_dicts.md index e0b09c6015e..6b64d47c0c0 100644 --- a/docs/es/query_language/dicts/external_dicts.md +++ b/docs/es/sql_reference/dictionaries/external_dictionaries/external_dicts.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 39 +toc_title: "Descripci\xF3n General" --- # Diccionarios externos {#dicts-external-dicts} @@ -10,11 +13,11 @@ Haga clic en Casa: - Almacena total o parcialmente los diccionarios en RAM. - Actualiza periódicamente los diccionarios y carga dinámicamente los valores que faltan. En otras palabras, los diccionarios se pueden cargar dinámicamente. -- Permite crear diccionarios externos con archivos xml o [Consultas DDL](../create.md#create-dictionary-query). +- Permite crear diccionarios externos con archivos xml o [Consultas DDL](../../statements/create.md#create-dictionary-query). -La configuración de diccionarios externos se puede ubicar en uno o más archivos xml. La ruta de acceso a la configuración se especifica en el [Diccionarios\_config](../../operations/server_settings/settings.md#server_settings-dictionaries_config) parámetro. +La configuración de diccionarios externos se puede ubicar en uno o más archivos xml. La ruta de acceso a la configuración se especifica en el [Diccionarios\_config](../../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-dictionaries_config) parámetro. -Los diccionarios se pueden cargar en el inicio del servidor o en el primer uso, dependiendo de la [Diccionarios\_lazy\_load](../../operations/server_settings/settings.md#server_settings-dictionaries_lazy_load) configuración. +Los diccionarios se pueden cargar en el inicio del servidor o en el primer uso, dependiendo de la [Diccionarios\_lazy\_load](../../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-dictionaries_lazy_load) configuración. El archivo de configuración del diccionario tiene el siguiente formato: @@ -36,10 +39,10 @@ El archivo de configuración del diccionario tiene el siguiente formato: Usted puede [configurar](external_dicts_dict.md) cualquier número de diccionarios en el mismo archivo. -[Consultas DDL para diccionarios](../create.md#create-dictionary-query) no requiere ningún registro adicional en la configuración del servidor. Permiten trabajar con diccionarios como entidades de primera clase, como tablas o vistas. +[Consultas DDL para diccionarios](../../statements/create.md#create-dictionary-query) no requiere ningún registro adicional en la configuración del servidor. Permiten trabajar con diccionarios como entidades de primera clase, como tablas o vistas. !!! attention "Atención" - Puede convertir valores para un diccionario pequeño describiéndolo en un `SELECT` Consulta (ver el [Ciudad](../functions/other_functions.md) función). Esta funcionalidad no está relacionada con diccionarios externos. + Puede convertir valores para un diccionario pequeño describiéndolo en un `SELECT` consulta (ver el [transformar](../../../sql_reference/functions/other_functions.md) función). 
Esta funcionalidad no está relacionada con diccionarios externos. ## Ver también {#ext-dicts-see-also} @@ -48,6 +51,6 @@ Usted puede [configurar](external_dicts_dict.md) cualquier número de diccionari - [Actualizaciones del diccionario](external_dicts_dict_lifetime.md) - [Fuentes de diccionarios externos](external_dicts_dict_sources.md) - [Clave y campos del diccionario](external_dicts_dict_structure.md) -- [Funciones para trabajar con diccionarios externos](../functions/ext_dict_functions.md) +- [Funciones para trabajar con diccionarios externos](../../../sql_reference/functions/ext_dict_functions.md) -[Artículo Original](https://clickhouse.tech/docs/es/query_language/dicts/external_dicts/) +[Artículo Original](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts/) diff --git a/docs/es/query_language/dicts/external_dicts_dict.md b/docs/es/sql_reference/dictionaries/external_dictionaries/external_dicts_dict.md similarity index 52% rename from docs/es/query_language/dicts/external_dicts_dict.md rename to docs/es/sql_reference/dictionaries/external_dictionaries/external_dicts_dict.md index 1d1b7bbcb0e..18481e0a9ef 100644 --- a/docs/es/query_language/dicts/external_dicts_dict.md +++ b/docs/es/sql_reference/dictionaries/external_dictionaries/external_dicts_dict.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 40 +toc_title: "Configuraci\xF3n de un diccionario externo" --- # Configuración de un diccionario externo {#dicts-external-dicts-dict} @@ -28,7 +31,7 @@ Si el diccionario se configura usando un archivo xml, la configuración del dicc ``` -Correspondiente [Consulta DDL](../create.md#create-dictionary-query) tiene la siguiente estructura: +Correspondiente [Consulta DDL](../../statements/create.md#create-dictionary-query) tiene la siguiente estructura: ``` sql CREATE DICTIONARY dict_name @@ -41,10 +44,10 @@ LAYOUT(...) -- Memory layout configuration LIFETIME(...) -- Lifetime of dictionary in memory ``` -- `name` – El identificador que se puede utilizar para acceder al diccionario. Usa los personajes `[a-zA-Z0-9_\-]`. -- [fuente](external_dicts_dict_sources.md) — Fuente del diccionario. -- [diseño](external_dicts_dict_layout.md) — Diseño del diccionario en la memoria. -- [estructura](external_dicts_dict_structure.md) — Estructura del diccionario . Una clave y atributos que se pueden recuperar con esta clave. -- [vida](external_dicts_dict_lifetime.md) — Frecuencia de actualizaciones del diccionario. +- `name` – The identifier that can be used to access the dictionary. Use the characters `[a-zA-Z0-9_\-]`. +- [fuente](external_dicts_dict_sources.md) — Source of the dictionary. +- [diseño](external_dicts_dict_layout.md) — Dictionary layout in memory. +- [estructura](external_dicts_dict_structure.md) — Structure of the dictionary . A key and attributes that can be retrieved by this key. +- [vida](external_dicts_dict_lifetime.md) — Frequency of dictionary updates. 
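Putting the clauses together, a minimal complete statement might look like this (a sketch; every name and connection parameter is a hypothetical placeholder):

``` sql
CREATE DICTIONARY my_dict
(
    id UInt64,
    value String DEFAULT ''
)
PRIMARY KEY id
-- Source: a hypothetical table in a local ClickHouse database.
SOURCE(CLICKHOUSE(host 'localhost' port 9000 user 'default' password '' db 'default' table 'source_table'))
-- Layout: flat in-memory arrays, suitable for small dictionaries with UInt64 keys.
LAYOUT(FLAT())
-- Reload the dictionary every 300-360 seconds.
LIFETIME(MIN 300 MAX 360)
```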
-[Artículo Original](https://clickhouse.tech/docs/es/query_language/dicts/external_dicts_dict/) +[Artículo Original](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict/) diff --git a/docs/es/query_language/dicts/external_dicts_dict_hierarchical.md b/docs/es/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_hierarchical.md similarity index 73% rename from docs/es/query_language/dicts/external_dicts_dict_hierarchical.md rename to docs/es/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_hierarchical.md index 2665fc76d0e..23b6cf6ee3e 100644 --- a/docs/es/query_language/dicts/external_dicts_dict_hierarchical.md +++ b/docs/es/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_hierarchical.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 45 +toc_title: "Diccionarios jer\xE1rquicos" --- # Diccionarios jerárquicos {#hierarchical-dictionaries} @@ -26,17 +29,17 @@ Esta jerarquía se puede expresar como la siguiente tabla de diccionario. | region\_id | parent\_region | nombre\_región | |------------|----------------|----------------| -| Uno | Cero | Rusia | -| Cómo hacer | Uno | Moscu | -| Cómo hacer | Cómo hacer | Centrar | -| Cuatro | Cero | Gran Bretaña | -| Cinco | Cuatro | Londres | +| 1 | 0 | Rusia | +| 2 | 1 | Moscu | +| 3 | 2 | Centrar | +| 4 | 0 | Gran Bretaña | +| 5 | 4 | Londres | Esta tabla contiene una columna `parent_region` que contiene la clave del padre más cercano para el elemento. ClickHouse soporta el [jerárquica](external_dicts_dict_structure.md#hierarchical-dict-attr) propiedad para [diccionario externo](index.md) atributo. Esta propiedad le permite configurar el diccionario jerárquico similar al descrito anteriormente. -El [dictGetHierarchy](../functions/ext_dict_functions.md#dictgethierarchy) función le permite obtener la cadena principal de un elemento. +El [dictGetHierarchy](../../../sql_reference/functions/ext_dict_functions.md#dictgethierarchy) función le permite obtener la cadena principal de un elemento. Para nuestro ejemplo, la estructura del diccionario puede ser la siguiente: @@ -64,4 +67,4 @@ Para nuestro ejemplo, la estructura del diccionario puede ser la siguiente: ``` -[Artículo Original](https://clickhouse.tech/docs/es/query_language/dicts/external_dicts_dict_hierarchical/) +[Artículo Original](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_hierarchical/) diff --git a/docs/es/query_language/dicts/external_dicts_dict_layout.md b/docs/es/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_layout.md similarity index 93% rename from docs/es/query_language/dicts/external_dicts_dict_layout.md rename to docs/es/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_layout.md index cfa7dcaf304..9478b239914 100644 --- a/docs/es/query_language/dicts/external_dicts_dict_layout.md +++ b/docs/es/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_layout.md @@ -1,12 +1,15 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 41 +toc_title: Almacenamiento de diccionarios en la memoria --- # Almacenamiento de diccionarios en la memoria {#dicts-external-dicts-dict-layout} -Hay una variad de formas de almacenar diccionarios en la memoria. +Hay una variedad de formas de almacenar diccionarios en la memoria. 
-Recomendamos [Plano](#flat), [Hashed](#dicts-external_dicts_dict_layout-hashed) y [Método de codificación de datos:](#complex-key-hashed). que proporcionan una velocidad de procesamiento óptima.
+Recomendamos [plano](#flat), [Hashed](#dicts-external_dicts_dict_layout-hashed) y [complex\_key\_hashed](#complex-key-hashed), que proporcionan una velocidad de procesamiento óptima.

No se recomienda el almacenamiento en caché debido al rendimiento potencialmente bajo y las dificultades para seleccionar los parámetros óptimos. Lea más en la sección “[cache](#cache)”.

@@ -38,7 +41,7 @@ La configuración se ve así:
    ``` -Correspondiente [Consulta DDL](../create.md#create-dictionary-query): +Correspondiente [Consulta DDL](../../statements/create.md#create-dictionary-query): ``` sql CREATE DICTIONARY (...) @@ -49,7 +52,7 @@ LAYOUT(LAYOUT_TYPE(param value)) -- layout settings ## Maneras de almacenar diccionarios en la memoria {#ways-to-store-dictionaries-in-memory} -- [Plano](#flat) +- [plano](#flat) - [Hashed](#dicts-external_dicts_dict_layout-hashed) - [Sistema abierto.](#dicts-external_dicts_dict_layout-sparse_hashed) - [cache](#cache) @@ -58,7 +61,7 @@ LAYOUT(LAYOUT_TYPE(param value)) -- layout settings - [complejo\_key\_cache](#complex-key-cache) - [Método de codificación de datos:](#ip-trie) -### Plano {#flat} +### plano {#flat} El diccionario está completamente almacenado en la memoria en forma de matrices planas. ¿Cuánta memoria usa el diccionario? La cantidad es proporcional al tamaño de la clave más grande (en el espacio utilizado). @@ -143,15 +146,15 @@ Este método de almacenamiento funciona de la misma manera que hash y permite el Ejemplo: La tabla contiene descuentos para cada anunciante en el formato: ``` text -+---------------|---------------------|-------------------|--------+ ++---------|-------------|-------------|------+ | advertiser id | discount start date | discount end date | amount | +===============+=====================+===================+========+ | 123 | 2015-01-01 | 2015-01-15 | 0.15 | -+---------------|---------------------|-------------------|--------+ ++---------|-------------|-------------|------+ | 123 | 2015-01-16 | 2015-01-31 | 0.25 | -+---------------|---------------------|-------------------|--------+ ++---------|-------------|-------------|------+ | 456 | 2015-01-01 | 2015-01-15 | 0.05 | -+---------------|---------------------|-------------------|--------+ ++---------|-------------|-------------|------+ ``` Para utilizar un ejemplo para intervalos de fechas, defina el `range_min` y `range_max` elementos en el [estructura](external_dicts_dict_structure.md). Estos elementos deben contener elementos `name` y`type` (si `type` no se especifica, se utilizará el tipo predeterminado - Fecha). `type` puede ser de cualquier tipo numérico (Fecha / DateTime / UInt64 / Int32 / otros). @@ -300,17 +303,17 @@ Este tipo de almacenamiento sirve para asignar prefijos de red (direcciones IP) Ejemplo: La tabla contiene prefijos de red y su correspondiente número AS y código de país: ``` text - +-----------------|-------|--------+ + +-----------|-----|------+ | prefix | asn | cca2 | +=================+=======+========+ | 202.79.32.0/20 | 17501 | NP | - +-----------------|-------|--------+ + +-----------|-----|------+ | 2620:0:870::/48 | 3856 | US | - +-----------------|-------|--------+ + +-----------|-----|------+ | 2a02:6b8:1::/48 | 13238 | RU | - +-----------------|-------|--------+ + +-----------|-----|------+ | 2001:db8::/32 | 65536 | ZZ | - +-----------------|-------|--------+ + +-----------|-----|------+ ``` Cuando se utiliza este tipo de diseño, la estructura debe tener una clave compuesta. @@ -367,4 +370,4 @@ Todavía no se admiten otros tipos. La función devuelve el atributo para el pre Los datos se almacenan en un `trie`. Debe encajar completamente en la RAM. 
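As a sketch of how an `ip_trie` dictionary is queried (the name `my_ip_trie_dict` is hypothetical; per the layout description above, the key must be passed as a tuple holding one address):

``` sql
-- Look up the ASN and country code attributes for an IPv4 address.
SELECT
    dictGetUInt32('my_ip_trie_dict', 'asn', tuple(IPv4StringToNum('202.79.32.10'))) AS asn,
    dictGetString('my_ip_trie_dict', 'cca2', tuple(IPv4StringToNum('202.79.32.10'))) AS cca2
```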
-[Artículo Original](https://clickhouse.tech/docs/es/query_language/dicts/external_dicts_dict_layout/) +[Artículo Original](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_layout/) diff --git a/docs/es/query_language/dicts/external_dicts_dict_lifetime.md b/docs/es/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_lifetime.md similarity index 94% rename from docs/es/query_language/dicts/external_dicts_dict_lifetime.md rename to docs/es/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_lifetime.md index 4e004d0ee7f..78df541cff6 100644 --- a/docs/es/query_language/dicts/external_dicts_dict_lifetime.md +++ b/docs/es/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_lifetime.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 42 +toc_title: Actualizaciones del diccionario --- # Actualizaciones del diccionario {#dictionary-updates} @@ -80,4 +83,4 @@ SOURCE(ODBC(... invalidate_query 'SELECT update_time FROM dictionary_source wher ... ``` -[Artículo Original](https://clickhouse.tech/docs/es/query_language/dicts/external_dicts_dict_lifetime/) +[Artículo Original](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_lifetime/) diff --git a/docs/es/query_language/dicts/external_dicts_dict_sources.md b/docs/es/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md similarity index 89% rename from docs/es/query_language/dicts/external_dicts_dict_sources.md rename to docs/es/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md index 953e0d193e0..e239fa6c5e5 100644 --- a/docs/es/query_language/dicts/external_dicts_dict_sources.md +++ b/docs/es/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 43 +toc_title: Fuentes de diccionarios externos --- # Fuentes de diccionarios externos {#dicts-external-dicts-dict-sources} @@ -23,7 +26,7 @@ Si el diccionario se configura usando xml-file, la configuración se ve así: ``` -En caso de [Consulta DDL](../create.md#create-dictionary-query), la configuración igual parecerá: +En caso de [Consulta DDL](../../statements/create.md#create-dictionary-query), la configuración igual parecerá: ``` sql CREATE DICTIONARY dict_name (...) @@ -68,11 +71,11 @@ SOURCE(FILE(path '/opt/dictionaries/os.tsv' format 'TabSeparated')) Configuración de campos: - `path` – The absolute path to the file. -- `format` – The file format. All the formats described in “[Formato](../../interfaces/formats.md#formats)” son compatibles. +- `format` – The file format. All the formats described in “[Formato](../../../interfaces/formats.md#formats)” son compatibles. ## Archivo ejecutable {#dicts-external_dicts_dict_sources-executable} -Trabajar con archivos ejecutables depende de [cómo se almacena el diccionario en la memoria](external_dicts_dict_layout.md). Si el diccionario se almacena usando `cache` y `complex_key_cache`, ClickHouse requests the necessary keys by sending a request to the executable file’s STDIN. Otherwise, ClickHouse starts executable file and treats its output as dictionary data. +Trabajar con archivos ejecutables depende de [cómo se almacena el diccionario en la memoria](external_dicts_dict_layout.md). 
Si el diccionario se almacena usando `cache` y `complex_key_cache`, ClickHouse solicita las claves necesarias enviando una solicitud al STDIN del archivo ejecutable. De lo contrario, ClickHouse inicia el archivo ejecutable y trata su salida como datos del diccionario. Ejemplo de configuración: @@ -94,9 +97,9 @@ SOURCE(EXECUTABLE(command 'cat /opt/dictionaries/os.tsv' format 'TabSeparated')) Configuración de campos: - `command` – The absolute path to the executable file, or the file name (if the program directory is written to `PATH`). -- `format` – The file format. All the formats described in “[Formato](../../interfaces/formats.md#formats)” son compatibles. +- `format` – The file format. All the formats described in “[Formato](../../../interfaces/formats.md#formats)” son compatibles. -## HTTP(s)) {#dicts-external_dicts_dict_sources-http} +## Http(s) {#dicts-external_dicts_dict_sources-http} Trabajar con un servidor HTTP depende de [cómo se almacena el diccionario en la memoria](external_dicts_dict_layout.md). Si el diccionario se almacena usando `cache` y `complex_key_cache`, ClickHouse solicita las claves necesarias enviando una solicitud a través del `POST` método. @@ -132,12 +135,12 @@ SOURCE(HTTP( )) ``` -Para que ClickHouse tenga acceso a un recurso HTTPS, debe [configurar openSSL](../../operations/server_settings/settings.md#server_settings-openssl) en la configuración del servidor. +Para que ClickHouse tenga acceso a un recurso HTTPS, debe [configurar openSSL](../../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-openssl) en la configuración del servidor. Configuración de campos: - `url` – The source URL. -- `format` – The file format. All the formats described in “[Formato](../../interfaces/formats.md#formats)” son compatibles. +- `format` – The file format. All the formats described in “[Formato](../../../interfaces/formats.md#formats)” son compatibles. - `credentials` – Basic HTTP authentication. Optional parameter. - `user` – Username required for the authentication. - `password` – Password required for the authentication. @@ -181,9 +184,9 @@ Configuración de campos: - `connection_string` – Connection string. - `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [Actualización de diccionarios](external_dicts_dict_lifetime.md). -ClickHouse receives quoting symbols from ODBC-driver and quote all settings in queries to driver, so it’s necessary to set table name accordingly to table name case in database. +ClickHouse recibe símbolos de cotización del controlador ODBC y cita todas las configuraciones en las consultas al controlador, por lo que es necesario establecer el nombre de la tabla de acuerdo con el caso del nombre de la tabla en la base de datos. -Si tiene problemas con las codificaciones al utilizar Oracle, consulte el [Preguntas frecuentes](../../faq/general.md#oracle-odbc-encodings) artículo. +Si tiene problemas con las codificaciones al utilizar Oracle, consulte el [FAQ](../../../faq/general.md#oracle-odbc-encodings) artículo. ### Vulnerabilidad conocida de la funcionalidad del diccionario ODBC {#known-vulnerability-of-the-odbc-dictionary-functionality} @@ -192,7 +195,7 @@ Si tiene problemas con las codificaciones al utilizar Oracle, consulte el [Pregu **Ejemplo de uso inseguro** -Let’s configure unixODBC for PostgreSQL. Content of `/etc/odbc.ini`: +Vamos a configurar unixODBC para PostgreSQL. 
Contenido de `/etc/odbc.ini`: ``` text [gregtest] @@ -213,7 +216,7 @@ SELECT * FROM odbc('DSN=gregtest;Servername=some-server.com', 'test_db'); El controlador ODBC enviará valores de `USERNAME` y `PASSWORD` de `odbc.ini` a `some-server.com`. -### Ejemplo de conexión de PostgreSQL {#example-of-connecting-postgresql} +### Ejemplo de conexión Postgresql {#example-of-connecting-postgresql} Sistema operativo Ubuntu. @@ -390,7 +393,7 @@ LIFETIME(MIN 300 MAX 360) ## DBMS {#dbms} -### MySQL {#dicts-external_dicts_dict_sources-mysql} +### Mysql {#dicts-external_dicts_dict_sources-mysql} Ejemplo de configuración: @@ -487,7 +490,7 @@ SOURCE(MYSQL( )) ``` -### Haga clic en Casa {#dicts-external_dicts_dict_sources-clickhouse} +### Inicio {#dicts-external_dicts_dict_sources-clickhouse} Ejemplo de configuración: @@ -521,7 +524,7 @@ SOURCE(CLICKHOUSE( Configuración de campos: -- `host` – The ClickHouse host. If it is a local host, the query is processed without any network activity. To improve fault tolerance, you can create a [Distribuido](../../operations/table_engines/distributed.md) tabla e ingrésela en configuraciones posteriores. +- `host` – The ClickHouse host. If it is a local host, the query is processed without any network activity. To improve fault tolerance, you can create a [Distribuido](../../../engines/table_engines/special/distributed.md) tabla e ingrésela en configuraciones posteriores. - `port` – The port on the ClickHouse server. - `user` – Name of the ClickHouse user. - `password` – Password of the ClickHouse user. @@ -530,7 +533,7 @@ Configuración de campos: - `where` – The selection criteria. May be omitted. - `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [Actualización de diccionarios](external_dicts_dict_lifetime.md). -### MongoDB {#dicts-external_dicts_dict_sources-mongodb} +### Mongodb {#dicts-external_dicts_dict_sources-mongodb} Ejemplo de configuración: @@ -602,4 +605,4 @@ Configuración de campos: - `storage_type` – The structure of internal Redis storage using for work with keys. `simple` es para fuentes simples y para fuentes de clave única hash, `hash_map` es para fuentes hash con dos teclas. Los orígenes a distancia y los orígenes de caché con clave compleja no son compatibles. Puede omitirse, el valor predeterminado es `simple`. - `db_index` – The specific numeric index of Redis logical database. May be omitted, default value is 0. 
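If the dictionary is configured with a DDL query, the same fields would be expressed roughly as follows (a sketch, assuming the `REDIS` source keyword mirrors the XML element names; the connection values are placeholders):

``` sql
SOURCE(REDIS(
    host 'localhost'
    port 6379
    storage_type 'simple'
    db_index 0
))
```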
-[Artículo Original](https://clickhouse.tech/docs/es/query_language/dicts/external_dicts_dict_sources/) +[Artículo Original](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_sources/) diff --git a/docs/es/query_language/dicts/external_dicts_dict_structure.md b/docs/es/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md similarity index 86% rename from docs/es/query_language/dicts/external_dicts_dict_structure.md rename to docs/es/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md index 4b97e3e1701..0c53ae71210 100644 --- a/docs/es/query_language/dicts/external_dicts_dict_structure.md +++ b/docs/es/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 44 +toc_title: Clave y campos del diccionario --- # Clave y campos del diccionario {#dictionary-key-and-fields} @@ -89,12 +92,12 @@ PRIMARY KEY Id ### Clave compuesta {#composite-key} -La clave puede ser un `tuple` de cualquier tipo de campo. El [diseño](external_dicts_dict_layout.md) es este caso debe ser `complex_key_hashed` o `complex_key_cache`. +La clave puede ser un `tuple` de cualquier tipo de campo. El [diseño](external_dicts_dict_layout.md) en este caso debe ser `complex_key_hashed` o `complex_key_cache`. !!! tip "Consejo" Una clave compuesta puede consistir en un solo elemento. Esto hace posible usar una cadena como clave, por ejemplo. -La estructura clave se establece en el elemento ``. Los campos clave se especifican en el mismo formato que el diccionario [Atributo](external_dicts_dict_structure.md). Ejemplo: +La estructura clave se establece en el elemento ``. Los campos clave se especifican en el mismo formato que el diccionario [atributo](external_dicts_dict_structure.md). Ejemplo: ``` xml @@ -155,18 +158,18 @@ CREATE DICTIONARY somename ( Campos de configuración: -| Etiqueta | Descripción | Requerir | +| Etiqueta | Descripci | Requerir | |------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------| | `name` | Nombre de columna. | Sí | -| `type` | Tipo de datos ClickHouse.
    ClickHouse intenta convertir el valor del diccionario al tipo de datos especificado. Por ejemplo, para MySQL, el campo podría ser `TEXT`, `VARCHAR`, o `BLOB` es la tabla fuente de MySQL, pero se puede cargar como `String` es ClickHouse.
    [NULL](../../data_types/nullable.md) no es compatible. | Sí | +| `type` | Tipo de datos ClickHouse.
    ClickHouse intenta convertir el valor del diccionario al tipo de datos especificado. Por ejemplo, para MySQL, el campo podría ser `TEXT`, `VARCHAR`, o `BLOB` en la tabla fuente de MySQL, pero se puede cargar como `String` en ClickHouse.
    [NULL](../../../sql_reference/data_types/nullable.md) no es compatible. | Sí | | `null_value` | Valor predeterminado para un elemento no existente.
    En el ejemplo, es una cadena vacía. No se puede utilizar `NULL` en este campo. | Sí | -| `expression` | [Expresion](../syntax.md#syntax-expressions) que ClickHouse ejecuta en el valor.
    La expresión puede ser un nombre de columna en la base de datos SQL remota. Por lo tanto, puede usarlo para crear un alias para la columna remota.

    Valor predeterminado: sin expresión. | No | +| `expression` | [Expresión](../../syntax.md#syntax-expressions) que ClickHouse ejecuta en el valor.
    La expresión puede ser un nombre de columna en la base de datos SQL remota. Por lo tanto, puede usarlo para crear un alias para la columna remota.

    Valor predeterminado: sin expresión. | No | | `hierarchical` | Si `true`, el atributo contiene el valor de una clave padre para la clave actual. Ver [Diccionarios jerárquicos](external_dicts_dict_hierarchical.md).

    Valor predeterminado: `false`. | No | | `injective` | Indicador que muestra si la aplicación `id -> attribute` es [inyectiva](https://en.wikipedia.org/wiki/Injective_function).
    Si `true`, ClickHouse puede colocar automáticamente las solicitudes a los diccionarios con inyección después de la cláusula `GROUP BY`. Por lo general, esto reduce significativamente la cantidad de tales solicitudes.

    Valor predeterminado: `false`. | No | | `is_object_id` | Indicador que muestra si la consulta se ejecuta para un documento MongoDB mediante `ObjectID`.

    Valor predeterminado: `false`. | No | ## Ver también {#see-also} -- [Funciones para trabajar con diccionarios externos](../functions/ext_dict_functions.md). +- [Funciones para trabajar con diccionarios externos](../../../sql_reference/functions/ext_dict_functions.md). -[Artículo Original](https://clickhouse.tech/docs/es/query_language/dicts/external_dicts_dict_structure/) +[Artículo Original](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_structure/) diff --git a/docs/es/sql_reference/dictionaries/external_dictionaries/index.md b/docs/es/sql_reference/dictionaries/external_dictionaries/index.md new file mode 100644 index 00000000000..9105762ba0b --- /dev/null +++ b/docs/es/sql_reference/dictionaries/external_dictionaries/index.md @@ -0,0 +1,8 @@ +--- +machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_folder_title: External Dictionaries +toc_priority: 37 +--- + + diff --git a/docs/es/sql_reference/dictionaries/index.md b/docs/es/sql_reference/dictionaries/index.md new file mode 100644 index 00000000000..610f2ac865e --- /dev/null +++ b/docs/es/sql_reference/dictionaries/index.md @@ -0,0 +1,22 @@ +--- +machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_folder_title: Dictionaries +toc_priority: 35 +toc_title: "Implantaci\xF3n" +--- + +# Diccionario {#dictionaries} + +Un diccionario es un mapeo (`key -> attributes`) que es conveniente para varios tipos de listas de referencia. + +ClickHouse admite funciones especiales para trabajar con diccionarios que se pueden usar en consultas. Es más fácil y más eficiente usar diccionarios con funciones que un `JOIN` con tablas de referencia. + +[NULL](../syntax.md#null) los valores no se pueden almacenar en un diccionario. + +Soporta ClickHouse: + +- [Diccionarios incorporados](internal_dicts.md#internal_dicts) con una específica [conjunto de funciones](../../sql_reference/functions/ym_dict_functions.md). +- [Diccionarios complementarios (externos)](external_dictionaries/external_dicts.md) con un [neto de funciones](../../sql_reference/functions/ext_dict_functions.md). + +[Artículo Original](https://clickhouse.tech/docs/en/query_language/dicts/) diff --git a/docs/es/query_language/dicts/internal_dicts.md b/docs/es/sql_reference/dictionaries/internal_dicts.md similarity index 94% rename from docs/es/query_language/dicts/internal_dicts.md rename to docs/es/sql_reference/dictionaries/internal_dicts.md index 4795975b1cb..24da4deb80f 100644 --- a/docs/es/query_language/dicts/internal_dicts.md +++ b/docs/es/sql_reference/dictionaries/internal_dicts.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 39 +toc_title: Diccionarios internos --- # Diccionarios internos {#internal_dicts} @@ -22,7 +25,7 @@ La geobase se carga desde archivos de texto. Coloque el `regions_hierarchy*.txt` archivos en el `path_to_regions_hierarchy_file` directorio. Este parámetro de configuración debe contener la ruta `regions_hierarchy.txt` archivo (la jerarquía regional predeterminada), y los otros archivos (`regions_hierarchy_ua.txt`) debe estar ubicado en el mismo directorio. -Ponga el `regions_names_*.txt` archivos en el `path_to_regions_names_files` Directorio. +Ponga el `regions_names_*.txt` archivos en el `path_to_regions_names_files` directorio. También puede crear estos archivos usted mismo. 
El formato de archivo es el siguiente: @@ -49,4 +52,4 @@ Recomendamos actualizar periódicamente los diccionarios con la geobase. Durante También hay funciones para trabajar con identificadores de sistema operativo y Yandex.Motores de búsqueda Metrica, pero no deben ser utilizados. -[Artículo Original](https://clickhouse.tech/docs/es/query_language/dicts/internal_dicts/) +[Artículo Original](https://clickhouse.tech/docs/en/query_language/dicts/internal_dicts/) diff --git a/docs/es/query_language/functions/arithmetic_functions.md b/docs/es/sql_reference/functions/arithmetic_functions.md similarity index 84% rename from docs/es/query_language/functions/arithmetic_functions.md rename to docs/es/sql_reference/functions/arithmetic_functions.md index 9ec7995edad..bc407f7b622 100644 --- a/docs/es/query_language/functions/arithmetic_functions.md +++ b/docs/es/sql_reference/functions/arithmetic_functions.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 35 +toc_title: "Aritm\xE9tica" --- # Funciones aritméticas {#arithmetic-functions} @@ -31,13 +34,13 @@ También puede agregar números enteros con una fecha o fecha y hora. En el caso Calcula la diferencia. El resultado siempre está firmado. -También puede calcular números enteros a partir de una fecha o fecha con la hora. La idea es la misma – ver arriba para ‘plus’. +You can also calculate integer numbers from a date or date with time. The idea is the same – see above for ‘plus’. -## ¿Cómo puedo hacerlo? {#multiplya-b-a-b-operator} +## multiplicar(a, b) a \* b operador {#multiplya-b-a-b-operator} Calcula el producto de los números. -## divide (a, segundo), operador a / segundo {#dividea-b-a-b-operator} +## divide (a, b), operador a / b {#dividea-b-a-b-operator} Calcula el cociente de los números. El tipo de resultado es siempre un tipo de punto flotante. No es una división entera. Para la división de enteros, use el ‘intDiv’ función. @@ -63,22 +66,22 @@ Se produce una excepción al dividir por cero o al dividir un número negativo m Difiere de ‘modulo’ en que devuelve cero cuando el divisor es cero. -## ¿Cómo funciona? {#negatea-a-operator} +## negate(a), -un operador {#negatea-a-operator} Calcula un número con el signo inverso. El resultado siempre está firmado. ## abs (a) {#arithm_func-abs} -Calcula el valor absoluto del número (a). Es decir, si un \< 0, devuelve -a. Para los tipos sin signo no hace nada. Para los tipos de enteros con signo, devuelve un número sin signo. +Calcula el valor absoluto del número (a). Es decir, si un \<0, devuelve -a . Para los tipos sin firmar no hace nada. Para los tipos de enteros con signo, devuelve un número sin signo. ## GCD (a, b) {#gcda-b} Devuelve el mayor divisor común de los números. Se produce una excepción al dividir por cero o al dividir un número negativo mínimo por menos uno. -## Lcm (a, b) {#lcma-b} +## Lcm(a, b) {#lcma-b} Devuelve el mínimo múltiplo común de los números. Se produce una excepción al dividir por cero o al dividir un número negativo mínimo por menos uno. 
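A short illustration of the two functions above (the expected results are shown as comments):

``` sql
SELECT
    gcd(27, 18) AS greatest_common_divisor, -- returns 9
    lcm(6, 10) AS least_common_multiple     -- returns 30
```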
-[Artículo Original](https://clickhouse.tech/docs/es/query_language/functions/arithmetic_functions/) +[Artículo Original](https://clickhouse.tech/docs/en/query_language/functions/arithmetic_functions/) diff --git a/docs/es/query_language/functions/array_functions.md b/docs/es/sql_reference/functions/array_functions.md similarity index 89% rename from docs/es/query_language/functions/array_functions.md rename to docs/es/sql_reference/functions/array_functions.md index e1dd2a38f69..42e22b64bbc 100644 --- a/docs/es/query_language/functions/array_functions.md +++ b/docs/es/sql_reference/functions/array_functions.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 46 +toc_title: Trabajar con matrices --- # Funciones para trabajar con matrices {#functions-for-working-with-arrays} @@ -62,7 +65,7 @@ arrayConcat(arrays) **Parámetros** -- `arrays` – Arbitrary number of arguments of [Matriz](../../data_types/array.md) tipo. +- `arrays` – Arbitrary number of arguments of [Matriz](../../sql_reference/data_types/array.md) tipo. **Ejemplo** @@ -186,7 +189,6 @@ SELECT indexOf([1, 3, NULL, NULL], NULL) ``` ``` text - ┌─indexOf([1, 3, NULL, NULL], NULL)─┐ │ 3 │ └───────────────────────────────────┘ @@ -367,7 +369,7 @@ arrayPushBack(array, single_value) **Parámetros** - `array` – Array. -- `single_value` – A single value. Only numbers can be added to an array with numbers, and only strings can be added to an array of strings. When adding numbers, ClickHouse automatically sets the `single_value` tipo para el tipo de datos de la matriz. Para obtener más información sobre los tipos de datos en ClickHouse, consulte “[Tipos de datos](../../data_types/index.md#data_types)”. Puede ser `NULL`. La función agrega un `NULL` elemento de matriz a una matriz, y el tipo de elementos de matriz se convierte en `Nullable`. +- `single_value` – A single value. Only numbers can be added to an array with numbers, and only strings can be added to an array of strings. When adding numbers, ClickHouse automatically sets the `single_value` tipo para el tipo de datos de la matriz. Para obtener más información sobre los tipos de datos en ClickHouse, consulte “[Tipos de datos](../../sql_reference/data_types/index.md#data_types)”. Puede ser `NULL`. La función agrega un `NULL` elemento de matriz a una matriz, y el tipo de elementos de matriz se convierte en `Nullable`. **Ejemplo** @@ -392,7 +394,7 @@ arrayPushFront(array, single_value) **Parámetros** - `array` – Array. -- `single_value` – A single value. Only numbers can be added to an array with numbers, and only strings can be added to an array of strings. When adding numbers, ClickHouse automatically sets the `single_value` tipo para el tipo de datos de la matriz. Para obtener más información sobre los tipos de datos en ClickHouse, consulte “[Tipos de datos](../../data_types/index.md#data_types)”. Puede ser `NULL`. La función agrega un `NULL` elemento de matriz a una matriz, y el tipo de elementos de matriz se convierte en `Nullable`. +- `single_value` – A single value. Only numbers can be added to an array with numbers, and only strings can be added to an array of strings. When adding numbers, ClickHouse automatically sets the `single_value` tipo para el tipo de datos de la matriz. Para obtener más información sobre los tipos de datos en ClickHouse, consulte “[Tipos de datos](../../sql_reference/data_types/index.md#data_types)”. Puede ser `NULL`. 
La función agrega un `NULL` elemento de matriz a una matriz, y el tipo de elementos de matriz se convierte en `Nullable`. **Ejemplo** @@ -808,11 +810,24 @@ SELECT └──────────────┴───────────┘ ``` -## arrayReduce(agg\_func, arr1, …) {#array-functions-arrayreduce} +## arrayReduce {#arrayreduce} Aplica una función de agregado a los elementos de la matriz y devuelve su resultado. El nombre de la función de agregación se pasa como una cadena entre comillas simples `'max'`, `'sum'`. Cuando se utilizan funciones de agregado paramétrico, el parámetro se indica después del nombre de la función entre paréntesis `'uniqUpTo(6)'`. -Ejemplo: +**Sintaxis** + +``` sql +arrayReduce(agg_func, arr1, arr2, ..., arrN) +``` + +**Parámetros** + +- `agg_func` — The name of an aggregate function which should be a constant [cadena](../../sql_reference/data_types/string.md). +- `arr` — Any number of [matriz](../../sql_reference/data_types/array.md) escriba columnas como los parámetros de la función de agregación. + +**Valor devuelto** + +**Ejemplo** ``` sql SELECT arrayReduce('max', [1, 2, 3]) @@ -826,8 +841,6 @@ SELECT arrayReduce('max', [1, 2, 3]) Si una función agregada toma varios argumentos, esta función debe aplicarse a varias matrices del mismo tamaño. -Ejemplo: - ``` sql SELECT arrayReduce('maxIf', [3, 5], [1, 0]) ``` @@ -850,7 +863,41 @@ SELECT arrayReduce('uniqUpTo(3)', [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) └─────────────────────────────────────────────────────────────┘ ``` -## arrayReverse (arr) {#array_functions-arrayreverse} +## arrayReduceInRanges {#arrayreduceinranges} + +Aplica una función de agregado a los elementos de matriz en rangos dados y devuelve una matriz que contiene el resultado correspondiente a cada rango. La función devolverá el mismo resultado que múltiples `arrayReduce(agg_func, arraySlice(arr1, index, length), ...)`. + +**Sintaxis** + +``` sql +arrayReduceInRanges(agg_func, ranges, arr1, arr2, ..., arrN) +``` + +**Parámetros** + +- `agg_func` — The name of an aggregate function which should be a constant [cadena](../../sql_reference/data_types/string.md). +- `ranges` — The ranges to aggretate which should be an [matriz](../../sql_reference/data_types/array.md) de [tuplas](../../sql_reference/data_types/tuple.md) que contiene el índice y la longitud de cada rango. +- `arr` — Any number of [matriz](../../sql_reference/data_types/array.md) escriba columnas como los parámetros de la función de agregación. + +**Valor devuelto** + +**Ejemplo** + +``` sql +SELECT arrayReduceInRanges( + 'sum', + [(1, 5), (2, 3), (3, 4), (4, 4)], + [1000000, 200000, 30000, 4000, 500, 60, 7] +) AS res +``` + +``` text +┌─res─────────────────────────┐ +│ [1234500,234000,34560,4567] │ +└─────────────────────────────┘ +``` + +## arrayReverse (arr) {#arrayreverse} Devuelve una matriz del mismo tamaño que la matriz original que contiene los elementos en orden inverso. @@ -891,7 +938,7 @@ Apodo: `flatten`. **Parámetros** -- `array_of_arrays` — [Matriz](../../data_types/array.md) de matrices. Por ejemplo, `[[1,2,3], [4,5]]`. +- `array_of_arrays` — [Matriz](../../sql_reference/data_types/array.md) de matrices. Por ejemplo, `[[1,2,3], [4,5]]`. **Ejemplos** @@ -917,7 +964,7 @@ arrayCompact(arr) **Parámetros** -`arr` — The [matriz](../../data_types/array.md) inspeccionar. +`arr` — The [matriz](../../sql_reference/data_types/array.md) inspeccionar. **Valor devuelto** @@ -953,7 +1000,7 @@ arrayZip(arr1, arr2, ..., arrN) **Parámetros** -`arr` — Any number of [matriz](../../data_types/array.md) escriba columnas para combinar. 
+`arr` — Any number of [matriz](../../sql_reference/data_types/array.md) escriba columnas para combinar. **Valor devuelto** @@ -975,4 +1022,36 @@ Resultado: └────────────────────────────────────────────┘ ``` +## arrayAUC {#arrayauc} + +Calcule AUC (Área bajo la curva, que es un concepto en el aprendizaje automático, vea más detalles: https://en.wikipedia.org/wiki/Receiver\_operating\_characteristic\#Area\_under\_the\_curve ). + +**Sintaxis** + +``` sql +arrayAUC(arr_scores, arr_labels) +``` + +**Parámetros** +- `arr_scores` — scores prediction model gives. +- `arr_labels` — labels of samples, usually 1 for positive sample and 0 for negtive sample. + +**Valor devuelto** +Devuelve el valor AUC con el tipo Float64. + +**Ejemplo** +Consulta: + +``` sql +select arrayAUC([0.1, 0.4, 0.35, 0.8], [0, 0, 1, 1]) +``` + +Resultado: + +``` text +┌─arrayAUC([0.1, 0.4, 0.35, 0.8], [0, 0, 1, 1])─┐ +│ 0.75 │ +└────────────────────────────────────────---──┘ +``` + [Artículo Original](https://clickhouse.tech/docs/en/query_language/functions/array_functions/) diff --git a/docs/es/query_language/functions/array_join.md b/docs/es/sql_reference/functions/array_join.md similarity index 88% rename from docs/es/query_language/functions/array_join.md rename to docs/es/sql_reference/functions/array_join.md index 25645efc45c..877cd862d7f 100644 --- a/docs/es/query_language/functions/array_join.md +++ b/docs/es/sql_reference/functions/array_join.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 61 +toc_title: arrayJoin --- # arrayJoin función {#functions_arrayjoin} @@ -31,4 +34,4 @@ SELECT arrayJoin([1, 2, 3] AS src) AS dst, 'Hello', src └─────┴───────────┴─────────┘ ``` -[Artículo Original](https://clickhouse.tech/docs/es/query_language/functions/array_join/) +[Artículo Original](https://clickhouse.tech/docs/en/query_language/functions/array_join/) diff --git a/docs/es/query_language/functions/bit_functions.md b/docs/es/sql_reference/functions/bit_functions.md similarity index 67% rename from docs/es/query_language/functions/bit_functions.md rename to docs/es/sql_reference/functions/bit_functions.md index 309c55e33ca..6c042834fde 100644 --- a/docs/es/query_language/functions/bit_functions.md +++ b/docs/es/sql_reference/functions/bit_functions.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 48 +toc_title: Trozo --- # Bit funciones {#bit-functions} @@ -8,7 +11,7 @@ Las funciones de bits funcionan para cualquier par de tipos de UInt8, UInt16, UI El tipo de resultado es un entero con bits iguales a los bits máximos de sus argumentos. Si al menos uno de los argumentos está firmado, el resultado es un número firmado. Si un argumento es un número de coma flotante, se convierte en Int64. -## PocoY(a, b) {#bitanda-b} +## pocoY(a, b) {#bitanda-b} ## bitOr (a, b) {#bitora-b} @@ -26,7 +29,7 @@ El tipo de resultado es un entero con bits iguales a los bits máximos de sus ar ## bitTest {#bittest} -Toma cualquier entero y lo convierte en [Forma binaria](https://en.wikipedia.org/wiki/Binary_number) devuelve el valor de un bit en la posición especificada. La cuenta atrás comienza desde 0 de derecha a izquierda. +Toma cualquier entero y lo convierte en [forma binaria](https://en.wikipedia.org/wiki/Binary_number) devuelve el valor de un bit en la posición especificada. La cuenta atrás comienza desde 0 de derecha a izquierda. 
**Sintaxis** @@ -36,8 +39,8 @@ SELECT bitTest(number, index) **Parámetros** -- `number` – número entero. -- `index` – posición de bit. +- `number` – integer number. +- `index` – position of bit. **Valores devueltos** @@ -85,13 +88,13 @@ Devuelve el resultado de [conjunción lógica](https://en.wikipedia.org/wiki/Log La conjucción para operaciones bit a bit: -Nivel de Cifrado WEP +0 AND 0 = 0 -Nivel de Cifrado WEP +0 AND 1 = 0 -Nivel de Cifrado WEP +1 AND 0 = 0 -Nivel de Cifrado WEP +1 AND 1 = 1 **Sintaxis** @@ -101,8 +104,8 @@ SELECT bitTestAll(number, index1, index2, index3, index4, ...) **Parámetros** -- `number` – número entero. -- `index1`, `index2`, `index3`, `index4` – posiciones de bit. Por ejemplo, para un conjunto de posiciones (`index1`, `index2`, `index3`, `index4`) es verdadero si y solo si todas sus posiciones son verdaderas (`index1` ⋀ `index2`, ⋀ `index3` ⋀ `index4`). +- `number` – integer number. +- `index1`, `index2`, `index3`, `index4` – positions of bit. For example, for set of positions (`index1`, `index2`, `index3`, `index4`) es verdadero si y solo si todas sus posiciones son verdaderas (`index1` ⋀ `index2`, ⋀ `index3` ⋀ `index4`). **Valores devueltos** @@ -150,13 +153,13 @@ Devuelve el resultado de [disyunción lógica](https://en.wikipedia.org/wiki/Log La disyunción para las operaciones bit a bit: -Nivel de Cifrado WEP +0 OR 0 = 0 -Nivel de Cifrado WEP +0 OR 1 = 1 -Nivel de Cifrado WEP +1 OR 0 = 1 -Nivel de Cifrado WEP +1 OR 1 = 1 **Sintaxis** @@ -166,8 +169,8 @@ SELECT bitTestAny(number, index1, index2, index3, index4, ...) **Parámetros** -- `number` – número entero. -- `index1`, `index2`, `index3`, `index4` – posiciones de bit. +- `number` – integer number. +- `index1`, `index2`, `index3`, `index4` – positions of bit. **Valores devueltos** @@ -209,4 +212,44 @@ Resultado: └──────────────────────┘ ``` -[Artículo Original](https://clickhouse.tech/docs/es/query_language/functions/bit_functions/) +## bitCount {#bitcount} + +Calcula el número de bits establecido en uno en la representación binaria de un número. + +**Sintaxis** + +``` sql +bitCount(x) +``` + +**Parámetros** + +- `x` — [Entero](../../sql_reference/data_types/int_uint.md) o [punto flotante](../../sql_reference/data_types/float.md) numero. La función utiliza la representación de valor en la memoria. Permite admitir números de punto flotante. + +**Valor devuelto** + +- Número de bits establecido en uno en el número de entrada. + +La función no convierte el valor de entrada a un tipo más grande ([extensión de signo](https://en.wikipedia.org/wiki/Sign_extension)). Entonces, por ejemplo, `bitCount(toUInt8(-1)) = 8`. + +Tipo: `UInt8`. + +**Ejemplo** + +Tomemos por ejemplo el número 333. Su representación binaria: 0000000101001101. 
+ +Consulta: + +``` sql +SELECT bitCount(333) +``` + +Resultado: + +``` text +┌─bitCount(333)─┐ +│ 5 │ +└───────────────┘ +``` + +[Artículo Original](https://clickhouse.tech/docs/en/query_language/functions/bit_functions/) diff --git a/docs/es/query_language/functions/bitmap_functions.md b/docs/es/sql_reference/functions/bitmap_functions.md similarity index 82% rename from docs/es/query_language/functions/bitmap_functions.md rename to docs/es/sql_reference/functions/bitmap_functions.md index 65fcc33731a..ec105de8382 100644 --- a/docs/es/query_language/functions/bitmap_functions.md +++ b/docs/es/sql_reference/functions/bitmap_functions.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 49 +toc_title: Bits --- # Funciones de mapa de bits {#bitmap-functions} @@ -22,7 +25,7 @@ bitmapBuild(array) **Parámetros** -- `array` – matriz entera sin signo. +- `array` – unsigned integer array. **Ejemplo** @@ -46,7 +49,7 @@ bitmapToArray(bitmap) **Parámetros** -- `bitmap` – objeto de mapa de bits. +- `bitmap` – bitmap object. **Ejemplo** @@ -71,8 +74,8 @@ bitmapSubsetInRange(bitmap, range_start, range_end) **Parámetros** - `bitmap` – [Objeto de mapa de bits](#bitmap_functions-bitmapbuild). -- `range_start` – punto de inicio de la gama. Tipo: [UInt32](../../data_types/int_uint.md). -- `range_end` – punto final de rango(excluido). Tipo: [UInt32](../../data_types/int_uint.md). +- `range_start` – range start point. Type: [UInt32](../../sql_reference/data_types/int_uint.md). +- `range_end` – range end point(excluded). Type: [UInt32](../../sql_reference/data_types/int_uint.md). **Ejemplo** @@ -99,8 +102,8 @@ bitmapSubsetLimit(bitmap, range_start, cardinality_limit) **Parámetros** - `bitmap` – [Objeto de mapa de bits](#bitmap_functions-bitmapbuild). -- `range_start` – El punto de partida del subconjunto. Tipo: [UInt32](../../data_types/int_uint.md). -- `cardinality_limit` – El subconjunto cardinalidad límite superior. Tipo: [UInt32](../../data_types/int_uint.md). +- `range_start` – The subset starting point. Type: [UInt32](../../sql_reference/data_types/int_uint.md). +- `cardinality_limit` – The subset cardinality upper limit. Type: [UInt32](../../sql_reference/data_types/int_uint.md). **Valor devuelto** @@ -135,12 +138,12 @@ bitmapContains(haystack, needle) **Parámetros** - `haystack` – [Objeto de mapa de bits](#bitmap_functions-bitmapbuild), donde la función busca. -- `needle` – Valor que busca la función. Tipo: [UInt32](../../data_types/int_uint.md). +- `needle` – Value that the function searches. Type: [UInt32](../../sql_reference/data_types/int_uint.md). **Valores devueltos** -- 0 — Si `haystack` no contiene `needle`. -- 1 — Si `haystack` contener `needle`. +- 0 — If `haystack` no contiene `needle`. +- 1 — If `haystack` contener `needle`. Tipo: `UInt8`. @@ -164,15 +167,15 @@ Comprueba si dos mapas de bits tienen intersección por algunos elementos. bitmapHasAny(bitmap1, bitmap2) ``` -Si está seguro de que `bitmap2` Esta es la página de desambiguación en 24symbols. [bitmapContains](#bitmap_functions-bitmapcontains) función. Funciona de manera más eficiente. +Si está seguro de que `bitmap2` contiene estrictamente un elemento, considere usar el [bitmapContains](#bitmap_functions-bitmapcontains) función. Funciona de manera más eficiente. **Parámetros** -- `bitmap*` – objeto de mapa de bits. +- `bitmap*` – bitmap object. **Valores de retorno** -- `1` si `bitmap1` y `bitmap2` tienen un elemento similar al menos. 
+- `1`, si `bitmap1` y `bitmap2` tienen un elemento similar al menos. - `0`, de lo contrario. **Ejemplo** @@ -198,7 +201,7 @@ bitmapHasAll(bitmap,bitmap) **Parámetros** -- `bitmap` – objeto de mapa de bits. +- `bitmap` – bitmap object. **Ejemplo** @@ -222,7 +225,7 @@ bitmapCardinality(bitmap) **Parámetros** -- `bitmap` – objeto de mapa de bits. +- `bitmap` – bitmap object. **Ejemplo** @@ -244,7 +247,7 @@ Vuelva a ejecutar el valor más pequeño de tipo UInt64 en el conjunto, UINT32\_ **Parámetros** -- `bitmap` – objeto de mapa de bits. +- `bitmap` – bitmap object. **Ejemplo** @@ -264,7 +267,7 @@ Vuelva a ejecutar el mayor valor de tipo UInt64 en el conjunto, 0 si el conjunto **Parámetros** -- `bitmap` – objeto de mapa de bits. +- `bitmap` – bitmap object. **Ejemplo** @@ -284,9 +287,9 @@ Transformar una matriz de valores en un mapa de bits a otra matriz de valores, e **Parámetros** -- `bitmap` – objeto de mapa de bits. -- `from_array` – matriz UInt32. Para idx en el rango \[0, from\_array .size()), si el mapa de bits contiene from\_array\[idx\] , luego reemplácelo con to\_array\[idx\] . Tenga en cuenta que el resultado depende del orden de la matriz si hay elementos comunes entre from\_array y to\_array . -- `to_array` – Matriz UInt32, su tamaño será el mismo que from\_array. +- `bitmap` – bitmap object. +- `from_array` – UInt32 array. For idx in range \[0, from\_array.size()), if bitmap contains from\_array\[idx\], then replace it with to\_array\[idx\]. Note that the result depends on array ordering if there are common elements between from\_array and to\_array. +- `to_array` – UInt32 array, its size shall be the same to from\_array. **Ejemplo** @@ -308,7 +311,7 @@ bitmapAnd(bitmap,bitmap) **Parámetros** -- `bitmap` – objeto de mapa de bits. +- `bitmap` – bitmap object. **Ejemplo** @@ -332,7 +335,7 @@ bitmapOr(bitmap,bitmap) **Parámetros** -- `bitmap` – objeto de mapa de bits. +- `bitmap` – bitmap object. **Ejemplo** @@ -356,7 +359,7 @@ bitmapXor(bitmap,bitmap) **Parámetros** -- `bitmap` – objeto de mapa de bits. +- `bitmap` – bitmap object. **Ejemplo** @@ -380,7 +383,7 @@ bitmapAndnot(bitmap,bitmap) **Parámetros** -- `bitmap` – objeto de mapa de bits. +- `bitmap` – bitmap object. **Ejemplo** @@ -404,7 +407,7 @@ bitmapAndCardinality(bitmap,bitmap) **Parámetros** -- `bitmap` – objeto de mapa de bits. +- `bitmap` – bitmap object. **Ejemplo** @@ -428,7 +431,7 @@ bitmapOrCardinality(bitmap,bitmap) **Parámetros** -- `bitmap` – objeto de mapa de bits. +- `bitmap` – bitmap object. **Ejemplo** @@ -452,7 +455,7 @@ bitmapXorCardinality(bitmap,bitmap) **Parámetros** -- `bitmap` – objeto de mapa de bits. +- `bitmap` – bitmap object. **Ejemplo** @@ -476,7 +479,7 @@ bitmapAndnotCardinality(bitmap,bitmap) **Parámetros** -- `bitmap` – objeto de mapa de bits. +- `bitmap` – bitmap object. 
**Ejemplo** @@ -490,4 +493,4 @@ SELECT bitmapAndnotCardinality(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res └─────┘ ``` -[Artículo Original](https://clickhouse.tech/docs/es/query_language/functions/bitmap_functions/) +[Artículo Original](https://clickhouse.tech/docs/en/query_language/functions/bitmap_functions/) diff --git a/docs/es/sql_reference/functions/comparison_functions.md b/docs/es/sql_reference/functions/comparison_functions.md new file mode 100644 index 00000000000..dace972b6da --- /dev/null +++ b/docs/es/sql_reference/functions/comparison_functions.md @@ -0,0 +1,37 @@ +--- +machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 36 +toc_title: "Comparaci\xF3n" +--- + +# Funciones de comparación {#comparison-functions} + +Las funciones de comparación siempre devuelven 0 o 1 (Uint8). + +Se pueden comparar los siguientes tipos: + +- numero +- cuerdas y cuerdas fijas +- fechas +- fechas con tiempos + +dentro de cada grupo, pero no entre diferentes grupos. + +Por ejemplo, no puede comparar una fecha con una cadena. Debe usar una función para convertir la cadena a una fecha, o viceversa. + +Las cadenas se comparan por bytes. Una cadena más corta es más pequeña que todas las cadenas que comienzan con ella y que contienen al menos un carácter más. + +## iguales, a = b y a == b operador {#function-equals} + +## notEquals, un ! operador = b y un \<\> b {#function-notequals} + +## menos, operador \< {#function-less} + +## Saludos {#function-greater} + +## lessOrEquals, operador \<= {#function-lessorequals} + +## greaterOrEquals, operador \>= {#function-greaterorequals} + +[Artículo Original](https://clickhouse.tech/docs/en/query_language/functions/comparison_functions/) diff --git a/docs/es/query_language/functions/conditional_functions.md b/docs/es/sql_reference/functions/conditional_functions.md similarity index 91% rename from docs/es/query_language/functions/conditional_functions.md rename to docs/es/sql_reference/functions/conditional_functions.md index 9506ea2aaa2..e05cf4ae82b 100644 --- a/docs/es/query_language/functions/conditional_functions.md +++ b/docs/es/sql_reference/functions/conditional_functions.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 43 +toc_title: 'Condicional ' --- # Funciones condicionales {#conditional-functions} @@ -18,7 +21,7 @@ Si la condición `cond` evalúa a un valor distinto de cero, devuelve el resulta **Parámetros** -- `cond` – La condición para la evaluación que puede ser cero o no. El tipo es UInt8, Nullable(UInt8) o NULL. +- `cond` – The condition for evaluation that can be zero or not. The type is UInt8, Nullable(UInt8) or NULL. - `then` - La expresión que se va a devolver si se cumple la condición. - `else` - La expresión a devolver si no se cumple la condición.- @@ -112,15 +115,15 @@ Devoluciones `then` si el `cond` evalúa que es verdadero (mayor que cero), de l ## MultiIf {#multiif} -Le permite escribir el [CASO](../operators.md#operator_case) operador más compacto en la consulta. +Le permite escribir el [CASE](../operators.md#operator_case) operador más compacto en la consulta. Sintaxis: `multiIf(cond_1, then_1, cond_2, then_2, ..., else)` **Parámetros:** -- `cond_N` — La condición para que la función regrese `then_N`. -- `then_N` — El resultado de la función cuando se ejecuta. -- `else` — El resultado de la función si no se cumple ninguna de las condiciones. 
+- `cond_N` — The condition for the function to return `then_N`. +- `then_N` — The result of the function when executed. +- `else` — The result of the function if none of the conditions is met. La función acepta `2N+1` parámetros. @@ -201,4 +204,4 @@ FROM LEFT_RIGHT └──────┴───────┴──────────────────┘ ``` -[Artículo Original](https://clickhouse.tech/docs/es/query_language/functions/conditional_functions/) +[Artículo Original](https://clickhouse.tech/docs/en/query_language/functions/conditional_functions/) diff --git a/docs/es/query_language/functions/date_time_functions.md b/docs/es/sql_reference/functions/date_time_functions.md similarity index 72% rename from docs/es/query_language/functions/date_time_functions.md rename to docs/es/sql_reference/functions/date_time_functions.md index d1f2d2622cd..eb9d8371a29 100644 --- a/docs/es/query_language/functions/date_time_functions.md +++ b/docs/es/sql_reference/functions/date_time_functions.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 39 +toc_title: Trabajar con fechas y horas --- # Funciones para trabajar con fechas y horas {#functions-for-working-with-dates-and-times} @@ -55,7 +58,7 @@ Convierte una fecha o fecha con hora en un número UInt8 que contiene el número ## ParaHora {#tohour} Convierte una fecha con hora en un número UInt8 que contiene el número de la hora en el tiempo de 24 horas (0-23). -Esta función supone que si los relojes se mueven hacia adelante, es de una hora y ocurre a las 2 a.m., y si los relojes se mueven hacia atrás, es de una hora y ocurre a las 3 a.m. (lo cual no siempre es cierto, incluso en Moscú los relojes se cambiaron dos veces en un momento diferente). +This function assumes that if clocks are moved ahead, it is by one hour and occurs at 2 a.m., and if clocks are moved back, it is by one hour and occurs at 3 a.m. (which is not always true – even in Moscow the clocks were twice changed at a different time). ## ToMinute {#tominute} @@ -212,24 +215,24 @@ Convierte una fecha o fecha con hora en un número UInt16 que contiene el númer Convierte una fecha o fecha con hora en un número UInt8 que contiene el número de semana ISO. -## ParaSemana(fecha\[,modo\]) {#toweekdatemode} +## ToWeek(fecha\[,modo\]) {#toweekdatemode} Esta función devuelve el número de semana para la fecha o la fecha y hora. La forma de dos argumentos de toWeek() le permite especificar si la semana comienza el domingo o el lunes y si el valor de retorno debe estar en el rango de 0 a 53 o de 1 a 53. Si se omite el argumento mode, el modo predeterminado es 0. `toISOWeek()`es una función de compatibilidad que es equivalente a `toWeek(date,3)`. La siguiente tabla describe cómo funciona el argumento mode. -| Modo | Primer día de la semana | Gama | Semana 1 es la primera semana … | -|------------|-------------------------|------------------|---------------------------------| -| Cero | Domingo | 0-53 | con un domingo de este año | -| Uno | Lunes | 0-53 | con 4 o más días este año | -| Cómo hacer | Domingo | Sistema abierto. | con un domingo de este año | -| Cómo hacer | Lunes | Sistema abierto. | con 4 o más días este año | -| Cuatro | Domingo | 0-53 | con 4 o más días este año | -| Cinco | Lunes | 0-53 | con un lunes de este año | -| Código | Domingo | Sistema abierto. | con 4 o más días este año | -| Siete | Lunes | Sistema abierto. | con un lunes de este año | -| Ocho | Domingo | Sistema abierto. | contiene 1 de enero | -| Nueve | Lunes | Sistema abierto. 
| contiene 1 de enero | +| Modo | Primer día de la semana | Gama | Week 1 is the first week … | +|------|-------------------------|------|----------------------------| +| 0 | Domingo | 0-53 | con un domingo de este año | +| 1 | Lunes | 0-53 | con 4 o más días este año | +| 2 | Domingo | 1-53 | con un domingo de este año | +| 3 | Lunes | 1-53 | con 4 o más días este año | +| 4 | Domingo | 0-53 | con 4 o más días este año | +| 5 | Lunes | 0-53 | con un lunes de este año | +| 6 | Domingo | 1-53 | con 4 o más días este año | +| 7 | Lunes | 1-53 | con un lunes de este año | +| 8 | Domingo | 1-53 | contiene 1 de enero | +| 9 | Lunes | 1-53 | contiene 1 de enero | Para valores de modo con un significado de “with 4 or more days this year,” semanas están numeradas según ISO 8601:1988: @@ -237,7 +240,7 @@ Para valores de modo con un significado de “with 4 or more days this year,” - De lo contrario, es la última semana del año anterior, y la semana siguiente es la semana 1. -Para valores de modo con un significado de “contains January 1”, la semana contiene 1 de enero es la semana 1. No importa cuántos días en el nuevo año la semana contenía, incluso si contenía sólo un día. +Para valores de modo con un significado de “contains January 1”, la semana contiene 1 de enero es la semana 1. No importa cuántos días en el nuevo año contenía la semana, incluso si contenía solo un día. ``` sql toWeek(date, [, mode][, Timezone]) @@ -245,9 +248,9 @@ toWeek(date, [, mode][, Timezone]) **Parámetros** -- `date` – Fecha o DateTime. -- `mode` – Parámetro opcional, Rango de valores es \[0,9\], por defecto es 0. -- `Timezone` – Parámetro opcional, se comporta como cualquier otra función de conversión. +- `date` – Date or DateTime. +- `mode` – Optional parameter, Range of values is \[0,9\], default is 0. +- `Timezone` – Optional parameter, it behaves like any other conversion function. **Ejemplo** @@ -286,12 +289,12 @@ SELECT toDate('2016-12-27') AS date, toYearWeek(date) AS yearWeek0, toYearWeek(d Acepta cero argumentos y devuelve la hora actual en uno de los momentos de ejecución de la solicitud. Esta función devuelve una constante, incluso si la solicitud tardó mucho en completarse. -## Hoy {#today} +## hoy {#today} Acepta cero argumentos y devuelve la fecha actual en uno de los momentos de ejecución de la solicitud. Lo mismo que ‘toDate(now())’. -## enfermería {#yesterday} +## ayer {#yesterday} Acepta cero argumentos y devuelve la fecha de ayer en uno de los momentos de ejecución de la solicitud. Lo mismo que ‘today() - 1’. @@ -363,12 +366,12 @@ dateDiff('unit', startdate, enddate, [timezone]) **Parámetros** -- `unit` — Unidad de tiempo, en la que se expresa el valor devuelto. [Cadena](../syntax.md#syntax-string-literal). +- `unit` — Time unit, in which the returned value is expressed. [Cadena](../syntax.md#syntax-string-literal). Supported values: | unit | - | ------ | + | ---- | |second | |minute | |hour | @@ -378,11 +381,11 @@ dateDiff('unit', startdate, enddate, [timezone]) |quarter | |year | -- `startdate` — El primer valor de tiempo para comparar. [Fecha](../../data_types/date.md) o [FechaHora](../../data_types/datetime.md). +- `startdate` — The first time value to compare. [Fecha](../../sql_reference/data_types/date.md) o [FechaHora](../../sql_reference/data_types/datetime.md). -- `enddate` — El segundo valor de tiempo para comparar. [Fecha](../../data_types/date.md) o [FechaHora](../../data_types/datetime.md). +- `enddate` — The second time value to compare. 
[Fecha](../../sql_reference/data_types/date.md) o [FechaHora](../../sql_reference/data_types/datetime.md). -- `timezone` — Parámetro opcional. Si se especifica, se aplica a ambos `startdate` y `enddate`. Si no se especifica, las zonas horarias `startdate` y `enddate` se utilizan. Si no son lo mismo, el resultado no está especificado. +- `timezone` — Optional parameter. If specified, it is applied to both `startdate` y `enddate`. Si no se especifica, las zonas horarias `startdate` y `enddate` se utilizan. Si no son lo mismo, el resultado no está especificado. **Valor devuelto** @@ -412,36 +415,36 @@ Para un intervalo de tiempo a partir de ‘StartTime’ y continuando por ‘Dur Por ejemplo, `timeSlots(toDateTime('2012-01-01 12:20:00'), 600) = [toDateTime('2012-01-01 12:00:00'), toDateTime('2012-01-01 12:30:00')]`. Esto es necesario para buscar páginas vistas en la sesión correspondiente. -## FormatDateTime(Hora, Formato\[, Zona horaria\]) {#formatdatetime} +## formatDateTime(Hora, Formato\[, Zona horaria\]) {#formatdatetime} -La función da formato a una hora según una cadena de formato dada. NB: El formato es una expresión constante, por ejemplo, no puede tener múltiples formatos para una sola columna de resultado. +Function formats a Time according given Format string. N.B.: Format is a constant expression, e.g. you can not have multiple formats for single result column. Modificadores compatibles para Formato: (“Example” columna muestra el resultado de formateo para el tiempo `2018-01-02 22:33:44`) -| Modificador | Descripción | Ejemplo | -|------------------|------------------------------------------------------------------|----------------------------------| -| %C | año dividido por 100 y truncado a entero (00-99) | Veinte | -| %d | día del mes, cero acolchado (01-31) | Bienvenido | -| %D | Fecha corta de MM/DD/YY, equivalente a %m/%d/%a | Método de codificación de datos: | -| %e | día del mes, espacio acolchado ( 1-31) | Cómo hacer | -| Categoría | fecha corta AAAA-MM-DD, equivalente de la onu %Y-%m-%d | Sistema abierto. | -| %H | hora en formato 24h (00-23) | Veintidos | -| %Me | hora en formato 12h (01-12) | Diez | -| %j | día del año (001-366) | Categoría | -| Más información | mes como un número decimal (01-12) | Acerca de | -| Más información | minutos (00-59) | Sistema abierto. | -| %y | carácter de nueva línea («») | | -| %p | Designación AM o PM | PM | -| %R | HH de 24 horas: Tiempo del milímetro, equivalente a %H: %M | 22:33 | -| %S | segundo (00-59) | Sistema abierto. | -| % t | carácter de pestaña horizontal (') | | -| Tipo de artículo | Formato de hora ISO 8601 (HH:MM:SS), equivalente a %H:%M:%S | 22:33:44 | -| %u | ISO 8601 día de la semana como número con el lunes como 1 (1-7) | Cómo hacer | -| %V | Número de semana ISO 8601 (01-53) | Acerca de | -| % w | día de la semana como un número decimal con domingo como 0 (0-6) | Cómo hacer | -| %y | Año, últimos dos dígitos (00-99) | Acerca de | -| %Y | Año | 2018 Nueva York | -| %% | Por qué? 
| % |
+| Modificador | Descripción | Ejemplo |
+|-------------|--------------------------------------------------------------------|------------|
+| %C | año dividido por 100 y truncado a entero (00-99) | 20 |
+| %d | día del mes, con relleno de ceros (01-31) | 02 |
+| %D | fecha corta MM/DD/YY, equivalente a %m/%d/%y | 01/02/18 |
+| %e | día del mes, con relleno de espacios ( 1-31) | 2 |
+| %F | fecha corta AAAA-MM-DD, equivalente a %Y-%m-%d | 2018-01-02 |
+| %H | hora en formato 24h (00-23) | 22 |
+| %I | hora en formato 12h (01-12) | 10 |
+| %j | día del año (001-366) | 002 |
+| %m | mes como un número decimal (01-12) | 01 |
+| %M | minutos (00-59) | 33 |
+| %n | carácter de nueva línea ('') | |
+| %p | Designación AM o PM | PM |
+| %R | hora y minutos en formato de 24 horas (HH:MM), equivalente a %H:%M | 22:33 |
+| %S | segundo (00-59) | 44 |
+| %t | carácter de tabulación horizontal (') | |
+| %T | Formato de hora ISO 8601 (HH:MM:SS), equivalente a %H:%M:%S | 22:33:44 |
+| %u | día de la semana ISO 8601, con el lunes como 1 (1-7) | 2 |
+| %V | Número de semana ISO 8601 (01-53) | 01 |
+| %w | día de la semana como un número decimal con domingo como 0 (0-6) | 2 |
+| %y | Año, últimos dos dígitos (00-99) | 18 |
+| %Y | Año | 2018 |
+| %% | signo % | % |

-[Artículo Original](https://clickhouse.tech/docs/es/query_language/functions/date_time_functions/)
+[Artículo Original](https://clickhouse.tech/docs/en/query_language/functions/date_time_functions/)
diff --git a/docs/es/query_language/functions/encoding_functions.md b/docs/es/sql_reference/functions/encoding_functions.md
similarity index 82%
rename from docs/es/query_language/functions/encoding_functions.md
rename to docs/es/sql_reference/functions/encoding_functions.md
index dadb4b4db00..7459dfcda0e 100644
--- a/docs/es/query_language/functions/encoding_functions.md
+++ b/docs/es/sql_reference/functions/encoding_functions.md
@@ -1,5 +1,8 @@
---
machine_translated: true
+machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa
+toc_priority: 52
+toc_title: "Codificaci\xF3n"
---

# Funciones de codificación {#encoding-functions}
@@ -16,7 +19,7 @@ char(number_1, [number_2, ..., number_n]);

**Parámetros**

-- `number_1, number_2, ..., number_n` — Argumentos numéricos interpretados como enteros. Tipo: [En](../../data_types/int_uint.md), [Flotante](../../data_types/float.md).
+- `number_1, number_2, ..., number_n` — Numerical arguments interpreted as integers. Types: [Int](../../sql_reference/data_types/int_uint.md), [Flotante](../../sql_reference/data_types/float.md).
@@ -108,7 +111,7 @@ Los valores de los tipos de coma flotante y Decimal se codifican como su represe

**Parámetros**

-- `arg` — Un valor para convertir a hexadecimal. Tipo: [Cadena](../../data_types/string.md), [UInt](../../data_types/int_uint.md), [Flotante](../../data_types/float.md), [Decimal](../../data_types/decimal.md), [Fecha](../../data_types/date.md) o [FechaHora](../../data_types/datetime.md).
+- `arg` — A value to convert to hexadecimal. Types: [Cadena](../../sql_reference/data_types/string.md), [UInt](../../sql_reference/data_types/int_uint.md), [Flotante](../../sql_reference/data_types/float.md), [Decimal](../../sql_reference/data_types/decimal.md), [Fecha](../../sql_reference/data_types/date.md) o [FechaHora](../../sql_reference/data_types/datetime.md).
**Valor devuelto** @@ -150,7 +153,7 @@ Resultado: ## unhex(str) {#unhexstr} -Acepta una cadena que contiene cualquier número de dígitos hexadecimales y devuelve una cadena que contiene los bytes correspondientes. Admite letras mayúsculas y minúsculas A-F. El número de dígitos hexadecimales no tiene que ser par. Si es impar, el último dígito se interpreta como la mitad menos significativa del byte 00-0F. Si la cadena de argumento contiene algo distinto de los dígitos hexadecimales, se devuelve algún resultado definido por la implementación (no se produce una excepción). +Acepta una cadena que contiene cualquier número de dígitos hexadecimales y devuelve una cadena que contiene los bytes correspondientes. Admite letras mayúsculas y minúsculas A-F. El número de dígitos hexadecimales no tiene que ser par. Si es impar, el último dígito se interpreta como la mitad menos significativa del byte 00-0F. Si la cadena de argumento contiene algo que no sean dígitos hexadecimales, se devuelve algún resultado definido por la implementación (no se produce una excepción). Si desea convertir el resultado en un número, puede usar el ‘reverse’ y ‘reinterpretAsType’ función. ## UUIDStringToNum (str) {#uuidstringtonumstr} @@ -169,4 +172,4 @@ Acepta un entero. Devuelve una cadena que contiene la lista de potencias de dos Acepta un entero. Devuelve una matriz de números UInt64 que contiene la lista de potencias de dos que suman el número de origen cuando se suma. Los números en la matriz están en orden ascendente. -[Artículo Original](https://clickhouse.tech/docs/es/query_language/functions/encoding_functions/) +[Artículo Original](https://clickhouse.tech/docs/en/query_language/functions/encoding_functions/) diff --git a/docs/es/query_language/functions/ext_dict_functions.md b/docs/es/sql_reference/functions/ext_dict_functions.md similarity index 59% rename from docs/es/query_language/functions/ext_dict_functions.md rename to docs/es/sql_reference/functions/ext_dict_functions.md index fe7515b8846..979f20aafdc 100644 --- a/docs/es/query_language/functions/ext_dict_functions.md +++ b/docs/es/sql_reference/functions/ext_dict_functions.md @@ -1,10 +1,13 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 58 +toc_title: Trabajar con diccionarios externos --- # Funciones para trabajar con diccionarios externos {#ext_dict_functions} -Para obtener información sobre cómo conectar y configurar diccionarios externos, consulte [Diccionarios externos](../dicts/external_dicts.md). +Para obtener información sobre cómo conectar y configurar diccionarios externos, consulte [Diccionarios externos](../../sql_reference/dictionaries/external_dictionaries/external_dicts.md). ## dictGet {#dictget} @@ -17,16 +20,16 @@ dictGetOrDefault('dict_name', 'attr_name', id_expr, default_value_expr) **Parámetros** -- `dict_name` — Nombre del diccionario. [Literal de cadena](../syntax.md#syntax-string-literal). -- `attr_name` — Nombre de la columna del diccionario. [Literal de cadena](../syntax.md#syntax-string-literal). -- `id_expr` — Valor clave. [Expresion](../syntax.md#syntax-expressions) devolviendo un [UInt64](../../data_types/int_uint.md) o [Tupla](../../data_types/tuple.md)valor -type dependiendo de la configuración del diccionario. -- `default_value_expr` — Valor devuelto si el diccionario no contiene una fila `id_expr` Nivel de Cifrado WEP [Expresion](../syntax.md#syntax-expressions) devolviendo el valor en el tipo de datos configurado para `attr_name` atributo. 
+- `dict_name` — Name of the dictionary. [Literal de cadena](../syntax.md#syntax-string-literal). +- `attr_name` — Name of the column of the dictionary. [Literal de cadena](../syntax.md#syntax-string-literal). +- `id_expr` — Key value. [Expresion](../syntax.md#syntax-expressions) devolviendo un [UInt64](../../sql_reference/data_types/int_uint.md) o [Tupla](../../sql_reference/data_types/tuple.md)valor -type dependiendo de la configuración del diccionario. +- `default_value_expr` — Value returned if the dictionary doesn't contain a row with the `id_expr` clave. [Expresion](../syntax.md#syntax-expressions) devolviendo el valor en el tipo de datos configurado para `attr_name` atributo. **Valor devuelto** -- Si ClickHouse analiza el atributo correctamente en el [tipo de datos del atributo](../../query_language/dicts/external_dicts_dict_structure.md#ext_dict_structure-attributes), funciones devuelven el valor del atributo de diccionario que corresponde a `id_expr`. +- Si ClickHouse analiza el atributo correctamente en el [tipo de datos del atributo](../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md#ext_dict_structure-attributes), funciones devuelven el valor del atributo de diccionario que corresponde a `id_expr`. -- Si no hay la clave, correspondiente a `id_expr` es el diccionario, entonces: +- Si no hay la clave, correspondiente a `id_expr` en el diccionario, entonces: - `dictGet` returns the content of the `` element specified for the attribute in the dictionary configuration. - `dictGetOrDefault` returns the value passed as the `default_value_expr` parameter. @@ -94,7 +97,7 @@ LIMIT 3 **Ver también** -- [Diccionarios externos](../dicts/external_dicts.md) +- [Diccionarios externos](../../sql_reference/dictionaries/external_dictionaries/external_dicts.md) ## dictHas {#dicthas} @@ -106,8 +109,8 @@ dictHas('dict_name', id_expr) **Parámetros** -- `dict_name` — Nombre del diccionario. [Literal de cadena](../syntax.md#syntax-string-literal). -- `id_expr` — Valor clave. [Expresion](../syntax.md#syntax-expressions) devolviendo un [UInt64](../../data_types/int_uint.md)-tipo de valor. +- `dict_name` — Name of the dictionary. [Literal de cadena](../syntax.md#syntax-string-literal). +- `id_expr` — Key value. [Expresion](../syntax.md#syntax-expressions) devolviendo un [UInt64](../../sql_reference/data_types/int_uint.md)-tipo de valor. **Valor devuelto** @@ -118,7 +121,7 @@ Tipo: `UInt8`. ## dictGetHierarchy {#dictgethierarchy} -Crea una matriz, que contiene todos los padres de una clave [diccionario jerárquico](../dicts/external_dicts_dict_hierarchical.md). +Crea una matriz, que contiene todos los padres de una clave [diccionario jerárquico](../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_hierarchical.md). **Sintaxis** @@ -128,14 +131,14 @@ dictGetHierarchy('dict_name', key) **Parámetros** -- `dict_name` — Nombre del diccionario. [Literal de cadena](../syntax.md#syntax-string-literal). -- `key` — Valor clave. [Expresion](../syntax.md#syntax-expressions) devolviendo un [UInt64](../../data_types/int_uint.md)-tipo de valor. +- `dict_name` — Name of the dictionary. [Literal de cadena](../syntax.md#syntax-string-literal). +- `key` — Key value. [Expresion](../syntax.md#syntax-expressions) devolviendo un [UInt64](../../sql_reference/data_types/int_uint.md)-tipo de valor. **Valor devuelto** - Padres por la llave. -Tipo: [Matriz (UInt64)](../../data_types/array.md). +Tipo: [Matriz (UInt64)](../../sql_reference/data_types/array.md). 
## dictIsIn {#dictisin}

@@ -147,9 +150,9 @@ dictIsIn('dict_name', child_id_expr, ancestor_id_expr)

**Parámetros**

-- `dict_name` — Nombre del diccionario. [Literal de cadena](../syntax.md#syntax-string-literal).
-- `child_id_expr` — Clave a comprobar. [Expresion](../syntax.md#syntax-expressions) devolviendo un [UInt64](../../data_types/int_uint.md)-tipo de valor.
-- `ancestor_id_expr` — Presunto ancestro de la `child_id_expr` Nivel de Cifrado WEP [Expresion](../syntax.md#syntax-expressions) devolviendo un [UInt64](../../data_types/int_uint.md)-tipo de valor.
+- `dict_name` — Name of the dictionary. [Literal de cadena](../syntax.md#syntax-string-literal).
+- `child_id_expr` — Key to be checked. [Expresion](../syntax.md#syntax-expressions) devolviendo un [UInt64](../../sql_reference/data_types/int_uint.md)-tipo de valor.
+- `ancestor_id_expr` — Alleged ancestor of the `child_id_expr` clave. [Expresion](../syntax.md#syntax-expressions) devolviendo un [UInt64](../../sql_reference/data_types/int_uint.md)-tipo de valor.

**Valor devuelto**

@@ -183,20 +186,20 @@ dictGet[Type]OrDefault('dict_name', 'attr_name', id_expr, default_value_expr)

**Parámetros**

-- `dict_name` — Nombre del diccionario. [Literal de cadena](../syntax.md#syntax-string-literal).
-- `attr_name` — Nombre de la columna del diccionario. [Literal de cadena](../syntax.md#syntax-string-literal).
-- `id_expr` — Valor clave. [Expresion](../syntax.md#syntax-expressions) devolviendo un [UInt64](../../data_types/int_uint.md)-tipo de valor.
-- `default_value_expr` — Valor que se devuelve si el diccionario no contiene una fila `id_expr` Nivel de Cifrado WEP [Expresion](../syntax.md#syntax-expressions) devolviendo un valor en el tipo de datos configurado para `attr_name` atributo.
+- `dict_name` — Name of the dictionary. [Literal de cadena](../syntax.md#syntax-string-literal).
+- `attr_name` — Name of the column of the dictionary. [Literal de cadena](../syntax.md#syntax-string-literal).
+- `id_expr` — Key value. [Expresion](../syntax.md#syntax-expressions) devolviendo un [UInt64](../../sql_reference/data_types/int_uint.md)-tipo de valor.
+- `default_value_expr` — Value which is returned if the dictionary doesn't contain a row with the `id_expr` clave. [Expresion](../syntax.md#syntax-expressions) devolviendo un valor en el tipo de datos configurado para `attr_name` atributo.

**Valor devuelto**

-- Si ClickHouse analiza el atributo correctamente en el [tipo de datos del atributo](../../query_language/dicts/external_dicts_dict_structure.md#ext_dict_structure-attributes), funciones devuelven el valor del atributo de diccionario que corresponde a `id_expr`.
+- Si ClickHouse analiza el atributo correctamente en el [tipo de datos del atributo](../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md#ext_dict_structure-attributes), funciones devuelven el valor del atributo de diccionario que corresponde a `id_expr`.

-- Si no se solicita `id_expr` es el diccionario entonces:
+- Si no se solicita `id_expr` en el diccionario entonces:

    - `dictGet[Type]` returns the content of the `<null_value>` element specified for the attribute in the dictionary configuration.
    - `dictGet[Type]OrDefault` returns the value passed as the `default_value_expr` parameter.

ClickHouse produce una excepción si no puede analizar el valor del atributo o si el valor no coincide con el tipo de datos del atributo.
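Un esbozo mínimo de la variante tipada `OrDefault`, suponiendo que exista un diccionario configurado llamado `ext-dict-test` con un atributo `UInt32` llamado `c1` (ambos nombres son hipotéticos):

``` sql
-- Diccionario hipotético `ext-dict-test` con un atributo UInt32 `c1`.
-- Para las claves ausentes del diccionario se devuelve el último argumento.
SELECT
    number AS key,
    dictGetUInt32OrDefault('ext-dict-test', 'c1', toUInt64(number), toUInt32(0)) AS val
FROM system.numbers
LIMIT 3
```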
-[Artículo Original](https://clickhouse.tech/docs/es/query_language/functions/ext_dict_functions/) +[Artículo Original](https://clickhouse.tech/docs/en/query_language/functions/ext_dict_functions/) diff --git a/docs/es/query_language/functions/functions_for_nulls.md b/docs/es/sql_reference/functions/functions_for_nulls.md similarity index 85% rename from docs/es/query_language/functions/functions_for_nulls.md rename to docs/es/sql_reference/functions/functions_for_nulls.md index e343667ce6e..662b6c27999 100644 --- a/docs/es/query_language/functions/functions_for_nulls.md +++ b/docs/es/sql_reference/functions/functions_for_nulls.md @@ -1,12 +1,15 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 63 +toc_title: Trabajar con argumentos Nullable --- # Funciones para trabajar con agregados anulables {#functions-for-working-with-nullable-aggregates} ## IsNull {#isnull} -Comprueba si el argumento es [NULO](../syntax.md#null). +Comprueba si el argumento es [NULL](../syntax.md#null). ``` sql isNull(x) @@ -14,7 +17,7 @@ isNull(x) **Parámetros** -- `x` — Un valor con un tipo de datos no compuesto. +- `x` — A value with a non-compound data type. **Valor devuelto** @@ -46,7 +49,7 @@ SELECT x FROM t_null WHERE isNull(y) ## isNotNull {#isnotnull} -Comprueba si el argumento es [NULO](../syntax.md#null). +Comprueba si el argumento es [NULL](../syntax.md#null). ``` sql isNotNull(x) @@ -54,7 +57,7 @@ isNotNull(x) **Parámetros:** -- `x` — Un valor con un tipo de datos no compuesto. +- `x` — A value with a non-compound data type. **Valor devuelto** @@ -99,7 +102,7 @@ coalesce(x,...) **Valores devueltos** - El primer no-`NULL` argumento. -- `NULL` si todos los argumentos hijo `NULL`. +- `NULL` si todos los argumentos son `NULL`. **Ejemplo** @@ -137,13 +140,13 @@ ifNull(x,alt) **Parámetros:** -- `x` — El valor que se debe comprobar `NULL`. -- `alt` — El valor que devuelve la función si `x` ser `NULL`. +- `x` — The value to check for `NULL`. +- `alt` — The value that the function returns if `x` ser `NULL`. **Valores devueltos** -- Valor `x` si `x` no es `NULL`. -- Valor `alt` si `x` ser `NULL`. +- Valor `x`, si `x` no es `NULL`. +- Valor `alt`, si `x` ser `NULL`. **Ejemplo** @@ -177,7 +180,7 @@ nullIf(x, y) **Parámetros:** -`x`, `y` — Valores de comparación. Deben ser tipos compatibles, o ClickHouse generará una excepción. +`x`, `y` — Values for comparison. They must be compatible types, or ClickHouse will generate an exception. **Valores devueltos** @@ -208,7 +211,7 @@ SELECT nullIf(1, 2) ## assumeNotNull {#assumenotnull} -Resultados en un valor de tipo [NULO](../../data_types/nullable.md) para un no- `Nullable` si el valor no es `NULL`. +Resultados en un valor de tipo [NULL](../../sql_reference/data_types/nullable.md) para un no- `Nullable` si el valor no es `NULL`. ``` sql assumeNotNull(x) @@ -216,7 +219,7 @@ assumeNotNull(x) **Parámetros:** -- `x` — El valor original. +- `x` — The original value. **Valores devueltos** @@ -278,7 +281,7 @@ toNullable(x) **Parámetros:** -- `x` — El valor de cualquier tipo no compuesto. +- `x` — The value of any non-compound type. 
**Valor devuelto** @@ -306,4 +309,4 @@ SELECT toTypeName(toNullable(10)) └────────────────────────────┘ ``` -[Artículo Original](https://clickhouse.tech/docs/es/query_language/functions/functions_for_nulls/) +[Artículo Original](https://clickhouse.tech/docs/en/query_language/functions/functions_for_nulls/) diff --git a/docs/es/query_language/functions/geo.md b/docs/es/sql_reference/functions/geo.md similarity index 73% rename from docs/es/query_language/functions/geo.md rename to docs/es/sql_reference/functions/geo.md index eba818b5639..05734e0289e 100644 --- a/docs/es/query_language/functions/geo.md +++ b/docs/es/sql_reference/functions/geo.md @@ -1,10 +1,13 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 62 +toc_title: "Trabajar con coordenadas geogr\xE1ficas" --- # Funciones para trabajar con coordenadas geográficas {#functions-for-working-with-geographical-coordinates} -## GranCircleDistance {#greatcircledistance} +## GreatCircleDistance {#greatcircledistance} Calcule la distancia entre dos puntos en la superficie de la Tierra usando [la fórmula del gran círculo](https://en.wikipedia.org/wiki/Great-circle_distance). @@ -14,10 +17,10 @@ greatCircleDistance(lon1Deg, lat1Deg, lon2Deg, lat2Deg) **Parámetros de entrada** -- `lon1Deg` — Longitud del primer punto en grados. Gama: `[-180°, 180°]`. -- `lat1Deg` — Latitud del primer punto en grados. Gama: `[-90°, 90°]`. -- `lon2Deg` — Longitud del segundo punto en grados. Gama: `[-180°, 180°]`. -- `lat2Deg` — Latitud del segundo punto en grados. Gama: `[-90°, 90°]`. +- `lon1Deg` — Longitude of the first point in degrees. Range: `[-180°, 180°]`. +- `lat1Deg` — Latitude of the first point in degrees. Range: `[-90°, 90°]`. +- `lon2Deg` — Longitude of the second point in degrees. Range: `[-180°, 180°]`. +- `lat2Deg` — Latitude of the second point in degrees. Range: `[-90°, 90°]`. Los valores positivos corresponden a latitud norte y longitud este, y los valores negativos corresponden a latitud sur y longitud oeste. @@ -50,9 +53,9 @@ pointInEllipses(x, y, x₀, y₀, a₀, b₀,...,xₙ, yₙ, aₙ, bₙ) **Parámetros de entrada** -- `x, y` — Coordenadas de un punto en el plano. -- `xᵢ, yᵢ` — Coordenadas del centro de la `i`-ésimo puntos suspensivos. -- `aᵢ, bᵢ` — Ejes del `i`-ésimo puntos suspensivos en unidades de coordenadas x, y. +- `x, y` — Coordinates of a point on the plane. +- `xᵢ, yᵢ` — Coordinates of the center of the `i`-ésimo puntos suspensivos. +- `aᵢ, bᵢ` — Axes of the `i`-ésimo puntos suspensivos en unidades de coordenadas x, y. Los parámetros de entrada deben ser `2+4⋅n`, donde `n` es el número de puntos suspensivos. @@ -82,8 +85,8 @@ pointInPolygon((x, y), [(a, b), (c, d) ...], ...) **Valores de entrada** -- `(x, y)` — Coordenadas de un punto en el plano. Tipo de datos — [Tupla](../../data_types/tuple.md) — Una tupla de dos números. -- `[(a, b), (c, d) ...]` — Vértices de polígono. Tipo de datos — [Matriz](../../data_types/array.md). Cada vértice está representado por un par de coordenadas `(a, b)`. Los vértices deben especificarse en sentido horario o antihorario. El número mínimo de vértices es 3. El polígono debe ser constante. +- `(x, y)` — Coordinates of a point on the plane. Data type — [Tupla](../../sql_reference/data_types/tuple.md) — A tuple of two numbers. +- `[(a, b), (c, d) ...]` — Polygon vertices. Data type — [Matriz](../../sql_reference/data_types/array.md). Cada vértice está representado por un par de coordenadas `(a, b)`. 
Los vértices deben especificarse en sentido horario o antihorario. El número mínimo de vértices es 3. El polígono debe ser constante. - La función también admite polígonos con agujeros (secciones recortadas). En este caso, agregue polígonos que definan las secciones recortadas utilizando argumentos adicionales de la función. La función no admite polígonos no simplemente conectados. **Valores devueltos** @@ -139,7 +142,7 @@ Decodifica cualquier cadena codificada por geohash en longitud y latitud. **Valores de entrada** -- cadena codificada - cadena codificada geohash. +- encoded string - cadena codificada geohash. **Valores devueltos** @@ -159,9 +162,9 @@ SELECT geohashDecode('ezs42') AS res ## geoToH3 {#geotoh3} -Devoluciones [Hombre](https://uber.github.io/h3/#/documentation/overview/introduction) índice de punto `(lon, lat)` con la resolución especificada. +Devoluciones [H3](https://uber.github.io/h3/#/documentation/overview/introduction) índice de punto `(lon, lat)` con la resolución especificada. -[Hombre](https://uber.github.io/h3/#/documentation/overview/introduction) es un sistema de indexación geográfica donde la superficie de la Tierra se divide en incluso azulejos hexagonales. Este sistema es jerárquico, es decir, cada hexágono en el nivel superior se puede dividir en siete incluso pero más pequeños y así sucesivamente. +[H3](https://uber.github.io/h3/#/documentation/overview/introduction) es un sistema de indexación geográfica donde la superficie de la Tierra se divide en incluso azulejos hexagonales. Este sistema es jerárquico, es decir, cada hexágono en el nivel superior se puede dividir en siete incluso pero más pequeños y así sucesivamente. Este índice se utiliza principalmente para ubicaciones de bucketing y otras manipulaciones geoespaciales. @@ -173,9 +176,9 @@ geoToH3(lon, lat, resolution) **Parámetros** -- `lon` — Longitud. Tipo: [Float64](../../data_types/float.md). -- `lat` — Latitud. Tipo: [Float64](../../data_types/float.md). -- `resolution` — Resolución del índice. Gama: `[0, 15]`. Tipo: [UInt8](../../data_types/int_uint.md). +- `lon` — Longitude. Type: [Float64](../../sql_reference/data_types/float.md). +- `lat` — Latitude. Type: [Float64](../../sql_reference/data_types/float.md). +- `resolution` — Index resolution. Range: `[0, 15]`. Tipo: [UInt8](../../sql_reference/data_types/int_uint.md). **Valores devueltos** @@ -210,16 +213,16 @@ Devuelve una matriz de cadenas codificadas por geohash de precisión dada que ca - latitude\_min - latitud mínima, valor flotante en el rango `[-90°, 90°]` - longitude\_max - longitud máxima, valor flotante en el rango `[-180°, 180°]` - latitude\_max - latitud máxima, valor flotante en el rango `[-90°, 90°]` -- precisión - precisión del geohash, `UInt8` es el rango `[1, 12]` +- precisión - precisión del geohash, `UInt8` en el rango `[1, 12]` Tenga en cuenta que todos los parámetros de coordenadas deben ser del mismo tipo: `Float32` o `Float64`. **Valores devueltos** - matriz de cadenas de precisión largas de geohash-cajas que cubren el área proporcionada, no debe confiar en el orden de los artículos. -- \[\] - matriz vacía si *minuto* Valores de *latitud* y *longitud* no son menos que correspondiente *máximo* valor. +- \[\] - matriz vacía si *minuto* valores de *latitud* y *longitud* no son menos que los correspondientes *máximo* valor. -Tenga en cuenta que la función arrojará una excepción si la matriz resultante tiene más de 10'000'000 elementos de longitud. 
+Tenga en cuenta que la función arrojará una excepción si la matriz resultante tiene más de 10'000'000 de elementos. **Ejemplo** @@ -245,11 +248,11 @@ h3GetBaseCell(index) **Parámetros** -- `index` — Número de índice hexagonal. Tipo: [UInt64](../../data_types/int_uint.md). +- `index` — Hexagon index number. Type: [UInt64](../../sql_reference/data_types/int_uint.md). **Valores devueltos** -- Número de celda base hexagonal. Tipo: [UInt8](../../data_types/int_uint.md). +- Número de celda base hexagonal. Tipo: [UInt8](../../sql_reference/data_types/int_uint.md). **Ejemplo** @@ -279,11 +282,11 @@ h3HexAreaM2(resolution) **Parámetros** -- `resolution` — Resolución del índice. Gama: `[0, 15]`. Tipo: [UInt8](../../data_types/int_uint.md). +- `resolution` — Index resolution. Range: `[0, 15]`. Tipo: [UInt8](../../sql_reference/data_types/int_uint.md). **Valores devueltos** -- Superficie en m². Tipo: [Float64](../../data_types/float.md). +- Area in m². Type: [Float64](../../sql_reference/data_types/float.md). **Ejemplo** @@ -313,12 +316,12 @@ h3IndexesAreNeighbors(index1, index2) **Parámetros** -- `index1` — Número de índice hexagonal. Tipo: [UInt64](../../data_types/int_uint.md). -- `index2` — Número de índice hexagonal. Tipo: [UInt64](../../data_types/int_uint.md). +- `index1` — Hexagon index number. Type: [UInt64](../../sql_reference/data_types/int_uint.md). +- `index2` — Hexagon index number. Type: [UInt64](../../sql_reference/data_types/int_uint.md). **Valores devueltos** -- Devoluciones `1` si los índices son vecinos, `0` de lo contrario. Tipo: [UInt8](../../data_types/int_uint.md). +- Devoluciones `1` si los índices son vecinos, `0` de lo contrario. Tipo: [UInt8](../../sql_reference/data_types/int_uint.md). **Ejemplo** @@ -348,12 +351,12 @@ h3ToChildren(index, resolution) **Parámetros** -- `index` — Número de índice hexagonal. Tipo: [UInt64](../../data_types/int_uint.md). -- `resolution` — Resolución del índice. Gama: `[0, 15]`. Tipo: [UInt8](../../data_types/int_uint.md). +- `index` — Hexagon index number. Type: [UInt64](../../sql_reference/data_types/int_uint.md). +- `resolution` — Index resolution. Range: `[0, 15]`. Tipo: [UInt8](../../sql_reference/data_types/int_uint.md). **Valores devueltos** -- Matriz con los índices H3 hijo. Matriz de tipo: [UInt64](../../data_types/int_uint.md). +- Matriz con los índices H3 hijo. Matriz de tipo: [UInt64](../../sql_reference/data_types/int_uint.md). **Ejemplo** @@ -383,12 +386,12 @@ h3ToParent(index, resolution) **Parámetros** -- `index` — Número de índice hexagonal. Tipo: [UInt64](../../data_types/int_uint.md). -- `resolution` — Resolución del índice. Gama: `[0, 15]`. Tipo: [UInt8](../../data_types/int_uint.md). +- `index` — Hexagon index number. Type: [UInt64](../../sql_reference/data_types/int_uint.md). +- `resolution` — Index resolution. Range: `[0, 15]`. Tipo: [UInt8](../../sql_reference/data_types/int_uint.md). **Valores devueltos** -- Índice padre H3. Tipo: [UInt64](../../data_types/int_uint.md). +- Índice padre H3. Tipo: [UInt64](../../sql_reference/data_types/int_uint.md). **Ejemplo** @@ -416,11 +419,11 @@ h3ToString(index) **Parámetros** -- `index` — Número de índice hexagonal. Tipo: [UInt64](../../data_types/int_uint.md). +- `index` — Hexagon index number. Type: [UInt64](../../sql_reference/data_types/int_uint.md). **Valores devueltos** -- Representación de cadena del índice H3. Tipo: [Cadena](../../data_types/string.md). +- Representación de cadena del índice H3. Tipo: [Cadena](../../sql_reference/data_types/string.md). 
**Ejemplo** @@ -448,11 +451,11 @@ stringToH3(index_str) **Parámetros** -- `index_str` — Representación de cadena del índice H3. Tipo: [Cadena](../../data_types/string.md). +- `index_str` — String representation of the H3 index. Type: [Cadena](../../sql_reference/data_types/string.md). **Valores devueltos** -- Número de índice hexagonal. Devuelve 0 en caso de error. Tipo: [UInt64](../../data_types/int_uint.md). +- Número de índice hexagonal. Devuelve 0 en caso de error. Tipo: [UInt64](../../sql_reference/data_types/int_uint.md). **Ejemplo** @@ -482,11 +485,11 @@ h3GetResolution(index) **Parámetros** -- `index` — Número de índice hexagonal. Tipo: [UInt64](../../data_types/int_uint.md). +- `index` — Hexagon index number. Type: [UInt64](../../sql_reference/data_types/int_uint.md). **Valores devueltos** -- Resolución del índice. Gama: `[0, 15]`. Tipo: [UInt8](../../data_types/int_uint.md). +- Resolución del índice. Gama: `[0, 15]`. Tipo: [UInt8](../../sql_reference/data_types/int_uint.md). **Ejemplo** @@ -504,4 +507,4 @@ Resultado: └─────┘ ``` -[Artículo Original](https://clickhouse.tech/docs/es/query_language/functions/geo/) +[Artículo Original](https://clickhouse.tech/docs/en/query_language/functions/geo/) diff --git a/docs/es/query_language/functions/hash_functions.md b/docs/es/sql_reference/functions/hash_functions.md similarity index 74% rename from docs/es/query_language/functions/hash_functions.md rename to docs/es/sql_reference/functions/hash_functions.md index 0bb985c2af0..f6cbfabcf06 100644 --- a/docs/es/query_language/functions/hash_functions.md +++ b/docs/es/sql_reference/functions/hash_functions.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 50 +toc_title: Hash --- # Funciones hash {#hash-functions} @@ -8,7 +11,7 @@ Las funciones Hash se pueden usar para la barajada pseudoaleatoria determinista ## HalfMD5 {#hash-functions-halfmd5} -[Interpretar](../../query_language/functions/type_conversion_functions.md#type_conversion_functions-reinterpretAsString) todos los parámetros de entrada como cadenas y calcula el [MD5](https://en.wikipedia.org/wiki/MD5) valor hash para cada uno de ellos. Luego combina hashes, toma los primeros 8 bytes del hash de la cadena resultante y los interpreta como `UInt64` en orden de bytes de big-endian. +[Interpretar](../../sql_reference/functions/type_conversion_functions.md#type_conversion_functions-reinterpretAsString) todos los parámetros de entrada como cadenas y calcula el [MD5](https://en.wikipedia.org/wiki/MD5) valor hash para cada uno de ellos. Luego combina hashes, toma los primeros 8 bytes del hash de la cadena resultante y los interpreta como `UInt64` en orden de bytes de big-endian. ``` sql halfMD5(par1, ...) @@ -19,11 +22,11 @@ Considere usar el [sipHash64](#hash_functions-siphash64) función en su lugar. **Parámetros** -La función toma un número variable de parámetros de entrada. Los parámetros pueden ser cualquiera de los [Tipos de datos compatibles](../../data_types/index.md). +La función toma un número variable de parámetros de entrada. Los parámetros pueden ser cualquiera de los [tipos de datos compatibles](../../sql_reference/data_types/index.md). **Valor devuelto** -Naciones [UInt64](../../data_types/int_uint.md) tipo de datos valor hash. +A [UInt64](../../sql_reference/data_types/int_uint.md) tipo de datos valor hash. 
**Ejemplo** @@ -40,12 +43,12 @@ SELECT halfMD5(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00') ## MD5 {#hash_functions-md5} Calcula el MD5 de una cadena y devuelve el conjunto de bytes resultante como FixedString(16). -Si no necesita MD5 en particular, pero necesita un hash criptográfico de 128 bits decente, use el ‘sipHash128’ función en su lugar. +Si no necesita MD5 en particular, pero necesita un hash criptográfico decente de 128 bits, use el ‘sipHash128’ función en su lugar. Si desea obtener el mismo resultado que la salida de la utilidad md5sum, use lower(hex(MD5(s)) . ## sipHash64 {#hash_functions-siphash64} -Producir un [SipHash](https://131002.net/siphash/) hachís valor. +Produce un [SipHash](https://131002.net/siphash/) valor hash. ``` sql sipHash64(par1,...) @@ -53,7 +56,7 @@ sipHash64(par1,...) Esta es una función hash criptográfica. Funciona al menos tres veces más rápido que el [MD5](#hash_functions-md5) función. -Función [interpretar](../../query_language/functions/type_conversion_functions.md#type_conversion_functions-reinterpretAsString) todos los parámetros de entrada como cadenas y calcula el valor hash para cada uno de ellos. Luego combina hashes por el siguiente algoritmo: +Función [interpretar](../../sql_reference/functions/type_conversion_functions.md#type_conversion_functions-reinterpretAsString) todos los parámetros de entrada como cadenas y calcula el valor hash para cada uno de ellos. Luego combina hashes por el siguiente algoritmo: 1. Después de hash todos los parámetros de entrada, la función obtiene la matriz de hashes. 2. La función toma el primero y el segundo elementos y calcula un hash para la matriz de ellos. @@ -62,11 +65,11 @@ Función [interpretar](../../query_language/functions/type_conversion_functions. **Parámetros** -La función toma un número variable de parámetros de entrada. Los parámetros pueden ser cualquiera de los [Tipos de datos compatibles](../../data_types/index.md). +La función toma un número variable de parámetros de entrada. Los parámetros pueden ser cualquiera de los [tipos de datos compatibles](../../sql_reference/data_types/index.md). **Valor devuelto** -Naciones [UInt64](../../data_types/int_uint.md) tipo de datos valor hash. +A [UInt64](../../sql_reference/data_types/int_uint.md) tipo de datos valor hash. **Ejemplo** @@ -83,12 +86,12 @@ SELECT sipHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00 ## sipHash128 {#hash_functions-siphash128} Calcula SipHash a partir de una cadena. -Acepta un argumento de tipo String. ¿Cómo puedo hacerlo? +Acepta un argumento de tipo String. Devuelve FixedString(16). Difiere de sipHash64 en que el estado final de plegado xor solo se realiza hasta 128 bits. ## cityHash64 {#cityhash64} -Producir un [Método de codificación de datos:](https://github.com/google/cityhash) hachís valor. +Produce un [Método de codificación de datos:](https://github.com/google/cityhash) valor hash. ``` sql cityHash64(par1,...) @@ -98,11 +101,11 @@ Esta es una función hash rápida no criptográfica. Utiliza el algoritmo CityHa **Parámetros** -La función toma un número variable de parámetros de entrada. Los parámetros pueden ser cualquiera de los [Tipos de datos compatibles](../../data_types/index.md). +La función toma un número variable de parámetros de entrada. Los parámetros pueden ser cualquiera de los [tipos de datos compatibles](../../sql_reference/data_types/index.md). **Valor devuelto** -Naciones [UInt64](../../data_types/int_uint.md) tipo de datos valor hash. 
+A [UInt64](../../sql_reference/data_types/int_uint.md) tipo de datos valor hash. **Ejemplos** @@ -148,13 +151,13 @@ Incluso en estos casos, recomendamos aplicar la función offline y precalcular v ## Nombre de la red inalámbrica (SSID):\]) {#urlhashurl-n} Una función hash no criptográfica rápida y de calidad decente para una cadena obtenida de una URL utilizando algún tipo de normalización. -`URLHash(s)` – Calcula un hash de una cadena sin uno de los símbolos finales `/`,`?` o `#` al final, si está presente. -`URLHash(s, N)` – Calcula un hash de una cadena hasta el nivel N en la jerarquía de URL, sin uno de los símbolos finales `/`,`?` o `#` al final, si está presente. +`URLHash(s)` – Calculates a hash from a string without one of the trailing symbols `/`,`?` o `#` al final, si está presente. +`URLHash(s, N)` – Calculates a hash from a string up to the N level in the URL hierarchy, without one of the trailing symbols `/`,`?` o `#` al final, si está presente. Los niveles son los mismos que en URLHierarchy. Esta función es específica de Yandex.Métrica. ## Método de codificación de datos: {#farmhash64} -Producir un [Método de codificación de datos:](https://github.com/google/farmhash) hachís valor. +Produce un [Método de codificación de datos:](https://github.com/google/farmhash) valor hash. ``` sql farmHash64(par1, ...) @@ -164,11 +167,11 @@ La función utiliza el `Hash64` de todos [métodos disponibles](https://github.c **Parámetros** -La función toma un número variable de parámetros de entrada. Los parámetros pueden ser cualquiera de los [Tipos de datos compatibles](../../data_types/index.md). +La función toma un número variable de parámetros de entrada. Los parámetros pueden ser cualquiera de los [tipos de datos compatibles](../../sql_reference/data_types/index.md). **Valor devuelto** -Naciones [UInt64](../../data_types/int_uint.md) tipo de datos valor hash. +A [UInt64](../../sql_reference/data_types/int_uint.md) tipo de datos valor hash. **Ejemplo** @@ -194,7 +197,7 @@ SELECT javaHash(''); **Valor devuelto** -Naciones `Int32` tipo de datos valor hash. +A `Int32` tipo de datos valor hash. **Ejemplo** @@ -224,11 +227,11 @@ javaHashUTF16LE(stringUtf16le) **Parámetros** -- `stringUtf16le` — una cadena en codificación UTF-16LE. +- `stringUtf16le` — a string in UTF-16LE encoding. **Valor devuelto** -Naciones `Int32` tipo de datos valor hash. +A `Int32` tipo de datos valor hash. **Ejemplo** @@ -260,7 +263,7 @@ Esto es sólo [Nivel de Cifrado WEP](#hash_functions-javahash) con poco de signo **Valor devuelto** -Naciones `Int32` tipo de datos valor hash. +A `Int32` tipo de datos valor hash. Tipo: `hiveHash`. @@ -282,7 +285,7 @@ Resultado: ## Método de codificación de datos: {#metrohash64} -Producir un [Método de codificación de datos:](http://www.jandrewrogers.com/2015/05/27/metrohash/) hachís valor. +Produce un [Método de codificación de datos:](http://www.jandrewrogers.com/2015/05/27/metrohash/) valor hash. ``` sql metroHash64(par1, ...) @@ -290,11 +293,11 @@ metroHash64(par1, ...) **Parámetros** -La función toma un número variable de parámetros de entrada. Los parámetros pueden ser cualquiera de los [Tipos de datos compatibles](../../data_types/index.md). +La función toma un número variable de parámetros de entrada. Los parámetros pueden ser cualquiera de los [tipos de datos compatibles](../../sql_reference/data_types/index.md). **Valor devuelto** -Naciones [UInt64](../../data_types/int_uint.md) tipo de datos valor hash. 
+A [UInt64](../../sql_reference/data_types/int_uint.md) tipo de datos valor hash. **Ejemplo** @@ -316,7 +319,7 @@ Para obtener más información, consulte el enlace: [SaltarConsistentHash](https ## murmurHash2\_32, murmurHash2\_64 {#murmurhash2-32-murmurhash2-64} -Producir un [Método de codificación de datos:](https://github.com/aappleby/smhasher) hachís valor. +Produce un [Método de codificación de datos:](https://github.com/aappleby/smhasher) valor hash. ``` sql murmurHash2_32(par1, ...) @@ -325,12 +328,12 @@ murmurHash2_64(par1, ...) **Parámetros** -Ambas funciones toman un número variable de parámetros de entrada. Los parámetros pueden ser cualquiera de los [Tipos de datos compatibles](../../data_types/index.md). +Ambas funciones toman un número variable de parámetros de entrada. Los parámetros pueden ser cualquiera de los [tipos de datos compatibles](../../sql_reference/data_types/index.md). **Valor devuelto** -- El `murmurHash2_32` función devuelve el valor hash que tiene el [UInt32](../../data_types/int_uint.md) tipo de datos. -- El `murmurHash2_64` función devuelve el valor hash que tiene el [UInt64](../../data_types/int_uint.md) tipo de datos. +- El `murmurHash2_32` función devuelve el valor hash que tiene el [UInt32](../../sql_reference/data_types/int_uint.md) tipo de datos. +- El `murmurHash2_64` función devuelve el valor hash que tiene el [UInt64](../../sql_reference/data_types/int_uint.md) tipo de datos. **Ejemplo** @@ -346,7 +349,7 @@ SELECT murmurHash2_64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23: ## murmurHash3\_32, murmurHash3\_64 {#murmurhash3-32-murmurhash3-64} -Producir un [Método de codificación de datos:](https://github.com/aappleby/smhasher) hachís valor. +Produce un [Método de codificación de datos:](https://github.com/aappleby/smhasher) valor hash. ``` sql murmurHash3_32(par1, ...) @@ -355,12 +358,12 @@ murmurHash3_64(par1, ...) **Parámetros** -Ambas funciones toman un número variable de parámetros de entrada. Los parámetros pueden ser cualquiera de los [Tipos de datos compatibles](../../data_types/index.md). +Ambas funciones toman un número variable de parámetros de entrada. Los parámetros pueden ser cualquiera de los [tipos de datos compatibles](../../sql_reference/data_types/index.md). **Valor devuelto** -- El `murmurHash3_32` función devuelve un [UInt32](../../data_types/int_uint.md) tipo de datos valor hash. -- El `murmurHash3_64` función devuelve un [UInt64](../../data_types/int_uint.md) tipo de datos valor hash. +- El `murmurHash3_32` función devuelve un [UInt32](../../sql_reference/data_types/int_uint.md) tipo de datos valor hash. +- El `murmurHash3_64` función devuelve un [UInt64](../../sql_reference/data_types/int_uint.md) tipo de datos valor hash. **Ejemplo** @@ -376,7 +379,7 @@ SELECT murmurHash3_32(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23: ## murmurHash3\_128 {#murmurhash3-128} -Producir un [Método de codificación de datos:](https://github.com/aappleby/smhasher) hachís valor. +Produce un [Método de codificación de datos:](https://github.com/aappleby/smhasher) valor hash. ``` sql murmurHash3_128( expr ) @@ -384,11 +387,11 @@ murmurHash3_128( expr ) **Parámetros** -- `expr` — [Expresiones](../syntax.md#syntax-expressions) devolviendo un [Cadena](../../data_types/string.md)-tipo de valor. +- `expr` — [Expresiones](../syntax.md#syntax-expressions) devolviendo un [Cadena](../../sql_reference/data_types/string.md)-tipo de valor. 
**Valor devuelto** -Naciones [Cadena fija (16)](../../data_types/fixedstring.md) tipo de datos valor hash. +A [Cadena fija (16)](../../sql_reference/data_types/fixedstring.md) tipo de datos valor hash. **Ejemplo** @@ -416,7 +419,7 @@ SELECT xxHash64(''); **Valor devuelto** -Naciones `Uint32` o `Uint64` tipo de datos valor hash. +A `Uint32` o `Uint64` tipo de datos valor hash. Tipo: `xxHash`. @@ -440,4 +443,4 @@ Resultado: - [xxHash](http://cyan4973.github.io/xxHash/). -[Artículo Original](https://clickhouse.tech/docs/es/query_language/functions/hash_functions/) +[Artículo Original](https://clickhouse.tech/docs/en/query_language/functions/hash_functions/) diff --git a/docs/es/query_language/functions/higher_order_functions.md b/docs/es/sql_reference/functions/higher_order_functions.md similarity index 84% rename from docs/es/query_language/functions/higher_order_functions.md rename to docs/es/sql_reference/functions/higher_order_functions.md index fce1c63d2c1..3fac42e8d3c 100644 --- a/docs/es/query_language/functions/higher_order_functions.md +++ b/docs/es/sql_reference/functions/higher_order_functions.md @@ -1,12 +1,15 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 57 +toc_title: Orden superior --- # Funciones de orden superior {#higher-order-functions} ## `->` operador, función lambda (params, expr) {#operator-lambdaparams-expr-function} -Permite describir una función lambda para pasar a una función de orden superior. El lado izquierdo de la flecha tiene un parámetro formal, que es cualquier ID, o múltiples parámetros formales: cualquier ID en una tupla. El lado derecho de la flecha tiene una expresión que puede usar estos parámetros formales, así como cualquier columnas de tabla. +Allows describing a lambda function for passing to a higher-order function. The left side of the arrow has a formal parameter, which is any ID, or multiple formal parameters – any IDs in a tuple. The right side of the arrow has an expression that can use these formal parameters, as well as any table columns. Ejemplos: `x -> 2 * x, str -> str != Referer.` @@ -27,7 +30,7 @@ No se puede omitir una función lambda para las siguientes funciones: - [arrayFirst](#higher_order_functions-array-first) - [arrayFirstIndex](#higher_order_functions-array-first-index) -### ¿Cómo puedo hacerlo?, …) {#higher_order_functions-array-map} +### arrayMap(func, arr1, …) {#higher_order_functions-array-map} Devuelve una matriz obtenida de la aplicación original `func` función a cada elemento en el `arr` matriz. @@ -57,7 +60,7 @@ SELECT arrayMap((x, y) -> (x, y), [1, 2, 3], [4, 5, 6]) AS res Tenga en cuenta que el primer argumento (función lambda) no se puede omitir en el `arrayMap` función. -### ¿Cómo puedo hacerlo?, …) {#higher_order_functions-array-filter} +### arrayFilter(func, arr1, …) {#higher_order_functions-array-filter} Devuelve una matriz que contiene sólo los elementos en `arr1` para los cuales `func` devuelve algo distinto de 0. @@ -90,7 +93,7 @@ SELECT Tenga en cuenta que el primer argumento (función lambda) no se puede omitir en el `arrayFilter` función. -### ¿Cómo puedo hacerlo?, …) {#higher_order_functions-array-fill} +### arrayFill(func, arr1, …) {#higher_order_functions-array-fill} Escanear a través de `arr1` desde el primer elemento hasta el último elemento y reemplazar `arr1[i]` por `arr1[i - 1]` si `func` devuelve 0. El primer elemento de `arr1` no será reemplazado. 
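Un esbozo numérico de la semántica descrita arriba, con una condición distinta a la del ejemplo del documento:

``` sql
-- Cada cero toma el valor anterior conservado: [1,0,0,5,0,2] -> [1,1,1,5,5,2].
SELECT arrayFill(x -> x != 0, [1, 0, 0, 5, 0, 2]) AS res
```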
@@ -108,7 +111,7 @@ SELECT arrayFill(x -> not isNull(x), [1, null, 3, 11, 12, null, null, 5, 6, 14, Tenga en cuenta que el primer argumento (función lambda) no se puede omitir en el `arrayFill` función. -### ¿Cómo puedo hacerlo?, …) {#higher_order_functions-array-reverse-fill} +### arrayReverseFill(func, arr1, …) {#higher_order_functions-array-reverse-fill} Escanear a través de `arr1` del último elemento al primer elemento y reemplace `arr1[i]` por `arr1[i + 1]` si `func` devuelve 0. El último elemento de `arr1` no será reemplazado. @@ -126,7 +129,7 @@ SELECT arrayReverseFill(x -> not isNull(x), [1, null, 3, 11, 12, null, null, 5, Tenga en cuenta que el primer argumento (función lambda) no se puede omitir en el `arrayReverseFill` función. -### ¿Cómo puedo hacerlo?, …) {#higher_order_functions-array-split} +### arraySplit(func, arr1, …) {#higher_order_functions-array-split} Dividir `arr1` en múltiples matrices. Cuando `func` devuelve algo distinto de 0, la matriz se dividirá en el lado izquierdo del elemento. La matriz no se dividirá antes del primer elemento. @@ -144,7 +147,7 @@ SELECT arraySplit((x, y) -> y, [1, 2, 3, 4, 5], [1, 0, 0, 1, 0]) AS res Tenga en cuenta que el primer argumento (función lambda) no se puede omitir en el `arraySplit` función. -### ¿Cómo puedo hacerlo?, …) {#higher_order_functions-array-reverse-split} +### arrayReverseSplit(func, arr1, …) {#higher_order_functions-array-reverse-split} Dividir `arr1` en múltiples matrices. Cuando `func` devuelve algo distinto de 0, la matriz se dividirá en el lado derecho del elemento. La matriz no se dividirá después del último elemento. @@ -162,35 +165,35 @@ SELECT arrayReverseSplit((x, y) -> y, [1, 2, 3, 4, 5], [1, 0, 0, 1, 0]) AS res Tenga en cuenta que el primer argumento (función lambda) no se puede omitir en el `arraySplit` función. -### ¿Cómo puedo hacerlo?, …) {#higher_order_functions-array-count} +### arrayCount(\[func,\] arr1, …) {#higher_order_functions-array-count} Devuelve el número de elementos de la matriz arr para los cuales func devuelve algo distinto de 0. Si ‘func’ no se especifica, devuelve el número de elementos distintos de cero en la matriz. -### ¿Cómo puedo hacerlo?, …) {#arrayexistsfunc-arr1} +### arrayExists(\[func,\] arr1, …) {#arrayexistsfunc-arr1} Devuelve 1 si hay al menos un elemento en ‘arr’ para los cuales ‘func’ devuelve algo distinto de 0. De lo contrario, devuelve 0. -### ¿Cómo puedo hacerlo?, …) {#arrayallfunc-arr1} +### arrayAll(\[func,\] arr1, …) {#arrayallfunc-arr1} Devuelve 1 si ‘func’ devuelve algo distinto de 0 para todos los elementos en ‘arr’. De lo contrario, devuelve 0. -### ¿Cómo puedo hacerlo?, …) {#higher-order-functions-array-sum} +### arraySum(\[func,\] arr1, …) {#higher-order-functions-array-sum} Devuelve la suma de la ‘func’ valor. Si se omite la función, simplemente devuelve la suma de los elementos de la matriz. -### ¿Cómo puedo hacerlo?, …) {#higher_order_functions-array-first} +### arrayFirst(func, arr1, …) {#higher_order_functions-array-first} Devuelve el primer elemento en el ‘arr1’ matriz para la cual ‘func’ devuelve algo distinto de 0. Tenga en cuenta que el primer argumento (función lambda) no se puede omitir en el `arrayFirst` función. -### ¿Cómo puedo hacerlo?, …) {#higher_order_functions-array-first-index} +### arrayFirstIndex(func, arr1, …) {#higher_order_functions-array-first-index} Devuelve el índice del primer elemento ‘arr1’ matriz para la cual ‘func’ devuelve algo distinto de 0. 
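Un pequeño ejemplo ilustrativo (los índices empiezan en 1; un resultado de 0 indicaría que ningún elemento cumple la condición):

``` sql
-- El primer elemento mayor que 2 está en la posición 3.
SELECT arrayFirstIndex(x -> x > 2, [1, 2, 3, 4]) AS res
```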
Tenga en cuenta que el primer argumento (función lambda) no se puede omitir en el `arrayFirstIndex` función. -### ¿Cómo puedo hacerlo?, …) {#arraycumsumfunc-arr1} +### arrayCumSum(\[func,\] arr1, …) {#arraycumsumfunc-arr1} Devuelve una matriz de sumas parciales de elementos en la matriz de origen (una suma en ejecución). Si el `func` se especifica la función, luego los valores de los elementos de la matriz se convierten mediante esta función antes de sumar. @@ -220,7 +223,7 @@ SELECT arrayCumSumNonNegative([1, 1, -4, 1]) AS res └───────────┘ ``` -### ¿Cómo puedo hacerlo?, …) {#arraysortfunc-arr1} +### arraySort(\[func,\] arr1, …) {#arraysortfunc-arr1} Devuelve una matriz como resultado de ordenar los elementos de `arr1` en orden ascendente. Si el `func` se especifica la función, el orden de clasificación se determina por el resultado de la función `func` aplicado a los elementos de la matriz (arrays) @@ -240,9 +243,9 @@ SELECT arraySort((x, y) -> y, ['hello', 'world'], [2, 1]); Para obtener más información sobre el `arraySort` método, véase el [Funciones para trabajar con matrices](array_functions.md#array_functions-sort) apartado. -### ¿Cómo puedo hacerlo?, …) {#arrayreversesortfunc-arr1} +### arrayReverseSort(\[func,\] arr1, …) {#arrayreversesortfunc-arr1} -Devuelve una matriz como resultado de ordenar los elementos de `arr1` en orden descendente. Si el `func` se especifica la función, el orden de clasificación se determina por el resultado de la función `func` Aplicado a los elementos de la matriz (arrays). +Devuelve una matriz como resultado de ordenar los elementos de `arr1` en orden descendente. Si el `func` se especifica la función, el orden de clasificación se determina por el resultado de la función `func` aplicado a los elementos de la matriz (arrays). Ejemplo: @@ -258,4 +261,4 @@ SELECT arrayReverseSort((x, y) -> y, ['hello', 'world'], [2, 1]) as res; Para obtener más información sobre el `arrayReverseSort` método, véase el [Funciones para trabajar con matrices](array_functions.md#array_functions-reverse-sort) apartado. -[Artículo Original](https://clickhouse.tech/docs/es/query_language/functions/higher_order_functions/) +[Artículo Original](https://clickhouse.tech/docs/en/query_language/functions/higher_order_functions/) diff --git a/docs/es/query_language/functions/in_functions.md b/docs/es/sql_reference/functions/in_functions.md similarity index 63% rename from docs/es/query_language/functions/in_functions.md rename to docs/es/sql_reference/functions/in_functions.md index 9ecd8c1f8ad..ea593d14070 100644 --- a/docs/es/query_language/functions/in_functions.md +++ b/docs/es/sql_reference/functions/in_functions.md @@ -1,17 +1,20 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 60 +toc_title: "Implementaci\xF3n del operador IN" --- # Funciones para implementar el operador IN {#functions-for-implementing-the-in-operator} ## Información de uso {#in-functions} -Vea la sección [IN operadores](../select.md#select-in-operators). +Vea la sección [IN operadores](../statements/select.md#select-in-operators). -## tuple(x, y, …), operador (x, y, …) {#tuplex-y-operator-x-y} +## tuple(x, y, …), operator (x, y, …) {#tuplex-y-operator-x-y} Una función que permite agrupar varias columnas. -Para columnas con los tipos T1, T2, …, devuelve una tupla de tipo Tuple(T1, T2, …) que contiene estas columnas. No hay ningún costo para ejecutar la función. 
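Un esbozo de cómo una tupla construida así se compara de una vez con el operador IN:

``` sql
-- La tupla (1, 'a') se compara como un grupo contra una lista de tuplas.
SELECT (1, 'a') IN ((1, 'a'), (2, 'b')) AS hit
-- hit = 1
```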
+For columns with the types T1, T2, …, it returns a Tuple(T1, T2, …) type tuple containing these columns. There is no cost to execute the function. Las tuplas se usan normalmente como valores intermedios para un argumento de operadores IN, o para crear una lista de parámetros formales de funciones lambda. Las tuplas no se pueden escribir en una tabla. ## tupleElement(tuple, n), operator x.N {#tupleelementtuple-n-operator-x-n} @@ -20,4 +23,4 @@ Una función que permite obtener una columna de una tupla. ‘N’ es el índice de columna, comenzando desde 1. ‘N’ debe ser una constante: un entero positivo estricto no mayor que el tamaño de la tupla. No hay ningún costo para ejecutar la función. -[Artículo Original](https://clickhouse.tech/docs/es/query_language/functions/in_functions/) +[Artículo Original](https://clickhouse.tech/docs/en/query_language/functions/in_functions/) diff --git a/docs/es/query_language/functions/index.md b/docs/es/sql_reference/functions/index.md similarity index 80% rename from docs/es/query_language/functions/index.md rename to docs/es/sql_reference/functions/index.md index 80d528393ea..9a8c464cb12 100644 --- a/docs/es/query_language/functions/index.md +++ b/docs/es/sql_reference/functions/index.md @@ -1,10 +1,14 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_folder_title: Functions +toc_priority: 32 +toc_title: "Implantaci\xF3n" --- # Función {#functions} -Hay al menos \* dos tipos de funciones: funciones regulares (simplemente se llaman “functions”) y funciones agregadas. Estos son conceptos completamente diferentes. Las funciones regulares funcionan como si se aplicaran a cada fila por separado (para cada fila, el resultado de la función no depende de las otras filas). Las funciones agregadas acumulan un conjunto de valores de varias filas (es decir, dependen de todo el conjunto de filas). +Hay al menos \* dos tipos de funciones: funciones regulares (simplemente se llaman “functions”) and aggregate functions. These are completely different concepts. Regular functions work as if they are applied to each row separately (for each row, the result of the function doesn't depend on the other rows). Aggregate functions accumulate a set of values from various rows (i.e. they depend on the entire set of rows). En esta sección discutimos las funciones regulares. Para las funciones agregadas, consulte la sección “Aggregate functions”. @@ -26,7 +30,7 @@ Todas las funciones devuelven un único retorno como resultado (no varios valore Para simplificar, ciertas funciones solo pueden funcionar con constantes para algunos argumentos. Por ejemplo, el argumento correcto del operador LIKE debe ser una constante. Casi todas las funciones devuelven una constante para argumentos constantes. La excepción son las funciones que generan números aleatorios. -El ‘now’ función devuelve valores diferentes para las consultas que se ejecutaron en diferentes momentos, pero el resultado se considera una constante, ya que la constancia solo es importante dentro de una sola consulta. +El ‘now’ function devuelve valores diferentes para las consultas que se ejecutaron en diferentes momentos, pero el resultado se considera una constante, ya que la constancia solo es importante dentro de una sola consulta. Una expresión constante también se considera una constante (por ejemplo, la mitad derecha del operador LIKE se puede construir a partir de múltiples constantes).
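Un esbozo rápido del comportamiento de ‘now’ descrito arriba (la marca de tiempo concreta depende del momento de ejecución):

``` sql
SELECT now() AS t1, sleep(1), now() AS t2 -- t1 = t2: 'now' se evalúa una sola vez dentro de la consulta
```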
Las funciones se pueden implementar de diferentes maneras para argumentos constantes y no constantes (se ejecuta un código diferente). Pero los resultados para una constante y para una columna verdadera que contiene solo el mismo valor deben coincidir entre sí. @@ -40,7 +44,7 @@ Las funciones tienen los siguientes comportamientos: ## Constancia {#constancy} -Las funciones no pueden cambiar los valores de sus argumentos; cualquier cambio se devuelve como resultado. Por lo tanto, el resultado del cálculo de funciones separadas no depende del orden en que se escriban las funciones en la consulta. +Functions can't change the values of their arguments – any changes are returned as the result. Thus, the result of calculating separate functions does not depend on the order in which the functions are written in the query. ## Manejo de errores {#error-handling} @@ -58,8 +62,8 @@ Para el procesamiento de consultas distribuidas, se realizan tantas etapas de pr Esto significa que las funciones se pueden realizar en diferentes servidores. Por ejemplo, en la consulta `SELECT f(sum(g(x))) FROM distributed_table GROUP BY h(y),` -- Más información `distributed_table` tiene al menos dos fragmentos, las funciones ‘g’ y ‘h’ se realizan en servidores remotos, y la función ‘f’ se realiza en el servidor solicitante. -- Más información `distributed_table` tiene sólo un fragmento, todos los ‘f’, ‘g’, y ‘h’ funciones se realizan en el servidor de este fragmento. +- si una `distributed_table` tiene al menos dos fragmentos, las funciones ‘g’ y ‘h’ se realizan en servidores remotos, y la función ‘f’ se realiza en el servidor solicitante. +- si una `distributed_table` tiene sólo un fragmento, todos los ‘f’, ‘g’, y ‘h’ funciones se realizan en el servidor de este fragmento. El resultado de una función generalmente no depende del servidor en el que se realice. Sin embargo, a veces esto es importante. Por ejemplo, las funciones que funcionan con diccionarios utilizan el diccionario que existe en el servidor en el que se están ejecutando. @@ -67,4 +71,4 @@ Otro ejemplo es el `hostName` función, que devuelve el nombre del servidor en e Si se realiza una función en una consulta en el servidor solicitante, pero debe realizarla en servidores remotos, puede envolverla en un ‘any’ agregar o agregarlo a una clave en `GROUP BY`. -[Artículo Original](https://clickhouse.tech/docs/es/query_language/functions/) +[Artículo Original](https://clickhouse.tech/docs/en/query_language/functions/) diff --git a/docs/es/query_language/functions/introspection.md b/docs/es/sql_reference/functions/introspection.md similarity index 93% rename from docs/es/query_language/functions/introspection.md rename to docs/es/sql_reference/functions/introspection.md index 7dae8a35254..212950f2452 100644 --- a/docs/es/query_language/functions/introspection.md +++ b/docs/es/sql_reference/functions/introspection.md @@ -1,10 +1,13 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 65 +toc_title: "Introspecci\xF3n" --- # Funciones de introspección {#introspection-functions} -Puede utilizar las funciones descritas en este capítulo para [ELFO](https://en.wikipedia.org/wiki/Executable_and_Linkable_Format) y [ENANO](https://en.wikipedia.org/wiki/DWARF) para la creación de perfiles de consultas. 
+Puede utilizar las funciones descritas en este capítulo para [ELF](https://en.wikipedia.org/wiki/Executable_and_Linkable_Format) y [DWARF](https://en.wikipedia.org/wiki/DWARF) para la creación de perfiles de consultas. !!! warning "Advertencia" Estas funciones son lentas y pueden imponer consideraciones de seguridad. @@ -33,7 +36,7 @@ addressToLine(address_of_binary_instruction) **Parámetros** -- `address_of_binary_instruction` ([UInt64](../../data_types/int_uint.md)) — Dirección de la instrucción en un proceso en ejecución. +- `address_of_binary_instruction` ([UInt64](../../sql_reference/data_types/int_uint.md)) — Address of instruction in a running process. **Valor devuelto** @@ -45,7 +48,7 @@ addressToLine(address_of_binary_instruction) - Cadena vacía, si la dirección no es válida. -Tipo: [Cadena](../../data_types/string.md). +Tipo: [Cadena](../../sql_reference/data_types/string.md). **Ejemplo** @@ -124,14 +127,14 @@ addressToSymbol(address_of_binary_instruction) **Parámetros** -- `address_of_binary_instruction` ([UInt64](../../data_types/int_uint.md)) — Dirección de la instrucción en un proceso en ejecución. +- `address_of_binary_instruction` ([UInt64](../../sql_reference/data_types/int_uint.md)) — Address of instruction in a running process. **Valor devuelto** - Símbolo de archivos de objetos ClickHouse. - Cadena vacía, si la dirección no es válida. -Tipo: [Cadena](../../data_types/string.md). +Tipo: [Cadena](../../sql_reference/data_types/string.md). **Ejemplo** @@ -221,14 +224,14 @@ demangle(symbol) **Parámetros** -- `symbol` ([Cadena](../../data_types/string.md)) — Símbolo de un archivo de objeto. +- `symbol` ([Cadena](../../sql_reference/data_types/string.md)) — Symbol from an object file. **Valor devuelto** - Nombre de la función C++ - Cadena vacía si un símbolo no es válido. -Tipo: [Cadena](../../data_types/string.md). +Tipo: [Cadena](../../sql_reference/data_types/string.md). **Ejemplo** diff --git a/docs/es/query_language/functions/ip_address_functions.md b/docs/es/sql_reference/functions/ip_address_functions.md similarity index 93% rename from docs/es/query_language/functions/ip_address_functions.md rename to docs/es/sql_reference/functions/ip_address_functions.md index 5f55fc55fab..814db50684c 100644 --- a/docs/es/query_language/functions/ip_address_functions.md +++ b/docs/es/sql_reference/functions/ip_address_functions.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 55 +toc_title: Trabajar con direcciones IP --- # Funciones para trabajar con direcciones IP {#functions-for-working-with-ip-addresses} @@ -45,7 +48,7 @@ LIMIT 10 Desde el uso ‘xxx’ es altamente inusual, esto puede cambiarse en el futuro. Le recomendamos que no confíe en el formato exacto de este fragmento. -### ¿Cómo puedo hacerlo?) {#ipv6numtostringx} +### IPv6NumToString(x) {#ipv6numtostringx} Acepta un valor FixedString(16) que contiene la dirección IPv6 en formato binario. Devuelve una cadena que contiene esta dirección en formato de texto. Las direcciones IPv4 asignadas a IPv6 se emiten en el formato ::ffff:111.222.33.44. Ejemplos: @@ -150,7 +153,7 @@ SELECT └─────────────────────────────────────┴─────────────────────┘ ``` -## ¿Cómo puedo hacerlo?), {#ipv4cidrtorangeipv4-cidr} +## IPv4CIDRToRange(ipv4, cidr) {#ipv4cidrtorangeipv4-cidr} Acepta un valor IPv4 y UInt8 que contiene el valor [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing).
Devuelve una tupla con dos IPv4 que contienen el rango inferior y el rango superior de la subred. @@ -164,7 +167,7 @@ SELECT IPv4CIDRToRange(toIPv4('192.168.5.2'), 16) └────────────────────────────────────────────┘ ``` -## ¿Cómo puedo hacerlo?), {#ipv6cidrtorangeipv6-cidr} +## IPv6CIDRToRange(ipv6, cidr) {#ipv6cidrtorangeipv6-cidr} Acepta un valor IPv6 y UInt8 que contiene el CIDR. Devuelve una tupla con dos IPv6 que contienen el rango inferior y el rango superior de la subred. @@ -180,7 +183,7 @@ SELECT IPv6CIDRToRange(toIPv6('2001:0db8:0000:85a3:0000:0000:ac1f:8001'), 32); ## toIPv4(string) {#toipv4string} -Un alias para `IPv4StringToNum()` que toma una forma de cadena de dirección IPv4 y devuelve el valor de [IPv4](../../data_types/domains/ipv4.md) tipo, que es binario igual al valor devuelto por `IPv4StringToNum()`. +Un alias para `IPv4StringToNum()` que toma una forma de cadena de dirección IPv4 y devuelve el valor de [IPv4](../../sql_reference/data_types/domains/ipv4.md) tipo, que es binario igual al valor devuelto por `IPv4StringToNum()`. ``` sql WITH @@ -212,7 +215,7 @@ SELECT ## toIPv6(string) {#toipv6string} -Un alias para `IPv6StringToNum()` que toma una forma de cadena de dirección IPv6 y devuelve el valor de [IPv6](../../data_types/domains/ipv6.md) tipo, que es binario igual al valor devuelto por `IPv6StringToNum()`. +Un alias para `IPv6StringToNum()` que toma una forma de cadena de dirección IPv6 y devuelve el valor de [IPv6](../../sql_reference/data_types/domains/ipv6.md) tipo, que es binario igual al valor devuelto por `IPv6StringToNum()`. ``` sql WITH @@ -242,4 +245,4 @@ SELECT └───────────────────────────────────┴──────────────────────────────────┘ ``` -[Artículo Original](https://clickhouse.tech/docs/es/query_language/functions/ip_address_functions/) +[Artículo Original](https://clickhouse.tech/docs/en/query_language/functions/ip_address_functions/) diff --git a/docs/es/query_language/functions/json_functions.md b/docs/es/sql_reference/functions/json_functions.md similarity index 86% rename from docs/es/query_language/functions/json_functions.md rename to docs/es/sql_reference/functions/json_functions.md index d8732b281e1..ed7f9ab7312 100644 --- a/docs/es/query_language/functions/json_functions.md +++ b/docs/es/sql_reference/functions/json_functions.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 56 +toc_title: Trabajando con JSON. --- # Funciones para trabajar con JSON {#functions-for-working-with-json} @@ -9,7 +12,7 @@ En el Yandex.Metrica, JSON es transmitido por los usuarios como parámetros de s Se hacen las siguientes suposiciones: 1. El nombre de campo (argumento de función) debe ser una constante. -2. El nombre del campo de alguna manera está codificado canónicamente en JSON. Por ejemplo: `visitParamHas('{"abc":"def"}', 'abc') = 1` pero `visitParamHas('{"\\u0061\\u0062\\u0063":"def"}', 'abc') = 0` +2. El nombre del campo de alguna manera está codificado canónicamente en JSON. Por ejemplo: `visitParamHas('{"abc":"def"}', 'abc') = 1`, pero `visitParamHas('{"\\u0061\\u0062\\u0063":"def"}', 'abc') = 0` 3. Los campos se buscan en cualquier nivel de anidación, indiscriminadamente. Si hay varios campos coincidentes, se utiliza la primera aparición. 4. El JSON no tiene caracteres de espacio fuera de los literales de cadena.
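Un ejemplo mínimo de la suposición 2, tomando las mismas igualdades:

``` sql
SELECT
    visitParamHas('{"abc":"def"}', 'abc') AS encontrado,                      -- 1: nombre codificado canónicamente
    visitParamHas('{"\\u0061\\u0062\\u0063":"def"}', 'abc') AS no_encontrado -- 0: nombre con escapes \u
```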
@@ -44,7 +47,7 @@ visitParamExtractRaw('{"abc":"\\n\\u0000"}', 'abc') = '"\\n\\u0000"' visitParamExtractRaw('{"abc":{"def":[1,2,3]}}', 'abc') = '{"def":[1,2,3]}' ``` -## Por favor, consulte el siguiente enlace:) {#visitparamextractstringparams-name} +## visitParamExtractString(params, nombre) {#visitparamextractstringparams-name} Analiza la cadena entre comillas dobles. El valor es sin escape. Si no se pudo desescapar, devuelve una cadena vacía. @@ -72,7 +75,7 @@ SELECT isValidJSON('{"a": "hello", "b": [-100, 200.0, 300]}') = 1 SELECT isValidJSON('not a json') = 0 ``` -## ¿Cómo puedo hacerlo?\]…) {#jsonhasjson-indices-or-keys} +## JSONHas(json\[, indices\_or\_keys\]…) {#jsonhasjson-indices-or-keys} Si el valor existe en el documento JSON, `1` serán devueltos. @@ -105,7 +108,7 @@ SELECT JSONExtractKey('{"a": "hello", "b": [-100, 200.0, 300]}', -2) = 'a' SELECT JSONExtractString('{"a": "hello", "b": [-100, 200.0, 300]}', 1) = 'hello' ``` -## ¿Cómo puedo hacerlo?\]…) {#jsonlengthjson-indices-or-keys} +## JSONLength(json\[, indices\_or\_keys\]…) {#jsonlengthjson-indices-or-keys} Devuelve la longitud de una matriz JSON o un objeto JSON. @@ -118,7 +121,7 @@ SELECT JSONLength('{"a": "hello", "b": [-100, 200.0, 300]}', 'b') = 3 SELECT JSONLength('{"a": "hello", "b": [-100, 200.0, 300]}') = 2 ``` -## ¿Cómo puedo hacerlo?\]…) {#jsontypejson-indices-or-keys} +## JSONType(json\[, indices\_or\_keys\]…) {#jsontypejson-indices-or-keys} Devuelve el tipo de un valor JSON. @@ -132,13 +135,13 @@ SELECT JSONType('{"a": "hello", "b": [-100, 200.0, 300]}', 'a') = 'String' SELECT JSONType('{"a": "hello", "b": [-100, 200.0, 300]}', 'b') = 'Array' ``` -## ¿Cómo puedo hacerlo?\]…) {#jsonextractuintjson-indices-or-keys} +## JSONExtractUInt(json\[, indices\_or\_keys\]…) {#jsonextractuintjson-indices-or-keys} -## ¿Cómo puedo hacerlo?\]…) {#jsonextractintjson-indices-or-keys} +## JSONExtractInt(json\[, indices\_or\_keys\]…) {#jsonextractintjson-indices-or-keys} -## ¿Cómo puedo hacerlo?\]…) {#jsonextractfloatjson-indices-or-keys} +## JSONExtractFloat(json\[, indices\_or\_keys\]…) {#jsonextractfloatjson-indices-or-keys} -## ¿Cómo puedo hacerlo?\]…) {#jsonextractbooljson-indices-or-keys} +## JSONExtractBool(json\[, indices\_or\_keys\]…) {#jsonextractbooljson-indices-or-keys} Analiza un JSON y extrae un valor. Estas funciones son similares a `visitParam` función. @@ -152,7 +155,7 @@ SELECT JSONExtractFloat('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 2) = 200 SELECT JSONExtractUInt('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', -1) = 300 ``` -## Por ejemplo, puede utilizar el siguiente ejemplo:\]…) {#jsonextractstringjson-indices-or-keys} +## JSONExtractString(json\[, indices\_or\_keys\]…) {#jsonextractstringjson-indices-or-keys} Analiza un JSON y extrae una cadena. Esta función es similar a `visitParamExtractString` función. @@ -170,7 +173,7 @@ SELECT JSONExtractString('{"abc":"\\u263"}', 'abc') = '' SELECT JSONExtractString('{"abc":"hello}', 'abc') = '' ``` -## Por ejemplo, se puede utilizar el siguiente método:) {#jsonextractjson-indices-or-keys-return-type} +## JSONExtract(json\[, indices\_or\_keys…\], return\_type) {#jsonextractjson-indices-or-keys-return-type} Analiza un JSON y extrae un valor del tipo de datos ClickHouse dado. 
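Por ejemplo, una invocación típica (el literal JSON es arbitrario):

``` sql
SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 'Array(Float64)') AS res
```

``` text
┌─res────────────┐
│ [-100,200,300] │
└────────────────┘
```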
@@ -201,7 +204,7 @@ Ejemplo: SELECT JSONExtractKeysAndValues('{"x": {"a": 5, "b": 7, "c": 11}}', 'x', 'Int8') = [('a',5),('b',7),('c',11)]; ``` -## ¿Cómo puedo hacerlo?\]…) {#jsonextractrawjson-indices-or-keys} +## JSONExtractRaw(json\[, indices\_or\_keys\]…) {#jsonextractrawjson-indices-or-keys} Devuelve una parte de JSON. @@ -213,7 +216,7 @@ Ejemplo: SELECT JSONExtractRaw('{"a": "hello", "b": [-100, 200.0, 300]}', 'b') = '[-100, 200.0, 300]' ``` -## Por ejemplo, puede utilizar el siguiente ejemplo:\]…) {#jsonextractarrayrawjson-indices-or-keys} +## JSONExtractArrayRaw(json\[, indices\_or\_keys\]…) {#jsonextractarrayrawjson-indices-or-keys} Devuelve una matriz con elementos de matriz JSON, cada uno representado como cadena sin analizar. @@ -225,4 +228,4 @@ Ejemplo: SELECT JSONExtractArrayRaw('{"a": "hello", "b": [-100, 200.0, "hello"]}', 'b') = ['-100', '200.0', '"hello"']' ``` -[Artículo Original](https://clickhouse.tech/docs/es/query_language/functions/json_functions/) +[Artículo Original](https://clickhouse.tech/docs/en/query_language/functions/json_functions/) diff --git a/docs/es/query_language/functions/logical_functions.md b/docs/es/sql_reference/functions/logical_functions.md similarity index 73% rename from docs/es/query_language/functions/logical_functions.md rename to docs/es/sql_reference/functions/logical_functions.md index 5de5607f134..5094c9ef000 100644 --- a/docs/es/query_language/functions/logical_functions.md +++ b/docs/es/sql_reference/functions/logical_functions.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 37 +toc_title: "L\xF3gico" --- # Funciones lógicas {#logical-functions} @@ -16,4 +19,4 @@ Cero como argumento se considera “false,” mientras que cualquier valor disti ## xor {#xor} -[Artículo Original](https://clickhouse.tech/docs/es/query_language/functions/logical_functions/) +[Artículo Original](https://clickhouse.tech/docs/en/query_language/functions/logical_functions/) diff --git a/docs/es/sql_reference/functions/machine_learning_functions.md b/docs/es/sql_reference/functions/machine_learning_functions.md new file mode 100644 index 00000000000..fd695934937 --- /dev/null +++ b/docs/es/sql_reference/functions/machine_learning_functions.md @@ -0,0 +1,20 @@ +--- +machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 64 +toc_title: "Funciones de aprendizaje autom\xE1tico" +--- + +# Funciones de aprendizaje automático {#machine-learning-functions} + +## evalMLMethod (predicción) {#machine_learning_methods-evalmlmethod} + +Predicción utilizando modelos de regresión ajustados utiliza `evalMLMethod` función. Ver enlace en `linearRegression`. + +### Regresión lineal estocástica {#stochastic-linear-regression} + +El [stochasticLinearRegression](../../sql_reference/aggregate_functions/reference.md#agg_functions-stochasticlinearregression) la función agregada implementa el método de descenso de gradiente estocástico utilizando el modelo lineal y la función de pérdida MSE. Utilizar `evalMLMethod` para predecir sobre nuevos datos. + +### Regresión logística estocástica {#stochastic-logistic-regression} + +El [stochasticLogisticRegression](../../sql_reference/aggregate_functions/reference.md#agg_functions-stochasticlogisticregression) la función de agregado implementa el método de descenso de gradiente estocástico para el problema de clasificación binaria. Utilizar `evalMLMethod` para predecir sobre nuevos datos. 
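Un esbozo mínimo de ese flujo de trabajo (los nombres de tablas y columnas, así como los parámetros de entrenamiento, son hipotéticos):

``` sql
-- Ajustar el modelo y guardar su estado (combinador -State de la función agregada)
CREATE TABLE modelo ENGINE = Memory AS
SELECT stochasticLinearRegressionState(0.1, 0.0, 5, 'SGD')(objetivo, factor1, factor2) AS state
FROM datos_entrenamiento;

-- Predecir sobre datos nuevos con evalMLMethod
WITH (SELECT state FROM modelo) AS model
SELECT evalMLMethod(model, factor1, factor2) AS prediccion
FROM datos_prueba;
```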
diff --git a/docs/es/query_language/functions/math_functions.md b/docs/es/sql_reference/functions/math_functions.md similarity index 84% rename from docs/es/query_language/functions/math_functions.md rename to docs/es/sql_reference/functions/math_functions.md index 1cc15ba944d..6654ba5187b 100644 --- a/docs/es/query_language/functions/math_functions.md +++ b/docs/es/sql_reference/functions/math_functions.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 44 +toc_title: "Matem\xE1tica" --- # Funciones matemáticas {#mathematical-functions} @@ -12,7 +15,7 @@ Devuelve un número Float64 que está cerca del número e. ## pi() {#pi} -Devuelve un número Float64 que está cerca del número π. +Returns a Float64 number that is close to the number π. ## exp(x) {#expx} @@ -48,7 +51,7 @@ Acepta un argumento numérico y devuelve un número Float64 cercano a la raíz c ## erf(x) {#erfx} -Si ‘x’ no es negativo, entonces erf(x / σ√2) es la probabilidad de que una variable aleatoria tenga una distribución normal con desviación estándar ‘σ’ toma el valor que está separado del valor esperado en más de ‘x’. +Si ‘x’ is non-negative, then erf(x / σ√2) es la probabilidad de que una variable aleatoria tenga una distribución normal con desviación estándar ‘σ’ toma el valor que está separado del valor esperado en más de ‘x’. Ejemplo (regla de tres sigma): @@ -110,4 +113,4 @@ Acepta un argumento numérico y devuelve un número UInt64 cercano a 2 a la pote Acepta un argumento numérico y devuelve un número UInt64 cercano a 10 a la potencia de x. -[Artículo Original](https://clickhouse.tech/docs/es/query_language/functions/math_functions/) +[Artículo Original](https://clickhouse.tech/docs/en/query_language/functions/math_functions/) diff --git a/docs/es/query_language/functions/other_functions.md b/docs/es/sql_reference/functions/other_functions.md similarity index 89% rename from docs/es/query_language/functions/other_functions.md rename to docs/es/sql_reference/functions/other_functions.md index 8201620f809..abd97f28c9d 100644 --- a/docs/es/query_language/functions/other_functions.md +++ b/docs/es/sql_reference/functions/other_functions.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 66 +toc_title: Otro --- # Otras funciones {#other-functions} @@ -52,7 +55,7 @@ basename( expr ) **Parámetros** -- `expr` — Expresión que da como resultado un [Cadena](../../data_types/string.md) valor de tipo. Todas las barras diagonales inversas deben escaparse en el valor resultante. +- `expr` — Expression resulting in a [Cadena](../../sql_reference/data_types/string.md) valor de tipo. Todas las barras diagonales inversas deben escaparse en el valor resultante. **Valor devuelto** @@ -101,7 +104,7 @@ SELECT 'some-file-name' AS a, basename(a) Calcula el ancho aproximado al enviar valores a la consola en formato de texto (separado por tabuladores). Esta función es utilizada por el sistema para implementar formatos Pretty. -`NULL` se representa como una cadena correspondiente a `NULL` es `Pretty` formato. +`NULL` se representa como una cadena correspondiente a `NULL` en `Pretty` formato. ``` sql SELECT visibleWidth(NULL) @@ -129,7 +132,7 @@ En ClickHouse, las consultas siempre se ejecutan en bloques (conjuntos de partes Convierte una constante en una columna completa que contiene solo un valor.
En ClickHouse, las columnas completas y las constantes se representan de manera diferente en la memoria. Las funciones funcionan de manera diferente para argumentos constantes y argumentos normales (se ejecuta un código diferente), aunque el resultado es casi siempre el mismo. Esta función es para depurar este comportamiento. -## ignorar(…) {#ignore} +## ignore(…) {#ignore} Acepta cualquier argumento, incluyendo `NULL`. Siempre devuelve 0. Sin embargo, el argumento aún se evalúa. Esto se puede usar para puntos de referencia. @@ -138,7 +141,7 @@ Sin embargo, el argumento aún se evalúa. Esto se puede usar para puntos de ref Dormir ‘seconds’ segundos en cada bloque de datos. Puede especificar un número entero o un número de punto flotante. -## SueñoCada fila(segundos) {#sleepeachrowseconds} +## sleepEachRow(segundos) {#sleepeachrowseconds} Dormir ‘seconds’ segundos en cada fila. Puede especificar un número entero o un número de punto flotante. @@ -184,7 +187,7 @@ Resultado: Acepta Float32 y Float64 y devuelve UInt8 igual a 1 si el argumento no es infinito y no es un NaN, de lo contrario 0. -## IsInfinite (x) {#isinfinitex} +## IsInfinite(x) {#isinfinitex} Acepta Float32 y Float64 y devuelve UInt8 igual a 1 si el argumento es infinito, de lo contrario 0. Tenga en cuenta que se devuelve 0 para un NaN. @@ -198,8 +201,8 @@ Comprueba si el valor de punto flotante es finito. **Parámetros** -- `x` — Valor que debe comprobarse para el infinito. Tipo: [Flotante\*](../../data_types/float.md). -- `y` — Valor de reserva. Tipo: [Flotante\*](../../data_types/float.md). +- `x` — Value to be checked for infinity. Type: [Flotante\*](../../sql_reference/data_types/float.md). +- `y` — Fallback value. Type: [Flotante\*](../../sql_reference/data_types/float.md). **Valor devuelto** @@ -218,7 +221,7 @@ Resultado: │ inf │ 42 │ └─────────┴───────────────────────────────┘ -Puede obtener un resultado similar usando [Operador ternario](conditional_functions.md#ternary-operator): `isFinite(x) ? x : y`. +Puede obtener un resultado similar usando [operador ternario](conditional_functions.md#ternary-operator): `isFinite(x) ? x : y`. ## isNaN(x) {#isnanx} @@ -234,13 +237,13 @@ Para los elementos de una estructura de datos anidada, la función comprueba la Permite construir un diagrama unicode-art. -`bar(x, min, max, width)` Dibuja una banda con un ancho proporcional a `(x - min)` E igual a `width` Caracteres cuando `x = max`. +`bar(x, min, max, width)` dibuja una banda con un ancho proporcional a `(x - min)` e igual a `width` caracteres cuando `x = max`. Parámetros: -- `x` — Tamaño para mostrar. -- `min, max` — Constantes enteras. El valor debe encajar en `Int64`. -- `width` — Constante, entero positivo, puede ser fraccional. +- `x` — Size to display. +- `min, max` — Integer constants. The value must fit in `Int64`. +- `width` — Constant, positive integer, can be fractional. La banda se dibuja con precisión a un octavo de un símbolo. @@ -285,22 +288,22 @@ ORDER BY h ASC └────┴────────┴────────────────────┘ ``` -## Ciudad {#transform} +## transformar {#transform} Transforma un valor de acuerdo con la asignación explícitamente definida de algunos elementos a otros. Hay dos variaciones de esta función: ### ¿Cómo puedo hacerlo?) {#transformx-array-from-array-to-default} -`x` – Qué transformar. +`x` – What to transform. -`array_from` – Matriz constante de valores para la conversión. +`array_from` – Constant array of values for converting. -`array_to` – Matriz constante de valores para convertir los valores en ‘from’ Naciones. 
+`array_to` – Constant array of values to convert the values in ‘from’ a. -`default` – ¿Qué valor utilizar si ‘x’ no es igual a ninguno de los valores en ‘from’. +`default` – Which value to use if ‘x’ no es igual a ninguno de los valores en ‘from’. -`array_from` y `array_to` – Matrices del mismo tamaño. +`array_from` y `array_to` – Arrays of the same size. Tipo: @@ -392,7 +395,7 @@ SELECT Devuelve el valor más pequeño de a y b. -## alcalde(a, b) {#greatesta-b} +## mayor(a, b) {#greatesta-b} Devuelve el valor más grande de a y b. @@ -435,13 +438,13 @@ Si realiza una subconsulta con ORDER BY y llama a la función desde fuera de la **Parámetros** -- `column` — Un nombre de columna o una expresión escalar. -- `offset` — El número de filas hacia delante o hacia atrás desde la fila actual de `column`. [Int64](../../data_types/int_uint.md). -- `default_value` — Opcional. El valor que se devolverá si offset va más allá del alcance del bloque. Tipo de bloques de datos afectados. +- `column` — A column name or scalar expression. +- `offset` — The number of rows forwards or backwards from the current row of `column`. [Int64](../../sql_reference/data_types/int_uint.md). +- `default_value` — Optional. The value to be returned if offset goes beyond the scope of the block. Type of data blocks affected. **Valores devueltos** -- Valor para `column` es `offset` distancia de la fila actual si `offset` valor no está fuera de los límites del bloque. +- Valor para `column` en `offset` distancia de la fila actual si `offset` valor no está fuera de los límites del bloque. - Valor predeterminado para `column` si `offset` valor está fuera de los límites del bloque. Si `default_value` se da, entonces será utilizado. Tipo: tipo de bloques de datos afectados o tipo de valor predeterminado. @@ -533,7 +536,7 @@ Resultado: ## EjecuciónDiferencia (x) {#other_functions-runningdifference} -Calcula la diferencia entre los sucesivos valores de fila en el bloque de datos. +Calculates the difference between successive row values ​​in the data block. Devuelve 0 para la primera fila y la diferencia con respecto a la fila anterior para cada fila subsiguiente. El resultado de la función depende de los bloques de datos afectados y del orden de los datos en el bloque. @@ -607,7 +610,7 @@ WHERE diff != 1 Lo mismo que para [runningDifference](./other_functions.md#other_functions-runningdifference), la diferencia es el valor de la primera fila, devolvió el valor de la primera fila, y cada fila subsiguiente devuelve la diferencia de la fila anterior. -## ¿Cómo puedo hacerlo?) {#macnumtostringnum} +## ¿cómo puedo hacerlo?) {#macnumtostringnum} Acepta un número UInt64. Lo interpreta como una dirección MAC en big endian. Devuelve una cadena que contiene la dirección MAC correspondiente con el formato AA:BB:CC:DD:EE:FF (números separados por dos puntos en forma hexadecimal). @@ -621,7 +624,7 @@ Acepta una dirección MAC con el formato AA:BB:CC:DD:EE:FF (números separados p ## getSizeOfEnumType {#getsizeofenumtype} -Devuelve el número de campos en [Enum](../../data_types/enum.md). +Devuelve el número de campos en [Enum](../../sql_reference/data_types/enum.md). ``` sql getSizeOfEnumType(value) @@ -629,7 +632,7 @@ getSizeOfEnumType(value) **Parámetros:** -- `value` — Valor del tipo `Enum`. +- `value` — Value of type `Enum`. **Valores devueltos** @@ -658,7 +661,7 @@ blockSerializedSize(value[, value[, ...]]) **Parámetros:** -- `value` — Cualquier valor. +- `value` — Any value. 
**Valores devueltos** @@ -686,7 +689,7 @@ toColumnTypeName(value) **Parámetros:** -- `value` — Cualquier tipo de valor. +- `value` — Any type of value. **Valores devueltos** @@ -726,7 +729,7 @@ dumpColumnStructure(value) **Parámetros:** -- `value` — Cualquier tipo de valor. +- `value` — Any type of value. **Valores devueltos** @@ -756,13 +759,13 @@ defaultValueOfArgumentType(expression) **Parámetros:** -- `expression` — Tipo de valor arbitrario o una expresión que da como resultado un valor de tipo arbitrario. +- `expression` — Arbitrary type of value or an expression that results in a value of an arbitrary type. **Valores devueltos** - `0` para los números. - Cadena vacía para cadenas. -- `ᴺᵁᴸᴸ` para [NULO](../../data_types/nullable.md). +- `ᴺᵁᴸᴸ` para [NULL](../../sql_reference/data_types/nullable.md). **Ejemplo** @@ -798,8 +801,8 @@ SELECT replicate(x, arr); **Parámetros:** -- `arr` — Matriz original. ClickHouse crea una nueva matriz de la misma longitud que el original y la llena con el valor `x`. -- `x` — El valor con el que se llenará la matriz resultante. +- `arr` — Original array. ClickHouse creates a new array of the same length as the original and fills it with the value `x`. +- `x` — The value that the resulting array will be filled with. **Valor devuelto** @@ -837,7 +840,7 @@ filesystemAvailable() - La cantidad de espacio restante disponible en bytes. -Tipo: [UInt64](../../data_types/int_uint.md). +Tipo: [UInt64](../../sql_reference/data_types/int_uint.md). **Ejemplo** @@ -869,7 +872,7 @@ filesystemFree() - Cantidad de espacio libre en bytes. -Tipo: [UInt64](../../data_types/int_uint.md). +Tipo: [UInt64](../../sql_reference/data_types/int_uint.md). **Ejemplo** @@ -889,7 +892,7 @@ Resultado: ## sistema de archivosCapacidad {#filesystemcapacity} -Devuelve la capacidad del sistema de archivos en bytes. Para la evaluación, el [camino](../../operations/server_settings/settings.md#server_settings-path) al directorio de datos debe estar configurado. +Devuelve la capacidad del sistema de archivos en bytes. Para la evaluación, el [camino](../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-path) al directorio de datos debe estar configurado. **Sintaxis** @@ -901,7 +904,7 @@ filesystemCapacity() - Información de capacidad del sistema de archivos en bytes. -Tipo: [UInt64](../../data_types/int_uint.md). +Tipo: [UInt64](../../sql_reference/data_types/int_uint.md). **Ejemplo** @@ -931,9 +934,9 @@ Por lo tanto, el resultado de la función depende de la partición de los datos ## joinGet {#joinget} -La función le permite extraer datos de la tabla de la misma manera que [Diccionario](../../query_language/dicts/index.md). +La función le permite extraer datos de la tabla de la misma manera que [diccionario](../../sql_reference/dictionaries/index.md). -Obtiene datos de [Unir](../../operations/table_engines/join.md#creating-a-table) usando la clave de unión especificada. +Obtiene datos de [Unir](../../engines/table_engines/special/join.md#creating-a-table) usando la clave de unión especificada. Solo admite tablas creadas con `ENGINE = Join(ANY, LEFT, )` instrucción. @@ -945,9 +948,9 @@ joinGet(join_storage_table_name, `value_column`, join_keys) **Parámetros** -- `join_storage_table_name` — un [identificador](../syntax.md#syntax-identifiers) indica dónde se realiza la búsqueda. El identificador se busca en la base de datos predeterminada (ver parámetro `default_database` en el archivo de configuración). 
Para reemplazar la base de datos predeterminada, utilice `USE db_name` o especifique la base de datos y la tabla a través del separador `db_name.db_table`, ver el ejemplo. -- `value_column` — nombre de la columna de la tabla que contiene los datos necesarios. -- `join_keys` — lista de claves. +- `join_storage_table_name` — an [identificador](../syntax.md#syntax-identifiers) indica dónde se realiza la búsqueda. El identificador se busca en la base de datos predeterminada (ver parámetro `default_database` en el archivo de configuración). Para reemplazar la base de datos predeterminada, utilice `USE db_name` o especifique la base de datos y la tabla a través del separador `db_name.db_table`, ver el ejemplo. +- `value_column` — name of the column of the table that contains required data. +- `join_keys` — list of keys. **Valor devuelto** @@ -955,7 +958,7 @@ Devuelve la lista de valores correspondientes a la lista de claves. Si cierto no existe en la tabla fuente, entonces `0` o `null` será devuelto basado en [Sistema abierto.](../../operations/settings/settings.md#join_use_nulls) configuración. -Más información sobre `join_use_nulls` es [Únase a la operación](../../operations/table_engines/join.md). +Más información sobre `join_use_nulls` en [Únase a la operación](../../engines/table_engines/special/join.md). **Ejemplo** @@ -992,7 +995,7 @@ Resultado: └──────────────────────────────────────────────────┘ ``` -## modelEvaluate (nombre\_modelo, …) {#function-modelevaluate} +## modelEvaluate(model\_name, …) {#function-modelevaluate} Evaluar modelo externo. Acepta un nombre de modelo y argumentos de modelo. Devuelve Float64. @@ -1049,7 +1052,7 @@ randomPrintableASCII(length) **Parámetros** -- `length` — Longitud de cadena resultante. Entero positivo. +- `length` — Resulting string length. Positive integer. If you pass `length < 0`, behavior of the function is undefined. @@ -1057,7 +1060,7 @@ randomPrintableASCII(length) - Cadena con un conjunto aleatorio de [ASCII](https://en.wikipedia.org/wiki/ASCII#Printable_characters) caracteres imprimibles. -Tipo: [Cadena](../../data_types/string.md) +Tipo: [Cadena](../../sql_reference/data_types/string.md) **Ejemplo** @@ -1073,4 +1076,4 @@ SELECT number, randomPrintableASCII(30) as str, length(str) FROM system.numbers └────────┴────────────────────────────────┴──────────────────────────────────┘ ``` -[Artículo Original](https://clickhouse.tech/docs/es/query_language/functions/other_functions/) +[Artículo Original](https://clickhouse.tech/docs/en/query_language/functions/other_functions/) diff --git a/docs/es/query_language/functions/random_functions.md b/docs/es/sql_reference/functions/random_functions.md similarity index 84% rename from docs/es/query_language/functions/random_functions.md rename to docs/es/sql_reference/functions/random_functions.md index 19547210aa0..cb339f6692b 100644 --- a/docs/es/query_language/functions/random_functions.md +++ b/docs/es/sql_reference/functions/random_functions.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 51 +toc_title: "Generaci\xF3n de n\xFAmeros pseudo-aleatorios" --- # Funciones para generar números pseudoaleatorios {#functions-for-generating-pseudo-random-numbers} @@ -24,4 +27,4 @@ Utiliza un generador congruente lineal. Devuelve un número pseudoaleatorio UInt32, El valor es uno para diferentes bloques. 
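Por ejemplo (la salida cambia en cada ejecución, al tratarse de valores pseudoaleatorios):

``` sql
SELECT rand() AS r, rand64() AS r64, randConstant() AS rc
FROM numbers(3) -- r y r64 varían por fila; rc repite el mismo valor dentro del bloque
```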
-[Artículo Original](https://clickhouse.tech/docs/es/query_language/functions/random_functions/) +[Artículo Original](https://clickhouse.tech/docs/en/query_language/functions/random_functions/) diff --git a/docs/es/query_language/functions/rounding_functions.md b/docs/es/sql_reference/functions/rounding_functions.md similarity index 79% rename from docs/es/query_language/functions/rounding_functions.md rename to docs/es/sql_reference/functions/rounding_functions.md index 7d9eb229586..ea96a7b393a 100644 --- a/docs/es/query_language/functions/rounding_functions.md +++ b/docs/es/sql_reference/functions/rounding_functions.md @@ -1,12 +1,15 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 45 +toc_title: Redondeo --- # Funciones de redondeo {#rounding-functions} -## Piso(x\[, N\]) {#floorx-n} +## piso(x\[, N\]) {#floorx-n} -Devuelve el número de ronda más grande que es menor o igual que `x`. Un número redondo es un múltiplo de 1/10N, o el número más cercano del tipo de datos apropiado si 1 / 10N no es exacto. +Devuelve el número de ronda más grande que es menor o igual que `x`. Un número redondo es un múltiplo de 1 / 10N, o el número más cercano del tipo de datos apropiado si 1 / 10N no es exacto. ‘N’ es una constante entera, parámetro opcional. Por defecto es cero, lo que significa redondear a un entero. ‘N’ puede ser negativo. @@ -28,7 +31,7 @@ Devuelve el número redondo con el valor absoluto más grande que tiene un valor Redondea un valor a un número especificado de decimales. -La función devuelve el número más cercano del orden especificado. En caso de que el número dado tenga la misma distancia que los números circundantes, la función utiliza el redondeo de banquero para los tipos de números flotantes y redondea desde cero para los otros tipos de números. +La función devuelve el número más cercano del orden especificado. En caso de que el número dado tenga la misma distancia que los números circundantes, la función utiliza el redondeo del banquero para los tipos de números flotantes y se redondea desde cero para los otros tipos de números. ``` sql round(expression [, decimal_places]) @@ -36,8 +39,8 @@ round(expression [, decimal_places]) **Parámetros:** -- `expression` — Un número que se redondeará. Puede ser cualquier [expresión](../syntax.md#syntax-expressions) devolviendo el numérico [Tipo de datos](../../data_types/index.md#data_types). -- `decimal-places` — Un valor entero. +- `expression` — A number to be rounded. Can be any [expresion](../syntax.md#syntax-expressions) devolviendo el numérico [tipo de datos](../../sql_reference/data_types/index.md#data_types). +- `decimal-places` — An integer value. - Si `decimal-places > 0` luego la función redondea el valor a la derecha del punto decimal. - Si `decimal-places < 0` luego la función redondea el valor a la izquierda del punto decimal. - Si `decimal-places = 0` entonces la función redondea el valor a entero. En este caso, el argumento puede omitirse. @@ -91,7 +94,7 @@ round(3.65, 1) = 3.6 Redondea un número a una posición decimal especificada. -- Si el número de redondeo está a medio camino entre dos números, la función utiliza el redondeo de banquero. +- Si el número de redondeo está a medio camino entre dos números, la función utiliza el redondeo del banquero. Banker's rounding is a method of rounding fractional numbers. When the rounding number is halfway between two numbers, it's rounded to the nearest even digit at the specified decimal position. 
For example: 3.5 rounds up to 4, 2.5 rounds down to 2. @@ -115,11 +118,11 @@ roundBankers(expression [, decimal_places]) **Parámetros** -- `expression` — Un número que se redondeará. Puede ser cualquier [expresión](../syntax.md#syntax-expressions) devolviendo el numérico [Tipo de datos](../../data_types/index.md#data_types). -- `decimal-places` — Lugares decimales. Un número entero. - - `decimal-places > 0` — La función redondea el número a la derecha del punto decimal. Ejemplo: `roundBankers(3.55, 1) = 3.6`. - - `decimal-places < 0` — La función redondea el número a la izquierda del punto decimal. Ejemplo: `roundBankers(24.55, -1) = 20`. - - `decimal-places = 0` — La función redondea el número a un entero. En este caso, el argumento puede omitirse. Ejemplo: `roundBankers(2.5) = 2`. +- `expression` — A number to be rounded. Can be any [expresion](../syntax.md#syntax-expressions) devolviendo el numérico [tipo de datos](../../sql_reference/data_types/index.md#data_types). +- `decimal-places` — Decimal places. An integer number. + - `decimal-places > 0` — The function rounds the number to the given position right of the decimal point. Example: `roundBankers(3.55, 1) = 3.6`. + - `decimal-places < 0` — The function rounds the number to the given position left of the decimal point. Example: `roundBankers(24.55, -1) = 20`. + - `decimal-places = 0` — The function rounds the number to an integer. In this case the argument can be omitted. Example: `roundBankers(2.5) = 2`. **Valor devuelto** @@ -166,7 +169,7 @@ roundBankers(10.755, 2) = 11,76 **Ver también** -- [Ronda](#rounding_functions-round) +- [ronda](#rounding_functions-round) ## ¿Cómo puedo hacerlo?) {#roundtoexp2num} @@ -184,4 +187,4 @@ Acepta un número. Si el número es menor que 18, devuelve 0. De lo contrario, r Acepta un número y lo redondea a un elemento en la matriz especificada. Si el valor es menor que el límite más bajo, se devuelve el límite más bajo. -[Artículo Original](https://clickhouse.tech/docs/es/query_language/functions/rounding_functions/) +[Artículo Original](https://clickhouse.tech/docs/en/query_language/functions/rounding_functions/) diff --git a/docs/es/sql_reference/functions/splitting_merging_functions.md b/docs/es/sql_reference/functions/splitting_merging_functions.md new file mode 100644 index 00000000000..62683f72158 --- /dev/null +++ b/docs/es/sql_reference/functions/splitting_merging_functions.md @@ -0,0 +1,116 @@ +--- +machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 47 +toc_title: "Divisi\xF3n y fusi\xF3n de cuerdas y matrices" +--- + +# Funciones para dividir y fusionar cadenas y matrices {#functions-for-splitting-and-merging-strings-and-arrays} + +## Por ejemplo:) {#splitbycharseparator-s} + +Divide una cadena en subcadenas separadas por un carácter especificado. Utiliza una cadena constante `separator` que consiste en exactamente un carácter. +Devuelve una matriz de subcadenas seleccionadas. Se pueden seleccionar subcadenas vacías si el separador aparece al principio o al final de la cadena, o si hay varios separadores consecutivos. + +**Sintaxis** + +``` sql +splitByChar(, ) +``` + +**Parámetros** + +- `separator` — The separator which should contain exactly one character. [Cadena](../../sql_reference/data_types/string.md). +- `s` — The string to split. [Cadena](../../sql_reference/data_types/string.md). + +**Valores devueltos)** + +Devuelve una matriz de subcadenas seleccionadas. 
Las subcadenas vacías se pueden seleccionar cuando: + +- Se produce un separador al principio o al final de la cadena; +- Hay varios separadores consecutivos; +- La cadena original `s` está vacío. + +Tipo: [Matriz](../../sql_reference/data_types/array.md) de [Cadena](../../sql_reference/data_types/string.md). + +**Ejemplo** + +``` sql +SELECT splitByChar(',', '1,2,3,abcde') +``` + +``` text +┌─splitByChar(',', '1,2,3,abcde')─┐ +│ ['1','2','3','abcde'] │ +└─────────────────────────────────┘ +``` + +## Por ejemplo:) {#splitbystringseparator-s} + +Divide una cadena en subcadenas separadas por una cadena. Utiliza una cadena constante `separator` de múltiples caracteres como separador. Si la cadena `separator` está vacío, dividirá la cadena `s` en una matriz de caracteres individuales. + +**Sintaxis** + +``` sql +splitByString(, ) +``` + +**Parámetros** + +- `separator` — The separator. [Cadena](../../sql_reference/data_types/string.md). +- `s` — The string to split. [Cadena](../../sql_reference/data_types/string.md). + +**Valores devueltos)** + +Devuelve una matriz de subcadenas seleccionadas. Las subcadenas vacías se pueden seleccionar cuando: + +Tipo: [Matriz](../../sql_reference/data_types/array.md) de [Cadena](../../sql_reference/data_types/string.md). + +- Se produce un separador no vacío al principio o al final de la cadena; +- Hay varios separadores consecutivos no vacíos; +- La cadena original `s` está vacío mientras el separador no está vacío. + +**Ejemplo** + +``` sql +SELECT splitByString(', ', '1, 2 3, 4,5, abcde') +``` + +``` text +┌─splitByString(', ', '1, 2 3, 4,5, abcde')─┐ +│ ['1','2 3','4,5','abcde'] │ +└───────────────────────────────────────────┘ +``` + +``` sql +SELECT splitByString('', 'abcde') +``` + +``` text +┌─splitByString('', 'abcde')─┐ +│ ['a','b','c','d','e'] │ +└────────────────────────────┘ +``` + +## Por ejemplo, se puede usar una matriz.\]) {#arraystringconcatarr-separator} + +Concatena las cadenas enumeradas en la matriz con el separador.'separador' es un parámetro opcional: una constante de cadena, establece una cadena vacía por defecto. +Devuelve la cadena. + +## Sistema abierto.) {#alphatokenss} + +Selecciona subcadenas de bytes consecutivos de los rangos a-z y A-Z.Devuelve una matriz de subcadenas. + +**Ejemplo** + +``` sql +SELECT alphaTokens('abca1abc') +``` + +``` text +┌─alphaTokens('abca1abc')─┐ +│ ['abca','abc'] │ +└─────────────────────────┘ +``` + +[Artículo Original](https://clickhouse.tech/docs/en/query_language/functions/splitting_merging_functions/) diff --git a/docs/es/query_language/functions/string_functions.md b/docs/es/sql_reference/functions/string_functions.md similarity index 90% rename from docs/es/query_language/functions/string_functions.md rename to docs/es/sql_reference/functions/string_functions.md index bd25c9128db..25b02ca3bd4 100644 --- a/docs/es/query_language/functions/string_functions.md +++ b/docs/es/sql_reference/functions/string_functions.md @@ -1,10 +1,13 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 40 +toc_title: Trabajar con cadenas --- # Funciones para trabajar con cadenas {#functions-for-working-with-strings} -## Vaciar {#empty} +## vaciar {#empty} Devuelve 1 para una cadena vacía o 0 para una cadena no vacía. El tipo de resultado es UInt8. @@ -53,7 +56,7 @@ No detecta el idioma. 
Entonces, para el turco, el resultado podría no ser exact Si la longitud de la secuencia de bytes UTF-8 es diferente para mayúsculas y minúsculas de un punto de código, el resultado puede ser incorrecto para este punto de código. Si la cadena contiene un conjunto de bytes que no es UTF-8, entonces el comportamiento no está definido. -## Todos los derechos reservados. {#upperutf8} +## superiorUTF8 {#upperutf8} Convierte una cadena en mayúsculas, suponiendo que la cadena contiene un conjunto de bytes que componen un texto codificado en UTF-8. No detecta el idioma. Entonces, para el turco, el resultado podría no ser exactamente correcto. @@ -74,7 +77,7 @@ toValidUTF8( input_string ) Parámetros: -- input\_string — Cualquier conjunto de bytes representados como el [Cadena](../../data_types/string.md) objeto de tipo de datos. +- input\_string — Any set of bytes represented as the [Cadena](../../sql_reference/data_types/string.md) objeto de tipo de datos. Valor devuelto: cadena UTF-8 válida. @@ -102,8 +105,8 @@ repeat(s, n) **Parámetros** -- `s` — La cadena de repetir. [Cadena](../../data_types/string.md). -- `n` — El número de veces para repetir la cadena. [UInt](../../data_types/int_uint.md). +- `s` — The string to repeat. [Cadena](../../sql_reference/data_types/string.md). +- `n` — The number of times to repeat the string. [UInt](../../sql_reference/data_types/int_uint.md). **Valor devuelto** @@ -127,7 +130,7 @@ Resultado: └────────────────────────────────┘ ``` -## Inverso {#reverse} +## inverso {#reverse} Invierte la cadena (como una secuencia de bytes). @@ -135,7 +138,7 @@ Invierte la cadena (como una secuencia de bytes). Invierte una secuencia de puntos de código Unicode, suponiendo que la cadena contiene un conjunto de bytes que representan un texto UTF-8. De lo contrario, hace otra cosa (no arroja una excepción). -## Formato (patrón, s0, s1, …) {#format} +## format(pattern, s0, s1, …) {#format} Formatear el patrón constante con la cadena enumerada en los argumentos. `pattern` es un patrón de formato de Python simplificado. La cadena de formato contiene “replacement fields” rodeado de llaves `{}`. Cualquier cosa que no esté contenida entre llaves se considera texto literal, que se copia sin cambios en la salida. Si necesita incluir un carácter de llave en el texto literal, se puede escapar duplicando: `{{ '{{' }}` y `{{ '}}' }}`. Los nombres de campo pueden ser números (comenzando desde cero) o vacíos (luego se tratan como números de consecuencia). @@ -272,7 +275,7 @@ Devuelve la cadena ‘s’ que se convirtió de la codificación en ‘from’ a Codificar ‘s’ cadena en base64 -## base64Decode(s)))) {#base64decode} +## base64Decode(s)) {#base64decode} Decodificar cadena codificada en base64 ‘s’ en la cadena original. En caso de fallo plantea una excepción. @@ -280,11 +283,11 @@ Decodificar cadena codificada en base64 ‘s’ en la cadena original. En caso d Similar a base64Decode, pero en caso de error se devolverá una cadena vacía. -## terminaCon(s, sufijo) {#endswith} +## endsWith(s, sufijo) {#endswith} Devuelve si se debe terminar con el sufijo especificado. Devuelve 1 si la cadena termina con el sufijo especificado, de lo contrario devuelve 0. -## Comienza con (str, prefijo) {#startswith} +## startsWith(str, prefijo) {#startswith} Devuelve 1 si la cadena comienza con el prefijo especificado, de lo contrario devuelve 0. @@ -326,8 +329,8 @@ trim([[LEADING|TRAILING|BOTH] trim_character FROM] input_string) **Parámetros** -- `trim_character` — caracteres especificados para el recorte. 
[Cadena](../../data_types/string.md). -`input_string` — cuerda para el acabado. [Cadena](../../data_types/string.md). +- `trim_character` — specified characters for trim. [Cadena](../../sql_reference/data_types/string.md). +- `input_string` — string for trim. [Cadena](../../sql_reference/data_types/string.md). **Valor devuelto** @@ -353,7 +356,7 @@ Resultado: ## trimLeft {#trimleft} -Quita todas las apariciones consecutivas de espacios en blanco comunes (carácter ASCII 32) desde el principio de una cadena. No elimina otros tipos de caracteres de espacio en blanco (tab, espacio sin interrupción, etc.). +Quita todas las apariciones consecutivas de espacios en blanco comunes (carácter ASCII 32) desde el principio de una cadena. No elimina otros tipos de caracteres de espacios en blanco (tab, espacio sin interrupción, etc.). **Sintaxis** @@ -365,7 +368,7 @@ Apodo: `ltrim(input_string)`. **Parámetros** -- `input_string` — cuerda para recortar. [Cadena](../../data_types/string.md). +- `input_string` — string to trim. [Cadena](../../sql_reference/data_types/string.md). **Valor devuelto** @@ -391,7 +394,7 @@ Resultado: ## trimRight {#trimright} -Quita todas las apariciones consecutivas de espacios en blanco comunes (carácter ASCII 32) del final de una cadena. No elimina otros tipos de caracteres de espacio en blanco (tab, espacio sin interrupción, etc.). +Quita todas las apariciones consecutivas de espacios en blanco comunes (carácter ASCII 32) del final de una cadena. No elimina otros tipos de caracteres de espacios en blanco (tab, espacio sin interrupción, etc.). **Sintaxis** @@ -403,7 +406,7 @@ Apodo: `rtrim(input_string)`. **Parámetros** -- `input_string` — cuerda para recortar. [Cadena](../../data_types/string.md). +- `input_string` — string to trim. [Cadena](../../sql_reference/data_types/string.md). **Valor devuelto** @@ -429,7 +432,7 @@ Resultado: ## trimBoth {#trimboth} -Quita todas las apariciones consecutivas de espacios en blanco comunes (carácter ASCII 32) de ambos extremos de una cadena. No elimina otros tipos de caracteres de espacio en blanco (tab, espacio sin interrupción, etc.). +Quita todas las apariciones consecutivas de espacios en blanco comunes (carácter ASCII 32) de ambos extremos de una cadena. No elimina otros tipos de caracteres de espacios en blanco (tab, espacio sin interrupción, etc.). **Sintaxis** @@ -441,7 +444,7 @@ Apodo: `trim(input_string)`. **Parámetros** -- `input_string` — cuerda para recortar. [Cadena](../../data_types/string.md). +- `input_string` — string to trim. [Cadena](../../sql_reference/data_types/string.md). **Valor devuelto** @@ -465,7 +468,7 @@ Resultado: └─────────────────────────────────────┘ ``` -## CRC32(s)))) {#crc32} +## CRC32(s) {#crc32} Devuelve la suma de comprobación CRC32 de una cadena, utilizando el polinomio CRC-32-IEEE 802.3 y el valor inicial `0xffffffff` (implementación zlib). @@ -477,10 +480,10 @@ Devuelve la suma de comprobación CRC32 de una cadena, utilizando el polinomio C El tipo de resultado es UInt32. -## CRC64(s)))) {#crc64} +## CRC64(s) {#crc64} Devuelve la suma de comprobación CRC64 de una cadena, utilizando el polinomio CRC-64-ECMA. El tipo de resultado es UInt64.
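Por ejemplo (el valor devuelto depende únicamente de los bytes de entrada):

``` sql
SELECT CRC32('ClickHouse') AS crc32, CRC64('ClickHouse') AS crc64
```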
-[Artículo Original](https://clickhouse.tech/docs/es/query_language/functions/string_functions/) +[Artículo Original](https://clickhouse.tech/docs/en/query_language/functions/string_functions/) diff --git a/docs/es/query_language/functions/string_replace_functions.md b/docs/es/sql_reference/functions/string_replace_functions.md similarity index 89% rename from docs/es/query_language/functions/string_replace_functions.md rename to docs/es/sql_reference/functions/string_replace_functions.md index 3449162c5c5..eeccfb244ec 100644 --- a/docs/es/query_language/functions/string_replace_functions.md +++ b/docs/es/sql_reference/functions/string_replace_functions.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 42 +toc_title: Para reemplazar en cadenas --- # Funciones para buscar y reemplazar en cadenas {#functions-for-searching-and-replacing-in-strings} @@ -18,7 +21,7 @@ Sustituye todas las apariciones del ‘pattern’ subcadena en ‘haystack’ co Reemplazo usando el ‘pattern’ expresión regular. Una expresión regular re2. Sustituye sólo la primera ocurrencia, si existe. Un patrón se puede especificar como ‘replacement’. Este patrón puede incluir sustituciones `\0-\9`. -Sustitución `\0` incluye toda la expresión regular. Sustitución `\1-\9` corresponden a los números de subpatrón. `\` es una plantilla, escapar de ella usando `\`. +Sustitución `\0` incluye toda la expresión regular. Las sustituciones `\1-\9` corresponden a los números de subpatrón. Para usar el carácter `\` en una plantilla, hay que escaparlo usando `\`. También tenga en cuenta que un literal de cadena requiere un escape adicional. Ejemplo 1. Conversión de la fecha a formato americano: @@ -84,8 +87,8 @@ SELECT replaceRegexpAll('Hello, World!', '^', 'here: ') AS res ## regexpQuoteMeta(s) {#regexpquotemetas} La función agrega una barra invertida antes de algunos caracteres predefinidos en la cadena. -Caracteres predefinidos: ‘0’, ‘\\’, ‘\|’, ‘(’, ‘)’, ‘^’, ‘$’, ‘.’, ‘\[’Lugar de origen, ‘?’Acerca de\*‘,’+‘,’{‘,’:‘,’¿Por qué? +Caracteres predefinidos: ‘0’, ‘\\’, ‘\|’, ‘(’, ‘)’, ‘^’, ‘$’, ‘.’, ‘\[’, ‘\]’, ‘?’, ‘\*’, ‘+’, ‘{’, ‘:’, ‘-’. Esta implementación difiere ligeramente de re2::RE2::QuoteMeta. Escapa de byte cero como \\0 en lugar de 00 y escapa solo de los caracteres requeridos.
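Por ejemplo, para escapar un fragmento arbitrario antes de incrustarlo en una expresión regular:

``` sql
SELECT regexpQuoteMeta('tom+jerry (v1.0)?') AS escapado
-- los metacaracteres +, (, ), . y ? quedan precedidos por una barra invertida
```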
-Para obtener más información, consulte el enlace: [Bienvenido](https://github.com/google/re2/blob/master/re2/re2.cc#L473) +Para obtener más información, consulte el enlace: [RE2](https://github.com/google/re2/blob/master/re2/re2.cc#L473) -[Artículo Original](https://clickhouse.tech/docs/es/query_language/functions/string_replace_functions/) +[Artículo Original](https://clickhouse.tech/docs/en/query_language/functions/string_replace_functions/) diff --git a/docs/es/query_language/functions/string_search_functions.md b/docs/es/sql_reference/functions/string_search_functions.md similarity index 67% rename from docs/es/query_language/functions/string_search_functions.md rename to docs/es/sql_reference/functions/string_search_functions.md index c9ef20be853..060b325d48d 100644 --- a/docs/es/query_language/functions/string_search_functions.md +++ b/docs/es/sql_reference/functions/string_search_functions.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 41 +toc_title: Para buscar cadenas --- # Funciones para buscar cadenas {#functions-for-searching-strings} @@ -10,7 +13,7 @@ La búsqueda distingue entre mayúsculas y minúsculas de forma predeterminada e Devuelve la posición (en bytes) de la subcadena encontrada en la cadena, comenzando desde 1. -Funciona bajo el supuesto de que la cadena contiene un conjunto de bytes que representan un texto codificado de un solo byte. Si no se cumple esta suposición y un carácter no se puede representar con un solo byte, la función no produce una excepción y devuelve algún resultado inesperado. Si el carácter se puede representar usando dos bytes, usará dos bytes y así sucesivamente. +Funciona bajo el supuesto de que la cadena contiene un conjunto de bytes que representan un texto codificado de un solo byte. Si no se cumple esta suposición y un carácter no se puede representar usando un solo byte, la función no lanza una excepción y devuelve algún resultado inesperado. Si el carácter se puede representar usando dos bytes, usará dos bytes y así sucesivamente. Para una búsqueda sin distinción de mayúsculas y minúsculas, utilice la función [positionCaseInsensitive](#positioncaseinsensitive). @@ -24,8 +27,8 @@ Apodo: `locate(haystack, needle)`. **Parámetros** -- `haystack` — cadena, en la que se buscará la subcadena. [Cadena](../syntax.md#syntax-string-literal). -- `needle` — subcadena que se va a buscar. [Cadena](../syntax.md#syntax-string-literal). +- `haystack` — string, in which substring will be searched. [Cadena](../syntax.md#syntax-string-literal). +- `needle` — substring to be searched. [Cadena](../syntax.md#syntax-string-literal). **Valores devueltos** @@ -36,7 +39,7 @@ Tipo: `Integer`. **Ejemplos** -Frase «Hello, world!» contiene un conjunto de bytes que representan un texto codificado de un solo byte. La función devuelve algún resultado esperado: +Frase “Hello, world!” contiene un conjunto de bytes que representan un texto codificado de un solo byte. La función devuelve algún resultado esperado: Consulta: @@ -72,7 +75,7 @@ Resultado: Lo mismo que [posición](#position) devuelve la posición (en bytes) de la subcadena encontrada en la cadena, comenzando desde 1. Utilice la función para una búsqueda que no distingue entre mayúsculas y minúsculas. -Funciona bajo el supuesto de que la cadena contiene un conjunto de bytes que representan un texto codificado de un solo byte.
Si no se cumple esta suposición y un carácter no se puede representar con un solo byte, la función no produce una excepción y devuelve algún resultado inesperado. Si el carácter se puede representar usando dos bytes, usará dos bytes y así sucesivamente. +Funciona bajo el supuesto de que la cadena contiene un conjunto de bytes que representan un texto codificado de un solo byte. Si no se cumple esta suposición y un carácter no se puede representar usando un solo byte, la función no lanza una excepción y devuelve algún resultado inesperado. Si el carácter se puede representar usando dos bytes, usará dos bytes y así sucesivamente. **Sintaxis** @@ -82,8 +85,8 @@ positionCaseInsensitive(haystack, needle) **Parámetros** -- `haystack` — cadena, en la que se buscará la subcadena. [Cadena](../syntax.md#syntax-string-literal). -- `needle` — subcadena que se va a buscar. [Cadena](../syntax.md#syntax-string-literal). +- `haystack` — string, in which substring will be searched. [Cadena](../syntax.md#syntax-string-literal). +- `needle` — substring to be searched. [Cadena](../syntax.md#syntax-string-literal). **Valores devueltos** @@ -124,8 +127,8 @@ positionUTF8(haystack, needle) **Parámetros** -- `haystack` — cadena, en la que se buscará la subcadena. [Cadena](../syntax.md#syntax-string-literal). -- `needle` — subcadena que se va a buscar. [Cadena](../syntax.md#syntax-string-literal). +- `haystack` — string, in which substring will be searched. [Cadena](../syntax.md#syntax-string-literal). +- `needle` — substring to be searched. [Cadena](../syntax.md#syntax-string-literal). **Valores devueltos** @@ -136,7 +139,7 @@ Tipo: `Integer`. **Ejemplos** -Frase «Hello, world!» en ruso contiene un conjunto de puntos Unicode que representan un texto codificado de un solo punto. La función devuelve algún resultado esperado: +Frase “Hello, world!” en ruso contiene un conjunto de puntos Unicode que representan un texto codificado de un solo punto. La función devuelve algún resultado esperado: Consulta: @@ -152,7 +155,7 @@ Resultado: └───────────────────────────────────┘ ``` -Frase «Salut, étudiante!», donde el carácter `é` Puede ser representado usando un punto (`U+00E9`) o dos puntos (`U+0065U+0301`) la función se puede devolver algún resultado inesperado: +Frase “Salut, étudiante!”, donde el carácter `é` puede ser representado usando un punto (`U+00E9`) o dos puntos (`U+0065U+0301`), la función puede devolver algún resultado inesperado: Consulta de la carta `é`, que se representa un punto Unicode `U+00E9`: @@ -196,8 +199,8 @@ positionCaseInsensitiveUTF8(haystack, needle) **Parámetros** -- `haystack` — cadena, en la que se buscará la subcadena. [Cadena](../syntax.md#syntax-string-literal). -- `needle` — subcadena que se va a buscar. [Cadena](../syntax.md#syntax-string-literal). +- `haystack` — string, in which substring will be searched. [Cadena](../syntax.md#syntax-string-literal). +- `needle` — substring to be searched. [Cadena](../syntax.md#syntax-string-literal). **Valor devuelto** @@ -240,8 +243,8 @@ multiSearchAllPositions(haystack, [needle1, needle2, ..., needlen]) **Parámetros** -- `haystack` — cadena, en la que se buscará la subcadena. [Cadena](../syntax.md#syntax-string-literal). -- `needle` — subcadena que se va a buscar. [Cadena](../syntax.md#syntax-string-literal). +- `haystack` — string, in which substring will be searched. [Cadena](../syntax.md#syntax-string-literal). +- `needle` — substring to be searched. [Cadena](../syntax.md#syntax-string-literal).
**Valores devueltos** @@ -267,62 +270,62 @@ Resultado: Ver `multiSearchAllPositions`. -## multiSearchFirstPosition(pajar, \[agujaUno, agujaCómo hacer, …, agujay\]) {#multisearchfirstposition} +## multiSearchFirstPosition(pajar, \[aguja1, aguja2, …, needley\]) {#multisearchfirstposition} Lo mismo que `position` pero devuelve el desplazamiento más a la izquierda de la cadena `haystack` que se corresponde con algunas de las agujas. Para una búsqueda que no distingue entre mayúsculas y minúsculas o / y en formato UTF-8, use funciones `multiSearchFirstPositionCaseInsensitive, multiSearchFirstPositionUTF8, multiSearchFirstPositionCaseInsensitiveUTF8`. -## multiSearchFirstIndex(pajar, \[agujaUno, agujaCómo hacer, …, agujay\]) {#multisearchfirstindexhaystack-needle1-needle2-needlen} +## multiSearchFirstIndex(pajar, \[aguja1, aguja2, …, needley\]) {#multisearchfirstindexhaystack-needle1-needle2-needlen} -Devuelve el índice `i` (a partir de 1) de la aguja encontrada más a la izquierdame es la cadena `haystack` y 0 de lo contrario. +Devuelve el índice `i` (a partir de 1) de la aguja encontrada más a la izquierda en la cadena `haystack` y 0 de lo contrario. Para una búsqueda que no distingue entre mayúsculas y minúsculas o / y en formato UTF-8, use funciones `multiSearchFirstIndexCaseInsensitive, multiSearchFirstIndexUTF8, multiSearchFirstIndexCaseInsensitiveUTF8`. -## multiSearchAny(pajar, \[agujaUno, agujaCómo hacer, …, agujay\]) {#function-multisearchany} +## multiSearchAny(pajar, \[aguja1, aguja2, …, needley\]) {#function-multisearchany} Devuelve 1, si al menos una aguja de cuerdame coincide con la cadena `haystack` y 0 de lo contrario. Para una búsqueda que no distingue entre mayúsculas y minúsculas o / y en formato UTF-8, use funciones `multiSearchAnyCaseInsensitive, multiSearchAnyUTF8, multiSearchAnyCaseInsensitiveUTF8`. !!! note "Nota" - En todos `multiSearch*` el número de agujas debe ser inferior a 2Ocho debido a la especificación de implementación. + En todos `multiSearch*` el número de agujas debe ser inferior a 2⁸ debido a la especificación de implementación. ## match (pajar, patrón) {#matchhaystack-pattern} Comprueba si la cadena coincide con la `pattern` expresión regular. Un `re2` expresión regular. El [sintaxis](https://github.com/google/re2/wiki/Syntax) de la `re2` expresiones regulares es más limitada que la sintaxis de las expresiones regulares de Perl. -Devuelve 0 si no coinciden, o 1 si coinciden. +Devuelve 0 si no coincide, o 1 si coincide. Tenga en cuenta que el símbolo de barra invertida (`\`) se utiliza para escapar en la expresión regular. El mismo símbolo se usa para escapar en literales de cadena. Por lo tanto, para escapar del símbolo en una expresión regular, debe escribir dos barras invertidas (\\) en un literal de cadena. La expresión regular funciona con la cadena como si fuera un conjunto de bytes. La expresión regular no puede contener bytes nulos. Para que los patrones busquen subcadenas en una cadena, es mejor usar LIKE o ‘position’, ya que trabajan mucho más rápido. -## multiMatchAny(pajar, \[patrónUno, patrónCómo hacer, …, patróny\]) {#multimatchanyhaystack-pattern1-pattern2-patternn} +## multiMatchAny(pajar, \[patrón1, patrón2, …, patterny\]) {#multimatchanyhaystack-pattern1-pattern2-patternn} -Lo mismo que `match`, pero devuelve 0 si ninguna de las expresiones regulares coinciden y 1 si alguno de los patrones coinciden. Se utiliza [hyperscan](https://github.com/intel/hyperscan) biblioteca.
Para que los patrones busquen subcadenas en una cadena, es mejor usar `multiSearchAny` ya que funciona mucho más rápido. +Lo mismo que `match`, pero devuelve 0 si ninguna de las expresiones regulares coincide y 1 si alguno de los patrones coincide. Se utiliza [hyperscan](https://github.com/intel/hyperscan) biblioteca. Para que los patrones busquen subcadenas en una cadena, es mejor usar `multiSearchAny` ya que funciona mucho más rápido. !!! note "Nota" - La longitud de cualquiera de los `haystack` Cadena debe ser inferior a 2Nivel de Cifrado WEP bytes de lo contrario, se lanza la excepción. Esta restricción tiene lugar debido a la API de hiperscan. + La longitud de cualquier cadena `haystack` debe ser inferior a 2³² bytes; de lo contrario, se lanza una excepción. Esta restricción tiene lugar debido a la API de hiperscan. -## multiMatchAnyIndex(pajar, \[patrónUno, patrónCómo hacer, …, patróny\]) {#multimatchanyindexhaystack-pattern1-pattern2-patternn} +## multiMatchAnyIndex(pajar, \[patrón1, patrón2, …, patterny\]) {#multimatchanyindexhaystack-pattern1-pattern2-patternn} Lo mismo que `multiMatchAny`, pero devuelve cualquier índice que coincida con el pajar. -## ¿Cómo puedo obtener más información?Uno, patrónCómo hacer, …, patróny\]) {#multimatchallindiceshaystack-pattern1-pattern2-patternn} +## multiMatchAllIndices(pajar, \[patrón1, patrón2, …, patterny\]) {#multimatchallindiceshaystack-pattern1-pattern2-patternn} Lo mismo que `multiMatchAny`, pero devuelve la matriz de todas las indicaciones que coinciden con el pajar en cualquier orden. -## multiFuzzyMatchAny(pajar, distancia, \[patrónUno, patrónCómo hacer, …, patróny\]) {#multifuzzymatchanyhaystack-distance-pattern1-pattern2-patternn} +## multiFuzzyMatchAny(pajar, distancia, \[patrón1, patrón2, …, patterny\]) {#multifuzzymatchanyhaystack-distance-pattern1-pattern2-patternn} -Lo mismo que `multiMatchAny`, pero devuelve 1 si algún patrón coincide con el pajar dentro de una constante [Editar distancia](https://en.wikipedia.org/wiki/Edit_distance). Esta función también está en modo experimental y puede ser extremadamente lenta. Para obtener más información, consulte [documentación de hyperscan](https://intel.github.io/hyperscan/dev-reference/compilation.html#approximate-matching). +Lo mismo que `multiMatchAny`, pero devuelve 1 si algún patrón coincide con el pajar dentro de una constante [editar distancia](https://en.wikipedia.org/wiki/Edit_distance). Esta función también está en modo experimental y puede ser extremadamente lenta. Para obtener más información, consulte [documentación de hyperscan](https://intel.github.io/hyperscan/dev-reference/compilation.html#approximate-matching). -## multiFuzzyMatchAnyIndex(pajar, distancia, \[patrónUno, patrónCómo hacer, …, patróny\]) {#multifuzzymatchanyindexhaystack-distance-pattern1-pattern2-patternn} +## multiFuzzyMatchAnyIndex(pajar, distancia, \[patrón1, patrón2, …, patterny\]) {#multifuzzymatchanyindexhaystack-distance-pattern1-pattern2-patternn} Lo mismo que `multiFuzzyMatchAny`, pero devuelve cualquier índice que coincida con el pajar dentro de una distancia de edición constante.
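Un boceto ilustrativo de las familias `multiSearch*` y `multiMatch*` descritas arriba (se asume una compilación de ClickHouse con soporte de hyperscan para `multiMatchAny`):

``` sql
-- multiSearchAny: búsqueda de subcadenas; devuelve 1 si alguna aguja aparece.
-- multiMatchAny: coincidencia de expresiones regulares; devuelve 1 si algún patrón coincide.
SELECT
    multiSearchAny('Hello, world!', ['world', 'xyz']) AS busqueda,
    multiMatchAny('Hello, world!', ['^Hello', 'foo$']) AS coincidencia;
```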
-## multiFuzzyMatchAllIndices(pajar, distancia, \[patrónUno, patrónCómo hacer, …, patróny\]) {#multifuzzymatchallindiceshaystack-distance-pattern1-pattern2-patternn} +## multiFuzzyMatchAllIndices(pajar, distancia, \[patrón1, patrón2, …, patterny\]) {#multifuzzymatchallindiceshaystack-distance-pattern1-pattern2-patternn} Lo mismo que `multiFuzzyMatchAny`, pero devuelve la matriz de todos los índices en cualquier orden que coincida con el pajar dentro de una distancia de edición constante. @@ -334,11 +337,11 @@ Lo mismo que `multiFuzzyMatchAny`, pero devuelve la matriz de todos los índices ## extracto(pajar, patrón) {#extracthaystack-pattern} -Extrae un fragmento de una cadena utilizando una expresión regular. Si ‘haystack’ no coinciden con el ‘pattern’ regex, se devuelve una cadena vacía. Si la expresión regular no contiene subpatrones, toma el fragmento que coincide con toda la expresión regular. De lo contrario, toma el fragmento que coincide con el primer subpatrón. +Extrae un fragmento de una cadena utilizando una expresión regular. Si ‘haystack’ no coincide con el ‘pattern’ regex, se devuelve una cadena vacía. Si la expresión regular no contiene subpatrones, toma el fragmento que coincide con toda la expresión regular. De lo contrario, toma el fragmento que coincide con el primer subpatrón. ## extractAll(pajar, patrón) {#extractallhaystack-pattern} -Extrae todos los fragmentos de una cadena utilizando una expresión regular. Si ‘haystack’ no coinciden con el ‘pattern’ regex, se devuelve una cadena vacía. Devuelve una matriz de cadenas que consiste en todas las coincidencias con la expresión regular. En general, el comportamiento es el mismo que el ‘extract’ función (toma el primer subpatrón, o la expresión completa si no hay un subpatrón). +Extrae todos los fragmentos de una cadena utilizando una expresión regular. Si ‘haystack’ no coincide con el ‘pattern’ regex, se devuelve una cadena vacía. Devuelve una matriz de cadenas que consiste en todas las coincidencias con la expresión regular. En general, el comportamiento es el mismo que el ‘extract’ función (toma el primer subpatrón, o la expresión completa si no hay un subpatrón). ## como (pajar, patrón), operador de patrón COMO pajar {#function-like} @@ -360,17 +363,17 @@ Lo mismo que ‘like’ pero negativo. ## ngramDistance(pajar, aguja) {#ngramdistancehaystack-needle} -Calcula la distancia de 4 gramos entre `haystack` y `needle`: cuenta la diferencia simétrica entre dos conjuntos múltiples de 4 gramos y la normaliza por la suma de sus cardinalidades. Devuelve un número flotante de 0 a 1: cuanto más cerca de cero, más cadenas son similares entre sí. Si la constante `needle` o `haystack` es más de 32Kb, arroja una excepción. Si algunos de los no constantes `haystack` o `needle` Las cadenas son más de 32Kb, la distancia es siempre una. +Calcula la distancia de 4 gramos entre `haystack` y `needle`: counts the symmetric difference between two multisets of 4-grams and normalizes it by the sum of their cardinalities. Returns float number from 0 to 1 – the closer to zero, the more strings are similar to each other. If the constant `needle` o `haystack` es más de 32Kb, arroja una excepción. Si algunos de los no constantes `haystack` o `needle` Las cadenas son más de 32Kb, la distancia es siempre una. Para la búsqueda sin distinción de mayúsculas y minúsculas o / y en formato UTF-8, use funciones `ngramDistanceCaseInsensitive, ngramDistanceUTF8, ngramDistanceCaseInsensitiveUTF8`. 
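Un ejemplo esquemático de `ngramDistance` (para cadenas idénticas el resultado es 0; el valor exacto para cadenas distintas depende de sus 4-gramas):

``` sql
SELECT
    ngramDistance('ClickHouse', 'ClickHouse') AS identicas,  -- 0
    ngramDistance('ClickHouse', 'PostgreSQL') AS distintas;  -- cercano a 1
```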
## ngramSearch(pajar, aguja) {#ngramsearchhaystack-needle} -Lo mismo que `ngramDistance` pero calcula la diferencia no simétrica entre `needle` y `haystack` – el número de n-grams de la aguja menos el número común de n-grams normalizado por el número de `needle` n-gramas. Cuanto más cerca de uno, más probable es `needle` está en el `haystack`. Puede ser útil para la búsqueda de cadenas difusas. +Lo mismo que `ngramDistance` pero calcula la diferencia no simétrica entre `needle` y `haystack` – the number of n-grams from needle minus the common number of n-grams normalized by the number of `needle` n-gramas. Cuanto más cerca de uno, más probable es `needle` está en el `haystack`. Puede ser útil para la búsqueda de cadenas difusas. Para la búsqueda sin distinción de mayúsculas y minúsculas o / y en formato UTF-8, use funciones `ngramSearchCaseInsensitive, ngramSearchUTF8, ngramSearchCaseInsensitiveUTF8`. !!! note "Nota" - Para el caso UTF-8 usamos una distancia de 3 gramos. Todas estas no son distancias n-gram perfectamente justas. Usamos hashes de 2 bytes para hash n-gramos y luego calculamos la diferencia (no) simétrica entre estas tablas hash: pueden ocurrir colisiones. Con el formato UTF-8 sin distinción de mayúsculas y minúsculas, no usamos fair `tolower` función - ponemos a cero el bit 5-th (comenzando desde cero) de cada byte de punto de código y el primer bit de byte zeroth si bytes más de uno - esto funciona para el latín y principalmente para todas las letras cirílicas. + For UTF-8 case we use 3-gram distance. All these are not perfectly fair n-gram distances. We use 2-byte hashes to hash n-grams and then calculate the (non-)symmetric difference between these hash tables – collisions may occur. With UTF-8 case-insensitive format we do not use fair `tolower` function – we zero the 5-th bit (starting from zero) of each codepoint byte and first bit of zeroth byte if bytes more than one – this works for Latin and mostly for all Cyrillic letters. -[Artículo Original](https://clickhouse.tech/docs/es/query_language/functions/string_search_functions/) +[Artículo Original](https://clickhouse.tech/docs/en/query_language/functions/string_search_functions/) diff --git a/docs/es/query_language/functions/type_conversion_functions.md b/docs/es/sql_reference/functions/type_conversion_functions.md similarity index 65% rename from docs/es/query_language/functions/type_conversion_functions.md rename to docs/es/sql_reference/functions/type_conversion_functions.md index 9d6751a7c10..3920e809438 100644 --- a/docs/es/query_language/functions/type_conversion_functions.md +++ b/docs/es/sql_reference/functions/type_conversion_functions.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 38 +toc_title: "Conversi\xF3n de tipo" --- # Funciones de conversión de tipos {#type-conversion-functions} @@ -8,16 +11,16 @@ machine_translated: true Cuando convierte un valor de uno a otro tipo de datos, debe recordar que, en un caso común, es una operación insegura que puede provocar una pérdida de datos. Puede producirse una pérdida de datos si intenta ajustar el valor de un tipo de datos más grande a un tipo de datos más pequeño, o si convierte valores entre diferentes tipos de datos. -Haga clic en Casa tiene el [mismo comportamiento que los programas de C++](https://en.cppreference.com/w/cpp/language/implicit_conversion). 
+ClickHouse tiene el [mismo comportamiento que los programas de C++](https://en.cppreference.com/w/cpp/language/implicit_conversion). ## ¿Cómo puedo obtener más información?) {#toint8163264} -Convierte un valor de entrada en el [En](../../data_types/int_uint.md) tipo de datos. Esta familia de funciones incluye: +Convierte un valor de entrada en el [Int](../../sql_reference/data_types/int_uint.md) tipo de datos. Esta familia de funciones incluye: -- `toInt8(expr)` — Resultados en el `Int8` tipo de datos. -- `toInt16(expr)` — Resultados en el `Int16` tipo de datos. -- `toInt32(expr)` — Resultados en el `Int32` tipo de datos. -- `toInt64(expr)` — Resultados en el `Int64` tipo de datos. +- `toInt8(expr)` — Results in the `Int8` tipo de datos. +- `toInt16(expr)` — Results in the `Int16` tipo de datos. +- `toInt32(expr)` — Results in the `Int32` tipo de datos. +- `toInt64(expr)` — Results in the `Int64` tipo de datos. **Parámetros** @@ -27,9 +30,9 @@ Convierte un valor de entrada en el [En](../../data_types/int_uint.md) tipo de d Valor entero en el `Int8`, `Int16`, `Int32`, o `Int64` tipo de datos. -Funciones de uso [Tetas grandes](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), lo que significa que truncan dígitos fraccionarios de números. +Las funciones usan [redondeo hacia cero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), lo que significa que truncan los dígitos fraccionarios de los números. -El comportamiento de las funciones [NaN y Inf](../../data_types/float.md#data_type-float-nan-inf) los argumentos no están definidos. Recuerde acerca de [problemas de conversión numérica](#numeric-conversion-issues), al usar las funciones. +El comportamiento de las funciones [NaN y Inf](../../sql_reference/data_types/float.md#data_type-float-nan-inf) los argumentos no están definidos. Recuerde acerca de [problemas de conversión numérica](#numeric-conversion-issues), al usar las funciones. **Ejemplo** @@ -77,12 +80,12 @@ select toInt64OrNull('123123'), toInt8OrNull('123qwe123') ## ¿Cómo puedo obtener más información?) {#touint8163264} -Convierte un valor de entrada en el [UInt](../../data_types/int_uint.md) tipo de datos. Esta familia de funciones incluye: +Convierte un valor de entrada en el [UInt](../../sql_reference/data_types/int_uint.md) tipo de datos. Esta familia de funciones incluye: -- `toUInt8(expr)` — Resultados en el `UInt8` tipo de datos. -- `toUInt16(expr)` — Resultados en el `UInt16` tipo de datos. -- `toUInt32(expr)` — Resultados en el `UInt32` tipo de datos. -- `toUInt64(expr)` — Resultados en el `UInt64` tipo de datos. +- `toUInt8(expr)` — Results in the `UInt8` tipo de datos. +- `toUInt16(expr)` — Results in the `UInt16` tipo de datos. +- `toUInt32(expr)` — Results in the `UInt32` tipo de datos. +- `toUInt64(expr)` — Results in the `UInt64` tipo de datos. **Parámetros** @@ -92,9 +95,9 @@ Convierte un valor de entrada en el [UInt](../../data_types/int_uint.md) tipo de Valor entero en el `UInt8`, `UInt16`, `UInt32`, o `UInt64` tipo de datos. -Funciones de uso [Tetas grandes](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), lo que significa que truncan dígitos fraccionarios de números. +Las funciones usan [redondeo hacia cero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), lo que significa que truncan los dígitos fraccionarios de los números. -El comportamiento de las funciones para los instrumentos negativos y para [NaN y Inf](../../data_types/float.md#data_type-float-nan-inf) los argumentos no están definidos.
Si pasa una cadena con un número negativo, por ejemplo `'-32'`, ClickHouse genera una excepción. Recuerde acerca de [problemas de conversión numérica](#numeric-conversion-issues), al usar las funciones. +El comportamiento de las funciones para los instrumentos negativos y para [NaN y Inf](../../sql_reference/data_types/float.md#data_type-float-nan-inf) los argumentos no están definidos. Si pasa una cadena con un número negativo, por ejemplo `'-32'`, ClickHouse genera una excepción. Recuerde acerca de [problemas de conversión numérica](#numeric-conversion-issues), al usar las funciones. **Ejemplo** @@ -132,7 +135,7 @@ SELECT toUInt64(nan), toUInt32(-32), toUInt16('16'), toUInt8(8.8) ## toDecimal(32/64/128) {#todecimal3264128} -Convertir `value` Angeles [Decimal](../../data_types/decimal.md) tipo de datos con precisión de `S`. El `value` puede ser un número o una cadena. El `S` (escala) parámetro especifica el número de decimales. +Convertir `value` a la [Decimal](../../sql_reference/data_types/decimal.md) tipo de datos con precisión de `S`. El `value` puede ser un número o una cadena. El `S` (escala) parámetro especifica el número de decimales. - `toDecimal32(value, S)` - `toDecimal64(value, S)` @@ -140,18 +143,18 @@ Convertir `value` Angeles [Decimal](../../data_types/decimal.md) tipo de datos c ## ¿Cómo puedo hacer esto? {#todecimal3264128ornull} -Convierte una cadena de entrada en un [Información detallada))](../../data_types/decimal.md) valor de tipo de datos. Esta familia de funciones incluye: +Convierte una cadena de entrada en un [Información detallada))](../../sql_reference/data_types/decimal.md) valor de tipo de datos. Esta familia de funciones incluye: -- `toDecimal32OrNull(expr, S)` — Resultados en `Nullable(Decimal32(S))` tipo de datos. -- `toDecimal64OrNull(expr, S)` — Resultados en `Nullable(Decimal64(S))` tipo de datos. -- `toDecimal128OrNull(expr, S)` — Resultados en `Nullable(Decimal128(S))` tipo de datos. +- `toDecimal32OrNull(expr, S)` — Results in `Nullable(Decimal32(S))` tipo de datos. +- `toDecimal64OrNull(expr, S)` — Results in `Nullable(Decimal64(S))` tipo de datos. +- `toDecimal128OrNull(expr, S)` — Results in `Nullable(Decimal128(S))` tipo de datos. Estas funciones deben usarse en lugar de `toDecimal*()` funciones, si usted prefiere conseguir un `NULL` valor de entrada en lugar de una excepción en el caso de un error de análisis de valor de entrada. **Parámetros** -- `expr` — [Expresion](../syntax.md#syntax-expressions), devuelve un valor en el [Cadena](../../data_types/string.md) tipo de datos. ClickHouse espera la representación textual del número decimal. Por ejemplo, `'1.111'`. -- `S` — Escala, el número de decimales en el valor resultante. +- `expr` — [Expresion](../syntax.md#syntax-expressions), devuelve un valor en el [Cadena](../../sql_reference/data_types/string.md) tipo de datos. ClickHouse espera la representación textual del número decimal. Por ejemplo, `'1.111'`. +- `S` — Scale, the number of decimal places in the resulting value. **Valor devuelto** @@ -184,18 +187,18 @@ SELECT toDecimal32OrNull(toString(-1.111), 2) AS val, toTypeName(val) ## Por ejemplo: {#todecimal3264128orzero} -Convierte un valor de entrada en el [Decimal (P, S)](../../data_types/decimal.md) tipo de datos. Esta familia de funciones incluye: +Convierte un valor de entrada en el [Decimal (P, S)](../../sql_reference/data_types/decimal.md) tipo de datos. Esta familia de funciones incluye: -- `toDecimal32OrZero( expr, S)` — Resultados en `Decimal32(S)` tipo de datos. 
-- `toDecimal64OrZero( expr, S)` — Resultados en `Decimal64(S)` tipo de datos. -- `toDecimal128OrZero( expr, S)` — Resultados en `Decimal128(S)` tipo de datos. +- `toDecimal32OrZero( expr, S)` — Results in `Decimal32(S)` tipo de datos. +- `toDecimal64OrZero( expr, S)` — Results in `Decimal64(S)` tipo de datos. +- `toDecimal128OrZero( expr, S)` — Results in `Decimal128(S)` tipo de datos. Estas funciones deben usarse en lugar de `toDecimal*()` funciones, si usted prefiere conseguir un `0` valor de entrada en lugar de una excepción en el caso de un error de análisis de valor de entrada. **Parámetros** -- `expr` — [Expresion](../syntax.md#syntax-expressions), devuelve un valor en el [Cadena](../../data_types/string.md) tipo de datos. ClickHouse espera la representación textual del número decimal. Por ejemplo, `'1.111'`. -- `S` — Escala, el número de decimales en el valor resultante. +- `expr` — [Expresion](../syntax.md#syntax-expressions), devuelve un valor en el [Cadena](../../sql_reference/data_types/string.md) tipo de datos. ClickHouse espera la representación textual del número decimal. Por ejemplo, `'1.111'`. +- `S` — Scale, the number of decimal places in the resulting value. **Valor devuelto** @@ -231,7 +234,7 @@ SELECT toDecimal32OrZero(toString(-1.111), 2) AS val, toTypeName(val) Funciones para convertir entre números, cadenas (pero no cadenas fijas), fechas y fechas con horas. Todas estas funciones aceptan un argumento. -Al convertir a o desde una cadena, el valor se formatea o se analiza utilizando las mismas reglas que para el formato TabSeparated (y casi todos los demás formatos de texto). Si no se puede analizar la cadena, se produce una excepción y se cancela la solicitud. +Al convertir a o desde una cadena, el valor se formatea o se analiza utilizando las mismas reglas que para el formato TabSeparated (y casi todos los demás formatos de texto). Si la cadena no se puede analizar, se lanza una excepción y se cancela la solicitud. Al convertir fechas a números o viceversa, la fecha corresponde al número de días desde el comienzo de la época Unix. Al convertir fechas con horas a números o viceversa, la fecha con hora corresponde al número de segundos desde el comienzo de la época Unix. @@ -318,7 +321,7 @@ Esta función acepta un número o fecha o fecha con hora, y devuelve un FixedStr ## CAST(x, t) {#type_conversion_function-cast} -Convertir ‘x’ Angeles ‘t’ tipo de datos. La sintaxis CAST(x AS t) también es compatible. +Convertir ‘x’ a la ‘t’ tipo de datos. La sintaxis CAST(x AS t) también es compatible. Ejemplo: @@ -339,7 +342,7 @@ SELECT La conversión a FixedString(N) solo funciona para argumentos de tipo String o FixedString(N). -Conversión de tipo a [NULO](../../data_types/nullable.md) y la espalda es compatible. Ejemplo: +La conversión de tipo a [NULL](../../sql_reference/data_types/nullable.md) y viceversa es compatible. Ejemplo: ``` sql SELECT toTypeName(x) FROM t_null ``` @@ -365,7 +368,7 @@ SELECT toTypeName(CAST(x, 'Nullable(UInt16)')) FROM t_null ## toInterval(Year\|Quarter\|Month\|Week\|Day\|Hour\|Minute\|Second) {#function-tointerval} -Convierte un argumento de tipo Number en un [Intervalo](../../data_types/special_data_types/interval.md) tipo de datos. +Convierte un argumento de tipo Number en un [Intervalo](../../sql_reference/data_types/special_data_types/interval.md) tipo de datos. **Sintaxis** ``` sql toIntervalYear(number) ``` **Parámetros** -- `number` — Duración del intervalo. Número entero positivo. +- `number` — Duration of interval. Positive integer number.
**Valores devueltos** @@ -406,18 +409,126 @@ SELECT └───────────────────────────┴──────────────────────────────┘ ``` -## parseDateTimeBestEffort {#type_conversion_functions-parsedatetimebesteffort} +## parseDateTimeBestEffort {#parsedatetimebesteffort} -Analice un argumento de tipo numérico a un tipo Date o DateTime. -diferente de toDate y toDateTime, parseDateTimeBestEffort puede progresar en un formato de fecha más complejo. -Para obtener más información, consulte el enlace: [Formato de fecha compleja](https://xkcd.com/1179/) +Convierte una fecha y una hora en el [Cadena](../../sql_reference/data_types/string.md) representación a [FechaHora](../../sql_reference/data_types/datetime.md#data_type-datetime) tipo de datos. + +La función analiza [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601), [RFC 1123 - 5.2.14 RFC-822 Especificación de fecha y hora](https://tools.ietf.org/html/rfc1123#page-55), ClickHouse y algunos otros formatos de fecha y hora. + +**Sintaxis** + +``` sql +parseDateTimeBestEffort(time_string [, time_zone]); +``` + +**Parámetros** + +- `time_string` — String containing a date and time to convert. [Cadena](../../sql_reference/data_types/string.md). +- `time_zone` — Time zone. The function parses `time_string` según la zona horaria. [Cadena](../../sql_reference/data_types/string.md). + +**Formatos no estándar admitidos** + +- Una cadena que contiene 9..10 dígitos [marca de tiempo unix](https://en.wikipedia.org/wiki/Unix_time). +- Una cadena con un componente de fecha y hora: `YYYYMMDDhhmmss`, `DD/MM/YYYY hh:mm:ss`, `DD-MM-YY hh:mm`, `YYYY-MM-DD hh:mm:ss`, sucesivamente. +- Una cadena con una fecha, pero sin componente de hora: `YYYY`, `YYYYMM`, `YYYY*MM`, `DD/MM/YYYY`, `DD-MM-YY` sucesivamente. +- Una cadena con un día y una hora: `DD`, `DD hh`, `DD hh:mm`. En este caso `YYYY-MM` se sustituyen como `2000-01`. +- Una cadena que incluye la fecha y la hora junto con la información de desplazamiento de zona horaria: `YYYY-MM-DD hh:mm:ss ±h:mm`, sucesivamente. Por ejemplo, `2020-12-12 17:36:00 -5:00`. + +Para todos los formatos con separador, la función analiza los nombres de meses expresados por su nombre completo o por las primeras tres letras de un nombre de mes. Ejemplos: `24/DEC/18`, `24-Dec-18`, `01-September-2018`. + +**Valor devuelto** + +- `time_string` convertido a la `DateTime` tipo de datos. 
+ +**Ejemplos** + +Consulta: + +``` sql +SELECT parseDateTimeBestEffort('12/12/2020 12:12:57') +AS parseDateTimeBestEffort; +``` + +Resultado: + +``` text +┌─parseDateTimeBestEffort─┐ +│ 2020-12-12 12:12:57 │ +└─────────────────────────┘ +``` + +Consulta: + +``` sql +SELECT parseDateTimeBestEffort('Sat, 18 Aug 2018 07:22:16 GMT', 'Europe/Moscow') +AS parseDateTimeBestEffort +``` + +Resultado: + +``` text +┌─parseDateTimeBestEffort─┐ +│ 2018-08-18 10:22:16 │ +└─────────────────────────┘ +``` + +Consulta: + +``` sql +SELECT parseDateTimeBestEffort('1284101485') +AS parseDateTimeBestEffort +``` + +Resultado: + +``` text +┌─parseDateTimeBestEffort─┐ +│ 2015-07-07 12:04:41 │ +└─────────────────────────┘ +``` + +Consulta: + +``` sql +SELECT parseDateTimeBestEffort('2018-12-12 10:12:12') +AS parseDateTimeBestEffort +``` + +Resultado: + +``` text +┌─parseDateTimeBestEffort─┐ +│ 2018-12-12 10:12:12 │ +└─────────────────────────┘ +``` + +Consulta: + +``` sql +SELECT parseDateTimeBestEffort('10 20:19') +``` + +Resultado: + +``` text +┌─parseDateTimeBestEffort('10 20:19')─┐ +│ 2000-01-10 20:19:00 │ +└─────────────────────────────────────┘ +``` + +**Ver también** + +- \[ISO 8601 announcement by @xkcd\](https://xkcd.com/1179/) +- [RFC 1123](https://tools.ietf.org/html/rfc1123) +- [Fecha](#todate) +- [toDateTime](#todatetime) ## parseDateTimeBestEffortOrNull {#parsedatetimebesteffortornull} -Lo mismo que para [parseDateTimeBestEffort](#type_conversion_functions-parsedatetimebesteffort) excepto que devuelve null cuando encuentra un formato de fecha que no se puede procesar. +Lo mismo que para [parseDateTimeBestEffort](#parsedatetimebesteffort) excepto que devuelve null cuando encuentra un formato de fecha que no se puede procesar. ## parseDateTimeBestEffortOrZero {#parsedatetimebesteffortorzero} -Lo mismo que para [parseDateTimeBestEffort](#type_conversion_functions-parsedatetimebesteffort) que devuelve una fecha cero o una fecha cero cuando encuentra un formato de fecha que no se puede procesar. +Lo mismo que para [parseDateTimeBestEffort](#parsedatetimebesteffort) excepto que devuelve una fecha cero o una fecha y hora cero cuando encuentra un formato de fecha que no se puede procesar. -[Artículo Original](https://clickhouse.tech/docs/es/query_language/functions/type_conversion_functions/) +[Artículo Original](https://clickhouse.tech/docs/en/query_language/functions/type_conversion_functions/) diff --git a/docs/es/query_language/functions/url_functions.md b/docs/es/sql_reference/functions/url_functions.md similarity index 90% rename from docs/es/query_language/functions/url_functions.md rename to docs/es/sql_reference/functions/url_functions.md index 5cfb751446b..923d9250928 100644 --- a/docs/es/query_language/functions/url_functions.md +++ b/docs/es/sql_reference/functions/url_functions.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 54 +toc_title: Trabajar con URL --- # Funciones para trabajar con URL {#functions-for-working-with-urls} @@ -14,7 +17,7 @@ Si la parte relevante no está presente en una URL, se devuelve una cadena vací Extrae el protocolo de una URL. -Ejemplos de valores típicos devueltos: http, https, ftp, mailto, tel, magnet… +Examples of typical returned values: http, https, ftp, mailto, tel, magnet… ### dominio {#domain} Extrae el nombre de host de una URL. ``` sql domain(url) ``` **Parámetros** -- `url` — URL. Tipo: [Cadena](../../data_types/string.md). +- `url` — URL. Type: [Cadena](../../sql_reference/data_types/string.md).
La URL se puede especificar con o sin un esquema. Ejemplos: @@ -77,7 +80,7 @@ topLevelDomain(url) **Parámetros** -- `url` — URL. Tipo: [Cadena](../../data_types/string.md). +- `url` — URL. Type: [Cadena](../../sql_reference/data_types/string.md). La URL se puede especificar con o sin un esquema. Ejemplos: @@ -108,7 +111,7 @@ SELECT topLevelDomain('svn+ssh://www.some.svn-hosting.com:80/repo/trunk') ### FirstSignificantSubdomain {#firstsignificantsubdomain} -Devuelve el “first significant subdomain”. Este es un concepto no estándar específico de Yandex.Métrica. El primer subdominio significativo es un dominio de segundo nivel si es ‘com’, ‘net’, ‘org’, o ‘co’. De lo contrario, es un dominio de tercer nivel. Por ejemplo, firstSignificantSubdomain (‘https://news.yandex.ru/’Número ‘yandex’, firstSignificantSubdomain (‘https://news.yandex.com.tr/’Número ‘yandex’. La lista de “insignificant” dominios de segundo nivel y otros detalles de implementación pueden cambiar en el futuro. +Devuelve el “first significant subdomain”. Este es un concepto no estándar específico de Yandex.Métrica. El primer subdominio significativo es un dominio de segundo nivel si es ‘com’, ‘net’, ‘org’, o ‘co’. De lo contrario, es un dominio de tercer nivel. Por ejemplo, `firstSignificantSubdomain (‘https://news.yandex.ru/’) = ‘yandex’, firstSignificantSubdomain (‘https://news.yandex.com.tr/’) = ‘yandex’`. La lista de “insignificant” dominios de segundo nivel y otros detalles de implementación pueden cambiar en el futuro. ### cutToFirstSignificantSubdomain {#cuttofirstsignificantsubdomain} @@ -203,4 +206,4 @@ Quita la cadena de consulta y el identificador de fragmento. El signo de interro Elimina el ‘name’ Parámetro URL, si está presente. Esta función funciona bajo el supuesto de que el nombre del parámetro está codificado en la URL exactamente de la misma manera que en el argumento pasado. -[Artículo Original](https://clickhouse.tech/docs/es/query_language/functions/url_functions/) +[Artículo Original](https://clickhouse.tech/docs/en/query_language/functions/url_functions/) diff --git a/docs/es/query_language/functions/uuid_functions.md b/docs/es/sql_reference/functions/uuid_functions.md similarity index 84% rename from docs/es/query_language/functions/uuid_functions.md rename to docs/es/sql_reference/functions/uuid_functions.md index b83411c08ec..6bc2f18a59c 100644 --- a/docs/es/query_language/functions/uuid_functions.md +++ b/docs/es/sql_reference/functions/uuid_functions.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 53 +toc_title: Trabajando con UUID --- # Funciones para trabajar con UUID {#functions-for-working-with-uuid} @@ -8,7 +11,7 @@ Las funciones para trabajar con UUID se enumeran a continuación. ## GenerateUUIDv4 {#uuid-function-generate} -Genera el [UUID](../../data_types/uuid.md) de [versión 4](https://tools.ietf.org/html/rfc4122#section-4.4). +Genera el [UUID](../../sql_reference/data_types/uuid.md) de [versión 4](https://tools.ietf.org/html/rfc4122#section-4.4). ``` sql generateUUIDv4() @@ -62,7 +65,7 @@ SELECT toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba0') AS uuid ## UUIDStringToNum {#uuidstringtonum} -Acepta una cadena que contiene 36 caracteres en el formato `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`, y lo devuelve como un conjunto de bytes en un [Cadena fija (16)](../../data_types/fixedstring.md). 
+Acepta una cadena que contiene 36 caracteres en el formato `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`, y lo devuelve como un conjunto de bytes en un [Cadena fija (16)](../../sql_reference/data_types/fixedstring.md). ``` sql UUIDStringToNum(String) @@ -81,7 +84,6 @@ SELECT ``` ``` text - ┌─uuid─────────────────────────────────┬─bytes────────────┐ │ 612f3c40-5d3b-217e-707b-6a546a3d7b29 │ a/<@];!~p{jTj={) │ └──────────────────────────────────────┴──────────────────┘ @@ -89,7 +91,7 @@ SELECT ## UUIDNumToString {#uuidnumtostring} -Acepta un [Cadena fija (16)](../../data_types/fixedstring.md) valor, y devuelve una cadena que contiene 36 caracteres en formato de texto. +Acepta un [Cadena fija (16)](../../sql_reference/data_types/fixedstring.md) valor, y devuelve una cadena que contiene 36 caracteres en formato de texto. ``` sql UUIDNumToString(FixedString(16)) @@ -117,4 +119,4 @@ SELECT - [dictGetUUID](ext_dict_functions.md#ext_dict_functions-other) -[Artículo Original](https://clickhouse.tech/docs/es/query_language/functions/uuid_function/) +[Artículo Original](https://clickhouse.tech/docs/en/query_language/functions/uuid_function/) diff --git a/docs/es/query_language/functions/ym_dict_functions.md b/docs/es/sql_reference/functions/ym_dict_functions.md similarity index 75% rename from docs/es/query_language/functions/ym_dict_functions.md rename to docs/es/sql_reference/functions/ym_dict_functions.md index 7e5d7b2d73e..4ef4a169ebf 100644 --- a/docs/es/query_language/functions/ym_dict_functions.md +++ b/docs/es/sql_reference/functions/ym_dict_functions.md @@ -1,10 +1,13 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 59 +toc_title: Trabajando con Yandex.Diccionarios de Metrica --- -# Funciones para trabajar con Yandex.Métrica, diccionarios {#functions-for-working-with-yandex-metrica-dictionaries} +# Funciones para trabajar con Yandex.Metrica, diccionarios {#functions-for-working-with-yandex-metrica-dictionaries} -Para que las funciones a continuación funcionen, la configuración del servidor debe especificar las rutas y direcciones para obtener todo el Yandex.Diccionarios Metrica. Los diccionarios se cargan en la primera llamada de cualquiera de estas funciones. Si no se pueden cargar las listas de referencia, se produce una excepción. +Para que las funciones a continuación funcionen, la configuración del servidor debe especificar las rutas y direcciones para obtener todo el Yandex.Diccionarios Metrica. Los diccionarios se cargan en la primera llamada de cualquiera de estas funciones. Si las listas de referencia no se pueden cargar, se lanza una excepción. Para obtener información sobre cómo crear listas de referencia, consulte la sección “Dictionaries”. @@ -21,7 +24,7 @@ Por ejemplo, también encontrará el archivo `/opt/geo/regions_hierarchy_ua.txt` Todos los diccionarios se vuelven a cargar en tiempo de ejecución (una vez cada cierto número de segundos, como se define en el parámetro de configuración builtin\_dictionaries\_reload\_interval , o una vez por hora por defecto). Sin embargo, la lista de diccionarios disponibles se define una vez, cuando se inicia el servidor. -Todas las funciones para trabajar con regiones tienen un argumento opcional al final: la clave del diccionario. Se conoce como la geobase. +All functions for working with regions have an optional argument at the end – the dictionary key. It is referred to as the geobase. 
Ejemplo: ``` sql regionToCountry(RegionID) – Uses the default dictionary: /opt/geo/regions_hierarchy.txt regionToCountry(RegionID, '') – Uses the default dictionary: /opt/geo/regions_hierarchy.txt regionToCountry(RegionID, 'ua') – Uses the dictionary for the 'ua' key: /opt/g ``` ### ¿Cómo puedo hacerlo?\]) {#regiontocityid-geobase} -Acepta un número UInt32: el ID de región de la geobase de Yandex. Si esta región es una ciudad o parte de una ciudad, devuelve el ID de región para la ciudad apropiada. De lo contrario, devuelve 0. +Accepts a UInt32 number – the region ID from the Yandex geobase. If this region is a city or part of a city, it returns the region ID for the appropriate city. Otherwise, returns 0. ### ¿Cómo puedo hacerlo?\]) {#regiontoareaid-geobase} @@ -104,6 +107,28 @@ Ejemplo: `regionToCountry(toUInt32(213)) = 225` convierte Moscú (213) a Rusia ( Convierte una región en un continente. En todos los demás sentidos, esta función es la misma que ‘regionToCity’. Ejemplo: `regionToContinent(toUInt32(213)) = 10001` convierte Moscú (213) a Eurasia (10001). +### regionToTopContinent(id\[, geobase\]) {#regiontotopcontinent-regiontotopcontinent} + +Encuentra el continente más alto en la jerarquía de la región. + +**Sintaxis** + +``` sql +regionToTopContinent(id[, geobase]); +``` + +**Parámetros** + +- `id` — Region ID from the Yandex geobase. [UInt32](../../sql_reference/data_types/int_uint.md). +- `geobase` — Dictionary key. See [Múltiples Geobases](#multiple-geobases). [Cadena](../../sql_reference/data_types/string.md). Opcional. + +**Valor devuelto** + +- Identificador del continente de nivel superior (este último cuando subes la jerarquía de regiones). +- 0, si no hay ninguno. + +Tipo: `UInt32`. + ### Aquí está el código de identificación de la población.\]) {#regiontopopulationid-geobase} Obtiene la población de una región. @@ -114,17 +139,17 @@ En la geobase de Yandex, la población podría registrarse para las regiones sec ### ¿Cómo puedo hacerlo?\]) {#regioninlhs-rhs-geobase} Comprueba si un ‘lhs’ región pertenece a un ‘rhs’ regi. Devuelve un número UInt8 igual a 1 si pertenece, o 0 si no pertenece. -La relación es reflexiva: cualquier región también pertenece a sí misma. +The relationship is reflexive – any region also belongs to itself. ### RegiónJerarquía (id\[, geobase\]) {#regionhierarchyid-geobase} -Acepta un número UInt32: el ID de región de la geobase de Yandex. Devuelve una matriz de ID de región que consiste en la región pasada y todos los elementos primarios a lo largo de la cadena. +Accepts a UInt32 number – the region ID from the Yandex geobase. Returns an array of region IDs consisting of the passed region and all parents along the chain. Ejemplo: `regionHierarchy(toUInt32(213)) = [213,1,3,225,10001,10000]`. ### ¿Cómo puedo hacerlo?\]) {#regiontonameid-lang} -Acepta un número UInt32: el ID de región de la geobase de Yandex. Una cadena con el nombre del idioma se puede pasar como un segundo argumento. Los idiomas soportados son: ru, en, ua, uk, by, kz, tr. Si se omite el segundo argumento, el idioma ‘ru’ se utiliza. Si el idioma no es compatible, se produce una excepción. Devuelve una cadena: el nombre de la región en el idioma correspondiente. Si la región con el ID especificado no existe, se devuelve una cadena vacía. +Accepts a UInt32 number – the region ID from the Yandex geobase. A string with the name of the language can be passed as a second argument. Supported languages are: ru, en, ua, uk, by, kz, tr. If the second argument is omitted, the language ‘ru’ is used. If the language is not supported, an exception is thrown.
Returns a string – the name of the region in the corresponding language. If the region with the specified ID doesn't exist, an empty string is returned. `ua` y `uk` ambos significan ucraniano. -[Artículo Original](https://clickhouse.tech/docs/es/query_language/functions/ym_dict_functions/) +[Artículo Original](https://clickhouse.tech/docs/en/query_language/functions/ym_dict_functions/) diff --git a/docs/es/sql_reference/index.md b/docs/es/sql_reference/index.md new file mode 100644 index 00000000000..69de6894e34 --- /dev/null +++ b/docs/es/sql_reference/index.md @@ -0,0 +1,18 @@ +--- +machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_folder_title: SQL Reference +toc_hidden: true +toc_priority: 28 +toc_title: oculto +--- + +# Referencia SQL {#sql-reference} + +- [SELECT](statements/select.md) +- [INSERT INTO](statements/insert_into.md) +- [CREATE](statements/create.md) +- [ALTER](statements/alter.md#query_language_queries_alter) +- [Otros tipos de consultas](statements/misc.md) + +[Artículo Original](https://clickhouse.tech/docs/en/query_language/) diff --git a/docs/es/query_language/operators.md b/docs/es/sql_reference/operators.md similarity index 58% rename from docs/es/query_language/operators.md rename to docs/es/sql_reference/operators.md index 5710064ee1c..f6e9117eecf 100644 --- a/docs/es/query_language/operators.md +++ b/docs/es/sql_reference/operators.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 37 +toc_title: Operador --- # Operador {#operators} @@ -9,69 +12,69 @@ Los grupos de operadores se enumeran en orden de prioridad (cuanto más alto est ## Operadores de acceso {#access-operators} -`a[N]` – Acceso a un elemento de una matriz. El `arrayElement(a, N)` función. +`a[N]` – Access to an element of an array. The `arrayElement(a, N)` función. -`a.N` – El acceso a un elemento de tupla. El `tupleElement(a, N)` función. +`a.N` – Access to a tuple element. The `tupleElement(a, N)` función. ## Operador de negación numérica {#numeric-negation-operator} -`-a` – El `negate (a)` función. +`-a` – The `negate (a)` función. ## Operadores de multiplicación y división {#multiplication-and-division-operators} -`a * b` – El `multiply (a, b)` función. +`a * b` – The `multiply (a, b)` función. -`a / b` – El `divide(a, b)` función. +`a / b` – The `divide(a, b)` función. -`a % b` – El `modulo(a, b)` función. +`a % b` – The `modulo(a, b)` función. ## Operadores de suma y resta {#addition-and-subtraction-operators} -`a + b` – El `plus(a, b)` función. +`a + b` – The `plus(a, b)` función. -`a - b` – El `minus(a, b)` función. +`a - b` – The `minus(a, b)` función. ## Operadores de comparación {#comparison-operators} -`a = b` – El `equals(a, b)` función. +`a = b` – The `equals(a, b)` función. -`a == b` – El `equals(a, b)` función. +`a == b` – The `equals(a, b)` función. -`a != b` – El `notEquals(a, b)` función. +`a != b` – The `notEquals(a, b)` función. -`a <> b` – El `notEquals(a, b)` función. +`a <> b` – The `notEquals(a, b)` función. -`a <= b` – El `lessOrEquals(a, b)` función. +`a <= b` – The `lessOrEquals(a, b)` función. -`a >= b` – El `greaterOrEquals(a, b)` función. +`a >= b` – The `greaterOrEquals(a, b)` función. -`a < b` – El `less(a, b)` función. +`a < b` – The `less(a, b)` función. -`a > b` – El `greater(a, b)` función. +`a > b` – The `greater(a, b)` función. -`a LIKE s` – El `like(a, b)` función. +`a LIKE s` – The `like(a, b)` función. 
-`a NOT LIKE s` – El `notLike(a, b)` función. +`a NOT LIKE s` – The `notLike(a, b)` función. -`a BETWEEN b AND c` – Lo mismo que `a >= b AND a <= c`. +`a BETWEEN b AND c` – The same as `a >= b AND a <= c`. -`a NOT BETWEEN b AND c` – Lo mismo que `a < b OR a > c`. +`a NOT BETWEEN b AND c` – The same as `a < b OR a > c`. ## Operadores para trabajar con conjuntos de datos {#operators-for-working-with-data-sets} -*Ver [IN operadores](select.md#select-in-operators).* +*Ver [IN operadores](statements/select.md#select-in-operators).* -`a IN ...` – El `in(a, b)` función. +`a IN ...` – The `in(a, b)` función. -`a NOT IN ...` – El `notIn(a, b)` función. +`a NOT IN ...` – The `notIn(a, b)` función. -`a GLOBAL IN ...` – El `globalIn(a, b)` función. +`a GLOBAL IN ...` – The `globalIn(a, b)` función. -`a GLOBAL NOT IN ...` – El `globalNotIn(a, b)` función. +`a GLOBAL NOT IN ...` – The `globalNotIn(a, b)` función. -## Operadores para trabajar con fechas y horas {#operators-datetime} +## Operadores para trabajar con fechas y horarios {#operators-datetime} -### EXTRAER {#operator-extract} +### EXTRACT {#operator-extract} ``` sql EXTRACT(part FROM date); @@ -81,16 +84,16 @@ Extrae una parte de una fecha determinada. Por ejemplo, puede recuperar un mes a El `part` parámetro especifica qué parte de la fecha se va a recuperar. Los siguientes valores están disponibles: -- `DAY` — El día del mes. Valores posibles: 1-31. -- `MONTH` — El número de un mes. Valores posibles: 1-12. -- `YEAR` — Año. -- `SECOND` — Segundo. Valores posibles: 0–59. -- `MINUTE` — Minuto. Valores posibles: 0–59. -- `HOUR` — Hora. Valores posibles: 0–23. +- `DAY` — The day of the month. Possible values: 1–31. +- `MONTH` — The number of a month. Possible values: 1–12. +- `YEAR` — The year. +- `SECOND` — The second. Possible values: 0–59. +- `MINUTE` — The minute. Possible values: 0–59. +- `HOUR` — The hour. Possible values: 0–23. El `part` El parámetro no distingue entre mayúsculas y minúsculas. -El `date` parámetro especifica la fecha o la hora a procesar. Bien [Fecha](../data_types/date.md) o [FechaHora](../data_types/datetime.md) tipo es compatible. +El `date` parámetro especifica la fecha o la hora a procesar. Bien [Fecha](../sql_reference/data_types/date.md) o [FechaHora](../sql_reference/data_types/datetime.md) tipo es compatible. Ejemplos: @@ -135,9 +138,9 @@ FROM test.Orders; Puedes ver más ejemplos en [prueba](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00619_extract.sql). -### INTERVALO {#operator-interval} +### INTERVAL {#operator-interval} -Crea un [Intervalo](../data_types/special_data_types/interval.md)-type valor que debe utilizarse en operaciones aritméticas con [Fecha](../data_types/date.md) y [FechaHora](../data_types/datetime.md)-tipo valores. +Crea un [Intervalo](../sql_reference/data_types/special_data_types/interval.md)-type valor que debe utilizarse en operaciones aritméticas con [Fecha](../sql_reference/data_types/date.md) y [FechaHora](../sql_reference/data_types/datetime.md)-type valores. Tipos de intervalos: - `SECOND` @@ -150,7 +153,7 @@ Tipos de intervalos: - `YEAR` !!! warning "Advertencia" - Los intervalos con diferentes tipos no se pueden combinar. No puede usar expresiones como `INTERVAL 4 DAY 1 HOUR`. Exprese los intervalos en unidades que son más pequeñas o iguales a la unidad más pequeña del intervalo, por ejemplo `INTERVAL 25 HOUR`. Puede usar operaciones consequtive como en el siguiente ejemplo. + Los intervalos con diferentes tipos no se pueden combinar. 
No puedes usar expresiones como `INTERVAL 4 DAY 1 HOUR`. Exprese los intervalos en unidades que son más pequeñas o iguales a la unidad más pequeña del intervalo, por ejemplo `INTERVAL 25 HOUR`. Puede usar operaciones consecutivas como en el siguiente ejemplo. Ejemplo: @@ -166,28 +169,28 @@ SELECT now() AS current_date_time, current_date_time + INTERVAL 4 DAY + INTERVAL **Ver también** -- [Intervalo](../data_types/special_data_types/interval.md) Tipo de datos -- [ToInterval](functions/type_conversion_functions.md#function-tointerval) funciones de conversión de tipo +- [Intervalo](../sql_reference/data_types/special_data_types/interval.md) tipo de datos +- [ToInterval](../sql_reference/functions/type_conversion_functions.md#function-tointerval) funciones de conversión de tipo ## Operador de Negación Lógica {#logical-negation-operator} -`NOT a` – El `not(a)` función. +`NOT a` – The `not(a)` función. ## Operador lógico and {#logical-and-operator} -`a AND b` – El`and(a, b)` función. +`a AND b` – The`and(a, b)` función. ## Operador lógico or {#logical-or-operator} -`a OR b` – El `or(a, b)` función. +`a OR b` – The `or(a, b)` función. ## Operador condicional {#conditional-operator} -`a ? b : c` – El `if(a, b, c)` función. +`a ? b : c` – The `if(a, b, c)` función. Nota: -El operador condicional calcula los valores de b y c, luego verifica si se cumple la condición a y luego devuelve el valor correspondiente. Si `b` o `C` es una [arrayJoin()](functions/array_join.md#functions_arrayjoin) función, cada fila se replicará independientemente de la “a” condición. +El operador condicional calcula los valores de b y c, luego verifica si se cumple la condición a y luego devuelve el valor correspondiente. Si `b` o `c` es una [arrayJoin()](../sql_reference/functions/array_join.md#functions_arrayjoin) función, cada fila se replicará independientemente de la “a” condición. ## Expresión condicional {#operator_case} ``` sql CASE [x] END ``` -Si `x` se especifica, entonces `transform(x, [a, ...], [b, ...], c)` se utiliza la función. De lo contrario – `multiIf(a, b, ..., c)`. +Si `x` se especifica, entonces `transform(x, [a, ...], [b, ...], c)` function is used. Otherwise – `multiIf(a, b, ..., c)`. Si no hay `ELSE c` cláusula en la expresión, el valor predeterminado es `NULL`. El `transform` no funciona con `NULL`. ## Operador de Concatenación {#concatenation-operator} -`s1 || s2` – El `concat(s1, s2) function.` +`s1 || s2` – The `concat(s1, s2) function.` ## Operador de Creación Lambda {#lambda-creation-operator} -`x -> expr` – El `lambda(x, expr) function.` +`x -> expr` – The `lambda(x, expr) function.` Los siguientes operadores no tienen prioridad, ya que son corchetes: ## Operador de creación de matrices {#array-creation-operator} -`[x1, ...]` – El `array(x1, ...) function.` +`[x1, ...]` – The `array(x1, ...) function.` ## Operador de creación de tupla {#tuple-creation-operator} -`(x1, x2, ...)` – El `tuple(x2, x2, ...) function.` +`(x1, x2, ...)` – The `tuple(x2, x2, ...) function.` ## Asociatividad {#associativity} Todos los operadores binarios han dejado asociatividad. Por ejemplo, `1 + 2 + 3` se transforma a `plus(plus(1, 2), 3)`. -A veces esto no funciona de la manera que usted espera. Por ejemplo, `SELECT 4 > 2 > 3` resultará en 0. +A veces esto no funciona de la manera que esperas. Por ejemplo, `SELECT 4 > 2 > 3` resultará en 0. Para la eficiencia, el `and` y `or` funciones aceptan cualquier número de argumentos.
Las cadenas correspondientes de operadores `AND` y `OR` se transforman en una sola llamada a estas funciones.

## Comprobación de `NULL` {#checking-for-null}

-ClickHouse soporta el `IS NULL` y `IS NOT NULL` operador.
+ClickHouse soporta los operadores `IS NULL` y `IS NOT NULL`.

-### ES NULO {#operator-is-null}
+### IS NULL {#operator-is-null}

-- Para [NULO](../data_types/nullable.md) valores de tipo, el `IS NULL` operador devuelve:
-    - `1` Español `NULL`.
+- Para valores de tipo [NULL](../sql_reference/data_types/nullable.md), el operador `IS NULL` devuelve:
+    - `1` si el valor es `NULL`.
    - `0` de lo contrario.
- Para otros valores, el operador `IS NULL` siempre devuelve `0`.

@@ -253,10 +256,10 @@ SELECT x+100 FROM t_null WHERE y IS NULL
└──────────────┘
```

-### NO ES NULO {#is-not-null}
+### IS NOT NULL {#is-not-null}

-- Para [NULO](../data_types/nullable.md) valores de tipo, el `IS NOT NULL` operador devuelve:
-    - `0` Español `NULL`.
+- Para valores de tipo [NULL](../sql_reference/data_types/nullable.md), el operador `IS NOT NULL` devuelve:
+    - `0` si el valor es `NULL`.
    - `1` de lo contrario.
- Para otros valores, el operador `IS NOT NULL` siempre devuelve `1`.

@@ -272,4 +275,4 @@ SELECT * FROM t_null WHERE y IS NOT NULL
└───┴───┘
```

-[Artículo Original](https://clickhouse.tech/docs/es/query_language/operators/)
+[Artículo Original](https://clickhouse.tech/docs/en/query_language/operators/)
diff --git a/docs/es/query_language/alter.md b/docs/es/sql_reference/statements/alter.md
similarity index 67%
rename from docs/es/query_language/alter.md
rename to docs/es/sql_reference/statements/alter.md
index c823381c4ee..db8b3f09ada 100644
--- a/docs/es/query_language/alter.md
+++ b/docs/es/sql_reference/statements/alter.md
@@ -1,8 +1,11 @@
---
machine_translated: true
+machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa
+toc_priority: 36
+toc_title: ALTER
---

-## ALTERAR {#query_language_queries_alter}
+## ALTER {#query_language_queries_alter}

La consulta `ALTER` solo se admite para tablas `*MergeTree`, así como `Merge` y `Distributed`. La consulta tiene varias variaciones.

@@ -19,15 +22,15 @@ Cada acción es una operación en una columna.

Se admiten las siguientes acciones:

-- [AÑADIR COLUMNA](#alter_add-column) — Agrega una nueva columna a la tabla.
-- [COLUMNA DE GOTA](#alter_drop-column) — Elimina la columna.
-- [Sistema abierto.](#alter_clear-column) — Restablece los valores de las columnas.
-- [COLUMNA DE COMENTARIOS](#alter_comment-column) — Agrega un comentario de texto a la columna.
-- [COLUMNA MODIFICAR](#alter_modify-column) — Cambia el tipo de columna, la expresión predeterminada y el TTL.
+- [ADD COLUMN](#alter_add-column) — Adds a new column to the table.
+- [DROP COLUMN](#alter_drop-column) — Deletes the column.
+- [CLEAR COLUMN](#alter_clear-column) — Resets column values.
+- [COMMENT COLUMN](#alter_comment-column) — Adds a text comment to the column.
+- [MODIFY COLUMN](#alter_modify-column) — Changes column's type, default expression and TTL.

Estas acciones se describen en detalle a continuación.

-#### AÑADIR COLUMNA {#alter_add-column}
+#### ADD COLUMN {#alter_add-column}

``` sql
ADD COLUMN [IF NOT EXISTS] name [type] [default_expr] [codec] [AFTER name_after]
```

Agrega una nueva columna a la tabla con los `name`, `type`, [`codec`](create.md#codecs) y `default_expr` especificados (ver la sección [Expresiones predeterminadas](create.md#create-default-values)).
-Si el `IF NOT EXISTS` se incluye una cláusula, la consulta no devolverá un error si la columna ya existe. Si especifica `AFTER name_after` (el nombre de otra columna), la columna se agrega después de la especificada en la lista de columnas de tabla. De lo contrario, la columna se agrega al final de la tabla. Tenga en cuenta que no hay forma de agregar una columna al principio de una tabla. Para una cadena de acciones, `name_after` puede ser el nombre de una columna que se agrega en una de las acciones anteriores.
+Si se incluye la cláusula `IF NOT EXISTS`, la consulta no devolverá un error si la columna ya existe. Si especifica `AFTER name_after` (el nombre de otra columna), la columna se agrega después de la especificada en la lista de columnas de tabla. De lo contrario, la columna se agrega al final de la tabla. Tenga en cuenta que no hay forma de agregar una columna al principio de una tabla. Para una cadena de acciones, `name_after` puede ser el nombre de una columna que se agrega en una de las acciones anteriores.

-Agregar una columna solo cambia la estructura de la tabla, sin realizar ninguna acción con datos. Los datos no aparecen en el disco después de `ALTER`. Si faltan los datos para una columna al leer de la tabla, se rellena con valores predeterminados (realizando la expresión predeterminada si hay una, o usando ceros o cadenas vacías). La columna aparece en el disco después de fusionar partes de datos (consulte [Método de codificación de datos:](../operations/table_engines/mergetree.md)).
+Agregar una columna solo cambia la estructura de la tabla, sin realizar ninguna acción con los datos. Los datos no aparecen en el disco después de `ALTER`. Si faltan los datos de una columna al leer de la tabla, se rellenan con valores predeterminados (evaluando la expresión predeterminada si hay una, o usando ceros o cadenas vacías). La columna aparece en el disco después de fusionar las partes de datos (consulte [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md)).

Este enfoque nos permite completar la consulta `ALTER` al instante, sin aumentar el volumen de datos antiguos.

Ejemplo:

``` sql
ALTER TABLE visits ADD COLUMN browser String AFTER user_id
```

-#### COLUMNA DE GOTA {#alter_drop-column}
+#### DROP COLUMN {#alter_drop-column}

``` sql
DROP COLUMN [IF EXISTS] name
```

-Elimina la columna con el nombre `name`. Si el `IF EXISTS` Si se especifica una cláusula, la consulta no devolverá un error si la columna no existe.
+Elimina la columna con el nombre `name`. Si se especifica la cláusula `IF EXISTS`, la consulta no devolverá un error si la columna no existe.

Elimina datos del sistema de archivos. Dado que esto elimina archivos completos, la consulta se completa casi al instante.

Ejemplo:

``` sql
ALTER TABLE visits DROP COLUMN browser
```

-#### Sistema abierto. {#alter_clear-column}
+#### CLEAR COLUMN {#alter_clear-column}

``` sql
CLEAR COLUMN [IF EXISTS] name IN PARTITION partition_name
```

Restablece todos los datos de una columna para una partición especificada. Obtenga más información sobre cómo configurar el nombre de la partición en la sección [Cómo especificar la expresión de partición](#alter-how-to-specify-part-expr).

-Si el `IF EXISTS` Si se especifica una cláusula, la consulta no devolverá un error si la columna no existe.
+Si se especifica la cláusula `IF EXISTS`, la consulta no devolverá un error si la columna no existe.
Ejemplo:

@@ -79,17 +82,17 @@ Ejemplo:
ALTER TABLE visits CLEAR COLUMN browser IN PARTITION tuple()
```

-#### COLUMNA DE COMENTARIOS {#alter_comment-column}
+#### COMMENT COLUMN {#alter_comment-column}

``` sql
COMMENT COLUMN [IF EXISTS] name 'comment'
```

-Agrega un comentario a la columna. Si el `IF EXISTS` Si se especifica una cláusula, la consulta no devolverá un error si la columna no existe.
+Agrega un comentario a la columna. Si se especifica la cláusula `IF EXISTS`, la consulta no devolverá un error si la columna no existe.

Cada columna puede tener un comentario. Si ya existe un comentario para la columna, un nuevo comentario sobrescribe el comentario anterior.

-Los comentarios se almacenan en el `comment_expression` columna devuelta por el [TABLA DE DESCRIBE](misc.md#misc-describe-table) consulta.
+Los comentarios se almacenan en la columna `comment_expression` devuelta por la consulta [DESCRIBE TABLE](misc.md#misc-describe-table).

Ejemplo:

``` sql
ALTER TABLE visits COMMENT COLUMN browser 'The table shows the browser used for accessing the site.'
```

-#### COLUMNA MODIFICAR {#alter_modify-column}
+#### MODIFY COLUMN {#alter_modify-column}

``` sql
MODIFY COLUMN [IF EXISTS] name [type] [default_expr] [TTL]
```

Esta consulta cambia las propiedades de la columna `name`:

- Tipo

- Expresión predeterminada

- TTL

-    For examples of columns TTL modifying, see [Column TTL](../operations/table_engines/mergetree.md#mergetree-column-ttl).
+    For examples of columns TTL modifying, see [Column TTL](../engines/table_engines/mergetree_family/mergetree.md#mergetree-column-ttl).

-Si el `IF EXISTS` Si se especifica una cláusula, la consulta no devolverá un error si la columna no existe.
+Si se especifica la cláusula `IF EXISTS`, la consulta no devolverá un error si la columna no existe.

-Al cambiar el tipo, los valores se convierten como si [ToType](functions/type_conversion_functions.md) se les aplicaron funciones. Si solo se cambia la expresión predeterminada, la consulta no hace nada complejo y se completa casi al instante.
+Al cambiar el tipo, los valores se convierten como si se les aplicaran las funciones [ToType](../../sql_reference/functions/type_conversion_functions.md). Si solo se cambia la expresión predeterminada, la consulta no hace nada complejo y se completa casi al instante.

Ejemplo:

``` sql
ALTER TABLE visits MODIFY COLUMN browser Array(String)
```

-Cambiar el tipo de columna es la única acción compleja: cambia el contenido de los archivos con datos. Para mesas grandes, esto puede llevar mucho tiempo.
+Changing the column type is the only complex action – it changes the contents of files with data. For large tables, this may take a long time.

Hay varias etapas de procesamiento:

@@ -141,11 +144,11 @@ El `ALTER` se replica la consulta para cambiar columnas. Las instrucciones se gu

La consulta `ALTER` le permite crear y eliminar elementos separados (columnas) en estructuras de datos anidadas, pero no estructuras de datos anidadas completas. Para agregar una estructura de datos anidada, puede agregar columnas con un nombre como `name.nested_name` y el tipo `Array(T)`. Una estructura de datos anidada es equivalente a varias columnas de matriz con un nombre que tiene el mismo prefijo antes del punto.

-No hay soporte para eliminar columnas en la clave principal o la clave de muestreo (columnas que se utilizan en el `ENGINE` expresión).
Solo es posible cambiar el tipo de las columnas que se incluyen en la clave principal si este cambio no provoca que se modifiquen los datos (por ejemplo, puede agregar valores a un Enum o cambiar un tipo de `DateTime` Naciones `UInt32`). +No hay soporte para eliminar columnas en la clave principal o la clave de muestreo (columnas que se utilizan en el `ENGINE` expresion). Solo es posible cambiar el tipo de las columnas que se incluyen en la clave principal si este cambio no provoca que se modifiquen los datos (por ejemplo, puede agregar valores a un Enum o cambiar un tipo de `DateTime` a `UInt32`). -Si el `ALTER` la consulta no es suficiente para realizar los cambios en la tabla que necesita, puede crear una nueva tabla, copiar los datos [INSERTAR SELECCIONAR](insert_into.md#insert_query_insert-select) Consulta, luego cambie las tablas usando el [Renombrar](misc.md#misc_operations-rename) Consulta y elimina la tabla anterior. Puede usar el [Método de codificación de datos:](../operations/utils/clickhouse-copier.md) como una alternativa a la `INSERT SELECT` consulta. +Si el `ALTER` la consulta no es suficiente para realizar los cambios en la tabla que necesita, puede crear una nueva tabla, copiar los datos [INSERT SELECT](insert_into.md#insert_query_insert-select) consulta, luego cambie las tablas usando el [RENAME](misc.md#misc_operations-rename) consulta y elimina la tabla anterior. Puede usar el [Método de codificación de datos:](../../operations/utilities/clickhouse-copier.md) como una alternativa a la `INSERT SELECT` consulta. -El `ALTER` query bloquea todas las lecturas y escrituras para la tabla. En otras palabras, si un largo `SELECT` se está ejecutando en el momento de la `ALTER` Consulta, el `ALTER` la consulta esperará a que se complete. Al mismo tiempo, todas las consultas nuevas a la misma tabla esperarán `ALTER` se está ejecutando. +El `ALTER` query bloquea todas las lecturas y escrituras para la tabla. En otras palabras, si un largo `SELECT` se está ejecutando en el momento de la `ALTER` consulta, el `ALTER` la consulta esperará a que se complete. Al mismo tiempo, todas las consultas nuevas a la misma tabla esperarán `ALTER` se está ejecutando. Para tablas que no almacenan datos por sí mismas (como `Merge` y `Distributed`), `ALTER` simplemente cambia la estructura de la tabla, y no cambia la estructura de las tablas subordinadas. Por ejemplo, cuando se ejecuta ALTER para un `Distributed` mesa, también tendrá que ejecutar `ALTER` para las tablas en todos los servidores remotos. @@ -157,10 +160,10 @@ Se admite el siguiente comando: MODIFY ORDER BY new_expression ``` -Solo funciona para tablas en el [`MergeTree`](../operations/table_engines/mergetree.md) familia (incluyendo -[repetición](../operations/table_engines/replication.md) tabla). El comando cambia el -[clave de clasificación](../operations/table_engines/mergetree.md) de la mesa -Naciones `new_expression` (una expresión o una tupla de expresiones). La clave principal sigue siendo la misma. +Solo funciona para tablas en el [`MergeTree`](../../engines/table_engines/mergetree_family/mergetree.md) familia (incluyendo +[repetición](../../engines/table_engines/mergetree_family/replication.md) tabla). El comando cambia el +[clave de clasificación](../../engines/table_engines/mergetree_family/mergetree.md) de la mesa +a `new_expression` (una expresión o una tupla de expresiones). La clave principal sigue siendo la misma. El comando es liviano en el sentido de que solo cambia los metadatos. 
Para mantener la propiedad esa parte de datos las filas están ordenadas por la expresión de clave de ordenación, no puede agregar expresiones que contengan columnas existentes @@ -168,8 +171,8 @@ a la clave de ordenación (sólo las columnas añadidas `ADD COLUMN` comando en ### Manipulaciones con índices de saltos de datos {#manipulations-with-data-skipping-indices} -Solo funciona para tablas en el [`*MergeTree`](../operations/table_engines/mergetree.md) familia (incluyendo -[repetición](../operations/table_engines/replication.md) tabla). Las siguientes operaciones +Solo funciona para tablas en el [`*MergeTree`](../../engines/table_engines/mergetree_family/mergetree.md) familia (incluyendo +[repetición](../../engines/table_engines/mergetree_family/replication.md) tabla). Las siguientes operaciones están disponibles: - `ALTER TABLE [db].name ADD INDEX name expression TYPE type GRANULARITY value AFTER name [AFTER name2]` - Agrega la descripción del índice a los metadatos de las tablas. @@ -198,20 +201,20 @@ Todos los cambios en las tablas replicadas se transmiten a ZooKeeper, por lo que ### Manipulaciones con particiones y piezas {#alter_manipulations-with-partitions} -Las siguientes operaciones con [partición](../operations/table_engines/custom_partitioning_key.md) están disponibles: +Las siguientes operaciones con [partición](../../engines/table_engines/mergetree_family/custom_partitioning_key.md) están disponibles: -- [DETACH PARTITION](#alter_detach-partition) – Mueve una partición a la `detached` Directorio y olvidarlo. -- [PARTICIÓN DE CAÍDA](#alter_drop-partition) – Elimina una partición. -- [ADJUNTA PARTE\|PARTICIÓN](#alter_attach-partition) – Añade una pieza o partición desde el `detached` directorio a la tabla. -- [REEMPLAZAR LA PARTICIÓN](#alter_replace-partition) - Copia la partición de datos de una tabla a otra. -- [ADJUNTA PARTICIÓN DE](#alter_attach-partition-from) – Copia la partición de datos de una tabla a otra y añade. -- [REEMPLAZAR LA PARTICIÓN](#alter_replace-partition) - Copia la partición de datos de una tabla a otra y reemplaza. -- [MUEVA LA PARTICIÓN A LA MESA](#alter_move_to_table-partition) (\#alter\_move\_to\_table-partition) - Mover la partición de datos de una tabla a otra. -- [COLUMNA CLARA EN PARTICIPACIÓN](#alter_clear-column-partition) - Restablece el valor de una columna especificada en una partición. -- [ÍNDICE CLARO EN PARTICIPACIÓN](#alter_clear-index-partition) - Restablece el índice secundario especificado en una partición. -- [CONGELAR PARTICIÓN](#alter_freeze-partition) – Crea una copia de seguridad de una partición. -- [PARTICIÓN FETCH](#alter_fetch-partition) – Descarga una partición de otro servidor. -- [PARTICIÓN DE MOVIMIENTO\|PARTE](#alter_move-partition) – Mover partición / parte de datos a otro disco o volumen. +- [DETACH PARTITION](#alter_detach-partition) – Moves a partition to the `detached` directorio y olvidarlo. +- [DROP PARTITION](#alter_drop-partition) – Deletes a partition. +- [ATTACH PART\|PARTITION](#alter_attach-partition) – Adds a part or partition from the `detached` directorio a la tabla. +- [REPLACE PARTITION](#alter_replace-partition) - Copia la partición de datos de una tabla a otra. +- [ATTACH PARTITION FROM](#alter_attach-partition-from) – Copies the data partition from one table to another and adds. +- [REPLACE PARTITION](#alter_replace-partition) - Copia la partición de datos de una tabla a otra y reemplaza. 
+- [MOVE PARTITION TO TABLE](#alter_move_to_table-partition) - Mover la partición de datos de una tabla a otra.
+- [CLEAR COLUMN IN PARTITION](#alter_clear-column-partition) - Restablece el valor de una columna especificada en una partición.
+- [CLEAR INDEX IN PARTITION](#alter_clear-index-partition) - Restablece el índice secundario especificado en una partición.
+- [FREEZE PARTITION](#alter_freeze-partition) – Creates a backup of a partition.
+- [FETCH PARTITION](#alter_fetch-partition) – Downloads a partition from another server.
+- [MOVE PARTITION\|PART](#alter_move-partition) – Move partition/data part to another disk or volume.

#### DETACH PARTITION {#alter\_detach-partition}

``` sql
ALTER TABLE table_name DETACH PARTITION partition_expr
```

-Mueve todos los datos de la partición especificada `detached` directorio. El servidor se olvida de la partición de datos separada como si no existiera. El servidor no sabrá acerca de estos datos hasta que [CONECTAR](#alter_attach-partition) consulta.
+Mueve todos los datos de la partición especificada al directorio `detached`. El servidor se olvida de la partición de datos separada como si no existiera. El servidor no sabrá de estos datos hasta que se ejecute la consulta [ATTACH](#alter_attach-partition).

Ejemplo:

``` sql
ALTER TABLE visits DETACH PARTITION 201901
```

Lea cómo configurar la expresión de partición en una sección [Cómo especificar la expresión de partición](#alter-how-to-specify-part-expr).

-Después de ejecutar la consulta, puede hacer lo que quiera con los datos en el `detached` directorio — eliminarlo del sistema de archivos, o simplemente dejarlo.
+Después de ejecutar la consulta, puede hacer lo que quiera con los datos en el `detached` directory — delete it from the file system, or just leave it.

-Esta consulta se replica – mueve los datos a la `detached` directorio en todas las réplicas. Tenga en cuenta que solo puede ejecutar esta consulta en una réplica de líder. Para averiguar si una réplica es un líder, realice `SELECT` Consulta a la [sistema.Replica](../operations/system_tables.md#system_tables-replicas) tabla. Alternativamente, es más fácil hacer un `DETACH` consulta en todas las réplicas: todas las réplicas producen una excepción, excepto la réplica líder.
+This query is replicated – it moves the data to the `detached` directorio en todas las réplicas. Tenga en cuenta que solo puede ejecutar esta consulta en una réplica líder. Para averiguar si una réplica es líder, realice la consulta `SELECT` a la tabla [sistema.Replica](../../operations/system_tables.md#system_tables-replicas). Alternativamente, es más fácil hacer una consulta `DETACH` en todas las réplicas: todas las réplicas producen una excepción, excepto la réplica líder.

-#### PARTICIÓN DE CAÍDA {#alter_drop-partition}
+#### DROP PARTITION {#alter_drop-partition}

``` sql
ALTER TABLE table_name DROP PARTITION partition_expr
```

Elimina la partición especificada de la tabla. Esta consulta etiqueta la partición como inactiva y elimina los datos por completo, aproximadamente en 10 minutos.

Lea cómo configurar la expresión de partición en una sección [Cómo especificar la expresión de partición](#alter-how-to-specify-part-expr).

-La consulta se replica: elimina los datos de todas las réplicas.
+The query is replicated – it deletes data on all replicas.
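Por ejemplo, una secuencia orientativa (partición hipotética `201901` de una tabla `visits`) para retirar datos antiguos:

``` sql
-- Separe primero la partición para poder inspeccionarla en el directorio `detached`:
ALTER TABLE visits DETACH PARTITION 201901;

-- Si los datos ya no hacen falta, elimínelos definitivamente:
ALTER TABLE visits DROP PARTITION 201901;
```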
-#### CAÍDA DE DESPRENDIMIENTO DE LA PARTICIÓN\|PARTE {#alter_drop-detached} +#### DROP DETACHED PARTITION\|PART {#alter_drop-detached} ``` sql ALTER TABLE table_name DROP DETACHED PARTITION|PART partition_expr @@ -256,7 +259,7 @@ ALTER TABLE table_name DROP DETACHED PARTITION|PART partition_expr Quita la parte especificada o todas las partes de la partición especificada de `detached`. Más información sobre cómo establecer la expresión de partición en una sección [Cómo especificar la expresión de partición](#alter-how-to-specify-part-expr). -#### ADJUNTA PARTICIÓN\|PARTE {#alter_attach-partition} +#### ATTACH PARTITION\|PART {#alter_attach-partition} ``` sql ALTER TABLE table_name ATTACH PARTITION|PART partition_expr @@ -275,39 +278,39 @@ Esta consulta se replica. El iniciador de réplica comprueba si hay datos en el Entonces puedes poner datos en el `detached` en una réplica, y utilice el directorio `ALTER ... ATTACH` consulta para agregarlo a la tabla en todas las réplicas. -#### ADJUNTA PARTICIÓN DE {#alter_attach-partition-from} +#### ATTACH PARTITION FROM {#alter_attach-partition-from} ``` sql ALTER TABLE table2 ATTACH PARTITION partition_expr FROM table1 ``` -Esta consulta copia la partición de datos `table1` Naciones `table2` añade datos a los que existen en el `table2`. Tenga en cuenta que los datos no se eliminarán de `table1`. +Esta consulta copia la partición de datos `table1` a `table2` añade datos a los que existen en el `table2`. Tenga en cuenta que los datos no se eliminarán de `table1`. Para que la consulta se ejecute correctamente, se deben cumplir las siguientes condiciones: - Ambas tablas deben tener la misma estructura. - Ambas tablas deben tener la misma clave de partición. -#### REEMPLAZAR LA PARTICIÓN {#alter_replace-partition} +#### REPLACE PARTITION {#alter_replace-partition} ``` sql ALTER TABLE table2 REPLACE PARTITION partition_expr FROM table1 ``` -Esta consulta copia la partición de datos `table1` Naciones `table2` y reemplaza la partición existente en el `table2`. Tenga en cuenta que los datos no se eliminarán de `table1`. +Esta consulta copia la partición de datos `table1` a `table2` y reemplaza la partición existente en el `table2`. Tenga en cuenta que los datos no se eliminarán de `table1`. Para que la consulta se ejecute correctamente, se deben cumplir las siguientes condiciones: - Ambas tablas deben tener la misma estructura. - Ambas tablas deben tener la misma clave de partición. -#### MUEVA LA PARTICIÓN A LA MESA {#alter_move_to_table-partition} +#### MOVE PARTITION TO TABLE {#alter_move_to_table-partition} ``` sql ALTER TABLE table_source MOVE PARTITION partition_expr TO TABLE table_dest ``` -Esta consulta mueve la partición de datos `table_source` Naciones `table_dest` con la eliminación de los datos de `table_source`. +Esta consulta mueve la partición de datos `table_source` a `table_dest` con la eliminación de los datos de `table_source`. Para que la consulta se ejecute correctamente, se deben cumplir las siguientes condiciones: @@ -316,7 +319,7 @@ Para que la consulta se ejecute correctamente, se deben cumplir las siguientes c - Ambas tablas deben ser de la misma familia de motores. (replicado o no replicado) - Ambas tablas deben tener la misma política de almacenamiento. 
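Un ejemplo orientativo (tablas hipotéticas `visits` y `visits_archive` que cumplen las condiciones anteriores):

``` sql
-- Copia la partición 201901 desde visits y reemplaza la existente en visits_archive
-- (visits conserva sus datos):
ALTER TABLE visits_archive REPLACE PARTITION 201901 FROM visits;

-- O bien muévala, eliminando los datos de la tabla de origen:
ALTER TABLE visits MOVE PARTITION 201901 TO TABLE visits_archive;
```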
-#### COLUMNA CLARA EN PARTICIPACIÓN {#alter_clear-column-partition}
+#### CLEAR COLUMN IN PARTITION {#alter_clear-column-partition}

``` sql
ALTER TABLE table_name CLEAR COLUMN column_name IN PARTITION partition_expr
```

Ejemplo:

``` sql
ALTER TABLE visits CLEAR COLUMN hour in PARTITION 201902
```

-#### CONGELAR PARTICIÓN {#alter_freeze-partition}
+#### FREEZE PARTITION {#alter_freeze-partition}

``` sql
ALTER TABLE table_name FREEZE [PARTITION partition_expr]
```

En el momento de la ejecución, para una instantánea de datos, la consulta crea

- `N` es el número incremental de la copia de seguridad.

!!! note "Nota"
-    Si usted estados unidos [un conjunto de discos para el almacenamiento de datos en una tabla](../operations/table_engines/mergetree.md#table_engine-mergetree-multiple-volumes) el `shadow/N` directorio aparece en cada disco, almacenando partes de datos que coinciden con el `PARTITION` expresión.
+    Si usa [un conjunto de discos para el almacenamiento de datos en una tabla](../../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-multiple-volumes), el directorio `shadow/N` aparece en cada disco, almacenando las partes de datos que coinciden con la expresión `PARTITION`.

La misma estructura de directorios se crea dentro de la copia de seguridad que dentro de `/var/lib/clickhouse/`. La consulta realiza ‘chmod’ para todos los archivos, prohibiendo escribir en ellos.

-Después de crear la copia de seguridad, puede copiar los datos desde `/var/lib/clickhouse/shadow/` al servidor remoto y, a continuación, elimínelo del servidor local. Tenga en cuenta que el `ALTER t FREEZE PARTITION` consulta no se réplica. Crea una copia de seguridad local solo en el servidor local.
+Después de crear la copia de seguridad, puede copiar los datos desde `/var/lib/clickhouse/shadow/` al servidor remoto y, a continuación, eliminarlos del servidor local. Tenga en cuenta que la consulta `ALTER t FREEZE PARTITION` no se replica. Crea una copia de seguridad local solo en el servidor local.

La consulta crea una copia de seguridad casi instantáneamente (pero primero espera a que las consultas actuales a la tabla correspondiente terminen de ejecutarse).

Para restaurar los datos de una copia de seguridad, haga lo siguiente:

-1. Cree la tabla si no existe. Para ver la consulta, utilice el .archivo sql (reemplazar `ATTACH` es ella con `CREATE`).
-2. Copie los datos de la `data/database/table/` Directorio dentro de la copia de seguridad a la `/var/lib/clickhouse/data/database/table/detached/` Directorio.
-3. Ejecutar `ALTER TABLE t ATTACH PARTITION` Consultas para agregar los datos a una tabla.
+1. Cree la tabla si no existe. Para ver la consulta, utilice el archivo .sql (reemplace `ATTACH` por `CREATE` en ella).
+2. Copie los datos del directorio `data/database/table/` dentro de la copia de seguridad al directorio `/var/lib/clickhouse/data/database/table/detached/`.
+3. Ejecute consultas `ALTER TABLE t ATTACH PARTITION` para agregar los datos a una tabla.

La restauración desde una copia de seguridad no requiere detener el servidor.

-Para obtener más información sobre las copias de seguridad y la restauración de datos, consulte [Copia de seguridad de datos](../operations/backup.md) apartado.
+Para obtener más información sobre las copias de seguridad y la restauración de datos, consulte el apartado [Copia de seguridad de datos](../../operations/backup.md).
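A modo de ilustración, con la sintaxis anterior (tabla y partición hipotéticas):

``` sql
-- Crea una instantánea local de una partición en /var/lib/clickhouse/shadow/N/:
ALTER TABLE visits FREEZE PARTITION 201902;

-- Sin la cláusula PARTITION, se congelan todas las particiones a la vez:
ALTER TABLE visits FREEZE;
```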
-#### ÍNDICE CLARO EN PARTICIPACIÓN {#alter_clear-index-partition} +#### CLEAR INDEX IN PARTITION {#alter_clear-index-partition} ``` sql ALTER TABLE table_name CLEAR INDEX index_name IN PARTITION partition_expr @@ -377,7 +380,7 @@ ALTER TABLE table_name CLEAR INDEX index_name IN PARTITION partition_expr La consulta funciona de forma similar a `CLEAR COLUMN`, pero restablece un índice en lugar de una columna de datos. -#### PARTICIÓN FETCH {#alter_fetch-partition} +#### FETCH PARTITION {#alter_fetch-partition} ``` sql ALTER TABLE table_name FETCH PARTITION partition_expr FROM 'path-in-zookeeper' @@ -388,7 +391,7 @@ Descarga una partición desde otro servidor. Esta consulta solo funciona para la La consulta hace lo siguiente: 1. Descarga la partición del fragmento especificado. En ‘path-in-zookeeper’ debe especificar una ruta al fragmento en ZooKeeper. -2. Luego, la consulta coloca los datos descargados en el `detached` Directorio de la `table_name` tabla. Descripción [ADJUNTA PARTICIÓN\|PARTE](#alter_attach-partition) consulta para agregar los datos a la tabla. +2. Luego, la consulta coloca los datos descargados en el `detached` directorio de la `table_name` tabla. Utilice el [ATTACH PARTITION\|PART](#alter_attach-partition) consulta para agregar los datos a la tabla. Por ejemplo: @@ -399,22 +402,22 @@ ALTER TABLE users ATTACH PARTITION 201902; Tenga en cuenta que: -- El `ALTER ... FETCH PARTITION` la consulta no está replicada. Coloca la partición en el `detached` sólo en el servidor local. -- El `ALTER TABLE ... ATTACH` consulta se replica. Agrega los datos a todas las réplicas. Los datos se agregan a una de las réplicas desde el `detached` directorio, y para los demás - de réplicas vecinas. +- El `ALTER ... FETCH PARTITION` consulta no se replica. Coloca la partición en el `detached` sólo en el servidor local. +- El `ALTER TABLE ... ATTACH` la consulta se replica. Agrega los datos a todas las réplicas. Los datos se agregan a una de las réplicas de la `detached` directorio, y para los demás - de réplicas vecinas. Antes de descargar, el sistema verifica si la partición existe y la estructura de la tabla coincide. La réplica más adecuada se selecciona automáticamente de las réplicas en buen estado. Aunque se llama a la consulta `ALTER TABLE`, no cambia la estructura de la tabla y no cambiar inmediatamente los datos disponibles en la tabla. -#### PARTICIÓN DE MOVIMIENTO\|PARTE {#alter_move-partition} +#### MOVE PARTITION\|PART {#alter_move-partition} -Mueve particiones o partes de datos a otro volumen o disco para `MergeTree`-mesas de motor. Ver [Uso de varios dispositivos de bloque para el almacenamiento de datos](../operations/table_engines/mergetree.md#table_engine-mergetree-multiple-volumes). +Mueve particiones o partes de datos a otro volumen o disco para `MergeTree`-mesas de motor. Ver [Uso de varios dispositivos de bloque para el almacenamiento de datos](../../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-multiple-volumes). ``` sql ALTER TABLE table_name MOVE PARTITION|PART partition_expr TO DISK|VOLUME 'disk_name' ``` -El `ALTER TABLE t MOVE` Consulta: +El `ALTER TABLE t MOVE` consulta: - No replicado, porque diferentes réplicas pueden tener diferentes directivas de almacenamiento. - Devuelve un error si el disco o volumen especificado no está configurado. La consulta también devuelve un error si no se pueden aplicar las condiciones de movimiento de datos especificadas en la directiva de almacenamiento. 
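Ejemplos orientativos, suponiendo una política de almacenamiento con un volumen `slow` y un disco `fast_ssd` (nombres hipotéticos):

``` sql
-- Mueve una parte de datos concreta a otro volumen:
ALTER TABLE hits MOVE PART '20190301_14343_16206_438' TO VOLUME 'slow';

-- Mueve una partición completa a otro disco:
ALTER TABLE hits MOVE PARTITION tuple() TO DISK 'fast_ssd';
```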
@@ -434,13 +437,13 @@ Puede especificar la expresión de partición en `ALTER ... PARTITION` de difere - Como valor de la `partition` columna de la `system.parts` tabla. Por ejemplo, `ALTER TABLE visits DETACH PARTITION 201901`. - Como la expresión de la columna de la tabla. Se admiten constantes y expresiones constantes. Por ejemplo, `ALTER TABLE visits DETACH PARTITION toYYYYMM(toDate('2019-01-25'))`. - Usando el ID de partición. El ID de partición es un identificador de cadena de la partición (legible por humanos, si es posible) que se usa como nombres de particiones en el sistema de archivos y en ZooKeeper. El ID de partición debe especificarse en el `PARTITION ID` cláusula, entre comillas simples. Por ejemplo, `ALTER TABLE visits DETACH PARTITION ID '201901'`. -- En el [ALTERAR PIEZA DE ADJUNTO](#alter_attach-partition) y [PARTE DESMONTADA DE GOTA](#alter_drop-detached) Consulta, para especificar el nombre de una parte, utilice un literal de cadena con un valor `name` columna de la [sistema.detached\_parts](../operations/system_tables.md#system_tables-detached_parts) tabla. Por ejemplo, `ALTER TABLE visits ATTACH PART '201901_1_1_0'`. +- En el [ALTER ATTACH PART](#alter_attach-partition) y [DROP DETACHED PART](#alter_drop-detached) consulta, para especificar el nombre de una parte, utilice un literal de cadena con un valor `name` columna de la [sistema.detached\_parts](../../operations/system_tables.md#system_tables-detached_parts) tabla. Por ejemplo, `ALTER TABLE visits ATTACH PART '201901_1_1_0'`. -El uso de comillas al especificar la partición depende del tipo de expresión de partición. Por ejemplo, para el `String` Tipo, debe especificar su nombre entre comillas (`'`). Para el `Date` y `Int*` tipos no se necesitan comillas. +El uso de comillas al especificar la partición depende del tipo de expresión de partición. Por ejemplo, para el `String` tipo, debe especificar su nombre entre comillas (`'`). Para el `Date` y `Int*` tipos no se necesitan comillas. -Para las tablas de estilo antiguo, puede especificar la partición como un número `201901` O una cadena `'201901'`. La sintaxis para las tablas de nuevo estilo es más estricta con los tipos (similar al analizador para el formato de entrada VALUES). +Para las tablas de estilo antiguo, puede especificar la partición como un número `201901` o una cadena `'201901'`. La sintaxis para las tablas de nuevo estilo es más estricta con los tipos (similar al analizador para el formato de entrada VALUES). -Todas las reglas anteriores también son ciertas para el [OPTIMIZAR](misc.md#misc_operations-optimize) consulta. Si necesita especificar la única partición al optimizar una tabla no particionada, establezca la expresión `PARTITION tuple()`. Por ejemplo: +Todas las reglas anteriores también son ciertas para el [OPTIMIZE](misc.md#misc_operations-optimize) consulta. Si necesita especificar la única partición al optimizar una tabla no particionada, establezca la expresión `PARTITION tuple()`. Por ejemplo: ``` sql OPTIMIZE TABLE table_not_partitioned PARTITION tuple() FINAL; @@ -448,20 +451,20 @@ OPTIMIZE TABLE table_not_partitioned PARTITION tuple() FINAL; Los ejemplos de `ALTER ... 
PARTITION` las consultas se demuestran en las pruebas [`00502_custom_partitioning_local`](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00502_custom_partitioning_local.sql) y [`00502_custom_partitioning_replicated_zookeeper`](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper.sql). -### Manipulaciones con Tabla TTL {#manipulations-with-table-ttl} +### Manipulaciones con tabla TTL {#manipulations-with-table-ttl} -Usted puede cambiar [Tabla TTL](../operations/table_engines/mergetree.md#mergetree-table-ttl) con una solicitud del siguiente formulario: +Usted puede cambiar [tabla TTL](../../engines/table_engines/mergetree_family/mergetree.md#mergetree-table-ttl) con una solicitud del siguiente formulario: ``` sql ALTER TABLE table-name MODIFY TTL ttl-expression ``` -### Sincronicidad de las consultas ALTER {#synchronicity-of-alter-queries} +### Sincronicidad de consultas ALTER {#synchronicity-of-alter-queries} Para tablas no replicables, todas `ALTER` las consultas se realizan de forma sincrónica. Para las tablas replicables, la consulta solo agrega instrucciones para las acciones apropiadas para `ZooKeeper`, y las acciones mismas se realizan tan pronto como sea posible. Sin embargo, la consulta puede esperar a que estas acciones se completen en todas las réplicas. -Para `ALTER ... ATTACH|DETACH|DROP` Consultas, puede utilizar el `replication_alter_partitions_sync` configuración para configurar la espera. -Valores posibles: `0` – no espere; `1` – sólo esperar a su propia ejecución (por defecto); `2` – esperar a todos. +Para `ALTER ... ATTACH|DETACH|DROP` consultas, puede utilizar el `replication_alter_partitions_sync` configuración para configurar la espera. +Valores posibles: `0` – do not wait; `1` – only wait for own execution (default); `2` – wait for all. ### Mutación {#alter-mutations} @@ -495,8 +498,8 @@ Para las tablas \*MergeTree, las mutaciones se ejecutan reescribiendo partes de Las mutaciones están totalmente ordenadas por su orden de creación y se aplican a cada parte en ese orden. Las mutaciones también se ordenan parcialmente con INSERTs: los datos que se insertaron en la tabla antes de que se enviara la mutación se mutarán y los datos que se insertaron después de eso no se mutarán. Tenga en cuenta que las mutaciones no bloquean INSERTs de ninguna manera. -Una consulta de mutación regresa inmediatamente después de agregar la entrada de mutación (en el caso de tablas replicadas a ZooKeeper, para tablas no replicadas, al sistema de archivos). La mutación en sí se ejecuta de forma asíncrona utilizando la configuración del perfil del sistema. Para realizar un seguimiento del progreso de las mutaciones, puede usar el [`system.mutations`](../operations/system_tables.md#system_tables-mutations) tabla. Una mutación que se envió correctamente continuará ejecutándose incluso si se reinician los servidores ClickHouse. No hay forma de revertir la mutación una vez que se presenta, pero si la mutación está atascada por alguna razón, puede cancelarse con el [`KILL MUTATION`](misc.md#kill-mutation) consulta. +Una consulta de mutación regresa inmediatamente después de agregar la entrada de mutación (en el caso de tablas replicadas a ZooKeeper, para tablas no replicadas, al sistema de archivos). La mutación en sí se ejecuta de forma asíncrona utilizando la configuración del perfil del sistema. 
Para realizar un seguimiento del progreso de las mutaciones, puede usar el [`system.mutations`](../../operations/system_tables.md#system_tables-mutations) tabla. Una mutación que se envió correctamente continuará ejecutándose incluso si se reinician los servidores ClickHouse. No hay forma de revertir la mutación una vez que se presenta, pero si la mutación está atascada por alguna razón, puede cancelarse con el [`KILL MUTATION`](misc.md#kill-mutation) consulta. Las entradas de mutaciones terminadas no se eliminan de inmediato (el número de entradas conservadas viene determinado por el `finished_mutations_to_keep` parámetro del motor de almacenamiento). Las entradas de mutación más antiguas se eliminan. -[Artículo Original](https://clickhouse.tech/docs/es/query_language/alter/) +[Artículo Original](https://clickhouse.tech/docs/en/query_language/alter/) diff --git a/docs/es/query_language/create.md b/docs/es/sql_reference/statements/create.md similarity index 64% rename from docs/es/query_language/create.md rename to docs/es/sql_reference/statements/create.md index d7663b01839..eb81945335d 100644 --- a/docs/es/query_language/create.md +++ b/docs/es/sql_reference/statements/create.md @@ -1,10 +1,13 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 35 +toc_title: CREATE --- -# CREAR Consultas {#create-queries} +# CREATE Consultas {#create-queries} -## CREAR BASE DE DATOS {#query-language-create-database} +## CREATE DATABASE {#query-language-create-database} Crea una base de datos. @@ -27,15 +30,15 @@ CREATE DATABASE [IF NOT EXISTS] db_name [ON CLUSTER cluster] [ENGINE = engine(.. - `ENGINE` - - [MySQL](../database_engines/mysql.md) + - [MySQL](../engines/database_engines/mysql.md) Allows you to retrieve data from the remote MySQL server. - By default, ClickHouse uses its own [database engine](../database_engines/index.md). + By default, ClickHouse uses its own [database engine](../engines/database_engines/index.md). -## CREAR TABLA {#create-table-query} +## CREATE TABLE {#create-table-query} -El `CREATE TABLE` puede tener varias formas. +El `CREATE TABLE` consulta puede tener varias formas. ``` sql CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] @@ -46,7 +49,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] ) ENGINE = engine ``` -Crea una tabla llamada ‘name’ es el ‘db’ base de datos o la base de datos actual si ‘db’ no está establecida, con la estructura especificada entre paréntesis y ‘engine’ motor. +Crea una tabla llamada ‘name’ en el ‘db’ base de datos o la base de datos actual si ‘db’ no está establecida, con la estructura especificada entre paréntesis y ‘engine’ motor. La estructura de la tabla es una lista de descripciones de columnas. Si los índices son compatibles con el motor, se indican como parámetros para el motor de tablas. Una descripción de columna es `name type` en el caso más simple. Ejemplo: `RegionID UInt32`. @@ -62,17 +65,17 @@ Crea una tabla con la misma estructura que otra tabla. Puede especificar un moto CREATE TABLE [IF NOT EXISTS] [db.]table_name AS table_function() ``` -Crea una tabla con la estructura y los datos [función de la tabla](table_functions/index.md). +Crea una tabla con la estructura y los datos [función de la tabla](../table_functions/index.md). ``` sql CREATE TABLE [IF NOT EXISTS] [db.]table_name ENGINE = engine AS SELECT ... 
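-- Bosquejo ilustrativo (nombres hipotéticos): crear y rellenar una tabla
-- con el resultado de un SELECT, por ejemplo:
--   CREATE TABLE hits_2019 ENGINE = MergeTree() ORDER BY EventDate
--   AS SELECT * FROM hits WHERE toYear(EventDate) = 2019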
``` -Crea una tabla con una estructura como el resultado de la `SELECT` Consulta, con el ‘engine’ motor, y lo llena con datos de SELECT. +Crea una tabla con una estructura como el resultado de la `SELECT` consulta, con el ‘engine’ motor, y lo llena con datos de SELECT. -En todos los casos, si `IF NOT EXISTS` Si se especifica la tabla, la consulta no devolverá un error si la tabla ya existe. En este caso, la consulta no hará nada. +En todos los casos, si `IF NOT EXISTS` se especifica, la consulta no devolverá un error si la tabla ya existe. En este caso, la consulta no hará nada. -Puede haber otras cláusulas después del `ENGINE` cláusula en la consulta. Consulte la documentación detallada sobre cómo crear tablas en las descripciones de [motores de mesa](../operations/table_engines/index.md#table_engines). +Puede haber otras cláusulas después del `ENGINE` cláusula en la consulta. Consulte la documentación detallada sobre cómo crear tablas en las descripciones de [motores de mesa](../../engines/table_engines/index.md#table_engines). ### Valores predeterminados {#create-default-values} @@ -81,11 +84,11 @@ Ejemplo: `URLDomain String DEFAULT domain(URL)`. Si no se define una expresión para el valor predeterminado, los valores predeterminados se establecerán en ceros para números, cadenas vacías para cadenas, matrices vacías para matrices y `0000-00-00` para fechas o `0000-00-00 00:00:00` para las fechas con el tiempo. Los NULL no son compatibles. -Si se define la expresión predeterminada, el tipo de columna es opcional. Si no hay un tipo definido explícitamente, se utiliza el tipo de expresión predeterminado. Ejemplo: `EventDate DEFAULT toDate(EventTime)` – el ‘Date’ tipo será utilizado para el ‘EventDate’ columna. +Si se define la expresión predeterminada, el tipo de columna es opcional. Si no hay un tipo definido explícitamente, se utiliza el tipo de expresión predeterminado. Ejemplo: `EventDate DEFAULT toDate(EventTime)` – the ‘Date’ tipo será utilizado para el ‘EventDate’ columna. Si el tipo de datos y la expresión predeterminada se definen explícitamente, esta expresión se convertirá al tipo especificado utilizando funciones de conversión de tipos. Ejemplo: `Hits UInt32 DEFAULT 0` significa lo mismo que `Hits UInt32 DEFAULT toUInt32(0)`. -Las expresiones predeterminadas se pueden definir como una expresión arbitraria de las constantes y columnas de la tabla. Al crear y cambiar la estructura de la tabla, comprueba que las expresiones no contengan bucles. Para INSERT, comprueba que las expresiones se puedan resolver, que se hayan pasado todas las columnas a partir de las que se pueden calcular. +Default expressions may be defined as an arbitrary expression from table constants and columns. When creating and changing the table structure, it checks that expressions don't contain loops. For INSERT, it checks that expressions are resolvable – that all columns they can be calculated from have been passed. `DEFAULT expr` @@ -99,8 +102,8 @@ Además, esta columna no se sustituye cuando se utiliza un asterisco en una cons `ALIAS expr` -Sinónimo. Dicha columna no se almacena en la tabla en absoluto. -Sus valores no se pueden insertar en una tabla y no se sustituyen cuando se utiliza un asterisco en una consulta SELECT. +Sinónimo. Tal columna no se almacena en la tabla en absoluto. +Sus valores no se pueden insertar en una tabla, y no se sustituye cuando se usa un asterisco en una consulta SELECT. Se puede usar en SELECT si el alias se expande durante el análisis de consultas. 
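Un bosquejo que combina los tres tipos de expresiones (columnas y motor hipotéticos):

``` sql
CREATE TABLE example_defaults
(
    EventTime DateTime,
    EventDate Date DEFAULT toDate(EventTime),   -- se calcula si no se proporciona al insertar
    URL String,
    URLDomain String MATERIALIZED domain(URL),  -- siempre calculada; no se puede insertar
    URLPath String ALIAS path(URL)              -- no se almacena; se expande en SELECT
) ENGINE = MergeTree()
ORDER BY EventDate
```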
Cuando se utiliza la consulta ALTER para agregar nuevas columnas, no se escriben datos antiguos para estas columnas. En su lugar, al leer datos antiguos que no tienen valores para las nuevas columnas, las expresiones se calculan sobre la marcha de forma predeterminada. Sin embargo, si la ejecución de las expresiones requiere diferentes columnas que no están indicadas en la consulta, estas columnas se leerán adicionalmente, pero solo para los bloques de datos que lo necesitan. @@ -123,17 +126,17 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] ) ENGINE = engine ``` -`boolean_expr_1` podría por cualquier expresión booleana. Si se definen restricciones para la tabla, cada una de ellas se verificará para cada fila en `INSERT` consulta. Si no se cumple alguna restricción, el servidor generará una excepción con el nombre de la restricción y la expresión de comprobación. +`boolean_expr_1` podría por cualquier expresión booleana. Si se definen restricciones para la tabla, cada una de ellas se verificará para cada fila en `INSERT` query. If any constraint is not satisfied — server will raise an exception with constraint name and checking expression. Agregar una gran cantidad de restricciones puede afectar negativamente el rendimiento de grandes `INSERT` consulta. ### Expresión TTL {#ttl-expression} -Define el tiempo de almacenamiento de los valores. Solo se puede especificar para tablas de la familia MergeTree. Para la descripción detallada, ver [TTL para columnas y tablas](../operations/table_engines/mergetree.md#table_engine-mergetree-ttl). +Define el tiempo de almacenamiento de los valores. Solo se puede especificar para tablas de la familia MergeTree. Para la descripción detallada, ver [TTL para columnas y tablas](../../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-ttl). ### Códecs de compresión de columna {#codecs} -De forma predeterminada, ClickHouse aplica el `lz4` método de compresión. Para `MergeTree`- familia de motor puede cambiar el método de compresión predeterminado en el [compresión](../operations/server_settings/settings.md#server-settings-compression) sección de una configuración de servidor. También puede definir el método de compresión para cada columna `CREATE TABLE` consulta. +De forma predeterminada, ClickHouse aplica el `lz4` método de compresión. Para `MergeTree`- familia de motor puede cambiar el método de compresión predeterminado en el [compresión](../../operations/server_configuration_parameters/settings.md#server-settings-compression) sección de una configuración de servidor. También puede definir el método de compresión para cada columna `CREATE TABLE` consulta. ``` sql CREATE TABLE codec_example @@ -155,23 +158,23 @@ Si se especifica un códec, el códec predeterminado no se aplica. Los códecs s La compresión es compatible con los siguientes motores de tablas: -- [Método de codificación de datos:](../operations/table_engines/mergetree.md) familia. Admite códecs de compresión de columnas y selecciona el método de compresión predeterminado mediante [compresión](../operations/server_settings/settings.md#server-settings-compression) configuración. -- [Registro](../operations/table_engines/log_family.md) Familia. Utiliza el `lz4` método de compresión por defecto y soporta códecs de compresión de columna. -- [Establecer](../operations/table_engines/set.md). Solo admite la compresión predeterminada. -- [Unir](../operations/table_engines/join.md). Solo admite la compresión predeterminada. 
+- [Método de codificación de datos:](../../engines/table_engines/mergetree_family/mergetree.md) familia. Admite códecs de compresión de columnas y selecciona el método de compresión predeterminado mediante la configuración de [compresión](../../operations/server_configuration_parameters/settings.md#server-settings-compression).
+- [Registro](../../engines/table_engines/log_family/log_family.md) familia. Utiliza el método de compresión `lz4` por defecto y soporta códecs de compresión de columna.
+- [Establecer](../../engines/table_engines/special/set.md). Solo admite la compresión predeterminada.
+- [Unir](../../engines/table_engines/special/join.md). Solo admite la compresión predeterminada.

ClickHouse admite códecs de propósito común y códecs especializados.

#### Códecs especializados {#create-query-specialized-codecs}

-Estos códecs están diseñados para hacer que la compresión sea más efectiva mediante el uso de características específicas de los datos. Algunos de estos códecs no comprimen los propios datos. En su lugar, preparan los datos para un códec de propósito común, que lo comprime mejor que sin esta preparación.
+Estos códecs están diseñados para hacer que la compresión sea más efectiva mediante el uso de características específicas de los datos. Algunos de estos códecs no comprimen los datos por sí mismos. En su lugar, preparan los datos para un códec de propósito común, que los comprime mejor que sin esta preparación.

-Códecs Especializados:
+Códecs especializados:

-- `Delta(delta_bytes)` — Enfoque de compresión en el que los valores brutos se sustituyen por la diferencia de dos valores vecinos, excepto el primer valor que permanece sin cambios. Hasta `delta_bytes` se utilizan para almacenar valores delta, por lo que `delta_bytes` es el tamaño máximo de los valores brutos. Posible `delta_bytes` valores: 1, 2, 4, 8. El valor predeterminado para `delta_bytes` ser `sizeof(type)` si es igual a 1, 2, 4 u 8. En todos los demás casos, es 1.
-- `DoubleDelta` — Calcula delta de deltas y lo escribe en forma binaria compacta. Las tasas de compresión óptimas se logran para secuencias monotónicas con una zancada constante, como los datos de series de tiempo. Se puede utilizar con cualquier tipo de ancho fijo. Implementa el algoritmo utilizado en Gorilla TSDB, extendiéndolo para admitir tipos de 64 bits. Utiliza 1 bit adicional para deltas de 32 bytes: prefijos de 5 bits en lugar de prefijos de 4 bits. Para obtener información adicional, consulte Compresión de sellos de tiempo en [Gorila: Una base de datos de series temporales rápida, escalable y en memoria](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf).
-- `Gorilla` - Calcula XOR entre el valor actual y el anterior y lo escribe en forma binaria compacta. Eficiente al almacenar una serie de valores de coma flotante que cambian lentamente, porque la mejor tasa de compresión se logra cuando los valores vecinos son binarios iguales. Implementa el algoritmo utilizado en Gorilla TSDB, extendiéndolo para admitir tipos de 64 bits. Para obtener información adicional, consulte Comprimir valores en [Gorila: Una base de datos de series temporales rápida, escalable y en memoria](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf).
-- `T64` — Enfoque de compresión que recorta bits altos no utilizados de valores en tipos de datos enteros (incluidos `Enum`, `Date` y `DateTime`). En cada paso de su algoritmo, el códec toma un bloque de 64 valores, los coloca en una matriz de 64x64 bits, lo transpone, recorta los bits de valores no utilizados y devuelve el resto como una secuencia. Los bits no utilizados son los bits, que no difieren entre los valores máximo y mínimo en toda la parte de datos para la que se utiliza la compresión.
+- `Delta(delta_bytes)` — Compression approach in which raw values are replaced by the difference of two neighboring values, except for the first value that stays unchanged. Up to `delta_bytes` se utilizan para almacenar valores delta, por lo que `delta_bytes` es el tamaño máximo de los valores brutos. Posibles valores de `delta_bytes`: 1, 2, 4, 8. El valor predeterminado para `delta_bytes` es `sizeof(type)` si es igual a 1, 2, 4 u 8. En todos los demás casos, es 1.
+- `DoubleDelta` — Calculates delta of deltas and writes it in compact binary form. Optimal compression rates are achieved for monotonic sequences with a constant stride, such as time series data. Can be used with any fixed-width type. Implements the algorithm used in Gorilla TSDB, extending it to support 64-bit types. Uses 1 extra bit for 32-byte deltas: 5-bit prefixes instead of 4-bit prefixes. For additional information, see Compressing Time Stamps in [Gorila: Una base de datos de series temporales rápida, escalable y en memoria](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf).
+- `Gorilla` — Calculates XOR between current and previous value and writes it in compact binary form. Efficient when storing a series of floating point values that change slowly, because the best compression rate is achieved when neighboring values are binary equal. Implements the algorithm used in Gorilla TSDB, extending it to support 64-bit types. For additional information, see Compressing Values in [Gorila: Una base de datos de series temporales rápida, escalable y en memoria](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf).
+- `T64` — Compression approach that crops unused high bits of values in integer data types (including `Enum`, `Date` y `DateTime`). En cada paso de su algoritmo, el códec toma un bloque de 64 valores, los coloca en una matriz de 64x64 bits, la transpone, recorta los bits no utilizados y devuelve el resto como una secuencia. Los bits no utilizados son los que no difieren entre los valores máximo y mínimo en toda la parte de datos para la que se utiliza la compresión.

Los códecs `DoubleDelta` y `Gorilla` se utilizan en Gorilla TSDB como componentes de su algoritmo de compresión. El enfoque de Gorilla es efectivo en escenarios en los que hay una secuencia de valores que cambian lentamente junto con sus marcas de tiempo. Las marcas de tiempo se comprimen eficazmente con el códec `DoubleDelta`, y los valores, con el códec `Gorilla`. Por ejemplo, para obtener una tabla almacenada de forma eficiente, puede crearla con la siguiente configuración:

``` sql
CREATE TABLE codec_example
(
    timestamp DateTime CODEC(DoubleDelta),
    slow_values Float32 CODEC(Gorilla)
)
ENGINE = MergeTree()
```

-#### Propósito común codecs {#create-query-common-purpose-codecs}
+#### Códecs de propósito común {#create-query-common-purpose-codecs}

Códecs:

-- `NONE` — Sin compresión.
+- `NONE` — No compression.
- `LZ4` — Lossless [algoritmo de compresión de datos](https://github.com/lz4/lz4) utilizado por defecto. Aplica compresión rápida LZ4.
-- `LZ4HC[(level)]` — Algoritmo LZ4 HC (alta compresión) con nivel configurable. Nivel predeterminado: 9. Configuración `level <= 0` aplica el nivel predeterminado. Niveles posibles: \[1, 12\].
Rango de nivel recomendado: \[4, 9\]. +- `LZ4HC[(level)]` — LZ4 HC (high compression) algorithm with configurable level. Default level: 9. Setting `level <= 0` aplica el nivel predeterminado. Niveles posibles: \[1, 12\]. Rango de nivel recomendado: \[4, 9\]. - `ZSTD[(level)]` — [Algoritmo de compresión ZSTD](https://en.wikipedia.org/wiki/Zstandard) con configurable `level`. Niveles posibles: \[1, 22\]. Valor predeterminado: 1. Los altos niveles de compresión son útiles para escenarios asimétricos, como comprimir una vez, descomprimir repetidamente. Los niveles más altos significan una mejor compresión y un mayor uso de la CPU. @@ -219,7 +222,7 @@ CREATE TEMPORARY TABLE [IF NOT EXISTS] table_name En la mayoría de los casos, las tablas temporales no se crean manualmente, sino cuando se utilizan datos externos para una consulta o para `(GLOBAL) IN`. Para obtener más información, consulte las secciones correspondientes -Es posible utilizar tablas con [MOTOR = Memoria](../operations/table_engines/memory.md) es lugar de tablas temporales. +Es posible usar tablas con [MOTOR = Memoria](../../engines/table_engines/special/memory.md) en lugar de tablas temporales. ## Consultas DDL distribuidas (cláusula ON CLUSTER) {#distributed-ddl-queries-on-cluster-clause} @@ -233,7 +236,7 @@ CREATE TABLE IF NOT EXISTS all_hits ON CLUSTER cluster (p Date, i Int32) ENGINE Para ejecutar estas consultas correctamente, cada host debe tener la misma definición de clúster (para simplificar la sincronización de configuraciones, puede usar sustituciones de ZooKeeper). También deben conectarse a los servidores ZooKeeper. La versión local de la consulta finalmente se implementará en cada host del clúster, incluso si algunos hosts no están disponibles actualmente. El orden para ejecutar consultas dentro de un único host está garantizado. -## CREAR VISTA {#create-view} +## CREATE VIEW {#create-view} ``` sql CREATE [MATERIALIZED] VIEW [IF NOT EXISTS] [db.]table_name [TO[db.]name] [ENGINE = engine] [POPULATE] AS SELECT ... @@ -241,7 +244,7 @@ CREATE [MATERIALIZED] VIEW [IF NOT EXISTS] [db.]table_name [TO[db.]name] [ENGINE Crea una vista. Hay dos tipos de vistas: normal y MATERIALIZADO. -Las vistas normales no almacenan ningún dato, sino que solo realizan una lectura desde otra tabla. En otras palabras, una vista normal no es más que una consulta guardada. Al leer desde una vista, esta consulta guardada se utiliza como una subconsulta en la cláusula FROM. +Las vistas normales no almacenan ningún dato, solo realizan una lectura desde otra tabla. En otras palabras, una vista normal no es más que una consulta guardada. Al leer desde una vista, esta consulta guardada se utiliza como una subconsulta en la cláusula FROM. Como ejemplo, suponga que ha creado una vista: @@ -263,7 +266,7 @@ SELECT a, b, c FROM (SELECT ...) Las vistas materializadas almacenan datos transformados por la consulta SELECT correspondiente. -Al crear una vista materializada sin `TO [db].[table]`, debe especificar ENGINE – el motor de tabla para almacenar datos. +Al crear una vista materializada sin `TO [db].[table]`, you must specify ENGINE – the table engine for storing data. Al crear una vista materializada con `TO [db].[table]` usted no debe usar `POPULATE`. @@ -271,15 +274,15 @@ Una vista materializada se organiza de la siguiente manera: al insertar datos en Si especifica POPULATE, los datos de tabla existentes se insertan en la vista al crearlos, como si `CREATE TABLE ... AS SELECT ...` . 
De lo contrario, la consulta solo contiene los datos insertados en la tabla después de crear la vista. No recomendamos usar POPULATE, ya que los datos insertados en la tabla durante la creación de la vista no se insertarán en ella. -Naciones `SELECT` Consulta puede contener `DISTINCT`, `GROUP BY`, `ORDER BY`, `LIMIT`… Tenga en cuenta que las conversiones correspondientes se realizan de forma independiente en cada bloque de datos insertados. Por ejemplo, si `GROUP BY` se establece, los datos se agregan durante la inserción, pero solo dentro de un solo paquete de datos insertados. Los datos no se agregarán más. La excepción es cuando se utiliza un ENGINE que realiza de forma independiente la agregación de datos, como `SummingMergeTree`. +A `SELECT` consulta puede contener `DISTINCT`, `GROUP BY`, `ORDER BY`, `LIMIT`… Note that the corresponding conversions are performed independently on each block of inserted data. For example, if `GROUP BY` se establece, los datos se agregan durante la inserción, pero solo dentro de un solo paquete de datos insertados. Los datos no se agregarán más. La excepción es cuando se utiliza un ENGINE que realiza de forma independiente la agregación de datos, como `SummingMergeTree`. La ejecución de `ALTER` las consultas sobre vistas materializadas no se han desarrollado completamente, por lo que podrían ser inconvenientes. Si la vista materializada utiliza la construcción `TO [db.]name` puede `DETACH` la vista, ejecutar `ALTER` para la tabla de destino, y luego `ATTACH` el previamente separado (`DETACH`) vista. Las vistas tienen el mismo aspecto que las tablas normales. Por ejemplo, se enumeran en el resultado de la `SHOW TABLES` consulta. -No hay una consulta independiente para eliminar vistas. Para eliminar una vista, utilice `DROP TABLE`. +No hay una consulta separada para eliminar vistas. Para eliminar una vista, utilice `DROP TABLE`. -## CREAR DICCIONARIO {#create-dictionary-query} +## CREATE DICTIONARY {#create-dictionary-query} ``` sql CREATE DICTIONARY [IF NOT EXISTS] [db.]dictionary_name [ON CLUSTER cluster] @@ -295,12 +298,12 @@ LAYOUT(LAYOUT_NAME([param_name param_value])) LIFETIME([MIN val1] MAX val2) ``` -Crear [diccionario externo](dicts/external_dicts.md) con dado [estructura](dicts/external_dicts_dict_structure.md), [fuente](dicts/external_dicts_dict_sources.md), [diseño](dicts/external_dicts_dict_layout.md) y [vida](dicts/external_dicts_dict_lifetime.md). +Crear [diccionario externo](../../sql_reference/dictionaries/external_dictionaries/external_dicts.md) con dado [estructura](../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md), [fuente](../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md), [diseño](../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_layout.md) y [vida](../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_lifetime.md). La estructura del diccionario externo consta de atributos. Los atributos de diccionario se especifican de manera similar a las columnas de la tabla. La única propiedad de atributo requerida es su tipo, todas las demás propiedades pueden tener valores predeterminados. -Dependiendo del diccionario [diseño](dicts/external_dicts_dict_layout.md) se pueden especificar uno o más atributos como claves de diccionario. +Dependiendo del diccionario [diseño](../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_layout.md) se pueden especificar uno o más atributos como claves de diccionario. 
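A modo de ilustración, un boceto mínimo de esta sintaxis; los nombres `my_dict`, `dict_source` y las columnas son hipotéticos y no forman parte del texto original:

``` sql
CREATE DICTIONARY IF NOT EXISTS my_dict
(
    -- 'id' actúa como clave del diccionario; 'value' tiene un valor predeterminado
    id UInt64,
    value String DEFAULT 'unknown'
)
PRIMARY KEY id
SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'dict_source' DB 'default'))
LAYOUT(FLAT())
LIFETIME(MIN 300 MAX 360)
```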
-Para obtener más información, consulte [Diccionarios externos](dicts/external_dicts.md) apartado. +Para obtener más información, consulte [Diccionarios externos](../../sql_reference/dictionaries/external_dictionaries/external_dicts.md) apartado. -[Artículo Original](https://clickhouse.tech/docs/es/query_language/create/) +[Artículo Original](https://clickhouse.tech/docs/en/query_language/create/) diff --git a/docs/es/sql_reference/statements/index.md b/docs/es/sql_reference/statements/index.md new file mode 100644 index 00000000000..2fd3a2afc7d --- /dev/null +++ b/docs/es/sql_reference/statements/index.md @@ -0,0 +1,8 @@ +--- +machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_folder_title: Statements +toc_priority: 31 +--- + + diff --git a/docs/es/query_language/insert_into.md b/docs/es/sql_reference/statements/insert_into.md similarity index 69% rename from docs/es/query_language/insert_into.md rename to docs/es/sql_reference/statements/insert_into.md index 62a0f6fdaf2..3d3daffbb36 100644 --- a/docs/es/query_language/insert_into.md +++ b/docs/es/sql_reference/statements/insert_into.md @@ -1,8 +1,11 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 34 +toc_title: INSERT INTO --- -## INSERTAR {#insert} +## INSERT {#insert} Adición de datos. @@ -17,15 +20,15 @@ La consulta puede especificar una lista de columnas para insertar `[(c1, c2, c3) - Los valores calculados a partir del `DEFAULT` expresiones especificadas en la definición de la tabla. - Ceros y cadenas vacías, si `DEFAULT` expresiones no están definidas. -Si [strict\_insert\_defaults=1](../operations/settings/settings.md), columnas que no tienen `DEFAULT` definido debe figurar en la consulta. +Si [strict\_insert\_defaults=1](../../operations/settings/settings.md), columnas que no tienen `DEFAULT` definido debe figurar en la consulta. -Los datos se pueden pasar al INSERT en cualquier [Formato](../interfaces/formats.md#formats) con el apoyo de ClickHouse. El formato debe especificarse explícitamente en la consulta: +Los datos se pueden pasar al INSERT en cualquier [formato](../../interfaces/formats.md#formats) con el apoyo de ClickHouse. El formato debe especificarse explícitamente en la consulta: ``` sql INSERT INTO [db.]table [(c1, c2, c3)] FORMAT format_name data_set ``` -Por ejemplo, el siguiente formato de consulta es idéntico a la versión básica de INSERT … VALUES: +For example, the following query format is identical to the basic version of INSERT … VALUES: ``` sql INSERT INTO [db.]table [(c1, c2, c3)] FORMAT Values (v11, v12, v13), (v21, v22, v23), ... @@ -41,11 +44,11 @@ INSERT INTO t FORMAT TabSeparated 22 Qwerty ``` -Puede insertar datos por separado de la consulta mediante el cliente de línea de comandos o la interfaz HTTP. Para obtener más información, consulte la sección “[Interfaz](../interfaces/index.md#interfaces)”. +Puede insertar datos por separado de la consulta mediante el cliente de línea de comandos o la interfaz HTTP. Para obtener más información, consulte la sección “[Interfaz](../../interfaces/index.md#interfaces)”. ### Limitación {#constraints} -Si la tabla tiene [limitación](create.md#constraints), sus expresiones se verificarán para cada fila de datos insertados. Si alguna de esas restricciones no se satisface, el servidor generará una excepción que contenga el nombre y la expresión de la restricción, la consulta se detendrá. 
+Si la tabla tiene [limitación](create.md#constraints), their expressions will be checked for each row of inserted data. If any of those constraints is not satisfied — server will raise an exception containing constraint name and expression, the query will be stopped. ### Insertar los resultados de `SELECT` {#insert_query_insert-select} @@ -60,7 +63,7 @@ Ninguno de los formatos de datos, excepto Valores, permite establecer valores pa No se admiten otras consultas para modificar partes de datos: `UPDATE`, `DELETE`, `REPLACE`, `MERGE`, `UPSERT`, `INSERT UPDATE`. Sin embargo, puede eliminar datos antiguos usando `ALTER TABLE ... DROP PARTITION`. -`FORMAT` cláusula debe especificarse al final de la consulta si `SELECT` cláusula contiene la función de tabla [entrada()](table_functions/input.md). +`FORMAT` cláusula debe especificarse al final de la consulta si `SELECT` cláusula contiene la función de tabla [entrada()](../table_functions/input.md). ### Consideraciones de rendimiento {#performance-considerations} @@ -71,7 +74,7 @@ Sin embargo, puede eliminar datos antiguos usando `ALTER TABLE ... DROP PARTITIO El rendimiento no disminuirá si: -- Los datos se agregan en tiempo real. -- Carga datos que normalmente están ordenados por tiempo. +- Se agregan datos en tiempo real. +- Cargar los datos que generalmente se ordenan por el tiempo. -[Artículo Original](https://clickhouse.tech/docs/es/query_language/insert_into/) +[Artículo Original](https://clickhouse.tech/docs/en/query_language/insert_into/) diff --git a/docs/es/query_language/misc.md b/docs/es/sql_reference/statements/misc.md similarity index 70% rename from docs/es/query_language/misc.md rename to docs/es/sql_reference/statements/misc.md index ff21fc1b457..7be8994313b 100644 --- a/docs/es/query_language/misc.md +++ b/docs/es/sql_reference/statements/misc.md @@ -1,12 +1,15 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 39 +toc_title: Otro --- # Consultas Misceláneas {#miscellaneous-queries} -## CONECTAR {#attach} +## ATTACH {#attach} -Esta consulta es exactamente la misma que `CREATE` pero +Esta consulta es exactamente la misma que `CREATE`, pero - En lugar de la palabra `CREATE` utiliza la palabra `ATTACH`. - La consulta no crea datos en el disco, pero supone que los datos ya están en los lugares apropiados, y simplemente agrega información sobre la tabla al servidor. @@ -20,7 +23,7 @@ ATTACH TABLE [IF NOT EXISTS] [db.]name [ON CLUSTER cluster] Esta consulta se utiliza al iniciar el servidor. El servidor almacena los metadatos de la tabla como archivos con `ATTACH` consultas, que simplemente se ejecuta en el lanzamiento (con la excepción de las tablas del sistema, que se crean explícitamente en el servidor). -## MESA DE VERIFICACIÓN {#check-table} +## CHECK TABLE {#check-table} Comprueba si los datos de la tabla están dañados. @@ -31,21 +34,21 @@ CHECK TABLE [db.]name El `CHECK TABLE` query compara los tamaños de archivo reales con los valores esperados que se almacenan en el servidor. Si los tamaños de archivo no coinciden con los valores almacenados, significa que los datos están dañados. Esto puede deberse, por ejemplo, a un bloqueo del sistema durante la ejecución de la consulta. La respuesta de consulta contiene el `result` columna con una sola fila. La fila tiene un valor de -[Booleana](../data_types/boolean.md) Tipo: +[Booleana](../../sql_reference/data_types/boolean.md) tipo: - 0 - Los datos de la tabla están dañados. - 1 - Los datos mantienen la integridad. 
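Por ejemplo, un boceto mínimo de su uso (el nombre `test_table` es hipotético):

``` sql
CHECK TABLE test_table
```

``` text
┌─result─┐
│      1 │
└────────┘
```

Aquí `1` indica que los datos mantienen la integridad.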
El `CHECK TABLE` query admite los siguientes motores de tablas: -- [Registro](../operations/table_engines/log.md) -- [TinyLog](../operations/table_engines/tinylog.md) -- [StripeLog](../operations/table_engines/stripelog.md) -- [Familia MergeTree](../operations/table_engines/mergetree.md) +- [Registro](../../engines/table_engines/log_family/log.md) +- [TinyLog](../../engines/table_engines/log_family/tinylog.md) +- [StripeLog](../../engines/table_engines/log_family/stripelog.md) +- [Familia MergeTree](../../engines/table_engines/mergetree_family/mergetree.md) Realizado sobre las tablas con otros motores de tabla causa una excepción. -Motores del `*Log` La familia no proporciona recuperación automática de datos en caso de fallo. Utilice el `CHECK TABLE` consulta para rastrear la pérdida de datos de manera oportuna. +Motores del `*Log` la familia no proporciona la recuperación automática de datos en caso de fallo. Utilice el `CHECK TABLE` consulta para rastrear la pérdida de datos de manera oportuna. Para `MergeTree` motores familiares, el `CHECK TABLE` query muestra un estado de comprobación para cada parte de datos individual de una tabla en el servidor local. @@ -54,29 +57,29 @@ Para `MergeTree` motores familiares, el `CHECK TABLE` query muestra un estado de Si la tabla está dañada, puede copiar los datos no dañados a otra tabla. Para hacer esto: 1. Cree una nueva tabla con la misma estructura que la tabla dañada. Para ello, ejecute la consulta `CREATE TABLE AS `. -2. Establezca el [max\_threads](../operations/settings/settings.md#settings-max_threads) valor a 1 para procesar la siguiente consulta en un único subproceso. Para ello, ejecute la consulta `SET max_threads = 1`. +2. Establezca el [max\_threads](../../operations/settings/settings.md#settings-max_threads) valor a 1 para procesar la siguiente consulta en un único subproceso. Para ello, ejecute la consulta `SET max_threads = 1`. 3. Ejecutar la consulta `INSERT INTO SELECT * FROM `. Esta solicitud copia los datos no dañados de la tabla dañada a otra tabla. Solo se copiarán los datos anteriores a la parte dañada. 4. Reinicie el `clickhouse-client` para restablecer el `max_threads` valor. -## TABLA DE DESCRIBE {#misc-describe-table} +## DESCRIBE TABLE {#misc-describe-table} ``` sql DESC|DESCRIBE TABLE [db.]table [INTO OUTFILE filename] [FORMAT format] ``` -Devuelve lo siguiente `String` Tipo columnas: +Devuelve lo siguiente `String` tipo columnas: -- `name` — Nombre de la columna. -- `type`— Tipo de columna. -- `default_type` — Cláusula utilizada en [expresión predeterminada](create.md#create-default-values) (`DEFAULT`, `MATERIALIZED` o `ALIAS`). La columna contiene una cadena vacía, si no se especifica la expresión predeterminada. -- `default_expression` — Valor especificado en el `DEFAULT` clausula. -- `comment_expression` — Texto de comentario. +- `name` — Column name. +- `type`— Column type. +- `default_type` — Clause that is used in [expresión predeterminada](create.md#create-default-values) (`DEFAULT`, `MATERIALIZED` o `ALIAS`). La columna contiene una cadena vacía, si no se especifica la expresión predeterminada. +- `default_expression` — Value specified in the `DEFAULT` clausula. +- `comment_expression` — Comment text. Las estructuras de datos anidadas se generan en “expanded” formato. Cada columna se muestra por separado, con el nombre después de un punto. -## SEPARAR {#detach} +## DETACH {#detach} -Elimina información sobre el ‘name’ tabla desde el servidor. El servidor deja de conocer la existencia de la tabla. 
+Elimina información sobre el ‘name’ tabla desde el servidor. El servidor deja de saber sobre la existencia de la tabla. ``` sql DETACH TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster] @@ -87,7 +90,7 @@ Del mismo modo, un “detached” se puede volver a conectar usando el `ATTACH` No hay `DETACH DATABASE` consulta. -## GOTA {#drop} +## DROP {#drop} Esta consulta tiene dos tipos: `DROP DATABASE` y `DROP TABLE`. @@ -110,7 +113,7 @@ Si `IF EXISTS` se especifica, no devuelve un error si la tabla no existe o la ba Elimina el diccionario. Si `IF EXISTS` se especifica, no devuelve un error si la tabla no existe o la base de datos no existe. -## EXISTIR {#exists} +## EXISTS {#exists} ``` sql EXISTS [TEMPORARY] [TABLE|DICTIONARY] [db.]name [INTO OUTFILE filename] [FORMAT format] @@ -118,7 +121,7 @@ EXISTS [TEMPORARY] [TABLE|DICTIONARY] [db.]name [INTO OUTFILE filename] [FORMAT Devuelve una sola `UInt8`columna -type, que contiene el valor único `0` si la tabla o base de datos no existe, o `1` si la tabla existe en la base de datos especificada. -## Matar consulta {#kill-query} +## KILL QUERY {#kill-query} ``` sql KILL QUERY [ON CLUSTER cluster] @@ -147,13 +150,13 @@ De forma predeterminada, se utiliza la versión asincrónica de las consultas (` La versión síncrona (`SYNC`) espera a que se detengan todas las consultas y muestra información sobre cada proceso a medida que se detiene. La respuesta contiene el `kill_status` columna, que puede tomar los siguientes valores: -1. ‘finished’ – La consulta se ha finalizado correctamente. -2. ‘waiting’ – Esperando a que finalice la consulta después de enviarle una señal para finalizar. -3. Los otros valores consultan por qué no se puede detener. +1. ‘finished’ – The query was terminated successfully. +2. ‘waiting’ – Waiting for the query to end after sending it a signal to terminate. +3. The other values ​​explain why the query can't be stopped. Una consulta de prueba (`TEST`) sólo comprueba los derechos del usuario y muestra una lista de consultas para detener. -## MUTACIÓN DE MATAR {#kill-mutation} +## KILL MUTATION {#kill-mutation} ``` sql KILL MUTATION [ON CLUSTER cluster] @@ -162,7 +165,7 @@ KILL MUTATION [ON CLUSTER cluster] [FORMAT format] ``` -Intenta cancelar y quitar [mutación](alter.md#alter-mutations) que se están ejecutando actualmente. Las mutaciones para cancelar se seleccionan en el [`system.mutations`](../operations/system_tables.md#system_tables-mutations) utilizando el filtro especificado por el `WHERE` cláusula de la `KILL` consulta. +Intenta cancelar y quitar [mutación](alter.md#alter-mutations) que se están ejecutando actualmente. Las mutaciones para cancelar se seleccionan en el [`system.mutations`](../../operations/system_tables.md#system_tables-mutations) utilizando el filtro especificado por el `WHERE` cláusula de la `KILL` consulta. Una consulta de prueba (`TEST`) sólo comprueba los derechos del usuario y muestra una lista de consultas para detener. @@ -176,23 +179,23 @@ KILL MUTATION WHERE database = 'default' AND table = 'table' KILL MUTATION WHERE database = 'default' AND table = 'table' AND mutation_id = 'mutation_3.txt' ``` -La consulta es útil cuando una mutación está bloqueada y no puede finalizar (por ejemplo, si alguna función en la consulta de mutación arroja una excepción cuando se aplica a los datos contenidos en la tabla). +The query is useful when a mutation is stuck and cannot finish (e.g. if some function in the mutation query throws an exception when applied to the data contained in the table). 
Los cambios ya realizados por la mutación no se revierten. -## OPTIMIZAR {#misc_operations-optimize} +## OPTIMIZE {#misc_operations-optimize} ``` sql OPTIMIZE TABLE [db.]name [ON CLUSTER cluster] [PARTITION partition | PARTITION ID 'partition_id'] [FINAL] [DEDUPLICATE] ``` -Esta consulta intenta inicializar una combinación no programada de partes de datos para tablas con un motor de tablas [Método de codificación de datos:](../operations/table_engines/mergetree.md) Familia. +Esta consulta intenta inicializar una combinación no programada de partes de datos para tablas con un motor de tablas [Método de codificación de datos:](../../engines/table_engines/mergetree_family/mergetree.md) familia. -El `OPTMIZE` consulta también es compatible con el [Método de codificación de datos:](../operations/table_engines/materializedview.md) y el [Búfer](../operations/table_engines/buffer.md) motor. No se admiten otros motores de tabla. +El `OPTMIZE` consulta también es compatible con el [Método de codificación de datos:](../../engines/table_engines/special/materializedview.md) y el [Búfer](../../engines/table_engines/special/buffer.md) motor. No se admiten otros motores de tabla. -Cuando `OPTIMIZE` se utiliza con el [ReplicatedMergeTree](../operations/table_engines/replication.md) la familia de motores de tablas, ClickHouse crea una tarea para fusionar y espera la ejecución en todos los nodos (si `replication_alter_partitions_sync` está habilitada la configuración). +Cuando `OPTIMIZE` se utiliza con el [ReplicatedMergeTree](../../engines/table_engines/mergetree_family/replication.md) la familia de motores de tablas, ClickHouse crea una tarea para fusionar y espera la ejecución en todos los nodos (si `replication_alter_partitions_sync` está habilitada la configuración). -- Si `OPTIMIZE` no realiza una fusión por ningún motivo, no notifica al cliente. Para habilitar las notificaciones, [Optize\_throw\_if\_noop](../operations/settings/settings.md#setting-optimize_throw_if_noop) configuración. +- Si `OPTIMIZE` no realiza una fusión por ningún motivo, no notifica al cliente. Para habilitar las notificaciones, [Optize\_throw\_if\_noop](../../operations/settings/settings.md#setting-optimize_throw_if_noop) configuración. - Si especifica un `PARTITION`, sólo la partición especificada está optimizada. [Cómo establecer la expresión de partición](alter.md#alter-how-to-specify-part-expr). - Si especifica `FINAL`, la optimización se realiza incluso cuando todos los datos ya están en una parte. - Si especifica `DEDUPLICATE`, luego se deduplicarán filas completamente idénticas (se comparan todas las columnas), tiene sentido solo para el motor MergeTree. @@ -200,7 +203,7 @@ Cuando `OPTIMIZE` se utiliza con el [ReplicatedMergeTree](../operations/table_en !!! warning "Advertencia" `OPTIMIZE` no se puede arreglar el “Too many parts” error. -## Renombrar {#misc_operations-rename} +## RENAME {#misc_operations-rename} Cambia el nombre de una o más tablas. @@ -210,13 +213,13 @@ RENAME TABLE [db11.]name11 TO [db12.]name12, [db21.]name21 TO [db22.]name22, ... Todas las tablas se renombran bajo bloqueo global. Cambiar el nombre de las tablas es una operación ligera. Si ha indicado otra base de datos después de TO, la tabla se moverá a esta base de datos. Sin embargo, los directorios con bases de datos deben residir en el mismo sistema de archivos (de lo contrario, se devuelve un error). 
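Por ejemplo, un boceto con nombres hipotéticos que reemplaza la tabla actual por una nueva en una sola consulta:

``` sql
-- Ambas operaciones de renombrado se ejecutan bajo el mismo bloqueo global
RENAME TABLE db1.events TO db1.events_old, db1.events_new TO db1.events
```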
-## ESTABLECER {#query-set} +## SET {#query-set} ``` sql SET param = value ``` -Asignar `value` Angeles `param` [configuración](../operations/settings/index.md) para la sesión actual. No se puede cambiar [configuración del servidor](../operations/server_settings/index.md) de esta manera. +Asignar `value` a la `param` [configuración](../../operations/settings/index.md) para la sesión actual. No se puede cambiar [configuración del servidor](../../operations/server_configuration_parameters/index.md) de esta manera. También puede establecer todos los valores del perfil de configuración especificado en una sola consulta. @@ -224,9 +227,9 @@ También puede establecer todos los valores del perfil de configuración especif SET profile = 'profile-name-from-the-settings-file' ``` -Para obtener más información, consulte [Configuración](../operations/settings/settings.md). +Para obtener más información, consulte [Configuración](../../operations/settings/settings.md). -## TRUNCAR {#truncate} +## TRUNCATE {#truncate} ``` sql TRUNCATE TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster] @@ -234,9 +237,9 @@ TRUNCATE TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster] Elimina todos los datos de una tabla. Cuando la cláusula `IF EXISTS` se omite, la consulta devuelve un error si la tabla no existe. -El `TRUNCATE` consulta no es compatible con [Vista](../operations/table_engines/view.md), [File](../operations/table_engines/file.md), [URL](../operations/table_engines/url.md) y [Nulo](../operations/table_engines/null.md) motores de mesa. +El `TRUNCATE` consulta no es compatible con [Vista](../../engines/table_engines/special/view.md), [File](../../engines/table_engines/special/file.md), [URL](../../engines/table_engines/special/url.md) y [Nulo](../../engines/table_engines/special/null.md) motores de mesa. -## UTILIZAR {#use} +## USE {#use} ``` sql USE db @@ -244,6 +247,6 @@ USE db Permite establecer la base de datos actual para la sesión. La base de datos actual se utiliza para buscar tablas si la base de datos no está definida explícitamente en la consulta con un punto antes del nombre de la tabla. -Esta consulta no se puede realizar cuando se utiliza el protocolo HTTP, ya que no existe un concepto de sesión. +Esta consulta no se puede realizar cuando se usa el protocolo HTTP, ya que no existe un concepto de sesión. -[Artículo Original](https://clickhouse.tech/docs/es/query_language/misc/) +[Artículo Original](https://clickhouse.tech/docs/en/query_language/misc/) diff --git a/docs/es/sql_reference/statements/select.md b/docs/es/sql_reference/statements/select.md new file mode 100644 index 00000000000..5cdcf1008cb --- /dev/null +++ b/docs/es/sql_reference/statements/select.md @@ -0,0 +1,610 @@ +--- +machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 33 +toc_title: SELECT +--- + +# SELECCIONAR consultas Sintaxis {#select-queries-syntax} + +`SELECT` realiza la recuperación de datos. + +``` sql +[WITH expr_list|(subquery)] +SELECT [DISTINCT] expr_list +[FROM [db.]table | (subquery) | table_function] [FINAL] +[SAMPLE sample_coeff] +[ARRAY JOIN ...] +[GLOBAL] [ANY|ALL] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER] JOIN (subquery)|table USING columns_list +[PREWHERE expr] +[WHERE expr] +[GROUP BY expr_list] [WITH TOTALS] +[HAVING expr] +[ORDER BY expr_list] +[LIMIT [offset_value, ]n BY columns] +[LIMIT [n, ]m] +[UNION ALL ...] 
+[INTO OUTFILE filename] +[FORMAT format] +``` + +Todas las cláusulas son opcionales, excepto la lista requerida de expresiones inmediatamente después de SELECT. +Las siguientes cláusulas se describen casi en el mismo orden que en el transportador de ejecución de consultas. + +Si la consulta omite el `DISTINCT`, `GROUP BY` y `ORDER BY` cláusulas y el `IN` y `JOIN` subconsultas, la consulta se procesará por completo, utilizando O (1) cantidad de RAM. +De lo contrario, la consulta podría consumir mucha RAM si no se especifican las restricciones adecuadas: `max_memory_usage`, `max_rows_to_group_by`, `max_rows_to_sort`, `max_rows_in_distinct`, `max_bytes_in_distinct`, `max_rows_in_set`, `max_bytes_in_set`, `max_rows_in_join`, `max_bytes_in_join`, `max_bytes_before_external_sort`, `max_bytes_before_external_group_by`. Para obtener más información, consulte la sección “Settings”. Es posible utilizar la clasificación externa (guardar tablas temporales en un disco) y la agregación externa. `The system does not have "merge join"`. + +### CON Cláusula {#with-clause} + +Esta sección proporciona soporte para expresiones de tabla común ([CTE](https://en.wikipedia.org/wiki/Hierarchical_and_recursive_queries_in_SQL)), con algunas limitaciones: +1. No se admiten consultas recursivas +2. Cuando se usa una subconsulta dentro de la sección WITH, su resultado debe ser escalar con exactamente una fila +3. Los resultados de la expresión no están disponibles en las subconsultas +Los resultados de las expresiones de la cláusula WITH se pueden usar dentro de la cláusula SELECT. + +Ejemplo 1: Usar expresión constante como “variable” + +``` sql +WITH '2019-08-01 15:23:00' as ts_upper_bound +SELECT * +FROM hits +WHERE + EventDate = toDate(ts_upper_bound) AND + EventTime <= ts_upper_bound +``` + +Ejemplo 2: Evictar el resultado de la expresión de sum(bytes) de la lista de columnas de la cláusula SELECT + +``` sql +WITH sum(bytes) as s +SELECT + formatReadableSize(s), + table +FROM system.parts +GROUP BY table +ORDER BY s +``` + +Ejemplo 3: Uso de los resultados de la subconsulta escalar + +``` sql +/* this example would return TOP 10 of most huge tables */ +WITH + ( + SELECT sum(bytes) + FROM system.parts + WHERE active + ) AS total_disk_usage +SELECT + (sum(bytes) / total_disk_usage) * 100 AS table_disk_usage, + table +FROM system.parts +GROUP BY table +ORDER BY table_disk_usage DESC +LIMIT 10 +``` + +Ejemplo 4: Reutilización de la expresión en subconsulta +Como solución alternativa para la limitación actual para el uso de expresiones en subconsultas, puede duplicarla. + +``` sql +WITH ['hello'] AS hello +SELECT + hello, + * +FROM +( + WITH ['hello'] AS hello + SELECT hello +) +``` + +``` text +┌─hello─────┬─hello─────┐ +│ ['hello'] │ ['hello'] │ +└───────────┴───────────┘ +``` + +### Cláusula FROM {#select-from} + +Si se omite la cláusula FROM, los datos se leerán desde el `system.one` tabla. +El `system.one` table contiene exactamente una fila (esta tabla cumple el mismo propósito que la tabla DUAL que se encuentra en otros DBMS). + +El `FROM` cláusula especifica la fuente de la que se leen los datos: + +- Tabla +- Subconsultas +- [Función de la tabla](../table_functions/index.md) + +`ARRAY JOIN` y el regular `JOIN` también se pueden incluir (ver más abajo). + +En lugar de una tabla, el `SELECT` subconsulta se puede especificar entre paréntesis. +A diferencia del SQL estándar, no es necesario especificar un sinónimo después de una subconsulta. 
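Por ejemplo, la siguiente consulta es válida aunque la subconsulta no tenga alias (usa la tabla de sistema `system.numbers`, disponible en cualquier servidor ClickHouse):

``` sql
SELECT count()
FROM (SELECT number FROM system.numbers LIMIT 10)
```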
+ +Para ejecutar una consulta, todas las columnas enumeradas en la consulta se extraen de la tabla adecuada. Las columnas no necesarias para la consulta externa se eliminan de las subconsultas. +Si una consulta no muestra ninguna columnas (por ejemplo, `SELECT count() FROM t`), alguna columna se extrae de la tabla de todos modos (se prefiere la más pequeña), para calcular el número de filas. + +#### Modificador FINAL {#select-from-final} + +Aplicable al seleccionar datos de tablas del [Método de codificación de datos:](../../engines/table_engines/mergetree_family/mergetree.md)-Familia de motores distintos de `GraphiteMergeTree`. Cuando `FINAL` se especifica, ClickHouse fusiona completamente los datos antes de devolver el resultado y, por lo tanto, realiza todas las transformaciones de datos que ocurren durante las fusiones para el motor de tabla dado. + +También soportado para: +- [Replicado](../../engines/table_engines/mergetree_family/replication.md) versiones de `MergeTree` motor. +- [Vista](../../engines/table_engines/special/view.md), [Búfer](../../engines/table_engines/special/buffer.md), [Distribuido](../../engines/table_engines/special/distributed.md), y [Método de codificación de datos:](../../engines/table_engines/special/materializedview.md) motores que funcionan sobre otros motores, siempre que se hayan creado sobre `MergeTree`-mesas de motor. + +Consultas que usan `FINAL` se ejecutan no tan rápido como consultas similares que no lo hacen, porque: + +- La consulta se ejecuta en un solo subproceso y los datos se combinan durante la ejecución de la consulta. +- Consultas con `FINAL` leer columnas de clave primaria además de las columnas especificadas en la consulta. + +En la mayoría de los casos, evite usar `FINAL`. + +### Cláusula SAMPLE {#select-sample-clause} + +El `SAMPLE` cláusula permite un procesamiento de consultas aproximado. + +Cuando se habilita el muestreo de datos, la consulta no se realiza en todos los datos, sino solo en una cierta fracción de datos (muestra). Por ejemplo, si necesita calcular estadísticas para todas las visitas, es suficiente ejecutar la consulta en la fracción 1/10 de todas las visitas y luego multiplicar el resultado por 10. + +El procesamiento de consultas aproximado puede ser útil en los siguientes casos: + +- Cuando tiene requisitos de temporización estrictos (como \<100 ms) pero no puede justificar el costo de recursos de hardware adicionales para cumplirlos. +- Cuando sus datos sin procesar no son precisos, la aproximación no degrada notablemente la calidad. +- Los requisitos comerciales se centran en los resultados aproximados (por rentabilidad o para comercializar los resultados exactos a los usuarios premium). + +!!! note "Nota" + Sólo puede utilizar el muestreo con las tablas en el [Método de codificación de datos:](../../engines/table_engines/mergetree_family/mergetree.md) familia, y sólo si la expresión de muestreo se especificó durante la creación de la tabla (ver [Motor MergeTree](../../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-creating-a-table)). + +Las características del muestreo de datos se enumeran a continuación: + +- El muestreo de datos es un mecanismo determinista. El resultado de la misma `SELECT .. SAMPLE` la consulta es siempre la misma. +- El muestreo funciona consistentemente para diferentes tablas. Para tablas con una sola clave de muestreo, una muestra con el mismo coeficiente siempre selecciona el mismo subconjunto de datos posibles. 
Por ejemplo, una muestra de ID de usuario toma filas con el mismo subconjunto de todos los ID de usuario posibles de diferentes tablas. Esto significa que puede utilizar el ejemplo en subconsultas [IN](#select-in-operators) clausula. Además, puede unir muestras usando el [JOIN](#select-join) clausula. +- El muestreo permite leer menos datos de un disco. Tenga en cuenta que debe especificar la clave de muestreo correctamente. Para obtener más información, consulte [Creación de una tabla MergeTree](../../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-creating-a-table). + +Para el `SAMPLE` cláusula se admite la siguiente sintaxis: + +| SAMPLE Clause Syntax | Descripci | +|----------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `SAMPLE k` | Aqui `k` es el número de 0 a 1.
    La consulta se ejecuta en `k` de datos. Por ejemplo, `SAMPLE 0.1` ejecuta la consulta en el 10% de los datos. [Leer más](#select-sample-k) | +| `SAMPLE n` | Aqui `n` es un entero suficientemente grande.
    La consulta se ejecuta en una muestra de al menos `n` filas (pero no significativamente más que esto). Por ejemplo, `SAMPLE 10000000` ejecuta la consulta en un mínimo de 10.000.000 de filas. [Leer más](#select-sample-n) | +| `SAMPLE k OFFSET m` | Aqui `k` y `m` son los números del 0 al 1.
    La consulta se ejecuta en una muestra de `k` de los datos. Los datos utilizados para el ejemplo se compensan por `m` fracción. [Leer más](#select-sample-offset) | + +#### SAMPLE K {#select-sample-k} + +Aqui `k` es el número de 0 a 1 (se admiten notaciones fraccionarias y decimales). Por ejemplo, `SAMPLE 1/2` o `SAMPLE 0.5`. + +En un `SAMPLE k` cláusula, la muestra se toma de la `k` de datos. El ejemplo se muestra a continuación: + +``` sql +SELECT + Title, + count() * 10 AS PageViews +FROM hits_distributed +SAMPLE 0.1 +WHERE + CounterID = 34 +GROUP BY Title +ORDER BY PageViews DESC LIMIT 1000 +``` + +En este ejemplo, la consulta se ejecuta en una muestra de 0,1 (10%) de datos. Los valores de las funciones agregadas no se corrigen automáticamente, por lo que para obtener un resultado aproximado, el valor `count()` se multiplica manualmente por 10. + +#### SAMPLE N {#select-sample-n} + +Aqui `n` es un entero suficientemente grande. Por ejemplo, `SAMPLE 10000000`. + +En este caso, la consulta se ejecuta en una muestra de al menos `n` filas (pero no significativamente más que esto). Por ejemplo, `SAMPLE 10000000` ejecuta la consulta en un mínimo de 10.000.000 de filas. + +Dado que la unidad mínima para la lectura de datos es un gránulo (su tamaño se establece mediante el `index_granularity` ajuste), tiene sentido establecer una muestra que es mucho más grande que el tamaño del gránulo. + +Cuando se utiliza el `SAMPLE n` cláusula, no sabe qué porcentaje relativo de datos se procesó. Por lo tanto, no sabe el coeficiente por el que se deben multiplicar las funciones agregadas. Utilice el `_sample_factor` columna virtual para obtener el resultado aproximado. + +El `_sample_factor` columna contiene coeficientes relativos que se calculan dinámicamente. Esta columna se crea automáticamente cuando [crear](../../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-creating-a-table) una tabla con la clave de muestreo especificada. Los ejemplos de uso del `_sample_factor` columna se muestran a continuación. + +Consideremos la tabla `visits`, que contiene las estadísticas sobre las visitas al sitio. El primer ejemplo muestra cómo calcular el número de páginas vistas: + +``` sql +SELECT sum(PageViews * _sample_factor) +FROM visits +SAMPLE 10000000 +``` + +El siguiente ejemplo muestra cómo calcular el número total de visitas: + +``` sql +SELECT sum(_sample_factor) +FROM visits +SAMPLE 10000000 +``` + +El siguiente ejemplo muestra cómo calcular la duración media de la sesión. Tenga en cuenta que no necesita usar el coeficiente relativo para calcular los valores promedio. + +``` sql +SELECT avg(Duration) +FROM visits +SAMPLE 10000000 +``` + +#### SAMPLE K OFFSET M {#select-sample-offset} + +Aqui `k` y `m` son números del 0 al 1. Los ejemplos se muestran a continuación. + +**Ejemplo 1** + +``` sql +SAMPLE 1/10 +``` + +En este ejemplo, la muestra es 1/10 de todos los datos: + +`[++------------]` + +**Ejemplo 2** + +``` sql +SAMPLE 1/10 OFFSET 1/2 +``` + +Aquí, se toma una muestra del 10% de la segunda mitad de los datos. + +`[------++------]` + +### ARRAY JOIN Cláusula {#select-array-join-clause} + +Permite ejecutar `JOIN` con una matriz o estructura de datos anidada. La intención es similar a la [arrayJoin](../../sql_reference/functions/array_join.md#functions_arrayjoin) función, pero su funcionalidad es más amplia. + +``` sql +SELECT +FROM +[LEFT] ARRAY JOIN +[WHERE|PREWHERE ] +... +``` + +Sólo puede especificar una sola `ARRAY JOIN` cláusula en una consulta. 
+ +El orden de ejecución de la consulta se optimiza cuando se ejecuta `ARRAY JOIN`. Aunque `ARRAY JOIN` debe especificarse siempre antes de la `WHERE/PREWHERE` cláusula, se puede realizar ya sea antes `WHERE/PREWHERE` (si el resultado es necesario en esta cláusula), o después de completarlo (para reducir el volumen de cálculos). El optimizador de consultas controla el orden de procesamiento. + +Tipos admitidos de `ARRAY JOIN` se enumeran a continuación: + +- `ARRAY JOIN` - En este caso, las matrices vacías no se incluyen en el resultado de `JOIN`. +- `LEFT ARRAY JOIN` - El resultado de `JOIN` contiene filas con matrices vacías. El valor de una matriz vacía se establece en el valor predeterminado para el tipo de elemento de matriz (normalmente 0, cadena vacía o NULL). + +Los siguientes ejemplos demuestran el uso de la `ARRAY JOIN` y `LEFT ARRAY JOIN` clausula. Vamos a crear una tabla con un [Matriz](../../sql_reference/data_types/array.md) escriba la columna e inserte valores en ella: + +``` sql +CREATE TABLE arrays_test +( + s String, + arr Array(UInt8) +) ENGINE = Memory; + +INSERT INTO arrays_test +VALUES ('Hello', [1,2]), ('World', [3,4,5]), ('Goodbye', []); +``` + +``` text +┌─s───────────┬─arr─────┐ +│ Hello │ [1,2] │ +│ World │ [3,4,5] │ +│ Goodbye │ [] │ +└─────────────┴─────────┘ +``` + +El siguiente ejemplo utiliza el `ARRAY JOIN` clausula: + +``` sql +SELECT s, arr +FROM arrays_test +ARRAY JOIN arr; +``` + +``` text +┌─s─────┬─arr─┐ +│ Hello │ 1 │ +│ Hello │ 2 │ +│ World │ 3 │ +│ World │ 4 │ +│ World │ 5 │ +└───────┴─────┘ +``` + +El siguiente ejemplo utiliza el `LEFT ARRAY JOIN` clausula: + +``` sql +SELECT s, arr +FROM arrays_test +LEFT ARRAY JOIN arr; +``` + +``` text +┌─s───────────┬─arr─┐ +│ Hello │ 1 │ +│ Hello │ 2 │ +│ World │ 3 │ +│ World │ 4 │ +│ World │ 5 │ +│ Goodbye │ 0 │ +└─────────────┴─────┘ +``` + +#### Uso de alias {#using-aliases} + +Se puede especificar un alias para una matriz en el `ARRAY JOIN` clausula. En este caso, este alias puede acceder a un elemento de matriz, pero el nombre original tiene acceso a la matriz en sí. Ejemplo: + +``` sql +SELECT s, arr, a +FROM arrays_test +ARRAY JOIN arr AS a; +``` + +``` text +┌─s─────┬─arr─────┬─a─┐ +│ Hello │ [1,2] │ 1 │ +│ Hello │ [1,2] │ 2 │ +│ World │ [3,4,5] │ 3 │ +│ World │ [3,4,5] │ 4 │ +│ World │ [3,4,5] │ 5 │ +└───────┴─────────┴───┘ +``` + +Usando alias, puede realizar `ARRAY JOIN` con una matriz externa. Por ejemplo: + +``` sql +SELECT s, arr_external +FROM arrays_test +ARRAY JOIN [1, 2, 3] AS arr_external; +``` + +``` text +┌─s───────────┬─arr_external─┐ +│ Hello │ 1 │ +│ Hello │ 2 │ +│ Hello │ 3 │ +│ World │ 1 │ +│ World │ 2 │ +│ World │ 3 │ +│ Goodbye │ 1 │ +│ Goodbye │ 2 │ +│ Goodbye │ 3 │ +└─────────────┴──────────────┘ +``` + +Múltiples matrices se pueden separar por comas en el `ARRAY JOIN` clausula. En este caso, `JOIN` se realiza con ellos simultáneamente (la suma directa, no el producto cartesiano). Tenga en cuenta que todas las matrices deben tener el mismo tamaño. 
Ejemplo: + +``` sql +SELECT s, arr, a, num, mapped +FROM arrays_test +ARRAY JOIN arr AS a, arrayEnumerate(arr) AS num, arrayMap(x -> x + 1, arr) AS mapped; +``` + +``` text +┌─s─────┬─arr─────┬─a─┬─num─┬─mapped─┐ +│ Hello │ [1,2] │ 1 │ 1 │ 2 │ +│ Hello │ [1,2] │ 2 │ 2 │ 3 │ +│ World │ [3,4,5] │ 3 │ 1 │ 4 │ +│ World │ [3,4,5] │ 4 │ 2 │ 5 │ +│ World │ [3,4,5] │ 5 │ 3 │ 6 │ +└───────┴─────────┴───┴─────┴────────┘ +``` + +El siguiente ejemplo utiliza el [arrayEnumerate](../../sql_reference/functions/array_functions.md#array_functions-arrayenumerate) función: + +``` sql +SELECT s, arr, a, num, arrayEnumerate(arr) +FROM arrays_test +ARRAY JOIN arr AS a, arrayEnumerate(arr) AS num; +``` + +``` text +┌─s─────┬─arr─────┬─a─┬─num─┬─arrayEnumerate(arr)─┐ +│ Hello │ [1,2] │ 1 │ 1 │ [1,2] │ +│ Hello │ [1,2] │ 2 │ 2 │ [1,2] │ +│ World │ [3,4,5] │ 3 │ 1 │ [1,2,3] │ +│ World │ [3,4,5] │ 4 │ 2 │ [1,2,3] │ +│ World │ [3,4,5] │ 5 │ 3 │ [1,2,3] │ +└───────┴─────────┴───┴─────┴─────────────────────┘ +``` + +#### ARRAY JOIN con estructura de datos anidada {#array-join-with-nested-data-structure} + +`ARRAY`JOIN\`\` también funciona con [estructuras de datos anidados](../../sql_reference/data_types/nested_data_structures/nested.md). Ejemplo: + +``` sql +CREATE TABLE nested_test +( + s String, + nest Nested( + x UInt8, + y UInt32) +) ENGINE = Memory; + +INSERT INTO nested_test +VALUES ('Hello', [1,2], [10,20]), ('World', [3,4,5], [30,40,50]), ('Goodbye', [], []); +``` + +``` text +┌─s───────┬─nest.x──┬─nest.y─────┐ +│ Hello │ [1,2] │ [10,20] │ +│ World │ [3,4,5] │ [30,40,50] │ +│ Goodbye │ [] │ [] │ +└─────────┴─────────┴────────────┘ +``` + +``` sql +SELECT s, `nest.x`, `nest.y` +FROM nested_test +ARRAY JOIN nest; +``` + +``` text +┌─s─────┬─nest.x─┬─nest.y─┐ +│ Hello │ 1 │ 10 │ +│ Hello │ 2 │ 20 │ +│ World │ 3 │ 30 │ +│ World │ 4 │ 40 │ +│ World │ 5 │ 50 │ +└───────┴────────┴────────┘ +``` + +Al especificar nombres de estructuras de datos anidadas en `ARRAY JOIN` el significado es el mismo `ARRAY JOIN` con todos los elementos de la matriz en los que consiste. Los ejemplos se enumeran a continuación: + +``` sql +SELECT s, `nest.x`, `nest.y` +FROM nested_test +ARRAY JOIN `nest.x`, `nest.y`; +``` + +``` text +┌─s─────┬─nest.x─┬─nest.y─┐ +│ Hello │ 1 │ 10 │ +│ Hello │ 2 │ 20 │ +│ World │ 3 │ 30 │ +│ World │ 4 │ 40 │ +│ World │ 5 │ 50 │ +└───────┴────────┴────────┘ +``` + +Esta variación también tiene sentido: + +``` sql +SELECT s, `nest.x`, `nest.y` +FROM nested_test +ARRAY JOIN `nest.x`; +``` + +``` text +┌─s─────┬─nest.x─┬─nest.y─────┐ +│ Hello │ 1 │ [10,20] │ +│ Hello │ 2 │ [10,20] │ +│ World │ 3 │ [30,40,50] │ +│ World │ 4 │ [30,40,50] │ +│ World │ 5 │ [30,40,50] │ +└───────┴────────┴────────────┘ +``` + +Se puede usar un alias para una estructura de datos anidada, con el fin de seleccionar `JOIN` resultado o la matriz de origen. 
Ejemplo: + +``` sql +SELECT s, `n.x`, `n.y`, `nest.x`, `nest.y` +FROM nested_test +ARRAY JOIN nest AS n; +``` + +``` text +┌─s─────┬─n.x─┬─n.y─┬─nest.x──┬─nest.y─────┐ +│ Hello │ 1 │ 10 │ [1,2] │ [10,20] │ +│ Hello │ 2 │ 20 │ [1,2] │ [10,20] │ +│ World │ 3 │ 30 │ [3,4,5] │ [30,40,50] │ +│ World │ 4 │ 40 │ [3,4,5] │ [30,40,50] │ +│ World │ 5 │ 50 │ [3,4,5] │ [30,40,50] │ +└───────┴─────┴─────┴─────────┴────────────┘ +``` + +Ejemplo de uso del [arrayEnumerate](../../sql_reference/functions/array_functions.md#array_functions-arrayenumerate) función: + +``` sql +SELECT s, `n.x`, `n.y`, `nest.x`, `nest.y`, num +FROM nested_test +ARRAY JOIN nest AS n, arrayEnumerate(`nest.x`) AS num; +``` + +``` text +┌─s─────┬─n.x─┬─n.y─┬─nest.x──┬─nest.y─────┬─num─┐ +│ Hello │ 1 │ 10 │ [1,2] │ [10,20] │ 1 │ +│ Hello │ 2 │ 20 │ [1,2] │ [10,20] │ 2 │ +│ World │ 3 │ 30 │ [3,4,5] │ [30,40,50] │ 1 │ +│ World │ 4 │ 40 │ [3,4,5] │ [30,40,50] │ 2 │ +│ World │ 5 │ 50 │ [3,4,5] │ [30,40,50] │ 3 │ +└───────┴─────┴─────┴─────────┴────────────┴─────┘ +``` + +### Cláusula JOIN {#select-join} + +Se une a los datos en el [SQL JOIN](https://en.wikipedia.org/wiki/Join_(SQL)) sentido. + +!!! info "Nota" + No relacionado con [ARRAY JOIN](#select-array-join-clause). + +``` sql +SELECT +FROM +[GLOBAL] [ANY|ALL] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER] JOIN +(ON )|(USING ) ... +``` + +Los nombres de tabla se pueden especificar en lugar de `` y ``. Esto es equivalente a la `SELECT * FROM table` subconsulta, excepto en un caso especial cuando la tabla tiene [Unir](../../engines/table_engines/special/join.md) engine – an array prepared for joining. + +#### Tipos compatibles de `JOIN` {#select-join-types} + +- `INNER JOIN` (o `JOIN`) +- `LEFT JOIN` (o `LEFT OUTER JOIN`) +- `RIGHT JOIN` (o `RIGHT OUTER JOIN`) +- `FULL JOIN` (o `FULL OUTER JOIN`) +- `CROSS JOIN` (o `,` ) + +Ver el estándar [SQL JOIN](https://en.wikipedia.org/wiki/Join_(SQL)) descripci. + +#### ÚNETE Múltiple {#multiple-join} + +Al realizar consultas, ClickHouse reescribe las uniones de varias tablas en la secuencia de uniones de dos tablas. Por ejemplo, si hay cuatro tablas para unir ClickHouse une la primera y la segunda, luego une el resultado con la tercera tabla, y en el último paso, se une a la cuarta. + +Si una consulta contiene el `WHERE` cláusula, ClickHouse intenta empujar hacia abajo los filtros de esta cláusula a través de la unión intermedia. Si no puede aplicar el filtro a cada unión intermedia, ClickHouse aplica los filtros después de que se completen todas las combinaciones. + +Recomendamos el `JOIN ON` o `JOIN USING` sintaxis para crear consultas. Por ejemplo: + +``` sql +SELECT * FROM t1 JOIN t2 ON t1.a = t2.a JOIN t3 ON t1.a = t3.a +``` + +Puede utilizar listas de tablas separadas por comas `FROM` clausula. Por ejemplo: + +``` sql +SELECT * FROM t1, t2, t3 WHERE t1.a = t2.a AND t1.a = t3.a +``` + +No mezcle estas sintaxis. + +ClickHouse no admite directamente la sintaxis con comas, por lo que no recomendamos usarlas. El algoritmo intenta reescribir la consulta en términos de `CROSS JOIN` y `INNER JOIN` y luego procede al procesamiento de consultas. Al reescribir la consulta, ClickHouse intenta optimizar el rendimiento y el consumo de memoria. De forma predeterminada, ClickHouse trata las comas como `INNER JOIN` cláusula y convierte `INNER JOIN` a `CROSS JOIN` cuando el algoritmo no puede garantizar que `INNER JOIN` devuelve los datos requeridos. 
#### Rigor {#select-join-strictness} + +- `ALL` — If the right table has several matching rows, ClickHouse creates a [Producto cartesiano](https://en.wikipedia.org/wiki/Cartesian_product) de filas coincidentes. Este es el estándar `JOIN` comportamiento en SQL. +- `ANY` — If the right table has several matching rows, only the first one found is joined. If the right table has only one matching row, the results of queries with `ANY` y `ALL` palabras clave son las mismas. +- `ASOF` — For joining sequences with a non-exact match. `ASOF JOIN` el uso se describe a continuación. + +**ASOF JOIN Uso** + +`ASOF JOIN` es útil cuando necesita unir registros que no tienen una coincidencia exacta. + +Tablas para `ASOF JOIN` debe tener una columna de secuencia ordenada. Esta columna no puede estar sola en una tabla y debe ser uno de los tipos de datos: `UInt32`, `UInt64`, `Float32`, `Float64`, `Date`, y `DateTime`. + +Sintaxis `ASOF JOIN ... ON`: + +``` sql +SELECT expressions_list +FROM table_1 +ASOF LEFT JOIN table_2 +ON equi_cond AND closest_match_cond +``` + +Puede usar cualquier número de condiciones de igualdad y exactamente una condición de coincidencia más cercana. Por ejemplo, `SELECT count() FROM table_1 ASOF LEFT JOIN table_2 ON table_1.a == table_2.b AND table_2.t <= table_1.t`. + +Condiciones admitidas para la coincidencia más cercana: `>`, `>=`, `<`, `<=`. + +Sintaxis `ASOF JOIN ... USING`: + +``` sql +SELECT expressions_list +FROM table_1 +ASOF JOIN table_2 +USING (equi_column1, ... equi_columnN, asof_column) +``` + +`ASOF JOIN` utilizar `equi_columnX` para unirse a la igualdad y `asof_column` para unirse en el partido más cercano con el `table_1.asof_column >= table_2.asof_column` condición. El `asof_column` columna siempre el último en el `USING` clausula. + +Por ejemplo, considere las siguientes tablas:

``` text
table_1                            table_2
evento | ev_time | user_id         evento | ev_time | user_id
```

diff --git a/docs/es/query_language/show.md b/docs/es/sql_reference/statements/show.md similarity index 68% rename from docs/es/query_language/show.md rename to docs/es/sql_reference/statements/show.md index f713fbe337e..264bdaaa519 100644 --- a/docs/es/query_language/show.md +++ b/docs/es/sql_reference/statements/show.md @@ -1,18 +1,21 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 38 +toc_title: SHOW --- # MOSTRAR consultas {#show-queries} -## MOSTRAR CREAR TABLA {#show-create-table} +## SHOW CREATE TABLE {#show-create-table} ``` sql SHOW CREATE [TEMPORARY] [TABLE|DICTIONARY] [db.]table [INTO OUTFILE filename] [FORMAT format] ``` -Devuelve una sola `String`-tipo ‘statement’ columna, que contiene un único valor – el `CREATE` Consulta utilizada para crear el objeto especificado. +Devuelve una sola `String`-tipo ‘statement’ column, which contains a single value – the `CREATE` consulta utilizada para crear el objeto especificado. -## MOSTRAR BASAS DE DATOS {#show-databases} +## SHOW DATABASES {#show-databases} ``` sql SHOW DATABASES [INTO OUTFILE filename] [FORMAT format] ``` Imprime una lista de todas las bases de datos. Esta consulta es idéntica a `SELECT name FROM system.databases [INTO OUTFILE filename] [FORMAT format]`.
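Por ejemplo, la siguiente variante devuelve la misma lista y solo añade un formato de salida explícito:

``` sql
SHOW DATABASES FORMAT TSV
```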
-## MOSTRAR LISTA DE PROCESOS {#show-processlist} +## SHOW PROCESSLIST {#show-processlist} ``` sql SHOW PROCESSLIST [INTO OUTFILE filename] [FORMAT format] ``` -Envía el contenido de la [sistema.proceso](../operations/system_tables.md#system_tables-processes) tabla, que contiene una lista de consultas que se están procesando en este momento, exceptuando `SHOW PROCESSLIST` consulta. +Envía el contenido de la [sistema.procesa](../../operations/system_tables.md#system_tables-processes) tabla, que contiene una lista de consultas que se están procesando en este momento, exceptuando `SHOW PROCESSLIST` consulta. -El `SELECT * FROM system.processes` todas las consultas actuales. +El `SELECT * FROM system.processes` query devuelve datos sobre todas las consultas actuales. Consejo (ejecutar en la consola): @@ -37,7 +40,7 @@ Consejo (ejecutar en la consola): $ watch -n1 "clickhouse-client --query='SHOW PROCESSLIST'" ``` -## MOSTRAR TABLAS {#show-tables} +## SHOW TABLES {#show-tables} Muestra una lista de tablas. @@ -47,7 +50,7 @@ SHOW [TEMPORARY] TABLES [{FROM | IN} ] [LIKE '' | WHERE expr] [LIMI Si el `FROM` no se especifica la cláusula, la consulta devuelve la lista de tablas de la base de datos actual. -Puede obtener los mismos resultados que el `SHOW TABLES` Consulta de la siguiente manera: +Puede obtener los mismos resultados que el `SHOW TABLES` consulta de la siguiente manera: ``` sql SELECT name FROM system.tables WHERE database = [AND name LIKE ] [LIMIT ] [INTO OUTFILE ] [FORMAT ] @@ -68,9 +71,9 @@ SHOW TABLES FROM system LIKE '%co%' LIMIT 2 └────────────────────────────────┘ ``` -## MOSTRAR DICCIONARIOS {#show-dictionaries} +## SHOW DICTIONARIES {#show-dictionaries} -Muestra una lista de [diccionarios externos](dicts/external_dicts.md). +Muestra una lista de [diccionarios externos](../../sql_reference/dictionaries/external_dictionaries/external_dicts.md). 
``` sql SHOW DICTIONARIES [FROM ] [LIKE ''] [LIMIT ] [INTO OUTFILE ] [FORMAT ] @@ -78,7 +81,7 @@ SHOW DICTIONARIES [FROM ] [LIKE ''] [LIMIT ] [INTO OUTFILE [AND name LIKE ] [LIMIT ] [INTO OUTFILE ] [FORMAT ] @@ -99,4 +102,4 @@ SHOW DICTIONARIES FROM db LIKE '%reg%' LIMIT 2 └──────────────┘ ``` -[Artículo Original](https://clickhouse.tech/docs/es/query_language/show/) +[Artículo Original](https://clickhouse.tech/docs/en/query_language/show/) diff --git a/docs/es/sql_reference/statements/system.md b/docs/es/sql_reference/statements/system.md new file mode 100644 index 00000000000..ddb0a5aa84f --- /dev/null +++ b/docs/es/sql_reference/statements/system.md @@ -0,0 +1,113 @@ +--- +machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 37 +toc_title: SYSTEM +--- + +# Consultas del sistema {#query-language-system} + +- [RELOAD DICTIONARIES](#query_language-system-reload-dictionaries) +- [RELOAD DICTIONARY](#query_language-system-reload-dictionary) +- [DROP DNS CACHE](#query_language-system-drop-dns-cache) +- [DROP MARK CACHE](#query_language-system-drop-mark-cache) +- [FLUSH LOGS](#query_language-system-flush_logs) +- [RELOAD CONFIG](#query_language-system-reload-config) +- [SHUTDOWN](#query_language-system-shutdown) +- [KILL](#query_language-system-kill) +- [STOP DISTRIBUTED SENDS](#query_language-system-stop-distributed-sends) +- [FLUSH DISTRIBUTED](#query_language-system-flush-distributed) +- [START DISTRIBUTED SENDS](#query_language-system-start-distributed-sends) +- [STOP MERGES](#query_language-system-stop-merges) +- [START MERGES](#query_language-system-start-merges) + +## RELOAD DICTIONARIES {#query_language-system-reload-dictionaries} + +Vuelve a cargar todos los diccionarios que se han cargado correctamente antes. +De forma predeterminada, los diccionarios se cargan perezosamente (ver [Diccionarios\_lazy\_load](../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-dictionaries_lazy_load)), por lo que en lugar de cargarse automáticamente al inicio, se inicializan en el primer acceso a través de la función dictGet o SELECT desde tablas con ENGINE = Dictionary . El `SYSTEM RELOAD DICTIONARIES` consulta vuelve a cargar dichos diccionarios (LOADED). +Siempre vuelve `Ok.` independientemente del resultado de la actualización del diccionario. + +## RELOAD DICTIONARY Dictionary\_name {#query_language-system-reload-dictionary} + +Recarga completamente un diccionario `dictionary_name`, independientemente del estado del diccionario (LOADED / NOT\_LOADED / FAILED). +Siempre vuelve `Ok.` independientemente del resultado de la actualización del diccionario. +El estado del diccionario se puede comprobar consultando el `system.dictionaries` tabla. + +``` sql +SELECT name, status FROM system.dictionaries; +``` + +## DROP DNS CACHE {#query_language-system-drop-dns-cache} + +Restablece la caché DNS interna de ClickHouse. A veces (para versiones anteriores de ClickHouse) es necesario usar este comando al cambiar la infraestructura (cambiar la dirección IP de otro servidor de ClickHouse o el servidor utilizado por los diccionarios). + +Para obtener una administración de caché más conveniente (automática), consulte disable\_internal\_dns\_cache, dns\_cache\_update\_period parameters. + +## DROP MARK CACHE {#query_language-system-drop-mark-cache} + +Restablece la caché de marcas. Utilizado en el desarrollo de ClickHouse y pruebas de rendimiento. 
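Ambas consultas de caché se invocan sin argumentos, por ejemplo:

``` sql
-- Restablece la caché DNS interna (p. ej., tras cambiar direcciones IP de servidores)
SYSTEM DROP DNS CACHE;
-- Restablece la caché de marcas (útil en pruebas de rendimiento)
SYSTEM DROP MARK CACHE;
```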
+ +## FLUSH LOGS {#query_language-system-flush_logs} + +Flushes buffers of log messages to system tables (e.g. system.query\_log). Allows you to not wait 7.5 seconds when debugging. + +## RELOAD CONFIG {#query_language-system-reload-config} + +Vuelve a cargar la configuración de ClickHouse. Se usa cuando la configuración se almacena en ZooKeeeper. + +## SHUTDOWN {#query_language-system-shutdown} + +Normalmente se apaga ClickHouse (como `service clickhouse-server stop` / `kill {$pid_clickhouse-server}`) + +## KILL {#query_language-system-kill} + +Anula el proceso de ClickHouse (como `kill -9 {$ pid_clickhouse-server}`) + +## Administración de tablas distribuidas {#query-language-system-distributed} + +ClickHouse puede administrar [distribuido](../../engines/table_engines/special/distributed.md) tabla. Cuando un usuario inserta datos en estas tablas, ClickHouse primero crea una cola de los datos que se deben enviar a los nodos del clúster y, a continuación, los envía de forma asincrónica. Puede administrar el procesamiento de colas con el [STOP DISTRIBUTED SENDS](#query_language-system-stop-distributed-sends), [FLUSH DISTRIBUTED](#query_language-system-flush-distributed), y [START DISTRIBUTED SENDS](#query_language-system-start-distributed-sends) consulta. También puede insertar sincrónicamente datos distribuidos con el `insert_distributed_sync` configuración. + +### STOP DISTRIBUTED SENDS {#query_language-system-stop-distributed-sends} + +Deshabilita la distribución de datos en segundo plano al insertar datos en tablas distribuidas. + +``` sql +SYSTEM STOP DISTRIBUTED SENDS [db.] +``` + +### FLUSH DISTRIBUTED {#query_language-system-flush-distributed} + +Obliga a ClickHouse a enviar datos a nodos de clúster de forma sincrónica. Si algún nodo no está disponible, ClickHouse produce una excepción y detiene la ejecución de la consulta. Puede volver a intentar la consulta hasta que tenga éxito, lo que sucederá cuando todos los nodos estén nuevamente en línea. + +``` sql +SYSTEM FLUSH DISTRIBUTED [db.] +``` + +### START DISTRIBUTED SENDS {#query_language-system-start-distributed-sends} + +Habilita la distribución de datos en segundo plano al insertar datos en tablas distribuidas. + +``` sql +SYSTEM START DISTRIBUTED SENDS [db.] +``` + +### STOP MERGES {#query_language-system-stop-merges} + +Proporciona la posibilidad de detener las fusiones en segundo plano para las tablas de la familia MergeTree: + +``` sql +SYSTEM STOP MERGES [[db.]merge_tree_family_table_name] +``` + +!!! note "Nota" + `DETACH / ATTACH` la tabla comenzará las fusiones de fondo para la tabla, incluso en caso de que las fusiones se hayan detenido para todas las tablas MergeTree antes. 
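Un boceto de uso típico (el nombre `db.merge_tree_table` es hipotético): detener las fusiones antes de una operación de mantenimiento y reanudarlas después con `START MERGES`, descrito a continuación:

``` sql
SYSTEM STOP MERGES db.merge_tree_table;
-- ... operación de mantenimiento sobre la tabla ...
SYSTEM START MERGES db.merge_tree_table;
```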
+ +### START MERGES {#query_language-system-start-merges} + +Proporciona la posibilidad de iniciar fusiones en segundo plano para tablas de la familia MergeTree: + +``` sql +SYSTEM START MERGES [[db.]merge_tree_family_table_name] +``` + +[Artículo Original](https://clickhouse.tech/docs/en/query_language/system/) diff --git a/docs/es/query_language/syntax.md b/docs/es/sql_reference/syntax.md similarity index 88% rename from docs/es/query_language/syntax.md rename to docs/es/sql_reference/syntax.md index cb24cf46430..1a9e643107a 100644 --- a/docs/es/query_language/syntax.md +++ b/docs/es/sql_reference/syntax.md @@ -1,23 +1,26 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 31 +toc_title: Sintaxis --- # Sintaxis {#syntax} Hay dos tipos de analizadores en el sistema: el analizador SQL completo (un analizador de descenso recursivo) y el analizador de formato de datos (un analizador de flujo rápido). En todos los casos, excepto el `INSERT` consulta, sólo se utiliza el analizador SQL completo. -El `INSERT` Consulta utiliza ambos analizadores: +El `INSERT` consulta utiliza ambos analizadores: ``` sql INSERT INTO t VALUES (1, 'Hello, world'), (2, 'abc'), (3, 'def') ``` -El `INSERT INTO t VALUES` fragmento es analizado por el analizador completo, y los datos `(1, 'Hello, world'), (2, 'abc'), (3, 'def')` es analizado por el analizador de flujo rápido. También puede activar el analizador completo de los datos mediante el [input\_format\_values\_interpret\_expressions](../operations/settings/settings.md#settings-input_format_values_interpret_expressions) configuración. Cuando `input_format_values_interpret_expressions = 1`, ClickHouse primero intenta analizar valores con el analizador de flujo rápido. Si falla, ClickHouse intenta usar el analizador completo para los datos, tratándolo como un SQL [expresión](#syntax-expressions). +El `INSERT INTO t VALUES` fragmento es analizado por el analizador completo, y los datos `(1, 'Hello, world'), (2, 'abc'), (3, 'def')` es analizado por el analizador de flujo rápido. También puede activar el analizador completo de los datos mediante el [input\_format\_values\_interpret\_expressions](../operations/settings/settings.md#settings-input_format_values_interpret_expressions) configuración. Cuando `input_format_values_interpret_expressions = 1`, ClickHouse primero intenta analizar valores con el analizador de flujo rápido. Si falla, ClickHouse intenta usar el analizador completo para los datos, tratándolo como un SQL [expresion](#syntax-expressions). Los datos pueden tener cualquier formato. Cuando se recibe una consulta, el servidor no calcula más de [max\_query\_size](../operations/settings/settings.md#settings-max_query_size) bytes de la solicitud en RAM (por defecto, 1 MB), y el resto se analiza la secuencia. -Esto significa que el sistema no tiene problemas con `INSERT` Consultas, como lo hace MySQL. +Esto significa que el sistema no tiene problemas con `INSERT` consultas, como lo hace MySQL. -Cuando se utiliza el `Values` formato en un `INSERT` consulta, puede parecer que los datos se analizan igual que las expresiones en un `SELECT` Consulta, pero esto no es cierto. El `Values` formato es mucho más limitado. +Cuando se utiliza el `Values` formato en un `INSERT` consulta, puede parecer que los datos se analizan igual que las expresiones en un `SELECT` consulta, pero esto no es cierto. El `Values` formato es mucho más limitado. A continuación cubriremos el analizador completo. 
Para obtener más información sobre los analizadores de formato, consulte [Formato](../interfaces/formats.md) apartado. @@ -29,7 +32,7 @@ Puede haber cualquier número de símbolos de espacio entre las construcciones s Se admiten comentarios de estilo SQL y de estilo C. Comentarios de estilo SQL: desde `--` al final de la línea. El espacio después `--` se puede omitir. -Comentarios en estilo C: de `/*` un `*/`. Estos comentarios pueden ser multilínea. Tampoco se requieren espacios aquí. +Comentarios en estilo C: de `/*` a `*/`. Estos comentarios pueden ser multilínea. Tampoco se requieren espacios aquí. ## Palabras clave {#syntax-keywords} @@ -42,11 +45,11 @@ Si el nombre del tipo de datos distingue entre mayúsculas y minúsculas `system A diferencia del SQL estándar, todas las demás palabras clave (incluidos los nombres de las funciones) son **minúsculas**. -Las palabras clave no están reservadas (simplemente se analizan como palabras clave en el contexto correspondiente). Si usted usa [identificador](#syntax-identifiers) lo mismo que las palabras clave, encerrarlas entre comillas. Por ejemplo, la consulta `SELECT "FROM" FROM table_name` es válido si la tabla `table_name` tetas grandes con el nombre `"FROM"`. +Las palabras clave no están reservadas (simplemente se analizan como palabras clave en el contexto correspondiente). Si usted usa [identificador](#syntax-identifiers) lo mismo que las palabras clave, encerrarlas entre comillas. Por ejemplo, la consulta `SELECT "FROM" FROM table_name` es válido si la tabla `table_name` tiene columna con el nombre `"FROM"`. ## Identificador {#syntax-identifiers} -Los identificadores hijo: +Los identificadores son: - Nombres de clúster, base de datos, tabla, partición y columna. - Función. @@ -73,34 +76,34 @@ Un literal numérico, intenta ser analizado: - De lo contrario, se devuelve un error. El valor correspondiente tendrá el tipo más pequeño en el que se ajuste el valor. -Por ejemplo, 1 se analiza como `UInt8` pero 256 se analiza como `UInt16`. Para obtener más información, consulte [Tipos de datos](../data_types/index.md). +Por ejemplo, 1 se analiza como `UInt8` pero 256 se analiza como `UInt16`. Para obtener más información, consulte [Tipos de datos](../sql_reference/data_types/index.md). Ejemplos: `1`, `18446744073709551615`, `0xDEADBEEF`, `01`, `0.1`, `1e100`, `-1e-100`, `inf`, `nan`. ### Cadena {#syntax-string-literal} -Solo se admiten literales de cadena entre comillas simples. Los caracteres incluidos se pueden escapar de barra invertida. Las siguientes secuencias de escape tienen un valor especial correspondiente: `\b`, `\f`, `\r`, `\n`, `\t`, `\0`, `\a`, `\v`, `\xHH`. En todos los demás casos, secuencias de escape en el formato `\c`, donde `c` cualquier carácter, se convierten a `c`. Esto significa que puedes usar las secuencias `\'`y`\\`. El valor tendrá el [Cadena](../data_types/string.md) tipo. +Solo se admiten literales de cadena entre comillas simples. Los caracteres incluidos se pueden escapar de barra invertida. Las siguientes secuencias de escape tienen un valor especial correspondiente: `\b`, `\f`, `\r`, `\n`, `\t`, `\0`, `\a`, `\v`, `\xHH`. En todos los demás casos, secuencias de escape en el formato `\c`, donde `c` cualquier carácter, se convierten a `c`. Esto significa que puedes usar las secuencias `\'`y`\\`. El valor tendrá el [Cadena](../sql_reference/data_types/string.md) tipo. -El conjunto mínimo de caracteres que necesita para escapar en literales de cadena: `'` y `\`. 
La comilla simple se puede escapar con la comilla simple, literales `'It\'s'` y `'It''s'` hijo iguales. +El conjunto mínimo de caracteres que necesita para escapar en literales de cadena: `'` y `\`. La comilla simple se puede escapar con la comilla simple, literales `'It\'s'` y `'It''s'` son iguales. ### Compuesto {#compound} Las construcciones son compatibles con las matrices: `[1, 2, 3]` y tuplas: `(1, 'Hello, world!', 2)`.. En realidad, estos no son literales, sino expresiones con el operador de creación de matriz y el operador de creación de tuplas, respectivamente. Una matriz debe constar de al menos un elemento y una tupla debe tener al menos dos elementos. -Las tuplas tienen un propósito especial para su uso en el `IN` cláusula de un `SELECT` consulta. Las tuplas se pueden obtener como resultado de una consulta, pero no se pueden guardar en una base de datos (con la excepción de [Memoria](../operations/table_engines/memory.md) tabla). +Las tuplas tienen un propósito especial para su uso en el `IN` cláusula de un `SELECT` consulta. Las tuplas se pueden obtener como resultado de una consulta, pero no se pueden guardar en una base de datos (con la excepción de [Memoria](../engines/table_engines/special/memory.md) tabla). -### NULO {#null-literal} +### NULL {#null-literal} Indica que falta el valor. -Para almacenar `NULL` es un campo de tabla, debe ser del [NULL](../data_types/nullable.md) tipo. +Para almacenar `NULL` en un campo de tabla, debe ser del [NULL](../sql_reference/data_types/nullable.md) tipo. Dependiendo del formato de datos (entrada o salida), `NULL` puede tener una representación diferente. Para obtener más información, consulte la documentación de [Formatos de datos](../interfaces/formats.md#formats). Hay muchos matices para el procesamiento `NULL`. Por ejemplo, si al menos uno de los argumentos de una operación de comparación es `NULL` el resultado de esta operación también se `NULL`. Lo mismo es cierto para la multiplicación, la suma y otras operaciones. Para obtener más información, lea la documentación de cada operación. -En las consultas, puede verificar `NULL` utilizando el [ES NULO](operators.md#operator-is-null) y [NO ES NULO](operators.md) operadores y las funciones relacionadas `isNull` y `isNotNull`. +En las consultas, puede verificar `NULL` utilizando el [IS NULL](operators.md#operator-is-null) y [IS NOT NULL](operators.md) operadores y las funciones relacionadas `isNull` y `isNotNull`. ## Función {#functions} @@ -128,7 +131,7 @@ expr AS alias For example, `SELECT table_name_alias.column_name FROM table_name table_name_alias`. - In the [CAST](functions/type_conversion_functions.md#type_conversion_function-cast) function, the `AS` keyword has another meaning. See the description of the function. + In the [CAST](sql_reference/functions/type_conversion_functions.md#type_conversion_function-cast) function, the `AS` keyword has another meaning. See the description of the function. - `expr` — Any expression supported by ClickHouse. @@ -181,4 +184,4 @@ Una expresión es una función, identificador, literal, aplicación de un operad Una lista de expresiones es una o más expresiones separadas por comas. Las funciones y los operadores, a su vez, pueden tener expresiones como argumentos. 
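
Un ejemplo orientativo que combina expresiones con la propagación de `NULL` descrita más arriba (los alias son solo ilustrativos):

``` sql
SELECT 1 + 2 AS suma, 1 = NULL AS comparacion, isNull(NULL) AS es_nulo
```

Aquí `comparacion` devuelve `NULL`, mientras que `es_nulo` devuelve `1`.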
-[Artículo Original](https://clickhouse.tech/docs/es/query_language/syntax/) +[Artículo Original](https://clickhouse.tech/docs/en/query_language/syntax/) diff --git a/docs/es/query_language/table_functions/file.md b/docs/es/sql_reference/table_functions/file.md similarity index 65% rename from docs/es/query_language/table_functions/file.md rename to docs/es/sql_reference/table_functions/file.md index f20a208d920..78353de6328 100644 --- a/docs/es/query_language/table_functions/file.md +++ b/docs/es/sql_reference/table_functions/file.md @@ -1,10 +1,13 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 37 +toc_title: file --- # file {#file} -Crea una tabla a partir de un archivo. Esta función de tabla es similar a [URL](url.md) y [Hdfs](hdfs.md) Aquel. +Crea una tabla a partir de un archivo. Esta función de tabla es similar a [URL](url.md) y [Hdfs](hdfs.md) aquel. ``` sql file(path, format, structure) @@ -12,9 +15,9 @@ file(path, format, structure) **Parámetros de entrada** -- `path` — La ruta relativa al archivo desde [user\_files\_path](../../operations/server_settings/settings.md#server_settings-user_files_path). Soporte de ruta a archivo siguiendo globs en modo de solo lectura: `*`, `?`, `{abc,def}` y `{N..M}` donde `N`, `M` — numero, \``'abc', 'def'` — cadena. -- `format` — El [Formato](../../interfaces/formats.md#formats) del archivo. -- `structure` — Estructura de la mesa. Formato `'column1_name column1_type, column2_name column2_type, ...'`. +- `path` — The relative path to the file from [user\_files\_path](../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-user_files_path). Soporte de ruta a archivo siguiendo globs en modo de solo lectura: `*`, `?`, `{abc,def}` y `{N..M}` donde `N`, `M` — numbers, \``'abc', 'def'` — strings. +- `format` — The [formato](../../interfaces/formats.md#formats) del archivo. +- `structure` — Structure of the table. Format `'column1_name column1_type, column2_name column2_type, ...'`. **Valor devuelto** @@ -58,12 +61,12 @@ SELECT * FROM file('test.csv', 'CSV', 'column1 UInt32, column2 UInt32, column3 U Múltiples componentes de ruta de acceso pueden tener globs. Para ser procesado, el archivo debe existir y coincidir con todo el patrón de ruta (no solo el sufijo o el prefijo). -- `*` — Sustituye cualquier número de caracteres excepto `/` incluyendo cadena vacía. -- `?` — Sustituye a cualquier carácter individual. -- `{some_string,another_string,yet_another_one}` — Sustituye cualquiera de las cadenas `'some_string', 'another_string', 'yet_another_one'`. -- `{N..M}` — Sustituye cualquier número en el intervalo de N a M, incluidas ambas fronteras. +- `*` — Substitutes any number of any characters except `/` incluyendo cadena vacía. +- `?` — Substitutes any single character. +- `{some_string,another_string,yet_another_one}` — Substitutes any of strings `'some_string', 'another_string', 'yet_another_one'`. +- `{N..M}` — Substitutes any number in range from N to M including both borders. -Construcciones con `{}` hijo similares a la [función de tabla remota](../../query_language/table_functions/remote.md)). +Construcciones con `{}` son similares a la [función de tabla remota](../../sql_reference/table_functions/remote.md)). 
**Ejemplo** @@ -106,13 +109,13 @@ SELECT count(*) FROM file('big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name String, value UInt32') ``` -## Columnas virtuales {#virtual-columns} +## Virtual Columnas {#virtual-columns} -- `_path` — Ruta de acceso al archivo. -- `_file` — Nombre del expediente. +- `_path` — Path to the file. +- `_file` — Name of the file. **Ver también** -- [Columnas virtuales](https://clickhouse.tech/docs/es/operations/table_engines/#table_engines-virtual_columns) +- [Virtual columnas](https://clickhouse.tech/docs/en/operations/table_engines/#table_engines-virtual_columns) -[Artículo Original](https://clickhouse.tech/docs/es/query_language/table_functions/file/) +[Artículo Original](https://clickhouse.tech/docs/en/query_language/table_functions/file/) diff --git a/docs/fa/query_language/table_functions/generate.md b/docs/es/sql_reference/table_functions/generate.md similarity index 74% rename from docs/fa/query_language/table_functions/generate.md rename to docs/es/sql_reference/table_functions/generate.md index 273b5bd7e23..1d11afc5f8c 100644 --- a/docs/fa/query_language/table_functions/generate.md +++ b/docs/es/sql_reference/table_functions/generate.md @@ -1,18 +1,21 @@ --- -en_copy: true +machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 47 +toc_title: generateRandom --- # generateRandom {#generaterandom} -Generates random data with given schema. -Allows to populate test tables with data. -Supports all data types that can be stored in table except `LowCardinality` and `AggregateFunction`. +Genera datos aleatorios con un esquema dado. +Permite rellenar tablas de prueba con datos. +Admite todos los tipos de datos que se pueden almacenar en la tabla, excepto `LowCardinality` y `AggregateFunction`. ``` sql generateRandom('name TypeName[, name TypeName]...', [, 'random_seed'[, 'max_string_length'[, 'max_array_length']]]); ``` -**Parameters** +**Parámetros** - `name` — Name of corresponding column. - `TypeName` — Type of corresponding column. @@ -21,11 +24,11 @@ generateRandom('name TypeName[, name TypeName]...', [, 'random_seed'[, 'max_stri - `max_string_length` — Maximum string length for all generated strings. Defaults to `10`. - `random_seed` — Specify random seed manually to produce stable results. If NULL — seed is randomly generated. -**Returned Value** +**Valor devuelto** -A table object with requested schema. +Un objeto de tabla con el esquema solicitado. 
-## Usage Example {#usage-example} +## Ejemplo de uso {#usage-example} ``` sql SELECT * FROM generateRandom('a Array(Int8), d Decimal32(4), c Tuple(DateTime64(3), UUID)', 1, 10, 2); @@ -39,4 +42,4 @@ SELECT * FROM generateRandom('a Array(Int8), d Decimal32(4), c Tuple(DateTime64( └──────────┴──────────────┴────────────────────────────────────────────────────────────────────┘ ``` -[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/generate/) +[Artículo Original](https://clickhouse.tech/docs/en/query_language/table_functions/generate/) diff --git a/docs/es/query_language/table_functions/hdfs.md b/docs/es/sql_reference/table_functions/hdfs.md similarity index 64% rename from docs/es/query_language/table_functions/hdfs.md rename to docs/es/sql_reference/table_functions/hdfs.md index 81e9a9e8898..b582e89f37e 100644 --- a/docs/es/query_language/table_functions/hdfs.md +++ b/docs/es/sql_reference/table_functions/hdfs.md @@ -1,10 +1,13 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 45 +toc_title: Hdfs --- # Hdfs {#hdfs} -Crea una tabla a partir de archivos en HDFS. Esta función de tabla es similar a [URL](url.md) y [file](file.md) Aquel. +Crea una tabla a partir de archivos en HDFS. Esta función de tabla es similar a [URL](url.md) y [file](file.md) aquel. ``` sql hdfs(URI, format, structure) @@ -12,9 +15,9 @@ hdfs(URI, format, structure) **Parámetros de entrada** -- `URI` — El URI relativo al archivo en HDFS. Soporte de ruta a archivo siguiendo globs en modo de solo lectura: `*`, `?`, `{abc,def}` y `{N..M}` donde `N`, `M` — numero, \``'abc', 'def'` — cadena. -- `format` — El [Formato](../../interfaces/formats.md#formats) del archivo. -- `structure` — Estructura de la mesa. Formato `'column1_name column1_type, column2_name column2_type, ...'`. +- `URI` — The relative URI to the file in HDFS. Path to file support following globs in readonly mode: `*`, `?`, `{abc,def}` y `{N..M}` donde `N`, `M` — numbers, \``'abc', 'def'` — strings. +- `format` — The [formato](../../interfaces/formats.md#formats) del archivo. +- `structure` — Structure of the table. Format `'column1_name column1_type, column2_name column2_type, ...'`. **Valor devuelto** @@ -41,12 +44,12 @@ LIMIT 2 Múltiples componentes de ruta de acceso pueden tener globs. Para ser procesado, el archivo debe existir y coincidir con todo el patrón de ruta (no solo el sufijo o el prefijo). -- `*` — Sustituye cualquier número de caracteres excepto `/` incluyendo cadena vacía. -- `?` — Sustituye a cualquier carácter individual. -- `{some_string,another_string,yet_another_one}` — Sustituye cualquiera de las cadenas `'some_string', 'another_string', 'yet_another_one'`. -- `{N..M}` — Sustituye cualquier número en el intervalo de N a M, incluidas ambas fronteras. +- `*` — Substitutes any number of any characters except `/` incluyendo cadena vacía. +- `?` — Substitutes any single character. +- `{some_string,another_string,yet_another_one}` — Substitutes any of strings `'some_string', 'another_string', 'yet_another_one'`. +- `{N..M}` — Substitutes any number in range from N to M including both borders. -Construcciones con `{}` hijo similares a la [función de tabla remota](../../query_language/table_functions/remote.md)). +Construcciones con `{}` son similares a la [función de tabla remota](../../sql_reference/table_functions/remote.md)). 
**Ejemplo**

@@ -89,13 +92,13 @@ SELECT count(*)
FROM hdfs('hdfs://hdfs1:9000/big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name String, value UInt32')
```

-## Columnas virtuales {#virtual-columns}
+## Virtual Columnas {#virtual-columns}

-- `_path` — Ruta de acceso al archivo.
-- `_file` — Nombre del expediente.
+- `_path` — Path to the file.
+- `_file` — Name of the file.

**Ver también**

-- [Columnas virtuales](https://clickhouse.tech/docs/es/operations/table_engines/#table_engines-virtual_columns)
+- [Virtual columnas](https://clickhouse.tech/docs/en/operations/table_engines/#table_engines-virtual_columns)

-[Artículo Original](https://clickhouse.tech/docs/es/query_language/table_functions/hdfs/)
+[Artículo Original](https://clickhouse.tech/docs/en/query_language/table_functions/hdfs/)
diff --git a/docs/es/sql_reference/table_functions/index.md b/docs/es/sql_reference/table_functions/index.md
new file mode 100644
index 00000000000..63f5b0da451
--- /dev/null
+++ b/docs/es/sql_reference/table_functions/index.md
@@ -0,0 +1,38 @@
+---
+machine_translated: true
+machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa
+toc_folder_title: Table Functions
+toc_priority: 34
+toc_title: "Implantaci\xF3n"
+---
+
+# Funciones de tabla {#table-functions}
+
+Las funciones de tabla son métodos para construir tablas.
+
+Puede usar funciones de tabla en:
+
+- La cláusula [FROM](../statements/select.md#select-from) de la consulta `SELECT`.
+
+    The method for creating a temporary table that is available only in the current query. The table is deleted when the query finishes.
+
+- La consulta [CREAR TABLA COMO \<table\_function()\>](../statements/create.md#create-table-query).
+
+    It's one of the methods of creating a table.
+
+!!! warning "Advertencia"
+    No puede usar funciones de tabla si la configuración [allow\_ddl](../../operations/settings/permissions_for_queries.md#settings_allow_ddl) está deshabilitada.
+
+| Función              | Descripción                                                                                                                                 |
+|----------------------|---------------------------------------------------------------------------------------------------------------------------------------------|
+| [file](file.md)      | Crea una tabla con el motor [File](../../engines/table_engines/special/file.md).                                                              |
+| [fusionar](merge.md) | Crea una tabla con el motor [Merge](../../engines/table_engines/special/merge.md).                                                            |
+| [numero](numbers.md) | Crea una tabla con una sola columna llena de números enteros.                                                                                 |
+| [remoto](remote.md)  | Le permite acceder a servidores remotos sin crear una tabla con el motor [Distribuido](../../engines/table_engines/special/distributed.md).   |
+| [URL](url.md)        | Crea una tabla con el motor [URL](../../engines/table_engines/special/url.md).                                                                |
+| [mysql](mysql.md)    | Crea una tabla con el motor [MySQL](../../engines/table_engines/integrations/mysql.md).                                                       |
+| [jdbc](jdbc.md)      | Crea una tabla con el motor [JDBC](../../engines/table_engines/integrations/jdbc.md).                                                         |
+| [odbc](odbc.md)      | Crea una tabla con el motor [ODBC](../../engines/table_engines/integrations/odbc.md).                                                         |
+| [Hdfs](hdfs.md)      | Crea una tabla con el motor [HDFS](../../engines/table_engines/integrations/hdfs.md).
| + +[Artículo Original](https://clickhouse.tech/docs/en/query_language/table_functions/) diff --git a/docs/es/query_language/table_functions/input.md b/docs/es/sql_reference/table_functions/input.md similarity index 83% rename from docs/es/query_language/table_functions/input.md rename to docs/es/sql_reference/table_functions/input.md index cbc91842eb2..e79b557ee8a 100644 --- a/docs/es/query_language/table_functions/input.md +++ b/docs/es/sql_reference/table_functions/input.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 46 +toc_title: entrada --- # entrada {#input} @@ -13,7 +16,7 @@ Por ejemplo, `'id UInt32, name String'`. Esta función sólo se puede utilizar en `INSERT SELECT` consulta y sólo una vez, pero por lo demás se comporta como función de tabla ordinaria (por ejemplo, se puede usar en subconsulta, etc.). -Los datos se pueden enviar de cualquier manera como para ordinario `INSERT` Consulta y pasado en cualquier disponible [Formato](../../interfaces/formats.md#formats) +Los datos se pueden enviar de cualquier manera como para ordinario `INSERT` consulta y pasado en cualquier disponible [formato](../../interfaces/formats.md#formats) que debe especificarse al final de la consulta (a diferencia de lo ordinario `INSERT SELECT`). La característica principal de esta función es que cuando el servidor recibe datos del cliente, los convierte simultáneamente @@ -23,8 +26,8 @@ con todos los datos transferidos no se crea. **Ejemplos** - Deje que el `test` tiene la siguiente estructura `(a String, b String)` - y datos en `data.csv` Tiene una estructura diferente `(col1 String, col2 Date, col3 Int32)`. Consulta de inserción - datos de la `data.csv` es el `test` con conversión simultánea se ve así: + y datos en `data.csv` tiene una estructura diferente `(col1 String, col2 Date, col3 Int32)`. 
Consulta de inserción + datos de la `data.csv` en el `test` con conversión simultánea se ve así: @@ -41,4 +44,4 @@ $ cat data.csv | clickhouse-client --query="INSERT INTO test FORMAT CSV" $ cat data.csv | clickhouse-client --query="INSERT INTO test SELECT * FROM input('test_structure') FORMAT CSV" ``` -[Artículo Original](https://clickhouse.tech/docs/es/query_language/table_functions/input/) +[Artículo Original](https://clickhouse.tech/docs/en/query_language/table_functions/input/) diff --git a/docs/es/query_language/table_functions/jdbc.md b/docs/es/sql_reference/table_functions/jdbc.md similarity index 80% rename from docs/es/query_language/table_functions/jdbc.md rename to docs/es/sql_reference/table_functions/jdbc.md index 5c97bb56a42..a06e3c6d8cf 100644 --- a/docs/es/query_language/table_functions/jdbc.md +++ b/docs/es/sql_reference/table_functions/jdbc.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 43 +toc_title: jdbc --- # jdbc {#table-function-jdbc} @@ -23,4 +26,4 @@ SELECT * FROM jdbc('mysql://localhost:3306/?user=root&password=root', 'schema', SELECT * FROM jdbc('datasource://mysql-local', 'schema', 'table') ``` -[Artículo Original](https://clickhouse.tech/docs/es/query_language/table_functions/jdbc/) +[Artículo Original](https://clickhouse.tech/docs/en/query_language/table_functions/jdbc/) diff --git a/docs/es/sql_reference/table_functions/merge.md b/docs/es/sql_reference/table_functions/merge.md new file mode 100644 index 00000000000..1a1a3bee1a2 --- /dev/null +++ b/docs/es/sql_reference/table_functions/merge.md @@ -0,0 +1,14 @@ +--- +machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 38 +toc_title: fusionar +--- + +# fusionar {#merge} + +`merge(db_name, 'tables_regexp')` – Creates a temporary Merge table. For more information, see the section “Table engines, Merge”. + +La estructura de la tabla se toma de la primera tabla encontrada que coincide con la expresión regular. + +[Artículo Original](https://clickhouse.tech/docs/en/query_language/table_functions/merge/) diff --git a/docs/es/query_language/table_functions/mysql.md b/docs/es/sql_reference/table_functions/mysql.md similarity index 69% rename from docs/es/query_language/table_functions/mysql.md rename to docs/es/sql_reference/table_functions/mysql.md index a8d57279d6b..aacb6832057 100644 --- a/docs/es/query_language/table_functions/mysql.md +++ b/docs/es/sql_reference/table_functions/mysql.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 42 +toc_title: mysql --- # mysql {#mysql} @@ -12,19 +15,19 @@ mysql('host:port', 'database', 'table', 'user', 'password'[, replace_query, 'on_ **Parámetros** -- `host:port` — Dirección del servidor MySQL. +- `host:port` — MySQL server address. -- `database` — Nombre de base de datos remota. +- `database` — Remote database name. -- `table` — Nombre de la tabla remota. +- `table` — Remote table name. -- `user` — Usuario de MySQL. +- `user` — MySQL user. -- `password` — Contraseña de usuario. +- `password` — User password. -- `replace_query` — Bandera que convierte `INSERT INTO` Consultas a `REPLACE INTO`. Si `replace_query=1`, la consulta se reemplaza. +- `replace_query` — Flag that converts `INSERT INTO` consultas a `REPLACE INTO`. Si `replace_query=1`, la consulta se reemplaza. 
-- `on_duplicate_clause` — El `ON DUPLICATE KEY on_duplicate_clause` expresión que se añade a la `INSERT` consulta. +- `on_duplicate_clause` — The `ON DUPLICATE KEY on_duplicate_clause` expresión que se añade a la `INSERT` consulta. Example: `INSERT INTO t (c1,c2) VALUES ('a', 2) ON DUPLICATE KEY UPDATE c2 = c2 + 1`, where `on_duplicate_clause` is `UPDATE c2 = c2 + 1`. See the MySQL documentation to find which `on_duplicate_clause` you can use with the `ON DUPLICATE KEY` clause. @@ -55,11 +58,11 @@ mysql> insert into test (`int_id`, `float`) VALUES (1,2); Query OK, 1 row affected (0,00 sec) mysql> select * from test; -+--------+--------------+-------+----------------+ ++------+----------+-----+----------+ | int_id | int_nullable | float | float_nullable | -+--------+--------------+-------+----------------+ ++------+----------+-----+----------+ | 1 | NULL | 2 | NULL | -+--------+--------------+-------+----------------+ ++------+----------+-----+----------+ 1 row in set (0,00 sec) ``` @@ -77,7 +80,7 @@ SELECT * FROM mysql('localhost:3306', 'test', 'test', 'bayonet', '123') ## Ver también {#see-also} -- [El ‘MySQL’ motor de mesa](../../operations/table_engines/mysql.md) -- [Uso de MySQL como fuente de diccionario externo](../dicts/external_dicts_dict_sources.md#dicts-external_dicts_dict_sources-mysql) +- [El ‘MySQL’ motor de mesa](../../engines/table_engines/integrations/mysql.md) +- [Uso de MySQL como fuente de diccionario externo](../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md#dicts-external_dicts_dict_sources-mysql) -[Artículo Original](https://clickhouse.tech/docs/es/query_language/table_functions/mysql/) +[Artículo Original](https://clickhouse.tech/docs/en/query_language/table_functions/mysql/) diff --git a/docs/es/query_language/table_functions/numbers.md b/docs/es/sql_reference/table_functions/numbers.md similarity index 68% rename from docs/es/query_language/table_functions/numbers.md rename to docs/es/sql_reference/table_functions/numbers.md index 5d6017e523c..51a9904087a 100644 --- a/docs/es/query_language/table_functions/numbers.md +++ b/docs/es/sql_reference/table_functions/numbers.md @@ -1,10 +1,13 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 39 +toc_title: numero --- -# número {#numbers} +# numero {#numbers} -`numbers(N)` – Devuelve una tabla con el único ‘number’ columna (UInt64) que contiene enteros de 0 a N-1. +`numbers(N)` – Returns a table with the single ‘number’ columna (UInt64) que contiene enteros de 0 a N-1. `numbers(N, M)` - Devuelve una tabla con el único ‘number’ columna (UInt64) que contiene enteros de N a (N + M - 1). Similar a la `system.numbers` tabla, puede ser utilizado para probar y generar valores sucesivos, `numbers(N, M)` más eficiente que `system.numbers`. 
@@ -24,4 +27,4 @@ Ejemplos:
select toDate('2010-01-01') + number as d FROM numbers(365);
```

-[Artículo Original](https://clickhouse.tech/docs/es/query_language/table_functions/numbers/)
+[Artículo Original](https://clickhouse.tech/docs/en/query_language/table_functions/numbers/)
diff --git a/docs/es/query_language/table_functions/odbc.md b/docs/es/sql_reference/table_functions/odbc.md
similarity index 80%
rename from docs/es/query_language/table_functions/odbc.md
rename to docs/es/sql_reference/table_functions/odbc.md
index 1e0cf1a723e..b5105e1b3e3 100644
--- a/docs/es/query_language/table_functions/odbc.md
+++ b/docs/es/sql_reference/table_functions/odbc.md
@@ -1,5 +1,8 @@
---
machine_translated: true
+machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa
+toc_priority: 44
+toc_title: odbc
---

# odbc {#table-functions-odbc}

``` sql
odbc(connection_settings, external_database, external_table)
```

Parámetros:

-- `connection_settings` — Nombre de la sección con ajustes de conexión en el `odbc.ini` file.
-- `external_database` — Nombre de una base de datos en un DBMS externo.
-- `external_table` — Nombre de una tabla en el `external_database`.
+- `connection_settings` — Name of the section with connection settings in the `odbc.ini` file.
+- `external_database` — Name of a database in an external DBMS.
+- `external_table` — Name of a table in the `external_database`.

Para implementar con seguridad conexiones ODBC, ClickHouse usa un programa separado `clickhouse-odbc-bridge`. Si el controlador ODBC se carga directamente desde `clickhouse-server`, los problemas del controlador pueden bloquear el servidor ClickHouse. ClickHouse inicia automáticamente `clickhouse-odbc-bridge` cuando se requiere. El programa de puente ODBC se instala desde el mismo paquete que `clickhouse-server`.

@@ -56,7 +59,7 @@ Puede verificar la conexión usando la utilidad `isql` de la instalación de

``` bash
$ isql -v mysqlconn
-+---------------------------------------+
++-------------------------+
| Connected! |
| |
...
```

@@ -77,11 +80,11 @@ mysql> insert into test (`int_id`, `float`) VALUES (1,2);
Query OK, 1 row affected (0,00 sec)

mysql> select * from test;
-+--------+--------------+-------+----------------+
++------+----------+-----+----------+
| int_id | int_nullable | float | float_nullable |
-+--------+--------------+-------+----------------+
++------+----------+-----+----------+
| 1 | NULL | 2 | NULL |
-+--------+--------------+-------+----------------+
++------+----------+-----+----------+
1 row in set (0,00 sec)
```

@@ -99,7 +102,7 @@ SELECT * FROM odbc('DSN=mysqlconn', 'test', 'test')

## Ver también {#see-also}

-- [Diccionarios externos ODBC](../../query_language/dicts/external_dicts_dict_sources.md#dicts-external_dicts_dict_sources-odbc)
-- [Motor de tabla ODBC](../../operations/table_engines/odbc.md).
+- [Diccionarios externos ODBC](../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md#dicts-external_dicts_dict_sources-odbc)
+- [Motor de tabla ODBC](../../engines/table_engines/integrations/odbc.md).
-[Artículo Original](https://clickhouse.tech/docs/es/query_language/table_functions/jdbc/) +[Artículo Original](https://clickhouse.tech/docs/en/query_language/table_functions/jdbc/) diff --git a/docs/es/query_language/table_functions/remote.md b/docs/es/sql_reference/table_functions/remote.md similarity index 77% rename from docs/es/query_language/table_functions/remote.md rename to docs/es/sql_reference/table_functions/remote.md index adc7aa44207..8294a2bd069 100644 --- a/docs/es/query_language/table_functions/remote.md +++ b/docs/es/sql_reference/table_functions/remote.md @@ -1,8 +1,11 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 40 +toc_title: remoto --- -# Remoto, remoteSecure {#remote-remotesecure} +# remoto, remoteSecure {#remote-remotesecure} Le permite acceder a servidores remotos sin crear un `Distributed` tabla. @@ -13,7 +16,7 @@ remote('addresses_expr', db, table[, 'user'[, 'password']]) remote('addresses_expr', db.table[, 'user'[, 'password']]) ``` -`addresses_expr` – Una expresión que genera direcciones de servidores remotos. Esta puede ser solo una dirección de servidor. La dirección del servidor es `host:port` O simplemente `host`. El host se puede especificar como nombre de servidor o como dirección IPv4 o IPv6. Una dirección IPv6 se especifica entre corchetes. El puerto es el puerto TCP del servidor remoto. Si se omite el puerto, utiliza `tcp_port` del archivo de configuración del servidor (por defecto, 9000). +`addresses_expr` – An expression that generates addresses of remote servers. This may be just one server address. The server address is `host:port` o simplemente `host`. El host se puede especificar como nombre de servidor o como dirección IPv4 o IPv6. Una dirección IPv6 se especifica entre corchetes. El puerto es el puerto TCP del servidor remoto. Si se omite el puerto, utiliza `tcp_port` del archivo de configuración del servidor (por defecto, 9000). !!! important "Importante" El puerto es necesario para una dirección IPv6. @@ -75,6 +78,6 @@ El `remote` puede ser útil en los siguientes casos: Si el usuario no está especificado, `default` se utiliza. Si no se especifica la contraseña, se utiliza una contraseña vacía. -`remoteSecure` - igual que `remote` pero con conexión segura. Puerto predeterminado — [Tcp\_port\_secure](../../operations/server_settings/settings.md#server_settings-tcp_port_secure) de configuración o 9440. +`remoteSecure` - igual que `remote` but with secured connection. Default port — [Tcp\_port\_secure](../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-tcp_port_secure) de config o 9440. -[Artículo Original](https://clickhouse.tech/docs/es/query_language/table_functions/remote/) +[Artículo Original](https://clickhouse.tech/docs/en/query_language/table_functions/remote/) diff --git a/docs/es/query_language/table_functions/url.md b/docs/es/sql_reference/table_functions/url.md similarity index 74% rename from docs/es/query_language/table_functions/url.md rename to docs/es/sql_reference/table_functions/url.md index 6978d5ece18..5c6a62db350 100644 --- a/docs/es/query_language/table_functions/url.md +++ b/docs/es/sql_reference/table_functions/url.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 41 +toc_title: URL --- # URL {#url} @@ -9,7 +12,7 @@ machine_translated: true URL - Dirección de servidor HTTP o HTTPS, que puede aceptar `GET` y/o `POST` peticiones. 
-Formato - [Formato](../../interfaces/formats.md#formats) de los datos. +formato - [formato](../../interfaces/formats.md#formats) de los datos. estructura - estructura de la tabla en `'UserID UInt64, Name String'` formato. Determina los nombres y tipos de columna. @@ -20,4 +23,4 @@ estructura - estructura de la tabla en `'UserID UInt64, Name String'` formato. D SELECT * FROM url('http://127.0.0.1:12345/', CSV, 'column1 String, column2 UInt32') LIMIT 3 ``` -[Artículo Original](https://clickhouse.tech/docs/es/query_language/table_functions/url/) +[Artículo Original](https://clickhouse.tech/docs/en/query_language/table_functions/url/) diff --git a/docs/es/changelog/2017.md b/docs/es/whats_new/changelog/2017.md similarity index 93% rename from docs/es/changelog/2017.md rename to docs/es/whats_new/changelog/2017.md index b80e820169d..3852de2f640 100644 --- a/docs/es/changelog/2017.md +++ b/docs/es/whats_new/changelog/2017.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 79 +toc_title: '2017' --- ### Lanzamiento de ClickHouse 1.1.54327, 2017-12-21 {#clickhouse-release-1-1-54327-2017-12-21} @@ -23,13 +26,13 @@ Esta versión contiene correcciones de errores para la versión anterior 1.1.543 #### Novedad: {#new-features} - Clave de partición personalizada para la familia MergeTree de motores de tabla. -- [Kafka](https://clickhouse.yandex/docs/es/operations/table_engines/kafka/) motor de mesa. +- [Kafka](https://clickhouse.yandex/docs/en/operations/table_engines/kafka/) motor de mesa. - Se agregó soporte para cargar [CatBoost](https://catboost.yandex/) modelos y aplicarlos a los datos almacenados en ClickHouse. - Se agregó soporte para zonas horarias con desplazamientos no enteros de UTC. - Se agregó soporte para operaciones aritméticas con intervalos de tiempo. - El intervalo de valores para los tipos Date y DateTime se extiende hasta el año 2105. -- Se agregó el `CREATE MATERIALIZED VIEW x TO y` Consulta (especifica una tabla existente para almacenar los datos de una vista materializada). -- Se agregó el `ATTACH TABLE` Consulta sin argumentos. +- Se agregó el `CREATE MATERIALIZED VIEW x TO y` consulta (especifica una tabla existente para almacenar los datos de una vista materializada). +- Se agregó el `ATTACH TABLE` consulta sin argumentos. - La lógica de procesamiento para columnas anidadas con nombres que terminan en -Map en una tabla SummingMergeTree se extrajo a la función de agregado sumMap. Ahora puede especificar dichas columnas explícitamente. - El tamaño máximo del diccionario IP trie se incrementa a 128M entradas. - Se agregó la función getSizeOfEnumType. @@ -49,7 +52,7 @@ Esta versión contiene correcciones de errores para la versión anterior 1.1.543 - Se corrigió la adición y eliminación no atómica de partes en tablas replicadas. - Los datos insertados en una vista materializada no están sujetos a una deduplicación innecesaria. - La ejecución de una consulta en una tabla distribuida para la que la réplica local está rezagada y las réplicas remotas no están disponibles ya no genera ningún error. -- Los usuarios no necesitan permisos de acceso a la `default` base de datos para crear tablas temporales más. +- Los usuarios no necesitan permisos de acceso `default` base de datos para crear tablas temporales más. - Se corrigió el bloqueo al especificar el tipo de matriz sin argumentos. - Se corrigieron bloqueos cuando el volumen del disco que contiene los registros del servidor está lleno. 
- Se corrigió un desbordamiento en la función toRelativeWeekNum para la primera semana de la época de Unix. @@ -62,7 +65,7 @@ Esta versión contiene correcciones de errores para la versión anterior 1.1.543 #### Novedad: {#new-features-1} -- Soporte TLS en el protocolo nativo (para habilitar, establecer `tcp_ssl_port` es `config.xml` ). +- Soporte TLS en el protocolo nativo (para habilitar, establecer `tcp_ssl_port` en `config.xml` ). #### Corrección de errores: {#bug-fixes-1} @@ -120,11 +123,11 @@ Esta versión contiene correcciones de errores para la versión anterior 1.1.543 - Se ha mejorado el proceso de eliminación de nodos antiguos en ZooKeeper. Anteriormente, los nodos antiguos a veces no se eliminaban si había inserciones muy frecuentes, lo que hacía que el servidor tardara en apagarse, entre otras cosas. - Se corrigió la aleatorización al elegir hosts para la conexión a ZooKeeper. - Se corrigió la exclusión de réplicas rezagadas en consultas distribuidas si la réplica es localhost. -- Se corrigió un error por el que una parte de datos en un `ReplicatedMergeTree` la tabla podría estar rota después de correr `ALTER MODIFY` es un elemento en un `Nested` estructura. +- Se corrigió un error por el que una parte de datos en un `ReplicatedMergeTree` la tabla podría estar rota después de correr `ALTER MODIFY` en un elemento en un `Nested` estructura. - Se corrigió un error que podía causar que las consultas SELECT “hang”. - Mejoras en las consultas DDL distribuidas. - Se corrigió la consulta `CREATE TABLE ... AS `. -- Se resolvió el punto muerto en el `ALTER ... CLEAR COLUMN IN PARTITION` Consulta para `Buffer` tabla. +- Se resolvió el punto muerto en el `ALTER ... CLEAR COLUMN IN PARTITION` consulta para `Buffer` tabla. - Se corrigió el valor predeterminado no válido para `Enum` s (0 en lugar del mínimo) cuando se utiliza el `JSONEachRow` y `TSKV` formato. - Se resolvió la aparición de procesos zombis al usar un diccionario con un `executable` fuente. - Se corrigió el segfault para la consulta HEAD. @@ -132,12 +135,12 @@ Esta versión contiene correcciones de errores para la versión anterior 1.1.543 #### Flujo de trabajo mejorado para desarrollar y ensamblar ClickHouse: {#improved-workflow-for-developing-and-assembling-clickhouse} - Usted puede utilizar `pbuilder` para construir ClickHouse. -- Usted puede utilizar `libc++` es lugar de `libstdc++` para construir en Linux. +- Usted puede utilizar `libc++` en lugar de `libstdc++` para construir en Linux. - Se agregaron instrucciones para usar herramientas de análisis de código estático: `Coverage`, `clang-tidy`, `cppcheck`. #### Tenga en cuenta al actualizar: {#please-note-when-upgrading} -- Ahora hay un valor predeterminado más alto para la configuración MergeTree `max_bytes_to_merge_at_max_space_in_pool` (el tamaño total máximo de las partes de datos a fusionar, en bytes): ha aumentado de 100 GiB a 150 GiB. Esto podría dar lugar a grandes fusiones que se ejecutan después de la actualización del servidor, lo que podría provocar una mayor carga en el subsistema de disco. Si el espacio libre disponible en el servidor es inferior al doble de la cantidad total de las fusiones que se están ejecutando, esto hará que todas las demás fusiones dejen de ejecutarse, incluidas las fusiones de pequeñas partes de datos. Como resultado, las consultas INSERT fallarán con el mensaje “Merges are processing significantly slower than inserts.” Descripción `SELECT * FROM system.merges` consulta para supervisar la situación. 
También puede comprobar el `DiskSpaceReservedForMerge` métrica en el `system.metrics` mesa, o en Grafito. No necesita hacer nada para solucionar esto, ya que el problema se resolverá una vez que finalicen las fusiones grandes. Si encuentra esto inaceptable, puede restaurar el valor anterior para el `max_bytes_to_merge_at_max_space_in_pool` configuración. Para hacer esto, vaya al sección en config.xml, establecer ``` ``107374182400 ``` y reinicie el servidor. +- Ahora hay un valor predeterminado más alto para la configuración MergeTree `max_bytes_to_merge_at_max_space_in_pool` (el tamaño total máximo de las partes de datos a fusionar, en bytes): ha aumentado de 100 GiB a 150 GiB. Esto podría dar lugar a grandes fusiones que se ejecutan después de la actualización del servidor, lo que podría provocar una mayor carga en el subsistema de disco. Si el espacio libre disponible en el servidor es inferior al doble de la cantidad total de las fusiones que se están ejecutando, esto hará que todas las demás fusiones dejen de ejecutarse, incluidas las fusiones de pequeñas partes de datos. Como resultado, las consultas INSERT fallarán con el mensaje “Merges are processing significantly slower than inserts.” Utilice el `SELECT * FROM system.merges` consulta para supervisar la situación. También puede comprobar el `DiskSpaceReservedForMerge` métrica en el `system.metrics` mesa, o en Grafito. No necesita hacer nada para solucionar esto, ya que el problema se resolverá solo una vez que finalicen las fusiones grandes. Si encuentra esto inaceptable, puede restaurar el valor anterior para el `max_bytes_to_merge_at_max_space_in_pool` configuración. Para hacer esto, vaya al sección en config.xml, establecer ``` ``107374182400 ``` y reinicie el servidor. ### Lanzamiento de ClickHouse 1.1.54284, 2017-08-29 {#clickhouse-release-1-1-54284-2017-08-29} @@ -148,8 +151,8 @@ Esta versión contiene correcciones de errores para la versión anterior 1.1.543 Esta versión contiene correcciones de errores para la versión anterior 1.1.54276: - Fijo `DB::Exception: Assertion violation: !_path.empty()` cuando se inserta en una tabla distribuida. -- Se corrigió el análisis al insertar en formato RowBinary si los datos de entrada comienzan con ‘;’. -- Errores durante la compilación en tiempo de ejecución de ciertas funciones agregadas (por ejemplo, `groupArray()`). +- Se corrigió el análisis al insertar en formato RowBinary si los datos de entrada comienzan con';'. +- Errors during runtime compilation of certain aggregate functions (e.g. `groupArray()`). ### Clickhouse Lanzamiento 1.1.54276, 2017-08-16 {#clickhouse-release-1-1-54276-2017-08-16} @@ -207,7 +210,7 @@ Esta versión contiene correcciones de errores para la versión anterior 1.1.542 - Se corrigió la actualización automática incorrecta de los diccionarios Trie. - Se ha corregido una excepción al ejecutar consultas con una cláusula GROUP BY desde una tabla Merge cuando se usa SAMPLE. - Se corrigió un bloqueo de GROUP BY cuando se usabaributed\_aggregation\_memory\_efficient = 1. -- Ahora puede especificar la base de datos.es el lado derecho de IN y JOIN. +- Ahora puede especificar la base de datos.en el lado derecho de IN y JOIN. - Se usaron demasiados subprocesos para la agregación paralela. Esto ha sido solucionado. - Corregido cómo el “if” funciona con argumentos FixedString. - SELECT funcionó incorrectamente desde una tabla distribuida para fragmentos con un peso de 0. Esto ha sido solucionado. 
diff --git a/docs/es/changelog/2018.md b/docs/es/whats_new/changelog/2018.md
similarity index 99%
rename from docs/es/changelog/2018.md
rename to docs/es/whats_new/changelog/2018.md
index 7aa67e7934e..c63b94670ac 100644
--- a/docs/es/changelog/2018.md
+++ b/docs/es/whats_new/changelog/2018.md
@@ -1,5 +1,8 @@
---
machine_translated: true
+machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa
+toc_priority: 78
+toc_title: '2018'
---

## Lanzamiento de ClickHouse 18.16 {#clickhouse-release-18-16}
@@ -971,7 +974,7 @@ Esta versión contiene correcciones de errores para la versión anterior 1.1.543
- Se agregó soporte para el uso de múltiples consumidores con el `Kafka` motor. Opciones de configuración ampliadas para `Kafka` (Marek Vavruša).
- Se agregó el `intExp3` y `intExp4` función.
- Se agregó el `sumKahan` función de agregado.
-- Se agregaron las funciones \* Number\* OrNull, donde \* Number\* es un tipo numérico.
+- Se añadieron las funciones \*Número\*OrNull, donde \*Número\* es un tipo numérico.
- Añadido soporte para `WITH` para una `INSERT SELECT` consulta (autor: zhang2014).
- Configuración añadida: `http_connection_timeout`, `http_send_timeout`, `http_receive_timeout`. En particular, estos valores se utilizan para descargar partes de datos para la replicación. Cambiar esta configuración permite una conmutación por error más rápida si la red está sobrecargada.
- Añadido soporte para `ALTER` para tablas de tipo `Null` (Anastasiya Tsarkova).
@@ -1056,3 +1059,5 @@ Esta versión contiene correcciones de errores para la versión anterior 1.1.543
- Al realizar una actualización continua en un clúster, en el momento en que algunas de las réplicas ejecutan la versión anterior de ClickHouse y otras ejecutan la nueva versión, la replicación se detiene temporalmente y el mensaje `unknown parameter 'shard'` aparece en el registro. La replicación continuará después de que se actualicen todas las réplicas del clúster.
- Si se están ejecutando diferentes versiones de ClickHouse en los servidores de clúster, es posible que las consultas distribuidas que utilizan las siguientes funciones tengan resultados incorrectos: `varSamp`, `varPop`, `stddevSamp`, `stddevPop`, `covarSamp`, `covarPop`, `corr`. Debe actualizar todos los nodos del clúster.
+
+## [Registro de cambios de 2017](https://github.com/ClickHouse/ClickHouse/blob/master/docs/en/changelog/2017.md) {#changelog-for-2017}
diff --git a/docs/es/changelog/2019.md b/docs/es/whats_new/changelog/2019.md
similarity index 99%
rename from docs/es/changelog/2019.md
rename to docs/es/whats_new/changelog/2019.md
index 3acb9db1997..ab0a96f38b2 100644
--- a/docs/es/changelog/2019.md
+++ b/docs/es/whats_new/changelog/2019.md
@@ -1,5 +1,8 @@
---
machine_translated: true
+machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa
+toc_priority: 77
+toc_title: '2019'
---

## Lanzamiento de ClickHouse v19.17 {#clickhouse-release-v19-17}
@@ -127,6 +130,10 @@
## Lanzamiento de ClickHouse v19.16 {#clickhouse-release-v19-16}

+#### Lanzamiento de ClickHouse v19.16.14.65, 2020-03-25 {#clickhouse-release-v19-16-14-65-2020-03-25}
+
+- Se corrigió un error en los cálculos por lotes de operaciones lógicas ternarias con múltiples argumentos (más de 10). [\#8718](https://github.com/ClickHouse/ClickHouse/pull/8718) ([Alejandro Kazakov](https://github.com/Akazz)) Esta corrección de errores fue portada a la versión 19.16 por una solicitud especial de Altinity.
+
#### Lanzamiento de ClickHouse v19.16.14.65, 2020-03-05
{#clickhouse-release-v19-16-14-65-2020-03-05} - Corregir la incompatibilidad de subconsultas distribuidas con versiones anteriores de CH. Fijar [\#7851](https://github.com/ClickHouse/ClickHouse/issues/7851) @@ -802,7 +809,7 @@ machine_translated: true - Análisis fijo de `AggregateFunction` valores incrustados en la consulta. [\#6575](https://github.com/ClickHouse/ClickHouse/issues/6575) [\#6773](https://github.com/ClickHouse/ClickHouse/pull/6773) ([Zhichang Yu](https://github.com/yuzhichang)) - Corregido el comportamiento incorrecto de `trim` funciones familiares. [\#6647](https://github.com/ClickHouse/ClickHouse/pull/6647) ([alexey-milovidov](https://github.com/alexey-milovidov)) -### Lanzamiento de ClickHouse 19.13.4.32, 2019-09-10 {#clickhouse-release-19-13-4-32-2019-09-10} +### ClickHouse liberación 19.13.4.32, 2019-09-10 {#clickhouse-release-19-13-4-32-2019-09-10} #### Corrección de errores {#bug-fix-11} @@ -909,15 +916,15 @@ machine_translated: true - Mejorar el manejo de errores en los diccionarios de caché. [\#6737](https://github.com/ClickHouse/ClickHouse/pull/6737) ([Vitaly Baranov](https://github.com/vitlibar)) - Corregido error en la función `arrayEnumerateUniqRanked`. [\#6779](https://github.com/ClickHouse/ClickHouse/pull/6779) ([propulsor](https://github.com/proller)) -- Fijar `JSONExtract` función mientras se extrae un `Tuple` de JSON. [\#6718](https://github.com/ClickHouse/ClickHouse/pull/6718) ([Vitaly Baranov](https://github.com/vitlibar)) +- Fijar `JSONExtract` la función mientras que la extracción de un `Tuple` de JSON. [\#6718](https://github.com/ClickHouse/ClickHouse/pull/6718) ([Vitaly Baranov](https://github.com/vitlibar)) - Se corrigió la posible pérdida de datos después de `ALTER DELETE` consulta en la tabla con índice de omisión. [\#6224](https://github.com/ClickHouse/ClickHouse/issues/6224) [\#6282](https://github.com/ClickHouse/ClickHouse/pull/6282) ([Nikita Vasilev](https://github.com/nikvas0)) - Prueba de rendimiento fija. [\#6392](https://github.com/ClickHouse/ClickHouse/pull/6392) ([alexey-milovidov](https://github.com/alexey-milovidov)) - Parquet: Fijar la lectura de columnas booleanos. [\#6579](https://github.com/ClickHouse/ClickHouse/pull/6579) ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Corregido el comportamiento incorrecto de `nullIf` función de argumentos constantes. [\#6518](https://github.com/ClickHouse/ClickHouse/pull/6518) ([Guillaume Tassery](https://github.com/YiuRULE)) [\#6580](https://github.com/ClickHouse/ClickHouse/pull/6580) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Corregido el comportamiento incorrecto de `nullIf` la función de la constante de argumentos. [\#6518](https://github.com/ClickHouse/ClickHouse/pull/6518) ([Guillaume Tassery](https://github.com/YiuRULE)) [\#6580](https://github.com/ClickHouse/ClickHouse/pull/6580) ([alexey-milovidov](https://github.com/alexey-milovidov)) - Solucione el problema de duplicación de mensajes de Kafka en el reinicio normal del servidor. [\#6597](https://github.com/ClickHouse/ClickHouse/pull/6597) ([Ivan](https://github.com/abyss7)) - Se ha corregido un problema durante mucho tiempo `ALTER UPDATE` o `ALTER DELETE` puede evitar que se ejecuten fusiones regulares. Evite que las mutaciones se ejecuten si no hay suficientes subprocesos libres disponibles. 
[\#6502](https://github.com/ClickHouse/ClickHouse/issues/6502) [\#6617](https://github.com/ClickHouse/ClickHouse/pull/6617) ([Tavplubix](https://github.com/tavplubix)) - Corregido el error con el procesamiento “timezone” en el archivo de configuración del servidor. [\#6709](https://github.com/ClickHouse/ClickHouse/pull/6709) ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Repara las pruebas de kafka. [\#6805](https://github.com/ClickHouse/ClickHouse/pull/6805) ([Ivan](https://github.com/abyss7)) +- Revisión de kafka pruebas. [\#6805](https://github.com/ClickHouse/ClickHouse/pull/6805) ([Ivan](https://github.com/abyss7)) #### Corrección de seguridad {#security-fix-3} @@ -978,7 +985,7 @@ machine_translated: true - Corregir un error al escribir marcas de índices secundarios con granularidad adaptativa. [\#6126](https://github.com/ClickHouse/ClickHouse/pull/6126) ([alesapin](https://github.com/alesapin)) - Fijar `WITH ROLLUP` y `WITH CUBE` modificadores de `GROUP BY` con agregación de dos niveles. [\#6225](https://github.com/ClickHouse/ClickHouse/pull/6225) ([Anton Popov](https://github.com/CurtizJ)) - Cuelgue fijo adentro `JSONExtractRaw` función. Fijo [\#6195](https://github.com/ClickHouse/ClickHouse/issues/6195) [\#6198](https://github.com/ClickHouse/ClickHouse/pull/6198) ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Reparar segfault en ExternalLoader :: loadOutdated (). [\#6082](https://github.com/ClickHouse/ClickHouse/pull/6082) ([Vitaly Baranov](https://github.com/vitlibar)) +- Revisión violación de segmento en ExternalLoader::reloadOutdated(). [\#6082](https://github.com/ClickHouse/ClickHouse/pull/6082) ([Vitaly Baranov](https://github.com/vitlibar)) - Se corrigió el caso en el que el servidor podía cerrar sockets de escucha pero no apagar y continuar sirviendo las consultas restantes. Puede terminar con dos procesos de servidor de clickhouse en ejecución. A veces, el servidor puede devolver un error `bad_function_call` para las consultas restantes. [\#6231](https://github.com/ClickHouse/ClickHouse/pull/6231) ([alexey-milovidov](https://github.com/alexey-milovidov)) - Se corrigió la condición inútil e incorrecta en el campo de actualización para la carga inicial de diccionarios externos a través de ODBC, MySQL, ClickHouse y HTTP. Esto corrige [\#6069](https://github.com/ClickHouse/ClickHouse/issues/6069) [\#6083](https://github.com/ClickHouse/ClickHouse/pull/6083) ([alexey-milovidov](https://github.com/alexey-milovidov)) - Se corrigió una excepción irrelevante en el elenco de `LowCardinality(Nullable)` to not-Nullable column in case if it doesn't contain Nulls (e.g. in query like `SELECT CAST(CAST('Hello' AS LowCardinality(Nullable(String))) AS String)`. [\#6094](https://github.com/ClickHouse/ClickHouse/issues/6094) [\#6119](https://github.com/ClickHouse/ClickHouse/pull/6119) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) @@ -1140,7 +1147,7 @@ machine_translated: true - Los tiempos de espera de red se pueden cambiar dinámicamente para las conexiones ya establecidas de acuerdo con la configuración. [\#4558](https://github.com/ClickHouse/ClickHouse/pull/4558) ([Konstantin Podshumok](https://github.com/podshumok)) - Utilizar “public\_suffix\_list” para funciones `firstSignificantSubdomain`, `cutToFirstSignificantSubdomain`. Está usando una tabla hash perfecta generada por `gperf` con una lista generada a partir del archivo: https://publicsuffix.org/list/public\_suffix\_list.dat. (por ejemplo, ahora reconocemos el dominio `ac.uk` como no significativo). 
[\#5030](https://github.com/ClickHouse/ClickHouse/pull/5030) ([Guillaume Tassery](https://github.com/YiuRULE)) - Adoptar `IPv6` tipo de datos en las tablas del sistema; columnas de información de cliente unificadas en `system.processes` y `system.query_log` [\#5640](https://github.com/ClickHouse/ClickHouse/pull/5640) ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Uso de sesiones para conexiones con el protocolo de compatibilidad MySQL. Acerca de nosotros [\#5646](https://github.com/ClickHouse/ClickHouse/pull/5646) ([Yuriy Baranov](https://github.com/yurriy)) +- Uso de sesiones para conexiones con el protocolo de compatibilidad MySQL. \#5476 [\#5646](https://github.com/ClickHouse/ClickHouse/pull/5646) ([Yuriy Baranov](https://github.com/yurriy)) - Apoyo más `ALTER` consulta `ON CLUSTER`. [\#5593](https://github.com/ClickHouse/ClickHouse/pull/5593) [\#5613](https://github.com/ClickHouse/ClickHouse/pull/5613) ([Sundyli](https://github.com/sundy-li)) - Apoyo `` sección en `clickhouse-local` archivo de configuración. [\#5540](https://github.com/ClickHouse/ClickHouse/pull/5540) ([propulsor](https://github.com/proller)) - Permitir ejecutar consulta con `remote` función de la tabla en `clickhouse-local` [\#5627](https://github.com/ClickHouse/ClickHouse/pull/5627) ([propulsor](https://github.com/proller)) @@ -1378,7 +1385,7 @@ machine_translated: true - Bloqueo con sin comprimir\_cache + JOIN durante la fusión (\# 5197) [\#5133](https://github.com/ClickHouse/ClickHouse/pull/5133) ([Danila Kutenin](https://github.com/danlark1)) -- Error de segmentación en una consulta de clickhouse-cliente a tablas del sistema. Acerca de nosotros +- Error de segmentación en una consulta de clickhouse-cliente a tablas del sistema. \#5066 [\#5127](https://github.com/ClickHouse/ClickHouse/pull/5127) ([Ivan](https://github.com/abyss7)) - Pérdida de datos en carga pesada a través de KafkaEngine (\#4736) @@ -1537,7 +1544,7 @@ machine_translated: true - Repara el comportamiento indefinido en `median` y `quantile` función. [\#4702](https://github.com/ClickHouse/ClickHouse/pull/4702) ([Hcz](https://github.com/hczhcz)) - Corregir la detección de nivel de compresión cuando `network_compression_method` en minúsculas. Roto en v19.1. [\#4706](https://github.com/ClickHouse/ClickHouse/pull/4706) ([propulsor](https://github.com/proller)) - Se corrigió la ignorancia de `UTC` ajuste (soluciona el problema [\#4658](https://github.com/ClickHouse/ClickHouse/issues/4658)). [\#4718](https://github.com/ClickHouse/ClickHouse/pull/4718) ([propulsor](https://github.com/proller)) -- Fijar `histogram` comportamiento de la función con `Distributed` tabla. [\#4741](https://github.com/ClickHouse/ClickHouse/pull/4741) ([Olegkv](https://github.com/olegkv)) +- Fijar `histogram` comportamiento de la función con `Distributed` tabla. [\#4741](https://github.com/ClickHouse/ClickHouse/pull/4741) ([olegkv](https://github.com/olegkv)) - Informe tsan fijo `destroy of a locked mutex`. [\#4742](https://github.com/ClickHouse/ClickHouse/pull/4742) ([alexey-milovidov](https://github.com/alexey-milovidov)) - Se corrigió el informe de TSan sobre el cierre debido a la condición de carrera en el uso de registros del sistema. Se corrigió el uso potencial después de liberar al apagar cuando part\_log está habilitado. [\#4758](https://github.com/ClickHouse/ClickHouse/pull/4758) ([alexey-milovidov](https://github.com/alexey-milovidov)) - Fijar las piezas de nuevo control en `ReplicatedMergeTreeAlterThread` en caso de error. 
[\#4772](https://github.com/ClickHouse/ClickHouse/pull/4772) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) @@ -1588,7 +1595,7 @@ machine_translated: true - Ahora se le pide al usuario la contraseña del usuario `'default'` durante la instalación. [\#4725](https://github.com/ClickHouse/ClickHouse/pull/4725) ([propulsor](https://github.com/proller)) - Suprimir advertencia en la biblioteca `rdkafka`. [\#4740](https://github.com/ClickHouse/ClickHouse/pull/4740) ([alexey-milovidov](https://github.com/alexey-milovidov)) - Permitir la capacidad de construir sin ssl. [\#4750](https://github.com/ClickHouse/ClickHouse/pull/4750) ([propulsor](https://github.com/proller)) -- Agregue una forma de iniciar una imagen de clickhouse-server desde un usuario personalizado. [\#4753](https://github.com/ClickHouse/ClickHouse/pull/4753) ([Mikhail f. Shiryaev](https://github.com/Felixoid)) +- Agregar una forma de iniciar la imagen de clickhouse-server desde un usuario personalizado. [\#4753](https://github.com/ClickHouse/ClickHouse/pull/4753) ([Mikhail f. Shiryaev](https://github.com/Felixoid)) - Actualice boost de contrib a 1.69. [\#4793](https://github.com/ClickHouse/ClickHouse/pull/4793) ([propulsor](https://github.com/proller)) - Deshabilitar el uso de `mremap` cuando se compila con Thread Sanitizer. Sorprendentemente, TSan no intercepta `mremap` (aunque intercepta `mmap`, `munmap`), lo que conduce a falsos positivos. Informe TSan corregido en pruebas con estado. [\#4859](https://github.com/ClickHouse/ClickHouse/pull/4859) ([alexey-milovidov](https://github.com/alexey-milovidov)) - Agregue comprobación de prueba usando el esquema de formato a través de la interfaz HTTP. [\#4864](https://github.com/ClickHouse/ClickHouse/pull/4864) ([Vitaly Baranov](https://github.com/vitlibar)) @@ -1612,7 +1619,7 @@ machine_translated: true - Se corrigió el informe de TSan sobre el cierre debido a la condición de carrera en el uso de registros del sistema. Se corrigió el uso potencial después de liberar al apagar cuando part\_log está habilitado. [\#4758](https://github.com/ClickHouse/ClickHouse/pull/4758) ([alexey-milovidov](https://github.com/alexey-milovidov)) - Fijar las piezas de nuevo control en `ReplicatedMergeTreeAlterThread` en caso de error. [\#4772](https://github.com/ClickHouse/ClickHouse/pull/4772) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) - Las operaciones aritméticas en estados de función de agregado intermedios no funcionaban para argumentos constantes (como los resultados de subconsulta). [\#4776](https://github.com/ClickHouse/ClickHouse/pull/4776) ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Siempre retroceda los nombres de las columnas en los metadatos. De lo contrario, es imposible crear una tabla con una columna llamada `index` (el servidor no se reiniciará debido a `ATTACH` consulta en metadatos). [\#4782](https://github.com/ClickHouse/ClickHouse/pull/4782) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Escribir siempre entre comillas invertidas los nombres de las columnas en los metadatos. De lo contrario es imposible crear una tabla con una columna denominada `index` (el servidor no se reiniciará debido a la consulta `ATTACH` en los metadatos). [\#4782](https://github.com/ClickHouse/ClickHouse/pull/4782) ([alexey-milovidov](https://github.com/alexey-milovidov)) - Corregir un crash en `ALTER ... MODIFY ORDER BY` en una tabla `Distributed`.
[\#4790](https://github.com/ClickHouse/ClickHouse/pull/4790) ([TCeason](https://github.com/TCeason)) - Arreglar segfault en `JOIN ON` con `enable_optimize_predicate_expression` habilitado. [\#4794](https://github.com/ClickHouse/ClickHouse/pull/4794) ([Winter Zhang](https://github.com/zhang2014)) - Corregir un error al agregar una fila extraña después de consumir un mensaje protobuf de Kafka. [\#4808](https://github.com/ClickHouse/ClickHouse/pull/4808) ([Vitaly Baranov](https://github.com/vitlibar)) @@ -1629,7 +1636,7 @@ machine_translated: true - Las funciones `cutQueryStringAndFragment()` y `queryStringAndFragment()` ahora funcionan correctamente cuando `URL` contiene un fragmento y ninguna consulta. [\#4894](https://github.com/ClickHouse/ClickHouse/pull/4894) ([Vitaly Baranov](https://github.com/vitlibar)) - Corregir un error raro cuando `min_bytes_to_use_direct_io` es mayor que cero, que ocurre cuando el hilo tiene que buscar hacia atrás en el archivo de columna. [\#4897](https://github.com/ClickHouse/ClickHouse/pull/4897) ([alesapin](https://github.com/alesapin)) - Corregir tipos de argumentos incorrectos para funciones agregadas con argumentos `LowCardinality` (soluciona el problema [\#4919](https://github.com/ClickHouse/ClickHouse/issues/4919)). [\#4922](https://github.com/ClickHouse/ClickHouse/pull/4922) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -- Fijar la función `toISOWeek` resultado para el año 1970. [\#4988](https://github.com/ClickHouse/ClickHouse/pull/4988) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fijar el resultado de la función `toISOWeek` para el año 1970. [\#4988](https://github.com/ClickHouse/ClickHouse/pull/4988) ([alexey-milovidov](https://github.com/alexey-milovidov)) - Fijar la duplicación de las consultas `DROP`, `TRUNCATE` y `OPTIMIZE` cuando se ejecutan `ON CLUSTER` para tablas de la familia `ReplicatedMergeTree*`. [\#4991](https://github.com/ClickHouse/ClickHouse/pull/4991) ([alesapin](https://github.com/alesapin)) #### Mejora {#improvements-2} @@ -1735,7 +1742,7 @@ machine_translated: true #### Mejora de la construcción/prueba/empaquetado {#buildtestingpackaging-improvement-13} -- Agregue una forma de iniciar una imagen de clickhouse-server desde un usuario personalizado [\#4753](https://github.com/ClickHouse/ClickHouse/pull/4753) ([Mikhail f. Shiryaev](https://github.com/Felixoid)) +- Agregar una forma de iniciar la imagen de clickhouse-server desde un usuario personalizado [\#4753](https://github.com/ClickHouse/ClickHouse/pull/4753) ([Mikhail f. Shiryaev](https://github.com/Felixoid)) ### Lanzamiento de ClickHouse 19.3.7, 2019-03-12 {#clickhouse-release-19-3-7-2019-03-12} @@ -2063,3 +2070,5 @@ Esta versión contiene exactamente el mismo conjunto de parches que 19.3.6. - Se agregó script de prueba para reproducir la degradación del rendimiento en `jemalloc`. [\#4036](https://github.com/ClickHouse/ClickHouse/pull/4036) ([alexey-milovidov](https://github.com/alexey-milovidov)) - Se corrigieron errores ortográficos en comentarios y literales de cadena bajo `dbms`. [\#4122](https://github.com/ClickHouse/ClickHouse/pull/4122) ([maiha](https://github.com/maiha)) - Se corrigieron errores tipográficos en los comentarios.
[\#4089](https://github.com/ClickHouse/ClickHouse/pull/4089) ([Evgenii Pravda](https://github.com/kvinty)) + +## [Registro de cambios para 2018](https://github.com/ClickHouse/ClickHouse/blob/master/docs/en/changelog/2018.md) {#changelog-for-2018} diff --git a/docs/es/changelog/index.md b/docs/es/whats_new/changelog/index.md similarity index 95% rename from docs/es/changelog/index.md rename to docs/es/whats_new/changelog/index.md index 96e1b6f687c..03c8ea00cbe 100644 --- a/docs/es/changelog/index.md +++ b/docs/es/whats_new/changelog/index.md @@ -1,23 +1,31 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_folder_title: Changelog +toc_priority: 74 +toc_title: '2020' --- ## Lanzamiento de ClickHouse v20.3 {#clickhouse-release-v20-3} +### Lanzamiento de ClickHouse v20.3.4.10, 2020-03-20 {#clickhouse-release-v20-3-4-10-2020-03-20} + +#### Corrección de errores {#bug-fix} + +- Esta versión también contiene todas las correcciones de errores de 20.1.8.41 +- Se corrigió la falta de `rows_before_limit_at_least` en consultas sobre HTTP (con la canalización de procesadores). Esto corrige [\#9730](https://github.com/ClickHouse/ClickHouse/issues/9730). [\#9757](https://github.com/ClickHouse/ClickHouse/pull/9757) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) + ### Lanzamiento de ClickHouse v20.3.3.6, 2020-03-17 {#clickhouse-release-v20-3-3-6-2020-03-17} -### Corrección de errores {#bug-fix} +#### Corrección de errores {#bug-fix-1} -- Añadir ajuste `use_compact_format_in_distributed_parts_names` que permite escribir archivos para `INSERT` consultas en `Distributed` mesa con un formato más compacto. Esto corrige [\#9647](https://github.com/ClickHouse/ClickHouse/issues/9647). [\#9653](https://github.com/ClickHouse/ClickHouse/pull/9653) ([alesapin](https://github.com/alesapin)). Hace que la versión 20.3 sea compatible con versiones anteriores de nuevo. +- Esta versión también contiene todas las correcciones de errores de 20.1.7.38 - Corregir un error en una replicación que no permite que la replicación funcione si el usuario ha ejecutado mutaciones en la versión anterior. Esto corrige [\#9645](https://github.com/ClickHouse/ClickHouse/issues/9645). [\#9652](https://github.com/ClickHouse/ClickHouse/pull/9652) ([alesapin](https://github.com/alesapin)). Hace que la versión 20.3 sea compatible con versiones anteriores de nuevo. -- Se corrigieron los nombres de funciones internas incorrectos para `sumKahan` y `sumWithOverflow`. Conduzco a una excepción mientras uso estas funciones en consultas remotas. [\#9636](https://github.com/ClickHouse/ClickHouse/pull/9636) ([Azat Khuzhin](https://github.com/azat)). Este problema estaba en todas las versiones de ClickHouse. -- Se solucionó el problema: la zona horaria no se conservaba si escribía una expresión aritmética simple como `time + 1` (en contraste con una expresión como `time + INTERVAL 1 SECOND`). Esto corrige [\#5743](https://github.com/ClickHouse/ClickHouse/issues/5743). [\#9323](https://github.com/ClickHouse/ClickHouse/pull/9323) ([alexey-milovidov](https://github.com/alexey-milovidov)). Este problema estaba en todas las versiones de ClickHouse. -- Corregir posibles excepciones `Size of filter doesn't match size of column` y `Invalid number of rows in Chunk` en `MergeTreeRangeReader`. Podrían aparecer mientras se ejecuta `PREWHERE` en algunos casos. Fijar [\#9132](https://github.com/ClickHouse/ClickHouse/issues/9132).
[\#9612](https://github.com/ClickHouse/ClickHouse/pull/9612) ([Anton Popov](https://github.com/CurtizJ)) -- Permitir `ALTER ON CLUSTER` de `Distributed` tablas con replicación interna. Esto corrige [\#3268](https://github.com/ClickHouse/ClickHouse/issues/3268). [\#9617](https://github.com/ClickHouse/ClickHouse/pull/9617) ([shinoi2](https://github.com/shinoi2)). Este problema estaba en todas las versiones de ClickHouse. +- Añadir el ajuste `use_compact_format_in_distributed_parts_names` que permite escribir los archivos de las consultas `INSERT` en una tabla `Distributed` con un formato más compacto. Esto corrige [\#9647](https://github.com/ClickHouse/ClickHouse/issues/9647). [\#9653](https://github.com/ClickHouse/ClickHouse/pull/9653) ([alesapin](https://github.com/alesapin)). Hace que la versión 20.3 sea compatible con versiones anteriores de nuevo. ### Lanzamiento de ClickHouse v20.3.2.1, 2020-03-12 {#clickhouse-release-v20-3-2-1-2020-03-12} -### Cambio incompatible hacia atrás {#backward-incompatible-change} +#### Cambio incompatible hacia atrás {#backward-incompatible-change} - Se ha solucionado el problema `file name too long` al enviar datos para `Distributed` para un gran número de réplicas. Se corrigió el problema de que las credenciales de réplica se expusieran en el registro del servidor. El formato del nombre del directorio en el disco se cambió a `[shard{shard_index}[_replica{replica_index}]]`. [\#8911](https://github.com/ClickHouse/ClickHouse/pull/8911) ([Mikhail Korotov](https://github.com/millb)) Después de actualizar a la nueva versión, no podrá degradar sin intervención manual, porque la versión anterior del servidor no reconoce el nuevo formato de directorio. Si desea degradar, debe cambiar el nombre manualmente de los directorios correspondientes al formato anterior. Este cambio sólo es relevante si ha utilizado `INSERT`s a `Distributed` tabla. En la versión 20.3.3 introduciremos una configuración que le permitirá habilitar el nuevo formato gradualmente. - Se ha cambiado el formato de las entradas de registro de replicación para los comandos de mutación. Tienes que esperar a que las mutaciones antiguas se procesen antes de instalar la nueva versión. @@ -31,7 +39,7 @@ machine_translated: true - Requiere que el servidor se reinicie para aplicar los cambios en la configuración de registro. Esta es una solución temporal para evitar el error en el que el servidor escribe en un archivo de registro eliminado (consulte [\#8696](https://github.com/ClickHouse/ClickHouse/issues/8696)). [\#8707](https://github.com/ClickHouse/ClickHouse/pull/8707) ([Alejandro Kuzmenkov](https://github.com/akuzm)) - La configuración `experimental_use_processors` está habilitada de forma predeterminada. Esta configuración permite el uso de la nueva canalización de consultas. Esto es refactorización interna y no esperamos cambios visibles. Si ves algún problema, configúrala en cero (véase el ejemplo a continuación). [\#8768](https://github.com/ClickHouse/ClickHouse/pull/8768) ([alexey-milovidov](https://github.com/alexey-milovidov))
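Referenced from the `experimental_use_processors` entry just above: a minimal sketch of turning the new pipeline off for one session while debugging. This assumes the setting can be changed with a plain `SET`, like other ClickHouse settings:

```sql
-- Fall back to the pre-processors query pipeline for the current session only.
SET experimental_use_processors = 0;
```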
-### Novedad {#new-feature} +#### Novedad {#new-feature} - Añadir los formatos de entrada/salida `Avro` y `AvroConfluent` [\#8571](https://github.com/ClickHouse/ClickHouse/pull/8571) ([Andrés Onyshchuk](https://github.com/oandrew)) [\#8957](https://github.com/ClickHouse/ClickHouse/pull/8957) ([Andrés Onyshchuk](https://github.com/oandrew)) [\#8717](https://github.com/ClickHouse/ClickHouse/pull/8717) ([alexey-milovidov](https://github.com/alexey-milovidov)) - Actualizaciones de subprocesos múltiples y sin bloqueo de claves caducadas en diccionarios `cache` (con permiso opcional para leer los antiguos). [\#8303](https://github.com/ClickHouse/ClickHouse/pull/8303) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) @@ -62,12 +70,12 @@ machine_translated: true - Añadir función `moduloOrZero` [\#9358](https://github.com/ClickHouse/ClickHouse/pull/9358) ([Hcz](https://github.com/hczhcz)) - Tablas de sistema añadidas `system.zeros` y `system.zeros_mt`, así como funciones de tabla `zeros()` y `zeros_mt()`. Las tablas (y funciones de tabla) contienen una sola columna con nombre `zero` y tipo `UInt8`. Esta columna contiene ceros. Es necesario para fines de prueba como el método más rápido para generar muchas filas. Esto corrige [\#6604](https://github.com/ClickHouse/ClickHouse/issues/6604) [\#9593](https://github.com/ClickHouse/ClickHouse/pull/9593) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -### Característica Experimental {#experimental-feature} +#### Característica Experimental {#experimental-feature} - Añadir un nuevo formato compacto de piezas en las tablas de la familia `MergeTree`, en el que todas las columnas se almacenan en un solo archivo. Esto ayuda a aumentar el rendimiento de las inserciones pequeñas y frecuentes. El formato antiguo (un archivo por columna) ahora se llama ancho. El formato de almacenamiento de datos se controla mediante las configuraciones `min_bytes_for_wide_part` y `min_rows_for_wide_part` (véase el ejemplo más abajo). [\#8290](https://github.com/ClickHouse/ClickHouse/pull/8290) ([Anton Popov](https://github.com/CurtizJ)) - Soporte para almacenamiento S3 para tablas `Log`, `TinyLog` y `StripeLog`. [\#8862](https://github.com/ClickHouse/ClickHouse/pull/8862) ([Pavel Kovalenko](https://github.com/Jokser)) -### Corrección de errores {#bug-fix-1} +#### Corrección de errores {#bug-fix-2} - Se corrigieron espacios en blanco inconsistentes en los mensajes de registro. [\#9322](https://github.com/ClickHouse/ClickHouse/pull/9322) ([alexey-milovidov](https://github.com/alexey-milovidov)) - Corregir un error en el que las matrices de tuplas sin nombre se aplanaban como estructuras anidadas en la creación de la tabla. [\#8866](https://github.com/ClickHouse/ClickHouse/pull/8866) ([achulkov2](https://github.com/achulkov2)) @@ -155,7 +163,7 @@ machine_translated: true - Solución alternativa agregada si el sistema operativo devuelve un resultado incorrecto para la función `timer_create`. [\#8837](https://github.com/ClickHouse/ClickHouse/pull/8837) ([alexey-milovidov](https://github.com/alexey-milovidov)) - Corregido el error en el uso del parámetro `min_marks_for_seek`. Se corrigió el mensaje de error cuando no hay una clave de fragmentación en la tabla distribuida e intentamos omitir fragmentos no utilizados. [\#8908](https://github.com/ClickHouse/ClickHouse/pull/8908) ([Azat Khuzhin](https://github.com/azat))
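For the compact part format described under “Característica Experimental” above, a minimal sketch of how the two thresholds could be set per table; the setting names are the ones given in that entry, while the table and columns are hypothetical:

```sql
CREATE TABLE events
(
    `ts` DateTime,
    `value` UInt64
)
ENGINE = MergeTree
ORDER BY ts
SETTINGS
    min_bytes_for_wide_part = 10485760, -- parts below ~10 MiB are written in the compact format
    min_rows_for_wide_part = 100000;    -- parts with fewer rows also stay compact
```

With both thresholds at zero, every part would use the wide format (one file per column), which matches the previous behaviour.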
-### Mejora {#improvement} +#### Mejora {#improvement} - Implementar `ALTER MODIFY/DROP` encima de mutaciones para la familia de motores `ReplicatedMergeTree*`. Ahora `ALTERS` bloquea solo en la etapa de actualización de metadatos, y no bloquea después de eso. [\#8701](https://github.com/ClickHouse/ClickHouse/pull/8701) ([alesapin](https://github.com/alesapin)) - Agregue la capacidad de reescribir CROSS a INNER JOINs con una sección `WHERE` que contiene nombres sin calificar. [\#9512](https://github.com/ClickHouse/ClickHouse/pull/9512) ([Artem Zuikov](https://github.com/4ertus2)) @@ -197,7 +205,7 @@ machine_translated: true - Agregue una métrica del sistema que rastree el número de conexiones de cliente utilizando el protocolo MySQL ([\#9013](https://github.com/ClickHouse/ClickHouse/issues/9013)). [\#9015](https://github.com/ClickHouse/ClickHouse/pull/9015) ([Eugene Klimov](https://github.com/Slach)) - A partir de ahora, las respuestas HTTP tendrán el encabezado `X-ClickHouse-Timezone` establecido en el mismo valor de zona horaria que informaría `SELECT timezone()`. [\#9493](https://github.com/ClickHouse/ClickHouse/pull/9493) ([Denis Glazachev](https://github.com/traceon)) -### Mejora del rendimiento {#performance-improvement} +#### Mejora del rendimiento {#performance-improvement} - Mejorar el rendimiento del análisis del índice con IN [\#9261](https://github.com/ClickHouse/ClickHouse/pull/9261) ([Anton Popov](https://github.com/CurtizJ)) - Código más simple y eficiente en funciones lógicas + limpiezas de código. Un seguimiento de [\#8718](https://github.com/ClickHouse/ClickHouse/issues/8718) [\#8728](https://github.com/ClickHouse/ClickHouse/pull/8728) ([Alejandro Kazakov](https://github.com/Akazz)) @@ -210,7 +218,7 @@ machine_translated: true - Mejorar el rendimiento de la función `reinterpretAsFixedString`. [\#9342](https://github.com/ClickHouse/ClickHouse/pull/9342) ([alexey-milovidov](https://github.com/alexey-milovidov)) - No envíe bloques al cliente para el formato `Null` en la tubería de procesadores. [\#8797](https://github.com/ClickHouse/ClickHouse/pull/8797) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) [\#8767](https://github.com/ClickHouse/ClickHouse/pull/8767) ([Alejandro Kuzmenkov](https://github.com/akuzm)) -### Mejora de la construcción/prueba/empaquetado {#buildtestingpackaging-improvement} +#### Mejora de la construcción/prueba/empaquetado {#buildtestingpackaging-improvement} - El manejo de excepciones ahora funciona correctamente en el subsistema Windows para Linux. Consulte https://github.com/ClickHouse-Extras/libunwind/pull/3 Esto corrige [\#6480](https://github.com/ClickHouse/ClickHouse/issues/6480) [\#9564](https://github.com/ClickHouse/ClickHouse/pull/9564) ([sobolevsv](https://github.com/sobolevsv)) - Reemplazar `readline` con `replxx` para la edición de línea interactiva en `clickhouse-client` [\#8416](https://github.com/ClickHouse/ClickHouse/pull/8416) ([Ivan](https://github.com/abyss7)) @@ -241,7 +249,7 @@ machine_translated: true - Comprobación actualizada de consultas colgadas en el script de prueba de clickhouse [\#8858](https://github.com/ClickHouse/ClickHouse/pull/8858) ([Alejandro Kazakov](https://github.com/Akazz)) - Se eliminaron algunos archivos inútiles del repositorio. [\#8843](https://github.com/ClickHouse/ClickHouse/pull/8843) ([alexey-milovidov](https://github.com/alexey-milovidov)) - Tipo cambiado de perftests matemáticos de `once` a `loop`.
[\#8783](https://github.com/ClickHouse/ClickHouse/pull/8783) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -- Agregue una imagen acoplable que permite construir un informe HTML del navegador de código interactivo para nuestra base de código. [\#8781](https://github.com/ClickHouse/ClickHouse/pull/8781) ([alesapin](https://github.com/alesapin)) Ver [Navegador de código Woboq](https://clickhouse-test-reports.s3.yandex.net/codebrowser/html_report///ClickHouse/src/index.html) +- Agregue una imagen acoplable que permite construir un informe HTML del navegador de código interactivo para nuestra base de código. [\#8781](https://github.com/ClickHouse/ClickHouse/pull/8781) ([alesapin](https://github.com/alesapin)) Ver [Navegador de código Woboq](https://clickhouse-test-reports.s3.yandex.net/codebrowser/html_report///ClickHouse/dbms/src/index.html) - Suprima algunas fallas de prueba bajo MSan. [\#8780](https://github.com/ClickHouse/ClickHouse/pull/8780) ([Alejandro Kuzmenkov](https://github.com/akuzm)) - Aceleración de la prueba “exception while insert”. Esta prueba a menudo se agota en la compilación de depuración con cobertura. [\#8711](https://github.com/ClickHouse/ClickHouse/pull/8711) ([alexey-milovidov](https://github.com/alexey-milovidov)) - Actualizar `libcxx` y `libcxxabi` a master. En preparación para [\#9304](https://github.com/ClickHouse/ClickHouse/issues/9304) [\#9308](https://github.com/ClickHouse/ClickHouse/pull/9308) ([alexey-milovidov](https://github.com/alexey-milovidov)) @@ -261,9 +269,40 @@ machine_translated: true ## Lanzamiento de ClickHouse v20.1 {#clickhouse-release-v20-1} +### Lanzamiento de ClickHouse v20.1.8.41, 2020-03-20 {#clickhouse-release-v20-1-8-41-2020-03-20} + +#### Corrección de errores {#bug-fix-3} + +- Arreglar un posible error permanente `Cannot schedule a task` (debido a una excepción no controlada en `ParallelAggregatingBlockInputStream::Handler::onFinish/onFinishThread`). Esto corrige [\#6833](https://github.com/ClickHouse/ClickHouse/issues/6833). [\#9154](https://github.com/ClickHouse/ClickHouse/pull/9154) ([Azat Khuzhin](https://github.com/azat)) +- Corregir el consumo excesivo de memoria en consultas `ALTER` (mutaciones). Esto corrige [\#9533](https://github.com/ClickHouse/ClickHouse/issues/9533) y [\#9670](https://github.com/ClickHouse/ClickHouse/issues/9670). [\#9754](https://github.com/ClickHouse/ClickHouse/pull/9754) ([alesapin](https://github.com/alesapin)) +- Corregir un error en las citas inversas en diccionarios externos DDL. Esto corrige [\#9619](https://github.com/ClickHouse/ClickHouse/issues/9619). [\#9734](https://github.com/ClickHouse/ClickHouse/pull/9734) ([alesapin](https://github.com/alesapin)) + +### Lanzamiento de ClickHouse v20.1.7.38, 2020-03-18 {#clickhouse-release-v20-1-7-38-2020-03-18} + +#### Corrección de errores {#bug-fix-4} + +- Se corrigieron los nombres de funciones internas incorrectos para `sumKahan` y `sumWithOverflow`. Conducía a una excepción al usar estas funciones en consultas remotas. [\#9636](https://github.com/ClickHouse/ClickHouse/pull/9636) ([Azat Khuzhin](https://github.com/azat)). Este problema estaba en todas las versiones de ClickHouse. +- Permitir `ALTER ON CLUSTER` de tablas `Distributed` con replicación interna. Esto corrige [\#3268](https://github.com/ClickHouse/ClickHouse/issues/3268). [\#9617](https://github.com/ClickHouse/ClickHouse/pull/9617) ([shinoi2](https://github.com/shinoi2)). Este problema estaba en todas las versiones de ClickHouse.
+- Corregir posibles excepciones `Size of filter doesn't match size of column` y `Invalid number of rows in Chunk` en `MergeTreeRangeReader`. Podrían aparecer mientras se ejecuta `PREWHERE` en algunos casos. Corrige [\#9132](https://github.com/ClickHouse/ClickHouse/issues/9132). [\#9612](https://github.com/ClickHouse/ClickHouse/pull/9612) ([Anton Popov](https://github.com/CurtizJ))
+- Se solucionó el problema: la zona horaria no se conservaba si escribía una expresión aritmética simple como `time + 1` (en contraste con una expresión como `time + INTERVAL 1 SECOND`). Esto corrige [\#5743](https://github.com/ClickHouse/ClickHouse/issues/5743). [\#9323](https://github.com/ClickHouse/ClickHouse/pull/9323) ([alexey-milovidov](https://github.com/alexey-milovidov)). Este problema estaba en todas las versiones de ClickHouse.
+- Ahora no es posible crear o agregar columnas con alias cíclicos simples como `a DEFAULT b, b DEFAULT a`. [\#9603](https://github.com/ClickHouse/ClickHouse/pull/9603) ([alesapin](https://github.com/alesapin))
+- Se solucionó el problema cuando el relleno al final del valor codificado en base64 puede estar mal formado. Actualización de la biblioteca base64. Esto corrige [\#9491](https://github.com/ClickHouse/ClickHouse/issues/9491), cerca [\#9492](https://github.com/ClickHouse/ClickHouse/issues/9492) [\#9500](https://github.com/ClickHouse/ClickHouse/pull/9500) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Corregir la carrera de datos en la destrucción de `Poco::HTTPServer`. Podría suceder cuando el servidor se inicia y se apaga inmediatamente. [\#9468](https://github.com/ClickHouse/ClickHouse/pull/9468) ([Anton Popov](https://github.com/CurtizJ))
+- Corregir un posible bloqueo / número incorrecto de filas en `LIMIT n WITH TIES` cuando hay muchas filas iguales a la fila n-ésima (véase el ejemplo tras esta lista). [\#9464](https://github.com/ClickHouse/ClickHouse/pull/9464) ([Tavplubix](https://github.com/tavplubix))
+- Corregir posibles sumas de comprobación no coincidentes con TTL de columna. [\#9451](https://github.com/ClickHouse/ClickHouse/pull/9451) ([Anton Popov](https://github.com/CurtizJ))
+- Corregir el bloqueo cuando un usuario intenta `ALTER MODIFY SETTING` para motores de tabla antiguos de la familia `MergeTree`. [\#9435](https://github.com/ClickHouse/ClickHouse/pull/9435) ([alesapin](https://github.com/alesapin))
+- Ahora intentaremos finalizar las mutaciones con más frecuencia. [\#9427](https://github.com/ClickHouse/ClickHouse/pull/9427) ([alesapin](https://github.com/alesapin))
+- Corregir la incompatibilidad del protocolo de replicación introducida en [\#8598](https://github.com/ClickHouse/ClickHouse/issues/8598). [\#9412](https://github.com/ClickHouse/ClickHouse/pull/9412) ([alesapin](https://github.com/alesapin))
+- Corregir `not(has())` para el índice bloom\_filter de los tipos de matriz. [\#9407](https://github.com/ClickHouse/ClickHouse/pull/9407) ([achimbab](https://github.com/achimbab))
+- Se corrigió el comportamiento de las funciones `match` y `extract` cuando haystack tiene cero bytes. El comportamiento era incorrecto cuando el argumento haystack era constante. Esto corrige [\#9160](https://github.com/ClickHouse/ClickHouse/issues/9160) [\#9163](https://github.com/ClickHouse/ClickHouse/pull/9163) ([alexey-milovidov](https://github.com/alexey-milovidov)) [\#9345](https://github.com/ClickHouse/ClickHouse/pull/9345) ([alexey-milovidov](https://github.com/alexey-milovidov))
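Referenced from the `LIMIT n WITH TIES` entry in the list above; a minimal sketch of the construct whose row count was fixed (table and column names are hypothetical):

```sql
-- Returns the top 3 scores plus any further rows whose score ties with the 3rd one.
SELECT student, score
FROM exam_results
ORDER BY score DESC
LIMIT 3 WITH TIES;
```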
+#### Mejora de la construcción/prueba/empaquetado {#buildtestingpackaging-improvement-1} + +- El manejo de excepciones ahora funciona correctamente en el subsistema Windows para Linux. Consulte https://github.com/ClickHouse-Extras/libunwind/pull/3 Esto corrige [\#6480](https://github.com/ClickHouse/ClickHouse/issues/6480) [\#9564](https://github.com/ClickHouse/ClickHouse/pull/9564) ([sobolevsv](https://github.com/sobolevsv)) + ### Lanzamiento de ClickHouse v20.1.6.30, 2020-03-05 {#clickhouse-release-v20-1-6-30-2020-03-05} -#### Corrección de errores {#bug-fix-2} +#### Corrección de errores {#bug-fix-5} - Corregir la incompatibilidad de datos cuando se comprime con el códec `T64`. [\#9039](https://github.com/ClickHouse/ClickHouse/pull/9039) [(abyss7)](https://github.com/abyss7) @@ -323,7 +362,7 @@ machine_translated: true ### Lanzamiento de ClickHouse v20.1.2.4, 2020-01-22 {#clickhouse-release-v20-1-2-4-2020-01-22} -### Cambio incompatible hacia atrás {#backward-incompatible-change-1} +#### Cambio incompatible hacia atrás {#backward-incompatible-change-1} - Haga el ajuste `merge_tree_uniform_read_distribution` obsoleto. El servidor aún reconoce esta configuración, pero no tiene ningún efecto. [\#8308](https://github.com/ClickHouse/ClickHouse/pull/8308) ([alexey-milovidov](https://github.com/alexey-milovidov)) - Tipo de retorno cambiado de la función `greatCircleDistance` a `Float32` porque ahora el resultado del cálculo es `Float32`. [\#7993](https://github.com/ClickHouse/ClickHouse/pull/7993) ([alexey-milovidov](https://github.com/alexey-milovidov)) @@ -333,7 +372,7 @@ machine_translated: true - Añadir nueva lógica `ANY JOIN` para `StorageJoin` consistente con la operación `JOIN`. Para actualizar sin cambios en el comportamiento, necesita agregar `SETTINGS any_join_distinct_right_table_keys = 1` a los metadatos de las tablas con motor Join o volver a crear estas tablas después de la actualización. [\#8400](https://github.com/ClickHouse/ClickHouse/pull/8400) ([Artem Zuikov](https://github.com/4ertus2)) - Requiere que el servidor se reinicie para aplicar los cambios en la configuración de registro. Esta es una solución temporal para evitar el error en el que el servidor escribe en un archivo de registro eliminado (consulte [\#8696](https://github.com/ClickHouse/ClickHouse/issues/8696)). [\#8707](https://github.com/ClickHouse/ClickHouse/pull/8707) ([Alejandro Kuzmenkov](https://github.com/akuzm)) -### Novedad {#new-feature-2} +#### Novedad {#new-feature-2} - Se agregó información sobre las rutas de acceso de piezas a `system.merges`. [\#8043](https://github.com/ClickHouse/ClickHouse/pull/8043) ([Vladimir Chebotarev](https://github.com/excitoon)) - Añadir capacidad de ejecutar la consulta `SYSTEM RELOAD DICTIONARY` en modo `ON CLUSTER`. [\#8288](https://github.com/ClickHouse/ClickHouse/pull/8288) ([Guillaume Tassery](https://github.com/YiuRULE)) @@ -367,7 +406,7 @@ machine_translated: true - Añadido soporte para compresión brotli (`br`) en almacenes relacionados con archivos y funciones de tabla. Esto corrige [\#8156](https://github.com/ClickHouse/ClickHouse/issues/8156).
[\#8526](https://github.com/ClickHouse/ClickHouse/pull/8526) ([alexey-milovidov](https://github.com/alexey-milovidov)) - Añadir `groupBit*` funciones para el `SimpleAggregationFunction` tipo. [\#8485](https://github.com/ClickHouse/ClickHouse/pull/8485) ([Guillaume Tassery](https://github.com/YiuRULE)) -### Corrección de errores {#bug-fix-3} +#### Corrección de errores {#bug-fix-6} - Corregir el cambio de nombre de las tablas con `Distributed` motor. Soluciona el problema [\#7868](https://github.com/ClickHouse/ClickHouse/issues/7868). [\#8306](https://github.com/ClickHouse/ClickHouse/pull/8306) ([Tavplubix](https://github.com/tavplubix)) - Ahora diccionarios de apoyo `EXPRESSION` para atributos en cadena arbitraria en dialecto SQL no ClickHouse. [\#8098](https://github.com/ClickHouse/ClickHouse/pull/8098) ([alesapin](https://github.com/alesapin)) @@ -469,7 +508,7 @@ machine_translated: true - Repara la recarga del diccionario si tiene `invalidate_query`, que detuvo las actualizaciones y alguna excepción en los intentos de actualización anteriores. [\#8029](https://github.com/ClickHouse/ClickHouse/pull/8029) ([alesapin](https://github.com/alesapin)) - Corregido el error en la función `arrayReduce` que puede conducir a “double free” y error en el combinador de funciones agregadas `Resample` que puede conducir a la pérdida de memoria. Función agregada agregada `aggThrow`. Esta función se puede utilizar para fines de prueba. [\#8446](https://github.com/ClickHouse/ClickHouse/pull/8446) ([alexey-milovidov](https://github.com/alexey-milovidov)) -### Mejora {#improvement-1} +#### Mejora {#improvement-1} - Registro mejorado cuando se trabaja con `S3` motor de mesa. [\#8251](https://github.com/ClickHouse/ClickHouse/pull/8251) ([Grigory Pervakov](https://github.com/GrigoryPervakov)) - Mensaje de ayuda impreso cuando no se pasan argumentos al llamar `clickhouse-local`. Esto corrige [\#5335](https://github.com/ClickHouse/ClickHouse/issues/5335). [\#8230](https://github.com/ClickHouse/ClickHouse/pull/8230) ([Andrey Nagorny](https://github.com/Melancholic)) @@ -518,7 +557,7 @@ machine_translated: true - Configuración `mark_cache_min_lifetime` ahora está obsoleto y no hace nada. En versiones anteriores, la caché de marcas puede crecer en la memoria más grande que `mark_cache_size` para acomodar datos dentro de `mark_cache_min_lifetime` segundo. Eso provocó confusión y un mayor uso de memoria de lo esperado, lo que es especialmente malo en los sistemas con restricciones de memoria. Si observa una degradación del rendimiento después de instalar esta versión, debe aumentar la `mark_cache_size`. [\#8484](https://github.com/ClickHouse/ClickHouse/pull/8484) ([alexey-milovidov](https://github.com/alexey-milovidov)) - Preparación para usar `tid` doquier. Esto es necesario para [\#7477](https://github.com/ClickHouse/ClickHouse/issues/7477). [\#8276](https://github.com/ClickHouse/ClickHouse/pull/8276) ([alexey-milovidov](https://github.com/alexey-milovidov)) -### Mejora del rendimiento {#performance-improvement-1} +#### Mejora del rendimiento {#performance-improvement-1} - Optimizaciones de rendimiento en la canalización de procesadores. [\#7988](https://github.com/ClickHouse/ClickHouse/pull/7988) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) - Actualizaciones sin bloqueo de claves caducadas en diccionarios de caché (con permiso para leer las antiguas). 
[\#8303](https://github.com/ClickHouse/ClickHouse/pull/8303) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) @@ -544,13 +583,13 @@ machine_translated: true - Formatos de datos de análisis paralelo [\#6553](https://github.com/ClickHouse/ClickHouse/pull/6553) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) - Habilitar analizador optimizado de `Values` con expresiones por defecto (`input_format_values_deduce_templates_of_expressions=1`). [\#8231](https://github.com/ClickHouse/ClickHouse/pull/8231) ([Tavplubix](https://github.com/tavplubix)) -### Mejora de la construcción/prueba/empaquetado {#buildtestingpackaging-improvement-1} +#### Mejora de la construcción/prueba/empaquetado {#buildtestingpackaging-improvement-2} - Construir correcciones para `ARM` y en modo mínimo. [\#8304](https://github.com/ClickHouse/ClickHouse/pull/8304) ([propulsor](https://github.com/proller)) - Añadir archivo de cobertura al ras para `clickhouse-server` cuando std::atexit no se llama. También mejoró ligeramente el registro en pruebas sin estado con cobertura. [\#8267](https://github.com/ClickHouse/ClickHouse/pull/8267) ([alesapin](https://github.com/alesapin)) - Actualizar la biblioteca LLVM en contrib. Evite usar LLVM de paquetes de sistema operativo. [\#8258](https://github.com/ClickHouse/ClickHouse/pull/8258) ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Hacer incluido `curl` construir totalmente tranquilo. [\#8232](https://github.com/ClickHouse/ClickHouse/pull/8232) [\#8203](https://github.com/ClickHouse/ClickHouse/pull/8203) ([Pavel Kovalenko](https://github.com/Jokser)) +- Hacer que la compilación del `curl` incluido sea completamente silenciosa. [\#8232](https://github.com/ClickHouse/ClickHouse/pull/8232) [\#8203](https://github.com/ClickHouse/ClickHouse/pull/8203) ([Pavel Kovalenko](https://github.com/Jokser)) -- Arreglar algunos `MemorySanitizer` advertencia. [\#8235](https://github.com/ClickHouse/ClickHouse/pull/8235) ([Alejandro Kuzmenkov](https://github.com/akuzm)) +- Arreglar algunas advertencias de `MemorySanitizer`. [\#8235](https://github.com/ClickHouse/ClickHouse/pull/8235) ([Alejandro Kuzmenkov](https://github.com/akuzm)) - Utilizar las macros `add_warning` y `no_warning` en `CMakeLists.txt`. [\#8604](https://github.com/ClickHouse/ClickHouse/pull/8604) ([Ivan](https://github.com/abyss7)) - Agregue soporte de almacenamiento de objetos compatible con S3 (MinIO, https://min.io/) para mejores pruebas de integración. [\#7863](https://github.com/ClickHouse/ClickHouse/pull/7863) [\#7875](https://github.com/ClickHouse/ClickHouse/pull/7875) ([Pavel Kovalenko](https://github.com/Jokser)) - Importar los encabezados de `libc` a contrib. Permite hacer que las compilaciones sean más consistentes en varios sistemas (solo para `x86_64-linux-gnu`). [\#5773](https://github.com/ClickHouse/ClickHouse/pull/5773) ([alexey-milovidov](https://github.com/alexey-milovidov)) @@ -615,13 +654,15 @@ machine_translated: true - Actualizar contrib/protobuf. [\#8256](https://github.com/ClickHouse/ClickHouse/pull/8256) ([Matwey V. Kornilov](https://github.com/matwey)) - En preparación para cambiar a C++20 como una celebración de año nuevo. “May the C++ force be with ClickHouse.” [\#8447](https://github.com/ClickHouse/ClickHouse/pull/8447) ([Amos Bird](https://github.com/amosbird)) -### Característica Experimental {#experimental-feature-1} +#### Característica Experimental {#experimental-feature-1} - Añadido ajuste experimental `min_bytes_to_use_mmap_io`. Permite leer archivos grandes sin copiar datos del kernel al espacio de usuario.
La configuración está deshabilitada de forma predeterminada. El umbral recomendado es de aproximadamente 64 MB, porque mmap / munmap es lento. [\#8520](https://github.com/ClickHouse/ClickHouse/pull/8520) ([alexey-milovidov](https://github.com/alexey-milovidov)) - Cuotas reanudadas como parte del sistema de control de acceso. Añadido nueva tabla `system.quotas`, nuevas funciones `currentQuota`, `currentQuotaKey`, nueva sintaxis SQL `CREATE QUOTA`, `ALTER QUOTA`, `DROP QUOTA`, `SHOW QUOTA`. [\#7257](https://github.com/ClickHouse/ClickHouse/pull/7257) ([Vitaly Baranov](https://github.com/vitlibar)) - Permitir omitir configuraciones desconocidas con advertencias en lugar de lanzar excepciones. [\#7653](https://github.com/ClickHouse/ClickHouse/pull/7653) ([Vitaly Baranov](https://github.com/vitlibar)) - Se reanudaron las políticas de fila como parte del sistema de control de acceso. Añadido nueva tabla `system.row_policies`, nueva función `currentRowPolicies()`, nueva sintaxis SQL `CREATE POLICY`, `ALTER POLICY`, `DROP POLICY`, `SHOW CREATE POLICY`, `SHOW POLICIES`. [\#7808](https://github.com/ClickHouse/ClickHouse/pull/7808) ([Vitaly Baranov](https://github.com/vitlibar)) -### Corrección de seguridad {#security-fix} +#### Corrección de seguridad {#security-fix} - Se corrigió la posibilidad de leer la estructura de directorios en tablas con `File` motor de mesa. Esto corrige [\#8536](https://github.com/ClickHouse/ClickHouse/issues/8536). [\#8537](https://github.com/ClickHouse/ClickHouse/pull/8537) ([alexey-milovidov](https://github.com/alexey-milovidov)) + +## [Registro de cambios para 2019](https://github.com/ClickHouse/ClickHouse/blob/master/docs/en/changelog/2019.md) {#changelog-for-2019} diff --git a/docs/es/whats_new/index.md b/docs/es/whats_new/index.md new file mode 100644 index 00000000000..91486dbd9e2 --- /dev/null +++ b/docs/es/whats_new/index.md @@ -0,0 +1,8 @@ +--- +machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_folder_title: What's New +toc_priority: 72 +--- + + diff --git a/docs/es/whats_new/roadmap.md b/docs/es/whats_new/roadmap.md new file mode 100644 index 00000000000..e0a027fc3b7 --- /dev/null +++ b/docs/es/whats_new/roadmap.md @@ -0,0 +1,19 @@ +--- +machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 74 +toc_title: Hoja de ruta +--- + +# Hoja de ruta {#roadmap} + +## Q1 2020 {#q1-2020} + +- Control de acceso basado en roles + +## Q2 2020 {#q2-2020} + +- Integración con servicios de autenticación externos +- Grupos de recursos para una distribución más precisa de la capacidad del clúster entre los usuarios + +{## [Artículo Original](https://clickhouse.tech/docs/en/roadmap/) ##} diff --git a/docs/es/security_changelog.md b/docs/es/whats_new/security_changelog.md similarity index 68% rename from docs/es/security_changelog.md rename to docs/es/whats_new/security_changelog.md index 94a0fd83f73..6a830efc3b6 100644 --- a/docs/es/security_changelog.md +++ b/docs/es/whats_new/security_changelog.md @@ -1,22 +1,25 @@ --- machine_translated: true +machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa +toc_priority: 76 +toc_title: Seguridad Changelog --- ## Corregido en la versión de ClickHouse 19.14.3.3, 2019-09-10 {#fixed-in-clickhouse-release-19-14-3-3-2019-09-10} -### ¿Qué puedes encontrar en Neodigit {#cve-2019-15024} +### CVE-2019-15024 {#cve-2019-15024} -Un atacante que tenga acceso de escritura a ZooKeeper y que pueda ejecutar un servidor personalizado disponible desde 
la red donde se ejecuta ClickHouse, puede crear un servidor malicioso personalizado que actuará como una réplica de ClickHouse y lo registrará en ZooKeeper. Cuando otra réplica recuperará la parte de datos de la réplica maliciosa, puede forzar a clickhouse-server a escribir en una ruta arbitraria en el sistema de archivos. +An attacker that has write access to ZooKeeper and who can run a custom server available from the network where ClickHouse runs can create a custom-built malicious server that will act as a ClickHouse replica and register it in ZooKeeper. When another replica fetches a data part from the malicious replica, it can force clickhouse-server to write to an arbitrary path on the filesystem. Créditos: Eldar Zaitov del equipo de seguridad de la información de Yandex -### ¿Qué puedes encontrar en Neodigit {#cve-2019-16535} +### CVE-2019-16535 {#cve-2019-16535} -Аn La lectura OOB, la escritura OOB y el desbordamiento de enteros en los algoritmos de descompresión se pueden usar para lograr RCE o DoS a través del protocolo nativo. +An OOB read, OOB write and integer underflow in decompression algorithms can be used to achieve RCE or DoS via the native protocol. Créditos: Eldar Zaitov del equipo de seguridad de la información de Yandex -### ¿Qué puedes encontrar en Neodigit {#cve-2019-16536} +### CVE-2019-16536 {#cve-2019-16536} Un cliente autenticado malintencionado puede desencadenar el desbordamiento de pila que conduce a DoS. @@ -24,7 +27,7 @@ Créditos: Eldar Zaitov del equipo de seguridad de la información de Yandex ## Corregido en la versión de ClickHouse 19.13.6.1, 2019-09-20 {#fixed-in-clickhouse-release-19-13-6-1-2019-09-20} -### ¿Qué puedes encontrar en Neodigit {#cve-2019-18657} +### CVE-2019-18657 {#cve-2019-18657} La vulnerabilidad de la función de tabla `url` permitió al atacante inyectar encabezados HTTP arbitrarios en la solicitud. Crédito: [Nikita Tikhomirov](https://github.com/NSTikhomirov) ## Corregido en la versión de ClickHouse 18.12.13, 2018-09-10 {#fixed-in-clickhouse-release-18-12-13-2018-09-10} -### ¿Qué puedes encontrar en Neodigit {#cve-2018-14672} +### CVE-2018-14672 {#cve-2018-14672} Las funciones para cargar modelos CatBoost permitieron el recorrido de ruta y la lectura de archivos arbitrarios a través de mensajes de error. Créditos: Andrey Krasichkov del equipo de seguridad de la información de Yande ## Corregido en la versión de ClickHouse 18.10.3, 2018-08-13 {#fixed-in-clickhouse-release-18-10-3-2018-08-13} -### ¿Qué puedes encontrar en Neodigit {#cve-2018-14671} +### CVE-2018-14671 {#cve-2018-14671} unixODBC permitía cargar objetos compartidos arbitrarios desde el sistema de archivos, lo que provocó una vulnerabilidad de ejecución remota de código. Créditos: Andrey Krasichkov y Evgeny Sidorov del equipo de seguridad de la info ## Corregido en la versión de ClickHouse 1.1.54388, 2018-06-28 {#fixed-in-clickhouse-release-1-1-54388-2018-06-28} -### ¿Qué puedes encontrar en Neodigit {#cve-2018-14668} +### CVE-2018-14668 {#cve-2018-14668} “remote” función de tabla permitió símbolos arbitrarios en “user”, “password” y “default\_database” campos que llevaron a ataques de falsificación de solicitudes de protocolo cruzado.
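For context on CVE-2018-14668 above: the `remote` table function receives the user, password and default database as plain string arguments, which is where the arbitrary symbols could be injected before the fix. A sketch of the call shape only; host, credentials and table are hypothetical:

```sql
-- remote('addresses', db.table[, 'user'[, 'password']])
SELECT count()
FROM remote('example-host:9000', default.hits, 'readonly_user', 'secret');
```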
@@ -56,7 +59,7 @@ Créditos: Andrey Krasichkov del equipo de seguridad de la información de Yande ## Corregido en la versión de ClickHouse 1.1.54390, 2018-07-06 {#fixed-in-clickhouse-release-1-1-54390-2018-07-06} -### ¿Qué puedes encontrar en Neodigit {#cve-2018-14669} +### CVE-2018-14669 {#cve-2018-14669} ClickHouse cliente MySQL tenía “LOAD DATA LOCAL INFILE” funcionalidad habilitada que permitió a una base de datos MySQL maliciosa leer archivos arbitrarios desde el servidor ClickHouse conectado. @@ -64,10 +67,10 @@ Créditos: Andrey Krasichkov y Evgeny Sidorov del equipo de seguridad de la info ## Corregido en la versión de ClickHouse 1.1.54131, 2017-01-10 {#fixed-in-clickhouse-release-1-1-54131-2017-01-10} -### ¿Qué puedes encontrar en Neodigit {#cve-2018-14670} +### CVE-2018-14670 {#cve-2018-14670} Una configuración incorrecta en el paquete deb podría conducir al uso no autorizado de la base de datos. Créditos: Centro Nacional de Seguridad Cibernética del Reino Unido (NCSC) -[Artículo Original](https://clickhouse.tech/docs/es/security_changelog/) +{## [Artículo Original](https://clickhouse.tech/docs/en/security_changelog/) ##} diff --git a/docs/fa/changelog/2017.md b/docs/fa/changelog/2017.md deleted file mode 100644 index 95156754100..00000000000 --- a/docs/fa/changelog/2017.md +++ /dev/null @@ -1,265 +0,0 @@ ---- -en_copy: true ---- - -### ClickHouse release 1.1.54327, 2017-12-21 {#clickhouse-release-1-1-54327-2017-12-21} - -This release contains bug fixes for the previous release 1.1.54318: - -- Fixed bug with possible race condition in replication that could lead to data loss. This issue affects versions 1.1.54310 and 1.1.54318. If you use one of these versions with Replicated tables, the update is strongly recommended. This issue shows in logs in Warning messages like `Part ... from own log doesn't exist.` The issue is relevant even if you don’t see these messages in logs. - -### ClickHouse release 1.1.54318, 2017-11-30 {#clickhouse-release-1-1-54318-2017-11-30} - -This release contains bug fixes for the previous release 1.1.54310: - -- Fixed incorrect row deletions during merges in the SummingMergeTree engine -- Fixed a memory leak in unreplicated MergeTree engines -- Fixed performance degradation with frequent inserts in MergeTree engines -- Fixed an issue that was causing the replication queue to stop running -- Fixed rotation and archiving of server logs - -### ClickHouse release 1.1.54310, 2017-11-01 {#clickhouse-release-1-1-54310-2017-11-01} - -#### New features: {#new-features} - -- Custom partitioning key for the MergeTree family of table engines. -- [Kafka](https://clickhouse.yandex/docs/en/operations/table_engines/kafka/) table engine. -- Added support for loading [CatBoost](https://catboost.yandex/) models and applying them to data stored in ClickHouse. -- Added support for time zones with non-integer offsets from UTC. -- Added support for arithmetic operations with time intervals. -- The range of values for the Date and DateTime types is extended to the year 2105. -- Added the `CREATE MATERIALIZED VIEW x TO y` query (specifies an existing table for storing the data of a materialized view). -- Added the `ATTACH TABLE` query without arguments. -- The processing logic for Nested columns with names ending in -Map in a SummingMergeTree table was extracted to the sumMap aggregate function. You can now specify such columns explicitly. -- Max size of the IP trie dictionary is increased to 128M entries. -- Added the getSizeOfEnumType function. 
-- Added the sumWithOverflow aggregate function. -- Added support for the Cap’n Proto input format. -- You can now customize compression level when using the zstd algorithm. - -#### Backward incompatible changes: {#backward-incompatible-changes} - -- Creation of temporary tables with an engine other than Memory is not allowed. -- Explicit creation of tables with the View or MaterializedView engine is not allowed. -- During table creation, a new check verifies that the sampling key expression is included in the primary key. - -#### Bug fixes: {#bug-fixes} - -- Fixed hangups when synchronously inserting into a Distributed table. -- Fixed nonatomic adding and removing of parts in Replicated tables. -- Data inserted into a materialized view is not subjected to unnecessary deduplication. -- Executing a query to a Distributed table for which the local replica is lagging and remote replicas are unavailable does not result in an error anymore. -- Users don’t need access permissions to the `default` database to create temporary tables anymore. -- Fixed crashing when specifying the Array type without arguments. -- Fixed hangups when the disk volume containing server logs is full. -- Fixed an overflow in the toRelativeWeekNum function for the first week of the Unix epoch. - -#### Build improvements: {#build-improvements} - -- Several third-party libraries (notably Poco) were updated and converted to git submodules. - -### ClickHouse release 1.1.54304, 2017-10-19 {#clickhouse-release-1-1-54304-2017-10-19} - -#### New features: {#new-features-1} - -- TLS support in the native protocol (to enable, set `tcp_ssl_port` in `config.xml` ). - -#### Bug fixes: {#bug-fixes-1} - -- `ALTER` for replicated tables now tries to start running as soon as possible. -- Fixed crashing when reading data with the setting `preferred_block_size_bytes=0.` -- Fixed crashes of `clickhouse-client` when pressing `Page Down` -- Correct interpretation of certain complex queries with `GLOBAL IN` and `UNION ALL` -- `FREEZE PARTITION` always works atomically now. -- Empty POST requests now return a response with code 411. -- Fixed interpretation errors for expressions like `CAST(1 AS Nullable(UInt8)).` -- Fixed an error when reading `Array(Nullable(String))` columns from `MergeTree` tables. -- Fixed crashing when parsing queries like `SELECT dummy AS dummy, dummy AS b` -- Users are updated correctly with invalid `users.xml` -- Correct handling when an executable dictionary returns a non-zero response code. - -### ClickHouse release 1.1.54292, 2017-09-20 {#clickhouse-release-1-1-54292-2017-09-20} - -#### New features: {#new-features-2} - -- Added the `pointInPolygon` function for working with coordinates on a coordinate plane. -- Added the `sumMap` aggregate function for calculating the sum of arrays, similar to `SummingMergeTree`. -- Added the `trunc` function. Improved performance of the rounding functions (`round`, `floor`, `ceil`, `roundToExp2`) and corrected the logic of how they work. Changed the logic of the `roundToExp2` function for fractions and negative numbers. -- The ClickHouse executable file is now less dependent on the libc version. The same ClickHouse executable file can run on a wide variety of Linux systems. There is still a dependency when using compiled queries (with the setting `compile = 1` , which is not used by default). -- Reduced the time needed for dynamic compilation of queries. - -#### Bug fixes: {#bug-fixes-2} - -- Fixed an error that sometimes produced `part ... 
intersects previous part` messages and weakened replica consistency. -- Fixed an error that caused the server to lock up if ZooKeeper was unavailable during shutdown. -- Removed excessive logging when restoring replicas. -- Fixed an error in the UNION ALL implementation. -- Fixed an error in the concat function that occurred if the first column in a block has the Array type. -- Progress is now displayed correctly in the system.merges table. - -### ClickHouse release 1.1.54289, 2017-09-13 {#clickhouse-release-1-1-54289-2017-09-13} - -#### New features: {#new-features-3} - -- `SYSTEM` queries for server administration: `SYSTEM RELOAD DICTIONARY`, `SYSTEM RELOAD DICTIONARIES`, `SYSTEM DROP DNS CACHE`, `SYSTEM SHUTDOWN`, `SYSTEM KILL`. -- Added functions for working with arrays: `concat`, `arraySlice`, `arrayPushBack`, `arrayPushFront`, `arrayPopBack`, `arrayPopFront`. -- Added `root` and `identity` parameters for the ZooKeeper configuration. This allows you to isolate individual users on the same ZooKeeper cluster. -- Added aggregate functions `groupBitAnd`, `groupBitOr`, and `groupBitXor` (for compatibility, they are also available under the names `BIT_AND`, `BIT_OR`, and `BIT_XOR`). -- External dictionaries can be loaded from MySQL by specifying a socket in the filesystem. -- External dictionaries can be loaded from MySQL over SSL (`ssl_cert`, `ssl_key`, `ssl_ca` parameters). -- Added the `max_network_bandwidth_for_user` setting to restrict the overall bandwidth use for queries per user. -- Support for `DROP TABLE` for temporary tables. -- Support for reading `DateTime` values in Unix timestamp format from the `CSV` and `JSONEachRow` formats. -- Lagging replicas in distributed queries are now excluded by default (the default threshold is 5 minutes). -- FIFO locking is used during ALTER: an ALTER query isn’t blocked indefinitely for continuously running queries. -- Option to set `umask` in the config file. -- Improved performance for queries with `DISTINCT` . - -#### Bug fixes: {#bug-fixes-3} - -- Improved the process for deleting old nodes in ZooKeeper. Previously, old nodes sometimes didn’t get deleted if there were very frequent inserts, which caused the server to be slow to shut down, among other things. -- Fixed randomization when choosing hosts for the connection to ZooKeeper. -- Fixed the exclusion of lagging replicas in distributed queries if the replica is localhost. -- Fixed an error where a data part in a `ReplicatedMergeTree` table could be broken after running `ALTER MODIFY` on an element in a `Nested` structure. -- Fixed an error that could cause SELECT queries to “hang”. -- Improvements to distributed DDL queries. -- Fixed the query `CREATE TABLE ... AS `. -- Resolved the deadlock in the `ALTER ... CLEAR COLUMN IN PARTITION` query for `Buffer` tables. -- Fixed the invalid default value for `Enum` s (0 instead of the minimum) when using the `JSONEachRow` and `TSKV` formats. -- Resolved the appearance of zombie processes when using a dictionary with an `executable` source. -- Fixed segfault for the HEAD query. - -#### Improved workflow for developing and assembling ClickHouse: {#improved-workflow-for-developing-and-assembling-clickhouse} - -- You can use `pbuilder` to build ClickHouse. -- You can use `libc++` instead of `libstdc++` for builds on Linux. -- Added instructions for using static code analysis tools: `Coverage`, `clang-tidy`, `cppcheck`. 
-
-#### Please note when upgrading: {#please-note-when-upgrading}
-
-- There is now a higher default value for the MergeTree setting `max_bytes_to_merge_at_max_space_in_pool` (the maximum total size of data parts to merge, in bytes): it has increased from 100 GiB to 150 GiB. This might result in large merges running after the server upgrade, which could cause an increased load on the disk subsystem. If the free space available on the server is less than twice the total amount of the merges that are running, this will cause all other merges to stop running, including merges of small data parts. As a result, INSERT queries will fail with the message “Merges are processing significantly slower than inserts.” Use the `SELECT * FROM system.merges` query to monitor the situation. You can also check the `DiskSpaceReservedForMerge` metric in the `system.metrics` table, or in Graphite. You don’t need to do anything to fix this, since the issue will resolve itself once the large merges finish. If you find this unacceptable, you can restore the previous value for the `max_bytes_to_merge_at_max_space_in_pool` setting. To do this, go to the `<merge_tree>` section in config.xml, set `<max_bytes_to_merge_at_max_space_in_pool>107374182400</max_bytes_to_merge_at_max_space_in_pool>`, and restart the server.
-
-### ClickHouse release 1.1.54284, 2017-08-29 {#clickhouse-release-1-1-54284-2017-08-29}
-
-- This is a bugfix release for the previous 1.1.54282 release. It fixes leaks in the parts directory in ZooKeeper.
-
-### ClickHouse release 1.1.54282, 2017-08-23 {#clickhouse-release-1-1-54282-2017-08-23}
-
-This release contains bug fixes for the previous release 1.1.54276:
-
-- Fixed `DB::Exception: Assertion violation: !_path.empty()` when inserting into a Distributed table.
-- Fixed parsing when inserting in RowBinary format if input data starts with ‘;’.
-- Fixed errors during runtime compilation of certain aggregate functions (e.g. `groupArray()`).
-
-### ClickHouse release 1.1.54276, 2017-08-16 {#clickhouse-release-1-1-54276-2017-08-16}
-
-#### New features: {#new-features-4}
-
-- Added an optional WITH section for a SELECT query. Example query: `WITH 1+1 AS a SELECT a, a*a` (see the sketch after the main changes below).
-- INSERT can be performed synchronously in a Distributed table: OK is returned only after all the data is saved on all the shards. This is activated by the setting insert\_distributed\_sync=1.
-- Added the UUID data type for working with 16-byte identifiers.
-- Added aliases of CHAR, FLOAT, and other types for compatibility with Tableau.
-- Added the functions toYYYYMM, toYYYYMMDD, and toYYYYMMDDhhmmss for converting time into numbers.
-- You can use IP addresses (together with the hostname) to identify servers for clustered DDL queries.
-- Added support for non-constant arguments and negative offsets in the function `substring(str, pos, len)` (also shown in the sketch below).
-- Added the max\_size parameter for the `groupArray(max_size)(column)` aggregate function, and optimized its performance.
-
-#### Main changes: {#main-changes}
-
-- Security improvements: all server files are created with 0640 permissions (can be changed via config parameter).
-- Improved error messages for queries with invalid syntax.
-- Significantly reduced memory consumption and improved performance when merging large sections of MergeTree data.
-- Significantly increased the performance of data merges for the ReplacingMergeTree engine.
-- Improved performance for asynchronous inserts from a Distributed table by combining multiple source inserts. To enable this functionality, use the setting distributed\_directory\_monitor\_batch\_inserts=1.
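-
-A short sketch of the new `WITH` section and the extended `substring` function described above (the argument values are arbitrary):
-
-``` sql
-WITH 1 + 1 AS a
-SELECT a, a * a;
-
--- A negative offset counts from the end of the string: this returns 'House'.
-SELECT substring('ClickHouse', -5, 5);
-```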
-
-#### Backward incompatible changes: {#backward-incompatible-changes-1}
-
-- Changed the binary format of aggregate states of `groupArray(array_column)` functions for arrays.
-
-#### Complete list of changes: {#complete-list-of-changes}
-
-- Added the `output_format_json_quote_denormals` setting, which enables outputting nan and inf values in JSON format.
-- Optimized stream allocation when reading from a Distributed table.
-- Settings can be configured in readonly mode if the value doesn’t change.
-- Added the ability to retrieve non-integer granules of the MergeTree engine in order to meet restrictions on the block size specified in the preferred\_block\_size\_bytes setting. The purpose is to reduce the consumption of RAM and increase cache locality when processing queries from tables with large columns.
-- Efficient use of indexes that contain expressions like `toStartOfHour(x)` for conditions like `toStartOfHour(x) op constexpr`.
-- Added new settings for MergeTree engines (the merge\_tree section in config.xml):
-    - replicated\_deduplication\_window\_seconds sets the number of seconds allowed for deduplicating inserts in Replicated tables.
-    - cleanup\_delay\_period sets how often to start cleanup to remove outdated data.
-    - replicated\_can\_become\_leader can prevent a replica from becoming the leader (and assigning merges).
-- Accelerated cleanup to remove outdated data from ZooKeeper.
-- Multiple improvements and fixes for clustered DDL queries. Of particular interest is the new setting distributed\_ddl\_task\_timeout, which limits the time to wait for a response from the servers in the cluster. If a DDL request has not been performed on all hosts, the response will contain a timeout error and the request will be executed in async mode.
-- Improved display of stack traces in the server logs.
-- Added the “none” value for the compression method.
-- You can use multiple dictionaries\_config sections in config.xml.
-- It is possible to connect to MySQL through a socket in the file system.
-- The system.parts table has a new column with information about the size of marks, in bytes.
-
-#### Bug fixes: {#bug-fixes-4}
-
-- Distributed tables using a Merge table now work correctly for a SELECT query with a condition on the `_table` field.
-- Fixed a rare race condition in ReplicatedMergeTree when checking data parts.
-- Fixed possible freezing on “leader election” when starting a server.
-- The max\_replica\_delay\_for\_distributed\_queries setting was ignored when using a local replica of the data source. This has been fixed.
-- Fixed incorrect behavior of `ALTER TABLE CLEAR COLUMN IN PARTITION` when attempting to clean a non-existing column.
-- Fixed an exception in the multiIf function when using empty arrays or strings.
-- Fixed excessive memory allocations when deserializing Native format.
-- Fixed incorrect auto-update of Trie dictionaries.
-- Fixed an exception when running queries with a GROUP BY clause from a Merge table when using SAMPLE.
-- Fixed a crash of GROUP BY when using distributed\_aggregation\_memory\_efficient=1.
-- Now you can specify the database.table on the right side of IN and JOIN.
-- Too many threads were used for parallel aggregation. This has been fixed.
-- Fixed how the “if” function works with FixedString arguments.
-- SELECT worked incorrectly from a Distributed table for shards with a weight of 0. This has been fixed.
-- Running `CREATE VIEW IF EXISTS` no longer causes crashes.
-- Fixed incorrect behavior when input\_format\_skip\_unknown\_fields=1 is set and there are negative numbers.
-- Fixed an infinite loop in the `dictGetHierarchy()` function if there is some invalid data in the dictionary.
-- Fixed `Syntax error: unexpected (...)` errors when running distributed queries with subqueries in an IN or JOIN clause and Merge tables.
-- Fixed an incorrect interpretation of a SELECT query from Dictionary tables.
-- Fixed the “Cannot mremap” error when using arrays in IN and JOIN clauses with more than 2 billion elements.
-- Fixed the failover for dictionaries with MySQL as the source.
-
-#### Improved workflow for developing and assembling ClickHouse: {#improved-workflow-for-developing-and-assembling-clickhouse-1}
-
-- Builds can be assembled in Arcadia.
-- You can use gcc 7 to compile ClickHouse.
-- Parallel builds using ccache+distcc are faster now.
-
-### ClickHouse release 1.1.54245, 2017-07-04 {#clickhouse-release-1-1-54245-2017-07-04}
-
-#### New features: {#new-features-5}
-
-- Distributed DDL (for example, `CREATE TABLE ON CLUSTER`); see the sketch at the end of this section.
-- The replicated query `ALTER TABLE CLEAR COLUMN IN PARTITION`.
-- The engine for Dictionary tables (access to dictionary data in the form of a table).
-- Dictionary database engine (this type of database automatically has Dictionary tables available for all the connected external dictionaries).
-- You can check for updates to the dictionary by sending a request to the source.
-- Qualified column names.
-- Quoting identifiers using double quotation marks (also shown in the sketch at the end of this section).
-- Sessions in the HTTP interface.
-- The OPTIMIZE query for a Replicated table can run not only on the leader.
-
-#### Backward incompatible changes: {#backward-incompatible-changes-2}
-
-- Removed SET GLOBAL.
-
-#### Minor changes: {#minor-changes}
-
-- Now after an alert is triggered, the log prints the full stack trace.
-- Relaxed the verification of the number of damaged/extra data parts at startup (there were too many false positives).
-
-#### Bug fixes: {#bug-fixes-5}
-
-- Fixed a bad connection “sticking” when inserting into a Distributed table.
-- GLOBAL IN now works for a query from a Merge table that looks at a Distributed table.
-- The incorrect number of cores was detected on a Google Compute Engine virtual machine. This has been fixed.
-- Changes in how an executable source of cached external dictionaries works.
-- Fixed the comparison of strings containing null characters.
-- Fixed the comparison of Float32 primary key fields with constants.
-- Previously, an incorrect estimate of the size of a field could lead to overly large allocations.
-- Fixed a crash when querying a Nullable column added to a table using ALTER.
-- Fixed a crash when sorting by a Nullable column, if the number of rows is less than LIMIT.
-- Fixed an ORDER BY subquery consisting of only constant values.
-- Previously, a Replicated table could remain in an invalid state after a failed DROP TABLE.
-- Aliases for scalar subqueries with empty results are no longer lost.
-- Now a query that used compilation does not fail with an error if the .so file gets damaged.
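-
-A minimal sketch of the distributed DDL and double-quoted identifiers introduced in this release (the cluster name `example_cluster` and the table definition are hypothetical):
-
-``` sql
--- Distributed DDL: the statement is executed on every host of the cluster.
-CREATE TABLE default.hits ON CLUSTER example_cluster
-(
-    EventDate Date,
-    UserID UInt64
-) ENGINE = MergeTree(EventDate, (EventDate, UserID), 8192);
-
--- Identifiers may be quoted with double quotation marks; qualified column names also work.
-SELECT "UserID", hits.EventDate FROM default.hits LIMIT 10;
-```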
diff --git a/docs/fa/changelog/2018.md b/docs/fa/changelog/2018.md deleted file mode 100644 index 49bef18cbf3..00000000000 --- a/docs/fa/changelog/2018.md +++ /dev/null @@ -1,1060 +0,0 @@ ---- -en_copy: true ---- - -## ClickHouse release 18.16 {#clickhouse-release-18-16} - -### ClickHouse release 18.16.1, 2018-12-21 {#clickhouse-release-18-16-1-2018-12-21} - -#### Bug fixes: {#bug-fixes} - -- Fixed an error that led to problems with updating dictionaries with the ODBC source. [\#3825](https://github.com/ClickHouse/ClickHouse/issues/3825), [\#3829](https://github.com/ClickHouse/ClickHouse/issues/3829) -- JIT compilation of aggregate functions now works with LowCardinality columns. [\#3838](https://github.com/ClickHouse/ClickHouse/issues/3838) - -#### Improvements: {#improvements} - -- Added the `low_cardinality_allow_in_native_format` setting (enabled by default). When disabled, LowCardinality columns will be converted to ordinary columns for SELECT queries and ordinary columns will be expected for INSERT queries. [\#3879](https://github.com/ClickHouse/ClickHouse/pull/3879) - -#### Build improvements: {#build-improvements} - -- Fixes for builds on macOS and ARM. - -### ClickHouse release 18.16.0, 2018-12-14 {#clickhouse-release-18-16-0-2018-12-14} - -#### New features: {#new-features} - -- `DEFAULT` expressions are evaluated for missing fields when loading data in semi-structured input formats (`JSONEachRow`, `TSKV`). The feature is enabled with the `insert_sample_with_metadata` setting. [\#3555](https://github.com/ClickHouse/ClickHouse/pull/3555) -- The `ALTER TABLE` query now has the `MODIFY ORDER BY` action for changing the sorting key when adding or removing a table column. This is useful for tables in the `MergeTree` family that perform additional tasks when merging based on this sorting key, such as `SummingMergeTree`, `AggregatingMergeTree`, and so on. [\#3581](https://github.com/ClickHouse/ClickHouse/pull/3581) [\#3755](https://github.com/ClickHouse/ClickHouse/pull/3755) -- For tables in the `MergeTree` family, now you can specify a different sorting key (`ORDER BY`) and index (`PRIMARY KEY`). The sorting key can be longer than the index. [\#3581](https://github.com/ClickHouse/ClickHouse/pull/3581) -- Added the `hdfs` table function and the `HDFS` table engine for importing and exporting data to HDFS. [chenxing-xc](https://github.com/ClickHouse/ClickHouse/pull/3617) -- Added functions for working with base64: `base64Encode`, `base64Decode`, `tryBase64Decode`. [Alexander Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/3350) -- Now you can use a parameter to configure the precision of the `uniqCombined` aggregate function (select the number of HyperLogLog cells). [\#3406](https://github.com/ClickHouse/ClickHouse/pull/3406) -- Added the `system.contributors` table that contains the names of everyone who made commits in ClickHouse. [\#3452](https://github.com/ClickHouse/ClickHouse/pull/3452) -- Added the ability to omit the partition for the `ALTER TABLE ... FREEZE` query in order to back up all partitions at once. [\#3514](https://github.com/ClickHouse/ClickHouse/pull/3514) -- Added `dictGet` and `dictGetOrDefault` functions that don’t require specifying the type of return value. The type is determined automatically from the dictionary description. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3564) -- Now you can specify comments for a column in the table description and change it using `ALTER`. 
[\#3377](https://github.com/ClickHouse/ClickHouse/pull/3377)
-- Reading is supported for `Join` type tables with simple keys. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3728)
-- Now you can specify the options `join_use_nulls`, `max_rows_in_join`, `max_bytes_in_join`, and `join_overflow_mode` when creating a `Join` type table. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3728)
-- Added the `joinGet` function that allows you to use a `Join` type table like a dictionary. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3728)
-- Added the `partition_key`, `sorting_key`, `primary_key`, and `sampling_key` columns to the `system.tables` table in order to provide information about table keys. [\#3609](https://github.com/ClickHouse/ClickHouse/pull/3609)
-- Added the `is_in_partition_key`, `is_in_sorting_key`, `is_in_primary_key`, and `is_in_sampling_key` columns to the `system.columns` table. [\#3609](https://github.com/ClickHouse/ClickHouse/pull/3609)
-- Added the `min_time` and `max_time` columns to the `system.parts` table. These columns are populated when the partitioning key is an expression consisting of `DateTime` columns. [Emmanuel Donin de Rosière](https://github.com/ClickHouse/ClickHouse/pull/3800)
-
-#### Bug fixes: {#bug-fixes-1}
-
-- Fixes and performance improvements for the `LowCardinality` data type. `GROUP BY` using `LowCardinality(Nullable(...))`. Getting the values of `extremes`. Processing higher-order functions. `LEFT ARRAY JOIN`. Distributed `GROUP BY`. Functions that return `Array`. Execution of `ORDER BY`. Writing to `Distributed` tables (nicelulu). Backward compatibility for `INSERT` queries from old clients that implement the `Native` protocol. Support for `LowCardinality` for `JOIN`. Improved performance when working in a single stream. [\#3823](https://github.com/ClickHouse/ClickHouse/pull/3823) [\#3803](https://github.com/ClickHouse/ClickHouse/pull/3803) [\#3799](https://github.com/ClickHouse/ClickHouse/pull/3799) [\#3769](https://github.com/ClickHouse/ClickHouse/pull/3769) [\#3744](https://github.com/ClickHouse/ClickHouse/pull/3744) [\#3681](https://github.com/ClickHouse/ClickHouse/pull/3681) [\#3651](https://github.com/ClickHouse/ClickHouse/pull/3651) [\#3649](https://github.com/ClickHouse/ClickHouse/pull/3649) [\#3641](https://github.com/ClickHouse/ClickHouse/pull/3641) [\#3632](https://github.com/ClickHouse/ClickHouse/pull/3632) [\#3568](https://github.com/ClickHouse/ClickHouse/pull/3568) [\#3523](https://github.com/ClickHouse/ClickHouse/pull/3523) [\#3518](https://github.com/ClickHouse/ClickHouse/pull/3518)
-- Fixed how the `select_sequential_consistency` option works. Previously, when this setting was enabled, an incomplete result was sometimes returned after beginning to write to a new partition. [\#2863](https://github.com/ClickHouse/ClickHouse/pull/2863)
-- Databases are correctly specified when executing DDL `ON CLUSTER` queries and `ALTER UPDATE/DELETE`. [\#3772](https://github.com/ClickHouse/ClickHouse/pull/3772) [\#3460](https://github.com/ClickHouse/ClickHouse/pull/3460)
-- Databases are correctly specified for subqueries inside a VIEW. [\#3521](https://github.com/ClickHouse/ClickHouse/pull/3521)
-- Fixed a bug in `PREWHERE` with `FINAL` for `VersionedCollapsingMergeTree`. [7167bfd7](https://github.com/ClickHouse/ClickHouse/commit/7167bfd7b365538f7a91c4307ad77e552ab4e8c1)
-- Now you can use `KILL QUERY` to cancel queries that have not started yet because they are waiting for the table to be locked.
[\#3517](https://github.com/ClickHouse/ClickHouse/pull/3517) -- Corrected date and time calculations if the clocks were moved back at midnight (this happens in Iran, and happened in Moscow from 1981 to 1983). Previously, this led to the time being reset a day earlier than necessary, and also caused incorrect formatting of the date and time in text format. [\#3819](https://github.com/ClickHouse/ClickHouse/pull/3819) -- Fixed bugs in some cases of `VIEW` and subqueries that omit the database. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3521) -- Fixed a race condition when simultaneously reading from a `MATERIALIZED VIEW` and deleting a `MATERIALIZED VIEW` due to not locking the internal `MATERIALIZED VIEW`. [\#3404](https://github.com/ClickHouse/ClickHouse/pull/3404) [\#3694](https://github.com/ClickHouse/ClickHouse/pull/3694) -- Fixed the error `Lock handler cannot be nullptr.` [\#3689](https://github.com/ClickHouse/ClickHouse/pull/3689) -- Fixed query processing when the `compile_expressions` option is enabled (it’s enabled by default). Nondeterministic constant expressions like the `now` function are no longer unfolded. [\#3457](https://github.com/ClickHouse/ClickHouse/pull/3457) -- Fixed a crash when specifying a non-constant scale argument in `toDecimal32/64/128` functions. -- Fixed an error when trying to insert an array with `NULL` elements in the `Values` format into a column of type `Array` without `Nullable` (if `input_format_values_interpret_expressions` = 1). [\#3487](https://github.com/ClickHouse/ClickHouse/pull/3487) [\#3503](https://github.com/ClickHouse/ClickHouse/pull/3503) -- Fixed continuous error logging in `DDLWorker` if ZooKeeper is not available. [8f50c620](https://github.com/ClickHouse/ClickHouse/commit/8f50c620334988b28018213ec0092fe6423847e2) -- Fixed the return type for `quantile*` functions from `Date` and `DateTime` types of arguments. [\#3580](https://github.com/ClickHouse/ClickHouse/pull/3580) -- Fixed the `WITH` clause if it specifies a simple alias without expressions. [\#3570](https://github.com/ClickHouse/ClickHouse/pull/3570) -- Fixed processing of queries with named sub-queries and qualified column names when `enable_optimize_predicate_expression` is enabled. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3588) -- Fixed the error `Attempt to attach to nullptr thread group` when working with materialized views. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3623) -- Fixed a crash when passing certain incorrect arguments to the `arrayReverse` function. [73e3a7b6](https://github.com/ClickHouse/ClickHouse/commit/73e3a7b662161d6005e7727d8a711b930386b871) -- Fixed the buffer overflow in the `extractURLParameter` function. Improved performance. Added correct processing of strings containing zero bytes. [141e9799](https://github.com/ClickHouse/ClickHouse/commit/141e9799e49201d84ea8e951d1bed4fb6d3dacb5) -- Fixed buffer overflow in the `lowerUTF8` and `upperUTF8` functions. Removed the ability to execute these functions over `FixedString` type arguments. [\#3662](https://github.com/ClickHouse/ClickHouse/pull/3662) -- Fixed a rare race condition when deleting `MergeTree` tables. [\#3680](https://github.com/ClickHouse/ClickHouse/pull/3680) -- Fixed a race condition when reading from `Buffer` tables and simultaneously performing `ALTER` or `DROP` on the target tables. [\#3719](https://github.com/ClickHouse/ClickHouse/pull/3719) -- Fixed a segfault if the `max_temporary_non_const_columns` limit was exceeded. 
[\#3788](https://github.com/ClickHouse/ClickHouse/pull/3788) - -#### Improvements: {#improvements-1} - -- The server does not write the processed configuration files to the `/etc/clickhouse-server/` directory. Instead, it saves them in the `preprocessed_configs` directory inside `path`. This means that the `/etc/clickhouse-server/` directory doesn’t have write access for the `clickhouse` user, which improves security. [\#2443](https://github.com/ClickHouse/ClickHouse/pull/2443) -- The `min_merge_bytes_to_use_direct_io` option is set to 10 GiB by default. A merge that forms large parts of tables from the MergeTree family will be performed in `O_DIRECT` mode, which prevents excessive page cache eviction. [\#3504](https://github.com/ClickHouse/ClickHouse/pull/3504) -- Accelerated server start when there is a very large number of tables. [\#3398](https://github.com/ClickHouse/ClickHouse/pull/3398) -- Added a connection pool and HTTP `Keep-Alive` for connections between replicas. [\#3594](https://github.com/ClickHouse/ClickHouse/pull/3594) -- If the query syntax is invalid, the `400 Bad Request` code is returned in the `HTTP` interface (500 was returned previously). [31bc680a](https://github.com/ClickHouse/ClickHouse/commit/31bc680ac5f4bb1d0360a8ba4696fa84bb47d6ab) -- The `join_default_strictness` option is set to `ALL` by default for compatibility. [120e2cbe](https://github.com/ClickHouse/ClickHouse/commit/120e2cbe2ff4fbad626c28042d9b28781c805afe) -- Removed logging to `stderr` from the `re2` library for invalid or complex regular expressions. [\#3723](https://github.com/ClickHouse/ClickHouse/pull/3723) -- Added for the `Kafka` table engine: checks for subscriptions before beginning to read from Kafka; the kafka\_max\_block\_size setting for the table. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3396) -- The `cityHash64`, `farmHash64`, `metroHash64`, `sipHash64`, `halfMD5`, `murmurHash2_32`, `murmurHash2_64`, `murmurHash3_32`, and `murmurHash3_64` functions now work for any number of arguments and for arguments in the form of tuples. [\#3451](https://github.com/ClickHouse/ClickHouse/pull/3451) [\#3519](https://github.com/ClickHouse/ClickHouse/pull/3519) -- The `arrayReverse` function now works with any types of arrays. [73e3a7b6](https://github.com/ClickHouse/ClickHouse/commit/73e3a7b662161d6005e7727d8a711b930386b871) -- Added an optional parameter: the slot size for the `timeSlots` function. [Kirill Shvakov](https://github.com/ClickHouse/ClickHouse/pull/3724) -- For `FULL` and `RIGHT JOIN`, the `max_block_size` setting is used for a stream of non-joined data from the right table. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3699) -- Added the `--secure` command line parameter in `clickhouse-benchmark` and `clickhouse-performance-test` to enable TLS. [\#3688](https://github.com/ClickHouse/ClickHouse/pull/3688) [\#3690](https://github.com/ClickHouse/ClickHouse/pull/3690) -- Type conversion when the structure of a `Buffer` type table does not match the structure of the destination table. [Vitaly Baranov](https://github.com/ClickHouse/ClickHouse/pull/3603) -- Added the `tcp_keep_alive_timeout` option to enable keep-alive packets after inactivity for the specified time interval. [\#3441](https://github.com/ClickHouse/ClickHouse/pull/3441) -- Removed unnecessary quoting of values for the partition key in the `system.parts` table if it consists of a single column. 
[\#3652](https://github.com/ClickHouse/ClickHouse/pull/3652)
-- The modulo function works for `Date` and `DateTime` data types. [\#3385](https://github.com/ClickHouse/ClickHouse/pull/3385)
-- Added synonyms for the `POWER`, `LN`, `LCASE`, `UCASE`, `REPLACE`, `LOCATE`, `SUBSTR`, and `MID` functions. [\#3774](https://github.com/ClickHouse/ClickHouse/pull/3774) [\#3763](https://github.com/ClickHouse/ClickHouse/pull/3763) Some function names are case-insensitive for compatibility with the SQL standard. Added syntactic sugar `SUBSTRING(expr FROM start FOR length)` for compatibility with SQL. [\#3804](https://github.com/ClickHouse/ClickHouse/pull/3804)
-- Added the ability to `mlock` memory pages corresponding to `clickhouse-server` executable code to prevent it from being forced out of memory. This feature is disabled by default. [\#3553](https://github.com/ClickHouse/ClickHouse/pull/3553)
-- Improved performance when reading from `O_DIRECT` (with the `min_bytes_to_use_direct_io` option enabled). [\#3405](https://github.com/ClickHouse/ClickHouse/pull/3405)
-- Improved performance of the `dictGet...OrDefault` function for a constant key argument and a non-constant default argument. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3563)
-- The `firstSignificantSubdomain` function now processes the domains `gov`, `mil`, and `edu`. [Igor Hatarist](https://github.com/ClickHouse/ClickHouse/pull/3601) Improved performance. [\#3628](https://github.com/ClickHouse/ClickHouse/pull/3628)
-- Ability to specify custom environment variables for starting `clickhouse-server` using the `SYS-V init.d` script by defining `CLICKHOUSE_PROGRAM_ENV` in `/etc/default/clickhouse`. [Pavlo Bashynskyi](https://github.com/ClickHouse/ClickHouse/pull/3612)
-- Correct return code for the clickhouse-server init script. [\#3516](https://github.com/ClickHouse/ClickHouse/pull/3516)
-- The `system.metrics` table now has the `VersionInteger` metric, and `system.build_options` has the added line `VERSION_INTEGER`, which contains the numeric form of the ClickHouse version, such as `18016000`. [\#3644](https://github.com/ClickHouse/ClickHouse/pull/3644)
-- Removed the ability to compare the `Date` type with a number to avoid potential errors like `date = 2018-12-17`, where quotes around the date are omitted by mistake. [\#3687](https://github.com/ClickHouse/ClickHouse/pull/3687)
-- Fixed the behavior of stateful functions like `rowNumberInAllBlocks`. They previously output a result that was one number larger due to starting during query analysis. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3729)
-- If the `force_restore_data` file can’t be deleted, an error message is displayed. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3794)
-
-#### Build improvements: {#build-improvements-1}
-
-- Updated the `jemalloc` library, which fixes a potential memory leak. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3557)
-- Profiling with `jemalloc` is enabled by default in debug builds. [2cc82f5c](https://github.com/ClickHouse/ClickHouse/commit/2cc82f5cbe266421cd4c1165286c2c47e5ffcb15)
-- Added the ability to run integration tests when only `Docker` is installed on the system. [\#3650](https://github.com/ClickHouse/ClickHouse/pull/3650)
-- Added fuzz testing of expressions in SELECT queries. [\#3442](https://github.com/ClickHouse/ClickHouse/pull/3442)
-- Added a stress test for commits, which performs functional tests in parallel and in random order to detect more race conditions.
[\#3438](https://github.com/ClickHouse/ClickHouse/pull/3438)
-- Improved the method for starting clickhouse-server in a Docker image. [Elghazal Ahmed](https://github.com/ClickHouse/ClickHouse/pull/3663)
-- For a Docker image, added support for initializing databases using files in the `/docker-entrypoint-initdb.d` directory. [Konstantin Lebedev](https://github.com/ClickHouse/ClickHouse/pull/3695)
-- Fixes for builds on ARM. [\#3709](https://github.com/ClickHouse/ClickHouse/pull/3709)
-
-#### Backward incompatible changes: {#backward-incompatible-changes}
-
-- Removed the ability to compare the `Date` type with a number. Instead of `toDate('2018-12-18') = 17883`, you must use the explicit type conversion `= toDate(17883)`. [\#3687](https://github.com/ClickHouse/ClickHouse/pull/3687)
-
-## ClickHouse release 18.14 {#clickhouse-release-18-14}
-
-### ClickHouse release 18.14.19, 2018-12-19 {#clickhouse-release-18-14-19-2018-12-19}
-
-#### Bug fixes: {#bug-fixes-2}
-
-- Fixed an error that led to problems with updating dictionaries with the ODBC source. [\#3825](https://github.com/ClickHouse/ClickHouse/issues/3825), [\#3829](https://github.com/ClickHouse/ClickHouse/issues/3829)
-- Databases are correctly specified when executing DDL `ON CLUSTER` queries. [\#3460](https://github.com/ClickHouse/ClickHouse/pull/3460)
-- Fixed a segfault if the `max_temporary_non_const_columns` limit was exceeded. [\#3788](https://github.com/ClickHouse/ClickHouse/pull/3788)
-
-#### Build improvements: {#build-improvements-2}
-
-- Fixes for builds on ARM.
-
-### ClickHouse release 18.14.18, 2018-12-04 {#clickhouse-release-18-14-18-2018-12-04}
-
-#### Bug fixes: {#bug-fixes-3}
-
-- Fixed an error in the `dictGet...` functions for dictionaries of type `range` when one of the arguments is constant and the other is not. [\#3751](https://github.com/ClickHouse/ClickHouse/pull/3751)
-- Fixed an error that caused the messages `netlink: '...': attribute type 1 has an invalid length` to be printed in the Linux kernel log; this happened only on sufficiently recent Linux kernel versions. [\#3749](https://github.com/ClickHouse/ClickHouse/pull/3749)
-- Fixed a segfault in the `empty` function for arguments of the `FixedString` type. [Daniel, Dao Quang Minh](https://github.com/ClickHouse/ClickHouse/pull/3703)
-- Fixed excessive memory allocation when using a large value of the `max_query_size` setting (a memory chunk of `max_query_size` bytes was preallocated at once). [\#3720](https://github.com/ClickHouse/ClickHouse/pull/3720)
-
-#### Build changes: {#build-changes}
-
-- Fixed the build with LLVM/Clang libraries of version 7 from the OS packages (these libraries are used for runtime query compilation). [\#3582](https://github.com/ClickHouse/ClickHouse/pull/3582)
-
-### ClickHouse release 18.14.17, 2018-11-30 {#clickhouse-release-18-14-17-2018-11-30}
-
-#### Bug fixes: {#bug-fixes-4}
-
-- Fixed cases when the ODBC bridge process did not terminate with the main server process. [\#3642](https://github.com/ClickHouse/ClickHouse/pull/3642)
-- Fixed synchronous insertion into the `Distributed` table with a column list that differs from the column list of the remote table. [\#3673](https://github.com/ClickHouse/ClickHouse/pull/3673)
-- Fixed a rare race condition that could lead to a crash when dropping a MergeTree table. [\#3643](https://github.com/ClickHouse/ClickHouse/pull/3643)
-- Fixed a query deadlock in the case when query thread creation failed with the `Resource temporarily unavailable` error.
[\#3643](https://github.com/ClickHouse/ClickHouse/pull/3643)
-- Fixed parsing of the `ENGINE` clause when the `CREATE AS table` syntax was used and the `ENGINE` clause was specified before the `AS table` (the error resulted in ignoring the specified engine). [\#3692](https://github.com/ClickHouse/ClickHouse/pull/3692)
-
-### ClickHouse release 18.14.15, 2018-11-21 {#clickhouse-release-18-14-15-2018-11-21}
-
-#### Bug fixes: {#bug-fixes-5}
-
-- The size of a memory chunk was overestimated while deserializing a column of type `Array(String)`, which led to “Memory limit exceeded” errors. The issue appeared in version 18.12.13. [\#3589](https://github.com/ClickHouse/ClickHouse/issues/3589)
-
-### ClickHouse release 18.14.14, 2018-11-20 {#clickhouse-release-18-14-14-2018-11-20}
-
-#### Bug fixes: {#bug-fixes-6}
-
-- Fixed `ON CLUSTER` queries when the cluster is configured as secure (the `<secure>` flag). [\#3599](https://github.com/ClickHouse/ClickHouse/pull/3599)
-
-#### Build changes: {#build-changes-1}
-
-- Fixed build problems (llvm-7 from the system packages, macOS). [\#3582](https://github.com/ClickHouse/ClickHouse/pull/3582)
-
-### ClickHouse release 18.14.13, 2018-11-08 {#clickhouse-release-18-14-13-2018-11-08}
-
-#### Bug fixes: {#bug-fixes-7}
-
-- Fixed the `Block structure mismatch in MergingSorted stream` error. [\#3162](https://github.com/ClickHouse/ClickHouse/issues/3162)
-- Fixed `ON CLUSTER` queries in the case when secure connections were turned on in the cluster config (the `<secure>` flag). [\#3465](https://github.com/ClickHouse/ClickHouse/pull/3465)
-- Fixed an error in queries that used `SAMPLE`, `PREWHERE`, and alias columns. [\#3543](https://github.com/ClickHouse/ClickHouse/pull/3543)
-- Fixed a rare `unknown compression method` error when the `min_bytes_to_use_direct_io` setting was enabled. [\#3544](https://github.com/ClickHouse/ClickHouse/pull/3544)
-
-#### Performance improvements: {#performance-improvements}
-
-- Fixed a performance regression of queries with `GROUP BY` of columns of UInt16 or Date type when executing on AMD EPYC processors. [Igor Lapko](https://github.com/ClickHouse/ClickHouse/pull/3512)
-- Fixed a performance regression of queries that process long strings. [\#3530](https://github.com/ClickHouse/ClickHouse/pull/3530)
-
-#### Build improvements: {#build-improvements-3}
-
-- Improvements for simplifying the Arcadia build. [\#3475](https://github.com/ClickHouse/ClickHouse/pull/3475), [\#3535](https://github.com/ClickHouse/ClickHouse/pull/3535)
-
-### ClickHouse release 18.14.12, 2018-11-02 {#clickhouse-release-18-14-12-2018-11-02}
-
-#### Bug fixes: {#bug-fixes-8}
-
-- Fixed a crash on joining two unnamed subqueries. [\#3505](https://github.com/ClickHouse/ClickHouse/pull/3505)
-- Fixed generating incorrect queries (with an empty `WHERE` clause) when querying external databases. [hotid](https://github.com/ClickHouse/ClickHouse/pull/3477)
-- Fixed using an incorrect timeout value in ODBC dictionaries. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3511)
-
-### ClickHouse release 18.14.11, 2018-10-29 {#clickhouse-release-18-14-11-2018-10-29}
-
-#### Bug fixes: {#bug-fixes-9}
-
-- Fixed the error `Block structure mismatch in UNION stream: different number of columns` in LIMIT queries. [\#2156](https://github.com/ClickHouse/ClickHouse/issues/2156)
-- Fixed errors when merging data in tables containing arrays inside Nested structures.
[\#3397](https://github.com/ClickHouse/ClickHouse/pull/3397)
-- Fixed incorrect query results if the `merge_tree_uniform_read_distribution` setting is disabled (it is enabled by default). [\#3429](https://github.com/ClickHouse/ClickHouse/pull/3429)
-- Fixed an error on inserts to a Distributed table in Native format. [\#3411](https://github.com/ClickHouse/ClickHouse/issues/3411)
-
-### ClickHouse release 18.14.10, 2018-10-23 {#clickhouse-release-18-14-10-2018-10-23}
-
-- The `compile_expressions` setting (JIT compilation of expressions) is disabled by default. [\#3410](https://github.com/ClickHouse/ClickHouse/pull/3410)
-- The `enable_optimize_predicate_expression` setting is disabled by default.
-
-### ClickHouse release 18.14.9, 2018-10-16 {#clickhouse-release-18-14-9-2018-10-16}
-
-#### New features: {#new-features-1}
-
-- The `WITH CUBE` modifier for `GROUP BY` (the alternative syntax `GROUP BY CUBE(...)` is also available). [\#3172](https://github.com/ClickHouse/ClickHouse/pull/3172)
-- Added the `formatDateTime` function. [Alexandr Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/2770)
-- Added the `JDBC` table engine and `jdbc` table function (requires installing clickhouse-jdbc-bridge). [Alexandr Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/3210)
-- Added functions for working with the ISO week number: `toISOWeek`, `toISOYear`, `toStartOfISOYear`, and `toDayOfYear`. [\#3146](https://github.com/ClickHouse/ClickHouse/pull/3146)
-- Now you can use `Nullable` columns for `MySQL` and `ODBC` tables. [\#3362](https://github.com/ClickHouse/ClickHouse/pull/3362)
-- Nested data structures can be read as nested objects in `JSONEachRow` format. Added the `input_format_import_nested_json` setting. [Veloman Yunkan](https://github.com/ClickHouse/ClickHouse/pull/3144)
-- Parallel processing is available for many `MATERIALIZED VIEW`s when inserting data. See the `parallel_view_processing` setting. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3208)
-- Added the `SYSTEM FLUSH LOGS` query (forced log flushes to system tables such as `query_log`). [\#3321](https://github.com/ClickHouse/ClickHouse/pull/3321)
-- Now you can use pre-defined `database` and `table` macros when declaring `Replicated` tables. [\#3251](https://github.com/ClickHouse/ClickHouse/pull/3251)
-- Added the ability to read `Decimal` type values in engineering notation (indicating powers of ten). [\#3153](https://github.com/ClickHouse/ClickHouse/pull/3153)
-
-#### Experimental features: {#experimental-features}
-
-- Optimization of the GROUP BY clause for `LowCardinality` data types. [\#3138](https://github.com/ClickHouse/ClickHouse/pull/3138)
-- Optimized calculation of expressions for `LowCardinality` data types. [\#3200](https://github.com/ClickHouse/ClickHouse/pull/3200)
-
-#### Improvements: {#improvements-2}
-
-- Significantly reduced memory consumption for queries with `ORDER BY` and `LIMIT`. See the `max_bytes_before_remerge_sort` setting. [\#3205](https://github.com/ClickHouse/ClickHouse/pull/3205)
-- In the absence of `JOIN` (`LEFT`, `INNER`, …), `INNER JOIN` is assumed. [\#3147](https://github.com/ClickHouse/ClickHouse/pull/3147)
-- Qualified asterisks work correctly in queries with `JOIN`. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3202)
-- The `ODBC` table engine correctly chooses the method for quoting identifiers in the SQL dialect of a remote database.
[Alexandr Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/3210) -- The `compile_expressions` setting (JIT compilation of expressions) is enabled by default. -- Fixed behavior for simultaneous DROP DATABASE/TABLE IF EXISTS and CREATE DATABASE/TABLE IF NOT EXISTS. Previously, a `CREATE DATABASE ... IF NOT EXISTS` query could return the error message “File … already exists”, and the `CREATE TABLE ... IF NOT EXISTS` and `DROP TABLE IF EXISTS` queries could return `Table ... is creating or attaching right now`. [\#3101](https://github.com/ClickHouse/ClickHouse/pull/3101) -- LIKE and IN expressions with a constant right half are passed to the remote server when querying from MySQL or ODBC tables. [\#3182](https://github.com/ClickHouse/ClickHouse/pull/3182) -- Comparisons with constant expressions in a WHERE clause are passed to the remote server when querying from MySQL and ODBC tables. Previously, only comparisons with constants were passed. [\#3182](https://github.com/ClickHouse/ClickHouse/pull/3182) -- Correct calculation of row width in the terminal for `Pretty` formats, including strings with hieroglyphs. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3257). -- `ON CLUSTER` can be specified for `ALTER UPDATE` queries. -- Improved performance for reading data in `JSONEachRow` format. [\#3332](https://github.com/ClickHouse/ClickHouse/pull/3332) -- Added synonyms for the `LENGTH` and `CHARACTER_LENGTH` functions for compatibility. The `CONCAT` function is no longer case-sensitive. [\#3306](https://github.com/ClickHouse/ClickHouse/pull/3306) -- Added the `TIMESTAMP` synonym for the `DateTime` type. [\#3390](https://github.com/ClickHouse/ClickHouse/pull/3390) -- There is always space reserved for query\_id in the server logs, even if the log line is not related to a query. This makes it easier to parse server text logs with third-party tools. -- Memory consumption by a query is logged when it exceeds the next level of an integer number of gigabytes. [\#3205](https://github.com/ClickHouse/ClickHouse/pull/3205) -- Added compatibility mode for the case when the client library that uses the Native protocol sends fewer columns by mistake than the server expects for the INSERT query. This scenario was possible when using the clickhouse-cpp library. Previously, this scenario caused the server to crash. [\#3171](https://github.com/ClickHouse/ClickHouse/pull/3171) -- In a user-defined WHERE expression in `clickhouse-copier`, you can now use a `partition_key` alias (for additional filtering by source table partition). This is useful if the partitioning scheme changes during copying, but only changes slightly. [\#3166](https://github.com/ClickHouse/ClickHouse/pull/3166) -- The workflow of the `Kafka` engine has been moved to a background thread pool in order to automatically reduce the speed of data reading at high loads. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3215). -- Support for reading `Tuple` and `Nested` values of structures like `struct` in the `Cap'n'Proto format`. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3216) -- The list of top-level domains for the `firstSignificantSubdomain` function now includes the domain `biz`. [decaseal](https://github.com/ClickHouse/ClickHouse/pull/3219) -- In the configuration of external dictionaries, `null_value` is interpreted as the value of the default data type. [\#3330](https://github.com/ClickHouse/ClickHouse/pull/3330) -- Support for the `intDiv` and `intDivOrZero` functions for `Decimal`. 
[b48402e8](https://github.com/ClickHouse/ClickHouse/commit/b48402e8712e2b9b151e0eef8193811d433a1264)
-- Support for the `Date`, `DateTime`, `UUID`, and `Decimal` types as a key for the `sumMap` aggregate function. [\#3281](https://github.com/ClickHouse/ClickHouse/pull/3281)
-- Support for the `Decimal` data type in external dictionaries. [\#3324](https://github.com/ClickHouse/ClickHouse/pull/3324)
-- Support for the `Decimal` data type in `SummingMergeTree` tables. [\#3348](https://github.com/ClickHouse/ClickHouse/pull/3348)
-- Added specializations for `UUID` in `if`. [\#3366](https://github.com/ClickHouse/ClickHouse/pull/3366)
-- Reduced the number of `open` and `close` system calls when reading from a `MergeTree` table. [\#3283](https://github.com/ClickHouse/ClickHouse/pull/3283)
-- A `TRUNCATE TABLE` query can be executed on any replica (the query is passed to the leader replica). [Kirill Shvakov](https://github.com/ClickHouse/ClickHouse/pull/3375)
-
-#### Bug fixes: {#bug-fixes-10}
-
-- Fixed an issue with `Dictionary` tables for `range_hashed` dictionaries. This error occurred in version 18.12.17. [\#1702](https://github.com/ClickHouse/ClickHouse/pull/1702)
-- Fixed an error when loading `range_hashed` dictionaries (the message `Unsupported type Nullable (...)`). This error occurred in version 18.12.17. [\#3362](https://github.com/ClickHouse/ClickHouse/pull/3362)
-- Fixed errors in the `pointInPolygon` function due to the accumulation of inaccurate calculations for polygons with a large number of vertices located close to each other. [\#3331](https://github.com/ClickHouse/ClickHouse/pull/3331) [\#3341](https://github.com/ClickHouse/ClickHouse/pull/3341)
-- If after merging data parts, the checksum for the resulting part differs from the result of the same merge in another replica, the result of the merge is deleted and the data part is downloaded from the other replica (this is the correct behavior). But after downloading the data part, it couldn’t be added to the working set because of an error that the part already exists (because the data part was deleted with some delay after the merge). This led to cyclical attempts to download the same data. [\#3194](https://github.com/ClickHouse/ClickHouse/pull/3194)
-- Fixed incorrect calculation of total memory consumption by queries (because of incorrect calculation, the `max_memory_usage_for_all_queries` setting worked incorrectly and the `MemoryTracking` metric had an incorrect value). This error occurred in version 18.12.13. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3344)
-- Fixed the functionality of `CREATE TABLE ... ON CLUSTER ... AS SELECT ...`. This error occurred in version 18.12.13. [\#3247](https://github.com/ClickHouse/ClickHouse/pull/3247)
-- Fixed unnecessary preparation of data structures for `JOIN`s on the server that initiates the query if the `JOIN` is only performed on remote servers. [\#3340](https://github.com/ClickHouse/ClickHouse/pull/3340)
-- Fixed bugs in the `Kafka` engine: deadlocks after exceptions when starting to read data, and locks upon completion. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3215)
-- For `Kafka` tables, the optional `schema` parameter was not passed (the schema of the `Cap'n'Proto` format). [Vojtech Splichal](https://github.com/ClickHouse/ClickHouse/pull/3150)
-- If the ensemble of ZooKeeper servers has servers that accept the connection but then immediately close it instead of responding to the handshake, ClickHouse chooses to connect to another server.
Previously, this produced the error `Cannot read all data. Bytes read: 0. Bytes expected: 4.` and the server couldn’t start. [8218cf3a](https://github.com/ClickHouse/ClickHouse/commit/8218cf3a5f39a43401953769d6d12a0bb8d29da9) -- If the ensemble of ZooKeeper servers contains servers for which the DNS query returns an error, these servers are ignored. [17b8e209](https://github.com/ClickHouse/ClickHouse/commit/17b8e209221061325ad7ba0539f03c6e65f87f29) -- Fixed type conversion between `Date` and `DateTime` when inserting data in the `VALUES` format (if `input_format_values_interpret_expressions = 1`). Previously, the conversion was performed between the numerical value of the number of days in Unix Epoch time and the Unix timestamp, which led to unexpected results. [\#3229](https://github.com/ClickHouse/ClickHouse/pull/3229) -- Corrected type conversion between `Decimal` and integer numbers. [\#3211](https://github.com/ClickHouse/ClickHouse/pull/3211) -- Fixed errors in the `enable_optimize_predicate_expression` setting. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3231) -- Fixed a parsing error in CSV format with floating-point numbers if a non-default CSV separator is used, such as `;` [\#3155](https://github.com/ClickHouse/ClickHouse/pull/3155) -- Fixed the `arrayCumSumNonNegative` function (it does not accumulate negative values if the accumulator is less than zero). [Aleksey Studnev](https://github.com/ClickHouse/ClickHouse/pull/3163) -- Fixed how `Merge` tables work on top of `Distributed` tables when using `PREWHERE`. [\#3165](https://github.com/ClickHouse/ClickHouse/pull/3165) -- Bug fixes in the `ALTER UPDATE` query. -- Fixed bugs in the `odbc` table function that appeared in version 18.12. [\#3197](https://github.com/ClickHouse/ClickHouse/pull/3197) -- Fixed the operation of aggregate functions with `StateArray` combinators. [\#3188](https://github.com/ClickHouse/ClickHouse/pull/3188) -- Fixed a crash when dividing a `Decimal` value by zero. [69dd6609](https://github.com/ClickHouse/ClickHouse/commit/69dd6609193beb4e7acd3e6ad216eca0ccfb8179) -- Fixed output of types for operations using `Decimal` and integer arguments. [\#3224](https://github.com/ClickHouse/ClickHouse/pull/3224) -- Fixed the segfault during `GROUP BY` on `Decimal128`. [3359ba06](https://github.com/ClickHouse/ClickHouse/commit/3359ba06c39fcd05bfdb87d6c64154819621e13a) -- The `log_query_threads` setting (logging information about each thread of query execution) now takes effect only if the `log_queries` option (logging information about queries) is set to 1. Since the `log_query_threads` option is enabled by default, information about threads was previously logged even if query logging was disabled. [\#3241](https://github.com/ClickHouse/ClickHouse/pull/3241) -- Fixed an error in the distributed operation of the quantiles aggregate function (the error message `Not found column quantile...`). [292a8855](https://github.com/ClickHouse/ClickHouse/commit/292a885533b8e3b41ce8993867069d14cbd5a664) -- Fixed the compatibility problem when working on a cluster of version 18.12.17 servers and older servers at the same time. For distributed queries with GROUP BY keys of both fixed and non-fixed length, if there was a large amount of data to aggregate, the returned data was not always fully aggregated (two different rows contained the same aggregation keys). 
[\#3254](https://github.com/ClickHouse/ClickHouse/pull/3254) -- Fixed handling of substitutions in `clickhouse-performance-test`, if the query contains only part of the substitutions declared in the test. [\#3263](https://github.com/ClickHouse/ClickHouse/pull/3263) -- Fixed an error when using `FINAL` with `PREWHERE`. [\#3298](https://github.com/ClickHouse/ClickHouse/pull/3298) -- Fixed an error when using `PREWHERE` over columns that were added during `ALTER`. [\#3298](https://github.com/ClickHouse/ClickHouse/pull/3298) -- Added a check for the absence of `arrayJoin` for `DEFAULT` and `MATERIALIZED` expressions. Previously, `arrayJoin` led to an error when inserting data. [\#3337](https://github.com/ClickHouse/ClickHouse/pull/3337) -- Added a check for the absence of `arrayJoin` in a `PREWHERE` clause. Previously, this led to messages like `Size ... doesn't match` or `Unknown compression method` when executing queries. [\#3357](https://github.com/ClickHouse/ClickHouse/pull/3357) -- Fixed segfault that could occur in rare cases after optimization that replaced AND chains from equality evaluations with the corresponding IN expression. [liuyimin-bytedance](https://github.com/ClickHouse/ClickHouse/pull/3339) -- Minor corrections to `clickhouse-benchmark`: previously, client information was not sent to the server; now the number of queries executed is calculated more accurately when shutting down and for limiting the number of iterations. [\#3351](https://github.com/ClickHouse/ClickHouse/pull/3351) [\#3352](https://github.com/ClickHouse/ClickHouse/pull/3352) - -#### Backward incompatible changes: {#backward-incompatible-changes-1} - -- Removed the `allow_experimental_decimal_type` option. The `Decimal` data type is available for default use. [\#3329](https://github.com/ClickHouse/ClickHouse/pull/3329) - -## ClickHouse release 18.12 {#clickhouse-release-18-12} - -### ClickHouse release 18.12.17, 2018-09-16 {#clickhouse-release-18-12-17-2018-09-16} - -#### New features: {#new-features-2} - -- `invalidate_query` (the ability to specify a query to check whether an external dictionary needs to be updated) is implemented for the `clickhouse` source. [\#3126](https://github.com/ClickHouse/ClickHouse/pull/3126) -- Added the ability to use `UInt*`, `Int*`, and `DateTime` data types (along with the `Date` type) as a `range_hashed` external dictionary key that defines the boundaries of ranges. Now `NULL` can be used to designate an open range. [Vasily Nemkov](https://github.com/ClickHouse/ClickHouse/pull/3123) -- The `Decimal` type now supports `var*` and `stddev*` aggregate functions. [\#3129](https://github.com/ClickHouse/ClickHouse/pull/3129) -- The `Decimal` type now supports mathematical functions (`exp`, `sin` and so on.) [\#3129](https://github.com/ClickHouse/ClickHouse/pull/3129) -- The `system.part_log` table now has the `partition_id` column. [\#3089](https://github.com/ClickHouse/ClickHouse/pull/3089) - -#### Bug fixes: {#bug-fixes-11} - -- `Merge` now works correctly on `Distributed` tables. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3159) -- Fixed incompatibility (unnecessary dependency on the `glibc` version) that made it impossible to run ClickHouse on `Ubuntu Precise` and older versions. The incompatibility arose in version 18.12.13. [\#3130](https://github.com/ClickHouse/ClickHouse/pull/3130) -- Fixed errors in the `enable_optimize_predicate_expression` setting. 
[Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3107) -- Fixed a minor issue with backwards compatibility that appeared when working with a cluster of replicas on versions earlier than 18.12.13 and simultaneously creating a new replica of a table on a server with a newer version (shown in the message `Can not clone replica, because the ... updated to new ClickHouse version`, which is logical, but shouldn’t happen). [\#3122](https://github.com/ClickHouse/ClickHouse/pull/3122) - -#### Backward incompatible changes: {#backward-incompatible-changes-2} - -- The `enable_optimize_predicate_expression` option is enabled by default (which is rather optimistic). If query analysis errors occur that are related to searching for the column names, set `enable_optimize_predicate_expression` to 0. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3107) - -### ClickHouse release 18.12.14, 2018-09-13 {#clickhouse-release-18-12-14-2018-09-13} - -#### New features: {#new-features-3} - -- Added support for `ALTER UPDATE` queries. [\#3035](https://github.com/ClickHouse/ClickHouse/pull/3035) -- Added the `allow_ddl` option, which restricts the user’s access to DDL queries. [\#3104](https://github.com/ClickHouse/ClickHouse/pull/3104) -- Added the `min_merge_bytes_to_use_direct_io` option for `MergeTree` engines, which allows you to set a threshold for the total size of the merge (when above the threshold, data part files will be handled using O\_DIRECT). [\#3117](https://github.com/ClickHouse/ClickHouse/pull/3117) -- The `system.merges` system table now contains the `partition_id` column. [\#3099](https://github.com/ClickHouse/ClickHouse/pull/3099) - -#### Improvements {#improvements-3} - -- If a data part remains unchanged during mutation, it isn’t downloaded by replicas. [\#3103](https://github.com/ClickHouse/ClickHouse/pull/3103) -- Autocomplete is available for names of settings when working with `clickhouse-client`. [\#3106](https://github.com/ClickHouse/ClickHouse/pull/3106) - -#### Bug fixes: {#bug-fixes-12} - -- Added a check for the sizes of arrays that are elements of `Nested` type fields when inserting. [\#3118](https://github.com/ClickHouse/ClickHouse/pull/3118) -- Fixed an error updating external dictionaries with the `ODBC` source and `hashed` storage. This error occurred in version 18.12.13. -- Fixed a crash when creating a temporary table from a query with an `IN` condition. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3098) -- Fixed an error in aggregate functions for arrays that can have `NULL` elements. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3097) - -### ClickHouse release 18.12.13, 2018-09-10 {#clickhouse-release-18-12-13-2018-09-10} - -#### New features: {#new-features-4} - -- Added the `DECIMAL(digits, scale)` data type (`Decimal32(scale)`, `Decimal64(scale)`, `Decimal128(scale)`). To enable it, use the setting `allow_experimental_decimal_type`. [\#2846](https://github.com/ClickHouse/ClickHouse/pull/2846) [\#2970](https://github.com/ClickHouse/ClickHouse/pull/2970) [\#3008](https://github.com/ClickHouse/ClickHouse/pull/3008) [\#3047](https://github.com/ClickHouse/ClickHouse/pull/3047) -- New `WITH ROLLUP` modifier for `GROUP BY` (alternative syntax: `GROUP BY ROLLUP(...)`). [\#2948](https://github.com/ClickHouse/ClickHouse/pull/2948) -- In queries with JOIN, the star character expands to a list of columns in all tables, in compliance with the SQL standard. 
You can restore the old behavior by setting `asterisk_left_columns_only` to 1 on the user configuration level. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2787) -- Added support for JOIN with table functions. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2907) -- Autocomplete by pressing Tab in clickhouse-client. [Sergey Shcherbin](https://github.com/ClickHouse/ClickHouse/pull/2447) -- Ctrl+C in clickhouse-client clears a query that was entered. [\#2877](https://github.com/ClickHouse/ClickHouse/pull/2877) -- Added the `join_default_strictness` setting (values: `"`, `'any'`, `'all'`). This allows you to not specify `ANY` or `ALL` for `JOIN`. [\#2982](https://github.com/ClickHouse/ClickHouse/pull/2982) -- Each line of the server log related to query processing shows the query ID. [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482) -- Now you can get query execution logs in clickhouse-client (use the `send_logs_level` setting). With distributed query processing, logs are cascaded from all the servers. [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482) -- The `system.query_log` and `system.processes` (`SHOW PROCESSLIST`) tables now have information about all changed settings when you run a query (the nested structure of the `Settings` data). Added the `log_query_settings` setting. [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482) -- The `system.query_log` and `system.processes` tables now show information about the number of threads that are participating in query execution (see the `thread_numbers` column). [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482) -- Added `ProfileEvents` counters that measure the time spent on reading and writing over the network and reading and writing to disk, the number of network errors, and the time spent waiting when network bandwidth is limited. [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482) -- Added `ProfileEvents`counters that contain the system metrics from rusage (you can use them to get information about CPU usage in userspace and the kernel, page faults, and context switches), as well as taskstats metrics (use these to obtain information about I/O wait time, CPU wait time, and the amount of data read and recorded, both with and without page cache). [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482) -- The `ProfileEvents` counters are applied globally and for each query, as well as for each query execution thread, which allows you to profile resource consumption by query in detail. [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482) -- Added the `system.query_thread_log` table, which contains information about each query execution thread. Added the `log_query_threads` setting. [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482) -- The `system.metrics` and `system.events` tables now have built-in documentation. [\#3016](https://github.com/ClickHouse/ClickHouse/pull/3016) -- Added the `arrayEnumerateDense` function. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2975) -- Added the `arrayCumSumNonNegative` and `arrayDifference` functions. [Aleksey Studnev](https://github.com/ClickHouse/ClickHouse/pull/2942) -- Added the `retention` aggregate function. [Sundy Li](https://github.com/ClickHouse/ClickHouse/pull/2887) -- Now you can add (merge) states of aggregate functions by using the plus operator, and multiply the states of aggregate functions by a nonnegative constant. 
-- Tables in the MergeTree family now have the virtual column `_partition_id`. [\#3089](https://github.com/ClickHouse/ClickHouse/pull/3089) - -#### Experimental features: {#experimental-features-1} - -- Added the `LowCardinality(T)` data type. This data type automatically creates a local dictionary of values and allows data processing without unpacking the dictionary. [\#2830](https://github.com/ClickHouse/ClickHouse/pull/2830) -- Added a cache of JIT-compiled functions and a counter for the number of uses before compiling. To JIT compile expressions, enable the `compile_expressions` setting. [\#2990](https://github.com/ClickHouse/ClickHouse/pull/2990) [\#3077](https://github.com/ClickHouse/ClickHouse/pull/3077) - -#### Improvements: {#improvements-4} - -- Fixed the problem with unlimited accumulation of the replication log when there are abandoned replicas. Added an effective recovery mode for replicas with a long lag. -- Improved performance of `GROUP BY` with multiple aggregation fields when one of them is string and the others are fixed length. -- Improved performance when using `PREWHERE` and with implicit transfer of expressions in `PREWHERE`. -- Improved parsing performance for text formats (`CSV`, `TSV`). [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2977) [\#2980](https://github.com/ClickHouse/ClickHouse/pull/2980) -- Improved performance of reading strings and arrays in binary formats. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2955) -- Increased performance and reduced memory consumption for queries to `system.tables` and `system.columns` when there is a very large number of tables on a single server. [\#2953](https://github.com/ClickHouse/ClickHouse/pull/2953) -- Fixed a performance problem in the case of a large stream of queries that result in an error (the `_dl_addr` function is visible in `perf top`, but the server isn’t using much CPU). [\#2938](https://github.com/ClickHouse/ClickHouse/pull/2938) -- Conditions are pushed down into the view query (when `enable_optimize_predicate_expression` is enabled). [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2907) -- Improvements to the functionality for the `UUID` data type. [\#3074](https://github.com/ClickHouse/ClickHouse/pull/3074) [\#2985](https://github.com/ClickHouse/ClickHouse/pull/2985) -- The `UUID` data type is supported in external dictionaries. [The-Alchemist](https://github.com/ClickHouse/ClickHouse/pull/2822) -- The `visitParamExtractRaw` function works correctly with nested structures. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2974) -- When the `input_format_skip_unknown_fields` setting is enabled, object fields in `JSONEachRow` format are skipped correctly. [BlahGeek](https://github.com/ClickHouse/ClickHouse/pull/2958) -- For a `CASE` expression with conditions, you can now omit `ELSE`, which is equivalent to `ELSE NULL`. [\#2920](https://github.com/ClickHouse/ClickHouse/pull/2920) -- The operation timeout can now be configured when working with ZooKeeper. [urykhy](https://github.com/ClickHouse/ClickHouse/pull/2971) -- You can specify an offset for `LIMIT n, m` as `LIMIT n OFFSET m`. [\#2840](https://github.com/ClickHouse/ClickHouse/pull/2840) -- You can use the `SELECT TOP n` syntax as an alternative for `LIMIT`. [\#2840](https://github.com/ClickHouse/ClickHouse/pull/2840)
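The new `LIMIT`/`OFFSET` and `TOP` spellings side by side (illustrative queries against `system.numbers`):

```sql
-- equivalent ways to skip 5 rows and return the next 10:
SELECT number FROM system.numbers LIMIT 5, 10;
SELECT number FROM system.numbers LIMIT 10 OFFSET 5;

-- SELECT TOP n is an alternative spelling of LIMIT n:
SELECT TOP 10 number FROM system.numbers;
```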
-- Increased the size of the queue to write to system tables, so the `SystemLog queue is full` error doesn’t happen as often. -- The `windowFunnel` aggregate function now supports events that meet multiple conditions. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2801) -- Duplicate columns can be used in a `USING` clause for `JOIN`. [\#3006](https://github.com/ClickHouse/ClickHouse/pull/3006) -- `Pretty` formats now have a limit on column alignment by width. Use the `output_format_pretty_max_column_pad_width` setting. If a value is wider, it will still be displayed in its entirety, but the other cells in the table will not be too wide. [\#3003](https://github.com/ClickHouse/ClickHouse/pull/3003) -- The `odbc` table function now allows you to specify the database/schema name. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2885) -- Added the ability to use a username specified in the `clickhouse-client` config file. [Vladimir Kozbin](https://github.com/ClickHouse/ClickHouse/pull/2909) -- The `ZooKeeperExceptions` counter has been split into three counters: `ZooKeeperUserExceptions`, `ZooKeeperHardwareExceptions`, and `ZooKeeperOtherExceptions`. -- `ALTER DELETE` queries work for materialized views. -- Added randomization when running the cleanup thread periodically for `ReplicatedMergeTree` tables in order to avoid periodic load spikes when there are a very large number of `ReplicatedMergeTree` tables. -- Support for `ATTACH TABLE ... ON CLUSTER` queries. [\#3025](https://github.com/ClickHouse/ClickHouse/pull/3025) - -#### Bug fixes: {#bug-fixes-13} - -- Fixed an issue with `Dictionary` tables (throws the `Size of offsets doesn't match size of column` or `Unknown compression method` exception). This bug appeared in version 18.10.3. [\#2913](https://github.com/ClickHouse/ClickHouse/issues/2913) -- Fixed a bug when merging `CollapsingMergeTree` tables if one of the data parts is empty (these parts are formed during merge or `ALTER DELETE` if all data was deleted), and the `vertical` algorithm was used for the merge. [\#3049](https://github.com/ClickHouse/ClickHouse/pull/3049) -- Fixed a race condition during `DROP` or `TRUNCATE` for `Memory` tables with a simultaneous `SELECT`, which could lead to server crashes. This bug appeared in version 1.1.54388. [\#3038](https://github.com/ClickHouse/ClickHouse/pull/3038) -- Fixed the possibility of data loss when inserting into `Replicated` tables if the `Session is expired` error is returned (data loss can be detected by the `ReplicatedDataLoss` metric). This error occurred in version 1.1.54378. [\#2939](https://github.com/ClickHouse/ClickHouse/pull/2939) [\#2949](https://github.com/ClickHouse/ClickHouse/pull/2949) [\#2964](https://github.com/ClickHouse/ClickHouse/pull/2964) -- Fixed a segfault during `JOIN ... ON`. [\#3000](https://github.com/ClickHouse/ClickHouse/pull/3000) -- Fixed an error in searching for column names when the `WHERE` expression consists entirely of a qualified column name, such as `WHERE table.column`. [\#2994](https://github.com/ClickHouse/ClickHouse/pull/2994) -- Fixed the “Not found column” error that occurred when executing distributed queries if a single column consisting of an IN expression with a subquery is requested from a remote server.
[\#3087](https://github.com/ClickHouse/ClickHouse/pull/3087) -- Fixed the `Block structure mismatch in UNION stream: different number of columns` error that occurred for distributed queries if one of the shards is local and the other is not, and optimization of the move to `PREWHERE` is triggered. [\#2226](https://github.com/ClickHouse/ClickHouse/pull/2226) [\#3037](https://github.com/ClickHouse/ClickHouse/pull/3037) [\#3055](https://github.com/ClickHouse/ClickHouse/pull/3055) [\#3065](https://github.com/ClickHouse/ClickHouse/pull/3065) [\#3073](https://github.com/ClickHouse/ClickHouse/pull/3073) [\#3090](https://github.com/ClickHouse/ClickHouse/pull/3090) [\#3093](https://github.com/ClickHouse/ClickHouse/pull/3093) -- Fixed the `pointInPolygon` function for certain cases of non-convex polygons. [\#2910](https://github.com/ClickHouse/ClickHouse/pull/2910) -- Fixed the incorrect result when comparing `nan` with integers. [\#3024](https://github.com/ClickHouse/ClickHouse/pull/3024) -- Fixed an error in the `zlib-ng` library that could lead to segfault in rare cases. [\#2854](https://github.com/ClickHouse/ClickHouse/pull/2854) -- Fixed a memory leak when inserting into a table with `AggregateFunction` columns, if the state of the aggregate function is not simple (allocates memory separately), and if a single insertion request results in multiple small blocks. [\#3084](https://github.com/ClickHouse/ClickHouse/pull/3084) -- Fixed a race condition when creating and deleting the same `Buffer` or `MergeTree` table simultaneously. -- Fixed the possibility of a segfault when comparing tuples made up of certain non-trivial types, such as tuples. [\#2989](https://github.com/ClickHouse/ClickHouse/pull/2989) -- Fixed the possibility of a segfault when running certain `ON CLUSTER` queries. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2960) -- Fixed an error in the `arrayDistinct` function for `Nullable` array elements. [\#2845](https://github.com/ClickHouse/ClickHouse/pull/2845) [\#2937](https://github.com/ClickHouse/ClickHouse/pull/2937) -- The `enable_optimize_predicate_expression` option now correctly supports cases with `SELECT *`. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2929) -- Fixed the segfault when re-initializing the ZooKeeper session. [\#2917](https://github.com/ClickHouse/ClickHouse/pull/2917) -- Fixed potential blocking when working with ZooKeeper. -- Fixed incorrect code for adding nested data structures in a `SummingMergeTree`. -- When allocating memory for states of aggregate functions, alignment is correctly taken into account, which makes it possible to use operations that require alignment when implementing states of aggregate functions. [chenxing-xc](https://github.com/ClickHouse/ClickHouse/pull/2808) - -#### Security fix: {#security-fix} - -- Safe use of ODBC data sources. Interaction with ODBC drivers uses a separate `clickhouse-odbc-bridge` process. Errors in third-party ODBC drivers no longer cause problems with server stability or vulnerabilities. [\#2828](https://github.com/ClickHouse/ClickHouse/pull/2828) [\#2879](https://github.com/ClickHouse/ClickHouse/pull/2879) [\#2886](https://github.com/ClickHouse/ClickHouse/pull/2886) [\#2893](https://github.com/ClickHouse/ClickHouse/pull/2893) [\#2921](https://github.com/ClickHouse/ClickHouse/pull/2921) -- Fixed incorrect validation of the file path in the `catBoostPool` table function. 
[\#2894](https://github.com/ClickHouse/ClickHouse/pull/2894) -- The contents of system tables (`tables`, `databases`, `parts`, `columns`, `parts_columns`, `merges`, `mutations`, `replicas`, and `replication_queue`) are filtered according to the user’s configured access to databases (`allow_databases`). [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2856) - -#### Backward incompatible changes: {#backward-incompatible-changes-3} - -- In queries with JOIN, the star character expands to a list of columns in all tables, in compliance with the SQL standard. You can restore the old behavior by setting `asterisk_left_columns_only` to 1 on the user configuration level. - -#### Build changes: {#build-changes-2} - -- Most integration tests can now be run by commit. -- Code style checks can also be run by commit. -- The `memcpy` implementation is chosen correctly when building on CentOS7/Fedora. [Etienne Champetier](https://github.com/ClickHouse/ClickHouse/pull/2912) -- When using clang to build, some warnings from `-Weverything` have been added, in addition to the regular `-Wall -Wextra -Werror`. [\#2957](https://github.com/ClickHouse/ClickHouse/pull/2957) -- Debug builds use the `jemalloc` debug option. -- The interface of the library for interacting with ZooKeeper is declared abstract. [\#2950](https://github.com/ClickHouse/ClickHouse/pull/2950) - -## ClickHouse release 18.10 {#clickhouse-release-18-10} - -### ClickHouse release 18.10.3, 2018-08-13 {#clickhouse-release-18-10-3-2018-08-13} - -#### New features: {#new-features-5} - -- HTTPS can be used for replication. [\#2760](https://github.com/ClickHouse/ClickHouse/pull/2760) -- Added the functions `murmurHash2_64`, `murmurHash3_32`, `murmurHash3_64`, and `murmurHash3_128` in addition to the existing `murmurHash2_32`. [\#2791](https://github.com/ClickHouse/ClickHouse/pull/2791) -- Support for Nullable types in the ClickHouse ODBC driver (`ODBCDriver2` output format). [\#2834](https://github.com/ClickHouse/ClickHouse/pull/2834) -- Support for `UUID` in the key columns. - -#### Improvements: {#improvements-5} - -- Clusters can be removed without restarting the server when they are deleted from the config files. [\#2777](https://github.com/ClickHouse/ClickHouse/pull/2777) -- External dictionaries can be removed without restarting the server when they are removed from config files. [\#2779](https://github.com/ClickHouse/ClickHouse/pull/2779) -- Added `SETTINGS` support for the `Kafka` table engine. [Alexander Marshalov](https://github.com/ClickHouse/ClickHouse/pull/2781) -- Improvements for the `UUID` data type (not yet complete). [\#2618](https://github.com/ClickHouse/ClickHouse/pull/2618) -- Support for empty parts after merges in the `SummingMergeTree`, `CollapsingMergeTree` and `VersionedCollapsingMergeTree` engines. [\#2815](https://github.com/ClickHouse/ClickHouse/pull/2815) -- Old records of completed mutations are deleted (`ALTER DELETE`). [\#2784](https://github.com/ClickHouse/ClickHouse/pull/2784) -- Added the `system.merge_tree_settings` table. [Kirill Shvakov](https://github.com/ClickHouse/ClickHouse/pull/2841) -- The `system.tables` table now has dependency columns: `dependencies_database` and `dependencies_table`. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2851) -- Added the `max_partition_size_to_drop` config option. [\#2782](https://github.com/ClickHouse/ClickHouse/pull/2782) -- Added the `output_format_json_escape_forward_slashes` option.
[Alexander Bocharov](https://github.com/ClickHouse/ClickHouse/pull/2812) -- Added the `max_fetch_partition_retries_count` setting. [\#2831](https://github.com/ClickHouse/ClickHouse/pull/2831) -- Added the `prefer_localhost_replica` setting for disabling the preference for a local replica and going to a local replica without inter-process interaction. [\#2832](https://github.com/ClickHouse/ClickHouse/pull/2832) -- The `quantileExact` aggregate function returns `nan` in the case of aggregation on an empty `Float32` or `Float64` set. [Sundy Li](https://github.com/ClickHouse/ClickHouse/pull/2855) - -#### Bug fixes: {#bug-fixes-14} - -- Removed unnecessary escaping of the connection string parameters for ODBC, which made it impossible to establish a connection. This error occurred in version 18.6.0. -- Fixed the logic for processing `REPLACE PARTITION` commands in the replication queue. If there are two `REPLACE` commands for the same partition, the incorrect logic could cause one of them to remain in the replication queue and not be executed. [\#2814](https://github.com/ClickHouse/ClickHouse/pull/2814) -- Fixed a merge bug when all data parts were empty (parts that were formed from a merge or from `ALTER DELETE` if all data was deleted). This bug appeared in version 18.1.0. [\#2930](https://github.com/ClickHouse/ClickHouse/pull/2930) -- Fixed an error for concurrent `Set` or `Join`. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2823) -- Fixed the `Block structure mismatch in UNION stream: different number of columns` error that occurred for `UNION ALL` queries inside a sub-query if one of the `SELECT` queries contains duplicate column names. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2094) -- Fixed a memory leak if an exception occurred when connecting to a MySQL server. -- Fixed incorrect clickhouse-client response code in case of a query error. -- Fixed incorrect behavior of materialized views containing DISTINCT. [\#2795](https://github.com/ClickHouse/ClickHouse/issues/2795) - -#### Backward incompatible changes: {#backward-incompatible-changes-4} - -- Removed support for CHECK TABLE queries for Distributed tables. - -#### Build changes: {#build-changes-3} - -- The allocator has been replaced: `jemalloc` is now used instead of `tcmalloc`. In some scenarios, this increases speed up to 20%. However, there are queries that have slowed by up to 20%. Memory consumption has been reduced by approximately 10% in some scenarios, with improved stability. With highly concurrent loads, CPU usage in userspace and in system shows just a slight increase. [\#2773](https://github.com/ClickHouse/ClickHouse/pull/2773) -- Use of libressl from a submodule. [\#1983](https://github.com/ClickHouse/ClickHouse/pull/1983) [\#2807](https://github.com/ClickHouse/ClickHouse/pull/2807) -- Use of unixodbc from a submodule. [\#2789](https://github.com/ClickHouse/ClickHouse/pull/2789) -- Use of mariadb-connector-c from a submodule. [\#2785](https://github.com/ClickHouse/ClickHouse/pull/2785) -- Added functional test files to the repository that depend on the availability of test data (for the time being, without the test data itself). - -## ClickHouse release 18.6 {#clickhouse-release-18-6} - -### ClickHouse release 18.6.0, 2018-08-02 {#clickhouse-release-18-6-0-2018-08-02} - -#### New features: {#new-features-6} - -- Added support for ON expressions for the JOIN ON syntax: - `JOIN ON Expr([table.]column, ...) = Expr([table.]column, ...) [AND Expr([table.]column, ...) = Expr([table.]column, ...) 
...]` - The expression must be a chain of equalities joined by the AND operator. Each side of the equality can be an arbitrary expression over the columns of one of the tables. The use of fully qualified column names is supported (`table.name`, `database.table.name`, `table_alias.name`, `subquery_alias.name`) for the right table. [\#2742](https://github.com/ClickHouse/ClickHouse/pull/2742) -- HTTPS can be enabled for replication. [\#2760](https://github.com/ClickHouse/ClickHouse/pull/2760) - -#### Improvements: {#improvements-6} - -- The server passes the patch component of its version to the client. Data about the patch version component is in `system.processes` and `query_log`. [\#2646](https://github.com/ClickHouse/ClickHouse/pull/2646) - -## ClickHouse release 18.5 {#clickhouse-release-18-5} - -### ClickHouse release 18.5.1, 2018-07-31 {#clickhouse-release-18-5-1-2018-07-31} - -#### New features: {#new-features-7} - -- Added the hash function `murmurHash2_32` [\#2756](https://github.com/ClickHouse/ClickHouse/pull/2756). - -#### Improvements: {#improvements-7} - -- Now you can use the `from_env` [\#2741](https://github.com/ClickHouse/ClickHouse/pull/2741) attribute to set values in config files from environment variables. -- Added case-insensitive versions of the `coalesce`, `ifNull`, and `nullIf` functions [\#2752](https://github.com/ClickHouse/ClickHouse/pull/2752). - -#### Bug fixes: {#bug-fixes-15} - -- Fixed a possible bug when starting a replica [\#2759](https://github.com/ClickHouse/ClickHouse/pull/2759). - -## ClickHouse release 18.4 {#clickhouse-release-18-4} - -### ClickHouse release 18.4.0, 2018-07-28 {#clickhouse-release-18-4-0-2018-07-28} - -#### New features: {#new-features-8} - -- Added system tables: `formats`, `data_type_families`, `aggregate_function_combinators`, `table_functions`, `table_engines`, `collations` [\#2721](https://github.com/ClickHouse/ClickHouse/pull/2721). -- Added the ability to use a table function instead of a table as an argument of a `remote` or `cluster` table function [\#2708](https://github.com/ClickHouse/ClickHouse/pull/2708). -- Support for `HTTP Basic` authentication in the replication protocol [\#2727](https://github.com/ClickHouse/ClickHouse/pull/2727). -- The `has` function now allows searching for a numeric value in an array of `Enum` values [Maxim Khrisanfov](https://github.com/ClickHouse/ClickHouse/pull/2699). -- Support for adding arbitrary message separators when reading from `Kafka` [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2701). - -#### Improvements: {#improvements-8} - -- The `ALTER TABLE t DELETE WHERE` query does not rewrite data parts that were not affected by the WHERE condition [\#2694](https://github.com/ClickHouse/ClickHouse/pull/2694). -- The `use_minimalistic_checksums_in_zookeeper` option for `ReplicatedMergeTree` tables is enabled by default. This setting was added in version 1.1.54378, 2018-04-16. Versions that are older than 1.1.54378 can no longer be installed. -- Support for running `KILL` and `OPTIMIZE` queries that specify `ON CLUSTER` [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2689). - -#### Bug fixes: {#bug-fixes-16} - -- Fixed the error `Column ... is not under an aggregate function and not in GROUP BY` for aggregation with an IN expression. This bug appeared in version 18.1.0. 
([bbdd780b](https://github.com/ClickHouse/ClickHouse/commit/bbdd780be0be06a0f336775941cdd536878dd2c2)) -- Fixed a bug in the `windowFunnel` aggregate function [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2735). -- Fixed a bug in the `anyHeavy` aggregate function ([a2101df2](https://github.com/ClickHouse/ClickHouse/commit/a2101df25a6a0fba99aa71f8793d762af2b801ee)). -- Fixed server crash when using the `countArray()` aggregate function. - -#### Backward incompatible changes: {#backward-incompatible-changes-5} - -- Parameters for the `Kafka` engine were changed from `Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format[, kafka_schema, kafka_num_consumers])` to `Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format[, kafka_row_delimiter, kafka_schema, kafka_num_consumers])`. If your tables use the `kafka_schema` or `kafka_num_consumers` parameters, you have to manually edit the metadata files `path/metadata/database/table.sql` and add the `kafka_row_delimiter` parameter with the value `''`. - -## ClickHouse release 18.1 {#clickhouse-release-18-1} - -### ClickHouse release 18.1.0, 2018-07-23 {#clickhouse-release-18-1-0-2018-07-23} - -#### New features: {#new-features-9} - -- Support for the `ALTER TABLE t DELETE WHERE` query for non-replicated MergeTree tables ([\#2634](https://github.com/ClickHouse/ClickHouse/pull/2634)). -- Support for arbitrary types for the `uniq*` family of aggregate functions ([\#2010](https://github.com/ClickHouse/ClickHouse/issues/2010)). -- Support for arbitrary types in comparison operators ([\#2026](https://github.com/ClickHouse/ClickHouse/issues/2026)). -- The `users.xml` file allows setting a subnet mask in the format `10.0.0.1/255.255.255.0`. This is necessary for using masks for IPv6 networks with zeros in the middle ([\#2637](https://github.com/ClickHouse/ClickHouse/pull/2637)). -- Added the `arrayDistinct` function ([\#2670](https://github.com/ClickHouse/ClickHouse/pull/2670)). -- The SummingMergeTree engine can now work with AggregateFunction type columns ([Constantin S. Pan](https://github.com/ClickHouse/ClickHouse/pull/2566)). - -#### Improvements: {#improvements-9} - -- Changed the numbering scheme for release versions. Now the first part contains the year of release (A.D., Moscow timezone, minus 2000), the second part contains the number for major changes (increases for most releases), and the third part is the patch version. Releases are still backward compatible, unless otherwise stated in the changelog. -- Faster conversions of floating-point numbers to a string ([Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2664)). -- If some rows were skipped during an insert due to parsing errors (this is possible with the `input_allow_errors_num` and `input_allow_errors_ratio` settings enabled), the number of skipped rows is now written to the server log ([Leonardo Cecchi](https://github.com/ClickHouse/ClickHouse/pull/2669)). - -#### Bug fixes: {#bug-fixes-17} - -- Fixed the TRUNCATE command for temporary tables ([Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2624)). -- Fixed a rare deadlock in the ZooKeeper client library that occurred when there was a network error while reading the response ([c315200](https://github.com/ClickHouse/ClickHouse/commit/c315200e64b87e44bdf740707fc857d1fdf7e947)). -- Fixed an error during a CAST to Nullable types ([\#1322](https://github.com/ClickHouse/ClickHouse/issues/1322)). 
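An illustration of the construct the CAST fix above concerns (a minimal sketch, not taken from the bug report):

```sql
-- casting to Nullable types now works as expected:
SELECT CAST(1 AS Nullable(UInt8));
SELECT CAST('2018-01-01' AS Nullable(Date));
```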
-- Fixed the incorrect result of the `maxIntersection()` function when the boundaries of intervals coincided ([Michael Furmur](https://github.com/ClickHouse/ClickHouse/pull/2657)). -- Fixed incorrect transformation of the OR expression chain in a function argument ([chenxing-xc](https://github.com/ClickHouse/ClickHouse/pull/2663)). -- Fixed performance degradation for queries containing `IN (subquery)` expressions inside another subquery ([\#2571](https://github.com/ClickHouse/ClickHouse/issues/2571)). -- Fixed incompatibility between servers with different versions in distributed queries that use a `CAST` function that isn’t in uppercase letters ([fe8c4d6](https://github.com/ClickHouse/ClickHouse/commit/fe8c4d64e434cacd4ceef34faa9005129f2190a5)). -- Added missing quoting of identifiers for queries to an external DBMS ([\#2635](https://github.com/ClickHouse/ClickHouse/issues/2635)). - -#### Backward incompatible changes: {#backward-incompatible-changes-6} - -- Converting a string containing the number zero to DateTime does not work. Example: `SELECT toDateTime('0')`. This is also the reason that `DateTime DEFAULT '0'` does not work in tables, as well as `0` in dictionaries. Solution: replace `0` with `0000-00-00 00:00:00`. - -## ClickHouse release 1.1 {#clickhouse-release-1-1} - -### ClickHouse release 1.1.54394, 2018-07-12 {#clickhouse-release-1-1-54394-2018-07-12} - -#### New features: {#new-features-10} - -- Added the `histogram` aggregate function ([Mikhail Surin](https://github.com/ClickHouse/ClickHouse/pull/2521)). -- Now `OPTIMIZE TABLE ... FINAL` can be used without specifying partitions for `ReplicatedMergeTree` ([Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2600)). - -#### Bug fixes: {#bug-fixes-18} - -- Fixed a problem with a very small timeout for sockets (one second) for reading and writing when sending and downloading replicated data, which made it impossible to download larger parts if there is a load on the network or disk (it resulted in cyclical attempts to download parts). This error occurred in version 1.1.54388. -- Fixed issues when using chroot in ZooKeeper if you inserted duplicate data blocks in the table. -- The `has` function now works correctly for an array with Nullable elements ([\#2115](https://github.com/ClickHouse/ClickHouse/issues/2115)). -- The `system.tables` table now works correctly when used in distributed queries. The `metadata_modification_time` and `engine_full` columns are now non-virtual. Fixed an error that occurred if only these columns were queried from the table. -- Fixed how an empty `TinyLog` table works after inserting an empty data block ([\#2563](https://github.com/ClickHouse/ClickHouse/issues/2563)). -- The `system.zookeeper` table works if the value of the node in ZooKeeper is NULL. - -### ClickHouse release 1.1.54390, 2018-07-06 {#clickhouse-release-1-1-54390-2018-07-06} - -#### New features: {#new-features-11} - -- Queries can be sent in `multipart/form-data` format (in the `query` field), which is useful if external data is also sent for query processing ([Olga Hvostikova](https://github.com/ClickHouse/ClickHouse/pull/2490)). -- Added the ability to enable or disable processing single or double quotes when reading data in CSV format. You can configure this in the `format_csv_allow_single_quotes` and `format_csv_allow_double_quotes` settings ([Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2574)). -- Now `OPTIMIZE TABLE ... 
FINAL` can be used without specifying the partition for non-replicated variants of `MergeTree` ([Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2599)). - -#### Improvements: {#improvements-10} - -- Improved performance, reduced memory consumption, and correct memory consumption tracking with use of the IN operator when a table index could be used ([\#2584](https://github.com/ClickHouse/ClickHouse/pull/2584)). -- Removed redundant checking of checksums when adding a data part. This is important when there are a large number of replicas, because in these cases the total number of checks was equal to N^2. -- Added support for `Array(Tuple(...))` arguments for the `arrayEnumerateUniq` function ([\#2573](https://github.com/ClickHouse/ClickHouse/pull/2573)). -- Added `Nullable` support for the `runningDifference` function ([\#2594](https://github.com/ClickHouse/ClickHouse/pull/2594)). -- Improved query analysis performance when there is a very large number of expressions ([\#2572](https://github.com/ClickHouse/ClickHouse/pull/2572)). -- Faster selection of data parts for merging in `ReplicatedMergeTree` tables. Faster recovery of the ZooKeeper session ([\#2597](https://github.com/ClickHouse/ClickHouse/pull/2597)). -- The `format_version.txt` file for `MergeTree` tables is re-created if it is missing, which makes sense if ClickHouse is launched after copying the directory structure without files ([Ciprian Hacman](https://github.com/ClickHouse/ClickHouse/pull/2593)). - -#### Bug fixes: {#bug-fixes-19} - -- Fixed a bug when working with ZooKeeper that could make it impossible to recover the session and readonly states of tables before restarting the server. -- Fixed a bug when working with ZooKeeper that could result in old nodes not being deleted if the session is interrupted. -- Fixed an error in the `quantileTDigest` function for Float arguments (this bug was introduced in version 1.1.54388) ([Mikhail Surin](https://github.com/ClickHouse/ClickHouse/pull/2553)). -- Fixed a bug in the index for MergeTree tables if the primary key column is located inside the function for converting types between signed and unsigned integers of the same size ([\#2603](https://github.com/ClickHouse/ClickHouse/pull/2603)). -- Fixed segfault if `macros` are used but they aren’t in the config file ([\#2570](https://github.com/ClickHouse/ClickHouse/pull/2570)). -- Fixed switching to the default database when reconnecting the client ([\#2583](https://github.com/ClickHouse/ClickHouse/pull/2583)). -- Fixed a bug that occurred when the `use_index_for_in_with_subqueries` setting was disabled. - -#### Security fix: {#security-fix-1} - -- Sending files is no longer possible when connected to MySQL (`LOAD DATA LOCAL INFILE`). - -### ClickHouse release 1.1.54388, 2018-06-28 {#clickhouse-release-1-1-54388-2018-06-28} - -#### New features: {#new-features-12} - -- Support for the `ALTER TABLE t DELETE WHERE` query for replicated tables. Added the `system.mutations` table to track the progress of such queries. -- Support for the `ALTER TABLE t [REPLACE|ATTACH] PARTITION` query for \*MergeTree tables. -- Support for the `TRUNCATE TABLE` query ([Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2260)). -- Several new `SYSTEM` queries for replicated tables (`RESTART REPLICAS`, `SYNC REPLICA`, `[STOP|START] [MERGES|FETCHES|SENDS REPLICATED|REPLICATION QUEUES]`). 
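A few of the new `SYSTEM` queries spelled out (the name `db.table` is a placeholder):

```sql
SYSTEM SYNC REPLICA db.table; -- wait until the replica catches up
SYSTEM STOP MERGES;           -- pause background merges
SYSTEM START MERGES;
SYSTEM RESTART REPLICAS;
```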
-- Added the ability to write to a table with the MySQL engine and the corresponding table function ([sundy-li](https://github.com/ClickHouse/ClickHouse/pull/2294)). -- Added the `url()` table function and the `URL` table engine ([Alexander Sapin](https://github.com/ClickHouse/ClickHouse/pull/2501)). -- Added the `windowFunnel` aggregate function ([sundy-li](https://github.com/ClickHouse/ClickHouse/pull/2352)). -- New `startsWith` and `endsWith` functions for strings ([Vadim Plakhtinsky](https://github.com/ClickHouse/ClickHouse/pull/2429)). -- The `numbers()` table function now allows you to specify the offset ([Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2535)). -- The password to `clickhouse-client` can be entered interactively. -- Server logs can now be sent to syslog ([Alexander Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/2459)). -- Support for logging in dictionaries with a shared library source ([Alexander Sapin](https://github.com/ClickHouse/ClickHouse/pull/2472)). -- Support for custom CSV delimiters ([Ivan Zhukov](https://github.com/ClickHouse/ClickHouse/pull/2263)) -- Added the `date_time_input_format` setting. If you switch this setting to `'best_effort'`, DateTime values will be read in a wide range of formats. -- Added the `clickhouse-obfuscator` utility for data obfuscation. Usage example: publishing data used in performance tests. - -#### Experimental features: {#experimental-features-2} - -- Added the ability to calculate `and` arguments only where they are needed ([Anastasia Tsarkova](https://github.com/ClickHouse/ClickHouse/pull/2272)) -- JIT compilation to native code is now available for some expressions ([pyos](https://github.com/ClickHouse/ClickHouse/pull/2277)). - -#### Bug fixes: {#bug-fixes-20} - -- Duplicates no longer appear for a query with `DISTINCT` and `ORDER BY`. -- Queries with `ARRAY JOIN` and `arrayFilter` no longer return an incorrect result. -- Fixed an error when reading an array column from a Nested structure ([\#2066](https://github.com/ClickHouse/ClickHouse/issues/2066)). -- Fixed an error when analyzing queries with a HAVING clause like `HAVING tuple IN (...)`. -- Fixed an error when analyzing queries with recursive aliases. -- Fixed an error when reading from ReplacingMergeTree with a condition in PREWHERE that filters all rows ([\#2525](https://github.com/ClickHouse/ClickHouse/issues/2525)). -- User profile settings were not applied when using sessions in the HTTP interface. -- Fixed how settings are applied from the command line parameters in clickhouse-local. -- The ZooKeeper client library now uses the session timeout received from the server. -- Fixed a bug in the ZooKeeper client library when the client waited for the server response longer than the timeout. -- Fixed pruning of parts for queries with conditions on partition key columns ([\#2342](https://github.com/ClickHouse/ClickHouse/issues/2342)). -- Merges are now possible after `CLEAR COLUMN IN PARTITION` ([\#2315](https://github.com/ClickHouse/ClickHouse/issues/2315)). -- Type mapping in the ODBC table function has been fixed ([sundy-li](https://github.com/ClickHouse/ClickHouse/pull/2268)). -- Type comparisons have been fixed for `DateTime` with and without the time zone ([Alexander Bocharov](https://github.com/ClickHouse/ClickHouse/pull/2400)). -- Fixed syntactic parsing and formatting of the `CAST` operator. 
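Both spellings of the `CAST` operator that the parsing fix above concerns (illustrative):

```sql
SELECT CAST(1 AS UInt8);  -- standard form
SELECT CAST(1, 'UInt8');  -- functional form; see also the mixed-version cluster note below
```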
-- Fixed insertion into a materialized view for the Distributed table engine ([Babacar Diassé](https://github.com/ClickHouse/ClickHouse/pull/2411)). -- Fixed a race condition when writing data from the `Kafka` engine to materialized views ([Yangkuan Liu](https://github.com/ClickHouse/ClickHouse/pull/2448)). -- Fixed SSRF in the remote() table function. -- Fixed exit behavior of `clickhouse-client` in multiline mode ([\#2510](https://github.com/ClickHouse/ClickHouse/issues/2510)). - -#### Improvements: {#improvements-11} - -- Background tasks in replicated tables are now performed in a thread pool instead of in separate threads ([Silviu Caragea](https://github.com/ClickHouse/ClickHouse/pull/1722)). -- Improved LZ4 compression performance. -- Faster analysis for queries with a large number of JOINs and sub-queries. -- The DNS cache is now updated automatically when there are too many network errors. -- Table inserts no longer occur if the insert into one of the materialized views is not possible because it has too many parts. -- Corrected the discrepancy in the event counters `Query`, `SelectQuery`, and `InsertQuery`. -- Expressions like `tuple IN (SELECT tuple)` are allowed if the tuple types match. -- A server with replicated tables can start even if you haven’t configured ZooKeeper. -- When calculating the number of available CPU cores, limits on cgroups are now taken into account ([Atri Sharma](https://github.com/ClickHouse/ClickHouse/pull/2325)). -- Added chown for config directories in the systemd config file ([Mikhail Shiryaev](https://github.com/ClickHouse/ClickHouse/pull/2421)). - -#### Build changes: {#build-changes-4} - -- The gcc8 compiler can be used for builds. -- Added the ability to build llvm from a submodule. -- The version of the librdkafka library has been updated to v0.11.4. -- Added the ability to use the system libcpuid library. The library version has been updated to 0.4.0. -- Fixed the build using the vectorclass library ([Babacar Diassé](https://github.com/ClickHouse/ClickHouse/pull/2274)). -- CMake now generates files for ninja by default (like when using `-G Ninja`). -- Added the ability to use the libtinfo library instead of libtermcap ([Georgy Kondratiev](https://github.com/ClickHouse/ClickHouse/pull/2519)). -- Fixed a header file conflict in Fedora Rawhide ([\#2520](https://github.com/ClickHouse/ClickHouse/issues/2520)). - -#### Backward incompatible changes: {#backward-incompatible-changes-7} - -- Removed escaping in `Vertical` and `Pretty*` formats and deleted the `VerticalRaw` format. -- If servers with version 1.1.54388 (or newer) and servers with an older version are used simultaneously in a distributed query and the query has the `cast(x, 'Type')` expression without the `AS` keyword and doesn’t have the word `cast` in uppercase, an exception will be thrown with a message like `Not found column cast(0, 'UInt8') in block`. Solution: Update the server on the entire cluster. - -### ClickHouse release 1.1.54385, 2018-06-01 {#clickhouse-release-1-1-54385-2018-06-01} - -#### Bug fixes: {#bug-fixes-21} - -- Fixed an error that in some cases caused ZooKeeper operations to block. - -### ClickHouse release 1.1.54383, 2018-05-22 {#clickhouse-release-1-1-54383-2018-05-22} - -#### Bug fixes: {#bug-fixes-22} - -- Fixed a slowdown of the replication queue if a table has many replicas. 
- -### ClickHouse release 1.1.54381, 2018-05-14 {#clickhouse-release-1-1-54381-2018-05-14} - -#### Bug fixes: {#bug-fixes-23} - -- Fixed a leak of nodes in ZooKeeper when ClickHouse loses the connection to the ZooKeeper server. - -### ClickHouse release 1.1.54380, 2018-04-21 {#clickhouse-release-1-1-54380-2018-04-21} - -#### New features: {#new-features-13} - -- Added the table function `file(path, format, structure)`. An example reading bytes from `/dev/urandom`: first `ln -s /dev/urandom /var/lib/clickhouse/user_files/random`, then `clickhouse-client -q "SELECT * FROM file('random', 'RowBinary', 'd UInt8') LIMIT 10"`. - -#### Improvements: {#improvements-12} - -- Subqueries can be wrapped in `()` brackets to enhance query readability. For example: `(SELECT 1) UNION ALL (SELECT 1)`. -- Simple `SELECT` queries from the `system.processes` table are not included in the `max_concurrent_queries` limit. - -#### Bug fixes: {#bug-fixes-24} - -- Fixed incorrect behavior of the `IN` operator when selecting from a `MATERIALIZED VIEW`. -- Fixed incorrect filtering by partition index in expressions like `partition_key_column IN (...)`. -- Fixed the inability to execute an `OPTIMIZE` query on a non-leader replica if `RENAME` was performed on the table. -- Fixed the authorization error when executing `OPTIMIZE` or `ALTER` queries on a non-leader replica. -- Fixed freezing of `KILL QUERY`. -- Fixed an error in the ZooKeeper client library which led to loss of watches, freezing of the distributed DDL queue, and slowdowns in the replication queue if a non-empty `chroot` prefix is used in the ZooKeeper configuration. - -#### Backward incompatible changes: {#backward-incompatible-changes-8} - -- Removed support for expressions like `(a, b) IN (SELECT (a, b))` (you can use the equivalent expression `(a, b) IN (SELECT a, b)`). In previous releases, these expressions led to undetermined `WHERE` filtering or caused errors. - -### ClickHouse release 1.1.54378, 2018-04-16 {#clickhouse-release-1-1-54378-2018-04-16} - -#### New features: {#new-features-14} - -- Logging level can be changed without restarting the server. -- Added the `SHOW CREATE DATABASE` query. -- The `query_id` can be passed to `clickhouse-client` (elBroom). -- New setting: `max_network_bandwidth_for_all_users`. -- Added support for `ALTER TABLE ... PARTITION ...` for `MATERIALIZED VIEW`. -- Added information about the size of data parts in uncompressed form in the system table. -- Server-to-server encryption support for distributed tables (`<secure>1</secure>` in the replica config in `<remote_servers>`). -- Configuration of the table level for the `ReplicatedMergeTree` family in order to minimize the amount of data stored in ZooKeeper: `use_minimalistic_checksums_in_zookeeper = 1`. -- Configuration of the `clickhouse-client` prompt. By default, server names are now output to the prompt. The server’s display name can be changed. It’s also sent in the `X-ClickHouse-Display-Name` HTTP header (Kirill Shvakov). -- Multiple comma-separated `topics` can be specified for the `Kafka` engine (Tobias Adamson). -- When a query is stopped by `KILL QUERY` or `replace_running_query`, the client receives the `Query was canceled` exception instead of an incomplete result. - -#### Improvements: {#improvements-13} - -- `ALTER TABLE ... DROP/DETACH PARTITION` queries are run at the front of the replication queue. -- `SELECT ... FINAL` and `OPTIMIZE ... FINAL` can be used even when the table has a single data part. -- A `query_log` table is recreated on the fly if it was deleted manually (Kirill Shvakov). 
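A sketch of the 1.1.54380 `IN` rewrite mentioned above (the table `t` with columns `a` and `b` is hypothetical):

```sql
-- no longer supported:
--   SELECT count() FROM t WHERE (a, b) IN (SELECT (a, b) FROM t);
-- equivalent supported form:
SELECT count() FROM t WHERE (a, b) IN (SELECT a, b FROM t);
```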
-- The `lengthUTF8` function runs faster (zhang2014). -- Improved performance of synchronous inserts in `Distributed` tables (`insert_distributed_sync = 1`) when there is a very large number of shards. -- The server accepts the `send_timeout` and `receive_timeout` settings from the client and applies them when connecting to the client (they are applied in reverse order: the server socket’s `send_timeout` is set to the `receive_timeout` value received from the client, and vice versa). -- More robust crash recovery for asynchronous insertion into `Distributed` tables. -- The return type of the `countEqual` function changed from `UInt32` to `UInt64` (谢磊). - -#### Bug fixes: {#bug-fixes-25} - -- Fixed an error with `IN` when the left side of the expression is `Nullable`. -- Correct results are now returned when using tuples with `IN` when some of the tuple components are in the table index. -- The `max_execution_time` limit now works correctly with distributed queries. -- Fixed errors when calculating the size of composite columns in the `system.columns` table. -- Fixed an error when creating a temporary table with `CREATE TEMPORARY TABLE IF NOT EXISTS`. -- Fixed errors in `StorageKafka` (\#2075). -- Fixed server crashes from invalid arguments of certain aggregate functions. -- Fixed the error that prevented the `DETACH DATABASE` query from stopping background tasks for `ReplicatedMergeTree` tables. -- The `Too many parts` state is less likely to happen when inserting into aggregated materialized views (\#2084). -- Corrected recursive handling of substitutions in the config if a substitution must be followed by another substitution on the same level. -- Corrected the syntax in the metadata file when creating a `VIEW` that uses a query with `UNION ALL`. -- `SummingMergeTree` now works correctly for summation of nested data structures with a composite key. -- Fixed the possibility of a race condition when choosing the leader for `ReplicatedMergeTree` tables. - -#### Build changes: {#build-changes-5} - -- The build supports `ninja` instead of `make` and uses `ninja` by default for building releases. -- Renamed packages: `clickhouse-server-base` to `clickhouse-common-static`; `clickhouse-server-common` to `clickhouse-server`; `clickhouse-common-dbg` to `clickhouse-common-static-dbg`. To install, use `clickhouse-server clickhouse-client`. Packages with the old names will still load in the repositories for backward compatibility. - -#### Backward incompatible changes: {#backward-incompatible-changes-9} - -- Removed the special interpretation of an IN expression if an array is specified on the left side. Previously, the expression `arr IN (set)` was interpreted as “at least one `arr` element belongs to the `set`”. To get the same behavior in the new version, write `arrayExists(x -> x IN (set), arr)`. -- Disabled the incorrect use of the socket option `SO_REUSEPORT`, which was incorrectly enabled by default in the Poco library. Note that on Linux there is no longer any reason to simultaneously specify the addresses `::` and `0.0.0.0` for listen – use just `::`, which allows listening to the connection both over IPv4 and IPv6 (with the default kernel config settings). You can also revert to the behavior from previous versions by specifying `<listen_reuse_port>1</listen_reuse_port>` in the config. - -### ClickHouse release 1.1.54370, 2018-03-16 {#clickhouse-release-1-1-54370-2018-03-16} - -#### New features: {#new-features-15} - -- Added the `system.macros` table and automatic updating of macros when the config file is changed. 
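A quick way to see the macros feature at work (a sketch; the macro names are whatever is defined in the server config, e.g. `<macros><shard>01</shard></macros>`):

```sql
-- macros from the config are visible here and are re-read
-- when the config file changes, without a server restart:
SELECT * FROM system.macros;
```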
-- Added the `SYSTEM RELOAD CONFIG` query. -- Added the `maxIntersections(left_col, right_col)` aggregate function, which returns the maximum number of simultaneously intersecting intervals `[left; right]`. The `maxIntersectionsPosition(left, right)` function returns the beginning of the “maximum” interval. ([Michael Furmur](https://github.com/ClickHouse/ClickHouse/pull/2012)). - -#### Improvements: {#improvements-14} - -- When inserting data in a `Replicated` table, fewer requests are made to `ZooKeeper` (and most of the user-level errors have disappeared from the `ZooKeeper` log). -- Added the ability to create aliases for data sets. Example: `WITH (1, 2, 3) AS set SELECT number IN set FROM system.numbers LIMIT 10`. - -#### Bug fixes: {#bug-fixes-26} - -- Fixed the `Illegal PREWHERE` error when reading from `Merge` tables over `Distributed` tables. -- Added fixes that allow you to start clickhouse-server in IPv4-only Docker containers. -- Fixed a race condition when reading from the `system.parts_columns` table. -- Removed double buffering during a synchronous insert to a `Distributed` table, which could have caused the connection to timeout. -- Fixed a bug that caused excessively long waits for an unavailable replica before beginning a `SELECT` query. -- Fixed incorrect dates in the `system.parts` table. -- Fixed a bug that made it impossible to insert data into a `Replicated` table if `chroot` was non-empty in the configuration of the `ZooKeeper` cluster. -- Fixed the vertical merging algorithm for an empty `ORDER BY` table. -- Restored the ability to use dictionaries in queries to remote tables, even if these dictionaries are not present on the requestor server. This functionality was lost in release 1.1.54362. -- Restored the behavior for queries like `SELECT * FROM remote('server2', default.table) WHERE col IN (SELECT col2 FROM default.table)` when the right side of the `IN` should use a remote `default.table` instead of a local one. This behavior was broken in version 1.1.54358. -- Removed extraneous error-level logging of `Not found column ... in block`. - -### ClickHouse release 1.1.54362, 2018-03-11 {#clickhouse-release-1-1-54362-2018-03-11} - -#### New features: {#new-features-16} - -- Aggregation without `GROUP BY` for an empty set (such as `SELECT count(*) FROM table WHERE 0`) now returns a result with one row with null values for aggregate functions, in compliance with the SQL standard. To restore the old behavior (return an empty result), set `empty_result_for_aggregation_by_empty_set` to 1. -- Added type conversion for `UNION ALL`. Different alias names are allowed in `SELECT` positions in `UNION ALL`, in compliance with the SQL standard. -- Arbitrary expressions are supported in `LIMIT BY` clauses. Previously, it was only possible to use columns resulting from `SELECT`. -- An index of `MergeTree` tables is used when `IN` is applied to a tuple of expressions from the columns of the primary key. Example: `WHERE (UserID, EventDate) IN ((123, '2000-01-01'), ...)` (Anastasiya Tsarkova). -- Added the `clickhouse-copier` tool for copying between clusters and resharding data (beta). -- Added consistent hashing functions: `yandexConsistentHash`, `jumpConsistentHash`, `sumburConsistentHash`. They can be used as a sharding key in order to reduce the amount of network traffic during subsequent reshardings. -- Added functions: `arrayAny`, `arrayAll`, `hasAny`, `hasAll`, `arrayIntersect`, `arrayResize`. -- Added the `arrayCumSum` function (Javi Santana). 
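A minimal check of a few of the new array functions (values are illustrative):

```sql
SELECT arrayCumSum([1, 1, 1, 1]);            -- [1, 2, 3, 4]
SELECT hasAny([1, 2, 3], [3, 4]);            -- 1
SELECT arrayIntersect([1, 2, 3], [2, 3, 4]); -- [2, 3] (order may vary)
```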
-- Added the `parseDateTimeBestEffort`, `parseDateTimeBestEffortOrZero`, and `parseDateTimeBestEffortOrNull` functions to read a DateTime from a string containing text in a wide variety of possible formats. -- Data can be partially reloaded from external dictionaries during updating (load just the records in which the value of the specified field is greater than in the previous download) (Arsen Hakobyan). -- Added the `cluster` table function. Example: `cluster(cluster_name, db, table)`. The `remote` table function can accept the cluster name as the first argument, if it is specified as an identifier. -- The `remote` and `cluster` table functions can be used in `INSERT` queries. -- Added the `create_table_query` and `engine_full` virtual columns to the `system.tables` table. The `metadata_modification_time` column is virtual. -- Added the `data_path` and `metadata_path` columns to the `system.tables` and `system.databases` tables, and added the `path` column to the `system.parts` and `system.parts_columns` tables. -- Added additional information about merges in the `system.part_log` table. -- An arbitrary partitioning key can be used for the `system.query_log` table (Kirill Shvakov). -- The `SHOW TABLES` query now also shows temporary tables. Added temporary tables and the `is_temporary` column to `system.tables` (zhang2014). -- Added `DROP TEMPORARY TABLE` and `EXISTS TEMPORARY TABLE` queries (zhang2014). -- Support for `SHOW CREATE TABLE` for temporary tables (zhang2014). -- Added the `system_profile` configuration parameter for the settings used by internal processes. -- Support for loading `object_id` as an attribute in `MongoDB` dictionaries (Pavel Litvinenko). -- Reading `null` as the default value when loading data for an external dictionary with the `MongoDB` source (Pavel Litvinenko). -- Reading `DateTime` values in the `Values` format from a Unix timestamp without single quotes. -- Failover is supported in `remote` table functions for cases when some of the replicas are missing the requested table. -- Configuration settings can be overridden in the command line when you run `clickhouse-server`. Example: `clickhouse-server -- --logger.level=information`. -- Implemented the `empty` function for a `FixedString` argument: the function returns 1 if the string consists entirely of null bytes (zhang2014). -- Added the `listen_try` configuration parameter for listening to at least one of the listen addresses without quitting, if some of the addresses can’t be listened to (useful for systems with disabled support for IPv4 or IPv6). -- Added the `VersionedCollapsingMergeTree` table engine. -- Support for rows and arbitrary numeric types for the `library` dictionary source. -- `MergeTree` tables can be used without a primary key (you need to specify `ORDER BY tuple()`). -- A `Nullable` type can be `CAST` to a non-`Nullable` type if the argument is not `NULL`. -- `RENAME TABLE` can be performed for `VIEW`. -- Added the `throwIf` function. -- Added the `odbc_default_field_size` option, which allows you to extend the maximum size of the value loaded from an ODBC source (by default, it is 1024). -- The `system.processes` table and `SHOW PROCESSLIST` now have the `is_cancelled` and `peak_memory_usage` columns. 
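A quick look at the new `system.processes` columns from the last entry above (output shape is illustrative):

```sql
SELECT query_id, is_cancelled, peak_memory_usage
FROM system.processes;
```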
-- Added the `allow_distributed_ddl` option. -- Nondeterministic functions are not allowed in expressions for `MergeTree` table keys. -- Files with substitutions from `config.d` directories are loaded in alphabetical order. -- Improved performance of the `arrayElement` function in the case of a constant multidimensional array with an empty array as one of the elements. Example: `[[1], []][x]`. -- The server starts faster now when using configuration files with very large substitutions (for instance, very large lists of IP networks). -- When running a query, table valued functions run once. Previously, `remote` and `mysql` table valued functions performed the same query twice to retrieve the table structure from a remote server. -- The `MkDocs` documentation generator is used. -- When you try to delete a table column that `DEFAULT`/`MATERIALIZED` expressions of other columns depend on, an exception is thrown (zhang2014). -- Added the ability to parse an empty line in text formats as the number 0 for `Float` data types. This feature was previously available but was lost in release 1.1.54342. -- `Enum` values can be used in `min`, `max`, `sum` and some other functions. In these cases, it uses the corresponding numeric values. This feature was previously available but was lost in the release 1.1.54337. -- Added `max_expanded_ast_elements` to restrict the size of the AST after recursively expanding aliases. - -#### Bug fixes: {#bug-fixes-27} - -- Fixed cases when unnecessary columns were removed from subqueries in error, or not removed from subqueries containing `UNION ALL`. -- Fixed a bug in merges for `ReplacingMergeTree` tables. -- Fixed synchronous insertions in `Distributed` tables (`insert_distributed_sync = 1`). -- Fixed segfault for certain uses of `FULL` and `RIGHT JOIN` with duplicate columns in subqueries. -- Fixed segfault for certain uses of `replace_running_query` and `KILL QUERY`. -- Fixed the order of the `source` and `last_exception` columns in the `system.dictionaries` table. -- Fixed a bug when the `DROP DATABASE` query did not delete the file with metadata. -- Fixed the `DROP DATABASE` query for `Dictionary` databases. -- Fixed the low precision of `uniqHLL12` and `uniqCombined` functions for cardinalities greater than 100 million items (Alex Bocharov). -- Fixed the calculation of implicit default values when necessary to simultaneously calculate default explicit expressions in `INSERT` queries (zhang2014). -- Fixed a rare case when a query to a `MergeTree` table couldn’t finish (chenxing-xc). -- Fixed a crash that occurred when running a `CHECK` query for `Distributed` tables if all shards are local (chenxing.xc). -- Fixed a slight performance regression with functions that use regular expressions. -- Fixed a performance regression when creating multidimensional arrays from complex expressions. -- Fixed a bug that could cause an extra `FORMAT` section to appear in an `.sql` file with metadata. -- Fixed a bug that caused the `max_table_size_to_drop` limit to apply when trying to delete a `MATERIALIZED VIEW` looking at an explicitly specified table. -- Fixed incompatibility with old clients (old clients were sometimes sent data with the `DateTime('timezone')` type, which they do not understand). -- Fixed a bug when reading `Nested` column elements of structures that were added using `ALTER` but that are empty for the old partitions, when the conditions for these columns moved to `PREWHERE`. 
-- Fixed a bug when filtering tables by virtual `_table` columns in queries to `Merge` tables. -- Fixed a bug when using `ALIAS` columns in `Distributed` tables. -- Fixed a bug that made dynamic compilation impossible for queries with aggregate functions from the `quantile` family. -- Fixed a race condition in the query execution pipeline that occurred in very rare cases when using `Merge` tables with a large number of tables, and when using `GLOBAL` subqueries. -- Fixed a crash when passing arrays of different sizes to an `arrayReduce` function when using aggregate functions from multiple arguments. -- Prohibited the use of queries with `UNION ALL` in a `MATERIALIZED VIEW`. -- Fixed an error during initialization of the `part_log` system table when the server starts (by default, `part_log` is disabled). - -#### Backward incompatible changes: {#backward-incompatible-changes-10} - -- Removed the `distributed_ddl_allow_replicated_alter` option. This behavior is enabled by default. -- Removed the `strict_insert_defaults` setting. If you were using this functionality, write to `clickhouse-feedback@yandex-team.com`. -- Removed the `UnsortedMergeTree` engine. - -### Clickhouse Release 1.1.54343, 2018-02-05 {#clickhouse-release-1-1-54343-2018-02-05} - -- Added macros support for defining cluster names in distributed DDL queries and constructors of Distributed tables: `CREATE TABLE distr ON CLUSTER '{cluster}' (...) ENGINE = Distributed('{cluster}', 'db', 'table')`. -- Now queries like `SELECT ... FROM table WHERE expr IN (subquery)` are processed using the `table` index. -- Improved processing of duplicates when inserting to Replicated tables, so they no longer slow down execution of the replication queue. - -### Clickhouse Release 1.1.54342, 2018-01-22 {#clickhouse-release-1-1-54342-2018-01-22} - -This release contains bug fixes for the previous release 1.1.54337: - -- Fixed a regression in 1.1.54337: if the default user has readonly access, then the server refuses to start up with the message `Cannot create database in readonly mode`. -- Fixed a regression in 1.1.54337: on systems with systemd, logs are always written to syslog regardless of the configuration; the watchdog script still uses init.d. -- Fixed a regression in 1.1.54337: wrong default configuration in the Docker image. -- Fixed nondeterministic behavior of GraphiteMergeTree (you can see it in log messages `Data after merge is not byte-identical to the data on another replicas`). -- Fixed a bug that may lead to inconsistent merges after OPTIMIZE query to Replicated tables (you may see it in log messages `Part ... intersects the previous part`). -- Buffer tables now work correctly when MATERIALIZED columns are present in the destination table (by zhang2014). -- Fixed a bug in implementation of NULL. - -### Clickhouse Release 1.1.54337, 2018-01-18 {#clickhouse-release-1-1-54337-2018-01-18} - -#### New features: {#new-features-17} - -- Added support for storage of multi-dimensional arrays and tuples (`Tuple` data type) in tables. -- Support for table functions for `DESCRIBE` and `INSERT` queries. Added support for subqueries in `DESCRIBE`. Examples: `DESC TABLE remote('host', default.hits)`; `DESC TABLE (SELECT 1)`; `INSERT INTO TABLE FUNCTION remote('host', default.hits)`. Support for `INSERT INTO TABLE` in addition to `INSERT INTO`. -- Improved support for time zones. The `DateTime` data type can be annotated with the timezone that is used for parsing and formatting in text formats. Example: `DateTime('Europe/Moscow')`. 
When timezones are specified in functions for `DateTime` arguments, the return type will track the timezone, and the value will be displayed as expected (see the example after this list). -- Added the functions `toTimeZone`, `timeDiff`, `toQuarter`, `toRelativeQuarterNum`. The `toRelativeHour`/`Minute`/`Second` functions can take a value of type `Date` as an argument. The `now` function name is case-sensitive. -- Added the `toStartOfFifteenMinutes` function (Kirill Shvakov). -- Added the `clickhouse format` tool for formatting queries. -- Added the `format_schema_path` configuration parameter (Marek Vavruşa). It is used for specifying a schema in `Cap'n Proto` format. Schema files can be located only in the specified directory. -- Added support for config substitutions (`incl` and `conf.d`) for configuration of external dictionaries and models (Pavel Yakunin). -- Added a column with documentation for the `system.settings` table (Kirill Shvakov). -- Added the `system.parts_columns` table with information about column sizes in each data part of `MergeTree` tables. -- Added the `system.models` table with information about loaded `CatBoost` machine learning models. -- Added the `mysql` and `odbc` table functions and corresponding `MySQL` and `ODBC` table engines for accessing remote databases. This functionality is in the beta stage. -- Added the possibility to pass an argument of type `AggregateFunction` for the `groupArray` aggregate function (so you can create an array of states of some aggregate function). -- Removed restrictions on various combinations of aggregate function combinators. For example, you can use `avgForEachIf` as well as `avgIfForEach` aggregate functions, which have different behaviors. -- The `-ForEach` aggregate function combinator is extended for the case of aggregate functions of multiple arguments. -- Added support for aggregate functions of `Nullable` arguments even for cases when the function returns a non-`Nullable` result (added with the contribution of Silviu Caragea). Example: `groupArray`, `groupUniqArray`, `topK`. -- Added the `max_client_network_bandwidth` setting for `clickhouse-client` (Kirill Shvakov). -- Users with the `readonly = 2` setting are allowed to work with TEMPORARY tables (CREATE, DROP, INSERT…) (Kirill Shvakov). -- Added support for using multiple consumers with the `Kafka` engine. Extended configuration options for `Kafka` (Marek Vavruša). -- Added the `intExp3` and `intExp4` functions. -- Added the `sumKahan` aggregate function. -- Added the `to*Number*OrNull` functions, where `*Number*` is a numeric type. -- Added support for `WITH` clauses for an `INSERT SELECT` query (author: zhang2014). -- Added settings: `http_connection_timeout`, `http_send_timeout`, `http_receive_timeout`. In particular, these settings are used for downloading data parts for replication. Changing these settings allows for faster failover if the network is overloaded. -- Added support for `ALTER` for tables of type `Null` (Anastasiya Tsarkova). -- The `reinterpretAsString` function is extended for all data types that are stored contiguously in memory. -- Added the `--silent` option for the `clickhouse-local` tool. It suppresses printing query execution info to stderr. -- Added support for reading values of type `Date` from text in a format where the month and/or day of the month is specified using a single digit instead of two digits (Amos Bird).
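A short sketch of the time-zone annotation and `toTimeZone` from the items above; the table and column names are hypothetical:

```sql
-- DateTime stored with an explicit time zone used for text parsing and formatting.
CREATE TABLE events (t DateTime('Europe/Moscow')) ENGINE = Memory;
INSERT INTO events VALUES ('2018-01-18 12:00:00');
-- The return type tracks the time zone of the argument.
SELECT t, toTimeZone(t, 'UTC') AS t_utc FROM events;
```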
- -#### Performance optimizations: {#performance-optimizations} - -- Improved performance of the aggregate functions `min`, `max`, `any`, `anyLast`, `anyHeavy`, `argMin`, `argMax` for string arguments. -- Improved performance of the functions `isInfinite`, `isFinite`, `isNaN`, `roundToExp2`. -- Improved performance of parsing and formatting `Date` and `DateTime` type values in text format. -- Improved performance and precision of parsing floating point numbers. -- Lowered memory usage for `JOIN` in the case when the left and right parts have columns with identical names that are not contained in `USING`. -- Improved performance of the aggregate functions `varSamp`, `varPop`, `stddevSamp`, `stddevPop`, `covarSamp`, `covarPop`, `corr` at the cost of reduced computational stability. The old functions are available under the names `varSampStable`, `varPopStable`, `stddevSampStable`, `stddevPopStable`, `covarSampStable`, `covarPopStable`, `corrStable` (see the example below). - -#### Bug fixes: {#bug-fixes-28} - -- Fixed data deduplication after running a `DROP` or `DETACH PARTITION` query. In the previous version, dropping a partition and inserting the same data again was not working because inserted blocks were considered duplicates. -- Fixed a bug that could lead to incorrect interpretation of the `WHERE` clause for `CREATE MATERIALIZED VIEW` queries with `POPULATE`. -- Fixed a bug in using the `root_path` parameter in the `zookeeper_servers` configuration. -- Fixed unexpected results of passing the `Date` argument to `toStartOfDay`. -- Fixed the `addMonths` and `subtractMonths` functions and the arithmetic for `INTERVAL n MONTH` in cases when the result has the previous year. -- Added missing support for the `UUID` data type for `DISTINCT`, `JOIN`, and `uniq` aggregate functions and external dictionaries (Evgeniy Ivanov). Support for `UUID` is still incomplete. -- Fixed `SummingMergeTree` behavior in cases when the rows summed to zero. -- Various fixes for the `Kafka` engine (Marek Vavruša). -- Fixed incorrect behavior of the `Join` table engine (Amos Bird). -- Fixed incorrect allocator behavior under FreeBSD and OS X. -- The `extractAll` function now supports empty matches. -- Fixed an error that blocked usage of `libressl` instead of `openssl`. -- Fixed the `CREATE TABLE AS SELECT` query from temporary tables. -- Fixed non-atomicity of updating the replication queue. This could lead to replicas being out of sync until the server restarts. -- Fixed possible overflow in `gcd`, `lcm` and `modulo` (`%` operator) (Maks Skorokhod). -- `-preprocessed` files are now created after changing `umask` (`umask` can be changed in the config). -- Fixed a bug in the background check of parts (`MergeTreePartChecker`) when using a custom partition key. -- Fixed parsing of tuples (values of the `Tuple` data type) in text formats. -- Improved error messages about incompatible types passed to `multiIf`, `array` and some other functions. -- Redesigned support for `Nullable` types. Fixed bugs that could lead to a server crash. Fixed almost all other bugs related to `NULL` support: incorrect type conversions in INSERT SELECT, insufficient support for Nullable in HAVING and PREWHERE, `join_use_nulls` mode, Nullable types as arguments of the `OR` operator, etc. -- Fixed various bugs related to internal semantics of data types. Examples: unnecessary summing of `Enum` type fields in `SummingMergeTree`; alignment of `Enum` types in `Pretty` formats, etc. -- Stricter checks for allowed combinations of composite columns.
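A quick sketch of the stable variants mentioned in the performance list above; `system.numbers` is used only to keep the query self-contained:

```sql
-- Faster default implementation vs. the numerically stable variant
-- preserved under the *Stable name.
SELECT varSamp(x) AS fast, varSampStable(x) AS stable
FROM (SELECT toFloat64(number) AS x FROM system.numbers LIMIT 1000);
```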
-- Fixed the overflow when specifying a very large parameter for the `FixedString` data type. -- Fixed a bug in the `topK` aggregate function in a generic case. -- Added the missing check for equality of array sizes in arguments of n-ary variants of aggregate functions with an `-Array` combinator. -- Fixed a bug in `--pager` for `clickhouse-client` (author: ks1322). -- Fixed the precision of the `exp10` function. -- Fixed the behavior of the `visitParamExtract` function for better compliance with documentation. -- Fixed the crash when incorrect data types are specified. -- Fixed the behavior of `DISTINCT` in the case when all columns are constants. -- Fixed query formatting in the case of using the `tupleElement` function with a complex constant expression as the tuple element index. -- Fixed a bug in `Dictionary` tables for `range_hashed` dictionaries. -- Fixed a bug that led to excessive rows in the result of `FULL` and `RIGHT JOIN` (Amos Bird). -- Fixed a server crash when creating and removing temporary files in `config.d` directories during config reload. -- Fixed the `SYSTEM DROP DNS CACHE` query: the cache was flushed but addresses of cluster nodes were not updated. -- Fixed the behavior of `MATERIALIZED VIEW` after executing `DETACH TABLE` for the table under the view (Marek Vavruša). - -#### Build improvements: {#build-improvements-4} - -- The `pbuilder` tool is used for builds. The build process is almost completely independent of the build host environment. -- A single build is used for different OS versions. Packages and binaries have been made compatible with a wide range of Linux systems. -- Added the `clickhouse-test` package. It can be used to run functional tests. -- The source tarball can now be published to the repository. It can be used to reproduce the build without using GitHub. -- Added limited integration with Travis CI. Due to limits on build time in Travis, only the debug build is tested and a limited subset of tests are run. -- Added support for `Cap'n'Proto` in the default build. -- Changed the format of documentation sources from `reStructuredText` to `Markdown`. -- Added support for `systemd` (Vladimir Smirnov). It is disabled by default due to incompatibility with some OS images and can be enabled manually. -- For dynamic code generation, `clang` and `lld` are embedded into the `clickhouse` binary. They can also be invoked as `clickhouse clang` and `clickhouse lld`. -- Removed usage of GNU extensions from the code. Enabled the `-Wextra` option. When building with `clang` the default is `libc++` instead of `libstdc++`. -- Extracted `clickhouse_parsers` and `clickhouse_common_io` libraries to speed up builds of various tools. - -#### Backward incompatible changes: {#backward-incompatible-changes-11} - -- The format for marks in `Log` type tables that contain `Nullable` columns was changed in a backward incompatible way. If you have these tables, you should convert them to the `TinyLog` type before starting up the new server version. To do this, replace `ENGINE = Log` with `ENGINE = TinyLog` in the corresponding `.sql` file in the `metadata` directory (see the sketch below). If your table doesn’t have `Nullable` columns or if the type of your table is not `Log`, then you don’t need to do anything. -- Removed the `experimental_allow_extended_storage_definition_syntax` setting. Now this feature is enabled by default. -- The `runningIncome` function was renamed to `runningDifferenceStartingWithFirstvalue` to avoid confusion.
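A sketch of the `metadata` edit described in the `Log` marks change above; the database, table, and column are hypothetical:

```sql
-- metadata/db/t.sql, edited before starting the new server version:
ATTACH TABLE t
(
    `x` Nullable(Int32)
)
ENGINE = TinyLog -- was: ENGINE = Log
```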
-- Removed the `FROM ARRAY JOIN arr` syntax when ARRAY JOIN is specified directly after FROM with no table (Amos Bird). -- Removed the `BlockTabSeparated` format that was used solely for demonstration purposes. -- Changed the state format for aggregate functions `varSamp`, `varPop`, `stddevSamp`, `stddevPop`, `covarSamp`, `covarPop`, `corr`. If you have stored states of these aggregate functions in tables (using the `AggregateFunction` data type or materialized views with corresponding states), please write to clickhouse-feedback@yandex-team.com. -- In previous server versions there was an undocumented feature: if an aggregate function depends on parameters, you can still specify it without parameters in the AggregateFunction data type. Example: `AggregateFunction(quantiles, UInt64)` instead of `AggregateFunction(quantiles(0.5, 0.9), UInt64)`. This feature was lost. Although it was undocumented, we plan to support it again in future releases. -- Enum data types cannot be used in min/max aggregate functions. This ability will be returned in the next release. - -#### Please note when upgrading: {#please-note-when-upgrading} - -- When doing a rolling update on a cluster, at the point when some of the replicas are running the old version of ClickHouse and some are running the new version, replication is temporarily stopped and the message `unknown parameter 'shard'` appears in the log. Replication will continue after all replicas of the cluster are updated. -- If different versions of ClickHouse are running on the cluster servers, it is possible that distributed queries using the following functions will have incorrect results: `varSamp`, `varPop`, `stddevSamp`, `stddevPop`, `covarSamp`, `covarPop`, `corr`. You should update all cluster nodes. - -## [Changelog for 2017](https://github.com/ClickHouse/ClickHouse/blob/master/docs/en/changelog/2017.md) diff --git a/docs/fa/changelog/2019.md b/docs/fa/changelog/2019.md deleted file mode 100644 index 01a0756af14..00000000000 --- a/docs/fa/changelog/2019.md +++ /dev/null @@ -1,2071 +0,0 @@ ---- -en_copy: true ---- - -## ClickHouse release v19.17 {#clickhouse-release-v19-17} - -### ClickHouse release v19.17.6.36, 2019-12-27 {#clickhouse-release-v19-17-6-36-2019-12-27} - -#### Bug Fix {#bug-fix} - -- Fixed potential buffer overflow in decompress. A malicious user could pass fabricated compressed data that caused a read past the end of the buffer. This issue was found by Eldar Zaitov from the Yandex information security team. [\#8404](https://github.com/ClickHouse/ClickHouse/pull/8404) ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Fixed possible server crash (`std::terminate`) when the server cannot send or write data in JSON or XML format with values of String data type (that require UTF-8 validation) or when compressing result data with the Brotli algorithm or in some other rare cases. [\#8384](https://github.com/ClickHouse/ClickHouse/pull/8384) ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Fixed dictionaries with a source from a ClickHouse `VIEW`; now reading such dictionaries doesn’t cause the error `There is no query`. [\#8351](https://github.com/ClickHouse/ClickHouse/pull/8351) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -- Fixed checking if a client host is allowed by host\_regexp specified in users.xml.
[\#8241](https://github.com/ClickHouse/ClickHouse/pull/8241), [\#8342](https://github.com/ClickHouse/ClickHouse/pull/8342) ([Vitaly Baranov](https://github.com/vitlibar)) -- `RENAME TABLE` for a distributed table now renames the folder containing inserted data before sending to shards. This fixes an issue with successive renames `tableA->tableB`, `tableC->tableA`. [\#8306](https://github.com/ClickHouse/ClickHouse/pull/8306) ([tavplubix](https://github.com/tavplubix)) -- `range_hashed` external dictionaries created by DDL queries now allow ranges of arbitrary numeric types. [\#8275](https://github.com/ClickHouse/ClickHouse/pull/8275) ([alesapin](https://github.com/alesapin)) -- Fixed `INSERT INTO table SELECT ... FROM mysql(...)` table function. [\#8234](https://github.com/ClickHouse/ClickHouse/pull/8234) ([tavplubix](https://github.com/tavplubix)) -- Fixed segfault in `INSERT INTO TABLE FUNCTION file()` while inserting into a file which doesn’t exist. Now in this case the file is created and then the insert is processed. [\#8177](https://github.com/ClickHouse/ClickHouse/pull/8177) ([Olga Khvostikova](https://github.com/stavrolia)) -- Fixed bitmapAnd error when intersecting an aggregated bitmap and a scalar bitmap. [\#8082](https://github.com/ClickHouse/ClickHouse/pull/8082) ([Yue Huang](https://github.com/moon03432)) -- Fixed segfault when `EXISTS` query was used without `TABLE` or `DICTIONARY` qualifier, just like `EXISTS t`. [\#8213](https://github.com/ClickHouse/ClickHouse/pull/8213) ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Fixed return type for functions `rand` and `randConstant` in case of a nullable argument. Now functions always return `UInt32` and never `Nullable(UInt32)`. [\#8204](https://github.com/ClickHouse/ClickHouse/pull/8204) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -- Fixed `DROP DICTIONARY IF EXISTS db.dict`; now it doesn’t throw an exception if `db` doesn’t exist. [\#8185](https://github.com/ClickHouse/ClickHouse/pull/8185) ([Vitaly Baranov](https://github.com/vitlibar)) -- If a table wasn’t completely dropped because of a server crash, the server will try to restore and load it [\#8176](https://github.com/ClickHouse/ClickHouse/pull/8176) ([tavplubix](https://github.com/tavplubix)) -- Fixed a trivial count query for a distributed table if there are more than two shard local tables. [\#8164](https://github.com/ClickHouse/ClickHouse/pull/8164) ([小路](https://github.com/nicelulu)) -- Fixed a bug that led to a data race in DB::BlockStreamProfileInfo::calculateRowsBeforeLimit() [\#8143](https://github.com/ClickHouse/ClickHouse/pull/8143) ([Alexander Kazakov](https://github.com/Akazz)) -- Fixed `ALTER table MOVE part` executed immediately after merging the specified part, which could cause moving a part which the specified part merged into. Now it correctly moves the specified part. [\#8104](https://github.com/ClickHouse/ClickHouse/pull/8104) ([Vladimir Chebotarev](https://github.com/excitoon)) -- Expressions for dictionaries can be specified as strings now. This is useful for calculation of attributes while extracting data from non-ClickHouse sources because it allows using non-ClickHouse syntax for those expressions. [\#8098](https://github.com/ClickHouse/ClickHouse/pull/8098) ([alesapin](https://github.com/alesapin)) -- Fixed a very rare race in `clickhouse-copier` because of an overflow in ZXid.
[\#8088](https://github.com/ClickHouse/ClickHouse/pull/8088) ([Ding Xiang Fei](https://github.com/dingxiangfei2009)) -- Fixed the bug where, after a query failed (due to “Too many simultaneous queries”, for example), it would not read external tables info, and the - next request would interpret this info as the beginning of the next query, causing an error like `Unknown packet from client`. [\#8084](https://github.com/ClickHouse/ClickHouse/pull/8084) ([Azat Khuzhin](https://github.com/azat)) -- Avoid null dereference after “Unknown packet X from server” [\#8071](https://github.com/ClickHouse/ClickHouse/pull/8071) ([Azat Khuzhin](https://github.com/azat)) -- Restore support of all ICU locales, add the ability to apply collations for constant expressions and add the language name to the system.collations table. [\#8051](https://github.com/ClickHouse/ClickHouse/pull/8051) ([alesapin](https://github.com/alesapin)) -- The number of streams for reading from `StorageFile` and `StorageHDFS` is now limited, to avoid exceeding the memory limit. [\#7981](https://github.com/ClickHouse/ClickHouse/pull/7981) ([alesapin](https://github.com/alesapin)) -- Fixed `CHECK TABLE` query for `*MergeTree` tables without key. [\#7979](https://github.com/ClickHouse/ClickHouse/pull/7979) ([alesapin](https://github.com/alesapin)) -- Removed the mutation number from a part name in case there were no mutations. This change improved compatibility with older versions. [\#8250](https://github.com/ClickHouse/ClickHouse/pull/8250) ([alesapin](https://github.com/alesapin)) -- Fixed the bug where mutations were skipped for some attached parts because their data\_version was larger than the table mutation version. [\#7812](https://github.com/ClickHouse/ClickHouse/pull/7812) ([Zhichang Yu](https://github.com/yuzhichang)) -- Allow starting the server with redundant copies of parts after moving them to another device. [\#7810](https://github.com/ClickHouse/ClickHouse/pull/7810) ([Vladimir Chebotarev](https://github.com/excitoon)) -- Fixed the error “Sizes of columns doesn’t match” that might appear when using aggregate function columns. [\#7790](https://github.com/ClickHouse/ClickHouse/pull/7790) ([Boris Granveaud](https://github.com/bgranvea)) -- An exception is now thrown when WITH TIES is used alongside LIMIT BY, and it is now possible to use TOP with LIMIT BY. [\#7637](https://github.com/ClickHouse/ClickHouse/pull/7637) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) -- Fix dictionary reload if a dictionary has `invalidate_query` and updates stopped after an exception on previous update attempts. [\#8029](https://github.com/ClickHouse/ClickHouse/pull/8029) ([alesapin](https://github.com/alesapin)) - -### ClickHouse release v19.17.4.11, 2019-11-22 {#clickhouse-release-v19-17-4-11-2019-11-22} - -#### Backward Incompatible Change {#backward-incompatible-change} - -- Using column instead of AST to store scalar subquery results for better performance. Setting `enable_scalar_subquery_optimization` was added in 19.17 and it was enabled by default. It leads to errors like [this](https://github.com/ClickHouse/ClickHouse/issues/7851) during upgrade to 19.17.2 or 19.17.3 from previous versions. This setting was disabled by default in 19.17.4, to make it possible to upgrade from 19.16 and older versions without errors. [\#7392](https://github.com/ClickHouse/ClickHouse/pull/7392) ([Amos Bird](https://github.com/amosbird)) - -#### New Feature {#new-feature} - -- Add the ability to create dictionaries with DDL queries.
[\#7360](https://github.com/ClickHouse/ClickHouse/pull/7360) ([alesapin](https://github.com/alesapin)) -- Make the `bloom_filter` type of index support `LowCardinality` and `Nullable` [\#7363](https://github.com/ClickHouse/ClickHouse/issues/7363) [\#7561](https://github.com/ClickHouse/ClickHouse/pull/7561) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -- Add function `isValidJSON` to check that the passed string is valid JSON. [\#5910](https://github.com/ClickHouse/ClickHouse/issues/5910) [\#7293](https://github.com/ClickHouse/ClickHouse/pull/7293) ([Vdimir](https://github.com/Vdimir)) -- Implement `arrayCompact` function [\#7328](https://github.com/ClickHouse/ClickHouse/pull/7328) ([Memo](https://github.com/Joeywzr)) -- Created function `hex` for Decimal numbers. It works like `hex(reinterpretAsString())`, but doesn’t delete trailing zero bytes. [\#7355](https://github.com/ClickHouse/ClickHouse/pull/7355) ([Mikhail Korotov](https://github.com/millb)) -- Add `arrayFill` and `arrayReverseFill` functions, which replace elements by other elements in front/back of them in the array. [\#7380](https://github.com/ClickHouse/ClickHouse/pull/7380) ([hcz](https://github.com/hczhcz)) -- Add `CRC32IEEE()`/`CRC64()` support [\#7480](https://github.com/ClickHouse/ClickHouse/pull/7480) ([Azat Khuzhin](https://github.com/azat)) -- Implement `char` function similar to the one in [mysql](https://dev.mysql.com/doc/refman/8.0/en/string-functions.html#function_char) [\#7486](https://github.com/ClickHouse/ClickHouse/pull/7486) ([sundyli](https://github.com/sundy-li)) -- Add `bitmapTransform` function. It transforms an array of values in a bitmap to another array of values; the result is a new bitmap [\#7598](https://github.com/ClickHouse/ClickHouse/pull/7598) ([Zhichang Yu](https://github.com/yuzhichang)) -- Implemented `javaHashUTF16LE()` function [\#7651](https://github.com/ClickHouse/ClickHouse/pull/7651) ([achimbab](https://github.com/achimbab)) -- Add `_shard_num` virtual column for the Distributed engine [\#7624](https://github.com/ClickHouse/ClickHouse/pull/7624) ([Azat Khuzhin](https://github.com/azat)) - -#### Experimental Feature {#experimental-feature} - -- Support for processors (new query execution pipeline) in `MergeTree`. [\#7181](https://github.com/ClickHouse/ClickHouse/pull/7181) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) - -#### Bug Fix {#bug-fix-1} - -- Fix incorrect float parsing in `Values` [\#7817](https://github.com/ClickHouse/ClickHouse/issues/7817) [\#7870](https://github.com/ClickHouse/ClickHouse/pull/7870) ([tavplubix](https://github.com/tavplubix)) -- Fix rare deadlock which can happen when trace\_log is enabled. [\#7838](https://github.com/ClickHouse/ClickHouse/pull/7838) ([filimonov](https://github.com/filimonov)) -- Prevent message duplication when a producing Kafka table has any MVs selecting from it [\#7265](https://github.com/ClickHouse/ClickHouse/pull/7265) ([Ivan](https://github.com/abyss7)) -- Support for `Array(LowCardinality(Nullable(String)))` in `IN`. Resolves [\#7364](https://github.com/ClickHouse/ClickHouse/issues/7364) [\#7366](https://github.com/ClickHouse/ClickHouse/pull/7366) ([achimbab](https://github.com/achimbab)) -- Add handling of `SQL_TINYINT` and `SQL_BIGINT`, and fix handling of `SQL_FLOAT` data source types in ODBC Bridge.
[\#7491](https://github.com/ClickHouse/ClickHouse/pull/7491) ([Denis Glazachev](https://github.com/traceon)) -- Fix aggregation (`avg` and quantiles) over empty decimal columns [\#7431](https://github.com/ClickHouse/ClickHouse/pull/7431) ([Andrey Konyaev](https://github.com/akonyaev90)) -- Fix `INSERT` into Distributed with `MATERIALIZED` columns [\#7377](https://github.com/ClickHouse/ClickHouse/pull/7377) ([Azat Khuzhin](https://github.com/azat)) -- Make `MOVE PARTITION` work if some parts of the partition are already on the destination disk or volume [\#7434](https://github.com/ClickHouse/ClickHouse/pull/7434) ([Vladimir Chebotarev](https://github.com/excitoon)) -- Fixed bug with hardlinks failing to be created during mutations in `ReplicatedMergeTree` in multi-disk configurations. [\#7558](https://github.com/ClickHouse/ClickHouse/pull/7558) ([Vladimir Chebotarev](https://github.com/excitoon)) -- Fixed a bug with a mutation on a MergeTree when the whole part remains unchanged and the best space is found on another disk [\#7602](https://github.com/ClickHouse/ClickHouse/pull/7602) ([Vladimir Chebotarev](https://github.com/excitoon)) -- Fixed bug with `keep_free_space_ratio` not being read from disks configuration [\#7645](https://github.com/ClickHouse/ClickHouse/pull/7645) ([Vladimir Chebotarev](https://github.com/excitoon)) -- Fix a bug with tables that contain only `Tuple` columns or columns with complex paths. Fixes [7541](https://github.com/ClickHouse/ClickHouse/issues/7541). [\#7545](https://github.com/ClickHouse/ClickHouse/pull/7545) ([alesapin](https://github.com/alesapin)) -- Do not account memory for Buffer engine in max\_memory\_usage limit [\#7552](https://github.com/ClickHouse/ClickHouse/pull/7552) ([Azat Khuzhin](https://github.com/azat)) -- Fix final mark usage in `MergeTree` tables ordered by `tuple()`. In rare cases it could lead to `Can't adjust last granule` error while selecting. [\#7639](https://github.com/ClickHouse/ClickHouse/pull/7639) ([Anton Popov](https://github.com/CurtizJ)) -- Fix bug in mutations that have a predicate with actions that require context (for example, functions for JSON), which could lead to crashes or strange exceptions. [\#7664](https://github.com/ClickHouse/ClickHouse/pull/7664) ([alesapin](https://github.com/alesapin)) -- Fix mismatch of database and table names escaping in `data/` and `shadow/` directories [\#7575](https://github.com/ClickHouse/ClickHouse/pull/7575) ([Alexander Burmak](https://github.com/Alex-Burmak)) -- Support duplicated keys in RIGHT\|FULL JOINs, e.g. `ON t.x = u.x AND t.x = u.y`. Fix crash in this case. [\#7586](https://github.com/ClickHouse/ClickHouse/pull/7586) ([Artem Zuikov](https://github.com/4ertus2)) -- Fix `Not found column in block` when joining on expression with RIGHT or FULL JOIN. [\#7641](https://github.com/ClickHouse/ClickHouse/pull/7641) ([Artem Zuikov](https://github.com/4ertus2)) -- One more attempt to fix infinite loop in `PrettySpace` format [\#7591](https://github.com/ClickHouse/ClickHouse/pull/7591) ([Olga Khvostikova](https://github.com/stavrolia)) -- Fix bug in `concat` function when all arguments were `FixedString` of the same size. [\#7635](https://github.com/ClickHouse/ClickHouse/pull/7635) ([alesapin](https://github.com/alesapin)) -- Fixed exception in case of using 1 argument while defining S3, URL and HDFS storages.
[\#7618](https://github.com/ClickHouse/ClickHouse/pull/7618) ([Vladimir Chebotarev](https://github.com/excitoon)) -- Fix scope of the InterpreterSelectQuery for views with query [\#7601](https://github.com/ClickHouse/ClickHouse/pull/7601) ([Azat Khuzhin](https://github.com/azat)) - -#### Improvement {#improvement} - -- `Nullable` columns are recognized and NULL values are handled correctly by the ODBC bridge [\#7402](https://github.com/ClickHouse/ClickHouse/pull/7402) ([Vasily Nemkov](https://github.com/Enmk)) -- Write current batch for distributed send atomically [\#7600](https://github.com/ClickHouse/ClickHouse/pull/7600) ([Azat Khuzhin](https://github.com/azat)) -- Throw an exception if we cannot detect the table for a column name in a query. [\#7358](https://github.com/ClickHouse/ClickHouse/pull/7358) ([Artem Zuikov](https://github.com/4ertus2)) -- Add `merge_max_block_size` setting to `MergeTreeSettings` [\#7412](https://github.com/ClickHouse/ClickHouse/pull/7412) ([Artem Zuikov](https://github.com/4ertus2)) -- Queries with `HAVING` and without `GROUP BY` assume grouping by a constant. So, `SELECT 1 HAVING 1` now returns a result. [\#7496](https://github.com/ClickHouse/ClickHouse/pull/7496) ([Amos Bird](https://github.com/amosbird)) -- Support parsing `(X,)` as a tuple, similar to Python. [\#7501](https://github.com/ClickHouse/ClickHouse/pull/7501), [\#7562](https://github.com/ClickHouse/ClickHouse/pull/7562) ([Amos Bird](https://github.com/amosbird)) -- Make the `range` function behave almost like the Python one. [\#7518](https://github.com/ClickHouse/ClickHouse/pull/7518) ([sundyli](https://github.com/sundy-li)) -- Add `constraints` columns to table `system.settings` [\#7553](https://github.com/ClickHouse/ClickHouse/pull/7553) ([Vitaly Baranov](https://github.com/vitlibar)) -- Better `Null` format for the TCP handler, so that it’s possible to use `select ignore() from table format Null` for performance measurement via clickhouse-client [\#7606](https://github.com/ClickHouse/ClickHouse/pull/7606) ([Amos Bird](https://github.com/amosbird)) -- Queries like `CREATE TABLE ... AS (SELECT (1, 2))` are parsed correctly [\#7542](https://github.com/ClickHouse/ClickHouse/pull/7542) ([hcz](https://github.com/hczhcz)) - -#### Performance Improvement {#performance-improvement} - -- The performance of aggregation over short string keys is improved. [\#6243](https://github.com/ClickHouse/ClickHouse/pull/6243) ([Alexander Kuzmenkov](https://github.com/akuzm), [Amos Bird](https://github.com/amosbird)) -- Run another pass of syntax/expression analysis to get potential optimizations after constant predicates are folded. [\#7497](https://github.com/ClickHouse/ClickHouse/pull/7497) ([Amos Bird](https://github.com/amosbird)) -- Use storage meta info to evaluate trivial `SELECT count() FROM table;` [\#7510](https://github.com/ClickHouse/ClickHouse/pull/7510) ([Amos Bird](https://github.com/amosbird), [alexey-milovidov](https://github.com/alexey-milovidov)) -- Vectorize processing `arrayReduce` similar to Aggregator `addBatch`. [\#7608](https://github.com/ClickHouse/ClickHouse/pull/7608) ([Amos Bird](https://github.com/amosbird)) -- Minor improvements in performance of `Kafka` consumption [\#7475](https://github.com/ClickHouse/ClickHouse/pull/7475) ([Ivan](https://github.com/abyss7)) - -#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement} - -- Add support for cross-compiling to the CPU architecture AARCH64. Refactor packager script.
[\#7370](https://github.com/ClickHouse/ClickHouse/pull/7370) [\#7539](https://github.com/ClickHouse/ClickHouse/pull/7539) ([Ivan](https://github.com/abyss7)) -- Unpack darwin-x86\_64 and linux-aarch64 toolchains into mounted Docker volume when building packages [\#7534](https://github.com/ClickHouse/ClickHouse/pull/7534) ([Ivan](https://github.com/abyss7)) -- Update Docker Image for Binary Packager [\#7474](https://github.com/ClickHouse/ClickHouse/pull/7474) ([Ivan](https://github.com/abyss7)) -- Fixed compile errors on MacOS Catalina [\#7585](https://github.com/ClickHouse/ClickHouse/pull/7585) ([Ernest Poletaev](https://github.com/ernestp)) -- Some refactoring in query analysis logic: split complex class into several simple ones. [\#7454](https://github.com/ClickHouse/ClickHouse/pull/7454) ([Artem Zuikov](https://github.com/4ertus2)) -- Fix build without submodules [\#7295](https://github.com/ClickHouse/ClickHouse/pull/7295) ([proller](https://github.com/proller)) -- Better `add_globs` in CMake files [\#7418](https://github.com/ClickHouse/ClickHouse/pull/7418) ([Amos Bird](https://github.com/amosbird)) -- Remove hardcoded paths in `unwind` target [\#7460](https://github.com/ClickHouse/ClickHouse/pull/7460) ([Konstantin Podshumok](https://github.com/podshumok)) -- Allow using mysql format without ssl [\#7524](https://github.com/ClickHouse/ClickHouse/pull/7524) ([proller](https://github.com/proller)) - -#### Other {#other} - -- Added ANTLR4 grammar for ClickHouse SQL dialect [\#7595](https://github.com/ClickHouse/ClickHouse/issues/7595) [\#7596](https://github.com/ClickHouse/ClickHouse/pull/7596) ([alexey-milovidov](https://github.com/alexey-milovidov)) - -## ClickHouse release v19.16 {#clickhouse-release-v19-16} - -#### ClickHouse release v19.16.14.65, 2020-03-25 - -* Fixed up a bug in batched calculations of ternary logical operations on multiple arguments (more than 10). [#8718](https://github.com/ClickHouse/ClickHouse/pull/8718) ([Alexander Kazakov](https://github.com/Akazz)) This bugfix was backported to version 19.16 by a special request from Altinity. - -#### ClickHouse release v19.16.14.65, 2020-03-05 {#clickhouse-release-v19-16-14-65-2020-03-05} - -- Fix distributed subqueries incompatibility with older ClickHouse versions. Fixes [\#7851](https://github.com/ClickHouse/ClickHouse/issues/7851) - [(tavplubix)](https://github.com/tavplubix) -- When executing a `CREATE` query, fold constant expressions in storage engine arguments. Replace an empty database name with the current database. Fixes [\#6508](https://github.com/ClickHouse/ClickHouse/issues/6508), [\#3492](https://github.com/ClickHouse/ClickHouse/issues/3492). Also fix check for local address in `ClickHouseDictionarySource`. - [\#9262](https://github.com/ClickHouse/ClickHouse/pull/9262) [(tavplubix)](https://github.com/tavplubix) -- Now background merges in `*MergeTree` table engines family preserve storage policy volume order more accurately. - [\#8549](https://github.com/ClickHouse/ClickHouse/pull/8549) ([Vladimir Chebotarev](https://github.com/excitoon)) -- Prevent losing data in `Kafka` in rare cases when an exception happens after reading the suffix but before commit. Fixes [\#9378](https://github.com/ClickHouse/ClickHouse/issues/9378). Related: [\#7175](https://github.com/ClickHouse/ClickHouse/issues/7175) - [\#9507](https://github.com/ClickHouse/ClickHouse/pull/9507) [(filimonov)](https://github.com/filimonov) -- Fix bug leading to server termination when trying to use / drop a `Kafka` table created with wrong parameters.
Fixes [\#9494](https://github.com/ClickHouse/ClickHouse/issues/9494). Incorporates [\#9507](https://github.com/ClickHouse/ClickHouse/issues/9507). - [\#9513](https://github.com/ClickHouse/ClickHouse/pull/9513) [(filimonov)](https://github.com/filimonov) -- Allow using `MaterializedView` with subqueries above `Kafka` tables. - [\#8197](https://github.com/ClickHouse/ClickHouse/pull/8197) ([filimonov](https://github.com/filimonov)) - -#### New Feature {#new-feature-1} - -- Add `deduplicate_blocks_in_dependent_materialized_views` option to control the behaviour of idempotent inserts into tables with materialized views. This new feature was added to the bugfix release by a special request from Altinity. - [\#9070](https://github.com/ClickHouse/ClickHouse/pull/9070) [(urykhy)](https://github.com/urykhy) - -### ClickHouse release v19.16.2.2, 2019-10-30 {#clickhouse-release-v19-16-2-2-2019-10-30} - -#### Backward Incompatible Change {#backward-incompatible-change-1} - -- Add missing arity validation for count/countIf. - [\#7095](https://github.com/ClickHouse/ClickHouse/issues/7095) - [\#7298](https://github.com/ClickHouse/ClickHouse/pull/7298) ([Vdimir](https://github.com/Vdimir)) -- Remove legacy `asterisk_left_columns_only` setting (it was disabled by default). - [\#7335](https://github.com/ClickHouse/ClickHouse/pull/7335) ([Artem - Zuikov](https://github.com/4ertus2)) -- Format strings for Template data format are now specified in files. - [\#7118](https://github.com/ClickHouse/ClickHouse/pull/7118) - ([tavplubix](https://github.com/tavplubix)) - -#### New Feature {#new-feature-2} - -- Introduce uniqCombined64() to calculate cardinality greater than UINT\_MAX. - [\#7213](https://github.com/ClickHouse/ClickHouse/pull/7213), - [\#7222](https://github.com/ClickHouse/ClickHouse/pull/7222) ([Azat - Khuzhin](https://github.com/azat)) -- Support Bloom filter indexes on Array columns. - [\#6984](https://github.com/ClickHouse/ClickHouse/pull/6984) - ([achimbab](https://github.com/achimbab)) -- Add a function `getMacro(name)` that returns a String with the value of the corresponding macro - from the server configuration. [\#7240](https://github.com/ClickHouse/ClickHouse/pull/7240) - ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Set two configuration options for a dictionary based on an HTTP source: `credentials` and - `http-headers`. [\#7092](https://github.com/ClickHouse/ClickHouse/pull/7092) ([Guillaume - Tassery](https://github.com/YiuRULE)) -- Add a new ProfileEvent `Merge` that counts the number of launched background merges. - [\#7093](https://github.com/ClickHouse/ClickHouse/pull/7093) ([Mikhail - Korotov](https://github.com/millb)) -- Add the fullHostName function that returns a fully qualified domain name. - [\#7263](https://github.com/ClickHouse/ClickHouse/issues/7263) - [\#7291](https://github.com/ClickHouse/ClickHouse/pull/7291) ([sundyli](https://github.com/sundy-li)) -- Add the `arraySplit` and `arrayReverseSplit` functions, which split an array by “cut off” - conditions. They are useful in time sequence handling. - [\#7294](https://github.com/ClickHouse/ClickHouse/pull/7294) ([hcz](https://github.com/hczhcz)) -- Add new functions that return the Array of all matched indices in the multiMatch family of functions. - [\#7299](https://github.com/ClickHouse/ClickHouse/pull/7299) ([Danila - Kutenin](https://github.com/danlark1)) -- Add a new database engine `Lazy` that is optimized for storing a large number of small -Log - tables.
[\#7171](https://github.com/ClickHouse/ClickHouse/pull/7171) ([Nikita - Vasilev](https://github.com/nikvas0)) -- Add aggregate functions groupBitmapAnd, -Or, -Xor for bitmap columns. [\#7109](https://github.com/ClickHouse/ClickHouse/pull/7109) ([Zhichang - Yu](https://github.com/yuzhichang)) -- Add aggregate function combinators -OrNull and -OrDefault, which return null - or default values when there is nothing to aggregate. - [\#7331](https://github.com/ClickHouse/ClickHouse/pull/7331) - ([hcz](https://github.com/hczhcz)) -- Introduce CustomSeparated data format that supports custom escaping and - delimiter rules. [\#7118](https://github.com/ClickHouse/ClickHouse/pull/7118) - ([tavplubix](https://github.com/tavplubix)) -- Support Redis as a source of external dictionaries. [\#4361](https://github.com/ClickHouse/ClickHouse/pull/4361) [\#6962](https://github.com/ClickHouse/ClickHouse/pull/6962) ([comunodi](https://github.com/comunodi), [Anton - Popov](https://github.com/CurtizJ)) - -#### Bug Fix {#bug-fix-2} - -- Fix wrong query result if it has `WHERE IN (SELECT ...)` section and `optimize_read_in_order` is - used. [\#7371](https://github.com/ClickHouse/ClickHouse/pull/7371) ([Anton - Popov](https://github.com/CurtizJ)) -- Disabled MariaDB authentication plugin, which depends on files outside of the project. - [\#7140](https://github.com/ClickHouse/ClickHouse/pull/7140) ([Yuriy - Baranov](https://github.com/yurriy)) -- Fix exception `Cannot convert column ... because it is constant but values of constants are different in source and result` which could rarely happen when functions `now()`, `today()`, - `yesterday()`, `randConstant()` are used. - [\#7156](https://github.com/ClickHouse/ClickHouse/pull/7156) ([Nikolai - Kochetov](https://github.com/KochetovNicolai)) -- Fixed issue of using HTTP keep alive timeout instead of TCP keep alive timeout. - [\#7351](https://github.com/ClickHouse/ClickHouse/pull/7351) ([Vasily - Nemkov](https://github.com/Enmk)) -- Fixed a segmentation fault in groupBitmapOr (issue [\#7109](https://github.com/ClickHouse/ClickHouse/issues/7109)). - [\#7289](https://github.com/ClickHouse/ClickHouse/pull/7289) ([Zhichang - Yu](https://github.com/yuzhichang)) -- For materialized views the commit for Kafka is called after all data were written. - [\#7175](https://github.com/ClickHouse/ClickHouse/pull/7175) ([Ivan](https://github.com/abyss7)) -- Fixed wrong `duration_ms` value in `system.part_log` table. It was ten times off. - [\#7172](https://github.com/ClickHouse/ClickHouse/pull/7172) ([Vladimir - Chebotarev](https://github.com/excitoon)) -- A quick fix to resolve crash in LIVE VIEW table and re-enabling all LIVE VIEW tests. - [\#7201](https://github.com/ClickHouse/ClickHouse/pull/7201) - ([vzakaznikov](https://github.com/vzakaznikov)) -- Serialize NULL values correctly in min/max indexes of MergeTree parts. - [\#7234](https://github.com/ClickHouse/ClickHouse/pull/7234) ([Alexander - Kuzmenkov](https://github.com/akuzm)) -- Don’t put virtual columns into .sql metadata when a table is created as `CREATE TABLE AS`. - [\#7183](https://github.com/ClickHouse/ClickHouse/pull/7183) ([Ivan](https://github.com/abyss7)) -- Fix segmentation fault in `ATTACH PART` query. - [\#7185](https://github.com/ClickHouse/ClickHouse/pull/7185) - ([alesapin](https://github.com/alesapin)) -- Fix wrong result for some queries given by the optimization of empty IN subqueries and empty - INNER/RIGHT JOIN.
[\#7284](https://github.com/ClickHouse/ClickHouse/pull/7284) ([Nikolai - Kochetov](https://github.com/KochetovNicolai)) -- Fixing AddressSanitizer error in the LIVE VIEW getHeader() method. - [\#7271](https://github.com/ClickHouse/ClickHouse/pull/7271) - ([vzakaznikov](https://github.com/vzakaznikov)) - -#### Improvement {#improvement-1} - -- Add a message in case a queue\_wait\_max\_ms wait takes place. - [\#7390](https://github.com/ClickHouse/ClickHouse/pull/7390) ([Azat - Khuzhin](https://github.com/azat)) -- Made setting `s3_min_upload_part_size` table-level. - [\#7059](https://github.com/ClickHouse/ClickHouse/pull/7059) ([Vladimir - Chebotarev](https://github.com/excitoon)) -- Check TTL in StorageFactory. [\#7304](https://github.com/ClickHouse/ClickHouse/pull/7304) - ([sundyli](https://github.com/sundy-li)) -- Squash left-hand blocks in partial merge join (optimization). - [\#7122](https://github.com/ClickHouse/ClickHouse/pull/7122) ([Artem - Zuikov](https://github.com/4ertus2)) -- Do not allow non-deterministic functions in mutations of Replicated table engines, because this - can introduce inconsistencies between replicas. - [\#7247](https://github.com/ClickHouse/ClickHouse/pull/7247) ([Alexander - Kazakov](https://github.com/Akazz)) -- Disable memory tracker while converting exception stack trace to string. It can prevent the loss - of error messages of type `Memory limit exceeded` on the server, which caused the `Attempt to read after eof` exception on the client. [\#7264](https://github.com/ClickHouse/ClickHouse/pull/7264) - ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -- Miscellaneous format improvements. Resolves - [\#6033](https://github.com/ClickHouse/ClickHouse/issues/6033), - [\#2633](https://github.com/ClickHouse/ClickHouse/issues/2633), - [\#6611](https://github.com/ClickHouse/ClickHouse/issues/6611), - [\#6742](https://github.com/ClickHouse/ClickHouse/issues/6742) - [\#7215](https://github.com/ClickHouse/ClickHouse/pull/7215) - ([tavplubix](https://github.com/tavplubix)) -- ClickHouse ignores values on the right side of the IN operator that are not convertible to the left - side type. Make it work properly for compound types – Array and Tuple. - [\#7283](https://github.com/ClickHouse/ClickHouse/pull/7283) ([Alexander - Kuzmenkov](https://github.com/akuzm)) -- Support missing inequalities for ASOF JOIN. It’s possible to join the less-or-equal variant and strict - greater and less variants for the ASOF column in ON syntax. - [\#7282](https://github.com/ClickHouse/ClickHouse/pull/7282) ([Artem - Zuikov](https://github.com/4ertus2)) -- Optimize partial merge join. [\#7070](https://github.com/ClickHouse/ClickHouse/pull/7070) - ([Artem Zuikov](https://github.com/4ertus2)) -- Do not use more than 98K of memory in uniqCombined functions. - [\#7236](https://github.com/ClickHouse/ClickHouse/pull/7236), - [\#7270](https://github.com/ClickHouse/ClickHouse/pull/7270) ([Azat - Khuzhin](https://github.com/azat)) -- Flush parts of right-hand joining table on disk in PartialMergeJoin (if there is not enough - memory). Load data back when needed. [\#7186](https://github.com/ClickHouse/ClickHouse/pull/7186) - ([Artem Zuikov](https://github.com/4ertus2)) - -#### Performance Improvement {#performance-improvement-1} - -- Speed up joinGet with const arguments by avoiding data duplication. - [\#7359](https://github.com/ClickHouse/ClickHouse/pull/7359) ([Amos - Bird](https://github.com/amosbird)) -- Return early if the subquery is empty.
- [\#7007](https://github.com/ClickHouse/ClickHouse/pull/7007) ([小路](https://github.com/nicelulu)) -- Optimize parsing of SQL expressions in Values. - [\#6781](https://github.com/ClickHouse/ClickHouse/pull/6781) - ([tavplubix](https://github.com/tavplubix)) - -#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-1} - -- Disable some contribs for cross-compilation to Mac OS. - [\#7101](https://github.com/ClickHouse/ClickHouse/pull/7101) ([Ivan](https://github.com/abyss7)) -- Add missing linking with PocoXML for clickhouse\_common\_io. - [\#7200](https://github.com/ClickHouse/ClickHouse/pull/7200) ([Azat - Khuzhin](https://github.com/azat)) -- Accept multiple test filter arguments in clickhouse-test. - [\#7226](https://github.com/ClickHouse/ClickHouse/pull/7226) ([Alexander - Kuzmenkov](https://github.com/akuzm)) -- Enable musl and jemalloc for ARM. [\#7300](https://github.com/ClickHouse/ClickHouse/pull/7300) - ([Amos Bird](https://github.com/amosbird)) -- Added `--client-option` parameter to `clickhouse-test` to pass additional parameters to client. - [\#7277](https://github.com/ClickHouse/ClickHouse/pull/7277) ([Nikolai - Kochetov](https://github.com/KochetovNicolai)) -- Preserve existing configs on rpm package upgrade. - [\#7103](https://github.com/ClickHouse/ClickHouse/pull/7103) - ([filimonov](https://github.com/filimonov)) -- Fix errors detected by PVS. [\#7153](https://github.com/ClickHouse/ClickHouse/pull/7153) ([Artem - Zuikov](https://github.com/4ertus2)) -- Fix build for Darwin. [\#7149](https://github.com/ClickHouse/ClickHouse/pull/7149) - ([Ivan](https://github.com/abyss7)) -- glibc 2.29 compatibility. [\#7142](https://github.com/ClickHouse/ClickHouse/pull/7142) ([Amos - Bird](https://github.com/amosbird)) -- Make sure dh\_clean does not touch potential source files. - [\#7205](https://github.com/ClickHouse/ClickHouse/pull/7205) ([Amos - Bird](https://github.com/amosbird)) -- Attempt to avoid conflict when updating from altinity rpm - it has the config file packaged separately - in clickhouse-server-common. [\#7073](https://github.com/ClickHouse/ClickHouse/pull/7073) - ([filimonov](https://github.com/filimonov)) -- Optimize some header files for faster rebuilds. - [\#7212](https://github.com/ClickHouse/ClickHouse/pull/7212), - [\#7231](https://github.com/ClickHouse/ClickHouse/pull/7231) ([Alexander - Kuzmenkov](https://github.com/akuzm)) -- Add performance tests for Date and DateTime. [\#7332](https://github.com/ClickHouse/ClickHouse/pull/7332) ([Vasily - Nemkov](https://github.com/Enmk)) -- Fix some tests that contained non-deterministic mutations. - [\#7132](https://github.com/ClickHouse/ClickHouse/pull/7132) ([Alexander - Kazakov](https://github.com/Akazz)) -- Add build with MemorySanitizer to CI. [\#7066](https://github.com/ClickHouse/ClickHouse/pull/7066) - ([Alexander Kuzmenkov](https://github.com/akuzm)) -- Avoid use of uninitialized values in MetricsTransmitter. - [\#7158](https://github.com/ClickHouse/ClickHouse/pull/7158) ([Azat - Khuzhin](https://github.com/azat)) -- Fix some issues in Fields found by MemorySanitizer. - [\#7135](https://github.com/ClickHouse/ClickHouse/pull/7135), - [\#7179](https://github.com/ClickHouse/ClickHouse/pull/7179) ([Alexander - Kuzmenkov](https://github.com/akuzm)), [\#7376](https://github.com/ClickHouse/ClickHouse/pull/7376) - ([Amos Bird](https://github.com/amosbird)) -- Fix undefined behavior in murmurhash32.
[\#7388](https://github.com/ClickHouse/ClickHouse/pull/7388) ([Amos - Bird](https://github.com/amosbird)) -- Fix undefined behavior in StoragesInfoStream. [\#7384](https://github.com/ClickHouse/ClickHouse/pull/7384) - ([tavplubix](https://github.com/tavplubix)) -- Fixed constant expression folding for external database engines (MySQL, ODBC, JDBC). In previous - versions it wasn’t working for multiple constant expressions and was not working at all for Date, - DateTime and UUID. This fixes [\#7245](https://github.com/ClickHouse/ClickHouse/issues/7245) - [\#7252](https://github.com/ClickHouse/ClickHouse/pull/7252) - ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Fixing ThreadSanitizer data race error in the LIVE VIEW when accessing no\_users\_thread variable. - [\#7353](https://github.com/ClickHouse/ClickHouse/pull/7353) - ([vzakaznikov](https://github.com/vzakaznikov)) -- Get rid of malloc symbols in libcommon - [\#7134](https://github.com/ClickHouse/ClickHouse/pull/7134), - [\#7065](https://github.com/ClickHouse/ClickHouse/pull/7065) ([Amos - Bird](https://github.com/amosbird)) -- Add global flag ENABLE\_LIBRARIES for disabling all libraries. - [\#7063](https://github.com/ClickHouse/ClickHouse/pull/7063) - ([proller](https://github.com/proller)) - -#### Code cleanup {#code-cleanup} - -- Generalize configuration repository to prepare for DDL for Dictionaries. [\#7155](https://github.com/ClickHouse/ClickHouse/pull/7155) - ([alesapin](https://github.com/alesapin)) -- Parser for dictionaries DDL without any semantics. - [\#7209](https://github.com/ClickHouse/ClickHouse/pull/7209) - ([alesapin](https://github.com/alesapin)) -- Split ParserCreateQuery into different smaller parsers. - [\#7253](https://github.com/ClickHouse/ClickHouse/pull/7253) - ([alesapin](https://github.com/alesapin)) -- Small refactoring and renaming near external dictionaries. - [\#7111](https://github.com/ClickHouse/ClickHouse/pull/7111) - ([alesapin](https://github.com/alesapin)) -- Refactor some code to prepare for role-based access control. [\#7235](https://github.com/ClickHouse/ClickHouse/pull/7235) ([Vitaly - Baranov](https://github.com/vitlibar)) -- Some improvements in DatabaseOrdinary code. - [\#7086](https://github.com/ClickHouse/ClickHouse/pull/7086) ([Nikita - Vasilev](https://github.com/nikvas0)) -- Do not use iterators in find() and emplace() methods of hash tables. - [\#7026](https://github.com/ClickHouse/ClickHouse/pull/7026) ([Alexander - Kuzmenkov](https://github.com/akuzm)) -- Fix getMultipleValuesFromConfig in the case when the parameter root is not empty. [\#7374](https://github.com/ClickHouse/ClickHouse/pull/7374) - ([Mikhail Korotov](https://github.com/millb)) -- Remove some copy-paste (TemporaryFile and TemporaryFileStream) - [\#7166](https://github.com/ClickHouse/ClickHouse/pull/7166) ([Artem - Zuikov](https://github.com/4ertus2)) -- Improved code readability a little bit (`MergeTreeData::getActiveContainingPart`). - [\#7361](https://github.com/ClickHouse/ClickHouse/pull/7361) ([Vladimir - Chebotarev](https://github.com/excitoon)) -- Wait for all scheduled jobs, which are using local objects, if `ThreadPool::schedule(...)` throws - an exception. Rename `ThreadPool::schedule(...)` to `ThreadPool::scheduleOrThrowOnError(...)` and - fix comments to make it obvious that it may throw.
- [\#7350](https://github.com/ClickHouse/ClickHouse/pull/7350) - ([tavplubix](https://github.com/tavplubix)) - -## ClickHouse release 19.15 {#clickhouse-release-19-15} - -### ClickHouse release 19.15.4.10, 2019-10-31 {#clickhouse-release-19-15-4-10-2019-10-31} - -#### Bug Fix {#bug-fix-3} - -- Added handling of SQL\_TINYINT and SQL\_BIGINT, and fixed handling of SQL\_FLOAT data source types in ODBC Bridge. - [\#7491](https://github.com/ClickHouse/ClickHouse/pull/7491) ([Denis Glazachev](https://github.com/traceon)) -- Allowed having some parts on the destination disk or volume in MOVE PARTITION. - [\#7434](https://github.com/ClickHouse/ClickHouse/pull/7434) ([Vladimir Chebotarev](https://github.com/excitoon)) -- Fixed NULL values in nullable columns through the ODBC bridge. - [\#7402](https://github.com/ClickHouse/ClickHouse/pull/7402) ([Vasily Nemkov](https://github.com/Enmk)) -- Fixed INSERT into a Distributed non-local node with MATERIALIZED columns. - [\#7377](https://github.com/ClickHouse/ClickHouse/pull/7377) ([Azat Khuzhin](https://github.com/azat)) -- Fixed function getMultipleValuesFromConfig. - [\#7374](https://github.com/ClickHouse/ClickHouse/pull/7374) ([Mikhail Korotov](https://github.com/millb)) -- Fixed issue of using HTTP keep alive timeout instead of TCP keep alive timeout. - [\#7351](https://github.com/ClickHouse/ClickHouse/pull/7351) ([Vasily Nemkov](https://github.com/Enmk)) -- Wait for all jobs to finish on exception (fixes rare segfaults). - [\#7350](https://github.com/ClickHouse/ClickHouse/pull/7350) ([tavplubix](https://github.com/tavplubix)) -- Don’t push to MVs when inserting into a Kafka table. - [\#7265](https://github.com/ClickHouse/ClickHouse/pull/7265) ([Ivan](https://github.com/abyss7)) -- Disable memory tracker for exception stack. - [\#7264](https://github.com/ClickHouse/ClickHouse/pull/7264) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -- Fixed bad code in transforming query for external database. - [\#7252](https://github.com/ClickHouse/ClickHouse/pull/7252) ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Avoid use of uninitialized values in MetricsTransmitter. - [\#7158](https://github.com/ClickHouse/ClickHouse/pull/7158) ([Azat Khuzhin](https://github.com/azat)) -- Added example config with macros for tests ([alexey-milovidov](https://github.com/alexey-milovidov)) - -### ClickHouse release 19.15.3.6, 2019-10-09 {#clickhouse-release-19-15-3-6-2019-10-09} - -#### Bug Fix {#bug-fix-4} - -- Fixed bad\_variant in hashed dictionary. - ([alesapin](https://github.com/alesapin)) -- Fixed a bug with a segmentation fault in the ATTACH PART query. - ([alesapin](https://github.com/alesapin)) -- Fixed time calculation in `MergeTreeData`. - ([Vladimir Chebotarev](https://github.com/excitoon)) -- Commit to Kafka explicitly after the writing is finalized. - [\#7175](https://github.com/ClickHouse/ClickHouse/pull/7175) ([Ivan](https://github.com/abyss7)) -- Serialize NULL values correctly in min/max indexes of MergeTree parts. - [\#7234](https://github.com/ClickHouse/ClickHouse/pull/7234) ([Alexander Kuzmenkov](https://github.com/akuzm)) - -### ClickHouse release 19.15.2.2, 2019-10-01 {#clickhouse-release-19-15-2-2-2019-10-01} - -#### New Feature {#new-feature-3} - -- Tiered storage: support for using multiple storage volumes for tables with the MergeTree engine. It’s possible to store fresh data on SSD and automatically move old data to HDD. ([example](https://clickhouse.github.io/clickhouse-presentations/meetup30/new_features/#12)).
[\#4918](https://github.com/ClickHouse/ClickHouse/pull/4918) ([Igr](https://github.com/ObjatieGroba)) [\#6489](https://github.com/ClickHouse/ClickHouse/pull/6489) ([alesapin](https://github.com/alesapin)) -- Add table function `input` for reading incoming data in an `INSERT SELECT` query. [\#5450](https://github.com/ClickHouse/ClickHouse/pull/5450) ([palasonic1](https://github.com/palasonic1)) [\#6832](https://github.com/ClickHouse/ClickHouse/pull/6832) ([Anton Popov](https://github.com/CurtizJ)) -- Add a `sparse_hashed` dictionary layout that is functionally equivalent to the `hashed` layout, but is more memory efficient. It uses about half as much memory at the cost of slower value retrieval. [\#6894](https://github.com/ClickHouse/ClickHouse/pull/6894) ([Azat Khuzhin](https://github.com/azat)) -- Implement the ability to define a list of users for access to dictionaries. Only the currently connected database can be used. [\#6907](https://github.com/ClickHouse/ClickHouse/pull/6907) ([Guillaume Tassery](https://github.com/YiuRULE)) -- Add `LIMIT` option to `SHOW` query. [\#6944](https://github.com/ClickHouse/ClickHouse/pull/6944) ([Philipp Malkovsky](https://github.com/malkfilipp)) -- Add `bitmapSubsetLimit(bitmap, range_start, limit)` function that returns a subset of the smallest `limit` values in the set that are no smaller than `range_start`. [\#6957](https://github.com/ClickHouse/ClickHouse/pull/6957) ([Zhichang Yu](https://github.com/yuzhichang)) -- Add `bitmapMin` and `bitmapMax` functions. [\#6970](https://github.com/ClickHouse/ClickHouse/pull/6970) ([Zhichang Yu](https://github.com/yuzhichang)) -- Add function `repeat` related to [issue-6648](https://github.com/ClickHouse/ClickHouse/issues/6648) [\#6999](https://github.com/ClickHouse/ClickHouse/pull/6999) ([flynn](https://github.com/ucasFL)) - -#### Experimental Feature {#experimental-feature-1} - -- Implement (in memory) Merge Join variant that does not change the current pipeline. The result is partially sorted by merge key. Set `partial_merge_join = 1` to use this feature. The Merge Join is still in development. [\#6940](https://github.com/ClickHouse/ClickHouse/pull/6940) ([Artem Zuikov](https://github.com/4ertus2)) -- Add `S3` engine and table function. It is still in development (no authentication support yet). [\#5596](https://github.com/ClickHouse/ClickHouse/pull/5596) ([Vladimir Chebotarev](https://github.com/excitoon)) - -#### Improvement {#improvement-2} - -- Every message read from Kafka is inserted atomically. This resolves almost all known issues with the Kafka engine. [\#6950](https://github.com/ClickHouse/ClickHouse/pull/6950) ([Ivan](https://github.com/abyss7)) -- Improvements for failover of Distributed queries. Shorten recovery time; also, it is now configurable and can be seen in `system.clusters`. [\#6399](https://github.com/ClickHouse/ClickHouse/pull/6399) ([Vasily Nemkov](https://github.com/Enmk)) -- Support numeric values for Enums directly in the `IN` section. \#6766 [\#6941](https://github.com/ClickHouse/ClickHouse/pull/6941) ([dimarub2000](https://github.com/dimarub2000)) -- Support (optional, disabled by default) redirects on URL storage. [\#6914](https://github.com/ClickHouse/ClickHouse/pull/6914) ([maqroll](https://github.com/maqroll)) -- Add an information message when a client with an older version connects to a server.
-- Add function `repeat`, related to [issue-6648](https://github.com/ClickHouse/ClickHouse/issues/6648). [\#6999](https://github.com/ClickHouse/ClickHouse/pull/6999) ([flynn](https://github.com/ucasFL))
-
-#### Experimental Feature {#experimental-feature-1}
-
-- Implement (in memory) Merge Join variant that does not change the current pipeline. The result is partially sorted by merge key. Set `partial_merge_join = 1` to use this feature. The Merge Join is still in development. [\#6940](https://github.com/ClickHouse/ClickHouse/pull/6940) ([Artem Zuikov](https://github.com/4ertus2))
-- Add `S3` engine and table function. It is still in development (no authentication support yet). [\#5596](https://github.com/ClickHouse/ClickHouse/pull/5596) ([Vladimir Chebotarev](https://github.com/excitoon))
-
-#### Improvement {#improvement-2}
-
-- Every message read from Kafka is inserted atomically. This resolves almost all known issues with the Kafka engine. [\#6950](https://github.com/ClickHouse/ClickHouse/pull/6950) ([Ivan](https://github.com/abyss7))
-- Improvements for failover of Distributed queries. Recovery time is shortened; it is also now configurable and can be seen in `system.clusters`. [\#6399](https://github.com/ClickHouse/ClickHouse/pull/6399) ([Vasily Nemkov](https://github.com/Enmk))
-- Support numeric values for Enums directly in the `IN` section. \#6766 [\#6941](https://github.com/ClickHouse/ClickHouse/pull/6941) ([dimarub2000](https://github.com/dimarub2000))
-- Support (optional, disabled by default) redirects on URL storage. [\#6914](https://github.com/ClickHouse/ClickHouse/pull/6914) ([maqroll](https://github.com/maqroll))
-- Add information message when a client with an older version connects to a server. [\#6893](https://github.com/ClickHouse/ClickHouse/pull/6893) ([Philipp Malkovsky](https://github.com/malkfilipp))
-- Remove maximum backoff sleep time limit for sending data in Distributed tables. [\#6895](https://github.com/ClickHouse/ClickHouse/pull/6895) ([Azat Khuzhin](https://github.com/azat))
-- Add ability to send profile events (counters) with cumulative values to graphite. It can be enabled under `` in server `config.xml`. [\#6969](https://github.com/ClickHouse/ClickHouse/pull/6969) ([Azat Khuzhin](https://github.com/azat))
-- Automatically cast type `T` to `LowCardinality(T)` while inserting data in a column of type `LowCardinality(T)` in Native format via HTTP. [\#6891](https://github.com/ClickHouse/ClickHouse/pull/6891) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
-- Add ability to use function `hex` without using `reinterpretAsString` for `Float32`, `Float64`. [\#7024](https://github.com/ClickHouse/ClickHouse/pull/7024) ([Mikhail Korotov](https://github.com/millb))
-
-#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-2}
-
-- Add gdb-index to clickhouse binary with debug info. It will speed up startup time of `gdb`. [\#6947](https://github.com/ClickHouse/ClickHouse/pull/6947) ([alesapin](https://github.com/alesapin))
-- Speed up deb packaging with patched dpkg-deb which uses `pigz`. [\#6960](https://github.com/ClickHouse/ClickHouse/pull/6960) ([alesapin](https://github.com/alesapin))
-- Set `enable_fuzzing = 1` to enable libfuzzer instrumentation of all the project code. [\#7042](https://github.com/ClickHouse/ClickHouse/pull/7042) ([kyprizel](https://github.com/kyprizel))
-- Add split build smoke test in CI. [\#7061](https://github.com/ClickHouse/ClickHouse/pull/7061) ([alesapin](https://github.com/alesapin))
-- Add build with MemorySanitizer to CI. [\#7066](https://github.com/ClickHouse/ClickHouse/pull/7066) ([Alexander Kuzmenkov](https://github.com/akuzm))
-- Replace `libsparsehash` with `sparsehash-c11`. [\#6965](https://github.com/ClickHouse/ClickHouse/pull/6965) ([Azat Khuzhin](https://github.com/azat))
-
-#### Bug Fix {#bug-fix-5}
-
-- Fixed performance degradation of index analysis on complex keys on large tables. This fixes \#6924. [\#7075](https://github.com/ClickHouse/ClickHouse/pull/7075) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Fix logical error causing segfaults when selecting from an empty Kafka topic. [\#6909](https://github.com/ClickHouse/ClickHouse/pull/6909) ([Ivan](https://github.com/abyss7))
-- Fix too early MySQL connection close in `MySQLBlockInputStream.cpp`. [\#6882](https://github.com/ClickHouse/ClickHouse/pull/6882) ([Clément Rodriguez](https://github.com/clemrodriguez))
-- Returned support for very old Linux kernels (fix [\#6841](https://github.com/ClickHouse/ClickHouse/issues/6841)). [\#6853](https://github.com/ClickHouse/ClickHouse/pull/6853) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Fix possible data loss in `insert select` query in case of empty block in input stream. \#6834 \#6862 [\#6911](https://github.com/ClickHouse/ClickHouse/pull/6911) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
-- Fix for function `arrayEnumerateUniqRanked` with empty arrays in params. [\#6928](https://github.com/ClickHouse/ClickHouse/pull/6928) ([proller](https://github.com/proller))
-- Fix complex queries with array joins and global subqueries. [\#6934](https://github.com/ClickHouse/ClickHouse/pull/6934) ([Ivan](https://github.com/abyss7))
-- Fix `Unknown identifier` error in ORDER BY and GROUP BY with multiple JOINs. [\#7022](https://github.com/ClickHouse/ClickHouse/pull/7022) ([Artem Zuikov](https://github.com/4ertus2))
-- Fixed `MSan` warning while executing function with `LowCardinality` argument. [\#7062](https://github.com/ClickHouse/ClickHouse/pull/7062) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
-
-#### Backward Incompatible Change {#backward-incompatible-change-2}
-
-- Changed serialization format of bitmap\* aggregate function states to improve performance. Serialized states of bitmap\* from previous versions cannot be read. [\#6908](https://github.com/ClickHouse/ClickHouse/pull/6908) ([Zhichang Yu](https://github.com/yuzhichang))
-
-## ClickHouse release 19.14 {#clickhouse-release-19-14}
-
-### ClickHouse release 19.14.7.15, 2019-10-02 {#clickhouse-release-19-14-7-15-2019-10-02}
-
-#### Bug Fix {#bug-fix-6}
-
-- This release also contains all bug fixes from 19.11.12.69.
-- Fixed compatibility for distributed queries between 19.14 and earlier versions. This fixes [\#7068](https://github.com/ClickHouse/ClickHouse/issues/7068). [\#7069](https://github.com/ClickHouse/ClickHouse/pull/7069) ([alexey-milovidov](https://github.com/alexey-milovidov))
-
-### ClickHouse release 19.14.6.12, 2019-09-19 {#clickhouse-release-19-14-6-12-2019-09-19}
-
-#### Bug Fix {#bug-fix-7}
-
-- Fix for function `arrayEnumerateUniqRanked` with empty arrays in params. [\#6928](https://github.com/ClickHouse/ClickHouse/pull/6928) ([proller](https://github.com/proller))
-- Fixed subquery name in queries with `ARRAY JOIN` and `GLOBAL IN subquery` with alias. Use subquery alias for external table name if it is specified. [\#6934](https://github.com/ClickHouse/ClickHouse/pull/6934) ([Ivan](https://github.com/abyss7))
-
-#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-3}
-
-- Fix [flapping](https://clickhouse-test-reports.s3.yandex.net/6944/aab95fd5175a513413c7395a73a82044bdafb906/functional_stateless_tests_(debug).html) test `00715_fetch_merged_or_mutated_part_zookeeper` by rewriting it to a shell script, because it needs to wait for mutations to apply. [\#6977](https://github.com/ClickHouse/ClickHouse/pull/6977) ([Alexander Kazakov](https://github.com/Akazz))
-- Fixed UBSan and MemSan failure in function `groupUniqArray` with empty array argument. It was caused by placing an empty `PaddedPODArray` into a hash table zero cell because the constructor for the zero cell value was not called. [\#6937](https://github.com/ClickHouse/ClickHouse/pull/6937) ([Amos Bird](https://github.com/amosbird))
-
-### ClickHouse release 19.14.3.3, 2019-09-10 {#clickhouse-release-19-14-3-3-2019-09-10}
-
-#### New Feature {#new-feature-4}
-
-- `WITH FILL` modifier for `ORDER BY`. (continuation of [\#5069](https://github.com/ClickHouse/ClickHouse/issues/5069)) [\#6610](https://github.com/ClickHouse/ClickHouse/pull/6610) ([Anton Popov](https://github.com/CurtizJ))
-- `WITH TIES` modifier for `LIMIT`. (continuation of [\#5069](https://github.com/ClickHouse/ClickHouse/issues/5069)) [\#6610](https://github.com/ClickHouse/ClickHouse/pull/6610) ([Anton Popov](https://github.com/CurtizJ))
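A brief sketch of both modifiers on synthetic data:

```sql
-- ORDER BY ... WITH FILL inserts the missing points 2 and 4: 1, 2, 3, 4, 5.
SELECT number AS n FROM numbers(6) WHERE n % 2 = 1 ORDER BY n WITH FILL;

-- LIMIT ... WITH TIES also returns rows that tie with the last row by the
-- ORDER BY key: here 6 rows come back instead of 4.
SELECT intDiv(number, 3) AS g FROM numbers(9) ORDER BY g LIMIT 4 WITH TIES;
```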
-- Parse unquoted `NULL` literal as NULL (if setting `format_csv_unquoted_null_literal_as_null=1`). Initialize null fields with default values if data type of this field is not nullable (if setting `input_format_null_as_default=1`). [\#5990](https://github.com/ClickHouse/ClickHouse/issues/5990) [\#6055](https://github.com/ClickHouse/ClickHouse/pull/6055) ([tavplubix](https://github.com/tavplubix))
-- Support for wildcards in paths of table functions `file` and `hdfs`. If the path contains wildcards, the table will be readonly. Example of usage: `select * from hdfs('hdfs://hdfs1:9000/some_dir/another_dir/*/file{0..9}{0..9}')` and `select * from file('some_dir/{some_file,another_file,yet_another}.tsv', 'TSV', 'value UInt32')`. [\#6092](https://github.com/ClickHouse/ClickHouse/pull/6092) ([Olga Khvostikova](https://github.com/stavrolia))
-- New `system.metric_log` table which stores values of `system.events` and `system.metrics` with specified time interval. [\#6363](https://github.com/ClickHouse/ClickHouse/issues/6363) [\#6467](https://github.com/ClickHouse/ClickHouse/pull/6467) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) [\#6530](https://github.com/ClickHouse/ClickHouse/pull/6530) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Allow to write ClickHouse text logs to `system.text_log` table. [\#6037](https://github.com/ClickHouse/ClickHouse/issues/6037) [\#6103](https://github.com/ClickHouse/ClickHouse/pull/6103) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) [\#6164](https://github.com/ClickHouse/ClickHouse/pull/6164) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Show private symbols in stack traces (this is done via parsing symbol tables of ELF files). Added information about file and line number in stack traces if debug info is present. Sped up symbol name lookup by indexing symbols present in the program. Added new SQL functions for introspection: `demangle` and `addressToLine`. Renamed function `symbolizeAddress` to `addressToSymbol` for consistency. Function `addressToSymbol` will return the mangled name for performance reasons and you have to apply `demangle`. Added setting `allow_introspection_functions` which is turned off by default. [\#6201](https://github.com/ClickHouse/ClickHouse/pull/6201) ([alexey-milovidov](https://github.com/alexey-milovidov))
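A minimal sketch of the introspection functions; it assumes `system.trace_log` is populated (the subquery and LIMIT are just illustrative):

```sql
SET allow_introspection_functions = 1;

SELECT
    addressToLine(addr) AS source_line,
    demangle(addressToSymbol(addr)) AS symbol
FROM (SELECT arrayJoin(trace) AS addr FROM system.trace_log LIMIT 5);
```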
-- Table function `values` (the name is case-insensitive). It allows to read from `VALUES` list proposed in [\#5984](https://github.com/ClickHouse/ClickHouse/issues/5984). Example: `SELECT * FROM VALUES('a UInt64, s String', (1, 'one'), (2, 'two'), (3, 'three'))`. [\#6217](https://github.com/ClickHouse/ClickHouse/issues/6217). [\#6209](https://github.com/ClickHouse/ClickHouse/pull/6209) ([dimarub2000](https://github.com/dimarub2000))
-- Added an ability to alter storage settings. Syntax: `ALTER TABLE <table> MODIFY SETTING <setting_name> = <new_value>`. [\#6366](https://github.com/ClickHouse/ClickHouse/pull/6366) [\#6669](https://github.com/ClickHouse/ClickHouse/pull/6669) [\#6685](https://github.com/ClickHouse/ClickHouse/pull/6685) ([alesapin](https://github.com/alesapin))
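For illustration, a MergeTree-level setting can now be changed in place (the table name `hits` and the chosen setting are just examples):

```sql
-- Hypothetical table; tighten a MergeTree storage setting without recreating it.
ALTER TABLE hits MODIFY SETTING parts_to_throw_insert = 500;
```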
-- Support for removing of detached parts. Syntax: `ALTER TABLE <table_name> DROP DETACHED PART '<part_name>'`. [\#6158](https://github.com/ClickHouse/ClickHouse/pull/6158) ([tavplubix](https://github.com/tavplubix))
-- Table constraints. Allows to add constraint to table definition which will be checked at insert. [\#5273](https://github.com/ClickHouse/ClickHouse/pull/5273) ([Gleb Novikov](https://github.com/NanoBjorn)) [\#6652](https://github.com/ClickHouse/ClickHouse/pull/6652) ([alexey-milovidov](https://github.com/alexey-milovidov))
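A sketch of a constraint checked on insert; the table and constraint names are hypothetical:

```sql
CREATE TABLE purchases
(
    user_id UInt64,
    amount  Float64,
    CONSTRAINT amount_is_positive CHECK amount > 0
)
ENGINE = MergeTree
ORDER BY user_id;

-- This insert violates the constraint and is rejected with an exception.
INSERT INTO purchases VALUES (1, -10);
```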
-- Support for cascaded materialized views. [\#6324](https://github.com/ClickHouse/ClickHouse/pull/6324) ([Amos Bird](https://github.com/amosbird))
-- Turn on query profiler by default to sample every query execution thread once a second. [\#6283](https://github.com/ClickHouse/ClickHouse/pull/6283) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Input format `ORC`. [\#6454](https://github.com/ClickHouse/ClickHouse/pull/6454) [\#6703](https://github.com/ClickHouse/ClickHouse/pull/6703) ([akonyaev90](https://github.com/akonyaev90))
-- Added two new functions: `sigmoid` and `tanh` (that are useful for machine learning applications). [\#6254](https://github.com/ClickHouse/ClickHouse/pull/6254) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Function `hasToken(haystack, token)`, `hasTokenCaseInsensitive(haystack, token)` to check if given token is in haystack. A token is a maximal-length substring between two non-alphanumeric ASCII characters (or boundaries of the haystack). The token must be a constant string. Supported by tokenbf\_v1 index specialization. [\#6596](https://github.com/ClickHouse/ClickHouse/pull/6596), [\#6662](https://github.com/ClickHouse/ClickHouse/pull/6662) ([Vasily Nemkov](https://github.com/Enmk))
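A quick illustration of token matching:

```sql
SELECT
    hasToken('Hello, world! Hello ClickHouse', 'world') AS hit,   -- 1
    hasToken('Hello, world!', 'wor') AS partial_token,            -- 0: not a whole token
    hasTokenCaseInsensitive('Hello, world!', 'WORLD') AS ci_hit   -- 1
```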
-- New function `neighbor(value, offset[, default_value])`. Allows to reach prev/next value within column in a block of data. [\#5925](https://github.com/ClickHouse/ClickHouse/pull/5925) ([Alex Krash](https://github.com/alex-krash)) [6685365ab8c5b74f9650492c88a012596eb1b0c6](https://github.com/ClickHouse/ClickHouse/commit/6685365ab8c5b74f9650492c88a012596eb1b0c6) [341e2e4587a18065c2da1ca888c73389f48ce36c](https://github.com/ClickHouse/ClickHouse/commit/341e2e4587a18065c2da1ca888c73389f48ce36c) [Alexey Milovidov](https://github.com/alexey-milovidov)
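A small sketch of `neighbor` on a generated column; note that it operates within a single data block:

```sql
SELECT
    number,
    neighbor(number, -1)     AS prev,  -- 0 for the first row (type default)
    neighbor(number, 1, 999) AS next   -- 999 for the last row (explicit default)
FROM numbers(5);
```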
-- Created a function `currentUser()`, returning the login of the authorized user. Added alias `user()` for compatibility with MySQL. [\#6470](https://github.com/ClickHouse/ClickHouse/pull/6470) ([Alex Krash](https://github.com/alex-krash))
-- New aggregate functions `quantilesExactInclusive` and `quantilesExactExclusive` which were proposed in [\#5885](https://github.com/ClickHouse/ClickHouse/issues/5885). [\#6477](https://github.com/ClickHouse/ClickHouse/pull/6477) ([dimarub2000](https://github.com/dimarub2000))
-- Function `bitmapRange(bitmap, range_begin, range_end)` which returns a new set with the specified range (not including `range_end`). [\#6314](https://github.com/ClickHouse/ClickHouse/pull/6314) ([Zhichang Yu](https://github.com/yuzhichang))
-- Function `geohashesInBox(longitude_min, latitude_min, longitude_max, latitude_max, precision)` which creates an array of precision-long strings of geohash boxes covering the provided area. [\#6127](https://github.com/ClickHouse/ClickHouse/pull/6127) ([Vasily Nemkov](https://github.com/Enmk))
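For example (the coordinates are an arbitrary bounding box, used only to show the argument order):

```sql
-- Returns an array of 4-character geohash cells covering the box.
SELECT geohashesInBox(24.48, 40.56, 24.785, 40.81, 4) AS geohashes;
```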
-- Implement support for INSERT query with `Kafka` tables. [\#6012](https://github.com/ClickHouse/ClickHouse/pull/6012) ([Ivan](https://github.com/abyss7))
-- Added support for `_partition` and `_timestamp` virtual columns to Kafka engine. [\#6400](https://github.com/ClickHouse/ClickHouse/pull/6400) ([Ivan](https://github.com/abyss7))
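A hedged sketch of both Kafka additions; the broker, topic, group and table definition are placeholders, not taken from the PRs:

```sql
CREATE TABLE queue (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'localhost:9092',
         kafka_topic_list = 'topic1',
         kafka_group_name = 'group1',
         kafka_format = 'JSONEachRow';

-- New: INSERT produces messages into the topic ...
INSERT INTO queue VALUES (1, 'hello');

-- ... and the new virtual columns expose message metadata on read.
SELECT key, value, _partition, _timestamp FROM queue;
```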
-- Possibility to remove sensitive data from `query_log`, server logs, process list with regexp-based rules. [\#5710](https://github.com/ClickHouse/ClickHouse/pull/5710) ([filimonov](https://github.com/filimonov))
-
-#### Experimental Feature {#experimental-feature-2}
-
-- Input and output data format `Template`. It allows to specify a custom format string for input and output. [\#4354](https://github.com/ClickHouse/ClickHouse/issues/4354) [\#6727](https://github.com/ClickHouse/ClickHouse/pull/6727) ([tavplubix](https://github.com/tavplubix))
-- Implementation of `LIVE VIEW` tables that were originally proposed in [\#2898](https://github.com/ClickHouse/ClickHouse/pull/2898), prepared in [\#3925](https://github.com/ClickHouse/ClickHouse/issues/3925), and then updated in [\#5541](https://github.com/ClickHouse/ClickHouse/issues/5541). See [\#5541](https://github.com/ClickHouse/ClickHouse/issues/5541) for detailed description. [\#5541](https://github.com/ClickHouse/ClickHouse/issues/5541) ([vzakaznikov](https://github.com/vzakaznikov)) [\#6425](https://github.com/ClickHouse/ClickHouse/pull/6425) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) [\#6656](https://github.com/ClickHouse/ClickHouse/pull/6656) ([vzakaznikov](https://github.com/vzakaznikov)) Note that the `LIVE VIEW` feature may be removed in next versions.
-
-#### Bug Fix {#bug-fix-8}
-
-- This release also contains all bug fixes from 19.13 and 19.11.
-- Fix segmentation fault when the table has skip indices and vertical merge happens. [\#6723](https://github.com/ClickHouse/ClickHouse/pull/6723) ([alesapin](https://github.com/alesapin))
-- Fix per-column TTL with non-trivial column defaults. Previously, in case of force TTL merge with `OPTIMIZE ... FINAL` query, expired values were replaced by type defaults instead of user-specified column defaults. [\#6796](https://github.com/ClickHouse/ClickHouse/pull/6796) ([Anton Popov](https://github.com/CurtizJ))
-- Fix Kafka messages duplication problem on normal server restart. [\#6597](https://github.com/ClickHouse/ClickHouse/pull/6597) ([Ivan](https://github.com/abyss7))
-- Fixed infinite loop when reading Kafka messages. Do not pause/resume consumer on subscription at all - otherwise it may get paused indefinitely in some scenarios. [\#6354](https://github.com/ClickHouse/ClickHouse/pull/6354) ([Ivan](https://github.com/abyss7))
-- Fix `Key expression contains comparison between inconvertible types` exception in `bitmapContains` function. [\#6136](https://github.com/ClickHouse/ClickHouse/issues/6136) [\#6146](https://github.com/ClickHouse/ClickHouse/issues/6146) [\#6156](https://github.com/ClickHouse/ClickHouse/pull/6156) ([dimarub2000](https://github.com/dimarub2000))
-- Fix segfault with enabled `optimize_skip_unused_shards` and missing sharding key. [\#6384](https://github.com/ClickHouse/ClickHouse/pull/6384) ([Anton Popov](https://github.com/CurtizJ))
-- Fixed wrong code in mutations that may lead to memory corruption. Fixed segfault with read of address `0x14c0` that may happen due to concurrent `DROP TABLE` and `SELECT` from `system.parts` or `system.parts_columns`. Fixed race condition in preparation of mutation queries. Fixed deadlock caused by `OPTIMIZE` of Replicated tables and concurrent modification operations like ALTERs. [\#6514](https://github.com/ClickHouse/ClickHouse/pull/6514) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Removed extra verbose logging in MySQL interface. [\#6389](https://github.com/ClickHouse/ClickHouse/pull/6389) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Return the ability to parse boolean settings from ‘true’ and ‘false’ in the configuration file. [\#6278](https://github.com/ClickHouse/ClickHouse/pull/6278) ([alesapin](https://github.com/alesapin))
-- Fix crash in `quantile` and `median` function over `Nullable(Decimal128)`. [\#6378](https://github.com/ClickHouse/ClickHouse/pull/6378) ([Artem Zuikov](https://github.com/4ertus2))
-- Fixed possible incomplete result returned by `SELECT` query with `WHERE` condition on primary key that contained conversion to Float type. It was caused by incorrect checking of monotonicity in the `toFloat` function. [\#6248](https://github.com/ClickHouse/ClickHouse/issues/6248) [\#6374](https://github.com/ClickHouse/ClickHouse/pull/6374) ([dimarub2000](https://github.com/dimarub2000))
-- Check `max_expanded_ast_elements` setting for mutations. Clear mutations after `TRUNCATE TABLE`. [\#6205](https://github.com/ClickHouse/ClickHouse/pull/6205) ([Winter Zhang](https://github.com/zhang2014))
-- Fix JOIN results for key columns when used with `join_use_nulls`. Attach Nulls instead of column defaults. [\#6249](https://github.com/ClickHouse/ClickHouse/pull/6249) ([Artem Zuikov](https://github.com/4ertus2))
-- Fix for skip indices with vertical merge and alter. Fix for `Bad size of marks file` exception. [\#6594](https://github.com/ClickHouse/ClickHouse/issues/6594) [\#6713](https://github.com/ClickHouse/ClickHouse/pull/6713) ([alesapin](https://github.com/alesapin))
-- Fix rare crash in `ALTER MODIFY COLUMN` and vertical merge when one of merged/altered parts is empty (0 rows). [\#6746](https://github.com/ClickHouse/ClickHouse/issues/6746) [\#6780](https://github.com/ClickHouse/ClickHouse/pull/6780) ([alesapin](https://github.com/alesapin))
-- Fixed bug in conversion of `LowCardinality` types in `AggregateFunctionFactory`. This fixes [\#6257](https://github.com/ClickHouse/ClickHouse/issues/6257). [\#6281](https://github.com/ClickHouse/ClickHouse/pull/6281) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
-- Fix wrong behavior and possible segfaults in `topK` and `topKWeighted` aggregate functions. [\#6404](https://github.com/ClickHouse/ClickHouse/pull/6404) ([Anton Popov](https://github.com/CurtizJ))
-- Fixed unsafe code around `getIdentifier` function. [\#6401](https://github.com/ClickHouse/ClickHouse/issues/6401) [\#6409](https://github.com/ClickHouse/ClickHouse/pull/6409) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Fixed bug in MySQL wire protocol (used while connecting to ClickHouse from MySQL client). Caused by heap buffer overflow in `PacketPayloadWriteBuffer`. [\#6212](https://github.com/ClickHouse/ClickHouse/pull/6212) ([Yuriy Baranov](https://github.com/yurriy))
-- Fixed memory leak in `bitmapSubsetInRange` function. [\#6819](https://github.com/ClickHouse/ClickHouse/pull/6819) ([Zhichang Yu](https://github.com/yuzhichang))
-- Fix rare bug when mutation executed after granularity change. [\#6816](https://github.com/ClickHouse/ClickHouse/pull/6816) ([alesapin](https://github.com/alesapin))
-- Allow protobuf message with all fields by default. [\#6132](https://github.com/ClickHouse/ClickHouse/pull/6132) ([Vitaly Baranov](https://github.com/vitlibar))
-- Resolve a bug with `nullIf` function when the second argument is `NULL`. [\#6446](https://github.com/ClickHouse/ClickHouse/pull/6446) ([Guillaume Tassery](https://github.com/YiuRULE))
-- Fix rare bug with wrong memory allocation/deallocation in complex key cache dictionaries with string fields which leads to infinite memory consumption (looks like a memory leak). The bug reproduces when the string size is a power of two starting from eight (8, 16, 32, etc). [\#6447](https://github.com/ClickHouse/ClickHouse/pull/6447) ([alesapin](https://github.com/alesapin))
-- Fixed Gorilla encoding on small sequences which caused exception `Cannot write after end of buffer`. [\#6398](https://github.com/ClickHouse/ClickHouse/issues/6398) [\#6444](https://github.com/ClickHouse/ClickHouse/pull/6444) ([Vasily Nemkov](https://github.com/Enmk))
-- Allow to use non-nullable types in JOINs with `join_use_nulls` enabled. [\#6705](https://github.com/ClickHouse/ClickHouse/pull/6705) ([Artem Zuikov](https://github.com/4ertus2))
-- Disable `Poco::AbstractConfiguration` substitutions in query in `clickhouse-client`. [\#6706](https://github.com/ClickHouse/ClickHouse/pull/6706) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Avoid deadlock in `REPLACE PARTITION`. [\#6677](https://github.com/ClickHouse/ClickHouse/pull/6677) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Using `arrayReduce` for constant arguments may lead to segfault. [\#6242](https://github.com/ClickHouse/ClickHouse/issues/6242) [\#6326](https://github.com/ClickHouse/ClickHouse/pull/6326) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Fix inconsistent parts which can appear if replica was restored after `DROP PARTITION`. [\#6522](https://github.com/ClickHouse/ClickHouse/issues/6522) [\#6523](https://github.com/ClickHouse/ClickHouse/pull/6523) ([tavplubix](https://github.com/tavplubix))
-- Fixed hang in `JSONExtractRaw` function. [\#6195](https://github.com/ClickHouse/ClickHouse/issues/6195) [\#6198](https://github.com/ClickHouse/ClickHouse/pull/6198) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Fix bug with incorrect skip indices serialization and aggregation with adaptive granularity. [\#6594](https://github.com/ClickHouse/ClickHouse/issues/6594). [\#6748](https://github.com/ClickHouse/ClickHouse/pull/6748) ([alesapin](https://github.com/alesapin))
-- Fix `WITH ROLLUP` and `WITH CUBE` modifiers of `GROUP BY` with two-level aggregation. [\#6225](https://github.com/ClickHouse/ClickHouse/pull/6225) ([Anton Popov](https://github.com/CurtizJ))
-- Fix bug with writing secondary indices marks with adaptive granularity. [\#6126](https://github.com/ClickHouse/ClickHouse/pull/6126) ([alesapin](https://github.com/alesapin))
-- Fix initialization order during server startup. Since `StorageMergeTree::background_task_handle` is initialized in `startup()`, `MergeTreeBlockOutputStream::write()` may try to use it before initialization. Just check if it is initialized. [\#6080](https://github.com/ClickHouse/ClickHouse/pull/6080) ([Ivan](https://github.com/abyss7))
-- Clear the data buffer from the previous read operation that was completed with an error. [\#6026](https://github.com/ClickHouse/ClickHouse/pull/6026) ([Nikolay](https://github.com/bopohaa))
-- Fix bug with enabling adaptive granularity when creating a new replica for Replicated\*MergeTree table. [\#6394](https://github.com/ClickHouse/ClickHouse/issues/6394) [\#6452](https://github.com/ClickHouse/ClickHouse/pull/6452) ([alesapin](https://github.com/alesapin))
-- Fixed possible crash during server startup in case of exception happened in `libunwind` during exception at access to uninitialized `ThreadStatus` structure. [\#6456](https://github.com/ClickHouse/ClickHouse/pull/6456) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
-- Fix crash in `yandexConsistentHash` function. Found by fuzz test. [\#6304](https://github.com/ClickHouse/ClickHouse/issues/6304) [\#6305](https://github.com/ClickHouse/ClickHouse/pull/6305) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Fixed the possibility of hanging queries when server is overloaded and global thread pool becomes near full. This has a higher chance of happening on clusters with a large number of shards (hundreds), because distributed queries allocate a thread per connection to each shard. For example, this issue may reproduce if a cluster of 330 shards is processing 30 concurrent distributed queries. This issue affects all versions starting from 19.2. [\#6301](https://github.com/ClickHouse/ClickHouse/pull/6301) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Fixed logic of `arrayEnumerateUniqRanked` function. [\#6423](https://github.com/ClickHouse/ClickHouse/pull/6423) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Fix segfault when decoding symbol table. [\#6603](https://github.com/ClickHouse/ClickHouse/pull/6603) ([Amos Bird](https://github.com/amosbird))
-- Fixed irrelevant exception in cast of `LowCardinality(Nullable)` to not-Nullable column in case it doesn’t contain Nulls (e.g. in query like `SELECT CAST(CAST('Hello' AS LowCardinality(Nullable(String))) AS String)`). [\#6094](https://github.com/ClickHouse/ClickHouse/issues/6094) [\#6119](https://github.com/ClickHouse/ClickHouse/pull/6119) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
-- Removed extra quoting of description in `system.settings` table. [\#6696](https://github.com/ClickHouse/ClickHouse/issues/6696) [\#6699](https://github.com/ClickHouse/ClickHouse/pull/6699) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Avoid possible deadlock in `TRUNCATE` of Replicated table. [\#6695](https://github.com/ClickHouse/ClickHouse/pull/6695) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Fix reading in order of sorting key. [\#6189](https://github.com/ClickHouse/ClickHouse/pull/6189) ([Anton Popov](https://github.com/CurtizJ))
-- Fix `ALTER TABLE ... UPDATE` query for tables with `enable_mixed_granularity_parts=1`. [\#6543](https://github.com/ClickHouse/ClickHouse/pull/6543) ([alesapin](https://github.com/alesapin))
-- Fix bug opened by [\#4405](https://github.com/ClickHouse/ClickHouse/pull/4405) (since 19.4.0). Reproduces in queries to Distributed tables over MergeTree tables when no columns are queried (`SELECT 1`). [\#6236](https://github.com/ClickHouse/ClickHouse/pull/6236) ([alesapin](https://github.com/alesapin))
-- Fixed overflow in integer division of signed type by unsigned type. The behaviour was exactly as in C or C++ language (integer promotion rules), which may be surprising. Please note that the overflow is still possible when dividing a large signed number by a large unsigned number or vice versa (but that case is less usual). The issue existed in all server versions. [\#6214](https://github.com/ClickHouse/ClickHouse/issues/6214) [\#6233](https://github.com/ClickHouse/ClickHouse/pull/6233) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Limit maximum sleep time for throttling when `max_execution_speed` or `max_execution_speed_bytes` is set. Fixed false errors like `Estimated query execution time (inf seconds) is too long`. [\#5547](https://github.com/ClickHouse/ClickHouse/issues/5547) [\#6232](https://github.com/ClickHouse/ClickHouse/pull/6232) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Fixed issues about using `MATERIALIZED` columns and aliases in `MaterializedView`. [\#448](https://github.com/ClickHouse/ClickHouse/issues/448) [\#3484](https://github.com/ClickHouse/ClickHouse/issues/3484) [\#3450](https://github.com/ClickHouse/ClickHouse/issues/3450) [\#2878](https://github.com/ClickHouse/ClickHouse/issues/2878) [\#2285](https://github.com/ClickHouse/ClickHouse/issues/2285) [\#3796](https://github.com/ClickHouse/ClickHouse/pull/3796) ([Amos Bird](https://github.com/amosbird)) [\#6316](https://github.com/ClickHouse/ClickHouse/pull/6316) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Fix `FormatFactory` behaviour for input streams which are not implemented as processor. [\#6495](https://github.com/ClickHouse/ClickHouse/pull/6495) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
-- Fixed typo. [\#6631](https://github.com/ClickHouse/ClickHouse/pull/6631) ([Alex Ryndin](https://github.com/alexryndin))
-- Fixed a typo in an error message (is -\> are). [\#6839](https://github.com/ClickHouse/ClickHouse/pull/6839) ([Denis Zhuravlev](https://github.com/den-crane))
-- Fixed error while parsing columns list from string if type contained a comma (this issue was relevant for `File`, `URL`, `HDFS` storages) [\#6217](https://github.com/ClickHouse/ClickHouse/issues/6217). [\#6209](https://github.com/ClickHouse/ClickHouse/pull/6209) ([dimarub2000](https://github.com/dimarub2000))
-
-#### Security Fix {#security-fix}
-
-- This release also contains all security bug fixes from 19.13 and 19.11.
-- Fixed the possibility of a fabricated query to cause server crash due to stack overflow in SQL parser. Fixed the possibility of stack overflow in Merge and Distributed tables, materialized views and conditions for row-level security that involve subqueries. [\#6433](https://github.com/ClickHouse/ClickHouse/pull/6433) ([alexey-milovidov](https://github.com/alexey-milovidov))
-
-#### Improvement {#improvement-3}
-
-- Correct implementation of ternary logic for `AND/OR`. [\#6048](https://github.com/ClickHouse/ClickHouse/pull/6048) ([Alexander Kazakov](https://github.com/Akazz))
-- Now values and rows with expired TTL will be removed after `OPTIMIZE ... FINAL` query from old parts without TTL infos or with outdated TTL infos, e.g. after `ALTER ... MODIFY TTL` query. Added queries `SYSTEM STOP/START TTL MERGES` to disallow/allow assigning merges with TTL and filtering of expired values in all merges. [\#6274](https://github.com/ClickHouse/ClickHouse/pull/6274) ([Anton Popov](https://github.com/CurtizJ))
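For example, TTL merges can be paused and expired rows force-removed (the table name `events` is hypothetical):

```sql
-- Temporarily disallow merges that evict expired TTL values, then re-enable them.
SYSTEM STOP TTL MERGES;
SYSTEM START TTL MERGES;

-- Force removal of already expired rows from old parts.
OPTIMIZE TABLE events FINAL;
```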
-- Possibility to change the location of the ClickHouse history file for client using the `CLICKHOUSE_HISTORY_FILE` env variable. [\#6840](https://github.com/ClickHouse/ClickHouse/pull/6840) ([filimonov](https://github.com/filimonov))
-- Remove `dry_run` flag from `InterpreterSelectQuery`. … [\#6375](https://github.com/ClickHouse/ClickHouse/pull/6375) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
-- Support `ASOF JOIN` with `ON` section. [\#6211](https://github.com/ClickHouse/ClickHouse/pull/6211) ([Artem Zuikov](https://github.com/4ertus2))
-- Better support of skip indexes for mutations and replication. Support for `MATERIALIZE/CLEAR INDEX ... IN PARTITION` query. `UPDATE x = x` recalculates all indices that use column `x`. [\#5053](https://github.com/ClickHouse/ClickHouse/pull/5053) ([Nikita Vasilev](https://github.com/nikvas0))
-- Allow to `ATTACH` live views (for example, at the server startup) regardless of the `allow_experimental_live_view` setting. [\#6754](https://github.com/ClickHouse/ClickHouse/pull/6754) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- For stack traces gathered by query profiler, do not include stack frames generated by the query profiler itself. [\#6250](https://github.com/ClickHouse/ClickHouse/pull/6250) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Now table functions `values`, `file`, `url`, `hdfs` have support for ALIAS columns. [\#6255](https://github.com/ClickHouse/ClickHouse/pull/6255) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Throw an exception if `config.d` file doesn’t have the corresponding root element as the config file. [\#6123](https://github.com/ClickHouse/ClickHouse/pull/6123) ([dimarub2000](https://github.com/dimarub2000))
-- Print extra info in exception message for `no space left on device`. [\#6182](https://github.com/ClickHouse/ClickHouse/issues/6182), [\#6252](https://github.com/ClickHouse/ClickHouse/issues/6252) [\#6352](https://github.com/ClickHouse/ClickHouse/pull/6352) ([tavplubix](https://github.com/tavplubix))
-- When determining shards of a `Distributed` table to be covered by a read query (for `optimize_skip_unused_shards` = 1) ClickHouse now checks conditions from both `prewhere` and `where` clauses of the select statement. [\#6521](https://github.com/ClickHouse/ClickHouse/pull/6521) ([Alexander Kazakov](https://github.com/Akazz))
-- Enabled `SIMDJSON` for machines without AVX2 but with SSE 4.2 and PCLMUL instruction set. [\#6285](https://github.com/ClickHouse/ClickHouse/issues/6285) [\#6320](https://github.com/ClickHouse/ClickHouse/pull/6320) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- ClickHouse can work on filesystems without `O_DIRECT` support (such as ZFS and BtrFS) without additional tuning. [\#4449](https://github.com/ClickHouse/ClickHouse/issues/4449) [\#6730](https://github.com/ClickHouse/ClickHouse/pull/6730) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Support push down predicate for final subquery. [\#6120](https://github.com/ClickHouse/ClickHouse/pull/6120) ([TCeason](https://github.com/TCeason)) [\#6162](https://github.com/ClickHouse/ClickHouse/pull/6162) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Better `JOIN ON` keys extraction. [\#6131](https://github.com/ClickHouse/ClickHouse/pull/6131) ([Artem Zuikov](https://github.com/4ertus2))
-- Updated `SIMDJSON`. [\#6285](https://github.com/ClickHouse/ClickHouse/issues/6285). [\#6306](https://github.com/ClickHouse/ClickHouse/pull/6306) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Optimize selecting of the smallest column for `SELECT count()` query. [\#6344](https://github.com/ClickHouse/ClickHouse/pull/6344) ([Amos Bird](https://github.com/amosbird))
-- Added `strict` parameter in `windowFunnel()`. When `strict` is set, `windowFunnel()` applies conditions only to unique values. [\#6548](https://github.com/ClickHouse/ClickHouse/pull/6548) ([achimbab](https://github.com/achimbab))
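A sketch under the assumption that `strict` is passed as the mode argument; the `events` table and funnel conditions are hypothetical:

```sql
-- Funnel over a 3600-second window; with 'strict', repeated values
-- do not advance the funnel.
SELECT
    user_id,
    windowFunnel(3600, 'strict')(event_time, event = 'view', event = 'cart', event = 'buy') AS level
FROM events
GROUP BY user_id;
```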
-- Safer interface of `mysqlxx::Pool`. [\#6150](https://github.com/ClickHouse/ClickHouse/pull/6150) ([avasiliev](https://github.com/avasiliev))
-- Option lines width when executing with the `--help` option now corresponds to the terminal size. [\#6590](https://github.com/ClickHouse/ClickHouse/pull/6590) ([dimarub2000](https://github.com/dimarub2000))
-- Disable “read in order” optimization for aggregation without keys. [\#6599](https://github.com/ClickHouse/ClickHouse/pull/6599) ([Anton Popov](https://github.com/CurtizJ))
-- HTTP status code for `INCORRECT_DATA` and `TYPE_MISMATCH` error codes was changed from default `500 Internal Server Error` to `400 Bad Request`. [\#6271](https://github.com/ClickHouse/ClickHouse/pull/6271) ([Alexander Rodin](https://github.com/a-rodin))
-- Move Join object from `ExpressionAction` into `AnalyzedJoin`. `ExpressionAnalyzer` and `ExpressionAction` do not know about the `Join` class anymore. Its logic is hidden by the `AnalyzedJoin` interface. [\#6801](https://github.com/ClickHouse/ClickHouse/pull/6801) ([Artem Zuikov](https://github.com/4ertus2))
-- Fixed possible deadlock of distributed queries when one of shards is localhost but the query is sent via network connection. [\#6759](https://github.com/ClickHouse/ClickHouse/pull/6759) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Changed semantic of multiple tables `RENAME` to avoid possible deadlocks. [\#6757](https://github.com/ClickHouse/ClickHouse/issues/6757). [\#6756](https://github.com/ClickHouse/ClickHouse/pull/6756) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Rewritten MySQL compatibility server to prevent loading full packet payload in memory. Decreased memory consumption for each connection to approximately `2 * DBMS_DEFAULT_BUFFER_SIZE` (read/write buffers). [\#5811](https://github.com/ClickHouse/ClickHouse/pull/5811) ([Yuriy Baranov](https://github.com/yurriy))
-- Move AST alias interpreting logic out of parser, which doesn’t have to know anything about query semantics. [\#6108](https://github.com/ClickHouse/ClickHouse/pull/6108) ([Artem Zuikov](https://github.com/4ertus2))
-- Slightly more safe parsing of `NamesAndTypesList`. [\#6408](https://github.com/ClickHouse/ClickHouse/issues/6408). [\#6410](https://github.com/ClickHouse/ClickHouse/pull/6410) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- `clickhouse-copier`: Allow using `where_condition` from config with `partition_key` alias in query for checking partition existence (earlier it was used only in reading data queries). [\#6577](https://github.com/ClickHouse/ClickHouse/pull/6577) ([proller](https://github.com/proller))
-- Added optional message argument in `throwIf`. ([\#5772](https://github.com/ClickHouse/ClickHouse/issues/5772)) [\#6329](https://github.com/ClickHouse/ClickHouse/pull/6329) ([Vdimir](https://github.com/Vdimir))
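For example, a custom message is now reported instead of the generic one:

```sql
-- Fails with the custom message once number = 3 is reached.
SELECT throwIf(number = 3, 'Too many rows') FROM numbers(10);
```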
-- A server exception raised while sending insertion data is now processed in the client as well. [\#5891](https://github.com/ClickHouse/ClickHouse/issues/5891) [\#6711](https://github.com/ClickHouse/ClickHouse/pull/6711) ([dimarub2000](https://github.com/dimarub2000))
-- Added a metric `DistributedFilesToInsert` that shows the total number of files in filesystem that are selected to send to remote servers by Distributed tables. The number is summed across all shards. [\#6600](https://github.com/ClickHouse/ClickHouse/pull/6600) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Move most of JOINs prepare logic from `ExpressionAction/ExpressionAnalyzer` to `AnalyzedJoin`. [\#6785](https://github.com/ClickHouse/ClickHouse/pull/6785) ([Artem Zuikov](https://github.com/4ertus2))
-- Fix TSan [warning](https://clickhouse-test-reports.s3.yandex.net/6399/c1c1d1daa98e199e620766f1bd06a5921050a00d/functional_stateful_tests_(thread).html) ‘lock-order-inversion’. [\#6740](https://github.com/ClickHouse/ClickHouse/pull/6740) ([Vasily Nemkov](https://github.com/Enmk))
-- Better information messages about lack of Linux capabilities. Logging fatal errors with “fatal” level, which will make them easier to find in `system.text_log`. [\#6441](https://github.com/ClickHouse/ClickHouse/pull/6441) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- When dumping temporary data to disk is enabled to restrict memory usage during `GROUP BY`, `ORDER BY`, the free disk space was not checked. The fix adds a new setting `min_free_disk_space`: when the free disk space is smaller than the threshold, the query will stop and throw `ErrorCodes::NOT_ENOUGH_SPACE`. [\#6678](https://github.com/ClickHouse/ClickHouse/pull/6678) ([Weiqing Xu](https://github.com/weiqxu)) [\#6691](https://github.com/ClickHouse/ClickHouse/pull/6691) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Removed recursive rwlock by thread. It makes no sense, because threads are reused between queries. A `SELECT` query may acquire a lock in one thread, hold a lock from another thread and exit from the first thread. At the same time, the first thread can be reused by a `DROP` query. This will lead to false “Attempt to acquire exclusive lock recursively” messages. [\#6771](https://github.com/ClickHouse/ClickHouse/pull/6771) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Split `ExpressionAnalyzer.appendJoin()`. Prepare a place in `ExpressionAnalyzer` for `MergeJoin`. [\#6524](https://github.com/ClickHouse/ClickHouse/pull/6524) ([Artem Zuikov](https://github.com/4ertus2))
-- Added `mysql_native_password` authentication plugin to MySQL compatibility server. [\#6194](https://github.com/ClickHouse/ClickHouse/pull/6194) ([Yuriy Baranov](https://github.com/yurriy))
-- Fewer `clock_gettime` calls; fixed ABI compatibility between debug/release in `Allocator` (insignificant issue). [\#6197](https://github.com/ClickHouse/ClickHouse/pull/6197) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Move `collectUsedColumns` from `ExpressionAnalyzer` to `SyntaxAnalyzer`. `SyntaxAnalyzer` makes `required_source_columns` itself now. [\#6416](https://github.com/ClickHouse/ClickHouse/pull/6416) ([Artem Zuikov](https://github.com/4ertus2))
-- Add setting `joined_subquery_requires_alias` to require aliases for subselects and table functions in `FROM` when more than one table is present (i.e. queries with JOINs). [\#6733](https://github.com/ClickHouse/ClickHouse/pull/6733) ([Artem Zuikov](https://github.com/4ertus2))
-- Extract `GetAggregatesVisitor` class from `ExpressionAnalyzer`. [\#6458](https://github.com/ClickHouse/ClickHouse/pull/6458) ([Artem Zuikov](https://github.com/4ertus2))
-- `system.query_log`: change data type of `type` column to `Enum`. [\#6265](https://github.com/ClickHouse/ClickHouse/pull/6265) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
-- Static linking of `sha256_password` authentication plugin. [\#6512](https://github.com/ClickHouse/ClickHouse/pull/6512) ([Yuriy Baranov](https://github.com/yurriy))
-- Avoid extra dependency for the setting `compile` to work. In previous versions, the user may get errors like `cannot open crti.o`, `unable to find library -lc` etc. [\#6309](https://github.com/ClickHouse/ClickHouse/pull/6309) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- More validation of the input that may come from malicious replica. [\#6303](https://github.com/ClickHouse/ClickHouse/pull/6303) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Now `clickhouse-obfuscator` file is available in `clickhouse-client` package. In previous versions it was available as `clickhouse obfuscator` (with whitespace). [\#5816](https://github.com/ClickHouse/ClickHouse/issues/5816) [\#6609](https://github.com/ClickHouse/ClickHouse/pull/6609) ([dimarub2000](https://github.com/dimarub2000))
-- Fixed deadlock when we have at least two queries that read at least two tables in different order and another query that performs DDL operation on one of tables. Fixed another very rare deadlock. [\#6764](https://github.com/ClickHouse/ClickHouse/pull/6764) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Added `os_thread_ids` column to `system.processes` and `system.query_log` for better debugging possibilities. [\#6763](https://github.com/ClickHouse/ClickHouse/pull/6763) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- A workaround for PHP mysqlnd extension bugs which occur when `sha256_password` is used as a default authentication plugin (described in [\#6031](https://github.com/ClickHouse/ClickHouse/issues/6031)). [\#6113](https://github.com/ClickHouse/ClickHouse/pull/6113) ([Yuriy Baranov](https://github.com/yurriy))
-- Remove unneeded place with changed nullability columns. [\#6693](https://github.com/ClickHouse/ClickHouse/pull/6693) ([Artem Zuikov](https://github.com/4ertus2))
-- Set default value of `queue_max_wait_ms` to zero, because the current value (five seconds) makes no sense. There are rare circumstances when this setting has any use. Added settings `replace_running_query_max_wait_ms`, `kafka_max_wait_ms` and `connection_pool_max_wait_ms` for disambiguation. [\#6692](https://github.com/ClickHouse/ClickHouse/pull/6692) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Extract `SelectQueryExpressionAnalyzer` from `ExpressionAnalyzer`. Keep the latter for non-select queries. [\#6499](https://github.com/ClickHouse/ClickHouse/pull/6499) ([Artem Zuikov](https://github.com/4ertus2))
-- Removed duplicating input and output formats. [\#6239](https://github.com/ClickHouse/ClickHouse/pull/6239) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
-- Allow user to override `poll_interval` and `idle_connection_timeout` settings on connection. [\#6230](https://github.com/ClickHouse/ClickHouse/pull/6230) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- `MergeTree` now has an additional option `ttl_only_drop_parts` (disabled by default) to avoid partial pruning of parts, so that they are dropped completely when all the rows in a part are expired. [\#6191](https://github.com/ClickHouse/ClickHouse/pull/6191) ([Sergi Vladykin](https://github.com/svladykin))
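A sketch of a table that benefits from this option (hypothetical schema): once every row of a part passes the TTL, the whole part is dropped instead of being rewritten.

```sql
CREATE TABLE events
(
    d DateTime,
    payload String
)
ENGINE = MergeTree
ORDER BY d
TTL d + INTERVAL 1 MONTH
SETTINGS ttl_only_drop_parts = 1;
```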
-- Type checks for set index functions. Throw exception if function got a wrong type. This fixes fuzz test with UBSan. [\#6511](https://github.com/ClickHouse/ClickHouse/pull/6511) ([Nikita Vasilev](https://github.com/nikvas0))
-
-#### Performance Improvement {#performance-improvement-2}
-
-- Optimize queries with `ORDER BY expressions` clause, where `expressions` have coinciding prefix with sorting key in `MergeTree` tables. This optimization is controlled by `optimize_read_in_order` setting. [\#6054](https://github.com/ClickHouse/ClickHouse/pull/6054) [\#6629](https://github.com/ClickHouse/ClickHouse/pull/6629) ([Anton Popov](https://github.com/CurtizJ))
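A sketch, assuming a table whose sorting key starts with `(CounterID, EventDate)` (the table name `hits` is hypothetical):

```sql
-- The query can stream data in sorting-key order instead of sorting it.
SET optimize_read_in_order = 1;
SELECT * FROM hits ORDER BY CounterID, EventDate LIMIT 10;
```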
-- Allow to use multiple threads during parts loading and removal. [\#6372](https://github.com/ClickHouse/ClickHouse/issues/6372) [\#6074](https://github.com/ClickHouse/ClickHouse/issues/6074) [\#6438](https://github.com/ClickHouse/ClickHouse/pull/6438) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Implemented batch variant of updating aggregate function states. It may lead to performance benefits. [\#6435](https://github.com/ClickHouse/ClickHouse/pull/6435) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Using `FastOps` library for functions `exp`, `log`, `sigmoid`, `tanh`. FastOps is a fast vector math library from Michael Parakhin (Yandex CTO). Improved performance of `exp` and `log` functions more than 6 times. The functions `exp` and `log` from `Float32` argument will return `Float32` (in previous versions they always returned `Float64`). Now `exp(nan)` may return `inf`. The result of `exp` and `log` functions may not be the nearest machine representable number to the true answer. [\#6254](https://github.com/ClickHouse/ClickHouse/pull/6254) ([alexey-milovidov](https://github.com/alexey-milovidov)) Using Danila Kutenin's variant to make fastops work [\#6317](https://github.com/ClickHouse/ClickHouse/pull/6317) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Disable consecutive key optimization for `UInt8/16`. [\#6298](https://github.com/ClickHouse/ClickHouse/pull/6298) [\#6701](https://github.com/ClickHouse/ClickHouse/pull/6701) ([akuzm](https://github.com/akuzm))
-- Improved performance of `simdjson` library by getting rid of dynamic allocation in `ParsedJson::Iterator`. [\#6479](https://github.com/ClickHouse/ClickHouse/pull/6479) ([Vitaly Baranov](https://github.com/vitlibar))
-- Pre-fault pages when allocating memory with `mmap()`. [\#6667](https://github.com/ClickHouse/ClickHouse/pull/6667) ([akuzm](https://github.com/akuzm))
-- Fix performance bug in `Decimal` comparison. [\#6380](https://github.com/ClickHouse/ClickHouse/pull/6380) ([Artem Zuikov](https://github.com/4ertus2))
-
-#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-4}
-
-- Remove Compiler (runtime template instantiation) because we’ve won over its performance. [\#6646](https://github.com/ClickHouse/ClickHouse/pull/6646) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Added performance test to show degradation of performance in gcc-9 in a more isolated way. [\#6302](https://github.com/ClickHouse/ClickHouse/pull/6302) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Added table function `numbers_mt`, which is a multithreaded version of `numbers`. Updated performance tests with hash functions. [\#6554](https://github.com/ClickHouse/ClickHouse/pull/6554) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
-- Comparison mode in `clickhouse-benchmark`. [\#6220](https://github.com/ClickHouse/ClickHouse/issues/6220) [\#6343](https://github.com/ClickHouse/ClickHouse/pull/6343) ([dimarub2000](https://github.com/dimarub2000))
-- Best effort for printing stack traces. Also added `SIGPROF` as a debugging signal to print stack trace of a running thread. [\#6529](https://github.com/ClickHouse/ClickHouse/pull/6529) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Every function in its own file, part 10. [\#6321](https://github.com/ClickHouse/ClickHouse/pull/6321) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Remove doubled const `TABLE_IS_READ_ONLY`. [\#6566](https://github.com/ClickHouse/ClickHouse/pull/6566) ([filimonov](https://github.com/filimonov))
-- Formatting changes for `StringHashMap` PR [\#5417](https://github.com/ClickHouse/ClickHouse/issues/5417). [\#6700](https://github.com/ClickHouse/ClickHouse/pull/6700) ([akuzm](https://github.com/akuzm))
-- Better subquery for join creation in `ExpressionAnalyzer`. [\#6824](https://github.com/ClickHouse/ClickHouse/pull/6824) ([Artem Zuikov](https://github.com/4ertus2))
-- Remove a redundant condition (found by PVS Studio). [\#6775](https://github.com/ClickHouse/ClickHouse/pull/6775) ([akuzm](https://github.com/akuzm))
-- Separate the hash table interface for `ReverseIndex`. [\#6672](https://github.com/ClickHouse/ClickHouse/pull/6672) ([akuzm](https://github.com/akuzm))
-- Refactoring of settings. [\#6689](https://github.com/ClickHouse/ClickHouse/pull/6689) ([alesapin](https://github.com/alesapin))
-- Add comments for `set` index functions. [\#6319](https://github.com/ClickHouse/ClickHouse/pull/6319) ([Nikita Vasilev](https://github.com/nikvas0))
-- Increase OOM score in debug version on Linux. [\#6152](https://github.com/ClickHouse/ClickHouse/pull/6152) ([akuzm](https://github.com/akuzm))
-- HDFS HA now works in debug build. [\#6650](https://github.com/ClickHouse/ClickHouse/pull/6650) ([Weiqing Xu](https://github.com/weiqxu))
-- Added a test to `transform_query_for_external_database`. [\#6388](https://github.com/ClickHouse/ClickHouse/pull/6388) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Add test for multiple materialized views for Kafka table. [\#6509](https://github.com/ClickHouse/ClickHouse/pull/6509) ([Ivan](https://github.com/abyss7))
-- Make a better build scheme. [\#6500](https://github.com/ClickHouse/ClickHouse/pull/6500) ([Ivan](https://github.com/abyss7))
-- Fixed `test_external_dictionaries` integration test in case it was executed under non-root user. [\#6507](https://github.com/ClickHouse/ClickHouse/pull/6507) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
-- The bug reproduces when total size of written packets exceeds `DBMS_DEFAULT_BUFFER_SIZE`. [\#6204](https://github.com/ClickHouse/ClickHouse/pull/6204) ([Yuriy Baranov](https://github.com/yurriy))
-- Added a test for `RENAME` table race condition. [\#6752](https://github.com/ClickHouse/ClickHouse/pull/6752) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Avoid data race on Settings in `KILL QUERY`. [\#6753](https://github.com/ClickHouse/ClickHouse/pull/6753) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Add integration test for handling errors by a cache dictionary. [\#6755](https://github.com/ClickHouse/ClickHouse/pull/6755) ([Vitaly Baranov](https://github.com/vitlibar))
-- Disable parsing of ELF object files on Mac OS, because it makes no sense. [\#6578](https://github.com/ClickHouse/ClickHouse/pull/6578) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Attempt to make changelog generator better. [\#6327](https://github.com/ClickHouse/ClickHouse/pull/6327) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Adding `-Wshadow` switch to the GCC. [\#6325](https://github.com/ClickHouse/ClickHouse/pull/6325) ([kreuzerkrieg](https://github.com/kreuzerkrieg))
-- Removed obsolete code for `mimalloc` support. [\#6715](https://github.com/ClickHouse/ClickHouse/pull/6715) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- `zlib-ng` determines x86 capabilities and saves this info to global variables. This is done in the `deflateInit` call, which may be made by different threads simultaneously. To avoid multithreaded writes, do it on library startup. [\#6141](https://github.com/ClickHouse/ClickHouse/pull/6141) ([akuzm](https://github.com/akuzm))
-- Regression test for a bug in join which was fixed in [\#5192](https://github.com/ClickHouse/ClickHouse/issues/5192). [\#6147](https://github.com/ClickHouse/ClickHouse/pull/6147) ([Bakhtiyor Ruziev](https://github.com/theruziev))
-- Fixed MSan report. [\#6144](https://github.com/ClickHouse/ClickHouse/pull/6144) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Fix flapping TTL test. [\#6782](https://github.com/ClickHouse/ClickHouse/pull/6782) ([Anton Popov](https://github.com/CurtizJ))
-- Fixed false data race in `MergeTreeDataPart::is_frozen` field. [\#6583](https://github.com/ClickHouse/ClickHouse/pull/6583) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Fixed timeouts in fuzz test. In previous version, it managed to find false hangup in query `SELECT * FROM numbers_mt(gccMurmurHash(''))`. [\#6582](https://github.com/ClickHouse/ClickHouse/pull/6582) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Added debug checks to `static_cast` of columns. [\#6581](https://github.com/ClickHouse/ClickHouse/pull/6581) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Support for Oracle Linux in official RPM packages. [\#6356](https://github.com/ClickHouse/ClickHouse/issues/6356) [\#6585](https://github.com/ClickHouse/ClickHouse/pull/6585) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Changed json perftests from `once` to `loop` type. [\#6536](https://github.com/ClickHouse/ClickHouse/pull/6536) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
-- `odbc-bridge.cpp` defines `main()` so it should not be included in `clickhouse-lib`. [\#6538](https://github.com/ClickHouse/ClickHouse/pull/6538) ([Orivej Desh](https://github.com/orivej))
-- Test for crash in `FULL|RIGHT JOIN` with nulls in right table’s keys. [\#6362](https://github.com/ClickHouse/ClickHouse/pull/6362) ([Artem Zuikov](https://github.com/4ertus2))
-- Added a test for the limit on expansion of aliases, just in case. [\#6442](https://github.com/ClickHouse/ClickHouse/pull/6442) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Switched from `boost::filesystem` to `std::filesystem` where appropriate. [\#6253](https://github.com/ClickHouse/ClickHouse/pull/6253) [\#6385](https://github.com/ClickHouse/ClickHouse/pull/6385) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Added RPM packages to website. [\#6251](https://github.com/ClickHouse/ClickHouse/pull/6251) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Add a test for fixed `Unknown identifier` exception in `IN` section. [\#6708](https://github.com/ClickHouse/ClickHouse/pull/6708) ([Artem Zuikov](https://github.com/4ertus2))
-- Simplify `shared_ptr_helper` because people faced difficulties understanding it. [\#6675](https://github.com/ClickHouse/ClickHouse/pull/6675) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Added performance tests for fixed Gorilla and DoubleDelta codec. [\#6179](https://github.com/ClickHouse/ClickHouse/pull/6179) ([Vasily Nemkov](https://github.com/Enmk))
-- Split the integration test `test_dictionaries` into 4 separate tests. [\#6776](https://github.com/ClickHouse/ClickHouse/pull/6776) ([Vitaly Baranov](https://github.com/vitlibar))
-- Fix PVS-Studio warning in `PipelineExecutor`. [\#6777](https://github.com/ClickHouse/ClickHouse/pull/6777) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
-- Allow to use `library` dictionary source with ASan. [\#6482](https://github.com/ClickHouse/ClickHouse/pull/6482) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Added option to generate changelog from a list of PRs. [\#6350](https://github.com/ClickHouse/ClickHouse/pull/6350) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Lock the `TinyLog` storage when reading. [\#6226](https://github.com/ClickHouse/ClickHouse/pull/6226) ([akuzm](https://github.com/akuzm))
-- Check for broken symlinks in CI. [\#6634](https://github.com/ClickHouse/ClickHouse/pull/6634) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed tests affected by slow stack trace printing. [\#6315](https://github.com/ClickHouse/ClickHouse/pull/6315) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Add a test case for crash in `groupUniqArray` fixed in [\#6029](https://github.com/ClickHouse/ClickHouse/pull/6029). [\#4402](https://github.com/ClickHouse/ClickHouse/issues/4402) [\#6129](https://github.com/ClickHouse/ClickHouse/pull/6129) ([akuzm](https://github.com/akuzm))
- Fixed indices mutation tests. [\#6645](https://github.com/ClickHouse/ClickHouse/pull/6645) ([Nikita Vasilev](https://github.com/nikvas0))
- In performance test, do not read the query log for queries we didn’t run. [\#6427](https://github.com/ClickHouse/ClickHouse/pull/6427) ([akuzm](https://github.com/akuzm))
- Materialized views can now be created with any LowCardinality types, regardless of the setting about suspicious LowCardinality types. [\#6428](https://github.com/ClickHouse/ClickHouse/pull/6428) ([Olga Khvostikova](https://github.com/stavrolia))
- Updated tests for the `send_logs_level` setting. [\#6207](https://github.com/ClickHouse/ClickHouse/pull/6207) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fix build under gcc-8.2. [\#6196](https://github.com/ClickHouse/ClickHouse/pull/6196) ([Max Akhmedov](https://github.com/zlobober))
- Fix build with internal libc++. [\#6724](https://github.com/ClickHouse/ClickHouse/pull/6724) ([Ivan](https://github.com/abyss7))
- Fix shared build with the `rdkafka` library. [\#6101](https://github.com/ClickHouse/ClickHouse/pull/6101) ([Ivan](https://github.com/abyss7))
- Fixes for Mac OS build (incomplete). [\#6390](https://github.com/ClickHouse/ClickHouse/pull/6390) ([alexey-milovidov](https://github.com/alexey-milovidov)) [\#6429](https://github.com/ClickHouse/ClickHouse/pull/6429) ([alex-zaitsev](https://github.com/alex-zaitsev))
- Fix “splitted” build. [\#6618](https://github.com/ClickHouse/ClickHouse/pull/6618) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Other build fixes: [\#6186](https://github.com/ClickHouse/ClickHouse/pull/6186) ([Amos Bird](https://github.com/amosbird)) [\#6486](https://github.com/ClickHouse/ClickHouse/pull/6486) [\#6348](https://github.com/ClickHouse/ClickHouse/pull/6348) ([vxider](https://github.com/Vxider)) [\#6744](https://github.com/ClickHouse/ClickHouse/pull/6744) ([Ivan](https://github.com/abyss7)) [\#6016](https://github.com/ClickHouse/ClickHouse/pull/6016) [\#6421](https://github.com/ClickHouse/ClickHouse/pull/6421) [\#6491](https://github.com/ClickHouse/ClickHouse/pull/6491) ([proller](https://github.com/proller))

#### Backward Incompatible Change {#backward-incompatible-change-3}

- Removed rarely used table function `catBoostPool` and storage `CatBoostPool`. If you have used this table function, please write an email to `clickhouse-feedback@yandex-team.com`. Note that CatBoost integration remains and will be supported. [\#6279](https://github.com/ClickHouse/ClickHouse/pull/6279) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Disable `ANY RIGHT JOIN` and `ANY FULL JOIN` by default. Set the `any_join_distinct_right_table_keys` setting to enable them. [\#5126](https://github.com/ClickHouse/ClickHouse/issues/5126) [\#6351](https://github.com/ClickHouse/ClickHouse/pull/6351) ([Artem Zuikov](https://github.com/4ertus2))
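A minimal sketch of the setting from the last item above; the table and column names are hypothetical:

```sql
-- Re-enable the old ANY RIGHT/FULL JOIN behaviour for the current session.
SET any_join_distinct_right_table_keys = 1;

SELECT l.key, r.value
FROM left_table AS l
ANY RIGHT JOIN right_table AS r ON l.key = r.key;
```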

## ClickHouse release 19.13 {#clickhouse-release-19-13}

### ClickHouse release 19.13.6.51, 2019-10-02 {#clickhouse-release-19-13-6-51-2019-10-02}

#### Bug Fix {#bug-fix-9}

- This release also contains all bug fixes from 19.11.12.69.

### ClickHouse release 19.13.5.44, 2019-09-20 {#clickhouse-release-19-13-5-44-2019-09-20}

#### Bug Fix {#bug-fix-10}

- This release also contains all bug fixes from 19.14.6.12.
- Fixed possible inconsistent state of a table while executing a `DROP` query for a replicated table while ZooKeeper is not accessible. [\#6045](https://github.com/ClickHouse/ClickHouse/issues/6045) [\#6413](https://github.com/ClickHouse/ClickHouse/pull/6413) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
- Fix for data race in StorageMerge. [\#6717](https://github.com/ClickHouse/ClickHouse/pull/6717) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a bug introduced in the query profiler which led to endless `recv` from a socket. [\#6386](https://github.com/ClickHouse/ClickHouse/pull/6386) ([alesapin](https://github.com/alesapin))
- Fix excessive CPU usage while executing the `JSONExtractRaw` function over a boolean value. [\#6208](https://github.com/ClickHouse/ClickHouse/pull/6208) ([Vitaly Baranov](https://github.com/vitlibar))
- Fixed a regression while pushing to a materialized view. [\#6415](https://github.com/ClickHouse/ClickHouse/pull/6415) ([Ivan](https://github.com/abyss7))
- Table function `url` had a vulnerability which allowed the attacker to inject arbitrary HTTP headers in the request. This issue was found by [Nikita Tikhomirov](https://github.com/NSTikhomirov). [\#6466](https://github.com/ClickHouse/ClickHouse/pull/6466) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix useless `AST` check in Set index. [\#6510](https://github.com/ClickHouse/ClickHouse/issues/6510) [\#6651](https://github.com/ClickHouse/ClickHouse/pull/6651) ([Nikita Vasilev](https://github.com/nikvas0))
- Fixed parsing of `AggregateFunction` values embedded in a query. [\#6575](https://github.com/ClickHouse/ClickHouse/issues/6575) [\#6773](https://github.com/ClickHouse/ClickHouse/pull/6773) ([Zhichang Yu](https://github.com/yuzhichang))
- Fixed wrong behaviour of the `trim` functions family. [\#6647](https://github.com/ClickHouse/ClickHouse/pull/6647) ([alexey-milovidov](https://github.com/alexey-milovidov))
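A quick illustration of the `trim` family mentioned in the last item (the input strings are made up):

```sql
-- trimLeft / trimRight / trimBoth strip whitespace from the respective sides.
SELECT
    trimLeft('  hello  '),   -- 'hello  '
    trimRight('  hello  '),  -- '  hello'
    trimBoth('  hello  ');   -- 'hello'
```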

### ClickHouse release 19.13.4.32, 2019-09-10 {#clickhouse-release-19-13-4-32-2019-09-10}

#### Bug Fix {#bug-fix-11}

- This release also contains all bug and security fixes from 19.11.9.52 and 19.11.10.54.
- Fixed data race in the `system.parts` table and `ALTER` query. [\#6245](https://github.com/ClickHouse/ClickHouse/issues/6245) [\#6513](https://github.com/ClickHouse/ClickHouse/pull/6513) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed mismatched stream headers that happened when reading from an empty distributed table with sample and prewhere. [\#6167](https://github.com/ClickHouse/ClickHouse/issues/6167) ([Lixiang Qian](https://github.com/fancyqlx)) [\#6823](https://github.com/ClickHouse/ClickHouse/pull/6823) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fixed crash when using an `IN` clause with a subquery with a tuple. [\#6125](https://github.com/ClickHouse/ClickHouse/issues/6125) [\#6550](https://github.com/ClickHouse/ClickHouse/pull/6550) ([tavplubix](https://github.com/tavplubix))
- Fix case with same column names in the `GLOBAL JOIN ON` section. [\#6181](https://github.com/ClickHouse/ClickHouse/pull/6181) ([Artem Zuikov](https://github.com/4ertus2))
- Fix crash when casting types to `Decimal` that do not support it. Throw an exception instead. [\#6297](https://github.com/ClickHouse/ClickHouse/pull/6297) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed crash in the `extractAll()` function. [\#6644](https://github.com/ClickHouse/ClickHouse/pull/6644) ([Artem Zuikov](https://github.com/4ertus2))
- Query transformation for `MySQL`, `ODBC`, `JDBC` table functions now works properly for `SELECT WHERE` queries with multiple `AND` expressions. [\#6381](https://github.com/ClickHouse/ClickHouse/issues/6381) [\#6676](https://github.com/ClickHouse/ClickHouse/pull/6676) ([dimarub2000](https://github.com/dimarub2000))
- Added previous declaration checks for MySQL 8 integration. [\#6569](https://github.com/ClickHouse/ClickHouse/pull/6569) ([Rafael David Tinoco](https://github.com/rafaeldtinoco))

#### Security Fix {#security-fix-1}

- Fix two vulnerabilities in codecs in the decompression phase (a malicious user can fabricate compressed data that will lead to buffer overflow in decompression). [\#6670](https://github.com/ClickHouse/ClickHouse/pull/6670) ([Artem Zuikov](https://github.com/4ertus2))

### ClickHouse release 19.13.3.26, 2019-08-22 {#clickhouse-release-19-13-3-26-2019-08-22}

#### Bug Fix {#bug-fix-12}

- Fix `ALTER TABLE ... UPDATE` query for tables with `enable_mixed_granularity_parts=1`. [\#6543](https://github.com/ClickHouse/ClickHouse/pull/6543) ([alesapin](https://github.com/alesapin))
- Fix NPE when using an IN clause with a subquery with a tuple. [\#6125](https://github.com/ClickHouse/ClickHouse/issues/6125) [\#6550](https://github.com/ClickHouse/ClickHouse/pull/6550) ([tavplubix](https://github.com/tavplubix))
- Fixed an issue that if a stale replica becomes alive, it may still have data parts that were removed by DROP PARTITION. [\#6522](https://github.com/ClickHouse/ClickHouse/issues/6522) [\#6523](https://github.com/ClickHouse/ClickHouse/pull/6523) ([tavplubix](https://github.com/tavplubix))
- Fixed issue with parsing CSV. [\#6426](https://github.com/ClickHouse/ClickHouse/issues/6426) [\#6559](https://github.com/ClickHouse/ClickHouse/pull/6559) ([tavplubix](https://github.com/tavplubix))
- Fixed data race in the system.parts table and `ALTER` query. This fixes [\#6245](https://github.com/ClickHouse/ClickHouse/issues/6245). [\#6513](https://github.com/ClickHouse/ClickHouse/pull/6513) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed wrong code in mutations that may lead to memory corruption. Fixed a segfault with read of address `0x14c0` that may happen due to concurrent `DROP TABLE` and `SELECT` from `system.parts` or `system.parts_columns`. Fixed a race condition in preparation of mutation queries. Fixed a deadlock caused by `OPTIMIZE` of Replicated tables and concurrent modification operations like ALTERs. [\#6514](https://github.com/ClickHouse/ClickHouse/pull/6514) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed possible data loss after an `ALTER DELETE` query on a table with a skipping index. [\#6224](https://github.com/ClickHouse/ClickHouse/issues/6224) [\#6282](https://github.com/ClickHouse/ClickHouse/pull/6282) ([Nikita Vasilev](https://github.com/nikvas0))
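To illustrate the scenario from the last item, a minimal sketch of a table with a skipping index and a mutation on it (all names and index parameters are hypothetical; on 19.x, skipping indices may additionally require `allow_experimental_data_skipping_indices = 1`):

```sql
CREATE TABLE t
(
    `x` UInt32,
    `s` String,
    INDEX idx_s s TYPE set(100) GRANULARITY 4
)
ENGINE = MergeTree
ORDER BY x;

-- A mutation of this shape could previously lose data on tables with a skipping index.
ALTER TABLE t DELETE WHERE x < 100;
```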

#### Security Fix {#security-fix-2}

- If the attacker has write access to ZooKeeper and is able to run a custom server available from the network where ClickHouse runs, it can create a custom-built malicious server that will act as a ClickHouse replica and register it in ZooKeeper. When another replica fetches a data part from the malicious replica, it can force clickhouse-server to write to an arbitrary path on the filesystem. Found by Eldar Zaitov, information security team at Yandex. [\#6247](https://github.com/ClickHouse/ClickHouse/pull/6247) ([alexey-milovidov](https://github.com/alexey-milovidov))

### ClickHouse release 19.13.2.19, 2019-08-14 {#clickhouse-release-19-13-2-19-2019-08-14}

#### New Feature {#new-feature-5}

- Sampling profiler on query level. [Example](https://gist.github.com/alexey-milovidov/92758583dd41c24c360fdb8d6a4da194). [\#4247](https://github.com/ClickHouse/ClickHouse/issues/4247) ([laplab](https://github.com/laplab)) [\#6124](https://github.com/ClickHouse/ClickHouse/pull/6124) ([alexey-milovidov](https://github.com/alexey-milovidov)) [\#6250](https://github.com/ClickHouse/ClickHouse/pull/6250) [\#6283](https://github.com/ClickHouse/ClickHouse/pull/6283) [\#6386](https://github.com/ClickHouse/ClickHouse/pull/6386)
- Allow specifying a list of columns with the `COLUMNS('regexp')` expression that works like a more sophisticated variant of the `*` asterisk. [\#5951](https://github.com/ClickHouse/ClickHouse/pull/5951) ([mfridental](https://github.com/mfridental)), ([alexey-milovidov](https://github.com/alexey-milovidov))
- `CREATE TABLE AS table_function()` is now possible. [\#6057](https://github.com/ClickHouse/ClickHouse/pull/6057) ([dimarub2000](https://github.com/dimarub2000))
- Adam optimizer for stochastic gradient descent is used by default in the `stochasticLinearRegression()` and `stochasticLogisticRegression()` aggregate functions, because it shows good quality with almost no tuning. [\#6000](https://github.com/ClickHouse/ClickHouse/pull/6000) ([Quid37](https://github.com/Quid37))
- Added functions for working with custom week numbers. [\#5212](https://github.com/ClickHouse/ClickHouse/pull/5212) ([Andy Yang](https://github.com/andyyzh))
- `RENAME` queries now work with all storages. [\#5953](https://github.com/ClickHouse/ClickHouse/pull/5953) ([Ivan](https://github.com/abyss7))
- Now the client receives logs from the server at any desired level by setting `send_logs_level`, regardless of the log level specified in the server settings. [\#5964](https://github.com/ClickHouse/ClickHouse/pull/5964) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))

#### Backward Incompatible Change {#backward-incompatible-change-4}

- The setting `input_format_defaults_for_omitted_fields` is enabled by default. Inserts into Distributed tables need this setting to be the same across the cluster (you need to set it before a rolling update). It enables calculation of complex default expressions for omitted fields in `JSONEachRow` and `CSV*` formats. It should be the expected behavior but may lead to a negligible performance difference. [\#6043](https://github.com/ClickHouse/ClickHouse/pull/6043) ([Artem Zuikov](https://github.com/4ertus2)), [\#5625](https://github.com/ClickHouse/ClickHouse/pull/5625) ([akuzm](https://github.com/akuzm))
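A small sketch of what the newly enabled setting changes (the table and values are hypothetical): an omitted field with a default expression is now computed instead of being filled with a zero value.

```sql
CREATE TABLE visits
(
    `ts` DateTime,
    `day` Date DEFAULT toDate(ts)
)
ENGINE = MergeTree
ORDER BY ts;

-- With input_format_defaults_for_omitted_fields = 1, the omitted `day`
-- column is computed from `ts` via its DEFAULT expression.
INSERT INTO visits FORMAT JSONEachRow {"ts": "2019-08-14 10:00:00"}
```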

#### Experimental features {#experimental-features}

- New query processing pipeline. Use the `experimental_use_processors=1` option to enable it. Use it at your own risk. [\#4914](https://github.com/ClickHouse/ClickHouse/pull/4914) ([Nikolai Kochetov](https://github.com/KochetovNicolai))

#### Bug Fix {#bug-fix-13}

- Kafka integration has been fixed in this version.
- Fixed `DoubleDelta` encoding of `Int64` for large `DoubleDelta` values, improved `DoubleDelta` encoding for random data for `Int32`. [\#5998](https://github.com/ClickHouse/ClickHouse/pull/5998) ([Vasily Nemkov](https://github.com/Enmk))
- Fixed overestimation of `max_rows_to_read` if the setting `merge_tree_uniform_read_distribution` is set to 0. [\#6019](https://github.com/ClickHouse/ClickHouse/pull/6019) ([alexey-milovidov](https://github.com/alexey-milovidov))

#### Improvement {#improvement-4}

- Throw an exception if a `config.d` file doesn’t have the same root element as the main config file. [\#6123](https://github.com/ClickHouse/ClickHouse/pull/6123) ([dimarub2000](https://github.com/dimarub2000))

#### Performance Improvement {#performance-improvement-3}

- Optimize `count()`. Now it uses the smallest column (if possible). [\#6028](https://github.com/ClickHouse/ClickHouse/pull/6028) ([Amos Bird](https://github.com/amosbird))

#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-5}

- Report memory usage in performance tests. [\#5899](https://github.com/ClickHouse/ClickHouse/pull/5899) ([akuzm](https://github.com/akuzm))
- Fix build with external `libcxx`. [\#6010](https://github.com/ClickHouse/ClickHouse/pull/6010) ([Ivan](https://github.com/abyss7))
- Fix shared build with the `rdkafka` library. [\#6101](https://github.com/ClickHouse/ClickHouse/pull/6101) ([Ivan](https://github.com/abyss7))

## ClickHouse release 19.11 {#clickhouse-release-19-11}

### ClickHouse release 19.11.13.74, 2019-11-01 {#clickhouse-release-19-11-13-74-2019-11-01}

#### Bug Fix {#bug-fix-14}

- Fixed rare crash in `ALTER MODIFY COLUMN` and vertical merge when one of the merged/altered parts is empty (0 rows). [\#6780](https://github.com/ClickHouse/ClickHouse/pull/6780) ([alesapin](https://github.com/alesapin))
- Manual update of `SIMDJSON`. This fixes possible flooding of stderr files with bogus JSON diagnostic messages. [\#7548](https://github.com/ClickHouse/ClickHouse/pull/7548) ([Alexander Kazakov](https://github.com/Akazz))
- Fixed bug with `mrk` file extension for mutations ([alesapin](https://github.com/alesapin))

### ClickHouse release 19.11.12.69, 2019-10-02 {#clickhouse-release-19-11-12-69-2019-10-02}

#### Bug Fix {#bug-fix-15}

- Fixed performance degradation of index analysis on complex keys on large tables. This fixes [\#6924](https://github.com/ClickHouse/ClickHouse/issues/6924). [\#7075](https://github.com/ClickHouse/ClickHouse/pull/7075) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Avoid rare SIGSEGV while sending data in tables with Distributed engine (`Failed to send batch: file with index XXXXX is absent`). [\#7032](https://github.com/ClickHouse/ClickHouse/pull/7032) ([Azat Khuzhin](https://github.com/azat))
- Fix `Unknown identifier` with multiple joins. This fixes [\#5254](https://github.com/ClickHouse/ClickHouse/issues/5254). [\#7022](https://github.com/ClickHouse/ClickHouse/pull/7022) ([Artem Zuikov](https://github.com/4ertus2))

### ClickHouse release 19.11.11.57, 2019-09-13 {#clickhouse-release-19-11-11-57-2019-09-13}

- Fix logical error causing segfaults when selecting from an empty Kafka topic. [\#6902](https://github.com/ClickHouse/ClickHouse/issues/6902) [\#6909](https://github.com/ClickHouse/ClickHouse/pull/6909) ([Ivan](https://github.com/abyss7))
- Fix for function `arrayEnumerateUniqRanked` with empty arrays in params. [\#6928](https://github.com/ClickHouse/ClickHouse/pull/6928) ([proller](https://github.com/proller))

### ClickHouse release 19.11.10.54, 2019-09-10 {#clickhouse-release-19-11-10-54-2019-09-10}

#### Bug Fix {#bug-fix-16}

- Store offsets for Kafka messages manually to be able to commit them all at once for all partitions. Fixes potential duplication in the “one consumer - many partitions” scenario. [\#6872](https://github.com/ClickHouse/ClickHouse/pull/6872) ([Ivan](https://github.com/abyss7))

### ClickHouse release 19.11.9.52, 2019-09-6 {#clickhouse-release-19-11-9-52-2019-09-6}

- Improve error handling in cache dictionaries. [\#6737](https://github.com/ClickHouse/ClickHouse/pull/6737) ([Vitaly Baranov](https://github.com/vitlibar))
- Fixed bug in function `arrayEnumerateUniqRanked`. [\#6779](https://github.com/ClickHouse/ClickHouse/pull/6779) ([proller](https://github.com/proller))
- Fix the `JSONExtract` function while extracting a `Tuple` from JSON. [\#6718](https://github.com/ClickHouse/ClickHouse/pull/6718) ([Vitaly Baranov](https://github.com/vitlibar))
- Fixed possible data loss after an `ALTER DELETE` query on a table with a skipping index. [\#6224](https://github.com/ClickHouse/ClickHouse/issues/6224) [\#6282](https://github.com/ClickHouse/ClickHouse/pull/6282) ([Nikita Vasilev](https://github.com/nikvas0))
- Fixed performance test. [\#6392](https://github.com/ClickHouse/ClickHouse/pull/6392) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Parquet: Fix reading boolean columns. [\#6579](https://github.com/ClickHouse/ClickHouse/pull/6579) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed wrong behaviour of the `nullIf` function for constant arguments. [\#6518](https://github.com/ClickHouse/ClickHouse/pull/6518) ([Guillaume Tassery](https://github.com/YiuRULE)) [\#6580](https://github.com/ClickHouse/ClickHouse/pull/6580) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix Kafka messages duplication problem on normal server restart. [\#6597](https://github.com/ClickHouse/ClickHouse/pull/6597) ([Ivan](https://github.com/abyss7))
- Fixed an issue when a long `ALTER UPDATE` or `ALTER DELETE` may prevent regular merges from running. Prevent mutations from executing if there are not enough free threads available. [\#6502](https://github.com/ClickHouse/ClickHouse/issues/6502) [\#6617](https://github.com/ClickHouse/ClickHouse/pull/6617) ([tavplubix](https://github.com/tavplubix))
- Fixed error with processing “timezone” in the server configuration file. [\#6709](https://github.com/ClickHouse/ClickHouse/pull/6709) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix Kafka tests. [\#6805](https://github.com/ClickHouse/ClickHouse/pull/6805) ([Ivan](https://github.com/abyss7))

#### Security Fix {#security-fix-3}

- If the attacker has write access to ZooKeeper and is able to run a custom server available from the network where ClickHouse runs, it can create a custom-built malicious server that will act as a ClickHouse replica and register it in ZooKeeper. When another replica fetches a data part from the malicious replica, it can force clickhouse-server to write to an arbitrary path on the filesystem. Found by Eldar Zaitov, information security team at Yandex. [\#6247](https://github.com/ClickHouse/ClickHouse/pull/6247) ([alexey-milovidov](https://github.com/alexey-milovidov))

### ClickHouse release 19.11.8.46, 2019-08-22 {#clickhouse-release-19-11-8-46-2019-08-22}

#### Bug Fix {#bug-fix-17}

- Fix `ALTER TABLE ... UPDATE` query for tables with `enable_mixed_granularity_parts=1`. [\#6543](https://github.com/ClickHouse/ClickHouse/pull/6543) ([alesapin](https://github.com/alesapin))
- Fix NPE when using an IN clause with a subquery with a tuple. [\#6125](https://github.com/ClickHouse/ClickHouse/issues/6125) [\#6550](https://github.com/ClickHouse/ClickHouse/pull/6550) ([tavplubix](https://github.com/tavplubix))
- Fixed an issue that if a stale replica becomes alive, it may still have data parts that were removed by DROP PARTITION. [\#6522](https://github.com/ClickHouse/ClickHouse/issues/6522) [\#6523](https://github.com/ClickHouse/ClickHouse/pull/6523) ([tavplubix](https://github.com/tavplubix))
- Fixed issue with parsing CSV. [\#6426](https://github.com/ClickHouse/ClickHouse/issues/6426) [\#6559](https://github.com/ClickHouse/ClickHouse/pull/6559) ([tavplubix](https://github.com/tavplubix))
- Fixed data race in the system.parts table and `ALTER` query. This fixes [\#6245](https://github.com/ClickHouse/ClickHouse/issues/6245). [\#6513](https://github.com/ClickHouse/ClickHouse/pull/6513) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed wrong code in mutations that may lead to memory corruption. Fixed a segfault with read of address `0x14c0` that may happen due to concurrent `DROP TABLE` and `SELECT` from `system.parts` or `system.parts_columns`. Fixed a race condition in preparation of mutation queries. Fixed a deadlock caused by `OPTIMIZE` of Replicated tables and concurrent modification operations like ALTERs. [\#6514](https://github.com/ClickHouse/ClickHouse/pull/6514) ([alexey-milovidov](https://github.com/alexey-milovidov))

### ClickHouse release 19.11.7.40, 2019-08-14 {#clickhouse-release-19-11-7-40-2019-08-14}

#### Bug fix {#bug-fix-18}

- Kafka integration has been fixed in this version.
- Fix segfault when using `arrayReduce` for constant arguments. [\#6326](https://github.com/ClickHouse/ClickHouse/pull/6326) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed `toFloat()` monotonicity. [\#6374](https://github.com/ClickHouse/ClickHouse/pull/6374) ([dimarub2000](https://github.com/dimarub2000))
- Fix segfault with enabled `optimize_skip_unused_shards` and a missing sharding key. [\#6384](https://github.com/ClickHouse/ClickHouse/pull/6384) ([CurtizJ](https://github.com/CurtizJ))
- Fixed logic of the `arrayEnumerateUniqRanked` function. [\#6423](https://github.com/ClickHouse/ClickHouse/pull/6423) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Removed extra verbose logging from the MySQL handler. [\#6389](https://github.com/ClickHouse/ClickHouse/pull/6389) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix wrong behavior and possible segfaults in the `topK` and `topKWeighted` aggregate functions. [\#6404](https://github.com/ClickHouse/ClickHouse/pull/6404) ([CurtizJ](https://github.com/CurtizJ))
- Do not expose virtual columns in the `system.columns` table. This is required for backward compatibility. [\#6406](https://github.com/ClickHouse/ClickHouse/pull/6406) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix bug with memory allocation for string fields in a complex key cache dictionary. [\#6447](https://github.com/ClickHouse/ClickHouse/pull/6447) ([alesapin](https://github.com/alesapin))
- Fix bug with enabling adaptive granularity when creating a new replica for a `Replicated*MergeTree` table. [\#6452](https://github.com/ClickHouse/ClickHouse/pull/6452) ([alesapin](https://github.com/alesapin))
- Fix infinite loop when reading Kafka messages. [\#6354](https://github.com/ClickHouse/ClickHouse/pull/6354) ([abyss7](https://github.com/abyss7))
- Fixed the possibility of a fabricated query causing a server crash due to stack overflow in the SQL parser, and the possibility of stack overflow in `Merge` and `Distributed` tables. [\#6433](https://github.com/ClickHouse/ClickHouse/pull/6433) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed Gorilla encoding error on small sequences. [\#6444](https://github.com/ClickHouse/ClickHouse/pull/6444) ([Enmk](https://github.com/Enmk))

#### Improvement {#improvement-5}

- Allow the user to override the `poll_interval` and `idle_connection_timeout` settings on connection. [\#6230](https://github.com/ClickHouse/ClickHouse/pull/6230) ([alexey-milovidov](https://github.com/alexey-milovidov))
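A sketch of overriding the settings from the item above for the current session (the values are illustrative only):

```sql
-- Both settings can now be overridden per connection.
SET poll_interval = 10;
SET idle_connection_timeout = 3600;
```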

### ClickHouse release 19.11.5.28, 2019-08-05 {#clickhouse-release-19-11-5-28-2019-08-05}

#### Bug fix {#bug-fix-19}

- Fixed the possibility of hanging queries when the server is overloaded. [\#6301](https://github.com/ClickHouse/ClickHouse/pull/6301) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix FPE in the yandexConsistentHash function. This fixes [\#6304](https://github.com/ClickHouse/ClickHouse/issues/6304). [\#6126](https://github.com/ClickHouse/ClickHouse/pull/6126) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed bug in conversion of `LowCardinality` types in `AggregateFunctionFactory`. This fixes [\#6257](https://github.com/ClickHouse/ClickHouse/issues/6257). [\#6281](https://github.com/ClickHouse/ClickHouse/pull/6281) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fix parsing of `bool` settings from `true` and `false` strings in configuration files. [\#6278](https://github.com/ClickHouse/ClickHouse/pull/6278) ([alesapin](https://github.com/alesapin))
- Fix rare bug with incompatible stream headers in queries to a `Distributed` table over a `MergeTree` table when part of `WHERE` moves to `PREWHERE`. [\#6236](https://github.com/ClickHouse/ClickHouse/pull/6236) ([alesapin](https://github.com/alesapin))
- Fixed overflow in integer division of signed type to unsigned type. This fixes [\#6214](https://github.com/ClickHouse/ClickHouse/issues/6214). [\#6233](https://github.com/ClickHouse/ClickHouse/pull/6233) ([alexey-milovidov](https://github.com/alexey-milovidov))

#### Backward Incompatible Change {#backward-incompatible-change-5}

- `Kafka` is still broken.

### ClickHouse release 19.11.4.24, 2019-08-01 {#clickhouse-release-19-11-4-24-2019-08-01}

#### Bug Fix {#bug-fix-20}

- Fix bug with writing secondary indices marks with adaptive granularity. [\#6126](https://github.com/ClickHouse/ClickHouse/pull/6126) ([alesapin](https://github.com/alesapin))
- Fix `WITH ROLLUP` and `WITH CUBE` modifiers of `GROUP BY` with two-level aggregation. [\#6225](https://github.com/ClickHouse/ClickHouse/pull/6225) ([Anton Popov](https://github.com/CurtizJ))
- Fixed hang in the `JSONExtractRaw` function. Fixed [\#6195](https://github.com/ClickHouse/ClickHouse/issues/6195) [\#6198](https://github.com/ClickHouse/ClickHouse/pull/6198) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix segfault in ExternalLoader::reloadOutdated(). [\#6082](https://github.com/ClickHouse/ClickHouse/pull/6082) ([Vitaly Baranov](https://github.com/vitlibar))
- Fixed the case when the server may close listening sockets but not shut down, and continue serving remaining queries. You may end up with two running clickhouse-server processes. Sometimes, the server may return an error `bad_function_call` for remaining queries. [\#6231](https://github.com/ClickHouse/ClickHouse/pull/6231) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a useless and incorrect condition on the update field for initial loading of external dictionaries via ODBC, MySQL, ClickHouse and HTTP. This fixes [\#6069](https://github.com/ClickHouse/ClickHouse/issues/6069) [\#6083](https://github.com/ClickHouse/ClickHouse/pull/6083) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed an irrelevant exception in cast of `LowCardinality(Nullable)` to a not-Nullable column in case it doesn’t contain Nulls (e.g. in a query like `SELECT CAST(CAST('Hello' AS LowCardinality(Nullable(String))) AS String)`). [\#6094](https://github.com/ClickHouse/ClickHouse/issues/6094) [\#6119](https://github.com/ClickHouse/ClickHouse/pull/6119) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fix non-deterministic result of the “uniq” aggregate function in extremely rare cases. The bug was present in all ClickHouse versions. [\#6058](https://github.com/ClickHouse/ClickHouse/pull/6058) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a segfault when a slightly too high CIDR is set in the function `IPv6CIDRToRange`. [\#6068](https://github.com/ClickHouse/ClickHouse/pull/6068) ([Guillaume Tassery](https://github.com/YiuRULE))
- Fixed a small memory leak when the server throws many exceptions from many different contexts. [\#6144](https://github.com/ClickHouse/ClickHouse/pull/6144) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix the situation when a consumer got paused before subscription and was not resumed afterwards. [\#6075](https://github.com/ClickHouse/ClickHouse/pull/6075) ([Ivan](https://github.com/abyss7)) Note that Kafka is broken in this version.
- Clear the Kafka data buffer from a previous read operation that completed with an error. [\#6026](https://github.com/ClickHouse/ClickHouse/pull/6026) ([Nikolay](https://github.com/bopohaa)) Note that Kafka is broken in this version.
- Since `StorageMergeTree::background_task_handle` is initialized in `startup()`, `MergeTreeBlockOutputStream::write()` may try to use it before initialization. Just check if it is initialized. [\#6080](https://github.com/ClickHouse/ClickHouse/pull/6080) ([Ivan](https://github.com/abyss7))

#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-6}

- Added official `rpm` packages. [\#5740](https://github.com/ClickHouse/ClickHouse/pull/5740) ([proller](https://github.com/proller)) ([alesapin](https://github.com/alesapin))
- Add an ability to build `.rpm` and `.tgz` packages with the `packager` script. [\#5769](https://github.com/ClickHouse/ClickHouse/pull/5769) ([alesapin](https://github.com/alesapin))
- Fixes for the “Arcadia” build system. [\#6223](https://github.com/ClickHouse/ClickHouse/pull/6223) ([proller](https://github.com/proller))

#### Backward Incompatible Change {#backward-incompatible-change-6}

- `Kafka` is broken in this version.

### ClickHouse release 19.11.3.11, 2019-07-18 {#clickhouse-release-19-11-3-11-2019-07-18}

#### New Feature {#new-feature-6}

- Added support for prepared statements. [\#5331](https://github.com/ClickHouse/ClickHouse/pull/5331/) ([Alexander](https://github.com/sanych73)) [\#5630](https://github.com/ClickHouse/ClickHouse/pull/5630) ([alexey-milovidov](https://github.com/alexey-milovidov))
- `DoubleDelta` and `Gorilla` column codecs. [\#5600](https://github.com/ClickHouse/ClickHouse/pull/5600) ([Vasily Nemkov](https://github.com/Enmk))
- Added the `os_thread_priority` setting that allows controlling the “nice” value of query processing threads that is used by the OS to adjust dynamic scheduling priority. It requires `CAP_SYS_NICE` capabilities to work. This implements [\#5858](https://github.com/ClickHouse/ClickHouse/issues/5858) [\#5909](https://github.com/ClickHouse/ClickHouse/pull/5909) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Implement `_topic`, `_offset`, `_key` columns for the Kafka engine. [\#5382](https://github.com/ClickHouse/ClickHouse/pull/5382) ([Ivan](https://github.com/abyss7)) Note that Kafka is broken in this version.
- Add aggregate function combinator `-Resample`. [\#5590](https://github.com/ClickHouse/ClickHouse/pull/5590) ([hcz](https://github.com/hczhcz))
- Aggregate functions `groupArrayMovingSum(win_size)(x)` and `groupArrayMovingAvg(win_size)(x)`, which calculate moving sum/avg with or without window-size limitation (a sketch follows this list). [\#5595](https://github.com/ClickHouse/ClickHouse/pull/5595) ([inv2004](https://github.com/inv2004))
- Add synonym `arrayFlatten` \<-\> `flatten`. [\#5764](https://github.com/ClickHouse/ClickHouse/pull/5764) ([hcz](https://github.com/hczhcz))
- Integrate the H3 function `geoToH3` from Uber. [\#4724](https://github.com/ClickHouse/ClickHouse/pull/4724) ([Remen Ivan](https://github.com/BHYCHIK)) [\#5805](https://github.com/ClickHouse/ClickHouse/pull/5805) ([alexey-milovidov](https://github.com/alexey-milovidov))
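A minimal sketch of the moving-aggregate functions mentioned above (the input data is made up):

```sql
-- Moving sum over a window of 3 elements.
SELECT groupArrayMovingSum(3)(x) AS moving
FROM (SELECT arrayJoin([1, 2, 3, 4, 5]) AS x);
-- moving = [1, 3, 6, 9, 12]
```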

#### Bug Fix {#bug-fix-21}

- Implement DNS cache with asynchronous update. A separate thread resolves all hosts and updates the DNS cache periodically (setting `dns_cache_update_period`). It should help when IPs of hosts change frequently. [\#5857](https://github.com/ClickHouse/ClickHouse/pull/5857) ([Anton Popov](https://github.com/CurtizJ))
- Fix segfault in the `Delta` codec which affects columns with values of less than 32-bit size. The bug led to random memory corruption. [\#5786](https://github.com/ClickHouse/ClickHouse/pull/5786) ([alesapin](https://github.com/alesapin))
- Fix segfault in TTL merge with non-physical columns in the block. [\#5819](https://github.com/ClickHouse/ClickHouse/pull/5819) ([Anton Popov](https://github.com/CurtizJ))
- Fix rare bug in checking of a part with a `LowCardinality` column. Previously `checkDataPart` always failed for a part with a `LowCardinality` column. [\#5832](https://github.com/ClickHouse/ClickHouse/pull/5832) ([alesapin](https://github.com/alesapin))
- Avoid hanging connections when the server thread pool is full. It is important for connections from the `remote` table function or connections to a shard without replicas when there is a long connection timeout. This fixes [\#5878](https://github.com/ClickHouse/ClickHouse/issues/5878) [\#5881](https://github.com/ClickHouse/ClickHouse/pull/5881) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Support for constant arguments to the `evalMLModel` function. This fixes [\#5817](https://github.com/ClickHouse/ClickHouse/issues/5817) [\#5820](https://github.com/ClickHouse/ClickHouse/pull/5820) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed the issue when ClickHouse determines the default time zone as `UCT` instead of `UTC`. This fixes [\#5804](https://github.com/ClickHouse/ClickHouse/issues/5804). [\#5828](https://github.com/ClickHouse/ClickHouse/pull/5828) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed buffer underflow in `visitParamExtractRaw`. This fixes [\#5901](https://github.com/ClickHouse/ClickHouse/issues/5901) [\#5902](https://github.com/ClickHouse/ClickHouse/pull/5902) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Now distributed `DROP/ALTER/TRUNCATE/OPTIMIZE ON CLUSTER` queries will be executed directly on the leader replica. [\#5757](https://github.com/ClickHouse/ClickHouse/pull/5757) ([alesapin](https://github.com/alesapin))
- Fix `coalesce` for `ColumnConst` with `ColumnNullable` + related changes. [\#5755](https://github.com/ClickHouse/ClickHouse/pull/5755) ([Artem Zuikov](https://github.com/4ertus2))
- Fix the `ReadBufferFromKafkaConsumer` so that it keeps reading new messages after `commit()` even if it was stalled before. [\#5852](https://github.com/ClickHouse/ClickHouse/pull/5852) ([Ivan](https://github.com/abyss7))
- Fix `FULL` and `RIGHT` JOIN results when joining on `Nullable` keys in the right table. [\#5859](https://github.com/ClickHouse/ClickHouse/pull/5859) ([Artem Zuikov](https://github.com/4ertus2))
- Possible fix of infinite sleeping of low-priority queries. [\#5842](https://github.com/ClickHouse/ClickHouse/pull/5842) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix race condition which caused some queries to not appear in query\_log after a `SYSTEM FLUSH LOGS` query. [\#5456](https://github.com/ClickHouse/ClickHouse/issues/5456) [\#5685](https://github.com/ClickHouse/ClickHouse/pull/5685) ([Anton Popov](https://github.com/CurtizJ))
- Fixed `heap-use-after-free` ASan warning in ClusterCopier caused by a watch that tried to use an already removed copier object. [\#5871](https://github.com/ClickHouse/ClickHouse/pull/5871) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fixed wrong `StringRef` pointer returned by some implementations of `IColumn::deserializeAndInsertFromArena`. This bug affected only unit tests. [\#5973](https://github.com/ClickHouse/ClickHouse/pull/5973) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Prevent source and intermediate array join columns from masking columns with the same name. [\#5941](https://github.com/ClickHouse/ClickHouse/pull/5941) ([Artem Zuikov](https://github.com/4ertus2))
- Fix insert and select queries to the MySQL engine with MySQL-style identifier quoting. [\#5704](https://github.com/ClickHouse/ClickHouse/pull/5704) ([Winter Zhang](https://github.com/zhang2014))
- Now the `CHECK TABLE` query can work with the MergeTree engine family (a usage sketch follows this list). It returns a check status and a message, if any, for each part (or file, in the case of simpler engines). Also, fix a bug in fetching a broken part. [\#5865](https://github.com/ClickHouse/ClickHouse/pull/5865) ([alesapin](https://github.com/alesapin))
- Fix SPLIT\_SHARED\_LIBRARIES runtime. [\#5793](https://github.com/ClickHouse/ClickHouse/pull/5793) ([Danila Kutenin](https://github.com/danlark1))
- Fixed time zone initialization when `/etc/localtime` is a relative symlink like `../usr/share/zoneinfo/Europe/Moscow`. [\#5922](https://github.com/ClickHouse/ClickHouse/pull/5922) ([alexey-milovidov](https://github.com/alexey-milovidov))
- clickhouse-copier: Fix use-after-free on shutdown. [\#5752](https://github.com/ClickHouse/ClickHouse/pull/5752) ([proller](https://github.com/proller))
- Updated `simdjson`. Fixed the issue that some invalid JSONs with zero bytes parsed successfully. [\#5938](https://github.com/ClickHouse/ClickHouse/pull/5938) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix shutdown of SystemLogs. [\#5802](https://github.com/ClickHouse/ClickHouse/pull/5802) ([Anton Popov](https://github.com/CurtizJ))
- Fix hanging when a condition in invalidate\_query depends on a dictionary. [\#6011](https://github.com/ClickHouse/ClickHouse/pull/6011) ([Vitaly Baranov](https://github.com/vitlibar))
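A usage sketch for the `CHECK TABLE` support referenced in the list above (the table name is hypothetical):

```sql
-- Returns a check status (and a message, if any) for each part of the table.
CHECK TABLE my_mergetree_table;
```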

#### Improvement {#improvement-6}

- Allow unresolvable addresses in the cluster configuration. They will be considered unavailable, and resolution will be retried at every connection attempt. This is especially useful for Kubernetes. This fixes [\#5714](https://github.com/ClickHouse/ClickHouse/issues/5714) [\#5924](https://github.com/ClickHouse/ClickHouse/pull/5924) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Close idle TCP connections (with a one hour timeout by default). This is especially important for large clusters with multiple distributed tables on every server, because every server can possibly keep a connection pool to every other server, and after peak query concurrency, connections will stall. This fixes [\#5879](https://github.com/ClickHouse/ClickHouse/issues/5879) [\#5880](https://github.com/ClickHouse/ClickHouse/pull/5880) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Better quality of the `topK` function. Changed the SpaceSaving set behavior to remove the last element if the new element has a bigger weight. [\#5833](https://github.com/ClickHouse/ClickHouse/issues/5833) [\#5850](https://github.com/ClickHouse/ClickHouse/pull/5850) ([Guillaume Tassery](https://github.com/YiuRULE))
- URL functions that work with domains now can work for incomplete URLs without a scheme. [\#5725](https://github.com/ClickHouse/ClickHouse/pull/5725) ([alesapin](https://github.com/alesapin))
- Checksums added to the `system.parts_columns` table. [\#5874](https://github.com/ClickHouse/ClickHouse/pull/5874) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
- Added the `Enum` data type as a synonym for `Enum8` or `Enum16`. [\#5886](https://github.com/ClickHouse/ClickHouse/pull/5886) ([dimarub2000](https://github.com/dimarub2000))
- Full bit transpose variant for the `T64` codec. Could lead to better compression with `zstd`. [\#5742](https://github.com/ClickHouse/ClickHouse/pull/5742) ([Artem Zuikov](https://github.com/4ertus2))
- A condition on the `startsWith` function can now use the primary key (a sketch follows this list). This fixes [\#5310](https://github.com/ClickHouse/ClickHouse/issues/5310) and [\#5882](https://github.com/ClickHouse/ClickHouse/issues/5882) [\#5919](https://github.com/ClickHouse/ClickHouse/pull/5919) ([dimarub2000](https://github.com/dimarub2000))
- Allow using `clickhouse-copier` with cross-replication cluster topology by permitting an empty database name. [\#5745](https://github.com/ClickHouse/ClickHouse/pull/5745) ([nvartolomei](https://github.com/nvartolomei))
- Use `UTC` as the default timezone on a system without `tzdata` (e.g. bare Docker container). Before this patch, the error message `Could not determine local time zone` was printed and the server or client refused to start. [\#5827](https://github.com/ClickHouse/ClickHouse/pull/5827) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Returned support for a floating point argument in the function `quantileTiming` for backward compatibility. [\#5911](https://github.com/ClickHouse/ClickHouse/pull/5911) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Show which table is missing a column in error messages. [\#5768](https://github.com/ClickHouse/ClickHouse/pull/5768) ([Ivan](https://github.com/abyss7))
- Disallow running queries with the same query\_id by different users. [\#5430](https://github.com/ClickHouse/ClickHouse/pull/5430) ([proller](https://github.com/proller))
- More robust code for sending metrics to Graphite. It will work even during long multiple `RENAME TABLE` operations. [\#5875](https://github.com/ClickHouse/ClickHouse/pull/5875) ([alexey-milovidov](https://github.com/alexey-milovidov))
- More informative error messages will be displayed when ThreadPool cannot schedule a task for execution. This fixes [\#5305](https://github.com/ClickHouse/ClickHouse/issues/5305) [\#5801](https://github.com/ClickHouse/ClickHouse/pull/5801) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Inverted `ngramSearch` to be more intuitive. [\#5807](https://github.com/ClickHouse/ClickHouse/pull/5807) ([Danila Kutenin](https://github.com/danlark1))
- Added user parsing in the HDFS engine builder. [\#5946](https://github.com/ClickHouse/ClickHouse/pull/5946) ([akonyaev90](https://github.com/akonyaev90))
- Updated the default value of the `max_ast_elements` parameter. [\#5933](https://github.com/ClickHouse/ClickHouse/pull/5933) ([Artem Konovalov](https://github.com/izebit))
- Added a notion of obsolete settings. The obsolete setting `allow_experimental_low_cardinality_type` can be used with no effect. [0f15c01c6802f7ce1a1494c12c846be8c98944cd](https://github.com/ClickHouse/ClickHouse/commit/0f15c01c6802f7ce1a1494c12c846be8c98944cd) [Alexey Milovidov](https://github.com/alexey-milovidov)
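A sketch of the `startsWith` optimization referenced in the list above (table and data are hypothetical):

```sql
CREATE TABLE urls (`url` String) ENGINE = MergeTree ORDER BY url;

-- The prefix condition can now be evaluated using the primary key on `url`.
SELECT count() FROM urls WHERE startsWith(url, 'https://');
```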

#### Performance Improvement {#performance-improvement-4}

- Increase the number of streams to SELECT from a Merge table for a more uniform distribution of threads. Added setting `max_streams_multiplier_for_merge_tables`. This fixes [\#5797](https://github.com/ClickHouse/ClickHouse/issues/5797) [\#5915](https://github.com/ClickHouse/ClickHouse/pull/5915) ([alexey-milovidov](https://github.com/alexey-milovidov))

#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-7}

- Add a backward compatibility test for client-server interaction with different versions of ClickHouse. [\#5868](https://github.com/ClickHouse/ClickHouse/pull/5868) ([alesapin](https://github.com/alesapin))
- Test coverage information in every commit and pull request. [\#5896](https://github.com/ClickHouse/ClickHouse/pull/5896) ([alesapin](https://github.com/alesapin))
- Cooperate with address sanitizer to support our custom allocators (`Arena` and `ArenaWithFreeLists`) for better debugging of “use-after-free” errors. [\#5728](https://github.com/ClickHouse/ClickHouse/pull/5728) ([akuzm](https://github.com/akuzm))
- Switch to the [LLVM libunwind implementation](https://github.com/llvm-mirror/libunwind) for C++ exception handling and for stack trace printing. [\#4828](https://github.com/ClickHouse/ClickHouse/pull/4828) ([Nikita Lapkov](https://github.com/laplab))
- Add two more warnings from -Weverything. [\#5923](https://github.com/ClickHouse/ClickHouse/pull/5923) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Allow building ClickHouse with Memory Sanitizer. [\#3949](https://github.com/ClickHouse/ClickHouse/pull/3949) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a UBSan report about the `bitTest` function in the fuzz test. [\#5943](https://github.com/ClickHouse/ClickHouse/pull/5943) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Docker: added the possibility to init a ClickHouse instance which requires authentication. [\#5727](https://github.com/ClickHouse/ClickHouse/pull/5727) ([Korviakov Andrey](https://github.com/shurshun))
- Update librdkafka to version 1.1.0. [\#5872](https://github.com/ClickHouse/ClickHouse/pull/5872) ([Ivan](https://github.com/abyss7))
- Add a global timeout for integration tests and disable some of them in the tests code. [\#5741](https://github.com/ClickHouse/ClickHouse/pull/5741) ([alesapin](https://github.com/alesapin))
- Fix some ThreadSanitizer failures. [\#5854](https://github.com/ClickHouse/ClickHouse/pull/5854) ([akuzm](https://github.com/akuzm))
- The `--no-undefined` option forces the linker to check all external names for existence while linking. It’s very useful to track real dependencies between libraries in the split build mode. [\#5855](https://github.com/ClickHouse/ClickHouse/pull/5855) ([Ivan](https://github.com/abyss7))
- Added performance test for [\#5797](https://github.com/ClickHouse/ClickHouse/issues/5797) [\#5914](https://github.com/ClickHouse/ClickHouse/pull/5914) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed compatibility with gcc-7. [\#5840](https://github.com/ClickHouse/ClickHouse/pull/5840) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added support for gcc-9. This fixes [\#5717](https://github.com/ClickHouse/ClickHouse/issues/5717) [\#5774](https://github.com/ClickHouse/ClickHouse/pull/5774) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed an error when libunwind could be linked incorrectly. [\#5948](https://github.com/ClickHouse/ClickHouse/pull/5948) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a few warnings found by PVS-Studio. [\#5921](https://github.com/ClickHouse/ClickHouse/pull/5921) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added initial support for the `clang-tidy` static analyzer. [\#5806](https://github.com/ClickHouse/ClickHouse/pull/5806) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Convert BSD/Linux endian macros (`be64toh` and `htobe64`) to the Mac OS X equivalents. [\#5785](https://github.com/ClickHouse/ClickHouse/pull/5785) ([Fu Chen](https://github.com/fredchenbj))
- Improved integration tests guide. [\#5796](https://github.com/ClickHouse/ClickHouse/pull/5796) ([Vladimir Chebotarev](https://github.com/excitoon))
- Fixed build on macOS with gcc-9. [\#5822](https://github.com/ClickHouse/ClickHouse/pull/5822) ([filimonov](https://github.com/filimonov))
- Fix a hard-to-spot typo: aggreAGte -\> aggregate. [\#5753](https://github.com/ClickHouse/ClickHouse/pull/5753) ([akuzm](https://github.com/akuzm))
- Fix FreeBSD build. [\#5760](https://github.com/ClickHouse/ClickHouse/pull/5760) ([proller](https://github.com/proller))
- Add link to experimental YouTube channel to website. [\#5845](https://github.com/ClickHouse/ClickHouse/pull/5845) ([Ivan Blinkov](https://github.com/blinkov))
- CMake: add option for coverage flags: WITH\_COVERAGE. [\#5776](https://github.com/ClickHouse/ClickHouse/pull/5776) ([proller](https://github.com/proller))
- Fix initial size of some inline PODArrays. [\#5787](https://github.com/ClickHouse/ClickHouse/pull/5787) ([akuzm](https://github.com/akuzm))
- clickhouse-server.postinst: fix OS detection for CentOS 6. [\#5788](https://github.com/ClickHouse/ClickHouse/pull/5788) ([proller](https://github.com/proller))
- Added Arch Linux package generation. [\#5719](https://github.com/ClickHouse/ClickHouse/pull/5719) ([Vladimir Chebotarev](https://github.com/excitoon))
- Split Common/config.h by libs (dbms). [\#5715](https://github.com/ClickHouse/ClickHouse/pull/5715) ([proller](https://github.com/proller))
- Fixes for the “Arcadia” build platform. [\#5795](https://github.com/ClickHouse/ClickHouse/pull/5795) ([proller](https://github.com/proller))
- Fixes for unconventional build (gcc9, no submodules). [\#5792](https://github.com/ClickHouse/ClickHouse/pull/5792) ([proller](https://github.com/proller))
- Require explicit type in unalignedStore because it was proven to be bug-prone. [\#5791](https://github.com/ClickHouse/ClickHouse/pull/5791) ([akuzm](https://github.com/akuzm))
- Fixes macOS build. [\#5830](https://github.com/ClickHouse/ClickHouse/pull/5830) ([filimonov](https://github.com/filimonov))
- Performance test concerning the new JIT feature with a bigger dataset, as requested here [\#5263](https://github.com/ClickHouse/ClickHouse/issues/5263) [\#5887](https://github.com/ClickHouse/ClickHouse/pull/5887) ([Guillaume Tassery](https://github.com/YiuRULE))
- Run stateful tests in stress test. [12693e568722f11e19859742f56428455501fd2a](https://github.com/ClickHouse/ClickHouse/commit/12693e568722f11e19859742f56428455501fd2a) ([alesapin](https://github.com/alesapin))

#### Backward Incompatible Change {#backward-incompatible-change-7}

- `Kafka` is broken in this version.
- Enable `adaptive_index_granularity` = 10MB by default for new `MergeTree` tables. If you created new MergeTree tables on version 19.11+, downgrade to versions prior to 19.6 will be impossible. [\#5628](https://github.com/ClickHouse/ClickHouse/pull/5628) ([alesapin](https://github.com/alesapin))
- Removed obsolete undocumented embedded dictionaries that were used by Yandex.Metrica. The functions `OSIn`, `SEIn`, `OSToRoot`, `SEToRoot`, `OSHierarchy`, `SEHierarchy` are no longer available. If you are using these functions, write an email to clickhouse-feedback@yandex-team.com. Note: at the last moment we decided to keep these functions for a while. [\#5780](https://github.com/ClickHouse/ClickHouse/pull/5780) ([alexey-milovidov](https://github.com/alexey-milovidov))

## ClickHouse release 19.10 {#clickhouse-release-19-10}

### ClickHouse release 19.10.1.5, 2019-07-12 {#clickhouse-release-19-10-1-5-2019-07-12}

#### New Feature {#new-feature-7}

- Add new column codec: `T64`. Made for (U)IntX/EnumX/Date(Time)/DecimalX columns. It should be good for columns with constant or small range values. The codec itself allows enlarging or shrinking the data type without re-compression. [\#5557](https://github.com/ClickHouse/ClickHouse/pull/5557) ([Artem Zuikov](https://github.com/4ertus2))
- Add database engine `MySQL` that allows viewing all the tables in a remote MySQL server. [\#5599](https://github.com/ClickHouse/ClickHouse/pull/5599) ([Winter Zhang](https://github.com/zhang2014))
- `bitmapContains` implementation. It’s 2x faster than `bitmapHasAny` if the second bitmap contains one element. [\#5535](https://github.com/ClickHouse/ClickHouse/pull/5535) ([Zhichang Yu](https://github.com/yuzhichang))
- Support for the `crc32` function (with behaviour exactly as in MySQL or PHP). Do not use it if you need a hash function. [\#5661](https://github.com/ClickHouse/ClickHouse/pull/5661) ([Remen Ivan](https://github.com/BHYCHIK))
- Implemented `SYSTEM START/STOP DISTRIBUTED SENDS` queries to control asynchronous inserts into `Distributed` tables. [\#4935](https://github.com/ClickHouse/ClickHouse/pull/4935) ([Winter Zhang](https://github.com/zhang2014))
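A usage sketch for the queries from the last item (the table name is hypothetical):

```sql
-- Pause background sending of asynchronously inserted data...
SYSTEM STOP DISTRIBUTED SENDS db.distributed_table;
-- ...and resume it later.
SYSTEM START DISTRIBUTED SENDS db.distributed_table;
```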

#### Bug Fix {#bug-fix-22}

- Ignore query execution limits and max parts size for merge limits while executing mutations. [\#5659](https://github.com/ClickHouse/ClickHouse/pull/5659) ([Anton Popov](https://github.com/CurtizJ))
- Fix bug which may lead to deduplication of normal blocks (extremely rare) and insertion of duplicate blocks (more often). [\#5549](https://github.com/ClickHouse/ClickHouse/pull/5549) ([alesapin](https://github.com/alesapin))
- Fix of function `arrayEnumerateUniqRanked` for arguments with empty arrays. [\#5559](https://github.com/ClickHouse/ClickHouse/pull/5559) ([proller](https://github.com/proller))
- Don’t subscribe to Kafka topics without intent to poll any messages. [\#5698](https://github.com/ClickHouse/ClickHouse/pull/5698) ([Ivan](https://github.com/abyss7))
- Make the setting `join_use_nulls` have no effect for types that cannot be inside Nullable. [\#5700](https://github.com/ClickHouse/ClickHouse/pull/5700) ([Olga Khvostikova](https://github.com/stavrolia))
- Fixed `Incorrect size of index granularity` errors. [\#5720](https://github.com/ClickHouse/ClickHouse/pull/5720) ([coraxster](https://github.com/coraxster))
- Fix Float to Decimal conversion overflow. [\#5607](https://github.com/ClickHouse/ClickHouse/pull/5607) ([coraxster](https://github.com/coraxster))
- Flush the buffer when the `WriteBufferFromHDFS` destructor is called. This fixes writing into `HDFS`. [\#5684](https://github.com/ClickHouse/ClickHouse/pull/5684) ([Xindong Peng](https://github.com/eejoin))

#### Improvement {#improvement-7}

- Treat empty cells in `CSV` as default values when the setting `input_format_defaults_for_omitted_fields` is enabled. [\#5625](https://github.com/ClickHouse/ClickHouse/pull/5625) ([akuzm](https://github.com/akuzm))
- Non-blocking loading of external dictionaries. [\#5567](https://github.com/ClickHouse/ClickHouse/pull/5567) ([Vitaly Baranov](https://github.com/vitlibar))
- Network timeouts can be dynamically changed for already established connections according to the settings. [\#4558](https://github.com/ClickHouse/ClickHouse/pull/4558) ([Konstantin Podshumok](https://github.com/podshumok))
- Using “public\_suffix\_list” for functions `firstSignificantSubdomain`, `cutToFirstSignificantSubdomain`. It’s using a perfect hash table generated by `gperf` with a list generated from the file: https://publicsuffix.org/list/public\_suffix\_list.dat. (for example, now we recognize the domain `ac.uk` as non-significant). [\#5030](https://github.com/ClickHouse/ClickHouse/pull/5030) ([Guillaume Tassery](https://github.com/YiuRULE))
- Adopted `IPv6` data type in system tables; unified client info columns in `system.processes` and `system.query_log`. [\#5640](https://github.com/ClickHouse/ClickHouse/pull/5640) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Using sessions for connections with the MySQL compatibility protocol. \#5476 [\#5646](https://github.com/ClickHouse/ClickHouse/pull/5646) ([Yuriy Baranov](https://github.com/yurriy))
- Support more `ALTER` queries `ON CLUSTER`. [\#5593](https://github.com/ClickHouse/ClickHouse/pull/5593) [\#5613](https://github.com/ClickHouse/ClickHouse/pull/5613) ([sundyli](https://github.com/sundy-li))
- Support `` section in the `clickhouse-local` config file. [\#5540](https://github.com/ClickHouse/ClickHouse/pull/5540) ([proller](https://github.com/proller))
- Allow running queries with the `remote` table function in `clickhouse-local`. [\#5627](https://github.com/ClickHouse/ClickHouse/pull/5627) ([proller](https://github.com/proller))
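A sketch of the `remote` table function usage that the last item enables in `clickhouse-local` (the address is a placeholder):

```sql
-- Run inside clickhouse-local to query a running server.
SELECT count() FROM remote('127.0.0.1:9000', system.one);
```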

#### Performance Improvement {#performance-improvement-5}

- Add the possibility to write the final mark at the end of MergeTree columns. It allows avoiding useless reads for keys that are out of the table data range. It is enabled only if adaptive index granularity is in use. [\#5624](https://github.com/ClickHouse/ClickHouse/pull/5624) ([alesapin](https://github.com/alesapin))
- Improved performance of MergeTree tables on very slow filesystems by reducing the number of `stat` syscalls. [\#5648](https://github.com/ClickHouse/ClickHouse/pull/5648) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed performance degradation in reading from MergeTree tables that was introduced in version 19.6. Fixes \#5631. [\#5633](https://github.com/ClickHouse/ClickHouse/pull/5633) ([alexey-milovidov](https://github.com/alexey-milovidov))

#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-8}

- Implemented `TestKeeper` as an implementation of the ZooKeeper interface used for testing [\#5643](https://github.com/ClickHouse/ClickHouse/pull/5643) ([alexey-milovidov](https://github.com/alexey-milovidov)) ([levushkin aleksej](https://github.com/alexey-milovidov))
- From now on, `.sql` tests can be run isolated by the server, in parallel, with a random database. This allows running them faster, adding new tests with custom server configurations, and being sure that different tests don’t affect each other. [\#5554](https://github.com/ClickHouse/ClickHouse/pull/5554) ([Ivan](https://github.com/abyss7))
- Remove `` and `` from performance tests [\#5672](https://github.com/ClickHouse/ClickHouse/pull/5672) ([Olga Khvostikova](https://github.com/stavrolia))
- Fixed “select\_format” performance test for `Pretty` formats [\#5642](https://github.com/ClickHouse/ClickHouse/pull/5642) ([alexey-milovidov](https://github.com/alexey-milovidov))

## ClickHouse release 19.9 {#clickhouse-release-19-9}

### ClickHouse release 19.9.3.31, 2019-07-05 {#clickhouse-release-19-9-3-31-2019-07-05}

#### Bug Fix {#bug-fix-23}

- Fix segfault in Delta codec which affects columns with values smaller than 32 bits. The bug led to random memory corruption. [\#5786](https://github.com/ClickHouse/ClickHouse/pull/5786) ([alesapin](https://github.com/alesapin))
- Fix a rare bug in checking parts with a LowCardinality column. [\#5832](https://github.com/ClickHouse/ClickHouse/pull/5832) ([alesapin](https://github.com/alesapin))
- Fix segfault in TTL merge with non-physical columns in block. [\#5819](https://github.com/ClickHouse/ClickHouse/pull/5819) ([Anton Popov](https://github.com/CurtizJ))
- Fix potential infinite sleeping of low-priority queries. [\#5842](https://github.com/ClickHouse/ClickHouse/pull/5842) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix how ClickHouse determined the default time zone as `UCT` instead of `UTC`. [\#5828](https://github.com/ClickHouse/ClickHouse/pull/5828) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix bug where distributed DROP/ALTER/TRUNCATE/OPTIMIZE ON CLUSTER queries were executed on a follower replica before the leader replica. Now they will be executed directly on the leader replica. [\#5757](https://github.com/ClickHouse/ClickHouse/pull/5757) ([alesapin](https://github.com/alesapin))
- Fix a race condition which caused some queries not to appear in query\_log immediately after a SYSTEM FLUSH LOGS query. [\#5685](https://github.com/ClickHouse/ClickHouse/pull/5685) ([Anton Popov](https://github.com/CurtizJ))
- Added missing support for constant arguments to the `evalMLModel` function. [\#5820](https://github.com/ClickHouse/ClickHouse/pull/5820) ([alexey-milovidov](https://github.com/alexey-milovidov))

### ClickHouse release 19.9.2.4, 2019-06-24 {#clickhouse-release-19-9-2-4-2019-06-24}

#### New Feature {#new-feature-8}

- Print information about frozen parts in the `system.parts` table. [\#5471](https://github.com/ClickHouse/ClickHouse/pull/5471) ([proller](https://github.com/proller))
- Ask for the client password on clickhouse-client start on a tty if it is not set in the arguments [\#5092](https://github.com/ClickHouse/ClickHouse/pull/5092) ([proller](https://github.com/proller))
- Implement `dictGet` and `dictGetOrDefault` functions for Decimal types (see the sketch after this list). [\#5394](https://github.com/ClickHouse/ClickHouse/pull/5394) ([Artem Zuikov](https://github.com/4ertus2))
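
A hedged sketch of the Decimal support in `dictGet`/`dictGetOrDefault`; the dictionary name and attribute are hypothetical and would need a matching dictionary definition:

```sql
-- Assumes a dictionary `price_dict` with a Decimal64(4) attribute `price`.
SELECT dictGetOrDefault('price_dict', 'price', toUInt64(42), toDecimal64(0, 4)) AS price;
```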

#### Improvement {#improvement-8}

- Debian init: Add service stop timeout [\#5522](https://github.com/ClickHouse/ClickHouse/pull/5522) ([proller](https://github.com/proller))
- Add a setting, forbidden by default, to create tables with suspicious types for LowCardinality [\#5448](https://github.com/ClickHouse/ClickHouse/pull/5448) ([Olga Khvostikova](https://github.com/stavrolia))
- Regression functions return model weights when not used as State in function `evalMLMethod`. [\#5411](https://github.com/ClickHouse/ClickHouse/pull/5411) ([Quid37](https://github.com/Quid37))
- Rename and improve regression methods. [\#5492](https://github.com/ClickHouse/ClickHouse/pull/5492) ([Quid37](https://github.com/Quid37))
- Clearer interfaces of string searchers. [\#5586](https://github.com/ClickHouse/ClickHouse/pull/5586) ([Danila Kutenin](https://github.com/danlark1))

#### Bug Fix {#bug-fix-24}

- Fix potential data loss in Kafka [\#5445](https://github.com/ClickHouse/ClickHouse/pull/5445) ([Ivan](https://github.com/abyss7))
- Fix potential infinite loop in `PrettySpace` format when called with zero columns [\#5560](https://github.com/ClickHouse/ClickHouse/pull/5560) ([Olga Khvostikova](https://github.com/stavrolia))
- Fixed UInt32 overflow bug in linear models. Allow evaluating ML models for a non-constant model argument. [\#5516](https://github.com/ClickHouse/ClickHouse/pull/5516) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- `ALTER TABLE ... DROP INDEX IF EXISTS ...` should not raise an exception if the provided index does not exist [\#5524](https://github.com/ClickHouse/ClickHouse/pull/5524) ([Gleb Novikov](https://github.com/NanoBjorn))
- Fix segfault with `bitmapHasAny` in scalar subquery [\#5528](https://github.com/ClickHouse/ClickHouse/pull/5528) ([Zhichang Yu](https://github.com/yuzhichang))
- Fixed error when the replication connection pool doesn’t retry to resolve a host, even when the DNS cache was dropped. [\#5534](https://github.com/ClickHouse/ClickHouse/pull/5534) ([alesapin](https://github.com/alesapin))
- Fixed `ALTER ... MODIFY TTL` on ReplicatedMergeTree. [\#5539](https://github.com/ClickHouse/ClickHouse/pull/5539) ([Anton Popov](https://github.com/CurtizJ))
- Fix INSERT into a Distributed table with a MATERIALIZED column [\#5429](https://github.com/ClickHouse/ClickHouse/pull/5429) ([Azat Khuzhin](https://github.com/azat))
- Fix bad\_alloc when truncating Join storage [\#5437](https://github.com/ClickHouse/ClickHouse/pull/5437) ([TCeason](https://github.com/TCeason))
- In recent versions of the tzdata package, some files are now symlinks. The current mechanism for detecting the default timezone got broken and gave wrong names for some timezones. Now we at least force the timezone name to the contents of TZ if provided. [\#5443](https://github.com/ClickHouse/ClickHouse/pull/5443) ([Ivan](https://github.com/abyss7))
- Fix some extremely rare cases with the MultiVolnitsky searcher when the constant needles in sum are at least 16KB long. The algorithm missed or overwrote the previous results, which can lead to the incorrect result of `multiSearchAny`. [\#5588](https://github.com/ClickHouse/ClickHouse/pull/5588) ([Danila Kutenin](https://github.com/danlark1))
- Fix the issue when settings for ExternalData requests couldn’t use ClickHouse settings. Also, for now, settings `date_time_input_format` and `low_cardinality_allow_in_native_format` cannot be used because of the ambiguity of names (in external data they can be interpreted as a table format and in the query they can be a setting). [\#5455](https://github.com/ClickHouse/ClickHouse/pull/5455) ([Danila Kutenin](https://github.com/danlark1))
- Fix bug when parts were removed only from FS without dropping them from ZooKeeper.
[\#5520](https://github.com/ClickHouse/ClickHouse/pull/5520) ([alesapin](https://github.com/alesapin))
- Remove debug logging from the MySQL protocol [\#5478](https://github.com/ClickHouse/ClickHouse/pull/5478) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Skip ZNONODE during DDL query processing [\#5489](https://github.com/ClickHouse/ClickHouse/pull/5489) ([Azat Khuzhin](https://github.com/azat))
- Fix mixed `UNION ALL` result column type. There were cases with inconsistent data and column types of resulting columns. [\#5503](https://github.com/ClickHouse/ClickHouse/pull/5503) ([Artem Zuikov](https://github.com/4ertus2))
- Throw an exception on wrong integers in `dictGetT` functions instead of a crash. [\#5446](https://github.com/ClickHouse/ClickHouse/pull/5446) ([Artem Zuikov](https://github.com/4ertus2))
- Fix wrong element\_count and load\_factor for hashed dictionaries in the `system.dictionaries` table. [\#5440](https://github.com/ClickHouse/ClickHouse/pull/5440) ([Azat Khuzhin](https://github.com/azat))

#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-9}

- Fixed build without `Brotli` HTTP compression support (`ENABLE_BROTLI=OFF` cmake variable). [\#5521](https://github.com/ClickHouse/ClickHouse/pull/5521) ([Anton Yuzhaninov](https://github.com/citrin))
- Include roaring.h as roaring/roaring.h [\#5523](https://github.com/ClickHouse/ClickHouse/pull/5523) ([Orivej Desh](https://github.com/orivej))
- Fix gcc9 warnings in hyperscan (\#line directive is evil!) [\#5546](https://github.com/ClickHouse/ClickHouse/pull/5546) ([Danila Kutenin](https://github.com/danlark1))
- Fix all warnings when compiling with gcc-9. Fix some contrib issues. Fix gcc9 ICE and submit it to bugzilla. [\#5498](https://github.com/ClickHouse/ClickHouse/pull/5498) ([Danila Kutenin](https://github.com/danlark1))
- Fixed linking with lld [\#5477](https://github.com/ClickHouse/ClickHouse/pull/5477) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Remove unused specializations in dictionaries [\#5452](https://github.com/ClickHouse/ClickHouse/pull/5452) ([Artem Zuikov](https://github.com/4ertus2))
- Improve performance tests for formatting and parsing tables for different types of files [\#5497](https://github.com/ClickHouse/ClickHouse/pull/5497) ([Olga Khvostikova](https://github.com/stavrolia))
- Fixes for parallel test run [\#5506](https://github.com/ClickHouse/ClickHouse/pull/5506) ([proller](https://github.com/proller))
- Docker: use configs from clickhouse-test [\#5531](https://github.com/ClickHouse/ClickHouse/pull/5531) ([proller](https://github.com/proller))
- Fix compilation for FreeBSD [\#5447](https://github.com/ClickHouse/ClickHouse/pull/5447) ([proller](https://github.com/proller))
- Upgrade boost to 1.70 [\#5570](https://github.com/ClickHouse/ClickHouse/pull/5570) ([proller](https://github.com/proller))
- Fix building ClickHouse as a submodule [\#5574](https://github.com/ClickHouse/ClickHouse/pull/5574) ([proller](https://github.com/proller))
- Improve JSONExtract performance tests [\#5444](https://github.com/ClickHouse/ClickHouse/pull/5444) ([Vitaly Baranov](https://github.com/vitlibar))

## ClickHouse release 19.8 {#clickhouse-release-19-8}

### ClickHouse release 19.8.3.8, 2019-06-11 {#clickhouse-release-19-8-3-8-2019-06-11}

#### New Features {#new-features}

- Added functions to work with JSON [\#4686](https://github.com/ClickHouse/ClickHouse/pull/4686) ([hcz](https://github.com/hczhcz))
[\#5124](https://github.com/ClickHouse/ClickHouse/pull/5124). ([Vitaly Baranov](https://github.com/vitlibar))
- Add a function `basename` with behaviour similar to the basename function that exists in a lot of languages (`os.path.basename` in Python, `basename` in PHP, etc…). It works with both UNIX-like paths and Windows paths. [\#5136](https://github.com/ClickHouse/ClickHouse/pull/5136) ([Guillaume Tassery](https://github.com/YiuRULE))
- Added `LIMIT n, m BY` or `LIMIT m OFFSET n BY` syntax to set an offset of n for the LIMIT BY clause (see the sketch after this list). [\#5138](https://github.com/ClickHouse/ClickHouse/pull/5138) ([Anton Popov](https://github.com/CurtizJ))
- Added new data type `SimpleAggregateFunction`, which allows columns with light aggregation in an `AggregatingMergeTree`. This can only be used with simple functions like `any`, `anyLast`, `sum`, `min`, `max`. [\#4629](https://github.com/ClickHouse/ClickHouse/pull/4629) ([Boris Granveaud](https://github.com/bgranvea))
- Added support for non-constant arguments in function `ngramDistance` [\#5198](https://github.com/ClickHouse/ClickHouse/pull/5198) ([Danila Kutenin](https://github.com/danlark1))
- Added functions `skewPop`, `skewSamp`, `kurtPop` and `kurtSamp` to compute sequence skewness, sample skewness, kurtosis and sample kurtosis respectively. [\#5200](https://github.com/ClickHouse/ClickHouse/pull/5200) ([hcz](https://github.com/hczhcz))
- Support rename operation for `MaterializeView` storage. [\#5209](https://github.com/ClickHouse/ClickHouse/pull/5209) ([Guillaume Tassery](https://github.com/YiuRULE))
- Added server which allows connecting to ClickHouse using a MySQL client. [\#4715](https://github.com/ClickHouse/ClickHouse/pull/4715) ([Yuriy Baranov](https://github.com/yurriy))
- Add `toDecimal*OrZero` and `toDecimal*OrNull` functions. [\#5291](https://github.com/ClickHouse/ClickHouse/pull/5291) ([Artem Zuikov](https://github.com/4ertus2))
- Support Decimal types in functions: `quantile`, `quantiles`, `median`, `quantileExactWeighted`, `quantilesExactWeighted`, `medianExactWeighted`. [\#5304](https://github.com/ClickHouse/ClickHouse/pull/5304) ([Artem Zuikov](https://github.com/4ertus2))
- Added `toValidUTF8` function, which replaces all invalid UTF-8 characters by the replacement character � (U+FFFD). [\#5322](https://github.com/ClickHouse/ClickHouse/pull/5322) ([Danila Kutenin](https://github.com/danlark1))
- Added `format` function. Formats a constant pattern (a simplified Python format pattern) with the strings listed in the arguments. [\#5330](https://github.com/ClickHouse/ClickHouse/pull/5330) ([Danila Kutenin](https://github.com/danlark1))
- Added `system.detached_parts` table containing information about detached parts of `MergeTree` tables. [\#5353](https://github.com/ClickHouse/ClickHouse/pull/5353) ([akuzm](https://github.com/akuzm))
- Added `ngramSearch` function to calculate the non-symmetric difference between needle and haystack. [\#5418](https://github.com/ClickHouse/ClickHouse/pull/5418)[\#5422](https://github.com/ClickHouse/ClickHouse/pull/5422) ([Danila Kutenin](https://github.com/danlark1))
- Implementation of basic machine learning methods (stochastic linear regression and logistic regression) using the aggregate functions interface. Has different strategies for updating model weights (simple gradient descent, momentum method, Nesterov method). Also supports mini-batches of custom size.
[\#4943](https://github.com/ClickHouse/ClickHouse/pull/4943) ([Quid37](https://github.com/Quid37))
- Implementation of `geohashEncode` and `geohashDecode` functions. [\#5003](https://github.com/ClickHouse/ClickHouse/pull/5003) ([Vasily Nemkov](https://github.com/Enmk))
- Added aggregate function `timeSeriesGroupSum`, which can aggregate different time series whose sample timestamps are not aligned. It uses linear interpolation between two sample timestamps and then sums the time series together. Added aggregate function `timeSeriesGroupRateSum`, which calculates the rate of time series and then sums the rates together. [\#4542](https://github.com/ClickHouse/ClickHouse/pull/4542) ([Yangkuan Liu](https://github.com/LiuYangkuan))
- Added functions `IPv4CIDRtoIPv4Range` and `IPv6CIDRtoIPv6Range` to calculate the lower and higher bounds for an IP in the subnet using a CIDR. [\#5095](https://github.com/ClickHouse/ClickHouse/pull/5095) ([Guillaume Tassery](https://github.com/YiuRULE))
- Add an X-ClickHouse-Summary header when we send a query using HTTP with the setting `send_progress_in_http_headers` enabled. Returns the usual information of X-ClickHouse-Progress, with additional information like how many rows and bytes were inserted by the query. [\#5116](https://github.com/ClickHouse/ClickHouse/pull/5116) ([Guillaume Tassery](https://github.com/YiuRULE))
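
A few of the 19.8 additions above in one hedged sketch; the table in the `SimpleAggregateFunction` example is illustrative:

```sql
-- LIMIT n, m BY: skip 1 row, then take 2 rows per group.
SELECT number % 3 AS g, number
FROM numbers(12)
ORDER BY g, number
LIMIT 1, 2 BY g;

-- Safe Decimal parsing and UTF-8 repair.
SELECT
    toDecimal32OrNull('not a number', 2) AS d,   -- NULL instead of an exception
    toValidUTF8(unhex('61F062')) AS repaired;    -- invalid byte replaced by U+FFFD

-- Simplified Python-style pattern formatting.
SELECT format('{} rows from {}', '42', 'example.com');

-- Light aggregation without full AggregateFunction states.
CREATE TABLE simple_agg
(
    `k` UInt64,
    `v` SimpleAggregateFunction(sum, UInt64)
)
ENGINE = AggregatingMergeTree
ORDER BY k;
```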

#### Improvements {#improvements}

- Added `max_parts_in_total` setting for the MergeTree family of tables (default: 100 000) that prevents unsafe specification of partition key \#5166. [\#5171](https://github.com/ClickHouse/ClickHouse/pull/5171) ([alexey-milovidov](https://github.com/alexey-milovidov))
- `clickhouse-obfuscator`: derive the seed for individual columns by combining the initial seed with the column name, not the column position. This is intended to transform datasets with multiple related tables, so that tables will remain JOINable after transformation. [\#5178](https://github.com/ClickHouse/ClickHouse/pull/5178) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added functions `JSONExtractRaw`, `JSONExtractKeyAndValues`. Renamed functions `jsonExtract` to `JSONExtract`. When something goes wrong these functions return the corresponding values, not `NULL`. Modified function `JSONExtract`, now it gets the return type from its last parameter and doesn’t inject nullables. Implemented fallback to RapidJSON in case AVX2 instructions are not available. Simdjson library updated to a new version. [\#5235](https://github.com/ClickHouse/ClickHouse/pull/5235) ([Vitaly Baranov](https://github.com/vitlibar))
- Now `if` and `multiIf` functions don’t rely on the condition’s `Nullable`, but rely on the branches for SQL compatibility. [\#5238](https://github.com/ClickHouse/ClickHouse/pull/5238) ([Jian Wu](https://github.com/janplus))
- `In` predicate now generates a `Null` result from `Null` input like the `Equal` function. [\#5152](https://github.com/ClickHouse/ClickHouse/pull/5152) ([Jian Wu](https://github.com/janplus))
- Check the time limit every (flush\_interval / poll\_timeout) number of rows from Kafka. This allows breaking the reading from the Kafka consumer more frequently and checking the time limits for the top-level streams [\#5249](https://github.com/ClickHouse/ClickHouse/pull/5249) ([Ivan](https://github.com/abyss7))
- Link rdkafka with bundled SASL. This should allow using SASL SCRAM authentication [\#5253](https://github.com/ClickHouse/ClickHouse/pull/5253) ([Ivan](https://github.com/abyss7))
- A batched version of RowRefList for ALL JOINS. [\#5267](https://github.com/ClickHouse/ClickHouse/pull/5267) ([Artem Zuikov](https://github.com/4ertus2))
- clickhouse-server: more informative listen error messages. [\#5268](https://github.com/ClickHouse/ClickHouse/pull/5268) ([proller](https://github.com/proller))
- Support dictionaries in clickhouse-copier for functions in `` [\#5270](https://github.com/ClickHouse/ClickHouse/pull/5270) ([proller](https://github.com/proller))
- Add new setting `kafka_commit_every_batch` to regulate Kafka committing policy. It allows setting the commit mode: after every batch of messages is handled, or after the whole block is written to the storage. It’s a trade-off between losing some messages or reading them twice in some extreme situations. [\#5308](https://github.com/ClickHouse/ClickHouse/pull/5308) ([Ivan](https://github.com/abyss7))
- Make `windowFunnel` support other unsigned integer types. [\#5320](https://github.com/ClickHouse/ClickHouse/pull/5320) ([sundyli](https://github.com/sundy-li))
- Allow shadowing the virtual column `_table` in the Merge engine. [\#5325](https://github.com/ClickHouse/ClickHouse/pull/5325) ([Ivan](https://github.com/abyss7))
- Make `sequenceMatch` aggregate functions support other unsigned integer types [\#5339](https://github.com/ClickHouse/ClickHouse/pull/5339) ([sundyli](https://github.com/sundy-li))
- Better error messages if a checksum mismatch is most likely caused by hardware failures. [\#5355](https://github.com/ClickHouse/ClickHouse/pull/5355) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Check that underlying tables support sampling for `StorageMerge` [\#5366](https://github.com/ClickHouse/ClickHouse/pull/5366) ([Ivan](https://github.com/abyss7))
- Close MySQL connections after their usage in external dictionaries. It is related to issue \#893. [\#5395](https://github.com/ClickHouse/ClickHouse/pull/5395) ([Clément Rodriguez](https://github.com/clemrodriguez))
- Improvements of the MySQL Wire Protocol. Changed the name of the format to MySQLWire. Using RAII for calling RSA\_free. Disabling SSL if the context cannot be created. [\#5419](https://github.com/ClickHouse/ClickHouse/pull/5419) ([Yuriy Baranov](https://github.com/yurriy))
- clickhouse-client: allow running with an inaccessible history file (read-only, no disk space, file is a directory, …). [\#5431](https://github.com/ClickHouse/ClickHouse/pull/5431) ([proller](https://github.com/proller))
- Respect query settings in asynchronous INSERTs into Distributed tables. [\#4936](https://github.com/ClickHouse/ClickHouse/pull/4936) ([TCeason](https://github.com/TCeason))
- Renamed functions `leastSqr` to `simpleLinearRegression`, `LinearRegression` to `linearRegression`, `LogisticRegression` to `logisticRegression` (see the sketch below). [\#5391](https://github.com/ClickHouse/ClickHouse/pull/5391) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
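
A quick sketch of the renamed `simpleLinearRegression` aggregate on synthetic data:

```sql
-- Fits y = k*x + b; for y = 2*x + 1 this should return (2, 1).
SELECT simpleLinearRegression(number, 2 * number + 1) AS kb
FROM numbers(10);
```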

#### Performance Improvements {#performance-improvements}

- Parallelize processing of parts of non-replicated MergeTree tables in ALTER MODIFY query. [\#4639](https://github.com/ClickHouse/ClickHouse/pull/4639) ([Ivan Kush](https://github.com/IvanKush))
- Optimizations in regular expressions extraction. [\#5193](https://github.com/ClickHouse/ClickHouse/pull/5193) [\#5191](https://github.com/ClickHouse/ClickHouse/pull/5191) ([Danila Kutenin](https://github.com/danlark1))
- Do not add the right join key column to the join result if it’s used only in the JOIN ON section. [\#5260](https://github.com/ClickHouse/ClickHouse/pull/5260) ([Artem Zuikov](https://github.com/4ertus2))
- Freeze the Kafka buffer after the first empty response. It avoids multiple invocations of `ReadBuffer::next()` for empty results in some row-parsing streams. [\#5283](https://github.com/ClickHouse/ClickHouse/pull/5283) ([Ivan](https://github.com/abyss7))
- `concat` function optimization for multiple arguments. [\#5357](https://github.com/ClickHouse/ClickHouse/pull/5357) ([Danila Kutenin](https://github.com/danlark1))
- Query optimisation. Allow pushing down IN statements while rewriting comma/cross joins into inner ones. [\#5396](https://github.com/ClickHouse/ClickHouse/pull/5396) ([Artem Zuikov](https://github.com/4ertus2))
- Replace our LZ4 implementation with the reference one to get faster decompression. [\#5070](https://github.com/ClickHouse/ClickHouse/pull/5070) ([Danila Kutenin](https://github.com/danlark1))
- Implemented MSD radix sort (based on kxsort), and partial sorting. [\#5129](https://github.com/ClickHouse/ClickHouse/pull/5129) ([Evgenii Pravda](https://github.com/kvinty))

#### Bug Fixes {#bug-fixes}

- Fix pushing required columns with JOIN [\#5192](https://github.com/ClickHouse/ClickHouse/pull/5192) ([Winter Zhang](https://github.com/zhang2014))
- Fixed a bug where, when ClickHouse is run by systemd, the command `sudo service clickhouse-server forcerestart` was not working as expected. [\#5204](https://github.com/ClickHouse/ClickHouse/pull/5204) ([proller](https://github.com/proller))
- Fix HTTP error codes in DataPartsExchange (the interserver HTTP server on port 9009 always returned code 200, even on errors). [\#5216](https://github.com/ClickHouse/ClickHouse/pull/5216) ([proller](https://github.com/proller))
- Fix SimpleAggregateFunction for Strings longer than MAX\_SMALL\_STRING\_SIZE [\#5311](https://github.com/ClickHouse/ClickHouse/pull/5311) ([Azat Khuzhin](https://github.com/azat))
- Fix error for `Decimal` to `Nullable(Decimal)` conversion in IN. Support other Decimal to Decimal conversions (including different scales). [\#5350](https://github.com/ClickHouse/ClickHouse/pull/5350) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed FPU clobbering in the simdjson library that led to a wrong calculation of the `uniqHLL` and `uniqCombined` aggregate functions and math functions such as `log`. [\#5354](https://github.com/ClickHouse/ClickHouse/pull/5354) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed handling of mixed const/nonconst cases in JSON functions. [\#5435](https://github.com/ClickHouse/ClickHouse/pull/5435) ([Vitaly Baranov](https://github.com/vitlibar))
- Fix `retention` function. Now all conditions that are satisfied in a row of data are added to the data state. [\#5119](https://github.com/ClickHouse/ClickHouse/pull/5119) ([小路](https://github.com/nicelulu))
- Fix result type for `quantileExact` with Decimals. [\#5304](https://github.com/ClickHouse/ClickHouse/pull/5304) ([Artem Zuikov](https://github.com/4ertus2))

#### Documentation {#documentation}

- Translate documentation for `CollapsingMergeTree` to Chinese. [\#5168](https://github.com/ClickHouse/ClickHouse/pull/5168) ([张风啸](https://github.com/AlexZFX))
- Translate some documentation about table engines to Chinese.
  [\#5134](https://github.com/ClickHouse/ClickHouse/pull/5134) [\#5328](https://github.com/ClickHouse/ClickHouse/pull/5328) ([never lee](https://github.com/neverlee))

#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements}

- Fix some sanitizer reports that show probable use-after-free. [\#5139](https://github.com/ClickHouse/ClickHouse/pull/5139) [\#5143](https://github.com/ClickHouse/ClickHouse/pull/5143) [\#5393](https://github.com/ClickHouse/ClickHouse/pull/5393) ([Ivan](https://github.com/abyss7))
- Move performance tests out of separate directories for convenience. [\#5158](https://github.com/ClickHouse/ClickHouse/pull/5158) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix incorrect performance tests. [\#5255](https://github.com/ClickHouse/ClickHouse/pull/5255) ([alesapin](https://github.com/alesapin))
- Added a tool to calculate checksums caused by bit flips to debug hardware issues. [\#5334](https://github.com/ClickHouse/ClickHouse/pull/5334) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Make the runner script more usable. [\#5340](https://github.com/ClickHouse/ClickHouse/pull/5340)[\#5360](https://github.com/ClickHouse/ClickHouse/pull/5360) ([filimonov](https://github.com/filimonov))
- Add a small instruction on how to write performance tests. [\#5408](https://github.com/ClickHouse/ClickHouse/pull/5408) ([alesapin](https://github.com/alesapin))
- Add the ability to make substitutions in create, fill and drop queries in performance tests [\#5367](https://github.com/ClickHouse/ClickHouse/pull/5367) ([Olga Khvostikova](https://github.com/stavrolia))

## ClickHouse release 19.7 {#clickhouse-release-19-7}

### ClickHouse release 19.7.5.29, 2019-07-05 {#clickhouse-release-19-7-5-29-2019-07-05}

#### Bug Fix {#bug-fix-25}

- Fix performance regression in some queries with JOIN. [\#5192](https://github.com/ClickHouse/ClickHouse/pull/5192) ([Winter Zhang](https://github.com/zhang2014))

### ClickHouse release 19.7.5.27, 2019-06-09 {#clickhouse-release-19-7-5-27-2019-06-09}

#### New features {#new-features-1}

- Added bitmap-related functions `bitmapHasAny` and `bitmapHasAll`, analogous to the `hasAny` and `hasAll` functions for arrays (see the sketch after this list). [\#5279](https://github.com/ClickHouse/ClickHouse/pull/5279) ([Sergi Vladykin](https://github.com/svladykin))
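
A sketch of the new bitmap predicates, mirroring `hasAny`/`hasAll` for arrays:

```sql
SELECT
    bitmapHasAny(bitmapBuild([1, 2, 3]), bitmapBuild([3, 4])) AS any_common,  -- 1
    bitmapHasAll(bitmapBuild([1, 2, 3]), bitmapBuild([1, 4])) AS all_present; -- 0
```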

#### Bug Fixes {#bug-fixes-1}

- Fix segfault on `minmax` INDEX with Null value. [\#5246](https://github.com/ClickHouse/ClickHouse/pull/5246) ([Nikita Vasilev](https://github.com/nikvas0))
- Mark all input columns in LIMIT BY as required output. It fixes the ‘Not found column’ error in some distributed queries. [\#5407](https://github.com/ClickHouse/ClickHouse/pull/5407) ([Constantin S. Pan](https://github.com/kvap))
- Fix “Column ‘0’ already exists” error in `SELECT .. PREWHERE` on a column with DEFAULT [\#5397](https://github.com/ClickHouse/ClickHouse/pull/5397) ([proller](https://github.com/proller))
- Fix `ALTER MODIFY TTL` query on `ReplicatedMergeTree`. [\#5539](https://github.com/ClickHouse/ClickHouse/pull/5539/commits) ([Anton Popov](https://github.com/CurtizJ))
- Don’t crash the server when Kafka consumers have failed to start. [\#5285](https://github.com/ClickHouse/ClickHouse/pull/5285) ([Ivan](https://github.com/abyss7))
- Fixed bitmap functions producing wrong results. [\#5359](https://github.com/ClickHouse/ClickHouse/pull/5359) ([Andy Yang](https://github.com/andyyzh))
- Fix element\_count for hashed dictionaries (do not include duplicates) [\#5440](https://github.com/ClickHouse/ClickHouse/pull/5440) ([Azat Khuzhin](https://github.com/azat))
- Use the contents of the environment variable TZ as the name for the timezone. It helps to correctly detect the default timezone in some cases. [\#5443](https://github.com/ClickHouse/ClickHouse/pull/5443) ([Ivan](https://github.com/abyss7))
- Do not try to convert integers in `dictGetT` functions, because it doesn’t work correctly. Throw an exception instead. [\#5446](https://github.com/ClickHouse/ClickHouse/pull/5446) ([Artem Zuikov](https://github.com/4ertus2))
- Fix settings in ExternalData HTTP requests. [\#5455](https://github.com/ClickHouse/ClickHouse/pull/5455) ([Danila Kutenin](https://github.com/danlark1))
- Fix bug when parts were removed only from FS without dropping them from ZooKeeper. [\#5520](https://github.com/ClickHouse/ClickHouse/pull/5520) ([alesapin](https://github.com/alesapin))
- Fix segmentation fault in the `bitmapHasAny` function. [\#5528](https://github.com/ClickHouse/ClickHouse/pull/5528) ([Zhichang Yu](https://github.com/yuzhichang))
- Fixed error when the replication connection pool doesn’t retry to resolve a host, even when the DNS cache was dropped. [\#5534](https://github.com/ClickHouse/ClickHouse/pull/5534) ([alesapin](https://github.com/alesapin))
- Fixed `DROP INDEX IF EXISTS` query. Now `ALTER TABLE ... DROP INDEX IF EXISTS ...` doesn’t raise an exception if the provided index does not exist. [\#5524](https://github.com/ClickHouse/ClickHouse/pull/5524) ([Gleb Novikov](https://github.com/NanoBjorn))
- Fix union all supertype column. There were cases with inconsistent data and column types of resulting columns. [\#5503](https://github.com/ClickHouse/ClickHouse/pull/5503) ([Artem Zuikov](https://github.com/4ertus2))
- Skip ZNONODE during DDL query processing. Before, if another node removed the znode in the task queue, the one that did not process it but already got the list of children would terminate the DDLWorker thread. [\#5489](https://github.com/ClickHouse/ClickHouse/pull/5489) ([Azat Khuzhin](https://github.com/azat))
- Fix INSERT into a Distributed() table with a MATERIALIZED column. [\#5429](https://github.com/ClickHouse/ClickHouse/pull/5429) ([Azat Khuzhin](https://github.com/azat))

### ClickHouse release 19.7.3.9, 2019-05-30 {#clickhouse-release-19-7-3-9-2019-05-30}

#### New Features {#new-features-2}

- Allow limiting the range of a setting that can be specified by the user. These constraints can be set up in the user settings profile. [\#4931](https://github.com/ClickHouse/ClickHouse/pull/4931) ([Vitaly Baranov](https://github.com/vitlibar))
- Add a second version of the function `groupUniqArray` with an optional `max_size` parameter that limits the size of the resulting array. This behavior is similar to the `groupArray(max_size)(x)` function (see the sketch after this list). [\#5026](https://github.com/ClickHouse/ClickHouse/pull/5026) ([Guillaume Tassery](https://github.com/YiuRULE))
- For TSVWithNames/CSVWithNames input file formats, column order can now be determined from the file header. This is controlled by the `input_format_with_names_use_header` parameter. [\#5081](https://github.com/ClickHouse/ClickHouse/pull/5081) ([Alexander](https://github.com/Akazz))
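
A sketch of the parametric `groupUniqArray(max_size)` form described above:

```sql
-- At most 3 distinct values are kept in the resulting array.
SELECT groupUniqArray(3)(number % 10) AS sample
FROM numbers(1000);
```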

#### Bug Fixes {#bug-fixes-2}

- Crash with uncompressed\_cache + JOIN during merge (\#5197) [\#5133](https://github.com/ClickHouse/ClickHouse/pull/5133) ([Danila Kutenin](https://github.com/danlark1))
- Segmentation fault on a clickhouse-client query to system tables. \#5066 [\#5127](https://github.com/ClickHouse/ClickHouse/pull/5127) ([Ivan](https://github.com/abyss7))
- Data loss on heavy load via KafkaEngine (\#4736) [\#5080](https://github.com/ClickHouse/ClickHouse/pull/5080) ([Ivan](https://github.com/abyss7))
- Fixed very rare data race condition that could happen when executing a query with UNION ALL involving at least two SELECTs from system.columns, system.tables, system.parts, system.parts\_tables or tables of the Merge family and performing ALTER of columns of the related tables concurrently. [\#5189](https://github.com/ClickHouse/ClickHouse/pull/5189) ([alexey-milovidov](https://github.com/alexey-milovidov))

#### Performance Improvements {#performance-improvements-1}

- Use radix sort for sorting by a single numeric column in `ORDER BY` without `LIMIT`. [\#5106](https://github.com/ClickHouse/ClickHouse/pull/5106), [\#4439](https://github.com/ClickHouse/ClickHouse/pull/4439) ([Evgenii Pravda](https://github.com/kvinty), [alexey-milovidov](https://github.com/alexey-milovidov))

#### Documentation {#documentation-1}

- Translate documentation for some table engines to Chinese. [\#5107](https://github.com/ClickHouse/ClickHouse/pull/5107), [\#5094](https://github.com/ClickHouse/ClickHouse/pull/5094), [\#5087](https://github.com/ClickHouse/ClickHouse/pull/5087) ([张风啸](https://github.com/AlexZFX)), [\#5068](https://github.com/ClickHouse/ClickHouse/pull/5068) ([never lee](https://github.com/neverlee))

#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-1}

- Print UTF-8 characters properly in `clickhouse-test`. [\#5084](https://github.com/ClickHouse/ClickHouse/pull/5084) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Add a command line parameter for clickhouse-client to always load suggestion data. [\#5102](https://github.com/ClickHouse/ClickHouse/pull/5102) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Resolve some of the PVS-Studio warnings. [\#5082](https://github.com/ClickHouse/ClickHouse/pull/5082) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Update LZ4 [\#5040](https://github.com/ClickHouse/ClickHouse/pull/5040) ([Danila Kutenin](https://github.com/danlark1))
- Add gperf to build requirements for upcoming pull request \#5030. [\#5110](https://github.com/ClickHouse/ClickHouse/pull/5110) ([proller](https://github.com/proller))

## ClickHouse release 19.6 {#clickhouse-release-19-6}

### ClickHouse release 19.6.3.18, 2019-06-13 {#clickhouse-release-19-6-3-18-2019-06-13}

#### Bug Fixes {#bug-fixes-3}

- Fixed IN condition pushdown for queries from table functions `mysql` and `odbc` and corresponding table engines. This fixes \#3540 and \#2384. [\#5313](https://github.com/ClickHouse/ClickHouse/pull/5313) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix deadlock in ZooKeeper. [\#5297](https://github.com/ClickHouse/ClickHouse/pull/5297) ([github1youlc](https://github.com/github1youlc))
- Allow quoted decimals in CSV.
[\#5284](https://github.com/ClickHouse/ClickHouse/pull/5284) ([Artem Zuikov](https://github.com/4ertus2))
- Disallow conversion from float Inf/NaN into Decimals (throw an exception). [\#5282](https://github.com/ClickHouse/ClickHouse/pull/5282) ([Artem Zuikov](https://github.com/4ertus2))
- Fix data race in a rename query. [\#5247](https://github.com/ClickHouse/ClickHouse/pull/5247) ([Winter Zhang](https://github.com/zhang2014))
- Temporarily disable LFAlloc. Usage of LFAlloc might lead to a lot of MAP\_FAILED errors in allocating UncompressedCache and, as a result, to crashes of queries on highly loaded servers. [cfdba93](https://github.com/ClickHouse/ClickHouse/commit/cfdba938ce22f16efeec504f7f90206a515b1280) ([Danila Kutenin](https://github.com/danlark1))

### ClickHouse release 19.6.2.11, 2019-05-13 {#clickhouse-release-19-6-2-11-2019-05-13}

#### New Features {#new-features-3}

- TTL expressions for columns and tables (see the sketch after this list). [\#4212](https://github.com/ClickHouse/ClickHouse/pull/4212) ([Anton Popov](https://github.com/CurtizJ))
- Added support for `brotli` compression for HTTP responses (Accept-Encoding: br) [\#4388](https://github.com/ClickHouse/ClickHouse/pull/4388) ([Mikhail](https://github.com/fandyushin))
- Added new function `isValidUTF8` for checking whether a set of bytes is correctly UTF-8 encoded. [\#4934](https://github.com/ClickHouse/ClickHouse/pull/4934) ([Danila Kutenin](https://github.com/danlark1))
- Add new load balancing policy `first_or_random`, which sends queries to the first specified host and, if it’s inaccessible, sends queries to random hosts of the shard. Useful for cross-replication topology setups. [\#5012](https://github.com/ClickHouse/ClickHouse/pull/5012) ([nvartolomei](https://github.com/nvartolomei))
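
A hedged sketch of column- and table-level TTL expressions from this release; the table is illustrative:

```sql
CREATE TABLE ttl_demo
(
    `event_date` Date,
    `id` UInt64,
    `payload` String TTL event_date + INTERVAL 1 WEEK  -- column values expire first
)
ENGINE = MergeTree
ORDER BY id
TTL event_date + INTERVAL 1 MONTH;  -- whole rows expire later
```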

#### Experimental Features {#experimental-features-1}

- Add setting `index_granularity_bytes` (adaptive index granularity) for the MergeTree\* tables family. [\#4826](https://github.com/ClickHouse/ClickHouse/pull/4826) ([alesapin](https://github.com/alesapin))

#### Improvements {#improvements-1}

- Added support for non-constant and negative size and length arguments for function `substringUTF8`. [\#4989](https://github.com/ClickHouse/ClickHouse/pull/4989) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Disable push-down to the right table in left join, the left table in right join, and both tables in full join. This fixes wrong JOIN results in some cases. [\#4846](https://github.com/ClickHouse/ClickHouse/pull/4846) ([Ivan](https://github.com/abyss7))
- `clickhouse-copier`: auto upload task configuration from the `--task-file` option [\#4876](https://github.com/ClickHouse/ClickHouse/pull/4876) ([proller](https://github.com/proller))
- Added a typo handler for the storage factory and table functions factory. [\#4891](https://github.com/ClickHouse/ClickHouse/pull/4891) ([Danila Kutenin](https://github.com/danlark1))
- Support asterisks and qualified asterisks for multiple joins without subqueries [\#4898](https://github.com/ClickHouse/ClickHouse/pull/4898) ([Artem Zuikov](https://github.com/4ertus2))
- Make missing column error messages more user-friendly. [\#4915](https://github.com/ClickHouse/ClickHouse/pull/4915) ([Artem Zuikov](https://github.com/4ertus2))

#### Performance Improvements {#performance-improvements-2}

- Significant speedup of ASOF JOIN [\#4924](https://github.com/ClickHouse/ClickHouse/pull/4924) ([Martijn Bakker](https://github.com/Gladdy))

#### Backward Incompatible Changes {#backward-incompatible-changes}

- HTTP header `Query-Id` was renamed to `X-ClickHouse-Query-Id` for consistency. [\#4972](https://github.com/ClickHouse/ClickHouse/pull/4972) ([Mikhail](https://github.com/fandyushin))

#### Bug Fixes {#bug-fixes-4}

- Fixed potential null pointer dereference in `clickhouse-copier`. [\#4900](https://github.com/ClickHouse/ClickHouse/pull/4900) ([proller](https://github.com/proller))
- Fixed error on queries with JOIN + ARRAY JOIN [\#4938](https://github.com/ClickHouse/ClickHouse/pull/4938) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed hanging on start of the server when a dictionary depends on another dictionary via a database with engine=Dictionary. [\#4962](https://github.com/ClickHouse/ClickHouse/pull/4962) ([Vitaly Baranov](https://github.com/vitlibar))
- Partially fix distributed\_product\_mode = local. It’s possible to allow columns of local tables in where/having/order by/… via table aliases. Throw an exception if the table does not have an alias. It’s not yet possible to access the columns without table aliases. [\#4986](https://github.com/ClickHouse/ClickHouse/pull/4986) ([Artem Zuikov](https://github.com/4ertus2))
- Fix potentially wrong result for `SELECT DISTINCT` with `JOIN` [\#5001](https://github.com/ClickHouse/ClickHouse/pull/5001) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed very rare data race condition that could happen when executing a query with UNION ALL involving at least two SELECTs from system.columns, system.tables, system.parts, system.parts\_tables or tables of the Merge family and performing ALTER of columns of the related tables concurrently. [\#5189](https://github.com/ClickHouse/ClickHouse/pull/5189) ([alexey-milovidov](https://github.com/alexey-milovidov))

#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-2}

- Fixed test failures when running clickhouse-server on a different host [\#4713](https://github.com/ClickHouse/ClickHouse/pull/4713) ([Vasily Nemkov](https://github.com/Enmk))
- clickhouse-test: Disable color control sequences in non-tty environments. [\#4937](https://github.com/ClickHouse/ClickHouse/pull/4937) ([alesapin](https://github.com/alesapin))
- clickhouse-test: Allow using any test database (remove the `test.` qualification where possible) [\#5008](https://github.com/ClickHouse/ClickHouse/pull/5008) ([proller](https://github.com/proller))
- Fix ubsan errors [\#5037](https://github.com/ClickHouse/ClickHouse/pull/5037) ([Vitaly Baranov](https://github.com/vitlibar))
- Yandex LFAlloc was added to ClickHouse to allocate MarkCache and UncompressedCache data in different ways to catch segfaults more reliably [\#4995](https://github.com/ClickHouse/ClickHouse/pull/4995) ([Danila Kutenin](https://github.com/danlark1))
- A Python util to help with backports and changelogs.
[\#4949](https://github.com/ClickHouse/ClickHouse/pull/4949) ([Ivan](https://github.com/abyss7))

## ClickHouse release 19.5 {#clickhouse-release-19-5}

### ClickHouse release 19.5.4.22, 2019-05-13 {#clickhouse-release-19-5-4-22-2019-05-13}

#### Bug fixes {#bug-fixes-5}

- Fixed possible crash in bitmap\* functions [\#5220](https://github.com/ClickHouse/ClickHouse/pull/5220) [\#5228](https://github.com/ClickHouse/ClickHouse/pull/5228) ([Andy Yang](https://github.com/andyyzh))
- Fixed very rare data race condition that could happen when executing a query with UNION ALL involving at least two SELECTs from system.columns, system.tables, system.parts, system.parts\_tables or tables of the Merge family and performing ALTER of columns of the related tables concurrently. [\#5189](https://github.com/ClickHouse/ClickHouse/pull/5189) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed error `Set for IN is not created yet in case of using single LowCardinality column in the left part of IN`. This error happened if the LowCardinality column was part of the primary key. \#5031 [\#5154](https://github.com/ClickHouse/ClickHouse/pull/5154) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Modification of the retention function: if a row satisfied both the first and the Nth condition, only the first satisfied condition was added to the data state. Now all conditions that are satisfied in a row of data are added to the data state. [\#5119](https://github.com/ClickHouse/ClickHouse/pull/5119) ([小路](https://github.com/nicelulu))

### ClickHouse release 19.5.3.8, 2019-04-18 {#clickhouse-release-19-5-3-8-2019-04-18}

#### Bug fixes {#bug-fixes-6}

- Fixed type of setting `max_partitions_per_insert_block` from boolean to UInt64. [\#5028](https://github.com/ClickHouse/ClickHouse/pull/5028) ([Mohammad Hossein Sekhavat](https://github.com/mhsekhavat))

### ClickHouse release 19.5.2.6, 2019-04-15 {#clickhouse-release-19-5-2-6-2019-04-15}

#### New Features {#new-features-4}

- [Hyperscan](https://github.com/intel/hyperscan) multiple regular expression matching was added (functions `multiMatchAny`, `multiMatchAnyIndex`, `multiFuzzyMatchAny`, `multiFuzzyMatchAnyIndex`); see the sketch after this list. [\#4780](https://github.com/ClickHouse/ClickHouse/pull/4780), [\#4841](https://github.com/ClickHouse/ClickHouse/pull/4841) ([Danila Kutenin](https://github.com/danlark1))
- `multiSearchFirstPosition` function was added. [\#4780](https://github.com/ClickHouse/ClickHouse/pull/4780) ([Danila Kutenin](https://github.com/danlark1))
- Implement the predefined expression filter per row for tables. [\#4792](https://github.com/ClickHouse/ClickHouse/pull/4792) ([Ivan](https://github.com/abyss7))
- A new type of data skipping indices based on bloom filters (can be used for `equal`, `in` and `like` functions). [\#4499](https://github.com/ClickHouse/ClickHouse/pull/4499) ([Nikita Vasilev](https://github.com/nikvas0))
- Added `ASOF JOIN` which allows running queries that join to the most recent value known. [\#4774](https://github.com/ClickHouse/ClickHouse/pull/4774) [\#4867](https://github.com/ClickHouse/ClickHouse/pull/4867) [\#4863](https://github.com/ClickHouse/ClickHouse/pull/4863) [\#4875](https://github.com/ClickHouse/ClickHouse/pull/4875) ([Martijn Bakker](https://github.com/Gladdy), [Artem Zuikov](https://github.com/4ertus2))
- Rewrite multiple `COMMA JOIN` to `CROSS JOIN`. Then rewrite them to `INNER JOIN` if possible. [\#4661](https://github.com/ClickHouse/ClickHouse/pull/4661) ([Artem Zuikov](https://github.com/4ertus2))
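
A hedged sketch of the multi-pattern matching and `ASOF JOIN` features above. The `trades`/`quotes` tables are hypothetical, and the `USING` form (last column as the inexact-match key) is assumed to be the syntax of this release:

```sql
-- Returns 1 if any of the regexps matches.
SELECT multiMatchAny('clickhouse changelog', ['click', 'log$']) AS matched;

-- For each trade, join the most recent quote with quotes.ts <= trades.ts
-- for the same symbol.
SELECT *
FROM trades
ASOF JOIN quotes USING (symbol, ts);
```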

#### Improvement {#improvement-9}

- `topK` and `topKWeighted` now support a custom `loadFactor` (fixes issue [\#4252](https://github.com/ClickHouse/ClickHouse/issues/4252)). [\#4634](https://github.com/ClickHouse/ClickHouse/pull/4634) ([Kirill Danshin](https://github.com/kirillDanshin))
- Allow using `parallel_replicas_count > 1` even for tables without sampling (the setting is simply ignored for them). In previous versions it led to an exception. [\#4637](https://github.com/ClickHouse/ClickHouse/pull/4637) ([Alexey Elymanov](https://github.com/digitalist))
- Support for `CREATE OR REPLACE VIEW`. Allows creating a view or setting a new definition in a single statement (see the sketch after this list). [\#4654](https://github.com/ClickHouse/ClickHouse/pull/4654) ([Boris Granveaud](https://github.com/bgranvea))
- `Buffer` table engine now supports `PREWHERE`. [\#4671](https://github.com/ClickHouse/ClickHouse/pull/4671) ([Yangkuan Liu](https://github.com/LiuYangkuan))
- Add the ability to start a replicated table without metadata in ZooKeeper in `readonly` mode. [\#4691](https://github.com/ClickHouse/ClickHouse/pull/4691) ([alesapin](https://github.com/alesapin))
- Fixed flicker of the progress bar in clickhouse-client. The issue was most noticeable when using `FORMAT Null` with streaming queries. [\#4811](https://github.com/ClickHouse/ClickHouse/pull/4811) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Allow disabling functions with the `hyperscan` library on a per-user basis to limit potentially excessive and uncontrolled resource usage. [\#4816](https://github.com/ClickHouse/ClickHouse/pull/4816) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Add version number logging in all errors. [\#4824](https://github.com/ClickHouse/ClickHouse/pull/4824) ([proller](https://github.com/proller))
- Added restriction to the `multiMatch` functions which requires the string size to fit into `unsigned int`. Also added the number-of-arguments limit to the `multiSearch` functions. [\#4834](https://github.com/ClickHouse/ClickHouse/pull/4834) ([Danila Kutenin](https://github.com/danlark1))
- Improved usage of scratch space and error handling in Hyperscan. [\#4866](https://github.com/ClickHouse/ClickHouse/pull/4866) ([Danila Kutenin](https://github.com/danlark1))
- Fill `system.graphite_retentions` from a table config of `*GraphiteMergeTree` engine tables. [\#4584](https://github.com/ClickHouse/ClickHouse/pull/4584) ([Mikhail f. Shiryaev](https://github.com/Felixoid))
- Rename `trigramDistance` function to `ngramDistance` and add more functions with `CaseInsensitive` and `UTF`. [\#4602](https://github.com/ClickHouse/ClickHouse/pull/4602) ([Danila Kutenin](https://github.com/danlark1))
- Improved data skipping indices calculation. [\#4640](https://github.com/ClickHouse/ClickHouse/pull/4640) ([Nikita Vasilev](https://github.com/nikvas0))
- Keep ordinary, `DEFAULT`, `MATERIALIZED` and `ALIAS` columns in a single list (fixes issue [\#2867](https://github.com/ClickHouse/ClickHouse/issues/2867)). [\#4707](https://github.com/ClickHouse/ClickHouse/pull/4707) ([Alex Zatelepin](https://github.com/ztlpn))
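
A small sketch of `CREATE OR REPLACE VIEW` from the list above:

```sql
CREATE VIEW v_demo AS SELECT 1 AS x;
-- Replaces the definition in a single statement instead of DROP + CREATE.
CREATE OR REPLACE VIEW v_demo AS SELECT 2 AS x;
```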

#### Bug Fix {#bug-fix-26}

- Avoid `std::terminate` in case of memory allocation failure. Now a `std::bad_alloc` exception is thrown as expected. [\#4665](https://github.com/ClickHouse/ClickHouse/pull/4665) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixes capnproto reading from buffer. Sometimes files weren’t loaded successfully over HTTP. [\#4674](https://github.com/ClickHouse/ClickHouse/pull/4674) ([Vladislav](https://github.com/smirnov-vs))
- Fix error `Unknown log entry type: 0` after an `OPTIMIZE TABLE FINAL` query. [\#4683](https://github.com/ClickHouse/ClickHouse/pull/4683) ([Amos Bird](https://github.com/amosbird))
- Wrong arguments to `hasAny` or `hasAll` functions may lead to segfault. [\#4698](https://github.com/ClickHouse/ClickHouse/pull/4698) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Deadlock may happen while executing a `DROP DATABASE dictionary` query. [\#4701](https://github.com/ClickHouse/ClickHouse/pull/4701) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix undefined behavior in `median` and `quantile` functions. [\#4702](https://github.com/ClickHouse/ClickHouse/pull/4702) ([hcz](https://github.com/hczhcz))
- Fix compression level detection when `network_compression_method` is in lowercase. Broken in v19.1. [\#4706](https://github.com/ClickHouse/ClickHouse/pull/4706) ([proller](https://github.com/proller))
- Fixed ignorance of the `UTC` setting (fixes issue [\#4658](https://github.com/ClickHouse/ClickHouse/issues/4658)). [\#4718](https://github.com/ClickHouse/ClickHouse/pull/4718) ([proller](https://github.com/proller))
- Fix `histogram` function behaviour with `Distributed` tables. [\#4741](https://github.com/ClickHouse/ClickHouse/pull/4741) ([olegkv](https://github.com/olegkv))
- Fixed TSan report `destroy of a locked mutex`. [\#4742](https://github.com/ClickHouse/ClickHouse/pull/4742) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed TSan report on shutdown due to race condition in system logs usage. Fixed potential use-after-free on shutdown when part\_log is enabled. [\#4758](https://github.com/ClickHouse/ClickHouse/pull/4758) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix rechecking parts in `ReplicatedMergeTreeAlterThread` in case of an error. [\#4772](https://github.com/ClickHouse/ClickHouse/pull/4772) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Arithmetic operations on intermediate aggregate function states were not working for constant arguments (such as subquery results). [\#4776](https://github.com/ClickHouse/ClickHouse/pull/4776) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Always backquote column names in metadata. Otherwise it’s impossible to create a table with a column named `index` (the server won’t restart due to a malformed `ATTACH` query in metadata). [\#4782](https://github.com/ClickHouse/ClickHouse/pull/4782) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix crash in `ALTER ... MODIFY ORDER BY` on a `Distributed` table. [\#4790](https://github.com/ClickHouse/ClickHouse/pull/4790) ([TCeason](https://github.com/TCeason))
- Fix segfault in `JOIN ON` with enabled `enable_optimize_predicate_expression`. [\#4794](https://github.com/ClickHouse/ClickHouse/pull/4794) ([Winter Zhang](https://github.com/zhang2014))
- Fix bug with adding an extraneous row after consuming a protobuf message from Kafka. [\#4808](https://github.com/ClickHouse/ClickHouse/pull/4808) ([Vitaly Baranov](https://github.com/vitlibar))
- Fix crash of `JOIN` on not-nullable vs nullable column. Fix `NULLs` in right keys in `ANY JOIN` + `join_use_nulls`. [\#4815](https://github.com/ClickHouse/ClickHouse/pull/4815) ([Artem Zuikov](https://github.com/4ertus2))
- Fix segmentation fault in `clickhouse-copier`.
[\#4835](https://github.com/ClickHouse/ClickHouse/pull/4835) ([proller](https://github.com/proller))
- Fixed race condition in `SELECT` from `system.tables` if the table is renamed or altered concurrently. [\#4836](https://github.com/ClickHouse/ClickHouse/pull/4836) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a data race when fetching a data part that is already obsolete. [\#4839](https://github.com/ClickHouse/ClickHouse/pull/4839) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed rare data race that can happen during `RENAME` of a table of the MergeTree family. [\#4844](https://github.com/ClickHouse/ClickHouse/pull/4844) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed segmentation fault in function `arrayIntersect`. The segmentation fault could happen if the function was called with mixed constant and ordinary arguments. [\#4847](https://github.com/ClickHouse/ClickHouse/pull/4847) ([Lixiang Qian](https://github.com/fancyqlx))
- Fixed reading from an `Array(LowCardinality)` column in a rare case when the column contained a long sequence of empty arrays. [\#4850](https://github.com/ClickHouse/ClickHouse/pull/4850) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fix crash in `FULL/RIGHT JOIN` when joining on nullable vs not nullable. [\#4855](https://github.com/ClickHouse/ClickHouse/pull/4855) ([Artem Zuikov](https://github.com/4ertus2))
- Fix `No message received` exception while fetching parts between replicas. [\#4856](https://github.com/ClickHouse/ClickHouse/pull/4856) ([alesapin](https://github.com/alesapin))
- Fixed `arrayIntersect` function wrong result in case of several repeated values in a single array. [\#4871](https://github.com/ClickHouse/ClickHouse/pull/4871) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fix a race condition during concurrent `ALTER COLUMN` queries that could lead to a server crash (fixes issue [\#3421](https://github.com/ClickHouse/ClickHouse/issues/3421)). [\#4592](https://github.com/ClickHouse/ClickHouse/pull/4592) ([Alex Zatelepin](https://github.com/ztlpn))
- Fix incorrect result in `FULL/RIGHT JOIN` with a const column. [\#4723](https://github.com/ClickHouse/ClickHouse/pull/4723) ([Artem Zuikov](https://github.com/4ertus2))
- Fix duplicates in `GLOBAL JOIN` with asterisk. [\#4705](https://github.com/ClickHouse/ClickHouse/pull/4705) ([Artem Zuikov](https://github.com/4ertus2))
- Fix parameter deduction in `ALTER MODIFY` of column `CODEC` when the column type is not specified. [\#4883](https://github.com/ClickHouse/ClickHouse/pull/4883) ([alesapin](https://github.com/alesapin))
- Functions `cutQueryStringAndFragment()` and `queryStringAndFragment()` now work correctly when `URL` contains a fragment and no query. [\#4894](https://github.com/ClickHouse/ClickHouse/pull/4894) ([Vitaly Baranov](https://github.com/vitlibar))
- Fix rare bug when setting `min_bytes_to_use_direct_io` is greater than zero, which occurs when a thread has to seek backward in a column file. [\#4897](https://github.com/ClickHouse/ClickHouse/pull/4897) ([alesapin](https://github.com/alesapin))
- Fix wrong argument types for aggregate functions with `LowCardinality` arguments (fixes issue [\#4919](https://github.com/ClickHouse/ClickHouse/issues/4919)). [\#4922](https://github.com/ClickHouse/ClickHouse/pull/4922) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fix wrong name qualification in `GLOBAL JOIN`.
[\#4969](https://github.com/ClickHouse/ClickHouse/pull/4969) ([Artem Zuikov](https://github.com/4ertus2))
- Fix function `toISOWeek` result for year 1970. [\#4988](https://github.com/ClickHouse/ClickHouse/pull/4988) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix `DROP`, `TRUNCATE` and `OPTIMIZE` query duplication when executed `ON CLUSTER` for the `ReplicatedMergeTree*` tables family. [\#4991](https://github.com/ClickHouse/ClickHouse/pull/4991) ([alesapin](https://github.com/alesapin))

#### Backward Incompatible Change {#backward-incompatible-change-8}

- Rename setting `insert_sample_with_metadata` to setting `input_format_defaults_for_omitted_fields`. [\#4771](https://github.com/ClickHouse/ClickHouse/pull/4771) ([Artem Zuikov](https://github.com/4ertus2))
- Added setting `max_partitions_per_insert_block` (with value 100 by default). If an inserted block contains a larger number of partitions, an exception is thrown. Set it to 0 if you want to remove the limit (not recommended). [\#4845](https://github.com/ClickHouse/ClickHouse/pull/4845) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Multi-search functions were renamed (`multiPosition` to `multiSearchAllPositions`, `multiSearch` to `multiSearchAny`, `firstMatch` to `multiSearchFirstIndex`); see the sketch after this list. [\#4780](https://github.com/ClickHouse/ClickHouse/pull/4780) ([Danila Kutenin](https://github.com/danlark1))
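
The renamed multi-search functions in use; semantics are unchanged, only the names moved:

```sql
SELECT
    multiSearchAny('hello world', ['foo', 'world']) AS any_found,          -- was multiSearch
    multiSearchFirstIndex('hello world', ['foo', 'world']) AS first_idx,   -- was firstMatch
    multiSearchAllPositions('hello world', ['foo', 'world']) AS positions; -- was multiPosition
```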

#### Performance Improvement {#performance-improvement-6}

- Optimize Volnitsky searcher by inlining, giving about 5-10% search improvement for queries with many needles or many similar bigrams. [\#4862](https://github.com/ClickHouse/ClickHouse/pull/4862) ([Danila Kutenin](https://github.com/danlark1))
- Fix performance issue when setting `use_uncompressed_cache` is greater than zero, which appeared when all read data was contained in the cache. [\#4913](https://github.com/ClickHouse/ClickHouse/pull/4913) ([alesapin](https://github.com/alesapin))

#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-10}

- Hardening debug build: more granular memory mappings and ASLR; add memory protection for mark cache and index. This allows finding more memory stomping bugs in cases when ASan and MSan cannot do it. [\#4632](https://github.com/ClickHouse/ClickHouse/pull/4632) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Add support for cmake variables `ENABLE_PROTOBUF`, `ENABLE_PARQUET` and `ENABLE_BROTLI` which allow enabling/disabling the above features (same as we can do for librdkafka, mysql, etc). [\#4669](https://github.com/ClickHouse/ClickHouse/pull/4669) ([Silviu Caragea](https://github.com/silviucpp))
- Add ability to print process list and stacktraces of all threads if some queries hang after a test run. [\#4675](https://github.com/ClickHouse/ClickHouse/pull/4675) ([alesapin](https://github.com/alesapin))
- Add retries on `Connection loss` error in `clickhouse-test`. [\#4682](https://github.com/ClickHouse/ClickHouse/pull/4682) ([alesapin](https://github.com/alesapin))
- Add freebsd build with vagrant and build with thread sanitizer to packager script. [\#4712](https://github.com/ClickHouse/ClickHouse/pull/4712) [\#4748](https://github.com/ClickHouse/ClickHouse/pull/4748) ([alesapin](https://github.com/alesapin))
- Now the user is asked for a password for user `'default'` during installation. [\#4725](https://github.com/ClickHouse/ClickHouse/pull/4725) ([proller](https://github.com/proller))
- Suppress warning in `rdkafka` library. [\#4740](https://github.com/ClickHouse/ClickHouse/pull/4740) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Allow building without ssl. [\#4750](https://github.com/ClickHouse/ClickHouse/pull/4750) ([proller](https://github.com/proller))
- Add a way to launch clickhouse-server image from a custom user. [\#4753](https://github.com/ClickHouse/ClickHouse/pull/4753) ([Mikhail f. Shiryaev](https://github.com/Felixoid))
- Upgrade contrib boost to 1.69. [\#4793](https://github.com/ClickHouse/ClickHouse/pull/4793) ([proller](https://github.com/proller))
- Disable usage of `mremap` when compiled with Thread Sanitizer. Surprisingly enough, TSan does not intercept `mremap` (though it does intercept `mmap`, `munmap`), which leads to false positives. Fixed TSan report in stateful tests. [\#4859](https://github.com/ClickHouse/ClickHouse/pull/4859) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Add a test checking the use of format schema via the HTTP interface. [\#4864](https://github.com/ClickHouse/ClickHouse/pull/4864) ([Vitaly Baranov](https://github.com/vitlibar))

## ClickHouse release 19.4 {#clickhouse-release-19-4}

### ClickHouse release 19.4.4.33, 2019-04-17 {#clickhouse-release-19-4-4-33-2019-04-17}

#### Bug Fixes {#bug-fixes-7}

- Avoid `std::terminate` in case of memory allocation failure. Now `std::bad_alloc` exception is thrown as expected. [\#4665](https://github.com/ClickHouse/ClickHouse/pull/4665) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixes capnproto reading from buffer. Sometimes files weren’t loaded successfully by HTTP. [\#4674](https://github.com/ClickHouse/ClickHouse/pull/4674) ([Vladislav](https://github.com/smirnov-vs))
- Fix error `Unknown log entry type: 0` after `OPTIMIZE TABLE FINAL` query. [\#4683](https://github.com/ClickHouse/ClickHouse/pull/4683) ([Amos Bird](https://github.com/amosbird))
- Wrong arguments to `hasAny` or `hasAll` functions may lead to segfault. [\#4698](https://github.com/ClickHouse/ClickHouse/pull/4698) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Deadlock may happen while executing `DROP DATABASE dictionary` query. [\#4701](https://github.com/ClickHouse/ClickHouse/pull/4701) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix undefined behavior in `median` and `quantile` functions. [\#4702](https://github.com/ClickHouse/ClickHouse/pull/4702) ([hcz](https://github.com/hczhcz))
- Fix compression level detection when `network_compression_method` is in lowercase. Broken in v19.1. [\#4706](https://github.com/ClickHouse/ClickHouse/pull/4706) ([proller](https://github.com/proller))
- Fixed ignorance of `UTC` setting (fixes issue [\#4658](https://github.com/ClickHouse/ClickHouse/issues/4658)). [\#4718](https://github.com/ClickHouse/ClickHouse/pull/4718) ([proller](https://github.com/proller))
- Fix `histogram` function behaviour with `Distributed` tables. [\#4741](https://github.com/ClickHouse/ClickHouse/pull/4741) ([olegkv](https://github.com/olegkv))
- Fixed tsan report `destroy of a locked mutex`. [\#4742](https://github.com/ClickHouse/ClickHouse/pull/4742) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed TSan report on shutdown due to race condition in system logs usage. Fixed potential use-after-free on shutdown when part\_log is enabled. [\#4758](https://github.com/ClickHouse/ClickHouse/pull/4758) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix recheck parts in `ReplicatedMergeTreeAlterThread` in case of error. [\#4772](https://github.com/ClickHouse/ClickHouse/pull/4772) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Arithmetic operations on intermediate aggregate function states were not working for constant arguments (such as subquery results). [\#4776](https://github.com/ClickHouse/ClickHouse/pull/4776) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Always backquote column names in metadata. Otherwise it’s impossible to create a table with a column named `index` (the server won’t restart due to a malformed `ATTACH` query in metadata). [\#4782](https://github.com/ClickHouse/ClickHouse/pull/4782) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix crash in `ALTER ... MODIFY ORDER BY` on `Distributed` table. [\#4790](https://github.com/ClickHouse/ClickHouse/pull/4790) ([TCeason](https://github.com/TCeason))
- Fix segfault in `JOIN ON` with enabled `enable_optimize_predicate_expression`. [\#4794](https://github.com/ClickHouse/ClickHouse/pull/4794) ([Winter Zhang](https://github.com/zhang2014))
- Fix bug with adding an extraneous row after consuming a protobuf message from Kafka. [\#4808](https://github.com/ClickHouse/ClickHouse/pull/4808) ([Vitaly Baranov](https://github.com/vitlibar))
- Fix segmentation fault in `clickhouse-copier`. [\#4835](https://github.com/ClickHouse/ClickHouse/pull/4835) ([proller](https://github.com/proller))
- Fixed race condition in `SELECT` from `system.tables` if the table is renamed or altered concurrently. [\#4836](https://github.com/ClickHouse/ClickHouse/pull/4836) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed data race when fetching data part that is already obsolete. [\#4839](https://github.com/ClickHouse/ClickHouse/pull/4839) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed rare data race that can happen during `RENAME` table of MergeTree family. [\#4844](https://github.com/ClickHouse/ClickHouse/pull/4844) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed segmentation fault in function `arrayIntersect`. Segmentation fault could happen if function was called with mixed constant and ordinary arguments. [\#4847](https://github.com/ClickHouse/ClickHouse/pull/4847) ([Lixiang Qian](https://github.com/fancyqlx))
- Fixed reading from `Array(LowCardinality)` column in rare case when column contained a long sequence of empty arrays. [\#4850](https://github.com/ClickHouse/ClickHouse/pull/4850) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fix `No message received` exception while fetching parts between replicas. [\#4856](https://github.com/ClickHouse/ClickHouse/pull/4856) ([alesapin](https://github.com/alesapin))
- Fixed `arrayIntersect` function wrong result in case of several repeated values in a single array. [\#4871](https://github.com/ClickHouse/ClickHouse/pull/4871) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fix a race condition during concurrent `ALTER COLUMN` queries that could lead to a server crash (fixes issue [\#3421](https://github.com/ClickHouse/ClickHouse/issues/3421)). [\#4592](https://github.com/ClickHouse/ClickHouse/pull/4592) ([Alex Zatelepin](https://github.com/ztlpn))
- Fix parameter deduction in `ALTER MODIFY` of column `CODEC` when column type is not specified. [\#4883](https://github.com/ClickHouse/ClickHouse/pull/4883) ([alesapin](https://github.com/alesapin))
- Functions `cutQueryStringAndFragment()` and `queryStringAndFragment()` now work correctly when `URL` contains a fragment and no query. [\#4894](https://github.com/ClickHouse/ClickHouse/pull/4894) ([Vitaly Baranov](https://github.com/vitlibar))
- Fix rare bug when setting `min_bytes_to_use_direct_io` is greater than zero, which occurs when a thread has to seek backward in a column file. [\#4897](https://github.com/ClickHouse/ClickHouse/pull/4897) ([alesapin](https://github.com/alesapin))
- Fix wrong argument types for aggregate functions with `LowCardinality` arguments (fixes issue [\#4919](https://github.com/ClickHouse/ClickHouse/issues/4919)). [\#4922](https://github.com/ClickHouse/ClickHouse/pull/4922) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fix function `toISOWeek` result for year 1970. [\#4988](https://github.com/ClickHouse/ClickHouse/pull/4988) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix `DROP`, `TRUNCATE` and `OPTIMIZE` queries duplication, when executed with `ON CLUSTER` for `ReplicatedMergeTree*` tables family. [\#4991](https://github.com/ClickHouse/ClickHouse/pull/4991) ([alesapin](https://github.com/alesapin))

#### Improvements {#improvements-2}

- Keep ordinary, `DEFAULT`, `MATERIALIZED` and `ALIAS` columns in a single list (fixes issue [\#2867](https://github.com/ClickHouse/ClickHouse/issues/2867)). [\#4707](https://github.com/ClickHouse/ClickHouse/pull/4707) ([Alex Zatelepin](https://github.com/ztlpn))

### ClickHouse release 19.4.3.11, 2019-04-02 {#clickhouse-release-19-4-3-11-2019-04-02}

#### Bug Fixes {#bug-fixes-8}

- Fix crash in `FULL/RIGHT JOIN` when joining on nullable vs not nullable. [\#4855](https://github.com/ClickHouse/ClickHouse/pull/4855) ([Artem Zuikov](https://github.com/4ertus2))
- Fix segmentation fault in `clickhouse-copier`. [\#4835](https://github.com/ClickHouse/ClickHouse/pull/4835) ([proller](https://github.com/proller))

#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-11}

- Add a way to launch clickhouse-server image from a custom user. [\#4753](https://github.com/ClickHouse/ClickHouse/pull/4753) ([Mikhail f. Shiryaev](https://github.com/Felixoid))

### ClickHouse release 19.4.2.7, 2019-03-30 {#clickhouse-release-19-4-2-7-2019-03-30}

#### Bug Fixes {#bug-fixes-9}

- Fixed reading from `Array(LowCardinality)` column in rare case when column contained a long sequence of empty arrays. [\#4850](https://github.com/ClickHouse/ClickHouse/pull/4850) ([Nikolai Kochetov](https://github.com/KochetovNicolai))

### ClickHouse release 19.4.1.3, 2019-03-19 {#clickhouse-release-19-4-1-3-2019-03-19}

#### Bug Fixes {#bug-fixes-10}

- Fixed remote queries which contain both `LIMIT BY` and `LIMIT`. Previously, if `LIMIT BY` and `LIMIT` were used for a remote query, `LIMIT` could happen before `LIMIT BY`, which led to an over-filtered result. [\#4708](https://github.com/ClickHouse/ClickHouse/pull/4708) ([Constantin S. Pan](https://github.com/kvap))

### ClickHouse release 19.4.0.49, 2019-03-09 {#clickhouse-release-19-4-0-49-2019-03-09}

#### New Features {#new-features-5}

- Added full support for `Protobuf` format (input and output, nested data structures). [\#4174](https://github.com/ClickHouse/ClickHouse/pull/4174) [\#4493](https://github.com/ClickHouse/ClickHouse/pull/4493) ([Vitaly Baranov](https://github.com/vitlibar))
- Added bitmap functions with Roaring Bitmaps. [\#4207](https://github.com/ClickHouse/ClickHouse/pull/4207) ([Andy Yang](https://github.com/andyyzh)) [\#4568](https://github.com/ClickHouse/ClickHouse/pull/4568) ([Vitaly Baranov](https://github.com/vitlibar))
- Parquet format support. [\#4448](https://github.com/ClickHouse/ClickHouse/pull/4448) ([proller](https://github.com/proller))
- N-gram distance was added for fuzzy string comparison. It is similar to q-gram metrics in the R language. [\#4466](https://github.com/ClickHouse/ClickHouse/pull/4466) ([Danila Kutenin](https://github.com/danlark1))
- Combine rules for graphite rollup from dedicated aggregation and retention patterns. [\#4426](https://github.com/ClickHouse/ClickHouse/pull/4426) ([Mikhail f. Shiryaev](https://github.com/Felixoid))
- Added `max_execution_speed` and `max_execution_speed_bytes` to limit resource usage. Added `min_execution_speed_bytes` setting to complement the `min_execution_speed`. [\#4430](https://github.com/ClickHouse/ClickHouse/pull/4430) ([Winter Zhang](https://github.com/zhang2014))
- Implemented function `flatten` (see the example after this list). [\#4555](https://github.com/ClickHouse/ClickHouse/pull/4555) [\#4409](https://github.com/ClickHouse/ClickHouse/pull/4409) ([alexey-milovidov](https://github.com/alexey-milovidov), [kzon](https://github.com/kzon))
- Added functions `arrayEnumerateDenseRanked` and `arrayEnumerateUniqRanked` (like `arrayEnumerateUniq`, but allowing fine-tuning of the array depth to look inside multidimensional arrays). [\#4475](https://github.com/ClickHouse/ClickHouse/pull/4475) ([proller](https://github.com/proller)) [\#4601](https://github.com/ClickHouse/ClickHouse/pull/4601) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Multiple JOINs with some restrictions: no asterisks, no complex aliases in ON/WHERE/GROUP BY/… [\#4462](https://github.com/ClickHouse/ClickHouse/pull/4462) ([Artem Zuikov](https://github.com/4ertus2))
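
Two of the items above, illustrated with made-up values (the settings shown are the ones named in the entry; the thresholds are arbitrary):

```sql
-- flatten collapses a multidimensional array into a flat one:
SELECT flatten([[[1, 2], [3]], [[4, 5]]]) AS flat;  -- expected [1, 2, 3, 4, 5]

-- Execution speed limits, in rows/s and bytes/s respectively:
SET max_execution_speed = 1000000;
SET max_execution_speed_bytes = 100000000;
```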

#### Bug Fixes {#bug-fixes-11}

- This release also contains all bug fixes from 19.3 and 19.1.
- Fixed bug in data skipping indices: order of granules after INSERT was incorrect. [\#4407](https://github.com/ClickHouse/ClickHouse/pull/4407) ([Nikita Vasilev](https://github.com/nikvas0))
- Fixed `set` index for `Nullable` and `LowCardinality` columns. Before the fix, a `set` index with a `Nullable` or `LowCardinality` column led to the error `Data type must be deserialized with multiple streams` while selecting. [\#4594](https://github.com/ClickHouse/ClickHouse/pull/4594) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Correctly set update\_time on full `executable` dictionary update. [\#4551](https://github.com/ClickHouse/ClickHouse/pull/4551) ([Tema Novikov](https://github.com/temoon))
- Fix broken progress bar in 19.3. [\#4627](https://github.com/ClickHouse/ClickHouse/pull/4627) ([filimonov](https://github.com/filimonov))
- Fixed inconsistent values of MemoryTracker when a memory region was shrunk, in certain cases. [\#4619](https://github.com/ClickHouse/ClickHouse/pull/4619) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed undefined behaviour in ThreadPool. [\#4612](https://github.com/ClickHouse/ClickHouse/pull/4612) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a very rare crash with the message `mutex lock failed: Invalid argument` that could happen when a MergeTree table was dropped concurrently with a SELECT. [\#4608](https://github.com/ClickHouse/ClickHouse/pull/4608) ([Alex Zatelepin](https://github.com/ztlpn))
- ODBC driver compatibility with `LowCardinality` data type. [\#4381](https://github.com/ClickHouse/ClickHouse/pull/4381) ([proller](https://github.com/proller))
- FreeBSD: Fixup for `AIOcontextPool: Found io_event with unknown id 0` error. [\#4438](https://github.com/ClickHouse/ClickHouse/pull/4438) ([urgordeadbeef](https://github.com/urgordeadbeef))
- `system.part_log` table was created regardless of configuration. [\#4483](https://github.com/ClickHouse/ClickHouse/pull/4483) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix undefined behaviour in `dictIsIn` function for cache dictionaries. [\#4515](https://github.com/ClickHouse/ClickHouse/pull/4515) ([alesapin](https://github.com/alesapin))
- Fixed a deadlock when a SELECT query locks the same table multiple times (e.g. from different threads or when executing multiple subqueries) and there is a concurrent DDL query. [\#4535](https://github.com/ClickHouse/ClickHouse/pull/4535) ([Alex Zatelepin](https://github.com/ztlpn))
- Disable compile\_expressions by default until we get our own `llvm` contrib and can test it with `clang` and `asan`. [\#4579](https://github.com/ClickHouse/ClickHouse/pull/4579) ([alesapin](https://github.com/alesapin))
- Prevent `std::terminate` when `invalidate_query` for a `clickhouse` external dictionary source has returned a wrong result set (empty, more than one row, or more than one column). Fixed an issue where `invalidate_query` was performed every five seconds regardless of the `lifetime`. [\#4583](https://github.com/ClickHouse/ClickHouse/pull/4583) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Avoid deadlock when the `invalidate_query` for a dictionary with `clickhouse` source was involving `system.dictionaries` table or `Dictionaries` database (rare case). [\#4599](https://github.com/ClickHouse/ClickHouse/pull/4599) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixes for CROSS JOIN with empty WHERE. [\#4598](https://github.com/ClickHouse/ClickHouse/pull/4598) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed segfault in function “replicate” when a constant argument is passed. [\#4603](https://github.com/ClickHouse/ClickHouse/pull/4603) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix lambda function with predicate optimizer. [\#4408](https://github.com/ClickHouse/ClickHouse/pull/4408) ([Winter Zhang](https://github.com/zhang2014))
- Multiple JOINs multiple fixes. [\#4595](https://github.com/ClickHouse/ClickHouse/pull/4595) ([Artem Zuikov](https://github.com/4ertus2))

#### Improvements {#improvements-3}

- Support aliases in JOIN ON section for right table columns. [\#4412](https://github.com/ClickHouse/ClickHouse/pull/4412) ([Artem Zuikov](https://github.com/4ertus2))
- Result of multiple JOINs needs correct result names to be used in subselects. Replace flat aliases with source names in result. [\#4474](https://github.com/ClickHouse/ClickHouse/pull/4474) ([Artem Zuikov](https://github.com/4ertus2))
- Improve push-down logic for joined statements. [\#4387](https://github.com/ClickHouse/ClickHouse/pull/4387) ([Ivan](https://github.com/abyss7))

#### Performance Improvements {#performance-improvements-3}

- Improved heuristics of “move to PREWHERE” optimization. [\#4405](https://github.com/ClickHouse/ClickHouse/pull/4405) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Use proper lookup tables that use HashTable’s API for 8-bit and 16-bit keys. [\#4536](https://github.com/ClickHouse/ClickHouse/pull/4536) ([Amos Bird](https://github.com/amosbird))
- Improved performance of string comparison. [\#4564](https://github.com/ClickHouse/ClickHouse/pull/4564) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Cleanup distributed DDL queue in a separate thread so that it doesn’t slow down the main loop that processes distributed DDL tasks. [\#4502](https://github.com/ClickHouse/ClickHouse/pull/4502) ([Alex Zatelepin](https://github.com/ztlpn))
- When `min_bytes_to_use_direct_io` is set to 1, not every file was opened with O\_DIRECT mode because the data size to read was sometimes underestimated by the size of one compressed block. [\#4526](https://github.com/ClickHouse/ClickHouse/pull/4526) ([alexey-milovidov](https://github.com/alexey-milovidov))

#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-12}

- Added support for clang-9. [\#4604](https://github.com/ClickHouse/ClickHouse/pull/4604) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix wrong `__asm__` instructions (again). [\#4621](https://github.com/ClickHouse/ClickHouse/pull/4621) ([Konstantin Podshumok](https://github.com/podshumok))
- Add ability to specify settings for `clickhouse-performance-test` from command line. [\#4437](https://github.com/ClickHouse/ClickHouse/pull/4437) ([alesapin](https://github.com/alesapin))
- Add dictionaries tests to integration tests. [\#4477](https://github.com/ClickHouse/ClickHouse/pull/4477) ([alesapin](https://github.com/alesapin))
- Added queries from the benchmark on the website to automated performance tests. [\#4496](https://github.com/ClickHouse/ClickHouse/pull/4496) ([alexey-milovidov](https://github.com/alexey-milovidov))
- `xxhash.h` does not exist in external lz4 because it is an implementation detail and its symbols are namespaced with the `XXH_NAMESPACE` macro. When lz4 is external, xxHash has to be external too, and the dependents have to link to it. [\#4495](https://github.com/ClickHouse/ClickHouse/pull/4495) ([Orivej Desh](https://github.com/orivej))
- Fixed a case when the `quantileTiming` aggregate function can be called with a negative or floating point argument (this fixes a fuzz test with undefined behaviour sanitizer). [\#4506](https://github.com/ClickHouse/ClickHouse/pull/4506) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Spelling error correction. [\#4531](https://github.com/ClickHouse/ClickHouse/pull/4531) ([sdk2](https://github.com/sdk2))
- Fix compilation on Mac. [\#4371](https://github.com/ClickHouse/ClickHouse/pull/4371) ([Vitaly Baranov](https://github.com/vitlibar))
- Build fixes for FreeBSD and various unusual build configurations. [\#4444](https://github.com/ClickHouse/ClickHouse/pull/4444) ([proller](https://github.com/proller))

## ClickHouse release 19.3 {#clickhouse-release-19-3}

### ClickHouse release 19.3.9.1, 2019-04-02 {#clickhouse-release-19-3-9-1-2019-04-02}

#### Bug Fixes {#bug-fixes-12}

- Fix crash in `FULL/RIGHT JOIN` when joining on nullable vs not nullable. [\#4855](https://github.com/ClickHouse/ClickHouse/pull/4855) ([Artem Zuikov](https://github.com/4ertus2))
- Fix segmentation fault in `clickhouse-copier`. [\#4835](https://github.com/ClickHouse/ClickHouse/pull/4835) ([proller](https://github.com/proller))
- Fixed reading from `Array(LowCardinality)` column in rare case when column contained a long sequence of empty arrays. [\#4850](https://github.com/ClickHouse/ClickHouse/pull/4850) ([Nikolai Kochetov](https://github.com/KochetovNicolai))

#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-13}

- Add a way to launch clickhouse-server image from a custom user. [\#4753](https://github.com/ClickHouse/ClickHouse/pull/4753) ([Mikhail f. Shiryaev](https://github.com/Felixoid))

### ClickHouse release 19.3.7, 2019-03-12 {#clickhouse-release-19-3-7-2019-03-12}

#### Bug fixes {#bug-fixes-13}

- Fixed error in \#3920. This error manifests itself as random cache corruption (messages `Unknown codec family code`, `Cannot seek through file`) and segfaults. This bug first appeared in version 19.1 and is present in versions up to 19.1.10 and 19.3.6. [\#4623](https://github.com/ClickHouse/ClickHouse/pull/4623) ([alexey-milovidov](https://github.com/alexey-milovidov))

### ClickHouse release 19.3.6, 2019-03-02 {#clickhouse-release-19-3-6-2019-03-02}

#### Bug fixes {#bug-fixes-14}

- When there are more than 1000 threads in a thread pool, `std::terminate` may happen on thread exit. [Azat Khuzhin](https://github.com/azat) [\#4485](https://github.com/ClickHouse/ClickHouse/pull/4485) [\#4505](https://github.com/ClickHouse/ClickHouse/pull/4505) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Now it’s possible to create `ReplicatedMergeTree*` tables with comments on columns without defaults and tables with column codecs without comments and defaults. Also fix comparison of codecs. [\#4523](https://github.com/ClickHouse/ClickHouse/pull/4523) ([alesapin](https://github.com/alesapin))
- Fixed crash on JOIN with array or tuple. [\#4552](https://github.com/ClickHouse/ClickHouse/pull/4552) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed crash in clickhouse-copier with the message `ThreadStatus not created`. [\#4540](https://github.com/ClickHouse/ClickHouse/pull/4540) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed hangup on server shutdown if distributed DDLs were used. [\#4472](https://github.com/ClickHouse/ClickHouse/pull/4472) ([Alex Zatelepin](https://github.com/ztlpn))
- Incorrect column numbers were printed in the error message about text format parsing for columns with numbers greater than 10. [\#4484](https://github.com/ClickHouse/ClickHouse/pull/4484) ([alexey-milovidov](https://github.com/alexey-milovidov))

#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-3}

- Fixed build with AVX enabled. [\#4527](https://github.com/ClickHouse/ClickHouse/pull/4527) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Enable extended accounting and IO accounting based on a known-good version instead of the kernel under which it is compiled. [\#4541](https://github.com/ClickHouse/ClickHouse/pull/4541) ([nvartolomei](https://github.com/nvartolomei))
- Allow skipping the setting of core\_dump.size\_limit, with a warning instead of throwing if setting the limit fails. [\#4473](https://github.com/ClickHouse/ClickHouse/pull/4473) ([proller](https://github.com/proller))
- Removed the `inline` tags of `void readBinary(...)` in `Field.cpp`. Also merged redundant `namespace DB` blocks. [\#4530](https://github.com/ClickHouse/ClickHouse/pull/4530) ([hcz](https://github.com/hczhcz))

### ClickHouse release 19.3.5, 2019-02-21 {#clickhouse-release-19-3-5-2019-02-21}

#### Bug fixes {#bug-fixes-15}

- Fixed bug with large http insert queries processing. [\#4454](https://github.com/ClickHouse/ClickHouse/pull/4454) ([alesapin](https://github.com/alesapin))
- Fixed backward incompatibility with old versions due to wrong implementation of `send_logs_level` setting. [\#4445](https://github.com/ClickHouse/ClickHouse/pull/4445) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed backward incompatibility of table function `remote` introduced with column comments. [\#4446](https://github.com/ClickHouse/ClickHouse/pull/4446) ([alexey-milovidov](https://github.com/alexey-milovidov))

### ClickHouse release 19.3.4, 2019-02-16 {#clickhouse-release-19-3-4-2019-02-16}

#### Improvements {#improvements-4}

- Table index size is not accounted for memory limits when doing `ATTACH TABLE` query. Avoided the possibility that a table cannot be attached after being detached. [\#4396](https://github.com/ClickHouse/ClickHouse/pull/4396) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Slightly raised the limit on max string and array size received from ZooKeeper. It allows continuing to work with an increased size of `CLIENT_JVMFLAGS=-Djute.maxbuffer=...` on ZooKeeper. [\#4398](https://github.com/ClickHouse/ClickHouse/pull/4398) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Allow repairing an abandoned replica even if it already has a huge number of nodes in its queue. [\#4399](https://github.com/ClickHouse/ClickHouse/pull/4399) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Add one required argument to `SET` index (max stored rows number). [\#4386](https://github.com/ClickHouse/ClickHouse/pull/4386) ([Nikita Vasilev](https://github.com/nikvas0))

#### Bug Fixes {#bug-fixes-16}

- Fixed `WITH ROLLUP` result for group by single `LowCardinality` key. [\#4384](https://github.com/ClickHouse/ClickHouse/pull/4384) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fixed bug in the set index (dropping a granule if it contains more than `max_rows` rows). [\#4386](https://github.com/ClickHouse/ClickHouse/pull/4386) ([Nikita Vasilev](https://github.com/nikvas0))
- A lot of FreeBSD build fixes. [\#4397](https://github.com/ClickHouse/ClickHouse/pull/4397) ([proller](https://github.com/proller))
- Fixed aliases substitution in queries with a subquery containing the same alias (issue [\#4110](https://github.com/ClickHouse/ClickHouse/issues/4110)). [\#4351](https://github.com/ClickHouse/ClickHouse/pull/4351) ([Artem Zuikov](https://github.com/4ertus2))

#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-4}

- Add ability to run `clickhouse-server` for stateless tests in docker image. [\#4347](https://github.com/ClickHouse/ClickHouse/pull/4347) ([Vasily Nemkov](https://github.com/Enmk))

### ClickHouse release 19.3.3, 2019-02-13 {#clickhouse-release-19-3-3-2019-02-13}

#### New Features {#new-features-6}

- Added the `KILL MUTATION` statement that allows removing mutations that are for some reason stuck (see the example after this list). Added `latest_failed_part`, `latest_fail_time`, `latest_fail_reason` fields to the `system.mutations` table for easier troubleshooting. [\#4287](https://github.com/ClickHouse/ClickHouse/pull/4287) ([Alex Zatelepin](https://github.com/ztlpn))
- Added aggregate function `entropy` which computes Shannon entropy. [\#4238](https://github.com/ClickHouse/ClickHouse/pull/4238) ([Quid37](https://github.com/Quid37))
- Added ability to send queries `INSERT INTO tbl VALUES (....` to server without splitting on `query` and `data` parts. [\#4301](https://github.com/ClickHouse/ClickHouse/pull/4301) ([alesapin](https://github.com/alesapin))
- Generic implementation of `arrayWithConstant` function was added. [\#4322](https://github.com/ClickHouse/ClickHouse/pull/4322) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Implemented `NOT BETWEEN` comparison operator. [\#4228](https://github.com/ClickHouse/ClickHouse/pull/4228) ([Dmitry Naumov](https://github.com/nezed))
- Implement `sumMapFiltered` in order to be able to limit the number of keys for which values will be summed by `sumMap`. [\#4129](https://github.com/ClickHouse/ClickHouse/pull/4129) ([Léo Ercolanelli](https://github.com/ercolanelli-leo))
- Added support of `Nullable` types in `mysql` table function. [\#4198](https://github.com/ClickHouse/ClickHouse/pull/4198) ([Emmanuel Donin de Rosière](https://github.com/edonin))
- Support for arbitrary constant expressions in `LIMIT` clause. [\#4246](https://github.com/ClickHouse/ClickHouse/pull/4246) ([k3box](https://github.com/k3box))
- Added `topKWeighted` aggregate function that takes an additional argument with (unsigned integer) weight. [\#4245](https://github.com/ClickHouse/ClickHouse/pull/4245) ([Andrew Golman](https://github.com/andrewgolman))
- `StorageJoin` now supports `join_any_take_last_row` setting that allows overwriting existing values of the same key. [\#3973](https://github.com/ClickHouse/ClickHouse/pull/3973) ([Amos Bird](https://github.com/amosbird))
- Added function `toStartOfInterval`. [\#4304](https://github.com/ClickHouse/ClickHouse/pull/4304) ([Vitaly Baranov](https://github.com/vitlibar))
- Added `RowBinaryWithNamesAndTypes` format. [\#4200](https://github.com/ClickHouse/ClickHouse/pull/4200) ([Oleg V. Kozlyuk](https://github.com/DarkWanderer))
- Added `IPv4` and `IPv6` data types. More effective implementations of `IPv*` functions. [\#3669](https://github.com/ClickHouse/ClickHouse/pull/3669) ([Vasily Nemkov](https://github.com/Enmk))
- Added function `toStartOfTenMinutes()`. [\#4298](https://github.com/ClickHouse/ClickHouse/pull/4298) ([Vitaly Baranov](https://github.com/vitlibar))
- Added `Protobuf` output format. [\#4005](https://github.com/ClickHouse/ClickHouse/pull/4005) [\#4158](https://github.com/ClickHouse/ClickHouse/pull/4158) ([Vitaly Baranov](https://github.com/vitlibar))
- Added brotli support for HTTP interface for data import (INSERTs). [\#4235](https://github.com/ClickHouse/ClickHouse/pull/4235) ([Mikhail](https://github.com/fandyushin))
- Added hints when the user makes a typo in a function name or type in the command line client. [\#4239](https://github.com/ClickHouse/ClickHouse/pull/4239) ([Danila Kutenin](https://github.com/danlark1))
- Added `Query-Id` to Server’s HTTP Response header. [\#4231](https://github.com/ClickHouse/ClickHouse/pull/4231) ([Mikhail](https://github.com/fandyushin))
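
A short, hypothetical walkthrough of a few of the features above (`default` and `hits` are placeholder names; expected results are noted in comments):

```sql
-- Inspect a stuck mutation via the new system.mutations fields, then remove it:
SELECT mutation_id, latest_failed_part, latest_fail_time, latest_fail_reason
FROM system.mutations
WHERE database = 'default' AND table = 'hits' AND is_done = 0;

KILL MUTATION WHERE database = 'default' AND table = 'hits';

-- Some of the new functions and operators:
SELECT toStartOfInterval(now(), INTERVAL 10 minute) AS bucket;
SELECT 5 NOT BETWEEN 3 AND 6 AS outside;                         -- expected 0
SELECT sumMapFiltered([1, 3])([1, 2, 3], [10, 20, 30]) AS kept;  -- expected ([1,3],[10,30])
```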

#### Experimental features {#experimental-features-2}

- Added `minmax` and `set` data skipping indices for MergeTree table engines family. [\#4143](https://github.com/ClickHouse/ClickHouse/pull/4143) ([Nikita Vasilev](https://github.com/nikvas0))
- Added conversion of `CROSS JOIN` to `INNER JOIN` if possible. [\#4221](https://github.com/ClickHouse/ClickHouse/pull/4221) [\#4266](https://github.com/ClickHouse/ClickHouse/pull/4266) ([Artem Zuikov](https://github.com/4ertus2))

#### Bug Fixes {#bug-fixes-17}

- Fixed `Not found column` for duplicate columns in `JOIN ON` section. [\#4279](https://github.com/ClickHouse/ClickHouse/pull/4279) ([Artem Zuikov](https://github.com/4ertus2))
- Make `START REPLICATED SENDS` command start replicated sends. [\#4229](https://github.com/ClickHouse/ClickHouse/pull/4229) ([nvartolomei](https://github.com/nvartolomei))
- Fixed aggregate functions execution with `Array(LowCardinality)` arguments. [\#4055](https://github.com/ClickHouse/ClickHouse/pull/4055) ([KochetovNicolai](https://github.com/KochetovNicolai))
- Fixed wrong behaviour when doing `INSERT ... SELECT ... FROM file(...)` query and file has `CSVWithNames` or `TSVWithNames` format and the first data row is missing. [\#4297](https://github.com/ClickHouse/ClickHouse/pull/4297) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed crash on dictionary reload if the dictionary is not available. This bug appeared in 19.1.6. [\#4188](https://github.com/ClickHouse/ClickHouse/pull/4188) ([proller](https://github.com/proller))
- Fixed `ALL JOIN` with duplicates in right table. [\#4184](https://github.com/ClickHouse/ClickHouse/pull/4184) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed segmentation fault with `use_uncompressed_cache=1` and exception with wrong uncompressed size. This bug appeared in 19.1.6. [\#4186](https://github.com/ClickHouse/ClickHouse/pull/4186) ([alesapin](https://github.com/alesapin))
- Fixed `compile_expressions` bug with comparison of big (more than int16) dates. [\#4341](https://github.com/ClickHouse/ClickHouse/pull/4341) ([alesapin](https://github.com/alesapin))
- Fixed infinite loop when selecting from table function `numbers(0)`. [\#4280](https://github.com/ClickHouse/ClickHouse/pull/4280) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Temporarily disable predicate optimization for `ORDER BY`. [\#3890](https://github.com/ClickHouse/ClickHouse/pull/3890) ([Winter Zhang](https://github.com/zhang2014))
- Fixed `Illegal instruction` error when using base64 functions on old CPUs. This error has been reproduced only when ClickHouse was compiled with gcc-8. [\#4275](https://github.com/ClickHouse/ClickHouse/pull/4275) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed `No message received` error when interacting with PostgreSQL ODBC Driver through TLS connection. Also fixes segfault when using MySQL ODBC Driver. [\#4170](https://github.com/ClickHouse/ClickHouse/pull/4170) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed incorrect result when `Date` and `DateTime` arguments are used in branches of conditional operator (function `if`). Added generic case for function `if`. [\#4243](https://github.com/ClickHouse/ClickHouse/pull/4243) ([alexey-milovidov](https://github.com/alexey-milovidov))
- ClickHouse dictionaries now load within the `clickhouse` process. [\#4166](https://github.com/ClickHouse/ClickHouse/pull/4166) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed deadlock when `SELECT` from a table with `File` engine was retried after `No such file or directory` error. [\#4161](https://github.com/ClickHouse/ClickHouse/pull/4161) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed race condition when selecting from `system.tables` may give `table doesn't exist` error. [\#4313](https://github.com/ClickHouse/ClickHouse/pull/4313) ([alexey-milovidov](https://github.com/alexey-milovidov))
- `clickhouse-client` can segfault on exit while loading data for command line suggestions if it was run in interactive mode. [\#4317](https://github.com/ClickHouse/ClickHouse/pull/4317) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a bug when the execution of mutations containing `IN` operators was producing incorrect results. [\#4099](https://github.com/ClickHouse/ClickHouse/pull/4099) ([Alex Zatelepin](https://github.com/ztlpn))
- Fixed error: if there is a database with `Dictionary` engine, all dictionaries are forced to load at server startup, and if there is a dictionary with ClickHouse source from localhost, the dictionary cannot load. [\#4255](https://github.com/ClickHouse/ClickHouse/pull/4255) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed an error that occurred when system logs were created again at server shutdown. [\#4254](https://github.com/ClickHouse/ClickHouse/pull/4254) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Correctly return the right type and properly handle locks in `joinGet` function. [\#4153](https://github.com/ClickHouse/ClickHouse/pull/4153) ([Amos Bird](https://github.com/amosbird))
- Added `sumMapWithOverflow` function. [\#4151](https://github.com/ClickHouse/ClickHouse/pull/4151) ([Léo Ercolanelli](https://github.com/ercolanelli-leo))
- Fixed segfault with `allow_experimental_multiple_joins_emulation`. [52de2c](https://github.com/ClickHouse/ClickHouse/commit/52de2cd927f7b5257dd67e175f0a5560a48840d0) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed bug with incorrect `Date` and `DateTime` comparison. [\#4237](https://github.com/ClickHouse/ClickHouse/pull/4237) ([valexey](https://github.com/valexey))
- Fixed fuzz test under undefined behavior sanitizer: added parameter type check for `quantile*Weighted` family of functions. [\#4145](https://github.com/ClickHouse/ClickHouse/pull/4145) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed rare race condition when removal of old data parts could fail with the `File not found` error. [\#4378](https://github.com/ClickHouse/ClickHouse/pull/4378) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix install package with missing /etc/clickhouse-server/config.xml. [\#4343](https://github.com/ClickHouse/ClickHouse/pull/4343) ([proller](https://github.com/proller))

#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-5}

- Debian package: correct /etc/clickhouse-server/preprocessed link according to config. [\#4205](https://github.com/ClickHouse/ClickHouse/pull/4205) ([proller](https://github.com/proller))
- Various build fixes for FreeBSD. [\#4225](https://github.com/ClickHouse/ClickHouse/pull/4225) ([proller](https://github.com/proller))
- Added ability to create, fill and drop tables in perftest. [\#4220](https://github.com/ClickHouse/ClickHouse/pull/4220) ([alesapin](https://github.com/alesapin))
- Added a script to check for duplicate includes. [\#4326](https://github.com/ClickHouse/ClickHouse/pull/4326) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added ability to run queries by index in performance test. [\#4264](https://github.com/ClickHouse/ClickHouse/pull/4264) ([alesapin](https://github.com/alesapin))
- Package with debug symbols is suggested to be installed. [\#4274](https://github.com/ClickHouse/ClickHouse/pull/4274) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Refactoring of performance-test. Better logging and signals handling. [\#4171](https://github.com/ClickHouse/ClickHouse/pull/4171) ([alesapin](https://github.com/alesapin))
- Added docs to anonymized Yandex.Metrika datasets. [\#4164](https://github.com/ClickHouse/ClickHouse/pull/4164) ([alesapin](https://github.com/alesapin))
- Added a tool for converting an old month-partitioned part to the custom-partitioned format. [\#4195](https://github.com/ClickHouse/ClickHouse/pull/4195) ([Alex Zatelepin](https://github.com/ztlpn))
- Added docs about two datasets in s3. [\#4144](https://github.com/ClickHouse/ClickHouse/pull/4144) ([alesapin](https://github.com/alesapin))
- Added a script which creates a changelog from pull request descriptions. [\#4169](https://github.com/ClickHouse/ClickHouse/pull/4169) [\#4173](https://github.com/ClickHouse/ClickHouse/pull/4173) ([KochetovNicolai](https://github.com/KochetovNicolai))
- Added puppet module for ClickHouse. [\#4182](https://github.com/ClickHouse/ClickHouse/pull/4182) ([Maxim Fedotov](https://github.com/MaxFedotov))
- Added docs for a group of undocumented functions. [\#4168](https://github.com/ClickHouse/ClickHouse/pull/4168) ([Winter Zhang](https://github.com/zhang2014))
- ARM build fixes. [\#4210](https://github.com/ClickHouse/ClickHouse/pull/4210) [\#4306](https://github.com/ClickHouse/ClickHouse/pull/4306) [\#4291](https://github.com/ClickHouse/ClickHouse/pull/4291) ([proller](https://github.com/proller))
- Dictionary tests are now able to run from `ctest`. [\#4189](https://github.com/ClickHouse/ClickHouse/pull/4189) ([proller](https://github.com/proller))
- Now `/etc/ssl` is used as the default directory with SSL certificates. [\#4167](https://github.com/ClickHouse/ClickHouse/pull/4167) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added checking of SSE and AVX instructions at start. [\#4234](https://github.com/ClickHouse/ClickHouse/pull/4234) ([Igr](https://github.com/igron99))
- Init script will wait for the server to start. [\#4281](https://github.com/ClickHouse/ClickHouse/pull/4281) ([proller](https://github.com/proller))

#### Backward Incompatible Changes {#backward-incompatible-changes-1}

- Removed `allow_experimental_low_cardinality_type` setting. `LowCardinality` data types are production ready. [\#4323](https://github.com/ClickHouse/ClickHouse/pull/4323) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Reduce mark cache size and uncompressed cache size according to the available memory amount. [\#4240](https://github.com/ClickHouse/ClickHouse/pull/4240) ([Lopatin Konstantin](https://github.com/k-lopatin))
- Added keyword `INDEX` in `CREATE TABLE` query. A column with name `index` must be quoted with backticks or double quotes: `` `index` ``. [\#4143](https://github.com/ClickHouse/ClickHouse/pull/4143) ([Nikita Vasilev](https://github.com/nikvas0))
- `sumMap` now promotes the result type instead of overflowing. The old `sumMap` behavior can be obtained by using the `sumMapWithOverflow` function. [\#4151](https://github.com/ClickHouse/ClickHouse/pull/4151) ([Léo Ercolanelli](https://github.com/ercolanelli-leo))
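
To make the `sumMap` change concrete, a sketch with synthetic `UInt8` data: with promotion the per-key sum is 400, while the overflow variant keeps the argument type and wraps modulo 256 (the results in comments are expected values, not taken from the PR):

```sql
SELECT
    sumMap(k, v) AS promoted,              -- expected ([1], [400])
    sumMapWithOverflow(k, v) AS wrapping   -- expected ([1], [144])
FROM
(
    SELECT [1] AS k, [toUInt8(200)] AS v
    FROM numbers(2)
);
```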

#### Performance Improvements {#performance-improvements-4}

- `std::sort` replaced by `pdqsort` for queries without `LIMIT`. [\#4236](https://github.com/ClickHouse/ClickHouse/pull/4236) ([Evgenii Pravda](https://github.com/kvinty))
- Now the server reuses threads from the global thread pool. This affects performance in some corner cases. [\#4150](https://github.com/ClickHouse/ClickHouse/pull/4150) ([alexey-milovidov](https://github.com/alexey-milovidov))

#### Improvements {#improvements-5}

- Implemented AIO support for FreeBSD. [\#4305](https://github.com/ClickHouse/ClickHouse/pull/4305) ([urgordeadbeef](https://github.com/urgordeadbeef))
- `SELECT * FROM a JOIN b USING a, b` now returns `a` and `b` columns only from the left table. [\#4141](https://github.com/ClickHouse/ClickHouse/pull/4141) ([Artem Zuikov](https://github.com/4ertus2))
- Allow `-C` option of client to work as `-c` option. [\#4232](https://github.com/ClickHouse/ClickHouse/pull/4232) ([syominsergey](https://github.com/syominsergey))
- Now option `--password` used without a value requires the password from stdin. [\#4230](https://github.com/ClickHouse/ClickHouse/pull/4230) ([BSD\_Conqueror](https://github.com/bsd-conqueror))
- Added highlighting of unescaped metacharacters in string literals that contain `LIKE` expressions or regexps. [\#4327](https://github.com/ClickHouse/ClickHouse/pull/4327) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added cancelling of HTTP read only queries if client socket goes away. [\#4213](https://github.com/ClickHouse/ClickHouse/pull/4213) ([nvartolomei](https://github.com/nvartolomei))
- Now the server reports progress to keep client connections alive. [\#4215](https://github.com/ClickHouse/ClickHouse/pull/4215) ([Ivan](https://github.com/abyss7))
- Slightly better message with reason for OPTIMIZE query with `optimize_throw_if_noop` setting enabled. [\#4294](https://github.com/ClickHouse/ClickHouse/pull/4294) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added support of `--version` option for clickhouse server. [\#4251](https://github.com/ClickHouse/ClickHouse/pull/4251) ([Lopatin Konstantin](https://github.com/k-lopatin))
- Added `--help/-h` option to `clickhouse-server`. [\#4233](https://github.com/ClickHouse/ClickHouse/pull/4233) ([Yuriy Baranov](https://github.com/yurriy))
- Added support for scalar subqueries with aggregate function state result. [\#4348](https://github.com/ClickHouse/ClickHouse/pull/4348) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Improved server shutdown time and ALTERs waiting time. [\#4372](https://github.com/ClickHouse/ClickHouse/pull/4372) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added info about the replicated\_can\_become\_leader setting to system.replicas and added logging if the replica won’t try to become leader. [\#4379](https://github.com/ClickHouse/ClickHouse/pull/4379) ([Alex Zatelepin](https://github.com/ztlpn))

## ClickHouse release 19.1 {#clickhouse-release-19-1}

### ClickHouse release 19.1.14, 2019-03-14 {#clickhouse-release-19-1-14-2019-03-14}

- Fixed error `Column ... queried more than once` that may happen if the setting `asterisk_left_columns_only` is set to 1 in case of using `GLOBAL JOIN` with `SELECT *` (rare case). The issue does not exist in 19.3 and newer. [6bac7d8d](https://github.com/ClickHouse/ClickHouse/pull/4692/commits/6bac7d8d11a9b0d6de0b32b53c47eb2f6f8e7062) ([Artem Zuikov](https://github.com/4ertus2))

### ClickHouse release 19.1.13, 2019-03-12 {#clickhouse-release-19-1-13-2019-03-12}

This release contains exactly the same set of patches as 19.3.7.

### ClickHouse release 19.1.10, 2019-03-03 {#clickhouse-release-19-1-10-2019-03-03}

This release contains exactly the same set of patches as 19.3.6.

## ClickHouse release 19.1 {#clickhouse-release-19-1-1}

### ClickHouse release 19.1.9, 2019-02-21 {#clickhouse-release-19-1-9-2019-02-21}

#### Bug fixes {#bug-fixes-18}

- Fixed backward incompatibility with old versions due to wrong implementation of `send_logs_level` setting. [\#4445](https://github.com/ClickHouse/ClickHouse/pull/4445) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed backward incompatibility of table function `remote` introduced with column comments. [\#4446](https://github.com/ClickHouse/ClickHouse/pull/4446) ([alexey-milovidov](https://github.com/alexey-milovidov))

### ClickHouse release 19.1.8, 2019-02-16 {#clickhouse-release-19-1-8-2019-02-16}

#### Bug Fixes {#bug-fixes-19}

- Fix install package with missing /etc/clickhouse-server/config.xml. [\#4343](https://github.com/ClickHouse/ClickHouse/pull/4343) ([proller](https://github.com/proller))

## ClickHouse release 19.1 {#clickhouse-release-19-1-2}

### ClickHouse release 19.1.7, 2019-02-15 {#clickhouse-release-19-1-7-2019-02-15}

#### Bug Fixes {#bug-fixes-20}

- Correctly return the right type and properly handle locks in `joinGet` function. [\#4153](https://github.com/ClickHouse/ClickHouse/pull/4153) ([Amos Bird](https://github.com/amosbird))
- Fixed an error that occurred when system logs were created again at server shutdown. [\#4254](https://github.com/ClickHouse/ClickHouse/pull/4254) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed error: if there is a database with `Dictionary` engine, all dictionaries are forced to load at server startup, and if there is a dictionary with ClickHouse source from localhost, the dictionary cannot load. [\#4255](https://github.com/ClickHouse/ClickHouse/pull/4255) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a bug when the execution of mutations containing `IN` operators was producing incorrect results. [\#4099](https://github.com/ClickHouse/ClickHouse/pull/4099) ([Alex Zatelepin](https://github.com/ztlpn))
- `clickhouse-client` can segfault on exit while loading data for command line suggestions if it was run in interactive mode. [\#4317](https://github.com/ClickHouse/ClickHouse/pull/4317) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed race condition when selecting from `system.tables` may give `table doesn't exist` error. [\#4313](https://github.com/ClickHouse/ClickHouse/pull/4313) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed deadlock when `SELECT` from a table with `File` engine was retried after `No such file or directory` error. [\#4161](https://github.com/ClickHouse/ClickHouse/pull/4161) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed an issue where local ClickHouse dictionaries were loaded via TCP instead of loading within the process. [\#4166](https://github.com/ClickHouse/ClickHouse/pull/4166) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed `No message received` error when interacting with PostgreSQL ODBC Driver through TLS connection. Also fixes segfault when using MySQL ODBC Driver. [\#4170](https://github.com/ClickHouse/ClickHouse/pull/4170) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Temporarily disable predicate optimization for `ORDER BY`. [\#3890](https://github.com/ClickHouse/ClickHouse/pull/3890) ([Winter Zhang](https://github.com/zhang2014))
- Fixed infinite loop when selecting from table function `numbers(0)`. [\#4280](https://github.com/ClickHouse/ClickHouse/pull/4280) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed `compile_expressions` bug with comparison of big (more than int16) dates. [\#4341](https://github.com/ClickHouse/ClickHouse/pull/4341) ([alesapin](https://github.com/alesapin))
- Fixed segmentation fault with `uncompressed_cache=1` and exception with wrong uncompressed size. [\#4186](https://github.com/ClickHouse/ClickHouse/pull/4186) ([alesapin](https://github.com/alesapin))
- Fixed `ALL JOIN` with duplicates in right table. [\#4184](https://github.com/ClickHouse/ClickHouse/pull/4184) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed wrong behaviour when doing `INSERT ... SELECT ... FROM file(...)` query and file has `CSVWithNames` or `TSVWithNames` format and the first data row is missing. [\#4297](https://github.com/ClickHouse/ClickHouse/pull/4297) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed aggregate functions execution with `Array(LowCardinality)` arguments. [\#4055](https://github.com/ClickHouse/ClickHouse/pull/4055) ([KochetovNicolai](https://github.com/KochetovNicolai))
- Debian package: correct /etc/clickhouse-server/preprocessed link according to config. [\#4205](https://github.com/ClickHouse/ClickHouse/pull/4205) ([proller](https://github.com/proller))
- Fixed fuzz test under undefined behavior sanitizer: added parameter type check for `quantile*Weighted` family of functions. [\#4145](https://github.com/ClickHouse/ClickHouse/pull/4145) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Make `START REPLICATED SENDS` command start replicated sends. [\#4229](https://github.com/ClickHouse/ClickHouse/pull/4229) ([nvartolomei](https://github.com/nvartolomei))
- Fixed `Not found column` for duplicate columns in JOIN ON section. [\#4279](https://github.com/ClickHouse/ClickHouse/pull/4279) ([Artem Zuikov](https://github.com/4ertus2))
- Now `/etc/ssl` is used as the default directory with SSL certificates. [\#4167](https://github.com/ClickHouse/ClickHouse/pull/4167) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed crash on dictionary reload if the dictionary is not available. [\#4188](https://github.com/ClickHouse/ClickHouse/pull/4188) ([proller](https://github.com/proller))
- Fixed bug with incorrect `Date` and `DateTime` comparison. [\#4237](https://github.com/ClickHouse/ClickHouse/pull/4237) ([valexey](https://github.com/valexey))
- Fixed incorrect result when `Date` and `DateTime` arguments are used in branches of conditional operator (function `if`). Added generic case for function `if`. [\#4243](https://github.com/ClickHouse/ClickHouse/pull/4243) ([alexey-milovidov](https://github.com/alexey-milovidov))

### ClickHouse release 19.1.6, 2019-01-24 {#clickhouse-release-19-1-6-2019-01-24}

#### New Features {#new-features-7}

- Custom per-column compression codecs for tables (see the sketch after this list). [\#3899](https://github.com/ClickHouse/ClickHouse/pull/3899) [\#4111](https://github.com/ClickHouse/ClickHouse/pull/4111) ([alesapin](https://github.com/alesapin), [Winter Zhang](https://github.com/zhang2014), [Anatoly](https://github.com/Sindbag))
- Added compression codec `Delta`. [\#4052](https://github.com/ClickHouse/ClickHouse/pull/4052) ([alesapin](https://github.com/alesapin))
- Allow to `ALTER` compression codecs. [\#4054](https://github.com/ClickHouse/ClickHouse/pull/4054) ([alesapin](https://github.com/alesapin))
- Added functions `left`, `right`, `trim`, `ltrim`, `rtrim`, `timestampadd`, `timestampsub` for SQL standard compatibility. [\#3826](https://github.com/ClickHouse/ClickHouse/pull/3826) ([Ivan Blinkov](https://github.com/blinkov))
- Support for write in `HDFS` tables and `hdfs` table function. [\#4084](https://github.com/ClickHouse/ClickHouse/pull/4084) ([alesapin](https://github.com/alesapin))
- Added functions to search for multiple constant strings in a big haystack: `multiPosition`, `multiSearch`, `firstMatch`, also with `-UTF8`, `-CaseInsensitive`, and `-CaseInsensitiveUTF8` variants. [\#4053](https://github.com/ClickHouse/ClickHouse/pull/4053) ([Danila Kutenin](https://github.com/danlark1))
- Pruning of unused shards if `SELECT` query filters by sharding key (setting `optimize_skip_unused_shards`). [\#3851](https://github.com/ClickHouse/ClickHouse/pull/3851) ([Gleb Kanterov](https://github.com/kanterov), [Ivan](https://github.com/abyss7))
- Allow `Kafka` engine to ignore some number of parsing errors per block. [\#4094](https://github.com/ClickHouse/ClickHouse/pull/4094) ([Ivan](https://github.com/abyss7))
- Added support for `CatBoost` multiclass models evaluation. Function `modelEvaluate` returns tuple with per-class raw predictions for multiclass models. `libcatboostmodel.so` should be built with [\#607](https://github.com/catboost/catboost/pull/607). [\#3959](https://github.com/ClickHouse/ClickHouse/pull/3959) ([KochetovNicolai](https://github.com/KochetovNicolai))
- Added functions `filesystemAvailable`, `filesystemFree`, `filesystemCapacity`. [\#4097](https://github.com/ClickHouse/ClickHouse/pull/4097) ([Boris Granveaud](https://github.com/bgranvea))
- Added hashing functions `xxHash64` and `xxHash32`. [\#3905](https://github.com/ClickHouse/ClickHouse/pull/3905) ([filimonov](https://github.com/filimonov))
- Added `gccMurmurHash` hashing function (GCC flavoured Murmur hash) which uses the same hash seed as [gcc](https://github.com/gcc-mirror/gcc/blob/41d6b10e96a1de98e90a7c0378437c3255814b16/libstdc%2B%2B-v3/include/bits/functional_hash.h#L191). [\#4000](https://github.com/ClickHouse/ClickHouse/pull/4000) ([sundyli](https://github.com/sundy-li))
- Added hashing functions `javaHash`, `hiveHash`. [\#3811](https://github.com/ClickHouse/ClickHouse/pull/3811) ([shangshujie365](https://github.com/shangshujie365))
- Added table function `remoteSecure`. The function works like `remote` but uses a secure connection. [\#4088](https://github.com/ClickHouse/ClickHouse/pull/4088) ([proller](https://github.com/proller))
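
A minimal sketch of the per-column codecs introduced at the top of this list (`codec_demo` is a hypothetical table; `Delta` typically pays off on monotonically growing columns when combined with a general-purpose codec):

```sql
CREATE TABLE codec_demo
(
    ts DateTime CODEC(Delta, LZ4),
    value Float64 CODEC(ZSTD)
)
ENGINE = MergeTree
ORDER BY ts;

-- Codecs can be changed later via ALTER:
ALTER TABLE codec_demo MODIFY COLUMN value Float64 CODEC(LZ4);
```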

#### Experimental features {#experimental-features-3}

- Added multiple JOINs emulation (`allow_experimental_multiple_joins_emulation` setting). [\#3946](https://github.com/ClickHouse/ClickHouse/pull/3946) ([Artem Zuikov](https://github.com/4ertus2))

#### Bug Fixes {#bug-fixes-21}

- Make `compiled_expression_cache_size` setting limited by default to lower memory consumption. [\#4041](https://github.com/ClickHouse/ClickHouse/pull/4041) ([alesapin](https://github.com/alesapin))
- Fix a bug that led to hangups in threads that perform ALTERs of Replicated tables and in the thread that updates configuration from ZooKeeper. [\#2947](https://github.com/ClickHouse/ClickHouse/issues/2947) [\#3891](https://github.com/ClickHouse/ClickHouse/issues/3891) [\#3934](https://github.com/ClickHouse/ClickHouse/pull/3934) ([Alex Zatelepin](https://github.com/ztlpn))
- Fixed a race condition when executing a distributed ALTER task. The race condition led to more than one replica trying to execute the task and all replicas except one failing with a ZooKeeper error. [\#3904](https://github.com/ClickHouse/ClickHouse/pull/3904) ([Alex Zatelepin](https://github.com/ztlpn))
- Fix a bug when `from_zk` config elements weren’t refreshed after a request to ZooKeeper timed out. [\#2947](https://github.com/ClickHouse/ClickHouse/issues/2947) [\#3947](https://github.com/ClickHouse/ClickHouse/pull/3947) ([Alex Zatelepin](https://github.com/ztlpn))
- Fix bug with wrong prefix for IPv4 subnet masks. [\#3945](https://github.com/ClickHouse/ClickHouse/pull/3945) ([alesapin](https://github.com/alesapin))
- Fixed crash (`std::terminate`) in rare cases when a new thread cannot be created due to exhausted resources. [\#3956](https://github.com/ClickHouse/ClickHouse/pull/3956) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix bug in `remote` table function execution when wrong restrictions were used in `getStructureOfRemoteTable`. [\#4009](https://github.com/ClickHouse/ClickHouse/pull/4009) ([alesapin](https://github.com/alesapin))
- Fix a leak of netlink sockets. They were placed in a pool where they were never deleted and new sockets were created at the start of a new thread when all current sockets were in use. [\#4017](https://github.com/ClickHouse/ClickHouse/pull/4017) ([Alex Zatelepin](https://github.com/ztlpn))
- Fix bug with closing `/proc/self/fd` directory earlier than all fds were read from `/proc` after forking `odbc-bridge` subprocess. [\#4120](https://github.com/ClickHouse/ClickHouse/pull/4120) ([alesapin](https://github.com/alesapin))
- Fixed String to UInt monotonic conversion in case of usage of String in the primary key. [\#3870](https://github.com/ClickHouse/ClickHouse/pull/3870) ([Winter Zhang](https://github.com/zhang2014))
- Fixed error in calculation of integer conversion function monotonicity. [\#3921](https://github.com/ClickHouse/ClickHouse/pull/3921) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed segfault in `arrayEnumerateUniq`, `arrayEnumerateDense` functions in case of some invalid arguments. [\#3909](https://github.com/ClickHouse/ClickHouse/pull/3909) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix UB in StorageMerge. [\#3910](https://github.com/ClickHouse/ClickHouse/pull/3910) ([Amos Bird](https://github.com/amosbird))
- Fixed segfault in functions `addDays`, `subtractDays`. [\#3913](https://github.com/ClickHouse/ClickHouse/pull/3913) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed error: functions `round`, `floor`, `trunc`, `ceil` may return bogus result when executed on integer argument and large negative scale. [\#3914](https://github.com/ClickHouse/ClickHouse/pull/3914) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a bug induced by ‘kill query sync’ which leads to a core dump. [\#3916](https://github.com/ClickHouse/ClickHouse/pull/3916) ([muVulDeePecker](https://github.com/fancyqlx))
- Fix bug with long delay after empty replication queue. [\#3928](https://github.com/ClickHouse/ClickHouse/pull/3928) [\#3932](https://github.com/ClickHouse/ClickHouse/pull/3932) ([alesapin](https://github.com/alesapin))
[\#3928](https://github.com/ClickHouse/ClickHouse/pull/3928) [\#3932](https://github.com/ClickHouse/ClickHouse/pull/3932) ([alesapin](https://github.com/alesapin)) -- Fixed excessive memory usage in case of inserting into table with `LowCardinality` primary key. [\#3955](https://github.com/ClickHouse/ClickHouse/pull/3955) ([KochetovNicolai](https://github.com/KochetovNicolai)) -- Fixed `LowCardinality` serialization for `Native` format in case of empty arrays. [\#3907](https://github.com/ClickHouse/ClickHouse/issues/3907) [\#4011](https://github.com/ClickHouse/ClickHouse/pull/4011) ([KochetovNicolai](https://github.com/KochetovNicolai)) -- Fixed incorrect result while using distinct by single LowCardinality numeric column. [\#3895](https://github.com/ClickHouse/ClickHouse/issues/3895) [\#4012](https://github.com/ClickHouse/ClickHouse/pull/4012) ([KochetovNicolai](https://github.com/KochetovNicolai)) -- Fixed specialized aggregation with LowCardinality key (in case when `compile` setting is enabled). [\#3886](https://github.com/ClickHouse/ClickHouse/pull/3886) ([KochetovNicolai](https://github.com/KochetovNicolai)) -- Fix user and password forwarding for replicated tables queries. [\#3957](https://github.com/ClickHouse/ClickHouse/pull/3957) ([alesapin](https://github.com/alesapin)) ([小路](https://github.com/nicelulu)) -- Fixed very rare race condition that can happen when listing tables in Dictionary database while reloading dictionaries. [\#3970](https://github.com/ClickHouse/ClickHouse/pull/3970) ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Fixed incorrect result when HAVING was used with ROLLUP or CUBE. [\#3756](https://github.com/ClickHouse/ClickHouse/issues/3756) [\#3837](https://github.com/ClickHouse/ClickHouse/pull/3837) ([Sam Chou](https://github.com/reflection)) -- Fixed column aliases for query with `JOIN ON` syntax and distributed tables. [\#3980](https://github.com/ClickHouse/ClickHouse/pull/3980) ([Winter Zhang](https://github.com/zhang2014)) -- Fixed error in internal implementation of `quantileTDigest` (found by Artem Vakhrushev). This error never happens in ClickHouse and was relevant only for those who use ClickHouse codebase as a library directly. [\#3935](https://github.com/ClickHouse/ClickHouse/pull/3935) ([alexey-milovidov](https://github.com/alexey-milovidov)) - -#### Improvements {#improvements-6} - -- Support for `IF NOT EXISTS` in `ALTER TABLE ADD COLUMN` statements along with `IF EXISTS` in `DROP/MODIFY/CLEAR/COMMENT COLUMN`. [\#3900](https://github.com/ClickHouse/ClickHouse/pull/3900) ([Boris Granveaud](https://github.com/bgranvea)) -- Function `parseDateTimeBestEffort`: support for formats `DD.MM.YYYY`, `DD.MM.YY`, `DD-MM-YYYY`, `DD-Mon-YYYY`, `DD/Month/YYYY` and similar. [\#3922](https://github.com/ClickHouse/ClickHouse/pull/3922) ([alexey-milovidov](https://github.com/alexey-milovidov)) -- `CapnProtoInputStream` now support jagged structures. [\#4063](https://github.com/ClickHouse/ClickHouse/pull/4063) ([Odin Hultgren Van Der Horst](https://github.com/Miniwoffer)) -- Usability improvement: added a check that server process is started from the data directory’s owner. Do not allow to start server from root if the data belongs to non-root user. [\#3785](https://github.com/ClickHouse/ClickHouse/pull/3785) ([sergey-v-galtsev](https://github.com/sergey-v-galtsev)) -- Better logic of checking required columns during analysis of queries with JOINs. 
[\#3930](https://github.com/ClickHouse/ClickHouse/pull/3930) ([Artem Zuikov](https://github.com/4ertus2)) -- Decreased the number of connections in case of a large number of Distributed tables in a single server. [\#3726](https://github.com/ClickHouse/ClickHouse/pull/3726) ([Winter Zhang](https://github.com/zhang2014)) -- Supported the totals row for `WITH TOTALS` queries for the ODBC driver. [\#3836](https://github.com/ClickHouse/ClickHouse/pull/3836) ([Maksim Koritckiy](https://github.com/nightweb)) -- Allowed using `Enum`s as integers inside the `if` function. [\#3875](https://github.com/ClickHouse/ClickHouse/pull/3875) ([Ivan](https://github.com/abyss7)) -- Added the `low_cardinality_allow_in_native_format` setting. If disabled, do not use the `LowCardinality` type in the `Native` format. [\#3879](https://github.com/ClickHouse/ClickHouse/pull/3879) ([KochetovNicolai](https://github.com/KochetovNicolai)) -- Removed some redundant objects from the compiled expressions cache to lower memory usage. [\#4042](https://github.com/ClickHouse/ClickHouse/pull/4042) ([alesapin](https://github.com/alesapin)) -- Added a check that the `SET send_logs_level = 'value'` query accepts an appropriate value. [\#3873](https://github.com/ClickHouse/ClickHouse/pull/3873) ([Sabyanin Maxim](https://github.com/s-mx)) -- Fixed the data type check in type conversion functions. [\#3896](https://github.com/ClickHouse/ClickHouse/pull/3896) ([Winter Zhang](https://github.com/zhang2014)) - -#### Performance Improvements {#performance-improvements-5} - -- Add a MergeTree setting `use_minimalistic_part_header_in_zookeeper`. If enabled, Replicated tables will store compact part metadata in a single part znode. This can dramatically reduce ZooKeeper snapshot size (especially if the tables have a lot of columns). Note that after enabling this setting you will not be able to downgrade to a version that doesn’t support it. [\#3960](https://github.com/ClickHouse/ClickHouse/pull/3960) ([Alex Zatelepin](https://github.com/ztlpn)) -- Add a DFA-based implementation for the functions `sequenceMatch` and `sequenceCount` in case the pattern doesn’t contain time. [\#4004](https://github.com/ClickHouse/ClickHouse/pull/4004) ([Léo Ercolanelli](https://github.com/ercolanelli-leo)) -- Performance improvement for integer number serialization. [\#3968](https://github.com/ClickHouse/ClickHouse/pull/3968) ([Amos Bird](https://github.com/amosbird)) -- Zero left padding of PODArray so that the -1 element is always valid and zeroed. It’s used for branchless calculation of offsets. [\#3920](https://github.com/ClickHouse/ClickHouse/pull/3920) ([Amos Bird](https://github.com/amosbird)) -- Reverted the `jemalloc` version which led to performance degradation. [\#4018](https://github.com/ClickHouse/ClickHouse/pull/4018) ([alexey-milovidov](https://github.com/alexey-milovidov)) - -#### Backward Incompatible Changes {#backward-incompatible-changes-2} - -- Removed the undocumented feature `ALTER MODIFY PRIMARY KEY` because it was superseded by the `ALTER MODIFY ORDER BY` command. [\#3887](https://github.com/ClickHouse/ClickHouse/pull/3887) ([Alex Zatelepin](https://github.com/ztlpn)) -- Removed the function `shardByHash`. [\#3833](https://github.com/ClickHouse/ClickHouse/pull/3833) ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Forbid using scalar subqueries with a result of type `AggregateFunction`. 
[\#3865](https://github.com/ClickHouse/ClickHouse/pull/3865) ([Ivan](https://github.com/abyss7)) - -#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-6} - -- Added support for PowerPC (`ppc64le`) build. [\#4132](https://github.com/ClickHouse/ClickHouse/pull/4132) ([Danila Kutenin](https://github.com/danlark1)) -- Stateful functional tests are run on a publicly available dataset. [\#3969](https://github.com/ClickHouse/ClickHouse/pull/3969) ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Fixed an error when the server cannot start with the `bash: /usr/bin/clickhouse-extract-from-config: Operation not permitted` message within Docker or systemd-nspawn. [\#4136](https://github.com/ClickHouse/ClickHouse/pull/4136) ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Updated the `rdkafka` library to v1.0.0-RC5. Used cppkafka instead of the raw C interface. [\#4025](https://github.com/ClickHouse/ClickHouse/pull/4025) ([Ivan](https://github.com/abyss7)) -- Updated the `mariadb-client` library. Fixed one of the issues found by UBSan. [\#3924](https://github.com/ClickHouse/ClickHouse/pull/3924) ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Some fixes for UBSan builds. [\#3926](https://github.com/ClickHouse/ClickHouse/pull/3926) [\#3021](https://github.com/ClickHouse/ClickHouse/pull/3021) [\#3948](https://github.com/ClickHouse/ClickHouse/pull/3948) ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Added per-commit runs of tests with the UBSan build. -- Added per-commit runs of the PVS-Studio static analyzer. -- Fixed bugs found by PVS-Studio. [\#4013](https://github.com/ClickHouse/ClickHouse/pull/4013) ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Fixed glibc compatibility issues. [\#4100](https://github.com/ClickHouse/ClickHouse/pull/4100) ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Moved Docker images to 18.10 and added a compatibility file for glibc \>= 2.28. [\#3965](https://github.com/ClickHouse/ClickHouse/pull/3965) ([alesapin](https://github.com/alesapin)) -- Added an env variable for users who don’t want to chown directories in the server Docker image. [\#3967](https://github.com/ClickHouse/ClickHouse/pull/3967) ([alesapin](https://github.com/alesapin)) -- Enabled most of the warnings from `-Weverything` in clang. Enabled `-Wpedantic`. [\#3986](https://github.com/ClickHouse/ClickHouse/pull/3986) ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Added a few more warnings that are available only in clang 8. [\#3993](https://github.com/ClickHouse/ClickHouse/pull/3993) ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Link to `libLLVM` rather than to individual LLVM libs when using shared linking. [\#3989](https://github.com/ClickHouse/ClickHouse/pull/3989) ([Orivej Desh](https://github.com/orivej)) -- Added sanitizer variables for test images. [\#4072](https://github.com/ClickHouse/ClickHouse/pull/4072) ([alesapin](https://github.com/alesapin)) -- The `clickhouse-server` debian package will recommend the `libcap2-bin` package to use the `setcap` tool for setting capabilities. This is optional. [\#4093](https://github.com/ClickHouse/ClickHouse/pull/4093) ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Improved compilation time, fixed includes. [\#3898](https://github.com/ClickHouse/ClickHouse/pull/3898) ([proller](https://github.com/proller)) -- Added performance tests for hash functions. 
[\#3918](https://github.com/ClickHouse/ClickHouse/pull/3918) ([filimonov](https://github.com/filimonov)) -- Fixed cyclic library dependencies. [\#3958](https://github.com/ClickHouse/ClickHouse/pull/3958) ([proller](https://github.com/proller)) -- Improved compilation with low available memory. [\#4030](https://github.com/ClickHouse/ClickHouse/pull/4030) ([proller](https://github.com/proller)) -- Added a test script to reproduce performance degradation in `jemalloc`. [\#4036](https://github.com/ClickHouse/ClickHouse/pull/4036) ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Fixed misspellings in comments and string literals under `dbms`. [\#4122](https://github.com/ClickHouse/ClickHouse/pull/4122) ([maiha](https://github.com/maiha)) -- Fixed typos in comments. [\#4089](https://github.com/ClickHouse/ClickHouse/pull/4089) ([Evgenii Pravda](https://github.com/kvinty)) - -## [Changelog for 2018](https://github.com/ClickHouse/ClickHouse/blob/master/docs/en/changelog/2018.md) diff --git a/docs/fa/changelog/index.md b/docs/fa/changelog/index.md deleted file mode 100644 index 1a89e03c333..00000000000 --- a/docs/fa/changelog/index.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -en_copy: true ---- - -../../../CHANGELOG.md \ No newline at end of file diff --git a/docs/fa/commercial/cloud.md b/docs/fa/commercial/cloud.md index f096bdb92cf..511d07f4583 100644 --- a/docs/fa/commercial/cloud.md +++ b/docs/fa/commercial/cloud.md @@ -1,20 +1,21 @@ --- -en_copy: true +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 --- -# ClickHouse Cloud Service Providers {#clickhouse-cloud-service-providers} +# ClickHouse ابر ارائه دهندگان خدمات {#clickhouse-cloud-service-providers} -!!! info "Info" - If you have launched a public cloud with managed ClickHouse service, feel free to [open a pull-request](https://github.com/ClickHouse/ClickHouse/edit/master/docs/en/commercial/cloud.md) adding it to the following list. +!!! info "اطلاعات" + اگر شما راه اندازی یک ابر عمومی با مدیریت سرویس خانه کلیک, احساس رایگان به [درخواست کشش را باز کنید](https://github.com/ClickHouse/ClickHouse/edit/master/docs/en/commercial/cloud.md) اضافه کردن به لیست زیر. 
-## Yandex Cloud {#yandex-cloud} +## ابر یاندکس {#yandex-cloud} -[Yandex Managed Service for ClickHouse](https://cloud.yandex.com/services/managed-clickhouse?utm_source=referrals&utm_medium=clickhouseofficialsite&utm_campaign=link3) provides the following key features: +[سرویس مدیریت یاندکس برای کلیک](https://cloud.yandex.com/services/managed-clickhouse?utm_source=referrals&utm_medium=clickhouseofficialsite&utm_campaign=link3) ویژگی های کلیدی زیر را فراهم می کند: -- Fully managed ZooKeeper service for [ClickHouse replication](../operations/table_engines/replication.md) -- Multiple storage type choices -- Replicas in different availability zones -- Encryption and isolation -- Automated maintenance +- خدمات باغ وحش به طور کامل مدیریت برای [تکرار کلیک](../engines/table_engines/mergetree_family/replication.md) +- های متعدد ذخیره سازی انتخاب نوع +- کپی در مناطق مختلف در دسترس بودن +- رمزگذاری و جداسازی +- تعمیر و نگهداری خودکار -{## [Original article](https://clickhouse.tech/docs/en/commercial/cloud/) ##} +{## [مقاله اصلی](https://clickhouse.tech/docs/en/commercial/cloud/) ##} diff --git a/docs/fa/commercial/index.md b/docs/fa/commercial/index.md new file mode 100644 index 00000000000..71bc1afac05 --- /dev/null +++ b/docs/fa/commercial/index.md @@ -0,0 +1,8 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_folder_title: Commercial +toc_priority: 70 +--- + + diff --git a/docs/fa/data_types/array.md b/docs/fa/data_types/array.md deleted file mode 100644 index d134005c255..00000000000 --- a/docs/fa/data_types/array.md +++ /dev/null @@ -1,9 +0,0 @@ -
    - -# Array(T) {#arrayt} - -An array of elements of type T. The T type can be any type, including an array. We do not recommend using multidimensional arrays, because they are not well supported (for example, you cannot store multidimensional arrays in tables with the MergeTree engine). - -
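For illustration only (this query is not part of the original page), an array literal and its inferred type:

``` sql
SELECT array(1, 2, 4) AS arr, toTypeName(arr)
```

On a typical server this is expected to return `[1,2,4]` with the type `Array(UInt8)`.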
    - -[Original article](https://clickhouse.tech/docs/fa/data_types/array/) diff --git a/docs/fa/data_types/boolean.md b/docs/fa/data_types/boolean.md deleted file mode 100644 index 4b4338f6313..00000000000 --- a/docs/fa/data_types/boolean.md +++ /dev/null @@ -1,9 +0,0 @@ -
    - -# Boolean Values {#mqdyr-boolean} - -There is no separate type for boolean values. Use the UInt8 type, restricted to the values 0 and 1. - -
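As a small illustration (added, not from the original page), comparison operators already return UInt8 values of 0 or 1:

``` sql
SELECT 1 = 1 AS is_true, 1 = 2 AS is_false, toTypeName(is_true)
```

The expected result is `1`, `0`, and `UInt8`.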
    - -[Original article](https://clickhouse.tech/docs/fa/data_types/boolean/) diff --git a/docs/fa/data_types/date.md b/docs/fa/data_types/date.md deleted file mode 100644 index 6daf1574b6d..00000000000 --- a/docs/fa/data_types/date.md +++ /dev/null @@ -1,11 +0,0 @@ -
    - -# Date {#date} - -A date. Stored as two bytes holding the number of days since 1970-01-01 (unsigned). Allows storing values from just after the start of the Unix Epoch up to the upper threshold supported by this standard (currently, the upper bound is the year 2106, but the last fully supported year is 2105). The minimum value is output as 0000-00-00. - -The date is stored without the time zone. - -
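A minimal example (added for illustration):

``` sql
SELECT toDate('2019-01-01') AS d, toTypeName(d)
```

which returns `2019-01-01` with the type `Date`.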
    - -[Original article](https://clickhouse.tech/docs/fa/data_types/date/) diff --git a/docs/fa/data_types/datetime.md b/docs/fa/data_types/datetime.md deleted file mode 100644 index 872c633948c..00000000000 --- a/docs/fa/data_types/datetime.md +++ /dev/null @@ -1,17 +0,0 @@ -
    - -# DateTime {#data_type-datetime} - -A date with time, stored in four bytes as a Unix timestamp (unsigned). Allows storing values in the same range as the Date type. The minimum value is output as 0000-00-00 00:00:00. The time is stored with one-second precision. - -## Time zones {#time-zones} - -This type is converted from text to binary and back using the server's time zone at the moment the client or server starts. In text format, information about DST is lost. - -By default, when a client connects to the server, it switches to the server's time zone. You can change this behavior by enabling the `--use_client_time_zone` option in the command-line client. - -So when working with textual dates (for example, when taking text dumps), keep in mind that the results may be ambiguous because of DST changes, and there may be problems matching the output with the data if the time zone has changed. - -
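A minimal illustration (added, not part of the original page):

``` sql
SELECT toDateTime('2019-01-01 00:00:00') AS dt, toTypeName(dt)
```

The string is parsed using the server's time zone and stored as a Unix timestamp.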
    - -[Original article](https://clickhouse.tech/docs/fa/data_types/datetime/) diff --git a/docs/fa/data_types/datetime64.md b/docs/fa/data_types/datetime64.md deleted file mode 100644 index e28390bbdd4..00000000000 --- a/docs/fa/data_types/datetime64.md +++ /dev/null @@ -1,101 +0,0 @@ ---- -en_copy: true ---- - -# DateTime64 {#data_type-datetime64} - -Allows storing an instant in time that can be expressed as a calendar date and a time of day, with defined sub-second precision. - -Tick size (precision): 10^(-precision) seconds. - -Syntax: - -``` sql -DateTime64(precision, [timezone]) -``` - -Internally, stores data as a number of ‘ticks’ since epoch start (1970-01-01 00:00:00 UTC) as Int64. The tick resolution is determined by the precision parameter. Additionally, the `DateTime64` type can store a time zone that is the same for the entire column and affects how `DateTime64` values are displayed in text format and how values specified as strings are parsed (‘2020-01-01 05:00:01.000’). The time zone is not stored in the rows of the table (or in the resultset), but is stored in the column metadata. See details in [DateTime](datetime.md). - -## Examples {#examples} - -**1.** Creating a table with a `DateTime64`-type column and inserting data into it: - -``` sql -CREATE TABLE dt -( - `timestamp` DateTime64(3, 'Europe/Moscow'), - `event_id` UInt8 -) -ENGINE = TinyLog -``` - -``` sql -INSERT INTO dt Values (1546300800000, 1), ('2019-01-01 00:00:00', 2) -``` - -``` sql -SELECT * FROM dt -``` - -``` text -┌───────────────timestamp─┬─event_id─┐ -│ 2019-01-01 03:00:00.000 │        1 │ -│ 2019-01-01 00:00:00.000 │        2 │ -└─────────────────────────┴──────────┘ -``` - -- When inserting a datetime as an integer, it is treated as an appropriately scaled Unix Timestamp (UTC). `1546300800000` (with precision 3) represents `'2019-01-01 00:00:00'` UTC. However, as the `timestamp` column has the `Europe/Moscow` (UTC+3) timezone specified, when output as a string the value will be shown as `'2019-01-01 03:00:00'`. -- When inserting a string value as a datetime, it is treated as being in the column timezone. `'2019-01-01 00:00:00'` will be treated as being in the `Europe/Moscow` timezone and stored as `1546290000000`. 
- -**2.** Filtering on `DateTime64` values - -``` sql -SELECT * FROM dt WHERE timestamp = toDateTime64('2019-01-01 00:00:00', 3, 'Europe/Moscow') -``` - -``` text -┌───────────────timestamp─┬─event_id─┐ -│ 2019-01-01 00:00:00.000 │ 2 │ -└─────────────────────────┴──────────┘ -``` - -Unlike `DateTime`, `DateTime64` values are not converted from `String` automatically - -**3.** Getting a time zone for a `DateTime64`-type value: - -``` sql -SELECT toDateTime64(now(), 3, 'Europe/Moscow') AS column, toTypeName(column) AS x -``` - -``` text -┌──────────────────column─┬─x──────────────────────────────┐ -│ 2019-10-16 04:12:04.000 │ DateTime64(3, 'Europe/Moscow') │ -└─────────────────────────┴────────────────────────────────┘ -``` - -**4.** Timezone conversion - -``` sql -SELECT -toDateTime64(timestamp, 3, 'Europe/London') as lon_time, -toDateTime64(timestamp, 3, 'Europe/Moscow') as mos_time -FROM dt -``` - -``` text -┌───────────────lon_time──┬────────────────mos_time─┐ -│ 2019-01-01 00:00:00.000 │ 2019-01-01 03:00:00.000 │ -│ 2018-12-31 21:00:00.000 │ 2019-01-01 00:00:00.000 │ -└─────────────────────────┴─────────────────────────┘ -``` - -## See Also {#see-also} - -- [Type conversion functions](../query_language/functions/type_conversion_functions.md) -- [Functions for working with dates and times](../query_language/functions/date_time_functions.md) -- [Functions for working with arrays](../query_language/functions/array_functions.md) -- [The `date_time_input_format` setting](../operations/settings/settings.md#settings-date_time_input_format) -- [The `timezone` server configuration parameter](../operations/server_settings/settings.md#server_settings-timezone) -- [Operators for working with dates and times](../query_language/operators.md#operators-datetime) -- [`Date` data type](date.md) -- [`DateTime` data type](datetime.md) diff --git a/docs/fa/data_types/decimal.md b/docs/fa/data_types/decimal.md deleted file mode 100644 index d960675a310..00000000000 --- a/docs/fa/data_types/decimal.md +++ /dev/null @@ -1,106 +0,0 @@ ---- -en_copy: true ---- - -# Decimal(P, S), Decimal32(S), Decimal64(S), Decimal128(S) {#decimalp-s-decimal32s-decimal64s-decimal128s} - -Signed fixed-point numbers that keep precision during add, subtract and multiply operations. For division least significant digits are discarded (not rounded). - -## Parameters {#parameters} - -- P - precision. Valid range: \[ 1 : 38 \]. Determines how many decimal digits number can have (including fraction). -- S - scale. Valid range: \[ 0 : P \]. Determines how many decimal digits fraction can have. - -Depending on P parameter value Decimal(P, S) is a synonym for: -- P from \[ 1 : 9 \] - for Decimal32(S) -- P from \[ 10 : 18 \] - for Decimal64(S) -- P from \[ 19 : 38 \] - for Decimal128(S) - -## Decimal value ranges {#decimal-value-ranges} - -- Decimal32(S) - ( -1 \* 10^(9 - S), 1 \* 10^(9 - S) ) -- Decimal64(S) - ( -1 \* 10^(18 - S), 1 \* 10^(18 - S) ) -- Decimal128(S) - ( -1 \* 10^(38 - S), 1 \* 10^(38 - S) ) - -For example, Decimal32(4) can contain numbers from -99999.9999 to 99999.9999 with 0.0001 step. - -## Internal representation {#internal-representation} - -Internally data is represented as normal signed integers with respective bit width. Real value ranges that can be stored in memory are a bit larger than specified above, which are checked only on conversion from a string. - -Because modern CPU’s do not support 128-bit integers natively, operations on Decimal128 are emulated. 
Because of this, Decimal128 works significantly slower than Decimal32/Decimal64. - -## Operations and result type {#operations-and-result-type} - -Binary operations on Decimal result in a wider result type (with any order of arguments). - -- Decimal64(S1) Decimal32(S2) -\> Decimal64(S) -- Decimal128(S1) Decimal32(S2) -\> Decimal128(S) -- Decimal128(S1) Decimal64(S2) -\> Decimal128(S) - -Rules for scale: - -- add, subtract: S = max(S1, S2). -- multiply: S = S1 + S2. -- divide: S = S1. - -For similar operations between Decimal and integers, the result is a Decimal of the same size as the argument. - -Operations between Decimal and Float32/Float64 are not defined. If you need them, you can explicitly cast one of the arguments using the toDecimal32, toDecimal64, toDecimal128 or toFloat32, toFloat64 builtins. Keep in mind that the result will lose precision and that type conversion is a computationally expensive operation. - -Some functions on Decimal return the result as Float64 (for example, var or stddev). Intermediate calculations might still be performed in Decimal, which might lead to different results between Float64 and Decimal inputs with the same values. - -## Overflow checks {#overflow-checks} - -During calculations on Decimal, integer overflows might happen. Excessive digits in a fraction are discarded (not rounded). Excessive digits in the integer part will lead to an exception. - -``` sql -SELECT toDecimal32(2, 4) AS x, x / 3 -``` - -``` text -┌──────x─┬─divide(toDecimal32(2, 4), 3)─┐ -│ 2.0000 │                       0.6666 │ -└────────┴──────────────────────────────┘ -``` - -``` sql -SELECT toDecimal32(4.2, 8) AS x, x * x -``` - -``` text -DB::Exception: Scale is out of bounds. -``` - -``` sql -SELECT toDecimal32(4.2, 8) AS x, 6 * x -``` - -``` text -DB::Exception: Decimal math overflow. -``` - -Overflow checks lead to slower operations. If it is known that overflows are not possible, it makes sense to disable the checks using the `decimal_check_overflow` setting. When checks are disabled and overflow happens, the result will be incorrect: - -``` sql -SET decimal_check_overflow = 0; -SELECT toDecimal32(4.2, 8) AS x, 6 * x -``` - -``` text -┌──────────x─┬─multiply(6, toDecimal32(4.2, 8))─┐ -│ 4.20000000 │                     -17.74967296 │ -└────────────┴──────────────────────────────────┘ -``` - -Overflow checks happen not only on arithmetic operations but also on value comparison: - -``` sql -SELECT toDecimal32(1, 8) < 100 -``` - -``` text -DB::Exception: Can't compare. -``` - -[Original article](https://clickhouse.tech/docs/en/data_types/decimal/) diff --git a/docs/fa/data_types/domains/overview.md b/docs/fa/data_types/domains/overview.md deleted file mode 100644 index a131f8f6f3e..00000000000 --- a/docs/fa/data_types/domains/overview.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -en_copy: true ---- - -# Domains {#domains} - -Domains are special-purpose types that add some extra features atop an existing base type, while leaving the on-wire and on-disc format of the underlying data type intact. At the moment, ClickHouse does not support user-defined domains. 
- -You can use domains anywhere the corresponding base type can be used, for example: - -- Create a column of a domain type -- Read/write values from/to a domain column -- Use it as an index if the base type can be used as an index -- Call functions with values of a domain column - -### Extra Features of Domains {#extra-features-of-domains} - -- Explicit column type name in `SHOW CREATE TABLE` or `DESCRIBE TABLE` -- Input from human-friendly format with `INSERT INTO domain_table(domain_column) VALUES(...)` -- Output to human-friendly format for `SELECT domain_column FROM domain_table` -- Loading data from an external source in the human-friendly format: `INSERT INTO domain_table FORMAT CSV ...` - -### Limitations {#limitations} - -- Can’t convert an index column of the base type to the domain type via `ALTER TABLE`. -- Can’t implicitly convert string values into domain values when inserting data from another column or table. -- Domain adds no constraints on stored values. - -[Original article](https://clickhouse.tech/docs/en/data_types/domains/overview) diff --git a/docs/fa/data_types/enum.md b/docs/fa/data_types/enum.md deleted file mode 100644 index 80e5bdba7a5..00000000000 --- a/docs/fa/data_types/enum.md +++ /dev/null @@ -1,33 +0,0 @@ -
    - -# Enum {#enum} - -Enum8 or Enum16. Lets you store a limited set of strings more efficiently than the String type. - -Example: - -
    - - Enum8('hello' = 1, 'world' = 2) - -
    - -- A value in this column can be one of the two values ‘hello’ or ‘world’. - -Each of the values is assigned a number in the range `-128 ... 127` for `Enum8` or in the range `-32768 ... 32767` for `Enum16`. All the strings and numbers must be different. An empty string is allowed. If this type is specified (in a table definition), the numbers can be in an arbitrary order. However, the order does not matter. - -In RAM, this type is stored the same way as `Int8` or `Int16`. When reading in text form, ClickHouse parses the value as a string and searches for the corresponding string among the Enum values. If it is not found, an exception is thrown. When reading in text format, the string is read and the corresponding numeric value is looked up. An exception is thrown if it is not found. When writing, the value is written with the corresponding string. If the column data contains garbage (numbers that are not from the valid set), an exception is thrown. When reading and writing in binary form, it works the same way as for the Int8 and Int16 types. The implicit default value is the value with the lowest number. - -During `ORDER BY`, `GROUP BY`, `IN`, `DISTINCT` and so on, this type behaves the same way as the corresponding numbers. For example, ORDER BY sorts them numerically. Equality and comparison operators work the same way on Enums as they do on the underlying numeric values. - -Enum values cannot be compared with numbers. Enum values can be compared with a constant string. If the string being compared is not a valid value for the Enum, an exception is thrown. The IN operator is supported with the Enum on the left-hand side and a set of strings on the right-hand side. The strings are the values of the corresponding Enum. - -Most numeric and string operations are not defined for Enum values, e.g. adding a number to an Enum or concatenating a string with an Enum. However, the Enum has a `toString` function for getting its string value. - -Enum values are also convertible to numeric types using the `toT` function, where T is a numeric type. When T corresponds to the Enum’s underlying numeric type, this conversion is zero-cost. The Enum type can be changed without cost using ALTER, if only the set of values is changed. It is possible to both add and remove members of the Enum using ALTER (removing is safe only if the removed value has never been used in the table). As a safeguard, changing the numeric value of a previously defined Enum member will throw an exception. - -Using ALTER, it is possible to change an Enum8 to an Enum16 or vice versa, just like changing an Int8 to Int16. - -
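As an illustration of the behavior described above (a sketch added to this page, meant to be run statement by statement):

``` sql
CREATE TABLE t_enum (x Enum8('hello' = 1, 'world' = 2)) ENGINE = TinyLog;
INSERT INTO t_enum VALUES ('hello'), ('world');
SELECT x, toInt8(x), toString(x) FROM t_enum;
```

Inserting a string that is not in the set, such as `'test'`, is expected to throw an exception.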
    - -[Original article](https://clickhouse.tech/docs/fa/data_types/enum/) diff --git a/docs/fa/data_types/fixedstring.md b/docs/fa/data_types/fixedstring.md deleted file mode 100644 index e89b4fb0ad5..00000000000 --- a/docs/fa/data_types/fixedstring.md +++ /dev/null @@ -1,11 +0,0 @@ -
    - -# FixedString(N) {#fixedstringn} - -A fixed-length string of N bytes (neither characters nor code points). N must be a strictly positive natural number. When the server reads a string that contains fewer than N bytes (such as when parsing data for an INSERT), the string is padded on the right with null bytes to fill the remaining space. When the server reads a string that contains more than N bytes, an error message is returned. When the server writes a fixed-length string (for example, when returning the result of a SELECT), null bytes are not trimmed from the end of the string and are included in the output. Note that this behavior differs from MySQL's behavior for CHAR (where strings are padded with spaces and the spaces are removed on output). - -Fewer functions work with FixedString(N) than with String, so it is less convenient to use. - -
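A short illustration of the padding behavior (added, not from the original page):

``` sql
SELECT toFixedString('foo', 5) AS s, length(s)
```

`length(s)` returns 5, because the value is padded with null bytes up to N.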
    - -[Original article](https://clickhouse.tech/docs/fa/data_types/fixedstring/) diff --git a/docs/fa/data_types/float.md b/docs/fa/data_types/float.md deleted file mode 100644 index a9646b9dc3d..00000000000 --- a/docs/fa/data_types/float.md +++ /dev/null @@ -1,82 +0,0 @@ -
    - -# Float32, Float64 {#float32-float64} - -[Floating point numbers](https://en.wikipedia.org/wiki/IEEE_754). - -The float types in ClickHouse are the same as in C: - -- `Float32` - `float` -- `Float64` - `double` - -We recommend storing data in integer form instead of float whenever possible. For example: convert fixed-precision numbers to an integer value, such as page load time in milliseconds. - -## Using floating point numbers {#stfdh-z-dd-float} - -- Computations with floating point numbers may produce rounding errors. - -
    - -``` sql -SELECT 1 - 0.9 -``` - - ┌───────minus(1, 0.9)─┐ - │ 0.09999999999999998 │ - └─────────────────────┘ - -
    - -- The results of computations depend on the computation method (the processor type and architecture of the system). -- Float computations may produce results such as infinity (`inf`) and «Not-a-number» (`NaN`). This should be taken into account when processing the results of computations. -- When reading floating point numbers from rows, the results may not be the nearest machine-representable numbers. - -## NaN and Inf {#data_type-float-nan-inf} - -In contrast to standard SQL, ClickHouse supports the following categories of floating point numbers: - -- `Inf` – Infinity. - -
    - -``` sql -SELECT 0.5 / 0 -``` - - ┌─divide(0.5, 0)─┐ - │ inf │ - └────────────────┘ - -
    - -- `-Inf` – Negative infinity. - -
    - -``` sql -SELECT -0.5 / 0 -``` - - ┌─divide(-0.5, 0)─┐ - │ -inf │ - └─────────────────┘ - -
    - -- `NaN` – Not a number. - -
    - - SELECT 0 / 0 - - ┌─divide(0, 0)─┐ - │ nan │ - └──────────────┘ - -
    - -See the rules for sorting `NaN` in the section [ORDER BY clause](../query_language/select.md). - -
    - -[Original article](https://clickhouse.tech/docs/fa/data_types/float/) diff --git a/docs/fa/data_types/index.md b/docs/fa/data_types/index.md deleted file mode 100644 index e6a0c7edad3..00000000000 --- a/docs/fa/data_types/index.md +++ /dev/null @@ -1,11 +0,0 @@ -
    - -# Data types {#data_types} - -ClickHouse can store various types of data in table cells. - -This section describes the data types supported by ClickHouse, as well as special considerations for using them where applicable. - -
    - -[Original article](https://clickhouse.tech/docs/fa/data_types/) diff --git a/docs/fa/data_types/int_uint.md b/docs/fa/data_types/int_uint.md deleted file mode 100644 index caa0204a9e8..00000000000 --- a/docs/fa/data_types/int_uint.md +++ /dev/null @@ -1,23 +0,0 @@ -
    - -# UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64 {#uint8-uint16-uint32-uint64-int8-int16-int32-int64} - -Fixed-length integers, with or without a sign. - -## Int ranges {#mhdwdh-y-int} - -- Int8 - \[-128 : 127\] -- Int16 - \[-32768 : 32767\] -- Int32 - \[-2147483648 : 2147483647\] -- Int64 - \[-9223372036854775808 : 9223372036854775807\] - -## UInt ranges {#mhdwdh-y-uint} - -- UInt8 - \[0 : 255\] -- UInt16 - \[0 : 65535\] -- UInt32 - \[0 : 4294967295\] -- UInt64 - \[0 : 18446744073709551615\] - -
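For illustration (added): ClickHouse picks the smallest of these types that fits an integer literal:

``` sql
SELECT toTypeName(1), toTypeName(-1), toTypeName(65536)
```

which is expected to return `UInt8`, `Int8`, and `UInt32` respectively.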
    - -[Original article](https://clickhouse.tech/docs/fa/data_types/int_uint/) diff --git a/docs/fa/data_types/nested_data_structures/aggregatefunction.md b/docs/fa/data_types/nested_data_structures/aggregatefunction.md deleted file mode 100644 index c3de6f586fd..00000000000 --- a/docs/fa/data_types/nested_data_structures/aggregatefunction.md +++ /dev/null @@ -1,9 +0,0 @@ -
    - -# AggregateFunction(name, types\_of\_arguments…) {#aggregatefunctionname-types-of-arguments} - -The intermediate state of an aggregate function. To get it, use aggregate functions with the ‘-State’ suffix. For more information, see the «AggregatingMergeTree» section. - -
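A minimal sketch of the `-State`/`-Merge` pair (added for illustration, assuming the `numbers` table function is available):

``` sql
SELECT uniqMerge(state) FROM (SELECT uniqState(number) AS state FROM numbers(1000))
```

`uniqState` produces an `AggregateFunction(uniq, UInt64)` value, and `uniqMerge` finalizes it; the expected result is 1000.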
    - -[Original article](https://clickhouse.tech/docs/fa/data_types/nested_data_structures/aggregatefunction/) diff --git a/docs/fa/data_types/nested_data_structures/index.md b/docs/fa/data_types/nested_data_structures/index.md deleted file mode 100644 index 419af3b9294..00000000000 --- a/docs/fa/data_types/nested_data_structures/index.md +++ /dev/null @@ -1,7 +0,0 @@ -
    - -# Nested data structures {#nested-data-structures} - -
    - -[Original article](https://clickhouse.tech/docs/fa/data_types/nested_data_structures/) diff --git a/docs/fa/data_types/nested_data_structures/nested.md b/docs/fa/data_types/nested_data_structures/nested.md deleted file mode 100644 index dc1b1ffefab..00000000000 --- a/docs/fa/data_types/nested_data_structures/nested.md +++ /dev/null @@ -1,111 +0,0 @@ -
    - -# Nested(Name1 Type1, Name2 Type2, …) {#nestedname1-type1-name2-type2} - -A nested data structure is like a nested table. The parameters of a nested data structure – the column names and their types – are specified the same way as in a CREATE query. Each table row can correspond to any number of rows in a nested data structure. - -Example: - -
    - -``` sql -CREATE TABLE test.visits -( - CounterID UInt32, - StartDate Date, - Sign Int8, - IsNew UInt8, - VisitID UInt64, - UserID UInt64, - ... - Goals Nested - ( - ID UInt32, - Serial UInt32, - EventTime DateTime, - Price Int64, - OrderID String, - CurrencyID UInt32 - ), - ... -) ENGINE = CollapsingMergeTree(StartDate, intHash32(UserID), (CounterID, StartDate, intHash32(UserID), VisitID), 8192, Sign) -``` - -
    - -This example declares the `Goals` nested data structure, which contains data about conversions (goals reached). Each row in the `visits` table can correspond to zero or any number of conversions. - -Only a single nesting level is supported. Columns of nested structures containing arrays are equivalent to multidimensional arrays, so they have limited support (tables with the MergeTree engine do not support multidimensional columns). - -In most cases, when working with a nested data structure, its individual columns are specified. To do this, the column names are separated by a dot. These columns make up an array of matching types. All the column arrays of a single nested data structure have the same length. - -Example: - -
    - -``` sql -SELECT - Goals.ID, - Goals.EventTime -FROM test.visits -WHERE CounterID = 101500 AND length(Goals.ID) < 5 -LIMIT 10 -``` - - ┌─Goals.ID───────────────────────┬─Goals.EventTime───────────────────────────────────────────────────────────────────────────┐ - │ [1073752,591325,591325] │ ['2014-03-17 16:38:10','2014-03-17 16:38:48','2014-03-17 16:42:27'] │ - │ [1073752] │ ['2014-03-17 00:28:25'] │ - │ [1073752] │ ['2014-03-17 10:46:20'] │ - │ [1073752,591325,591325,591325] │ ['2014-03-17 13:59:20','2014-03-17 22:17:55','2014-03-17 22:18:07','2014-03-17 22:18:51'] │ - │ [] │ [] │ - │ [1073752,591325,591325] │ ['2014-03-17 11:37:06','2014-03-17 14:07:47','2014-03-17 14:36:21'] │ - │ [] │ [] │ - │ [] │ [] │ - │ [591325,1073752] │ ['2014-03-17 00:46:05','2014-03-17 00:46:05'] │ - │ [1073752,591325,591325,591325] │ ['2014-03-17 13:28:33','2014-03-17 13:30:26','2014-03-17 18:51:21','2014-03-17 18:51:45'] │ - └────────────────────────────────┴───────────────────────────────────────────────────────────────────────────────────────────┘ - -
    - -The easiest way to think of a nested data structure is as a set of multiple column arrays of the same length. - -The only place where a SELECT query can specify the name of an entire nested data structure instead of individual columns is the ARRAY JOIN clause. For more information, see «ARRAY JOIN clause». Example: - -
    - -``` sql -SELECT - Goal.ID, - Goal.EventTime -FROM test.visits -ARRAY JOIN Goals AS Goal -WHERE CounterID = 101500 AND length(Goals.ID) < 5 -LIMIT 10 -``` - - ┌─Goal.ID─┬──────Goal.EventTime─┐ - │ 1073752 │ 2014-03-17 16:38:10 │ - │ 591325 │ 2014-03-17 16:38:48 │ - │ 591325 │ 2014-03-17 16:42:27 │ - │ 1073752 │ 2014-03-17 00:28:25 │ - │ 1073752 │ 2014-03-17 10:46:20 │ - │ 1073752 │ 2014-03-17 13:59:20 │ - │ 591325 │ 2014-03-17 22:17:55 │ - │ 591325 │ 2014-03-17 22:18:07 │ - │ 591325 │ 2014-03-17 22:18:51 │ - │ 1073752 │ 2014-03-17 11:37:06 │ - └─────────┴─────────────────────┘ - -
    - -You cannot perform SELECT for an entire nested data structure. You can only explicitly list the individual columns that are part of it. - -For an INSERT query, you must pass all the component column arrays of a nested data structure separately. During insertion, the system checks that they all have the same length. - -For a DESCRIBE query, the columns in a nested data structure are listed separately in the same way. - -The ALTER query for elements in a nested data structure is very limited. - -
    - -[مقاله اصلی](https://clickhouse.tech/docs/fa/data_types/nested_data_structures/nested/) diff --git a/docs/fa/data_types/nullable.md b/docs/fa/data_types/nullable.md deleted file mode 100644 index 60c4cfab81e..00000000000 --- a/docs/fa/data_types/nullable.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -en_copy: true ---- - -# Nullable(TypeName) {#data_type-nullable} - -Allows to store special marker ([NULL](../query_language/syntax.md)) that denotes “missing value” alongside normal values allowed by `TypeName`. For example, a `Nullable(Int8)` type column can store `Int8` type values, and the rows that don’t have a value will store `NULL`. - -For a `TypeName`, you can’t use composite data types [Array](array.md) and [Tuple](tuple.md). Composite data types can contain `Nullable` type values, such as `Array(Nullable(Int8))`. - -A `Nullable` type field can’t be included in table indexes. - -`NULL` is the default value for any `Nullable` type, unless specified otherwise in the ClickHouse server configuration. - -## Storage features {#storage-features} - -To store `Nullable` type values in a table column, ClickHouse uses a separate file with `NULL` masks in addition to normal file with values. Entries in masks file allow ClickHouse to distinguish between `NULL` and a default value of corresponding data type for each table row. Because of an additional file, `Nullable` column consumes additional storage space compared to a similar normal one. - -!!! info "Note" - Using `Nullable` almost always negatively affects performance, keep this in mind when designing your databases. - -## Usage example {#usage-example} - -``` sql -CREATE TABLE t_null(x Int8, y Nullable(Int8)) ENGINE TinyLog -``` - -``` sql -INSERT INTO t_null VALUES (1, NULL), (2, 3) -``` - -``` sql -SELECT x + y FROM t_null -``` - -``` text -┌─plus(x, y)─┐ -│ ᴺᵁᴸᴸ │ -│ 5 │ -└────────────┘ -``` - -[Original article](https://clickhouse.tech/docs/en/data_types/nullable/) diff --git a/docs/fa/data_types/special_data_types/expression.md b/docs/fa/data_types/special_data_types/expression.md deleted file mode 100644 index 3c33fad95a6..00000000000 --- a/docs/fa/data_types/special_data_types/expression.md +++ /dev/null @@ -1,9 +0,0 @@ -
    - -# Expression {#expression} - -Used for representing lambda expressions in high-order functions. - -
    - -[Original article](https://clickhouse.tech/docs/fa/data_types/special_data_types/expression/) diff --git a/docs/fa/data_types/special_data_types/index.md b/docs/fa/data_types/special_data_types/index.md deleted file mode 100644 index a95f5b1b49c..00000000000 --- a/docs/fa/data_types/special_data_types/index.md +++ /dev/null @@ -1,9 +0,0 @@ -
    - -# Special data types {#special-data-types} - -Special data type values cannot be saved to a table or output in query results, but are used in the intermediate results of a running query. - -
    - -[Original article](https://clickhouse.tech/docs/fa/data_types/special_data_types/) diff --git a/docs/fa/data_types/special_data_types/interval.md b/docs/fa/data_types/special_data_types/interval.md deleted file mode 100644 index 8a37476579c..00000000000 --- a/docs/fa/data_types/special_data_types/interval.md +++ /dev/null @@ -1,82 +0,0 @@ ---- -en_copy: true ---- - -# Interval {#data-type-interval} - -The family of data types representing time and date intervals. The resulting types of the [INTERVAL](../../query_language/operators.md#operator-interval) operator. - -!!! warning "Warning" - `Interval` data type values can’t be stored in tables. - -Structure: - -- Time interval as an unsigned integer value. -- Type of an interval. - -Supported interval types: - -- `SECOND` -- `MINUTE` -- `HOUR` -- `DAY` -- `WEEK` -- `MONTH` -- `QUARTER` -- `YEAR` - -For each interval type, there is a separate data type. For example, the `DAY` interval corresponds to the `IntervalDay` data type: - -``` sql -SELECT toTypeName(INTERVAL 4 DAY) -``` - -``` text -┌─toTypeName(toIntervalDay(4))─┐ -│ IntervalDay                  │ -└──────────────────────────────┘ -``` - -## Usage Remarks {#data-type-interval-usage-remarks} - -You can use `Interval`-type values in arithmetical operations with [Date](../../data_types/date.md) and [DateTime](../../data_types/datetime.md)-type values. For example, you can add 4 days to the current time: - -``` sql -SELECT now() as current_date_time, current_date_time + INTERVAL 4 DAY -``` - -``` text -┌───current_date_time─┬─plus(now(), toIntervalDay(4))─┐ -│ 2019-10-23 10:58:45 │           2019-10-27 10:58:45 │ -└─────────────────────┴───────────────────────────────┘ -``` - -Intervals with different types can’t be combined. You can’t use intervals like `4 DAY 1 HOUR`. Specify intervals in units that are smaller or equal to the smallest unit of the interval, for example, the `1 day and an hour` interval can be expressed as `25 HOUR` or `90000 SECOND`. - -You can’t perform arithmetical operations with `Interval`-type values, but you can add intervals of different types consecutively to values in `Date` or `DateTime` data types. For example: - -``` sql -SELECT now() AS current_date_time, current_date_time + INTERVAL 4 DAY + INTERVAL 3 HOUR -``` - -``` text -┌───current_date_time─┬─plus(plus(now(), toIntervalDay(4)), toIntervalHour(3))─┐ -│ 2019-10-23 11:16:28 │                                     2019-10-27 14:16:28 │ -└─────────────────────┴────────────────────────────────────────────────────────┘ -``` - -The following query causes an exception: - -``` sql -select now() AS current_date_time, current_date_time + (INTERVAL 4 DAY + INTERVAL 3 HOUR) -``` - -``` text -Received exception from server (version 19.14.1): -Code: 43. DB::Exception: Received from localhost:9000. DB::Exception: Wrong argument types for function plus: if one argument is Interval, then another must be Date or DateTime.. -``` - -## See Also {#see-also} - -- [INTERVAL](../../query_language/operators.md#operator-interval) operator -- [toInterval](../../query_language/functions/type_conversion_functions.md#function-tointerval) type conversion functions diff --git a/docs/fa/data_types/special_data_types/nothing.md b/docs/fa/data_types/special_data_types/nothing.md deleted file mode 100644 index a7a7e5b57d5..00000000000 --- a/docs/fa/data_types/special_data_types/nothing.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -en_copy: true ---- - -# Nothing {#nothing} - -The only purpose of this data type is to represent cases where a value is not expected. 
So you can’t create a `Nothing` type value. - -For example, the literal [NULL](../../query_language/syntax.md#null-literal) has the type `Nullable(Nothing)`. See more about [Nullable](../../data_types/nullable.md). - -The `Nothing` type can also be used to denote empty arrays: - -``` sql -SELECT toTypeName(array()) -``` - -``` text -┌─toTypeName(array())─┐ -│ Array(Nothing)      │ -└─────────────────────┘ -``` - -[Original article](https://clickhouse.tech/docs/en/data_types/special_data_types/nothing/) diff --git a/docs/fa/data_types/special_data_types/set.md b/docs/fa/data_types/special_data_types/set.md deleted file mode 100644 index 1d1a1a535ae..00000000000 --- a/docs/fa/data_types/special_data_types/set.md +++ /dev/null @@ -1,9 +0,0 @@ -
    - -# Set {#set} - -Used for the right half of an IN expression. - -
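For illustration (added): the constant list on the right side of `IN` is turned into a `Set` during query execution:

``` sql
SELECT number FROM system.numbers WHERE number IN (2, 4, 6) LIMIT 3
```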
    - -[Original article](https://clickhouse.tech/docs/fa/data_types/special_data_types/set/) diff --git a/docs/fa/data_types/string.md b/docs/fa/data_types/string.md deleted file mode 100644 index 8142ab0d861..00000000000 --- a/docs/fa/data_types/string.md +++ /dev/null @@ -1,13 +0,0 @@ -
    - -# String {#string} - -A type for strings of an arbitrary length. The length is not limited. The value can contain an arbitrary set of bytes, including null bytes. The String type replaces the VARCHAR, BLOB, CLOB, and other types from other DBMSs. - -## Encodings {#encodings} - -ClickHouse does not have the concept of encodings. Strings can contain an arbitrary set of bytes, which are stored and output as-is. If you need to store texts, we recommend using UTF-8 encoding. At the very least, if your terminal uses UTF-8 (as recommended), you can read and write your values without making conversions. Similarly, certain functions for working with strings have separate variations that work under the assumption that the string contains a set of bytes representing UTF-8 encoded text. For example, the ‘length’ function calculates the string length in bytes, while the ‘lengthUTF8’ function calculates the string length in Unicode code points. - -
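A quick illustration of the bytes-vs-code-points distinction mentioned above (added, not from the original page):

``` sql
SELECT length('привет') AS bytes, lengthUTF8('привет') AS code_points
```

For this UTF-8 string the expected result is 12 bytes and 6 code points.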
    - -[Original article](https://clickhouse.tech/docs/fa/data_types/string/) diff --git a/docs/fa/data_types/tuple.md b/docs/fa/data_types/tuple.md deleted file mode 100644 index 3935416b7d1..00000000000 --- a/docs/fa/data_types/tuple.md +++ /dev/null @@ -1,11 +0,0 @@ -
    - -# Tuple(T1, T2, …) {#tuplet1-t2} - -Tuples can’t be written to tables (other than Memory tables). They are used for temporary column grouping. Columns can be grouped when an IN expression is used in a query, and for specifying certain formal parameters of lambda functions. For more information, see «IN operators» and «Higher order functions». - -Tuples can be output as the result of running a query. In this case, for text formats other than JSON\*, values are comma-separated in brackets. In JSON\* formats, tuples are output as arrays. - -
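A minimal example (added for illustration):

``` sql
SELECT tuple(1, 'a') AS t, toTypeName(t)
```

which is expected to return `(1,'a')` with the type `Tuple(UInt8, String)`.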
    - -[مقاله اصلی](https://clickhouse.tech/docs/fa/data_types/tuple/) diff --git a/docs/fa/data_types/uuid.md b/docs/fa/data_types/uuid.md deleted file mode 100644 index 4546be19371..00000000000 --- a/docs/fa/data_types/uuid.md +++ /dev/null @@ -1,74 +0,0 @@ ---- -en_copy: true ---- - -# UUID {#uuid-data-type} - -A universally unique identifier (UUID) is a 16-byte number used to identify records. For detailed information about the UUID, see [Wikipedia](https://en.wikipedia.org/wiki/Universally_unique_identifier). - -The example of UUID type value is represented below: - -``` text -61f0c404-5cb3-11e7-907b-a6006ad3dba0 -``` - -If you do not specify the UUID column value when inserting a new record, the UUID value is filled with zero: - -``` text -00000000-0000-0000-0000-000000000000 -``` - -## How to generate {#how-to-generate} - -To generate the UUID value, ClickHouse provides the [generateUUIDv4](../query_language/functions/uuid_functions.md) function. - -## Usage example {#usage-example} - -**Example 1** - -This example demonstrates creating a table with the UUID type column and inserting a value into the table. - -``` sql -CREATE TABLE t_uuid (x UUID, y String) ENGINE=TinyLog -``` - -``` sql -INSERT INTO t_uuid SELECT generateUUIDv4(), 'Example 1' -``` - -``` sql -SELECT * FROM t_uuid -``` - -``` text -┌────────────────────────────────────x─┬─y─────────┐ -│ 417ddc5d-e556-4d27-95dd-a34d84e46a50 │ Example 1 │ -└──────────────────────────────────────┴───────────┘ -``` - -**Example 2** - -In this example, the UUID column value is not specified when inserting a new record. - -``` sql -INSERT INTO t_uuid (y) VALUES ('Example 2') -``` - -``` sql -SELECT * FROM t_uuid -``` - -``` text -┌────────────────────────────────────x─┬─y─────────┐ -│ 417ddc5d-e556-4d27-95dd-a34d84e46a50 │ Example 1 │ -│ 00000000-0000-0000-0000-000000000000 │ Example 2 │ -└──────────────────────────────────────┴───────────┘ -``` - -## Restrictions {#restrictions} - -The UUID data type only supports functions which [String](string.md) data type also supports (for example, [min](../query_language/agg_functions/reference.md#agg_function-min), [max](../query_language/agg_functions/reference.md#agg_function-max), and [count](../query_language/agg_functions/reference.md#agg_function-count)). - -The UUID data type is not supported by arithmetic operations (for example, [abs](../query_language/functions/arithmetic_functions.md#arithm_func-abs)) or aggregate functions, such as [sum](../query_language/agg_functions/reference.md#agg_function-sum) and [avg](../query_language/agg_functions/reference.md#agg_function-avg). - -[Original article](https://clickhouse.tech/docs/en/data_types/uuid/) diff --git a/docs/fa/database_engines/index.md b/docs/fa/database_engines/index.md deleted file mode 100644 index 2092982b496..00000000000 --- a/docs/fa/database_engines/index.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -en_copy: true ---- - -# Database Engines {#database-engines} - -Database engines allow you to work with tables. - -By default, ClickHouse uses its native database engine, which provides configurable [table engines](../operations/table_engines/index.md) and an [SQL dialect](../query_language/syntax.md). 
- -You can also use the following database engines: - -- [MySQL](mysql.md) - -- [Lazy](lazy.md) - -[Original article](https://clickhouse.tech/docs/en/database_engines/) diff --git a/docs/fa/database_engines/lazy.md b/docs/fa/database_engines/lazy.md deleted file mode 100644 index 45c5fd602d7..00000000000 --- a/docs/fa/database_engines/lazy.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -en_copy: true ---- - -# Lazy {#lazy} - -Keeps tables in RAM only `expiration_time_in_seconds` seconds after last access. Can be used only with \*Log tables. - -It’s optimized for storing many small \*Log tables, for which there is a long time interval between accesses. - -## Creating a Database {#creating-a-database} - - CREATE DATABASE testlazy ENGINE = Lazy(expiration_time_in_seconds); - -[Original article](https://clickhouse.tech/docs/en/database_engines/lazy/) diff --git a/docs/fa/database_engines/mysql.md b/docs/fa/database_engines/mysql.md deleted file mode 100644 index 0c82f5860eb..00000000000 --- a/docs/fa/database_engines/mysql.md +++ /dev/null @@ -1,132 +0,0 @@ ---- -en_copy: true ---- - -# MySQL {#mysql} - -Allows connecting to databases on a remote MySQL server and performing `INSERT` and `SELECT` queries to exchange data between ClickHouse and MySQL. - -The `MySQL` database engine translates queries to the MySQL server, so you can perform operations such as `SHOW TABLES` or `SHOW CREATE TABLE`. - -You cannot perform the following queries: - -- `RENAME` -- `CREATE TABLE` -- `ALTER` - -## Creating a Database {#creating-a-database} - -``` sql -CREATE DATABASE [IF NOT EXISTS] db_name [ON CLUSTER cluster] -ENGINE = MySQL('host:port', 'database', 'user', 'password') -``` - -**Engine Parameters** - -- `host:port` — MySQL server address. -- `database` — Remote database name. -- `user` — MySQL user. -- `password` — User password. - -## Data Types Support {#data_types-support} - -| MySQL | ClickHouse | |----------------------------------|---------------------------------------------| | UNSIGNED TINYINT | [UInt8](../data_types/int_uint.md) | | TINYINT | [Int8](../data_types/int_uint.md) | | UNSIGNED SMALLINT | [UInt16](../data_types/int_uint.md) | | SMALLINT | [Int16](../data_types/int_uint.md) | | UNSIGNED INT, UNSIGNED MEDIUMINT | [UInt32](../data_types/int_uint.md) | | INT, MEDIUMINT | [Int32](../data_types/int_uint.md) | | UNSIGNED BIGINT | [UInt64](../data_types/int_uint.md) | | BIGINT | [Int64](../data_types/int_uint.md) | | FLOAT | [Float32](../data_types/float.md) | | DOUBLE | [Float64](../data_types/float.md) | | DATE | [Date](../data_types/date.md) | | DATETIME, TIMESTAMP | [DateTime](../data_types/datetime.md) | | BINARY | [FixedString](../data_types/fixedstring.md) | - -All other MySQL data types are converted into [String](../data_types/string.md). - -[Nullable](../data_types/nullable.md) is supported. 
- -## Examples of Use {#examples-of-use} - -Table in MySQL: - -``` text -mysql> USE test; -Database changed - -mysql> CREATE TABLE `mysql_table` ( - -> `int_id` INT NOT NULL AUTO_INCREMENT, - -> `float` FLOAT NOT NULL, - -> PRIMARY KEY (`int_id`)); -Query OK, 0 rows affected (0,09 sec) - -mysql> insert into mysql_table (`int_id`, `float`) VALUES (1,2); -Query OK, 1 row affected (0,00 sec) - -mysql> select * from mysql_table; -+--------+-------+ -| int_id | value | -+--------+-------+ -| 1 | 2 | -+--------+-------+ -1 row in set (0,00 sec) -``` - -Database in ClickHouse, exchanging data with the MySQL server: - -``` sql -CREATE DATABASE mysql_db ENGINE = MySQL('localhost:3306', 'test', 'my_user', 'user_password') -``` - -``` sql -SHOW DATABASES -``` - -``` text -┌─name─────┐ -│ default │ -│ mysql_db │ -│ system │ -└──────────┘ -``` - -``` sql -SHOW TABLES FROM mysql_db -``` - -``` text -┌─name─────────┐ -│ mysql_table │ -└──────────────┘ -``` - -``` sql -SELECT * FROM mysql_db.mysql_table -``` - -``` text -┌─int_id─┬─value─┐ -│ 1 │ 2 │ -└────────┴───────┘ -``` - -``` sql -INSERT INTO mysql_db.mysql_table VALUES (3,4) -``` - -``` sql -SELECT * FROM mysql_db.mysql_table -``` - -``` text -┌─int_id─┬─value─┐ -│ 1 │ 2 │ -│ 3 │ 4 │ -└────────┴───────┘ -``` - -[Original article](https://clickhouse.tech/docs/en/database_engines/mysql/) diff --git a/docs/fa/development/architecture.md b/docs/fa/development/architecture.md index dc92d425d37..2a05095c400 100644 --- a/docs/fa/development/architecture.md +++ b/docs/fa/development/architecture.md @@ -1,200 +1,204 @@ --- -en_copy: true +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 62 +toc_title: "\u0628\u0631\u0631\u0633\u06CC \u0627\u062C\u0645\u0627\u0644\u06CC \u0627\ + \u0632 \u0645\u0639\u0645\u0627\u0631\u06CC \u06A9\u0644\u06CC\u06A9" --- -# Overview of ClickHouse Architecture {#overview-of-clickhouse-architecture} +# بررسی اجمالی از معماری کلیک {#overview-of-clickhouse-architecture} -ClickHouse is a true column-oriented DBMS. Data is stored by columns and during the execution of arrays (vectors or chunks of columns). Whenever possible, operations are dispatched on arrays, rather than on individual values. It is called “vectorized query execution,” and it helps lower the cost of actual data processing. +تاتر سندرم قبل از قاعدگی ستون گرا درست است. داده ها توسط ستون ها و در طول اجرای ارریس ذخیره می شود (بردارها و یا تکه های ستون). هر زمان ممکن, عملیات در ارریس اعزام, به جای در ارزش های فردی. این است که به نام “vectorized query execution,” و این کمک می کند کاهش هزینه پردازش داده های واقعی. -> This idea is nothing new. It dates back to the `APL` programming language and its descendants: `A +`, `J`, `K`, and `Q`. Array programming is used in scientific data processing. Neither is this idea something new in relational databases: for example, it is used in the `Vectorwise` system. +> این ایده چیز جدیدی نیست. این قدمت به `APL` زبان برنامه نویسی و فرزندان خود را: `A +`, `J`, `K` و `Q`. برنامه نویسی مجموعه در پردازش داده های علمی استفاده می شود. نه این ایده چیزی جدید در پایگاه داده های رابطه ای است: مثلا در `Vectorwise` سیستم. -There are two different approaches for speeding up query processing: vectorized query execution and runtime code generation. The latter removes all indirection and dynamic dispatch. Neither of these approaches is strictly better than the other. Runtime code generation can be better when it fuses many operations, thus fully utilizing CPU execution units and the pipeline. 
+There are two different approaches for speeding up query processing: vectorized query execution and runtime code generation. The latter removes all indirection and dynamic dispatch. Neither of these approaches is strictly better than the other. Runtime code generation can be better when it fuses many operations, thus fully utilizing CPU execution units and the pipeline. Vectorized query execution can be less practical because it involves temporary vectors that must be written to the cache and read back. If the temporary data does not fit in the L2 cache, this becomes an issue. But vectorized query execution more easily utilizes the SIMD capabilities of the CPU. A [research paper](http://15721.courses.cs.cmu.edu/spring2016/papers/p5-sompolski.pdf) written by our friends shows that it is better to combine both approaches. ClickHouse uses vectorized query execution and has limited initial support for runtime code generation.

-## Columns {#columns}
+## Columns {#columns}

-The `IColumn` interface is used to represent columns in memory (actually, chunks of columns). This interface provides helper methods for the implementation of various relational operators. Almost all operations are immutable: they do not modify the original column, but create a new modified one. For example, the `IColumn :: filter` method accepts a filter byte mask. It is used for the `WHERE` and `HAVING` relational operators. Additional examples: the `IColumn :: permute` method to support `ORDER BY`, the `IColumn :: cut` method to support `LIMIT`.
+The `IColumn` interface is used to represent columns in memory (actually, chunks of columns). This interface provides helper methods for the implementation of various relational operators. Almost all operations are immutable: they do not modify the original column, but create a new modified one. For example, the `IColumn :: filter` method accepts a filter byte mask. It is used for the `WHERE` and `HAVING` relational operators. Additional examples: the `IColumn :: permute` method to support `ORDER BY`, the `IColumn :: cut` method to support `LIMIT`.

-Various `IColumn` implementations (`ColumnUInt8`, `ColumnString`, and so on) are responsible for the memory layout of columns. The memory layout is usually a contiguous array. For integer columns, it is just one contiguous array, like `std :: vector`. For `String` and `Array` columns, it is two vectors: one for all array elements, placed contiguously, and a second one for offsets to the beginning of each array. There is also `ColumnConst` that stores just one value in memory, but looks like a column.
+Various `IColumn` implementations (`ColumnUInt8`, `ColumnString`, and so on) are responsible for the memory layout of columns. The memory layout is usually a contiguous array. For integer columns, it is just one contiguous array, like `std :: vector`. For `String` and `Array` columns, it is two vectors: one for all array elements, placed contiguously, and a second one for offsets to the beginning of each array. There is also `ColumnConst` that stores just one value in memory, but looks like a column.
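To make this concrete, here is a minimal standalone sketch of an immutable column with a byte-mask `filter` operation. It is an illustration only, a toy stand-in rather than ClickHouse's actual `IColumn` interface, which has many more methods and a far more careful memory model:

``` cpp
#include <cstdint>
#include <iostream>
#include <memory>
#include <vector>

// Simplified stand-in for IColumn: operations are immutable and return new columns.
struct Column
{
    virtual ~Column() = default;
    virtual size_t size() const = 0;
    // Keep rows where the mask byte is non-zero; the original column is untouched.
    virtual std::shared_ptr<Column> filter(const std::vector<uint8_t> & mask) const = 0;
};

struct ColumnUInt64 : Column
{
    std::vector<uint64_t> data;  // one contiguous array, like the real ColumnUInt64

    explicit ColumnUInt64(std::vector<uint64_t> d) : data(std::move(d)) {}

    size_t size() const override { return data.size(); }

    std::shared_ptr<Column> filter(const std::vector<uint8_t> & mask) const override
    {
        std::vector<uint64_t> result;
        for (size_t i = 0; i < data.size(); ++i)
            if (mask[i])
                result.push_back(data[i]);
        return std::make_shared<ColumnUInt64>(std::move(result));
    }
};

int main()
{
    ColumnUInt64 col({10, 20, 30, 40});
    auto filtered = col.filter({1, 0, 1, 0});  // e.g. the result of a WHERE condition
    std::cout << filtered->size() << " rows survive the filter\n";  // prints 2
}
```

The key property mirrored here is immutability: `filter` builds a brand-new column and leaves the original alone, which is what makes sharing columns between blocks safe.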
-## Field {#field}
+## Field {#field}

-Nevertheless, it is possible to work with individual values as well. To represent an individual value, the `Field` is used. `Field` is just a discriminated union of `UInt64`, `Int64`, `Float64`, `String` and `Array`. `IColumn` has the `operator[]` method to get the n-th value as a `Field` and the `insert` method to append a `Field` to the end of a column. These methods are not very efficient, because they require dealing with temporary `Field` objects representing an individual value. There are more efficient methods, such as `insertFrom`, `insertRangeFrom`, and so on.
+Nevertheless, it is possible to work with individual values as well. To represent an individual value, the `Field` is used. `Field` is just a discriminated union of `UInt64`, `Int64`, `Float64`, `String` and `Array`. `IColumn` has the `operator[]` method to get the n-th value as a `Field` and the `insert` method to append a `Field` to the end of a column. These methods are not very efficient, because they require dealing with temporary `Field` objects representing an individual value. There are more efficient methods, such as `insertFrom`, `insertRangeFrom`, and so on.

-`Field` doesn’t have enough information about a specific data type for a table. For example, `UInt8`, `UInt16`, `UInt32`, and `UInt64` are all represented as `UInt64` in a `Field`.
+`Field` doesn’t have enough information about a specific data type for a table. For example, `UInt8`, `UInt16`, `UInt32`, and `UInt64` are all represented as `UInt64` in a `Field`.

-## Leaky Abstractions {#leaky-abstractions}
+## Leaky Abstractions {#leaky-abstractions}

-`IColumn` has methods for common relational transformations of data, but they don’t meet all needs. For example, `ColumnUInt64` doesn’t have a method to calculate the sum of two columns, and `ColumnString` doesn’t have a method to run a substring search. These countless routines are implemented outside of `IColumn`.
+`IColumn` has methods for common relational transformations of data, but they don’t meet all needs. For example, `ColumnUInt64` doesn’t have a method to calculate the sum of two columns, and `ColumnString` doesn’t have a method to run a substring search. These countless routines are implemented outside of `IColumn`.

-Various functions on columns can be implemented in a generic, non-efficient way using `IColumn` methods to extract `Field` values, or in a specialized way using knowledge of the inner memory layout of data in a specific `IColumn` implementation. The latter is done by casting functions to a specific `IColumn` type and dealing with the internal representation directly. For example, `ColumnUInt64` has the `getData` method that returns a reference to an internal array, and then a separate routine reads or fills that array directly. We have “leaky abstractions” to allow efficient specializations of various routines.
+Various functions on columns can be implemented in a generic, non-efficient way using `IColumn` methods to extract `Field` values, or in a specialized way using knowledge of the inner memory layout of data in a specific `IColumn` implementation. The latter is done by casting functions to a specific `IColumn` type and dealing with the internal representation directly. For example, `ColumnUInt64` has the `getData` method that returns a reference to an internal array, and then a separate routine reads or fills that array directly. We have “leaky abstractions” to allow efficient specializations of various routines.

-## Data Types {#data_types}
+## Data Types {#data_types}

-`IDataType` is responsible for serialization and deserialization: for reading and writing chunks of columns or individual values in binary or text form. `IDataType` directly corresponds to data types in tables. For example, there are `DataTypeUInt32`, `DataTypeDateTime`, `DataTypeString` and so on.
+`IDataType` is responsible for serialization and deserialization: for reading and writing chunks of columns or individual values in binary or text form. `IDataType` directly corresponds to data types in tables. For example, there are `DataTypeUInt32`, `DataTypeDateTime`, `DataTypeString` and so on.

-`IDataType` and `IColumn` are only loosely related to each other. Different data types can be represented in memory by the same `IColumn` implementations. For example, `DataTypeUInt32` and `DataTypeDateTime` are both represented by `ColumnUInt32` or `ColumnConstUInt32`. In addition, the same data type can be represented by different `IColumn` implementations. For example, `DataTypeUInt8` can be represented by `ColumnUInt8` or `ColumnConstUInt8`.
+`IDataType` and `IColumn` are only loosely related to each other. Different data types can be represented in memory by the same `IColumn` implementations. For example, `DataTypeUInt32` and `DataTypeDateTime` are both represented by `ColumnUInt32` or `ColumnConstUInt32`. In addition, the same data type can be represented by different `IColumn` implementations. For example, `DataTypeUInt8` can be represented by `ColumnUInt8` or `ColumnConstUInt8`.

-`IDataType` only stores metadata. For instance, `DataTypeUInt8` doesn’t store anything at all (except vptr) and `DataTypeFixedString` stores just `N` (the size of fixed-size strings).
+`IDataType` only stores metadata. For instance, `DataTypeUInt8` doesn’t store anything at all (except vptr) and `DataTypeFixedString` stores just `N` (the size of fixed-size strings).

-`IDataType` has helper methods for various data formats. Examples are methods to serialize a value with possible quoting, to serialize a value for JSON, and to serialize a value as part of the XML format. There is no direct correspondence to data formats. For example, the different data formats `Pretty` and `TabSeparated` can use the same `serializeTextEscaped` helper method from the `IDataType` interface.
+`IDataType` has helper methods for various data formats. Examples are methods to serialize a value with possible quoting, to serialize a value for JSON, and to serialize a value as part of the XML format. There is no direct correspondence to data formats. For example, the different data formats `Pretty` and `TabSeparated` can use the same `serializeTextEscaped` helper method from the `IDataType` interface.

-## Block {#block}
+## Block {#block}

-A `Block` is a container that represents a subset (chunk) of a table in memory. It is just a set of triples: `(IColumn, IDataType, column name)`. During query execution, data is processed by `Block`s. If we have a `Block`, we have data (in the `IColumn` object), we have information about its type (in `IDataType`) that tells us how to deal with that column, and we have the column name. It could be either the original column name from the table or some artificial name assigned for getting temporary results of calculations.
+A `Block` is a container that represents a subset (chunk) of a table in memory. It is just a set of triples: `(IColumn, IDataType, column name)`. During query execution, data is processed by `Block`s. If we have a `Block`, we have data (in the `IColumn` object), we have information about its type (in `IDataType`) that tells us how to deal with that column, and we have the column name. It could be either the original column name from the table or some artificial name assigned for getting temporary results of calculations.
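A rough sketch of that triple structure is shown below. This is simplified; the real `Block` also tracks positions, supports lookup by name, and much more, so treat the member names here as assumptions made for illustration:

``` cpp
#include <cstdint>
#include <iostream>
#include <memory>
#include <string>
#include <vector>

// Toy stand-ins for the real interfaces.
struct IColumn { virtual ~IColumn() = default; virtual size_t size() const = 0; };
struct IDataType { virtual ~IDataType() = default; virtual std::string getName() const = 0; };

struct ColumnUInt64 : IColumn
{
    std::vector<uint64_t> data;
    explicit ColumnUInt64(std::vector<uint64_t> d) : data(std::move(d)) {}
    size_t size() const override { return data.size(); }
};

struct DataTypeUInt64 : IDataType
{
    std::string getName() const override { return "UInt64"; }
};

// One element of a block: (column, type, name).
struct ColumnWithTypeAndName
{
    std::shared_ptr<IColumn> column;
    std::shared_ptr<IDataType> type;
    std::string name;
};

// A block is just a set of such triples.
struct Block
{
    std::vector<ColumnWithTypeAndName> data;

    size_t rows() const { return data.empty() ? 0 : data.front().column->size(); }

    // Computing a function over the block appends a new result column;
    // the existing columns are never modified.
    void insert(ColumnWithTypeAndName elem) { data.push_back(std::move(elem)); }
};

int main()
{
    Block block;
    block.insert({std::make_shared<ColumnUInt64>(std::vector<uint64_t>{1, 2, 3}),
                  std::make_shared<DataTypeUInt64>(),
                  "x"});
    std::cout << block.rows() << " rows, column '" << block.data[0].name
              << "' of type " << block.data[0].type->getName() << "\n";
}
```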
-When we calculate some function over columns in a block, we add another column with its result to the block, and we don’t touch the columns for the function’s arguments because operations are immutable. Later, unneeded columns can be removed from the block, but not modified. This is convenient for the elimination of common subexpressions.
+When we calculate some function over columns in a block, we add another column with its result to the block, and we don’t touch the columns for the function’s arguments because operations are immutable. Later, unneeded columns can be removed from the block, but not modified. This is convenient for the elimination of common subexpressions.

-Blocks are created for every processed chunk of data. Note that for the same type of calculation, the column names and types remain the same for different blocks, and only the column data changes. It is better to split block data from the block header because small block sizes have a high overhead of temporary strings for copying shared\_ptrs and column names.
+Blocks are created for every processed chunk of data. Note that for the same type of calculation, the column names and types remain the same for different blocks, and only the column data changes. It is better to split block data from the block header because small block sizes have a high overhead of temporary strings for copying shared\_ptrs and column names.

-## Block Streams {#block-streams}
+## Block Streams {#block-streams}

-Block streams are for processing data. We use streams of blocks to read data from somewhere, perform data transformations, or write data to somewhere. `IBlockInputStream` has the `read` method to fetch the next block while available. `IBlockOutputStream` has the `write` method to push the block somewhere.
+Block streams are for processing data. We use streams of blocks to read data from somewhere, perform data transformations, or write data to somewhere. `IBlockInputStream` has the `read` method to fetch the next block while available. `IBlockOutputStream` has the `write` method to push the block somewhere.

-Streams are responsible for:
+Streams are responsible for:

-1. Reading or writing to a table. The table just returns a stream for reading or writing blocks.
-2. Implementing data formats. For example, if you want to output data to a terminal in `Pretty` format, you create a block output stream where you push blocks, and it formats them.
-3. Performing data transformations. Let’s say you have `IBlockInputStream` and want to create a filtered stream. You create `FilterBlockInputStream` and initialize it with your stream. Then when you pull a block from `FilterBlockInputStream`, it pulls a block from your stream, filters it, and returns the filtered block to you. Query execution pipelines are represented this way.
+1. Reading or writing to a table. The table just returns a stream for reading or writing blocks.
+2. Implementing data formats. For example, if you want to output data to a terminal in `Pretty` format, you create a block output stream where you push blocks, and it formats them.
+3. Performing data transformations. Let’s say you have `IBlockInputStream` and want to create a filtered stream. You create `FilterBlockInputStream` and initialize it with your stream. Then when you pull a block from `FilterBlockInputStream`, it pulls a block from your stream, filters it, and returns the filtered block to you. Query execution pipelines are represented this way.

-There are more sophisticated transformations. For example, when you pull from `AggregatingBlockInputStream`, it reads all data from its source, aggregates it, and then returns a stream of aggregated data for you. Another example: `UnionBlockInputStream` accepts many input sources in the constructor and also a number of threads. It launches multiple threads and reads from multiple sources in parallel.
+There are more sophisticated transformations. For example, when you pull from `AggregatingBlockInputStream`, it reads all data from its source, aggregates it, and then returns a stream of aggregated data for you. Another example: `UnionBlockInputStream` accepts many input sources in the constructor and also a number of threads. It launches multiple threads and reads from multiple sources in parallel.

-> Block streams use the “pull” approach to control flow: when you pull a block from the first stream, it consequently pulls the required blocks from nested streams, and the entire execution pipeline will work. Neither “pull” nor “push” is the best solution, because control flow is implicit, and that limits the implementation of various features like simultaneous execution of multiple queries (merging many pipelines together). This limitation could be overcome with coroutines or just running extra threads that wait for each other. We may have more possibilities if we make control flow explicit: if we locate the logic for passing data from one calculation unit to another outside of those calculation units. Read this [article](http://journal.stuffwithstuff.com/2013/01/13/iteration-inside-and-out/) for more thoughts.
+> Block streams use the “pull” approach to control flow: when you pull a block from the first stream, it consequently pulls the required blocks from nested streams, and the entire execution pipeline will work. Neither “pull” nor “push” is the best solution, because control flow is implicit, and that limits the implementation of various features like simultaneous execution of multiple queries (merging many pipelines together). This limitation could be overcome with coroutines or just running extra threads that wait for each other. We may have more possibilities if we make control flow explicit: if we locate the logic for passing data from one calculation unit to another outside of those calculation units. Read this [article](http://journal.stuffwithstuff.com/2013/01/13/iteration-inside-and-out/) for more thoughts.

-We should note that the query execution pipeline creates temporary data at each step. We try to keep the block size small enough so that temporary data fits in the CPU cache. With that assumption, writing and reading temporary data is almost free in comparison with other calculations. We could consider an alternative, which is to fuse many operations in the pipeline together. It could make the pipeline as short as possible and remove much of the temporary data, which could be an advantage, but it also has drawbacks. For example, a split pipeline makes it easy to implement caching intermediate data, stealing intermediate data from similar queries running at the same time, and merging pipelines for similar queries.
+We should note that the query execution pipeline creates temporary data at each step. We try to keep the block size small enough so that temporary data fits in the CPU cache. With that assumption, writing and reading temporary data is almost free in comparison with other calculations. We could consider an alternative, which is to fuse many operations in the pipeline together. It could make the pipeline as short as possible and remove much of the temporary data, which could be an advantage, but it also has drawbacks. For example, a split pipeline makes it easy to implement caching intermediate data, stealing intermediate data from similar queries running at the same time, and merging pipelines for similar queries.
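The pull model is easy to see in a toy version. This is a sketch only: the real `IBlockInputStream` also reports headers, progress, and more, and the filter predicate here (keep even values) is an arbitrary placeholder:

``` cpp
#include <iostream>
#include <memory>
#include <optional>
#include <vector>

using Block = std::vector<int>;  // stand-in: a "block" with one integer column

struct IBlockInputStream
{
    virtual ~IBlockInputStream() = default;
    virtual std::optional<Block> read() = 0;  // empty optional means end of stream
};

// Source stream: hands out pre-made blocks one by one.
struct VectorSource : IBlockInputStream
{
    std::vector<Block> blocks;
    size_t pos = 0;
    explicit VectorSource(std::vector<Block> b) : blocks(std::move(b)) {}
    std::optional<Block> read() override
    {
        if (pos == blocks.size())
            return std::nullopt;
        return blocks[pos++];
    }
};

// Filter stream: pulling a block from it pulls from the nested stream,
// filters the block, and returns the filtered block.
struct FilterStream : IBlockInputStream
{
    std::unique_ptr<IBlockInputStream> input;
    explicit FilterStream(std::unique_ptr<IBlockInputStream> in) : input(std::move(in)) {}
    std::optional<Block> read() override
    {
        auto block = input->read();
        if (!block)
            return std::nullopt;
        Block out;
        for (int x : *block)
            if (x % 2 == 0)  // placeholder predicate: keep even values
                out.push_back(x);
        return out;
    }
};

int main()
{
    auto pipeline = FilterStream(std::make_unique<VectorSource>(
        std::vector<Block>{{1, 2, 3}, {4, 5, 6}}));
    while (auto block = pipeline.read())  // pulling the top pulls the whole pipeline
        std::cout << "block of " << block->size() << " rows\n";
}
```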
-## Formats {#formats}
+## Formats {#formats}

-Data formats are implemented with block streams. There are “presentational” formats only suitable for the output of data to the client, such as the `Pretty` format, which provides only `IBlockOutputStream`. And there are input/output formats, such as `TabSeparated` or `JSONEachRow`.
+Data formats are implemented with block streams. There are “presentational” formats only suitable for the output of data to the client, such as the `Pretty` format, which provides only `IBlockOutputStream`. And there are input/output formats, such as `TabSeparated` or `JSONEachRow`.

-There are also row streams: `IRowInputStream` and `IRowOutputStream`. They allow you to pull/push data by individual rows, not by blocks. And they are only needed to simplify the implementation of row-oriented formats. The wrappers `BlockInputStreamFromRowInputStream` and `BlockOutputStreamFromRowOutputStream` allow you to convert row-oriented streams to regular block-oriented streams.
+There are also row streams: `IRowInputStream` and `IRowOutputStream`. They allow you to pull/push data by individual rows, not by blocks. And they are only needed to simplify the implementation of row-oriented formats. The wrappers `BlockInputStreamFromRowInputStream` and `BlockOutputStreamFromRowOutputStream` allow you to convert row-oriented streams to regular block-oriented streams.

 ## I/O {#io}

-For byte-oriented input/output, there are `ReadBuffer` and `WriteBuffer` abstract classes. They are used instead of C++ `iostream`s. Don’t worry: every mature C++ project is using something other than `iostream`s for good reasons.
+For byte-oriented input/output, there are `ReadBuffer` and `WriteBuffer` abstract classes. They are used instead of C++ `iostream`s. Don’t worry: every mature C++ project is using something other than `iostream`s for good reasons.

-`ReadBuffer` and `WriteBuffer` are just a contiguous buffer and a cursor pointing to the position in that buffer. Implementations may or may not own the memory for the buffer. There is a virtual method to fill the buffer with the following data (for `ReadBuffer`) or to flush the buffer somewhere (for `WriteBuffer`). The virtual methods are rarely called.
+`ReadBuffer` and `WriteBuffer` are just a contiguous buffer and a cursor pointing to the position in that buffer. Implementations may or may not own the memory for the buffer. There is a virtual method to fill the buffer with the following data (for `ReadBuffer`) or to flush the buffer somewhere (for `WriteBuffer`). The virtual methods are rarely called.

-Implementations of `ReadBuffer`/`WriteBuffer` are used for working with files, file descriptors, and network sockets, for implementing compression (`CompressedWriteBuffer` is initialized with another WriteBuffer and performs compression before writing data to it), and for other purposes – the names `ConcatReadBuffer`, `LimitReadBuffer`, and `HashingWriteBuffer` speak for themselves.
+Implementations of `ReadBuffer`/`WriteBuffer` are used for working with files, file descriptors, and network sockets, for implementing compression (`CompressedWriteBuffer` is initialized with another WriteBuffer and performs compression before writing data to it), and for other purposes – the names `ConcatReadBuffer`, `LimitReadBuffer`, and `HashingWriteBuffer` speak for themselves.

-Read/WriteBuffers only deal with bytes. There are functions from the `ReadHelpers` and `WriteHelpers` header files to help with formatting input/output. For example, there are helpers to write a number in decimal format.
+Read/WriteBuffers only deal with bytes. There are functions from the `ReadHelpers` and `WriteHelpers` header files to help with formatting input/output. For example, there are helpers to write a number in decimal format.
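The buffer-plus-cursor idea in a minimal form is sketched below. Again this is an assumption-laden toy, not the real class: the actual buffers support owned and non-owned memory, saved positions, and many other details.

``` cpp
#include <iostream>
#include <string>
#include <vector>

// Simplified WriteBuffer: a contiguous buffer and a cursor. When the buffer
// fills up, a virtual method flushes it somewhere (here: to stdout).
struct WriteBuffer
{
    std::vector<char> buffer;
    size_t pos = 0;

    explicit WriteBuffer(size_t size) : buffer(size) {}
    virtual ~WriteBuffer() = default;

    void write(const char * data, size_t len)
    {
        for (size_t i = 0; i < len; ++i)
        {
            if (pos == buffer.size())
                next();  // rarely called if the buffer is reasonably large
            buffer[pos++] = data[i];
        }
    }

    virtual void next()  // the virtual "flush the buffer somewhere" hook
    {
        std::cout.write(buffer.data(), pos);
        pos = 0;
    }
};

int main()
{
    WriteBuffer out(8);
    std::string s = "hello, buffered world\n";
    out.write(s.data(), s.size());
    out.next();  // flush the tail explicitly
}
```

A compressing or hashing buffer would simply override `next()` to transform the bytes before handing them to the nested destination, which is the layering the paragraph above describes.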
-Let’s look at what happens when you want to write a result set in `JSON` format to stdout. You have a result set ready to be fetched from `IBlockInputStream`. You create `WriteBufferFromFileDescriptor(STDOUT_FILENO)` to write bytes to stdout. You create `JSONRowOutputStream`, initialized with that `WriteBuffer`, to write rows in `JSON` to stdout. You create `BlockOutputStreamFromRowOutputStream` on top of it, to represent it as `IBlockOutputStream`. Then you call `copyData` to transfer data from `IBlockInputStream` to `IBlockOutputStream`, and everything works. Internally, `JSONRowOutputStream` will write various JSON delimiters and call the `IDataType::serializeTextJSON` method with a reference to `IColumn` and the row number as arguments. Consequently, `IDataType::serializeTextJSON` will call a method from `WriteHelpers.h`: for example, `writeText` for numeric types and `writeJSONString` for `DataTypeString`.
+Let’s look at what happens when you want to write a result set in `JSON` format to stdout. You have a result set ready to be fetched from `IBlockInputStream`. You create `WriteBufferFromFileDescriptor(STDOUT_FILENO)` to write bytes to stdout. You create `JSONRowOutputStream`, initialized with that `WriteBuffer`, to write rows in `JSON` to stdout. You create `BlockOutputStreamFromRowOutputStream` on top of it, to represent it as `IBlockOutputStream`. Then you call `copyData` to transfer data from `IBlockInputStream` to `IBlockOutputStream`, and everything works. Internally, `JSONRowOutputStream` will write various JSON delimiters and call the `IDataType::serializeTextJSON` method with a reference to `IColumn` and the row number as arguments. Consequently, `IDataType::serializeTextJSON` will call a method from `WriteHelpers.h`: for example, `writeText` for numeric types and `writeJSONString` for `DataTypeString`.

-## Tables {#tables}
+## Tables {#tables}

-The `IStorage` interface represents tables. Different implementations of that interface are different table engines. Examples are `StorageMergeTree`, `StorageMemory`, and so on. Instances of these classes are just tables.
+The `IStorage` interface represents tables. Different implementations of that interface are different table engines. Examples are `StorageMergeTree`, `StorageMemory`, and so on. Instances of these classes are just tables.

-The key `IStorage` methods are `read` and `write`. There are also `alter`, `rename`, `drop`, and so on. The `read` method accepts the following arguments: the set of columns to read from a table, the `AST` query to consider, and the desired number of streams to return. It returns one or multiple `IBlockInputStream` objects and information about the stage of data processing that was completed inside a table engine during query execution.
+The key `IStorage` methods are `read` and `write`. There are also `alter`, `rename`, `drop`, and so on. The `read` method accepts the following arguments: the set of columns to read from a table, the `AST` query to consider, and the desired number of streams to return. It returns one or multiple `IBlockInputStream` objects and information about the stage of data processing that was completed inside a table engine during query execution.

-In most cases, the read method is only responsible for reading the specified columns from a table, not for any further data processing. All further data processing is done by the query interpreter and is outside the responsibility of `IStorage`.
+In most cases, the read method is only responsible for reading the specified columns from a table, not for any further data processing. All further data processing is done by the query interpreter and is outside the responsibility of `IStorage`.
-But there are notable exceptions:
+But there are notable exceptions:

-- The AST query is passed to the `read` method, and the table engine can use it to derive index usage and to read less data from a table.
-- Sometimes the table engine can process data itself to a specific stage. For example, `StorageDistributed` can send a query to remote servers, ask them to process data to a stage where data from different remote servers can be merged, and return that preprocessed data. The query interpreter then finishes processing the data.
+- The AST query is passed to the `read` method, and the table engine can use it to derive index usage and to read less data from a table.
+- Sometimes the table engine can process data itself to a specific stage. For example, `StorageDistributed` can send a query to remote servers, ask them to process data to a stage where data from different remote servers can be merged, and return that preprocessed data. The query interpreter then finishes processing the data.

-The table’s `read` method can return multiple `IBlockInputStream` objects to allow parallel data processing. These multiple block input streams can read from a table in parallel. Then you can wrap these streams with various transformations (such as expression evaluation or filtering) that can be calculated independently and create a `UnionBlockInputStream` on top of them, to read from multiple streams in parallel.
+The table’s `read` method can return multiple `IBlockInputStream` objects to allow parallel data processing. These multiple block input streams can read from a table in parallel. Then you can wrap these streams with various transformations (such as expression evaluation or filtering) that can be calculated independently and create a `UnionBlockInputStream` on top of them, to read from multiple streams in parallel.

-There are also `TableFunction`s. These are functions that return a temporary `IStorage` object to use in the `FROM` clause of a query.
+There are also `TableFunction`s. These are functions that return a temporary `IStorage` object to use in the `FROM` clause of a query.

-To get a quick idea of how to implement your table engine, look at something simple, like `StorageMemory` or `StorageTinyLog`.
+To get a quick idea of how to implement your table engine, look at something simple, like `StorageMemory` or `StorageTinyLog`.

-> As the result of the `read` method, `IStorage` returns `QueryProcessingStage` – information about what parts of the query were already calculated inside storage.
+> As the result of the `read` method, `IStorage` returns `QueryProcessingStage` – information about what parts of the query were already calculated inside storage.

 ## Parsers {#parsers}

-A hand-written recursive descent parser parses a query. For example, `ParserSelectQuery` just recursively calls the underlying parsers for various parts of the query. Parsers create an `AST`. The `AST` is represented by nodes, which are instances of `IAST`.
+A hand-written recursive descent parser parses a query. For example, `ParserSelectQuery` just recursively calls the underlying parsers for various parts of the query. Parsers create an `AST`. The `AST` is represented by nodes, which are instances of `IAST`.

-> Parser generators are not used for historical reasons.
+> Parser generators are not used for historical reasons.
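A hand-written recursive descent parser has one function per grammar rule, each recursively calling the parsers for its sub-parts. Here is a minimal sketch of the technique for arithmetic expressions; it evaluates directly for brevity, whereas a real query parser such as `ParserSelectQuery` would build `IAST` nodes instead:

``` cpp
#include <cctype>
#include <iostream>
#include <string>

// Grammar: expr = number (('+' | '-') number)*
struct Parser
{
    std::string s;
    size_t pos = 0;

    explicit Parser(std::string text) : s(std::move(text)) {}

    long parseNumber()
    {
        long value = 0;
        while (pos < s.size() && std::isdigit(static_cast<unsigned char>(s[pos])))
            value = value * 10 + (s[pos++] - '0');
        return value;
    }

    // Each rule recursively calls the parsers for its sub-parts.
    long parseExpr()
    {
        long value = parseNumber();
        while (pos < s.size() && (s[pos] == '+' || s[pos] == '-'))
        {
            char op = s[pos++];
            long rhs = parseNumber();
            value = (op == '+') ? value + rhs : value - rhs;
        }
        return value;
    }
};

int main()
{
    std::cout << Parser("12+30-2").parseExpr() << "\n";  // prints 40
}
```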
-## Interpreters {#interpreters}
+## Interpreters {#interpreters}

-Interpreters are responsible for creating the query execution pipeline from an `AST`. There are simple interpreters, such as `InterpreterExistsQuery` and `InterpreterDropQuery`, and the more sophisticated `InterpreterSelectQuery`. The query execution pipeline is a combination of block input or output streams. For example, the result of interpreting the `SELECT` query is the `IBlockInputStream` to read the result set from; the result of the `INSERT` query is the `IBlockOutputStream` to write data for insertion to; and the result of interpreting the `INSERT SELECT` query is the `IBlockInputStream` that returns an empty result set on the first read, but that copies data from `SELECT` to `INSERT` at the same time.
+Interpreters are responsible for creating the query execution pipeline from an `AST`. There are simple interpreters, such as `InterpreterExistsQuery` and `InterpreterDropQuery`, and the more sophisticated `InterpreterSelectQuery`. The query execution pipeline is a combination of block input or output streams. For example, the result of interpreting the `SELECT` query is the `IBlockInputStream` to read the result set from; the result of the `INSERT` query is the `IBlockOutputStream` to write data for insertion to; and the result of interpreting the `INSERT SELECT` query is the `IBlockInputStream` that returns an empty result set on the first read, but that copies data from `SELECT` to `INSERT` at the same time.

-`InterpreterSelectQuery` uses `ExpressionAnalyzer` and `ExpressionActions` machinery for query analysis and transformations. This is where most rule-based query optimizations are done. `ExpressionAnalyzer` is quite messy and should be rewritten: various query transformations and optimizations should be extracted into separate classes to allow modular transformations of the query.
+`InterpreterSelectQuery` uses `ExpressionAnalyzer` and `ExpressionActions` machinery for query analysis and transformations. This is where most rule-based query optimizations are done. `ExpressionAnalyzer` is quite messy and should be rewritten: various query transformations and optimizations should be extracted into separate classes to allow modular transformations of the query.

-## Functions {#functions}
+## Functions {#functions}

-There are ordinary functions and aggregate functions. For aggregate functions, see the next section.
+There are ordinary functions and aggregate functions. For aggregate functions, see the next section.

-Ordinary functions don’t change the number of rows – they work as if they are processing each row independently. In fact, functions are not called for individual rows, but for `Block`s of data to implement vectorized query execution.
+Ordinary functions don’t change the number of rows – they work as if they are processing each row independently. In fact, functions are not called for individual rows, but for `Block`s of data to implement vectorized query execution.

-There are some miscellaneous functions, like [blockSize](../query_language/functions/other_functions.md#function-blocksize), [rowNumberInBlock](../query_language/functions/other_functions.md#function-rownumberinblock), and [runningAccumulate](../query_language/functions/other_functions.md#function-runningaccumulate), that exploit block processing and violate the independence of rows.
+There are some miscellaneous functions, like [blockSize](../sql_reference/functions/other_functions.md#function-blocksize), [rowNumberInBlock](../sql_reference/functions/other_functions.md#function-rownumberinblock), and [runningAccumulate](../sql_reference/functions/other_functions.md#function-runningaccumulate), that exploit block processing and violate the independence of rows.
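For instance, a `plus`-style function under this model receives whole columns rather than single values. The sketch below is simplified: the real implementation dispatches over `IColumn` types and is generated from C++ templates, as described next.

``` cpp
#include <cstdint>
#include <iostream>
#include <vector>

// Called once per block, not once per row: the tight loop body
// auto-vectorizes easily (SIMD), which is the point of the design.
std::vector<uint64_t> plusColumns(const std::vector<uint64_t> & a,
                                  const std::vector<uint64_t> & b)
{
    std::vector<uint64_t> result(a.size());
    for (size_t i = 0; i < a.size(); ++i)
        result[i] = a[i] + b[i];
    return result;
}

int main()
{
    auto sum = plusColumns({1, 2, 3}, {10, 20, 30});
    for (uint64_t v : sum)
        std::cout << v << ' ';  // 11 22 33
    std::cout << '\n';
}
```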
-ClickHouse has strong typing, so there’s no implicit type conversion. If a function doesn’t support a specific combination of types, it throws an exception. But functions can work (be overloaded) for many different combinations of types. For example, the `plus` function (to implement the `+` operator) works for any combination of numeric types: `UInt8` + `Float32`, `UInt16` + `Int8`, and so on. Also, some variadic functions can accept any number of arguments, such as the `concat` function.
+ClickHouse has strong typing, so there’s no implicit type conversion. If a function doesn’t support a specific combination of types, it throws an exception. But functions can work (be overloaded) for many different combinations of types. For example, the `plus` function (to implement the `+` operator) works for any combination of numeric types: `UInt8` + `Float32`, `UInt16` + `Int8`, and so on. Also, some variadic functions can accept any number of arguments, such as the `concat` function.

-Implementing a function may be slightly inconvenient because a function explicitly dispatches over the supported data types and supported `IColumns`. For example, the `plus` function has code generated by instantiation of a C++ template for each combination of numeric types, and for constant or non-constant left and right arguments.
+Implementing a function may be slightly inconvenient because a function explicitly dispatches over the supported data types and supported `IColumns`. For example, the `plus` function has code generated by instantiation of a C++ template for each combination of numeric types, and for constant or non-constant left and right arguments.

-It is an excellent place to implement runtime code generation to avoid template code bloat. Also, it makes it possible to add fused functions like fused multiply-add or to make multiple comparisons in one loop iteration.
+It is an excellent place to implement runtime code generation to avoid template code bloat. Also, it makes it possible to add fused functions like fused multiply-add or to make multiple comparisons in one loop iteration.

-Due to vectorized query execution, functions are not short-circuited. For example, if you write `WHERE f(x) AND g(y)`, both sides are calculated, even for rows where `f(x)` is zero (except when `f(x)` is a zero constant expression). But if the selectivity of the `f(x)` condition is high, and calculation of `f(x)` is much cheaper than `g(y)`, it’s better to implement multi-pass calculation. It would first calculate `f(x)`, then filter columns by the result, and then calculate `g(y)` only for smaller, filtered chunks of data.
+Due to vectorized query execution, functions are not short-circuited. For example, if you write `WHERE f(x) AND g(y)`, both sides are calculated, even for rows where `f(x)` is zero (except when `f(x)` is a zero constant expression). But if the selectivity of the `f(x)` condition is high, and calculation of `f(x)` is much cheaper than `g(y)`, it’s better to implement multi-pass calculation. It would first calculate `f(x)`, then filter columns by the result, and then calculate `g(y)` only for smaller, filtered chunks of data.

-## Aggregate Functions {#aggregate-functions}
+## Aggregate Functions {#aggregate-functions}

-Aggregate functions are stateful functions. They accumulate passed values into some state and allow you to get results from that state. They are managed with the `IAggregateFunction` interface. States can be rather simple (the state for `AggregateFunctionCount` is just a single `UInt64` value) or quite complex (the state of `AggregateFunctionUniqCombined` is a combination of a linear array, a hash table, and a `HyperLogLog` probabilistic data structure).
+Aggregate functions are stateful functions. They accumulate passed values into some state and allow you to get results from that state. They are managed with the `IAggregateFunction` interface. States can be rather simple (the state for `AggregateFunctionCount` is just a single `UInt64` value) or quite complex (the state of `AggregateFunctionUniqCombined` is a combination of a linear array, a hash table, and a `HyperLogLog` probabilistic data structure).
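The accumulate-then-finalize state machine can be sketched in a few lines, roughly in the spirit of an average aggregate. This is a free-standing illustration, not the real `IAggregateFunction`, which works with untyped state pointers and arena allocation as described below:

``` cpp
#include <cstdint>
#include <iostream>

// Simplified aggregate state: accumulate values, then extract the result.
struct AvgState
{
    double sum = 0;
    uint64_t count = 0;
};

void add(AvgState & state, double value)  // consume one value
{
    state.sum += value;
    ++state.count;
}

void merge(AvgState & lhs, const AvgState & rhs)  // combine partial states,
{                                                 // e.g. from different servers
    lhs.sum += rhs.sum;
    lhs.count += rhs.count;
}

double result(const AvgState & state)
{
    return state.count ? state.sum / state.count : 0.0;
}

int main()
{
    AvgState a, b;
    for (double v : {1.0, 2.0, 3.0}) add(a, v);
    for (double v : {4.0, 5.0}) add(b, v);
    merge(a, b);                     // distributed-style merge of partial states
    std::cout << result(a) << "\n";  // prints 3
}
```

The separate `merge` step is what makes both high-cardinality `GROUP BY` and distributed aggregation possible: partial states can be combined without revisiting the raw rows.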
-States are allocated in `Arena` (a memory pool) to deal with multiple states while executing a high-cardinality `GROUP BY` query. States can have a non-trivial constructor and destructor: for example, complicated aggregation states can allocate additional memory themselves. It requires some attention to creating and destroying states and properly passing their ownership and destruction order.
+States are allocated in `Arena` (a memory pool) to deal with multiple states while executing a high-cardinality `GROUP BY` query. States can have a non-trivial constructor and destructor: for example, complicated aggregation states can allocate additional memory themselves. It requires some attention to creating and destroying states and properly passing their ownership and destruction order.

-Aggregation states can be serialized and deserialized to pass over the network during distributed query execution or to write them to disk where there is not enough RAM. They can even be stored in a table with `DataTypeAggregateFunction` to allow incremental aggregation of data.
+Aggregation states can be serialized and deserialized to pass over the network during distributed query execution or to write them to disk where there is not enough RAM. They can even be stored in a table with `DataTypeAggregateFunction` to allow incremental aggregation of data.

-> The serialized data format for aggregate function states is not versioned right now. It is ok if aggregate states are only stored temporarily. But we have the `AggregatingMergeTree` table engine for incremental aggregation, and people are already using it in production. This is the reason why backward compatibility is required when changing the serialized format for any aggregate function in the future.
+> The serialized data format for aggregate function states is not versioned right now. It is ok if aggregate states are only stored temporarily. But we have the `AggregatingMergeTree` table engine for incremental aggregation, and people are already using it in production. This is the reason why backward compatibility is required when changing the serialized format for any aggregate function in the future.

-## Server {#server}
+## Server {#server}

-The server implements several different interfaces:
+The server implements several different interfaces:

-- An HTTP interface for any foreign clients.
-- A TCP interface for the native ClickHouse client and for cross-server communication during distributed query execution.
-- An interface for transferring data for replication.
+- An HTTP interface for any foreign clients.
+- A TCP interface for the native ClickHouse client and for cross-server communication during distributed query execution.
+- An interface for transferring data for replication.

-Internally, it is just a primitive multithreaded server without coroutines or fibers. Since the server is not designed to process a high rate of simple queries but to process a relatively low rate of complex queries, each of them can process a vast amount of data for analytics.
+Internally, it is just a primitive multithreaded server without coroutines or fibers. Since the server is not designed to process a high rate of simple queries but to process a relatively low rate of complex queries, each of them can process a vast amount of data for analytics.
-The server initializes the `Context` class with the necessary environment for query execution: the list of available databases, users and access rights, settings, clusters, the process list, the query log, and so on. Interpreters use this environment.
+The server initializes the `Context` class with the necessary environment for query execution: the list of available databases, users and access rights, settings, clusters, the process list, the query log, and so on. Interpreters use this environment.

-We maintain full backward and forward compatibility for the server TCP protocol: old clients can talk to new servers, and new clients can talk to old servers. But we don’t want to maintain it eternally, and we are removing support for old versions after about one year.
+We maintain full backward and forward compatibility for the server TCP protocol: old clients can talk to new servers, and new clients can talk to old servers. But we don’t want to maintain it eternally, and we are removing support for old versions after about one year.

-!!! note "Note"
-    For most external applications, we recommend using the HTTP interface because it is simple and easy to use. The TCP protocol is more tightly linked to internal data structures: it uses an internal format for passing blocks of data, and it uses custom framing for compressed data. We haven’t released a C library for that protocol because it requires linking most of the ClickHouse codebase, which is not practical.
+!!! note "Note"
+    For most external applications, we recommend using the HTTP interface because it is simple and easy to use. The TCP protocol is more tightly linked to internal data structures: it uses an internal format for passing blocks of data, and it uses custom framing for compressed data. We haven’t released a C library for that protocol because it requires linking most of the ClickHouse codebase, which is not practical.

-## Distributed Query Execution {#distributed-query-execution}
+## Distributed Query Execution {#distributed-query-execution}

-Servers in a cluster setup are mostly independent. You can create a `Distributed` table on one or all servers in a cluster. The `Distributed` table does not store data itself – it only provides a “view” to all local tables on multiple nodes of a cluster. When you SELECT from a `Distributed` table, it rewrites that query, chooses remote nodes according to load balancing settings, and sends the query to them. The `Distributed` table requests remote servers to process a query just up to a stage where intermediate results from different servers can be merged. Then it receives the intermediate results and merges them. The distributed table tries to distribute as much work as possible to remote servers and does not send much intermediate data over the network.
+Servers in a cluster setup are mostly independent. You can create a `Distributed` table on one or all servers in a cluster. The `Distributed` table does not store data itself – it only provides a “view” to all local tables on multiple nodes of a cluster. When you SELECT from a `Distributed` table, it rewrites that query, chooses remote nodes according to load balancing settings, and sends the query to them. The `Distributed` table requests remote servers to process a query just up to a stage where intermediate results from different servers can be merged. Then it receives the intermediate results and merges them. The distributed table tries to distribute as much work as possible to remote servers and does not send much intermediate data over the network.
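The merge-of-intermediate-results idea can be reduced to a toy two-phase computation: each shard processes its local rows up to a small mergeable state, and the initiator only combines those states. A sketch under that assumption (not the actual Distributed engine protocol):

``` cpp
#include <cstdint>
#include <iostream>
#include <numeric>
#include <vector>

// Phase 1, per shard: reduce local rows to a small intermediate state
// (here: a partial sum), so little data has to cross the network.
uint64_t processLocally(const std::vector<uint64_t> & local_rows)
{
    return std::accumulate(local_rows.begin(), local_rows.end(), uint64_t{0});
}

int main()
{
    std::vector<std::vector<uint64_t>> shards = {{1, 2, 3}, {10, 20}, {100}};

    // Phase 2, on the initiator: merge the partial states.
    uint64_t total = 0;
    for (const auto & shard_data : shards)   // in reality: remote servers, queried in parallel
        total += processLocally(shard_data);

    std::cout << "sum over the cluster = " << total << "\n";  // 136
}
```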
-Things become more complicated when you have subqueries in `IN` or `JOIN` clauses, and each of them uses a `Distributed` table. We have different strategies for the execution of these queries.
+Things become more complicated when you have subqueries in `IN` or `JOIN` clauses, and each of them uses a `Distributed` table. We have different strategies for the execution of these queries.

-There is no global query plan for distributed query execution. Each node has its local query plan for its part of the job. We only have simple one-pass distributed query execution: we send queries to remote nodes and then merge the results. But this is not feasible for complicated queries with high-cardinality GROUP BYs or with a large amount of temporary data for JOIN. In such cases, we need to “reshuffle” data between servers, which requires additional coordination. ClickHouse does not support that kind of query execution, and we need to work on it.
+There is no global query plan for distributed query execution. Each node has its local query plan for its part of the job. We only have simple one-pass distributed query execution: we send queries to remote nodes and then merge the results. But this is not feasible for complicated queries with high-cardinality GROUP BYs or with a large amount of temporary data for JOIN. In such cases, we need to “reshuffle” data between servers, which requires additional coordination. ClickHouse does not support that kind of query execution, and we need to work on it.

-## Merge Tree {#merge-tree}
+## Merge Tree {#merge-tree}

-`MergeTree` is a family of storage engines that supports indexing by primary key. The primary key can be an arbitrary tuple of columns or expressions. Data in a `MergeTree` table is stored in “parts”. Each part stores data in primary key order, so data is ordered lexicographically by the primary key tuple. All the table columns are stored in separate `column.bin` files in these parts. The files consist of compressed blocks. Each block is usually from 64 KB to 1 MB of uncompressed data, depending on the average value size. The blocks consist of column values placed contiguously one after the other. Column values are in the same order for each column (the primary key defines the order), so when you iterate over many columns, you get values for the corresponding rows.
+`MergeTree` is a family of storage engines that supports indexing by primary key. The primary key can be an arbitrary tuple of columns or expressions. Data in a `MergeTree` table is stored in “parts”. Each part stores data in primary key order, so data is ordered lexicographically by the primary key tuple. All the table columns are stored in separate `column.bin` files in these parts. The files consist of compressed blocks. Each block is usually from 64 KB to 1 MB of uncompressed data, depending on the average value size. The blocks consist of column values placed contiguously one after the other. Column values are in the same order for each column (the primary key defines the order), so when you iterate over many columns, you get values for the corresponding rows.

-The primary key itself is “sparse”. It doesn’t address every single row, but only some ranges of data. A separate `primary.idx` file has the value of the primary key for each N-th row, where N is called `index_granularity` (usually, N = 8192). Also, for each column, we have `column.mrk` files with “marks,” which are offsets to each N-th row in the data file. Each mark is a pair: the offset in the file to the beginning of the compressed block, and the offset in the decompressed block to the beginning of data. Usually, compressed blocks are aligned by marks, and the offset in the decompressed block is zero. Data for `primary.idx` always resides in memory, and data for `column.mrk` files is cached.
+The primary key itself is “sparse”. It doesn’t address every single row, but only some ranges of data. A separate `primary.idx` file has the value of the primary key for each N-th row, where N is called `index_granularity` (usually, N = 8192). Also, for each column, we have `column.mrk` files with “marks,” which are offsets to each N-th row in the data file. Each mark is a pair: the offset in the file to the beginning of the compressed block, and the offset in the decompressed block to the beginning of data. Usually, compressed blocks are aligned by marks, and the offset in the decompressed block is zero. Data for `primary.idx` always resides in memory, and data for `column.mrk` files is cached.

-When we are going to read something from a part in `MergeTree`, we look at `primary.idx` data and locate ranges that could contain the requested data, then look at `column.mrk` data and calculate offsets for where to start reading those ranges. Because of sparseness, excess data may be read. ClickHouse is not suitable for a high load of simple point queries, because the entire range with `index_granularity` rows must be read for each key, and the entire compressed block must be decompressed for each column. We made the index sparse because we must be able to maintain trillions of rows per single server without noticeable memory consumption for the index. Also, because the primary key is sparse, it is not unique: it cannot check the existence of the key in the table at INSERT time. You could have many rows with the same key in a table.
+When we are going to read something from a part in `MergeTree`, we look at `primary.idx` data and locate ranges that could contain the requested data, then look at `column.mrk` data and calculate offsets for where to start reading those ranges. Because of sparseness, excess data may be read. ClickHouse is not suitable for a high load of simple point queries, because the entire range with `index_granularity` rows must be read for each key, and the entire compressed block must be decompressed for each column. We made the index sparse because we must be able to maintain trillions of rows per single server without noticeable memory consumption for the index. Also, because the primary key is sparse, it is not unique: it cannot check the existence of the key in the table at INSERT time. You could have many rows with the same key in a table.

-When you `INSERT` a bunch of data into `MergeTree`, that bunch is sorted by primary key order and forms a new part. There are background threads that periodically select some parts and merge them into a single sorted part to keep the number of parts relatively low. That’s why it is called `MergeTree`. Of course, merging leads to “write amplification”. All parts are immutable: they are only created and deleted, but not modified. When SELECT is executed, it holds a snapshot of the table (a set of parts). After merging, we also keep old parts for some time to make recovery after failure easier, so if we see that some merged part is probably broken, we can replace it with its source parts.
+When you `INSERT` a bunch of data into `MergeTree`, that bunch is sorted by primary key order and forms a new part. There are background threads that periodically select some parts and merge them into a single sorted part to keep the number of parts relatively low. That’s why it is called `MergeTree`. Of course, merging leads to “write amplification”. All parts are immutable: they are only created and deleted, but not modified. When SELECT is executed, it holds a snapshot of the table (a set of parts). After merging, we also keep old parts for some time to make recovery after failure easier, so if we see that some merged part is probably broken, we can replace it with its source parts.
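The sparse-index lookup in the read path above amounts to simple arithmetic over mark numbers. A toy sketch with `index_granularity = 4` (not the real `MergeTree` reader, which also consults `column.mrk` offsets into compressed blocks):

``` cpp
#include <cstdint>
#include <iostream>
#include <vector>

int main()
{
    const size_t index_granularity = 4;

    // primary.idx analogue: the key value at every N-th row (one entry per granule).
    std::vector<uint64_t> primary_index = {0, 40, 80, 120};

    uint64_t needle = 75;

    // Find the granule whose key range may contain the needle.
    size_t first_mark = 0;
    while (first_mark + 1 < primary_index.size() && primary_index[first_mark + 1] <= needle)
        ++first_mark;

    // Because the index is sparse, the whole granule must be read,
    // possibly including excess rows.
    size_t begin_row = first_mark * index_granularity;
    size_t end_row = begin_row + index_granularity;
    std::cout << "read rows [" << begin_row << ", " << end_row << ") for key "
              << needle << "\n";  // read rows [4, 8) for key 75
}
```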
-`MergeTree` is not an LSM tree because it doesn’t contain a “memtable” and a “log”: inserted data is written directly to the filesystem. This makes it suitable only for INSERTing data in batches, not by individual row and not very frequently – about once per second is ok, but a thousand times a second is not. We did it this way for simplicity’s sake, and because we are already inserting data in batches in our applications.
+`MergeTree` is not an LSM tree because it doesn’t contain a “memtable” and a “log”: inserted data is written directly to the filesystem. This makes it suitable only for INSERTing data in batches, not by individual row and not very frequently – about once per second is ok, but a thousand times a second is not. We did it this way for simplicity’s sake, and because we are already inserting data in batches in our applications.

-> MergeTree tables can only have one (primary) index: there aren’t any secondary indices. It would be nice to allow multiple physical representations under one logical table, for example, to store data in more than one physical order or even to allow representations with pre-aggregated data along with original data.
+> MergeTree tables can only have one (primary) index: there aren’t any secondary indices. It would be nice to allow multiple physical representations under one logical table, for example, to store data in more than one physical order or even to allow representations with pre-aggregated data along with original data.

-There are MergeTree engines that do additional work during background merges. Examples are `CollapsingMergeTree` and `AggregatingMergeTree`. This could be treated as special support for updates. Keep in mind that these are not real updates because users usually have no control over the time when background merges are executed, and data in a `MergeTree` table is almost always stored in more than one part, not in completely merged form.
+There are MergeTree engines that do additional work during background merges. Examples are `CollapsingMergeTree` and `AggregatingMergeTree`. This could be treated as special support for updates. Keep in mind that these are not real updates because users usually have no control over the time when background merges are executed, and data in a `MergeTree` table is almost always stored in more than one part, not in completely merged form.

-## Replication {#replication}
+## Replication {#replication}

-Replication in ClickHouse can be configured on a per-table basis. You could have some replicated and some non-replicated tables on the same server. You could also have tables replicated in different ways, such as one table with two-factor replication and another with three-factor.
+Replication in ClickHouse can be configured on a per-table basis. You could have some replicated and some non-replicated tables on the same server. You could also have tables replicated in different ways, such as one table with two-factor replication and another with three-factor.

-Replication is implemented in the `ReplicatedMergeTree` storage engine. The path in `ZooKeeper` is specified as a parameter for the storage engine. All tables with the same path in `ZooKeeper` become replicas of each other: they synchronize their data and maintain consistency. Replicas can be added and removed dynamically simply by creating or dropping a table.
+Replication is implemented in the `ReplicatedMergeTree` storage engine. The path in `ZooKeeper` is specified as a parameter for the storage engine. All tables with the same path in `ZooKeeper` become replicas of each other: they synchronize their data and maintain consistency. Replicas can be added and removed dynamically simply by creating or dropping a table.

-Replication uses an asynchronous multi-master scheme. You can insert data into any replica that has a session with `ZooKeeper`, and data is replicated to all other replicas asynchronously. Because ClickHouse doesn’t support UPDATEs, replication is conflict-free. As there is no quorum acknowledgment of inserts, just-inserted data might be lost if one node fails.
+Replication uses an asynchronous multi-master scheme. You can insert data into any replica that has a session with `ZooKeeper`, and data is replicated to all other replicas asynchronously. Because ClickHouse doesn’t support UPDATEs, replication is conflict-free. As there is no quorum acknowledgment of inserts, just-inserted data might be lost if one node fails.

-Metadata for replication is stored in ZooKeeper. There is a replication log that lists what actions to do. Actions are: get part; merge parts; drop a partition; and so on. Each replica copies the replication log to its queue and then executes the actions from the queue. For example, on insertion, the “get the part” action is created in the log, and every replica downloads that part. Merges are coordinated between replicas to get byte-identical results. All parts are merged in the same way on all replicas. This is achieved by electing one replica as the leader, and that replica initiates merges and writes “merge parts” actions to the log.
+Metadata for replication is stored in ZooKeeper. There is a replication log that lists what actions to do. Actions are: get part; merge parts; drop a partition; and so on. Each replica copies the replication log to its queue and then executes the actions from the queue. For example, on insertion, the “get the part” action is created in the log, and every replica downloads that part. Merges are coordinated between replicas to get byte-identical results. All parts are merged in the same way on all replicas. This is achieved by electing one replica as the leader, and that replica initiates merges and writes “merge parts” actions to the log.

-Replication is physical: only compressed parts are transferred between nodes, not queries. Merges are processed on each replica independently in most cases to lower the network costs by avoiding network amplification. Large merged parts are sent over the network only in cases of significant replication lag.
+Replication is physical: only compressed parts are transferred between nodes, not queries. Merges are processed on each replica independently in most cases to lower the network costs by avoiding network amplification. Large merged parts are sent over the network only in cases of significant replication lag.

-Besides, each replica stores its state in ZooKeeper as the set of parts and its checksums. When the state on the local filesystem diverges from the reference state in ZooKeeper, the replica restores its consistency by downloading missing and broken parts from other replicas. When there is some unexpected or broken data in the local filesystem, ClickHouse does not remove it, but moves it to a separate directory and forgets it.
+Besides, each replica stores its state in ZooKeeper as the set of parts and its checksums. When the state on the local filesystem diverges from the reference state in ZooKeeper, the replica restores its consistency by downloading missing and broken parts from other replicas. When there is some unexpected or broken data in the local filesystem, ClickHouse does not remove it, but moves it to a separate directory and forgets it.
-!!! note "Note"
-    The ClickHouse cluster consists of independent shards, and each shard consists of replicas. The cluster is **not elastic**, so after adding a new shard, data is not rebalanced between shards automatically. Instead, the cluster load is supposed to be adjusted to be uneven. This implementation gives you more control, and it is ok for relatively small clusters, such as tens of nodes. But for clusters with hundreds of nodes that we are using in production, this approach becomes a significant drawback. We should implement a table engine that spans across the cluster with dynamically replicated regions that could be split and balanced between clusters automatically.
+!!! note "Note"
+    The ClickHouse cluster consists of independent shards, and each shard consists of replicas. The cluster is **not elastic**, so after adding a new shard, data is not rebalanced between shards automatically. Instead, the cluster load is supposed to be adjusted to be uneven. This implementation gives you more control, and it is ok for relatively small clusters, such as tens of nodes. But for clusters with hundreds of nodes that we are using in production, this approach becomes a significant drawback. We should implement a table engine that spans across the cluster with dynamically replicated regions that could be split and balanced between clusters automatically.

-{## [Original article](https://clickhouse.tech/docs/en/development/architecture/) ##}
+{## [Original article](https://clickhouse.tech/docs/en/development/architecture/) ##}
diff --git a/docs/fa/development/browse_code.md b/docs/fa/development/browse_code.md
index c3016d5e1dc..1609e4b1d77 100644
--- a/docs/fa/development/browse_code.md
+++ b/docs/fa/development/browse_code.md
@@ -1,11 +1,15 @@
 ---
-en_copy: true
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 63
+toc_title: "\u0645\u0631\u0648\u0631 \u06A9\u062F \u0645\u0646\u0628\u0639 \u06A9\u0644\
+  \u06CC\u06A9"
 ---

-# Browse ClickHouse Source Code {#browse-clickhouse-source-code}
+# Browse ClickHouse Source Code {#browse-clickhouse-source-code}

-You can use the **Woboq** online code browser available [here](https://clickhouse-test-reports.s3.yandex.net/codebrowser/html_report///ClickHouse/dbms/index.html). It provides code navigation and semantic highlighting, search and indexing. The code snapshot is updated daily.
+You can use the **Woboq** online code browser available [here](https://clickhouse-test-reports.s3.yandex.net/codebrowser/html_report///ClickHouse/dbms/index.html). It provides code navigation and semantic highlighting, search and indexing. The code snapshot is updated daily.

-Also, you can browse sources on [GitHub](https://github.com/ClickHouse/ClickHouse) as usual.
+Also, you can browse sources on [GitHub](https://github.com/ClickHouse/ClickHouse) as usual.

-If you’re interested in what IDE to use, we recommend CLion, QT Creator, VS Code and KDevelop (with caveats). You can use any favourite IDE. Vim and Emacs also count.
+If you’re interested in what IDE to use, we recommend CLion, QT Creator, VS Code and KDevelop (with caveats). You can use any favourite IDE. Vim and Emacs also count.
diff --git a/docs/fa/development/build.md b/docs/fa/development/build.md
index 32042a4128e..cef48a69fe3 100644
--- a/docs/fa/development/build.md
+++ b/docs/fa/development/build.md
@@ -1,26 +1,30 @@
 ---
-en_copy: true
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 64
+toc_title: "\u0646\u062D\u0648\u0647 \u0633\u0627\u062E\u062A \u06A9\u0644\u06CC\u06A9\
+  \ \u062F\u0631 \u0644\u06CC\u0646\u0648\u06A9\u0633"
 ---

-# How to Build ClickHouse for Development {#how-to-build-clickhouse-for-development}
+# چگونه ClickHouse را برای توسعه بسازیم {#how-to-build-clickhouse-for-development}

-The following tutorial is based on the Ubuntu Linux system.
-With appropriate changes, it should also work on any other Linux distribution.
-Supported platforms: x86\_64 and AArch64. Support for Power9 is experimental.
+راهنمای زیر بر اساس سیستم لینوکس اوبونتو است.
+با تغییرات مناسب, باید بر روی هر توزیع لینوکس دیگری نیز کار کند.
+سیستم عامل های پشتیبانی شده: x86\_64 و AArch64. پشتیبانی از Power9 تجربی است.

-## Install Git, CMake, Python and Ninja {#install-git-cmake-python-and-ninja}
+## نصب Git, CMake, Python و Ninja {#install-git-cmake-python-and-ninja}

``` bash
$ sudo apt-get install git cmake python ninja-build
```

-Or cmake3 instead of cmake on older systems.
+یا cmake3 به جای cmake در سیستم های قدیمی تر.

-## Install GCC 9 {#install-gcc-9}
+## نصب GCC 9 {#install-gcc-9}

-There are several ways to do this.
+راه های مختلفی برای انجام این کار وجود دارد.

-### Install from a PPA Package {#install-from-a-ppa-package}
+### نصب از یک بسته PPA {#install-from-a-ppa-package}

``` bash
$ sudo apt-get install software-properties-common
@@ -29,30 +33,30 @@ $ sudo apt-get update
$ sudo apt-get install gcc-9 g++-9
```

-### Install from Sources {#install-from-sources}
+### نصب از منابع {#install-from-sources}

-Look at [utils/ci/build-gcc-from-sources.sh](https://github.com/ClickHouse/ClickHouse/blob/master/utils/ci/build-gcc-from-sources.sh)
+نگاه کنید به [utils/ci/build-gcc-from-sources.sh](https://github.com/ClickHouse/ClickHouse/blob/master/utils/ci/build-gcc-from-sources.sh)

-## Use GCC 9 for Builds {#use-gcc-9-for-builds}
+## استفاده از GCC 9 برای ساخت {#use-gcc-9-for-builds}

``` bash
$ export CC=gcc-9
$ export CXX=g++-9
```

-## Checkout ClickHouse Sources {#checkout-clickhouse-sources}
+## دریافت منابع ClickHouse {#checkout-clickhouse-sources}

``` bash
$ git clone --recursive git@github.com:ClickHouse/ClickHouse.git
```

-or
+یا

``` bash
$ git clone --recursive https://github.com/ClickHouse/ClickHouse.git
```

-## Build ClickHouse {#build-clickhouse}
+## ساخت ClickHouse {#build-clickhouse}

``` bash
$ cd ClickHouse
@@ -63,23 +67,23 @@ $ ninja
$ cd ..
```

-To create an executable, run `ninja clickhouse`.
-This will create the `programs/clickhouse` executable, which can be used with `client` or `server` arguments.
+برای ایجاد یک فایل اجرایی, `ninja clickhouse` را اجرا کنید.
+این کار فایل اجرایی `programs/clickhouse` را ایجاد می کند که می تواند با پارامترهای `client` یا `server` استفاده شود.
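As a quick illustration of the `client` and `server` arguments just mentioned, a hypothetical session could look like the sketch below; the relative paths assume you are still inside the `build` directory created above and are not part of this patch.

``` bash
# Hypothetical usage sketch (paths are assumptions): start a server from
# the build directory using the in-tree development config...
$ ./programs/clickhouse server --config-file=../programs/server/config.xml

# ...then, from another terminal, attach a client to it.
$ ./programs/clickhouse client --host 127.0.0.1
```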
-# How to Build ClickHouse on Any Linux {#how-to-build-clickhouse-on-any-linux}
+# چگونه ClickHouse را بر روی هر لینوکسی بسازیم {#how-to-build-clickhouse-on-any-linux}

-The build requires the following components:
+ساخت به اجزای زیر نیاز دارد:

-- Git (is used only to checkout the sources, it’s not needed for the build)
-- CMake 3.10 or newer
-- Ninja (recommended) or Make
-- C++ compiler: gcc 9 or clang 8 or newer
-- Linker: lld or gold (the classic GNU ld won’t work)
-- Python (is only used inside LLVM build and it is optional)
+- Git (تنها برای دریافت منابع استفاده می شود, برای ساخت مورد نیاز نیست)
+- CMake 3.10 یا جدیدتر
+- Ninja (توصیه می شود) یا Make
+- کامپایلر ++C: gcc 9 یا clang 8 یا جدیدتر
+- لینکر: lld یا gold (لینکر کلاسیک GNU ld کار نخواهد کرد)
+- Python (فقط در داخل ساخت LLVM استفاده می شود و اختیاری است)

-If all the components are installed, you may build in the same way as the steps above.
+اگر تمام اجزا نصب شده باشند, می توانید به همان روش مراحل بالا بسازید.

-Example for Ubuntu Eoan:
+مثال برای Ubuntu Eoan:

    sudo apt update
    sudo apt install git cmake ninja-build g++ python
@@ -88,7 +92,7 @@ Example for Ubuntu Eoan:
    cmake ../ClickHouse
    ninja

-Example for OpenSUSE Tumbleweed:
+مثال برای OpenSUSE Tumbleweed:

    sudo zypper install git cmake ninja gcc-c++ python lld
    git clone --recursive https://github.com/ClickHouse/ClickHouse.git
    mkdir build && cd build
    cmake ../ClickHouse
    ninja

-Example for Fedora Rawhide:
+مثال برای Fedora Rawhide:

    sudo yum update
    yum --nogpg install git cmake make gcc-c++ python2
@@ -105,34 +109,34 @@ Example for Fedora Rawhide:
    cmake ../ClickHouse
    make -j $(nproc)

-# You Don’t Have to Build ClickHouse {#you-dont-have-to-build-clickhouse}
+# لازم نیست ClickHouse را بسازید {#you-dont-have-to-build-clickhouse}

-ClickHouse is available in pre-built binaries and packages. Binaries are portable and can be run on any Linux flavour.
+کلیک هاوس به صورت باینری های از پیش ساخته شده و بسته ها در دسترس است. باینری ها قابل حمل هستند و می توانند بر روی هر نوع لینوکسی اجرا شوند.

-They are built for stable, prestable and testing releases as long as for every commit to master and for every pull request.
+این ها برای انتشارهای stable, prestable و testing و همچنین برای هر کامیت به master و برای هر درخواست کشش ساخته می شوند.

-To find the freshest build from `master`, go to [commits page](https://github.com/ClickHouse/ClickHouse/commits/master), click on the first green checkmark or red cross near commit, and click to the “Details” link right after “ClickHouse Build Check”.
+برای پیدا کردن تازه ترین ساخت از `master`, به [صفحه کامیت ها](https://github.com/ClickHouse/ClickHouse/commits/master) بروید, بر روی اولین علامت سبز یا صلیب قرمز نزدیک کامیت کلیک کنید, و بر روی پیوند “Details” درست بعد از “ClickHouse Build Check” کلیک کنید.
-# How to Build ClickHouse Debian Package {#how-to-build-clickhouse-debian-package} +# چگونه برای ساخت مخزن دبیان بسته {#how-to-build-clickhouse-debian-package} -## Install Git and Pbuilder {#install-git-and-pbuilder} +## نصب برنامه جی تی و پل ساز {#install-git-and-pbuilder} ``` bash $ sudo apt-get update $ sudo apt-get install git python pbuilder debhelper lsb-release fakeroot sudo debian-archive-keyring debian-keyring ``` -## Checkout ClickHouse Sources {#checkout-clickhouse-sources-1} +## پرداخت منابع کلیک {#checkout-clickhouse-sources-1} ``` bash $ git clone --recursive --branch master https://github.com/ClickHouse/ClickHouse.git $ cd ClickHouse ``` -## Run Release Script {#run-release-script} +## اجرای اسکریپت انتشار {#run-release-script} ``` bash $ ./release ``` -[Original article](https://clickhouse.tech/docs/en/development/build/) +[مقاله اصلی](https://clickhouse.tech/docs/en/development/build/) diff --git a/docs/fa/development/build_cross_arm.md b/docs/fa/development/build_cross_arm.md index 0936a3133b2..3947252fa5c 100644 --- a/docs/fa/development/build_cross_arm.md +++ b/docs/fa/development/build_cross_arm.md @@ -1,17 +1,22 @@ --- -en_copy: true +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 67 +toc_title: "\u0686\u06AF\u0648\u0646\u0647 \u0628\u0631\u0627\u06CC \u0633\u0627\u062E\ + \u062A ClickHouse \u062F\u0631 \u0644\u06CC\u0646\u0648\u06A9\u0633 \u0628\u0631\ + \u0627\u06CC AARCH64 (ARM64)" --- -# How to Build ClickHouse on Linux for AARCH64 (ARM64) architecture {#how-to-build-clickhouse-on-linux-for-aarch64-arm64-architecture} +# چگونه برای ساخت clickhouse در لینوکس برای aarch64 (arm64) معماری {#how-to-build-clickhouse-on-linux-for-aarch64-arm64-architecture} -This is for the case when you have Linux machine and want to use it to build `clickhouse` binary that will run on another Linux machine with AARCH64 CPU architecture. This is intended for continuous integration checks that run on Linux servers. +این برای مواردی است که شما دستگاه لینوکس دارید و می خواهید از این برای ساخت استفاده کنید `clickhouse` دودویی که در یک ماشین لینوکس دیگر با معماری پردازنده عاشق64 اجرا خواهد شد. این است که برای چک ادغام مداوم است که بر روی سرور های لینوکس اجرا در نظر گرفته شده. -The cross-build for AARCH64 is based on the [Build instructions](build.md), follow them first. +صلیب-ساخت برای aarch64 است که بر اساس [ساخت دستورالعمل](build.md) اول دنبالشون کن -# Install Clang-8 {#install-clang-8} +# نصب کلانگ-8 {#install-clang-8} -Follow the instructions from https://apt.llvm.org/ for your Ubuntu or Debian setup. -For example, in Ubuntu Bionic you can use the following commands: +دستورالعمل از دنبال https://apt.llvm.org / برای اوبونتو یا دبیان راه اندازی خود را. 
+مثلا, در اوبونتو بیونیک شما می توانید دستورات زیر استفاده کنید: ``` bash echo "deb [trusted=yes] http://apt.llvm.org/bionic/ llvm-toolchain-bionic-8 main" | sudo tee /etc/apt/sources.list.d/llvm.list @@ -19,7 +24,7 @@ sudo apt-get update sudo apt-get install clang-8 ``` -# Install Cross-Compilation Toolset {#install-cross-compilation-toolset} +# نصب صلیب-تالیف مجموعه ابزارهای {#install-cross-compilation-toolset} ``` bash cd ClickHouse @@ -28,7 +33,7 @@ wget 'https://developer.arm.com/-/media/Files/downloads/gnu-a/8.3-2019.03/binrel tar xJf gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz -C build-aarch64/cmake/toolchain/linux-aarch64 --strip-components=1 ``` -# Build ClickHouse {#build-clickhouse} +# ساخت خانه کلیک {#build-clickhouse} ``` bash cd ClickHouse @@ -37,4 +42,4 @@ CC=clang-8 CXX=clang++-8 cmake . -Bbuild-arm64 -DCMAKE_TOOLCHAIN_FILE=cmake/linu ninja -C build-arm64 ``` -The resulting binary will run only on Linux with the AARCH64 CPU architecture. +باینری حاصل تنها در لینوکس با معماری پردازنده اروچ64 اجرا خواهد شد. diff --git a/docs/fa/development/build_cross_osx.md b/docs/fa/development/build_cross_osx.md index a708dc4d4f3..dd3a1fcc21a 100644 --- a/docs/fa/development/build_cross_osx.md +++ b/docs/fa/development/build_cross_osx.md @@ -1,26 +1,32 @@ --- -en_copy: true +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 66 +toc_title: "\u0686\u06AF\u0648\u0646\u0647 \u0628\u0631\u0627\u06CC \u0633\u0627\u062E\ + \u062A \u062A\u0627\u062A\u0631 \u062F\u0631 \u0644\u06CC\u0646\u0648\u06A9\u0633\ + \ \u0628\u0631\u0627\u06CC \u0633\u06CC\u0633\u062A\u0645 \u0639\u0627\u0645\u0644\ + \ \u0645\u06A9 \u0627\u06CC\u06A9\u0633" --- -# How to Build ClickHouse on Linux for Mac OS X {#how-to-build-clickhouse-on-linux-for-mac-os-x} +# چگونه برای ساخت تاتر در لینوکس برای سیستم عامل مک ایکس {#how-to-build-clickhouse-on-linux-for-mac-os-x} -This is for the case when you have Linux machine and want to use it to build `clickhouse` binary that will run on OS X. This is intended for continuous integration checks that run on Linux servers. If you want to build ClickHouse directly on Mac OS X, then proceed with [another instruction](build_osx.md). +این برای مواردی است که شما دستگاه لینوکس دارید و می خواهید از این برای ساخت استفاده کنید `clickhouse` این است که برای چک ادغام مداوم است که بر روی سرور های لینوکس اجرا در نظر گرفته شده. اگر شما می خواهید برای ساخت خانه کلیک به طور مستقیم در سیستم عامل مک ایکس, سپس با ادامه [دستورالعمل دیگر](build_osx.md). -The cross-build for Mac OS X is based on the [Build instructions](build.md), follow them first. +کراس ساخت برای سیستم عامل مک ایکس بر اساس [ساخت دستورالعمل](build.md) اول دنبالشون کن -# Install Clang-8 {#install-clang-8} +# نصب کلانگ-8 {#install-clang-8} -Follow the instructions from https://apt.llvm.org/ for your Ubuntu or Debian setup. -For example the commands for Bionic are like: +دستورالعمل از دنبال https://apt.llvm.org / برای اوبونتو یا دبیان راه اندازی خود را. 
+به عنوان مثال دستورات برای بیونیک مانند: ``` bash sudo echo "deb [trusted=yes] http://apt.llvm.org/bionic/ llvm-toolchain-bionic-8 main" >> /etc/apt/sources.list sudo apt-get install clang-8 ``` -# Install Cross-Compilation Toolset {#install-cross-compilation-toolset} +# نصب مجموعه ابزار صلیب کشی {#install-cross-compilation-toolset} -Let’s remember the path where we install `cctools` as ${CCTOOLS} +بیایید مسیری را که ما نصب می کنیم به یاد داشته باشیم `cctools` به عنوان ${CCTOOLS} ``` bash mkdir ${CCTOOLS} @@ -37,7 +43,7 @@ cd cctools-port/cctools make install ``` -Also, we need to download macOS X SDK into the working tree. +همچنین, ما نیاز به دانلود ماکو ایکس انحراف معیار به درخت کار. ``` bash cd ClickHouse @@ -46,7 +52,7 @@ mkdir -p build-darwin/cmake/toolchain/darwin-x86_64 tar xJf MacOSX10.14.sdk.tar.xz -C build-darwin/cmake/toolchain/darwin-x86_64 --strip-components=1 ``` -# Build ClickHouse {#build-clickhouse} +# ساخت خانه کلیک {#build-clickhouse} ``` bash cd ClickHouse @@ -58,4 +64,4 @@ CC=clang-8 CXX=clang++-8 cmake . -Bbuild-osx -DCMAKE_TOOLCHAIN_FILE=cmake/darwin ninja -C build-osx ``` -The resulting binary will have a Mach-O executable format and can’t be run on Linux. +باینری حاصل یک فرمت اجرایی ماخ ای داشته باشد و نمی تواند در لینوکس اجرا شود. diff --git a/docs/fa/development/build_osx.md b/docs/fa/development/build_osx.md index 6b1839aaf7f..7782f9104f4 100644 --- a/docs/fa/development/build_osx.md +++ b/docs/fa/development/build_osx.md @@ -1,30 +1,35 @@ --- -en_copy: true +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 65 +toc_title: "\u0686\u06AF\u0648\u0646\u0647 \u0628\u0631\u0627\u06CC \u0633\u0627\u062E\ + \u062A \u062A\u0627\u062A\u0631 \u062F\u0631 \u0633\u06CC\u0633\u062A\u0645 \u0639\ + \u0627\u0645\u0644 \u0645\u06A9 \u0627\u06CC\u06A9\u0633" --- -# How to Build ClickHouse on Mac OS X {#how-to-build-clickhouse-on-mac-os-x} +# چگونه برای ساخت تاتر در سیستم عامل مک ایکس {#how-to-build-clickhouse-on-mac-os-x} -Build should work on Mac OS X 10.15 (Catalina) +ساخت باید در سیستم عامل مک ایکس کار 10.15 (کاتالینا) -## Install Homebrew {#install-homebrew} +## نصب گشتن {#install-homebrew} ``` bash $ /usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)" ``` -## Install Required Compilers, Tools, and Libraries {#install-required-compilers-tools-and-libraries} +## نصب کامپایلرهای مورد نیاز, ابزار, و کتابخانه {#install-required-compilers-tools-and-libraries} ``` bash $ brew install cmake ninja libtool gettext ``` -## Checkout ClickHouse Sources {#checkout-clickhouse-sources} +## پرداخت منابع کلیک {#checkout-clickhouse-sources} ``` bash $ git clone --recursive git@github.com:ClickHouse/ClickHouse.git ``` -or +یا ``` bash $ git clone --recursive https://github.com/ClickHouse/ClickHouse.git @@ -32,7 +37,7 @@ $ git clone --recursive https://github.com/ClickHouse/ClickHouse.git $ cd ClickHouse ``` -## Build ClickHouse {#build-clickhouse} +## ساخت خانه کلیک {#build-clickhouse} ``` bash $ mkdir build @@ -42,16 +47,16 @@ $ ninja $ cd .. ``` -## Caveats {#caveats} +## هشدارها {#caveats} -If you intend to run clickhouse-server, make sure to increase the system’s maxfiles variable. +اگر شما قصد اجرای clickhouse-سرور مطمئن شوید که برای افزایش سیستم maxfiles متغیر است. -!!! info "Note" - You’ll need to use sudo. +!!! 
info "یادداشت" + باید از سودو استفاده کنی -To do so, create the following file: +برای انجام این کار فایل زیر را ایجاد کنید: -/Library/LaunchDaemons/limit.maxfiles.plist: +/Library/LaunchDaemons/محدود می کند.مکسفیلزجان کلام: ``` xml @@ -77,14 +82,14 @@ To do so, create the following file: ``` -Execute the following command: +دستور زیر را اجرا کنید: ``` bash $ sudo chown root:wheel /Library/LaunchDaemons/limit.maxfiles.plist ``` -Reboot. +راه اندازی مجدد. -To check if it’s working, you can use `ulimit -n` command. +برای بررسی اگر این کار, شما می توانید استفاده کنید `ulimit -n` فرمان. -[Original article](https://clickhouse.tech/docs/en/development/build_osx/) +[مقاله اصلی](https://clickhouse.tech/docs/en/development/build_osx/) diff --git a/docs/fa/development/contrib.md b/docs/fa/development/contrib.md index 2404509187e..574b8f55436 100644 --- a/docs/fa/development/contrib.md +++ b/docs/fa/development/contrib.md @@ -1,39 +1,44 @@ --- -en_copy: true +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 70 +toc_title: "\u06A9\u062A\u0627\u0628\u062E\u0627\u0646\u0647 \u0647\u0627\u06CC \u0634\ + \u062E\u0635 \u062B\u0627\u0644\u062B \u0627\u0633\u062A\u0641\u0627\u062F\u0647\ + \ \u0645\u06CC \u0634\u0648\u062F" --- -# Third-Party Libraries Used {#third-party-libraries-used} +# کتابخانه های شخص ثالث استفاده می شود {#third-party-libraries-used} -| Library | License | -|---------------------|----------------------------------------------------------------------------------------------------------------------------------------------| -| base64 | [BSD 2-Clause License](https://github.com/aklomp/base64/blob/a27c565d1b6c676beaf297fe503c4518185666f7/LICENSE) | -| boost | [Boost Software License 1.0](https://github.com/ClickHouse-Extras/boost-extra/blob/6883b40449f378019aec792f9983ce3afc7ff16e/LICENSE_1_0.txt) | -| brotli | [MIT](https://github.com/google/brotli/blob/master/LICENSE) | -| capnproto | [MIT](https://github.com/capnproto/capnproto/blob/master/LICENSE) | -| cctz | [Apache License 2.0](https://github.com/google/cctz/blob/4f9776a310f4952454636363def82c2bf6641d5f/LICENSE.txt) | -| double-conversion | [BSD 3-Clause License](https://github.com/google/double-conversion/blob/cf2f0f3d547dc73b4612028a155b80536902ba02/LICENSE) | -| FastMemcpy | [MIT](https://github.com/ClickHouse/ClickHouse/blob/master/libs/libmemcpy/impl/LICENSE) | -| googletest | [BSD 3-Clause License](https://github.com/google/googletest/blob/master/LICENSE) | -| h3 | [Apache License 2.0](https://github.com/uber/h3/blob/master/LICENSE) | -| hyperscan | [BSD 3-Clause License](https://github.com/intel/hyperscan/blob/master/LICENSE) | -| libbtrie | [BSD 2-Clause License](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/libbtrie/LICENSE) | -| libcxxabi | [BSD + MIT](https://github.com/ClickHouse/ClickHouse/blob/master/libs/libglibc-compatibility/libcxxabi/LICENSE.TXT) | -| libdivide | [Zlib License](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/libdivide/LICENSE.txt) | -| libgsasl | [LGPL v2.1](https://github.com/ClickHouse-Extras/libgsasl/blob/3b8948a4042e34fb00b4fb987535dc9e02e39040/LICENSE) | -| libhdfs3 | [Apache License 2.0](https://github.com/ClickHouse-Extras/libhdfs3/blob/bd6505cbb0c130b0db695305b9a38546fa880e5a/LICENSE.txt) | -| libmetrohash | [Apache License 2.0](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/libmetrohash/LICENSE) | -| libpcg-random | [Apache License 
2.0](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/libpcg-random/LICENSE-APACHE.txt) |
-| libressl | [OpenSSL License](https://github.com/ClickHouse-Extras/ssl/blob/master/COPYING) |
-| librdkafka | [BSD 2-Clause License](https://github.com/edenhill/librdkafka/blob/363dcad5a23dc29381cc626620e68ae418b3af19/LICENSE) |
-| libwidechar\_width | [CC0 1.0 Universal](https://github.com/ClickHouse/ClickHouse/blob/master/libs/libwidechar_width/LICENSE) |
-| llvm | [BSD 3-Clause License](https://github.com/ClickHouse-Extras/llvm/blob/163def217817c90fb982a6daf384744d8472b92b/llvm/LICENSE.TXT) |
-| lz4 | [BSD 2-Clause License](https://github.com/lz4/lz4/blob/c10863b98e1503af90616ae99725ecd120265dfb/LICENSE) |
-| mariadb-connector-c | [LGPL v2.1](https://github.com/ClickHouse-Extras/mariadb-connector-c/blob/3.1/COPYING.LIB) |
-| murmurhash | [Public Domain](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/murmurhash/LICENSE) |
-| pdqsort | [Zlib License](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/pdqsort/license.txt) |
-| poco | [Boost Software License - Version 1.0](https://github.com/ClickHouse-Extras/poco/blob/fe5505e56c27b6ecb0dcbc40c49dc2caf4e9637f/LICENSE) |
-| protobuf | [BSD 3-Clause License](https://github.com/ClickHouse-Extras/protobuf/blob/12735370922a35f03999afff478e1c6d7aa917a4/LICENSE) |
-| re2 | [BSD 3-Clause License](https://github.com/google/re2/blob/7cf8b88e8f70f97fd4926b56aa87e7f53b2717e0/LICENSE) |
-| UnixODBC | [LGPL v2.1](https://github.com/ClickHouse-Extras/UnixODBC/tree/b0ad30f7f6289c12b76f04bfb9d466374bb32168) |
-| zlib-ng | [Zlib License](https://github.com/ClickHouse-Extras/zlib-ng/blob/develop/LICENSE.md) |
-| zstd | [BSD 3-Clause License](https://github.com/facebook/zstd/blob/dev/LICENSE) |
+| کتابخانه | مجوز |
+|-----------------|---------------------------------------------------------------------------------------------------------------------------------------------|
+| base64 | [BSD 2-Clause License](https://github.com/aklomp/base64/blob/a27c565d1b6c676beaf297fe503c4518185666f7/LICENSE) |
+| boost | [Boost Software License 1.0](https://github.com/ClickHouse-Extras/boost-extra/blob/6883b40449f378019aec792f9983ce3afc7ff16e/LICENSE_1_0.txt) |
+| brotli | [MIT](https://github.com/google/brotli/blob/master/LICENSE) |
+| capnproto | [MIT](https://github.com/capnproto/capnproto/blob/master/LICENSE) |
+| cctz | [Apache License 2.0](https://github.com/google/cctz/blob/4f9776a310f4952454636363def82c2bf6641d5f/LICENSE.txt) |
+| double-conversion | [BSD 3-Clause License](https://github.com/google/double-conversion/blob/cf2f0f3d547dc73b4612028a155b80536902ba02/LICENSE) |
+| FastMemcpy | [MIT](https://github.com/ClickHouse/ClickHouse/blob/master/libs/libmemcpy/impl/LICENSE) |
+| googletest | [BSD 3-Clause License](https://github.com/google/googletest/blob/master/LICENSE) |
+| h3 | [Apache License 2.0](https://github.com/uber/h3/blob/master/LICENSE) |
+| hyperscan | [BSD 3-Clause License](https://github.com/intel/hyperscan/blob/master/LICENSE) |
+| libbtrie | [BSD 2-Clause License](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/libbtrie/LICENSE) |
+| libcxxabi | [BSD + MIT](https://github.com/ClickHouse/ClickHouse/blob/master/libs/libglibc-compatibility/libcxxabi/LICENSE.TXT) |
+| libdivide | [Zlib License](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/libdivide/LICENSE.txt) |
+| libgsasl | [LGPL v2.1](https://github.com/ClickHouse-Extras/libgsasl/blob/3b8948a4042e34fb00b4fb987535dc9e02e39040/LICENSE) |
+| libhdfs3 | [Apache License 2.0](https://github.com/ClickHouse-Extras/libhdfs3/blob/bd6505cbb0c130b0db695305b9a38546fa880e5a/LICENSE.txt) |
+| libmetrohash | [Apache License 2.0](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/libmetrohash/LICENSE) |
+| libpcg-random | [Apache License 2.0](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/libpcg-random/LICENSE-APACHE.txt) |
+| libressl | [مجوز OpenSSL](https://github.com/ClickHouse-Extras/ssl/blob/master/COPYING) |
+| librdkafka | [BSD 2-Clause License](https://github.com/edenhill/librdkafka/blob/363dcad5a23dc29381cc626620e68ae418b3af19/LICENSE) |
+| libwidechar\_width | [CC0 1.0 جهانی](https://github.com/ClickHouse/ClickHouse/blob/master/libs/libwidechar_width/LICENSE) |
+| llvm | [BSD 3-Clause License](https://github.com/ClickHouse-Extras/llvm/blob/163def217817c90fb982a6daf384744d8472b92b/llvm/LICENSE.TXT) |
+| lz4 | [BSD 2-Clause License](https://github.com/lz4/lz4/blob/c10863b98e1503af90616ae99725ecd120265dfb/LICENSE) |
+| mariadb-connector-c | [LGPL v2.1](https://github.com/ClickHouse-Extras/mariadb-connector-c/blob/3.1/COPYING.LIB) |
+| murmurhash | [دامنه عمومی](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/murmurhash/LICENSE) |
+| pdqsort | [Zlib License](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/pdqsort/license.txt) |
+| poco | [Boost Software License - Version 1.0](https://github.com/ClickHouse-Extras/poco/blob/fe5505e56c27b6ecb0dcbc40c49dc2caf4e9637f/LICENSE) |
+| protobuf | [BSD 3-Clause License](https://github.com/ClickHouse-Extras/protobuf/blob/12735370922a35f03999afff478e1c6d7aa917a4/LICENSE) |
+| re2 | [BSD 3-Clause License](https://github.com/google/re2/blob/7cf8b88e8f70f97fd4926b56aa87e7f53b2717e0/LICENSE) |
+| UnixODBC | [LGPL v2.1](https://github.com/ClickHouse-Extras/UnixODBC/tree/b0ad30f7f6289c12b76f04bfb9d466374bb32168) |
+| zlib-ng | [Zlib License](https://github.com/ClickHouse-Extras/zlib-ng/blob/develop/LICENSE.md) |
+| zstd | [BSD 3-Clause License](https://github.com/facebook/zstd/blob/dev/LICENSE) |
diff --git a/docs/fa/development/developer_instruction.md b/docs/fa/development/developer_instruction.md
index 8c5c0214c3e..d518caeda04 100644
--- a/docs/fa/development/developer_instruction.md
+++ b/docs/fa/development/developer_instruction.md
@@ -1,60 +1,65 @@
 ---
-en_copy: true
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 61
+toc_title: "\u062F\u0633\u062A\u0648\u0631\u0627\u0644\u0639\u0645\u0644 \u062A\u0648\
+  \u0633\u0639\u0647 \u062F\u0647\u0646\u062F\u0647 \u06A9\u0644\u06CC\u06A9 \u0645\
+  \u0628\u062A\u062F\u06CC"
 ---

-Building of ClickHouse is supported on Linux, FreeBSD and Mac OS X.
+ساخت ClickHouse بر روی لینوکس, FreeBSD و سیستم عامل مک ایکس پشتیبانی می شود.

-# If you use Windows {#if-you-use-windows}
+# در صورت استفاده از ویندوز {#if-you-use-windows}

-If you use Windows, you need to create a virtual machine with Ubuntu. To start working with a virtual machine please install VirtualBox. You can download Ubuntu from the website: https://www.ubuntu.com/\#download. Please create a virtual machine from the downloaded image (you should reserve at least 4GB of RAM for it). To run a command-line terminal in Ubuntu, please locate a program containing the word “terminal” in its name (gnome-terminal, konsole etc.) or just press Ctrl+Alt+T.
+اگر از ویندوز استفاده می کنید, باید یک ماشین مجازی با اوبونتو ایجاد کنید. برای شروع کار با ماشین مجازی لطفا VirtualBox را نصب کنید.
شما می توانید اوبونتو را از وب سایت دانلود کنید: https://www.ubuntu.com/\#download. لطفا یک ماشین مجازی از تصویر دانلود شده ایجاد کنید (باید حداقل 4 گیگابایت رم برای آن رزرو کنید). برای اجرای یک ترمینال خط فرمان در اوبونتو, لطفا برنامه ای را پیدا کنید که کلمه “terminal” در نامش دارد (گنوم ترمینال, کنسول و غیره.) یا فقط Ctrl+Alt+T را فشار دهید.

-# If you use a 32-bit system {#if-you-use-a-32-bit-system}
+# اگر از یک سیستم 32 بیتی استفاده می کنید {#if-you-use-a-32-bit-system}

-ClickHouse cannot work or build on a 32-bit system. You should acquire access to a 64-bit system and you can continue reading.
+کلیک هاوس نمی تواند بر روی یک سیستم 32 بیتی کار یا ساخته شود. باید به یک سیستم 64 بیتی دسترسی پیدا کنید و سپس می توانید ادامه دهید.

-# Creating a repository on GitHub {#creating-a-repository-on-github}
+# ایجاد یک مخزن در گیتهاب {#creating-a-repository-on-github}

-To start working with ClickHouse repository you will need a GitHub account.
+برای شروع کار با مخزن ClickHouse به یک حساب گیتهاب نیاز دارید.

-You probably already have one, but if you don’t, please register at https://github.com. In case you do not have SSH keys, you should generate them and then upload them on GitHub. It is required for sending over your patches. It is also possible to use the same SSH keys that you use with any other SSH servers - probably you already have those.
+احتمالا در حال حاضر یکی دارید, اما اگر ندارید, لطفا در https://github.com ثبت نام کنید. در صورتی که کلیدهای SSH ندارید, باید تولیدشان کنید و سپس در گیتهاب بارگذاری کنید. این برای ارسال وصله های شما مورد نیاز است. همچنین می توان از همان کلیدهای SSH که با هر سرور SSH دیگری استفاده می کنید استفاده کرد - احتمالا در حال حاضر چنین کلیدهایی دارید.

-Create a fork of ClickHouse repository. To do that please click on the “fork” button in the upper right corner at https://github.com/ClickHouse/ClickHouse. It will fork your own copy of ClickHouse/ClickHouse to your account.
+یک چنگال (fork) از مخزن ClickHouse ایجاد کنید. برای انجام این کار لطفا بر روی دکمه “fork” در گوشه سمت راست بالا در https://github.com/ClickHouse/ClickHouse کلیک کنید. این کار کپی شخصی شما از ClickHouse/ClickHouse را در حساب کاربری تان ایجاد می کند.

-The development process consists of first committing the intended changes into your fork of ClickHouse and then creating a “pull request” for these changes to be accepted into the main repository (ClickHouse/ClickHouse).
+روند توسعه شامل این است که ابتدا تغییرات مورد نظر را به چنگال خود از ClickHouse کامیت کنید و سپس برای پذیرفته شدن این تغییرات در مخزن اصلی (ClickHouse/ClickHouse) یک “pull request” ایجاد کنید.

-To work with git repositories, please install `git`.
+برای کار با مخازن گیت, لطفا `git` را نصب کنید.

-To do that in Ubuntu you would run in the command line terminal:
+برای انجام این کار در اوبونتو در ترمینال خط فرمان اجرا کنید:

    sudo apt update
    sudo apt install git

-A brief manual on using Git can be found here: https://services.github.com/on-demand/downloads/github-git-cheat-sheet.pdf.
-For a detailed manual on Git see https://git-scm.com/book/en/v2.
+کتابچه راهنمای مختصری درباره استفاده از گیت را می توان اینجا یافت: https://services.github.com/on-demand/downloads/github-git-cheat-sheet.pdf.
+برای کتابچه راهنمای دقیق گیت ببینید https://git-scm.com/book/en/v2.

-# Cloning a repository to your development machine {#cloning-a-repository-to-your-development-machine}
+# کلون کردن یک مخزن به دستگاه توسعه خود {#cloning-a-repository-to-your-development-machine}

-Next, you need to download the source files onto your working machine.
This is called “to clone a repository” because it creates a local copy of the repository on your working machine.
+بعد, باید فایل های منبع را بر روی دستگاه کاری خود دانلود کنید. به این کار “to clone a repository” گفته می شود زیرا یک کپی محلی از مخزن را بر روی دستگاه کاری شما ایجاد می کند.

-In the command line terminal run:
+در ترمینال خط فرمان اجرا کنید:

    git clone --recursive git@github.com:your_github_username/ClickHouse.git
    cd ClickHouse

-Note: please, substitute *your\_github\_username* with what is appropriate!
+توجه: لطفا *your\_github\_username* را با نام کاربری مناسب جایگزین کنید!

-This command will create a directory `ClickHouse` containing the working copy of the project.
+این دستور یک دایرکتوری `ClickHouse` حاوی نسخه کاری پروژه ایجاد خواهد کرد.

-It is important that the path to the working directory contains no whitespaces as it may lead to problems with running the build system.
+مهم است که مسیر دایرکتوری کاری هیچ فاصله ای (whitespace) نداشته باشد زیرا ممکن است به مشکلاتی در اجرای سیستم ساخت منجر شود.

-Please note that ClickHouse repository uses `submodules`. That is what the references to additional repositories are called (i.e. external libraries on which the project depends). It means that when cloning the repository you need to specify the `--recursive` flag as in the example above. If the repository has been cloned without submodules, to download them you need to run the following:
+لطفا توجه داشته باشید که مخزن ClickHouse از `submodules` استفاده می کند. ارجاع به مخازن اضافی (یعنی کتابخانه های خارجی که پروژه به آن ها وابسته است) چنین نامیده می شود. یعنی هنگام کلون کردن مخزن باید پرچم `--recursive` را مانند مثال بالا مشخص کنید. اگر مخزن بدون زیرماژول ها کلون شده باشد, برای دانلودشان باید دستورات زیر را اجرا کنید:

    git submodule init
    git submodule update

-You can check the status with the command: `git submodule status`.
+می توانید وضعیت را با این فرمان بررسی کنید: `git submodule status`.

-If you get the following error message:
+اگر پیغام خطای زیر را دریافت کنید:

    Permission denied (publickey).
    fatal: Could not read from remote repository.
@@ -62,23 +67,23 @@ If you get the following error message:
    Please make sure you have the correct access rights
    and the repository exists.

-It generally means that the SSH keys for connecting to GitHub are missing. These keys are normally located in `~/.ssh`. For SSH keys to be accepted you need to upload them in the settings section of GitHub UI.
+به طور کلی به این معنی است که کلیدهای SSH برای اتصال به گیتهاب وجود ندارند. این کلیدها به طور معمول در `~/.ssh` قرار دارند. برای این که کلیدهای SSH پذیرفته شوند, باید در بخش تنظیمات رابط کاربری گیتهاب بارگذاریشان کنید.

-You can also clone the repository via https protocol:
+همچنین می توانید مخزن را از طریق پروتکل https کلون کنید:

    git clone https://github.com/ClickHouse/ClickHouse.git

-This, however, will not let you send your changes to the server. You can still use it temporarily and add the SSH keys later replacing the remote address of the repository with `git remote` command.
+این, با این حال, به شما اجازه نمی دهد تغییرات خود را به سرور ارسال کنید. هنوز می توانید به طور موقت از آن استفاده کنید و بعدا کلیدهای SSH را اضافه کرده و نشانی راه دور مخزن را با فرمان `git remote` جایگزین کنید.
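The paragraph above leaves the `git remote` step implicit; a small hypothetical sketch of switching the https remote to SSH once your keys are uploaded could look like this (the username is a placeholder):

``` bash
# Hypothetical sketch: replace the https remote address with the SSH one
# after uploading your keys; "your_github_username" is a placeholder.
git remote set-url origin git@github.com:your_github_username/ClickHouse.git
git remote -v   # verify that the remote now points at the SSH address
```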
-You can also add original ClickHouse repo’s address to your local repository to pull updates from there: +شما همچنین می توانید نشانی اصلی مخزن مخزن محلی خود را اضافه کنید به جلو و به روز رسانی از وجود دارد: git remote add upstream git@github.com:ClickHouse/ClickHouse.git -After successfully running this command you will be able to pull updates from the main ClickHouse repo by running `git pull upstream master`. +پس از موفقیت در حال اجرا این دستور شما قادر خواهید بود به جلو و به روز رسانی از مخزن کلیک اصلی در حال اجرا خواهد بود `git pull upstream master`. -## Working with submodules {#working-with-submodules} +## کار با submodules {#working-with-submodules} -Working with submodules in git could be painful. Next commands will help to manage it: +کار با زیربول در دستگاه گوارش می تواند دردناک باشد. دستورات بعدی کمک خواهد کرد که برای مدیریت: # ! each command accepts --recursive # Update remote URLs for submodules. Barely rare case @@ -90,7 +95,7 @@ Working with submodules in git could be painful. Next commands will help to mana # Two last commands could be merged together git submodule update --init -The next commands would help you to reset all submodules to the initial state (!WARNING! - any changes inside will be deleted): +دستورات بعدی کمک خواهد کرد که شما را به تنظیم مجدد تمام زیربول به حالت اولیه (!هشدار! - هر گونه تغییر در داخل حذف خواهد شد): # Synchronizes submodules' remote URL with .gitmodules git submodule sync --recursive @@ -106,151 +111,151 @@ The next commands would help you to reset all submodules to the initial state (! git submodule foreach git submodule foreach git reset --hard git submodule foreach git submodule foreach git clean -xfd -# Build System {#build-system} +# ساخت سیستم {#build-system} -ClickHouse uses CMake and Ninja for building. +تاتر با استفاده از کیک و نینجا برای ساخت و ساز. -CMake - a meta-build system that can generate Ninja files (build tasks). -Ninja - a smaller build system with a focus on the speed used to execute those cmake generated tasks. +کیک-یک سیستم متا ساخت است که می تواند فایل های نینجا (ساخت وظایف) تولید کند. +نینجا-یک سیستم ساخت کوچکتر با تمرکز بر سرعت مورد استفاده برای اجرای این کارهای تولید کیک. -To install on Ubuntu, Debian or Mint run `sudo apt install cmake ninja-build`. +برای نصب در اوبونتو, دبیان و یا نعنا اجرا `sudo apt install cmake ninja-build`. -On CentOS, RedHat run `sudo yum install cmake ninja-build`. +در حال بارگذاری `sudo yum install cmake ninja-build`. -If you use Arch or Gentoo, you probably know it yourself how to install CMake. +اگر شما استفاده از قوس یا جنتو, شما احتمالا خودتان می دانید که چگونه به نصب کیک. -For installing CMake and Ninja on Mac OS X first install Homebrew and then install everything else via brew: +برای نصب کیک و نینجا در سیستم عامل مک ایکس اول گشتن نصب و سپس نصب هر چیز دیگری از طریق دم: /usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)" brew install cmake ninja -Next, check the version of CMake: `cmake --version`. If it is below 3.3, you should install a newer version from the website: https://cmake.org/download/. +بعد, بررسی نسخه از کیک: `cmake --version`. اگر زیر 3.3, شما باید یک نسخه جدیدتر از وب سایت نصب: https://cmake.org/download/. -# Optional External Libraries {#optional-external-libraries} +# کتابخانه های خارجی اختیاری {#optional-external-libraries} -ClickHouse uses several external libraries for building. All of them do not need to be installed separately as they are built together with ClickHouse from the sources located in the submodules. 
You can check the list in `contrib`. +تاتر با استفاده از چندین کتابخانه خارجی برای ساخت و ساز. همه آنها نمی نیاز به نصب به طور جداگانه به عنوان آنها ساخته شده است همراه با clickhouse از منابع واقع در submodules. شما می توانید لیست را بررسی کنید `contrib`. -# C++ Compiler {#c-compiler} +# ج ++ کامپایلر {#c-compiler} -Compilers GCC starting from version 9 and Clang version 8 or above are supported for building ClickHouse. +کامپایلر شورای همکاری خلیج فارس با شروع از نسخه 9 و صدای شیپور نسخه 8 یا بالاتر برای ساخت و ساز خانه عروسکی پشتیبانی می کند. -Official Yandex builds currently use GCC because it generates machine code of slightly better performance (yielding a difference of up to several percent according to our benchmarks). And Clang is more convenient for development usually. Though, our continuous integration (CI) platform runs checks for about a dozen of build combinations. +یاندکس رسمی ایجاد شده در حال حاضر با استفاده از شورای همکاری خلیج فارس به دلیل تولید کد ماشین از عملکرد کمی بهتر (بازده تفاوت تا چند درصد با توجه به معیار ما). و کلانگ معمولا برای توسعه راحت تر است. هر چند, ادغام مداوم ما (سی) پلت فرم اجرا می شود چک برای حدود یک دوجین از ترکیب ساخت. -To install GCC on Ubuntu run: `sudo apt install gcc g++` +برای نصب شورای همکاری خلیج فارس در اوبونتو اجرای: `sudo apt install gcc g++` -Check the version of gcc: `gcc --version`. If it is below 9, then follow the instruction here: https://clickhouse.tech/docs/en/development/build/\#install-gcc-9. +بررسی نسخه شورای همکاری خلیج فارس: `gcc --version`. اگر زیر است 9, سپس دستورالعمل اینجا را دنبال کنید: https://clickhouse.فناوری / اسناد / ارتباطات / توسعه/ساختن / \#نصب شورای همکاری خلیج فارس-9. -Mac OS X build is supported only for Clang. Just run `brew install llvm` +سیستم عامل مک ایکس ساخت فقط برای صدای جرنگ جرنگ پشتیبانی می شود. فقط فرار کن `brew install llvm` -If you decide to use Clang, you can also install `libc++` and `lld`, if you know what it is. Using `ccache` is also recommended. +اگر شما تصمیم به استفاده از صدای شیپور, شما همچنین می توانید نصب `libc++` و `lld`, اگر شما می دانید چه چیزی است. با استفاده از `ccache` همچنین توصیه می شود. -# The Building process {#the-building-process} +# روند ساخت و ساز {#the-building-process} -Now that you are ready to build ClickHouse we recommend you to create a separate directory `build` inside `ClickHouse` that will contain all of the build artefacts: +حالا که اماده ساخت خانه عروسکی هستید توصیه می کنیم یک دایرکتوری جداگانه ایجاد کنید `build` داخل `ClickHouse` که شامل تمام مصنوعات ساخت: mkdir build cd build -You can have several different directories (build\_release, build\_debug, etc.) for different types of build. +شما می توانید چندین دایرکتوری های مختلف (build\_release, build\_debug ، ) برای انواع مختلف ساخت. -While inside the `build` directory, configure your build by running CMake. Before the first run, you need to define environment variables that specify compiler (version 9 gcc compiler in this example). +در حالی که در داخل `build` فهرست, پیکربندی ساخت خود را با در حال اجرا کیک. قبل از اولین اجرا, شما نیاز به تعریف متغیرهای محیطی که کامپایلر را مشخص (نسخه 9 کامپایلر شورای همکاری خلیج فارس در این مثال). -Linux: +لینوکس: export CC=gcc-9 CXX=g++-9 cmake .. -Mac OS X: +سیستم عامل مک ایکس: export CC=clang CXX=clang++ cmake .. -The `CC` variable specifies the compiler for C (short for C Compiler), and `CXX` variable instructs which C++ compiler is to be used for building. 
+این `CC` متغیر کامپایلر برای ج مشخص (کوتاه برای کامپایلر ج), و `CXX` دستور متغیر که سی++ کامپایلر است که برای ساخت و ساز استفاده می شود. -For a faster build, you can resort to the `debug` build type - a build with no optimizations. For that supply the following parameter `-D CMAKE_BUILD_TYPE=Debug`: +برای ساخت سریع تر, شما می توانید به توسل `debug` نوع ساخت-ساخت بدون بهینه سازی. برای عرضه پارامتر زیر `-D CMAKE_BUILD_TYPE=Debug`: cmake -D CMAKE_BUILD_TYPE=Debug .. -You can change the type of build by running this command in the `build` directory. +شما می توانید نوع ساخت را با اجرای این دستور در تغییر دهید `build` فهرست راهنما. -Run ninja to build: +اجرای نینجا برای ساخت: ninja clickhouse-server clickhouse-client -Only the required binaries are going to be built in this example. +فقط باینری مورد نیاز در حال رفتن به در این مثال ساخته شده است. -If you require to build all the binaries (utilities and tests), you should run ninja with no parameters: +اگر شما نیاز به ساخت تمام فایل های باینری (تاسیسات و تست), شما باید نینجا بدون پارامتر اجرا: ninja -Full build requires about 30GB of free disk space or 15GB to build the main binaries. +ساخت کامل نیاز به حدود 30 گیگابایت فضای دیسک رایگان یا 15 گیگابایت برای ساخت باینری اصلی دارد. -When a large amount of RAM is available on build machine you should limit the number of build tasks run in parallel with `-j` param: +هنگامی که مقدار زیادی از رم در ساخت دستگاه در دسترس است شما باید تعداد وظایف ساخت به صورت موازی با اجرا محدود می کند `-j` پرم: ninja -j 1 clickhouse-server clickhouse-client -On machines with 4GB of RAM, it is recommended to specify 1, for 8GB of RAM `-j 2` is recommended. +در ماشین با 4 گیگابایت رم, توصیه می شود برای مشخص 1, برای 8گیگابایت رم `-j 2` توصیه می شود. -If you get the message: `ninja: error: loading 'build.ninja': No such file or directory`, it means that generating a build configuration has failed and you need to inspect the message above. +اگر پیام را دریافت کنید: `ninja: error: loading 'build.ninja': No such file or directory`, به این معنی که تولید یک پیکربندی ساخت شکست خورده است و شما نیاز به بازرسی پیام بالا. -Upon the successful start of the building process, you’ll see the build progress - the number of processed tasks and the total number of tasks. +پس از شروع موفق از روند ساخت و ساز, شما پیشرفت ساخت را ببینید-تعداد کارهای پردازش شده و تعداد کل وظایف. -While building messages about protobuf files in libhdfs2 library like `libprotobuf WARNING` may show up. They affect nothing and are safe to be ignored. +در حالی که ساختمان پیام در مورد protobuf فایل در libhdfs2 کتابخانه مانند `libprotobuf WARNING` ممکن است نشان دهد تا. هیچ چیز تاثیر می گذارد و امن نادیده گرفته می شود. -Upon successful build you get an executable file `ClickHouse//programs/clickhouse`: +پس از ساخت موفق شما یک فایل اجرایی دریافت کنید `ClickHouse//programs/clickhouse`: ls -l programs/clickhouse -# Running the built executable of ClickHouse {#running-the-built-executable-of-clickhouse} +# اجرای اجرایی ساخته شده از خانه کلیک {#running-the-built-executable-of-clickhouse} -To run the server under the current user you need to navigate to `ClickHouse/programs/server/` (located outside of `build`) and run: +برای اجرای سرور تحت کاربر فعلی شما نیاز به حرکت به `ClickHouse/programs/server/` (واقع در خارج از `build`) و اجرا: ../../../build/programs/clickhouse server -In this case, ClickHouse will use config files located in the current directory. 
You can run `clickhouse server` from any directory specifying the path to a config file as a command-line parameter `--config-file`. +در این مورد, تاتر خواهد فایل های پیکربندی واقع در دایرکتوری جاری استفاده. شما می توانید اجرا کنید `clickhouse server` از هر دایرکتوری مشخص کردن مسیر به یک فایل پیکربندی به عنوان یک پارامتر خط فرمان `--config-file`. -To connect to ClickHouse with clickhouse-client in another terminal navigate to `ClickHouse/build/programs/` and run `clickhouse client`. +برای اتصال به clickhouse با clickhouse-مشتری در یکی دیگر از ترمینال حرکت به `ClickHouse/build/programs/` و فرار کن `clickhouse client`. -If you get `Connection refused` message on Mac OS X or FreeBSD, try specifying host address 127.0.0.1: +اگر شما `Connection refused` سعی کنید مشخص نشانی میزبان 127.0.0.1: clickhouse client --host 127.0.0.1 -You can replace the production version of ClickHouse binary installed in your system with your custom-built ClickHouse binary. To do that install ClickHouse on your machine following the instructions from the official website. Next, run the following: +شما می توانید جایگزین تولید نسخه clickhouse باینری در سیستم شما نصب شده خود را با سفارشی ساخته شده clickhouse دودویی. برای انجام این کار نصب کلیک بر روی دستگاه خود را به دنبال دستورالعمل از وب سایت رسمی. بعد زیر را اجرا کنید: sudo service clickhouse-server stop sudo cp ClickHouse/build/programs/clickhouse /usr/bin/ sudo service clickhouse-server start -Note that `clickhouse-client`, `clickhouse-server` and others are symlinks to the commonly shared `clickhouse` binary. +توجه داشته باشید که `clickhouse-client`, `clickhouse-server` و دیگران به طور معمول به اشتراک گذاشته می شوند `clickhouse` دودویی. -You can also run your custom-built ClickHouse binary with the config file from the ClickHouse package installed on your system: +شما همچنین می توانید خود را سفارشی ساخته شده clickhouse دودویی با فایل پیکربندی از clickhouse بسته نصب شده در سیستم شما: sudo service clickhouse-server stop sudo -u clickhouse ClickHouse/build/programs/clickhouse server --config-file /etc/clickhouse-server/config.xml -# IDE (Integrated Development Environment) {#ide-integrated-development-environment} +# محیط توسعه یکپارچه) {#ide-integrated-development-environment} -If you do not know which IDE to use, we recommend that you use CLion. CLion is commercial software, but it offers 30 days free trial period. It is also free of charge for students. CLion can be used both on Linux and on Mac OS X. +اگر شما نمی دانید که محیط برنامه نویسی برای استفاده, توصیه می کنیم که شما با استفاده از کلون. کلوون نرم افزار تجاری است, اما 30 روز رایگان دوره محاکمه. این نیز رایگان برای دانشجویان. clion می توان هم بر روی لینوکس و mac os x. -KDevelop and QTCreator are other great alternatives of an IDE for developing ClickHouse. KDevelop comes in as a very handy IDE although unstable. If KDevelop crashes after a while upon opening project, you should click “Stop All” button as soon as it has opened the list of project’s files. After doing so KDevelop should be fine to work with. +KDevelop و QTCreator دیگر از جایگزین های بسیار خوبی از یک IDE برای توسعه ClickHouse. توسعه و توسعه به عنوان یک محیط برنامه نویسی بسیار مفید هر چند ناپایدار. اگر توسعه پس از مدتی پس از باز کردن پروژه سقوط, شما باید کلیک کنید “Stop All” دکمه به محض این که لیستی از فایل های پروژه را باز کرده است. پس از انجام این کار کدولاپ باید خوب باشد برای کار با. -As simple code editors, you can use Sublime Text or Visual Studio Code, or Kate (all of which are available on Linux). 
+به عنوان ویرایشگرهای کد ساده, می توانید از Sublime Text یا Visual Studio Code یا Kate استفاده کنید (که همه در لینوکس در دسترس هستند).

-Just in case, it is worth mentioning that CLion creates `build` path on its own, it also on its own selects `debug` for build type, for configuration it uses a version of CMake that is defined in CLion and not the one installed by you, and finally, CLion will use `make` to run build tasks instead of `ninja`. This is normal behaviour, just keep that in mind to avoid confusion.
+محض احتیاط لازم به ذکر است که CLion مسیر `build` را خودش ایجاد می کند, همچنین خودش `debug` را برای نوع ساخت انتخاب می کند, برای پیکربندی از نسخه ای از CMake استفاده می کند که در CLion تعریف شده و نه نسخه نصب شده توسط شما, و در نهایت CLion برای اجرای وظایف ساخت از `make` به جای `ninja` استفاده خواهد کرد. این رفتار طبیعی است, فقط برای جلوگیری از سردرگمی این را در ذهن داشته باشید.

-# Writing Code {#writing-code}
+# نوشتن کد {#writing-code}

-The description of ClickHouse architecture can be found here: https://clickhouse.tech/docs/en/development/architecture/
+شرح معماری ClickHouse را می توان اینجا یافت: https://clickhouse.tech/docs/en/development/architecture/

-The Code Style Guide: https://clickhouse.tech/docs/en/development/style/
+راهنمای سبک کد: https://clickhouse.tech/docs/en/development/style/

-Writing tests: https://clickhouse.tech/docs/en/development/tests/
+نوشتن تست ها: https://clickhouse.tech/docs/en/development/tests/

-List of tasks: https://github.com/ClickHouse/ClickHouse/blob/master/tests/instructions/easy\_tasks\_sorted\_en.md
+فهرست تکلیف ها: https://github.com/ClickHouse/ClickHouse/blob/master/tests/instructions/easy\_tasks\_sorted\_en.md

-# Test Data {#test-data}
+# داده های تست {#test-data}

-Developing ClickHouse often requires loading realistic datasets. It is particularly important for performance testing. We have a specially prepared set of anonymized data from Yandex.Metrica. It requires additionally some 3GB of free disk space. Note that this data is not required to accomplish most of the development tasks.
+توسعه ClickHouse اغلب نیاز به بارگذاری مجموعه داده های واقع بینانه دارد. این امر به ویژه برای تست عملکرد مهم است. ما مجموعه ای مخصوص از داده های ناشناس شده یاندکس.متریکا داریم. این مجموعه علاوه بر این به حدود 3 گیگابایت فضای دیسک آزاد نیاز دارد. توجه داشته باشید که این داده ها برای انجام بیشتر وظایف توسعه مورد نیاز نیستند.

    sudo apt install wget xz-utils
@@ -269,14 +274,14 @@ Developing ClickHouse often requires loading realistic datasets. It is particula
    clickhouse-client --max_insert_block_size 100000 --query "INSERT INTO test.hits FORMAT TSV" < hits_v1.tsv
    clickhouse-client --max_insert_block_size 100000 --query "INSERT INTO test.visits FORMAT TSV" < visits_v1.tsv

-# Creating Pull Request {#creating-pull-request}
+# ایجاد درخواست کشش {#creating-pull-request}

-Navigate to your fork repository in GitHub’s UI. If you have been developing in a branch, you need to select that branch. There will be a “Pull request” button located on the screen. In essence, this means “create a request for accepting my changes into the main repository”.
+به مخزن چنگال خود در رابط کاربری گیتهاب بروید. اگر در یک شاخه توسعه داده اید, باید آن شاخه را انتخاب کنید. یک دکمه “Pull request” بر روی صفحه نمایش وجود خواهد داشت. در اصل این یعنی “create a request for accepting my changes into the main repository”.

-A pull request can be created even if the work is not completed yet. In this case please put the word “WIP” (work in progress) at the beginning of the title, it can be changed later.
This is useful for cooperative reviewing and discussion of changes as well as for running all of the available tests. It is important that you provide a brief description of your changes, it will later be used for generating release changelogs.
+درخواست کشش را می توان حتی اگر کار هنوز کامل نشده است ایجاد کرد. در این صورت لطفا کلمه “WIP” (کار در حال پیشرفت) را در ابتدای عنوان قرار دهید; بعدا می توان آن را تغییر داد. این برای بازبینی مشترک و بحث در مورد تغییرات و همچنین برای اجرای تمام تست های موجود مفید است. مهم است که شرح مختصری از تغییرات خود ارائه دهید; بعدا برای تولید تغییرات انتشار (changelog) استفاده خواهد شد.

-Testing will commence as soon as Yandex employees label your PR with a tag “can be tested”. The results of some first checks (e.g. code style) will come in within several minutes. Build check results will arrive within half an hour. And the main set of tests will report itself within an hour.
+تست به محض این که کارکنان یاندکس درخواست کشش (PR) شما را با برچسب “can be tested” علامت گذاری کنند شروع خواهد شد. نتایج برخی بررسی های اولیه (مثلا سبک کد) در عرض چند دقیقه می رسند. نتایج بررسی ساخت در عرض نیم ساعت می رسند. و مجموعه اصلی تست ها در عرض یک ساعت گزارش می دهند.

-The system will prepare ClickHouse binary builds for your pull request individually. To retrieve these builds click the “Details” link next to “ClickHouse build check” entry in the list of checks. There you will find direct links to the built .deb packages of ClickHouse which you can deploy even on your production servers (if you have no fear).
+این سیستم ساخت های باینری ClickHouse را برای درخواست کشش شما به صورت جداگانه تهیه خواهد کرد. برای دریافت این ساخت ها بر روی پیوند “Details” در کنار ورودی “ClickHouse build check” در لیست بررسی ها کلیک کنید. در آن جا پیوندهای مستقیم به بسته های .deb ساخته شده ClickHouse را خواهید یافت که می توانید حتی بر روی سرورهای تولید خود مستقر کنید (اگر ترسی ندارید).

-Most probably some of the builds will fail at first times. This is due to the fact that we check builds both with gcc as well as with clang, with almost all of existing warnings (always with the `-Werror` flag) enabled for clang. On that same page, you can find all of the build logs so that you do not have to build ClickHouse in all of the possible ways.
+به احتمال زیاد برخی از ساخت ها در دفعات اول شکست خواهند خورد. این به این دلیل است که ما ساخت ها را هم با gcc و هم با clang بررسی می کنیم, و تقریبا تمام هشدارهای موجود (همیشه با پرچم `-Werror`) برای clang فعال است. در همان صفحه می توانید تمام لاگ های ساخت را پیدا کنید تا مجبور نباشید ClickHouse را به تمام روش های ممکن بسازید.
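A quick way to sanity-check the test datasets loaded in the developer instructions above is to count their rows; a hypothetical sketch, where the table names `test.hits` and `test.visits` come from the commands shown earlier:

``` bash
# Hypothetical sanity check: confirm the anonymized Yandex.Metrica test
# tables loaded above are present and queryable.
clickhouse-client --query "SELECT count() FROM test.hits"
clickhouse-client --query "SELECT count() FROM test.visits"
```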
diff --git a/docs/fa/development/index.md b/docs/fa/development/index.md index 727e89ca891..80e76c95a81 100644 --- a/docs/fa/development/index.md +++ b/docs/fa/development/index.md @@ -1,7 +1,12 @@ --- -en_copy: true +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_folder_title: Development +toc_hidden: true +toc_priority: 58 +toc_title: "\u0645\u062E\u0641\u06CC" --- -# ClickHouse Development {#clickhouse-development} +# توسعه کلیک {#clickhouse-development} -[Original article](https://clickhouse.tech/docs/en/development/) +[مقاله اصلی](https://clickhouse.tech/docs/en/development/) diff --git a/docs/fa/development/style.md b/docs/fa/development/style.md index 5df099ecd52..2f8fb48cad0 100644 --- a/docs/fa/development/style.md +++ b/docs/fa/development/style.md @@ -1,26 +1,30 @@ --- -en_copy: true +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 68 +toc_title: "\u0686\u06AF\u0648\u0646\u0647 \u0628\u0631\u0627\u06CC \u0646\u0648\u0634\ + \u062A\u0646 \u062C++ \u06A9\u062F" --- -# How to Write C++ Code {#how-to-write-c-code} +# چگونه برای نوشتن ج++ کد {#how-to-write-c-code} -## General Recommendations {#general-recommendations} +## توصیه های عمومی {#general-recommendations} -**1.** The following are recommendations, not requirements. +**1.** در زیر توصیه, مورد نیاز نیست. -**2.** If you are editing code, it makes sense to follow the formatting of the existing code. +**2.** اگر شما در حال ویرایش کد آن را حس می کند به دنبال این قالب از کد های موجود. -**3.** Code style is needed for consistency. Consistency makes it easier to read the code, and it also makes it easier to search the code. +**3.** سبک کد برای سازگاری مورد نیاز است. سازگاری خواندن کد را ساده تر می کند و همچنین باعث می شود که کد را جستجو کنید. -**4.** Many of the rules do not have logical reasons; they are dictated by established practices. +**4.** بسیاری از قوانین دلایل منطقی ندارند; دیکته شده توسط شیوه های تاسیس شده است. -## Formatting {#formatting} +## قالببندی {#formatting} -**1.** Most of the formatting will be done automatically by `clang-format`. +**1.** بسیاری از قالب بندی به صورت خودکار انجام می شود `clang-format`. -**2.** Indents are 4 spaces. Configure your development environment so that a tab adds four spaces. +**2.** فرورفتگی فضاهای 4 هستند. پیکربندی محیط توسعه خود را به طوری که یک تب اضافه می کند چهار فضا. -**3.** Opening and closing curly brackets must be on a separate line. +**3.** باز و بسته شدن براکت فرفری باید در یک خط جداگانه باشد. ``` cpp inline void readBoolText(bool & x, ReadBuffer & buf) @@ -31,14 +35,14 @@ inline void readBoolText(bool & x, ReadBuffer & buf) } ``` -**4.** If the entire function body is a single `statement`, it can be placed on a single line. Place spaces around curly braces (besides the space at the end of the line). +**4.** اگر کل بدن تابع یک است `statement`, این را می توان در یک خط قرار داده شده. فضاهای محل در اطراف پرانتز فرفری (علاوه بر فضای در پایان خط). ``` cpp inline size_t mask() const { return buf_size() - 1; } inline size_t place(HashValue x) const { return x & mask(); } ``` -**5.** For functions. Don’t put spaces around brackets. +**5.** برای توابع. فضاهای اطراف براکت قرار ندهید. ``` cpp void reinsert(const Value & x) @@ -48,13 +52,13 @@ void reinsert(const Value & x) memcpy(&buf[place_value], &x, sizeof(x)); ``` -**6.** In `if`, `for`, `while` and other expressions, a space is inserted in front of the opening bracket (as opposed to function calls). 
+**6.** داخل `if`, `for`, `while` و عبارت دیگر, یک فضای در مقابل براکت باز قرار داده(به عنوان مخالف به عملکرد تماس). ``` cpp for (size_t i = 0; i < rows; i += storage.index_granularity) ``` -**7.** Add spaces around binary operators (`+`, `-`, `*`, `/`, `%`, …) and the ternary operator `?:`. +**7.** اضافه کردن فضاهای اطراف اپراتورهای دودویی (`+`, `-`, `*`, `/`, `%`, …) and the ternary operator `?:`. ``` cpp UInt16 year = (s[0] - '0') * 1000 + (s[1] - '0') * 100 + (s[2] - '0') * 10 + (s[3] - '0'); @@ -62,7 +66,7 @@ UInt8 month = (s[5] - '0') * 10 + (s[6] - '0'); UInt8 day = (s[8] - '0') * 10 + (s[9] - '0'); ``` -**8.** If a line feed is entered, put the operator on a new line and increase the indent before it. +**8.** اگر یک خوراک خط وارد شده است, قرار دادن اپراتور در یک خط جدید و افزایش تورفتگی قبل از. ``` cpp if (elapsed_ns) @@ -71,7 +75,7 @@ if (elapsed_ns) << bytes_read_on_server * 1000.0 / elapsed_ns << " MB/s.) "; ``` -**9.** You can use spaces for alignment within a line, if desired. +**9.** شما می توانید فضاهای برای هم ترازی در یک خط استفاده, در صورت دلخواه. ``` cpp dst.ClickLogID = click.LogID; @@ -79,17 +83,17 @@ dst.ClickEventID = click.EventID; dst.ClickGoodEvent = click.GoodEvent; ``` -**10.** Don’t use spaces around the operators `.`, `->`. +**10.** از فضاهای اطراف اپراتورها استفاده نکنید `.`, `->`. -If necessary, the operator can be wrapped to the next line. In this case, the offset in front of it is increased. +در صورت لزوم اپراتور می تواند به خط بعدی پیچیده شود. در این مورد جبران در مقابل افزایش می یابد. -**11.** Do not use a space to separate unary operators (`--`, `++`, `*`, `&`, …) from the argument. +**11.** از فضا برای جدا کردن اپراتورهای غیر ضروری استفاده نکنید (`--`, `++`, `*`, `&`, …) from the argument. -**12.** Put a space after a comma, but not before it. The same rule goes for a semicolon inside a `for` expression. +**12.** بعد از ویرگول فاصله بگیر ولی نه قبل از اون همین قاعده برای یک نقطه و ویرگول در داخل یک `for` اصطلاح. -**13.** Do not use spaces to separate the `[]` operator. +**13.** از فضاها برای جدا کردن استفاده نکنید `[]` اپراتور -**14.** In a `template <...>` expression, use a space between `template` and `<`; no spaces after `<` or before `>`. +**14.** در یک `template <...>` عبارت, استفاده از یک فضای بین `template` و `<`; بدون فاصله پس از `<` یا قبل از `>`. ``` cpp template @@ -97,7 +101,7 @@ struct AggregatedStatElement {} ``` -**15.** In classes and structures, write `public`, `private`, and `protected` on the same level as `class/struct`, and indent the rest of the code. +**15.** در کلاس ها و سازه, نوشتن `public`, `private` و `protected` در همان سطح به عنوان `class/struct`, و تورفتگی بقیه کد. ``` cpp template @@ -110,11 +114,11 @@ public: } ``` -**16.** If the same `namespace` is used for the entire file, and there isn’t anything else significant, an offset is not necessary inside `namespace`. +**16.** اگر همان `namespace` برای کل فایل استفاده می شود, و هر چیز دیگری قابل توجهی وجود ندارد, افست در داخل لازم نیست `namespace`. -**17.** If the block for an `if`, `for`, `while`, or other expression consists of a single `statement`, the curly brackets are optional. Place the `statement` on a separate line, instead. This rule is also valid for nested `if`, `for`, `while`, … +**17.** اگر بلوک برای `if`, `for`, `while`, یا عبارت دیگر متشکل از یک `statement`, براکت فرفری اختیاری هستند. محل `statement` در یک خط جداگانه, در عوض. 
**17.** If the block for an `if`, `for`, `while`, or other expression consists of a single `statement`, the curly brackets are optional. Place the `statement` on a separate line, instead. This rule is also valid for nested `if`, `for`, `while`, …

But if the inner `statement` contains curly brackets or `else`, the external block should be written in curly brackets.

``` cpp
/// Finish write.
for (auto & stream : streams)
    stream.second->finalize();
```

**18.** There shouldn’t be any spaces at the ends of lines.

**19.** Source files are UTF-8 encoded.

**20.** Non-ASCII characters can be used in string literals.

``` cpp
<< ", " << (timer.elapsed() / chunks_stats.hits) << " μsec/hit.";
```

**21.** Do not write multiple expressions in a single line.

**22.** Group sections of code inside functions and separate them with no more than one empty line.

**23.** Separate functions, classes, and so on with one or two empty lines.

**24.** `A const` (related to a value) must be written before the type name.

``` cpp
//correct
const char * pos
const std::string & s

//incorrect
char const * pos
```

**25.** When declaring a pointer or reference, the `*` and `&` symbols should be separated by spaces on both sides.

``` cpp
//correct
const char * pos

//incorrect
const char* pos
const char *pos
```

**26.** When using template types, alias them with the `using` keyword (except in the simplest cases).

In other words, the template parameters are specified only in `using` and aren’t repeated in the code.

`using` can be declared locally, such as inside a function.

``` cpp
//correct
using FileStreams = std::map<std::string, std::shared_ptr<Stream>>;
FileStreams streams;

//incorrect
std::map<std::string, std::shared_ptr<Stream>> streams;
```

**27.** Do not declare several variables of different types in one statement.

``` cpp
//incorrect
int x, *y;
```

**28.** Do not use C-style casts.

``` cpp
//incorrect
std::cerr << (int)c << std::endl;

//correct
std::cerr << static_cast<int>(c) << std::endl;
```

**29.** In classes and structs, group members and functions separately inside each visibility scope.

**30.** For small classes and structs, it is not necessary to separate the method declaration from the implementation.

The same is true for small methods in any classes or structs.

For templated classes and structs, don’t separate the method declarations from the implementation (because otherwise they must be defined in the same translation unit).
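A short sketch of rule **30**, assuming a hypothetical `Range` struct; the declaration and implementation stay together:

``` cpp
#include <cstddef>

struct Range
{
    size_t begin = 0;
    size_t end = 0;

    /// Small methods are defined in place.
    size_t size() const { return end - begin; }
    bool empty() const { return begin == end; }
};
```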
**31.** You can wrap lines at 140 characters, instead of 80.

**32.** Always use the prefix increment/decrement operators if postfix is not required.

``` cpp
for (Names::const_iterator it = column_names.begin(); it != column_names.end(); ++it)
```

## Comments {#comments}

**1.** Be sure to add comments for all non-trivial parts of code.

This is very important. Writing the comment might help you realize that the code isn’t necessary, or that it is designed wrong.

``` cpp
/** Part of piece of memory, that can be used.
  * For example, if internal_buffer is 1MB, and there was only 10 bytes loaded to buffer from file for reading,
  * then working_buffer will have size of only 10 bytes
  * (working_buffer.end() is position right after those 10 bytes available for read).
  */
```

**2.** Comments can be as detailed as necessary.

**3.** Place comments before the code they describe. In rare cases, comments can come after the code, on the same line.

``` cpp
/** Parses and executes the query.
  */
void executeQuery(
    ReadBuffer & istr,                  /// Where to read the query from (and data for INSERT, if applicable)
    WriteBuffer & ostr,                 /// Where to write the result
    Context & context,                  /// DB, tables, data types, engines, functions, aggregate functions...
    BlockInputStreamPtr & query_plan,   /// Here could be written the description on how query was executed
    QueryProcessingStage::Enum stage    /// Up to which stage process the SELECT query
)
```

**4.** Comments should be written in English only.

**5.** If you are writing a library, include detailed comments explaining it in the main header file.

**6.** Do not add comments that do not provide additional information. In particular, do not leave empty comments like this:

``` cpp
/*
* Procedure Name:
* Original procedure name:
* Author:
* Date of creation:
* ...
* Purpose:
*/
```

The example is borrowed from the resource http://home.tamk.fi/~jaalto/course/coding-style/doc/unmaintainable-code/.

**7.** Do not write garbage comments (author, creation date ..) at the beginning of each file.

**8.** Single-line comments begin with three slashes: `///` and multi-line comments begin with `/**`. These comments are considered “documentation”.

Note: You can use Doxygen to generate documentation from these comments. But Doxygen is not generally used because it is more convenient to navigate the code in the IDE.
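A minimal sketch of the two comment styles; the declarations themselves are hypothetical:

``` cpp
/// Returns the number of rows in the block.
size_t rows() const;

/** Merges the other block into this one.
  * The behaviour is non-trivial, so a multi-line documentation comment is used.
  */
void merge(const Block & other);
```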
**9.** Multi-line comments must not have empty lines at the beginning and end (except the line that closes a multi-line comment).

**10.** For commenting out code, use basic comments, not “documenting” comments.

**11.** Delete the commented out parts of the code before committing.

**12.** Do not use profanity in comments or code.

**13.** Do not use uppercase letters. Do not use excessive punctuation.

``` cpp
/// WHAT THE FAIL???
```

**14.** Do not use comments to make delimiters.

``` cpp
///******************************************************
```

**15.** Do not start discussions in comments.

``` cpp
/// Why did you do this stuff?
```

**16.** There’s no need to write a comment at the end of a block describing what it was about.

``` cpp
/// for
```

## Names {#names}

**1.** Use lowercase letters with underscores in the names of variables and class members.

``` cpp
size_t max_block_size;
```

**2.** For the names of functions (methods), use camelCase beginning with a lowercase letter.

``` cpp
std::string getName() const override { return "Memory"; }
```

**3.** For the names of classes (structs), use CamelCase beginning with an uppercase letter. Prefixes other than I are not used for interfaces.

``` cpp
class StorageMemory : public IStorage
```

**4.** `using` aliases are named the same way as classes, or with `_t` on the end.
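A sketch of both naming options for rule **4**; the aliases are invented for the example:

``` cpp
#include <cstddef>
#include <string>
#include <vector>

using ColumnNames = std::vector<std::string>;   /// named the same way as a class
using offset_t = size_t;                        /// or with `_t` on the end
```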
**5.** Names of template type arguments: in simple cases, use `T`; `T`, `U`; `T1`, `T2`.

For more complex cases, either follow the rules for class names, or add the prefix `T`.

``` cpp
template <typename TKey, typename TValue>
struct AggregatedStatElement
```

**6.** Names of template constant arguments: either follow the rules for variable names, or use `N` in simple cases.

``` cpp
template <bool without_www>
struct ExtractDomain
```

**7.** For abstract classes (interfaces) you can add the `I` prefix.

``` cpp
class IBlockInputStream
```

**8.** If you use a variable locally, you can use the short name.

In all other cases, use a name that describes the meaning.

``` cpp
bool info_successfully_loaded = false;
```

**9.** Names of `define`s and global constants use ALL\_CAPS with underscores.

``` cpp
#define MAX_SRC_TABLE_NAMES_TO_STORE 1000
```

**10.** File names should use the same style as their contents.

If a file contains a single class, name the file the same way as the class (CamelCase).

If the file contains a single function, name the file the same way as the function (camelCase).

**11.** If the name contains an abbreviation, then:

- For variable names, the abbreviation should use lowercase letters `mysql_connection` (not `mySQL_connection`).
- For names of classes and functions, keep the uppercase letters in the abbreviation `MySQLConnection` (not `MySqlConnection`).

**12.** Constructor arguments that are used just to initialize the class members should be named the same way as the class members, but with an underscore at the end.

``` cpp
FileQueueProcessor(
    const std::string & path_,
    const std::string & prefix_,
    std::shared_ptr<FileHandler> handler_)
    : path(path_),
    prefix(prefix_),
    handler(handler_),
    log(&Logger::get("FileQueueProcessor"))
{
}
```

The underscore suffix can be omitted if the argument is not used in the constructor body.

**13.** There is no difference in the names of local variables and class members (no prefixes required).

``` cpp
timer (not m_timer)
```

**14.** For the constants in an `enum`, use CamelCase with a capital letter. ALL\_CAPS is also acceptable. If the `enum` is non-local, use an `enum class`.

``` cpp
enum class CompressionMethod
{
    QuickLZ = 0,
    LZ4 = 1,
};
```

**15.** All names must be in English. Transliteration of Russian words is not allowed.

not Stroka

**16.** Abbreviations are acceptable if they are well known (when you can easily find the meaning of the abbreviation in Wikipedia or in a search engine).

`AST`, `SQL`. Not `NVDH` (some random letters)

Incomplete words are acceptable if the shortened version is common use.

You can also use an abbreviation if the full name is included next to it in the comments.

**17.** File names with C++ source code must have the `.cpp` extension. Header files must have the `.h` extension.

## How to Write Code {#how-to-write-code}

**1.** Memory management.

Manual memory deallocation (`delete`) can only be used in library code.

In library code, the `delete` operator can only be used in destructors.

In application code, memory must be freed by the object that owns it.

Examples:

- The easiest way is to place an object on the stack, or make it a member of another class.
- For a large number of small objects, use containers.
- For automatic deallocation of a small number of objects that reside in the heap, use `shared_ptr/unique_ptr` (see the sketch below).
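To illustrate the last bullet, a minimal sketch of heap ownership through `unique_ptr`; the `Connection` type is hypothetical:

``` cpp
#include <memory>

struct Connection {};

void process()
{
    /// The owner frees the object automatically at the end of the scope;
    /// no manual delete appears in application code.
    auto connection = std::make_unique<Connection>();
}
```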
**2.** Resource management.

Use `RAII` and see above.

**3.** Error handling.

Use exceptions. In most cases, you only need to throw an exception, and don’t need to catch it (because of `RAII`).

In offline data processing applications, it’s often acceptable to not catch exceptions.

In servers that handle user requests, it’s usually enough to catch exceptions at the top level of the connection handler.

In thread functions, you should catch and keep all exceptions to rethrow them in the main thread after `join`.

``` cpp
/// If there weren't any calculations yet, calculate the first block synchronously
if (!started)
{
    calculate();
    started = true;
}
else /// If calculations are already in progress, wait for the result
    pool.wait();

if (exception)
    exception->rethrow();
```

Never hide exceptions without handling. Never just blindly put all exceptions to log.

``` cpp
//Not correct
catch (...) {}
```

If you need to ignore some exceptions, do so only for specific ones and rethrow the rest.

``` cpp
catch (const DB::Exception & e)
{
    if (e.code() == ErrorCodes::UNKNOWN_AGGREGATE_FUNCTION)
        return nullptr;
    else
        throw;
}
```

When using functions with response codes or `errno`, always check the result and throw an exception in case of error.

``` cpp
if (0 != close(fd))
    throwFromErrno("Cannot close file " + file_name, ErrorCodes::CANNOT_CLOSE_FILE);
```

`Do not use assert`.

**4.** Exception types.

There is no need to use complex exception hierarchy in application code. The exception text should be understandable to a system administrator.

**5.** Throwing exceptions from destructors.

This is not recommended, but it is allowed.

Use the following options (the first one is sketched after this list):

- Create a function (`done()` or `finalize()`) that will do all the work in advance that might lead to an exception. If that function was called, there should be no exceptions in the destructor later.
- Tasks that are too complex (such as sending messages over the network) can be put in a separate method that the class user will have to call before destruction.
- If there is an exception in the destructor, it’s better to log it than to hide it (if the logger is available).
- In simple applications, it is acceptable to rely on `std::terminate` (for cases of `noexcept` by default in C++11) to handle exceptions.
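A sketch of the first option, assuming a hypothetical buffer class with a `finalize()` method:

``` cpp
class BufferExample
{
public:
    /// Everything that can throw happens here, before destruction.
    void finalize()
    {
        flush();    /// may throw
        finalized = true;
    }

    ~BufferExample()
    {
        if (!finalized)
        {
            /// Log the problem if possible; never let an exception propagate.
        }
    }

private:
    void flush();
    bool finalized = false;
};
```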
**6.** Anonymous code blocks.

You can create a separate code block inside a single function in order to make certain variables local, so that the destructors are called when exiting the block.

``` cpp
Block block = data.in->read();

{
    std::lock_guard<std::mutex> lock(mutex);
    data.ready = true;
    data.block = block;
}

ready_any.set();
```

**7.** Multithreading.

In offline data processing programs:

- Try to get the best possible performance on a single CPU core. You can then parallelize your code if necessary.

In server applications:

- Use the thread pool to process requests. At this point, we haven’t had any tasks that required userspace context switching.

Fork is not used for parallelization.

**8.** Syncing threads.

Often it is possible to make different threads use different memory cells (even better: different cache lines) and to not use any thread synchronization (except `joinAll`).

If synchronization is required, in most cases, it is sufficient to use mutex under `lock_guard`.

In other cases use system synchronization primitives. Do not use busy wait.

Atomic operations should be used only in the simplest cases.

Do not try to implement lock-free data structures unless it is your primary area of expertise.
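A minimal sketch of synchronization under `lock_guard`; the shared counter is invented for the example:

``` cpp
#include <cstddef>
#include <mutex>

std::mutex counter_mutex;
size_t counter = 0;

void increment()
{
    /// The mutex is released automatically when the guard goes out of scope.
    std::lock_guard<std::mutex> lock(counter_mutex);
    ++counter;
}
```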
**9.** Pointers vs references.

In most cases, prefer references.

**10.** const.

Use constant references, pointers to constants, `const_iterator`, and const methods.

Consider `const` to be default and use non-`const` only when necessary.

When passing variables by value, using `const` usually does not make sense.

**11.** unsigned.

Use `unsigned` if necessary.

**12.** Numeric types.

Use the types `UInt8`, `UInt16`, `UInt32`, `UInt64`, `Int8`, `Int16`, `Int32`, and `Int64`, as well as `size_t`, `ssize_t`, and `ptrdiff_t`.

Don’t use these types for numbers: `signed/unsigned long`, `long long`, `short`, `signed/unsigned char`, `char`.

**13.** Passing arguments.

Pass complex values by reference (including `std::string`).

If a function captures ownership of an object created in the heap, make the argument type `shared_ptr` or `unique_ptr`.
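A sketch of rule **13** with hypothetical signatures: complex values go by reference, and transfer of ownership is visible in the argument type:

``` cpp
#include <cstddef>
#include <memory>
#include <string>

/// A complex value is passed by const reference.
size_t totalLength(const std::string & name, const std::string & value);

struct Source {};

/// The function takes ownership of a heap object, so the signature says so.
void attachSource(std::unique_ptr<Source> source);
```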
**14.** Return values.

In most cases, just use `return`. Do not write `[return std::move(res)]{.strike}`.

If the function allocates an object on heap and returns it, use `shared_ptr` or `unique_ptr`.

In rare cases you might need to return the value via an argument. In this case, the argument should be a reference.

``` cpp
using AggregateFunctionPtr = std::shared_ptr<IAggregateFunction>;

/** Allows creating an aggregate function by its name.
  */
class AggregateFunctionFactory
{
public:
    AggregateFunctionFactory();
    AggregateFunctionPtr get(const String & name, const DataTypes & argument_types) const;
```

**15.** namespace.

There is no need to use a separate `namespace` for application code.

Small libraries don’t need this, either.

For medium to large libraries, put everything in a `namespace`.

In the library’s `.h` file, you can use `namespace detail` to hide implementation details not needed for the application code.

In a `.cpp` file, you can use a `static` or anonymous namespace to hide symbols.

Also, a `namespace` can be used for an `enum` to prevent the corresponding names from falling into an external `namespace` (but it’s better to use an `enum class`).

**16.** Deferred initialization.

If arguments are required for initialization, then you normally shouldn’t write a default constructor.

If later you’ll need to delay initialization, you can add a default constructor that will create an invalid object. Or, for a small number of objects, you can use `shared_ptr/unique_ptr`.

``` cpp
Loader(DB::Connection * connection_, const std::string & query, size_t max_block_size_);

/// For deferred initialization
Loader() {}
```

**17.** Virtual functions.

If the class is not intended for polymorphic use, you do not need to make functions virtual. This also applies to the destructor.

**18.** Encodings.

Use UTF-8 everywhere. Use `std::string` and `char *`. Do not use `std::wstring` and `wchar_t`.

**19.** Logging.

See the examples everywhere in the code.

Before committing, delete all meaningless and debug logging, and any other types of debug output.

Logging in cycles should be avoided, even on the Trace level.

Logs must be readable at any logging level.

Logging should only be used in application code, for the most part.

Log messages must be written in English.

The log should preferably be understandable for the system administrator.

Do not use profanity in the log.

Use UTF-8 encoding in the log. In rare cases you can use non-ASCII characters in the log.

**20.** Input-output.

Don’t use `iostreams` in internal cycles that are critical for application performance (and never use `stringstream`).

Use the `DB/IO` library instead.

**21.** Date and time.

See the `DateLUT` library.

**22.** include.

Always use `#pragma once` instead of include guards.

**23.** using.

`using namespace` is not used. You can use `using` with something specific. But make it local inside a class or function.

**24.** Do not use `trailing return type` for functions unless necessary.

``` cpp
[auto f() -> void;]{.strike}
```

**25.** Declaration and initialization of variables.

``` cpp
//right way
std::string s = "Hello";
std::string s{"Hello"};

//wrong way
auto s = std::string{"Hello"};
```

**26.** For virtual functions, write `virtual` in the base class, but write `override` instead of `virtual` in descendent classes.
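A sketch of rule **26**; the class names are hypothetical:

``` cpp
#include <string>

class IDataSource
{
public:
    virtual ~IDataSource() = default;
    virtual std::string getName() const = 0;    /// `virtual` only in the base class
};

class MemoryDataSource : public IDataSource
{
public:
    std::string getName() const override { return "Memory"; }    /// `override` instead of `virtual`
};
```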
## Unused Features of C++ {#unused-features-of-c}

**1.** Virtual inheritance is not used.

**2.** Exception specifiers from C++03 are not used.

## Platform {#platform}

**1.** We write code for a specific platform.

But other things being equal, cross-platform or portable code is preferred.

**2.** Language: C++17.

**3.** Compiler: `gcc`. At this time (December 2017), the code is compiled using version 7.2. (It can also be compiled using `clang 4`.)

The standard library is used (`libstdc++` or `libc++`).

**4.** OS: Linux Ubuntu, not older than Precise.

**5.** Code is written for the x86\_64 CPU architecture.

The CPU instruction set is the minimum supported set among our servers. Currently, it is SSE 4.2.

**6.** Use `-Wall -Wextra -Werror` compilation flags.

**7.** Use static linking with all libraries except those that are difficult to connect to statically (see the output of the `ldd` command).

**8.** Code is developed and debugged with release settings.

## Tools {#tools}

**1.** KDevelop is a good IDE.

**2.** For debugging, use `gdb`, `valgrind` (`memcheck`), `strace`, `-fsanitize=...`, or `tcmalloc_minimal_debug`.

**3.** For profiling, use `Linux Perf`, `valgrind` (`callgrind`), or `strace -cf`.

**4.** Sources are in Git.

**5.** Assembly uses `CMake`.

**6.** Programs are released using `deb` packages.

**7.** Commits to master must not break the build.

Though only selected revisions are considered workable.

**8.** Make commits as often as possible, even if the code is only partially ready.

Use branches for this purpose.

If your code in the `master` branch is not buildable yet, exclude it from the build before the `push`. You’ll need to finish it or remove it within a few days.

**9.** For non-trivial changes, use branches and publish them on the server.
**10.** Unused code is removed from the repository.

## Libraries {#libraries}

**1.** The C++14 standard library is used (experimental extensions are allowed), as well as the `boost` and `Poco` frameworks.

**2.** If necessary, you can use any well-known libraries available in the OS package.

If there is a good solution already available, then use it, even if it means you have to install another library.

(But be prepared to remove bad libraries from code.)

**3.** You can install a library that isn’t in the packages, if the packages don’t have what you need or have an outdated version or the wrong type of compilation.

**4.** If the library is small and doesn’t have its own complex build system, put the source files in the `contrib` folder.

**5.** Preference is always given to libraries that are already in use.

## General Recommendations {#general-recommendations-1}

**1.** Write as little code as possible.

**2.** Try the simplest solution.

**3.** Don’t write code until you know how it’s going to work and how the inner loop will function.

**4.** In the simplest cases, use `using` instead of classes or structs.

**5.** If possible, do not write copy constructors, assignment operators, destructors (other than a virtual one, if the class contains at least one virtual function), move constructors or move assignment operators. In other words, the compiler-generated functions must work correctly. You can use `default`.

**6.** Code simplification is encouraged. Reduce the size of your code where possible.

## Additional Recommendations {#additional-recommendations}

**1.** Explicitly specifying `std::` for types from `stddef.h`

is not recommended. In other words, we recommend writing `size_t` instead of `std::size_t`, because it’s shorter.

It is acceptable to add `std::`.
**2.** Explicitly specifying `std::` for functions from the standard C library

is not recommended. In other words, write `memcpy` instead of `std::memcpy`.

The reason is that there are similar non-standard functions, such as `memmem`. We do use these functions on occasion. These functions do not exist in `namespace std`.

If you write `std::memcpy` instead of `memcpy` everywhere, then `memmem` without `std::` will look strange.

Nevertheless, you can still use `std::` if you prefer it.

**3.** Using functions from C when the same ones are available in the standard C++ library.

This is acceptable if it is more efficient.

For example, use `memcpy` instead of `std::copy` for copying large chunks of memory.

**4.** Multiline function arguments.

Any of the following wrapping styles are allowed:

``` cpp
function(
  T1 x1,
  T2 x2)
```

``` cpp
function(
  size_t left, size_t right,
  const & RangesInDataParts ranges,
  size_t limit)
```

``` cpp
function(size_t left, size_t right,
  const & RangesInDataParts ranges,
  size_t limit)
```

``` cpp
function(
      size_t left,
      size_t right,
      const & RangesInDataParts ranges,
      size_t limit)
```

[Original article](https://clickhouse.tech/docs/en/development/style/)
diff --git a/docs/fa/development/tests.md b/docs/fa/development/tests.md
index c9181349a2b..86bcd4d53c2 100644
--- a/docs/fa/development/tests.md
+++ b/docs/fa/development/tests.md
@@ -1,87 +1,91 @@
---
-en_copy: true
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 69
+toc_title: "How to Run ClickHouse Tests"
---

# ClickHouse Testing {#clickhouse-testing}

## Functional Tests {#functional-tests}

Functional tests are the most simple and convenient to use. Most ClickHouse features can be tested with functional tests, and they are mandatory to use for every change in ClickHouse code that can be tested that way.

Each functional test sends one or multiple queries to the running ClickHouse server and compares the result with the reference.

Tests are located in the `queries` directory. There are two subdirectories: `stateless` and `stateful`. Stateless tests run queries without any preloaded test data - they often create small synthetic datasets on the fly, within the test itself. Stateful tests require preloaded test data from Yandex.Metrica and are not available to the general public. We tend to use only `stateless` tests and avoid adding new `stateful` tests.
Each test can be one of two types: `.sql` and `.sh`. A `.sql` test is a simple SQL script that is piped to `clickhouse-client --multiquery --testmode`. A `.sh` test is a script that is run by itself.

To run all tests, use the `clickhouse-test` tool. See `--help` for the list of possible options. You can simply run all tests or run a subset of tests filtered by substring in the test name: `./clickhouse-test substring`.

The most simple way to invoke functional tests is to copy `clickhouse-client` to `/usr/bin/`, run `clickhouse-server` and then run `./clickhouse-test` from its own directory.

To add a new test, create a `.sql` or `.sh` file in the `queries/0_stateless` directory, check it manually and then generate the `.reference` file in the following way: `clickhouse-client -n --testmode < 00000_test.sql > 00000_test.reference` or `./00000_test.sh > ./00000_test.reference`.

Tests should use (create, drop, etc) only tables in the `test` database that is assumed to be created beforehand; also tests can use temporary tables.

If you want to use distributed queries in functional tests, you can leverage the `remote` table function with `127.0.0.{1..2}` addresses for the server to query itself; or you can use predefined test clusters in the server configuration file like `test_shard_localhost`.

Some tests are marked with `zookeeper`, `shard` or `long` in their names. `zookeeper` is for tests that are using ZooKeeper. `shard` is for tests that require the server to listen on `127.0.0.*`; `distributed` or `global` have the same meaning. `long` is for tests that run slightly longer than one second. You can disable these groups of tests using the `--no-zookeeper`, `--no-shard` and `--no-long` options, respectively.
## Known bugs {#known-bugs}

If we know some bugs that can be easily reproduced by functional tests, we place the prepared functional tests in the `queries/bugs` directory. These tests will be moved to `queries/0_stateless` when the bugs are fixed.

## Integration Tests {#integration-tests}

Integration tests allow testing ClickHouse in a clustered configuration and ClickHouse interaction with other servers like MySQL, Postgres, MongoDB. They are useful to emulate network splits, packet drops, etc. These tests are run under Docker and create multiple containers with various software.

See `integration/README.md` on how to run these tests.

Note that integration of ClickHouse with third-party drivers is not tested. Also we currently don’t have integration tests with our JDBC and ODBC drivers.

## Unit Tests {#unit-tests}

Unit tests are useful when you want to test not ClickHouse as a whole, but a single isolated library or class. You can enable or disable the build of tests with the `ENABLE_TESTS` CMake option. Unit tests (and other test programs) are located in `tests` subdirectories across the code. To run unit tests, type `ninja test`. Some tests use `gtest`, but some are just programs that return a non-zero exit code on test failure.

It’s not necessary to have unit tests if the code is already covered by functional tests (and functional tests are usually much more simple to use).
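For illustration, a minimal sketch of a `gtest`-based unit test; the `parseBool` function is hypothetical, and such a file would be built and linked against gtest like the other test programs:

``` cpp
#include <string>
#include <gtest/gtest.h>

/// Function under test (hypothetical).
static bool parseBool(const std::string & s)
{
    return s == "1" || s == "true";
}

TEST(ParseBool, Basic)
{
    EXPECT_TRUE(parseBool("true"));
    EXPECT_TRUE(parseBool("1"));
    EXPECT_FALSE(parseBool("0"));
}
```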
## Performance Tests {#performance-tests}

Performance tests allow measuring and comparing the performance of some isolated part of ClickHouse on synthetic queries. Tests are located at `tests/performance`. Each test is represented by an `.xml` file with a description of the test case. Tests are run with the `clickhouse performance-test` tool (that is embedded in the `clickhouse` binary). See `--help` for invocation.

Each test runs one or multiple queries (possibly with combinations of parameters) in a loop with some conditions for stop (like “maximum execution speed is not changing in three seconds”) and measures some metrics about query performance (like “maximum execution speed”). Some tests can contain preconditions on a preloaded test dataset.

If you want to improve performance of ClickHouse in some scenario, and if improvements can be observed on simple queries, it is highly recommended to write a performance test. It always makes sense to use `perf top` or other perf tools during your tests.

## Test Tools And Scripts {#test-tools-and-scripts}

Some programs in the `tests` directory are not prepared tests, but test tools. For example, for `Lexer` there is a tool `dbms/Parsers/tests/lexer` that just does tokenization of stdin and writes the colorized result to stdout. You can use these kinds of tools as code examples and for exploration and manual testing.

You can also place a pair of files `.sh` and `.reference` along with the tool to run it on some predefined input - then the script result can be compared to the `.reference` file. These kinds of tests are not automated.

## Miscellaneous Tests {#miscellanous-tests}

There are tests for external dictionaries located at `tests/external_dictionaries` and for machine learned models in `tests/external_models`. These tests are not updated and must be transferred to integration tests.
There is a separate test for quorum inserts. This test runs a ClickHouse cluster on separate servers and emulates various failure cases: network split, packet drop (between ClickHouse nodes, between ClickHouse and ZooKeeper, between ClickHouse server and client, etc.), `kill -9`, `kill -STOP` and `kill -CONT`, like [Jepsen](https://aphyr.com/tags/Jepsen). Then the test checks that all acknowledged inserts were written and all rejected inserts were not.

The quorum test was written by a separate team before ClickHouse was open-sourced. This team no longer works with ClickHouse. The test was accidentally written in Java. For these reasons, the quorum test must be rewritten and moved to integration tests.

## Manual Testing {#manual-testing}

When you develop a new feature, it is reasonable to also test it manually. You can do it with the following steps:

Build ClickHouse. Run ClickHouse from the terminal: change directory to `programs/clickhouse-server` and run it with `./clickhouse-server`. It will use configuration (`config.xml`, `users.xml` and files within `config.d` and `users.d` directories) from the current directory by default. To connect to the ClickHouse server, run `programs/clickhouse-client/clickhouse-client`.

Note that all clickhouse tools (server, client, etc) are just symlinks to a single binary named `clickhouse`. You can find this binary at `programs/clickhouse`. All tools can also be invoked as `clickhouse tool` instead of `clickhouse-tool`.

Alternatively you can install the ClickHouse package: either a stable release from the Yandex repository, or you can build the package for yourself with `./release` in the ClickHouse sources root. Then start the server with `sudo service clickhouse-server start` (or stop to stop the server). Look for logs at `/etc/clickhouse-server/clickhouse-server.log`.
When ClickHouse is already installed on your system, you can build a new `clickhouse` binary and replace the existing binary:

``` bash
$ sudo service clickhouse-server stop
$ sudo cp ./clickhouse /usr/bin/
$ sudo service clickhouse-server start
```

Also you can stop the system clickhouse-server and run your own with the same configuration but with logging to terminal:

``` bash
$ sudo service clickhouse-server stop
$ sudo -u clickhouse /usr/bin/clickhouse server --config-file /etc/clickhouse-server/config.xml
```

Example with gdb:

``` bash
$ sudo -u clickhouse gdb --args /usr/bin/clickhouse server --config-file /etc/clickhouse-server/config.xml
```

If the system clickhouse-server is already running and you don’t want to stop it, you can change the port numbers in your `config.xml` (or override them in a file in the `config.d` directory), provide an appropriate data path, and run it.

The `clickhouse` binary has almost no dependencies and works across a wide range of Linux distributions. To quick and dirty test your changes on a server, you can simply `scp` your freshly built `clickhouse` binary to your server and then run it as in the examples above.

## Testing Environment {#testing-environment}

Before publishing a release as stable, we deploy it on a testing environment. The testing environment is a cluster that processes 1/39 part of [Yandex.Metrica](https://metrica.yandex.com/) data. We share our testing environment with the Yandex.Metrica team. ClickHouse is upgraded without downtime on top of existing data. We look at first that data is processed successfully without lagging from realtime, replication continues to work and there are no issues visible to the Yandex.Metrica team. The first check can be done in the following way:

``` sql
SELECT hostName() AS h, any(version()), any(uptime()), max(UTCEventTime), count() FROM remote('example01-01-{1..3}t', merge, hits) WHERE EventDate >= today() - 2 GROUP BY h ORDER BY h;
```

In some cases we also deploy to the testing environment of our friend teams in Yandex: Market, Cloud, etc. Also we have some hardware servers that are used for development purposes.
## Load Testing {#load-testing}

After deploying to the testing environment we run load testing with queries from the production cluster. This is done manually.

Make sure you have enabled `query_log` on your production cluster.

Collect the query log for a day or more:

``` bash
$ clickhouse-client --query="SELECT DISTINCT query FROM system.query_log WHERE event_date = today() AND query LIKE '%ym:%' AND query NOT LIKE '%system.query_log%' AND type = 2 AND is_initial_query" > queries.tsv
```

This is a somewhat complicated example. `type = 2` will filter queries that are executed successfully. `query LIKE '%ym:%'` is to select relevant queries from Yandex.Metrica. `is_initial_query` is to select only queries that are initiated by the client, not by ClickHouse itself (as parts of distributed query processing).

`scp` this log to your testing cluster and run it as following:

``` bash
$ clickhouse benchmark --concurrency 16 < queries.tsv
```

(probably you also want to specify a `--user`)

Then leave it for a night or weekend and go take a rest.

You should check that `clickhouse-server` doesn’t crash, the memory footprint is bounded and the performance is not degrading over time.

Precise query execution timings are not recorded and not compared due to the high variability of queries and environment.

## Build Tests {#build-tests}

Build tests allow checking that the build is not broken on various alternative configurations and on some foreign systems. Tests are located at the `ci` directory. They run the build from source inside Docker, Vagrant, and sometimes with `qemu-user-static` inside Docker. These tests are under development and test runs are not automated.

Motivation:

Normally we release and run all tests on a single variant of the ClickHouse build. But there are alternative build variants that are not thoroughly tested. Examples:

- build on FreeBSD;
- build on Debian with libraries from system packages;
- build with shared linking of libraries;
- build on the AArch64 platform;
- build on the PowerPC platform.
+- ساخت در بورس; +- ساخت در دبیان با کتابخانه ها از بسته های سیستم; +- ساخت با لینک مشترک از کتابخانه ها; +- ساخت پلت فرم AArch64; +- ساخت بر روی پلت فرم پاور. -For example, build with system packages is bad practice, because we cannot guarantee what exact version of packages a system will have. But this is really needed by Debian maintainers. For this reason we at least have to support this variant of build. Another example: shared linking is a common source of trouble, but it is needed for some enthusiasts. +مثلا, ساخت با بسته های سیستم عمل بد است, چرا که ما نمی تواند تضمین کند که چه نسخه دقیق از بسته های یک سیستم باید. اما این واقعا توسط نگهداری دبیان مورد نیاز است. به همین دلیل ما حداقل باید برای حمایت از این نوع ساخت. مثال دیگر: ارتباط مشترک یک منبع مشترک از مشکل است, اما برای برخی از علاقه مندان مورد نیاز است. -Though we cannot run all tests on all variant of builds, we want to check at least that various build variants are not broken. For this purpose we use build tests. +هر چند ما می توانیم تمام تست در همه نوع از ایجاد اجرا کنید, ما می خواهیم برای بررسی حداقل که انواع ساخت های مختلف شکسته نمی. برای این منظور ما از تست های ساخت استفاده می کنیم. -## Testing For Protocol Compatibility {#testing-for-protocol-compatibility} +## تست برای سازگاری پروتکل {#testing-for-protocol-compatibility} -When we extend ClickHouse network protocol, we test manually that old clickhouse-client works with new clickhouse-server and new clickhouse-client works with old clickhouse-server (simply by running binaries from corresponding packages). +هنگامی که ما گسترش clickhouse پروتکل شبکه ما تست دستی که clickhouse-مشتری با این نسخهها کار جدید clickhouse-سرور و جدید clickhouse-مشتری با این نسخهها کار با clickhouse-سرور (به سادگی با در حال اجرا فایل های باینری از مربوطه بسته). -## Help From The Compiler {#help-from-the-compiler} +## کمک از کامپایلر {#help-from-the-compiler} -Main ClickHouse code (that is located in `dbms` directory) is built with `-Wall -Wextra -Werror` and with some additional enabled warnings. Although these options are not enabled for third-party libraries. +کد اصلی کلیک (که در واقع `dbms` فهرست راهنما) با ساخته شده است `-Wall -Wextra -Werror` و با برخی از هشدارهای اضافی را فعال کنید. اگر چه این گزینه ها برای کتابخانه های شخص ثالث فعال نیست. -Clang has even more useful warnings - you can look for them with `-Weverything` and pick something to default build. +کلانگ هشدارهای بیشتری دارد - شما می توانید با `-Weverything` و انتخاب چیزی به طور پیش فرض ساخت. -For production builds, gcc is used (it still generates slightly more efficient code than clang). For development, clang is usually more convenient to use. You can build on your own machine with debug mode (to save battery of your laptop), but please note that compiler is able to generate more warnings with `-O3` due to better control flow and inter-procedure analysis. When building with clang, `libc++` is used instead of `libstdc++` and when building with debug mode, debug version of `libc++` is used that allows to catch more errors at runtime. +برای تولید ساخت, شورای همکاری خلیج فارس استفاده می شود (هنوز تولید کد کمی موثر تر از صدای جرنگ جرنگ). برای توسعه, صدای جرنگ جرنگ است که معمولا راحت تر به استفاده از. شما می توانید بر روی دستگاه خود را با حالت اشکال زدایی ساخت (برای صرفه جویی در باتری لپ تاپ خود را), اما لطفا توجه داشته باشید که کامپایلر قادر به تولید هشدارهای بیشتر با است `-O3` با توجه به جریان کنترل بهتر و تجزیه و تحلیل بین روش. 
هنگام ساخت با صدای جرنگ جرنگ, `libc++` به جای استفاده `libstdc++` و هنگامی که ساختمان با حالت اشکال زدایی, نسخه اشکال زدایی از `libc++` استفاده شده است که اجازه می دهد تا برای گرفتن خطاهای بیشتر در زمان اجرا. ## Sanitizers {#sanitizers} -**Address sanitizer**. -We run functional and integration tests under ASan on per-commit basis. +**نشانی ضد عفونی کننده**. +ما تست های کاربردی و یکپارچه سازی را تحت عنوان بر اساس هر متعهد اجرا می کنیم. **Valgrind (Memcheck)**. -We run functional tests under Valgrind overnight. It takes multiple hours. Currently there is one known false positive in `re2` library, see [this article](https://research.swtch.com/sparse). +ما یک شبه تست های کاربردی را تحت ارزیابی قرار می دهیم. چند ساعت طول می کشد. در حال حاضر یک مثبت کاذب شناخته شده در وجود دارد `re2` کتابخانه را ببینید [این مقاله](https://research.swtch.com/sparse). -**Undefined behaviour sanitizer.** -We run functional and integration tests under ASan on per-commit basis. +**تعریف نشده رفتار ضد عفونی کننده.** +ما تست های کاربردی و یکپارچه سازی را تحت عنوان بر اساس هر متعهد اجرا می کنیم. -**Thread sanitizer**. -We run functional tests under TSan on per-commit basis. We still don’t run integration tests under TSan on per-commit basis. +**ضدعفونی کننده موضوع**. +ما تست های کاربردی تحت تسان بر اساس هر مرتکب اجرا. ما هنوز تست های ادغام تحت تسان بر اساس هر متعهد اجرا کنید. -**Memory sanitizer**. -Currently we still don’t use MSan. +**ضد عفونی کننده حافظه**. +در حال حاضر ما هنوز از خانم استفاده نمی کنیم. -**Debug allocator.** -Debug version of `jemalloc` is used for debug build. +**اشکال زدایی تخصیص.** +نسخه اشکال زدایی از `jemalloc` برای ساخت اشکال زدایی استفاده می شود. ## Fuzzing {#fuzzing} -We use simple fuzz test to generate random SQL queries and to check that the server doesn’t die. Fuzz testing is performed with Address sanitizer. You can find it in `00746_sql_fuzzy.pl`. This test should be run continuously (overnight and longer). +ما با استفاده از تست ریش ریش شدن ساده برای تولید پرس و جو تصادفی گذاشتن و بررسی کنید که سرور نمی میرند. تست ریش شدن با نشانی ضد عفونی کننده انجام می شود. شما می توانید این را در `00746_sql_fuzzy.pl`. این تست باید به طور مداوم اجرا شود (یک شبه و طولانی تر). -As of December 2018, we still don’t use isolated fuzz testing of library code. +همانطور که از دسامبر 2018, ما هنوز تست ریش شدن جدا شده از کد کتابخانه استفاده نمی. -## Security Audit {#security-audit} +## ممیزی امنیتی {#security-audit} -People from Yandex Cloud department do some basic overview of ClickHouse capabilities from the security standpoint. +مردم از یاندکس ابر بخش انجام برخی از بررسی اجمالی اساسی از قابلیت های تاتر از نقطه نظر امنیت. -## Static Analyzers {#static-analyzers} +## تجزیه و تحلیل استاتیک {#static-analyzers} -We run `PVS-Studio` on per-commit basis. We have evaluated `clang-tidy`, `Coverity`, `cppcheck`, `PVS-Studio`, `tscancode`. You will find instructions for usage in `tests/instructions/` directory. Also you can read [the article in russian](https://habr.com/company/yandex/blog/342018/). +فرار میکنیم `PVS-Studio` بر اساس هر مرتکب. ما ارزیابی کرده ایم `clang-tidy`, `Coverity`, `cppcheck`, `PVS-Studio`, `tscancode`. شما دستورالعمل برای استفاده در پیدا `tests/instructions/` فهرست راهنما. همچنین شما می توانید به عنوان خوانده شده [مقاله در روسیه](https://habr.com/company/yandex/blog/342018/). -If you use `CLion` as an IDE, you can leverage some `clang-tidy` checks out of the box. +در صورت استفاده `CLion` به عنوان محیط برنامه نویسی, شما می توانید اهرم برخی از `clang-tidy` چک از جعبه. 
-## Hardening {#hardening} +## سخت شدن {#hardening} -`FORTIFY_SOURCE` is used by default. It is almost useless, but still makes sense in rare cases and we don’t disable it. +`FORTIFY_SOURCE` به طور پیش فرض استفاده می شود. این تقریبا بی فایده است, اما هنوز هم حس می کند در موارد نادر و ما این کار را غیر فعال کنید. -## Code Style {#code-style} +## سبک کد {#code-style} -Code style rules are described [here](https://clickhouse.tech/docs/en/development/style/). +قوانین سبک کد شرح داده شده است [اینجا](https://clickhouse.tech/docs/en/development/style/). -To check for some common style violations, you can use `utils/check-style` script. +برای بررسی برخی از نقض سبک مشترک, شما می توانید استفاده کنید `utils/check-style` خط نوشتن. -To force proper style of your code, you can use `clang-format`. File `.clang-format` is located at the sources root. It mostly corresponding with our actual code style. But it’s not recommended to apply `clang-format` to existing files because it makes formatting worse. You can use `clang-format-diff` tool that you can find in clang source repository. +به زور سبک مناسب از کد خود را, شما می توانید استفاده کنید `clang-format`. پرونده `.clang-format` در منابع ریشه واقع شده است. این بیشتر با سبک کد واقعی ما مطابقت دارد. اما توصیه نمی شود که اعمال شود `clang-format` به فایل های موجود چون باعث می شود قالب بندی بدتر است. شما می توانید استفاده کنید `clang-format-diff` ابزاری است که شما می توانید در مخزن منبع صدای جرنگ جرنگ پیدا. -Alternatively you can try `uncrustify` tool to reformat your code. Configuration is in `uncrustify.cfg` in the sources root. It is less tested than `clang-format`. +متناوبا شما می توانید سعی کنید `uncrustify` ابزار مجدد کد خود را. پیکربندی در `uncrustify.cfg` در منابع ریشه. این کمتر از تست شده است `clang-format`. -`CLion` has its own code formatter that has to be tuned for our code style. +`CLion` فرمت کد خود را دارد که باید برای سبک کد ما تنظیم شود. -## Metrica B2B Tests {#metrica-b2b-tests} +## تست های متریکا ب2 {#metrica-b2b-tests} -Each ClickHouse release is tested with Yandex Metrica and AppMetrica engines. Testing and stable versions of ClickHouse are deployed on VMs and run with a small copy of Metrica engine that is processing fixed sample of input data. Then results of two instances of Metrica engine are compared together. +هر clickhouse نسخه تست شده با yandex metrica و appmetrica موتورهای. تست و نسخه های پایدار از تاتر در ماشین های مجازی مستقر و اجرا با یک کپی کوچک از موتور متریکا است که پردازش نمونه ثابت از داده های ورودی. سپس نتایج حاصل از دو نمونه از موتور متریکا با هم مقایسه می شوند. -These tests are automated by separate team. Due to high number of moving parts, tests are fail most of the time by completely unrelated reasons, that are very difficult to figure out. Most likely these tests have negative value for us. Nevertheless these tests was proved to be useful in about one or two times out of hundreds. +این تست ها توسط تیم جداگانه خودکار می شوند. با توجه به تعداد زیادی از قطعات متحرک, تست شکست بیشتر از زمان به دلایل کاملا نامربوط, که بسیار دشوار است برای کشف کردن. به احتمال زیاد این تست ها ارزش منفی برای ما دارند. با این وجود این تست در حدود یک یا دو بار از صدها مفید ثابت شد. -## Test Coverage {#test-coverage} +## پوشش تست {#test-coverage} -As of July 2018 we don’t track test coverage. +تا جولای 2018 ما پوشش تست را پیگیری نمی کنیم. -## Test Automation {#test-automation} +## اتوماسیون تست {#test-automation} -We run tests with Yandex internal CI and job automation system named “Sandbox”. 
+ما تست ها را با سیستم اتوماسیون داخلی یاندکس اجرا می کنیم “Sandbox”. -Build jobs and tests are run in Sandbox on per commit basis. Resulting packages and test results are published in GitHub and can be downloaded by direct links. Artifacts are stored eternally. When you send a pull request on GitHub, we tag it as “can be tested” and our CI system will build ClickHouse packages (release, debug, with address sanitizer, etc) for you. +ساخت شغل و تست ها در گودال ماسهبازی در هر مرتکب اساس اجرا شود. نتیجه بسته ها و نتایج تست در گیتهاب منتشر شده و می تواند توسط لینک مستقیم دانلود. مصنوعات ابد ذخیره می شود. هنگامی که شما یک درخواست کشش ارسال در گیتهاب, ما برچسب به عنوان “can be tested” و ما CI سیستم خواهد ساخت ClickHouse بسته (نسخه debug با آدرس ضد عفونی کننده و غیره) را برای شما. -We don’t use Travis CI due to the limit on time and computational power. -We don’t use Jenkins. It was used before and now we are happy we are not using Jenkins. +ما از تراویس سی به دلیل محدودیت در زمان و قدرت محاسباتی استفاده نمی کنیم. +ما از جنکینز استفاده نمیکنیم. این قبل از استفاده شد و در حال حاضر ما خوشحال ما با استفاده از جنکینز نیست. -[Original article](https://clickhouse.tech/docs/en/development/tests/) -velopment/tests/) +[مقاله اصلی](https://clickhouse.tech/docs/en/development/tests/) +velopment/آزمون/) diff --git a/docs/fa/engines/database_engines/index.md b/docs/fa/engines/database_engines/index.md new file mode 100644 index 00000000000..fdc0816f35b --- /dev/null +++ b/docs/fa/engines/database_engines/index.md @@ -0,0 +1,21 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_folder_title: Database Engines +toc_priority: 27 +toc_title: "\u0645\u0639\u0631\u0641\u06CC \u0634\u0631\u06A9\u062A" +--- + +# موتورهای پایگاه داده {#database-engines} + +موتورهای پایگاه داده به شما اجازه کار با جداول. + +به طور پیش فرض, تاتر با استفاده از موتور پایگاه داده مادری خود, فراهم می کند که تنظیم [موتورهای جدول](../../engines/table_engines/index.md) و یک [شمارهگیری](../../sql_reference/syntax.md). + +شما همچنین می توانید موتورهای پایگاه داده زیر استفاده کنید: + +- [MySQL](mysql.md) + +- [تنبل](lazy.md) + +[مقاله اصلی](https://clickhouse.tech/docs/en/database_engines/) diff --git a/docs/fa/engines/database_engines/lazy.md b/docs/fa/engines/database_engines/lazy.md new file mode 100644 index 00000000000..6139c85337f --- /dev/null +++ b/docs/fa/engines/database_engines/lazy.md @@ -0,0 +1,18 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 31 +toc_title: "\u062A\u0646\u0628\u0644" +--- + +# تنبل {#lazy} + +نگه می دارد جداول در رم تنها `expiration_time_in_seconds` ثانیه پس از دسترسی گذشته. را می توان تنها با استفاده \*جداول ورود به سیستم. + +این برای ذخیره سازی بسیاری از جداول کوچک \*ورود به سیستم بهینه شده است که فاصله زمانی طولانی بین دسترسی ها وجود دارد. 
+
+## ایجاد یک پایگاه داده {#creating-a-database}
+
+    CREATE DATABASE testlazy ENGINE = Lazy(expiration_time_in_seconds);
+
+[مقاله اصلی](https://clickhouse.tech/docs/en/database_engines/lazy/)
diff --git a/docs/fa/engines/database_engines/mysql.md b/docs/fa/engines/database_engines/mysql.md
new file mode 100644
index 00000000000..e77288257d9
--- /dev/null
+++ b/docs/fa/engines/database_engines/mysql.md
@@ -0,0 +1,135 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 30
+toc_title: MySQL
+---
+
+# MySQL {#mysql}
+
+امکان اتصال به پایگاه داده های روی یک سرور MySQL راه دور و اجرای کوئری های `INSERT` و `SELECT` برای تبادل داده بین ClickHouse و MySQL را فراهم می کند.
+
+موتور پایگاه داده `MySQL` کوئری ها را به سرور MySQL ترجمه می کند, بنابراین می توانید عملیاتی مانند `SHOW TABLES` یا `SHOW CREATE TABLE` را انجام دهید.
+
+نمی توانید کوئری های زیر را انجام دهید:
+
+- `RENAME`
+- `CREATE TABLE`
+- `ALTER`
+
+## ایجاد یک پایگاه داده {#creating-a-database}
+
+``` sql
+CREATE DATABASE [IF NOT EXISTS] db_name [ON CLUSTER cluster]
+ENGINE = MySQL('host:port', 'database', 'user', 'password')
+```
+
+**پارامترهای موتور**
+
+- `host:port` — MySQL server address.
+- `database` — Remote database name.
+- `user` — MySQL user.
+- `password` — User password.
+
+## پشتیبانی از انواع داده ها {#data_types-support}
+
+| MySQL                            | ClickHouse                                                 |
+|----------------------------------|------------------------------------------------------------|
+| UNSIGNED TINYINT                 | [UInt8](../../sql_reference/data_types/int_uint.md)        |
+| TINYINT                          | [Int8](../../sql_reference/data_types/int_uint.md)         |
+| UNSIGNED SMALLINT                | [UInt16](../../sql_reference/data_types/int_uint.md)       |
+| SMALLINT                         | [Int16](../../sql_reference/data_types/int_uint.md)        |
+| UNSIGNED INT, UNSIGNED MEDIUMINT | [UInt32](../../sql_reference/data_types/int_uint.md)       |
+| INT, MEDIUMINT                   | [Int32](../../sql_reference/data_types/int_uint.md)        |
+| UNSIGNED BIGINT                  | [UInt64](../../sql_reference/data_types/int_uint.md)       |
+| BIGINT                           | [Int64](../../sql_reference/data_types/int_uint.md)        |
+| FLOAT                            | [Float32](../../sql_reference/data_types/float.md)         |
+| DOUBLE                           | [Float64](../../sql_reference/data_types/float.md)         |
+| DATE                             | [تاریخ](../../sql_reference/data_types/date.md)            |
+| DATETIME, TIMESTAMP              | [DateTime](../../sql_reference/data_types/datetime.md)     |
+| BINARY                           | [رشته ثابت](../../sql_reference/data_types/fixedstring.md) |
+
+همه انواع داده دیگر MySQL به [String](../../sql_reference/data_types/string.md) تبدیل می شوند.
+
+[Nullable](../../sql_reference/data_types/nullable.md) پشتیبانی می شود.
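+
+به عنوان یک طرح فرضی از نگاشت انواع (نام پایگاه داده, جدول و ستون ها صرفا برای مثال فرض شده اند), ستونی از نوع `INT NULL` در MySQL در سمت ClickHouse به صورت `Nullable(Int32)` و ستونی از نوع `VARCHAR` به صورت `String` دیده می شود:
+
+``` sql
+-- Hypothetical sketch: assumes a MySQL table `demo(id INT NULL, name VARCHAR(32))`
+-- already exists in the remote database `test`.
+CREATE DATABASE mysql_demo ENGINE = MySQL('localhost:3306', 'test', 'my_user', 'user_password')
+
+-- Per the mapping table above, `id` is reported as Nullable(Int32)
+-- and `name` as String:
+DESCRIBE TABLE mysql_demo.demo
+```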
+ +## نمونه هایی از استفاده {#examples-of-use} + +جدول در خروجی زیر: + +``` text +mysql> USE test; +Database changed + +mysql> CREATE TABLE `mysql_table` ( + -> `int_id` INT NOT NULL AUTO_INCREMENT, + -> `float` FLOAT NOT NULL, + -> PRIMARY KEY (`int_id`)); +Query OK, 0 rows affected (0,09 sec) + +mysql> insert into mysql_table (`int_id`, `float`) VALUES (1,2); +Query OK, 1 row affected (0,00 sec) + +mysql> select * from mysql_table; ++------+-----+ +| int_id | value | ++------+-----+ +| 1 | 2 | ++------+-----+ +1 row in set (0,00 sec) +``` + +پایگاه داده در خانه, تبادل داده ها با سرور خروجی زیر: + +``` sql +CREATE DATABASE mysql_db ENGINE = MySQL('localhost:3306', 'test', 'my_user', 'user_password') +``` + +``` sql +SHOW DATABASES +``` + +``` text +┌─name─────┐ +│ default │ +│ mysql_db │ +│ system │ +└──────────┘ +``` + +``` sql +SHOW TABLES FROM mysql_db +``` + +``` text +┌─name─────────┐ +│ mysql_table │ +└──────────────┘ +``` + +``` sql +SELECT * FROM mysql_db.mysql_table +``` + +``` text +┌─int_id─┬─value─┐ +│ 1 │ 2 │ +└────────┴───────┘ +``` + +``` sql +INSERT INTO mysql_db.mysql_table VALUES (3,4) +``` + +``` sql +SELECT * FROM mysql_db.mysql_table +``` + +``` text +┌─int_id─┬─value─┐ +│ 1 │ 2 │ +│ 3 │ 4 │ +└────────┴───────┘ +``` + +[مقاله اصلی](https://clickhouse.tech/docs/en/database_engines/mysql/) diff --git a/docs/fa/engines/index.md b/docs/fa/engines/index.md new file mode 100644 index 00000000000..dcebf255df6 --- /dev/null +++ b/docs/fa/engines/index.md @@ -0,0 +1,8 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_folder_title: Engines +toc_priority: 25 +--- + + diff --git a/docs/fa/engines/table_engines/index.md b/docs/fa/engines/table_engines/index.md new file mode 100644 index 00000000000..edef8c0a281 --- /dev/null +++ b/docs/fa/engines/table_engines/index.md @@ -0,0 +1,85 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_folder_title: Table Engines +toc_priority: 26 +toc_title: "\u0645\u0639\u0631\u0641\u06CC \u0634\u0631\u06A9\u062A" +--- + +# موتورهای جدول {#table_engines} + +موتور جدول (نوع جدول) تعیین می کند: + +- چگونه و در کجا اطلاعات ذخیره شده است, جایی که برای نوشتن به, و از کجا به خواندن از. +- که نمایش داده شد پشتیبانی می شوند, و چگونه. +- همزمان دسترسی به داده ها. +- استفاده از شاخص, در صورت وجود. +- این که اجرای درخواست چند رشته ای امکان پذیر باشد. +- پارامترهای تکرار داده. + +## خانواده موتور {#engine-families} + +### ادغام {#mergetree} + +موتورهای جدول جهانی ترین و کاربردی برای وظایف بار بالا. اموال به اشتراک گذاشته شده توسط این موتور درج داده های سریع با پردازش داده های پس زمینه های بعدی است. `MergeTree` موتورهای خانواده از تکرار داده ها پشتیبانی می کنند (با [تکرار\*](mergetree_family/replication.md) نسخه موتورهای) پارتیشن بندی و ویژگی های دیگر در موتورهای دیگر پشتیبانی نمی شود. + +موتورهای در خانواده: + +- [ادغام](mergetree_family/mergetree.md) +- [جایگزینی](mergetree_family/replacingmergetree.md) +- [سامینگمرگتری](mergetree_family/summingmergetree.md) +- [ریزدانه](mergetree_family/aggregatingmergetree.md) +- [سقوط غذای اصلی](mergetree_family/collapsingmergetree.md) +- [در حال بارگذاری](mergetree_family/versionedcollapsingmergetree.md) +- [نمودار](mergetree_family/graphitemergetree.md) + +### ثبت {#log} + +سبک [موتورها](log_family/index.md) با حداقل قابلیت. هنگامی که شما نیاز به سرعت نوشتن بسیاری از جداول کوچک (تا حدود 1 میلیون ردیف) و خواندن بعد به عنوان یک کل موثر ترین هستند. 
+موتورهای در خانواده:
+
+- [جمع شدن](log_family/tinylog.md)
+- [خط زدن](log_family/stripelog.md)
+- [ثبت](log_family/log.md)
+
+### موتورهای یکپارچه سازی {#integration-engines}
+
+موتورهایی برای ارتباط با سایر سیستم های ذخیره سازی و پردازش داده.
+
+موتورهای در خانواده:
+
+- [کافکا](integrations/kafka.md)
+- [MySQL](integrations/mysql.md)
+- [ODBC](integrations/odbc.md)
+- [JDBC](integrations/jdbc.md)
+- [HDFS](integrations/hdfs.md)
+
+### موتورهای ویژه {#special-engines}
+
+موتورهای در خانواده:
+
+- [توزیع شده](special/distributed.md)
+- [MaterializedView](special/materializedview.md)
+- [واژهنامه](special/dictionary.md)
+- [ادغام](special/merge.md)
+- [پرونده](special/file.md)
+- [خالی](special/null.md)
+- [تنظیم](special/set.md)
+- [پیوستن](special/join.md)
+- [URL](special/url.md)
+- [نما](special/view.md)
+- [حافظه](special/memory.md)
+- [بافر](special/buffer.md)
+
+## ستونهای مجازی {#table_engines-virtual-columns}
+
+ستون مجازی یک ویژگی جدایی ناپذیر موتور جدول است که در کد منبع موتور تعریف شده است.
+
+نیازی نیست ستون های مجازی را در کوئری `CREATE TABLE` مشخص کنید و آنها را در نتایج کوئری های `SHOW CREATE TABLE` و `DESCRIBE TABLE` نمی بینید. ستون های مجازی همچنین فقط خواندنی اند, بنابراین نمی توانید داده ای در ستون های مجازی درج کنید.
+
+برای انتخاب داده از یک ستون مجازی, باید نام آن را در کوئری `SELECT` مشخص کنید. `SELECT *` مقادیر ستون های مجازی را برنمی گرداند.
+
+اگر جدولی با ستونی هم نام یکی از ستون های مجازی جدول ایجاد کنید, ستون مجازی غیرقابل دسترس می شود. توصیه نمی کنیم این کار را بکنید. برای کمک به پرهیز از تداخل, نام ستون های مجازی معمولا با خط زیر (underscore) پیشونددار است.
+
+[مقاله اصلی](https://clickhouse.tech/docs/en/operations/table_engines/)
diff --git a/docs/fa/engines/table_engines/integrations/hdfs.md b/docs/fa/engines/table_engines/integrations/hdfs.md
new file mode 100644
index 00000000000..f2449898d04
--- /dev/null
+++ b/docs/fa/engines/table_engines/integrations/hdfs.md
@@ -0,0 +1,123 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 36
+toc_title: HDFS
+---
+
+# HDFS {#table_engines-hdfs}
+
+این موتور یکپارچگی با اکوسیستم [Apache Hadoop](https://en.wikipedia.org/wiki/Apache_Hadoop) را فراهم می کند و اجازه می دهد داده های روی [HDFS](https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html) از طریق ClickHouse مدیریت شوند. این موتور مشابه
+موتورهای [پرونده](../special/file.md) و [URL](../special/url.md) است, اما ویژگی های مخصوص Hadoop را فراهم می کند.
+
+## استفاده {#usage}
+
+``` sql
+ENGINE = HDFS(URI, format)
+```
+
+پارامتر `URI` نشانی کامل فایل در HDFS است.
+پارامتر `format` یکی از فرمت های فایل موجود را مشخص می کند. برای اجرای
+کوئری های `SELECT`, فرمت باید برای ورودی پشتیبانی شود, و برای اجرای
+کوئری های `INSERT` برای خروجی. فرمت های موجود در بخش
+[Formats](../../../interfaces/formats.md#formats) فهرست شده اند.
+بخش مسیر `URI` ممکن است حاوی glob باشد. در این صورت جدول فقط خواندنی خواهد بود.
+
+**مثال:**
+
+**1.** تنظیم جدول `hdfs_engine_table`:
+
+``` sql
+CREATE TABLE hdfs_engine_table (name String, value UInt32) ENGINE=HDFS('hdfs://hdfs1:9000/other_storage', 'TSV')
+```
+
+**2.** پر کردن فایل:
+
+``` sql
+INSERT INTO hdfs_engine_table VALUES ('one', 1), ('two', 2), ('three', 3)
+```
+
+**3.** پرسوجوی داده:
+
+``` sql
+SELECT * FROM hdfs_engine_table LIMIT 2
+```
+
+``` text
+┌─name─┬─value─┐
+│ one │ 1 │
+│ two │ 2 │
+└──────┴───────┘
+```
+
+## جزییات پیاده سازی {#implementation-details}
+
+- خواندن ها و نوشتن ها می توانند موازی باشند.
+- پشتیبانی نمی شود:
+    - عملیات `ALTER` و `SELECT...SAMPLE`.
+    - شاخص ها.
+    - تکرار (replication).
+
+**glob ها در مسیر**
+
+چندین جزء مسیر می توانند glob داشته باشند. برای پردازش شدن, فایل باید وجود داشته باشد و با کل الگوی مسیر مطابقت کند. فهرست فایل ها در حین `SELECT` تعیین می شود (نه در لحظه `CREATE`).
+
+- `*` — Substitutes any number of any characters except `/` از جمله رشته خالی.
+- `?` — Substitutes any single character.
+- `{some_string,another_string,yet_another_one}` — Substitutes any of strings `'some_string', 'another_string', 'yet_another_one'`.
+- `{N..M}` — Substitutes any number in range from N to M including both borders.
+
+ساختارهای `{}` مشابه تابع جدول [remote](../../../sql_reference/table_functions/remote.md) هستند.
+
+**مثال**
+
+1. فرض کنید چندین فایل در قالب TSV با URI های زیر در HDFS داریم:
+
+- ‘hdfs://hdfs1:9000/some\_dir/some\_file\_1’
+- ‘hdfs://hdfs1:9000/some\_dir/some\_file\_2’
+- ‘hdfs://hdfs1:9000/some\_dir/some\_file\_3’
+- ‘hdfs://hdfs1:9000/another\_dir/some\_file\_1’
+- ‘hdfs://hdfs1:9000/another\_dir/some\_file\_2’
+- ‘hdfs://hdfs1:9000/another\_dir/some\_file\_3’
+
+1. راه های مختلفی برای ایجاد جدولی متشکل از هر شش فایل وجود دارد:
+
+
+
+``` sql
+CREATE TABLE table_with_range (name String, value UInt32) ENGINE = HDFS('hdfs://hdfs1:9000/{some,another}_dir/some_file_{1..3}', 'TSV')
+```
+
+راه دیگر:
+
+``` sql
+CREATE TABLE table_with_question_mark (name String, value UInt32) ENGINE = HDFS('hdfs://hdfs1:9000/{some,another}_dir/some_file_?', 'TSV')
+```
+
+جدول شامل همه فایل های هر دو دایرکتوری است (همه فایل ها باید با فرمت و طرحواره توصیف شده در کوئری سازگار باشند):
+
+``` sql
+CREATE TABLE table_with_asterisk (name String, value UInt32) ENGINE = HDFS('hdfs://hdfs1:9000/{some,another}_dir/*', 'TSV')
+```
+
+!!! warning "اخطار"
+    اگر فهرست فایل ها حاوی محدوده های عددی با صفر ابتدایی باشد, برای هر رقم جداگانه از ساختار با آکولاد استفاده کنید یا از `?` استفاده کنید.
+
+**مثال**
+
+ایجاد جدول با فایل هایی به نام `file000`, `file001`, … , `file999`:
+
+``` sql
+CREATE TABLE big_table (name String, value UInt32) ENGINE = HDFS('hdfs://hdfs1:9000/big_dir/file{0..9}{0..9}{0..9}', 'CSV')
+```
+
+## ستونهای مجازی {#virtual-columns}
+
+- `_path` — Path to the file.
+- `_file` — Name of the file.
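+
+به عنوان یک طرح کوتاه (با فرض جدول `hdfs_engine_table` از مثال بالا), برای خواندن ستون های مجازی باید نامشان را صریحا در `SELECT` بیاورید, چون `SELECT *` آنها را برنمی گرداند:
+
+``` sql
+-- Virtual columns must be named explicitly; SELECT * does not return them.
+SELECT name, value, _path, _file
+FROM hdfs_engine_table
+LIMIT 2
+```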
+ +**همچنین نگاه کنید به** + +- [ستونهای مجازی](../index.md#table_engines-virtual_columns) + +[مقاله اصلی](https://clickhouse.tech/docs/en/operations/table_engines/hdfs/) diff --git a/docs/fa/engines/table_engines/integrations/index.md b/docs/fa/engines/table_engines/integrations/index.md new file mode 100644 index 00000000000..8d7196c323a --- /dev/null +++ b/docs/fa/engines/table_engines/integrations/index.md @@ -0,0 +1,8 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_folder_title: Integrations +toc_priority: 30 +--- + + diff --git a/docs/fa/engines/table_engines/integrations/jdbc.md b/docs/fa/engines/table_engines/integrations/jdbc.md new file mode 100644 index 00000000000..9afa23dae8d --- /dev/null +++ b/docs/fa/engines/table_engines/integrations/jdbc.md @@ -0,0 +1,90 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 34 +toc_title: JDBC +--- + +# JDBC {#table-engine-jdbc} + +اجازه می دهد تا تاتر برای اتصال به پایگاه داده های خارجی از طریق [JDBC](https://en.wikipedia.org/wiki/Java_Database_Connectivity). + +برای پیاده سازی اتصال جدی بی سی, خانه با استفاده از برنامه جداگانه [هومز-جد بی سی-پل](https://github.com/alex-krash/clickhouse-jdbc-bridge) که باید به عنوان یک شبح اجرا شود. + +این موتور از [Nullable](../../../sql_reference/data_types/nullable.md) نوع داده. + +## ایجاد یک جدول {#creating-a-table} + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name +( + columns list... +) +ENGINE = JDBC(dbms_uri, external_database, external_table) +``` + +**پارامترهای موتور** + +- `dbms_uri` — URI of an external DBMS. + + قالب: `jdbc:://:/?user=&password=`. + به عنوان مثال برای خروجی زیر: `jdbc:mysql://localhost:3306/?user=root&password=root`. + +- `external_database` — Database in an external DBMS. + +- `external_table` — Name of the table in `external_database`. + +## مثال طریقه استفاده {#usage-example} + +ایجاد یک جدول در سرور خروجی زیر با اتصال مستقیم با مشتری کنسول: + +``` text +mysql> CREATE TABLE `test`.`test` ( + -> `int_id` INT NOT NULL AUTO_INCREMENT, + -> `int_nullable` INT NULL DEFAULT NULL, + -> `float` FLOAT NOT NULL, + -> `float_nullable` FLOAT NULL DEFAULT NULL, + -> PRIMARY KEY (`int_id`)); +Query OK, 0 rows affected (0,09 sec) + +mysql> insert into test (`int_id`, `float`) VALUES (1,2); +Query OK, 1 row affected (0,00 sec) + +mysql> select * from test; ++------+----------+-----+----------+ +| int_id | int_nullable | float | float_nullable | ++------+----------+-----+----------+ +| 1 | NULL | 2 | NULL | ++------+----------+-----+----------+ +1 row in set (0,00 sec) +``` + +ایجاد یک جدول در سرور کلیک و انتخاب داده ها از: + +``` sql +CREATE TABLE jdbc_table +( + `int_id` Int32, + `int_nullable` Nullable(Int32), + `float` Float32, + `float_nullable` Nullable(Float32) +) +ENGINE JDBC('jdbc:mysql://localhost:3306/?user=root&password=root', 'test', 'test') +``` + +``` sql +SELECT * +FROM jdbc_table +``` + +``` text +┌─int_id─┬─int_nullable─┬─float─┬─float_nullable─┐ +│ 1 │ ᴺᵁᴸᴸ │ 2 │ ᴺᵁᴸᴸ │ +└────────┴──────────────┴───────┴────────────────┘ +``` + +## همچنین نگاه کنید به {#see-also} + +- [تابع جدول جدی بی سی](../../../sql_reference/table_functions/jdbc.md). 
+ +[مقاله اصلی](https://clickhouse.tech/docs/en/operations/table_engines/jdbc/) diff --git a/docs/fa/engines/table_engines/integrations/kafka.md b/docs/fa/engines/table_engines/integrations/kafka.md new file mode 100644 index 00000000000..52fe6174b70 --- /dev/null +++ b/docs/fa/engines/table_engines/integrations/kafka.md @@ -0,0 +1,176 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 32 +toc_title: "\u06A9\u0627\u0641\u06A9\u0627" +--- + +# کافکا {#kafka} + +این موتور با این نسخهها کار [نمایی کافکا](http://kafka.apache.org/). + +کافکا به شما امکان می دهد: + +- انتشار یا اشتراک در جریان داده ها. +- سازماندهی ذخیره سازی مقاوم در برابر خطا. +- روند جریان به عنوان در دسترس تبدیل شده است. + +## ایجاد یک جدول {#table_engine-kafka-creating-a-table} + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] +( + name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], + name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], + ... +) ENGINE = Kafka() +SETTINGS + kafka_broker_list = 'host:port', + kafka_topic_list = 'topic1,topic2,...', + kafka_group_name = 'group_name', + kafka_format = 'data_format'[,] + [kafka_row_delimiter = 'delimiter_symbol',] + [kafka_schema = '',] + [kafka_num_consumers = N,] + [kafka_skip_broken_messages = N] +``` + +پارامترهای مورد نیاز: + +- `kafka_broker_list` – A comma-separated list of brokers (for example, `localhost:9092`). +- `kafka_topic_list` – A list of Kafka topics. +- `kafka_group_name` – A group of Kafka consumers. Reading margins are tracked for each group separately. If you don't want messages to be duplicated in the cluster, use the same group name everywhere. +- `kafka_format` – Message format. Uses the same notation as the SQL `FORMAT` تابع مانند `JSONEachRow`. برای کسب اطلاعات بیشتر, دیدن [فرشها](../../../interfaces/formats.md) بخش. + +پارامترهای اختیاری: + +- `kafka_row_delimiter` – Delimiter character, which ends the message. +- `kafka_schema` – Parameter that must be used if the format requires a schema definition. For example, [سروان نیا](https://capnproto.org/) نیاز به مسیر به فایل طرح و نام ریشه `schema.capnp:Message` اعتراض. +- `kafka_num_consumers` – The number of consumers per table. Default: `1`. مشخص مصرف کنندگان بیشتر اگر توان عملیاتی یک مصرف کننده کافی است. تعداد کل مصرف کنندگان باید تعداد پارتیشن در موضوع تجاوز نمی, از تنها یک مصرف کننده را می توان در هر پارتیشن اختصاص داده. +- `kafka_skip_broken_messages` – Kafka message parser tolerance to schema-incompatible messages per block. Default: `0`. اگر `kafka_skip_broken_messages = N` سپس موتور پرش *N* پیام کافکا که نمی تواند تجزیه شود (یک پیام برابر یک ردیف از داده ها). + +مثالها: + +``` sql + CREATE TABLE queue ( + timestamp UInt64, + level String, + message String + ) ENGINE = Kafka('localhost:9092', 'topic', 'group1', 'JSONEachRow'); + + SELECT * FROM queue LIMIT 5; + + CREATE TABLE queue2 ( + timestamp UInt64, + level String, + message String + ) ENGINE = Kafka SETTINGS kafka_broker_list = 'localhost:9092', + kafka_topic_list = 'topic', + kafka_group_name = 'group1', + kafka_format = 'JSONEachRow', + kafka_num_consumers = 4; + + CREATE TABLE queue2 ( + timestamp UInt64, + level String, + message String + ) ENGINE = Kafka('localhost:9092', 'topic', 'group1') + SETTINGS kafka_format = 'JSONEachRow', + kafka_num_consumers = 4; +``` + +
    + +روش منسوخ برای ایجاد یک جدول + +!!! attention "توجه" + از این روش در پروژه های جدید استفاده نکنید. در صورت امکان, تغییر پروژه های قدیمی به روش بالا توضیح. + +``` sql +Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format + [, kafka_row_delimiter, kafka_schema, kafka_num_consumers, kafka_skip_broken_messages]) +``` + +
    + +## توصیف {#description} + +پیام تحویل به طور خودکار ردیابی, بنابراین هر پیام در یک گروه تنها یک بار شمارش. اگر شما می خواهید برای دریافت داده ها دو بار, سپس یک کپی از جدول با نام گروه دیگری ایجاد. + +گروه انعطاف پذیر هستند و همگام سازی در خوشه. برای مثال, اگر شما 10 موضوعات و 5 نسخه از یک جدول در یک خوشه, سپس هر کپی می شود 2 موضوعات. اگر تعداد نسخه تغییر, موضوعات در سراسر نسخه توزیع به طور خودکار. اطلاعات بیشتر در مورد این در http://kafka.apache.org/intro. + +`SELECT` به خصوص برای خواندن پیام های مفید نیست (به جز اشکال زدایی), چرا که هر پیام را می توان تنها یک بار به عنوان خوانده شده. این عملی تر است برای ایجاد موضوعات در زمان واقعی با استفاده از نمایش محقق. برای انجام این کار: + +1. از موتور برای ایجاد یک مصرف کننده کافکا استفاده کنید و جریان داده را در نظر بگیرید. +2. ایجاد یک جدول با ساختار مورد نظر. +3. یک دیدگاه محقق ایجاد کنید که داده ها را از موتور تبدیل می کند و به یک جدول قبلا ایجاد شده تبدیل می کند. + +هنگامی که `MATERIALIZED VIEW` به موتور می پیوندد و شروع به جمع کردن داده ها در پس زمینه می کند. این اجازه می دهد تا شما را به طور مستمر دریافت پیام از کافکا و تبدیل به فرمت مورد نیاز با استفاده از `SELECT`. +یک جدول کافکا می تواند به عنوان بسیاری از دیدگاه های تحقق به عنوان دوست دارید, اطلاعات از جدول کافکا به طور مستقیم به عنوان خوانده شده, اما دریافت پرونده های جدید (در بلوک), به این ترتیب شما می توانید به چند جدول با سطح جزییات مختلف ارسال (با گروه بندی - تجمع و بدون). + +مثال: + +``` sql + CREATE TABLE queue ( + timestamp UInt64, + level String, + message String + ) ENGINE = Kafka('localhost:9092', 'topic', 'group1', 'JSONEachRow'); + + CREATE TABLE daily ( + day Date, + level String, + total UInt64 + ) ENGINE = SummingMergeTree(day, (day, level), 8192); + + CREATE MATERIALIZED VIEW consumer TO daily + AS SELECT toDate(toDateTime(timestamp)) AS day, level, count() as total + FROM queue GROUP BY day, level; + + SELECT level, sum(total) FROM daily GROUP BY level; +``` + +برای بهبود عملکرد, پیام های دریافت شده را به بلوک های اندازه گروه بندی می شوند [ا\_فزونهها](../../../operations/server_configuration_parameters/settings.md#settings-max_insert_block_size). اگر بلوک در داخل تشکیل نشده است [\_خاله جریان](../../../operations/server_configuration_parameters/settings.md) میلی ثانیه, داده خواهد شد به جدول بدون در نظر گرفتن کامل از بلوک سرخ. + +برای جلوگیری از دریافت داده های موضوع و یا تغییر منطق تبدیل جدا مشاهده محقق: + +``` sql + DETACH TABLE consumer; + ATTACH MATERIALIZED VIEW consumer; +``` + +اگر شما می خواهید به تغییر جدول هدف با استفاده از `ALTER` توصیه می کنیم دیدگاه مادی را غیرفعال کنید تا از اختلاف بین جدول هدف و داده ها از نظر جلوگیری شود. + +## پیکربندی {#configuration} + +شبیه به graphitemergetree های کافکا پشتیبانی از موتور تمدید پیکربندی با استفاده از clickhouse فایل پیکربندی. دو کلید پیکربندی است که شما می توانید استفاده کنید وجود دارد: جهانی (`kafka`) و سطح موضوع (`kafka_*`). پیکربندی جهانی برای اولین بار اعمال می شود و سپس پیکربندی سطح موضوع اعمال می شود (در صورت وجود). + +``` xml + + + cgrp + smallest + + + + + 250 + 100000 + +``` + +برای یک لیست از گزینه های پیکربندی ممکن, دیدن [مرجع پیکربندی کتابدار](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md). استفاده از تاکید (`_`) به جای یک نقطه در پیکربندی کلیک. به عنوان مثال, `check.crcs=true` خواهد بود `true`. + +## ستونهای مجازی {#virtual-columns} + +- `_topic` — Kafka topic. +- `_key` — Key of the message. +- `_offset` — Offset of the message. +- `_timestamp` — Timestamp of the message. +- `_partition` — Partition of Kafka topic. 
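+
+یک طرح فرضی (با فرض جدول `queue` از مثال های بالا): ستون های مجازی را می توان در یک نمای محقق شده خواند تا مبدا هر پیام در کنار خود پیام ذخیره شود:
+
+``` sql
+-- Hypothetical sketch: persist each message together with its origin.
+CREATE TABLE queue_audit (topic String, part UInt64, off UInt64, message String)
+ENGINE = MergeTree() ORDER BY (topic, part, off);
+
+CREATE MATERIALIZED VIEW queue_audit_mv TO queue_audit
+AS SELECT _topic AS topic, _partition AS part, _offset AS off, message
+FROM queue;
+```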
+ +**همچنین نگاه کنید** + +- [مجازی ستون](../index.md#table_engines-virtual_columns) + +[مقاله اصلی](https://clickhouse.tech/docs/en/operations/table_engines/kafka/) diff --git a/docs/fa/engines/table_engines/integrations/mysql.md b/docs/fa/engines/table_engines/integrations/mysql.md new file mode 100644 index 00000000000..e6786240ec8 --- /dev/null +++ b/docs/fa/engines/table_engines/integrations/mysql.md @@ -0,0 +1,105 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 33 +toc_title: MySQL +--- + +# Mysql {#mysql} + +موتور خروجی زیر اجازه می دهد تا شما را به انجام `SELECT` نمایش داده شد در داده است که بر روی یک سرور خروجی از راه دور ذخیره می شود. + +## ایجاد یک جدول {#creating-a-table} + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] +( + name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [TTL expr1], + name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2] [TTL expr2], + ... +) ENGINE = MySQL('host:port', 'database', 'table', 'user', 'password'[, replace_query, 'on_duplicate_clause']); +``` + +مشاهده شرح مفصلی از [CREATE TABLE](../../../sql_reference/statements/create.md#create-table-query) پرس و جو. + +ساختار جدول می تواند از ساختار جدول خروجی زیر اصلی متفاوت است: + +- نام ستون باید همان است که در جدول خروجی زیر اصلی باشد, اما شما می توانید تنها برخی از این ستون ها و در هر جهت استفاده. +- انواع ستون ممکن است از کسانی که در جدول خروجی زیر اصلی متفاوت است. فاحشه خانه تلاش می کند تا [بازیگران](../../../sql_reference/functions/type_conversion_functions.md#type_conversion_function-cast) ارزش ها را به انواع داده های کلیک. + +**پارامترهای موتور** + +- `host:port` — MySQL server address. + +- `database` — Remote database name. + +- `table` — Remote table name. + +- `user` — MySQL user. + +- `password` — User password. + +- `replace_query` — Flag that converts `INSERT INTO` نمایش داده شد به `REPLACE INTO`. اگر `replace_query=1`, پرس و جو جایگزین شده است. + +- `on_duplicate_clause` — The `ON DUPLICATE KEY on_duplicate_clause` بیان است که به اضافه `INSERT` پرس و جو. + + مثال: `INSERT INTO t (c1,c2) VALUES ('a', 2) ON DUPLICATE KEY UPDATE c2 = c2 + 1` کجا `on_duplicate_clause` هست `UPDATE c2 = c2 + 1`. دیدن [مستندات خروجی زیر](https://dev.mysql.com/doc/refman/8.0/en/insert-on-duplicate.html) برای پیدا کردن که `on_duplicate_clause` شما می توانید با استفاده از `ON DUPLICATE KEY` بند بند. + + برای مشخص کردن `on_duplicate_clause` شما نیاز به تصویب `0` به `replace_query` پارامتر. اگر شما به طور همزمان عبور `replace_query = 1` و `on_duplicate_clause`, تاتر تولید یک استثنا. + +ساده `WHERE` بند هایی مانند `=, !=, >, >=, <, <=` بر روی سرور خروجی زیر اجرا شده است. + +بقیه شرایط و `LIMIT` محدودیت نمونه برداری در محل کلیک تنها پس از پرس و جو به پس از اتمام خروجی زیر اجرا شده است. 
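+به عنوان تصویری فرضی از این رفتار (با فرض جدولی به نام `mysql_table` با ستون `int_id`, مانند مثال بخش بعد):
+
+``` sql
+-- The simple condition `int_id > 10` can run on the MySQL server;
+-- LIMIT is applied in ClickHouse after the MySQL query finishes.
+SELECT * FROM mysql_table WHERE int_id > 10 LIMIT 5
+```
+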
+ +## مثال طریقه استفاده {#usage-example} + +جدول در خروجی زیر: + +``` text +mysql> CREATE TABLE `test`.`test` ( + -> `int_id` INT NOT NULL AUTO_INCREMENT, + -> `int_nullable` INT NULL DEFAULT NULL, + -> `float` FLOAT NOT NULL, + -> `float_nullable` FLOAT NULL DEFAULT NULL, + -> PRIMARY KEY (`int_id`)); +Query OK, 0 rows affected (0,09 sec) + +mysql> insert into test (`int_id`, `float`) VALUES (1,2); +Query OK, 1 row affected (0,00 sec) + +mysql> select * from test; ++------+----------+-----+----------+ +| int_id | int_nullable | float | float_nullable | ++------+----------+-----+----------+ +| 1 | NULL | 2 | NULL | ++------+----------+-----+----------+ +1 row in set (0,00 sec) +``` + +جدول در تاتر, بازیابی داده ها از جدول خروجی زیر ایجاد شده در بالا: + +``` sql +CREATE TABLE mysql_table +( + `float_nullable` Nullable(Float32), + `int_id` Int32 +) +ENGINE = MySQL('localhost:3306', 'test', 'test', 'bayonet', '123') +``` + +``` sql +SELECT * FROM mysql_table +``` + +``` text +┌─float_nullable─┬─int_id─┐ +│ ᴺᵁᴸᴸ │ 1 │ +└────────────────┴────────┘ +``` + +## همچنین نگاه کنید {#see-also} + +- [این ‘mysql’ تابع جدول](../../../sql_reference/table_functions/mysql.md) +- [با استفاده از خروجی زیر به عنوان منبع فرهنگ لغت خارجی](../../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md#dicts-external_dicts_dict_sources-mysql) + +[مقاله اصلی](https://clickhouse.tech/docs/en/operations/table_engines/mysql/) diff --git a/docs/fa/engines/table_engines/integrations/odbc.md b/docs/fa/engines/table_engines/integrations/odbc.md new file mode 100644 index 00000000000..1b9d6355479 --- /dev/null +++ b/docs/fa/engines/table_engines/integrations/odbc.md @@ -0,0 +1,132 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 35 +toc_title: ODBC +--- + +# ODBC {#table-engine-odbc} + +اجازه می دهد تا تاتر برای اتصال به پایگاه داده های خارجی از طریق [ODBC](https://en.wikipedia.org/wiki/Open_Database_Connectivity). + +با خیال راحت پیاده سازی اتصالات ان بی سی, تاتر با استفاده از یک برنامه جداگانه `clickhouse-odbc-bridge`. اگر راننده او بی سی به طور مستقیم از لود `clickhouse-server`, مشکلات راننده می تواند سرور تاتر سقوط. تاتر به طور خودکار شروع می شود `clickhouse-odbc-bridge` هنگامی که مورد نیاز است. برنامه پل او بی سی از همان بسته به عنوان نصب `clickhouse-server`. + +این موتور از [Nullable](../../../sql_reference/data_types/nullable.md) نوع داده. + +## ایجاد یک جدول {#creating-a-table} + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] +( + name1 [type1], + name2 [type2], + ... +) +ENGINE = ODBC(connection_settings, external_database, external_table) +``` + +مشاهده شرح مفصلی از [CREATE TABLE](../../../sql_reference/statements/create.md#create-table-query) پرس و جو. + +ساختار جدول می تواند از ساختار جدول منبع متفاوت باشد: + +- نام ستون باید همان است که در جدول منبع باشد, اما شما می توانید تنها برخی از این ستون ها و در هر جهت استفاده. +- انواع ستون ممکن است از کسانی که در جدول منبع متفاوت. فاحشه خانه تلاش می کند تا [بازیگران](../../../sql_reference/functions/type_conversion_functions.md#type_conversion_function-cast) ارزش ها را به انواع داده های کلیک. + +**پارامترهای موتور** + +- `connection_settings` — Name of the section with connection settings in the `odbc.ini` پرونده. +- `external_database` — Name of a database in an external DBMS. +- `external_table` — Name of a table in the `external_database`. 
+ +## مثال طریقه استفاده {#usage-example} + +**بازیابی داده ها از نصب و راه اندازی خروجی زیر محلی از طریق ان بی سی** + +این مثال برای لینوکس اوبونتو 18.04 و سرور خروجی زیر 5.7 بررسی می شود. + +اطمینان حاصل شود که unixodbc و mysql اتصال نصب شده است. + +به طور پیش فرض (در صورت نصب از بسته), کلیک خانه شروع می شود به عنوان کاربر `clickhouse`. بدین ترتیب, شما نیاز به ایجاد و پیکربندی این کاربر در سرور خروجی زیر. + +``` bash +$ sudo mysql +``` + +``` sql +mysql> CREATE USER 'clickhouse'@'localhost' IDENTIFIED BY 'clickhouse'; +mysql> GRANT ALL PRIVILEGES ON *.* TO 'clickhouse'@'clickhouse' WITH GRANT OPTION; +``` + +سپس اتصال را پیکربندی کنید `/etc/odbc.ini`. + +``` bash +$ cat /etc/odbc.ini +[mysqlconn] +DRIVER = /usr/local/lib/libmyodbc5w.so +SERVER = 127.0.0.1 +PORT = 3306 +DATABASE = test +USERNAME = clickhouse +PASSWORD = clickhouse +``` + +شما می توانید اتصال با استفاده از بررسی `isql` ابزار از unixODBC نصب و راه اندازی. + +``` bash +$ isql -v mysqlconn ++-------------------------+ +| Connected! | +| | +... +``` + +جدول در خروجی زیر: + +``` text +mysql> CREATE TABLE `test`.`test` ( + -> `int_id` INT NOT NULL AUTO_INCREMENT, + -> `int_nullable` INT NULL DEFAULT NULL, + -> `float` FLOAT NOT NULL, + -> `float_nullable` FLOAT NULL DEFAULT NULL, + -> PRIMARY KEY (`int_id`)); +Query OK, 0 rows affected (0,09 sec) + +mysql> insert into test (`int_id`, `float`) VALUES (1,2); +Query OK, 1 row affected (0,00 sec) + +mysql> select * from test; ++------+----------+-----+----------+ +| int_id | int_nullable | float | float_nullable | ++------+----------+-----+----------+ +| 1 | NULL | 2 | NULL | ++------+----------+-----+----------+ +1 row in set (0,00 sec) +``` + +جدول در تاتر بازیابی داده ها از جدول خروجی زیر: + +``` sql +CREATE TABLE odbc_t +( + `int_id` Int32, + `float_nullable` Nullable(Float32) +) +ENGINE = ODBC('DSN=mysqlconn', 'test', 'test') +``` + +``` sql +SELECT * FROM odbc_t +``` + +``` text +┌─int_id─┬─float_nullable─┐ +│ 1 │ ᴺᵁᴸᴸ │ +└────────┴────────────────┘ +``` + +## همچنین نگاه کنید به {#see-also} + +- [لغت نامه های خارجی ان بی سی](../../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md#dicts-external_dicts_dict_sources-odbc) +- [تابع جدول ان بی سی](../../../sql_reference/table_functions/odbc.md) + +[مقاله اصلی](https://clickhouse.tech/docs/en/operations/table_engines/odbc/) diff --git a/docs/fa/engines/table_engines/log_family/index.md b/docs/fa/engines/table_engines/log_family/index.md new file mode 100644 index 00000000000..330dac5b3af --- /dev/null +++ b/docs/fa/engines/table_engines/log_family/index.md @@ -0,0 +1,8 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_folder_title: Log Family +toc_priority: 29 +--- + + diff --git a/docs/fa/engines/table_engines/log_family/log.md b/docs/fa/engines/table_engines/log_family/log.md new file mode 100644 index 00000000000..e7317a81470 --- /dev/null +++ b/docs/fa/engines/table_engines/log_family/log.md @@ -0,0 +1,16 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 33 +toc_title: "\u062B\u0628\u062A" +--- + +# ثبت {#log} + +موتور متعلق به خانواده از موتورهای ورود به سیستم. مشاهده خواص مشترک از موتورهای ورود به سیستم و تفاوت های خود را در [ورود خانواده موتور](log_family.md) مقاله. + +ورود متفاوت از [جمع شدن](tinylog.md) در این فایل کوچک “marks” ساکن با فایل های ستون. 
این علامت ها در هر بلوک داده نوشته شده است و شامل شیپور خاموشی که نشان می دهد از کجا شروع به خواندن فایل به منظور جست و خیز تعداد مشخصی از ردیف. این باعث می شود امکان خواندن داده های جدول در موضوعات مختلف. +برای همزمان دسترسی به داده ها, عملیات خواندن را می توان به طور همزمان انجام, در حالی که ارسال عملیات بلوک می خواند و هر یک از دیگر. +موتور ورود به سیستم می کند شاخص را پشتیبانی نمی کند. به طور مشابه, اگر نوشتن به یک جدول شکست خورده, جدول شکسته است, و خواندن از این خطا را برمی گرداند. موتور ورود به سیستم مناسب برای داده های موقت است, نوشتن یک بار جداول, و برای تست و یا تظاهرات اهداف. + +[مقاله اصلی](https://clickhouse.tech/docs/en/operations/table_engines/log/) diff --git a/docs/fa/engines/table_engines/log_family/log_family.md b/docs/fa/engines/table_engines/log_family/log_family.md new file mode 100644 index 00000000000..eea788109ed --- /dev/null +++ b/docs/fa/engines/table_engines/log_family/log_family.md @@ -0,0 +1,46 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 31 +toc_title: "\u0645\u0639\u0631\u0641\u06CC \u0634\u0631\u06A9\u062A" +--- + +# ورود خانواده موتور {#log-engine-family} + +هنگامی که شما نیاز به سرعت نوشتن بسیاری از جداول کوچک (تا حدود 1 میلیون ردیف) و بعد به عنوان یک کل خواندن این موتور برای حالات توسعه داده شد. + +موتورهای خانواده: + +- [خط زدن](stripelog.md) +- [ثبت](log.md) +- [جمع شدن](tinylog.md) + +## ویژگیهای مشترک {#common-properties} + +موتورها: + +- ذخیره داده ها بر روی یک دیسک. + +- اضافه کردن داده ها به پایان فایل هنگام نوشتن. + +- قفل پشتیبانی برای دسترسی همزمان داده ها. + + در طول `INSERT` نمایش داده شد, جدول قفل شده است, و دیگر نمایش داده شد برای خواندن و نوشتن داده ها هر دو منتظر جدول برای باز کردن. اگر هیچ نمایش داده شد نوشتن داده ها وجود دارد, هر تعداد از نمایش داده شد خواندن داده ها را می توان به صورت همزمان انجام. + +- پشتیبانی نمی کند [جهش](../../../sql_reference/statements/alter.md#alter-mutations) عملیات. + +- هنوز شاخص را پشتیبانی نمی کند. + + این به این معنی است که `SELECT` نمایش داده شد برای محدوده داده ها موثر نیست. + +- هنوز داده نوشتن نیست اتمی. + + شما می توانید یک جدول با داده های خراب اگر چیزی می شکند عملیات نوشتن, مثلا, خاموش کردن سرور غیر طبیعی. + +## تفاوت {#differences} + +این `TinyLog` موتور ساده ترین در خانواده است و فقیرترین قابلیت ها و کمترین بهره وری را فراهم می کند. این `TinyLog` موتور از خواندن داده های موازی با چندین موضوع پشتیبانی نمی کند. این اطلاعات کندتر از موتورهای دیگر در خانواده است که خواندن موازی را پشتیبانی می کند و تقریبا به عنوان بسیاری از توصیفگرها به عنوان `Log` موتور به دلیل ذخیره هر ستون در یک فایل جداگانه. در حالات کم بار ساده استفاده کنید. + +این `Log` و `StripeLog` موتورهای پشتیبانی خواندن داده های موازی. هنگام خواندن داده ها, تاتر با استفاده از موضوعات متعدد. هر موضوع یک بلوک داده جداگانه را پردازش می کند. این `Log` موتور با استفاده از یک فایل جداگانه برای هر ستون از جدول. `StripeLog` ذخیره تمام داده ها در یک فایل. در نتیجه `StripeLog` موتور با استفاده از توصیف کمتر در سیستم عامل, اما `Log` موتور فراهم می کند بهره وری بالاتر در هنگام خواندن داده ها. 
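+
+برای مقایسه عملی, طرح فرضی زیر سه جدول هم ساختار با سه موتور این خانواده می سازد (نام جدول ها صرفا برای مثال اند):
+
+``` sql
+-- TinyLog: simplest, no parallel reads, a file per column.
+CREATE TABLE t_tiny (id UInt64, msg String) ENGINE = TinyLog;
+
+-- StripeLog: parallel reads, all data in one file (fewer descriptors).
+CREATE TABLE t_stripe (id UInt64, msg String) ENGINE = StripeLog;
+
+-- Log: parallel reads, a file per column, higher read efficiency.
+CREATE TABLE t_log (id UInt64, msg String) ENGINE = Log;
+```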
+ +[مقاله اصلی](https://clickhouse.tech/docs/en/operations/table_engines/log_family/) diff --git a/docs/fa/engines/table_engines/log_family/stripelog.md b/docs/fa/engines/table_engines/log_family/stripelog.md new file mode 100644 index 00000000000..eef50f02abb --- /dev/null +++ b/docs/fa/engines/table_engines/log_family/stripelog.md @@ -0,0 +1,95 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 32 +toc_title: "\u062E\u0637 \u0632\u062F\u0646" +--- + +# خط زدن {#stripelog} + +این موتور متعلق به خانواده از موتورهای ورود به سیستم. مشاهده خواص مشترک از موتورهای ورود به سیستم و تفاوت های خود را در [ورود خانواده موتور](log_family.md) مقاله. + +با استفاده از این موتور در حالات زمانی که شما نیاز به نوشتن بسیاری از جداول با مقدار کمی از داده ها (کمتر از 1 میلیون ردیف). + +## ایجاد یک جدول {#table_engines-stripelog-creating-a-table} + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] +( + column1_name [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], + column2_name [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], + ... +) ENGINE = StripeLog +``` + +شرح مفصلی از [CREATE TABLE](../../../sql_reference/statements/create.md#create-table-query) پرس و جو. + +## نوشتن داده ها {#table_engines-stripelog-writing-the-data} + +این `StripeLog` موتور فروشگاه تمام ستون ها در یک فایل. برای هر `INSERT` پرس و جو, خانه رعیتی بلوک داده ها به پایان یک فایل جدول, نوشتن ستون یک به یک. + +برای هر کلیک جدول فایل ها را می نویسد: + +- `data.bin` — Data file. +- `index.mrk` — File with marks. Marks contain offsets for each column of each data block inserted. + +این `StripeLog` موتور را پشتیبانی نمی کند `ALTER UPDATE` و `ALTER DELETE` عملیات. + +## خواندن داده ها {#table_engines-stripelog-reading-the-data} + +فایل را با نشانه اجازه می دهد تا clickhouse به parallelize خواندن داده ها. این به این معنی است که یک `SELECT` پرس و جو ردیف در جهت غیر قابل پیش بینی می گرداند. استفاده از `ORDER BY` بند برای مرتب کردن ردیف. + +## مثال استفاده {#table_engines-stripelog-example-of-use} + +ایجاد یک جدول: + +``` sql +CREATE TABLE stripe_log_table +( + timestamp DateTime, + message_type String, + message String +) +ENGINE = StripeLog +``` + +درج داده: + +``` sql +INSERT INTO stripe_log_table VALUES (now(),'REGULAR','The first regular message') +INSERT INTO stripe_log_table VALUES (now(),'REGULAR','The second regular message'),(now(),'WARNING','The first warning message') +``` + +ما با استفاده از دو `INSERT` نمایش داده شد برای ایجاد دو بلوک داده ها در داخل `data.bin` پرونده. + +خانه رعیتی با استفاده از موضوعات متعدد در هنگام انتخاب داده ها. هر موضوع یک بلوک داده جداگانه را می خواند و ردیف ها را به طور مستقل به پایان می رساند. در نتیجه, منظور از بلوک های ردیف در خروجی می کند منظور از بلوک های مشابه در ورودی در اکثر موارد مطابقت ندارد. 
به عنوان مثال: + +``` sql +SELECT * FROM stripe_log_table +``` + +``` text +┌───────────timestamp─┬─message_type─┬─message────────────────────┐ +│ 2019-01-18 14:27:32 │ REGULAR │ The second regular message │ +│ 2019-01-18 14:34:53 │ WARNING │ The first warning message │ +└─────────────────────┴──────────────┴────────────────────────────┘ +┌───────────timestamp─┬─message_type─┬─message───────────────────┐ +│ 2019-01-18 14:23:43 │ REGULAR │ The first regular message │ +└─────────────────────┴──────────────┴───────────────────────────┘ +``` + +مرتب سازی نتایج (صعودی با ترتیب به طور پیش فرض): + +``` sql +SELECT * FROM stripe_log_table ORDER BY timestamp +``` + +``` text +┌───────────timestamp─┬─message_type─┬─message────────────────────┐ +│ 2019-01-18 14:23:43 │ REGULAR │ The first regular message │ +│ 2019-01-18 14:27:32 │ REGULAR │ The second regular message │ +│ 2019-01-18 14:34:53 │ WARNING │ The first warning message │ +└─────────────────────┴──────────────┴────────────────────────────┘ +``` + +[مقاله اصلی](https://clickhouse.tech/docs/en/operations/table_engines/stripelog/) diff --git a/docs/fa/engines/table_engines/log_family/tinylog.md b/docs/fa/engines/table_engines/log_family/tinylog.md new file mode 100644 index 00000000000..1611c916516 --- /dev/null +++ b/docs/fa/engines/table_engines/log_family/tinylog.md @@ -0,0 +1,16 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 34 +toc_title: "\u062C\u0645\u0639 \u0634\u062F\u0646" +--- + +# جمع شدن {#tinylog} + +موتور متعلق به خانواده موتور ورود به سیستم. ببینید [ورود خانواده موتور](log_family.md) برای خواص مشترک موتورهای ورود به سیستم و تفاوت های خود را. + +این موتور جدول معمولا با روش نوشتن یک بار استفاده می شود: نوشتن داده ها یک بار و سپس خواندن هر چند بار که لازم است. مثلا, شما می توانید استفاده کنید `TinyLog`- نوع جداول برای داده های واسطه است که در دسته های کوچک پردازش شده است. توجه داشته باشید که ذخیره سازی داده ها در تعداد زیادی از جداول کوچک بی اثر است. + +نمایش داده شد در یک جریان واحد اجرا شده است. به عبارت دیگر این موتور برای جداول نسبتا کوچک (تا حدود 1000000 ردیف) در نظر گرفته شده است. این را حس می کند به استفاده از این موتور جدول اگر شما بسیاری از جداول کوچک, از ساده تر از [ثبت](log.md) موتور (فایل های کمتر نیاز به باز شود). + +[مقاله اصلی](https://clickhouse.tech/docs/en/operations/table_engines/tinylog/) diff --git a/docs/fa/engines/table_engines/mergetree_family/aggregatingmergetree.md b/docs/fa/engines/table_engines/mergetree_family/aggregatingmergetree.md new file mode 100644 index 00000000000..dde90fd84b3 --- /dev/null +++ b/docs/fa/engines/table_engines/mergetree_family/aggregatingmergetree.md @@ -0,0 +1,102 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 35 +toc_title: "\u0631\u06CC\u0632\u062F\u0627\u0646\u0647" +--- + +# ریزدانه {#aggregatingmergetree} + +موتور به ارث می برد از [ادغام](mergetree.md#table_engines-mergetree), تغییر منطق برای ادغام قطعات داده. تاتر جایگزین تمام ردیف با کلید اصلی همان (یا با دقت بیشتر, با همان [کلید مرتب سازی](mergetree.md)) با یک ردیف (در یک بخش یک داده) که ترکیبی از ایالت های توابع کل را ذخیره می کند. + +شما می توانید استفاده کنید `AggregatingMergeTree` جداول برای تجمع داده افزایشی, از جمله برای نمایش محقق جمع. + +موتور پردازش تمام ستون ها با [کارکرد](../../../sql_reference/data_types/aggregatefunction.md) نوع. + +مناسب برای استفاده است `AggregatingMergeTree` اگر تعداد ردیف ها را با دستور کاهش دهد. 
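+به عنوان تصویری فرضی از ستون های نوع `AggregateFunction` (نحو کامل در بخش بعد می آید):
+
+``` sql
+-- Hypothetical sketch: a state of uniq() is stored per (CounterID, StartDate)
+-- instead of the raw rows it was computed from.
+CREATE TABLE agg_sketch
+(
+    CounterID UInt32,
+    StartDate Date,
+    Users AggregateFunction(uniq, UInt64)
+)
+ENGINE = AggregatingMergeTree()
+ORDER BY (CounterID, StartDate)
+```
+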
+ +## ایجاد یک جدول {#creating-a-table} + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] +( + name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], + name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], + ... +) ENGINE = AggregatingMergeTree() +[PARTITION BY expr] +[ORDER BY expr] +[SAMPLE BY expr] +[TTL expr] +[SETTINGS name=value, ...] +``` + +برای شرح پارامترهای درخواست را ببینید [درخواست توضیحات](../../../sql_reference/statements/create.md). + +**بندهای پرسوجو** + +هنگام ایجاد یک `AggregatingMergeTree` جدول همان [بند](mergetree.md) در هنگام ایجاد یک مورد نیاز است `MergeTree` جدول + +
    + +روش منسوخ برای ایجاد یک جدول + +!!! attention "توجه" + هنوز این روش در پروژه های جدید استفاده کنید و, در صورت امکان, تغییر پروژه های قدیمی به روش بالا توضیح. + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] +( + name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], + name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], + ... +) ENGINE [=] AggregatingMergeTree(date-column [, sampling_expression], (primary, key), index_granularity) +``` + +همه پارامترها همان معنی را دارند `MergeTree`. +
    + +## انتخاب و درج {#select-and-insert} + +برای وارد کردن داده ها استفاده کنید [INSERT SELECT](../../../sql_reference/statements/insert_into.md) پرس و جو با کل دولت توابع. +هنگام انتخاب داده ها از `AggregatingMergeTree` جدول استفاده کنید `GROUP BY` بند و توابع کل همان هنگام قرار دادن داده, اما با استفاده از `-Merge` پسوند. + +در نتایج `SELECT` پرس و جو, ارزش `AggregateFunction` نوع اجرای خاص نمایندگی دودویی برای همه فرمت های خروجی کلیک کنید. اگر کمپرسی داده ها به, مثلا, `TabSeparated` قالب با `SELECT` پرس و جو و سپس این روگرفت را می توان با استفاده از لود `INSERT` پرس و جو. + +## به عنوان مثال از یک مشاهده محقق جمع {#example-of-an-aggregated-materialized-view} + +`AggregatingMergeTree` مشاهده تحقق است که به تماشای `test.visits` جدول: + +``` sql +CREATE MATERIALIZED VIEW test.basic +ENGINE = AggregatingMergeTree() PARTITION BY toYYYYMM(StartDate) ORDER BY (CounterID, StartDate) +AS SELECT + CounterID, + StartDate, + sumState(Sign) AS Visits, + uniqState(UserID) AS Users +FROM test.visits +GROUP BY CounterID, StartDate; +``` + +درج داده به `test.visits` جدول + +``` sql +INSERT INTO test.visits ... +``` + +داده ها در هر دو جدول و مشخصات قرار داده شده `test.basic` که تجمع انجام خواهد شد. + +برای دریافت اطلاعات جمع, ما نیاز به اجرای یک پرس و جو مانند `SELECT ... GROUP BY ...` از نظر `test.basic`: + +``` sql +SELECT + StartDate, + sumMerge(Visits) AS Visits, + uniqMerge(Users) AS Users +FROM test.basic +GROUP BY StartDate +ORDER BY StartDate; +``` + +[مقاله اصلی](https://clickhouse.tech/docs/en/operations/table_engines/aggregatingmergetree/) diff --git a/docs/fa/engines/table_engines/mergetree_family/collapsingmergetree.md b/docs/fa/engines/table_engines/mergetree_family/collapsingmergetree.md new file mode 100644 index 00000000000..25bc525885e --- /dev/null +++ b/docs/fa/engines/table_engines/mergetree_family/collapsingmergetree.md @@ -0,0 +1,309 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 36 +toc_title: "\u0633\u0642\u0648\u0637 \u063A\u0630\u0627\u06CC \u0627\u0635\u0644\u06CC" +--- + +# سقوط غذای اصلی {#table_engine-collapsingmergetree} + +موتور به ارث می برد از [ادغام](mergetree.md) و می افزاید: منطق ردیف سقوط به قطعات داده الگوریتم ادغام. + +`CollapsingMergeTree` ناهمزمان حذف (فرو می ریزد) جفت ردیف اگر همه از زمینه ها در یک کلید مرتب سازی (`ORDER BY`) معادل به استثنای زمینه خاص است `Sign` که می تواند داشته باشد `1` و `-1` ارزشهای خبری عبارتند از: ردیف بدون یک جفت نگهداری می شوند. برای اطلاعات بیشتر نگاه کنید به [سقوط](#table_engine-collapsingmergetree-collapsing) بخش از سند. + +موتور ممکن است به طور قابل توجهی حجم ذخیره سازی را کاهش دهد و بهره وری را افزایش دهد `SELECT` پرس و جو به عنوان یک نتیجه. + +## ایجاد یک جدول {#creating-a-table} + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] +( + name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], + name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], + ... +) ENGINE = CollapsingMergeTree(sign) +[PARTITION BY expr] +[ORDER BY expr] +[SAMPLE BY expr] +[SETTINGS name=value, ...] +``` + +برای شرح پارامترهای پرس و جو, دیدن [توضیحات پرس و جو](../../../sql_reference/statements/create.md). + +**پارامترهای پیش ساخته** + +- `sign` — Name of the column with the type of row: `1` یک “state” سطر, `-1` یک “cancel” پارو زدن. + + Column data type — `Int8`. + +**بندهای پرسوجو** + +هنگام ایجاد یک `CollapsingMergeTree` جدول, همان [بندهای پرسوجو](mergetree.md#table_engine-mergetree-creating-a-table) در هنگام ایجاد یک مورد نیاز است `MergeTree` جدول + +
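+
+برای نمونه, جدولی فرضی مطابق نحو بالا (ستون ها از بخش «داده» در ادامه گرفته شده اند):
+
+``` sql
+CREATE TABLE UAct
+(
+    UserID UInt64,
+    PageViews UInt8,
+    Duration UInt8,
+    Sign Int8
+)
+ENGINE = CollapsingMergeTree(Sign)
+ORDER BY UserID
+```
+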

<details markdown="1">

<summary>Deprecated Method for Creating a Table</summary>

!!! attention "Attention"
    Do not use this method in new projects and, if possible, switch old projects to the method described above.

``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
(
    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
    ...
) ENGINE [=] CollapsingMergeTree(date-column [, sampling_expression], (primary, key), index_granularity, sign)
```

All of the parameters except `sign` have the same meaning as in `MergeTree`.

- `sign` — Name of the column with the type of row: `1` — “state” row, `-1` — “cancel” row.

    Column Data Type — `Int8`.

</details>

## Collapsing {#table_engine-collapsingmergetree-collapsing}

### Data {#data}

Consider the situation where you need to save continually changing data for some object. It sounds logical to have one row per object and update it on any change, but an update operation is expensive and slow for a DBMS because it requires rewriting the data in storage. If you need to write data quickly, updates are not acceptable, but you can write the changes of an object sequentially, as follows.

Use the particular column `Sign`. If `Sign = 1`, it means that the row is a state of an object; let's call it the “state” row. If `Sign = -1`, it means the cancellation of the state of an object with the same attributes; let's call it the “cancel” row.

For example, we want to calculate how many pages users checked on some site and how long they were there. At some moment we write the following row with the state of user activity:

``` text
┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐
│ 4324182021466249494 │         5 │      146 │    1 │
└─────────────────────┴───────────┴──────────┴──────┘
```

At some moment later we register the change of user activity and write it with the following two rows.

``` text
┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐
│ 4324182021466249494 │         5 │      146 │   -1 │
│ 4324182021466249494 │         6 │      185 │    1 │
└─────────────────────┴───────────┴──────────┴──────┘
```

The first row cancels the previous state of the object (user). It should copy the sorting key fields of the cancelled state, except for `Sign`.

The second row contains the current state.

As we need only the last state of user activity, the rows

``` text
┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐
│ 4324182021466249494 │         5 │      146 │    1 │
│ 4324182021466249494 │         5 │      146 │   -1 │
└─────────────────────┴───────────┴──────────┴──────┘
```

can be deleted, collapsing the invalid (old) state of the object. `CollapsingMergeTree` does this while merging the data parts.

Why we need two rows for each change is described in the [Algorithm](#table_engine-collapsingmergetree-collapsing-algorithm) paragraph.

**Peculiar properties of such an approach**

1. The program that writes the data should remember the state of an object to be able to cancel it. The “cancel” row should contain copies of the sorting key fields of the “state” row and the opposite `Sign`. This increases the initial size of storage but allows writing the data quickly.
2. Long growing arrays in columns reduce the efficiency of the engine due to the load for writing. The more straightforward the data, the higher the efficiency.
3. The `SELECT` results depend strongly on the consistency of the object change history. Be accurate when preparing data for inserting. You can get unpredictable results with inconsistent data, for example, negative values for non-negative metrics such as session depth.

### Algorithm {#table_engine-collapsingmergetree-collapsing-algorithm}

When ClickHouse merges data parts, each group of consecutive rows with the same sorting key (`ORDER BY`) is reduced to not more than two rows, one with `Sign = 1` (the “state” row) and another with `Sign = -1` (the “cancel” row). In other words, the entries collapse.

For each resulting data part ClickHouse saves:

1. The first “cancel” and the last “state” rows, if the number of “state” and “cancel” rows matches and the last row is a “state” row.

2. The last “state” row, if there are more “state” rows than “cancel” rows.

3. The first “cancel” row, if there are more “cancel” rows than “state” rows.

4. None of the rows, in all other cases.

Also, when there are at least 2 more “state” rows than “cancel” rows, or at least 2 more “cancel” rows than “state” rows, the merge continues, but ClickHouse treats this situation as a logical error and records it in the server log.
This error can occur if the same data were inserted more than once.

Thus, collapsing should not change the results of calculating statistics.
Changes are gradually collapsed so that in the end only the last state of almost every object is left.

The `Sign` is required because the merging algorithm doesn't guarantee that all of the rows with the same sorting key will be in the same resulting data part, or even on the same physical server. ClickHouse processes `SELECT` queries with multiple threads, and it cannot predict the order of rows in the result. Aggregation is required if there is a need to get completely “collapsed” data from a `CollapsingMergeTree` table.

To finalize collapsing, write a query with a `GROUP BY` clause and aggregate functions that account for the sign. For example, to calculate quantity, use `sum(Sign)` instead of `count()`. To calculate the sum of something, use `sum(Sign * x)` instead of `sum(x)`, and so on, and also add `HAVING sum(Sign) > 0`.

The aggregates `count`, `sum` and `avg` can be calculated this way. The aggregate `uniq` can be calculated if an object has at least one non-collapsed state. The aggregates `min` and `max` cannot be calculated because `CollapsingMergeTree` does not save the value history of the collapsed states.

If you need to extract data without aggregation (for example, to check whether rows are present whose newest values match certain conditions), you can use the `FINAL` modifier for the `FROM` clause. This approach is significantly less efficient.

## Example of Use {#example-of-use}

Example data:

``` text
┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐
│ 4324182021466249494 │         5 │      146 │    1 │
│ 4324182021466249494 │         5 │      146 │   -1 │
│ 4324182021466249494 │         6 │      185 │    1 │
└─────────────────────┴───────────┴──────────┴──────┘
```

Creating the table:

``` sql
CREATE TABLE UAct
(
    UserID UInt64,
    PageViews UInt8,
    Duration UInt8,
    Sign Int8
)
ENGINE = CollapsingMergeTree(Sign)
ORDER BY UserID
```

Insertion of the data:

``` sql
INSERT INTO UAct VALUES (4324182021466249494, 5, 146, 1)
```

``` sql
INSERT INTO UAct VALUES (4324182021466249494, 5, 146, -1),(4324182021466249494, 6, 185, 1)
```

We use two `INSERT` queries to create two different data parts. If we insert the data with one query, ClickHouse creates one data part and will never perform any merge.

Getting the data:

``` sql
SELECT * FROM UAct
```

``` text
┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐
│ 4324182021466249494 │         5 │      146 │   -1 │
│ 4324182021466249494 │         6 │      185 │    1 │
└─────────────────────┴───────────┴──────────┴──────┘
┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐
│ 4324182021466249494 │         5 │      146 │    1 │
└─────────────────────┴───────────┴──────────┴──────┘
```

What do we see, and where is the collapsing?

With two `INSERT` queries, we created 2 data parts. The `SELECT` query was performed in 2 threads, and we got a random order of rows. Collapsing did not occur because there was no merge of the data parts yet. ClickHouse merges data parts at an unknown moment that we cannot predict.

Thus we need aggregation:

``` sql
SELECT
    UserID,
    sum(PageViews * Sign) AS PageViews,
    sum(Duration * Sign) AS Duration
FROM UAct
GROUP BY UserID
HAVING sum(Sign) > 0
```

``` text
┌──────────────UserID─┬─PageViews─┬─Duration─┐
│ 4324182021466249494 │         6 │      185 │
└─────────────────────┴───────────┴──────────┘
```

If we do not need aggregation and want to force collapsing, we can use the `FINAL` modifier for the `FROM` clause.

``` sql
SELECT * FROM UAct FINAL
```

``` text
┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐
│ 4324182021466249494 │         6 │      185 │    1 │
└─────────────────────┴───────────┴──────────┴──────┘
```

This way of selecting data is very inefficient. Don't use it for big tables.

## Example of Another Approach {#example-of-another-approach}

Example data:

``` text
┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐
│ 4324182021466249494 │         5 │      146 │    1 │
│ 4324182021466249494 │        -5 │     -146 │   -1 │
│ 4324182021466249494 │         6 │      185 │    1 │
└─────────────────────┴───────────┴──────────┴──────┘
```

The idea is that merges take into account only the key fields. And in the “cancel” row we can specify negative values that equalize the previous version of the row when summing, without using the `Sign` column. For this approach, it is necessary to change the data types of `PageViews` and `Duration` to store negative values: UInt8 -> Int16.

``` sql
CREATE TABLE UAct
(
    UserID UInt64,
    PageViews Int16,
    Duration Int16,
    Sign Int8
)
ENGINE = CollapsingMergeTree(Sign)
ORDER BY UserID
```

Let's test the approach:

``` sql
INSERT INTO UAct VALUES (4324182021466249494, 5, 146, 1);
INSERT INTO UAct VALUES (4324182021466249494, -5, -146, -1);
INSERT INTO UAct VALUES (4324182021466249494, 6, 185, 1);

SELECT * FROM UAct FINAL; -- avoid using FINAL in production (just for a test or small tables)
```

``` text
┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐
│ 4324182021466249494 │         6 │      185 │    1 │
└─────────────────────┴───────────┴──────────┴──────┘
```

``` sql
SELECT
    UserID,
    sum(PageViews) AS PageViews,
    sum(Duration) AS Duration
FROM UAct
GROUP BY UserID
```

``` text
┌──────────────UserID─┬─PageViews─┬─Duration─┐
│ 4324182021466249494 │         6 │      185 │
└─────────────────────┴───────────┴──────────┘
```

``` sql
SELECT count() FROM UAct
```

``` text
┌─count()─┐
│       3 │
└─────────┘
```

``` sql
OPTIMIZE TABLE UAct FINAL;

SELECT * FROM UAct
```

``` text
┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐
│ 4324182021466249494 │         6 │      185 │    1 │
└─────────────────────┴───────────┴──────────┴──────┘
```

[Original article](https://clickhouse.tech/docs/en/operations/table_engines/collapsingmergetree/)
diff --git a/docs/fa/engines/table_engines/mergetree_family/custom_partitioning_key.md b/docs/fa/engines/table_engines/mergetree_family/custom_partitioning_key.md
new file mode 100644
index 00000000000..b7848bdebc4
--- /dev/null
+++ b/docs/fa/engines/table_engines/mergetree_family/custom_partitioning_key.md
@@ -0,0 +1,128 @@
---
machine_translated: true
machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
toc_priority: 32
toc_title: Custom Partitioning Key
---

# Custom Partitioning Key {#custom-partitioning-key}

Partitioning is available for the [MergeTree](mergetree.md) family tables (including [replicated](replication.md) tables). [Materialized views](../special/materializedview.md) based on MergeTree tables support partitioning as well.

A partition is a logical combination of records in a table by a specified criterion. You can set a partition by an arbitrary criterion, such as by month, by day, or by event type. Each partition is stored separately to simplify manipulations of this data. When accessing the data, ClickHouse uses the smallest subset of partitions possible.

The partition is specified in the `PARTITION BY expr` clause when [creating a table](mergetree.md#table_engine-mergetree-creating-a-table).
The partition key can be any expression from the table columns. For example, to specify partitioning by month, use the expression `toYYYYMM(date_column)`:

``` sql
CREATE TABLE visits
(
    VisitDate Date,
    Hour UInt8,
    ClientID UUID
)
ENGINE = MergeTree()
PARTITION BY toYYYYMM(VisitDate)
ORDER BY Hour;
```

The partition key can also be a tuple of expressions (similar to the [primary key](mergetree.md#primary-keys-and-indexes-in-queries)). For example:

``` sql
ENGINE = ReplicatedCollapsingMergeTree('/clickhouse/tables/name', 'replica1', Sign)
PARTITION BY (toMonday(StartDate), EventType)
ORDER BY (CounterID, StartDate, intHash32(UserID));
```

In this example, we set partitioning by the event types that occurred during the current week.

When inserting new data into a table, this data is stored as a separate part (chunk) sorted by the primary key. In 10-15 minutes after inserting, the parts of the same partition are merged into the entire part.

!!! info "Info"
    A merge only works for data parts that have the same value for the partitioning expression. This means **you shouldn't make overly granular partitions** (more than about a thousand partitions). Otherwise, the `SELECT` query performs poorly because of an unreasonably large number of files in the file system and open file descriptors.

Use the [system.parts](../../../operations/system_tables.md#system_tables-parts) table to view the table parts and partitions. For example, let's assume that we have a `visits` table with partitioning by month. Let's perform the `SELECT` query for the `system.parts` table:

``` sql
SELECT
    partition,
    name,
    active
FROM system.parts
WHERE table = 'visits'
```

``` text
┌─partition─┬─name───────────┬─active─┐
│ 201901    │ 201901_1_3_1   │      0 │
│ 201901    │ 201901_1_9_2   │      1 │
│ 201901    │ 201901_8_8_0   │      0 │
│ 201901    │ 201901_9_9_0   │      0 │
│ 201902    │ 201902_4_6_1   │      1 │
│ 201902    │ 201902_10_10_0 │      1 │
│ 201902    │ 201902_11_11_0 │      1 │
└───────────┴────────────────┴────────┘
```

The `partition` column contains the names of the partitions. There are two partitions in this example: `201901` and `201902`. You can use this column value to specify the partition name in [ALTER … PARTITION](#alter_manipulations-with-partitions) queries.

The `name` column contains the names of the partition data parts. You can use this column to specify the name of the part in the [ALTER ATTACH PART](#alter_attach-partition) query.

Let's break down the name of the first part: `201901_1_3_1`:

- `201901` is the partition name.
- `1` is the minimum number of the data block.
- `3` is the maximum number of the data block.
- `1` is the chunk level (the depth of the merge tree it is formed from).

!!! info "Info"
    The parts of old-type tables have names like `20190117_20190123_2_2_0` (minimum date - maximum date - minimum block number - maximum block number - level).

The `active` column shows the status of the part. `1` is active; `0` is inactive. Inactive parts are, for example, source parts remaining after merging to a larger part. Corrupted data parts are also indicated as inactive.

As you can see in the example, there are several separated parts of the same partition (for example, `201901_1_3_1` and `201901_1_9_2`). This means that these parts are not merged yet. ClickHouse merges the inserted parts of data periodically, approximately 15 minutes after inserting. In addition, you can perform a non-scheduled merge using the [OPTIMIZE](../../../sql_reference/statements/misc.md#misc_operations-optimize) query.
Example:

``` sql
OPTIMIZE TABLE visits PARTITION 201902;
```

``` text
┌─partition─┬─name───────────┬─active─┐
│ 201901    │ 201901_1_3_1   │      0 │
│ 201901    │ 201901_1_9_2   │      1 │
│ 201901    │ 201901_8_8_0   │      0 │
│ 201901    │ 201901_9_9_0   │      0 │
│ 201902    │ 201902_4_6_1   │      0 │
│ 201902    │ 201902_4_11_2  │      1 │
│ 201902    │ 201902_10_10_0 │      0 │
│ 201902    │ 201902_11_11_0 │      0 │
└───────────┴────────────────┴────────┘
```

Inactive parts will be deleted approximately 10 minutes after merging.

Another way to view a set of parts and partitions is to go into the directory of the table: `/var/lib/clickhouse/data/<database>/<table>/`.
For example:

``` bash
/var/lib/clickhouse/data/default/visits$ ls -l
total 40
drwxr-xr-x 2 clickhouse clickhouse 4096 Feb  1 16:48 201901_1_3_1
drwxr-xr-x 2 clickhouse clickhouse 4096 Feb  5 16:17 201901_1_9_2
drwxr-xr-x 2 clickhouse clickhouse 4096 Feb  5 15:52 201901_8_8_0
drwxr-xr-x 2 clickhouse clickhouse 4096 Feb  5 15:52 201901_9_9_0
drwxr-xr-x 2 clickhouse clickhouse 4096 Feb  5 16:17 201902_10_10_0
drwxr-xr-x 2 clickhouse clickhouse 4096 Feb  5 16:17 201902_11_11_0
drwxr-xr-x 2 clickhouse clickhouse 4096 Feb  5 16:19 201902_4_11_2
drwxr-xr-x 2 clickhouse clickhouse 4096 Feb  5 12:09 201902_4_6_1
drwxr-xr-x 2 clickhouse clickhouse 4096 Feb  1 16:48 detached
```

The folders `201901_1_3_1`, `201901_1_9_2` and so on are the directories of the parts. Each part relates to a corresponding partition and contains data just for a certain month (the table in this example has partitioning by month).

The `detached` directory contains parts that were detached from the table using the [DETACH](#alter_detach-partition) query. Corrupted parts are also moved to this directory instead of being deleted. The server does not use the parts from the `detached` directory. You can add, delete, or modify the data in this directory at any time – the server will not know about this until you run the [ATTACH](../../../sql_reference/statements/alter.md#alter_attach-partition) query.

Note that on the operating server, you cannot manually change the set of parts or their data on the file system, since the server will not know about it. For non-replicated tables, you can do this when the server is stopped, but it isn't recommended. For replicated tables, the set of parts cannot be changed in any case.

ClickHouse allows you to perform operations with the partitions: delete them, copy from one table to another, or create a backup. See the list of all operations in the section [Manipulations With Partitions and Parts](../../../sql_reference/statements/alter.md#alter_manipulations-with-partitions).

[Original article](https://clickhouse.tech/docs/en/operations/table_engines/custom_partitioning_key/)
diff --git a/docs/fa/engines/table_engines/mergetree_family/graphitemergetree.md b/docs/fa/engines/table_engines/mergetree_family/graphitemergetree.md
new file mode 100644
index 00000000000..b3665e06d24
--- /dev/null
+++ b/docs/fa/engines/table_engines/mergetree_family/graphitemergetree.md
@@ -0,0 +1,174 @@
---
machine_translated: true
machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
toc_priority: 38
toc_title: GraphiteMergeTree
---

# GraphiteMergeTree {#graphitemergetree}

This engine is designed for thinning and aggregating/averaging (rollup) [Graphite](http://graphite.readthedocs.io/en/latest/index.html) data. It may be helpful to developers who want to use ClickHouse as a data store for Graphite.

You can use any ClickHouse table engine to store the Graphite data if you don't need rollup, but if you need a rollup, use `GraphiteMergeTree`. The engine reduces the volume of storage and increases the efficiency of queries from Graphite.

The engine inherits properties from [MergeTree](mergetree.md).

## Creating a Table {#creating-table}

``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
(
    Path String,
    Time DateTime,
    Value <Numeric_type>,
    Version <Numeric_type>
    ...
) ENGINE = GraphiteMergeTree(config_section)
[PARTITION BY expr]
[ORDER BY expr]
[SAMPLE BY expr]
[SETTINGS name=value, ...]
```

See a detailed description of the [CREATE TABLE](../../../sql_reference/statements/create.md#create-table-query) query.

A table for Graphite data should have the following columns for the following data:

- Metric name (Graphite sensor). Data type: `String`.

- Time of measuring the metric. Data type: `DateTime`.

- Value of the metric. Data type: any numeric.

- Version of the metric. Data type: any numeric.

    ClickHouse saves the rows with the highest version, or the last written if versions are the same. Other rows are deleted during the merge of data parts.

The names of these columns should be set in the rollup configuration.

**GraphiteMergeTree parameters**

- `config_section` — Name of the section in the configuration file where the rules of rollup are set.

**Query clauses**

When creating a `GraphiteMergeTree` table, the same [clauses](mergetree.md#table_engine-mergetree-creating-a-table) are required as when creating a `MergeTree` table.
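
For illustration, the template above can be instantiated as follows. This is a minimal sketch: the table name, the concrete `Float64`/`UInt32` types, and the `graphite_rollup` section name are assumptions chosen for the example, not requirements from this document.

``` sql
-- Hypothetical example: concrete numeric types and the 'graphite_rollup'
-- configuration section name are assumptions for illustration.
CREATE TABLE graphite_data
(
    Path String,
    Time DateTime,
    Value Float64,
    Version UInt32
) ENGINE = GraphiteMergeTree('graphite_rollup')
PARTITION BY toYYYYMM(Time)
ORDER BY (Path, Time);
```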

<details markdown="1">

<summary>Deprecated Method for Creating a Table</summary>

!!! attention "Attention"
    Do not use this method in new projects and, if possible, switch old projects to the method described above.

``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
(
    EventDate Date,
    Path String,
    Time DateTime,
    Value <Numeric_type>,
    Version <Numeric_type>
    ...
) ENGINE [=] GraphiteMergeTree(date-column [, sampling_expression], (primary, key), index_granularity, config_section)
```

All of the parameters except `config_section` have the same meaning as in `MergeTree`.

- `config_section` — Name of the section in the configuration file where the rules of rollup are set.

</details>

## Rollup Configuration {#rollup-configuration}

The settings for rollup are defined by the [graphite_rollup](../../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-graphite_rollup) parameter in the server configuration. The name of the parameter could be anything. You can create several configurations and use them for different tables.

Rollup configuration structure:

      required-columns
      patterns

### Required Columns {#required-columns}

- `path_column_name` — The name of the column storing the metric name (Graphite sensor). Default value: `Path`.
- `time_column_name` — The name of the column storing the time of measuring the metric. Default value: `Time`.
- `value_column_name` — The name of the column storing the value of the metric at the time set in `time_column_name`. Default value: `Value`.
- `version_column_name` — The name of the column storing the version of the metric. Default value: `Timestamp`.

### Patterns {#patterns}

Structure of the `patterns` section:

``` text
pattern
    regexp
    function
pattern
    regexp
    age
    precision
    ...
pattern
    regexp
    function
    age
    precision
    ...
pattern
    ...
default
    function
    age
    precision
    ...
```

!!! warning "Attention"
    Patterns must be strictly ordered:

    1. Patterns without `function` or `retention`.
    1. Patterns with both `function` and `retention`.
    1. Pattern `default`.

When processing a row, ClickHouse checks the rules in the `pattern` sections. Each of the `pattern` (including `default`) sections can contain the `function` parameter for aggregation, `retention` parameters, or both. If the metric name matches the `regexp`, the rules from the `pattern` section (or sections) are applied; otherwise, the rules from the `default` section are used.

Fields for the `pattern` and `default` sections:

- `regexp` – A pattern for the metric name.
- `age` – The minimum age of the data in seconds.
- `precision` – How precisely to define the age of the data in seconds. Should be a divisor for 86400 (seconds in a day).
- `function` – The name of the aggregating function to apply to data whose age falls within the range `[age, age + precision]`.

### Configuration Example {#configuration-example}

``` xml
<graphite_rollup>
    <version_column_name>Version</version_column_name>
    <pattern>
        <regexp>click_cost</regexp>
        <function>any</function>
        <retention>
            <age>0</age>
            <precision>5</precision>
        </retention>
        <retention>
            <age>86400</age>
            <precision>60</precision>
        </retention>
    </pattern>
    <default>
        <function>max</function>
        <retention>
            <age>0</age>
            <precision>60</precision>
        </retention>
        <retention>
            <age>3600</age>
            <precision>300</precision>
        </retention>
        <retention>
            <age>86400</age>
            <precision>3600</precision>
        </retention>
    </default>
</graphite_rollup>
```

[Original article](https://clickhouse.tech/docs/en/operations/table_engines/graphitemergetree/)
diff --git a/docs/fa/engines/table_engines/mergetree_family/index.md b/docs/fa/engines/table_engines/mergetree_family/index.md
new file mode 100644
index 00000000000..b807da4f929
--- /dev/null
+++ b/docs/fa/engines/table_engines/mergetree_family/index.md
@@ -0,0 +1,8 @@
---
machine_translated: true
machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
toc_folder_title: MergeTree Family
toc_priority: 28
---

diff --git a/docs/fa/engines/table_engines/mergetree_family/mergetree.md b/docs/fa/engines/table_engines/mergetree_family/mergetree.md
new file mode 100644
index 00000000000..b6f65611596
--- /dev/null
+++ b/docs/fa/engines/table_engines/mergetree_family/mergetree.md
@@ -0,0 +1,654 @@
---
machine_translated: true
machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
toc_priority: 30
toc_title: MergeTree
---

# MergeTree {#table_engines-mergetree}

The `MergeTree` engine and other engines of this family (`*MergeTree`) are the most robust ClickHouse table engines.

Engines in the `MergeTree` family are designed for inserting a very large amount of data into a table.
The data are quickly written to the table part by part, then rules are applied for merging the parts in the background. This method is much more efficient than continually rewriting the data in storage during insert.

Main features:

- Stores data sorted by primary key.

    This allows you to create a small sparse index that helps find data faster.

- Partitions can be used if the [partitioning key](custom_partitioning_key.md) is specified.

    ClickHouse supports certain operations with partitions that are more effective than general operations on the same data with the same result. ClickHouse also automatically cuts off the partition data where the partitioning key is specified in the query. This also improves query performance.

- Data replication support.

    The family of `ReplicatedMergeTree` tables provides data replication. For more information, see [Data replication](replication.md).

- Data sampling support.

    If necessary, you can set the data sampling method in the table.

!!! info "Info"
    The [Merge](../special/merge.md) engine does not belong to the `*MergeTree` family.

## Creating a Table {#table_engine-mergetree-creating-a-table}

``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
(
    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [TTL expr1],
    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2] [TTL expr2],
    ...
    INDEX index_name1 expr1 TYPE type1(...) GRANULARITY value1,
    INDEX index_name2 expr2 TYPE type2(...) GRANULARITY value2
) ENGINE = MergeTree()
[PARTITION BY expr]
[ORDER BY expr]
[PRIMARY KEY expr]
[SAMPLE BY expr]
[TTL expr [DELETE|TO DISK 'xxx'|TO VOLUME 'xxx'], ...]
[SETTINGS name=value, ...]
```

For a description of parameters, see the [CREATE query description](../../../sql_reference/statements/create.md).

!!! note "Note"
    `INDEX` is an experimental feature, see [Data Skipping Indexes](#table_engine-mergetree-data_skipping-indexes).

### Query Clauses {#mergetree-query-clauses}

- `ENGINE` — Name and parameters of the engine. `ENGINE = MergeTree()`. The `MergeTree` engine does not have parameters.

- `PARTITION BY` — The [partitioning key](custom_partitioning_key.md).

    For partitioning by month, use the `toYYYYMM(date_column)` expression, where `date_column` is a column with a date of the type [Date](../../../sql_reference/data_types/date.md). The partition names here have the `"YYYYMM"` format.

- `ORDER BY` — The sorting key.

    A tuple of columns or arbitrary expressions. Example: `ORDER BY (CounterID, EventDate)`.

- `PRIMARY KEY` — The primary key if it [differs from the sorting key](mergetree.md).

    By default the primary key is the same as the sorting key (which is specified by the `ORDER BY` clause). Thus in most cases it is unnecessary to specify a separate `PRIMARY KEY` clause.

- `SAMPLE BY` — An expression for sampling.

    If a sampling expression is used, the primary key must contain it. Example: `SAMPLE BY intHash32(UserID) ORDER BY (CounterID, EventDate, intHash32(UserID))`.

- `TTL` — A list of rules specifying the storage duration of rows and defining the logic of automatic parts movement [between disks and volumes](#table_engine-mergetree-multiple-volumes).

    The expression must have one `Date` or `DateTime` column as a result. Example:
    `TTL date + INTERVAL 1 DAY`

    The type of the rule `DELETE|TO DISK 'xxx'|TO VOLUME 'xxx'` specifies an action to be done with the part if the expression is satisfied (reaches the current time): removal of expired rows, moving a part (if the expression is satisfied for all rows in a part) to the specified disk (`TO DISK 'xxx'`) or to the volume (`TO VOLUME 'xxx'`). The default type of the rule is removal (`DELETE`).
    A list of multiple rules can be specified, but there should be no more than one `DELETE` rule.

    For more details, see [TTL for columns and tables](#table_engine-mergetree-ttl).

- `SETTINGS` — Additional parameters that control the behavior of the `MergeTree`:

    - `index_granularity` — Maximum number of data rows between the marks of an index. Default value: 8192. See [Data Storage](#mergetree-data-storage).
    - `index_granularity_bytes` — Maximum size of data granules in bytes. Default value: 10Mb. To restrict the granule size only by the number of rows, set to 0 (not recommended). See [Data Storage](#mergetree-data-storage).
    - `enable_mixed_granularity_parts` — Enables or disables transitioning to control the granule size with the `index_granularity_bytes` setting. Before version 19.11, there was only the `index_granularity` setting for restricting the granule size. The `index_granularity_bytes` setting improves ClickHouse performance when selecting data from tables with big rows (tens and hundreds of megabytes). If you have tables with big rows, you can enable this setting for the tables to improve the efficiency of `SELECT` queries.
    - `use_minimalistic_part_header_in_zookeeper` — Storage method of the data parts headers in ZooKeeper. If `use_minimalistic_part_header_in_zookeeper=1`, then ZooKeeper stores less data. For more information, see the [setting description](../../../operations/server_configuration_parameters/settings.md#server-settings-use_minimalistic_part_header_in_zookeeper) in “Server configuration parameters”.
    - `min_merge_bytes_to_use_direct_io` — The minimum data volume for a merge operation that is required for using direct I/O access to the storage disk. When merging data parts, ClickHouse calculates the total storage volume of all the data to be merged. If the volume exceeds `min_merge_bytes_to_use_direct_io` bytes, ClickHouse reads and writes the data to the storage disk using the direct I/O interface (`O_DIRECT` option). If `min_merge_bytes_to_use_direct_io = 0`, then direct I/O is disabled. Default value: `10 * 1024 * 1024 * 1024` bytes.

    - `merge_with_ttl_timeout` — Minimum delay in seconds before repeating a merge with TTL. Default value: 86400 (1 day).
    - `write_final_mark` — Enables or disables writing the final index mark at the end of the data part (after the last byte). Default value: 1. Don't turn it off.
    - `merge_max_block_size` — Maximum number of rows in a block for merge operations. Default value: 8192.
    - `storage_policy` — Storage policy. See [Using Multiple Block Devices for Data Storage](#table_engine-mergetree-multiple-volumes).

**Example of Sections Setting**

``` sql
ENGINE MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity=8192
```

In the example, we set partitioning by month.

We also set an expression for sampling as a hash by the user ID. This allows you to pseudorandomize the data in the table for each `CounterID` and `EventDate`. If you define a [SAMPLE](../../../sql_reference/statements/select.md#select-sample-clause) clause when selecting the data, ClickHouse will return an evenly pseudorandom data sample for a subset of users.

The `index_granularity` setting can be omitted because 8192 is the default value.
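
Put together as one complete statement, the clauses from this example would look roughly as follows. This is a minimal sketch: the table and column names are chosen for illustration and do not come from the original.

``` sql
-- Hypothetical table; the clauses mirror the sections example above.
CREATE TABLE hits
(
    EventDate Date,
    CounterID UInt32,
    UserID UInt64
) ENGINE = MergeTree()
PARTITION BY toYYYYMM(EventDate)
ORDER BY (CounterID, EventDate, intHash32(UserID))
SAMPLE BY intHash32(UserID)
SETTINGS index_granularity = 8192;
```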

<details markdown="1">

<summary>Deprecated Method for Creating a Table</summary>

!!! attention "Attention"
    Do not use this method in new projects. If possible, switch old projects to the method described above.

``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
(
    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
    ...
) ENGINE [=] MergeTree(date-column [, sampling_expression], (primary, key), index_granularity)
```

**MergeTree() parameters**

- `date-column` — The name of a column of the [Date](../../../sql_reference/data_types/date.md) type. ClickHouse automatically creates partitions by month based on this column. The partition names are in the `"YYYYMM"` format.
- `sampling_expression` — An expression for sampling.
- `(primary, key)` — Primary key. Type: [Tuple()](../../../sql_reference/data_types/tuple.md)
- `index_granularity` — The granularity of an index. The number of data rows between the “marks” of an index. The value 8192 is appropriate for most tasks.

**Example**

``` sql
MergeTree(EventDate, intHash32(UserID), (CounterID, EventDate, intHash32(UserID)), 8192)
```

The `MergeTree` engine is configured in the same way as in the example above for the main engine configuration method.

</details>

## Data Storage {#mergetree-data-storage}

A table consists of data parts sorted by primary key.

When data is inserted into a table, separate data parts are created, and each of them is lexicographically sorted by the primary key. For example, if the primary key is `(CounterID, Date)`, the data in the part is sorted by `CounterID`, and within each `CounterID` it is ordered by `Date`.

Data belonging to different partitions are separated into different parts. In the background, ClickHouse merges data parts for more efficient storage. Parts belonging to different partitions are not merged. The merge mechanism does not guarantee that all rows with the same primary key will be in the same data part.

Each data part is logically divided into granules. A granule is the smallest indivisible data set that ClickHouse reads when selecting data. ClickHouse doesn't split rows or values, so each granule always contains an integer number of rows. The first row of a granule is marked with the value of the primary key for the row. For each data part, ClickHouse creates an index file that stores the marks. For each column, whether it's in the primary key or not, ClickHouse also stores the same marks. These marks let you find data directly in the column files.

The granule size is restricted by the `index_granularity` and `index_granularity_bytes` settings of the table engine. The number of rows in a granule lies in the `[1, index_granularity]` range, depending on the size of the rows. The size of a granule can exceed `index_granularity_bytes` if the size of a single row is greater than the value of the setting. In this case, the size of the granule equals the size of the row.

## Primary Keys and Indexes in Queries {#primary-keys-and-indexes-in-queries}

Take the `(CounterID, Date)` primary key as an example. In this case, the sorting and index can be illustrated as follows:

    Whole data:     [---------------------------------------------]
    CounterID:      [aaaaaaaaaaaaaaaaaabbbbcdeeeeeeeeeeeeefgggggggghhhhhhhhhiiiiiiiiikllllllll]
    Date:           [1111111222222233331233211111222222333211111112122222223111112223311122333]
    Marks:           |      |      |      |      |      |      |      |      |      |      |
                    a,1    a,2    a,3    b,3    e,2    e,3    g,1    h,2    i,1    i,3    l,3
    Marks numbers:   0      1      2      3      4      5      6      7      8      9      10

If the data query specifies:

- `CounterID in ('a', 'h')`, the server reads the data in the ranges of marks `[0, 3)` and `[6, 8)`.
- `CounterID IN ('a', 'h') AND Date = 3`, the server reads the data in the ranges of marks `[1, 3)` and `[7, 8)`.
- `Date = 3`, the server reads the data in the range of marks `[1, 10]`.

The examples above show that it is always more effective to use an index than a full scan.

A sparse index allows extra data to be read. When reading a single range of the primary key, up to `index_granularity * 2` extra rows in each data block can be read.

Sparse indexes allow you to work with a very large number of table rows, because in most cases such indexes fit in the computer's RAM.

ClickHouse does not require a unique primary key. You can insert multiple rows with the same primary key.

### Selecting the Primary Key {#selecting-the-primary-key}

The number of columns in the primary key is not explicitly limited. Depending on the data structure, you can include more or fewer columns in the primary key. This may:

- Improve the performance of an index.

    If the primary key is `(a, b)`, then adding another column `c` will improve the performance if the following conditions are met:

    - There are queries with a condition on column `c`.
    - Long data ranges (several times longer than `index_granularity`) with identical values for `(a, b)` are common. In other words, when adding another column allows you to skip quite long data ranges.

- Improve data compression.

    ClickHouse sorts data by primary key, so the higher the consistency, the better the compression.

- Provide additional logic when merging data parts in the [CollapsingMergeTree](collapsingmergetree.md#table_engine-collapsingmergetree) and [SummingMergeTree](summingmergetree.md) engines.

    In this case it makes sense to specify a *sorting key* that is different from the primary key.

A long primary key will negatively affect insert performance and memory consumption, but extra columns in the primary key do not affect ClickHouse performance during `SELECT` queries.

### Choosing a Primary Key that Differs from the Sorting Key {#choosing-a-primary-key-that-differs-from-the-sorting-key}

It is possible to specify a primary key (an expression with values that are written in the index file for each mark) that is different from the sorting key (an expression for sorting the rows in data parts). In this case the primary key expression tuple must be a prefix of the sorting key expression tuple.

This feature is helpful when using the [SummingMergeTree](summingmergetree.md) and
[AggregatingMergeTree](aggregatingmergetree.md) table engines. In a common case when using these engines, the table has two types of columns: *dimensions* and *measures*. Typical queries aggregate the values of the measure columns with an arbitrary `GROUP BY` and filtering by dimensions. Because SummingMergeTree and AggregatingMergeTree aggregate rows with the same value of the sorting key, it is natural to add all dimensions to it. As a result, the key expression consists of a long list of columns, and this list must be frequently updated with newly added dimensions.

In this case it makes sense to leave only a few columns in the primary key that will provide efficient range scans and add the remaining dimension columns to the sorting key tuple.

[ALTER](../../../sql_reference/statements/alter.md) of the sorting key is a lightweight operation because when a new column is simultaneously added to the table and to the sorting key, existing data parts don't need to be changed. Since the old sorting key is a prefix of the new sorting key and there is no data in the newly added column, the data is sorted by both the old and new sorting keys at the moment of the table modification.

### Use of Indexes and Partitions in Queries {#use-of-indexes-and-partitions-in-queries}

For `SELECT` queries, ClickHouse analyzes whether an index can be used. An index can be used if the `WHERE/PREWHERE` clause has an expression (as one of the conjunction elements, or entirely) that represents an equality or inequality comparison operation, or if it has `IN` or `LIKE` with a fixed prefix on columns or expressions that are in the primary key or partitioning key, or on certain partially repetitive functions of these columns, or logical relationships of these expressions.

Thus, it is possible to quickly run queries on one or many ranges of the primary key. In this example, queries will be fast when run for a specific tracking tag, for a specific tag and date range, for a specific tag and date, for multiple tags with a date range, and so on.

Let's look at the engine configured as follows:

    ENGINE MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate) SETTINGS index_granularity=8192

In this case, in the queries:

``` sql
SELECT count() FROM table WHERE EventDate = toDate(now()) AND CounterID = 34
SELECT count() FROM table WHERE EventDate = toDate(now()) AND (CounterID = 34 OR CounterID = 42)
SELECT count() FROM table WHERE ((EventDate >= toDate('2014-01-01') AND EventDate <= toDate('2014-01-31')) OR EventDate = toDate('2014-05-01')) AND CounterID IN (101500, 731962, 160656) AND (CounterID = 101500 OR EventDate != toDate('2014-05-01'))
```

ClickHouse will use the primary key index to trim improper data and the monthly partitioning key to trim partitions that are in improper date ranges.

The queries above show that the index is used even for complex expressions. Reading from the table is organized so that using the index can't be slower than a full scan.

In the example below, the index can't be used.

``` sql
SELECT count() FROM table WHERE CounterID = 34 OR URL LIKE '%upyachka%'
```

To check whether ClickHouse can use the index when running a query, use the settings [force_index_by_date](../../../operations/settings/settings.md#settings-force_index_by_date) and [force_primary_key](../../../operations/settings/settings.md).

The key for partitioning by month allows reading only those data blocks that contain dates from the proper range. In this case, a data block may contain data for many dates (up to an entire month). Within a block, data is sorted by primary key, which might not contain the date as the first column. Because of this, using a query with only a date condition that does not specify the primary key prefix will cause more data than for a single date to be read.

### Use of Index for Partially-Monotonic Primary Keys {#use-of-index-for-partially-monotonic-primary-keys}

Consider, for example, the days of the month. They form a [monotonic sequence](https://en.wikipedia.org/wiki/Monotonic_function) for one month, but are not monotonic for more extended periods. This is a partially-monotonic sequence. If a user creates a table with a partially-monotonic primary key, ClickHouse creates a sparse index as usual. When a user selects data from this kind of table, ClickHouse analyzes the query conditions. If the user wants to get data between two marks of the index and both these marks fall within one month, ClickHouse can use the index in this particular case because it can calculate the distance between the parameters of the query and the index marks.

ClickHouse cannot use an index if the values of the primary key in the query parameter range do not represent a monotonic sequence. In this case, ClickHouse uses the full scan method.

ClickHouse uses this logic not only for days-of-the-month sequences, but for any primary key that represents a partially-monotonic sequence.

### Data Skipping Indexes (Experimental) {#table_engine-mergetree-data_skipping-indexes}

The index declaration is in the columns section of the `CREATE` query.

``` sql
INDEX index_name expr TYPE type(...) GRANULARITY granularity_value
```

For tables from the `*MergeTree` family, data skipping indices can be specified.

These indices aggregate some information about the specified expression on blocks, which consist of `granularity_value` granules (the size of the granule is specified using the `index_granularity` setting in the table engine). Then these aggregates are used in `SELECT` queries for reducing the amount of data to read from the disk by skipping big blocks of data where the `where` query cannot be satisfied.

**Example**

``` sql
CREATE TABLE table_name
(
    u64 UInt64,
    i32 Int32,
    s String,
    ...
    INDEX a (u64 * i32, s) TYPE minmax GRANULARITY 3,
    INDEX b (u64 * length(s)) TYPE set(1000) GRANULARITY 4
) ENGINE = MergeTree()
...
```

Indices from the example can be used by ClickHouse to reduce the amount of data to read from disk in the following queries:

``` sql
SELECT count() FROM table WHERE s < 'z'
SELECT count() FROM table WHERE u64 * i32 == 10 AND u64 * length(s) >= 1234
```

#### Available Types of Indices {#available-types-of-indices}

- `minmax`

    Stores the extremes of the specified expression (if the expression is a `tuple`, then it stores the extremes for each element of the `tuple`), and uses the stored info for skipping blocks of data like the primary key.

- `set(max_rows)`

    Stores the unique values of the specified expression (no more than `max_rows` rows, `max_rows=0` means “no limits”). Uses the values to check whether the `WHERE` expression is not satisfiable on a block of data.

- `ngrambf_v1(n, size_of_bloom_filter_in_bytes, number_of_hash_functions, random_seed)`

    Stores a [Bloom filter](https://en.wikipedia.org/wiki/Bloom_filter) that contains all ngrams from a block of data. Works only with strings. Can be used for optimization of `equals`, `like` and `in` expressions.

    - `n` — ngram size,
    - `size_of_bloom_filter_in_bytes` — Bloom filter size in bytes (you can use large values here, for example, 256 or 512, because it can be compressed well).
    - `number_of_hash_functions` — The number of hash functions used in the Bloom filter.
    - `random_seed` — The seed for Bloom filter hash functions.

- `tokenbf_v1(size_of_bloom_filter_in_bytes, number_of_hash_functions, random_seed)`

    The same as `ngrambf_v1`, but stores tokens instead of ngrams. Tokens are sequences separated by non-alphanumeric characters.

- `bloom_filter([false_positive])` — Stores a [Bloom filter](https://en.wikipedia.org/wiki/Bloom_filter) for the specified columns.

    The optional `false_positive` parameter is the probability of receiving a false positive response from the filter. Possible values: (0, 1). Default value: 0.025.

    Supported data types: `Int*`, `UInt*`, `Float*`, `Enum`, `Date`, `DateTime`, `String`, `FixedString`, `Array`, `LowCardinality`, `Nullable`.

    The following functions can use it: [equals](../../../sql_reference/functions/comparison_functions.md), [notEquals](../../../sql_reference/functions/comparison_functions.md), [in](../../../sql_reference/functions/in_functions.md), [notIn](../../../sql_reference/functions/in_functions.md), [has](../../../sql_reference/functions/array_functions.md).

``` sql
INDEX sample_index (u64 * length(s)) TYPE minmax GRANULARITY 4
INDEX sample_index2 (u64 * length(str), i32 + f64 * 100, date, str) TYPE set(100) GRANULARITY 4
INDEX sample_index3 (lower(str), str) TYPE ngrambf_v1(3, 256, 2, 0) GRANULARITY 4
```

#### Functions Support {#functions-support}

Conditions in the `WHERE` clause contain calls of functions that operate on columns. If the column is part of an index, ClickHouse tries to use this index when performing the functions. ClickHouse supports different subsets of functions for using indexes.

The `set` index can be used with all functions. Function subsets for other indexes are shown in the table below.

| Function (operator) / Index                                                                                | primary key | minmax | ngrambf_v1 | tokenbf_v1 | bloom_filter |
|------------------------------------------------------------------------------------------------------------|-------------|--------|------------|------------|--------------|
| [equals (=, ==)](../../../sql_reference/functions/comparison_functions.md#function-equals)                  | ✔           | ✔      | ✔          | ✔          | ✔            |
| [notEquals (!=, \<\>)](../../../sql_reference/functions/comparison_functions.md#function-notequals)         | ✔           | ✔      | ✔          | ✔          | ✔            |
| [like](../../../sql_reference/functions/string_search_functions.md#function-like)                           | ✔           | ✔      | ✔          | ✗          | ✗            |
| [notLike](../../../sql_reference/functions/string_search_functions.md#function-notlike)                     | ✔           | ✔      | ✔          | ✗          | ✗            |
| [startsWith](../../../sql_reference/functions/string_functions.md#startswith)                               | ✔           | ✔      | ✔          | ✔          | ✗            |
| [endsWith](../../../sql_reference/functions/string_functions.md#endswith)                                   | ✗           | ✗      | ✔          | ✔          | ✗            |
| [multiSearchAny](../../../sql_reference/functions/string_search_functions.md#function-multisearchany)       | ✗           | ✗      | ✔          | ✗          | ✗            |
| [in](../../../sql_reference/functions/in_functions.md#in-functions)                                         | ✔           | ✔      | ✔          | ✔          | ✔            |
| [notIn](../../../sql_reference/functions/in_functions.md#in-functions)                                      | ✔           | ✔      | ✔          | ✔          | ✔            |
| [less (\<)](../../../sql_reference/functions/comparison_functions.md#function-less)                         | ✔           | ✔      | ✗          | ✗          | ✗            |
| [greater (\>)](../../../sql_reference/functions/comparison_functions.md#function-greater)                   | ✔           | ✔      | ✗          | ✗          | ✗            |
| [lessOrEquals (\<=)](../../../sql_reference/functions/comparison_functions.md#function-lessorequals)        | ✔           | ✔      | ✗          | ✗          | ✗            |
| [greaterOrEquals (\>=)](../../../sql_reference/functions/comparison_functions.md#function-greaterorequals)  | ✔           | ✔      | ✗          | ✗          | ✗            |
| [empty](../../../sql_reference/functions/array_functions.md#function-empty)                                 | ✔           | ✔      | ✗          | ✗          | ✗            |
| [notEmpty](../../../sql_reference/functions/array_functions.md#function-notempty)                           | ✔           | ✔      | ✗          | ✗          | ✗            |
| hasToken                                                                                                    | ✗           | ✗      | ✗          | ✔          | ✗            |

Functions with a constant argument that is less than the ngram size can't be used by `ngrambf_v1` for query optimization.

Bloom filters can have false positive matches, so the `ngrambf_v1`, `tokenbf_v1` and `bloom_filter` indexes can't be used for optimizing queries where the result of a function is expected to be false, for example:

- Can be optimized:
    - `s LIKE '%test%'`
    - `NOT s NOT LIKE '%test%'`
    - `s = 1`
    - `NOT s != 1`
    - `startsWith(s, 'test')`
- Can't be optimized:
    - `NOT s LIKE '%test%'`
    - `s NOT LIKE '%test%'`
    - `NOT s = 1`
    - `s != 1`
    - `NOT startsWith(s, 'test')`

## Concurrent Data Access {#concurrent-data-access}

For concurrent table access, we use multi-versioning. In other words, when a table is simultaneously read and updated, data is read from a set of parts that is current at the time of the query. There are no lengthy locks. Inserts do not get in the way of read operations.

Reading from a table is automatically parallelized.

## TTL for Columns and Tables {#table_engine-mergetree-ttl}

Determines the lifetime of values.

The `TTL` clause can be set for the whole table and for each individual column. It also specifies the logic of automatic data movement between disks and volumes.

Expressions must evaluate to the [Date](../../../sql_reference/data_types/date.md) or [DateTime](../../../sql_reference/data_types/datetime.md) data type.

Example:

``` sql
TTL time_column
TTL time_column + interval
```

To define `interval`, use [time interval](../../../sql_reference/operators.md#operators-datetime) operators.

``` sql
TTL date_time + INTERVAL 1 MONTH
TTL date_time + INTERVAL 15 HOUR
```

### Column TTL {#mergetree-column-ttl}

When the values in a column expire, ClickHouse replaces them with the default values for the column data type. If all the column values in a data part expire, ClickHouse deletes this column from the data part in the filesystem.

The `TTL` clause can't be used for key columns.

Examples:

Creating a table with TTL

``` sql
CREATE TABLE example_table
(
    d DateTime,
    a Int TTL d + INTERVAL 1 MONTH,
    b Int TTL d + INTERVAL 1 MONTH,
    c String
)
ENGINE = MergeTree
PARTITION BY toYYYYMM(d)
ORDER BY d;
```

Adding TTL to a column of an existing table

``` sql
ALTER TABLE example_table
    MODIFY COLUMN
    c String TTL d + INTERVAL 1 DAY;
```

Altering TTL of the column

``` sql
ALTER TABLE example_table
    MODIFY COLUMN
    c String TTL d + INTERVAL 1 MONTH;
```

### Table TTL {#mergetree-table-ttl}

A table can have an expression for the removal of expired rows and multiple expressions for the automatic move of parts between [disks or volumes](#table_engine-mergetree-multiple-volumes). When rows in the table expire, ClickHouse deletes all corresponding rows. For the parts-moving feature, all rows of a part must satisfy the movement expression criteria.

``` sql
TTL expr [DELETE|TO DISK 'aaa'|TO VOLUME 'bbb'], ...
```

The type of TTL rule may follow each TTL expression. It affects an action that is to be done once the expression is satisfied (reaches the current time):

- `DELETE` - delete expired rows (default action);
- `TO DISK 'aaa'` - move part to the disk `aaa`;
- `TO VOLUME 'bbb'` - move part to the volume `bbb`.

Examples:

Creating a table with TTL

``` sql
CREATE TABLE example_table
(
    d DateTime,
    a Int
)
ENGINE = MergeTree
PARTITION BY toYYYYMM(d)
ORDER BY d
TTL d + INTERVAL 1 MONTH [DELETE],
    d + INTERVAL 1 WEEK TO VOLUME 'aaa',
    d + INTERVAL 2 WEEK TO DISK 'bbb';
```

Altering TTL of the table

``` sql
ALTER TABLE example_table
    MODIFY TTL d + INTERVAL 1 DAY;
```

**Removing Data**

Data with an expired TTL is removed when ClickHouse merges data parts.

When ClickHouse sees that data is expired, it performs an off-schedule merge. To control the frequency of such merges, you can set [merge_with_ttl_timeout](#mergetree_setting-merge_with_ttl_timeout). If the value is too low, it will perform many off-schedule merges that may consume a lot of resources.

If you perform a `SELECT` query between merges, you may get expired data. To avoid it, use the [OPTIMIZE](../../../sql_reference/statements/misc.md#misc_operations-optimize) query before `SELECT`.

## Using Multiple Block Devices for Data Storage {#table_engine-mergetree-multiple-volumes}

### Introduction {#introduction}

`MergeTree` family table engines can store data on multiple block devices. For example, this can be useful when the data of a certain table are implicitly split into “hot” and “cold”. The most recent data is regularly requested but requires only a small amount of space. On the contrary, the fat-tailed historical data is requested rarely. If several disks are available, the “hot” data may be located on fast disks (for example, NVMe SSDs or in memory), while the “cold” data sits on relatively slow ones (for example, HDD).

A data part is the minimum movable unit for `MergeTree`-engine tables. The data belonging to one part are stored on one disk. Data parts can be moved between disks in the background (according to user settings) as well as by means of the [ALTER](../../../sql_reference/statements/alter.md#alter_move-partition) queries.
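
For example, a manual move might look like the following. This is a hedged sketch: the table, partition, part, and volume/disk names are illustrative and must correspond to your own storage policy.

``` sql
-- Move a whole partition to a named volume (names are illustrative).
ALTER TABLE example_table MOVE PARTITION 201902 TO VOLUME 'cold';

-- Move a single data part to a named disk.
ALTER TABLE example_table MOVE PART '201902_4_11_2' TO DISK 'disk1';
```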

### Terms {#terms}

- Disk — Block device mounted to the filesystem.
- Default disk — Disk that stores the path specified in the [path](../../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-path) server setting.
- Volume — Ordered set of equal disks (similar to [JBOD](https://en.wikipedia.org/wiki/Non-RAID_drive_architectures)).
- Storage policy — Set of volumes and the rules for moving data between them.

The names given to the described entities can be found in the system tables [system.storage_policies](../../../operations/system_tables.md#system_tables-storage_policies) and [system.disks](../../../operations/system_tables.md#system_tables-disks). To apply one of the configured storage policies for a table, use the `storage_policy` setting of `MergeTree`-engine family tables.

### Configuration {#table_engine-mergetree-multiple-volumes-configure}

Disks, volumes and storage policies should be declared inside the `<storage_configuration>` tag, either in the main file `config.xml` or in a distinct file in the `config.d` directory.

Configuration structure:

``` xml
<storage_configuration>
    <disks>
        <disk_name_1> <!-- disk name -->
            <path>/mnt/fast_ssd/clickhouse</path>
        </disk_name_1>
        <disk_name_2>
            <path>/mnt/hdd1/clickhouse</path>
            <keep_free_space_bytes>10485760</keep_free_space_bytes>
        </disk_name_2>
        <disk_name_3>
            <path>/mnt/hdd2/clickhouse</path>
            <keep_free_space_bytes>10485760</keep_free_space_bytes>
        </disk_name_3>

        ...
    </disks>

    ...
</storage_configuration>
```

Tags:

- `<disk_name_N>` — Disk name. Names must be different for all disks.
- `path` — path under which a server will store data (`data` and `shadow` folders), should be terminated with ‘/’.
- `keep_free_space_bytes` — the amount of free disk space to be reserved.

The order of the disk definition is not important.

Storage policies configuration markup:

``` xml
<storage_configuration>
    ...
    <policies>
        <policy_name_1>
            <volumes>
                <volume_name_1>
                    <disk>disk_name_from_disks_configuration</disk>
                    <max_data_part_size_bytes>1073741824</max_data_part_size_bytes>
                </volume_name_1>
                <volume_name_2>
                    <!-- configuration -->
                </volume_name_2>
                <!-- more volumes -->
            </volumes>
            <move_factor>0.2</move_factor>
        </policy_name_1>
        <policy_name_2>
            <!-- configuration -->
        </policy_name_2>

        <!-- more policies -->
    </policies>
    ...
</storage_configuration>
```

Tags:

- `policy_name_N` — Policy name. Policy names must be unique.
- `volume_name_N` — Volume name. Volume names must be unique.
- `disk` — a disk within a volume.
- `max_data_part_size_bytes` — the maximum size of a part that can be stored on any of the volume's disks.
- `move_factor` — when the amount of available space gets lower than this factor, data automatically start to move to the next volume, if any (by default, 0.1).

Configuration example:

``` xml
<storage_configuration>
    ...
    <policies>
        <hdd_in_order> <!-- policy name -->
            <volumes>
                <single> <!-- volume name -->
                    <disk>disk1</disk>
                    <disk>disk2</disk>
                </single>
            </volumes>
        </hdd_in_order>

        <moving_from_ssd_to_hdd>
            <volumes>
                <hot>
                    <disk>fast_ssd</disk>
                    <max_data_part_size_bytes>1073741824</max_data_part_size_bytes>
                </hot>
                <cold>
                    <disk>disk1</disk>
                </cold>
            </volumes>
            <move_factor>0.2</move_factor>
        </moving_from_ssd_to_hdd>
    </policies>
    ...
</storage_configuration>
```

In the given example, the `hdd_in_order` policy implements the [round-robin](https://en.wikipedia.org/wiki/Round-robin_scheduling) approach. Thus this policy defines only one volume (`single`), and the data parts are stored on all its disks in circular order. Such a policy can be quite useful if there are several similar disks mounted to the system, but RAID is not configured. Keep in mind that each individual disk drive is not reliable and you might want to compensate for that with a replication factor of 3 or more.

If there are different kinds of disks available in the system, the `moving_from_ssd_to_hdd` policy can be used instead. The volume `hot` consists of an SSD disk (`fast_ssd`), and the maximum size of a part that can be stored on this volume is 1GB. All the parts with a size larger than 1GB will be stored directly on the `cold` volume, which contains the HDD disk `disk1`.
Also, once the disk `fast_ssd` gets filled by more than 80%, data will be transferred to `disk1` by a background process.

The order of volume enumeration within a storage policy is important. Once a volume is overfilled, data are moved to the next one. The order of disk enumeration is important as well because data are stored on them in turns.

When creating a table, one can apply one of the configured storage policies to it:

``` sql
CREATE TABLE table_with_non_default_policy (
    EventDate Date,
    OrderID UInt64,
    BannerID UInt64,
    SearchPhrase String
) ENGINE = MergeTree
ORDER BY (OrderID, BannerID)
PARTITION BY toYYYYMM(EventDate)
SETTINGS storage_policy = 'moving_from_ssd_to_hdd'
```

The `default` storage policy implies using only one volume, which consists of only one disk given in `<path>`. Once a table is created, its storage policy cannot be changed.

### Details {#details}

In the case of `MergeTree` tables, data is getting to disk in different ways:

- As a result of an insert (`INSERT` query).
- During background merges and [mutations](../../../sql_reference/statements/alter.md#alter-mutations).
- When downloading from another replica.
- As a result of partition freezing [ALTER TABLE … FREEZE PARTITION](../../../sql_reference/statements/alter.md#alter_freeze-partition).

In all these cases except for mutations and partition freezing, a part is stored on a volume and a disk according to the given storage policy:

1. The first volume (in the order of definition) that has enough disk space for storing a part (`unreserved_space > current_part_size`) and allows for storing parts of a given size (`max_data_part_size_bytes > current_part_size`) is chosen.
2. Within this volume, that disk is chosen that follows the one which was used for storing the previous chunk of data and that has free space more than the part size (`unreserved_space - keep_free_space_bytes > current_part_size`).

Under the hood, mutations and partition freezing make use of [hard links](https://en.wikipedia.org/wiki/Hard_link). Hard links between different disks are not supported, therefore in such cases the resulting parts are stored on the same disks as the initial ones.

In the background, parts are moved between volumes on the basis of the amount of free space (the `move_factor` parameter) according to the order the volumes are declared in the configuration file.
Data is never transferred from the last volume and into the first one. One may use the system tables [system.part_log](../../../operations/system_tables.md#system_tables-part-log) (field `type = MOVE_PART`) and [system.parts](../../../operations/system_tables.md#system_tables-parts) (fields `path` and `disk`) to monitor background moves. Also, detailed information can be found in the server logs.

A user can force moving a part or a partition from one volume to another using the query [ALTER TABLE … MOVE PART\|PARTITION … TO VOLUME\|DISK …](../../../sql_reference/statements/alter.md#alter_move-partition); all the restrictions for background operations are taken into account. The query initiates a move on its own and does not wait for background operations to be completed. The user will get an error message if not enough free space is available or if any of the required conditions are not met.

Moving data does not interfere with data replication. Therefore, different storage policies can be specified for the same table on different replicas.

After the completion of background merges and mutations, old parts are removed only after a certain amount of time (`old_parts_lifetime`).
During this time, they are not moved to other volumes or disks. Therefore, until the parts are finally removed, they are still taken into account for evaluation of the occupied disk space.
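
For instance, the current placement of a table's parts can be inspected roughly as follows. This is a sketch only: the column names are an assumption based on the system tables referenced above, and on some versions the disk column of `system.parts` may be named `disk` rather than `disk_name`.

``` sql
-- Where the active parts of a table currently reside
-- (column names may differ slightly between ClickHouse versions).
SELECT name, disk_name, path
FROM system.parts
WHERE table = 'table_with_non_default_policy' AND active;
```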
+ +[مقاله اصلی](https://clickhouse.tech/docs/ru/operations/table_engines/mergetree/) diff --git a/docs/fa/engines/table_engines/mergetree_family/replacingmergetree.md b/docs/fa/engines/table_engines/mergetree_family/replacingmergetree.md new file mode 100644 index 00000000000..a2008e32229 --- /dev/null +++ b/docs/fa/engines/table_engines/mergetree_family/replacingmergetree.md @@ -0,0 +1,69 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 33 +toc_title: "\u062C\u0627\u06CC\u06AF\u0632\u06CC\u0646\u06CC" +--- + +# جایگزینی {#replacingmergetree} + +موتور متفاوت از [ادغام](mergetree.md#table_engines-mergetree) در که حذف نوشته های تکراری با همان مقدار اصلی کلید (یا دقیق تر, با همان [کلید مرتب سازی](mergetree.md) ارزش). + +تقسیم داده ها تنها در یک ادغام رخ می دهد. ادغام در پس زمینه در زمان ناشناخته رخ می دهد بنابراین شما نمی توانید برنامه ریزی کنید. برخی از داده ها ممکن است بدون پردازش باقی می ماند. اگر چه شما می توانید ادغام برنامه ریزی با استفاده از اجرا `OPTIMIZE` پرس و جو, در استفاده از این حساب نمی, به این دلیل که `OPTIMIZE` پرس و جو خواندن و نوشتن مقدار زیادی از داده ها. + +بدین ترتیب, `ReplacingMergeTree` مناسب برای پاک کردن داده های تکراری در پس زمینه برای صرفه جویی در فضا است اما عدم وجود تکراری را تضمین نمی کند. + +## ایجاد یک جدول {#creating-a-table} + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] +( + name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], + name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], + ... +) ENGINE = ReplacingMergeTree([ver]) +[PARTITION BY expr] +[ORDER BY expr] +[PRIMARY KEY expr] +[SAMPLE BY expr] +[SETTINGS name=value, ...] +``` + +برای شرح پارامترهای درخواست را ببینید [درخواست توضیحات](../../../sql_reference/statements/create.md). + +**پارامترهای جایگزین** + +- `ver` — column with version. Type `UInt*`, `Date` یا `DateTime`. پارامتر اختیاری. + + هنگام ادغام, `ReplacingMergeTree` از تمام ردیف ها با همان کلید اصلی تنها یک برگ دارد: + + - گذشته در انتخاب, اگر `ver` تنظیم نشده است. + - با حداکثر نسخه, اگر `ver` مشخص. + +**بندهای پرسوجو** + +هنگام ایجاد یک `ReplacingMergeTree` جدول همان [بند](mergetree.md) در هنگام ایجاد یک مورد نیاز است `MergeTree` جدول + +
+
+روش منسوخ برای ایجاد یک جدول
+
+!!! attention "توجه"
+    از این روش در پروژه های جدید استفاده نکنید و, در صورت امکان, پروژه های قدیمی را به روش توضیح داده شده در بالا تغییر دهید.
+
+``` sql
+CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
+(
+    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
+    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
+    ...
+) ENGINE [=] ReplacingMergeTree(date-column [, sampling_expression], (primary, key), index_granularity, [ver])
+```
+
+همه پارامترها به استثنای `ver` همان معنایی را دارند که در `MergeTree` دارند.
+
+- `ver` - ستون با نسخه. پارامتر اختیاری. برای شرح, متن بالا را ببینید.
+
    + +[مقاله اصلی](https://clickhouse.tech/docs/en/operations/table_engines/replacingmergetree/) diff --git a/docs/fa/engines/table_engines/mergetree_family/replication.md b/docs/fa/engines/table_engines/mergetree_family/replication.md new file mode 100644 index 00000000000..c03e853e29f --- /dev/null +++ b/docs/fa/engines/table_engines/mergetree_family/replication.md @@ -0,0 +1,218 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 31 +toc_title: "\u062A\u06A9\u0631\u0627\u0631 \u062F\u0627\u062F\u0647 \u0647\u0627" +--- + +# تکرار داده ها {#table_engines-replication} + +تکرار تنها برای جداول در خانواده ادغام پشتیبانی می شود: + +- تکرار غذای اصلی +- تکرار می شود +- جایگزینی تکراری +- تکرار پلاکتی مگرمرگتری +- تکرار تغییرات +- تکرار مجدد محصول +- تکرار کننده + +تکرار کار می کند در سطح یک جدول فردی, نه کل سرور. سرور می تواند هر دو جدول تکرار و غیر تکرار در همان زمان ذخیره کنید. + +تکرار بستگی ندارد sharding. هر سفال تکرار مستقل خود را دارد. + +داده های فشرده برای `INSERT` و `ALTER` نمایش داده شد تکرار شده است (برای اطلاعات بیشتر, اسناد و مدارک برای دیدن [ALTER](../../../sql_reference/statements/alter.md#query_language_queries_alter)). + +`CREATE`, `DROP`, `ATTACH`, `DETACH` و `RENAME` نمایش داده شد بر روی یک سرور اجرا و تکرار نیست: + +- این `CREATE TABLE` پرس و جو ایجاد یک جدول تکرار جدید بر روی سرور که پرس و جو اجرا شده است. اگر این جدول در حال حاضر بر روی سرور های دیگر وجود دارد, اضافه می کند یک ماکت جدید. +- این `DROP TABLE` پرس و جو حذف ماکت واقع در سرور که پرس و جو اجرا شده است. +- این `RENAME` پرس و جو تغییر نام جدول در یکی از کپی. به عبارت دیگر, جداول تکرار می توانید نام های مختلف در کپی های مختلف دارند. + +استفاده از کلیک [سرویس پرداخت درونبرنامهای پلی](https://zookeeper.apache.org) برای ذخیره سازی اطلاعات متا کپی. استفاده از باغ وحش نسخه 3.4.5 یا جدیدتر. + +برای استفاده از تکرار, پارامترهای مجموعه ای در [باغ وحش](../../../operations/server_configuration_parameters/settings.md#server-settings_zookeeper) بخش پیکربندی سرور. + +!!! attention "توجه" + هنوز تنظیمات امنیتی غفلت نیست. تاتر از `digest` [طرح اکل](https://zookeeper.apache.org/doc/current/zookeeperProgrammers.html#sc_ZooKeeperAccessControl) از زیر سیستم امنیتی باغ وحش. + +به عنوان مثال از تنظیم نشانی های خوشه باغ وحش: + +``` xml + + + example1 + 2181 + + + example2 + 2181 + + + example3 + 2181 + + +``` + +شما می توانید هر خوشه باغ وحش موجود را مشخص کنید و سیستم یک دایرکتوری را برای داده های خود استفاده می کند (دایرکتوری هنگام ایجاد یک جدول تکرار شده مشخص می شود). + +اگر باغ وحش در فایل پیکربندی تنظیم نشده, شما می توانید جداول تکرار ایجاد کنید, و هر جداول تکرار موجود خواهد شد فقط به عنوان خوانده شده. + +باغ وحش در استفاده نمی شود `SELECT` نمایش داده شد به دلیل تکرار می کند عملکرد تاثیر نمی گذارد `SELECT` و نمایش داده شد اجرا فقط به همان سرعتی که برای جداول غیر تکرار انجام می دهند. هنگامی که پرس و جو جداول تکرار توزیع, رفتار کلیک است که توسط تنظیمات کنترل [\_شروع مجدد \_شروع مجدد \_شروع مجدد \_کاربری](../../../operations/settings/settings.md#settings-max_replica_delay_for_distributed_queries) و [شناسه بسته:](../../../operations/settings/settings.md#settings-fallback_to_stale_replicas_for_distributed_queries). + +برای هر `INSERT` پرس و جو, حدود ده ورودی از طریق معاملات چند به باغ وحش دار اضافه. (به عبارت دقیق تر, این است که برای هر بلوک قرار داده شده از داده; پرس و جو درج شامل یک بلوک و یا یک بلوک در هر `max_insert_block_size = 1048576` ردیف) این منجر به زمان شروع کمی طولانی تر برای `INSERT` در مقایسه با جداول غیر تکرار. 
اما اگر شما به دنبال توصیه برای وارد کردن داده ها در دسته بیش از یک `INSERT` در هر ثانیه هیچ مشکلی ایجاد نمی کند. کل خوشه محل کلیک مورد استفاده برای هماهنگی یک خوشه باغ وحش در مجموع چند صد است `INSERTs` در هر ثانیه. توان در درج داده (تعداد ردیف در ثانیه) فقط به عنوان بالا به عنوان داده های غیر تکرار شده است. + +برای خوشه های بسیار بزرگ, شما می توانید خوشه باغ وحش های مختلف برای خرده ریز های مختلف استفاده کنید. با این حال, این لازم در یاندکس ثابت نشده.متریکا خوشه (تقریبا 300 سرور). + +تکرار ناهمزمان و چند استاد است. `INSERT` نمایش داده شد (و همچنین `ALTER`) را می توان به هر سرور در دسترس ارسال می شود. داده ها بر روی سرور قرار می گیرند که پرس و جو اجرا می شود و سپس به سرورهای دیگر کپی می شود. زیرا ناهمگام است, داده به تازگی قرار داده شده در کپی دیگر با برخی از تاخیر به نظر می رسد. اگر بخشی از کپی در دسترس نیست, داده ها نوشته شده است که در دسترس تبدیل. اگر یک ماکت در دسترس است, تاخیر مقدار زمان لازم برای انتقال بلوک از داده های فشرده بر روی شبکه است. + +به طور پیش فرض, پرس و جو درج منتظر تایید نوشتن داده ها از تنها یک ماکت. اگر داده ها با موفقیت به تنها یک ماکت نوشته شده بود و سرور با این ماکت متوقف به وجود, داده های ذخیره شده از دست خواهد رفت. برای فعال کردن گرفتن تایید داده ها می نویسد: از کپی های متعدد با استفاده از `insert_quorum` انتخاب + +هر بلوک از داده ها به صورت اتمی نوشته شده است. پرس و جو درج شده است را به بلوک تا تقسیم `max_insert_block_size = 1048576` ردیف به عبارت دیگر اگر `INSERT` پرس و جو کمتر از 1048576 ردیف, این است که به صورت اتمی ساخته شده. + +بلوک های داده تقسیم می شوند. برای چند می نویسد از بلوک داده های مشابه (بلوک های داده از همان اندازه حاوی ردیف در همان جهت) بلوک تنها یک بار نوشته شده است. دلیل این کار این است که در صورت شکست شبکه زمانی که نرم افزار سرویس گیرنده نمی داند که اگر داده ها به دسی بل نوشته شده بود, بنابراین `INSERT` پرس و جو به سادگی می تواند تکرار شود. مهم نیست که درج ماکت با داده های یکسان فرستاده شد. `INSERTs` ژولیده اند. پارامترهای تقسیم بندی توسط [ادغام](../../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-merge_tree) تنظیمات سرور. + +در طول تکرار, تنها داده های منبع برای وارد کردن بر روی شبکه منتقل. تحول داده های بیشتر (ادغام) هماهنگ و در تمام کپی در همان راه انجام. این به حداقل می رساند استفاده از شبکه, به این معنی که تکرار خوبی کار می کند زمانی که کپی در مراکز داده های مختلف اقامت. (توجه داشته باشید که تکثیر داده ها در مراکز داده های مختلف هدف اصلی از تکرار است.) + +شما می توانید هر تعداد از کپی از داده های مشابه داشته باشد. یاندکسمتریکا از تکرار دوگانه در تولید استفاده می کند. هر سرور با استفاده از حمله-5 و یا حمله-6, و حمله-10 در برخی موارد. این یک راه حل نسبتا قابل اعتماد و راحت است. + +سیستم نظارت بر هماهنگ سازی داده ها در کپی و قادر به بازیابی پس از شکست است. عدم موفقیت خودکار است (برای تفاوت های کوچک در داده ها) و یا نیمه اتوماتیک (زمانی که داده ها متفاوت بیش از حد, که ممکن است یک خطای پیکربندی نشان می دهد). + +## ایجاد جداول تکرار شده {#creating-replicated-tables} + +این `Replicated` پیشوند به نام موتور جدول اضافه شده است. به عنوان مثال:`ReplicatedMergeTree`. + +**تکرار \* پارامترهای ادغام** + +- `zoo_path` — The path to the table in ZooKeeper. +- `replica_name` — The replica name in ZooKeeper. + +مثال: + +``` sql +CREATE TABLE table_name +( + EventDate DateTime, + CounterID UInt32, + UserID UInt32 +) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{layer}-{shard}/table_name', '{replica}') +PARTITION BY toYYYYMM(EventDate) +ORDER BY (CounterID, EventDate, intHash32(UserID)) +SAMPLE BY intHash32(UserID) +``` + +
+
+مثالی با نحو منسوخ
+
+``` sql
+CREATE TABLE table_name
+(
+    EventDate DateTime,
+    CounterID UInt32,
+    UserID UInt32
+) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{layer}-{shard}/table_name', '{replica}', EventDate, intHash32(UserID), (CounterID, EventDate, intHash32(UserID), EventTime), 8192)
+```
+
    + +به عنوان مثال نشان می دهد, این پارامترها می تواند تعویض در براکت فرفری شامل. مقادیر جایگزین از گرفته ‘macros’ بخش از فایل پیکربندی. مثال: + +``` xml + + 05 + 02 + example05-02-1.yandex.ru + +``` + +مسیر به جدول در باغ وحش باید منحصر به فرد برای هر جدول تکرار شود. جداول در خرده ریز های مختلف باید مسیرهای مختلف داشته باشد. +در این مورد مسیر شامل قسمت های زیر است: + +`/clickhouse/tables/` پیشوند رایج است. ما توصیه می کنیم با استفاده از دقیقا این یکی. + +`{layer}-{shard}` شناسه سفال است. در این مثال شامل دو بخش از یاندکس.متریکا خوشه با استفاده از دو سطح شاردینگ. برای بسیاری از وظایف, شما می توانید فقط جایگزینی {سفال} ترک, خواهد شد که به شناسه سفال گسترش. + +`table_name` نام گره برای جدول در باغ وحش است. این یک ایده خوب را به همان نام جدول است. این است که به صراحت تعریف, چرا که در مقابل به نام جدول, این کار پس از یک پرس و جو تغییر نام نمی. +*HINT*: شما می توانید یک نام پایگاه داده در مقابل اضافه کنید `table_name` همینطور E. g. `db_name.table_name` + +نام ماکت شناسایی کپی های مختلف از همان جدول. شما می توانید نام سرور برای این استفاده, همانطور که در مثال. نام تنها نیاز به منحصر به فرد در هر سفال. + +شما می توانید پارامترهای صراحت به جای استفاده از تعویض تعریف کنیم. این ممکن است مناسب برای تست و برای پیکربندی خوشه های کوچک. با این حال, شما می توانید نمایش داده شد توزیع دی ال استفاده کنید (`ON CLUSTER`) در این مورد. + +هنگام کار با خوشه های بزرگ, توصیه می کنیم با استفاده از تعویض زیرا احتمال خطا را کاهش می دهد. + +اجرای `CREATE TABLE` پرس و جو در هر ماکت. این پرس و جو ایجاد یک جدول تکرار جدید, و یا می افزاید: یک ماکت جدید به یک موجود. + +اگر شما اضافه کردن یک ماکت جدید پس از جدول در حال حاضر شامل برخی از داده ها در کپی های دیگر کپی داده ها از کپی های دیگر کپی به یکی از جدید پس از اجرای پرس و جو. به عبارت دیگر, ماکت جدید خود را با دیگران همگام سازی. + +برای حذف یک ماکت, اجرا `DROP TABLE`. However, only one replica is deleted – the one that resides on the server where you run the query. + +## بازیابی پس از شکست {#recovery-after-failures} + +اگر باغ وحش در دسترس نیست که یک سرور شروع می شود, جداول تکرار تبدیل به حالت فقط خواندنی. این سیستم به صورت دوره ای تلاش برای اتصال به باغ وحش. + +اگر باغ وحش در طول یک در دسترس نیست `INSERT`, یا یک خطا رخ می دهد در هنگام تعامل با باغ وحش, یک استثنا پرتاب می شود. + +پس از اتصال به باغ وحش, سیستم چک چه مجموعه ای از داده ها در سیستم فایل محلی منطبق بر مجموعه مورد انتظار از داده ها (باغ وحش ذخیره این اطلاعات). اگر تناقضات کوچک وجود دارد, سیستم با همگام سازی داده ها با کپی حل. + +اگر سیستم تشخیص داده های شکسته قطعات (با اندازه اشتباه از فایل ها) و یا قطعات ناشناخته (قطعات نوشته شده به فایل سیستم اما ثبت نشده در باغ وحش) این حرکت را به `detached` دایرکتوری فرعی(حذف نمی شوند). هر بخش از دست رفته از کپی کپی کپی کپی. + +توجه داشته باشید که تاتر هیچ اقدامات مخرب مانند به طور خودکار حذف مقدار زیادی از داده ها را انجام نمی دهد. + +هنگامی که سرور شروع می شود (و یا ایجاد یک جلسه جدید با باغ وحش), این تنها چک مقدار و اندازه تمام فایل های. اگر اندازه فایل مطابقت اما بایت در جایی در وسط تغییر یافته است, این بلافاصله شناسایی نشده, اما تنها زمانی که تلاش برای خواندن داده ها برای یک `SELECT` پرس و جو. پرس و جو می اندازد یک استثنا در مورد کنترلی غیر تطبیق و یا اندازه یک بلوک فشرده. در این مورد, قطعات داده ها به صف تایید اضافه شده و کپی از کپی در صورت لزوم. + +اگر مجموعه ای محلی از داده های متفاوت بیش از حد از یک انتظار, یک مکانیزم ایمنی باعث شده است. سرور وارد این در ورود به سیستم و حاضر به راه اندازی. دلیل این کار این است که این مورد ممکن است یک خطای پیکربندی نشان می دهد, مانند اگر یک ماکت در سفال به طور تصادفی مانند یک ماکت در سفال های مختلف پیکربندی شده بود. 
با این حال, مانع برای این مکانیزم نسبتا کم, و این وضعیت ممکن است در طول بهبود شکست طبیعی رخ می دهد. در این مورد داده ها به صورت نیمه اتوماتیک بازسازی می شوند “pushing a button”. + +برای شروع بازیابی گره ایجاد کنید `/path_to_table/replica_name/flags/force_restore_data` در باغ وحش با هر یک از مطالب, و یا اجرای دستور برای بازگرداندن تمام جداول تکرار: + +``` bash +sudo -u clickhouse touch /var/lib/clickhouse/flags/force_restore_data +``` + +سپس سرور راه اندازی مجدد. در ابتدا سرور این پرچم ها را حذف می کند و شروع به بازیابی می کند. + +## بازیابی پس از از دست دادن اطلاعات کامل {#recovery-after-complete-data-loss} + +اگر تمام داده ها و ابرداده از یکی از سرورها ناپدید شد, این مراحل را برای بازیابی دنبال: + +1. نصب کلیک بر روی سرور. تعریف تعویض به درستی در فایل پیکربندی که شامل شناسه سفال و کپی, در صورت استفاده از. +2. اگر شما تا به حال جداول سه برابر است که باید به صورت دستی بر روی سرور تکرار, کپی اطلاعات خود را از یک ماکت (در دایرکتوری `/var/lib/clickhouse/data/db_name/table_name/`). +3. تعاریف جدول کپی واقع در `/var/lib/clickhouse/metadata/` از یک ماکت. اگر یک شناسه سفال یا ماکت به صراحت در تعاریف جدول تعریف, اصلاح به طوری که به این ماکت مربوط. (متناوبا, شروع سرور و تمام `ATTACH TABLE` نمایش داده شد که باید در شده .در حال بارگذاری `/var/lib/clickhouse/metadata/`.) +4. برای شروع بازیابی, ایجاد گره باغ وحش `/path_to_table/replica_name/flags/force_restore_data` با هر محتوا, و یا اجرای دستور برای بازگرداندن تمام جداول تکرار: `sudo -u clickhouse touch /var/lib/clickhouse/flags/force_restore_data` + +سپس سرور شروع (راه اندازی مجدد, اگر در حال حاضر در حال اجرا). داده خواهد شد از کپی دانلود. + +گزینه بازیابی جایگزین این است که حذف اطلاعات در مورد ماکت از دست رفته از باغ وحش (`/path_to_table/replica_name`), سپس ایجاد ماکت دوباره به عنوان شرح داده شده در “[ایجاد جداول تکرار شده](#creating-replicated-tables)”. + +در طول بازیابی هیچ محدودیتی در پهنای باند شبکه وجود ندارد. این را در ذهن اگر شما در حال بازگرداندن بسیاری از کپی در یک بار. + +## تبدیل از ادغام به تکرار غذای اصلی {#converting-from-mergetree-to-replicatedmergetree} + +ما از اصطلاح استفاده می کنیم `MergeTree` برای اشاره به تمام موتورهای جدول در `MergeTree family`, همان است که برای `ReplicatedMergeTree`. + +اگر شما تا به حال `MergeTree` جدول که به صورت دستی تکرار شد, شما می توانید به یک جدول تکرار تبدیل. شما ممکن است نیاز به انجام این کار اگر شما در حال حاضر مقدار زیادی از داده ها در یک `MergeTree` جدول و در حال حاضر شما می خواهید برای فعال کردن تکرار. + +اگر داده ها در کپی های مختلف متفاوت, برای اولین بار همگام سازی, و یا حذف این داده ها در تمام کپی به جز یکی. + +تغییر نام جدول ادغام موجود, سپس ایجاد یک `ReplicatedMergeTree` جدول با نام های قدیمی. +انتقال داده ها از جدول قدیمی به `detached` دایرکتوری فرعی در داخل دایرکتوری با داده های جدول جدید (`/var/lib/clickhouse/data/db_name/table_name/`). +سپس اجرا کنید `ALTER TABLE ATTACH PARTITION` در یکی از کپی برای اضافه کردن این قطعات داده به مجموعه کار. + +## تبدیل از تکراری به ادغام {#converting-from-replicatedmergetree-to-mergetree} + +ایجاد یک جدول ادغام با نام های مختلف. انتقال تمام داده ها از دایرکتوری با `ReplicatedMergeTree` داده های جدول به دایرکتوری داده جدول جدید. سپس حذف `ReplicatedMergeTree` جدول و راه اندازی مجدد سرور. + +اگر شما می خواهید برای خلاص شدن از شر `ReplicatedMergeTree` جدول بدون راه اندازی سرور: + +- حذف متناظر `.sql` پرونده در فهرست راهنمای فراداده (`/var/lib/clickhouse/metadata/`). +- حذف مسیر مربوطه در باغ وحش (`/path_to_table/replica_name`). + +بعد از این, شما می توانید سرور راه اندازی, ایجاد یک `MergeTree` جدول, انتقال داده ها به دایرکتوری خود, و سپس راه اندازی مجدد سرور. 
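+
+به عنوان یک طرح فرضی برای روش توضیح داده شده در بالا (حذف یک جدول `ReplicatedMergeTree` بدون راه اندازی سرور), دستورات زیر را می توان استفاده کرد. نام پایگاه داده, جدول و مسیر باغ وحش فرضی هستند و ابزار حذف گره باغ وحش به نصب شما بستگی دارد:
+
+``` bash
+# حذف فایل متادیتا جدول (نام ها فرضی هستند)
+sudo rm /var/lib/clickhouse/metadata/db_name/table_name.sql
+
+# حذف مسیر ماکت در باغ وحش, مثلا با zkCli.sh
+# (بسته به نسخه باغ وحش, دستور ممکن است rmr یا deleteall باشد)
+zkCli.sh rmr /path_to_table/replica_name
+```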
+ +## بازیابی هنگامی که ابرداده در خوشه باغ وحش از دست داده و یا صدمه دیده است {#recovery-when-metadata-in-the-zookeeper-cluster-is-lost-or-damaged} + +اگر داده های موجود در باغ وحش از دست رفته یا صدمه دیده بود می توانید داده ها را با حرکت دادن به یک جدول بدون علامت همانطور که در بالا توضیح داده شد ذخیره کنید. + +[مقاله اصلی](https://clickhouse.tech/docs/en/operations/table_engines/replication/) diff --git a/docs/fa/engines/table_engines/mergetree_family/summingmergetree.md b/docs/fa/engines/table_engines/mergetree_family/summingmergetree.md new file mode 100644 index 00000000000..b2d6169a44e --- /dev/null +++ b/docs/fa/engines/table_engines/mergetree_family/summingmergetree.md @@ -0,0 +1,141 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 34 +toc_title: "\u0633\u0627\u0645\u06CC\u0646\u06AF\u0645\u0631\u06AF\u062A\u0631\u06CC" +--- + +# سامینگمرگتری {#summingmergetree} + +موتور به ارث می برد از [ادغام](mergetree.md#table_engines-mergetree). تفاوت در این است که هنگامی که ادغام قطعات داده برای `SummingMergeTree` جداول تاتر جایگزین تمام ردیف با کلید اصلی همان (یا با دقت بیشتر ,با همان [کلید مرتب سازی](mergetree.md)) با یک ردیف که حاوی مقادیر خلاصه شده برای ستون ها با نوع داده عددی است. اگر کلید مرتب سازی در راه است که یک مقدار کلید تنها مربوط به تعداد زیادی از ردیف تشکیل شده, این به طور قابل توجهی کاهش می دهد حجم ذخیره سازی و سرعت بخشیدن به انتخاب داده ها. + +ما توصیه می کنیم به استفاده از موتور همراه با `MergeTree`. ذخیره اطلاعات کامل در `MergeTree` جدول و استفاده `SummingMergeTree` برای ذخیره سازی داده ها جمع, مثلا, هنگام تهیه گزارش. چنین رویکردی شما را از دست دادن اطلاعات با ارزش با توجه به کلید اولیه نادرست تشکیل شده جلوگیری می کند. + +## ایجاد یک جدول {#creating-a-table} + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] +( + name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], + name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], + ... +) ENGINE = SummingMergeTree([columns]) +[PARTITION BY expr] +[ORDER BY expr] +[SAMPLE BY expr] +[SETTINGS name=value, ...] +``` + +برای شرح پارامترهای درخواست را ببینید [درخواست توضیحات](../../../sql_reference/statements/create.md). + +**پارامترهای سامینگمرگتری** + +- `columns` - یک تاپل با نام ستون که ارزش خلاصه خواهد شد. پارامتر اختیاری. + ستون باید از یک نوع عددی باشد و نباید در کلید اصلی باشد. + + اگر `columns` مشخص نشده, تاتر خلاصه مقادیر در تمام ستون ها با یک نوع داده عددی است که در کلید اصلی نیست. + +**بندهای پرسوجو** + +هنگام ایجاد یک `SummingMergeTree` جدول همان [بند](mergetree.md) در هنگام ایجاد یک مورد نیاز است `MergeTree` جدول + +
+
+روش منسوخ برای ایجاد یک جدول
+
+!!! attention "توجه"
+    از این روش در پروژه های جدید استفاده نکنید و, در صورت امکان, پروژه های قدیمی را به روش توضیح داده شده در بالا تغییر دهید.
+
+``` sql
+CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
+(
+    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
+    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
+    ...
+) ENGINE [=] SummingMergeTree(date-column [, sampling_expression], (primary, key), index_granularity, [columns])
+```
+
+همه پارامترها به استثنای `columns` همان معنایی را دارند که در `MergeTree` دارند.
+
+- `columns` — تاپلی با نام ستون هایی که مقادیر انها خلاصه می شود. پارامتر اختیاری. برای شرح, متن بالا را ببینید.
+
    + +## مثال طریقه استفاده {#usage-example} + +جدول زیر را در نظر بگیرید: + +``` sql +CREATE TABLE summtt +( + key UInt32, + value UInt32 +) +ENGINE = SummingMergeTree() +ORDER BY key +``` + +درج داده به این: + +``` sql +INSERT INTO summtt Values(1,1),(1,2),(2,1) +``` + +تاتر ممکن است تمام ردیف نه به طور کامل جمع ([پایین را ببینید](#data-processing)), بنابراین ما با استفاده از یک تابع کلی `sum` و `GROUP BY` بند در پرس و جو. + +``` sql +SELECT key, sum(value) FROM summtt GROUP BY key +``` + +``` text +┌─key─┬─sum(value)─┐ +│ 2 │ 1 │ +│ 1 │ 3 │ +└─────┴────────────┘ +``` + +## پردازش داده ها {#data-processing} + +هنگامی که داده ها را به یک جدول قرار داده, ذخیره می شوند به عنوان است. خانه رعیتی ادغام بخش قرار داده شده از داده ها به صورت دوره ای و این زمانی است که ردیف با کلید اصلی همان خلاصه و جایگزین با یکی برای هر بخش حاصل از داده ها. + +ClickHouse can merge the data parts so that different resulting parts of data cat consist rows with the same primary key, i.e. the summation will be incomplete. Therefore (`SELECT`) یک تابع جمع [جمع()](../../../sql_reference/aggregate_functions/reference.md#agg_function-sum) و `GROUP BY` بند باید در پرس و جو به عنوان مثال در بالا توضیح داده شده استفاده می شود. + +### قوانین مشترک برای جمع {#common-rules-for-summation} + +مقادیر در ستون با نوع داده عددی خلاصه شده است. مجموعه ای از ستون ها توسط پارامتر تعریف شده است `columns`. + +اگر ارزش شد 0 در تمام ستون ها برای جمع, ردیف حذف شده است. + +اگر ستون در کلید اصلی نیست و خلاصه نشده است, یک مقدار دلخواه از موجود انتخاب. + +مقادیر برای ستون در کلید اصلی خلاصه نشده است. + +### جمعبندی ستونها {#the-summation-in-the-aggregatefunction-columns} + +برای ستون [نوع تابع](../../../sql_reference/data_types/aggregatefunction.md) عمل کلیک به عنوان [ریزدانه](aggregatingmergetree.md) جمع موتور با توجه به عملکرد. + +### ساختارهای تو در تو {#nested-structures} + +جدول می تواند ساختارهای داده تو در تو که در یک راه خاص پردازش کرده اند. + +اگر نام یک جدول تو در تو با به پایان می رسد `Map` و این شامل حداقل دو ستون است که با معیارهای زیر مطابقت دارند: + +- ستون اول عددی است `(*Int*, Date, DateTime)` یا یک رشته `(String, FixedString)` بهش زنگ بزن `key`, +- ستون های دیگر حساب `(*Int*, Float32/64)` بهش زنگ بزن `(values...)`, + +سپس این جدول تو در تو به عنوان یک نقشه برداری از تفسیر `key => (values...)`, و هنگامی که ادغام ردیف خود, عناصر دو مجموعه داده ها با هم ادغام شدند `key` با جمع بندی مربوطه `(values...)`. + +مثالها: + +``` text +[(1, 100)] + [(2, 150)] -> [(1, 100), (2, 150)] +[(1, 100)] + [(1, 150)] -> [(1, 250)] +[(1, 100)] + [(1, 150), (2, 150)] -> [(1, 250), (2, 150)] +[(1, 100), (2, 150)] + [(1, -100)] -> [(2, 150)] +``` + +هنگام درخواست داده ها از [sumMap(key, value)](../../../sql_reference/aggregate_functions/reference.md) تابع برای تجمع `Map`. + +برای ساختار داده های تو در تو, شما لازم نیست که برای مشخص ستون خود را در تاپل ستون برای جمع. 
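+
+طرح فرضی زیر ساختار تو در تو و تابع `sumMap` توضیح داده شده در بالا را نشان می دهد. نام جدول و ستون ها فرضی هستند:
+
+``` sql
+-- جدول تو در تو با نامی که به Map ختم می شود (نام ها فرضی هستند)
+CREATE TABLE nested_summtt
+(
+    day Date,
+    key UInt32,
+    StatusMap Nested(
+        Status UInt16,
+        Hits UInt64
+    )
+) ENGINE = SummingMergeTree()
+ORDER BY (day, key);
+
+-- هنگام خواندن, تابع sumMap برای تجمع Map استفاده می شود
+SELECT key, sumMap(StatusMap.Status, StatusMap.Hits)
+FROM nested_summtt
+GROUP BY key;
+```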
+ +[مقاله اصلی](https://clickhouse.tech/docs/en/operations/table_engines/summingmergetree/) diff --git a/docs/fa/engines/table_engines/mergetree_family/versionedcollapsingmergetree.md b/docs/fa/engines/table_engines/mergetree_family/versionedcollapsingmergetree.md new file mode 100644 index 00000000000..4a269e8713d --- /dev/null +++ b/docs/fa/engines/table_engines/mergetree_family/versionedcollapsingmergetree.md @@ -0,0 +1,239 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 37 +toc_title: "\u062F\u0631 \u062D\u0627\u0644 \u0628\u0627\u0631\u06AF\u0630\u0627\u0631\ + \u06CC" +--- + +# در حال بارگذاری {#versionedcollapsingmergetree} + +این موتور: + +- اجازه می دهد تا نوشتن سریع از کشورهای شی که به طور مستمر در حال تغییر. +- حذف کشورهای شی قدیمی در پس زمینه. این به طور قابل توجهی حجم ذخیره سازی را کاهش می دهد. + +بخش را ببینید [سقوط](#table_engines_versionedcollapsingmergetree) برای اطلاعات بیشتر. + +موتور به ارث می برد از [ادغام](mergetree.md#table_engines-mergetree) و می افزاید: منطق برای سقوط ردیف به الگوریتم برای ادغام قطعات داده. `VersionedCollapsingMergeTree` در خدمت همان هدف به عنوان [سقوط غذای اصلی](collapsingmergetree.md) اما با استفاده از یک الگوریتم سقوط های مختلف است که اجازه می دهد تا قرار دادن داده ها در هر جهت با موضوعات متعدد. به خصوص `Version` ستون کمک می کند تا به سقوط ردیف درستی حتی در صورتی که در جهت اشتباه قرار داده شده. در مقابل, `CollapsingMergeTree` اجازه می دهد تا درج تنها به شدت متوالی. + +## ایجاد یک جدول {#creating-a-table} + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] +( + name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], + name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], + ... +) ENGINE = VersionedCollapsingMergeTree(sign, version) +[PARTITION BY expr] +[ORDER BY expr] +[SAMPLE BY expr] +[SETTINGS name=value, ...] +``` + +برای شرح پارامترهای پرس و جو, دیدن [توضیحات پرس و جو](../../../sql_reference/statements/create.md). + +**پارامترهای موتور** + +``` sql +VersionedCollapsingMergeTree(sign, version) +``` + +- `sign` — Name of the column with the type of row: `1` یک “state” سطر, `-1` یک “cancel” پارو زدن. + + نوع داده ستون باید باشد `Int8`. + +- `version` — Name of the column with the version of the object state. + + نوع داده ستون باید باشد `UInt*`. + +**بندهای پرسوجو** + +هنگام ایجاد یک `VersionedCollapsingMergeTree` جدول, همان [بند](mergetree.md) در هنگام ایجاد یک مورد نیاز است `MergeTree` جدول + +
+
+روش منسوخ برای ایجاد یک جدول
+
+!!! attention "توجه"
+    از این روش در پروژه های جدید استفاده نکنید و, در صورت امکان, پروژه های قدیمی را به روش توضیح داده شده در بالا تغییر دهید.
+
+``` sql
+CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
+(
+    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
+    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
+    ...
+) ENGINE [=] VersionedCollapsingMergeTree(date-column [, sampling_expression], (primary, key), index_granularity, sign, version)
+```
+
+همه پارامترها به جز `sign` و `version` همان معنایی را دارند که در `MergeTree` دارند.
+
+- `sign` — Name of the column with the type of row: `1` یک “state” سطر, `-1` یک “cancel” سطر.
+
+    Column Data Type — `Int8`.
+
+- `version` — Name of the column with the version of the object state.
+
+    نوع داده ستون باید باشد `UInt*`.
+
    + +## سقوط {#table_engines-versionedcollapsingmergetree} + +### داده {#data} + +در نظر بگیرید یک وضعیت که شما نیاز به ذخیره به طور مداوم در حال تغییر داده ها برای برخی از شی. این منطقی است که یک ردیف برای یک شی و به روز رسانی ردیف هر زمان که تغییرات وجود دارد. با این حال, عملیات به روز رسانی گران و کند برای یک سندرم تونل کارپ است چرا که نیاز به بازنویسی داده ها در ذخیره سازی. به روز رسانی قابل قبول نیست اگر شما نیاز به نوشتن داده ها به سرعت, اما شما می توانید تغییرات را به یک شی پی در پی به شرح زیر ارسال. + +استفاده از `Sign` ستون هنگام نوشتن ردیف. اگر `Sign = 1` این بدان معنی است که ردیف دولت از یک شی است (اجازه دهید این تماس “state” ردیف). اگر `Sign = -1` این نشان می دهد لغو دولت از یک شی با ویژگی های مشابه (اجازه دهید این پاسخ “cancel” ردیف). همچنین از `Version` ستون, که باید هر ایالت از یک شی با یک عدد جداگانه شناسایی. + +مثلا, ما می خواهیم برای محاسبه تعداد صفحات کاربران در برخی از سایت بازدید و چه مدت وجود دارد. در برخی از نقطه در زمان ما ارسال ردیف زیر را با دولت از فعالیت های کاربر: + +``` text +┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐ +│ 4324182021466249494 │ 5 │ 146 │ 1 │ 1 | +└─────────────────────┴───────────┴──────────┴──────┴─────────┘ +``` + +در برخی موارد بعد ما تغییر فعالیت کاربر را ثبت می کنیم و با دو ردیف زیر می نویسیم. + +``` text +┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐ +│ 4324182021466249494 │ 5 │ 146 │ -1 │ 1 | +│ 4324182021466249494 │ 6 │ 185 │ 1 │ 2 | +└─────────────────────┴───────────┴──────────┴──────┴─────────┘ +``` + +ردیف اول لغو حالت قبلی از جسم (کاربر). باید تمام زمینه های دولت لغو شده به جز کپی کنید `Sign`. + +ردیف دوم شامل وضعیت فعلی. + +چرا که ما نیاز به تنها دولت گذشته از فعالیت های کاربر ردیف + +``` text +┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐ +│ 4324182021466249494 │ 5 │ 146 │ 1 │ 1 | +│ 4324182021466249494 │ 5 │ 146 │ -1 │ 1 | +└─────────────────────┴───────────┴──────────┴──────┴─────────┘ +``` + +می توان حذف, سقوط نامعتبر (قدیمی) دولت از جسم. `VersionedCollapsingMergeTree` این کار در حالی که ادغام قطعات داده. + +برای پیدا کردن که چرا ما نیاز به دو ردیف برای هر تغییر را ببینید [الگوریتم](#table_engines-versionedcollapsingmergetree-algorithm). + +**نکاتی در مورد استفاده** + +1. برنامه ای که می نویسد داده ها باید به یاد داشته باشید دولت از یک شی به منظور لغو. این “cancel” رشته باید یک کپی از “state” رشته با مخالف `Sign`. این باعث افزایش اندازه اولیه ذخیره سازی اما اجازه می دهد تا به نوشتن داده ها به سرعت. +2. در حال رشد طولانی در ستون کاهش بهره وری از موتور با توجه به بار برای نوشتن. ساده تر داده, بهتر بهره وری. +3. `SELECT` نتایج به شدت بستگی به قوام تاریخ تغییر شی. هنگام تهیه داده ها برای قرار دادن دقیق باشید. شما می توانید نتایج غیر قابل پیش بینی با اطلاعات متناقض از جمله مقادیر منفی برای معیارهای غیر منفی مانند عمق جلسه. + +### الگوریتم {#table_engines-versionedcollapsingmergetree-algorithm} + +هنگامی که مالکیت خانه ادغام قطعات داده, حذف هر جفت ردیف که کلید اولیه و نسخه های مختلف و همان `Sign`. منظور از ردیف مهم نیست. + +هنگامی که داده ها را درج خانه, دستور ردیف توسط کلید اصلی. اگر `Version` ستون در کلید اصلی نیست, خانه عروسکی اضافه می کند به کلید اصلی به طور ضمنی به عنوان زمینه گذشته و برای سفارش استفاده. + +## انتخاب داده ها {#selecting-data} + +تاتر تضمین نمی کند که همه از ردیف با کلید اصلی همان خواهد شد در همان بخش داده و در نتیجه و یا حتی بر روی سرور فیزیکی است. این درست است هر دو برای نوشتن داده ها و برای ادغام بعدی از قطعات داده است. علاوه بر این فرایندهای کلیک `SELECT` نمایش داده شد با موضوعات متعدد و منظور از ردیف در نتیجه نمی تواند پیش بینی کند. 
این به این معنی است که تجمع مورد نیاز است اگر نیاز به طور کامل وجود دارد “collapsed” داده ها از یک `VersionedCollapsingMergeTree` جدول + +برای نهایی سقوط, ارسال یک پرس و جو با یک `GROUP BY` بند و مجموع توابع است که برای ثبت نام حساب. برای مثال برای محاسبه مقدار استفاده کنید `sum(Sign)` به جای `count()`. برای محاسبه مجموع چیزی استفاده کنید `sum(Sign * x)` به جای `sum(x)` و اضافه کردن `HAVING sum(Sign) > 0`. + +مصالح `count`, `sum` و `avg` می توان محاسبه این راه. مجموع `uniq` می توان محاسبه اگر یک شی حداقل یک دولت غیر فروریخته. مصالح `min` و `max` نمی توان محاسبه کرد زیرا `VersionedCollapsingMergeTree` تاریخ ارزش های کشورهای فرو ریخت را نجات دهد. + +اگر شما نیاز به استخراج داده ها با “collapsing” اما بدون تجمع (مثلا, برای بررسی اینکه ردیف در حال حاضر که جدیدترین ارزش مطابقت شرایط خاصی هستند), شما می توانید با استفاده از `FINAL` تغییردهنده برای `FROM` بند بند. این روش بی فایده است و باید با جداول بزرگ استفاده نمی شود. + +## مثال استفاده {#example-of-use} + +اطلاعات نمونه: + +``` text +┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐ +│ 4324182021466249494 │ 5 │ 146 │ 1 │ 1 | +│ 4324182021466249494 │ 5 │ 146 │ -1 │ 1 | +│ 4324182021466249494 │ 6 │ 185 │ 1 │ 2 | +└─────────────────────┴───────────┴──────────┴──────┴─────────┘ +``` + +ایجاد جدول: + +``` sql +CREATE TABLE UAct +( + UserID UInt64, + PageViews UInt8, + Duration UInt8, + Sign Int8, + Version UInt8 +) +ENGINE = VersionedCollapsingMergeTree(Sign, Version) +ORDER BY UserID +``` + +درج داده: + +``` sql +INSERT INTO UAct VALUES (4324182021466249494, 5, 146, 1, 1) +``` + +``` sql +INSERT INTO UAct VALUES (4324182021466249494, 5, 146, -1, 1),(4324182021466249494, 6, 185, 1, 2) +``` + +ما با استفاده از دو `INSERT` نمایش داده شد برای ایجاد دو بخش داده های مختلف. اگر ما داده ها را وارد کنید با یک پرس و جو تنها, تاتر ایجاد یک بخش داده و هرگز هیچ ادغام انجام خواهد داد. + +گرفتن داده ها: + +``` sql +SELECT * FROM UAct +``` + +``` text +┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐ +│ 4324182021466249494 │ 5 │ 146 │ 1 │ 1 │ +└─────────────────────┴───────────┴──────────┴──────┴─────────┘ +┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐ +│ 4324182021466249494 │ 5 │ 146 │ -1 │ 1 │ +│ 4324182021466249494 │ 6 │ 185 │ 1 │ 2 │ +└─────────────────────┴───────────┴──────────┴──────┴─────────┘ +``` + +چه ما در اینجا مشاهده کنید و قطعات فروریخته کجا هستند? +ما دو بخش داده با استفاده از دو `INSERT` نمایش داده شد. این `SELECT` پرس و جو در دو موضوع انجام شد, و در نتیجه یک نظم تصادفی از ردیف است. +سقوط رخ نداد زیرا قطعات داده هنوز ادغام نشده اند. تاتر ادغام قطعات داده در یک نقطه ناشناخته در زمان است که ما نمی توانیم پیش بینی. + +به همین دلیل است که ما نیاز به تجمع: + +``` sql +SELECT + UserID, + sum(PageViews * Sign) AS PageViews, + sum(Duration * Sign) AS Duration, + Version +FROM UAct +GROUP BY UserID, Version +HAVING sum(Sign) > 0 +``` + +``` text +┌──────────────UserID─┬─PageViews─┬─Duration─┬─Version─┐ +│ 4324182021466249494 │ 6 │ 185 │ 2 │ +└─────────────────────┴───────────┴──────────┴─────────┘ +``` + +اگر ما تجمع نیاز ندارد و می خواهید به زور سقوط, ما می توانیم با استفاده از `FINAL` تغییردهنده برای `FROM` بند بند. + +``` sql +SELECT * FROM UAct FINAL +``` + +``` text +┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐ +│ 4324182021466249494 │ 6 │ 185 │ 1 │ 2 │ +└─────────────────────┴───────────┴──────────┴──────┴─────────┘ +``` + +این یک راه بسیار کارامد برای انتخاب داده ها است. برای جداول بزرگ استفاده نکنید. 
+ +[مقاله اصلی](https://clickhouse.tech/docs/en/operations/table_engines/versionedcollapsingmergetree/) diff --git a/docs/fa/engines/table_engines/special/buffer.md b/docs/fa/engines/table_engines/special/buffer.md new file mode 100644 index 00000000000..6eddd4a1dc2 --- /dev/null +++ b/docs/fa/engines/table_engines/special/buffer.md @@ -0,0 +1,71 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 45 +toc_title: "\u0628\u0627\u0641\u0631" +--- + +# بافر {#buffer} + +بافر داده ها به نوشتن در رم, دوره گرگرفتگی به جدول دیگر. در طول عملیات به عنوان خوانده شده, داده ها از بافر و جدول دیگر به طور همزمان به عنوان خوانده شده. + +``` sql +Buffer(database, table, num_layers, min_time, max_time, min_rows, max_rows, min_bytes, max_bytes) +``` + +پارامترهای موتور: + +- `database` – Database name. Instead of the database name, you can use a constant expression that returns a string. +- `table` – Table to flush data to. +- `num_layers` – Parallelism layer. Physically, the table will be represented as `num_layers` از بافر مستقل. مقدار توصیه شده: 16. +- `min_time`, `max_time`, `min_rows`, `max_rows`, `min_bytes` و `max_bytes` – Conditions for flushing data from the buffer. + +داده ها از بافر سرخ و نوشته شده به جدول مقصد اگر همه `min*` شرایط و یا حداقل یک `max*` شرایط ملاقات کرد. + +- `min_time`, `max_time` – Condition for the time in seconds from the moment of the first write to the buffer. +- `min_rows`, `max_rows` – Condition for the number of rows in the buffer. +- `min_bytes`, `max_bytes` – Condition for the number of bytes in the buffer. + +در طول عملیات نوشتن داده ها به یک `num_layers` تعداد بافر تصادفی. یا, اگر بخش داده ها برای وارد کردن به اندازه کافی بزرگ است (بیشتر از `max_rows` یا `max_bytes`), این است که به طور مستقیم به جدول مقصد نوشته شده, حذف بافر. + +شرایط برای گرگرفتگی داده ها به طور جداگانه برای هر یک از محاسبه `num_layers` بافر. برای مثال اگر `num_layers = 16` و `max_bytes = 100000000`, حداکثر مصرف رم است 1.6 گیگابایت. + +مثال: + +``` sql +CREATE TABLE merge.hits_buffer AS merge.hits ENGINE = Buffer(merge, hits, 16, 10, 100, 10000, 1000000, 10000000, 100000000) +``` + +ایجاد یک ‘merge.hits\_buffer’ جدول با ساختار مشابه ‘merge.hits’ و با استفاده از موتور بافر. هنگام نوشتن به این جدول, داده ها در رم بافر و بعد به نوشته ‘merge.hits’ جدول 16 بافر ایجاد می کند. اگر 100 ثانیه گذشت یا یک میلیون ردیف نوشته شده یا 100 مگابایت از داده ها نوشته شده است داده ها در هر یک از فوران است; یا اگر به طور همزمان 10 ثانیه گذشت و 10000 ردیف و 10 مگابایت داده ها نوشته شده است. مثلا, اگر فقط یک ردیف نوشته شده است, بعد از 100 ثانیه سرخ خواهد شد, مهم نیست که چه. اما اگر بسیاری از ردیف نوشته شده است, داده خواهد شد هر چه زودتر سرخ. + +هنگامی که سرور متوقف شده است, با جدول قطره و یا جدا جدول, داده های بافر نیز به جدول مقصد سرخ. + +شما می توانید رشته های خالی را در علامت نقل قول واحد برای پایگاه داده و نام جدول تنظیم کنید. این نشان می دهد عدم وجود یک جدول مقصد. در این مورد, زمانی که شرایط خیط و پیت کردن داده رسیده است, بافر است که به سادگی پاک. این ممکن است برای نگه داشتن یک پنجره داده ها در حافظه مفید باشد. + +هنگام خواندن از یک جدول بافر, داده ها هر دو از بافر و از جدول مقصد پردازش (اگر وجود دارد). +توجه داشته باشید که جداول بافر یک شاخص را پشتیبانی نمی کند. به عبارت دیگر, داده ها در بافر به طور کامل اسکن, که ممکن است کند برای بافر بزرگ. (برای داده ها در یک جدول تابع, شاخص است که پشتیبانی استفاده خواهد شد.) 
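+
+مثلا, با جدول `merge.hits_buffer` که در بالا ایجاد شد, یک پرس و جوی خواندن هم داده های هنوز موجود در بافر و هم داده های جدول مقصد را پوشش می دهد:
+
+``` sql
+-- ردیف های تازه درج شده حتی قبل از تخلیه بافر قابل مشاهده هستند,
+-- چون خواندن به طور همزمان از بافر و از جدول merge.hits انجام می شود
+SELECT count() FROM merge.hits_buffer;
+```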
+ +اگر مجموعه ای از ستون ها در جدول بافر می کند مجموعه ای از ستون ها در یک جدول تابع مطابقت ندارد, یک زیر مجموعه از ستون که در هر دو جدول وجود دارد قرار داده شده است. + +اگر انواع برای یکی از ستون ها در جدول بافر و یک جدول تابع مطابقت ندارد, یک پیام خطا در ورود به سیستم سرور وارد شده و بافر پاک شده است. +همین اتفاق می افتد اگر جدول تابع وجود ندارد زمانی که بافر سرخ است. + +اگر شما نیاز به اجرا را تغییر دهید برای یک جدول تابع و جدول بافر, توصیه می کنیم برای اولین بار حذف جدول بافر, در حال اجرا را تغییر دهید برای جدول تابع, سپس ایجاد جدول بافر دوباره. + +اگر سرور غیر طبیعی راه اندازی مجدد, داده ها در بافر از دست داده است. + +نهایی و نمونه به درستی برای جداول بافر کار نمی کند. این شرایط به جدول مقصد منتقل می شود, اما برای پردازش داده ها در بافر استفاده نمی شود. اگر این ویژگی های مورد نیاز توصیه می کنیم تنها با استفاده از جدول بافر برای نوشتن, در حالی که خواندن از جدول مقصد. + +هنگام اضافه کردن داده ها به یک بافر, یکی از بافر قفل شده است. این باعث تاخیر اگر یک عملیات به عنوان خوانده شده است به طور همزمان از جدول انجام. + +داده هایی که به یک جدول بافر قرار داده شده ممکن است در نهایت در جدول تابع در جهت های مختلف و در بلوک های مختلف. به خاطر همین, یک جدول بافر دشوار است به استفاده از برای نوشتن به یک سقوط به درستی. برای جلوگیری از مشکلات, شما می توانید مجموعه ‘num\_layers’ به 1. + +اگر جدول مقصد تکرار شده است, برخی از ویژگی های مورد انتظار از جداول تکرار از دست داده در هنگام نوشتن به یک جدول بافر. تغییرات تصادفی به منظور از سطر و اندازه قطعات داده باعث تقسیم بندی داده ها به ترک کار, به این معنی که ممکن است به یک قابل اعتماد ‘exactly once’ ارسال به جداول تکرار. + +با توجه به این معایب, ما فقط می توانیم با استفاده از یک جدول بافر در موارد نادر توصیه. + +جدول بافر استفاده شده است که بیش از حد بسیاری از درج از تعداد زیادی از سرور بیش از یک واحد از زمان دریافت و داده ها را نمی توان قبل از درج بافر, که به معنی درج می توانید به اندازه کافی سریع اجرا کنید. + +توجه داشته باشید که این کار حس برای وارد کردن داده ها یک ردیف در یک زمان را ندارد, حتی برای جداول بافر. این تنها تولید خواهد شد سرعت چند هزار ردیف در هر ثانیه در حالی که قرار دادن بلوک های بزرگتر از داده ها می تواند تولید بیش از یک میلیون ردیف در هر ثانیه (نگاه کنید به بخش “Performance”). + +[مقاله اصلی](https://clickhouse.tech/docs/en/operations/table_engines/buffer/) diff --git a/docs/fa/operations/table_engines/dictionary.md b/docs/fa/engines/table_engines/special/dictionary.md similarity index 62% rename from docs/fa/operations/table_engines/dictionary.md rename to docs/fa/engines/table_engines/special/dictionary.md index ebdaa4cf842..53ed5b5cf26 100644 --- a/docs/fa/operations/table_engines/dictionary.md +++ b/docs/fa/engines/table_engines/special/dictionary.md @@ -1,12 +1,15 @@ --- -en_copy: true +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 35 +toc_title: "\u0648\u0627\u0698\u0647\u0646\u0627\u0645\u0647" --- -# Dictionary {#dictionary} +# واژهنامه {#dictionary} -The `Dictionary` engine displays the [dictionary](../../query_language/dicts/external_dicts.md) data as a ClickHouse table. +این `Dictionary` موتور نمایش [واژهنامه](../../../sql_reference/dictionaries/external_dictionaries/external_dicts.md) داده ها به عنوان یک جدول کلیک. 
-As an example, consider a dictionary of `products` with the following configuration: +به عنوان مثال, در نظر گرفتن یک فرهنگ لغت از `products` با پیکربندی زیر: ``` xml @@ -39,7 +42,7 @@ As an example, consider a dictionary of `products` with the following configurat ``` -Query the dictionary data: +پرس و جو داده فرهنگ لغت: ``` sql SELECT @@ -61,17 +64,17 @@ WHERE name = 'products' └──────────┴──────┴────────┴─────────────────┴─────────────────┴─────────────────┴───────────────┴─────────────────┘ ``` -You can use the [dictGet\*](../../query_language/functions/ext_dict_functions.md#ext_dict_functions) function to get the dictionary data in this format. +شما می توانید از [دیکته کردن\*](../../../sql_reference/functions/ext_dict_functions.md#ext_dict_functions) تابع برای دریافت داده های فرهنگ لغت در این فرمت. -This view isn’t helpful when you need to get raw data, or when performing a `JOIN` operation. For these cases, you can use the `Dictionary` engine, which displays the dictionary data in a table. +این دیدگاه مفید نیست که شما نیاز به دریافت داده های خام, و یا در هنگام انجام یک `JOIN` عمل برای این موارد می توانید از `Dictionary` موتور, که نمایش داده فرهنگ لغت در یک جدول. -Syntax: +نحو: ``` sql CREATE TABLE %table_name% (%fields%) engine = Dictionary(%dictionary_name%)` ``` -Usage example: +به عنوان مثال استفاده: ``` sql create table products (product_id UInt64, title String) Engine = Dictionary(products); @@ -79,7 +82,7 @@ create table products (product_id UInt64, title String) Engine = Dictionary(prod Ok -Take a look at what’s in the table. +نگاهی به در چه چیزی در جدول. ``` sql select * from products limit 1; @@ -91,4 +94,4 @@ select * from products limit 1; └───────────────┴─────────────────┘ ``` -[Original article](https://clickhouse.tech/docs/en/operations/table_engines/dictionary/) +[مقاله اصلی](https://clickhouse.tech/docs/en/operations/table_engines/dictionary/) diff --git a/docs/fa/engines/table_engines/special/distributed.md b/docs/fa/engines/table_engines/special/distributed.md new file mode 100644 index 00000000000..b2d9dd45a4d --- /dev/null +++ b/docs/fa/engines/table_engines/special/distributed.md @@ -0,0 +1,152 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 33 +toc_title: "\u062A\u0648\u0632\u06CC\u0639 \u0634\u062F\u0647" +--- + +# توزیع شده {#distributed} + +**جداول با موتور توزیع شده هیچ اطلاعاتی را توسط خود ذخیره نمی کنند**, اما اجازه می دهد پردازش پرس و جو توزیع شده بر روی سرورهای متعدد. +خواندن به طور خودکار موازی. در طول خواندن, شاخص جدول بر روی سرور از راه دور استفاده می شود, اگر وجود دارد. + +موتور توزیع پارامترها را می پذیرد: + +- نام خوشه در فایل پیکربندی سرور + +- نام یک پایگاه داده از راه دور + +- نام یک میز از راه دور + +- (اختیاری) sharding کلیدی + +- (اختیاری) نام سیاست, استفاده خواهد شد برای ذخیره فایل های موقت برای ارسال کالاهای کابل + + همچنین نگاه کنید به: + + - `insert_distributed_sync` تنظیم + - [ادغام](../mergetree_family/mergetree.md#table_engine-mergetree-multiple-volumes) برای نمونه + +مثال: + +``` sql +Distributed(logs, default, hits[, sharding_key[, policy_name]]) +``` + +داده ها از تمام سرورها در ‘logs’ خوشه, از پیش فرض.جدول بازدیدها واقع در هر سرور در خوشه. +داده ها نه تنها به عنوان خوانده شده اما تا حدی بر روی سرور از راه دور پردازش (تا حدی که این امکان پذیر است). +مثلا, برای یک پرس و جو با گروه های, داده خواهد شد بر روی سرور از راه دور جمع, و کشورهای متوسط از توابع دانه خواهد شد به سرور درخواست ارسال. سپس داده ها بیشتر جمع خواهد شد. 
+ +به جای نام پایگاه داده, شما می توانید یک عبارت ثابت است که یک رشته را برمی گرداند استفاده. در حال بارگذاری + +logs – The cluster name in the server's config file. + +خوشه ها مانند این تنظیم می شوند: + +``` xml + + + + + 1 + + false + + example01-01-1 + 9000 + + + example01-01-2 + 9000 + + + + 2 + false + + example01-02-1 + 9000 + + + example01-02-2 + 1 + 9440 + + + + +``` + +در اینجا یک خوشه با نام تعریف شده است ‘logs’ که متشکل از دو خرده ریز, که هر کدام شامل دو کپی. +خرده ریز به سرور که شامل بخش های مختلف از داده ها مراجعه (به منظور خواندن تمام داده ها, شما باید تمام خرده ریز دسترسی داشته باشید). +کپی در حال تکثیر سرور (به منظور خواندن تمام داده ها, شما می توانید داده ها بر روی هر یک از کپی دسترسی). + +نام خوشه باید حاوی نقطه نیست. + +پارامترها `host`, `port` و در صورت تمایل `user`, `password`, `secure`, `compression` برای هر سرور مشخص شده است: +- `host` – The address of the remote server. You can use either the domain or the IPv4 or IPv6 address. If you specify the domain, the server makes a DNS request when it starts, and the result is stored as long as the server is running. If the DNS request fails, the server doesn't start. If you change the DNS record, restart the server. +- `port` – The TCP port for messenger activity (‘tcp\_port’ در پیکربندی, معمولا به مجموعه 9000). نه اشتباه آن را با http\_port. +- `user` – Name of the user for connecting to a remote server. Default value: default. This user must have access to connect to the specified server. Access is configured in the users.xml file. For more information, see the section [حقوق دسترسی](../../../operations/access_rights.md). +- `password` – The password for connecting to a remote server (not masked). Default value: empty string. +- `secure` - استفاده از اس اس ال برای اتصال, معمولا شما همچنین باید تعریف `port` = 9440. سرور باید گوش کند 9440 و گواهی صحیح. +- `compression` - استفاده از فشرده سازی داده ها. مقدار پیش فرض: درست. + +When specifying replicas, one of the available replicas will be selected for each of the shards when reading. You can configure the algorithm for load balancing (the preference for which replica to access) – see the [\_تبالسازی](../../../operations/settings/settings.md#settings-load_balancing) تنظیمات. +اگر ارتباط با سرور ایجاد نشده است, وجود خواهد داشت تلاش برای ارتباط با یک ایست کوتاه. اگر اتصال شکست خورده, ماکت بعدی انتخاب خواهد شد, و به همین ترتیب برای همه کپی. اگر تلاش اتصال برای تمام کپی شکست خورده, تلاش تکرار خواهد شد به همان شیوه, چندین بار. +این کار به نفع حالت ارتجاعی, اما تحمل گسل کامل را فراهم نمی کند: یک سرور از راه دور ممکن است اتصال قبول, اما ممکن است کار نمی کند, و یا کار ضعیف. + +شما می توانید تنها یکی از خرده ریز مشخص (در این مورد, پردازش پرس و جو باید از راه دور به نام, به جای توزیع) و یا تا هر تعداد از خرده ریز. در هر سفال می توانید از یک به هر تعداد از کپی ها مشخص کنید. شما می توانید تعداد مختلف از کپی برای هر سفال مشخص. + +شما می توانید به عنوان بسیاری از خوشه های مشخص که شما در پیکربندی می خواهید. + +برای مشاهده خوشه های خود استفاده کنید ‘system.clusters’ جدول + +موتور توزیع اجازه می دهد تا کار با یک خوشه مانند یک سرور محلی. با این حال, خوشه غیر قابل اجتنابناپذیری است: شما باید پیکربندی خود را در فایل پیکربندی سرور ارسال (حتی بهتر, برای تمام سرورهای خوشه). + +The Distributed engine requires writing clusters to the config file. Clusters from the config file are updated on the fly, without restarting the server. If you need to send a query to an unknown set of shards and replicas each time, you don't need to create a Distributed table – use the ‘remote’ تابع جدول به جای. 
بخش را ببینید [توابع جدول](../../../sql_reference/table_functions/index.md). + +دو روش برای نوشتن داده ها به یک خوشه وجود دارد: + +اولین, شما می توانید تعریف که سرور به ارسال که داده ها را به و انجام نوشتن به طور مستقیم در هر سفال. به عبارت دیگر, انجام درج در جداول که جدول توزیع “looks at”. این راه حل انعطاف پذیر ترین است که شما می توانید هر طرح شاردینگ استفاده, که می تواند غیر بدیهی با توجه به الزامات منطقه موضوع. این هم بهینه ترین راه حل از داده ها را می توان به خرده ریز های مختلف نوشته شده است به طور کامل به طور مستقل. + +دومین, شما می توانید درج در یک جدول توزیع انجام. در این مورد جدول توزیع داده های درج شده در سراسر سرور خود را. به منظور ارسال به یک جدول توزیع, باید یک مجموعه کلید شارژ دارند (پارامتر گذشته). علاوه بر این, اگر تنها یک سفال وجود دارد, عملیات نوشتن بدون مشخص کردن کلید شاردینگ کار می کند, چرا که هیچ چیز در این مورد معنی نیست. + +هر سفال می تواند وزن تعریف شده در فایل پیکربندی داشته باشد. به طور پیش فرض, وزن به یک برابر است. داده ها در سراسر خرده ریز در مقدار متناسب با وزن سفال توزیع. مثلا, اگر دو خرده ریز وجود دارد و برای اولین بار دارای وزن 9 در حالی که دوم دارای وزن 10, برای اولین بار ارسال خواهد شد 9 / 19 بخش هایی از ردیف, و دوم ارسال خواهد شد 10 / 19. + +هر سفال می تواند داشته باشد ‘internal\_replication’ پارامتر تعریف شده در فایل پیکربندی. + +اگر این پارامتر قرار است به ‘true’ عملیات نوشتن اولین ماکت سالم را انتخاب می کند و داده ها را می نویسد. با استفاده از این جایگزین اگر جدول توزیع شده “looks at” جداول تکرار. به عبارت دیگر اگر جدول ای که داده ها نوشته می شود خود را تکرار می کند. + +اگر قرار است ‘false’ (به طور پیش فرض), داده ها به تمام کپی نوشته شده. در اصل این بدان معنی است که توزیع جدول تکرار داده های خود را. این بدتر از استفاده از جداول تکرار شده است زیرا سازگاری کپی ها بررسی نشده است و در طول زمان حاوی اطلاعات کمی متفاوت خواهد بود. + +برای انتخاب سفال که یک ردیف از داده های فرستاده شده به sharding بیان تجزيه و تحليل است و آن باقی مانده است از تقسیم آن با وزن کلی خرده ریز. ردیف به سفال که مربوط به نیمه فاصله از باقی مانده از ارسال ‘prev\_weight’ به ‘prev\_weights + weight’ کجا ‘prev\_weights’ وزن کل خرده ریز با کمترین تعداد است, و ‘weight’ وزن این سفال است. مثلا, اگر دو خرده ریز وجود دارد, و برای اولین بار دارای یک وزن 9 در حالی که دوم دارای وزن 10, ردیف خواهد شد به سفال اول برای باقی مانده از محدوده ارسال \[0, 9), و دوم برای باقی مانده از محدوده \[9, 19). + +بیان شاردینگ می تواند هر عبارت از ثابت ها و ستون های جدول که یک عدد صحیح را برمی گرداند. برای مثال شما می توانید با استفاده از بیان ‘rand()’ برای توزیع تصادفی داده ها یا ‘UserID’ برای توزیع توسط باقی مانده از تقسیم شناسه کاربر (سپس داده ها از یک کاربر تنها بر روی یک سفال تنها اقامت, که ساده در حال اجرا در و پیوستن به کاربران). اگر یکی از ستون ها به طور مساوی توزیع نشده باشد می توانید در یک تابع هش قرار دهید: اینتاش64 (شناسه). + +یک یادآوری ساده از این بخش محدود است راه حل برای sharding و نیست همیشه مناسب است. این برای حجم متوسط و زیادی از داده ها کار می کند (ده ها تن از سرور), اما نه برای حجم بسیار زیادی از داده ها (صدها سرور یا بیشتر). در مورد دوم با استفاده از sharding طرح های مورد نیاز منطقه موضوع را به جای استفاده از مطالب موجود در توزیع جداول. + +SELECT queries are sent to all the shards and work regardless of how data is distributed across the shards (they can be distributed completely randomly). When you add a new shard, you don't have to transfer the old data to it. You can write new data with a heavier weight – the data will be distributed slightly unevenly, but queries will work correctly and efficiently. 
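+
+طرح فرضی زیر ایجاد یک جدول توزیع شده با کلید شاردینگ مبتنی بر هش را که در بالا توضیح داده شد نشان می دهد. نام جداول فرضی هستند:
+
+``` sql
+-- ایجاد جدول توزیع شده با کلید شاردینگ intHash64(UserID) (نام ها فرضی هستند)
+CREATE TABLE hits_all AS default.hits
+ENGINE = Distributed(logs, default, hits, intHash64(UserID));
+
+-- درج از طریق جدول توزیع شده; ردیف ها بر اساس باقی مانده هش بین خرده ریزها توزیع می شوند
+INSERT INTO hits_all SELECT * FROM default.hits_staging;
+```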
+ +شما باید نگران sharding طرح در موارد زیر: + +- نمایش داده شد استفاده می شود که نیاز به پیوستن به داده ها (در یا پیوستن) توسط یک کلید خاص. اگر داده ها توسط این کلید پنهان, شما می توانید محلی در استفاده و یا پیوستن به جای جهانی در یا جهانی ملحق, که بسیار موثر تر است. +- تعداد زیادی از سرور استفاده شده است (صدها یا بیشتر) با تعداد زیادی از نمایش داده شد کوچک (نمایش داده شد فردی مشتریان - وب سایت, تبلیغ, و یا شرکای). به منظور نمایش داده شد کوچک به کل خوشه تاثیر نمی گذارد, این باعث می شود حس برای قرار دادن داده ها برای یک مشتری در یک سفال تنها. متناوبا, همانطور که ما در یاندکس انجام داده ام.متریکا, شما می توانید راه اندازی دو سطح شاردینگ: تقسیم کل خوشه را به “layers”, جایی که یک لایه ممکن است از تکه های متعدد تشکیل شده است. داده ها برای یک مشتری تنها بر روی یک لایه قرار دارد اما ذرات را می توان به یک لایه در صورت لزوم اضافه کرد و داده ها به طور تصادفی در داخل توزیع می شوند. جداول توزیع شده برای هر لایه ایجاد می شوند و یک جدول توزیع شده مشترک برای نمایش داده شد جهانی ایجاد می شود. + +داده ها ناهمگام نوشته شده است. هنگامی که در جدول قرار داده شده, بلوک داده ها فقط به سیستم فایل های محلی نوشته شده. داده ها به سرور از راه دور در پس زمینه در اسرع وقت ارسال می شود. دوره ارسال داده ها توسط مدیریت [در حال بارگذاری](../../../operations/settings/settings.md#distributed_directory_monitor_sleep_time_ms) و [در حال بارگذاری](../../../operations/settings/settings.md#distributed_directory_monitor_max_sleep_time_ms) تنظیمات. این `Distributed` موتور هر فایل می فرستد با داده های درج شده به طور جداگانه, اما شما می توانید دسته ای از ارسال فایل های با فعال [نمایش سایت](../../../operations/settings/settings.md#distributed_directory_monitor_batch_inserts) تنظیمات. این تنظیم را بهبود می بخشد عملکرد خوشه با استفاده بهتر از سرور محلی و منابع شبکه. شما باید بررسی کنید که داده ها با موفقیت با چک کردن لیست فایل ها (داده ها در حال انتظار برای ارسال) در دایرکتوری جدول ارسال می شود: `/var/lib/clickhouse/data/database/table/`. + +اگر سرور متوقف به وجود داشته باشد و یا راه اندازی مجدد خشن بود (مثلا, پس از یک شکست دستگاه) پس از قرار دادن به یک جدول توزیع, داده های درج شده ممکن است از دست داده. اگر بخشی از داده های خراب شده در دایرکتوری جدول شناسایی شود به ‘broken’ دایرکتوری فرعی و دیگر استفاده می شود. + +پردازش پرس و جو در سراسر تمام کپی در یک سفال واحد موازی است زمانی که گزینه حداکثر\_پرورالهراپیلاس فعال است. برای کسب اطلاعات بیشتر به بخش مراجعه کنید [بیشینه\_راپرال\_راپیکال](../../../operations/settings/settings.md#settings-max_parallel_replicas). + +## ستونهای مجازی {#virtual-columns} + +- `_shard_num` — Contains the `shard_num` (از `system.clusters`). نوع: [UInt32](../../../sql_reference/data_types/int_uint.md). + +!!! note "یادداشت" + از [`remote`](../../../sql_reference/table_functions/remote.md)/`cluster` توابع جدول داخلی ایجاد نمونه موقت از همان توزیع موتور, `_shard_num` در دسترس وجود دارد بیش از حد. 
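+
+نمونه فرضی استفاده از ستون مجازی `_shard_num` (نام جدول فرضی است):
+
+``` sql
+-- شمارش ردیف ها به تفکیک سفال از طریق جدول توزیع شده
+SELECT _shard_num, count() AS cnt
+FROM hits_all
+GROUP BY _shard_num
+ORDER BY _shard_num;
+```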
+ +**همچنین نگاه کنید** + +- [ستونهای مجازی](index.md#table_engines-virtual_columns) + +[مقاله اصلی](https://clickhouse.tech/docs/en/operations/table_engines/distributed/) diff --git a/docs/fa/engines/table_engines/special/external_data.md b/docs/fa/engines/table_engines/special/external_data.md new file mode 100644 index 00000000000..9041e4f667c --- /dev/null +++ b/docs/fa/engines/table_engines/special/external_data.md @@ -0,0 +1,68 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 34 +toc_title: "\u062F\u0627\u062F\u0647\u0647\u0627\u06CC \u062E\u0627\u0631\u062C\u06CC" +--- + +# داده های خارجی برای پردازش پرس و جو {#external-data-for-query-processing} + +تاتر اجازه می دهد تا ارسال یک سرور داده ها که برای پردازش یک پرس و جو مورد نیاز است, همراه با پرس و جو را انتخاب کنید. این داده ها در یک جدول موقت قرار داده (نگاه کنید به بخش “Temporary tables”) و می تواند مورد استفاده قرار گیرد در پرس و جو (برای مثال در اپراتورها). + +مثلا, اگر شما یک فایل متنی با شناسه کاربر مهم, شما می توانید به سرور همراه پرس و جو است که با استفاده از فیلتراسیون توسط این لیست ارسال. + +اگر شما نیاز به اجرای بیش از یک پرس و جو با حجم زیادی از داده های خارجی از این ویژگی استفاده نکنید. بهتر است برای بارگذاری داده ها به دسی بل جلوتر از زمان. + +داده های خارجی را می توان با استفاده از مشتری خط فرمان (در حالت غیر تعاملی) و یا با استفاده از رابط قام ارسال می شود. + +در خط فرمان مشتری شما می توانید مشخص پارامترهای بخش در قالب + +``` bash +--external --file=... [--name=...] [--format=...] [--types=...|--structure=...] +``` + +شما ممکن است بخش های متعدد مثل این, برای تعدادی از جداول در حال انتقال. + +**–external** – Marks the beginning of a clause. +**–file** – Path to the file with the table dump, or -, which refers to stdin. +فقط یک جدول را می توان از استدین بازیابی. + +پارامترهای زیر اختیاری هستند: **–name**– Name of the table. If omitted, \_data is used. +**–format** – Data format in the file. If omitted, TabSeparated is used. + +یکی از پارامترهای زیر مورد نیاز است:**–types** – A list of comma-separated column types. For example: `UInt64,String`. The columns will be named \_1, \_2, … +**–structure**– The table structure in the format`UserID UInt64`, `URL String`. تعریف نام ستون و انواع. + +فایل های مشخص شده در ‘file’ خواهد شد با فرمت مشخص شده در تجزیه ‘format’ با استفاده از انواع داده های مشخص شده در ‘types’ یا ‘structure’. جدول خواهد شد به سرور ارسال شده و در دسترس وجود دارد به عنوان یک جدول موقت با نام در ‘name’. + +مثالها: + +``` bash +$ echo -ne "1\n2\n3\n" | clickhouse-client --query="SELECT count() FROM test.visits WHERE TraficSourceID IN _data" --external --file=- --types=Int8 +849897 +$ cat /etc/passwd | sed 's/:/\t/g' | clickhouse-client --query="SELECT shell, count() AS c FROM passwd GROUP BY shell ORDER BY c DESC" --external --file=- --name=passwd --structure='login String, unused String, uid UInt16, gid UInt16, comment String, home String, shell String' +/bin/sh 20 +/bin/false 5 +/bin/bash 4 +/usr/sbin/nologin 1 +/bin/sync 1 +``` + +هنگام استفاده از رابط اچ تی پی, داده های خارجی در قالب چند/فرم داده به تصویب رسید. هر جدول به عنوان یک فایل جداگانه منتقل می شود. نام جدول از نام فایل گرفته شده است. این ‘query\_string’ پارامترهای منتقل می شود ‘name\_format’, ‘name\_types’ و ‘name\_structure’ کجا ‘name’ نام جدول که این پارامترها به مطابقت است. معنای پارامترهای همان است که در هنگام استفاده از مشتری خط فرمان است. 
+ +مثال: + +``` bash +$ cat /etc/passwd | sed 's/:/\t/g' > passwd.tsv + +$ curl -F 'passwd=@passwd.tsv;' 'http://localhost:8123/?query=SELECT+shell,+count()+AS+c+FROM+passwd+GROUP+BY+shell+ORDER+BY+c+DESC&passwd_structure=login+String,+unused+String,+uid+UInt16,+gid+UInt16,+comment+String,+home+String,+shell+String' +/bin/sh 20 +/bin/false 5 +/bin/bash 4 +/usr/sbin/nologin 1 +/bin/sync 1 +``` + +برای پردازش پرس و جو توزیع, جداول موقت به تمام سرور از راه دور ارسال. + +[مقاله اصلی](https://clickhouse.tech/docs/en/operations/table_engines/external_data/) diff --git a/docs/fa/engines/table_engines/special/file.md b/docs/fa/engines/table_engines/special/file.md new file mode 100644 index 00000000000..1f04f6dc692 --- /dev/null +++ b/docs/fa/engines/table_engines/special/file.md @@ -0,0 +1,90 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 37 +toc_title: "\u067E\u0631\u0648\u0646\u062F\u0647" +--- + +# پرونده {#table_engines-file} + +موتور جدول فایل داده ها را در یک فایل در یکی از پشتیبانی نگه می دارد [پرونده +فرشها](../../../interfaces/formats.md#formats) (تابسپار, بومی, و غیره.). + +نمونه های استفاده: + +- صادرات داده ها از خانه کلیک به فایل. +- تبدیل داده ها از یک فرمت به دیگری. +- به روز رسانی داده ها در تاتر از طریق ویرایش یک فایل بر روی یک دیسک. + +## استفاده در سرور کلیک {#usage-in-clickhouse-server} + +``` sql +File(Format) +``` + +این `Format` پارامتر یکی از فرمت های فایل های موجود را مشخص می کند. برای انجام +`SELECT` نمایش داده شد, فرمت باید برای ورودی پشتیبانی می شود, و به انجام +`INSERT` queries – for output. The available formats are listed in the +[فرشها](../../../interfaces/formats.md#formats) بخش. + +کلیک اجازه نمی دهد مسیر سیستم فایل را مشخص کنید`File`. این پوشه تعریف شده توسط استفاده کنید [مسیر](../../../operations/server_configuration_parameters/settings.md) تنظیم در پیکربندی سرور. + +هنگام ایجاد جدول با استفاده از `File(Format)` این دایرکتوری فرعی خالی در این پوشه ایجاد می کند. هنگامی که داده ها به جدول نوشته شده است, این را به قرار `data.Format` فایل در دایرکتوری فرعی. + +شما می توانید این زیر پوشه و فایل را در فایل سیستم سرور و سپس ایجاد کنید [ATTACH](../../../sql_reference/statements/misc.md) این جدول اطلاعات با نام تطبیق, بنابراین شما می توانید داده ها را از این فایل پرس و جو. + +!!! warning "اخطار" + مراقب باشید با این قابلیت, به دلیل تاتر می کند پیگیری تغییرات خارجی به چنین فایل را حفظ کند. نتیجه همزمان می نویسد: از طریق clickhouse و خارج از clickhouse تعریف نشده است. + +**مثال:** + +**1.** تنظیم `file_engine_table` جدول: + +``` sql +CREATE TABLE file_engine_table (name String, value UInt32) ENGINE=File(TabSeparated) +``` + +به طور پیش فرض کلیک خواهد پوشه ایجاد کنید `/var/lib/clickhouse/data/default/file_engine_table`. + +**2.** دستی ایجاد کنید `/var/lib/clickhouse/data/default/file_engine_table/data.TabSeparated` حاوی: + +``` bash +$ cat data.TabSeparated +one 1 +two 2 +``` + +**3.** پرسوجوی داده: + +``` sql +SELECT * FROM file_engine_table +``` + +``` text +┌─name─┬─value─┐ +│ one │ 1 │ +│ two │ 2 │ +└──────┴───────┘ +``` + +## استفاده در کلیک-محلی {#usage-in-clickhouse-local} + +داخل [کلیک-محلی](../../../operations/utilities/clickhouse-local.md) موتور فایل مسیر فایل علاوه بر می پذیرد `Format`. جریان های ورودی / خروجی پیش فرض را می توان با استفاده از نام های عددی یا قابل خواندن توسط انسان مشخص کرد `0` یا `stdin`, `1` یا `stdout`. 
+**مثال:** + +``` bash +$ echo -e "1,2\n3,4" | clickhouse-local -q "CREATE TABLE table (a Int64, b Int64) ENGINE = File(CSV, stdin); SELECT a, b FROM table; DROP TABLE table" +``` + +## اطلاعات پیاده سازی {#details-of-implementation} + +- چندگانه `SELECT` نمایش داده شد را می توان به صورت همزمان انجام, ولی `INSERT` نمایش داده شد هر یک از دیگر صبر کنید. +- پشتیبانی از ایجاد فایل جدید توسط `INSERT` پرس و جو. +- اگر پرونده وجود داشته باشد, `INSERT` ارزش های جدید را در این برنامه اضافه کنید. +- پشتیبانی نمیشود: + - `ALTER` + - `SELECT ... SAMPLE` + - شاخص ها + - تکرار + +[مقاله اصلی](https://clickhouse.tech/docs/en/operations/table_engines/file/) diff --git a/docs/fa/engines/table_engines/special/generate.md b/docs/fa/engines/table_engines/special/generate.md new file mode 100644 index 00000000000..affef675ae6 --- /dev/null +++ b/docs/fa/engines/table_engines/special/generate.md @@ -0,0 +1,61 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 46 +toc_title: "\u0698\u0646\u0631\u0627\u0644" +--- + +# ژنرال {#table_engines-generate} + +موتور جدول عمومی تولید داده های تصادفی برای طرح جدول داده شده است. + +نمونه های استفاده: + +- استفاده در تست به جمعیت جدول بزرگ تجدید پذیر. +- تولید ورودی تصادفی برای تست ریش ریش شدن. + +## استفاده در سرور کلیک {#usage-in-clickhouse-server} + +``` sql +ENGINE = GenerateRandom(random_seed, max_string_length, max_array_length) +``` + +این `max_array_length` و `max_string_length` پارامترها حداکثر طول همه را مشخص می کنند +ستون ها و رشته های متناوب در داده های تولید شده مطابقت دارند. + +تولید موتور جدول پشتیبانی از تنها `SELECT` نمایش داده شد. + +این پشتیبانی از تمام [انواع داده](../../../sql_reference/data_types/index.md) این را می توان در یک جدول ذخیره کرد به جز `LowCardinality` و `AggregateFunction`. + +**مثال:** + +**1.** تنظیم `generate_engine_table` جدول: + +``` sql +CREATE TABLE generate_engine_table (name String, value UInt32) ENGINE = GenerateRandom(1, 5, 3) +``` + +**2.** پرسوجوی داده: + +``` sql +SELECT * FROM generate_engine_table LIMIT 3 +``` + +``` text +┌─name─┬──────value─┐ +│ c4xJ │ 1412771199 │ +│ r │ 1791099446 │ +│ 7#$ │ 124312908 │ +└──────┴────────────┘ +``` + +## اطلاعات پیاده سازی {#details-of-implementation} + +- پشتیبانی نمیشود: + - `ALTER` + - `SELECT ... SAMPLE` + - `INSERT` + - شاخص ها + - تکرار + +[مقاله اصلی](https://clickhouse.tech/docs/en/operations/table_engines/generate/) diff --git a/docs/fa/engines/table_engines/special/index.md b/docs/fa/engines/table_engines/special/index.md new file mode 100644 index 00000000000..301ca7f0005 --- /dev/null +++ b/docs/fa/engines/table_engines/special/index.md @@ -0,0 +1,8 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_folder_title: Special +toc_priority: 31 +--- + + diff --git a/docs/fa/engines/table_engines/special/join.md b/docs/fa/engines/table_engines/special/join.md new file mode 100644 index 00000000000..92463b26b7b --- /dev/null +++ b/docs/fa/engines/table_engines/special/join.md @@ -0,0 +1,111 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 40 +toc_title: "\u067E\u06CC\u0648\u0633\u062A\u0646" +--- + +# پیوستن {#join} + +ساختار داده تهیه شده برای استفاده در [JOIN](../../../sql_reference/statements/select.md#select-join) عملیات. 
+ +## ایجاد یک جدول {#creating-a-table} + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] +( + name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [TTL expr1], + name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2] [TTL expr2], +) ENGINE = Join(join_strictness, join_type, k1[, k2, ...]) +``` + +شرح مفصلی از [CREATE TABLE](../../../sql_reference/statements/create.md#create-table-query) پرس و جو. + +**پارامترهای موتور** + +- `join_strictness` – [پیوستن به سختی](../../../sql_reference/statements/select.md#select-join-strictness). +- `join_type` – [پیوستن به نوع](../../../sql_reference/statements/select.md#select-join-types). +- `k1[, k2, ...]` – Key columns from the `USING` بند که `JOIN` عملیات با ساخته شده. + +وارد کردن `join_strictness` و `join_type` پارامترهای بدون نقل قول, مثلا, `Join(ANY, LEFT, col1)`. اونا باید با `JOIN` عملیاتی که جدول خواهد شد برای استفاده. اگر پارامترها مطابقت ندارند, خانه عروسکی می کند یک استثنا پرتاب نمی کند و ممکن است داده های نادرست بازگشت. + +## استفاده از جدول {#table-usage} + +### مثال {#example} + +ایجاد جدول سمت چپ: + +``` sql +CREATE TABLE id_val(`id` UInt32, `val` UInt32) ENGINE = TinyLog +``` + +``` sql +INSERT INTO id_val VALUES (1,11)(2,12)(3,13) +``` + +ایجاد سمت راست `Join` جدول: + +``` sql +CREATE TABLE id_val_join(`id` UInt32, `val` UInt8) ENGINE = Join(ANY, LEFT, id) +``` + +``` sql +INSERT INTO id_val_join VALUES (1,21)(1,22)(3,23) +``` + +پیوستن به جداول: + +``` sql +SELECT * FROM id_val ANY LEFT JOIN id_val_join USING (id) SETTINGS join_use_nulls = 1 +``` + +``` text +┌─id─┬─val─┬─id_val_join.val─┐ +│ 1 │ 11 │ 21 │ +│ 2 │ 12 │ ᴺᵁᴸᴸ │ +│ 3 │ 13 │ 23 │ +└────┴─────┴─────────────────┘ +``` + +به عنوان یک جایگزین, شما می توانید داده ها را از بازیابی `Join` جدول مشخص کردن مقدار پیوستن کلید: + +``` sql +SELECT joinGet('id_val_join', 'val', toUInt32(1)) +``` + +``` text +┌─joinGet('id_val_join', 'val', toUInt32(1))─┐ +│ 21 │ +└────────────────────────────────────────────┘ +``` + +### انتخاب و قرار دادن داده ها {#selecting-and-inserting-data} + +شما می توانید استفاده کنید `INSERT` نمایش داده شد برای اضافه کردن داده ها به `Join`- جدول موتور . اگر جدول با ایجاد شد `ANY` سخت, داده ها برای کلید های تکراری نادیده گرفته می شوند. با `ALL` سخت, تمام ردیف اضافه می شوند. + +شما نمی توانید انجام دهید `SELECT` پرس و جو به طور مستقیم از جدول. بجای, استفاده از یکی از روش های زیر: + +- میز را به سمت راست قرار دهید `JOIN` بند بند. +- تماس با [جوینت](../../../sql_reference/functions/other_functions.md#joinget) تابع, که به شما امکان استخراج داده ها از جدول به همان شیوه به عنوان از یک فرهنگ لغت. + +### محدودیت ها و تنظیمات {#join-limitations-and-settings} + +هنگام ایجاد یک جدول تنظیمات زیر اعمال می شود: + +- [ارزشهای خبری عبارتند از:](../../../operations/settings/settings.md#join_use_nulls) +- [\_پاک کردن \_روشن گرافیک](../../../operations/settings/query_complexity.md#settings-max_rows_in_join) +- [\_پویش همیشگی](../../../operations/settings/query_complexity.md#settings-max_bytes_in_join) +- [\_شروع مجدد](../../../operations/settings/query_complexity.md#settings-join_overflow_mode) +- [نمایش سایت](../../../operations/settings/settings.md#settings-join_any_take_last_row) + +این `Join`- جداول موتور نمی تواند مورد استفاده قرار گیرد `GLOBAL JOIN` عملیات. + +این `Join`- موتور اجازه می دهد تا استفاده کنید [ارزشهای خبری عبارتند از:](../../../operations/settings/settings.md#join_use_nulls) تنظیم در `CREATE TABLE` بیانیه. 
و [SELECT](../../../sql_reference/statements/select.md) پرسوجو نیز تنظیم `join_use_nulls` را به کار می برد. اگر تنظیمات `join_use_nulls` متفاوت باشند, ممکن است هنگام پیوستن جدول ها خطا دریافت کنید. این بستگی به نوع پیوستن دارد. هنگام استفاده از تابع [جوینت](../../../sql_reference/functions/other_functions.md#joinget), شما باید از همان تنظیم `join_use_nulls` در عبارات `CREATE TABLE` و `SELECT` استفاده کنید.
+
+## ذخیره سازی داده ها {#data-storage}
+
+`Join` داده های جدول است که همیشه در رم واقع. در هنگام قرار دادن ردیف به یک جدول, کلیکهاوس می نویسد بلوک های داده را به دایرکتوری بر روی دیسک به طوری که می توان ترمیم زمانی که سرور راه اندازی مجدد.
+
+اگر سرور نادرست راه اندازی مجدد بلوک داده ها بر روی دیسک از دست رفته یا صدمه دیده ممکن است. در این مورد ممکن است لازم باشد فایل را به صورت دستی با داده های خراب شده حذف کنید.
+
+[مقاله اصلی](https://clickhouse.tech/docs/en/operations/table_engines/join/)
diff --git a/docs/fa/engines/table_engines/special/materializedview.md b/docs/fa/engines/table_engines/special/materializedview.md
new file mode 100644
index 00000000000..7fa8c2d217d
--- /dev/null
+++ b/docs/fa/engines/table_engines/special/materializedview.md
@@ -0,0 +1,12 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 43
+toc_title: "\u0645\u0627\u062F\u0647 \u0628\u06CC\u0646\u06CC"
+---
+
+# ماده بینی {#materializedview}
+
+مورد استفاده برای اجرای نمایش محقق (برای اطلاعات بیشتر, دیدن [CREATE TABLE](../../../sql_reference/statements/create.md)). برای ذخیره سازی داده ها از یک موتور مختلف استفاده می کند که هنگام ایجاد دیدگاه مشخص شده است. هنگام خواندن از یک جدول, فقط با استفاده از این موتور.
+
+[مقاله اصلی](https://clickhouse.tech/docs/en/operations/table_engines/materializedview/)
diff --git a/docs/fa/engines/table_engines/special/memory.md b/docs/fa/engines/table_engines/special/memory.md
new file mode 100644
index 00000000000..ce6541dd22b
--- /dev/null
+++ b/docs/fa/engines/table_engines/special/memory.md
@@ -0,0 +1,19 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 44
+toc_title: "\u062D\u0627\u0641\u0638\u0647"
+---
+
+# حافظه {#memory}
+
+موتور حافظه ذخیره داده ها در رم, در شکل غیر فشرده. داده ها دقیقا به همان شکل ذخیره می شوند که هنگام خواندن دریافت می شود. به عبارت دیگر, خواندن از این جدول کاملا رایگان است.
+همزمان دسترسی به داده ها هماهنگ شده است. قفل کوتاه هستند: خواندن و نوشتن عملیات یکدیگر را مسدود نمی کند.
+شاخص پشتیبانی نمی شوند. خواندن موازی است.
+بهره وری حداکثر (بر فراز 10 گیگابایت/ثانیه) در نمایش داده شد ساده رسیده, چرا که هیچ خواندن از دیسک وجود دارد, از حالت فشرده خارج, و یا کسب اطلاعات. (ما باید توجه داشته باشید که در بسیاری از موارد بهره وری موتور ادغام تقریبا به عنوان بالا است.)
+هنگام راه اندازی مجدد یک سرور, داده ها از بین می رود از جدول و جدول خالی می شود.
+به طور معمول, با استفاده از این موتور جدول توجیه نیست. اما, این می تواند مورد استفاده قرار گیرد برای تست, و برای کارهایی که حداکثر سرعت مورد نیاز است در تعداد نسبتا کمی از ردیف (تا حدود 100,000,000).
+
+موتور حافظه توسط سیستم برای جداول موقت با داده های پرس و جو خارجی استفاده می شود (بخش را ببینید “External data for processing a query”) , و برای اجرای جهانی در (نگاه کنید به بخش “IN operators”).
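+
+A minimal usage sketch; the table and column names here are illustrative only:
+
+``` sql
+CREATE TABLE memory_example (id UInt64, value String) ENGINE = Memory
+```
+
+``` sql
+INSERT INTO memory_example VALUES (1, 'one'), (2, 'two')
+```
+
+After a server restart, `memory_example` would still exist but would be empty again, as described above.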
+ +[مقاله اصلی](https://clickhouse.tech/docs/en/operations/table_engines/memory/) diff --git a/docs/fa/engines/table_engines/special/merge.md b/docs/fa/engines/table_engines/special/merge.md new file mode 100644 index 00000000000..f75a20057e6 --- /dev/null +++ b/docs/fa/engines/table_engines/special/merge.md @@ -0,0 +1,70 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 36 +toc_title: "\u0627\u062F\u063A\u0627\u0645" +--- + +# ادغام {#merge} + +این `Merge` موتور (با اشتباه گرفته شود `MergeTree`) اطلاعات خود را ذخیره نمی, اما اجازه می دهد تا خواندن از هر تعداد از جداول دیگر به طور همزمان. +خواندن به طور خودکار موازی. نوشتن به یک جدول پشتیبانی نمی شود. هنگام خواندن, شاخص جداول که در واقع در حال خواندن استفاده می شود, در صورتی که وجود داشته باشد. +این `Merge` موتور می پذیرد پارامترهای: نام پایگاه داده و یک عبارت منظم برای جداول. + +مثال: + +``` sql +Merge(hits, '^WatchLog') +``` + +داده خواهد شد از جداول در خواندن `hits` پایگاه داده است که نام هایی که مطابقت با عبارت منظم ‘`^WatchLog`’. + +به جای نام پایگاه داده, شما می توانید یک عبارت ثابت است که یک رشته را برمی گرداند استفاده. به عنوان مثال, `currentDatabase()`. + +Regular expressions — [شماره 2](https://github.com/google/re2) (پشتیبانی از یک زیر مجموعه از مدار چاپی), حساس به حروف. +یادداشت ها در مورد فرار نمادها در عبارات منظم در “match” بخش. + +هنگام انتخاب جداول برای خواندن `Merge` جدول خود را انتخاب نخواهد شد, حتی اگر منطبق عبارت منظم. این است که برای جلوگیری از حلقه. +ممکن است که به ایجاد دو `Merge` جداول که بی وقفه سعی خواهد کرد به خواندن داده های هر یک از دیگران, اما این یک ایده خوب نیست. + +راه معمولی برای استفاده از `Merge` موتور برای کار با تعداد زیادی از `TinyLog` جداول به عنوان اگر با یک جدول واحد. + +مثال 2: + +بیایید می گویند شما باید یک جدول (watchlog\_old) و تصمیم به تغییر پارتیشن بندی بدون حرکت داده ها به یک جدول جدید (watchlog\_new) و شما نیاز به مراجعه به داده ها از هر دو جدول. + +``` sql +CREATE TABLE WatchLog_old(date Date, UserId Int64, EventType String, Cnt UInt64) +ENGINE=MergeTree(date, (UserId, EventType), 8192); +INSERT INTO WatchLog_old VALUES ('2018-01-01', 1, 'hit', 3); + +CREATE TABLE WatchLog_new(date Date, UserId Int64, EventType String, Cnt UInt64) +ENGINE=MergeTree PARTITION BY date ORDER BY (UserId, EventType) SETTINGS index_granularity=8192; +INSERT INTO WatchLog_new VALUES ('2018-01-02', 2, 'hit', 3); + +CREATE TABLE WatchLog as WatchLog_old ENGINE=Merge(currentDatabase(), '^WatchLog'); + +SELECT * +FROM WatchLog +``` + +``` text +┌───────date─┬─UserId─┬─EventType─┬─Cnt─┐ +│ 2018-01-01 │ 1 │ hit │ 3 │ +└────────────┴────────┴───────────┴─────┘ +┌───────date─┬─UserId─┬─EventType─┬─Cnt─┐ +│ 2018-01-02 │ 2 │ hit │ 3 │ +└────────────┴────────┴───────────┴─────┘ +``` + +## ستونهای مجازی {#virtual-columns} + +- `_table` — Contains the name of the table from which data was read. Type: [رشته](../../../sql_reference/data_types/string.md). + + شما می توانید شرایط ثابت را تنظیم کنید `_table` در `WHERE/PREWHERE` بند (به عنوان مثال, `WHERE _table='xyz'`). در این مورد عملیات خواندن فقط برای جداول انجام می شود که شرط است `_table` راضی است, به طوری که `_table` ستون به عنوان یک شاخص عمل می کند. 
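+
+For example, with the `WatchLog` table defined above, the following query (a quick sketch; the counts depend on the rows you inserted) shows how many rows each underlying table contributes:
+
+``` sql
+-- _table names the source table of each row
+SELECT _table, count() AS c
+FROM WatchLog
+GROUP BY _table
+```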
+ +**همچنین نگاه کنید به** + +- [مجازی ستون](index.md#table_engines-virtual_columns) + +[مقاله اصلی](https://clickhouse.tech/docs/en/operations/table_engines/merge/) diff --git a/docs/fa/engines/table_engines/special/null.md b/docs/fa/engines/table_engines/special/null.md new file mode 100644 index 00000000000..4a9ec067aeb --- /dev/null +++ b/docs/fa/engines/table_engines/special/null.md @@ -0,0 +1,14 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 38 +toc_title: "\u062E\u0627\u0644\u06CC" +--- + +# خالی {#null} + +هنگام نوشتن به یک جدول تهی, داده نادیده گرفته شده است. هنگام خواندن از یک جدول تهی, پاسخ خالی است. + +با این حال, شما می توانید یک نمایش تحقق در یک جدول تهی ایجاد. بنابراین داده های نوشته شده به جدول در نظر به پایان خواهد رسید. + +[مقاله اصلی](https://clickhouse.tech/docs/en/operations/table_engines/null/) diff --git a/docs/fa/engines/table_engines/special/set.md b/docs/fa/engines/table_engines/special/set.md new file mode 100644 index 00000000000..6bcdcfff7df --- /dev/null +++ b/docs/fa/engines/table_engines/special/set.md @@ -0,0 +1,19 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 39 +toc_title: "\u062A\u0646\u0638\u06CC\u0645" +--- + +# تنظیم {#set} + +مجموعه داده است که همیشه در رم. این است که برای استفاده در سمت راست اپراتور در نظر گرفته شده (بخش را ببینید “IN operators”). + +شما می توانید برای وارد کردن داده ها در جدول استفاده کنید. عناصر جدید خواهد شد به مجموعه داده ها اضافه, در حالی که تکراری نادیده گرفته خواهد شد. +اما شما نمی توانید انجام را انتخاب کنید از جدول. تنها راه بازیابی اطلاعات با استفاده از در نیمه راست اپراتور است. + +داده ها همیشه در رم واقع. برای قرار دادن, بلوک از داده های درج شده نیز به دایرکتوری از جداول بر روی دیسک نوشته شده. هنگام شروع سرور, این داده ها به رم لود. به عبارت دیگر, پس از راه اندازی مجدد, داده ها در محل باقی مانده است. + +برای راه اندازی مجدد سرور خشن بلوک داده ها بر روی دیسک ممکن است از دست داده و یا صدمه دیده است. در مورد دوم ممکن است لازم باشد فایل را با داده های خراب شده به صورت دستی حذف کنید. + +[مقاله اصلی](https://clickhouse.tech/docs/en/operations/table_engines/set/) diff --git a/docs/fa/engines/table_engines/special/url.md b/docs/fa/engines/table_engines/special/url.md new file mode 100644 index 00000000000..f891f1dc911 --- /dev/null +++ b/docs/fa/engines/table_engines/special/url.md @@ -0,0 +1,82 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 41 +toc_title: URL +--- + +# آدرس(url, قالب) {#table_engines-url} + +مدیریت داده ها بر روی یک سرور کنترل از راه دور قام/قام. این موتور مشابه است +به [پرونده](file.md) موتور + +## با استفاده از موتور در سرور کلیک {#using-the-engine-in-the-clickhouse-server} + +این `format` باید یکی باشد که کلیک خانه می تواند در استفاده از +`SELECT` نمایش داده شد و, در صورت لزوم, به `INSERTs`. برای لیست کامل از فرمت های پشتیبانی شده, دیدن +[فرشها](../../../interfaces/formats.md#formats). + +این `URL` باید به ساختار یاب منابع یکنواخت مطابقت داشته باشد. نشانی وب مشخصشده باید به کارگزار اشاره کند +که با استفاده از قام یا قام. این هیچ نیاز ندارد +هدر اضافی برای گرفتن پاسخ از سرور. + +`INSERT` و `SELECT` نمایش داده شد به تبدیل `POST` و `GET` درخواست ها, +به ترتیب. برای پردازش `POST` درخواست, سرور از راه دور باید پشتیبانی +[کدگذاری انتقال داده شده](https://en.wikipedia.org/wiki/Chunked_transfer_encoding). 
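+
+As a sketch of that mapping, using the `url_engine_table` created in the example below (the demo server there only answers GET, so the `INSERT` is purely illustrative): the `SELECT` is served by a single GET request, while the `INSERT` sends its rows, encoded in the declared format, in the body of a POST request:
+
+``` sql
+SELECT * FROM url_engine_table
+```
+
+``` sql
+INSERT INTO url_engine_table VALUES ('Hello', 1)
+```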
+ +شما می توانید حداکثر تعداد قام را محدود کنید تغییر مسیر هاپ به کواس با استفاده از [عناصر](../../../operations/settings/settings.md#setting-max_http_get_redirects) تنظیمات. + +**مثال:** + +**1.** ایجاد یک `url_engine_table` جدول روی کارگزار : + +``` sql +CREATE TABLE url_engine_table (word String, value UInt64) +ENGINE=URL('http://127.0.0.1:12345/', CSV) +``` + +**2.** ایجاد یک سرور اساسی قام با استفاده از پایتون استاندارد 3 ابزار و +شروع کن: + +``` python3 +from http.server import BaseHTTPRequestHandler, HTTPServer + +class CSVHTTPServer(BaseHTTPRequestHandler): + def do_GET(self): + self.send_response(200) + self.send_header('Content-type', 'text/csv') + self.end_headers() + + self.wfile.write(bytes('Hello,1\nWorld,2\n', "utf-8")) + +if __name__ == "__main__": + server_address = ('127.0.0.1', 12345) + HTTPServer(server_address, CSVHTTPServer).serve_forever() +``` + +``` bash +$ python3 server.py +``` + +**3.** درخواست اطلاعات: + +``` sql +SELECT * FROM url_engine_table +``` + +``` text +┌─word──┬─value─┐ +│ Hello │ 1 │ +│ World │ 2 │ +└───────┴───────┘ +``` + +## اطلاعات پیاده سازی {#details-of-implementation} + +- می خواند و می نویسد می تواند موازی +- پشتیبانی نمیشود: + - `ALTER` و `SELECT...SAMPLE` عملیات. + - شاخص. + - تکرار. + +[مقاله اصلی](https://clickhouse.tech/docs/en/operations/table_engines/url/) diff --git a/docs/fa/engines/table_engines/special/view.md b/docs/fa/engines/table_engines/special/view.md new file mode 100644 index 00000000000..a256e7c97b1 --- /dev/null +++ b/docs/fa/engines/table_engines/special/view.md @@ -0,0 +1,12 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 42 +toc_title: "\u0646\u0645\u0627" +--- + +# نما {#table_engines-view} + +مورد استفاده برای اجرای نمایش (برای اطلاعات بیشتر, دیدن `CREATE VIEW query`). این کار داده ذخیره نمی, اما تنها فروشگاه مشخص `SELECT` پرس و جو. هنگام خواندن از یک جدول, اجرا می شود این پرس و جو (و حذف تمام ستون های غیر ضروری از پرس و جو). + +[مقاله اصلی](https://clickhouse.tech/docs/en/operations/table_engines/view/) diff --git a/docs/fa/faq/general.md b/docs/fa/faq/general.md index 758382d7123..a63b0291aea 100644 --- a/docs/fa/faq/general.md +++ b/docs/fa/faq/general.md @@ -1,57 +1,60 @@ --- -en_copy: true +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 78 +toc_title: "\u0633\u0648\u0627\u0644\u0627\u062A \u0639\u0645\u0648\u0645\u06CC" --- -# General Questions {#general-questions} +# سوالات عمومی {#general-questions} -## Why Not Use Something Like MapReduce? {#why-not-use-something-like-mapreduce} +## چرا چیزی شبیه نگاشتکاهش استفاده نمی? {#why-not-use-something-like-mapreduce} -We can refer to systems like MapReduce as distributed computing systems in which the reduce operation is based on distributed sorting. The most common open-source solution in this class is [Apache Hadoop](http://hadoop.apache.org). Yandex uses its in-house solution, YT. +ما می توانیم به سیستم هایی مانند نگاشتکاهش به عنوان سیستم های محاسبات توزیع شده اشاره کنیم که عملیات کاهش بر اساس مرتب سازی توزیع شده است. شایع ترین راه حل منبع باز در این کلاس است [Apache Hadoop](http://hadoop.apache.org). یاندکس از راه حل داخلی خود استفاده می کند. -These systems aren’t appropriate for online queries due to their high latency. In other words, they can’t be used as the back-end for a web interface. These types of systems aren’t useful for real-time data updates. 
Distributed sorting isn’t the best way to perform reduce operations if the result of the operation and all the intermediate results (if there are any) are located in the RAM of a single server, which is usually the case for online queries. In such a case, a hash table is an optimal way to perform reduce operations. A common approach to optimizing map-reduce tasks is pre-aggregation (partial reduce) using a hash table in RAM. The user performs this optimization manually. Distributed sorting is one of the main causes of reduced performance when running simple map-reduce tasks. +این سیستم ها به دلیل زمان تاخیر بالا برای نمایش داده شد اینترنتی مناسب نیست. به عبارت دیگر نمی توانند به عنوان یک رابط وب به پایان برسند. این نوع سیستم ها برای به روز رسانی داده های زمان واقعی مفید نیستند. مرتب سازی توزیع شده بهترین راه برای انجام عملیات کاهش نیست اگر نتیجه عملیات و تمام نتایج متوسط (اگر وجود داشته باشد) در رم یک سرور قرار دارد که معمولا مورد نمایش داده شد اینترنتی است. در چنین حالتی یک جدول هش یک راه بهینه برای کاهش عملیات است. یک رویکرد مشترک برای بهینه سازی نقشه کاهش وظایف قبل از تجمع (بخشی کاهش) با استفاده از یک جدول هش در رم است. کاربر این بهینه سازی را به صورت دستی انجام می دهد. مرتب سازی توزیع شده یکی از علل اصلی کاهش عملکرد در هنگام اجرای نقشه ساده است-کاهش وظایف. -Most MapReduce implementations allow you to execute arbitrary code on a cluster. But a declarative query language is better suited to OLAP to run experiments quickly. For example, Hadoop has Hive and Pig. Also consider Cloudera Impala or Shark (outdated) for Spark, as well as Spark SQL, Presto, and Apache Drill. Performance when running such tasks is highly sub-optimal compared to specialized systems, but relatively high latency makes it unrealistic to use these systems as the backend for a web interface. +اکثر پیاده سازی نگاشتکاهش به شما اجازه اجرای کد دلخواه در یک خوشه. اما یک زبان پرس و جو اعلانی بهتر است به تاباندن لیزر به اجرا تجربه به سرعت مناسب است. مثلا, هادوپ است کندو و خوک. همچنین در نظر ابردرا ایمپالا یا کوسه (منسوخ شده) برای جرقه, و همچنین شمع جرقه, تند, و مته درد مقعد. عملکرد هنگامی که در حال اجرا از جمله وظایف بسیار زیر بهینه در مقایسه با سیستم های تخصصی, اما زمان تاخیر نسبتا بالا باعث می شود غیر واقعی برای استفاده از این سیستم به عنوان باطن برای یک رابط وب. -## What If I Have a Problem with Encodings When Using Oracle Through ODBC? {#oracle-odbc-encodings} +## اگر من یک مشکل با کدگذاریها در هنگام استفاده از اوراکل از طریق ان بی سی دارند? {#oracle-odbc-encodings} -If you use Oracle through the ODBC driver as a source of external dictionaries, you need to set the correct value for the `NLS_LANG` environment variable in `/etc/default/clickhouse`. For more information, see the [Oracle NLS\_LANG FAQ](https://www.oracle.com/technetwork/products/globalization/nls-lang-099431.html). +اگر شما استفاده از اوراکل از طریق راننده او بی سی به عنوان یک منبع از لغت نامه های خارجی, شما نیاز به تنظیم مقدار صحیح برای `NLS_LANG` متغیر محیطی در `/etc/default/clickhouse`. برای کسب اطلاعات بیشتر, دیدن [اوراکل nls\_lang پرسش و پاسخ](https://www.oracle.com/technetwork/products/globalization/nls-lang-099431.html). -**Example** +**مثال** ``` sql NLS_LANG=RUSSIAN_RUSSIA.UTF8 ``` -## How Do I Export Data from ClickHouse to a File? {#how-to-export-to-file} +## چگونه می توانم صادرات داده ها از خانه رعیتی به یک فایل? 
{#how-to-export-to-file} -### Using INTO OUTFILE Clause {#using-into-outfile-clause} +### با استفاده از به outfile بند {#using-into-outfile-clause} -Add an [INTO OUTFILE](../query_language/select/#into-outfile-clause) clause to your query. +افزودن یک [INTO OUTFILE](../query_language/select/#into-outfile-clause) بند به درخواست شما. -For example: +به عنوان مثال: ``` sql SELECT * FROM table INTO OUTFILE 'file' ``` -By default, ClickHouse uses the [TabSeparated](../interfaces/formats.md#tabseparated) format for output data. To select the [data format](../interfaces/formats.md), use the [FORMAT clause](../query_language/select/#format-clause). +به طور پیش فرض, تاتر با استفاده از [جدول دار](../interfaces/formats.md#tabseparated) فرمت برای داده های خروجی. برای انتخاب [قالب داده](../interfaces/formats.md), استفاده از [بند فرمت](../query_language/select/#format-clause). -For example: +به عنوان مثال: ``` sql SELECT * FROM table INTO OUTFILE 'file' FORMAT CSV ``` -### Using a File-Engine Table {#using-a-file-engine-table} +### با استفاده از جدول فایل موتور {#using-a-file-engine-table} -See [File](../operations/table_engines/file.md). +ببینید [پرونده](../engines/table_engines/special/file.md). -### Using Command-Line Redirection {#using-command-line-redirection} +### با استفاده از تغییر مسیر خط فرمان {#using-command-line-redirection} ``` sql $ clickhouse-client --query "SELECT * from table" --format FormatName > result.txt ``` -See [clickhouse-client](../interfaces/cli.md). +ببینید [کلیک مشتری](../interfaces/cli.md). -{## [Original article](https://clickhouse.tech/docs/en/faq/general/) ##} +{## [مقاله اصلی](https://clickhouse.tech/docs/en/faq/general/) ##} diff --git a/docs/fa/faq/index.md b/docs/fa/faq/index.md new file mode 100644 index 00000000000..d0338c572e4 --- /dev/null +++ b/docs/fa/faq/index.md @@ -0,0 +1,8 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_folder_title: F.A.Q. +toc_priority: 76 +--- + + diff --git a/docs/fa/getting_started/example_datasets/amplab_benchmark.md b/docs/fa/getting_started/example_datasets/amplab_benchmark.md index f5c8ff2e9d0..8ad22853e2d 100644 --- a/docs/fa/getting_started/example_datasets/amplab_benchmark.md +++ b/docs/fa/getting_started/example_datasets/amplab_benchmark.md @@ -1,33 +1,34 @@ -
    +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 17 +toc_title: "\u0645\u0639\u06CC\u0627\u0631 \u0628\u0632\u0631\u06AF \u062F\u0627\u062F\ + \u0647 \u0647\u0627\u06CC \u062A\u0642\u0648\u06CC\u062A \u06A9\u0646\u0646\u062F\ + \u0647" +--- -# بنچمارک AMPLab Big Data {#bnchmrkh-amplab-big-data} +# معیار بزرگ داده های تقویت کننده {#amplab-big-data-benchmark} ببینید https://amplab.cs.berkeley.edu/benchmark/ -با یک اکانت مجانی در https://aws.amazon.com ثبت نام کنید. شما نیاز به ایمیل، شماره تلفن و credit card دارید. یک Access key جدید از https://console.aws.amazon.com/iam/home?nc2=h\_m\_sc\#security\_credential دریافت کنید. +ثبت نام برای یک حساب کاربری رایگان در https://aws.amazon.com. این نیاز به یک کارت اعتباری, پست الکترونیک, و شماره تلفن. یک کلید دسترسی جدید در https://console.aws.amazon.com/iam/home?nc2=h\_m\_sc\#security\_credential -در کنسول این دستورات را وارد کنید: - -
    +زیر را در کنسول اجرا کنید: ``` bash -sudo apt-get install s3cmd -mkdir tiny; cd tiny; -s3cmd sync s3://big-data-benchmark/pavlo/text-deflate/tiny/ . -cd .. -mkdir 1node; cd 1node; -s3cmd sync s3://big-data-benchmark/pavlo/text-deflate/1node/ . -cd .. -mkdir 5nodes; cd 5nodes; -s3cmd sync s3://big-data-benchmark/pavlo/text-deflate/5nodes/ . -cd .. +$ sudo apt-get install s3cmd +$ mkdir tiny; cd tiny; +$ s3cmd sync s3://big-data-benchmark/pavlo/text-deflate/tiny/ . +$ cd .. +$ mkdir 1node; cd 1node; +$ s3cmd sync s3://big-data-benchmark/pavlo/text-deflate/1node/ . +$ cd .. +$ mkdir 5nodes; cd 5nodes; +$ s3cmd sync s3://big-data-benchmark/pavlo/text-deflate/5nodes/ . +$ cd .. ``` -
    - -این query های ClickHouse را اجرا کنید: - -
    +اجرای نمایش داده شد زیر کلیک: ``` sql CREATE TABLE rankings_tiny @@ -91,26 +92,18 @@ CREATE TABLE uservisits_5nodes_on_single ) ENGINE = MergeTree(visitDate, visitDate, 8192); ``` -
    - -به کنسول برگردید و دستورات زیر را مجددا اجرا کنید: - -
    +بازگشت به کنسول: ``` bash -for i in tiny/rankings/*.deflate; do echo $i; zlib-flate -uncompress < $i | clickhouse-client --host=example-perftest01j --query="INSERT INTO rankings_tiny FORMAT CSV"; done -for i in tiny/uservisits/*.deflate; do echo $i; zlib-flate -uncompress < $i | clickhouse-client --host=example-perftest01j --query="INSERT INTO uservisits_tiny FORMAT CSV"; done -for i in 1node/rankings/*.deflate; do echo $i; zlib-flate -uncompress < $i | clickhouse-client --host=example-perftest01j --query="INSERT INTO rankings_1node FORMAT CSV"; done -for i in 1node/uservisits/*.deflate; do echo $i; zlib-flate -uncompress < $i | clickhouse-client --host=example-perftest01j --query="INSERT INTO uservisits_1node FORMAT CSV"; done -for i in 5nodes/rankings/*.deflate; do echo $i; zlib-flate -uncompress < $i | clickhouse-client --host=example-perftest01j --query="INSERT INTO rankings_5nodes_on_single FORMAT CSV"; done -for i in 5nodes/uservisits/*.deflate; do echo $i; zlib-flate -uncompress < $i | clickhouse-client --host=example-perftest01j --query="INSERT INTO uservisits_5nodes_on_single FORMAT CSV"; done +$ for i in tiny/rankings/*.deflate; do echo $i; zlib-flate -uncompress < $i | clickhouse-client --host=example-perftest01j --query="INSERT INTO rankings_tiny FORMAT CSV"; done +$ for i in tiny/uservisits/*.deflate; do echo $i; zlib-flate -uncompress < $i | clickhouse-client --host=example-perftest01j --query="INSERT INTO uservisits_tiny FORMAT CSV"; done +$ for i in 1node/rankings/*.deflate; do echo $i; zlib-flate -uncompress < $i | clickhouse-client --host=example-perftest01j --query="INSERT INTO rankings_1node FORMAT CSV"; done +$ for i in 1node/uservisits/*.deflate; do echo $i; zlib-flate -uncompress < $i | clickhouse-client --host=example-perftest01j --query="INSERT INTO uservisits_1node FORMAT CSV"; done +$ for i in 5nodes/rankings/*.deflate; do echo $i; zlib-flate -uncompress < $i | clickhouse-client --host=example-perftest01j --query="INSERT INTO rankings_5nodes_on_single FORMAT CSV"; done +$ for i in 5nodes/uservisits/*.deflate; do echo $i; zlib-flate -uncompress < $i | clickhouse-client --host=example-perftest01j --query="INSERT INTO uservisits_5nodes_on_single FORMAT CSV"; done ``` -
    - -query های گرفتن data sample - -
    +نمایش داده شد برای اخذ نمونه داده ها: ``` sql SELECT pageURL, pageRank FROM rankings_1node WHERE pageRank > 1000 @@ -135,4 +128,4 @@ ORDER BY totalRevenue DESC LIMIT 1 ``` -[مقاله اصلی](https://clickhouse.tech/docs/fa/getting_started/example_datasets/amplab_benchmark/) +[مقاله اصلی](https://clickhouse.tech/docs/en/getting_started/example_datasets/amplab_benchmark/) diff --git a/docs/fa/getting_started/example_datasets/criteo.md b/docs/fa/getting_started/example_datasets/criteo.md index de5046e1c76..7602f3a5577 100644 --- a/docs/fa/getting_started/example_datasets/criteo.md +++ b/docs/fa/getting_started/example_datasets/criteo.md @@ -1,32 +1,29 @@ -
    +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 19 +toc_title: "\u062A\u0631\u0627\u0628\u0627\u06CC\u062A \u06A9\u0644\u06CC\u06A9 \u0633\ + \u06CC\u0627\u0647\u0647\u0647\u0627\u06CC \u0645\u0631\u0628\u0648\u0637 \u0627\ + \u0632 \u0645\u062E\u0644\u0648\u0642" +--- -# ترابایت از لاگ های کلیک از سرویس Criteo {#trbyt-z-lg-hy-khlykh-z-srwys-criteo} +# ترابایت کلیک سیاهههای مربوط از مخلوق {#terabyte-of-click-logs-from-criteo} -داده ها را از http://labs.criteo.com/downloads/download-terabyte-click-logs/ دانلود کنید. +دانلود داده ها از http://labs.criteo.com/downloads/download-terabyte-click-logs/ -جدول را برای import لاگ ها ایجاد کنید: - -
    +ایجاد یک جدول برای وارد کردن ورود به سیستم: ``` sql CREATE TABLE criteo_log (date Date, clicked UInt8, int1 Int32, int2 Int32, int3 Int32, int4 Int32, int5 Int32, int6 Int32, int7 Int32, int8 Int32, int9 Int32, int10 Int32, int11 Int32, int12 Int32, int13 Int32, cat1 String, cat2 String, cat3 String, cat4 String, cat5 String, cat6 String, cat7 String, cat8 String, cat9 String, cat10 String, cat11 String, cat12 String, cat13 String, cat14 String, cat15 String, cat16 String, cat17 String, cat18 String, cat19 String, cat20 String, cat21 String, cat22 String, cat23 String, cat24 String, cat25 String, cat26 String) ENGINE = Log ``` -
    - داده ها را دانلود کنید: -
    - ``` bash -for i in {00..23}; do echo $i; zcat datasets/criteo/day_${i#0}.gz | sed -r 's/^/2000-01-'${i/00/24}'\t/' | clickhouse-client --host=example-perftest01j --query="INSERT INTO criteo_log FORMAT TabSeparated"; done +$ for i in {00..23}; do echo $i; zcat datasets/criteo/day_${i#0}.gz | sed -r 's/^/2000-01-'${i/00/24}'\t/' | clickhouse-client --host=example-perftest01j --query="INSERT INTO criteo_log FORMAT TabSeparated"; done ``` -
    - -یک جدول برای داده های تبدیل شده ایجاد کنید: - -
    +ایجاد یک جدول برای داده های تبدیل شده: ``` sql CREATE TABLE criteo @@ -75,11 +72,7 @@ CREATE TABLE criteo ) ENGINE = MergeTree(date, intHash32(icat1), (date, intHash32(icat1)), 8192) ``` -
    - -داده ها را از لاگ raw انتقال و به جدول دوم وارد کنید: - -
    +داده ها را از ورود خام تغییر دهید و در جدول دوم قرار دهید: ``` sql INSERT INTO criteo SELECT date, clicked, int1, int2, int3, int4, int5, int6, int7, int8, int9, int10, int11, int12, int13, reinterpretAsUInt32(unhex(cat1)) AS icat1, reinterpretAsUInt32(unhex(cat2)) AS icat2, reinterpretAsUInt32(unhex(cat3)) AS icat3, reinterpretAsUInt32(unhex(cat4)) AS icat4, reinterpretAsUInt32(unhex(cat5)) AS icat5, reinterpretAsUInt32(unhex(cat6)) AS icat6, reinterpretAsUInt32(unhex(cat7)) AS icat7, reinterpretAsUInt32(unhex(cat8)) AS icat8, reinterpretAsUInt32(unhex(cat9)) AS icat9, reinterpretAsUInt32(unhex(cat10)) AS icat10, reinterpretAsUInt32(unhex(cat11)) AS icat11, reinterpretAsUInt32(unhex(cat12)) AS icat12, reinterpretAsUInt32(unhex(cat13)) AS icat13, reinterpretAsUInt32(unhex(cat14)) AS icat14, reinterpretAsUInt32(unhex(cat15)) AS icat15, reinterpretAsUInt32(unhex(cat16)) AS icat16, reinterpretAsUInt32(unhex(cat17)) AS icat17, reinterpretAsUInt32(unhex(cat18)) AS icat18, reinterpretAsUInt32(unhex(cat19)) AS icat19, reinterpretAsUInt32(unhex(cat20)) AS icat20, reinterpretAsUInt32(unhex(cat21)) AS icat21, reinterpretAsUInt32(unhex(cat22)) AS icat22, reinterpretAsUInt32(unhex(cat23)) AS icat23, reinterpretAsUInt32(unhex(cat24)) AS icat24, reinterpretAsUInt32(unhex(cat25)) AS icat25, reinterpretAsUInt32(unhex(cat26)) AS icat26 FROM criteo_log; @@ -87,4 +80,4 @@ INSERT INTO criteo SELECT date, clicked, int1, int2, int3, int4, int5, int6, int DROP TABLE criteo_log; ``` -[مقاله اصلی](https://clickhouse.tech/docs/fa/getting_started/example_datasets/criteo/) +[مقاله اصلی](https://clickhouse.tech/docs/en/getting_started/example_datasets/criteo/) diff --git a/docs/fa/getting_started/example_datasets/index.md b/docs/fa/getting_started/example_datasets/index.md index a07ff8b0010..19612cb26b7 100644 --- a/docs/fa/getting_started/example_datasets/index.md +++ b/docs/fa/getting_started/example_datasets/index.md @@ -1,18 +1,22 @@ --- -en_copy: true +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_folder_title: Example Datasets +toc_priority: 12 +toc_title: "\u0645\u0639\u0631\u0641\u06CC \u0634\u0631\u06A9\u062A" --- -# Example Datasets +# به عنوان مثال مجموعه داده {#example-datasets} -This section describes how to obtain example datasets and import them into ClickHouse. -For some datasets example queries are also available. +در این بخش چگونگی اخذ مجموعه داده ها به عنوان مثال و وارد کردن را به کلیک کنید. +برای برخی از نمونه های داده نمایش داده شد نمایش داده شد نیز در دسترس هستند. 
-* [Anonymized Yandex.Metrica Dataset](metrica.md) -* [Star Schema Benchmark](star_schema.md) -* [WikiStat](wikistat.md) -* [Terabyte of Click Logs from Criteo](criteo.md) -* [AMPLab Big Data Benchmark](amplab_benchmark.md) -* [New York Taxi Data](nyc_taxi.md) -* [OnTime](ontime.md) +- [ناشناس یاندکس.مجموعه داده های متریکا](metrica.md) +- [معیار طرحواره ستاره](star_schema.md) +- [ویکیستات](wikistat.md) +- [ترابایت کلیک سیاهههای مربوط از مخلوق](criteo.md) +- [معیار بزرگ داده های تقویت کننده](amplab_benchmark.md) +- [داده های تاکسی نیویورک](nyc_taxi.md) +- [به موقع](ontime.md) -[Original article](https://clickhouse.tech/docs/en/getting_started/example_datasets) +[مقاله اصلی](https://clickhouse.tech/docs/en/getting_started/example_datasets) diff --git a/docs/fa/getting_started/example_datasets/metrica.md b/docs/fa/getting_started/example_datasets/metrica.md index 1081001e3b8..5427a63259e 100644 --- a/docs/fa/getting_started/example_datasets/metrica.md +++ b/docs/fa/getting_started/example_datasets/metrica.md @@ -1,10 +1,18 @@ -# ناشناس یاندکس.اطلاعات متریکا {#nshns-yndkhs-tl-t-mtrykh} +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 21 +toc_title: "\u06CC\u0627\u0646\u062F\u06A9\u0633\u0627\u0637\u0644\u0627\u0639\u0627\ + \u062A \u0645\u062A\u0631\u06CC\u06A9\u0627" +--- + +# ناشناس یاندکس.اطلاعات متریکا {#anonymized-yandex-metrica-data} مجموعه داده شامل دو جدول حاوی داده های ناشناس در مورد بازدید (`hits_v1`) و بازدیدکننده داشته است (`visits_v1`) یاندکس . متریکا شما می توانید اطلاعات بیشتر در مورد یاندکس به عنوان خوانده شده.متریکا در [تاریخچه کلیک](../../introduction/history.md) بخش. -مجموعه داده ها شامل دو جدول است که هر کدام می توانند به عنوان یک فشرده دانلود شوند `tsv.xz` فایل و یا به عنوان پارتیشن تهیه شده است. علاوه بر این, یک نسخه طولانی از `hits` جدول حاوی 100 میلیون ردیف به عنوان تسو در دسترس است https://clickhouse-datasets.s3.yandex.net/hits/tsv/hits_100m_obfuscated_v1.tsv.xz و به عنوان پارتیشن تهیه شده در https://clickhouse-datasets.s3.yandex.net/hits/partitions/hits_100m_obfuscated_v1.tar.xz. +مجموعه داده ها شامل دو جدول است که هر کدام می توانند به عنوان یک فشرده دانلود شوند `tsv.xz` فایل و یا به عنوان پارتیشن تهیه شده است. علاوه بر این, یک نسخه طولانی از `hits` جدول حاوی 100 میلیون ردیف به عنوان تسو در دسترس است https://clickhouse-datasets.s3.yandex.net/hits/tsv/hits\_100m\_obfuscated\_v1.tsv.xz و به عنوان پارتیشن تهیه شده در https://clickhouse-datasets.s3.yandex.net/hits/partitions/hits\_100m\_obfuscated\_v1.tar.xz. -## اخذ جداول از پارتیشن های تهیه شده {#khdh-jdwl-z-prtyshn-hy-thyh-shdh} +## اخذ جداول از پارتیشن های تهیه شده {#obtaining-tables-from-prepared-partitions} دانلود و وارد کردن جدول بازدید: @@ -26,7 +34,7 @@ sudo service clickhouse-server restart clickhouse-client --query "SELECT COUNT(*) FROM datasets.visits_v1" ``` -## اخذ جداول از فایل تسو فشرده {#khdh-jdwl-z-fyl-tsw-fshrdh} +## اخذ جداول از فایل تسو فشرده {#obtaining-tables-from-compressed-tsv-file} دانلود و وارد کردن بازدید از فایل تسو فشرده: @@ -56,7 +64,7 @@ clickhouse-client --query "OPTIMIZE TABLE datasets.visits_v1 FINAL" clickhouse-client --query "SELECT COUNT(*) FROM datasets.visits_v1" ``` -## به عنوان مثال نمایش داده شد {#bh-nwn-mthl-nmysh-ddh-shd} +## به عنوان مثال نمایش داده شد {#example-queries} [اموزش کلیک](../../getting_started/tutorial.md) است در یاندکس بر اساس.مجموعه داده های متریکا و راه توصیه شده برای شروع این مجموعه داده ها فقط از طریق تدریس خصوصی است. 
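+
+As a hedged sanity check beyond the plain `COUNT(*)` queries above (assuming the import succeeded; `CounterID` is one of the dataset's columns used in the tutorial):
+
+``` sql
+SELECT CounterID, count() AS c FROM datasets.hits_v1 GROUP BY CounterID ORDER BY c DESC LIMIT 10
+```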
diff --git a/docs/fa/getting_started/example_datasets/nyc_taxi.md b/docs/fa/getting_started/example_datasets/nyc_taxi.md index 282b52ebcdb..fc3ed92dfa9 100644 --- a/docs/fa/getting_started/example_datasets/nyc_taxi.md +++ b/docs/fa/getting_started/example_datasets/nyc_taxi.md @@ -1,16 +1,26 @@ -
    +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 16 +toc_title: "\u062F\u0627\u062F\u0647 \u0647\u0627\u06CC \u062A\u0627\u06A9\u0633\u06CC\ + \ \u0646\u06CC\u0648\u06CC\u0648\u0631\u06A9" +--- -# داده های تاکسی New York {#ddh-hy-tkhsy-new-york} +# داده های تاکسی نیویورک {#new-york-taxi-data} -## چطور داده های raw را import کنیم {#chtwr-ddh-hy-raw-r-import-khnym} +این مجموعه داده را می توان به دو روش دریافت کرد: -برای توضیحات بیشتر در ارتباط با دیتاست و موارد مربوط به دانلود به دو لینک https://github.com/toddwschneider/nyc-taxi-data و http://tech.marksblogg.com/billion-nyc-taxi-rides-redshift.html مراجعه کنید. +- واردات از دادههای خام +- دانلود پارتیشن های تهیه شده -دانلود فایل ها حدود 277 گیگابایت داده ی غیرفشرده در قالب فایل های CSV می باشد. دانلود با استفاده ازبیش از یک کانکشن 1 Gbit نزدیک 1 ساعت طول می کشد (دانلود موازی از s3.amazonaws.com حداقل نصف کانال 1 Gbit رو جبران می کند). بعضی از فایل ها ممکن است به طول کامل دانلود نشوند. اندازه فایل ها را بررسی کنید و اگر فایلی مشکوک بود، مجددا دانلود کنید. +## نحوه وارد کردن داده های خام {#how-to-import-the-raw-data} -بعضی از فایل ها ممکن است دارای سطرهای نامعتبر باشه. با اجرای دستورات زیر این موارد برطرف می شود: +ببینید https://github.com/toddwschneider/nyc-taxi-data و http://tech.marksblogg.com/billion-nyc-taxi-rides-redshift.html برای شرح یک مجموعه داده ها و دستورالعمل ها برای دانلود. -
    +دانلود در مورد منجر خواهد شد 227 گیگابایت از داده های غیر فشرده در فایل های سی سی وی. دانلود حدود یک ساعت طول می کشد بیش از یک اتصال 1 گیگابیت (دانلود موازی از s3.amazonaws.com بازیابی حداقل نیمی از یک 1 گیگابیت کانال). +برخی از فایل ها ممکن است به طور کامل دانلود کنید. بررسی اندازه فایل و دوباره دانلود هر که به نظر می رسد تردید. + +برخی از فایل ها ممکن است حاوی ردیف نامعتبر است. شما می توانید به صورت زیر تعمیر کنید: ``` bash sed -E '/(.*,){18,}/d' data/yellow_tripdata_2010-02.csv > data/yellow_tripdata_2010-02.csv_ @@ -19,34 +29,28 @@ mv data/yellow_tripdata_2010-02.csv_ data/yellow_tripdata_2010-02.csv mv data/yellow_tripdata_2010-03.csv_ data/yellow_tripdata_2010-03.csv ``` -
    +سپس داده ها باید قبل از پردازش در شرایط لازم. این انتخاب از نقاط در چند ضلعی ایجاد (برای مطابقت با نقاط بر روی نقشه با بخش نیویورک از شهر نیویورک) و ترکیب تمام داده ها را به یک جدول تخت جریمه تنها با استفاده از یک ملحق. برای انجام این کار, شما نیاز به نصب postgresql با postgis پشتیبانی می کند. -سپس داده ها باید در PostgreSQL پیش پردازش شوند. این کار نقاط انتخابی چند ضلعی را ایجاد می کند (برای مطابقت با نقاط بر روی نقشه با مناطق شهر نیویورک) و تمام داده ها را با استفاده از JOIN در یک جدول flat و denormal ترکیب می کند. برای این کار شما نیاز به نصب PostgreSQL با پشتیبانی از PostGIS دارید. +مراقب باشید در هنگام اجرا `initialize_database.sh` و به صورت دستی دوباره بررسی کنید که تمام جداول به درستی ایجاد شد. -در هنگام اجرای `initialize_database.sh` مراقب باشید و به صورت دستی مجددا تمام جداول را چک کنید. +این در مورد طول می کشد 20-30 دقیقه برای پردازش ارزش هر ماه از داده ها در شرایط لازم, در مجموع در مورد 48 ساعت ها. -PostgreSQL تقریبا 20 تا 30 دقیقه برای پردازش هر ماه زمان نیاز میگیرد، در مجموع حدود 48 ساعت این عملیات طول می کشد. +شما می توانید تعداد ردیف های دانلود شده را به صورت زیر بررسی کنید: -از طریق دستور زیر شما می توانید تعداد سطرهای دانلود شده را دریافت کنید: +``` bash +$ time psql nyc-taxi-data -c "SELECT count(*) FROM trips;" +## Count + 1298979494 +(1 row) -
    +real 7m9.164s +``` - time psql nyc-taxi-data -c "SELECT count(*) FROM trips;" - ## count - 1298979494 - (1 row) +(این کمی بیش از 1.1 میلیارد ردیف گزارش شده توسط علامت گذاری به عنوان لیتوینچیک در یک سری از پست های وبلاگ.) - real 7m9.164s +اطلاعات در مورد اتصالات از 370 گیگابایت فضا استفاده می کند. -
    - -(در یکی از پست های مقالات Mark Litwintschik این کمی بیشتر از 1.1 میلیارد سطر گزارش شده است.) - -حجم داده ها در PostgreSQL 370 گیگابایت می باشد. - -Export گیری داده ها از PostgreSQL: - -
    +در حال بارگذاری: ``` sql COPY @@ -118,13 +122,10 @@ COPY ) TO '/opt/milovidov/nyc-taxi-data/trips.tsv'; ``` -
    +عکس فوری داده ها با سرعت حدود 50 مگابایت در ثانیه ایجاد می شود. در حالی که ایجاد عکس فوری, شل می خواند از دیسک با سرعت حدود 28 مگابایت در ثانیه. +این طول می کشد حدود 5 ساعت ها. فایل حاصل تسو 590612904969 بایت است. -snapshot از داده ها با سرعت 50 مگابایت در ثانیه انجام می شود. در هنگام ایجاد snapshot، PostgreSQL داده ها را با سرعت 28 مگابایت در ثانیه از روی می خواند. این کار حدود 5 ساعت زمان میبرد. نتیجه کار فایل TSV با حجم 590612904969 بایت می باشد. - -ساخت جدول temporary در ClickHouse: - -
    +ایجاد یک جدول موقت در کلیکهاوس: ``` sql CREATE TABLE trips @@ -183,115 +184,119 @@ dropoff_puma Nullable(String) ) ENGINE = Log; ``` -
    +برای تبدیل زمینه ها به انواع داده های صحیح تر و در صورت امکان برای از بین بردن نقاط صفر مورد نیاز است. -برای تبدیل فیلد ها به data type های صحیح تر و در صورت امکان، حذف NULL ها لازم است. +``` bash +$ time clickhouse-client --query="INSERT INTO trips FORMAT TabSeparated" < trips.tsv -
    +real 75m56.214s +``` - time clickhouse-client --query="INSERT INTO trips FORMAT TabSeparated" < trips.tsv +داده ها با سرعت 112-140 مگابایت در ثانیه خوانده می شوند. +بارگذاری داده ها را به یک جدول نوع ورود به سیستم در یک جریان و جو در زمان 76 دقیقه. +داده ها در این جدول با استفاده از 142 گیگابایت. - real 75m56.214s +(وارد کردن داده ها به طور مستقیم از پستگرس نیز ممکن است با استفاده از `COPY ... TO PROGRAM`.) -
    +Unfortunately, all the fields associated with the weather (precipitation…average\_wind\_speed) were filled with NULL. Because of this, we will remove them from the final data set. -داده ها با سرعت 112 تا 140 مگابیت در ثانیه خوانده می شوند. load کردن داده ها در جدول Log Type در یک Stream، 76 دقیقه زمان کشید. این داده ها در این جدول 142 گیگابایت فضا اشغال می کنند. +برای شروع, ما یک جدول بر روی یک سرور ایجاد. بعد ما را به جدول توزیع. -(import کردن داده ها به صورت مستقیم از Postgres با استفاده از `COPY ... TO PROGRAM` هم امکان پذیر است.) +ایجاد و پر کردن یک جدول خلاصه: -متاسفانه، تمام فیلد های مرتبط با آب و هوا (precipitation…average\_wind\_speed) با Null پر شدند. به خاطر همین، ما از دیتاست نهایی اینها رو حذف کردیم. +``` sql +CREATE TABLE trips_mergetree +ENGINE = MergeTree(pickup_date, pickup_datetime, 8192) +AS SELECT -برای شروع، ما یک جدول در یک سرور ایجاد کردیم. بعدا ما یک جدول توزیع شده می سازیم. +trip_id, +CAST(vendor_id AS Enum8('1' = 1, '2' = 2, 'CMT' = 3, 'VTS' = 4, 'DDS' = 5, 'B02512' = 10, 'B02598' = 11, 'B02617' = 12, 'B02682' = 13, 'B02764' = 14)) AS vendor_id, +toDate(pickup_datetime) AS pickup_date, +ifNull(pickup_datetime, toDateTime(0)) AS pickup_datetime, +toDate(dropoff_datetime) AS dropoff_date, +ifNull(dropoff_datetime, toDateTime(0)) AS dropoff_datetime, +assumeNotNull(store_and_fwd_flag) IN ('Y', '1', '2') AS store_and_fwd_flag, +assumeNotNull(rate_code_id) AS rate_code_id, +assumeNotNull(pickup_longitude) AS pickup_longitude, +assumeNotNull(pickup_latitude) AS pickup_latitude, +assumeNotNull(dropoff_longitude) AS dropoff_longitude, +assumeNotNull(dropoff_latitude) AS dropoff_latitude, +assumeNotNull(passenger_count) AS passenger_count, +assumeNotNull(trip_distance) AS trip_distance, +assumeNotNull(fare_amount) AS fare_amount, +assumeNotNull(extra) AS extra, +assumeNotNull(mta_tax) AS mta_tax, +assumeNotNull(tip_amount) AS tip_amount, +assumeNotNull(tolls_amount) AS tolls_amount, +assumeNotNull(ehail_fee) AS ehail_fee, +assumeNotNull(improvement_surcharge) AS improvement_surcharge, +assumeNotNull(total_amount) AS total_amount, +CAST((assumeNotNull(payment_type) AS pt) IN ('CSH', 'CASH', 'Cash', 'CAS', 'Cas', '1') ? 'CSH' : (pt IN ('CRD', 'Credit', 'Cre', 'CRE', 'CREDIT', '2') ? 'CRE' : (pt IN ('NOC', 'No Charge', 'No', '3') ? 'NOC' : (pt IN ('DIS', 'Dispute', 'Dis', '4') ? 'DIS' : 'UNK'))) AS Enum8('CSH' = 1, 'CRE' = 2, 'UNK' = 0, 'NOC' = 3, 'DIS' = 4)) AS payment_type_, +assumeNotNull(trip_type) AS trip_type, +ifNull(toFixedString(unhex(pickup), 25), toFixedString('', 25)) AS pickup, +ifNull(toFixedString(unhex(dropoff), 25), toFixedString('', 25)) AS dropoff, +CAST(assumeNotNull(cab_type) AS Enum8('yellow' = 1, 'green' = 2, 'uber' = 3)) AS cab_type, -یک جدول خلاصه ایجاد و پر کنید: +assumeNotNull(pickup_nyct2010_gid) AS pickup_nyct2010_gid, +toFloat32(ifNull(pickup_ctlabel, '0')) AS pickup_ctlabel, +assumeNotNull(pickup_borocode) AS pickup_borocode, +CAST(assumeNotNull(pickup_boroname) AS Enum8('Manhattan' = 1, 'Queens' = 4, 'Brooklyn' = 3, '' = 0, 'Bronx' = 2, 'Staten Island' = 5)) AS pickup_boroname, +toFixedString(ifNull(pickup_ct2010, '000000'), 6) AS pickup_ct2010, +toFixedString(ifNull(pickup_boroct2010, '0000000'), 7) AS pickup_boroct2010, +CAST(assumeNotNull(ifNull(pickup_cdeligibil, ' ')) AS Enum8(' ' = 0, 'E' = 1, 'I' = 2)) AS pickup_cdeligibil, +toFixedString(ifNull(pickup_ntacode, '0000'), 4) AS pickup_ntacode, -
    +CAST(assumeNotNull(pickup_ntaname) AS Enum16('' = 0, 'Airport' = 1, 'Allerton-Pelham Gardens' = 2, 'Annadale-Huguenot-Prince\'s Bay-Eltingville' = 3, 'Arden Heights' = 4, 'Astoria' = 5, 'Auburndale' = 6, 'Baisley Park' = 7, 'Bath Beach' = 8, 'Battery Park City-Lower Manhattan' = 9, 'Bay Ridge' = 10, 'Bayside-Bayside Hills' = 11, 'Bedford' = 12, 'Bedford Park-Fordham North' = 13, 'Bellerose' = 14, 'Belmont' = 15, 'Bensonhurst East' = 16, 'Bensonhurst West' = 17, 'Borough Park' = 18, 'Breezy Point-Belle Harbor-Rockaway Park-Broad Channel' = 19, 'Briarwood-Jamaica Hills' = 20, 'Brighton Beach' = 21, 'Bronxdale' = 22, 'Brooklyn Heights-Cobble Hill' = 23, 'Brownsville' = 24, 'Bushwick North' = 25, 'Bushwick South' = 26, 'Cambria Heights' = 27, 'Canarsie' = 28, 'Carroll Gardens-Columbia Street-Red Hook' = 29, 'Central Harlem North-Polo Grounds' = 30, 'Central Harlem South' = 31, 'Charleston-Richmond Valley-Tottenville' = 32, 'Chinatown' = 33, 'Claremont-Bathgate' = 34, 'Clinton' = 35, 'Clinton Hill' = 36, 'Co-op City' = 37, 'College Point' = 38, 'Corona' = 39, 'Crotona Park East' = 40, 'Crown Heights North' = 41, 'Crown Heights South' = 42, 'Cypress Hills-City Line' = 43, 'DUMBO-Vinegar Hill-Downtown Brooklyn-Boerum Hill' = 44, 'Douglas Manor-Douglaston-Little Neck' = 45, 'Dyker Heights' = 46, 'East Concourse-Concourse Village' = 47, 'East Elmhurst' = 48, 'East Flatbush-Farragut' = 49, 'East Flushing' = 50, 'East Harlem North' = 51, 'East Harlem South' = 52, 'East New York' = 53, 'East New York (Pennsylvania Ave)' = 54, 'East Tremont' = 55, 'East Village' = 56, 'East Williamsburg' = 57, 'Eastchester-Edenwald-Baychester' = 58, 'Elmhurst' = 59, 'Elmhurst-Maspeth' = 60, 'Erasmus' = 61, 'Far Rockaway-Bayswater' = 62, 'Flatbush' = 63, 'Flatlands' = 64, 'Flushing' = 65, 'Fordham South' = 66, 'Forest Hills' = 67, 'Fort Greene' = 68, 'Fresh Meadows-Utopia' = 69, 'Ft. Totten-Bay Terrace-Clearview' = 70, 'Georgetown-Marine Park-Bergen Beach-Mill Basin' = 71, 'Glen Oaks-Floral Park-New Hyde Park' = 72, 'Glendale' = 73, 'Gramercy' = 74, 'Grasmere-Arrochar-Ft. 
Wadsworth' = 75, 'Gravesend' = 76, 'Great Kills' = 77, 'Greenpoint' = 78, 'Grymes Hill-Clifton-Fox Hills' = 79, 'Hamilton Heights' = 80, 'Hammels-Arverne-Edgemere' = 81, 'Highbridge' = 82, 'Hollis' = 83, 'Homecrest' = 84, 'Hudson Yards-Chelsea-Flatiron-Union Square' = 85, 'Hunters Point-Sunnyside-West Maspeth' = 86, 'Hunts Point' = 87, 'Jackson Heights' = 88, 'Jamaica' = 89, 'Jamaica Estates-Holliswood' = 90, 'Kensington-Ocean Parkway' = 91, 'Kew Gardens' = 92, 'Kew Gardens Hills' = 93, 'Kingsbridge Heights' = 94, 'Laurelton' = 95, 'Lenox Hill-Roosevelt Island' = 96, 'Lincoln Square' = 97, 'Lindenwood-Howard Beach' = 98, 'Longwood' = 99, 'Lower East Side' = 100, 'Madison' = 101, 'Manhattanville' = 102, 'Marble Hill-Inwood' = 103, 'Mariner\'s Harbor-Arlington-Port Ivory-Graniteville' = 104, 'Maspeth' = 105, 'Melrose South-Mott Haven North' = 106, 'Middle Village' = 107, 'Midtown-Midtown South' = 108, 'Midwood' = 109, 'Morningside Heights' = 110, 'Morrisania-Melrose' = 111, 'Mott Haven-Port Morris' = 112, 'Mount Hope' = 113, 'Murray Hill' = 114, 'Murray Hill-Kips Bay' = 115, 'New Brighton-Silver Lake' = 116, 'New Dorp-Midland Beach' = 117, 'New Springville-Bloomfield-Travis' = 118, 'North Corona' = 119, 'North Riverdale-Fieldston-Riverdale' = 120, 'North Side-South Side' = 121, 'Norwood' = 122, 'Oakland Gardens' = 123, 'Oakwood-Oakwood Beach' = 124, 'Ocean Hill' = 125, 'Ocean Parkway South' = 126, 'Old Astoria' = 127, 'Old Town-Dongan Hills-South Beach' = 128, 'Ozone Park' = 129, 'Park Slope-Gowanus' = 130, 'Parkchester' = 131, 'Pelham Bay-Country Club-City Island' = 132, 'Pelham Parkway' = 133, 'Pomonok-Flushing Heights-Hillcrest' = 134, 'Port Richmond' = 135, 'Prospect Heights' = 136, 'Prospect Lefferts Gardens-Wingate' = 137, 'Queens Village' = 138, 'Queensboro Hill' = 139, 'Queensbridge-Ravenswood-Long Island City' = 140, 'Rego Park' = 141, 'Richmond Hill' = 142, 'Ridgewood' = 143, 'Rikers Island' = 144, 'Rosedale' = 145, 'Rossville-Woodrow' = 146, 'Rugby-Remsen Village' = 147, 'Schuylerville-Throgs Neck-Edgewater Park' = 148, 'Seagate-Coney Island' = 149, 'Sheepshead Bay-Gerritsen Beach-Manhattan Beach' = 150, 'SoHo-TriBeCa-Civic Center-Little Italy' = 151, 'Soundview-Bruckner' = 152, 'Soundview-Castle Hill-Clason Point-Harding Park' = 153, 'South Jamaica' = 154, 'South Ozone Park' = 155, 'Springfield Gardens North' = 156, 'Springfield Gardens South-Brookville' = 157, 'Spuyten Duyvil-Kingsbridge' = 158, 'St. Albans' = 159, 'Stapleton-Rosebank' = 160, 'Starrett City' = 161, 'Steinway' = 162, 'Stuyvesant Heights' = 163, 'Stuyvesant Town-Cooper Village' = 164, 'Sunset Park East' = 165, 'Sunset Park West' = 166, 'Todt Hill-Emerson Hill-Heartland Village-Lighthouse Hill' = 167, 'Turtle Bay-East Midtown' = 168, 'University Heights-Morris Heights' = 169, 'Upper East Side-Carnegie Hill' = 170, 'Upper West Side' = 171, 'Van Cortlandt Village' = 172, 'Van Nest-Morris Park-Westchester Square' = 173, 'Washington Heights North' = 174, 'Washington Heights South' = 175, 'West Brighton' = 176, 'West Concourse' = 177, 'West Farms-Bronx River' = 178, 'West New Brighton-New Brighton-St. 
George' = 179, 'West Village' = 180, 'Westchester-Unionport' = 181, 'Westerleigh' = 182, 'Whitestone' = 183, 'Williamsbridge-Olinville' = 184, 'Williamsburg' = 185, 'Windsor Terrace' = 186, 'Woodhaven' = 187, 'Woodlawn-Wakefield' = 188, 'Woodside' = 189, 'Yorkville' = 190, 'park-cemetery-etc-Bronx' = 191, 'park-cemetery-etc-Brooklyn' = 192, 'park-cemetery-etc-Manhattan' = 193, 'park-cemetery-etc-Queens' = 194, 'park-cemetery-etc-Staten Island' = 195)) AS pickup_ntaname, - CREATE TABLE trips_mergetree - ENGINE = MergeTree(pickup_date, pickup_datetime, 8192) - AS SELECT +toUInt16(ifNull(pickup_puma, '0')) AS pickup_puma, - trip_id, - CAST(vendor_id AS Enum8('1' = 1, '2' = 2, 'CMT' = 3, 'VTS' = 4, 'DDS' = 5, 'B02512' = 10, 'B02598' = 11, 'B02617' = 12, 'B02682' = 13, 'B02764' = 14)) AS vendor_id, - toDate(pickup_datetime) AS pickup_date, - ifNull(pickup_datetime, toDateTime(0)) AS pickup_datetime, - toDate(dropoff_datetime) AS dropoff_date, - ifNull(dropoff_datetime, toDateTime(0)) AS dropoff_datetime, - assumeNotNull(store_and_fwd_flag) IN ('Y', '1', '2') AS store_and_fwd_flag, - assumeNotNull(rate_code_id) AS rate_code_id, - assumeNotNull(pickup_longitude) AS pickup_longitude, - assumeNotNull(pickup_latitude) AS pickup_latitude, - assumeNotNull(dropoff_longitude) AS dropoff_longitude, - assumeNotNull(dropoff_latitude) AS dropoff_latitude, - assumeNotNull(passenger_count) AS passenger_count, - assumeNotNull(trip_distance) AS trip_distance, - assumeNotNull(fare_amount) AS fare_amount, - assumeNotNull(extra) AS extra, - assumeNotNull(mta_tax) AS mta_tax, - assumeNotNull(tip_amount) AS tip_amount, - assumeNotNull(tolls_amount) AS tolls_amount, - assumeNotNull(ehail_fee) AS ehail_fee, - assumeNotNull(improvement_surcharge) AS improvement_surcharge, - assumeNotNull(total_amount) AS total_amount, - CAST((assumeNotNull(payment_type) AS pt) IN ('CSH', 'CASH', 'Cash', 'CAS', 'Cas', '1') ? 'CSH' : (pt IN ('CRD', 'Credit', 'Cre', 'CRE', 'CREDIT', '2') ? 'CRE' : (pt IN ('NOC', 'No Charge', 'No', '3') ? 'NOC' : (pt IN ('DIS', 'Dispute', 'Dis', '4') ? 
'DIS' : 'UNK'))) AS Enum8('CSH' = 1, 'CRE' = 2, 'UNK' = 0, 'NOC' = 3, 'DIS' = 4)) AS payment_type_, - assumeNotNull(trip_type) AS trip_type, - ifNull(toFixedString(unhex(pickup), 25), toFixedString('', 25)) AS pickup, - ifNull(toFixedString(unhex(dropoff), 25), toFixedString('', 25)) AS dropoff, - CAST(assumeNotNull(cab_type) AS Enum8('yellow' = 1, 'green' = 2, 'uber' = 3)) AS cab_type, +assumeNotNull(dropoff_nyct2010_gid) AS dropoff_nyct2010_gid, +toFloat32(ifNull(dropoff_ctlabel, '0')) AS dropoff_ctlabel, +assumeNotNull(dropoff_borocode) AS dropoff_borocode, +CAST(assumeNotNull(dropoff_boroname) AS Enum8('Manhattan' = 1, 'Queens' = 4, 'Brooklyn' = 3, '' = 0, 'Bronx' = 2, 'Staten Island' = 5)) AS dropoff_boroname, +toFixedString(ifNull(dropoff_ct2010, '000000'), 6) AS dropoff_ct2010, +toFixedString(ifNull(dropoff_boroct2010, '0000000'), 7) AS dropoff_boroct2010, +CAST(assumeNotNull(ifNull(dropoff_cdeligibil, ' ')) AS Enum8(' ' = 0, 'E' = 1, 'I' = 2)) AS dropoff_cdeligibil, +toFixedString(ifNull(dropoff_ntacode, '0000'), 4) AS dropoff_ntacode, - assumeNotNull(pickup_nyct2010_gid) AS pickup_nyct2010_gid, - toFloat32(ifNull(pickup_ctlabel, '0')) AS pickup_ctlabel, - assumeNotNull(pickup_borocode) AS pickup_borocode, - CAST(assumeNotNull(pickup_boroname) AS Enum8('Manhattan' = 1, 'Queens' = 4, 'Brooklyn' = 3, '' = 0, 'Bronx' = 2, 'Staten Island' = 5)) AS pickup_boroname, - toFixedString(ifNull(pickup_ct2010, '000000'), 6) AS pickup_ct2010, - toFixedString(ifNull(pickup_boroct2010, '0000000'), 7) AS pickup_boroct2010, - CAST(assumeNotNull(ifNull(pickup_cdeligibil, ' ')) AS Enum8(' ' = 0, 'E' = 1, 'I' = 2)) AS pickup_cdeligibil, - toFixedString(ifNull(pickup_ntacode, '0000'), 4) AS pickup_ntacode, +CAST(assumeNotNull(dropoff_ntaname) AS Enum16('' = 0, 'Airport' = 1, 'Allerton-Pelham Gardens' = 2, 'Annadale-Huguenot-Prince\'s Bay-Eltingville' = 3, 'Arden Heights' = 4, 'Astoria' = 5, 'Auburndale' = 6, 'Baisley Park' = 7, 'Bath Beach' = 8, 'Battery Park City-Lower Manhattan' = 9, 'Bay Ridge' = 10, 'Bayside-Bayside Hills' = 11, 'Bedford' = 12, 'Bedford Park-Fordham North' = 13, 'Bellerose' = 14, 'Belmont' = 15, 'Bensonhurst East' = 16, 'Bensonhurst West' = 17, 'Borough Park' = 18, 'Breezy Point-Belle Harbor-Rockaway Park-Broad Channel' = 19, 'Briarwood-Jamaica Hills' = 20, 'Brighton Beach' = 21, 'Bronxdale' = 22, 'Brooklyn Heights-Cobble Hill' = 23, 'Brownsville' = 24, 'Bushwick North' = 25, 'Bushwick South' = 26, 'Cambria Heights' = 27, 'Canarsie' = 28, 'Carroll Gardens-Columbia Street-Red Hook' = 29, 'Central Harlem North-Polo Grounds' = 30, 'Central Harlem South' = 31, 'Charleston-Richmond Valley-Tottenville' = 32, 'Chinatown' = 33, 'Claremont-Bathgate' = 34, 'Clinton' = 35, 'Clinton Hill' = 36, 'Co-op City' = 37, 'College Point' = 38, 'Corona' = 39, 'Crotona Park East' = 40, 'Crown Heights North' = 41, 'Crown Heights South' = 42, 'Cypress Hills-City Line' = 43, 'DUMBO-Vinegar Hill-Downtown Brooklyn-Boerum Hill' = 44, 'Douglas Manor-Douglaston-Little Neck' = 45, 'Dyker Heights' = 46, 'East Concourse-Concourse Village' = 47, 'East Elmhurst' = 48, 'East Flatbush-Farragut' = 49, 'East Flushing' = 50, 'East Harlem North' = 51, 'East Harlem South' = 52, 'East New York' = 53, 'East New York (Pennsylvania Ave)' = 54, 'East Tremont' = 55, 'East Village' = 56, 'East Williamsburg' = 57, 'Eastchester-Edenwald-Baychester' = 58, 'Elmhurst' = 59, 'Elmhurst-Maspeth' = 60, 'Erasmus' = 61, 'Far Rockaway-Bayswater' = 62, 'Flatbush' = 63, 'Flatlands' = 64, 'Flushing' = 65, 'Fordham South' = 66, 'Forest Hills' = 
67, 'Fort Greene' = 68, 'Fresh Meadows-Utopia' = 69, 'Ft. Totten-Bay Terrace-Clearview' = 70, 'Georgetown-Marine Park-Bergen Beach-Mill Basin' = 71, 'Glen Oaks-Floral Park-New Hyde Park' = 72, 'Glendale' = 73, 'Gramercy' = 74, 'Grasmere-Arrochar-Ft. Wadsworth' = 75, 'Gravesend' = 76, 'Great Kills' = 77, 'Greenpoint' = 78, 'Grymes Hill-Clifton-Fox Hills' = 79, 'Hamilton Heights' = 80, 'Hammels-Arverne-Edgemere' = 81, 'Highbridge' = 82, 'Hollis' = 83, 'Homecrest' = 84, 'Hudson Yards-Chelsea-Flatiron-Union Square' = 85, 'Hunters Point-Sunnyside-West Maspeth' = 86, 'Hunts Point' = 87, 'Jackson Heights' = 88, 'Jamaica' = 89, 'Jamaica Estates-Holliswood' = 90, 'Kensington-Ocean Parkway' = 91, 'Kew Gardens' = 92, 'Kew Gardens Hills' = 93, 'Kingsbridge Heights' = 94, 'Laurelton' = 95, 'Lenox Hill-Roosevelt Island' = 96, 'Lincoln Square' = 97, 'Lindenwood-Howard Beach' = 98, 'Longwood' = 99, 'Lower East Side' = 100, 'Madison' = 101, 'Manhattanville' = 102, 'Marble Hill-Inwood' = 103, 'Mariner\'s Harbor-Arlington-Port Ivory-Graniteville' = 104, 'Maspeth' = 105, 'Melrose South-Mott Haven North' = 106, 'Middle Village' = 107, 'Midtown-Midtown South' = 108, 'Midwood' = 109, 'Morningside Heights' = 110, 'Morrisania-Melrose' = 111, 'Mott Haven-Port Morris' = 112, 'Mount Hope' = 113, 'Murray Hill' = 114, 'Murray Hill-Kips Bay' = 115, 'New Brighton-Silver Lake' = 116, 'New Dorp-Midland Beach' = 117, 'New Springville-Bloomfield-Travis' = 118, 'North Corona' = 119, 'North Riverdale-Fieldston-Riverdale' = 120, 'North Side-South Side' = 121, 'Norwood' = 122, 'Oakland Gardens' = 123, 'Oakwood-Oakwood Beach' = 124, 'Ocean Hill' = 125, 'Ocean Parkway South' = 126, 'Old Astoria' = 127, 'Old Town-Dongan Hills-South Beach' = 128, 'Ozone Park' = 129, 'Park Slope-Gowanus' = 130, 'Parkchester' = 131, 'Pelham Bay-Country Club-City Island' = 132, 'Pelham Parkway' = 133, 'Pomonok-Flushing Heights-Hillcrest' = 134, 'Port Richmond' = 135, 'Prospect Heights' = 136, 'Prospect Lefferts Gardens-Wingate' = 137, 'Queens Village' = 138, 'Queensboro Hill' = 139, 'Queensbridge-Ravenswood-Long Island City' = 140, 'Rego Park' = 141, 'Richmond Hill' = 142, 'Ridgewood' = 143, 'Rikers Island' = 144, 'Rosedale' = 145, 'Rossville-Woodrow' = 146, 'Rugby-Remsen Village' = 147, 'Schuylerville-Throgs Neck-Edgewater Park' = 148, 'Seagate-Coney Island' = 149, 'Sheepshead Bay-Gerritsen Beach-Manhattan Beach' = 150, 'SoHo-TriBeCa-Civic Center-Little Italy' = 151, 'Soundview-Bruckner' = 152, 'Soundview-Castle Hill-Clason Point-Harding Park' = 153, 'South Jamaica' = 154, 'South Ozone Park' = 155, 'Springfield Gardens North' = 156, 'Springfield Gardens South-Brookville' = 157, 'Spuyten Duyvil-Kingsbridge' = 158, 'St. Albans' = 159, 'Stapleton-Rosebank' = 160, 'Starrett City' = 161, 'Steinway' = 162, 'Stuyvesant Heights' = 163, 'Stuyvesant Town-Cooper Village' = 164, 'Sunset Park East' = 165, 'Sunset Park West' = 166, 'Todt Hill-Emerson Hill-Heartland Village-Lighthouse Hill' = 167, 'Turtle Bay-East Midtown' = 168, 'University Heights-Morris Heights' = 169, 'Upper East Side-Carnegie Hill' = 170, 'Upper West Side' = 171, 'Van Cortlandt Village' = 172, 'Van Nest-Morris Park-Westchester Square' = 173, 'Washington Heights North' = 174, 'Washington Heights South' = 175, 'West Brighton' = 176, 'West Concourse' = 177, 'West Farms-Bronx River' = 178, 'West New Brighton-New Brighton-St. 
George' = 179, 'West Village' = 180, 'Westchester-Unionport' = 181, 'Westerleigh' = 182, 'Whitestone' = 183, 'Williamsbridge-Olinville' = 184, 'Williamsburg' = 185, 'Windsor Terrace' = 186, 'Woodhaven' = 187, 'Woodlawn-Wakefield' = 188, 'Woodside' = 189, 'Yorkville' = 190, 'park-cemetery-etc-Bronx' = 191, 'park-cemetery-etc-Brooklyn' = 192, 'park-cemetery-etc-Manhattan' = 193, 'park-cemetery-etc-Queens' = 194, 'park-cemetery-etc-Staten Island' = 195)) AS dropoff_ntaname, - CAST(assumeNotNull(pickup_ntaname) AS Enum16('' = 0, 'Airport' = 1, 'Allerton-Pelham Gardens' = 2, 'Annadale-Huguenot-Prince\'s Bay-Eltingville' = 3, 'Arden Heights' = 4, 'Astoria' = 5, 'Auburndale' = 6, 'Baisley Park' = 7, 'Bath Beach' = 8, 'Battery Park City-Lower Manhattan' = 9, 'Bay Ridge' = 10, 'Bayside-Bayside Hills' = 11, 'Bedford' = 12, 'Bedford Park-Fordham North' = 13, 'Bellerose' = 14, 'Belmont' = 15, 'Bensonhurst East' = 16, 'Bensonhurst West' = 17, 'Borough Park' = 18, 'Breezy Point-Belle Harbor-Rockaway Park-Broad Channel' = 19, 'Briarwood-Jamaica Hills' = 20, 'Brighton Beach' = 21, 'Bronxdale' = 22, 'Brooklyn Heights-Cobble Hill' = 23, 'Brownsville' = 24, 'Bushwick North' = 25, 'Bushwick South' = 26, 'Cambria Heights' = 27, 'Canarsie' = 28, 'Carroll Gardens-Columbia Street-Red Hook' = 29, 'Central Harlem North-Polo Grounds' = 30, 'Central Harlem South' = 31, 'Charleston-Richmond Valley-Tottenville' = 32, 'Chinatown' = 33, 'Claremont-Bathgate' = 34, 'Clinton' = 35, 'Clinton Hill' = 36, 'Co-op City' = 37, 'College Point' = 38, 'Corona' = 39, 'Crotona Park East' = 40, 'Crown Heights North' = 41, 'Crown Heights South' = 42, 'Cypress Hills-City Line' = 43, 'DUMBO-Vinegar Hill-Downtown Brooklyn-Boerum Hill' = 44, 'Douglas Manor-Douglaston-Little Neck' = 45, 'Dyker Heights' = 46, 'East Concourse-Concourse Village' = 47, 'East Elmhurst' = 48, 'East Flatbush-Farragut' = 49, 'East Flushing' = 50, 'East Harlem North' = 51, 'East Harlem South' = 52, 'East New York' = 53, 'East New York (Pennsylvania Ave)' = 54, 'East Tremont' = 55, 'East Village' = 56, 'East Williamsburg' = 57, 'Eastchester-Edenwald-Baychester' = 58, 'Elmhurst' = 59, 'Elmhurst-Maspeth' = 60, 'Erasmus' = 61, 'Far Rockaway-Bayswater' = 62, 'Flatbush' = 63, 'Flatlands' = 64, 'Flushing' = 65, 'Fordham South' = 66, 'Forest Hills' = 67, 'Fort Greene' = 68, 'Fresh Meadows-Utopia' = 69, 'Ft. Totten-Bay Terrace-Clearview' = 70, 'Georgetown-Marine Park-Bergen Beach-Mill Basin' = 71, 'Glen Oaks-Floral Park-New Hyde Park' = 72, 'Glendale' = 73, 'Gramercy' = 74, 'Grasmere-Arrochar-Ft. 
Wadsworth' = 75, 'Gravesend' = 76, 'Great Kills' = 77, 'Greenpoint' = 78, 'Grymes Hill-Clifton-Fox Hills' = 79, 'Hamilton Heights' = 80, 'Hammels-Arverne-Edgemere' = 81, 'Highbridge' = 82, 'Hollis' = 83, 'Homecrest' = 84, 'Hudson Yards-Chelsea-Flatiron-Union Square' = 85, 'Hunters Point-Sunnyside-West Maspeth' = 86, 'Hunts Point' = 87, 'Jackson Heights' = 88, 'Jamaica' = 89, 'Jamaica Estates-Holliswood' = 90, 'Kensington-Ocean Parkway' = 91, 'Kew Gardens' = 92, 'Kew Gardens Hills' = 93, 'Kingsbridge Heights' = 94, 'Laurelton' = 95, 'Lenox Hill-Roosevelt Island' = 96, 'Lincoln Square' = 97, 'Lindenwood-Howard Beach' = 98, 'Longwood' = 99, 'Lower East Side' = 100, 'Madison' = 101, 'Manhattanville' = 102, 'Marble Hill-Inwood' = 103, 'Mariner\'s Harbor-Arlington-Port Ivory-Graniteville' = 104, 'Maspeth' = 105, 'Melrose South-Mott Haven North' = 106, 'Middle Village' = 107, 'Midtown-Midtown South' = 108, 'Midwood' = 109, 'Morningside Heights' = 110, 'Morrisania-Melrose' = 111, 'Mott Haven-Port Morris' = 112, 'Mount Hope' = 113, 'Murray Hill' = 114, 'Murray Hill-Kips Bay' = 115, 'New Brighton-Silver Lake' = 116, 'New Dorp-Midland Beach' = 117, 'New Springville-Bloomfield-Travis' = 118, 'North Corona' = 119, 'North Riverdale-Fieldston-Riverdale' = 120, 'North Side-South Side' = 121, 'Norwood' = 122, 'Oakland Gardens' = 123, 'Oakwood-Oakwood Beach' = 124, 'Ocean Hill' = 125, 'Ocean Parkway South' = 126, 'Old Astoria' = 127, 'Old Town-Dongan Hills-South Beach' = 128, 'Ozone Park' = 129, 'Park Slope-Gowanus' = 130, 'Parkchester' = 131, 'Pelham Bay-Country Club-City Island' = 132, 'Pelham Parkway' = 133, 'Pomonok-Flushing Heights-Hillcrest' = 134, 'Port Richmond' = 135, 'Prospect Heights' = 136, 'Prospect Lefferts Gardens-Wingate' = 137, 'Queens Village' = 138, 'Queensboro Hill' = 139, 'Queensbridge-Ravenswood-Long Island City' = 140, 'Rego Park' = 141, 'Richmond Hill' = 142, 'Ridgewood' = 143, 'Rikers Island' = 144, 'Rosedale' = 145, 'Rossville-Woodrow' = 146, 'Rugby-Remsen Village' = 147, 'Schuylerville-Throgs Neck-Edgewater Park' = 148, 'Seagate-Coney Island' = 149, 'Sheepshead Bay-Gerritsen Beach-Manhattan Beach' = 150, 'SoHo-TriBeCa-Civic Center-Little Italy' = 151, 'Soundview-Bruckner' = 152, 'Soundview-Castle Hill-Clason Point-Harding Park' = 153, 'South Jamaica' = 154, 'South Ozone Park' = 155, 'Springfield Gardens North' = 156, 'Springfield Gardens South-Brookville' = 157, 'Spuyten Duyvil-Kingsbridge' = 158, 'St. Albans' = 159, 'Stapleton-Rosebank' = 160, 'Starrett City' = 161, 'Steinway' = 162, 'Stuyvesant Heights' = 163, 'Stuyvesant Town-Cooper Village' = 164, 'Sunset Park East' = 165, 'Sunset Park West' = 166, 'Todt Hill-Emerson Hill-Heartland Village-Lighthouse Hill' = 167, 'Turtle Bay-East Midtown' = 168, 'University Heights-Morris Heights' = 169, 'Upper East Side-Carnegie Hill' = 170, 'Upper West Side' = 171, 'Van Cortlandt Village' = 172, 'Van Nest-Morris Park-Westchester Square' = 173, 'Washington Heights North' = 174, 'Washington Heights South' = 175, 'West Brighton' = 176, 'West Concourse' = 177, 'West Farms-Bronx River' = 178, 'West New Brighton-New Brighton-St. 
George' = 179, 'West Village' = 180, 'Westchester-Unionport' = 181, 'Westerleigh' = 182, 'Whitestone' = 183, 'Williamsbridge-Olinville' = 184, 'Williamsburg' = 185, 'Windsor Terrace' = 186, 'Woodhaven' = 187, 'Woodlawn-Wakefield' = 188, 'Woodside' = 189, 'Yorkville' = 190, 'park-cemetery-etc-Bronx' = 191, 'park-cemetery-etc-Brooklyn' = 192, 'park-cemetery-etc-Manhattan' = 193, 'park-cemetery-etc-Queens' = 194, 'park-cemetery-etc-Staten Island' = 195)) AS pickup_ntaname,
+toUInt16(ifNull(dropoff_puma, '0')) AS dropoff_puma
- toUInt16(ifNull(pickup_puma, '0')) AS pickup_puma,
+FROM trips
+```
- assumeNotNull(dropoff_nyct2010_gid) AS dropoff_nyct2010_gid,
- toFloat32(ifNull(dropoff_ctlabel, '0')) AS dropoff_ctlabel,
- assumeNotNull(dropoff_borocode) AS dropoff_borocode,
- CAST(assumeNotNull(dropoff_boroname) AS Enum8('Manhattan' = 1, 'Queens' = 4, 'Brooklyn' = 3, '' = 0, 'Bronx' = 2, 'Staten Island' = 5)) AS dropoff_boroname,
- toFixedString(ifNull(dropoff_ct2010, '000000'), 6) AS dropoff_ct2010,
- toFixedString(ifNull(dropoff_boroct2010, '0000000'), 7) AS dropoff_boroct2010,
- CAST(assumeNotNull(ifNull(dropoff_cdeligibil, ' ')) AS Enum8(' ' = 0, 'E' = 1, 'I' = 2)) AS dropoff_cdeligibil,
- toFixedString(ifNull(dropoff_ntacode, '0000'), 4) AS dropoff_ntacode,
+This takes 3030 seconds at a rate of about 428,000 rows per second.
+To load it faster, you can create the table with the `Log` engine instead of `MergeTree`. In that case, the load completes in less than 200 seconds.
- CAST(assumeNotNull(dropoff_ntaname) AS Enum16('' = 0, 'Airport' = 1, 'Allerton-Pelham Gardens' = 2, 'Annadale-Huguenot-Prince\'s Bay-Eltingville' = 3, 'Arden Heights' = 4, 'Astoria' = 5, 'Auburndale' = 6, 'Baisley Park' = 7, 'Bath Beach' = 8, 'Battery Park City-Lower Manhattan' = 9, 'Bay Ridge' = 10, 'Bayside-Bayside Hills' = 11, 'Bedford' = 12, 'Bedford Park-Fordham North' = 13, 'Bellerose' = 14, 'Belmont' = 15, 'Bensonhurst East' = 16, 'Bensonhurst West' = 17, 'Borough Park' = 18, 'Breezy Point-Belle Harbor-Rockaway Park-Broad Channel' = 19, 'Briarwood-Jamaica Hills' = 20, 'Brighton Beach' = 21, 'Bronxdale' = 22, 'Brooklyn Heights-Cobble Hill' = 23, 'Brownsville' = 24, 'Bushwick North' = 25, 'Bushwick South' = 26, 'Cambria Heights' = 27, 'Canarsie' = 28, 'Carroll Gardens-Columbia Street-Red Hook' = 29, 'Central Harlem North-Polo Grounds' = 30, 'Central Harlem South' = 31, 'Charleston-Richmond Valley-Tottenville' = 32, 'Chinatown' = 33, 'Claremont-Bathgate' = 34, 'Clinton' = 35, 'Clinton Hill' = 36, 'Co-op City' = 37, 'College Point' = 38, 'Corona' = 39, 'Crotona Park East' = 40, 'Crown Heights North' = 41, 'Crown Heights South' = 42, 'Cypress Hills-City Line' = 43, 'DUMBO-Vinegar Hill-Downtown Brooklyn-Boerum Hill' = 44, 'Douglas Manor-Douglaston-Little Neck' = 45, 'Dyker Heights' = 46, 'East Concourse-Concourse Village' = 47, 'East Elmhurst' = 48, 'East Flatbush-Farragut' = 49, 'East Flushing' = 50, 'East Harlem North' = 51, 'East Harlem South' = 52, 'East New York' = 53, 'East New York (Pennsylvania Ave)' = 54, 'East Tremont' = 55, 'East Village' = 56, 'East Williamsburg' = 57, 'Eastchester-Edenwald-Baychester' = 58, 'Elmhurst' = 59, 'Elmhurst-Maspeth' = 60, 'Erasmus' = 61, 'Far Rockaway-Bayswater' = 62, 'Flatbush' = 63, 'Flatlands' = 64, 'Flushing' = 65, 'Fordham South' = 66, 'Forest Hills' = 67, 'Fort Greene' = 68, 'Fresh Meadows-Utopia' = 69, 'Ft. Totten-Bay Terrace-Clearview' = 70, 'Georgetown-Marine Park-Bergen Beach-Mill Basin' = 71, 'Glen Oaks-Floral Park-New Hyde Park' = 72, 'Glendale' = 73, 'Gramercy' = 74, 'Grasmere-Arrochar-Ft. 
Wadsworth' = 75, 'Gravesend' = 76, 'Great Kills' = 77, 'Greenpoint' = 78, 'Grymes Hill-Clifton-Fox Hills' = 79, 'Hamilton Heights' = 80, 'Hammels-Arverne-Edgemere' = 81, 'Highbridge' = 82, 'Hollis' = 83, 'Homecrest' = 84, 'Hudson Yards-Chelsea-Flatiron-Union Square' = 85, 'Hunters Point-Sunnyside-West Maspeth' = 86, 'Hunts Point' = 87, 'Jackson Heights' = 88, 'Jamaica' = 89, 'Jamaica Estates-Holliswood' = 90, 'Kensington-Ocean Parkway' = 91, 'Kew Gardens' = 92, 'Kew Gardens Hills' = 93, 'Kingsbridge Heights' = 94, 'Laurelton' = 95, 'Lenox Hill-Roosevelt Island' = 96, 'Lincoln Square' = 97, 'Lindenwood-Howard Beach' = 98, 'Longwood' = 99, 'Lower East Side' = 100, 'Madison' = 101, 'Manhattanville' = 102, 'Marble Hill-Inwood' = 103, 'Mariner\'s Harbor-Arlington-Port Ivory-Graniteville' = 104, 'Maspeth' = 105, 'Melrose South-Mott Haven North' = 106, 'Middle Village' = 107, 'Midtown-Midtown South' = 108, 'Midwood' = 109, 'Morningside Heights' = 110, 'Morrisania-Melrose' = 111, 'Mott Haven-Port Morris' = 112, 'Mount Hope' = 113, 'Murray Hill' = 114, 'Murray Hill-Kips Bay' = 115, 'New Brighton-Silver Lake' = 116, 'New Dorp-Midland Beach' = 117, 'New Springville-Bloomfield-Travis' = 118, 'North Corona' = 119, 'North Riverdale-Fieldston-Riverdale' = 120, 'North Side-South Side' = 121, 'Norwood' = 122, 'Oakland Gardens' = 123, 'Oakwood-Oakwood Beach' = 124, 'Ocean Hill' = 125, 'Ocean Parkway South' = 126, 'Old Astoria' = 127, 'Old Town-Dongan Hills-South Beach' = 128, 'Ozone Park' = 129, 'Park Slope-Gowanus' = 130, 'Parkchester' = 131, 'Pelham Bay-Country Club-City Island' = 132, 'Pelham Parkway' = 133, 'Pomonok-Flushing Heights-Hillcrest' = 134, 'Port Richmond' = 135, 'Prospect Heights' = 136, 'Prospect Lefferts Gardens-Wingate' = 137, 'Queens Village' = 138, 'Queensboro Hill' = 139, 'Queensbridge-Ravenswood-Long Island City' = 140, 'Rego Park' = 141, 'Richmond Hill' = 142, 'Ridgewood' = 143, 'Rikers Island' = 144, 'Rosedale' = 145, 'Rossville-Woodrow' = 146, 'Rugby-Remsen Village' = 147, 'Schuylerville-Throgs Neck-Edgewater Park' = 148, 'Seagate-Coney Island' = 149, 'Sheepshead Bay-Gerritsen Beach-Manhattan Beach' = 150, 'SoHo-TriBeCa-Civic Center-Little Italy' = 151, 'Soundview-Bruckner' = 152, 'Soundview-Castle Hill-Clason Point-Harding Park' = 153, 'South Jamaica' = 154, 'South Ozone Park' = 155, 'Springfield Gardens North' = 156, 'Springfield Gardens South-Brookville' = 157, 'Spuyten Duyvil-Kingsbridge' = 158, 'St. Albans' = 159, 'Stapleton-Rosebank' = 160, 'Starrett City' = 161, 'Steinway' = 162, 'Stuyvesant Heights' = 163, 'Stuyvesant Town-Cooper Village' = 164, 'Sunset Park East' = 165, 'Sunset Park West' = 166, 'Todt Hill-Emerson Hill-Heartland Village-Lighthouse Hill' = 167, 'Turtle Bay-East Midtown' = 168, 'University Heights-Morris Heights' = 169, 'Upper East Side-Carnegie Hill' = 170, 'Upper West Side' = 171, 'Van Cortlandt Village' = 172, 'Van Nest-Morris Park-Westchester Square' = 173, 'Washington Heights North' = 174, 'Washington Heights South' = 175, 'West Brighton' = 176, 'West Concourse' = 177, 'West Farms-Bronx River' = 178, 'West New Brighton-New Brighton-St. 
George' = 179, 'West Village' = 180, 'Westchester-Unionport' = 181, 'Westerleigh' = 182, 'Whitestone' = 183, 'Williamsbridge-Olinville' = 184, 'Williamsburg' = 185, 'Windsor Terrace' = 186, 'Woodhaven' = 187, 'Woodlawn-Wakefield' = 188, 'Woodside' = 189, 'Yorkville' = 190, 'park-cemetery-etc-Bronx' = 191, 'park-cemetery-etc-Brooklyn' = 192, 'park-cemetery-etc-Manhattan' = 193, 'park-cemetery-etc-Queens' = 194, 'park-cemetery-etc-Staten Island' = 195)) AS dropoff_ntaname,
+The table uses 126 GB of disk space.
- toUInt16(ifNull(dropoff_puma, '0')) AS dropoff_puma
+``` sql
+SELECT formatReadableSize(sum(bytes)) FROM system.parts WHERE table = 'trips_mergetree' AND active
+```
- FROM trips
+``` text
+┌─formatReadableSize(sum(bytes))─┐
+│ 126.18 GiB │
+└────────────────────────────────┘
+```
-
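+Optionally, you can also force a merge of all data parts after loading; a minimal sketch of the optional OPTIMIZE step discussed just below (`FINAL` merges everything, which can take a while on a table of this size):
+
+``` sql
+-- Optional: merge the freshly written parts of the table into fewer, larger parts.
+OPTIMIZE TABLE trips_mergetree FINAL;
+```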
+Among other things, you can run the OPTIMIZE query on MergeTree, as sketched above. It is optional, because everything works fine without it.
-This takes 3030 seconds at about 428 thousand rows per second. For a faster load, you can create the table with the `Log` engine instead of `MergeTree`. In that case, the load completes in less than 200 seconds.
+## Download of Prepared Partitions {#download-of-prepared-partitions}
-This table occupies 126 GB of disk space.
+``` bash
+$ curl -O https://clickhouse-datasets.s3.yandex.net/trips_mergetree/partitions/trips_mergetree.tar
+$ tar xvf trips_mergetree.tar -C /var/lib/clickhouse # path to ClickHouse data directory
+$ # check permissions of unpacked data, fix if required
+$ sudo service clickhouse-server restart
+$ clickhouse-client --query "select count(*) from datasets.trips_mergetree"
+```
-
+!!! info "Info"
+    If you run the queries described below, you have to use the full table name, `datasets.trips_mergetree`.
- :) SELECT formatReadableSize(sum(bytes)) FROM system.parts WHERE table = 'trips_mergetree' AND active
-
- SELECT formatReadableSize(sum(bytes))
- FROM system.parts
- WHERE (table = 'trips_mergetree') AND active
-
- ┌─formatReadableSize(sum(bytes))─┐
- │ 126.18 GiB │
- └────────────────────────────────┘
-
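+If you would rather keep the short table name used throughout this guide, one option is to move the table out of the `datasets` database; a sketch, assuming the archive attached it as `datasets.trips_mergetree`:
+
+``` sql
+-- Move the table into the default database so the queries below can refer to it
+-- simply as trips_mergetree.
+RENAME TABLE datasets.trips_mergetree TO default.trips_mergetree;
+```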
-
-Among other things, you can run the OPTIMIZE query on MergeTree. But since everything is fine without this command, running it is not necessary.
-
-## Results on a single server {#ntyj-br-rwy-ykh-srwr}
-
+## Results on Single Server {#results-on-single-server}
Q1:
@@ -299,7 +304,7 @@ Q1:
``` sql
SELECT cab_type, count(*) FROM trips_mergetree GROUP BY cab_type
```
-0.490 seconds.
+0.490 seconds.
Q2:
@@ -307,7 +312,7 @@ Q2:
``` sql
SELECT passenger_count, avg(total_amount) FROM trips_mergetree GROUP BY passenger_count
```
-1.224 seconds.
+1.224 seconds.
Q3:
@@ -315,7 +320,7 @@ Q3:
``` sql
SELECT passenger_count, toYear(pickup_date) AS year, count(*) FROM trips_mergetree GROUP BY passenger_count, year
```
-2.104 seconds.
+2.104 seconds.
Q4:
@@ -326,76 +331,61 @@ GROUP BY passenger_count, year, distance
ORDER BY year, count(*) DESC
```
-3.593 seconds.
+3.593 seconds.
-
+The following server was used:
-The server configuration was as follows:
+Two Intel(R) Xeon(R) CPU E5-2650 v2 @ 2.60GHz, 16 physical cores total, 128 GiB RAM, 8x6 TB HDD in hardware RAID-5
-Two Intel(R) Xeon(R) CPU E5-2650 v2 @ 2.60GHz, 16 physical kernels total,
-128 GiB RAM,
-8x6 TB HD on hardware RAID-5
+Execution time is the best of three runs. Starting from the second run, queries read data from the file system cache. No further caching occurs: the data is read and processed on every run.
-Query times improve from the second run onward, because the queries read data from the file system cache. No further caching occurs: the data is read and processed on every run.
+Creating a table on three servers:
-Creating tables on three servers:
+On each server:
-On each server, run the following command:
+``` sql
+CREATE TABLE default.trips_mergetree_third ( trip_id UInt32, vendor_id Enum8('1' = 1, '2' = 2, 'CMT' = 3, 'VTS' = 4, 'DDS' = 5, 'B02512' = 10, 'B02598' = 11, 'B02617' = 12, 'B02682' = 13, 'B02764' = 14), pickup_date Date, pickup_datetime DateTime, dropoff_date Date, dropoff_datetime DateTime, store_and_fwd_flag UInt8, rate_code_id UInt8, pickup_longitude Float64, pickup_latitude Float64, dropoff_longitude Float64, dropoff_latitude Float64, passenger_count UInt8, trip_distance Float64, fare_amount Float32, extra Float32, mta_tax Float32, tip_amount Float32, tolls_amount Float32, ehail_fee Float32, improvement_surcharge Float32, total_amount Float32, payment_type_ Enum8('UNK' = 0, 'CSH' = 1, 'CRE' = 2, 'NOC' = 3, 'DIS' = 4), trip_type UInt8, pickup FixedString(25), dropoff FixedString(25), cab_type Enum8('yellow' = 1, 'green' = 2, 'uber' = 3), pickup_nyct2010_gid UInt8, pickup_ctlabel Float32, pickup_borocode UInt8, pickup_boroname Enum8('' = 0, 'Manhattan' = 1, 'Bronx' = 2, 'Brooklyn' = 3, 'Queens' = 4, 'Staten Island' = 5), pickup_ct2010 FixedString(6), pickup_boroct2010 FixedString(7), pickup_cdeligibil Enum8(' ' = 0, 'E' = 1, 'I' = 2), pickup_ntacode FixedString(4), pickup_ntaname Enum16('' = 0, 'Airport' = 1, 'Allerton-Pelham Gardens' = 2, 'Annadale-Huguenot-Prince\'s Bay-Eltingville' = 3, 'Arden Heights' = 4, 'Astoria' = 5, 'Auburndale' = 6, 'Baisley Park' = 7, 'Bath Beach' = 8, 'Battery Park City-Lower Manhattan' = 9, 'Bay Ridge' = 10, 'Bayside-Bayside Hills' = 11, 'Bedford' = 12, 'Bedford Park-Fordham North' = 13, 'Bellerose' = 14, 'Belmont' = 15, 'Bensonhurst East' = 16, 'Bensonhurst West' = 17, 'Borough Park' = 18, 'Breezy Point-Belle Harbor-Rockaway Park-Broad Channel' = 19, 'Briarwood-Jamaica Hills' = 20, 'Brighton Beach' = 21, 'Bronxdale' = 22, 'Brooklyn Heights-Cobble Hill' = 23, 'Brownsville' = 24, 'Bushwick North' = 25, 'Bushwick South' = 26, 'Cambria Heights' = 27, 'Canarsie' = 28, 'Carroll Gardens-Columbia Street-Red Hook' = 29, 'Central Harlem North-Polo Grounds' = 30, 'Central Harlem South' = 31, 'Charleston-Richmond Valley-Tottenville' = 32, 'Chinatown' = 33, 'Claremont-Bathgate' = 34, 'Clinton' = 35, 'Clinton Hill' = 36, 'Co-op City' = 37, 'College Point' = 38, 'Corona' = 39, 'Crotona Park East' = 40, 'Crown Heights North' = 41, 'Crown Heights South' = 42, 'Cypress Hills-City Line' = 43, 'DUMBO-Vinegar Hill-Downtown Brooklyn-Boerum Hill' = 44, 'Douglas Manor-Douglaston-Little Neck' = 45, 'Dyker Heights' = 46, 'East Concourse-Concourse Village' = 47, 'East Elmhurst' = 48, 'East Flatbush-Farragut' = 49, 'East Flushing' = 50, 'East Harlem North' = 51, 'East Harlem South' = 52, 'East New York' = 53, 'East New York (Pennsylvania Ave)' = 54, 'East Tremont' = 55, 'East 
Village' = 56, 'East Williamsburg' = 57, 'Eastchester-Edenwald-Baychester' = 58, 'Elmhurst' = 59, 'Elmhurst-Maspeth' = 60, 'Erasmus' = 61, 'Far Rockaway-Bayswater' = 62, 'Flatbush' = 63, 'Flatlands' = 64, 'Flushing' = 65, 'Fordham South' = 66, 'Forest Hills' = 67, 'Fort Greene' = 68, 'Fresh Meadows-Utopia' = 69, 'Ft. Totten-Bay Terrace-Clearview' = 70, 'Georgetown-Marine Park-Bergen Beach-Mill Basin' = 71, 'Glen Oaks-Floral Park-New Hyde Park' = 72, 'Glendale' = 73, 'Gramercy' = 74, 'Grasmere-Arrochar-Ft. Wadsworth' = 75, 'Gravesend' = 76, 'Great Kills' = 77, 'Greenpoint' = 78, 'Grymes Hill-Clifton-Fox Hills' = 79, 'Hamilton Heights' = 80, 'Hammels-Arverne-Edgemere' = 81, 'Highbridge' = 82, 'Hollis' = 83, 'Homecrest' = 84, 'Hudson Yards-Chelsea-Flatiron-Union Square' = 85, 'Hunters Point-Sunnyside-West Maspeth' = 86, 'Hunts Point' = 87, 'Jackson Heights' = 88, 'Jamaica' = 89, 'Jamaica Estates-Holliswood' = 90, 'Kensington-Ocean Parkway' = 91, 'Kew Gardens' = 92, 'Kew Gardens Hills' = 93, 'Kingsbridge Heights' = 94, 'Laurelton' = 95, 'Lenox Hill-Roosevelt Island' = 96, 'Lincoln Square' = 97, 'Lindenwood-Howard Beach' = 98, 'Longwood' = 99, 'Lower East Side' = 100, 'Madison' = 101, 'Manhattanville' = 102, 'Marble Hill-Inwood' = 103, 'Mariner\'s Harbor-Arlington-Port Ivory-Graniteville' = 104, 'Maspeth' = 105, 'Melrose South-Mott Haven North' = 106, 'Middle Village' = 107, 'Midtown-Midtown South' = 108, 'Midwood' = 109, 'Morningside Heights' = 110, 'Morrisania-Melrose' = 111, 'Mott Haven-Port Morris' = 112, 'Mount Hope' = 113, 'Murray Hill' = 114, 'Murray Hill-Kips Bay' = 115, 'New Brighton-Silver Lake' = 116, 'New Dorp-Midland Beach' = 117, 'New Springville-Bloomfield-Travis' = 118, 'North Corona' = 119, 'North Riverdale-Fieldston-Riverdale' = 120, 'North Side-South Side' = 121, 'Norwood' = 122, 'Oakland Gardens' = 123, 'Oakwood-Oakwood Beach' = 124, 'Ocean Hill' = 125, 'Ocean Parkway South' = 126, 'Old Astoria' = 127, 'Old Town-Dongan Hills-South Beach' = 128, 'Ozone Park' = 129, 'Park Slope-Gowanus' = 130, 'Parkchester' = 131, 'Pelham Bay-Country Club-City Island' = 132, 'Pelham Parkway' = 133, 'Pomonok-Flushing Heights-Hillcrest' = 134, 'Port Richmond' = 135, 'Prospect Heights' = 136, 'Prospect Lefferts Gardens-Wingate' = 137, 'Queens Village' = 138, 'Queensboro Hill' = 139, 'Queensbridge-Ravenswood-Long Island City' = 140, 'Rego Park' = 141, 'Richmond Hill' = 142, 'Ridgewood' = 143, 'Rikers Island' = 144, 'Rosedale' = 145, 'Rossville-Woodrow' = 146, 'Rugby-Remsen Village' = 147, 'Schuylerville-Throgs Neck-Edgewater Park' = 148, 'Seagate-Coney Island' = 149, 'Sheepshead Bay-Gerritsen Beach-Manhattan Beach' = 150, 'SoHo-TriBeCa-Civic Center-Little Italy' = 151, 'Soundview-Bruckner' = 152, 'Soundview-Castle Hill-Clason Point-Harding Park' = 153, 'South Jamaica' = 154, 'South Ozone Park' = 155, 'Springfield Gardens North' = 156, 'Springfield Gardens South-Brookville' = 157, 'Spuyten Duyvil-Kingsbridge' = 158, 'St. 
Albans' = 159, 'Stapleton-Rosebank' = 160, 'Starrett City' = 161, 'Steinway' = 162, 'Stuyvesant Heights' = 163, 'Stuyvesant Town-Cooper Village' = 164, 'Sunset Park East' = 165, 'Sunset Park West' = 166, 'Todt Hill-Emerson Hill-Heartland Village-Lighthouse Hill' = 167, 'Turtle Bay-East Midtown' = 168, 'University Heights-Morris Heights' = 169, 'Upper East Side-Carnegie Hill' = 170, 'Upper West Side' = 171, 'Van Cortlandt Village' = 172, 'Van Nest-Morris Park-Westchester Square' = 173, 'Washington Heights North' = 174, 'Washington Heights South' = 175, 'West Brighton' = 176, 'West Concourse' = 177, 'West Farms-Bronx River' = 178, 'West New Brighton-New Brighton-St. George' = 179, 'West Village' = 180, 'Westchester-Unionport' = 181, 'Westerleigh' = 182, 'Whitestone' = 183, 'Williamsbridge-Olinville' = 184, 'Williamsburg' = 185, 'Windsor Terrace' = 186, 'Woodhaven' = 187, 'Woodlawn-Wakefield' = 188, 'Woodside' = 189, 'Yorkville' = 190, 'park-cemetery-etc-Bronx' = 191, 'park-cemetery-etc-Brooklyn' = 192, 'park-cemetery-etc-Manhattan' = 193, 'park-cemetery-etc-Queens' = 194, 'park-cemetery-etc-Staten Island' = 195), pickup_puma UInt16, dropoff_nyct2010_gid UInt8, dropoff_ctlabel Float32, dropoff_borocode UInt8, dropoff_boroname Enum8('' = 0, 'Manhattan' = 1, 'Bronx' = 2, 'Brooklyn' = 3, 'Queens' = 4, 'Staten Island' = 5), dropoff_ct2010 FixedString(6), dropoff_boroct2010 FixedString(7), dropoff_cdeligibil Enum8(' ' = 0, 'E' = 1, 'I' = 2), dropoff_ntacode FixedString(4), dropoff_ntaname Enum16('' = 0, 'Airport' = 1, 'Allerton-Pelham Gardens' = 2, 'Annadale-Huguenot-Prince\'s Bay-Eltingville' = 3, 'Arden Heights' = 4, 'Astoria' = 5, 'Auburndale' = 6, 'Baisley Park' = 7, 'Bath Beach' = 8, 'Battery Park City-Lower Manhattan' = 9, 'Bay Ridge' = 10, 'Bayside-Bayside Hills' = 11, 'Bedford' = 12, 'Bedford Park-Fordham North' = 13, 'Bellerose' = 14, 'Belmont' = 15, 'Bensonhurst East' = 16, 'Bensonhurst West' = 17, 'Borough Park' = 18, 'Breezy Point-Belle Harbor-Rockaway Park-Broad Channel' = 19, 'Briarwood-Jamaica Hills' = 20, 'Brighton Beach' = 21, 'Bronxdale' = 22, 'Brooklyn Heights-Cobble Hill' = 23, 'Brownsville' = 24, 'Bushwick North' = 25, 'Bushwick South' = 26, 'Cambria Heights' = 27, 'Canarsie' = 28, 'Carroll Gardens-Columbia Street-Red Hook' = 29, 'Central Harlem North-Polo Grounds' = 30, 'Central Harlem South' = 31, 'Charleston-Richmond Valley-Tottenville' = 32, 'Chinatown' = 33, 'Claremont-Bathgate' = 34, 'Clinton' = 35, 'Clinton Hill' = 36, 'Co-op City' = 37, 'College Point' = 38, 'Corona' = 39, 'Crotona Park East' = 40, 'Crown Heights North' = 41, 'Crown Heights South' = 42, 'Cypress Hills-City Line' = 43, 'DUMBO-Vinegar Hill-Downtown Brooklyn-Boerum Hill' = 44, 'Douglas Manor-Douglaston-Little Neck' = 45, 'Dyker Heights' = 46, 'East Concourse-Concourse Village' = 47, 'East Elmhurst' = 48, 'East Flatbush-Farragut' = 49, 'East Flushing' = 50, 'East Harlem North' = 51, 'East Harlem South' = 52, 'East New York' = 53, 'East New York (Pennsylvania Ave)' = 54, 'East Tremont' = 55, 'East Village' = 56, 'East Williamsburg' = 57, 'Eastchester-Edenwald-Baychester' = 58, 'Elmhurst' = 59, 'Elmhurst-Maspeth' = 60, 'Erasmus' = 61, 'Far Rockaway-Bayswater' = 62, 'Flatbush' = 63, 'Flatlands' = 64, 'Flushing' = 65, 'Fordham South' = 66, 'Forest Hills' = 67, 'Fort Greene' = 68, 'Fresh Meadows-Utopia' = 69, 'Ft. Totten-Bay Terrace-Clearview' = 70, 'Georgetown-Marine Park-Bergen Beach-Mill Basin' = 71, 'Glen Oaks-Floral Park-New Hyde Park' = 72, 'Glendale' = 73, 'Gramercy' = 74, 'Grasmere-Arrochar-Ft. 
Wadsworth' = 75, 'Gravesend' = 76, 'Great Kills' = 77, 'Greenpoint' = 78, 'Grymes Hill-Clifton-Fox Hills' = 79, 'Hamilton Heights' = 80, 'Hammels-Arverne-Edgemere' = 81, 'Highbridge' = 82, 'Hollis' = 83, 'Homecrest' = 84, 'Hudson Yards-Chelsea-Flatiron-Union Square' = 85, 'Hunters Point-Sunnyside-West Maspeth' = 86, 'Hunts Point' = 87, 'Jackson Heights' = 88, 'Jamaica' = 89, 'Jamaica Estates-Holliswood' = 90, 'Kensington-Ocean Parkway' = 91, 'Kew Gardens' = 92, 'Kew Gardens Hills' = 93, 'Kingsbridge Heights' = 94, 'Laurelton' = 95, 'Lenox Hill-Roosevelt Island' = 96, 'Lincoln Square' = 97, 'Lindenwood-Howard Beach' = 98, 'Longwood' = 99, 'Lower East Side' = 100, 'Madison' = 101, 'Manhattanville' = 102, 'Marble Hill-Inwood' = 103, 'Mariner\'s Harbor-Arlington-Port Ivory-Graniteville' = 104, 'Maspeth' = 105, 'Melrose South-Mott Haven North' = 106, 'Middle Village' = 107, 'Midtown-Midtown South' = 108, 'Midwood' = 109, 'Morningside Heights' = 110, 'Morrisania-Melrose' = 111, 'Mott Haven-Port Morris' = 112, 'Mount Hope' = 113, 'Murray Hill' = 114, 'Murray Hill-Kips Bay' = 115, 'New Brighton-Silver Lake' = 116, 'New Dorp-Midland Beach' = 117, 'New Springville-Bloomfield-Travis' = 118, 'North Corona' = 119, 'North Riverdale-Fieldston-Riverdale' = 120, 'North Side-South Side' = 121, 'Norwood' = 122, 'Oakland Gardens' = 123, 'Oakwood-Oakwood Beach' = 124, 'Ocean Hill' = 125, 'Ocean Parkway South' = 126, 'Old Astoria' = 127, 'Old Town-Dongan Hills-South Beach' = 128, 'Ozone Park' = 129, 'Park Slope-Gowanus' = 130, 'Parkchester' = 131, 'Pelham Bay-Country Club-City Island' = 132, 'Pelham Parkway' = 133, 'Pomonok-Flushing Heights-Hillcrest' = 134, 'Port Richmond' = 135, 'Prospect Heights' = 136, 'Prospect Lefferts Gardens-Wingate' = 137, 'Queens Village' = 138, 'Queensboro Hill' = 139, 'Queensbridge-Ravenswood-Long Island City' = 140, 'Rego Park' = 141, 'Richmond Hill' = 142, 'Ridgewood' = 143, 'Rikers Island' = 144, 'Rosedale' = 145, 'Rossville-Woodrow' = 146, 'Rugby-Remsen Village' = 147, 'Schuylerville-Throgs Neck-Edgewater Park' = 148, 'Seagate-Coney Island' = 149, 'Sheepshead Bay-Gerritsen Beach-Manhattan Beach' = 150, 'SoHo-TriBeCa-Civic Center-Little Italy' = 151, 'Soundview-Bruckner' = 152, 'Soundview-Castle Hill-Clason Point-Harding Park' = 153, 'South Jamaica' = 154, 'South Ozone Park' = 155, 'Springfield Gardens North' = 156, 'Springfield Gardens South-Brookville' = 157, 'Spuyten Duyvil-Kingsbridge' = 158, 'St. Albans' = 159, 'Stapleton-Rosebank' = 160, 'Starrett City' = 161, 'Steinway' = 162, 'Stuyvesant Heights' = 163, 'Stuyvesant Town-Cooper Village' = 164, 'Sunset Park East' = 165, 'Sunset Park West' = 166, 'Todt Hill-Emerson Hill-Heartland Village-Lighthouse Hill' = 167, 'Turtle Bay-East Midtown' = 168, 'University Heights-Morris Heights' = 169, 'Upper East Side-Carnegie Hill' = 170, 'Upper West Side' = 171, 'Van Cortlandt Village' = 172, 'Van Nest-Morris Park-Westchester Square' = 173, 'Washington Heights North' = 174, 'Washington Heights South' = 175, 'West Brighton' = 176, 'West Concourse' = 177, 'West Farms-Bronx River' = 178, 'West New Brighton-New Brighton-St. 
George' = 179, 'West Village' = 180, 'Westchester-Unionport' = 181, 'Westerleigh' = 182, 'Whitestone' = 183, 'Williamsbridge-Olinville' = 184, 'Williamsburg' = 185, 'Windsor Terrace' = 186, 'Woodhaven' = 187, 'Woodlawn-Wakefield' = 188, 'Woodside' = 189, 'Yorkville' = 190, 'park-cemetery-etc-Bronx' = 191, 'park-cemetery-etc-Brooklyn' = 192, 'park-cemetery-etc-Manhattan' = 193, 'park-cemetery-etc-Queens' = 194, 'park-cemetery-etc-Staten Island' = 195), dropoff_puma UInt16) ENGINE = MergeTree(pickup_date, pickup_datetime, 8192) +``` -
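+Instead of running the statement by hand on every server, the DDL can also be broadcast with `ON CLUSTER`; a minimal sketch with a hypothetical toy table, assuming the same `perftest` cluster is defined in the server configuration (the real trips_mergetree_third definition above would take the same clause):
+
+``` sql
+-- Hypothetical table for illustration only: ON CLUSTER runs the CREATE on
+-- every node of the 'perftest' cluster in one step.
+CREATE TABLE default.example_on_cluster ON CLUSTER perftest
+(
+    key UInt32,
+    value String
+)
+ENGINE = MergeTree()
+ORDER BY key;
+```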
    - - CREATE TABLE default.trips_mergetree_third ( trip_id UInt32, vendor_id Enum8('1' = 1, '2' = 2, 'CMT' = 3, 'VTS' = 4, 'DDS' = 5, 'B02512' = 10, 'B02598' = 11, 'B02617' = 12, 'B02682' = 13, 'B02764' = 14), pickup_date Date, pickup_datetime DateTime, dropoff_date Date, dropoff_datetime DateTime, store_and_fwd_flag UInt8, rate_code_id UInt8, pickup_longitude Float64, pickup_latitude Float64, dropoff_longitude Float64, dropoff_latitude Float64, passenger_count UInt8, trip_distance Float64, fare_amount Float32, extra Float32, mta_tax Float32, tip_amount Float32, tolls_amount Float32, ehail_fee Float32, improvement_surcharge Float32, total_amount Float32, payment_type_ Enum8('UNK' = 0, 'CSH' = 1, 'CRE' = 2, 'NOC' = 3, 'DIS' = 4), trip_type UInt8, pickup FixedString(25), dropoff FixedString(25), cab_type Enum8('yellow' = 1, 'green' = 2, 'uber' = 3), pickup_nyct2010_gid UInt8, pickup_ctlabel Float32, pickup_borocode UInt8, pickup_boroname Enum8('' = 0, 'Manhattan' = 1, 'Bronx' = 2, 'Brooklyn' = 3, 'Queens' = 4, 'Staten Island' = 5), pickup_ct2010 FixedString(6), pickup_boroct2010 FixedString(7), pickup_cdeligibil Enum8(' ' = 0, 'E' = 1, 'I' = 2), pickup_ntacode FixedString(4), pickup_ntaname Enum16('' = 0, 'Airport' = 1, 'Allerton-Pelham Gardens' = 2, 'Annadale-Huguenot-Prince\'s Bay-Eltingville' = 3, 'Arden Heights' = 4, 'Astoria' = 5, 'Auburndale' = 6, 'Baisley Park' = 7, 'Bath Beach' = 8, 'Battery Park City-Lower Manhattan' = 9, 'Bay Ridge' = 10, 'Bayside-Bayside Hills' = 11, 'Bedford' = 12, 'Bedford Park-Fordham North' = 13, 'Bellerose' = 14, 'Belmont' = 15, 'Bensonhurst East' = 16, 'Bensonhurst West' = 17, 'Borough Park' = 18, 'Breezy Point-Belle Harbor-Rockaway Park-Broad Channel' = 19, 'Briarwood-Jamaica Hills' = 20, 'Brighton Beach' = 21, 'Bronxdale' = 22, 'Brooklyn Heights-Cobble Hill' = 23, 'Brownsville' = 24, 'Bushwick North' = 25, 'Bushwick South' = 26, 'Cambria Heights' = 27, 'Canarsie' = 28, 'Carroll Gardens-Columbia Street-Red Hook' = 29, 'Central Harlem North-Polo Grounds' = 30, 'Central Harlem South' = 31, 'Charleston-Richmond Valley-Tottenville' = 32, 'Chinatown' = 33, 'Claremont-Bathgate' = 34, 'Clinton' = 35, 'Clinton Hill' = 36, 'Co-op City' = 37, 'College Point' = 38, 'Corona' = 39, 'Crotona Park East' = 40, 'Crown Heights North' = 41, 'Crown Heights South' = 42, 'Cypress Hills-City Line' = 43, 'DUMBO-Vinegar Hill-Downtown Brooklyn-Boerum Hill' = 44, 'Douglas Manor-Douglaston-Little Neck' = 45, 'Dyker Heights' = 46, 'East Concourse-Concourse Village' = 47, 'East Elmhurst' = 48, 'East Flatbush-Farragut' = 49, 'East Flushing' = 50, 'East Harlem North' = 51, 'East Harlem South' = 52, 'East New York' = 53, 'East New York (Pennsylvania Ave)' = 54, 'East Tremont' = 55, 'East Village' = 56, 'East Williamsburg' = 57, 'Eastchester-Edenwald-Baychester' = 58, 'Elmhurst' = 59, 'Elmhurst-Maspeth' = 60, 'Erasmus' = 61, 'Far Rockaway-Bayswater' = 62, 'Flatbush' = 63, 'Flatlands' = 64, 'Flushing' = 65, 'Fordham South' = 66, 'Forest Hills' = 67, 'Fort Greene' = 68, 'Fresh Meadows-Utopia' = 69, 'Ft. Totten-Bay Terrace-Clearview' = 70, 'Georgetown-Marine Park-Bergen Beach-Mill Basin' = 71, 'Glen Oaks-Floral Park-New Hyde Park' = 72, 'Glendale' = 73, 'Gramercy' = 74, 'Grasmere-Arrochar-Ft. 
Wadsworth' = 75, 'Gravesend' = 76, 'Great Kills' = 77, 'Greenpoint' = 78, 'Grymes Hill-Clifton-Fox Hills' = 79, 'Hamilton Heights' = 80, 'Hammels-Arverne-Edgemere' = 81, 'Highbridge' = 82, 'Hollis' = 83, 'Homecrest' = 84, 'Hudson Yards-Chelsea-Flatiron-Union Square' = 85, 'Hunters Point-Sunnyside-West Maspeth' = 86, 'Hunts Point' = 87, 'Jackson Heights' = 88, 'Jamaica' = 89, 'Jamaica Estates-Holliswood' = 90, 'Kensington-Ocean Parkway' = 91, 'Kew Gardens' = 92, 'Kew Gardens Hills' = 93, 'Kingsbridge Heights' = 94, 'Laurelton' = 95, 'Lenox Hill-Roosevelt Island' = 96, 'Lincoln Square' = 97, 'Lindenwood-Howard Beach' = 98, 'Longwood' = 99, 'Lower East Side' = 100, 'Madison' = 101, 'Manhattanville' = 102, 'Marble Hill-Inwood' = 103, 'Mariner\'s Harbor-Arlington-Port Ivory-Graniteville' = 104, 'Maspeth' = 105, 'Melrose South-Mott Haven North' = 106, 'Middle Village' = 107, 'Midtown-Midtown South' = 108, 'Midwood' = 109, 'Morningside Heights' = 110, 'Morrisania-Melrose' = 111, 'Mott Haven-Port Morris' = 112, 'Mount Hope' = 113, 'Murray Hill' = 114, 'Murray Hill-Kips Bay' = 115, 'New Brighton-Silver Lake' = 116, 'New Dorp-Midland Beach' = 117, 'New Springville-Bloomfield-Travis' = 118, 'North Corona' = 119, 'North Riverdale-Fieldston-Riverdale' = 120, 'North Side-South Side' = 121, 'Norwood' = 122, 'Oakland Gardens' = 123, 'Oakwood-Oakwood Beach' = 124, 'Ocean Hill' = 125, 'Ocean Parkway South' = 126, 'Old Astoria' = 127, 'Old Town-Dongan Hills-South Beach' = 128, 'Ozone Park' = 129, 'Park Slope-Gowanus' = 130, 'Parkchester' = 131, 'Pelham Bay-Country Club-City Island' = 132, 'Pelham Parkway' = 133, 'Pomonok-Flushing Heights-Hillcrest' = 134, 'Port Richmond' = 135, 'Prospect Heights' = 136, 'Prospect Lefferts Gardens-Wingate' = 137, 'Queens Village' = 138, 'Queensboro Hill' = 139, 'Queensbridge-Ravenswood-Long Island City' = 140, 'Rego Park' = 141, 'Richmond Hill' = 142, 'Ridgewood' = 143, 'Rikers Island' = 144, 'Rosedale' = 145, 'Rossville-Woodrow' = 146, 'Rugby-Remsen Village' = 147, 'Schuylerville-Throgs Neck-Edgewater Park' = 148, 'Seagate-Coney Island' = 149, 'Sheepshead Bay-Gerritsen Beach-Manhattan Beach' = 150, 'SoHo-TriBeCa-Civic Center-Little Italy' = 151, 'Soundview-Bruckner' = 152, 'Soundview-Castle Hill-Clason Point-Harding Park' = 153, 'South Jamaica' = 154, 'South Ozone Park' = 155, 'Springfield Gardens North' = 156, 'Springfield Gardens South-Brookville' = 157, 'Spuyten Duyvil-Kingsbridge' = 158, 'St. Albans' = 159, 'Stapleton-Rosebank' = 160, 'Starrett City' = 161, 'Steinway' = 162, 'Stuyvesant Heights' = 163, 'Stuyvesant Town-Cooper Village' = 164, 'Sunset Park East' = 165, 'Sunset Park West' = 166, 'Todt Hill-Emerson Hill-Heartland Village-Lighthouse Hill' = 167, 'Turtle Bay-East Midtown' = 168, 'University Heights-Morris Heights' = 169, 'Upper East Side-Carnegie Hill' = 170, 'Upper West Side' = 171, 'Van Cortlandt Village' = 172, 'Van Nest-Morris Park-Westchester Square' = 173, 'Washington Heights North' = 174, 'Washington Heights South' = 175, 'West Brighton' = 176, 'West Concourse' = 177, 'West Farms-Bronx River' = 178, 'West New Brighton-New Brighton-St. 
George' = 179, 'West Village' = 180, 'Westchester-Unionport' = 181, 'Westerleigh' = 182, 'Whitestone' = 183, 'Williamsbridge-Olinville' = 184, 'Williamsburg' = 185, 'Windsor Terrace' = 186, 'Woodhaven' = 187, 'Woodlawn-Wakefield' = 188, 'Woodside' = 189, 'Yorkville' = 190, 'park-cemetery-etc-Bronx' = 191, 'park-cemetery-etc-Brooklyn' = 192, 'park-cemetery-etc-Manhattan' = 193, 'park-cemetery-etc-Queens' = 194, 'park-cemetery-etc-Staten Island' = 195), pickup_puma UInt16, dropoff_nyct2010_gid UInt8, dropoff_ctlabel Float32, dropoff_borocode UInt8, dropoff_boroname Enum8('' = 0, 'Manhattan' = 1, 'Bronx' = 2, 'Brooklyn' = 3, 'Queens' = 4, 'Staten Island' = 5), dropoff_ct2010 FixedString(6), dropoff_boroct2010 FixedString(7), dropoff_cdeligibil Enum8(' ' = 0, 'E' = 1, 'I' = 2), dropoff_ntacode FixedString(4), dropoff_ntaname Enum16('' = 0, 'Airport' = 1, 'Allerton-Pelham Gardens' = 2, 'Annadale-Huguenot-Prince\'s Bay-Eltingville' = 3, 'Arden Heights' = 4, 'Astoria' = 5, 'Auburndale' = 6, 'Baisley Park' = 7, 'Bath Beach' = 8, 'Battery Park City-Lower Manhattan' = 9, 'Bay Ridge' = 10, 'Bayside-Bayside Hills' = 11, 'Bedford' = 12, 'Bedford Park-Fordham North' = 13, 'Bellerose' = 14, 'Belmont' = 15, 'Bensonhurst East' = 16, 'Bensonhurst West' = 17, 'Borough Park' = 18, 'Breezy Point-Belle Harbor-Rockaway Park-Broad Channel' = 19, 'Briarwood-Jamaica Hills' = 20, 'Brighton Beach' = 21, 'Bronxdale' = 22, 'Brooklyn Heights-Cobble Hill' = 23, 'Brownsville' = 24, 'Bushwick North' = 25, 'Bushwick South' = 26, 'Cambria Heights' = 27, 'Canarsie' = 28, 'Carroll Gardens-Columbia Street-Red Hook' = 29, 'Central Harlem North-Polo Grounds' = 30, 'Central Harlem South' = 31, 'Charleston-Richmond Valley-Tottenville' = 32, 'Chinatown' = 33, 'Claremont-Bathgate' = 34, 'Clinton' = 35, 'Clinton Hill' = 36, 'Co-op City' = 37, 'College Point' = 38, 'Corona' = 39, 'Crotona Park East' = 40, 'Crown Heights North' = 41, 'Crown Heights South' = 42, 'Cypress Hills-City Line' = 43, 'DUMBO-Vinegar Hill-Downtown Brooklyn-Boerum Hill' = 44, 'Douglas Manor-Douglaston-Little Neck' = 45, 'Dyker Heights' = 46, 'East Concourse-Concourse Village' = 47, 'East Elmhurst' = 48, 'East Flatbush-Farragut' = 49, 'East Flushing' = 50, 'East Harlem North' = 51, 'East Harlem South' = 52, 'East New York' = 53, 'East New York (Pennsylvania Ave)' = 54, 'East Tremont' = 55, 'East Village' = 56, 'East Williamsburg' = 57, 'Eastchester-Edenwald-Baychester' = 58, 'Elmhurst' = 59, 'Elmhurst-Maspeth' = 60, 'Erasmus' = 61, 'Far Rockaway-Bayswater' = 62, 'Flatbush' = 63, 'Flatlands' = 64, 'Flushing' = 65, 'Fordham South' = 66, 'Forest Hills' = 67, 'Fort Greene' = 68, 'Fresh Meadows-Utopia' = 69, 'Ft. Totten-Bay Terrace-Clearview' = 70, 'Georgetown-Marine Park-Bergen Beach-Mill Basin' = 71, 'Glen Oaks-Floral Park-New Hyde Park' = 72, 'Glendale' = 73, 'Gramercy' = 74, 'Grasmere-Arrochar-Ft. 
Wadsworth' = 75, 'Gravesend' = 76, 'Great Kills' = 77, 'Greenpoint' = 78, 'Grymes Hill-Clifton-Fox Hills' = 79, 'Hamilton Heights' = 80, 'Hammels-Arverne-Edgemere' = 81, 'Highbridge' = 82, 'Hollis' = 83, 'Homecrest' = 84, 'Hudson Yards-Chelsea-Flatiron-Union Square' = 85, 'Hunters Point-Sunnyside-West Maspeth' = 86, 'Hunts Point' = 87, 'Jackson Heights' = 88, 'Jamaica' = 89, 'Jamaica Estates-Holliswood' = 90, 'Kensington-Ocean Parkway' = 91, 'Kew Gardens' = 92, 'Kew Gardens Hills' = 93, 'Kingsbridge Heights' = 94, 'Laurelton' = 95, 'Lenox Hill-Roosevelt Island' = 96, 'Lincoln Square' = 97, 'Lindenwood-Howard Beach' = 98, 'Longwood' = 99, 'Lower East Side' = 100, 'Madison' = 101, 'Manhattanville' = 102, 'Marble Hill-Inwood' = 103, 'Mariner\'s Harbor-Arlington-Port Ivory-Graniteville' = 104, 'Maspeth' = 105, 'Melrose South-Mott Haven North' = 106, 'Middle Village' = 107, 'Midtown-Midtown South' = 108, 'Midwood' = 109, 'Morningside Heights' = 110, 'Morrisania-Melrose' = 111, 'Mott Haven-Port Morris' = 112, 'Mount Hope' = 113, 'Murray Hill' = 114, 'Murray Hill-Kips Bay' = 115, 'New Brighton-Silver Lake' = 116, 'New Dorp-Midland Beach' = 117, 'New Springville-Bloomfield-Travis' = 118, 'North Corona' = 119, 'North Riverdale-Fieldston-Riverdale' = 120, 'North Side-South Side' = 121, 'Norwood' = 122, 'Oakland Gardens' = 123, 'Oakwood-Oakwood Beach' = 124, 'Ocean Hill' = 125, 'Ocean Parkway South' = 126, 'Old Astoria' = 127, 'Old Town-Dongan Hills-South Beach' = 128, 'Ozone Park' = 129, 'Park Slope-Gowanus' = 130, 'Parkchester' = 131, 'Pelham Bay-Country Club-City Island' = 132, 'Pelham Parkway' = 133, 'Pomonok-Flushing Heights-Hillcrest' = 134, 'Port Richmond' = 135, 'Prospect Heights' = 136, 'Prospect Lefferts Gardens-Wingate' = 137, 'Queens Village' = 138, 'Queensboro Hill' = 139, 'Queensbridge-Ravenswood-Long Island City' = 140, 'Rego Park' = 141, 'Richmond Hill' = 142, 'Ridgewood' = 143, 'Rikers Island' = 144, 'Rosedale' = 145, 'Rossville-Woodrow' = 146, 'Rugby-Remsen Village' = 147, 'Schuylerville-Throgs Neck-Edgewater Park' = 148, 'Seagate-Coney Island' = 149, 'Sheepshead Bay-Gerritsen Beach-Manhattan Beach' = 150, 'SoHo-TriBeCa-Civic Center-Little Italy' = 151, 'Soundview-Bruckner' = 152, 'Soundview-Castle Hill-Clason Point-Harding Park' = 153, 'South Jamaica' = 154, 'South Ozone Park' = 155, 'Springfield Gardens North' = 156, 'Springfield Gardens South-Brookville' = 157, 'Spuyten Duyvil-Kingsbridge' = 158, 'St. Albans' = 159, 'Stapleton-Rosebank' = 160, 'Starrett City' = 161, 'Steinway' = 162, 'Stuyvesant Heights' = 163, 'Stuyvesant Town-Cooper Village' = 164, 'Sunset Park East' = 165, 'Sunset Park West' = 166, 'Todt Hill-Emerson Hill-Heartland Village-Lighthouse Hill' = 167, 'Turtle Bay-East Midtown' = 168, 'University Heights-Morris Heights' = 169, 'Upper East Side-Carnegie Hill' = 170, 'Upper West Side' = 171, 'Van Cortlandt Village' = 172, 'Van Nest-Morris Park-Westchester Square' = 173, 'Washington Heights North' = 174, 'Washington Heights South' = 175, 'West Brighton' = 176, 'West Concourse' = 177, 'West Farms-Bronx River' = 178, 'West New Brighton-New Brighton-St. 
George' = 179, 'West Village' = 180, 'Westchester-Unionport' = 181, 'Westerleigh' = 182, 'Whitestone' = 183, 'Williamsbridge-Olinville' = 184, 'Williamsburg' = 185, 'Windsor Terrace' = 186, 'Woodhaven' = 187, 'Woodlawn-Wakefield' = 188, 'Woodside' = 189, 'Yorkville' = 190, 'park-cemetery-etc-Bronx' = 191, 'park-cemetery-etc-Brooklyn' = 192, 'park-cemetery-etc-Manhattan' = 193, 'park-cemetery-etc-Queens' = 194, 'park-cemetery-etc-Staten Island' = 195), dropoff_puma UInt16) ENGINE = MergeTree(pickup_date, pickup_datetime, 8192) - -
-
-On the source server, run the following command:
-
+On the source server:
``` sql
CREATE TABLE trips_mergetree_x3 AS trips_mergetree_third ENGINE = Distributed(perftest, default, trips_mergetree_third, rand())
```
-
-The following query redistributes the data:
-
+The following query redistributes the data:
``` sql
INSERT INTO trips_mergetree_x3 SELECT * FROM trips_mergetree
```
-
-This query takes 2454 seconds.
+This takes 2454 seconds.
On three servers:
-Q1: 0.212 seconds.
-Q2: 0.438 seconds.
-Q3: 0.733 seconds.
-Q4: 1.241 seconds.
+Q1: 0.212 seconds.
+Q2: 0.438 seconds.
+Q3: 0.733 seconds.
+Q4: 1.241 seconds.
-Since the queries scale linearly, the results hold no surprises.
+No surprises here, since the queries scale linearly.
-We also got the following results from running the queries on a cluster of 140 servers:
+We also have results from a cluster of 140 servers:
-Q1: 0.028 sec.
-Q2: 0.043 sec.
-Q3: 0.051 sec.
-Q4: 0.072 sec.
+Q1: 0.028 sec.
+Q2: 0.043 sec.
+Q3: 0.051 sec.
+Q4: 0.072 sec.
-In this case, query processing time is determined above all by network latency. We ran the queries with a client located in a Yandex data center in Finland against a cluster in Russia, which added about 20 ms of latency.
+In this case, query processing time is determined above all by network latency.
+We ran the queries using a client located in a Yandex data center in Finland on a cluster in Russia, which added about 20 ms of latency.
-## Results {#ntyj}
+## Summary {#summary}
-| nodes | Q1    | Q2    | Q3    | Q4    |
-|-------|-------|-------|-------|-------|
-| 1     | 0.490 | 1.224 | 2.104 | 3.593 |
-| 3     | 0.212 | 0.438 | 0.733 | 1.241 |
-| 140   | 0.028 | 0.043 | 0.051 | 0.072 |
+| servers | Q1    | Q2    | Q3    | Q4    |
+|---------|-------|-------|-------|-------|
+| 1       | 0.490 | 1.224 | 2.104 | 3.593 |
+| 3       | 0.212 | 0.438 | 0.733 | 1.241 |
+| 140     | 0.028 | 0.043 | 0.051 | 0.072 |
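+For reference, the three-server timings come from sending the same queries to the `Distributed` table, which fans the work out to every shard; Q1, for example, would be issued as:
+
+``` sql
+-- Runs on all three shards and merges the partial aggregates.
+SELECT cab_type, count(*) FROM trips_mergetree_x3 GROUP BY cab_type;
+```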
-
-[Original article](https://clickhouse.tech/docs/fa/getting_started/example_datasets/nyc_taxi/)
+[Original article](https://clickhouse.tech/docs/en/getting_started/example_datasets/nyc_taxi/)
diff --git a/docs/fa/getting_started/example_datasets/ontime.md b/docs/fa/getting_started/example_datasets/ontime.md
index 9f8387316ce..5864b3fe8e6 100644
--- a/docs/fa/getting_started/example_datasets/ontime.md
+++ b/docs/fa/getting_started/example_datasets/ontime.md
@@ -1,10 +1,20 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 15
+toc_title: "OnTime"
+---
-# OnTime {#ontime}
+# OnTime {#ontime}
-Downloading data:
+This dataset can be obtained in two ways:
-
+- import from raw data
+- download of prepared partitions
+
+## Import from Raw Data {#import-from-raw-data}
+
+Downloading data:
``` bash
for s in `seq 1987 2018`
do
for m in `seq 1 12`
do
done
done
```
-
-
(from https://github.com/Percona-Lab/ontime-airline-performance/blob/master/download.sh )
-Creating a table:
-
-
+Creating a table:
``` sql
CREATE TABLE `ontime` (
@@ -141,73 +147,81 @@ ORDER BY (Carrier, FlightDate)
SETTINGS index_granularity = 8192;
```
-
-Loading data:
-
+Loading data:
``` bash
-for i in *.zip; do echo $i; unzip -cq $i '*.csv' | sed 's/\.00//g' | clickhouse-client --host=example-perftest01j --query="INSERT INTO ontime FORMAT CSVWithNames"; done
+$ for i in *.zip; do echo $i; unzip -cq $i '*.csv' | sed 's/\.00//g' | clickhouse-client --host=example-perftest01j --query="INSERT INTO ontime FORMAT CSVWithNames"; done
```
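+A quick sanity check after loading; the exact count depends on how many yearly archives were downloaded:
+
+``` sql
+-- Row count of the freshly loaded table.
+SELECT count(*) FROM ontime;
+```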
+## Download of Prepared Partitions {#download-of-prepared-partitions}
-Queries:
+``` bash
+$ curl -O https://clickhouse-datasets.s3.yandex.net/ontime/partitions/ontime.tar
+$ tar xvf ontime.tar -C /var/lib/clickhouse # path to ClickHouse data directory
+$ # check permissions of unpacked data, fix if required
+$ sudo service clickhouse-server restart
+$ clickhouse-client --query "select count(*) from datasets.ontime"
+```
+
+!!! info "Info"
+    If you run the queries described below, you have to use the full table name, `datasets.ontime`.
+
+## Queries {#queries}
Q0.
    - ``` sql -select avg(c1) from (select Year, Month, count(*) as c1 from ontime group by Year, Month); +SELECT avg(c1) +FROM +( + SELECT Year, Month, count(*) AS c1 + FROM ontime + GROUP BY Year, Month +); ``` -
-
-Q1. The number of flights per day from 2000 to 2008
-
+Q1. The number of flights per day from the year 2000 to 2008
``` sql
-SELECT DayOfWeek, count(*) AS c FROM ontime WHERE Year >= 2000 AND Year <= 2008 GROUP BY DayOfWeek ORDER BY c DESC;
+SELECT DayOfWeek, count(*) AS c
+FROM ontime
+WHERE Year>=2000 AND Year<=2008
+GROUP BY DayOfWeek
+ORDER BY c DESC;
```
-
-Q2. The number of flights delayed by more than 10 minutes, grouped by day of the week, for 2000-2008
-
+Q2. The number of flights delayed by more than 10 minutes, grouped by the day of the week, for 2000-2008
``` sql
-SELECT DayOfWeek, count(*) AS c FROM ontime WHERE DepDelay>10 AND Year >= 2000 AND Year <= 2008 GROUP BY DayOfWeek ORDER BY c DESC
+SELECT DayOfWeek, count(*) AS c
+FROM ontime
+WHERE DepDelay>10 AND Year>=2000 AND Year<=2008
+GROUP BY DayOfWeek
+ORDER BY c DESC;
```
-
-Q3. The number of delays by airport for 2000-2008
-
+Q3. The number of delays by airport for 2000-2008
``` sql
-SELECT Origin, count(*) AS c FROM ontime WHERE DepDelay>10 AND Year >= 2000 AND Year <= 2008 GROUP BY Origin ORDER BY c DESC LIMIT 10
+SELECT Origin, count(*) AS c
+FROM ontime
+WHERE DepDelay>10 AND Year>=2000 AND Year<=2008
+GROUP BY Origin
+ORDER BY c DESC
+LIMIT 10;
```
-
-Q4. The number of delays by carrier for 2007
-
+Q4. The number of delays by carrier for 2007
``` sql
-SELECT Carrier, count(*) FROM ontime WHERE DepDelay>10 AND Year = 2007 GROUP BY Carrier ORDER BY count(*) DESC
+SELECT Carrier, count(*)
+FROM ontime
+WHERE DepDelay>10 AND Year=2007
+GROUP BY Carrier
+ORDER BY count(*) DESC;
```
-
-Q5. The percentage of delays by carrier for 2007
-
+Q5. The percentage of delays by carrier for 2007
``` sql
SELECT Carrier, c, c2, c*100/c2 as c3
@@ -233,21 +247,17 @@
ORDER BY c3 DESC;
```
-
-A better version of the query
-
+A better version of the same query:
``` sql
-SELECT Carrier, avg(DepDelay > 10) * 100 AS c3 FROM ontime WHERE Year = 2007 GROUP BY Carrier ORDER BY c3 DESC
+SELECT Carrier, avg(DepDelay>10)*100 AS c3
+FROM ontime
+WHERE Year=2007
+GROUP BY Carrier
+ORDER BY c3 DESC
```
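+This works because a comparison in ClickHouse returns 0 or 1, so the average of `DepDelay>10` is exactly the fraction of delayed flights; a self-contained illustration:
+
+``` sql
+-- numbers(10) yields 0..9; four of those values exceed 5, so the result is 40.
+SELECT avg(number > 5) * 100 AS pct FROM numbers(10);
+```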
-
-Q6. The same as the previous query, but for a broader range of years, 2000-2008
-
+Q6. The previous request for a broader range of years, 2000-2008
``` sql
SELECT Carrier, c, c2, c*100/c2 as c3
@@ -258,7 +268,7 @@ FROM
        count(*) AS c
    FROM ontime
    WHERE DepDelay>10
-        AND Year >= 2000 AND Year <= 2008
+        AND Year>=2000 AND Year<=2008
    GROUP BY Carrier
)
JOIN
@@ -267,27 +277,23 @@ JOIN
        Carrier,
        count(*) AS c2
    FROM ontime
-    WHERE Year >= 2000 AND Year <= 2008
+    WHERE Year>=2000 AND Year<=2008
    GROUP BY Carrier
) USING Carrier
ORDER BY c3 DESC;
```
-
-A better version of the query
-
+A better version of the same query:
``` sql
-SELECT Carrier, avg(DepDelay > 10) * 100 AS c3 FROM ontime WHERE Year >= 2000 AND Year <= 2008 GROUP BY Carrier ORDER BY c3 DESC
+SELECT Carrier, avg(DepDelay>10)*100 AS c3
+FROM ontime
+WHERE Year>=2000 AND Year<=2008
+GROUP BY Carrier
+ORDER BY c3 DESC;
```
-
-Q7. The percentage of flights delayed by more than 10 minutes, by year
-
+Q7. The percentage of flights delayed for more than 10 minutes, by year
``` sql
SELECT Year, c1/c2
@@ -308,82 +314,93 @@ JOIN
    from ontime
    GROUP BY Year
) USING (Year)
-ORDER BY Year
+ORDER BY Year;
```
-
-A better version of the query
-
+A better version of the same query:
``` sql
-SELECT Year, avg(DepDelay > 10)*100 FROM ontime GROUP BY Year ORDER BY Year
+SELECT Year, avg(DepDelay>10)*100
+FROM ontime
+GROUP BY Year
+ORDER BY Year;
```
-
-Q8. The most popular destinations by the number of directly connected cities, for 2000-2010
-
+Q8. The most popular destinations by the number of directly connected cities, for various year ranges
``` sql
-SELECT DestCityName, uniqExact(OriginCityName) AS u FROM ontime WHERE Year >= 2000 and Year <= 2010 GROUP BY DestCityName ORDER BY u DESC LIMIT 10;
+SELECT DestCityName, uniqExact(OriginCityName) AS u
+FROM ontime
+WHERE Year >= 2000 and Year <= 2010
+GROUP BY DestCityName
+ORDER BY u DESC LIMIT 10;
```
    - Q9. -
    - ``` sql -select Year, count(*) as c1 from ontime group by Year; +SELECT Year, count(*) AS c1 +FROM ontime +GROUP BY Year; ``` -
    - Q10. -
    - ``` sql -select - min(Year), max(Year), Carrier, count(*) as cnt, - sum(ArrDelayMinutes>30) as flights_delayed, - round(sum(ArrDelayMinutes>30)/count(*),2) as rate +SELECT + min(Year), max(Year), Carrier, count(*) AS cnt, + sum(ArrDelayMinutes>30) AS flights_delayed, + round(sum(ArrDelayMinutes>30)/count(*),2) AS rate FROM ontime WHERE - DayOfWeek not in (6,7) and OriginState not in ('AK', 'HI', 'PR', 'VI') - and DestState not in ('AK', 'HI', 'PR', 'VI') - and FlightDate < '2010-01-01' + DayOfWeek NOT IN (6,7) AND OriginState NOT IN ('AK', 'HI', 'PR', 'VI') + AND DestState NOT IN ('AK', 'HI', 'PR', 'VI') + AND FlightDate < '2010-01-01' GROUP by Carrier -HAVING cnt > 100000 and max(Year) > 1990 +HAVING cnt>100000 and max(Year)>1990 ORDER by rate DESC LIMIT 1000; ``` -
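Q10 relies on the same 0/1 arithmetic as the earlier queries: `sum(ArrDelayMinutes>30)` counts the matching rows, so `rate` is the delayed share. A tiny self-contained illustration using `numbers()`:

``` sql
-- summing a boolean counts matches; countIf is the explicit spelling
SELECT
    sum(number % 2 = 1) AS odd_via_sum,
    countIf(number % 2 = 1) AS odd_via_countif
FROM numbers(100);
```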
    - -query های بیشتر: - -
    +پاداش: ``` sql -SELECT avg(cnt) FROM (SELECT Year,Month,count(*) AS cnt FROM ontime WHERE DepDel15=1 GROUP BY Year,Month) +SELECT avg(cnt) +FROM +( + SELECT Year,Month,count(*) AS cnt + FROM ontime + WHERE DepDel15=1 + GROUP BY Year,Month +); -select avg(c1) from (select Year,Month,count(*) as c1 from ontime group by Year,Month) +SELECT avg(c1) FROM +( + SELECT Year,Month,count(*) AS c1 + FROM ontime + GROUP BY Year,Month +); -SELECT DestCityName, uniqExact(OriginCityName) AS u FROM ontime GROUP BY DestCityName ORDER BY u DESC LIMIT 10; +SELECT DestCityName, uniqExact(OriginCityName) AS u +FROM ontime +GROUP BY DestCityName +ORDER BY u DESC +LIMIT 10; -SELECT OriginCityName, DestCityName, count() AS c FROM ontime GROUP BY OriginCityName, DestCityName ORDER BY c DESC LIMIT 10; +SELECT OriginCityName, DestCityName, count() AS c +FROM ontime +GROUP BY OriginCityName, DestCityName +ORDER BY c DESC +LIMIT 10; -SELECT OriginCityName, count() AS c FROM ontime GROUP BY OriginCityName ORDER BY c DESC LIMIT 10; +SELECT OriginCityName, count() AS c +FROM ontime +GROUP BY OriginCityName +ORDER BY c DESC +LIMIT 10; ``` -
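These bonus queries nest one aggregation inside another: per-month counts first, then their average. The same shape extends to other granularities; for example, average daily flight volume per year — a sketch assuming the `FlightDate` column used in Q10:

``` sql
SELECT Year, avg(c) AS avg_daily_flights
FROM
(
    SELECT Year, FlightDate, count(*) AS c
    FROM ontime
    GROUP BY Year, FlightDate
)
GROUP BY Year
ORDER BY Year;
```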
-این تست های performance توسط Vadim Tkachenko انجام شده است. برای اطلاعات بیشتر به لینک های زیر مراجعه کنید:
+این تست عملکرد توسط Vadim Tkachenko ایجاد شده است. برای جزئیات بیشتر، ببینید:

- https://www.percona.com/blog/2009/10/02/analyzing-air-traffic-performance-with-infobright-and-monetdb/
- https://www.percona.com/blog/2009/10/26/air-traffic-queries-in-luciddb/
@@ -392,6 +409,4 @@ SELECT OriginCityName, count() AS c FROM ontime GROUP BY OriginCityName ORDER BY
- https://www.percona.com/blog/2016/01/07/apache-spark-with-air-ontime-performance-data/
- http://nickmakos.blogspot.ru/2012/08/analyzing-air-traffic-performance-with.html

-
    - -[مقاله اصلی](https://clickhouse.tech/docs/fa/getting_started/example_datasets/ontime/) +[مقاله اصلی](https://clickhouse.tech/docs/en/getting_started/example_datasets/ontime/) diff --git a/docs/fa/getting_started/example_datasets/star_schema.md b/docs/fa/getting_started/example_datasets/star_schema.md index 9b699e59f50..d00fd99e0ff 100644 --- a/docs/fa/getting_started/example_datasets/star_schema.md +++ b/docs/fa/getting_started/example_datasets/star_schema.md @@ -1,100 +1,371 @@ -
    +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 20 +toc_title: "\u0645\u0639\u06CC\u0627\u0631 \u0637\u0631\u062D\u0648\u0627\u0631\u0647\ + \ \u0633\u062A\u0627\u0631\u0647" +--- -# بنچمارک Star Schema {#bnchmrkh-star-schema} +# معیار طرحواره ستاره {#star-schema-benchmark} -از لینک روبرو dbgen رو کامپایل کنید. https://github.com/vadimtk/ssb-dbgen - -
    +تدوین نرم افزار: ``` bash -git clone git@github.com:vadimtk/ssb-dbgen.git -cd ssb-dbgen -make +$ git clone git@github.com:vadimtk/ssb-dbgen.git +$ cd ssb-dbgen +$ make ``` -
    +تولید داده: -در هنگام پردازش چند warnings نمایش داده می شود که مشکلی نیست و طبیعی است. - -`dbgen` و `dists.dss` را در یک جا با 800 گیگابایت فضای حالی دیسک قرار دهید. - -تولید داده ها: - -
    +!!! warning "توجه" + با `-s 100` تولید نرم افزار 600 میلیون ردیف (67 گیگابایت), در حالی که `-s 1000` این تولید 6 میلیارد ردیف (که طول می کشد زمان زیادی) ``` bash -./dbgen -s 1000 -T c -./dbgen -s 1000 -T l +$ ./dbgen -s 1000 -T c +$ ./dbgen -s 1000 -T l +$ ./dbgen -s 1000 -T p +$ ./dbgen -s 1000 -T s +$ ./dbgen -s 1000 -T d ``` -
    - -ساخت جداول در ClickHouse - -
    +ایجاد جداول در محل کلیک: ``` sql -CREATE TABLE lineorder ( - LO_ORDERKEY UInt32, - LO_LINENUMBER UInt8, - LO_CUSTKEY UInt32, - LO_PARTKEY UInt32, - LO_SUPPKEY UInt32, - LO_ORDERDATE Date, - LO_ORDERPRIORITY String, - LO_SHIPPRIORITY UInt8, - LO_QUANTITY UInt8, - LO_EXTENDEDPRICE UInt32, - LO_ORDTOTALPRICE UInt32, - LO_DISCOUNT UInt8, - LO_REVENUE UInt32, - LO_SUPPLYCOST UInt32, - LO_TAX UInt8, - LO_COMMITDATE Date, - LO_SHIPMODE String -)Engine=MergeTree(LO_ORDERDATE,(LO_ORDERKEY,LO_LINENUMBER,LO_ORDERDATE),8192); - -CREATE TABLE customer ( +CREATE TABLE customer +( C_CUSTKEY UInt32, C_NAME String, C_ADDRESS String, - C_CITY String, - C_NATION String, - C_REGION String, + C_CITY LowCardinality(String), + C_NATION LowCardinality(String), + C_REGION LowCardinality(String), C_PHONE String, - C_MKTSEGMENT String, - C_FAKEDATE Date -)Engine=MergeTree(C_FAKEDATE,(C_CUSTKEY,C_FAKEDATE),8192); + C_MKTSEGMENT LowCardinality(String) +) +ENGINE = MergeTree ORDER BY (C_CUSTKEY); -CREATE TABLE part ( +CREATE TABLE lineorder +( + LO_ORDERKEY UInt32, + LO_LINENUMBER UInt8, + LO_CUSTKEY UInt32, + LO_PARTKEY UInt32, + LO_SUPPKEY UInt32, + LO_ORDERDATE Date, + LO_ORDERPRIORITY LowCardinality(String), + LO_SHIPPRIORITY UInt8, + LO_QUANTITY UInt8, + LO_EXTENDEDPRICE UInt32, + LO_ORDTOTALPRICE UInt32, + LO_DISCOUNT UInt8, + LO_REVENUE UInt32, + LO_SUPPLYCOST UInt32, + LO_TAX UInt8, + LO_COMMITDATE Date, + LO_SHIPMODE LowCardinality(String) +) +ENGINE = MergeTree PARTITION BY toYear(LO_ORDERDATE) ORDER BY (LO_ORDERDATE, LO_ORDERKEY); + +CREATE TABLE part +( P_PARTKEY UInt32, P_NAME String, - P_MFGR String, - P_CATEGORY String, - P_BRAND String, - P_COLOR String, - P_TYPE String, + P_MFGR LowCardinality(String), + P_CATEGORY LowCardinality(String), + P_BRAND LowCardinality(String), + P_COLOR LowCardinality(String), + P_TYPE LowCardinality(String), P_SIZE UInt8, - P_CONTAINER String, - P_FAKEDATE Date -)Engine=MergeTree(P_FAKEDATE,(P_PARTKEY,P_FAKEDATE),8192); + P_CONTAINER LowCardinality(String) +) +ENGINE = MergeTree ORDER BY P_PARTKEY; -CREATE TABLE lineorderd AS lineorder ENGINE = Distributed(perftest_3shards_1replicas, default, lineorder, rand()); -CREATE TABLE customerd AS customer ENGINE = Distributed(perftest_3shards_1replicas, default, customer, rand()); -CREATE TABLE partd AS part ENGINE = Distributed(perftest_3shards_1replicas, default, part, rand()); +CREATE TABLE supplier +( + S_SUPPKEY UInt32, + S_NAME String, + S_ADDRESS String, + S_CITY LowCardinality(String), + S_NATION LowCardinality(String), + S_REGION LowCardinality(String), + S_PHONE String +) +ENGINE = MergeTree ORDER BY S_SUPPKEY; ``` -
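After running the DDL above, it can be worth confirming that each table got the intended engine and sort key before loading hundreds of millions of rows. One way to check, assuming the tables were created in the current database and a ClickHouse version whose `system.tables` exposes these columns:

``` sql
SELECT name, engine, partition_key, sorting_key
FROM system.tables
WHERE database = currentDatabase()
    AND name IN ('customer', 'lineorder', 'part', 'supplier');
```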
    - -برای تست بر روی یک سرور، فقط از جداول MergeTree استفاده کنید. برای تست توزیع شده، شما نیاز به کانفیگ `perftest_3shards_1replicas` در فایل کانفیگ را دارید. در ادامه جداول MergeTree را در هر سرور ایجاد کنید و موارد بالا را توزیع کنید. - -دانلود داده ها (تغییر `customer` به `customerd` در نسخه ی توزیع شده): - -
    +درج داده: ``` bash -cat customer.tbl | sed 's/$/2000-01-01/' | clickhouse-client --query "INSERT INTO customer FORMAT CSV" -cat lineorder.tbl | clickhouse-client --query "INSERT INTO lineorder FORMAT CSV" +$ clickhouse-client --query "INSERT INTO customer FORMAT CSV" < customer.tbl +$ clickhouse-client --query "INSERT INTO part FORMAT CSV" < part.tbl +$ clickhouse-client --query "INSERT INTO supplier FORMAT CSV" < supplier.tbl +$ clickhouse-client --query "INSERT INTO lineorder FORMAT CSV" < lineorder.tbl ``` -[مقاله اصلی](https://clickhouse.tech/docs/fa/getting_started/example_datasets/star_schema/) +تبدیل “star schema” به جریمه “flat schema”: + +``` sql +SET max_memory_usage = 20000000000; + +CREATE TABLE lineorder_flat +ENGINE = MergeTree +PARTITION BY toYear(LO_ORDERDATE) +ORDER BY (LO_ORDERDATE, LO_ORDERKEY) AS +SELECT + l.LO_ORDERKEY AS LO_ORDERKEY, + l.LO_LINENUMBER AS LO_LINENUMBER, + l.LO_CUSTKEY AS LO_CUSTKEY, + l.LO_PARTKEY AS LO_PARTKEY, + l.LO_SUPPKEY AS LO_SUPPKEY, + l.LO_ORDERDATE AS LO_ORDERDATE, + l.LO_ORDERPRIORITY AS LO_ORDERPRIORITY, + l.LO_SHIPPRIORITY AS LO_SHIPPRIORITY, + l.LO_QUANTITY AS LO_QUANTITY, + l.LO_EXTENDEDPRICE AS LO_EXTENDEDPRICE, + l.LO_ORDTOTALPRICE AS LO_ORDTOTALPRICE, + l.LO_DISCOUNT AS LO_DISCOUNT, + l.LO_REVENUE AS LO_REVENUE, + l.LO_SUPPLYCOST AS LO_SUPPLYCOST, + l.LO_TAX AS LO_TAX, + l.LO_COMMITDATE AS LO_COMMITDATE, + l.LO_SHIPMODE AS LO_SHIPMODE, + c.C_NAME AS C_NAME, + c.C_ADDRESS AS C_ADDRESS, + c.C_CITY AS C_CITY, + c.C_NATION AS C_NATION, + c.C_REGION AS C_REGION, + c.C_PHONE AS C_PHONE, + c.C_MKTSEGMENT AS C_MKTSEGMENT, + s.S_NAME AS S_NAME, + s.S_ADDRESS AS S_ADDRESS, + s.S_CITY AS S_CITY, + s.S_NATION AS S_NATION, + s.S_REGION AS S_REGION, + s.S_PHONE AS S_PHONE, + p.P_NAME AS P_NAME, + p.P_MFGR AS P_MFGR, + p.P_CATEGORY AS P_CATEGORY, + p.P_BRAND AS P_BRAND, + p.P_COLOR AS P_COLOR, + p.P_TYPE AS P_TYPE, + p.P_SIZE AS P_SIZE, + p.P_CONTAINER AS P_CONTAINER +FROM lineorder AS l +INNER JOIN customer AS c ON c.C_CUSTKEY = l.LO_CUSTKEY +INNER JOIN supplier AS s ON s.S_SUPPKEY = l.LO_SUPPKEY +INNER JOIN part AS p ON p.P_PARTKEY = l.LO_PARTKEY; +``` + +در حال اجرا نمایش داده شد: + +Q1.1 + +``` sql +SELECT sum(LO_EXTENDEDPRICE * LO_DISCOUNT) AS revenue +FROM lineorder_flat +WHERE toYear(LO_ORDERDATE) = 1993 AND LO_DISCOUNT BETWEEN 1 AND 3 AND LO_QUANTITY < 25; +``` + +Q1.2 + +``` sql +SELECT sum(LO_EXTENDEDPRICE * LO_DISCOUNT) AS revenue +FROM lineorder_flat +WHERE toYYYYMM(LO_ORDERDATE) = 199401 AND LO_DISCOUNT BETWEEN 4 AND 6 AND LO_QUANTITY BETWEEN 26 AND 35; +``` + +Q1.3 + +``` sql +SELECT sum(LO_EXTENDEDPRICE * LO_DISCOUNT) AS revenue +FROM lineorder_flat +WHERE toISOWeek(LO_ORDERDATE) = 6 AND toYear(LO_ORDERDATE) = 1994 + AND LO_DISCOUNT BETWEEN 5 AND 7 AND LO_QUANTITY BETWEEN 26 AND 35; +``` + +Q2.1 + +``` sql +SELECT + sum(LO_REVENUE), + toYear(LO_ORDERDATE) AS year, + P_BRAND +FROM lineorder_flat +WHERE P_CATEGORY = 'MFGR#12' AND S_REGION = 'AMERICA' +GROUP BY + year, + P_BRAND +ORDER BY + year, + P_BRAND; +``` + +Q2.2 + +``` sql +SELECT + sum(LO_REVENUE), + toYear(LO_ORDERDATE) AS year, + P_BRAND +FROM lineorder_flat +WHERE P_BRAND >= 'MFGR#2221' AND P_BRAND <= 'MFGR#2228' AND S_REGION = 'ASIA' +GROUP BY + year, + P_BRAND +ORDER BY + year, + P_BRAND; +``` + +Q2.3 + +``` sql +SELECT + sum(LO_REVENUE), + toYear(LO_ORDERDATE) AS year, + P_BRAND +FROM lineorder_flat +WHERE P_BRAND = 'MFGR#2239' AND S_REGION = 'EUROPE' +GROUP BY + year, + P_BRAND +ORDER BY + year, + P_BRAND; +``` + +Q3.1 + +``` sql +SELECT + C_NATION, + S_NATION, + 
toYear(LO_ORDERDATE) AS year, + sum(LO_REVENUE) AS revenue +FROM lineorder_flat +WHERE C_REGION = 'ASIA' AND S_REGION = 'ASIA' AND year >= 1992 AND year <= 1997 +GROUP BY + C_NATION, + S_NATION, + year +ORDER BY + year ASC, + revenue DESC; +``` + +Q3.2 + +``` sql +SELECT + C_CITY, + S_CITY, + toYear(LO_ORDERDATE) AS year, + sum(LO_REVENUE) AS revenue +FROM lineorder_flat +WHERE C_NATION = 'UNITED STATES' AND S_NATION = 'UNITED STATES' AND year >= 1992 AND year <= 1997 +GROUP BY + C_CITY, + S_CITY, + year +ORDER BY + year ASC, + revenue DESC; +``` + +Q3.3 + +``` sql +SELECT + C_CITY, + S_CITY, + toYear(LO_ORDERDATE) AS year, + sum(LO_REVENUE) AS revenue +FROM lineorder_flat +WHERE (C_CITY = 'UNITED KI1' OR C_CITY = 'UNITED KI5') AND (S_CITY = 'UNITED KI1' OR S_CITY = 'UNITED KI5') AND year >= 1992 AND year <= 1997 +GROUP BY + C_CITY, + S_CITY, + year +ORDER BY + year ASC, + revenue DESC; +``` + +Q3.4 + +``` sql +SELECT + C_CITY, + S_CITY, + toYear(LO_ORDERDATE) AS year, + sum(LO_REVENUE) AS revenue +FROM lineorder_flat +WHERE (C_CITY = 'UNITED KI1' OR C_CITY = 'UNITED KI5') AND (S_CITY = 'UNITED KI1' OR S_CITY = 'UNITED KI5') AND toYYYYMM(LO_ORDERDATE) = 199712 +GROUP BY + C_CITY, + S_CITY, + year +ORDER BY + year ASC, + revenue DESC; +``` + +Q4.1 + +``` sql +SELECT + toYear(LO_ORDERDATE) AS year, + C_NATION, + sum(LO_REVENUE - LO_SUPPLYCOST) AS profit +FROM lineorder_flat +WHERE C_REGION = 'AMERICA' AND S_REGION = 'AMERICA' AND (P_MFGR = 'MFGR#1' OR P_MFGR = 'MFGR#2') +GROUP BY + year, + C_NATION +ORDER BY + year ASC, + C_NATION ASC; +``` + +Q4.2 + +``` sql +SELECT + toYear(LO_ORDERDATE) AS year, + S_NATION, + P_CATEGORY, + sum(LO_REVENUE - LO_SUPPLYCOST) AS profit +FROM lineorder_flat +WHERE C_REGION = 'AMERICA' AND S_REGION = 'AMERICA' AND (year = 1997 OR year = 1998) AND (P_MFGR = 'MFGR#1' OR P_MFGR = 'MFGR#2') +GROUP BY + year, + S_NATION, + P_CATEGORY +ORDER BY + year ASC, + S_NATION ASC, + P_CATEGORY ASC; +``` + +Q4.3 + +``` sql +SELECT + toYear(LO_ORDERDATE) AS year, + S_CITY, + P_BRAND, + sum(LO_REVENUE - LO_SUPPLYCOST) AS profit +FROM lineorder_flat +WHERE S_NATION = 'UNITED STATES' AND (year = 1997 OR year = 1998) AND P_CATEGORY = 'MFGR#14' +GROUP BY + year, + S_CITY, + P_BRAND +ORDER BY + year ASC, + S_CITY ASC, + P_BRAND ASC; +``` + +[مقاله اصلی](https://clickhouse.tech/docs/en/getting_started/example_datasets/star_schema/) diff --git a/docs/fa/getting_started/example_datasets/wikistat.md b/docs/fa/getting_started/example_datasets/wikistat.md index 6178cbae243..1bc6529936c 100644 --- a/docs/fa/getting_started/example_datasets/wikistat.md +++ b/docs/fa/getting_started/example_datasets/wikistat.md @@ -1,12 +1,15 @@ -
    +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 18 +toc_title: "\u0648\u06CC\u06A9\u06CC\u0633\u062A\u0627\u062A" +--- -# WikiStat {#wikistat} +# ویکیستات {#wikistat} ببینید: http://dumps.wikimedia.org/other/pagecounts-raw/ -ساخت جدول: - -
    +ایجاد یک جدول: ``` sql CREATE TABLE wikistat @@ -21,16 +24,12 @@ CREATE TABLE wikistat ) ENGINE = MergeTree(date, (path, time), 8192); ``` -
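Once the data is loaded (next step), a typical first query aggregates hits per page. A sketch, assuming the full `wikistat` schema with `date`, `path` and `hits` columns as created above:

``` sql
-- top pages by total hits for a single day
SELECT path, sum(hits) AS total_hits
FROM wikistat
WHERE date = toDate('2016-01-01')
GROUP BY path
ORDER BY total_hits DESC
LIMIT 10;
```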
    - -load دیتا - -
    +بارگیری داده: ``` bash -for i in {2007..2016}; do for j in {01..12}; do echo $i-$j >&2; curl -sSL "http://dumps.wikimedia.org/other/pagecounts-raw/$i/$i-$j/" | grep -oE 'pagecounts-[0-9]+-[0-9]+\.gz'; done; done | sort | uniq | tee links.txt -cat links.txt | while read link; do wget http://dumps.wikimedia.org/other/pagecounts-raw/$(echo $link | sed -r 's/pagecounts-([0-9]{4})([0-9]{2})[0-9]{2}-[0-9]+\.gz/\1/')/$(echo $link | sed -r 's/pagecounts-([0-9]{4})([0-9]{2})[0-9]{2}-[0-9]+\.gz/\1-\2/')/$link; done -ls -1 /opt/wikistat/ | grep gz | while read i; do echo $i; gzip -cd /opt/wikistat/$i | ./wikistat-loader --time="$(echo -n $i | sed -r 's/pagecounts-([0-9]{4})([0-9]{2})([0-9]{2})-([0-9]{2})([0-9]{2})([0-9]{2})\.gz/\1-\2-\3 \4-00-00/')" | clickhouse-client --query="INSERT INTO wikistat FORMAT TabSeparated"; done +$ for i in {2007..2016}; do for j in {01..12}; do echo $i-$j >&2; curl -sSL "http://dumps.wikimedia.org/other/pagecounts-raw/$i/$i-$j/" | grep -oE 'pagecounts-[0-9]+-[0-9]+\.gz'; done; done | sort | uniq | tee links.txt +$ cat links.txt | while read link; do wget http://dumps.wikimedia.org/other/pagecounts-raw/$(echo $link | sed -r 's/pagecounts-([0-9]{4})([0-9]{2})[0-9]{2}-[0-9]+\.gz/\1/')/$(echo $link | sed -r 's/pagecounts-([0-9]{4})([0-9]{2})[0-9]{2}-[0-9]+\.gz/\1-\2/')/$link; done +$ ls -1 /opt/wikistat/ | grep gz | while read i; do echo $i; gzip -cd /opt/wikistat/$i | ./wikistat-loader --time="$(echo -n $i | sed -r 's/pagecounts-([0-9]{4})([0-9]{2})([0-9]{2})-([0-9]{2})([0-9]{2})([0-9]{2})\.gz/\1-\2-\3 \4-00-00/')" | clickhouse-client --query="INSERT INTO wikistat FORMAT TabSeparated"; done ``` -[مقاله اصلی](https://clickhouse.tech/docs/fa/getting_started/example_datasets/wikistat/) +[مقاله اصلی](https://clickhouse.tech/docs/en/getting_started/example_datasets/wikistat/) diff --git a/docs/fa/getting_started/index.md b/docs/fa/getting_started/index.md index 6556609c10d..d4b2ba243f2 100644 --- a/docs/fa/getting_started/index.md +++ b/docs/fa/getting_started/index.md @@ -1,13 +1,17 @@ -
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_folder_title: Getting Started
+toc_hidden: true
+toc_priority: 8
+toc_title: "\u0645\u062E\u0641\u06CC"
+---

-# شروع شدن
+# شروع کار {#getting-started}

-اگر تازه‌وارد هستید و می‌خواهید عملکرد آن را احساس کنید، اول از همه باید از طریق [مراحل نصب](install.md) بروید.
-پس از آن می‌توانید یکی از گزینه‌های زیر را انتخاب کنید:
+اگر تازه با ClickHouse آشنا شده‌اید و می‌خواهید عملکرد آن را حس کنید، اول از همه باید [مراحل نصب](install.md) را طی کنید. پس از آن می‌توانید:

-- [آموزش مفصل را طی کنید](tutorial.md)
-- [با داده‌های نمونه آزمایش کنید](example_datasets/ontime.md)
+- [آموزش مفصل را دنبال کنید](tutorial.md)
+- [با مجموعه داده‌های نمونه تجربه کنید](example_datasets/ontime.md)

-[مقاله اصلی](https://clickhouse.tech/docs/fa/getting_started/)
-
    +[مقاله اصلی](https://clickhouse.tech/docs/en/getting_started/) diff --git a/docs/fa/getting_started/install.md b/docs/fa/getting_started/install.md index 75b4131239d..0ce568018b3 100644 --- a/docs/fa/getting_started/install.md +++ b/docs/fa/getting_started/install.md @@ -1,171 +1,173 @@ -
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 11
+toc_title: "\u0646\u0635\u0628 \u0648 \u0631\u0627\u0647 \u0627\u0646\u062F\u0627\u0632\
+  \u06CC"
+---

-# نصب و راه‌اندازی
+# نصب و راه اندازی {#installation}

-## نیازمندی های سیستم {#nyzmndy-hy-systm}
+## سیستم مورد نیاز {#system-requirements}

-ClickHouse می‌تواند بر روی هر نوع لینوکس، FreeBSD یا Mac OS X با معماری CPU x
+ClickHouse می‌تواند روی هر Linux، FreeBSD یا Mac OS X با معماری CPU از نوع x86\_64، AArch64 یا PowerPC64LE اجرا شود.

-این دستور برای بررسی اینکه آیا CPU فعلی از SSE 4.2 پشتیبانی می‌کند، آمده است:
-
+باینری‌های رسمی از پیش ساخته‌شده معمولا برای x86\_64 کامپایل شده‌اند و از مجموعه دستورالعمل SSE 4.2 استفاده می‌کنند؛ بنابراین جز در مواردی که خلاف آن گفته شده باشد، پشتیبانی پردازنده از SSE 4.2 یک نیازمندی اضافی سیستم است. دستور زیر بررسی می‌کند که آیا پردازنده فعلی از SSE 4.2 پشتیبانی می‌کند:

``` bash
-grep -q sse4_2 /proc/cpuinfo && echo "SSE 4.2 supported" || echo "SSE 4.2 not supported"
+$ grep -q sse4_2 /proc/cpuinfo && echo "SSE 4.2 supported" || echo "SSE 4.2 not supported"
```
    +برای اجرای clickhouse در پردازنده هایی که پشتیبانی نمی sse 4.2 یا aarch64 یا powerpc64le معماری شما باید [ساخت کلیک از منابع](#from-sources) با تنظیمات پیکربندی مناسب. -ﺪﯾﺎﺑ ، ﺪﻧﺭﺍﺪﻧ PowerPC64LE ﺎﯾ AArch64 ﯼﺭﺎﻤﻌﻣ ﺎﯾ ﺪﻨﻨﮐ ﯽﻤﻧ ﯽﻧﺎﺒﯿﺘﺸﭘ SSE 4.2 ﺯﺍ ﻪﮐ[ClickHouse ﺪﯿﻨﮐ ﺩﺎﺠﯾﺍ ﻊﺑﺎﻨﻣ ﺯﺍ ﺍﺭ](#from-sources) ﺐﺳﺎﻨﻣ ﺕﺎﻤﯿﻈﻨﺗ ﺎﺑ +## گزینه های نصب موجود {#available-installation-options} -## ﺩﻮﺟﻮﻣ ﺐﺼﻧ ﯼﺎﻫ ﻪﻨﯾﺰﮔ +### از بسته های دب {#install-from-deb-packages} - -\#\#\# نصب از طریق پکیج های Debian/Ubuntu {\#from-deb-packages} +توصیه می شود به استفاده از رسمی از پیش وارد شده `deb` بسته برای دبیان یا اوبونتو. -در فایل `/etc/apt/sources.list` (یا در یک فایل جدا `/etc/apt/sources.list.d/clickhouse.list`)، Repo زیر را اضافه کنید: +برای نصب بسته های رسمی اضافه کردن مخزن یاندکس در `/etc/apt/sources.list` یا در یک جداگانه `/etc/apt/sources.list.d/clickhouse.list` پرونده: -
    + deb http://repo.clickhouse.tech/deb/stable/ main/ - deb http://repo.yandex.ru/clickhouse/deb/stable/ main/ +اگر شما می خواهید به استفاده از نسخه های اخیر, جایگزین کردن `stable` با `testing` (این است که برای محیط های تست خود را توصیه می شود). -
    - -اگر شما میخوایید جدیدترین نسخه ی تست را استفاده کنید، ‘stable’ رو به ‘testing’ تغییر بدید. - -سپس دستورات زیر را اجرا کنید: - -
    +سپس این دستورات را برای نصب بسته ها اجرا کنید: ``` bash -sudo apt-get install dirmngr # optional +sudo apt-get install dirmngr # optional sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv E0C56BD4 # optional sudo apt-get update sudo apt-get install clickhouse-client clickhouse-server ``` -
+شما همچنین می توانید بسته‌ها را به صورت دستی از اینجا دانلود و نصب کنید: https://repo.yandex.ru/clickhouse/deb/stable/main/.

-شما همچنین می توانید از طریق لینک زیر پکیج ClickHouse را به صورت دستی دانلود و نصب کنید: https://repo.yandex.ru/clickhouse/deb/stable/main/.
+#### بسته‌ها {#packages}

-ClickHouse دارای تنظیمات محدودیت دسترسی می باشد. این تنظیمات در فایل ‘users.xml’ (کنار ‘config.xml’) می باشد. به صورت پیش فرض دسترسی برای کاربر ‘default’ از همه جا بدون نیاز به پسورد وجود دارد. ‘user/default/networks’ را مشاهده کنید. برای اطلاعات بیشتر قسمت «تنظیمات فایل ها» را مشاهده کنید.
+- `clickhouse-common-static` — Installs ClickHouse compiled binary files.
+- `clickhouse-server` — Creates a symbolic link for `clickhouse-server` و پیکربندی پیش‌فرض سرور را نصب می‌کند.
+- `clickhouse-client` — Creates a symbolic link for `clickhouse-client` و دیگر ابزارهای سمت کلاینت، و فایل‌های پیکربندی کلاینت را نصب می‌کند.
+- `clickhouse-common-static-dbg` — Installs ClickHouse compiled binary files with debug info.

-### از بسته های RPM {#from-rpm-packages}
+### از بسته‌های RPM {#from-rpm-packages}

-برای CentOS، RedHat و سایر توزیع‌های لینوکس مبتنی بر rpm توصیه می‌شود.
+توصیه می‌شود از بسته‌های رسمی از پیش کامپایل‌شده `rpm` برای CentOS، RedHat و همه توزیع‌های لینوکس مبتنی بر rpm استفاده کنید.

-ابتدا باید مخزن رسمی را اضافه کنید:
+اولین قدم، اضافه کردن مخزن رسمی است:

``` bash
sudo yum install yum-utils
-sudo rpm --import https://repo.yandex.ru/clickhouse/CLICKHOUSE-KEY.GPG
-sudo yum-config-manager --add-repo https://repo.yandex.ru/clickhouse/rpm/stable/x86_64
+sudo rpm --import https://repo.clickhouse.tech/CLICKHOUSE-KEY.GPG
+sudo yum-config-manager --add-repo https://repo.clickhouse.tech/rpm/stable/x86_64
```

-«پایدار» را با «تست» جایگزین کنید (این برای محیط‌های آزمایش شما توصیه می‌شود).
+اگر می‌خواهید از جدیدترین نسخه‌ها استفاده کنید، `stable` را با `testing` جایگزین کنید (برای محیط‌های تست توصیه می‌شود). برچسب `prestable` نیز گاهی در دسترس است.

-سپس این دستورات را اجرا کنید تا بسته‌ها را نصب کنید:
+سپس این دستورات را برای نصب بسته‌ها اجرا کنید:

``` bash
sudo yum install clickhouse-server clickhouse-client
```

-از اینجا بارگیری و نصب کنید: https://repo.yandex.ru/clickhouse/rpm/stable/x86\_64.
+شما همچنین می توانید بسته‌ها را به صورت دستی از اینجا دانلود و نصب کنید: https://repo.clickhouse.tech/rpm/stable/x86_64.

-### از Docker Image
+### از بایگانی tgz {#from-tgz-archives}

-این تصاویر از بسته‌های رسمی «deb» در داخل استفاده می‌کنند. راهنمای (/ht
+توصیه می‌شود برای همه توزیع‌های لینوکسی که نصب بسته‌های `deb` یا `rpm` در آنها ممکن نیست، از آرشیوهای رسمی از پیش کامپایل‌شده `tgz` استفاده کنید.

-### نصب از طریق Source {#from-sources}
-
-برای Compile، دستورالعمل های فایل build.md را دنبال کنید:
-
-شما میتوانید پکیج را compile و نصب کنید. شما همچنین می توانید بدون نصب پکیج از برنامه ها استفاده کنید.
-
    - - Client: programs/clickhouse-client - Server: programs/clickhouse-server - -
    - -برای سرور، یک کاتالوگ با دیتا بسازید، مانند - -
    - - /opt/clickhouse/data/default/ - /opt/clickhouse/metadata/default/ - -
    - -(قابل تنظیم در تنظیمات سرور). ‘chown’ را برای کاربر دلخواه اجرا کنید. - -به مسیر لاگ ها در تنظیمات سرور توجه کنید (src/programs/config.xml). - -### روش های دیگر نصب {#from-docker-image} - -Docker image: https://hub.docker.com/r/yandex/clickhouse-server/ - -پکیج RPM برای CentOS یا RHEL: https://github.com/Altinity/clickhouse-rpm-install - -Gentoo: `emerge clickhouse` - -## راه اندازی {#rh-ndzy} - -برای استارت سرور (به صورت daemon)، دستور زیر را اجرا کنید: - -
    +نسخه مورد نیاز را می توان با دانلود `curl` یا `wget` از مخزن https://repo.yandex.ru/clickhouse/tgz/. +پس از که دانلود بایگانی باید غیر بستهای و نصب شده با اسکریپت نصب و راه اندازی. به عنوان مثال برای جدیدترین نسخه: ``` bash -sudo service clickhouse-server start +export LATEST_VERSION=`curl https://api.github.com/repos/ClickHouse/ClickHouse/tags 2>/dev/null | grep -Eo '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+' | head -n 1` +curl -O https://repo.clickhouse.tech/tgz/clickhouse-common-static-$LATEST_VERSION.tgz +curl -O https://repo.clickhouse.tech/tgz/clickhouse-common-static-dbg-$LATEST_VERSION.tgz +curl -O https://repo.clickhouse.tech/tgz/clickhouse-server-$LATEST_VERSION.tgz +curl -O https://repo.clickhouse.tech/tgz/clickhouse-client-$LATEST_VERSION.tgz + +tar -xzvf clickhouse-common-static-$LATEST_VERSION.tgz +sudo clickhouse-common-static-$LATEST_VERSION/install/doinst.sh + +tar -xzvf clickhouse-common-static-dbg-$LATEST_VERSION.tgz +sudo clickhouse-common-static-dbg-$LATEST_VERSION/install/doinst.sh + +tar -xzvf clickhouse-server-$LATEST_VERSION.tgz +sudo clickhouse-server-$LATEST_VERSION/install/doinst.sh +sudo /etc/init.d/clickhouse-server start + +tar -xzvf clickhouse-client-$LATEST_VERSION.tgz +sudo clickhouse-client-$LATEST_VERSION/install/doinst.sh ``` -
    +برای محیط های تولید توصیه می شود از جدیدترین استفاده کنید `stable`- نسخه شما می توانید شماره خود را در صفحه گیتهاب پیدا https://github.com/ClickHouse/ClickHouse/tags با پسوند `-stable`. -لاگ های دایرکتوری `/var/log/clickhouse-server/` directory. را مشاهده کنید. +### از تصویر کارگر بارانداز {#from-docker-image} -اگر سرور استارت نشد، فایل تنظیمات را بررسی کنید `/etc/clickhouse-server/config.xml.` +برای اجرای کلیک در داخل کارگر بارانداز راهنمای دنبال کنید [داکر توپی](https://hub.docker.com/r/yandex/clickhouse-server/). این تصاویر استفاده رسمی `deb` بسته در داخل. -شما همچنین می توانید سرور را از طریق کنسول راه اندازی کنید: +### از منابع {#from-sources} -
    +به صورت دستی کامپایل فاحشه خانه, دستورالعمل برای دنبال [لینوکس](../development/build.md) یا [سیستم عامل مک ایکس](../development/build_osx.md). + +شما می توانید بسته های کامپایل و نصب و یا استفاده از برنامه های بدون نصب بسته. همچنین با ساخت دستی شما می توانید ثانیه 4.2 مورد نیاز غیر فعال کردن و یا ساخت برای ایالت64 پردازنده. + + Client: programs/clickhouse-client + Server: programs/clickhouse-server + +شما نیاز به ایجاد یک داده ها و پوشه ابرداده و `chown` برای کاربر مورد نظر. مسیر خود را می توان در پیکربندی سرور تغییر (سری سی/برنامه/سرور/پیکربندی.به طور پیش فرض: + + /opt/clickhouse/data/default/ + /opt/clickhouse/metadata/default/ + +در جنتو, شما فقط می توانید استفاده کنید `emerge clickhouse` برای نصب کلیک از منابع. + +## راهاندازی {#launch} + +برای شروع سرور به عنوان یک شبح, اجرا: ``` bash -clickhouse-server --config-file=/etc/clickhouse-server/config.xml +$ sudo service clickhouse-server start ``` -
    - -در این مورد که مناسب زمان توسعه می باشد، لاگ ها در کنسول پرینت می شوند. اگر فایل تنظیمات در دایرکتوری جاری باشد، نیازی به مشخص کردن ‘–config-file’ نمی باشد. به صورت پیش فرض از ‘./config.xml’ استفاده می شود. - -شما می توانید از کلاینت command-line برای اتصال به سرور استفاده کنید: - -
    +اگر شما لازم نیست `service` فرمان, اجرا به عنوان ``` bash -clickhouse-client +$ sudo /etc/init.d/clickhouse-server start ``` -
    +سیاهههای مربوط در `/var/log/clickhouse-server/` فهرست راهنما. -پارامترهای پیش فرض، نشان از اتصال به localhost:9000 از طرف کاربر ‘default’ بدون پسورد را می دهد. از کلاینت میتوان برای اتصال به یک سرور remote استفاده کرد. مثال: +اگر سرور شروع نمی کند, بررسی تنظیمات در فایل `/etc/clickhouse-server/config.xml`. -
    +شما همچنین می توانید سرور را از کنسول به صورت دستی راه اندازی کنید: ``` bash -clickhouse-client --host=example.com +$ clickhouse-server --config-file=/etc/clickhouse-server/config.xml ``` -
    +در این مورد, ورود به سیستم خواهد شد به کنسول چاپ, که مناسب است در طول توسعه. +اگر فایل پیکربندی در دایرکتوری فعلی است, شما لازم نیست برای مشخص کردن `--config-file` پارامتر. به طور پیش فرض استفاده می کند `./config.xml`. -برای اطلاعات بیشتر، بخش «کلاینت Command-line» را مشاهده کنید. +تاتر پشتیبانی از تنظیمات محدودیت دسترسی. این در واقع `users.xml` پرونده) در کنار ( `config.xml`). +به طور پیش فرض, دسترسی از هر نقطه برای اجازه `default` کاربر, بدون رمز عبور. ببینید `user/default/networks`. +برای کسب اطلاعات بیشتر به بخش مراجعه کنید [“Configuration Files”](../operations/configuration_files.md). -چک کردن سیستم: - -
    +پس از راه اندازی سرور, شما می توانید مشتری خط فرمان برای اتصال به استفاده: ``` bash -milovidov@hostname:~/work/metrica/src/src/Client$ ./clickhouse-client +$ clickhouse-client +``` + +به طور پیش فرض به `localhost:9000` از طرف کاربر `default` بدون رمز عبور. همچنین می تواند مورد استفاده قرار گیرد برای اتصال به یک سرور از راه دور با استفاده از `--host` استدلال کردن. + +ترمینال باید از کدگذاری جی تی اف 8 استفاده کند. +برای کسب اطلاعات بیشتر به بخش مراجعه کنید [“Command-line client”](../interfaces/cli.md). + +مثال: + +``` bash +$ ./clickhouse-client ClickHouse client version 0.0.18749. Connecting to localhost:9000. Connected to ClickHouse server version 0.0.18749. @@ -183,12 +185,8 @@ SELECT 1 :) ``` -
    +**تبریک, سیستم کار می کند!** -**تبریک میگم، سیستم کار می کنه!** +برای ادامه تجربه, شما می توانید یکی از مجموعه داده های تست دانلود و یا رفتن را از طریق [اموزش](https://clickhouse.tech/tutorial.html). -برای ادامه آزمایشات، شما میتوانید دیتاست های تستی را دریافت و امتحان کنید. - -
    - -[مقاله اصلی](https://clickhouse.tech/docs/fa/getting_started/install/) +[مقاله اصلی](https://clickhouse.tech/docs/en/getting_started/install/) diff --git a/docs/fa/getting_started/playground.md b/docs/fa/getting_started/playground.md index 186cb9030c2..e8fea38fd9d 100644 --- a/docs/fa/getting_started/playground.md +++ b/docs/fa/getting_started/playground.md @@ -1,44 +1,47 @@ --- -en_copy: true +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 14 +toc_title: "\u0632\u0645\u06CC\u0646 \u0628\u0627\u0632\u06CC" --- -# ClickHouse Playground {#clickhouse-playground} +# تاتر زمین بازی {#clickhouse-playground} -[ClickHouse Playground](https://play.clickhouse.tech?file=welcome) allows people to experiment with ClickHouse by running queries instantly, without setting up their server or cluster. -Several example datasets are available in the Playground as well as sample queries that show ClickHouse features. +[تاتر زمین بازی](https://play.clickhouse.tech?file=welcome) اجازه می دهد تا مردم را به تجربه با تاتر در حال اجرا نمایش داده شد فورا, بدون راه اندازی سرور و یا خوشه خود را. +چند مجموعه داده به عنوان مثال در زمین بازی و همچنین نمونه نمایش داده شد که نشان می دهد ویژگی های تاتر در دسترس هستند. -The queries are executed as a read-only user. It implies some limitations: +نمایش داده شد به عنوان یک کاربر فقط خواندنی اجرا شده است. این نشان میدهد برخی از محدودیت: -- DDL queries are not allowed -- INSERT queries are not allowed +- پرسشهای دادل مجاز نیستند +- درج نمایش داده شد امکان پذیر نیست -The following settings are also enforced: +تنظیمات زیر نیز اجرا می شوند: - [`max_result_bytes=10485760`](../operations/settings/query_complexity/#max-result-bytes) - [`max_result_rows=2000`](../operations/settings/query_complexity/#setting-max_result_rows) - [`result_overflow_mode=break`](../operations/settings/query_complexity/#result-overflow-mode) - [`max_execution_time=60000`](../operations/settings/query_complexity/#max-execution-time) -ClickHouse Playground gives the experience of m2.small -[Managed Service for ClickHouse](https://cloud.yandex.com/services/managed-clickhouse) -instance hosted in [Yandex.Cloud](https://cloud.yandex.com/). -More information about [cloud providers](../commercial/cloud.md). +زمین بازی کلیک می دهد تجربه متر2.کوچک +[خدمات مدیریت شده برای کلیک](https://cloud.yandex.com/services/managed-clickhouse) +به عنوان مثال میزبانی شده در [یاندکسابر](https://cloud.yandex.com/). +اطلاعات بیشتر در مورد [ابر دهندگان](../commercial/cloud.md). -ClickHouse Playground web interface makes requests via ClickHouse [HTTP API](../interfaces/http.md). -The Playground backend is just a ClickHouse cluster without any additional server-side application. -ClickHouse HTTPS endpoint is also available as a part of the Playground. +ClickHouse زمین بازی و رابط کاربری وب سایت باعث می شود درخواست از طریق ClickHouse [HTTP API](../interfaces/http.md). +باطن زمین بازی فقط یک خوشه محل کلیک بدون هیچ گونه نرم افزار سمت سرور اضافی است. +نقطه پایانی کلیک اچتیتیپس نیز به عنوان بخشی از زمین بازی در دسترس است. -You can make queries to playground using any HTTP client, for example [curl](https://curl.haxx.se) or [wget](https://www.gnu.org/software/wget/), or set up a connection using [JDBC](../interfaces/jdbc.md) or [ODBC](../interfaces/odbc.md) drivers. -More information about software products that support ClickHouse is available [here](../interfaces/index.md). 
+شما می‌توانید با هر کلاینت HTTP، مثلا [curl](https://curl.haxx.se) یا [wget](https://www.gnu.org/software/wget/)، به زمین بازی کوئری بفرستید، یا با درایورهای [JDBC](../interfaces/jdbc.md) یا [ODBC](../interfaces/odbc.md) اتصال برقرار کنید.
+اطلاعات بیشتر درباره محصولات نرم‌افزاری که از ClickHouse پشتیبانی می‌کنند [اینجا](../interfaces/index.md) در دسترس است.

-| Parameter | Value |
-|:----------|:--------------------------------------|
-| Endpoint | https://play-api.clickhouse.tech:8443 |
-| User | `playground` |
-| Password | `clickhouse` |
+| پارامتر | مقدار |
+|:------------|:--------------------------------------|
+| نقطه پایانی | https://play-api.clickhouse.tech:8443 |
+| کاربر | `playground` |
+| اسم رمز | `clickhouse` |

-Note that this endpoint requires a secure connection.
+توجه داشته باشید که این نقطه پایانی به یک اتصال امن نیاز دارد.

-Example:
+مثال:

``` bash
curl "https://play-api.clickhouse.tech:8443/?query=SELECT+'Play+ClickHouse!';&user=playground&password=clickhouse&database=datasets"
diff --git a/docs/fa/getting_started/tutorial.md b/docs/fa/getting_started/tutorial.md
index 2c0bcf086df..bb906fc48b5 100644
--- a/docs/fa/getting_started/tutorial.md
+++ b/docs/fa/getting_started/tutorial.md
@@ -1,12 +1,19 @@
-# اموزش کلیک {#mwzsh-khlykh}
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 12
+toc_title: "\u0627\u0645\u0648\u0632\u0634"
+---

-## چه انتظار از این مقاله? {#chh-ntzr-z-yn-mqlh}
+# اموزش کلیک {#clickhouse-tutorial}

-با رفتن را از طریق این مقاله شما یاد بگیرند که چگونه به راه اندازی پایه خوشه خانه رعیتی, این کوچک, اما مقاوم در برابر خطا و مقیاس پذیر. ما از یکی از مجموعه داده های نمونه برای پر کردن داده ها و اجرای برخی از نمایش های نسخه ی نمایشی استفاده خواهیم کرد.
+## از این آموزش چه انتظاری داشته باشیم؟ {#what-to-expect-from-this-tutorial}

-## راه اندازی تک گره {#rh-ndzy-tkh-grh}
+با دنبال کردن این آموزش یاد می‌گیرید چگونه یک خوشه ساده ClickHouse راه‌اندازی کنید؛ کوچک، اما مقاوم در برابر خطا و مقیاس‌پذیر. سپس از یکی از مجموعه داده‌های نمونه برای پر کردن آن و اجرای چند کوئری نمایشی استفاده می‌کنیم.

-برای به تعویق انداختن پیچیدگی از محیط توزیع, ما با استقرار کلیک بر روی یک سرور و یا ماشین مجازی شروع. خانه کلیک است که معمولا از نصب [دب](index.md#install-from-deb-packages) یا [دور در دقیقه](index.md#from-rpm-packages) بسته, اما وجود دارد [جایگزین ها](index.md#from-docker-image) برای سیستم عامل هایی که هیچ پشتیبانی نمی کنند.
+## راه اندازی تک گره {#single-node-setup}
+
+برای پرهیز از پیچیدگی‌های یک محیط توزیع‌شده، ابتدا ClickHouse را روی یک سرور یا ماشین مجازی مستقر می‌کنیم. ClickHouse معمولا از بسته‌های [deb](index.md#install-from-deb-packages) یا [rpm](index.md#from-rpm-packages) نصب می‌شود، اما برای سیستم‌عامل‌هایی که این بسته‌ها را پشتیبانی نمی‌کنند [جایگزین‌هایی](index.md#from-docker-image) وجود دارد.

 مثلا، بسته‌های `deb` را انتخاب کرده‌اید و اجرا می‌کنید:

@@ -22,13 +29,13 @@ sudo apt-get install -y clickhouse-server clickhouse-client

 در بسته هایی که نصب شده اند چه چیزی داریم:

-- `clickhouse-client` بسته شامل [کلیک مشتری](../interfaces/cli.md) کاربرد, تعاملی مشتری کنسول تاتر.
-- `clickhouse-common` بسته شامل یک فایل اجرایی کلیک.
-- `clickhouse-server` بسته شامل فایل های پیکربندی برای اجرای تاتر به عنوان یک سرور.
+- بسته `clickhouse-client` شامل برنامه [clickhouse-client](../interfaces/cli.md)، کلاینت کنسولی تعاملی ClickHouse، است.
+- بسته `clickhouse-common` شامل فایل اجرایی ClickHouse است.
+- بسته `clickhouse-server` شامل فایل‌های پیکربندی برای اجرای ClickHouse به عنوان سرور است.
-فایل های پیکربندی سرور در واقع `/etc/clickhouse-server/`. قبل از رفتن بیشتر لطفا توجه کنید `` عنصر در `config.xml`. مسیر تعیین محل ذخیره سازی داده ها, بنابراین باید در حجم با ظرفیت دیسک بزرگ واقع, مقدار پیش فرض است `/var/lib/clickhouse/`. اگر شما می خواهید برای تنظیم پیکربندی دستی به طور مستقیم ویرایش نیست `config.xml` فایل, با توجه به اینکه ممکن است در به روز رسانی بسته های بعدی بازنویسی. راه توصیه می شود به نادیده گرفتن عناصر پیکربندی است که برای ایجاد [فایل ها در پیکربندی.فهرست راهنما](../operations/configuration_files.md) که به عنوان خدمت می کنند «patches» برای پیکربندی.. +فایل های پیکربندی سرور در واقع `/etc/clickhouse-server/`. قبل از رفتن بیشتر, لطفا توجه کنید `` عنصر در `config.xml`. مسیر تعیین محل ذخیره سازی داده ها, بنابراین باید در حجم با ظرفیت دیسک بزرگ واقع; مقدار پیش فرض است `/var/lib/clickhouse/`. اگر شما می خواهید برای تنظیم پیکربندی, این دستی به طور مستقیم ویرایش کنید `config.xml` فایل, با توجه به اینکه ممکن است در به روز رسانی بسته های بعدی بازنویسی. راه توصیه می شود به نادیده گرفتن عناصر پیکربندی است که برای ایجاد [فایل ها در پیکربندی.فهرست راهنما](../operations/configuration_files.md) که به عنوان خدمت می کنند “patches” برای پیکربندی.. -همانطور که شما ممکن است متوجه, `clickhouse-server` به طور خودکار پس از نصب بسته راه اندازی نشده است. این به طور خودکار پس از به روز رسانی هم دوباره راه اندازی نخواهد شد. راه شما شروع به سرور بستگی به سیستم اینیت خود را, معمولا, این: +همانطور که شما ممکن است متوجه, `clickhouse-server` به طور خودکار پس از نصب بسته راه اندازی نشده است. این به طور خودکار پس از به روز رسانی دوباره راه اندازی نخواهد شد. راه شما شروع به سرور بستگی به سیستم اینیت خود را, معمولا, این: ``` bash sudo service clickhouse-server start @@ -40,13 +47,14 @@ sudo service clickhouse-server start sudo /etc/init.d/clickhouse-server start ``` -محل پیش فرض برای سیاهههای مربوط به سرور است `/var/log/clickhouse-server/`. سرور خواهد بود برای رسیدگی به اتصالات مشتری یک بار `Ready for connections` پیام وارد شد. +محل پیش فرض برای سیاهههای مربوط به سرور است `/var/log/clickhouse-server/`. سرور برای رسیدگی به اتصالات مشتری پس از ورود به سیستم `Ready for connections` پیام هنگامی که `clickhouse-server` است و در حال اجرا, ما می توانیم با استفاده از `clickhouse-client` برای اتصال به سرور و اجرای برخی از نمایش داده شد تست مانند `SELECT "Hello, world!";`.
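One detail worth noting for that first test query: in ClickHouse SQL, double quotes delimit identifiers, so the portable way to select a string literal is with single quotes:

``` sql
SELECT 'Hello, world!' AS greeting; -- single quotes mean a string literal; "..." would be parsed as a column name
```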
    -راهنمایی سریع برای کلیک-مشتری حالت تعاملی: +راهنمایی سریع برای کلیک-مشتری +حالت تعاملی: ``` bash clickhouse-client @@ -77,11 +85,11 @@ clickhouse-client --query='INSERT INTO table FORMAT TabSeparated' < data.tsv
    -## واردات مجموعه داده نمونه {#wrdt-mjmw-h-ddh-nmwnh} +## واردات مجموعه داده نمونه {#import-sample-dataset} -در حال حاضر زمان برای پر کردن سرور کلیک ما با برخی از داده های نمونه است. در این مقاله ما داده های ناشناس یاندکس را استفاده خواهیم کرد.متریکا, اولین سرویس اجرا می شود که کلیک در راه تولید قبل از منبع باز شد (بیشتر در که در [بخش تاریخچه](../introduction/history.md)). وجود دارد [راه های متعدد برای وارد کردن یاندکس.مجموعه داده های متریکا](example_datasets/metrica.md) و به خاطر اموزش, ما با یکی از واقع بینانه ترین رفتن. +در حال حاضر زمان برای پر کردن سرور کلیک ما با برخی از داده های نمونه است. در این مقاله ما از داده های ناشناس یاندکس استفاده خواهیم کرد.متریکا, اولین سرویس اجرا می شود که کلیک در راه تولید قبل از منبع باز شد (بیشتر در که در [بخش تاریخچه](../introduction/history.md)). وجود دارد [راه های متعدد برای وارد کردن یاندکس.مجموعه داده های متریکا](example_datasets/metrica.md), و به خاطر تدریس خصوصی, ما با یکی از واقع بینانه ترین رفتن. -### دانلود و استخراج داده های جدول {#dnlwd-w-stkhrj-ddh-hy-jdwl} +### دانلود و استخراج داده های جدول {#download-and-extract-table-data} ``` bash curl https://clickhouse-datasets.s3.yandex.net/hits/tsv/hits_v1.tsv.xz | unxz --threads=`nproc` > hits_v1.tsv @@ -90,24 +98,24 @@ curl https://clickhouse-datasets.s3.yandex.net/visits/tsv/visits_v1.tsv.xz | unx فایل های استخراج شده حدود 10 گیگابایت است. -### ایجاد جداول {#yjd-jdwl} +### ایجاد جداول {#create-tables} -جداول منطقی به گروه بندی می شوند «databases». یک `default` پایگاه داده, اما ما یکی از جدید به نام ایجاد `tutorial`: +همانطور که در بسیاری از سیستم های مدیریت پایگاه داده, تاتر منطقی جداول گروه به “databases”. تاپیک `default` پایگاه داده, اما ما یکی از جدید به نام ایجاد `tutorial`: ``` bash clickhouse-client --query "CREATE DATABASE IF NOT EXISTS tutorial" ``` -نحو برای ایجاد جداول راه پیچیده تر در مقایسه با پایگاه داده است (نگاه کنید به [مرجع](../query_language/create.md). به طور کلی `CREATE TABLE` بیانیه باید سه چیز کلیدی را مشخص کند: +نحو برای ایجاد جداول راه پیچیده تر در مقایسه با پایگاه داده است (نگاه کنید به [مرجع](../sql_reference/statements/create.md). به طور کلی `CREATE TABLE` بیانیه باید سه چیز کلیدی را مشخص کند: 1. نام جدول برای ایجاد. -2. طرحواره جدول, به عنوان مثال لیستی از ستون ها و خود [انواع داده ها](../data_types/index.md). -3. [موتور جدول](../operations/table_engines/index.md) و این تنظیمات است, که تعیین تمام اطلاعات در مورد نحوه نمایش داده شد به این جدول خواهد شد از لحاظ جسمی اجرا. +2. Table schema, i.e. list of columns and their [انواع داده ها](../sql_reference/data_types/index.md). +3. [موتور جدول](../engines/table_engines/index.md) و این تنظیمات است, که تعیین تمام اطلاعات در مورد نحوه نمایش داده شد به این جدول خواهد شد از لحاظ جسمی اجرا. یاندکسمتریکا یک سرویس تجزیه و تحلیل وب است و مجموعه داده نمونه قابلیت های کامل خود را پوشش نمی دهد بنابراین تنها دو جدول برای ایجاد وجود دارد: -- `hits` یک جدول با هر عمل انجام شده توسط همه کاربران در تمام وب سایت های تحت پوشش این سرویس است. -- `visits` یک جدول است که شامل جلسات از پیش ساخته شده به جای اقدامات فردی است. +- `hits` یک جدول با هر عمل انجام شده توسط همه کاربران در تمام وب سایت های تحت پوشش این سرویس است. +- `visits` یک جدول است که شامل جلسات از پیش ساخته شده به جای اقدامات فردی است. 
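To make those three required parts concrete before the long real DDL that follows, here is a minimal, hypothetical table (all names invented for illustration):

``` sql
CREATE TABLE tutorial.my_first_table -- 1. the table name
(
    user_id UInt32,                  -- 2. the schema: columns and their types
    message String,
    ts DateTime
)
ENGINE = MergeTree()                 -- 3. the table engine and its settings
ORDER BY (user_id, ts);
```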
بیایید ببینید و اجرای واقعی ایجاد نمایش داده شد جدول برای این جداول: @@ -454,11 +462,11 @@ SETTINGS index_granularity = 8192 شما می توانید این پرسش ها را با استفاده از حالت تعاملی اجرا کنید `clickhouse-client` (فقط در یک ترمینال راه اندازی بدون مشخص کردن یک پرس و جو در پیش) و یا سعی کنید برخی از [رابط جایگزین](../interfaces/index.md) اگر شما می خواهید. -همانطور که می بینیم, `hits_v1` با استفاده از [موتور ادغام عمومی](../operations/table_engines/mergetree.md) در حالی که `visits_v1` با استفاده از [سقوط](../operations/table_engines/collapsingmergetree.md) گزینه. +همانطور که می بینیم, `hits_v1` با استفاده از [موتور ادغام عمومی](../engines/table_engines/mergetree_family/mergetree.md) در حالی که `visits_v1` با استفاده از [سقوط](../engines/table_engines/mergetree_family/collapsingmergetree.md) نوع. -### وارد کردن داده {#wrd-khrdn-ddh} +### وارد کردن داده {#import-data} -وارد کردن داده ها به تاتر از طریق انجام می شود [وارد](../query_language/insert_into.md) پرس و جو مانند در بسیاری از پایگاه داده های دیگر گذاشتن. با این حال داده ها معمولا در یکی از [فرمت های پشتیبانی شده](../interfaces/formats.md) به جای `VALUES` بند (که همچنین پشتیبانی). +وارد کردن داده ها به تاتر از طریق انجام می شود [INSERT INTO](../sql_reference/statements/insert_into.md) پرس و جو مانند در بسیاری از پایگاه داده های دیگر گذاشتن. با این حال, داده ها معمولا در یکی از [پشتیبانی از فرمت های ترتیب](../interfaces/formats.md) به جای `VALUES` بند (که همچنین پشتیبانی). فایل هایی که قبلا دانلود کردیم در قالب تب جدا شده اند بنابراین در اینجا نحوه وارد کردن از طریق مشتری کنسول است: @@ -478,23 +486,23 @@ FORMAT TSV max_insert_block_size 1048576 0 "The maximum block size for insertion, if we control the creation of blocks for insertion." ``` -در صورت تمایل شما می توانید [بهینه سازی](../query_language/misc/#misc_operations-optimize) جداول پس از واردات. جداول که با ادغام پیکربندی-موتور خانواده همیشه ادغام قطعات داده ها در پس زمینه برای بهینه سازی ذخیره سازی داده ها (یا حداقل چک کنید اگر حس می کند). این نمایش داده شد فقط موتور جدول را مجبور به انجام بهینه سازی ذخیره سازی در حال حاضر به جای برخی از زمان بعد: +در صورت تمایل شما می توانید [OPTIMIZE](../query_language/misc/#misc_operations-optimize) جداول پس از واردات. جداول است که با یک موتور از ادغام خانواده پیکربندی همیشه ادغام قطعات داده ها در پس زمینه برای بهینه سازی ذخیره سازی داده ها (یا حداقل چک کنید اگر حس می کند). این نمایش داده شد نیروی موتور جدول به انجام بهینه سازی ذخیره سازی در حال حاضر به جای برخی از زمان بعد: ``` bash clickhouse-client --query "OPTIMIZE TABLE tutorial.hits_v1 FINAL" clickhouse-client --query "OPTIMIZE TABLE tutorial.visits_v1 FINAL" ``` -این عملیات فشرده من / و پردازنده است بنابراین اگر جدول به طور مداوم داده های جدید را دریافت کند بهتر است تنها بماند و اجازه دهید ادغام در پس زمینه اجرا شود. +این نمایش داده شد شروع یک عملیات فشرده من/ای و پردازنده, بنابراین اگر جدول به طور مداوم داده های جدید دریافت, بهتر است به تنهایی ترک و اجازه دهید ادغام در پس زمینه اجرا. 
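If you want to see whether such background merges are actually running before forcing anything with `OPTIMIZE`, the `system.merges` table exposes them. A quick look, assuming the `tutorial` database from above:

``` sql
SELECT table, round(elapsed, 1) AS seconds, round(progress * 100, 1) AS pct_done
FROM system.merges
WHERE database = 'tutorial';
```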
-در حال حاضر ما می توانید بررسی کنید که جداول با موفقیت وارد شده است: +در حال حاضر ما می توانید بررسی کنید اگر واردات جدول موفق بود: ``` bash clickhouse-client --query "SELECT COUNT(*) FROM tutorial.hits_v1" clickhouse-client --query "SELECT COUNT(*) FROM tutorial.visits_v1" ``` -## به عنوان مثال نمایش داده شد {#bh-nwn-mthl-nmysh-ddh-shd} +## به عنوان مثال نمایش داده شد {#example-queries} ``` sql SELECT @@ -516,16 +524,16 @@ FROM tutorial.visits_v1 WHERE (CounterID = 912887) AND (toYYYYMM(StartDate) = 201403) AND (domain(StartURL) = 'yandex.ru') ``` -## استقرار خوشه {#stqrr-khwshh} +## استقرار خوشه {#cluster-deployment} خوشه کلیک یک خوشه همگن است. مراحل برای راه اندازی: 1. نصب سرور کلیک بر روی تمام ماشین های خوشه 2. تنظیم پیکربندی خوشه در فایل های پیکربندی 3. ایجاد جداول محلی در هر نمونه -4. ایجاد یک [جدول توزیع شده](../operations/table_engines/distributed.md) +4. ایجاد یک [جدول توزیع شده](../engines/table_engines/special/distributed.md) -[جدول توزیع شده](../operations/table_engines/distributed.md) در واقع یک نوع از «view» به جداول محلی خوشه فاحشه خانه. پرس و جو را انتخاب کنید از یک جدول توزیع خواهد شد با استفاده از منابع خرده ریز تمام خوشه اجرا. شما ممکن است تنظیمات برای خوشه های متعدد مشخص و ایجاد جداول توزیع های متعدد فراهم کردن دیدگاه ها به خوشه های مختلف. +[جدول توزیع شده](../engines/table_engines/special/distributed.md) در واقع یک نوع از “view” به جداول محلی خوشه فاحشه خانه. پرس و جو را انتخاب کنید از یک جدول توزیع اجرا با استفاده از منابع خرده ریز تمام خوشه. شما ممکن است تنظیمات برای خوشه های متعدد مشخص و ایجاد جداول توزیع های متعدد فراهم کردن دیدگاه ها به خوشه های مختلف. به عنوان مثال پیکربندی برای یک خوشه با سه خرده ریز, یک ماکت هر: @@ -554,7 +562,7 @@ WHERE (CounterID = 912887) AND (toYYYYMM(StartDate) = 201403) AND (domain(StartU ``` -برای تظاهرات بیشتر بیایید ایجاد یک جدول محلی جدید با همان `CREATE TABLE` پرس و جو که ما برای استفاده `hits_v1`, اما نام جدول های مختلف: +برای تظاهرات بیشتر, اجازه دهید یک جدول محلی جدید با همان ایجاد `CREATE TABLE` پرس و جو که ما برای استفاده `hits_v1`, اما نام جدول های مختلف: ``` sql CREATE TABLE tutorial.hits_local (...) ENGINE = MergeTree() ... @@ -567,22 +575,22 @@ CREATE TABLE tutorial.hits_all AS tutorial.hits_local ENGINE = Distributed(perftest_3shards_1replicas, tutorial, hits_local, rand()); ``` -یک روش معمول این است که جداول توزیع شده مشابه را در تمام ماشین های خوشه ایجاد کنید. این اجازه می دهد در حال اجرا نمایش داده شد توزیع در هر دستگاه از خوشه. همچنین یک گزینه جایگزین برای ایجاد جدول توزیع موقت برای پرس و جو انتخاب داده شده با استفاده از وجود دارد [دور](../query_language/table_functions/remote.md) تابع جدول. +یک روش معمول این است که جداول توزیع شده مشابه را در تمام ماشین های خوشه ایجاد کنید. این اجازه می دهد در حال اجرا نمایش داده شد توزیع در هر دستگاه از خوشه. همچنین یک گزینه جایگزین برای ایجاد جدول توزیع موقت برای پرس و جو انتخاب داده شده با استفاده از وجود دارد [دور](../sql_reference/table_functions/remote.md) تابع جدول. -بیا فرار کنیم [درج را انتخاب کنید](../query_language/insert_into.md) به جدول توزیع شده برای گسترش جدول به چندین سرور. +بیا فرار کنیم [INSERT SELECT](../sql_reference/statements/insert_into.md) به جدول توزیع شده برای گسترش جدول به چندین سرور. ``` sql INSERT INTO tutorial.hits_all SELECT * FROM tutorial.hits_v1; ``` -!!! اخطار «Notice» این روش مناسب برای جلوگیری از جداول بزرگ نیست. یک ابزار جداگانه وجود دارد [تاتر-کپی](../operations/utils/clickhouse-copier.md) که می تواند جداول دلخواه بزرگ دوباره سفال. "سفال." +!!! warning "اطلاع" + این روش مناسب برای شارژ جداول بزرگ نیست. 
یک ابزار جداگانه وجود دارد [تاتر-کپی](../operations/utilities/clickhouse-copier.md) که می تواند جداول دلخواه بزرگ دوباره سفال. +همانطور که شما می توانید انتظار, نمایش داده شد محاسباتی سنگین اجرا نفر بار سریع تر در صورتی که استفاده 3 سرور به جای یک. -همانطور که شما می توانید انتظار نمایش داده شد محاسباتی سنگین نفر بار سریع تر اجرا در 3 سرور به جای یک راه اندازی. +در این مورد, ما یک خوشه با استفاده کرده اند 3 خرده ریز, و هر شامل یک ماکت تک. -در این مورد, ما یک خوشه با استفاده کرده اند 3 خرده ریز هر شامل یک ماکت تک. - -برای انعطاف پذیری در یک محیط تولید توصیه می کنیم که هر سفال باید حاوی 2-3 کپی بین مراکز داده های متعدد توزیع شده است. توجه داشته باشید که کلیک خانه پشتیبانی از تعداد نامحدودی از کپی. +برای انعطاف پذیری در یک محیط تولید, توصیه می کنیم که هر سفال باید شامل 2-3 کپی بین مناطق در دسترس بودن متعدد و یا مراکز داده گسترش (یا حداقل قفسه). توجه داشته باشید که کلیک خانه پشتیبانی از تعداد نامحدودی از کپی. به عنوان مثال پیکربندی برای یک خوشه از یک سفال حاوی سه کپی: @@ -608,11 +616,12 @@ INSERT INTO tutorial.hits_all SELECT * FROM tutorial.hits_v1; ``` -برای فعال کردن تکثیر بومی باغ وحش الزامی است. تاتر مراقبت از سازگاری داده ها در تمام کپی را اجرا و بازگرداندن روش پس از شکست بطور خودکار توصیه می شود برای استقرار خوشه باغ وحش به سرور جداگانه. +برای فعال کردن تکثیر بومی [باغ وحش](http://zookeeper.apache.org/) الزامی است. تاتر طول می کشد مراقبت از سازگاری داده ها در تمام کپی و اجرا می شود بازگرداندن روش پس از شکست به طور خودکار. توصیه می شود برای استقرار خوشه باغ وحش بر روی سرورهای جداگانه (جایی که هیچ پروسه های دیگر از جمله کلیک در حال اجرا هستند). -باغ وحش یک نیاز سخت نیست: در برخی موارد ساده می توانید داده ها را با نوشتن به تمام کپی ها از کد درخواست خود کپی کنید. این رویکرد است **نه** توصیه می شود, در این مورد, تاتر قادر نخواهد بود به تضمین سازگاری داده ها در تمام کپی. این وظیفه درخواست شما باقی می ماند. +!!! note "یادداشت" + باغ وحش یک نیاز سخت نیست: در برخی موارد ساده می توانید داده ها را با نوشتن به تمام کپی ها از کد درخواست خود کپی کنید. این رویکرد است **نه** توصیه می شود, در این مورد, تاتر قادر نخواهد بود برای تضمین ثبات داده ها در تمام کپی. بنابراین وظیفه درخواست شما می شود. -مکان های باغ وحش باید در فایل پیکربندی مشخص شود: +مکان های باغ وحش در فایل پیکربندی مشخص شده است: ``` xml @@ -631,7 +640,7 @@ INSERT INTO tutorial.hits_all SELECT * FROM tutorial.hits_v1; ``` -همچنین, ما نیاز به تنظیم ماکروها برای شناسایی هر سفال و ماکت, خواهد شد در ایجاد جدول استفاده می شود: +همچنین, ما نیاز به تنظیم ماکروها برای شناسایی هر سفال و ماکت که در ایجاد جدول استفاده می شود: ``` xml @@ -640,7 +649,7 @@ INSERT INTO tutorial.hits_all SELECT * FROM tutorial.hits_v1; ``` -اگر هیچ کپی در حال حاضر در ایجاد جدول تکرار وجود دارد, اولین ماکت جدید نمونه خواهد شد. اگر در حال حاضر زندگی می کنند کپی جدید کلون کردن داده ها از موجود. شما ابتدا یک گزینه برای ایجاد تمام جداول تکرار شده دارید و داده ها را وارد می کنید. یکی دیگر از گزینه این است که برای ایجاد برخی از کپی و اضافه کردن دیگران بعد یا در هنگام درج داده ها. +اگر هیچ کپی در حال حاضر در ایجاد جدول تکرار وجود دارد, اولین ماکت جدید نمونه است. اگر در حال حاضر زندگی می کنند کپی جدید کلون داده ها از موجود. شما ابتدا یک گزینه برای ایجاد تمام جداول تکرار شده دارید و سپس داده ها را وارد کنید. یکی دیگر از گزینه این است که برای ایجاد برخی از کپی و اضافه کردن دیگران بعد یا در هنگام درج داده ها. ``` sql CREATE TABLE tutorial.hits_replica (...) @@ -651,12 +660,12 @@ ENGINE = ReplcatedMergeTree( ... ``` -در اینجا ما با استفاده از [تکرار غذای اصلی](../operations/table_engines/replication.md) موتور جدول. در پارامترهای مشخص می کنیم مسیر باغ وحش حاوی سفال و کپی شناسه. 
+در اینجا از موتور جدول [ReplicatedMergeTree](../engines/table_engines/mergetree_family/replication.md) استفاده می‌کنیم. در پارامترها، مسیر ZooKeeper حاوی شناسه‌های شارد و ماکت را مشخص می‌کنیم.

``` sql
INSERT INTO tutorial.hits_replica SELECT * FROM tutorial.hits_local;
```

-تکرار عمل در حالت چند استاد. داده ها را می توان به هر ماکت بارگذاری کرد و به طور خودکار با سایر موارد همگام سازی می شود. تکرار ناهمزمان است بنابراین در یک لحظه معین, همه کپی ممکن است حاوی داده به تازگی قرار داده شده. اجازه می دهد تا درج داده ها حداقل یک ماکت باید باشد. دیگران همگام سازی داده ها و قوام تعمیر هنگامی که دوباره فعال تبدیل خواهد شد. لطفا توجه داشته باشید که چنین رویکردی اجازه می دهد تا برای امکان کم از دست دادن داده ها فقط اضافه.
+تکرار در حالت multi-master عمل می‌کند. داده را می‌توان در هر ماکتی بارگذاری کرد و سیستم آن را به طور خودکار با بقیه ماکت‌ها همگام می‌کند. تکرار ناهمزمان است، بنابراین در یک لحظه معین ممکن است همه ماکت‌ها داده‌های تازه درج‌شده را نداشته باشند. برای درج داده، دست‌کم یک ماکت باید در دسترس باشد. بقیه پس از فعال شدن دوباره، داده‌ها را همگام و سازگاری را ترمیم می‌کنند. توجه کنید که این روش احتمال کمی برای از دست رفتن داده‌های تازه درج‌شده باقی می‌گذارد.

[مقاله اصلی](https://clickhouse.tech/docs/en/getting_started/tutorial/)
diff --git a/docs/fa/guides/apply_catboost_model.md b/docs/fa/guides/apply_catboost_model.md
index 62eb386147f..90b915a60f8 100644
--- a/docs/fa/guides/apply_catboost_model.md
+++ b/docs/fa/guides/apply_catboost_model.md
@@ -1,40 +1,45 @@
 ---
-en_copy: true
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 41
+toc_title: "استفاده از مدل‌های CatBoost"
 ---

-# Applying a Catboost Model in ClickHouse {#applying-catboost-model-in-clickhouse}
+# استفاده از مدل CatBoost در ClickHouse {#applying-catboost-model-in-clickhouse}

-[CatBoost](https://catboost.ai) is a free and open-source gradient boosting library developed at [Yandex](https://yandex.com/company/) for machine learning.
+[CatBoost](https://catboost.ai) یک کتابخانه آزاد و متن‌باز gradient boosting است که در [Yandex](https://yandex.com/company/) برای یادگیری ماشین توسعه یافته است.

-With this instruction, you will learn to apply pre-trained models in ClickHouse by running model inference from SQL.
+با این دستورالعمل یاد می‌گیرید مدل‌های از پیش آموزش‌دیده را با اجرای استنتاج مدل از طریق SQL در ClickHouse به کار ببرید.

-To apply a CatBoost model in ClickHouse:
+برای به‌کارگیری یک مدل CatBoost در ClickHouse:

-1. [Create a Table](#create-table).
-2. [Insert the Data to the Table](#insert-data-to-table).
-3. [Integrate CatBoost into ClickHouse](#integrate-catboost-into-clickhouse) (Optional step).
-4. [Run the Model Inference from SQL](#run-model-inference).
+1. [ایجاد یک جدول](#create-table).
+2. [درج داده به جدول](#insert-data-to-table).
+3. [ادغام CatBoost در ClickHouse](#integrate-catboost-into-clickhouse) (مرحله اختیاری).
+4. [اجرای استنتاج مدل از SQL](#run-model-inference).

-For more information about training CatBoost models, see [Training and applying models](https://catboost.ai/docs/features/training.html#training).
+برای اطلاعات بیشتر درباره آموزش مدل‌های CatBoost، ببینید [Training and applying models](https://catboost.ai/docs/features/training.html#training).

-## Prerequisites {#prerequisites}
+## پیش نیازها {#prerequisites}

-If you don’t have the [Docker](https://docs.docker.com/install/) yet, install it.
+اگر شما لازم نیست که [کارگر بارانداز](https://docs.docker.com/install/) هنوز, نصب کنید. -!!! note "Note" - [Docker](https://www.docker.com) is a software platform that allows you to create containers that isolate a CatBoost and ClickHouse installation from the rest of the system. +!!! note "یادداشت" + [کارگر بارانداز](https://www.docker.com) یک پلت فرم نرم افزار است که اجازه می دهد تا به شما برای ایجاد ظروف که منزوی CatBoost و ClickHouse نصب و راه اندازی از بقیه سیستم. -Before applying a CatBoost model: +قبل از استفاده از مدل ادم کودن و احمق: -**1.** Pull the [Docker image](https://hub.docker.com/r/yandex/tutorial-catboost-clickhouse) from the registry: +**1.** بکش [تصویر کارگر بارانداز](https://hub.docker.com/r/yandex/tutorial-catboost-clickhouse) از رجیستری: ``` bash $ docker pull yandex/tutorial-catboost-clickhouse ``` -This Docker image contains everything you need to run CatBoost and ClickHouse: code, runtime, libraries, environment variables, and configuration files. +این docker تصویر شامل همه چیز شما نیاز به اجرای catboost و clickhouse: کد در زمان اجرا کتابخانه های محیط متغیر و فایل های پیکربندی. -**2.** Make sure the Docker image has been successfully pulled: +**2.** اطمینان حاصل کنید که تصویر کارگر بارانداز شده است با موفقیت کشیده: ``` bash $ docker image ls @@ -42,26 +47,26 @@ REPOSITORY TAG IMAGE ID CR yandex/tutorial-catboost-clickhouse latest 622e4d17945b 22 hours ago 1.37GB ``` -**3.** Start a Docker container based on this image: +**3.** شروع یک ظرف کارگر بارانداز بر اساس این تصویر: ``` bash $ docker run -it -p 8888:8888 yandex/tutorial-catboost-clickhouse ``` -## 1. Create a Table {#create-table} +## 1. ایجاد یک جدول {#create-table} -To create a ClickHouse table for the training sample: +برای ایجاد یک میز کلیک برای نمونه تمرین: -**1.** Start ClickHouse console client in the interactive mode: +**1.** شروع مشتری کنسول کلیک در حالت تعاملی: ``` bash $ clickhouse client ``` -!!! note "Note" - The ClickHouse server is already running inside the Docker container. +!!! note "یادداشت" + سرور کلیک در حال حاضر در داخل ظرف کارگر بارانداز در حال اجرا. -**2.** Create the table using the command: +**2.** ایجاد جدول با استفاده از دستور: ``` sql :) CREATE TABLE amazon_train @@ -81,29 +86,29 @@ $ clickhouse client ENGINE = MergeTree ORDER BY date ``` -**3.** Exit from ClickHouse console client: +**3.** خروج از مشتری کنسول کلیک کنید: ``` sql :) exit ``` -## 2. Insert the Data to the Table {#insert-data-to-table} +## 2. درج داده به جدول {#insert-data-to-table} -To insert the data: +برای وارد کردن داده ها: -**1.** Run the following command: +**1.** دستور زیر را اجرا کنید: ``` bash $ clickhouse client --host 127.0.0.1 --query 'INSERT INTO amazon_train FORMAT CSVWithNames' < ~/amazon/train.csv ``` -**2.** Start ClickHouse console client in the interactive mode: +**2.** شروع مشتری کنسول کلیک در حالت تعاملی: ``` bash $ clickhouse client ``` -**3.** Make sure the data has been uploaded: +**3.** اطمینان حاصل کنید که داده ها ارسال شده است: ``` sql :) SELECT count() FROM amazon_train @@ -113,27 +118,27 @@ FROM amazon_train +-count()-+ | 65538 | -+---------+ ++-------+ ``` -## 3. Integrate CatBoost into ClickHouse {#integrate-catboost-into-clickhouse} +## 3. ادغام کاتبوست به کلیک {#integrate-catboost-into-clickhouse} -!!! note "Note" - **Optional step.** The Docker image contains everything you need to run CatBoost and ClickHouse. +!!! note "یادداشت" + **گام اختیاری.** این Docker تصویر شامل همه چیز شما نیاز به اجرای CatBoost و ClickHouse. 
-To integrate CatBoost into ClickHouse:
+برای ادغام CatBoost در ClickHouse:

-**1.** Build the evaluation library.
+**1.** کتابخانه ارزیابی را بسازید.

-The fastest way to evaluate a CatBoost model is compile `libcatboostmodel.` library. For more information about how to build the library, see [CatBoost documentation](https://catboost.ai/docs/concepts/c-plus-plus-api_dynamic-c-pluplus-wrapper.html).
+سریعترین راه برای ارزیابی یک مدل CatBoost، کامپایل کتابخانه `libcatboostmodel.` است. برای کسب اطلاعات بیشتر در مورد چگونگی ساخت کتابخانه، ببینید [مستندات CatBoost](https://catboost.ai/docs/concepts/c-plus-plus-api_dynamic-c-pluplus-wrapper.html).

-**2.** Create a new directory anywhere and with any name, for example, `data` and put the created library in it. The Docker image already contains the library `data/libcatboostmodel.so`.
+**2.** یک دایرکتوری جدید در هر مکان و با هر نامی ایجاد کنید، مثلا `data`، و کتابخانه ساخته شده را در ان قرار دهید. تصویر Docker از قبل شامل کتابخانه `data/libcatboostmodel.so` است.

-**3.** Create a new directory for config model anywhere and with any name, for example, `models`.
+**3.** یک دایرکتوری جدید برای پیکربندی مدل در هر مکان و با هر نامی ایجاد کنید، مثلا `models`.

-**4.** Create a model configuration file with any name, for example, `models/amazon_model.xml`.
+**4.** یک فایل پیکربندی مدل با هر نامی ایجاد کنید، مثلا `models/amazon_model.xml`.

-**5.** Describe the model configuration:
+**5.** پیکربندی مدل را توصیف کنید:

``` xml
@@ -150,7 +155,7 @@ The fastest way to evaluate a CatBoost model is compile `libcatboostmodel.
```

-**6.** Add the path to CatBoost and the model configuration to the ClickHouse configuration:
+**6.** مسیر CatBoost و پیکربندی مدل را به پیکربندی ClickHouse اضافه کنید:

``` xml
@@ -158,11 +163,11 @@ The fastest way to evaluate a CatBoost model is compile `libcatboostmodel.
<models_config>/home/catboost/models/*_model.xml</models_config>
```

-## 4. Run the Model Inference from SQL {#run-model-inference}
+## 4. اجرای استنتاج مدل از SQL {#run-model-inference}

-For test model run the ClickHouse client `$ clickhouse client`.
+برای تست مدل، کلاینت ClickHouse را اجرا کنید: `$ clickhouse client`.

-Let’s make sure that the model is working:
+بیایید اطمینان حاصل کنیم که مدل کار می کند:

``` sql
:) SELECT
@@ -181,10 +186,10 @@ FROM amazon_train
LIMIT 10
```

-!!! note "Note"
-    Function [modelEvaluate](../query_language/functions/other_functions.md#function-modelevaluate) returns tuple with per-class raw predictions for multiclass models.
+!!! note "یادداشت"
+    تابع [modelEvaluate](../sql_reference/functions/other_functions.md#function-modelevaluate) برای مدل های چندکلاسه، تاپلی از پیش بینی های خام هر کلاس برمی گرداند.

-Let’s predict the probability:
+بیایید احتمال را پیش بینی کنیم:

``` sql
:) SELECT
@@ -204,10 +209,10 @@ FROM amazon_train
LIMIT 10
```

-!!! note "Note"
-    More info about [exp()](../query_language/functions/math_functions.md) function.
+!!! note "یادداشت"
+    اطلاعات بیشتر در مورد تابع [exp()](../sql_reference/functions/math_functions.md).

-Let’s calculate LogLoss on the sample:
+بیایید LogLoss را روی نمونه محاسبه کنیم:

``` sql
:) SELECT -avg(tg * log(prob) + (1 - tg) * log(1 - prob)) AS logloss
@@ -230,7 +235,7 @@ FROM
)
```

-!!! note "Note"
-    More info about [avg()](../query_language/agg_functions/reference.md#agg_function-avg) and [log()](../query_language/functions/math_functions.md) functions.
+!!! note "یادداشت"
+    اطلاعات بیشتر در مورد توابع [avg()](../sql_reference/aggregate_functions/reference.md#agg_function-avg) و [log()](../sql_reference/functions/math_functions.md).
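+
+As a hedged follow-up sketch (an addition, not part of the original tutorial), the same aggregation pattern can estimate accuracy at a 0.5 probability threshold; the inner subquery is assumed to be the one that produced `tg` and `prob` in the LogLoss example above:
+
+``` sql
+:) SELECT avg((prob > 0.5) = tg) AS accuracy
+FROM
+(
+    -- the same subquery that yields the target `tg` and the
+    -- predicted probability `prob` as in the LogLoss query above
+    ...
+)
+```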
-[Original article](https://clickhouse.tech/docs/en/guides/apply_catboost_model/)
+[مقاله اصلی](https://clickhouse.tech/docs/en/guides/apply_catboost_model/)

diff --git a/docs/fa/guides/index.md b/docs/fa/guides/index.md
index c1968730961..6915953177b 100644
--- a/docs/fa/guides/index.md
+++ b/docs/fa/guides/index.md
@@ -1,12 +1,16 @@
 ---
-en_copy: true
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_folder_title: Guides
+toc_priority: 38
+toc_title: "\u0628\u0631\u0631\u0633\u06CC \u0627\u062C\u0645\u0627\u0644\u06CC"
 ---

-# ClickHouse Guides {#clickhouse-guides}
+# راهنماهای ClickHouse {#clickhouse-guides}

-List of detailed step-by-step instructions that help to solve various tasks using ClickHouse:
+فهرست دستورالعمل های گام به گام و مفصل که به حل وظایف مختلف با استفاده از ClickHouse کمک می کنند:

-- [Tutorial on simple cluster set-up](../getting_started/tutorial.md)
-- [Applying a CatBoost model in ClickHouse](apply_catboost_model.md)
+- [اموزش راه اندازی یک خوشه ساده](../getting_started/tutorial.md)
+- [استفاده از مدل CatBoost در ClickHouse](apply_catboost_model.md)

-[Original article](https://clickhouse.tech/docs/en/guides/)
+[مقاله اصلی](https://clickhouse.tech/docs/en/guides/)

diff --git a/docs/fa/interfaces/cli.md b/docs/fa/interfaces/cli.md
index e549a6e6ba5..dfee35f2083 100644
--- a/docs/fa/interfaces/cli.md
+++ b/docs/fa/interfaces/cli.md
@@ -1,122 +1,149 @@
-
    +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 17 +toc_title: "\u0645\u0634\u062A\u0631\u06CC \u062E\u0637 \u0641\u0631\u0645\u0627\u0646" +--- -# کلاینت Command-line {#khlynt-command-line} +# مشتری خط فرمان {#command-line-client} -برای کار از طریق محیط ترمینال میتوانید از دستور `clickhouse-client` استفاده کنید +تاتر یک مشتری خط فرمان بومی فراهم می کند: `clickhouse-client`. مشتری پشتیبانی از گزینه های خط فرمان و فایل های پیکربندی. برای کسب اطلاعات بیشتر, دیدن [پیکربندی](#interfaces_cli_configuration). -
+[نصب کنید](../getting_started/index.md) ان را از بسته `clickhouse-client` و با فرمان `clickhouse-client` اجرا کنید.

``` bash
$ clickhouse-client
-ClickHouse client version 0.0.26176.
-Connecting to localhost:9000.
-Connected to ClickHouse server version 0.0.26176.
+ClickHouse client version 19.17.1.1579 (official build).
+Connecting to localhost:9000 as user default.
+Connected to ClickHouse server version 19.17.1 revision 54428.

:)
```

-
+کلاینت و سرور با نسخه های مختلف با یکدیگر سازگارند، اما ممکن است برخی از ویژگی ها در کلاینت های قدیمی تر در دسترس نباشند. توصیه می کنیم از همان نسخه ای از کلاینت استفاده کنید که سرور دارد. هنگامی که سعی کنید از کلاینتی با نسخه ای قدیمی تر از سرور استفاده کنید، `clickhouse-client` این پیام را نمایش می دهد:

-کلاینت از آپشن های command-line و فایل های کانفیگ پشتیبانی می کند. برای اطلاعات بیشتر بخش «[پیکربندی](#interfaces_cli_configuration)» را مشاهده کنید.

+    ClickHouse client version is older than ClickHouse server. It may lack support for new features.

-## استفاده {#stfdh}
+## استفاده {#cli_usage}

-کلاینت می تواند به دو صورت interactive و non-intercative (batch) مورد استفاده قرار گیرد. برای استفاده از حالت batch، پارامتر `query` را مشخص کنید، و یا داده ها ره به `stdin` ارسال کنید (کلاینت تایید می کند که `stdin` ترمینال نیست) و یا از هر 2 استفاده کنید. مشابه HTTP interface، هنگامی که از از پارامتر `query` و ارسال داده ها به `stdin` استفاده می کنید، درخواست، ترکیبی از پارامتر `query`، line feed، و داده ها در `stdin` است. این کار برای query های بزرگ INSERT مناسب است.
+کلاینت را می توان در حالت تعاملی و غیرتعاملی (دسته ای) استفاده کرد. برای استفاده از حالت دسته ای، پارامتر ‘query’ را مشخص کنید، یا داده ها را به ‘stdin’ ارسال کنید (کلاینت بررسی می کند که ‘stdin’ ترمینال نیست)، یا هر دو. مشابه رابط HTTP، هنگامی که هم از پارامتر ‘query’ استفاده می کنید و هم داده ها را به ‘stdin’ می فرستید، درخواست الحاقی است از پارامتر ‘query’، یک line feed و داده های ‘stdin’. این برای درج های بزرگ مناسب است.

-مثالی از استفاده کلاینت برای اجرای دستور INSERT داده
-
    +نمونه ای از استفاده از مشتری برای وارد کردن داده ها: ``` bash -echo -ne "1, 'some text', '2016-08-14 00:00:00'\n2, 'some more text', '2016-08-14 00:00:01'" | clickhouse-client --database=test --query="INSERT INTO test FORMAT CSV"; +$ echo -ne "1, 'some text', '2016-08-14 00:00:00'\n2, 'some more text', '2016-08-14 00:00:01'" | clickhouse-client --database=test --query="INSERT INTO test FORMAT CSV"; -cat <<_EOF | clickhouse-client --database=test --query="INSERT INTO test FORMAT CSV"; +$ cat <<_EOF | clickhouse-client --database=test --query="INSERT INTO test FORMAT CSV"; 3, 'some text', '2016-08-14 00:00:00' 4, 'some more text', '2016-08-14 00:00:01' _EOF -cat file.csv | clickhouse-client --database=test --query="INSERT INTO test FORMAT CSV"; +$ cat file.csv | clickhouse-client --database=test --query="INSERT INTO test FORMAT CSV"; ``` -
    +در حالت دسته ای, فرمت داده ها به طور پیش فرض جدول است. شما می توانید فرمت را در بند فرمت پرس و جو تنظیم کنید. -در حالت Batch، فرمت داده ها به صورت پیش فرض به صورت TabSeparated می باشد. شما میتوانید فرمت داده ها رو در هنگام اجرای query و با استفاده از شرط FORMAT مشخص کنید. +به طور پیش فرض, شما فقط می توانید پردازش یک پرس و جو تنها در حالت دسته ای. برای ایجاد چندین نمایش داده شد از یک “script,” استفاده از `--multiquery` پارامتر. این برای همه نمایش داده شد به جز درج کار می کند. نتایج پرس و جو خروجی متوالی بدون جداکننده های اضافی می باشد. به طور مشابه, برای پردازش تعداد زیادی از نمایش داده شد, شما می توانید اجرا ‘clickhouse-client’ برای هر پرس و جو. توجه داشته باشید که ممکن است دهها میلی ثانیه برای راه اندازی ‘clickhouse-client’ برنامه -به طور پیش فرض شما فقط می توانید یک query را در خالت batch اجرا کنید.برای ساخت چندین query از یک «اسکریپت»، از پارامتر –multiquery استفاده کنید. این روش برای تمام query ها به جز INSERT کار می کند. نتایج query ها به صورت متوالی و بدون seperator اضافه تولید می شوند. به طور مشابه برای پردازش تعداد زیادی از query ها شما می توانید از ‘clickhouse-client’ برای هر query استفاده کنید. دقت کنید که ممکن است حدود 10 میلی ثانیه تا زمان راه اندازی برنامه ‘clickhouse-client’ زمان گرفته شود. +در حالت تعاملی شما یک خط فرمان دریافت می کنید که می توانید نمایش داده شده را وارد کنید. -در حالت intercative، شما یک command line برای درج query های خود دریافت می کنید. +اگر ‘multiline’ مشخص نشده است (به طور پیش فرض): برای اجرای پرس و جو را فشار دهید را وارد کنید. نقطه و ویرگول در پایان پرس و جو لازم نیست. برای ورود به پرس و جو چند خطی یک بک اسلش را وارد کنید `\` قبل از خط تغذیه. بعد از اینکه شما فشار وارد, از شما خواسته خواهد شد که برای ورود به خط بعدی از پرس و جو. -اگر ‘multiline’ مشخص نشده باشد (به صورت پیش فرض): برای اجرای یک query، دکمه Enter را بزنید. سیمی کالن در انتهای query اجباری نیست. برای درج یک query چند خطی (multiline)، دکمه ی بک اسلش `\` را قبل از line feed فشار دهید. بعد از فشردن Enter، از شما برای درج خط بعدی query درخواست خواهد شد. +اگر چند خطی مشخص شده است: برای اجرای یک پرس و جو, پایان با یک نقطه و ویرگول و مطبوعات را وارد کنید. اگر نقطه و ویرگول در پایان خط وارد شده حذف شد, از شما خواسته خواهد شد برای ورود به خط بعدی از پرس و جو. -اگر چند خطی (multiline) مشخص شده باشد: برای اجرای query، در انتها سیمی کالن را وارد کنید و سپس Enter بزنید. اگر سیمی کالن از انتهای خط حذف می شد، از شما برای درج خط جدید query درخواست می شد. +فقط یک پرس و جو تنها اجرا می شود, بنابراین همه چیز پس از نقطه و ویرگول نادیده گرفته شده است. -تنها یک query اجرا می شود. پس همه چیز بعد از سیمی کالن ignore می شود. +شما می توانید مشخص کنید `\G` بجای یا بعد از نقطه و ویرگول. این نشان می دهد فرمت عمودی. در این قالب, هر مقدار بر روی یک خط جداگانه چاپ, مناسب است که برای جداول گسترده ای. این ویژگی غیر معمول برای سازگاری با خروجی زیر کلی اضافه شد. -شما میتوانید از `\G` به جای سیمی کالن یا بعد از سیمی کالن استفاده کنید. این علامت، فرمت Vertical را نشان می دهد. در این فرمت، هر مقدار در یک خط جدا چاپ می شود که برای جداول عریض مناسب است. این ویژگی غیرمعمول برای سازگاری با MySQL CLI اضافه شد. +خط فرمان بر اساس ‘replxx’ (شبیه به ‘readline’). به عبارت دیگر از میانبرهای صفحهکلید اشنایی استفاده میکند و تاریخ را حفظ میکند. تاریخ به نوشته شده است `~/.clickhouse-client-history`. -command line برا پایه ‘replxx’ می باشد. به عبارت دیگر، این محیط از shortcut های آشنا استفاده می کند و history دستورات را نگه می دار. history ها در فایل ~/.clickhouse-client-history نوشته می شوند. +به طور پیش فرض, فرمت استفاده می شود قبل از شکست است. 
شما می توانید فرمت را در بند فرمت پرس و جو یا با مشخص کردن تغییر دهید `\G` در پایان پرس و جو, با استفاده از `--format` یا `--vertical` استدلال در خط فرمان, و یا با استفاده از فایل پیکربندی مشتری. -به صورت پیش فرض فرمت خروجی PrettyCompact می باشد. شما میتوانید از طریق دستور FORMAT در یک query، یا با مشخص کردن `\G` در انتهای query، استفاده از آرگومان های `--format` یا `--vertical` یا از کانفیگ فایل کلاینت، فرمت خروجی را مشخص کنید. +برای خروج از مشتری, کنترل مطبوعات+د (یا کنترل+ج), و یا یکی از موارد زیر را وارد کنید به جای یک پرس و جو: “exit”, “quit”, “logout”, “exit;”, “quit;”, “logout;”, “q”, “Q”, “:q” -برای خروج از کلاینت، Ctrl-D (یا Ctrl+C) را فشار دهید؛ و یا یکی از دستورات زیر را به جای اجرای query اجرا کنید: «exit», «quit», «logout», «exit;», «quit;», «logout;», «q», «Q», «:q» +هنگامی که پردازش یک پرس و جو مشتری نشان می دهد: -در هنگام اجرای یک query، کلاینت موارد زیر را نمایش می دهد: +1. پیشرفت, که به روز شده است بیش از 10 بار در ثانیه (به طور پیش فرض). برای نمایش داده شد سریع پیشرفت ممکن است زمان نمایش داده می شود. +2. پرس و جو فرمت شده پس از تجزیه, برای اشکال زدایی. +3. نتیجه در قالب مشخص شده است. +4. تعداد خطوط در نتیجه زمان گذشت و سرعت متوسط پردازش پرس و جو. -1. Progress، که بیش از 10 بار در ثانیه بروز نخواهد شد ( به صورت پیش فرض). برای query های سریع، progress ممکن است زمانی برای نمایش پیدا نکند. -2. فرمت کردن query بعد از عملیات پارس کردن، به منظور دیباگ کردن query. -3. نمایش خروجی با توجه به نوع فرمت. -4. تعداد لاین های خروجی، زمان پاس شدن query، و میانگیم سرعت پردازش query. +شما می توانید یک پرس و جو طولانی با فشار دادن کنترل لغو+ج.با این حال, شما هنوز هم نیاز به کمی صبر کنید برای سرور به سقط درخواست. این ممکن است به لغو پرس و جو در مراحل خاص. اگر شما منتظر نیست و مطبوعات کنترل+ج بار دوم مشتری خروج خواهد شد. -شما میتوانید query های طولانی را با فشردن Ctrl-C کنسل کنید. هر چند، بعد از این کار همچنان نیاز به انتظار چند ثانیه ای برای قطع کردن درخواست توسط سرور می باشید. امکان کنسل کردن یک query در مراحل خاص وجود ندارد. اگر شما صبر نکنید و برای بار دوم Ctrl+C را وارد کنید از client خارج می شوید. +مشتری خط فرمان اجازه می دهد تا عبور داده های خارجی (جداول موقت خارجی) برای پرس و جو. برای کسب اطلاعات بیشتر به بخش مراجعه کنید “External data for query processing”. -کلاینت commant-line اجازه ی پاس دادن داده های external (جداول موقت external) را برای query ها می دهد. برای اطلاعات بیشتر به بخش «داده های External برای پردازش query» مراجعه کنید. +### نمایش داده شد با پارامترهای {#cli-queries-with-parameters} + +شما می توانید پرس و جو را با پارامترها ایجاد کنید و مقادیر را از برنامه مشتری منتقل کنید. این اجازه می دهد تا برای جلوگیری از قالب بندی پرس و جو با ارزش های پویا خاص در سمت سرویس گیرنده. به عنوان مثال: + +``` bash +$ clickhouse-client --param_parName="[1, 2]" -q "SELECT * FROM table WHERE a = {parName:Array(UInt16)}" +``` + +#### نحو پرس و جو {#cli-queries-with-parameters-syntax} + +یک پرس و جو را به طور معمول فرمت کنید و سپس مقادیر را که می خواهید از پارامترهای برنامه به پرس و جو در پرانتز در قالب زیر منتقل کنید قرار دهید: + +``` sql +{:} +``` + +- `name` — Placeholder identifier. In the console client it should be used in app parameters as `--param_ = value`. +- `data type` — [نوع داده](../sql_reference/data_types/index.md) از مقدار پارامتر برنامه. برای مثال یک ساختار داده مانند `(integer, ('string', integer))` می تواند داشته باشد `Tuple(UInt8, Tuple(String, UInt8))` نوع داده (شما همچنین می توانید از یکی دیگر استفاده کنید [عدد صحیح](../sql_reference/data_types/int_uint.md) انواع). 
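+
+Before the tuple example below, a minimal hedged sketch with a single scalar parameter (the parameter name `num` and the table function arguments are illustrative, not from the original page). The value would be supplied on the client command line as `--param_num=7`:
+
+``` sql
+-- returns the single row where `number` equals the passed parameter
+SELECT number FROM numbers(10) WHERE number = {num:UInt64}
+```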
+ +#### مثال {#example} + +``` bash +$ clickhouse-client --param_tuple_in_tuple="(10, ('dt', 10))" -q "SELECT * FROM table WHERE val = {tuple_in_tuple:Tuple(UInt8, Tuple(String, UInt8))}" +``` ## پیکربندی {#interfaces_cli_configuration} -شما میتوانید، پارامتر ها را به `clickhouse-client` (تمام پارامترها دارای مقدار پیش فرض هستند) از دو روش زیر پاس بدید: +شما می توانید پارامترها را به `clickhouse-client` (همه پارامترها یک مقدار پیش فرض) با استفاده از: -- از طریق Command Line +- از خط فرمان - گزینه های Command-line مقادیر پیش فرض در ستینگ و کانفیگ فایل را نادیده میگیرد. + گزینه های خط فرمان نادیده گرفتن مقادیر پیش فرض و تنظیمات در فایل های پیکربندی. -- کانفیگ فایل ها. +- فایل های پیکربندی. - ستینگ های داخل کانفیگ فایل، مقادیر پیش فرض را نادیده می گیرد. + تنظیمات در فایل های پیکربندی نادیده گرفتن مقادیر پیش فرض. -### گزینه های Command line {#gzynh-hy-command-line} +### گزینههای خط فرمان {#command-line-options} -- `--host, -h` -– نام سرور، به صورت پیش فرض ‘localhost’ است. شما میتوانید یکی از موارد نام و یا IPv4 و یا IPv6 را در این گزینه مشخص کنید. -- `--port` – پورت اتصال به ClickHouse. مقدار پیش فرض: 9000. دقت کنید که پرت اینترفیس HTTP و اینتفریس native متفاوت است. -- `--user, -u` – نام کاربری جهت اتصال. پیش فرض: default. -- `--password` – پسورد جهت اتصال. پیش فرض: خالی -- `--query, -q` – مشخص کردن query برای پردازش در هنگام استفاده از حالت non-interactive. -- `--database, -d` – انتخاب دیتابیس در بدو ورود به کلاینت. مقدار پیش فرض: دیتابیس مشخص شده در تنظیمات سرور (پیش فرض ‘default’) -- `--multiline, -m` – اگر مشخص شود، یعنی اجازه ی نوشتن query های چند خطی را بده. (بعد از Enter، query را ارسال نکن). -- `--multiquery, -n` – اگر مشخص شود، اجازه ی اجرای چندین query که از طریق جمع و حلقه ها جدا شده اند را می دهد. فقط در حالت non-interactive کار می کند. -- `--format, -f` مشخص کردن نوع فرمت خروجی -- `--vertical, -E` اگر مشخص شود، از فرمت Vertical برای نمایش خروجی استفاده می شود. این گزینه مشابه ‘–format=Vertical’ می باشد. در این فرمت، هر مقدار در یک خط جدید چاپ می شود، که در هنگام نمایش جداول عریض مفید است. -- `--time, -t` اگر مشخص شود، در حالت non-interactive زمان اجرای query در ‘stderr’ جاپ می شود. -- `--stacktrace` – اگر مشخص شود stack trase مربوط به اجرای query در هنگام رخ دادن یک exception چاپ می شود. -- `--config-file` – نام فایل پیکربندی. +- `--host, -h` -– The server name, ‘localhost’ به طور پیش فرض. شما می توانید از نام یا نشانی اینترنتی4 یا ایپو6 استفاده کنید. +- `--port` – The port to connect to. Default value: 9000. Note that the HTTP interface and the native interface use different ports. +- `--user, -u` – The username. Default value: default. +- `--password` – The password. Default value: empty string. +- `--query, -q` – The query to process when using non-interactive mode. +- `--database, -d` – Select the current default database. Default value: the current database from the server settings (‘default’ به طور پیش فرض). +- `--multiline, -m` – If specified, allow multiline queries (do not send the query on Enter). +- `--multiquery, -n` – If specified, allow processing multiple queries separated by semicolons. +- `--format, -f` – Use the specified default format to output the result. +- `--vertical, -E` – If specified, use the Vertical format by default to output the result. This is the same as ‘–format=Vertical’. در این قالب, هر مقدار بر روی یک خط جداگانه چاپ, مفید است که در هنگام نمایش جداول گسترده. +- `--time, -t` – If specified, print the query execution time to ‘stderr’ در حالت غیر تعاملی. +- `--stacktrace` – If specified, also print the stack trace if an exception occurs. 
+- `--config-file` – The name of the configuration file. +- `--secure` – If specified, will connect to server over secure connection. +- `--param_` — Value for a [پرسوجو با پارامترها](#cli-queries-with-parameters). -### فایل های پیکربندی {#fyl-hy-pykhrbndy} +### پروندههای پیکربندی {#configuration_files} -`clickhouse-client` به ترتیب اولویت زیر از اولین فایل موجود برای ست کردن تنظیمات استفاده می کند: +`clickhouse-client` با استفاده از اولین فایل موجود در زیر: -- مشخص شده در پارامتر `--config-file` +- تعریف شده در `--config-file` پارامتر. - `./clickhouse-client.xml` -- `\~/.clickhouse-client/config.xml` +- `~/.clickhouse-client/config.xml` - `/etc/clickhouse-client/config.xml` -مثالی از یک کانفیگ فایل - -
+نمونه ای از یک فایل پیکربندی:

``` xml
<config>
    <user>username</user>
    <password>password</password>
+    <secure>False</secure>
</config>
```

-[مقاله اصلی](https://clickhouse.tech/docs/fa/interfaces/cli/)
+[مقاله اصلی](https://clickhouse.tech/docs/en/interfaces/cli/)

diff --git a/docs/fa/interfaces/cpp.md b/docs/fa/interfaces/cpp.md
index 398d6e22687..b3d63565e83 100644
--- a/docs/fa/interfaces/cpp.md
+++ b/docs/fa/interfaces/cpp.md
@@ -1,5 +1,13 @@
-# C++ Client Library {#c-client-library}
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 24
+toc_title: "کتابخانه کلاینت C++"
+---

-See README at [clickhouse-cpp](https://github.com/ClickHouse/clickhouse-cpp) repository.
+# کتابخانه کلاینت C++ {#c-client-library}

-[Original article](https://clickhouse.tech/docs/fa/interfaces/cpp/)
+README را در مخزن [clickhouse-cpp](https://github.com/ClickHouse/clickhouse-cpp) ببینید.
+
+[مقاله اصلی](https://clickhouse.tech/docs/en/interfaces/cpp/)

diff --git a/docs/fa/interfaces/formats.md b/docs/fa/interfaces/formats.md
index 41c6a957fd4..a39fee6c45f 100644
--- a/docs/fa/interfaces/formats.md
+++ b/docs/fa/interfaces/formats.md
@@ -1,174 +1,227 @@
-
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 21
+toc_title: "\u0641\u0631\u0645\u062A \u0647\u0627\u06CC \u0648\u0631\u0648\u062F\u06CC \u0648 \u062E\u0631\u0648\u062C\u06CC"
+---

-# فرمت های Input و Output {#formats}
+# قالب های داده ورودی و خروجی {#formats}

-فرمت تعیین می کند که چگونه داده ها پس از اجرای SELECT (چگونه نوشته شده و چگونه توسط سرور فرمت شده) به شما بر می گردد، و چگونه آن برای INSERT ها پذیرفته شده (چگونه آن توسط سرور پارس و خوانده می شود).
+ClickHouse می تواند داده ها را در قالب های مختلف بپذیرد و برگرداند. یک قالب پشتیبانی شده برای ورودی می تواند برای تجزیه داده های `INSERT`، برای اجرای `SELECT` از جدول های مبتنی بر فایل مانند File، URL یا HDFS، یا برای خواندن یک دیکشنری خارجی استفاده شود. یک قالب پشتیبانی شده برای خروجی می تواند برای مرتب کردن نتایج یک `SELECT` و برای اجرای `INSERT` در جدول های مبتنی بر فایل استفاده شود.

-جدول زیر لیست فرمت های پشتیبانی شده برای هر نوع از query ها را نمایش می دهد.
+قالب های پشتیبانی شده عبارتند از:

-| Format | INSERT | SELECT |
-|---------------------------------------------------------------------------|--------|--------|
-| [TabSeparated](formats.md#tabseparated) | ✔ | ✔ |
-| [TabSeparatedRaw](formats.md#tabseparatedraw) | ✗ | ✔ |
-| [TabSeparatedWithNames](formats.md#tabseparatedwithnames) | ✔ | ✔ |
-| [TabSeparatedWithNamesAndTypes](formats.md#tabseparatedwithnamesandtypes) | ✔ | ✔ |
-| [Template](#format-template) | ✔ | ✔ |
-| [TemplateIgnoreSpaces](#templateignorespaces) | ✔ | ✗ |
-| [CSV](formats.md#csv) | ✔ | ✔ |
-| [CSVWithNames](formats.md#csvwithnames) | ✔ | ✔ |
-| [Values](formats.md#data-format-values) | ✔ | ✔ |
-| [Vertical](formats.md#vertical) | ✗ | ✔ |
-| [VerticalRaw](formats.md#verticalraw) | ✗ | ✔ |
-| [JSON](formats.md#json) | ✗ | ✔ |
-| [JSONCompact](formats.md#jsoncompact) | ✗ | ✔ |
-| [JSONEachRow](formats.md#jsoneachrow) | ✔ | ✔ |
-| [TSKV](formats.md#tskv) | ✔ | ✔ |
-| [Pretty](formats.md#pretty) | ✗ | ✔ |
-| [PrettyCompact](formats.md#prettycompact) | ✗ | ✔ |
-| [PrettyCompactMonoBlock](formats.md#prettycompactmonoblock) | ✗ | ✔ |
-| [PrettyNoEscapes](formats.md#prettynoescapes) | ✗ | ✔ |
-| [PrettySpace](formats.md#prettyspace) | ✗ | ✔ |
-| [Protobuf](#protobuf) | ✔ | ✔ |
-| [Avro](#data-format-avro) | ✔ | ✔ |
-| [AvroConfluent](#data-format-avro-confluent) | ✔ | ✗ |
-| [Parquet](#data-format-parquet) | ✔ | ✔ |
-| [ORC](#data-format-orc) | ✔ | ✗ |
-| [RowBinary](formats.md#rowbinary) | ✔ | ✔ |
-| [Native](formats.md#native) | ✔ | ✔ |
-| [Null](formats.md#null) | ✗ | ✔ |
-| [XML](formats.md#xml) | ✗ | ✔ |
-| [CapnProto](formats.md#capnproto) | ✔ | ✔ |

+| قالب | ورودی | خروجی |
+|---------------------------------------------------------|-------|-------|
+| [TabSeparated](#tabseparated) | ✔ | ✔ |
+| [TabSeparatedRaw](#tabseparatedraw) | ✗ | ✔ |
+| [TabSeparatedWithNames](#tabseparatedwithnames) | ✔ | ✔ |
+| [TabSeparatedWithNamesAndTypes](#tabseparatedwithnamesandtypes) | ✔ | ✔ |
+| [Template](#format-template) | ✔ | ✔ |
+| [TemplateIgnoreSpaces](#templateignorespaces) | ✔ | ✗ |
+| [CSV](#csv) | ✔ | ✔ |
+| [CSVWithNames](#csvwithnames) | ✔ | ✔ |
+| [CustomSeparated](#format-customseparated) | ✔ | ✔ |
+| [Values](#data-format-values) | ✔ | ✔ |
+| [Vertical](#vertical) | ✗ | ✔ |
+| [VerticalRaw](#verticalraw) | ✗ | ✔ |
+| [JSON](#json) | ✗ | ✔ |
+| [JSONCompact](#jsoncompact) | ✗ | ✔ |
+| [JSONEachRow](#jsoneachrow) | ✔ | ✔ |
+| [TSKV](#tskv) | ✔ | ✔ |
+| [Pretty](#pretty) | ✗ | ✔ |
+| [PrettyCompact](#prettycompact) | ✗ | ✔ |
+| [PrettyCompactMonoBlock](#prettycompactmonoblock) | ✗ | ✔ |
+| [PrettyNoEscapes](#prettynoescapes) | ✗ | ✔ |
+| [PrettySpace](#prettyspace) | ✗ | ✔ |
+| [Protobuf](#protobuf) | ✔ | ✔ |
+| [Avro](#data-format-avro) | ✔ | ✔ |
+| [AvroConfluent](#data-format-avro-confluent) | ✔ | ✗ |
+| [Parquet](#data-format-parquet) | ✔ | ✔ |
+| [ORC](#data-format-orc) | ✔ | ✗ |
+| [RowBinary](#rowbinary) | ✔ | ✔ |
+| [RowBinaryWithNamesAndTypes](#rowbinarywithnamesandtypes) | ✔ | ✔ |
+| [Native](#native) | ✔ | ✔ |
+| [Null](#null) | ✗ | ✔ |
+| [XML](#xml) | ✗ | ✔ |
+| [CapnProto](#capnproto) | ✔ | ✗ |

-## TabSeparated {#tabseparated}
+شما می توانید برخی از پارامترهای پردازش قالب ها را با تنظیمات ClickHouse کنترل کنید. برای اطلاعات بیشتر بخش [تنظیمات](../operations/settings/settings.md) را بخوانید.

-در فرمت TabSeparated، داده ها به صورت سطر نوشته می شوند. هر سطر شامل مقادیر جدا شده با tab می باشد. هر مقدار با یک tab دنبال می شود، به جز آخرین مقدار یک سطر، که با line feed دنبال می شود. line feed unix در همه جا مورد تسافده قرار می گیرد. آخرین سطر از خروجی هم باید شامل line feed در انتها باشد. مقادیر در فرمت متنی بدون enclose با کوتیشون، و یا escape با کاراکترهای ویژه، نوشته می شوند.
+## TabSeparated {#tabseparated}

-اعداد Integer با فرم decimal نوشته می شوند. اعداد می توانند شامل کاراکتر اضافه «+» در ابتدای خود باشند. (در هنگام پارس کردن نادیده گرفته می شوند، و در هنگام فرمت کردن، ثبت نمی شوند). اعداد غیر منفی نمیتوانند شامل علامت منفی باشند. در هنگام خواندن، اجازه داده می شود که رشته خالی را به عنوان صفر، پارس کرد، یا (برای تایپ های sign) یک رشته که شامل فقط یک علامت منفی است به عنوان صفر پارس کرد. اعدادی که در data type مربوطه فیت نشوند ممکن است به عددی متفاوت تبدیل شوند و پیغام خطایی هم نمایش ندهند.
+در قالب TabSeparated داده ها به صورت سطری نوشته می شوند. هر سطر شامل مقادیری است که با تب از هم جدا شده اند. هر مقدار با یک تب دنبال می شود، به جز اخرین مقدار سطر که با یک line feed دنبال می شود. در همه جا line feed یونیکس فرض می شود. اخرین سطر نیز باید در انتها شامل یک line feed باشد. مقادیر در قالب متنی، بدون محصور شدن در کوتیشن و با escape شدن کاراکترهای ویژه نوشته می شوند.

-اعداد Floating-point به فرم decimal نوشته می شوند. از دات به عنوان جدا کننده decimal استفاده می شود. نوشته های نمایشی مثل ‘inf’، ‘+inf’، ‘-inf’ و ‘nan’ پشتیبانی می شوند. ورودی اعداد floating-point می تواند با یه نقطه اعشار شروع یا پایان یابد. در هنگام فرمت، دقت اعداد floating-point ممکن است گم شوند. در هنگام پارس کردن، دقیقا نیازی به خواندن نزدیکترین عدد machine-representable نیست.
+این قالب تحت نام `TSV` نیز موجود است.

-Dates با فرمت YYY-MM-DD نوشته می شوند و به همین حالت پارس می شوند، اما با هر کاراکتری به عنوان جدا کننده. Dates به همراه زمان با فرمت YYYY-MM-DD hh:mm:ss نوشته می شوند و با همین فرمت پارس می شوند، اما با هر کاراکتری به عنوان جداکننده. این در منطقه زمان سیستم در زمانی که کلاینت یا سرور شروع می شود (بسته به اینکه کدام یک از داده ها را تشکیل می دهد) رخ می دهد. برای تاریخ همراه با زمان DST مشخص نمی شود. پس اگر یک دامپ دارای زمان DST باشد، دامپ، داده ها را به طور غیرمستقیم مطابقت نمی دهد و پارسینگ، یکی از دو ساعت را انتخاب خواهد کرد. در طول عملیات خواندن، تاریخ ها و تاریخ و ساعت های نادرست می توانند به صورت null و یا natural overflow پارس شوند، بدون اینکه پیغام خطایی نمایش دهند.
+قالب `TabSeparated` برای پردازش داده ها با برنامه ها و اسکریپت های سفارشی مناسب است. این قالب به طور پیش فرض در رابط HTTP و در حالت دسته ای کلاینت خط فرمان استفاده می شود. همچنین امکان انتقال داده ها بین DBMS های مختلف را فراهم می کند. مثلا می توانید از MySQL یک دامپ بگیرید و ان را در ClickHouse بارگذاری کنید، یا بالعکس.
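+
+A small hedged illustration (added here; the column values are made up): selecting one constant row with `FORMAT TabSeparated` shows the layout described above, values separated by tabs and the row terminated by a line feed:
+
+``` sql
+SELECT 1 AS id, 'hello' AS s, today() AS d FORMAT TabSeparated
+```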
-به عنوان یک استثنا، پارس کردن تاریخ به همراه ساعت، اگر مقدار دقیقا شامل 10 عدد decimal باشد، به عنوان فرمت unix timestamp پشتیبانی خواهد کرد. خروجی وابسته به time-zone نمی باشد. فرمت های YYYY-MM-DD hh: mm: ss و NNNNNNNNNN به صورت خودکار تمایز می یابند. - -رشته های دارای کاراکتر های ویژه backslash-escaped چاپ می شوند. escape های در ادامه برای خروجی استفاده می شوند: `\b`، `\f`، `\r`، `\n`، `\t`، `\0`, `\'`، `\\`. پارسر همچنین از `\a`، `\v`، و `\xHH` (hex escape) و هر `\c` پشتیبانی می کند. بدین ترتیب خواندن داده ها از فرمت line feed که می تواند به صورت `\n` یا `\` نوشته شود پشتیبانی می کند. برای مثال، رشته ی `Hello world` به همراه line feed بین کلمات به جای space می تواند به هر یک از حالات زیر پارس شود:: - -
    - - Hello\nworld - - Hello\ - world - -
    - -نوع دوم به دلیل پشتیبانی MySQL در هنگام نوشتن دامپ به صورت tab-separate، پشتیبانی می شود. - -حداقل مجموعه از کاراکترهایی که در هنگام پاس دادن داده در فرمت TabSeperate نیاز به escape آن دارید: tab، line feed (LF) بک اسلش. - -فقط مجموعه ی کمی از نماد ها escape می شوند. شما به راحتی می توانید بر روی مقدار رشته که در ترمینال شما در خروجی نمایش داده می شود حرکت کنید. - -آرایه ها به صورت لیستی از مقادیر که به comma از هم جدا شده اند و در داخل براکت قرار گرفته اند نوشته می شوند. آیتم های عددی در آرای به صورت نرمال فرمت می شوند، اما تاریخ و تاریخ با ساعت و رشته ها در داخل تک کوتیشن به همراه قوانین escape که بالا اشاره شد، نوشته می شوند. - -فرمت TabSeparate برای پردازش داده ها با استفاده از برنامه های شخصی سازی شده و اسکریپت ها مناسب است. TabSeparate به صورت پیش فرض در HTTP interface و در حالت batch کلاینت command-line مورد استفاده قرار می گیرد. همچنین این فرمت اجازه ی انتقال داده ها بین DBMS های مختلف را می دهد. برای مثال، شما می توانید از MySQL با این روش دامپ بگیرید و آن را در ClickHouse یا vice versa آپلود کنید. - -فرمت TabSeparated از خروحی total values (هنگام استفاده از WITH TOTALS) و extreme values (در هنگامی که ‘extreme’ برابر با 1 است) پشتیبانی می کند. در این موارد، total value و extreme بعد از داده های اصلی در خروجی می آیند. نتایج اصلی، total values و extreme همگی با یک empty line از هم جدا می شوند. مثال: - -
    +این `TabSeparated` فرمت پشتیبانی خروجی ارزش کل (هنگام استفاده با بالغ) و ارزش های شدید (وقتی که ‘extremes’ به مجموعه 1). در این موارد, کل ارزش ها و افراط خروجی پس از داده های اصلی. نتیجه اصلی, کل ارزش, و افراط و از یکدیگر توسط یک خط خالی از هم جدا. مثال: ``` sql SELECT EventDate, count() AS c FROM test.hits GROUP BY EventDate WITH TOTALS ORDER BY EventDate FORMAT TabSeparated`` ``` - 2014-03-17 1406958 - 2014-03-18 1383658 - 2014-03-19 1405797 - 2014-03-20 1353623 - 2014-03-21 1245779 - 2014-03-22 1031592 - 2014-03-23 1046491 +``` text +2014-03-17 1406958 +2014-03-18 1383658 +2014-03-19 1405797 +2014-03-20 1353623 +2014-03-21 1245779 +2014-03-22 1031592 +2014-03-23 1046491 - 0000-00-00 8873898 +0000-00-00 8873898 - 2014-03-17 1031592 - 2014-03-23 1406958 +2014-03-17 1031592 +2014-03-23 1406958 +``` -
    +### قالببندی داده {#data-formatting} -این فرمت نیز تحت نام `TSV` موجود است. +اعداد صحیح به شکل اعشاری نوشته شده است. اعداد می توانند حاوی اضافی باشند “+” شخصیت در ابتدا (هنگام تجزیه نادیده گرفته شد و هنگام قالب بندی ثبت نشده است). اعداد غیر منفی نمی توانند علامت منفی داشته باشند. هنگام خواندن, مجاز است به تجزیه یک رشته خالی به عنوان یک صفر, یا (برای انواع امضا شده) یک رشته متشکل از فقط یک علامت منفی به عنوان یک صفر. اعداد است که به نوع داده مربوطه مناسب نیست ممکن است به عنوان شماره های مختلف تجزیه, بدون پیغام خطا. -## TabSeparatedRaw {#tabseparatedraw} +اعداد ممیز شناور به شکل اعشاری نوشته شده است. نقطه به عنوان جداکننده اعشاری استفاده می شود. ورودی های نمایشی پشتیبانی می شوند ‘inf’, ‘+inf’, ‘-inf’ و ‘nan’. ورود اعداد ممیز شناور ممکن است شروع یا پایان با یک نقطه اعشار. +در قالب بندی, دقت ممکن است در اعداد ممیز شناور از دست داده. +در تجزیه, این است که به شدت مورد نیاز برای خواندن نزدیکترین شماره ماشین نمایندگی. -تفاوت آن با `TabSeperated` در این است که در این فرمت سطرها بدون escape نوشته می شوند. این فرمت فقط مناسب خروجی نتایج query ها می باشد، نه برای پارس کردن (دریافت داده ها و درج آن در جدول). +تاریخ در فرمت یی-میلی متر-دی دی دی دی اس نوشته شده و تجزیه در قالب همان, اما با هر شخصیت به عنوان جدا. +تاریخ با زمان در قالب نوشته شده است `YYYY-MM-DD hh:mm:ss` و تجزیه در قالب همان, اما با هر شخصیت به عنوان جدا. +این همه در منطقه زمانی سیستم در زمان مشتری یا سرور شروع می شود (بسته به نوع فرمت داده ها) رخ می دهد. برای تاریخ با زمان, نور روز صرفه جویی در زمان مشخص نشده است. بنابراین اگر یک روگرفت است بار در طول نور روز صرفه جویی در زمان, روگرفت به صراحت مطابقت با داده ها نیست, و تجزیه یکی از دو بار را انتخاب کنید. +در طول یک عملیات به عنوان خوانده شده, تاریخ نادرست و تاریخ با زمان را می توان با سرریز طبیعی و یا تاریخ به عنوان تهی و بار تجزیه, بدون پیغام خطا. -همچنین این فرمت تحت عنوان `TSVRaw`وجود دارد. +به عنوان یک استثنا, تجزیه تاریخ با زمان نیز در قالب برچسب زمان یونیکس پشتیبانی, اگر از دقیقا شامل 10 رقم اعشار. نتیجه این است زمان وابسته به منطقه نیست. فرمت های یی-ام-دی-دی-اچ: میلی متر: اس اس و نونن به طور خودکار متفاوت هستند. -## TabSeparatedWithNames {#tabseparatedwithnames} +رشته ها خروجی با شخصیت های خاص بک اسلش فرار. توالی فرار زیر برای خروجی استفاده می شود: `\b`, `\f`, `\r`, `\n`, `\t`, `\0`, `\'`, `\\`. تجزیه همچنین از توالی ها پشتیبانی می کند `\a`, `\v` و `\xHH` (توالی فرار سحر و جادو) و هر `\c` دنباله هایی که `c` هر شخصیت (این توالی ها به تبدیل `c`). بدین ترتیب, خواندن داده ها پشتیبانی از فرمت های که یک خوراک خط را می توان به عنوان نوشته شده `\n` یا `\`, و یا به عنوان یک خوراک خط. مثلا, رشته `Hello world` با خوراک خط بین کلمات به جای فضا را می توان در هر یک از تغییرات زیر تجزیه شده است: -تفاوت آن با فرمت `TabSeparated` در این است که، در این فرمت نام ستون ها در سطر اول قرار می گیرد. در طول پارس کردن، سطر اول به طور کامل نادیده گرفته می شود. شما نمی توانید نام ستون ها را برای تعیین موقعیت آنها یا بررسی صحت آنها استفاده کنید. (پشتیبانی از پارس کردن سطر header ممکن است در آینده اضافه شود.) +``` text +Hello\nworld -همچنین این فرمت تحت عنوان `TSVWithNames`وجود دارد. +Hello\ +world +``` -## TabSeparatedWithNamesAndTypes {#tabseparatedwithnamesandtypes} +نوع دوم پشتیبانی می شود زیرا خروجی زیر هنگام نوشتن افسردگی های جدا شده از تب استفاده می کند. -تفاوت آن با `TabSeparated` در این است که در این فرمت نام ستون ها در سطر اول نوشته می شود، و type ستون ها در سطر دوم نوشته می شود. در طی پارسینگ، سطر اول و دوم به طور کامل نادیده گرفته می شوند. +حداقل مجموعه ای از شخصیت های که شما نیاز به فرار در هنگام عبور داده ها در قالب جدول پخش: باریکه, خوراک خط (ال اف) و بک اسلش. 
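+
+As a hedged sketch of the escaping rule above (an added example; the literal is illustrative), a string value containing a line feed is written back as the two characters `\n`:
+
+``` sql
+-- the single-quoted literal contains a real line feed;
+-- TabSeparated output escapes it back to \n
+SELECT 'Hello\nworld' AS s FORMAT TabSeparated
+```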
-همچنین این فرمت تحت عنوان `TSVWithNamesAndTypes`وجود دارد. +فقط یک مجموعه کوچک از علامت فرار. شما به راحتی می توانید بر روی یک مقدار رشته تلو تلو خوردن که ترمینال خود را در خروجی خراب کردن. -## Template {#format-template} +ارریس به عنوان یک لیست از ارزش کاما از هم جدا در براکت مربع نوشته شده است. موارد شماره در مجموعه به طور معمول فرمت می شوند. `Date` و `DateTime` انواع در نقل قول تک نوشته شده است. رشته ها در نقل قول های تک با قوانین فرار همان بالا نوشته شده است. -This format allows to specify a custom format string with placeholders for values with specified escaping rule. +[NULL](../sql_reference/syntax.md) به عنوان فرمت `\N`. -It uses settings `format_schema`, `format_schema_rows`, `format_schema_rows_between_delimiter` and some settings of other formats (e.g. `output_format_json_quote_64bit_integers` when using `JSON` escaping, see further) +هر عنصر [تو در تو](../sql_reference/data_types/nested_data_structures/nested.md) سازه ها به عنوان مجموعه ای نشان داده شده است. -Format string `format_schema_rows` specifies rows format with the following syntax: +به عنوان مثال: + +``` sql +CREATE TABLE nestedt +( + `id` UInt8, + `aux` Nested( + a UInt8, + b String + ) +) +ENGINE = TinyLog +``` + +``` sql +INSERT INTO nestedt Values ( 1, [1], ['a']) +``` + +``` sql +SELECT * FROM nestedt FORMAT TSV +``` + +``` text +1 [1] ['a'] +``` + +## اطلاعات دقیق {#tabseparatedraw} + +متفاوت از `TabSeparated` فرمت که در ردیف بدون فرار نوشته شده است. +این فرمت فقط برای خروجی یک نتیجه پرس و جو مناسب است, اما نه برای تجزیه (بازیابی اطلاعات برای وارد کردن در یک جدول). + +این فرمت نیز تحت نام موجود است `TSVRaw`. + +## اطلاعات دقیق {#tabseparatedwithnames} + +متفاوت از `TabSeparated` فرمت در که نام ستون در سطر اول نوشته شده است. +در تجزیه, ردیف اول به طور کامل نادیده گرفته. شما می توانید نام ستون برای تعیین موقعیت خود و یا برای بررسی صحت خود استفاده کنید. +(پشتیبانی از تجزیه ردیف هدر ممکن است در اینده اضافه شده است.) + +این فرمت نیز تحت نام موجود است `TSVWithNames`. + +## اطلاعات دقیق {#tabseparatedwithnamesandtypes} + +متفاوت از `TabSeparated` فرمت در که نام ستون به سطر اول نوشته شده است, در حالی که انواع ستون در ردیف دوم هستند. +در تجزیه, ردیف اول و دوم به طور کامل نادیده گرفته. + +این فرمت نیز تحت نام موجود است `TSVWithNamesAndTypes`. + +## قالب {#format-template} + +این فرمت اجازه می دهد تا تعیین یک رشته فرمت سفارشی با متغیرهایی برای ارزش با یک قاعده فرار مشخص شده است. + +با استفاده از تنظیمات `format_template_resultset`, `format_template_row`, `format_template_rows_between_delimiter` and some settings of other formats (e.g. `output_format_json_quote_64bit_integers` هنگام استفاده از `JSON` فرار, مشاهده بیشتر) + +تنظیم `format_template_row` مشخص مسیر به فایل, که شامل رشته فرمت برای ردیف با نحو زیر: `delimiter_1${column_1:serializeAs_1}delimiter_2${column_2:serializeAs_2} ... delimiter_N`, - where `delimiter_i` is a delimiter between values (`$` symbol can be escaped as `$$`), - `column_i` is a name of a column whose values are to be selected or inserted (if empty, then column will be skipped), - `serializeAs_i` is an escaping rule for the column values. The following escaping rules are supported: +کجا `delimiter_i` یک جداساز بین مقادیر است (`$` نماد را می توان به عنوان فرار `$$`), +`column_i` یک نام یا فهرست یک ستون است که مقادیر انتخاب شده یا درج شده است (اگر خالی باشد سپس ستون حذف خواهد شد), +`serializeAs_i` یک قانون فرار برای مقادیر ستون است. 
قوانین فرار زیر پشتیبانی می شوند: - - `CSV`, `JSON`, `XML` (similarly to the formats of the same names) - - `Escaped` (similarly to `TSV`) - - `Quoted` (similarly to `Values`) - - `Raw` (without escaping, similarly to `TSVRaw`) - - `None` (no escaping rule, see further) +- `CSV`, `JSON`, `XML` (به طور مشابه به فرمت های نام های مشابه) +- `Escaped` (به طور مشابه به `TSV`) +- `Quoted` (به طور مشابه به `Values`) +- `Raw` (بدون فرار, به طور مشابه به `TSVRaw`) +- `None` (هیچ قانون فرار, مشاهده بیشتر) - If escaping rule is omitted, then`None` will be used. `XML` and `Raw` are suitable only for output. +اگر یک قانون فرار حذف شده است, سپس `None` استفاده خواهد شد. `XML` و `Raw` فقط برای خروجی مناسب است. - So, for the following format string: +بنابراین, برای رشته فرمت زیر: `Search phrase: ${SearchPhrase:Quoted}, count: ${c:Escaped}, ad price: $$${price:JSON};` - the values of `SearchPhrase`, `c` and `price` columns, which are escaped as `Quoted`, `Escaped` and `JSON` will be printed (for select) or will be expected (for insert) between `Search phrase: `, `, count: `, `, ad price: $` and `;` delimiters respectively. For example: +ارزش `SearchPhrase`, `c` و `price` ستون ها که به عنوان فرار `Quoted`, `Escaped` و `JSON` چاپ خواهد شد (برای انتخاب) و یا انتظار می رود (برای درج) میان `Search phrase:`, `, count:`, `, ad price: $` و `;` delimiters بود. به عنوان مثال: - `Search phrase: 'bathroom interior design', count: 2166, ad price: $3;` +`Search phrase: 'bathroom interior design', count: 2166, ad price: $3;` -The `format_schema_rows_between_delimiter` setting specifies delimiter between rows, which is printed (or expected) after every row except the last one (`\n` by default) +این `format_template_rows_between_delimiter` تنظیم مشخص جداساز بین ردیف, که چاپ شده است (یا انتظار می رود) بعد از هر سطر به جز یکی از گذشته (`\n` به طور پیش فرض) -Format string `format_schema` has the same syntax as `format_schema_rows` and allows to specify a prefix, a suffix and a way to print some additional information. It contains the following placeholders instead of column names: +تنظیم `format_template_resultset` مشخص کردن مسیر فایل که شامل یک رشته فرمت برای resultset. رشته فرمت برای حاصل است نحو همان رشته فرمت برای ردیف و اجازه می دهد تا برای مشخص کردن یک پیشوند, پسوند و یک راه برای چاپ برخی از اطلاعات اضافی. این شامل متغیرهایی زیر به جای نام ستون: -- `data` is the rows with data in `format_schema_rows` format, separated by `format_schema_rows_between_delimiter`. This placeholder must be the first placeholder in the format string. -- `totals` is the row with total values in `format_schema_rows` format (when using WITH TOTALS) -- `min` is the row with minimum values in `format_schema_rows` format (when extremes is set to 1) -- `max` is the row with maximum values in `format_schema_rows` format (when extremes is set to 1) -- `rows` is the total number of output rows -- `rows_before_limit` is the minimal number of rows there would have been without LIMIT. Output only if the query contains LIMIT. If the query contains GROUP BY, rows\_before\_limit\_at\_least is the exact number of rows there would have been without a LIMIT. -- `time` is the request execution time in seconds -- `rows_read` is the number of rows have been read -- `bytes_read` is the number of bytes (uncompressed) have been read +- `data` ردیف با داده ها در `format_template_row` قالب, جدا شده توسط `format_template_rows_between_delimiter`. این حفره یا سوراخ باید اولین حفره یا سوراخ در رشته فرمت باشد. 
+- `totals` ردیف با کل ارزش ها در `format_template_row` قالب (هنگام استفاده با مجموع) +- `min` ردیف با حداقل مقدار در `format_template_row` فرمت (هنگامی که افراط و به مجموعه 1) +- `max` ردیف با حداکثر ارزش در است `format_template_row` فرمت (هنگامی که افراط و به مجموعه 1) +- `rows` تعداد کل ردیف خروجی است +- `rows_before_limit` است حداقل تعداد ردیف وجود دارد که بدون محدودیت بوده است. خروجی تنها در صورتی که پرس و جو شامل حد. اگر پرس و جو شامل گروه های, ردیف ها\_افور\_لیمیت\_تلاست تعداد دقیق ردیف وجود دارد که بدون محدودیت بوده است. +- `time` زمان اجرای درخواست در ثانیه است +- `rows_read` است تعداد ردیف خوانده شده است +- `bytes_read` تعداد بایت (غیر فشرده) خوانده شده است -The placeholders `data`, `totals`, `min` and `max` must not have escaping rule specified (or `None` must be specified explicitly). The remaining placeholders may have any escaping rule specified. -If the `format_schema` setting is an empty string, `${data}` is used as default value. -For insert queries format allows to skip some columns or some fields if prefix or suffix (see example). +متغیرهایی `data`, `totals`, `min` و `max` باید فرار حکومت مشخص نیست (یا `None` باید به صراحت مشخص شود). متغیرهایی باقی مانده ممکن است هر گونه قانون فرار مشخص شده اند. +اگر `format_template_resultset` تنظیم یک رشته خالی است, `${data}` به عنوان مقدار پیش فرض استفاده می شود. +برای قرار دادن نمایش داده شد فرمت اجازه می دهد تا پرش برخی از ستون ها و یا برخی از زمینه های اگر پیشوند یا پسوند (به عنوان مثال مراجعه کنید). -`Select` example: +انتخاب نمونه: ``` sql -SELECT SearchPhrase, count() AS c FROM test.hits GROUP BY SearchPhrase ORDER BY c DESC LIMIT 5 -FORMAT Template -SETTINGS format_schema = ' +SELECT SearchPhrase, count() AS c FROM test.hits GROUP BY SearchPhrase ORDER BY c DESC LIMIT 5 FORMAT Template SETTINGS +format_template_resultset = '/some/path/resultset.format', format_template_row = '/some/path/row.format', format_template_rows_between_delimiter = '\n ' +``` + +`/some/path/resultset.format`: + +``` text + Search phrases
    @@ -180,11 +233,17 @@ SETTINGS format_schema = '
    Search phrases
    Processed ${rows_read:XML} rows in ${time:XML} sec -', -format_schema_rows = ' ${SearchPhrase:XML} ${с:XML} ', -format_schema_rows_between_delimiter = '\n ' + ``` +`/some/path/row.format`: + +``` text + ${0:XML} ${1:XML} +``` + +نتیجه: + ``` html Search phrases @@ -205,87 +264,125 @@ format_schema_rows_between_delimiter = '\n ' ``` -`Insert` example: +درج مثال: - Some header - Page views: 5, User id: 4324182021466249494, Useless field: hello, Duration: 146, Sign: -1 - Page views: 6, User id: 4324182021466249494, Useless field: world, Duration: 185, Sign: 1 - Total rows: 2 +``` text +Some header +Page views: 5, User id: 4324182021466249494, Useless field: hello, Duration: 146, Sign: -1 +Page views: 6, User id: 4324182021466249494, Useless field: world, Duration: 185, Sign: 1 +Total rows: 2 +``` ``` sql INSERT INTO UserActivity FORMAT Template SETTINGS -format_schema = 'Some header\n${data}\nTotal rows: ${:CSV}\n', -format_schema_rows = 'Page views: ${PageViews:CSV}, User id: ${UserID:CSV}, Useless field: ${:CSV}, Duration: ${Duration:CSV}, Sign: ${Sign:CSV}' +format_template_resultset = '/some/path/resultset.format', format_template_row = '/some/path/row.format' ``` -`PageViews`, `UserID`, `Duration` and `Sign` inside placeholders are names of columns in the table. Values after `Useless field` in rows and after `\nTotal rows:` in suffix will be ignored. -All delimiters in the input data must be strictly equal to delimiters in specified format strings. +`/some/path/resultset.format`: -## TemplateIgnoreSpaces {#templateignorespaces} +``` text +Some header\n${data}\nTotal rows: ${:CSV}\n +``` -This format is suitable only for input. -Similar to `Template`, but skips whitespace characters between delimiters and values in the input stream. However, if format strings contain whitespace characters, these characters will be expected in the input stream. Also allows to specify empty placeholders (`${}` or `${:None}`) to split some delimiter into separate parts to ignore spaces between them. Such placeholders are used only for skipping whitespace characters. -It’s possible to read `JSON` using this format, if values of columns have the same order in all rows. For example, the following request can be used for inserting data from output example of format [JSON](#json): +`/some/path/row.format`: + +``` text +Page views: ${PageViews:CSV}, User id: ${UserID:CSV}, Useless field: ${:CSV}, Duration: ${Duration:CSV}, Sign: ${Sign:CSV} +``` + +`PageViews`, `UserID`, `Duration` و `Sign` در داخل متغیرهایی نام ستون در جدول هستند. مقادیر پس از `Useless field` در ردیف و بعد از `\nTotal rows:` در پسوند نادیده گرفته خواهد شد. +همه delimiters در داده های ورودی باید به شدت در برابر delimiters در فرمت مشخص شده رشته. + +## پاسخ تمپلیتینی {#templateignorespaces} + +این فرمت فقط برای ورودی مناسب است. +مشابه به `Template`, اما پرش کاراکتر فضای سفید بین جداکننده ها و ارزش ها در جریان ورودی. با این حال, اگر رشته فرمت حاوی کاراکتر فضای سفید, این شخصیت خواهد شد در جریان ورودی انتظار می رود. همچنین اجازه می دهد برای مشخص متغیرهایی خالی (`${}` یا `${:None}`) به تقسیم برخی از جداساز به قطعات جداگانه به چشم پوشی از فضاهای بین. چنین متغیرهایی تنها برای پرش شخصیت فضای سفید استفاده می شود. +خواندن ممکن است `JSON` با استفاده از این فرمت, اگر ارزش ستون همان نظم در تمام ردیف. 
برای مثال درخواست زیر می تواند مورد استفاده قرار گیرد برای قرار دادن داده ها از خروجی نمونه ای از فرمت [JSON](#json): ``` sql INSERT INTO table_name FORMAT TemplateIgnoreSpaces SETTINGS -format_schema = '{${}"meta"${}:${:JSON},${}"data"${}:${}[${data}]${},${}"totals"${}:${:JSON},${}"extremes"${}:${:JSON},${}"rows"${}:${:JSON},${}"rows_before_limit_at_least"${}:${:JSON}${}}', -format_schema_rows = '{${}"SearchPhrase"${}:${}${phrase:JSON}${},${}"c"${}:${}${cnt:JSON}${}}', -format_schema_rows_between_delimiter = ',' +format_template_resultset = '/some/path/resultset.format', format_template_row = '/some/path/row.format', format_template_rows_between_delimiter = ',' +``` + +`/some/path/resultset.format`: + +``` text +{${}"meta"${}:${:JSON},${}"data"${}:${}[${data}]${},${}"totals"${}:${:JSON},${}"extremes"${}:${:JSON},${}"rows"${}:${:JSON},${}"rows_before_limit_at_least"${}:${:JSON}${}} +``` + +`/some/path/row.format`: + +``` text +{${}"SearchPhrase"${}:${}${phrase:JSON}${},${}"c"${}:${}${cnt:JSON}${}} ``` ## TSKV {#tskv} -مشابه فرمت TabSeparated، اما خروجی به صورت name=value می باشد. نام ها مشابه روش TabSeparated، escape می شوند، و همچنین = symbol هم escape می شود. +شبیه به جدول, اما خروجی یک مقدار در نام=فرمت ارزش. نام ها به همان شیوه در قالب جدول فرار, و = نماد نیز فرار. -
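+
+A hedged sketch of a query that yields output of this shape (the sample below is from the original page; `test.hits` is the dataset used throughout it, and the exact ORDER BY and LIMIT are assumptions):
+
+``` sql
+SELECT SearchPhrase, count() FROM test.hits GROUP BY SearchPhrase ORDER BY count() DESC LIMIT 10 FORMAT TSKV
+```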
    +``` text +SearchPhrase= count()=8267016 +SearchPhrase=bathroom interior design count()=2166 +SearchPhrase=yandex count()=1655 +SearchPhrase=2014 spring fashion count()=1549 +SearchPhrase=freeform photos count()=1480 +SearchPhrase=angelina jolie count()=1245 +SearchPhrase=omsk count()=1112 +SearchPhrase=photos of dog breeds count()=1091 +SearchPhrase=curtain designs count()=1064 +SearchPhrase=baku count()=1000 +``` - SearchPhrase= count()=8267016 - SearchPhrase=bathroom interior design count()=2166 - SearchPhrase=yandex count()=1655 - SearchPhrase=spring 2014 fashion count()=1549 - SearchPhrase=freeform photos count()=1480 - SearchPhrase=angelina jolia count()=1245 - SearchPhrase=omsk count()=1112 - SearchPhrase=photos of dog breeds count()=1091 - SearchPhrase=curtain design count()=1064 - SearchPhrase=baku count()=1000 +[NULL](../sql_reference/syntax.md) به عنوان فرمت `\N`. -
    +``` sql +SELECT * FROM t_null FORMAT TSKV +``` -وقتی تعداد زیادی از ستون ها وجود دارد، این فرمت بی فایده است، و در حالت کلی دلیلی بر استفاده از این فرمت در این مواقع وجود ندارد. این فرمت در بعضی از دپارتمان های Yandex استفاده می شد. +``` text +x=1 y=\N +``` -خروجی داده ها و پارس کردن هر دو در این فرمت پشتیبانی می شوند. برای پارس کردن، هر ترتیبی برای مقادیر ستون های مختلف پشتیبانی می شود. حذف بعضی از مقادیر قابل قبول است. این مقادیر با مقادیر پیش فرض خود برابر هستند. در این مورد، صفر و سطر خالی، توسط مقادیر پیش فرض پر می شوند. مقادیر پیچیده ای که می تواند در جدول مشخص شود به عنوان پیش فرض در این فرمت پشتیبانی نمیشوند. +هنگامی که تعداد زیادی از ستون های کوچک وجود دارد, این فرمت بی اثر است, و به طور کلی هیچ دلیلی برای استفاده وجود دارد. با این وجود از لحاظ کارایی بدتر از جیسوناکرو نیست. -پارس کردن، اجازه می دهد که فیلد اضافه ی `tskv` بدون علامت و مقدار وجود داشته باشد. این فیلد نادیده گرفته می شود. +Both data output and parsing are supported in this format. For parsing, any order is supported for the values of different columns. It is acceptable for some values to be omitted – they are treated as equal to their default values. In this case, zeros and blank rows are used as default values. Complex values that could be specified in the table are not supported as defaults. + +تجزیه اجازه می دهد تا حضور زمینه های اضافی `tskv` بدون علامت مساوی یا ارزش. این زمینه نادیده گرفته شده است. ## CSV {#csv} -Comma Separated Values format ([RFC](https://tools.ietf.org/html/rfc4180)). +با کاما از هم جدا فرمت ارزش ([RFC](https://tools.ietf.org/html/rfc4180)). -زمانی که از این روش برای فرمت استفاده می شود، سطر ها با دابل کوتیشن enclosed می شوند. دابل کوتیشن داخل یک رشته خروجی آن به صورت دو دابل کوتیشن در یک سطر است. قانون دیگری برای escape کردن کاراکترها وجود ندارد. تاریخ و تاریخ-ساعت در دابل کوتیشن ها enclosed می شوند. اعداد بدون دابل کوتیشن در خروجی می آیند. مقادیر با جدا کننده \* مشخص می شوند. سطر ها با استفاده از line feed (LF) جدا می شوند. آرایه ها در csv به این صورت serialize می شوند: ابتدا آرایه به یک رشته با فرمت TabSeparate سریالایز می شوند، و سپس رشته ی حاصل در دابل کوتیشن برای csv ارسال می شود. Tuple ها در فرمت CSV در ستون های جدا سریالایز می شوند (به این ترتیب، nest ها در tuble از دست میروند) +هنگام قالب بندی, ردیف در دو نقل قول محصور. نقل قول دو در داخل یک رشته خروجی به عنوان دو نقل قول دو در یک ردیف است. هیچ قانون دیگری برای فرار از شخصیت وجود دارد. تاریخ و تاریخ زمان در دو نقل قول محصور شده است. اعداد خروجی بدون نقل قول. ارزش ها توسط یک شخصیت جداساز از هم جدا, که `,` به طور پیش فرض. شخصیت جداساز در تنظیمات تعریف شده است [\_مخفی کردن \_قابلیت \_جدید](../operations/settings/settings.md#settings-format_csv_delimiter). ردیف ها با استفاده از خوراک خط یونیکس جدا می شوند. ارریس در سی سی اس وی به شرح زیر مرتب شده است: ابتدا مجموعه ای به یک رشته به عنوان در قالب تبسپار شده مرتب شده است و سپس رشته حاصل خروجی به سی سی اس وی در دو نقل قول است. دسته بندی ها در قالب سی اس وی به صورت ستون های جداگانه مرتب می شوند (به این معنا که لانه خود را در تاپل از دست داده است). -
    +``` bash +$ clickhouse-client --format_csv_delimiter="|" --query="INSERT INTO test.csv FORMAT CSV" < data.csv +``` - clickhouse-client --format_csv_delimiter="|" --query="INSERT INTO test.csv FORMAT CSV" < data.csv +\* به طور پیش فرض, جداساز است `,`. دیدن [\_مخفی کردن \_قابلیت \_جدید](../operations/settings/settings.md#settings-format_csv_delimiter) تنظیم برای اطلاعات بیشتر. -
    +هنگامی که تجزیه, تمام مقادیر را می توان یا با یا بدون نقل قول تجزیه. هر دو نقل قول دو و تک پشتیبانی می شوند. ردیف همچنین می توانید بدون نقل قول مرتب شود. در این مورد, به شخصیت جداساز و یا خوراک خط تجزیه (کروم و یا ال اف). در هنگام تجزیه ردیف بدون نقل قول فضاهای پیشرو و انتهایی و زبانه ها نادیده گرفته می شوند. برای اشتراک خط, یونیکس (کلیک کنید), پنجره ها (کروم ال اف) و سیستم عامل مک کلاسیک (کروم ال اف) انواع پشتیبانی می شوند. -\*به صورت پیش فرض — `,`. برای اطلاعات بیشتر [format\_csv\_delimiter](/operations/settings/settings/#settings-format_csv_delimiter) را ببینید. +مقادیر ورودی بدون علامت خالی با مقادیر پیش فرض برای ستون های مربوطه جایگزین می شوند +[\_پوشه های ورودی و خروجی](../operations/settings/settings.md#session_settings-input_format_defaults_for_omitted_fields) +فعال است. -در هنگام پارس کردن، تمامی مقادیر می توانند با کوتیشن یا بدون کوتیشن پارس شوند. تک کوتیشن و دابل کوتیشن پشتیبانی می شود. سطر ها می توانند بدون کوتیشن تنظیم شوند. در این مورد سطر ها، جدا کننده ها با (CR یا LF) پارس می شوند. در موارد نقض RFC، در هنگام پارس کردن سطر ها بدون کوتیشن، فضاها و tab های پیشین نادید گرفته می شوند. برای line feed، یونیکس از (LF)، ویدنوز از (CR LF) و Mac OS کلاسیک (CR LF) پشتیبانی می کند. +`NULL` به عنوان فرمت `\N` یا `NULL` یا یک رشته بدون علامت خالی (تنظیمات را ببینید [\_فرستادن به \_کوچکنمایی](../operations/settings/settings.md#settings-input_format_csv_unquoted_null_literal_as_null) و [\_پوشه های ورودی و خروجی](../operations/settings/settings.md#session_settings-input_format_defaults_for_omitted_fields)). -فرمت CSV خروجی total و extreme را همانند `TabSeparated` پشتیبانی می کنند. +پشتیبانی از خروجی بالغ و افراط به همان شیوه به عنوان `TabSeparated`. -## CSVWithNames {#csvwithnames} +## اطلاعات دقیق {#csvwithnames} -همچنین header سطر را چاپ می کند، شبیه به `TabSeparatedWithNames`. +همچنین چاپ ردیف هدر, شبیه به `TabSeparatedWithNames`. + +## سفارشی {#format-customseparated} + +مشابه به [قالب](#format-template) اما همه ستون ها را چاپ می کند یا می خواند و از قاعده فرار از تنظیم استفاده می کند `format_custom_escaping_rule` و جداکننده از تنظیمات `format_custom_field_delimiter`, `format_custom_row_before_delimiter`, `format_custom_row_after_delimiter`, `format_custom_row_between_delimiter`, `format_custom_result_before_delimiter` و `format_custom_result_after_delimiter`, نه از رشته فرمت. +همچنین وجود دارد `CustomSeparatedIgnoreSpaces` قالب که شبیه به `TemplateIgnoreSpaces`. ## JSON {#json} -خروجی داده ها با فرمت JSON. در کنال داده های جداول، خروجی JSON اسم ستون ها و type آنها به همراه اطلاعات بیشتر تولید می کند: تعداد سطر های خروجی، و همچنین تعداد رکورد های کل بدون در نظر گرفتن دستور LIMIT. مثال: - -
    +خروجی داده ها در فرمت جانسون. علاوه بر جداول داده, همچنین خروجی نام ستون و انواع, همراه با برخی از اطلاعات اضافی: تعداد کل ردیف خروجی, و تعداد ردیف است که می تواند خروجی بوده است اگر یک محدودیت وجود ندارد. مثال: ``` sql SELECT SearchPhrase, count() AS c FROM test.hits GROUP BY SearchPhrase WITH TOTALS ORDER BY c DESC LIMIT 5 FORMAT JSON @@ -355,28 +452,29 @@ SELECT SearchPhrase, count() AS c FROM test.hits GROUP BY SearchPhrase WITH TOTA } ``` -
    +جانسون سازگار با جاوا اسکریپت است. برای اطمینان از این, برخی از شخصیت ها علاوه بر فرار: بریده بریده `/` فرار به عنوان `\/`; معافیت خط جایگزین `U+2028` و `U+2029`, که شکستن برخی از مرورگرهای, به عنوان فرار `\uXXXX`. شخصیت های کنترل اسکی فرار: برگشت به عقب, خوراک فرم, خوراک خط, بازگشت حمل, و تب افقی با جایگزین `\b`, `\f`, `\n`, `\r`, `\t` , و همچنین بایت باقی مانده در محدوده 00-1ف با استفاده از `\uXXXX` sequences. Invalid UTF-8 sequences are changed to the replacement character � so the output text will consist of valid UTF-8 sequences. For compatibility with JavaScript, Int64 and UInt64 integers are enclosed in double-quotes by default. To remove the quotes, you can set the configuration parameter [خروجی \_فرمان\_جسون\_کوات\_64بیت\_تنظیمی](../operations/settings/settings.md#session_settings-output_format_json_quote_64bit_integers) به 0. -JSON با جاوااسکریپت سازگار است. برای اطمینان از این، بعضی از کاراکتر ها ecape های اضافه دارند: اسلش `/` به صورت `\/` escape می شود؛ line break جایگزین یعنی `U+2028` و `U+2029` که باعث break در بعضی از مروگرها می شود، به شکل `\uXXXX` escape می شوند. کاراکتر های کنترلی ASCII هم escape می شوند: backspace، form feed، line feed، carriage return، و horizontal tab به ترتیب با `\b`، `\f`، `\n`، `\r`، `\t` جایگزین می شوند. همچنین بایت های باقی مانده در محدوده 00 تا 1F با استفاده از `\uXXXX` جایگزین می شوند. کاراکتر های بی اعتبار UTF-8 با � جایگزین می شوند، پس خروجی JSON شامل موارد معتبر UTF-8 می باشد. برای سازگاری با جاوااسکریپت، اعداد Int64 و Uint64 به صورت پیش فرض، با استفاده از دابل کوتیشن enclose می شوند. برای حذف کوتیشن، شما باید پارامتر output\_format\_json\_quote\_64bit\_integers v رو برابر با 0 قرار دهید. +`rows` – The total number of output rows. -`rows` – تعداد سطر های خروجی +`rows_before_limit_at_least` حداقل تعداد ردیف وجود دارد که بدون محدودیت بوده است. خروجی تنها در صورتی که پرس و جو شامل حد. +اگر پرس و جو شامل گروه های, ردیف ها\_افور\_لیمیت\_تلاست تعداد دقیق ردیف وجود دارد که بدون محدودیت بوده است. -`rows_before_limit_at_least` حداقل تعداد سطر ها در هنگام عدم استفاده از LIMIT. فقط در هنگامی که query دارای LIMIT است خروجی دارد. اگر query شامل GROUP BY باشد، مقدار rows\_before\_limit\_at\_least دقیقا با زمانی که از LIMIT استفاده نمی شود یکی است. +`totals` – Total values (when using WITH TOTALS). -`totals` – مقدار TOTAL (زمانی که از WITH TOTALS استفاده می شود). +`extremes` – Extreme values (when extremes are set to 1). -`extremes` – مقدار Extreme (در هنگامی که extreme برابر با 1 است). +این فرمت فقط برای خروجی یک نتیجه پرس و جو مناسب است, اما نه برای تجزیه (بازیابی اطلاعات برای وارد کردن در یک جدول). -این فرمت فقط مناسب خروجی query های می باشد، به این معنی که برای عملیات پارس کردن (دریافت داده برای insert در جدول) نیست. همچنین فرمت JSONEachRow را ببینید. +پشتیبانی از کلیک [NULL](../sql_reference/syntax.md), است که به عنوان نمایش داده `null` در خروجی جانسون. -## JSONCompact {#jsoncompact} +همچنین نگاه کنید به [جیسانچرو](#jsoneachrow) قالب. -فقط در جاهایی که داده ها به جای object در array هستند خروجی آنها متفاوت است. +## فوق العاده {#jsoncompact} + +متفاوت از جانسون تنها در ردیف داده ها خروجی در ارریس, نه در اشیا. مثال: -
    - ``` json { "meta": @@ -396,8 +494,8 @@ JSON با جاوااسکریپت سازگار است. برای اطمینان ا ["", "8267016"], ["bathroom interior design", "2166"], ["yandex", "1655"], - ["spring 2014 fashion", "1549"], - ["freeform photos", "1480"] + ["fashion trends spring 2014", "1549"], + ["freeform photo", "1480"] ], "totals": ["","8873898"], @@ -414,52 +512,96 @@ JSON با جاوااسکریپت سازگار است. برای اطمینان ا } ``` -
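For quick inspection in scripts, this output pairs well with a JSON processor (a minimal sketch, assuming a local clickhouse-client and that jq is installed; both names outside the format itself are assumptions):

``` bash
# Extract only the data rows from the JSONCompact document.
$ clickhouse-client --query "SELECT number, number * 2 FROM system.numbers LIMIT 3 FORMAT JSONCompact" | jq '.data'
```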
+This format is only suitable for outputting a query result, but not for parsing (retrieving data to insert in a table).
+See also the `JSONEachRow` format.

-This format is only suitable for query output, meaning it is not for parsing (retrieving data to insert into a table). Also see the JSONEachRow format.

+## JSONEachRow {#jsoneachrow}

-## JSONEachRow {#jsoneachrow}
-
-Each row has its own separate JSON object. (Objects are delimited by newlines.)
-
-
+When using this format, ClickHouse outputs rows as separated, newline-delimited JSON objects, but the data as a whole is not valid JSON.

``` json
-{"SearchPhrase":"","count()":"8267016"}
-{"SearchPhrase":"bathroom interior design","count()":"2166"}
-{"SearchPhrase":"yandex","count()":"1655"}
-{"SearchPhrase":"spring 2014 fashion","count()":"1549"}
-{"SearchPhrase":"freeform photo","count()":"1480"}
-{"SearchPhrase":"angelina jolie","count()":"1245"}
-{"SearchPhrase":"omsk","count()":"1112"}
-{"SearchPhrase":"photos of dog breeds","count()":"1091"}
-{"SearchPhrase":"curtain design","count()":"1064"}
+{"SearchPhrase":"curtain designs","count()":"1064"}
{"SearchPhrase":"baku","count()":"1000"}
+{"SearchPhrase":"","count()":"8267016"}
```

-
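A minimal round trip with this format (a sketch; `jer_demo` is a hypothetical scratch table, and a local clickhouse-client is assumed):

``` bash
$ clickhouse-client --query "CREATE TABLE jer_demo (s String, c UInt64) ENGINE = Memory"
# Each input line is an independent JSON object.
$ printf '{"s":"baku","c":1000}\n{"s":"curtain designs","c":1064}\n' | \
    clickhouse-client --query "INSERT INTO jer_demo FORMAT JSONEachRow"
# UInt64 values come back quoted by default, as described for JSON above.
$ clickhouse-client --query "SELECT * FROM jer_demo FORMAT JSONEachRow"
$ clickhouse-client --query "DROP TABLE jer_demo"
```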
+When inserting the data, you should provide a separate JSON object for each row.

-Unlike the JSON format, there is no substitution of invalid UTF-8 sequences. Any set of bytes can be in the rows of the output, so data is formatted without losing any information. Values are escaped in the same way as for JSON.

+### Inserting Data {#inserting-data}

-For parsing, any order of values of different columns is supported. Omitting some values is acceptable; they are treated as equal to their default values. In this case, zeros and blank rows are used as default values. Complex values that could be specified in the table are not supported as defaults. Whitespace between elements is ignored. If a comma is placed after the objects, it is ignored. Objects do not necessarily have to be separated by new lines.

+``` sql
+INSERT INTO UserActivity FORMAT JSONEachRow {"PageViews":5, "UserID":"4324182021466249494", "Duration":146,"Sign":-1} {"UserID":"4324182021466249494","PageViews":6,"Duration":185,"Sign":1}
+```

-### Usage of Nested Structures {#jsoneachrow-nested}

+ClickHouse allows:

-If you have a table with the [Nested](../data_types/nested_data_structures/nested.md) data type columns, you can insert JSON data having the same structure. Enable this functionality with the [input\_format\_import\_nested\_json](../operations/settings/settings.md#settings-input_format_import_nested_json) setting.

+- Any order of key-value pairs in the object.
+- Omitting some values.

-For example, consider the following table:

+ClickHouse ignores spaces between elements and commas after the objects. You can pass all the objects in one line. You do not have to separate them with line breaks.
+
+**Omitted values processing**
+
+ClickHouse substitutes omitted values with the default values for the corresponding [data types](../sql_reference/data_types/index.md).
+
+If `DEFAULT expr` is specified, ClickHouse uses different substitution rules depending on the [input\_format\_defaults\_for\_omitted\_fields](../operations/settings/settings.md#session_settings-input_format_defaults_for_omitted_fields) setting.
+
+Consider the following table:
+
+``` sql
+CREATE TABLE IF NOT EXISTS example_table
+(
+    x UInt32,
+    a DEFAULT x * 2
+) ENGINE = Memory;
+```
+
+- If `input_format_defaults_for_omitted_fields = 0`, then the default value for `x` and `a` equals `0` (as the default value for the `UInt32` data type).
+- If `input_format_defaults_for_omitted_fields = 1`, then the default value for `x` equals `0`, but the default value of `a` equals `x * 2`.
+
+!!! note "Warning"
+    When inserting data with `insert_sample_with_metadata = 1`, ClickHouse consumes more computational resources compared to insertion with `insert_sample_with_metadata = 0`.
+
+### Selecting Data {#selecting-data}
+
+Consider the `UserActivity` table as an example:
+
+``` text
+┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐
+│ 4324182021466249494 │         5 │      146 │   -1 │
+│ 4324182021466249494 │         6 │      185 │    1 │
+└─────────────────────┴───────────┴──────────┴──────┘
+```
+
+The query `SELECT * FROM UserActivity FORMAT JSONEachRow` returns:
+
+``` text
+{"UserID":"4324182021466249494","PageViews":5,"Duration":146,"Sign":-1}
+{"UserID":"4324182021466249494","PageViews":6,"Duration":185,"Sign":1}
+```
+
+Unlike the [JSON](#json) format, there is no substitution of invalid UTF-8 sequences. Values are escaped in the same way as for `JSON`.
+
+!!! note "Note"
+    Any set of bytes can be output in the strings. Use the `JSONEachRow` format if you are sure that the data in the table can be formatted as JSON without losing any information.
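A small demonstration of the substitution rules above (a sketch, assuming a local clickhouse-client and the `example_table` definition shown earlier; settings can be passed as client options):

``` bash
# With the setting enabled, the omitted column `a` is filled from its DEFAULT expression (x * 2).
$ echo '{"x":5}' | clickhouse-client --input_format_defaults_for_omitted_fields=1 \
    --query "INSERT INTO example_table FORMAT JSONEachRow"
$ clickhouse-client --query "SELECT * FROM example_table"
```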
+
+### Usage of Nested Structures {#jsoneachrow-nested}
+
+If you have a table with [Nested](../sql_reference/data_types/nested_data_structures/nested.md) data type columns, you can insert JSON data that has the same structure. Enable this feature with the [input\_format\_import\_nested\_json](../operations/settings/settings.md#settings-input_format_import_nested_json) setting.
+
+For example, consider the following table:

``` sql
CREATE TABLE json_each_row_nested (n Nested (s String, i Int32) ) ENGINE = Memory
```

-As you can find in the `Nested` data type description, ClickHouse treats each component of the nested structure as a separate column, `n.s` and `n.i` for our table. So you can insert the data the following way:
+As you can see in the `Nested` data type description, ClickHouse treats each component of the nested structure as a separate column (`n.s` and `n.i` for our table). You can insert data in the following way:

``` sql
INSERT INTO json_each_row_nested FORMAT JSONEachRow {"n.s": ["abc", "def"], "n.i": [1, 23]}
```

-To insert data as hierarchical JSON object set [input\_format\_import\_nested\_json=1](../operations/settings/settings.md#settings-input_format_import_nested_json).
+To insert data as a hierarchical JSON object, set [input\_format\_import\_nested\_json=1](../operations/settings/settings.md#settings-input_format_import_nested_json).

``` json
{
@@ -470,7 +612,7 @@ To insert data as hierarchical JSON object set [input\_format\_import\_nested\_j
}
```

-Without this setting ClickHouse throws the exception.
+Without this setting, ClickHouse throws an exception.

``` sql
SELECT name, value FROM system.settings WHERE name = 'input_format_import_nested_json'
```
@@ -502,144 +644,188 @@ SELECT * FROM json_each_row_nested
└───────────────┴────────┘
```

-## Native {#native}
+## Native {#native}

-The most efficient format. Data is written and read by blocks in binary format. For each block, the number of rows, number of columns, column names and types, and parts of columns in this block are recorded one after another. In other words, this format is "columnar": it does not convert columns to rows. This is the format used in the native interface for interaction between servers, for using the command-line client, and for C++ clients.
+The most efficient format. Data is written and read by blocks in binary format. For each block, the number of rows, number of columns, column names and types, and parts of columns in this block are recorded one after another. In other words, this format is “columnar” – it doesn't convert columns to rows. This is the format used in the native interface for interaction between servers, for using the command-line client, and for C++ clients.

-You can use this format to quickly generate dumps that can only be read by the ClickHouse DBMS. It does not make sense to work with this format yourself.
+You can use this format to quickly generate dumps that can only be read by the ClickHouse DBMS. It does not make sense to work with this format yourself.

-## Null {#null}
+## Null {#null}

-Nothing is output. However, the query is processed, and when using the command-line client, data is transmitted to the client. This is used for tests, including performance testing. Obviously, this format is only appropriate for output, not for parsing.
+Nothing is output. However, the query is processed, and when using the command-line client, data is transmitted to the client. This is used for tests, including performance testing.
+Obviously, this format is only suitable for output, not for parsing.
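Since Null executes the query in full and only discards the result, it is convenient for timing a query without flooding the terminal (a minimal sketch, assuming a local clickhouse-client):

``` bash
# --time prints the elapsed query time to stderr; nothing is printed for the result itself.
$ clickhouse-client --time --query "SELECT number FROM system.numbers LIMIT 10000000 FORMAT Null"
```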
-## Pretty {#pretty}
+## Pretty {#pretty}

-Outputs data as Unicode-art tables, also using ANSI escape sequences to set colors in the terminal. A full table is drawn, and each row occupies two lines in the terminal. Each result block is printed as a separate table, so blocks can be printed without buffering results (buffering would be necessary to pre-calculate the visible width of all the values). To prevent dumping too much data to the terminal, only the first 10,000 rows are printed. If the number of rows is greater than or equal to 10,000, the message “Showed first 10 000” is printed. This format is only appropriate for outputting query results, not for parsing (retrieving data to insert in a table).
+Outputs data as Unicode-art tables, also using ANSI escape sequences to set colors in the terminal.
+A full grid of the table is drawn, and each row occupies two lines in the terminal.
+Each result block is output as a separate table. This is necessary so that blocks can be output without buffering results (buffering would be necessary in order to pre-calculate the visible width of all the values).

-The Pretty format supports outputting total values (when using WITH TOTALS) and extreme values (when ‘extremes’ is set to 1). In these cases, total values and extreme values are printed after the main data, in separate tables. Example (shown for the PrettyCompact format):
+[NULL](../sql_reference/syntax.md) is output as `ᴺᵁᴸᴸ`.

-
+Example (shown for the [PrettyCompact](#prettycompact) format):
+
+``` sql
+SELECT * FROM t_null
+```
+
+``` text
+┌─x─┬────y─┐
+│ 1 │ ᴺᵁᴸᴸ │
+└───┴──────┘
+```
+
+Rows are not escaped in Pretty\* formats. An example is shown for the [PrettyCompact](#prettycompact) format:
+
+``` sql
+SELECT 'String with \'quotes\' and \t character' AS Escaping_test
+```
+
+``` text
+┌─Escaping_test────────────────────────┐
+│ String with 'quotes' and      character │
+└──────────────────────────────────────┘
+```
+
+To avoid dumping too much data to the terminal, only the first 10,000 rows are printed. If the number of rows is greater than or equal to 10,000, the message “Showed first 10 000” is printed.
+This format is only suitable for outputting a query result, but not for parsing (retrieving data to insert in a table).
+
+The Pretty format supports outputting total values (when using WITH TOTALS) and extremes (when ‘extremes’ is set to 1). In these cases, total values and extreme values are output after the main data, in separate tables. Example (shown for the [PrettyCompact](#prettycompact) format):

``` sql
SELECT EventDate, count() AS c FROM test.hits GROUP BY EventDate WITH TOTALS ORDER BY EventDate FORMAT PrettyCompact
```

-    ┌──EventDate─┬───────c─┐
-    │ 2014-03-17 │ 1406958 │
-    │ 2014-03-18 │ 1383658 │
-    │ 2014-03-19 │ 1405797 │
-    │ 2014-03-20 │ 1353623 │
-    │ 2014-03-21 │ 1245779 │
-    │ 2014-03-22 │ 1031592 │
-    │ 2014-03-23 │ 1046491 │
-    └────────────┴─────────┘
+``` text
+┌──EventDate─┬───────c─┐
+│ 2014-03-17 │ 1406958 │
+│ 2014-03-18 │ 1383658 │
+│ 2014-03-19 │ 1405797 │
+│ 2014-03-20 │ 1353623 │
+│ 2014-03-21 │ 1245779 │
+│ 2014-03-22 │ 1031592 │
+│ 2014-03-23 │ 1046491 │
+└────────────┴─────────┘

-    Totals:
-    ┌──EventDate─┬───────c─┐
-    │ 0000-00-00 │ 8873898 │
-    └────────────┴─────────┘
+Totals:
+┌──EventDate─┬───────c─┐
+│ 0000-00-00 │ 8873898 │
+└────────────┴─────────┘

-    Extremes:
-    ┌──EventDate─┬───────c─┐
-    │ 2014-03-17 │ 1031592 │
-    │ 2014-03-23 │ 1406958 │
-    └────────────┴─────────┘
+Extremes:
+┌──EventDate─┬───────c─┐
+│ 2014-03-17 │ 1031592 │
+│ 2014-03-23 │ 1406958 │
+└────────────┴─────────┘
+```

-## PrettyCompact {#prettycompact}
-
-Differs from `Pretty` in that the grid is drawn between rows and the result is more compact. This format is used by default in the command-line client in interactive mode.
-
-## PrettyCompactMonoBlock {#prettycompactmonoblock}
-
-Differs from `PrettyCompact` in that up to 10,000 rows are buffered, then output as a single table, not by blocks.
-
-## PrettyNoEscapes {#prettynoescapes}
-
-Differs from Pretty in that ANSI escape sequences are not used. This is necessary for displaying the format in a browser, as well as for using the ‘watch’ command.
-
-Example:
-
-``` bash
-watch -n1 "clickhouse-client --query='SELECT event, value FROM system.events FORMAT PrettyCompactNoEscapes'"
-```
-
-You can use the HTTP interface for displaying in the browser.
-
-### PrettyCompactNoEscapes {#prettycompactnoescapes}
-
-The same as the previous setting.
-
-### PrettySpaceNoEscapes {#prettyspacenoescapes}
-
-The same as the previous setting.
-
-## PrettySpace {#prettyspace}
-
-Differs from `PrettyCompact` in that whitespace (space characters) is used instead of the grid.
-
-## RowBinary {#rowbinary}
-
-Formats and parses data by row in binary format. Rows and values are listed consecutively, without separators. This format is less efficient than the Native format, since it is row-based.
-
-Integers use fixed-length representation. For example, UInt64 uses 8 bytes. DateTime is represented as UInt32 containing the Unix timestamp as the value. Date is represented as UInt16 containing the number of days since 1970-01-01. String is represented as a varint length (unsigned [LEB128](https://en.wikipedia.org/wiki/LEB128)), followed by the bytes of the string. FixedString is represented simply as a sequence of bytes.
-
-Array is represented as a varint length (unsigned [LEB128](https://en.wikipedia.org/wiki/LEB128)), followed by successive elements of the array.
-
-## Values {#data-format-values}
-
-Prints every row in brackets. Rows are separated by commas. There is no comma after the last row. The values inside the brackets are also comma-separated. Numbers are printed in decimal format without quotes. Arrays are printed in brackets. Strings, dates, and dates with times are printed in quotes. Escaping and parsing rules are similar to the TabSeparated format. During formatting, extra spaces are not inserted, but during parsing they are allowed and skipped (except for spaces inside array values, which are not allowed).
-
-The minimum set of characters that you need to escape when passing data: single quotes and backslashes.
-
-This is the format used in `INSERT INTO t VALUES ...`, but you can also use it for formatting query results.
-
-## Vertical {#vertical}
-
-Prints each value on a separate line with the column name specified. This format is convenient for printing just one or a few rows if each row consists of a large number of columns. This format is only suitable for outputting query results, not for parsing (retrieving data to insert in a table).
-
-## VerticalRaw {#verticalraw}
-
-Differs from `Vertical` in that the rows are not escaped. This format is only suitable for outputting query results, not for parsing (retrieving data to insert in a table).
-
-Example:

-    :) SHOW CREATE TABLE geonames FORMAT VerticalRaw;
-    Row 1:
-    ──────
-    statement: CREATE TABLE default.geonames ( geonameid UInt32, date Date DEFAULT CAST('2017-12-08' AS Date)) ENGINE = MergeTree(date, geonameid, 8192)

-    :) SELECT 'string with \'quotes\' and \t with some special \n characters' AS test FORMAT VerticalRaw;
-    Row 1:
-    ──────
-    test: string with 'quotes' and    with some special
-    characters

-Compared with the Vertical format:

-    :) SELECT 'string with \'quotes\' and \t with some special \n characters' AS test FORMAT Vertical;
-    Row 1:
-    ──────
-    test: string with \'quotes\' and \t with some special \n characters

+## PrettyCompact {#prettycompact}
+
+Differs from [Pretty](#pretty) in that the grid is drawn between rows and the result is more compact.
+This format is used by default in the command-line client in interactive mode.
+
+## PrettyCompactMonoBlock {#prettycompactmonoblock}
+
+Differs from [PrettyCompact](#prettycompact) in that up to 10,000 rows are buffered, then output as a single table, not by blocks.
+
+## PrettyNoEscapes {#prettynoescapes}
+
+Differs from Pretty in that ANSI escape sequences aren't used. This is necessary for displaying this format in a browser, as well as for using the ‘watch’ command-line utility.
+
+Example:
+
+``` bash
+$ watch -n1 "clickhouse-client --query='SELECT event, value FROM system.events FORMAT PrettyCompactNoEscapes'"
+```
+
+You can use the HTTP interface for displaying in the browser.
+
+### PrettyCompactNoEscapes {#prettycompactnoescapes}
+
+The same as the previous setting.
+
+### PrettySpaceNoEscapes {#prettyspacenoescapes}
+
+The same as the previous setting.
+
+## PrettySpace {#prettyspace}
+
+Differs from [PrettyCompact](#prettycompact) in that whitespace (space characters) is used instead of the grid.
+
+## RowBinary {#rowbinary}
+
+Formats and parses data by row in binary format. Rows and values are listed consecutively, without separators.
+This format is less efficient than the Native format, since it is row-based.
+
+Integers use fixed-length little-endian representation. For example, UInt64 uses 8 bytes.
+DateTime is represented as UInt32 containing the Unix timestamp as the value.
+Date is represented as a UInt16 object that contains the number of days since 1970-01-01 as the value.
+String is represented as a varint length (unsigned [LEB128](https://en.wikipedia.org/wiki/LEB128)), followed by the bytes of the string.
+FixedString is represented simply as a sequence of bytes.
+
+Array is represented as a varint length (unsigned [LEB128](https://en.wikipedia.org/wiki/LEB128)), followed by successive elements of the array.
+
+For [NULL](../sql_reference/syntax.md#null-literal) support, an additional byte containing 1 or 0 is added before each [Nullable](../sql_reference/data_types/nullable.md) value. If 1, then the value is `NULL` and this byte is interpreted as a separate value. If 0, the value after the byte is not `NULL`.
+
+## RowBinaryWithNamesAndTypes {#rowbinarywithnamesandtypes}
+
+Similar to [RowBinary](#rowbinary), but with an added header:
+
+- [LEB128](https://en.wikipedia.org/wiki/LEB128)-encoded number of columns (N)
+- N `String`s specifying column names
+- N `String`s specifying column types
+
+## Values {#data-format-values}
+
+Prints every row in brackets. Rows are separated by commas. There is no comma after the last row. The values inside the brackets are also comma-separated. Numbers are output in a decimal format without quotes. Arrays are output in square brackets. Strings, dates, and dates with times are output in quotes. Escaping rules and parsing are similar to the [TabSeparated](#tabseparated) format. During formatting, extra spaces aren't inserted, but during parsing, they are allowed and skipped (except for spaces inside array values, which aren't allowed). [NULL](../sql_reference/syntax.md) is represented as `NULL`.

The minimum set of characters that you need to escape when passing data in Values format: single quotes and backslashes.

+This is the format that is used in `INSERT INTO t VALUES ...`, but you can also use it for formatting query results.
+
+See also: the [input\_format\_values\_interpret\_expressions](../operations/settings/settings.md#settings-input_format_values_interpret_expressions) and [input\_format\_values\_deduce\_templates\_of\_expressions](../operations/settings/settings.md#settings-input_format_values_deduce_templates_of_expressions) settings.
+
+## Vertical {#vertical}
+
+Prints each value on a separate line with the column name specified. This format is convenient for printing just one or a few rows if each row consists of a large number of columns.
+
+[NULL](../sql_reference/syntax.md) is output as `ᴺᵁᴸᴸ`.
+
+Example:
+
+``` sql
+SELECT * FROM t_null FORMAT Vertical
+```
+
+``` text
+Row 1:
+──────
+x: 1
+y: ᴺᵁᴸᴸ
+```
+
+Rows are not escaped in Vertical format:
+
+``` sql
+SELECT 'string with \'quotes\' and \t with some special \n characters' AS test FORMAT Vertical
+```
+
+``` text
+Row 1:
+──────
+test: string with 'quotes' and      with some special
+ characters
+```
+
+This format is only suitable for outputting a query result, but not for parsing (retrieving data to insert in a table).
+
+## VerticalRaw {#verticalraw}
+
+Similar to [Vertical](#vertical), but with escaping disabled.
This format is only suitable for outputting a query result, but not for parsing (retrieving data to insert in a table).

## XML {#xml}

-The XML format is suitable only for output, not for parsing. Example:
+The format is suitable only for output, not for parsing. Example:

``` xml
@@ -670,7 +856,7 @@ watch -n1 "clickhouse-client --query='SELECT event, value FROM system.events FOR
                <field>1655</field>
            </row>
            <row>
-                <SearchPhrase>spring 2014 fashion</SearchPhrase>
+                <SearchPhrase>2014 spring fashion</SearchPhrase>
                <field>1549</field>
            </row>
@@ -690,7 +876,7 @@ watch -n1 "clickhouse-client --query='SELECT event, value FROM system.events FOR
                <field>1091</field>
            </row>
            <row>
-                <SearchPhrase>curtain design</SearchPhrase>
+                <SearchPhrase>curtain designs</SearchPhrase>
                <field>1064</field>
            </row>
@@ -703,54 +889,44 @@ watch -n1 "clickhouse-client --query='SELECT event, value FROM system.events FOR
```
+If the column name does not have an acceptable format, just ‘field’ is used as the element name. In general, the XML structure follows the JSON structure.
+Just as for JSON, invalid UTF-8 sequences are changed to the replacement character � so the output text will consist of valid UTF-8 sequences.

-If the field name does not have an acceptable format, the name ‘field’ is used as the element name. In general, the XML structure follows the JSON structure. Just as for JSON, invalid UTF-8 sequences are changed to the replacement character �, so the output consists of valid UTF-8 sequences.

+In string values, the characters `<` and `&` are escaped as `&lt;` and `&amp;`.

-In string values, the characters `<` and `&` are escaped as `&lt;` and `&amp;`.

+Arrays are output as `<array><elem>Hello</elem><elem>World</elem>...</array>`, and tuples as `<tuple><elem>Hello</elem><elem>World</elem>...</tuple>`.

-Arrays are output as `<array><elem>Hello</elem><elem>World</elem>...</array>`, and tuples as `<tuple><elem>Hello</elem><elem>World</elem>...</tuple>`.

+## CapnProto {#capnproto}

-## CapnProto {#capnproto}

+Cap'n Proto is a binary message format similar to Protocol Buffers and Thrift, but not like JSON or MessagePack.

-Cap’n Proto is a binary message format similar to Protocol Buffers and Thrift, but not like JSON or MessagePack.

+Cap'n Proto messages are strictly typed and not self-describing, meaning they need an external schema description. The schema is applied on the fly and cached for each query.

-Cap’n Proto messages are not self-describing, meaning they need an external description of their schema. The schema is applied on the fly and cached for each query.

+``` bash
+$ cat capnproto_messages.bin | clickhouse-client --query "INSERT INTO test.hits FORMAT CapnProto SETTINGS format_schema='schema:Message'"
+```

-``` sql
-SELECT SearchPhrase, count() AS c FROM test.hits
-       GROUP BY SearchPhrase FORMAT CapnProto SETTINGS schema = 'schema:Message'
-```

+Where `schema.capnp` looks like this:

-Where `schema.capnp` looks like this:

-    struct Message {
-      SearchPhrase @0 :Text;
-      c @1 :Uint64;
-    }

+``` capnp
+struct Message {
+    SearchPhrase @0 :Text;
+    c @1 :Uint64;
+}
+```

+Deserialization is effective and usually does not increase the system load.

-Schema files are located in the directory specified in the [format\_schema\_path](../operations/server_settings/settings.md) server configuration parameter.

-Deserialization is effective and usually does not increase the system load.
+See also [Format Schema](#formatschema).

## Protobuf {#protobuf}

-Protobuf - is a [Protocol Buffers](https://developers.google.com/protocol-buffers/) format.
+Protobuf is a [Protocol Buffers](https://developers.google.com/protocol-buffers/) format.

-This format requires an external format schema. The schema is cached between queries.
-ClickHouse supports both `proto2` and `proto3` syntaxes. Repeated/optional/required fields are supported.
+This format requires an external format schema. The schema is cached between queries.
+ClickHouse supports both `proto2` and `proto3` syntaxes. Repeated/optional/required fields are supported.

-Usage examples:
+Usage examples:

``` sql
SELECT * FROM test.table FORMAT Protobuf SETTINGS format_schema = 'schemafile:MessageType'
```
@@ -760,7 +936,7 @@ SELECT * FROM test.table FORMAT Protobuf SETTINGS format_schema = 'schemafile:Me
cat protobuf_messages.bin | clickhouse-client --query "INSERT INTO test.table FORMAT Protobuf SETTINGS format_schema='schemafile:MessageType'"
```

-where the file `schemafile.proto` looks like this:
+where the file `schemafile.proto` looks like this:

``` capnp
syntax = "proto3";
@@ -773,11 +949,11 @@ message MessageType {
};
```

-To find the correspondence between table columns and fields of Protocol Buffers’ message type ClickHouse compares their names.
-This comparison is case-insensitive and the characters `_` (underscore) and `.` (dot) are considered as equal.
-If types of a column and a field of Protocol Buffers’ message are different the necessary conversion is applied.
+To find the correspondence between table columns and fields of Protocol Buffers’ message type, ClickHouse compares their names.
+This comparison is case-insensitive and the characters `_` (underscore) and `.` (dot) are considered as equal.
+If the types of a column and a field of a Protocol Buffers message differ, the necessary conversion is applied.

-Nested messages are supported. For example, for the field `z` in the following message type
+Nested messages are supported. For example, for the field `z` in the following message type

``` capnp
message MessageType {
@@ -791,10 +967,10 @@ message MessageType {
};
```

-ClickHouse tries to find a column named `x.y.z` (or `x_y_z` or `X.y_Z` and so on).
-Nested messages are suitable to input or output a [nested data structures](../data_types/nested_data_structures/nested.md).
+ClickHouse tries to find a column named `x.y.z` (or `x_y_z` or `X.y_Z` and so on).
+Nested messages are suitable to input or output [nested data structures](../sql_reference/data_types/nested_data_structures/nested.md).

-Default values defined in a protobuf schema like this
+Default values defined in a protobuf schema like this

``` capnp
syntax = "proto2";
@@ -804,91 +980,91 @@ message MessageType {
}
```

-are not applied; the [table defaults](../query_language/create.md#create-default-values) are used instead of them.
+are not applied; the [table defaults](../sql_reference/statements/create.md#create-default-values) are used instead of them.

+ClickHouse inputs and outputs protobuf messages in the `length-delimited` format.
+It means before every message should be written its length as a [varint](https://developers.google.com/protocol-buffers/docs/encoding#varints).
+See also [how to read/write length-delimited protobuf messages in popular languages](https://cwiki.apache.org/confluence/display/GEODE/Delimiting+Protobuf+Messages).

-## Avro {#data-format-avro}
+## Avro {#data-format-avro}

-[Apache Avro](http://avro.apache.org/) is a row-oriented data serialization framework developed within Apache’s Hadoop project.
+[Apache Avro](http://avro.apache.org/) is a row-oriented data serialization framework developed within Apache’s Hadoop project.

-ClickHouse Avro format supports reading and writing [Avro data files](http://avro.apache.org/docs/current/spec.html#Object+Container+Files).
+The ClickHouse Avro format supports reading and writing [Avro data files](http://avro.apache.org/docs/current/spec.html#Object+Container+Files).

-### Data Types Matching {#data_types-matching}
+### Data Types Matching {#data_types-matching}

-The table below shows supported data types and how they match ClickHouse [data types](../data_types/index.md) in `INSERT` and `SELECT` queries.
+The table below shows supported data types and how they match ClickHouse [data types](../sql_reference/data_types/index.md) in `INSERT` and `SELECT` queries.

-| Avro data type `INSERT` | ClickHouse data type | Avro data type `SELECT` |
-|---------------------------------------------|-------------------------------------------------------------------------------------------|------------------------------|
-| `boolean`, `int`, `long`, `float`, `double` | [Int(8\|16\|32)](../data_types/int_uint.md), [UInt(8\|16\|32)](../data_types/int_uint.md) | `int` |
-| `boolean`, `int`, `long`, `float`, `double` | [Int64](../data_types/int_uint.md), [UInt64](../data_types/int_uint.md) | `long` |
-| `boolean`, `int`, `long`, `float`, `double` | [Float32](../data_types/float.md) | `float` |
-| `boolean`, `int`, `long`, `float`, `double` | [Float64](../data_types/float.md) | `double` |
-| `bytes`, `string`, `fixed`, `enum` | [String](../data_types/string.md) | `bytes` |
-| `bytes`, `string`, `fixed` | [FixedString(N)](../data_types/fixedstring.md) | `fixed(N)` |
-| `enum` | [Enum(8\|16)](../data_types/enum.md) | `enum` |
-| `array(T)` | [Array(T)](../data_types/array.md) | `array(T)` |
-| `union(null, T)`, `union(T, null)` | [Nullable(T)](../data_types/date.md) | `union(null, T)` |
-| `null` | [Nullable(Nothing)](../data_types/special_data_types/nothing.md) | `null` |
-| `int (date)` \* | [Date](../data_types/date.md) | `int (date)` \* |
-| `long (timestamp-millis)` \* | [DateTime64(3)](../data_types/datetime.md) | `long (timestamp-millis)` \* |
-| `long (timestamp-micros)` \* | [DateTime64(6)](../data_types/datetime.md) | `long (timestamp-micros)` \* |

+| Avro data type `INSERT` | ClickHouse data type | Avro data type `SELECT` |
+|---------------------------------------------|------------------------------------------------------------------------------------------------------------------------|------------------------------|
+| `boolean`, `int`, `long`, `float`, `double` | [Int(8\|16\|32)](../sql_reference/data_types/int_uint.md), [UInt(8\|16\|32)](../sql_reference/data_types/int_uint.md) | `int` |
+| `boolean`, `int`, `long`, `float`, `double` | [Int64](../sql_reference/data_types/int_uint.md), [UInt64](../sql_reference/data_types/int_uint.md) | `long` |
+| `boolean`, `int`, `long`, `float`, `double` | [Float32](../sql_reference/data_types/float.md) | `float` |
+| `boolean`, `int`, `long`, `float`, `double` | [Float64](../sql_reference/data_types/float.md) | `double` |
+| `bytes`, `string`, `fixed`, `enum` | [String](../sql_reference/data_types/string.md) | `bytes` |
+| `bytes`, `string`, `fixed` | [FixedString(N)](../sql_reference/data_types/fixedstring.md) | `fixed(N)` |
+| `enum` | [Enum(8\|16)](../sql_reference/data_types/enum.md) | `enum` |
+| `array(T)` | [Array(T)](../sql_reference/data_types/array.md) | `array(T)` |
+| `union(null, T)`, `union(T, null)` | [Nullable(T)](../sql_reference/data_types/date.md) | `union(null, T)` |
+| `null` | [Nullable(Nothing)](../sql_reference/data_types/special_data_types/nothing.md) | `null` |
+| `int (date)` \* | [Date](../sql_reference/data_types/date.md) | `int (date)` \* |
+| `long (timestamp-millis)` \* | [DateTime64(3)](../sql_reference/data_types/datetime.md) | `long (timestamp-millis)` \* |
+| `long (timestamp-micros)` \* | [DateTime64(6)](../sql_reference/data_types/datetime.md) | `long (timestamp-micros)` \* |

\* [Avro logical types](http://avro.apache.org/docs/current/spec.html#Logical+Types)

-Unsupported Avro data types: `record` (non-root), `map`
+Unsupported Avro data types: `record` (non-root), `map`

-Unsupported Avro logical data types: `uuid`, `time-millis`, `time-micros`, `duration`
+Unsupported Avro logical data types: `uuid`, `time-millis`, `time-micros`, `duration`

-### Inserting Data {#inserting-data}
+### Inserting Data {#inserting-data-1}

-To insert data from an Avro file into ClickHouse table:
+To insert data from an Avro file into a ClickHouse table:

``` bash
$ cat file.avro | clickhouse-client --query="INSERT INTO {some_table} FORMAT Avro"
```

-The root schema of input Avro file must be of `record` type.
+The root schema of the input Avro file must be of `record` type.

-To find the correspondence between table columns and fields of Avro schema ClickHouse compares their names. This comparison is case-sensitive.
-Unused fields are skipped.
+To find the correspondence between table columns and fields of the Avro schema, ClickHouse compares their names. This comparison is case-sensitive.
+Unused fields are skipped.

-Data types of a ClickHouse table columns can differ from the corresponding fields of the Avro data inserted. When inserting data, ClickHouse interprets data types according to the table above and then [casts](../query_language/functions/type_conversion_functions/#type_conversion_function-cast) the data to corresponding column type.
+Data types of ClickHouse table columns can differ from the corresponding fields of the Avro data inserted. When inserting data, ClickHouse interprets data types according to the table above and then [casts](../query_language/functions/type_conversion_functions/#type_conversion_function-cast) the data to the corresponding column type.
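Before inserting, such a file can be inspected with clickhouse-local using the same format (a sketch; the structure passed with -S is an assumption about the columns in your `file.avro`):

``` bash
# Count the rows in the Avro file without touching the server.
$ cat file.avro | clickhouse-local --input-format Avro -S "field1 Int64" -q "SELECT count() FROM table"
```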
-### Selecting Data {#selecting-data}
+### Selecting Data {#selecting-data-1}

-To select data from ClickHouse table into an Avro file:
+To select data from a ClickHouse table into an Avro file:

``` bash
$ clickhouse-client --query="SELECT * FROM {some_table} FORMAT Avro" > file.avro
```

-Column names must:
+Column names must:

-- start with `[A-Za-z_]`
-- subsequently contain only `[A-Za-z0-9_]`
+- start with `[A-Za-z_]`
+- subsequently contain only `[A-Za-z0-9_]`

-Output Avro file compression and sync interval can be configured with [output\_format\_avro\_codec](../operations/settings/settings.md#settings-output_format_avro_codec) and [output\_format\_avro\_sync\_interval](../operations/settings/settings.md#settings-output_format_avro_sync_interval) respectively.
+Output Avro file compression and sync interval can be configured with [output\_format\_avro\_codec](../operations/settings/settings.md#settings-output_format_avro_codec) and [output\_format\_avro\_sync\_interval](../operations/settings/settings.md#settings-output_format_avro_sync_interval) respectively.

-## AvroConfluent {#data-format-avro-confluent}
+## AvroConfluent {#data-format-avro-confluent}

-AvroConfluent supports decoding single-object Avro messages commonly used with [Kafka](https://kafka.apache.org/) and [Confluent Schema Registry](https://docs.confluent.io/current/schema-registry/index.html).
+AvroConfluent supports decoding single-object Avro messages commonly used with [Kafka](https://kafka.apache.org/) and the [Confluent Schema Registry](https://docs.confluent.io/current/schema-registry/index.html).

-Each Avro message embeds a schema id that can be resolved to the actual schema with help of the Schema Registry.
+Each Avro message embeds a schema id that can be resolved to the actual schema with the help of the Schema Registry.

-Schemas are cached once resolved.
+Schemas are cached once resolved.

-Schema Registry URL is configured with [format\_avro\_schema\_registry\_url](../operations/settings/settings.md#settings-format_avro_schema_registry_url)
+The Schema Registry URL is configured with [format\_avro\_schema\_registry\_url](../operations/settings/settings.md#settings-format_avro_schema_registry_url).

-### Data Types Matching {#data_types-matching-1}
+### Data Types Matching {#data_types-matching-1}

-Same as [Avro](#data-format-avro)
+Same as [Avro](#data-format-avro).

-### Usage {#usage}
+### Usage {#usage}

-To quickly verify schema resolution you can use [kafkacat](https://github.com/edenhill/kafkacat) with [clickhouse-local](../operations/utils/clickhouse-local.md):
+To quickly verify schema resolution, you can use [kafkacat](https://github.com/edenhill/kafkacat) with [clickhouse-local](../operations/utilities/clickhouse-local.md):

``` bash
$ kafkacat -b kafka-broker -C -t topic1 -o beginning -f '%s' -c 3 | clickhouse-local --input-format AvroConfluent --format_avro_schema_registry_url 'http://schema-registry' -S "field1 Int64, field2 String" -q 'select * from table'
@@ -897,7 +1073,7 @@ $ kafkacat -b kafka-broker -C -t topic1 -o beginning -f '%s' -c 3 | clickhouse-
3 c
```

-To use `AvroConfluent` with [Kafka](../operations/table_engines/kafka.md):
+To use `AvroConfluent` with [Kafka](../engines/table_engines/integrations/kafka.md):

``` sql
CREATE TABLE topic1_stream
SET format_avro_schema_registry_url = 'http://schema-registry';

SELECT * FROM topic1_stream;
```

-!!! note "Warning"
-    Setting `format_avro_schema_registry_url` needs to be configured in `users.xml` to maintain it’s value after a restart.
+!!! note "Warning"
+    The `format_avro_schema_registry_url` setting needs to be configured in `users.xml` to keep its value after a restart.

-## Parquet {#data-format-parquet}
+## Parquet {#data-format-parquet}

-[Apache Parquet](http://parquet.apache.org/) is a columnar storage format widespread in the Hadoop ecosystem. ClickHouse supports read and write operations for this format.
+[Apache Parquet](http://parquet.apache.org/) is a columnar storage format widespread in the Hadoop ecosystem. ClickHouse supports read and write operations for this format.

-### Data Types Matching {#data_types-matching-2}
+### Data Types Matching {#data_types-matching-2}

-The table below shows supported data types and how they match ClickHouse [data types](../data_types/index.md) in `INSERT` and `SELECT` queries.
+The table below shows supported data types and how they match ClickHouse [data types](../sql_reference/data_types/index.md) in `INSERT` and `SELECT` queries.

-| Parquet data type (`INSERT`) | ClickHouse data type | Parquet data type (`SELECT`) |
-|------------------------------|---------------------------------------------|------------------------------|
-| `UINT8`, `BOOL` | [UInt8](../data_types/int_uint.md) | `UINT8` |
-| `INT8` | [Int8](../data_types/int_uint.md) | `INT8` |
-| `UINT16` | [UInt16](../data_types/int_uint.md) | `UINT16` |
-| `INT16` | [Int16](../data_types/int_uint.md) | `INT16` |
-| `UINT32` | [UInt32](../data_types/int_uint.md) | `UINT32` |
-| `INT32` | [Int32](../data_types/int_uint.md) | `INT32` |
-| `UINT64` | [UInt64](../data_types/int_uint.md) | `UINT64` |
-| `INT64` | [Int64](../data_types/int_uint.md) | `INT64` |
-| `FLOAT`, `HALF_FLOAT` | [Float32](../data_types/float.md) | `FLOAT` |
-| `DOUBLE` | [Float64](../data_types/float.md) | `DOUBLE` |
-| `DATE32` | [Date](../data_types/date.md) | `UINT16` |
-| `DATE64`, `TIMESTAMP` | [DateTime](../data_types/datetime.md) | `UINT32` |
-| `STRING`, `BINARY` | [String](../data_types/string.md) | `STRING` |
-| — | [FixedString](../data_types/fixedstring.md) | `STRING` |
-| `DECIMAL` | [Decimal](../data_types/decimal.md) | `DECIMAL` |

+| Parquet data type (`INSERT`) | ClickHouse data type | Parquet data type (`SELECT`) |
+|------------------------------|-----------------------------------------------------------|------------------------------|
+| `UINT8`, `BOOL` | [UInt8](../sql_reference/data_types/int_uint.md) | `UINT8` |
+| `INT8` | [Int8](../sql_reference/data_types/int_uint.md) | `INT8` |
+| `UINT16` | [UInt16](../sql_reference/data_types/int_uint.md) | `UINT16` |
+| `INT16` | [Int16](../sql_reference/data_types/int_uint.md) | `INT16` |
+| `UINT32` | [UInt32](../sql_reference/data_types/int_uint.md) | `UINT32` |
+| `INT32` | [Int32](../sql_reference/data_types/int_uint.md) | `INT32` |
+| `UINT64` | [UInt64](../sql_reference/data_types/int_uint.md) | `UINT64` |
+| `INT64` | [Int64](../sql_reference/data_types/int_uint.md) | `INT64` |
+| `FLOAT`, `HALF_FLOAT` | [Float32](../sql_reference/data_types/float.md) | `FLOAT` |
+| `DOUBLE` | [Float64](../sql_reference/data_types/float.md) | `DOUBLE` |
+| `DATE32` | [Date](../sql_reference/data_types/date.md) | `UINT16` |
+| `DATE64`, `TIMESTAMP` | [DateTime](../sql_reference/data_types/datetime.md) | `UINT32` |
+| `STRING`, `BINARY` | [String](../sql_reference/data_types/string.md) | `STRING` |
+| — | [FixedString](../sql_reference/data_types/fixedstring.md) | `STRING` |
+| `DECIMAL` | [Decimal](../sql_reference/data_types/decimal.md) | `DECIMAL` |

-ClickHouse supports configurable precision of `Decimal` type. The `INSERT` query treats the Parquet `DECIMAL` type as the ClickHouse `Decimal128` type.
+ClickHouse supports configurable precision of the `Decimal` type. The `INSERT` query treats the Parquet `DECIMAL` type as the ClickHouse `Decimal128` type.

-Unsupported Parquet data types: `DATE32`, `TIME32`, `FIXED_SIZE_BINARY`, `JSON`, `UUID`, `ENUM`.
+Unsupported Parquet data types: `DATE32`, `TIME32`, `FIXED_SIZE_BINARY`, `JSON`, `UUID`, `ENUM`.

-Data types of a ClickHouse table columns can differ from the corresponding fields of the Parquet data inserted. When inserting data, ClickHouse interprets data types according to the table above and then [cast](../query_language/functions/type_conversion_functions/#type_conversion_function-cast) the data to that data type which is set for the ClickHouse table column.
+Data types of ClickHouse table columns can differ from the corresponding fields of the Parquet data inserted. When inserting data, ClickHouse interprets data types according to the table above and then [casts](../query_language/functions/type_conversion_functions/#type_conversion_function-cast) the data to the data type that is set for the ClickHouse table column.

-### Inserting and Selecting Data {#inserting-and-selecting-data}
+### Inserting and Selecting Data {#inserting-and-selecting-data}

-You can insert Parquet data from a file into ClickHouse table by the following command:
+You can insert Parquet data from a file into a ClickHouse table with the following command:

``` bash
$ cat {filename} | clickhouse-client --query="INSERT INTO {some_table} FORMAT Parquet"
```

-You can select data from a ClickHouse table and save them into some file in the Parquet format by the following command:
+You can select data from a ClickHouse table and save it into some file in the Parquet format with the following command:

``` bash
$ clickhouse-client --query="SELECT * FROM {some_table} FORMAT Parquet" > {some_file.pq}
```

-To exchange data with Hadoop, you can use [HDFS table engine](../operations/table_engines/hdfs.md).
+To exchange data with Hadoop, you can use the [HDFS table engine](../engines/table_engines/integrations/hdfs.md).

## ORC {#data-format-orc}

-[Apache ORC](https://orc.apache.org/) is a columnar storage format widespread in the Hadoop ecosystem. You can only insert data in this format to ClickHouse.
+[Apache ORC](https://orc.apache.org/) is a columnar storage format widespread in the Hadoop ecosystem. You can only insert data in this format into ClickHouse.

-### Data Types Matching {#data_types-matching-3}
+### Data Types Matching {#data_types-matching-3}

-The table below shows supported data types and how they match ClickHouse [data types](../data_types/index.md) in `INSERT` queries.
+The table below shows supported data types and how they match ClickHouse [data types](../sql_reference/data_types/index.md) in `INSERT` queries.

-| ORC data type (`INSERT`) | ClickHouse data type |
-|--------------------------|---------------------------------------|
-| `UINT8`, `BOOL` | [UInt8](../data_types/int_uint.md) |
-| `INT8` | [Int8](../data_types/int_uint.md) |
-| `UINT16` | [UInt16](../data_types/int_uint.md) |
-| `INT16` | [Int16](../data_types/int_uint.md) |
-| `UINT32` | [UInt32](../data_types/int_uint.md) |
-| `INT32` | [Int32](../data_types/int_uint.md) |
-| `UINT64` | [UInt64](../data_types/int_uint.md) |
-| `INT64` | [Int64](../data_types/int_uint.md) |
-| `FLOAT`, `HALF_FLOAT` | [Float32](../data_types/float.md) |
-| `DOUBLE` | [Float64](../data_types/float.md) |
-| `DATE32` | [Date](../data_types/date.md) |
-| `DATE64`, `TIMESTAMP` | [DateTime](../data_types/datetime.md) |
-| `STRING`, `BINARY` | [String](../data_types/string.md) |
-| `DECIMAL` | [Decimal](../data_types/decimal.md) |

+| ORC data type (`INSERT`) | ClickHouse data type |
+|--------------------------|-----------------------------------------------------|
+| `UINT8`, `BOOL` | [UInt8](../sql_reference/data_types/int_uint.md) |
+| `INT8` | [Int8](../sql_reference/data_types/int_uint.md) |
+| `UINT16` | [UInt16](../sql_reference/data_types/int_uint.md) |
+| `INT16` | [Int16](../sql_reference/data_types/int_uint.md) |
+| `UINT32` | [UInt32](../sql_reference/data_types/int_uint.md) |
+| `INT32` | [Int32](../sql_reference/data_types/int_uint.md) |
+| `UINT64` | [UInt64](../sql_reference/data_types/int_uint.md) |
+| `INT64` | [Int64](../sql_reference/data_types/int_uint.md) |
+| `FLOAT`, `HALF_FLOAT` | [Float32](../sql_reference/data_types/float.md) |
+| `DOUBLE` | [Float64](../sql_reference/data_types/float.md) |
+| `DATE32` | [Date](../sql_reference/data_types/date.md) |
+| `DATE64`, `TIMESTAMP` | [DateTime](../sql_reference/data_types/datetime.md) |
+| `STRING`, `BINARY` | [String](../sql_reference/data_types/string.md) |
+| `DECIMAL` | [Decimal](../sql_reference/data_types/decimal.md) |

-ClickHouse supports configurable precision of the `Decimal` type. The `INSERT` query treats the ORC `DECIMAL` type as the ClickHouse `Decimal128` type.
+ClickHouse supports configurable precision of the `Decimal` type. The `INSERT` query treats the ORC `DECIMAL` type as the ClickHouse `Decimal128` type.

-Unsupported ORC data types: `DATE32`, `TIME32`, `FIXED_SIZE_BINARY`, `JSON`, `UUID`, `ENUM`.
+Unsupported ORC data types: `DATE32`, `TIME32`, `FIXED_SIZE_BINARY`, `JSON`, `UUID`, `ENUM`.

-The data types of ClickHouse table columns don’t have to match the corresponding ORC data fields. When inserting data, ClickHouse interprets data types according to the table above and then [casts](../query_language/functions/type_conversion_functions/#type_conversion_function-cast) the data to the data type set for the ClickHouse table column.
+The data types of ClickHouse table columns don’t have to match the corresponding ORC data fields. When inserting data, ClickHouse interprets data types according to the table above and then [casts](../query_language/functions/type_conversion_functions/#type_conversion_function-cast) the data to the data type set for the ClickHouse table column.

-### Inserting Data {#inserting-data-1}
+### Inserting Data {#inserting-data-2}

-You can insert ORC data from a file into ClickHouse table by the following command:
+You can insert ORC data from a file into a ClickHouse table with the following command:

``` bash
$ cat filename.orc | clickhouse-client --query="INSERT INTO some_table FORMAT ORC"
```

-To exchange data with Hadoop, you can use [HDFS table engine](../operations/table_engines/hdfs.md).
+To exchange data with Hadoop, you can use the [HDFS table engine](../engines/table_engines/integrations/hdfs.md).

+## Format Schema {#formatschema}
+
+The file name containing the format schema is set by the `format_schema` setting.
+It's required to set this setting when one of the formats `Cap'n Proto` and `Protobuf` is used.
+The format schema is a combination of a file name and the name of a message type in this file, delimited by a colon,
+e.g. `schemafile.proto:MessageType`.
+If the file has the standard extension for the format (for example, `.proto` for `Protobuf`),
+it can be omitted, and in this case, the format schema looks like `schemafile:MessageType`.
+
+If you input or output data via the [client](../interfaces/cli.md) in [interactive mode](../interfaces/cli.md#cli_usage), the file name specified in the format schema
+can contain an absolute path or a path relative to the current directory on the client.
+If you use the client in [batch mode](../interfaces/cli.md#cli_usage), the path to the schema must be relative, for security reasons.
+
+If you input or output data via the [HTTP interface](../interfaces/http.md), the file name specified in the format schema
+should be located in the directory specified in [format\_schema\_path](../operations/server_configuration_parameters/settings.md#server_configuration_parameters-format_schema_path)
+in the server configuration.
+
+## Skipping Errors {#skippingerrors}
+
+Some formats such as `CSV`, `TabSeparated`, `TSKV`, `JSONEachRow`, `Template`, `CustomSeparated` and `Protobuf` can skip a broken row if a parsing error occurred and continue parsing from the beginning of the next row. See the [input\_format\_allow\_errors\_num](../operations/settings/settings.md#settings-input_format_allow_errors_num) and
+[input\_format\_allow\_errors\_ratio](../operations/settings/settings.md#settings-input_format_allow_errors_ratio) settings.
+Limitations:
+- In case of a parsing error, `JSONEachRow` skips all data until the new line (or EOF), so rows must be delimited by `\n` to count errors correctly.
+- `Template` and `CustomSeparated` use the delimiter after the last column and the delimiter between rows to find the beginning of the next row, so skipping errors works only if at least one of them is not empty.

-[Original article](https://clickhouse.tech/docs/fa/interfaces/formats/)
+[Original article](https://clickhouse.tech/docs/en/interfaces/formats/)
diff --git a/docs/fa/interfaces/http.md b/docs/fa/interfaces/http.md
index d51b60f00ba..5ba4d4ef789 100644
--- a/docs/fa/interfaces/http.md
+++ b/docs/fa/interfaces/http.md
@@ -1,29 +1,38 @@
-
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 19
+toc_title: "HTTP Interface"
+---

-# HTTP interface {#http-interface}
+# HTTP Interface {#http-interface}

-The HTTP interface lets you use ClickHouse on any platform, with any programming language. We use it for working from Java and Perl, as well as shell scripts. In other departments, the HTTP interface is used from Perl, Python, and Go. The HTTP interface is more limited than the native interface, but it has better compatibility.
+The HTTP interface lets you use ClickHouse on any platform from any programming language. We use it for working from Java and Perl, as well as shell scripts. In other departments, the HTTP interface is used from Perl, Python, and Go. The HTTP interface is more limited than the native interface, but it has better compatibility.

-By default, clickhouse-server listens for HTTP on port 8123 (this can be changed in the config). If you make a GET / request without parameters, it returns the string “Ok.” (with a line feed at the end). You can use this in health-check scripts.
+By default, clickhouse-server listens for HTTP on port 8123 (this can be changed in the config).

+If you make a GET / request without parameters, it returns the 200 response code and the string defined in [http\_server\_default\_response](../operations/server_configuration_parameters/settings.md#server_configuration_parameters-http_server_default_response), “Ok.” by default (with a line feed at the end).

``` bash
$ curl 'http://localhost:8123/'
Ok.
```
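In a health-check script, the status code alone may be enough (a minimal sketch, assuming the default port):

``` bash
# Prints 200 when the server is up; the response body is discarded.
$ curl -s -o /dev/null -w '%{http_code}\n' 'http://localhost:8123/'
```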
+Use the GET /ping request in health-check scripts. This handler always returns “Ok.” (with a line feed at the end). Available from version 18.12.13.

``` bash
$ curl 'http://localhost:8123/ping'
Ok.
```

-Send the request as a URL ‘query’ parameter, or as a POST. Or send the beginning of the query in the ‘query’ parameter, and the rest in the POST (we’ll explain later why this is necessary). The size of the URL is limited to 16 KB, so keep this in mind when sending large queries.
+Send the request as a URL ‘query’ parameter, or as a POST. Or send the beginning of the query in the ‘query’ parameter, and the rest in the POST (we’ll explain later why this is necessary). The size of the URL is limited to 16 KB, so keep this in mind when sending large queries.

-If successful, you receive the 200 response code and the result in the response body. If an error occurs, you receive the 500 response code and an error description text in the response body.
+If successful, you receive the 200 response code and the result in the response body.
+If an error occurs, you receive the 500 response code and an error description text in the response body.

-When using the GET method, ‘readonly’ is set. In other words, for queries that modify data, you can only use the POST method. You can send the query itself either in the POST body or in the URL parameter.
+When using the GET method, ‘readonly’ is set. In other words, for queries that modify data, you can only use the POST method. You can send the query itself either in the POST body or in the URL parameter.

-Example:
+Examples:

``` bash
$ curl 'http://localhost:8123/?query=SELECT%201'
1

$ wget -O- -q 'http://localhost:8123/?query=SELECT 1'
1

$ echo -ne 'GET /?query=SELECT%201 HTTP/1.0\r\n\r\n' | nc localhost 8123
HTTP/1.0 200 OK
+Date: Wed, 27 Nov 2019 10:30:18 GMT
Connection: Close
-Date: Fri, 16 Nov 2012 19:21:50 GMT
+Content-Type: text/tab-separated-values; charset=UTF-8
+X-ClickHouse-Server-Display-Name: clickhouse.ru-central1.internal
+X-ClickHouse-Query-Id: 5abe861c-239c-467f-b955-8a201abb8b7f
+X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0"}

1
```

-As you can see, curl is somewhat inconvenient in that spaces must be URL escaped. Although wget escapes everything itself, we don't recommend using it because it doesn't work well over HTTP 1.1 when using keep-alive and Transfer-Encoding: chunked.
+As you can see, curl is somewhat inconvenient in that spaces must be URL escaped.
+Although wget escapes everything itself, we do not recommend using it because it does not work well over HTTP 1.1 when using keep-alive and Transfer-Encoding: chunked.
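If you stay with curl, the escaping can be delegated to curl itself (a minimal sketch; -G turns the request into a GET with the URL-encoded parameter attached):

``` bash
$ curl -G 'http://localhost:8123/' --data-urlencode 'query=SELECT 1'
1
```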
    +همانطور که می بینید, حلقه تا حدودی ناخوشایند است که در فضاهای باید نشانی اینترنتی فرار. +اگر چه سازمان تجارت جهانی از همه چیز خود فرار می کند ما توصیه نمی کنیم از این استفاده کنیم زیرا هنگام استفاده از زنده ماندن و انتقال رمزگذاری به خوبی کار نمی کند 1.1. ``` bash $ echo 'SELECT 1' | curl 'http://localhost:8123/' --data-binary @- @@ -57,11 +67,8 @@ $ echo '1' | curl 'http://localhost:8123/?query=SELECT' --data-binary @- 1 ``` -
    - -اگر بخشی از query در پارامتر ارسال شود، و بخش دیگر در POST، یک line feed بین دو بخش وارد می شود. مثال (این کار نمی کند): - -
    +اگر بخشی از پرس و جو در پارامتر ارسال, و بخشی در پست, خوراک خط بین این دو بخش داده قرار داده. +مثال (این کار نخواهد کرد): ``` bash $ echo 'ECT 1' | curl 'http://localhost:8123/?query=SEL' --data-binary @- @@ -70,11 +77,8 @@ ECT 1 , expected One of: SHOW TABLES, SHOW DATABASES, SELECT, INSERT, CREATE, ATTACH, RENAME, DROP, DETACH, USE, SET, OPTIMIZE., e.what() = DB::Exception ``` -
    - -به صورت پیش فرض، داده ها با فرمت TabSeparated بر میگردند. (برای اطلاعات بیشتر بخش «فرمت» را مشاهده کنید). شما میتوانید از دستور FORMAT در query خود برای ست کردن فرمتی دیگر استفاده کنید. - -
    +به طور پیش فرض, داده ها در قالب جدولبندی بازگشت (برای اطلاعات بیشتر, دیدن “Formats” بخش). +شما با استفاده از بند فرمت پرس و جو به درخواست هر فرمت دیگر. ``` bash $ echo 'SELECT 1 FORMAT Pretty' | curl 'http://localhost:8123/?' --data-binary @- @@ -85,63 +89,39 @@ $ echo 'SELECT 1 FORMAT Pretty' | curl 'http://localhost:8123/?' --data-binary @ └───┘ ``` -
    +روش پست انتقال داده ها برای درج نمایش داده شد لازم است. در این مورد می توانید ابتدا پرس و جو را در پارامتر نشانی وب بنویسید و از پست برای انتقال داده ها برای وارد کردن استفاده کنید. داده ها برای وارد کردن می تواند, مثلا, تخلیه تب جدا از خروجی زیر. در این راه وارد کردن پرس و جو جایگزین بارگذاری داده های محلی infile از mysql. -برای query های INSERT متد POST ضروری است. در این مورد، شما می توانید ابتدای query خود را در URL parameter بنویسید، و از POST برای پاس داده داده ها برای درج استفاده کنید. داده ی برای درج می تواند، برای مثال یک دامپ tab-separated شده از MySQL باشد. به این ترتیب، query INSERT جایگزین LOAD DATA LOCAL INFILE از MySQL می شود. - -مثال: ساخت جدول - -
    +نمونه: ایجاد یک جدول: ``` bash -echo 'CREATE TABLE t (a UInt8) ENGINE = Memory' | curl 'http://localhost:8123/' --data-binary @- +$ echo 'CREATE TABLE t (a UInt8) ENGINE = Memory' | curl 'http://localhost:8123/' --data-binary @- ``` -
    - -استفاده از query INSERT برای درج داده: - -
    +با استفاده از قرار دادن پرس و جو برای درج داده ها: ``` bash -echo 'INSERT INTO t VALUES (1),(2),(3)' | curl 'http://localhost:8123/' --data-binary @- +$ echo 'INSERT INTO t VALUES (1),(2),(3)' | curl 'http://localhost:8123/' --data-binary @- ``` -
    - -داده ها میتوانند جدا از پارامتر query ارسال شوند: - -
    +داده ها را می توان به طور جداگانه از پرس و جو ارسال می شود: ``` bash -echo '(4),(5),(6)' | curl 'http://localhost:8123/?query=INSERT%20INTO%20t%20VALUES' --data-binary @- +$ echo '(4),(5),(6)' | curl 'http://localhost:8123/?query=INSERT%20INTO%20t%20VALUES' --data-binary @- ``` -
    - -شما می توانید هر نوع فرمت دیتایی مشخص کنید. فرمت ‘Values’ دقیقا مشابه زمانی است که شما INSERT INTO t VALUES را می نویسید: - -
    +شما می توانید هر فرمت داده را مشخص کنید. این ‘Values’ فرمت همان چیزی است که هنگام نوشتن به مقادیر تی استفاده می شود: ``` bash -echo '(7),(8),(9)' | curl 'http://localhost:8123/?query=INSERT%20INTO%20t%20FORMAT%20Values' --data-binary @- +$ echo '(7),(8),(9)' | curl 'http://localhost:8123/?query=INSERT%20INTO%20t%20FORMAT%20Values' --data-binary @- ``` -
    - -برای درج داده ها از یک دامپ tab-separate، فرمت مشخص زیر را وارد کنید: - -
    +برای وارد کردن داده ها از تخلیه زبانه جدا شده فرمت مربوطه را مشخص کنید: ``` bash -echo -ne '10\n11\n12\n' | curl 'http://localhost:8123/?query=INSERT%20INTO%20t%20FORMAT%20TabSeparated' --data-binary @- +$ echo -ne '10\n11\n12\n' | curl 'http://localhost:8123/?query=INSERT%20INTO%20t%20FORMAT%20TabSeparated' --data-binary @- ``` -
    - -به دلیل پردازش موازی، نتایج query با ترتیب رندوم چاپ می شود: - -
    +خواندن محتویات جدول. داده ها خروجی به صورت تصادفی به دلیل پردازش پرس و جو موازی است: ``` bash $ curl 'http://localhost:8123/?query=SELECT%20a%20FROM%20t' @@ -159,31 +139,37 @@ $ curl 'http://localhost:8123/?query=SELECT%20a%20FROM%20t' 6 ``` -
Deleting the table:

``` bash
$ echo 'DROP TABLE t' | curl 'http://localhost:8123/' --data-binary @-
```
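Since statements like this return no data, success is easiest to check from the HTTP status code (a sketch; ClickHouse typically answers 200 on success and a 5xx status with an error message in the body on failure):

``` bash
# -w prints the status code; the response body itself is discarded
$ curl -s -o /dev/null -w '%{http_code}\n' 'http://localhost:8123/?query=SELECT%201'
200
```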
For successful requests that don't return a data table, an empty response body is returned.

You can use the internal ClickHouse compression format when transmitting data. The compressed data has a non-standard format, and you need the special `clickhouse-compressor` program to work with it (it is installed with the `clickhouse-client` package). To increase the efficiency of data insertion, you can disable server-side checksum verification with the [http_native_compression_disable_checksumming_on_decompress](../operations/settings/settings.md#settings-http_native_compression_disable_checksumming_on_decompress) setting.

If you specify `compress=1` in the URL, the server compresses the data it sends to you.
If you specify `decompress=1` in the URL, the server decompresses the data that you pass in the `POST` method.

You can also choose to use [HTTP compression](https://en.wikipedia.org/wiki/HTTP_compression). To send a compressed `POST` request, append the request header `Content-Encoding: compression_method`. In order for ClickHouse to compress the response, you must append `Accept-Encoding: compression_method`. ClickHouse supports the `gzip`, `br`, and `deflate` [compression methods](https://en.wikipedia.org/wiki/HTTP_compression#Content-Encoding_tokens). To enable HTTP compression, you must use the ClickHouse [enable_http_compression](../operations/settings/settings.md#settings-enable_http_compression) setting. You can configure the data compression level with the [http_zlib_compression_level](#settings-http_zlib_compression_level) setting for all compression methods.

You can use this to reduce network traffic when transmitting a large amount of data, or for creating dumps that are immediately compressed.

Examples of sending data with compression:

``` bash
# Receiving a compressed response from the server:
$ curl -vsS "http://localhost:8123/?enable_http_compression=1" -d 'SELECT number FROM system.numbers LIMIT 10' -H 'Accept-Encoding: gzip'

# Sending a compressed query to the server:
$ echo "SELECT 1" | gzip -c | curl -sS --data-binary @- -H 'Content-Encoding: gzip' 'http://localhost:8123/'
```

!!! note "Note"
    Some HTTP clients decompress data from the server by default (with `gzip` and `deflate`), so you may get decompressed data even if you use the compression settings correctly.

You can use the 'database' URL parameter to specify the default database.

``` bash
$ echo 'SELECT number FROM numbers LIMIT 10' | curl 'http://localhost:8123/?database=system' --data-binary @-
@@ -199,37 +185,38 @@ $ echo 'SELECT number FROM numbers LIMIT 10' | curl 'http://localhost:8123/?data
9
```

-
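Returning to the HTTP compression shown above: as an end-to-end check (a sketch; assumes the standard gzip tools are installed), the compressed response can be piped through gunzip:

``` bash
# The response arrives gzip-encoded and is decompressed locally
$ curl -sS -H 'Accept-Encoding: gzip' 'http://localhost:8123/?enable_http_compression=1&query=SELECT+1' | gunzip -
1
```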
By default, the database that is registered in the server settings is used as the default database; it is named 'default'. Alternatively, you can always specify the database with a dot before the table name.

The username and password can be indicated in one of three ways:

1. Using HTTP Basic Authentication. Example:

``` bash
$ echo 'SELECT 1' | curl 'http://user:password@localhost:8123/' -d @-
```

2. In the 'user' and 'password' URL parameters. Example:

``` bash
$ echo 'SELECT 1' | curl 'http://localhost:8123/?user=user&password=password' -d @-
```

3. Using the 'X-ClickHouse-User' and 'X-ClickHouse-Key' headers. Example:

``` bash
$ echo 'SELECT 1' | curl -H 'X-ClickHouse-User: user' -H 'X-ClickHouse-Key: password' 'http://localhost:8123/' -d @-
```

If the user name is not specified, the `default` name is used. If the password is not specified, the empty password is used.
You can also use the URL parameters to specify any settings for processing a single query, or entire profiles of settings. Example: http://localhost:8123/?profile=web&max_rows_to_read=1000000000&query=SELECT+1

For more information, see the [Settings](../operations/settings/index.md) section.

``` bash
$ echo 'SELECT number FROM system.numbers LIMIT 10' | curl 'http://localhost:8123/?' --data-binary @-
@@ -245,40 +232,280 @@ $ echo 'SELECT number FROM system.numbers LIMIT 10' | curl 'http://localhost:812
9
```

-
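Any single setting can be passed the same way; for example (a sketch), forcing one query to run single-threaded:

``` bash
# max_threads applies only to this request
$ echo 'SELECT number FROM system.numbers LIMIT 10' | curl 'http://localhost:8123/?max_threads=1' --data-binary @-
```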
For information about other parameters, see the section "SET".

Similarly, you can use ClickHouse sessions in the HTTP protocol. To do this, add the `session_id` GET parameter to the request. You can use any string as the session ID. By default, the session is terminated after 60 seconds of inactivity. To change this timeout, modify the `default_session_timeout` setting in the server configuration, or add the `session_timeout` GET parameter to the request. To check the session status, use the `session_check=1` parameter. Only one query at a time can be executed within a single session.

You can receive information about the progress of a query in the `X-ClickHouse-Progress` response headers. To do this, enable [send_progress_in_http_headers](../operations/settings/settings.md#settings-send_progress_in_http_headers). Example of the header sequence:

``` text
X-ClickHouse-Progress: {"read_rows":"2752512","read_bytes":"240570816","total_rows_to_read":"8880128"}
X-ClickHouse-Progress: {"read_rows":"5439488","read_bytes":"482285394","total_rows_to_read":"8880128"}
X-ClickHouse-Progress: {"read_rows":"8783786","read_bytes":"819092887","total_rows_to_read":"8880128"}
```

Possible header fields:

- `read_rows` — Number of rows read.
- `read_bytes` — Volume of data read, in bytes.
- `total_rows_to_read` — Total number of rows to be read.
- `written_rows` — Number of rows written.
- `written_bytes` — Volume of data written, in bytes.

Running requests don't stop automatically if the HTTP connection is lost. Parsing and data formatting are performed on the server side, so using the network might be ineffective.
The optional 'query_id' parameter can be passed as the query ID (any string). For more information, see the section "Settings, replace_running_query".

The optional 'quota_key' parameter can be passed as the quota key (any string). For more information, see the section "Quotas".

The HTTP interface allows passing external data (external temporary tables) for querying. For more information, see the section "External data for query processing".

## Response Buffering {#response-buffering}

You can enable response buffering on the server side. The `buffer_size` and `wait_end_of_query` URL parameters are provided for this purpose.

`buffer_size` determines the number of bytes in the result to buffer in the server memory. If the result body is larger than this threshold, the buffer is written to the HTTP channel and the remaining data is sent directly to the HTTP channel.

To ensure that the entire response is buffered, set `wait_end_of_query=1`. In this case, the data that is not stored in memory is buffered in a temporary server file.

Example:

``` bash
$ curl -sS 'http://localhost:8123/?max_result_bytes=4000000&buffer_size=3000000&wait_end_of_query=1' -d 'SELECT toUInt8(number) FROM system.numbers LIMIT 9000000 FORMAT RowBinary'
```

Use buffering to avoid situations where a query processing error occurs after the response code and HTTP headers have been sent to the client. In this situation, an error message is written at the end of the response body, and on the client side the error can only be detected at the parsing stage.
### Queries with Parameters {#cli-queries-with-parameters}

You can create a query with parameters and pass values for them from the corresponding HTTP request parameters. For more information, see [Queries with Parameters for CLI](cli.md#cli-queries-with-parameters).

### Example {#example}

``` bash
$ curl -sS "http://localhost:8123/?param_id=2&param_phrase=test" -d "SELECT * FROM table WHERE int_column = {id:UInt8} and string_column = {phrase:String}"
```

## Predefined HTTP Interface {#predefined_http_interface}

ClickHouse supports specific queries through the HTTP interface. For example, you can write data to a table as follows:

``` bash
$ echo '(4),(5),(6)' | curl 'http://localhost:8123/?query=INSERT%20INTO%20t%20VALUES' --data-binary @-
```

ClickHouse also supports a Predefined HTTP Interface which can help you integrate more easily with third-party tools like the [Prometheus exporter](https://github.com/percona-lab/clickhouse_exporter).

Example:

- First of all, add this section to the server configuration file:

<!-- -->

``` xml
<http_handlers>
    <predefine_query_handler>
        <url>/metrics</url>
        <method>GET</method>
        <queries>
            <query>SELECT * FROM system.metrics LIMIT 5 FORMAT Template SETTINGS format_template_resultset = 'prometheus_template_output_format_resultset', format_template_row = 'prometheus_template_output_format_row', format_template_rows_between_delimiter = '\n'</query>
        </queries>
    </predefine_query_handler>
</http_handlers>
```

- You can now request the URL directly for data in the Prometheus format:

<!-- -->

``` bash
curl -vvv 'http://localhost:8123/metrics'
* Trying ::1...
* Connected to localhost (::1) port 8123 (#0)
> GET /metrics HTTP/1.1
> Host: localhost:8123
> User-Agent: curl/7.47.0
> Accept: */*
>
< HTTP/1.1 200 OK
< Date: Wed, 27 Nov 2019 08:54:25 GMT
< Connection: Keep-Alive
< Content-Type: text/plain; charset=UTF-8
< X-ClickHouse-Server-Display-Name: i-tl62qd0o
< Transfer-Encoding: chunked
< X-ClickHouse-Query-Id: f39235f6-6ed7-488c-ae07-c7ceafb960f6
< Keep-Alive: timeout=3
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0"}
<
# HELP "Query" "Number of executing queries"
# TYPE "Query" counter
"Query" 1

# HELP "Merge" "Number of executing background merges"
# TYPE "Merge" counter
"Merge" 0

# HELP "PartMutation" "Number of mutations (ALTER DELETE/UPDATE)"
# TYPE "PartMutation" counter
"PartMutation" 0

# HELP "ReplicatedFetch" "Number of data parts being fetched from replica"
# TYPE "ReplicatedFetch" counter
"ReplicatedFetch" 0

# HELP "ReplicatedSend" "Number of data parts being sent to replicas"
# TYPE "ReplicatedSend" counter
"ReplicatedSend" 0

* Connection #0 to host localhost left intact
```

As you can see from the example, if `<http_handlers>` is configured in the config.xml file, ClickHouse will match received HTTP requests to the predefined types in `<http_handlers>`; if a match succeeds, ClickHouse executes the corresponding predefined query.

Now `<http_handlers>` can configure `<root_handler>`, `<ping_handler>`, `<replicas_status_handler>`, `<dynamic_query_handler>` and `<predefine_query_handler>`.

## root_handler {#root_handler}

`<root_handler>` returns the specified content for the root path request. The specific return content is configured by `http_server_default_response` in config.xml. If it is not specified, **Ok.** is returned.

`http_server_default_response` is not defined, and an HTTP request is sent to ClickHouse. The result is as follows:

``` xml
<http_handlers>
    <root_handler/>
</http_handlers>
```

    $ curl 'http://localhost:8123'
    Ok.

`http_server_default_response` is defined, and an HTTP request is sent to ClickHouse. The result is as follows:

``` xml
<http_server_default_response><![CDATA[<html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>]]></http_server_default_response>

<http_handlers>
    <root_handler/>
</http_handlers>
```

    $ curl 'http://localhost:8123'
    <html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>%
## ping_handler {#ping_handler}

`<ping_handler>` can be used to probe the health of the current ClickHouse server. When the ClickHouse HTTP server is up, accessing ClickHouse through `<ping_handler>` returns **Ok.**.

Example:

``` xml
<http_handlers>
    <ping_handler>/ping</ping_handler>
</http_handlers>
```

``` bash
$ curl 'http://localhost:8123/ping'
Ok.
```

## replicas_status_handler {#replicas_status_handler}

`<replicas_status_handler>` is used to detect the state of the replica node; it returns **Ok.** if the replica node has no delay. If there is a delay, it returns the specific delay. The value of `<replicas_status_handler>` supports customization. If you don't specify `<replicas_status_handler>`, the ClickHouse default setting for `<replicas_status_handler>` is **/replicas_status**.

Example:

``` xml
<http_handlers>
    <replicas_status_handler>/replicas_status</replicas_status_handler>
</http_handlers>
```

No-delay case:

``` bash
$ curl 'http://localhost:8123/replicas_status'
Ok.
```

Delayed case:

``` bash
$ curl 'http://localhost:8123/replicas_status'
db.stats: Absolute delay: 22. Relative delay: 22.
```

## predefined_query_handler {#predefined_query_handler}

You can configure `<method>`, `<headers>`, `<url>` and `<queries>` in `<predefine_query_handler>`.

`<method>` is responsible for matching the method part of the HTTP request. It fully conforms to the definition of [method](https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods) in the HTTP protocol. It is an optional configuration; if it is not defined in the configuration file, the method portion of the HTTP request is not matched.

`<url>` is responsible for matching the URL part of the HTTP request. It is compatible with [RE2](https://github.com/google/re2) regular expressions. It is an optional configuration; if it is not defined in the configuration file, the URL portion of the HTTP request is not matched.

`<headers>` is responsible for matching the header part of the HTTP request. It is also compatible with RE2 regular expressions. It is an optional configuration; if it is not defined in the configuration file, the header portion of the HTTP request is not matched.

`<queries>` contains the predefined queries of `<predefine_query_handler>`, which are executed by ClickHouse when an HTTP request is matched; the result of the query is returned. It is a mandatory configuration.

`<predefine_query_handler>` supports setting Settings and query_params values.

The following example defines the values of the `max_threads` and `max_alter_threads` settings, then queries the system table to check whether these settings were set successfully.

Example:

``` xml
<http_handlers>
    <predefine_query_handler>
        <method>GET</method>
        <headers>
            <XXX>TEST_HEADER_VALUE</XXX>
            <PARAMS_XXX><![CDATA[(?P<name_1>[^/]+)(/(?P<name_2>[^/]+))?]]></PARAMS_XXX>
        </headers>
        <url><![CDATA[/query_param_with_url/\d+/(?P<name_1>[^/]+)(/(?P<name_2>[^/]+))?]]></url>
        <queries>
            <query>SELECT value FROM system.settings WHERE name = {name_1:String}</query>
            <query>SELECT name, value FROM system.settings WHERE name = {name_2:String}</query>
        </queries>
    </predefine_query_handler>
</http_handlers>
```

``` bash
$ curl -H 'XXX:TEST_HEADER_VALUE' -H 'PARAMS_XXX:max_threads' 'http://localhost:8123/query_param_with_url/1/max_threads/max_alter_threads?max_threads=1&max_alter_threads=2'
1
max_alter_threads 2
```

!!! note "Note"
    In one `<predefine_query_handler>`, one `<queries>` only supports one `<query>` of an insert type.

## dynamic_query_handler {#dynamic_query_handler}

`<dynamic_query_handler>` differs from `<predefine_query_handler>` in that the query is not predefined in the configuration file but is passed as a parameter of the HTTP request, named by `<query_param_name>`.

ClickHouse extracts and executes the value corresponding to the `<query_param_name>` value in the URL of the HTTP request.
The ClickHouse default setting for `<query_param_name>` is `/query`. It is an optional configuration; if there is no definition in the configuration file, the param is not passed in.

To experiment with this functionality, the example defines the values of max_threads and max_alter_threads and queries whether the settings were set successfully.
The difference is that in `<predefine_query_handler>` the query is written in the configuration file, while in `<dynamic_query_handler>` the query is written in the form of a param of the HTTP request.

Example:

``` xml
<http_handlers>
    <dynamic_query_handler>
        <headers>
            <XXX>TEST_HEADER_VALUE_DYNAMIC</XXX>
            <PARAMS_XXX><![CDATA[(?P<name_1>[^/]+)(/(?P<name_2>[^/]+))?]]></PARAMS_XXX>
        </headers>
        <query_param_name>query_param</query_param_name>
    </dynamic_query_handler>
</http_handlers>
```

``` bash
$ curl -H 'XXX:TEST_HEADER_VALUE_DYNAMIC' -H 'PARAMS_XXX:max_threads' 'http://localhost:8123/?query_param=SELECT%20value%20FROM%20system.settings%20where%20name%20=%20%7Bname_1:String%7D%20OR%20name%20=%20%7Bname_2:String%7D&max_threads=1&max_alter_threads=2&param_name_2=max_alter_threads'
1
2
```

[Original article](https://clickhouse.tech/docs/en/interfaces/http_interface/)
diff --git a/docs/fa/interfaces/index.md b/docs/fa/interfaces/index.md
index 30adb2e5806..900e176f77d 100644
--- a/docs/fa/interfaces/index.md
+++ b/docs/fa/interfaces/index.md
@@ -1,23 +1,29 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_folder_title: Interfaces
+toc_priority: 14
+toc_title: Interfaces
+---

# Interfaces {#interfaces}

ClickHouse provides two network interfaces (both can be optionally wrapped in TLS for additional security):

- [HTTP](http.md), which is documented and easy to use directly.
- [Native TCP](tcp.md), which has less overhead.

In most cases, it is recommended to use an appropriate tool or library instead of interacting with those directly. The following are officially supported by Yandex:

- [Command-line client](cli.md)
- [JDBC driver](jdbc.md)
- [ODBC driver](odbc.md)
- [C++ client library](cpp.md)

There is also a wide range of third-party libraries for working with ClickHouse:

- [Client libraries](third-party/client_libraries.md)
- [Integrations](third-party/integrations.md)
- [Visual interfaces](third-party/gui.md)

[Original article](https://clickhouse.tech/docs/en/interfaces/)
diff --git a/docs/fa/interfaces/jdbc.md b/docs/fa/interfaces/jdbc.md
index 417931a16b7..719e7a1630b 100644
--- a/docs/fa/interfaces/jdbc.md
+++ b/docs/fa/interfaces/jdbc.md
@@ -1,13 +1,15 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 22
+toc_title: JDBC Driver
+---

# JDBC Driver {#jdbc-driver}

- **[Official driver](https://github.com/ClickHouse/clickhouse-jdbc)**
- Third-party drivers:
    - [ClickHouse-Native-JDBC](https://github.com/housepower/ClickHouse-Native-JDBC)
    - [clickhouse4j](https://github.com/blynkkk/clickhouse4j)
[Original article](https://clickhouse.tech/docs/en/interfaces/jdbc/)
diff --git a/docs/fa/interfaces/mysql.md b/docs/fa/interfaces/mysql.md
index 668c0b7b9c3..1d9f3669acd 100644
--- a/docs/fa/interfaces/mysql.md
+++ b/docs/fa/interfaces/mysql.md
@@ -1,22 +1,25 @@
 ---
-en_copy: true
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 20
+toc_title: MySQL Interface
 ---

# MySQL Interface {#mysql-interface}

ClickHouse supports the MySQL wire protocol. It can be enabled by the [mysql_port](../operations/server_configuration_parameters/settings.md#server_configuration_parameters-mysql_port) setting in the configuration file:

``` xml
<mysql_port>9004</mysql_port>
```

Example of connecting using the command-line tool `mysql`:

``` bash
$ mysql --protocol tcp -u default -P 9004
```

Output if the connection succeeded:

``` text
Welcome to the MySQL monitor.  Commands end with ; or \g.
@@ -34,13 +37,13 @@ Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
mysql>
```

For compatibility with all MySQL clients, it is recommended to specify the user password with [double SHA1](../operations/settings/settings_users.md#password_double_sha1_hex) in the configuration file.
If the user password is specified using [SHA256](../operations/settings/settings_users.md#password_sha256_hex), some clients won't be able to authenticate (mysqljs and old versions of the command-line tool mysql).

Restrictions:

- prepared queries are not supported

- some data types are sent as strings

[Original article](https://clickhouse.tech/docs/en/interfaces/mysql/)
diff --git a/docs/fa/interfaces/odbc.md b/docs/fa/interfaces/odbc.md
index 03d8d8dbd9c..ce50f06965a 100644
--- a/docs/fa/interfaces/odbc.md
+++ b/docs/fa/interfaces/odbc.md
@@ -1,9 +1,12 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 23
+toc_title: ODBC Driver
+---

# ODBC Driver {#odbc-driver}

- [Official driver](https://github.com/ClickHouse/clickhouse-odbc).

[Original article](https://clickhouse.tech/docs/en/interfaces/odbc/)
diff --git a/docs/fa/interfaces/tcp.md b/docs/fa/interfaces/tcp.md
index efd041886f9..294cbe6ba8b 100644
--- a/docs/fa/interfaces/tcp.md
+++ b/docs/fa/interfaces/tcp.md
@@ -1,9 +1,13 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 18
+toc_title: Native Interface (TCP)
+---

# Native Interface (TCP) {#native-interface-tcp}

The native protocol is used in the [command-line client](cli.md), for inter-server communication during distributed query processing, and also in other C++ programs. Unfortunately, the native ClickHouse protocol does not have a formal specification yet, but it can be reverse-engineered from the ClickHouse source code (starting [here](https://github.com/ClickHouse/ClickHouse/tree/master/dbms/Client)) and/or by intercepting and analyzing TCP traffic.
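For instance (a sketch; assumes the default native port 9000 and that tcpdump is installed), traffic can be captured for offline analysis:

``` bash
# Capture native-protocol traffic on the default TCP port 9000 for later inspection
$ sudo tcpdump -i any -w clickhouse-native.pcap 'tcp port 9000'
```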
[Original article](https://clickhouse.tech/docs/en/interfaces/tcp/)
diff --git a/docs/fa/interfaces/third-party/client_libraries.md b/docs/fa/interfaces/third-party/client_libraries.md
index a2c17d053fb..9d61ab4bd77 100644
--- a/docs/fa/interfaces/third-party/client_libraries.md
+++ b/docs/fa/interfaces/third-party/client_libraries.md
@@ -1,55 +1,59 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 26
+toc_title: Client Libraries
+---

# Client Libraries from Third-party Developers {#client-libraries-from-third-party-developers}

!!! warning "Disclaimer"
    Yandex does **not** maintain the libraries listed below and hasn't done any extensive testing to ensure their quality.

- Python
    - [infi.clickhouse_orm](https://github.com/Infinidat/infi.clickhouse_orm)
    - [clickhouse-driver](https://github.com/mymarilyn/clickhouse-driver)
    - [clickhouse-client](https://github.com/yurial/clickhouse-client)
    - [aiochclient](https://github.com/maximdanilchenko/aiochclient)
- PHP
    - [smi2/phpClickHouse](https://packagist.org/packages/smi2/phpClickHouse)
    - [8bitov/clickhouse-php-client](https://packagist.org/packages/8bitov/clickhouse-php-client)
    - [bozerkins/clickhouse-client](https://packagist.org/packages/bozerkins/clickhouse-client)
    - [simpod/clickhouse-client](https://packagist.org/packages/simpod/clickhouse-client)
    - [seva-code/php-click-house-client](https://packagist.org/packages/seva-code/php-click-house-client)
    - [SeasClick C++ client](https://github.com/SeasX/SeasClick)
- Go
    - [clickhouse](https://github.com/kshvakov/clickhouse/)
    - [go-clickhouse](https://github.com/roistat/go-clickhouse)
    - [mailru-go-clickhouse](https://github.com/mailru/go-clickhouse)
    - [golang-clickhouse](https://github.com/leprosus/golang-clickhouse)
- NodeJs
    - [clickhouse (NodeJs)](https://github.com/TimonKK/clickhouse)
    - [node-clickhouse](https://github.com/apla/node-clickhouse)
- Perl
    - [perl-DBD-ClickHouse](https://github.com/elcamlost/perl-DBD-ClickHouse)
    - [HTTP-ClickHouse](https://metacpan.org/release/HTTP-ClickHouse)
    - [AnyEvent-ClickHouse](https://metacpan.org/release/AnyEvent-ClickHouse)
- Ruby
    - [ClickHouse (Ruby)](https://github.com/shlima/click_house)
- R
    - [clickhouse-r](https://github.com/hannesmuehleisen/clickhouse-r)
    - [RClickhouse](https://github.com/IMSMWU/RClickhouse)
- Java
    - [clickhouse-client-java](https://github.com/VirtusAI/clickhouse-client-java)
    - [clickhouse-client](https://github.com/Ecwid/clickhouse-client)
- Scala
    - [clickhouse-scala-client](https://github.com/crobox/clickhouse-scala-client)
- Kotlin
    - [AORM](https://github.com/TanVD/AORM)
- C#
    - [ClickHouse.Ado](https://github.com/killwort/ClickHouse-Net)
    - [ClickHouse.Client](https://github.com/DarkWanderer/ClickHouse.Client)
    - [ClickHouse.Net](https://github.com/ilyabreev/ClickHouse.Net)
- Elixir
    - [clickhousex](https://github.com/appodeal/clickhousex/)
- Nim
    - [nim-clickhouse](https://github.com/leonardoce/nim-clickhouse)

[Original article](https://clickhouse.tech/docs/en/interfaces/third-party/client_libraries/)
diff --git a/docs/fa/interfaces/third-party/gui.md b/docs/fa/interfaces/third-party/gui.md
index 38e2422a7f9..24d7a09e92f 100644
--- a/docs/fa/interfaces/third-party/gui.md
+++ b/docs/fa/interfaces/third-party/gui.md
@@ -1,92 +1,152 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 28
+toc_title: Visual Interfaces
+---

# Visual Interfaces from Third-party Developers {#visual-interfaces-from-third-party-developers}

## Open-Source {#open-source}

### Tabix {#tabix}

Web interface for ClickHouse in the [Tabix](https://github.com/tabixio/tabix) project.

Features:

- Works with ClickHouse directly from the browser, without the need to install additional software.
- Query editor with syntax highlighting.
- Auto-completion of commands.
- Tools for graphical analysis of query execution.
- Color scheme options.

[Tabix documentation](https://tabix.io/doc/).

### HouseOps {#houseops}

[HouseOps](https://github.com/HouseOps/HouseOps) is a desktop client for OSX, Linux and Windows.

Features:

- Query builder with syntax highlighting. View the response in a table or a JSON view.
- Export query results as CSV or JSON.
- List of processes with descriptions. Write mode. Ability to stop (`KILL`) a process.
- Database graph. Shows all tables and their columns with additional information.
- Quick view of the column size.
- Server configuration.

The following features are planned for development:

- Database management.
- User management.
- Real-time data analysis.
- Cluster monitoring.
- Cluster management.
- Monitoring replicated and Kafka tables.

### LightHouse {#lighthouse}

[LightHouse](https://github.com/VKCOM/lighthouse) is a lightweight web interface for ClickHouse.

Features:

- Table list with filtering and metadata.
- Table preview with filtering and sorting.
- Read-only query execution.

### Redash {#redash}

[Redash](https://github.com/getredash/redash) is a platform for data visualization.

It supports multiple data sources including ClickHouse; Redash can join the results of queries from different data sources into one final dataset.

Features:

- Powerful query editor.
- Database explorer.
- Visualization tools that let you represent data in different forms.

### DBeaver {#dbeaver}

[DBeaver](https://dbeaver.io/) is a universal desktop database client with ClickHouse support.

Features:

- Query development with syntax highlighting and autocompletion.
- Table list with filters and metadata search.
- Table data preview.
- Full-text search.

### clickhouse-cli {#clickhouse-cli}

[clickhouse-cli](https://github.com/hatarist/clickhouse-cli) is an alternative command-line client for ClickHouse, written in Python 3.

Features:

- Autocompletion.
- Syntax highlighting for queries and data output.
- Pager support for data output.
- Custom PostgreSQL-like commands.

### clickhouse-flamegraph {#clickhouse-flamegraph}

[clickhouse-flamegraph](https://github.com/Slach/clickhouse-flamegraph) is a specialized tool to visualize `system.trace_log` as a [flamegraph](http://www.brendangregg.com/flamegraphs.html).

## Commercial {#commercial}

### DataGrip {#datagrip}

[DataGrip](https://www.jetbrains.com/datagrip/) is a database IDE from JetBrains with dedicated support for ClickHouse. It is also embedded in other IntelliJ-based tools: PyCharm, IntelliJ IDEA, GoLand, PhpStorm and others.

Features:

- Very fast code completion.
- ClickHouse syntax highlighting.
- Support for features specific to ClickHouse, for example, nested columns and table engines.
- Data editor.
- Refactorings.
- Search and navigation.

### Yandex DataLens {#yandex-datalens}

[Yandex DataLens](https://cloud.yandex.ru/services/datalens) is a service of data visualization and analytics.

Features:

- Wide range of available visualizations, from simple bar charts to complex dashboards.
- Dashboards can be made publicly available.
- Support for multiple data sources, including ClickHouse.
- Storage for materialized data based on ClickHouse.

DataLens is [available for free](https://cloud.yandex.com/docs/datalens/pricing) for low-load projects, even for commercial use.

- [DataLens documentation](https://cloud.yandex.com/docs/datalens/).
- [Tutorial](https://cloud.yandex.com/docs/solutions/datalens/data-from-ch-visualization) on visualizing data from a ClickHouse database.

### Holistics Software {#holistics-software}

[Holistics](https://www.holistics.io/) is a full-stack data platform and business intelligence tool.

Features:

- Automated email, Slack and Google Sheet schedules of reports.
- SQL editor with visualizations, version control, auto-completion, reusable query components and dynamic filters.
- Embedded analytics of reports and dashboards via iframe.
- Data preparation and ETL capabilities.
- SQL data modelling support for relational mapping of data.

### Looker {#looker}

[Looker](https://looker.com) is a data platform and business intelligence tool with support for 50+ database dialects, including ClickHouse. Looker is available as a SaaS platform and self-hosted. Users can use Looker via the browser to explore data, build visualizations and dashboards, schedule reports, and share their insights with colleagues. Looker provides a rich set of tools to embed these features in other applications, and an API
to integrate data with other applications.

Features:

- Easy and agile development using LookML, a language which supports curated
    [Data Modeling](https://looker.com/platform/data-modeling) to support report writers and end-users.
- Powerful workflow integration via Looker's [Data Actions](https://looker.com/platform/actions).

[How to configure ClickHouse in Looker.](https://docs.looker.com/setup-and-management/database-config/clickhouse)

[Original article](https://clickhouse.tech/docs/en/interfaces/third-party/gui/)
diff --git a/docs/fa/interfaces/third-party/index.md b/docs/fa/interfaces/third-party/index.md
new file mode 100644
index 00000000000..5b94c79ad35
--- /dev/null
+++ b/docs/fa/interfaces/third-party/index.md
@@ -0,0 +1,8 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_folder_title: Third-Party
+toc_priority: 24
+---
+
+
diff --git a/docs/fa/interfaces/third-party/integrations.md b/docs/fa/interfaces/third-party/integrations.md
index ce5938b3fa7..ddbdffad169 100644
--- a/docs/fa/interfaces/third-party/integrations.md
+++ b/docs/fa/interfaces/third-party/integrations.md
@@ -1,91 +1,96 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 27
+toc_title: Integrations
+---

# Integration Libraries from Third-party Developers {#integration-libraries-from-third-party-developers}

!!! warning "Disclaimer"
    Yandex does **not** maintain the tools and libraries listed below and hasn't done any extensive testing to ensure their quality.

## Infrastructure Products {#infrastructure-products}

- Relational database management systems
    - [MySQL](https://www.mysql.com)
        - [ProxySQL](https://github.com/sysown/proxysql/wiki/ClickHouse-Support)
        - [clickhouse-mysql-data-reader](https://github.com/Altinity/clickhouse-mysql-data-reader)
        - [horgh-replicator](https://github.com/larsnovikov/horgh-replicator)
    - [PostgreSQL](https://www.postgresql.org)
        - [clickhousedb_fdw](https://github.com/Percona-Lab/clickhousedb_fdw)
        - [infi.clickhouse_fdw](https://github.com/Infinidat/infi.clickhouse_fdw) (uses [infi.clickhouse_orm](https://github.com/Infinidat/infi.clickhouse_orm))
        - [pg2ch](https://github.com/mkabilov/pg2ch)
        - [clickhouse_fdw](https://github.com/adjust/clickhouse_fdw)
    - [MSSQL](https://en.wikipedia.org/wiki/Microsoft_SQL_Server)
        - [ClickHouseMigrator](https://github.com/zlzforever/ClickHouseMigrator)
- Message queues
    - [Kafka](https://kafka.apache.org)
        - [clickhouse_sinker](https://github.com/housepower/clickhouse_sinker) (uses [Go client](https://github.com/kshvakov/clickhouse/))
- Object storages
    - [S3](https://en.wikipedia.org/wiki/Amazon_S3)
        - [clickhouse-backup](https://github.com/AlexAkulov/clickhouse-backup)
- Container orchestration
    - [Kubernetes](https://kubernetes.io)
        - [clickhouse-operator](https://github.com/Altinity/clickhouse-operator)
- Configuration management
    - [puppet](https://puppet.com)
        - [innogames/clickhouse](https://forge.puppet.com/innogames/clickhouse)
        - [mfedotov/clickhouse](https://forge.puppet.com/mfedotov/clickhouse)
- Monitoring
    - [Graphite](https://graphiteapp.org)
        - [graphouse](https://github.com/yandex/graphouse)
        - [carbon-clickhouse](https://github.com/lomik/carbon-clickhouse)
        - [graphite-clickhouse](https://github.com/lomik/graphite-clickhouse)
        - [graphite-ch-optimizer](https://github.com/innogames/graphite-ch-optimizer) - optimizes staled partitions in [\*GraphiteMergeTree](../../engines/table_engines/mergetree_family/graphitemergetree.md#graphitemergetree) if rules from the [rollup configuration](../../engines/table_engines/mergetree_family/graphitemergetree.md#rollup-configuration) could be applied
    - [Grafana](https://grafana.com/)
        - [clickhouse-grafana](https://github.com/Vertamedia/clickhouse-grafana)
    - [Prometheus](https://prometheus.io/)
        - [clickhouse_exporter](https://github.com/f1yegor/clickhouse_exporter)
        - [PromHouse](https://github.com/Percona-Lab/PromHouse)
        - [clickhouse_exporter](https://github.com/hot-wifi/clickhouse_exporter) (uses [Go client](https://github.com/kshvakov/clickhouse/))
    - [Nagios](https://www.nagios.org/)
        - [check_clickhouse](https://github.com/exogroup/check_clickhouse/)
        - [check_clickhouse.py](https://github.com/innogames/igmonplugins/blob/master/src/check_clickhouse.py)
    - [Zabbix](https://www.zabbix.com)
        - [clickhouse-zabbix-template](https://github.com/Altinity/clickhouse-zabbix-template)
    - [Sematext](https://sematext.com/)
        - [clickhouse integration](https://github.com/sematext/sematext-agent-integrations/tree/master/clickhouse)
- Logging
    - [rsyslog](https://www.rsyslog.com/)
        - [omclickhouse](https://www.rsyslog.com/doc/master/configuration/modules/omclickhouse.html)
    - [fluentd](https://www.fluentd.org)
        - [loghouse](https://github.com/flant/loghouse) (for [Kubernetes](https://kubernetes.io))
    - [logagent](https://www.sematext.com/logagent)
        - [logagent output-plugin-clickhouse](https://sematext.com/docs/logagent/output-plugin-clickhouse/)
- Geo
    - [MaxMind](https://dev.maxmind.com/geoip/)
        - [clickhouse-maxmind-geoip](https://github.com/AlexeyKupershtokh/clickhouse-maxmind-geoip)

## Programming Language Ecosystems {#programming-language-ecosystems}

- Python
    - [SQLAlchemy](https://www.sqlalchemy.org)
        - [sqlalchemy-clickhouse](https://github.com/cloudflare/sqlalchemy-clickhouse) (uses [infi.clickhouse_orm](https://github.com/Infinidat/infi.clickhouse_orm))
    - [pandas](https://pandas.pydata.org)
        - [pandahouse](https://github.com/kszucs/pandahouse)
- R
    - [dplyr](https://db.rstudio.com/dplyr/)
        - [RClickhouse](https://github.com/IMSMWU/RClickhouse) (uses [clickhouse-cpp](https://github.com/artpaul/clickhouse-cpp))
- Java
    - [Hadoop](http://hadoop.apache.org)
        - [clickhouse-hdfs-loader](https://github.com/jaykelin/clickhouse-hdfs-loader) (uses [JDBC](../../sql_reference/table_functions/jdbc.md))
- Scala
    - [Akka](https://akka.io)
        - [clickhouse-scala-client](https://github.com/crobox/clickhouse-scala-client)
- C#
    - [ADO.NET](https://docs.microsoft.com/en-us/dotnet/framework/data/adonet/ado-net-overview)
        - [ClickHouse.Ado](https://github.com/killwort/ClickHouse-Net)
        - [ClickHouse.Client](https://github.com/DarkWanderer/ClickHouse.Client)
        - [ClickHouse.Net](https://github.com/ilyabreev/ClickHouse.Net)
        - [ClickHouse.Net.Migrations](https://github.com/ilyabreev/ClickHouse.Net.Migrations)
- Elixir
    - [Ecto](https://github.com/elixir-ecto/ecto)
        - [clickhouse_ecto](https://github.com/appodeal/clickhouse_ecto)

[Original article](https://clickhouse.tech/docs/en/interfaces/third-party/integrations/)
diff --git a/docs/fa/interfaces/third-party/proxy.md b/docs/fa/interfaces/third-party/proxy.md
index 5ecc5caf751..d9f5e9e1af7 100644
--- a/docs/fa/interfaces/third-party/proxy.md
+++ b/docs/fa/interfaces/third-party/proxy.md
@@ -1,41 +1,46 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 29
+toc_title: Proxies
+---

# Proxy Servers from Third-party Developers {#proxy-servers-from-third-party-developers}

## chproxy {#chproxy}

[chproxy](https://github.com/Vertamedia/chproxy) is an HTTP proxy and load balancer for the ClickHouse database.

Features:

- Per-user routing and response caching.
- Flexible limits.
- Automatic SSL certificate renewal.

Implemented in Go.

## KittenHouse {#kittenhouse}

[KittenHouse](https://github.com/VKCOM/kittenhouse) is designed to be a local proxy between ClickHouse and an application server in case it's impossible or inconvenient to buffer INSERT data on the application side.

Features:

- In-memory and on-disk data buffering.
- Per-table routing.
- Load balancing and health checking.

Implemented in Go.

## ClickHouse-Bulk {#clickhouse-bulk}

[ClickHouse-Bulk](https://github.com/nikepan/clickhouse-bulk) is a simple ClickHouse insert collector.

Features:

- Requests are grouped and sent by threshold or interval.
- Multiple remote servers.
- Basic authentication.

Implemented in Go.

[Original article](https://clickhouse.tech/docs/en/interfaces/third-party/proxy/)
diff --git a/docs/fa/introduction/adopters.md b/docs/fa/introduction/adopters.md
index ef841b2fa05..e10b3a269f3 100644
--- a/docs/fa/introduction/adopters.md
+++ b/docs/fa/introduction/adopters.md
@@ -1,79 +1,82 @@
 ---
-en_copy: true
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 8
+toc_title: Adopters
 ---

# ClickHouse Adopters {#clickhouse-adopters}

!!! warning "Disclaimer"
    The following list of companies using ClickHouse and their success stories is assembled from public sources, thus might differ from the current reality. We'd appreciate it if you share the story of adopting ClickHouse in your company and [add it to the list](https://github.com/ClickHouse/ClickHouse/edit/master/docs/en/introduction/adopters.md), but please make sure you won't have any NDA issues by doing so. Providing updates with publications from other companies is also useful.

| Company | Industry | Usecase | Cluster Size | (Un)Compressed Data Size\* | Reference |
|---------|----------|---------|--------------|----------------------------|-----------|
| [2gis](https://2gis.ru) | Maps | Monitoring | — | — | [Talk in Russian, July 2019](https://youtu.be/58sPkXfq6nw) |
| [Aloha Browser](https://alohabrowser.com/) | Mobile App | Browser backend | — | — | [Slides in Russian, May 2019](https://github.com/yandex/clickhouse-presentations/blob/master/meetup22/aloha.pdf) |
| [Amadeus](https://amadeus.com/) | Travel | Analytics | — | — | [Press Release, April 2018](https://www.altinity.com/blog/2018/4/5/amadeus-technologies-launches-investment-and-insights-tool-based-on-machine-learning-and-strategy-algorithms) |
| [Appsflyer](https://www.appsflyer.com) | Mobile analytics | Main product | — | — | [Talk in Russian, July 2019](https://www.youtube.com/watch?v=M3wbRlcpBbY) |
| [ArenaData](https://arenadata.tech/) | Data Platform | Main product | — | — | [Slides in Russian, December 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup38/indexes.pdf) |
| [Badoo](https://badoo.com) | Dating | Timeseries | — | — | [Slides in Russian, December 2019](https://presentations.clickhouse.tech/meetup38/forecast.pdf) |
| [Benocs](https://www.benocs.com/) | Network Telemetry and Analytics | Main Product | — | — | [Slides in English, October 2017](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup9/lpm.pdf) |
[Bloomberg](https://www.bloomberg.com/) | Finance, Media | Monitoring | 102 servers | — | [Slides, May 2018](https://www.slideshare.net/Altinity/http-analytics-for-6m-requests-per-second-using-clickhouse-by-alexander-bocharov) | -| [Bloxy](https://bloxy.info) | Blockchain | Analytics | — | — | [Slides in Russian, August 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/4_bloxy.pptx) | -| `Dataliance/UltraPower` | Telecom | Analytics | — | — | [Slides in Chinese, January 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup12/telecom.pdf) | -| [CARTO](https://carto.com/) | Business Intelligence | Geo analytics | — | — | [Geospatial processing with Clickhouse](https://carto.com/blog/geospatial-processing-with-clickhouse/) | -| [CERN](http://public.web.cern.ch/public/) | Research | Experiment | — | — | [Press release, April 2012](https://www.yandex.com/company/press_center/press_releases/2012/2012-04-10/) | -| [Cisco](http://cisco.com/) | Networking | Traffic analysis | — | — | [Lightning talk, October 2019](https://youtu.be/-hI1vDR2oPY?t=5057) | -| [Citadel Securities](https://www.citadelsecurities.com/) | Finance | — | — | — | [Contribution, March 2019](https://github.com/ClickHouse/ClickHouse/pull/4774) | -| [Citymobil](https://city-mobil.ru) | Taxi | Analytics | — | — | [Blog Post in Russian, March 2020](https://habr.com/en/company/citymobil/blog/490660/) | -| [ContentSquare](https://contentsquare.com) | Web analytics | Main product | — | — | [Blog post in French, November 2018](http://souslecapot.net/2018/11/21/patrick-chatain-vp-engineering-chez-contentsquare-penser-davantage-amelioration-continue-que-revolution-constante/) | -| [Cloudflare](https://cloudflare.com) | CDN | Traffic analysis | 36 servers | — | [Blog post, May 2017](https://blog.cloudflare.com/how-cloudflare-analyzes-1m-dns-queries-per-second/), [Blog post, March 2018](https://blog.cloudflare.com/http-analytics-for-6m-requests-per-second-using-clickhouse/) | -| [Corunet](https://coru.net/) | Analytics | Main product | — | — | [Slides in English, April 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup21/predictive_models.pdf) | -| [CraiditX 氪信](https://creditx.com) | Finance AI | Analysis | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/udf.pptx) | -| [Criteo/Storetail](https://www.criteo.com/) | Retail | Main product | — | — | [Slides in English, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup18/3_storetail.pptx) | -| [Deutsche Bank](https://db.com) | Finance | BI Analytics | — | — | [Slides in English, October 2019](https://bigdatadays.ru/wp-content/uploads/2019/10/D2-H3-3_Yakunin-Goihburg.pdf) | -| [Diva-e](https://www.diva-e.com) | Digital consulting | Main Product | — | — | [Slides in English, September 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup29/ClickHouse-MeetUp-Unusual-Applications-sd-2019-09-17.pdf) | -| [Exness](https://www.exness.com) | Trading | Metrics, Logging | — | — | [Talk in Russian, May 2019](https://youtu.be/_rpU-TvSfZ8?t=3215) | -| [Geniee](https://geniee.co.jp) | Ad network | Main product | — | — | [Blog post in Japanese, July 2017](https://tech.geniee.co.jp/entry/2017/07/20/160100) | -| [HUYA](https://www.huya.com/) | Video Streaming | Analytics | — | — | [Slides in Chinese, October 
2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/7.%20ClickHouse万亿数据分析实践%20李本旺(sundy-li)%20虎牙.pdf) | -| [Idealista](https://www.idealista.com) | Real Estate | Analytics | — | — | [Blog Post in English, April 2019](https://clickhouse.yandex/blog/en/clickhouse-meetup-in-madrid-on-april-2-2019) | -| [Infovista](https://www.infovista.com/) | Networks | Analytics | — | — | [Slides in English, October 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup30/infovista.pdf) | -| [InnoGames](https://www.innogames.com) | Games | Metrics, Logging | — | — | [Slides in Russian, September 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/graphite_and_clickHouse.pdf) | -| [Integros](https://integros.com) | Platform for video services | Analytics | — | — | [Slides in Russian, May 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup22/strategies.pdf) | -| [Kodiak Data](https://www.kodiakdata.com/) | Clouds | Main product | — | — | [Slides in Engish, April 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup13/kodiak_data.pdf) | -| [Kontur](https://kontur.ru) | Software Development | Metrics | — | — | [Talk in Russian, November 2018](https://www.youtube.com/watch?v=U4u4Bd0FtrY) | -| [LifeStreet](https://lifestreet.com/) | Ad network | Main product | 75 servers (3 replicas) | 5.27 PiB | [Blog post in Russian, February 2017](https://habr.com/en/post/322620/) | -| [Mail.ru Cloud Solutions](https://mcs.mail.ru/) | Cloud services | Main product | — | — | [Running ClickHouse Instance, in Russian](https://mcs.mail.ru/help/db-create/clickhouse#) | -| [MessageBird](https://www.messagebird.com) | Telecommunications | Statistics | — | — | [Slides in English, November 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup20/messagebird.pdf) | -| [MGID](https://www.mgid.com/) | Ad network | Web-analytics | — | — | [Our experience in implementing analytical DBMS ClickHouse, in Russian](http://gs-studio.com/news-about-it/32777----clickhouse---c) | -| [OneAPM](https://www.oneapm.com/) | Monitorings and Data Analysis | Main product | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/8.%20clickhouse在OneAPM的应用%20杜龙.pdf) | -| [Pragma Innovation](http://www.pragma-innovation.fr/) | Telemetry and Big Data Analysis | Main product | — | — | [Slides in English, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup18/4_pragma_innovation.pdf) | -| [QINGCLOUD](https://www.qingcloud.com/) | Cloud services | Main product | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/4.%20Cloud%20%2B%20TSDB%20for%20ClickHouse%20张健%20QingCloud.pdf) | -| [Qrator](https://qrator.net) | DDoS protection | Main product | — | — | [Blog Post, March 2019](https://blog.qrator.net/en/clickhouse-ddos-mitigation_37/) | -| [Beijing PERCENT Information Technology Co., Ltd.](https://www.percent.cn/) | Analytics | Main Product | — | — | [Slides in Chinese, June 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup24/4.%20ClickHouse万亿数据双中心的设计与实践%20.pdf) | -| [Rambler](https://rambler.ru) | Internet services | Analytics | — | — | [Talk in Russian, April 2018](https://medium.com/@ramblertop/разработка-api-clickhouse-для-рамблер-топ-100-f4c7e56f3141) | -| [Tencent](https://www.tencent.com) | Messaging | Logging 
| — | — | [Talk in Chinese, November 2019](https://youtu.be/T-iVQRuw-QY?t=5050) | -| [Traffic Stars](https://trafficstars.com/) | AD network | — | — | — | [Slides in Russian, May 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup15/lightning/ninja.pdf) | -| [S7 Airlines](https://www.s7.ru) | Airlines | Metrics, Logging | — | — | [Talk in Russian, March 2019](https://www.youtube.com/watch?v=nwG68klRpPg&t=15s) | -| [SEMrush](https://www.semrush.com/) | Marketing | Main product | — | — | [Slides in Russian, August 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/5_semrush.pdf) | -| [scireum GmbH](https://www.scireum.de/) | e-Commerce | Main product | — | — | [Talk in German, February 2020](https://www.youtube.com/watch?v=7QWAn5RbyR4) | -| [Sentry](https://sentry.io/) | Software developer | Backend for product | — | — | [Blog Post in English, May 2019](https://blog.sentry.io/2019/05/16/introducing-snuba-sentrys-new-search-infrastructure) | -| [SGK](http://www.sgk.gov.tr/wps/portal/sgk/tr) | Goverment Social Security | Analytics | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup35/ClickHouse%20Meetup-Ramazan%20POLAT.pdf) | -| [seo.do](https://seo.do/) | Analytics | Main product | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup35/CH%20Presentation-%20Metehan%20Çetinkaya.pdf) | -| [Sina](http://english.sina.com/index.html) | News | — | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/6.%20ClickHouse最佳实践%20高鹏_新浪.pdf) | -| [SMI2](https://smi2.ru/) | News | Analytics | — | — | [Blog Post in Russian, November 2017](https://habr.com/ru/company/smi2/blog/314558/) | -| [Splunk](https://www.splunk.com/) | Business Analytics | Main product | — | — | [Slides in English, January 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup12/splunk.pdf) | -| [Spotify](https://www.spotify.com) | Music | Experimentation | — | — | [Slides, July 2018](https://www.slideshare.net/glebus/using-clickhouse-for-experimentation-104247173) | -| [Tencent](https://www.tencent.com) | Big Data | Data processing | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/5.%20ClickHouse大数据集群应用_李俊飞腾讯网媒事业部.pdf) | -| [Uber](https://www.uber.com) | Taxi | Logging | — | — | [Slides, February 2020](https://presentations.clickhouse.tech/meetup40/uber.pdf) | -| [VKontakte](https://vk.com) | Social Network | Statistics, Logging | — | — | [Slides in Russian, August 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/3_vk.pdf) | -| [Wisebits](https://wisebits.com/) | IT Solutions | Analytics | — | — | [Slides in Russian, May 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup22/strategies.pdf) | -| [Xiaoxin Tech.](https://www.xiaoheiban.cn/) | Education | Common purpose | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/sync-clickhouse-with-mysql-mongodb.pptx) | -| [Ximalaya](https://www.ximalaya.com/) | Audio sharing | OLAP | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/ximalaya.pdf) | -| [Yandex Cloud](https://cloud.yandex.ru/services/managed-clickhouse) | Public Cloud | Main product | — | — | 
[Talk in Russian, December 2019](https://www.youtube.com/watch?v=pgnak9e_E0o) |
-| [Yandex DataLens](https://cloud.yandex.ru/services/datalens) | Business Intelligence | Main product | — | — | [Slides in Russian, December 2019](https://presentations.clickhouse.tech/meetup38/datalens.pdf) |
-| [Yandex Market](https://market.yandex.ru/) | e-Commerce | Metrics, Logging | — | — | [Talk in Russian, January 2019](https://youtu.be/_l1qP0DyBcA?t=478) |
-| [Yandex Metrica](https://metrica.yandex.com) | Web analytics | Main product | 360 servers in one cluster, 1862 servers in one department | 66.41 PiB / 5.68 PiB | [Slides, February 2020](https://presentations.clickhouse.tech/meetup40/introduction/#13) |
-| [ЦВТ](https://htc-cs.ru/) | Software Development | Metrics, Logging | — | — | [Blog Post, March 2019, in Russian](https://vc.ru/dev/62715-kak-my-stroili-monitoring-na-prometheus-clickhouse-i-elk) |
-| [МКБ](https://mkb.ru/) | Bank | Web-system monitoring | — | — | [Slides in Russian, September 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/mkb.pdf) |
-| [金数据](https://jinshuju.net) | BI Analytics | Main product | — | — | [Slides in Chinese, October 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup24/3.%20金数据数据架构调整方案Public.pdf) |
+| شرکت | صنعت | کاربرد | اندازه خوشه | حجم داده (غیر)فشرده\* | مرجع |
+|-------------------------------------------------------------------|----------------------------------------|-------------------------|------------------------------------------|------------------------------------------|----------------------------------------------------------------|
+| [2gis](https://2gis.ru) | نقشه‌ها | نظارت | — | — | [بحث در روسیه, جولای 2019](https://youtu.be/58sPkXfq6nw) |
+| [Aloha Browser](https://alohabrowser.com/) | نرم افزار تلفن همراه | باطن مرورگر | — | — | [اسلاید در روسیه, مه 2019](https://github.com/yandex/clickhouse-presentations/blob/master/meetup22/aloha.pdf) |
+| [Amadeus](https://amadeus.com/) | سفر | تجزیه و تحلیل | — | — | [اطلاعیه مطبوعاتی, آوریل 2018](https://www.altinity.com/blog/2018/4/5/amadeus-technologies-launches-investment-and-insights-tool-based-on-machine-learning-and-strategy-algorithms) |
+| [Appsflyer](https://www.appsflyer.com) | تجزیه و تحلیل ترافیک تلفن همراه | محصول اصلی | — | — | [بحث در روسیه, جولای 2019](https://www.youtube.com/watch?v=M3wbRlcpBbY) |
+| [ArenaData](https://arenadata.tech/) | پلت فرم داده | محصول اصلی | — | — | [اسلاید در روسیه, دسامبر 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup38/indexes.pdf) |
+| [Badoo](https://badoo.com) | دوستیابی | سری‌های زمانی | — | — | [اسلاید در روسیه, دسامبر 2019](https://presentations.clickhouse.tech/meetup38/forecast.pdf) |
+| [Benocs](https://www.benocs.com/) | تله متری شبکه و تجزیه و تحلیل | محصول اصلی | — | — | [اسلاید به زبان انگلیسی, اکتبر 2017](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup9/lpm.pdf) |
+| [Bloomberg](https://www.bloomberg.com/) | امور مالی, رسانه ها | نظارت | 102 سرور | — | [اسلاید, مه 2018](https://www.slideshare.net/Altinity/http-analytics-for-6m-requests-per-second-using-clickhouse-by-alexander-bocharov) |
+| [Bloxy](https://bloxy.info) | بلاکچین | تجزیه و تحلیل | — | — | [اسلاید در روسیه, اوت 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/4_bloxy.pptx) |
+| `Dataliance/UltraPower` | مخابرات | تجزیه و تحلیل | — | — | [اسلاید در چین, ژانویه 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup12/telecom.pdf) |
+| [CARTO](https://carto.com/) | اطلاعات کسب و کار | تجزیه و تحلیل جغرافیایی | — | — | [پردازش جغرافیایی با ClickHouse](https://carto.com/blog/geospatial-processing-with-clickhouse/) |
+| [CERN](http://public.web.cern.ch/public/) | پژوهش | تجربه | — | — | [اطلاعیه مطبوعاتی, آوریل 2012](https://www.yandex.com/company/press_center/press_releases/2012/2012-04-10/) |
+| [Cisco](http://cisco.com/) | شبکه | تجزیه و تحلیل ترافیک | — | — | [بحث کوتاه (lightning talk), اکتبر 2019](https://youtu.be/-hI1vDR2oPY?t=5057) |
+| [Citadel Securities](https://www.citadelsecurities.com/) | امور مالی | — | — | — | [مشارکت, مارس 2019](https://github.com/ClickHouse/ClickHouse/pull/4774) |
+| [Citymobil](https://city-mobil.ru) | تاکسی | تجزیه و تحلیل | — | — | [پست وبلاگ در روسیه, مارس 2020](https://habr.com/en/company/citymobil/blog/490660/) |
+| [ContentSquare](https://contentsquare.com) | تجزیه و تحلیل ترافیک وب | محصول اصلی | — | — | [پست وبلاگ در فرانسه, نوامبر 2018](http://souslecapot.net/2018/11/21/patrick-chatain-vp-engineering-chez-contentsquare-penser-davantage-amelioration-continue-que-revolution-constante/) |
+| [Cloudflare](https://cloudflare.com) | CDN | تجزیه و تحلیل ترافیک | 36 سرور | — | [پست وبلاگ, مه 2017](https://blog.cloudflare.com/how-cloudflare-analyzes-1m-dns-queries-per-second/), [پست وبلاگ, مارس 2018](https://blog.cloudflare.com/http-analytics-for-6m-requests-per-second-using-clickhouse/) |
+| [Corunet](https://coru.net/) | تجزیه و تحلیل | محصول اصلی | — | — | [اسلاید به زبان انگلیسی, آوریل 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup21/predictive_models.pdf) |
+| [CraiditX 氪信](https://creditx.com) | Finance AI | تجزیه و تحلیل | — | — | [اسلاید به زبان انگلیسی, نوامبر 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/udf.pptx) |
+| [Criteo/Storetail](https://www.criteo.com/) | خرده فروشی | محصول اصلی | — | — | [اسلاید به زبان انگلیسی, اکتبر 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup18/3_storetail.pptx) |
+| [Deutsche Bank](https://db.com) | امور مالی | تجزیه و تحلیل BI | — | — | [اسلاید به زبان انگلیسی, اکتبر 2019](https://bigdatadays.ru/wp-content/uploads/2019/10/D2-H3-3_Yakunin-Goihburg.pdf) |
+| [Diva-e](https://www.diva-e.com) | مشاوره دیجیتال | محصول اصلی | — | — | [اسلاید به زبان انگلیسی, سپتامبر 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup29/ClickHouse-MeetUp-Unusual-Applications-sd-2019-09-17.pdf) |
+| [Exness](https://www.exness.com) | بازرگانی | معیارها, ثبت وقایع | — | — | [بحث در روسیه, مه 2019](https://youtu.be/_rpU-TvSfZ8?t=3215) |
+| [Geniee](https://geniee.co.jp) | شبکه تبلیغاتی | محصول اصلی | — | — | [پست وبلاگ در ژاپن, جولای 2017](https://tech.geniee.co.jp/entry/2017/07/20/160100) |
+| [HUYA](https://www.huya.com/) | جریان ویدیو | تجزیه و تحلیل | — | — | [اسلاید در چین, اکتبر 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/7.%20ClickHouse万亿数据分析实践%20李本旺(sundy-li)%20虎牙.pdf) |
+| [Idealista](https://www.idealista.com) | املاک و مستغلات | تجزیه و تحلیل | — | — | [پست وبلاگ به زبان انگلیسی, آوریل 2019](https://clickhouse.yandex/blog/en/clickhouse-meetup-in-madrid-on-april-2-2019) |
+| [Infovista](https://www.infovista.com/) | شبکه ها | تجزیه و تحلیل | — | — | [اسلاید به زبان انگلیسی, اکتبر 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup30/infovista.pdf) |
+| [InnoGames](https://www.innogames.com) | بازی ها | معیارها, ثبت وقایع | — | — | [اسلاید در روسیه, سپتامبر 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/graphite_and_clickHouse.pdf) |
+| [Integros](https://integros.com) | بستر های نرم افزاری برای خدمات تصویری | تجزیه و تحلیل | — | — | [اسلاید در روسیه, مه 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup22/strategies.pdf) |
+| [Kodiak Data](https://www.kodiakdata.com/) | ابرها | محصول اصلی | — | — | [اسلاید به زبان انگلیسی, آوریل 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup13/kodiak_data.pdf) |
+| [Kontur](https://kontur.ru) | توسعه نرم افزار | متریک | — | — | [بحث در روسیه, نوامبر 2018](https://www.youtube.com/watch?v=U4u4Bd0FtrY) |
+| [LifeStreet](https://lifestreet.com/) | شبکه تبلیغاتی | محصول اصلی | 75 سرور (3 کپی) | 5.27 PiB | [پست وبلاگ در روسیه, فوریه 2017](https://habr.com/en/post/322620/) |
+| [Mail.ru Cloud Solutions](https://mcs.mail.ru/) | خدمات ابری | محصول اصلی | — | — | [اجرای نمونه ClickHouse, در روسیه](https://mcs.mail.ru/help/db-create/clickhouse#) |
+| [MessageBird](https://www.messagebird.com) | مخابرات | امار | — | — | [اسلاید به زبان انگلیسی, نوامبر 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup20/messagebird.pdf) |
+| [MGID](https://www.mgid.com/) | شبکه تبلیغاتی | تجزیه و تحلیل وب | — | — | [تجربه ما در پیاده‌سازی DBMS تحلیلی ClickHouse, در روسیه](http://gs-studio.com/news-about-it/32777----clickhouse---c) |
+| [OneAPM](https://www.oneapm.com/) | مانیتورینگ و تجزیه و تحلیل داده ها | محصول اصلی | — | — | [اسلاید در چین, اکتبر 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/8.%20clickhouse在OneAPM的应用%20杜龙.pdf) |
+| [Pragma Innovation](http://www.pragma-innovation.fr/) | تله متری و تجزیه و تحلیل داده های بزرگ | محصول اصلی | — | — | [اسلاید به زبان انگلیسی, اکتبر 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup18/4_pragma_innovation.pdf) |
+| [QINGCLOUD](https://www.qingcloud.com/) | خدمات ابری | محصول اصلی | — | — | [اسلاید در چین, اکتبر 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/4.%20Cloud%20%2B%20TSDB%20for%20ClickHouse%20张健%20QingCloud.pdf) |
+| [Qrator](https://qrator.net) | DDoS protection | محصول اصلی | — | — | [پست وبلاگ, مارس 2019](https://blog.qrator.net/en/clickhouse-ddos-mitigation_37/) |
+| [Beijing PERCENT Information Technology Co., Ltd.](https://www.percent.cn/) | تجزیه و تحلیل | محصول اصلی | — | — | [اسلاید در چین, ژوئن 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup24/4.%20ClickHouse万亿数据双中心的设计与实践%20.pdf) |
+| [Rambler](https://rambler.ru) | خدمات اینترنت | تجزیه و تحلیل | — | — | [بحث در روسیه, آوریل 2018](https://medium.com/@ramblertop/разработка-api-clickhouse-для-рамблер-топ-100-f4c7e56f3141) |
+| [Tencent](https://www.tencent.com) | پیامرسانی | ثبت وقایع | — | — | [بحث در چین, نوامبر 2019](https://youtu.be/T-iVQRuw-QY?t=5050) |
+| [Traffic Stars](https://trafficstars.com/) | شبکه تبلیغاتی | — | — | — | [اسلاید در روسیه, مه 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup15/lightning/ninja.pdf) |
+| [S7 Airlines](https://www.s7.ru) | خطوط هوایی | معیارها, ثبت وقایع | — | — | [بحث در روسیه, مارس 2019](https://www.youtube.com/watch?v=nwG68klRpPg&t=15s) |
+| [SEMrush](https://www.semrush.com/) | بازاریابی | محصول اصلی | — | — | [اسلاید در روسیه, اوت 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/5_semrush.pdf) |
+| [scireum GmbH](https://www.scireum.de/) | تجارت الکترونیک | محصول اصلی | — | — | [بحث به زبان آلمانی, فوریه 2020](https://www.youtube.com/watch?v=7QWAn5RbyR4) |
+| [Sentry](https://sentry.io/) | توسعه دهنده نرم افزار | باطن برای محصول | — | — | [پست وبلاگ به زبان انگلیسی, مه 2019](https://blog.sentry.io/2019/05/16/introducing-snuba-sentrys-new-search-infrastructure) |
+| [SGK](http://www.sgk.gov.tr/wps/portal/sgk/tr) | دولت امنیت اجتماعی | تجزیه و تحلیل | — | — | [اسلاید به زبان انگلیسی, نوامبر 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup35/ClickHouse%20Meetup-Ramazan%20POLAT.pdf) |
+| [seo.do](https://seo.do/) | تجزیه و تحلیل | محصول اصلی | — | — | [اسلاید به زبان انگلیسی, نوامبر 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup35/CH%20Presentation-%20Metehan%20Çetinkaya.pdf) |
+| [Sina](http://english.sina.com/index.html) | اخبار | — | — | — | [اسلاید در چین, اکتبر 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/6.%20ClickHouse最佳实践%20高鹏_新浪.pdf) |
+| [SMI2](https://smi2.ru/) | اخبار | تجزیه و تحلیل | — | — | [پست وبلاگ در روسیه, نوامبر 2017](https://habr.com/ru/company/smi2/blog/314558/) |
+| [Splunk](https://www.splunk.com/) | تجزیه و تحلیل کسب و کار | محصول اصلی | — | — | [اسلاید به زبان انگلیسی, ژانویه 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup12/splunk.pdf) |
+| [Spotify](https://www.spotify.com) | موسیقی | تجربه | — | — | [اسلاید, جولای 2018](https://www.slideshare.net/glebus/using-clickhouse-for-experimentation-104247173) |
+| [Tencent](https://www.tencent.com) | داده های بزرگ | پردازش داده ها | — | — | [اسلاید در چین, اکتبر 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/5.%20ClickHouse大数据集群应用_李俊飞腾讯网媒事业部.pdf) |
+| [Uber](https://www.uber.com) | تاکسی | ثبت وقایع | — | — | [اسلاید, فوریه 2020](https://presentations.clickhouse.tech/meetup40/uber.pdf) |
+| [VKontakte](https://vk.com) | شبکه اجتماعی | امار, ثبت وقایع | — | — | [اسلاید در روسیه, اوت 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/3_vk.pdf) |
+| [Wisebits](https://wisebits.com/) | راه حل های فناوری اطلاعات | تجزیه و تحلیل | — | — | [اسلاید در روسیه, مه 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup22/strategies.pdf) |
+| [Xiaoxin Tech.](https://www.xiaoheiban.cn/) | تحصیلات | هدف مشترک | — | — | [اسلاید به زبان انگلیسی, نوامبر 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/sync-clickhouse-with-mysql-mongodb.pptx) |
+| [Ximalaya](https://www.ximalaya.com/) | به اشتراک گذاری صوتی | OLAP | — | — | [اسلاید به زبان انگلیسی, نوامبر 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/ximalaya.pdf) |
+| [Yandex Cloud](https://cloud.yandex.ru/services/managed-clickhouse) | ابر عمومی | محصول اصلی | — | — | [بحث در روسیه, دسامبر 2019](https://www.youtube.com/watch?v=pgnak9e_E0o) |
+| [Yandex DataLens](https://cloud.yandex.ru/services/datalens) | اطلاعات کسب و کار | محصول اصلی | — | — | [اسلاید در روسیه, دسامبر 2019](https://presentations.clickhouse.tech/meetup38/datalens.pdf) |
+| [Yandex Market](https://market.yandex.ru/) | تجارت الکترونیک | معیارها, ثبت وقایع | — | — | [بحث در روسیه, ژانویه 2019](https://youtu.be/_l1qP0DyBcA?t=478) |
+| 
[یاندکس متریکا](https://metrica.yandex.com) | تجزیه و تحلیل ترافیک وب | محصول اصلی | 360 سرور در یک خوشه, 1862 سرور در یک بخش | 66.41 PiB / 5.68 PiB | [اسلاید فوریه 2020](https://presentations.clickhouse.tech/meetup40/introduction/#13) | +| [ЦВТ](https://htc-cs.ru/) | توسعه نرم افزار | معیارهای ورود به سیستم | — | — | [پست وبلاگ, مارس 2019, در روسیه](https://vc.ru/dev/62715-kak-my-stroili-monitoring-na-prometheus-clickhouse-i-elk) | +| [МКБ](https://mkb.ru/) | بانک | نظارت بر وب سیستم | — | — | [اسلاید در روسیه, سپتامبر 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/mkb.pdf) | +| [金数据](https://jinshuju.net) | بی تجزیه و تحلیل | محصول اصلی | — | — | [اسلاید در چین, اکتبر 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup24/3.%20金数据数据架构调整方案Public.pdf) | -[Original article](https://clickhouse.tech/docs/en/introduction/adopters/) +[مقاله اصلی](https://clickhouse.tech/docs/en/introduction/adopters/) diff --git a/docs/fa/introduction/distinctive_features.md b/docs/fa/introduction/distinctive_features.md index 8879190233f..a4313168796 100644 --- a/docs/fa/introduction/distinctive_features.md +++ b/docs/fa/introduction/distinctive_features.md @@ -60,7 +60,7 @@ ClickHouse روش های مختلفی برای کسب دقیق performance ار ClickHouse از روش asynchronous multimaster replication استفاده می کند. بعد از نوشتن داده در یکی از replica های موجود، داده به صورت توزیع شده به بقیه replica ها منتقل می شود. این سیستم داده های مشابه را در replica های مختلف نگه داری می کند. در اکثر موارد که سیستم fail می شوند، داده ها به صورت اتوماتیک restore می شوند و یا در موارد پیچیده به صورت نیمه اتوماتیک restore می شوند. -برای اطلاعات بیشتر، به بخش [replication داده ها](../operations/table_engines/replication.md) مراجعه کنید. +برای اطلاعات بیشتر، به بخش [replication داده ها](../engines/table_engines/mergetree_family/replication.md) مراجعه کنید.
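To make the asynchronous multi-master replication described in the paragraph above concrete, here is a minimal, hypothetical sketch; the table `hits`, its columns, and the ZooKeeper path are invented for illustration and are not part of this patch. Each replica runs the same DDL with its own `{replica}` macro value:

``` bash
# Writes accepted by any one replica are propagated to the others
# asynchronously by the ReplicatedMergeTree engine.
clickhouse-client --query "
CREATE TABLE hits
(
    dt DateTime,
    user_id UInt64
)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/01/hits', '{replica}')
ORDER BY (dt, user_id)"
```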
    diff --git a/docs/fa/introduction/history.md b/docs/fa/introduction/history.md index 7882c89eae5..c654e825ad7 100644 --- a/docs/fa/introduction/history.md +++ b/docs/fa/introduction/history.md @@ -1,50 +1,56 @@ -
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 7
+toc_title: "\u062A\u0627\u0631\u06CC\u062E\u0686\u0647"
+---
-# ClickHouse ﻪﭽﺨﯾﺭﺎﺗ {#clickhouse}
+# تاریخچه ClickHouse {#clickhouse-history}
-ClickHouse در ابتدا برای قدرت به Yandex.Metrica دومین بستر آنالیز وب در دنیا توسعه داده شد، و همچنان جز اصلی آن است. ClickHouse اجازه می دهند که با بیش از 13 تریلیون رکورد در دیتابیس و بیش از 20 میلیارد event در روز، گزارش های مستقیم (On the fly) از داده های non-aggregate تهیه کنیم. این مقاله پیشنیه ی تاریخی در ارتباط با اهداف اصلی ClickHouse قبل از آنکه به یک محصول open source تبدیل شود، می دهد.
+ClickHouse در ابتدا برای قدرت بخشیدن به [Yandex.Metrica](https://metrica.yandex.com/)، [دومین پلتفرم بزرگ تجزیه و تحلیل ترافیک وب در جهان](http://w3techs.com/technologies/overview/traffic_analysis/all)، توسعه داده شد و همچنان جزء اصلی آن است. با بیش از 13 تریلیون رکورد در پایگاه داده و بیش از 20 میلیارد رویداد در روز، ClickHouse امکان تولید گزارش‌های سفارشی را به صورت درجا و مستقیم از داده‌های غیرتجمیعی فراهم می‌کند. این مقاله به طور خلاصه اهداف ClickHouse را در مراحل اولیه توسعه‌اش پوشش می‌دهد.
-Yandex.Metrica تولید گزارش های برپایه بازدید و session ها به صورت on the fly و با استفده از بخش های دلخواه و دوره ی زمانی که توسط کاربر انتخاب می شود را انجام می دهد. aggregate های پیچیده معمولا مورد نیاز هستند، مانند تعداد بازدیدکنندگان unique. داده های جدید برای تهیه گزارش گیری به صورت real-time می رسند.
+Yandex.Metrica گزارش‌های سفارشی را به صورت درجا، بر اساس بازدیدها و session ها و با بخش‌های دلخواهی که کاربر تعریف می‌کند، می‌سازد. این کار اغلب نیازمند ساخت تجمیع‌های پیچیده‌ای مانند تعداد کاربران unique است. داده‌های جدید برای ساخت گزارش به صورت real-time می‌رسند.
-از آوریل 2014، Yandex.Metrica تقریبا 12 میلیارد event شامل page view و click در روز دریافت کرد. تمام این event ها باید به ترتیب برای ساخت گزارش های سفارشی ذخیره سازی می شدند. یک query ممکن است نیاز به اسکن کردن میلیون ها سطر با زمان کمتر از چند صد میلی ثانیه، یا چند صد میلیون سطر در عرض چند ثانیه داشته باشد.
+از آوریل 2014، Yandex.Metrica روزانه حدود 12 میلیارد رویداد (شامل page view و کلیک) را ردیابی می‌کرد. همه این رویدادها باید برای ساخت گزارش‌های سفارشی ذخیره می‌شدند. یک پرس و جو ممکن است نیازمند اسکن میلیون‌ها سطر در چند صد میلی‌ثانیه، یا صدها میلیون سطر تنها در چند ثانیه باشد.
-## استفاده در Yandex.Metrica و دیگر سرویس های Yandex {#stfdh-dr-yandex-metrica-w-dygr-srwys-hy-yandex}
+## استفاده در Yandex.Metrica و سایر خدمات Yandex {#usage-in-yandex-metrica-and-other-yandex-services}
-ClickHouse با چندین اهداف در Yandex.Metrica استفاده می شود. وظیفه اصلی آن ساخت گزارش های آنلاین از داده های non-aggregate می باشد. ClickHouse در یک کلاستر با سایز 374 سرور، که بیش از 20.3 تریلیون سطر در دیتابیس را دارد مورد استفاده قرار می گیرد. اندازه فشرده داده ها، بدون شمارش داده های تکراری و replication، حدود 2 پتابایت می باشد. اندازه ی غیرفشرده داده ها (در فرمت TSV) حدودا 17 پتابایت می باشد.
+ClickHouse در Yandex.Metrica چندین هدف را برآورده می‌کند.
+وظیفه اصلی آن ساخت گزارش در حالت آنلاین با استفاده از داده‌های غیرتجمیعی است، با خوشه‌ای 374 سروری که بیش از 20.3 تریلیون سطر را در پایگاه داده نگه می‌دارد. حجم داده‌های فشرده، بدون احتساب داده‌های تکراری و replica ها، حدود 2 PB است. حجم داده‌های غیرفشرده (در فرمت TSV) حدود 17 PB خواهد بود.
-ClickHouse همچنین در موارد زیراستفاده می شود:
+ClickHouse همچنین نقش کلیدی در فرایندهای زیر ایفا می‌کند:
-- ذخیره سازی داده ها برای Session replay از Yandex.Metrica.
-- پردازش داده های Intermediate.
-- ساخت گزارش های سراسری از آنالیز ها.
-- اجرای query ها برای debug کردن موتور Yandex.Metrica.
-- آنالیز لاگ های به دست آمده از API ها و user interface.
+- ذخیره‌سازی داده‌ها برای Session Replay از Yandex.Metrica.
+- پردازش داده‌های میانی.
+- ساخت گزارش‌های سراسری با تجزیه و تحلیل ترافیک.
+- اجرای پرس و جوها برای اشکال‌زدایی موتور Yandex.Metrica.
+- تجزیه و تحلیل لاگ‌های API و رابط کاربری.
-ClickHouse حداقل در دوازده جای دیگر سرویس Yandex نصب شده است: در search verticals، Market، Direct، Business Analytics، Mobile Development، AdFox، سرویس های شخصی و..
+امروزه چند ده نصب ClickHouse در سایر خدمات و بخش‌های Yandex وجود دارد: جستجوی عمودی، تجارت الکترونیک، تبلیغات، تجزیه و تحلیل کسب و کار، توسعه موبایل، خدمات شخصی و غیره.
-## داده های Aggregate , Non-Aggregate {#ddh-hy-aggregate-non-aggregate}
+## داده‌های تجمیعی و غیرتجمیعی {#aggregated-and-non-aggregated-data}
-یک دیدگاه محبوب وجود دارد که شما باید، داده های خود را به منظور کاهش اندازه داده ها Aggregate کنید.
+دیدگاه رایجی وجود دارد که برای محاسبه موثر امار باید داده‌ها را تجمیع کرد، چون این کار حجم داده‌ها را کاهش می‌دهد.
-اما به دلایل زیر، aggregate کردن داده ها راه حل بسیار محدودی است:
+اما تجمیع داده‌ها با محدودیت‌های زیادی همراه است:
-- شما باید لیست گزارش های از قبل تعریف شده توسط کاربر که نیاز به تهیه گزارش آنها را دارید، داشته باشید.
-- کاربر نمیتواند گزارش های سفارشی تهیه کند.
-- در هنگام aggregate کردن تعداد بسیار زیاد key، اندازه ی داده ها کم نمی شود و aggregate بی فایده است.
-- برای تعداد زیادی از گزارش ها، aggregate های متنوع و تغییرپذیر زیادی وجود دارد. (انفجار ترکیبی).
-- هنگام aggregate کردن key ها با cardinality بالا (مثل URL ها)، اندازه داده ها به اندازه کافی کاهش پیدا نمی کند (کمتر از دو برابر).
-- به این دلیل اندازه ی داده ها با aggregate کردن ممکن است به جای شکستن، رشد هم بکند.
-- کاربر تمام گزارش هایی که ما تولید کردیم را نگاه نمی کند. بخش بزرگی از محاسبات بی فایده است.
-- یکپارچگی منطقی داده ها ممکن است برای aggregate های مختلف نقض شود.
+- باید فهرستی از پیش تعریف‌شده از گزارش‌های مورد نیاز داشته باشید.
+- کاربر نمی‌تواند گزارش سفارشی بسازد.
+- هنگام تجمیع روی تعداد زیادی کلید متمایز، حجم داده‌ها به سختی کاهش می‌یابد و تجمیع بی‌فایده می‌شود.
+- برای تعداد زیادی گزارش، گونه‌های تجمیع بسیار زیادی وجود دارد (انفجار ترکیبی).
+- هنگام تجمیع کلیدهایی با cardinality بالا (مانند URL ها)، حجم داده‌ها چندان کاهش نمی‌یابد (کمتر از دو برابر).
+- به همین دلیل، حجم داده‌ها با تجمیع ممکن است به جای کوچک شدن، رشد کند.
+- کاربران تمام گزارش‌هایی را که برایشان تولید می‌کنیم نمی‌بینند؛ بخش بزرگی از این محاسبات بی‌فایده است.
+- یکپارچگی منطقی داده‌ها ممکن است در تجمیع‌های مختلف نقض شود.
-اگر ما هیچ چیزی را aggregate نکنیم و با داده های non-aggregate کار کنیم، در واقع این ممکن است باعث کاهش اندازه ی محاسبات شود.
+اگر هیچ چیز را تجمیع نکنیم و با داده‌های غیرتجمیعی کار کنیم، این حتی ممکن است حجم محاسبات را کاهش دهد.
-با این حال، با aggregate کردن، بخش قابل توجهی از کار به صورت آفلاین انجام می شود و نسبتا آرام به پایان می رسد. در مقابل، محاسبات آنلاین به دلیل اینکه کاربر منتظر نمایش نتایج می باشد، نیازمند محاسبه سریع تا جایی که ممکن است می باشد.
+با این حال، در تجمیع بخش قابل توجهی از کار به صورت افلاین و نسبتا آرام انجام می‌شود. در مقابل، محاسبات آنلاین باید تا جای ممکن سریع انجام شوند، چون کاربر منتظر نتیجه است.
-Yandex.Metrica دارای یک سیستم تخصصی برای aggregate کردن داده ها به اسم Metrage می باشد، که برای اکثریت گزارش های مورد استفاده قرار می گیرد. شروع سال 2009، Yandex.Metrica همچنین از یک دیتابیس تخصصی OLAP برای داده های non-aggregate به نام OLAPServer، که قبلا برای ساخت گزارش ها استفاده می شد، استفاده می کرد. OLAPServer به خوبی روی داده های Non-Aggregate کار می کرد، اما محدودیت های بسیار زیادی داشت که اجازه ی استفاده در تمام گزارش های دلخواه را نمی داد. مواردی از قبیل عدم پشتیبانی از data type ها (فقط عدد)، و عدم توانایی در بروزرسانی افزایشی داده ها به صورت real-time (این کار فقط به rewrite کردن داده ها به صورت روزانه امکام پذیر بود). OLAPServer یک مدیریت دیتابیس نبود اما یک دیتابیس تخصصی بود.
+Yandex.Metrica سیستم تخصصی برای تجمیع داده‌ها به نام Metrage دارد که برای اکثر گزارش‌ها استفاده می‌شد.
+از سال 2009، Yandex.Metrica همچنین از یک پایگاه داده تخصصی OLAP برای داده‌های غیرتجمیعی به نام OLAPServer استفاده می‌کرد که قبلا برای گزارش‌ساز به کار می‌رفت.
+OLAPServer برای داده‌های غیرتجمیعی خوب کار می‌کرد، اما محدودیت‌های زیادی داشت که اجازه نمی‌داد برای همه گزارش‌های دلخواه استفاده شود؛ از جمله نبود پشتیبانی از انواع داده (فقط اعداد) و ناتوانی در به‌روزرسانی تدریجی داده‌ها به صورت real-time (تنها با بازنویسی روزانه داده‌ها ممکن بود). OLAPServer یک DBMS نبود، بلکه یک پایگاه داده تخصصی بود.
-برای حذف محدودیت های OLAPServer و حل مشکلات کار با داده های Non-Aggregate برای تمام گزارش ها، ما مدیریت دیتابیس ClicHouse را توسعه دادیم..
+هدف اولیه ClickHouse حذف محدودیت‌های OLAPServer و حل مشکل کار با داده‌های غیرتجمیعی برای همه گزارش‌ها بود، اما در طول سال‌ها به یک سیستم مدیریت پایگاه داده همه‌منظوره، مناسب برای طیف گسترده‌ای از وظایف تحلیلی، تبدیل شده است.
    - -[مقاله اصلی](https://clickhouse.tech/docs/fa/introduction/ya_metrika_task/) +[مقاله اصلی](https://clickhouse.tech/docs/en/introduction/history/) diff --git a/docs/fa/introduction/index.md b/docs/fa/introduction/index.md new file mode 100644 index 00000000000..317489d277b --- /dev/null +++ b/docs/fa/introduction/index.md @@ -0,0 +1,8 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_folder_title: Introduction +toc_priority: 1 +--- + + diff --git a/docs/fa/operations/access_rights.md b/docs/fa/operations/access_rights.md index d5f7da9161e..2ad006e0fcf 100644 --- a/docs/fa/operations/access_rights.md +++ b/docs/fa/operations/access_rights.md @@ -1,12 +1,15 @@ --- -en_copy: true +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 48 +toc_title: "\u062D\u0642\u0648\u0642 \u062F\u0633\u062A\u0631\u0633\u06CC" --- -# Access Rights {#access-rights} +# حقوق دسترسی {#access-rights} -Users and access rights are set up in the user config. This is usually `users.xml`. +کاربران و حقوق دسترسی هستند تا در پیکربندی کاربر تنظیم شده است. این است که معمولا `users.xml`. -Users are recorded in the `users` section. Here is a fragment of the `users.xml` file: +کاربران در ثبت `users` بخش. در اینجا یک قطعه از است `users.xml` پرونده: ``` xml @@ -67,15 +70,15 @@ Users are recorded in the `users` section. Here is a fragment of the `users.xml` ``` -You can see a declaration from two users: `default`and`web`. We added the `web` user separately. +شما می توانید اعلامیه ای از دو کاربر را ببینید: `default`و`web`. ما اضافه کردیم `web` کاربر به طور جداگانه. -The `default` user is chosen in cases when the username is not passed. The `default` user is also used for distributed query processing, if the configuration of the server or cluster doesn’t specify the `user` and `password` (see the section on the [Distributed](../operations/table_engines/distributed.md) engine). +این `default` کاربر در مواردی که نام کاربری تصویب نشده است انتخاب شده است. این `default` کاربر همچنین برای پردازش پرس و جو توزیع شده استفاده می شود, اگر پیکربندی سرور یا خوشه می کند مشخص نیست `user` و `password` (نگاه کنید به بخش در [توزیع شده](../engines/table_engines/special/distributed.md) موتور). The user that is used for exchanging information between servers combined in a cluster must not have substantial restrictions or quotas – otherwise, distributed queries will fail. -The password is specified in clear text (not recommended) or in SHA-256. The hash isn’t salted. In this regard, you should not consider these passwords as providing security against potential malicious attacks. Rather, they are necessary for protection from employees. +رمز عبور در متن روشن مشخص (توصیه نمی شود) و یا در شا 256. هش شور نیست. در این راستا نباید این رمزهای عبور را به عنوان امنیت در برابر حملات مخرب بالقوه در نظر بگیرید. بلکه لازم است برای حفاظت از کارکنان. -A list of networks is specified that access is allowed from. In this example, the list of networks for both users is loaded from a separate file (`/etc/metrika.xml`) containing the `networks` substitution. Here is a fragment of it: +یک لیست از شبکه مشخص شده است که دسترسی از اجازه. در این مثال لیستی از شبکه ها برای هر دو کاربران لود شده از یک فایل جداگانه (`/etc/metrika.xml`) حاوی `networks` جایگزینی. در اینجا یک قطعه است: ``` xml @@ -89,22 +92,22 @@ A list of networks is specified that access is allowed from. 
In this example, th ``` -You could define this list of networks directly in `users.xml`, or in a file in the `users.d` directory (for more information, see the section “[Configuration files](configuration_files.md#configuration_files)”). +شما می توانید این لیست از شبکه به طور مستقیم در تعریف `users.xml` یا در یک فایل در `users.d` فهرست راهنما (برای اطلاعات بیشتر, بخش را ببینید “[پروندههای پیکربندی](configuration_files.md#configuration_files)”). -The config includes comments explaining how to open access from everywhere. +پیکربندی شامل نظرات توضیح میدهد که چگونه برای باز کردن دسترسی از همه جا. -For use in production, only specify `ip` elements (IP addresses and their masks), since using `host` and `hoost_regexp` might cause extra latency. +برای استفاده در تولید فقط مشخص کنید `ip` عناصر (نشانی اینترنتی و ماسک خود را), از زمان استفاده از `host` و `hoost_regexp` ممکن است تاخیر اضافی شود. -Next the user settings profile is specified (see the section “[Settings profiles](settings/settings_profiles.md)”. You can specify the default profile, `default'`. The profile can have any name. You can specify the same profile for different users. The most important thing you can write in the settings profile is `readonly=1`, which ensures read-only access. Then specify the quota to be used (see the section “[Quotas](quotas.md#quotas)”). You can specify the default quota: `default`. It is set in the config by default to only count resource usage, without restricting it. The quota can have any name. You can specify the same quota for different users – in this case, resource usage is calculated for each user individually. +بعد مشخصات تنظیمات کاربر مشخص شده است (بخش را ببینید “[پروفایل تنظیمات](settings/settings_profiles.md)”. شما می توانید مشخصات پیش فرض را مشخص کنید, `default'`. مشخصات می توانید هر نام دارند. شما می توانید مشخصات مشابه برای کاربران مختلف را مشخص کنید. مهم ترین چیز شما می توانید در مشخصات تنظیمات ارسال شده است `readonly=1`, که تضمین می کند فقط خواندنی دسترسی. سپس سهمیه مشخص مورد استفاده قرار گیرد (بخش را ببینید “[سهمیه](quotas.md#quotas)”). شما می توانید سهمیه پیش فرض را مشخص کنید: `default`. It is set in the config by default to only count resource usage, without restricting it. The quota can have any name. You can specify the same quota for different users – in this case, resource usage is calculated for each user individually. -In the optional `` section, you can also specify a list of databases that the user can access. By default, all databases are available to the user. You can specify the `default` database. In this case, the user will receive access to the database by default. +در اختیاری `` بخش, شما همچنین می توانید یک لیست از پایگاه داده که کاربر می تواند دسترسی مشخص. به طور پیش فرض تمام پایگاه های داده در دسترس کاربر هستند. شما می توانید مشخص کنید `default` پایگاه داده است. در این مورد, کاربر دسترسی به پایگاه داده به طور پیش فرض دریافت. -In the optional `` section, you can also specify a list of dictionaries that the user can access. By default, all dictionaries are available to the user. +در اختیاری `` بخش, شما همچنین می توانید یک لیست از لغت نامه که کاربر می تواند دسترسی مشخص. به طور پیش فرض تمام لغت نامه ها برای کاربر در دسترس هستند. -Access to the `system` database is always allowed (since this database is used for processing queries). +دسترسی به `system` پایگاه داده همیشه مجاز (از این پایگاه داده برای پردازش نمایش داده شد استفاده می شود). 
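To tie together the per-user options discussed above (networks, profile, quota and the optional database list), here is a hedged sketch of a per-user file in `users.d/`; the user `analyst`, the network mask and the `reports` database are invented for this example and do not come from the patch:

``` bash
$ cat /etc/clickhouse-server/users.d/analyst.xml
<yandex>
    <users>
        <analyst>
            <password_sha256_hex>...</password_sha256_hex>
            <networks>
                <ip>10.0.0.0/8</ip>
            </networks>
            <profile>readonly</profile>
            <quota>default</quota>
            <!-- restrict this hypothetical user to a single database -->
            <allow_databases>
                <database>reports</database>
            </allow_databases>
        </analyst>
    </users>
</yandex>
```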
-The user can get a list of all databases and tables in them by using `SHOW` queries or system tables, even if access to individual databases isn’t allowed. +کاربر می تواند لیستی از تمام پایگاه های داده و جداول را با استفاده از `SHOW` نمایش داده شد و یا جداول سیستم, حتی اگر دسترسی به پایگاه داده های فردی مجاز نیست. -Database access is not related to the [readonly](settings/permissions_for_queries.md#settings_readonly) setting. You can’t grant full access to one database and `readonly` access to another one. +دسترسی به پایگاه داده به [فقط خواندنی](settings/permissions_for_queries.md#settings_readonly) تنظیمات. شما نمی توانید دسترسی کامل به یک پایگاه داده و `readonly` دسترسی به یکی دیگر. -[Original article](https://clickhouse.tech/docs/en/operations/access_rights/) +[مقاله اصلی](https://clickhouse.tech/docs/en/operations/access_rights/) diff --git a/docs/fa/operations/backup.md b/docs/fa/operations/backup.md index 90efb613098..91938fbcba8 100644 --- a/docs/fa/operations/backup.md +++ b/docs/fa/operations/backup.md @@ -1,38 +1,42 @@ --- -en_copy: true +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 49 +toc_title: "\u067E\u0634\u062A\u06CC\u0628\u0627\u0646 \u06AF\u06CC\u0631\u06CC \u062F\ + \u0627\u062F\u0647 \u0647\u0627" --- -# Data Backup {#data-backup} +# پشتیبان گیری داده ها {#data-backup} -While [replication](table_engines/replication.md) provides protection from hardware failures, it does not protect against human errors: accidental deletion of data, deletion of the wrong table or a table on the wrong cluster, and software bugs that result in incorrect data processing or data corruption. In many cases mistakes like these will affect all replicas. ClickHouse has built-in safeguards to prevent some types of mistakes — for example, by default [you can’t just drop tables with a MergeTree-like engine containing more than 50 Gb of data](https://github.com/ClickHouse/ClickHouse/blob/v18.14.18-stable/programs/server/config.xml#L322-L330). However, these safeguards don’t cover all possible cases and can be circumvented. +در حالی که [تکرار](../engines/table_engines/mergetree_family/replication.md) provides protection from hardware failures, it does not protect against human errors: accidental deletion of data, deletion of the wrong table or a table on the wrong cluster, and software bugs that result in incorrect data processing or data corruption. In many cases mistakes like these will affect all replicas. ClickHouse has built-in safeguards to prevent some types of mistakes — for example, by default [شما نمی توانید فقط جداول را با یک موتور ادغام مانند حاوی بیش از 50 گیگابایت داده رها کنید](https://github.com/ClickHouse/ClickHouse/blob/v18.14.18-stable/programs/server/config.xml#L322-L330). با این حال, این پادمان تمام موارد ممکن را پوشش نمی دهد و می تواند دور. -In order to effectively mitigate possible human errors, you should carefully prepare a strategy for backing up and restoring your data **in advance**. +به منظور به طور موثر کاهش خطاهای انسانی ممکن است, شما باید با دقت تهیه یک استراتژی برای پشتیبان گیری و بازیابی اطلاعات خود را **در پیش**. -Each company has different resources available and business requirements, so there’s no universal solution for ClickHouse backups and restores that will fit every situation. What works for one gigabyte of data likely won’t work for tens of petabytes. There are a variety of possible approaches with their own pros and cons, which will be discussed below. 
It is a good idea to use several approaches instead of just one in order to compensate for their various shortcomings. +هر شرکت دارای منابع مختلف در دسترس و کسب و کار مورد نیاز, بنابراین هیچ راه حل جهانی برای پشتیبان گیری تاتر و بازیابی است که هر وضعیت مناسب وجود دارد. چه کار می کند برای یک گیگابایت از داده ها به احتمال زیاد برای ده ها پتابایت کار نمی کند. انواع روش های ممکن با جوانب مثبت و منفی خود را که در زیر مورد بحث وجود دارد. این یک ایده خوب برای استفاده از روش های مختلف به جای فقط یک به منظور جبران کاستی های مختلف خود است. -!!! note "Note" - Keep in mind that if you backed something up and never tried to restore it, chances are that restore will not work properly when you actually need it (or at least it will take longer than business can tolerate). So whatever backup approach you choose, make sure to automate the restore process as well, and practice it on a spare ClickHouse cluster regularly. +!!! note "یادداشت" + به خاطر داشته باشید که اگر شما چیزی حمایت کردن و هرگز سعی در بازگرداندن, شانس هستند که بازگرداندن به درستی کار نمی کند زمانی که شما در واقع نیاز (یا حداقل طول خواهد کشید از کسب و کار می تواند تحمل). بنابراین هر روش پشتیبان گیری شما را انتخاب کنید, اطمینان حاصل کنید که به طور خودکار روند بازگرداندن و همچنین, و تمرین در یک خوشه محل انتخابی یدکی به طور منظم. -## Duplicating Source Data Somewhere Else {#duplicating-source-data-somewhere-else} +## تکثیر اطلاعات منبع در جایی دیگر {#duplicating-source-data-somewhere-else} -Often data that is ingested into ClickHouse is delivered through some sort of persistent queue, such as [Apache Kafka](https://kafka.apache.org). In this case it is possible to configure an additional set of subscribers that will read the same data stream while it is being written to ClickHouse and store it in cold storage somewhere. Most companies already have some default recommended cold storage, which could be an object store or a distributed filesystem like [HDFS](https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html). +اغلب داده هایی که به خانه کلیک مصرف از طریق نوعی از صف مداوم تحویل داده, مانند [نمایی کافکا](https://kafka.apache.org). در این مورد ممکن است یک مجموعه اضافی از مشترکین را پیکربندی کنید که جریان داده های مشابه را می خواند در حالی که نوشته شده است کلیک کنید و جایی در ذخیره سازی سرد ذخیره کنید. اکثر شرکت ها در حال حاضر برخی از پیش فرض توصیه می شود ذخیره سازی سرد, که می تواند یک فروشگاه شی و یا یک فایل سیستم توزیع مانند [HDFS](https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html). -## Filesystem Snapshots {#filesystem-snapshots} +## گزارشهای ویژه سیستم پرونده {#filesystem-snapshots} -Some local filesystems provide snapshot functionality (for example, [ZFS](https://en.wikipedia.org/wiki/ZFS)), but they might not be the best choice for serving live queries. A possible solution is to create additional replicas with this kind of filesystem and exclude them from the [Distributed](table_engines/distributed.md) tables that are used for `SELECT` queries. Snapshots on such replicas will be out of reach of any queries that modify data. As a bonus, these replicas might have special hardware configurations with more disks attached per server, which would be cost-effective. +برخی از سیستم های فایل های محلی قابلیت عکس فوری (به عنوان مثال, [ZFS](https://en.wikipedia.org/wiki/ZFS)), اما ممکن است بهترین انتخاب برای خدمت نمایش داده شد زندگی می کنند. 
یک راه حل ممکن است برای ایجاد کپی های اضافی با این نوع از سیستم فایل و حذف از [توزیع شده](../engines/table_engines/special/distributed.md) جداول که برای استفاده `SELECT` نمایش داده شد. عکس های فوری در چنین کپی خواهد شد در دسترس از هر گونه نمایش داده شد که تغییر داده ها باشد. به عنوان یک جایزه, این کپی ممکن است تنظیمات سخت افزار خاص با دیسک های بیشتر متصل در هر سرور, خواهد بود که مقرون به صرفه. -## clickhouse-copier {#clickhouse-copier} +## تاتر-کپی {#clickhouse-copier} -[clickhouse-copier](utils/clickhouse-copier.md) is a versatile tool that was initially created to re-shard petabyte-sized tables. It can also be used for backup and restore purposes because it reliably copies data between ClickHouse tables and clusters. +[تاتر-کپی](utilities/clickhouse-copier.md) یک ابزار همه کاره است که در ابتدا به جداول پتابایت به اندازه دوباره سفال ساخته شده است. همچنین می تواند برای تهیه پشتیبان و بازیابی اهداف استفاده شود زیرا به طور قابل اعتماد داده ها را بین جداول کلیک و خوشه ها کپی می کند. -For smaller volumes of data, a simple `INSERT INTO ... SELECT ...` to remote tables might work as well. +برای حجم کمتری از داده ها, ساده `INSERT INTO ... SELECT ...` به جداول از راه دور نیز ممکن است کار کند. -## Manipulations with Parts {#manipulations-with-parts} +## دستکاری با قطعات {#manipulations-with-parts} -ClickHouse allows using the `ALTER TABLE ... FREEZE PARTITION ...` query to create a local copy of table partitions. This is implemented using hardlinks to the `/var/lib/clickhouse/shadow/` folder, so it usually does not consume extra disk space for old data. The created copies of files are not handled by ClickHouse server, so you can just leave them there: you will have a simple backup that doesn’t require any additional external system, but it will still be prone to hardware issues. For this reason, it’s better to remotely copy them to another location and then remove the local copies. Distributed filesystems and object stores are still a good options for this, but normal attached file servers with a large enough capacity might work as well (in this case the transfer will occur via the network filesystem or maybe [rsync](https://en.wikipedia.org/wiki/Rsync)). +کلیک اجازه می دهد تا با استفاده از `ALTER TABLE ... FREEZE PARTITION ...` پرس و جو برای ایجاد یک کپی محلی از پارتیشن های جدول. این اجرا با استفاده از hardlinks به `/var/lib/clickhouse/shadow/` پوشه, بنابراین معمولا فضای دیسک اضافی برای داده های قدیمی مصرف نمی. نسخه های ایجاد شده از فایل ها توسط سرور کلیک هاوس انجام نمی شود, بنابراین شما فقط می توانید ترک وجود دارد: شما یک نسخه پشتیبان تهیه ساده است که هیچ سیستم خارجی اضافی نیاز ندارد, اما هنوز هم مستعد ابتلا به مشکلات سخت افزاری خواهد بود. به همین دلیل بهتر است از راه دور به مکان دیگری کپی کنید و سپس نسخه های محلی را حذف کنید. توزیع فایل سیستم ها و فروشگاه های شی هنوز هم یک گزینه خوب برای این, اما عادی فایل های پیوست شده سرور با ظرفیت به اندازه کافی بزرگ ممکن است کار و همچنین (در این مورد انتقال از طریق فایل سیستم شبکه و یا شاید رخ می دهد [درباره ما](https://en.wikipedia.org/wiki/Rsync)). -For more information about queries related to partition manipulations, see the [ALTER documentation](../query_language/alter.md#alter_manipulations-with-partitions). +برای کسب اطلاعات بیشتر در مورد نمایش داده شد مربوط به دستکاری پارتیشن, دیدن [تغییر مستندات](../sql_reference/statements/alter.md#alter_manipulations-with-partitions). -A third-party tool is available to automate this approach: [clickhouse-backup](https://github.com/AlexAkulov/clickhouse-backup). 
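A rough illustration of the two preceding approaches (freezing a partition, and copying small volumes through `INSERT INTO ... SELECT ...` to a remote table); the table `db.events`, the monthly partition ID `202003` and the host `backup-host` are hypothetical assumptions, not values from this patch:

``` bash
# Hard-link a snapshot of one partition into /var/lib/clickhouse/shadow/
clickhouse-client --query "ALTER TABLE db.events FREEZE PARTITION 202003"

# For small volumes, copy the data straight to a remote ClickHouse server
# through the remote() table function (credentials default to 'default').
clickhouse-client --query "
INSERT INTO FUNCTION remote('backup-host:9000', 'db', 'events')
SELECT * FROM db.events"
```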
+یک ابزار شخص ثالث برای خودکارسازی این روش در دسترس است: [clickhouse-backup](https://github.com/AlexAkulov/clickhouse-backup).
-[Original article](https://clickhouse.tech/docs/en/operations/backup/)
+[مقاله اصلی](https://clickhouse.tech/docs/en/operations/backup/)
diff --git a/docs/fa/operations/configuration_files.md b/docs/fa/operations/configuration_files.md
index 60e6e9a8bde..086d92cfda9 100644
--- a/docs/fa/operations/configuration_files.md
+++ b/docs/fa/operations/configuration_files.md
@@ -1,32 +1,36 @@
---
-en_copy: true
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 50
+toc_title: "\u067E\u0631\u0648\u0646\u062F\u0647\u0647\u0627\u06CC \u067E\u06CC\u06A9\
+  \u0631\u0628\u0646\u062F\u06CC"
---
-# Configuration Files {#configuration_files}
+# پرونده‌های پیکربندی {#configuration_files}
-ClickHouse supports multi-file configuration management. The main server configuration file is `/etc/clickhouse-server/config.xml`. Other files must be in the `/etc/clickhouse-server/config.d` directory.
+ClickHouse از مدیریت پیکربندی چندفایلی پشتیبانی می‌کند. فایل پیکربندی اصلی سرور `/etc/clickhouse-server/config.xml` است. سایر فایل‌ها باید در دایرکتوری `/etc/clickhouse-server/config.d` باشند.
-!!! note "Note"
-    All the configuration files should be in XML format. Also, they should have the same root element, usually `<yandex>`.
+!!! note "یادداشت"
+    تمام فایل‌های پیکربندی باید در فرمت XML باشند. همچنین باید عنصر ریشه یکسانی داشته باشند، معمولا `<yandex>`.
-Some settings specified in the main configuration file can be overridden in other configuration files. The `replace` or `remove` attributes can be specified for the elements of these configuration files.
+برخی از تنظیمات مشخص‌شده در فایل پیکربندی اصلی را می‌توان در فایل‌های پیکربندی دیگر بازنویسی کرد. برای عناصر این فایل‌های پیکربندی می‌توان ویژگی‌های `replace` یا `remove` را مشخص کرد.
-If neither is specified, it combines the contents of elements recursively, replacing values of duplicate children.
+اگر هیچ‌کدام مشخص نشده باشد، محتوای عناصر به صورت بازگشتی ترکیب می‌شود و مقادیر فرزندان تکراری جایگزین می‌شوند.
-If `replace` is specified, it replaces the entire element with the specified one.
+اگر `replace` مشخص شده باشد، کل عنصر با عنصر مشخص‌شده جایگزین می‌شود.
-If `remove` is specified, it deletes the element.
+اگر `remove` مشخص شده باشد، عنصر حذف می‌شود.
-The config can also define “substitutions”. If an element has the `incl` attribute, the corresponding substitution from the file will be used as the value. By default, the path to the file with substitutions is `/etc/metrika.xml`. This can be changed in the [include\_from](server_settings/settings.md#server_settings-include_from) element in the server config. The substitution values are specified in `/yandex/substitution_name` elements in this file. If a substitution specified in `incl` does not exist, it is recorded in the log. To prevent ClickHouse from logging missing substitutions, specify the `optional="true"` attribute (for example, settings for [macros](server_settings/settings.md)).
+پیکربندی همچنین می‌تواند «substitutions» تعریف کند. اگر عنصری ویژگی `incl` داشته باشد، جایگزینی متناظر از فایل به عنوان مقدار استفاده می‌شود. به طور پیش‌فرض مسیر فایل حاوی جایگزینی‌ها `/etc/metrika.xml` است. این مسیر را می‌توان در عنصر [include\_from](server_configuration_parameters/settings.md#server_configuration_parameters-include_from) در پیکربندی سرور تغییر داد. مقادیر جایگزینی در عناصر `/yandex/substitution_name` این فایل مشخص می‌شوند. اگر جایگزینی مشخص‌شده در `incl` وجود نداشته باشد، در لاگ ثبت می‌شود. برای اینکه ClickHouse جایگزینی‌های ناموجود را در لاگ ثبت نکند، ویژگی `optional="true"` را مشخص کنید (برای مثال، تنظیمات [macros](server_configuration_parameters/settings.md)).
-Substitutions can also be performed from ZooKeeper. To do this, specify the attribute `from_zk = "/path/to/node"`. The element value is replaced with the contents of the node at `/path/to/node` in ZooKeeper. You can also put an entire XML subtree on the ZooKeeper node and it will be fully inserted into the source element.
+جایگزینی‌ها را می‌توان از ZooKeeper نیز انجام داد. برای این کار ویژگی `from_zk = "/path/to/node"` را مشخص کنید. مقدار عنصر با محتوای گره `/path/to/node` در ZooKeeper جایگزین می‌شود. همچنین می‌توانید یک زیردرخت کامل XML را روی گره ZooKeeper قرار دهید تا به طور کامل در عنصر مبدا درج شود.
-The `config.xml` file can specify a separate config with user settings, profiles, and quotas. The relative path to this config is set in the `users_config` element. By default, it is `users.xml`. If `users_config` is omitted, the user settings, profiles, and quotas are specified directly in `config.xml`.
+فایل `config.xml` می‌تواند پیکربندی جداگانه‌ای با تنظیمات کاربران، پروفایل‌ها و سهمیه‌ها مشخص کند. مسیر نسبی این پیکربندی در عنصر `users_config` تنظیم می‌شود و به طور پیش‌فرض `users.xml` است. اگر `users_config` حذف شود، تنظیمات کاربران، پروفایل‌ها و سهمیه‌ها مستقیما در `config.xml` مشخص می‌شوند.
-Users configuration can be splitted into separate files similar to `config.xml` and `config.d/`.
-Directory name is defined as `users_config` setting without `.xml` postfix concatenated with `.d`.
-Directory `users.d` is used by default, as `users_config` defaults to `users.xml`.
-For example, you can have separate config file for each user like this:
+پیکربندی کاربران را می‌توان مشابه `config.xml` و `config.d/` به فایل‌های جداگانه تقسیم کرد.
+نام دایرکتوری از مقدار تنظیم `users_config` بدون پسوند `.xml` و با افزودن `.d` ساخته می‌شود.
+دایرکتوری `users.d` به طور پیش‌فرض استفاده می‌شود، چون `users_config` به طور پیش‌فرض `users.xml` است.
+برای مثال، می‌توانید برای هر کاربر فایل پیکربندی جداگانه‌ای مانند زیر داشته باشید:
``` bash
$ cat /etc/clickhouse-server/users.d/alice.xml
@@ -47,8 +51,8 @@ $ cat /etc/clickhouse-server/users.d/alice.xml
```
-For each config file, the server also generates `file-preprocessed.xml` files when starting. These files contain all the completed substitutions and overrides, and they are intended for informational use. If ZooKeeper substitutions were used in the config files but ZooKeeper is not available on the server start, the server loads the configuration from the preprocessed file.
+برای هر فایل پیکربندی، سرور هنگام راه‌اندازی فایل‌های `file-preprocessed.xml` را نیز تولید می‌کند. این فایل‌ها شامل تمام جایگزینی‌ها و بازنویسی‌های انجام‌شده هستند و برای استفاده اطلاعاتی در نظر گرفته شده‌اند. اگر در فایل‌های پیکربندی از جایگزینی‌های ZooKeeper استفاده شده باشد اما ZooKeeper هنگام راه‌اندازی سرور در دسترس نباشد، سرور پیکربندی را از فایل پیش‌پردازش‌شده بارگیری می‌کند.
-The server tracks changes in config files, as well as files and ZooKeeper nodes that were used when performing substitutions and overrides, and reloads the settings for users and clusters on the fly. This means that you can modify the cluster, users, and their settings without restarting the server.
+سرور تغییرات فایل‌های پیکربندی و همچنین فایل‌ها و گره‌های ZooKeeper استفاده‌شده در جایگزینی‌ها و بازنویسی‌ها را دنبال می‌کند و تنظیمات کاربران و خوشه‌ها را به صورت درجا بارگیری مجدد می‌کند. این یعنی می‌توانید خوشه، کاربران و تنظیمات‌شان را بدون راه‌اندازی مجدد سرور تغییر دهید.
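A small, assumed example of the substitution mechanism described above; the macro names and values are placeholders chosen for illustration:

``` bash
$ cat /etc/metrika.xml
<yandex>
    <macros>
        <shard>01</shard>
        <replica>replica-01</replica>
    </macros>
</yandex>

$ cat /etc/clickhouse-server/config.d/substitutions.xml
<yandex>
    <!-- The value is pulled from /etc/metrika.xml; optional="true"
         keeps a missing substitution out of the server log. -->
    <macros incl="macros" optional="true" />
</yandex>
```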
-[Original article](https://clickhouse.tech/docs/en/operations/configuration_files/) +[مقاله اصلی](https://clickhouse.tech/docs/en/operations/configuration_files/) diff --git a/docs/fa/operations/index.md b/docs/fa/operations/index.md index 921307be179..ac5c7a28fdc 100644 --- a/docs/fa/operations/index.md +++ b/docs/fa/operations/index.md @@ -1,24 +1,28 @@ --- -en_copy: true +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_folder_title: Operations +toc_priority: 41 +toc_title: "\u0645\u0639\u0631\u0641\u06CC \u0634\u0631\u06A9\u062A" --- -# Operations {#operations} +# عملیات {#operations} -ClickHouse operations manual consists of the following major sections: +کتابچه راهنمای عملیات کلیک متشکل از بخش های اصلی زیر است: -- [Requirements](requirements.md) -- [Monitoring](monitoring.md) -- [Troubleshooting](troubleshooting.md) -- [Usage Recommendations](tips.md) -- [Update Procedure](update.md) -- [Access Rights](access_rights.md) -- [Data Backup](backup.md) -- [Configuration Files](configuration_files.md) -- [Quotas](quotas.md) -- [System Tables](system_tables.md) -- [Server Configuration Parameters](server_settings/index.md) -- [How To Test Your Hardware With ClickHouse](performance_test.md) -- [Settings](settings/index.md) -- [Utilities](utils/index.md) +- [الزامات](requirements.md) +- [نظارت](monitoring.md) +- [عیب یابی](troubleshooting.md) +- [توصیه های استفاده](tips.md) +- [روش به روز رسانی](update.md) +- [حقوق دسترسی](access_rights.md) +- [پشتیبان گیری داده ها](backup.md) +- [پروندههای پیکربندی](configuration_files.md) +- [سهمیه](quotas.md) +- [جداول سیستم](system_tables.md) +- [پارامترهای پیکربندی سرور](server_configuration_parameters/index.md) +- [چگونه برای تست سخت افزار خود را با کلیک](performance_test.md) +- [تنظیمات](settings/index.md) +- [تاسیسات](utilities/index.md) -[Original article](https://clickhouse.tech/docs/en/operations/) +[مقاله اصلی](https://clickhouse.tech/docs/en/operations/) diff --git a/docs/fa/operations/monitoring.md b/docs/fa/operations/monitoring.md index b8d72b07b61..79e11cf51e6 100644 --- a/docs/fa/operations/monitoring.md +++ b/docs/fa/operations/monitoring.md @@ -1,41 +1,44 @@ --- -en_copy: true +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 45 +toc_title: "\u0646\u0638\u0627\u0631\u062A" --- -# Monitoring {#monitoring} +# نظارت {#monitoring} -You can monitor: +شما می توانید نظارت: -- Utilization of hardware resources. -- ClickHouse server metrics. +- استفاده از منابع سخت افزاری. +- معیارهای سرور کلیک. -## Resource Utilization {#resource-utilization} +## استفاده از منابع {#resource-utilization} -ClickHouse does not monitor the state of hardware resources by itself. +کلیک می کند دولت از منابع سخت افزاری به خودی خود نظارت نیست. -It is highly recommended to set up monitoring for: +این است که به شدت توصیه می شود به راه اندازی نظارت برای: -- Load and temperature on processors. +- بار و درجه حرارت در پردازنده. - You can use [dmesg](https://en.wikipedia.org/wiki/Dmesg), [turbostat](https://www.linux.org/docs/man8/turbostat.html) or other instruments. + شما می توانید استفاده کنید [راهنمایی و رانندگی](https://en.wikipedia.org/wiki/Dmesg), [توربوستات](https://www.linux.org/docs/man8/turbostat.html) و یا ابزار های دیگر. -- Utilization of storage system, RAM and network. +- استفاده از سیستم ذخیره سازی, رم و شبکه. 
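
As a rough, non-authoritative sketch, the host-level checks suggested above can be scripted with standard Linux tools (package names vary by distribution; `turbostat` usually ships with `linux-tools`, `iostat` with `sysstat`):

``` bash
dmesg | tail -n 20    # recent kernel messages, including thermal/hardware events
turbostat sleep 5     # CPU busy %, frequencies and temperatures over a 5 s window
iostat -x 5 1         # extended storage-utilization statistics
free -h               # RAM usage in human-readable units
```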
-## ClickHouse Server Metrics {#clickhouse-server-metrics} +## معیارهای سرور کلیک {#clickhouse-server-metrics} -ClickHouse server has embedded instruments for self-state monitoring. +سرور کلیک ابزار برای نظارت خود دولت تعبیه شده است. -To track server events use server logs. See the [logger](server_settings/settings.md#server_settings-logger) section of the configuration file. +برای پیگیری رویدادهای سرور استفاده از سیاهههای مربوط به سرور. دیدن [چوبگر](server_configuration_parameters/settings.md#server_configuration_parameters-logger) بخش از فایل پیکربندی. -ClickHouse collects: +جمعهای کلیک: -- Different metrics of how the server uses computational resources. -- Common statistics on query processing. +- معیارهای مختلف چگونه سرور با استفاده از منابع محاسباتی. +- ارقام مشترک در پردازش پرس و جو. -You can find metrics in the [system.metrics](system_tables.md#system_tables-metrics), [system.events](system_tables.md#system_tables-events), and [system.asynchronous\_metrics](system_tables.md#system_tables-asynchronous_metrics) tables. +شما می توانید معیارهای موجود در [سیستم.متریک](../operations/system_tables.md#system_tables-metrics), [سیستم.رویدادها](../operations/system_tables.md#system_tables-events) و [سیستم.\_نامهنویسی ناهمزمان](../operations/system_tables.md#system_tables-asynchronous_metrics) میز -You can configure ClickHouse to export metrics to [Graphite](https://github.com/graphite-project). See the [Graphite section](server_settings/settings.md#server_settings-graphite) in the ClickHouse server configuration file. Before configuring export of metrics, you should set up Graphite by following their official [guide](https://graphite.readthedocs.io/en/latest/install.html). +شما می توانید کلیک کنید هاوس به صادرات معیارهای به پیکربندی کنید [گرافیت](https://github.com/graphite-project). دیدن [بخش گرافیت](server_configuration_parameters/settings.md#server_configuration_parameters-graphite) در فایل پیکربندی سرور کلیک. قبل از پیکربندی صادرات معیارهای, شما باید راه اندازی گرافیت با پیروی از رسمی خود را [راهنما](https://graphite.readthedocs.io/en/latest/install.html). -Additionally, you can monitor server availability through the HTTP API. Send the `HTTP GET` request to `/ping`. If the server is available, it responds with `200 OK`. +علاوه بر این, شما می توانید در دسترس بودن سرور از طریق صفحه اصلی نظارت. ارسال `HTTP GET` درخواست برای `/ping`. اگر سرور در دسترس است, با پاسخ `200 OK`. -To monitor servers in a cluster configuration, you should set the [max\_replica\_delay\_for\_distributed\_queries](settings/settings.md#settings-max_replica_delay_for_distributed_queries) parameter and use the HTTP resource `/replicas_status`. A request to `/replicas_status` returns `200 OK` if the replica is available and is not delayed behind the other replicas. If a replica is delayed, it returns `503 HTTP_SERVICE_UNAVAILABLE` with information about the gap. +برای نظارت بر سرور در یک پیکربندی خوشه, شما باید مجموعه ای از [\_شروع مجدد \_شروع مجدد \_شروع مجدد \_کاربری](settings/settings.md#settings-max_replica_delay_for_distributed_queries) پارامتر و استفاده از منبع قام `/replicas_status`. یک درخواست برای `/replicas_status` بازگشت `200 OK` اگر ماکت در دسترس است و در پشت کپی دیگر به تعویق افتاد. اگر یک ماکت به تاخیر افتاد, باز می گردد `503 HTTP_SERVICE_UNAVAILABLE` با اطلاعات در مورد شکاف. 
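
A quick availability probe based on the two endpoints described above, assuming the default HTTP port 8123:

``` bash
# Liveness: returns "Ok." with HTTP 200 when the server is up.
curl -sS 'http://localhost:8123/ping'

# Replica health: 200 OK if this replica is available and not lagging,
# 503 HTTP_SERVICE_UNAVAILABLE with details about the gap otherwise.
curl -sS -w 'HTTP %{http_code}\n' 'http://localhost:8123/replicas_status'
```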
diff --git a/docs/fa/operations/optimizing_performance/index.md b/docs/fa/operations/optimizing_performance/index.md new file mode 100644 index 00000000000..13dc51db028 --- /dev/null +++ b/docs/fa/operations/optimizing_performance/index.md @@ -0,0 +1,8 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_folder_title: Optimizing Performance +toc_priority: 52 +--- + + diff --git a/docs/fa/operations/optimizing_performance/sampling_query_profiler.md b/docs/fa/operations/optimizing_performance/sampling_query_profiler.md new file mode 100644 index 00000000000..c5e1b55d55c --- /dev/null +++ b/docs/fa/operations/optimizing_performance/sampling_query_profiler.md @@ -0,0 +1,65 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 54 +toc_title: "\u067E\u0631\u0648\u0641\u0627\u06CC\u0644 \u067E\u0631\u0633 \u0648 \u062C\ + \u0648" +--- + +# پروفایل پرس و جو نمونه برداری {#sampling-query-profiler} + +فاحشه خانه اجرا می شود نمونه برداری پیشفیلتر که اجازه می دهد تجزیه و تحلیل اجرای پرس و جو. با استفاده از نیمرخ شما می توانید روال کد منبع که اغلب در طول اجرای پرس و جو استفاده پیدا. شما می توانید زمان پردازنده و دیوار ساعت زمان صرف شده از جمله زمان بیکار ردیابی. + +برای استفاده از پروفیل: + +- برپایی [\_قطع](../server_configuration_parameters/settings.md#server_configuration_parameters-trace_log) بخش پیکربندی سرور. + + در این بخش پیکربندی [\_قطع](../../operations/system_tables.md#system_tables-trace_log) جدول سیستم حاوی نتایج حاصل از عملکرد پیشفیلتر. این است که به طور پیش فرض پیکربندی شده است. به یاد داشته باشید که داده ها در این جدول تنها برای یک سرور در حال اجرا معتبر است. پس از راه اندازی مجدد سرور, تاتر تمیز نمی کند تا جدول و تمام نشانی حافظه مجازی ذخیره شده ممکن است نامعتبر. + +- برپایی [ایران در تهران](../settings/settings.md#query_profiler_cpu_time_period_ns) یا [جستجو](../settings/settings.md#query_profiler_real_time_period_ns) تنظیمات. هر دو تنظیمات را می توان به طور همزمان استفاده کرد. + + این تنظیمات به شما اجازه پیکربندی تایمر پیشفیلتر. همانطور که این تنظیمات جلسه هستند, شما می توانید فرکانس نمونه برداری های مختلف برای کل سرور از, کاربران فردی و یا پروفایل های کاربر, برای جلسه تعاملی خود را, و برای هر پرس و جو فردی. + +فرکانس نمونه گیری به طور پیش فرض یک نمونه در هر ثانیه است و هر دو پردازنده و تایمر واقعی را فعال کنید. این فرکانس اجازه می دهد تا اطلاعات کافی در مورد خوشه کلیک کنید. همزمان, کار با این فرکانس, پیشفیلتر می کند عملکرد سرور کلیک را تحت تاثیر قرار نمی. اگر شما نیاز به مشخصات هر پرس و جو فردی سعی کنید به استفاده از فرکانس نمونه برداری بالاتر است. + +برای تجزیه و تحلیل `trace_log` جدول سیستم: + +- نصب `clickhouse-common-static-dbg` بسته ببینید [نصب از بسته های دب](../../getting_started/install.md#install-from-deb-packages). + +- اجازه توابع درون گرایی توسط [اجازه دادن به \_فعال کردن اختلال در عملکرد](../settings/settings.md#settings-allow_introspection_functions) تنظیمات. + + به دلایل امنیتی, توابع درون گرایی به طور پیش فرض غیر فعال. + +- استفاده از `addressToLine`, `addressToSymbol` و `demangle` [توابع درون گرایی](../../sql_reference/functions/introspection.md) برای گرفتن نام تابع و موقعیت خود را در کد کلیک کنید. برای دریافت یک پروفایل برای برخی از پرس و جو, شما نیاز به جمع داده ها از `trace_log` جدول شما می توانید داده ها را با توابع فردی یا کل ردیابی پشته جمع کنید. + +اگر شما نیاز به تجسم `trace_log` اطلاعات را امتحان کنید [شق](../../interfaces/third-party/gui/#clickhouse-flamegraph) و [سرعت سنج](https://github.com/laplab/clickhouse-speedscope). 
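
Since both profiler timers are session-level settings (see the setup steps above), a hedged sketch of per-query profiling could look like this; the `./clickhouse client` invocation and the `numbers()` workload are placeholders:

``` bash
# Sample roughly every 10 ms (10,000,000 ns) for this one query only.
./clickhouse client --query "
    SELECT count()
    FROM numbers(100000000)
    SETTINGS query_profiler_real_time_period_ns = 10000000,
             query_profiler_cpu_time_period_ns = 10000000"

# The collected samples land in system.trace_log for later aggregation.
./clickhouse client --query "
    SELECT count() FROM system.trace_log WHERE event_date = today()"
```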
+ +## مثال {#example} + +در این مثال ما: + +- پالایش `trace_log` داده ها توسط یک شناسه پرس و جو و تاریخ جاری. + +- جمع توسط ردیابی پشته. + +- با استفاده از توابع درون گرایی, ما یک گزارش از دریافت: + + - نام نمادها و توابع کد منبع مربوطه. + - محل کد منبع از این توابع. + + + +``` sql +SELECT + count(), + arrayStringConcat(arrayMap(x -> concat(demangle(addressToSymbol(x)), '\n ', addressToLine(x)), trace), '\n') AS sym +FROM system.trace_log +WHERE (query_id = 'ebca3574-ad0a-400a-9cbc-dca382f5998c') AND (event_date = today()) +GROUP BY trace +ORDER BY count() DESC +LIMIT 10 +``` + +``` text +{% include "operations/performance/sampling_query_profiler_example_result.txt" %} +``` diff --git a/docs/fa/operations/performance/sampling_query_profiler.md b/docs/fa/operations/performance/sampling_query_profiler.md deleted file mode 100644 index 25368fcd883..00000000000 --- a/docs/fa/operations/performance/sampling_query_profiler.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -en_copy: true ---- - -# Sampling Query Profiler {#sampling-query-profiler} - -ClickHouse runs sampling profiler that allows analyzing query execution. Using profiler you can find source code routines that used the most frequently during query execution. You can trace CPU time and wall-clock time spent including idle time. - -To use profiler: - -- Setup the [trace\_log](../server_settings/settings.md#server_settings-trace_log) section of the server configuration. - - This section configures the [trace\_log](../system_tables.md#system_tables-trace_log) system table containing the results of the profiler functioning. It is configured by default. Remember that data in this table is valid only for a running server. After the server restart, ClickHouse doesn’t clean up the table and all the stored virtual memory address may become invalid. - -- Setup the [query\_profiler\_cpu\_time\_period\_ns](../settings/settings.md#query_profiler_cpu_time_period_ns) or [query\_profiler\_real\_time\_period\_ns](../settings/settings.md#query_profiler_real_time_period_ns) settings. Both settings can be used simultaneously. - - These settings allow you to configure profiler timers. As these are the session settings, you can get different sampling frequency for the whole server, individual users or user profiles, for your interactive session, and for each individual query. - -The default sampling frequency is one sample per second and both CPU and real timers are enabled. This frequency allows collecting enough information about ClickHouse cluster. At the same time, working with this frequency, profiler doesn’t affect ClickHouse server’s performance. If you need to profile each individual query try to use higher sampling frequency. - -To analyze the `trace_log` system table: - -- Install the `clickhouse-common-static-dbg` package. See [Install from DEB Packages](../../getting_started/install.md#install-from-deb-packages). - -- Allow introspection functions by the [allow\_introspection\_functions](../settings/settings.md#settings-allow_introspection_functions) setting. - - For security reasons, introspection functions are disabled by default. - -- Use the `addressToLine`, `addressToSymbol` and `demangle` [introspection functions](../../query_language/functions/introspection.md) to get function names and their positions in ClickHouse code. To get a profile for some query, you need to aggregate data from the `trace_log` table. You can aggregate data by individual functions or by the whole stack traces. 
- -If you need to visualize `trace_log` info, try [flamegraph](../../interfaces/third-party/gui/#clickhouse-flamegraph) and [speedscope](https://github.com/laplab/clickhouse-speedscope). - -## Example {#example} - -In this example we: - -- Filtering `trace_log` data by a query identifier and the current date. - -- Aggregating by stack trace. - -- Using introspection functions, we will get a report of: - - - Names of symbols and corresponding source code functions. - - Source code locations of these functions. - - - -``` sql -SELECT - count(), - arrayStringConcat(arrayMap(x -> concat(demangle(addressToSymbol(x)), '\n ', addressToLine(x)), trace), '\n') AS sym -FROM system.trace_log -WHERE (query_id = 'ebca3574-ad0a-400a-9cbc-dca382f5998c') AND (event_date = today()) -GROUP BY trace -ORDER BY count() DESC -LIMIT 10 -``` - -``` text -{% include "operations/performance/sampling_query_profiler_example_result.txt" %} -``` diff --git a/docs/fa/operations/performance_test.md b/docs/fa/operations/performance_test.md index ae4c5752703..55f589fc83b 100644 --- a/docs/fa/operations/performance_test.md +++ b/docs/fa/operations/performance_test.md @@ -1,18 +1,21 @@ --- -en_copy: true +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 54 +toc_title: "\u0633\u062E\u062A \u0627\u0641\u0632\u0627\u0631 \u062A\u0633\u062A" --- -# How To Test Your Hardware With ClickHouse {#how-to-test-your-hardware-with-clickhouse} +# چگونه برای تست سخت افزار خود را با کلیک {#how-to-test-your-hardware-with-clickhouse} -With this instruction you can run basic ClickHouse performance test on any server without installation of ClickHouse packages. +با این آموزش شما می توانید اجرا پایه clickhouse آزمون عملکرد بر روی هر سرور بدون نصب و راه اندازی clickhouse بسته است. -1. Go to “commits” page: https://github.com/ClickHouse/ClickHouse/commits/master +1. برو به “commits” صفحه: https://github.com/ClickHouse/ClickHouse/commits/master -2. Click on the first green check mark or red cross with green “ClickHouse Build Check” and click on the “Details” link near “ClickHouse Build Check”. +2. با کلیک بر روی اولین علامت چک سبز یا صلیب قرمز با سبز “ClickHouse Build Check” و با کلیک بر روی “Details” لینک نزدیک “ClickHouse Build Check”. -3. Copy the link to “clickhouse” binary for amd64 or aarch64. +3. رونوشت از پیوند به “clickhouse” دودویی برای amd64 یا aarch64. -4. ssh to the server and download it with wget: +4. به سرور بروید و با استفاده از ابزار دانلود کنید: @@ -23,7 +26,7 @@ With this instruction you can run basic ClickHouse performance test on any serve # Then do: chmod a+x clickhouse -1. Download configs: +1. دانلود تنظیمات: @@ -33,7 +36,7 @@ With this instruction you can run basic ClickHouse performance test on any serve wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.d/path.xml -O config.d/path.xml wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.d/log_to_console.xml -O config.d/log_to_console.xml -1. Download benchmark files: +1. دانلود فایل معیار: @@ -41,7 +44,7 @@ With this instruction you can run basic ClickHouse performance test on any serve chmod a+x benchmark-new.sh wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/clickhouse/queries.sql -1. Download test data according to the [Yandex.Metrica dataset](../getting_started/example_datasets/metrica.md) instruction (“hits” table containing 100 million rows). +1. 
دانلود داده های تست با توجه به [یاندکسمجموعه داده های متریکا](../getting_started/example_datasets/metrica.md) دستورالعمل (“hits” جدول حاوی 100 میلیون ردیف). @@ -49,31 +52,31 @@ With this instruction you can run basic ClickHouse performance test on any serve tar xvf hits_100m_obfuscated_v1.tar.xz -C . mv hits_100m_obfuscated_v1/* . -1. Run the server: +1. اجرای کارساز: ./clickhouse server -1. Check the data: ssh to the server in another terminal +1. داده ها را بررسی کنید: در ترمینال دیگر به سرور مراجعه کنید ./clickhouse client --query "SELECT count() FROM hits_100m_obfuscated" 100000000 -1. Edit the benchmark-new.sh, change “clickhouse-client” to “./clickhouse client” and add “–max\_memory\_usage 100000000000” parameter. +1. ویرایش benchmark-new.sh تغییر “clickhouse-client” به “./clickhouse client” و اضافه کردن “–max\_memory\_usage 100000000000” پارامتر. mcedit benchmark-new.sh -1. Run the benchmark: +1. اجرای معیار: ./benchmark-new.sh hits_100m_obfuscated -1. Send the numbers and the info about your hardware configuration to clickhouse-feedback@yandex-team.com +1. ارسال اعداد و اطلاعات در مورد پیکربندی سخت افزار خود را به clickhouse-feedback@yandex-team.com -All the results are published here: https://clickhouse.tech/benchmark\_hardware.html +همه نتایج در اینجا منتشر شده: https://clickhouse.فناوری/ظروف محک.زنگام diff --git a/docs/fa/operations/quotas.md b/docs/fa/operations/quotas.md index 191f304c82b..7baed14e2ca 100644 --- a/docs/fa/operations/quotas.md +++ b/docs/fa/operations/quotas.md @@ -1,20 +1,23 @@ --- -en_copy: true +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 51 +toc_title: "\u0633\u0647\u0645\u06CC\u0647" --- -# Quotas {#quotas} +# سهمیه {#quotas} -Quotas allow you to limit resource usage over a period of time, or simply track the use of resources. -Quotas are set up in the user config. This is usually ‘users.xml’. +سهمیه به شما اجازه محدود کردن استفاده از منابع بیش از یک دوره از زمان, و یا به سادگی پیگیری استفاده از منابع. +سهمیه در پیکربندی کاربر راه اندازی. این است که معمولا ‘users.xml’. -The system also has a feature for limiting the complexity of a single query. See the section “Restrictions on query complexity”). +این سیستم همچنین دارای یک ویژگی برای محدود کردن پیچیدگی یک پرس و جو واحد. بخش را ببینید “Restrictions on query complexity”). -In contrast to query complexity restrictions, quotas: +در مقابل به پرس و جو محدودیت پیچیدگی, سهمیه: -- Place restrictions on a set of queries that can be run over a period of time, instead of limiting a single query. -- Account for resources spent on all remote servers for distributed query processing. +- محل محدودیت در مجموعه ای از نمایش داده شد که می تواند بیش از یک دوره از زمان اجرا, به جای محدود کردن یک پرس و جو. +- حساب برای منابع صرف شده در تمام سرور از راه دور برای پردازش پرس و جو توزیع شده است. -Let’s look at the section of the ‘users.xml’ file that defines quotas. +بیایید به بخش ‘users.xml’ فایل که سهمیه را تعریف می کند. ``` xml @@ -36,8 +39,8 @@ Let’s look at the section of the ‘users.xml’ file that defines quotas. ``` -By default, the quota just tracks resource consumption for each hour, without limiting usage. -The resource consumption calculated for each interval is output to the server log after each request. +به طور پیش فرض, سهمیه فقط ردیابی مصرف منابع برای هر ساعت, بدون محدود کردن استفاده. +مصرف منابع محاسبه شده برای هر فاصله خروجی به ورود به سیستم سرور بعد از هر درخواست است. 
``` xml @@ -65,11 +68,11 @@ The resource consumption calculated for each interval is output to the server lo ``` -For the ‘statbox’ quota, restrictions are set for every hour and for every 24 hours (86,400 seconds). The time interval is counted starting from an implementation-defined fixed moment in time. In other words, the 24-hour interval doesn’t necessarily begin at midnight. +برای ‘statbox’ سهمیه, محدودیت برای هر ساعت و برای هر مجموعه 24 ساعت ها (86,400 ثانیه). فاصله زمانی شمارش شروع از یک لحظه ثابت پیاده سازی تعریف شده در زمان. به عبارت دیگر فاصله 24 ساعته لزوما در نیمه شب شروع نمی شود. -When the interval ends, all collected values are cleared. For the next hour, the quota calculation starts over. +هنگامی که فاصله به پایان می رسد تمام مقادیر جمع شده پاک می شوند. برای ساعت بعد محاسبه سهمیه بیش از شروع می شود. -Here are the amounts that can be restricted: +در اینجا مقدار است که می تواند محدود می شود: `queries` – The total number of requests. @@ -81,9 +84,9 @@ Here are the amounts that can be restricted: `execution_time` – The total query execution time, in seconds (wall time). -If the limit is exceeded for at least one time interval, an exception is thrown with a text about which restriction was exceeded, for which interval, and when the new interval begins (when queries can be sent again). +اگر حد برای حداقل یک فاصله زمانی بیش از, یک استثنا با یک متن که در مورد محدودیت بیش از حد شد پرتاب, که فاصله, و هنگامی که فاصله جدید شروع می شود (هنگامی که نمایش داده شد را می توان دوباره ارسال). -Quotas can use the “quota key” feature in order to report on resources for multiple keys independently. Here is an example of this: +سهمیه می توانید استفاده کنید “quota key” ویژگی به منظور گزارش منابع برای کلید های متعدد به طور مستقل. در اینجا یک مثال از این است: ``` xml @@ -100,10 +103,10 @@ Quotas can use the “quota key” feature in order to report on resources for m ``` -The quota is assigned to users in the ‘users’ section of the config. See the section “Access rights”. +سهمیه به کاربران در اختصاص داده ‘users’ بخش پیکربندی. بخش را ببینید “Access rights”. -For distributed query processing, the accumulated amounts are stored on the requestor server. So if the user goes to another server, the quota there will “start over”. +برای پردازش پرس و جو توزیع, مقدار انباشته شده بر روی سرور درخواست ذخیره می شود. بنابراین اگر کاربر می رود به سرور دیگر, سهمیه وجود خواهد داشت “start over”. -When the server is restarted, quotas are reset. +هنگامی که سرور دوباره راه اندازی شده است, سهمیه تنظیم مجدد. -[Original article](https://clickhouse.tech/docs/en/operations/quotas/) +[مقاله اصلی](https://clickhouse.tech/docs/en/operations/quotas/) diff --git a/docs/fa/operations/requirements.md b/docs/fa/operations/requirements.md index 9dd5553a241..6050649de90 100644 --- a/docs/fa/operations/requirements.md +++ b/docs/fa/operations/requirements.md @@ -1,58 +1,61 @@ --- -en_copy: true +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 44 +toc_title: "\u0627\u0644\u0632\u0627\u0645\u0627\u062A" --- -# Requirements {#requirements} +# الزامات {#requirements} ## CPU {#cpu} -For installation from prebuilt deb packages, use a CPU with x86\_64 architecture and support for SSE 4.2 instructions. To run ClickHouse with processors that do not support SSE 4.2 or have AArch64 or PowerPC64LE architecture, you should build ClickHouse from sources. +برای نصب و راه اندازی از بسته های پیش ساخته دب, استفاده از یک پردازنده با معماری ایکس86\_64 و پشتیبانی برای سوس 4.2 دستورالعمل. 
برای اجرای clickhouse با پردازنده های که پشتیبانی نمی کند sse 4.2 یا aarch64 یا powerpc64le معماری شما باید ساخت clickhouse از منابع. -ClickHouse implements parallel data processing and uses all the hardware resources available. When choosing a processor, take into account that ClickHouse works more efficiently at configurations with a large number of cores but a lower clock rate than at configurations with fewer cores and a higher clock rate. For example, 16 cores with 2600 MHz is preferable to 8 cores with 3600 MHz. +تاتر پیاده سازی پردازش داده های موازی و با استفاده از تمام منابع سخت افزاری در دسترس. در هنگام انتخاب یک پردازنده, را به حساب که فاحشه خانه کار می کند موثر تر در تنظیمات با تعداد زیادی از هسته اما نرخ ساعت پایین تر از در تنظیمات با هسته کمتر و نرخ ساعت بالاتر. مثلا, 16 هسته با 2600 مگاهرتز بهتر از است 8 هسته با 3600 مگاهرتز. -Use of **Turbo Boost** and **hyper-threading** technologies is recommended. It significantly improves performance with a typical load. +استفاده از **افزایش توربو** و **بیش از حد نخ** تکنولوژی توصیه می شود. این به طور قابل توجهی عملکرد را با یک بار معمولی بهبود می بخشد. ## RAM {#ram} -We recommend to use a minimum of 4GB of RAM in order to perform non-trivial queries. The ClickHouse server can run with a much smaller amount of RAM, but it requires memory for processing queries. +ما توصیه می کنیم به استفاده از حداقل 4 گیگابایت رم به منظور انجام نمایش داده شد غیر بدیهی. سرور کلیک می توانید با مقدار بسیار کوچکتر از رم اجرا, اما نیاز به حافظه برای پردازش نمایش داده شد. -The required volume of RAM depends on: +حجم مورد نیاز رم بستگی دارد: -- The complexity of queries. -- The amount of data that is processed in queries. +- پیچیدگی نمایش داده شد. +- مقدار داده هایی که در نمایش داده شد پردازش شده است. -To calculate the required volume of RAM, you should estimate the size of temporary data for [GROUP BY](../query_language/select.md#select-group-by-clause), [DISTINCT](../query_language/select.md#select-distinct), [JOIN](../query_language/select.md#select-join) and other operations you use. +برای محاسبه حجم مورد نیاز رم, شما باید اندازه داده های موقت برای تخمین [GROUP BY](../sql_reference/statements/select.md#select-group-by-clause), [DISTINCT](../sql_reference/statements/select.md#select-distinct), [JOIN](../sql_reference/statements/select.md#select-join) و عملیات دیگر استفاده می کنید. -ClickHouse can use external memory for temporary data. See [GROUP BY in External Memory](../query_language/select.md#select-group-by-in-external-memory) for details. +تاتر می توانید حافظه خارجی برای داده های موقت استفاده. ببینید [گروه در حافظه خارجی](../sql_reference/statements/select.md#select-group-by-in-external-memory) برای اطلاعات بیشتر. -## Swap File {#swap-file} +## تعویض پرونده {#swap-file} -Disable the swap file for production environments. +غیر فعال کردن فایل مبادله برای محیط های تولید. -## Storage Subsystem {#storage-subsystem} +## زیرسیستم ذخیره سازی {#storage-subsystem} -You need to have 2GB of free disk space to install ClickHouse. +شما باید 2 گیگابایت فضای دیسک رایگان برای نصب کلیک کنید. -The volume of storage required for your data should be calculated separately. Assessment should include: +حجم ذخیره سازی مورد نیاز برای داده های خود را باید به طور جداگانه محاسبه می شود. ارزیابی باید شامل موارد زیر باشد: -- Estimation of the data volume. +- تخمین حجم داده ها. - You can take a sample of the data and get the average size of a row from it. Then multiply the value by the number of rows you plan to store. + شما می توانید یک نمونه از داده ها و اندازه متوسط یک ردیف از. 
سپس مقدار ضرب شده توسط تعدادی از ردیف شما برنامه ای برای ذخیره. -- The data compression coefficient. +- ضریب فشرده سازی داده ها. - To estimate the data compression coefficient, load a sample of your data into ClickHouse and compare the actual size of the data with the size of the table stored. For example, clickstream data is usually compressed by 6-10 times. + برای تخمین ضریب فشرده سازی داده ها, بار یک نمونه از داده های خود را به خانه رعیتی و مقایسه اندازه واقعی از داده ها با اندازه جدول ذخیره می شود. مثلا, داده های کلیک استریم است که معمولا توسط فشرده 6-10 بار. -To calculate the final volume of data to be stored, apply the compression coefficient to the estimated data volume. If you plan to store data in several replicas, then multiply the estimated volume by the number of replicas. +برای محاسبه حجم نهایی داده ها ذخیره می شود, اعمال ضریب فشرده سازی به حجم داده های تخمین زده شده. اگر شما قصد دارید برای ذخیره داده ها در چند کپی, سپس ضرب حجم تخمین زده شده توسط تعدادی از کپی. -## Network {#network} +## شبکه {#network} -If possible, use networks of 10G or higher class. +در صورت امکان از شبکه های 10 گرم یا کلاس بالاتر استفاده کنید. -The network bandwidth is critical for processing distributed queries with a large amount of intermediate data. In addition, network speed affects replication processes. +پهنای باند شبکه برای پردازش نمایش داده شد توزیع با مقدار زیادی از داده های متوسط بسیار مهم است. علاوه بر این, سرعت شبکه را تحت تاثیر قرار فرایندهای تکرار. -## Software {#software} +## نرم افزار {#software} -ClickHouse is developed for the Linux family of operating systems. The recommended Linux distribution is Ubuntu. The `tzdata` package should be installed in the system. +کلیک هاوس برای خانواده لینوکس سیستم عامل توسعه یافته است. توزیع لینوکس توصیه شده اوبونتو است. این `tzdata` بسته باید در سیستم نصب شود. -ClickHouse can also work in other operating system families. See details in the [Getting started](../getting_started/index.md) section of the documentation. +تاتر همچنین می توانید در دیگر خانواده سیستم عامل کار. مشاهده اطلاعات در [شروع کار](../getting_started/index.md) بخش از اسناد و مدارک. diff --git a/docs/fa/operations/server_configuration_parameters/index.md b/docs/fa/operations/server_configuration_parameters/index.md new file mode 100644 index 00000000000..308eb9cc8b3 --- /dev/null +++ b/docs/fa/operations/server_configuration_parameters/index.md @@ -0,0 +1,19 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_folder_title: Server Configuration Parameters +toc_priority: 54 +toc_title: "\u0645\u0639\u0631\u0641\u06CC \u0634\u0631\u06A9\u062A" +--- + +# پارامترهای پیکربندی سرور {#server-settings} + +این بخش شامل شرح تنظیمات سرور است که نمی تواند در سطح جلسه یا پرس و جو تغییر کند. + +این تنظیمات در ذخیره می شود `config.xml` فایل بر روی سرور کلیک. + +تنظیمات دیگر در توصیف “[تنظیمات](../settings/index.md#settings)” بخش. + +قبل از مطالعه تنظیمات, خواندن [پروندههای پیکربندی](../configuration_files.md#configuration_files) بخش و توجه داشته باشید استفاده از تعویض ( `incl` و `optional` صفات). 
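
For illustration only, a hypothetical substitution setup using the `incl` and `optional` attributes mentioned above (the element and file names follow common convention and are not requirements):

``` bash
# The substitution value lives in the substitutions file (/etc/metrika.xml
# by default; configurable via include_from).
cat > /etc/metrika.xml <<'EOF'
<yandex>
    <zookeeper-servers>
        <node><host>zk1.example.com</host><port>2181</port></node>
    </zookeeper-servers>
</yandex>
EOF

# config.xml then references the substitution by name; optional="true"
# keeps a missing substitution from being recorded in the server log:
#   <zookeeper incl="zookeeper-servers" optional="true" />
```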
+ +[مقاله اصلی](https://clickhouse.tech/docs/en/operations/server_configuration_parameters/) diff --git a/docs/fa/operations/server_configuration_parameters/settings.md b/docs/fa/operations/server_configuration_parameters/settings.md new file mode 100644 index 00000000000..a99799b59b9 --- /dev/null +++ b/docs/fa/operations/server_configuration_parameters/settings.md @@ -0,0 +1,873 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 57 +toc_title: "\u062A\u0646\u0638\u06CC\u0645\u0627\u062A \u06A9\u0627\u0631\u06AF\u0632\ + \u0627\u0631" +--- + +# تنظیمات کارگزار {#server-settings} + +## ساختن و احراز هویت اکانتهای دستگاه {#builtin-dictionaries-reload-interval} + +فاصله در ثانیه قبل از بارگذاری ساخته شده است در لغت نامه. + +مخزن بارگذاری مجدد ساخته شده است در لغت نامه در هر ثانیه ایکس. این امکان ویرایش واژهنامهها را فراهم میکند “on the fly” بدون راه اندازی مجدد سرور. + +مقدار پیش فرض: 3600. + +**مثال** + +``` xml +3600 +``` + +## فشردهسازی {#server-settings-compression} + +تنظیمات فشرده سازی داده ها برای [ادغام](../../engines/table_engines/mergetree_family/mergetree.md)- جدول موتور . + +!!! warning "اخطار" + اگر شما فقط شروع به استفاده از خانه کلیک استفاده نکنید. + +قالب پیکربندی: + +``` xml + + + ... + ... + ... + + ... + +``` + +`` زمینه: + +- `min_part_size` – The minimum size of a data part. +- `min_part_size_ratio` – The ratio of the data part size to the table size. +- `method` – Compression method. Acceptable values: `lz4` یا `zstd`. + +شما می توانید چند پیکربندی کنید `` بخش. + +اقدامات زمانی که شرایط ملاقات می شوند: + +- اگر بخشی از داده ها منطبق یک مجموعه شرایط, تاتر با استفاده از روش فشرده سازی مشخص. +- اگر یک بخش داده منطبق مجموعه شرایط متعدد, خانه رعیتی با استفاده از اولین مجموعه شرایط همسان. + +اگر هیچ شرایطی برای یک بخش داده ملاقات, خانه عروسکی با استفاده از `lz4` فشردهسازی. + +**مثال** + +``` xml + + + 10000000000 + 0.01 + zstd + + +``` + +## & تنظیمات {#default-database} + +پایگاه داده به طور پیش فرض. + +برای دریافت یک لیست از پایگاه داده, استفاده از [SHOW DATABASES](../../sql_reference/statements/show.md#show-databases) پرس و جو. + +**مثال** + +``` xml +default +``` + +## قصور {#default-profile} + +تنظیمات پیش فرض مشخصات. + +پروفایل های تنظیمات در فایل مشخص شده در پارامتر واقع شده است `user_config`. + +**مثال** + +``` xml +default +``` + +## دیکشنامهای {#server_configuration_parameters-dictionaries_config} + +مسیر به فایل پیکربندی برای لغت نامه های خارجی. + +مسیر: + +- مشخص کردن مسیر مطلق و یا مسیر نسبت به فایل پیکربندی سرور. +- مسیر می تواند حاوی نویسه عام \* و?. + +همچنین نگاه کنید به “[واژهنامهها خارجی](../../sql_reference/dictionaries/external_dictionaries/external_dicts.md)”. + +**مثال** + +``` xml +*_dictionary.xml +``` + +## \_بارگیری کامل {#server_configuration_parameters-dictionaries_lazy_load} + +بارگذاری تنبل از لغت نامه. + +اگر `true` سپس هر فرهنگ لغت در اولین استفاده ایجاد می شود. اگر ایجاد فرهنگ لغت شکست خورده, تابع بود که با استفاده از فرهنگ لغت می اندازد یک استثنا. + +اگر `false`, تمام لغت نامه ها ایجاد می شوند زمانی که سرور شروع می شود, و اگر یک خطا وجود دارد, سرور خاموش. + +به طور پیش فرض است `true`. + +**مثال** + +``` xml +true +``` + +## قالب\_شکلمات شیمی {#server_configuration_parameters-format_schema_path} + +مسیر به دایرکتوری با طرح برای داده های ورودی, مانند طرحواره برای [کاپپروتو](../../interfaces/formats.md#capnproto) قالب. 
+ +**مثال** + +``` xml + + format_schemas/ +``` + +## گرافیت {#server_configuration_parameters-graphite} + +ارسال داده به [گرافیت](https://github.com/graphite-project). + +تنظیمات: + +- host – The Graphite server. +- port – The port on the Graphite server. +- interval – The interval for sending, in seconds. +- timeout – The timeout for sending data, in seconds. +- root\_path – Prefix for keys. +- metrics – Sending data from the [سیستم.متریک](../../operations/system_tables.md#system_tables-metrics) جدول +- events – Sending deltas data accumulated for the time period from the [سیستم.رویدادها](../../operations/system_tables.md#system_tables-events) جدول +- events\_cumulative – Sending cumulative data from the [سیستم.رویدادها](../../operations/system_tables.md#system_tables-events) جدول +- asynchronous\_metrics – Sending data from the [سیستم.\_نامهنویسی ناهمزمان](../../operations/system_tables.md#system_tables-asynchronous_metrics) جدول + +شما می توانید چند پیکربندی کنید `` بند. برای مثال شما می توانید از این برای ارسال داده های مختلف در فواصل مختلف استفاده کنید. + +**مثال** + +``` xml + + localhost + 42000 + 0.1 + 60 + one_min + true + true + false + true + +``` + +## لغزش \_ نمودار {#server_configuration_parameters-graphite-rollup} + +تنظیمات برای نازک شدن داده ها برای گرافیت. + +برای اطلاعات بیشتر, دیدن [نمودار](../../engines/table_engines/mergetree_family/graphitemergetree.md). + +**مثال** + +``` xml + + + max + + 0 + 60 + + + 3600 + 300 + + + 86400 + 3600 + + + +``` + +## \_وارد کردن/پشتیبانی {#http-porthttps-port} + +درگاه برای اتصال به کارساز بالای صفحه) ها (. + +اگر `https_port` مشخص شده است, [openSSL](#server_configuration_parameters-openssl) باید پیکربندی شود. + +اگر `http_port` مشخص شده است, پیکربندی اپنسسل نادیده گرفته شده است حتی اگر قرار است. + +**مثال** + +``` xml +0000 +``` + +## نقلقولهای جدید از این نویسنده {#server_configuration_parameters-http_server_default_response} + +صفحه ای که به طور پیش فرض نشان داده شده است زمانی که شما دسترسی به سرور قام کلیک. +مقدار پیش فرض است “Ok.” (با خوراک خط در پایان) + +**مثال** + +باز می شود `https://tabix.io/` هنگام دسترسی `http://localhost: http_port`. + +``` xml + +
    ]]> +
    +``` + +## include\_from {#server_configuration_parameters-include_from} + +مسیر به فایل با تعویض. + +برای کسب اطلاعات بیشتر به بخش مراجعه کنید “[پروندههای پیکربندی](../configuration_files.md#configuration_files)”. + +**مثال** + +``` xml +/etc/metrica.xml +``` + +## \_صادر کردن {#interserver-http-port} + +پورت برای تبادل اطلاعات بین سرور های فاحشه خانه. + +**مثال** + +``` xml +9009 +``` + +## حذف جستجو {#interserver-http-host} + +نام میزبان است که می تواند توسط سرور های دیگر برای دسترسی به این سرور استفاده می شود. + +اگر حذف, این است که در همان راه به عنوان تعریف `hostname-f` فرمان. + +مفید برای شکستن دور از یک رابط شبکه خاص. + +**مثال** + +``` xml +example.yandex.ru +``` + +## پتانسیلهای متقابل {#server-settings-interserver-http-credentials} + +نام کاربری و رمز عبور مورد استفاده برای تصدیق در طول [تکرار](../../engines/table_engines/mergetree_family/replication.md) با تکرار \* موتورهای. این اعتبار تنها برای ارتباط بین کپی استفاده می شود و ربطی به اعتبار برای مشتریان خانه عروسکی هستند. سرور چک کردن این اعتبار برای اتصال کپی و استفاده از اعتبار همان هنگام اتصال به دیگر کپی. بنابراین, این اعتبار باید همین کار را برای همه کپی در یک خوشه مجموعه. +به طور پیش فرض احراز هویت استفاده نمی شود. + +این بخش شامل پارامترهای زیر است: + +- `user` — username. +- `password` — password. + +**مثال** + +``` xml + + admin + 222 + +``` + +## حفاظت از حریم خصوصی {#keep-alive-timeout} + +تعداد ثانیه که تاتر منتظر درخواست های دریافتی قبل از بستن اتصال. به طور پیش فرض به 3 ثانیه. + +**مثال** + +``` xml +3 +``` + +## \_نوست فهرست {#server_configuration_parameters-listen_host} + +محدودیت در میزبان که درخواست می توانید از. اگر می خواهید سرور برای پاسخ به همه انها مشخص شود `::`. + +مثالها: + +``` xml +::1 +127.0.0.1 +``` + +## چوبگر {#server_configuration_parameters-logger} + +تنظیمات ورود به سیستم. + +کلید: + +- level – Logging level. Acceptable values: `trace`, `debug`, `information`, `warning`, `error`. +- log – The log file. Contains all the entries according to `level`. +- errorlog – Error log file. +- size – Size of the file. Applies to `log`و`errorlog`. هنگامی که فایل می رسد `size`, بایگانی کلیک هوس و تغییر نام, و ایجاد یک فایل ورود به سیستم جدید را در خود جای. +- count – The number of archived log files that ClickHouse stores. + +**مثال** + +``` xml + + trace + /var/log/clickhouse-server/clickhouse-server.log + /var/log/clickhouse-server/clickhouse-server.err.log + 1000M + 10 + +``` + +نوشتن به وبلاگ نیز پشتیبانی می کند. پیکربندی مثال: + +``` xml + + 1 + +
    syslog.remote:10514
    + myhost.local + LOG_LOCAL6 + syslog +
    +
    +``` + +کلید: + +- use\_syslog — Required setting if you want to write to the syslog. +- address — The host\[:port\] of syslogd. If omitted, the local daemon is used. +- hostname — Optional. The name of the host that logs are sent from. +- facility — [کلمه کلیدی تسهیلات سیسلوگ](https://en.wikipedia.org/wiki/Syslog#Facility) در حروف بزرگ با “LOG\_” پیشوند: (`LOG_USER`, `LOG_DAEMON`, `LOG_LOCAL3`, و به همین ترتیب). + مقدار پیشفرض: `LOG_USER` اگر `address` مشخص شده است, `LOG_DAEMON otherwise.` +- format – Message format. Possible values: `bsd` و `syslog.` + +## & کلاندارها {#macros} + +تعویض پارامتر برای جداول تکرار. + +می توان حذف اگر جداول تکرار استفاده نمی شود. + +برای کسب اطلاعات بیشتر به بخش مراجعه کنید “[ایجاد جداول تکرار شده](../../engines/table_engines/mergetree_family/replication.md)”. + +**مثال** + +``` xml + +``` + +## نشاندار کردن \_چ\_سیز {#server-mark-cache-size} + +اندازه تقریبی (به بایت) کش علامت های استفاده شده توسط موتورهای جدول [ادغام](../../engines/table_engines/mergetree_family/mergetree.md) خانواده + +کش برای سرور به اشتراک گذاشته و حافظه به عنوان مورد نیاز اختصاص داده است. اندازه کش باید حداقل 5368709120 باشد. + +**مثال** + +``` xml +5368709120 +``` + +## م\_قیاس تصویر {#max-concurrent-queries} + +حداکثر تعداد درخواست به طور همزمان پردازش. + +**مثال** + +``` xml +100 +``` + +## \_تنامههای بیشینه {#max-connections} + +حداکثر تعداد اتصالات ورودی. + +**مثال** + +``` xml +4096 +``` + +## \_موضوعات بیشینه {#max-open-files} + +حداکثر تعداد فایل های باز. + +به طور پیش فرض: `maximum`. + +ما توصیه می کنیم با استفاده از این گزینه در سیستم عامل مک ایکس از `getrlimit()` تابع یک مقدار نادرست می گرداند. + +**مثال** + +``` xml +262144 +``` + +## حداکثر\_طب\_ضز\_توقف {#max-table-size-to-drop} + +محدودیت در حذف جداول. + +اگر اندازه یک [ادغام](../../engines/table_engines/mergetree_family/mergetree.md) جدول بیش از `max_table_size_to_drop` با استفاده از پرس و جو قطره نمی توانید حذف کنید. + +اگر شما هنوز هم نیاز به حذف جدول بدون راه اندازی مجدد سرور کلیک, ایجاد `/flags/force_drop_table` فایل و اجرای پرس و جو قطره. + +مقدار پیش فرض: 50 گیگابایت. + +ارزش 0 بدان معنی است که شما می توانید تمام جداول بدون هیچ گونه محدودیت حذف. + +**مثال** + +``` xml +0 +``` + +## ادغام {#server_configuration_parameters-merge_tree} + +تنظیم زیبا برای جداول در [ادغام](../../engines/table_engines/mergetree_family/mergetree.md). + +برای کسب اطلاعات بیشتر, دیدن ادغام.فایل هدر ساعت. + +**مثال** + +``` xml + + 5 + +``` + +## openSSL {#server_configuration_parameters-openssl} + +SSL client/server configuration. + +پشتیبانی از اس اس ال توسط `libpoco` کتابخونه. رابط در فایل شرح داده شده است [سوسمنگر.ه](https://github.com/ClickHouse-Extras/poco/blob/master/NetSSL_OpenSSL/include/Poco/Net/SSLManager.h) + +کلید برای تنظیمات سرور / مشتری: + +- privateKeyFile – The path to the file with the secret key of the PEM certificate. The file may contain a key and certificate at the same time. +- certificateFile – The path to the client/server certificate file in PEM format. You can omit it if `privateKeyFile` شامل گواهی. +- caConfig – The path to the file or directory that contains trusted root certificates. +- verificationMode – The method for checking the node's certificates. Details are in the description of the [متن](https://github.com/ClickHouse-Extras/poco/blob/master/NetSSL_OpenSSL/include/Poco/Net/Context.h) کلاس. مقادیر ممکن: `none`, `relaxed`, `strict`, `once`. +- verificationDepth – The maximum length of the verification chain. Verification will fail if the certificate chain length exceeds the set value. 
+- loadDefaultCAFile – Indicates that built-in CA certificates for OpenSSL will be used. Acceptable values: `true`, `false`. \| +- cipherList – Supported OpenSSL encryptions. For example: `ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH`. +- cacheSessions – Enables or disables caching sessions. Must be used in combination with `sessionIdContext`. مقادیر قابل قبول: `true`, `false`. +- sessionIdContext – A unique set of random characters that the server appends to each generated identifier. The length of the string must not exceed `SSL_MAX_SSL_SESSION_ID_LENGTH`. این پارامتر همیشه توصیه می شود از این کمک می کند تا جلوگیری از مشکلات هر دو اگر سرور حافظه پنهان جلسه و اگر مشتری درخواست ذخیره. مقدار پیشفرض: `${application.name}`. +- sessionCacheSize – The maximum number of sessions that the server caches. Default value: 1024\*20. 0 – Unlimited sessions. +- sessionTimeout – Time for caching the session on the server. +- extendedVerification – Automatically extended verification of certificates after the session ends. Acceptable values: `true`, `false`. +- requireTLSv1 – Require a TLSv1 connection. Acceptable values: `true`, `false`. +- requireTLSv1\_1 – Require a TLSv1.1 connection. Acceptable values: `true`, `false`. +- requireTLSv1 – Require a TLSv1.2 connection. Acceptable values: `true`, `false`. +- fips – Activates OpenSSL FIPS mode. Supported if the library's OpenSSL version supports FIPS. +- privateKeyPassphraseHandler – Class (PrivateKeyPassphraseHandler subclass) that requests the passphrase for accessing the private key. For example: ``, `KeyFileHandler`, `test`, ``. +- invalidCertificateHandler – Class (a subclass of CertificateHandler) for verifying invalid certificates. For example: ` ConsoleCertificateHandler ` . +- disableProtocols – Protocols that are not allowed to use. +- preferServerCiphers – Preferred server ciphers on the client. + +**مثال تنظیمات:** + +``` xml + + + + /etc/clickhouse-server/server.crt + /etc/clickhouse-server/server.key + + /etc/clickhouse-server/dhparam.pem + none + true + true + sslv2,sslv3 + true + + + true + true + sslv2,sslv3 + true + + + + RejectCertificateHandler + + + +``` + +## \_خروج {#server_configuration_parameters-part-log} + +وقایع ورود به سیستم که با مرتبط [ادغام](../../engines/table_engines/mergetree_family/mergetree.md). برای مثال, اضافه کردن یا ادغام داده ها. شما می توانید ورود به سیستم برای شبیه سازی الگوریتم های ادغام و مقایسه ویژگی های خود استفاده کنید. شما می توانید روند ادغام تجسم. + +نمایش داده شد در سیستم وارد [سیستم.\_خروج](../../operations/system_tables.md#system_tables-part-log) جدول, نه در یک فایل جداگانه. شما می توانید نام این جدول را در پیکربندی `table` پارامتر (پایین را ببینید). + +از پارامترهای زیر برای پیکربندی ورود استفاده کنید: + +- `database` – Name of the database. +- `table` – Name of the system table. +- `partition_by` – Sets a [کلید پارتیشن بندی سفارشی](../../engines/table_engines/mergetree_family/custom_partitioning_key.md). +- `flush_interval_milliseconds` – Interval for flushing data from the buffer in memory to the table. + +**مثال** + +``` xml + + system + part_log
    + toMonday(event_date) + 7500 +
    +``` + +## مسیر {#server_configuration_parameters-path} + +مسیر به دایرکتوری حاوی داده. + +!!! note "یادداشت" + اسلش الزامی است. + +**مثال** + +``` xml +/var/lib/clickhouse/ +``` + +## \_خروج {#server_configuration_parameters-query-log} + +تنظیم برای ورود به سیستم نمایش داده شد با دریافت [\_ترکیب = 1](../settings/settings.md) تنظیمات. + +نمایش داده شد در سیستم وارد [سیستم.\_خروج](../../operations/system_tables.md#system_tables-query_log) جدول, نه در یک فایل جداگانه. شما می توانید نام جدول را در `table` پارامتر (پایین را ببینید). + +از پارامترهای زیر برای پیکربندی ورود استفاده کنید: + +- `database` – Name of the database. +- `table` – Name of the system table the queries will be logged in. +- `partition_by` – Sets a [کلید پارتیشن بندی سفارشی](../../engines/table_engines/mergetree_family/custom_partitioning_key.md) برای یک جدول. +- `flush_interval_milliseconds` – Interval for flushing data from the buffer in memory to the table. + +اگه جدول وجود نداشته باشه. اگر ساختار ورود به سیستم پرس و جو تغییر زمانی که سرور فاحشه خانه به روز شد, جدول با ساختار قدیمی تغییر نام داد, و یک جدول جدید به طور خودکار ایجاد شده است. + +**مثال** + +``` xml + + system + query_log
    + toMonday(event_date) + 7500 +
    +``` + +## \_ر\_خروج {#server_configuration_parameters-query-thread-log} + +تنظیم برای ورود به سیستم موضوعات نمایش داده شد دریافت شده با [& پایین: 1](../settings/settings.md#settings-log-query-threads) تنظیمات. + +نمایش داده شد در سیستم وارد [سیستم.\_ر\_خروج](../../operations/system_tables.md#system_tables-query-thread-log) جدول, نه در یک فایل جداگانه. شما می توانید نام جدول را در `table` پارامتر (پایین را ببینید). + +از پارامترهای زیر برای پیکربندی ورود استفاده کنید: + +- `database` – Name of the database. +- `table` – Name of the system table the queries will be logged in. +- `partition_by` – Sets a [کلید پارتیشن بندی سفارشی](../../engines/table_engines/mergetree_family/custom_partitioning_key.md) برای یک جدول سیستم. +- `flush_interval_milliseconds` – Interval for flushing data from the buffer in memory to the table. + +اگه جدول وجود نداشته باشه. اگر ساختار پرس و جو موضوع ورود به سیستم تغییر زمانی که سرور فاحشه خانه به روز شد, جدول با ساختار قدیمی تغییر نام داد, و یک جدول جدید به طور خودکار ایجاد شده است. + +**مثال** + +``` xml + + system + query_thread_log
    + toMonday(event_date) + 7500 +
    +``` + +## \_قطع {#server_configuration_parameters-trace_log} + +تنظیمات برای [\_قطع](../../operations/system_tables.md#system_tables-trace_log) عملیات جدول سیستم. + +پارامترها: + +- `database` — Database for storing a table. +- `table` — Table name. +- `partition_by` — [کلید پارتیشن بندی سفارشی](../../engines/table_engines/mergetree_family/custom_partitioning_key.md) برای یک جدول سیستم. +- `flush_interval_milliseconds` — Interval for flushing data from the buffer in memory to the table. + +فایل پیکربندی پیش فرض سرور `config.xml` شامل بخش تنظیمات زیر است: + +``` xml + + system + trace_log
    + toYYYYMM(event_date) + 7500 +
    +``` + +## \_منبع {#query-masking-rules} + +قوانین مبتنی بر عبارت منظم, خواهد شد که به نمایش داده شد و همچنین تمام پیام های ورود به سیستم قبل از ذخیره سازی در سیاهههای مربوط به سرور اعمال, +`system.query_log`, `system.text_log`, `system.processes` جدول, و در سیاهههای مربوط به مشتری ارسال. که اجازه می دهد تا جلوگیری از +نشت اطلاعات حساس از پرس و جو گذاشتن (مانند نام, ایمیل, شخصی +شناسه و یا شماره کارت اعتباری) به سیاهههای مربوط. + +**مثال** + +``` xml + + + hide SSN + (^|\D)\d{3}-\d{2}-\d{4}($|\D) + 000-00-0000 + + +``` + +زمینه پیکربندی: +- `name` - نام قانون (اختیاری) +- `regexp` - تکرار 2 عبارت منظم سازگار (اجباری) +- `replace` - رشته جایگزینی برای داده های حساس (اختیاری به طور پیش فرض-شش ستاره) + +قوانین پوشش به کل پرس و جو اعمال می شود (برای جلوگیری از نشت اطلاعات حساس از نمایش داده شد ناقص / غیر تجزیه). + +`system.events` جدول شمارنده `QueryMaskingRulesMatch` که تعداد کلی از پرس و جو پوشش قوانین مسابقات. + +برای نمایش داده شد توزیع هر سرور باید به طور جداگانه پیکربندی شود, در غیر این صورت, فرعی به دیگر منتقل +گره ها بدون پوشش ذخیره می شوند. + +## دور دور {#server-settings-remote-servers} + +پیکربندی خوشه های مورد استفاده توسط [توزیع شده](../../engines/table_engines/special/distributed.md) موتور جدول و توسط `cluster` تابع جدول. + +**مثال** + +``` xml + +``` + +برای ارزش `incl` ویژگی, بخش را ببینید “[پروندههای پیکربندی](../configuration_files.md#configuration_files)”. + +**همچنین نگاه کنید** + +- [در حال بارگذاری](../settings/settings.md#settings-skip_unavailable_shards) + +## منطقهی زمانی {#server_configuration_parameters-timezone} + +منطقه زمانی سرور. + +مشخص شده به عنوان شناساگر ایانا برای منطقه زمانی یو تی سی یا موقعیت جغرافیایی (مثلا افریقا / ابیجان). + +منطقه زمانی برای تبدیل بین فرمت های رشته و تاریخ ساعت لازم است که زمینه های تاریخ ساعت خروجی به فرمت متن (چاپ شده بر روی صفحه نمایش و یا در یک فایل), و هنگامی که گرفتن تاریخ ساعت از یک رشته. علاوه بر این, منطقه زمانی در توابع است که با زمان و تاریخ کار می کنند در صورتی که منطقه زمانی در پارامترهای ورودی دریافت نمی استفاده. + +**مثال** + +``` xml +Europe/Moscow +``` + +## \_صادر کردن {#server_configuration_parameters-tcp_port} + +پورت برای برقراری ارتباط با مشتریان بیش از پروتکل تی سی پی. + +**مثال** + +``` xml +9000 +``` + +## \_شروع مجدد {#server_configuration_parameters-tcp_port-secure} + +پورت تی سی پی برای برقراری ارتباط امن با مشتریان. با استفاده از [OpenSSL](#server_configuration_parameters-openssl) تنظیمات. + +**مقادیر ممکن** + +عدد صحیح مثبت. + +**مقدار پیشفرض** + +``` xml +9440 +``` + +## \_وارد کردن {#server_configuration_parameters-mysql_port} + +پورت برای برقراری ارتباط با مشتریان بیش از پروتکل خروجی زیر. + +**مقادیر ممکن** + +عدد صحیح مثبت. + +مثال + +``` xml +9004 +``` + +## \_مخفی کردن {#server-settings-tmp_path} + +مسیر به داده های موقت برای پردازش نمایش داده شد بزرگ است. + +!!! note "یادداشت" + اسلش الزامی است. + +**مثال** + +``` xml +/var/lib/clickhouse/tmp/ +``` + +## پیدا کردن موقعیت جغرافیایی از روی شبکه {#server-settings-tmp-policy} + +سیاست از [`storage_configuration`](../../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-multiple-volumes) برای ذخیره فایل های موقت. +اگر تنظیم نشود [`tmp_path`](#server-settings-tmp_path) استفاده شده است, در غیر این صورت نادیده گرفته شده است. + +!!! 
note "یادداشت" + - `move_factor` نادیده گرفته شده است +- `keep_free_space_bytes` نادیده گرفته شده است +- `max_data_part_size_bytes` نادیده گرفته شده است +- شما باید دقیقا یک جلد در این سیاست داشته باشید + +## \_بالا {#server-settings-uncompressed_cache_size} + +اندازه کش (به بایت) برای داده های غیر فشرده استفاده شده توسط موتورهای جدول از [ادغام](../../engines/table_engines/mergetree_family/mergetree.md). + +یک کش مشترک برای سرور وجود دارد. حافظه در تقاضا اختصاص داده. کش در صورتی که گزینه استفاده می شود [همترازی پایین](../settings/settings.md#setting-use_uncompressed_cache) فعال است. + +کش غیر فشرده سودمند برای نمایش داده شد بسیار کوتاه در موارد فردی است. + +**مثال** + +``` xml +8589934592 +``` + +## \_مخفی کردن \_صفحه {#server_configuration_parameters-user_files_path} + +دایرکتوری با فایل های کاربر. مورد استفاده در تابع جدول [پرونده()](../../sql_reference/table_functions/file.md). + +**مثال** + +``` xml +/var/lib/clickhouse/user_files/ +``` + +## \_تنفورد {#users-config} + +مسیر پروندهی شامل: + +- تنظیمات کاربر. +- حقوق دسترسی. +- پروفایل تنظیمات. +- تنظیمات سهمیه. + +**مثال** + +``` xml +users.xml +``` + +## باغ وحش {#server-settings_zookeeper} + +شامل تنظیماتی است که اجازه می دهد تا کلیک برای ارتباط برقرار کردن با یک [باغ وحش](http://zookeeper.apache.org/) خوشه خوشه. + +کلیک هاوس با استفاده از باغ وحش برای ذخیره سازی ابرداده از کپی در هنگام استفاده از جداول تکرار. اگر جداول تکرار استفاده نمی شود, این بخش از پارامترها را می توان حذف. + +این بخش شامل پارامترهای زیر است: + +- `node` — ZooKeeper endpoint. You can set multiple endpoints. + + به عنوان مثال: + + + +``` xml + + example_host + 2181 + +``` + + The `index` attribute specifies the node order when trying to connect to the ZooKeeper cluster. + +- `session_timeout` — Maximum timeout for the client session in milliseconds. +- `root` — The [حالت](http://zookeeper.apache.org/doc/r3.5.5/zookeeperOver.html#Nodes+and+ephemeral+nodes) استفاده شده است که به عنوان ریشه برای znodes استفاده شده توسط ClickHouse سرور. اختیاری. +- `identity` — User and password, that can be required by ZooKeeper to give access to requested znodes. Optional. + +**پیکربندی نمونه** + +``` xml + + + example1 + 2181 + + + example2 + 2181 + + 30000 + 10000 + + /path/to/zookeeper/node + + user:password + +``` + +**همچنین نگاه کنید** + +- [تکرار](../../engines/table_engines/mergetree_family/replication.md) +- [راهنمای برنامه نویس باغ وحش](http://zookeeper.apache.org/doc/current/zookeeperProgrammers.html) + +## سرویس پرداخت درونبرنامهای پلی {#server-settings-use_minimalistic_part_header_in_zookeeper} + +روش ذخیره سازی برای هدر بخش داده ها در باغ وحش. + +این تنظیم فقط در مورد `MergeTree` خانواده این را می توان مشخص کرد: + +- در سطح جهانی در [ادغام](#server_configuration_parameters-merge_tree) بخش از `config.xml` پرونده. + + تاتر با استفاده از تنظیمات برای تمام جداول بر روی سرور. شما می توانید تنظیمات را در هر زمان تغییر دهید. جداول موجود رفتار خود را تغییر دهید زمانی که تنظیمات تغییر می کند. + +- برای هر جدول. + + هنگام ایجاد یک جدول مربوطه را مشخص کنید [تنظیم موتور](../../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-creating-a-table). رفتار یک جدول موجود با این تنظیم تغییر نمی کند, حتی اگر تغییرات تنظیم جهانی. + +**مقادیر ممکن** + +- 0 — Functionality is turned off. +- 1 — Functionality is turned on. + +اگر `use_minimalistic_part_header_in_zookeeper = 1` پس [تکرار](../../engines/table_engines/mergetree_family/replication.md) جداول هدر قطعات داده را با استفاده از یک واحد ذخیره می کنند `znode`. 
اگر جدول شامل بسیاری از ستون, این روش ذخیره سازی به طور قابل توجهی کاهش می دهد حجم داده های ذخیره شده در باغ وحش. + +!!! attention "توجه" + پس از استفاده از `use_minimalistic_part_header_in_zookeeper = 1` شما نمیتوانید سرور کلیک را به نسخه ای که از این تنظیم پشتیبانی نمی کند ارتقا دهید. مراقب باشید در هنگام به روز رسانی تاتر بر روی سرور در یک خوشه. همه سرورها را در یک زمان ارتقا ندهید. این امن تر است برای تست نسخه های جدید از خانه رعیتی در یک محیط تست, و یا فقط در چند سرور از یک خوشه. + + Data part headers already stored with this setting can't be restored to their previous (non-compact) representation. + +**مقدار پیشفرض:** 0. + +## نمایش سایت {#server-settings-disable-internal-dns-cache} + +غیر فعال کش دی ان اس داخلی. توصیه شده برای کارخانه کلیک در سیستم +با زیرساخت های اغلب در حال تغییر مانند کوبرنتس. + +**مقدار پیشفرض:** 0. + +## پیدا کردن موقعیت جغرافیایی از روی شبکه {#server-settings-dns-cache-update-period} + +دوره به روز رسانی نشانی های اینترنتی ذخیره شده در کش دی ان اس داخلی خانه (در ثانیه). +به روز رسانی همزمان انجام, در یک موضوع سیستم جداگانه. + +**مقدار پیشفرض**: 15. + +[مقاله اصلی](https://clickhouse.tech/docs/en/operations/server_configuration_parameters/settings/) diff --git a/docs/fa/operations/server_settings/index.md b/docs/fa/operations/server_settings/index.md deleted file mode 100644 index 0dc8e5cbf22..00000000000 --- a/docs/fa/operations/server_settings/index.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -en_copy: true ---- - -# Server configuration parameters {#server-settings} - -This section contains descriptions of server settings that cannot be changed at the session or query level. - -These settings are stored in the `config.xml` file on the ClickHouse server. - -Other settings are described in the “[Settings](../settings/index.md#settings)” section. - -Before studying the settings, read the [Configuration files](../configuration_files.md#configuration_files) section and note the use of substitutions (the `incl` and `optional` attributes). - -[Original article](https://clickhouse.tech/docs/en/operations/server_settings/) diff --git a/docs/fa/operations/server_settings/settings.md b/docs/fa/operations/server_settings/settings.md deleted file mode 100644 index bfc1aca7217..00000000000 --- a/docs/fa/operations/server_settings/settings.md +++ /dev/null @@ -1,869 +0,0 @@ ---- -en_copy: true ---- - -# Server Settings {#server-settings} - -## builtin\_dictionaries\_reload\_interval {#builtin-dictionaries-reload-interval} - -The interval in seconds before reloading built-in dictionaries. - -ClickHouse reloads built-in dictionaries every x seconds. This makes it possible to edit dictionaries “on the fly” without restarting the server. - -Default value: 3600. - -**Example** - -``` xml -3600 -``` - -## compression {#server-settings-compression} - -Data compression settings for [MergeTree](../table_engines/mergetree.md)-engine tables. - -!!! warning "Warning" - Don’t use it if you have just started using ClickHouse. - -Configuration template: - -``` xml - - - ... - ... - ... - - ... - -``` - -`` fields: - -- `min_part_size` – The minimum size of a data part. -- `min_part_size_ratio` – The ratio of the data part size to the table size. -- `method` – Compression method. Acceptable values: `lz4` or `zstd`. - -You can configure multiple `` sections. - -Actions when conditions are met: - -- If a data part matches a condition set, ClickHouse uses the specified compression method. -- If a data part matches multiple condition sets, ClickHouse uses the first matched condition set. 
- -If no conditions met for a data part, ClickHouse uses the `lz4` compression. - -**Example** - -``` xml - - - 10000000000 - 0.01 - zstd - - -``` - -## default\_database {#default-database} - -The default database. - -To get a list of databases, use the [SHOW DATABASES](../../query_language/show.md#show-databases) query. - -**Example** - -``` xml -default -``` - -## default\_profile {#default-profile} - -Default settings profile. - -Settings profiles are located in the file specified in the parameter `user_config`. - -**Example** - -``` xml -default -``` - -## dictionaries\_config {#server_settings-dictionaries_config} - -The path to the config file for external dictionaries. - -Path: - -- Specify the absolute path or the path relative to the server config file. -- The path can contain wildcards \* and ?. - -See also “[External dictionaries](../../query_language/dicts/external_dicts.md)”. - -**Example** - -``` xml -*_dictionary.xml -``` - -## dictionaries\_lazy\_load {#server_settings-dictionaries_lazy_load} - -Lazy loading of dictionaries. - -If `true`, then each dictionary is created on first use. If dictionary creation failed, the function that was using the dictionary throws an exception. - -If `false`, all dictionaries are created when the server starts, and if there is an error, the server shuts down. - -The default is `true`. - -**Example** - -``` xml -true -``` - -## format\_schema\_path {#server_settings-format_schema_path} - -The path to the directory with the schemes for the input data, such as schemas for the [CapnProto](../../interfaces/formats.md#capnproto) format. - -**Example** - -``` xml - - format_schemas/ -``` - -## graphite {#server_settings-graphite} - -Sending data to [Graphite](https://github.com/graphite-project). - -Settings: - -- host – The Graphite server. -- port – The port on the Graphite server. -- interval – The interval for sending, in seconds. -- timeout – The timeout for sending data, in seconds. -- root\_path – Prefix for keys. -- metrics – Sending data from the [system.metrics](../system_tables.md#system_tables-metrics) table. -- events – Sending deltas data accumulated for the time period from the [system.events](../system_tables.md#system_tables-events) table. -- events\_cumulative – Sending cumulative data from the [system.events](../system_tables.md#system_tables-events) table. -- asynchronous\_metrics – Sending data from the [system.asynchronous\_metrics](../system_tables.md#system_tables-asynchronous_metrics) table. - -You can configure multiple `` clauses. For instance, you can use this for sending different data at different intervals. - -**Example** - -``` xml - - localhost - 42000 - 0.1 - 60 - one_min - true - true - false - true - -``` - -## graphite\_rollup {#server_settings-graphite-rollup} - -Settings for thinning data for Graphite. - -For more details, see [GraphiteMergeTree](../table_engines/graphitemergetree.md). - -**Example** - -``` xml - - - max - - 0 - 60 - - - 3600 - 300 - - - 86400 - 3600 - - - -``` - -## http\_port/https\_port {#http-porthttps-port} - -The port for connecting to the server over HTTP(s). - -If `https_port` is specified, [openSSL](#server_settings-openssl) must be configured. - -If `http_port` is specified, the OpenSSL configuration is ignored even if it is set. - -**Example** - -``` xml -0000 -``` - -## http\_server\_default\_response {#server_settings-http_server_default_response} - -The page that is shown by default when you access the ClickHouse HTTP(s) server. 
-The default value is “Ok.” (with a line feed at the end)
-
-**Example**
-
-Opens `https://tabix.io/` when accessing `http://localhost: http_port`.
-
-``` xml
-<http_server_default_response>
-  <![CDATA[<html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>
]]>
-</http_server_default_response>
    -``` - -## include\_from {#server_settings-include_from} - -The path to the file with substitutions. - -For more information, see the section “[Configuration files](../configuration_files.md#configuration_files)”. - -**Example** - -``` xml -/etc/metrica.xml -``` - -## interserver\_http\_port {#interserver-http-port} - -Port for exchanging data between ClickHouse servers. - -**Example** - -``` xml -9009 -``` - -## interserver\_http\_host {#interserver-http-host} - -The hostname that can be used by other servers to access this server. - -If omitted, it is defined in the same way as the `hostname-f` command. - -Useful for breaking away from a specific network interface. - -**Example** - -``` xml -example.yandex.ru -``` - -## interserver\_http\_credentials {#server-settings-interserver-http-credentials} - -The username and password used to authenticate during [replication](../table_engines/replication.md) with the Replicated\* engines. These credentials are used only for communication between replicas and are unrelated to credentials for ClickHouse clients. The server is checking these credentials for connecting replicas and use the same credentials when connecting to other replicas. So, these credentials should be set the same for all replicas in a cluster. -By default, the authentication is not used. - -This section contains the following parameters: - -- `user` — username. -- `password` — password. - -**Example** - -``` xml - - admin - 222 - -``` - -## keep\_alive\_timeout {#keep-alive-timeout} - -The number of seconds that ClickHouse waits for incoming requests before closing the connection. Defaults to 3 seconds. - -**Example** - -``` xml -3 -``` - -## listen\_host {#server_settings-listen_host} - -Restriction on hosts that requests can come from. If you want the server to answer all of them, specify `::`. - -Examples: - -``` xml -::1 -127.0.0.1 -``` - -## logger {#server_settings-logger} - -Logging settings. - -Keys: - -- level – Logging level. Acceptable values: `trace`, `debug`, `information`, `warning`, `error`. -- log – The log file. Contains all the entries according to `level`. -- errorlog – Error log file. -- size – Size of the file. Applies to `log`and`errorlog`. Once the file reaches `size`, ClickHouse archives and renames it, and creates a new log file in its place. -- count – The number of archived log files that ClickHouse stores. - -**Example** - -``` xml - - trace - /var/log/clickhouse-server/clickhouse-server.log - /var/log/clickhouse-server/clickhouse-server.err.log - 1000M - 10 - -``` - -Writing to the syslog is also supported. Config example: - -``` xml - - 1 - -
-        <address>syslog.remote:10514</address>
-        <hostname>myhost.local</hostname>
-        <facility>LOG_LOCAL6</facility>
-        <format>syslog</format>
-    </syslog>
-</logger>
    -``` - -Keys: - -- use\_syslog — Required setting if you want to write to the syslog. -- address — The host\[:port\] of syslogd. If omitted, the local daemon is used. -- hostname — Optional. The name of the host that logs are sent from. -- facility — [The syslog facility keyword](https://en.wikipedia.org/wiki/Syslog#Facility) in uppercase letters with the “LOG\_” prefix: (`LOG_USER`, `LOG_DAEMON`, `LOG_LOCAL3`, and so on). - Default value: `LOG_USER` if `address` is specified, `LOG_DAEMON otherwise.` -- format – Message format. Possible values: `bsd` and `syslog.` - -## macros {#macros} - -Parameter substitutions for replicated tables. - -Can be omitted if replicated tables are not used. - -For more information, see the section “[Creating replicated tables](../../operations/table_engines/replication.md)”. - -**Example** - -``` xml - -``` - -## mark\_cache\_size {#server-mark-cache-size} - -Approximate size (in bytes) of the cache of marks used by table engines of the [MergeTree](../table_engines/mergetree.md) family. - -The cache is shared for the server and memory is allocated as needed. The cache size must be at least 5368709120. - -**Example** - -``` xml -5368709120 -``` - -## max\_concurrent\_queries {#max-concurrent-queries} - -The maximum number of simultaneously processed requests. - -**Example** - -``` xml -100 -``` - -## max\_connections {#max-connections} - -The maximum number of inbound connections. - -**Example** - -``` xml -4096 -``` - -## max\_open\_files {#max-open-files} - -The maximum number of open files. - -By default: `maximum`. - -We recommend using this option in Mac OS X since the `getrlimit()` function returns an incorrect value. - -**Example** - -``` xml -262144 -``` - -## max\_table\_size\_to\_drop {#max-table-size-to-drop} - -Restriction on deleting tables. - -If the size of a [MergeTree](../table_engines/mergetree.md) table exceeds `max_table_size_to_drop` (in bytes), you can’t delete it using a DROP query. - -If you still need to delete the table without restarting the ClickHouse server, create the `/flags/force_drop_table` file and run the DROP query. - -Default value: 50 GB. - -The value 0 means that you can delete all tables without any restrictions. - -**Example** - -``` xml -0 -``` - -## merge\_tree {#server_settings-merge_tree} - -Fine tuning for tables in the [MergeTree](../table_engines/mergetree.md). - -For more information, see the MergeTreeSettings.h header file. - -**Example** - -``` xml - - 5 - -``` - -## openSSL {#server_settings-openssl} - -SSL client/server configuration. - -Support for SSL is provided by the `libpoco` library. The interface is described in the file [SSLManager.h](https://github.com/ClickHouse-Extras/poco/blob/master/NetSSL_OpenSSL/include/Poco/Net/SSLManager.h) - -Keys for server/client settings: - -- privateKeyFile – The path to the file with the secret key of the PEM certificate. The file may contain a key and certificate at the same time. -- certificateFile – The path to the client/server certificate file in PEM format. You can omit it if `privateKeyFile` contains the certificate. -- caConfig – The path to the file or directory that contains trusted root certificates. -- verificationMode – The method for checking the node’s certificates. Details are in the description of the [Context](https://github.com/ClickHouse-Extras/poco/blob/master/NetSSL_OpenSSL/include/Poco/Net/Context.h) class. Possible values: `none`, `relaxed`, `strict`, `once`. -- verificationDepth – The maximum length of the verification chain. 
Verification will fail if the certificate chain length exceeds the set value.
-- loadDefaultCAFile – Indicates that built-in CA certificates for OpenSSL will be used. Acceptable values: `true`, `false`.
-- cipherList – Supported OpenSSL encryptions. For example: `ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH`.
-- cacheSessions – Enables or disables caching sessions. Must be used in combination with `sessionIdContext`. Acceptable values: `true`, `false`.
-- sessionIdContext – A unique set of random characters that the server appends to each generated identifier. The length of the string must not exceed `SSL_MAX_SSL_SESSION_ID_LENGTH`. This parameter is always recommended since it helps avoid problems both if the server caches the session and if the client requested caching. Default value: `${application.name}`.
-- sessionCacheSize – The maximum number of sessions that the server caches. Default value: 1024\*20. 0 – Unlimited sessions.
-- sessionTimeout – Time for caching the session on the server.
-- extendedVerification – Automatically extended verification of certificates after the session ends. Acceptable values: `true`, `false`.
-- requireTLSv1 – Require a TLSv1 connection. Acceptable values: `true`, `false`.
-- requireTLSv1\_1 – Require a TLSv1.1 connection. Acceptable values: `true`, `false`.
-- requireTLSv1\_2 – Require a TLSv1.2 connection. Acceptable values: `true`, `false`.
-- fips – Activates OpenSSL FIPS mode. Supported if the library’s OpenSSL version supports FIPS.
-- privateKeyPassphraseHandler – Class (PrivateKeyPassphraseHandler subclass) that requests the passphrase for accessing the private key. For example: `<privateKeyPassphraseHandler>`, `<name>KeyFileHandler</name>`, `<options><password>test</password></options>`, `</privateKeyPassphraseHandler>`.
-- invalidCertificateHandler – Class (a subclass of CertificateHandler) for verifying invalid certificates. For example: `<invalidCertificateHandler> <name>ConsoleCertificateHandler</name> </invalidCertificateHandler>`.
-- disableProtocols – Protocols that are not allowed to be used.
-- preferServerCiphers – Preferred server ciphers on the client.
-
-**Example of settings:**
-
-``` xml
-<openSSL>
-    <server>
-        <certificateFile>/etc/clickhouse-server/server.crt</certificateFile>
-        <privateKeyFile>/etc/clickhouse-server/server.key</privateKeyFile>
-        <dhParamsFile>/etc/clickhouse-server/dhparam.pem</dhParamsFile>
-        <verificationMode>none</verificationMode>
-        <loadDefaultCAFile>true</loadDefaultCAFile>
-        <cacheSessions>true</cacheSessions>
-        <disableProtocols>sslv2,sslv3</disableProtocols>
-        <preferServerCiphers>true</preferServerCiphers>
-    </server>
-    <client>
-        <loadDefaultCAFile>true</loadDefaultCAFile>
-        <cacheSessions>true</cacheSessions>
-        <disableProtocols>sslv2,sslv3</disableProtocols>
-        <preferServerCiphers>true</preferServerCiphers>
-        <invalidCertificateHandler>
-            <name>RejectCertificateHandler</name>
-        </invalidCertificateHandler>
-    </client>
-</openSSL>
-```
-
-## part\_log {#server_settings-part-log}
-
-Logging events that are associated with [MergeTree](../table_engines/mergetree.md). For instance, adding or merging data. You can use the log to simulate merge algorithms and compare their characteristics. You can visualize the merge process.
-
-Queries are logged in the [system.part\_log](../system_tables.md#system_tables-part-log) table, not in a separate file. You can configure the name of this table in the `table` parameter (see below).
-
-Use the following parameters to configure logging:
-
-- `database` – Name of the database.
-- `table` – Name of the system table.
-- `partition_by` – Sets a [custom partitioning key](../../operations/table_engines/custom_partitioning_key.md).
-- `flush_interval_milliseconds` – Interval for flushing data from the buffer in memory to the table.
-
-**Example**
-
-``` xml
-<part_log>
-    <database>system</database>
-    <table>part_log</table>
-    <partition_by>toMonday(event_date)</partition_by>
-    <flush_interval_milliseconds>7500</flush_interval_milliseconds>
-</part_log>
    -``` - -## path {#server_settings-path} - -The path to the directory containing data. - -!!! note "Note" - The trailing slash is mandatory. - -**Example** - -``` xml -/var/lib/clickhouse/ -``` - -## query\_log {#server_settings-query-log} - -Setting for logging queries received with the [log\_queries=1](../settings/settings.md) setting. - -Queries are logged in the [system.query\_log](../system_tables.md#system_tables-query_log) table, not in a separate file. You can change the name of the table in the `table` parameter (see below). - -Use the following parameters to configure logging: - -- `database` – Name of the database. -- `table` – Name of the system table the queries will be logged in. -- `partition_by` – Sets a [custom partitioning key](../../operations/table_engines/custom_partitioning_key.md) for a table. -- `flush_interval_milliseconds` – Interval for flushing data from the buffer in memory to the table. - -If the table doesn’t exist, ClickHouse will create it. If the structure of the query log changed when the ClickHouse server was updated, the table with the old structure is renamed, and a new table is created automatically. - -**Example** - -``` xml - - system - query_log
-    <partition_by>toMonday(event_date)</partition_by>
-    <flush_interval_milliseconds>7500</flush_interval_milliseconds>
-</query_log>
    -``` - -## query\_thread\_log {#server_settings-query-thread-log} - -Setting for logging threads of queries received with the [log\_query\_threads=1](../settings/settings.md#settings-log-query-threads) setting. - -Queries are logged in the [system.query\_thread\_log](../system_tables.md#system_tables-query-thread-log) table, not in a separate file. You can change the name of the table in the `table` parameter (see below). - -Use the following parameters to configure logging: - -- `database` – Name of the database. -- `table` – Name of the system table the queries will be logged in. -- `partition_by` – Sets a [custom partitioning key](../../operations/table_engines/custom_partitioning_key.md) for a system table. -- `flush_interval_milliseconds` – Interval for flushing data from the buffer in memory to the table. - -If the table doesn’t exist, ClickHouse will create it. If the structure of the query thread log changed when the ClickHouse server was updated, the table with the old structure is renamed, and a new table is created automatically. - -**Example** - -``` xml - - system - query_thread_log
-    <partition_by>toMonday(event_date)</partition_by>
-    <flush_interval_milliseconds>7500</flush_interval_milliseconds>
-</query_thread_log>
    -``` - -## trace\_log {#server_settings-trace_log} - -Settings for the [trace\_log](../system_tables.md#system_tables-trace_log) system table operation. - -Parameters: - -- `database` — Database for storing a table. -- `table` — Table name. -- `partition_by` — [Custom partitioning key](../../operations/table_engines/custom_partitioning_key.md) for a system table. -- `flush_interval_milliseconds` — Interval for flushing data from the buffer in memory to the table. - -The default server configuration file `config.xml` contains the following settings section: - -``` xml - - system - trace_log
-    <partition_by>toYYYYMM(event_date)</partition_by>
-    <flush_interval_milliseconds>7500</flush_interval_milliseconds>
-</trace_log>
    -``` - -## query\_masking\_rules {#query-masking-rules} - -Regexp-based rules, which will be applied to queries as well as all log messages before storing them in server logs, -`system.query_log`, `system.text_log`, `system.processes` table, and in logs sent to the client. That allows preventing -sensitive data leakage from SQL queries (like names, emails, personal -identifiers or credit card numbers) to logs. - -**Example** - -``` xml - - - hide SSN - (^|\D)\d{3}-\d{2}-\d{4}($|\D) - 000-00-0000 - - -``` - -Config fields: -- `name` - name for the rule (optional) -- `regexp` - RE2 compatible regular expression (mandatory) -- `replace` - substitution string for sensitive data (optional, by default - six asterisks) - -The masking rules are applied to the whole query (to prevent leaks of sensitive data from malformed / non-parsable queries). - -`system.events` table have counter `QueryMaskingRulesMatch` which have an overall number of query masking rules matches. - -For distributed queries each server have to be configured separately, otherwise, subqueries passed to other -nodes will be stored without masking. - -## remote\_servers {#server-settings-remote-servers} - -Configuration of clusters used by the [Distributed](../../operations/table_engines/distributed.md) table engine and by the `cluster` table function. - -**Example** - -``` xml - -``` - -For the value of the `incl` attribute, see the section “[Configuration files](../configuration_files.md#configuration_files)”. - -**See Also** - -- [skip\_unavailable\_shards](../settings/settings.md#settings-skip_unavailable_shards) - -## timezone {#server_settings-timezone} - -The server’s time zone. - -Specified as an IANA identifier for the UTC timezone or geographic location (for example, Africa/Abidjan). - -The time zone is necessary for conversions between String and DateTime formats when DateTime fields are output to text format (printed on the screen or in a file), and when getting DateTime from a string. Besides, the time zone is used in functions that work with the time and date if they didn’t receive the time zone in the input parameters. - -**Example** - -``` xml -Europe/Moscow -``` - -## tcp\_port {#server_settings-tcp_port} - -Port for communicating with clients over the TCP protocol. - -**Example** - -``` xml -9000 -``` - -## tcp\_port\_secure {#server_settings-tcp_port-secure} - -TCP port for secure communication with clients. Use it with [OpenSSL](#server_settings-openssl) settings. - -**Possible values** - -Positive integer. - -**Default value** - -``` xml -9440 -``` - -## mysql\_port {#server_settings-mysql_port} - -Port for communicating with clients over MySQL protocol. - -**Possible values** - -Positive integer. - -Example - -``` xml -9004 -``` - -## tmp\_path {#server-settings-tmp_path} - -Path to temporary data for processing large queries. - -!!! note "Note" - The trailing slash is mandatory. - -**Example** - -``` xml -/var/lib/clickhouse/tmp/ -``` - -## tmp\_policy {#server-settings-tmp-policy} - -Policy from [`storage_configuration`](../table_engines/mergetree.md#table_engine-mergetree-multiple-volumes) to store temporary files. -If not set [`tmp_path`](#server-settings-tmp_path) is used, otherwise it is ignored. - -!!! 
note "Note" - - `move_factor` is ignored -- `keep_free_space_bytes` is ignored -- `max_data_part_size_bytes` is ignored -- you must have exactly one volume in that policy - -## uncompressed\_cache\_size {#server-settings-uncompressed_cache_size} - -Cache size (in bytes) for uncompressed data used by table engines from the [MergeTree](../table_engines/mergetree.md). - -There is one shared cache for the server. Memory is allocated on demand. The cache is used if the option [use\_uncompressed\_cache](../settings/settings.md#setting-use_uncompressed_cache) is enabled. - -The uncompressed cache is advantageous for very short queries in individual cases. - -**Example** - -``` xml -8589934592 -``` - -## user\_files\_path {#server_settings-user_files_path} - -The directory with user files. Used in the table function [file()](../../query_language/table_functions/file.md). - -**Example** - -``` xml -/var/lib/clickhouse/user_files/ -``` - -## users\_config {#users-config} - -Path to the file that contains: - -- User configurations. -- Access rights. -- Settings profiles. -- Quota settings. - -**Example** - -``` xml -users.xml -``` - -## zookeeper {#server-settings_zookeeper} - -Contains settings that allow ClickHouse to interact with a [ZooKeeper](http://zookeeper.apache.org/) cluster. - -ClickHouse uses ZooKeeper for storing metadata of replicas when using replicated tables. If replicated tables are not used, this section of parameters can be omitted. - -This section contains the following parameters: - -- `node` — ZooKeeper endpoint. You can set multiple endpoints. - - For example: - - - -``` xml - - example_host - 2181 - -``` - - The `index` attribute specifies the node order when trying to connect to the ZooKeeper cluster. - -- `session_timeout` — Maximum timeout for the client session in milliseconds. -- `root` — The [znode](http://zookeeper.apache.org/doc/r3.5.5/zookeeperOver.html#Nodes+and+ephemeral+nodes) that is used as the root for znodes used by the ClickHouse server. Optional. -- `identity` — User and password, that can be required by ZooKeeper to give access to requested znodes. Optional. - -**Example configuration** - -``` xml - - - example1 - 2181 - - - example2 - 2181 - - 30000 - 10000 - - /path/to/zookeeper/node - - user:password - -``` - -**See Also** - -- [Replication](../../operations/table_engines/replication.md) -- [ZooKeeper Programmer’s Guide](http://zookeeper.apache.org/doc/current/zookeeperProgrammers.html) - -## use\_minimalistic\_part\_header\_in\_zookeeper {#server-settings-use_minimalistic_part_header_in_zookeeper} - -Storage method for data part headers in ZooKeeper. - -This setting only applies to the `MergeTree` family. It can be specified: - -- Globally in the [merge\_tree](#server_settings-merge_tree) section of the `config.xml` file. - - ClickHouse uses the setting for all the tables on the server. You can change the setting at any time. Existing tables change their behaviour when the setting changes. - -- For each table. - - When creating a table, specify the corresponding [engine setting](../table_engines/mergetree.md#table_engine-mergetree-creating-a-table). The behaviour of an existing table with this setting does not change, even if the global setting changes. - -**Possible values** - -- 0 — Functionality is turned off. -- 1 — Functionality is turned on. - -If `use_minimalistic_part_header_in_zookeeper = 1`, then [replicated](../table_engines/replication.md) tables store the headers of the data parts compactly using a single `znode`. 
If the table contains many columns, this storage method significantly reduces the volume of the data stored in Zookeeper. - -!!! attention "Attention" - After applying `use_minimalistic_part_header_in_zookeeper = 1`, you can’t downgrade the ClickHouse server to a version that doesn’t support this setting. Be careful when upgrading ClickHouse on servers in a cluster. Don’t upgrade all the servers at once. It is safer to test new versions of ClickHouse in a test environment, or on just a few servers of a cluster. - - Data part headers already stored with this setting can't be restored to their previous (non-compact) representation. - -**Default value:** 0. - -## disable\_internal\_dns\_cache {#server-settings-disable-internal-dns-cache} - -Disables the internal DNS cache. Recommended for operating ClickHouse in systems -with frequently changing infrastructure such as Kubernetes. - -**Default value:** 0. - -## dns\_cache\_update\_period {#server-settings-dns-cache-update-period} - -The period of updating IP addresses stored in the ClickHouse internal DNS cache (in seconds). -The update is performed asynchronously, in a separate system thread. - -**Default value**: 15. - -[Original article](https://clickhouse.tech/docs/en/operations/server_settings/settings/) diff --git a/docs/fa/operations/settings/constraints_on_settings.md b/docs/fa/operations/settings/constraints_on_settings.md index b0037813199..19aa7e26f3a 100644 --- a/docs/fa/operations/settings/constraints_on_settings.md +++ b/docs/fa/operations/settings/constraints_on_settings.md @@ -1,11 +1,15 @@ --- -en_copy: true +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 62 +toc_title: "\u0645\u062D\u062F\u0648\u062F\u06CC\u062A \u062F\u0631 \u062A\u0646\u0638\ + \u06CC\u0645\u0627\u062A" --- -# Constraints on Settings {#constraints-on-settings} +# محدودیت در تنظیمات {#constraints-on-settings} -The constraints on settings can be defined in the `profiles` section of the `user.xml` configuration file and prohibit users from changing some of the settings with the `SET` query. -The constraints are defined as the following: +محدودیت در تنظیمات را می توان در تعریف `profiles` بخش از `user.xml` فایل پیکربندی و منع کاربران از تغییر برخی از تنظیمات با `SET` پرس و جو. +محدودیت ها به صورت زیر تعریف می شوند: ``` xml @@ -29,10 +33,10 @@ The constraints are defined as the following: ``` -If the user tries to violate the constraints an exception is thrown and the setting isn’t changed. -There are supported three types of constraints: `min`, `max`, `readonly`. The `min` and `max` constraints specify upper and lower boundaries for a numeric setting and can be used in combination. The `readonly` constraint specifies that the user cannot change the corresponding setting at all. +اگر کاربر تلاش می کند به نقض محدودیت یک استثنا پرتاب می شود و تنظیم تغییر نکرده است. +سه نوع محدودیت پشتیبانی می شوند: `min`, `max`, `readonly`. این `min` و `max` محدودیت مشخص مرزهای بالا و پایین برای یک محیط عددی و می تواند در ترکیب استفاده می شود. این `readonly` محدودیت مشخص می کند که کاربر می تواند تنظیمات مربوطه را تغییر دهید و در همه. -**Example:** Let `users.xml` includes lines: +**مثال:** اجازه بدهید `users.xml` شامل خطوط: ``` xml @@ -53,7 +57,7 @@ There are supported three types of constraints: `min`, `max`, `readonly`. 
The `m ``` -The following queries all throw exceptions: +نمایش داده شد زیر همه استثنا پرتاب: ``` sql SET max_memory_usage=20000000001; @@ -67,6 +71,6 @@ Code: 452, e.displayText() = DB::Exception: Setting max_memory_usage should not Code: 452, e.displayText() = DB::Exception: Setting force_index_by_date should not be changed. ``` -**Note:** the `default` profile has special handling: all the constraints defined for the `default` profile become the default constraints, so they restrict all the users until they’re overridden explicitly for these users. +**یادداشت:** این `default` مشخصات است دست زدن به ویژه: همه محدودیت های تعریف شده برای `default` مشخصات تبدیل به محدودیت های پیش فرض, بنابراین محدود کردن تمام کاربران تا زمانی که به صراحت برای این کاربران باطل. -[Original article](https://clickhouse.tech/docs/en/operations/settings/constraints_on_settings/) +[مقاله اصلی](https://clickhouse.tech/docs/en/operations/settings/constraints_on_settings/) diff --git a/docs/fa/operations/settings/index.md b/docs/fa/operations/settings/index.md index c2a69a83856..f43d76958bb 100644 --- a/docs/fa/operations/settings/index.md +++ b/docs/fa/operations/settings/index.md @@ -1,28 +1,32 @@ --- -en_copy: true +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_folder_title: Settings +toc_priority: 55 +toc_title: "\u0645\u0639\u0631\u0641\u06CC \u0634\u0631\u06A9\u062A" --- -# Settings {#settings} +# تنظیمات {#settings} -There are multiple ways to make all the settings described below. -Settings are configured in layers, so each subsequent layer redefines the previous settings. +راه های متعدد را به تمام تنظیمات زیر توضیح داده شده وجود دارد. +تنظیمات در لایه پیکربندی, بنابراین هر لایه های بعدی دوباره تعریف تنظیمات قبلی. -Ways to configure settings, in order of priority: +راه های پیکربندی تنظیمات به ترتیب اولویت: -- Settings in the `users.xml` server configuration file. +- تنظیمات در `users.xml` فایل پیکربندی سرور. - Set in the element ``. + تنظیم در عنصر ``. -- Session settings. +- تنظیمات جلسه. - Send `SET setting=value` from the ClickHouse console client in interactive mode. - Similarly, you can use ClickHouse sessions in the HTTP protocol. To do this, you need to specify the `session_id` HTTP parameter. + ارسال `SET setting=value` از مشتری کنسول کلیک در حالت تعاملی. + به طور مشابه, شما می توانید جلسات کلیک در پروتکل قام استفاده. برای انجام این, شما نیاز به مشخص `session_id` پارامتر قام. -- Query settings. +- تنظیمات پرس و جو. - - When starting the ClickHouse console client in non-interactive mode, set the startup parameter `--setting=value`. - - When using the HTTP API, pass CGI parameters (`URL?setting_1=value&setting_2=value...`). + - هنگام شروع مشتری کنسول کلیک در حالت غیر تعاملی, تنظیم پارامتر راه اندازی `--setting=value`. + - هنگام استفاده از پارامترهای سیستم (`URL?setting_1=value&setting_2=value...`). -Settings that can only be made in the server config file are not covered in this section. +تنظیمات است که تنها می تواند در فایل پیکربندی سرور ساخته شده در این بخش پوشش داده نمی شود. 
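+
+For instance, the same setting can be pinned at the session layer and then overridden for a single query — a minimal sketch of the priority order described above:
+
+``` sql
+-- Session level: stays in effect until the session ends.
+SET max_threads = 8;
+
+-- Query level: overrides the session value for this query only.
+SELECT count()
+FROM numbers(1000000)
+SETTINGS max_threads = 2
+```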
-[Original article](https://clickhouse.tech/docs/en/operations/settings/) +[مقاله اصلی](https://clickhouse.tech/docs/en/operations/settings/) diff --git a/docs/fa/operations/settings/permissions_for_queries.md b/docs/fa/operations/settings/permissions_for_queries.md index 60942e1926b..f8596d44792 100644 --- a/docs/fa/operations/settings/permissions_for_queries.md +++ b/docs/fa/operations/settings/permissions_for_queries.md @@ -1,58 +1,62 @@ --- -en_copy: true +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 58 +toc_title: "\u0645\u062C\u0648\u0632 \u0628\u0631\u0627\u06CC \u0646\u0645\u0627\u06CC\ + \u0634 \u062F\u0627\u062F\u0647 \u0634\u062F" --- -# Permissions for Queries {#permissions_for_queries} +# مجوز برای نمایش داده شد {#permissions_for_queries} -Queries in ClickHouse can be divided into several types: +نمایش داده شد در کلیک خانه را می توان به انواع مختلفی تقسیم شده است: -1. Read data queries: `SELECT`, `SHOW`, `DESCRIBE`, `EXISTS`. -2. Write data queries: `INSERT`, `OPTIMIZE`. -3. Change settings query: `SET`, `USE`. -4. [DDL](https://en.wikipedia.org/wiki/Data_definition_language) queries: `CREATE`, `ALTER`, `RENAME`, `ATTACH`, `DETACH`, `DROP` `TRUNCATE`. +1. خواندن نمایش داده شد داده: `SELECT`, `SHOW`, `DESCRIBE`, `EXISTS`. +2. نوشتن نمایش داده شد داده ها: `INSERT`, `OPTIMIZE`. +3. تغییر پرسوجوی تنظیمات: `SET`, `USE`. +4. [DDL](https://en.wikipedia.org/wiki/Data_definition_language) نمایش داده شد: `CREATE`, `ALTER`, `RENAME`, `ATTACH`, `DETACH`, `DROP` `TRUNCATE`. 5. `KILL QUERY`. -The following settings regulate user permissions by the type of query: +تنظیمات زیر تنظیم مجوز کاربر بر اساس نوع پرس و جو: -- [readonly](#settings_readonly) — Restricts permissions for all types of queries except DDL queries. -- [allow\_ddl](#settings_allow_ddl) — Restricts permissions for DDL queries. +- [فقط خواندنی](#settings_readonly) — Restricts permissions for all types of queries except DDL queries. +- [اجازه دادن به \_نشانی](#settings_allow_ddl) — Restricts permissions for DDL queries. -`KILL QUERY` can be performed with any settings. +`KILL QUERY` را می توان با هر تنظیمات انجام می شود. -## readonly {#settings_readonly} +## فقط خواندنی {#settings_readonly} -Restricts permissions for reading data, write data and change settings queries. +محدود مجوز برای خواندن داده ها, نوشتن داده ها و تغییر تنظیمات نمایش داده شد. -See how the queries are divided into types [above](#permissions_for_queries). +ببینید که چگونه نمایش داده شد به انواع تقسیم [بالا](#permissions_for_queries). -Possible values: +مقادیر ممکن: - 0 — All queries are allowed. - 1 — Only read data queries are allowed. - 2 — Read data and change settings queries are allowed. -After setting `readonly = 1`, the user can’t change `readonly` and `allow_ddl` settings in the current session. +پس از تنظیم `readonly = 1` کاربر نمیتواند تغییر کند `readonly` و `allow_ddl` تنظیمات در جلسه فعلی. -When using the `GET` method in the [HTTP interface](../../interfaces/http.md), `readonly = 1` is set automatically. To modify data, use the `POST` method. +هنگام استفاده از `GET` روش در [رابط قام](../../interfaces/http.md), `readonly = 1` به طور خودکار تنظیم شده است. برای تغییر داده ها از `POST` روش. -Setting `readonly = 1` prohibit the user from changing all the settings. There is a way to prohibit the user -from changing only specific settings, for details see [constraints on settings](constraints_on_settings.md). +تنظیم `readonly = 1` منع کاربر از تغییر تمام تنظیمات. 
یک راه برای منع کاربر وجود دارد +از تغییر تنظیمات تنها خاص, برای اطلاعات بیشتر ببینید [محدودیت در تنظیمات](constraints_on_settings.md). -Default value: 0 +مقدار پیشفرض: 0 -## allow\_ddl {#settings_allow_ddl} +## اجازه دادن به \_نشانی {#settings_allow_ddl} -Allows or denies [DDL](https://en.wikipedia.org/wiki/Data_definition_language) queries. +اجازه می دهد یا رد می کند [DDL](https://en.wikipedia.org/wiki/Data_definition_language) نمایش داده شد. -See how the queries are divided into types [above](#permissions_for_queries). +ببینید که چگونه نمایش داده شد به انواع تقسیم [بالا](#permissions_for_queries). -Possible values: +مقادیر ممکن: - 0 — DDL queries are not allowed. - 1 — DDL queries are allowed. -You can’t execute `SET allow_ddl = 1` if `allow_ddl = 0` for the current session. +شما نمی توانید اجرا کنید `SET allow_ddl = 1` اگر `allow_ddl = 0` برای جلسه فعلی. -Default value: 1 +مقدار پیشفرض: 1 -[Original article](https://clickhouse.tech/docs/en/operations/settings/permissions_for_queries/) +[مقاله اصلی](https://clickhouse.tech/docs/en/operations/settings/permissions_for_queries/) diff --git a/docs/fa/operations/settings/query_complexity.md b/docs/fa/operations/settings/query_complexity.md index 577b43cad6f..f926e3ced50 100644 --- a/docs/fa/operations/settings/query_complexity.md +++ b/docs/fa/operations/settings/query_complexity.md @@ -1,120 +1,124 @@ --- -en_copy: true +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 59 +toc_title: "\u0645\u062D\u062F\u0648\u062F\u06CC\u062A \u062F\u0631 \u067E\u06CC\u0686\ + \u06CC\u062F\u06AF\u06CC \u067E\u0631\u0633 \u0648 \u062C\u0648" --- -# Restrictions on Query Complexity {#restrictions-on-query-complexity} +# محدودیت در پیچیدگی پرس و جو {#restrictions-on-query-complexity} -Restrictions on query complexity are part of the settings. -They are used to provide safer execution from the user interface. -Almost all the restrictions only apply to `SELECT`. For distributed query processing, restrictions are applied on each server separately. +محدودیت در پیچیدگی پرس و جو بخشی از تنظیمات. +برای اجرای امن تر از رابط کاربر استفاده می شود. +تقریبا تمام محدودیت ها فقط برای اعمال `SELECT`. برای پردازش پرس و جو توزیع, محدودیت بر روی هر سرور به طور جداگانه اعمال. -ClickHouse checks the restrictions for data parts, not for each row. It means that you can exceed the value of restriction with the size of the data part. +خانه را کلیک کنید چک محدودیت برای قطعات داده, نه برای هر سطر. این بدان معنی است که شما می توانید ارزش محدودیت با اندازه بخش داده ها تجاوز. -Restrictions on the “maximum amount of something” can take the value 0, which means “unrestricted”. -Most restrictions also have an ‘overflow\_mode’ setting, meaning what to do when the limit is exceeded. -It can take one of two values: `throw` or `break`. Restrictions on aggregation (group\_by\_overflow\_mode) also have the value `any`. +محدودیت در “maximum amount of something” می توانید مقدار را 0, که به معنی “unrestricted”. +اکثر محدودیت ها نیز دارند ‘overflow\_mode’ محیط, به این معنی چه باید بکنید هنگامی که از حد فراتر رفته است. +این می تواند یکی از دو مقدار را: `throw` یا `break`. محدودیت در تجمع (کد \_شورت\_فلو\_وشه گروه) نیز ارزش داشته باشد `any`. `throw` – Throw an exception (default). `break` – Stop executing the query and return the partial result, as if the source data ran out. -`any (only for group_by_overflow_mode)` – Continuing aggregation for the keys that got into the set, but don’t add new keys to the set. 
+`any (only for group_by_overflow_mode)` – Continuing aggregation for the keys that got into the set, but don't add new keys to the set. -## max\_memory\_usage {#settings_max_memory_usage} +## \_کاساژ بیشینه {#settings_max_memory_usage} -The maximum amount of RAM to use for running a query on a single server. +حداکثر مقدار رم برای استفاده برای اجرای پرس و جو بر روی یک سرور واحد. -In the default configuration file, the maximum is 10 GB. +در فایل پیکربندی پیش فرض, حداکثر است 10 گیگابایت. -The setting doesn’t consider the volume of available memory or the total volume of memory on the machine. -The restriction applies to a single query within a single server. -You can use `SHOW PROCESSLIST` to see the current memory consumption for each query. -Besides, the peak memory consumption is tracked for each query and written to the log. +تنظیم می کند حجم حافظه در دسترس و یا حجم کل حافظه بر روی دستگاه در نظر نمی. +محدودیت شامل یک پرس و جو تنها در یک سرور. +شما می توانید استفاده کنید `SHOW PROCESSLIST` برای دیدن مصرف حافظه فعلی برای هر پرس و جو. +بعلاوه, مصرف حافظه اوج برای هر پرس و جو ردیابی و نوشته شده به ورود به سیستم. -Memory usage is not monitored for the states of certain aggregate functions. +استفاده از حافظه برای ایالت های توابع مجموع خاص نظارت نیست. -Memory usage is not fully tracked for states of the aggregate functions `min`, `max`, `any`, `anyLast`, `argMin`, `argMax` from `String` and `Array` arguments. +استفاده از حافظه به طور کامل برای ایالت ها از توابع کل ردیابی نیست `min`, `max`, `any`, `anyLast`, `argMin`, `argMax` از `String` و `Array` بحث کردن. -Memory consumption is also restricted by the parameters `max_memory_usage_for_user` and `max_memory_usage_for_all_queries`. +مصرف حافظه نیز توسط پارامترها محدود شده است `max_memory_usage_for_user` و `max_memory_usage_for_all_queries`. -## max\_memory\_usage\_for\_user {#max-memory-usage-for-user} +## \_شمارهگیر بیشینه {#max-memory-usage-for-user} -The maximum amount of RAM to use for running a user’s queries on a single server. +حداکثر مقدار رم برای استفاده برای اجرای نمایش داده شد کاربر بر روی یک سرور واحد. -Default values are defined in [Settings.h](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/Core/Settings.h#L288). By default, the amount is not restricted (`max_memory_usage_for_user = 0`). +مقادیر پیش فرض در تعریف [تنظیمات.ه](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/Core/Settings.h#L288). به طور پیش فرض مقدار محدود نمی شود (`max_memory_usage_for_user = 0`). -See also the description of [max\_memory\_usage](#settings_max_memory_usage). +همچنین نگاه کنید به شرح [\_کاساژ بیشینه](#settings_max_memory_usage). -## max\_memory\_usage\_for\_all\_queries {#max-memory-usage-for-all-queries} +## \_شیشه بخاطر \_خروج {#max-memory-usage-for-all-queries} -The maximum amount of RAM to use for running all queries on a single server. +حداکثر مقدار رم برای استفاده برای اجرای تمام نمایش داده شد بر روی یک سرور واحد. -Default values are defined in [Settings.h](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/Core/Settings.h#L289). By default, the amount is not restricted (`max_memory_usage_for_all_queries = 0`). +مقادیر پیش فرض در تعریف [تنظیمات.ه](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/Core/Settings.h#L289). به طور پیش فرض مقدار محدود نمی شود (`max_memory_usage_for_all_queries = 0`). -See also the description of [max\_memory\_usage](#settings_max_memory_usage). +همچنین نگاه کنید به شرح [\_کاساژ بیشینه](#settings_max_memory_usage). 
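+
+As a concrete illustration of the memory limits described above — a minimal sketch; the threshold and the aggregated volume are illustrative only:
+
+``` sql
+-- Cap a single query on this server at ~10 GB of RAM.
+SET max_memory_usage = 10000000000;
+
+-- A memory-hungry aggregation; it is aborted with an exception
+-- if the limit is exceeded.
+SELECT uniqExact(number)
+FROM numbers(100000000)
+```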
-## max\_rows\_to\_read {#max-rows-to-read} +## \_گذرواژههای \_ورود {#max-rows-to-read} -The following restrictions can be checked on each block (instead of on each row). That is, the restrictions can be broken a little. -When running a query in multiple threads, the following restrictions apply to each thread separately. +محدودیت های زیر را می توان در هر بلوک بررسی (به جای در هر سطر). به این معنا که, محدودیت را می توان شکسته کمی. +هنگامی که در حال اجرا یک پرس و جو در موضوعات مختلف, محدودیت های زیر به هر موضوع اعمال می شود به طور جداگانه. -A maximum number of rows that can be read from a table when running a query. +حداکثر تعداد ردیف است که می تواند از یک جدول زمانی که در حال اجرا یک پرس و جو به عنوان خوانده شده. -## max\_bytes\_to\_read {#max-bytes-to-read} +## \_مخفی کردن {#max-bytes-to-read} -A maximum number of bytes (uncompressed data) that can be read from a table when running a query. +حداکثر تعداد بایت (داده های غیر فشرده) است که می تواند از یک جدول به عنوان خوانده شده در هنگام اجرای یک پرس و جو. -## read\_overflow\_mode {#read-overflow-mode} +## \_ورود به سیستم {#read-overflow-mode} -What to do when the volume of data read exceeds one of the limits: ‘throw’ or ‘break’. By default, throw. +چه باید بکنید هنگامی که حجم داده ها به عنوان خوانده شده بیش از یکی از محدودیت های: ‘throw’ یا ‘break’. به طور پیش فرض, پرتاب. -## max\_rows\_to\_group\_by {#settings-max-rows-to-group-by} +## \_رو\_تو\_گروهها {#settings-max-rows-to-group-by} -A maximum number of unique keys received from aggregation. This setting lets you limit memory consumption when aggregating. +حداکثر تعداد کلید منحصر به فرد دریافت شده از تجمع. این تنظیم به شما امکان مصرف حافظه محدود در هنگام جمع. -## group\_by\_overflow\_mode {#group-by-overflow-mode} +## \_شماره \_شماره گروه {#group-by-overflow-mode} -What to do when the number of unique keys for aggregation exceeds the limit: ‘throw’, ‘break’, or ‘any’. By default, throw. -Using the ‘any’ value lets you run an approximation of GROUP BY. The quality of this approximation depends on the statistical nature of the data. +چه باید بکنید هنگامی که تعدادی از کلید های منحصر به فرد برای تجمع بیش از حد: ‘throw’, ‘break’ یا ‘any’. به طور پیش فرض, پرتاب. +با استفاده از ‘any’ ارزش شما اجازه می دهد یک تقریب از گروه های اجرا. کیفیت این تقریب بستگی به ماهیت استاتیک داده ها دارد. -## max\_bytes\_before\_external\_group\_by {#settings-max_bytes_before_external_group_by} +## ا\_فزون\_بر\_گونهی\_گونهی زیر\_گروهها {#settings-max_bytes_before_external_group_by} -Enables or disables execution of `GROUP BY` clauses in external memory. See [GROUP BY in external memory](../../query_language/select.md#select-group-by-in-external-memory). +فعالسازی یا غیرفعالسازی اعدام `GROUP BY` بند در حافظه خارجی. ببینید [گروه در حافظه خارجی](../../sql_reference/statements/select.md#select-group-by-in-external-memory). -Possible values: +مقادیر ممکن: -- Maximum volume of RAM (in bytes) that can be used by the single [GROUP BY](../../query_language/select.md#select-group-by-clause) operation. -- 0 — `GROUP BY` in external memory disabled. +- حداکثر حجم رم (به بایت) است که می تواند توسط تک استفاده می شود [GROUP BY](../../sql_reference/statements/select.md#select-group-by-clause) عمل +- 0 — `GROUP BY` در حافظه خارجی غیر فعال. -Default value: 0. +مقدار پیش فرض: 0. -## max\_rows\_to\_sort {#max-rows-to-sort} +## \_شماره بیشینه {#max-rows-to-sort} -A maximum number of rows before sorting. This allows you to limit memory consumption when sorting. +حداکثر تعداد ردیف قبل از مرتب سازی. 
این اجازه می دهد تا شما را به محدود کردن مصرف حافظه در هنگام مرتب سازی. -## max\_bytes\_to\_sort {#max-bytes-to-sort} +## ا\_سلایدی {#max-bytes-to-sort} -A maximum number of bytes before sorting. +حداکثر تعداد بایت قبل از مرتب سازی. -## sort\_overflow\_mode {#sort-overflow-mode} +## کد\_و\_وشهیابی {#sort-overflow-mode} -What to do if the number of rows received before sorting exceeds one of the limits: ‘throw’ or ‘break’. By default, throw. +چه باید بکنید اگر تعداد ردیف قبل از مرتب سازی دریافت بیش از یکی از محدودیت: ‘throw’ یا ‘break’. به طور پیش فرض, پرتاب. -## max\_result\_rows {#setting-max_result_rows} +## بارشهای بیشینه {#setting-max_result_rows} -Limit on the number of rows in the result. Also checked for subqueries, and on remote servers when running parts of a distributed query. +محدود در تعداد ردیف در نتیجه. همچنین برای زیرمجموعه بررسی, و بر روی سرور از راه دور در هنگام اجرای بخش هایی از یک پرس و جو توزیع. -## max\_result\_bytes {#max-result-bytes} +## حداکثر\_زمین بایت {#max-result-bytes} -Limit on the number of bytes in the result. The same as the previous setting. +محدود در تعداد بایت در نتیجه. همان تنظیمات قبلی. -## result\_overflow\_mode {#result-overflow-mode} +## \_شماره حاصل {#result-overflow-mode} -What to do if the volume of the result exceeds one of the limits: ‘throw’ or ‘break’. By default, throw. +چه باید بکنید اگر حجم نتیجه بیش از یکی از محدودیت های: ‘throw’ یا ‘break’. به طور پیش فرض, پرتاب. -Using ‘break’ is similar to using LIMIT. `Break` interrupts execution only at the block level. This means that amount of returned rows is greater than [max\_result\_rows](#setting-max_result_rows), multiple of [max\_block\_size](settings.md#setting-max_block_size) and depends on [max\_threads](settings.md#settings-max_threads). +با استفاده از ‘break’ شبیه به استفاده از حد است. `Break` قطع اعدام تنها در سطح بلوک. این به این معنی است که مقدار ردیف بازگشت بیشتر از [بارشهای بیشینه](#setting-max_result_rows) چندین [ت\_مایش بیشینه](settings.md#setting-max_block_size) و بستگی دارد [\_مخفی کردن](settings.md#settings-max_threads). -Example: +مثال: ``` sql SET max_threads = 3, max_block_size = 3333; @@ -125,174 +129,174 @@ FROM numbers_mt(100000) FORMAT Null; ``` -Result: +نتیجه: ``` text 6666 rows in set. ... ``` -## max\_execution\_time {#max-execution-time} +## زمان \_شنامهی حداکثر {#max-execution-time} -Maximum query execution time in seconds. -At this time, it is not checked for one of the sorting stages, or when merging and finalizing aggregate functions. +حداکثر زمان اجرای پرس و جو در ثانیه. +در این زمان برای یکی از مراحل مرتب سازی بررسی نمی شود و یا هنگام ادغام و نهایی کردن توابع کلی. -## timeout\_overflow\_mode {#timeout-overflow-mode} +## \_شروع مجدد {#timeout-overflow-mode} -What to do if the query is run longer than ‘max\_execution\_time’: ‘throw’ or ‘break’. By default, throw. +چه باید بکنید اگر پرس و جو اجرا می شود بیش از ‘max\_execution\_time’: ‘throw’ یا ‘break’. به طور پیش فرض, پرتاب. -## min\_execution\_speed {#min-execution-speed} +## \_شروع مجدد {#min-execution-speed} -Minimal execution speed in rows per second. Checked on every data block when ‘timeout\_before\_checking\_execution\_speed’ expires. If the execution speed is lower, an exception is thrown. +سرعت اجرای حداقل در ردیف در هر ثانیه. بررسی در هر بلوک داده زمانی که ‘timeout\_before\_checking\_execution\_speed’ انقضا مییابد. اگر سرعت اجرای پایین تر است, یک استثنا پرتاب می شود. 
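+
+A minimal sketch combining the time and speed checks above; `some_table` is a hypothetical table used only for illustration:
+
+``` sql
+SET max_execution_time = 60;                       -- abort after 60 seconds
+SET min_execution_speed = 1000000;                 -- require at least 1M rows/s
+SET timeout_before_checking_execution_speed = 10;  -- start checking after 10 s
+
+SELECT count()
+FROM some_table
+```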
-## min\_execution\_speed\_bytes {#min-execution-speed-bytes} +## ا\_فزونهها {#min-execution-speed-bytes} -A minimum number of execution bytes per second. Checked on every data block when ‘timeout\_before\_checking\_execution\_speed’ expires. If the execution speed is lower, an exception is thrown. +حداقل تعداد بایت اعدام در هر ثانیه. بررسی در هر بلوک داده زمانی که ‘timeout\_before\_checking\_execution\_speed’ انقضا مییابد. اگر سرعت اجرای پایین تر است, یک استثنا پرتاب می شود. -## max\_execution\_speed {#max-execution-speed} +## حداکثر\_حاقسازی سرعت {#max-execution-speed} -A maximum number of execution rows per second. Checked on every data block when ‘timeout\_before\_checking\_execution\_speed’ expires. If the execution speed is high, the execution speed will be reduced. +حداکثر تعداد ردیف اعدام در هر ثانیه. بررسی در هر بلوک داده زمانی که ‘timeout\_before\_checking\_execution\_speed’ انقضا مییابد. اگر سرعت اجرای بالا است, سرعت اجرای کاهش خواهد یافت. -## max\_execution\_speed\_bytes {#max-execution-speed-bytes} +## حداکثر\_کشن\_پیمایههای سرعت {#max-execution-speed-bytes} -A maximum number of execution bytes per second. Checked on every data block when ‘timeout\_before\_checking\_execution\_speed’ expires. If the execution speed is high, the execution speed will be reduced. +حداکثر تعداد بایت اعدام در هر ثانیه. بررسی در هر بلوک داده زمانی که ‘timeout\_before\_checking\_execution\_speed’ انقضا مییابد. اگر سرعت اجرای بالا است, سرعت اجرای کاهش خواهد یافت. -## timeout\_before\_checking\_execution\_speed {#timeout-before-checking-execution-speed} +## جستجو {#timeout-before-checking-execution-speed} -Checks that execution speed is not too slow (no less than ‘min\_execution\_speed’), after the specified time in seconds has expired. +چک که سرعت اجرای بیش از حد کند نیست (کمتر از ‘min\_execution\_speed’), پس از زمان مشخص شده در ثانیه تمام شده است. -## max\_columns\_to\_read {#max-columns-to-read} +## \_رنگ \_ورود {#max-columns-to-read} -A maximum number of columns that can be read from a table in a single query. If a query requires reading a greater number of columns, it throws an exception. +حداکثر تعداد ستون است که می تواند از یک جدول در یک پرس و جو به عنوان خوانده شده. اگر پرس و جو نیاز به خواندن تعداد بیشتری از ستون, این می اندازد یک استثنا. -## max\_temporary\_columns {#max-temporary-columns} +## \_رنگ بیشینه {#max-temporary-columns} -A maximum number of temporary columns that must be kept in RAM at the same time when running a query, including constant columns. If there are more temporary columns than this, it throws an exception. +حداکثر تعداد ستون موقت است که باید در رم در همان زمان نگه داشته شود که در حال اجرا یک پرس و جو, از جمله ستون ثابت. اگر ستون موقت بیش از این وجود دارد, این یک استثنا می اندازد. -## max\_temporary\_non\_const\_columns {#max-temporary-non-const-columns} +## \_رنگ {#max-temporary-non-const-columns} -The same thing as ‘max\_temporary\_columns’, but without counting constant columns. -Note that constant columns are formed fairly often when running a query, but they require approximately zero computing resources. +همان چیزی که به عنوان ‘max\_temporary\_columns’, اما بدون شمارش ستون ثابت. +توجه داشته باشید که ستون های ثابت در حال اجرا یک پرس و جو نسبتا اغلب تشکیل, اما نیاز به حدود صفر منابع محاسباتی. -## max\_subquery\_depth {#max-subquery-depth} +## حداکثر {#max-subquery-depth} -Maximum nesting depth of subqueries. If subqueries are deeper, an exception is thrown. By default, 100. +حداکثر عمق تودرتو از کارخانه های فرعی. اگر کارخانه های فرعی عمیق تر, یک استثنا پرتاب می شود. 
به طور پیش فرض, 100. -## max\_pipeline\_depth {#max-pipeline-depth} +## حداکثر \_پیپیلین {#max-pipeline-depth} -Maximum pipeline depth. Corresponds to the number of transformations that each data block goes through during query processing. Counted within the limits of a single server. If the pipeline depth is greater, an exception is thrown. By default, 1000. +حداکثر عمق خط لوله. مربوط به تعدادی از تحولات که هر بلوک داده می رود از طریق در طول پردازش پرس و جو. شمارش در محدوده یک سرور واحد. اگر عمق خط لوله بیشتر است, یک استثنا پرتاب می شود. به طور پیش فرض 1000. -## max\_ast\_depth {#max-ast-depth} +## \_ص\_خلاف {#max-ast-depth} -Maximum nesting depth of a query syntactic tree. If exceeded, an exception is thrown. -At this time, it isn’t checked during parsing, but only after parsing the query. That is, a syntactic tree that is too deep can be created during parsing, but the query will fail. By default, 1000. +حداکثر عمق تودرتو از یک درخت نحوی پرس و جو. اگر بیش از, یک استثنا پرتاب می شود. +در این زمان در تجزیه بررسی نمی شود اما تنها پس از تجزیه پرس و جو. به این معنا که, یک درخت نحوی است که بیش از حد عمیق می تواند در طول تجزیه ایجاد, اما پرس و جو شکست مواجه خواهد شد. به طور پیش فرض 1000. -## max\_ast\_elements {#max-ast-elements} +## \_محلولات حداکثر {#max-ast-elements} -A maximum number of elements in a query syntactic tree. If exceeded, an exception is thrown. -In the same way as the previous setting, it is checked only after parsing the query. By default, 50,000. +حداکثر تعداد عناصر در یک درخت نحوی پرس و جو. اگر بیش از, یک استثنا پرتاب می شود. +در همان راه به عنوان تنظیمات قبلی تنها پس از تجزیه پرس و جو بررسی می شود. به طور پیش فرض 50000. -## max\_rows\_in\_set {#max-rows-in-set} +## \_رو\_ تنظیم {#max-rows-in-set} -A maximum number of rows for a data set in the IN clause created from a subquery. +حداکثر تعداد ردیف برای یک مجموعه داده ها در بند در ایجاد شده از یک خرده فروشی. -## max\_bytes\_in\_set {#max-bytes-in-set} +## تنظیم \_سریع {#max-bytes-in-set} -A maximum number of bytes (uncompressed data) used by a set in the IN clause created from a subquery. +حداکثر تعداد بایت (داده های غیر فشرده) استفاده شده توسط یک مجموعه در بند در ایجاد شده از یک خرده فروشی. -## set\_overflow\_mode {#set-overflow-mode} +## \_حالت تنظیم {#set-overflow-mode} -What to do when the amount of data exceeds one of the limits: ‘throw’ or ‘break’. By default, throw. +چه باید بکنید هنگامی که مقدار داده ها بیش از یکی از محدودیت های: ‘throw’ یا ‘break’. به طور پیش فرض, پرتاب. -## max\_rows\_in\_distinct {#max-rows-in-distinct} +## حوزه \_کاربری مکس {#max-rows-in-distinct} -A maximum number of different rows when using DISTINCT. +حداکثر تعداد ردیف های مختلف در هنگام استفاده از متمایز. -## max\_bytes\_in\_distinct {#max-bytes-in-distinct} +## مک\_بتس\_ حوزه {#max-bytes-in-distinct} -A maximum number of bytes used by a hash table when using DISTINCT. +حداکثر تعداد بایت استفاده شده توسط یک جدول هش در هنگام استفاده متمایز. -## distinct\_overflow\_mode {#distinct-overflow-mode} +## \_شروع مجدد {#distinct-overflow-mode} -What to do when the amount of data exceeds one of the limits: ‘throw’ or ‘break’. By default, throw. +چه باید بکنید هنگامی که مقدار داده ها بیش از یکی از محدودیت های: ‘throw’ یا ‘break’. به طور پیش فرض, پرتاب. -## max\_rows\_to\_transfer {#max-rows-to-transfer} +## ترجمههای بیشینه {#max-rows-to-transfer} -A maximum number of rows that can be passed to a remote server or saved in a temporary table when using GLOBAL IN. 
+حداکثر تعداد ردیف است که می تواند به یک سرور از راه دور منتقل می شود و یا ذخیره شده در یک جدول موقت در هنگام استفاده از جهانی در. -## max\_bytes\_to\_transfer {#max-bytes-to-transfer} +## ترجمههای بیشینه {#max-bytes-to-transfer} -A maximum number of bytes (uncompressed data) that can be passed to a remote server or saved in a temporary table when using GLOBAL IN. +حداکثر تعداد بایت (داده های غیر فشرده) است که می تواند به یک سرور از راه دور منتقل می شود و یا ذخیره شده در یک جدول موقت در هنگام استفاده از جهانی در. -## transfer\_overflow\_mode {#transfer-overflow-mode} +## \_شروع مجدد {#transfer-overflow-mode} -What to do when the amount of data exceeds one of the limits: ‘throw’ or ‘break’. By default, throw. +چه باید بکنید هنگامی که مقدار داده ها بیش از یکی از محدودیت های: ‘throw’ یا ‘break’. به طور پیش فرض, پرتاب. -## max\_rows\_in\_join {#settings-max_rows_in_join} +## \_پاک کردن \_روشن گرافیک {#settings-max_rows_in_join} -Limits the number of rows in the hash table that is used when joining tables. +محدودیت تعداد ردیف در جدول هش استفاده شده است که در هنگام پیوستن به جداول. -This settings applies to [SELECT … JOIN](../../query_language/select.md#select-join) operations and the [Join](../table_engines/join.md) table engine. +این تنظیمات در مورد [SELECT … JOIN](../../sql_reference/statements/select.md#select-join) عملیات و [پیوستن](../../engines/table_engines/special/join.md) موتور جدول. -If a query contains multiple joins, ClickHouse checks this setting for every intermediate result. +اگر یک پرس و جو شامل چند می پیوندد, خانه چک این تنظیم برای هر نتیجه متوسط. -ClickHouse can proceed with different actions when the limit is reached. Use the [join\_overflow\_mode](#settings-join_overflow_mode) setting to choose the action. +تاتر می توانید با اقدامات مختلف ادامه دهید زمانی که از حد رسیده است. استفاده از [\_شروع مجدد](#settings-join_overflow_mode) تنظیم برای انتخاب عمل. -Possible values: +مقادیر ممکن: -- Positive integer. +- عدد صحیح مثبت. - 0 — Unlimited number of rows. -Default value: 0. +مقدار پیش فرض: 0. -## max\_bytes\_in\_join {#settings-max_bytes_in_join} +## \_پویش همیشگی {#settings-max_bytes_in_join} -Limits the size in bytes of the hash table used when joining tables. +محدودیت اندازه در بایت از جدول هش استفاده می شود در هنگام پیوستن به جداول. -This settings applies to [SELECT … JOIN](../../query_language/select.md#select-join) operations and [Join table engine](../table_engines/join.md). +این تنظیمات در مورد [SELECT … JOIN](../../sql_reference/statements/select.md#select-join) عملیات و [پیوستن به موتور جدول](../../engines/table_engines/special/join.md). -If the query contains joins, ClickHouse checks this setting for every intermediate result. +اگر پرس و جو شامل می پیوندد, کلیک چک این تنظیمات برای هر نتیجه متوسط. -ClickHouse can proceed with different actions when the limit is reached. Use [join\_overflow\_mode](#settings-join_overflow_mode) settings to choose the action. +تاتر می توانید با اقدامات مختلف ادامه دهید زمانی که از حد رسیده است. استفاده [\_شروع مجدد](#settings-join_overflow_mode) تنظیمات برای انتخاب عمل. -Possible values: +مقادیر ممکن: -- Positive integer. +- عدد صحیح مثبت. - 0 — Memory control is disabled. -Default value: 0. +مقدار پیش فرض: 0. 
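+
+For example, the JOIN hash table can be bounded while letting the query finish with a partial result instead of failing — a sketch with hypothetical tables `t1` and `t2`:
+
+``` sql
+SET max_rows_in_join = 1000000;
+SET join_overflow_mode = 'break';  -- return what fits instead of throwing
+
+SELECT t1.id, t2.value
+FROM t1
+INNER JOIN t2 USING (id)
+```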
-## join\_overflow\_mode {#settings-join_overflow_mode}
+## join\_overflow\_mode {#settings-join_overflow_mode}

-Defines what action ClickHouse performs when any of the following join limits is reached:
+Defines what action ClickHouse performs when any of the following join limits is reached:

-- [max\_bytes\_in\_join](#settings-max_bytes_in_join)
-- [max\_rows\_in\_join](#settings-max_rows_in_join)
+- [max\_bytes\_in\_join](#settings-max_bytes_in_join)
+- [max\_rows\_in\_join](#settings-max_rows_in_join)

-Possible values:
+Possible values:

- `THROW` — ClickHouse throws an exception and breaks operation.
-- `BREAK` — ClickHouse breaks operation and doesn’t throw an exception.
+- `BREAK` — ClickHouse breaks operation and doesn't throw an exception.

-Default value: `THROW`.
+Default value: `THROW`.

-**See Also**
+**See Also**

-- [JOIN clause](../../query_language/select.md#select-join)
-- [Join table engine](../table_engines/join.md)
+- [JOIN clause](../../sql_reference/statements/select.md#select-join)
+- [Join table engine](../../engines/table_engines/special/join.md)

-## max\_partitions\_per\_insert\_block {#max-partitions-per-insert-block}
+## max\_partitions\_per\_insert\_block {#max-partitions-per-insert-block}

-Limits the maximum number of partitions in a single inserted block.
+Limits the maximum number of partitions in a single inserted block.

-- Positive integer.
+- Positive integer.
- 0 — Unlimited number of partitions.

-Default value: 100.
+Default value: 100.

-**Details**
+**Details**

-When inserting data, ClickHouse calculates the number of partitions in the inserted block. If the number of partitions is more than `max_partitions_per_insert_block`, ClickHouse throws an exception with the following text:
+When inserting data, ClickHouse calculates the number of partitions in the inserted block. If the number of partitions is more than `max_partitions_per_insert_block`, ClickHouse throws an exception with the following text:

-> “Too many partitions for single INSERT block (more than” + toString(max\_parts) + “). The limit is controlled by ‘max\_partitions\_per\_insert\_block’ setting. A large number of partitions is a common misconception. It will lead to severe negative performance impact, including slow server startup, slow INSERT queries and slow SELECT queries. Recommended total number of partitions for a table is under 1000..10000. Please note, that partitioning is not intended to speed up SELECT queries (ORDER BY key is sufficient to make range queries fast). Partitions are intended for data manipulation (DROP PARTITION, etc).”
+> “Too many partitions for single INSERT block (more than” + toString(max\_parts) + “). The limit is controlled by ‘max\_partitions\_per\_insert\_block’ setting. A large number of partitions is a common misconception. It will lead to severe negative performance impact, including slow server startup, slow INSERT queries and slow SELECT queries. Recommended total number of partitions for a table is under 1000..10000. Please note, that partitioning is not intended to speed up SELECT queries (ORDER BY key is sufficient to make range queries fast). Partitions are intended for data manipulation (DROP PARTITION, etc).”
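For illustration, a minimal reproduction sketch (the table and data are hypothetical):

``` sql
-- A table partitioned by month; inserting rows for four distinct months
-- creates a block with four partitions, exceeding the limit set below.
CREATE TABLE events (d Date, x UInt32)
ENGINE = MergeTree PARTITION BY toYYYYMM(d) ORDER BY x;

SET max_partitions_per_insert_block = 3;

INSERT INTO events VALUES
    ('2020-01-01', 1), ('2020-02-01', 2), ('2020-03-01', 3), ('2020-04-01', 4); -- throws
```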
-[Original article](https://clickhouse.tech/docs/en/operations/settings/query_complexity/)
+[Original article](https://clickhouse.tech/docs/en/operations/settings/query_complexity/)

diff --git a/docs/fa/operations/settings/settings.md b/docs/fa/operations/settings/settings.md
index 0475642124a..e4349477cfb 100644
--- a/docs/fa/operations/settings/settings.md
+++ b/docs/fa/operations/settings/settings.md
@@ -1,191 +1,194 @@
---
-en_copy: true
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 60
+toc_title: "\u062A\u0646\u0638\u06CC\u0645\u0627\u062A"
---

-# Settings {#settings}
+# Settings {#settings}

-## distributed\_product\_mode {#distributed-product-mode}
+## distributed\_product\_mode {#distributed-product-mode}

-Changes the behavior of [distributed subqueries](../../query_language/select.md).
+Changes the behavior of [distributed subqueries](../../sql_reference/statements/select.md).

ClickHouse applies this setting when the query contains the product of distributed tables, i.e. when the query for a distributed table contains a non-GLOBAL subquery for the distributed table.

-Restrictions:
+Restrictions:

-- Only applied for IN and JOIN subqueries.
-- Only if the FROM section uses a distributed table containing more than one shard.
-- If the subquery concerns a distributed table containing more than one shard.
-- Not used for a table-valued [remote](../../query_language/table_functions/remote.md) function.
+- Only applied for IN and JOIN subqueries.
+- Only if the FROM section uses a distributed table containing more than one shard.
+- If the subquery concerns a distributed table containing more than one shard.
+- Not used for a table-valued [remote](../../sql_reference/table_functions/remote.md) function.

-Possible values:
+Possible values:

-- `deny` — Default value. Prohibits using these types of subqueries (returns the “Double-distributed in/JOIN subqueries is denied” exception).
+- `deny` — Default value. Prohibits using these types of subqueries (returns the “Double-distributed in/JOIN subqueries is denied” exception).
- `local` — Replaces the database and table in the subquery with local ones for the destination server (shard), leaving the normal `IN`/`JOIN.`
-- `global` — Replaces the `IN`/`JOIN` query with `GLOBAL IN`/`GLOBAL JOIN.`
+- `global` — Replaces the `IN`/`JOIN` query with `GLOBAL IN`/`GLOBAL JOIN.`
- `allow` — Allows the use of these types of subqueries.

-## enable\_optimize\_predicate\_expression {#enable-optimize-predicate-expression}
+## enable\_optimize\_predicate\_expression {#enable-optimize-predicate-expression}

-Turns on predicate pushdown in `SELECT` queries.
+Turns on predicate pushdown in `SELECT` queries.

-Predicate pushdown may significantly reduce network traffic for distributed queries.
+Predicate pushdown may significantly reduce network traffic for distributed queries.

-Possible values:
+Possible values:

- 0 — Disabled.
- 1 — Enabled.

-Default value: 1.
+Default value: 1.

-Usage
+Usage

-Consider the following queries:
+Consider the following queries:

1. `SELECT count() FROM test_table WHERE date = '2018-10-10'`
2. `SELECT count() FROM (SELECT * FROM test_table) WHERE date = '2018-10-10'`

-If `enable_optimize_predicate_expression = 1`, then the execution time of these queries is equal because ClickHouse applies `WHERE` to the subquery when processing it.
+If `enable_optimize_predicate_expression = 1`, then the execution time of these queries is equal because ClickHouse applies `WHERE` to the subquery when processing it.

-If `enable_optimize_predicate_expression = 0`, then the execution time of the second query is much longer, because the `WHERE` clause applies to all the data after the subquery finishes.
+If `enable_optimize_predicate_expression = 0`, then the execution time of the second query is much longer, because the `WHERE` clause applies to all the data after the subquery finishes.

-## fallback\_to\_stale\_replicas\_for\_distributed\_queries {#settings-fallback_to_stale_replicas_for_distributed_queries}
+## fallback\_to\_stale\_replicas\_for\_distributed\_queries {#settings-fallback_to_stale_replicas_for_distributed_queries}

-Forces a query to an out-of-date replica if updated data is not available. See [Replication](../table_engines/replication.md).
+Forces a query to an out-of-date replica if updated data is not available. See [Replication](../../engines/table_engines/mergetree_family/replication.md).

-ClickHouse selects the most relevant from the outdated replicas of the table.
+ClickHouse selects the most relevant from the outdated replicas of the table.

-Used when performing `SELECT` from a distributed table that points to replicated tables.
+Used when performing `SELECT` from a distributed table that points to replicated tables.

-By default, 1 (enabled).
+By default, 1 (enabled).

-## force\_index\_by\_date {#settings-force_index_by_date}
+## force\_index\_by\_date {#settings-force_index_by_date}

-Disables query execution if the index can’t be used by date.
+Disables query execution if the index can’t be used by date.

-Works with tables in the MergeTree family.
+Works with tables in the MergeTree family.

-If `force_index_by_date=1`, ClickHouse checks whether the query has a date key condition that can be used for restricting data ranges. If there is no suitable condition, it throws an exception. However, it does not check whether the condition reduces the amount of data to read. For example, the condition `Date != ' 2000-01-01 '` is acceptable even when it matches all the data in the table (i.e., running the query requires a full scan). For more information about ranges of data in MergeTree tables, see [MergeTree](../table_engines/mergetree.md).
+If `force_index_by_date=1`, ClickHouse checks whether the query has a date key condition that can be used for restricting data ranges. If there is no suitable condition, it throws an exception. However, it does not check whether the condition reduces the amount of data to read. For example, the condition `Date != ' 2000-01-01 '` is acceptable even when it matches all the data in the table (i.e., running the query requires a full scan). For more information about ranges of data in MergeTree tables, see [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md).
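A short sketch of the check, assuming a hypothetical MergeTree table `visits` whose date key is `EventDate`:

``` sql
SET force_index_by_date = 1;

-- Accepted: the condition restricts the EventDate key.
SELECT count() FROM visits WHERE EventDate = '2020-03-12';

-- Rejected with an exception: no usable condition on the date key.
SELECT count() FROM visits WHERE UserID = 42;
```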
-## force\_primary\_key {#force-primary-key}
+## force\_primary\_key {#force-primary-key}

-Disables query execution if indexing by the primary key is not possible.
+Disables query execution if indexing by the primary key is not possible.

-Works with tables in the MergeTree family.
+Works with tables in the MergeTree family.

-If `force_primary_key=1`, ClickHouse checks to see if the query has a primary key condition that can be used for restricting data ranges. If there is no suitable condition, it throws an exception. However, it does not check whether the condition reduces the amount of data to read. For more information about data ranges in MergeTree tables, see [MergeTree](../table_engines/mergetree.md).
+If `force_primary_key=1`, ClickHouse checks to see if the query has a primary key condition that can be used for restricting data ranges. If there is no suitable condition, it throws an exception. However, it does not check whether the condition reduces the amount of data to read. For more information about data ranges in MergeTree tables, see [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md).

-## format\_schema {#format-schema}
+## format\_schema {#format-schema}

-This parameter is useful when you are using formats that require a schema definition, such as [Cap’n Proto](https://capnproto.org/) or [Protobuf](https://developers.google.com/protocol-buffers/). The value depends on the format.
+This parameter is useful when you are using formats that require a schema definition, such as [Cap’n Proto](https://capnproto.org/) or [Protobuf](https://developers.google.com/protocol-buffers/). The value depends on the format.

-## fsync\_metadata {#fsync-metadata}
+## fsync\_metadata {#fsync-metadata}

-Enables or disables [fsync](http://pubs.opengroup.org/onlinepubs/9699919799/functions/fsync.html) when writing `.sql` files. Enabled by default.
+Enables or disables [fsync](http://pubs.opengroup.org/onlinepubs/9699919799/functions/fsync.html) when writing `.sql` files. Enabled by default.

-It makes sense to disable it if the server has millions of tiny tables that are constantly being created and destroyed.
+It makes sense to disable it if the server has millions of tiny tables that are constantly being created and destroyed.

-## enable\_http\_compression {#settings-enable_http_compression}
+## enable\_http\_compression {#settings-enable_http_compression}

-Enables or disables data compression in the response to an HTTP request.
+Enables or disables data compression in the response to an HTTP request.

-For more information, read the [HTTP interface description](../../interfaces/http.md).
+For more information, read the [HTTP interface description](../../interfaces/http.md).

-Possible values:
+Possible values:

- 0 — Disabled.
- 1 — Enabled.

-Default value: 0.
+Default value: 0.

-## http\_zlib\_compression\_level {#settings-http_zlib_compression_level}
+## http\_zlib\_compression\_level {#settings-http_zlib_compression_level}

-Sets the level of data compression in the response to an HTTP request if [enable\_http\_compression = 1](#settings-enable_http_compression).
+Sets the level of data compression in the response to an HTTP request if [enable\_http\_compression = 1](#settings-enable_http_compression).

-Possible values: Numbers from 1 to 9.
+Possible values: Numbers from 1 to 9.

-Default value: 3.
+Default value: 3.

-## http\_native\_compression\_disable\_checksumming\_on\_decompress {#settings-http_native_compression_disable_checksumming_on_decompress}
+## http\_native\_compression\_disable\_checksumming\_on\_decompress {#settings-http_native_compression_disable_checksumming_on_decompress}

-Enables or disables checksum verification when decompressing the HTTP POST data from the client. Used only for ClickHouse native compression format (not used with `gzip` or `deflate`).
+Enables or disables checksum verification when decompressing the HTTP POST data from the client. Used only for the ClickHouse native compression format (not used with `gzip` or `deflate`).

-For more information, read the [HTTP interface description](../../interfaces/http.md).
+For more information, read the [HTTP interface description](../../interfaces/http.md).

-Possible values:
+Possible values:

- 0 — Disabled.
- 1 — Enabled.

-Default value: 0.
+Default value: 0.
-## send\_progress\_in\_http\_headers {#settings-send_progress_in_http_headers}
+## send\_progress\_in\_http\_headers {#settings-send_progress_in_http_headers}

-Enables or disables `X-ClickHouse-Progress` HTTP response headers in `clickhouse-server` responses.
+Enables or disables `X-ClickHouse-Progress` HTTP response headers in `clickhouse-server` responses.

-For more information, read the [HTTP interface description](../../interfaces/http.md).
+For more information, read the [HTTP interface description](../../interfaces/http.md).

-Possible values:
+Possible values:

- 0 — Disabled.
- 1 — Enabled.

-Default value: 0.
+Default value: 0.

-## max\_http\_get\_redirects {#setting-max_http_get_redirects}
+## max\_http\_get\_redirects {#setting-max_http_get_redirects}

-Limits the maximum number of HTTP GET redirect hops for [URL](../table_engines/url.md)-engine tables. The setting applies to both types of tables: those created by the [CREATE TABLE](../../query_language/create/#create-table-query) query and by the [url](../../query_language/table_functions/url.md) table function.
+Limits the maximum number of HTTP GET redirect hops for [URL](../../engines/table_engines/special/url.md)-engine tables. The setting applies to both types of tables: those created by the [CREATE TABLE](../../query_language/create/#create-table-query) query and by the [url](../../sql_reference/table_functions/url.md) table function.

-Possible values:
+Possible values:

-- Any positive integer number of hops.
+- Any positive integer number of hops.
- 0 — No hops allowed.

-Default value: 0.
+Default value: 0.

-## input\_format\_allow\_errors\_num {#settings-input_format_allow_errors_num}
+## input\_format\_allow\_errors\_num {#settings-input_format_allow_errors_num}

-Sets the maximum number of acceptable errors when reading from text formats (CSV, TSV, etc.).
+Sets the maximum number of acceptable errors when reading from text formats (CSV, TSV, etc.).

-The default value is 0.
+The default value is 0.

-Always pair it with `input_format_allow_errors_ratio`.
+Always pair it with `input_format_allow_errors_ratio`.

-If an error occurred while reading rows but the error counter is still less than `input_format_allow_errors_num`, ClickHouse ignores the row and moves on to the next one.
+If an error occurred while reading rows but the error counter is still less than `input_format_allow_errors_num`, ClickHouse ignores the row and moves on to the next one.

-If both `input_format_allow_errors_num` and `input_format_allow_errors_ratio` are exceeded, ClickHouse throws an exception.
+If both `input_format_allow_errors_num` and `input_format_allow_errors_ratio` are exceeded, ClickHouse throws an exception.

-## input\_format\_allow\_errors\_ratio {#settings-input_format_allow_errors_ratio}
+## input\_format\_allow\_errors\_ratio {#settings-input_format_allow_errors_ratio}

-Sets the maximum percentage of errors allowed when reading from text formats (CSV, TSV, etc.).
-The percentage of errors is set as a floating-point number between 0 and 1.
+Sets the maximum percentage of errors allowed when reading from text formats (CSV, TSV, etc.).
+The percentage of errors is set as a floating-point number between 0 and 1.

-The default value is 0.
+The default value is 0.

-Always pair it with `input_format_allow_errors_num`.
+Always pair it with `input_format_allow_errors_num`.

-If an error occurred while reading rows but the error counter is still less than `input_format_allow_errors_ratio`, ClickHouse ignores the row and moves on to the next one.
+If an error occurred while reading rows but the error counter is still less than `input_format_allow_errors_ratio`, ClickHouse ignores the row and moves on to the next one.

-If both `input_format_allow_errors_num` and `input_format_allow_errors_ratio` are exceeded, ClickHouse throws an exception.
+If both `input_format_allow_errors_num` and `input_format_allow_errors_ratio` are exceeded, ClickHouse throws an exception.
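A hedged sketch of how the two thresholds combine when run from clickhouse-client; the table `t(a UInt32, s String)` and the data are invented:

``` sql
-- Skip up to 10 malformed rows, but never more than 1% of the input.
SET input_format_allow_errors_num = 10;
SET input_format_allow_errors_ratio = 0.01;

INSERT INTO t FORMAT CSV
1,"ok"
oops,"malformed first field; skipped while within both bounds"
2,"ok"
```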
-## input\_format\_values\_interpret\_expressions {#settings-input_format_values_interpret_expressions}
+## input\_format\_values\_interpret\_expressions {#settings-input_format_values_interpret_expressions}

-Enables or disables the full SQL parser if the fast stream parser can’t parse the data. This setting is used only for the [Values](../../interfaces/formats.md#data-format-values) format at the data insertion. For more information about syntax parsing, see the [Syntax](../../query_language/syntax.md) section.
+Enables or disables the full SQL parser if the fast stream parser can’t parse the data. This setting is used only for the [Values](../../interfaces/formats.md#data-format-values) format at the data insertion. For more information about syntax parsing, see the [Syntax](../../sql_reference/syntax.md) section.

-Possible values:
+Possible values:

- 0 — Disabled.

-    In this case, you must provide formatted data. See the [Formats](../../interfaces/formats.md) section.
+    In this case, you must provide formatted data. See the [Formats](../../interfaces/formats.md) section.

- 1 — Enabled.

-    In this case, you can use an SQL expression as a value, but data insertion is much slower this way. If you insert only formatted data, then ClickHouse behaves as if the setting value is 0.
+    In this case, you can use an SQL expression as a value, but data insertion is much slower this way. If you insert only formatted data, then ClickHouse behaves as if the setting value is 0.

-Default value: 1.
+Default value: 1.

-Example of Use
+Example of Use

-Insert the [DateTime](../../data_types/datetime.md) type value with the different settings.
+Insert a [DateTime](../../sql_reference/data_types/datetime.md) type value with the different settings.

``` sql
SET input_format_values_interpret_expressions = 0;
@@ -206,7 +209,7 @@ INSERT INTO datetime_t VALUES (now())
Ok.
```

-The last query is equivalent to the following:
+The last query is equivalent to the following:

``` sql
SET input_format_values_interpret_expressions = 0;
@@ -217,23 +220,23 @@ INSERT INTO datetime_t SELECT now()
Ok.
```

-## input\_format\_values\_deduce\_templates\_of\_expressions {#settings-input_format_values_deduce_templates_of_expressions}
+## input\_format\_values\_deduce\_templates\_of\_expressions {#settings-input_format_values_deduce_templates_of_expressions}

-Enables or disables template deduction for an SQL expressions in [Values](../../interfaces/formats.md#data-format-values) format. It allows to parse and interpret expressions in `Values` much faster if expressions in consecutive rows have the same structure. ClickHouse will try to deduce template of an expression, parse the following rows using this template and evaluate the expression on a batch of successfully parsed rows. For the following query:
+Enables or disables template deduction for SQL expressions in the [Values](../../interfaces/formats.md#data-format-values) format. It allows parsing and interpreting expressions in `Values` much faster if expressions in consecutive rows have the same structure. ClickHouse tries to deduce the template of an expression, parse the following rows using this template and evaluate the expression on a batch of successfully parsed rows. For the following query:

``` sql
INSERT INTO test VALUES (lower('Hello')), (lower('world')), (lower('INSERT')), (upper('Values')), ...
```

-- if `input_format_values_interpret_expressions=1` and `format_values_deduce_templates_of_expressions=0` expressions will be interpreted separately for each row (this is very slow for large number of rows)
-- if `input_format_values_interpret_expressions=0` and `format_values_deduce_templates_of_expressions=1` expressions in the first, second and third rows will be parsed using template `lower(String)` and interpreted together, expression is the forth row will be parsed with another template (`upper(String)`)
-- if `input_format_values_interpret_expressions=1` and `format_values_deduce_templates_of_expressions=1` - the same as in previous case, but also allows fallback to interpreting expressions separately if it’s not possible to deduce template.
+- if `input_format_values_interpret_expressions=1` and `format_values_deduce_templates_of_expressions=0`, expressions are interpreted separately for each row (this is very slow for a large number of rows)
+- if `input_format_values_interpret_expressions=0` and `format_values_deduce_templates_of_expressions=1`, expressions in the first, second and third rows are parsed using the template `lower(String)` and interpreted together; the expression in the fourth row is parsed with another template (`upper(String)`)
+- if `input_format_values_interpret_expressions=1` and `format_values_deduce_templates_of_expressions=1`, the same as in the previous case, but it also allows falling back to interpreting expressions separately if it’s not possible to deduce a template.

-Enabled by default.
+Enabled by default.

-## input\_format\_values\_accurate\_types\_of\_literals {#settings-input-format-values-accurate-types-of-literals}
+## input\_format\_values\_accurate\_types\_of\_literals {#settings-input-format-values-accurate-types-of-literals}

-This setting is used only when `input_format_values_deduce_templates_of_expressions = 1`. It can happen, that expressions for some column have the same structure, but contain numeric literals of different types, e.g
+This setting is used only when `input_format_values_deduce_templates_of_expressions = 1`. It can happen that expressions for some column have the same structure but contain numeric literals of different types, e.g.

``` sql
(..., abs(0), ...),             -- UInt64 literal
@@ -241,959 +244,959 @@ This setting is used only when `input_format_values_deduce_templates_of_expressi
(..., abs(-1), ...),            -- Int64 literal
```

-When this setting is enabled, ClickHouse will check the actual type of literal and will use an expression template of the corresponding type. In some cases, it may significantly slow down expression evaluation in `Values`.
-When disabled, ClickHouse may use more general type for some literals (e.g. `Float64` or `Int64` instead of `UInt64` for `42`), but it may cause overflow and precision issues.
-Enabled by default.
+When this setting is enabled, ClickHouse checks the actual type of a literal and uses an expression template of the corresponding type. In some cases, it may significantly slow down expression evaluation in `Values`.
+When disabled, ClickHouse may use a more general type for some literals (e.g. `Float64` or `Int64` instead of `UInt64` for `42`), but it may cause overflow and precision issues.
+Enabled by default.

-## input\_format\_defaults\_for\_omitted\_fields {#session_settings-input_format_defaults_for_omitted_fields}
+## input\_format\_defaults\_for\_omitted\_fields {#session_settings-input_format_defaults_for_omitted_fields}

-When performing `INSERT` queries, replace omitted input column values with default values of the respective columns.
This option only applies to [JSONEachRow](../../interfaces/formats.md#jsoneachrow), [CSV](../../interfaces/formats.md#csv) and [TabSeparated](../../interfaces/formats.md#tabseparated) formats.
+When performing `INSERT` queries, replace omitted input column values with default values of the respective columns. This option only applies to [JSONEachRow](../../interfaces/formats.md#jsoneachrow), [CSV](../../interfaces/formats.md#csv) and [TabSeparated](../../interfaces/formats.md#tabseparated) formats.

-!!! note "Note"
-    When this option is enabled, extended table metadata are sent from server to client. It consumes additional computing resources on the server and can reduce performance.
+!!! note "Note"
+    When this option is enabled, extended table metadata are sent from server to client. It consumes additional computing resources on the server and can reduce performance.

-Possible values:
+Possible values:

- 0 — Disabled.
- 1 — Enabled.

-Default value: 1.
+Default value: 1.

-## input\_format\_tsv\_empty\_as\_default {#settings-input-format-tsv-empty-as-default}
+## input\_format\_tsv\_empty\_as\_default {#settings-input-format-tsv-empty-as-default}

-When enabled, replace empty input fields in TSV with default values. For complex default expressions `input_format_defaults_for_omitted_fields` must be enabled too.
+When enabled, replace empty input fields in TSV with default values. For complex default expressions, `input_format_defaults_for_omitted_fields` must be enabled too.

-Disabled by default.
+Disabled by default.

-## input\_format\_null\_as\_default {#settings-input-format-null-as-default}
+## input\_format\_null\_as\_default {#settings-input-format-null-as-default}

-Enables or disables using default values if input data contain `NULL`, but data type of the corresponding column in not `Nullable(T)` (for text input formats).
+Enables or disables using default values if input data contain `NULL`, but the data type of the corresponding column is not `Nullable(T)` (for text input formats).

-## input\_format\_skip\_unknown\_fields {#settings-input-format-skip-unknown-fields}
+## input\_format\_skip\_unknown\_fields {#settings-input-format-skip-unknown-fields}

-Enables or disables skipping insertion of extra data.
+Enables or disables skipping insertion of extra data.

-When writing data, ClickHouse throws an exception if input data contain columns that do not exist in the target table. If skipping is enabled, ClickHouse doesn’t insert extra data and doesn’t throw an exception.
+When writing data, ClickHouse throws an exception if input data contain columns that do not exist in the target table. If skipping is enabled, ClickHouse doesn’t insert the extra data and doesn’t throw an exception.

-Supported formats:
+Supported formats:

-- [JSONEachRow](../../interfaces/formats.md#jsoneachrow)
-- [CSVWithNames](../../interfaces/formats.md#csvwithnames)
-- [TabSeparatedWithNames](../../interfaces/formats.md#tabseparatedwithnames)
+- [JSONEachRow](../../interfaces/formats.md#jsoneachrow)
+- [CSVWithNames](../../interfaces/formats.md#csvwithnames)
+- [TabSeparatedWithNames](../../interfaces/formats.md#tabseparatedwithnames)
- [TSKV](../../interfaces/formats.md#tskv)

-Possible values:
+Possible values:

- 0 — Disabled.
- 1 — Enabled.

-Default value: 0.
+Default value: 0.
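For illustration, a sketch run from clickhouse-client; the table `t(a UInt32)` and the extra field are assumptions:

``` sql
SET input_format_skip_unknown_fields = 1;

-- "b" does not exist in t; it is silently skipped instead of raising an error.
INSERT INTO t FORMAT JSONEachRow {"a": 1, "b": "ignored"}
```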
-## input\_format\_import\_nested\_json {#settings-input_format_import_nested_json}
+## input\_format\_import\_nested\_json {#settings-input_format_import_nested_json}

-Enables or disables the insertion of JSON data with nested objects.
+Enables or disables the insertion of JSON data with nested objects.

-Supported formats:
+Supported formats:

-- [JSONEachRow](../../interfaces/formats.md#jsoneachrow)
+- [JSONEachRow](../../interfaces/formats.md#jsoneachrow)

-Possible values:
+Possible values:

- 0 — Disabled.
- 1 — Enabled.

-Default value: 0.
+Default value: 0.

-See also:
+See also:

-- [Usage of Nested Structures](../../interfaces/formats.md#jsoneachrow-nested) with the `JSONEachRow` format.
+- [Usage of Nested Structures](../../interfaces/formats.md#jsoneachrow-nested) with the `JSONEachRow` format.

-## input\_format\_with\_names\_use\_header {#settings-input-format-with-names-use-header}
+## input\_format\_with\_names\_use\_header {#settings-input-format-with-names-use-header}

-Enables or disables checking the column order when inserting data.
+Enables or disables checking the column order when inserting data.

-To improve insert performance, we recommend disabling this check if you are sure that the column order of the input data is the same as in the target table.
+To improve insert performance, we recommend disabling this check if you are sure that the column order of the input data is the same as in the target table.

-Supported formats:
+Supported formats:

-- [CSVWithNames](../../interfaces/formats.md#csvwithnames)
-- [TabSeparatedWithNames](../../interfaces/formats.md#tabseparatedwithnames)
+- [CSVWithNames](../../interfaces/formats.md#csvwithnames)
+- [TabSeparatedWithNames](../../interfaces/formats.md#tabseparatedwithnames)

-Possible values:
+Possible values:

- 0 — Disabled.
- 1 — Enabled.

-Default value: 1.
+Default value: 1.

-## date\_time\_input\_format {#settings-date_time_input_format}
+## date\_time\_input\_format {#settings-date_time_input_format}

-Allows choosing a parser of the text representation of date and time.
+Allows choosing a parser of the text representation of date and time.

-The setting doesn’t apply to [date and time functions](../../query_language/functions/date_time_functions.md).
+The setting doesn’t apply to [date and time functions](../../sql_reference/functions/date_time_functions.md).

-Possible values:
+Possible values:

- `'best_effort'` — Enables extended parsing.

-    ClickHouse can parse the basic `YYYY-MM-DD HH:MM:SS` format and all [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) date and time formats. For example, `'2018-06-08T01:02:03.000Z'`.
+    ClickHouse can parse the basic `YYYY-MM-DD HH:MM:SS` format and all [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) date and time formats. For example, `'2018-06-08T01:02:03.000Z'`.

- `'basic'` — Use basic parser.

-    ClickHouse can parse only the basic `YYYY-MM-DD HH:MM:SS` format. For example, `'2019-08-20 10:18:56'`.
+    ClickHouse can parse only the basic `YYYY-MM-DD HH:MM:SS` format. For example, `'2019-08-20 10:18:56'`.

-Default value: `'basic'`.
+Default value: `'basic'`.

-See also:
+See also:

-- [DateTime data type.](../../data_types/datetime.md)
-- [Functions for working with dates and times.](../../query_language/functions/date_time_functions.md)
+- [DateTime data type.](../../sql_reference/data_types/datetime.md)
+- [Functions for working with dates and times.](../../sql_reference/functions/date_time_functions.md)

-## join\_default\_strictness {#settings-join_default_strictness}
+## join\_default\_strictness {#settings-join_default_strictness}

-Sets default strictness for [JOIN clauses](../../query_language/select.md#select-join).
+Sets default strictness for [JOIN clauses](../../sql_reference/statements/select.md#select-join).
-Possible values:
+Possible values:

-- `ALL` — If the right table has several matching rows, ClickHouse creates a [Cartesian product](https://en.wikipedia.org/wiki/Cartesian_product) from matching rows. This is the normal `JOIN` behaviour from standard SQL.
-- `ANY` — If the right table has several matching rows, only the first one found is joined. If the right table has only one matching row, the results of `ANY` and `ALL` are the same.
+- `ALL` — If the right table has several matching rows, ClickHouse creates a [Cartesian product](https://en.wikipedia.org/wiki/Cartesian_product) from matching rows. This is the normal `JOIN` behaviour from standard SQL.
+- `ANY` — If the right table has several matching rows, only the first one found is joined. If the right table has only one matching row, the results of `ANY` and `ALL` are the same.
- `ASOF` — For joining sequences with an uncertain match.
-- `Empty string` — If `ALL` or `ANY` is not specified in the query, ClickHouse throws an exception.
+- `Empty string` — If `ALL` or `ANY` is not specified in the query, ClickHouse throws an exception.

-Default value: `ALL`.
+Default value: `ALL`.

-## join\_any\_take\_last\_row {#settings-join_any_take_last_row}
+## join\_any\_take\_last\_row {#settings-join_any_take_last_row}

-Changes behaviour of join operations with `ANY` strictness.
+Changes the behaviour of join operations with `ANY` strictness.

-!!! warning "Attention"
-    This setting applies only for `JOIN` operations with [Join](../table_engines/join.md) engine tables.
+!!! warning "Attention"
+    This setting applies only for `JOIN` operations with [Join](../../engines/table_engines/special/join.md) engine tables.

-Possible values:
+Possible values:

- 0 — If the right table has more than one matching row, only the first one found is joined.
- 1 — If the right table has more than one matching row, only the last one found is joined.

-Default value: 0.
+Default value: 0.

-See also:
+See also:

-- [JOIN clause](../../query_language/select.md#select-join)
-- [Join table engine](../table_engines/join.md)
-- [join\_default\_strictness](#settings-join_default_strictness)
+- [JOIN clause](../../sql_reference/statements/select.md#select-join)
+- [Join table engine](../../engines/table_engines/special/join.md)
+- [join\_default\_strictness](#settings-join_default_strictness)

-## join\_use\_nulls {#join_use_nulls}
+## join\_use\_nulls {#join_use_nulls}

-Sets the type of [JOIN](../../query_language/select.md) behavior. When merging tables, empty cells may appear. ClickHouse fills them differently based on this setting.
+Sets the type of [JOIN](../../sql_reference/statements/select.md) behavior. When merging tables, empty cells may appear. ClickHouse fills them differently based on this setting.

-Possible values:
+Possible values:

- 0 — The empty cells are filled with the default value of the corresponding field type.
-- 1 — `JOIN` behaves the same way as in standard SQL. The type of the corresponding field is converted to [Nullable](../../data_types/nullable.md#data_type-nullable), and empty cells are filled with [NULL](../../query_language/syntax.md).
+- 1 — `JOIN` behaves the same way as in standard SQL. The type of the corresponding field is converted to [Nullable](../../sql_reference/data_types/nullable.md#data_type-nullable), and empty cells are filled with [NULL](../../sql_reference/syntax.md).

-Default value: 0.
+Default value: 0.
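A minimal sketch of the effect (both tables are hypothetical):

``` sql
SET join_use_nulls = 1;

-- Rows of table_a without a match in table_b now get NULL (not a type default)
-- in b.value, and b.value becomes Nullable in the result.
SELECT a.id, b.value
FROM table_a AS a
LEFT JOIN table_b AS b ON a.id = b.id;
```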
-## max\_block\_size {#setting-max_block_size}
+## max\_block\_size {#setting-max_block_size}

-In ClickHouse, data is processed by blocks (sets of column parts). The internal processing cycles for a single block are efficient enough, but there are noticeable expenditures on each block. The `max_block_size` setting is a recommendation for what size of the block (in a count of rows) to load from tables. The block size shouldn’t be too small, so that the expenditures on each block are still noticeable, but not too large so that the query with LIMIT that is completed after the first block is processed quickly. The goal is to avoid consuming too much memory when extracting a large number of columns in multiple threads and to preserve at least some cache locality.
+In ClickHouse, data is processed by blocks (sets of column parts). The internal processing cycles for a single block are efficient enough, but there are noticeable expenditures on each block. The `max_block_size` setting is a recommendation for what size of block (in a count of rows) to load from tables. The block size shouldn’t be too small, so that the expenditures on each block are still noticeable, but not too large, so that a query with LIMIT that completes after the first block is processed quickly. The goal is to avoid consuming too much memory when extracting a large number of columns in multiple threads and to preserve at least some cache locality.

-Default value: 65,536.
+Default value: 65,536.

-Blocks the size of `max_block_size` are not always loaded from the table. If it is obvious that less data needs to be retrieved, a smaller block is processed.
+Blocks of the size of `max_block_size` are not always loaded from the table. If it is obvious that less data needs to be retrieved, a smaller block is processed.

-## preferred\_block\_size\_bytes {#preferred-block-size-bytes}
+## preferred\_block\_size\_bytes {#preferred-block-size-bytes}

-Used for the same purpose as `max_block_size`, but it sets the recommended block size in bytes by adapting it to the number of rows in the block.
-However, the block size cannot be more than `max_block_size` rows.
-By default: 1,000,000. It only works when reading from MergeTree engines.
+Used for the same purpose as `max_block_size`, but it sets the recommended block size in bytes by adapting it to the number of rows in the block.
+However, the block size cannot be more than `max_block_size` rows.
+By default: 1,000,000. It only works when reading from MergeTree engines.

-## merge\_tree\_min\_rows\_for\_concurrent\_read {#setting-merge-tree-min-rows-for-concurrent-read}
+## merge\_tree\_min\_rows\_for\_concurrent\_read {#setting-merge-tree-min-rows-for-concurrent-read}

-If the number of rows to be read from a file of a [MergeTree](../table_engines/mergetree.md) table exceeds `merge_tree_min_rows_for_concurrent_read` then ClickHouse tries to perform a concurrent reading from this file on several threads.
+If the number of rows to be read from a file of a [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md) table exceeds `merge_tree_min_rows_for_concurrent_read`, then ClickHouse tries to perform a concurrent read from this file on several threads.

-Possible values:
+Possible values:

-- Any positive integer.
+- Any positive integer.

-Default value: 163840.
+Default value: 163840.

-## merge\_tree\_min\_bytes\_for\_concurrent\_read {#setting-merge-tree-min-bytes-for-concurrent-read}
+## merge\_tree\_min\_bytes\_for\_concurrent\_read {#setting-merge-tree-min-bytes-for-concurrent-read}

-If the number of bytes to read from one file of a [MergeTree](../table_engines/mergetree.md)-engine table exceeds `merge_tree_min_bytes_for_concurrent_read`, then ClickHouse tries to concurrently read from this file in several threads.
+If the number of bytes to read from one file of a [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md)-engine table exceeds `merge_tree_min_bytes_for_concurrent_read`, then ClickHouse tries to concurrently read from this file in several threads.

-Possible value:
+Possible value:

-- Any positive integer.
+- Any positive integer.

-Default value: 251658240.
+Default value: 251658240.

-## merge\_tree\_min\_rows\_for\_seek {#setting-merge-tree-min-rows-for-seek}
+## merge\_tree\_min\_rows\_for\_seek {#setting-merge-tree-min-rows-for-seek}

-If the distance between two data blocks to be read in one file is less than `merge_tree_min_rows_for_seek` rows, then ClickHouse does not seek through the file but reads the data sequentially.
+If the distance between two data blocks to be read in one file is less than `merge_tree_min_rows_for_seek` rows, then ClickHouse does not seek through the file but reads the data sequentially.

-Possible values:
+Possible values:

-- Any positive integer.
+- Any positive integer.

-Default value: 0.
+Default value: 0.

-## merge\_tree\_min\_bytes\_for\_seek {#setting-merge-tree-min-bytes-for-seek}
+## merge\_tree\_min\_bytes\_for\_seek {#setting-merge-tree-min-bytes-for-seek}

-If the distance between two data blocks to be read in one file is less than `merge_tree_min_bytes_for_seek` bytes, then ClickHouse sequentially reads a range of file that contains both blocks, thus avoiding extra seek.
+If the distance between two data blocks to be read in one file is less than `merge_tree_min_bytes_for_seek` bytes, then ClickHouse sequentially reads a range of the file that contains both blocks, thus avoiding the extra seek.

-Possible values:
+Possible values:

-- Any positive integer.
+- Any positive integer.

-Default value: 0.
+Default value: 0.

-## merge\_tree\_coarse\_index\_granularity {#setting-merge-tree-coarse-index-granularity}
+## merge\_tree\_coarse\_index\_granularity {#setting-merge-tree-coarse-index-granularity}

-When searching for data, ClickHouse checks the data marks in the index file. If ClickHouse finds that required keys are in some range, it divides this range into `merge_tree_coarse_index_granularity` subranges and searches the required keys there recursively.
+When searching for data, ClickHouse checks the data marks in the index file. If ClickHouse finds that the required keys are in some range, it divides this range into `merge_tree_coarse_index_granularity` subranges and searches the required keys there recursively.

-Possible values:
+Possible values:

-- Any positive even integer.
+- Any positive even integer.

-Default value: 8.
+Default value: 8.

-## merge\_tree\_max\_rows\_to\_use\_cache {#setting-merge-tree-max-rows-to-use-cache}
+## merge\_tree\_max\_rows\_to\_use\_cache {#setting-merge-tree-max-rows-to-use-cache}

-If ClickHouse should read more than `merge_tree_max_rows_to_use_cache` rows in one query, it doesn’t use the cache of uncompressed blocks.
+If ClickHouse should read more than `merge_tree_max_rows_to_use_cache` rows in one query, it doesn’t use the cache of uncompressed blocks.

-The cache of uncompressed blocks stores data extracted for queries. ClickHouse uses this cache to speed up responses to repeated small queries. This setting protects the cache from trashing by queries that read a large amount of data. The [uncompressed\_cache\_size](../server_settings/settings.md#server-settings-uncompressed_cache_size) server setting defines the size of the cache of uncompressed blocks.
+The cache of uncompressed blocks stores data extracted for queries. ClickHouse uses this cache to speed up responses to repeated small queries.
This setting protects the cache from trashing by queries that read a large amount of data. The [uncompressed\_cache\_size](../server_configuration_parameters/settings.md#server-settings-uncompressed_cache_size) server setting defines the size of the cache of uncompressed blocks.

-Possible values:
+Possible values:

-- Any positive integer.
+- Any positive integer.

Default value: 128 ✕ 8192.

-## merge\_tree\_max\_bytes\_to\_use\_cache {#setting-merge-tree-max-bytes-to-use-cache}
+## merge\_tree\_max\_bytes\_to\_use\_cache {#setting-merge-tree-max-bytes-to-use-cache}

-If ClickHouse should read more than `merge_tree_max_bytes_to_use_cache` bytes in one query, it doesn’t use the cache of uncompressed blocks.
+If ClickHouse should read more than `merge_tree_max_bytes_to_use_cache` bytes in one query, it doesn’t use the cache of uncompressed blocks.

-The cache of uncompressed blocks stores data extracted for queries. ClickHouse uses this cache to speed up responses to repeated small queries. This setting protects the cache from trashing by queries that read a large amount of data. The [uncompressed\_cache\_size](../server_settings/settings.md#server-settings-uncompressed_cache_size) server setting defines the size of the cache of uncompressed blocks.
+The cache of uncompressed blocks stores data extracted for queries. ClickHouse uses this cache to speed up responses to repeated small queries. This setting protects the cache from trashing by queries that read a large amount of data. The [uncompressed\_cache\_size](../server_configuration_parameters/settings.md#server-settings-uncompressed_cache_size) server setting defines the size of the cache of uncompressed blocks.

-Possible value:
+Possible value:

-- Any positive integer.
+- Any positive integer.

-Default value: 2013265920.
+Default value: 2013265920.

-## min\_bytes\_to\_use\_direct\_io {#settings-min-bytes-to-use-direct-io}
+## min\_bytes\_to\_use\_direct\_io {#settings-min-bytes-to-use-direct-io}

-The minimum data volume required for using direct I/O access to the storage disk.
+The minimum data volume required for using direct I/O access to the storage disk.

-ClickHouse uses this setting when reading data from tables. If the total storage volume of all the data to be read exceeds `min_bytes_to_use_direct_io` bytes, then ClickHouse reads the data from the storage disk with the `O_DIRECT` option.
+ClickHouse uses this setting when reading data from tables. If the total storage volume of all the data to be read exceeds `min_bytes_to_use_direct_io` bytes, then ClickHouse reads the data from the storage disk with the `O_DIRECT` option.

-Possible values:
+Possible values:

- 0 — Direct I/O is disabled.
-- Positive integer.
+- Positive integer.

-Default value: 0.
+Default value: 0.

-## log\_queries {#settings-log-queries}
+## log\_queries {#settings-log-queries}

-Setting up query logging.
+Sets up query logging.

-Queries sent to ClickHouse with this setup are logged according to the rules in the [query\_log](../server_settings/settings.md#server_settings-query-log) server configuration parameter.
+Queries sent to ClickHouse with this setup are logged according to the rules in the [query\_log](../server_configuration_parameters/settings.md#server_configuration_parameters-query-log) server configuration parameter.

-Example:
+Example:

``` text
log_queries=1
```

-## log\_query\_threads {#settings-log-query-threads}
+## log\_query\_threads {#settings-log-query-threads}

-Setting up query threads logging.
+Sets up logging of query threads.
-Queries’ threads runned by ClickHouse with this setup are logged according to the rules in the [query\_thread\_log](../server_settings/settings.md#server_settings-query-thread-log) server configuration parameter.
+Query threads run by ClickHouse with this setup are logged according to the rules in the [query\_thread\_log](../server_configuration_parameters/settings.md#server_configuration_parameters-query-thread-log) server configuration parameter.

-Example:
+Example:

``` text
log_query_threads=1
```

-## max\_insert\_block\_size {#settings-max_insert_block_size}
+## max\_insert\_block\_size {#settings-max_insert_block_size}

-The size of blocks to form for insertion into a table.
-This setting only applies in cases when the server forms the blocks.
-For example, for an INSERT via the HTTP interface, the server parses the data format and forms blocks of the specified size.
-But when using clickhouse-client, the client parses the data itself, and the ‘max\_insert\_block\_size’ setting on the server doesn’t affect the size of the inserted blocks.
-The setting also doesn’t have a purpose when using INSERT SELECT, since data is inserted using the same blocks that are formed after SELECT.
+The size of blocks to form for insertion into a table.
+This setting only applies in cases when the server forms the blocks.
+For example, for an INSERT via the HTTP interface, the server parses the data format and forms blocks of the specified size.
+But when using clickhouse-client, the client parses the data itself, and the ‘max\_insert\_block\_size’ setting on the server doesn’t affect the size of the inserted blocks.
+The setting also doesn’t have a purpose when using INSERT SELECT, since data is inserted using the same blocks that are formed after SELECT.

-Default value: 1,048,576.
+Default value: 1,048,576.

-The default is slightly more than `max_block_size`. The reason for this is because certain table engines (`*MergeTree`) form a data part on the disk for each inserted block, which is a fairly large entity. Similarly, `*MergeTree` tables sort data during insertion and a large enough block size allow sorting more data in RAM.
+The default is slightly more than `max_block_size`. The reason for this is that certain table engines (`*MergeTree`) form a data part on the disk for each inserted block, which is a fairly large entity. Similarly, `*MergeTree` tables sort data during insertion, and a large enough block size allows sorting more data in RAM.

-## max\_replica\_delay\_for\_distributed\_queries {#settings-max_replica_delay_for_distributed_queries}
+## max\_replica\_delay\_for\_distributed\_queries {#settings-max_replica_delay_for_distributed_queries}

-Disables lagging replicas for distributed queries. See [Replication](../../operations/table_engines/replication.md).
+Disables lagging replicas for distributed queries. See [Replication](../../engines/table_engines/mergetree_family/replication.md).

-Sets the time in seconds. If a replica lags more than the set value, this replica is not used.
+Sets the time in seconds. If a replica lags more than the set value, this replica is not used.

-Default value: 300.
+Default value: 300.

-Used when performing `SELECT` from a distributed table that points to replicated tables.
+Used when performing `SELECT` from a distributed table that points to replicated tables.

-## max\_threads {#settings-max_threads}
+## max\_threads {#settings-max_threads}

-The maximum number of query processing threads, excluding threads for retrieving data from remote servers (see the ‘max\_distributed\_connections’ parameter).
+The maximum number of query processing threads, excluding threads for retrieving data from remote servers (see the ‘max\_distributed\_connections’ parameter).

-This parameter applies to threads that perform the same stages of the query processing pipeline in parallel.
-For example, when reading from a table, if it is possible to evaluate expressions with functions, filter with WHERE and pre-aggregate for GROUP BY in parallel using at least ‘max\_threads’ number of threads, then ‘max\_threads’ are used.
+This parameter applies to threads that perform the same stages of the query processing pipeline in parallel.
+For example, when reading from a table, if it is possible to evaluate expressions with functions, filter with WHERE and pre-aggregate for GROUP BY in parallel using at least ‘max\_threads’ number of threads, then ‘max\_threads’ are used.

-Default value: the number of physical CPU cores.
+Default value: the number of physical CPU cores.

-If less than one SELECT query is normally run on a server at a time, set this parameter to a value slightly less than the actual number of processor cores.
+If less than one SELECT query is normally run on a server at a time, set this parameter to a value slightly less than the actual number of processor cores.

-For queries that are completed quickly because of a LIMIT, you can set a lower ‘max\_threads’. For example, if the necessary number of entries are located in every block and max\_threads = 8, then 8 blocks are retrieved, although it would have been enough to read just one.
+For queries that are completed quickly because of a LIMIT, you can set a lower ‘max\_threads’. For example, if the necessary number of entries are located in every block and max\_threads = 8, then 8 blocks are retrieved, although it would have been enough to read just one.

-The smaller the `max_threads` value, the less memory is consumed.
+The smaller the `max_threads` value, the less memory is consumed.

-## max\_insert\_threads {#settings-max-insert-threads}
+## max\_insert\_threads {#settings-max-insert-threads}

-The maximum number of threads to execute the `INSERT SELECT` query.
+The maximum number of threads to execute the `INSERT SELECT` query.

-Possible values:
+Possible values:

-- 0 (or 1) — `INSERT SELECT` no parallel execution.
-- Positive integer. Bigger than 1.
+- 0 (or 1) — `INSERT SELECT` has no parallel execution.
+- Positive integer, bigger than 1.

-Default value: 0.
+Default value: 0.

-Parallel `INSERT SELECT` has effect only if the `SELECT` part is executed in parallel, see [max\_threads](#settings-max_threads) setting.
-Higher values will lead to higher memory usage.
+Parallel `INSERT SELECT` has an effect only if the `SELECT` part is executed in parallel; see the [max\_threads](#settings-max_threads) setting.
+Higher values will lead to higher memory usage.
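As a sketch (the source and target tables are hypothetical; the thread counts are arbitrary):

``` sql
-- Parallel INSERT SELECT: effective only because the SELECT part itself runs in parallel.
SET max_threads = 8;
SET max_insert_threads = 4;

INSERT INTO target_table SELECT * FROM source_table;
```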
-## max\_compress\_block\_size {#max-compress-block-size}
+## max\_compress\_block\_size {#max-compress-block-size}

-The maximum size of blocks of uncompressed data before compressing for writing to a table. By default, 1,048,576 (1 MiB). If the size is reduced, the compression rate is significantly reduced, the compression and decompression speed increases slightly due to cache locality, and memory consumption is reduced. There usually isn’t any reason to change this setting.
+The maximum size of blocks of uncompressed data before compressing for writing to a table. By default, 1,048,576 (1 MiB). If the size is reduced, the compression rate is significantly reduced, the compression and decompression speed increases slightly due to cache locality, and memory consumption is reduced. There usually isn’t any reason to change this setting.

-Don’t confuse blocks for compression (a chunk of memory consisting of bytes) with blocks for query processing (a set of rows from a table).
+Don’t confuse blocks for compression (a chunk of memory consisting of bytes) with blocks for query processing (a set of rows from a table).

-## min\_compress\_block\_size {#min-compress-block-size}
+## min\_compress\_block\_size {#min-compress-block-size}

-For [MergeTree](../table_engines/mergetree.md)" tables. In order to reduce latency when processing queries, a block is compressed when writing the next mark if its size is at least ‘min\_compress\_block\_size’. By default, 65,536.
+For [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md) tables. In order to reduce latency when processing queries, a block is compressed when writing the next mark if its size is at least ‘min\_compress\_block\_size’. By default, 65,536.

-The actual size of the block, if the uncompressed data is less than ‘max\_compress\_block\_size’, is no less than this value and no less than the volume of data for one mark.
+The actual size of the block, if the uncompressed data is less than ‘max\_compress\_block\_size’, is no less than this value and no less than the volume of data for one mark.

-Let’s look at an example. Assume that ‘index\_granularity’ was set to 8192 during table creation.
+Let’s look at an example. Assume that ‘index\_granularity’ was set to 8192 during table creation.

-We are writing a UInt32-type column (4 bytes per value). When writing 8192 rows, the total will be 32 KB of data. Since min\_compress\_block\_size = 65,536, a compressed block will be formed for every two marks.
+We are writing a UInt32-type column (4 bytes per value). When writing 8192 rows, the total will be 32 KB of data. Since min\_compress\_block\_size = 65,536, a compressed block will be formed for every two marks.

-We are writing a URL column with the String type (average size of 60 bytes per value). When writing 8192 rows, the average will be slightly less than 500 KB of data. Since this is more than 65,536, a compressed block will be formed for each mark. In this case, when reading data from the disk in the range of a single mark, extra data won’t be decompressed.
+We are writing a URL column with the String type (average size of 60 bytes per value). When writing 8192 rows, the average will be slightly less than 500 KB of data. Since this is more than 65,536, a compressed block will be formed for each mark. In this case, when reading data from the disk in the range of a single mark, extra data won’t be decompressed.

-There usually isn’t any reason to change this setting.
+There usually isn’t any reason to change this setting.

-## max\_query\_size {#settings-max_query_size}
+## max\_query\_size {#settings-max_query_size}

-The maximum part of a query that can be taken to RAM for parsing with the SQL parser.
-The INSERT query also contains data for INSERT that is processed by a separate stream parser (that consumes O(1) RAM), which is not included in this restriction.
+The maximum part of a query that can be taken to RAM for parsing with the SQL parser.
+The INSERT query also contains data for INSERT that is processed by a separate stream parser (that consumes O(1) RAM), which is not included in this restriction.

-Default value: 256 KiB.
+Default value: 256 KiB.

-## interactive\_delay {#interactive-delay}
+## interactive\_delay {#interactive-delay}

-The interval in microseconds for checking whether request execution has been cancelled and sending the progress.
+The interval in microseconds for checking whether request execution has been cancelled and for sending the progress.
-Default value: 100,000 (checks for cancelling and sends the progress ten times per second).
+Default value: 100,000 (checks for cancelling and sends the progress ten times per second).

## connect\_timeout, receive\_timeout, send\_timeout {#connect-timeout-receive-timeout-send-timeout}

-Timeouts in seconds on the socket used for communicating with the client.
+Timeouts in seconds on the socket used for communicating with the client.

-Default value: 10, 300, 300.
+Default value: 10, 300, 300.

-## cancel\_http\_readonly\_queries\_on\_client\_close {#cancel-http-readonly-queries-on-client-close}
+## cancel\_http\_readonly\_queries\_on\_client\_close {#cancel-http-readonly-queries-on-client-close}

Cancels HTTP read-only queries (e.g. SELECT) when a client closes the connection without waiting for the response.

-Default value: 0
+Default value: 0.

-## poll\_interval {#poll-interval}
+## poll\_interval {#poll-interval}

-Lock in a wait loop for the specified number of seconds.
+Lock in a wait loop for the specified number of seconds.

-Default value: 10.
+Default value: 10.

-## max\_distributed\_connections {#max-distributed-connections}
+## max\_distributed\_connections {#max-distributed-connections}

-The maximum number of simultaneous connections with remote servers for distributed processing of a single query to a single Distributed table. We recommend setting a value no less than the number of servers in the cluster.
+The maximum number of simultaneous connections with remote servers for distributed processing of a single query to a single Distributed table. We recommend setting a value no less than the number of servers in the cluster.

-Default value: 1024.
+Default value: 1024.

-The following parameters are only used when creating Distributed tables (and when launching a server), so there is no reason to change them at runtime.
+The following parameters are only used when creating Distributed tables (and when launching a server), so there is no reason to change them at runtime.

-## distributed\_connections\_pool\_size {#distributed-connections-pool-size}
+## distributed\_connections\_pool\_size {#distributed-connections-pool-size}

-The maximum number of simultaneous connections with remote servers for distributed processing of all queries to a single Distributed table. We recommend setting a value no less than the number of servers in the cluster.
+The maximum number of simultaneous connections with remote servers for distributed processing of all queries to a single Distributed table. We recommend setting a value no less than the number of servers in the cluster.

-Default value: 1024.
+Default value: 1024.

-## connect\_timeout\_with\_failover\_ms {#connect-timeout-with-failover-ms}
+## connect\_timeout\_with\_failover\_ms {#connect-timeout-with-failover-ms}

-The timeout in milliseconds for connecting to a remote server for a Distributed table engine, if the ‘shard’ and ‘replica’ sections are used in the cluster definition.
-If unsuccessful, several attempts are made to connect to various replicas.
+The timeout in milliseconds for connecting to a remote server for a Distributed table engine, if the ‘shard’ and ‘replica’ sections are used in the cluster definition.
+If unsuccessful, several attempts are made to connect to various replicas.

-Default value: 50.
+Default value: 50.

-## connections\_with\_failover\_max\_tries {#connections-with-failover-max-tries}
+## connections\_with\_failover\_max\_tries {#connections-with-failover-max-tries}

-The maximum number of connection attempts with each replica for the Distributed table engine.
+The maximum number of connection attempts with each replica for the Distributed table engine.

-Default value: 3.
+Default value: 3.

-## extremes {#extremes}
+## extremes {#extremes}

-Whether to count extreme values (the minimums and maximums in columns of a query result).

Whether to count extreme values (the minimums and maximums in columns of a query result). Accepts 0 or 1. By default, 0 (disabled).
For more information, see the section “Extreme values”.

## use\_uncompressed\_cache {#setting-use_uncompressed_cache}

-Whether to use a cache of uncompressed blocks. Accepts 0 or 1. By default, 0 (disabled).
-Using the uncompressed cache (only for tables in the MergeTree family) can significantly reduce latency and increase throughput when working with a large number of short queries. Enable this setting for users who send frequent short requests. Also pay attention to the [uncompressed\_cache\_size](../server_settings/settings.md#server-settings-uncompressed_cache_size) configuration parameter (only set in the config file) – the size of uncompressed cache blocks. By default, it is 8 GiB. The uncompressed cache is filled in as needed and the least-used data is automatically deleted.
+Whether to use a cache of uncompressed blocks. Accepts 0 or 1. By default, 0 (disabled).
+Using the uncompressed cache (only for tables in the MergeTree family) can significantly reduce latency and increase throughput when working with a large number of short queries. Enable this setting for users who send frequent short requests. Also pay attention to the [uncompressed\_cache\_size](../server_configuration_parameters/settings.md#server-settings-uncompressed_cache_size) configuration parameter (only set in the config file) – the size of uncompressed cache blocks. By default, it is 8 GiB. The uncompressed cache is filled in as needed and the least-used data is automatically deleted.

For queries that read at least a somewhat large volume of data (one million rows or more), the uncompressed cache is disabled automatically to save space for truly small queries. This means that you can keep the ‘use\_uncompressed\_cache’ setting always set to 1.

## replace\_running\_query {#replace-running-query}

When using the HTTP interface, the ‘query\_id’ parameter can be passed. This is any string that serves as the query identifier.
If a query from the same user with the same ‘query\_id’ already exists at this time, the behaviour depends on the ‘replace\_running\_query’ parameter.

`0` (default) – Throw an exception (don’t allow the query to run if a query with the same ‘query\_id’ is already running).

`1` – Cancel the old query and start running the new one.

Yandex.Metrica uses this parameter set to 1 for implementing suggestions for segmentation conditions.
After entering the next character, if the old query hasn’t finished yet, it should be cancelled.

## stream\_flush\_interval\_ms {#stream-flush-interval-ms}

Works for tables with streaming in the case of a timeout, or when a thread generates [max\_insert\_block\_size](#settings-max_insert_block_size) rows.

The default value is 7500.

The smaller the value, the more often data is flushed into the table. Setting the value too low leads to poor performance.

## load\_balancing {#settings-load_balancing}

Specifies the algorithm of replicas selection that is used for distributed query processing.

ClickHouse supports the following algorithms of choosing replicas:

- [Random](#load_balancing-random) (by default)
- [Nearest hostname](#load_balancing-nearest_hostname)
- [In order](#load_balancing-in_order)
- [First or random](#load_balancing-first_or_random)

### Random (by default) {#load_balancing-random}

``` sql
load_balancing = random
```

The number of errors is counted for each replica. The query is sent to the replica with the fewest errors, and if there are several of these, to any one of them.
Disadvantages: Server proximity is not accounted for; if the replicas have different data, you will also get different data.

### Nearest Hostname {#load_balancing-nearest_hostname}

``` sql
load_balancing = nearest_hostname
```

The number of errors is counted for each replica. Every 5 minutes, the number of errors is integrally divided by 2. Thus, the number of errors is calculated for a recent time with exponential smoothing. If there is one replica with a minimal number of errors (i.e. errors occurred recently on the other replicas), the query is sent to it.
If there are multiple replicas with the same minimal number of errors, the query is sent to the replica with a hostname that is most similar to the server’s hostname in the config file (for the number of different characters in identical positions, up to the minimum length of both hostnames).

For instance, example01-01-1 and example01-01-2.yandex.ru are different in one position, while example01-01-1 and example01-02-2 differ in two places.
This method might seem primitive, but it doesn’t require external data about network topology, and it doesn’t compare IP addresses, which would be complicated for our IPv6 addresses.

Thus, if there are equivalent replicas, the closest one by name is preferred.
We can also assume that when sending a query to the same server, in the absence of failures, a distributed query will also go to the same servers. So even if different data is placed on the replicas, the query will return mostly the same results.

### In Order {#load_balancing-in_order}

``` sql
load_balancing = in_order
```

Replicas with the same number of errors are accessed in the same order as they are specified in the configuration.
This method is appropriate when you know exactly which replica is preferable.

### First or Random {#load_balancing-first_or_random}

``` sql
load_balancing = first_or_random
```

This algorithm chooses the first replica in the set or a random replica if the first is unavailable. It’s effective in cross-replication topology setups, but useless in other configurations.

The `first_or_random` algorithm solves the problem of the `in_order` algorithm. With `in_order`, if one replica goes down, the next one gets a double load while the remaining replicas handle the usual amount of traffic. When using the `first_or_random` algorithm, the load is evenly distributed among replicas that are still available.

## prefer\_localhost\_replica {#settings-prefer-localhost-replica}

Enables/disables preferential use of the localhost replica when processing distributed queries.

Possible values:

- 1 — ClickHouse always sends a query to the localhost replica if it exists.
- 0 — ClickHouse uses the balancing strategy specified by the [load\_balancing](#settings-load_balancing) setting.

Default value: 1.

!!! warning "Warning"
    Disable this setting if you use [max\_parallel\_replicas](#settings-max_parallel_replicas).

## totals\_mode {#totals-mode}

How to calculate TOTALS when HAVING is present, as well as when max\_rows\_to\_group\_by and group\_by\_overflow\_mode = ‘any’ are present.
See the section “WITH TOTALS modifier”.

## totals\_auto\_threshold {#totals-auto-threshold}

The threshold for `totals_mode = 'auto'`.
See the section “WITH TOTALS modifier”.

## max\_parallel\_replicas {#settings-max_parallel_replicas}

The maximum number of replicas for each shard when executing a query.
For consistency (to get different parts of the same data split), this option only works when the sampling key is set.
Replica lag is not controlled.

## compile {#compile}

Enable compilation of queries. By default, 0 (disabled).

The compilation is only used for part of the query-processing pipeline: for the first stage of aggregation (GROUP BY).
If this portion of the pipeline was compiled, the query may run faster due to deployment of short cycles and inlining aggregate function calls. The maximum performance improvement (up to four times faster in rare cases) is seen for queries with multiple simple aggregate functions. Typically, the performance gain is insignificant. In very rare cases, it may slow down query execution.

## min\_count\_to\_compile {#min-count-to-compile}

How many times to potentially use a compiled chunk of code before running compilation. By default, 3.
For testing, the value can be set to 0: compilation runs synchronously and the query waits for the end of the compilation process before continuing execution. For all other cases, use values starting with 1. Compilation normally takes about 5-10 seconds.

If the value is 1 or more, compilation occurs asynchronously in a separate thread. The result will be used as soon as it is ready, including queries that are currently running.

Compiled code is required for each different combination of aggregate functions used in the query and the type of keys in the GROUP BY clause.
The results of the compilation are saved in the build directory in the form of .so files. There is no restriction on the number of compilation results since they don’t use very much space. Old results will be used after server restarts, except in the case of a server upgrade – in this case, the old results are deleted.

## output\_format\_json\_quote\_64bit\_integers {#session_settings-output_format_json_quote_64bit_integers}

If the value is true, integers appear in quotes when using JSON\* Int64 and UInt64 formats (for compatibility with most JavaScript implementations); otherwise, integers are output without the quotes.

## format\_csv\_delimiter {#settings-format_csv_delimiter}

The character interpreted as a delimiter in the CSV data. By default, the delimiter is `,`.

## input\_format\_csv\_unquoted\_null\_literal\_as\_null {#settings-input_format_csv_unquoted_null_literal_as_null}

For CSV input format enables or disables parsing of unquoted `NULL` as literal (synonym for `\N`).

## output\_format\_csv\_crlf\_end\_of\_line {#settings-output-format-csv-crlf-end-of-line}

Use DOS/Windows-style line separator (CRLF) in CSV instead of Unix style (LF).

## output\_format\_tsv\_crlf\_end\_of\_line {#settings-output-format-tsv-crlf-end-of-line}

-Use DOC/Windows-style line separator (CRLF) in TSV instead of Unix style (LF).
+Use DOS/Windows-style line separator (CRLF) in TSV instead of Unix style (LF).

## insert\_quorum {#settings-insert_quorum}

Enables the quorum writes.

- If `insert_quorum < 2`, the quorum writes are disabled.
- If `insert_quorum >= 2`, the quorum writes are enabled.

Default value: 0.

Quorum writes

`INSERT` succeeds only when ClickHouse manages to correctly write data to the `insert_quorum` of replicas during the `insert_quorum_timeout`. If for any reason the number of replicas with successful writes does not reach the `insert_quorum`, the write is considered failed and ClickHouse will delete the inserted block from all the replicas where data has already been written.

All the replicas in the quorum are consistent, i.e., they contain data from all previous `INSERT` queries. The `INSERT` sequence is linearized.

When reading the data written from the `insert_quorum`, you can use the [select\_sequential\_consistency](#settings-select_sequential_consistency) option.

ClickHouse generates an exception

- If the number of available replicas at the time of the query is less than the `insert_quorum`.
- At an attempt to write data when the previous block has not yet been inserted in the `insert_quorum` of replicas. This situation may occur if the user tries to perform an `INSERT` before the previous one with the `insert_quorum` is completed.

See also:

- [insert\_quorum\_timeout](#settings-insert_quorum_timeout)
- [select\_sequential\_consistency](#settings-select_sequential_consistency)

## insert\_quorum\_timeout {#settings-insert_quorum-timeout}

Write to quorum timeout in seconds. If the timeout has passed and no write has taken place yet, ClickHouse will generate an exception and the client must repeat the query to write the same block to the same or any other replica.

Default value: 60 seconds.
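
To make the interplay of the two settings concrete, here is a minimal sketch of a session combining them (the table name `test.hits_replicated` and the inserted values are hypothetical and stand for any Replicated\* table):

``` sql
SET insert_quorum = 2;          -- require 2 replicas to confirm each INSERT
SET insert_quorum_timeout = 60; -- raise an exception if the quorum is not reached within 60 seconds
INSERT INTO test.hits_replicated VALUES (1, 'example'); -- hypothetical table and values
```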

See also:

- [insert\_quorum](#settings-insert_quorum)
- [select\_sequential\_consistency](#settings-select_sequential_consistency)

## select\_sequential\_consistency {#settings-select_sequential_consistency}

Enables or disables sequential consistency for `SELECT` queries:

Possible values:

- 0 — Disabled.
- 1 — Enabled.

Default value: 0.

Usage

When sequential consistency is enabled, ClickHouse allows the client to execute the `SELECT` query only for those replicas that contain data from all previous `INSERT` queries executed with `insert_quorum`. If the client refers to a partial replica, ClickHouse will generate an exception. The SELECT query will not include data that has not yet been written to the quorum of replicas.

See also:

- [insert\_quorum](#settings-insert_quorum)
- [insert\_quorum\_timeout](#settings-insert_quorum_timeout)

## insert\_deduplicate {#settings-insert-deduplicate}

Enables or disables block deduplication of `INSERT` (for Replicated\* tables).

Possible values:

- 0 — Disabled.
- 1 — Enabled.

Default value: 1.

-By default, blocks inserted into replicated tables by the `INSERT` statement are deduplicated (see \[Data Replication\] (../ table\_engines/replication.md).
+By default, blocks inserted into replicated tables by the `INSERT` statement are deduplicated (see [Data Replication](../../engines/table_engines/mergetree_family/replication.md)).

## deduplicate\_blocks\_in\_dependent\_materialized\_views {#settings-deduplicate-blocks-in-dependent-materialized-views}

Enables or disables the deduplication check for materialized views that receive data from Replicated\* tables.

Possible values:

0 — Disabled.
1 — Enabled.

Default value: 0.

Usage

By default, deduplication is not performed for materialized views but is done upstream, in the source table.
If an INSERTed block is skipped due to deduplication in the source table, there will be no insertion into attached materialized views. This behaviour exists to enable insertion of highly aggregated data into materialized views, for cases where inserted blocks are the same after materialized view aggregation but derived from different INSERTs into the source table.
At the same time, this behaviour “breaks” `INSERT` idempotency. If an `INSERT` into the main table was successful and `INSERT` into a materialized view failed (e.g.
because of communication failure with Zookeeper), a client will get an error and can retry the operation. However, the materialized view won’t receive the second insert because it will be discarded by deduplication in the main (source) table. The setting `deduplicate_blocks_in_dependent_materialized_views` allows for changing this behaviour. On retry, a materialized view will receive the repeat insert and will perform the deduplication check by itself,
ignoring the check result for the source table, and will insert rows lost because of the first failure.

## max\_network\_bytes {#settings-max-network-bytes}

Limits the data volume (in bytes) that is received or transmitted over the network when executing a query. This setting applies to every individual query.

Possible values:

- Positive integer.
- 0 — Data volume control is disabled.

Default value: 0.

## max\_network\_bandwidth {#settings-max-network-bandwidth}

Limits the speed of the data exchange over the network in bytes per second. This setting applies to every query.

Possible values:

- Positive integer.
- 0 — Bandwidth control is disabled.

Default value: 0.

## max\_network\_bandwidth\_for\_user {#settings-max-network-bandwidth-for-user}

Limits the speed of the data exchange over the network in bytes per second. This setting applies to all concurrently running queries performed by a single user.

Possible values:

- Positive integer.
- 0 — Control of the data speed is disabled.

Default value: 0.

## max\_network\_bandwidth\_for\_all\_users {#settings-max-network-bandwidth-for-all-users}

Limits the speed that data is exchanged at over the network in bytes per second. This setting applies to all concurrently running queries on the server.

Possible values:

- Positive integer.
- 0 — Control of the data speed is disabled.

Default value: 0.

## count\_distinct\_implementation {#settings-count_distinct_implementation}

-Specifies which of the `uniq*` functions should be used to perform the [COUNT(DISTINCT …)](../../query_language/agg_functions/reference.md#agg_function-count) construction.
+Specifies which of the `uniq*` functions should be used to perform the [COUNT(DISTINCT …)](../../sql_reference/aggregate_functions/reference.md#agg_function-count) construction.

Possible values:

-- [uniq](../../query_language/agg_functions/reference.md#agg_function-uniq)
-- [uniqCombined](../../query_language/agg_functions/reference.md#agg_function-uniqcombined)
-- [uniqCombined64](../../query_language/agg_functions/reference.md#agg_function-uniqcombined64)
-- [uniqHLL12](../../query_language/agg_functions/reference.md#agg_function-uniqhll12)
-- [uniqExact](../../query_language/agg_functions/reference.md#agg_function-uniqexact)
+- [uniq](../../sql_reference/aggregate_functions/reference.md#agg_function-uniq)
+- [uniqCombined](../../sql_reference/aggregate_functions/reference.md#agg_function-uniqcombined)
+- [uniqCombined64](../../sql_reference/aggregate_functions/reference.md#agg_function-uniqcombined64)
+- [uniqHLL12](../../sql_reference/aggregate_functions/reference.md#agg_function-uniqhll12)
+- [uniqExact](../../sql_reference/aggregate_functions/reference.md#agg_function-uniqexact)

Default value: `uniqExact`.

## skip\_unavailable\_shards {#settings-skip_unavailable_shards}

Enables or disables silently skipping of unavailable shards.

A shard is considered unavailable if all its replicas are unavailable. A replica is unavailable in the following cases:

- ClickHouse can’t connect to the replica for any reason.

    When connecting to a replica, ClickHouse performs several attempts. If all these attempts fail, the replica is considered unavailable.

- The replica can’t be resolved through DNS.

    If the replica’s hostname can’t be resolved through DNS, it can indicate the following situations:

    - The replica’s host has no DNS record. It can occur in systems with dynamic DNS, for example, [Kubernetes](https://kubernetes.io), where nodes can be unresolvable during downtime, and this is not an error.

    - Configuration error. The ClickHouse configuration file contains a wrong hostname.

Possible values:

- 1 — skipping enabled.

    If a shard is unavailable, ClickHouse returns a result based on partial data and doesn’t report node availability issues.

- 0 — skipping disabled.

    If a shard is unavailable, ClickHouse throws an exception.

Default value: 0.

## optimize\_skip\_unused\_shards {#settings-optimize_skip_unused_shards}

Enables or disables skipping of unused shards for SELECT queries that have a sharding key condition in PREWHERE/WHERE (assumes that the data is distributed by the sharding key, otherwise does nothing).

Default value: 0

## force\_optimize\_skip\_unused\_shards {#settings-force_optimize_skip_unused_shards}

Enables or disables query execution if [`optimize_skip_unused_shards`](#settings-optimize_skip_unused_shards) is enabled and skipping of unused shards is not possible. If skipping is not possible and the setting is enabled, an exception will be thrown.

Possible values:

- 0 - Disabled (does not throw)
- 1 - Disable query execution only if the table has a sharding key
- 2 - Disable query execution regardless of whether a sharding key is defined for the table

Default value: 0

## force\_optimize\_skip\_unused\_shards\_no\_nested {#settings-force_optimize_skip_unused_shards_no_nested}

Reset [`optimize_skip_unused_shards`](#settings-force_optimize_skip_unused_shards) for nested `Distributed` tables.

Possible values:

- 1 — Enabled.
- 0 — Disabled.

Default value: 0.

## optimize\_throw\_if\_noop {#setting-optimize_throw_if_noop}

-Enables or disables throwing an exception if an [OPTIMIZE](../../query_language/misc.md#misc_operations-optimize) query didn’t perform a merge.
+Enables or disables throwing an exception if an [OPTIMIZE](../../sql_reference/statements/misc.md#misc_operations-optimize) query didn’t perform a merge.

By default, `OPTIMIZE` returns successfully even if it didn’t do anything. This setting lets you differentiate these situations and get the reason in an exception message.

Possible values:

- 1 — Throwing an exception is enabled.
- 0 — Throwing an exception is disabled.

Default value: 0.

## distributed\_replica\_error\_half\_life {#settings-distributed_replica_error_half_life}

- Type: seconds
- Default value: 60 seconds

Controls how fast errors in distributed tables are zeroed. If a replica is unavailable for some time, accumulates 5 errors, and distributed\_replica\_error\_half\_life is set to 1 second, then the replica is considered normal 3 seconds after the last error.

See also:

-- [Table engine Distributed](../../operations/table_engines/distributed.md)
+- [Table engine Distributed](../../engines/table_engines/special/distributed.md)
- [distributed\_replica\_error\_cap](#settings-distributed_replica_error_cap)

## distributed\_replica\_error\_cap {#settings-distributed_replica_error_cap}

- Type: unsigned int
- Default value: 1000

The error count of each replica is capped at this value, preventing a single replica from accumulating too many errors.

See also:

-- [Table engine Distributed](../../operations/table_engines/distributed.md)
+- [Table engine Distributed](../../engines/table_engines/special/distributed.md)
- [distributed\_replica\_error\_half\_life](#settings-distributed_replica_error_half_life)

## distributed\_directory\_monitor\_sleep\_time\_ms {#distributed_directory_monitor_sleep_time_ms}

-Base interval for the [Distributed](../table_engines/distributed.md) table engine to send data. The actual interval grows exponentially in the event of errors.
+Base interval for the [Distributed](../../engines/table_engines/special/distributed.md) table engine to send data. The actual interval grows exponentially in the event of errors.

Possible values:

- A positive integer number of milliseconds.

Default value: 100 milliseconds.

## distributed\_directory\_monitor\_max\_sleep\_time\_ms {#distributed_directory_monitor_max_sleep_time_ms}

-Maximum interval for the [Distributed](../table_engines/distributed.md) table engine to send data. Limits exponential growth of the interval set in the [distributed\_directory\_monitor\_sleep\_time\_ms](#distributed_directory_monitor_sleep_time_ms) setting.
+Maximum interval for the [Distributed](../../engines/table_engines/special/distributed.md) table engine to send data. Limits exponential growth of the interval set in the [distributed\_directory\_monitor\_sleep\_time\_ms](#distributed_directory_monitor_sleep_time_ms) setting.

Possible values:

- A positive integer number of milliseconds.

Default value: 30000 milliseconds (30 seconds).
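
As an illustrative sketch (assuming, as for the other settings on this page, that both values can be adjusted at the session level), the retry window could be tuned like this:

``` sql
SET distributed_directory_monitor_sleep_time_ms = 100;       -- base interval between send attempts
SET distributed_directory_monitor_max_sleep_time_ms = 30000; -- cap for the exponentially growing interval on errors
```

With these values, repeated send errors grow the wait exponentially from 100 milliseconds until it is clamped at 30 seconds.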

## distributed\_directory\_monitor\_batch\_inserts {#distributed_directory_monitor_batch_inserts}

Enables/disables sending of inserted data in batches.

-When batch sending is enabled, the [Distributed](../table_engines/distributed.md) table engine tries to send multiple files of inserted data in one operation instead of sending them separately. Batch sending improves cluster performance by better-utilizing server and network resources.
+When batch sending is enabled, the [Distributed](../../engines/table_engines/special/distributed.md) table engine tries to send multiple files of inserted data in one operation instead of sending them separately. Batch sending improves cluster performance by better utilizing server and network resources.

Possible values:

- 1 — Enabled.
- 0 — Disabled.

Default value: 0.

## os\_thread\_priority {#setting-os-thread-priority}

Sets the priority ([nice](https://en.wikipedia.org/wiki/Nice_(Unix))) for threads that execute queries. The OS scheduler considers this priority when choosing the next thread to run on each available CPU core.

!!! warning "Warning"
    To use this setting, you need to set the `CAP_SYS_NICE` capability. The `clickhouse-server` package sets it up during installation. Some virtual environments don’t allow you to set the `CAP_SYS_NICE` capability. In this case, `clickhouse-server` shows a message about it at the start.

Possible values:

- You can set values in the range `[-20, 19]`.

Lower values mean higher priority. Threads with low `nice` priority values are executed more frequently than threads with high values. High values are preferable for long-running non-interactive queries because it allows them to quickly give up resources in favour of short interactive queries when they arrive.

Default value: 0.

## query\_profiler\_real\_time\_period\_ns {#query_profiler_real_time_period_ns}

-Sets the period for a real clock timer of the [query profiler](../../operations/performance/sampling_query_profiler.md). Real clock timer counts wall-clock time.
+Sets the period for a real clock timer of the [query profiler](../../operations/optimizing_performance/sampling_query_profiler.md). The real clock timer counts wall-clock time.

Possible values:

- Positive integer number, in nanoseconds.

    Recommended values:

    - 10000000 (100 times a second) nanoseconds and less for single queries.
    - 1000000000 (once a second) for cluster-wide profiling.

- 0 for turning off the timer.

-Type: [UInt64](../../data_types/int_uint.md).
+Type: [UInt64](../../sql_reference/data_types/int_uint.md).

Default value: 1000000000 nanoseconds (once a second).

See also:

-- System table [trace\_log](../system_tables.md#system_tables-trace_log)
+- System table [trace\_log](../../operations/system_tables.md#system_tables-trace_log)

## query\_profiler\_cpu\_time\_period\_ns {#query_profiler_cpu_time_period_ns}

-Sets the period for a CPU clock timer of the [query profiler](../../operations/performance/sampling_query_profiler.md). This timer counts only CPU time.
+Sets the period for a CPU clock timer of the [query profiler](../../operations/optimizing_performance/sampling_query_profiler.md). This timer counts only CPU time.

Possible values:

- A positive integer number of nanoseconds.

    Recommended values:

    - 10000000 (100 times a second) nanoseconds and more for single queries.
    - 1000000000 (once a second) for cluster-wide profiling.

- 0 for turning off the timer.

-Type: [UInt64](../../data_types/int_uint.md).
+Type: [UInt64](../../sql_reference/data_types/int_uint.md).

Default value: 1000000000 nanoseconds.

See also:

-- System table [trace\_log](../system_tables.md#system_tables-trace_log)
+- System table [trace\_log](../../operations/system_tables.md#system_tables-trace_log)

## allow\_introspection\_functions {#settings-allow_introspection_functions}

-Enables of disables [introspections functions](../../query_language/functions/introspection.md) for query profiling.
+Enables or disables [introspection functions](../../sql_reference/functions/introspection.md) for query profiling.

Possible values:

- 1 — Introspection functions enabled.
- 0 — Introspection functions disabled.

Default value: 0.

**See Also**

-- [Sampling Query Profiler](../performance/sampling_query_profiler.md)
-- System table [trace\_log](../system_tables.md#system_tables-trace_log)
+- [Sampling Query Profiler](../optimizing_performance/sampling_query_profiler.md)
+- System table [trace\_log](../../operations/system_tables.md#system_tables-trace_log)

## input\_format\_parallel\_parsing {#input-format-parallel-parsing}

- Type: bool
- Default value: True

Enables order-preserving parallel parsing of data formats. Supported only for the TSV, TSKV, CSV and JSONEachRow formats.

## min\_chunk\_bytes\_for\_parallel\_parsing {#min-chunk-bytes-for-parallel-parsing}

- Type: unsigned int
- Default value: 1 MiB

The minimum chunk size in bytes, which each thread will parse in parallel.
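
As a usage sketch, parallel parsing can be enabled and the per-thread chunk threshold raised for bulk loads (the setting names are taken verbatim from this page; the 10 MiB value is only an example):

``` sql
SET input_format_parallel_parsing = 1;               -- order-preserving parallel parsing for TSV/CSV/JSONEachRow input
SET min_chunk_bytes_for_parallel_parsing = 10485760; -- each thread parses chunks of at least 10 MiB
```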

## output\_format\_avro\_codec {#settings-output_format_avro_codec}

Sets the compression codec used for the output Avro file.

Type: string

Possible values:

- `null` — No compression
- `deflate` — Compress with Deflate (zlib)
- `snappy` — Compress with [Snappy](https://google.github.io/snappy/)

Default value: `snappy` (if available) or `deflate`.

## output\_format\_avro\_sync\_interval {#settings-output_format_avro_sync_interval}

Sets the minimum data size (in bytes) between synchronization markers for the output Avro file.

Type: unsigned int

Possible values: 32 (32 bytes) - 1073741824 (1 GiB)

Default value: 32768 (32 KiB)

## format\_avro\_schema\_registry\_url {#settings-format_avro_schema_registry_url}

Sets the Confluent Schema Registry URL to use with the [AvroConfluent](../../interfaces/formats.md#data-format-avro-confluent) format.

Type: URL

Default value: Empty

[Original article](https://clickhouse.tech/docs/en/operations/settings/settings/)

diff --git a/docs/fa/operations/settings/settings_profiles.md b/docs/fa/operations/settings/settings_profiles.md
index 3c694c0889e..d23abba1ac1 100644
--- a/docs/fa/operations/settings/settings_profiles.md
+++ b/docs/fa/operations/settings/settings_profiles.md
@@ -1,23 +1,27 @@
 ---
-en_copy: true
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 61
+toc_title: "Settings Profiles"
 ---

# Settings Profiles {#settings-profiles}

A settings profile is a collection of settings grouped under the same name. Each ClickHouse user has a profile.
To apply all the settings in a profile, set the `profile` setting.

Example:

Install the `web` profile.

``` sql
SET profile = 'web'
```

Settings profiles are declared in the user config file. This is usually `users.xml`.

Example:

``` xml
@@ -61,8 +65,8 @@ Example:
```

The example specifies two profiles: `default` and `web`.
The `default` profile has a special purpose: it must always be present and is applied when starting the server. In other words, the `default` profile contains default settings. The `web` profile is a regular profile that can be set using the `SET` query or using a URL parameter in an HTTP query.

Settings profiles can inherit from each other. To use inheritance, indicate one or multiple `profile` settings before the other settings that are listed in the profile. In case one setting is defined in different profiles, the latest defined one is used.

[Original article](https://clickhouse.tech/docs/en/operations/settings/settings_profiles/)

diff --git a/docs/fa/operations/settings/settings_users.md b/docs/fa/operations/settings/settings_users.md
index 8b852380f5b..5f5cb6762ea 100644
--- a/docs/fa/operations/settings/settings_users.md
+++ b/docs/fa/operations/settings/settings_users.md
@@ -1,12 +1,15 @@
 ---
-en_copy: true
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 63
+toc_title: "User Settings"
 ---

# User Settings {#user-settings}

The `users` section of the `user.xml` configuration file contains user settings.

Structure of the `users` section:

``` xml
@@ -35,98 +38,98 @@ Structure of the `users` section:
```

### user\_name/password {#user-namepassword}

The password can be specified in plaintext or in SHA256 (hex format).

- To assign a password in plaintext (**not recommended**), place it in a `password` element.

    For example, `qwerty`. The password can be left blank.

- To assign a password using its SHA256 hash, place it in a `password_sha256_hex` element.

    For example, `65e84be33532fb784c48129675f9eff3a682b27168c0ea744b2cf58ee02337c5`.

    Example of how to generate a password from shell:

        PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha256sum | tr -d '-'

    The first line of the result is the password. The second line is the corresponding SHA256 hash.

- For compatibility with MySQL clients, the password can be specified as a double SHA1 hash. Place it in a `password_double_sha1_hex` element.

    For example, `08b4a0f1de6ad37da17359e592c8d74788a83eb0`.

    Example of how to generate a password from shell:

        PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha1sum | tr -d '-' | xxd -r -p | sha1sum | tr -d '-'

    The first line of the result is the password. The second line is the corresponding double SHA1 hash.

### user\_name/networks {#user-namenetworks}

List of networks from which the user can connect to the ClickHouse server.

Each element of the list can have one of the following forms:

- `` — IP address or network mask.

    Examples: `213.180.204.3`, `10.0.0.1/8`, `10.0.0.1/255.255.255.0`, `2a02:6b8::3`, `2a02:6b8::3/64`, `2a02:6b8::3/ffff:ffff:ffff:ffff::`.

- `` — Hostname.

    Example: `example01.host.ru`.

    To check access, a DNS query is performed, and all returned IP addresses are compared to the peer address.

- `` — Regular expression for hostnames.

    Example, `^example\d\d-\d\d-\d\.host\.ru$`

    To check access, a [DNS PTR query](https://en.wikipedia.org/wiki/Reverse_DNS_lookup) is performed for the peer address and then the specified regexp is applied. Then, another DNS query is performed for the results of the PTR query and all the received addresses are compared to the peer address. We strongly recommend that the regexp ends with $.

All results of DNS requests are cached until the server restarts.

**Examples**

To open access for the user from any network, specify:

``` xml
::/0
```

!!! warning "Warning"
    It’s insecure to open access from any network unless you have a firewall properly configured or the server is not directly connected to the Internet.

To open access only from localhost, specify:

``` xml
::1
127.0.0.1
```

### user\_name/profile {#user-nameprofile}

You can assign a settings profile for the user. Settings profiles are configured in a separate section of the `users.xml` file. For more information, see [Profiles of Settings](settings_profiles.md).

### user\_name/quota {#user-namequota}

Quotas allow you to track or limit resource usage over a period of time.
Quotas are configured in the `quotas`
section of the `users.xml` configuration file.

You can assign a quotas set for the user. For a detailed description of quotas configuration, see [Quotas](../quotas.md#quotas).

### user\_name/databases {#user-namedatabases}

In this section, you can limit rows that are returned by ClickHouse for `SELECT` queries made by the current user, thus implementing basic row-level security.

**Example**

The following configuration forces that user `user1` can only see the rows of `table1` as the result of `SELECT` queries, where the value of the `id` field is 1000.

``` xml
@@ -140,6 +143,6 @@ The following configuration forces that user `user1` can only see the rows of `t
```

-The `filter` can be any expression resulting in a [UInt8](../../data_types/int_uint.md)-type value. It usually contains comparisons and logical operators. Rows from `database_name.table1` where filter results to 0 are not returned for this user. The filtering is incompatible with `PREWHERE` operations and disables `WHERE→PREWHERE` optimization.
+The `filter` can be any expression resulting in a [UInt8](../../sql_reference/data_types/int_uint.md)-type value. It usually contains comparisons and logical operators. Rows from `database_name.table1` for which the filter results in 0 are not returned for this user. The filtering is incompatible with `PREWHERE` operations and disables the `WHERE→PREWHERE` optimization.

[Original article](https://clickhouse.tech/docs/en/operations/settings/settings_users/)

diff --git a/docs/fa/operations/system_tables.md b/docs/fa/operations/system_tables.md
index af47b99222a..6d393432c18 100644
--- a/docs/fa/operations/system_tables.md
+++ b/docs/fa/operations/system_tables.md
@@ -1,25 +1,28 @@
 ---
-en_copy: true
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 52
+toc_title: "System Tables"
 ---

# System tables {#system-tables}

System tables are used for implementing part of the system’s functionality, and for providing access to information about how the system is working.
You can’t delete a system table (but you can perform DETACH).
System tables don’t have files with data on the disk or files with metadata. The server creates all the system tables when it starts.
System tables are read-only.
They are located in the ‘system’ database.
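
As a quick illustration (standard ClickHouse SQL, independent of this patch), the system tables can be explored directly:

``` sql
SHOW TABLES FROM system;           -- list the available system tables
SELECT name FROM system.databases; -- the read-only 'system' database appears here too
```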
-## system.asynchronous\_metrics {#system_tables-asynchronous_metrics} +## سیستم.\_نامهنویسی ناهمزمان {#system_tables-asynchronous_metrics} -Contains metrics that are calculated periodically in the background. For example, the amount of RAM in use. +شامل معیارهای که به صورت دوره ای در پس زمینه محاسبه می شود. مثلا, مقدار رم در حال استفاده. -Columns: +ستونها: -- `metric` ([String](../data_types/string.md)) — Metric name. -- `value` ([Float64](../data_types/float.md)) — Metric value. +- `metric` ([رشته](../sql_reference/data_types/string.md)) — Metric name. +- `value` ([جسم شناور64](../sql_reference/data_types/float.md)) — Metric value. -**Example** +**مثال** ``` sql SELECT * FROM system.asynchronous_metrics LIMIT 10 @@ -40,18 +43,18 @@ SELECT * FROM system.asynchronous_metrics LIMIT 10 └─────────────────────────────────────────┴────────────┘ ``` -**See Also** +**همچنین نگاه کنید به** -- [Monitoring](monitoring.md) — Base concepts of ClickHouse monitoring. -- [system.metrics](#system_tables-metrics) — Contains instantly calculated metrics. -- [system.events](#system_tables-events) — Contains a number of events that have occurred. -- [system.metric\_log](#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` и `system.events`. +- [نظارت](monitoring.md) — Base concepts of ClickHouse monitoring. +- [سیستم.متریک](#system_tables-metrics) — Contains instantly calculated metrics. +- [سیستم.رویدادها](#system_tables-events) — Contains a number of events that have occurred. +- [سیستم.\_اشکالزدایی](#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` и `system.events`. -## system.clusters {#system-clusters} +## سیستم.خوشه {#system-clusters} -Contains information about clusters available in the config file and the servers in them. +حاوی اطلاعاتی در مورد خوشه های موجود در فایل پیکربندی و سرورهای موجود در ان. -Columns: +ستونها: - `cluster` (String) — The cluster name. - `shard_num` (UInt32) — The shard number in the cluster, starting from 1. @@ -61,30 +64,30 @@ Columns: - `host_address` (String) — The host IP address obtained from DNS. - `port` (UInt16) — The port to use for connecting to the server. - `user` (String) — The name of the user for connecting to the server. -- `errors_count` (UInt32) - number of times this host failed to reach replica. -- `estimated_recovery_time` (UInt32) - seconds left until replica error count is zeroed and it is considered to be back to normal. +- `errors_count` (اوینت32) - تعداد دفعاتی که این میزبان موفق به رسیدن به ماکت. +- `estimated_recovery_time` (اوینت32) - ثانیه به سمت چپ تا زمانی که تعداد خطا ماکت صفر است و در نظر گرفته می شود به حالت عادی. -Please note that `errors_count` is updated once per query to the cluster, but `estimated_recovery_time` is recalculated on-demand. So there could be a case of non-zero `errors_count` and zero `estimated_recovery_time`, that next query will zero `errors_count` and try to use replica as if it has no errors. +لطفا توجه داشته باشید که `errors_count` یک بار در هر پرس و جو به خوشه به روز, ولی `estimated_recovery_time` بر روی تقاضا محاسبه شده است. بنابراین می تواند یک مورد غیر صفر باشد `errors_count` و صفر `estimated_recovery_time`, که پرس و جو بعدی صفر خواهد شد `errors_count` و سعی کنید به استفاده از ماکت به عنوان اگر هیچ خطا. 
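+As an illustration of the error-tracking columns described above, the following sketch (not from the original page; any configured cluster will do) shows replicas that recently failed and how long until their error count resets:
+
+``` sql
+SELECT cluster, shard_num, replica_num, host_name, errors_count, estimated_recovery_time
+FROM system.clusters
+WHERE errors_count > 0
+```
+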
-**See also** +**همچنین نگاه کنید به** -- [Table engine Distributed](table_engines/distributed.md) -- [distributed\_replica\_error\_cap setting](settings/settings.md#settings-distributed_replica_error_cap) -- [distributed\_replica\_error\_half\_life setting](settings/settings.md#settings-distributed_replica_error_half_life) +- [موتور جدول توزیع شده است](../engines/table_engines/special/distributed.md) +- [تنظیمات \_فرهنگ توزیع میشود](settings/settings.md#settings-distributed_replica_error_cap) +- [پخش \_راپیشا\_را\_را\_را\_حالف\_لایف تنظیم](settings/settings.md#settings-distributed_replica_error_half_life) -## system.columns {#system-columns} +## سیستم.ستونها {#system-columns} -Contains information about columns in all the tables. +حاوی اطلاعات در مورد ستون در تمام جداول. -You can use this table to get information similar to the [DESCRIBE TABLE](../query_language/misc.md#misc-describe-table) query, but for multiple tables at once. +شما می توانید با استفاده از این جدول برای دریافت اطلاعات مشابه به [DESCRIBE TABLE](../sql_reference/statements/misc.md#misc-describe-table) پرس و جو, اما برای جداول متعدد در یک بار. -The `system.columns` table contains the following columns (the column type is shown in brackets): +این `system.columns` جدول شامل ستون های زیر (نوع ستون در براکت نشان داده شده است): - `database` (String) — Database name. - `table` (String) — Table name. - `name` (String) — Column name. - `type` (String) — Column type. -- `default_kind` (String) — Expression type (`DEFAULT`, `MATERIALIZED`, `ALIAS`) for the default value, or an empty string if it is not defined. +- `default_kind` (String) — Expression type (`DEFAULT`, `MATERIALIZED`, `ALIAS`) برای مقدار پیش فرض, و یا یک رشته خالی اگر تعریف نشده است. - `default_expression` (String) — Expression for the default value, or an empty string if it is not defined. - `data_compressed_bytes` (UInt64) — The size of compressed data, in bytes. - `data_uncompressed_bytes` (UInt64) — The size of decompressed data, in bytes. @@ -95,15 +98,15 @@ The `system.columns` table contains the following columns (the column type is sh - `is_in_primary_key` (UInt8) — Flag that indicates whether the column is in the primary key expression. - `is_in_sampling_key` (UInt8) — Flag that indicates whether the column is in the sampling key expression. -## system.contributors {#system-contributors} +## سیستم.یاریدهندکان {#system-contributors} -Contains information about contributors. All constributors in random order. The order is random at query execution time. +حاوی اطلاعات در مورد همکاران. همه مربیان به صورت تصادفی. سفارش تصادفی در زمان اجرای پرس و جو است. -Columns: +ستونها: - `name` (String) — Contributor (author) name from git log. -**Example** +**مثال** ``` sql SELECT * FROM system.contributors LIMIT 10 @@ -124,7 +127,7 @@ SELECT * FROM system.contributors LIMIT 10 └──────────────────┘ ``` -To find out yourself in the table, use a query: +برای پیدا کردن خود را در جدول, استفاده از یک پرس و جو: ``` sql SELECT * FROM system.contributors WHERE name='Olga Khvostikova' @@ -136,21 +139,21 @@ SELECT * FROM system.contributors WHERE name='Olga Khvostikova' └──────────────────┘ ``` -## system.databases {#system-databases} +## سیستم.پایگاههای داده {#system-databases} -This table contains a single String column called ‘name’ – the name of a database. -Each database that the server knows about has a corresponding entry in the table. -This system table is used for implementing the `SHOW DATABASES` query. +این جدول شامل یک ستون رشته ای به نام ‘name’ – the name of a database. 
+هر پایگاه داده که سرور می داند در مورد یک ورودی مربوطه را در جدول. +این جدول سیستم برای اجرای استفاده می شود `SHOW DATABASES` پرس و جو. -## system.detached\_parts {#system_tables-detached_parts} +## سیستم.قطعات مجزا {#system_tables-detached_parts} -Contains information about detached parts of [MergeTree](table_engines/mergetree.md) tables. The `reason` column specifies why the part was detached. For user-detached parts, the reason is empty. Such parts can be attached with [ALTER TABLE ATTACH PARTITION\|PART](../query_language/query_language/alter/#alter_attach-partition) command. For the description of other columns, see [system.parts](#system_tables-parts). If part name is invalid, values of some columns may be `NULL`. Such parts can be deleted with [ALTER TABLE DROP DETACHED PART](../query_language/query_language/alter/#alter_drop-detached). +حاوی اطلاعات در مورد قطعات جدا شده از [ادغام](../engines/table_engines/mergetree_family/mergetree.md) میز این `reason` ستون مشخص می کند که چرا بخش جدا شد. برای قطعات کاربر جدا, دلیل خالی است. چنین قطعات را می توان با [ALTER TABLE ATTACH PARTITION\|PART](../query_language/query_language/alter/#alter_attach-partition) فرمان. برای توضیحات ستون های دیگر را ببینید [سیستم.قطعات](#system_tables-parts). اگر نام قسمت نامعتبر است, ارزش برخی از ستون ممکن است `NULL`. این قطعات را می توان با حذف [ALTER TABLE DROP DETACHED PART](../query_language/query_language/alter/#alter_drop-detached). -## system.dictionaries {#system-dictionaries} +## سیستم.واژهنامهها {#system-dictionaries} -Contains information about external dictionaries. +شامل اطلاعات در مورد لغت نامه های خارجی. -Columns: +ستونها: - `name` (String) — Dictionary name. - `type` (String) — Dictionary type: Flat, Hashed, Cache. @@ -163,22 +166,22 @@ Columns: - `element_count` (UInt64) — The number of items stored in the dictionary. - `load_factor` (Float64) — The percentage filled in the dictionary (for a hashed dictionary, the percentage filled in the hash table). - `creation_time` (DateTime) — The time when the dictionary was created or last successfully reloaded. -- `last_exception` (String) — Text of the error that occurs when creating or reloading the dictionary if the dictionary couldn’t be created. +- `last_exception` (String) — Text of the error that occurs when creating or reloading the dictionary if the dictionary couldn't be created. - `source` (String) — Text describing the data source for the dictionary. -Note that the amount of memory used by the dictionary is not proportional to the number of items stored in it. So for flat and cached dictionaries, all the memory cells are pre-assigned, regardless of how full the dictionary actually is. +توجه داشته باشید که مقدار حافظه مورد استفاده توسط فرهنگ لغت متناسب با تعداد اقلام ذخیره شده در این نیست. بنابراین برای لغت نامه تخت و کش, تمام سلول های حافظه از پیش تعیین شده, صرف نظر از چگونه کامل فرهنگ لغت در واقع. -## system.events {#system_tables-events} +## سیستم.رویدادها {#system_tables-events} -Contains information about the number of events that have occurred in the system. For example, in the table, you can find how many `SELECT` queries were processed since the ClickHouse server started. +حاوی اطلاعات در مورد تعدادی از حوادث که در سیستم رخ داده است. مثلا, در جدول, شما می توانید پیدا کنید که چگونه بسیاری از `SELECT` نمایش داده شد از سرور کلیک شروع پردازش شد. -Columns: +ستونها: -- `event` ([String](../data_types/string.md)) — Event name. -- `value` ([UInt64](../data_types/int_uint.md)) — Number of events occurred. 
-- `description` ([String](../data_types/string.md)) — Event description. +- `event` ([رشته](../sql_reference/data_types/string.md)) — Event name. +- `value` ([UInt64](../sql_reference/data_types/int_uint.md)) — Number of events occurred. +- `description` ([رشته](../sql_reference/data_types/string.md)) — Event description. -**Example** +**مثال** ``` sql SELECT * FROM system.events LIMIT 5 @@ -194,43 +197,43 @@ SELECT * FROM system.events LIMIT 5 └───────────────────────────────────────┴───────┴────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ ``` -**See Also** +**همچنین نگاه کنید به** -- [system.asynchronous\_metrics](#system_tables-asynchronous_metrics) — Contains periodically calculated metrics. -- [system.metrics](#system_tables-metrics) — Contains instantly calculated metrics. -- [system.metric\_log](#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` и `system.events`. -- [Monitoring](monitoring.md) — Base concepts of ClickHouse monitoring. +- [سیستم.\_نامهنویسی ناهمزمان](#system_tables-asynchronous_metrics) — Contains periodically calculated metrics. +- [سیستم.متریک](#system_tables-metrics) — Contains instantly calculated metrics. +- [سیستم.\_اشکالزدایی](#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` и `system.events`. +- [نظارت](monitoring.md) — Base concepts of ClickHouse monitoring. -## system.functions {#system-functions} +## سیستم.توابع {#system-functions} -Contains information about normal and aggregate functions. +حاوی اطلاعات در مورد توابع عادی و جمع. -Columns: +ستونها: - `name`(`String`) – The name of the function. - `is_aggregate`(`UInt8`) — Whether the function is aggregate. -## system.graphite\_retentions {#system-graphite-retentions} +## سیستم.بازداشت گرافیت {#system-graphite-retentions} -Contains information about parameters [graphite\_rollup](server_settings/settings.md#server_settings-graphite_rollup) which are used in tables with [\*GraphiteMergeTree](table_engines/graphitemergetree.md) engines. +حاوی اطلاعات در مورد پارامترها [لغزش \_ نمودار](server_configuration_parameters/settings.md#server_configuration_parameters-graphite_rollup) که در جداول با استفاده [اطلاعات دقیق](../engines/table_engines/mergetree_family/graphitemergetree.md) موتورها. -Columns: +ستونها: -- `config_name` (String) - `graphite_rollup` parameter name. -- `regexp` (String) - A pattern for the metric name. -- `function` (String) - The name of the aggregating function. -- `age` (UInt64) - The minimum age of the data in seconds. -- `precision` (UInt64) - How precisely to define the age of the data in seconds. -- `priority` (UInt16) - Pattern priority. -- `is_default` (UInt8) - Whether the pattern is the default. -- `Tables.database` (Array(String)) - Array of names of database tables that use the `config_name` parameter. -- `Tables.table` (Array(String)) - Array of table names that use the `config_name` parameter. +- `config_name` ) رشته) - `graphite_rollup` نام پارامتر. +- `regexp` (رشته) - یک الگوی برای نام متریک. +- `function` (رشته) - نام تابع جمع. +- `age` (UInt64) - حداقل سن دیتا در ثانیه. +- `precision` (اوینت64) - چگونه دقیقا به تعریف سن داده ها در ثانیه. +- `priority` (UInt16) - الگوی اولویت است. +- `is_default` (UInt8) - آیا الگوی پیش فرض است. 
+- `Tables.database` (مجموعه (رشته)) - مجموعه ای از نام جداول پایگاه داده که از `config_name` پارامتر. +- `Tables.table` (صف (رشته)) - مجموعه ای از نام جدول که با استفاده از `config_name` پارامتر. -## system.merges {#system-merges} +## سیستم.ادغام {#system-merges} -Contains information about merges and part mutations currently in process for tables in the MergeTree family. +حاوی اطلاعات در مورد ادغام و جهش بخشی در حال حاضر در روند برای جداول در خانواده ادغام. -Columns: +ستونها: - `database` (String) — The name of the database the table is in. - `table` (String) — Table name. @@ -238,7 +241,7 @@ Columns: - `progress` (Float64) — The percentage of completed work from 0 to 1. - `num_parts` (UInt64) — The number of pieces to be merged. - `result_part_name` (String) — The name of the part that will be formed as the result of merging. -- `is_mutation` (UInt8) - 1 if this process is a part mutation. +- `is_mutation` (اوینت8) - 1 اگر این فرایند جهش بخشی است. - `total_size_bytes_compressed` (UInt64) — The total size of the compressed data in the merged chunks. - `total_size_marks` (UInt64) — The total number of marks in the merged parts. - `bytes_read_uncompressed` (UInt64) — Number of bytes read, uncompressed. @@ -246,19 +249,19 @@ Columns: - `bytes_written_uncompressed` (UInt64) — Number of bytes written, uncompressed. - `rows_written` (UInt64) — Number of rows written. -## system.metrics {#system_tables-metrics} +## سیستم.متریک {#system_tables-metrics} -Contains metrics which can be calculated instantly, or have a current value. For example, the number of simultaneously processed queries or the current replica delay. This table is always up to date. +شامل معیارهای است که می تواند فورا محاسبه, و یا یک مقدار فعلی. مثلا, تعداد نمایش داده شد به طور همزمان پردازش و یا تاخیر ماکت فعلی. این جدول همیشه به روز. -Columns: +ستونها: -- `metric` ([String](../data_types/string.md)) — Metric name. -- `value` ([Int64](../data_types/int_uint.md)) — Metric value. -- `description` ([String](../data_types/string.md)) — Metric description. +- `metric` ([رشته](../sql_reference/data_types/string.md)) — Metric name. +- `value` ([Int64](../sql_reference/data_types/int_uint.md)) — Metric value. +- `description` ([رشته](../sql_reference/data_types/string.md)) — Metric description. -The list of supported metrics you can find in the [dbms/Common/CurrentMetrics.cpp](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/Common/CurrentMetrics.cpp) source file of ClickHouse. +لیستی از معیارهای پشتیبانی شده شما می توانید در [افراد زیر در این افزونه مشارکت کردهاندپردازنده](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/Common/CurrentMetrics.cpp) فایل منبع از خانه کلیک. -**Example** +**مثال** ``` sql SELECT * FROM system.metrics LIMIT 10 @@ -279,17 +282,17 @@ SELECT * FROM system.metrics LIMIT 10 └────────────────────────────┴───────┴──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ ``` -**See Also** +**همچنین نگاه کنید به** -- [system.asynchronous\_metrics](#system_tables-asynchronous_metrics) — Contains periodically calculated metrics. -- [system.events](#system_tables-events) — Contains a number of events that occurred. -- [system.metric\_log](#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` и `system.events`. -- [Monitoring](monitoring.md) — Base concepts of ClickHouse monitoring. 
+- [سیستم.\_نامهنویسی ناهمزمان](#system_tables-asynchronous_metrics) — Contains periodically calculated metrics. +- [سیستم.رویدادها](#system_tables-events) — Contains a number of events that occurred. +- [سیستم.\_اشکالزدایی](#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` и `system.events`. +- [نظارت](monitoring.md) — Base concepts of ClickHouse monitoring. -## system.metric\_log {#system_tables-metric_log} +## سیستم.\_اشکالزدایی {#system_tables-metric_log} -Contains history of metrics values from tables `system.metrics` and `system.events`, periodically flushed to disk. -To turn on metrics history collection on `system.metric_log`, create `/etc/clickhouse-server/config.d/metric_log.xml` with following content: +دارای تاریخچه معیارهای ارزش از جداول `system.metrics` و `system.events`, دوره ای به دیسک سرخ. +برای روشن کردن مجموعه تاریخچه معیارهای در `system.metric_log` ایجاد `/etc/clickhouse-server/config.d/metric_log.xml` با محتوای زیر: ``` xml @@ -302,7 +305,7 @@ To turn on metrics history collection on `system.metric_log`, create `/etc/click ``` -**Example** +**مثال** ``` sql SELECT * FROM system.metric_log LIMIT 1 FORMAT Vertical; @@ -335,50 +338,50 @@ CurrentMetric_ReplicatedChecks: 0 ... ``` -**See also** +**همچنین نگاه کنید به** -- [system.asynchronous\_metrics](#system_tables-asynchronous_metrics) — Contains periodically calculated metrics. -- [system.events](#system_tables-events) — Contains a number of events that occurred. -- [system.metrics](#system_tables-metrics) — Contains instantly calculated metrics. -- [Monitoring](monitoring.md) — Base concepts of ClickHouse monitoring. +- [سیستم.\_نامهنویسی ناهمزمان](#system_tables-asynchronous_metrics) — Contains periodically calculated metrics. +- [سیستم.رویدادها](#system_tables-events) — Contains a number of events that occurred. +- [سیستم.متریک](#system_tables-metrics) — Contains instantly calculated metrics. +- [نظارت](monitoring.md) — Base concepts of ClickHouse monitoring. -## system.numbers {#system-numbers} +## سیستم.اعداد {#system-numbers} -This table contains a single UInt64 column named ‘number’ that contains almost all the natural numbers starting from zero. -You can use this table for tests, or if you need to do a brute force search. -Reads from this table are not parallelized. +این جدول شامل یک uint64 ستون به نام ‘number’ که شامل تقریبا تمام اعداد طبیعی با شروع از صفر. +شما می توانید این جدول برای تست استفاده, و یا اگر شما نیاز به انجام یک جستجو نیروی بی رحم. +بار خوانده شده از این جدول موازی نیست. -## system.numbers\_mt {#system-numbers-mt} +## سیستم.\_شماره حساب {#system-numbers-mt} -The same as ‘system.numbers’ but reads are parallelized. The numbers can be returned in any order. -Used for tests. +همان ‘system.numbers’ اما بار خوانده شده موازی هستند. اعداد را می توان در هر سفارش بازگشت. +مورد استفاده برای تست. -## system.one {#system-one} +## سیستم.یک {#system-one} -This table contains a single row with a single ‘dummy’ UInt8 column containing the value 0. -This table is used if a SELECT query doesn’t specify the FROM clause. -This is similar to the DUAL table found in other DBMSs. +این جدول شامل یک ردیف با یک ‘dummy’ در زیر8 ستون حاوی مقدار 0. +این جدول استفاده می شود اگر پرس و جو را انتخاب کنید از بند مشخص نیست. +این شبیه میز دوگانه است که در سایر موارد یافت می شود. -## system.parts {#system_tables-parts} +## سیستم.قطعات {#system_tables-parts} -Contains information about parts of [MergeTree](table_engines/mergetree.md) tables. 
+حاوی اطلاعات در مورد بخش هایی از [ادغام](../engines/table_engines/mergetree_family/mergetree.md) میز -Each row describes one data part. +هر سطر توصیف یک بخش داده. -Columns: +ستونها: -- `partition` (String) – The partition name. To learn what a partition is, see the description of the [ALTER](../query_language/alter.md#query_language_queries_alter) query. +- `partition` (String) – The partition name. To learn what a partition is, see the description of the [ALTER](../sql_reference/statements/alter.md#query_language_queries_alter) پرس و جو. - Formats: + فرشها: - - `YYYYMM` for automatic partitioning by month. - - `any_string` when partitioning manually. + - `YYYYMM` برای پارتیشن بندی خودکار در ماه. + - `any_string` هنگامی که پارتیشن بندی دستی. - `name` (`String`) – Name of the data part. -- `active` (`UInt8`) – Flag that indicates whether the data part is active. If a data part is active, it’s used in a table. Otherwise, it’s deleted. Inactive data parts remain after merging. +- `active` (`UInt8`) – Flag that indicates whether the data part is active. If a data part is active, it's used in a table. Otherwise, it's deleted. Inactive data parts remain after merging. -- `marks` (`UInt64`) – The number of marks. To get the approximate number of rows in a data part, multiply `marks` by the index granularity (usually 8192) (this hint doesn’t work for adaptive granularity). +- `marks` (`UInt64`) – The number of marks. To get the approximate number of rows in a data part, multiply `marks` با دانه دانه دانه شاخص (معمولا 8192) (این اشاره برای دانه دانه تطبیقی کار نمی کند). - `rows` (`UInt64`) – The number of rows. @@ -418,7 +421,7 @@ Columns: - `primary_key_bytes_in_memory_allocated` (`UInt64`) – The amount of memory (in bytes) reserved for primary key values. -- `is_frozen` (`UInt8`) – Flag that shows that a partition data backup exists. 1, the backup exists. 0, the backup doesn’t exist. For more details, see [FREEZE PARTITION](../query_language/alter.md#alter_freeze-partition) +- `is_frozen` (`UInt8`) – Flag that shows that a partition data backup exists. 1, the backup exists. 0, the backup doesn't exist. For more details, see [FREEZE PARTITION](../sql_reference/statements/alter.md#alter_freeze-partition) - `database` (`String`) – Name of the database. @@ -430,29 +433,29 @@ Columns: - `disk` (`String`) – Name of a disk that stores the data part. -- `hash_of_all_files` (`String`) – [sipHash128](../query_language/functions/hash_functions.md#hash_functions-siphash128) of compressed files. +- `hash_of_all_files` (`String`) – [سیفون128](../sql_reference/functions/hash_functions.md#hash_functions-siphash128) از فایل های فشرده. -- `hash_of_uncompressed_files` (`String`) – [sipHash128](../query_language/functions/hash_functions.md#hash_functions-siphash128) of uncompressed files (files with marks, index file etc.). +- `hash_of_uncompressed_files` (`String`) – [سیفون128](../sql_reference/functions/hash_functions.md#hash_functions-siphash128) از فایل های غیر فشرده (فایل های با علامت, فایل شاخص و غیره.). -- `uncompressed_hash_of_compressed_files` (`String`) – [sipHash128](../query_language/functions/hash_functions.md#hash_functions-siphash128) of data in the compressed files as if they were uncompressed. +- `uncompressed_hash_of_compressed_files` (`String`) – [سیفون128](../sql_reference/functions/hash_functions.md#hash_functions-siphash128) از داده ها در فایل های فشرده به عنوان اگر غیر فشرده شد. - `bytes` (`UInt64`) – Alias for `bytes_on_disk`. - `marks_size` (`UInt64`) – Alias for `marks_bytes`. 
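+The columns above combine naturally for storage monitoring. An illustrative sketch (not from the original page) that sums the active on-disk footprint per table:
+
+``` sql
+-- Active on-disk size, row count and part count per table, largest first
+SELECT
+    database,
+    table,
+    sum(bytes_on_disk) AS size_on_disk,
+    sum(rows) AS total_rows,
+    count() AS part_count
+FROM system.parts
+WHERE active
+GROUP BY database, table
+ORDER BY size_on_disk DESC
+LIMIT 10
+```
+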
-## system.part\_log {#system_tables-part-log} +## سیستم.\_خروج {#system_tables-part-log} -The `system.part_log` table is created only if the [part\_log](server_settings/settings.md#server_settings-part-log) server setting is specified. +این `system.part_log` جدول تنها در صورتی ایجاد می شود [\_خروج](server_configuration_parameters/settings.md#server_configuration_parameters-part-log) تنظیم سرور مشخص شده است. -This table contains information about events that occurred with [data parts](table_engines/custom_partitioning_key.md) in the [MergeTree](table_engines/mergetree.md) family tables, such as adding or merging data. +این جدول حاوی اطلاعات در مورد اتفاقاتی که با رخ داده است [قطعات داده](../engines/table_engines/mergetree_family/custom_partitioning_key.md) در [ادغام](../engines/table_engines/mergetree_family/mergetree.md) جداول خانواده, مانند اضافه کردن و یا ادغام داده ها. -The `system.part_log` table contains the following columns: +این `system.part_log` جدول شامل ستون های زیر است: - `event_type` (Enum) — Type of the event that occurred with the data part. Can have one of the following values: - `NEW_PART` — Inserting of a new data part. - `MERGE_PARTS` — Merging of data parts. - `DOWNLOAD_PART` — Downloading a data part. - - `REMOVE_PART` — Removing or detaching a data part using [DETACH PARTITION](../query_language/alter.md#alter_detach-partition). + - `REMOVE_PART` — Removing or detaching a data part using [DETACH PARTITION](../sql_reference/statements/alter.md#alter_detach-partition). - `MUTATE_PART` — Mutating of a data part. - `MOVE_PART` — Moving the data part from the one disk to another one. - `event_date` (Date) — Event date. @@ -461,7 +464,7 @@ The `system.part_log` table contains the following columns: - `database` (String) — Name of the database the data part is in. - `table` (String) — Name of the table the data part is in. - `part_name` (String) — Name of the data part. -- `partition_id` (String) — ID of the partition that the data part was inserted to. The column takes the ‘all’ value if the partitioning is by `tuple()`. +- `partition_id` (String) — ID of the partition that the data part was inserted to. The column takes the ‘all’ ارزش اگر پارتیشن بندی توسط `tuple()`. - `rows` (UInt64) — The number of rows in the data part. - `size_in_bytes` (UInt64) — Size of the data part in bytes. - `merged_from` (Array(String)) — An array of names of the parts which the current part was made up from (after the merge). @@ -471,36 +474,36 @@ The `system.part_log` table contains the following columns: - `error` (UInt16) — The code number of the occurred error. - `exception` (String) — Text message of the occurred error. -The `system.part_log` table is created after the first inserting data to the `MergeTree` table. +این `system.part_log` جدول پس از اولین قرار دادن داده ها به ایجاد `MergeTree` جدول -## system.processes {#system_tables-processes} +## سیستم.فرایندها {#system_tables-processes} -This system table is used for implementing the `SHOW PROCESSLIST` query. +این جدول سیستم برای اجرای استفاده می شود `SHOW PROCESSLIST` پرس و جو. -Columns: +ستونها: -- `user` (String) – The user who made the query. Keep in mind that for distributed processing, queries are sent to remote servers under the `default` user. The field contains the username for a specific query, not for a query that this query initiated. -- `address` (String) – The IP address the request was made from. The same for distributed processing. 
To track where a distributed query was originally made from, look at `system.processes` on the query requestor server. +- `user` (String) – The user who made the query. Keep in mind that for distributed processing, queries are sent to remote servers under the `default` کاربر. زمینه شامل نام کاربری برای یک پرس و جو خاص, نه برای پرس و جو که این پرس و جو شروع. +- `address` (String) – The IP address the request was made from. The same for distributed processing. To track where a distributed query was originally made from, look at `system.processes` در سرور درخواست پرس و جو. - `elapsed` (Float64) – The time in seconds since request execution started. - `rows_read` (UInt64) – The number of rows read from the table. For distributed processing, on the requestor server, this is the total for all remote servers. - `bytes_read` (UInt64) – The number of uncompressed bytes read from the table. For distributed processing, on the requestor server, this is the total for all remote servers. - `total_rows_approx` (UInt64) – The approximation of the total number of rows that should be read. For distributed processing, on the requestor server, this is the total for all remote servers. It can be updated during request processing, when new sources to process become known. -- `memory_usage` (UInt64) – Amount of RAM the request uses. It might not include some types of dedicated memory. See the [max\_memory\_usage](../operations/settings/query_complexity.md#settings_max_memory_usage) setting. -- `query` (String) – The query text. For `INSERT`, it doesn’t include the data to insert. +- `memory_usage` (UInt64) – Amount of RAM the request uses. It might not include some types of dedicated memory. See the [\_کاساژ بیشینه](../operations/settings/query_complexity.md#settings_max_memory_usage) تنظیمات. +- `query` (String) – The query text. For `INSERT` این شامل داده ها برای وارد کردن نیست. - `query_id` (String) – Query ID, if defined. -## system.text\_log {#system-tables-text-log} +## سیستم.\_خروج {#system-tables-text-log} -Contains logging entries. Logging level which goes to this table can be limited with `text_log.level` server setting. +شامل ورودی ورود به سیستم. سطح ورود به سیستم که می رود به این جدول را می توان با محدود `text_log.level` تنظیم سرور. -Columns: +ستونها: -- `event_date` (`Date`) - Date of the entry. -- `event_time` (`DateTime`) - Time of the entry. -- `microseconds` (`UInt32`) - Microseconds of the entry. +- `event_date` (`Date`)- تاریخ ورود. +- `event_time` (`DateTime`)- زمان ورود . +- `microseconds` (`UInt32`)- میکروثانیه از ورود. - `thread_name` (String) — Name of the thread from which the logging was done. - `thread_id` (UInt64) — OS thread ID. -- `level` (`Enum8`) - Entry level. +- `level` (`Enum8`)- ورود به سطح . - `'Fatal' = 1` - `'Critical' = 2` - `'Error' = 3` @@ -509,30 +512,30 @@ Columns: - `'Information' = 6` - `'Debug' = 7` - `'Trace' = 8` -- `query_id` (`String`) - ID of the query. +- `query_id` (`String`)- شناسه پرس و جو . - `logger_name` (`LowCardinality(String)`) - Name of the logger (i.e. `DDLWorker`) -- `message` (`String`) - The message itself. -- `revision` (`UInt32`) - ClickHouse revision. -- `source_file` (`LowCardinality(String)`) - Source file from which the logging was done. -- `source_line` (`UInt64`) - Source line from which the logging was done. +- `message` (`String`)- پیام خود را. +- `revision` (`UInt32`)- تجدید نظر کلیک کنیدهاوس . +- `source_file` (`LowCardinality(String)`)- فایل منبع که از ورود به سیستم انجام شد . 
+- `source_line` (`UInt64`)- خط منبع که از ورود به سیستم انجام شد. -## system.query\_log {#system_tables-query_log} +## سیستم.\_خروج {#system_tables-query_log} -Contains information about execution of queries. For each query, you can see processing start time, duration of processing, error messages and other information. +حاوی اطلاعات در مورد اجرای نمایش داده شد. برای هر پرس و جو, شما می توانید زمان شروع پردازش را ببینید, مدت زمان پردازش, پیام های خطا و اطلاعات دیگر. -!!! note "Note" - The table doesn’t contain input data for `INSERT` queries. +!!! note "یادداشت" + جدول حاوی اطلاعات ورودی برای `INSERT` نمایش داده شد. -ClickHouse creates this table only if the [query\_log](server_settings/settings.md#server_settings-query-log) server parameter is specified. This parameter sets the logging rules, such as the logging interval or the name of the table the queries will be logged in. +تاتر این جدول را فقط در صورتی ایجاد می کند [\_خروج](server_configuration_parameters/settings.md#server_configuration_parameters-query-log) پارامتر سرور مشخص شده است. این پارامتر مجموعه قوانین ورود به سیستم, مانند فاصله ورود به سیستم و یا نام جدول نمایش داده شد خواهد شد وارد سایت شوید. -To enable query logging, set the [log\_queries](settings/settings.md#settings-log-queries) parameter to 1. For details, see the [Settings](settings/settings.md) section. +برای فعال کردن ورود به سیستم پرس و جو, تنظیم [\_خروج](settings/settings.md#settings-log-queries) پارامتر به 1. برای اطلاعات بیشتر [تنظیمات](settings/settings.md) بخش. -The `system.query_log` table registers two kinds of queries: +این `system.query_log` جدول ثبت دو نوع نمایش داده شد: -1. Initial queries that were run directly by the client. -2. Child queries that were initiated by other queries (for distributed query execution). For these types of queries, information about the parent queries is shown in the `initial_*` columns. +1. نمایش داده شد اولیه که به طور مستقیم توسط مشتری اجرا شد. +2. کودک نمایش داده شد که توسط دیگر نمایش داده شد (برای اجرای پرس و جو توزیع). برای این نوع از نمایش داده شد, اطلاعات در مورد پدر و مادر نمایش داده شد در نشان داده شده است `initial_*` ستون ها -Columns: +ستونها: - `type` (`Enum8`) — Type of event that occurred when executing the query. Values: - `'QueryStart' = 1` — Successful start of query execution. @@ -545,8 +548,8 @@ Columns: - `query_duration_ms` (UInt64) — Duration of query execution. - `read_rows` (UInt64) — Number of read rows. - `read_bytes` (UInt64) — Number of read bytes. -- `written_rows` (UInt64) — For `INSERT` queries, the number of written rows. For other queries, the column value is 0. -- `written_bytes` (UInt64) — For `INSERT` queries, the number of written bytes. For other queries, the column value is 0. +- `written_rows` (UInt64) — For `INSERT` نمایش داده شد, تعداد ردیف نوشته شده. برای نمایش داده شد دیگر مقدار ستون 0 است. +- `written_bytes` (UInt64) — For `INSERT` نمایش داده شد, تعداد بایت نوشته شده. برای نمایش داده شد دیگر مقدار ستون 0 است. - `result_rows` (UInt64) — Number of rows in the result. - `result_bytes` (UInt64) — Number of bytes in the result. - `memory_usage` (UInt64) — Memory consumption by the query. @@ -567,50 +570,50 @@ Columns: - `interface` (UInt8) — Interface that the query was initiated from. Possible values: - 1 — TCP. - 2 — HTTP. -- `os_user` (String) — OS’s username who runs [clickhouse-client](../interfaces/cli.md). -- `client_hostname` (String) — Hostname of the client machine where the [clickhouse-client](../interfaces/cli.md) or another TCP client is run. 
-- `client_name` (String) — The [clickhouse-client](../interfaces/cli.md) or another TCP client name. -- `client_revision` (UInt32) — Revision of the [clickhouse-client](../interfaces/cli.md) or another TCP client. -- `client_version_major` (UInt32) — Major version of the [clickhouse-client](../interfaces/cli.md) or another TCP client. -- `client_version_minor` (UInt32) — Minor version of the [clickhouse-client](../interfaces/cli.md) or another TCP client. -- `client_version_patch` (UInt32) — Patch component of the [clickhouse-client](../interfaces/cli.md) or another TCP client version. +- `os_user` (String) — OS's username who runs [کلیک مشتری](../interfaces/cli.md). +- `client_hostname` (String) — Hostname of the client machine where the [کلیک مشتری](../interfaces/cli.md) یا یکی دیگر از مشتری تی پی اجرا می شود. +- `client_name` (String) — The [کلیک مشتری](../interfaces/cli.md) یا یکی دیگر از نام مشتری تی پی. +- `client_revision` (UInt32) — Revision of the [کلیک مشتری](../interfaces/cli.md) یا یکی دیگر از مشتری تی پی. +- `client_version_major` (UInt32) — Major version of the [کلیک مشتری](../interfaces/cli.md) یا یکی دیگر از مشتری تی پی. +- `client_version_minor` (UInt32) — Minor version of the [کلیک مشتری](../interfaces/cli.md) یا یکی دیگر از مشتری تی پی. +- `client_version_patch` (UInt32) — Patch component of the [کلیک مشتری](../interfaces/cli.md) یا یکی دیگر از نسخه مشتری تی سی پی. - `http_method` (UInt8) — HTTP method that initiated the query. Possible values: - 0 — The query was launched from the TCP interface. - - 1 — `GET` method was used. - - 2 — `POST` method was used. -- `http_user_agent` (String) — The `UserAgent` header passed in the HTTP request. -- `quota_key` (String) — The “quota key” specified in the [quotas](quotas.md) setting (see `keyed`). + - 1 — `GET` روش مورد استفاده قرار گرفت. + - 2 — `POST` روش مورد استفاده قرار گرفت. +- `http_user_agent` (String) — The `UserAgent` هدر در درخواست قام منتقل می شود. +- `quota_key` (String) — The “quota key” مشخص شده در [سهمیه](quotas.md) تنظیم (دیدن `keyed`). - `revision` (UInt32) — ClickHouse revision. - `thread_numbers` (Array(UInt32)) — Number of threads that are participating in query execution. -- `ProfileEvents.Names` (Array(String)) — Counters that measure different metrics. The description of them could be found in the table [system.events](#system_tables-events) -- `ProfileEvents.Values` (Array(UInt64)) — Values of metrics that are listed in the `ProfileEvents.Names` column. -- `Settings.Names` (Array(String)) — Names of settings that were changed when the client ran the query. To enable logging changes to settings, set the `log_query_settings` parameter to 1. -- `Settings.Values` (Array(String)) — Values of settings that are listed in the `Settings.Names` column. +- `ProfileEvents.Names` (Array(String)) — Counters that measure different metrics. The description of them could be found in the table [سیستم.رویدادها](#system_tables-events) +- `ProfileEvents.Values` (Array(UInt64)) — Values of metrics that are listed in the `ProfileEvents.Names` ستون. +- `Settings.Names` (Array(String)) — Names of settings that were changed when the client ran the query. To enable logging changes to settings, set the `log_query_settings` پارامتر به 1. +- `Settings.Values` (Array(String)) — Values of settings that are listed in the `Settings.Names` ستون. -Each query creates one or two rows in the `query_log` table, depending on the status of the query: +هر پرس و جو ایجاد یک یا دو ردیف در `query_log` جدول بسته به وضعیت پرس و جو: -1. 
If the query execution is successful, two events with types 1 and 2 are created (see the `type` column). -2. If an error occurred during query processing, two events with types 1 and 4 are created. -3. If an error occurred before launching the query, a single event with type 3 is created. +1. اگر اجرای پرس و جو موفق است, دو رویداد با انواع 1 و 2 ایجاد می شوند (دیدن `type` ستون). +2. اگر یک خطا در طول پردازش پرس و جو رخ داده است, دو رویداد با انواع 1 و 4 ایجاد می شوند. +3. اگر یک خطا قبل از راه اندازی پرس و جو رخ داده است, یک رویداد واحد با نوع 3 ایجاد شده است. -By default, logs are added to the table at intervals of 7.5 seconds. You can set this interval in the [query\_log](server_settings/settings.md#server_settings-query-log) server setting (see the `flush_interval_milliseconds` parameter). To flush the logs forcibly from the memory buffer into the table, use the `SYSTEM FLUSH LOGS` query. +به طور پیش فرض, سیاهههای مربوط به جدول در فواصل 7.5 ثانیه اضافه. شما می توانید این فاصله در مجموعه [\_خروج](server_configuration_parameters/settings.md#server_configuration_parameters-query-log) تنظیم سرور (نگاه کنید به `flush_interval_milliseconds` پارامتر). به خیط و پیت کردن سیاهههای مربوط به زور از بافر حافظه را به جدول, استفاده از `SYSTEM FLUSH LOGS` پرس و جو. -When the table is deleted manually, it will be automatically created on the fly. Note that all the previous logs will be deleted. +هنگامی که جدول به صورت دستی حذف, به طور خودکار در پرواز ایجاد. توجه داشته باشید که تمام سیاهههای مربوط قبلی حذف خواهد شد. -!!! note "Note" - The storage period for logs is unlimited. Logs aren’t automatically deleted from the table. You need to organize the removal of outdated logs yourself. +!!! note "یادداشت" + دوره ذخیره سازی برای سیاهههای مربوط نامحدود است. سیاهههای مربوط به طور خودکار از جدول حذف نمی شود. شما نیاز به سازماندهی حذف سیاهههای مربوط منسوخ شده خود را. -You can specify an arbitrary partitioning key for the `system.query_log` table in the [query\_log](server_settings/settings.md#server_settings-query-log) server setting (see the `partition_by` parameter). +شما می توانید یک کلید پارتیشن بندی دلخواه برای مشخص `system.query_log` جدول در [\_خروج](server_configuration_parameters/settings.md#server_configuration_parameters-query-log) تنظیم سرور (نگاه کنید به `partition_by` پارامتر). -## system.query\_thread\_log {#system_tables-query-thread-log} +## سیستم.\_ر\_خروج {#system_tables-query-thread-log} -The table contains information about each query execution thread. +جدول شامل اطلاعات در مورد هر موضوع اجرای پرس و جو. -ClickHouse creates this table only if the [query\_thread\_log](server_settings/settings.md#server_settings-query-thread-log) server parameter is specified. This parameter sets the logging rules, such as the logging interval or the name of the table the queries will be logged in. +تاتر این جدول را فقط در صورتی ایجاد می کند [\_ر\_خروج](server_configuration_parameters/settings.md#server_configuration_parameters-query-thread-log) پارامتر سرور مشخص شده است. این پارامتر مجموعه قوانین ورود به سیستم, مانند فاصله ورود به سیستم و یا نام جدول نمایش داده شد خواهد شد وارد سایت شوید. -To enable query logging, set the [log\_query\_threads](settings/settings.md#settings-log-query-threads) parameter to 1. For details, see the [Settings](settings/settings.md) section. +برای فعال کردن ورود به سیستم پرس و جو, تنظیم [باز کردن](settings/settings.md#settings-log-query-threads) پارامتر به 1. برای اطلاعات بیشتر [تنظیمات](settings/settings.md) بخش. 
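+A hedged end-to-end sketch (the queried columns are listed below; `SYSTEM FLUSH LOGS` forces the buffered entries to disk, as described at the end of this section):
+
+``` sql
+SET log_query_threads = 1;
+SELECT sum(number) FROM numbers(1000000);
+SYSTEM FLUSH LOGS;
+-- Inspect the threads of the most recent queries
+SELECT event_time, query_id, thread_name, read_rows, memory_usage
+FROM system.query_thread_log
+ORDER BY event_time DESC
+LIMIT 5;
+```
+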
-Columns: +ستونها: - `event_date` (Date) — the date when the thread has finished execution of the query. - `event_time` (DateTime) — the date and time when the thread has finished execution of the query. @@ -618,8 +621,8 @@ Columns: - `query_duration_ms` (UInt64) — Duration of query execution. - `read_rows` (UInt64) — Number of read rows. - `read_bytes` (UInt64) — Number of read bytes. -- `written_rows` (UInt64) — For `INSERT` queries, the number of written rows. For other queries, the column value is 0. -- `written_bytes` (UInt64) — For `INSERT` queries, the number of written bytes. For other queries, the column value is 0. +- `written_rows` (UInt64) — For `INSERT` نمایش داده شد, تعداد ردیف نوشته شده. برای نمایش داده شد دیگر مقدار ستون 0 است. +- `written_bytes` (UInt64) — For `INSERT` نمایش داده شد, تعداد بایت نوشته شده. برای نمایش داده شد دیگر مقدار ستون 0 است. - `memory_usage` (Int64) — The difference between the amount of allocated and freed memory in context of this thread. - `peak_memory_usage` (Int64) — The maximum difference between the amount of allocated and freed memory in context of this thread. - `thread_name` (String) — Name of the thread. @@ -641,62 +644,62 @@ Columns: - `interface` (UInt8) — Interface that the query was initiated from. Possible values: - 1 — TCP. - 2 — HTTP. -- `os_user` (String) — OS’s username who runs [clickhouse-client](../interfaces/cli.md). -- `client_hostname` (String) — Hostname of the client machine where the [clickhouse-client](../interfaces/cli.md) or another TCP client is run. -- `client_name` (String) — The [clickhouse-client](../interfaces/cli.md) or another TCP client name. -- `client_revision` (UInt32) — Revision of the [clickhouse-client](../interfaces/cli.md) or another TCP client. -- `client_version_major` (UInt32) — Major version of the [clickhouse-client](../interfaces/cli.md) or another TCP client. -- `client_version_minor` (UInt32) — Minor version of the [clickhouse-client](../interfaces/cli.md) or another TCP client. -- `client_version_patch` (UInt32) — Patch component of the [clickhouse-client](../interfaces/cli.md) or another TCP client version. +- `os_user` (String) — OS's username who runs [کلیک مشتری](../interfaces/cli.md). +- `client_hostname` (String) — Hostname of the client machine where the [کلیک مشتری](../interfaces/cli.md) یا یکی دیگر از مشتری تی پی اجرا می شود. +- `client_name` (String) — The [کلیک مشتری](../interfaces/cli.md) یا یکی دیگر از نام مشتری تی پی. +- `client_revision` (UInt32) — Revision of the [کلیک مشتری](../interfaces/cli.md) یا یکی دیگر از مشتری تی پی. +- `client_version_major` (UInt32) — Major version of the [کلیک مشتری](../interfaces/cli.md) یا یکی دیگر از مشتری تی پی. +- `client_version_minor` (UInt32) — Minor version of the [کلیک مشتری](../interfaces/cli.md) یا یکی دیگر از مشتری تی پی. +- `client_version_patch` (UInt32) — Patch component of the [کلیک مشتری](../interfaces/cli.md) یا یکی دیگر از نسخه مشتری تی سی پی. - `http_method` (UInt8) — HTTP method that initiated the query. Possible values: - 0 — The query was launched from the TCP interface. - - 1 — `GET` method was used. - - 2 — `POST` method was used. -- `http_user_agent` (String) — The `UserAgent` header passed in the HTTP request. -- `quota_key` (String) — The “quota key” specified in the [quotas](quotas.md) setting (see `keyed`). + - 1 — `GET` روش مورد استفاده قرار گرفت. + - 2 — `POST` روش مورد استفاده قرار گرفت. +- `http_user_agent` (String) — The `UserAgent` هدر در درخواست قام منتقل می شود. 
+- `quota_key` (String) — The “quota key” مشخص شده در [سهمیه](quotas.md) تنظیم (دیدن `keyed`). - `revision` (UInt32) — ClickHouse revision. -- `ProfileEvents.Names` (Array(String)) — Counters that measure different metrics for this thread. The description of them could be found in the table [system.events](#system_tables-events) -- `ProfileEvents.Values` (Array(UInt64)) — Values of metrics for this thread that are listed in the `ProfileEvents.Names` column. +- `ProfileEvents.Names` (Array(String)) — Counters that measure different metrics for this thread. The description of them could be found in the table [سیستم.رویدادها](#system_tables-events) +- `ProfileEvents.Values` (Array(UInt64)) — Values of metrics for this thread that are listed in the `ProfileEvents.Names` ستون. -By default, logs are added to the table at intervals of 7.5 seconds. You can set this interval in the [query\_thread\_log](server_settings/settings.md#server_settings-query-thread-log) server setting (see the `flush_interval_milliseconds` parameter). To flush the logs forcibly from the memory buffer into the table, use the `SYSTEM FLUSH LOGS` query. +به طور پیش فرض, سیاهههای مربوط به جدول در فواصل 7.5 ثانیه اضافه. شما می توانید این فاصله در مجموعه [\_ر\_خروج](server_configuration_parameters/settings.md#server_configuration_parameters-query-thread-log) تنظیم سرور (نگاه کنید به `flush_interval_milliseconds` پارامتر). به خیط و پیت کردن سیاهههای مربوط به زور از بافر حافظه را به جدول, استفاده از `SYSTEM FLUSH LOGS` پرس و جو. -When the table is deleted manually, it will be automatically created on the fly. Note that all the previous logs will be deleted. +هنگامی که جدول به صورت دستی حذف, به طور خودکار در پرواز ایجاد. توجه داشته باشید که تمام سیاهههای مربوط قبلی حذف خواهد شد. -!!! note "Note" - The storage period for logs is unlimited. Logs aren’t automatically deleted from the table. You need to organize the removal of outdated logs yourself. +!!! note "یادداشت" + دوره ذخیره سازی برای سیاهههای مربوط نامحدود است. سیاهههای مربوط به طور خودکار از جدول حذف نمی شود. شما نیاز به سازماندهی حذف سیاهههای مربوط منسوخ شده خود را. -You can specify an arbitrary partitioning key for the `system.query_thread_log` table in the [query\_thread\_log](server_settings/settings.md#server_settings-query-thread-log) server setting (see the `partition_by` parameter). +شما می توانید یک کلید پارتیشن بندی دلخواه برای مشخص `system.query_thread_log` جدول در [\_ر\_خروج](server_configuration_parameters/settings.md#server_configuration_parameters-query-thread-log) تنظیم سرور (نگاه کنید به `partition_by` پارامتر). -## system.trace\_log {#system_tables-trace_log} +## سیستم.\_قطع {#system_tables-trace_log} -Contains stack traces collected by the sampling query profiler. +حاوی ردیاب های پشته ای است که توسط پروفایل پرس و جو نمونه گیری می شود. -ClickHouse creates this table when the [trace\_log](server_settings/settings.md#server_settings-trace_log) server configuration section is set. Also the [query\_profiler\_real\_time\_period\_ns](settings/settings.md#query_profiler_real_time_period_ns) and [query\_profiler\_cpu\_time\_period\_ns](settings/settings.md#query_profiler_cpu_time_period_ns) settings should be set. +تاتر این جدول زمانی ایجاد می کند [\_قطع](server_configuration_parameters/settings.md#server_configuration_parameters-trace_log) بخش پیکربندی سرور تنظیم شده است. همچنین [جستجو](settings/settings.md#query_profiler_real_time_period_ns) و [ایران در تهران](settings/settings.md#query_profiler_cpu_time_period_ns) تنظیمات باید تنظیم شود. 
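+An illustrative way to enable the sampling profiler for one session and peek at the raw samples (assuming the `trace_log` server section is configured; the 10 ms period is only an example value):
+
+``` sql
+SET query_profiler_real_time_period_ns = 10000000; -- sample every 10 ms
+SELECT count() FROM numbers(100000000);
+SYSTEM FLUSH LOGS;
+SELECT event_time, timer_type, thread_number, query_id, length(trace) AS stack_depth
+FROM system.trace_log
+ORDER BY event_time DESC
+LIMIT 5;
+```
+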
-To analyze logs, use the `addressToLine`, `addressToSymbol` and `demangle` introspection functions. +برای تجزیه و تحلیل سیاهههای مربوط, استفاده از `addressToLine`, `addressToSymbol` و `demangle` توابع درون گرایی. -Columns: +ستونها: -- `event_date`([Date](../data_types/date.md)) — Date of sampling moment. +- `event_date`([تاریخ](../sql_reference/data_types/date.md)) — Date of sampling moment. -- `event_time`([DateTime](../data_types/datetime.md)) — Timestamp of sampling moment. +- `event_time`([DateTime](../sql_reference/data_types/datetime.md)) — Timestamp of sampling moment. -- `revision`([UInt32](../data_types/int_uint.md)) — ClickHouse server build revision. +- `revision`([UInt32](../sql_reference/data_types/int_uint.md)) — ClickHouse server build revision. - When connecting to server by `clickhouse-client`, you see the string similar to `Connected to ClickHouse server version 19.18.1 revision 54429.`. This field contains the `revision`, but not the `version` of a server. + هنگام اتصال به سرور توسط `clickhouse-client`, شما رشته شبیه به دیدن `Connected to ClickHouse server version 19.18.1 revision 54429.`. این فیلد شامل `revision` اما نه `version` از یک سرور. -- `timer_type`([Enum8](../data_types/enum.md)) — Timer type: +- `timer_type`([شمار8](../sql_reference/data_types/enum.md)) — Timer type: - - `Real` represents wall-clock time. - - `CPU` represents CPU time. + - `Real` نشان دهنده زمان دیوار ساعت. + - `CPU` نشان دهنده زمان پردازنده. -- `thread_number`([UInt32](../data_types/int_uint.md)) — Thread identifier. +- `thread_number`([UInt32](../sql_reference/data_types/int_uint.md)) — Thread identifier. -- `query_id`([String](../data_types/string.md)) — Query identifier that can be used to get details about a query that was running from the [query\_log](#system_tables-query_log) system table. +- `query_id`([رشته](../sql_reference/data_types/string.md)) — Query identifier that can be used to get details about a query that was running from the [\_خروج](#system_tables-query_log) جدول سیستم. -- `trace`([Array(UInt64)](../data_types/array.md)) — Stack trace at the moment of sampling. Each element is a virtual memory address inside ClickHouse server process. +- `trace`([Array(UInt64)](../sql_reference/data_types/array.md)) — Stack trace at the moment of sampling. Each element is a virtual memory address inside ClickHouse server process. -**Example** +**مثال** ``` sql SELECT * FROM system.trace_log LIMIT 1 \G @@ -714,12 +717,12 @@ query_id: acc4d61f-5bd1-4a3e-bc91-2180be37c915 trace: [94222141367858,94222152240175,94222152325351,94222152329944,94222152330796,94222151449980,94222144088167,94222151682763,94222144088167,94222151682763,94222144088167,94222144058283,94222144059248,94222091840750,94222091842302,94222091831228,94222189631488,140509950166747,140509942945935] ``` -## system.replicas {#system_tables-replicas} +## سیستم.تکرار {#system_tables-replicas} -Contains information and status for replicated tables residing on the local server. -This table can be used for monitoring. The table contains a row for every Replicated\* table. +شامل اطلاعات و وضعیت برای جداول تکرار ساکن بر روی سرور محلی. +این جدول را می توان برای نظارت استفاده می شود. جدول شامل یک ردیف برای هر تکرار \* جدول. -Example: +مثال: ``` sql SELECT * @@ -763,46 +766,46 @@ total_replicas: 2 active_replicas: 2 ``` -Columns: +ستونها: -- `database` (`String`) - Database name -- `table` (`String`) - Table name -- `engine` (`String`) - Table engine name -- `is_leader` (`UInt8`) - Whether the replica is the leader. 
- Only one replica at a time can be the leader. The leader is responsible for selecting background merges to perform. - Note that writes can be performed to any replica that is available and has a session in ZK, regardless of whether it is a leader. -- `can_become_leader` (`UInt8`) - Whether the replica can be elected as a leader. -- `is_readonly` (`UInt8`) - Whether the replica is in read-only mode. - This mode is turned on if the config doesn’t have sections with ZooKeeper, if an unknown error occurred when reinitializing sessions in ZooKeeper, and during session reinitialization in ZooKeeper. -- `is_session_expired` (`UInt8`) - the session with ZooKeeper has expired. Basically the same as `is_readonly`. -- `future_parts` (`UInt32`) - The number of data parts that will appear as the result of INSERTs or merges that haven’t been done yet. -- `parts_to_check` (`UInt32`) - The number of data parts in the queue for verification. A part is put in the verification queue if there is suspicion that it might be damaged. -- `zookeeper_path` (`String`) - Path to table data in ZooKeeper. -- `replica_name` (`String`) - Replica name in ZooKeeper. Different replicas of the same table have different names. -- `replica_path` (`String`) - Path to replica data in ZooKeeper. The same as concatenating ‘zookeeper\_path/replicas/replica\_path’. -- `columns_version` (`Int32`) - Version number of the table structure. Indicates how many times ALTER was performed. If replicas have different versions, it means some replicas haven’t made all of the ALTERs yet. -- `queue_size` (`UInt32`) - Size of the queue for operations waiting to be performed. Operations include inserting blocks of data, merges, and certain other actions. It usually coincides with `future_parts`. -- `inserts_in_queue` (`UInt32`) - Number of inserts of blocks of data that need to be made. Insertions are usually replicated fairly quickly. If this number is large, it means something is wrong. -- `merges_in_queue` (`UInt32`) - The number of merges waiting to be made. Sometimes merges are lengthy, so this value may be greater than zero for a long time. -- `part_mutations_in_queue` (`UInt32`) - The number of mutations waiting to be made. -- `queue_oldest_time` (`DateTime`) - If `queue_size` greater than 0, shows when the oldest operation was added to the queue. -- `inserts_oldest_time` (`DateTime`) - See `queue_oldest_time` -- `merges_oldest_time` (`DateTime`) - See `queue_oldest_time` -- `part_mutations_oldest_time` (`DateTime`) - See `queue_oldest_time` +- `database` (`String`)- نام پایگاه داده +- `table` (`String`)- نام جدول +- `engine` (`String`)- نام موتور جدول +- `is_leader` (`UInt8`)- چه ماکت رهبر است. + فقط یک ماکت در یک زمان می تواند رهبر باشد. رهبر برای انتخاب پس زمینه ادغام به انجام است. + توجه داشته باشید که می نویسد را می توان به هر ماکت است که در دسترس است و یک جلسه در زک انجام, صرف نظر از اینکه این یک رهبر است. +- `can_become_leader` (`UInt8`)- چه ماکت می تواند به عنوان یک رهبر انتخاب می شوند. +- `is_readonly` (`UInt8`)- چه ماکت در حالت فقط خواندنی است. + در این حالت روشن است اگر پیکربندی ندارد بخش با باغ وحش اگر یک خطای ناشناخته رخ داده است که reinitializing جلسات در باغ وحش و در طول جلسه reinitialization در باغ وحش. +- `is_session_expired` (`UInt8`)- جلسه با باغ وحش منقضی شده است. در واقع همان `is_readonly`. +- `future_parts` (`UInt32`)- تعداد قطعات داده است که به عنوان نتیجه درج و یا ادغام که هنوز انجام نشده است ظاهر می شود. +- `parts_to_check` (`UInt32`)- تعداد قطعات داده در صف برای تایید. 
اگر شک وجود دارد که ممکن است صدمه دیده است بخشی در صف تایید قرار داده است. +- `zookeeper_path` (`String`)- مسیر به داده های جدول در باغ وحش. +- `replica_name` (`String`)- نام ماکت در باغ وحش. کپی های مختلف از همان جدول نام های مختلف. +- `replica_path` (`String`)- مسیر به داده های ماکت در باغ وحش. همان الحاق ‘zookeeper\_path/replicas/replica\_path’. +- `columns_version` (`Int32`)- تعداد نسخه از ساختار جدول . نشان می دهد که چند بار تغییر انجام شد. اگر کپی نسخه های مختلف, به این معنی برخی از کپی ساخته شده است همه از تغییر نکرده است. +- `queue_size` (`UInt32`)- اندازه صف برای عملیات در حال انتظار برای انجام شود . عملیات شامل قرار دادن بلوک های داده ادغام و برخی اقدامات دیگر. معمولا همزمان با `future_parts`. +- `inserts_in_queue` (`UInt32`)- تعداد درج بلوک از داده ها که نیاز به ساخته شده است . درج معمولا نسبتا به سرعت تکرار. اگر این تعداد بزرگ است, به این معنی چیزی اشتباه است. +- `merges_in_queue` (`UInt32`)- تعداد ادغام انتظار ساخته شود. گاهی اوقات ادغام طولانی هستند, بنابراین این مقدار ممکن است بیشتر از صفر برای یک مدت طولانی. +- `part_mutations_in_queue` (`UInt32`)- تعداد جهش در انتظار ساخته شده است. +- `queue_oldest_time` (`DateTime`)- اگر `queue_size` بیشتر از 0, نشان می دهد که قدیمی ترین عملیات به صف اضافه شد. +- `inserts_oldest_time` (`DateTime` دیدن وضعیت شبکه `queue_oldest_time` +- `merges_oldest_time` (`DateTime` دیدن وضعیت شبکه `queue_oldest_time` +- `part_mutations_oldest_time` (`DateTime` دیدن وضعیت شبکه `queue_oldest_time` -The next 4 columns have a non-zero value only where there is an active session with ZK. +4 ستون بعدی یک مقدار غیر صفر تنها جایی که یک جلسه فعال با زک وجود دارد. -- `log_max_index` (`UInt64`) - Maximum entry number in the log of general activity. -- `log_pointer` (`UInt64`) - Maximum entry number in the log of general activity that the replica copied to its execution queue, plus one. If `log_pointer` is much smaller than `log_max_index`, something is wrong. -- `last_queue_update` (`DateTime`) - When the queue was updated last time. -- `absolute_delay` (`UInt64`) - How big lag in seconds the current replica has. -- `total_replicas` (`UInt8`) - The total number of known replicas of this table. -- `active_replicas` (`UInt8`) - The number of replicas of this table that have a session in ZooKeeper (i.e., the number of functioning replicas). +- `log_max_index` (`UInt64`)- حداکثر تعداد ورودی در ورود به سیستم از فعالیت های عمومی. +- `log_pointer` (`UInt64`) - حداکثر تعداد ورودی در ورود به سیستم از فعالیت های عمومی که ماکت کپی شده به صف اعدام خود را, به علاوه یک. اگر `log_pointer` بسیار کوچکتر از `log_max_index`, چیزی اشتباه است. +- `last_queue_update` (`DateTime`)- هنگامی که صف در زمان گذشته به روز شد. +- `absolute_delay` (`UInt64`)- تاخیر چقدر بزرگ در ثانیه ماکت فعلی است. +- `total_replicas` (`UInt8`)- تعداد کل کپی شناخته شده از این جدول. +- `active_replicas` (`UInt8`)- تعداد کپی از این جدول که یک جلسه در باغ وحش (یعنی تعداد تکرار عملکرد). -If you request all the columns, the table may work a bit slowly, since several reads from ZooKeeper are made for each row. -If you don’t request the last 4 columns (log\_max\_index, log\_pointer, total\_replicas, active\_replicas), the table works quickly. +اگر شما درخواست تمام ستون, جدول ممکن است کمی کند کار, از چند بار خوانده شده از باغ وحش برای هر سطر ساخته شده. +اگر شما درخواست آخرین 4 ستون (log\_max\_index, log\_pointer, total\_replicas, active\_replicas) جدول با این نسخهها کار به سرعت. 
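+In line with that advice, a cheap health check can avoid those columns entirely (a sketch; the queue-size threshold is only illustrative):
+
+``` sql
+-- Uses only the inexpensive columns, so no per-row ZooKeeper reads are made
+SELECT database, table, is_readonly, is_session_expired, queue_size, merges_in_queue
+FROM system.replicas
+WHERE is_readonly OR is_session_expired OR queue_size > 100
+```
+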
-For example, you can check that everything is working correctly like this:
+For example, you can check that everything is working correctly like this:

``` sql
SELECT
@@ -834,25 +837,25 @@ WHERE
    OR active_replicas < total_replicas
```

-If this query doesn’t return anything, it means that everything is fine.
+If this query doesn’t return anything, it means that everything is fine.

-## system.settings {#system-settings}
+## system.settings {#system-settings}

-Contains information about settings that are currently in use.
-I.e. used for executing the query you are using to read from the system.settings table.
+Contains information about settings that are currently in use,
+i.e. the settings used for executing the query you are using to read from the system.settings table.

-Columns:
+Columns:

- `name` (String) — Setting name.
- `value` (String) — Setting value.
- `description` (String) — Setting description.
- `type` (String) — Setting type (implementation specific string value).
- `changed` (UInt8) — Whether the setting was explicitly defined in the config or explicitly changed.
-- `min` (Nullable(String)) — Get minimum allowed value (if any is set via [constraints](settings/constraints_on_settings.md#constraints-on-settings)).
-- `max` (Nullable(String)) — Get maximum allowed value (if any is set via [constraints](settings/constraints_on_settings.md#constraints-on-settings)).
-- `readonly` (UInt8) — Can user change this setting (for more info, look into [constraints](settings/constraints_on_settings.md#constraints-on-settings)).
+- `min` (Nullable(String)) — Get minimum allowed value (if any is set via [constraints](settings/constraints_on_settings.md#constraints-on-settings)).
+- `max` (Nullable(String)) — Get maximum allowed value (if any is set via [constraints](settings/constraints_on_settings.md#constraints-on-settings)).
+- `readonly` (UInt8) — Can user change this setting (for more info, look into [constraints](settings/constraints_on_settings.md#constraints-on-settings)).

-Example:
+Example:

``` sql
SELECT name, value
@@ -869,11 +872,11 @@ WHERE changed
└────────────────────────┴─────────────┘
```

-## system.merge\_tree\_settings {#system-merge_tree_settings}
+## system.merge\_tree\_settings {#system-merge_tree_settings}

-Contains information about settings for `MergeTree` tables.
+Contains information about settings for `MergeTree` tables.

-Columns:
+Columns:

- `name` (String) — Setting name.
- `value` (String) — Setting value.
@@ -881,21 +884,21 @@ Columns:
- `type` (String) — Setting type (implementation specific string value).
- `changed` (UInt8) — Whether the setting was explicitly defined in the config or explicitly changed.
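+
+For instance, to see which MergeTree settings were changed from their defaults (a minimal sketch):
+
+``` sql
+-- only settings changed from their defaults
+SELECT name, value
+FROM system.merge_tree_settings
+WHERE changed
+```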
-## system.table\_engines {#system-table-engines}
+## system.table\_engines {#system-table-engines}

-Contains description of table engines supported by server and their feature support information.
+Contains the description of table engines supported by the server and their feature support information.

-This table contains the following columns (the column type is shown in brackets):
+This table contains the following columns (the column type is shown in brackets):

- `name` (String) — The name of table engine.
-- `supports_settings` (UInt8) — Flag that indicates if table engine supports `SETTINGS` clause.
-- `supports_skipping_indices` (UInt8) — Flag that indicates if table engine supports [skipping indices](table_engines/mergetree/#table_engine-mergetree-data_skipping-indexes).
-- `supports_ttl` (UInt8) — Flag that indicates if table engine supports [TTL](table_engines/mergetree/#table_engine-mergetree-ttl).
-- `supports_sort_order` (UInt8) — Flag that indicates if table engine supports clauses `PARTITION_BY`, `PRIMARY_KEY`, `ORDER_BY` and `SAMPLE_BY`.
-- `supports_replication` (UInt8) — Flag that indicates if table engine supports [data replication](table_engines/replication/).
+- `supports_settings` (UInt8) — Flag that indicates if table engine supports the `SETTINGS` clause.
+- `supports_skipping_indices` (UInt8) — Flag that indicates if table engine supports [skipping indices](../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-data_skipping-indexes).
+- `supports_ttl` (UInt8) — Flag that indicates if table engine supports [TTL](../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-ttl).
+- `supports_sort_order` (UInt8) — Flag that indicates if table engine supports clauses `PARTITION_BY`, `PRIMARY_KEY`, `ORDER_BY` and `SAMPLE_BY`.
+- `supports_replication` (UInt8) — Flag that indicates if table engine supports [data replication](../engines/table_engines/mergetree_family/replication.md).
- `supports_duduplication` (UInt8) — Flag that indicates if table engine supports data deduplication.

-Example:
+Example:

``` sql
SELECT *
@@ -911,56 +914,72 @@ WHERE name in ('Kafka', 'MergeTree', 'ReplicatedCollapsingMergeTree')
└───────────────────────────────┴───────────────────┴───────────────────────────┴─────────────────────┴──────────────┴──────────────────────┴────────────────────────┘
```

-**See also**
+**See also**

-- MergeTree family [query clauses](table_engines/mergetree.md#mergetree-query-clauses)
-- Kafka [settings](table_engines/kafka.md#table_engine-kafka-creating-a-table)
-- Join [settings](table_engines/join.md#join-limitations-and-settings)
+- MergeTree family [query clauses](../engines/table_engines/mergetree_family/mergetree.md#mergetree-query-clauses)
+- Kafka [settings](../engines/table_engines/integrations/kafka.md#table_engine-kafka-creating-a-table)
+- Join [settings](../engines/table_engines/special/join.md#join-limitations-and-settings)

-## system.tables {#system-tables}
+## system.tables {#system-tables}

-Contains metadata of each table that the server knows about. Detached tables are not shown in `system.tables`.
+Contains metadata of each table that the server knows about. Detached tables are not shown in `system.tables`.

-This table contains the following columns (the column type is shown in brackets):
+This table contains the following columns (the column type is shown in brackets):

- `database` (String) — The name of the database the table is in.
+
- `name` (String) — Table name.
+
- `engine` (String) — Table engine name (without parameters).
-- `is_temporary` (UInt8) - Flag that indicates whether the table is temporary.
-- `data_path` (String) - Path to the table data in the file system.
-- `metadata_path` (String) - Path to the table metadata in the file system.
-- `metadata_modification_time` (DateTime) - Time of latest modification of the table metadata.
-- `dependencies_database` (Array(String)) - Database dependencies.
-- `dependencies_table` (Array(String)) - Table dependencies ([MaterializedView](table_engines/materializedview.md) tables based on the current table).
-- `create_table_query` (String) - The query that was used to create the table.
-- `engine_full` (String) - Parameters of the table engine.
-- `partition_key` (String) - The partition key expression specified in the table.
-- `sorting_key` (String) - The sorting key expression specified in the table.
-- `primary_key` (String) - The primary key expression specified in the table.
-- `sampling_key` (String) - The sampling key expression specified in the table.
-- `storage_policy` (String) - The storage policy:
-    - [MergeTree](table_engines/mergetree.md#table_engine-mergetree-multiple-volumes)
-    - [Distributed](table_engines/distributed.md#distributed)
+- `is_temporary` (UInt8) - Flag that indicates whether the table is temporary.

-- `total_rows` (Nullable(UInt64)) - Total number of rows, if it is possible to quickly determine exact number of rows in the table, otherwise `Null` (including underying `Buffer` table).
-- `total_bytes` (Nullable(UInt64)) - Total number of bytes, if it is possible to quickly determine exact number of bytes for the table on storage, otherwise `Null` (**does not** includes any underlying storage).
+- `data_path` (String) - Path to the table data in the file system.

-    - If the table stores data on disk, returns used space on disk (i.e. compressed).
-    - If the table stores data in memory, returns approximated number of used bytes in memory.
+- `metadata_path` (String) - Path to the table metadata in the file system.

-The `system.tables` table is used in `SHOW TABLES` query implementation.
+- `metadata_modification_time` (DateTime) - Time of the latest modification of the table metadata.

-## system.zookeeper {#system-zookeeper}
+- `dependencies_database` (Array(String)) - Database dependencies.

-The table does not exist if ZooKeeper is not configured. Allows reading data from the ZooKeeper cluster defined in the config.
-The query must have a ‘path’ equality condition in the WHERE clause. This is the path in ZooKeeper for the children that you want to get data for.
+- `dependencies_table` (Array(String)) - Table dependencies ([MaterializedView](../engines/table_engines/special/materializedview.md) tables based on the current table).

-The query `SELECT * FROM system.zookeeper WHERE path = '/clickhouse'` outputs data for all children on the `/clickhouse` node.
-To output data for all root nodes, write path = ‘/’.
-If the path specified in ‘path’ doesn’t exist, an exception will be thrown.
+- `create_table_query` (String) - The query that was used to create the table.

-Columns:
+- `engine_full` (String) - Parameters of the table engine.
+
+- `partition_key` (String) - The partition key expression specified in the table.
+
+- `sorting_key` (String) - The sorting key expression specified in the table.
+
+- `primary_key` (String) - The primary key expression specified in the table.
+
+- `sampling_key` (String) - The sampling key expression specified in the table.
+
+- `storage_policy` (String) - The storage policy:
+
+    - [MergeTree](../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-multiple-volumes)
+    - [Distributed](../engines/table_engines/special/distributed.md#distributed)
+
+- `total_rows` (Nullable(UInt64)) - Total number of rows, if it is possible to quickly determine the exact number of rows in the table, otherwise `Null` (including the underlying `Buffer` table).
+
+- `total_bytes` (Nullable(UInt64)) - Total number of bytes, if it is possible to quickly determine the exact number of bytes for the table on storage, otherwise `Null` (**does not** include any underlying storage).
+
+    - If the table stores data on disk, returns used space on disk (i.e. compressed).
+    - If the table stores data in memory, returns the approximated number of used bytes in memory.
+
+The `system.tables` table is used in the `SHOW TABLES` query implementation.
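+
+For example, to list the non-temporary tables of the current database (a minimal sketch over the columns above):
+
+``` sql
+-- skips temporary tables
+SELECT name, engine, partition_key
+FROM system.tables
+WHERE database = currentDatabase() AND NOT is_temporary
+```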
+
+## system.zookeeper {#system-zookeeper}
+
+The table does not exist if ZooKeeper is not configured. Allows reading data from the ZooKeeper cluster defined in the config.
+The query must have a ‘path’ equality condition in the WHERE clause. This is the path in ZooKeeper for the children that you want to get data for.
+
+The query `SELECT * FROM system.zookeeper WHERE path = '/clickhouse'` outputs data for all children on the `/clickhouse` node.
+To output data for all root nodes, write path = ‘/’.
+If the path specified in ‘path’ doesn’t exist, an exception will be thrown.
+
+Columns:

- `name` (String) — The name of the node.
- `path` (String) — The path to the node.
@@ -977,7 +996,7 @@ Columns:
- `aversion` (Int32) — Number of changes to the ACL.
- `ephemeralOwner` (Int64) — For ephemeral nodes, the ID of the session that owns this node.

-Example:
+Example:

``` sql
SELECT *
@@ -1022,57 +1041,57 @@ pzxid: 987021252247
path: /clickhouse/tables/01-08/visits/replicas
```

-## system.mutations {#system_tables-mutations}
+## system.mutations {#system_tables-mutations}

-The table contains information about [mutations](../query_language/alter.md#alter-mutations) of MergeTree tables and their progress. Each mutation command is represented by a single row. The table has the following columns:
+The table contains information about [mutations](../sql_reference/statements/alter.md#alter-mutations) of MergeTree tables and their progress. Each mutation command is represented by a single row. The table has the following columns:

-**database**, **table** - The name of the database and table to which the mutation was applied.
+**database**, **table** - The name of the database and table to which the mutation was applied.

-**mutation\_id** - The ID of the mutation. For replicated tables these IDs correspond to znode names in the `/mutations/` directory in ZooKeeper. For unreplicated tables the IDs correspond to file names in the data directory of the table.
+**mutation\_id** - The ID of the mutation. For replicated tables these IDs correspond to znode names in the `/mutations/` directory in ZooKeeper. For unreplicated tables the IDs correspond to file names in the data directory of the table.

-**command** - The mutation command string (the part of the query after `ALTER TABLE [db.]table`).
+**command** - The mutation command string (the part of the query after `ALTER TABLE [db.]table`).

-**create\_time** - When this mutation command was submitted for execution.
+**create\_time** - When this mutation command was submitted for execution.

-**block\_numbers.partition\_id**, **block\_numbers.number** - A nested column. For mutations of replicated tables, it contains one record for each partition: the partition ID and the block number that was acquired by the mutation (in each partition, only parts that contain blocks with numbers less than the block number acquired by the mutation in that partition will be mutated). In non-replicated tables, block numbers in all partitions form a single sequence. This means that for mutations of non-replicated tables, the column will contain one record with a single block number acquired by the mutation.
+**block\_numbers.partition\_id**, **block\_numbers.number** - A nested column. For mutations of replicated tables, it contains one record for each partition: the partition ID and the block number that was acquired by the mutation (in each partition, only parts that contain blocks with numbers less than the block number acquired by the mutation in that partition will be mutated). In non-replicated tables, block numbers in all partitions form a single sequence. This means that for mutations of non-replicated tables, the column will contain one record with a single block number acquired by the mutation.

-**parts\_to\_do** - The number of data parts that need to be mutated for the mutation to finish.
+**parts\_to\_do** - The number of data parts that need to be mutated for the mutation to finish.

-**is\_done** - Is the mutation done? Note that even if `parts_to_do = 0` it is possible that a mutation of a replicated table is not done yet because of a long-running INSERT that will create a new data part that will need to be mutated.
+**is\_done** - Is the mutation done? Note that even if `parts_to_do = 0` it is possible that a mutation of a replicated table is not done yet because of a long-running INSERT that will create a new data part that will need to be mutated.

-If there were problems with mutating some parts, the following columns contain additional information:
+If there were problems with mutating some parts, the following columns contain additional information:

-**latest\_failed\_part** - The name of the most recent part that could not be mutated.
+**latest\_failed\_part** - The name of the most recent part that could not be mutated.

-**latest\_fail\_time** - The time of the most recent part mutation failure.
+**latest\_fail\_time** - The time of the most recent part mutation failure.

-**latest\_fail\_reason** - The exception message that caused the most recent part mutation failure.
+**latest\_fail\_reason** - The exception message that caused the most recent part mutation failure.
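+
+For example, to monitor mutations that are still in progress (a minimal sketch over the columns above):
+
+``` sql
+-- unfinished mutations only
+SELECT database, table, mutation_id, command, parts_to_do, is_done
+FROM system.mutations
+WHERE is_done = 0
+```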
-## system.disks {#system_tables-disks}
+## system.disks {#system_tables-disks}

-Contains information about disks defined in the [server configuration](table_engines/mergetree.md#table_engine-mergetree-multiple-volumes_configure).
+Contains information about disks defined in the [server configuration](../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-multiple-volumes_configure).

-Columns:
+Columns:

-- `name` ([String](../data_types/string.md)) — Name of a disk in the server configuration.
-- `path` ([String](../data_types/string.md)) — Path to the mount point in the file system.
-- `free_space` ([UInt64](../data_types/int_uint.md)) — Free space on disk in bytes.
-- `total_space` ([UInt64](../data_types/int_uint.md)) — Disk volume in bytes.
-- `keep_free_space` ([UInt64](../data_types/int_uint.md)) — Amount of disk space that should stay free on disk in bytes. Defined in the `keep_free_space_bytes` parameter of disk configuration.
+- `name` ([String](../sql_reference/data_types/string.md)) — Name of a disk in the server configuration.
+- `path` ([String](../sql_reference/data_types/string.md)) — Path to the mount point in the file system.
+- `free_space` ([UInt64](../sql_reference/data_types/int_uint.md)) — Free space on disk in bytes.
+- `total_space` ([UInt64](../sql_reference/data_types/int_uint.md)) — Disk volume in bytes.
+- `keep_free_space` ([UInt64](../sql_reference/data_types/int_uint.md)) — Amount of disk space that should stay free on disk in bytes. Defined in the `keep_free_space_bytes` parameter of the disk configuration.

-## system.storage\_policies {#system_tables-storage_policies}
+## system.storage\_policies {#system_tables-storage_policies}

-Contains information about storage policies and volumes defined in the [server configuration](table_engines/mergetree.md#table_engine-mergetree-multiple-volumes_configure).
+Contains information about storage policies and volumes defined in the [server configuration](../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-multiple-volumes_configure).

-Columns:
+Columns:

-- `policy_name` ([String](../data_types/string.md)) — Name of the storage policy.
-- `volume_name` ([String](../data_types/string.md)) — Volume name defined in the storage policy.
-- `volume_priority` ([UInt64](../data_types/int_uint.md)) — Volume order number in the configuration.
-- `disks` ([Array(String)](../data_types/array.md)) — Disk names, defined in the storage policy.
-- `max_data_part_size` ([UInt64](../data_types/int_uint.md)) — Maximum size of a data part that can be stored on volume disks (0 — no limit).
-- `move_factor` ([Float64](../data_types/float.md)) — Ratio of free disk space. When the ratio exceeds the value of configuration parameter, ClickHouse start to move data to the next volume in order.
+- `policy_name` ([String](../sql_reference/data_types/string.md)) — Name of the storage policy.
+- `volume_name` ([String](../sql_reference/data_types/string.md)) — Volume name defined in the storage policy.
+- `volume_priority` ([UInt64](../sql_reference/data_types/int_uint.md)) — Volume order number in the configuration.
+- `disks` ([Array(String)](../sql_reference/data_types/array.md)) — Disk names, defined in the storage policy.
+- `max_data_part_size` ([UInt64](../sql_reference/data_types/int_uint.md)) — Maximum size of a data part that can be stored on volume disks (0 — no limit).
+- `move_factor` ([Float64](../sql_reference/data_types/float.md)) — Ratio of free disk space. When the ratio exceeds the value of the configuration parameter, ClickHouse starts to move data to the next volume in order.

-If the storage policy contains more then one volume, then information for each volume is stored in the individual row of the table.
+If the storage policy contains more than one volume, then information for each volume is stored in an individual row of the table.

-[Original article](https://clickhouse.tech/docs/en/operations/system_tables/)
+[Original article](https://clickhouse.tech/docs/en/operations/system_tables/)
diff --git a/docs/fa/operations/table_engines/aggregatingmergetree.md b/docs/fa/operations/table_engines/aggregatingmergetree.md
deleted file mode 100644
index 755f0c3bb16..00000000000
--- a/docs/fa/operations/table_engines/aggregatingmergetree.md
+++ /dev/null
@@ -1,99 +0,0 @@
----
-en_copy: true
----
-
-# AggregatingMergeTree {#aggregatingmergetree}
-
-The engine inherits from [MergeTree](mergetree.md#table_engines-mergetree), altering the logic for data parts merging. ClickHouse replaces all rows with the same primary key (or more accurately, with the same [sorting key](mergetree.md)) with a single row (within one data part) that stores a combination of states of aggregate functions.
-
-You can use `AggregatingMergeTree` tables for incremental data aggregation, including for aggregated materialized views.
-
-The engine processes all columns with [AggregateFunction](../../data_types/nested_data_structures/aggregatefunction.md) type.
-
-It is appropriate to use `AggregatingMergeTree` if it reduces the number of rows by orders of magnitude.
-
-## Creating a Table {#creating-a-table}
-
-``` sql
-CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
-(
-    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
-    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
-    ...
-) ENGINE = AggregatingMergeTree()
-[PARTITION BY expr]
-[ORDER BY expr]
-[SAMPLE BY expr]
-[TTL expr]
-[SETTINGS name=value, ...]
-```
-
-For a description of request parameters, see [request description](../../query_language/create.md).
-
-**Query clauses**
-
-When creating an `AggregatingMergeTree` table the same [clauses](mergetree.md) are required, as when creating a `MergeTree` table.
-
-<details markdown="1">
-
-<summary>Deprecated Method for Creating a Table</summary>
-
-!!! attention "Attention"
-    Do not use this method in new projects and, if possible, switch the old projects to the method described above.
-
-``` sql
-CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
-(
-    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
-    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
-    ...
-) ENGINE [=] AggregatingMergeTree(date-column [, sampling_expression], (primary, key), index_granularity)
-```
-
-All of the parameters have the same meaning as in `MergeTree`.
-</details>
-
-## SELECT and INSERT {#select-and-insert}
-
-To insert data, use an [INSERT SELECT](../../query_language/insert_into.md) query with aggregate -State- functions.
-When selecting data from an `AggregatingMergeTree` table, use the `GROUP BY` clause and the same aggregate functions as when inserting data, but using the `-Merge` suffix.
-
-In the results of a `SELECT` query, the values of `AggregateFunction` type have implementation-specific binary representation for all of the ClickHouse output formats. If you dump data into, for example, the `TabSeparated` format with a `SELECT` query, then this dump can be loaded back using an `INSERT` query.
-
-## Example of an Aggregated Materialized View {#example-of-an-aggregated-materialized-view}
-
-An `AggregatingMergeTree` materialized view that watches the `test.visits` table:
-
-``` sql
-CREATE MATERIALIZED VIEW test.basic
-ENGINE = AggregatingMergeTree() PARTITION BY toYYYYMM(StartDate) ORDER BY (CounterID, StartDate)
-AS SELECT
-    CounterID,
-    StartDate,
-    sumState(Sign) AS Visits,
-    uniqState(UserID) AS Users
-FROM test.visits
-GROUP BY CounterID, StartDate;
-```
-
-Inserting data into the `test.visits` table.
-
-``` sql
-INSERT INTO test.visits ...
-```
-
-The data are inserted into both the table and the view `test.basic`, which will perform the aggregation.
-
-To get the aggregated data, we need to execute a query such as `SELECT ... GROUP BY ...` from the view `test.basic`:
-
-``` sql
-SELECT
-    StartDate,
-    sumMerge(Visits) AS Visits,
-    uniqMerge(Users) AS Users
-FROM test.basic
-GROUP BY StartDate
-ORDER BY StartDate;
-```
-
-[Original article](https://clickhouse.tech/docs/en/operations/table_engines/aggregatingmergetree/)
diff --git a/docs/fa/operations/table_engines/buffer.md b/docs/fa/operations/table_engines/buffer.md
deleted file mode 100644
index 2ce0cff4b16..00000000000
--- a/docs/fa/operations/table_engines/buffer.md
+++ /dev/null
@@ -1,68 +0,0 @@
----
-en_copy: true
----
-
-# Buffer {#buffer}
-
-Buffers the data to write in RAM, periodically flushing it to another table. During the read operation, data is read from the buffer and the other table simultaneously.
-
-``` sql
-Buffer(database, table, num_layers, min_time, max_time, min_rows, max_rows, min_bytes, max_bytes)
-```
-
-Engine parameters:
-
-- `database` – Database name. Instead of the database name, you can use a constant expression that returns a string.
-- `table` – Table to flush data to.
-- `num_layers` – Parallelism layer. Physically, the table will be represented as `num_layers` of independent buffers. Recommended value: 16.
-- `min_time`, `max_time`, `min_rows`, `max_rows`, `min_bytes`, and `max_bytes` – Conditions for flushing data from the buffer.
-
-Data is flushed from the buffer and written to the destination table if all the `min*` conditions or at least one `max*` condition are met.
-
-- `min_time`, `max_time` – Condition for the time in seconds from the moment of the first write to the buffer.
-- `min_rows`, `max_rows` – Condition for the number of rows in the buffer.
-- `min_bytes`, `max_bytes` – Condition for the number of bytes in the buffer.
-
-During the write operation, data is inserted into one of the `num_layers` random buffers. Or, if the data part to insert is large enough (greater than `max_rows` or `max_bytes`), it is written directly to the destination table, omitting the buffer.
-
-The conditions for flushing the data are calculated separately for each of the `num_layers` buffers. For example, if `num_layers = 16` and `max_bytes = 100000000`, the maximum RAM consumption is 1.6 GB.
-
-Example:
-
-``` sql
-CREATE TABLE merge.hits_buffer AS merge.hits ENGINE = Buffer(merge, hits, 16, 10, 100, 10000, 1000000, 10000000, 100000000)
-```
-
-Creating a ‘merge.hits\_buffer’ table with the same structure as ‘merge.hits’ and using the Buffer engine. When writing to this table, data is buffered in RAM and later written to the ‘merge.hits’ table. 16 buffers are created. The data in each of them is flushed if either 100 seconds have passed, or one million rows have been written, or 100 MB of data have been written; or if simultaneously 10 seconds have passed and 10,000 rows and 10 MB of data have been written. For example, if just one row has been written, after 100 seconds it will be flushed, no matter what. But if many rows have been written, the data will be flushed sooner.
-
-When the server is stopped, with DROP TABLE or DETACH TABLE, buffer data is also flushed to the destination table.
-
-You can set empty strings in single quotation marks for the database and table name. This indicates the absence of a destination table. In this case, when the data flush conditions are reached, the buffer is simply cleared. This may be useful for keeping a window of data in memory (see the sketch below).
-
-When reading from a Buffer table, data is processed both from the buffer and from the destination table (if there is one).
-Note that Buffer tables do not support an index. In other words, data in the buffer is fully scanned, which might be slow for large buffers. (For data in a subordinate table, the index that it supports will be used.)
-
-If the set of columns in the Buffer table doesn’t match the set of columns in a subordinate table, a subset of columns that exist in both tables is inserted.
-
-If the types don’t match for one of the columns in the Buffer table and a subordinate table, an error message is entered in the server log and the buffer is cleared.
-The same thing happens if the subordinate table doesn’t exist when the buffer is flushed.
-
-If you need to run ALTER for a subordinate table and the Buffer table, we recommend first deleting the Buffer table, running ALTER for the subordinate table, then creating the Buffer table again.
-
-If the server is restarted abnormally, the data in the buffer is lost.
-
-FINAL and SAMPLE do not work correctly for Buffer tables. These conditions are passed to the destination table, but are not used for processing data in the buffer. If these features are required, we recommend only using the Buffer table for writing, while reading from the destination table.
-
-When adding data to a Buffer, one of the buffers is locked. This causes delays if a read operation is simultaneously being performed from the table.
-
-Data that is inserted into a Buffer table may end up in the subordinate table in a different order and in different blocks. Because of this, a Buffer table is difficult to use for writing to a CollapsingMergeTree correctly. To avoid problems, you can set ‘num\_layers’ to 1.
-
-If the destination table is replicated, some expected characteristics of replicated tables are lost when writing to a Buffer table. The random changes to the order of rows and sizes of data parts cause data deduplication to quit working, which means it is not possible to have a reliable ‘exactly once’ write to replicated tables.
-
-Due to these disadvantages, we can only recommend using a Buffer table in rare cases.
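-
-One such case is the destination-less buffer mentioned above, which just keeps a window of recent data in RAM (a minimal sketch; the table and column names are illustrative):
-
-``` sql
--- empty database/table: no destination, the buffer is simply cleared on flush
-CREATE TABLE events_window (event String, ts DateTime) ENGINE = Buffer('', '', 16, 10, 100, 10000, 1000000, 10000000, 100000000)
-```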
-
-A Buffer table is used when too many INSERTs are received from a large number of servers over a unit of time, and the data can’t be buffered before insertion, which means the INSERTs can’t run fast enough.
-
-Note that it doesn’t make sense to insert data one row at a time, even for Buffer tables. This will only produce a speed of a few thousand rows per second, while inserting larger blocks of data can produce over a million rows per second (see the section “Performance”).
-
-[Original article](https://clickhouse.tech/docs/en/operations/table_engines/buffer/)
diff --git a/docs/fa/operations/table_engines/collapsingmergetree.md b/docs/fa/operations/table_engines/collapsingmergetree.md
deleted file mode 100644
index 97f8c7ecf46..00000000000
--- a/docs/fa/operations/table_engines/collapsingmergetree.md
+++ /dev/null
@@ -1,306 +0,0 @@
----
-en_copy: true
----
-
-# CollapsingMergeTree {#table_engine-collapsingmergetree}
-
-The engine inherits from [MergeTree](mergetree.md) and adds the logic of rows collapsing to the data parts merge algorithm.
-
-`CollapsingMergeTree` asynchronously deletes (collapses) pairs of rows if all of the fields in a sorting key (`ORDER BY`) are equivalent except for the particular field `Sign`, which can have `1` and `-1` values. Rows without a pair are kept. For more details see the [Collapsing](#table_engine-collapsingmergetree-collapsing) section of the document.
-
-The engine may significantly reduce the volume of storage and increase the efficiency of `SELECT` queries as a consequence.
-
-## Creating a Table {#creating-a-table}
-
-``` sql
-CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
-(
-    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
-    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
-    ...
-) ENGINE = CollapsingMergeTree(sign)
-[PARTITION BY expr]
-[ORDER BY expr]
-[SAMPLE BY expr]
-[SETTINGS name=value, ...]
-```
-
-For a description of query parameters, see [query description](../../query_language/create.md).
-
-**CollapsingMergeTree Parameters**
-
-- `sign` — Name of the column with the type of row: `1` is a “state” row, `-1` is a “cancel” row.
-
-    Column data type — `Int8`.
-
-**Query clauses**
-
-When creating a `CollapsingMergeTree` table, the same [query clauses](mergetree.md#table_engine-mergetree-creating-a-table) are required, as when creating a `MergeTree` table.
-
-<details markdown="1">
-
-<summary>Deprecated Method for Creating a Table</summary>
-
-!!! attention "Attention"
-    Do not use this method in new projects and, if possible, switch the old projects to the method described above.
-
-``` sql
-CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
-(
-    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
-    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
-    ...
-) ENGINE [=] CollapsingMergeTree(date-column [, sampling_expression], (primary, key), index_granularity, sign)
-```
-
-All of the parameters except `sign` have the same meaning as in `MergeTree`.
-
-- `sign` — Name of the column with the type of row: `1` — “state” row, `-1` — “cancel” row.
-
-    Column Data Type — `Int8`.
-
-</details>
-
-## Collapsing {#table_engine-collapsingmergetree-collapsing}
-
-### Data {#data}
-
-Consider the situation where you need to save continually changing data for some object. It sounds logical to have one row for an object and update it at any change, but the update operation is expensive and slow for a DBMS because it requires rewriting the data in the storage. If you need to write data quickly, updates are not acceptable, but you can write the changes of an object sequentially as follows.
-
-Use the particular column `Sign`. If `Sign = 1` it means that the row is a state of an object, let’s call it the “state” row. If `Sign = -1` it means the cancellation of the state of an object with the same attributes, let’s call it the “cancel” row.
-
-For example, we want to calculate how many pages users visited on some site and how long they stayed there. At some moment we write the following row with the state of user activity:
-
-``` text
-┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐
-│ 4324182021466249494 │ 5 │ 146 │ 1 │
-└─────────────────────┴───────────┴──────────┴──────┘
-```
-
-Some time later we register the change of user activity and write it with the following two rows.
-
-``` text
-┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐
-│ 4324182021466249494 │ 5 │ 146 │ -1 │
-│ 4324182021466249494 │ 6 │ 185 │ 1 │
-└─────────────────────┴───────────┴──────────┴──────┘
-```
-
-The first row cancels the previous state of the object (user). It should copy the sorting key fields of the cancelled state except for `Sign`.
-
-The second row contains the current state.
-
-As we need only the last state of user activity, the rows
-
-``` text
-┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐
-│ 4324182021466249494 │ 5 │ 146 │ 1 │
-│ 4324182021466249494 │ 5 │ 146 │ -1 │
-└─────────────────────┴───────────┴──────────┴──────┘
-```
-
-can be deleted, collapsing the invalid (old) state of an object. `CollapsingMergeTree` does this while merging the data parts.
-
-Why we need 2 rows for each change is explained in the [Algorithm](#table_engine-collapsingmergetree-collapsing-algorithm) paragraph.
-
-**Peculiar properties of such an approach**
-
-1.  The program that writes the data should remember the state of an object to be able to cancel it. The “cancel” row should contain copies of the sorting key fields of the “state” row and the opposite `Sign`. This increases the initial size of storage but allows writing the data quickly.
-2.  Long growing arrays in columns reduce the efficiency of the engine due to the load from writing. The more straightforward the data, the higher the efficiency.
-3.  The `SELECT` results depend strongly on the consistency of the object changes history. Be accurate when preparing data for inserting. You can get unpredictable results from inconsistent data, for example, negative values for non-negative metrics such as session depth.
-
-### Algorithm {#table_engine-collapsingmergetree-collapsing-algorithm}
-
-When ClickHouse merges data parts, each group of consecutive rows with the same sorting key (`ORDER BY`) is reduced to not more than two rows, one with `Sign = 1` (“state” row) and another with `Sign = -1` (“cancel” row). In other words, entries collapse.
-
-For each resulting data part ClickHouse saves:
-
-1.  The first “cancel” and the last “state” rows, if the number of “state” and “cancel” rows matches and the last row is a “state” row.
-
-2.  The last “state” row, if there are more “state” rows than “cancel” rows.
-3.  The first “cancel” row, if there are more “cancel” rows than “state” rows.
-
-4.  None of the rows, in all other cases.
-
-Also when there are at least 2 more “state” rows than “cancel” rows, or at least 2 more “cancel” rows than “state” rows, the merge continues, but ClickHouse treats this situation as a logical error and records it in the server log. This error can occur if the same data were inserted more than once.
-
-Thus, collapsing should not change the results of calculating statistics.
-Changes are gradually collapsed so that in the end only the last state of almost every object is left.
-
-The `Sign` is required because the merging algorithm doesn’t guarantee that all of the rows with the same sorting key will be in the same resulting data part and even on the same physical server. ClickHouse processes `SELECT` queries with multiple threads, and it cannot predict the order of rows in the result. The aggregation is required if there is a need to get completely “collapsed” data from a `CollapsingMergeTree` table.
-
-To finalize collapsing, write a query with a `GROUP BY` clause and aggregate functions that account for the sign. For example, to calculate quantity, use `sum(Sign)` instead of `count()`. To calculate the sum of something, use `sum(Sign * x)` instead of `sum(x)`, and so on, and also add `HAVING sum(Sign) > 0`.
-
-The aggregates `count`, `sum` and `avg` could be calculated this way. The aggregate `uniq` could be calculated if an object has at least one state not collapsed. The aggregates `min` and `max` could not be calculated because `CollapsingMergeTree` does not save the value history of the collapsed states.
-
-If you need to extract data without aggregation (for example, to check whether rows are present whose newest values match certain conditions), you can use the `FINAL` modifier for the `FROM` clause. This approach is significantly less efficient.
-
-## Example of use {#example-of-use}
-
-Example data:
-
-``` text
-┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐
-│ 4324182021466249494 │ 5 │ 146 │ 1 │
-│ 4324182021466249494 │ 5 │ 146 │ -1 │
-│ 4324182021466249494 │ 6 │ 185 │ 1 │
-└─────────────────────┴───────────┴──────────┴──────┘
-```
-
-Creation of the table:
-
-``` sql
-CREATE TABLE UAct
-(
-    UserID UInt64,
-    PageViews UInt8,
-    Duration UInt8,
-    Sign Int8
-)
-ENGINE = CollapsingMergeTree(Sign)
-ORDER BY UserID
-```
-
-Insertion of the data:
-
-``` sql
-INSERT INTO UAct VALUES (4324182021466249494, 5, 146, 1)
-```
-
-``` sql
-INSERT INTO UAct VALUES (4324182021466249494, 5, 146, -1),(4324182021466249494, 6, 185, 1)
-```
-
-We use two `INSERT` queries to create two different data parts. If we insert the data with one query, ClickHouse creates one data part and will never perform any merge.
-
-Getting the data:
-
-``` sql
-SELECT * FROM UAct
-```
-
-``` text
-┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐
-│ 4324182021466249494 │ 5 │ 146 │ -1 │
-│ 4324182021466249494 │ 6 │ 185 │ 1 │
-└─────────────────────┴───────────┴──────────┴──────┘
-┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐
-│ 4324182021466249494 │ 5 │ 146 │ 1 │
-└─────────────────────┴───────────┴──────────┴──────┘
-```
-
-What do we see and where is collapsing?
-
-With two `INSERT` queries, we created 2 data parts. The `SELECT` query was performed in 2 threads, and we got a random order of rows. Collapsing has not occurred because there was no merge of the data parts yet. ClickHouse merges data parts at an unknown moment which we cannot predict.
-
-Thus we need aggregation:
-
-``` sql
-SELECT
-    UserID,
-    sum(PageViews * Sign) AS PageViews,
-    sum(Duration * Sign) AS Duration
-FROM UAct
-GROUP BY UserID
-HAVING sum(Sign) > 0
-```
-
-``` text
-┌──────────────UserID─┬─PageViews─┬─Duration─┐
-│ 4324182021466249494 │ 6 │ 185 │
-└─────────────────────┴───────────┴──────────┘
-```
-
-If we do not need aggregation and want to force collapsing, we can use the `FINAL` modifier for the `FROM` clause.
-
-``` sql
-SELECT * FROM UAct FINAL
-```
-
-``` text
-┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐
-│ 4324182021466249494 │ 6 │ 185 │ 1 │
-└─────────────────────┴───────────┴──────────┴──────┘
-```
-
-This way of selecting the data is very inefficient. Don’t use it for big tables.
-
-## Example of another approach {#example-of-another-approach}
-
-Example data:
-
-``` text
-┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐
-│ 4324182021466249494 │ 5 │ 146 │ 1 │
-│ 4324182021466249494 │ -5 │ -146 │ -1 │
-│ 4324182021466249494 │ 6 │ 185 │ 1 │
-└─────────────────────┴───────────┴──────────┴──────┘
-```
-
-The idea is that merges take into account only key fields. And in the “cancel” row we can specify negative values that equalize the previous version of the row when summing without using the `Sign` column. For this approach, it is necessary to change the data types of `PageViews` and `Duration` to store negative values: UInt8 -\> Int16.
-
-``` sql
-CREATE TABLE UAct
-(
-    UserID UInt64,
-    PageViews Int16,
-    Duration Int16,
-    Sign Int8
-)
-ENGINE = CollapsingMergeTree(Sign)
-ORDER BY UserID
-```
-
-Let’s test the approach:
-
-``` sql
-insert into UAct values(4324182021466249494, 5, 146, 1);
-insert into UAct values(4324182021466249494, -5, -146, -1);
-insert into UAct values(4324182021466249494, 6, 185, 1);
-
-select * from UAct final; -- avoid using final in production (just for a test or small tables)
-```
-
-``` text
-┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐
-│ 4324182021466249494 │ 6 │ 185 │ 1 │
-└─────────────────────┴───────────┴──────────┴──────┘
-```
-
-``` sql
-SELECT
-    UserID,
-    sum(PageViews) AS PageViews,
-    sum(Duration) AS Duration
-FROM UAct
-GROUP BY UserID
-```
-
-``` text
-┌──────────────UserID─┬─PageViews─┬─Duration─┐
-│ 4324182021466249494 │ 6 │ 185 │
-└─────────────────────┴───────────┴──────────┘
-```
-
-``` sql
-select count() FROM UAct
-```
-
-``` text
-┌─count()─┐
-│ 3 │
-└─────────┘
-```
-
-``` sql
-optimize table UAct final;
-
-select * FROM UAct
-```
-
-``` text
-┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐
-│ 4324182021466249494 │ 6 │ 185 │ 1 │
-└─────────────────────┴───────────┴──────────┴──────┘
-```
-
-[Original article](https://clickhouse.tech/docs/en/operations/table_engines/collapsingmergetree/)
diff --git a/docs/fa/operations/table_engines/custom_partitioning_key.md b/docs/fa/operations/table_engines/custom_partitioning_key.md
deleted file mode 100644
index 1a661728f0a..00000000000
--- a/docs/fa/operations/table_engines/custom_partitioning_key.md
+++ /dev/null
@@ -1,124 +0,0 @@
----
-en_copy: true
----
-
-# Custom Partitioning Key {#custom-partitioning-key}
-
-Partitioning is available for the [MergeTree](mergetree.md) family tables (including [replicated](replication.md) tables). [Materialized views](materializedview.md) based on MergeTree tables support partitioning, as well.
-
-A partition is a logical combination of records in a table by a specified criterion. You can set a partition by an arbitrary criterion, such as by month, by day, or by event type.
Each partition is stored separately to simplify manipulations of the data. When accessing the data, ClickHouse uses the smallest subset of partitions possible.
-
-The partition is specified in the `PARTITION BY expr` clause when [creating a table](mergetree.md#table_engine-mergetree-creating-a-table). The partition key can be any expression from the table columns. For example, to specify partitioning by month, use the expression `toYYYYMM(date_column)`:
-
-``` sql
-CREATE TABLE visits
-(
-    VisitDate Date,
-    Hour UInt8,
-    ClientID UUID
-)
-ENGINE = MergeTree()
-PARTITION BY toYYYYMM(VisitDate)
-ORDER BY Hour;
-```
-
-The partition key can also be a tuple of expressions (similar to the [primary key](mergetree.md#primary-keys-and-indexes-in-queries)). For example:
-
-``` sql
-ENGINE = ReplicatedCollapsingMergeTree('/clickhouse/tables/name', 'replica1', Sign)
-PARTITION BY (toMonday(StartDate), EventType)
-ORDER BY (CounterID, StartDate, intHash32(UserID));
-```
-
-In this example, we set partitioning by the event types that occurred during the current week.
-
-When inserting new data into a table, this data is stored as a separate part (chunk) sorted by the primary key. Within 10-15 minutes after inserting, the parts of the same partition are merged into a single part.
-
-!!! info "Info"
-    A merge only works for data parts that have the same value for the partitioning expression. This means **you shouldn’t make overly granular partitions** (more than about a thousand partitions). Otherwise, the `SELECT` query performs poorly because of an unreasonably large number of files in the file system and open file descriptors.
-
-Use the [system.parts](../system_tables.md#system_tables-parts) table to view the table parts and partitions. For example, let’s assume that we have a `visits` table with partitioning by month. Let’s perform the `SELECT` query for the `system.parts` table:
-
-``` sql
-SELECT
-    partition,
-    name,
-    active
-FROM system.parts
-WHERE table = 'visits'
-```
-
-``` text
-┌─partition─┬─name───────────┬─active─┐
-│ 201901 │ 201901_1_3_1 │ 0 │
-│ 201901 │ 201901_1_9_2 │ 1 │
-│ 201901 │ 201901_8_8_0 │ 0 │
-│ 201901 │ 201901_9_9_0 │ 0 │
-│ 201902 │ 201902_4_6_1 │ 1 │
-│ 201902 │ 201902_10_10_0 │ 1 │
-│ 201902 │ 201902_11_11_0 │ 1 │
-└───────────┴────────────────┴────────┘
-```
-
-The `partition` column contains the names of the partitions. There are two partitions in this example: `201901` and `201902`. You can use this column value to specify the partition name in [ALTER … PARTITION](#alter_manipulations-with-partitions) queries.
-
-The `name` column contains the names of the partition data parts. You can use this column to specify the name of the part in the [ALTER ATTACH PART](#alter_attach-partition) query.
-
-Let’s break down the name of the first part: `201901_1_3_1`:
-
-- `201901` is the partition name.
-- `1` is the minimum number of the data block.
-- `3` is the maximum number of the data block.
-- `1` is the chunk level (the depth of the merge tree it is formed from).
-
-!!! info "Info"
-    The parts of old-type tables have the name: `20190117_20190123_2_2_0` (minimum date - maximum date - minimum block number - maximum block number - level).
-
-The `active` column shows the status of the part. `1` is active; `0` is inactive. The inactive parts are, for example, source parts remaining after merging to a larger part. The corrupted data parts are also indicated as inactive.
-
-As you can see in the example, there are several separate parts of the same partition (for example, `201901_1_3_1` and `201901_1_9_2`). This means that these parts are not merged yet. ClickHouse merges the inserted parts of data periodically, approximately 15 minutes after inserting. In addition, you can perform a non-scheduled merge using the [OPTIMIZE](../../query_language/misc.md#misc_operations-optimize) query. Example:
-
-``` sql
-OPTIMIZE TABLE visits PARTITION 201902;
-```
-
-``` text
-┌─partition─┬─name───────────┬─active─┐
-│ 201901 │ 201901_1_3_1 │ 0 │
-│ 201901 │ 201901_1_9_2 │ 1 │
-│ 201901 │ 201901_8_8_0 │ 0 │
-│ 201901 │ 201901_9_9_0 │ 0 │
-│ 201902 │ 201902_4_6_1 │ 0 │
-│ 201902 │ 201902_4_11_2 │ 1 │
-│ 201902 │ 201902_10_10_0 │ 0 │
-│ 201902 │ 201902_11_11_0 │ 0 │
-└───────────┴────────────────┴────────┘
-```
-
-Inactive parts will be deleted approximately 10 minutes after merging.
-
-Another way to view a set of parts and partitions is to go into the directory of the table: `/var/lib/clickhouse/data/<database>/<table>/`. For example:
-
-``` bash
-/var/lib/clickhouse/data/default/visits$ ls -l
-total 40
-drwxr-xr-x 2 clickhouse clickhouse 4096 Feb  1 16:48 201901_1_3_1
-drwxr-xr-x 2 clickhouse clickhouse 4096 Feb  5 16:17 201901_1_9_2
-drwxr-xr-x 2 clickhouse clickhouse 4096 Feb  5 15:52 201901_8_8_0
-drwxr-xr-x 2 clickhouse clickhouse 4096 Feb  5 15:52 201901_9_9_0
-drwxr-xr-x 2 clickhouse clickhouse 4096 Feb  5 16:17 201902_10_10_0
-drwxr-xr-x 2 clickhouse clickhouse 4096 Feb  5 16:17 201902_11_11_0
-drwxr-xr-x 2 clickhouse clickhouse 4096 Feb  5 16:19 201902_4_11_2
-drwxr-xr-x 2 clickhouse clickhouse 4096 Feb  5 12:09 201902_4_6_1
-drwxr-xr-x 2 clickhouse clickhouse 4096 Feb  1 16:48 detached
-```
-
-The folders ‘201901\_1\_3\_1’, ‘201901\_1\_9\_2’ and so on are the directories of the parts. Each part relates to a corresponding partition and contains data just for a certain month (the table in this example has partitioning by month).
-
-The `detached` directory contains parts that were detached from the table using the [DETACH](#alter_detach-partition) query. The corrupted parts are also moved to this directory, instead of being deleted. The server does not use the parts from the `detached` directory. You can add, delete, or modify the data in this directory at any time – the server will not know about this until you run the [ATTACH](../../query_language/alter.md#alter_attach-partition) query.
-
-Note that on the operating server, you cannot manually change the set of parts or their data on the file system, since the server will not know about it. For non-replicated tables, you can do this when the server is stopped, but it isn’t recommended. For replicated tables, the set of parts cannot be changed in any case.
-
-ClickHouse allows you to perform operations with the partitions: delete them, copy from one table to another, or create a backup. See the list of all operations in the section [Manipulations With Partitions and Parts](../../query_language/alter.md#alter_manipulations-with-partitions).
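-
-For instance, a partition name taken from the `partition` column can be used directly in such queries (a minimal sketch; see the ALTER documentation for the full list of operations):
-
-``` sql
--- partition name as shown in system.parts
-ALTER TABLE visits DETACH PARTITION 201901;
-ALTER TABLE visits ATTACH PARTITION 201901;
-```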
-
-[Original article](https://clickhouse.tech/docs/en/operations/table_engines/custom_partitioning_key/)
diff --git a/docs/fa/operations/table_engines/distributed.md b/docs/fa/operations/table_engines/distributed.md
deleted file mode 100644
index 2d94dba2039..00000000000
--- a/docs/fa/operations/table_engines/distributed.md
+++ /dev/null
@@ -1,149 +0,0 @@
----
-en_copy: true
----
-
-# Distributed {#distributed}
-
-**Tables with the Distributed engine do not store any data by themselves**, but allow distributed query processing on multiple servers.
-Reading is automatically parallelized. During a read, the table indexes on remote servers are used, if there are any.
-
-The Distributed engine accepts parameters:
-
-- the cluster name in the server’s config file
-
-- the name of a remote database
-
-- the name of a remote table
-
-- (optionally) sharding key
-
-- (optionally) policy name, it will be used to store temporary files for async send
-
-    See also:
-
-    - `insert_distributed_sync` setting
-    - [MergeTree](mergetree.md#table_engine-mergetree-multiple-volumes) for the examples
-
-Example:
-
-``` sql
-Distributed(logs, default, hits[, sharding_key[, policy_name]])
-```
-
-Data will be read from all servers in the ‘logs’ cluster, from the default.hits table located on every server in the cluster.
-Data is not only read but is partially processed on the remote servers (to the extent that this is possible).
-For example, for a query with GROUP BY, data will be aggregated on remote servers, and the intermediate states of aggregate functions will be sent to the requestor server. Then data will be further aggregated.
-
-Instead of the database name, you can use a constant expression that returns a string. For example: currentDatabase().
-
-logs – The cluster name in the server’s config file.
-
-Clusters are set like this:
-
-``` xml
-<remote_servers>
-    <logs>
-        <shard>
-            <!-- Optional. Shard weight when writing data. Default: 1. -->
-            <weight>1</weight>
-            <!-- Optional. Whether to write data to just one of the replicas. Default: false (write data to all replicas). -->
-            <internal_replication>false</internal_replication>
-            <replica>
-                <host>example01-01-1</host>
-                <port>9000</port>
-            </replica>
-            <replica>
-                <host>example01-01-2</host>
-                <port>9000</port>
-            </replica>
-        </shard>
-        <shard>
-            <weight>2</weight>
-            <internal_replication>false</internal_replication>
-            <replica>
-                <host>example01-02-1</host>
-                <port>9000</port>
-            </replica>
-            <replica>
-                <host>example01-02-2</host>
-                <secure>1</secure>
-                <port>9440</port>
-            </replica>
-        </shard>
-    </logs>
-</remote_servers>
-```
-
-Here a cluster is defined with the name ‘logs’ that consists of two shards, each of which contains two replicas.
-Shards refer to the servers that contain different parts of the data (in order to read all the data, you must access all the shards).
-Replicas are duplicating servers (in order to read all the data, you can access the data on any one of the replicas).
-
-Cluster names must not contain dots.
-
-The parameters `host`, `port`, and optionally `user`, `password`, `secure`, `compression` are specified for each server:
-- `host` – The address of the remote server. You can use either the domain or the IPv4 or IPv6 address. If you specify the domain, the server makes a DNS request when it starts, and the result is stored as long as the server is running. If the DNS request fails, the server doesn’t start. If you change the DNS record, restart the server.
-- `port` – The TCP port for messenger activity (‘tcp\_port’ in the config, usually set to 9000). Do not confuse it with http\_port.
-- `user` – Name of the user for connecting to a remote server. Default value: default. This user must have access to connect to the specified server. Access is configured in the users.xml file. For more information, see the section [Access rights](../../operations/access_rights.md).
-- `password` – The password for connecting to a remote server (not masked). Default value: empty string.
-- `secure` - Use ssl for connection; usually you should also define `port` = 9440.
Server should listen on 9440 and have correct certificates. -- `compression` - Use data compression. Default value: true. - -When specifying replicas, one of the available replicas will be selected for each of the shards when reading. You can configure the algorithm for load balancing (the preference for which replica to access) – see the [load\_balancing](../settings/settings.md#settings-load_balancing) setting. -If the connection with the server is not established, there will be an attempt to connect with a short timeout. If the connection failed, the next replica will be selected, and so on for all the replicas. If the connection attempt failed for all the replicas, the attempt will be repeated the same way, several times. -This works in favour of resiliency, but does not provide complete fault tolerance: a remote server might accept the connection, but might not work, or work poorly. - -You can specify just one of the shards (in this case, query processing should be called remote, rather than distributed) or up to any number of shards. In each shard, you can specify from one to any number of replicas. You can specify a different number of replicas for each shard. - -You can specify as many clusters as you wish in the configuration. - -To view your clusters, use the ‘system.clusters’ table. - -The Distributed engine allows working with a cluster like a local server. However, the cluster is inextensible: you must write its configuration in the server config file (even better, for all the cluster’s servers). - -The Distributed engine requires writing clusters to the config file. Clusters from the config file are updated on the fly, without restarting the server. If you need to send a query to an unknown set of shards and replicas each time, you don’t need to create a Distributed table – use the ‘remote’ table function instead. See the section [Table functions](../../query_language/table_functions/index.md). - -There are two methods for writing data to a cluster: - -First, you can define which servers to write which data to and perform the write directly on each shard. In other words, perform INSERT in the tables that the distributed table “looks at”. This is the most flexible solution as you can use any sharding scheme, which could be non-trivial due to the requirements of the subject area. This is also the most optimal solution since data can be written to different shards completely independently. - -Second, you can perform INSERT in a Distributed table. In this case, the table will distribute the inserted data across the servers itself. In order to write to a Distributed table, it must have a sharding key set (the last parameter). In addition, if there is only one shard, the write operation works without specifying the sharding key, since it doesn’t mean anything in this case. - -Each shard can have a weight defined in the config file. By default, the weight is equal to one. Data is distributed across shards in the amount proportional to the shard weight. For example, if there are two shards and the first has a weight of 9 while the second has a weight of 10, the first will be sent 9 / 19 parts of the rows, and the second will be sent 10 / 19. - -Each shard can have the ‘internal\_replication’ parameter defined in the config file. - -If this parameter is set to ‘true’, the write operation selects the first healthy replica and writes data to it. Use this alternative if the Distributed table “looks at” replicated tables. 
In other words, if the table where data will be written is going to replicate them itself.
-
-If it is set to ‘false’ (the default), data is written to all replicas. In essence, this means that the Distributed table replicates data itself. This is worse than using replicated tables, because the consistency of replicas is not checked, and over time they will contain slightly different data.
-
-To select the shard that a row of data is sent to, the sharding expression is analyzed, and its remainder is taken from dividing it by the total weight of the shards. The row is sent to the shard that corresponds to the half-interval of the remainders from ‘prev\_weight’ to ‘prev\_weights + weight’, where ‘prev\_weights’ is the total weight of the shards with the smallest number, and ‘weight’ is the weight of this shard. For example, if there are two shards, and the first has a weight of 9 while the second has a weight of 10, the row will be sent to the first shard for the remainders from the range \[0, 9), and to the second for the remainders from the range \[9, 19).
-
-The sharding expression can be any expression from constants and table columns that returns an integer. For example, you can use the expression ‘rand()’ for random distribution of data, or ‘UserID’ for distribution by the remainder from dividing the user’s ID (then the data of a single user will reside on a single shard, which simplifies running IN and JOIN by users). If one of the columns is not distributed evenly enough, you can wrap it in a hash function: intHash64(UserID).
-
-A simple remainder from the division is a limited solution for sharding and isn’t always appropriate. It works for medium and large volumes of data (dozens of servers), but not for very large volumes of data (hundreds of servers or more). In the latter case, use the sharding scheme required by the subject area, rather than using entries in Distributed tables.
-
-SELECT queries are sent to all the shards and work regardless of how data is distributed across the shards (they can be distributed completely randomly). When you add a new shard, you don’t have to transfer the old data to it. You can write new data with a heavier weight – the data will be distributed slightly unevenly, but queries will work correctly and efficiently.
-
-You should be concerned about the sharding scheme in the following cases:
-
-- Queries are used that require joining data (IN or JOIN) by a specific key. If data is sharded by this key, you can use local IN or JOIN instead of GLOBAL IN or GLOBAL JOIN, which is much more efficient.
-- A large number of servers is used (hundreds or more) with a large number of small queries (queries of individual clients - websites, advertisers, or partners). In order for the small queries to not affect the entire cluster, it makes sense to locate data for a single client on a single shard. Alternatively, as we’ve done in Yandex.Metrica, you can set up bi-level sharding: divide the entire cluster into “layers”, where a layer may consist of multiple shards. Data for a single client is located on a single layer, but shards can be added to a layer as necessary, and data is randomly distributed within them. Distributed tables are created for each layer, and a single shared distributed table is created for global queries.
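-
-For example, a Distributed table sharded by user, built from the expressions mentioned above (a minimal sketch):
-
-``` sql
--- 'logs' cluster and default.hits table are taken from the example above
-CREATE TABLE hits_all AS default.hits
-ENGINE = Distributed(logs, default, hits, intHash64(UserID))
-```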
The period for sending data is managed by the [distributed\_directory\_monitor\_sleep\_time\_ms](../settings/settings.md#distributed_directory_monitor_sleep_time_ms) and [distributed\_directory\_monitor\_max\_sleep\_time\_ms](../settings/settings.md#distributed_directory_monitor_max_sleep_time_ms) settings. The `Distributed` engine sends each file with inserted data separately, but you can enable batch sending of files with the [distributed\_directory\_monitor\_batch\_inserts](../settings/settings.md#distributed_directory_monitor_batch_inserts) setting. This setting improves cluster performance by better utilizing local server and network resources. You should check whether data is sent successfully by checking the list of files (data waiting to be sent) in the table directory: `/var/lib/clickhouse/data/database/table/`. - -If the server ceased to exist or had a rough restart (for example, after a device failure) after an INSERT to a Distributed table, the inserted data might be lost. If a damaged data part is detected in the table directory, it is transferred to the ‘broken’ subdirectory and no longer used. - -When the max\_parallel\_replicas option is enabled, query processing is parallelized across all replicas within a single shard. For more information, see the section [max\_parallel\_replicas](../settings/settings.md#settings-max_parallel_replicas). - -## Virtual Columns {#virtual-columns} - -- `_shard_num` — Contains the `shard_num` (from `system.clusters`). Type: [UInt32](../../data_types/int_uint.md). - -!!! note "Note" - Since [`remote`](../../query_language/table_functions/remote.md)/`cluster` table functions internally create temporary instance of the same Distributed engine, `_shard_num` is available there too. - -**See Also** - -- [Virtual columns](index.md#table_engines-virtual_columns) - -[Original article](https://clickhouse.tech/docs/en/operations/table_engines/distributed/) diff --git a/docs/fa/operations/table_engines/external_data.md b/docs/fa/operations/table_engines/external_data.md deleted file mode 100644 index 1c675b92016..00000000000 --- a/docs/fa/operations/table_engines/external_data.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -en_copy: true ---- - -# External Data for Query Processing {#external-data-for-query-processing} - -ClickHouse allows sending a server the data that is needed for processing a query, together with a SELECT query. This data is put in a temporary table (see the section “Temporary tables”) and can be used in the query (for example, in IN operators). - -For example, if you have a text file with important user identifiers, you can upload it to the server along with a query that uses filtration by this list. - -If you need to run more than one query with a large volume of external data, don’t use this feature. It is better to upload the data to the DB ahead of time. - -External data can be uploaded using the command-line client (in non-interactive mode), or using the HTTP interface. - -In the command-line client, you can specify a parameters section in the format - -``` bash ---external --file=... [--name=...] [--format=...] [--types=...|--structure=...] -``` - -You may have multiple sections like this, for the number of tables being transmitted. - -**–external** – Marks the beginning of a clause. -**–file** – Path to the file with the table dump, or -, which refers to stdin. -Only a single table can be retrieved from stdin. - -The following parameters are optional: **–name**– Name of the table. If omitted, \_data is used. 
**–format** – Data format in the file. If omitted, TabSeparated is used.

One of the following parameters is required:

**–types** – A list of comma-separated column types. For example: `UInt64,String`. The columns will be named \_1, \_2, …
**–structure** – The table structure in the format `UserID UInt64`, `URL String`. Defines the column names and types.

The files specified in ‘file’ will be parsed by the format specified in ‘format’, using the data types specified in ‘types’ or ‘structure’. The table will be uploaded to the server and accessible there as a temporary table with the name in ‘name’.

Examples:

``` bash
$ echo -ne "1\n2\n3\n" | clickhouse-client --query="SELECT count() FROM test.visits WHERE TraficSourceID IN _data" --external --file=- --types=Int8
849897
$ cat /etc/passwd | sed 's/:/\t/g' | clickhouse-client --query="SELECT shell, count() AS c FROM passwd GROUP BY shell ORDER BY c DESC" --external --file=- --name=passwd --structure='login String, unused String, uid UInt16, gid UInt16, comment String, home String, shell String'
/bin/sh 20
/bin/false 5
/bin/bash 4
/usr/sbin/nologin 1
/bin/sync 1
```

When using the HTTP interface, external data is passed in the multipart/form-data format. Each table is transmitted as a separate file. The table name is taken from the file name. The ‘query\_string’ is passed the parameters ‘name\_format’, ‘name\_types’, and ‘name\_structure’, where ‘name’ is the name of the table that these parameters correspond to. The meaning of the parameters is the same as when using the command-line client.

Example:

``` bash
$ cat /etc/passwd | sed 's/:/\t/g' > passwd.tsv

$ curl -F 'passwd=@passwd.tsv;' 'http://localhost:8123/?query=SELECT+shell,+count()+AS+c+FROM+passwd+GROUP+BY+shell+ORDER+BY+c+DESC&passwd_structure=login+String,+unused+String,+uid+UInt16,+gid+UInt16,+comment+String,+home+String,+shell+String'
/bin/sh 20
/bin/false 5
/bin/bash 4
/usr/sbin/nologin 1
/bin/sync 1
```

For distributed query processing, the temporary tables are sent to all the remote servers.

[Original article](https://clickhouse.tech/docs/en/operations/table_engines/external_data/) diff --git a/docs/fa/operations/table_engines/file.md b/docs/fa/operations/table_engines/file.md deleted file mode 100644 index 6794c5a9d8f..00000000000 --- a/docs/fa/operations/table_engines/file.md +++ /dev/null @@ -1,87 +0,0 @@
---
en_copy: true
---

# File {#table_engines-file}

The File table engine keeps the data in a file in one of the supported [file formats](../../interfaces/formats.md#formats) (TabSeparated, Native, etc.).

Usage examples:

- Data export from ClickHouse to file.
- Convert data from one format to another.
- Updating data in ClickHouse via editing a file on a disk.

## Usage in ClickHouse Server {#usage-in-clickhouse-server}

``` sql
File(Format)
```

The `Format` parameter specifies one of the available file formats. To perform `SELECT` queries, the format must be supported for input, and to perform `INSERT` queries – for output. The available formats are listed in the [Formats](../../interfaces/formats.md#formats) section.

ClickHouse does not allow specifying a filesystem path for `File`. It will use the folder defined by the [path](../server_settings/settings.md) setting in the server configuration.

When creating a table using `File(Format)`, ClickHouse creates an empty subdirectory in that folder. When data is written to that table, it’s put into a `data.Format` file in that subdirectory.
You may manually create this subfolder and file in the server filesystem and then [ATTACH](../../query_language/misc.md) it to table information with a matching name, so you can query data from that file.

!!! warning "Warning"
    Be careful with this functionality, because ClickHouse does not keep track of external changes to such files. The result of simultaneous writes via ClickHouse and outside of ClickHouse is undefined.

**Example:**

**1.** Set up the `file_engine_table` table:

``` sql
CREATE TABLE file_engine_table (name String, value UInt32) ENGINE=File(TabSeparated)
```

By default, ClickHouse will create the folder `/var/lib/clickhouse/data/default/file_engine_table`.

**2.** Manually create `/var/lib/clickhouse/data/default/file_engine_table/data.TabSeparated` containing:

``` bash
$ cat data.TabSeparated
one 1
two 2
```

**3.** Query the data:

``` sql
SELECT * FROM file_engine_table
```

``` text
┌─name─┬─value─┐
│ one  │     1 │
│ two  │     2 │
└──────┴───────┘
```

## Usage in Clickhouse-local {#usage-in-clickhouse-local}

In [clickhouse-local](../utils/clickhouse-local.md), the File engine accepts a file path in addition to `Format`. Default input/output streams can be specified using numeric or human-readable names like `0` or `stdin`, `1` or `stdout`.

**Example:**

``` bash
$ echo -e "1,2\n3,4" | clickhouse-local -q "CREATE TABLE table (a Int64, b Int64) ENGINE = File(CSV, stdin); SELECT a, b FROM table; DROP TABLE table"
```

## Details of Implementation {#details-of-implementation}

- Multiple `SELECT` queries can be performed concurrently, but `INSERT` queries will wait for each other.
- Creating a new file with an `INSERT` query is supported.
- If the file exists, `INSERT` appends new values to it.
- Not supported:
    - `ALTER`
    - `SELECT ... SAMPLE`
    - Indices
    - Replication

[Original article](https://clickhouse.tech/docs/en/operations/table_engines/file/) diff --git a/docs/fa/operations/table_engines/generate.md b/docs/fa/operations/table_engines/generate.md deleted file mode 100644 index 051369d2e1c..00000000000 --- a/docs/fa/operations/table_engines/generate.md +++ /dev/null @@ -1,58 +0,0 @@
---
en_copy: true
---

# GenerateRandom {#table_engines-generate}

The GenerateRandom table engine produces random data for a given table schema.

Usage examples:

- Use in tests to populate a reproducible large table.
- Generate random input for fuzzing tests.

## Usage in ClickHouse Server {#usage-in-clickhouse-server}

``` sql
ENGINE = GenerateRandom(random_seed, max_string_length, max_array_length)
```

The `max_array_length` and `max_string_length` parameters specify the maximum length of all array columns and strings, respectively, in the generated data.

The GenerateRandom table engine supports only `SELECT` queries.

It supports all [DataTypes](../../data_types/index.md) that can be stored in a table except `LowCardinality` and `AggregateFunction`.

**Example:**

**1.** Set up the `generate_engine_table` table:

``` sql
CREATE TABLE generate_engine_table (name String, value UInt32) ENGINE = GenerateRandom(1, 5, 3)
```

**2.** Query the data:

``` sql
SELECT * FROM generate_engine_table LIMIT 3
```

``` text
┌─name─┬──────value─┐
│ c4xJ │ 1412771199 │
│ r    │ 1791099446 │
│ 7#$  │  124312908 │
└──────┴────────────┘
```

## Details of Implementation {#details-of-implementation}

- Not supported:
    - `ALTER`
    - `SELECT ... SAMPLE`
    - `INSERT`
    - Indices
    - Replication

[Original article](https://clickhouse.tech/docs/en/operations/table_engines/generate/) diff --git a/docs/fa/operations/table_engines/graphitemergetree.md b/docs/fa/operations/table_engines/graphitemergetree.md deleted file mode 100644 index 6916441acd0..00000000000 --- a/docs/fa/operations/table_engines/graphitemergetree.md +++ /dev/null @@ -1,171 +0,0 @@
---
en_copy: true
---

# GraphiteMergeTree {#graphitemergetree}

This engine is designed for thinning and aggregating/averaging (rollup) [Graphite](http://graphite.readthedocs.io/en/latest/index.html) data. It may be helpful to developers who want to use ClickHouse as a data store for Graphite.

You can use any ClickHouse table engine to store the Graphite data if you don’t need rollup, but if you need rollup, use `GraphiteMergeTree`. The engine reduces the volume of storage and increases the efficiency of queries from Graphite.

The engine inherits properties from [MergeTree](mergetree.md).

## Creating a Table {#creating-table}

``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
(
    Path String,
    Time DateTime,
    Value <Numeric_type>,
    Version <Numeric_type>
    ...
) ENGINE = GraphiteMergeTree(config_section)
[PARTITION BY expr]
[ORDER BY expr]
[SAMPLE BY expr]
[SETTINGS name=value, ...]
```

See a detailed description of the [CREATE TABLE](../../query_language/create.md#create-table-query) query.

A table for Graphite data should have the following columns:

- Metric name (Graphite sensor). Data type: `String`.

- Time of measuring the metric. Data type: `DateTime`.

- Value of the metric. Data type: any numeric.

- Version of the metric. Data type: any numeric.

    ClickHouse saves the rows with the highest version or the last written if versions are the same. Other rows are deleted during the merge of data parts.

The names of these columns should be set in the rollup configuration.

**GraphiteMergeTree parameters**

- `config_section` — Name of the section in the configuration file where the rules of rollup are set.

**Query clauses**

When creating a `GraphiteMergeTree` table, the same [clauses](mergetree.md#table_engine-mergetree-creating-a-table) are required, as when creating a `MergeTree` table.
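For instance, a minimal sketch of such a table (the table name is hypothetical, and a rollup section named `graphite_rollup` is assumed to exist in the server configuration, as described in the rollup configuration section below):

``` sql
-- A sketch only: assumes a `graphite_rollup` section in the server config.
CREATE TABLE graphite_data
(
    Path String,
    Time DateTime,
    Value Float64,
    Version UInt64
) ENGINE = GraphiteMergeTree('graphite_rollup')
PARTITION BY toYYYYMM(Time)
ORDER BY (Path, Time)
```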
**Deprecated Method for Creating a Table**

!!! attention "Attention"
    Do not use this method in new projects and, if possible, switch the old projects to the method described above.

``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
(
    EventDate Date,
    Path String,
    Time DateTime,
    Value <Numeric_type>,
    Version <Numeric_type>
    ...
) ENGINE [=] GraphiteMergeTree(date-column [, sampling_expression], (primary, key), index_granularity, config_section)
```

All of the parameters except `config_section` have the same meaning as in `MergeTree`.

- `config_section` — Name of the section in the configuration file where the rules of rollup are set.
## Rollup configuration {#rollup-configuration}

The settings for rollup are defined by the [graphite\_rollup](../server_settings/settings.md#server_settings-graphite_rollup) parameter in the server configuration. The name of the parameter can be anything. You can create several configurations and use them for different tables.

Rollup configuration structure:

    required-columns
    patterns

### Required Columns {#required-columns}

- `path_column_name` — The name of the column storing the metric name (Graphite sensor). Default value: `Path`.
- `time_column_name` — The name of the column storing the time of measuring the metric. Default value: `Time`.
- `value_column_name` — The name of the column storing the value of the metric at the time set in `time_column_name`. Default value: `Value`.
- `version_column_name` — The name of the column storing the version of the metric. Default value: `Timestamp`.

### Patterns {#patterns}

Structure of the `patterns` section:

``` text
pattern
    regexp
    function
pattern
    regexp
    age + precision
    ...
pattern
    regexp
    function
    age + precision
    ...
pattern
    ...
default
    function
    age + precision
    ...
```

!!! warning "Attention"
    Patterns must be strictly ordered:

    1. Patterns without `function` or `retention`.
    1. Patterns with both `function` and `retention`.
    1. Pattern `default`.

When processing a row, ClickHouse checks the rules in the `pattern` sections. Each of the `pattern` (including `default`) sections can contain a `function` parameter for aggregation, `retention` parameters, or both. If the metric name matches the `regexp`, the rules from the `pattern` section (or sections) are applied; otherwise, the rules from the `default` section are used.

Fields for `pattern` and `default` sections:

- `regexp` – A pattern for the metric name.
- `age` – The minimum age of the data in seconds.
- `precision` – How precisely to define the age of the data in seconds. Should be a divisor of 86400 (seconds in a day).
- `function` – The name of the aggregating function to apply to data whose age falls within the range `[age, age + precision]`.

### Configuration Example {#configuration-example}

``` xml
<graphite_rollup>
    <version_column_name>Version</version_column_name>
    <pattern>
        <regexp>click_cost</regexp>
        <function>any</function>
        <retention>
            <age>0</age>
            <precision>5</precision>
        </retention>
        <retention>
            <age>86400</age>
            <precision>60</precision>
        </retention>
    </pattern>
    <default>
        <function>max</function>
        <retention>
            <age>0</age>
            <precision>60</precision>
        </retention>
        <retention>
            <age>3600</age>
            <precision>300</precision>
        </retention>
        <retention>
            <age>86400</age>
            <precision>3600</precision>
        </retention>
    </default>
</graphite_rollup>
```

[Original article](https://clickhouse.tech/docs/en/operations/table_engines/graphitemergetree/) diff --git a/docs/fa/operations/table_engines/hdfs.md b/docs/fa/operations/table_engines/hdfs.md deleted file mode 100644 index 07bd0800aa5..00000000000 --- a/docs/fa/operations/table_engines/hdfs.md +++ /dev/null @@ -1,120 +0,0 @@
---
en_copy: true
---

# HDFS {#table_engines-hdfs}

This engine provides integration with the [Apache Hadoop](https://en.wikipedia.org/wiki/Apache_Hadoop) ecosystem by allowing you to manage data on [HDFS](https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html) via ClickHouse. This engine is similar to the [File](file.md) and [URL](url.md) engines, but provides Hadoop-specific features.

## Usage {#usage}

``` sql
ENGINE = HDFS(URI, format)
```

The `URI` parameter is the whole file URI in HDFS.
The `format` parameter specifies one of the available file formats. To perform `SELECT` queries, the format must be supported for input, and to perform `INSERT` queries – for output. The available formats are listed in the [Formats](../../interfaces/formats.md#formats) section.
The path part of `URI` may contain globs.
In this case, the table would be read-only.

**Example:**

**1.** Set up the `hdfs_engine_table` table:

``` sql
CREATE TABLE hdfs_engine_table (name String, value UInt32) ENGINE=HDFS('hdfs://hdfs1:9000/other_storage', 'TSV')
```

**2.** Fill file:

``` sql
INSERT INTO hdfs_engine_table VALUES ('one', 1), ('two', 2), ('three', 3)
```

**3.** Query the data:

``` sql
SELECT * FROM hdfs_engine_table LIMIT 2
```

``` text
┌─name─┬─value─┐
│ one  │     1 │
│ two  │     2 │
└──────┴───────┘
```

## Implementation Details {#implementation-details}

- Reads and writes can be parallel.
- Not supported:
    - `ALTER` and `SELECT...SAMPLE` operations.
    - Indexes.
    - Replication.

**Globs in path**

Multiple path components can have globs. To be processed, a file must exist and match the whole path pattern. The listing of files is determined during `SELECT` (not at `CREATE` time).

- `*` — Substitutes any number of any characters except `/` including empty string.
- `?` — Substitutes any single character.
- `{some_string,another_string,yet_another_one}` — Substitutes any of strings `'some_string', 'another_string', 'yet_another_one'`.
- `{N..M}` — Substitutes any number in range from N to M including both borders.

Constructions with `{}` are similar to the [remote](../../query_language/table_functions/remote.md) table function.

**Example**

1. Suppose we have several files in TSV format with the following URIs on HDFS:

- ‘hdfs://hdfs1:9000/some\_dir/some\_file\_1’
- ‘hdfs://hdfs1:9000/some\_dir/some\_file\_2’
- ‘hdfs://hdfs1:9000/some\_dir/some\_file\_3’
- ‘hdfs://hdfs1:9000/another\_dir/some\_file\_1’
- ‘hdfs://hdfs1:9000/another\_dir/some\_file\_2’
- ‘hdfs://hdfs1:9000/another\_dir/some\_file\_3’

1. There are several ways to make a table consisting of all six files:

``` sql
CREATE TABLE table_with_range (name String, value UInt32) ENGINE = HDFS('hdfs://hdfs1:9000/{some,another}_dir/some_file_{1..3}', 'TSV')
```

Another way:

``` sql
CREATE TABLE table_with_question_mark (name String, value UInt32) ENGINE = HDFS('hdfs://hdfs1:9000/{some,another}_dir/some_file_?', 'TSV')
```

A table can consist of all the files in both directories (all files should satisfy the format and schema described in the query):

``` sql
CREATE TABLE table_with_asterisk (name String, value UInt32) ENGINE = HDFS('hdfs://hdfs1:9000/{some,another}_dir/*', 'TSV')
```

!!! warning "Warning"
    If the listing of files contains number ranges with leading zeros, use the construction with braces for each digit separately or use `?`.

**Example**

Create a table with files named `file000`, `file001`, … , `file999`:

``` sql
CREATE TABLE big_table (name String, value UInt32) ENGINE = HDFS('hdfs://hdfs1:9000/big_dir/file{0..9}{0..9}{0..9}', 'CSV')
```

## Virtual Columns {#virtual-columns}

- `_path` — Path to the file.
- `_file` — Name of the file.
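As a sketch of how these can be used (reusing the `hdfs_engine_table` from the example above), a virtual column is read by naming it explicitly in the `SELECT` list:

``` sql
-- `_file` is not returned by `SELECT *`; it must be named explicitly.
SELECT _file, name, value FROM hdfs_engine_table
```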
- -**See Also** - -- [Virtual columns](https://clickhouse.tech/docs/en/operations/table_engines/#table_engines-virtual_columns) - -[Original article](https://clickhouse.tech/docs/en/operations/table_engines/hdfs/) diff --git a/docs/fa/operations/table_engines/index.md b/docs/fa/operations/table_engines/index.md deleted file mode 100644 index 4ffff8f3bb3..00000000000 --- a/docs/fa/operations/table_engines/index.md +++ /dev/null @@ -1,81 +0,0 @@ ---- -en_copy: true ---- - -# Table engines {#table_engines} - -The table engine (type of table) determines: - -- How and where data is stored, where to write it to, and where to read it from. -- Which queries are supported, and how. -- Concurrent data access. -- Use of indexes, if present. -- Whether multithreaded request execution is possible. -- Data replication parameters. - -## Engine Families {#engine-families} - -### MergeTree {#mergetree} - -The most universal and functional table engines for high-load tasks. The property shared by these engines is quick data insertion with subsequent background data processing. `MergeTree` family engines support data replication (with [Replicated\*](replication.md) versions of engines), partitioning, and other features not supported in other engines. - -Engines in the family: - -- [MergeTree](mergetree.md) -- [ReplacingMergeTree](replacingmergetree.md) -- [SummingMergeTree](summingmergetree.md) -- [AggregatingMergeTree](aggregatingmergetree.md) -- [CollapsingMergeTree](collapsingmergetree.md) -- [VersionedCollapsingMergeTree](versionedcollapsingmergetree.md) -- [GraphiteMergeTree](graphitemergetree.md) - -### Log {#log} - -Lightweight [engines](log_family.md) with minimum functionality. They’re the most effective when you need to quickly write many small tables (up to approximately 1 million rows) and read them later as a whole. - -Engines in the family: - -- [TinyLog](tinylog.md) -- [StripeLog](stripelog.md) -- [Log](log.md) - -### Integration engines {#integration-engines} - -Engines for communicating with other data storage and processing systems. - -Engines in the family: - -- [Kafka](kafka.md) -- [MySQL](mysql.md) -- [ODBC](odbc.md) -- [JDBC](jdbc.md) -- [HDFS](hdfs.md) - -### Special engines {#special-engines} - -Engines in the family: - -- [Distributed](distributed.md) -- [MaterializedView](materializedview.md) -- [Dictionary](dictionary.md) -- [Merge](merge.md) -- [File](file.md) -- [Null](null.md) -- [Set](set.md) -- [Join](join.md) -- [URL](url.md) -- [View](view.md) -- [Memory](memory.md) -- [Buffer](buffer.md) - -## Virtual columns {#table_engines-virtual-columns} - -Virtual column is an integral table engine attribute that is defined in the engine source code. - -You shouldn’t specify virtual columns in the `CREATE TABLE` query and you can’t see them in `SHOW CREATE TABLE` and `DESCRIBE TABLE` query results. Virtual columns are also read-only, so you can’t insert data into virtual columns. - -To select data from a virtual column, you must specify its name in the `SELECT` query. `SELECT *` doesn’t return values from virtual columns. - -If you create a table with a column that has the same name as one of the table virtual columns, the virtual column becomes inaccessible. We don’t recommend doing this. To help avoid conflicts, virtual column names are usually prefixed with an underscore. 
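For instance, a minimal sketch assuming a hypothetical `hits_all` table on the [Distributed](distributed.md) engine, which provides the `_shard_num` virtual column:

``` sql
-- `SELECT *` would return only the regular columns;
-- the virtual column must be named explicitly.
SELECT _shard_num, count() FROM hits_all GROUP BY _shard_num
```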
[Original article](https://clickhouse.tech/docs/en/operations/table_engines/) diff --git a/docs/fa/operations/table_engines/jdbc.md b/docs/fa/operations/table_engines/jdbc.md deleted file mode 100644 index 576c7182907..00000000000 --- a/docs/fa/operations/table_engines/jdbc.md +++ /dev/null @@ -1,87 +0,0 @@
---
en_copy: true
---

# JDBC {#table-engine-jdbc}

Allows ClickHouse to connect to external databases via [JDBC](https://en.wikipedia.org/wiki/Java_Database_Connectivity).

To implement the JDBC connection, ClickHouse uses the separate program [clickhouse-jdbc-bridge](https://github.com/alex-krash/clickhouse-jdbc-bridge) that should run as a daemon.

This engine supports the [Nullable](../../data_types/nullable.md) data type.

## Creating a Table {#creating-a-table}

``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name
(
    columns list...
)
ENGINE = JDBC(dbms_uri, external_database, external_table)
```

**Engine Parameters**

- `dbms_uri` — URI of an external DBMS.

    Format: `jdbc:<driver_name>://<host_name>:<port>/?user=<username>&password=<password>`.
    Example for MySQL: `jdbc:mysql://localhost:3306/?user=root&password=root`.

- `external_database` — Database in an external DBMS.

- `external_table` — Name of the table in `external_database`.

## Usage Example {#usage-example}

Creating a table in the MySQL server by connecting directly with its console client:

``` text
mysql> CREATE TABLE `test`.`test` (
    ->   `int_id` INT NOT NULL AUTO_INCREMENT,
    ->   `int_nullable` INT NULL DEFAULT NULL,
    ->   `float` FLOAT NOT NULL,
    ->   `float_nullable` FLOAT NULL DEFAULT NULL,
    ->   PRIMARY KEY (`int_id`));
Query OK, 0 rows affected (0,09 sec)

mysql> insert into test (`int_id`, `float`) VALUES (1,2);
Query OK, 1 row affected (0,00 sec)

mysql> select * from test;
+--------+--------------+-------+----------------+
| int_id | int_nullable | float | float_nullable |
+--------+--------------+-------+----------------+
|      1 |         NULL |     2 |           NULL |
+--------+--------------+-------+----------------+
1 row in set (0,00 sec)
```

Creating a table in ClickHouse server and selecting data from it:

``` sql
CREATE TABLE jdbc_table
(
    `int_id` Int32,
    `int_nullable` Nullable(Int32),
    `float` Float32,
    `float_nullable` Nullable(Float32)
)
ENGINE JDBC('jdbc:mysql://localhost:3306/?user=root&password=root', 'test', 'test')
```

``` sql
SELECT *
FROM jdbc_table
```

``` text
┌─int_id─┬─int_nullable─┬─float─┬─float_nullable─┐
│      1 │         ᴺᵁᴸᴸ │     2 │           ᴺᵁᴸᴸ │
└────────┴──────────────┴───────┴────────────────┘
```

## See Also {#see-also}

- [JDBC table function](../../query_language/table_functions/jdbc.md).

[Original article](https://clickhouse.tech/docs/en/operations/table_engines/jdbc/) diff --git a/docs/fa/operations/table_engines/join.md b/docs/fa/operations/table_engines/join.md deleted file mode 100644 index 303fe5a8cc2..00000000000 --- a/docs/fa/operations/table_engines/join.md +++ /dev/null @@ -1,108 +0,0 @@
---
en_copy: true
---

# Join {#join}

A prepared data structure for use in [JOIN](../../query_language/select.md#select-join) operations.

## Creating a Table {#creating-a-table}

``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
(
    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [TTL expr1],
    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2] [TTL expr2],
) ENGINE = Join(join_strictness, join_type, k1[, k2, ...])
```

See the detailed description of the [CREATE TABLE](../../query_language/create.md#create-table-query) query.
**Engine Parameters**

- `join_strictness` – [JOIN strictness](../../query_language/select.md#select-join-strictness).
- `join_type` – [JOIN type](../../query_language/select.md#select-join-types).
- `k1[, k2, ...]` – Key columns from the `USING` clause that the `JOIN` operation is made with.

Enter `join_strictness` and `join_type` parameters without quotes, for example, `Join(ANY, LEFT, col1)`. They must match the `JOIN` operation that the table will be used for. If the parameters don’t match, ClickHouse doesn’t throw an exception and may return incorrect data.

## Table Usage {#table-usage}

### Example {#example}

Creating the left-side table:

``` sql
CREATE TABLE id_val(`id` UInt32, `val` UInt32) ENGINE = TinyLog
```

``` sql
INSERT INTO id_val VALUES (1,11)(2,12)(3,13)
```

Creating the right-side `Join` table:

``` sql
CREATE TABLE id_val_join(`id` UInt32, `val` UInt8) ENGINE = Join(ANY, LEFT, id)
```

``` sql
INSERT INTO id_val_join VALUES (1,21)(1,22)(3,23)
```

Joining the tables:

``` sql
SELECT * FROM id_val ANY LEFT JOIN id_val_join USING (id) SETTINGS join_use_nulls = 1
```

``` text
┌─id─┬─val─┬─id_val_join.val─┐
│  1 │  11 │              21 │
│  2 │  12 │            ᴺᵁᴸᴸ │
│  3 │  13 │              23 │
└────┴─────┴─────────────────┘
```

As an alternative, you can retrieve data from the `Join` table, specifying the join key value:

``` sql
SELECT joinGet('id_val_join', 'val', toUInt32(1))
```

``` text
┌─joinGet('id_val_join', 'val', toUInt32(1))─┐
│                                         21 │
└────────────────────────────────────────────┘
```

### Selecting and Inserting Data {#selecting-and-inserting-data}

You can use `INSERT` queries to add data to the `Join`-engine tables. If the table was created with the `ANY` strictness, data for duplicate keys is ignored. With the `ALL` strictness, all rows are added.

You cannot perform a `SELECT` query directly from the table. Instead, use one of the following methods:

- Place the table on the right side in a `JOIN` clause.
- Call the [joinGet](../../query_language/functions/other_functions.md#joinget) function, which lets you extract data from the table the same way as from a dictionary.

### Limitations and Settings {#join-limitations-and-settings}

When creating a table, the following settings are applied:

- [join\_use\_nulls](../settings/settings.md#join_use_nulls)
- [max\_rows\_in\_join](../settings/query_complexity.md#settings-max_rows_in_join)
- [max\_bytes\_in\_join](../settings/query_complexity.md#settings-max_bytes_in_join)
- [join\_overflow\_mode](../settings/query_complexity.md#settings-join_overflow_mode)
- [join\_any\_take\_last\_row](../settings/settings.md#settings-join_any_take_last_row)

The `Join`-engine tables can’t be used in `GLOBAL JOIN` operations.

The `Join` engine allows using the [join\_use\_nulls](../settings/settings.md#join_use_nulls) setting in the `CREATE TABLE` statement, and the [SELECT](../../query_language/select.md) query allows using `join_use_nulls` too. If the `join_use_nulls` settings differ, you can get an error when joining the table, depending on the kind of JOIN. When you use the [joinGet](../../query_language/functions/other_functions.md#joinget) function, you have to use the same `join_use_nulls` setting in the `CREATE TABLE` and `SELECT` statements.

## Data Storage {#data-storage}

`Join` table data is always located in RAM. When inserting rows into a table, ClickHouse writes data blocks to the directory on the disk so that they can be restored when the server restarts.
- -If the server restarts incorrectly, the data block on the disk might get lost or damaged. In this case, you may need to manually delete the file with damaged data. - -[Original article](https://clickhouse.tech/docs/en/operations/table_engines/join/) diff --git a/docs/fa/operations/table_engines/kafka.md b/docs/fa/operations/table_engines/kafka.md deleted file mode 100644 index b182d97585d..00000000000 --- a/docs/fa/operations/table_engines/kafka.md +++ /dev/null @@ -1,173 +0,0 @@ ---- -en_copy: true ---- - -# Kafka {#kafka} - -This engine works with [Apache Kafka](http://kafka.apache.org/). - -Kafka lets you: - -- Publish or subscribe to data flows. -- Organize fault-tolerant storage. -- Process streams as they become available. - -## Creating a Table {#table_engine-kafka-creating-a-table} - -``` sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] -( - name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], - name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], - ... -) ENGINE = Kafka() -SETTINGS - kafka_broker_list = 'host:port', - kafka_topic_list = 'topic1,topic2,...', - kafka_group_name = 'group_name', - kafka_format = 'data_format'[,] - [kafka_row_delimiter = 'delimiter_symbol',] - [kafka_schema = '',] - [kafka_num_consumers = N,] - [kafka_skip_broken_messages = N] -``` - -Required parameters: - -- `kafka_broker_list` – A comma-separated list of brokers (for example, `localhost:9092`). -- `kafka_topic_list` – A list of Kafka topics. -- `kafka_group_name` – A group of Kafka consumers. Reading margins are tracked for each group separately. If you don’t want messages to be duplicated in the cluster, use the same group name everywhere. -- `kafka_format` – Message format. Uses the same notation as the SQL `FORMAT` function, such as `JSONEachRow`. For more information, see the [Formats](../../interfaces/formats.md) section. - -Optional parameters: - -- `kafka_row_delimiter` – Delimiter character, which ends the message. -- `kafka_schema` – Parameter that must be used if the format requires a schema definition. For example, [Cap’n Proto](https://capnproto.org/) requires the path to the schema file and the name of the root `schema.capnp:Message` object. -- `kafka_num_consumers` – The number of consumers per table. Default: `1`. Specify more consumers if the throughput of one consumer is insufficient. The total number of consumers should not exceed the number of partitions in the topic, since only one consumer can be assigned per partition. -- `kafka_skip_broken_messages` – Kafka message parser tolerance to schema-incompatible messages per block. Default: `0`. If `kafka_skip_broken_messages = N` then the engine skips *N* Kafka messages that cannot be parsed (a message equals a row of data). - -Examples: - -``` sql - CREATE TABLE queue ( - timestamp UInt64, - level String, - message String - ) ENGINE = Kafka('localhost:9092', 'topic', 'group1', 'JSONEachRow'); - - SELECT * FROM queue LIMIT 5; - - CREATE TABLE queue2 ( - timestamp UInt64, - level String, - message String - ) ENGINE = Kafka SETTINGS kafka_broker_list = 'localhost:9092', - kafka_topic_list = 'topic', - kafka_group_name = 'group1', - kafka_format = 'JSONEachRow', - kafka_num_consumers = 4; - - CREATE TABLE queue2 ( - timestamp UInt64, - level String, - message String - ) ENGINE = Kafka('localhost:9092', 'topic', 'group1') - SETTINGS kafka_format = 'JSONEachRow', - kafka_num_consumers = 4; -``` - -
**Deprecated Method for Creating a Table**

!!! attention "Attention"
    Do not use this method in new projects. If possible, switch old projects to the method described above.

``` sql
Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format
      [, kafka_row_delimiter, kafka_schema, kafka_num_consumers, kafka_skip_broken_messages])
```
## Description {#description}

The delivered messages are tracked automatically, so each message in a group is only counted once. If you want to get the data twice, then create a copy of the table with another group name.

Groups are flexible and synced on the cluster. For instance, if you have 10 topics and 5 copies of a table in a cluster, then each copy gets 2 topics. If the number of copies changes, the topics are redistributed across the copies automatically. Read more about this at http://kafka.apache.org/intro.

`SELECT` is not particularly useful for reading messages (except for debugging), because each message can be read only once. It is more practical to create real-time threads using materialized views. To do this:

1. Use the engine to create a Kafka consumer and consider it a data stream.
2. Create a table with the desired structure.
3. Create a materialized view that converts data from the engine and puts it into a previously created table.

When the `MATERIALIZED VIEW` joins the engine, it starts collecting data in the background. This allows you to continually receive messages from Kafka and convert them to the required format using `SELECT`.
One Kafka table can have as many materialized views as you like. They do not read data from the Kafka table directly, but receive new records (in blocks); this way you can write to several tables with different levels of detail (with grouping and aggregation, or without).

Example:

``` sql
  CREATE TABLE queue (
    timestamp UInt64,
    level String,
    message String
  ) ENGINE = Kafka('localhost:9092', 'topic', 'group1', 'JSONEachRow');

  CREATE TABLE daily (
    day Date,
    level String,
    total UInt64
  ) ENGINE = SummingMergeTree(day, (day, level), 8192);

  CREATE MATERIALIZED VIEW consumer TO daily
    AS SELECT toDate(toDateTime(timestamp)) AS day, level, count() as total
    FROM queue GROUP BY day, level;

  SELECT level, sum(total) FROM daily GROUP BY level;
```

To improve performance, received messages are grouped into blocks the size of [max\_insert\_block\_size](../settings/settings.md#settings-max_insert_block_size). If the block wasn’t formed within [stream\_flush\_interval\_ms](../settings/settings.md) milliseconds, the data will be flushed to the table regardless of the completeness of the block.

To stop receiving topic data or to change the conversion logic, detach the materialized view:

``` sql
  DETACH TABLE consumer;
  ATTACH MATERIALIZED VIEW consumer;
```

If you want to change the target table by using `ALTER`, we recommend disabling the materialized view to avoid discrepancies between the target table and the data from the view.

## Configuration {#configuration}

Similar to GraphiteMergeTree, the Kafka engine supports extended configuration using the ClickHouse config file. There are two configuration keys that you can use: global (`kafka`) and topic-level (`kafka_*`). The global configuration is applied first, and then the topic-level configuration is applied (if it exists).

``` xml
  <!-- Global configuration options for all tables of Kafka engine type -->
  <kafka>
    <debug>cgrp</debug>
    <auto_offset_reset>smallest</auto_offset_reset>
  </kafka>

  <!-- Configuration specific for topic "logs" -->
  <kafka_logs>
    <retry_backoff_ms>250</retry_backoff_ms>
    <fetch_min_bytes>100000</fetch_min_bytes>
  </kafka_logs>
```

For a list of possible configuration options, see the [librdkafka configuration reference](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md). Use the underscore (`_`) instead of a dot in the ClickHouse configuration. For example, `check.crcs=true` will be `<check_crcs>true</check_crcs>`.

## Virtual Columns {#virtual-columns}

- `_topic` — Kafka topic.
- `_key` — Key of the message.
- `_offset` — Offset of the message.
-- `_timestamp` — Timestamp of the message. -- `_partition` — Partition of Kafka topic. - -**See Also** - -- [Virtual columns](index.md#table_engines-virtual_columns) - -[Original article](https://clickhouse.tech/docs/en/operations/table_engines/kafka/) diff --git a/docs/fa/operations/table_engines/log.md b/docs/fa/operations/table_engines/log.md deleted file mode 100644 index b0817167d68..00000000000 --- a/docs/fa/operations/table_engines/log.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -en_copy: true ---- - -# Log {#log} - -Engine belongs to the family of log engines. See the common properties of log engines and their differences in the [Log Engine Family](log_family.md) article. - -Log differs from [TinyLog](tinylog.md) in that a small file of “marks” resides with the column files. These marks are written on every data block and contain offsets that indicate where to start reading the file in order to skip the specified number of rows. This makes it possible to read table data in multiple threads. -For concurrent data access, the read operations can be performed simultaneously, while write operations block reads and each other. -The Log engine does not support indexes. Similarly, if writing to a table failed, the table is broken, and reading from it returns an error. The Log engine is appropriate for temporary data, write-once tables, and for testing or demonstration purposes. - -[Original article](https://clickhouse.tech/docs/en/operations/table_engines/log/) diff --git a/docs/fa/operations/table_engines/log_family.md b/docs/fa/operations/table_engines/log_family.md deleted file mode 100644 index ecf535b5392..00000000000 --- a/docs/fa/operations/table_engines/log_family.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -en_copy: true ---- - -# Log Engine Family {#log-engine-family} - -These engines were developed for scenarios when you need to quickly write many small tables (up to about 1 million rows) and read them later as a whole. - -Engines of the family: - -- [StripeLog](stripelog.md) -- [Log](log.md) -- [TinyLog](tinylog.md) - -## Common properties {#common-properties} - -Engines: - -- Store data on a disk. - -- Append data to the end of file when writing. - -- Support locks for concurrent data access. - - During `INSERT` queries, the table is locked, and other queries for reading and writing data both wait for the table to unlock. If there are no data writing queries, any number of data reading queries can be performed concurrently. - -- Do not support [mutation](../../query_language/alter.md#alter-mutations) operations. - -- Do not support indexes. - - This means that `SELECT` queries for ranges of data are not efficient. - -- Do not write data atomically. - - You can get a table with corrupted data if something breaks the write operation, for example, abnormal server shutdown. - -## Differences {#differences} - -The `TinyLog` engine is the simplest in the family and provides the poorest functionality and lowest efficiency. The `TinyLog` engine doesn’t support parallel data reading by several threads. It reads data slower than other engines in the family that support parallel reading and it uses almost as many descriptors as the `Log` engine because it stores each column in a separate file. Use it in simple low-load scenarios. - -The `Log` and `StripeLog` engines support parallel data reading. When reading data, ClickHouse uses multiple threads. Each thread processes a separate data block. The `Log` engine uses a separate file for each column of the table. 
`StripeLog` stores all the data in one file. As a result, the `StripeLog` engine uses fewer descriptors in the operating system, but the `Log` engine provides higher efficiency when reading data. - -[Original article](https://clickhouse.tech/docs/en/operations/table_engines/log_family/) diff --git a/docs/fa/operations/table_engines/materializedview.md b/docs/fa/operations/table_engines/materializedview.md deleted file mode 100644 index 6219a84e4d7..00000000000 --- a/docs/fa/operations/table_engines/materializedview.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -en_copy: true ---- - -# MaterializedView {#materializedview} - -Used for implementing materialized views (for more information, see [CREATE TABLE](../../query_language/create.md)). For storing data, it uses a different engine that was specified when creating the view. When reading from a table, it just uses this engine. - -[Original article](https://clickhouse.tech/docs/en/operations/table_engines/materializedview/) diff --git a/docs/fa/operations/table_engines/memory.md b/docs/fa/operations/table_engines/memory.md deleted file mode 100644 index a7f79421fa1..00000000000 --- a/docs/fa/operations/table_engines/memory.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -en_copy: true ---- - -# Memory {#memory} - -The Memory engine stores data in RAM, in uncompressed form. Data is stored in exactly the same form as it is received when read. In other words, reading from this table is completely free. -Concurrent data access is synchronized. Locks are short: read and write operations don’t block each other. -Indexes are not supported. Reading is parallelized. -Maximal productivity (over 10 GB/sec) is reached on simple queries, because there is no reading from the disk, decompressing, or deserializing data. (We should note that in many cases, the productivity of the MergeTree engine is almost as high.) -When restarting a server, data disappears from the table and the table becomes empty. -Normally, using this table engine is not justified. However, it can be used for tests, and for tasks where maximum speed is required on a relatively small number of rows (up to approximately 100,000,000). - -The Memory engine is used by the system for temporary tables with external query data (see the section “External data for processing a query”), and for implementing GLOBAL IN (see the section “IN operators”). - -[Original article](https://clickhouse.tech/docs/en/operations/table_engines/memory/) diff --git a/docs/fa/operations/table_engines/merge.md b/docs/fa/operations/table_engines/merge.md deleted file mode 100644 index 5b8ccefcfbd..00000000000 --- a/docs/fa/operations/table_engines/merge.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -en_copy: true ---- - -# Merge {#merge} - -The `Merge` engine (not to be confused with `MergeTree`) does not store data itself, but allows reading from any number of other tables simultaneously. -Reading is automatically parallelized. Writing to a table is not supported. When reading, the indexes of tables that are actually being read are used, if they exist. -The `Merge` engine accepts parameters: the database name and a regular expression for tables. - -Example: - -``` sql -Merge(hits, '^WatchLog') -``` - -Data will be read from the tables in the `hits` database that have names that match the regular expression ‘`^WatchLog`’. - -Instead of the database name, you can use a constant expression that returns a string. For example, `currentDatabase()`. 
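As a sketch (with hypothetical table names), this makes it possible to merge every table in the current database whose name starts with `visits`:

``` sql
CREATE TABLE all_visits AS visits_v1
ENGINE = Merge(currentDatabase(), '^visits')
```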
Regular expressions — [re2](https://github.com/google/re2) (supports a subset of PCRE), case-sensitive.
See the notes about escaping symbols in regular expressions in the “match” section.

When selecting tables to read, the `Merge` table itself will not be selected, even if it matches the regex. This is to avoid loops.
It is possible to create two `Merge` tables that will endlessly try to read each others’ data, but this is not a good idea.

The typical way to use the `Merge` engine is for working with a large number of `TinyLog` tables as if with a single table.

Example 2:

Let’s say you have an old table (WatchLog\_old) and decided to change partitioning without moving data to a new table (WatchLog\_new), and you need to see data from both tables.

``` sql
CREATE TABLE WatchLog_old(date Date, UserId Int64, EventType String, Cnt UInt64)
ENGINE=MergeTree(date, (UserId, EventType), 8192);
INSERT INTO WatchLog_old VALUES ('2018-01-01', 1, 'hit', 3);

CREATE TABLE WatchLog_new(date Date, UserId Int64, EventType String, Cnt UInt64)
ENGINE=MergeTree PARTITION BY date ORDER BY (UserId, EventType) SETTINGS index_granularity=8192;
INSERT INTO WatchLog_new VALUES ('2018-01-02', 2, 'hit', 3);

CREATE TABLE WatchLog as WatchLog_old ENGINE=Merge(currentDatabase(), '^WatchLog');

SELECT *
FROM WatchLog
```

``` text
┌───────date─┬─UserId─┬─EventType─┬─Cnt─┐
│ 2018-01-01 │      1 │ hit       │   3 │
└────────────┴────────┴───────────┴─────┘
┌───────date─┬─UserId─┬─EventType─┬─Cnt─┐
│ 2018-01-02 │      2 │ hit       │   3 │
└────────────┴────────┴───────────┴─────┘
```

## Virtual Columns {#virtual-columns}

- `_table` — Contains the name of the table from which data was read. Type: [String](../../data_types/string.md).

    You can set the constant conditions on `_table` in the `WHERE/PREWHERE` clause (for example, `WHERE _table='xyz'`). In this case the read operation is performed only for those tables where the condition on `_table` is satisfied, so the `_table` column acts as an index.

**See Also**

- [Virtual columns](index.md#table_engines-virtual_columns)

[Original article](https://clickhouse.tech/docs/en/operations/table_engines/merge/) diff --git a/docs/fa/operations/table_engines/mergetree.md b/docs/fa/operations/table_engines/mergetree.md deleted file mode 100644 index d5d13fd8177..00000000000 --- a/docs/fa/operations/table_engines/mergetree.md +++ /dev/null @@ -1,653 +0,0 @@
---
en_copy: true
---

# MergeTree {#table_engines-mergetree}

The `MergeTree` engine and other engines of this family (`*MergeTree`) are the most robust ClickHouse table engines.

Engines in the `MergeTree` family are designed for inserting a very large amount of data into a table. The data is quickly written to the table part by part, then rules are applied for merging the parts in the background. This method is much more efficient than continually rewriting the data in storage during insert.

Main features:

- Stores data sorted by primary key.

    This allows you to create a small sparse index that helps find data faster.

- Partitions can be used if the [partitioning key](custom_partitioning_key.md) is specified.

    ClickHouse supports certain operations with partitions that are more effective than general operations on the same data with the same result. ClickHouse also automatically cuts off the partition data where the partitioning key is specified in the query. This also improves query performance.

- Data replication support.
    The family of `ReplicatedMergeTree` tables provides data replication. For more information, see [Data replication](replication.md).

- Data sampling support.

    If necessary, you can set the data sampling method in the table.

!!! info "Info"
    The [Merge](merge.md) engine does not belong to the `*MergeTree` family.

## Creating a Table {#table_engine-mergetree-creating-a-table}

``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
(
    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [TTL expr1],
    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2] [TTL expr2],
    ...
    INDEX index_name1 expr1 TYPE type1(...) GRANULARITY value1,
    INDEX index_name2 expr2 TYPE type2(...) GRANULARITY value2
) ENGINE = MergeTree()
[PARTITION BY expr]
[ORDER BY expr]
[PRIMARY KEY expr]
[SAMPLE BY expr]
[TTL expr [DELETE|TO DISK 'xxx'|TO VOLUME 'xxx'], ...]
[SETTINGS name=value, ...]
```

For a description of parameters, see the [CREATE query description](../../query_language/create.md).

!!! note "Note"
    `INDEX` is an experimental feature, see [Data Skipping Indexes](#table_engine-mergetree-data_skipping-indexes).

### Query Clauses {#mergetree-query-clauses}

- `ENGINE` — Name and parameters of the engine. `ENGINE = MergeTree()`. The `MergeTree` engine does not have parameters.

- `PARTITION BY` — The [partitioning key](custom_partitioning_key.md).

    For partitioning by month, use the `toYYYYMM(date_column)` expression, where `date_column` is a column with a date of the type [Date](../../data_types/date.md). The partition names here have the `"YYYYMM"` format.

- `ORDER BY` — The sorting key.

    A tuple of columns or arbitrary expressions. Example: `ORDER BY (CounterID, EventDate)`.

- `PRIMARY KEY` — The primary key if it [differs from the sorting key](mergetree.md).

    By default the primary key is the same as the sorting key (which is specified by the `ORDER BY` clause). Thus in most cases it is unnecessary to specify a separate `PRIMARY KEY` clause.

- `SAMPLE BY` — An expression for sampling.

    If a sampling expression is used, the primary key must contain it. Example: `SAMPLE BY intHash32(UserID) ORDER BY (CounterID, EventDate, intHash32(UserID))`.

- `TTL` — A list of rules specifying storage duration of rows and defining logic of automatic parts movement [between disks and volumes](#table_engine-mergetree-multiple-volumes).

    The expression must have one `Date` or `DateTime` column as a result. Example:
    `TTL date + INTERVAL 1 DAY`

    Type of the rule `DELETE|TO DISK 'xxx'|TO VOLUME 'xxx'` specifies an action to be done with the part if the expression is satisfied (reaches current time): removal of expired rows, moving a part (if the expression is satisfied for all rows in a part) to the specified disk (`TO DISK 'xxx'`) or volume (`TO VOLUME 'xxx'`). The default type of the rule is removal (`DELETE`). A list of multiple rules can be specified, but there should be no more than one `DELETE` rule.

    For more details, see [TTL for columns and tables](#table_engine-mergetree-ttl); a minimal example is sketched after the settings example below.

- `SETTINGS` — Additional parameters that control the behavior of the `MergeTree`:

    - `index_granularity` — Maximum number of data rows between the marks of an index. Default value: 8192. See [Data Storage](#mergetree-data-storage).
    - `index_granularity_bytes` — Maximum size of data granules in bytes. Default value: 10Mb. To restrict the granule size only by number of rows, set to 0 (not recommended). See [Data Storage](#mergetree-data-storage).
- - `enable_mixed_granularity_parts` — Enables or disables transitioning to control the granule size with the `index_granularity_bytes` setting. Before version 19.11, there was only the `index_granularity` setting for restricting granule size. The `index_granularity_bytes` setting improves ClickHouse performance when selecting data from tables with big rows (tens and hundreds of megabytes). If you have tables with big rows, you can enable this setting for the tables to improve the efficiency of `SELECT` queries. - - `use_minimalistic_part_header_in_zookeeper` — Storage method of the data parts headers in ZooKeeper. If `use_minimalistic_part_header_in_zookeeper=1`, then ZooKeeper stores less data. For more information, see the [setting description](../server_settings/settings.md#server-settings-use_minimalistic_part_header_in_zookeeper) in “Server configuration parameters”. - - `min_merge_bytes_to_use_direct_io` — The minimum data volume for merge operation that is required for using direct I/O access to the storage disk. When merging data parts, ClickHouse calculates the total storage volume of all the data to be merged. If the volume exceeds `min_merge_bytes_to_use_direct_io` bytes, ClickHouse reads and writes the data to the storage disk using the direct I/O interface (`O_DIRECT` option). If `min_merge_bytes_to_use_direct_io = 0`, then direct I/O is disabled. Default value: `10 * 1024 * 1024 * 1024` bytes. - - - `merge_with_ttl_timeout` — Minimum delay in seconds before repeating a merge with TTL. Default value: 86400 (1 day). - - `write_final_mark` — Enables or disables writing the final index mark at the end of data part (after the last byte). Default value: 1. Don’t turn it off. - - `merge_max_block_size` — Maximum number of rows in block for merge operations. Default value: 8192. - - `storage_policy` — Storage policy. See [Using Multiple Block Devices for Data Storage](#table_engine-mergetree-multiple-volumes). - -**Example of Sections Setting** - -``` sql -ENGINE MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity=8192 -``` - -In the example, we set partitioning by month. - -We also set an expression for sampling as a hash by the user ID. This allows you to pseudorandomize the data in the table for each `CounterID` and `EventDate`. If you define a [SAMPLE](../../query_language/select.md#select-sample-clause) clause when selecting the data, ClickHouse will return an evenly pseudorandom data sample for a subset of users. - -The `index_granularity` setting can be omitted because 8192 is the default value. - -
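As a minimal sketch of the `TTL` clause described above (the table and column names are hypothetical), the following table removes rows one month after `date`:

``` sql
CREATE TABLE ttl_example
(
    date Date,
    value UInt64
) ENGINE = MergeTree()
PARTITION BY toYYYYMM(date)
ORDER BY date
TTL date + INTERVAL 1 MONTH DELETE
```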
**Deprecated Method for Creating a Table**

!!! attention "Attention"
    Do not use this method in new projects. If possible, switch old projects to the method described above.

``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
(
    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
    ...
) ENGINE [=] MergeTree(date-column [, sampling_expression], (primary, key), index_granularity)
```

**MergeTree() Parameters**

- `date-column` — The name of a column of the [Date](../../data_types/date.md) type. ClickHouse automatically creates partitions by month based on this column. The partition names are in the `"YYYYMM"` format.
- `sampling_expression` — An expression for sampling.
- `(primary, key)` — Primary key. Type: [Tuple()](../../data_types/tuple.md)
- `index_granularity` — The granularity of an index. The number of data rows between the “marks” of an index. The value 8192 is appropriate for most tasks.

**Example**

``` sql
MergeTree(EventDate, intHash32(UserID), (CounterID, EventDate, intHash32(UserID)), 8192)
```

The `MergeTree` engine is configured in the same way as in the example above for the main engine configuration method.
## Data Storage {#mergetree-data-storage}

A table consists of data parts sorted by primary key.

When data is inserted in a table, separate data parts are created and each of them is lexicographically sorted by primary key. For example, if the primary key is `(CounterID, Date)`, the data in the part is sorted by `CounterID`, and within each `CounterID`, it is ordered by `Date`.

Data belonging to different partitions is separated into different parts. In the background, ClickHouse merges data parts for more efficient storage. Parts belonging to different partitions are not merged. The merge mechanism does not guarantee that all rows with the same primary key will be in the same data part.

Each data part is logically divided into granules. A granule is the smallest indivisible data set that ClickHouse reads when selecting data. ClickHouse doesn’t split rows or values, so each granule always contains an integer number of rows. The first row of a granule is marked with the value of the primary key for the row. For each data part, ClickHouse creates an index file that stores the marks. For each column, whether it’s in the primary key or not, ClickHouse also stores the same marks. These marks let you find data directly in column files.

The granule size is restricted by the `index_granularity` and `index_granularity_bytes` settings of the table engine. The number of rows in a granule lies in the `[1, index_granularity]` range, depending on the size of the rows. The size of a granule can exceed `index_granularity_bytes` if the size of a single row is greater than the value of the setting. In this case, the size of the granule equals the size of the row.

## Primary Keys and Indexes in Queries {#primary-keys-and-indexes-in-queries}

Take the `(CounterID, Date)` primary key as an example. In this case, the sorting and index can be illustrated as follows:

    Whole data:     [-------------------------------------------------------------------------]
    CounterID:      [aaaaaaaaaaaaaaaaaabbbbcdeeeeeeeeeeeeefgggggggghhhhhhhhhiiiiiiiiikllllllll]
    Date:           [1111111222222233331233211111222222333211111112122222223111112223311122333]
    Marks:           |      |      |      |      |      |      |      |      |      |      |
                    a,1    a,2    a,3    b,3    e,2    e,3    g,1    h,2    i,1    i,3    l,3
    Marks numbers:   0      1      2      3      4      5      6      7      8      9     10

If the data query specifies:

- `CounterID in ('a', 'h')`, the server reads the data in the ranges of marks `[0, 3)` and `[6, 8)`.
- `CounterID IN ('a', 'h') AND Date = 3`, the server reads the data in the ranges of marks `[1, 3)` and `[7, 8)`.
- `Date = 3`, the server reads the data in the range of marks `[1, 10]`.

The examples above show that it is always more effective to use an index than a full scan.

A sparse index allows extra data to be read. When reading a single range of the primary key, up to `index_granularity * 2` extra rows in each data block can be read.

Sparse indexes allow you to work with a very large number of table rows, because in most cases, such indexes fit in the computer’s RAM.

ClickHouse does not require a unique primary key. You can insert multiple rows with the same primary key.

### Selecting the Primary Key {#selecting-the-primary-key}

The number of columns in the primary key is not explicitly limited. Depending on the data structure, you can include more or fewer columns in the primary key. This may:

- Improve the performance of an index.
- - If the primary key is `(a, b)`, then adding another column `c` will improve the performance if the following conditions are met: - - - There are queries with a condition on column `c`. - - Long data ranges (several times longer than the `index_granularity`) with identical values for `(a, b)` are common. In other words, when adding another column allows you to skip quite long data ranges. - -- Improve data compression. - - ClickHouse sorts data by primary key, so the higher the consistency, the better the compression. - -- Provide additional logic when merging data parts in the [CollapsingMergeTree](collapsingmergetree.md#table_engine-collapsingmergetree) and [SummingMergeTree](summingmergetree.md) engines. - - In this case it makes sense to specify the *sorting key* that is different from the primary key. - -A long primary key will negatively affect the insert performance and memory consumption, but extra columns in the primary key do not affect ClickHouse performance during `SELECT` queries. - -### Choosing a Primary Key that Differs from the Sorting Key {#choosing-a-primary-key-that-differs-from-the-sorting-key} - -It is possible to specify a primary key (an expression with values that are written in the index file for each mark) that is different from the sorting key (an expression for sorting the rows in data parts). In this case the primary key expression tuple must be a prefix of the sorting key expression tuple. - -This feature is helpful when using the [SummingMergeTree](summingmergetree.md) and -[AggregatingMergeTree](aggregatingmergetree.md) table engines. In a common case when using these engines, the table has two types of columns: *dimensions* and *measures*. Typical queries aggregate values of measure columns with arbitrary `GROUP BY` and filtering by dimensions. Because SummingMergeTree and AggregatingMergeTree aggregate rows with the same value of the sorting key, it is natural to add all dimensions to it. As a result, the key expression consists of a long list of columns and this list must be frequently updated with newly added dimensions. - -In this case it makes sense to leave only a few columns in the primary key that will provide efficient range scans and add the remaining dimension columns to the sorting key tuple. - -[ALTER](../../query_language/alter.md) of the sorting key is a lightweight operation because when a new column is simultaneously added to the table and to the sorting key, existing data parts don’t need to be changed. Since the old sorting key is a prefix of the new sorting key and there is no data in the newly added column, the data is sorted by both the old and new sorting keys at the moment of table modification. - -### Use of Indexes and Partitions in Queries {#use-of-indexes-and-partitions-in-queries} - -For `SELECT` queries, ClickHouse analyzes whether an index can be used. An index can be used if the `WHERE/PREWHERE` clause has an expression (as one of the conjunction elements, or entirely) that represents an equality or inequality comparison operation, or if it has `IN` or `LIKE` with a fixed prefix on columns or expressions that are in the primary key or partitioning key, or on certain partially repetitive functions of these columns, or logical relationships of these expressions. - -Thus, it is possible to quickly run queries on one or many ranges of the primary key. 
In this example, queries will be fast when run for a specific tracking tag, for a specific tag and date range, for a specific tag and date, for multiple tags with a date range, and so on. - -Let’s look at the engine configured as follows: - - ENGINE MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate) SETTINGS index_granularity=8192 - -In this case, in queries: - -``` sql -SELECT count() FROM table WHERE EventDate = toDate(now()) AND CounterID = 34 -SELECT count() FROM table WHERE EventDate = toDate(now()) AND (CounterID = 34 OR CounterID = 42) -SELECT count() FROM table WHERE ((EventDate >= toDate('2014-01-01') AND EventDate <= toDate('2014-01-31')) OR EventDate = toDate('2014-05-01')) AND CounterID IN (101500, 731962, 160656) AND (CounterID = 101500 OR EventDate != toDate('2014-05-01')) -``` - -ClickHouse will use the primary key index to trim improper data and the monthly partitioning key to trim partitions that are in improper date ranges. - -The queries above show that the index is used even for complex expressions. Reading from the table is organized so that using the index can’t be slower than a full scan. - -In the example below, the index can’t be used. - -``` sql -SELECT count() FROM table WHERE CounterID = 34 OR URL LIKE '%upyachka%' -``` - -To check whether ClickHouse can use the index when running a query, use the settings [force\_index\_by\_date](../settings/settings.md#settings-force_index_by_date) and [force\_primary\_key](../settings/settings.md). - -The key for partitioning by month allows reading only those data blocks which contain dates from the proper range. In this case, the data block may contain data for many dates (up to an entire month). Within a block, data is sorted by primary key, which might not contain the date as the first column. Because of this, using a query with only a date condition that does not specify the primary key prefix will cause more data to be read than for a single date. - -### Use of Index for Partially-Monotonic Primary Keys {#use-of-index-for-partially-monotonic-primary-keys} - -Consider, for example, the days of the month. They form a [monotonic sequence](https://en.wikipedia.org/wiki/Monotonic_function) for one month, but not monotonic for more extended periods. This is a partially-monotonic sequence. If a user creates the table with partially-monotonic primary key, ClickHouse creates a sparse index as usual. When a user selects data from this kind of table, ClickHouse analyzes the query conditions. If the user wants to get data between two marks of the index and both these marks fall within one month, ClickHouse can use the index in this particular case because it can calculate the distance between the parameters of a query and index marks. - -ClickHouse cannot use an index if the values of the primary key in the query parameter range don’t represent a monotonic sequence. In this case, ClickHouse uses the full scan method. - -ClickHouse uses this logic not only for days of the month sequences, but for any primary key that represents a partially-monotonic sequence. - -### Data Skipping Indexes (Experimental) {#table_engine-mergetree-data_skipping-indexes} - -The index declaration is in the columns section of the `CREATE` query. - -``` sql -INDEX index_name expr TYPE type(...) GRANULARITY granularity_value -``` - -For tables from the `*MergeTree` family, data skipping indices can be specified. 
- -These indices aggregate some information about the specified expression on blocks, which consist of `granularity_value` granules (the size of the granule is specified using the `index_granularity` setting in the table engine). Then these aggregates are used in `SELECT` queries for reducing the amount of data to read from the disk by skipping big blocks of data where the `WHERE` query cannot be satisfied. - -**Example** - -``` sql -CREATE TABLE table_name -( - u64 UInt64, - i32 Int32, - s String, - ... - INDEX a (u64 * i32, s) TYPE minmax GRANULARITY 3, - INDEX b (u64 * length(s)) TYPE set(1000) GRANULARITY 4 -) ENGINE = MergeTree() -... -``` - -Indices from the example can be used by ClickHouse to reduce the amount of data to read from disk in the following queries: - -``` sql -SELECT count() FROM table WHERE s < 'z' -SELECT count() FROM table WHERE u64 * i32 == 10 AND u64 * length(s) >= 1234 -``` - -#### Available Types of Indices {#available-types-of-indices} - -- `minmax` - - Stores extremes of the specified expression (if the expression is `tuple`, then it stores extremes for each element of `tuple`), uses stored info for skipping blocks of data like the primary key. - -- `set(max_rows)` - - Stores unique values of the specified expression (no more than `max_rows` rows, `max_rows=0` means “no limits”). Uses the values to check if the `WHERE` expression is not satisfiable on a block of data. - -- `ngrambf_v1(n, size_of_bloom_filter_in_bytes, number_of_hash_functions, random_seed)` - - Stores a [Bloom filter](https://en.wikipedia.org/wiki/Bloom_filter) that contains all ngrams from a block of data. Works only with strings. Can be used for optimization of `equals`, `like` and `in` expressions. - - - `n` — ngram size, - - `size_of_bloom_filter_in_bytes` — Bloom filter size in bytes (you can use large values here, for example, 256 or 512, because it can be compressed well). - - `number_of_hash_functions` — The number of hash functions used in the Bloom filter. - - `random_seed` — The seed for Bloom filter hash functions. - -- `tokenbf_v1(size_of_bloom_filter_in_bytes, number_of_hash_functions, random_seed)` - - The same as `ngrambf_v1`, but stores tokens instead of ngrams. Tokens are sequences separated by non-alphanumeric characters. - -- `bloom_filter([false_positive])` — Stores a [Bloom filter](https://en.wikipedia.org/wiki/Bloom_filter) for the specified columns. - - The optional `false_positive` parameter is the probability of receiving a false positive response from the filter. Possible values: (0, 1). Default value: 0.025. - - Supported data types: `Int*`, `UInt*`, `Float*`, `Enum`, `Date`, `DateTime`, `String`, `FixedString`, `Array`, `LowCardinality`, `Nullable`. - - The following functions can use it: [equals](../../query_language/functions/comparison_functions.md), [notEquals](../../query_language/functions/comparison_functions.md), [in](../../query_language/functions/in_functions.md), [notIn](../../query_language/functions/in_functions.md), [has](../../query_language/functions/array_functions.md). - - - -``` sql -INDEX sample_index (u64 * length(s)) TYPE minmax GRANULARITY 4 -INDEX sample_index2 (u64 * length(str), i32 + f64 * 100, date, str) TYPE set(100) GRANULARITY 4 -INDEX sample_index3 (lower(str), str) TYPE ngrambf_v1(3, 256, 2, 0) GRANULARITY 4 -``` - -#### Functions Support {#functions-support} - -Conditions in the `WHERE` clause contain calls of the functions that operate with columns. 
If the column is a part of an index, ClickHouse tries to use this index when performing the functions. ClickHouse supports different subsets of functions for using indexes. - -The `set` index can be used with all functions. Function subsets for other indexes are shown in the table below. - -| Function (operator) / Index | primary key | minmax | ngrambf\_v1 | tokenbf\_v1 | bloom\_filter | -|----------------------------------------------------------------------------------------------------------|-------------|--------|-------------|-------------|---------------| -| [equals (=, ==)](../../query_language/functions/comparison_functions.md#function-equals) | ✔ | ✔ | ✔ | ✔ | ✔ | -| [notEquals(!=, \<\>)](../../query_language/functions/comparison_functions.md#function-notequals) | ✔ | ✔ | ✔ | ✔ | ✔ | -| [like](../../query_language/functions/string_search_functions.md#function-like) | ✔ | ✔ | ✔ | ✗ | ✗ | -| [notLike](../../query_language/functions/string_search_functions.md#function-notlike) | ✔ | ✔ | ✔ | ✗ | ✗ | -| [startsWith](../../query_language/functions/string_functions.md#startswith) | ✔ | ✔ | ✔ | ✔ | ✗ | -| [endsWith](../../query_language/functions/string_functions.md#endswith) | ✗ | ✗ | ✔ | ✔ | ✗ | -| [multiSearchAny](../../query_language/functions/string_search_functions.md#function-multisearchany) | ✗ | ✗ | ✔ | ✗ | ✗ | -| [in](../../query_language/functions/in_functions.md#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | -| [notIn](../../query_language/functions/in_functions.md#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | -| [less (\<)](../../query_language/functions/comparison_functions.md#function-less) | ✔ | ✔ | ✗ | ✗ | ✗ | -| [greater (\>)](../../query_language/functions/comparison_functions.md#function-greater) | ✔ | ✔ | ✗ | ✗ | ✗ | -| [lessOrEquals (\<=)](../../query_language/functions/comparison_functions.md#function-lessorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | -| [greaterOrEquals (\>=)](../../query_language/functions/comparison_functions.md#function-greaterorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | -| [empty](../../query_language/functions/array_functions.md#function-empty) | ✔ | ✔ | ✗ | ✗ | ✗ | -| [notEmpty](../../query_language/functions/array_functions.md#function-notempty) | ✔ | ✔ | ✗ | ✗ | ✗ | -| hasToken | ✗ | ✗ | ✗ | ✔ | ✗ | - -Functions with a constant argument that is less than ngram size can’t be used by `ngrambf_v1` for query optimization. - -Bloom filters can have false positive matches, so the `ngrambf_v1`, `tokenbf_v1`, and `bloom_filter` indexes can’t be used for optimizing queries where the result of a function is expected to be false, for example: - -- Can be optimized: - - `s LIKE '%test%'` - - `NOT s NOT LIKE '%test%'` - - `s = 1` - - `NOT s != 1` - - `startsWith(s, 'test')` -- Can’t be optimized: - - `NOT s LIKE '%test%'` - - `s NOT LIKE '%test%'` - - `NOT s = 1` - - `s != 1` - - `NOT startsWith(s, 'test')` - -## Concurrent Data Access {#concurrent-data-access} - -For concurrent table access, we use multi-versioning. In other words, when a table is simultaneously read and updated, data is read from a set of parts that is current at the time of the query. There are no lengthy locks. Inserts do not get in the way of read operations. - -Reading from a table is automatically parallelized. - -## TTL for Columns and Tables {#table_engine-mergetree-ttl} - -Determines the lifetime of values. - -The `TTL` clause can be set for the whole table and for each individual column. Table-level TTL can also specify logic of automatic move of data between disks and volumes. 
- -Expressions must evaluate to the [Date](../../data_types/date.md) or [DateTime](../../data_types/datetime.md) data type. - -Example: - -``` sql -TTL time_column -TTL time_column + interval -``` - -To define `interval`, use [time interval](../../query_language/operators.md#operators-datetime) operators. - -``` sql -TTL date_time + INTERVAL 1 MONTH -TTL date_time + INTERVAL 15 HOUR -``` - -### Column TTL {#mergetree-column-ttl} - -When the values in the column expire, ClickHouse replaces them with the default values for the column data type. If all the column values in the data part expire, ClickHouse deletes this column from the data part in the filesystem. - -The `TTL` clause can’t be used for key columns. - -Examples: - -Creating a table with TTL - -``` sql -CREATE TABLE example_table -( - d DateTime, - a Int TTL d + INTERVAL 1 MONTH, - b Int TTL d + INTERVAL 1 MONTH, - c String -) -ENGINE = MergeTree -PARTITION BY toYYYYMM(d) -ORDER BY d; -``` - -Adding TTL to a column of an existing table - -``` sql -ALTER TABLE example_table - MODIFY COLUMN - c String TTL d + INTERVAL 1 DAY; -``` - -Altering TTL of the column - -``` sql -ALTER TABLE example_table - MODIFY COLUMN - c String TTL d + INTERVAL 1 MONTH; -``` - -### Table TTL {#mergetree-table-ttl} - -A table can have an expression for the removal of expired rows and multiple expressions for the automatic move of parts between [disks or volumes](#table_engine-mergetree-multiple-volumes). When rows in the table expire, ClickHouse deletes all corresponding rows. For the part-moving feature, all rows of a part must satisfy the movement expression criteria. - -``` sql -TTL expr [DELETE|TO DISK 'aaa'|TO VOLUME 'bbb'], ... -``` - -A type of TTL rule may follow each TTL expression. It determines the action to be performed once the expression is satisfied (reaches the current time): - -- `DELETE` - delete expired rows (default action); -- `TO DISK 'aaa'` - move part to the disk `aaa`; -- `TO VOLUME 'bbb'` - move part to the volume `bbb`. - -Examples: - -Creating a table with TTL - -``` sql -CREATE TABLE example_table -( - d DateTime, - a Int -) -ENGINE = MergeTree -PARTITION BY toYYYYMM(d) -ORDER BY d -TTL d + INTERVAL 1 MONTH [DELETE], - d + INTERVAL 1 WEEK TO VOLUME 'aaa', - d + INTERVAL 2 WEEK TO DISK 'bbb'; -``` - -Altering TTL of the table - -``` sql -ALTER TABLE example_table - MODIFY TTL d + INTERVAL 1 DAY; -``` - -**Removing Data** - -Data with an expired TTL is removed when ClickHouse merges data parts. - -When ClickHouse sees that data is expired, it performs an off-schedule merge. To control the frequency of such merges, you can set [merge\_with\_ttl\_timeout](#mergetree_setting-merge_with_ttl_timeout). If the value is too low, it will perform many off-schedule merges that may consume a lot of resources. - -If you perform the `SELECT` query between merges, you may get expired data. To avoid this, use the [OPTIMIZE](../../query_language/misc.md#misc_operations-optimize) query before `SELECT`. - -[Original article](https://clickhouse.tech/docs/en/operations/table_engines/mergetree/) - -## Using Multiple Block Devices for Data Storage {#table_engine-mergetree-multiple-volumes} - -### Introduction {#introduction} - -`MergeTree` family table engines can store data on multiple block devices. For example, it can be useful when the data of a certain table are implicitly split into “hot” and “cold”. The most recent data is regularly requested but requires only a small amount of space. On the contrary, the fat-tailed historical data is requested rarely. 
If several disks are available, the “hot” data may be located on fast disks (for example, NVMe SSDs or in memory), while the “cold” data resides on relatively slow ones (for example, HDD). - -A data part is the minimum movable unit for `MergeTree`-engine tables. The data belonging to one part are stored on one disk. Data parts can be moved between disks in the background (according to user settings) as well as by means of the [ALTER](../../query_language/alter.md#alter_move-partition) queries. - -### Terms {#terms} - -- Disk — Block device mounted to the filesystem. -- Default disk — Disk that stores the path specified in the [path](../server_settings/settings.md#server_settings-path) server setting. -- Volume — Ordered set of equal disks (similar to [JBOD](https://en.wikipedia.org/wiki/Non-RAID_drive_architectures)). -- Storage policy — Set of volumes and the rules for moving data between them. - -The names given to the described entities can be found in the system tables, [system.storage\_policies](../system_tables.md#system_tables-storage_policies) and [system.disks](../system_tables.md#system_tables-disks). To apply one of the configured storage policies for a table, use the `storage_policy` setting of `MergeTree`-engine family tables. - -### Configuration {#table_engine-mergetree-multiple-volumes-configure} - -Disks, volumes and storage policies should be declared inside the `<storage_configuration>` tag either in the main file `config.xml` or in a distinct file in the `config.d` directory. - -Configuration structure:

``` xml
<storage_configuration>
    <disks>
        <disk_name_1> <!-- disk name -->
            <path>/mnt/fast_ssd/clickhouse</path>
        </disk_name_1>
        <disk_name_2>
            <path>/mnt/hdd1/clickhouse</path>
            <keep_free_space_bytes>10485760</keep_free_space_bytes>
        </disk_name_2>
        <disk_name_3>
            <path>/mnt/hdd2/clickhouse</path>
            <keep_free_space_bytes>10485760</keep_free_space_bytes>
        </disk_name_3>
        ...
    </disks>
    ...
</storage_configuration>
```

Tags: - -- `<disk_name_N>` — Disk name. Names must be different for all disks. -- `path` — path under which a server will store data (`data` and `shadow` folders), should be terminated with ‘/’. -- `keep_free_space_bytes` — the amount of free disk space to be reserved. - -The order of the disk definition is not important. - -Storage policies configuration markup:

``` xml
<storage_configuration>
    ...
    <policies>
        <policy_name_1>
            <volumes>
                <volume_name_1>
                    <disk>disk_name_from_disks_configuration</disk>
                    <max_data_part_size_bytes>1073741824</max_data_part_size_bytes>
                </volume_name_1>
                <volume_name_2>
                    <!-- configuration -->
                </volume_name_2>
            </volumes>
            <move_factor>0.2</move_factor>
        </policy_name_1>
        <policy_name_2>
            <!-- configuration -->
        </policy_name_2>
        ...
    </policies>
    ...
</storage_configuration>
```

Tags: - -- `policy_name_N` — Policy name. Policy names must be unique. -- `volume_name_N` — Volume name. Volume names must be unique. -- `disk` — a disk within a volume. -- `max_data_part_size_bytes` — the maximum size of a part that can be stored on any of the volume’s disks. -- `move_factor` — when the amount of available space gets lower than this factor, data automatically starts to move to the next volume if any (by default, 0.1). - -Configuration examples:

``` xml
<storage_configuration>
    ...
    <policies>
        <hdd_in_order> <!-- policy name -->
            <volumes>
                <single> <!-- volume name -->
                    <disk>disk1</disk>
                    <disk>disk2</disk>
                </single>
            </volumes>
        </hdd_in_order>
        <moving_from_ssd_to_hdd>
            <volumes>
                <hot>
                    <disk>fast_ssd</disk>
                    <max_data_part_size_bytes>1073741824</max_data_part_size_bytes>
                </hot>
                <cold>
                    <disk>disk1</disk>
                </cold>
            </volumes>
            <move_factor>0.2</move_factor>
        </moving_from_ssd_to_hdd>
        ...
    </policies>
    ...
</storage_configuration>
```

In the given example, the `hdd_in_order` policy implements the [round-robin](https://en.wikipedia.org/wiki/Round-robin_scheduling) approach. This policy defines only one volume (`single`), and the data parts are stored on all its disks in circular order. Such a policy can be quite useful if several similar disks are mounted to the system, but RAID is not configured. Keep in mind that each individual disk drive is not reliable and you might want to compensate for it with a replication factor of 3 or more. - -If there are different kinds of disks available in the system, the `moving_from_ssd_to_hdd` policy can be used instead. The volume `hot` consists of an SSD disk (`fast_ssd`), and the maximum size of a part that can be stored on this volume is 1GB. 
All parts larger than 1GB will be stored directly on the `cold` volume, which contains an HDD disk `disk1`. -Also, once the disk `fast_ssd` gets filled by more than 80%, data will be transferred to `disk1` by a background process. - -The order of volume enumeration within a storage policy is important. Once a volume is overfilled, data are moved to the next one. The order of disk enumeration is important as well because data are stored on them in turns. - -When creating a table, one can apply one of the configured storage policies to it: - -``` sql -CREATE TABLE table_with_non_default_policy ( - EventDate Date, - OrderID UInt64, - BannerID UInt64, - SearchPhrase String -) ENGINE = MergeTree -ORDER BY (OrderID, BannerID) -PARTITION BY toYYYYMM(EventDate) -SETTINGS storage_policy = 'moving_from_ssd_to_hdd' -``` - -The `default` storage policy implies using only one volume, which consists of only one disk given in `<path>`. Once a table is created, its storage policy cannot be changed. - -### Details {#details} - -In the case of `MergeTree` tables, data reaches disk in several ways: - -- As a result of an insert (`INSERT` query). -- During background merges and [mutations](../../query_language/alter.md#alter-mutations). -- When downloading from another replica. -- As a result of partition freezing [ALTER TABLE … FREEZE PARTITION](../../query_language/alter.md#alter_freeze-partition). - -In all these cases except for mutations and partition freezing, a part is stored on a volume and a disk according to the given storage policy: - -1. The first volume (in the order of definition) that has enough disk space for storing a part (`unreserved_space > current_part_size`) and allows for storing parts of a given size (`max_data_part_size_bytes > current_part_size`) is chosen. -2. Within this volume, the disk chosen is the one that follows the disk used for storing the previous chunk of data and that has more free space than the part size (`unreserved_space - keep_free_space_bytes > current_part_size`). - -Under the hood, mutations and partition freezing make use of [hard links](https://en.wikipedia.org/wiki/Hard_link). Hard links between different disks are not supported, therefore in such cases the resulting parts are stored on the same disks as the initial ones. - -In the background, parts are moved between volumes on the basis of the amount of free space (`move_factor` parameter) according to the order the volumes are declared in the configuration file. -Data is never transferred from the last volume or into the first one. One may use system tables [system.part\_log](../system_tables.md#system_tables-part-log) (field `type = MOVE_PART`) and [system.parts](../system_tables.md#system_tables-parts) (fields `path` and `disk`) to monitor background moves. Also, the detailed information can be found in server logs. - -A user can force moving a part or a partition from one volume to another using the query [ALTER TABLE … MOVE PART\|PARTITION … TO VOLUME\|DISK …](../../query_language/alter.md#alter_move-partition); all the restrictions for background operations are taken into account. The query initiates a move on its own and does not wait for background operations to be completed. The user will get an error message if not enough free space is available or if any of the required conditions are not met. - -Moving data does not interfere with data replication. Therefore, different storage policies can be specified for the same table on different replicas. 
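As a concrete sketch of such a manual move (the partition ID and part name are hypothetical, reusing the table and volumes from the configuration example above):

``` sql
-- Move one partition to the 'cold' volume by hand; the partition ID 201903 is hypothetical.
ALTER TABLE table_with_non_default_policy MOVE PARTITION 201903 TO VOLUME 'cold';

-- A single part can be moved to a specific disk in the same way; the part name is hypothetical.
ALTER TABLE table_with_non_default_policy MOVE PART '20190301_1_1_0' TO DISK 'disk1';
```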
- -After the completion of background merges and mutations, old parts are removed only after a certain amount of time (`old_parts_lifetime`). -During this time, they are not moved to other volumes or disks. Therefore, until the parts are finally removed, they are still taken into account for evaluation of the occupied disk space. - -[Original article](https://clickhouse.tech/docs/ru/operations/table_engines/mergetree/) diff --git a/docs/fa/operations/table_engines/mysql.md b/docs/fa/operations/table_engines/mysql.md deleted file mode 100644 index 8fc3ece097c..00000000000 --- a/docs/fa/operations/table_engines/mysql.md +++ /dev/null @@ -1,102 +0,0 @@ ---- -en_copy: true ---- - -# MySQL {#mysql} - -The MySQL engine allows you to perform `SELECT` queries on data that is stored on a remote MySQL server. - -## Creating a Table {#creating-a-table} - -``` sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] -( - name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [TTL expr1], - name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2] [TTL expr2], - ... -) ENGINE = MySQL('host:port', 'database', 'table', 'user', 'password'[, replace_query, 'on_duplicate_clause']); -``` - -See a detailed description of the [CREATE TABLE](../../query_language/create.md#create-table-query) query. - -The table structure can differ from the original MySQL table structure: - -- Column names should be the same as in the original MySQL table, but you can use just some of these columns and in any order. -- Column types may differ from those in the original MySQL table. ClickHouse tries to [cast](../../query_language/functions/type_conversion_functions.md#type_conversion_function-cast) values to the ClickHouse data types. - -**Engine Parameters** - -- `host:port` — MySQL server address. - -- `database` — Remote database name. - -- `table` — Remote table name. - -- `user` — MySQL user. - -- `password` — User password. - -- `replace_query` — Flag that converts `INSERT INTO` queries to `REPLACE INTO`. If `replace_query=1`, the query is substituted. - -- `on_duplicate_clause` — The `ON DUPLICATE KEY on_duplicate_clause` expression that is added to the `INSERT` query. - - Example: `INSERT INTO t (c1,c2) VALUES ('a', 2) ON DUPLICATE KEY UPDATE c2 = c2 + 1`, where `on_duplicate_clause` is `UPDATE c2 = c2 + 1`. See the [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/insert-on-duplicate.html) to find which `on_duplicate_clause` you can use with the `ON DUPLICATE KEY` clause. - - To specify `on_duplicate_clause` you need to pass `0` to the `replace_query` parameter. If you simultaneously pass `replace_query = 1` and `on_duplicate_clause`, ClickHouse generates an exception. - -Simple `WHERE` clauses such as `=, !=, >, >=, <, <=` are executed on the MySQL server. - -The rest of the conditions and the `LIMIT` sampling constraint are executed in ClickHouse only after the query to MySQL finishes. 
- -## Usage Example {#usage-example} - -Table in MySQL: - -``` text -mysql> CREATE TABLE `test`.`test` ( - -> `int_id` INT NOT NULL AUTO_INCREMENT, - -> `int_nullable` INT NULL DEFAULT NULL, - -> `float` FLOAT NOT NULL, - -> `float_nullable` FLOAT NULL DEFAULT NULL, - -> PRIMARY KEY (`int_id`)); -Query OK, 0 rows affected (0,09 sec) - -mysql> insert into test (`int_id`, `float`) VALUES (1,2); -Query OK, 1 row affected (0,00 sec) - -mysql> select * from test; -+--------+--------------+-------+----------------+ -| int_id | int_nullable | float | float_nullable | -+--------+--------------+-------+----------------+ -| 1 | NULL | 2 | NULL | -+--------+--------------+-------+----------------+ -1 row in set (0,00 sec) -``` - -Table in ClickHouse, retrieving data from the MySQL table created above: - -``` sql -CREATE TABLE mysql_table -( - `float_nullable` Nullable(Float32), - `int_id` Int32 -) -ENGINE = MySQL('localhost:3306', 'test', 'test', 'bayonet', '123') -``` - -``` sql -SELECT * FROM mysql_table -``` - -``` text -┌─float_nullable─┬─int_id─┐ -│ ᴺᵁᴸᴸ │ 1 │ -└────────────────┴────────┘ -``` - -## See Also {#see-also} - -- [The ‘mysql’ table function](../../query_language/table_functions/mysql.md) -- [Using MySQL as a source of external dictionary](../../query_language/dicts/external_dicts_dict_sources.md#dicts-external_dicts_dict_sources-mysql) - -[Original article](https://clickhouse.tech/docs/en/operations/table_engines/mysql/) diff --git a/docs/fa/operations/table_engines/null.md b/docs/fa/operations/table_engines/null.md deleted file mode 100644 index 49b1439500f..00000000000 --- a/docs/fa/operations/table_engines/null.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -en_copy: true ---- - -# Null {#null} - -When writing to a Null table, data is ignored. When reading from a Null table, the response is empty. - -However, you can create a materialized view on a Null table. So the data written to the table will end up in the view. - -[Original article](https://clickhouse.tech/docs/en/operations/table_engines/null/) diff --git a/docs/fa/operations/table_engines/odbc.md b/docs/fa/operations/table_engines/odbc.md deleted file mode 100644 index 69003623e0a..00000000000 --- a/docs/fa/operations/table_engines/odbc.md +++ /dev/null @@ -1,129 +0,0 @@ ---- -en_copy: true ---- - -# ODBC {#table-engine-odbc} - -Allows ClickHouse to connect to external databases via [ODBC](https://en.wikipedia.org/wiki/Open_Database_Connectivity). - -To safely implement ODBC connections, ClickHouse uses a separate program `clickhouse-odbc-bridge`. If the ODBC driver is loaded directly from `clickhouse-server`, driver problems can crash the ClickHouse server. ClickHouse automatically starts `clickhouse-odbc-bridge` when it is required. The ODBC bridge program is installed from the same package as the `clickhouse-server`. - -This engine supports the [Nullable](../../data_types/nullable.md) data type. - -## Creating a Table {#creating-a-table} - -``` sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] -( - name1 [type1], - name2 [type2], - ... -) -ENGINE = ODBC(connection_settings, external_database, external_table) -``` - -See a detailed description of the [CREATE TABLE](../../query_language/create.md#create-table-query) query. - -The table structure can differ from the source table structure: - -- Column names should be the same as in the source table, but you can use just some of these columns and in any order. -- Column types may differ from those in the source table. 
ClickHouse tries to [cast](../../query_language/functions/type_conversion_functions.md#type_conversion_function-cast) values to the ClickHouse data types. - -**Engine Parameters** - -- `connection_settings` — Name of the section with connection settings in the `odbc.ini` file. -- `external_database` — Name of a database in an external DBMS. -- `external_table` — Name of a table in the `external_database`. - -## Usage Example {#usage-example} - -**Retrieving data from the local MySQL installation via ODBC** - -This example is checked for Ubuntu Linux 18.04 and MySQL server 5.7. - -Ensure that unixODBC and MySQL Connector are installed. - -By default (if installed from packages), ClickHouse starts as user `clickhouse`. Thus, you need to create and configure this user in the MySQL server. - -``` bash -$ sudo mysql -``` - -``` sql -mysql> CREATE USER 'clickhouse'@'localhost' IDENTIFIED BY 'clickhouse'; -mysql> GRANT ALL PRIVILEGES ON *.* TO 'clickhouse'@'localhost' WITH GRANT OPTION; -``` - -Then configure the connection in `/etc/odbc.ini`. - -``` bash -$ cat /etc/odbc.ini -[mysqlconn] -DRIVER = /usr/local/lib/libmyodbc5w.so -SERVER = 127.0.0.1 -PORT = 3306 -DATABASE = test -USERNAME = clickhouse -PASSWORD = clickhouse -``` - -You can check the connection using the `isql` utility from the unixODBC installation. - -``` bash -$ isql -v mysqlconn -+---------------------------------------+ -| Connected! | -| | -... -``` - -Table in MySQL: - -``` text -mysql> CREATE TABLE `test`.`test` ( - -> `int_id` INT NOT NULL AUTO_INCREMENT, - -> `int_nullable` INT NULL DEFAULT NULL, - -> `float` FLOAT NOT NULL, - -> `float_nullable` FLOAT NULL DEFAULT NULL, - -> PRIMARY KEY (`int_id`)); -Query OK, 0 rows affected (0,09 sec) - -mysql> insert into test (`int_id`, `float`) VALUES (1,2); -Query OK, 1 row affected (0,00 sec) - -mysql> select * from test; -+--------+--------------+-------+----------------+ -| int_id | int_nullable | float | float_nullable | -+--------+--------------+-------+----------------+ -| 1 | NULL | 2 | NULL | -+--------+--------------+-------+----------------+ -1 row in set (0,00 sec) -``` - -Table in ClickHouse, retrieving data from the MySQL table: - -``` sql -CREATE TABLE odbc_t -( - `int_id` Int32, - `float_nullable` Nullable(Float32) -) -ENGINE = ODBC('DSN=mysqlconn', 'test', 'test') -``` - -``` sql -SELECT * FROM odbc_t -``` - -``` text -┌─int_id─┬─float_nullable─┐ -│ 1 │ ᴺᵁᴸᴸ │ -└────────┴────────────────┘ -``` - -## See Also {#see-also} - -- [ODBC external dictionaries](../../query_language/dicts/external_dicts_dict_sources.md#dicts-external_dicts_dict_sources-odbc) -- [ODBC table function](../../query_language/table_functions/odbc.md) - -[Original article](https://clickhouse.tech/docs/en/operations/table_engines/odbc/) diff --git a/docs/fa/operations/table_engines/replacingmergetree.md b/docs/fa/operations/table_engines/replacingmergetree.md deleted file mode 100644 index 5cd66e51993..00000000000 --- a/docs/fa/operations/table_engines/replacingmergetree.md +++ /dev/null @@ -1,66 +0,0 @@ ---- -en_copy: true ---- - -# ReplacingMergeTree {#replacingmergetree} - -The engine differs from [MergeTree](mergetree.md#table_engines-mergetree) in that it removes duplicate entries with the same primary key value (or more accurately, with the same [sorting key](mergetree.md) value). - -Data deduplication occurs only during a merge. Merging occurs in the background at an unknown time, so you can’t plan for it. Some of the data may remain unprocessed. 
Although you can run an unscheduled merge using the `OPTIMIZE` query, don’t count on using it, because the `OPTIMIZE` query will read and write a large amount of data. - -Thus, `ReplacingMergeTree` is suitable for clearing out duplicate data in the background in order to save space, but it doesn’t guarantee the absence of duplicates. - -## Creating a Table {#creating-a-table} - -``` sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] -( - name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], - name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], - ... -) ENGINE = ReplacingMergeTree([ver]) -[PARTITION BY expr] -[ORDER BY expr] -[PRIMARY KEY expr] -[SAMPLE BY expr] -[SETTINGS name=value, ...] -``` - -For a description of request parameters, see [request description](../../query_language/create.md). - -**ReplacingMergeTree Parameters** - -- `ver` — column with the version. Type `UInt*`, `Date` or `DateTime`. Optional parameter. - - When merging, `ReplacingMergeTree` leaves only one row from all the rows with the same primary key: - - - The last in the selection, if `ver` is not set. - - The one with the maximum version, if `ver` is specified. - -**Query clauses** - -When creating a `ReplacingMergeTree` table the same [clauses](mergetree.md) are required as when creating a `MergeTree` table. - -
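For illustration, a sketch (with hypothetical table and column names) of how the `ver` parameter is typically used:

``` sql
-- Hypothetical example: deduplicate by (UserID, URL), keeping the row
-- with the highest Version value when parts are merged.
CREATE TABLE page_states
(
    UserID UInt64,
    URL String,
    Version UInt32,
    State String
)
ENGINE = ReplacingMergeTree(Version)
ORDER BY (UserID, URL);
```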
- -Deprecated Method for Creating a Table - -!!! attention "Attention" - Do not use this method in new projects and, if possible, switch the old projects to the method described above. - -``` sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] -( - name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], - name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], - ... -) ENGINE [=] ReplacingMergeTree(date-column [, sampling_expression], (primary, key), index_granularity, [ver]) -``` - -All of the parameters except `ver` have the same meaning as in `MergeTree`. - -- `ver` — column with the version. Optional parameter. For a description, see the text above. - -
- -[Original article](https://clickhouse.tech/docs/en/operations/table_engines/replacingmergetree/) diff --git a/docs/fa/operations/table_engines/replication.md b/docs/fa/operations/table_engines/replication.md deleted file mode 100644 index 3172fc52d92..00000000000 --- a/docs/fa/operations/table_engines/replication.md +++ /dev/null @@ -1,215 +0,0 @@ ---- -en_copy: true ---- - -# Data Replication {#table_engines-replication} - -Replication is only supported for tables in the MergeTree family: - -- ReplicatedMergeTree -- ReplicatedSummingMergeTree -- ReplicatedReplacingMergeTree -- ReplicatedAggregatingMergeTree -- ReplicatedCollapsingMergeTree -- ReplicatedVersionedCollapsingMergeTree -- ReplicatedGraphiteMergeTree - -Replication works at the level of an individual table, not the entire server. A server can store both replicated and non-replicated tables at the same time. - -Replication does not depend on sharding. Each shard has its own independent replication. - -Compressed data for `INSERT` and `ALTER` queries is replicated (for more information, see the documentation for [ALTER](../../query_language/alter.md#query_language_queries_alter)). - -`CREATE`, `DROP`, `ATTACH`, `DETACH` and `RENAME` queries are executed on a single server and are not replicated: - -- The `CREATE TABLE` query creates a new replicatable table on the server where the query is run. If this table already exists on other servers, it adds a new replica. -- The `DROP TABLE` query deletes the replica located on the server where the query is run. -- The `RENAME` query renames the table on one of the replicas. In other words, replicated tables can have different names on different replicas. - -ClickHouse uses [Apache ZooKeeper](https://zookeeper.apache.org) for storing replicas’ meta information. Use ZooKeeper version 3.4.5 or newer. - -To use replication, set parameters in the [zookeeper](../server_settings/settings.md#server-settings_zookeeper) server configuration section. - -!!! attention "Attention" - Don’t neglect the security setting. ClickHouse supports the `digest` [ACL scheme](https://zookeeper.apache.org/doc/current/zookeeperProgrammers.html#sc_ZooKeeperAccessControl) of the ZooKeeper security subsystem. - -Example of setting the addresses of the ZooKeeper cluster:

``` xml
<zookeeper>
    <node index="1">
        <host>example1</host>
        <port>2181</port>
    </node>
    <node index="2">
        <host>example2</host>
        <port>2181</port>
    </node>
    <node index="3">
        <host>example3</host>
        <port>2181</port>
    </node>
</zookeeper>
```

You can specify any existing ZooKeeper cluster and the system will use a directory on it for its own data (the directory is specified when creating a replicatable table). - -If ZooKeeper isn’t set in the config file, you can’t create replicated tables, and any existing replicated tables will be read-only. - -ZooKeeper is not used in `SELECT` queries because replication does not affect the performance of `SELECT` and queries run just as fast as they do for non-replicated tables. When querying distributed replicated tables, ClickHouse behavior is controlled by the settings [max\_replica\_delay\_for\_distributed\_queries](../settings/settings.md#settings-max_replica_delay_for_distributed_queries) and [fallback\_to\_stale\_replicas\_for\_distributed\_queries](../settings/settings.md#settings-fallback_to_stale_replicas_for_distributed_queries). - -For each `INSERT` query, approximately ten entries are added to ZooKeeper through several transactions. (To be more precise, this is for each inserted block of data; an INSERT query contains one block or one block per `max_insert_block_size = 1048576` rows.) 
This leads to slightly longer latencies for `INSERT` compared to non-replicated tables. But if you follow the recommendations to insert data in batches of no more than one `INSERT` per second, it doesn’t create any problems. An entire ClickHouse cluster coordinated by one ZooKeeper cluster performs a total of several hundred `INSERT` queries per second. The throughput on data inserts (the number of rows per second) is just as high as for non-replicated data. - -For very large clusters, you can use different ZooKeeper clusters for different shards. However, this hasn’t proven necessary on the Yandex.Metrica cluster (approximately 300 servers). - -Replication is asynchronous and multi-master. `INSERT` queries (as well as `ALTER`) can be sent to any available server. Data is inserted on the server where the query is run, and then it is copied to the other servers. Because it is asynchronous, recently inserted data appears on the other replicas with some latency. If part of the replicas are not available, the data is written when they become available. If a replica is available, the latency is the amount of time it takes to transfer the block of compressed data over the network. - -By default, an INSERT query waits for confirmation of writing the data from only one replica. If the data was successfully written to only one replica and the server with this replica ceases to exist, the stored data will be lost. To enable getting confirmation of data writes from multiple replicas, use the `insert_quorum` option. - -Each block of data is written atomically. The INSERT query is divided into blocks up to `max_insert_block_size = 1048576` rows. In other words, if the `INSERT` query has less than 1048576 rows, it is made atomically. - -Data blocks are deduplicated. For multiple writes of the same data block (data blocks of the same size containing the same rows in the same order), the block is only written once. The reason for this is that in case of network failures the client application may not know if the data was written to the DB, so the `INSERT` query can simply be repeated. It doesn’t matter which replica identical `INSERT` queries were sent to: `INSERT` queries are idempotent. Deduplication parameters are controlled by [merge\_tree](../server_settings/settings.md#server_settings-merge_tree) server settings. - -During replication, only the source data to insert is transferred over the network. Further data transformation (merging) is coordinated and performed on all the replicas in the same way. This minimizes network usage, which means that replication works well when replicas reside in different datacenters. (Note that duplicating data in different datacenters is the main goal of replication.) - -You can have any number of replicas of the same data. Yandex.Metrica uses double replication in production. Each server uses RAID-5 or RAID-6, and RAID-10 in some cases. This is a relatively reliable and convenient solution. - -The system monitors data synchronicity on replicas and is able to recover after a failure. Failover is automatic (for small differences in data) or semi-automatic (when data differs too much, which may indicate a configuration error). - -## Creating Replicated Tables {#creating-replicated-tables} - -The `Replicated` prefix is added to the table engine name. For example: `ReplicatedMergeTree`. - -**Replicated\*MergeTree parameters** - -- `zoo_path` — The path to the table in ZooKeeper. -- `replica_name` — The replica name in ZooKeeper. 
- -Example: - -``` sql -CREATE TABLE table_name -( - EventDate DateTime, - CounterID UInt32, - UserID UInt32 -) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{layer}-{shard}/table_name', '{replica}') -PARTITION BY toYYYYMM(EventDate) -ORDER BY (CounterID, EventDate, intHash32(UserID)) -SAMPLE BY intHash32(UserID) -``` - -
    - -Example in deprecated syntax - -``` sql -CREATE TABLE table_name -( - EventDate DateTime, - CounterID UInt32, - UserID UInt32 -) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{layer}-{shard}/table_name', '{replica}', EventDate, intHash32(UserID), (CounterID, EventDate, intHash32(UserID), EventTime), 8192) -``` - -
    - -As the example shows, these parameters can contain substitutions in curly brackets. The substituted values are taken from the ‘macros’ section of the configuration file. Example: - -``` xml - - 05 - 02 - example05-02-1.yandex.ru - -``` - -The path to the table in ZooKeeper should be unique for each replicated table. Tables on different shards should have different paths. -In this case, the path consists of the following parts: - -`/clickhouse/tables/` is the common prefix. We recommend using exactly this one. - -`{layer}-{shard}` is the shard identifier. In this example it consists of two parts, since the Yandex.Metrica cluster uses bi-level sharding. For most tasks, you can leave just the {shard} substitution, which will be expanded to the shard identifier. - -`table_name` is the name of the node for the table in ZooKeeper. It is a good idea to make it the same as the table name. It is defined explicitly, because in contrast to the table name, it doesn’t change after a RENAME query. -*HINT*: you could add a database name in front of `table_name` as well. E.g. `db_name.table_name` - -The replica name identifies different replicas of the same table. You can use the server name for this, as in the example. The name only needs to be unique within each shard. - -You can define the parameters explicitly instead of using substitutions. This might be convenient for testing and for configuring small clusters. However, you can’t use distributed DDL queries (`ON CLUSTER`) in this case. - -When working with large clusters, we recommend using substitutions because they reduce the probability of error. - -Run the `CREATE TABLE` query on each replica. This query creates a new replicated table, or adds a new replica to an existing one. - -If you add a new replica after the table already contains some data on other replicas, the data will be copied from the other replicas to the new one after running the query. In other words, the new replica syncs itself with the others. - -To delete a replica, run `DROP TABLE`. However, only one replica is deleted – the one that resides on the server where you run the query. - -## Recovery After Failures {#recovery-after-failures} - -If ZooKeeper is unavailable when a server starts, replicated tables switch to read-only mode. The system periodically attempts to connect to ZooKeeper. - -If ZooKeeper is unavailable during an `INSERT`, or an error occurs when interacting with ZooKeeper, an exception is thrown. - -After connecting to ZooKeeper, the system checks whether the set of data in the local file system matches the expected set of data (ZooKeeper stores this information). If there are minor inconsistencies, the system resolves them by syncing data with the replicas. - -If the system detects broken data parts (with the wrong size of files) or unrecognized parts (parts written to the file system but not recorded in ZooKeeper), it moves them to the `detached` subdirectory (they are not deleted). Any missing parts are copied from the replicas. - -Note that ClickHouse does not perform any destructive actions such as automatically deleting a large amount of data. - -When the server starts (or establishes a new session with ZooKeeper), it only checks the quantity and sizes of all files. If the file sizes match but bytes have been changed somewhere in the middle, this is not detected immediately, but only when attempting to read the data for a `SELECT` query. The query throws an exception about a non-matching checksum or size of a compressed block. 
In this case, data parts are added to the verification queue and copied from the replicas if necessary. - -If the local set of data differs too much from the expected one, a safety mechanism is triggered. The server enters this in the log and refuses to launch. The reason for this is that this case may indicate a configuration error, such as if a replica on a shard was accidentally configured like a replica on a different shard. However, the thresholds for this mechanism are set fairly low, and this situation might occur during normal failure recovery. In this case, data is restored semi-automatically - by “pushing a button”. - -To start recovery, create the node `/path_to_table/replica_name/flags/force_restore_data` in ZooKeeper with any content, or run the command to restore all replicated tables: - -``` bash -sudo -u clickhouse touch /var/lib/clickhouse/flags/force_restore_data -``` - -Then restart the server. On start, the server deletes these flags and starts recovery. - -## Recovery After Complete Data Loss {#recovery-after-complete-data-loss} - -If all data and metadata disappeared from one of the servers, follow these steps for recovery: - -1. Install ClickHouse on the server. Define substitutions correctly in the config file that contains the shard identifier and replicas, if you use them. -2. If you had unreplicated tables that must be manually duplicated on the servers, copy their data from a replica (in the directory `/var/lib/clickhouse/data/db_name/table_name/`). -3. Copy table definitions located in `/var/lib/clickhouse/metadata/` from a replica. If a shard or replica identifier is defined explicitly in the table definitions, correct it so that it corresponds to this replica. (Alternatively, start the server and make all the `ATTACH TABLE` queries that should have been in the .sql files in `/var/lib/clickhouse/metadata/`.) -4. To start recovery, create the ZooKeeper node `/path_to_table/replica_name/flags/force_restore_data` with any content, or run the command to restore all replicated tables: `sudo -u clickhouse touch /var/lib/clickhouse/flags/force_restore_data` - -Then start the server (restart, if it is already running). Data will be downloaded from replicas. - -An alternative recovery option is to delete information about the lost replica from ZooKeeper (`/path_to_table/replica_name`), then create the replica again as described in “[Creating replicated tables](#creating-replicated-tables)”. - -There is no restriction on network bandwidth during recovery. Keep this in mind if you are restoring many replicas at once. - -## Converting from MergeTree to ReplicatedMergeTree {#converting-from-mergetree-to-replicatedmergetree} - -We use the term `MergeTree` to refer to all table engines in the `MergeTree family`, the same as for `ReplicatedMergeTree`. - -If you had a `MergeTree` table that was manually replicated, you can convert it to a replicated table. You might need to do this if you have already collected a large amount of data in a `MergeTree` table and now you want to enable replication. - -If the data differs on various replicas, first sync it, or delete this data on all the replicas except one. - -Rename the existing MergeTree table, then create a `ReplicatedMergeTree` table with the old name. -Move the data from the old table to the `detached` subdirectory inside the directory with the new table data (`/var/lib/clickhouse/data/db_name/table_name/`). -Then run `ALTER TABLE ATTACH PARTITION` on one of the replicas to add these data parts to the working set. 
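A minimal sketch of this conversion sequence, with hypothetical database, table, and partition names:

``` sql
-- 1. Rename the existing non-replicated table out of the way.
RENAME TABLE db.hits TO db.hits_old;

-- 2. Create a replicated table with the old name and the same structure.
CREATE TABLE db.hits
(
    EventDate Date,
    CounterID UInt32,
    UserID UInt32
)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/hits', '{replica}')
PARTITION BY toYYYYMM(EventDate)
ORDER BY (CounterID, EventDate, intHash32(UserID));

-- 3. After moving the old parts into the new table's detached/ directory
--    on the filesystem, attach them on one of the replicas.
ALTER TABLE db.hits ATTACH PARTITION 201903;
```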
- -## Converting from ReplicatedMergeTree to MergeTree {#converting-from-replicatedmergetree-to-mergetree} - -Create a MergeTree table with a different name. Move all the data from the directory with the `ReplicatedMergeTree` table data to the new table’s data directory. Then delete the `ReplicatedMergeTree` table and restart the server. - -If you want to get rid of a `ReplicatedMergeTree` table without launching the server: - -- Delete the corresponding `.sql` file in the metadata directory (`/var/lib/clickhouse/metadata/`). -- Delete the corresponding path in ZooKeeper (`/path_to_table/replica_name`). - -After this, you can launch the server, create a `MergeTree` table, move the data to its directory, and then restart the server. - -## Recovery When Metadata in The ZooKeeper Cluster is Lost or Damaged {#recovery-when-metadata-in-the-zookeeper-cluster-is-lost-or-damaged} - -If the data in ZooKeeper was lost or damaged, you can save data by moving it to an unreplicated table as described above. - -[Original article](https://clickhouse.tech/docs/en/operations/table_engines/replication/) diff --git a/docs/fa/operations/table_engines/set.md b/docs/fa/operations/table_engines/set.md deleted file mode 100644 index 76e276fc0ab..00000000000 --- a/docs/fa/operations/table_engines/set.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -en_copy: true ---- - -# Set {#set} - -A data set that is always in RAM. It is intended for use on the right side of the IN operator (see the section “IN operators”). - -You can use INSERT to insert data in the table. New elements will be added to the data set, while duplicates will be ignored. -But you can’t perform SELECT from the table. The only way to retrieve data is by using it in the right half of the IN operator. - -Data is always located in RAM. For INSERT, the blocks of inserted data are also written to the directory of tables on the disk. When starting the server, this data is loaded to RAM. In other words, after restarting, the data remains in place. - -For a rough server restart, the block of data on the disk might be lost or damaged. In the latter case, you may need to manually delete the file with damaged data. - -[Original article](https://clickhouse.tech/docs/en/operations/table_engines/set/) diff --git a/docs/fa/operations/table_engines/stripelog.md b/docs/fa/operations/table_engines/stripelog.md deleted file mode 100644 index 9cf881d3590..00000000000 --- a/docs/fa/operations/table_engines/stripelog.md +++ /dev/null @@ -1,92 +0,0 @@ ---- -en_copy: true ---- - -# StripeLog {#stripelog} - -This engine belongs to the family of log engines. See the common properties of log engines and their differences in the [Log Engine Family](log_family.md) article. - -Use this engine in scenarios when you need to write many tables with a small amount of data (less than 1 million rows). - -## Creating a Table {#table_engines-stripelog-creating-a-table} - -``` sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] -( - column1_name [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], - column2_name [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], - ... -) ENGINE = StripeLog -``` - -See the detailed description of the [CREATE TABLE](../../query_language/create.md#create-table-query) query. - -## Writing the Data {#table_engines-stripelog-writing-the-data} - -The `StripeLog` engine stores all the columns in one file. For each `INSERT` query, ClickHouse appends the data block to the end of a table file, writing columns one by one. 
- -For each table ClickHouse writes the files: - -- `data.bin` — Data file. -- `index.mrk` — File with marks. Marks contain offsets for each column of each data block inserted. - -The `StripeLog` engine does not support the `ALTER UPDATE` and `ALTER DELETE` operations. - -## Reading the Data {#table_engines-stripelog-reading-the-data} - -The file with marks allows ClickHouse to parallelize the reading of data. This means that a `SELECT` query returns rows in an unpredictable order. Use the `ORDER BY` clause to sort rows. - -## Example of Use {#table_engines-stripelog-example-of-use} - -Creating a table: - -``` sql -CREATE TABLE stripe_log_table -( - timestamp DateTime, - message_type String, - message String -) -ENGINE = StripeLog -``` - -Inserting data: - -``` sql -INSERT INTO stripe_log_table VALUES (now(),'REGULAR','The first regular message') -INSERT INTO stripe_log_table VALUES (now(),'REGULAR','The second regular message'),(now(),'WARNING','The first warning message') -``` - -We used two `INSERT` queries to create two data blocks inside the `data.bin` file. - -ClickHouse uses multiple threads when selecting data. Each thread reads a separate data block and returns resulting rows independently as it finishes. As a result, the order of blocks of rows in the output does not match the order of the same blocks in the input in most cases. For example: - -``` sql -SELECT * FROM stripe_log_table -``` - -``` text -┌───────────timestamp─┬─message_type─┬─message────────────────────┐ -│ 2019-01-18 14:27:32 │ REGULAR │ The second regular message │ -│ 2019-01-18 14:34:53 │ WARNING │ The first warning message │ -└─────────────────────┴──────────────┴────────────────────────────┘ -┌───────────timestamp─┬─message_type─┬─message───────────────────┐ -│ 2019-01-18 14:23:43 │ REGULAR │ The first regular message │ -└─────────────────────┴──────────────┴───────────────────────────┘ -``` - -Sorting the results (ascending order by default): - -``` sql -SELECT * FROM stripe_log_table ORDER BY timestamp -``` - -``` text -┌───────────timestamp─┬─message_type─┬─message────────────────────┐ -│ 2019-01-18 14:23:43 │ REGULAR │ The first regular message │ -│ 2019-01-18 14:27:32 │ REGULAR │ The second regular message │ -│ 2019-01-18 14:34:53 │ WARNING │ The first warning message │ -└─────────────────────┴──────────────┴────────────────────────────┘ -``` - -[Original article](https://clickhouse.tech/docs/en/operations/table_engines/stripelog/) diff --git a/docs/fa/operations/table_engines/summingmergetree.md b/docs/fa/operations/table_engines/summingmergetree.md deleted file mode 100644 index 9c3bbfa607e..00000000000 --- a/docs/fa/operations/table_engines/summingmergetree.md +++ /dev/null @@ -1,138 +0,0 @@ ---- -en_copy: true ---- - -# SummingMergeTree {#summingmergetree} - -The engine inherits from [MergeTree](mergetree.md#table_engines-mergetree). The difference is that when merging data parts for `SummingMergeTree` tables ClickHouse replaces all the rows with the same primary key (or more accurately, with the same [sorting key](mergetree.md)) with one row which contains summarized values for the columns with the numeric data type. If the sorting key is composed in a way that a single key value corresponds to a large number of rows, this significantly reduces storage volume and speeds up data selection. - -We recommend using the engine together with `MergeTree`. Store complete data in a `MergeTree` table, and use `SummingMergeTree` for storing aggregated data, for example, when preparing reports. 
Such an approach will prevent you from losing valuable data due to an incorrectly composed primary key.
-
-## Creating a Table {#creating-a-table}
-
-``` sql
-CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
-(
-    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
-    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
-    ...
-) ENGINE = SummingMergeTree([columns])
-[PARTITION BY expr]
-[ORDER BY expr]
-[SAMPLE BY expr]
-[SETTINGS name=value, ...]
-```
-
-For a description of query parameters, see the [query description](../../query_language/create.md).
-
-**Parameters of SummingMergeTree**
-
-- `columns` — a tuple with the names of the columns whose values will be summarized. Optional parameter.
-    The columns must be of a numeric type and must not be in the primary key.
-
-    If `columns` is not specified, ClickHouse summarizes the values in all columns with a numeric data type that are not in the primary key.
-
-**Query clauses**
-
-When creating a `SummingMergeTree` table, the same [clauses](mergetree.md) are required as when creating a `MergeTree` table.
-
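-For example, here is a minimal sketch of such a table, paired with a `MergeTree` table as recommended above (the names `events` and `events_summed` are illustrative, and a materialized view is only one possible way to populate the aggregated copy):
-
-``` sql
-CREATE TABLE events
-(
-    key UInt32,
-    value UInt32
-)
-ENGINE = MergeTree()
-ORDER BY key;
-
-CREATE MATERIALIZED VIEW events_summed
-ENGINE = SummingMergeTree()
-ORDER BY key
-AS SELECT key, value FROM events;
-```
-
-With this pairing, the complete rows stay in `events`, while reports can read the much smaller `events_summed` table.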
-
-
-Deprecated Method for Creating a Table
-
-!!! attention "Attention"
-    Do not use this method in new projects and, if possible, switch old projects to the method described above.
-
-``` sql
-CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
-(
-    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
-    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
-    ...
-) ENGINE [=] SummingMergeTree(date-column [, sampling_expression], (primary, key), index_granularity, [columns])
-```
-
-All of the parameters except `columns` have the same meaning as in `MergeTree`.
-
-- `columns` — a tuple with the names of the columns whose values will be summarized. Optional parameter. For a description, see the text above.
-
-
-
-## Usage Example {#usage-example}
-
-Consider the following table:
-
-``` sql
-CREATE TABLE summtt
-(
-    key UInt32,
-    value UInt32
-)
-ENGINE = SummingMergeTree()
-ORDER BY key
-```
-
-Insert data into it:
-
-``` sql
-INSERT INTO summtt VALUES (1,1),(1,2),(2,1)
-```
-
-ClickHouse may sum the rows only partially ([see below](#data-processing)), so we use the aggregate function `sum` and a `GROUP BY` clause in the query.
-
-``` sql
-SELECT key, sum(value) FROM summtt GROUP BY key
-```
-
-``` text
-┌─key─┬─sum(value)─┐
-│   2 │          1 │
-│   1 │          3 │
-└─────┴────────────┘
-```
-
-## Data Processing {#data-processing}
-
-When data are inserted into a table, they are saved as-is. ClickHouse periodically merges the inserted data parts, and this is when rows with the same primary key are summed and replaced with one row in each resulting data part.
-
-ClickHouse can merge the data parts in such a way that different resulting data parts can consist of rows with the same primary key, i.e. the summation may be incomplete. Therefore, an aggregate function [sum()](../../query_language/agg_functions/reference.md#agg_function-sum) and a `GROUP BY` clause should be used in `SELECT` queries, as described in the example above.
-
-### Common rules for summation {#common-rules-for-summation}
-
-The values in the columns with the numeric data type are summarized. The set of columns is defined by the parameter `columns`.
-
-If the values were 0 in all of the columns for summation, the row is deleted.
-
-If a column is not in the primary key and is not summarized, an arbitrary value is selected from the existing ones.
-
-The values are not summarized for columns in the primary key.
-
-### The Summation in the AggregateFunction Columns {#the-summation-in-the-aggregatefunction-columns}
-
-For columns of the [AggregateFunction type](../../data_types/nested_data_structures/aggregatefunction.md), ClickHouse behaves like the [AggregatingMergeTree](aggregatingmergetree.md) engine, aggregating according to the function.
-
-### Nested Structures {#nested-structures}
-
-A table can have nested data structures that are processed in a special way.
-
-If the name of a nested table ends with `Map` and it contains at least two columns that meet the following criteria:
-
-- the first column is numeric `(*Int*, Date, DateTime)` or a string `(String, FixedString)`, let’s call it `key`,
-- the other columns are arithmetic `(*Int*, Float32/64)`, let’s call them `(values...)`,
-
-then this nested table is interpreted as a mapping of `key => (values...)`, and when merging its rows, the elements of two data sets are merged by `key` with a summation of the corresponding `(values...)`.
-
-Examples:
-
-``` text
-[(1, 100)] + [(2, 150)] -> [(1, 100), (2, 150)]
-[(1, 100)] + [(1, 150)] -> [(1, 250)]
-[(1, 100)] + [(1, 150), (2, 150)] -> [(1, 250), (2, 150)]
-[(1, 100), (2, 150)] + [(1, -100)] -> [(2, 150)]
-```
-
-When requesting data, use the [sumMap(key, value)](../../query_language/agg_functions/reference.md) function for aggregation of `Map`.
-
-For a nested data structure, you do not need to specify its columns in the tuple of columns for summation.
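-
-For example, a sketch of such a nested structure and its aggregation (the table and column names are illustrative):
-
-``` sql
-CREATE TABLE visits_map
-(
-    date Date,
-    key UInt32,
-    StatusMap Nested(
-        Status UInt16,
-        Requests UInt64
-    )
-)
-ENGINE = SummingMergeTree()
-ORDER BY (date, key);
-
-SELECT key, sumMap(StatusMap.Status, StatusMap.Requests) FROM visits_map GROUP BY key;
-```
-
-During merges, rows of `visits_map` with the same `(date, key)` are combined, and the `StatusMap` arrays are merged by `Status` with the corresponding `Requests` summed; the `sumMap` call performs the same combination at query time across the parts that have not been merged yet.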
- -[Original article](https://clickhouse.tech/docs/en/operations/table_engines/summingmergetree/) diff --git a/docs/fa/operations/table_engines/tinylog.md b/docs/fa/operations/table_engines/tinylog.md deleted file mode 100644 index 8745b43c729..00000000000 --- a/docs/fa/operations/table_engines/tinylog.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -en_copy: true ---- - -# TinyLog {#tinylog} - -The engine belongs to the log engine family. See [Log Engine Family](log_family.md) for common properties of log engines and their differences. - -This table engine is typically used with the write-once method: write data one time, then read it as many times as necessary. For example, you can use `TinyLog`-type tables for intermediary data that is processed in small batches. Note that storing data in a large number of small tables is inefficient. - -Queries are executed in a single stream. In other words, this engine is intended for relatively small tables (up to about 1,000,000 rows). It makes sense to use this table engine if you have many small tables, since it’s simpler than the [Log](log.md) engine (fewer files need to be opened). - -[Original article](https://clickhouse.tech/docs/en/operations/table_engines/tinylog/) diff --git a/docs/fa/operations/table_engines/url.md b/docs/fa/operations/table_engines/url.md deleted file mode 100644 index 4f6681a7a17..00000000000 --- a/docs/fa/operations/table_engines/url.md +++ /dev/null @@ -1,79 +0,0 @@ ---- -en_copy: true ---- - -# URL(URL, Format) {#table_engines-url} - -Manages data on a remote HTTP/HTTPS server. This engine is similar -to the [File](file.md) engine. - -## Using the engine in the ClickHouse server {#using-the-engine-in-the-clickhouse-server} - -The `format` must be one that ClickHouse can use in -`SELECT` queries and, if necessary, in `INSERTs`. For the full list of supported formats, see -[Formats](../../interfaces/formats.md#formats). - -The `URL` must conform to the structure of a Uniform Resource Locator. The specified URL must point to a server -that uses HTTP or HTTPS. This does not require any -additional headers for getting a response from the server. - -`INSERT` and `SELECT` queries are transformed to `POST` and `GET` requests, -respectively. For processing `POST` requests, the remote server must support -[Chunked transfer encoding](https://en.wikipedia.org/wiki/Chunked_transfer_encoding). - -You can limit the maximum number of HTTP GET redirect hops using the [max\_http\_get\_redirects](../settings/settings.md#setting-max_http_get_redirects) setting. 
- -**Example:** - -**1.** Create a `url_engine_table` table on the server : - -``` sql -CREATE TABLE url_engine_table (word String, value UInt64) -ENGINE=URL('http://127.0.0.1:12345/', CSV) -``` - -**2.** Create a basic HTTP server using the standard Python 3 tools and -start it: - -``` python3 -from http.server import BaseHTTPRequestHandler, HTTPServer - -class CSVHTTPServer(BaseHTTPRequestHandler): - def do_GET(self): - self.send_response(200) - self.send_header('Content-type', 'text/csv') - self.end_headers() - - self.wfile.write(bytes('Hello,1\nWorld,2\n', "utf-8")) - -if __name__ == "__main__": - server_address = ('127.0.0.1', 12345) - HTTPServer(server_address, CSVHTTPServer).serve_forever() -``` - -``` bash -$ python3 server.py -``` - -**3.** Request data: - -``` sql -SELECT * FROM url_engine_table -``` - -``` text -┌─word──┬─value─┐ -│ Hello │ 1 │ -│ World │ 2 │ -└───────┴───────┘ -``` - -## Details of Implementation {#details-of-implementation} - -- Reads and writes can be parallel -- Not supported: - - `ALTER` and `SELECT...SAMPLE` operations. - - Indexes. - - Replication. - -[Original article](https://clickhouse.tech/docs/en/operations/table_engines/url/) diff --git a/docs/fa/operations/table_engines/versionedcollapsingmergetree.md b/docs/fa/operations/table_engines/versionedcollapsingmergetree.md deleted file mode 100644 index 29f6d44d748..00000000000 --- a/docs/fa/operations/table_engines/versionedcollapsingmergetree.md +++ /dev/null @@ -1,235 +0,0 @@ ---- -en_copy: true ---- - -# VersionedCollapsingMergeTree {#versionedcollapsingmergetree} - -This engine: - -- Allows quick writing of object states that are continually changing. -- Deletes old object states in the background. This significantly reduces the volume of storage. - -See the section [Collapsing](#table_engines_versionedcollapsingmergetree) for details. - -The engine inherits from [MergeTree](mergetree.md#table_engines-mergetree) and adds the logic for collapsing rows to the algorithm for merging data parts. `VersionedCollapsingMergeTree` serves the same purpose as [CollapsingMergeTree](collapsingmergetree.md) but uses a different collapsing algorithm that allows inserting the data in any order with multiple threads. In particular, the `Version` column helps to collapse the rows properly even if they are inserted in the wrong order. In contrast, `CollapsingMergeTree` allows only strictly consecutive insertion. - -## Creating a Table {#creating-a-table} - -``` sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] -( - name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], - name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], - ... -) ENGINE = VersionedCollapsingMergeTree(sign, version) -[PARTITION BY expr] -[ORDER BY expr] -[SAMPLE BY expr] -[SETTINGS name=value, ...] -``` - -For a description of query parameters, see the [query description](../../query_language/create.md). - -**Engine Parameters** - -``` sql -VersionedCollapsingMergeTree(sign, version) -``` - -- `sign` — Name of the column with the type of row: `1` is a “state” row, `-1` is a “cancel” row. - - The column data type should be `Int8`. - -- `version` — Name of the column with the version of the object state. - - The column data type should be `UInt*`. - -**Query Clauses** - -When creating a `VersionedCollapsingMergeTree` table, the same [clauses](mergetree.md) are required as when creating a `MergeTree` table. - -
    - -Deprecated Method for Creating a Table - -!!! attention "Attention" - Do not use this method in new projects. If possible, switch the old projects to the method described above. - -``` sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] -( - name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], - name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], - ... -) ENGINE [=] VersionedCollapsingMergeTree(date-column [, sampling_expression], (primary, key), index_granularity, sign, version) -``` - -All of the parameters except `sign` and `version` have the same meaning as in `MergeTree`. - -- `sign` — Name of the column with the type of row: `1` is a “state” row, `-1` is a “cancel” row. - - Column Data Type — `Int8`. - -- `version` — Name of the column with the version of the object state. - - The column data type should be `UInt*`. - -
-
-
-## Collapsing {#table_engines-versionedcollapsingmergetree}
-
-### Data {#data}
-
-Consider a situation where you need to save continually changing data for some object. It is reasonable to have one row for an object and update the row whenever there are changes. However, the update operation is expensive and slow for a DBMS because it requires rewriting the data in the storage. Update is not acceptable if you need to write data quickly, but you can write the changes to an object sequentially as follows.
-
-Use the `Sign` column when writing the row. If `Sign = 1` it means that the row is a state of an object (let’s call it the “state” row). If `Sign = -1` it indicates the cancellation of the state of an object with the same attributes (let’s call it the “cancel” row). Also use the `Version` column, which should identify each state of an object with a separate number.
-
-For example, we want to calculate how many pages users visited on some site and how long they were there. At some point in time we write the following row with the state of user activity:
-
-``` text
-┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐
-│ 4324182021466249494 │         5 │      146 │    1 │       1 │
-└─────────────────────┴───────────┴──────────┴──────┴─────────┘
-```
-
-At some point later we register the change of user activity and write it with the following two rows.
-
-``` text
-┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐
-│ 4324182021466249494 │         5 │      146 │   -1 │       1 │
-│ 4324182021466249494 │         6 │      185 │    1 │       2 │
-└─────────────────────┴───────────┴──────────┴──────┴─────────┘
-```
-
-The first row cancels the previous state of the object (user). It should copy all of the fields of the canceled state except `Sign`.
-
-The second row contains the current state.
-
-Because we need only the last state of user activity, the rows
-
-``` text
-┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐
-│ 4324182021466249494 │         5 │      146 │    1 │       1 │
-│ 4324182021466249494 │         5 │      146 │   -1 │       1 │
-└─────────────────────┴───────────┴──────────┴──────┴─────────┘
-```
-
-can be deleted, collapsing the invalid (old) state of the object. `VersionedCollapsingMergeTree` does this while merging the data parts.
-
-To find out why we need two rows for each change, see [Algorithm](#table_engines-versionedcollapsingmergetree-algorithm).
-
-**Notes on Usage**
-
-1. The program that writes the data should remember the state of an object in order to cancel it. The “cancel” row should be a copy of the “state” row with the opposite `Sign`. This increases the initial size of storage but allows writing the data quickly.
-2. Long growing arrays in columns reduce the efficiency of the engine due to the write load. The more straightforward the data, the better the efficiency.
-3. `SELECT` results depend strongly on the consistency of the history of object changes. Be careful when preparing data for insertion. You can get unpredictable results with inconsistent data, such as negative values for non-negative metrics like session depth.
-
-### Algorithm {#table_engines-versionedcollapsingmergetree-algorithm}
-
-When ClickHouse merges data parts, it deletes each pair of rows that have the same primary key and version and different `Sign`. The order of rows does not matter.
-
-When ClickHouse inserts data, it orders rows by the primary key. If the `Version` column is not in the primary key, ClickHouse adds it to the primary key implicitly as the last field and uses it for ordering.
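-
-A minimal sketch of the pair deletion (the table and values are illustrative; `OPTIMIZE ... FINAL` is used here only to force a merge, since merges normally happen in the background at an arbitrary time):
-
-``` sql
-CREATE TABLE collapse_demo
-(
-    key UInt64,
-    Sign Int8,
-    Version UInt8
-)
-ENGINE = VersionedCollapsingMergeTree(Sign, Version)
-ORDER BY key;
-
-INSERT INTO collapse_demo VALUES (1, 1, 1);
-INSERT INTO collapse_demo VALUES (1, -1, 1), (1, 1, 2);
-
-OPTIMIZE TABLE collapse_demo FINAL;
-
-SELECT * FROM collapse_demo;
-```
-
-After the forced merge, the pair of rows with the same key and `Version = 1` but opposite `Sign` is deleted, and only the row `(1, 1, 2)` remains.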
- -## Selecting Data {#selecting-data} - -ClickHouse doesn’t guarantee that all of the rows with the same primary key will be in the same resulting data part or even on the same physical server. This is true both for writing the data and for subsequent merging of the data parts. In addition, ClickHouse processes `SELECT` queries with multiple threads, and it cannot predict the order of rows in the result. This means that aggregation is required if there is a need to get completely “collapsed” data from a `VersionedCollapsingMergeTree` table. - -To finalize collapsing, write a query with a `GROUP BY` clause and aggregate functions that account for the sign. For example, to calculate quantity, use `sum(Sign)` instead of `count()`. To calculate the sum of something, use `sum(Sign * x)` instead of `sum(x)`, and add `HAVING sum(Sign) > 0`. - -The aggregates `count`, `sum` and `avg` can be calculated this way. The aggregate `uniq` can be calculated if an object has at least one non-collapsed state. The aggregates `min` and `max` can’t be calculated because `VersionedCollapsingMergeTree` does not save the history of values of collapsed states. - -If you need to extract the data with “collapsing” but without aggregation (for example, to check whether rows are present whose newest values match certain conditions), you can use the `FINAL` modifier for the `FROM` clause. This approach is inefficient and should not be used with large tables. - -## Example of Use {#example-of-use} - -Example data: - -``` text -┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐ -│ 4324182021466249494 │ 5 │ 146 │ 1 │ 1 | -│ 4324182021466249494 │ 5 │ 146 │ -1 │ 1 | -│ 4324182021466249494 │ 6 │ 185 │ 1 │ 2 | -└─────────────────────┴───────────┴──────────┴──────┴─────────┘ -``` - -Creating the table: - -``` sql -CREATE TABLE UAct -( - UserID UInt64, - PageViews UInt8, - Duration UInt8, - Sign Int8, - Version UInt8 -) -ENGINE = VersionedCollapsingMergeTree(Sign, Version) -ORDER BY UserID -``` - -Inserting the data: - -``` sql -INSERT INTO UAct VALUES (4324182021466249494, 5, 146, 1, 1) -``` - -``` sql -INSERT INTO UAct VALUES (4324182021466249494, 5, 146, -1, 1),(4324182021466249494, 6, 185, 1, 2) -``` - -We use two `INSERT` queries to create two different data parts. If we insert the data with a single query, ClickHouse creates one data part and will never perform any merge. - -Getting the data: - -``` sql -SELECT * FROM UAct -``` - -``` text -┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐ -│ 4324182021466249494 │ 5 │ 146 │ 1 │ 1 │ -└─────────────────────┴───────────┴──────────┴──────┴─────────┘ -┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐ -│ 4324182021466249494 │ 5 │ 146 │ -1 │ 1 │ -│ 4324182021466249494 │ 6 │ 185 │ 1 │ 2 │ -└─────────────────────┴───────────┴──────────┴──────┴─────────┘ -``` - -What do we see here and where are the collapsed parts? -We created two data parts using two `INSERT` queries. The `SELECT` query was performed in two threads, and the result is a random order of rows. -Collapsing did not occur because the data parts have not been merged yet. ClickHouse merges data parts at an unknown point in time which we cannot predict. 
- -This is why we need aggregation: - -``` sql -SELECT - UserID, - sum(PageViews * Sign) AS PageViews, - sum(Duration * Sign) AS Duration, - Version -FROM UAct -GROUP BY UserID, Version -HAVING sum(Sign) > 0 -``` - -``` text -┌──────────────UserID─┬─PageViews─┬─Duration─┬─Version─┐ -│ 4324182021466249494 │ 6 │ 185 │ 2 │ -└─────────────────────┴───────────┴──────────┴─────────┘ -``` - -If we don’t need aggregation and want to force collapsing, we can use the `FINAL` modifier for the `FROM` clause. - -``` sql -SELECT * FROM UAct FINAL -``` - -``` text -┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐ -│ 4324182021466249494 │ 6 │ 185 │ 1 │ 2 │ -└─────────────────────┴───────────┴──────────┴──────┴─────────┘ -``` - -This is a very inefficient way to select data. Don’t use it for large tables. - -[Original article](https://clickhouse.tech/docs/en/operations/table_engines/versionedcollapsingmergetree/) diff --git a/docs/fa/operations/table_engines/view.md b/docs/fa/operations/table_engines/view.md deleted file mode 100644 index 9c3c1f2e0e0..00000000000 --- a/docs/fa/operations/table_engines/view.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -en_copy: true ---- - -# View {#table_engines-view} - -Used for implementing views (for more information, see the `CREATE VIEW query`). It does not store data, but only stores the specified `SELECT` query. When reading from a table, it runs this query (and deletes all unnecessary columns from the query). - -[Original article](https://clickhouse.tech/docs/en/operations/table_engines/view/) diff --git a/docs/fa/operations/tips.md b/docs/fa/operations/tips.md index 9e789599e7d..f411e525012 100644 --- a/docs/fa/operations/tips.md +++ b/docs/fa/operations/tips.md @@ -1,116 +1,120 @@ --- -en_copy: true +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 58 +toc_title: "\u062A\u0648\u0635\u06CC\u0647 \u0647\u0627\u06CC \u0627\u0633\u062A\u0641\ + \u0627\u062F\u0647" --- -# Usage Recommendations {#usage-recommendations} +# توصیه های استفاده {#usage-recommendations} -## CPU Scaling Governor {#cpu-scaling-governor} +## فرماندار پوسته پوسته شدن پردازنده {#cpu-scaling-governor} -Always use the `performance` scaling governor. The `on-demand` scaling governor works much worse with constantly high demand. +همیشه استفاده از `performance` پوسته پوسته شدن فرماندار. این `on-demand` پوسته پوسته شدن فرماندار کار می کند بسیار بدتر با تقاضای به طور مداوم بالا. ``` bash $ echo 'performance' | sudo tee /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor ``` -## CPU Limitations {#cpu-limitations} +## محدودیت های پردازنده {#cpu-limitations} -Processors can overheat. Use `dmesg` to see if the CPU’s clock rate was limited due to overheating. -The restriction can also be set externally at the datacenter level. You can use `turbostat` to monitor it under a load. +پردازنده می تواند بیش از حد گرم. استفاده `dmesg` برای دیدن اگر نرخ ساعت پردازنده به دلیل گرمای بیش از حد محدود بود. +محدودیت همچنین می توانید خارجی در سطح مرکز داده تنظیم شود. شما می توانید استفاده کنید `turbostat` تحت نظر داشته باشمش ## RAM {#ram} -For small amounts of data (up to ~200 GB compressed), it is best to use as much memory as the volume of data. -For large amounts of data and when processing interactive (online) queries, you should use a reasonable amount of RAM (128 GB or more) so the hot data subset will fit in the cache of pages. -Even for data volumes of ~50 TB per server, using 128 GB of RAM significantly improves query performance compared to 64 GB. 
+برای مقدار کمی از داده ها (تا ~ 200 گیگابایت فشرده), بهتر است به استفاده از حافظه به همان اندازه که حجم داده ها. +برای مقادیر زیادی از داده ها و در هنگام پردازش تعاملی (اینترنتی) نمایش داده شد, شما باید یک مقدار مناسب از رم استفاده (128 گیگابایت یا بیشتر) بنابراین زیر مجموعه داده های داغ در کش صفحات مناسب خواهد شد. +حتی برای حجم داده ها از ~50 سل در هر سرور, با استفاده از 128 گیگابایت رم به طور قابل توجهی بهبود می بخشد عملکرد پرس و جو در مقایسه با 64 گیگابایت. -Do not disable overcommit. The value `cat /proc/sys/vm/overcommit_memory` should be 0 or 1. Run +هنوز بیش از حد غیر فعال کردن نیست. مقدار `cat /proc/sys/vm/overcommit_memory` باید 0 یا 1. بدو ``` bash $ echo 0 | sudo tee /proc/sys/vm/overcommit_memory ``` -## Huge Pages {#huge-pages} +## صفحات بزرگ {#huge-pages} -Always disable transparent huge pages. It interferes with memory allocators, which leads to significant performance degradation. +همیشه صفحات بزرگ شفاف غیر فعال کنید. این با تخصیص حافظه تداخل, که منجر به تخریب عملکرد قابل توجهی. ``` bash $ echo 'never' | sudo tee /sys/kernel/mm/transparent_hugepage/enabled ``` -Use `perf top` to watch the time spent in the kernel for memory management. -Permanent huge pages also do not need to be allocated. +استفاده `perf top` برای تماشای زمان صرف شده در هسته برای مدیریت حافظه. +صفحات بزرگ ثابت نیز لازم نیست اختصاص داده شود. -## Storage Subsystem {#storage-subsystem} +## زیرسیستم ذخیره سازی {#storage-subsystem} -If your budget allows you to use SSD, use SSD. -If not, use HDD. SATA HDDs 7200 RPM will do. +اگر بودجه شما اجازه می دهد تا شما را به استفاده از اس اس دی, استفاده از اس اس دی. +اگر نه, استفاده از هارد. ساعت 7200 دور در دقیقه انجام خواهد شد. -Give preference to a lot of servers with local hard drives over a smaller number of servers with attached disk shelves. -But for storing archives with rare queries, shelves will work. +دادن اولویت به بسیاری از سرور با دیسک های سخت محلی بیش از تعداد کمتری از سرور با قفسه های دیسک متصل. +اما برای ذخیره سازی بایگانی با نمایش داده شد نادر, قفسه کار خواهد کرد. ## RAID {#raid} -When using HDD, you can combine their RAID-10, RAID-5, RAID-6 or RAID-50. -For Linux, software RAID is better (with `mdadm`). We don’t recommend using LVM. -When creating RAID-10, select the `far` layout. -If your budget allows, choose RAID-10. +هنگام استفاده از هارد, شما می توانید حمله خود را ترکیب-10, حمله-5, حمله-6 و یا حمله-50. +برای لینوکس, حمله نرم افزار بهتر است (با `mdadm`). ما توصیه نمی کنیم با استفاده از سطح. +هنگام ایجاد حمله-10, را انتخاب کنید `far` طرح بندی. +اگر بودجه شما اجازه می دهد تا, را انتخاب کنید حمله-10. -If you have more than 4 disks, use RAID-6 (preferred) or RAID-50, instead of RAID-5. -When using RAID-5, RAID-6 or RAID-50, always increase stripe\_cache\_size, since the default value is usually not the best choice. +اگر شما بیش از 4 دیسک, استفاده از حمله-6 (ترجیحا) و یا حمله-50, به جای حمله-5. +هنگام استفاده از حمله-5, حمله-6 و یا حمله-50, همیشه افزایش نزاع, از مقدار پیش فرض است که معمولا بهترین انتخاب نیست. ``` bash $ echo 4096 | sudo tee /sys/block/md2/md/stripe_cache_size ``` -Calculate the exact number from the number of devices and the block size, using the formula: `2 * num_devices * chunk_size_in_bytes / 4096`. +محاسبه تعداد دقیق از تعداد دستگاه ها و اندازه بلوک با استفاده از فرمول: `2 * num_devices * chunk_size_in_bytes / 4096`. -A block size of 1024 KB is sufficient for all RAID configurations. -Never set the block size too small or too large. +اندازه بلوک 1024 کیلوبایت برای تمام تنظیمات حمله کافی است. 
+هرگز اندازه بلوک بیش از حد کوچک یا بیش از حد بزرگ تنظیم شده است. -You can use RAID-0 on SSD. -Regardless of RAID use, always use replication for data security. +شما می توانید حمله استفاده-0 در اس اس دی. +صرف نظر از استفاده از حمله, همیشه تکرار برای امنیت داده ها استفاده. -Enable NCQ with a long queue. For HDD, choose the CFQ scheduler, and for SSD, choose noop. Don’t reduce the ‘readahead’ setting. -For HDD, enable the write cache. +فعال کردن دفتر مرکزی اروپا با یک صف طولانی. برای hdd را انتخاب کنید cfq زمانبندی و برای ssd را انتخاب کنید noop. کاهش نمی دهد ‘readahead’ تنظیمات. +برای هارد, فعال کردن کش نوشتن. -## File System {#file-system} +## سیستم پرونده {#file-system} -Ext4 is the most reliable option. Set the mount options `noatime, nobarrier`. -XFS is also suitable, but it hasn’t been as thoroughly tested with ClickHouse. -Most other file systems should also work fine. File systems with delayed allocation work better. +موجود 4 قابل اطمینان ترین گزینه است. تنظیم گزینههای سوارکردن `noatime, nobarrier`. +XFS نیز مناسب است اما از آن شده است به طور کامل تست شده با ClickHouse. +اکثر سیستم های فایل های دیگر نیز باید خوب کار می کنند. سیستم های فایل با تاخیر تخصیص کار بهتر است. -## Linux Kernel {#linux-kernel} +## هسته لینوکس {#linux-kernel} -Don’t use an outdated Linux kernel. +هنوز یک هسته لینوکس منسوخ شده استفاده کنید. -## Network {#network} +## شبکه {#network} -If you are using IPv6, increase the size of the route cache. -The Linux kernel prior to 3.2 had a multitude of problems with IPv6 implementation. +اگر شما با استفاده از ایپو6, افزایش اندازه کش مسیر. +هسته لینوکس قبل از 3.2 بسیاری از مشکلات با اجرای قانون مجازات اسلامی بود. -Use at least a 10 GB network, if possible. 1 Gb will also work, but it will be much worse for patching replicas with tens of terabytes of data, or for processing distributed queries with a large amount of intermediate data. +استفاده از حداقل یک 10 شبکه گیگابایت, در صورت امکان. 1 گیگابایت نیز کار خواهد کرد, اما برای وصله کپی با ده ها ترابایت داده بسیار بدتر خواهد بود, و یا برای پردازش نمایش داده شد توزیع با مقدار زیادی از داده های متوسط. -## ZooKeeper {#zookeeper} +## باغ وحش {#zookeeper} -You are probably already using ZooKeeper for other purposes. You can use the same installation of ZooKeeper, if it isn’t already overloaded. +شما احتمالا در حال حاضر با استفاده از باغ وحش برای مقاصد دیگر. شما می توانید نصب و راه اندازی همان باغ وحش استفاده, اگر در حال حاضر بیش از حد نیست. -It’s best to use a fresh version of ZooKeeper – 3.4.9 or later. The version in stable Linux distributions may be outdated. +It's best to use a fresh version of ZooKeeper – 3.4.9 or later. The version in stable Linux distributions may be outdated. -You should never use manually written scripts to transfer data between different ZooKeeper clusters, because the result will be incorrect for sequential nodes. Never use the “zkcopy” utility for the same reason: https://github.com/ksprojects/zkcopy/issues/15 +شما هرگز نباید از اسکریپت های دستی نوشته شده برای انتقال داده ها بین خوشه های مختلف باغ وحش استفاده کنید زیرا نتیجه برای گره های متوالی نادرست خواهد بود. هرگز استفاده از “zkcopy” ابزار به همین دلیل: https://github.com/ksprojects/zkcopy/issues/15 -If you want to divide an existing ZooKeeper cluster into two, the correct way is to increase the number of its replicas and then reconfigure it as two independent clusters. +اگر میخواهید یک خوشه باغ وحش موجود را به دو قسمت تقسیم کنید راه درست این است که تعداد تکرار های خود را افزایش دهید و سپس به عنوان دو خوشه مستقل پیکربندی کنید. 
-Do not run ZooKeeper on the same servers as ClickHouse. Because ZooKeeper is very sensitive for latency and ClickHouse may utilize all available system resources. +باغ وحش را بر روی سرورهای مشابه کلیک کنید. چرا که باغ وحش برای تاخیر بسیار حساس است و خانه رعیتی ممکن است تمام منابع سیستم در دسترس استفاده کنند. -With the default settings, ZooKeeper is a time bomb: +با تنظیمات پیش فرض, باغ وحش یک بمب زمان است: -> The ZooKeeper server won’t delete files from old snapshots and logs when using the default configuration (see autopurge), and this is the responsibility of the operator. +> سرور باغ وحش فایل ها را از عکس های فوری و سیاهههای مربوط قدیمی هنگام استفاده از پیکربندی پیش فرض حذف نمی کند (نگاه کنید به کالبد شکافی), و این به عهده اپراتور است. -This bomb must be defused. +این بمب باید خنثی شود. -The ZooKeeper (3.5.1) configuration below is used in the Yandex.Metrica production environment as of May 20, 2017: +باغ وحش (3.5.1) پیکربندی زیر در یاندکس استفاده می شود.محیط تولید متریکا تا 20 مه 2017: -zoo.cfg: +باغ وحش.cfg: ``` bash # http://hadoop.apache.org/zookeeper/docs/current/zookeeperAdmin.html @@ -166,14 +170,14 @@ standaloneEnabled=false dynamicConfigFile=/etc/zookeeper-{{ '{{' }} cluster['name'] {{ '}}' }}/conf/zoo.cfg.dynamic ``` -Java version: +نسخه جاوا: ``` text Java(TM) SE Runtime Environment (build 1.8.0_25-b17) Java HotSpot(TM) 64-Bit Server VM (build 25.25-b02, mixed mode) ``` -JVM parameters: +پارامترهای جی ام: ``` bash NAME=zookeeper-{{ '{{' }} cluster['name'] {{ '}}' }} @@ -214,7 +218,7 @@ JAVA_OPTS="-Xms{{ '{{' }} cluster.get('xms','128M') {{ '}}' }} \ -XX:+CMSParallelRemarkEnabled" ``` -Salt init: +نمک درون: ``` text description "zookeeper-{{ '{{' }} cluster['name'] {{ '}}' }} centralized coordination service" @@ -245,4 +249,4 @@ script end script ``` -{## [Original article](https://clickhouse.tech/docs/en/operations/tips/) ##} +{## [مقاله اصلی](https://clickhouse.tech/docs/en/operations/tips/) ##} diff --git a/docs/fa/operations/troubleshooting.md b/docs/fa/operations/troubleshooting.md index d48e2b4b7f6..73f30d78ec6 100644 --- a/docs/fa/operations/troubleshooting.md +++ b/docs/fa/operations/troubleshooting.md @@ -1,66 +1,69 @@ --- -en_copy: true +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 46 +toc_title: "\u0639\u06CC\u0628 \u06CC\u0627\u0628\u06CC" --- -# Troubleshooting {#troubleshooting} +# عیب یابی {#troubleshooting} -- [Installation](#troubleshooting-installation-errors) -- [Connecting to the server](#troubleshooting-accepts-no-connections) -- [Query processing](#troubleshooting-does-not-process-queries) -- [Efficiency of query processing](#troubleshooting-too-slow) +- [نصب و راه اندازی](#troubleshooting-installation-errors) +- [اتصال به سرور](#troubleshooting-accepts-no-connections) +- [پردازش پرس و جو](#troubleshooting-does-not-process-queries) +- [کارایی پردازش پرس و جو](#troubleshooting-too-slow) -## Installation {#troubleshooting-installation-errors} +## نصب و راه اندازی {#troubleshooting-installation-errors} -### You Cannot Get Deb Packages from ClickHouse Repository With apt-get {#you-cannot-get-deb-packages-from-clickhouse-repository-with-apt-get} +### شما می توانید بسته های دب از مخزن کلیک با مناسب دریافت کنید {#you-cannot-get-deb-packages-from-clickhouse-repository-with-apt-get} -- Check firewall settings. 
-- If you cannot access the repository for any reason, download packages as described in the [Getting started](../getting_started/index.md) article and install them manually using the `sudo dpkg -i ` command. You will also need the `tzdata` package. +- بررسی تنظیمات فایروال. +- اگر شما می توانید مخزن به هر دلیلی دسترسی پیدا کنید, دانلود بسته همانطور که در توصیف [شروع کار](../getting_started/index.md) مقاله و نصب دستی با استفاده از `sudo dpkg -i ` فرمان. همچنین شما می خواهد نیاز `tzdata` بسته -## Connecting to the Server {#troubleshooting-accepts-no-connections} +## اتصال به سرور {#troubleshooting-accepts-no-connections} -Possible issues: +مشکلات احتمالی: -- The server is not running. -- Unexpected or wrong configuration parameters. +- سرور در حال اجرا نیست. +- پارامترهای پیکربندی غیر منتظره و یا اشتباه. -### Server Is Not Running {#server-is-not-running} +### کارساز در حال اجرا نیست {#server-is-not-running} -**Check if server is runnnig** +**بررسی کنید که کارگزار روننیگ باشد** -Command: +فرمان: ``` bash $ sudo service clickhouse-server status ``` -If the server is not running, start it with the command: +اگر سرور در حال اجرا نیست, شروع با فرمان: ``` bash $ sudo service clickhouse-server start ``` -**Check logs** +**بررسی سیاههها** -The main log of `clickhouse-server` is in `/var/log/clickhouse-server/clickhouse-server.log` by default. +ورود اصلی `clickhouse-server` در `/var/log/clickhouse-server/clickhouse-server.log` به طور پیش فرض. -If the server started successfully, you should see the strings: +اگر سرور با موفقیت شروع, شما باید رشته ها را ببینید: - ` Application: starting up.` — Server started. - ` Application: Ready for connections.` — Server is running and ready for connections. -If `clickhouse-server` start failed with a configuration error, you should see the `` string with an error description. For example: +اگر `clickhouse-server` شروع با یک خطای پیکربندی شکست خورده, شما باید ببینید `` رشته با شرح خطا. به عنوان مثال: ``` text 2019.01.11 15:23:25.549505 [ 45 ] {} ExternalDictionaries: Failed reloading 'event2id' external dictionary: Poco::Exception. Code: 1000, e.code() = 111, e.displayText() = Connection refused, e.what() = Connection refused ``` -If you don’t see an error at the end of the file, look through the entire file starting from the string: +اگر شما یک خطا در انتهای فایل را نمی بینم, از طریق تمام فایل با شروع از رشته نگاه: ``` text Application: starting up. 
``` -If you try to start a second instance of `clickhouse-server` on the server, you see the following log: +اگر شما سعی می کنید برای شروع یک نمونه دوم از `clickhouse-server` بر روی سرور, شما ورود به سیستم زیر را ببینید: ``` text 2019.01.11 15:25:11.151730 [ 1 ] {} : Starting ClickHouse 19.1.0 with revision 54413 @@ -76,68 +79,68 @@ Revision: 54413 2019.01.11 15:25:11.156716 [ 2 ] {} BaseDaemon: Stop SignalListener thread ``` -**See system.d logs** +**مشاهده سیستم.د سیاهههای مربوط** -If you don’t find any useful information in `clickhouse-server` logs or there aren’t any logs, you can view `system.d` logs using the command: +اگر شما هر گونه اطلاعات مفید در پیدا کنید `clickhouse-server` سیاهههای مربوط و یا هر گونه سیاهههای مربوط وجود ندارد, شما می توانید مشاهده `system.d` سیاهههای مربوط با استفاده از دستور: ``` bash $ sudo journalctl -u clickhouse-server ``` -**Start clickhouse-server in interactive mode** +**شروع کلیک-سرور در حالت تعاملی** ``` bash $ sudo -u clickhouse /usr/bin/clickhouse-server --config-file /etc/clickhouse-server/config.xml ``` -This command starts the server as an interactive app with standard parameters of the autostart script. In this mode `clickhouse-server` prints all the event messages in the console. +این دستور سرور را به عنوان یک برنامه تعاملی با پارامترهای استاندارد اسکریپت خودکار شروع می کند. در این حالت `clickhouse-server` چاپ تمام پیام های رویداد در کنسول. -### Configuration Parameters {#configuration-parameters} +### پارامترهای پیکربندی {#configuration-parameters} -Check: +بررسی: -- Docker settings. +- تنظیمات کارگر بارانداز. - If you run ClickHouse in Docker in an IPv6 network, make sure that `network=host` is set. + اطمینان حاصل کنید که اگر شما اجرا خانه عروسکی در کارگر بارانداز در یک شبکه اینترنتی6 `network=host` قرار است. -- Endpoint settings. +- تنظیمات نقطه پایانی. - Check [listen\_host](server_settings/settings.md#server_settings-listen_host) and [tcp\_port](server_settings/settings.md#server_settings-tcp_port) settings. + بررسی [\_نوست فهرست](server_configuration_parameters/settings.md#server_configuration_parameters-listen_host) و [\_صادر کردن](server_configuration_parameters/settings.md#server_configuration_parameters-tcp_port) تنظیمات. - ClickHouse server accepts localhost connections only by default. + سرور کلیک می پذیرد اتصالات مجنون تنها به طور پیش فرض. -- HTTP protocol settings. +- تنظیمات پروتکل قام. - Check protocol settings for the HTTP API. + بررسی تنظیمات پروتکل برای صفحه اصلی. -- Secure connection settings. +- تنظیمات اتصال امن. - Check: + بررسی: - - The [tcp\_port\_secure](server_settings/settings.md#server_settings-tcp_port_secure) setting. - - Settings for [SSL sertificates](server_settings/settings.md#server_settings-openssl). + - این [\_شروع مجدد](server_configuration_parameters/settings.md#server_configuration_parameters-tcp_port_secure) تنظیمات. + - تنظیمات برای [SSL sertificates](server_configuration_parameters/settings.md#server_configuration_parameters-openssl). - Use proper parameters while connecting. For example, use the `port_secure` parameter with `clickhouse_client`. + استفاده از پارامترهای مناسب در حالی که اتصال. برای مثال با استفاده از `port_secure` پارامتر با `clickhouse_client`. -- User settings. +- تنظیمات کاربر. - You might be using the wrong user name or password. + شما ممکن است با استفاده از نام کاربری اشتباه و یا رمز عبور. 
-## Query Processing {#troubleshooting-does-not-process-queries} +## پردازش پرس و جو {#troubleshooting-does-not-process-queries} -If ClickHouse is not able to process the query, it sends an error description to the client. In the `clickhouse-client` you get a description of the error in the console. If you are using the HTTP interface, ClickHouse sends the error description in the response body. For example: +اگر فاحشه خانه است که قادر به پردازش پرس و جو نمی, این شرح خطا به مشتری می فرستد. در `clickhouse-client` شما دریافت می کنید شرح خطا در کنسول. اگر شما با استفاده از HTTP رابط ClickHouse می فرستد خطا توضیحات در پاسخ بدن. به عنوان مثال: ``` bash $ curl 'http://localhost:8123/' --data-binary "SELECT a" Code: 47, e.displayText() = DB::Exception: Unknown identifier: a. Note that there are no tables (FROM clause) in your query, context: required_names: 'a' source_tables: table_aliases: private_aliases: column_aliases: public_columns: 'a' masked_columns: array_join_columns: source_columns: , e.what() = DB::Exception ``` -If you start `clickhouse-client` with the `stack-trace` parameter, ClickHouse returns the server stack trace with the description of an error. +اگر شما شروع `clickhouse-client` با `stack-trace` پارامتر, خانه را برمی گرداند ردیابی پشته سرور با شرح خطا. -You might see a message about a broken connection. In this case, you can repeat the query. If the connection breaks every time you perform the query, check the server logs for errors. +شما ممکن است یک پیام در مورد یک اتصال شکسته را ببینید. در این مورد می توانید پرس و جو را تکرار کنید. اگر اتصال می شکند هر بار که شما انجام پرس و جو, بررسی سیاهههای مربوط به سرور برای اشتباهات. -## Efficiency of Query Processing {#troubleshooting-too-slow} +## کارایی پردازش پرس و جو {#troubleshooting-too-slow} -If you see that ClickHouse is working too slowly, you need to profile the load on the server resources and network for your queries. +اگر شما می بینید که تاتر در حال کار بیش از حد کند, شما نیاز به مشخصات بار بر روی منابع سرور و شبکه برای نمایش داده شد خود را. -You can use the clickhouse-benchmark utility to profile queries. It shows the number of queries processed per second, the number of rows processed per second, and percentiles of query processing times. +شما می توانید ابزار کلیک معیار به نمایش داده شد مشخصات استفاده کنید. این نشان می دهد تعداد نمایش داده شد پردازش در هر ثانیه, تعداد ردیف پردازش در هر ثانیه, و صدک از زمان پردازش پرس و جو. diff --git a/docs/fa/operations/update.md b/docs/fa/operations/update.md index b09eb707e77..392514bf8aa 100644 --- a/docs/fa/operations/update.md +++ b/docs/fa/operations/update.md @@ -1,10 +1,14 @@ --- -en_copy: true +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 47 +toc_title: "\u0628\u0647 \u0631\u0648\u0632 \u0631\u0633\u0627\u0646\u06CC \u06A9\u0644\ + \u06CC\u06A9" --- -# ClickHouse Update {#clickhouse-update} +# به روز رسانی کلیک {#clickhouse-update} -If ClickHouse was installed from deb packages, execute the following commands on the server: +اگر تاتر از بسته های دب نصب شد, اجرای دستورات زیر را بر روی سرور: ``` bash $ sudo apt-get update @@ -12,6 +16,6 @@ $ sudo apt-get install clickhouse-client clickhouse-server $ sudo service clickhouse-server restart ``` -If you installed ClickHouse using something other than the recommended deb packages, use the appropriate update method. +اگر شما نصب تاتر با استفاده از چیزی غیر از بسته های دب توصیه می شود, استفاده از روش به روز رسانی مناسب. -ClickHouse does not support a distributed update. 
The operation should be performed consecutively on each separate server. Do not update all the servers on a cluster simultaneously, or the cluster will be unavailable for some time. +کلیک می کند به روز رسانی توزیع را پشتیبانی نمی کند. این عملیات باید به صورت متوالی در هر سرور جداگانه انجام شود. هنوز تمام سرور بر روی یک خوشه به طور همزمان به روز رسانی نیست, یا خوشه برای برخی از زمان در دسترس نخواهد بود. diff --git a/docs/fa/operations/utilities/clickhouse-benchmark.md b/docs/fa/operations/utilities/clickhouse-benchmark.md new file mode 100644 index 00000000000..87ebab07c5d --- /dev/null +++ b/docs/fa/operations/utilities/clickhouse-benchmark.md @@ -0,0 +1,156 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 61 +toc_title: "\u06A9\u0644\u06CC\u06A9-\u0645\u0639\u06CC\u0627\u0631" +--- + +# کلیک-معیار {#clickhouse-benchmark} + +قابلیت اتصال به یک سرور کلیک و بارها و بارها نمایش داده شد مشخص می فرستد. + +نحو: + +``` bash +$ echo "single query" | clickhouse-benchmark [keys] +``` + +یا + +``` bash +$ clickhouse-benchmark [keys] <<< "single query" +``` + +اگر شما می خواهید برای ارسال مجموعه ای از نمایش داده شد, ایجاد یک فایل متنی و قرار دادن هر پرس و جو در رشته های فردی در این فایل. به عنوان مثال: + +``` sql +SELECT * FROM system.numbers LIMIT 10000000 +SELECT 1 +``` + +سپس این فایل را به یک ورودی استاندارد منتقل می کند `clickhouse-benchmark`. + +``` bash +clickhouse-benchmark [keys] < queries_file +``` + +## کلید {#clickhouse-benchmark-keys} + +- `-c N`, `--concurrency=N` — Number of queries that `clickhouse-benchmark` می فرستد به طور همزمان. مقدار پیش فرض: 1. +- `-d N`, `--delay=N` — Interval in seconds between intermediate reports (set 0 to disable reports). Default value: 1. +- `-h WORD`, `--host=WORD` — Server host. Default value: `localhost`. برای [مقایسه حالت](#clickhouse-benchmark-comparison-mode) شما می توانید چند استفاده کنید `-h` کلیدا +- `-p N`, `--port=N` — Server port. Default value: 9000. For the [حالت مقایسه](#clickhouse-benchmark-comparison-mode) شما می توانید چند استفاده کنید `-p` کلیدا +- `-i N`, `--iterations=N` — Total number of queries. Default value: 0. +- `-r`, `--randomize` — Random order of queries execution if there is more then one input query. +- `-s`, `--secure` — Using TLS connection. +- `-t N`, `--timelimit=N` — Time limit in seconds. `clickhouse-benchmark` متوقف می شود ارسال نمایش داده شد زمانی که محدودیت زمانی مشخص رسیده است. مقدار پیش فرض: 0 (محدودیت زمانی غیر فعال). +- `--confidence=N` — Level of confidence for T-test. Possible values: 0 (80%), 1 (90%), 2 (95%), 3 (98%), 4 (99%), 5 (99.5%). Default value: 5. In the [حالت مقایسه](#clickhouse-benchmark-comparison-mode) `clickhouse-benchmark` انجام [تی تست دانشجویان مستقل دو نمونه](https://en.wikipedia.org/wiki/Student%27s_t-test#Independent_two-sample_t-test) تست برای تعیین اینکه دو توزیع با سطح انتخاب شده اعتماد به نفس متفاوت نیست. +- `--cumulative` — Printing cumulative data instead of data per interval. +- `--database=DATABASE_NAME` — ClickHouse database name. Default value: `default`. +- `--json=FILEPATH` — JSON output. When the key is set, `clickhouse-benchmark` خروجی یک گزارش به مشخص جانسون فایل. +- `--user=USERNAME` — ClickHouse user name. Default value: `default`. +- `--password=PSWD` — ClickHouse user password. Default value: empty string. +- `--stacktrace` — Stack traces output. When the key is set, `clickhouse-bencmark` خروجی پشته اثری از استثنا. +- `--stage=WORD` — Query processing stage at server. 
ClickHouse stops query processing and returns answer to `clickhouse-benchmark` در مرحله مشخص شده. مقادیر ممکن: `complete`, `fetch_columns`, `with_mergeable_state`. مقدار پیشفرض: `complete`. +- `--help` — Shows the help message. + +اگر شما می خواهید به درخواست برخی از [تنظیمات](../../operations/settings/index.md) برای پرس و جو, عبور خود را به عنوان یک کلید `--= SETTING_VALUE`. به عنوان مثال, `--max_memory_usage=1048576`. + +## خروجی {#clickhouse-benchmark-output} + +به طور پیش فرض, `clickhouse-benchmark` گزارش برای هر `--delay` فاصله. + +نمونه ای از گزارش: + +``` text +Queries executed: 10. + +localhost:9000, queries 10, QPS: 6.772, RPS: 67904487.440, MiB/s: 518.070, result RPS: 67721584.984, result MiB/s: 516.675. + +0.000% 0.145 sec. +10.000% 0.146 sec. +20.000% 0.146 sec. +30.000% 0.146 sec. +40.000% 0.147 sec. +50.000% 0.148 sec. +60.000% 0.148 sec. +70.000% 0.148 sec. +80.000% 0.149 sec. +90.000% 0.150 sec. +95.000% 0.150 sec. +99.000% 0.150 sec. +99.900% 0.150 sec. +99.990% 0.150 sec. +``` + +در این گزارش شما می توانید پیدا کنید: + +- تعداد نمایش داده شد در `Queries executed:` رشته. + +- رشته وضعیت حاوی) به ترتیب (): + + - نقطه پایانی از سرور کلیک. + - تعداد نمایش داده شد پردازش شده است. + - QPS: QPS: چگونه بسیاری از نمایش داده شد سرور انجام شده در هر ثانیه در طول یک دوره مشخص شده در `--delay` استدلال کردن. + - سرور مجازی: چند ردیف سرور در هر ثانیه در طول یک دوره مشخص شده در `--delay` استدلال کردن. + - مگابایت بر ثانیه: چگونه بسیاری از مگابایت سرور در ثانیه در طول یک دوره مشخص شده در خواندن `--delay` استدلال کردن. + - نتیجه ریسمانهای: چگونه بسیاری از ردیف توسط سرور به نتیجه یک پرس و جو در هر ثانیه در طول یک دوره مشخص شده در قرار داده شده `--delay` استدلال کردن. + - چگونه بسیاری از مگابایت توسط سرور به نتیجه یک پرس و جو در هر ثانیه در طول یک دوره مشخص شده در قرار داده شده `--delay` استدلال کردن. + +- صدک از نمایش داده شد زمان اجرای. + +## حالت مقایسه {#clickhouse-benchmark-comparison-mode} + +`clickhouse-benchmark` می توانید اجرای برای دو سرور در حال اجرا تاتر مقایسه. + +برای استفاده از حالت مقایسه, مشخص نقطه پایانی هر دو سرور توسط دو جفت از `--host`, `--port` کلیدا کلید با هم توسط موقعیت در لیست استدلال همسان, اولین `--host` با اولین همسان `--port` و به همین ترتیب. `clickhouse-benchmark` ایجاد ارتباط به هر دو سرور, سپس نمایش داده شد می فرستد. هر پرس و جو خطاب به یک سرور به طور تصادفی انتخاب شده است. نتایج برای هر سرور به طور جداگانه نشان داده شده است. + +## مثال {#clickhouse-benchmark-example} + +``` bash +$ echo "SELECT * FROM system.numbers LIMIT 10000000 OFFSET 10000000" | clickhouse-benchmark -i 10 +``` + +``` text +Loaded 1 queries. + +Queries executed: 6. + +localhost:9000, queries 6, QPS: 6.153, RPS: 123398340.957, MiB/s: 941.455, result RPS: 61532982.200, result MiB/s: 469.459. + +0.000% 0.159 sec. +10.000% 0.159 sec. +20.000% 0.159 sec. +30.000% 0.160 sec. +40.000% 0.160 sec. +50.000% 0.162 sec. +60.000% 0.164 sec. +70.000% 0.165 sec. +80.000% 0.166 sec. +90.000% 0.166 sec. +95.000% 0.167 sec. +99.000% 0.167 sec. +99.900% 0.167 sec. +99.990% 0.167 sec. + + + +Queries executed: 10. + +localhost:9000, queries 10, QPS: 6.082, RPS: 121959604.568, MiB/s: 930.478, result RPS: 60815551.642, result MiB/s: 463.986. + +0.000% 0.159 sec. +10.000% 0.159 sec. +20.000% 0.160 sec. +30.000% 0.163 sec. +40.000% 0.164 sec. +50.000% 0.165 sec. +60.000% 0.166 sec. +70.000% 0.166 sec. +80.000% 0.167 sec. +90.000% 0.167 sec. +95.000% 0.170 sec. +99.000% 0.172 sec. +99.900% 0.172 sec. +99.990% 0.172 sec. 
+``` diff --git a/docs/fa/operations/utils/clickhouse-copier.md b/docs/fa/operations/utilities/clickhouse-copier.md similarity index 66% rename from docs/fa/operations/utils/clickhouse-copier.md rename to docs/fa/operations/utilities/clickhouse-copier.md index 489505481b5..14e16f89949 100644 --- a/docs/fa/operations/utils/clickhouse-copier.md +++ b/docs/fa/operations/utilities/clickhouse-copier.md @@ -1,46 +1,49 @@ --- -en_copy: true +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 59 +toc_title: "\u062A\u0627\u062A\u0631-\u06A9\u067E\u06CC" --- -# clickhouse-copier {#clickhouse-copier} +# تاتر-کپی {#clickhouse-copier} -Copies data from the tables in one cluster to tables in another (or the same) cluster. +کپی داده ها از جداول در یک خوشه به جداول در یکی دیگر از (یا همان) خوشه. -You can run multiple `clickhouse-copier` instances on different servers to perform the same job. ZooKeeper is used for syncing the processes. +شما می توانید چند اجرا `clickhouse-copier` نمونه بر روی سرور های مختلف برای انجام همان کار. باغ وحش برای همگام سازی فرایندها استفاده می شود. -After starting, `clickhouse-copier`: +پس از شروع, `clickhouse-copier`: -- Connects to ZooKeeper and receives: +- قابلیت اتصال به باغ وحش و دریافت: - - Copying jobs. - - The state of the copying jobs. + - شغل کپی. + - دولت از مشاغل کپی. -- It performs the jobs. +- این کار را انجام می دهد. - Each running process chooses the “closest” shard of the source cluster and copies the data into the destination cluster, resharding the data if necessary. + هر فرایند در حال اجرا را انتخاب “closest” سفال از خوشه منبع و کپی داده ها را به خوشه مقصد, تغییر شکل داده ها در صورت لزوم. -`clickhouse-copier` tracks the changes in ZooKeeper and applies them on the fly. +`clickhouse-copier` تغییرات باغ وحش را دنبال می کند و در پرواز اعمال می شود. -To reduce network traffic, we recommend running `clickhouse-copier` on the same server where the source data is located. +برای کاهش ترافیک شبکه توصیه می کنیم در حال اجرا `clickhouse-copier` در همان سرور که داده های منبع واقع شده است. -## Running clickhouse-copier {#running-clickhouse-copier} +## در حال اجرا تاتر-کپی {#running-clickhouse-copier} -The utility should be run manually: +ابزار باید به صورت دستی اجرا شود: ``` bash $ clickhouse-copier copier --daemon --config zookeeper.xml --task-path /task/path --base-dir /path/to/dir ``` -Parameters: +پارامترها: -- `daemon` — Starts `clickhouse-copier` in daemon mode. -- `config` — The path to the `zookeeper.xml` file with the parameters for the connection to ZooKeeper. -- `task-path` — The path to the ZooKeeper node. This node is used for syncing `clickhouse-copier` processes and storing tasks. Tasks are stored in `$task-path/description`. +- `daemon` — Starts `clickhouse-copier` در حالت شبح. +- `config` — The path to the `zookeeper.xml` فایل با پارامترهای اتصال به باغ وحش. +- `task-path` — The path to the ZooKeeper node. This node is used for syncing `clickhouse-copier` پردازش و ذخیره سازی وظایف. وظایف در ذخیره می شود `$task-path/description`. - `task-file` — Optional path to file with task configuration for initial upload to ZooKeeper. -- `task-upload-force` — Force upload `task-file` even if node already exists. -- `base-dir` — The path to logs and auxiliary files. When it starts, `clickhouse-copier` creates `clickhouse-copier_YYYYMMHHSS_` subdirectories in `$base-dir`. If this parameter is omitted, the directories are created in the directory where `clickhouse-copier` was launched. 
+- `task-upload-force` — Force upload `task-file` حتی اگر گره در حال حاضر وجود دارد. +- `base-dir` — The path to logs and auxiliary files. When it starts, `clickhouse-copier` ایجاد `clickhouse-copier_YYYYMMHHSS_` زیرشاخه در `$base-dir`. اگر این پارامتر حذف شده است, دایرکتوری ها در دایرکتوری که ایجاد `clickhouse-copier` راه اندازی شد. -## Format of zookeeper.xml {#format-of-zookeeper-xml} +## قالب باغ وحش.شمع {#format-of-zookeeper-xml} ``` xml @@ -59,7 +62,7 @@ Parameters: ``` -## Configuration of copying tasks {#configuration-of-copying-tasks} +## پیکربندی وظایف کپی کردن {#configuration-of-copying-tasks} ``` xml @@ -168,6 +171,6 @@ Parameters: ``` -`clickhouse-copier` tracks the changes in `/task/path/description` and applies them on the fly. For instance, if you change the value of `max_workers`, the number of processes running tasks will also change. +`clickhouse-copier` پیگیری تغییرات در `/task/path/description` و اونا رو تو پرواز بکار میبره برای مثال, اگر شما ارزش تغییر `max_workers` تعداد فرایندهای در حال اجرا وظایف نیز تغییر خواهد کرد. -[Original article](https://clickhouse.tech/docs/en/operations/utils/clickhouse-copier/) +[مقاله اصلی](https://clickhouse.tech/docs/en/operations/utils/clickhouse-copier/) diff --git a/docs/fa/operations/utilities/clickhouse-local.md b/docs/fa/operations/utilities/clickhouse-local.md new file mode 100644 index 00000000000..8a77363f694 --- /dev/null +++ b/docs/fa/operations/utilities/clickhouse-local.md @@ -0,0 +1,81 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 60 +toc_title: "\u06A9\u0644\u06CC\u06A9-\u0645\u062D\u0644\u06CC" +--- + +# کلیک-محلی {#clickhouse-local} + +این `clickhouse-local` برنامه شما را قادر به انجام پردازش سریع بر روی فایل های محلی, بدون نیاز به استقرار و پیکربندی سرور کلیک. + +داده هایی را می پذیرد که نشان دهنده جداول و نمایش داده شد با استفاده از [ClickHouse SQL گویش](../../sql_reference/index.md). + +`clickhouse-local` بنابراین پشتیبانی از بسیاری از ویژگی های و همان مجموعه ای از فرمت ها و موتورهای جدول با استفاده از هسته همان سرور تاتر. + +به طور پیش فرض `clickhouse-local` دسترسی به داده ها در همان میزبان ندارد, اما پشتیبانی از پیکربندی سرور در حال بارگذاری با استفاده از `--config-file` استدلال کردن. + +!!! warning "اخطار" + توصیه نمی شود که پیکربندی سرور تولید را بارگیری کنید `clickhouse-local` زیرا داده ها می توانند در صورت خطای انسانی صدمه ببینند. + +## استفاده {#usage} + +استفاده عمومی: + +``` bash +$ clickhouse-local --structure "table_structure" --input-format "format_of_incoming_data" -q "query" +``` + +نشانوندها: + +- `-S`, `--structure` — table structure for input data. +- `-if`, `--input-format` — input format, `TSV` به طور پیش فرض. +- `-f`, `--file` — path to data, `stdin` به طور پیش فرض. +- `-q` `--query` — queries to execute with `;` به عنوان دسیمترتراپی. +- `-N`, `--table` — table name where to put output data, `table` به طور پیش فرض. +- `-of`, `--format`, `--output-format` — output format, `TSV` به طور پیش فرض. +- `--stacktrace` — whether to dump debug output in case of exception. +- `--verbose` — more details on query execution. +- `-s` — disables `stderr` ثبت. +- `--config-file` — path to configuration file in same format as for ClickHouse server, by default the configuration empty. +- `--help` — arguments references for `clickhouse-local`. + +همچنین استدلال برای هر متغیر پیکربندی کلیک که معمولا به جای استفاده می شود وجود دارد `--config-file`. 
+ +## مثالها {#examples} + +``` bash +$ echo -e "1,2\n3,4" | clickhouse-local -S "a Int64, b Int64" -if "CSV" -q "SELECT * FROM table" +Read 2 rows, 32.00 B in 0.000 sec., 5182 rows/sec., 80.97 KiB/sec. +1 2 +3 4 +``` + +مثال قبلی همان است: + +``` bash +$ echo -e "1,2\n3,4" | clickhouse-local -q "CREATE TABLE table (a Int64, b Int64) ENGINE = File(CSV, stdin); SELECT a, b FROM table; DROP TABLE table" +Read 2 rows, 32.00 B in 0.000 sec., 4987 rows/sec., 77.93 KiB/sec. +1 2 +3 4 +``` + +حالا اجازه دهید خروجی کاربر حافظه برای هر کاربر یونیکس: + +``` bash +$ ps aux | tail -n +2 | awk '{ printf("%s\t%s\n", $1, $4) }' | clickhouse-local -S "user String, mem Float64" -q "SELECT user, round(sum(mem), 2) as memTotal FROM table GROUP BY user ORDER BY memTotal DESC FORMAT Pretty" +``` + +``` text +Read 186 rows, 4.15 KiB in 0.035 sec., 5302 rows/sec., 118.34 KiB/sec. +┏━━━━━━━━━━┳━━━━━━━━━━┓ +┃ user ┃ memTotal ┃ +┡━━━━━━━━━━╇━━━━━━━━━━┩ +│ bayonet │ 113.5 │ +├──────────┼──────────┤ +│ root │ 8.8 │ +├──────────┼──────────┤ +... +``` + +[مقاله اصلی](https://clickhouse.tech/docs/en/operations/utils/clickhouse-local/) diff --git a/docs/fa/operations/utilities/index.md b/docs/fa/operations/utilities/index.md new file mode 100644 index 00000000000..a8f4c01c69c --- /dev/null +++ b/docs/fa/operations/utilities/index.md @@ -0,0 +1,15 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_folder_title: Utilities +toc_priority: 56 +toc_title: "\u0628\u0631\u0631\u0633\u06CC \u0627\u062C\u0645\u0627\u0644\u06CC" +--- + +# ابزار کلیک {#clickhouse-utility} + +- [کلیک-محلی](clickhouse-local.md) — Allows running SQL queries on data without stopping the ClickHouse server, similar to how `awk` این کار را می کند. +- [تاتر-کپی](clickhouse-copier.md) — Copies (and reshards) data from one cluster to another cluster. +- [کلیک-معیار](clickhouse-benchmark.md) — Loads server with the custom queries and settings. + +[مقاله اصلی](https://clickhouse.tech/docs/en/operations/utils/) diff --git a/docs/fa/operations/utils/clickhouse-benchmark.md b/docs/fa/operations/utils/clickhouse-benchmark.md deleted file mode 100644 index 1d8ac3dec46..00000000000 --- a/docs/fa/operations/utils/clickhouse-benchmark.md +++ /dev/null @@ -1,153 +0,0 @@ ---- -en_copy: true ---- - -# clickhouse-benchmark {#clickhouse-benchmark} - -Connects to a ClickHouse server and repeatedly sends specified queries. - -Syntax: - -``` bash -$ echo "single query" | clickhouse-benchmark [keys] -``` - -or - -``` bash -$ clickhouse-benchmark [keys] <<< "single query" -``` - -If you want to send a set of queries, create a text file and place each query on the individual string in this file. For example: - -``` sql -SELECT * FROM system.numbers LIMIT 10000000 -SELECT 1 -``` - -Then pass this file to a standard input of `clickhouse-benchmark`. - -``` bash -clickhouse-benchmark [keys] < queries_file -``` - -## Keys {#clickhouse-benchmark-keys} - -- `-c N`, `--concurrency=N` — Number of queries that `clickhouse-benchmark` sends simultaneously. Default value: 1. -- `-d N`, `--delay=N` — Interval in seconds between intermediate reports (set 0 to disable reports). Default value: 1. -- `-h WORD`, `--host=WORD` — Server host. Default value: `localhost`. For the [comparison mode](#clickhouse-benchmark-comparison-mode) you can use multiple `-h` keys. -- `-p N`, `--port=N` — Server port. Default value: 9000. For the [comparison mode](#clickhouse-benchmark-comparison-mode) you can use multiple `-p` keys. 
-- `-i N`, `--iterations=N` — Total number of queries. Default value: 0.
-- `-r`, `--randomize` — Random order of query execution if there is more than one input query.
-- `-s`, `--secure` — Use a TLS connection.
-- `-t N`, `--timelimit=N` — Time limit in seconds. `clickhouse-benchmark` stops sending queries when the specified time limit is reached. Default value: 0 (time limit disabled).
-- `--confidence=N` — Level of confidence for the t-test. Possible values: 0 (80%), 1 (90%), 2 (95%), 3 (98%), 4 (99%), 5 (99.5%). Default value: 5. In the [comparison mode](#clickhouse-benchmark-comparison-mode) `clickhouse-benchmark` performs the [Independent two-sample Student’s t-test](https://en.wikipedia.org/wiki/Student%27s_t-test#Independent_two-sample_t-test) to determine whether the two distributions aren’t different with the selected level of confidence.
-- `--cumulative` — Printing cumulative data instead of data per interval.
-- `--database=DATABASE_NAME` — ClickHouse database name. Default value: `default`.
-- `--json=FILEPATH` — JSON output. When the key is set, `clickhouse-benchmark` outputs a report to the specified JSON file.
-- `--user=USERNAME` — ClickHouse user name. Default value: `default`.
-- `--password=PSWD` — ClickHouse user password. Default value: empty string.
-- `--stacktrace` — Stack traces output. When the key is set, `clickhouse-benchmark` outputs stack traces of exceptions.
-- `--stage=WORD` — Query processing stage at server. ClickHouse stops query processing and returns an answer to `clickhouse-benchmark` at the specified stage. Possible values: `complete`, `fetch_columns`, `with_mergeable_state`. Default value: `complete`.
-- `--help` — Shows the help message.
-
-If you want to apply some [settings](../../operations/settings/index.md) for queries, pass them as a key `--<session setting name>= SETTING_VALUE`. For example, `--max_memory_usage=1048576`.
-
-## Output {#clickhouse-benchmark-output}
-
-By default, `clickhouse-benchmark` reports for each `--delay` interval.
-
-Example of the report:
-
-``` text
-Queries executed: 10.
-
-localhost:9000, queries 10, QPS: 6.772, RPS: 67904487.440, MiB/s: 518.070, result RPS: 67721584.984, result MiB/s: 516.675.
-
-0.000%      0.145 sec.
-10.000%     0.146 sec.
-20.000%     0.146 sec.
-30.000%     0.146 sec.
-40.000%     0.147 sec.
-50.000%     0.148 sec.
-60.000%     0.148 sec.
-70.000%     0.148 sec.
-80.000%     0.149 sec.
-90.000%     0.150 sec.
-95.000%     0.150 sec.
-99.000%     0.150 sec.
-99.900%     0.150 sec.
-99.990%     0.150 sec.
-```
-
-In the report you can find:
-
-- Number of queries in the `Queries executed:` field.
-
-- Status string containing (in order):
-
-    - Endpoint of ClickHouse server.
-    - Number of processed queries.
-    - QPS: How many queries the server performed per second during a period specified in the `--delay` argument.
-    - RPS: How many rows the server read per second during a period specified in the `--delay` argument.
-    - MiB/s: How many mebibytes the server read per second during a period specified in the `--delay` argument.
-    - result RPS: How many rows the server placed into the result of a query per second during a period specified in the `--delay` argument.
-    - result MiB/s: How many mebibytes the server placed into the result of a query per second during a period specified in the `--delay` argument.
-
-- Percentiles of query execution time.
-
-## Comparison mode {#clickhouse-benchmark-comparison-mode}
-
-`clickhouse-benchmark` can compare performances for two running ClickHouse servers.
-
-To use the comparison mode, specify endpoints of both servers by two pairs of `--host`, `--port` keys. Keys are matched together by position in the arguments list: the first `--host` is matched with the first `--port`, and so on. `clickhouse-benchmark` establishes connections to both servers, then sends queries. Each query is addressed to a randomly selected server. The results are shown for each server separately.
-
-## Example {#clickhouse-benchmark-example}
-
-``` bash
-$ echo "SELECT * FROM system.numbers LIMIT 10000000 OFFSET 10000000" | clickhouse-benchmark -i 10
-```
-
-``` text
-Loaded 1 queries.
-
-Queries executed: 6.
-
-localhost:9000, queries 6, QPS: 6.153, RPS: 123398340.957, MiB/s: 941.455, result RPS: 61532982.200, result MiB/s: 469.459.
-
-0.000%      0.159 sec.
-10.000%     0.159 sec.
-20.000%     0.159 sec.
-30.000%     0.160 sec.
-40.000%     0.160 sec.
-50.000%     0.162 sec.
-60.000%     0.164 sec.
-70.000%     0.165 sec.
-80.000%     0.166 sec.
-90.000%     0.166 sec.
-95.000%     0.167 sec.
-99.000%     0.167 sec.
-99.900%     0.167 sec.
-99.990%     0.167 sec.
-
-
-
-Queries executed: 10.
-
-localhost:9000, queries 10, QPS: 6.082, RPS: 121959604.568, MiB/s: 930.478, result RPS: 60815551.642, result MiB/s: 463.986.
-
-0.000%      0.159 sec.
-10.000%     0.159 sec.
-20.000%     0.160 sec.
-30.000%     0.163 sec.
-40.000%     0.164 sec.
-50.000%     0.165 sec.
-60.000%     0.166 sec.
-70.000%     0.166 sec.
-80.000%     0.167 sec.
-90.000%     0.167 sec.
-95.000%     0.170 sec.
-99.000%     0.172 sec.
-99.900%     0.172 sec.
-99.990%     0.172 sec.
-```
diff --git a/docs/fa/operations/utils/clickhouse-local.md b/docs/fa/operations/utils/clickhouse-local.md
deleted file mode 100644
index b75afe212b5..00000000000
--- a/docs/fa/operations/utils/clickhouse-local.md
+++ /dev/null
@@ -1,78 +0,0 @@
----
-en_copy: true
----
-
-# clickhouse-local {#clickhouse-local}
-
-The `clickhouse-local` program enables you to perform fast processing on local files, without having to deploy and configure the ClickHouse server.
-
-Accepts data that represent tables and queries them using the [ClickHouse SQL dialect](../../query_language/index.md).
-
-`clickhouse-local` uses the same core as the ClickHouse server, so it supports most of the features and the same set of formats and table engines.
-
-By default `clickhouse-local` does not have access to data on the same host, but it supports loading server configuration using the `--config-file` argument.
-
-!!! warning "Warning"
-    It is not recommended to load the production server configuration into `clickhouse-local`, because data can be damaged in case of human error.
-
-## Usage {#usage}
-
-Basic usage:
-
-``` bash
-$ clickhouse-local --structure "table_structure" --input-format "format_of_incoming_data" -q "query"
-```
-
-Arguments:
-
-- `-S`, `--structure` — table structure for input data.
-- `-if`, `--input-format` — input format, `TSV` by default.
-- `-f`, `--file` — path to data, `stdin` by default.
-- `-q`, `--query` — queries to execute, with `;` as the delimiter.
-- `-N`, `--table` — table name where to put output data, `table` by default.
-- `-of`, `--format`, `--output-format` — output format, `TSV` by default.
-- `--stacktrace` — whether to dump debug output in case of exception.
-- `--verbose` — more details on query execution.
-- `-s` — disables `stderr` logging.
-- `--config-file` — path to a configuration file in the same format as for the ClickHouse server; by default the configuration is empty.
-- `--help` — arguments reference for `clickhouse-local`.
-
-There are also arguments for each ClickHouse configuration variable, which are more commonly used instead of `--config-file`.
-
-## Examples {#examples}
-
-``` bash
-$ echo -e "1,2\n3,4" | clickhouse-local -S "a Int64, b Int64" -if "CSV" -q "SELECT * FROM table"
-Read 2 rows, 32.00 B in 0.000 sec., 5182 rows/sec., 80.97 KiB/sec.
-1 2
-3 4
-```
-
-The previous example is the same as:
-
-``` bash
-$ echo -e "1,2\n3,4" | clickhouse-local -q "CREATE TABLE table (a Int64, b Int64) ENGINE = File(CSV, stdin); SELECT a, b FROM table; DROP TABLE table"
-Read 2 rows, 32.00 B in 0.000 sec., 4987 rows/sec., 77.93 KiB/sec.
-1 2
-3 4
-```
-
-Now let’s output memory usage for each Unix user:
-
-``` bash
-$ ps aux | tail -n +2 | awk '{ printf("%s\t%s\n", $1, $4) }' | clickhouse-local -S "user String, mem Float64" -q "SELECT user, round(sum(mem), 2) as memTotal FROM table GROUP BY user ORDER BY memTotal DESC FORMAT Pretty"
-```
-
-``` text
-Read 186 rows, 4.15 KiB in 0.035 sec., 5302 rows/sec., 118.34 KiB/sec.
-┏━━━━━━━━━━┳━━━━━━━━━━┓
-┃ user     ┃ memTotal ┃
-┡━━━━━━━━━━╇━━━━━━━━━━┩
-│ bayonet  │    113.5 │
-├──────────┼──────────┤
-│ root     │      8.8 │
-├──────────┼──────────┤
-...
-```
-
-[Original article](https://clickhouse.tech/docs/en/operations/utils/clickhouse-local/)
diff --git a/docs/fa/operations/utils/index.md b/docs/fa/operations/utils/index.md
deleted file mode 100644
index 62e4151ad28..00000000000
--- a/docs/fa/operations/utils/index.md
+++ /dev/null
@@ -1,11 +0,0 @@
----
-en_copy: true
----
-
-# ClickHouse Utility {#clickhouse-utility}
-
-- [clickhouse-local](clickhouse-local.md) — Allows running SQL queries on data without stopping the ClickHouse server, similar to how `awk` does this.
-- [clickhouse-copier](clickhouse-copier.md) — Copies (and reshards) data from one cluster to another cluster.
-- [clickhouse-benchmark](clickhouse-benchmark.md) — Loads the server with custom queries and settings.
-
-[Original article](https://clickhouse.tech/docs/en/operations/utils/)
diff --git a/docs/fa/query_language/agg_functions/combinators.md b/docs/fa/query_language/agg_functions/combinators.md
deleted file mode 100644
index a173e56fbea..00000000000
--- a/docs/fa/query_language/agg_functions/combinators.md
+++ /dev/null
@@ -1,163 +0,0 @@
----
-en_copy: true
----
-
-# Aggregate function combinators {#aggregate_functions_combinators}
-
-The name of an aggregate function can have a suffix appended to it. This changes the way the aggregate function works.
-
-## -If {#agg-functions-combinator-if}
-
-The suffix -If can be appended to the name of any aggregate function. In this case, the aggregate function accepts an extra argument – a condition (`UInt8` type). The aggregate function processes only the rows that trigger the condition. If the condition was not triggered even once, it returns a default value (usually zeros or empty strings).
-
-Examples: `sumIf(column, cond)`, `countIf(cond)`, `avgIf(x, cond)`, `quantilesTimingIf(level1, level2)(x, cond)`, `argMinIf(arg, val, cond)` and so on.
-
-With conditional aggregate functions, you can calculate aggregates for several conditions at once, without using subqueries and `JOIN`s. For example, in Yandex.Metrica, conditional aggregate functions are used to implement the segment comparison functionality.
-
-## -Array {#agg-functions-combinator-array}
-
-The -Array suffix can be appended to any aggregate function. In this case, the aggregate function takes arguments of the ‘Array(T)’ type (arrays) instead of ‘T’ type arguments. If the aggregate function accepts multiple arguments, these must be arrays of equal length.
When processing arrays, the aggregate function works like the original aggregate function across all array elements.
-
-Example 1: `sumArray(arr)` - Totals all the elements of all ‘arr’ arrays. In this example, it could have been written more simply: `sum(arraySum(arr))`.
-
-Example 2: `uniqArray(arr)` – Counts the number of unique elements in all ‘arr’ arrays. This could be done in an easier way: `uniq(arrayJoin(arr))`, but it’s not always possible to add ‘arrayJoin’ to a query.
-
--If and -Array can be combined. However, ‘Array’ must come first, then ‘If’. Examples: `uniqArrayIf(arr, cond)`, `quantilesTimingArrayIf(level1, level2)(arr, cond)`. Due to this order, the ‘cond’ argument won’t be an array.
-
-## -State {#agg-functions-combinator-state}
-
-If you apply this combinator, the aggregate function doesn’t return the resulting value (such as the number of unique values for the [uniq](reference.md#agg_function-uniq) function), but an intermediate state of the aggregation (for `uniq`, this is the hash table for calculating the number of unique values). This is an `AggregateFunction(...)` that can be used for further processing or stored in a table to finish aggregating later.
-
-To work with these states, use:
-
-- [AggregatingMergeTree](../../operations/table_engines/aggregatingmergetree.md) table engine.
-- [finalizeAggregation](../functions/other_functions.md#function-finalizeaggregation) function.
-- [runningAccumulate](../functions/other_functions.md#function-runningaccumulate) function.
-- [-Merge](#aggregate_functions_combinators_merge) combinator.
-- [-MergeState](#aggregate_functions_combinators_mergestate) combinator.
-
-## -Merge {#aggregate_functions_combinators-merge}
-
-If you apply this combinator, the aggregate function takes the intermediate aggregation state as an argument, combines the states to finish aggregation, and returns the resulting value.
-
-## -MergeState {#aggregate_functions_combinators-mergestate}
-
-Merges the intermediate aggregation states in the same way as the -Merge combinator. However, it doesn’t return the resulting value, but an intermediate aggregation state, similar to the -State combinator.
-
-## -ForEach {#agg-functions-combinator-foreach}
-
-Converts an aggregate function for tables into an aggregate function for arrays that aggregates the corresponding array items and returns an array of results. For example, `sumForEach` for the arrays `[1, 2]`, `[3, 4, 5]` and `[6, 7]` returns the result `[10, 13, 5]` after adding together the corresponding array items.
-
-## -OrDefault {#agg-functions-combinator-ordefault}
-
-Fills in the default value of the aggregate function’s return type if there is nothing to aggregate.
-
-``` sql
-SELECT avg(number), avgOrDefault(number) FROM numbers(0)
-```
-
-``` text
-┌─avg(number)─┬─avgOrDefault(number)─┐
-│         nan │                    0 │
-└─────────────┴──────────────────────┘
-```
-
-## -OrNull {#agg-functions-combinator-ornull}
-
-Fills in `null` if there is nothing to aggregate. The returned column will be nullable.
-
-``` sql
-SELECT avg(number), avgOrNull(number) FROM numbers(0)
-```
-
-``` text
-┌─avg(number)─┬─avgOrNull(number)─┐
-│         nan │              ᴺᵁᴸᴸ │
-└─────────────┴───────────────────┘
-```
-
--OrDefault and -OrNull can be combined with other combinators. It is useful when the aggregate function does not accept empty input.
-
-``` sql
-SELECT avgOrNullIf(x, x > 10)
-FROM
-(
-    SELECT toDecimal32(1.23, 2) AS x
-)
-```
-
-``` text
-┌─avgOrNullIf(x, greater(x, 10))─┐
-│                           ᴺᵁᴸᴸ │
-└────────────────────────────────┘
-```
-
-## -Resample {#agg-functions-combinator-resample}
-
-Lets you divide data into groups, and then separately aggregate the data in those groups. Groups are created by splitting the values from one column into intervals.
-
-``` sql
-<aggFunction>Resample(start, end, step)(<aggFunction_params>, resampling_key)
-```
-
-**Parameters**
-
-- `start` — Starting value of the whole required interval for `resampling_key` values.
-- `end` — Ending value of the whole required interval for `resampling_key` values. The whole interval doesn’t include the `end` value, i.e. `[start, end)`.
-- `step` — Step for separating the whole interval into subintervals. The `aggFunction` is executed over each of those subintervals independently.
-- `resampling_key` — Column whose values are used for separating data into intervals.
-- `aggFunction_params` — `aggFunction` parameters.
-
-**Returned values**
-
-- Array of `aggFunction` results for each subinterval.
-
-**Example**
-
-Consider the `people` table with the following data:
-
-``` text
-┌─name───┬─age─┬─wage─┐
-│ John   │  16 │   10 │
-│ Alice  │  30 │   15 │
-│ Mary   │  35 │    8 │
-│ Evelyn │  48 │ 11.5 │
-│ David  │  62 │  9.9 │
-│ Brian  │  60 │   16 │
-└────────┴─────┴──────┘
-```
-
-Let’s get the names of the people whose age lies in the intervals of `[30,60)` and `[60,75)`. Since we use integer representation for age, we get ages in the `[30, 59]` and `[60,74]` intervals.
-
-To aggregate names in an array, we use the [groupArray](reference.md#agg_function-grouparray) aggregate function. It takes one argument. In our case, it’s the `name` column. The `groupArrayResample` function should use the `age` column to aggregate names by age. To define the required intervals, we pass the `30, 75, 30` arguments into the `groupArrayResample` function.
-
-``` sql
-SELECT groupArrayResample(30, 75, 30)(name, age) FROM people
-```
-
-``` text
-┌─groupArrayResample(30, 75, 30)(name, age)─────┐
-│ [['Alice','Mary','Evelyn'],['David','Brian']] │
-└───────────────────────────────────────────────┘
-```
-
-Consider the results.
-
-`John` is out of the sample because he’s too young. Other people are distributed according to the specified age intervals.
-
-Now let’s count the total number of people and their average wage in the specified age intervals.
-
-``` sql
-SELECT
-    countResample(30, 75, 30)(name, age) AS amount,
-    avgResample(30, 75, 30)(wage, age) AS avg_wage
-FROM people
-```
-
-``` text
-┌─amount─┬─avg_wage──────────────────┐
-│ [3,2]  │ [11.5,12.949999809265137] │
-└────────┴───────────────────────────┘
-```
-
-[Original article](https://clickhouse.tech/docs/en/query_language/agg_functions/combinators/)
diff --git a/docs/fa/query_language/agg_functions/index.md b/docs/fa/query_language/agg_functions/index.md
deleted file mode 100644
index c439ddb1e6a..00000000000
--- a/docs/fa/query_language/agg_functions/index.md
+++ /dev/null
@@ -1,58 +0,0 @@
----
-en_copy: true
----
-
-# Aggregate functions {#aggregate-functions}
-
-Aggregate functions work in the [normal](http://www.sql-tutorial.com/sql-aggregate-functions-sql-tutorial) way as expected by database experts.
-
-ClickHouse also supports:
-
-- [Parametric aggregate functions](parametric_functions.md#aggregate_functions_parametric), which accept other parameters in addition to columns.
-- [Combinators](combinators.md#aggregate_functions_combinators), which change the behavior of aggregate functions.
-
-## NULL processing {#null-processing}
-
-During aggregation, all `NULL`s are skipped.
-
-**Examples:**
-
-Consider this table:
-
-``` text
-┌─x─┬────y─┐
-│ 1 │    2 │
-│ 2 │ ᴺᵁᴸᴸ │
-│ 3 │    2 │
-│ 3 │    3 │
-│ 3 │ ᴺᵁᴸᴸ │
-└───┴──────┘
-```
-
-Let’s say you need to total the values in the `y` column:
-
-``` sql
-SELECT sum(y) FROM t_null_big
-```
-
-``` text
-┌─sum(y)─┐
-│      7 │
-└────────┘
-```
-
-The `sum` function interprets `NULL` as `0`. In particular, this means that if the function receives input of a selection where all the values are `NULL`, then the result will be `0`, not `NULL`.
-
-Now you can use the `groupArray` function to create an array from the `y` column:
-
-``` sql
-SELECT groupArray(y) FROM t_null_big
-```
-
-``` text
-┌─groupArray(y)─┐
-│ [2,2,3]       │
-└───────────────┘
-```
-
-`groupArray` does not include `NULL` in the resulting array.
-
-[Original article](https://clickhouse.tech/docs/en/query_language/agg_functions/)
diff --git a/docs/fa/query_language/agg_functions/parametric_functions.md b/docs/fa/query_language/agg_functions/parametric_functions.md
deleted file mode 100644
index d4e29feff0e..00000000000
--- a/docs/fa/query_language/agg_functions/parametric_functions.md
+++ /dev/null
@@ -1,496 +0,0 @@
----
-en_copy: true
----
-
-# Parametric aggregate functions {#aggregate_functions_parametric}
-
-Some aggregate functions can accept not only argument columns (used for compression), but a set of parameters – constants for initialization. The syntax is two pairs of brackets instead of one. The first is for parameters, and the second is for arguments.
-
-## histogram {#histogram}
-
-Calculates an adaptive histogram. It doesn’t guarantee precise results.
-
-``` sql
-histogram(number_of_bins)(values)
-```
-
-The function uses [A Streaming Parallel Decision Tree Algorithm](http://jmlr.org/papers/volume11/ben-haim10a/ben-haim10a.pdf). The borders of histogram bins are adjusted as new data enters the function. In the common case, the widths of bins are not equal.
-
-**Parameters**
-
-`number_of_bins` — Upper limit for the number of bins in the histogram. The function automatically calculates the number of bins. It tries to reach the specified number of bins, but if it fails, it uses fewer bins.
-`values` — [Expression](../syntax.md#syntax-expressions) resulting in input values.
-
-**Returned values**
-
-- [Array](../../data_types/array.md) of [Tuples](../../data_types/tuple.md) of the following format:
-
-        ```
-        [(lower_1, upper_1, height_1), ... (lower_N, upper_N, height_N)]
-        ```
-
-        - `lower` — Lower bound of the bin.
-        - `upper` — Upper bound of the bin.
-        - `height` — Calculated height of the bin.
-
-**Example**
-
-``` sql
-SELECT histogram(5)(number + 1)
-FROM (
-    SELECT *
-    FROM system.numbers
-    LIMIT 20
-)
-```
-
-``` text
-┌─histogram(5)(plus(number, 1))───────────────────────────────────────────┐
-│ [(1,4.5,4),(4.5,8.5,4),(8.5,12.75,4.125),(12.75,17,4.625),(17,20,3.25)] │
-└─────────────────────────────────────────────────────────────────────────┘
-```
-
-You can visualize a histogram with the [bar](../functions/other_functions.md#function-bar) function, for example:
-
-``` sql
-WITH histogram(5)(rand() % 100) AS hist
-SELECT
-    arrayJoin(hist).3 AS height,
-    bar(height, 0, 6, 5) AS bar
-FROM
-(
-    SELECT *
-    FROM system.numbers
-    LIMIT 20
-)
-```
-
-``` text
-┌─height─┬─bar───┐
-│  2.125 │ █▋    │
-│   3.25 │ ██▌   │
-│  5.625 │ ████▏ │
-│  5.625 │ ████▏ │
-│  3.375 │ ██▌   │
-└────────┴───────┘
-```
-
-In this case, you should remember that you don’t know the histogram bin borders.
-
-## sequenceMatch(pattern)(timestamp, cond1, cond2, …) {#function-sequencematch}
-
-Checks whether the sequence contains an event chain that matches the pattern.
-
-``` sql
-sequenceMatch(pattern)(timestamp, cond1, cond2, ...)
-```
-
-!!! warning "Warning"
-    Events that occur at the same second may lie in the sequence in an undefined order, affecting the result.
-
-**Parameters**
-
-- `pattern` — Pattern string. See [Pattern syntax](#sequence-function-pattern-syntax).
-
-- `timestamp` — Column considered to contain time data. Typical data types are `Date` and `DateTime`. You can also use any of the supported [UInt](../../data_types/int_uint.md) data types.
-
-- `cond1`, `cond2` — Conditions that describe the chain of events. Data type: `UInt8`. You can pass up to 32 condition arguments. The function takes only the events described in these conditions into account. If the sequence contains data that isn’t described in a condition, the function skips it.
-
-**Returned values**
-
-- 1, if the pattern is matched.
-- 0, if the pattern isn’t matched.
-
-Type: `UInt8`.
-
-
-**Pattern syntax**
-
-- `(?N)` — Matches the condition argument at position `N`. Conditions are numbered in the `[1, 32]` range. For example, `(?1)` matches the argument passed to the `cond1` parameter.
-
-- `.*` — Matches any number of events. You don’t need conditional arguments to match this element of the pattern.
-
-- `(?t operator value)` — Sets the time in seconds that should separate two events. For example, pattern `(?1)(?t>1800)(?2)` matches events that occur more than 1800 seconds from each other. An arbitrary number of any events can lie between these events. You can use the `>=`, `>`, `<`, `<=` operators.
-
-**Examples**
-
-Consider data in the `t` table:
-
-``` text
-┌─time─┬─number─┐
-│    1 │      1 │
-│    2 │      3 │
-│    3 │      2 │
-└──────┴────────┘
-```
-
-Perform the query:
-
-``` sql
-SELECT sequenceMatch('(?1)(?2)')(time, number = 1, number = 2) FROM t
-```
-
-``` text
-┌─sequenceMatch('(?1)(?2)')(time, equals(number, 1), equals(number, 2))─┐
-│                                                                     1 │
-└───────────────────────────────────────────────────────────────────────┘
-```
-
-The function found the event chain where number 2 follows number 1. It skipped number 3 between them, because the number is not described as an event. If we want to take this number into account when searching for the event chain given in the example, we should make a condition for it.
-
-``` sql
-SELECT sequenceMatch('(?1)(?2)')(time, number = 1, number = 2, number = 3) FROM t
-```
-
-``` text
-┌─sequenceMatch('(?1)(?2)')(time, equals(number, 1), equals(number, 2), equals(number, 3))─┐
-│                                                                                        0 │
-└──────────────────────────────────────────────────────────────────────────────────────────┘
-```
-
-In this case, the function couldn’t find the event chain matching the pattern, because the event for number 3 occurred between 1 and 2. If in the same case we checked the condition for number 4, the sequence would match the pattern.
-
-``` sql
-SELECT sequenceMatch('(?1)(?2)')(time, number = 1, number = 2, number = 4) FROM t
-```
-
-``` text
-┌─sequenceMatch('(?1)(?2)')(time, equals(number, 1), equals(number, 2), equals(number, 4))─┐
-│                                                                                        1 │
-└──────────────────────────────────────────────────────────────────────────────────────────┘
-```
-
-**See Also**
-
-- [sequenceCount](#function-sequencecount)
-
-## sequenceCount(pattern)(time, cond1, cond2, …) {#function-sequencecount}
-
-Counts the number of event chains that matched the pattern. The function searches event chains that don’t overlap. It starts to search for the next chain after the current chain is matched.
-
-!!! warning "Warning"
-    Events that occur at the same second may lie in the sequence in an undefined order, affecting the result.
-
-``` sql
-sequenceCount(pattern)(timestamp, cond1, cond2, ...)
-```
-
-**Parameters**
-
-- `pattern` — Pattern string. See [Pattern syntax](#sequence-function-pattern-syntax).
-
-- `timestamp` — Column considered to contain time data. Typical data types are `Date` and `DateTime`. You can also use any of the supported [UInt](../../data_types/int_uint.md) data types.
-
-- `cond1`, `cond2` — Conditions that describe the chain of events. Data type: `UInt8`. You can pass up to 32 condition arguments. The function takes only the events described in these conditions into account. If the sequence contains data that isn’t described in a condition, the function skips it.
-
-**Returned values**
-
-- Number of non-overlapping event chains that are matched.
-
-Type: `UInt64`.
-
-**Example**
-
-Consider data in the `t` table:
-
-``` text
-┌─time─┬─number─┐
-│    1 │      1 │
-│    2 │      3 │
-│    3 │      2 │
-│    4 │      1 │
-│    5 │      3 │
-│    6 │      2 │
-└──────┴────────┘
-```
-
-Count how many times the number 2 occurs after the number 1 with any amount of other numbers between them:
-
-``` sql
-SELECT sequenceCount('(?1).*(?2)')(time, number = 1, number = 2) FROM t
-```
-
-``` text
-┌─sequenceCount('(?1).*(?2)')(time, equals(number, 1), equals(number, 2))─┐
-│                                                                       2 │
-└─────────────────────────────────────────────────────────────────────────┘
-```
-
-**See Also**
-
-- [sequenceMatch](#function-sequencematch)
-
-## windowFunnel {#windowfunnel}
-
-Searches for event chains in a sliding time window and calculates the maximum number of events that occurred from the chain.
-
-The function works according to the algorithm:
-
-- The function searches for data that triggers the first condition in the chain and sets the event counter to 1. This is the moment when the sliding window starts.
-
-- If events from the chain occur sequentially within the window, the counter is incremented. If the sequence of events is disrupted, the counter isn’t incremented.
-
-- If the data has multiple event chains at varying points of completion, the function will only output the size of the longest chain.
-
-**Syntax**
-
-``` sql
-windowFunnel(window, [mode])(timestamp, cond1, cond2, ..., condN)
-```
-
-**Parameters**
-
-- `window` — Length of the sliding window in seconds.
-- `mode` — An optional argument.
-    - `'strict'` — When `'strict'` is set, `windowFunnel()` applies the conditions only to unique values.
-- `timestamp` — Name of the column containing the timestamp. Data types supported: [Date](../../data_types/date.md), [DateTime](../../data_types/datetime.md#data_type-datetime) and other unsigned integer types (note that even though timestamp supports the `UInt64` type, its value can’t exceed the Int64 maximum, which is 2^63 - 1).
-- `cond` — Conditions or data describing the chain of events. [UInt8](../../data_types/int_uint.md).
-
-**Returned value**
-
-The maximum number of consecutive triggered conditions from the chain within the sliding time window.
-All the chains in the selection are analyzed.
-
-Type: `Integer`.
-
-**Example**
-
-Determine if a set period of time is enough for the user to select a phone and purchase it twice in the online store.
-
-Set the following chain of events:
-
-1.  The user logged in to their account on the store (`eventID = 1003`).
-2.  The user searches for a phone (`eventID = 1007, product = 'phone'`).
-3.  The user placed an order (`eventID = 1009`).
-4.  The user made the order again (`eventID = 1010`).
-
-Input table:
-
-``` text
-┌─event_date─┬─user_id─┬───────────timestamp─┬─eventID─┬─product─┐
-│ 2019-01-28 │       1 │ 2019-01-29 10:00:00 │    1003 │ phone   │
-└────────────┴─────────┴─────────────────────┴─────────┴─────────┘
-┌─event_date─┬─user_id─┬───────────timestamp─┬─eventID─┬─product─┐
-│ 2019-01-31 │       1 │ 2019-01-31 09:00:00 │    1007 │ phone   │
-└────────────┴─────────┴─────────────────────┴─────────┴─────────┘
-┌─event_date─┬─user_id─┬───────────timestamp─┬─eventID─┬─product─┐
-│ 2019-01-30 │       1 │ 2019-01-30 08:00:00 │    1009 │ phone   │
-└────────────┴─────────┴─────────────────────┴─────────┴─────────┘
-┌─event_date─┬─user_id─┬───────────timestamp─┬─eventID─┬─product─┐
-│ 2019-02-01 │       1 │ 2019-02-01 08:00:00 │    1010 │ phone   │
-└────────────┴─────────┴─────────────────────┴─────────┴─────────┘
-```
-
-Find out how far the user `user_id` could get through the chain in the period of January-February 2019.
-
-Query:
-
-``` sql
-SELECT
-    level,
-    count() AS c
-FROM
-(
-    SELECT
-        user_id,
-        windowFunnel(6048000000000000)(timestamp, eventID = 1003, eventID = 1009, eventID = 1007, eventID = 1010) AS level
-    FROM trend
-    WHERE (event_date >= '2019-01-01') AND (event_date <= '2019-02-02')
-    GROUP BY user_id
-)
-GROUP BY level
-ORDER BY level ASC
-```
-
-Result:
-
-``` text
-┌─level─┬─c─┐
-│     4 │ 1 │
-└───────┴───┘
-```
-
-## retention {#retention}
-
-The function takes as arguments a set of 1 to 32 conditions of type `UInt8` that indicate whether a certain condition was met for the event.
-Any condition can be specified as an argument (as in [WHERE](../../query_language/select.md#select-where)).
-
-The conditions, except the first, apply in pairs: the result of the second will be true if the first and second are true, the result of the third if the first and third are true, etc.
-
-**Syntax**
-
-``` sql
-retention(cond1, cond2, ..., cond32);
-```
-
-**Parameters**
-
-- `cond` — an expression that returns a `UInt8` result (1 or 0).
-
-**Returned value**
-
-The array of 1 or 0.
-
-- 1 — condition was met for the event.
-- 0 — condition wasn’t met for the event.
-
-Type: `UInt8`.
-
-**Example**
-
-Let’s consider an example of calculating the `retention` function to determine site traffic.
-
-**1.** Create a table to illustrate an example.
-
-``` sql
-CREATE TABLE retention_test(date Date, uid Int32) ENGINE = Memory;
-
-INSERT INTO retention_test SELECT '2020-01-01', number FROM numbers(5);
-INSERT INTO retention_test SELECT '2020-01-02', number FROM numbers(10);
-INSERT INTO retention_test SELECT '2020-01-03', number FROM numbers(15);
-```
-
-Query:
-
-``` sql
-SELECT * FROM retention_test
-```
-
-Result:
-
-``` text
-┌───────date─┬─uid─┐
-│ 2020-01-01 │   0 │
-│ 2020-01-01 │   1 │
-│ 2020-01-01 │   2 │
-│ 2020-01-01 │   3 │
-│ 2020-01-01 │   4 │
-└────────────┴─────┘
-┌───────date─┬─uid─┐
-│ 2020-01-02 │   0 │
-│ 2020-01-02 │   1 │
-│ 2020-01-02 │   2 │
-│ 2020-01-02 │   3 │
-│ 2020-01-02 │   4 │
-│ 2020-01-02 │   5 │
-│ 2020-01-02 │   6 │
-│ 2020-01-02 │   7 │
-│ 2020-01-02 │   8 │
-│ 2020-01-02 │   9 │
-└────────────┴─────┘
-┌───────date─┬─uid─┐
-│ 2020-01-03 │   0 │
-│ 2020-01-03 │   1 │
-│ 2020-01-03 │   2 │
-│ 2020-01-03 │   3 │
-│ 2020-01-03 │   4 │
-│ 2020-01-03 │   5 │
-│ 2020-01-03 │   6 │
-│ 2020-01-03 │   7 │
-│ 2020-01-03 │   8 │
-│ 2020-01-03 │   9 │
-│ 2020-01-03 │  10 │
-│ 2020-01-03 │  11 │
-│ 2020-01-03 │  12 │
-│ 2020-01-03 │  13 │
-│ 2020-01-03 │  14 │
-└────────────┴─────┘
-```
-
-**2.** Group users by unique ID `uid` using the `retention` function.
-
-Query:
-
-``` sql
-SELECT
-    uid,
-    retention(date = '2020-01-01', date = '2020-01-02', date = '2020-01-03') AS r
-FROM retention_test
-WHERE date IN ('2020-01-01', '2020-01-02', '2020-01-03')
-GROUP BY uid
-ORDER BY uid ASC
-```
-
-Result:
-
-``` text
-┌─uid─┬─r───────┐
-│   0 │ [1,1,1] │
-│   1 │ [1,1,1] │
-│   2 │ [1,1,1] │
-│   3 │ [1,1,1] │
-│   4 │ [1,1,1] │
-│   5 │ [0,0,0] │
-│   6 │ [0,0,0] │
-│   7 │ [0,0,0] │
-│   8 │ [0,0,0] │
-│   9 │ [0,0,0] │
-│  10 │ [0,0,0] │
-│  11 │ [0,0,0] │
-│  12 │ [0,0,0] │
-│  13 │ [0,0,0] │
-│  14 │ [0,0,0] │
-└─────┴─────────┘
-```
-
-**3.** Calculate the total number of site visits per day.
-
-Query:
-
-``` sql
-SELECT
-    sum(r[1]) AS r1,
-    sum(r[2]) AS r2,
-    sum(r[3]) AS r3
-FROM
-(
-    SELECT
-        uid,
-        retention(date = '2020-01-01', date = '2020-01-02', date = '2020-01-03') AS r
-    FROM retention_test
-    WHERE date IN ('2020-01-01', '2020-01-02', '2020-01-03')
-    GROUP BY uid
-)
-```
-
-Result:
-
-``` text
-┌─r1─┬─r2─┬─r3─┐
-│  5 │  5 │  5 │
-└────┴────┴────┘
-```
-
-Where:
-
-- `r1` — the number of unique visitors who visited the site during 2020-01-01 (the `cond1` condition).
-- `r2` — the number of unique visitors who visited the site during a specific time period between 2020-01-01 and 2020-01-02 (`cond1` and `cond2` conditions).
-- `r3` — the number of unique visitors who visited the site during a specific time period between 2020-01-01 and 2020-01-03 (`cond1` and `cond3` conditions).
-
-## uniqUpTo(N)(x) {#uniquptonx}
-
-Calculates the number of different argument values if it is less than or equal to N. If the number of different argument values is greater than N, it returns N + 1.
-
-Recommended for use with small Ns, up to 10. The maximum value of N is 100.
-
-For the state of an aggregate function, it uses an amount of memory equal to 1 + N multiplied by the size of one value, in bytes.
-For strings, it stores a non-cryptographic hash of 8 bytes. That is, the calculation is approximated for strings.
-
-The function also works for several arguments.
-
-It works as fast as possible, except for cases when a large N value is used and the number of unique values is slightly less than N.
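-
-In query form, the recommendation in the usage example below might look like this (a minimal sketch; the table name `hits` and the columns `SearchPhrase` and `UserID` are assumptions for illustration, not part of the original reference):
-
-``` sql
-/* Keywords with at least 5 unique users: uniqUpTo(4) caps the distinct    */
-/* count at 4 + 1 = 5, so `>= 5` means "5 or more distinct UserID values". */
-SELECT SearchPhrase
-FROM hits
-GROUP BY SearchPhrase
-HAVING uniqUpTo(4)(UserID) >= 5
-```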
-
-Usage example:
-
-``` text
-Problem: Generate a report that shows only keywords that produced at least 5 unique users.
-Solution: Write in the GROUP BY query SearchPhrase HAVING uniqUpTo(4)(UserID) >= 5
-```
-
-[Original article](https://clickhouse.tech/docs/en/query_language/agg_functions/parametric_functions/)
-
-## sumMapFiltered(keys\_to\_keep)(keys, values) {#summapfilteredkeys-to-keepkeys-values}
-
-Same behavior as [sumMap](reference.md#agg_functions-summap) except that an array of keys is passed as a parameter. This can be especially useful when working with a high cardinality of keys.
diff --git a/docs/fa/query_language/agg_functions/reference.md b/docs/fa/query_language/agg_functions/reference.md
deleted file mode 100644
index 31de8bf1226..00000000000
--- a/docs/fa/query_language/agg_functions/reference.md
+++ /dev/null
@@ -1,1834 +0,0 @@
----
-en_copy: true
----
-
-# Function Reference {#function-reference}
-
-## count {#agg_function-count}
-
-Counts the number of rows or not-NULL values.
-
-ClickHouse supports the following syntaxes for `count`:
-- `count(expr)` or `COUNT(DISTINCT expr)`.
-- `count()` or `COUNT(*)`. The `count()` syntax is ClickHouse-specific.
-
-**Parameters**
-
-The function can take:
-
-- Zero parameters.
-- One [expression](../syntax.md#syntax-expressions).
-
-**Returned value**
-
-- If the function is called without parameters, it counts the number of rows.
-- If the [expression](../syntax.md#syntax-expressions) is passed, then the function counts how many times this expression returned a non-null result. If the expression returns a [Nullable](../../data_types/nullable.md)-type value, then the result of `count` remains non-`Nullable`. The function returns 0 if the expression returned `NULL` for all the rows.
-
-In both cases the type of the returned value is [UInt64](../../data_types/int_uint.md).
-
-**Details**
-
-ClickHouse supports the `COUNT(DISTINCT ...)` syntax. The behavior of this construction depends on the [count\_distinct\_implementation](../../operations/settings/settings.md#settings-count_distinct_implementation) setting. It defines which of the [uniq\*](#agg_function-uniq) functions is used to perform the operation. The default is the [uniqExact](#agg_function-uniqexact) function.
-
-The `SELECT count() FROM table` query is not optimized, because the number of entries in the table is not stored separately. It chooses a small column from the table and counts the number of values in it.
-
-**Examples**
-
-Example 1:
-
-``` sql
-SELECT count() FROM t
-```
-
-``` text
-┌─count()─┐
-│       5 │
-└─────────┘
-```
-
-Example 2:
-
-``` sql
-SELECT name, value FROM system.settings WHERE name = 'count_distinct_implementation'
-```
-
-``` text
-┌─name──────────────────────────┬─value─────┐
-│ count_distinct_implementation │ uniqExact │
-└───────────────────────────────┴───────────┘
-```
-
-``` sql
-SELECT count(DISTINCT num) FROM t
-```
-
-``` text
-┌─uniqExact(num)─┐
-│              3 │
-└────────────────┘
-```
-
-This example shows that `count(DISTINCT num)` is performed by the `uniqExact` function according to the `count_distinct_implementation` setting value.
-
-## any(x) {#agg_function-any}
-
-Selects the first encountered value.
-The query can be executed in any order and even in a different order each time, so the result of this function is indeterminate.
-To get a determinate result, you can use the ‘min’ or ‘max’ function instead of ‘any’.
-
-In some cases, you can rely on the order of execution. This applies to cases when SELECT comes from a subquery that uses ORDER BY.
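-
-A minimal sketch of that case (the table `t` with columns `ts` and `x` is a hypothetical example, not part of the original reference):
-
-``` sql
-/* The subquery fixes the row order, so any(x) picks the value from the */
-/* first row by ts. Assumes a hypothetical table t(ts DateTime, x String). */
-SELECT any(x) FROM (SELECT x FROM t ORDER BY ts)
-```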
-
-When a `SELECT` query has the `GROUP BY` clause or at least one aggregate function, ClickHouse (in contrast to MySQL) requires that all expressions in the `SELECT`, `HAVING`, and `ORDER BY` clauses be calculated from keys or from aggregate functions. In other words, each column selected from the table must be used either in keys or inside aggregate functions. To get behavior like in MySQL, you can put the other columns in the `any` aggregate function.
-
-## anyHeavy(x) {#anyheavyx}
-
-Selects a frequently occurring value using the [heavy hitters](http://www.cs.umd.edu/~samir/498/karp.pdf) algorithm. If there is a value that occurs more than in half the cases in each of the query’s execution threads, this value is returned. Normally, the result is nondeterministic.
-
-``` sql
-anyHeavy(column)
-```
-
-**Arguments**
-
-- `column` – The column name.
-
-**Example**
-
-Take the [OnTime](../../getting_started/example_datasets/ontime.md) data set and select any frequently occurring value in the `AirlineID` column.
-
-``` sql
-SELECT anyHeavy(AirlineID) AS res
-FROM ontime
-```
-
-``` text
-┌───res─┐
-│ 19690 │
-└───────┘
-```
-
-## anyLast(x) {#anylastx}
-
-Selects the last value encountered.
-The result is just as indeterminate as for the `any` function.
-
-## groupBitAnd {#groupbitand}
-
-Applies bitwise `AND` to a series of numbers.
-
-``` sql
-groupBitAnd(expr)
-```
-
-**Parameters**
-
-`expr` – An expression that results in a `UInt*` type.
-
-**Return value**
-
-Value of the `UInt*` type.
-
-**Example**
-
-Test data:
-
-``` text
-binary     decimal
-00101100 = 44
-00011100 = 28
-00001101 = 13
-01010101 = 85
-```
-
-Query:
-
-``` sql
-SELECT groupBitAnd(num) FROM t
-```
-
-Where `num` is the column with the test data.
-
-Result:
-
-``` text
-binary     decimal
-00000100 = 4
-```
-
-## groupBitOr {#groupbitor}
-
-Applies bitwise `OR` to a series of numbers.
-
-``` sql
-groupBitOr(expr)
-```
-
-**Parameters**
-
-`expr` – An expression that results in a `UInt*` type.
-
-**Return value**
-
-Value of the `UInt*` type.
-
-**Example**
-
-Test data:
-
-``` text
-binary     decimal
-00101100 = 44
-00011100 = 28
-00001101 = 13
-01010101 = 85
-```
-
-Query:
-
-``` sql
-SELECT groupBitOr(num) FROM t
-```
-
-Where `num` is the column with the test data.
-
-Result:
-
-``` text
-binary     decimal
-01111101 = 125
-```
-
-## groupBitXor {#groupbitxor}
-
-Applies bitwise `XOR` to a series of numbers.
-
-``` sql
-groupBitXor(expr)
-```
-
-**Parameters**
-
-`expr` – An expression that results in a `UInt*` type.
-
-**Return value**
-
-Value of the `UInt*` type.
-
-**Example**
-
-Test data:
-
-``` text
-binary     decimal
-00101100 = 44
-00011100 = 28
-00001101 = 13
-01010101 = 85
-```
-
-Query:
-
-``` sql
-SELECT groupBitXor(num) FROM t
-```
-
-Where `num` is the column with the test data.
-
-Result:
-
-``` text
-binary     decimal
-01101000 = 104
-```
-
-## groupBitmap {#groupbitmap}
-
-Performs bitmap (aggregate) calculations on an unsigned integer column and returns the cardinality as UInt64; if the -State suffix is added, it returns a [bitmap object](../functions/bitmap_functions.md).
-
-``` sql
-groupBitmap(expr)
-```
-
-**Parameters**
-
-`expr` – An expression that results in a `UInt*` type.
-
-**Return value**
-
-Value of the `UInt64` type.
-
-**Example**
-
-Test data:
-
-``` text
-UserID
-1
-1
-2
-3
-```
-
-Query:
-
-``` sql
-SELECT groupBitmap(UserID) as num FROM t
-```
-
-Result:
-
-``` text
-num
-3
-```
-
-## min(x) {#agg_function-min}
-
-Calculates the minimum.
-
-## max(x) {#agg_function-max}
-
-Calculates the maximum.
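-
-A minimal illustration of both functions (the table `t` with a numeric column `x` is a hypothetical example, not part of the original reference):
-
-``` sql
-/* Aggregates over all rows of the assumed table t; as described in the */
-/* NULL processing section, NULL values are skipped during aggregation. */
-SELECT min(x), max(x) FROM t
-```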
-
-## argMin(arg, val) {#agg-function-argmin}
-
-Calculates the ‘arg’ value for a minimal ‘val’ value. If there are several different values of ‘arg’ for minimal values of ‘val’, the first of these values encountered is output.
-
-**Example:**
-
-``` text
-┌─user─────┬─salary─┐
-│ director │   5000 │
-│ manager  │   3000 │
-│ worker   │   1000 │
-└──────────┴────────┘
-```
-
-``` sql
-SELECT argMin(user, salary) FROM salary
-```
-
-``` text
-┌─argMin(user, salary)─┐
-│ worker               │
-└──────────────────────┘
-```
-
-## argMax(arg, val) {#agg-function-argmax}
-
-Calculates the ‘arg’ value for a maximum ‘val’ value. If there are several different values of ‘arg’ for maximum values of ‘val’, the first of these values encountered is output.
-
-## sum(x) {#agg_function-sum}
-
-Calculates the sum.
-Only works for numbers.
-
-## sumWithOverflow(x) {#sumwithoverflowx}
-
-Computes the sum of the numbers, using the same data type for the result as for the input parameters. If the sum exceeds the maximum value for this data type, the function returns an error.
-
-Only works for numbers.
-
-## sumMap(key, value) {#agg_functions-summap}
-
-Totals the ‘value’ array according to the keys specified in the ‘key’ array.
-The number of elements in ‘key’ and ‘value’ must be the same for each row that is totaled.
-Returns a tuple of two arrays: keys in sorted order, and values summed for the corresponding keys.
-
-Example:
-
-``` sql
-CREATE TABLE sum_map(
-    date Date,
-    timeslot DateTime,
-    statusMap Nested(
-        status UInt16,
-        requests UInt64
-    )
-) ENGINE = Log;
-INSERT INTO sum_map VALUES
-    ('2000-01-01', '2000-01-01 00:00:00', [1, 2, 3], [10, 10, 10]),
-    ('2000-01-01', '2000-01-01 00:00:00', [3, 4, 5], [10, 10, 10]),
-    ('2000-01-01', '2000-01-01 00:01:00', [4, 5, 6], [10, 10, 10]),
-    ('2000-01-01', '2000-01-01 00:01:00', [6, 7, 8], [10, 10, 10]);
-SELECT
-    timeslot,
-    sumMap(statusMap.status, statusMap.requests)
-FROM sum_map
-GROUP BY timeslot
-```
-
-``` text
-┌────────────timeslot─┬─sumMap(statusMap.status, statusMap.requests)─┐
-│ 2000-01-01 00:00:00 │ ([1,2,3,4,5],[10,10,20,10,10])               │
-│ 2000-01-01 00:01:00 │ ([4,5,6,7,8],[10,10,20,10,10])               │
-└─────────────────────┴──────────────────────────────────────────────┘
-```
-
-## skewPop {#skewpop}
-
-Computes the [skewness](https://en.wikipedia.org/wiki/Skewness) of a sequence.
-
-``` sql
-skewPop(expr)
-```
-
-**Parameters**
-
-`expr` — [Expression](../syntax.md#syntax-expressions) returning a number.
-
-**Returned value**
-
-The skewness of the given distribution. Type — [Float64](../../data_types/float.md)
-
-**Example**
-
-``` sql
-SELECT skewPop(value) FROM series_with_value_column
-```
-
-## skewSamp {#skewsamp}
-
-Computes the [sample skewness](https://en.wikipedia.org/wiki/Skewness) of a sequence.
-
-It represents an unbiased estimate of the skewness of a random variable if passed values form its sample.
-
-``` sql
-skewSamp(expr)
-```
-
-**Parameters**
-
-`expr` — [Expression](../syntax.md#syntax-expressions) returning a number.
-
-**Returned value**
-
-The skewness of the given distribution. Type — [Float64](../../data_types/float.md). If `n <= 1` (`n` is the size of the sample), then the function returns `nan`.
-
-**Example**
-
-``` sql
-SELECT skewSamp(value) FROM series_with_value_column
-```
-
-## kurtPop {#kurtpop}
-
-Computes the [kurtosis](https://en.wikipedia.org/wiki/Kurtosis) of a sequence.
-
-``` sql
-kurtPop(expr)
-```
-
-**Parameters**
-
-`expr` — [Expression](../syntax.md#syntax-expressions) returning a number.
-
-**Returned value**
-
-The kurtosis of the given distribution. Type — [Float64](../../data_types/float.md)
-
-**Example**
-
-``` sql
-SELECT kurtPop(value) FROM series_with_value_column
-```
-
-## kurtSamp {#kurtsamp}
-
-Computes the [sample kurtosis](https://en.wikipedia.org/wiki/Kurtosis) of a sequence.
-
-It represents an unbiased estimate of the kurtosis of a random variable if passed values form its sample.
-
-``` sql
-kurtSamp(expr)
-```
-
-**Parameters**
-
-`expr` — [Expression](../syntax.md#syntax-expressions) returning a number.
-
-**Returned value**
-
-The kurtosis of the given distribution. Type — [Float64](../../data_types/float.md). If `n <= 1` (`n` is a size of the sample), then the function returns `nan`.
-
-**Example**
-
-``` sql
-SELECT kurtSamp(value) FROM series_with_value_column
-```
-
-## timeSeriesGroupSum(uid, timestamp, value) {#agg-function-timeseriesgroupsum}
-
-`timeSeriesGroupSum` can aggregate different time series whose sample timestamps are not aligned.
-It will use linear interpolation between two sample timestamps and then sum the time series together.
-
-- `uid` is the time series unique id, `UInt64`.
-- `timestamp` is Int64 type in order to support millisecond or microsecond precision.
-- `value` is the metric.
-
-The function returns an array of tuples with `(timestamp, aggregated_value)` pairs.
-
-Before using this function make sure `timestamp` is in ascending order.
-
-Example:
-
-``` text
-┌─uid─┬─timestamp─┬─value─┐
-│   1 │         2 │   0.2 │
-│   1 │         7 │   0.7 │
-│   1 │        12 │   1.2 │
-│   1 │        17 │   1.7 │
-│   1 │        25 │   2.5 │
-│   2 │         3 │   0.6 │
-│   2 │         8 │   1.6 │
-│   2 │        12 │   2.4 │
-│   2 │        18 │   3.6 │
-│   2 │        24 │   4.8 │
-└─────┴───────────┴───────┘
-```
-
-``` sql
-CREATE TABLE time_series(
-    uid       UInt64,
-    timestamp Int64,
-    value     Float64
-) ENGINE = Memory;
-INSERT INTO time_series VALUES
-    (1,2,0.2),(1,7,0.7),(1,12,1.2),(1,17,1.7),(1,25,2.5),
-    (2,3,0.6),(2,8,1.6),(2,12,2.4),(2,18,3.6),(2,24,4.8);
-
-SELECT timeSeriesGroupSum(uid, timestamp, value)
-FROM (
-    SELECT * FROM time_series ORDER BY timestamp ASC
-);
-```
-
-And the result will be:
-
-``` text
-[(2,0.2),(3,0.9),(7,2.1),(8,2.4),(12,3.6),(17,5.1),(18,5.4),(24,7.2),(25,2.5)]
-```
-
-## timeSeriesGroupRateSum(uid, ts, val) {#agg-function-timeseriesgroupratesum}
-
-Similarly to `timeSeriesGroupSum`, `timeSeriesGroupRateSum` calculates the rate of each time series and then sums the rates together.
-Also, the timestamps should be in ascending order before using this function.
-
-Using this function, the result for the case above will be:
-
-``` text
-[(2,0),(3,0.1),(7,0.3),(8,0.3),(12,0.3),(17,0.3),(18,0.3),(24,0.3),(25,0.1)]
-```
-
-## avg(x) {#agg_function-avg}
-
-Calculates the average.
-Only works for numbers.
-The result is always Float64.
-
-## uniq {#agg_function-uniq}
-
-Calculates the approximate number of different values of the argument.
-
-``` sql
-uniq(x[, ...])
-```
-
-**Parameters**
-
-The function takes a variable number of parameters. Parameters can be `Tuple`, `Array`, `Date`, `DateTime`, `String`, or numeric types.
-
-**Returned value**
-
-- A [UInt64](../../data_types/int_uint.md)-type number.
-
-**Implementation details**
-
-Function:
-
-- Calculates a hash for all parameters in the aggregate, then uses it in calculations.
-
-- Uses an adaptive sampling algorithm. For the calculation state, the function uses a sample of element hash values up to 65536.
-
-    This algorithm is very accurate and very efficient on the CPU. When the query contains several of these functions, using `uniq` is almost as fast as using other aggregate functions.
-
-- Provides the result deterministically (it doesn’t depend on the query processing order).
-
-We recommend using this function in almost all scenarios.
-
-**See Also**
-
-- [uniqCombined](#agg_function-uniqcombined)
-- [uniqCombined64](#agg_function-uniqcombined64)
-- [uniqHLL12](#agg_function-uniqhll12)
-- [uniqExact](#agg_function-uniqexact)
-
-## uniqCombined {#agg_function-uniqcombined}
-
-Calculates the approximate number of different argument values.
-
-``` sql
-uniqCombined(HLL_precision)(x[, ...])
-```
-
-The `uniqCombined` function is a good choice for calculating the number of different values.
-
-**Parameters**
-
-The function takes a variable number of parameters. Parameters can be `Tuple`, `Array`, `Date`, `DateTime`, `String`, or numeric types.
-
-`HLL_precision` is the base-2 logarithm of the number of cells in [HyperLogLog](https://en.wikipedia.org/wiki/HyperLogLog). Optional, you can use the function as `uniqCombined(x[, ...])`. The default value for `HLL_precision` is 17, which is effectively 96 KiB of space (2^17 cells, 6 bits each).
-
-**Returned value**
-
-- A [UInt64](../../data_types/int_uint.md)-type number.
-
-**Implementation details**
-
-Function:
-
-- Calculates a hash (64-bit hash for `String` and 32-bit otherwise) for all parameters in the aggregate, then uses it in calculations.
-
-- Uses a combination of three algorithms: array, hash table, and HyperLogLog with an error correction table.
-
-    For a small number of distinct elements, an array is used. When the set size is larger, a hash table is used. For a larger number of elements, HyperLogLog is used, which will occupy a fixed amount of memory.
-
-- Provides the result deterministically (it doesn’t depend on the query processing order).
-
-!!! note "Note"
-    Since it uses a 32-bit hash for non-`String` types, the result will have very high error for cardinalities significantly larger than `UINT_MAX` (the error rises quickly after a few tens of billions of distinct values), hence in this case you should use [uniqCombined64](#agg_function-uniqcombined64)
-
-Compared to the [uniq](#agg_function-uniq) function, the `uniqCombined`:
-
-- Consumes several times less memory.
-- Calculates with several times higher accuracy.
-- Usually has slightly lower performance. In some scenarios, `uniqCombined` can perform better than `uniq`, for example, with distributed queries that transmit a large number of aggregation states over the network.
-
-**See Also**
-
-- [uniq](#agg_function-uniq)
-- [uniqCombined64](#agg_function-uniqcombined64)
-- [uniqHLL12](#agg_function-uniqhll12)
-- [uniqExact](#agg_function-uniqexact)
-
-## uniqCombined64 {#agg_function-uniqcombined64}
-
-Same as [uniqCombined](#agg_function-uniqcombined), but uses a 64-bit hash for all data types.
-
-## uniqHLL12 {#agg_function-uniqhll12}
-
-Calculates the approximate number of different argument values, using the [HyperLogLog](https://en.wikipedia.org/wiki/HyperLogLog) algorithm.
-
-``` sql
-uniqHLL12(x[, ...])
-```
-
-**Parameters**
-
-The function takes a variable number of parameters. Parameters can be `Tuple`, `Array`, `Date`, `DateTime`, `String`, or numeric types.
-
-**Returned value**
-
-- A [UInt64](../../data_types/int_uint.md)-type number.
-
-**Implementation details**
-
-Function:
-
-- Calculates a hash for all parameters in the aggregate, then uses it in calculations.
-
-- Uses the HyperLogLog algorithm to approximate the number of different argument values.
-
-    2^12 5-bit cells are used.
The size of the state is slightly more than 2.5 KB. The result is not very accurate (up to ~10% error) for small data sets (<10K elements). However, the result is fairly accurate for high-cardinality data sets (10K-100M), with a maximum error of ~1.6%. Starting from 100M, the estimation error increases, and the function will return very inaccurate results for data sets with extremely high cardinality (1B+ elements).
-
-- Provides a deterministic result (it doesn’t depend on the query processing order).
-
-We don’t recommend using this function. In most cases, use the [uniq](#agg_function-uniq) or [uniqCombined](#agg_function-uniqcombined) function.
-
-**See Also**
-
-- [uniq](#agg_function-uniq)
-- [uniqCombined](#agg_function-uniqcombined)
-- [uniqExact](#agg_function-uniqexact)
-
-## uniqExact {#agg_function-uniqexact}
-
-Calculates the exact number of different argument values.
-
-``` sql
-uniqExact(x[, ...])
-```
-
-Use the `uniqExact` function if you absolutely need an exact result. Otherwise use the [uniq](#agg_function-uniq) function.
-
-The `uniqExact` function uses more memory than `uniq`, because the size of the state grows unboundedly as the number of different values increases.
-
-**Parameters**
-
-The function takes a variable number of parameters. Parameters can be `Tuple`, `Array`, `Date`, `DateTime`, `String`, or numeric types.
-
-**See Also**
-
-- [uniq](#agg_function-uniq)
-- [uniqCombined](#agg_function-uniqcombined)
-- [uniqHLL12](#agg_function-uniqhll12)
-
-## groupArray(x), groupArray(max\_size)(x) {#agg_function-grouparray}
-
-Creates an array of argument values.
-Values can be added to the array in any (indeterminate) order.
-
-The second version (with the `max_size` parameter) limits the size of the resulting array to `max_size` elements.
-For example, `groupArray(1)(x)` is equivalent to `[any(x)]`.
-
-In some cases, you can still rely on the order of execution. This applies to cases when `SELECT` comes from a subquery that uses `ORDER BY`.
-
-## groupArrayInsertAt(value, position) {#grouparrayinsertatvalue-position}
-
-Inserts a value into the array at the specified position.
-
-!!! note "Note"
-    This function uses zero-based positions, contrary to the conventional one-based positions for SQL arrays.
-
-Accepts the value and position as input. If several values are inserted into the same position, any of them might end up in the resulting array (the first one will be used in the case of single-threaded execution). If no value is inserted into a position, the position is assigned the default value.
-
-Optional parameters:
-
-- The default value for substituting in empty positions.
-- The length of the resulting array. This allows you to receive arrays of the same size for all the aggregate keys. When using this parameter, the default value must be specified.
-
-## groupArrayMovingSum {#agg_function-grouparraymovingsum}
-
-Calculates the moving sum of input values.
-
-``` sql
-groupArrayMovingSum(numbers_for_summing)
-groupArrayMovingSum(window_size)(numbers_for_summing)
-```
-
-The function can take the window size as a parameter. If left unspecified, the function takes the window size equal to the number of rows in the column.
-
-**Parameters**
-
-- `numbers_for_summing` — [Expression](../syntax.md#syntax-expressions) resulting in a numeric data type value.
-- `window_size` — Size of the calculation window.
-
-**Returned values**
-
-- Array of the same size and type as the input data.
-
-**Example**
-
-The sample table:
-
-``` sql
-CREATE TABLE t
-(
-    `int` UInt8,
-    `float` Float32,
-    `dec` Decimal32(2)
-)
-ENGINE = TinyLog
-```
-
-``` text
-┌─int─┬─float─┬──dec─┐
-│   1 │   1.1 │ 1.10 │
-│   2 │   2.2 │ 2.20 │
-│   4 │   4.4 │ 4.40 │
-│   7 │  7.77 │ 7.77 │
-└─────┴───────┴──────┘
-```
-
-The queries:
-
-``` sql
-SELECT
-    groupArrayMovingSum(int) AS I,
-    groupArrayMovingSum(float) AS F,
-    groupArrayMovingSum(dec) AS D
-FROM t
-```
-
-``` text
-┌─I──────────┬─F───────────────────────────────┬─D──────────────────────┐
-│ [1,3,7,14] │ [1.1,3.3000002,7.7000003,15.47] │ [1.10,3.30,7.70,15.47] │
-└────────────┴─────────────────────────────────┴────────────────────────┘
-```
-
-``` sql
-SELECT
-    groupArrayMovingSum(2)(int) AS I,
-    groupArrayMovingSum(2)(float) AS F,
-    groupArrayMovingSum(2)(dec) AS D
-FROM t
-```
-
-``` text
-┌─I──────────┬─F───────────────────────────────┬─D──────────────────────┐
-│ [1,3,6,11] │ [1.1,3.3000002,6.6000004,12.17] │ [1.10,3.30,6.60,12.17] │
-└────────────┴─────────────────────────────────┴────────────────────────┘
-```
-
-## groupArrayMovingAvg {#agg_function-grouparraymovingavg}
-
-Calculates the moving average of input values.
-
-``` sql
-groupArrayMovingAvg(numbers_for_summing)
-groupArrayMovingAvg(window_size)(numbers_for_summing)
-```
-
-The function can take the window size as a parameter. If left unspecified, the function takes the window size equal to the number of rows in the column.
-
-**Parameters**
-
-- `numbers_for_summing` — [Expression](../syntax.md#syntax-expressions) resulting in a numeric data type value.
-- `window_size` — Size of the calculation window.
-
-**Returned values**
-
-- Array of the same size and type as the input data.
-
-The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero). It truncates the decimal places insignificant for the resulting data type.
-
-**Example**
-
-The sample table:
-
-``` sql
-CREATE TABLE t
-(
-    `int` UInt8,
-    `float` Float32,
-    `dec` Decimal32(2)
-)
-ENGINE = TinyLog
-```
-
-``` text
-┌─int─┬─float─┬──dec─┐
-│   1 │   1.1 │ 1.10 │
-│   2 │   2.2 │ 2.20 │
-│   4 │   4.4 │ 4.40 │
-│   7 │  7.77 │ 7.77 │
-└─────┴───────┴──────┘
-```
-
-The queries:
-
-``` sql
-SELECT
-    groupArrayMovingAvg(int) AS I,
-    groupArrayMovingAvg(float) AS F,
-    groupArrayMovingAvg(dec) AS D
-FROM t
-```
-
-``` text
-┌─I─────────┬─F───────────────────────────────────┬─D─────────────────────┐
-│ [0,0,1,3] │ [0.275,0.82500005,1.9250001,3.8675] │ [0.27,0.82,1.92,3.86] │
-└───────────┴─────────────────────────────────────┴───────────────────────┘
-```
-
-``` sql
-SELECT
-    groupArrayMovingAvg(2)(int) AS I,
-    groupArrayMovingAvg(2)(float) AS F,
-    groupArrayMovingAvg(2)(dec) AS D
-FROM t
-```
-
-``` text
-┌─I─────────┬─F────────────────────────────────┬─D─────────────────────┐
-│ [0,1,3,5] │ [0.55,1.6500001,3.3000002,6.085] │ [0.55,1.65,3.30,6.08] │
-└───────────┴──────────────────────────────────┴───────────────────────┘
-```
-
-## groupUniqArray(x), groupUniqArray(max\_size)(x) {#groupuniqarrayx-groupuniqarraymax-sizex}
-
-Creates an array from different argument values. Memory consumption is the same as for the `uniqExact` function.
-
-The second version (with the `max_size` parameter) limits the size of the resulting array to `max_size` elements.
-For example, `groupUniqArray(1)(x)` is equivalent to `[any(x)]`.
-
-## quantile {#quantile}
-
-Computes an approximate [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence.
-
-## quantile {#quantile}
-
-Computes an approximate [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence.
-
-This function applies [reservoir sampling](https://en.wikipedia.org/wiki/Reservoir_sampling) with a reservoir size up to 8192 and a random number generator for sampling. The result is non-deterministic. To get an exact quantile, use the [quantileExact](#quantileexact) function.
-
-When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](#quantiles) function.
-
-**Syntax**
-
-``` sql
-quantile(level)(expr)
-```
-
-Alias: `median`.
-
-**Parameters**
-
-- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` value in the range of `[0.01, 0.99]`. Default value: 0.5. At `level=0.5` the function calculates the [median](https://en.wikipedia.org/wiki/Median).
-- `expr` — Expression over the column values resulting in numeric [data types](../../data_types/index.md#data_types), [Date](../../data_types/date.md) or [DateTime](../../data_types/datetime.md).
-
-**Returned value**
-
-- Approximate quantile of the specified level.
-
-Type:
-
-- [Float64](../../data_types/float.md) for numeric data type input.
-- [Date](../../data_types/date.md) if input values have the `Date` type.
-- [DateTime](../../data_types/datetime.md) if input values have the `DateTime` type.
-
-**Example**
-
-Input table:
-
-``` text
-┌─val─┐
-│   1 │
-│   1 │
-│   2 │
-│   3 │
-└─────┘
-```
-
-Query:
-
-``` sql
-SELECT quantile(val) FROM t
-```
-
-Result:
-
-``` text
-┌─quantile(val)─┐
-│           1.5 │
-└───────────────┘
-```
-
-**See Also**
-
-- [median](#median)
-- [quantiles](#quantiles)
-
-## quantileDeterministic {#quantiledeterministic}
-
-Computes an approximate [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence.
-
-This function applies [reservoir sampling](https://en.wikipedia.org/wiki/Reservoir_sampling) with a reservoir size up to 8192 and a deterministic sampling algorithm. The result is deterministic. To get an exact quantile, use the [quantileExact](#quantileexact) function.
-
-When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](#quantiles) function.
-
-**Syntax**
-
-``` sql
-quantileDeterministic(level)(expr, determinator)
-```
-
-Alias: `medianDeterministic`.
-
-**Parameters**
-
-- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` value in the range of `[0.01, 0.99]`. Default value: 0.5. At `level=0.5` the function calculates the [median](https://en.wikipedia.org/wiki/Median).
-- `expr` — Expression over the column values resulting in numeric [data types](../../data_types/index.md#data_types), [Date](../../data_types/date.md) or [DateTime](../../data_types/datetime.md).
-- `determinator` — Number whose hash is used instead of a random number generator in the reservoir sampling algorithm to make the result of sampling deterministic. As a determinator you can use any deterministic positive number, for example, a user id or an event id. If the same determinator value occurs too often, the function works incorrectly.
-
-**Returned value**
-
-- Approximate quantile of the specified level.
-
-Type:
-
-- [Float64](../../data_types/float.md) for numeric data type input.
-- [Date](../../data_types/date.md) if input values have the `Date` type.
-- [DateTime](../../data_types/datetime.md) if input values have the `DateTime` type.
-
-**Example**
-
-Input table:
-
-``` text
-┌─val─┐
-│   1 │
-│   1 │
-│   2 │
-│   3 │
-└─────┘
-```
-
-Query:
-
-``` sql
-SELECT quantileDeterministic(val, 1) FROM t
-```
-
-Result:
-
-``` text
-┌─quantileDeterministic(val, 1)─┐
-│                           1.5 │
-└───────────────────────────────┘
-```
-
-**See Also**
-
-- [median](#median)
-- [quantiles](#quantiles)
-
-## quantileExact {#quantileexact}
-
-Exactly computes the [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence.
-
-To get the exact value, all the passed values are combined into an array, which is then partially sorted. Therefore, the function consumes `O(n)` memory, where `n` is the number of values that were passed. However, for a small number of values, the function is very efficient.
-
-When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](#quantiles) function.
-
-**Syntax**
-
-``` sql
-quantileExact(level)(expr)
-```
-
-Alias: `medianExact`.
-
-**Parameters**
-
-- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` value in the range of `[0.01, 0.99]`. Default value: 0.5. At `level=0.5` the function calculates the [median](https://en.wikipedia.org/wiki/Median).
-- `expr` — Expression over the column values resulting in numeric [data types](../../data_types/index.md#data_types), [Date](../../data_types/date.md) or [DateTime](../../data_types/datetime.md).
-
-**Returned value**
-
-- Quantile of the specified level.
-
-Type:
-
-- [Float64](../../data_types/float.md) for numeric data type input.
-- [Date](../../data_types/date.md) if input values have the `Date` type.
-- [DateTime](../../data_types/datetime.md) if input values have the `DateTime` type.
-
-**Example**
-
-Query:
-
-``` sql
-SELECT quantileExact(number) FROM numbers(10)
-```
-
-Result:
-
-``` text
-┌─quantileExact(number)─┐
-│                     5 │
-└───────────────────────┘
-```
-
-**See Also**
-
-- [median](#median)
-- [quantiles](#quantiles)
-
-## quantileExactWeighted {#quantileexactweighted}
-
-Exactly computes the [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence, taking into account the weight of each element.
-
-To get the exact value, all the passed values are combined into an array, which is then partially sorted. Each value is counted with its weight, as if it is present `weight` times. A hash table is used in the algorithm. Because of this, if the passed values are frequently repeated, the function consumes less RAM than [quantileExact](#quantileexact). You can use this function instead of `quantileExact` and specify the weight 1.
-
-When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](#quantiles) function.
-
-**Syntax**
-
-``` sql
-quantileExactWeighted(level)(expr, weight)
-```
-
-Alias: `medianExactWeighted`.
-
-**Parameters**
-
-- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` value in the range of `[0.01, 0.99]`. Default value: 0.5. At `level=0.5` the function calculates the [median](https://en.wikipedia.org/wiki/Median).
-- `expr` — Expression over the column values resulting in numeric [data types](../../data_types/index.md#data_types), [Date](../../data_types/date.md) or [DateTime](../../data_types/datetime.md).
-- `weight` — Column with weights of sequence members. Weight is a number of value occurrences.
-
-**Returned value**
-
-- Quantile of the specified level.
-
-Type:
-
-- [Float64](../../data_types/float.md) for numeric data type input.
-- [Date](../../data_types/date.md) if input values have the `Date` type.
-- [DateTime](../../data_types/datetime.md) if input values have the `DateTime` type.
-
-**Example**
-
-Input table:
-
-``` text
-┌─n─┬─val─┐
-│ 0 │   3 │
-│ 1 │   2 │
-│ 2 │   1 │
-│ 5 │   4 │
-└───┴─────┘
-```
-
-Query:
-
-``` sql
-SELECT quantileExactWeighted(n, val) FROM t
-```
-
-Result:
-
-``` text
-┌─quantileExactWeighted(n, val)─┐
-│                             1 │
-└───────────────────────────────┘
-```
-
-**See Also**
-
-- [median](#median)
-- [quantiles](#quantiles)
-
-## quantileTiming {#quantiletiming}
-
-Computes the [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence with determined precision.
-
-The result is deterministic (it doesn’t depend on the query processing order). The function is optimized for working with sequences which describe distributions like web page loading times or backend response times.
-
-When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](#quantiles) function.
-
-**Syntax**
-
-``` sql
-quantileTiming(level)(expr)
-```
-
-Alias: `medianTiming`.
-
-**Parameters**
-
-- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` value in the range of `[0.01, 0.99]`. Default value: 0.5. At `level=0.5` the function calculates the [median](https://en.wikipedia.org/wiki/Median).
-
-- `expr` — [Expression](../syntax.md#syntax-expressions) over column values returning a [Float\*](../../data_types/float.md)-type number.
-
-    - If negative values are passed to the function, the behavior is undefined.
-    - If the value is greater than 30,000 (a page loading time of more than 30 seconds), it is assumed to be 30,000.
-
-**Accuracy**
-
-The calculation is accurate if:
-
-- The total number of values doesn’t exceed 5670.
-- The total number of values exceeds 5670, but the page loading time is less than 1024 ms.
-
-Otherwise, the result of the calculation is rounded to the nearest multiple of 16 ms.
-
-!!! note "Note"
-    For calculating page loading time quantiles, this function is more effective and accurate than [quantile](#quantile).
-
-**Returned value**
-
-- Quantile of the specified level.
-
-Type: `Float32`.
-
-!!! note "Note"
-    If no values are passed to the function (when using `quantileTimingIf`), [NaN](../../data_types/float.md#data_type-float-nan-inf) is returned. The purpose of this is to differentiate these cases from cases that result in zero. See [ORDER BY clause](../select.md#select-order-by) for notes on sorting `NaN` values.
-
-**Example**
-
-Input table:
-
-``` text
-┌─response_time─┐
-│            72 │
-│           112 │
-│           126 │
-│           145 │
-│           104 │
-│           242 │
-│           313 │
-│           168 │
-│           108 │
-└───────────────┘
-```
-
-Query:
-
-``` sql
-SELECT quantileTiming(response_time) FROM t
-```
-
-Result:
-
-``` text
-┌─quantileTiming(response_time)─┐
-│                           126 │
-└───────────────────────────────┘
-```
-
-**See Also**
-
-- [median](#median)
-- [quantiles](#quantiles)
-
-## quantileTimingWeighted {#quantiletimingweighted}
-
-Computes the [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence with determined precision, according to the weight of each sequence member.
-
-The result is deterministic (it doesn’t depend on the query processing order). The function is optimized for working with sequences which describe distributions like web page loading times or backend response times.
-
-When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](#quantiles) function.
-
-**Syntax**
-
-``` sql
-quantileTimingWeighted(level)(expr, weight)
-```
-
-Alias: `medianTimingWeighted`.
-
-**Parameters**
-
-- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` value in the range of `[0.01, 0.99]`. Default value: 0.5. At `level=0.5` the function calculates the [median](https://en.wikipedia.org/wiki/Median).
-
-- `expr` — [Expression](../syntax.md#syntax-expressions) over column values returning a [Float\*](../../data_types/float.md)-type number.
-
-    - If negative values are passed to the function, the behavior is undefined.
-    - If the value is greater than 30,000 (a page loading time of more than 30 seconds), it is assumed to be 30,000.
-
-- `weight` — Column with weights of sequence elements. Weight is a number of value occurrences.
-
-**Accuracy**
-
-The calculation is accurate if:
-
-- The total number of values doesn’t exceed 5670.
-- The total number of values exceeds 5670, but the page loading time is less than 1024 ms.
-
-Otherwise, the result of the calculation is rounded to the nearest multiple of 16 ms.
-
-!!! note "Note"
-    For calculating page loading time quantiles, this function is more effective and accurate than [quantile](#quantile).
-
-**Returned value**
-
-- Quantile of the specified level.
-
-Type: `Float32`.
-
-!!! note "Note"
-    If no values are passed to the function (when using `quantileTimingIf`), [NaN](../../data_types/float.md#data_type-float-nan-inf) is returned. The purpose of this is to differentiate these cases from cases that result in zero. See [ORDER BY clause](../select.md#select-order-by) for notes on sorting `NaN` values.
-
-**Example**
-
-Input table:
-
-``` text
-┌─response_time─┬─weight─┐
-│            68 │      1 │
-│           104 │      2 │
-│           112 │      3 │
-│           126 │      2 │
-│           138 │      1 │
-│           162 │      1 │
-└───────────────┴────────┘
-```
-
-Query:
-
-``` sql
-SELECT quantileTimingWeighted(response_time, weight) FROM t
-```
-
-Result:
-
-``` text
-┌─quantileTimingWeighted(response_time, weight)─┐
-│                                           112 │
-└───────────────────────────────────────────────┘
-```
-
-**See Also**
-
-- [median](#median)
-- [quantiles](#quantiles)
-
-## quantileTDigest {#quantiletdigest}
-
-Computes an approximate [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence using the [t-digest](https://github.com/tdunning/t-digest/blob/master/docs/t-digest-paper/histo.pdf) algorithm.
-
-The maximum error is 1%. Memory consumption is `log(n)`, where `n` is the number of values. The result depends on the order of running the query, and is nondeterministic.
-
-The performance of the function is lower than the performance of [quantile](#quantile) or [quantileTiming](#quantiletiming). In terms of the ratio of state size to precision, this function is much better than `quantile`.
-
-When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](#quantiles) function.
-
-**Syntax**
-
-``` sql
-quantileTDigest(level)(expr)
-```
-
-Alias: `medianTDigest`.
-
-**Parameters**
-
-- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` value in the range of `[0.01, 0.99]`. Default value: 0.5. At `level=0.5` the function calculates the [median](https://en.wikipedia.org/wiki/Median).
-- `expr` — Expression over the column values resulting in numeric [data types](../../data_types/index.md#data_types), [Date](../../data_types/date.md) or [DateTime](../../data_types/datetime.md).
-
-**Returned value**
-
-- Approximate quantile of the specified level.
-
-Type:
-
-- [Float64](../../data_types/float.md) for numeric data type input.
-- [Date](../../data_types/date.md) if input values have the `Date` type.
-- [DateTime](../../data_types/datetime.md) if input values have the `DateTime` type.
-
-**Example**
-
-Query:
-
-``` sql
-SELECT quantileTDigest(number) FROM numbers(10)
-```
-
-Result:
-
-``` text
-┌─quantileTDigest(number)─┐
-│                     4.5 │
-└─────────────────────────┘
-```
-
-**See Also**
-
-- [median](#median)
-- [quantiles](#quantiles)
-
-## quantileTDigestWeighted {#quantiletdigestweighted}
-
-Computes an approximate [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence using the [t-digest](https://github.com/tdunning/t-digest/blob/master/docs/t-digest-paper/histo.pdf) algorithm. The function takes into account the weight of each sequence member. The maximum error is 1%. Memory consumption is `log(n)`, where `n` is the number of values.
-
-The performance of the function is lower than the performance of [quantile](#quantile) or [quantileTiming](#quantiletiming). In terms of the ratio of state size to precision, this function is much better than `quantile`.
-
-The result depends on the order of running the query, and is nondeterministic.
-
-When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](#quantiles) function.
-
-**Syntax**
-
-``` sql
-quantileTDigestWeighted(level)(expr, weight)
-```
-
-Alias: `medianTDigestWeighted`.
-
-**Parameters**
-
-- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` value in the range of `[0.01, 0.99]`. Default value: 0.5. At `level=0.5` the function calculates the [median](https://en.wikipedia.org/wiki/Median).
-- `expr` — Expression over the column values resulting in numeric [data types](../../data_types/index.md#data_types), [Date](../../data_types/date.md) or [DateTime](../../data_types/datetime.md).
-- `weight` — Column with weights of sequence elements. Weight is a number of value occurrences.
-
-**Returned value**
-
-- Approximate quantile of the specified level.
-
-Type:
-
-- [Float64](../../data_types/float.md) for numeric data type input.
-- [Date](../../data_types/date.md) if input values have the `Date` type.
-- [DateTime](../../data_types/datetime.md) if input values have the `DateTime` type.
-
-**Example**
-
-Query:
-
-``` sql
-SELECT quantileTDigestWeighted(number, 1) FROM numbers(10)
-```
-
-Result:
-
-``` text
-┌─quantileTDigestWeighted(number, 1)─┐
-│                                4.5 │
-└────────────────────────────────────┘
-```
-
-**See Also**
-
-- [median](#median)
-- [quantiles](#quantiles)
-
-## median {#median}
-
-The `median*` functions are the aliases for the corresponding `quantile*` functions. They calculate the median of a numeric data sample.
-
-Functions:
-
-- `median` — Alias for [quantile](#quantile).
-- `medianDeterministic` — Alias for [quantileDeterministic](#quantiledeterministic).
-- `medianExact` — Alias for [quantileExact](#quantileexact).
-- `medianExactWeighted` — Alias for [quantileExactWeighted](#quantileexactweighted).
-- `medianTiming` — Alias for [quantileTiming](#quantiletiming).
-- `medianTimingWeighted` — Alias for [quantileTimingWeighted](#quantiletimingweighted).
-- `medianTDigest` — Alias for [quantileTDigest](#quantiletdigest).
-- `medianTDigestWeighted` — Alias for [quantileTDigestWeighted](#quantiletdigestweighted).
-
-**Example**
-
-Input table:
-
-``` text
-┌─val─┐
-│   1 │
-│   1 │
-│   2 │
-│   3 │
-└─────┘
-```
-
-Query:
-
-``` sql
-SELECT medianDeterministic(val, 1) FROM t
-```
-
-Result:
-
-``` text
-┌─medianDeterministic(val, 1)─┐
-│                         1.5 │
-└─────────────────────────────┘
-```
-
-## quantiles(level1, level2, …)(x) {#quantiles}
-
-All the quantile functions also have corresponding quantiles functions: `quantiles`, `quantilesDeterministic`, `quantilesTiming`, `quantilesTimingWeighted`, `quantilesExact`, `quantilesExactWeighted`, `quantilesTDigest`. These functions calculate all the quantiles of the listed levels in one pass, and return an array of the resulting values.
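-
-A short hedged sketch of the one-pass form:
-
-``` sql
-SELECT quantiles(0.25, 0.5, 0.75)(number) FROM numbers(100)
-```
-
-The result is a single array with one value per requested level.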
-
-## varSamp(x) {#varsampx}
-
-Calculates the value of `Σ((x - x̅)^2) / (n - 1)`, where `n` is the sample size and `x̅` is the average value of `x`.
-
-It represents an unbiased estimate of the variance of a random variable if passed values form its sample.
-
-Returns `Float64`. When `n <= 1`, returns `+∞`.
-
-## varPop(x) {#varpopx}
-
-Calculates the value of `Σ((x - x̅)^2) / n`, where `n` is the sample size and `x̅` is the average value of `x`.
-
-In other words, dispersion for a set of values. Returns `Float64`.
-
-## stddevSamp(x) {#stddevsampx}
-
-The result is equal to the square root of `varSamp(x)`.
-
-## stddevPop(x) {#stddevpopx}
-
-The result is equal to the square root of `varPop(x)`.
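-
-A small hedged sanity check:
-
-``` sql
-SELECT varSamp(x), varPop(x), stddevSamp(x), stddevPop(x)
-FROM (SELECT arrayJoin([1, 2, 3, 4]) AS x)
-```
-
-For this sample the mean is 2.5 and `Σ((x - x̅)^2) = 5`, so `varSamp` returns 5/3 ≈ 1.667, `varPop` returns 5/4 = 1.25, and the `stddev*` results are their square roots.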
-
-## topK(N)(x) {#topknx}
-
-Returns an array of the approximately most frequent values in the specified column. The resulting array is sorted in descending order of approximate frequency of values (not by the values themselves).
-
-Implements the [Filtered Space-Saving](http://www.l2f.inesc-id.pt/~fmmb/wiki/uploads/Work/misnis.ref0a.pdf) algorithm for analyzing TopK, based on the reduce-and-combine algorithm from [Parallel Space Saving](https://arxiv.org/pdf/1401.0702.pdf).
-
-``` sql
-topK(N)(column)
-```
-
-This function doesn’t provide a guaranteed result. In certain situations, errors might occur and it might return frequent values that aren’t the most frequent values.
-
-We recommend using values of `N < 10`; performance is reduced with large `N` values. The maximum value of `N` is 65536.
-
-**Parameters**
-
-- `N` — The number of elements to return.
-
-If the parameter is omitted, the default value 10 is used.
-
-**Arguments**
-
-- `x` — The value to calculate frequency for.
-
-**Example**
-
-Take the [OnTime](../../getting_started/example_datasets/ontime.md) data set and select the three most frequently occurring values in the `AirlineID` column.
-
-``` sql
-SELECT topK(3)(AirlineID) AS res
-FROM ontime
-```
-
-``` text
-┌─res─────────────────┐
-│ [19393,19790,19805] │
-└─────────────────────┘
-```
-
-## topKWeighted {#topkweighted}
-
-Similar to `topK`, but takes one additional argument of integer type: `weight`. Each value is counted `weight` times in the frequency calculation.
-
-**Syntax**
-
-``` sql
-topKWeighted(N)(x, weight)
-```
-
-**Parameters**
-
-- `N` — The number of elements to return.
-
-**Arguments**
-
-- `x` – The value.
-- `weight` — The weight. [UInt8](../../data_types/int_uint.md).
-
-**Returned value**
-
-Returns an array of the values with maximum approximate sum of weights.
-
-**Example**
-
-Query:
-
-``` sql
-SELECT topKWeighted(10)(number, number) FROM numbers(1000)
-```
-
-Result:
-
-``` text
-┌─topKWeighted(10)(number, number)──────────┐
-│ [999,998,997,996,995,994,993,992,991,990] │
-└───────────────────────────────────────────┘
-```
-
-## covarSamp(x, y) {#covarsampx-y}
-
-Calculates the value of `Σ((x - x̅)(y - y̅)) / (n - 1)`.
-
-Returns Float64. When `n <= 1`, returns +∞.
-
-## covarPop(x, y) {#covarpopx-y}
-
-Calculates the value of `Σ((x - x̅)(y - y̅)) / n`.
-
-## corr(x, y) {#corrx-y}
-
-Calculates the Pearson correlation coefficient: `Σ((x - x̅)(y - y̅)) / sqrt(Σ((x - x̅)^2) * Σ((y - y̅)^2))`.
-
-## categoricalInformationValue {#categoricalinformationvalue}
-
-Calculates the value of `(P(tag = 1) - P(tag = 0))(log(P(tag = 1)) - log(P(tag = 0)))` for each category.
-
-``` sql
-categoricalInformationValue(category1, category2, ..., tag)
-```
-
-The result indicates how a discrete (categorical) feature `[category1, category2, ...]` contributes to a learning model that predicts the value of `tag`.
-
-## simpleLinearRegression {#simplelinearregression}
-
-Performs simple (unidimensional) linear regression.
-
-``` sql
-simpleLinearRegression(x, y)
-```
-
-Parameters:
-
-- `x` — Column with explanatory variable values.
-- `y` — Column with dependent variable values.
-
-Returned values:
-
-Constants `(a, b)` of the resulting line `y = a*x + b`.
-
-**Examples**
-
-``` sql
-SELECT arrayReduce('simpleLinearRegression', [0, 1, 2, 3], [0, 1, 2, 3])
-```
-
-``` text
-┌─arrayReduce('simpleLinearRegression', [0, 1, 2, 3], [0, 1, 2, 3])─┐
-│ (1,0)                                                             │
-└───────────────────────────────────────────────────────────────────┘
-```
-
-``` sql
-SELECT arrayReduce('simpleLinearRegression', [0, 1, 2, 3], [3, 4, 5, 6])
-```
-
-``` text
-┌─arrayReduce('simpleLinearRegression', [0, 1, 2, 3], [3, 4, 5, 6])─┐
-│ (1,3)                                                             │
-└───────────────────────────────────────────────────────────────────┘
-```
-
-## stochasticLinearRegression {#agg_functions-stochasticlinearregression}
-
-This function implements stochastic linear regression. It supports custom parameters for the learning rate, L2 regularization coefficient and mini-batch size, and has several methods for updating weights ([Adam](https://en.wikipedia.org/wiki/Stochastic_gradient_descent#Adam) (used by default), [simple SGD](https://en.wikipedia.org/wiki/Stochastic_gradient_descent), [Momentum](https://en.wikipedia.org/wiki/Stochastic_gradient_descent#Momentum), [Nesterov](https://mipt.ru/upload/medialibrary/d7e/41-91.pdf)).
-
-### Parameters {#agg_functions-stochasticlinearregression-parameters}
-
-There are 4 customizable parameters. They are passed to the function sequentially, but there is no need to pass all four; default values will be used. However, a good model requires some parameter tuning.
-
-``` text
-stochasticLinearRegression(1.0, 1.0, 10, 'SGD')
-```
-
-1. `learning rate` — The coefficient on the step length when a gradient descent step is performed. A learning rate that is too big may cause infinite weights of the model. Default is `0.00001`.
-2. `l2 regularization coefficient` — May help to prevent overfitting. Default is `0.1`.
-3. `mini-batch size` — Sets the number of elements whose gradients will be computed and summed to perform one step of gradient descent. Pure stochastic descent uses one element; however, small batches (about 10 elements) make gradient steps more stable. Default is `15`.
-4. `method for updating weights` — The options are: `Adam` (by default), `SGD`, `Momentum`, `Nesterov`. `Momentum` and `Nesterov` require a little more computation and memory, however they happen to be useful in terms of the speed of convergence and stability of stochastic gradient methods.
-
-### Usage {#agg_functions-stochasticlinearregression-usage}
-
-`stochasticLinearRegression` is used in two steps: fitting the model and predicting on new data. In order to fit the model and save its state for later usage, we use the `-State` combinator, which saves the state (model weights, etc).
-To predict, we use the function [evalMLMethod](../functions/machine_learning_functions.md#machine_learning_methods-evalmlmethod), which takes a state as an argument as well as features to predict on.
-
-**1.** Fitting
-
-The following query may be used:
-
-``` sql
-CREATE TABLE IF NOT EXISTS train_data
-(
-    param1 Float64,
-    param2 Float64,
-    target Float64
-) ENGINE = Memory;
-
-CREATE TABLE your_model ENGINE = Memory AS SELECT
-stochasticLinearRegressionState(0.1, 0.0, 5, 'SGD')(target, param1, param2)
-AS state FROM train_data;
-```
-
-Here we also need to insert data into the `train_data` table. The number of parameters is not fixed; it depends only on the number of arguments passed into `stochasticLinearRegressionState`. They all must be numeric values.
-Note that the column with the target value (which we would like to learn to predict) is inserted as the first argument.
-
-**2.** Predicting
-
-After saving a state into the table, we may use it multiple times for prediction, or even merge it with other states to create new, even better models.
-
-``` sql
-WITH (SELECT state FROM your_model) AS model SELECT
-evalMLMethod(model, param1, param2) FROM test_data
-```
-
-The query will return a column of predicted values. Note that the first argument of `evalMLMethod` is an `AggregateFunctionState` object; the following arguments are columns of features.
-
-`test_data` is a table like `train_data`, but it does not have to contain the target value.
-
-### Notes {#agg_functions-stochasticlinearregression-notes}
-
-1. To merge two models, the user may create the following query:
-    `sql SELECT state1 + state2 FROM your_models`
-    where the `your_models` table contains both models. This query will return a new `AggregateFunctionState` object.
-
-2. The user may fetch the weights of the created model for their own purposes without saving the model if no `-State` combinator is used.
-    `sql SELECT stochasticLinearRegression(0.01)(target, param1, param2) FROM train_data`
-    This query will fit the model and return its weights: first the weights that correspond to the parameters of the model, and last the bias. So in the example above the query will return a column with 3 values.
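-
-For concreteness, a hedged end-to-end sketch using the tables defined above (the values are illustrative):
-
-``` sql
--- illustrative training rows: (param1, param2, target)
-INSERT INTO train_data VALUES (1.0, 2.0, 5.1), (2.0, 3.0, 8.0), (3.0, 4.0, 10.9);
-
--- fit as shown above, then predict on the training rows themselves
-WITH (SELECT state FROM your_model) AS model SELECT
-evalMLMethod(model, param1, param2) FROM train_data;
-```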
-
-**See Also**
-
-- [stochasticLogisticRegression](#agg_functions-stochasticlogisticregression)
-- [Difference between linear and logistic regressions](https://stackoverflow.com/questions/12146914/what-is-the-difference-between-linear-regression-and-logistic-regression)
-
-## stochasticLogisticRegression {#agg_functions-stochasticlogisticregression}
-
-This function implements stochastic logistic regression. It can be used for binary classification problems, supports the same custom parameters as stochasticLinearRegression and works the same way.
-
-### Parameters {#agg_functions-stochasticlogisticregression-parameters}
-
-Parameters are exactly the same as in stochasticLinearRegression:
-`learning rate`, `l2 regularization coefficient`, `mini-batch size`, `method for updating weights`.
-For more information see [parameters](#agg_functions-stochasticlinearregression-parameters).
-
-``` text
-stochasticLogisticRegression(1.0, 1.0, 10, 'SGD')
-```
-
-1. Fitting
-
-    See the `Fitting` section in the [stochasticLinearRegression](#agg_functions-stochasticlinearregression-usage) description.
-
-    Predicted labels have to be in \[-1, 1\].
-
-1. Predicting
-
-    Using the saved state, we can predict the probability of an object having the label `1`.
-
-    ``` sql
-    WITH (SELECT state FROM your_model) AS model SELECT
-    evalMLMethod(model, param1, param2) FROM test_data
-    ```
-
-    The query will return a column of probabilities. Note that the first argument of `evalMLMethod` is an `AggregateFunctionState` object; the following arguments are columns of features.
-
-    We can also set a probability bound, which assigns elements to different labels.
-
-    ``` sql
-    SELECT ans < 1.1 AND ans > 0.5 FROM
-    (WITH (SELECT state FROM your_model) AS model SELECT
-    evalMLMethod(model, param1, param2) AS ans FROM test_data)
-    ```
-
-    The result will then be labels.
-
-    `test_data` is a table like `train_data`, but it does not have to contain the target value.
-
-**See Also**
-
-- [stochasticLinearRegression](#agg_functions-stochasticlinearregression)
-- [Difference between linear and logistic regressions.](https://stackoverflow.com/questions/12146914/what-is-the-difference-between-linear-regression-and-logistic-regression)
-
-## groupBitmapAnd {#groupbitmapand}
-
-Calculates the AND of a bitmap column and returns the cardinality as a `UInt64` value; if the `-State` suffix is added, it returns a [bitmap object](../functions/bitmap_functions.md).
-
-``` sql
-groupBitmapAnd(expr)
-```
-
-**Parameters**
-
-`expr` – An expression that results in `AggregateFunction(groupBitmap, UInt*)` type.
-
-**Return value**
-
-Value of the `UInt64` type.
-
-**Example**
-
-``` sql
-DROP TABLE IF EXISTS bitmap_column_expr_test2;
-CREATE TABLE bitmap_column_expr_test2
-(
-    tag_id String,
-    z AggregateFunction(groupBitmap, UInt32)
-)
-ENGINE = MergeTree
-ORDER BY tag_id;
-
-INSERT INTO bitmap_column_expr_test2 VALUES ('tag1', bitmapBuild(cast([1,2,3,4,5,6,7,8,9,10] as Array(UInt32))));
-INSERT INTO bitmap_column_expr_test2 VALUES ('tag2', bitmapBuild(cast([6,7,8,9,10,11,12,13,14,15] as Array(UInt32))));
-INSERT INTO bitmap_column_expr_test2 VALUES ('tag3', bitmapBuild(cast([2,4,6,8,10,12] as Array(UInt32))));
-
-SELECT groupBitmapAnd(z) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%');
-┌─groupBitmapAnd(z)─┐
-│                 3 │
-└───────────────────┘
-
-SELECT arraySort(bitmapToArray(groupBitmapAndState(z))) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%');
-┌─arraySort(bitmapToArray(groupBitmapAndState(z)))─┐
-│ [6,8,10]                                         │
-└──────────────────────────────────────────────────┘
-```
-
-## groupBitmapOr {#groupbitmapor}
-
-Calculates the OR of a bitmap column and returns the cardinality as a `UInt64` value; if the `-State` suffix is added, it returns a [bitmap object](../functions/bitmap_functions.md). This is equivalent to `groupBitmapMerge`.
-
-``` sql
-groupBitmapOr(expr)
-```
-
-**Parameters**
-
-`expr` – An expression that results in `AggregateFunction(groupBitmap, UInt*)` type.
-
-**Return value**
-
-Value of the `UInt64` type.
-
-**Example**
-
-``` sql
-DROP TABLE IF EXISTS bitmap_column_expr_test2;
-CREATE TABLE bitmap_column_expr_test2
-(
-    tag_id String,
-    z AggregateFunction(groupBitmap, UInt32)
-)
-ENGINE = MergeTree
-ORDER BY tag_id;
-
-INSERT INTO bitmap_column_expr_test2 VALUES ('tag1', bitmapBuild(cast([1,2,3,4,5,6,7,8,9,10] as Array(UInt32))));
-INSERT INTO bitmap_column_expr_test2 VALUES ('tag2', bitmapBuild(cast([6,7,8,9,10,11,12,13,14,15] as Array(UInt32))));
-INSERT INTO bitmap_column_expr_test2 VALUES ('tag3', bitmapBuild(cast([2,4,6,8,10,12] as Array(UInt32))));
-
-SELECT groupBitmapOr(z) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%');
-┌─groupBitmapOr(z)─┐
-│               15 │
-└──────────────────┘
-
-SELECT arraySort(bitmapToArray(groupBitmapOrState(z))) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%');
-┌─arraySort(bitmapToArray(groupBitmapOrState(z)))─┐
-│ [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]           │
-└─────────────────────────────────────────────────┘
-```
-
-## groupBitmapXor {#groupbitmapxor}
-
-Calculates the XOR of a bitmap column and returns the cardinality as a `UInt64` value; if the `-State` suffix is added, it returns a [bitmap object](../functions/bitmap_functions.md).
-
-``` sql
-groupBitmapXor(expr)
-```
-
-**Parameters**
-
-`expr` – An expression that results in `AggregateFunction(groupBitmap, UInt*)` type.
-
-**Return value**
-
-Value of the `UInt64` type.
-
-**Example**
-
-``` sql
-DROP TABLE IF EXISTS bitmap_column_expr_test2;
-CREATE TABLE bitmap_column_expr_test2
-(
-    tag_id String,
-    z AggregateFunction(groupBitmap, UInt32)
-)
-ENGINE = MergeTree
-ORDER BY tag_id;
-
-INSERT INTO bitmap_column_expr_test2 VALUES ('tag1', bitmapBuild(cast([1,2,3,4,5,6,7,8,9,10] as Array(UInt32))));
-INSERT INTO bitmap_column_expr_test2 VALUES ('tag2', bitmapBuild(cast([6,7,8,9,10,11,12,13,14,15] as Array(UInt32))));
-INSERT INTO bitmap_column_expr_test2 VALUES ('tag3', bitmapBuild(cast([2,4,6,8,10,12] as Array(UInt32))));
-
-SELECT groupBitmapXor(z) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%');
-┌─groupBitmapXor(z)─┐
-│                10 │
-└───────────────────┘
-
-SELECT arraySort(bitmapToArray(groupBitmapXorState(z))) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%');
-┌─arraySort(bitmapToArray(groupBitmapXorState(z)))─┐
-│ [1,3,5,6,8,10,11,13,14,15]                       │
-└──────────────────────────────────────────────────┘
-```
-
-[Original article](https://clickhouse.tech/docs/en/query_language/agg_functions/reference/)
diff --git a/docs/fa/query_language/alter.md b/docs/fa/query_language/alter.md
deleted file mode 100644
index a2b05037315..00000000000
--- a/docs/fa/query_language/alter.md
+++ /dev/null
@@ -1,502 +0,0 @@
----
-en_copy: true
----
-
-## ALTER {#query_language_queries_alter}
-
-The `ALTER` query is only supported for `*MergeTree` tables, as well as `Merge` and `Distributed`. The query has several variations.
-
-### Column Manipulations {#column-manipulations}
-
-Changing the table structure.
-
-``` sql
-ALTER TABLE [db].name [ON CLUSTER cluster] ADD|DROP|CLEAR|COMMENT|MODIFY COLUMN ...
-```
-
-In the query, specify a list of one or more comma-separated actions.
-Each action is an operation on a column.
-
-The following actions are supported:
-
-- [ADD COLUMN](#alter_add-column) — Adds a new column to the table.
-- [DROP COLUMN](#alter_drop-column) — Deletes the column.
-- [CLEAR COLUMN](#alter_clear-column) — Resets column values.
-- [COMMENT COLUMN](#alter_comment-column) — Adds a text comment to the column.
-- [MODIFY COLUMN](#alter_modify-column) — Changes the column’s type, default expression and TTL.
-
-These actions are described in detail below.
-
-#### ADD COLUMN {#alter_add-column}
-
-``` sql
-ADD COLUMN [IF NOT EXISTS] name [type] [default_expr] [codec] [AFTER name_after]
-```
-
-Adds a new column to the table with the specified `name`, `type`, [`codec`](create.md#codecs) and `default_expr` (see the section [Default expressions](create.md#create-default-values)).
-
-If the `IF NOT EXISTS` clause is included, the query won’t return an error if the column already exists. If you specify `AFTER name_after` (the name of another column), the column is added after the specified one in the list of table columns. Otherwise, the column is added to the end of the table. Note that there is no way to add a column to the beginning of a table. For a chain of actions, `name_after` can be the name of a column that is added in one of the previous actions.
-
-Adding a column just changes the table structure, without performing any actions with data. The data doesn’t appear on the disk after `ALTER`. If the data is missing for a column when reading from the table, it is filled in with default values (by performing the default expression if there is one, or using zeros or empty strings). The column appears on the disk after merging data parts (see [MergeTree](../operations/table_engines/mergetree.md)).
-
-This approach allows us to complete the `ALTER` query instantly, without increasing the volume of old data.
-
-Example:
-
-``` sql
-ALTER TABLE visits ADD COLUMN browser String AFTER user_id
-```
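-
-Since `name_after` may reference a column added earlier in the same query, chained actions such as the following are also possible (a hedged sketch; the column names are illustrative):
-
-``` sql
-ALTER TABLE visits ADD COLUMN browser String, ADD COLUMN os String AFTER browser
-```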
-
-#### DROP COLUMN {#alter_drop-column}
-
-``` sql
-DROP COLUMN [IF EXISTS] name
-```
-
-Deletes the column with the name `name`. If the `IF EXISTS` clause is specified, the query won’t return an error if the column doesn’t exist.
-
-Deletes data from the file system. Since this deletes entire files, the query is completed almost instantly.
-
-Example:
-
-``` sql
-ALTER TABLE visits DROP COLUMN browser
-```
-
-#### CLEAR COLUMN {#alter_clear-column}
-
-``` sql
-CLEAR COLUMN [IF EXISTS] name IN PARTITION partition_name
-```
-
-Resets all data in a column for a specified partition. Read more about setting the partition name in the section [How to specify the partition expression](#alter-how-to-specify-part-expr).
-
-If the `IF EXISTS` clause is specified, the query won’t return an error if the column doesn’t exist.
-
-Example:
-
-``` sql
-ALTER TABLE visits CLEAR COLUMN browser IN PARTITION tuple()
-```
-
-#### COMMENT COLUMN {#alter_comment-column}
-
-``` sql
-COMMENT COLUMN [IF EXISTS] name 'comment'
-```
-
-Adds a comment to the column. If the `IF EXISTS` clause is specified, the query won’t return an error if the column doesn’t exist.
-
-Each column can have one comment. If a comment already exists for the column, a new comment overwrites the previous comment.
-
-Comments are stored in the `comment_expression` column returned by the [DESCRIBE TABLE](misc.md#misc-describe-table) query.
-
-Example:
-
-``` sql
-ALTER TABLE visits COMMENT COLUMN browser 'The table shows the browser used for accessing the site.'
-```
-
-#### MODIFY COLUMN {#alter_modify-column}
-
-``` sql
-MODIFY COLUMN [IF EXISTS] name [type] [default_expr] [TTL]
-```
-
-This query changes the `name` column properties:
-
-- Type
-
-- Default expression
-
-- TTL
-
-    For examples of modifying column TTL, see [Column TTL](../operations/table_engines/mergetree.md#mergetree-column-ttl).
-
-If the `IF EXISTS` clause is specified, the query won’t return an error if the column doesn’t exist.
-
-When changing the type, values are converted as if the [toType](functions/type_conversion_functions.md) functions were applied to them. If only the default expression is changed, the query doesn’t do anything complex, and is completed almost instantly.
-
-Example:
-
-``` sql
-ALTER TABLE visits MODIFY COLUMN browser Array(String)
-```
-
-Changing the column type is the only complex action – it changes the contents of files with data. For large tables, this may take a long time.
-
-There are several processing stages:
-
-- Preparing temporary (new) files with modified data.
-- Renaming old files.
-- Renaming the temporary (new) files to the old names.
-- Deleting the old files.
-
-Only the first stage takes time. If there is a failure at this stage, the data is not changed.
-If there is a failure during one of the successive stages, data can be restored manually. The exception is if the old files were deleted from the file system but the data for the new files did not get written to the disk and was lost.
-
-The `ALTER` query for changing columns is replicated. The instructions are saved in ZooKeeper, then each replica applies them. All `ALTER` queries are run in the same order. The query waits for the appropriate actions to be completed on the other replicas. However, a query to change columns in a replicated table can be interrupted, and all actions will be performed asynchronously.
-
-#### ALTER Query Limitations {#alter-query-limitations}
-
-The `ALTER` query lets you create and delete separate elements (columns) in nested data structures, but not whole nested data structures. To add a nested data structure, you can add columns with a name like `name.nested_name` and the type `Array(T)`. A nested data structure is equivalent to multiple array columns with a name that has the same prefix before the dot.
-
-There is no support for deleting columns in the primary key or the sampling key (columns that are used in the `ENGINE` expression). Changing the type for columns that are included in the primary key is only possible if this change does not cause the data to be modified (for example, you are allowed to add values to an Enum or to change a type from `DateTime` to `UInt32`).
-
-If the `ALTER` query is not sufficient to make the table changes you need, you can create a new table, copy the data to it using the [INSERT SELECT](insert_into.md#insert_query_insert-select) query, then switch the tables using the [RENAME](misc.md#misc_operations-rename) query and delete the old table. You can use the [clickhouse-copier](../operations/utils/clickhouse-copier.md) as an alternative to the `INSERT SELECT` query.
-
-The `ALTER` query blocks all reads and writes for the table. In other words, if a long `SELECT` is running at the time of the `ALTER` query, the `ALTER` query will wait for it to complete. At the same time, all new queries to the same table will wait while this `ALTER` is running.
-
-For tables that don’t store data themselves (such as `Merge` and `Distributed`), `ALTER` just changes the table structure, and does not change the structure of subordinate tables. For example, when running ALTER for a `Distributed` table, you will also need to run `ALTER` for the tables on all remote servers.
-
-### Manipulations With Key Expressions {#manipulations-with-key-expressions}
-
-The following command is supported:
-
-``` sql
-MODIFY ORDER BY new_expression
-```
-
-It only works for tables in the [`MergeTree`](../operations/table_engines/mergetree.md) family (including
-[replicated](../operations/table_engines/replication.md) tables). The command changes the
-[sorting key](../operations/table_engines/mergetree.md) of the table
-to `new_expression` (an expression or a tuple of expressions). The primary key remains the same.
-
-The command is lightweight in the sense that it only changes metadata. To keep the property that data part
-rows are ordered by the sorting key expression, you cannot add expressions containing existing columns
-to the sorting key (only columns added by the `ADD COLUMN` command in the same `ALTER` query).
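-
-For example, extending the sorting key with a column added in the same query (a hedged sketch; the table and column names are illustrative):
-
-``` sql
-ALTER TABLE hits ADD COLUMN browser String, MODIFY ORDER BY (CounterID, EventDate, browser)
-```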
-
-### Manipulations With Data Skipping Indices {#manipulations-with-data-skipping-indices}
-
-It only works for tables in the [`*MergeTree`](../operations/table_engines/mergetree.md) family (including
-[replicated](../operations/table_engines/replication.md) tables). The following operations
-are available:
-
-- `ALTER TABLE [db].name ADD INDEX name expression TYPE type GRANULARITY value AFTER name [AFTER name2]` - Adds the index description to the table’s metadata.
-
-- `ALTER TABLE [db].name DROP INDEX name` - Removes the index description from the table’s metadata and deletes the index files from disk.
-
-These commands are lightweight in the sense that they only change metadata or remove files.
-Also, they are replicated (syncing indices metadata through ZooKeeper).
-
-### Manipulations with constraints {#manipulations-with-constraints}
-
-See more on [constraints](create.md#constraints).
-
-Constraints can be added or deleted using the following syntax:
-
-``` sql
-ALTER TABLE [db].name ADD CONSTRAINT constraint_name CHECK expression;
-ALTER TABLE [db].name DROP CONSTRAINT constraint_name;
-```
-
-Queries will add or remove metadata about constraints from the table, so they are processed immediately.
-
-The constraint check *will not be executed* on existing data when a constraint is added.
-
-All changes on replicated tables are broadcast to ZooKeeper and will be applied on other replicas.
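-
-A hedged sketch (the constraint name and expression are illustrative):
-
-``` sql
-ALTER TABLE visits ADD CONSTRAINT non_empty_browser CHECK browser != '';
-ALTER TABLE visits DROP CONSTRAINT non_empty_browser;
-```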
-
-### Manipulations With Partitions and Parts {#alter_manipulations-with-partitions}
-
-The following operations with [partitions](../operations/table_engines/custom_partitioning_key.md) are available:
-
-- [DETACH PARTITION](#alter_detach-partition) – Moves a partition to the `detached` directory and forgets it.
-- [DROP PARTITION](#alter_drop-partition) – Deletes a partition.
-- [ATTACH PART\|PARTITION](#alter_attach-partition) – Adds a part or partition from the `detached` directory to the table.
-- [ATTACH PARTITION FROM](#alter_attach-partition-from) – Copies the data partition from one table to another and adds it.
-- [REPLACE PARTITION](#alter_replace-partition) – Copies the data partition from one table to another and replaces it.
-- [MOVE PARTITION TO TABLE](#alter_move_to_table-partition) – Moves the data partition from one table to another.
-- [CLEAR COLUMN IN PARTITION](#alter_clear-column-partition) – Resets the value of a specified column in a partition.
-- [CLEAR INDEX IN PARTITION](#alter_clear-index-partition) – Resets the specified secondary index in a partition.
-- [FREEZE PARTITION](#alter_freeze-partition) – Creates a backup of a partition.
-- [FETCH PARTITION](#alter_fetch-partition) – Downloads a partition from another server.
-- [MOVE PARTITION\|PART](#alter_move-partition) – Moves a partition/data part to another disk or volume.
-
-#### DETACH PARTITION {#alter_detach-partition}
-
-``` sql
-ALTER TABLE table_name DETACH PARTITION partition_expr
-```
-
-Moves all data for the specified partition to the `detached` directory. The server forgets about the detached data partition as if it does not exist. The server will not know about this data until you make the [ATTACH](#alter_attach-partition) query.
-
-Example:
-
-``` sql
-ALTER TABLE visits DETACH PARTITION 201901
-```
-
-Read about setting the partition expression in the section [How to specify the partition expression](#alter-how-to-specify-part-expr).
-
-After the query is executed, you can do whatever you want with the data in the `detached` directory — delete it from the file system, or just leave it.
-
-This query is replicated – it moves the data to the `detached` directory on all replicas. Note that you can execute this query only on a leader replica. To find out if a replica is a leader, perform the `SELECT` query to the [system.replicas](../operations/system_tables.md#system_tables-replicas) table. Alternatively, it is easier to make a `DETACH` query on all replicas - all the replicas throw an exception, except the leader replica.
-
-#### DROP PARTITION {#alter_drop-partition}
-
-``` sql
-ALTER TABLE table_name DROP PARTITION partition_expr
-```
-
-Deletes the specified partition from the table. This query tags the partition as inactive and deletes data completely, in approximately 10 minutes.
-
-Read about setting the partition expression in the section [How to specify the partition expression](#alter-how-to-specify-part-expr).
-
-The query is replicated – it deletes data on all replicas.
-
-#### DROP DETACHED PARTITION\|PART {#alter_drop-detached}
-
-``` sql
-ALTER TABLE table_name DROP DETACHED PARTITION|PART partition_expr
-```
-
-Removes the specified part or all parts of the specified partition from `detached`.
-Read more about setting the partition expression in the section [How to specify the partition expression](#alter-how-to-specify-part-expr).
-
-#### ATTACH PARTITION\|PART {#alter_attach-partition}
-
-``` sql
-ALTER TABLE table_name ATTACH PARTITION|PART partition_expr
-```
-
-Adds data to the table from the `detached` directory. It is possible to add data for an entire partition or for a separate part. Examples:
-
-``` sql
-ALTER TABLE visits ATTACH PARTITION 201901;
-ALTER TABLE visits ATTACH PART 201901_2_2_0;
-```
-
-Read more about setting the partition expression in the section [How to specify the partition expression](#alter-how-to-specify-part-expr).
-
-This query is replicated. The replica-initiator checks whether there is data in the `detached` directory. If data exists, the query checks its integrity. If everything is correct, the query adds the data to the table. All other replicas download the data from the replica-initiator.
-
-So you can put data to the `detached` directory on one replica, and use the `ALTER ... ATTACH` query to add it to the table on all replicas.
-
-#### ATTACH PARTITION FROM {#alter_attach-partition-from}
-
-``` sql
-ALTER TABLE table2 ATTACH PARTITION partition_expr FROM table1
-```
-
-This query copies the data partition from `table1` to `table2` and adds it to the existing data in `table2`. Note that data won’t be deleted from `table1`.
-
-For the query to run successfully, the following conditions must be met:
-
-- Both tables must have the same structure.
-- Both tables must have the same partition key.
-
-#### REPLACE PARTITION {#alter_replace-partition}
-
-``` sql
-ALTER TABLE table2 REPLACE PARTITION partition_expr FROM table1
-```
-
-This query copies the data partition from `table1` to `table2` and replaces the existing partition in `table2`. Note that data won’t be deleted from `table1`.
-
-For the query to run successfully, the following conditions must be met:
-
-- Both tables must have the same structure.
-- Both tables must have the same partition key.
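-
-For instance (a hedged sketch; the table names are illustrative):
-
-``` sql
-ALTER TABLE visits2 REPLACE PARTITION 201901 FROM visits
-```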
-
-#### MOVE PARTITION TO TABLE {#alter_move_to_table-partition}
-
-``` sql
-ALTER TABLE table_source MOVE PARTITION partition_expr TO TABLE table_dest
-```
-
-This query moves the data partition from `table_source` to `table_dest`, deleting the data from `table_source`.
-
-For the query to run successfully, the following conditions must be met:
-
-- Both tables must have the same structure.
-- Both tables must have the same partition key.
-- Both tables must be of the same engine family (replicated or non-replicated).
-- Both tables must have the same storage policy.
-
-#### CLEAR COLUMN IN PARTITION {#alter_clear-column-partition}
-
-``` sql
-ALTER TABLE table_name CLEAR COLUMN column_name IN PARTITION partition_expr
-```
-
-Resets all values in the specified column in a partition. If the `DEFAULT` clause was specified when creating the table, this query sets the column value to the specified default value.
-
-Example:
-
-``` sql
-ALTER TABLE visits CLEAR COLUMN hour in PARTITION 201902
-```
-
-#### FREEZE PARTITION {#alter_freeze-partition}
-
-``` sql
-ALTER TABLE table_name FREEZE [PARTITION partition_expr]
-```
-
-This query creates a local backup of a specified partition. If the `PARTITION` clause is omitted, the query creates the backup of all partitions at once.
-
-!!! note "Note"
-    The entire backup process is performed without stopping the server.
-
-Note that for old-styled tables you can specify the prefix of the partition name (for example, ‘2019’) - then the query creates the backup for all the corresponding partitions. Read about setting the partition expression in the section [How to specify the partition expression](#alter-how-to-specify-part-expr).
-
-At the time of execution, for a data snapshot, the query creates hardlinks to the table data. Hardlinks are placed in the directory `/var/lib/clickhouse/shadow/N/...`, where:
-
-- `/var/lib/clickhouse/` is the working ClickHouse directory specified in the config.
-- `N` is the incremental number of the backup.
-
-!!! note "Note"
-    If you use [a set of disks for data storage in a table](../operations/table_engines/mergetree.md#table_engine-mergetree-multiple-volumes), the `shadow/N` directory appears on every disk, storing the data parts that match the `PARTITION` expression.
-
-The same structure of directories is created inside the backup as inside `/var/lib/clickhouse/`. The query performs ‘chmod’ for all files, forbidding writing into them.
-
-After creating the backup, you can copy the data from `/var/lib/clickhouse/shadow/` to the remote server and then delete it from the local server. Note that the `ALTER t FREEZE PARTITION` query is not replicated. It creates a local backup only on the local server.
-
-The query creates the backup almost instantly (but first it waits for the current queries to the corresponding table to finish running).
-
-`ALTER TABLE t FREEZE PARTITION` copies only the data, not the table metadata. To make a backup of table metadata, copy the file `/var/lib/clickhouse/metadata/database/table.sql`.
-
-To restore data from a backup, do the following:
-
-1. Create the table if it does not exist. To view the query, use the .sql file (replace `ATTACH` in it with `CREATE`).
-2. Copy the data from the `data/database/table/` directory inside the backup to the `/var/lib/clickhouse/data/database/table/detached/` directory.
-3. Run `ALTER TABLE t ATTACH PARTITION` queries to add the data to a table.
-
-Restoring from a backup doesn’t require stopping the server.
-
-For more information about backups and restoring data, see the [Data Backup](../operations/backup.md) section.
-
-#### CLEAR INDEX IN PARTITION {#alter_clear-index-partition}
-
-``` sql
-ALTER TABLE table_name CLEAR INDEX index_name IN PARTITION partition_expr
-```
-
-The query works similarly to `CLEAR COLUMN`, but it resets an index instead of column data.
-
-#### FETCH PARTITION {#alter_fetch-partition}
-
-``` sql
-ALTER TABLE table_name FETCH PARTITION partition_expr FROM 'path-in-zookeeper'
-```
-
-Downloads a partition from another server. This query only works for replicated tables.
-
-The query does the following:
-
-1. Downloads the partition from the specified shard. In ‘path-in-zookeeper’ you must specify a path to the shard in ZooKeeper.
-2. Then the query puts the downloaded data to the `detached` directory of the `table_name` table. Use the [ATTACH PARTITION\|PART](#alter_attach-partition) query to add the data to the table.
-
-For example:
-
-``` sql
-ALTER TABLE users FETCH PARTITION 201902 FROM '/clickhouse/tables/01-01/visits';
-ALTER TABLE users ATTACH PARTITION 201902;
-```
-
-Note that:
-
-- The `ALTER ... FETCH PARTITION` query isn’t replicated. It places the partition in the `detached` directory only on the local server.
-- The `ALTER TABLE ... ATTACH` query is replicated. It adds the data to all replicas. The data is added to one of the replicas from the `detached` directory, and to the others - from neighboring replicas.
-
-Before downloading, the system checks if the partition exists and the table structure matches. The most appropriate replica is selected automatically from the healthy replicas.
-
-Although the query is called `ALTER TABLE`, it does not change the table structure and does not immediately change the data available in the table.
-
-#### MOVE PARTITION\|PART {#alter_move-partition}
-
-Moves partitions or data parts to another volume or disk for `MergeTree`-engine tables. See [Using Multiple Block Devices for Data Storage](../operations/table_engines/mergetree.md#table_engine-mergetree-multiple-volumes).
-
-``` sql
-ALTER TABLE table_name MOVE PARTITION|PART partition_expr TO DISK|VOLUME 'disk_name'
-```
-
-The `ALTER TABLE t MOVE` query:
-
-- Is not replicated, because different replicas can have different storage policies.
-- Returns an error if the specified disk or volume is not configured. The query also returns an error if the data moving conditions specified in the storage policy can’t be applied.
-- Can return an error when the data to be moved is already being moved by a background process, a concurrent `ALTER TABLE t MOVE` query, or as a result of background data merging. The user shouldn’t perform any additional actions in this case.
-
-Example:
-
-``` sql
-ALTER TABLE hits MOVE PART '20190301_14343_16206_438' TO VOLUME 'slow'
-ALTER TABLE hits MOVE PARTITION '2019-09-01' TO DISK 'fast_ssd'
-```
-
-#### How To Set Partition Expression {#alter-how-to-specify-part-expr}
-
-You can specify the partition expression in `ALTER ... PARTITION` queries in different ways:
-
-- As a value from the `partition` column of the `system.parts` table. For example, `ALTER TABLE visits DETACH PARTITION 201901`.
-- As an expression from the table column. Constants and constant expressions are supported. For example, `ALTER TABLE visits DETACH PARTITION toYYYYMM(toDate('2019-01-25'))`.
-- Using the partition ID. The partition ID is a string identifier of the partition (human-readable, if possible) that is used as the partition name in the file system and in ZooKeeper. The partition ID must be specified in the `PARTITION ID` clause, in single quotes. For example, `ALTER TABLE visits DETACH PARTITION ID '201901'`.
-- In the [ALTER ATTACH PART](#alter_attach-partition) and [DROP DETACHED PART](#alter_drop-detached) queries, to specify the name of a part, use a string literal with a value from the `name` column of the [system.detached\_parts](../operations/system_tables.md#system_tables-detached_parts) table. For example, `ALTER TABLE visits ATTACH PART '201901_1_1_0'`.
-
-Usage of quotes when specifying the partition depends on the type of the partition expression. For example, for the `String` type, you have to specify its name in quotes (`'`). For the `Date` and `Int*` types no quotes are needed.
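-
-A hedged illustration of the quoting rules (the table name is illustrative):
-
-``` sql
--- partition key of a Date/Int type: no quotes
-ALTER TABLE visits DETACH PARTITION 201901;
--- a partition ID is always a string: quotes required
-ALTER TABLE visits DETACH PARTITION ID '201901';
-```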
-
-For old-style tables, you can specify the partition either as a number `201901` or a string `'201901'`. The syntax for the new-style tables is stricter with types (similar to the parser for the VALUES input format).
-
-All the rules above are also true for the [OPTIMIZE](misc.md#misc_operations-optimize) query. If you need to specify the only partition when optimizing a non-partitioned table, set the expression `PARTITION tuple()`. For example:
-
-``` sql
-OPTIMIZE TABLE table_not_partitioned PARTITION tuple() FINAL;
-```
-
-The examples of `ALTER ... PARTITION` queries are demonstrated in the tests [`00502_custom_partitioning_local`](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00502_custom_partitioning_local.sql) and [`00502_custom_partitioning_replicated_zookeeper`](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper.sql).
-
-### Manipulations with Table TTL {#manipulations-with-table-ttl}
-
-You can change [table TTL](../operations/table_engines/mergetree.md#mergetree-table-ttl) with a query of the following form:
-
-``` sql
-ALTER TABLE table-name MODIFY TTL ttl-expression
-```
-
-### Synchronicity of ALTER Queries {#synchronicity-of-alter-queries}
-
-For non-replicated tables, all `ALTER` queries are performed synchronously. For replicated tables, the query just adds instructions for the appropriate actions to `ZooKeeper`, and the actions themselves are performed as soon as possible. However, the query can wait for these actions to be completed on all the replicas.
-
-For `ALTER ... ATTACH|DETACH|DROP` queries, you can use the `replication_alter_partitions_sync` setting to set up waiting.
-Possible values: `0` – do not wait; `1` – only wait for own execution (default); `2` – wait for all.
-
-### Mutations {#alter-mutations}
-
-Mutations are an ALTER query variant that allows changing or deleting rows in a table. In contrast to standard `UPDATE` and `DELETE` queries that are intended for point data changes, mutations are intended for heavy operations that change a lot of rows in a table. Supported for the `MergeTree` family of table engines, including the engines with replication support.
-
-Existing tables are ready for mutations as-is (no conversion necessary), but after the first mutation is applied to a table, its metadata format becomes incompatible with previous server versions and falling back to a previous version becomes impossible.
-
-Currently available commands:
-
-``` sql
-ALTER TABLE [db.]table DELETE WHERE filter_expr
-```
-
-The `filter_expr` must be of type `UInt8`. The query deletes rows in the table for which this expression takes a non-zero value.
-
-``` sql
-ALTER TABLE [db.]table UPDATE column1 = expr1 [, ...] WHERE filter_expr
-```
-
-The `filter_expr` must be of type `UInt8`. This query updates values of the specified columns to the values of the corresponding expressions in rows for which the `filter_expr` takes a non-zero value. Values are cast to the column type using the `CAST` operator. Updating columns that are used in the calculation of the primary or the partition key is not supported.
-
-``` sql
-ALTER TABLE [db.]table MATERIALIZE INDEX name IN PARTITION partition_name
-```
-
-The query rebuilds the secondary index `name` in the partition `partition_name`.
-
-One query can contain several commands separated by commas.
-
-For \*MergeTree tables mutations are executed by rewriting whole data parts.
There is no atomicity - parts are substituted for mutated parts as soon as they are ready, and a `SELECT` query that started executing during a mutation will see data from parts that have already been mutated along with data from parts that have not been mutated yet.
-
-Mutations are totally ordered by their creation order and are applied to each part in that order. Mutations are also partially ordered with INSERTs - data that was inserted into the table before the mutation was submitted will be mutated and data that was inserted after that will not be mutated. Note that mutations do not block INSERTs in any way.
-
-A mutation query returns immediately after the mutation entry is added (for replicated tables, to ZooKeeper; for non-replicated tables, to the filesystem). The mutation itself executes asynchronously using the system profile settings. To track the progress of mutations you can use the [`system.mutations`](../operations/system_tables.md#system_tables-mutations) table. A mutation that was successfully submitted will continue to execute even if ClickHouse servers are restarted. There is no way to roll back the mutation once it is submitted, but if the mutation is stuck for some reason it can be cancelled with the [`KILL MUTATION`](misc.md#kill-mutation) query.
-
-Entries for finished mutations are not deleted right away (the number of preserved entries is determined by the `finished_mutations_to_keep` storage engine parameter). Older mutation entries are deleted.
-
-[Original article](https://clickhouse.tech/docs/en/query_language/alter/)
diff --git a/docs/fa/query_language/create.md b/docs/fa/query_language/create.md deleted file mode 100644 index 6099b0e4641..00000000000 --- a/docs/fa/query_language/create.md +++ /dev/null @@ -1,306 +0,0 @@
----
-en_copy: true
----
-
-# CREATE Queries {#create-queries}
-
-## CREATE DATABASE {#query-language-create-database}
-
-Creates a database.
-
-``` sql
-CREATE DATABASE [IF NOT EXISTS] db_name [ON CLUSTER cluster] [ENGINE = engine(...)]
-```
-
-### Clauses {#clauses}
-
-- `IF NOT EXISTS`
-
-    If the `db_name` database already exists, then ClickHouse doesn't create a new database and:
-
-    - Doesn't throw an exception if the clause is specified.
-    - Throws an exception if the clause isn't specified.
-
-- `ON CLUSTER`
-
-    ClickHouse creates the `db_name` database on all the servers of a specified cluster.
-
-- `ENGINE`
-
-    - [MySQL](../database_engines/mysql.md)
-
-        Allows you to retrieve data from the remote MySQL server.
-
-        By default, ClickHouse uses its own [database engine](../database_engines/index.md).
-
-## CREATE TABLE {#create-table-query}
-
-The `CREATE TABLE` query can have several forms.
-
-``` sql
-CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
-(
-    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [compression_codec] [TTL expr1],
-    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2] [compression_codec] [TTL expr2],
-    ...
-) ENGINE = engine
-```
-
-Creates a table named ‘name’ in the ‘db’ database, or the current database if ‘db’ is not set, with the structure specified in brackets and the ‘engine’ engine.
-The structure of the table is a list of column descriptions. If indexes are supported by the engine, they are indicated as parameters for the table engine.
-
-A column description is `name type` in the simplest case. Example: `RegionID UInt32`.
-Expressions can also be defined for default values (see below).
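-
-For instance, a minimal sketch of this form (all names here are hypothetical):
-
-``` sql
-CREATE TABLE IF NOT EXISTS visits_sketch
-(
-    RegionID UInt32,
-    EventDate Date,
-    UserAgent String DEFAULT ''
-)
-ENGINE = MergeTree()
-ORDER BY (EventDate, RegionID)
-```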
-
-``` sql
-CREATE TABLE [IF NOT EXISTS] [db.]table_name AS [db2.]name2 [ENGINE = engine]
-```
-
-Creates a table with the same structure as another table. You can specify a different engine for the table. If the engine is not specified, the same engine will be used as for the `db2.name2` table.
-
-``` sql
-CREATE TABLE [IF NOT EXISTS] [db.]table_name AS table_function()
-```
-
-Creates a table with the structure and data returned by a [table function](table_functions/index.md).
-
-``` sql
-CREATE TABLE [IF NOT EXISTS] [db.]table_name ENGINE = engine AS SELECT ...
-```
-
-Creates a table with a structure like the result of the `SELECT` query, with the ‘engine’ engine, and fills it with data from SELECT.
-
-In all cases, if `IF NOT EXISTS` is specified, the query won’t return an error if the table already exists. In this case, the query won’t do anything.
-
-There can be other clauses after the `ENGINE` clause in the query. See detailed documentation on how to create tables in the descriptions of [table engines](../operations/table_engines/index.md#table_engines).
-
-### Default Values {#create-default-values}
-
-The column description can specify an expression for a default value, in one of the following ways: `DEFAULT expr`, `MATERIALIZED expr`, `ALIAS expr`.
-Example: `URLDomain String DEFAULT domain(URL)`.
-
-If an expression for the default value is not defined, the default values will be set to zeros for numbers, empty strings for strings, empty arrays for arrays, and `0000-00-00` for dates or `0000-00-00 00:00:00` for dates with time. NULLs are not supported.
-
-If the default expression is defined, the column type is optional. If there isn’t an explicitly defined type, the default expression type is used. Example: `EventDate DEFAULT toDate(EventTime)` – the ‘Date’ type will be used for the ‘EventDate’ column.
-
-If the data type and default expression are defined explicitly, this expression will be cast to the specified type using type casting functions. Example: `Hits UInt32 DEFAULT 0` means the same thing as `Hits UInt32 DEFAULT toUInt32(0)`.
-
-Default expressions may be defined as an arbitrary expression from table constants and columns. When creating and changing the table structure, ClickHouse checks that expressions don’t contain loops. For INSERT, it checks that expressions are resolvable – that all columns they can be calculated from have been passed.
-
-`DEFAULT expr`
-
-Normal default value. If the INSERT query doesn’t specify the corresponding column, it will be filled in by computing the corresponding expression.
-
-`MATERIALIZED expr`
-
-Materialized expression. Such a column can’t be specified for INSERT, because it is always calculated.
-For an INSERT without a list of columns, these columns are not considered.
-In addition, this column is not substituted when using an asterisk in a SELECT query. This is to preserve the invariant that the dump obtained using `SELECT *` can be inserted back into the table using INSERT without specifying the list of columns.
-
-`ALIAS expr`
-
-Synonym. Such a column isn’t stored in the table at all.
-Its values can’t be inserted in a table, and it is not substituted when using an asterisk in a SELECT query.
-It can be used in SELECTs if the alias is expanded during query parsing.
-
-When using the ALTER query to add new columns, old data for these columns is not written. Instead, when reading old data that does not have values for the new columns, expressions are computed on the fly by default.
However, if computing the expressions requires other columns that are not indicated in the query, these columns will additionally be read, but only for the blocks of data that need it.
-
-If you add a new column to a table but later change its default expression, the values used for old data will change (for data where values were not stored on the disk). Note that when running background merges, data for columns that are missing in one of the merging parts is written to the merged part.
-
-It is not possible to set default values for elements in nested data structures.
-
-### Constraints {#constraints}
-
-Along with column descriptions, constraints can be defined:
-
-``` sql
-CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
-(
-    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [compression_codec] [TTL expr1],
-    ...
-    CONSTRAINT constraint_name_1 CHECK boolean_expr_1,
-    ...
-) ENGINE = engine
-```
-
-`boolean_expr_1` can be any boolean expression. If constraints are defined for the table, each of them is checked for every row of an `INSERT` query. If any constraint is not satisfied, the server raises an exception with the constraint name and the checked expression.
-
-Adding a large number of constraints can negatively affect the performance of big `INSERT` queries.
-
-### TTL Expression {#ttl-expression}
-
-Defines storage time for values. Can be specified only for MergeTree-family tables. For the detailed description, see [TTL for columns and tables](../operations/table_engines/mergetree.md#table_engine-mergetree-ttl).
-
-### Column Compression Codecs {#codecs}
-
-By default, ClickHouse applies the `lz4` compression method. For the `MergeTree`-engine family you can change the default compression method in the [compression](../operations/server_settings/settings.md#server-settings-compression) section of a server configuration. You can also define the compression method for each individual column in the `CREATE TABLE` query.
-
-``` sql
-CREATE TABLE codec_example
-(
-    dt Date CODEC(ZSTD),
-    ts DateTime CODEC(LZ4HC),
-    float_value Float32 CODEC(NONE),
-    double_value Float64 CODEC(LZ4HC(9)),
-    value Float32 CODEC(Delta, ZSTD)
-)
-ENGINE = <Engine>
-...
-```
-
-If a codec is specified, the default codec doesn’t apply. Codecs can be combined in a pipeline, for example, `CODEC(Delta, ZSTD)`. To select the best codec combination for your project, run benchmarks similar to those described in the Altinity [New Encodings to Improve ClickHouse Efficiency](https://www.altinity.com/blog/2019/7/new-encodings-to-improve-clickhouse) article.
-
-!!! warning "Warning"
-    You can’t decompress ClickHouse database files with external utilities like `lz4`. Instead, use the special [clickhouse-compressor](https://github.com/ClickHouse/ClickHouse/tree/master/programs/compressor) utility.
-
-Compression is supported for the following table engines:
-
-- [MergeTree](../operations/table_engines/mergetree.md) family. Supports column compression codecs and selecting the default compression method by [compression](../operations/server_settings/settings.md#server-settings-compression) settings.
-- [Log](../operations/table_engines/log_family.md) family. Uses the `lz4` compression method by default and supports column compression codecs.
-- [Set](../operations/table_engines/set.md). Only the default compression is supported.
-- [Join](../operations/table_engines/join.md). Only the default compression is supported.
-
-ClickHouse supports common purpose codecs and specialized codecs.
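-
-As a quick check, the codec actually assigned to each column can be inspected through the `system.columns` table. A sketch, assuming the `codec_example` table above has been created (the `compression_codec` column is empty for columns that use the default codec):
-
-``` sql
-SELECT name, compression_codec
-FROM system.columns
-WHERE table = 'codec_example'
-```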
-
-#### Specialized Codecs {#create-query-specialized-codecs}
-
-These codecs are designed to make compression more effective by using specific features of data. Some of these codecs don’t compress data themselves. Instead, they prepare the data for a common purpose codec, which compresses it better than without this preparation.
-
-Specialized codecs:
-
-- `Delta(delta_bytes)` — Compression approach in which raw values are replaced by the difference of two neighboring values, except for the first value that stays unchanged. Up to `delta_bytes` are used for storing delta values, so `delta_bytes` is the maximum size of raw values. Possible `delta_bytes` values: 1, 2, 4, 8. The default value for `delta_bytes` is `sizeof(type)` if equal to 1, 2, 4, or 8. In all other cases, it’s 1.
-- `DoubleDelta` — Calculates the delta of deltas and writes it in compact binary form. Optimal compression rates are achieved for monotonic sequences with a constant stride, such as time series data. Can be used with any fixed-width type. Implements the algorithm used in Gorilla TSDB, extending it to support 64-bit types. Uses 1 extra bit for 32-byte deltas: 5-bit prefixes instead of 4-bit prefixes. For additional information, see Compressing Time Stamps in [Gorilla: A Fast, Scalable, In-Memory Time Series Database](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf).
-- `Gorilla` — Calculates XOR between the current and previous value and writes it in compact binary form. Efficient when storing a series of floating point values that change slowly, because the best compression rate is achieved when neighboring values are binary equal. Implements the algorithm used in Gorilla TSDB, extending it to support 64-bit types. For additional information, see Compressing Values in [Gorilla: A Fast, Scalable, In-Memory Time Series Database](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf).
-- `T64` — Compression approach that crops unused high bits of values in integer data types (including `Enum`, `Date` and `DateTime`). At each step of its algorithm, the codec takes a block of 64 values, puts them into a 64x64 bit matrix, transposes it, crops the unused bits of the values and returns the rest as a sequence. Unused bits are the bits that don’t differ between the maximum and minimum values in the whole data part for which the compression is used.
-
-`DoubleDelta` and `Gorilla` codecs are used in Gorilla TSDB as the components of its compressing algorithm. The Gorilla approach is effective in scenarios where there is a sequence of slowly changing values with their timestamps. Timestamps are effectively compressed by the `DoubleDelta` codec, and values are effectively compressed by the `Gorilla` codec. For example, to get an effectively stored table, you can create it in the following configuration:
-
-``` sql
-CREATE TABLE codec_example
-(
-    timestamp DateTime CODEC(DoubleDelta),
-    slow_values Float32 CODEC(Gorilla)
-)
-ENGINE = MergeTree()
-```
-
-#### Common purpose codecs {#create-query-common-purpose-codecs}
-
-Codecs:
-
-- `NONE` — No compression.
-- `LZ4` — Lossless [data compression algorithm](https://github.com/lz4/lz4) used by default. Applies LZ4 fast compression.
-- `LZ4HC[(level)]` — LZ4 HC (high compression) algorithm with configurable level. Default level: 9. Setting `level <= 0` applies the default level. Possible levels: \[1, 12\]. Recommended level range: \[4, 9\].
-- `ZSTD[(level)]` — [ZSTD compression algorithm](https://en.wikipedia.org/wiki/Zstandard) with configurable `level`. Possible levels: \[1, 22\]. Default value: 1.
-
-High compression levels are useful for asymmetric scenarios, like compress once, decompress repeatedly. Higher levels mean better compression and higher CPU usage.
-
-## Temporary Tables {#temporary-tables}
-
-ClickHouse supports temporary tables which have the following characteristics:
-
-- Temporary tables disappear when the session ends, including if the connection is lost.
-- A temporary table uses the Memory engine only.
-- The DB can’t be specified for a temporary table. It is created outside of databases.
-- It is impossible to create a temporary table with a distributed DDL query on all cluster servers (by using `ON CLUSTER`): this table exists only in the current session.
-- If a temporary table has the same name as another one and a query specifies the table name without specifying the DB, the temporary table will be used.
-- For distributed query processing, temporary tables used in a query are passed to remote servers.
-
-To create a temporary table, use the following syntax:
-
-``` sql
-CREATE TEMPORARY TABLE [IF NOT EXISTS] table_name
-(
-    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
-    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
-    ...
-)
-```
-
-In most cases, temporary tables are not created manually, but when using external data for a query, or for distributed `(GLOBAL) IN`. For more information, see the appropriate sections.
-
-It’s possible to use tables with [ENGINE = Memory](../operations/table_engines/memory.md) instead of temporary tables.
-
-## Distributed DDL queries (ON CLUSTER clause) {#distributed-ddl-queries-on-cluster-clause}
-
-The `CREATE`, `DROP`, `ALTER`, and `RENAME` queries support distributed execution on a cluster.
-For example, the following query creates the `all_hits` `Distributed` table on each host in `cluster`:
-
-``` sql
-CREATE TABLE IF NOT EXISTS all_hits ON CLUSTER cluster (p Date, i Int32) ENGINE = Distributed(cluster, default, hits)
-```
-
-In order to run these queries correctly, each host must have the same cluster definition (to simplify syncing configs, you can use substitutions from ZooKeeper). They must also connect to the ZooKeeper servers.
-The local version of the query will eventually be executed on each host in the cluster, even if some hosts are currently not available. The order for executing queries within a single host is guaranteed.
-
-## CREATE VIEW {#create-view}
-
-``` sql
-CREATE [MATERIALIZED] VIEW [IF NOT EXISTS] [db.]table_name [TO[db.]name] [ENGINE = engine] [POPULATE] AS SELECT ...
-```
-
-Creates a view. There are two types of views: normal and MATERIALIZED.
-
-Normal views don’t store any data, but just perform a read from another table. In other words, a normal view is nothing more than a saved query. When reading from a view, this saved query is used as a subquery in the FROM clause.
-
-As an example, assume you’ve created a view:
-
-``` sql
-CREATE VIEW view AS SELECT ...
-```
-
-and written a query:
-
-``` sql
-SELECT a, b, c FROM view
-```
-
-This query is fully equivalent to using the subquery:
-
-``` sql
-SELECT a, b, c FROM (SELECT ...)
-```
-
-Materialized views store data transformed by the corresponding SELECT query.
-
-When creating a materialized view without `TO [db].[table]`, you must specify ENGINE – the table engine for storing data.
-
-When creating a materialized view with `TO [db].[table]`, you must not use `POPULATE`.
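-
-A minimal sketch of both forms (all table and view names here are hypothetical):
-
-``` sql
-CREATE MATERIALIZED VIEW daily_hits_mv
-ENGINE = SummingMergeTree()
-ORDER BY EventDate
-AS SELECT EventDate, count() AS Hits FROM hits_src GROUP BY EventDate;
-
-CREATE MATERIALIZED VIEW daily_hits_to_mv TO default.daily_hits
-AS SELECT EventDate, count() AS Hits FROM hits_src GROUP BY EventDate;
-```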
-
-A materialized view is arranged as follows: when inserting data to the table specified in SELECT, part of the inserted data is converted by this SELECT query, and the result is inserted in the view.
-
-If you specify POPULATE, the existing table data is inserted in the view when creating it, as if making a `CREATE TABLE ... AS SELECT ...` query. Otherwise, the query contains only the data inserted in the table after creating the view. We don’t recommend using POPULATE, since data inserted in the table during the view creation will not be inserted in it.
-
-A `SELECT` query can contain `DISTINCT`, `GROUP BY`, `ORDER BY`, `LIMIT`… Note that the corresponding conversions are performed independently on each block of inserted data. For example, if `GROUP BY` is set, data is aggregated during insertion, but only within a single packet of inserted data. The data won’t be further aggregated. The exception is when using an ENGINE that independently performs data aggregation, such as `SummingMergeTree`.
-
-The execution of `ALTER` queries on materialized views has not been fully developed, so they might be inconvenient. If the materialized view uses the construction `TO [db.]name`, you can `DETACH` the view, run `ALTER` for the target table, and then `ATTACH` the previously detached view.
-
-Views look the same as normal tables. For example, they are listed in the result of the `SHOW TABLES` query.
-
-There isn’t a separate query for deleting views. To delete a view, use `DROP TABLE`.
-
-## CREATE DICTIONARY {#create-dictionary-query}
-
-``` sql
-CREATE DICTIONARY [IF NOT EXISTS] [db.]dictionary_name [ON CLUSTER cluster]
-(
-    key1 type1  [DEFAULT|EXPRESSION expr1] [HIERARCHICAL|INJECTIVE|IS_OBJECT_ID],
-    key2 type2  [DEFAULT|EXPRESSION expr2] [HIERARCHICAL|INJECTIVE|IS_OBJECT_ID],
-    attr1 type2 [DEFAULT|EXPRESSION expr3],
-    attr2 type2 [DEFAULT|EXPRESSION expr4]
-)
-PRIMARY KEY key1, key2
-SOURCE(SOURCE_NAME([param1 value1 ... paramN valueN]))
-LAYOUT(LAYOUT_NAME([param_name param_value]))
-LIFETIME([MIN val1] MAX val2)
-```
-
-Creates an [external dictionary](dicts/external_dicts.md) with the given [structure](dicts/external_dicts_dict_structure.md), [source](dicts/external_dicts_dict_sources.md), [layout](dicts/external_dicts_dict_layout.md) and [lifetime](dicts/external_dicts_dict_lifetime.md).
-
-The external dictionary structure consists of attributes. Dictionary attributes are specified similarly to table columns. The only required attribute property is its type; all other properties may have default values.
-
-Depending on the dictionary [layout](dicts/external_dicts_dict_layout.md), one or more attributes can be specified as dictionary keys.
-
-For more information, see the [External Dictionaries](dicts/external_dicts.md) section.
-
-[Original article](https://clickhouse.tech/docs/en/query_language/create/)
diff --git a/docs/fa/query_language/dicts/external_dicts.md b/docs/fa/query_language/dicts/external_dicts.md deleted file mode 100644 index ef41a48f95f..00000000000 --- a/docs/fa/query_language/dicts/external_dicts.md +++ /dev/null @@ -1,53 +0,0 @@
----
-en_copy: true
----
-
-# External Dictionaries {#dicts-external-dicts}
-
-You can add your own dictionaries from various data sources. The data source for a dictionary can be a local text or executable file, an HTTP(s) resource, or another DBMS. For more information, see “[Sources for external dictionaries](external_dicts_dict_sources.md)”.
-
-ClickHouse:
-
-- Fully or partially stores dictionaries in RAM.
-- Periodically updates dictionaries and dynamically loads missing values. In other words, dictionaries can be loaded dynamically.
-- Allows creating external dictionaries with xml files or [DDL queries](../create.md#create-dictionary-query).
-
-The configuration of external dictionaries can be located in one or more xml-files. The path to the configuration is specified in the [dictionaries\_config](../../operations/server_settings/settings.md#server_settings-dictionaries_config) parameter.
-
-Dictionaries can be loaded at server startup or at first use, depending on the [dictionaries\_lazy\_load](../../operations/server_settings/settings.md#server_settings-dictionaries_lazy_load) setting.
-
-The dictionary configuration file has the following format:
-
-``` xml
-<yandex>
-    <comment>An optional element with any content. Ignored by the ClickHouse server.</comment>
-
-    <!-- Optional element. File name with substitutions -->
-    <include_from>/etc/metrika.xml</include_from>
-
-    <dictionary>
-        <!-- Dictionary configuration. -->
-        <!-- There can be any number of <dictionary> sections in a configuration file. -->
-    </dictionary>
-</yandex>
-```
-
-You can [configure](external_dicts_dict.md) any number of dictionaries in the same file.
-
-[DDL queries for dictionaries](../create.md#create-dictionary-query) don’t require any additional records in the server configuration. They allow working with dictionaries as first-class entities, like tables or views.
-
-!!! attention "Attention"
-    You can convert values for a small dictionary by describing it in a `SELECT` query (see the [transform](../functions/other_functions.md) function). This functionality is not related to external dictionaries.
-
-## See also {#ext-dicts-see-also}
-
-- [Configuring an External Dictionary](external_dicts_dict.md)
-- [Storing Dictionaries in Memory](external_dicts_dict_layout.md)
-- [Dictionary Updates](external_dicts_dict_lifetime.md)
-- [Sources of External Dictionaries](external_dicts_dict_sources.md)
-- [Dictionary Key and Fields](external_dicts_dict_structure.md)
-- [Functions for Working with External Dictionaries](../functions/ext_dict_functions.md)
-
-[Original article](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts/)
diff --git a/docs/fa/query_language/dicts/external_dicts_dict.md b/docs/fa/query_language/dicts/external_dicts_dict.md deleted file mode 100644 index 0519cd381f4..00000000000 --- a/docs/fa/query_language/dicts/external_dicts_dict.md +++ /dev/null @@ -1,50 +0,0 @@
----
-en_copy: true
----
-
-# Configuring an External Dictionary {#dicts-external-dicts-dict}
-
-If the dictionary is configured using an xml file, then the dictionary configuration has the following structure:
-
-``` xml
-<dictionary>
-    <name>dict_name</name>
-
-    <structure>
-      <!-- Complex key configuration -->
-    </structure>
-
-    <source>
-      <!-- Source configuration -->
-    </source>
-
-    <layout>
-      <!-- Memory layout configuration -->
-    </layout>
-
-    <lifetime>
-      <!-- Lifetime of dictionary in memory -->
-    </lifetime>
-</dictionary>
-```
-
-The corresponding [DDL-query](../create.md#create-dictionary-query) has the following structure:
-
-``` sql
-CREATE DICTIONARY dict_name
-(
-    ... -- attributes
-)
-PRIMARY KEY ... -- complex or single key configuration
-SOURCE(...) -- Source configuration
-LAYOUT(...) -- Memory layout configuration
-LIFETIME(...) -- Lifetime of dictionary in memory
-```
-
-- `name` – The identifier that can be used to access the dictionary. Use the characters `[a-zA-Z0-9_\-]`.
-- [source](external_dicts_dict_sources.md) — Source of the dictionary.
-- [layout](external_dicts_dict_layout.md) — Dictionary layout in memory.
-- [structure](external_dicts_dict_structure.md) — Structure of the dictionary. A key and attributes that can be retrieved by this key.
-- [lifetime](external_dicts_dict_lifetime.md) — Frequency of dictionary updates.
-
-A combined example is sketched below.
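-
-Putting these clauses together, a complete hypothetical DDL example might look like this (the dictionary name, attributes, and source parameters are illustrative assumptions, not taken from this page):
-
-``` sql
-CREATE DICTIONARY sketch_dict
-(
-    id UInt64,
-    value String DEFAULT ''
-)
-PRIMARY KEY id
-SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' PASSWORD '' DB 'default' TABLE 'dict_source'))
-LAYOUT(FLAT())
-LIFETIME(MIN 300 MAX 360)
-```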
-
-[Original article](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict/)
diff --git a/docs/fa/query_language/dicts/external_dicts_dict_hierarchical.md b/docs/fa/query_language/dicts/external_dicts_dict_hierarchical.md deleted file mode 100644 index 1a1232f95cd..00000000000 --- a/docs/fa/query_language/dicts/external_dicts_dict_hierarchical.md +++ /dev/null @@ -1,67 +0,0 @@
----
-en_copy: true
----
-
-# Hierarchical Dictionaries {#hierarchical-dictionaries}
-
-ClickHouse supports hierarchical dictionaries with a [numeric key](external_dicts_dict_structure.md#ext_dict-numeric-key).
-
-Look at the following hierarchical structure:
-
-``` text
-0 (Common parent)
-│
-├── 1 (Russia)
-│   │
-│   └── 2 (Moscow)
-│       │
-│       └── 3 (Center)
-│
-└── 4 (Great Britain)
-    │
-    └── 5 (London)
-```
-
-This hierarchy can be expressed as the following dictionary table.
-
-| region\_id | parent\_region | region\_name  |
-|------------|----------------|---------------|
-| 1          | 0              | Russia        |
-| 2          | 1              | Moscow        |
-| 3          | 2              | Center        |
-| 4          | 0              | Great Britain |
-| 5          | 4              | London        |
-
-This table contains a column `parent_region` that contains the key of the nearest parent for the element.
-
-ClickHouse supports the [hierarchical](external_dicts_dict_structure.md#hierarchical-dict-attr) property for [external dictionary](index.md) attributes. This property allows you to configure a hierarchical dictionary similar to the one described above.
-
-The [dictGetHierarchy](../functions/ext_dict_functions.md#dictgethierarchy) function allows you to get the parent chain of an element.
-
-For our example, the structure of the dictionary can be the following:
-
-``` xml
-<dictionary>
-    <structure>
-        <id>
-            <name>region_id</name>
-        </id>
-
-        <attribute>
-            <name>parent_region</name>
-            <type>UInt64</type>
-            <null_value>0</null_value>
-            <hierarchical>true</hierarchical>
-        </attribute>
-
-        <attribute>
-            <name>region_name</name>
-            <type>String</type>
-            <null_value></null_value>
-        </attribute>
-
-    </structure>
-</dictionary>
-```
-
-[Original article](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_hierarchical/)
diff --git a/docs/fa/query_language/dicts/external_dicts_dict_layout.md b/docs/fa/query_language/dicts/external_dicts_dict_layout.md deleted file mode 100644 index c6aa101da46..00000000000 --- a/docs/fa/query_language/dicts/external_dicts_dict_layout.md +++ /dev/null @@ -1,370 +0,0 @@
----
-en_copy: true
----
-
-# Storing Dictionaries in Memory {#dicts-external-dicts-dict-layout}
-
-There are a variety of ways to store dictionaries in memory.
-
-We recommend [flat](#flat), [hashed](#dicts-external_dicts_dict_layout-hashed) and [complex\_key\_hashed](#complex-key-hashed), which provide optimal processing speed.
-
-Caching is not recommended because of potentially poor performance and difficulties in selecting optimal parameters. Read more in the section “[cache](#cache)”.
-
-There are several ways to improve dictionary performance:
-
-- Call the function for working with the dictionary after `GROUP BY`.
-- Mark attributes to extract as injective. An attribute is called injective if different attribute values correspond to different keys. So when `GROUP BY` uses a function that fetches an attribute value by the key, this function is automatically taken out of `GROUP BY`.
-
-ClickHouse generates an exception for errors with dictionaries. Examples of errors:
-
-- The dictionary being accessed could not be loaded.
-- Error querying a `cached` dictionary.
-
-You can view the list of external dictionaries and their statuses in the `system.dictionaries` table.
-
-The configuration looks like this:
-
-``` xml
-<yandex>
-    <dictionary>
-        ...
-        <layout>
-            <layout_type>
-                <!-- layout settings -->
-            </layout_type>
-        </layout>
-        ...
-    </dictionary>
-</yandex>
-```
-
-The corresponding [DDL-query](../create.md#create-dictionary-query):
-
-``` sql
-CREATE DICTIONARY (...)
-...
-LAYOUT(LAYOUT_TYPE(param value)) -- layout settings
-...
-```
-
-## Ways to Store Dictionaries in Memory {#ways-to-store-dictionaries-in-memory}
-
-- [flat](#flat)
-- [hashed](#dicts-external_dicts_dict_layout-hashed)
-- [sparse\_hashed](#dicts-external_dicts_dict_layout-sparse_hashed)
-- [cache](#cache)
-- [range\_hashed](#range-hashed)
-- [complex\_key\_hashed](#complex-key-hashed)
-- [complex\_key\_cache](#complex-key-cache)
-- [ip\_trie](#ip-trie)
-
-### flat {#flat}
-
-The dictionary is completely stored in memory in the form of flat arrays. How much memory does the dictionary use? The amount is proportional to the size of the largest key (in space used).
-
-The dictionary key has the `UInt64` type and the value is limited to 500,000. If a larger key is discovered when creating the dictionary, ClickHouse throws an exception and does not create the dictionary.
-
-All types of sources are supported. When updating, data (from a file or from a table) is read in its entirety.
-
-This method provides the best performance among all available methods of storing the dictionary.
-
-Configuration example:
-
-``` xml
-<layout>
-  <flat />
-</layout>
-```
-
-or
-
-``` sql
-LAYOUT(FLAT())
-```
-
-### hashed {#dicts-external_dicts_dict_layout-hashed}
-
-The dictionary is completely stored in memory in the form of a hash table. The dictionary can contain any number of elements with any identifiers. In practice, the number of keys can reach tens of millions of items.
-
-All types of sources are supported. When updating, data (from a file or from a table) is read in its entirety.
-
-Configuration example:
-
-``` xml
-<layout>
-  <hashed />
-</layout>
-```
-
-or
-
-``` sql
-LAYOUT(HASHED())
-```
-
-### sparse\_hashed {#dicts-external_dicts_dict_layout-sparse_hashed}
-
-Similar to `hashed`, but uses less memory in favor of more CPU usage.
-
-Configuration example:
-
-``` xml
-<layout>
-  <sparse_hashed />
-</layout>
-```
-
-``` sql
-LAYOUT(SPARSE_HASHED())
-```
-
-### complex\_key\_hashed {#complex-key-hashed}
-
-This type of storage is for use with composite [keys](external_dicts_dict_structure.md). Similar to `hashed`.
-
-Configuration example:
-
-``` xml
-<layout>
-  <complex_key_hashed />
-</layout>
-```
-
-``` sql
-LAYOUT(COMPLEX_KEY_HASHED())
-```
-
-### range\_hashed {#range-hashed}
-
-The dictionary is stored in memory in the form of a hash table with an ordered array of ranges and their corresponding values.
-
-This storage method works the same way as hashed and allows using date/time (arbitrary numeric type) ranges in addition to the key.
-
-Example: The table contains discounts for each advertiser in the format:
-
-``` text
-+---------------|---------------------|-------------------|--------+
-| advertiser id | discount start date | discount end date | amount |
-+===============+=====================+===================+========+
-| 123           | 2015-01-01          | 2015-01-15        | 0.15   |
-+---------------|---------------------|-------------------|--------+
-| 123           | 2015-01-16          | 2015-01-31        | 0.25   |
-+---------------|---------------------|-------------------|--------+
-| 456           | 2015-01-01          | 2015-01-15        | 0.05   |
-+---------------|---------------------|-------------------|--------+
-```
-
-To use a sample for date ranges, define the `range_min` and `range_max` elements in the [structure](external_dicts_dict_structure.md). These elements must contain elements `name` and `type` (if `type` is not specified, the default type will be used - Date). `type` can be any numeric type (Date / DateTime / UInt64 / Int32 / others).
-
-Example:
-
-``` xml
-<structure>
-    <id>
-        <name>Id</name>
-    </id>
-    <range_min>
-        <name>first</name>
-        <type>Date</type>
-    </range_min>
-    <range_max>
-        <name>last</name>
-        <type>Date</type>
-    </range_max>
-    ...
-```
-
-or
-
-``` sql
-CREATE DICTIONARY somedict (
-    id UInt64,
-    first Date,
-    last Date
-)
-PRIMARY KEY id
-LAYOUT(RANGE_HASHED())
-RANGE(MIN first MAX last)
-```
-
-To work with these dictionaries, you need to pass an additional argument to the `dictGetT` function, for which a range is selected:
-
-``` sql
-dictGetT('dict_name', 'attr_name', id, date)
-```
-
-This function returns the value for the specified `id`s and the date range that includes the passed date.
-
-Details of the algorithm:
-
-- If the `id` is not found or a range is not found for the `id`, it returns the default value for the dictionary.
-- If there are overlapping ranges, you can use any of them.
-- If the range delimiter is `NULL` or an invalid date (such as 1900-01-01 or 2039-01-01), the range is left open. The range can be open on both sides.
-
-Configuration example:
-
-``` xml
-<yandex>
-    <dictionary>
-        ...
-
-        <layout>
-            <range_hashed />
-        </layout>
-
-        <structure>
-            <id>
-                <name>Abcdef</name>
-            </id>
-            <range_min>
-                <name>StartTimeStamp</name>
-                <type>UInt64</type>
-            </range_min>
-            <range_max>
-                <name>EndTimeStamp</name>
-                <type>UInt64</type>
-            </range_max>
-            <attribute>
-                <name>XXXType</name>
-                <type>String</type>
-                <null_value />
-            </attribute>
-        </structure>
-
-    </dictionary>
-</yandex>
-```
-
-or
-
-``` sql
-CREATE DICTIONARY somedict(
-    Abcdef UInt64,
-    StartTimeStamp UInt64,
-    EndTimeStamp UInt64,
-    XXXType String DEFAULT ''
-)
-PRIMARY KEY Abcdef
-RANGE(MIN StartTimeStamp MAX EndTimeStamp)
-```
-
-### cache {#cache}
-
-The dictionary is stored in a cache that has a fixed number of cells. These cells contain frequently used elements.
-
-When searching for a dictionary, the cache is searched first. For each block of data, all keys that are not found in the cache or are outdated are requested from the source using `SELECT attrs... FROM db.table WHERE id IN (k1, k2, ...)`. The received data is then written to the cache.
-
-For cache dictionaries, the expiration [lifetime](external_dicts_dict_lifetime.md) of data in the cache can be set. If more time than `lifetime` has passed since loading the data in a cell, the cell’s value is not used, and it is re-requested the next time it needs to be used.
-This is the least effective of all the ways to store dictionaries. The speed of the cache depends strongly on correct settings and the usage scenario. A cache type dictionary performs well only when the hit rates are high enough (recommended 99% and higher). You can view the average hit rate in the `system.dictionaries` table.
-
-To improve cache performance, use a subquery with `LIMIT`, and call the function with the dictionary externally.
-
-Supported [sources](external_dicts_dict_sources.md): MySQL, ClickHouse, executable, HTTP.
-
-Example of settings:
-
-``` xml
-<layout>
-    <cache>
-        <!-- The size of the cache, in number of cells. -->
-        <size_in_cells>1000000000</size_in_cells>
-    </cache>
-</layout>
-```
-
-or
-
-``` sql
-LAYOUT(CACHE(SIZE_IN_CELLS 1000000000))
-```
-
-Set a large enough cache size. You need to experiment to select the number of cells:
-
-1. Set some value.
-2. Run queries until the cache is completely full.
-3. Assess memory consumption using the `system.dictionaries` table.
-4. Increase or decrease the number of cells until the required memory consumption is reached.
-
-!!! warning "Warning"
-    Do not use ClickHouse as a source, because it is slow to process queries with random reads.
-
-### complex\_key\_cache {#complex-key-cache}
-
-This type of storage is for use with composite [keys](external_dicts_dict_structure.md). Similar to `cache`.
-
-### ip\_trie {#ip-trie}
-
-This type of storage is for mapping network prefixes (IP addresses) to metadata such as ASN.
-
-Example: The table contains network prefixes and their corresponding AS number and country code:
-
-``` text
-  +-----------------|-------|--------+
-  | prefix          | asn   | cca2   |
-  +=================+=======+========+
-  | 202.79.32.0/20  | 17501 | NP     |
-  +-----------------|-------|--------+
-  | 2620:0:870::/48 | 3856  | US     |
-  +-----------------|-------|--------+
-  | 2a02:6b8:1::/48 | 13238 | RU     |
-  +-----------------|-------|--------+
-  | 2001:db8::/32   | 65536 | ZZ     |
-  +-----------------|-------|--------+
-```
-
-When using this type of layout, the structure must have a composite key.
-
-Example:
-
-``` xml
-<structure>
-    <key>
-        <attribute>
-            <name>prefix</name>
-            <type>String</type>
-        </attribute>
-    </key>
-    <attribute>
-            <name>asn</name>
-            <type>UInt32</type>
-            <null_value />
-    </attribute>
-    <attribute>
-            <name>cca2</name>
-            <type>String</type>
-            <null_value>??</null_value>
-    </attribute>
-    ...
-```
-
-or
-
-``` sql
-CREATE DICTIONARY somedict (
-    prefix String,
-    asn UInt32,
-    cca2 String DEFAULT '??'
-)
-PRIMARY KEY prefix
-```
-
-The key must have only one String type attribute that contains an allowed IP prefix. Other types are not supported yet.
-
-For queries, you must use the same functions (`dictGetT` with a tuple) as for dictionaries with composite keys:
-
-``` sql
-dictGetT('dict_name', 'attr_name', tuple(ip))
-```
-
-The function takes either `UInt32` for IPv4, or `FixedString(16)` for IPv6:
-
-``` sql
-dictGetString('prefix', 'asn', tuple(IPv6StringToNum('2001:db8::1')))
-```
-
-Other types are not supported yet. The function returns the attribute for the prefix that corresponds to this IP address. If there are overlapping prefixes, the most specific one is returned.
-
-Data is stored in a `trie`. It must completely fit into RAM.
-
-[Original article](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_layout/)
diff --git a/docs/fa/query_language/dicts/external_dicts_dict_lifetime.md b/docs/fa/query_language/dicts/external_dicts_dict_lifetime.md deleted file mode 100644 index 3a90e437681..00000000000 --- a/docs/fa/query_language/dicts/external_dicts_dict_lifetime.md +++ /dev/null @@ -1,83 +0,0 @@
----
-en_copy: true
----
-
-# Dictionary Updates {#dictionary-updates}
-
-ClickHouse periodically updates the dictionaries. The update interval for fully downloaded dictionaries and the invalidation interval for cached dictionaries are defined in the `<lifetime>` tag in seconds.
-
-Dictionary updates (other than loading for first use) do not block queries. During updates, the old version of a dictionary is used. If an error occurs during an update, the error is written to the server log, and queries continue using the old version of dictionaries.
-
-Example of settings:
-
-``` xml
-<dictionary>
-    ...
-    <lifetime>300</lifetime>
-    ...
-</dictionary>
-```
-
-``` sql
-CREATE DICTIONARY (...)
-...
-LIFETIME(300)
-...
-```
-
-Setting `0` (`LIFETIME(0)`) prevents dictionaries from updating.
-
-You can set a time interval for updates, and ClickHouse will choose a uniformly random time within this range. This is necessary in order to distribute the load on the dictionary source when updating on a large number of servers.
-
-Example of settings:
-
-``` xml
-<dictionary>
-    ...
-    <lifetime>
-        <min>300</min>
-        <max>360</max>
-    </lifetime>
-    ...
-</dictionary>
-```
-
-or
-
-``` sql
-LIFETIME(MIN 300 MAX 360)
-```
-
-When updating the dictionaries, the ClickHouse server applies different logic depending on the type of [source](external_dicts_dict_sources.md):
-
-- For a text file, it checks the time of modification. If the time differs from the previously recorded time, the dictionary is updated.
-- For MyISAM tables, the time of modification is checked using a `SHOW TABLE STATUS` query.
-- Dictionaries from other sources are updated every time by default.
-
-For MySQL (InnoDB), ODBC and ClickHouse sources, you can set up a query that will update the dictionaries only if they really changed, rather than each time. To do this, follow these steps:
-
-- The dictionary table must have a field that always changes when the source data is updated.
-- The settings of the source must specify a query that retrieves the changing field. The ClickHouse server interprets the query result as a row, and if this row has changed relative to its previous state, the dictionary is updated. Specify the query in the `<invalidate_query>` field in the settings for the [source](external_dicts_dict_sources.md).
-
-Example of settings:
-
-``` xml
-<dictionary>
-    ...
-    <odbc>
-      ...
-      <invalidate_query>SELECT update_time FROM dictionary_source where id = 1</invalidate_query>
-    </odbc>
-    ...
-</dictionary>
-```
-
-or
-
-``` sql
-...
-SOURCE(ODBC(... invalidate_query 'SELECT update_time FROM dictionary_source where id = 1'))
-...
-```
-
-[Original article](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_lifetime/)
diff --git a/docs/fa/query_language/dicts/external_dicts_dict_structure.md b/docs/fa/query_language/dicts/external_dicts_dict_structure.md deleted file mode 100644 index acb0ce36875..00000000000 --- a/docs/fa/query_language/dicts/external_dicts_dict_structure.md +++ /dev/null @@ -1,172 +0,0 @@
----
-en_copy: true
----
-
-# Dictionary Key and Fields {#dictionary-key-and-fields}
-
-The `<structure>` clause describes the dictionary key and fields available for queries.
-
-XML description:
-
-``` xml
-<dictionary>
-    <structure>
-
-        <id>
-            <name>Id</name>
-        </id>
-
-        <attribute>
-            <!-- Attribute parameters -->
-        </attribute>
-
-        ...
-
-    </structure>
-</dictionary>
-```
-
-Attributes are described in the elements:
-
-- `<id>` — [Key column](external_dicts_dict_structure.md#ext_dict_structure-key).
-- `<attribute>` — [Data column](external_dicts_dict_structure.md#ext_dict_structure-attributes). There can be multiple attributes.
-
-DDL query:
-
-``` sql
-CREATE DICTIONARY dict_name (
-    Id UInt64,
-    -- attributes
-)
-PRIMARY KEY Id
-...
-```
-
-Attributes are described in the query body:
-
-- `PRIMARY KEY` — [Key column](external_dicts_dict_structure.md#ext_dict_structure-key)
-- `AttrName AttrType` — [Data column](external_dicts_dict_structure.md#ext_dict_structure-attributes). There can be multiple attributes.
-
-## Key {#ext_dict_structure-key}
-
-ClickHouse supports the following types of keys:
-
-- Numeric key. `UInt64`. Defined in the `<id>` tag or using the `PRIMARY KEY` keyword.
-- Composite key. Set of values of different types. Defined in the `<key>` tag or using the `PRIMARY KEY` keyword.
-
-An xml structure can contain either `<id>` or `<key>`. A DDL-query must contain a single `PRIMARY KEY`.
-
-!!! warning "Warning"
-    You must not describe the key as an attribute.
-
-### Numeric Key {#ext_dict-numeric-key}
-
-Type: `UInt64`.
-
-Configuration example:
-
-``` xml
-<id>
-    <name>Id</name>
-</id>
-```
-
-Configuration fields:
-
-- `name` – The name of the column with keys.
-
-For DDL-query:
-
-``` sql
-CREATE DICTIONARY (
-    Id UInt64,
-    ...
-)
-PRIMARY KEY Id
-...
-```
-
-- `PRIMARY KEY` – The name of the column with keys.
-
-### Composite Key {#composite-key}
-
-The key can be a `tuple` from any types of fields. The [layout](external_dicts_dict_layout.md) in this case must be `complex_key_hashed` or `complex_key_cache`.
-
-!!! tip "Tip"
-    A composite key can consist of a single element. This makes it possible to use a string as the key, for instance.
-
-The key structure is set in the element `<key>`. Key fields are specified in the same format as the dictionary [attributes](external_dicts_dict_structure.md). Example:
-
-``` xml
-<structure>
-    <key>
-        <attribute>
-            <name>field1</name>
-            <type>String</type>
-        </attribute>
-        <attribute>
-            <name>field2</name>
-            <type>UInt32</type>
-        </attribute>
-        ...
-    </key>
-...
-```
-
-or
-
-``` sql
-CREATE DICTIONARY (
-    field1 String,
-    field2 String
-    ...
-)
-PRIMARY KEY field1, field2
-...
-```
-
-For a query to the `dictGet*` function, a tuple is passed as the key. Example: `dictGetString('dict_name', 'attr_name', tuple('string for field1', num_for_field2))`.
-
-## Attributes {#ext_dict_structure-attributes}
-
-Configuration example:
-
-``` xml
-<structure>
-    ...
-    <attribute>
-        <name>Name</name>
-        <type>ClickHouseDataType</type>
-        <null_value></null_value>
-        <expression>rand64()</expression>
-        <hierarchical>true</hierarchical>
-        <injective>true</injective>
-        <is_object_id>true</is_object_id>
-    </attribute>
-</structure>
-```
-
-or
-
-``` sql
-CREATE DICTIONARY somename (
-    Name ClickHouseDataType DEFAULT '' EXPRESSION rand64() HIERARCHICAL INJECTIVE IS_OBJECT_ID
-)
-```
-
-Configuration fields:
-
-| Tag | Description | Required |
-|----------------|-------------|----------|
-| `name` | Column name. | Yes |
-| `type` | ClickHouse data type.<br/>ClickHouse tries to cast the value from the dictionary to the specified data type. For example, for MySQL, the field might be `TEXT`, `VARCHAR`, or `BLOB` in the MySQL source table, but it can be uploaded as `String` in ClickHouse.<br/>[Nullable](../../data_types/nullable.md) is not supported. | Yes |
-| `null_value` | Default value for a non-existing element.<br/>In the example, it is an empty string. You cannot use `NULL` in this field. | Yes |
-| `expression` | [Expression](../syntax.md#syntax-expressions) that ClickHouse executes on the value.<br/>The expression can be a column name in the remote SQL database. Thus, you can use it to create an alias for the remote column.<br/><br/>Default value: no expression. | No |
-| `hierarchical` | If `true`, the attribute contains the value of a parent key for the current key. See [Hierarchical Dictionaries](external_dicts_dict_hierarchical.md).<br/><br/>Default value: `false`. | No |
-| `injective` | Flag that shows whether the `id -> attribute` image is [injective](https://en.wikipedia.org/wiki/Injective_function).<br/>If `true`, ClickHouse can automatically place after the `GROUP BY` clause the requests to dictionaries with injection. Usually it significantly reduces the amount of such requests.<br/><br/>Default value: `false`. | No |
-| `is_object_id` | Flag that shows whether the query is executed for a MongoDB document by `ObjectID`.<br/><br/>Default value: `false`. | No |
-
-## See Also {#see-also}
-
-- [Functions for working with external dictionaries](../functions/ext_dict_functions.md).
-
-[Original article](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_structure/)
diff --git a/docs/fa/query_language/dicts/index.md b/docs/fa/query_language/dicts/index.md deleted file mode 100644 index 9c7883cf7a1..00000000000 --- a/docs/fa/query_language/dicts/index.md +++ /dev/null @@ -1,18 +0,0 @@
----
-en_copy: true
----
-
-# Dictionaries {#dictionaries}
-
-A dictionary is a mapping (`key -> attributes`) that is convenient for various types of reference lists.
-
-ClickHouse supports special functions for working with dictionaries that can be used in queries. It is easier and more efficient to use dictionaries with functions than a `JOIN` with reference tables.
-
-[NULL](../syntax.md#null) values can’t be stored in a dictionary.
-
-ClickHouse supports:
-
-- [Built-in dictionaries](internal_dicts.md#internal_dicts) with a specific [set of functions](../functions/ym_dict_functions.md).
-- [Plug-in (external) dictionaries](external_dicts.md) with a [set of functions](../functions/ext_dict_functions.md).
-
-[Original article](https://clickhouse.tech/docs/en/query_language/dicts/)
diff --git a/docs/fa/query_language/dicts/internal_dicts.md b/docs/fa/query_language/dicts/internal_dicts.md deleted file mode 100644 index a7ac9fe7d8c..00000000000 --- a/docs/fa/query_language/dicts/internal_dicts.md +++ /dev/null @@ -1,52 +0,0 @@
----
-en_copy: true
----
-
-# Internal dictionaries {#internal_dicts}
-
-ClickHouse contains a built-in feature for working with a geobase.
-
-This allows you to:
-
-- Use a region’s ID to get its name in the desired language.
-- Use a region’s ID to get the ID of a city, area, federal district, country, or continent.
-- Check whether a region is part of another region.
-- Get a chain of parent regions.
-
-All the functions support “translocality,” the ability to simultaneously use different perspectives on region ownership. For more information, see the section “Functions for working with Yandex.Metrica dictionaries”.
-
-The internal dictionaries are disabled in the default package.
-To enable them, uncomment the parameters `path_to_regions_hierarchy_file` and `path_to_regions_names_files` in the server configuration file.
-
-The geobase is loaded from text files.
-
-Place the `regions_hierarchy*.txt` files into the `path_to_regions_hierarchy_file` directory. This configuration parameter must contain the path to the `regions_hierarchy.txt` file (the default regional hierarchy), and the other files (`regions_hierarchy_ua.txt`) must be located in the same directory.
-
-Put the `regions_names_*.txt` files in the `path_to_regions_names_files` directory.
-
-You can also create these files yourself. The file format is as follows:
-
-`regions_hierarchy*.txt`: TabSeparated (no header), columns:
-
-- region ID (`UInt32`)
-- parent region ID (`UInt32`)
-- region type (`UInt8`): 1 - continent, 3 - country, 4 - federal district, 5 - region, 6 - city; other types don’t have values
-- population (`UInt32`) — optional column
-
-`regions_names_*.txt`: TabSeparated (no header), columns:
-
-- region ID (`UInt32`)
-- region name (`String`) — Can’t contain tabs or line feeds, even escaped ones.
-
-A flat array is used for storing in RAM. For this reason, IDs shouldn’t be more than a million.
-
-Dictionaries can be updated without restarting the server. However, the set of available dictionaries is not updated.
-For updates, the file modification times are checked. If a file has changed, the dictionary is updated. -The interval to check for changes is configured in the `builtin_dictionaries_reload_interval` parameter. -Dictionary updates (other than loading at first use) do not block queries. During updates, queries use the old versions of dictionaries. If an error occurs during an update, the error is written to the server log, and queries continue using the old version of dictionaries. - -We recommend periodically updating the dictionaries with the geobase. During an update, generate new files and write them to a separate location. When everything is ready, rename them to the files used by the server. - -There are also functions for working with OS identifiers and Yandex.Metrica search engines, but they shouldn’t be used. - -[Original article](https://clickhouse.tech/docs/en/query_language/dicts/internal_dicts/) diff --git a/docs/fa/query_language/functions/arithmetic_functions.md b/docs/fa/query_language/functions/arithmetic_functions.md deleted file mode 100644 index 849c8e28340..00000000000 --- a/docs/fa/query_language/functions/arithmetic_functions.md +++ /dev/null @@ -1,84 +0,0 @@ ---- -en_copy: true ---- - -# Arithmetic functions {#arithmetic-functions} - -For all arithmetic functions, the result type is calculated as the smallest number type that the result fits in, if there is such a type. The minimum is taken simultaneously based on the number of bits, whether it is signed, and whether it floats. If there are not enough bits, the highest bit type is taken. - -Example: - -``` sql -SELECT toTypeName(0), toTypeName(0 + 0), toTypeName(0 + 0 + 0), toTypeName(0 + 0 + 0 + 0) -``` - -``` text -┌─toTypeName(0)─┬─toTypeName(plus(0, 0))─┬─toTypeName(plus(plus(0, 0), 0))─┬─toTypeName(plus(plus(plus(0, 0), 0), 0))─┐ -│ UInt8 │ UInt16 │ UInt32 │ UInt64 │ -└───────────────┴────────────────────────┴─────────────────────────────────┴──────────────────────────────────────────┘ -``` - -Arithmetic functions work for any pair of types from UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64, Float32, or Float64. - -Overflow is produced the same way as in C++. - -## plus(a, b), a + b operator {#plusa-b-a-b-operator} - -Calculates the sum of the numbers. -You can also add integer numbers with a date or date and time. In the case of a date, adding an integer means adding the corresponding number of days. For a date with time, it means adding the corresponding number of seconds. - -## minus(a, b), a - b operator {#minusa-b-a-b-operator} - -Calculates the difference. The result is always signed. - -You can also calculate integer numbers from a date or date with time. The idea is the same – see above for ‘plus’. - -## multiply(a, b), a \* b operator {#multiplya-b-a-b-operator} - -Calculates the product of the numbers. - -## divide(a, b), a / b operator {#dividea-b-a-b-operator} - -Calculates the quotient of the numbers. The result type is always a floating-point type. -It is not integer division. For integer division, use the ‘intDiv’ function. -When dividing by zero you get ‘inf’, ‘-inf’, or ‘nan’. - -## intDiv(a, b) {#intdiva-b} - -Calculates the quotient of the numbers. Divides into integers, rounding down (by the absolute value). -An exception is thrown when dividing by zero or when dividing a minimal negative number by minus one. - -## intDivOrZero(a, b) {#intdivorzeroa-b} - -Differs from ‘intDiv’ in that it returns zero when dividing by zero or when dividing a minimal negative number by minus one. 
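-
-A quick sketch of the difference between these division functions (the result row is illustrative):
-
-``` sql
-SELECT divide(7, 2), intDiv(7, 2), intDivOrZero(7, 0)
-```
-
-``` text
-┌─divide(7, 2)─┬─intDiv(7, 2)─┬─intDivOrZero(7, 0)─┐
-│          3.5 │            3 │                  0 │
-└──────────────┴──────────────┴────────────────────┘
-```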
-
-## modulo(a, b), a % b operator {#moduloa-b-a-b-operator}
-
-Calculates the remainder after division.
-If arguments are floating-point numbers, they are pre-converted to integers by dropping the decimal portion.
-The remainder is taken in the same sense as in C++. Truncated division is used for negative numbers.
-An exception is thrown when dividing by zero or when dividing a minimal negative number by minus one.
-
-## moduloOrZero(a, b) {#moduloorzeroa-b}
-
-Differs from ‘modulo’ in that it returns zero when the divisor is zero.
-
-## negate(a), -a operator {#negatea-a-operator}
-
-Calculates a number with the reverse sign. The result is always signed.
-
-## abs(a) {#arithm_func-abs}
-
-Calculates the absolute value of the number (a). That is, if a \< 0, it returns -a. For unsigned types it doesn’t do anything. For signed integer types, it returns an unsigned number.
-
-## gcd(a, b) {#gcda-b}
-
-Returns the greatest common divisor of the numbers.
-An exception is thrown when dividing by zero or when dividing a minimal negative number by minus one.
-
-## lcm(a, b) {#lcma-b}
-
-Returns the least common multiple of the numbers.
-An exception is thrown when dividing by zero or when dividing a minimal negative number by minus one.
-
-[Original article](https://clickhouse.tech/docs/en/query_language/functions/arithmetic_functions/)
diff --git a/docs/fa/query_language/functions/array_functions.md b/docs/fa/query_language/functions/array_functions.md deleted file mode 100644 index 88f0994b911..00000000000 --- a/docs/fa/query_language/functions/array_functions.md +++ /dev/null @@ -1,1052 +0,0 @@
----
-en_copy: true
----
-
-# Functions for working with arrays {#functions-for-working-with-arrays}
-
-## empty {#function-empty}
-
-Returns 1 for an empty array, or 0 for a non-empty array.
-The result type is UInt8.
-The function also works for strings.
-
-## notEmpty {#function-notempty}
-
-Returns 0 for an empty array, or 1 for a non-empty array.
-The result type is UInt8.
-The function also works for strings.
-
-## length {#array_functions-length}
-
-Returns the number of items in the array.
-The result type is UInt64.
-The function also works for strings.
-
-## emptyArrayUInt8, emptyArrayUInt16, emptyArrayUInt32, emptyArrayUInt64 {#emptyarrayuint8-emptyarrayuint16-emptyarrayuint32-emptyarrayuint64}
-
-## emptyArrayInt8, emptyArrayInt16, emptyArrayInt32, emptyArrayInt64 {#emptyarrayint8-emptyarrayint16-emptyarrayint32-emptyarrayint64}
-
-## emptyArrayFloat32, emptyArrayFloat64 {#emptyarrayfloat32-emptyarrayfloat64}
-
-## emptyArrayDate, emptyArrayDateTime {#emptyarraydate-emptyarraydatetime}
-
-## emptyArrayString {#emptyarraystring}
-
-Accepts zero arguments and returns an empty array of the appropriate type.
-
-## emptyArrayToSingle {#emptyarraytosingle}
-
-Accepts an empty array and returns a one-element array that is equal to the default value.
-
-## range(end), range(start, end \[, step\]) {#rangeend-rangestart-end-step}
-
-Returns an array of numbers from start to end-1 by step.
-If the argument `start` is not specified, defaults to 0.
-If the argument `step` is not specified, defaults to 1.
-It behaves almost like the pythonic `range`, with the difference that all the argument types must be `UInt` numbers.
-Just in case, an exception is thrown if arrays with a total length of more than 100,000,000 elements are created in a data block.
-
-## array(x1, …), operator \[x1, …\] {#arrayx1-operator-x1}
-
-Creates an array from the function arguments.
-The arguments must be constants and have types that have the smallest common type. At least one argument must be passed, because otherwise it isn’t clear which type of array to create. That is, you can’t use this function to create an empty array (to do that, use the ‘emptyArray\*’ functions described above).
-Returns an ‘Array(T)’ type result, where ‘T’ is the smallest common type out of the passed arguments.
-
-## arrayConcat {#arrayconcat}
-
-Combines arrays passed as arguments.
-
-``` sql
-arrayConcat(arrays)
-```
-
-**Parameters**
-
-- `arrays` – Arbitrary number of arguments of [Array](../../data_types/array.md) type.
-
-**Example**
-
-``` sql
-SELECT arrayConcat([1, 2], [3, 4], [5, 6]) AS res
-```
-
-``` text
-┌─res───────────┐
-│ [1,2,3,4,5,6] │
-└───────────────┘
-```
-
-## arrayElement(arr, n), operator arr\[n\] {#arrayelementarr-n-operator-arrn}
-
-Gets the element with the index `n` from the array `arr`. `n` can be of any integer type.
-Indexes in an array begin from one.
-Negative indexes are supported. In this case, it selects the corresponding element numbered from the end. For example, `arr[-1]` is the last item in the array.
-
-If the index falls outside of the bounds of an array, it returns some default value (0 for numbers, an empty string for strings, etc.), except for the case with a non-constant array and a constant index 0 (in this case there will be an error `Array indices are 1-based`).
-
-## has(arr, elem) {#hasarr-elem}
-
-Checks whether the ‘arr’ array has the ‘elem’ element.
-Returns 0 if the element is not in the array, or 1 if it is.
-
-`NULL` is processed as a value.
-
-``` sql
-SELECT has([1, 2, NULL], NULL)
-```
-
-``` text
-┌─has([1, 2, NULL], NULL)─┐
-│                       1 │
-└─────────────────────────┘
-```
-
-## hasAll {#hasall}
-
-Checks whether one array is a subset of another.
-
-``` sql
-hasAll(set, subset)
-```
-
-**Parameters**
-
-- `set` – Array of any type with a set of elements.
-- `subset` – Array of any type with elements that should be tested to be a subset of `set`.
-
-**Return values**
-
-- `1`, if `set` contains all of the elements from `subset`.
-- `0`, otherwise.
-
-**Peculiar properties**
-
-- An empty array is a subset of any array.
-- `Null` is processed as a value.
-- The order of values in both arrays doesn’t matter.
-
-**Examples**
-
-`SELECT hasAll([], [])` returns 1.
-
-`SELECT hasAll([1, Null], [Null])` returns 1.
-
-`SELECT hasAll([1.0, 2, 3, 4], [1, 3])` returns 1.
-
-`SELECT hasAll(['a', 'b'], ['a'])` returns 1.
-
-`SELECT hasAll([1], ['a'])` returns 0.
-
-`SELECT hasAll([[1, 2], [3, 4]], [[1, 2], [3, 5]])` returns 0.
-
-## hasAny {#hasany}
-
-Checks whether two arrays have at least one element in common.
-
-``` sql
-hasAny(array1, array2)
-```
-
-**Parameters**
-
-- `array1` – Array of any type with a set of elements.
-- `array2` – Array of any type with a set of elements.
-
-**Return values**
-
-- `1`, if `array1` and `array2` have at least one element in common.
-- `0`, otherwise.
-
-**Peculiar properties**
-
-- `Null` is processed as a value.
-- The order of values in both arrays doesn’t matter.
-
-**Examples**
-
-`SELECT hasAny([1], [])` returns `0`.
-
-`SELECT hasAny([Null], [Null, 1])` returns `1`.
-
-`SELECT hasAny([-128, 1., 512], [1])` returns `1`.
-
-`SELECT hasAny([[1, 2], [3, 4]], ['a', 'c'])` returns `0`.
-
-`SELECT hasAny([[1, 2], [3, 4]], [[1, 2], [1, 2]])` returns `1`.
-
-## indexOf(arr, x) {#indexofarr-x}
-
-Returns the index of the first ‘x’ element (starting from 1) if it is in the array, or 0 if it is not.
- -Example: - -``` sql -SELECT indexOf([1, 3, NULL, NULL], NULL) -``` - -``` text - -┌─indexOf([1, 3, NULL, NULL], NULL)─┐ -│ 3 │ -└───────────────────────────────────┘ -``` - -Elements set to `NULL` are handled as normal values. - -## countEqual(arr, x) {#countequalarr-x} - -Returns the number of elements in the array equal to x. Equivalent to arrayCount (elem -\> elem = x, arr). - -`NULL` elements are handled as separate values. - -Example: - -``` sql -SELECT countEqual([1, 2, NULL, NULL], NULL) -``` - -``` text -┌─countEqual([1, 2, NULL, NULL], NULL)─┐ -│ 2 │ -└──────────────────────────────────────┘ -``` - -## arrayEnumerate(arr) {#array_functions-arrayenumerate} - -Returns the array \[1, 2, 3, …, length (arr) \] - -This function is normally used with ARRAY JOIN. It allows counting something just once for each array after applying ARRAY JOIN. Example: - -``` sql -SELECT - count() AS Reaches, - countIf(num = 1) AS Hits -FROM test.hits -ARRAY JOIN - GoalsReached, - arrayEnumerate(GoalsReached) AS num -WHERE CounterID = 160656 -LIMIT 10 -``` - -``` text -┌─Reaches─┬──Hits─┐ -│ 95606 │ 31406 │ -└─────────┴───────┘ -``` - -In this example, Reaches is the number of conversions (the strings received after applying ARRAY JOIN), and Hits is the number of pageviews (strings before ARRAY JOIN). In this particular case, you can get the same result in an easier way: - -``` sql -SELECT - sum(length(GoalsReached)) AS Reaches, - count() AS Hits -FROM test.hits -WHERE (CounterID = 160656) AND notEmpty(GoalsReached) -``` - -``` text -┌─Reaches─┬──Hits─┐ -│ 95606 │ 31406 │ -└─────────┴───────┘ -``` - -This function can also be used in higher-order functions. For example, you can use it to get array indexes for elements that match a condition. - -## arrayEnumerateUniq(arr, …) {#arrayenumerateuniqarr} - -Returns an array the same size as the source array, indicating for each element what its position is among elements with the same value. -For example: arrayEnumerateUniq(\[10, 20, 10, 30\]) = \[1, 1, 2, 1\]. - -This function is useful when using ARRAY JOIN and aggregation of array elements. -Example: - -``` sql -SELECT - Goals.ID AS GoalID, - sum(Sign) AS Reaches, - sumIf(Sign, num = 1) AS Visits -FROM test.visits -ARRAY JOIN - Goals, - arrayEnumerateUniq(Goals.ID) AS num -WHERE CounterID = 160656 -GROUP BY GoalID -ORDER BY Reaches DESC -LIMIT 10 -``` - -``` text -┌──GoalID─┬─Reaches─┬─Visits─┐ -│ 53225 │ 3214 │ 1097 │ -│ 2825062 │ 3188 │ 1097 │ -│ 56600 │ 2803 │ 488 │ -│ 1989037 │ 2401 │ 365 │ -│ 2830064 │ 2396 │ 910 │ -│ 1113562 │ 2372 │ 373 │ -│ 3270895 │ 2262 │ 812 │ -│ 1084657 │ 2262 │ 345 │ -│ 56599 │ 2260 │ 799 │ -│ 3271094 │ 2256 │ 812 │ -└─────────┴─────────┴────────┘ -``` - -In this example, each goal ID has a calculation of the number of conversions (each element in the Goals nested data structure is a goal that was reached, which we refer to as a conversion) and the number of sessions. Without ARRAY JOIN, we would have counted the number of sessions as sum(Sign). But in this particular case, the rows were multiplied by the nested Goals structure, so in order to count each session one time after this, we apply a condition to the value of the arrayEnumerateUniq(Goals.ID) function. - -The arrayEnumerateUniq function can take multiple arrays of the same size as arguments. In this case, uniqueness is considered for tuples of elements in the same positions in all the arrays. 
- -``` sql -SELECT arrayEnumerateUniq([1, 1, 1, 2, 2, 2], [1, 1, 2, 1, 1, 2]) AS res -``` - -``` text -┌─res───────────┐ -│ [1,2,1,1,2,1] │ -└───────────────┘ -``` - -This is necessary when using ARRAY JOIN with a nested data structure and further aggregation across multiple elements in this structure. - -## arrayPopBack {#arraypopback} - -Removes the last item from the array. - -``` sql -arrayPopBack(array) -``` - -**Parameters** - -- `array` – Array. - -**Example** - -``` sql -SELECT arrayPopBack([1, 2, 3]) AS res -``` - -``` text -┌─res───┐ -│ [1,2] │ -└───────┘ -``` - -## arrayPopFront {#arraypopfront} - -Removes the first item from the array. - -``` sql -arrayPopFront(array) -``` - -**Parameters** - -- `array` – Array. - -**Example** - -``` sql -SELECT arrayPopFront([1, 2, 3]) AS res -``` - -``` text -┌─res───┐ -│ [2,3] │ -└───────┘ -``` - -## arrayPushBack {#arraypushback} - -Adds one item to the end of the array. - -``` sql -arrayPushBack(array, single_value) -``` - -**Parameters** - -- `array` – Array. -- `single_value` – A single value. Only numbers can be added to an array with numbers, and only strings can be added to an array of strings. When adding numbers, ClickHouse automatically sets the `single_value` type for the data type of the array. For more information about the types of data in ClickHouse, see “[Data types](../../data_types/index.md#data_types)”. Can be `NULL`. The function adds a `NULL` element to an array, and the type of array elements converts to `Nullable`. - -**Example** - -``` sql -SELECT arrayPushBack(['a'], 'b') AS res -``` - -``` text -┌─res───────┐ -│ ['a','b'] │ -└───────────┘ -``` - -## arrayPushFront {#arraypushfront} - -Adds one element to the beginning of the array. - -``` sql -arrayPushFront(array, single_value) -``` - -**Parameters** - -- `array` – Array. -- `single_value` – A single value. Only numbers can be added to an array with numbers, and only strings can be added to an array of strings. When adding numbers, ClickHouse automatically sets the `single_value` type for the data type of the array. For more information about the types of data in ClickHouse, see “[Data types](../../data_types/index.md#data_types)”. Can be `NULL`. The function adds a `NULL` element to an array, and the type of array elements converts to `Nullable`. - -**Example** - -``` sql -SELECT arrayPushFront(['b'], 'a') AS res -``` - -``` text -┌─res───────┐ -│ ['a','b'] │ -└───────────┘ -``` - -## arrayResize {#arrayresize} - -Changes the length of the array. - -``` sql -arrayResize(array, size[, extender]) -``` - -**Parameters:** - -- `array` — Array. -- `size` — Required length of the array. - - If `size` is less than the original size of the array, the array is truncated from the right. -- If `size` is larger than the initial size of the array, the array is extended to the right with `extender` values or default values for the data type of the array items. -- `extender` — Value for extending an array. Can be `NULL`. - -**Returned value:** - -An array of length `size`. - -**Examples of calls** - -``` sql -SELECT arrayResize([1], 3) -``` - -``` text -┌─arrayResize([1], 3)─┐ -│ [1,0,0] │ -└─────────────────────┘ -``` - -``` sql -SELECT arrayResize([1], 3, NULL) -``` - -``` text -┌─arrayResize([1], 3, NULL)─┐ -│ [1,NULL,NULL] │ -└───────────────────────────┘ -``` - -## arraySlice {#arrayslice} - -Returns a slice of the array. - -``` sql -arraySlice(array, offset[, length]) -``` - -**Parameters** - -- `array` – Array of data. -- `offset` – Indent from the edge of the array. 
A positive value indicates an offset on the left, and a negative value is an indent on the right. Numbering of the array items begins with 1. -- `length` - The length of the required slice. If you specify a negative value, the function returns an open slice `[offset, array_length - length)`. If you omit the value, the function returns the slice `[offset, the_end_of_array]`. - -**Example** - -``` sql -SELECT arraySlice([1, 2, NULL, 4, 5], 2, 3) AS res -``` - -``` text -┌─res────────┐ -│ [2,NULL,4] │ -└────────────┘ -``` - -Array elements set to `NULL` are handled as normal values. - -## arraySort(\[func,\] arr, …) {#array_functions-sort} - -Sorts the elements of the `arr` array in ascending order. If the `func` function is specified, sorting order is determined by the result of the `func` function applied to the elements of the array. If `func` accepts multiple arguments, the `arraySort` function is passed several arrays that the arguments of `func` will correspond to. Detailed examples are shown at the end of `arraySort` description. - -Example of integer values sorting: - -``` sql -SELECT arraySort([1, 3, 3, 0]); -``` - -``` text -┌─arraySort([1, 3, 3, 0])─┐ -│ [0,1,3,3] │ -└─────────────────────────┘ -``` - -Example of string values sorting: - -``` sql -SELECT arraySort(['hello', 'world', '!']); -``` - -``` text -┌─arraySort(['hello', 'world', '!'])─┐ -│ ['!','hello','world'] │ -└────────────────────────────────────┘ -``` - -Consider the following sorting order for the `NULL`, `NaN` and `Inf` values: - -``` sql -SELECT arraySort([1, nan, 2, NULL, 3, nan, -4, NULL, inf, -inf]); -``` - -``` text -┌─arraySort([1, nan, 2, NULL, 3, nan, -4, NULL, inf, -inf])─┐ -│ [-inf,-4,1,2,3,inf,nan,nan,NULL,NULL] │ -└───────────────────────────────────────────────────────────┘ -``` - -- `-Inf` values are first in the array. -- `NULL` values are last in the array. -- `NaN` values are right before `NULL`. -- `Inf` values are right before `NaN`. - -Note that `arraySort` is a [higher-order function](higher_order_functions.md). You can pass a lambda function to it as the first argument. In this case, sorting order is determined by the result of the lambda function applied to the elements of the array. - -Let’s consider the following example: - -``` sql -SELECT arraySort((x) -> -x, [1, 2, 3]) as res; -``` - -``` text -┌─res─────┐ -│ [3,2,1] │ -└─────────┘ -``` - -For each element of the source array, the lambda function returns the sorting key, that is, \[1 –\> -1, 2 –\> -2, 3 –\> -3\]. Since the `arraySort` function sorts the keys in ascending order, the result is \[3, 2, 1\]. Thus, the `(x) –> -x` lambda function sets the [descending order](#array_functions-reverse-sort) in a sorting. - -The lambda function can accept multiple arguments. In this case, you need to pass the `arraySort` function several arrays of identical length that the arguments of lambda function will correspond to. The resulting array will consist of elements from the first input array; elements from the next input array(s) specify the sorting keys. For example: - -``` sql -SELECT arraySort((x, y) -> y, ['hello', 'world'], [2, 1]) as res; -``` - -``` text -┌─res────────────────┐ -│ ['world', 'hello'] │ -└────────────────────┘ -``` - -Here, the elements that are passed in the second array (\[2, 1\]) define a sorting key for the corresponding element from the source array (\[‘hello’, ‘world’\]), that is, \[‘hello’ –\> 2, ‘world’ –\> 1\]. 
Since the lambda function doesn’t use `x`, actual values of the source array don’t affect the order in the result. So, ‘hello’ will be the second element in the result, and ‘world’ will be the first. - -Other examples are shown below. - -``` sql -SELECT arraySort((x, y) -> y, [0, 1, 2], ['c', 'b', 'a']) as res; -``` - -``` text -┌─res─────┐ -│ [2,1,0] │ -└─────────┘ -``` - -``` sql -SELECT arraySort((x, y) -> -y, [0, 1, 2], [1, 2, 3]) as res; -``` - -``` text -┌─res─────┐ -│ [2,1,0] │ -└─────────┘ -``` - -!!! note "Note" - To improve sorting efficiency, the [Schwartzian transform](https://en.wikipedia.org/wiki/Schwartzian_transform) is used. - -## arrayReverseSort(\[func,\] arr, …) {#array_functions-reverse-sort} - -Sorts the elements of the `arr` array in descending order. If the `func` function is specified, `arr` is sorted according to the result of the `func` function applied to the elements of the array, and then the sorted array is reversed. If `func` accepts multiple arguments, the `arrayReverseSort` function is passed several arrays that the arguments of `func` will correspond to. Detailed examples are shown at the end of `arrayReverseSort` description. - -Example of integer values sorting: - -``` sql -SELECT arrayReverseSort([1, 3, 3, 0]); -``` - -``` text -┌─arrayReverseSort([1, 3, 3, 0])─┐ -│ [3,3,1,0] │ -└────────────────────────────────┘ -``` - -Example of string values sorting: - -``` sql -SELECT arrayReverseSort(['hello', 'world', '!']); -``` - -``` text -┌─arrayReverseSort(['hello', 'world', '!'])─┐ -│ ['world','hello','!'] │ -└───────────────────────────────────────────┘ -``` - -Consider the following sorting order for the `NULL`, `NaN` and `Inf` values: - -``` sql -SELECT arrayReverseSort([1, nan, 2, NULL, 3, nan, -4, NULL, inf, -inf]) as res; -``` - -``` text -┌─res───────────────────────────────────┐ -│ [inf,3,2,1,-4,-inf,nan,nan,NULL,NULL] │ -└───────────────────────────────────────┘ -``` - -- `Inf` values are first in the array. -- `NULL` values are last in the array. -- `NaN` values are right before `NULL`. -- `-Inf` values are right before `NaN`. - -Note that the `arrayReverseSort` is a [higher-order function](higher_order_functions.md). You can pass a lambda function to it as the first argument. Example is shown below. - -``` sql -SELECT arrayReverseSort((x) -> -x, [1, 2, 3]) as res; -``` - -``` text -┌─res─────┐ -│ [1,2,3] │ -└─────────┘ -``` - -The array is sorted in the following way: - -1. At first, the source array (\[1, 2, 3\]) is sorted according to the result of the lambda function applied to the elements of the array. The result is an array \[3, 2, 1\]. -2. Array that is obtained on the previous step, is reversed. So, the final result is \[1, 2, 3\]. - -The lambda function can accept multiple arguments. In this case, you need to pass the `arrayReverseSort` function several arrays of identical length that the arguments of lambda function will correspond to. The resulting array will consist of elements from the first input array; elements from the next input array(s) specify the sorting keys. For example: - -``` sql -SELECT arrayReverseSort((x, y) -> y, ['hello', 'world'], [2, 1]) as res; -``` - -``` text -┌─res───────────────┐ -│ ['hello','world'] │ -└───────────────────┘ -``` - -In this example, the array is sorted in the following way: - -1. At first, the source array (\[‘hello’, ‘world’\]) is sorted according to the result of the lambda function applied to the elements of the arrays. 
The elements that are passed in the second array (\[2, 1\]), define the sorting keys for corresponding elements from the source array. The result is an array \[‘world’, ‘hello’\]. -2. Array that was sorted on the previous step, is reversed. So, the final result is \[‘hello’, ‘world’\]. - -Other examples are shown below. - -``` sql -SELECT arrayReverseSort((x, y) -> y, [4, 3, 5], ['a', 'b', 'c']) AS res; -``` - -``` text -┌─res─────┐ -│ [5,3,4] │ -└─────────┘ -``` - -``` sql -SELECT arrayReverseSort((x, y) -> -y, [4, 3, 5], [1, 2, 3]) AS res; -``` - -``` text -┌─res─────┐ -│ [4,3,5] │ -└─────────┘ -``` - -## arrayUniq(arr, …) {#arrayuniqarr} - -If one argument is passed, it counts the number of different elements in the array. -If multiple arguments are passed, it counts the number of different tuples of elements at corresponding positions in multiple arrays. - -If you want to get a list of unique items in an array, you can use arrayReduce(‘groupUniqArray’, arr). - -## arrayJoin(arr) {#array-functions-join} - -A special function. See the section [“ArrayJoin function”](array_join.md#functions_arrayjoin). - -## arrayDifference {#arraydifference} - -Calculates the difference between adjacent array elements. Returns an array where the first element will be 0, the second is the difference between `a[1] - a[0]`, etc. The type of elements in the resulting array is determined by the type inference rules for subtraction (e.g. `UInt8` - `UInt8` = `Int16`). - -**Syntax** - -``` sql -arrayDifference(array) -``` - -**Parameters** - -- `array` – [Array](https://clickhouse.yandex/docs/en/data_types/array/). - -**Returned values** - -Returns an array of differences between adjacent elements. - -Type: [UInt\*](https://clickhouse.yandex/docs/en/data_types/int_uint/#uint-ranges), [Int\*](https://clickhouse.yandex/docs/en/data_types/int_uint/#int-ranges), [Float\*](https://clickhouse.yandex/docs/en/data_types/float/). - -**Example** - -Query: - -``` sql -SELECT arrayDifference([1, 2, 3, 4]) -``` - -Result: - -``` text -┌─arrayDifference([1, 2, 3, 4])─┐ -│ [0,1,1,1] │ -└───────────────────────────────┘ -``` - -Example of the overflow due to result type Int64: - -Query: - -``` sql -SELECT arrayDifference([0, 10000000000000000000]) -``` - -Result: - -``` text -┌─arrayDifference([0, 10000000000000000000])─┐ -│ [0,-8446744073709551616] │ -└────────────────────────────────────────────┘ -``` - -## arrayDistinct {#arraydistinct} - -Takes an array, returns an array containing the distinct elements only. - -**Syntax** - -``` sql -arrayDistinct(array) -``` - -**Parameters** - -- `array` – [Array](https://clickhouse.yandex/docs/en/data_types/array/). - -**Returned values** - -Returns an array containing the distinct elements. - -**Example** - -Query: - -``` sql -SELECT arrayDistinct([1, 2, 2, 3, 1]) -``` - -Result: - -``` text -┌─arrayDistinct([1, 2, 2, 3, 1])─┐ -│ [1,2,3] │ -└────────────────────────────────┘ -``` - -## arrayEnumerateDense(arr) {#array_functions-arrayenumeratedense} - -Returns an array of the same size as the source array, indicating where each element first appears in the source array. - -Example: - -``` sql -SELECT arrayEnumerateDense([10, 20, 10, 30]) -``` - -``` text -┌─arrayEnumerateDense([10, 20, 10, 30])─┐ -│ [1,2,1,3] │ -└───────────────────────────────────────┘ -``` - -## arrayIntersect(arr) {#array-functions-arrayintersect} - -Takes multiple arrays, returns an array with elements that are present in all source arrays. Elements order in the resulting array is the same as in the first array. 
-
-Example:
-
-``` sql
-SELECT
-    arrayIntersect([1, 2], [1, 3], [2, 3]) AS no_intersect,
-    arrayIntersect([1, 2], [1, 3], [1, 4]) AS intersect
-```
-
-``` text
-┌─no_intersect─┬─intersect─┐
-│ []           │ [1]       │
-└──────────────┴───────────┘
-```
-
-## arrayReduce {#arrayreduce}
-
-Applies an aggregate function to array elements and returns its result. The name of the aggregation function is passed as a string in single quotes `'max'`, `'sum'`. When using parametric aggregate functions, the parameter is indicated after the function name in parentheses `'uniqUpTo(6)'`.
-
-**Syntax**
-
-```sql
-arrayReduce(agg_func, arr1, arr2, ..., arrN)
-```
-
-**Parameters**
-
-* `agg_func` — The name of an aggregate function which should be a constant [string](../../data_types/string.md).
-* `arr` — Any number of [array](../../data_types/array.md) type columns as the parameters of the aggregation function.
-
-**Returned value**
-
-The result of the aggregate function.
-
-**Example**
-
-```sql
-SELECT arrayReduce('max', [1, 2, 3])
-```
-
-```text
-┌─arrayReduce('max', [1, 2, 3])─┐
-│                             3 │
-└───────────────────────────────┘
-```
-
-If an aggregate function takes multiple arguments, then this function must be applied to multiple arrays of the same size.
-
-```sql
-SELECT arrayReduce('maxIf', [3, 5], [1, 0])
-```
-
-```text
-┌─arrayReduce('maxIf', [3, 5], [1, 0])─┐
-│                                    3 │
-└──────────────────────────────────────┘
-```
-
-Example with a parametric aggregate function:
-
-```sql
-SELECT arrayReduce('uniqUpTo(3)', [1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
-```
-
-```text
-┌─arrayReduce('uniqUpTo(3)', [1, 2, 3, 4, 5, 6, 7, 8, 9, 10])─┐
-│                                                           4 │
-└─────────────────────────────────────────────────────────────┘
-```
-
-## arrayReduceInRanges {#arrayreduceinranges}
-
-Applies an aggregate function to array elements in given ranges and returns an array containing the result corresponding to each range. The function will return the same result as multiple `arrayReduce(agg_func, arraySlice(arr1, index, length), ...)`.
-
-**Syntax**
-
-```sql
-arrayReduceInRanges(agg_func, ranges, arr1, arr2, ..., arrN)
-```
-
-**Parameters**
-
-* `agg_func` — The name of an aggregate function which should be a constant [string](../../data_types/string.md).
-* `ranges` — The ranges to aggregate, which should be an [array](../../data_types/array.md) of [tuples](../../data_types/tuple.md) containing the index and the length of each range.
-* `arr` — Any number of [array](../../data_types/array.md) type columns as the parameters of the aggregation function.
-
-**Returned value**
-
-An array containing the results of the aggregate function for each range.
-
-**Example**
-
-```sql
-SELECT arrayReduceInRanges(
-    'sum',
-    [(1, 5), (2, 3), (3, 4), (4, 4)],
-    [1000000, 200000, 30000, 4000, 500, 60, 7]
-) AS res
-```
-
-```text
-┌─res─────────────────────────┐
-│ [1234500,234000,34560,4567] │
-└─────────────────────────────┘
-```
-
-## arrayReverse(arr) {#arrayreverse}
-
-Returns an array of the same size as the original array containing the elements in reverse order.
-
-Example:
-
-``` sql
-SELECT arrayReverse([1, 2, 3])
-```
-
-``` text
-┌─arrayReverse([1, 2, 3])─┐
-│ [3,2,1]                 │
-└─────────────────────────┘
-```
-
-## reverse(arr) {#array-functions-reverse}
-
-Synonym for [“arrayReverse”](#arrayreverse).
-
-## arrayFlatten {#arrayflatten}
-
-Converts an array of arrays to a flat array.
-
-Function:
-
-- Applies to any depth of nested arrays.
-- Does not change arrays that are already flat.
-
-The flattened array contains all the elements from all source arrays.
-
-**Syntax**
-
-``` sql
-arrayFlatten(array_of_arrays)
-```
-
-Alias: `flatten`.
-
-**Parameters**
-
-- `array_of_arrays` — [Array](../../data_types/array.md) of arrays. For example, `[[1,2,3], [4,5]]`.
-
-**Examples**
-
-``` sql
-SELECT flatten([[[1]], [[2], [3]]])
-```
-
-``` text
-┌─flatten(array(array([1]), array([2], [3])))─┐
-│ [1,2,3]                                     │
-└─────────────────────────────────────────────┘
-```
-
-## arrayCompact {#arraycompact}
-
-Removes consecutive duplicate elements from an array. The order of result values is determined by the order in the source array.
-
-**Syntax**
-
-``` sql
-arrayCompact(arr)
-```
-
-**Parameters**
-
-`arr` — The [array](../../data_types/array.md) to inspect.
-
-**Returned value**
-
-The array without consecutive duplicates.
-
-Type: `Array`.
-
-**Example**
-
-Query:
-
-``` sql
-SELECT arrayCompact([1, 1, nan, nan, 2, 3, 3, 3])
-```
-
-Result:
-
-``` text
-┌─arrayCompact([1, 1, nan, nan, 2, 3, 3, 3])─┐
-│ [1,nan,nan,2,3]                            │
-└────────────────────────────────────────────┘
-```
-
-## arrayZip {#arrayzip}
-
-Combines multiple Array type columns into one Array\[Tuple(…)\] column.
-
-**Syntax**
-
-``` sql
-arrayZip(arr1, arr2, ..., arrN)
-```
-
-**Parameters**
-
-`arr` — Any number of [array](../../data_types/array.md) type columns to combine.
-
-**Returned value**
-
-The resulting Array\[Tuple(…)\] type column after combining these arrays.
-
-**Example**
-
-Query:
-
-``` sql
-SELECT arrayZip(['a', 'b', 'c'], ['d', 'e', 'f']);
-```
-
-Result:
-
-``` text
-┌─arrayZip(['a', 'b', 'c'], ['d', 'e', 'f'])─┐
-│ [('a','d'),('b','e'),('c','f')]            │
-└────────────────────────────────────────────┘
-```
-
-## arrayAUC {#arrayauc}
-
-Calculates AUC (Area Under the Curve, which is a concept in machine learning; see more details: https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_the_curve).
-
-**Syntax**
-
-```sql
-arrayAUC(arr_scores, arr_labels)
-```
-
-**Parameters**
-
-- `arr_scores` — the scores the prediction model gives.
-- `arr_labels` — labels of samples, usually 1 for a positive sample and 0 for a negative sample.
-
-**Returned value**
-
-Returns the AUC value with type Float64.
-
-**Example**
-
-Query:
-
-```sql
-SELECT arrayAUC([0.1, 0.4, 0.35, 0.8], [0, 0, 1, 1])
-```
-
-Result:
-
-```text
-┌─arrayAUC([0.1, 0.4, 0.35, 0.8], [0, 0, 1, 1])─┐
-│                                          0.75 │
-└───────────────────────────────────────────────┘
-```
-
-[Original article](https://clickhouse.tech/docs/en/query_language/functions/array_functions/)
diff --git a/docs/fa/query_language/functions/array_join.md b/docs/fa/query_language/functions/array_join.md
deleted file mode 100644
index 5bb2c1f446b..00000000000
--- a/docs/fa/query_language/functions/array_join.md
+++ /dev/null
@@ -1,34 +0,0 @@
----
-en_copy: true
----
-
-# arrayJoin function {#functions_arrayjoin}
-
-This is a very unusual function.
-
-Normal functions don’t change a set of rows, but just change the values in each row (map).
-Aggregate functions compress a set of rows (fold or reduce).
-The ‘arrayJoin’ function takes each row and generates a set of rows (unfold).
-
-This function takes an array as an argument, and propagates the source row to multiple rows for the number of elements in the array.
-All the values in columns are simply copied, except the values in the column where this function is applied; it is replaced with the corresponding array value.
-
-A query can use multiple `arrayJoin` functions. In this case, the transformation is performed multiple times.
-
-Note the ARRAY JOIN syntax in the SELECT query, which provides broader possibilities.
- -Example: - -``` sql -SELECT arrayJoin([1, 2, 3] AS src) AS dst, 'Hello', src -``` - -``` text -┌─dst─┬─\'Hello\'─┬─src─────┐ -│ 1 │ Hello │ [1,2,3] │ -│ 2 │ Hello │ [1,2,3] │ -│ 3 │ Hello │ [1,2,3] │ -└─────┴───────────┴─────────┘ -``` - -[Original article](https://clickhouse.tech/docs/en/query_language/functions/array_join/) diff --git a/docs/fa/query_language/functions/bit_functions.md b/docs/fa/query_language/functions/bit_functions.md deleted file mode 100644 index 139d5b240e7..00000000000 --- a/docs/fa/query_language/functions/bit_functions.md +++ /dev/null @@ -1,251 +0,0 @@ ---- -en_copy: true ---- - -# Bit functions {#bit-functions} - -Bit functions work for any pair of types from UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64, Float32, or Float64. - -The result type is an integer with bits equal to the maximum bits of its arguments. If at least one of the arguments is signed, the result is a signed number. If an argument is a floating-point number, it is cast to Int64. - -## bitAnd(a, b) {#bitanda-b} - -## bitOr(a, b) {#bitora-b} - -## bitXor(a, b) {#bitxora-b} - -## bitNot(a) {#bitnota} - -## bitShiftLeft(a, b) {#bitshiftlefta-b} - -## bitShiftRight(a, b) {#bitshiftrighta-b} - -## bitRotateLeft(a, b) {#bitrotatelefta-b} - -## bitRotateRight(a, b) {#bitrotaterighta-b} - -## bitTest {#bittest} - -Takes any integer and converts it into [binary form](https://en.wikipedia.org/wiki/Binary_number), returns the value of a bit at specified position. The countdown starts from 0 from the right to the left. - -**Syntax** - -``` sql -SELECT bitTest(number, index) -``` - -**Parameters** - -- `number` – integer number. -- `index` – position of bit. - -**Returned values** - -Returns a value of bit at specified position. - -Type: `UInt8`. - -**Example** - -For example, the number 43 in base-2 (binary) numeral system is 101011. - -Query: - -``` sql -SELECT bitTest(43, 1) -``` - -Result: - -``` text -┌─bitTest(43, 1)─┐ -│ 1 │ -└────────────────┘ -``` - -Another example: - -Query: - -``` sql -SELECT bitTest(43, 2) -``` - -Result: - -``` text -┌─bitTest(43, 2)─┐ -│ 0 │ -└────────────────┘ -``` - -## bitTestAll {#bittestall} - -Returns result of [logical conjuction](https://en.wikipedia.org/wiki/Logical_conjunction) (AND operator) of all bits at given positions. The countdown starts from 0 from the right to the left. - -The conjuction for bitwise operations: - -0 AND 0 = 0 - -0 AND 1 = 0 - -1 AND 0 = 0 - -1 AND 1 = 1 - -**Syntax** - -``` sql -SELECT bitTestAll(number, index1, index2, index3, index4, ...) -``` - -**Parameters** - -- `number` – integer number. -- `index1`, `index2`, `index3`, `index4` – positions of bit. For example, for set of positions (`index1`, `index2`, `index3`, `index4`) is true if and only if all of its positions are true (`index1` ⋀ `index2`, ⋀ `index3` ⋀ `index4`). - -**Returned values** - -Returns result of logical conjuction. - -Type: `UInt8`. - -**Example** - -For example, the number 43 in base-2 (binary) numeral system is 101011. - -Query: - -``` sql -SELECT bitTestAll(43, 0, 1, 3, 5) -``` - -Result: - -``` text -┌─bitTestAll(43, 0, 1, 3, 5)─┐ -│ 1 │ -└────────────────────────────┘ -``` - -Another example: - -Query: - -``` sql -SELECT bitTestAll(43, 0, 1, 3, 5, 2) -``` - -Result: - -``` text -┌─bitTestAll(43, 0, 1, 3, 5, 2)─┐ -│ 0 │ -└───────────────────────────────┘ -``` - -## bitTestAny {#bittestany} - -Returns result of [logical disjunction](https://en.wikipedia.org/wiki/Logical_disjunction) (OR operator) of all bits at given positions. 
The countdown starts from 0 from the right to the left.
-
-The disjunction for bitwise operations:
-
-0 OR 0 = 0
-
-0 OR 1 = 1
-
-1 OR 0 = 1
-
-1 OR 1 = 1
-
-**Syntax**
-
-``` sql
-SELECT bitTestAny(number, index1, index2, index3, index4, ...)
-```
-
-**Parameters**
-
-- `number` – integer number.
-- `index1`, `index2`, `index3`, `index4` – positions of bit.
-
-**Returned values**
-
-Returns the result of the logical disjunction.
-
-Type: `UInt8`.
-
-**Example**
-
-For example, the number 43 in base-2 (binary) numeral system is 101011.
-
-Query:
-
-``` sql
-SELECT bitTestAny(43, 0, 2)
-```
-
-Result:
-
-``` text
-┌─bitTestAny(43, 0, 2)─┐
-│                    1 │
-└──────────────────────┘
-```
-
-Another example:
-
-Query:
-
-``` sql
-SELECT bitTestAny(43, 4, 2)
-```
-
-Result:
-
-``` text
-┌─bitTestAny(43, 4, 2)─┐
-│                    0 │
-└──────────────────────┘
-```
-
-## bitCount {#bitcount}
-
-Calculates the number of bits set to one in the binary representation of a number.
-
-**Syntax**
-
-```sql
-bitCount(x)
-```
-
-**Parameters**
-
-- `x` — [Integer](../../data_types/int_uint.md) or [floating-point](../../data_types/float.md) number. The function uses the value representation in memory. It allows supporting floating-point numbers.
-
-**Returned value**
-
-- Number of bits set to one in the input number.
-
-The function doesn't convert the input value to a larger type ([sign extension](https://en.wikipedia.org/wiki/Sign_extension)). So, for example, `bitCount(toUInt8(-1)) = 8`.
-
-Type: `UInt8`.
-
-**Example**
-
-Take for example the number 333. Its binary representation: 0000000101001101.
-
-Query:
-
-```sql
-SELECT bitCount(333)
-```
-
-Result:
-
-```text
-┌─bitCount(333)─┐
-│             5 │
-└───────────────┘
-```
-
-[Original article](https://clickhouse.tech/docs/en/query_language/functions/bit_functions/)
diff --git a/docs/fa/query_language/functions/bitmap_functions.md b/docs/fa/query_language/functions/bitmap_functions.md
deleted file mode 100644
index 2a157652847..00000000000
--- a/docs/fa/query_language/functions/bitmap_functions.md
+++ /dev/null
@@ -1,493 +0,0 @@
----
-en_copy: true
----
-
-# Bitmap functions {#bitmap-functions}
-
-Bitmap functions operate on two bitmap objects and return either a new bitmap or a cardinality, using formula calculations such as and, or, xor, and not.
-
-There are two ways to construct a bitmap object: with the aggregate function groupBitmap with the -State suffix, or from an Array object. A bitmap object can also be converted back to an Array object.
-
-For actual storage, bitmap objects are wrapped in a data structure: when the cardinality is less than or equal to 32, a Set object is used; when the cardinality is greater than 32, a RoaringBitmap object is used. That is why storing a low-cardinality set is faster.
-
-For more information on RoaringBitmap, see: [CRoaring](https://github.com/RoaringBitmap/CRoaring).
-
-## bitmapBuild {#bitmap_functions-bitmapbuild}
-
-Builds a bitmap from an unsigned integer array.
-
-``` sql
-bitmapBuild(array)
-```
-
-**Parameters**
-
-- `array` – unsigned integer array.
-
-**Example**
-
-``` sql
-SELECT bitmapBuild([1, 2, 3, 4, 5]) AS res, toTypeName(res)
-```
-
-``` text
-┌─res─┬─toTypeName(bitmapBuild([1, 2, 3, 4, 5]))─────┐
-│  │ AggregateFunction(groupBitmap, UInt8) │
-└─────┴──────────────────────────────────────────────┘
-```
-
-## bitmapToArray {#bitmaptoarray}
-
-Converts a bitmap to an integer array.
-
-``` sql
-bitmapToArray(bitmap)
-```
-
-**Parameters**
-
-- `bitmap` – bitmap object.
- -**Example** - -``` sql -SELECT bitmapToArray(bitmapBuild([1, 2, 3, 4, 5])) AS res -``` - -``` text -┌─res─────────┐ -│ [1,2,3,4,5] │ -└─────────────┘ -``` - -## bitmapSubsetInRange {#bitmap-functions-bitmapsubsetinrange} - -Return subset in specified range (not include the range\_end). - -``` sql -bitmapSubsetInRange(bitmap, range_start, range_end) -``` - -**Parameters** - -- `bitmap` – [Bitmap object](#bitmap_functions-bitmapbuild). -- `range_start` – range start point. Type: [UInt32](../../data_types/int_uint.md). -- `range_end` – range end point(excluded). Type: [UInt32](../../data_types/int_uint.md). - -**Example** - -``` sql -SELECT bitmapToArray(bitmapSubsetInRange(bitmapBuild([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,100,200,500]), toUInt32(30), toUInt32(200))) AS res -``` - -``` text -┌─res───────────────┐ -│ [30,31,32,33,100] │ -└───────────────────┘ -``` - -## bitmapSubsetLimit {#bitmapsubsetlimit} - -Creates a subset of bitmap with n elements taken between `range_start` and `cardinality_limit`. - -**Syntax** - -``` sql -bitmapSubsetLimit(bitmap, range_start, cardinality_limit) -``` - -**Parameters** - -- `bitmap` – [Bitmap object](#bitmap_functions-bitmapbuild). -- `range_start` – The subset starting point. Type: [UInt32](../../data_types/int_uint.md). -- `cardinality_limit` – The subset cardinality upper limit. Type: [UInt32](../../data_types/int_uint.md). - -**Returned value** - -The subset. - -Type: `Bitmap object`. - -**Example** - -Query: - -``` sql -SELECT bitmapToArray(bitmapSubsetLimit(bitmapBuild([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,100,200,500]), toUInt32(30), toUInt32(200))) AS res -``` - -Result: - -``` text -┌─res───────────────────────┐ -│ [30,31,32,33,100,200,500] │ -└───────────────────────────┘ -``` - -## bitmapContains {#bitmap_functions-bitmapcontains} - -Checks whether the bitmap contains an element. - -``` sql -bitmapContains(haystack, needle) -``` - -**Parameters** - -- `haystack` – [Bitmap object](#bitmap_functions-bitmapbuild), where the function searches. -- `needle` – Value that the function searches. Type: [UInt32](../../data_types/int_uint.md). - -**Returned values** - -- 0 — If `haystack` doesn’t contain `needle`. -- 1 — If `haystack` contains `needle`. - -Type: `UInt8`. - -**Example** - -``` sql -SELECT bitmapContains(bitmapBuild([1,5,7,9]), toUInt32(9)) AS res -``` - -``` text -┌─res─┐ -│ 1 │ -└─────┘ -``` - -## bitmapHasAny {#bitmaphasany} - -Checks whether two bitmaps have intersection by some elements. - -``` sql -bitmapHasAny(bitmap1, bitmap2) -``` - -If you are sure that `bitmap2` contains strictly one element, consider using the [bitmapContains](#bitmap_functions-bitmapcontains) function. It works more efficiently. - -**Parameters** - -- `bitmap*` – bitmap object. - -**Return values** - -- `1`, if `bitmap1` and `bitmap2` have one similar element at least. -- `0`, otherwise. - -**Example** - -``` sql -SELECT bitmapHasAny(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res -``` - -``` text -┌─res─┐ -│ 1 │ -└─────┘ -``` - -## bitmapHasAll {#bitmaphasall} - -Analogous to `hasAll(array, array)` returns 1 if the first bitmap contains all the elements of the second one, 0 otherwise. -If the second argument is an empty bitmap then returns 1. - -``` sql -bitmapHasAll(bitmap,bitmap) -``` - -**Parameters** - -- `bitmap` – bitmap object. 
-
-**Example**
-
-``` sql
-SELECT bitmapHasAll(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res
-```
-
-``` text
-┌─res─┐
-│   0 │
-└─────┘
-```
-
-## bitmapCardinality {#bitmapcardinality}
-
-Returns the bitmap cardinality as a value of type UInt64.
-
-``` sql
-bitmapCardinality(bitmap)
-```
-
-**Parameters**
-
-- `bitmap` – bitmap object.
-
-**Example**
-
-``` sql
-SELECT bitmapCardinality(bitmapBuild([1, 2, 3, 4, 5])) AS res
-```
-
-``` text
-┌─res─┐
-│   5 │
-└─────┘
-```
-
-## bitmapMin {#bitmapmin}
-
-Returns the smallest value of type UInt64 in the set, or UINT32\_MAX if the set is empty.
-
-    bitmapMin(bitmap)
-
-**Parameters**
-
-- `bitmap` – bitmap object.
-
-**Example**
-
-``` sql
-SELECT bitmapMin(bitmapBuild([1, 2, 3, 4, 5])) AS res
-```
-
-    ┌─res─┐
-    │   1 │
-    └─────┘
-
-## bitmapMax {#bitmapmax}
-
-Returns the greatest value of type UInt64 in the set, or 0 if the set is empty.
-
-    bitmapMax(bitmap)
-
-**Parameters**
-
-- `bitmap` – bitmap object.
-
-**Example**
-
-``` sql
-SELECT bitmapMax(bitmapBuild([1, 2, 3, 4, 5])) AS res
-```
-
-    ┌─res─┐
-    │   5 │
-    └─────┘
-
-## bitmapTransform {#bitmaptransform}
-
-Transforms an array of values in a bitmap into another array of values; the result is a new bitmap.
-
-    bitmapTransform(bitmap, from_array, to_array)
-
-**Parameters**
-
-- `bitmap` – bitmap object.
-- `from_array` – UInt32 array. For idx in range \[0, from\_array.size()), if the bitmap contains from\_array\[idx\], it is replaced with to\_array\[idx\]. Note that the result depends on the array ordering if there are common elements between from\_array and to\_array.
-- `to_array` – UInt32 array, its size must be the same as from\_array.
-
-**Example**
-
-``` sql
-SELECT bitmapToArray(bitmapTransform(bitmapBuild([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), cast([5,999,2] as Array(UInt32)), cast([2,888,20] as Array(UInt32)))) AS res
-```
-
-    ┌─res───────────────────┐
-    │ [1,3,4,6,7,8,9,10,20] │
-    └───────────────────────┘
-
-## bitmapAnd {#bitmapand}
-
-Computes the logical conjunction (AND) of two bitmaps; the result is a new bitmap.
-
-``` sql
-bitmapAnd(bitmap,bitmap)
-```
-
-**Parameters**
-
-- `bitmap` – bitmap object.
-
-**Example**
-
-``` sql
-SELECT bitmapToArray(bitmapAnd(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS res
-```
-
-``` text
-┌─res─┐
-│ [3] │
-└─────┘
-```
-
-## bitmapOr {#bitmapor}
-
-Computes the logical disjunction (OR) of two bitmaps; the result is a new bitmap.
-
-``` sql
-bitmapOr(bitmap,bitmap)
-```
-
-**Parameters**
-
-- `bitmap` – bitmap object.
-
-**Example**
-
-``` sql
-SELECT bitmapToArray(bitmapOr(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS res
-```
-
-``` text
-┌─res─────────┐
-│ [1,2,3,4,5] │
-└─────────────┘
-```
-
-## bitmapXor {#bitmapxor}
-
-Computes the symmetric difference (XOR) of two bitmaps; the result is a new bitmap.
-
-``` sql
-bitmapXor(bitmap,bitmap)
-```
-
-**Parameters**
-
-- `bitmap` – bitmap object.
-
-**Example**
-
-``` sql
-SELECT bitmapToArray(bitmapXor(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS res
-```
-
-``` text
-┌─res───────┐
-│ [1,2,4,5] │
-└───────────┘
-```
-
-## bitmapAndnot {#bitmapandnot}
-
-Computes the difference (AND-NOT) of two bitmaps; the result is a new bitmap.
-
-``` sql
-bitmapAndnot(bitmap,bitmap)
-```
-
-**Parameters**
-
-- `bitmap` – bitmap object.
-
-**Example**
-
-``` sql
-SELECT bitmapToArray(bitmapAndnot(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS res
-```
-
-``` text
-┌─res───┐
-│ [1,2] │
-└───────┘
-```
-
-## bitmapAndCardinality {#bitmapandcardinality}
-
-Computes the logical conjunction (AND) of two bitmaps and returns the cardinality as a value of type UInt64.
-
-``` sql
-bitmapAndCardinality(bitmap,bitmap)
-```
-
-**Parameters**
-
-- `bitmap` – bitmap object.
-
-**Example**
-
-``` sql
-SELECT bitmapAndCardinality(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res;
-```
-
-``` text
-┌─res─┐
-│   1 │
-└─────┘
-```
-
-## bitmapOrCardinality {#bitmaporcardinality}
-
-Computes the logical disjunction (OR) of two bitmaps and returns the cardinality as a value of type UInt64.
-
-``` sql
-bitmapOrCardinality(bitmap,bitmap)
-```
-
-**Parameters**
-
-- `bitmap` – bitmap object.
-
-**Example**
-
-``` sql
-SELECT bitmapOrCardinality(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res;
-```
-
-``` text
-┌─res─┐
-│   5 │
-└─────┘
-```
-
-## bitmapXorCardinality {#bitmapxorcardinality}
-
-Computes the symmetric difference (XOR) of two bitmaps and returns the cardinality as a value of type UInt64.
-
-``` sql
-bitmapXorCardinality(bitmap,bitmap)
-```
-
-**Parameters**
-
-- `bitmap` – bitmap object.
-
-**Example**
-
-``` sql
-SELECT bitmapXorCardinality(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res;
-```
-
-``` text
-┌─res─┐
-│   4 │
-└─────┘
-```
-
-## bitmapAndnotCardinality {#bitmapandnotcardinality}
-
-Computes the difference (AND-NOT) of two bitmaps and returns the cardinality as a value of type UInt64.
-
-``` sql
-bitmapAndnotCardinality(bitmap,bitmap)
-```
-
-**Parameters**
-
-- `bitmap` – bitmap object.
-
-**Example**
-
-``` sql
-SELECT bitmapAndnotCardinality(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res;
-```
-
-``` text
-┌─res─┐
-│   2 │
-└─────┘
-```
-
-[Original article](https://clickhouse.tech/docs/en/query_language/functions/bitmap_functions/)
diff --git a/docs/fa/query_language/functions/comparison_functions.md b/docs/fa/query_language/functions/comparison_functions.md
deleted file mode 100644
index 8dd01d0b107..00000000000
--- a/docs/fa/query_language/functions/comparison_functions.md
+++ /dev/null
@@ -1,34 +0,0 @@
----
-en_copy: true
----
-
-# Comparison functions {#comparison-functions}
-
-Comparison functions always return 0 or 1 (UInt8).
-
-The following types can be compared:
-
-- numbers
-- strings and fixed strings
-- dates
-- dates with times
-
-Values can be compared within each group, but not between different groups.
-
-For example, you can’t compare a date with a string. You have to use a function to convert the string to a date, or vice versa.
-
-Strings are compared by bytes. A shorter string is smaller than all strings that start with it and that contain at least one more character.
-
-## equals, a = b and a == b operator {#function-equals}
-
-## notEquals, a != b and a <> b operator {#function-notequals}
-
-## less, < operator {#function-less}
-
-## greater, > operator {#function-greater}
-
-## lessOrEquals, <= operator {#function-lessorequals}
-
-## greaterOrEquals, >= operator {#function-greaterorequals}
-
-[Original article](https://clickhouse.tech/docs/en/query_language/functions/comparison_functions/)
diff --git a/docs/fa/query_language/functions/conditional_functions.md b/docs/fa/query_language/functions/conditional_functions.md
deleted file mode 100644
index 0f11f1cd622..00000000000
--- a/docs/fa/query_language/functions/conditional_functions.md
+++ /dev/null
@@ -1,204 +0,0 @@
----
-en_copy: true
----
-
-# Conditional functions {#conditional-functions}
-
-## if {#if}
-
-Controls conditional branching. Unlike most systems, ClickHouse always evaluates both expressions `then` and `else`.
-
-**Syntax**
-
-``` sql
-SELECT if(cond, then, else)
-```
-
-If the condition `cond` evaluates to a non-zero value, returns the result of the expression `then`, and the result of the expression `else`, if present, is skipped. If the `cond` is zero or `NULL`, then the result of the `then` expression is skipped and the result of the `else` expression, if present, is returned.
- -**Parameters** - -- `cond` – The condition for evaluation that can be zero or not. The type is UInt8, Nullable(UInt8) or NULL. -- `then` - The expression to return if condition is met. -- `else` - The expression to return if condition is not met. - -**Returned values** - -The function executes `then` and `else` expressions and returns its result, depending on whether the condition `cond` ended up being zero or not. - -**Example** - -Query: - -``` sql -SELECT if(1, plus(2, 2), plus(2, 6)) -``` - -Result: - -``` text -┌─plus(2, 2)─┐ -│ 4 │ -└────────────┘ -``` - -Query: - -``` sql -SELECT if(0, plus(2, 2), plus(2, 6)) -``` - -Result: - -``` text -┌─plus(2, 6)─┐ -│ 8 │ -└────────────┘ -``` - -- `then` and `else` must have the lowest common type. - -**Example:** - -Take this `LEFT_RIGHT` table: - -``` sql -SELECT * -FROM LEFT_RIGHT - -┌─left─┬─right─┐ -│ ᴺᵁᴸᴸ │ 4 │ -│ 1 │ 3 │ -│ 2 │ 2 │ -│ 3 │ 1 │ -│ 4 │ ᴺᵁᴸᴸ │ -└──────┴───────┘ -``` - -The following query compares `left` and `right` values: - -``` sql -SELECT - left, - right, - if(left < right, 'left is smaller than right', 'right is greater or equal than left') AS is_smaller -FROM LEFT_RIGHT -WHERE isNotNull(left) AND isNotNull(right) - -┌─left─┬─right─┬─is_smaller──────────────────────────┐ -│ 1 │ 3 │ left is smaller than right │ -│ 2 │ 2 │ right is greater or equal than left │ -│ 3 │ 1 │ right is greater or equal than left │ -└──────┴───────┴─────────────────────────────────────┘ -``` - -Note: `NULL` values are not used in this example, check [NULL values in conditionals](#null-values-in-conditionals) section. - -## Ternary Operator {#ternary-operator} - -It works same as `if` function. - -Syntax: `cond ? then : else` - -Returns `then` if the `cond` evaluates to be true (greater than zero), otherwise returns `else`. - -- `cond` must be of type of `UInt8`, and `then` and `else` must have the lowest common type. - -- `then` and `else` can be `NULL` - -**See also** - -- [ifNotFinite](other_functions.md#ifnotfinite). - -## multiIf {#multiif} - -Allows you to write the [CASE](../operators.md#operator_case) operator more compactly in the query. - -Syntax: `multiIf(cond_1, then_1, cond_2, then_2, ..., else)` - -**Parameters:** - -- `cond_N` — The condition for the function to return `then_N`. -- `then_N` — The result of the function when executed. -- `else` — The result of the function if none of the conditions is met. - -The function accepts `2N+1` parameters. - -**Returned values** - -The function returns one of the values `then_N` or `else`, depending on the conditions `cond_N`. - -**Example** - -Again using `LEFT_RIGHT` table. - -``` sql -SELECT - left, - right, - multiIf(left < right, 'left is smaller', left > right, 'left is greater', left = right, 'Both equal', 'Null value') AS result -FROM LEFT_RIGHT - -┌─left─┬─right─┬─result──────────┐ -│ ᴺᵁᴸᴸ │ 4 │ Null value │ -│ 1 │ 3 │ left is smaller │ -│ 2 │ 2 │ Both equal │ -│ 3 │ 1 │ left is greater │ -│ 4 │ ᴺᵁᴸᴸ │ Null value │ -└──────┴───────┴─────────────────┘ -``` - -## Using conditional results directly {#using-conditional-results-directly} - -Conditionals always result to `0`, `1` or `NULL`. So you can use conditional results directly like this: - -``` sql -SELECT left < right AS is_small -FROM LEFT_RIGHT - -┌─is_small─┐ -│ ᴺᵁᴸᴸ │ -│ 1 │ -│ 0 │ -│ 0 │ -│ ᴺᵁᴸᴸ │ -└──────────┘ -``` - -## NULL values in conditionals {#null-values-in-conditionals} - -When `NULL` values are involved in conditionals, the result will also be `NULL`. 
- -``` sql -SELECT - NULL < 1, - 2 < NULL, - NULL < NULL, - NULL = NULL - -┌─less(NULL, 1)─┬─less(2, NULL)─┬─less(NULL, NULL)─┬─equals(NULL, NULL)─┐ -│ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ -└───────────────┴───────────────┴──────────────────┴────────────────────┘ -``` - -So you should construct your queries carefully if the types are `Nullable`. - -The following example demonstrates this by failing to add equals condition to `multiIf`. - -``` sql -SELECT - left, - right, - multiIf(left < right, 'left is smaller', left > right, 'right is smaller', 'Both equal') AS faulty_result -FROM LEFT_RIGHT - -┌─left─┬─right─┬─faulty_result────┐ -│ ᴺᵁᴸᴸ │ 4 │ Both equal │ -│ 1 │ 3 │ left is smaller │ -│ 2 │ 2 │ Both equal │ -│ 3 │ 1 │ right is smaller │ -│ 4 │ ᴺᵁᴸᴸ │ Both equal │ -└──────┴───────┴──────────────────┘ -``` - -[Original article](https://clickhouse.tech/docs/en/query_language/functions/conditional_functions/) diff --git a/docs/fa/query_language/functions/date_time_functions.md b/docs/fa/query_language/functions/date_time_functions.md deleted file mode 100644 index 4cff590034e..00000000000 --- a/docs/fa/query_language/functions/date_time_functions.md +++ /dev/null @@ -1,447 +0,0 @@ ---- -en_copy: true ---- - -# Functions for working with dates and times {#functions-for-working-with-dates-and-times} - -Support for time zones - -All functions for working with the date and time that have a logical use for the time zone can accept a second optional time zone argument. Example: Asia/Yekaterinburg. In this case, they use the specified time zone instead of the local (default) one. - -``` sql -SELECT - toDateTime('2016-06-15 23:00:00') AS time, - toDate(time) AS date_local, - toDate(time, 'Asia/Yekaterinburg') AS date_yekat, - toString(time, 'US/Samoa') AS time_samoa -``` - -``` text -┌────────────────time─┬─date_local─┬─date_yekat─┬─time_samoa──────────┐ -│ 2016-06-15 23:00:00 │ 2016-06-15 │ 2016-06-16 │ 2016-06-15 09:00:00 │ -└─────────────────────┴────────────┴────────────┴─────────────────────┘ -``` - -Only time zones that differ from UTC by a whole number of hours are supported. - -## toTimeZone {#totimezone} - -Convert time or date and time to the specified time zone. - -## toYear {#toyear} - -Converts a date or date with time to a UInt16 number containing the year number (AD). - -## toQuarter {#toquarter} - -Converts a date or date with time to a UInt8 number containing the quarter number. - -## toMonth {#tomonth} - -Converts a date or date with time to a UInt8 number containing the month number (1-12). - -## toDayOfYear {#todayofyear} - -Converts a date or date with time to a UInt16 number containing the number of the day of the year (1-366). - -## toDayOfMonth {#todayofmonth} - -Converts a date or date with time to a UInt8 number containing the number of the day of the month (1-31). - -## toDayOfWeek {#todayofweek} - -Converts a date or date with time to a UInt8 number containing the number of the day of the week (Monday is 1, and Sunday is 7). - -## toHour {#tohour} - -Converts a date with time to a UInt8 number containing the number of the hour in 24-hour time (0-23). -This function assumes that if clocks are moved ahead, it is by one hour and occurs at 2 a.m., and if clocks are moved back, it is by one hour and occurs at 3 a.m. (which is not always true – even in Moscow the clocks were twice changed at a different time). - -## toMinute {#tominute} - -Converts a date with time to a UInt8 number containing the number of the minute of the hour (0-59). 
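-
-The extraction functions above follow the same pattern, so a single hypothetical query illustrates several of them at once (aliases and values chosen arbitrarily):
-
-``` sql
-SELECT
-    toDateTime('2016-06-15 23:05:04') AS t,
-    toYear(t) AS year,
-    toMonth(t) AS month,
-    toDayOfWeek(t) AS day_of_week,   -- Monday is 1, so Wednesday is 3
-    toHour(t) AS hour,
-    toMinute(t) AS minute
-```
-
-``` text
-┌───────────────────t─┬─year─┬─month─┬─day_of_week─┬─hour─┬─minute─┐
-│ 2016-06-15 23:05:04 │ 2016 │     6 │           3 │   23 │      5 │
-└─────────────────────┴──────┴───────┴─────────────┴──────┴────────┘
-```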
- -## toSecond {#tosecond} - -Converts a date with time to a UInt8 number containing the number of the second in the minute (0-59). -Leap seconds are not accounted for. - -## toUnixTimestamp {#to-unix-timestamp} - -For DateTime argument: converts value to its internal numeric representation (Unix Timestamp). -For String argument: parse datetime from string according to the timezone (optional second argument, server timezone is used by default) and returns the corresponding unix timestamp. -For Date argument: the behaviour is unspecified. - -**Syntax** - -``` sql -toUnixTimestamp(datetime) -toUnixTimestamp(str, [timezone]) -``` - -**Returned value** - -- Returns the unix timestamp. - -Type: `UInt32`. - -**Example** - -Query: - -``` sql -SELECT toUnixTimestamp('2017-11-05 08:07:47', 'Asia/Tokyo') AS unix_timestamp -``` - -Result: - -``` text -┌─unix_timestamp─┐ -│ 1509836867 │ -└────────────────┘ -``` - -## toStartOfYear {#tostartofyear} - -Rounds down a date or date with time to the first day of the year. -Returns the date. - -## toStartOfISOYear {#tostartofisoyear} - -Rounds down a date or date with time to the first day of ISO year. -Returns the date. - -## toStartOfQuarter {#tostartofquarter} - -Rounds down a date or date with time to the first day of the quarter. -The first day of the quarter is either 1 January, 1 April, 1 July, or 1 October. -Returns the date. - -## toStartOfMonth {#tostartofmonth} - -Rounds down a date or date with time to the first day of the month. -Returns the date. - -!!! attention "Attention" - The behavior of parsing incorrect dates is implementation specific. ClickHouse may return zero date, throw an exception or do “natural” overflow. - -## toMonday {#tomonday} - -Rounds down a date or date with time to the nearest Monday. -Returns the date. - -## toStartOfWeek(t\[,mode\]) {#tostartofweektmode} - -Rounds down a date or date with time to the nearest Sunday or Monday by mode. -Returns the date. -The mode argument works exactly like the mode argument to toWeek(). For the single-argument syntax, a mode value of 0 is used. - -## toStartOfDay {#tostartofday} - -Rounds down a date with time to the start of the day. - -## toStartOfHour {#tostartofhour} - -Rounds down a date with time to the start of the hour. - -## toStartOfMinute {#tostartofminute} - -Rounds down a date with time to the start of the minute. - -## toStartOfFiveMinute {#tostartoffiveminute} - -Rounds down a date with time to the start of the five-minute interval. - -## toStartOfTenMinutes {#tostartoftenminutes} - -Rounds down a date with time to the start of the ten-minute interval. - -## toStartOfFifteenMinutes {#tostartoffifteenminutes} - -Rounds down the date with time to the start of the fifteen-minute interval. - -## toStartOfInterval(time\_or\_data, INTERVAL x unit \[, time\_zone\]) {#tostartofintervaltime-or-data-interval-x-unit-time-zone} - -This is a generalization of other functions named `toStartOf*`. For example, -`toStartOfInterval(t, INTERVAL 1 year)` returns the same as `toStartOfYear(t)`, -`toStartOfInterval(t, INTERVAL 1 month)` returns the same as `toStartOfMonth(t)`, -`toStartOfInterval(t, INTERVAL 1 day)` returns the same as `toStartOfDay(t)`, -`toStartOfInterval(t, INTERVAL 15 minute)` returns the same as `toStartOfFifteenMinutes(t)` etc. - -## toTime {#totime} - -Converts a date with time to a certain fixed date, while preserving the time. 
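-
-For illustration, a hypothetical query showing that `toStartOfInterval` matches the corresponding specialized function, and that `toTime` preserves the time while moving the date part to the fixed day (1970-01-02 in current versions; treat the exact date as an assumption):
-
-``` sql
-WITH toDateTime('2019-02-11 14:27:31') AS t
-SELECT
-    toStartOfInterval(t, INTERVAL 15 minute) AS interval_15_min,
-    toStartOfFifteenMinutes(t) AS fifteen_min,
-    toTime(t) AS time_part
-```
-
-``` text
-┌─────interval_15_min─┬─────────fifteen_min─┬───────────time_part─┐
-│ 2019-02-11 14:15:00 │ 2019-02-11 14:15:00 │ 1970-01-02 14:27:31 │
-└─────────────────────┴─────────────────────┴─────────────────────┘
-```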
- -## toRelativeYearNum {#torelativeyearnum} - -Converts a date with time or date to the number of the year, starting from a certain fixed point in the past. - -## toRelativeQuarterNum {#torelativequarternum} - -Converts a date with time or date to the number of the quarter, starting from a certain fixed point in the past. - -## toRelativeMonthNum {#torelativemonthnum} - -Converts a date with time or date to the number of the month, starting from a certain fixed point in the past. - -## toRelativeWeekNum {#torelativeweeknum} - -Converts a date with time or date to the number of the week, starting from a certain fixed point in the past. - -## toRelativeDayNum {#torelativedaynum} - -Converts a date with time or date to the number of the day, starting from a certain fixed point in the past. - -## toRelativeHourNum {#torelativehournum} - -Converts a date with time or date to the number of the hour, starting from a certain fixed point in the past. - -## toRelativeMinuteNum {#torelativeminutenum} - -Converts a date with time or date to the number of the minute, starting from a certain fixed point in the past. - -## toRelativeSecondNum {#torelativesecondnum} - -Converts a date with time or date to the number of the second, starting from a certain fixed point in the past. - -## toISOYear {#toisoyear} - -Converts a date or date with time to a UInt16 number containing the ISO Year number. - -## toISOWeek {#toisoweek} - -Converts a date or date with time to a UInt8 number containing the ISO Week number. - -## toWeek(date\[,mode\]) {#toweekdatemode} - -This function returns the week number for date or datetime. The two-argument form of toWeek() enables you to specify whether the week starts on Sunday or Monday and whether the return value should be in the range from 0 to 53 or from 1 to 53. If the mode argument is omitted, the default mode is 0. -`toISOWeek()`is a compatibility function that is equivalent to `toWeek(date,3)`. -The following table describes how the mode argument works. - -| Mode | First day of week | Range | Week 1 is the first week … | -|------|-------------------|-------|-------------------------------| -| 0 | Sunday | 0-53 | with a Sunday in this year | -| 1 | Monday | 0-53 | with 4 or more days this year | -| 2 | Sunday | 1-53 | with a Sunday in this year | -| 3 | Monday | 1-53 | with 4 or more days this year | -| 4 | Sunday | 0-53 | with 4 or more days this year | -| 5 | Monday | 0-53 | with a Monday in this year | -| 6 | Sunday | 1-53 | with 4 or more days this year | -| 7 | Monday | 1-53 | with a Monday in this year | -| 8 | Sunday | 1-53 | contains January 1 | -| 9 | Monday | 1-53 | contains January 1 | - -For mode values with a meaning of “with 4 or more days this year,” weeks are numbered according to ISO 8601:1988: - -- If the week containing January 1 has 4 or more days in the new year, it is week 1. - -- Otherwise, it is the last week of the previous year, and the next week is week 1. - -For mode values with a meaning of “contains January 1”, the week contains January 1 is week 1. It doesn’t matter how many days in the new year the week contained, even if it contained only one day. - -``` sql -toWeek(date, [, mode][, Timezone]) -``` - -**Parameters** - -- `date` – Date or DateTime. -- `mode` – Optional parameter, Range of values is \[0,9\], default is 0. -- `Timezone` – Optional parameter, it behaves like any other conversion function. 
-
-**Example**
-
-``` sql
-SELECT toDate('2016-12-27') AS date, toWeek(date) AS week0, toWeek(date,1) AS week1, toWeek(date,9) AS week9;
-```
-
-``` text
-┌───────date─┬─week0─┬─week1─┬─week9─┐
-│ 2016-12-27 │    52 │    52 │     1 │
-└────────────┴───────┴───────┴───────┘
-```
-
-## toYearWeek(date\[,mode\]) {#toyearweekdatemode}
-
-Returns year and week for a date. The year in the result may be different from the year in the date argument for the first and the last week of the year.
-
-The mode argument works exactly like the mode argument to toWeek(). For the single-argument syntax, a mode value of 0 is used.
-
-`toISOYear()` is a compatibility function that is equivalent to `intDiv(toYearWeek(date,3),100)`.
-
-**Example**
-
-``` sql
-SELECT toDate('2016-12-27') AS date, toYearWeek(date) AS yearWeek0, toYearWeek(date,1) AS yearWeek1, toYearWeek(date,9) AS yearWeek9;
-```
-
-``` text
-┌───────date─┬─yearWeek0─┬─yearWeek1─┬─yearWeek9─┐
-│ 2016-12-27 │    201652 │    201652 │    201701 │
-└────────────┴───────────┴───────────┴───────────┘
-```
-
-## now {#now}
-
-Accepts zero arguments and returns the current time at one of the moments of request execution.
-This function returns a constant, even if the request took a long time to complete.
-
-## today {#today}
-
-Accepts zero arguments and returns the current date at one of the moments of request execution.
-The same as ‘toDate(now())’.
-
-## yesterday {#yesterday}
-
-Accepts zero arguments and returns yesterday’s date at one of the moments of request execution.
-The same as ‘today() - 1’.
-
-## timeSlot {#timeslot}
-
-Rounds the time to the half hour.
-This function is specific to Yandex.Metrica, since half an hour is the minimum amount of time for breaking a session into two sessions if a tracking tag shows a single user’s consecutive pageviews that differ in time by strictly more than this amount. This means that tuples (the tag ID, user ID, and time slot) can be used to search for pageviews that are included in the corresponding session.
-
-## toYYYYMM {#toyyyymm}
-
-Converts a date or date with time to a UInt32 number containing the year and month number (YYYY \* 100 + MM).
-
-## toYYYYMMDD {#toyyyymmdd}
-
-Converts a date or date with time to a UInt32 number containing the year, month and day number (YYYY \* 10000 + MM \* 100 + DD).
-
-## toYYYYMMDDhhmmss {#toyyyymmddhhmmss}
-
-Converts a date or date with time to a UInt64 number containing the year, month, day, hour, minute and second (YYYY \* 10000000000 + MM \* 100000000 + DD \* 1000000 + hh \* 10000 + mm \* 100 + ss).
-
-## addYears, addMonths, addWeeks, addDays, addHours, addMinutes, addSeconds, addQuarters {#addyears-addmonths-addweeks-adddays-addhours-addminutes-addseconds-addquarters}
-
-Function adds a Date/DateTime interval to a Date/DateTime and then returns the Date/DateTime. For example:
-
-``` sql
-WITH
-    toDate('2018-01-01') AS date,
-    toDateTime('2018-01-01 00:00:00') AS date_time
-SELECT
-    addYears(date, 1) AS add_years_with_date,
-    addYears(date_time, 1) AS add_years_with_date_time
-```
-
-``` text
-┌─add_years_with_date─┬─add_years_with_date_time─┐
-│          2019-01-01 │      2019-01-01 00:00:00 │
-└─────────────────────┴──────────────────────────┘
-```
-
-## subtractYears, subtractMonths, subtractWeeks, subtractDays, subtractHours, subtractMinutes, subtractSeconds, subtractQuarters {#subtractyears-subtractmonths-subtractweeks-subtractdays-subtracthours-subtractminutes-subtractseconds-subtractquarters}
-
-Function subtracts a Date/DateTime interval from a Date/DateTime and then returns the Date/DateTime.
For example:
-
-``` sql
-WITH
-    toDate('2019-01-01') AS date,
-    toDateTime('2019-01-01 00:00:00') AS date_time
-SELECT
-    subtractYears(date, 1) AS subtract_years_with_date,
-    subtractYears(date_time, 1) AS subtract_years_with_date_time
-```
-
-``` text
-┌─subtract_years_with_date─┬─subtract_years_with_date_time─┐
-│               2018-01-01 │           2018-01-01 00:00:00 │
-└──────────────────────────┴───────────────────────────────┘
-```
-
-## dateDiff {#datediff}
-
-Returns the difference between two Date or DateTime values.
-
-**Syntax**
-
-``` sql
-dateDiff('unit', startdate, enddate, [timezone])
-```
-
-**Parameters**
-
-- `unit` — Time unit, in which the returned value is expressed. [String](../syntax.md#syntax-string-literal).
-
-    Supported values:
-
-    | unit    |
-    | ------- |
-    | second  |
-    | minute  |
-    | hour    |
-    | day     |
-    | week    |
-    | month   |
-    | quarter |
-    | year    |
-
-- `startdate` — The first time value to compare. [Date](../../data_types/date.md) or [DateTime](../../data_types/datetime.md).
-
-- `enddate` — The second time value to compare. [Date](../../data_types/date.md) or [DateTime](../../data_types/datetime.md).
-
-- `timezone` — Optional parameter. If specified, it is applied to both `startdate` and `enddate`. If not specified, the timezones of `startdate` and `enddate` are used. If they are not the same, the result is unspecified.
-
-**Returned value**
-
-Difference between `startdate` and `enddate` expressed in `unit`.
-
-Type: `int`.
-
-**Example**
-
-Query:
-
-``` sql
-SELECT dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'));
-```
-
-Result:
-
-``` text
-┌─dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'))─┐
-│                                                                                      25 │
-└─────────────────────────────────────────────────────────────────────────────────────────┘
-```
-
-## timeSlots(StartTime, Duration\[, Size\]) {#timeslotsstarttime-duration-size}
-
-For a time interval starting at ‘StartTime’ and continuing for ‘Duration’ seconds, it returns an array of moments in time, consisting of points from this interval rounded down to the ‘Size’ in seconds. ‘Size’ is an optional parameter: a constant UInt32, set to 1800 by default.
-For example, `timeSlots(toDateTime('2012-01-01 12:20:00'), 600) = [toDateTime('2012-01-01 12:00:00'), toDateTime('2012-01-01 12:30:00')]`.
-This is necessary for searching for pageviews in the corresponding session.
-
-## formatDateTime(Time, Format\[, Timezone\]) {#formatdatetime}
-
-Function formats a Time according to the given Format string. N.B.: Format is a constant expression, i.e. you cannot have multiple formats for a single result column.
- -Supported modifiers for Format: -(“Example” column shows formatting result for time `2018-01-02 22:33:44`) - -| Modifier | Description | Example | -|----------|---------------------------------------------------------|------------| -| %C | year divided by 100 and truncated to integer (00-99) | 20 | -| %d | day of the month, zero-padded (01-31) | 02 | -| %D | Short MM/DD/YY date, equivalent to %m/%d/%y | 01/02/18 | -| %e | day of the month, space-padded ( 1-31) | 2 | -| %F | short YYYY-MM-DD date, equivalent to %Y-%m-%d | 2018-01-02 | -| %H | hour in 24h format (00-23) | 22 | -| %I | hour in 12h format (01-12) | 10 | -| %j | day of the year (001-366) | 002 | -| %m | month as a decimal number (01-12) | 01 | -| %M | minute (00-59) | 33 | -| %n | new-line character (‘’) | | -| %p | AM or PM designation | PM | -| %R | 24-hour HH:MM time, equivalent to %H:%M | 22:33 | -| %S | second (00-59) | 44 | -| %t | horizontal-tab character (’) | | -| %T | ISO 8601 time format (HH:MM:SS), equivalent to %H:%M:%S | 22:33:44 | -| %u | ISO 8601 weekday as number with Monday as 1 (1-7) | 2 | -| %V | ISO 8601 week number (01-53) | 01 | -| %w | weekday as a decimal number with Sunday as 0 (0-6) | 2 | -| %y | Year, last two digits (00-99) | 18 | -| %Y | Year | 2018 | -| %% | a % sign | % | - -[Original article](https://clickhouse.tech/docs/en/query_language/functions/date_time_functions/) diff --git a/docs/fa/query_language/functions/encoding_functions.md b/docs/fa/query_language/functions/encoding_functions.md deleted file mode 100644 index c9143398827..00000000000 --- a/docs/fa/query_language/functions/encoding_functions.md +++ /dev/null @@ -1,172 +0,0 @@ ---- -en_copy: true ---- - -# Encoding functions {#encoding-functions} - -## char {#char} - -Returns the string with the length as the number of passed arguments and each byte has the value of corresponding argument. Accepts multiple arguments of numeric types. If the value of argument is out of range of UInt8 data type, it is converted to UInt8 with possible rounding and overflow. - -**Syntax** - -``` sql -char(number_1, [number_2, ..., number_n]); -``` - -**Parameters** - -- `number_1, number_2, ..., number_n` — Numerical arguments interpreted as integers. Types: [Int](../../data_types/int_uint.md), [Float](../../data_types/float.md). - -**Returned value** - -- a string of given bytes. - -Type: `String`. - -**Example** - -Query: - -``` sql -SELECT char(104.1, 101, 108.9, 108.9, 111) AS hello -``` - -Result: - -``` text -┌─hello─┐ -│ hello │ -└───────┘ -``` - -You can construct a string of arbitrary encoding by passing the corresponding bytes. Here is example for UTF-8: - -Query: - -``` sql -SELECT char(0xD0, 0xBF, 0xD1, 0x80, 0xD0, 0xB8, 0xD0, 0xB2, 0xD0, 0xB5, 0xD1, 0x82) AS hello; -``` - -Result: - -``` text -┌─hello──┐ -│ привет │ -└────────┘ -``` - -Query: - -``` sql -SELECT char(0xE4, 0xBD, 0xA0, 0xE5, 0xA5, 0xBD) AS hello; -``` - -Result: - -``` text -┌─hello─┐ -│ 你好 │ -└───────┘ -``` - -## hex {#hex} - -Returns a string containing the argument’s hexadecimal representation. - -**Syntax** - -``` sql -hex(arg) -``` - -The function is using uppercase letters `A-F` and not using any prefixes (like `0x`) or suffixes (like `h`). - -For integer arguments, it prints hex digits (“nibbles”) from the most significant to least significant (big endian or “human readable” order). It starts with the most significant non-zero byte (leading zero bytes are omitted) but always prints both digits of every byte even if leading digit is zero. 
- -Example: - -**Example** - -Query: - -``` sql -SELECT hex(1); -``` - -Result: - -``` text -01 -``` - -Values of type `Date` and `DateTime` are formatted as corresponding integers (the number of days since Epoch for Date and the value of Unix Timestamp for DateTime). - -For `String` and `FixedString`, all bytes are simply encoded as two hexadecimal numbers. Zero bytes are not omitted. - -Values of floating point and Decimal types are encoded as their representation in memory. As we support little endian architecture, they are encoded in little endian. Zero leading/trailing bytes are not omitted. - -**Parameters** - -- `arg` — A value to convert to hexadecimal. Types: [String](../../data_types/string.md), [UInt](../../data_types/int_uint.md), [Float](../../data_types/float.md), [Decimal](../../data_types/decimal.md), [Date](../../data_types/date.md) or [DateTime](../../data_types/datetime.md). - -**Returned value** - -- A string with the hexadecimal representation of the argument. - -Type: `String`. - -**Example** - -Query: - -``` sql -SELECT hex(toFloat32(number)) as hex_presentation FROM numbers(15, 2); -``` - -Result: - -``` text -┌─hex_presentation─┐ -│ 00007041 │ -│ 00008041 │ -└──────────────────┘ -``` - -Query: - -``` sql -SELECT hex(toFloat64(number)) as hex_presentation FROM numbers(15, 2); -``` - -Result: - -``` text -┌─hex_presentation─┐ -│ 0000000000002E40 │ -│ 0000000000003040 │ -└──────────────────┘ -``` - -## unhex(str) {#unhexstr} - -Accepts a string containing any number of hexadecimal digits, and returns a string containing the corresponding bytes. Supports both uppercase and lowercase letters A-F. The number of hexadecimal digits does not have to be even. If it is odd, the last digit is interpreted as the least significant half of the 00-0F byte. If the argument string contains anything other than hexadecimal digits, some implementation-defined result is returned (an exception isn’t thrown). -If you want to convert the result to a number, you can use the ‘reverse’ and ‘reinterpretAsType’ functions. - -## UUIDStringToNum(str) {#uuidstringtonumstr} - -Accepts a string containing 36 characters in the format `123e4567-e89b-12d3-a456-426655440000`, and returns it as a set of bytes in a FixedString(16). - -## UUIDNumToString(str) {#uuidnumtostringstr} - -Accepts a FixedString(16) value. Returns a string containing 36 characters in text format. - -## bitmaskToList(num) {#bitmasktolistnum} - -Accepts an integer. Returns a string containing the list of powers of two that total the source number when summed. They are comma-separated without spaces in text format, in ascending order. - -## bitmaskToArray(num) {#bitmasktoarraynum} - -Accepts an integer. Returns an array of UInt64 numbers containing the list of powers of two that total the source number when summed. Numbers in the array are in ascending order. - -[Original article](https://clickhouse.tech/docs/en/query_language/functions/encoding_functions/) diff --git a/docs/fa/query_language/functions/ext_dict_functions.md b/docs/fa/query_language/functions/ext_dict_functions.md deleted file mode 100644 index 9b4d256104a..00000000000 --- a/docs/fa/query_language/functions/ext_dict_functions.md +++ /dev/null @@ -1,202 +0,0 @@ ---- -en_copy: true ---- - -# Functions for Working with External Dictionaries {#ext_dict_functions} - -For information on connecting and configuring external dictionaries, see [External dictionaries](../dicts/external_dicts.md). - -## dictGet {#dictget} - -Retrieves a value from an external dictionary. 
-
-``` sql
-dictGet('dict_name', 'attr_name', id_expr)
-dictGetOrDefault('dict_name', 'attr_name', id_expr, default_value_expr)
-```
-
-**Parameters**
-
-- `dict_name` — Name of the dictionary. [String literal](../syntax.md#syntax-string-literal).
-- `attr_name` — Name of the column of the dictionary. [String literal](../syntax.md#syntax-string-literal).
-- `id_expr` — Key value. [Expression](../syntax.md#syntax-expressions) returning a [UInt64](../../data_types/int_uint.md) or [Tuple](../../data_types/tuple.md)-type value depending on the dictionary configuration.
-- `default_value_expr` — Value returned if the dictionary doesn’t contain a row with the `id_expr` key. [Expression](../syntax.md#syntax-expressions) returning the value in the data type configured for the `attr_name` attribute.
-
-**Returned value**
-
-- If ClickHouse parses the attribute successfully in the [attribute’s data type](../../query_language/dicts/external_dicts_dict_structure.md#ext_dict_structure-attributes), functions return the value of the dictionary attribute that corresponds to `id_expr`.
-
-- If there is no key corresponding to `id_expr` in the dictionary, then:
-
-    - `dictGet` returns the content of the `<null_value>` element specified for the attribute in the dictionary configuration.
-    - `dictGetOrDefault` returns the value passed as the `default_value_expr` parameter.
-
-ClickHouse throws an exception if it cannot parse the value of the attribute or the value doesn’t match the attribute data type.
-
-**Example**
-
-Create a text file `ext-dict-test.csv` containing the following:
-
-``` text
-1,1
-2,2
-```
-
-The first column is `id`, the second column is `c1`.
-
-Configure the external dictionary:
-
-``` xml
-<yandex>
-    <dictionary>
-        <name>ext-dict-test</name>
-        <source>
-            <file>
-                <path>/path-to/ext-dict-test.csv</path>
-                <format>CSV</format>
-            </file>
-        </source>
-        <layout>
-            <flat/>
-        </layout>
-        <structure>
-            <id>
-                <name>id</name>
-            </id>
-            <attribute>
-                <name>c1</name>
-                <type>UInt32</type>
-                <null_value></null_value>
-            </attribute>
-        </structure>
-        <lifetime>0</lifetime>
-    </dictionary>
-</yandex>
-```
-
-Perform the query:
-
-``` sql
-SELECT
-    dictGetOrDefault('ext-dict-test', 'c1', number + 1, toUInt32(number * 10)) AS val,
-    toTypeName(val) AS type
-FROM system.numbers
-LIMIT 3
-```
-
-``` text
-┌─val─┬─type───┐
-│   1 │ UInt32 │
-│   2 │ UInt32 │
-│  20 │ UInt32 │
-└─────┴────────┘
-```
-
-**See Also**
-
-- [External Dictionaries](../dicts/external_dicts.md)
-
-## dictHas {#dicthas}
-
-Checks whether a key is present in a dictionary.
-
-``` sql
-dictHas('dict_name', id_expr)
-```
-
-**Parameters**
-
-- `dict_name` — Name of the dictionary. [String literal](../syntax.md#syntax-string-literal).
-- `id_expr` — Key value. [Expression](../syntax.md#syntax-expressions) returning a [UInt64](../../data_types/int_uint.md)-type value.
-
-**Returned value**
-
-- 0, if there is no key.
-- 1, if there is a key.
-
-Type: `UInt8`.
-
-## dictGetHierarchy {#dictgethierarchy}
-
-Creates an array, containing all the parents of a key in the [hierarchical dictionary](../dicts/external_dicts_dict_hierarchical.md).
-
-**Syntax**
-
-``` sql
-dictGetHierarchy('dict_name', key)
-```
-
-**Parameters**
-
-- `dict_name` — Name of the dictionary. [String literal](../syntax.md#syntax-string-literal).
-- `key` — Key value. [Expression](../syntax.md#syntax-expressions) returning a [UInt64](../../data_types/int_uint.md)-type value.
-
-**Returned value**
-
-- Parents for the key.
-
-Type: [Array(UInt64)](../../data_types/array.md).
-
-## dictIsIn {#dictisin}
-
-Checks the ancestor of a key through the whole hierarchical chain in the dictionary.
-
-``` sql
-dictIsIn('dict_name', child_id_expr, ancestor_id_expr)
-```
-
-**Parameters**
-
-- `dict_name` — Name of the dictionary.
[String literal](../syntax.md#syntax-string-literal). -- `child_id_expr` — Key to be checked. [Expression](../syntax.md#syntax-expressions) returning a [UInt64](../../data_types/int_uint.md)-type value. -- `ancestor_id_expr` — Alleged ancestor of the `child_id_expr` key. [Expression](../syntax.md#syntax-expressions) returning a [UInt64](../../data_types/int_uint.md)-type value. - -**Returned value** - -- 0, if `child_id_expr` is not a child of `ancestor_id_expr`. -- 1, if `child_id_expr` is a child of `ancestor_id_expr` or if `child_id_expr` is an `ancestor_id_expr`. - -Type: `UInt8`. - -## Other functions {#ext_dict_functions-other} - -ClickHouse supports specialized functions that convert dictionary attribute values to a specific data type regardless of the dictionary configuration. - -Functions: - -- `dictGetInt8`, `dictGetInt16`, `dictGetInt32`, `dictGetInt64` -- `dictGetUInt8`, `dictGetUInt16`, `dictGetUInt32`, `dictGetUInt64` -- `dictGetFloat32`, `dictGetFloat64` -- `dictGetDate` -- `dictGetDateTime` -- `dictGetUUID` -- `dictGetString` - -All these functions have the `OrDefault` modification. For example, `dictGetDateOrDefault`. - -Syntax: - -``` sql -dictGet[Type]('dict_name', 'attr_name', id_expr) -dictGet[Type]OrDefault('dict_name', 'attr_name', id_expr, default_value_expr) -``` - -**Parameters** - -- `dict_name` — Name of the dictionary. [String literal](../syntax.md#syntax-string-literal). -- `attr_name` — Name of the column of the dictionary. [String literal](../syntax.md#syntax-string-literal). -- `id_expr` — Key value. [Expression](../syntax.md#syntax-expressions) returning a [UInt64](../../data_types/int_uint.md)-type value. -- `default_value_expr` — Value which is returned if the dictionary doesn’t contain a row with the `id_expr` key. [Expression](../syntax.md#syntax-expressions) returning a value in the data type configured for the `attr_name` attribute. - -**Returned value** - -- If ClickHouse parses the attribute successfully in the [attribute’s data type](../../query_language/dicts/external_dicts_dict_structure.md#ext_dict_structure-attributes), functions return the value of the dictionary attribute that corresponds to `id_expr`. - -- If there is no requested `id_expr` in the dictionary then: - - - `dictGet[Type]` returns the content of the `` element specified for the attribute in the dictionary configuration. - - `dictGet[Type]OrDefault` returns the value passed as the `default_value_expr` parameter. - -ClickHouse throws an exception if it cannot parse the value of the attribute or the value doesn’t match the attribute data type. - -[Original article](https://clickhouse.tech/docs/en/query_language/functions/ext_dict_functions/) diff --git a/docs/fa/query_language/functions/geo.md b/docs/fa/query_language/functions/geo.md deleted file mode 100644 index d82b4ccb66a..00000000000 --- a/docs/fa/query_language/functions/geo.md +++ /dev/null @@ -1,507 +0,0 @@ ---- -en_copy: true ---- - -# Functions for Working with Geographical Coordinates {#functions-for-working-with-geographical-coordinates} - -## greatCircleDistance {#greatcircledistance} - -Calculate the distance between two points on the Earth’s surface using [the great-circle formula](https://en.wikipedia.org/wiki/Great-circle_distance). - -``` sql -greatCircleDistance(lon1Deg, lat1Deg, lon2Deg, lat2Deg) -``` - -**Input parameters** - -- `lon1Deg` — Longitude of the first point in degrees. Range: `[-180°, 180°]`. -- `lat1Deg` — Latitude of the first point in degrees. Range: `[-90°, 90°]`. 
-- `lon2Deg` — Longitude of the second point in degrees. Range: `[-180°, 180°]`. -- `lat2Deg` — Latitude of the second point in degrees. Range: `[-90°, 90°]`. - -Positive values correspond to North latitude and East longitude, and negative values correspond to South latitude and West longitude. - -**Returned value** - -The distance between two points on the Earth’s surface, in meters. - -Generates an exception when the input parameter values fall outside of the range. - -**Example** - -``` sql -SELECT greatCircleDistance(55.755831, 37.617673, -55.755831, -37.617673) -``` - -``` text -┌─greatCircleDistance(55.755831, 37.617673, -55.755831, -37.617673)─┐ -│ 14132374.194975413 │ -└───────────────────────────────────────────────────────────────────┘ -``` - -## pointInEllipses {#pointinellipses} - -Checks whether the point belongs to at least one of the ellipses. -Coordinates are geometric in the Cartesian coordinate system. - -``` sql -pointInEllipses(x, y, x₀, y₀, a₀, b₀,...,xₙ, yₙ, aₙ, bₙ) -``` - -**Input parameters** - -- `x, y` — Coordinates of a point on the plane. -- `xᵢ, yᵢ` — Coordinates of the center of the `i`-th ellipsis. -- `aᵢ, bᵢ` — Axes of the `i`-th ellipsis in units of x, y coordinates. - -The input parameters must be `2+4⋅n`, where `n` is the number of ellipses. - -**Returned values** - -`1` if the point is inside at least one of the ellipses; `0`if it is not. - -**Example** - -``` sql -SELECT pointInEllipses(10., 10., 10., 9.1, 1., 0.9999) -``` - -``` text -┌─pointInEllipses(10., 10., 10., 9.1, 1., 0.9999)─┐ -│ 1 │ -└─────────────────────────────────────────────────┘ -``` - -## pointInPolygon {#pointinpolygon} - -Checks whether the point belongs to the polygon on the plane. - -``` sql -pointInPolygon((x, y), [(a, b), (c, d) ...], ...) -``` - -**Input values** - -- `(x, y)` — Coordinates of a point on the plane. Data type — [Tuple](../../data_types/tuple.md) — A tuple of two numbers. -- `[(a, b), (c, d) ...]` — Polygon vertices. Data type — [Array](../../data_types/array.md). Each vertex is represented by a pair of coordinates `(a, b)`. Vertices should be specified in a clockwise or counterclockwise order. The minimum number of vertices is 3. The polygon must be constant. -- The function also supports polygons with holes (cut out sections). In this case, add polygons that define the cut out sections using additional arguments of the function. The function does not support non-simply-connected polygons. - -**Returned values** - -`1` if the point is inside the polygon, `0` if it is not. -If the point is on the polygon boundary, the function may return either 0 or 1. - -**Example** - -``` sql -SELECT pointInPolygon((3., 3.), [(6, 0), (8, 4), (5, 8), (0, 2)]) AS res -``` - -``` text -┌─res─┐ -│ 1 │ -└─────┘ -``` - -## geohashEncode {#geohashencode} - -Encodes latitude and longitude as a geohash-string, please see (http://geohash.org/, https://en.wikipedia.org/wiki/Geohash). - -``` sql -geohashEncode(longitude, latitude, [precision]) -``` - -**Input values** - -- longitude - longitude part of the coordinate you want to encode. Floating in range`[-180°, 180°]` -- latitude - latitude part of the coordinate you want to encode. Floating in range `[-90°, 90°]` -- precision - Optional, length of the resulting encoded string, defaults to `12`. Integer in range `[1, 12]`. Any value less than `1` or greater than `12` is silently converted to `12`. - -**Returned values** - -- alphanumeric `String` of encoded coordinate (modified version of the base32-encoding alphabet is used). 
- -**Example** - -``` sql -SELECT geohashEncode(-5.60302734375, 42.593994140625, 0) AS res -``` - -``` text -┌─res──────────┐ -│ ezs42d000000 │ -└──────────────┘ -``` - -## geohashDecode {#geohashdecode} - -Decodes any geohash-encoded string into longitude and latitude. - -**Input values** - -- encoded string - geohash-encoded string. - -**Returned values** - -- (longitude, latitude) - 2-tuple of `Float64` values of longitude and latitude. - -**Example** - -``` sql -SELECT geohashDecode('ezs42') AS res -``` - -``` text -┌─res─────────────────────────────┐ -│ (-5.60302734375,42.60498046875) │ -└─────────────────────────────────┘ -``` - -## geoToH3 {#geotoh3} - -Returns [H3](https://uber.github.io/h3/#/documentation/overview/introduction) point index `(lon, lat)` with specified resolution. - -[H3](https://uber.github.io/h3/#/documentation/overview/introduction) is a geographical indexing system where Earth’s surface divided into even hexagonal tiles. This system is hierarchical, i. e. each hexagon on the top level can be splitted into seven even but smaller ones and so on. - -This index is used primarily for bucketing locations and other geospatial manipulations. - -**Syntax** - -``` sql -geoToH3(lon, lat, resolution) -``` - -**Parameters** - -- `lon` — Longitude. Type: [Float64](../../data_types/float.md). -- `lat` — Latitude. Type: [Float64](../../data_types/float.md). -- `resolution` — Index resolution. Range: `[0, 15]`. Type: [UInt8](../../data_types/int_uint.md). - -**Returned values** - -- Hexagon index number. -- 0 in case of error. - -Type: `UInt64`. - -**Example** - -Query: - -``` sql -SELECT geoToH3(37.79506683, 55.71290588, 15) as h3Index -``` - -Result: - -``` text -┌────────────h3Index─┐ -│ 644325524701193974 │ -└────────────────────┘ -``` - -## geohashesInBox {#geohashesinbox} - -Returns an array of geohash-encoded strings of given precision that fall inside and intersect boundaries of given box, basically a 2D grid flattened into array. - -**Input values** - -- longitude\_min - min longitude, floating value in range `[-180°, 180°]` -- latitude\_min - min latitude, floating value in range `[-90°, 90°]` -- longitude\_max - max longitude, floating value in range `[-180°, 180°]` -- latitude\_max - max latitude, floating value in range `[-90°, 90°]` -- precision - geohash precision, `UInt8` in range `[1, 12]` - -Please note that all coordinate parameters should be of the same type: either `Float32` or `Float64`. - -**Returned values** - -- array of precision-long strings of geohash-boxes covering provided area, you should not rely on order of items. -- \[\] - empty array if *min* values of *latitude* and *longitude* aren’t less than corresponding *max* values. - -Please note that function will throw an exception if resulting array is over 10’000’000 items long. - -**Example** - -``` sql -SELECT geohashesInBox(24.48, 40.56, 24.785, 40.81, 4) AS thasos -``` - -``` text -┌─thasos──────────────────────────────────────┐ -│ ['sx1q','sx1r','sx32','sx1w','sx1x','sx38'] │ -└─────────────────────────────────────────────┘ -``` - -## h3GetBaseCell {#h3getbasecell} - -Returns the base cell number of the index. - -**Syntax** - -``` sql -h3GetBaseCell(index) -``` - -**Parameters** - -- `index` — Hexagon index number. Type: [UInt64](../../data_types/int_uint.md). - -**Returned values** - -- Hexagon base cell number. Type: [UInt8](../../data_types/int_uint.md). 
- -**Example** - -Query: - -``` sql -SELECT h3GetBaseCell(612916788725809151) as basecell -``` - -Result: - -``` text -┌─basecell─┐ -│ 12 │ -└──────────┘ -``` - -## h3HexAreaM2 {#h3hexaream2} - -Average hexagon area in square meters at the given resolution. - -**Syntax** - -``` sql -h3HexAreaM2(resolution) -``` - -**Parameters** - -- `resolution` — Index resolution. Range: `[0, 15]`. Type: [UInt8](../../data_types/int_uint.md). - -**Returned values** - -- Area in m². Type: [Float64](../../data_types/float.md). - -**Example** - -Query: - -``` sql -SELECT h3HexAreaM2(13) as area -``` - -Result: - -``` text -┌─area─┐ -│ 43.9 │ -└──────┘ -``` - -## h3IndexesAreNeighbors {#h3indexesareneighbors} - -Returns whether or not the provided H3Indexes are neighbors. - -**Syntax** - -``` sql -h3IndexesAreNeighbors(index1, index2) -``` - -**Parameters** - -- `index1` — Hexagon index number. Type: [UInt64](../../data_types/int_uint.md). -- `index2` — Hexagon index number. Type: [UInt64](../../data_types/int_uint.md). - -**Returned values** - -- Returns `1` if the indexes are neighbors, `0` otherwise. Type: [UInt8](../../data_types/int_uint.md). - -**Example** - -Query: - -``` sql -SELECT h3IndexesAreNeighbors(617420388351344639, 617420388352655359) AS n -``` - -Result: - -``` text -┌─n─┐ -│ 1 │ -└───┘ -``` - -## h3ToChildren {#h3tochildren} - -Returns an array with the child indexes of the given index. - -**Syntax** - -``` sql -h3ToChildren(index, resolution) -``` - -**Parameters** - -- `index` — Hexagon index number. Type: [UInt64](../../data_types/int_uint.md). -- `resolution` — Index resolution. Range: `[0, 15]`. Type: [UInt8](../../data_types/int_uint.md). - -**Returned values** - -- Array with the child H3 indexes. Array of type: [UInt64](../../data_types/int_uint.md). - -**Example** - -Query: - -``` sql -SELECT h3ToChildren(599405990164561919, 6) AS children -``` - -Result: - -``` text -┌─children───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐ -│ [603909588852408319,603909588986626047,603909589120843775,603909589255061503,603909589389279231,603909589523496959,603909589657714687] │ -└────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ -``` - -## h3ToParent {#h3toparent} - -Returns the parent (coarser) index containing the given index. - -**Syntax** - -``` sql -h3ToParent(index, resolution) -``` - -**Parameters** - -- `index` — Hexagon index number. Type: [UInt64](../../data_types/int_uint.md). -- `resolution` — Index resolution. Range: `[0, 15]`. Type: [UInt8](../../data_types/int_uint.md). - -**Returned values** - -- Parent H3 index. Type: [UInt64](../../data_types/int_uint.md). - -**Example** - -Query: - -``` sql -SELECT h3ToParent(599405990164561919, 3) as parent -``` - -Result: - -``` text -┌─────────────parent─┐ -│ 590398848891879423 │ -└────────────────────┘ -``` - -## h3ToString {#h3tostring} - -Converts the H3Index representation of the index to the string representation. - -``` sql -h3ToString(index) -``` - -**Parameters** - -- `index` — Hexagon index number. Type: [UInt64](../../data_types/int_uint.md). - -**Returned values** - -- String representation of the H3 index. Type: [String](../../data_types/string.md). 
- -**Example** - -Query: - -``` sql -SELECT h3ToString(617420388352917503) as h3_string -``` - -Result: - -``` text -┌─h3_string───────┐ -│ 89184926cdbffff │ -└─────────────────┘ -``` - -## stringToH3 {#stringtoh3} - -Converts the string representation to H3Index (UInt64) representation. - -``` sql -stringToH3(index_str) -``` - -**Parameters** - -- `index_str` — String representation of the H3 index. Type: [String](../../data_types/string.md). - -**Returned values** - -- Hexagon index number. Returns 0 on error. Type: [UInt64](../../data_types/int_uint.md). - -**Example** - -Query: - -``` sql -SELECT stringToH3('89184926cc3ffff') as index -``` - -Result: - -``` text -┌──────────────index─┐ -│ 617420388351344639 │ -└────────────────────┘ -``` - -## h3GetResolution {#h3getresolution} - -Returns the resolution of the index. - -**Syntax** - -``` sql -h3GetResolution(index) -``` - -**Parameters** - -- `index` — Hexagon index number. Type: [UInt64](../../data_types/int_uint.md). - -**Returned values** - -- Index resolution. Range: `[0, 15]`. Type: [UInt8](../../data_types/int_uint.md). - -**Example** - -Query: - -``` sql -SELECT h3GetResolution(617420388352917503) as res -``` - -Result: - -``` text -┌─res─┐ -│ 9 │ -└─────┘ -``` - -[Original article](https://clickhouse.tech/docs/en/query_language/functions/geo/) diff --git a/docs/fa/query_language/functions/hash_functions.md b/docs/fa/query_language/functions/hash_functions.md deleted file mode 100644 index 8cd588931c8..00000000000 --- a/docs/fa/query_language/functions/hash_functions.md +++ /dev/null @@ -1,443 +0,0 @@ ---- -en_copy: true ---- - -# Hash functions {#hash-functions} - -Hash functions can be used for the deterministic pseudo-random shuffling of elements. - -## halfMD5 {#hash-functions-halfmd5} - -[Interprets](../../query_language/functions/type_conversion_functions.md#type_conversion_functions-reinterpretAsString) all the input parameters as strings and calculates the [MD5](https://en.wikipedia.org/wiki/MD5) hash value for each of them. Then combines hashes, takes the first 8 bytes of the hash of the resulting string, and interprets them as `UInt64` in big-endian byte order. - -``` sql -halfMD5(par1, ...) -``` - -The function is relatively slow (5 million short strings per second per processor core). -Consider using the [sipHash64](#hash_functions-siphash64) function instead. - -**Parameters** - -The function takes a variable number of input parameters. Parameters can be any of the [supported data types](../../data_types/index.md). - -**Returned Value** - -A [UInt64](../../data_types/int_uint.md) data type hash value. - -**Example** - -``` sql -SELECT halfMD5(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS halfMD5hash, toTypeName(halfMD5hash) AS type -``` - -``` text -┌────────halfMD5hash─┬─type───┐ -│ 186182704141653334 │ UInt64 │ -└────────────────────┴────────┘ -``` - -## MD5 {#hash_functions-md5} - -Calculates the MD5 from a string and returns the resulting set of bytes as FixedString(16). -If you don’t need MD5 in particular, but you need a decent cryptographic 128-bit hash, use the ‘sipHash128’ function instead. -If you want to get the same result as output by the md5sum utility, use lower(hex(MD5(s))). - -## sipHash64 {#hash_functions-siphash64} - -Produces a 64-bit [SipHash](https://131002.net/siphash/) hash value. - -``` sql -sipHash64(par1,...) -``` - -This is a cryptographic hash function. It works at least three times faster than the [MD5](#hash_functions-md5) function. 
- -Function [interprets](../../query_language/functions/type_conversion_functions.md#type_conversion_functions-reinterpretAsString) all the input parameters as strings and calculates the hash value for each of them. Then combines hashes by the following algorithm: - -1. After hashing all the input parameters, the function gets the array of hashes. -2. Function takes the first and the second elements and calculates a hash for the array of them. -3. Then the function takes the hash value, calculated at the previous step, and the third element of the initial hash array, and calculates a hash for the array of them. -4. The previous step is repeated for all the remaining elements of the initial hash array. - -**Parameters** - -The function takes a variable number of input parameters. Parameters can be any of the [supported data types](../../data_types/index.md). - -**Returned Value** - -A [UInt64](../../data_types/int_uint.md) data type hash value. - -**Example** - -``` sql -SELECT sipHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS SipHash, toTypeName(SipHash) AS type -``` - -``` text -┌──────────────SipHash─┬─type───┐ -│ 13726873534472839665 │ UInt64 │ -└──────────────────────┴────────┘ -``` - -## sipHash128 {#hash_functions-siphash128} - -Calculates SipHash from a string. -Accepts a String-type argument. Returns FixedString(16). -Differs from sipHash64 in that the final xor-folding state is only done up to 128 bits. - -## cityHash64 {#cityhash64} - -Produces a 64-bit [CityHash](https://github.com/google/cityhash) hash value. - -``` sql -cityHash64(par1,...) -``` - -This is a fast non-cryptographic hash function. It uses the CityHash algorithm for string parameters and implementation-specific fast non-cryptographic hash function for parameters with other data types. The function uses the CityHash combinator to get the final results. - -**Parameters** - -The function takes a variable number of input parameters. Parameters can be any of the [supported data types](../../data_types/index.md). - -**Returned Value** - -A [UInt64](../../data_types/int_uint.md) data type hash value. - -**Examples** - -Call example: - -``` sql -SELECT cityHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS CityHash, toTypeName(CityHash) AS type -``` - -``` text -┌─────────────CityHash─┬─type───┐ -│ 12072650598913549138 │ UInt64 │ -└──────────────────────┴────────┘ -``` - -The following example shows how to compute the checksum of the entire table with accuracy up to the row order: - -``` sql -SELECT groupBitXor(cityHash64(*)) FROM table -``` - -## intHash32 {#inthash32} - -Calculates a 32-bit hash code from any type of integer. -This is a relatively fast non-cryptographic hash function of average quality for numbers. - -## intHash64 {#inthash64} - -Calculates a 64-bit hash code from any type of integer. -It works faster than intHash32. Average quality. - -## SHA1 {#sha1} - -## SHA224 {#sha224} - -## SHA256 {#sha256} - -Calculates SHA-1, SHA-224, or SHA-256 from a string and returns the resulting set of bytes as FixedString(20), FixedString(28), or FixedString(32). -The function works fairly slowly (SHA-1 processes about 5 million short strings per second per processor core, while SHA-224 and SHA-256 process about 2.2 million). -We recommend using this function only in cases when you need a specific hash function and you can’t select it. 
-Even in these cases, we recommend applying the function offline and pre-calculating values when inserting them into the table, instead of applying it in SELECTS. - -## URLHash(url\[, N\]) {#urlhashurl-n} - -A fast, decent-quality non-cryptographic hash function for a string obtained from a URL using some type of normalization. -`URLHash(s)` – Calculates a hash from a string without one of the trailing symbols `/`,`?` or `#` at the end, if present. -`URLHash(s, N)` – Calculates a hash from a string up to the N level in the URL hierarchy, without one of the trailing symbols `/`,`?` or `#` at the end, if present. -Levels are the same as in URLHierarchy. This function is specific to Yandex.Metrica. - -## farmHash64 {#farmhash64} - -Produces a 64-bit [FarmHash](https://github.com/google/farmhash) hash value. - -``` sql -farmHash64(par1, ...) -``` - -The function uses the `Hash64` method from all [available methods](https://github.com/google/farmhash/blob/master/src/farmhash.h). - -**Parameters** - -The function takes a variable number of input parameters. Parameters can be any of the [supported data types](../../data_types/index.md). - -**Returned Value** - -A [UInt64](../../data_types/int_uint.md) data type hash value. - -**Example** - -``` sql -SELECT farmHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS FarmHash, toTypeName(FarmHash) AS type -``` - -``` text -┌─────────────FarmHash─┬─type───┐ -│ 17790458267262532859 │ UInt64 │ -└──────────────────────┴────────┘ -``` - -## javaHash {#hash_functions-javahash} - -Calculates [JavaHash](http://hg.openjdk.java.net/jdk8u/jdk8u/jdk/file/478a4add975b/src/share/classes/java/lang/String.java#l1452) from a string. This hash function is neither fast nor having a good quality. The only reason to use it is when this algorithm is already used in another system and you have to calculate exactly the same result. - -**Syntax** - -``` sql -SELECT javaHash(''); -``` - -**Returned value** - -A `Int32` data type hash value. - -**Example** - -Query: - -``` sql -SELECT javaHash('Hello, world!'); -``` - -Result: - -``` text -┌─javaHash('Hello, world!')─┐ -│ -1880044555 │ -└───────────────────────────┘ -``` - -## javaHashUTF16LE {#javahashutf16le} - -Calculates [JavaHash](http://hg.openjdk.java.net/jdk8u/jdk8u/jdk/file/478a4add975b/src/share/classes/java/lang/String.java#l1452) from a string, assuming it contains bytes representing a string in UTF-16LE encoding. - -**Syntax** - -``` sql -javaHashUTF16LE(stringUtf16le) -``` - -**Parameters** - -- `stringUtf16le` — a string in UTF-16LE encoding. - -**Returned value** - -A `Int32` data type hash value. - -**Example** - -Correct query with UTF-16LE encoded string. - -Query: - -``` sql -SELECT javaHashUTF16LE(convertCharset('test', 'utf-8', 'utf-16le')) -``` - -Result: - -``` text -┌─javaHashUTF16LE(convertCharset('test', 'utf-8', 'utf-16le'))─┐ -│ 3556498 │ -└──────────────────────────────────────────────────────────────┘ -``` - -## hiveHash {#hash-functions-hivehash} - -Calculates `HiveHash` from a string. - -``` sql -SELECT hiveHash(''); -``` - -This is just [JavaHash](#hash_functions-javahash) with zeroed out sign bit. This function is used in [Apache Hive](https://en.wikipedia.org/wiki/Apache_Hive) for versions before 3.0. This hash function is neither fast nor having a good quality. The only reason to use it is when this algorithm is already used in another system and you have to calculate exactly the same result. - -**Returned value** - -A `Int32` data type hash value. 
- -Type: `hiveHash`. - -**Example** - -Query: - -``` sql -SELECT hiveHash('Hello, world!'); -``` - -Result: - -``` text -┌─hiveHash('Hello, world!')─┐ -│ 267439093 │ -└───────────────────────────┘ -``` - -## metroHash64 {#metrohash64} - -Produces a 64-bit [MetroHash](http://www.jandrewrogers.com/2015/05/27/metrohash/) hash value. - -``` sql -metroHash64(par1, ...) -``` - -**Parameters** - -The function takes a variable number of input parameters. Parameters can be any of the [supported data types](../../data_types/index.md). - -**Returned Value** - -A [UInt64](../../data_types/int_uint.md) data type hash value. - -**Example** - -``` sql -SELECT metroHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS MetroHash, toTypeName(MetroHash) AS type -``` - -``` text -┌────────────MetroHash─┬─type───┐ -│ 14235658766382344533 │ UInt64 │ -└──────────────────────┴────────┘ -``` - -## jumpConsistentHash {#jumpconsistenthash} - -Calculates JumpConsistentHash form a UInt64. -Accepts two arguments: a UInt64-type key and the number of buckets. Returns Int32. -For more information, see the link: [JumpConsistentHash](https://arxiv.org/pdf/1406.2294.pdf) - -## murmurHash2\_32, murmurHash2\_64 {#murmurhash2-32-murmurhash2-64} - -Produces a [MurmurHash2](https://github.com/aappleby/smhasher) hash value. - -``` sql -murmurHash2_32(par1, ...) -murmurHash2_64(par1, ...) -``` - -**Parameters** - -Both functions take a variable number of input parameters. Parameters can be any of the [supported data types](../../data_types/index.md). - -**Returned Value** - -- The `murmurHash2_32` function returns hash value having the [UInt32](../../data_types/int_uint.md) data type. -- The `murmurHash2_64` function returns hash value having the [UInt64](../../data_types/int_uint.md) data type. - -**Example** - -``` sql -SELECT murmurHash2_64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS MurmurHash2, toTypeName(MurmurHash2) AS type -``` - -``` text -┌──────────MurmurHash2─┬─type───┐ -│ 11832096901709403633 │ UInt64 │ -└──────────────────────┴────────┘ -``` - -## murmurHash3\_32, murmurHash3\_64 {#murmurhash3-32-murmurhash3-64} - -Produces a [MurmurHash3](https://github.com/aappleby/smhasher) hash value. - -``` sql -murmurHash3_32(par1, ...) -murmurHash3_64(par1, ...) -``` - -**Parameters** - -Both functions take a variable number of input parameters. Parameters can be any of the [supported data types](../../data_types/index.md). - -**Returned Value** - -- The `murmurHash3_32` function returns a [UInt32](../../data_types/int_uint.md) data type hash value. -- The `murmurHash3_64` function returns a [UInt64](../../data_types/int_uint.md) data type hash value. - -**Example** - -``` sql -SELECT murmurHash3_32(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS MurmurHash3, toTypeName(MurmurHash3) AS type -``` - -``` text -┌─MurmurHash3─┬─type───┐ -│ 2152717 │ UInt32 │ -└─────────────┴────────┘ -``` - -## murmurHash3\_128 {#murmurhash3-128} - -Produces a 128-bit [MurmurHash3](https://github.com/aappleby/smhasher) hash value. - -``` sql -murmurHash3_128( expr ) -``` - -**Parameters** - -- `expr` — [Expressions](../syntax.md#syntax-expressions) returning a [String](../../data_types/string.md)-type value. - -**Returned Value** - -A [FixedString(16)](../../data_types/fixedstring.md) data type hash value. 
- -**Example** - -``` sql -SELECT murmurHash3_128('example_string') AS MurmurHash3, toTypeName(MurmurHash3) AS type -``` - -``` text -┌─MurmurHash3──────┬─type────────────┐ -│ 6�1�4"S5KT�~~q │ FixedString(16) │ -└──────────────────┴─────────────────┘ -``` - -## xxHash32, xxHash64 {#hash-functions-xxhash32} - -Calculates `xxHash` from a string. It is proposed in two flavors, 32 and 64 bits. - -``` sql -SELECT xxHash32(''); - -OR - -SELECT xxHash64(''); -``` - -**Returned value** - -A `Uint32` or `Uint64` data type hash value. - -Type: `xxHash`. - -**Example** - -Query: - -``` sql -SELECT xxHash32('Hello, world!'); -``` - -Result: - -``` text -┌─xxHash32('Hello, world!')─┐ -│ 834093149 │ -└───────────────────────────┘ -``` - -**See Also** - -- [xxHash](http://cyan4973.github.io/xxHash/). - -[Original article](https://clickhouse.tech/docs/en/query_language/functions/hash_functions/) diff --git a/docs/fa/query_language/functions/higher_order_functions.md b/docs/fa/query_language/functions/higher_order_functions.md deleted file mode 100644 index 66d98440323..00000000000 --- a/docs/fa/query_language/functions/higher_order_functions.md +++ /dev/null @@ -1,261 +0,0 @@ ---- -en_copy: true ---- - -# Higher-order functions {#higher-order-functions} - -## `->` operator, lambda(params, expr) function {#operator-lambdaparams-expr-function} - -Allows describing a lambda function for passing to a higher-order function. The left side of the arrow has a formal parameter, which is any ID, or multiple formal parameters – any IDs in a tuple. The right side of the arrow has an expression that can use these formal parameters, as well as any table columns. - -Examples: `x -> 2 * x, str -> str != Referer.` - -Higher-order functions can only accept lambda functions as their functional argument. - -A lambda function that accepts multiple arguments can be passed to a higher-order function. In this case, the higher-order function is passed several arrays of identical length that these arguments will correspond to. - -For some functions, such as [arrayCount](#higher_order_functions-array-count) or [arraySum](#higher_order_functions-array-count), the first argument (the lambda function) can be omitted. In this case, identical mapping is assumed. - -A lambda function can’t be omitted for the following functions: - -- [arrayMap](#higher_order_functions-array-map) -- [arrayFilter](#higher_order_functions-array-filter) -- [arrayFill](#higher_order_functions-array-fill) -- [arrayReverseFill](#higher_order_functions-array-reverse-fill) -- [arraySplit](#higher_order_functions-array-split) -- [arrayReverseSplit](#higher_order_functions-array-reverse-split) -- [arrayFirst](#higher_order_functions-array-first) -- [arrayFirstIndex](#higher_order_functions-array-first-index) - -### arrayMap(func, arr1, …) {#higher_order_functions-array-map} - -Returns an array obtained from the original application of the `func` function to each element in the `arr` array. - -Examples: - -``` sql -SELECT arrayMap(x -> (x + 2), [1, 2, 3]) as res; -``` - -``` text -┌─res─────┐ -│ [3,4,5] │ -└─────────┘ -``` - -The following example shows how to create a tuple of elements from different arrays: - -``` sql -SELECT arrayMap((x, y) -> (x, y), [1, 2, 3], [4, 5, 6]) AS res -``` - -``` text -┌─res─────────────────┐ -│ [(1,4),(2,5),(3,6)] │ -└─────────────────────┘ -``` - -Note that the first argument (lambda function) can’t be omitted in the `arrayMap` function. 
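-
-For contrast, a hedged sketch of the shorthand mentioned above (not from the original page): a function such as `arraySum` accepts calls with or without the lambda, while `arrayMap` always requires it:
-
-``` sql
-SELECT
-    arraySum([1, 2, 3]) AS no_lambda,              -- identical mapping is assumed
-    arraySum(x -> x * 2, [1, 2, 3]) AS with_lambda -- each element is doubled before summing
-```
-
-``` text
-┌─no_lambda─┬─with_lambda─┐
-│         6 │          12 │
-└───────────┴─────────────┘
-```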
-
-### arrayFilter(func, arr1, …) {#higher_order_functions-array-filter}
-
-Returns an array containing only the elements in `arr1` for which `func` returns something other than 0.
-
-Examples:
-
-``` sql
-SELECT arrayFilter(x -> x LIKE '%World%', ['Hello', 'abc World']) AS res
-```
-
-``` text
-┌─res───────────┐
-│ ['abc World'] │
-└───────────────┘
-```
-
-``` sql
-SELECT
-    arrayFilter(
-        (i, x) -> x LIKE '%World%',
-        arrayEnumerate(arr),
-        ['Hello', 'abc World'] AS arr)
-    AS res
-```
-
-``` text
-┌─res─┐
-│ [2] │
-└─────┘
-```
-
-Note that the first argument (lambda function) can’t be omitted in the `arrayFilter` function.
-
-### arrayFill(func, arr1, …) {#higher_order_functions-array-fill}
-
-Scans through `arr1` from the first element to the last element and replaces `arr1[i]` by `arr1[i - 1]` if `func` returns 0. The first element of `arr1` will not be replaced.
-
-Examples:
-
-``` sql
-SELECT arrayFill(x -> not isNull(x), [1, null, 3, 11, 12, null, null, 5, 6, 14, null, null]) AS res
-```
-
-``` text
-┌─res──────────────────────────────┐
-│ [1,1,3,11,12,12,12,5,6,14,14,14] │
-└──────────────────────────────────┘
-```
-
-Note that the first argument (lambda function) can’t be omitted in the `arrayFill` function.
-
-### arrayReverseFill(func, arr1, …) {#higher_order_functions-array-reverse-fill}
-
-Scans through `arr1` from the last element to the first element and replaces `arr1[i]` by `arr1[i + 1]` if `func` returns 0. The last element of `arr1` will not be replaced.
-
-Examples:
-
-``` sql
-SELECT arrayReverseFill(x -> not isNull(x), [1, null, 3, 11, 12, null, null, 5, 6, 14, null, null]) AS res
-```
-
-``` text
-┌─res────────────────────────────────┐
-│ [1,3,3,11,12,5,5,5,6,14,NULL,NULL] │
-└────────────────────────────────────┘
-```
-
-Note that the first argument (lambda function) can’t be omitted in the `arrayReverseFill` function.
-
-### arraySplit(func, arr1, …) {#higher_order_functions-array-split}
-
-Splits `arr1` into multiple arrays. When `func` returns something other than 0, the array will be split on the left hand side of the element. The array will not be split before the first element.
-
-Examples:
-
-``` sql
-SELECT arraySplit((x, y) -> y, [1, 2, 3, 4, 5], [1, 0, 0, 1, 0]) AS res
-```
-
-``` text
-┌─res─────────────┐
-│ [[1,2,3],[4,5]] │
-└─────────────────┘
-```
-
-Note that the first argument (lambda function) can’t be omitted in the `arraySplit` function.
-
-### arrayReverseSplit(func, arr1, …) {#higher_order_functions-array-reverse-split}
-
-Splits `arr1` into multiple arrays. When `func` returns something other than 0, the array will be split on the right hand side of the element. The array will not be split after the last element.
-
-Examples:
-
-``` sql
-SELECT arrayReverseSplit((x, y) -> y, [1, 2, 3, 4, 5], [1, 0, 0, 1, 0]) AS res
-```
-
-``` text
-┌─res───────────────┐
-│ [[1],[2,3,4],[5]] │
-└───────────────────┘
-```
-
-Note that the first argument (lambda function) can’t be omitted in the `arrayReverseSplit` function.
-
-### arrayCount(\[func,\] arr1, …) {#higher_order_functions-array-count}
-
-Returns the number of elements in the arr array for which func returns something other than 0. If ‘func’ is not specified, it returns the number of non-zero elements in the array.
-
-### arrayExists(\[func,\] arr1, …) {#arrayexistsfunc-arr1}
-
-Returns 1 if there is at least one element in ‘arr’ for which ‘func’ returns something other than 0. Otherwise, it returns 0.
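-
-A minimal hedged example (not from the original page):
-
-``` sql
-SELECT arrayExists(x -> x > 2, [1, 2, 3]) AS res -- at least one element is greater than 2
-```
-
-``` text
-┌─res─┐
-│   1 │
-└─────┘
-```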
-
-### arrayAll(\[func,\] arr1, …) {#arrayallfunc-arr1}
-
-Returns 1 if ‘func’ returns something other than 0 for all the elements in ‘arr’. Otherwise, it returns 0.
-
-### arraySum(\[func,\] arr1, …) {#higher-order-functions-array-sum}
-
-Returns the sum of the ‘func’ values. If the function is omitted, it just returns the sum of the array elements.
-
-### arrayFirst(func, arr1, …) {#higher_order_functions-array-first}
-
-Returns the first element in the ‘arr1’ array for which ‘func’ returns something other than 0.
-
-Note that the first argument (lambda function) can’t be omitted in the `arrayFirst` function.
-
-### arrayFirstIndex(func, arr1, …) {#higher_order_functions-array-first-index}
-
-Returns the index of the first element in the ‘arr1’ array for which ‘func’ returns something other than 0.
-
-Note that the first argument (lambda function) can’t be omitted in the `arrayFirstIndex` function.
-
-### arrayCumSum(\[func,\] arr1, …) {#arraycumsumfunc-arr1}
-
-Returns an array of partial sums of elements in the source array (a running sum). If the `func` function is specified, then the values of the array elements are converted by this function before summing.
-
-Example:
-
-``` sql
-SELECT arrayCumSum([1, 1, 1, 1]) AS res
-```
-
-``` text
-┌─res──────────┐
-│ [1, 2, 3, 4] │
-└──────────────┘
-```
-
-### arrayCumSumNonNegative(arr) {#arraycumsumnonnegativearr}
-
-Same as `arrayCumSum`, it returns an array of partial sums of elements in the source array (a running sum). Unlike `arrayCumSum`, whenever the running sum drops below zero, it is replaced with zero and the subsequent calculation continues from zero. For example:
-
-``` sql
-SELECT arrayCumSumNonNegative([1, 1, -4, 1]) AS res
-```
-
-``` text
-┌─res───────┐
-│ [1,2,0,1] │
-└───────────┘
-```
-
-### arraySort(\[func,\] arr1, …) {#arraysortfunc-arr1}
-
-Returns an array as a result of sorting the elements of `arr1` in ascending order. If the `func` function is specified, the sorting order is determined by the result of the function `func` applied to the elements of the array (arrays).
-
-The [Schwartzian transform](https://en.wikipedia.org/wiki/Schwartzian_transform) is used to improve sorting efficiency.
-
-Example:
-
-``` sql
-SELECT arraySort((x, y) -> y, ['hello', 'world'], [2, 1]) AS res;
-```
-
-``` text
-┌─res────────────────┐
-│ ['world', 'hello'] │
-└────────────────────┘
-```
-
-For more information about the `arraySort` method, see the [Functions for Working With Arrays](array_functions.md#array_functions-sort) section.
-
-### arrayReverseSort(\[func,\] arr1, …) {#arrayreversesortfunc-arr1}
-
-Returns an array as a result of sorting the elements of `arr1` in descending order. If the `func` function is specified, the sorting order is determined by the result of the function `func` applied to the elements of the array (arrays).
-
-Example:
-
-``` sql
-SELECT arrayReverseSort((x, y) -> y, ['hello', 'world'], [2, 1]) as res;
-```
-
-``` text
-┌─res───────────────┐
-│ ['hello','world'] │
-└───────────────────┘
-```
-
-For more information about the `arrayReverseSort` method, see the [Functions for Working With Arrays](array_functions.md#array_functions-reverse-sort) section.
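-
-As with `arraySort`, the lambda can be omitted; a hedged sketch (default descending order is assumed):
-
-``` sql
-SELECT arrayReverseSort([1, 3, 2]) AS res
-```
-
-``` text
-┌─res─────┐
-│ [3,2,1] │
-└─────────┘
-```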
-
-[Original article](https://clickhouse.tech/docs/en/query_language/functions/higher_order_functions/)
diff --git a/docs/fa/query_language/functions/in_functions.md b/docs/fa/query_language/functions/in_functions.md
deleted file mode 100644
index bd40c164a34..00000000000
--- a/docs/fa/query_language/functions/in_functions.md
+++ /dev/null
@@ -1,23 +0,0 @@
----
-en_copy: true
----
-
-# Functions for implementing the IN operator {#functions-for-implementing-the-in-operator}
-
-## in, notIn, globalIn, globalNotIn {#in-functions}
-
-See the section [IN operators](../select.md#select-in-operators).
-
-## tuple(x, y, …), operator (x, y, …) {#tuplex-y-operator-x-y}
-
-A function that allows grouping multiple columns.
-For columns with the types T1, T2, …, it returns a Tuple(T1, T2, …) type tuple containing these columns. There is no cost to execute the function.
-Tuples are normally used as intermediate values for an argument of IN operators, or for creating a list of formal parameters of lambda functions. Tuples can’t be written to a table.
-
-## tupleElement(tuple, n), operator x.N {#tupleelementtuple-n-operator-x-n}
-
-A function that allows getting a column from a tuple.
-‘N’ is the column index, starting from 1. ‘N’ must be a constant strictly positive integer no greater than the size of the tuple.
-There is no cost to execute the function.
-
-[Original article](https://clickhouse.tech/docs/en/query_language/functions/in_functions/)
diff --git a/docs/fa/query_language/functions/index.md b/docs/fa/query_language/functions/index.md
deleted file mode 100644
index 0cf62a7f565..00000000000
--- a/docs/fa/query_language/functions/index.md
+++ /dev/null
@@ -1,70 +0,0 @@
----
-en_copy: true
----
-
-# Functions {#functions}
-
-There are at least\* two types of functions - regular functions (they are just called “functions”) and aggregate functions. These are completely different concepts. Regular functions work as if they are applied to each row separately (for each row, the result of the function doesn’t depend on the other rows). Aggregate functions accumulate a set of values from various rows (i.e. they depend on the entire set of rows).
-
-In this section we discuss regular functions. For aggregate functions, see the section “Aggregate functions”.
-
-\* - There is a third type of function that the ‘arrayJoin’ function belongs to; table functions can also be mentioned separately.\*
-
-## Strong typing {#strong-typing}
-
-In contrast to standard SQL, ClickHouse has strong typing. In other words, it doesn’t make implicit conversions between types. Each function works for a specific set of types. This means that sometimes you need to use type conversion functions.
-
-## Common subexpression elimination {#common-subexpression-elimination}
-
-All expressions in a query that have the same AST (the same record or same result of syntactic parsing) are considered to have identical values. Such expressions are concatenated and executed once. Identical subqueries are also eliminated this way.
-
-## Types of results {#types-of-results}
-
-All functions return a single value as the result (not several values, and not zero values). The type of the result is usually defined only by the types of the arguments, not by the values. Exceptions are the tupleElement function (the a.N operator), and the toFixedString function.
-
-## Constants {#constants}
-
-For simplicity, certain functions can only work with constants for some arguments.
-Almost all functions return a constant for constant arguments. The exception is functions that generate random numbers.
-The ‘now’ function returns different values for queries that were run at different times, but the result is considered a constant, since constancy is only important within a single query.
-A constant expression is also considered a constant (for example, the right half of the LIKE operator can be constructed from multiple constants).
-
-Functions can be implemented in different ways for constant and non-constant arguments (different code is executed). But the results for a constant and for a true column containing only the same value should match each other.
-
-## NULL processing {#null-processing}
-
-Functions have the following behaviors:
-
-- If at least one of the arguments of the function is `NULL`, the function result is also `NULL`.
-- Special behavior that is specified individually in the description of each function. In the ClickHouse source code, these functions have `UseDefaultImplementationForNulls=false`.
-
-## Constancy {#constancy}
-
-Functions can’t change the values of their arguments – any changes are returned as the result. Thus, the result of calculating separate functions does not depend on the order in which the functions are written in the query.
-
-## Error handling {#error-handling}
-
-Some functions might throw an exception if the data is invalid. In this case, the query is canceled and an error text is returned to the client. For distributed processing, when an exception occurs on one of the servers, the other servers also attempt to abort the query.
-
-## Evaluation of argument expressions {#evaluation-of-argument-expressions}
-
-In almost all programming languages, one of the arguments might not be evaluated for certain operators. This is usually the case for the operators `&&`, `||`, and `?:`.
-But in ClickHouse, arguments of functions (operators) are always evaluated. This is because entire parts of columns are evaluated at once, instead of calculating each row separately.
-
-## Performing functions for distributed query processing {#performing-functions-for-distributed-query-processing}
-
-For distributed query processing, as many stages of query processing as possible are performed on remote servers, and the rest of the stages (merging intermediate results and everything after that) are performed on the requestor server.
-
-This means that functions can be performed on different servers.
-For example, in the query `SELECT f(sum(g(x))) FROM distributed_table GROUP BY h(y),`
-
-- if a `distributed_table` has at least two shards, the functions ‘g’ and ‘h’ are performed on remote servers, and the function ‘f’ is performed on the requestor server.
-- if a `distributed_table` has only one shard, all the ‘f’, ‘g’, and ‘h’ functions are performed on this shard’s server.
-
-The result of a function usually doesn’t depend on which server it is performed on. However, sometimes this is important.
-For example, functions that work with dictionaries use the dictionary that exists on the server they are running on.
-Another example is the `hostName` function, which returns the name of the server it is running on, making it possible to `GROUP BY` servers in a `SELECT` query.
-
-If a function in a query is performed on the requestor server, but you need to perform it on remote servers, you can wrap it in an ‘any’ aggregate function or add it to a key in `GROUP BY`, as in the sketch below.
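-
-A minimal sketch (assuming `distributed_table` is a table with the Distributed engine): because `hostName()` is part of the `GROUP BY` key here, it is evaluated on the remote shards, so the query returns per-server row counts.
-
-``` sql
-SELECT hostName() AS server, count() AS c
-FROM distributed_table
-GROUP BY server
-```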
- -[Original article](https://clickhouse.tech/docs/en/query_language/functions/) diff --git a/docs/fa/query_language/functions/json_functions.md b/docs/fa/query_language/functions/json_functions.md deleted file mode 100644 index 3c2bd30d8c9..00000000000 --- a/docs/fa/query_language/functions/json_functions.md +++ /dev/null @@ -1,228 +0,0 @@ ---- -en_copy: true ---- - -# Functions for working with JSON {#functions-for-working-with-json} - -In Yandex.Metrica, JSON is transmitted by users as session parameters. There are some special functions for working with this JSON. (Although in most of the cases, the JSONs are additionally pre-processed, and the resulting values are put in separate columns in their processed format.) All these functions are based on strong assumptions about what the JSON can be, but they try to do as little as possible to get the job done. - -The following assumptions are made: - -1. The field name (function argument) must be a constant. -2. The field name is somehow canonically encoded in JSON. For example: `visitParamHas('{"abc":"def"}', 'abc') = 1`, but `visitParamHas('{"\\u0061\\u0062\\u0063":"def"}', 'abc') = 0` -3. Fields are searched for on any nesting level, indiscriminately. If there are multiple matching fields, the first occurrence is used. -4. The JSON doesn’t have space characters outside of string literals. - -## visitParamHas(params, name) {#visitparamhasparams-name} - -Checks whether there is a field with the ‘name’ name. - -## visitParamExtractUInt(params, name) {#visitparamextractuintparams-name} - -Parses UInt64 from the value of the field named ‘name’. If this is a string field, it tries to parse a number from the beginning of the string. If the field doesn’t exist, or it exists but doesn’t contain a number, it returns 0. - -## visitParamExtractInt(params, name) {#visitparamextractintparams-name} - -The same as for Int64. - -## visitParamExtractFloat(params, name) {#visitparamextractfloatparams-name} - -The same as for Float64. - -## visitParamExtractBool(params, name) {#visitparamextractboolparams-name} - -Parses a true/false value. The result is UInt8. - -## visitParamExtractRaw(params, name) {#visitparamextractrawparams-name} - -Returns the value of a field, including separators. - -Examples: - -``` sql -visitParamExtractRaw('{"abc":"\\n\\u0000"}', 'abc') = '"\\n\\u0000"' -visitParamExtractRaw('{"abc":{"def":[1,2,3]}}', 'abc') = '{"def":[1,2,3]}' -``` - -## visitParamExtractString(params, name) {#visitparamextractstringparams-name} - -Parses the string in double quotes. The value is unescaped. If unescaping failed, it returns an empty string. - -Examples: - -``` sql -visitParamExtractString('{"abc":"\\n\\u0000"}', 'abc') = '\n\0' -visitParamExtractString('{"abc":"\\u263a"}', 'abc') = '☺' -visitParamExtractString('{"abc":"\\u263"}', 'abc') = '' -visitParamExtractString('{"abc":"hello}', 'abc') = '' -``` - -There is currently no support for code points in the format `\uXXXX\uYYYY` that are not from the basic multilingual plane (they are converted to CESU-8 instead of UTF-8). - -The following functions are based on [simdjson](https://github.com/lemire/simdjson) designed for more complex JSON parsing requirements. The assumption 2 mentioned above still applies. - -## isValidJSON(json) {#isvalidjsonjson} - -Checks that passed string is a valid json. 
- -Examples: - -``` sql -SELECT isValidJSON('{"a": "hello", "b": [-100, 200.0, 300]}') = 1 -SELECT isValidJSON('not a json') = 0 -``` - -## JSONHas(json\[, indices\_or\_keys\]…) {#jsonhasjson-indices-or-keys} - -If the value exists in the JSON document, `1` will be returned. - -If the value does not exist, `0` will be returned. - -Examples: - -``` sql -SELECT JSONHas('{"a": "hello", "b": [-100, 200.0, 300]}', 'b') = 1 -SELECT JSONHas('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 4) = 0 -``` - -`indices_or_keys` is a list of zero or more arguments each of them can be either string or integer. - -- String = access object member by key. -- Positive integer = access the n-th member/key from the beginning. -- Negative integer = access the n-th member/key from the end. - -Minimum index of the element is 1. Thus the element 0 doesn’t exist. - -You may use integers to access both JSON arrays and JSON objects. - -So, for example: - -``` sql -SELECT JSONExtractKey('{"a": "hello", "b": [-100, 200.0, 300]}', 1) = 'a' -SELECT JSONExtractKey('{"a": "hello", "b": [-100, 200.0, 300]}', 2) = 'b' -SELECT JSONExtractKey('{"a": "hello", "b": [-100, 200.0, 300]}', -1) = 'b' -SELECT JSONExtractKey('{"a": "hello", "b": [-100, 200.0, 300]}', -2) = 'a' -SELECT JSONExtractString('{"a": "hello", "b": [-100, 200.0, 300]}', 1) = 'hello' -``` - -## JSONLength(json\[, indices\_or\_keys\]…) {#jsonlengthjson-indices-or-keys} - -Return the length of a JSON array or a JSON object. - -If the value does not exist or has a wrong type, `0` will be returned. - -Examples: - -``` sql -SELECT JSONLength('{"a": "hello", "b": [-100, 200.0, 300]}', 'b') = 3 -SELECT JSONLength('{"a": "hello", "b": [-100, 200.0, 300]}') = 2 -``` - -## JSONType(json\[, indices\_or\_keys\]…) {#jsontypejson-indices-or-keys} - -Return the type of a JSON value. - -If the value does not exist, `Null` will be returned. - -Examples: - -``` sql -SELECT JSONType('{"a": "hello", "b": [-100, 200.0, 300]}') = 'Object' -SELECT JSONType('{"a": "hello", "b": [-100, 200.0, 300]}', 'a') = 'String' -SELECT JSONType('{"a": "hello", "b": [-100, 200.0, 300]}', 'b') = 'Array' -``` - -## JSONExtractUInt(json\[, indices\_or\_keys\]…) {#jsonextractuintjson-indices-or-keys} - -## JSONExtractInt(json\[, indices\_or\_keys\]…) {#jsonextractintjson-indices-or-keys} - -## JSONExtractFloat(json\[, indices\_or\_keys\]…) {#jsonextractfloatjson-indices-or-keys} - -## JSONExtractBool(json\[, indices\_or\_keys\]…) {#jsonextractbooljson-indices-or-keys} - -Parses a JSON and extract a value. These functions are similar to `visitParam` functions. - -If the value does not exist or has a wrong type, `0` will be returned. - -Examples: - -``` sql -SELECT JSONExtractInt('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 1) = -100 -SELECT JSONExtractFloat('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 2) = 200.0 -SELECT JSONExtractUInt('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', -1) = 300 -``` - -## JSONExtractString(json\[, indices\_or\_keys\]…) {#jsonextractstringjson-indices-or-keys} - -Parses a JSON and extract a string. This function is similar to `visitParamExtractString` functions. - -If the value does not exist or has a wrong type, an empty string will be returned. - -The value is unescaped. If unescaping failed, it returns an empty string. 
-
-Examples:
-
-``` sql
-SELECT JSONExtractString('{"a": "hello", "b": [-100, 200.0, 300]}', 'a') = 'hello'
-SELECT JSONExtractString('{"abc":"\\n\\u0000"}', 'abc') = '\n\0'
-SELECT JSONExtractString('{"abc":"\\u263a"}', 'abc') = '☺'
-SELECT JSONExtractString('{"abc":"\\u263"}', 'abc') = ''
-SELECT JSONExtractString('{"abc":"hello}', 'abc') = ''
-```
-
-## JSONExtract(json\[, indices\_or\_keys…\], return\_type) {#jsonextractjson-indices-or-keys-return-type}
-
-Parses a JSON and extracts a value of the given ClickHouse data type.
-
-This is a generalization of the previous `JSONExtract` functions.
-This means
-`JSONExtract(..., 'String')` returns exactly the same as `JSONExtractString()`,
-`JSONExtract(..., 'Float64')` returns exactly the same as `JSONExtractFloat()`.
-
-Examples:
-
-``` sql
-SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'Tuple(String, Array(Float64))') = ('hello',[-100,200,300])
-SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'Tuple(b Array(Float64), a String)') = ([-100,200,300],'hello')
-SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 'Array(Nullable(Int8))') = [-100, NULL, NULL]
-SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 4, 'Nullable(Int64)') = NULL
-SELECT JSONExtract('{"passed": true}', 'passed', 'UInt8') = 1
-SELECT JSONExtract('{"day": "Thursday"}', 'day', 'Enum8(\'Sunday\' = 0, \'Monday\' = 1, \'Tuesday\' = 2, \'Wednesday\' = 3, \'Thursday\' = 4, \'Friday\' = 5, \'Saturday\' = 6)') = 'Thursday'
-SELECT JSONExtract('{"day": 5}', 'day', 'Enum8(\'Sunday\' = 0, \'Monday\' = 1, \'Tuesday\' = 2, \'Wednesday\' = 3, \'Thursday\' = 4, \'Friday\' = 5, \'Saturday\' = 6)') = 'Friday'
-```
-
-## JSONExtractKeysAndValues(json\[, indices\_or\_keys…\], value\_type) {#jsonextractkeysandvaluesjson-indices-or-keys-value-type}
-
-Parses key-value pairs from a JSON where the values are of the given ClickHouse data type.
-
-Example:
-
-``` sql
-SELECT JSONExtractKeysAndValues('{"x": {"a": 5, "b": 7, "c": 11}}', 'x', 'Int8') = [('a',5),('b',7),('c',11)];
-```
-
-## JSONExtractRaw(json\[, indices\_or\_keys\]…) {#jsonextractrawjson-indices-or-keys}
-
-Returns a part of JSON as an unparsed string.
-
-If the part does not exist or has a wrong type, an empty string will be returned.
-
-Example:
-
-``` sql
-SELECT JSONExtractRaw('{"a": "hello", "b": [-100, 200.0, 300]}', 'b') = '[-100, 200.0, 300]'
-```
-
-## JSONExtractArrayRaw(json\[, indices\_or\_keys\]…) {#jsonextractarrayrawjson-indices-or-keys}
-
-Returns an array with the elements of a JSON array, each represented as an unparsed string.
-
-If the part does not exist or isn’t an array, an empty array will be returned.
-
-Example:
-
-``` sql
-SELECT JSONExtractArrayRaw('{"a": "hello", "b": [-100, 200.0, "hello"]}', 'b') = ['-100', '200.0', '"hello"']
-```
-
-[Original article](https://clickhouse.tech/docs/en/query_language/functions/json_functions/)
diff --git a/docs/fa/query_language/functions/logical_functions.md b/docs/fa/query_language/functions/logical_functions.md
deleted file mode 100644
index a41cb5a1099..00000000000
--- a/docs/fa/query_language/functions/logical_functions.md
+++ /dev/null
@@ -1,19 +0,0 @@
----
-en_copy: true
----
-
-# Logical functions {#logical-functions}
-
-Logical functions accept any numeric types, but return a UInt8 number equal to 0 or 1.
-
-Zero as an argument is considered “false,” while any non-zero value is considered “true”.
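-
-A quick illustration of the four functions listed below (any non-zero argument counts as true):
-
-``` sql
-SELECT and(1, 5) AS a, or(0, 0) AS o, not(3) AS n, xor(1, 1) AS x
-```
-
-``` text
-┌─a─┬─o─┬─n─┬─x─┐
-│ 1 │ 0 │ 0 │ 0 │
-└───┴───┴───┴───┘
-```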
- -## and, AND operator {#and-and-operator} - -## or, OR operator {#or-or-operator} - -## not, NOT operator {#not-not-operator} - -## xor {#xor} - -[Original article](https://clickhouse.tech/docs/en/query_language/functions/logical_functions/) diff --git a/docs/fa/query_language/functions/machine_learning_functions.md b/docs/fa/query_language/functions/machine_learning_functions.md deleted file mode 100644 index 21458924bd6..00000000000 --- a/docs/fa/query_language/functions/machine_learning_functions.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -en_copy: true ---- - -# Machine learning functions {#machine-learning-functions} - -## evalMLMethod (prediction) {#machine_learning_methods-evalmlmethod} - -Prediction using fitted regression models uses `evalMLMethod` function. See link in `linearRegression`. - -### Stochastic Linear Regression {#stochastic-linear-regression} - -The [stochasticLinearRegression](../agg_functions/reference.md#agg_functions-stochasticlinearregression) aggregate function implements stochastic gradient descent method using linear model and MSE loss function. Uses `evalMLMethod` to predict on new data. - -### Stochastic Logistic Regression {#stochastic-logistic-regression} - -The [stochasticLogisticRegression](../agg_functions/reference.md#agg_functions-stochasticlogisticregression) aggregate function implements stochastic gradient descent method for binary classification problem. Uses `evalMLMethod` to predict on new data. diff --git a/docs/fa/query_language/functions/math_functions.md b/docs/fa/query_language/functions/math_functions.md deleted file mode 100644 index dd405e436f0..00000000000 --- a/docs/fa/query_language/functions/math_functions.md +++ /dev/null @@ -1,113 +0,0 @@ ---- -en_copy: true ---- - -# Mathematical functions {#mathematical-functions} - -All the functions return a Float64 number. The accuracy of the result is close to the maximum precision possible, but the result might not coincide with the machine representable number nearest to the corresponding real number. - -## e() {#e} - -Returns a Float64 number that is close to the number e. - -## pi() {#pi} - -Returns a Float64 number that is close to the number π. - -## exp(x) {#expx} - -Accepts a numeric argument and returns a Float64 number close to the exponent of the argument. - -## log(x), ln(x) {#logx-lnx} - -Accepts a numeric argument and returns a Float64 number close to the natural logarithm of the argument. - -## exp2(x) {#exp2x} - -Accepts a numeric argument and returns a Float64 number close to 2 to the power of x. - -## log2(x) {#log2x} - -Accepts a numeric argument and returns a Float64 number close to the binary logarithm of the argument. - -## exp10(x) {#exp10x} - -Accepts a numeric argument and returns a Float64 number close to 10 to the power of x. - -## log10(x) {#log10x} - -Accepts a numeric argument and returns a Float64 number close to the decimal logarithm of the argument. - -## sqrt(x) {#sqrtx} - -Accepts a numeric argument and returns a Float64 number close to the square root of the argument. - -## cbrt(x) {#cbrtx} - -Accepts a numeric argument and returns a Float64 number close to the cubic root of the argument. - -## erf(x) {#erfx} - -If ‘x’ is non-negative, then erf(x / σ√2) is the probability that a random variable having a normal distribution with standard deviation ‘σ’ takes the value that is separated from the expected value by more than ‘x’. 
- -Example (three sigma rule): - -``` sql -SELECT erf(3 / sqrt(2)) -``` - -``` text -┌─erf(divide(3, sqrt(2)))─┐ -│ 0.9973002039367398 │ -└─────────────────────────┘ -``` - -## erfc(x) {#erfcx} - -Accepts a numeric argument and returns a Float64 number close to 1 - erf(x), but without loss of precision for large ‘x’ values. - -## lgamma(x) {#lgammax} - -The logarithm of the gamma function. - -## tgamma(x) {#tgammax} - -Gamma function. - -## sin(x) {#sinx} - -The sine. - -## cos(x) {#cosx} - -The cosine. - -## tan(x) {#tanx} - -The tangent. - -## asin(x) {#asinx} - -The arc sine. - -## acos(x) {#acosx} - -The arc cosine. - -## atan(x) {#atanx} - -The arc tangent. - -## pow(x, y), power(x, y) {#powx-y-powerx-y} - -Takes two numeric arguments x and y. Returns a Float64 number close to x to the power of y. - -## intExp2 {#intexp2} - -Accepts a numeric argument and returns a UInt64 number close to 2 to the power of x. - -## intExp10 {#intexp10} - -Accepts a numeric argument and returns a UInt64 number close to 10 to the power of x. - -[Original article](https://clickhouse.tech/docs/en/query_language/functions/math_functions/) diff --git a/docs/fa/query_language/functions/other_functions.md b/docs/fa/query_language/functions/other_functions.md deleted file mode 100644 index f40451d8300..00000000000 --- a/docs/fa/query_language/functions/other_functions.md +++ /dev/null @@ -1,1076 +0,0 @@ ---- -en_copy: true ---- - -# Other functions {#other-functions} - -## hostName() {#hostname} - -Returns a string with the name of the host that this function was performed on. For distributed processing, this is the name of the remote server host, if the function is performed on a remote server. - -## FQDN {#fqdn} - -Returns the fully qualified domain name. - -**Syntax** - -``` sql -fqdn(); -``` - -This function is case-insensitive. - -**Returned value** - -- String with the fully qualified domain name. - -Type: `String`. - -**Example** - -Query: - -``` sql -SELECT FQDN(); -``` - -Result: - -``` text -┌─FQDN()──────────────────────────┐ -│ clickhouse.ru-central1.internal │ -└─────────────────────────────────┘ -``` - -## basename {#basename} - -Extracts the trailing part of a string after the last slash or backslash. This function if often used to extract the filename from a path. - -``` sql -basename( expr ) -``` - -**Parameters** - -- `expr` — Expression resulting in a [String](../../data_types/string.md) type value. All the backslashes must be escaped in the resulting value. - -**Returned Value** - -A string that contains: - -- The trailing part of a string after the last slash or backslash. - - If the input string contains a path ending with slash or backslash, for example, `/` or `c:\`, the function returns an empty string. - -- The original string if there are no slashes or backslashes. 
-
-**Example**
-
-``` sql
-SELECT 'some/long/path/to/file' AS a, basename(a)
-```
-
-``` text
-┌─a──────────────────────┬─basename('some/long/path/to/file')─┐
-│ some/long/path/to/file │ file                               │
-└────────────────────────┴────────────────────────────────────┘
-```
-
-``` sql
-SELECT 'some\\long\\path\\to\\file' AS a, basename(a)
-```
-
-``` text
-┌─a──────────────────────┬─basename('some\\long\\path\\to\\file')─┐
-│ some\long\path\to\file │ file                                   │
-└────────────────────────┴────────────────────────────────────────┘
-```
-
-``` sql
-SELECT 'some-file-name' AS a, basename(a)
-```
-
-``` text
-┌─a──────────────┬─basename('some-file-name')─┐
-│ some-file-name │ some-file-name             │
-└────────────────┴────────────────────────────┘
-```
-
-## visibleWidth(x) {#visiblewidthx}
-
-Calculates the approximate width when outputting values to the console in text format (tab-separated).
-This function is used by the system for implementing Pretty formats.
-
-`NULL` is represented as a string corresponding to `NULL` in `Pretty` formats.
-
-``` sql
-SELECT visibleWidth(NULL)
-```
-
-``` text
-┌─visibleWidth(NULL)─┐
-│                  4 │
-└────────────────────┘
-```
-
-## toTypeName(x) {#totypenamex}
-
-Returns a string containing the type name of the passed argument.
-
-If `NULL` is passed to the function as input, then it returns the `Nullable(Nothing)` type, which corresponds to an internal `NULL` representation in ClickHouse.
-
-## blockSize() {#function-blocksize}
-
-Gets the size of the block.
-In ClickHouse, queries are always run on blocks (sets of column parts). This function allows getting the size of the block that you called it for.
-
-## materialize(x) {#materializex}
-
-Turns a constant into a full column containing just one value.
-In ClickHouse, full columns and constants are represented differently in memory. Functions work differently for constant arguments and normal arguments (different code is executed), although the result is almost always the same. This function is for debugging this behavior.
-
-## ignore(…) {#ignore}
-
-Accepts any arguments, including `NULL`. Always returns 0.
-However, the arguments are still evaluated. This can be used for benchmarks.
-
-## sleep(seconds) {#sleepseconds}
-
-Sleeps ‘seconds’ seconds on each data block. You can specify an integer or a floating-point number.
-
-## sleepEachRow(seconds) {#sleepeachrowseconds}
-
-Sleeps ‘seconds’ seconds on each row. You can specify an integer or a floating-point number.
-
-## currentDatabase() {#currentdatabase}
-
-Returns the name of the current database.
-You can use this function in table engine parameters in a CREATE TABLE query where you need to specify the database.
-
-## currentUser() {#other-function-currentuser}
-
-Returns the login of the current user. For a distributed query, the login of the user who initiated the query is returned.
-
-``` sql
-SELECT currentUser();
-```
-
-Alias: `user()`, `USER()`.
-
-**Returned values**
-
-- Login of the current user.
-- Login of the user who initiated the query, in case of a distributed query.
-
-Type: `String`.
-
-**Example**
-
-Query:
-
-``` sql
-SELECT currentUser();
-```
-
-Result:
-
-``` text
-┌─currentUser()─┐
-│ default       │
-└───────────────┘
-```
-
-## isFinite(x) {#isfinitex}
-
-Accepts Float32 and Float64 and returns UInt8 equal to 1 if the argument is not infinite and not a NaN, otherwise 0.
-
-## isInfinite(x) {#isinfinitex}
-
-Accepts Float32 and Float64 and returns UInt8 equal to 1 if the argument is infinite, otherwise 0. Note that 0 is returned for a NaN.
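-
-For example (illustrative; in ClickHouse, floating-point division by zero yields `inf` rather than an error):
-
-``` sql
-SELECT isFinite(1 / 0) AS f, isInfinite(1 / 0) AS i
-```
-
-``` text
-┌─f─┬─i─┐
-│ 0 │ 1 │
-└───┴───┘
-```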
- -## ifNotFinite {#ifnotfinite} - -Checks whether floating point value is finite. - -**Syntax** - - ifNotFinite(x,y) - -**Parameters** - -- `x` — Value to be checked for infinity. Type: [Float\*](../../data_types/float.md). -- `y` — Fallback value. Type: [Float\*](../../data_types/float.md). - -**Returned value** - -- `x` if `x` is finite. -- `y` if `x` is not finite. - -**Example** - -Query: - - SELECT 1/0 as infimum, ifNotFinite(infimum,42) - -Result: - - ┌─infimum─┬─ifNotFinite(divide(1, 0), 42)─┐ - │ inf │ 42 │ - └─────────┴───────────────────────────────┘ - -You can get similar result by using [ternary operator](conditional_functions.md#ternary-operator): `isFinite(x) ? x : y`. - -## isNaN(x) {#isnanx} - -Accepts Float32 and Float64 and returns UInt8 equal to 1 if the argument is a NaN, otherwise 0. - -## hasColumnInTable(\[‘hostname’\[, ‘username’\[, ‘password’\]\],\] ‘database’, ‘table’, ‘column’) {#hascolumnintablehostname-username-password-database-table-column} - -Accepts constant strings: database name, table name, and column name. Returns a UInt8 constant expression equal to 1 if there is a column, otherwise 0. If the hostname parameter is set, the test will run on a remote server. -The function throws an exception if the table does not exist. -For elements in a nested data structure, the function checks for the existence of a column. For the nested data structure itself, the function returns 0. - -## bar {#function-bar} - -Allows building a unicode-art diagram. - -`bar(x, min, max, width)` draws a band with a width proportional to `(x - min)` and equal to `width` characters when `x = max`. - -Parameters: - -- `x` — Size to display. -- `min, max` — Integer constants. The value must fit in `Int64`. -- `width` — Constant, positive integer, can be fractional. - -The band is drawn with accuracy to one eighth of a symbol. - -Example: - -``` sql -SELECT - toHour(EventTime) AS h, - count() AS c, - bar(c, 0, 600000, 20) AS bar -FROM test.hits -GROUP BY h -ORDER BY h ASC -``` - -``` text -┌──h─┬──────c─┬─bar────────────────┐ -│ 0 │ 292907 │ █████████▋ │ -│ 1 │ 180563 │ ██████ │ -│ 2 │ 114861 │ ███▋ │ -│ 3 │ 85069 │ ██▋ │ -│ 4 │ 68543 │ ██▎ │ -│ 5 │ 78116 │ ██▌ │ -│ 6 │ 113474 │ ███▋ │ -│ 7 │ 170678 │ █████▋ │ -│ 8 │ 278380 │ █████████▎ │ -│ 9 │ 391053 │ █████████████ │ -│ 10 │ 457681 │ ███████████████▎ │ -│ 11 │ 493667 │ ████████████████▍ │ -│ 12 │ 509641 │ ████████████████▊ │ -│ 13 │ 522947 │ █████████████████▍ │ -│ 14 │ 539954 │ █████████████████▊ │ -│ 15 │ 528460 │ █████████████████▌ │ -│ 16 │ 539201 │ █████████████████▊ │ -│ 17 │ 523539 │ █████████████████▍ │ -│ 18 │ 506467 │ ████████████████▊ │ -│ 19 │ 520915 │ █████████████████▎ │ -│ 20 │ 521665 │ █████████████████▍ │ -│ 21 │ 542078 │ ██████████████████ │ -│ 22 │ 493642 │ ████████████████▍ │ -│ 23 │ 400397 │ █████████████▎ │ -└────┴────────┴────────────────────┘ -``` - -## transform {#transform} - -Transforms a value according to the explicitly defined mapping of some elements to other ones. -There are two variations of this function: - -### transform(x, array\_from, array\_to, default) {#transformx-array-from-array-to-default} - -`x` – What to transform. - -`array_from` – Constant array of values for converting. - -`array_to` – Constant array of values to convert the values in ‘from’ to. - -`default` – Which value to use if ‘x’ is not equal to any of the values in ‘from’. - -`array_from` and `array_to` – Arrays of the same size. 
- -Types: - -`transform(T, Array(T), Array(U), U) -> U` - -`T` and `U` can be numeric, string, or Date or DateTime types. -Where the same letter is indicated (T or U), for numeric types these might not be matching types, but types that have a common type. -For example, the first argument can have the Int64 type, while the second has the Array(UInt16) type. - -If the ‘x’ value is equal to one of the elements in the ‘array\_from’ array, it returns the existing element (that is numbered the same) from the ‘array\_to’ array. Otherwise, it returns ‘default’. If there are multiple matching elements in ‘array\_from’, it returns one of the matches. - -Example: - -``` sql -SELECT - transform(SearchEngineID, [2, 3], ['Yandex', 'Google'], 'Other') AS title, - count() AS c -FROM test.hits -WHERE SearchEngineID != 0 -GROUP BY title -ORDER BY c DESC -``` - -``` text -┌─title─────┬──────c─┐ -│ Yandex │ 498635 │ -│ Google │ 229872 │ -│ Other │ 104472 │ -└───────────┴────────┘ -``` - -### transform(x, array\_from, array\_to) {#transformx-array-from-array-to} - -Differs from the first variation in that the ‘default’ argument is omitted. -If the ‘x’ value is equal to one of the elements in the ‘array\_from’ array, it returns the matching element (that is numbered the same) from the ‘array\_to’ array. Otherwise, it returns ‘x’. - -Types: - -`transform(T, Array(T), Array(T)) -> T` - -Example: - -``` sql -SELECT - transform(domain(Referer), ['yandex.ru', 'google.ru', 'vk.com'], ['www.yandex', 'example.com']) AS s, - count() AS c -FROM test.hits -GROUP BY domain(Referer) -ORDER BY count() DESC -LIMIT 10 -``` - -``` text -┌─s──────────────┬───────c─┐ -│ │ 2906259 │ -│ www.yandex │ 867767 │ -│ ███████.ru │ 313599 │ -│ mail.yandex.ru │ 107147 │ -│ ██████.ru │ 100355 │ -│ █████████.ru │ 65040 │ -│ news.yandex.ru │ 64515 │ -│ ██████.net │ 59141 │ -│ example.com │ 57316 │ -└────────────────┴─────────┘ -``` - -## formatReadableSize(x) {#formatreadablesizex} - -Accepts the size (number of bytes). Returns a rounded size with a suffix (KiB, MiB, etc.) as a string. - -Example: - -``` sql -SELECT - arrayJoin([1, 1024, 1024*1024, 192851925]) AS filesize_bytes, - formatReadableSize(filesize_bytes) AS filesize -``` - -``` text -┌─filesize_bytes─┬─filesize───┐ -│ 1 │ 1.00 B │ -│ 1024 │ 1.00 KiB │ -│ 1048576 │ 1.00 MiB │ -│ 192851925 │ 183.92 MiB │ -└────────────────┴────────────┘ -``` - -## least(a, b) {#leasta-b} - -Returns the smallest value from a and b. - -## greatest(a, b) {#greatesta-b} - -Returns the largest value of a and b. - -## uptime() {#uptime} - -Returns the server’s uptime in seconds. - -## version() {#version} - -Returns the version of the server as a string. - -## timezone() {#timezone} - -Returns the timezone of the server. - -## blockNumber {#blocknumber} - -Returns the sequence number of the data block where the row is located. - -## rowNumberInBlock {#function-rownumberinblock} - -Returns the ordinal number of the row in the data block. Different data blocks are always recalculated. - -## rowNumberInAllBlocks() {#rownumberinallblocks} - -Returns the ordinal number of the row in the data block. This function only considers the affected data blocks. - -## neighbor {#neighbor} - -The window function that provides access to a row at a specified offset which comes before or after the current row of a given column. - -**Syntax** - -``` sql -neighbor(column, offset[, default_value]) -``` - -The result of the function depends on the affected data blocks and the order of data in the block. 
-If you make a subquery with ORDER BY and call the function from outside the subquery, you can get the expected result. - -**Parameters** - -- `column` — A column name or scalar expression. -- `offset` — The number of rows forwards or backwards from the current row of `column`. [Int64](../../data_types/int_uint.md). -- `default_value` — Optional. The value to be returned if offset goes beyond the scope of the block. Type of data blocks affected. - -**Returned values** - -- Value for `column` in `offset` distance from current row if `offset` value is not outside block bounds. -- Default value for `column` if `offset` value is outside block bounds. If `default_value` is given, then it will be used. - -Type: type of data blocks affected or default value type. - -**Example** - -Query: - -``` sql -SELECT number, neighbor(number, 2) FROM system.numbers LIMIT 10; -``` - -Result: - -``` text -┌─number─┬─neighbor(number, 2)─┐ -│ 0 │ 2 │ -│ 1 │ 3 │ -│ 2 │ 4 │ -│ 3 │ 5 │ -│ 4 │ 6 │ -│ 5 │ 7 │ -│ 6 │ 8 │ -│ 7 │ 9 │ -│ 8 │ 0 │ -│ 9 │ 0 │ -└────────┴─────────────────────┘ -``` - -Query: - -``` sql -SELECT number, neighbor(number, 2, 999) FROM system.numbers LIMIT 10; -``` - -Result: - -``` text -┌─number─┬─neighbor(number, 2, 999)─┐ -│ 0 │ 2 │ -│ 1 │ 3 │ -│ 2 │ 4 │ -│ 3 │ 5 │ -│ 4 │ 6 │ -│ 5 │ 7 │ -│ 6 │ 8 │ -│ 7 │ 9 │ -│ 8 │ 999 │ -│ 9 │ 999 │ -└────────┴──────────────────────────┘ -``` - -This function can be used to compute year-over-year metric value: - -Query: - -``` sql -WITH toDate('2018-01-01') AS start_date -SELECT - toStartOfMonth(start_date + (number * 32)) AS month, - toInt32(month) % 100 AS money, - neighbor(money, -12) AS prev_year, - round(prev_year / money, 2) AS year_over_year -FROM numbers(16) -``` - -Result: - -``` text -┌──────month─┬─money─┬─prev_year─┬─year_over_year─┐ -│ 2018-01-01 │ 32 │ 0 │ 0 │ -│ 2018-02-01 │ 63 │ 0 │ 0 │ -│ 2018-03-01 │ 91 │ 0 │ 0 │ -│ 2018-04-01 │ 22 │ 0 │ 0 │ -│ 2018-05-01 │ 52 │ 0 │ 0 │ -│ 2018-06-01 │ 83 │ 0 │ 0 │ -│ 2018-07-01 │ 13 │ 0 │ 0 │ -│ 2018-08-01 │ 44 │ 0 │ 0 │ -│ 2018-09-01 │ 75 │ 0 │ 0 │ -│ 2018-10-01 │ 5 │ 0 │ 0 │ -│ 2018-11-01 │ 36 │ 0 │ 0 │ -│ 2018-12-01 │ 66 │ 0 │ 0 │ -│ 2019-01-01 │ 97 │ 32 │ 0.33 │ -│ 2019-02-01 │ 28 │ 63 │ 2.25 │ -│ 2019-03-01 │ 56 │ 91 │ 1.62 │ -│ 2019-04-01 │ 87 │ 22 │ 0.25 │ -└────────────┴───────┴───────────┴────────────────┘ -``` - -## runningDifference(x) {#other_functions-runningdifference} - -Calculates the difference between successive row values ​​in the data block. -Returns 0 for the first row and the difference from the previous row for each subsequent row. - -The result of the function depends on the affected data blocks and the order of data in the block. -If you make a subquery with ORDER BY and call the function from outside the subquery, you can get the expected result. - -Example: - -``` sql -SELECT - EventID, - EventTime, - runningDifference(EventTime) AS delta -FROM -( - SELECT - EventID, - EventTime - FROM events - WHERE EventDate = '2016-11-24' - ORDER BY EventTime ASC - LIMIT 5 -) -``` - -``` text -┌─EventID─┬───────────EventTime─┬─delta─┐ -│ 1106 │ 2016-11-24 00:00:04 │ 0 │ -│ 1107 │ 2016-11-24 00:00:05 │ 1 │ -│ 1108 │ 2016-11-24 00:00:05 │ 0 │ -│ 1109 │ 2016-11-24 00:00:09 │ 4 │ -│ 1110 │ 2016-11-24 00:00:10 │ 1 │ -└─────────┴─────────────────────┴───────┘ -``` - -Please note - block size affects the result. With each new block, the `runningDifference` state is reset. 
-
-``` sql
-SELECT
-    number,
-    runningDifference(number + 1) AS diff
-FROM numbers(100000)
-WHERE diff != 1
-```
-
-``` text
-┌─number─┬─diff─┐
-│      0 │    0 │
-└────────┴──────┘
-┌─number─┬─diff─┐
-│  65536 │    0 │
-└────────┴──────┘
-```
-
-``` sql
-set max_block_size=100000 -- default value is 65536!
-
-SELECT
-    number,
-    runningDifference(number + 1) AS diff
-FROM numbers(100000)
-WHERE diff != 1
-```
-
-``` text
-┌─number─┬─diff─┐
-│      0 │    0 │
-└────────┴──────┘
-```
-
-## runningDifferenceStartingWithFirstValue {#runningdifferencestartingwithfirstvalue}
-
-Same as [runningDifference](./other_functions.md#other_functions-runningdifference), but the first row returns the value of the first row itself, and each subsequent row returns the difference from the previous row.
-
-## MACNumToString(num) {#macnumtostringnum}
-
-Accepts a UInt64 number. Interprets it as a MAC address in big endian. Returns a string containing the corresponding MAC address in the format AA:BB:CC:DD:EE:FF (colon-separated numbers in hexadecimal form).
-
-## MACStringToNum(s) {#macstringtonums}
-
-The inverse function of MACNumToString. If the MAC address has an invalid format, it returns 0.
-
-## MACStringToOUI(s) {#macstringtoouis}
-
-Accepts a MAC address in the format AA:BB:CC:DD:EE:FF (colon-separated numbers in hexadecimal form). Returns the first three octets as a UInt64 number. If the MAC address has an invalid format, it returns 0.
-
-## getSizeOfEnumType {#getsizeofenumtype}
-
-Returns the number of fields in [Enum](../../data_types/enum.md).
-
-``` sql
-getSizeOfEnumType(value)
-```
-
-**Parameters:**
-
-- `value` — Value of type `Enum`.
-
-**Returned values**
-
-- The number of fields with `Enum` input values.
-- An exception is thrown if the type is not `Enum`.
-
-**Example**
-
-``` sql
-SELECT getSizeOfEnumType( CAST('a' AS Enum8('a' = 1, 'b' = 2) ) ) AS x
-```
-
-``` text
-┌─x─┐
-│ 2 │
-└───┘
-```
-
-## blockSerializedSize {#blockserializedsize}
-
-Returns the size on disk (without taking compression into account).
-
-``` sql
-blockSerializedSize(value[, value[, ...]])
-```
-
-**Parameters:**
-
-- `value` — Any value.
-
-**Returned values**
-
-- The number of bytes that will be written to disk for a block of values (without compression).
-
-**Example**
-
-``` sql
-SELECT blockSerializedSize(maxState(1)) as x
-```
-
-``` text
-┌─x─┐
-│ 2 │
-└───┘
-```
-
-## toColumnTypeName {#tocolumntypename}
-
-Returns the name of the class that represents the data type of the column in RAM.
-
-``` sql
-toColumnTypeName(value)
-```
-
-**Parameters:**
-
-- `value` — Any type of value.
-
-**Returned values**
-
-- A string with the name of the class that is used for representing the `value` data type in RAM.
-
-**Example of the difference between `toTypeName` and `toColumnTypeName`**
-
-``` sql
-SELECT toTypeName(CAST('2018-01-01 01:02:03' AS DateTime))
-```
-
-``` text
-┌─toTypeName(CAST('2018-01-01 01:02:03', 'DateTime'))─┐
-│ DateTime                                            │
-└─────────────────────────────────────────────────────┘
-```
-
-``` sql
-SELECT toColumnTypeName(CAST('2018-01-01 01:02:03' AS DateTime))
-```
-
-``` text
-┌─toColumnTypeName(CAST('2018-01-01 01:02:03', 'DateTime'))─┐
-│ Const(UInt32)                                             │
-└───────────────────────────────────────────────────────────┘
-```
-
-The example shows that the `DateTime` data type is stored in memory as `Const(UInt32)`.
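-
-As a follow-up sketch, wrapping the same value in `materialize` (described above) shows the non-constant representation; the `Const` wrapper disappears:
-
-``` sql
-SELECT toColumnTypeName(materialize(CAST('2018-01-01 01:02:03' AS DateTime)))
-```
-
-``` text
-┌─toColumnTypeName(materialize(CAST('2018-01-01 01:02:03', 'DateTime')))─┐
-│ UInt32                                                                 │
-└────────────────────────────────────────────────────────────────────────┘
-```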
- -## dumpColumnStructure {#dumpcolumnstructure} - -Outputs a detailed description of data structures in RAM - -``` sql -dumpColumnStructure(value) -``` - -**Parameters:** - -- `value` — Any type of value. - -**Returned values** - -- A string describing the structure that is used for representing the `value` data type in RAM. - -**Example** - -``` sql -SELECT dumpColumnStructure(CAST('2018-01-01 01:02:03', 'DateTime')) -``` - -``` text -┌─dumpColumnStructure(CAST('2018-01-01 01:02:03', 'DateTime'))─┐ -│ DateTime, Const(size = 1, UInt32(size = 1)) │ -└──────────────────────────────────────────────────────────────┘ -``` - -## defaultValueOfArgumentType {#defaultvalueofargumenttype} - -Outputs the default value for the data type. - -Does not include default values for custom columns set by the user. - -``` sql -defaultValueOfArgumentType(expression) -``` - -**Parameters:** - -- `expression` — Arbitrary type of value or an expression that results in a value of an arbitrary type. - -**Returned values** - -- `0` for numbers. -- Empty string for strings. -- `ᴺᵁᴸᴸ` for [Nullable](../../data_types/nullable.md). - -**Example** - -``` sql -SELECT defaultValueOfArgumentType( CAST(1 AS Int8) ) -``` - -``` text -┌─defaultValueOfArgumentType(CAST(1, 'Int8'))─┐ -│ 0 │ -└─────────────────────────────────────────────┘ -``` - -``` sql -SELECT defaultValueOfArgumentType( CAST(1 AS Nullable(Int8) ) ) -``` - -``` text -┌─defaultValueOfArgumentType(CAST(1, 'Nullable(Int8)'))─┐ -│ ᴺᵁᴸᴸ │ -└───────────────────────────────────────────────────────┘ -``` - -## replicate {#other-functions-replicate} - -Creates an array with a single value. - -Used for internal implementation of [arrayJoin](array_join.md#functions_arrayjoin). - -``` sql -SELECT replicate(x, arr); -``` - -**Parameters:** - -- `arr` — Original array. ClickHouse creates a new array of the same length as the original and fills it with the value `x`. -- `x` — The value that the resulting array will be filled with. - -**Returned value** - -An array filled with the value `x`. - -Type: `Array`. - -**Example** - -Query: - -``` sql -SELECT replicate(1, ['a', 'b', 'c']) -``` - -Result: - -``` text -┌─replicate(1, ['a', 'b', 'c'])─┐ -│ [1,1,1] │ -└───────────────────────────────┘ -``` - -## filesystemAvailable {#filesystemavailable} - -Returns amount of remaining space on the filesystem where the files of the databases located. It is always smaller than total free space ([filesystemFree](#filesystemfree)) because some space is reserved for OS. - -**Syntax** - -``` sql -filesystemAvailable() -``` - -**Returned value** - -- The amount of remaining space available in bytes. - -Type: [UInt64](../../data_types/int_uint.md). - -**Example** - -Query: - -``` sql -SELECT formatReadableSize(filesystemAvailable()) AS "Available space", toTypeName(filesystemAvailable()) AS "Type"; -``` - -Result: - -``` text -┌─Available space─┬─Type───┐ -│ 30.75 GiB │ UInt64 │ -└─────────────────┴────────┘ -``` - -## filesystemFree {#filesystemfree} - -Returns total amount of the free space on the filesystem where the files of the databases located. See also `filesystemAvailable` - -**Syntax** - -``` sql -filesystemFree() -``` - -**Returned value** - -- Amount of free space in bytes. - -Type: [UInt64](../../data_types/int_uint.md). 
-
-**Example**
-
-Query:
-
-``` sql
-SELECT formatReadableSize(filesystemFree()) AS "Free space", toTypeName(filesystemFree()) AS "Type";
-```
-
-Result:
-
-``` text
-┌─Free space─┬─Type───┐
-│ 32.39 GiB  │ UInt64 │
-└────────────┴────────┘
-```
-
-## filesystemCapacity {#filesystemcapacity}
-
-Returns the capacity of the filesystem in bytes. For evaluation, the [path](../../operations/server_settings/settings.md#server_settings-path) to the data directory must be configured.
-
-**Syntax**
-
-``` sql
-filesystemCapacity()
-```
-
-**Returned value**
-
-- Capacity information of the filesystem in bytes.
-
-Type: [UInt64](../../data_types/int_uint.md).
-
-**Example**
-
-Query:
-
-``` sql
-SELECT formatReadableSize(filesystemCapacity()) AS "Capacity", toTypeName(filesystemCapacity()) AS "Type"
-```
-
-Result:
-
-``` text
-┌─Capacity──┬─Type───┐
-│ 39.32 GiB │ UInt64 │
-└───────────┴────────┘
-```
-
-## finalizeAggregation {#function-finalizeaggregation}
-
-Takes a state of an aggregate function. Returns the result of aggregation (the finalized state).
-
-## runningAccumulate {#function-runningaccumulate}
-
-Takes states of an aggregate function and returns a column with values that are the result of accumulating these states over the rows of the block, from the first row to the current one.
-For example, it takes a state of an aggregate function (e.g. `runningAccumulate(uniqState(UserID))`) and, for each row of the block, returns the result of the aggregate function on the merged states of all previous rows and the current row.
-So, the result of the function depends on the partitioning of the data into blocks and on the order of data within a block.
-
-## joinGet {#joinget}
-
-The function lets you extract data from the table the same way as from a [dictionary](../../query_language/dicts/index.md).
-
-Gets data from [Join](../../operations/table_engines/join.md#creating-a-table) tables using the specified join key.
-
-Only supports tables created with the `ENGINE = Join(ANY, LEFT, <join_keys>)` statement.
-
-**Syntax**
-
-``` sql
-joinGet(join_storage_table_name, `value_column`, join_keys)
-```
-
-**Parameters**
-
-- `join_storage_table_name` — an [identifier](../syntax.md#syntax-identifiers) indicating where the search is performed. The identifier is searched in the default database (see parameter `default_database` in the config file). To override the default database, use the `USE db_name` statement, or specify the database and the table through the separator `db_name.db_table`, as in the example.
-- `value_column` — name of the column of the table that contains the required data.
-- `join_keys` — list of keys.
-
-**Returned value**
-
-Returns a list of values corresponding to the list of keys.
-
-If a certain key doesn’t exist in the source table, then `0` or `null` will be returned based on the [join\_use\_nulls](../../operations/settings/settings.md#join_use_nulls) setting.
-
-More info about `join_use_nulls` in [Join operation](../../operations/table_engines/join.md).
- -**Example** - -Input table: - -``` sql -CREATE DATABASE db_test -CREATE TABLE db_test.id_val(`id` UInt32, `val` UInt32) ENGINE = Join(ANY, LEFT, id) SETTINGS join_use_nulls = 1 -INSERT INTO db_test.id_val VALUES (1,11)(2,12)(4,13) -``` - -``` text -┌─id─┬─val─┐ -│ 4 │ 13 │ -│ 2 │ 12 │ -│ 1 │ 11 │ -└────┴─────┘ -``` - -Query: - -``` sql -SELECT joinGet(db_test.id_val,'val',toUInt32(number)) from numbers(4) SETTINGS join_use_nulls = 1 -``` - -Result: - -``` text -┌─joinGet(db_test.id_val, 'val', toUInt32(number))─┐ -│ 0 │ -│ 11 │ -│ 12 │ -│ 0 │ -└──────────────────────────────────────────────────┘ -``` - -## modelEvaluate(model\_name, …) {#function-modelevaluate} - -Evaluate external model. -Accepts a model name and model arguments. Returns Float64. - -## throwIf(x\[, custom\_message\]) {#throwifx-custom-message} - -Throw an exception if the argument is non zero. -custom\_message - is an optional parameter: a constant string, provides an error message - -``` sql -SELECT throwIf(number = 3, 'Too many') FROM numbers(10); -``` - -``` text -↙ Progress: 0.00 rows, 0.00 B (0.00 rows/s., 0.00 B/s.) Received exception from server (version 19.14.1): -Code: 395. DB::Exception: Received from localhost:9000. DB::Exception: Too many. -``` - -## identity {#identity} - -Returns the same value that was used as its argument. Used for debugging and testing, allows to cancel using index, and get the query performance of a full scan. When query is analyzed for possible use of index, the analyzer doesn’t look inside `identity` functions. - -**Syntax** - -``` sql -identity(x) -``` - -**Example** - -Query: - -``` sql -SELECT identity(42) -``` - -Result: - -``` text -┌─identity(42)─┐ -│ 42 │ -└──────────────┘ -``` - -## randomPrintableASCII {#randomascii} - -Generates a string with a random set of [ASCII](https://en.wikipedia.org/wiki/ASCII#Printable_characters) printable characters. - -**Syntax** - -``` sql -randomPrintableASCII(length) -``` - -**Parameters** - -- `length` — Resulting string length. Positive integer. - - If you pass `length < 0`, behavior of the function is undefined. - -**Returned value** - -- String with a random set of [ASCII](https://en.wikipedia.org/wiki/ASCII#Printable_characters) printable characters. - -Type: [String](../../data_types/string.md) - -**Example** - -``` sql -SELECT number, randomPrintableASCII(30) as str, length(str) FROM system.numbers LIMIT 3 -``` - -``` text -┌─number─┬─str────────────────────────────┬─length(randomPrintableASCII(30))─┐ -│ 0 │ SuiCOSTvC0csfABSw=UcSzp2.`rv8x │ 30 │ -│ 1 │ 1Ag NlJ &RCN:*>HVPG;PE-nO"SUFD │ 30 │ -│ 2 │ /"+<"wUTh:=LjJ Vm!c&hI*m#XTfzz │ 30 │ -└────────┴────────────────────────────────┴──────────────────────────────────┘ -``` - -[Original article](https://clickhouse.tech/docs/en/query_language/functions/other_functions/) diff --git a/docs/fa/query_language/functions/random_functions.md b/docs/fa/query_language/functions/random_functions.md deleted file mode 100644 index 759bc85806e..00000000000 --- a/docs/fa/query_language/functions/random_functions.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -en_copy: true ---- - -# Functions for generating pseudo-random numbers {#functions-for-generating-pseudo-random-numbers} - -Non-cryptographic generators of pseudo-random numbers are used. - -All the functions accept zero arguments or one argument. -If an argument is passed, it can be any type, and its value is not used for anything. 
-
-The only purpose of this argument is to prevent common subexpression elimination, so that two different instances of the same function return different columns with different random numbers.
-
-## rand {#rand}
-
-Returns a pseudo-random UInt32 number, evenly distributed among all UInt32-type numbers.
-Uses a linear congruential generator.
-
-## rand64 {#rand64}
-
-Returns a pseudo-random UInt64 number, evenly distributed among all UInt64-type numbers.
-Uses a linear congruential generator.
-
-## randConstant {#randconstant}
-
-Returns a pseudo-random UInt32 number. The value is the same for different blocks within one query.
-
-[Original article](https://clickhouse.tech/docs/en/query_language/functions/random_functions/)
diff --git a/docs/fa/query_language/functions/rounding_functions.md b/docs/fa/query_language/functions/rounding_functions.md
deleted file mode 100644
index 8f156b1cb7b..00000000000
--- a/docs/fa/query_language/functions/rounding_functions.md
+++ /dev/null
@@ -1,187 +0,0 @@
----
-en_copy: true
----
-
-# Rounding functions {#rounding-functions}
-
-## floor(x\[, N\]) {#floorx-n}
-
-Returns the largest round number that is less than or equal to `x`. A round number is a multiple of 1/10^N, or the nearest number of the appropriate data type if 1/10^N isn’t exact.
-‘N’ is an integer constant, an optional parameter. By default it is zero, which means to round to an integer.
-‘N’ may be negative.
-
-Examples: `floor(123.45, 1) = 123.4, floor(123.45, -1) = 120.`
-
-`x` is any numeric type. The result is a number of the same type.
-For integer arguments, it makes sense to round with a negative `N` value (for non-negative `N`, the function doesn’t do anything).
-If rounding causes overflow (for example, floor(-128, -1)), an implementation-specific result is returned.
-
-## ceil(x\[, N\]), ceiling(x\[, N\]) {#ceilx-n-ceilingx-n}
-
-Returns the smallest round number that is greater than or equal to `x`. In every other way, it is the same as the `floor` function (see above).
-
-## trunc(x\[, N\]), truncate(x\[, N\]) {#truncx-n-truncatex-n}
-
-Returns the round number with the largest absolute value that has an absolute value less than or equal to `x`‘s. In every other way, it is the same as the ‘floor’ function (see above).
-
-## round(x\[, N\]) {#rounding_functions-round}
-
-Rounds a value to a specified number of decimal places.
-
-The function returns the nearest number of the specified order. In the case when the given number is equidistant from the surrounding numbers, the function uses banker’s rounding for float number types and rounds away from zero for the other number types.
-
-``` sql
-round(expression [, decimal_places])
-```
-
-**Parameters:**
-
-- `expression` — A number to be rounded. Can be any [expression](../syntax.md#syntax-expressions) returning the numeric [data type](../../data_types/index.md#data_types).
-- `decimal-places` — An integer value.
-    - If `decimal-places > 0` then the function rounds the value to the right of the decimal point.
-    - If `decimal-places < 0` then the function rounds the value to the left of the decimal point.
-    - If `decimal-places = 0` then the function rounds the value to an integer. In this case the argument can be omitted.
-
-**Returned value:**
-
-The rounded number of the same type as the input number.
-
-### Examples {#examples}
-
-**Example of use**
-
-``` sql
-SELECT number / 2 AS x, round(x) FROM system.numbers LIMIT 3
-```
-
-``` text
-┌───x─┬─round(divide(number, 2))─┐
-│   0 │                        0 │
-│ 0.5 │                        0 │
-│   1 │                        1 │
-└─────┴──────────────────────────┘
-```
-
-**Examples of rounding**
-
-Rounding to the nearest number.
-
-``` text
-round(3.2, 0) = 3
-round(4.1267, 2) = 4.13
-round(22,-1) = 20
-round(467,-2) = 500
-round(-467,-2) = -500
-```
-
-Banker’s rounding.
-
-``` text
-round(3.5) = 4
-round(4.5) = 4
-round(3.55, 1) = 3.6
-round(3.65, 1) = 3.6
-```
-
-**See Also**
-
-- [roundBankers](#roundbankers)
-
-## roundBankers {#roundbankers}
-
-Rounds a number to a specified decimal position.
-
-- If the rounding number is halfway between two numbers, the function uses banker’s rounding.
-
-    Banker's rounding is a method of rounding fractional numbers. When the rounding number is halfway between two numbers, it's rounded to the nearest even digit at the specified decimal position. For example: 3.5 rounds up to 4, 2.5 rounds down to 2.
-
-    It's the default rounding method for floating point numbers defined in [IEEE 754](https://en.wikipedia.org/wiki/IEEE_754#Roundings_to_nearest). The [round](#rounding_functions-round) function performs the same rounding for floating point numbers. The `roundBankers` function also rounds integers the same way, for example, `roundBankers(45, -1) = 40`.
-
-- In other cases, the function rounds numbers to the nearest integer.
-
-Using banker’s rounding, you can reduce the effect that rounding numbers has on the results of summing or subtracting these numbers.
-
-For example, sum the numbers 1.5, 2.5, 3.5, 4.5 with different rounding:
-
-- No rounding: 1.5 + 2.5 + 3.5 + 4.5 = 12.
-- Banker’s rounding: 2 + 2 + 4 + 4 = 12.
-- Rounding to the nearest integer: 2 + 3 + 4 + 5 = 14.
-
-**Syntax**
-
-``` sql
-roundBankers(expression [, decimal_places])
-```
-
-**Parameters**
-
-- `expression` — A number to be rounded. Can be any [expression](../syntax.md#syntax-expressions) returning the numeric [data type](../../data_types/index.md#data_types).
-- `decimal-places` — Decimal places. An integer number.
-    - `decimal-places > 0` — The function rounds the number to the given position right of the decimal point. Example: `roundBankers(3.55, 1) = 3.6`.
-    - `decimal-places < 0` — The function rounds the number to the given position left of the decimal point. Example: `roundBankers(24.55, -1) = 20`.
-    - `decimal-places = 0` — The function rounds the number to an integer. In this case the argument can be omitted. Example: `roundBankers(2.5) = 2`.
-
-**Returned value**
-
-A value rounded by the banker’s rounding method.
-
-### Examples {#examples-1}
-
-**Example of use**
-
-Query:
-
-``` sql
-SELECT number / 2 AS x, roundBankers(x, 0) AS b FROM system.numbers LIMIT 10
-```
-
-Result:
-
-``` text
-┌───x─┬─b─┐
-│   0 │ 0 │
-│ 0.5 │ 0 │
-│   1 │ 1 │
-│ 1.5 │ 2 │
-│   2 │ 2 │
-│ 2.5 │ 2 │
-│   3 │ 3 │
-│ 3.5 │ 4 │
-│   4 │ 4 │
-│ 4.5 │ 4 │
-└─────┴───┘
-```
-
-**Examples of Banker’s rounding**
-
-``` text
-roundBankers(0.4) = 0
-roundBankers(-3.5) = -4
-roundBankers(4.5) = 4
-roundBankers(3.55, 1) = 3.6
-roundBankers(3.65, 1) = 3.6
-roundBankers(10.35, 1) = 10.4
-roundBankers(10.755, 2) = 10.76
-```
-
-**See Also**
-
-- [round](#rounding_functions-round)
-
-## roundToExp2(num) {#roundtoexp2num}
-
-Accepts a number. If the number is less than one, it returns 0. Otherwise, it rounds the number down to the nearest (whole non-negative) power of two.
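-
-For example (illustrative; the values follow from the definition above):
-
-``` sql
-SELECT roundToExp2(0.5) AS a, roundToExp2(5) AS b, roundToExp2(31) AS c
-```
-
-``` text
-┌─a─┬─b─┬──c─┐
-│ 0 │ 4 │ 16 │
-└───┴───┴────┘
-```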
-
-## roundDuration(num) {#rounddurationnum}
-
-Accepts a number. If the number is less than one, it returns 0. Otherwise, it rounds the number down to numbers from the set: 1, 10, 30, 60, 120, 180, 240, 300, 600, 1200, 1800, 3600, 7200, 18000, 36000. This function is specific to Yandex.Metrica and used for implementing the report on session length.
-
-## roundAge(num) {#roundagenum}
-
-Accepts a number. If the number is less than 18, it returns 0. Otherwise, it rounds the number down to a number from the set: 18, 25, 35, 45, 55. This function is specific to Yandex.Metrica and used for implementing the report on user age.
-
-## roundDown(num, arr) {#rounddownnum-arr}
-
-Accepts a number and rounds it down to an element in the specified array. If the value is less than the lowest bound, the lowest bound is returned.
-
-[Original article](https://clickhouse.tech/docs/en/query_language/functions/rounding_functions/)
diff --git a/docs/fa/query_language/functions/splitting_merging_functions.md b/docs/fa/query_language/functions/splitting_merging_functions.md
deleted file mode 100644
index de3f60a3a00..00000000000
--- a/docs/fa/query_language/functions/splitting_merging_functions.md
+++ /dev/null
@@ -1,113 +0,0 @@
----
-en_copy: true
----
-
-# Functions for splitting and merging strings and arrays {#functions-for-splitting-and-merging-strings-and-arrays}
-
-## splitByChar(separator, s) {#splitbycharseparator-s}
-
-Splits a string into substrings separated by a specified character. It uses a constant string `separator` consisting of exactly one character.
-Returns an array of selected substrings. Empty substrings may be selected if the separator occurs at the beginning or end of the string, or if there are multiple consecutive separators.
-
-**Syntax**
-
-```sql
-splitByChar(separator, s)
-```
-
-**Parameters**
-
-- `separator` — The separator, which must consist of exactly one character. [String](../../data_types/string.md).
-- `s` — The string to split. [String](../../data_types/string.md).
-
-**Returned value(s)**
-
-Returns an array of selected substrings. Empty substrings may be selected when:
-
-* A separator occurs at the beginning or end of the string;
-* There are multiple consecutive separators;
-* The original string `s` is empty.
-
-Type: [Array](../../data_types/array.md) of [String](../../data_types/string.md).
-
-**Example**
-
-``` sql
-SELECT splitByChar(',', '1,2,3,abcde')
-```
-
-``` text
-┌─splitByChar(',', '1,2,3,abcde')─┐
-│ ['1','2','3','abcde']           │
-└─────────────────────────────────┘
-```
-
-## splitByString(separator, s) {#splitbystringseparator-s}
-
-Splits a string into substrings separated by a string. It uses a constant string `separator` of multiple characters as the separator. If the string `separator` is empty, it will split the string `s` into an array of single characters.
-
-**Syntax**
-
-```sql
-splitByString(separator, s)
-```
-
-**Parameters**
-
-- `separator` — The separator. [String](../../data_types/string.md).
-- `s` — The string to split. [String](../../data_types/string.md).
-
-**Returned value(s)**
-
-Returns an array of selected substrings. Empty substrings may be selected when:
-
-* A non-empty separator occurs at the beginning or end of the string;
-* There are multiple consecutive non-empty separators;
-* The original string `s` is empty while the separator is not empty.
-
-Type: [Array](../../data_types/array.md) of [String](../../data_types/string.md).
-
-**Example**
-
-``` sql
-SELECT splitByString(', ', '1, 2 3, 4,5, abcde')
-```
-
-``` text
-┌─splitByString(', ', '1, 2 3, 4,5, abcde')─┐
-│ ['1','2 3','4,5','abcde']                 │
-└───────────────────────────────────────────┘
-```
-
-``` sql
-SELECT splitByString('', 'abcde')
-```
-
-``` text
-┌─splitByString('', 'abcde')─┐
-│ ['a','b','c','d','e']      │
-└────────────────────────────┘
-```
-
-## arrayStringConcat(arr\[, separator\]) {#arraystringconcatarr-separator}
-
-Concatenates the strings listed in the array with the separator. ‘separator’ is an optional parameter: a constant string, set to an empty string by default.
-Returns the string.
-
-## alphaTokens(s) {#alphatokenss}
-
-Selects substrings of consecutive bytes from the ranges a-z and A-Z. Returns an array of substrings.
-
-**Example**
-
-``` sql
-SELECT alphaTokens('abca1abc')
-```
-
-``` text
-┌─alphaTokens('abca1abc')─┐
-│ ['abca','abc']          │
-└─────────────────────────┘
-```
-
-[Original article](https://clickhouse.tech/docs/en/query_language/functions/splitting_merging_functions/)
diff --git a/docs/fa/query_language/functions/string_functions.md b/docs/fa/query_language/functions/string_functions.md
deleted file mode 100644
index ef742ee37da..00000000000
--- a/docs/fa/query_language/functions/string_functions.md
+++ /dev/null
@@ -1,486 +0,0 @@
----
-en_copy: true
----
-
-# Functions for working with strings {#functions-for-working-with-strings}
-
-## empty {#empty}
-
-Returns 1 for an empty string or 0 for a non-empty string.
-The result type is UInt8.
-A string is considered non-empty if it contains at least one byte, even if this is a space or a null byte.
-The function also works for arrays.
-
-## notEmpty {#notempty}
-
-Returns 0 for an empty string or 1 for a non-empty string.
-The result type is UInt8.
-The function also works for arrays.
-
-## length {#length}
-
-Returns the length of a string in bytes (not in characters, and not in code points).
-The result type is UInt64.
-The function also works for arrays.
-
-## lengthUTF8 {#lengthutf8}
-
-Returns the length of a string in Unicode code points (not in characters), assuming that the string contains a set of bytes that make up UTF-8 encoded text. If this assumption is not met, it returns some result (it doesn’t throw an exception).
-The result type is UInt64.
-
-## char\_length, CHAR\_LENGTH {#char-length}
-
-Returns the length of a string in Unicode code points (not in characters), assuming that the string contains a set of bytes that make up UTF-8 encoded text. If this assumption is not met, it returns some result (it doesn’t throw an exception).
-The result type is UInt64.
-
-## character\_length, CHARACTER\_LENGTH {#character-length}
-
-Returns the length of a string in Unicode code points (not in characters), assuming that the string contains a set of bytes that make up UTF-8 encoded text. If this assumption is not met, it returns some result (it doesn’t throw an exception).
-The result type is UInt64.
-
-## lower, lcase {#lower}
-
-Converts ASCII Latin symbols in a string to lowercase.
-
-## upper, ucase {#upper}
-
-Converts ASCII Latin symbols in a string to uppercase.
-
-## lowerUTF8 {#lowerutf8}
-
-Converts a string to lowercase, assuming the string contains a set of bytes that make up a UTF-8 encoded text.
-It doesn’t detect the language. So for Turkish the result might not be exactly correct.
-If the length of the UTF-8 byte sequence is different for upper and lower case of a code point, the result may be incorrect for this code point.
-If the string contains a set of bytes that is not UTF-8, then the behavior is undefined.
-
-## upperUTF8 {#upperutf8}
-
-Converts a string to uppercase, assuming the string contains a set of bytes that make up a UTF-8 encoded text.
-It doesn’t detect the language. So for Turkish the result might not be exactly correct.
-If the length of the UTF-8 byte sequence is different for upper and lower case of a code point, the result may be incorrect for this code point.
-If the string contains a set of bytes that is not UTF-8, then the behavior is undefined.
-
-## isValidUTF8 {#isvalidutf8}
-
-Returns 1 if the set of bytes is valid UTF-8 encoded, otherwise 0.
-
-## toValidUTF8 {#tovalidutf8}
-
-Replaces invalid UTF-8 characters with the `�` (U+FFFD) character. All consecutive invalid characters are collapsed into a single replacement character.
-
-``` sql
-toValidUTF8(input_string)
-```
-
-Parameters:
-
-- input\_string — Any set of bytes represented as the [String](../../data_types/string.md) data type object.
-
-Returned value: Valid UTF-8 string.
-
-**Example**
-
-``` sql
-SELECT toValidUTF8('\x61\xF0\x80\x80\x80b')
-```
-
-``` text
-┌─toValidUTF8('a����b')─┐
-│ a�b                   │
-└───────────────────────┘
-```
-
-## repeat {#repeat}
-
-Repeats a string as many times as specified and concatenates the replicated values as a single string.
-
-**Syntax**
-
-``` sql
-repeat(s, n)
-```
-
-**Parameters**
-
-- `s` — The string to repeat. [String](../../data_types/string.md).
-- `n` — The number of times to repeat the string. [UInt](../../data_types/int_uint.md).
-
-**Returned value**
-
-The single string, which contains the string `s` repeated `n` times. If `n` \< 1, the function returns an empty string.
-
-Type: `String`.
-
-**Example**
-
-Query:
-
-``` sql
-SELECT repeat('abc', 10)
-```
-
-Result:
-
-``` text
-┌─repeat('abc', 10)──────────────┐
-│ abcabcabcabcabcabcabcabcabcabc │
-└────────────────────────────────┘
-```
-
-## reverse {#reverse}
-
-Reverses the string (as a sequence of bytes).
-
-## reverseUTF8 {#reverseutf8}
-
-Reverses a sequence of Unicode code points, assuming that the string contains a set of bytes representing a UTF-8 text. If this assumption is not met, it returns some result (it doesn’t throw an exception).
-
-## format(pattern, s0, s1, …) {#format}
-
-Formats a constant pattern with the strings listed in the arguments. `pattern` is a simplified Python format pattern. The format string contains “replacement fields” surrounded by curly braces `{}`. Anything that is not contained in braces is considered literal text, which is copied unchanged to the output. If you need to include a brace character in the literal text, it can be escaped by doubling: `{{` and `}}`. Field names can be numbers (starting from zero) or empty (then they are numbered consecutively).
-
-``` sql
-SELECT format('{1} {0} {1}', 'World', 'Hello')
-```
-
-``` text
-┌─format('{1} {0} {1}', 'World', 'Hello')─┐
-│ Hello World Hello                       │
-└─────────────────────────────────────────┘
-```
-
-``` sql
-SELECT format('{} {}', 'Hello', 'World')
-```
-
-``` text
-┌─format('{} {}', 'Hello', 'World')─┐
-│ Hello World                       │
-└───────────────────────────────────┘
-```
-
-## concat {#concat}
-
-Concatenates the strings listed in the arguments, without a separator.
-
-**Syntax**
-
-``` sql
-concat(s1, s2, ...)
-```
-
-**Parameters**
-
-Values of type String or FixedString.
-
-**Returned values**
-
-Returns the String that results from concatenating the arguments.
-
-If any of the argument values is `NULL`, `concat` returns `NULL`.
-
-**Example**
-
-Query:
-
-``` sql
-SELECT concat('Hello, ', 'World!')
-```
-
-Result:
-
-``` text
-┌─concat('Hello, ', 'World!')─┐
-│ Hello, World!               │
-└─────────────────────────────┘
-```
-
-## concatAssumeInjective {#concatassumeinjective}
-
-Same as [concat](#concat), with the difference that you need to ensure that `concat(s1, s2, ...) → sn` is injective; this property is used for optimization of GROUP BY.
-
-A function is called “injective” if it always returns different results for different argument values. In other words: different arguments never yield an identical result.
-
-**Syntax**
-
-``` sql
-concatAssumeInjective(s1, s2, ...)
-```
-
-**Parameters**
-
-Values of type String or FixedString.
-
-**Returned values**
-
-Returns the String that results from concatenating the arguments.
-
-If any of the argument values is `NULL`, `concatAssumeInjective` returns `NULL`.
-
-**Example**
-
-Input table:
-
-``` sql
-CREATE TABLE key_val(`key1` String, `key2` String, `value` UInt32) ENGINE = TinyLog;
-INSERT INTO key_val VALUES ('Hello, ','World',1), ('Hello, ','World',2), ('Hello, ','World!',3), ('Hello',', World!',2);
-SELECT * FROM key_val;
-```
-
-``` text
-┌─key1────┬─key2─────┬─value─┐
-│ Hello,  │ World    │     1 │
-│ Hello,  │ World    │     2 │
-│ Hello,  │ World!   │     3 │
-│ Hello   │ , World! │     2 │
-└─────────┴──────────┴───────┘
-```
-
-Query:
-
-``` sql
-SELECT concat(key1, key2), sum(value) FROM key_val GROUP BY concatAssumeInjective(key1, key2)
-```
-
-Result:
-
-``` text
-┌─concat(key1, key2)─┬─sum(value)─┐
-│ Hello, World!      │          3 │
-│ Hello, World!      │          2 │
-│ Hello, World       │          3 │
-└────────────────────┴────────────┘
-```
-
-## substring(s, offset, length), mid(s, offset, length), substr(s, offset, length) {#substring}
-
-Returns a substring starting with the byte from the ‘offset’ index that is ‘length’ bytes long. Character indexing starts from one (as in standard SQL). The ‘offset’ and ‘length’ arguments must be constants.
-
-## substringUTF8(s, offset, length) {#substringutf8}
-
-The same as ‘substring’, but for Unicode code points. Works under the assumption that the string contains a set of bytes representing a UTF-8 encoded text. If this assumption is not met, it returns some result (it doesn’t throw an exception).
-
-## appendTrailingCharIfAbsent(s, c) {#appendtrailingcharifabsent}
-
-If the ‘s’ string is non-empty and does not contain the ‘c’ character at the end, it appends the ‘c’ character to the end.
-
-## convertCharset(s, from, to) {#convertcharset}
-
-Returns the string ‘s’ that was converted from the encoding in ‘from’ to the encoding in ‘to’.
-
-## base64Encode(s) {#base64encode}
-
-Encodes the ‘s’ string into base64.
-
-## base64Decode(s) {#base64decode}
-
-Decodes the base64-encoded string ‘s’ into the original string. Raises an exception in case of failure.
-
-## tryBase64Decode(s) {#trybase64decode}
-
-Similar to base64Decode, but returns an empty string in case of error.
-
-## endsWith(s, suffix) {#endswith}
-
-Returns 1 if the string ends with the specified suffix, otherwise it returns 0.
-
-## startsWith(str, prefix) {#startswith}
-
-Returns 1 if the string starts with the specified prefix, otherwise it returns 0.
-
-``` sql
-SELECT startsWith('Spider-Man', 'Spi');
-```
-
-**Returned values**
-
-- 1, if the string starts with the specified prefix.
-- 0, if the string doesn’t start with the specified prefix.
-
-**Example**
-
-Query:
-
-``` sql
-SELECT startsWith('Hello, world!', 'He');
-```
-
-Result:
-
-``` text
-┌─startsWith('Hello, world!', 'He')─┐
-│                                 1 │
-└───────────────────────────────────┘
-```
-
-## trim {#trim}
-
-Removes all specified characters from the start or end of a string.
-By default removes all consecutive occurrences of common whitespace (ASCII character 32) from both ends of a string.
-
-**Syntax**
-
-``` sql
-trim([[LEADING|TRAILING|BOTH] trim_character FROM] input_string)
-```
-
-**Parameters**
-
-- `trim_character` — specified characters for trim. [String](../../data_types/string.md).
-- `input_string` — string for trim. [String](../../data_types/string.md).
-
-**Returned value**
-
-A string without the specified leading and/or trailing characters.
-
-Type: `String`.
-
-**Example**
-
-Query:
-
-``` sql
-SELECT trim(BOTH ' ()' FROM '(   Hello, world!   )')
-```
-
-Result:
-
-``` text
-┌─trim(BOTH ' ()' FROM '(   Hello, world!   )')─┐
-│ Hello, world!                                 │
-└───────────────────────────────────────────────┘
-```
-
-## trimLeft {#trimleft}
-
-Removes all consecutive occurrences of common whitespace (ASCII character 32) from the beginning of a string. It doesn’t remove other kinds of whitespace characters (tab, no-break space, etc.).
-
-**Syntax**
-
-``` sql
-trimLeft(input_string)
-```
-
-Alias: `ltrim(input_string)`.
-
-**Parameters**
-
-- `input_string` — string to trim. [String](../../data_types/string.md).
-
-**Returned value**
-
-A string without leading common whitespaces.
-
-Type: `String`.
-
-**Example**
-
-Query:
-
-``` sql
-SELECT trimLeft('     Hello, world!     ')
-```
-
-Result:
-
-``` text
-┌─trimLeft('     Hello, world!     ')─┐
-│ Hello, world!                       │
-└─────────────────────────────────────┘
-```
-
-## trimRight {#trimright}
-
-Removes all consecutive occurrences of common whitespace (ASCII character 32) from the end of a string. It doesn’t remove other kinds of whitespace characters (tab, no-break space, etc.).
-
-**Syntax**
-
-``` sql
-trimRight(input_string)
-```
-
-Alias: `rtrim(input_string)`.
-
-**Parameters**
-
-- `input_string` — string to trim. [String](../../data_types/string.md).
-
-**Returned value**
-
-A string without trailing common whitespaces.
-
-Type: `String`.
-
-**Example**
-
-Query:
-
-``` sql
-SELECT trimRight('     Hello, world!     ')
-```
-
-Result:
-
-``` text
-┌─trimRight('     Hello, world!     ')─┐
-│      Hello, world!                   │
-└──────────────────────────────────────┘
-```
-
-## trimBoth {#trimboth}
-
-Removes all consecutive occurrences of common whitespace (ASCII character 32) from both ends of a string. It doesn’t remove other kinds of whitespace characters (tab, no-break space, etc.).
-
-**Syntax**
-
-``` sql
-trimBoth(input_string)
-```
-
-Alias: `trim(input_string)`.
-
-**Parameters**
-
-- `input_string` — string to trim. [String](../../data_types/string.md).
-
-**Returned value**
-
-A string without leading and trailing common whitespaces.
-
-Type: `String`.
-
-**Example**
-
-Query:
-
-``` sql
-SELECT trimBoth('     Hello, world!     ')
-```
-
-Result:
-
-``` text
-┌─trimBoth('     Hello, world!     ')─┐
-│ Hello, world!                       │
-└─────────────────────────────────────┘
-```
-
-## CRC32(s) {#crc32}
-
-Returns the CRC32 checksum of a string, using CRC-32-IEEE 802.3 polynomial and initial value `0xffffffff` (zlib implementation).
-
-The result type is UInt32.
-
-## CRC32IEEE(s) {#crc32ieee}
-
-Returns the CRC32 checksum of a string, using CRC-32-IEEE 802.3 polynomial.
-
-The result type is UInt32.
-
-## CRC64(s) {#crc64}
-
-Returns the CRC64 checksum of a string, using CRC-64-ECMA polynomial.
-
-The result type is UInt64.
-
-[Original article](https://clickhouse.tech/docs/en/query_language/functions/string_functions/)
diff --git a/docs/fa/query_language/functions/string_replace_functions.md b/docs/fa/query_language/functions/string_replace_functions.md
deleted file mode 100644
index 6379d724c11..00000000000
--- a/docs/fa/query_language/functions/string_replace_functions.md
+++ /dev/null
@@ -1,91 +0,0 @@
----
-en_copy: true
----
-
-# Functions for searching and replacing in strings {#functions-for-searching-and-replacing-in-strings}
-
-## replaceOne(haystack, pattern, replacement) {#replaceonehaystack-pattern-replacement}
-
-Replaces the first occurrence, if it exists, of the ‘pattern’ substring in ‘haystack’ with the ‘replacement’ substring.
-Hereafter, ‘pattern’ and ‘replacement’ must be constants.
-
-## replaceAll(haystack, pattern, replacement), replace(haystack, pattern, replacement) {#replaceallhaystack-pattern-replacement-replacehaystack-pattern-replacement}
-
-Replaces all occurrences of the ‘pattern’ substring in ‘haystack’ with the ‘replacement’ substring.
-
-## replaceRegexpOne(haystack, pattern, replacement) {#replaceregexponehaystack-pattern-replacement}
-
-Replacement using the ‘pattern’ regular expression (in re2 syntax).
-Replaces only the first occurrence, if it exists.
-The ‘replacement’ string can be a pattern that includes substitutions `\0-\9`.
-The substitution `\0` includes the entire match. Substitutions `\1-\9` correspond to the subpattern numbers. To use the `\` character in a template, escape it using `\`.
-Also keep in mind that a string literal requires an extra escape.
-
-Example 1. Converting the date to American format:
-
-``` sql
-SELECT DISTINCT
-    EventDate,
-    replaceRegexpOne(toString(EventDate), '(\\d{4})-(\\d{2})-(\\d{2})', '\\2/\\3/\\1') AS res
-FROM test.hits
-LIMIT 7
-FORMAT TabSeparated
-```
-
-``` text
-2014-03-17      03/17/2014
-2014-03-18      03/18/2014
-2014-03-19      03/19/2014
-2014-03-20      03/20/2014
-2014-03-21      03/21/2014
-2014-03-22      03/22/2014
-2014-03-23      03/23/2014
-```
-
-Example 2. Copying a string ten times:
-
-``` sql
-SELECT replaceRegexpOne('Hello, World!', '.*', '\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0') AS res
-```
-
-``` text
-┌─res────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐
-│ Hello, World!Hello, World!Hello, World!Hello, World!Hello, World!Hello, World!Hello, World!Hello, World!Hello, World!Hello, World! │
-└────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
-```
-
-## replaceRegexpAll(haystack, pattern, replacement) {#replaceregexpallhaystack-pattern-replacement}
-
-This does the same thing, but replaces all the occurrences. Example:
-
-``` sql
-SELECT replaceRegexpAll('Hello, World!', '.', '\\0\\0') AS res
-```
-
-``` text
-┌─res────────────────────────┐
-│ HHeelllloo,,  WWoorrlldd!! │
-└────────────────────────────┘
-```
-
-As an exception, if a regular expression worked on an empty substring, the replacement is not made more than once.
-Example:
-
-``` sql
-SELECT replaceRegexpAll('Hello, World!', '^', 'here: ') AS res
-```
-
-``` text
-┌─res─────────────────┐
-│ here: Hello, World! │
-└─────────────────────┘
-```
-
-## regexpQuoteMeta(s) {#regexpquotemetas}
-
-The function adds a backslash before some predefined characters in the string.
-Predefined characters: ‘0’, ‘\\’, ‘\|’, ‘(’, ‘)’, ‘^’, ‘$’, ‘.’, ‘\[’, ‘\]’, ‘?’, ‘\*’, ‘+’, ‘{’, ‘:’, ‘-’.
-This implementation slightly differs from re2::RE2::QuoteMeta. It escapes zero byte as \\0 instead of \\x00 and it escapes only required characters.
-For more information, see the link: [RE2](https://github.com/google/re2/blob/master/re2/re2.cc#L473)
-
-[Original article](https://clickhouse.tech/docs/en/query_language/functions/string_replace_functions/)
diff --git a/docs/fa/query_language/functions/string_search_functions.md b/docs/fa/query_language/functions/string_search_functions.md
deleted file mode 100644
index 2937ab60496..00000000000
--- a/docs/fa/query_language/functions/string_search_functions.md
+++ /dev/null
@@ -1,376 +0,0 @@
----
-en_copy: true
----
-
-# Functions for Searching Strings {#functions-for-searching-strings}
-
-The search is case-sensitive by default in all these functions. There are separate variants for case insensitive search.
-
-## position(haystack, needle), locate(haystack, needle) {#position}
-
-Returns the position (in bytes) of the found substring in the string, starting from 1.
-
-Works under the assumption that the string contains a set of bytes representing a single-byte encoded text. If this assumption is not met and a character can’t be represented using a single byte, the function doesn’t throw an exception and returns some unexpected result. If a character can be represented using two bytes, it will use two bytes and so on.
-
-For a case-insensitive search, use the function [positionCaseInsensitive](#positioncaseinsensitive).
-
-**Syntax**
-
-``` sql
-position(haystack, needle)
-```
-
-Alias: `locate(haystack, needle)`.
-
-**Parameters**
-
-- `haystack` — String in which the substring will be searched. [String](../syntax.md#syntax-string-literal).
-- `needle` — Substring to be searched for. [String](../syntax.md#syntax-string-literal).
-
-**Returned values**
-
-- Starting position in bytes (counting from 1), if the substring was found.
-- 0, if the substring was not found.
-
-Type: `Integer`.
-
-**Examples**
-
-The phrase “Hello, world!” contains a set of bytes representing a single-byte encoded text. The function returns the expected result:
-
-Query:
-
-``` sql
-SELECT position('Hello, world!', '!')
-```
-
-Result:
-
-``` text
-┌─position('Hello, world!', '!')─┐
-│                             13 │
-└────────────────────────────────┘
-```
-
-The same phrase in Russian contains characters which can’t be represented using a single byte. The function returns an unexpected result (use the [positionUTF8](#positionutf8) function for multi-byte encoded text):
-
-Query:
-
-``` sql
-SELECT position('Привет, мир!', '!')
-```
-
-Result:
-
-``` text
-┌─position('Привет, мир!', '!')─┐
-│                            21 │
-└───────────────────────────────┘
-```
-
-## positionCaseInsensitive {#positioncaseinsensitive}
-
-The same as [position](#position), returns the position (in bytes) of the found substring in the string, starting from 1. Use the function for a case-insensitive search.
-
-Works under the assumption that the string contains a set of bytes representing a single-byte encoded text. If this assumption is not met and a character can’t be represented using a single byte, the function doesn’t throw an exception and returns some unexpected result. If a character can be represented using two bytes, it will use two bytes and so on.
-
-**Syntax**
-
-``` sql
-positionCaseInsensitive(haystack, needle)
-```
-
-**Parameters**
-
-- `haystack` — String in which the substring will be searched. 
[String](../syntax.md#syntax-string-literal).
-- `needle` — Substring to be searched for. [String](../syntax.md#syntax-string-literal).
-
-**Returned values**
-
-- Starting position in bytes (counting from 1), if the substring was found.
-- 0, if the substring was not found.
-
-Type: `Integer`.
-
-**Example**
-
-Query:
-
-``` sql
-SELECT positionCaseInsensitive('Hello, world!', 'hello')
-```
-
-Result:
-
-``` text
-┌─positionCaseInsensitive('Hello, world!', 'hello')─┐
-│                                                 1 │
-└───────────────────────────────────────────────────┘
-```
-
-## positionUTF8 {#positionutf8}
-
-Returns the position (in Unicode points) of the found substring in the string, starting from 1.
-
-Works under the assumption that the string contains a set of bytes representing a UTF-8 encoded text. If this assumption is not met, the function doesn’t throw an exception and returns some unexpected result. If a character can be represented using two Unicode points, it will use two and so on.
-
-For a case-insensitive search, use the function [positionCaseInsensitiveUTF8](#positioncaseinsensitiveutf8).
-
-**Syntax**
-
-``` sql
-positionUTF8(haystack, needle)
-```
-
-**Parameters**
-
-- `haystack` — String in which the substring will be searched. [String](../syntax.md#syntax-string-literal).
-- `needle` — Substring to be searched for. [String](../syntax.md#syntax-string-literal).
-
-**Returned values**
-
-- Starting position in Unicode points (counting from 1), if the substring was found.
-- 0, if the substring was not found.
-
-Type: `Integer`.
-
-**Examples**
-
-The phrase “Привет, мир!” (“Hello, world!” in Russian) contains a set of Unicode points where each character is represented using a single point. The function returns the expected result:
-
-Query:
-
-``` sql
-SELECT positionUTF8('Привет, мир!', '!')
-```
-
-Result:
-
-``` text
-┌─positionUTF8('Привет, мир!', '!')─┐
-│                                12 │
-└───────────────────────────────────┘
-```
-
-In the phrase “Salut, étudiante!”, the character `é` can be represented using one point (`U+00E9`) or two points (`U+0065U+0301`), so the function may return an unexpected result:
-
-Query for the letter `é`, which is represented by one Unicode point `U+00E9`:
-
-``` sql
-SELECT positionUTF8('Salut, étudiante!', '!')
-```
-
-Result:
-
-``` text
-┌─positionUTF8('Salut, étudiante!', '!')─┐
-│                                     17 │
-└────────────────────────────────────────┘
-```
-
-Query for the letter `é`, which is represented by two Unicode points `U+0065U+0301`:
-
-``` sql
-SELECT positionUTF8('Salut, étudiante!', '!')
-```
-
-Result:
-
-``` text
-┌─positionUTF8('Salut, étudiante!', '!')─┐
-│                                     18 │
-└────────────────────────────────────────┘
-```
-
-## positionCaseInsensitiveUTF8 {#positioncaseinsensitiveutf8}
-
-The same as [positionUTF8](#positionutf8), but is case-insensitive. Returns the position (in Unicode points) of the found substring in the string, starting from 1.
-
-Works under the assumption that the string contains a set of bytes representing a UTF-8 encoded text. If this assumption is not met, the function doesn’t throw an exception and returns some unexpected result. If a character can be represented using two Unicode points, it will use two and so on.
-
-**Syntax**
-
-``` sql
-positionCaseInsensitiveUTF8(haystack, needle)
-```
-
-**Parameters**
-
-- `haystack` — String in which the substring will be searched. [String](../syntax.md#syntax-string-literal).
-- `needle` — Substring to be searched for. [String](../syntax.md#syntax-string-literal).
-
-**Returned value**
-
-- Starting position in Unicode points (counting from 1), if the substring was found.
-- 0, if the substring was not found.
-
-Type: `Integer`.
-
-**Example**
-
-Query:
-
-``` sql
-SELECT positionCaseInsensitiveUTF8('Привет, мир!', 'Мир')
-```
-
-Result:
-
-``` text
-┌─positionCaseInsensitiveUTF8('Привет, мир!', 'Мир')─┐
-│                                                  9 │
-└────────────────────────────────────────────────────┘
-```
-
-## multiSearchAllPositions {#multisearchallpositions}
-
-The same as [position](string_search_functions.md#position) but returns an `Array` of positions (in bytes) of the found corresponding substrings in the string. Positions are indexed starting from 1.
-
-The search is performed on sequences of bytes without respect to string encoding and collation.
-
-- For case-insensitive ASCII search, use the function `multiSearchAllPositionsCaseInsensitive`.
-- For search in UTF-8, use the function [multiSearchAllPositionsUTF8](#multiSearchAllPositionsUTF8).
-- For case-insensitive UTF-8 search, use the function multiSearchAllPositionsCaseInsensitiveUTF8.
-
-**Syntax**
-
-``` sql
-multiSearchAllPositions(haystack, [needle1, needle2, ..., needlen])
-```
-
-**Parameters**
-
-- `haystack` — String in which the substrings will be searched. [String](../syntax.md#syntax-string-literal).
-- `needle` — Substring to be searched for. [String](../syntax.md#syntax-string-literal).
-
-**Returned values**
-
-- Array of starting positions in bytes (counting from 1), if the corresponding substring was found and 0 if not found.
-
-**Example**
-
-Query:
-
-``` sql
-SELECT multiSearchAllPositions('Hello, World!', ['hello', '!', 'world'])
-```
-
-Result:
-
-``` text
-┌─multiSearchAllPositions('Hello, World!', ['hello', '!', 'world'])─┐
-│ [0,13,0]                                                          │
-└───────────────────────────────────────────────────────────────────┘
-```
-
-## multiSearchAllPositionsUTF8 {#multiSearchAllPositionsUTF8}
-
-See `multiSearchAllPositions`.
-
-## multiSearchFirstPosition(haystack, \[needle1, needle2, …, needlen\]) {#multisearchfirstposition}
-
-The same as `position` but returns the leftmost offset of the string `haystack` that is matched to some of the needles.
-
-For a case-insensitive search and/or in UTF-8 format, use the functions `multiSearchFirstPositionCaseInsensitive, multiSearchFirstPositionUTF8, multiSearchFirstPositionCaseInsensitiveUTF8`.
-
-## multiSearchFirstIndex(haystack, \[needle1, needle2, …, needlen\]) {#multisearchfirstindexhaystack-needle1-needle2-needlen}
-
-Returns the index `i` (starting from 1) of the leftmost found needlei in the string `haystack` and 0 otherwise.
-
-For a case-insensitive search and/or in UTF-8 format, use the functions `multiSearchFirstIndexCaseInsensitive, multiSearchFirstIndexUTF8, multiSearchFirstIndexCaseInsensitiveUTF8`.
-
-## multiSearchAny(haystack, \[needle1, needle2, …, needlen\]) {#function-multisearchany}
-
-Returns 1, if at least one string needlei matches the string `haystack` and 0 otherwise.
-
-For a case-insensitive search and/or in UTF-8 format, use the functions `multiSearchAnyCaseInsensitive, multiSearchAnyUTF8, multiSearchAnyCaseInsensitiveUTF8`.
-
-!!! note "Note"
-    In all `multiSearch*` functions the number of needles should be less than 2^8 because of implementation specification.
-
-## match(haystack, pattern) {#matchhaystack-pattern}
-
-Checks whether the string matches the `pattern` regular expression, which uses `re2` syntax. The [syntax](https://github.com/google/re2/wiki/Syntax) of the `re2` regular expressions is more limited than the syntax of the Perl regular expressions.
-
-Returns 0 if it doesn’t match, or 1 if it matches.
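-
-For illustration, a hypothetical query (not part of the original reference): the pattern below searches for a version-like number anywhere in the string, so the expected result is 1. Note the doubled backslashes in the string literal; the escaping rules are explained next.
-
-``` sql
-SELECT match('ClickHouse 20.3', '\\d+\\.\\d+')
-```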
-
-Note that the backslash symbol (`\`) is used for escaping in the regular expression. The same symbol is used for escaping in string literals. So in order to escape the symbol in a regular expression, you must write two backslashes (\\) in a string literal.
-
-The regular expression works with the string as if it is a set of bytes. The regular expression can’t contain null bytes.
-For patterns to search for substrings in a string, it is better to use LIKE or ‘position’, since they work much faster.
-
-## multiMatchAny(haystack, \[pattern1, pattern2, …, patternn\]) {#multimatchanyhaystack-pattern1-pattern2-patternn}
-
-The same as `match`, but returns 0 if none of the regular expressions are matched and 1 if any of the patterns matches. It uses the [hyperscan](https://github.com/intel/hyperscan) library. For patterns to search substrings in a string, it is better to use `multiSearchAny` since it works much faster.
-
-!!! note "Note"
-    The length of any `haystack` string must be less than 2^32 bytes, otherwise an exception is thrown. This restriction takes place because of the hyperscan API.
-
-## multiMatchAnyIndex(haystack, \[pattern1, pattern2, …, patternn\]) {#multimatchanyindexhaystack-pattern1-pattern2-patternn}
-
-The same as `multiMatchAny`, but returns any index that matches the haystack.
-
-## multiMatchAllIndices(haystack, \[pattern1, pattern2, …, patternn\]) {#multimatchallindiceshaystack-pattern1-pattern2-patternn}
-
-The same as `multiMatchAny`, but returns the array of all indices that match the haystack in any order.
-
-## multiFuzzyMatchAny(haystack, distance, \[pattern1, pattern2, …, patternn\]) {#multifuzzymatchanyhaystack-distance-pattern1-pattern2-patternn}
-
-The same as `multiMatchAny`, but returns 1 if any pattern matches the haystack within a constant [edit distance](https://en.wikipedia.org/wiki/Edit_distance). This function is also in an experimental mode and can be extremely slow. For more information see the [hyperscan documentation](https://intel.github.io/hyperscan/dev-reference/compilation.html#approximate-matching).
-
-## multiFuzzyMatchAnyIndex(haystack, distance, \[pattern1, pattern2, …, patternn\]) {#multifuzzymatchanyindexhaystack-distance-pattern1-pattern2-patternn}
-
-The same as `multiFuzzyMatchAny`, but returns any index that matches the haystack within a constant edit distance.
-
-## multiFuzzyMatchAllIndices(haystack, distance, \[pattern1, pattern2, …, patternn\]) {#multifuzzymatchallindiceshaystack-distance-pattern1-pattern2-patternn}
-
-The same as `multiFuzzyMatchAny`, but returns the array of all indices in any order that match the haystack within a constant edit distance.
-
-!!! note "Note"
-    `multiFuzzyMatch*` functions do not support UTF-8 regular expressions, and such expressions are treated as bytes because of the hyperscan restriction.
-
-!!! note "Note"
-    To turn off all functions that use hyperscan, use setting `SET allow_hyperscan = 0;`.
-
-## extract(haystack, pattern) {#extracthaystack-pattern}
-
-Extracts a fragment of a string using a regular expression. If ‘haystack’ doesn’t match the ‘pattern’ regex, an empty string is returned. If the regex doesn’t contain subpatterns, it takes the fragment that matches the entire regex. Otherwise, it takes the fragment that matches the first subpattern.
-
-## extractAll(haystack, pattern) {#extractallhaystack-pattern}
-
-Extracts all the fragments of a string using a regular expression. If ‘haystack’ doesn’t match the ‘pattern’ regex, an empty string is returned. 
Returns an array of strings consisting of all matches to the regex. In general, the behavior is the same as the ‘extract’ function (it takes the first subpattern, or the entire expression if there isn’t a subpattern).
-
-## like(haystack, pattern), haystack LIKE pattern operator {#function-like}
-
-Checks whether a string matches a simple regular expression.
-The regular expression can contain the metasymbols `%` and `_`.
-
-`%` indicates any quantity of any bytes (including zero characters).
-
-`_` indicates any one byte.
-
-Use the backslash (`\`) for escaping metasymbols. See the note on escaping in the description of the ‘match’ function.
-
-For regular expressions like `%needle%`, the code is more optimal and works as fast as the `position` function.
-For other regular expressions, the code is the same as for the ‘match’ function.
-
-## notLike(haystack, pattern), haystack NOT LIKE pattern operator {#function-notlike}
-
-The same thing as ‘like’, but negative.
-
-## ngramDistance(haystack, needle) {#ngramdistancehaystack-needle}
-
-Calculates the 4-gram distance between `haystack` and `needle`: counts the symmetric difference between two multisets of 4-grams and normalizes it by the sum of their cardinalities. Returns a float number from 0 to 1 – the closer to zero, the more similar the strings are to each other. If the constant `needle` or `haystack` is more than 32Kb, an exception is thrown. If some of the non-constant `haystack` or `needle` strings are more than 32Kb, the distance is always one.
-
-For a case-insensitive search and/or in UTF-8 format, use the functions `ngramDistanceCaseInsensitive, ngramDistanceUTF8, ngramDistanceCaseInsensitiveUTF8`.
-
-## ngramSearch(haystack, needle) {#ngramsearchhaystack-needle}
-
-Same as `ngramDistance` but calculates the non-symmetric difference between `needle` and `haystack` – the number of n-grams from needle minus the common number of n-grams normalized by the number of `needle` n-grams. The closer to one, the more likely `needle` is in the `haystack`. Can be useful for fuzzy string search.
-
-For a case-insensitive search and/or in UTF-8 format, use the functions `ngramSearchCaseInsensitive, ngramSearchUTF8, ngramSearchCaseInsensitiveUTF8`.
-
-!!! note "Note"
-    For the UTF-8 case we use 3-gram distance. All these are not perfectly fair n-gram distances. We use 2-byte hashes to hash n-grams and then calculate the (non-)symmetric difference between these hash tables – collisions may occur. With the UTF-8 case-insensitive format we do not use a fair `tolower` function – we zero the 5th bit (starting from zero) of each codepoint byte, and the first bit of the zeroth byte if there is more than one byte – this works for Latin and mostly for all Cyrillic letters.
-
-[Original article](https://clickhouse.tech/docs/en/query_language/functions/string_search_functions/)
diff --git a/docs/fa/query_language/functions/type_conversion_functions.md b/docs/fa/query_language/functions/type_conversion_functions.md
deleted file mode 100644
index 779bff245b0..00000000000
--- a/docs/fa/query_language/functions/type_conversion_functions.md
+++ /dev/null
@@ -1,533 +0,0 @@
----
-en_copy: true
----
-
-# Type Conversion Functions {#type-conversion-functions}
-
-## Common Issues of Numeric Conversions {#numeric-conversion-issues}
-
-When you convert a value from one data type to another, you should remember that in the common case, it is an unsafe operation that can lead to data loss. 
Data loss can occur if you try to fit a value from a larger data type into a smaller data type, or if you convert values between different data types.
-
-ClickHouse has the [same behavior as C++ programs](https://en.cppreference.com/w/cpp/language/implicit_conversion).
-
-## toInt(8\|16\|32\|64) {#toint8163264}
-
-Converts an input value to the [Int](../../data_types/int_uint.md) data type. This function family includes:
-
-- `toInt8(expr)` — Results in the `Int8` data type.
-- `toInt16(expr)` — Results in the `Int16` data type.
-- `toInt32(expr)` — Results in the `Int32` data type.
-- `toInt64(expr)` — Results in the `Int64` data type.
-
-**Parameters**
-
-- `expr` — [Expression](../syntax.md#syntax-expressions) returning a number or a string with the decimal representation of a number. Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped.
-
-**Returned value**
-
-Integer value in the `Int8`, `Int16`, `Int32`, or `Int64` data type.
-
-Functions use [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning they truncate fractional digits of numbers.
-
-The behavior of functions for the [NaN and Inf](../../data_types/float.md#data_type-float-nan-inf) arguments is undefined. Remember about [numeric conversion issues](#numeric-conversion-issues) when using the functions.
-
-**Example**
-
-``` sql
-SELECT toInt64(nan), toInt32(32), toInt16('16'), toInt8(8.8)
-```
-
-``` text
-┌─────────toInt64(nan)─┬─toInt32(32)─┬─toInt16('16')─┬─toInt8(8.8)─┐
-│ -9223372036854775808 │          32 │            16 │           8 │
-└──────────────────────┴─────────────┴───────────────┴─────────────┘
-```
-
-## toInt(8\|16\|32\|64)OrZero {#toint8163264orzero}
-
-It takes an argument of type String and tries to parse it into Int (8 \| 16 \| 32 \| 64). If it fails, returns 0.
-
-**Example**
-
-``` sql
-SELECT toInt64OrZero('123123'), toInt8OrZero('123qwe123')
-```
-
-``` text
-┌─toInt64OrZero('123123')─┬─toInt8OrZero('123qwe123')─┐
-│                  123123 │                         0 │
-└─────────────────────────┴───────────────────────────┘
-```
-
-## toInt(8\|16\|32\|64)OrNull {#toint8163264ornull}
-
-It takes an argument of type String and tries to parse it into Int (8 \| 16 \| 32 \| 64). If it fails, returns NULL.
-
-**Example**
-
-``` sql
-SELECT toInt64OrNull('123123'), toInt8OrNull('123qwe123')
-```
-
-``` text
-┌─toInt64OrNull('123123')─┬─toInt8OrNull('123qwe123')─┐
-│                  123123 │                      ᴺᵁᴸᴸ │
-└─────────────────────────┴───────────────────────────┘
-```
-
-## toUInt(8\|16\|32\|64) {#touint8163264}
-
-Converts an input value to the [UInt](../../data_types/int_uint.md) data type. This function family includes:
-
-- `toUInt8(expr)` — Results in the `UInt8` data type.
-- `toUInt16(expr)` — Results in the `UInt16` data type.
-- `toUInt32(expr)` — Results in the `UInt32` data type.
-- `toUInt64(expr)` — Results in the `UInt64` data type.
-
-**Parameters**
-
-- `expr` — [Expression](../syntax.md#syntax-expressions) returning a number or a string with the decimal representation of a number. Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped.
-
-**Returned value**
-
-Integer value in the `UInt8`, `UInt16`, `UInt32`, or `UInt64` data type.
-
-Functions use [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning they truncate fractional digits of numbers.
-
-The behavior of functions for negative arguments and for the [NaN and Inf](../../data_types/float.md#data_type-float-nan-inf) arguments is undefined. 
If you pass a string with a negative number, for example `'-32'`, ClickHouse raises an exception. Remember about [numeric conversion issues](#numeric-conversion-issues) when using the functions.
-
-**Example**
-
-``` sql
-SELECT toUInt64(nan), toUInt32(-32), toUInt16('16'), toUInt8(8.8)
-```
-
-``` text
-┌───────toUInt64(nan)─┬─toUInt32(-32)─┬─toUInt16('16')─┬─toUInt8(8.8)─┐
-│ 9223372036854775808 │    4294967264 │             16 │            8 │
-└─────────────────────┴───────────────┴────────────────┴──────────────┘
-```
-
-## toUInt(8\|16\|32\|64)OrZero {#touint8163264orzero}
-
-## toUInt(8\|16\|32\|64)OrNull {#touint8163264ornull}
-
-## toFloat(32\|64) {#tofloat3264}
-
-## toFloat(32\|64)OrZero {#tofloat3264orzero}
-
-## toFloat(32\|64)OrNull {#tofloat3264ornull}
-
-## toDate {#todate}
-
-## toDateOrZero {#todateorzero}
-
-## toDateOrNull {#todateornull}
-
-## toDateTime {#todatetime}
-
-## toDateTimeOrZero {#todatetimeorzero}
-
-## toDateTimeOrNull {#todatetimeornull}
-
-## toDecimal(32\|64\|128) {#todecimal3264128}
-
-Converts `value` to the [Decimal](../../data_types/decimal.md) data type with precision of `S`. The `value` can be a number or a string. The `S` (scale) parameter specifies the number of decimal places.
-
-- `toDecimal32(value, S)`
-- `toDecimal64(value, S)`
-- `toDecimal128(value, S)`
-
-## toDecimal(32\|64\|128)OrNull {#todecimal3264128ornull}
-
-Converts an input string to a [Nullable(Decimal(P,S))](../../data_types/decimal.md) data type value. This family of functions includes:
-
-- `toDecimal32OrNull(expr, S)` — Results in `Nullable(Decimal32(S))` data type.
-- `toDecimal64OrNull(expr, S)` — Results in `Nullable(Decimal64(S))` data type.
-- `toDecimal128OrNull(expr, S)` — Results in `Nullable(Decimal128(S))` data type.
-
-These functions should be used instead of `toDecimal*()` functions if you prefer to get a `NULL` value instead of an exception in the event of an input value parsing error.
-
-**Parameters**
-
-- `expr` — [Expression](../syntax.md#syntax-expressions), returns a value in the [String](../../data_types/string.md) data type. ClickHouse expects the textual representation of the decimal number. For example, `'1.111'`.
-- `S` — Scale, the number of decimal places in the resulting value.
-
-**Returned value**
-
-A value in the `Nullable(Decimal(P,S))` data type. The value contains:
-
-- Number with `S` decimal places, if ClickHouse interprets the input string as a number.
-- `NULL`, if ClickHouse can’t interpret the input string as a number or if the input number contains more than `S` decimal places.
-
-**Examples**
-
-``` sql
-SELECT toDecimal32OrNull(toString(-1.111), 5) AS val, toTypeName(val)
-```
-
-``` text
-┌──────val─┬─toTypeName(toDecimal32OrNull(toString(-1.111), 5))─┐
-│ -1.11100 │ Nullable(Decimal(9, 5))                            │
-└──────────┴────────────────────────────────────────────────────┘
-```
-
-``` sql
-SELECT toDecimal32OrNull(toString(-1.111), 2) AS val, toTypeName(val)
-```
-
-``` text
-┌──val─┬─toTypeName(toDecimal32OrNull(toString(-1.111), 2))─┐
-│ ᴺᵁᴸᴸ │ Nullable(Decimal(9, 2))                            │
-└──────┴────────────────────────────────────────────────────┘
-```
-
-## toDecimal(32\|64\|128)OrZero {#todecimal3264128orzero}
-
-Converts an input value to the [Decimal(P,S)](../../data_types/decimal.md) data type. This family of functions includes:
-
-- `toDecimal32OrZero(expr, S)` — Results in `Decimal32(S)` data type.
-- `toDecimal64OrZero(expr, S)` — Results in `Decimal64(S)` data type.
-- `toDecimal128OrZero(expr, S)` — Results in `Decimal128(S)` data type.
-
-These functions should be used instead of `toDecimal*()` functions if you prefer to get a `0` value instead of an exception in the event of an input value parsing error.
-
-**Parameters**
-
-- `expr` — [Expression](../syntax.md#syntax-expressions), returns a value in the [String](../../data_types/string.md) data type. ClickHouse expects the textual representation of the decimal number. For example, `'1.111'`.
-- `S` — Scale, the number of decimal places in the resulting value.
-
-**Returned value**
-
-A value in the `Decimal(P,S)` data type. The value contains:
-
-- Number with `S` decimal places, if ClickHouse interprets the input string as a number.
-- 0 with `S` decimal places, if ClickHouse can’t interpret the input string as a number or if the input number contains more than `S` decimal places.
-
-**Example**
-
-``` sql
-SELECT toDecimal32OrZero(toString(-1.111), 5) AS val, toTypeName(val)
-```
-
-``` text
-┌──────val─┬─toTypeName(toDecimal32OrZero(toString(-1.111), 5))─┐
-│ -1.11100 │ Decimal(9, 5)                                      │
-└──────────┴────────────────────────────────────────────────────┘
-```
-
-``` sql
-SELECT toDecimal32OrZero(toString(-1.111), 2) AS val, toTypeName(val)
-```
-
-``` text
-┌──val─┬─toTypeName(toDecimal32OrZero(toString(-1.111), 2))─┐
-│ 0.00 │ Decimal(9, 2)                                      │
-└──────┴────────────────────────────────────────────────────┘
-```
-
-## toString {#tostring}
-
-Functions for converting between numbers, strings (but not fixed strings), dates, and dates with times.
-All these functions accept one argument.
-
-When converting to or from a string, the value is formatted or parsed using the same rules as for the TabSeparated format (and almost all other text formats). If the string can’t be parsed, an exception is thrown and the request is canceled.
-
-When converting dates to numbers or vice versa, the date corresponds to the number of days since the beginning of the Unix epoch.
-When converting dates with times to numbers or vice versa, the date with time corresponds to the number of seconds since the beginning of the Unix epoch.
-
-The date and date-with-time formats for the toDate/toDateTime functions are defined as follows:
-
-``` text
-YYYY-MM-DD
-YYYY-MM-DD hh:mm:ss
-```
-
-As an exception, if converting from UInt32, Int32, UInt64, or Int64 numeric types to Date, and if the number is greater than or equal to 65536, the number is interpreted as a Unix timestamp (and not as the number of days) and is rounded to the date. This allows support for the common occurrence of writing ‘toDate(unix\_timestamp)’, which otherwise would be an error and would require writing the more cumbersome ‘toDate(toDateTime(unix\_timestamp))’.
-
-Conversion between a date and a date with time is performed the natural way: by adding a null time or dropping the time.
-
-Conversion between numeric types uses the same rules as assignments between different numeric types in C++.
-
-Additionally, the toString function for a DateTime argument can take a second String argument containing the name of the time zone. Example: `Asia/Yekaterinburg`. In this case, the time is formatted according to the specified time zone.
-
-``` sql
-SELECT
-    now() AS now_local,
-    toString(now(), 'Asia/Yekaterinburg') AS now_yekat
-```
-
-``` text
-┌───────────now_local─┬─now_yekat───────────┐
-│ 2016-06-15 00:11:21 │ 2016-06-15 02:11:21 │
-└─────────────────────┴─────────────────────┘
-```
-
-Also see the `toUnixTimestamp` function.
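-
-A hypothetical illustration of the rules above (not part of the original reference): 1546300800 seconds is exactly 17897 days, and since 1546300800 is greater than 65536 it is interpreted as a Unix timestamp rather than a day number. Assuming a UTC server time zone, both expressions below should return the same date, 2019-01-01:
-
-``` sql
-SELECT toDate(17897), toDate(1546300800)
-```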
-
-## toFixedString(s, N) {#tofixedstrings-n}
-
-Converts a String type argument to a FixedString(N) type (a string with fixed length N). N must be a constant.
-If the string has fewer bytes than N, it is padded with null bytes to the right. If the string has more bytes than N, an exception is thrown.
-
-## toStringCutToZero(s) {#tostringcuttozeros}
-
-Accepts a String or FixedString argument. Returns the String with the content truncated at the first zero byte found.
-
-Example:
-
-``` sql
-SELECT toFixedString('foo', 8) AS s, toStringCutToZero(s) AS s_cut
-```
-
-``` text
-┌─s─────────────┬─s_cut─┐
-│ foo\0\0\0\0\0 │ foo   │
-└───────────────┴───────┘
-```
-
-``` sql
-SELECT toFixedString('foo\0bar', 8) AS s, toStringCutToZero(s) AS s_cut
-```
-
-``` text
-┌─s──────────┬─s_cut─┐
-│ foo\0bar\0 │ foo   │
-└────────────┴───────┘
-```
-
-## reinterpretAsUInt(8\|16\|32\|64) {#reinterpretasuint8163264}
-
-## reinterpretAsInt(8\|16\|32\|64) {#reinterpretasint8163264}
-
-## reinterpretAsFloat(32\|64) {#reinterpretasfloat3264}
-
-## reinterpretAsDate {#reinterpretasdate}
-
-## reinterpretAsDateTime {#reinterpretasdatetime}
-
-These functions accept a string and interpret the bytes placed at the beginning of the string as a number in host order (little endian). If the string isn’t long enough, the functions work as if the string is padded with the necessary number of null bytes. If the string is longer than needed, the extra bytes are ignored. A date is interpreted as the number of days since the beginning of the Unix Epoch, and a date with time is interpreted as the number of seconds since the beginning of the Unix Epoch.
-
-## reinterpretAsString {#type_conversion_functions-reinterpretAsString}
-
-This function accepts a number or date or date with time, and returns a string containing bytes representing the corresponding value in host order (little endian). Null bytes are dropped from the end. For example, a UInt32 type value of 255 is a string that is one byte long.
-
-## reinterpretAsFixedString {#reinterpretasfixedstring}
-
-This function accepts a number or date or date with time, and returns a FixedString containing bytes representing the corresponding value in host order (little endian). Null bytes are dropped from the end. For example, a UInt32 type value of 255 is a FixedString that is one byte long.
-
-## CAST(x, t) {#type_conversion_function-cast}
-
-Converts ‘x’ to the ‘t’ data type. The syntax CAST(x AS t) is also supported.
-
-Example:
-
-``` sql
-SELECT
-    '2016-06-15 23:00:00' AS timestamp,
-    CAST(timestamp AS DateTime) AS datetime,
-    CAST(timestamp AS Date) AS date,
-    CAST(timestamp, 'String') AS string,
-    CAST(timestamp, 'FixedString(22)') AS fixed_string
-```
-
-``` text
-┌─timestamp───────────┬────────────datetime─┬───────date─┬─string──────────────┬─fixed_string──────────────┐
-│ 2016-06-15 23:00:00 │ 2016-06-15 23:00:00 │ 2016-06-15 │ 2016-06-15 23:00:00 │ 2016-06-15 23:00:00\0\0\0 │
-└─────────────────────┴─────────────────────┴────────────┴─────────────────────┴───────────────────────────┘
-```
-
-Conversion to FixedString(N) only works for arguments of type String or FixedString(N).
-
-Type conversion to [Nullable](../../data_types/nullable.md) and back is supported. 
Example:
-
-``` sql
-SELECT toTypeName(x) FROM t_null
-```
-
-``` text
-┌─toTypeName(x)─┐
-│ Int8          │
-│ Int8          │
-└───────────────┘
-```
-
-``` sql
-SELECT toTypeName(CAST(x, 'Nullable(UInt16)')) FROM t_null
-```
-
-``` text
-┌─toTypeName(CAST(x, 'Nullable(UInt16)'))─┐
-│ Nullable(UInt16)                        │
-│ Nullable(UInt16)                        │
-└─────────────────────────────────────────┘
-```
-
-## toInterval(Year\|Quarter\|Month\|Week\|Day\|Hour\|Minute\|Second) {#function-tointerval}
-
-Converts a Number type argument to an [Interval](../../data_types/special_data_types/interval.md) data type.
-
-**Syntax**
-
-``` sql
-toIntervalSecond(number)
-toIntervalMinute(number)
-toIntervalHour(number)
-toIntervalDay(number)
-toIntervalWeek(number)
-toIntervalMonth(number)
-toIntervalQuarter(number)
-toIntervalYear(number)
-```
-
-**Parameters**
-
-- `number` — Duration of interval. Positive integer number.
-
-**Returned values**
-
-- The value in `Interval` data type.
-
-**Example**
-
-``` sql
-WITH
-    toDate('2019-01-01') AS date,
-    INTERVAL 1 WEEK AS interval_week,
-    toIntervalWeek(1) AS interval_to_week
-SELECT
-    date + interval_week,
-    date + interval_to_week
-```
-
-``` text
-┌─plus(date, interval_week)─┬─plus(date, interval_to_week)─┐
-│                2019-01-08 │                   2019-01-08 │
-└───────────────────────────┴──────────────────────────────┘
-```
-
-## parseDateTimeBestEffort {#parsedatetimebesteffort}
-
-Converts a date and time in the [String](../../data_types/string.md) representation to the [DateTime](../../data_types/datetime.md#data_type-datetime) data type.
-
-The function parses [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601), [RFC 1123 - 5.2.14 RFC-822 Date and Time Specification](https://tools.ietf.org/html/rfc1123#page-55), ClickHouse's and some other date and time formats.
-
-**Syntax**
-
-```sql
-parseDateTimeBestEffort(time_string [, time_zone]);
-```
-
-**Parameters**
-
-- `time_string` — String containing a date and time to convert. [String](../../data_types/string.md).
-- `time_zone` — Time zone. The function parses `time_string` according to the time zone. [String](../../data_types/string.md).
-
-**Supported non-standard formats**
-
-- A string containing 9..10 digit [unix timestamp](https://en.wikipedia.org/wiki/Unix_time).
-- A string with a date and a time component: `YYYYMMDDhhmmss`, `DD/MM/YYYY hh:mm:ss`, `DD-MM-YY hh:mm`, `YYYY-MM-DD hh:mm:ss`, etc.
-- A string with a date, but no time component: `YYYY`, `YYYYMM`, `YYYY*MM`, `DD/MM/YYYY`, `DD-MM-YY` etc.
-- A string with a day and time: `DD`, `DD hh`, `DD hh:mm`. In this case `YYYY-MM` are substituted as `2000-01`.
-- A string that includes the date and time along with time zone offset information: `YYYY-MM-DD hh:mm:ss ±h:mm`, etc. For example, `2020-12-12 17:36:00 -5:00`.
-
-For all formats with a separator, the function parses month names expressed by their full name or by the first three letters of a month name. Examples: `24/DEC/18`, `24-Dec-18`, `01-September-2018`.
-
-**Returned value**
-
-- `time_string` converted to the `DateTime` data type.
- -**Examples** - -Query: - -```sql -SELECT parseDateTimeBestEffort('12/12/2020 12:12:57') -AS parseDateTimeBestEffort; -``` - -Result: - -```text -┌─parseDateTimeBestEffort─┐ -│ 2020-12-12 12:12:57 │ -└─────────────────────────┘ -``` - -Query: - -```sql -SELECT parseDateTimeBestEffort('Sat, 18 Aug 2018 07:22:16 GMT', 'Europe/Moscow') -AS parseDateTimeBestEffort -``` - -Result: - -```text -┌─parseDateTimeBestEffort─┐ -│ 2018-08-18 10:22:16 │ -└─────────────────────────┘ -``` - -Query: - -```sql -SELECT parseDateTimeBestEffort('1284101485') -AS parseDateTimeBestEffort -``` - -Result: - -```text -┌─parseDateTimeBestEffort─┐ -│ 2015-07-07 12:04:41 │ -└─────────────────────────┘ -``` - -Query: - -```sql -SELECT parseDateTimeBestEffort('2018-12-12 10:12:12') -AS parseDateTimeBestEffort -``` - -Result: - -```text -┌─parseDateTimeBestEffort─┐ -│ 2018-12-12 10:12:12 │ -└─────────────────────────┘ -``` - -Query: - -```sql -SELECT parseDateTimeBestEffort('10 20:19') -``` - -Result: - -```text -┌─parseDateTimeBestEffort('10 20:19')─┐ -│ 2000-01-10 20:19:00 │ -└─────────────────────────────────────┘ -``` - -**See Also** - -- [ISO 8601 announcement by @xkcd](https://xkcd.com/1179/) -- [RFC 1123](https://tools.ietf.org/html/rfc1123) -- [toDate](#todate) -- [toDateTime](#todatetime) - -## parseDateTimeBestEffortOrNull {#parsedatetimebesteffortornull} - -Same as for [parseDateTimeBestEffort](#parsedatetimebesteffort) except that it returns null when it encounters a date format that cannot be processed. - -## parseDateTimeBestEffortOrZero {#parsedatetimebesteffortorzero} - -Same as for [parseDateTimeBestEffort](#parsedatetimebesteffort) except that it returns zero date or zero date time when it encounters a date format that cannot be processed. - -[Original article](https://clickhouse.tech/docs/en/query_language/functions/type_conversion_functions/) diff --git a/docs/fa/query_language/functions/url_functions.md b/docs/fa/query_language/functions/url_functions.md deleted file mode 100644 index fe1c9c14c7d..00000000000 --- a/docs/fa/query_language/functions/url_functions.md +++ /dev/null @@ -1,206 +0,0 @@ ---- -en_copy: true ---- - -# Functions for working with URLs {#functions-for-working-with-urls} - -All these functions don’t follow the RFC. They are maximally simplified for improved performance. - -## Functions that Extract Parts of a URL {#functions-that-extract-parts-of-a-url} - -If the relevant part isn’t present in a URL, an empty string is returned. - -### protocol {#protocol} - -Extracts the protocol from a URL. - -Examples of typical returned values: http, https, ftp, mailto, tel, magnet… - -### domain {#domain} - -Extracts the hostname from a URL. - -``` sql -domain(url) -``` - -**Parameters** - -- `url` — URL. Type: [String](../../data_types/string.md). - -The URL can be specified with or without a scheme. Examples: - -``` text -svn+ssh://some.svn-hosting.com:80/repo/trunk -some.svn-hosting.com:80/repo/trunk -https://yandex.com/time/ -``` - -For these examples, the `domain` function returns the following results: - -``` text -some.svn-hosting.com -some.svn-hosting.com -yandex.com -``` - -**Returned values** - -- Host name. If ClickHouse can parse the input string as a URL. -- Empty string. If ClickHouse can’t parse the input string as a URL. - -Type: `String`. 
-
-**Example**
-
-``` sql
-SELECT domain('svn+ssh://some.svn-hosting.com:80/repo/trunk')
-```
-
-``` text
-┌─domain('svn+ssh://some.svn-hosting.com:80/repo/trunk')─┐
-│ some.svn-hosting.com                                   │
-└────────────────────────────────────────────────────────┘
-```
-
-### domainWithoutWWW {#domainwithoutwww}
-
-Returns the domain and removes no more than one ‘www.’ from the beginning of it, if present.
-
-### topLevelDomain {#topleveldomain}
-
-Extracts the top-level domain from a URL.
-
-``` sql
-topLevelDomain(url)
-```
-
-**Parameters**
-
-- `url` — URL. Type: [String](../../data_types/string.md).
-
-The URL can be specified with or without a scheme. Examples:
-
-``` text
-svn+ssh://some.svn-hosting.com:80/repo/trunk
-some.svn-hosting.com:80/repo/trunk
-https://yandex.com/time/
-```
-
-**Returned values**
-
-- Domain name. If ClickHouse can parse the input string as a URL.
-- Empty string. If ClickHouse cannot parse the input string as a URL.
-
-Type: `String`.
-
-**Example**
-
-``` sql
-SELECT topLevelDomain('svn+ssh://www.some.svn-hosting.com:80/repo/trunk')
-```
-
-``` text
-┌─topLevelDomain('svn+ssh://www.some.svn-hosting.com:80/repo/trunk')─┐
-│ com                                                                │
-└────────────────────────────────────────────────────────────────────┘
-```
-
-### firstSignificantSubdomain {#firstsignificantsubdomain}
-
-Returns the “first significant subdomain”. This is a non-standard concept specific to Yandex.Metrica. The first significant subdomain is a second-level domain if it is ‘com’, ‘net’, ‘org’, or ‘co’. Otherwise, it is a third-level domain. For example, `firstSignificantSubdomain('https://news.yandex.ru/') = 'yandex'`, `firstSignificantSubdomain('https://news.yandex.com.tr/') = 'yandex'`. The list of “insignificant” second-level domains and other implementation details may change in the future.
-
-### cutToFirstSignificantSubdomain {#cuttofirstsignificantsubdomain}
-
-Returns the part of the domain that includes top-level subdomains up to the “first significant subdomain” (see the explanation above).
-
-For example, `cutToFirstSignificantSubdomain('https://news.yandex.com.tr/') = 'yandex.com.tr'`.
-
-### path {#path}
-
-Returns the path. Example: `/top/news.html` The path does not include the query string.
-
-### pathFull {#pathfull}
-
-The same as above, but including query string and fragment. Example: /top/news.html?page=2\#comments
-
-### queryString {#querystring}
-
-Returns the query string. Example: page=1&lr=213. The query string does not include the initial question mark, nor \# and everything after \#.
-
-### fragment {#fragment}
-
-Returns the fragment identifier. The fragment does not include the initial hash symbol.
-
-### queryStringAndFragment {#querystringandfragment}
-
-Returns the query string and fragment identifier. Example: page=1\#29390.
-
-### extractURLParameter(URL, name) {#extracturlparameterurl-name}
-
-Returns the value of the ‘name’ parameter in the URL, if present. Otherwise, an empty string. If there are many parameters with this name, it returns the first occurrence. This function works under the assumption that the parameter name is encoded in the URL exactly the same way as in the passed argument.
-
-### extractURLParameters(URL) {#extracturlparametersurl}
-
-Returns an array of name=value strings corresponding to the URL parameters. The values are not decoded in any way.
-
-### extractURLParameterNames(URL) {#extracturlparameternamesurl}
-
-Returns an array of name strings corresponding to the names of URL parameters. The values are not decoded in any way.
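-
-A hypothetical illustration of the three parameter functions above (not part of the original reference). For a URL with the query string `page=1&lr=213`, the expected results are '213', ['page=1','lr=213'] and ['page','lr'] respectively:
-
-``` sql
-SELECT
-    extractURLParameter('http://example.com/?page=1&lr=213', 'lr') AS value,
-    extractURLParameters('http://example.com/?page=1&lr=213') AS pairs,
-    extractURLParameterNames('http://example.com/?page=1&lr=213') AS names
-```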
-
-### URLHierarchy(URL) {#urlhierarchyurl}
-
-Returns an array containing the URL, truncated at the end by the symbols `/` and `?` in the path and query string. Consecutive separator characters are counted as one. The cut is made in the position after all the consecutive separator characters.
-
-### URLPathHierarchy(URL) {#urlpathhierarchyurl}
-
-The same as above, but without the protocol and host in the result. The / element (root) is not included. Example: the function is used to implement tree reports by URL in Yandex.Metrica.
-
-``` text
-URLPathHierarchy('https://example.com/browse/CONV-6788') =
-[
-    '/browse/',
-    '/browse/CONV-6788'
-]
-```
-
-### decodeURLComponent(URL) {#decodeurlcomponenturl}
-
-Returns the decoded URL.
-Example:
-
-``` sql
-SELECT decodeURLComponent('http://127.0.0.1:8123/?query=SELECT%201%3B') AS DecodedURL;
-```
-
-``` text
-┌─DecodedURL─────────────────────────────┐
-│ http://127.0.0.1:8123/?query=SELECT 1; │
-└────────────────────────────────────────┘
-```
-
-## Functions that remove part of a URL {#functions-that-remove-part-of-a-url}
-
-If the URL doesn’t contain the relevant part, it remains unchanged.
-
-### cutWWW {#cutwww}
-
-Removes no more than one ‘www.’ from the beginning of the URL’s domain, if present.
-
-### cutQueryString {#cutquerystring}
-
-Removes the query string. The question mark is also removed.
-
-### cutFragment {#cutfragment}
-
-Removes the fragment identifier. The number sign is also removed.
-
-### cutQueryStringAndFragment {#cutquerystringandfragment}
-
-Removes the query string and fragment identifier. The question mark and number sign are also removed.
-
-### cutURLParameter(URL, name) {#cuturlparameterurl-name}
-
-Removes the ‘name’ URL parameter, if present. This function works under the assumption that the parameter name is encoded in the URL exactly the same way as in the passed argument.
-
-[Original article](https://clickhouse.tech/docs/en/query_language/functions/url_functions/)
diff --git a/docs/fa/query_language/functions/ym_dict_functions.md b/docs/fa/query_language/functions/ym_dict_functions.md
deleted file mode 100644
index 2e2ba15d59e..00000000000
--- a/docs/fa/query_language/functions/ym_dict_functions.md
+++ /dev/null
@@ -1,154 +0,0 @@
----
-en_copy: true
----
-
-# Functions for working with Yandex.Metrica dictionaries {#functions-for-working-with-yandex-metrica-dictionaries}
-
-In order for the functions below to work, the server config must specify the paths and addresses for getting all the Yandex.Metrica dictionaries. The dictionaries are loaded at the first call of any of these functions. If the reference lists can’t be loaded, an exception is thrown.
-
-For information about creating reference lists, see the section “Dictionaries”.
-
-## Multiple geobases {#multiple-geobases}
-
-ClickHouse supports working with multiple alternative geobases (regional hierarchies) simultaneously, in order to support various perspectives on which countries certain regions belong to.
-
-The ‘clickhouse-server’ config specifies the file with the regional hierarchy: `/opt/geo/regions_hierarchy.txt`
-
-Besides this file, it also searches for files nearby that have the \_ symbol and any suffix appended to the name (before the file extension).
-For example, it will also find the file `/opt/geo/regions_hierarchy_ua.txt`, if present.
-
-`ua` is called the dictionary key. For a dictionary without a suffix, the key is an empty string.
-
-All the dictionaries are reloaded at runtime (once every certain number of seconds, as defined in the builtin\_dictionaries\_reload\_interval config parameter, or once an hour by default). However, the list of available dictionaries is defined once, when the server starts.
-
-All functions for working with regions have an optional argument at the end – the dictionary key. It is referred to as the geobase.
-Example:
-
-``` sql
-regionToCountry(RegionID) – Uses the default dictionary: /opt/geo/regions_hierarchy.txt
-regionToCountry(RegionID, '') – Uses the default dictionary: /opt/geo/regions_hierarchy.txt
-regionToCountry(RegionID, 'ua') – Uses the dictionary for the 'ua' key: /opt/geo/regions_hierarchy_ua.txt
-```
-
-### regionToCity(id\[, geobase\]) {#regiontocityid-geobase}
-
-Accepts a UInt32 number – the region ID from the Yandex geobase. If this region is a city or part of a city, it returns the region ID for the appropriate city. Otherwise, returns 0.
-
-### regionToArea(id\[, geobase\]) {#regiontoareaid-geobase}
-
-Converts a region to an area (type 5 in the geobase). In every other way, this function is the same as ‘regionToCity’.
-
-``` sql
-SELECT DISTINCT regionToName(regionToArea(toUInt32(number), 'ua'))
-FROM system.numbers
-LIMIT 15
-```
-
-``` text
-┌─regionToName(regionToArea(toUInt32(number), \'ua\'))─┐
-│ │
-│ Moscow and Moscow region │
-│ St. Petersburg and Leningrad region │
-│ Belgorod region │
-│ Ivanovsk region │
-│ Kaluga region │
-│ Kostroma region │
-│ Kursk region │
-│ Lipetsk region │
-│ Orlov region │
-│ Ryazan region │
-│ Smolensk region │
-│ Tambov region │
-│ Tver region │
-│ Tula region │
-└──────────────────────────────────────────────────────┘
-```
-
-### regionToDistrict(id\[, geobase\]) {#regiontodistrictid-geobase}
-
-Converts a region to a federal district (type 4 in the geobase). In every other way, this function is the same as ‘regionToCity’.
-
-``` sql
-SELECT DISTINCT regionToName(regionToDistrict(toUInt32(number), 'ua'))
-FROM system.numbers
-LIMIT 15
-```
-
-``` text
-┌─regionToName(regionToDistrict(toUInt32(number), \'ua\'))─┐
-│ │
-│ Central federal district │
-│ Northwest federal district │
-│ South federal district │
-│ North Caucases federal district │
-│ Privolga federal district │
-│ Ural federal district │
-│ Siberian federal district │
-│ Far East federal district │
-│ Scotland │
-│ Faroe Islands │
-│ Flemish region │
-│ Brussels capital region │
-│ Wallonia │
-│ Federation of Bosnia and Herzegovina │
-└──────────────────────────────────────────────────────────┘
-```
-
-### regionToCountry(id\[, geobase\]) {#regiontocountryid-geobase}
-
-Converts a region to a country. In every other way, this function is the same as ‘regionToCity’.
-Example: `regionToCountry(toUInt32(213)) = 225` converts Moscow (213) to Russia (225).
-
-### regionToContinent(id\[, geobase\]) {#regiontocontinentid-geobase}
-
-Converts a region to a continent. In every other way, this function is the same as ‘regionToCity’.
-Example: `regionToContinent(toUInt32(213)) = 10001` converts Moscow (213) to Eurasia (10001).
-
-### regionToTopContinent {#regiontotopcontinent}
-
-Finds the highest continent in the hierarchy for the region.
-
-**Syntax**
-
-```sql
-regionToTopContinent(id[, geobase]);
-```
-
-**Parameters**
-
-- `id` — Region ID from the Yandex geobase. [UInt32](../../data_types/int_uint.md).
-- `geobase` — Dictionary key. See [Multiple Geobases](#multiple-geobases). [String](../../data_types/string.md). Optional.
-
-
-**Returned value**
-
-- Identifier of the top-level continent (the last one as you climb the hierarchy of regions).
-- 0, if there is none.
-
-Type: `UInt32`.
-
-
-### regionToPopulation(id\[, geobase\]) {#regiontopopulationid-geobase}
-
-Gets the population for a region.
-The population can be recorded in files with the geobase. See the section “External dictionaries”.
-If the population is not recorded for the region, it returns 0.
-In the Yandex geobase, the population might be recorded for child regions, but not for parent regions.
-
-### regionIn(lhs, rhs\[, geobase\]) {#regioninlhs-rhs-geobase}
-
-Checks whether a ‘lhs’ region belongs to a ‘rhs’ region. Returns a UInt8 number equal to 1 if it belongs, or 0 if it doesn’t belong.
-The relationship is reflexive – any region also belongs to itself.
-
-### regionHierarchy(id\[, geobase\]) {#regionhierarchyid-geobase}
-
-Accepts a UInt32 number – the region ID from the Yandex geobase. Returns an array of region IDs consisting of the passed region and all parents along the chain.
-Example: `regionHierarchy(toUInt32(213)) = [213,1,3,225,10001,10000]`.
-
-### regionToName(id\[, lang\]) {#regiontonameid-lang}
-
-Accepts a UInt32 number – the region ID from the Yandex geobase. A string with the name of the language can be passed as a second argument. Supported languages are: ru, en, ua, uk, by, kz, tr. If the second argument is omitted, the language ‘ru’ is used. If the language is not supported, an exception is thrown. Returns a string – the name of the region in the corresponding language. If the region with the specified ID doesn’t exist, an empty string is returned.
-
-`ua` and `uk` both mean Ukrainian.
-
-[Original article](https://clickhouse.tech/docs/en/query_language/functions/ym_dict_functions/)
diff --git a/docs/fa/query_language/index.md b/docs/fa/query_language/index.md
deleted file mode 100644
index 07950fb56a6..00000000000
--- a/docs/fa/query_language/index.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-en_copy: true
----
-
-# SQL Reference {#sql-reference}
-
-- [SELECT](select.md)
-- [INSERT INTO](insert_into.md)
-- [CREATE](create.md)
-- [ALTER](alter.md#query_language_queries_alter)
-- [Other types of queries](misc.md)
-
-[Original article](https://clickhouse.tech/docs/en/query_language/)
diff --git a/docs/fa/query_language/insert_into.md b/docs/fa/query_language/insert_into.md
deleted file mode 100644
index 8ac7d0abf86..00000000000
--- a/docs/fa/query_language/insert_into.md
+++ /dev/null
@@ -1,77 +0,0 @@
----
-en_copy: true
----
-
-## INSERT {#insert}
-
-Adding data.
-
-Basic query format:
-
-``` sql
-INSERT INTO [db.]table [(c1, c2, c3)] VALUES (v11, v12, v13), (v21, v22, v23), ...
-```
-
-The query can specify a list of columns to insert `[(c1, c2, c3)]`. In this case, the rest of the columns are filled with:
-
-- The values calculated from the `DEFAULT` expressions specified in the table definition.
-- Zeros and empty strings, if `DEFAULT` expressions are not defined.
-
-If [strict\_insert\_defaults=1](../operations/settings/settings.md), columns that do not have `DEFAULT` defined must be listed in the query.
-
-Data can be passed to the INSERT in any [format](../interfaces/formats.md#formats) supported by ClickHouse. The format must be specified explicitly in the query:
-
-``` sql
-INSERT INTO [db.]table [(c1, c2, c3)] FORMAT format_name data_set
-```
-
-For example, the following query format is identical to the basic version of INSERT … VALUES:
-
-``` sql
-INSERT INTO [db.]table [(c1, c2, c3)] FORMAT Values (v11, v12, v13), (v21, v22, v23), ...
-```
-
-ClickHouse removes all spaces and one line feed (if there is one) before the data. When forming a query, we recommend putting the data on a new line after the query operators (this is important if the data begins with spaces).
-
-Example:
-
-``` sql
-INSERT INTO t FORMAT TabSeparated
-11 Hello, world!
-22 Qwerty
-```
-
-You can insert data separately from the query by using the command-line client or the HTTP interface. For more information, see the section “[Interfaces](../interfaces/index.md#interfaces)”.
-
-### Constraints {#constraints}
-
-If the table has [constraints](create.md#constraints), their expressions are checked for each row of inserted data. If any of those constraints is not satisfied, the server raises an exception containing the constraint name and expression, and the query is stopped.
-
-### Inserting The Results of `SELECT` {#insert_query_insert-select}
-
-``` sql
-INSERT INTO [db.]table [(c1, c2, c3)] SELECT ...
-```
-
-Columns are mapped according to their position in the SELECT clause. However, their names in the SELECT expression and the table for INSERT may differ. If necessary, type casting is performed.
-
-None of the data formats except Values allow setting values to expressions such as `now()`, `1 + 2`, and so on. The Values format allows limited use of expressions, but this is not recommended, because in this case inefficient code is used for their execution.
-
-Other queries for modifying data parts are not supported: `UPDATE`, `DELETE`, `REPLACE`, `MERGE`, `UPSERT`, `INSERT UPDATE`.
-However, you can delete old data using `ALTER TABLE ... DROP PARTITION`.
-
-The `FORMAT` clause must be specified at the end of the query if the `SELECT` clause contains the table function [input()](table_functions/input.md).
-
-### Performance Considerations {#performance-considerations}
-
-`INSERT` sorts the input data by primary key and splits it into partitions by a partition key. If you insert data into several partitions at once, it can significantly reduce the performance of the `INSERT` query. To avoid this:
-
-- Add data in fairly large batches, such as 100,000 rows at a time.
-- Group data by a partition key before uploading it to ClickHouse.
-
-Performance will not decrease if:
-
-- Data is added in real time.
-- You upload data that is usually sorted by time.
-
-[Original article](https://clickhouse.tech/docs/en/query_language/insert_into/)
diff --git a/docs/fa/query_language/misc.md b/docs/fa/query_language/misc.md
deleted file mode 100644
index 152dc0dd3b4..00000000000
--- a/docs/fa/query_language/misc.md
+++ /dev/null
@@ -1,249 +0,0 @@
----
-en_copy: true
----
-
-# Miscellaneous Queries {#miscellaneous-queries}
-
-## ATTACH {#attach}
-
-This query is exactly the same as `CREATE`, but
-
-- Instead of the word `CREATE` it uses the word `ATTACH`.
-- The query does not create data on the disk, but assumes that data is already in the appropriate places, and just adds information about the table to the server.
-  After executing an ATTACH query, the server will know about the existence of the table.
-
-If the table was previously detached (`DETACH`), meaning that its structure is known, you can use shorthand without defining the structure.
-
-``` sql
-ATTACH TABLE [IF NOT EXISTS] [db.]name [ON CLUSTER cluster]
-```
-
-This query is used when starting the server. The server stores table metadata as files with `ATTACH` queries, which it simply runs at launch (with the exception of system tables, which are explicitly created on the server).
-
-## CHECK TABLE {#check-table}
-
-Checks if the data in the table is corrupted.
-
-``` sql
-CHECK TABLE [db.]name
-```
-
-The `CHECK TABLE` query compares actual file sizes with the expected values which are stored on the server. If the file sizes do not match the stored values, it means the data is corrupted. This can be caused, for example, by a system crash during query execution.
-
-The query response contains the `result` column with a single row. The row has a value of
-[Boolean](../data_types/boolean.md) type:
-
-- 0 - The data in the table is corrupted.
-- 1 - The data maintains integrity.
-
-The `CHECK TABLE` query supports the following table engines:
-
-- [Log](../operations/table_engines/log.md)
-- [TinyLog](../operations/table_engines/tinylog.md)
-- [StripeLog](../operations/table_engines/stripelog.md)
-- [MergeTree family](../operations/table_engines/mergetree.md)
-
-Running it over tables with other table engines causes an exception.
-
-Engines from the `*Log` family don’t provide automatic data recovery on failure. Use the `CHECK TABLE` query to track data loss in a timely manner.
-
-For `MergeTree` family engines, the `CHECK TABLE` query shows a check status for every individual data part of a table on the local server.
-
-**If the data is corrupted**
-
-If the table is corrupted, you can copy the non-corrupted data to another table. To do this:
-
-1. Create a new table with the same structure as the damaged table. To do this, execute the query `CREATE TABLE <new_table_name> AS <damaged_table_name>`.
-2. Set the [max\_threads](../operations/settings/settings.md#settings-max_threads) value to 1 to process the next query in a single thread. To do this, run the query `SET max_threads = 1`.
-3. Execute the query `INSERT INTO <new_table_name> SELECT * FROM <damaged_table_name>`. This query copies the non-corrupted data from the damaged table into the new table. Only the data before the corrupted part will be copied.
-4. Restart the `clickhouse-client` to reset the `max_threads` value.
-
-## DESCRIBE TABLE {#misc-describe-table}
-
-``` sql
-DESC|DESCRIBE TABLE [db.]table [INTO OUTFILE filename] [FORMAT format]
-```
-
-Returns the following `String` type columns:
-
-- `name` — Column name.
-- `type` — Column type.
-- `default_type` — Clause that is used in [default expression](create.md#create-default-values) (`DEFAULT`, `MATERIALIZED` or `ALIAS`). The column contains an empty string if the default expression isn’t specified.
-- `default_expression` — Value specified in the `DEFAULT` clause.
-- `comment_expression` — Comment text.
-
-Nested data structures are output in “expanded” format. Each column is shown separately, with the name after a dot.
-
-## DETACH {#detach}
-
-Deletes information about the ‘name’ table from the server. The server stops knowing about the table’s existence.
-
-``` sql
-DETACH TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster]
-```
-
-This does not delete the table’s data or metadata. On the next server launch, the server will read the metadata and find out about the table again.
-Similarly, a “detached” table can be re-attached using the `ATTACH` query (with the exception of system tables, which do not have metadata stored for them).
-
-There is no `DETACH DATABASE` query.
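-
-For example, a minimal sketch of the detach/attach round trip (the table name `hits` is hypothetical):
-
-``` sql
-DETACH TABLE hits;  -- the server forgets the table; data and metadata stay on disk
-ATTACH TABLE hits;  -- re-attach using the stored metadata, no structure required
-```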
-
-## DROP {#drop}
-
-This query has two types: `DROP DATABASE` and `DROP TABLE`.
-
-``` sql
-DROP DATABASE [IF EXISTS] db [ON CLUSTER cluster]
-```
-
-Deletes all tables inside the ‘db’ database, then deletes the ‘db’ database itself.
-If `IF EXISTS` is specified, it doesn’t return an error if the database doesn’t exist.
-
-``` sql
-DROP [TEMPORARY] TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster]
-```
-
-Deletes the table.
-If `IF EXISTS` is specified, it doesn’t return an error if the table doesn’t exist or the database doesn’t exist.
-
-    DROP DICTIONARY [IF EXISTS] [db.]name
-
-Deletes the dictionary.
-If `IF EXISTS` is specified, it doesn’t return an error if the dictionary doesn’t exist or the database doesn’t exist.
-
-## EXISTS {#exists}
-
-``` sql
-EXISTS [TEMPORARY] [TABLE|DICTIONARY] [db.]name [INTO OUTFILE filename] [FORMAT format]
-```
-
-Returns a single `UInt8`-type column, which contains the single value `0` if the table or database doesn’t exist, or `1` if the table exists in the specified database.
-
-## KILL QUERY {#kill-query}
-
-``` sql
-KILL QUERY [ON CLUSTER cluster]
-  WHERE <where expression to SELECT FROM system.processes query>
-  [SYNC|ASYNC|TEST]
-  [FORMAT format]
-```
-
-Attempts to forcibly terminate the currently running queries.
-The queries to terminate are selected from the system.processes table using the criteria defined in the `WHERE` clause of the `KILL` query.
-
-Examples:
-
-``` sql
--- Forcibly terminates all queries with the specified query_id:
-KILL QUERY WHERE query_id='2-857d-4a57-9ee0-327da5d60a90'
-
--- Synchronously terminates all queries run by 'username':
-KILL QUERY WHERE user='username' SYNC
-```
-
-Read-only users can only stop their own queries.
-
-By default, the asynchronous version of queries is used (`ASYNC`), which doesn’t wait for confirmation that queries have stopped.
-
-The synchronous version (`SYNC`) waits for all queries to stop and displays information about each process as it stops.
-The response contains the `kill_status` column, which can take the following values:
-
-1. ‘finished’ – The query was terminated successfully.
-2. ‘waiting’ – Waiting for the query to end after sending it a signal to terminate.
-3. The other values explain why the query can’t be stopped.
-
-A test query (`TEST`) only checks the user’s rights and displays a list of queries to stop.
-
-## KILL MUTATION {#kill-mutation}
-
-``` sql
-KILL MUTATION [ON CLUSTER cluster]
-  WHERE <where expression to SELECT FROM system.mutations query>
-  [TEST]
-  [FORMAT format]
-```
-
-Tries to cancel and remove [mutations](alter.md#alter-mutations) that are currently executing. Mutations to cancel are selected from the [`system.mutations`](../operations/system_tables.md#system_tables-mutations) table using the filter specified by the `WHERE` clause of the `KILL` query.
-
-A test query (`TEST`) only checks the user’s rights and displays a list of queries to stop.
-
-Examples:
-
-``` sql
--- Cancel and remove all mutations of the single table:
-KILL MUTATION WHERE database = 'default' AND table = 'table'
-
--- Cancel the specific mutation:
-KILL MUTATION WHERE database = 'default' AND table = 'table' AND mutation_id = 'mutation_3.txt'
-```
-
-The query is useful when a mutation is stuck and cannot finish (e.g. if some function in the mutation query throws an exception when applied to the data contained in the table).
-
-Changes already made by the mutation are not rolled back.
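-
-To find candidates before killing, it can help to inspect unfinished mutations first; a minimal sketch (assuming a stuck mutation shows up as unfinished, with a failure reason):
-
-``` sql
-SELECT database, table, mutation_id, parts_to_do, latest_fail_reason
-FROM system.mutations
-WHERE is_done = 0
-```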
-
-## OPTIMIZE {#misc_operations-optimize}
-
-``` sql
-OPTIMIZE TABLE [db.]name [ON CLUSTER cluster] [PARTITION partition | PARTITION ID 'partition_id'] [FINAL] [DEDUPLICATE]
-```
-
-This query tries to initialize an unscheduled merge of data parts for tables with a table engine from the [MergeTree](../operations/table_engines/mergetree.md) family.
-
-The `OPTIMIZE` query is also supported for the [MaterializedView](../operations/table_engines/materializedview.md) and the [Buffer](../operations/table_engines/buffer.md) engines. Other table engines aren’t supported.
-
-When `OPTIMIZE` is used with the [ReplicatedMergeTree](../operations/table_engines/replication.md) family of table engines, ClickHouse creates a task for merging and waits for execution on all nodes (if the `replication_alter_partitions_sync` setting is enabled).
-
-- If `OPTIMIZE` doesn’t perform a merge for any reason, it doesn’t notify the client. To enable notifications, use the [optimize\_throw\_if\_noop](../operations/settings/settings.md#setting-optimize_throw_if_noop) setting.
-- If you specify a `PARTITION`, only the specified partition is optimized. [How to set partition expression](alter.md#alter-how-to-specify-part-expr).
-- If you specify `FINAL`, optimization is performed even when all the data is already in one part.
-- If you specify `DEDUPLICATE`, then completely identical rows will be deduplicated (all columns are compared); this makes sense only for the MergeTree engine.
-
-!!! warning "Warning"
-    `OPTIMIZE` can’t fix the “Too many parts” error.
-
-## RENAME {#misc_operations-rename}
-
-Renames one or more tables.
-
-``` sql
-RENAME TABLE [db11.]name11 TO [db12.]name12, [db21.]name21 TO [db22.]name22, ... [ON CLUSTER cluster]
-```
-
-All tables are renamed under global locking. Renaming tables is a light operation. If you indicate another database after `TO`, the table is moved to this database. However, the directories with databases must reside in the same file system (otherwise, an error is returned).
-
-## SET {#query-set}
-
-``` sql
-SET param = value
-```
-
-Assigns `value` to the `param` [setting](../operations/settings/index.md) for the current session. You cannot change [server settings](../operations/server_settings/index.md) this way.
-
-You can also set all the values from the specified settings profile in a single query.
-
-``` sql
-SET profile = 'profile-name-from-the-settings-file'
-```
-
-For more information, see [Settings](../operations/settings/settings.md).
-
-## TRUNCATE {#truncate}
-
-``` sql
-TRUNCATE TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster]
-```
-
-Removes all data from a table. When the clause `IF EXISTS` is omitted, the query returns an error if the table does not exist.
-
-The `TRUNCATE` query is not supported for [View](../operations/table_engines/view.md), [File](../operations/table_engines/file.md), [URL](../operations/table_engines/url.md) and [Null](../operations/table_engines/null.md) table engines.
-
-## USE {#use}
-
-``` sql
-USE db
-```
-
-Lets you set the current database for the session.
-The current database is used for searching for tables if the database is not explicitly defined in the query with a dot before the table name.
-This query can’t be used with the HTTP protocol, since there is no concept of a session.
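-
-A minimal sketch of how the current database affects name resolution (`db1`, `db2` and `events` are hypothetical names):
-
-``` sql
-USE db1;
-SELECT count() FROM events;      -- resolves to db1.events
-SELECT count() FROM db2.events;  -- a qualified name overrides the current database
-```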
- -[Original article](https://clickhouse.tech/docs/en/query_language/misc/) diff --git a/docs/fa/query_language/operators.md b/docs/fa/query_language/operators.md deleted file mode 100644 index 4f29bbb3cad..00000000000 --- a/docs/fa/query_language/operators.md +++ /dev/null @@ -1,275 +0,0 @@ ---- -en_copy: true ---- - -# Operators {#operators} - -All operators are transformed to their corresponding functions at the query parsing stage in accordance with their precedence and associativity. -Groups of operators are listed in order of priority (the higher it is in the list, the earlier the operator is connected to its arguments). - -## Access Operators {#access-operators} - -`a[N]` – Access to an element of an array. The `arrayElement(a, N)` function. - -`a.N` – Access to a tuple element. The `tupleElement(a, N)` function. - -## Numeric Negation Operator {#numeric-negation-operator} - -`-a` – The `negate (a)` function. - -## Multiplication and Division Operators {#multiplication-and-division-operators} - -`a * b` – The `multiply (a, b)` function. - -`a / b` – The `divide(a, b)` function. - -`a % b` – The `modulo(a, b)` function. - -## Addition and Subtraction Operators {#addition-and-subtraction-operators} - -`a + b` – The `plus(a, b)` function. - -`a - b` – The `minus(a, b)` function. - -## Comparison Operators {#comparison-operators} - -`a = b` – The `equals(a, b)` function. - -`a == b` – The `equals(a, b)` function. - -`a != b` – The `notEquals(a, b)` function. - -`a <> b` – The `notEquals(a, b)` function. - -`a <= b` – The `lessOrEquals(a, b)` function. - -`a >= b` – The `greaterOrEquals(a, b)` function. - -`a < b` – The `less(a, b)` function. - -`a > b` – The `greater(a, b)` function. - -`a LIKE s` – The `like(a, b)` function. - -`a NOT LIKE s` – The `notLike(a, b)` function. - -`a BETWEEN b AND c` – The same as `a >= b AND a <= c`. - -`a NOT BETWEEN b AND c` – The same as `a < b OR a > c`. - -## Operators for Working With Data Sets {#operators-for-working-with-data-sets} - -*See [IN operators](select.md#select-in-operators).* - -`a IN ...` – The `in(a, b)` function. - -`a NOT IN ...` – The `notIn(a, b)` function. - -`a GLOBAL IN ...` – The `globalIn(a, b)` function. - -`a GLOBAL NOT IN ...` – The `globalNotIn(a, b)` function. - -## Operators for Working with Dates and Times {#operators-datetime} - -### EXTRACT {#operator-extract} - -``` sql -EXTRACT(part FROM date); -``` - -Extracts a part from a given date. For example, you can retrieve a month from a given date, or a second from a time. - -The `part` parameter specifies which part of the date to retrieve. The following values are available: - -- `DAY` — The day of the month. Possible values: 1–31. -- `MONTH` — The number of a month. Possible values: 1–12. -- `YEAR` — The year. -- `SECOND` — The second. Possible values: 0–59. -- `MINUTE` — The minute. Possible values: 0–59. -- `HOUR` — The hour. Possible values: 0–23. - -The `part` parameter is case-insensitive. - -The `date` parameter specifies the date or the time to process. Either [Date](../data_types/date.md) or [DateTime](../data_types/datetime.md) type is supported. - -Examples: - -``` sql -SELECT EXTRACT(DAY FROM toDate('2017-06-15')); -SELECT EXTRACT(MONTH FROM toDate('2017-06-15')); -SELECT EXTRACT(YEAR FROM toDate('2017-06-15')); -``` - -In the following example we create a table and insert into it a value with the `DateTime` type. 
-
-``` sql
-CREATE TABLE test.Orders
-(
-    OrderId UInt64,
-    OrderName String,
-    OrderDate DateTime
-)
-ENGINE = Log;
-```
-
-``` sql
-INSERT INTO test.Orders VALUES (1, 'Jarlsberg Cheese', toDateTime('2008-10-11 13:23:44'));
-```
-
-``` sql
-SELECT
-    toYear(OrderDate) AS OrderYear,
-    toMonth(OrderDate) AS OrderMonth,
-    toDayOfMonth(OrderDate) AS OrderDay,
-    toHour(OrderDate) AS OrderHour,
-    toMinute(OrderDate) AS OrderMinute,
-    toSecond(OrderDate) AS OrderSecond
-FROM test.Orders;
-```
-
-``` text
-┌─OrderYear─┬─OrderMonth─┬─OrderDay─┬─OrderHour─┬─OrderMinute─┬─OrderSecond─┐
-│ 2008 │ 10 │ 11 │ 13 │ 23 │ 44 │
-└───────────┴────────────┴──────────┴───────────┴─────────────┴─────────────┘
-```
-
-You can see more examples in [tests](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00619_extract.sql).
-
-### INTERVAL {#operator-interval}
-
-Creates an [Interval](../data_types/special_data_types/interval.md)-type value that should be used in arithmetical operations with [Date](../data_types/date.md) and [DateTime](../data_types/datetime.md)-type values.
-
-Types of intervals:
-- `SECOND`
-- `MINUTE`
-- `HOUR`
-- `DAY`
-- `WEEK`
-- `MONTH`
-- `QUARTER`
-- `YEAR`
-
-!!! warning "Warning"
-    Intervals with different types can’t be combined. You can’t use expressions like `INTERVAL 4 DAY 1 HOUR`. Express intervals in units that are smaller than or equal to the smallest unit of the interval, for example, `INTERVAL 25 HOUR`. You can use consecutive operations, as in the example below.
-
-Example:
-
-``` sql
-SELECT now() AS current_date_time, current_date_time + INTERVAL 4 DAY + INTERVAL 3 HOUR
-```
-
-``` text
-┌───current_date_time─┬─plus(plus(now(), toIntervalDay(4)), toIntervalHour(3))─┐
-│ 2019-10-23 11:16:28 │ 2019-10-27 14:16:28 │
-└─────────────────────┴────────────────────────────────────────────────────────┘
-```
-
-**See Also**
-
-- [Interval](../data_types/special_data_types/interval.md) data type
-- [toInterval](functions/type_conversion_functions.md#function-tointerval) type conversion functions
-
-## Logical Negation Operator {#logical-negation-operator}
-
-`NOT a` – The `not(a)` function.
-
-## Logical AND Operator {#logical-and-operator}
-
-`a AND b` – The `and(a, b)` function.
-
-## Logical OR Operator {#logical-or-operator}
-
-`a OR b` – The `or(a, b)` function.
-
-## Conditional Operator {#conditional-operator}
-
-`a ? b : c` – The `if(a, b, c)` function.
-
-Note:
-
-The conditional operator calculates the values of `b` and `c`, then checks whether condition `a` is met, and then returns the corresponding value. If `b` or `c` is an [arrayJoin()](functions/array_join.md#functions_arrayjoin) function, each row will be replicated regardless of the `a` condition.
-
-## Conditional Expression {#operator_case}
-
-``` sql
-CASE [x]
-    WHEN a THEN b
-    [WHEN ... THEN ...]
-    [ELSE c]
-END
-```
-
-If `x` is specified, then the `transform(x, [a, ...], [b, ...], c)` function is used. Otherwise – `multiIf(a, b, ..., c)`.
-
-If there is no `ELSE c` clause in the expression, the default value is `NULL`.
-
-The `transform` function does not work with `NULL`.
-
-## Concatenation Operator {#concatenation-operator}
-
-`s1 || s2` – The `concat(s1, s2)` function.
-
-## Lambda Creation Operator {#lambda-creation-operator}
-
-`x -> expr` – The `lambda(x, expr)` function.
-
-The following operators do not have a priority, since they are brackets:
-
-## Array Creation Operator {#array-creation-operator}
-
-`[x1, ...]` – The `array(x1, ...)` function.
-
-## Tuple Creation Operator {#tuple-creation-operator}
-
-`(x1, x2, ...)` – The `tuple(x1, x2, ...)` function.
-
-## Associativity {#associativity}
-
-All binary operators have left associativity. For example, `1 + 2 + 3` is transformed to `plus(plus(1, 2), 3)`.
-Sometimes this doesn’t work the way you expect. For example, `SELECT 4 > 2 > 3` will result in 0.
-
-For efficiency, the `and` and `or` functions accept any number of arguments. The corresponding chains of `AND` and `OR` operators are transformed to a single call of these functions.
-
-## Checking for `NULL` {#checking-for-null}
-
-ClickHouse supports the `IS NULL` and `IS NOT NULL` operators.
-
-### IS NULL {#operator-is-null}
-
-- For [Nullable](../data_types/nullable.md) type values, the `IS NULL` operator returns:
-    - `1`, if the value is `NULL`.
-    - `0` otherwise.
-- For other values, the `IS NULL` operator always returns `0`.
-
-
-``` sql
-SELECT x+100 FROM t_null WHERE y IS NULL
-```
-
-``` text
-┌─plus(x, 100)─┐
-│ 101 │
-└──────────────┘
-```
-
-### IS NOT NULL {#is-not-null}
-
-- For [Nullable](../data_types/nullable.md) type values, the `IS NOT NULL` operator returns:
-    - `0`, if the value is `NULL`.
-    - `1` otherwise.
-- For other values, the `IS NOT NULL` operator always returns `1`.
-
-
-``` sql
-SELECT * FROM t_null WHERE y IS NOT NULL
-```
-
-``` text
-┌─x─┬─y─┐
-│ 2 │ 3 │
-└───┴───┘
-```
-
-[Original article](https://clickhouse.tech/docs/en/query_language/operators/)
diff --git a/docs/fa/query_language/select.md b/docs/fa/query_language/select.md
deleted file mode 100644
index c917ba811bf..00000000000
--- a/docs/fa/query_language/select.md
+++ /dev/null
@@ -1,607 +0,0 @@
----
-en_copy: true
----
-
-# SELECT Queries Syntax {#select-queries-syntax}
-
-`SELECT` performs data retrieval.
-
-``` sql
-[WITH expr_list|(subquery)]
-SELECT [DISTINCT] expr_list
-[FROM [db.]table | (subquery) | table_function] [FINAL]
-[SAMPLE sample_coeff]
-[ARRAY JOIN ...]
-[GLOBAL] [ANY|ALL] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER] JOIN (subquery)|table USING columns_list
-[PREWHERE expr]
-[WHERE expr]
-[GROUP BY expr_list] [WITH TOTALS]
-[HAVING expr]
-[ORDER BY expr_list]
-[LIMIT [offset_value, ]n BY columns]
-[LIMIT [n, ]m]
-[UNION ALL ...]
-[INTO OUTFILE filename]
-[FORMAT format]
-```
-
-All the clauses are optional, except for the required list of expressions immediately after SELECT.
-The clauses below are described in almost the same order as in the query execution pipeline.
-
-If the query omits the `DISTINCT`, `GROUP BY` and `ORDER BY` clauses and the `IN` and `JOIN` subqueries, the query will be completely stream processed, using O(1) amount of RAM.
-Otherwise, the query might consume a lot of RAM if the appropriate restrictions are not specified: `max_memory_usage`, `max_rows_to_group_by`, `max_rows_to_sort`, `max_rows_in_distinct`, `max_bytes_in_distinct`, `max_rows_in_set`, `max_bytes_in_set`, `max_rows_in_join`, `max_bytes_in_join`, `max_bytes_before_external_sort`, `max_bytes_before_external_group_by`. For more information, see the section “Settings”. It is possible to use external sorting (saving temporary tables to a disk) and external aggregation. The system does not have “merge join”.
-
-### WITH Clause {#with-clause}
-
-This section provides support for Common Table Expressions ([CTE](https://en.wikipedia.org/wiki/Hierarchical_and_recursive_queries_in_SQL)), with some limitations:
-1. Recursive queries are not supported
-2. When a subquery is used inside the WITH section, its result should be a scalar with exactly one row
-3. Results of expressions are not available in subqueries
-Results of WITH clause expressions can be used inside the SELECT clause.
-
-Example 1: Using constant expression as “variable”
-
-``` sql
-WITH '2019-08-01 15:23:00' as ts_upper_bound
-SELECT *
-FROM hits
-WHERE
-    EventDate = toDate(ts_upper_bound) AND
-    EventTime <= ts_upper_bound
-```
-
-Example 2: Evicting the sum(bytes) expression result from the SELECT clause column list
-
-``` sql
-WITH sum(bytes) as s
-SELECT
-    formatReadableSize(s),
-    table
-FROM system.parts
-GROUP BY table
-ORDER BY s
-```
-
-Example 3: Using results of scalar subquery
-
-``` sql
-/* this example would return TOP 10 of most huge tables */
-WITH
-    (
-        SELECT sum(bytes)
-        FROM system.parts
-        WHERE active
-    ) AS total_disk_usage
-SELECT
-    (sum(bytes) / total_disk_usage) * 100 AS table_disk_usage,
-    table
-FROM system.parts
-GROUP BY table
-ORDER BY table_disk_usage DESC
-LIMIT 10
-```
-
-Example 4: Re-using expression in subquery
-As a workaround for the current limitation on expression usage in subqueries, you may duplicate it.
-
-``` sql
-WITH ['hello'] AS hello
-SELECT
-    hello,
-    *
-FROM
-(
-    WITH ['hello'] AS hello
-    SELECT hello
-)
-```
-
-``` text
-┌─hello─────┬─hello─────┐
-│ ['hello'] │ ['hello'] │
-└───────────┴───────────┘
-```
-
-### FROM Clause {#select-from}
-
-If the FROM clause is omitted, data will be read from the `system.one` table.
-The `system.one` table contains exactly one row (this table fulfills the same purpose as the DUAL table found in other DBMSs).
-
-The `FROM` clause specifies the source to read data from:
-
-- Table
-- Subquery
-- [Table function](table_functions/index.md)
-
-`ARRAY JOIN` and the regular `JOIN` may also be included (see below).
-
-Instead of a table, the `SELECT` subquery may be specified in parentheses.
-In contrast to standard SQL, a synonym does not need to be specified after a subquery.
-
-To execute a query, all the columns listed in the query are extracted from the appropriate table. Any columns not needed for the external query are thrown out of the subqueries.
-If a query does not list any columns (for example, `SELECT count() FROM t`), some column is extracted from the table anyway (the smallest one is preferred), in order to calculate the number of rows.
-
-#### FINAL Modifier {#select-from-final}
-
-Applicable when selecting data from tables from the [MergeTree](../operations/table_engines/mergetree.md)-engine family other than `GraphiteMergeTree`. When `FINAL` is specified, ClickHouse fully merges the data before returning the result and thus performs all data transformations that happen during merges for the given table engine.
-
-Also supported for:
-- [Replicated](../operations/table_engines/replication.md) versions of `MergeTree` engines.
-- [View](../operations/table_engines/view.md), [Buffer](../operations/table_engines/buffer.md), [Distributed](../operations/table_engines/distributed.md), and [MaterializedView](../operations/table_engines/materializedview.md) engines that operate over other engines, provided they were created over `MergeTree`-engine tables.
-
-Queries that use `FINAL` are executed more slowly than similar queries that don’t, because:
-
-- The query is executed in a single thread, and data is merged during query execution.
-- Queries with `FINAL` read primary key columns in addition to the columns specified in the query.
-
-In most cases, avoid using `FINAL`.
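-
-For illustration, a minimal sketch on a hypothetical `visits` table backed by a `ReplacingMergeTree` engine; without `FINAL`, the count may include duplicate rows whose replacing merge has not happened yet:
-
-``` sql
-SELECT count()
-FROM visits
-FINAL
-```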
-
-### SAMPLE Clause {#select-sample-clause}
-
-The `SAMPLE` clause allows for approximated query processing.
-
-When data sampling is enabled, the query is not performed on all the data, but only on a certain fraction of data (sample). For example, if you need to calculate statistics for all the visits, it is enough to execute the query on the 1/10 fraction of all the visits and then multiply the result by 10.
-
-Approximated query processing can be useful in the following cases:
-
-- When you have strict timing requirements (like \<100ms) but you can’t justify the cost of additional hardware resources to meet them.
-- When your raw data is not accurate, so approximation doesn’t noticeably degrade the quality.
-- Business requirements target approximate results (for cost-effectiveness, or in order to market exact results to premium users).
-
-!!! note "Note"
-    You can only use sampling with the tables in the [MergeTree](../operations/table_engines/mergetree.md) family, and only if the sampling expression was specified during table creation (see [MergeTree engine](../operations/table_engines/mergetree.md#table_engine-mergetree-creating-a-table)).
-
-The features of data sampling are listed below:
-
-- Data sampling is a deterministic mechanism. The result of the same `SELECT .. SAMPLE` query is always the same.
-- Sampling works consistently for different tables. For tables with a single sampling key, a sample with the same coefficient always selects the same subset of possible data. For example, a sample of user IDs takes rows with the same subset of all the possible user IDs from different tables. This means that you can use the sample in subqueries in the [IN](#select-in-operators) clause. Also, you can join samples using the [JOIN](#select-join) clause.
-- Sampling allows reading less data from a disk. Note that you must specify the sampling key correctly. For more information, see [Creating a MergeTree Table](../operations/table_engines/mergetree.md#table_engine-mergetree-creating-a-table).
-
-For the `SAMPLE` clause, the following syntax is supported:
-
-| SAMPLE Clause Syntax | Description |
-|----------------------|-------------|
-| `SAMPLE k` | Here `k` is the number from 0 to 1. The query is executed on `k` fraction of data. For example, `SAMPLE 0.1` runs the query on 10% of data. [Read more](#select-sample-k) |
-| `SAMPLE n` | Here `n` is a sufficiently large integer. The query is executed on a sample of at least `n` rows (but not significantly more than this). For example, `SAMPLE 10000000` runs the query on a minimum of 10,000,000 rows. [Read more](#select-sample-n) |
-| `SAMPLE k OFFSET m` | Here `k` and `m` are the numbers from 0 to 1. The query is executed on a sample of `k` fraction of the data. The data used for the sample is offset by `m` fraction. [Read more](#select-sample-offset) |
-
-#### SAMPLE k {#select-sample-k}
-
-Here `k` is the number from 0 to 1 (both fractional and decimal notations are supported). For example, `SAMPLE 1/2` or `SAMPLE 0.5`.
-
-In a `SAMPLE k` clause, the sample is taken from the `k` fraction of data. The example is shown below:
-
-``` sql
-SELECT
-    Title,
-    count() * 10 AS PageViews
-FROM hits_distributed
-SAMPLE 0.1
-WHERE
-    CounterID = 34
-GROUP BY Title
-ORDER BY PageViews DESC LIMIT 1000
-```
-
-In this example, the query is executed on a sample from 0.1 (10%) of data. Values of aggregate functions are not corrected automatically, so to get an approximate result, the value `count()` is manually multiplied by 10.
-
-#### SAMPLE n {#select-sample-n}
-
-Here `n` is a sufficiently large integer. For example, `SAMPLE 10000000`.
-
-In this case, the query is executed on a sample of at least `n` rows (but not significantly more than this). For example, `SAMPLE 10000000` runs the query on a minimum of 10,000,000 rows.
-
-Since the minimum unit for data reading is one granule (its size is set by the `index_granularity` setting), it makes sense to set a sample that is much larger than the size of the granule.
-
-When using the `SAMPLE n` clause, you don’t know which relative percent of data was processed. So you don’t know the coefficient the aggregate functions should be multiplied by. Use the `_sample_factor` virtual column to get the approximate result.
-
-The `_sample_factor` column contains relative coefficients that are calculated dynamically. This column is created automatically when you [create](../operations/table_engines/mergetree.md#table_engine-mergetree-creating-a-table) a table with the specified sampling key. The usage examples of the `_sample_factor` column are shown below.
-
-Let’s consider the table `visits`, which contains the statistics about site visits. The first example shows how to calculate the number of page views:
-
-``` sql
-SELECT sum(PageViews * _sample_factor)
-FROM visits
-SAMPLE 10000000
-```
-
-The next example shows how to calculate the total number of visits:
-
-``` sql
-SELECT sum(_sample_factor)
-FROM visits
-SAMPLE 10000000
-```
-
-The example below shows how to calculate the average session duration. Note that you don’t need to use the relative coefficient to calculate the average values.
-
-``` sql
-SELECT avg(Duration)
-FROM visits
-SAMPLE 10000000
-```
-
-#### SAMPLE k OFFSET m {#select-sample-offset}
-
-Here `k` and `m` are numbers from 0 to 1. Examples are shown below.
-
-**Example 1**
-
-``` sql
-SAMPLE 1/10
-```
-
-In this example, the sample is 1/10th of all data:
-
-`[++------------------]`
-
-**Example 2**
-
-``` sql
-SAMPLE 1/10 OFFSET 1/2
-```
-
-Here, a sample of 10% is taken from the second half of the data.
-
-`[----------++--------]`
-
-### ARRAY JOIN Clause {#select-array-join-clause}
-
-Allows executing `JOIN` with an array or nested data structure. The intent is similar to the [arrayJoin](functions/array_join.md#functions_arrayjoin) function, but its functionality is broader.
-
-``` sql
-SELECT <expr_list>
-FROM <left_subquery>
-[LEFT] ARRAY JOIN <array>
-[WHERE|PREWHERE <expr>]
-...
-```
-
-You can specify only a single `ARRAY JOIN` clause in a query.
-
-The query execution order is optimized when running `ARRAY JOIN`.
Although `ARRAY JOIN` must always be specified before the `WHERE/PREWHERE` clause, it can be performed either before `WHERE/PREWHERE` (if the result is needed in this clause), or after completing it (to reduce the volume of calculations). The processing order is controlled by the query optimizer. - -Supported types of `ARRAY JOIN` are listed below: - -- `ARRAY JOIN` - In this case, empty arrays are not included in the result of `JOIN`. -- `LEFT ARRAY JOIN` - The result of `JOIN` contains rows with empty arrays. The value for an empty array is set to the default value for the array element type (usually 0, empty string or NULL). - -The examples below demonstrate the usage of the `ARRAY JOIN` and `LEFT ARRAY JOIN` clauses. Let’s create a table with an [Array](../data_types/array.md) type column and insert values into it: - -``` sql -CREATE TABLE arrays_test -( - s String, - arr Array(UInt8) -) ENGINE = Memory; - -INSERT INTO arrays_test -VALUES ('Hello', [1,2]), ('World', [3,4,5]), ('Goodbye', []); -``` - -``` text -┌─s───────────┬─arr─────┐ -│ Hello │ [1,2] │ -│ World │ [3,4,5] │ -│ Goodbye │ [] │ -└─────────────┴─────────┘ -``` - -The example below uses the `ARRAY JOIN` clause: - -``` sql -SELECT s, arr -FROM arrays_test -ARRAY JOIN arr; -``` - -``` text -┌─s─────┬─arr─┐ -│ Hello │ 1 │ -│ Hello │ 2 │ -│ World │ 3 │ -│ World │ 4 │ -│ World │ 5 │ -└───────┴─────┘ -``` - -The next example uses the `LEFT ARRAY JOIN` clause: - -``` sql -SELECT s, arr -FROM arrays_test -LEFT ARRAY JOIN arr; -``` - -``` text -┌─s───────────┬─arr─┐ -│ Hello │ 1 │ -│ Hello │ 2 │ -│ World │ 3 │ -│ World │ 4 │ -│ World │ 5 │ -│ Goodbye │ 0 │ -└─────────────┴─────┘ -``` - -#### Using Aliases {#using-aliases} - -An alias can be specified for an array in the `ARRAY JOIN` clause. In this case, an array item can be accessed by this alias, but the array itself is accessed by the original name. Example: - -``` sql -SELECT s, arr, a -FROM arrays_test -ARRAY JOIN arr AS a; -``` - -``` text -┌─s─────┬─arr─────┬─a─┐ -│ Hello │ [1,2] │ 1 │ -│ Hello │ [1,2] │ 2 │ -│ World │ [3,4,5] │ 3 │ -│ World │ [3,4,5] │ 4 │ -│ World │ [3,4,5] │ 5 │ -└───────┴─────────┴───┘ -``` - -Using aliases, you can perform `ARRAY JOIN` with an external array. For example: - -``` sql -SELECT s, arr_external -FROM arrays_test -ARRAY JOIN [1, 2, 3] AS arr_external; -``` - -``` text -┌─s───────────┬─arr_external─┐ -│ Hello │ 1 │ -│ Hello │ 2 │ -│ Hello │ 3 │ -│ World │ 1 │ -│ World │ 2 │ -│ World │ 3 │ -│ Goodbye │ 1 │ -│ Goodbye │ 2 │ -│ Goodbye │ 3 │ -└─────────────┴──────────────┘ -``` - -Multiple arrays can be comma-separated in the `ARRAY JOIN` clause. In this case, `JOIN` is performed with them simultaneously (the direct sum, not the cartesian product). Note that all the arrays must have the same size. 
Example:
-
-``` sql
-SELECT s, arr, a, num, mapped
-FROM arrays_test
-ARRAY JOIN arr AS a, arrayEnumerate(arr) AS num, arrayMap(x -> x + 1, arr) AS mapped;
-```
-
-``` text
-┌─s─────┬─arr─────┬─a─┬─num─┬─mapped─┐
-│ Hello │ [1,2] │ 1 │ 1 │ 2 │
-│ Hello │ [1,2] │ 2 │ 2 │ 3 │
-│ World │ [3,4,5] │ 3 │ 1 │ 4 │
-│ World │ [3,4,5] │ 4 │ 2 │ 5 │
-│ World │ [3,4,5] │ 5 │ 3 │ 6 │
-└───────┴─────────┴───┴─────┴────────┘
-```
-
-The example below uses the [arrayEnumerate](functions/array_functions.md#array_functions-arrayenumerate) function:
-
-``` sql
-SELECT s, arr, a, num, arrayEnumerate(arr)
-FROM arrays_test
-ARRAY JOIN arr AS a, arrayEnumerate(arr) AS num;
-```
-
-``` text
-┌─s─────┬─arr─────┬─a─┬─num─┬─arrayEnumerate(arr)─┐
-│ Hello │ [1,2] │ 1 │ 1 │ [1,2] │
-│ Hello │ [1,2] │ 2 │ 2 │ [1,2] │
-│ World │ [3,4,5] │ 3 │ 1 │ [1,2,3] │
-│ World │ [3,4,5] │ 4 │ 2 │ [1,2,3] │
-│ World │ [3,4,5] │ 5 │ 3 │ [1,2,3] │
-└───────┴─────────┴───┴─────┴─────────────────────┘
-```
-
-#### ARRAY JOIN With Nested Data Structure {#array-join-with-nested-data-structure}
-
-`ARRAY JOIN` also works with [nested data structures](../data_types/nested_data_structures/nested.md). Example:
-
-``` sql
-CREATE TABLE nested_test
-(
-    s String,
-    nest Nested(
-    x UInt8,
-    y UInt32)
-) ENGINE = Memory;
-
-INSERT INTO nested_test
-VALUES ('Hello', [1,2], [10,20]), ('World', [3,4,5], [30,40,50]), ('Goodbye', [], []);
-```
-
-``` text
-┌─s───────┬─nest.x──┬─nest.y─────┐
-│ Hello │ [1,2] │ [10,20] │
-│ World │ [3,4,5] │ [30,40,50] │
-│ Goodbye │ [] │ [] │
-└─────────┴─────────┴────────────┘
-```
-
-``` sql
-SELECT s, `nest.x`, `nest.y`
-FROM nested_test
-ARRAY JOIN nest;
-```
-
-``` text
-┌─s─────┬─nest.x─┬─nest.y─┐
-│ Hello │ 1 │ 10 │
-│ Hello │ 2 │ 20 │
-│ World │ 3 │ 30 │
-│ World │ 4 │ 40 │
-│ World │ 5 │ 50 │
-└───────┴────────┴────────┘
-```
-
-When specifying names of nested data structures in `ARRAY JOIN`, the meaning is the same as `ARRAY JOIN` with all the array elements that it consists of. Examples are listed below:
-
-``` sql
-SELECT s, `nest.x`, `nest.y`
-FROM nested_test
-ARRAY JOIN `nest.x`, `nest.y`;
-```
-
-``` text
-┌─s─────┬─nest.x─┬─nest.y─┐
-│ Hello │ 1 │ 10 │
-│ Hello │ 2 │ 20 │
-│ World │ 3 │ 30 │
-│ World │ 4 │ 40 │
-│ World │ 5 │ 50 │
-└───────┴────────┴────────┘
-```
-
-This variation also makes sense:
-
-``` sql
-SELECT s, `nest.x`, `nest.y`
-FROM nested_test
-ARRAY JOIN `nest.x`;
-```
-
-``` text
-┌─s─────┬─nest.x─┬─nest.y─────┐
-│ Hello │ 1 │ [10,20] │
-│ Hello │ 2 │ [10,20] │
-│ World │ 3 │ [30,40,50] │
-│ World │ 4 │ [30,40,50] │
-│ World │ 5 │ [30,40,50] │
-└───────┴────────┴────────────┘
-```
-
-An alias may be used for a nested data structure, in order to select either the `JOIN` result or the source array.
Example:
-
-``` sql
-SELECT s, `n.x`, `n.y`, `nest.x`, `nest.y`
-FROM nested_test
-ARRAY JOIN nest AS n;
-```
-
-``` text
-┌─s─────┬─n.x─┬─n.y─┬─nest.x──┬─nest.y─────┐
-│ Hello │ 1 │ 10 │ [1,2] │ [10,20] │
-│ Hello │ 2 │ 20 │ [1,2] │ [10,20] │
-│ World │ 3 │ 30 │ [3,4,5] │ [30,40,50] │
-│ World │ 4 │ 40 │ [3,4,5] │ [30,40,50] │
-│ World │ 5 │ 50 │ [3,4,5] │ [30,40,50] │
-└───────┴─────┴─────┴─────────┴────────────┘
-```
-
-Example of using the [arrayEnumerate](functions/array_functions.md#array_functions-arrayenumerate) function:
-
-``` sql
-SELECT s, `n.x`, `n.y`, `nest.x`, `nest.y`, num
-FROM nested_test
-ARRAY JOIN nest AS n, arrayEnumerate(`nest.x`) AS num;
-```
-
-``` text
-┌─s─────┬─n.x─┬─n.y─┬─nest.x──┬─nest.y─────┬─num─┐
-│ Hello │ 1 │ 10 │ [1,2] │ [10,20] │ 1 │
-│ Hello │ 2 │ 20 │ [1,2] │ [10,20] │ 2 │
-│ World │ 3 │ 30 │ [3,4,5] │ [30,40,50] │ 1 │
-│ World │ 4 │ 40 │ [3,4,5] │ [30,40,50] │ 2 │
-│ World │ 5 │ 50 │ [3,4,5] │ [30,40,50] │ 3 │
-└───────┴─────┴─────┴─────────┴────────────┴─────┘
-```
-
-### JOIN Clause {#select-join}
-
-Joins the data in the normal [SQL JOIN](https://en.wikipedia.org/wiki/Join_(SQL)) sense.
-
-!!! info "Note"
-    Not related to [ARRAY JOIN](#select-array-join-clause).
-
-``` sql
-SELECT <expr_list>
-FROM <left_subquery>
-[GLOBAL] [ANY|ALL] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER] JOIN <right_subquery>
-(ON <expr_list>)|(USING <column_list>) ...
-```
-
-The table names can be specified instead of `<left_subquery>` and `<right_subquery>`. This is equivalent to the `SELECT * FROM table` subquery, except in a special case when the table has the [Join](../operations/table_engines/join.md) engine – an array prepared for joining.
-
-#### Supported Types of `JOIN` {#select-join-types}
-
-- `INNER JOIN` (or `JOIN`)
-- `LEFT JOIN` (or `LEFT OUTER JOIN`)
-- `RIGHT JOIN` (or `RIGHT OUTER JOIN`)
-- `FULL JOIN` (or `FULL OUTER JOIN`)
-- `CROSS JOIN` (or `,` )
-
-See the standard [SQL JOIN](https://en.wikipedia.org/wiki/Join_(SQL)) description.
-
-#### Multiple JOIN {#multiple-join}
-
-When performing queries, ClickHouse rewrites multi-table joins into a sequence of two-table joins. For example, if there are four tables to join, ClickHouse joins the first and the second, then joins the result with the third table, and at the last step joins the fourth one.
-
-If a query contains the `WHERE` clause, ClickHouse tries to push down filters from this clause through the intermediate join. If it cannot apply the filter to each intermediate join, ClickHouse applies the filters after all joins are completed.
-
-We recommend the `JOIN ON` or `JOIN USING` syntax for creating queries. For example:
-
-``` sql
-SELECT * FROM t1 JOIN t2 ON t1.a = t2.a JOIN t3 ON t1.a = t3.a
-```
-
-You can use comma-separated lists of tables in the `FROM` clause. For example:
-
-``` sql
-SELECT * FROM t1, t2, t3 WHERE t1.a = t2.a AND t1.a = t3.a
-```
-
-Don’t mix these syntaxes.
-
-ClickHouse doesn’t directly support syntax with commas, so we don’t recommend using them. The algorithm tries to rewrite the query in terms of `CROSS JOIN` and `INNER JOIN` clauses and then proceeds to query processing. When rewriting the query, ClickHouse tries to optimize performance and memory consumption. By default, ClickHouse treats commas as an `INNER JOIN` clause and converts `INNER JOIN` to `CROSS JOIN` when the algorithm cannot guarantee that `INNER JOIN` returns the required data.
-
-#### Strictness {#select-join-strictness}
-
-- `ALL` — If the right table has several matching rows, ClickHouse creates a [Cartesian product](https://en.wikipedia.org/wiki/Cartesian_product) from matching rows.
This is the standard `JOIN` behavior in SQL.
-- `ANY` — If the right table has several matching rows, only the first one found is joined. If the right table has only one matching row, the results of queries with `ANY` and `ALL` keywords are the same.
-- `ASOF` — For joining sequences with a non-exact match. `ASOF JOIN` usage is described below.
-
-**ASOF JOIN Usage**
-
-`ASOF JOIN` is useful when you need to join records that have no exact match.
-
-Tables for `ASOF JOIN` must have an ordered sequence column. This column cannot be the only column in a table, and should be one of the data types: `UInt32`, `UInt64`, `Float32`, `Float64`, `Date`, and `DateTime`.
-
-Syntax `ASOF JOIN ... ON`:
-
-``` sql
-SELECT expressions_list
-FROM table_1
-ASOF LEFT JOIN table_2
-ON equi_cond AND closest_match_cond
-```
-
-You can use any number of equality conditions and exactly one closest match condition. For example, `SELECT count() FROM table_1 ASOF LEFT JOIN table_2 ON table_1.a == table_2.b AND table_2.t <= table_1.t`.
-
-Conditions supported for the closest match: `>`, `>=`, `<`, `<=`.
-
-Syntax `ASOF JOIN ... USING`:
-
-``` sql
-SELECT expressions_list
-FROM table_1
-ASOF JOIN table_2
-USING (equi_column1, ... equi_columnN, asof_column)
-```
-
-`ASOF JOIN` uses `equi_columnX` for joining on equality and `asof_column` for joining on the closest match with the `table_1.asof_column >= table_2.asof_column` condition. The `asof_column` column is always the last one in the `USING` clause.
-
-For example, consider the following tables:
-
-``` text
-     table_1                           table_2
-
-  event   | ev_time | user_id       event   | ev_time | user_id
diff --git a/docs/fa/query_language/show.md b/docs/fa/query_language/show.md
deleted file mode 100644
index 840a2fc9766..00000000000
--- a/docs/fa/query_language/show.md
+++ /dev/null
@@ -1,102 +0,0 @@
----
-en_copy: true
----
-
-# SHOW Queries {#show-queries}
-
-## SHOW CREATE TABLE {#show-create-table}
-
-``` sql
-SHOW CREATE [TEMPORARY] [TABLE|DICTIONARY] [db.]table [INTO OUTFILE filename] [FORMAT format]
-```
-
-Returns a single `String`-type ‘statement’ column, which contains a single value – the `CREATE` query used for creating the specified object.
-
-## SHOW DATABASES {#show-databases}
-
-``` sql
-SHOW DATABASES [INTO OUTFILE filename] [FORMAT format]
-```
-
-Prints a list of all databases.
-This query is identical to `SELECT name FROM system.databases [INTO OUTFILE filename] [FORMAT format]`.
-
-## SHOW PROCESSLIST {#show-processlist}
-
-``` sql
-SHOW PROCESSLIST [INTO OUTFILE filename] [FORMAT format]
-```
-
-Outputs the content of the [system.processes](../operations/system_tables.md#system_tables-processes) table, which contains a list of queries that are being processed at the moment, except for `SHOW PROCESSLIST` queries themselves.
-
-The `SELECT * FROM system.processes` query returns data about all the current queries.
-
-Tip (execute in the console):
-
-``` bash
-$ watch -n1 "clickhouse-client --query='SHOW PROCESSLIST'"
-```
-
-## SHOW TABLES {#show-tables}
-
-Displays a list of tables.
-
-``` sql
-SHOW [TEMPORARY] TABLES [{FROM | IN} <db>] [LIKE '<pattern>' | WHERE expr] [LIMIT <N>] [INTO OUTFILE <filename>] [FORMAT <format>]
-```
-
-If the `FROM` clause is not specified, the query returns the list of tables from the current database.
-
-You can get the same results as the `SHOW TABLES` query in the following way:
-
-``` sql
-SELECT name FROM system.tables WHERE database = <db> [AND name LIKE <pattern>] [LIMIT <N>] [INTO OUTFILE <filename>] [FORMAT <format>]
-```
-
-**Example**
-
-The following query selects the first two rows from the list of tables in the `system` database, whose names contain `co`.
-
-``` sql
-SHOW TABLES FROM system LIKE '%co%' LIMIT 2
-```
-
-``` text
-┌─name───────────────────────────┐
-│ aggregate_function_combinators │
-│ collations │
-└────────────────────────────────┘
-```
-
-## SHOW DICTIONARIES {#show-dictionaries}
-
-Displays a list of [external dictionaries](dicts/external_dicts.md).
-
-``` sql
-SHOW DICTIONARIES [FROM <db>] [LIKE '<pattern>'] [LIMIT <N>] [INTO OUTFILE <filename>] [FORMAT <format>]
-```
-
-If the `FROM` clause is not specified, the query returns the list of dictionaries from the current database.
-
-You can get the same results as the `SHOW DICTIONARIES` query in the following way:
-
-``` sql
-SELECT name FROM system.dictionaries WHERE database = <db> [AND name LIKE <pattern>] [LIMIT <N>] [INTO OUTFILE <filename>] [FORMAT <format>]
-```
-
-**Example**
-
-The following query selects the first two rows from the list of dictionaries in the `db` database, whose names contain `reg`.
-
-``` sql
-SHOW DICTIONARIES FROM db LIKE '%reg%' LIMIT 2
-```
-
-``` text
-┌─name─────────┐
-│ regions │
-│ region_names │
-└──────────────┘
-```
-
-[Original article](https://clickhouse.tech/docs/en/query_language/show/)
diff --git a/docs/fa/query_language/syntax.md b/docs/fa/query_language/syntax.md
deleted file mode 100644
index fb86f56e7bd..00000000000
--- a/docs/fa/query_language/syntax.md
+++ /dev/null
@@ -1,184 +0,0 @@
----
-en_copy: true
----
-
-# Syntax {#syntax}
-
-There are two types of parsers in the system: the full SQL parser (a recursive descent parser), and the data format parser (a fast stream parser).
-In all cases except the `INSERT` query, only the full SQL parser is used.
-The `INSERT` query uses both parsers:
-
-``` sql
-INSERT INTO t VALUES (1, 'Hello, world'), (2, 'abc'), (3, 'def')
-```
-
-The `INSERT INTO t VALUES` fragment is parsed by the full parser, and the data `(1, 'Hello, world'), (2, 'abc'), (3, 'def')` is parsed by the fast stream parser. You can also turn on the full parser for the data by using the [input\_format\_values\_interpret\_expressions](../operations/settings/settings.md#settings-input_format_values_interpret_expressions) setting. When `input_format_values_interpret_expressions = 1`, ClickHouse first tries to parse values with the fast stream parser. If it fails, ClickHouse tries to use the full parser for the data, treating it like an SQL [expression](#syntax-expressions).
-
-Data can have any format. When a query is received, the server calculates no more than [max\_query\_size](../operations/settings/settings.md#settings-max_query_size) bytes of the request in RAM (by default, 1 MB), and the rest is stream parsed.
-This means the system doesn’t have problems with large `INSERT` queries, like MySQL does.
-
-When using the `Values` format in an `INSERT` query, it may seem that data is parsed the same as expressions in a `SELECT` query, but this is not true. The `Values` format is much more limited.
-
-Next we will cover the full parser. For more information about format parsers, see the [Formats](../interfaces/formats.md) section.
-
-## Spaces {#spaces}
-
-There may be any number of space symbols between syntactical constructions (including the beginning and end of a query). Space symbols include the space, tab, line feed, CR, and form feed.
-
-## Comments {#comments}
-
-SQL-style and C-style comments are supported.
-SQL-style comments: from `--` to the end of the line. The space after `--` can be omitted.
-C-style comments: from `/*` to `*/`. These comments can be multiline. Spaces are not required here, either.
-
-## Keywords {#syntax-keywords}
-
-Keywords are case-insensitive when they correspond to:
-
-- The SQL standard. For example, `SELECT`, `select` and `SeLeCt` are all valid.
-- The implementation in some popular DBMS (MySQL or Postgres). For example, `DateTime` is the same as `datetime`.
-
-Whether a data type name is case-sensitive can be checked in the `system.data_type_families` table.
-
-In contrast to standard SQL, all other keywords (including function names) are **case-sensitive**.
-
-Keywords are not reserved (they are just parsed as keywords in the corresponding context). If you use [identifiers](#syntax-identifiers) that are the same as keywords, enclose them in quotes. For example, the query `SELECT "FROM" FROM table_name` is valid if the table `table_name` has a column with the name `"FROM"`.
-
-## Identifiers {#syntax-identifiers}
-
-Identifiers are:
-
-- Cluster, database, table, partition and column names.
-- Functions.
-- Data types.
-- [Expression aliases](#syntax-expression_aliases).
-
-Identifiers can be quoted or non-quoted. It is recommended to use non-quoted identifiers.
-
-Non-quoted identifiers must match the regex `^[a-zA-Z_][0-9a-zA-Z_]*$` and cannot be equal to [keywords](#syntax-keywords). Examples: `x`, `_1`, `X_y__Z123_`.
-
-If you want to use identifiers that are the same as keywords, or to use other symbols in identifiers, quote them using double quotes or backticks, for example, `"id"`, `` `id` ``.
-
-## Literals {#literals}
-
-There are numeric, string, compound and `NULL` literals.
-
-### Numeric {#numeric}
-
-A numeric literal is parsed as follows:
-
-- First, as a 64-bit signed number, using the [strtoll](https://en.cppreference.com/w/cpp/string/byte/strtol) function.
-- If unsuccessful, as a 64-bit unsigned number, using the [strtoull](https://en.cppreference.com/w/cpp/string/byte/strtoul) function.
-- If unsuccessful, as a floating-point number, using the [strtod](https://en.cppreference.com/w/cpp/string/byte/strtof) function.
-- Otherwise, an error is returned.
-
-The corresponding value will have the smallest type that the value fits in.
-For example, 1 is parsed as `UInt8`, but 256 is parsed as `UInt16`. For more information, see [Data types](../data_types/index.md).
-
-Examples: `1`, `18446744073709551615`, `0xDEADBEEF`, `01`, `0.1`, `1e100`, `-1e-100`, `inf`, `nan`.
-
-### String {#syntax-string-literal}
-
-Only string literals in single quotes are supported. The enclosed characters can be backslash-escaped. The following escape sequences have a corresponding special value: `\b`, `\f`, `\r`, `\n`, `\t`, `\0`, `\a`, `\v`, `\xHH`. In all other cases, escape sequences in the format `\c`, where `c` is any character, are converted to `c`. This means that you can use the sequences `\'` and `\\`. The value will have the [String](../data_types/string.md) type.
-
-The minimum set of characters that you need to escape in string literals: `'` and `\`. A single quote can be escaped with another single quote; the literals `'It\'s'` and `'It''s'` are equal.
-
-### Compound {#compound}
-
-Constructions are supported for arrays: `[1, 2, 3]` and tuples: `(1, 'Hello, world!', 2)`.
-Actually, these are not literals, but expressions with the array creation operator and the tuple creation operator, respectively.
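Since these are ordinary expressions, the same values can also be produced by calling the `array` and `tuple` functions explicitly; the bracketed form is just shorthand:

``` sql
SELECT array(1, 2, 3), tuple(1, 'Hello, world!', 2)
```

This returns the same `[1,2,3]` and `(1,'Hello, world!',2)` values as the literal forms above.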
-An array must consist of at least one item, and a tuple must have at least two items. -Tuples have a special purpose for use in the `IN` clause of a `SELECT` query. Tuples can be obtained as the result of a query, but they can’t be saved to a database (with the exception of [Memory](../operations/table_engines/memory.md) tables). - -### NULL {#null-literal} - -Indicates that the value is missing. - -In order to store `NULL` in a table field, it must be of the [Nullable](../data_types/nullable.md) type. - -Depending on the data format (input or output), `NULL` may have a different representation. For more information, see the documentation for [data formats](../interfaces/formats.md#formats). - -There are many nuances to processing `NULL`. For example, if at least one of the arguments of a comparison operation is `NULL`, the result of this operation will also be `NULL`. The same is true for multiplication, addition, and other operations. For more information, read the documentation for each operation. - -In queries, you can check `NULL` using the [IS NULL](operators.md#operator-is-null) and [IS NOT NULL](operators.md) operators and the related functions `isNull` and `isNotNull`. - -## Functions {#functions} - -Functions are written like an identifier with a list of arguments (possibly empty) in brackets. In contrast to standard SQL, the brackets are required, even for an empty arguments list. Example: `now()`. -There are regular and aggregate functions (see the section “Aggregate functions”). Some aggregate functions can contain two lists of arguments in brackets. Example: `quantile (0.9) (x)`. These aggregate functions are called “parametric” functions, and the arguments in the first list are called “parameters”. The syntax of aggregate functions without parameters is the same as for regular functions. - -## Operators {#operators} - -Operators are converted to their corresponding functions during query parsing, taking their priority and associativity into account. -For example, the expression `1 + 2 * 3 + 4` is transformed to `plus(plus(1, multiply(2, 3)), 4)`. - -## Data Types and Database Table Engines {#data_types-and-database-table-engines} - -Data types and table engines in the `CREATE` query are written the same way as identifiers or functions. In other words, they may or may not contain an arguments list in brackets. For more information, see the sections “Data types,” “Table engines,” and “CREATE”. - -## Expression Aliases {#syntax-expression_aliases} - -An alias is a user-defined name for an expression in a query. - -``` sql -expr AS alias -``` - -- `AS` — The keyword for defining aliases. You can define the alias for a table name or a column name in a `SELECT` clause without using the `AS` keyword. - - For example, `SELECT table_name_alias.column_name FROM table_name table_name_alias`. - - In the [CAST](functions/type_conversion_functions.md#type_conversion_function-cast) function, the `AS` keyword has another meaning. See the description of the function. - -- `expr` — Any expression supported by ClickHouse. - - For example, `SELECT column_name * 2 AS double FROM some_table`. - -- `alias` — Name for `expr`. Aliases should comply with the [identifiers](#syntax-identifiers) syntax. - - For example, `SELECT "table t".column_name FROM table_name AS "table t"`. - -### Notes on Usage {#notes-on-usage} - -Aliases are global for a query or subquery and you can define an alias in any part of a query for any expression. For example, `SELECT (1 AS n) + 2, n`. 
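A minimal sketch of this global visibility, with the expected result shown as a comment:

``` sql
SELECT (1 AS n) + 2, n   -- returns 3 and 1: n is defined mid-expression but visible everywhere in the query
```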
- -Aliases are not visible in subqueries and between subqueries. For example, while executing the query `SELECT (SELECT sum(b.a) + num FROM b) - a.a AS num FROM a` ClickHouse generates the exception `Unknown identifier: num`. - -If an alias is defined for the result columns in the `SELECT` clause of a subquery, these columns are visible in the outer query. For example, `SELECT n + m FROM (SELECT 1 AS n, 2 AS m)`. - -Be careful with aliases that are the same as column or table names. Let’s consider the following example: - -``` sql -CREATE TABLE t -( - a Int, - b Int -) -ENGINE = TinyLog() -``` - -``` sql -SELECT - argMax(a, b), - sum(b) AS b -FROM t -``` - -``` text -Received exception from server (version 18.14.17): -Code: 184. DB::Exception: Received from localhost:9000, 127.0.0.1. DB::Exception: Aggregate function sum(b) is found inside another aggregate function in query. -``` - -In this example, we declared table `t` with column `b`. Then, when selecting data, we defined the `sum(b) AS b` alias. As aliases are global, ClickHouse substituted the literal `b` in the expression `argMax(a, b)` with the expression `sum(b)`. This substitution caused the exception. - -## Asterisk {#asterisk} - -In a `SELECT` query, an asterisk can replace the expression. For more information, see the section “SELECT”. - -## Expressions {#syntax-expressions} - -An expression is a function, identifier, literal, application of an operator, expression in brackets, subquery, or asterisk. It can also contain an alias. -A list of expressions is one or more expressions separated by commas. -Functions and operators, in turn, can have expressions as arguments. - -[Original article](https://clickhouse.tech/docs/en/query_language/syntax/) diff --git a/docs/fa/query_language/system.md b/docs/fa/query_language/system.md deleted file mode 100644 index a6b72d63ead..00000000000 --- a/docs/fa/query_language/system.md +++ /dev/null @@ -1,110 +0,0 @@ ---- -en_copy: true ---- - -# SYSTEM Queries {#query-language-system} - -- [RELOAD DICTIONARIES](#query_language-system-reload-dictionaries) -- [RELOAD DICTIONARY](#query_language-system-reload-dictionary) -- [DROP DNS CACHE](#query_language-system-drop-dns-cache) -- [DROP MARK CACHE](#query_language-system-drop-mark-cache) -- [FLUSH LOGS](#query_language-system-flush_logs) -- [RELOAD CONFIG](#query_language-system-reload-config) -- [SHUTDOWN](#query_language-system-shutdown) -- [KILL](#query_language-system-kill) -- [STOP DISTRIBUTED SENDS](#query_language-system-stop-distributed-sends) -- [FLUSH DISTRIBUTED](#query_language-system-flush-distributed) -- [START DISTRIBUTED SENDS](#query_language-system-start-distributed-sends) -- [STOP MERGES](#query_language-system-stop-merges) -- [START MERGES](#query_language-system-start-merges) - -## RELOAD DICTIONARIES {#query_language-system-reload-dictionaries} - -Reloads all dictionaries that have been successfully loaded before. -By default, dictionaries are loaded lazily (see [dictionaries\_lazy\_load](../operations/server_settings/settings.md#server_settings-dictionaries_lazy_load)), so instead of being loaded automatically at startup, they are initialized on first access through dictGet function or SELECT from tables with ENGINE = Dictionary. The `SYSTEM RELOAD DICTIONARIES` query reloads such dictionaries (LOADED). -Always returns `Ok.` regardless of the result of the dictionary update. 
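For example, to reload every previously loaded dictionary in one statement:

``` sql
SYSTEM RELOAD DICTIONARIES
```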
-
-## RELOAD DICTIONARY dictionary\_name {#query_language-system-reload-dictionary}
-
-Completely reloads a dictionary `dictionary_name`, regardless of the state of the dictionary (LOADED / NOT\_LOADED / FAILED).
-Always returns `Ok.` regardless of the result of updating the dictionary.
-The status of the dictionary can be checked by querying the `system.dictionaries` table.
-
-``` sql
-SELECT name, status FROM system.dictionaries;
-```
-
-## DROP DNS CACHE {#query_language-system-drop-dns-cache}
-
-Resets ClickHouse’s internal DNS cache. Sometimes (for old ClickHouse versions) it is necessary to use this command when changing the infrastructure (changing the IP address of another ClickHouse server or the server used by dictionaries).
-
-For more convenient (automatic) cache management, see the `disable_internal_dns_cache` and `dns_cache_update_period` parameters.
-
-## DROP MARK CACHE {#query_language-system-drop-mark-cache}
-
-Resets the mark cache. Used in development of ClickHouse and performance tests.
-
-## FLUSH LOGS {#query_language-system-flush_logs}
-
-Flushes buffers of log messages to system tables (e.g. system.query\_log), so you don’t have to wait 7.5 seconds when debugging.
-
-## RELOAD CONFIG {#query_language-system-reload-config}
-
-Reloads the ClickHouse configuration. Used when the configuration is stored in ZooKeeper.
-
-## SHUTDOWN {#query_language-system-shutdown}
-
-Normally shuts down ClickHouse (like `service clickhouse-server stop` / `kill {$pid_clickhouse-server}`).
-
-## KILL {#query_language-system-kill}
-
-Aborts the ClickHouse process (like `kill -9 {$pid_clickhouse-server}`).
-
-## Managing Distributed Tables {#query-language-system-distributed}
-
-ClickHouse can manage [distributed](../operations/table_engines/distributed.md) tables. When a user inserts data into these tables, ClickHouse first creates a queue of the data that should be sent to cluster nodes, then asynchronously sends it. You can manage queue processing with the [STOP DISTRIBUTED SENDS](#query_language-system-stop-distributed-sends), [FLUSH DISTRIBUTED](#query_language-system-flush-distributed), and [START DISTRIBUTED SENDS](#query_language-system-start-distributed-sends) queries. You can also synchronously insert distributed data with the `insert_distributed_sync` setting.
-
-### STOP DISTRIBUTED SENDS {#query_language-system-stop-distributed-sends}
-
-Disables background data distribution when inserting data into distributed tables.
-
-``` sql
-SYSTEM STOP DISTRIBUTED SENDS [db.]<distributed_table_name>
-```
-
-### FLUSH DISTRIBUTED {#query_language-system-flush-distributed}
-
-Forces ClickHouse to send data to cluster nodes synchronously. If any nodes are unavailable, ClickHouse throws an exception and stops query execution. You can retry the query until it succeeds, which will happen when all nodes are back online.
-
-``` sql
-SYSTEM FLUSH DISTRIBUTED [db.]<distributed_table_name>
-```
-
-### START DISTRIBUTED SENDS {#query_language-system-start-distributed-sends}
-
-Enables background data distribution when inserting data into distributed tables.
-
-``` sql
-SYSTEM START DISTRIBUTED SENDS [db.]<distributed_table_name>
-```
-
-### STOP MERGES {#query_language-system-stop-merges}
-
-Makes it possible to stop background merges for tables in the MergeTree family:
-
-``` sql
-SYSTEM STOP MERGES [[db.]merge_tree_family_table_name]
-```
-
-!!! note "Note"
-    A `DETACH / ATTACH` of a table will start background merges for that table, even if merges were previously stopped for all MergeTree tables.
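A minimal sketch of the typical workflow, using a hypothetical MergeTree table `db.visits` (not from the original text): pause merges before a maintenance operation, then re-enable them with `START MERGES`, described below:

``` sql
-- Pause background merges for one table (hypothetical table name).
SYSTEM STOP MERGES db.visits
-- ... run the maintenance that should not compete with merges ...
-- Then resume merges for the same table:
SYSTEM START MERGES db.visits
```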
-
-### START MERGES {#query_language-system-start-merges}
-
-Makes it possible to start background merges for tables in the MergeTree family:
-
-``` sql
-SYSTEM START MERGES [[db.]merge_tree_family_table_name]
-```
-
-[Original article](https://clickhouse.tech/docs/en/query_language/system/)
diff --git a/docs/fa/query_language/table_functions/file.md b/docs/fa/query_language/table_functions/file.md
deleted file mode 100644
index 95c3a9378bc..00000000000
--- a/docs/fa/query_language/table_functions/file.md
+++ /dev/null
@@ -1,118 +0,0 @@
----
-en_copy: true
----
-
-# file {#file}
-
-Creates a table from a file. This table function is similar to the [url](url.md) and [hdfs](hdfs.md) ones.
-
-``` sql
-file(path, format, structure)
-```
-
-**Input parameters**
-
-- `path` — The relative path to the file from [user\_files\_path](../../operations/server_settings/settings.md#server_settings-user_files_path). The path to the file supports the following globs in read-only mode: `*`, `?`, `{abc,def}` and `{N..M}`, where `N` and `M` are numbers and `'abc'`, `'def'` are strings.
-- `format` — The [format](../../interfaces/formats.md#formats) of the file.
-- `structure` — Structure of the table. Format: `'column1_name column1_type, column2_name column2_type, ...'`.
-
-**Returned value**
-
-A table with the specified structure for reading or writing data in the specified file.
-
-**Example**
-
-Setting `user_files_path` and the contents of the file `test.csv`:
-
-``` bash
-$ grep user_files_path /etc/clickhouse-server/config.xml
-    /var/lib/clickhouse/user_files/
-
-$ cat /var/lib/clickhouse/user_files/test.csv
-    1,2,3
-    3,2,1
-    78,43,45
-```
-
-Creating a table from `test.csv` and selecting the first two rows from it:
-
-``` sql
-SELECT *
-FROM file('test.csv', 'CSV', 'column1 UInt32, column2 UInt32, column3 UInt32')
-LIMIT 2
-```
-
-``` text
-┌─column1─┬─column2─┬─column3─┐
-│       1 │       2 │       3 │
-│       3 │       2 │       1 │
-└─────────┴─────────┴─────────┘
-```
-
-``` sql
--- getting the first 10 lines of a table that contains 3 columns of UInt32 type from a CSV file
-SELECT * FROM file('test.csv', 'CSV', 'column1 UInt32, column2 UInt32, column3 UInt32') LIMIT 10
-```
-
-**Globs in path**
-
-Multiple path components can have globs. To be processed, a file must exist and match the whole path pattern (not only the suffix or prefix).
-
-- `*` — Substitutes any number of any characters except `/`, including the empty string.
-- `?` — Substitutes any single character.
-- `{some_string,another_string,yet_another_one}` — Substitutes any of the strings `'some_string', 'another_string', 'yet_another_one'`.
-- `{N..M}` — Substitutes any number in the range from N to M, including both borders.
-
-Constructions with `{}` are similar to the [remote table function](../../query_language/table_functions/remote.md).
-
-**Example**
-
-1.  Suppose we have several files with the following relative paths:
-
-- ‘some\_dir/some\_file\_1’
-- ‘some\_dir/some\_file\_2’
-- ‘some\_dir/some\_file\_3’
-- ‘another\_dir/some\_file\_1’
-- ‘another\_dir/some\_file\_2’
-- ‘another\_dir/some\_file\_3’
-
-2.  Query the number of rows in these files:
-
-``` sql
-SELECT count(*)
-FROM file('{some,another}_dir/some_file_{1..3}', 'TSV', 'name String, value UInt32')
-```
-
-3.  Query the number of rows in all the files of these two directories:
-
-``` sql
-SELECT count(*)
-FROM file('{some,another}_dir/*', 'TSV', 'name String, value UInt32')
-```
-
-!!! warning "Warning"
warning "Warning" - If your listing of files contains number ranges with leading zeros, use the construction with braces for each digit separately or use `?`. - -**Example** - -Query the data from files named `file000`, `file001`, … , `file999`: - -``` sql -SELECT count(*) -FROM file('big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name String, value UInt32') -``` - -## Virtual Columns {#virtual-columns} - -- `_path` — Path to the file. -- `_file` — Name of the file. - -**See Also** - -- [Virtual columns](https://clickhouse.tech/docs/en/operations/table_engines/#table_engines-virtual_columns) - -[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/file/) diff --git a/docs/fa/query_language/table_functions/hdfs.md b/docs/fa/query_language/table_functions/hdfs.md deleted file mode 100644 index f636b7d19bb..00000000000 --- a/docs/fa/query_language/table_functions/hdfs.md +++ /dev/null @@ -1,101 +0,0 @@ ---- -en_copy: true ---- - -# hdfs {#hdfs} - -Creates a table from files in HDFS. This table function is similar to [url](url.md) and [file](file.md) ones. - -``` sql -hdfs(URI, format, structure) -``` - -**Input parameters** - -- `URI` — The relative URI to the file in HDFS. Path to file support following globs in readonly mode: `*`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, \``'abc', 'def'` — strings. -- `format` — The [format](../../interfaces/formats.md#formats) of the file. -- `structure` — Structure of the table. Format `'column1_name column1_type, column2_name column2_type, ...'`. - -**Returned value** - -A table with the specified structure for reading or writing data in the specified file. - -**Example** - -Table from `hdfs://hdfs1:9000/test` and selection of the first two rows from it: - -``` sql -SELECT * -FROM hdfs('hdfs://hdfs1:9000/test', 'TSV', 'column1 UInt32, column2 UInt32, column3 UInt32') -LIMIT 2 -``` - -``` text -┌─column1─┬─column2─┬─column3─┐ -│ 1 │ 2 │ 3 │ -│ 3 │ 2 │ 1 │ -└─────────┴─────────┴─────────┘ -``` - -**Globs in path** - -Multiple path components can have globs. For being processed file should exists and matches to the whole path pattern (not only suffix or prefix). - -- `*` — Substitutes any number of any characters except `/` including empty string. -- `?` — Substitutes any single character. -- `{some_string,another_string,yet_another_one}` — Substitutes any of strings `'some_string', 'another_string', 'yet_another_one'`. -- `{N..M}` — Substitutes any number in range from N to M including both borders. - -Constructions with `{}` are similar to the [remote table function](../../query_language/table_functions/remote.md)). - -**Example** - -1. Suppose that we have several files with following URIs on HDFS: - -- ‘hdfs://hdfs1:9000/some\_dir/some\_file\_1’ -- ‘hdfs://hdfs1:9000/some\_dir/some\_file\_2’ -- ‘hdfs://hdfs1:9000/some\_dir/some\_file\_3’ -- ‘hdfs://hdfs1:9000/another\_dir/some\_file\_1’ -- ‘hdfs://hdfs1:9000/another\_dir/some\_file\_2’ -- ‘hdfs://hdfs1:9000/another\_dir/some\_file\_3’ - -1. Query the amount of rows in these files: - - - -``` sql -SELECT count(*) -FROM hdfs('hdfs://hdfs1:9000/{some,another}_dir/some_file_{1..3}', 'TSV', 'name String, value UInt32') -``` - -1. Query the amount of rows in all files of these two directories: - - - -``` sql -SELECT count(*) -FROM hdfs('hdfs://hdfs1:9000/{some,another}_dir/*', 'TSV', 'name String, value UInt32') -``` - -!!! 
warning "Warning" - If your listing of files contains number ranges with leading zeros, use the construction with braces for each digit separately or use `?`. - -**Example** - -Query the data from files named `file000`, `file001`, … , `file999`: - -``` sql -SELECT count(*) -FROM hdfs('hdfs://hdfs1:9000/big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name String, value UInt32') -``` - -## Virtual Columns {#virtual-columns} - -- `_path` — Path to the file. -- `_file` — Name of the file. - -**See Also** - -- [Virtual columns](https://clickhouse.tech/docs/en/operations/table_engines/#table_engines-virtual_columns) - -[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/hdfs/) diff --git a/docs/fa/query_language/table_functions/index.md b/docs/fa/query_language/table_functions/index.md deleted file mode 100644 index ba231a6eeea..00000000000 --- a/docs/fa/query_language/table_functions/index.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -en_copy: true ---- - -# Table Functions {#table-functions} - -Table functions are methods for constructing tables. - -You can use table functions in: - -- [FROM](../select.md#select-from) clause of the `SELECT` query. - - The method for creating a temporary table that is available only in the current query. The table is deleted when the query finishes. - -- [CREATE TABLE AS \](../create.md#create-table-query) query. - - It's one of the methods of creating a table. - -!!! warning "Warning" - You can’t use table functions if the [allow\_ddl](../../operations/settings/permissions_for_queries.md#settings_allow_ddl) setting is disabled. - -| Function | Description | -|-----------------------|-----------------------------------------------------------------------------------------------------------------------------------| -| [file](file.md) | Creates a [File](../../operations/table_engines/file.md)-engine table. | -| [merge](merge.md) | Creates a [Merge](../../operations/table_engines/merge.md)-engine table. | -| [numbers](numbers.md) | Creates a table with a single column filled with integer numbers. | -| [remote](remote.md) | Allows you to access remote servers without creating a [Distributed](../../operations/table_engines/distributed.md)-engine table. | -| [url](url.md) | Creates a [Url](../../operations/table_engines/url.md)-engine table. | -| [mysql](mysql.md) | Creates a [MySQL](../../operations/table_engines/mysql.md)-engine table. | -| [jdbc](jdbc.md) | Creates a [JDBC](../../operations/table_engines/jdbc.md)-engine table. | -| [odbc](odbc.md) | Creates a [ODBC](../../operations/table_engines/odbc.md)-engine table. | -| [hdfs](hdfs.md) | Creates a [HDFS](../../operations/table_engines/hdfs.md)-engine table. | - -[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/) diff --git a/docs/fa/query_language/table_functions/input.md b/docs/fa/query_language/table_functions/input.md deleted file mode 100644 index 7536a9bffc2..00000000000 --- a/docs/fa/query_language/table_functions/input.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -en_copy: true ---- - -# input {#input} - -`input(structure)` - table function that allows effectively convert and insert data sent to the -server with given structure to the table with another structure. - -`structure` - structure of data sent to the server in following format `'column1_name column1_type, column2_name column2_type, ...'`. -For example, `'id UInt32, name String'`. 
-
-This function can be used only in an `INSERT SELECT` query, and only once, but it otherwise behaves like an ordinary table function
-(for example, it can be used in a subquery, etc.).
-
-Data can be sent in any way, as for an ordinary `INSERT` query, and passed in any available [format](../../interfaces/formats.md#formats),
-which must be specified at the end of the query (unlike an ordinary `INSERT SELECT`).
-
-The main feature of this function is that when the server receives data from the client, it simultaneously converts the data
-according to the list of expressions in the `SELECT` clause and inserts it into the target table. A temporary table
-with all the transferred data is not created.
-
-**Examples**
-
-- Suppose the `test` table has the structure `(a String, b String)`
-  and the data in `data.csv` has a different structure `(col1 String, col2 Date, col3 Int32)`. The query that inserts
-  data from `data.csv` into the `test` table with simultaneous conversion looks like this:
-
-
-
-``` bash
-$ cat data.csv | clickhouse-client --query="INSERT INTO test SELECT lower(col1), col3 * col3 FROM input('col1 String, col2 Date, col3 Int32') FORMAT CSV";
-```
-
-- If `data.csv` contains data of the same structure `test_structure` as the table `test`, then these two queries are equivalent:
-
-
-
-``` bash
-$ cat data.csv | clickhouse-client --query="INSERT INTO test FORMAT CSV"
-$ cat data.csv | clickhouse-client --query="INSERT INTO test SELECT * FROM input('test_structure') FORMAT CSV"
-```
-
-[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/input/)
diff --git a/docs/fa/query_language/table_functions/jdbc.md b/docs/fa/query_language/table_functions/jdbc.md
deleted file mode 100644
index e1ba7b362bd..00000000000
--- a/docs/fa/query_language/table_functions/jdbc.md
+++ /dev/null
@@ -1,26 +0,0 @@
----
-en_copy: true
----
-
-# jdbc {#table-function-jdbc}
-
-`jdbc(jdbc_connection_uri, schema, table)` - returns a table that is connected via a JDBC driver.
-
-This table function requires a separate `clickhouse-jdbc-bridge` program to be running.
-It supports Nullable types (based on the DDL of the remote table that is queried).
-
-**Examples**
-
-``` sql
-SELECT * FROM jdbc('jdbc:mysql://localhost:3306/?user=root&password=root', 'schema', 'table')
-```
-
-``` sql
-SELECT * FROM jdbc('mysql://localhost:3306/?user=root&password=root', 'schema', 'table')
-```
-
-``` sql
-SELECT * FROM jdbc('datasource://mysql-local', 'schema', 'table')
-```
-
-[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/jdbc/)
diff --git a/docs/fa/query_language/table_functions/merge.md b/docs/fa/query_language/table_functions/merge.md
deleted file mode 100644
index 3638fad418d..00000000000
--- a/docs/fa/query_language/table_functions/merge.md
+++ /dev/null
@@ -1,11 +0,0 @@
----
-en_copy: true
----
-
-# merge {#merge}
-
-`merge(db_name, 'tables_regexp')` – Creates a temporary Merge table. For more information, see the section “Table engines, Merge”.
-
-The table structure is taken from the first table encountered that matches the regular expression.
-
-[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/merge/)
diff --git a/docs/fa/query_language/table_functions/mysql.md b/docs/fa/query_language/table_functions/mysql.md
deleted file mode 100644
index 5a8e8d4fd96..00000000000
--- a/docs/fa/query_language/table_functions/mysql.md
+++ /dev/null
@@ -1,83 +0,0 @@
----
-en_copy: true
----
-
-# mysql {#mysql}
-
-Allows `SELECT` queries to be performed on data that is stored on a remote MySQL server.
- -``` sql -mysql('host:port', 'database', 'table', 'user', 'password'[, replace_query, 'on_duplicate_clause']); -``` - -**Parameters** - -- `host:port` — MySQL server address. - -- `database` — Remote database name. - -- `table` — Remote table name. - -- `user` — MySQL user. - -- `password` — User password. - -- `replace_query` — Flag that converts `INSERT INTO` queries to `REPLACE INTO`. If `replace_query=1`, the query is replaced. - -- `on_duplicate_clause` — The `ON DUPLICATE KEY on_duplicate_clause` expression that is added to the `INSERT` query. - - Example: `INSERT INTO t (c1,c2) VALUES ('a', 2) ON DUPLICATE KEY UPDATE c2 = c2 + 1`, where `on_duplicate_clause` is `UPDATE c2 = c2 + 1`. See the MySQL documentation to find which `on_duplicate_clause` you can use with the `ON DUPLICATE KEY` clause. - - To specify `on_duplicate_clause` you need to pass `0` to the `replace_query` parameter. If you simultaneously pass `replace_query = 1` and `on_duplicate_clause`, ClickHouse generates an exception. - -Simple `WHERE` clauses such as `=, !=, >, >=, <, <=` are currently executed on the MySQL server. - -The rest of the conditions and the `LIMIT` sampling constraint are executed in ClickHouse only after the query to MySQL finishes. - -**Returned Value** - -A table object with the same columns as the original MySQL table. - -## Usage Example {#usage-example} - -Table in MySQL: - -``` text -mysql> CREATE TABLE `test`.`test` ( - -> `int_id` INT NOT NULL AUTO_INCREMENT, - -> `int_nullable` INT NULL DEFAULT NULL, - -> `float` FLOAT NOT NULL, - -> `float_nullable` FLOAT NULL DEFAULT NULL, - -> PRIMARY KEY (`int_id`)); -Query OK, 0 rows affected (0,09 sec) - -mysql> insert into test (`int_id`, `float`) VALUES (1,2); -Query OK, 1 row affected (0,00 sec) - -mysql> select * from test; -+--------+--------------+-------+----------------+ -| int_id | int_nullable | float | float_nullable | -+--------+--------------+-------+----------------+ -| 1 | NULL | 2 | NULL | -+--------+--------------+-------+----------------+ -1 row in set (0,00 sec) -``` - -Selecting data from ClickHouse: - -``` sql -SELECT * FROM mysql('localhost:3306', 'test', 'test', 'bayonet', '123') -``` - -``` text -┌─int_id─┬─int_nullable─┬─float─┬─float_nullable─┐ -│ 1 │ ᴺᵁᴸᴸ │ 2 │ ᴺᵁᴸᴸ │ -└────────┴──────────────┴───────┴────────────────┘ -``` - -## See Also {#see-also} - -- [The ‘MySQL’ table engine](../../operations/table_engines/mysql.md) -- [Using MySQL as a source of external dictionary](../dicts/external_dicts_dict_sources.md#dicts-external_dicts_dict_sources-mysql) - -[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/mysql/) diff --git a/docs/fa/query_language/table_functions/numbers.md b/docs/fa/query_language/table_functions/numbers.md deleted file mode 100644 index 5aec0b3c96b..00000000000 --- a/docs/fa/query_language/table_functions/numbers.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -en_copy: true ---- - -# numbers {#numbers} - -`numbers(N)` – Returns a table with the single ‘number’ column (UInt64) that contains integers from 0 to N-1. -`numbers(N, M)` - Returns a table with the single ‘number’ column (UInt64) that contains integers from N to (N + M - 1). - -Similar to the `system.numbers` table, it can be used for testing and generating successive values, `numbers(N, M)` more efficient than `system.numbers`. 
- -The following queries are equivalent: - -``` sql -SELECT * FROM numbers(10); -SELECT * FROM numbers(0, 10); -SELECT * FROM system.numbers LIMIT 10; -``` - -Examples: - -``` sql --- Generate a sequence of dates from 2010-01-01 to 2010-12-31 -select toDate('2010-01-01') + number as d FROM numbers(365); -``` - -[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/numbers/) diff --git a/docs/fa/query_language/table_functions/odbc.md b/docs/fa/query_language/table_functions/odbc.md deleted file mode 100644 index 8c972b1f93a..00000000000 --- a/docs/fa/query_language/table_functions/odbc.md +++ /dev/null @@ -1,105 +0,0 @@ ---- -en_copy: true ---- - -# odbc {#table-functions-odbc} - -Returns table that is connected via [ODBC](https://en.wikipedia.org/wiki/Open_Database_Connectivity). - -``` sql -odbc(connection_settings, external_database, external_table) -``` - -Parameters: - -- `connection_settings` — Name of the section with connection settings in the `odbc.ini` file. -- `external_database` — Name of a database in an external DBMS. -- `external_table` — Name of a table in the `external_database`. - -To safely implement ODBC connections, ClickHouse uses a separate program `clickhouse-odbc-bridge`. If the ODBC driver is loaded directly from `clickhouse-server`, driver problems can crash the ClickHouse server. ClickHouse automatically starts `clickhouse-odbc-bridge` when it is required. The ODBC bridge program is installed from the same package as the `clickhouse-server`. - -The fields with the `NULL` values from the external table are converted into the default values for the base data type. For example, if a remote MySQL table field has the `INT NULL` type it is converted to 0 (the default value for ClickHouse `Int32` data type). - -## Usage example {#usage-example} - -**Getting data from the local MySQL installation via ODBC** - -This example is checked for Ubuntu Linux 18.04 and MySQL server 5.7. - -Ensure that unixODBC and MySQL Connector are installed. - -By default (if installed from packages), ClickHouse starts as user `clickhouse`. Thus you need to create and configure this user in the MySQL server. - -``` bash -$ sudo mysql -``` - -``` sql -mysql> CREATE USER 'clickhouse'@'localhost' IDENTIFIED BY 'clickhouse'; -mysql> GRANT ALL PRIVILEGES ON *.* TO 'clickhouse'@'clickhouse' WITH GRANT OPTION; -``` - -Then configure the connection in `/etc/odbc.ini`. - -``` bash -$ cat /etc/odbc.ini -[mysqlconn] -DRIVER = /usr/local/lib/libmyodbc5w.so -SERVER = 127.0.0.1 -PORT = 3306 -DATABASE = test -USERNAME = clickhouse -PASSWORD = clickhouse -``` - -You can check the connection using the `isql` utility from the unixODBC installation. - -``` bash -$ isql -v mysqlconn -+---------------------------------------+ -| Connected! | -| | -... 
-``` - -Table in MySQL: - -``` text -mysql> CREATE TABLE `test`.`test` ( - -> `int_id` INT NOT NULL AUTO_INCREMENT, - -> `int_nullable` INT NULL DEFAULT NULL, - -> `float` FLOAT NOT NULL, - -> `float_nullable` FLOAT NULL DEFAULT NULL, - -> PRIMARY KEY (`int_id`)); -Query OK, 0 rows affected (0,09 sec) - -mysql> insert into test (`int_id`, `float`) VALUES (1,2); -Query OK, 1 row affected (0,00 sec) - -mysql> select * from test; -+--------+--------------+-------+----------------+ -| int_id | int_nullable | float | float_nullable | -+--------+--------------+-------+----------------+ -| 1 | NULL | 2 | NULL | -+--------+--------------+-------+----------------+ -1 row in set (0,00 sec) -``` - -Retrieving data from the MySQL table in ClickHouse: - -``` sql -SELECT * FROM odbc('DSN=mysqlconn', 'test', 'test') -``` - -``` text -┌─int_id─┬─int_nullable─┬─float─┬─float_nullable─┐ -│ 1 │ 0 │ 2 │ 0 │ -└────────┴──────────────┴───────┴────────────────┘ -``` - -## See Also {#see-also} - -- [ODBC external dictionaries](../../query_language/dicts/external_dicts_dict_sources.md#dicts-external_dicts_dict_sources-odbc) -- [ODBC table engine](../../operations/table_engines/odbc.md). - -[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/jdbc/) diff --git a/docs/fa/query_language/table_functions/remote.md b/docs/fa/query_language/table_functions/remote.md deleted file mode 100644 index e8c751af7e2..00000000000 --- a/docs/fa/query_language/table_functions/remote.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -en_copy: true ---- - -# remote, remoteSecure {#remote-remotesecure} - -Allows you to access remote servers without creating a `Distributed` table. - -Signatures: - -``` sql -remote('addresses_expr', db, table[, 'user'[, 'password']]) -remote('addresses_expr', db.table[, 'user'[, 'password']]) -``` - -`addresses_expr` – An expression that generates addresses of remote servers. This may be just one server address. The server address is `host:port`, or just `host`. The host can be specified as the server name, or as the IPv4 or IPv6 address. An IPv6 address is specified in square brackets. The port is the TCP port on the remote server. If the port is omitted, it uses `tcp_port` from the server’s config file (by default, 9000). - -!!! important "Important" - The port is required for an IPv6 address. - -Examples: - -``` text -example01-01-1 -example01-01-1:9000 -localhost -127.0.0.1 -[::]:9000 -[2a02:6b8:0:1111::11]:9000 -``` - -Multiple addresses can be comma-separated. In this case, ClickHouse will use distributed processing, so it will send the query to all specified addresses (like to shards with different data). - -Example: - -``` text -example01-01-1,example01-02-1 -``` - -Part of the expression can be specified in curly brackets. The previous example can be written as follows: - -``` text -example01-0{1,2}-1 -``` - -Curly brackets can contain a range of numbers separated by two dots (non-negative integers). In this case, the range is expanded to a set of values that generate shard addresses. If the first number starts with zero, the values are formed with the same zero alignment. The previous example can be written as follows: - -``` text -example01-{01..02}-1 -``` - -If you have multiple pairs of curly brackets, it generates the direct product of the corresponding sets. - -Addresses and parts of addresses in curly brackets can be separated by the pipe symbol (\|). 
In this case, the corresponding sets of addresses are interpreted as replicas, and the query will be sent to the first healthy replica. However, the replicas are iterated in the order currently set in the [load\_balancing](../../operations/settings/settings.md) setting. - -Example: - -``` text -example01-{01..02}-{1|2} -``` - -This example specifies two shards that each have two replicas. - -The number of addresses generated is limited by a constant. Right now this is 1000 addresses. - -Using the `remote` table function is less optimal than creating a `Distributed` table, because in this case, the server connection is re-established for every request. In addition, if host names are set, the names are resolved, and errors are not counted when working with various replicas. When processing a large number of queries, always create the `Distributed` table ahead of time, and don’t use the `remote` table function. - -The `remote` table function can be useful in the following cases: - -- Accessing a specific server for data comparison, debugging, and testing. -- Queries between various ClickHouse clusters for research purposes. -- Infrequent distributed requests that are made manually. -- Distributed requests where the set of servers is re-defined each time. - -If the user is not specified, `default` is used. -If the password is not specified, an empty password is used. - -`remoteSecure` - same as `remote` but with secured connection. Default port — [tcp\_port\_secure](../../operations/server_settings/settings.md#server_settings-tcp_port_secure) from config or 9440. - -[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/remote/) diff --git a/docs/fa/query_language/table_functions/url.md b/docs/fa/query_language/table_functions/url.md deleted file mode 100644 index e1250b438ab..00000000000 --- a/docs/fa/query_language/table_functions/url.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -en_copy: true ---- - -# url {#url} - -`url(URL, format, structure)` - returns a table created from the `URL` with given -`format` and `structure`. - -URL - HTTP or HTTPS server address, which can accept `GET` and/or `POST` requests. - -format - [format](../../interfaces/formats.md#formats) of the data. - -structure - table structure in `'UserID UInt64, Name String'` format. Determines column names and types. - -**Example** - -``` sql --- getting the first 3 lines of a table that contains columns of String and UInt32 type from HTTP-server which answers in CSV format. 
-SELECT * FROM url('http://127.0.0.1:12345/', CSV, 'column1 String, column2 UInt32') LIMIT 3
-```
-
-[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/url/)
diff --git a/docs/fa/roadmap.md b/docs/fa/roadmap.md
deleted file mode 100644
index 434058d8311..00000000000
--- a/docs/fa/roadmap.md
+++ /dev/null
@@ -1,12 +0,0 @@
-# Roadmap {#roadmap}
-
-## Q1 2020 {#q1-2020}
-
-- Role-based access control
-
-## Q2 2020 {#q2-2020}
-
-- Integration with external authentication services
-- Resource pools for more precise distribution of cluster capacity between users
-
-{## [Original article](https://clickhouse.tech/docs/en/roadmap/) ##}
diff --git a/docs/fa/security_changelog.md b/docs/fa/security_changelog.md
deleted file mode 100644
index 9d33e1083d5..00000000000
--- a/docs/fa/security_changelog.md
+++ /dev/null
@@ -1,69 +0,0 @@
-## Fixed in ClickHouse Release 19.14.3.3, 2019-09-10 {#fixed-in-clickhouse-release-19-14-3-3-2019-09-10}
-
-### CVE-2019-15024 {#cve-2019-15024}
-
-An attacker with write access to ZooKeeper who can run a custom server available from the network where ClickHouse runs can create a custom-built malicious server that acts as a ClickHouse replica and register it in ZooKeeper. When another replica fetches a data part from the malicious replica, it can force clickhouse-server to write to an arbitrary path on the filesystem.
-
-Credits: Eldar Zaitov of Yandex Information Security Team
-
-### CVE-2019-16535 {#cve-2019-16535}
-
-An OOB read, OOB write and integer underflow in decompression algorithms can be used to achieve RCE or DoS via the native protocol.
-
-Credits: Eldar Zaitov of Yandex Information Security Team
-
-### CVE-2019-16536 {#cve-2019-16536}
-
-A stack overflow leading to DoS can be triggered by a malicious authenticated client.
-
-Credits: Eldar Zaitov of Yandex Information Security Team
-
-## Fixed in ClickHouse Release 19.13.6.1, 2019-09-20 {#fixed-in-clickhouse-release-19-13-6-1-2019-09-20}
-
-### CVE-2019-18657 {#cve-2019-18657}
-
-The `url` table function had a vulnerability that allowed an attacker to inject arbitrary HTTP headers into the request.
-
-Credits: [Nikita Tikhomirov](https://github.com/NSTikhomirov)
-
-## Fixed in ClickHouse Release 18.12.13, 2018-09-10 {#fixed-in-clickhouse-release-18-12-13-2018-09-10}
-
-### CVE-2018-14672 {#cve-2018-14672}
-
-Functions for loading CatBoost models allowed path traversal and reading arbitrary files through error messages.
-
-Credits: Andrey Krasichkov of Yandex Information Security Team
-
-## Fixed in ClickHouse Release 18.10.3, 2018-08-13 {#fixed-in-clickhouse-release-18-10-3-2018-08-13}
-
-### CVE-2018-14671 {#cve-2018-14671}
-
-unixODBC allowed loading arbitrary shared objects from the file system, which led to a Remote Code Execution vulnerability.
-
-Credits: Andrey Krasichkov and Evgeny Sidorov of Yandex Information Security Team
-
-## Fixed in ClickHouse Release 1.1.54388, 2018-06-28 {#fixed-in-clickhouse-release-1-1-54388-2018-06-28}
-
-### CVE-2018-14668 {#cve-2018-14668}
-
-The “remote” table function allowed arbitrary symbols in the “user”, “password” and “default\_database” fields, which led to Cross Protocol Request Forgery Attacks.
- -Credits: Andrey Krasichkov of Yandex Information Security Team - -## Fixed in ClickHouse Release 1.1.54390, 2018-07-06 {#fixed-in-clickhouse-release-1-1-54390-2018-07-06} - -### CVE-2018-14669 {#cve-2018-14669} - -ClickHouse MySQL client had “LOAD DATA LOCAL INFILE” functionality enabled that allowed a malicious MySQL database read arbitrary files from the connected ClickHouse server. - -Credits: Andrey Krasichkov and Evgeny Sidorov of Yandex Information Security Team - -## Fixed in ClickHouse Release 1.1.54131, 2017-01-10 {#fixed-in-clickhouse-release-1-1-54131-2017-01-10} - -### CVE-2018-14670 {#cve-2018-14670} - -Incorrect configuration in deb package could lead to the unauthorized use of the database. - -Credits: the UK’s National Cyber Security Centre (NCSC) - -{## [Original article](https://clickhouse.tech/docs/en/security_changelog/) ##} diff --git a/docs/fa/sql_reference/aggregate_functions/combinators.md b/docs/fa/sql_reference/aggregate_functions/combinators.md new file mode 100644 index 00000000000..6c2450904fd --- /dev/null +++ b/docs/fa/sql_reference/aggregate_functions/combinators.md @@ -0,0 +1,167 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 37 +toc_title: "\u062A\u0631\u06A9\u06CC\u0628 \u06A9\u0646\u0646\u062F\u0647\u0647\u0627\ + \u06CC \u062A\u0627\u0628\u0639 \u062C\u0645\u0639" +--- + +# ترکیب کنندههای تابع جمع {#aggregate_functions_combinators} + +نام یک تابع جمع می تواند یک پسوند اضافه شده است. این تغییر راه تابع کلی کار می کند. + +## - اگر {#agg-functions-combinator-if} + +The suffix -If can be appended to the name of any aggregate function. In this case, the aggregate function accepts an extra argument – a condition (Uint8 type). The aggregate function processes only the rows that trigger the condition. If the condition was not triggered even once, it returns a default value (usually zeros or empty strings). + +مثالها: `sumIf(column, cond)`, `countIf(cond)`, `avgIf(x, cond)`, `quantilesTimingIf(level1, level2)(x, cond)`, `argMinIf(arg, val, cond)` و به همین ترتیب. + +با توابع مجموع شرطی, شما می توانید مصالح برای چندین شرایط در یک بار محاسبه, بدون استفاده از کارخانه های فرعی و `JOIN`برای مثال در یاندکس.متریکا, توابع مجموع مشروط استفاده می شود برای پیاده سازی قابلیت مقایسه بخش. + +## حداقل صفحه نمایش: {#agg-functions-combinator-array} + +پسوند مجموعه را می توان به هر تابع جمع اضافه شده است. در این مورد, تابع کل استدلال از طول می کشد ‘Array(T)’ نوع (ارریس) به جای ‘T’ استدلال نوع. اگر تابع جمع استدلال های متعدد را می پذیرد, این باید مجموعه ای از طول های برابر شود. هنگامی که پردازش ارریس, تابع کل کار می کند مانند تابع کل اصلی در تمام عناصر مجموعه. + +مثال 1: `sumArray(arr)` - مجموع تمام عناصر از همه ‘arr’ اراریس در این مثال می توانست بیشتر به سادگی نوشته شده است: `sum(arraySum(arr))`. + +مثال 2: `uniqArray(arr)` – Counts the number of unique elements in all ‘arr’ اراریس این می تواند انجام شود یک راه ساده تر: `uniq(arrayJoin(arr))` اما همیشه امکان اضافه کردن وجود ندارد ‘arrayJoin’ به پرس و جو. + +\- اگر و مجموعه ای می تواند ترکیب شود. هرچند, ‘Array’ پس اول باید بیای ‘If’. مثالها: `uniqArrayIf(arr, cond)`, `quantilesTimingArrayIf(level1, level2)(arr, cond)`. با توجه به این سفارش ‘cond’ برهان صف نیست. + +## - وضعیت {#agg-functions-combinator-state} + +اگر شما درخواست این ترکیب, تابع کل می کند مقدار حاصل بازگشت نیست (مانند تعدادی از ارزش های منحصر به فرد برای [uniq](reference.md#agg_function-uniq) تابع) , اما یک دولت متوسط از تجمع (برای `uniq`, این جدول هش برای محاسبه تعداد ارزش های منحصر به فرد است). 
این یک `AggregateFunction(...)` که می تواند برای پردازش بیشتر استفاده می شود و یا ذخیره شده در یک جدول را به پایان برساند جمع بعد. + +برای کار با این کشورها, استفاده: + +- [ریزدانه](../../engines/table_engines/mergetree_family/aggregatingmergetree.md) موتور جدول. +- [پلاکتی](../../sql_reference/functions/other_functions.md#function-finalizeaggregation) تابع. +- [خرابی اجرا](../../sql_reference/functions/other_functions.md#function-runningaccumulate) تابع. +- [- ادغام](#aggregate_functions_combinators_merge) ترکیب کننده. +- [اطلاعات دقیق](#aggregate_functions_combinators_mergestate) ترکیب کننده. + +## - ادغام {#aggregate_functions_combinators-merge} + +اگر شما درخواست این ترکیب, تابع کل طول می کشد حالت تجمع متوسط به عنوان یک استدلال, ترکیبی از کشورهای به پایان تجمع, و ارزش حاصل می گرداند. + +## اطلاعات دقیق {#aggregate_functions_combinators-mergestate} + +ادغام کشورهای تجمع متوسط در همان راه به عنوان ترکیب-ادغام. با این حال, این مقدار حاصل بازگشت نیست, اما یک دولت تجمع متوسط, شبیه به ترکیب دولت. + +## - فورچ {#agg-functions-combinator-foreach} + +تبدیل یک تابع جمع برای جداول به یک تابع کلی برای ارریس که جمع اقلام مجموعه مربوطه و مجموعه ای از نتایج را برمی گرداند. به عنوان مثال, `sumForEach` برای ارریس `[1, 2]`, `[3, 4, 5]`و`[6, 7]`نتیجه را برمی گرداند `[10, 13, 5]` پس از اضافه کردن با هم موارد مجموعه مربوطه. + +## شناسه بسته: {#agg-functions-combinator-ordefault} + +پر مقدار پیش فرض از نوع بازگشت تابع جمع است اگر چیزی برای جمع وجود دارد. + +``` sql +SELECT avg(number), avgOrDefault(number) FROM numbers(0) +``` + +``` text +┌─avg(number)─┬─avgOrDefault(number)─┐ +│ nan │ 0 │ +└─────────────┴──────────────────────┘ +``` + +## اطلاعات دقیق {#agg-functions-combinator-ornull} + +پر `null` در صورتی که هیچ چیز به جمع وجود دارد. ستون بازگشت قابل ابطال خواهد بود. + +``` sql +SELECT avg(number), avgOrNull(number) FROM numbers(0) +``` + +``` text +┌─avg(number)─┬─avgOrNull(number)─┐ +│ nan │ ᴺᵁᴸᴸ │ +└─────────────┴───────────────────┘ +``` + +-OrDefault و OrNull می تواند در ترکیب با دیگر combinators. این زمانی مفید است که تابع جمع می کند ورودی خالی را قبول نمی کند. + +``` sql +SELECT avgOrNullIf(x, x > 10) +FROM +( + SELECT toDecimal32(1.23, 2) AS x +) +``` + +``` text +┌─avgOrNullIf(x, greater(x, 10))─┐ +│ ᴺᵁᴸᴸ │ +└────────────────────────────────┘ +``` + +## - نمونه {#agg-functions-combinator-resample} + +به شما امکان می دهد داده ها را به گروه تقسیم کنید و سپس به طور جداگانه داده ها را در این گروه ها جمع کنید. گروه ها با تقسیم مقادیر از یک ستون به فواصل ایجاد شده است. + +``` sql +Resample(start, end, step)(, resampling_key) +``` + +**پارامترها** + +- `start` — Starting value of the whole required interval for `resampling_key` ارزشهای خبری عبارتند از: +- `stop` — Ending value of the whole required interval for `resampling_key` ارزشهای خبری عبارتند از: کل فاصله شامل نمی شود `stop` مقدار `[start, stop)`. +- `step` — Step for separating the whole interval into subintervals. The `aggFunction` بیش از هر یک از این زیرگروه اعدام به طور مستقل. +- `resampling_key` — Column whose values are used for separating data into intervals. +- `aggFunction_params` — `aggFunction` پارامترها + +**مقادیر بازگشتی** + +- مجموعه ای از `aggFunction` نتایج جستجو برای هر subinterval. + +**مثال** + +در نظر بگیرید که `people` جدول با داده های زیر: + +``` text +┌─name───┬─age─┬─wage─┐ +│ John │ 16 │ 10 │ +│ Alice │ 30 │ 15 │ +│ Mary │ 35 │ 8 │ +│ Evelyn │ 48 │ 11.5 │ +│ David │ 62 │ 9.9 │ +│ Brian │ 60 │ 16 │ +└────────┴─────┴──────┘ +``` + +بیایید نام افرادی که سن نهفته در فواصل `[30,60)` و `[60,75)`. 
پس ما با استفاده از نمایندگی عدد صحیح برای سن, ما سنین در `[30, 59]` و `[60,74]` فواصل زمانی. + +به نام کلی در مجموعه, ما با استفاده از [گرامری](reference.md#agg_function-grouparray) تابع جمع. طول می کشد تا یک استدلال. در مورد ما این است `name` ستون. این `groupArrayResample` تابع باید از `age` ستون به نام دانه های سن. برای تعریف فواصل مورد نیاز ما `30, 75, 30` نشانوندها به `groupArrayResample` تابع. + +``` sql +SELECT groupArrayResample(30, 75, 30)(name, age) FROM people +``` + +``` text +┌─groupArrayResample(30, 75, 30)(name, age)─────┐ +│ [['Alice','Mary','Evelyn'],['David','Brian']] │ +└───────────────────────────────────────────────┘ +``` + +در نظر گرفتن نتایج. + +`Jonh` خارج از نمونه است چرا که او بیش از حد جوان است. افراد دیگر با توجه به فواصل زمانی مشخص شده توزیع می شوند. + +حالا اجازه دهید تعداد کل مردم و متوسط دستمزد خود را در فواصل سنی مشخص شده است. + +``` sql +SELECT + countResample(30, 75, 30)(name, age) AS amount, + avgResample(30, 75, 30)(wage, age) AS avg_wage +FROM people +``` + +``` text +┌─amount─┬─avg_wage──────────────────┐ +│ [3,2] │ [11.5,12.949999809265137] │ +└────────┴───────────────────────────┘ +``` + +[مقاله اصلی](https://clickhouse.tech/docs/en/query_language/agg_functions/combinators/) diff --git a/docs/fa/sql_reference/aggregate_functions/index.md b/docs/fa/sql_reference/aggregate_functions/index.md new file mode 100644 index 00000000000..6442dddedd2 --- /dev/null +++ b/docs/fa/sql_reference/aggregate_functions/index.md @@ -0,0 +1,62 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_folder_title: Aggregate Functions +toc_priority: 33 +toc_title: "\u0645\u0639\u0631\u0641\u06CC \u0634\u0631\u06A9\u062A" +--- + +# توابع مجموع {#aggregate-functions} + +توابع مجموع در کار [عادی](http://www.sql-tutorial.com/sql-aggregate-functions-sql-tutorial) راه به عنوان کارشناسان پایگاه داده انتظار می رود. + +فاحشه خانه نیز پشتیبانی می کند: + +- [توابع مجموع پارامتری](parametric_functions.md#aggregate_functions_parametric), که قبول پارامترهای دیگر علاوه بر ستون. +- [ترکیب کنندهها](combinators.md#aggregate_functions_combinators) که تغییر رفتار مجموع توابع. + +## پردازش پوچ {#null-processing} + +در طول تجمع همه `NULL`بازدید کنندگان قلم می. + +**مثالها:** + +این جدول را در نظر بگیرید: + +``` text +┌─x─┬────y─┐ +│ 1 │ 2 │ +│ 2 │ ᴺᵁᴸᴸ │ +│ 3 │ 2 │ +│ 3 │ 3 │ +│ 3 │ ᴺᵁᴸᴸ │ +└───┴──────┘ +``` + +بیایید می گویند شما نیاز به کل ارزش ها در `y` ستون: + +``` sql +SELECT sum(y) FROM t_null_big +``` + + ┌─sum(y)─┐ + │ 7 │ + └────────┘ + +این `sum` تابع تفسیر می کند `NULL` به عنوان `0`. به خصوص, این بدان معنی است که اگر تابع ورودی از یک انتخاب که تمام مقادیر دریافت `NULL` سپس نتیجه خواهد بود `0` نه `NULL`. + +حالا شما می توانید استفاده کنید `groupArray` تابع برای ایجاد مجموعه ای از `y` ستون: + +``` sql +SELECT groupArray(y) FROM t_null_big +``` + +``` text +┌─groupArray(y)─┐ +│ [2,2,3] │ +└───────────────┘ +``` + +`groupArray` شامل نمی شود `NULL` در مجموعه ای نتیجه. 
+ +[مقاله اصلی](https://clickhouse.tech/docs/en/query_language/agg_functions/) diff --git a/docs/fa/sql_reference/aggregate_functions/parametric_functions.md b/docs/fa/sql_reference/aggregate_functions/parametric_functions.md new file mode 100644 index 00000000000..3a45a615e07 --- /dev/null +++ b/docs/fa/sql_reference/aggregate_functions/parametric_functions.md @@ -0,0 +1,500 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 38 +toc_title: "\u062A\u0648\u0627\u0628\u0639 \u0645\u062C\u0645\u0648\u0639 \u067E\u0627\ + \u0631\u0627\u0645\u062A\u0631\u06CC" +--- + +# توابع مجموع پارامتری {#aggregate_functions_parametric} + +Some aggregate functions can accept not only argument columns (used for compression), but a set of parameters – constants for initialization. The syntax is two pairs of brackets instead of one. The first is for parameters, and the second is for arguments. + +## سابقهنما {#histogram} + +محاسبه هیستوگرام تطبیقی. این نتایج دقیق را تضمین نمی کند. + +``` sql +histogram(number_of_bins)(values) +``` + +توابع استفاده می کند [جریان الگوریتم درخت تصمیم موازی](http://jmlr.org/papers/volume11/ben-haim10a/ben-haim10a.pdf). مرزهای سطل هیستوگرام تنظیم به عنوان داده های جدید وارد یک تابع. در مورد مشترک عرض سطل برابر نیست. + +**پارامترها** + +`number_of_bins` — Upper limit for the number of bins in the histogram. The function automatically calculates the number of bins. It tries to reach the specified number of bins, but if it fails, it uses fewer bins. +`values` — [عبارت](../syntax.md#syntax-expressions) در نتیجه مقادیر ورودی. + +**مقادیر بازگشتی** + +- [& حذف](../../sql_reference/data_types/array.md) از [توپلس](../../sql_reference/data_types/tuple.md) از قالب زیر: + + ``` + [(lower_1, upper_1, height_1), ... (lower_N, upper_N, height_N)] + ``` + + - `lower` — Lower bound of the bin. + - `upper` — Upper bound of the bin. + - `height` — Calculated height of the bin. + +**مثال** + +``` sql +SELECT histogram(5)(number + 1) +FROM ( + SELECT * + FROM system.numbers + LIMIT 20 +) +``` + +``` text +┌─histogram(5)(plus(number, 1))───────────────────────────────────────────┐ +│ [(1,4.5,4),(4.5,8.5,4),(8.5,12.75,4.125),(12.75,17,4.625),(17,20,3.25)] │ +└─────────────────────────────────────────────────────────────────────────┘ +``` + +شما می توانید یک هیستوگرام با تجسم [بار](../../sql_reference/functions/other_functions.md#function-bar) تابع برای مثال: + +``` sql +WITH histogram(5)(rand() % 100) AS hist +SELECT + arrayJoin(hist).3 AS height, + bar(height, 0, 6, 5) AS bar +FROM +( + SELECT * + FROM system.numbers + LIMIT 20 +) +``` + +``` text +┌─height─┬─bar───┐ +│ 2.125 │ █▋ │ +│ 3.25 │ ██▌ │ +│ 5.625 │ ████▏ │ +│ 5.625 │ ████▏ │ +│ 3.375 │ ██▌ │ +└────────┴───────┘ +``` + +در این مورد, شما باید به یاد داشته باشید که شما مرزهای هیستوگرام بن نمی دانند. + +## sequenceMatch(pattern)(timestamp, cond1, cond2, …) {#function-sequencematch} + +بررسی اینکه دنباله شامل یک زنجیره رویداد که منطبق بر الگوی. + +``` sql +sequenceMatch(pattern)(timestamp, cond1, cond2, ...) +``` + +!!! warning "اخطار" + رویدادهایی که در همان دوم رخ می دهد ممکن است در دنباله در سفارش تعریف نشده موثر بر نتیجه دراز. + +**پارامترها** + +- `pattern` — Pattern string. See [نحو الگو](#sequence-function-pattern-syntax). + +- `timestamp` — Column considered to contain time data. Typical data types are `Date` و `DateTime`. شما همچنین می توانید هر یک از پشتیبانی استفاده کنید [اینترنت](../../sql_reference/data_types/int_uint.md) انواع داده ها. 
+ +- `cond1`, `cond2` — Conditions that describe the chain of events. Data type: `UInt8`. شما می توانید به تصویب تا 32 استدلال شرط. تابع طول می کشد تنها حوادث شرح داده شده در این شرایط به حساب. اگر دنباله حاوی اطلاعاتی است که در شرایط توصیف نشده, تابع پرش. + +**مقادیر بازگشتی** + +- 1, اگر الگوی همسان است. +- 0, اگر الگوی همسان نیست. + +نوع: `UInt8`. + + +**نحو الگو** + +- `(?N)` — Matches the condition argument at position `N`. شرایط در شماره `[1, 32]` محدوده. به عنوان مثال, `(?1)` با استدلال به تصویب رسید `cond1` پارامتر. + +- `.*` — Matches any number of events. You don't need conditional arguments to match this element of the pattern. + +- `(?t operator value)` — Sets the time in seconds that should separate two events. For example, pattern `(?1)(?t>1800)(?2)` مسابقات رویدادهایی که رخ می دهد بیش از 1800 ثانیه از یکدیگر. تعداد دلخواه از هر رویدادی می تواند بین این حوادث دراز. شما می توانید از `>=`, `>`, `<`, `<=` اپراتورها. + +**مثالها** + +داده ها را در نظر بگیرید `t` جدول: + +``` text +┌─time─┬─number─┐ +│ 1 │ 1 │ +│ 2 │ 3 │ +│ 3 │ 2 │ +└──────┴────────┘ +``` + +انجام پرس و جو: + +``` sql +SELECT sequenceMatch('(?1)(?2)')(time, number = 1, number = 2) FROM t +``` + +``` text +┌─sequenceMatch('(?1)(?2)')(time, equals(number, 1), equals(number, 2))─┐ +│ 1 │ +└───────────────────────────────────────────────────────────────────────┘ +``` + +تابع زنجیره رویداد که تعداد پیدا شده است 2 زیر شماره 1. این قلم شماره 3 بین, چرا که تعداد به عنوان یک رویداد توصیف نشده. اگر ما می خواهیم این شماره را در نظر بگیریم هنگام جستجو برای زنجیره رویداد داده شده در مثال باید شرایط را ایجاد کنیم. + +``` sql +SELECT sequenceMatch('(?1)(?2)')(time, number = 1, number = 2, number = 3) FROM t +``` + +``` text +┌─sequenceMatch('(?1)(?2)')(time, equals(number, 1), equals(number, 2), equals(number, 3))─┐ +│ 0 │ +└──────────────────────────────────────────────────────────────────────────────────────────┘ +``` + +در این مورد, تابع می تواند زنجیره رویداد تطبیق الگوی پیدا کنید, چرا که این رویداد برای شماره 3 رخ داده است بین 1 و 2. اگر در همان مورد ما شرایط را برای شماره بررسی 4, دنباله الگوی مطابقت. + +``` sql +SELECT sequenceMatch('(?1)(?2)')(time, number = 1, number = 2, number = 4) FROM t +``` + +``` text +┌─sequenceMatch('(?1)(?2)')(time, equals(number, 1), equals(number, 2), equals(number, 4))─┐ +│ 1 │ +└──────────────────────────────────────────────────────────────────────────────────────────┘ +``` + +**همچنین نگاه کنید** + +- [شمارش معکوس](#function-sequencecount) + +## sequenceCount(pattern)(time, cond1, cond2, …) {#function-sequencecount} + +شمارش تعداد زنجیره رویداد که الگوی همسان. تابع جستجو زنجیره رویداد که با هم همپوشانی دارند. این شروع به جستجو برای زنجیره بعدی پس از زنجیره فعلی همسان است. + +!!! warning "اخطار" + رویدادهایی که در همان دوم رخ می دهد ممکن است در دنباله در سفارش تعریف نشده موثر بر نتیجه دراز. + +``` sql +sequenceCount(pattern)(timestamp, cond1, cond2, ...) +``` + +**پارامترها** + +- `pattern` — Pattern string. See [نحو الگو](#sequence-function-pattern-syntax). + +- `timestamp` — Column considered to contain time data. Typical data types are `Date` و `DateTime`. شما همچنین می توانید هر یک از پشتیبانی استفاده کنید [اینترنت](../../sql_reference/data_types/int_uint.md) انواع داده ها. + +- `cond1`, `cond2` — Conditions that describe the chain of events. Data type: `UInt8`. شما می توانید به تصویب تا 32 استدلال شرط. تابع طول می کشد تنها حوادث شرح داده شده در این شرایط به حساب. اگر دنباله حاوی اطلاعاتی است که در شرایط توصیف نشده, تابع پرش. 
+ +**مقادیر بازگشتی** + +- تعداد زنجیره رویداد غیر با هم تداخل دارند که همسان. + +نوع: `UInt64`. + +**مثال** + +داده ها را در نظر بگیرید `t` جدول: + +``` text +┌─time─┬─number─┐ +│ 1 │ 1 │ +│ 2 │ 3 │ +│ 3 │ 2 │ +│ 4 │ 1 │ +│ 5 │ 3 │ +│ 6 │ 2 │ +└──────┴────────┘ +``` + +تعداد چند بار تعداد 2 پس از شماره 1 با هر مقدار از شماره های دیگر بین رخ می دهد: + +``` sql +SELECT sequenceCount('(?1).*(?2)')(time, number = 1, number = 2) FROM t +``` + +``` text +┌─sequenceCount('(?1).*(?2)')(time, equals(number, 1), equals(number, 2))─┐ +│ 2 │ +└─────────────────────────────────────────────────────────────────────────┘ +``` + +**همچنین نگاه کنید به** + +- [ترتیب سنج](#function-sequencematch) + +## در پنجره {#windowfunnel} + +جستجو برای زنجیره رویداد در یک پنجره زمان کشویی و محاسبه حداکثر تعداد رویدادهایی که از زنجیره رخ داده است. + +تابع با توجه به الگوریتم کار می کند: + +- تابع جستجو برای داده هایی که باعث شرط اول در زنجیره و مجموعه ضد رویداد به 1. این لحظه ای است که پنجره کشویی شروع می شود. + +- اگر حوادث از زنجیره پی در پی در پنجره رخ می دهد, ضد افزایش است. اگر دنباله ای از حوادث مختل شده است, شمارنده است افزایش نمی. + +- اگر داده های زنجیره رویداد های متعدد در نقاط مختلف از اتمام, تابع تنها خروجی به اندازه طولانی ترین زنجیره ای. + +**نحو** + +``` sql +windowFunnel(window, [mode])(timestamp, cond1, cond2, ..., condN) +``` + +**پارامترها** + +- `window` — Length of the sliding window in seconds. +- `mode` - این یک استدلال اختیاری است . + - `'strict'` - وقتی که `'strict'` تنظیم شده است, پنجره() اعمال شرایط تنها برای ارزش های منحصر به فرد. +- `timestamp` — Name of the column containing the timestamp. Data types supported: [تاریخ](../../sql_reference/data_types/date.md), [DateTime](../../sql_reference/data_types/datetime.md#data_type-datetime) و دیگر انواع عدد صحیح بدون علامت (توجه داشته باشید که حتی اگر برچسب زمان پشتیبانی از `UInt64` نوع, این مقدار می تواند بین المللی تجاوز نمی64 بیشترین, که 2^63 - 1). +- `cond` — Conditions or data describing the chain of events. [UInt8](../../sql_reference/data_types/int_uint.md). + +**مقدار بازگشتی** + +حداکثر تعداد متوالی باعث شرایط از زنجیره ای در پنجره زمان کشویی. +تمام زنجیره ها در انتخاب تجزیه و تحلیل می شوند. + +نوع: `Integer`. + +**مثال** + +تعیین کنید که یک دوره زمانی معین برای کاربر کافی باشد تا گوشی را انتخاب کند و دو بار در فروشگاه اینترنتی خریداری کند. + +زنجیره ای از وقایع زیر را تنظیم کنید: + +1. کاربر وارد شده به حساب خود را در فروشگاه (`eventID = 1003`). +2. کاربر برای یک تلفن جستجو می کند (`eventID = 1007, product = 'phone'`). +3. کاربر سفارش داده شده (`eventID = 1009`). +4. کاربر دوباره سفارش داد (`eventID = 1010`). + +جدول ورودی: + +``` text +┌─event_date─┬─user_id─┬───────────timestamp─┬─eventID─┬─product─┐ +│ 2019-01-28 │ 1 │ 2019-01-29 10:00:00 │ 1003 │ phone │ +└────────────┴─────────┴─────────────────────┴─────────┴─────────┘ +┌─event_date─┬─user_id─┬───────────timestamp─┬─eventID─┬─product─┐ +│ 2019-01-31 │ 1 │ 2019-01-31 09:00:00 │ 1007 │ phone │ +└────────────┴─────────┴─────────────────────┴─────────┴─────────┘ +┌─event_date─┬─user_id─┬───────────timestamp─┬─eventID─┬─product─┐ +│ 2019-01-30 │ 1 │ 2019-01-30 08:00:00 │ 1009 │ phone │ +└────────────┴─────────┴─────────────────────┴─────────┴─────────┘ +┌─event_date─┬─user_id─┬───────────timestamp─┬─eventID─┬─product─┐ +│ 2019-02-01 │ 1 │ 2019-02-01 08:00:00 │ 1010 │ phone │ +└────────────┴─────────┴─────────────────────┴─────────┴─────────┘ +``` + +یافتن پست های تا چه حد کاربر `user_id` می تواند از طریق زنجیره ای در یک دوره در ژانویه و فوریه از 2019. 
+ +پرسوجو: + +``` sql +SELECT + level, + count() AS c +FROM +( + SELECT + user_id, + windowFunnel(6048000000000000)(timestamp, eventID = 1003, eventID = 1009, eventID = 1007, eventID = 1010) AS level + FROM trend + WHERE (event_date >= '2019-01-01') AND (event_date <= '2019-02-02') + GROUP BY user_id +) +GROUP BY level +ORDER BY level ASC +``` + +نتیجه: + +``` text +┌─level─┬─c─┐ +│ 4 │ 1 │ +└───────┴───┘ +``` + +## نگهداری {#retention} + +تابع طول می کشد به عنوان استدلال مجموعه ای از شرایط از 1 به 32 استدلال از نوع `UInt8` که نشان می دهد که یک بیماری خاص برای این رویداد مواجه شد. +هر گونه شرایط را می توان به عنوان یک استدلال مشخص (همانطور که در [WHERE](../../sql_reference/statements/select.md#select-where)). + +شرایط, به جز اولین, درخواست در جفت: نتیجه دوم درست خواهد بود اگر اول و دوم درست باشد, از سوم اگر اولین و فیرد درست باشد, و غیره. + +**نحو** + +``` sql +retention(cond1, cond2, ..., cond32); +``` + +**پارامترها** + +- `cond` — an expression that returns a `UInt8` نتیجه (1 یا 0). + +**مقدار بازگشتی** + +مجموعه ای از 1 یا 0. + +- 1 — condition was met for the event. +- 0 — condition wasn't met for the event. + +نوع: `UInt8`. + +**مثال** + +بیایید یک نمونه از محاسبه را در نظر بگیریم `retention` تابع برای تعیین ترافیک سایت. + +**1.** Сreate a table to illustrate an example. + +``` sql +CREATE TABLE retention_test(date Date, uid Int32) ENGINE = Memory; + +INSERT INTO retention_test SELECT '2020-01-01', number FROM numbers(5); +INSERT INTO retention_test SELECT '2020-01-02', number FROM numbers(10); +INSERT INTO retention_test SELECT '2020-01-03', number FROM numbers(15); +``` + +جدول ورودی: + +پرسوجو: + +``` sql +SELECT * FROM retention_test +``` + +نتیجه: + +``` text +┌───────date─┬─uid─┐ +│ 2020-01-01 │ 0 │ +│ 2020-01-01 │ 1 │ +│ 2020-01-01 │ 2 │ +│ 2020-01-01 │ 3 │ +│ 2020-01-01 │ 4 │ +└────────────┴─────┘ +┌───────date─┬─uid─┐ +│ 2020-01-02 │ 0 │ +│ 2020-01-02 │ 1 │ +│ 2020-01-02 │ 2 │ +│ 2020-01-02 │ 3 │ +│ 2020-01-02 │ 4 │ +│ 2020-01-02 │ 5 │ +│ 2020-01-02 │ 6 │ +│ 2020-01-02 │ 7 │ +│ 2020-01-02 │ 8 │ +│ 2020-01-02 │ 9 │ +└────────────┴─────┘ +┌───────date─┬─uid─┐ +│ 2020-01-03 │ 0 │ +│ 2020-01-03 │ 1 │ +│ 2020-01-03 │ 2 │ +│ 2020-01-03 │ 3 │ +│ 2020-01-03 │ 4 │ +│ 2020-01-03 │ 5 │ +│ 2020-01-03 │ 6 │ +│ 2020-01-03 │ 7 │ +│ 2020-01-03 │ 8 │ +│ 2020-01-03 │ 9 │ +│ 2020-01-03 │ 10 │ +│ 2020-01-03 │ 11 │ +│ 2020-01-03 │ 12 │ +│ 2020-01-03 │ 13 │ +│ 2020-01-03 │ 14 │ +└────────────┴─────┘ +``` + +**2.** کاربران گروه با شناسه منحصر به فرد `uid` با استفاده از `retention` تابع. + +پرسوجو: + +``` sql +SELECT + uid, + retention(date = '2020-01-01', date = '2020-01-02', date = '2020-01-03') AS r +FROM retention_test +WHERE date IN ('2020-01-01', '2020-01-02', '2020-01-03') +GROUP BY uid +ORDER BY uid ASC +``` + +نتیجه: + +``` text +┌─uid─┬─r───────┐ +│ 0 │ [1,1,1] │ +│ 1 │ [1,1,1] │ +│ 2 │ [1,1,1] │ +│ 3 │ [1,1,1] │ +│ 4 │ [1,1,1] │ +│ 5 │ [0,0,0] │ +│ 6 │ [0,0,0] │ +│ 7 │ [0,0,0] │ +│ 8 │ [0,0,0] │ +│ 9 │ [0,0,0] │ +│ 10 │ [0,0,0] │ +│ 11 │ [0,0,0] │ +│ 12 │ [0,0,0] │ +│ 13 │ [0,0,0] │ +│ 14 │ [0,0,0] │ +└─────┴─────────┘ +``` + +**3.** محاسبه تعداد کل بازدیدکننده داشته است سایت در هر روز. 
+ +پرسوجو: + +``` sql +SELECT + sum(r[1]) AS r1, + sum(r[2]) AS r2, + sum(r[3]) AS r3 +FROM +( + SELECT + uid, + retention(date = '2020-01-01', date = '2020-01-02', date = '2020-01-03') AS r + FROM retention_test + WHERE date IN ('2020-01-01', '2020-01-02', '2020-01-03') + GROUP BY uid +) +``` + +نتیجه: + +``` text +┌─r1─┬─r2─┬─r3─┐ +│ 5 │ 5 │ 5 │ +└────┴────┴────┘ +``` + +کجا: + +- `r1`- تعداد بازدید کنندگان منحصر به فرد که در طول 2020-01-01 (بازدید `cond1` شرط). +- `r2`- تعداد بازدید کنندگان منحصر به فرد که برای بازدید از سایت در طول یک دوره زمانی خاص بین 2020-01-01 و 2020-01-02 (`cond1` و `cond2` شرایط). +- `r3`- تعداد بازدید کنندگان منحصر به فرد که برای بازدید از سایت در طول یک دوره زمانی خاص بین 2020-01-01 و 2020-01-03 (`cond1` و `cond3` شرایط). + +## uniqUpTo(N)(x) {#uniquptonx} + +Calculates the number of different argument values ​​if it is less than or equal to N. If the number of different argument values is greater than N, it returns N + 1. + +توصیه می شود برای استفاده با شماره های کوچک, تا 10. حداکثر مقدار نفر است 100. + +برای دولت از یک تابع جمع, با استفاده از مقدار حافظه برابر با 1 + نفر \* اندازه یک مقدار بایت. +برای رشته, این فروشگاه یک هش غیر رمزنگاری 8 بایت. به این معنا که محاسبه برای رشته ها تقریبی است. + +این تابع همچنین برای چندین استدلال کار می کند. + +این کار به همان سرعتی که ممکن است, به جز برای موارد زمانی که یک مقدار نفر بزرگ استفاده می شود و تعدادی از ارزش های منحصر به فرد است کمی کمتر از ان. + +مثال طریقه استفاده: + +``` text +Problem: Generate a report that shows only keywords that produced at least 5 unique users. +Solution: Write in the GROUP BY query SearchPhrase HAVING uniqUpTo(4)(UserID) >= 5 +``` + +[مقاله اصلی](https://clickhouse.tech/docs/en/query_language/agg_functions/parametric_functions/) + +## sumMapFiltered(keys\_to\_keep)(کلید ارزش ها) {#summapfilteredkeys-to-keepkeys-values} + +رفتار مشابه [& سواپ](reference.md#agg_functions-summap) جز این که مجموعه ای از کلید به عنوان یک پارامتر منتقل می شود. این می تواند مفید باشد به خصوص در هنگام کار با یک کارت از کلید های بالا. diff --git a/docs/fa/sql_reference/aggregate_functions/reference.md b/docs/fa/sql_reference/aggregate_functions/reference.md new file mode 100644 index 00000000000..6c76f2caff0 --- /dev/null +++ b/docs/fa/sql_reference/aggregate_functions/reference.md @@ -0,0 +1,1837 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 36 +toc_title: "\u0645\u0631\u062C\u0639" +--- + +# مرجع عملکرد {#function-reference} + +## شمارش {#agg_function-count} + +شمارش تعداد ردیف یا نه تهی ارزش. + +ClickHouse زیر پشتیبانی می کند syntaxes برای `count`: +- `count(expr)` یا `COUNT(DISTINCT expr)`. +- `count()` یا `COUNT(*)`. این `count()` نحو ClickHouse خاص. + +**پارامترها** + +این تابع می تواند: + +- صفر پارامتر. +- یک [عبارت](../syntax.md#syntax-expressions). + +**مقدار بازگشتی** + +- اگر تابع بدون پارامتر نامیده می شود تعداد ردیف شمارش. +- اگر [عبارت](../syntax.md#syntax-expressions) به تصویب می رسد, سپس تابع شمارش چند بار این عبارت بازگشت تهی نیست. اگر بیان می گرداند [Nullable](../../sql_reference/data_types/nullable.md)- نوع ارزش و سپس نتیجه `count` باقی نمی ماند `Nullable`. تابع بازده 0 اگر بیان بازگشت `NULL` برای تمام ردیف. + +در هر دو مورد نوع مقدار بازگشتی است [UInt64](../../sql_reference/data_types/int_uint.md). + +**اطلاعات دقیق** + +تاتر از `COUNT(DISTINCT ...)` نحو. رفتار این ساخت و ساز بستگی به [ا\_فزونهها](../../operations/settings/settings.md#settings-count_distinct_implementation) تنظیمات. 
این تعریف می کند که کدام یک از [uniq\*](#agg_function-uniq) توابع برای انجام عملیات استفاده می شود. به طور پیش فرض است [قرارداد اتحادیه](#agg_function-uniqexact) تابع. + +این `SELECT count() FROM table` پرس و جو بهینه سازی شده نیست, چرا که تعداد ورودی در جدول به طور جداگانه ذخیره نمی. این ستون کوچک را از جدول انتخاب می کند و تعداد مقادیر موجود را شمارش می کند. + +**مثالها** + +مثال 1: + +``` sql +SELECT count() FROM t +``` + +``` text +┌─count()─┐ +│ 5 │ +└─────────┘ +``` + +مثال 2: + +``` sql +SELECT name, value FROM system.settings WHERE name = 'count_distinct_implementation' +``` + +``` text +┌─name──────────────────────────┬─value─────┐ +│ count_distinct_implementation │ uniqExact │ +└───────────────────────────────┴───────────┘ +``` + +``` sql +SELECT count(DISTINCT num) FROM t +``` + +``` text +┌─uniqExact(num)─┐ +│ 3 │ +└────────────────┘ +``` + +این مثال نشان می دهد که `count(DISTINCT num)` توسط `uniqExact` عملکرد با توجه به `count_distinct_implementation` مقدار تنظیم. + +## هر) {#agg_function-any} + +انتخاب اولین مقدار مواجه می شوند. +پرس و جو را می توان در هر سفارش و حتی در جهت های مختلف در هر زمان اجرا, بنابراین نتیجه این تابع نامشخص است. +برای دریافت یک نتیجه معین, شما می توانید با استفاده از ‘min’ یا ‘max’ تابع به جای ‘any’. + +در بعضی موارد, شما می توانید در جهت اعدام تکیه. این امر در مورد مواردی که انتخاب می شود از یک زیرخاکی است که از سفارش استفاده می کند. + +هنگامی که یک `SELECT` پرسوجو دارد `GROUP BY` بند و یا حداقل یک مجموع عملکرد ClickHouse (در مقایسه با MySQL) مستلزم آن است که تمام عبارات در `SELECT`, `HAVING` و `ORDER BY` بند از کلید و یا از توابع کل محاسبه می شود. به عبارت دیگر, هر ستون انتخاب شده از جدول باید یا در کلید و یا در داخل توابع دانه استفاده می شود. برای دریافت رفتار مانند خروجی زیر, شما می توانید ستون های دیگر در قرار `any` تابع جمع. + +## هشدار داده می شود) {#anyheavyx} + +انتخاب یک مقدار اغلب اتفاق می افتد با استفاده از [بزرگان سنگین](http://www.cs.umd.edu/~samir/498/karp.pdf) الگوریتم. در صورتی که یک مقدار که بیش از در نیمی از موارد در هر یک از موضوعات اعدام پرس و جو رخ می دهد وجود دارد, این مقدار بازگشته است. به طور معمول نتیجه nondeterministic. + +``` sql +anyHeavy(column) +``` + +**نشانوندها** + +- `column` – The column name. + +**مثال** + +نگاهی به [به موقع](../../getting_started/example_datasets/ontime.md) مجموعه داده ها و انتخاب هر مقدار اغلب اتفاق می افتد در `AirlineID` ستون. + +``` sql +SELECT anyHeavy(AirlineID) AS res +FROM ontime +``` + +``` text +┌───res─┐ +│ 19690 │ +└───────┘ +``` + +## حداقل صفحه نمایش:) {#anylastx} + +ارزش گذشته مواجه می شوند را انتخاب می کند. +نتیجه این است که فقط به عنوان نامشخص به عنوان برای `any` تابع. + +## گروه بیتاند {#groupbitand} + +اعمال بیتی `AND` برای مجموعه ای از اعداد. + +``` sql +groupBitAnd(expr) +``` + +**پارامترها** + +`expr` – An expression that results in `UInt*` نوع. + +**مقدار بازگشتی** + +ارزش `UInt*` نوع. + +**مثال** + +داده های تست: + +``` text +binary decimal +00101100 = 44 +00011100 = 28 +00001101 = 13 +01010101 = 85 +``` + +پرسوجو: + +``` sql +SELECT groupBitAnd(num) FROM t +``` + +کجا `num` ستون با داده های تست است. + +نتیجه: + +``` text +binary decimal +00000100 = 4 +``` + +## ویرایشگر گروه {#groupbitor} + +اعمال بیتی `OR` برای مجموعه ای از اعداد. + +``` sql +groupBitOr(expr) +``` + +**پارامترها** + +`expr` – An expression that results in `UInt*` نوع. + +**مقدار بازگشتی** + +ارزش `UInt*` نوع. 
+ +**مثال** + +داده های تست: + +``` text +binary decimal +00101100 = 44 +00011100 = 28 +00001101 = 13 +01010101 = 85 +``` + +پرسوجو: + +``` sql +SELECT groupBitOr(num) FROM t +``` + +کجا `num` ستون با داده های تست است. + +نتیجه: + +``` text +binary decimal +01111101 = 125 +``` + +## گروهبیتکسور {#groupbitxor} + +شامل اعضای اتحادیه اروپا `XOR` برای مجموعه ای از اعداد. + +``` sql +groupBitXor(expr) +``` + +**پارامترها** + +`expr` – An expression that results in `UInt*` نوع. + +**مقدار بازگشتی** + +ارزش `UInt*` نوع. + +**مثال** + +داده های تست: + +``` text +binary decimal +00101100 = 44 +00011100 = 28 +00001101 = 13 +01010101 = 85 +``` + +پرسوجو: + +``` sql +SELECT groupBitXor(num) FROM t +``` + +کجا `num` ستون با داده های تست است. + +نتیجه: + +``` text +binary decimal +01101000 = 104 +``` + +## نگاشت گروهی {#groupbitmap} + +بیت مپ و یا کل محاسبات از یک unsigned integer ستون بازگشت cardinality از نوع uint64 اگر اضافه کردن پسوند -دولت بازگشت [شی نگاشت بیت](../../sql_reference/functions/bitmap_functions.md). + +``` sql +groupBitmap(expr) +``` + +**پارامترها** + +`expr` – An expression that results in `UInt*` نوع. + +**مقدار بازگشتی** + +ارزش `UInt64` نوع. + +**مثال** + +داده های تست: + +``` text +UserID +1 +1 +2 +3 +``` + +پرسوجو: + +``` sql +SELECT groupBitmap(UserID) as num FROM t +``` + +نتیجه: + +``` text +num +3 +``` + +## کمینه) {#agg_function-min} + +محاسبه حداقل. + +## بیشینه) {#agg_function-max} + +محاسبه حداکثر. + +## هشدار داده می شود) {#agg-function-argmin} + +محاسبه ‘arg’ ارزش برای حداقل ‘val’ ارزش. اگر چندین مقدار مختلف وجود دارد ‘arg’ برای مقادیر حداقل ‘val’ اولین بار از این مقادیر مواجه خروجی است. + +**مثال:** + +``` text +┌─user─────┬─salary─┐ +│ director │ 5000 │ +│ manager │ 3000 │ +│ worker │ 1000 │ +└──────────┴────────┘ +``` + +``` sql +SELECT argMin(user, salary) FROM salary +``` + +``` text +┌─argMin(user, salary)─┐ +│ worker │ +└──────────────────────┘ +``` + +## هشدار داده می شود) {#agg-function-argmax} + +محاسبه ‘arg’ مقدار برای حداکثر ‘val’ ارزش. اگر چندین مقدار مختلف وجود دارد ‘arg’ برای حداکثر مقادیر ‘val’ اولین بار از این مقادیر مواجه خروجی است. + +## جمع) {#agg_function-sum} + +محاسبه مجموع. +فقط برای اعداد کار می کند. + +## ورود به سیستم) {#sumwithoverflowx} + +محاسبه مجموع اعداد, با استفاده از همان نوع داده برای نتیجه به عنوان پارامترهای ورودی. اگر مجموع بیش از حداکثر مقدار برای این نوع داده, تابع یک خطا می گرداند. + +فقط برای اعداد کار می کند. + +## sumMap(key, value) {#agg_functions-summap} + +مجموع ‘value’ تنظیم با توجه به کلید های مشخص شده در ‘key’ صف کردن. +تعداد عناصر در ‘key’ و ‘value’ باید همین کار را برای هر سطر است که بالغ بر شود. +Returns a tuple of two arrays: keys in sorted order, and values ​​summed for the corresponding keys. 
+ +مثال: + +``` sql +CREATE TABLE sum_map( + date Date, + timeslot DateTime, + statusMap Nested( + status UInt16, + requests UInt64 + ) +) ENGINE = Log; +INSERT INTO sum_map VALUES + ('2000-01-01', '2000-01-01 00:00:00', [1, 2, 3], [10, 10, 10]), + ('2000-01-01', '2000-01-01 00:00:00', [3, 4, 5], [10, 10, 10]), + ('2000-01-01', '2000-01-01 00:01:00', [4, 5, 6], [10, 10, 10]), + ('2000-01-01', '2000-01-01 00:01:00', [6, 7, 8], [10, 10, 10]); +SELECT + timeslot, + sumMap(statusMap.status, statusMap.requests) +FROM sum_map +GROUP BY timeslot +``` + +``` text +┌────────────timeslot─┬─sumMap(statusMap.status, statusMap.requests)─┐ +│ 2000-01-01 00:00:00 │ ([1,2,3,4,5],[10,10,20,10,10]) │ +│ 2000-01-01 00:01:00 │ ([4,5,6,7,8],[10,10,20,10,10]) │ +└─────────────────────┴──────────────────────────────────────────────┘ +``` + +## سیخ کباب {#skewpop} + +محاسبه [skewness](https://en.wikipedia.org/wiki/Skewness) از یک توالی. + +``` sql +skewPop(expr) +``` + +**پارامترها** + +`expr` — [عبارت](../syntax.md#syntax-expressions) بازگشت یک عدد. + +**مقدار بازگشتی** + +The skewness of the given distribution. Type — [جسم شناور64](../../sql_reference/data_types/float.md) + +**مثال** + +``` sql +SELECT skewPop(value) FROM series_with_value_column +``` + +## سیخ {#skewsamp} + +محاسبه [نمونه skewness](https://en.wikipedia.org/wiki/Skewness) از یک توالی. + +این نشان دهنده یک تخمین بی طرفانه از اریب یک متغیر تصادفی اگر ارزش گذشت نمونه خود را تشکیل می دهند. + +``` sql +skewSamp(expr) +``` + +**پارامترها** + +`expr` — [عبارت](../syntax.md#syntax-expressions) بازگشت یک عدد. + +**مقدار بازگشتی** + +The skewness of the given distribution. Type — [جسم شناور64](../../sql_reference/data_types/float.md). اگر `n <= 1` (`n` اندازه نمونه است), سپس بازده تابع `nan`. + +**مثال** + +``` sql +SELECT skewSamp(value) FROM series_with_value_column +``` + +## کورتپ {#kurtpop} + +محاسبه [kurtosis](https://en.wikipedia.org/wiki/Kurtosis) از یک توالی. + +``` sql +kurtPop(expr) +``` + +**پارامترها** + +`expr` — [عبارت](../syntax.md#syntax-expressions) بازگشت یک عدد. + +**مقدار بازگشتی** + +The kurtosis of the given distribution. Type — [جسم شناور64](../../sql_reference/data_types/float.md) + +**مثال** + +``` sql +SELECT kurtPop(value) FROM series_with_value_column +``` + +## کردها {#kurtsamp} + +محاسبه [نمونه kurtosis](https://en.wikipedia.org/wiki/Kurtosis) از یک توالی. + +این نشان دهنده یک تخمین بی طرفانه از کورتوز یک متغیر تصادفی اگر ارزش گذشت نمونه خود را تشکیل می دهند. + +``` sql +kurtSamp(expr) +``` + +**پارامترها** + +`expr` — [عبارت](../syntax.md#syntax-expressions) بازگشت یک عدد. + +**مقدار بازگشتی** + +The kurtosis of the given distribution. Type — [جسم شناور64](../../sql_reference/data_types/float.md). اگر `n <= 1` (`n` اندازه نمونه است) و سپس تابع بازده `nan`. + +**مثال** + +``` sql +SELECT kurtSamp(value) FROM series_with_value_column +``` + +## هشدار داده می شود) {#agg-function-timeseriesgroupsum} + +`timeSeriesGroupSum` می توانید سری های زمانی مختلف که برچسب زمان نمونه هم ترازی جمع نمی. +این برون یابی خطی بین دو برچسب زمان نمونه و سپس مجموع زمان سری با هم استفاده کنید. + +- `uid` سری زمان شناسه منحصر به فرد است, `UInt64`. +- `timestamp` است نوع درون64 به منظور حمایت میلی ثانیه یا میکروثانیه. +- `value` متریک است. + +تابع گرداند مجموعه ای از تاپل با `(timestamp, aggregated_value)` جفت + +قبل از استفاده از این تابع اطمینان حاصل کنید `timestamp` به ترتیب صعودی است. 
+
+مثال:
+
+``` text
+┌─uid─┬─timestamp─┬─value─┐
+│   1 │         2 │   0.2 │
+│   1 │         7 │   0.7 │
+│   1 │        12 │   1.2 │
+│   1 │        17 │   1.7 │
+│   1 │        25 │   2.5 │
+│   2 │         3 │   0.6 │
+│   2 │         8 │   1.6 │
+│   2 │        12 │   2.4 │
+│   2 │        18 │   3.6 │
+│   2 │        24 │   4.8 │
+└─────┴───────────┴───────┘
+```
+
+``` sql
+CREATE TABLE time_series(
+    uid       UInt64,
+    timestamp Int64,
+    value     Float64
+) ENGINE = Memory;
+INSERT INTO time_series VALUES
+    (1,2,0.2),(1,7,0.7),(1,12,1.2),(1,17,1.7),(1,25,2.5),
+    (2,3,0.6),(2,8,1.6),(2,12,2.4),(2,18,3.6),(2,24,4.8);
+
+SELECT timeSeriesGroupSum(uid, timestamp, value)
+FROM (
+    SELECT * FROM time_series order by timestamp ASC
+);
+```
+
+و نتیجه خواهد بود:
+
+``` text
+[(2,0.2),(3,0.9),(7,2.1),(8,2.4),(12,3.6),(17,5.1),(18,5.4),(24,7.2),(25,2.5)]
+```
+
+## هشدار داده می شود) {#agg-function-timeseriesgroupratesum}
+
+مشابه تابع `timeSeriesGroupSum`، تابع `timeSeriesGroupRateSum` ابتدا نرخ هر سری زمانی را محاسبه می کند و سپس نرخ ها را با هم جمع می کند.
+همچنین، برچسب های زمان باید پیش از استفاده از این تابع به ترتیب صعودی باشند.
+
+با استفاده از این تابع نتیجه مورد بالا خواهد بود:
+
+``` text
+[(2,0),(3,0.1),(7,0.3),(8,0.3),(12,0.3),(17,0.3),(18,0.3),(24,0.3),(25,0.1)]
+```
+
+## میانگین) {#agg_function-avg}
+
+محاسبه متوسط.
+فقط برای اعداد کار می کند.
+نتیجه این است که همیشه شناور64.
+
+## uniq {#agg_function-uniq}
+
+محاسبه تعداد تقریبی مقادیر مختلف استدلال.
+
+``` sql
+uniq(x[, ...])
+```
+
+**پارامترها**
+
+تابع طول می کشد تعداد متغیر از پارامترهای. پارامترها می توانند باشند `Tuple`, `Array`, `Date`, `DateTime`, `String`, یا انواع عددی.
+
+**مقدار بازگشتی**
+
+- A [UInt64](../../sql_reference/data_types/int_uint.md)-نوع شماره.
+
+**پیاده سازی اطلاعات**
+
+تابع:
+
+- هش را برای تمام پارامترها در مجموع محاسبه می کند و سپس در محاسبات استفاده می شود.
+
+- با استفاده از یک تطبیقی نمونه الگوریتم. برای محاسبه دولت تابع با استفاده از یک نمونه از عنصر هش ارزش تا 65536.
+
+    This algorithm is very accurate and very efficient on the CPU. When the query contains several of these functions, using `uniq` is almost as fast as using other aggregate functions.
+
+- نتیجه را تعیین می کند (به سفارش پردازش پرس و جو بستگی ندارد).
+
+ما توصیه می کنیم با استفاده از این تابع تقریبا در تمام حالات.
+
+**همچنین نگاه کنید**
+
+- [مخلوط نشده](#agg_function-uniqcombined)
+- [نیم قرن 64](#agg_function-uniqcombined64)
+- [یونقلل12](#agg_function-uniqhll12)
+- [قرارداد اتحادیه](#agg_function-uniqexact)
+
+## uniqCombined {#agg_function-uniqcombined}
+
+محاسبه تعداد تقریبی مقادیر استدلال های مختلف.
+
+``` sql
+uniqCombined(HLL_precision)(x[, ...])
+```
+
+این `uniqCombined` تابع یک انتخاب خوب برای محاسبه تعداد مقادیر مختلف است.
+
+**پارامترها**
+
+تابع طول می کشد تعداد متغیر از پارامترهای. پارامترها می توانند باشند `Tuple`, `Array`, `Date`, `DateTime`, `String`, یا انواع عددی.
+
+`HLL_precision` پایه-2 لگاریتم تعداد سلول ها در [جمع شدن](https://en.wikipedia.org/wiki/HyperLogLog). اختیاری, شما می توانید تابع به عنوان استفاده `uniqCombined(x[, ...])`. مقدار پیش فرض برای `HLL_precision` است 17, که به طور موثر 96 کیلوبایت فضا (2^17 سلول ها, 6 بیت در هر).
+
+**مقدار بازگشتی**
+
+- یک عدد [UInt64](../../sql_reference/data_types/int_uint.md)- نوع شماره .
+
+**پیاده سازی اطلاعات**
+
+تابع:
+
+- محاسبه هش (هش 64 بیتی برای `String` و در غیر این صورت 32 بیتی) برای تمام پارامترها در مجموع و سپس در محاسبات استفاده می شود.
+
+- با استفاده از ترکیبی از سه الگوریتم: مجموعه, جدول هش, و جمع شدن با جدول تصحیح خطا.
+
+    For a small number of distinct elements, an array is used. When the set size is larger, a hash table is used. 
For a larger number of elements, HyperLogLog is used, which will occupy a fixed amount of memory. + +- نتیجه را تعیین می کند (به سفارش پردازش پرس و جو بستگی ندارد). + +!!! note "یادداشت" + از هش 32 بیتی برای غیر استفاده می کند-`String` نوع, نتیجه خطا بسیار بالا برای کاریت به طور قابل توجهی بزرگتر از اند `UINT_MAX` (خطا به سرعت پس از چند ده میلیارد ارزش متمایز افزایش خواهد یافت), از این رو در این مورد شما باید استفاده کنید [نیم قرن 64](#agg_function-uniqcombined64) + +در مقایسه با [uniq](#agg_function-uniq) عملکرد `uniqCombined`: + +- مصرف چندین بار حافظه کمتر. +- محاسبه با دقت چند بار بالاتر است. +- معمولا عملکرد کمی پایین تر است. در برخی از حالات, `uniqCombined` می توانید بهتر از انجام `uniq` برای مثال با توزیع نمایش داده شد که انتقال تعداد زیادی از جمع متحده بر روی شبکه. + +**همچنین نگاه کنید** + +- [دانشگاه](#agg_function-uniq) +- [نیم قرن 64](#agg_function-uniqcombined64) +- [یونقلل12](#agg_function-uniqhll12) +- [قرارداد اتحادیه](#agg_function-uniqexact) + +## نیم قرن 64 {#agg_function-uniqcombined64} + +مثل [مخلوط نشده](#agg_function-uniqcombined), اما با استفاده از هش 64 بیتی برای تمام انواع داده ها. + +## یونقلل12 {#agg_function-uniqhll12} + +محاسبه تعداد تقریبی مقادیر استدلال های مختلف, با استفاده از [جمع شدن](https://en.wikipedia.org/wiki/HyperLogLog) الگوریتم. + +``` sql +uniqHLL12(x[, ...]) +``` + +**پارامترها** + +این تابع یک متغیر تعدادی از پارامترهای. پارامترهای می تواند `Tuple`, `Array`, `Date`, `DateTime`, `String`, یا انواع عددی. + +**مقدار بازگشتی** + +- A [UInt64](../../sql_reference/data_types/int_uint.md)-نوع شماره. + +**پیاده سازی اطلاعات** + +تابع: + +- هش را برای تمام پارامترها در مجموع محاسبه می کند و سپس در محاسبات استفاده می شود. + +- با استفاده از الگوریتم جمع شدن تقریبی تعداد مقادیر استدلال های مختلف. + + 212 5-bit cells are used. The size of the state is slightly more than 2.5 KB. The result is not very accurate (up to ~10% error) for small data sets (<10K elements). However, the result is fairly accurate for high-cardinality data sets (10K-100M), with a maximum error of ~1.6%. Starting from 100M, the estimation error increases, and the function will return very inaccurate results for data sets with extremely high cardinality (1B+ elements). + +- نتیجه تعیین شده را فراهم می کند (به سفارش پردازش پرس و جو بستگی ندارد). + +ما توصیه نمی کنیم با استفاده از این تابع. در اغلب موارد از [دانشگاه](#agg_function-uniq) یا [مخلوط نشده](#agg_function-uniqcombined) تابع. + +**همچنین نگاه کنید** + +- [دانشگاه](#agg_function-uniq) +- [مخلوط نشده](#agg_function-uniqcombined) +- [قرارداد اتحادیه](#agg_function-uniqexact) + +## قرارداد اتحادیه {#agg_function-uniqexact} + +محاسبه تعداد دقیق ارزش استدلال های مختلف. + +``` sql +uniqExact(x[, ...]) +``` + +استفاده از `uniqExact` تابع اگر شما کاملا نیاز به یک نتیجه دقیق. در غیر این صورت استفاده از [uniq](#agg_function-uniq) تابع. + +این `uniqExact` تابع با استفاده از حافظه بیش از `uniq`, چرا که اندازه دولت رشد گشوده است به عنوان تعدادی از ارزش های مختلف را افزایش می دهد. + +**پارامترها** + +تابع طول می کشد تعداد متغیر از پارامترهای. پارامترها می توانند باشند `Tuple`, `Array`, `Date`, `DateTime`, `String`, یا انواع عددی. + +**همچنین نگاه کنید به** + +- [uniq](#agg_function-uniq) +- [مخلوط نشده](#agg_function-uniqcombined) +- [یونقلل12](#agg_function-uniqhll12) + +## groupArray(x) groupArray(max\_size)(x) {#agg_function-grouparray} + +مجموعه ای از مقادیر استدلال را ایجاد می کند. +مقادیر را می توان به ترتیب در هر (نامعین) اضافه کرد. + +نسخه دوم (با `max_size` پارامتر) اندازه مجموعه حاصل را محدود می کند `max_size` عناصر. 
+به عنوان مثال, `groupArray (1) (x)` معادل است `[any (x)]`. + +در بعضی موارد, شما هنوز هم می توانید در جهت اعدام تکیه. این امر در مورد مواردی که `SELECT` همراه از یک خرده فروشی که با استفاده از `ORDER BY`. + +## ارزش موقعیت) {#grouparrayinsertatvalue-position} + +مقدار را به مجموعه ای در موقعیت مشخص شده وارد می کند. + +!!! note "یادداشت" + این تابع با استفاده از موقعیت های مبتنی بر صفر, بر خلاف موقعیت های معمولی مبتنی بر یک برای فرود میدان. + +Accepts the value and position as input. If several values ​​are inserted into the same position, any of them might end up in the resulting array (the first one will be used in the case of single-threaded execution). If no value is inserted into a position, the position is assigned the default value. + +پارامترهای اختیاری: + +- مقدار پیش فرض برای جایگزینی در موقعیت های خالی. +- طول مجموعه حاصل. این اجازه می دهد تا شما را به دریافت مجموعه ای از همان اندازه برای تمام کلید های کل. هنگام استفاده از این پارامتر, مقدار پیش فرض باید مشخص شود. + +## هشدار داده می شود {#agg_function-grouparraymovingsum} + +محاسبه مجموع در حال حرکت از ارزش های ورودی. + +``` sql +groupArrayMovingSum(numbers_for_summing) +groupArrayMovingSum(window_size)(numbers_for_summing) +``` + +این تابع می تواند اندازه پنجره به عنوان یک پارامتر را. اگر سمت چپ نامشخص, تابع طول می کشد اندازه پنجره به تعداد ردیف در ستون برابر. + +**پارامترها** + +- `numbers_for_summing` — [عبارت](../syntax.md#syntax-expressions) در نتیجه یک مقدار نوع داده عددی. +- `window_size` — Size of the calculation window. + +**مقادیر بازگشتی** + +- مجموعه ای از همان اندازه و نوع به عنوان داده های ورودی. + +**مثال** + +جدول نمونه: + +``` sql +CREATE TABLE t +( + `int` UInt8, + `float` Float32, + `dec` Decimal32(2) +) +ENGINE = TinyLog +``` + +``` text +┌─int─┬─float─┬──dec─┐ +│ 1 │ 1.1 │ 1.10 │ +│ 2 │ 2.2 │ 2.20 │ +│ 4 │ 4.4 │ 4.40 │ +│ 7 │ 7.77 │ 7.77 │ +└─────┴───────┴──────┘ +``` + +نمایش داده شد: + +``` sql +SELECT + groupArrayMovingSum(int) AS I, + groupArrayMovingSum(float) AS F, + groupArrayMovingSum(dec) AS D +FROM t +``` + +``` text +┌─I──────────┬─F───────────────────────────────┬─D──────────────────────┐ +│ [1,3,7,14] │ [1.1,3.3000002,7.7000003,15.47] │ [1.10,3.30,7.70,15.47] │ +└────────────┴─────────────────────────────────┴────────────────────────┘ +``` + +``` sql +SELECT + groupArrayMovingSum(2)(int) AS I, + groupArrayMovingSum(2)(float) AS F, + groupArrayMovingSum(2)(dec) AS D +FROM t +``` + +``` text +┌─I──────────┬─F───────────────────────────────┬─D──────────────────────┐ +│ [1,3,6,11] │ [1.1,3.3000002,6.6000004,12.17] │ [1.10,3.30,6.60,12.17] │ +└────────────┴─────────────────────────────────┴────────────────────────┘ +``` + +## گروهاریموینگاوگ {#agg_function-grouparraymovingavg} + +محاسبه میانگین متحرک از ارزش های ورودی. + +``` sql +groupArrayMovingAvg(numbers_for_summing) +groupArrayMovingAvg(window_size)(numbers_for_summing) +``` + +این تابع می تواند اندازه پنجره به عنوان یک پارامتر را. اگر سمت چپ نامشخص, تابع طول می کشد اندازه پنجره به تعداد ردیف در ستون برابر. + +**پارامترها** + +- `numbers_for_summing` — [عبارت](../syntax.md#syntax-expressions) در نتیجه یک مقدار نوع داده عددی. +- `window_size` — Size of the calculation window. + +**مقادیر بازگشتی** + +- مجموعه ای از همان اندازه و نوع به عنوان داده های ورودی. + +تابع استفاده می کند [گرد کردن به سمت صفر](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero). این کوتاه رقم اعشار ناچیز برای نوع داده و در نتیجه. 
+ +**مثال** + +جدول نمونه `b`: + +``` sql +CREATE TABLE t +( + `int` UInt8, + `float` Float32, + `dec` Decimal32(2) +) +ENGINE = TinyLog +``` + +``` text +┌─int─┬─float─┬──dec─┐ +│ 1 │ 1.1 │ 1.10 │ +│ 2 │ 2.2 │ 2.20 │ +│ 4 │ 4.4 │ 4.40 │ +│ 7 │ 7.77 │ 7.77 │ +└─────┴───────┴──────┘ +``` + +نمایش داده شد: + +``` sql +SELECT + groupArrayMovingAvg(int) AS I, + groupArrayMovingAvg(float) AS F, + groupArrayMovingAvg(dec) AS D +FROM t +``` + +``` text +┌─I─────────┬─F───────────────────────────────────┬─D─────────────────────┐ +│ [0,0,1,3] │ [0.275,0.82500005,1.9250001,3.8675] │ [0.27,0.82,1.92,3.86] │ +└───────────┴─────────────────────────────────────┴───────────────────────┘ +``` + +``` sql +SELECT + groupArrayMovingAvg(2)(int) AS I, + groupArrayMovingAvg(2)(float) AS F, + groupArrayMovingAvg(2)(dec) AS D +FROM t +``` + +``` text +┌─I─────────┬─F────────────────────────────────┬─D─────────────────────┐ +│ [0,1,3,5] │ [0.55,1.6500001,3.3000002,6.085] │ [0.55,1.65,3.30,6.08] │ +└───────────┴──────────────────────────────────┴───────────────────────┘ +``` + +## groupUniqArray(x) groupUniqArray(max\_size)(x) {#groupuniqarrayx-groupuniqarraymax-sizex} + +مجموعه ای از مقادیر مختلف استدلال ایجاد می کند. مصرف حافظه همان است که برای `uniqExact` تابع. + +نسخه دوم (با `max_size` پارامتر) اندازه مجموعه حاصل را محدود می کند `max_size` عناصر. +به عنوان مثال, `groupUniqArray(1)(x)` معادل است `[any(x)]`. + +## quantile {#quantile} + +محاسبه تقریبی [quantile](https://en.wikipedia.org/wiki/Quantile) از یک توالی داده های عددی. + +این تابع اعمال می شود [نمونه برداری مخزن](https://en.wikipedia.org/wiki/Reservoir_sampling) با اندازه مخزن تا 8192 و یک مولد عدد تصادفی برای نمونه برداری. نتیجه غیر قطعی است. برای دریافت یک کمی دقیق, استفاده از [کوانتوم](#quantileexact) تابع. + +هنگام استفاده از چندین `quantile*` توابع با سطوح مختلف در پرس و جو, کشورهای داخلی در ترکیب نیست (به این معنا که, پرس و جو کار می کند موثر کمتر از می تواند). در این مورد از [quantiles](#quantiles) تابع. + +**نحو** + +``` sql +quantile(level)(expr) +``` + +نام مستعار: `median`. + +**پارامترها** + +- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` مقدار در محدوده `[0.01, 0.99]`. مقدار پیش فرض: 0.5. در `level=0.5` تابع محاسبه می کند [میانه](https://en.wikipedia.org/wiki/Median). +- `expr` — Expression over the column values resulting in numeric [انواع داده ها](../../sql_reference/data_types/index.md#data_types), [تاریخ](../../sql_reference/data_types/date.md) یا [DateTime](../../sql_reference/data_types/datetime.md). + +**مقدار بازگشتی** + +- کمی تقریبی از سطح مشخص شده است. + +نوع: + +- [جسم شناور64](../../sql_reference/data_types/float.md) برای ورودی نوع داده عددی. +- [تاریخ](../../sql_reference/data_types/date.md) اگر مقادیر ورودی `Date` نوع. +- [DateTime](../../sql_reference/data_types/datetime.md) اگر مقادیر ورودی `DateTime` نوع. + +**مثال** + +جدول ورودی: + +``` text +┌─val─┐ +│ 1 │ +│ 1 │ +│ 2 │ +│ 3 │ +└─────┘ +``` + +پرسوجو: + +``` sql +SELECT quantile(val) FROM t +``` + +نتیجه: + +``` text +┌─quantile(val)─┐ +│ 1.5 │ +└───────────────┘ +``` + +**همچنین نگاه کنید به** + +- [میانه](#median) +- [quantiles](#quantiles) + +## نامعینیهای کوانتی {#quantiledeterministic} + +محاسبه تقریبی [quantile](https://en.wikipedia.org/wiki/Quantile) از یک توالی داده های عددی. + +این تابع اعمال می شود [نمونه برداری مخزن](https://en.wikipedia.org/wiki/Reservoir_sampling) با اندازه مخزن تا 8192 و الگوریتم قطعی نمونه گیری. نتیجه قطعی است. 
برای دریافت یک کمی دقیق, استفاده از [کوانتوم](#quantileexact) تابع. + +هنگام استفاده از چندین `quantile*` توابع با سطوح مختلف در پرس و جو, کشورهای داخلی در ترکیب نیست (به این معنا که, پرس و جو کار می کند موثر کمتر از می تواند). در این مورد از [quantiles](#quantiles) تابع. + +**نحو** + +``` sql +quantileDeterministic(level)(expr, determinator) +``` + +نام مستعار: `medianDeterministic`. + +**پارامترها** + +- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` مقدار در محدوده `[0.01, 0.99]`. مقدار پیش فرض: 0.5. در `level=0.5` تابع محاسبه می کند [میانه](https://en.wikipedia.org/wiki/Median). +- `expr` — Expression over the column values resulting in numeric [انواع داده ها](../../sql_reference/data_types/index.md#data_types), [تاریخ](../../sql_reference/data_types/date.md) یا [DateTime](../../sql_reference/data_types/datetime.md). +- `determinator` — Number whose hash is used instead of a random number generator in the reservoir sampling algorithm to make the result of sampling deterministic. As a determinator you can use any deterministic positive number, for example, a user id or an event id. If the same determinator value occures too often, the function works incorrectly. + +**مقدار بازگشتی** + +- کمی تقریبی از سطح مشخص شده است. + +نوع: + +- [جسم شناور64](../../sql_reference/data_types/float.md) برای ورودی نوع داده عددی. +- [تاریخ](../../sql_reference/data_types/date.md) اگر مقادیر ورودی `Date` نوع. +- [DateTime](../../sql_reference/data_types/datetime.md) اگر مقادیر ورودی `DateTime` نوع. + +**مثال** + +جدول ورودی: + +``` text +┌─val─┐ +│ 1 │ +│ 1 │ +│ 2 │ +│ 3 │ +└─────┘ +``` + +پرسوجو: + +``` sql +SELECT quantileDeterministic(val, 1) FROM t +``` + +نتیجه: + +``` text +┌─quantileDeterministic(val, 1)─┐ +│ 1.5 │ +└───────────────────────────────┘ +``` + +**همچنین نگاه کنید** + +- [میانه](#median) +- [quantiles](#quantiles) + +## کوانتوم {#quantileexact} + +دقیقا محاسبه می کند [quantile](https://en.wikipedia.org/wiki/Quantile) از یک توالی داده های عددی. + +To get exact value, all the passed values ​​are combined into an array, which is then partially sorted. Therefore, the function consumes `O(n)` حافظه, جایی که `n` تعدادی از ارزش هایی که تصویب شد. اما, برای تعداد کمی از ارزش, تابع بسیار موثر است. + +هنگام استفاده از چندین `quantile*` توابع با سطوح مختلف در پرس و جو, کشورهای داخلی در ترکیب نیست (به این معنا که, پرس و جو کار می کند موثر کمتر از می تواند). در این مورد از [quantiles](#quantiles) تابع. + +**نحو** + +``` sql +quantileExact(level)(expr) +``` + +نام مستعار: `medianExact`. + +**پارامترها** + +- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` مقدار در محدوده `[0.01, 0.99]`. مقدار پیش فرض: 0.5. در `level=0.5` تابع محاسبه می کند [میانه](https://en.wikipedia.org/wiki/Median). +- `expr` — Expression over the column values resulting in numeric [انواع داده ها](../../sql_reference/data_types/index.md#data_types), [تاریخ](../../sql_reference/data_types/date.md) یا [DateTime](../../sql_reference/data_types/datetime.md). + +**مقدار بازگشتی** + +- Quantile از سطح مشخص شده. + +نوع: + +- [جسم شناور64](../../sql_reference/data_types/float.md) برای ورودی نوع داده عددی. +- [تاریخ](../../sql_reference/data_types/date.md) اگر مقادیر ورودی `Date` نوع. +- [DateTime](../../sql_reference/data_types/datetime.md) اگر مقادیر ورودی `DateTime` نوع. 
+ +**مثال** + +پرسوجو: + +``` sql +SELECT quantileExact(number) FROM numbers(10) +``` + +نتیجه: + +``` text +┌─quantileExact(number)─┐ +│ 5 │ +└───────────────────────┘ +``` + +**همچنین نگاه کنید** + +- [میانه](#median) +- [quantiles](#quantiles) + +## نمایش سایت {#quantileexactweighted} + +دقیقا محاسبه می کند [quantile](https://en.wikipedia.org/wiki/Quantile) از یک توالی داده های عددی, با در نظر گرفتن وزن هر عنصر. + +To get exact value, all the passed values ​​are combined into an array, which is then partially sorted. Each value is counted with its weight, as if it is present `weight` times. A hash table is used in the algorithm. Because of this, if the passed values ​​are frequently repeated, the function consumes less RAM than [کوانتوم](#quantileexact). شما می توانید این تابع به جای استفاده از `quantileExact` و وزن 1 را مشخص کنید. + +هنگام استفاده از چندین `quantile*` توابع با سطوح مختلف در پرس و جو, کشورهای داخلی در ترکیب نیست (به این معنا که, پرس و جو کار می کند موثر کمتر از می تواند). در این مورد از [quantiles](#quantiles) تابع. + +**نحو** + +``` sql +quantileExactWeighted(level)(expr, weight) +``` + +نام مستعار: `medianExactWeighted`. + +**پارامترها** + +- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` مقدار در محدوده `[0.01, 0.99]`. مقدار پیش فرض: 0.5. در `level=0.5` تابع محاسبه می کند [میانه](https://en.wikipedia.org/wiki/Median). +- `expr` — Expression over the column values resulting in numeric [انواع داده ها](../../sql_reference/data_types/index.md#data_types), [تاریخ](../../sql_reference/data_types/date.md) یا [DateTime](../../sql_reference/data_types/datetime.md). +- `weight` — Column with weights of sequence members. Weight is a number of value occurrences. + +**مقدار بازگشتی** + +- Quantile از سطح مشخص شده. + +نوع: + +- [جسم شناور64](../../sql_reference/data_types/float.md) برای ورودی نوع داده عددی. +- [تاریخ](../../sql_reference/data_types/date.md) اگر مقادیر ورودی `Date` نوع. +- [DateTime](../../sql_reference/data_types/datetime.md) اگر مقادیر ورودی `DateTime` نوع. + +**مثال** + +جدول ورودی: + +``` text +┌─n─┬─val─┐ +│ 0 │ 3 │ +│ 1 │ 2 │ +│ 2 │ 1 │ +│ 5 │ 4 │ +└───┴─────┘ +``` + +پرسوجو: + +``` sql +SELECT quantileExactWeighted(n, val) FROM t +``` + +نتیجه: + +``` text +┌─quantileExactWeighted(n, val)─┐ +│ 1 │ +└───────────────────────────────┘ +``` + +**همچنین نگاه کنید به** + +- [میانه](#median) +- [quantiles](#quantiles) + +## زمان کمی {#quantiletiming} + +با دقت تعیین شده محاسبه می شود [quantile](https://en.wikipedia.org/wiki/Quantile) از یک توالی داده های عددی. + +نتیجه قطعی است(به سفارش پردازش پرس و جو بستگی ندارد). این تابع برای کار با توالی هایی که توزیع هایی مانند بارگذاری صفحات وب بار یا زمان پاسخ باطن را توصیف می کنند بهینه شده است. + +هنگام استفاده از چندین `quantile*` توابع با سطوح مختلف در پرس و جو, کشورهای داخلی در ترکیب نیست (به این معنا که, پرس و جو کار می کند موثر کمتر از می تواند). در این مورد از [quantiles](#quantiles) تابع. + +**نحو** + +``` sql +quantileTiming(level)(expr) +``` + +نام مستعار: `medianTiming`. + +**پارامترها** + +- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` مقدار در محدوده `[0.01, 0.99]`. مقدار پیش فرض: 0.5. در `level=0.5` تابع محاسبه می کند [میانه](https://en.wikipedia.org/wiki/Median). + +- `expr` — [عبارت](../syntax.md#syntax-expressions) بیش از یک مقادیر ستون بازگشت [شناور\*](../../sql_reference/data_types/float.md)-نوع شماره. 
+ + - If negative values are passed to the function, the behavior is undefined. + - If the value is greater than 30,000 (a page loading time of more than 30 seconds), it is assumed to be 30,000. + +**دقت** + +محاسبه دقیق است اگر: + +- تعداد کل مقادیر 5670 تجاوز نمی کند. +- تعداد کل مقادیر بیش از 5670, اما زمان بارگذاری صفحه کمتر از است 1024خانم. + +در غیر این صورت, نتیجه محاسبه به نزدیکترین چند از گرد 16 خانم. + +!!! note "یادداشت" + برای محاسبه زمان بارگذاری صفحه quantiles این تابع این است که موثر تر و دقیق تر از [quantile](#quantile). + +**مقدار بازگشتی** + +- Quantile از سطح مشخص شده. + +نوع: `Float32`. + +!!! note "یادداشت" + اگر هیچ ارزش به تابع منتقل می شود (هنگام استفاده از `quantileTimingIf`), [نان](../../sql_reference/data_types/float.md#data_type-float-nan-inf) بازگشته است. هدف از این است که افتراق این موارد از مواردی که منجر به صفر. ببینید [ORDER BY](../statements/select.md#select-order-by) برای یادداشت ها در مرتب سازی `NaN` ارزشهای خبری عبارتند از: + +**مثال** + +جدول ورودی: + +``` text +┌─response_time─┐ +│ 72 │ +│ 112 │ +│ 126 │ +│ 145 │ +│ 104 │ +│ 242 │ +│ 313 │ +│ 168 │ +│ 108 │ +└───────────────┘ +``` + +پرسوجو: + +``` sql +SELECT quantileTiming(response_time) FROM t +``` + +نتیجه: + +``` text +┌─quantileTiming(response_time)─┐ +│ 126 │ +└───────────────────────────────┘ +``` + +**همچنین نگاه کنید به** + +- [میانه](#median) +- [quantiles](#quantiles) + +## زمان کمی {#quantiletimingweighted} + +با دقت تعیین شده محاسبه می شود [quantile](https://en.wikipedia.org/wiki/Quantile) از یک توالی داده های عددی با توجه به وزن هر یک از اعضای دنباله. + +نتیجه قطعی است(به سفارش پردازش پرس و جو بستگی ندارد). این تابع برای کار با توالی هایی که توزیع هایی مانند بارگذاری صفحات وب بار یا زمان پاسخ باطن را توصیف می کنند بهینه شده است. + +هنگام استفاده از چندین `quantile*` توابع با سطوح مختلف در پرس و جو, کشورهای داخلی در ترکیب نیست (به این معنا که, پرس و جو کار می کند موثر کمتر از می تواند). در این مورد از [quantiles](#quantiles) تابع. + +**نحو** + +``` sql +quantileTimingWeighted(level)(expr, weight) +``` + +نام مستعار: `medianTimingWeighted`. + +**پارامترها** + +- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` مقدار در محدوده `[0.01, 0.99]`. مقدار پیش فرض: 0.5. در `level=0.5` تابع محاسبه می کند [میانه](https://en.wikipedia.org/wiki/Median). + +- `expr` — [عبارت](../syntax.md#syntax-expressions) بیش از یک مقادیر ستون بازگشت [شناور\*](../../sql_reference/data_types/float.md)- نوع شماره . + + - If negative values are passed to the function, the behavior is undefined. + - If the value is greater than 30,000 (a page loading time of more than 30 seconds), it is assumed to be 30,000. + +- `weight` — Column with weights of sequence elements. Weight is a number of value occurrences. + +**دقت** + +محاسبه دقیق است اگر: + +- تعداد کل مقادیر 5670 تجاوز نمی کند. +- تعداد کل مقادیر بیش از 5670, اما زمان بارگذاری صفحه کمتر از است 1024خانم. + +در غیر این صورت, نتیجه محاسبه به نزدیکترین چند از گرد 16 خانم. + +!!! note "یادداشت" + برای محاسبه زمان بارگذاری صفحه quantiles این تابع این است که موثر تر و دقیق تر از [quantile](#quantile). + +**مقدار بازگشتی** + +- Quantile از سطح مشخص شده. + +نوع: `Float32`. + +!!! note "یادداشت" + اگر هیچ ارزش به تابع منتقل می شود (هنگام استفاده از `quantileTimingIf`), [نان](../../sql_reference/data_types/float.md#data_type-float-nan-inf) بازگشته است. هدف از این است که افتراق این موارد از مواردی که منجر به صفر. 
ببینید [ORDER BY](../statements/select.md#select-order-by) برای یادداشت ها در مرتب سازی `NaN` ارزشهای خبری عبارتند از:
+
+**مثال**
+
+جدول ورودی:
+
+``` text
+┌─response_time─┬─weight─┐
+│            68 │      1 │
+│           104 │      2 │
+│           112 │      3 │
+│           126 │      2 │
+│           138 │      1 │
+│           162 │      1 │
+└───────────────┴────────┘
+```
+
+پرسوجو:
+
+``` sql
+SELECT quantileTimingWeighted(response_time, weight) FROM t
+```
+
+نتیجه:
+
+``` text
+┌─quantileTimingWeighted(response_time, weight)─┐
+│                                           112 │
+└───────────────────────────────────────────────┘
+```
+
+**همچنین نگاه کنید**
+
+- [میانه](#median)
+- [quantiles](#quantiles)
+
+## مقدار کمی {#quantiletdigest}
+
+محاسبه تقریبی [quantile](https://en.wikipedia.org/wiki/Quantile) از یک توالی داده های عددی با استفاده از [خلاصه](https://github.com/tdunning/t-digest/blob/master/docs/t-digest-paper/histo.pdf) الگوریتم.
+
+حداکثر خطا است 1%. مصرف حافظه است `log(n)` کجا `n` تعدادی از ارزش است. نتیجه بستگی دارد منظور از در حال اجرا پرس و جو و nondeterministic.
+
+عملکرد تابع کمتر از عملکرد است [quantile](#quantile) یا [زمان کمی](#quantiletiming). از لحاظ نسبت اندازه دولت به دقت, این تابع بسیار بهتر از `quantile`.
+
+هنگام استفاده از چندین `quantile*` توابع با سطوح مختلف در پرس و جو, کشورهای داخلی در ترکیب نیست (به این معنا که, پرس و جو کار می کند موثر کمتر از می تواند). در این مورد از [quantiles](#quantiles) تابع.
+
+**نحو**
+
+``` sql
+quantileTDigest(level)(expr)
+```
+
+نام مستعار: `medianTDigest`.
+
+**پارامترها**
+
+- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` مقدار در محدوده `[0.01, 0.99]`. مقدار پیش فرض: 0.5. در `level=0.5` تابع محاسبه می کند [میانه](https://en.wikipedia.org/wiki/Median).
+- `expr` — Expression over the column values resulting in numeric [انواع داده ها](../../sql_reference/data_types/index.md#data_types), [تاریخ](../../sql_reference/data_types/date.md) یا [DateTime](../../sql_reference/data_types/datetime.md).
+
+**مقدار بازگشتی**
+
+- کمی تقریبی از سطح مشخص شده است.
+
+نوع:
+
+- [جسم شناور64](../../sql_reference/data_types/float.md) برای ورودی نوع داده عددی.
+- [تاریخ](../../sql_reference/data_types/date.md) اگر مقادیر ورودی `Date` نوع.
+- [DateTime](../../sql_reference/data_types/datetime.md) اگر مقادیر ورودی `DateTime` نوع.
+
+**مثال**
+
+پرسوجو:
+
+``` sql
+SELECT quantileTDigest(number) FROM numbers(10)
+```
+
+نتیجه:
+
+``` text
+┌─quantileTDigest(number)─┐
+│                     4.5 │
+└─────────────────────────┘
+```
+
+**همچنین نگاه کنید به**
+
+- [میانه](#median)
+- [quantiles](#quantiles)
+
+## نمایش سایت {#quantiletdigestweighted}
+
+محاسبه تقریبی [quantile](https://en.wikipedia.org/wiki/Quantile) از یک توالی داده های عددی با استفاده از [خلاصه](https://github.com/tdunning/t-digest/blob/master/docs/t-digest-paper/histo.pdf) الگوریتم. تابع طول می کشد را به حساب وزن هر یک از اعضای دنباله. حداکثر خطا است 1%. مصرف حافظه است `log(n)` کجا `n` تعدادی از ارزش است.
+
+عملکرد تابع کمتر از عملکرد است [quantile](#quantile) یا [زمان کمی](#quantiletiming). از لحاظ نسبت اندازه دولت به دقت, این تابع بسیار بهتر از `quantile`.
+
+نتیجه بستگی دارد منظور از در حال اجرا پرس و جو و nondeterministic.
+
+هنگام استفاده از چندین `quantile*` توابع با سطوح مختلف در پرس و جو, کشورهای داخلی در ترکیب نیست (به این معنا که, پرس و جو کار می کند موثر کمتر از می تواند). در این مورد از [quantiles](#quantiles) تابع.
+
+**نحو**
+
+``` sql
+quantileTDigestWeighted(level)(expr, weight)
+```
+
+نام مستعار: `medianTDigestWeighted`.
+
+**پارامترها**
+
+- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. 
We recommend using a `level` مقدار در محدوده `[0.01, 0.99]`. مقدار پیش فرض: 0.5. در `level=0.5` تابع محاسبه می کند [میانه](https://en.wikipedia.org/wiki/Median). +- `expr` — Expression over the column values resulting in numeric [انواع داده ها](../../sql_reference/data_types/index.md#data_types), [تاریخ](../../sql_reference/data_types/date.md) یا [DateTime](../../sql_reference/data_types/datetime.md). +- `weight` — Column with weights of sequence elements. Weight is a number of value occurrences. + +**مقدار بازگشتی** + +- کمی تقریبی از سطح مشخص شده است. + +نوع: + +- [جسم شناور64](../../sql_reference/data_types/float.md) برای ورودی نوع داده عددی. +- [تاریخ](../../sql_reference/data_types/date.md) اگر مقادیر ورودی `Date` نوع. +- [DateTime](../../sql_reference/data_types/datetime.md) اگر مقادیر ورودی `DateTime` نوع. + +**مثال** + +پرسوجو: + +``` sql +SELECT quantileTDigestWeighted(number, 1) FROM numbers(10) +``` + +نتیجه: + +``` text +┌─quantileTDigestWeighted(number, 1)─┐ +│ 4.5 │ +└────────────────────────────────────┘ +``` + +**همچنین نگاه کنید به** + +- [میانه](#median) +- [quantiles](#quantiles) + +## میانه {#median} + +این `median*` توابع نام مستعار برای مربوطه `quantile*` توابع. متوسط یک نمونه داده عددی را محاسبه می کنند. + +توابع: + +- `median` — Alias for [quantile](#quantile). +- `medianDeterministic` — Alias for [نامعینیهای کوانتی](#quantiledeterministic). +- `medianExact` — Alias for [کوانتوم](#quantileexact). +- `medianExactWeighted` — Alias for [نمایش سایت](#quantileexactweighted). +- `medianTiming` — Alias for [زمان کمی](#quantiletiming). +- `medianTimingWeighted` — Alias for [زمان کمی](#quantiletimingweighted). +- `medianTDigest` — Alias for [مقدار کمی](#quantiletdigest). +- `medianTDigestWeighted` — Alias for [نمایش سایت](#quantiletdigestweighted). + +**مثال** + +جدول ورودی: + +``` text +┌─val─┐ +│ 1 │ +│ 1 │ +│ 2 │ +│ 3 │ +└─────┘ +``` + +پرسوجو: + +``` sql +SELECT medianDeterministic(val, 1) FROM t +``` + +نتیجه: + +``` text +┌─medianDeterministic(val, 1)─┐ +│ 1.5 │ +└─────────────────────────────┘ +``` + +## quantiles(level1, level2, …)(x) {#quantiles} + +تمام quantile توابع نیز مربوط quantiles توابع: `quantiles`, `quantilesDeterministic`, `quantilesTiming`, `quantilesTimingWeighted`, `quantilesExact`, `quantilesExactWeighted`, `quantilesTDigest`. این توابع محاسبه تمام کوانتوم از سطوح ذکر شده در یک پاس, و بازگشت مجموعه ای از مقادیر حاصل. + +## اطلاعات دقیق) {#varsampx} + +محاسبه مقدار `Σ((x - x̅)^2) / (n - 1)` کجا `n` اندازه نمونه است و `x̅`مقدار متوسط است `x`. + +این نشان دهنده یک تخمین بی طرفانه از واریانس یک متغیر تصادفی اگر ارزش گذشت نمونه خود را تشکیل می دهند. + +بازگشت `Float64`. چه زمانی `n <= 1`, بازگشت `+∞`. + +## هشدار داده می شود) {#varpopx} + +محاسبه مقدار `Σ((x - x̅)^2) / n` کجا `n` اندازه نمونه است و `x̅`مقدار متوسط است `x`. + +به عبارت دیگر, پراکندگی برای مجموعه ای از ارزش. بازگشت `Float64`. + +## اطلاعات دقیق) {#stddevsampx} + +نتیجه برابر با ریشه مربع است `varSamp(x)`. + +## اطلاعات دقیق) {#stddevpopx} + +نتیجه برابر با ریشه مربع است `varPop(x)`. + +## topK(N)(x) {#topknx} + +بازگرداندن مجموعه ای از مقادیر تقریبا شایع ترین در ستون مشخص. مجموعه حاصل به ترتیب نزولی فرکانس تقریبی ارزش ها (نه با ارزش های خود) طبقه بندی شده اند. + +پیاده سازی [فیلتر صرفه جویی در فضا](http://www.l2f.inesc-id.pt/~fmmb/wiki/uploads/Work/misnis.ref0a.pdf) الگوریتم برای تجزیه و تحلیل توپک, بر اساس الگوریتم کاهش و ترکیب از [صرفه جویی در فضای موازی](https://arxiv.org/pdf/1401.0702.pdf). + +``` sql +topK(N)(column) +``` + +این تابع یک نتیجه تضمین شده را فراهم نمی کند. 
در شرایط خاص, اشتباهات ممکن است رخ دهد و ممکن است مقادیر مکرر که مقادیر شایع ترین نیست بازگشت. + +ما توصیه می کنیم با استفاده از `N < 10` عملکرد با بزرگ کاهش می یابد `N` ارزشهای خبری عبارتند از: حداکثر مقدار `N = 65536`. + +**پارامترها** + +- ‘N’ است تعدادی از عناصر به بازگشت. + +اگر پارامتر حذف شده است, مقدار پیش فرض 10 استفاده شده است. + +**نشانوندها** + +- ' x ' – The value to calculate frequency. + +**مثال** + +نگاهی به [به موقع](../../getting_started/example_datasets/ontime.md) مجموعه داده ها و انتخاب سه ارزش اغلب اتفاق می افتد در `AirlineID` ستون. + +``` sql +SELECT topK(3)(AirlineID) AS res +FROM ontime +``` + +``` text +┌─res─────────────────┐ +│ [19393,19790,19805] │ +└─────────────────────┘ +``` + +## کشتی کج {#topkweighted} + +مشابه به `topK` اما طول می کشد یک استدلال اضافی از نوع صحیح - `weight`. هر مقدار به حساب `weight` بار برای محاسبه فرکانس. + +**نحو** + +``` sql +topKWeighted(N)(x, weight) +``` + +**پارامترها** + +- `N` — The number of elements to return. + +**نشانوندها** + +- `x` – The value. +- `weight` — The weight. [UInt8](../../sql_reference/data_types/int_uint.md). + +**مقدار بازگشتی** + +بازگرداندن مجموعه ای از مقادیر با حداکثر مجموع تقریبی وزن. + +**مثال** + +پرسوجو: + +``` sql +SELECT topKWeighted(10)(number, number) FROM numbers(1000) +``` + +نتیجه: + +``` text +┌─topKWeighted(10)(number, number)──────────┐ +│ [999,998,997,996,995,994,993,992,991,990] │ +└───────────────────────────────────────────┘ +``` + +## هشدار داده می شود) {#covarsampx-y} + +محاسبه ارزش `Σ((x - x̅)(y - y̅)) / (n - 1)`. + +را برمی گرداند شناور64. زمانی که `n <= 1`, returns +∞. + +## نمایش سایت) {#covarpopx-y} + +محاسبه ارزش `Σ((x - x̅)(y - y̅)) / n`. + +## هشدار داده می شود) {#corrx-y} + +محاسبه ضریب همبستگی پیرسون: `Σ((x - x̅)(y - y̅)) / sqrt(Σ((x - x̅)^2) * Σ((y - y̅)^2))`. + +## طبقه بندی فرمول بندی {#categoricalinformationvalue} + +محاسبه ارزش `(P(tag = 1) - P(tag = 0))(log(P(tag = 1)) - log(P(tag = 0)))` برای هر دسته. + +``` sql +categoricalInformationValue(category1, category2, ..., tag) +``` + +نتیجه نشان می دهد که چگونه یک ویژگی گسسته (قطعی) `[category1, category2, ...]` کمک به یک مدل یادگیری که پیش بینی ارزش `tag`. + +## ساده سازی مقررات {#simplelinearregression} + +انجام ساده (unidimensional) رگرسیون خطی. + +``` sql +simpleLinearRegression(x, y) +``` + +پارامترها: + +- `x` — Column with dependent variable values. +- `y` — Column with explanatory variable values. + +مقادیر بازگشتی: + +ثابتها `(a, b)` از خط نتیجه `y = a*x + b`. + +**مثالها** + +``` sql +SELECT arrayReduce('simpleLinearRegression', [0, 1, 2, 3], [0, 1, 2, 3]) +``` + +``` text +┌─arrayReduce('simpleLinearRegression', [0, 1, 2, 3], [0, 1, 2, 3])─┐ +│ (1,0) │ +└───────────────────────────────────────────────────────────────────┘ +``` + +``` sql +SELECT arrayReduce('simpleLinearRegression', [0, 1, 2, 3], [3, 4, 5, 6]) +``` + +``` text +┌─arrayReduce('simpleLinearRegression', [0, 1, 2, 3], [3, 4, 5, 6])─┐ +│ (1,3) │ +└───────────────────────────────────────────────────────────────────┘ +``` + +## تنظیم مقررات {#agg_functions-stochasticlinearregression} + +این تابع پیاده سازی رگرسیون خطی تصادفی. 
از پارامترهای سفارشی برای نرخ یادگیری، ضریب منظم‌سازی L2 و اندازهٔ مینی‌دسته پشتیبانی می‌کند و چند روش برای به‌روزرسانی وزن‌ها دارد ([Adam](https://en.wikipedia.org/wiki/Stochastic_gradient_descent#Adam) (پیش‌فرض), [SGD ساده](https://en.wikipedia.org/wiki/Stochastic_gradient_descent), [Momentum](https://en.wikipedia.org/wiki/Stochastic_gradient_descent#Momentum), [Nesterov](https://mipt.ru/upload/medialibrary/d7e/41-91.pdf)).
+
+### پارامترها {#agg_functions-stochasticlinearregression-parameters}
+
+4 پارامتر قابل تنظیم وجود دارد. پارامترها به ترتیب به تابع داده می‌شوند، اما لازم نیست هر چهار مقدار را مشخص کنید؛ برای مقادیر مشخص‌نشده از پیش‌فرض‌ها استفاده می‌شود. با این حال، یک مدل خوب به مقداری تنظیم پارامتر نیاز دارد.
+
+``` text
+stochasticLinearRegression(1.0, 1.0, 10, 'SGD')
+```
+
+1. `learning rate` ضریب طول گام در هر مرحلهٔ گرادیان کاهشی است. نرخ یادگیری خیلی بزرگ ممکن است وزن‌های مدل را بی‌نهایت کند. پیش‌فرض `0.00001` است.
+2. `l2 regularization coefficient` که می‌تواند به جلوگیری از بیش‌برازش (overfitting) کمک کند. پیش‌فرض `0.1` است.
+3. `mini-batch size` تعداد عناصری را مشخص می‌کند که گرادیان‌هایشان محاسبه و جمع می‌شود تا یک گام گرادیان کاهشی انجام شود. گرادیان کاهشی تصادفی خالص از یک عنصر استفاده می‌کند، اما دسته‌های کوچک (حدود 10 عنصر) گام‌های گرادیان را پایدارتر می‌کنند. پیش‌فرض `15` است.
+4. `method for updating weights` یکی از: `Adam` (پیش‌فرض), `SGD`, `Momentum`, `Nesterov`. `Momentum` و `Nesterov` به محاسبات و حافظهٔ کمی بیشتر نیاز دارند، اما از نظر سرعت همگرایی و پایداری روش‌های گرادیان تصادفی معمولا مفیدند.
+
+### استفاده {#agg_functions-stochasticlinearregression-usage}
+
+`stochasticLinearRegression` در دو مرحله استفاده می‌شود: برازش مدل و پیش‌بینی روی داده‌های جدید. برای برازش مدل و ذخیرهٔ حالت آن برای استفاده‌های بعدی، از ترکیب‌کنندهٔ `-State` استفاده می‌کنیم که اساسا حالت (وزن‌های مدل و غیره) را ذخیره می‌کند.
+برای پیش‌بینی از تابع [evalMLMethod](../functions/machine_learning_functions.md#machine_learning_methods-evalmlmethod) استفاده می‌کنیم که یک حالت را به عنوان آرگومان می‌گیرد و همچنین ویژگی‌هایی را که باید روی آنها پیش‌بینی شود.
+
+
+
+**1.** برازش
+
+می‌توان از پرس‌وجویی مانند زیر استفاده کرد.
+
+``` sql
+CREATE TABLE IF NOT EXISTS train_data
+(
+    param1 Float64,
+    param2 Float64,
+    target Float64
+) ENGINE = Memory;
+
+CREATE TABLE your_model ENGINE = Memory AS SELECT
+stochasticLinearRegressionState(0.1, 0.0, 5, 'SGD')(target, param1, param2)
+AS state FROM train_data;
+```
+
+در اینجا باید داده‌ها را نیز در جدول `train_data` درج کنیم. تعداد پارامترها ثابت نیست و فقط به تعداد آرگومان‌هایی بستگی دارد که به `linearRegressionState` داده می‌شود. همه باید مقادیر عددی باشند.
+توجه کنید که ستون مقدار هدف (که می‌خواهیم پیش‌بینی آن را یاد بگیریم) به عنوان اولین آرگومان قرار می‌گیرد.
+
+**2.** پیش بینی
+
+پس از ذخیرهٔ حالت در جدول، می‌توانیم چندین بار از آن برای پیش‌بینی استفاده کنیم، یا حتی آن را با حالت‌های دیگر ادغام کرده و مدل‌های جدید و بهتری بسازیم.
+
+``` sql
+WITH (SELECT state FROM your_model) AS model SELECT
+evalMLMethod(model, param1, param2) FROM test_data
+```
+
+این پرس‌وجو یک ستون از مقادیر پیش‌بینی‌شده برمی‌گرداند. توجه کنید که اولین آرگومان `evalMLMethod` یک شیء `AggregateFunctionState` است و آرگومان‌های بعدی ستون‌های ویژگی‌ها هستند.
+
+`test_data` جدولی مانند `train_data` است، اما ممکن است مقدار هدف را نداشته باشد.
+
+### یادداشتها {#agg_functions-stochasticlinearregression-notes}
+
+1. برای ادغام دو مدل، کاربر می‌تواند چنین پرس‌وجویی بسازد:
+   `sql  SELECT state1 + state2 FROM your_models`
+   که در آن جدول `your_models` هر دو مدل را دربردارد. این پرس‌وجو یک شیء `AggregateFunctionState` جدید برمی‌گرداند.
+
+2. اگر ترکیب‌کنندهٔ `-State` استفاده نشود، کاربر می‌تواند بدون ذخیرهٔ مدل، وزن‌های مدل ساخته‌شده را برای مقاصد خود دریافت کند.
+   `sql  SELECT stochasticLinearRegression(0.01)(target, param1, param2) FROM train_data`
+   چنین پرس‌وجویی مدل را برازش می‌کند و وزن‌های آن را برمی‌گرداند: وزن‌های اول با پارامترهای مدل متناظرند و آخری بایاس است. بنابراین در مثال بالا، پرس‌وجو یک ستون با 3 مقدار برمی‌گرداند.
+
+**همچنین نگاه کنید**
+
+- [stochasticLogisticRegression](#agg_functions-stochasticlogisticregression)
+- [تفاوت رگرسیون خطی و لجستیک](https://stackoverflow.com/questions/12146914/what-is-the-difference-between-linear-regression-and-logistic-regression)
+
+## stochasticLogisticRegression {#agg_functions-stochasticlogisticregression}
+
+این تابع رگرسیون لجستیک تصادفی را پیاده‌سازی می‌کند. می‌توان از آن برای مسئلهٔ طبقه‌بندی دودویی استفاده کرد؛ همان پارامترهای سفارشی را پشتیبانی می‌کند و به همان شیوه کار می‌کند.
+
+### پارامترها {#agg_functions-stochasticlogisticregression-parameters}
+
+پارامترها دقیقا مشابه stochasticLinearRegression هستند:
+`learning rate`, `l2 regularization coefficient`, `mini-batch size`, `method for updating weights`.
+برای اطلاعات بیشتر نگاه کنید به [پارامترها](#agg_functions-stochasticlinearregression-parameters).
+
+``` text
+stochasticLogisticRegression(1.0, 1.0, 10, 'SGD')
+```
+
+1. برازش
+
+
+
+    See the `Fitting` section in the [stochasticLinearRegression](#stochasticlinearregression-usage-fitting) description.
+
+    Predicted labels have to be in \[-1, 1\].
+
+1. پیش بینی
+
+
+
+    Using saved state we can predict probability of object having label `1`.
+
+    ``` sql
+    WITH (SELECT state FROM your_model) AS model SELECT
+    evalMLMethod(model, param1, param2) FROM test_data
+    ```
+
+    The query will return a column of probabilities. Note that first argument of `evalMLMethod` is `AggregateFunctionState` object, next are columns of features.
+
+    We can also set a bound of probability, which assigns elements to different labels.
+
+    ``` sql
+    SELECT ans < 1.1 AND ans > 0.5 FROM
+    (WITH (SELECT state FROM your_model) AS model SELECT
+    evalMLMethod(model, param1, param2) AS ans FROM test_data)
+    ```
+
+    Then the result will be labels.
+
+    `test_data` is a table like `train_data` but may not contain target value.
+
+**همچنین نگاه کنید**
+
+- [stochasticLinearRegression](#agg_functions-stochasticlinearregression)
+- [تفاوت بین رگرسیون خطی و لجستیک.](https://stackoverflow.com/questions/12146914/what-is-the-difference-between-linear-regression-and-logistic-regression)
+
+## groupBitmapAnd {#groupbitmapand}
+
+ترکیب AND بیت‌مپی را روی یک ستون بیت‌مپ محاسبه می‌کند و کاردینالیتی را از نوع UInt64 برمی‌گرداند؛ اگر پسوند `-State` اضافه شود، یک [شی نگاشت بیت](../../sql_reference/functions/bitmap_functions.md) برمی‌گرداند.
+
+``` sql
+groupBitmapAnd(expr)
+```
+
+**پارامترها**
+
+`expr` – An expression that results in `AggregateFunction(groupBitmap, UInt*)` نوع.
+
+**مقدار بازگشتی**
+
+مقداری از نوع `UInt64`.
+
+**مثال**
+
+``` sql
+DROP TABLE IF EXISTS bitmap_column_expr_test2;
+CREATE TABLE bitmap_column_expr_test2
+(
+    tag_id String,
+    z AggregateFunction(groupBitmap, UInt32)
+)
+ENGINE = MergeTree
+ORDER BY tag_id;
+
+INSERT INTO bitmap_column_expr_test2 VALUES ('tag1', bitmapBuild(cast([1,2,3,4,5,6,7,8,9,10] as Array(UInt32))));
+INSERT INTO bitmap_column_expr_test2 VALUES ('tag2', bitmapBuild(cast([6,7,8,9,10,11,12,13,14,15] as Array(UInt32))));
+INSERT INTO bitmap_column_expr_test2 VALUES ('tag3', bitmapBuild(cast([2,4,6,8,10,12] as Array(UInt32))));
+
+SELECT groupBitmapAnd(z) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%');
+┌─groupBitmapAnd(z)─┐
+│ 3                 │
+└───────────────────┘
+
+SELECT arraySort(bitmapToArray(groupBitmapAndState(z))) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%');
+┌─arraySort(bitmapToArray(groupBitmapAndState(z)))─┐
+│ [6,8,10]                                         │
+└──────────────────────────────────────────────────┘
+```
+
+## groupBitmapOr {#groupbitmapor}
+
+ترکیب OR بیت‌مپی را روی یک ستون بیت‌مپ محاسبه می‌کند و کاردینالیتی را از نوع UInt64 برمی‌گرداند؛ اگر پسوند `-State` اضافه شود، یک [شی نگاشت بیت](../../sql_reference/functions/bitmap_functions.md) برمی‌گرداند. این تابع معادل `groupBitmapMerge` است.
+
+``` sql
+groupBitmapOr(expr)
+```
+
+**پارامترها**
+
+`expr` – An expression that results in `AggregateFunction(groupBitmap, UInt*)` نوع.
+
+**مقدار بازگشتی**
+
+مقداری از نوع `UInt64`.
+
+**مثال**
+
+``` sql
+DROP TABLE IF EXISTS bitmap_column_expr_test2;
+CREATE TABLE bitmap_column_expr_test2
+(
+    tag_id String,
+    z AggregateFunction(groupBitmap, UInt32)
+)
+ENGINE = MergeTree
+ORDER BY tag_id;
+
+INSERT INTO bitmap_column_expr_test2 VALUES ('tag1', bitmapBuild(cast([1,2,3,4,5,6,7,8,9,10] as Array(UInt32))));
+INSERT INTO bitmap_column_expr_test2 VALUES ('tag2', bitmapBuild(cast([6,7,8,9,10,11,12,13,14,15] as Array(UInt32))));
+INSERT INTO bitmap_column_expr_test2 VALUES ('tag3', bitmapBuild(cast([2,4,6,8,10,12] as Array(UInt32))));
+
+SELECT groupBitmapOr(z) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%');
+┌─groupBitmapOr(z)─┐
+│ 15               │
+└──────────────────┘
+
+SELECT arraySort(bitmapToArray(groupBitmapOrState(z))) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%');
+┌─arraySort(bitmapToArray(groupBitmapOrState(z)))─┐
+│ [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]           │
+└─────────────────────────────────────────────────┘
+```
+
+## groupBitmapXor {#groupbitmapxor}
+
+ترکیب XOR بیت‌مپی را روی یک ستون بیت‌مپ محاسبه می‌کند و کاردینالیتی را از نوع UInt64 برمی‌گرداند؛ اگر پسوند `-State` اضافه شود، یک [شی نگاشت بیت](../../sql_reference/functions/bitmap_functions.md) برمی‌گرداند.
+
+``` sql
+groupBitmapXor(expr)
+```
+
+**پارامترها**
+
+`expr` – An expression that results in `AggregateFunction(groupBitmap, UInt*)` نوع.
+
+**مقدار بازگشتی**
+
+مقداری از نوع `UInt64`.
+ +**مثال** + +``` sql +DROP TABLE IF EXISTS bitmap_column_expr_test2; +CREATE TABLE bitmap_column_expr_test2 +( + tag_id String, + z AggregateFunction(groupBitmap, UInt32) +) +ENGINE = MergeTree +ORDER BY tag_id; + +INSERT INTO bitmap_column_expr_test2 VALUES ('tag1', bitmapBuild(cast([1,2,3,4,5,6,7,8,9,10] as Array(UInt32)))); +INSERT INTO bitmap_column_expr_test2 VALUES ('tag2', bitmapBuild(cast([6,7,8,9,10,11,12,13,14,15] as Array(UInt32)))); +INSERT INTO bitmap_column_expr_test2 VALUES ('tag3', bitmapBuild(cast([2,4,6,8,10,12] as Array(UInt32)))); + +SELECT groupBitmapXor(z) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%'); +┌─groupBitmapXor(z)─┐ +│ 10 │ +└───────────────────┘ + +SELECT arraySort(bitmapToArray(groupBitmapXorState(z))) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%'); +┌─arraySort(bitmapToArray(groupBitmapXorState(z)))─┐ +│ [1,3,5,6,8,10,11,13,14,15] │ +└──────────────────────────────────────────────────┘ +``` + +[مقاله اصلی](https://clickhouse.tech/docs/en/query_language/agg_functions/reference/) diff --git a/docs/fa/sql_reference/data_types/aggregatefunction.md b/docs/fa/sql_reference/data_types/aggregatefunction.md new file mode 100644 index 00000000000..f6430b6658c --- /dev/null +++ b/docs/fa/sql_reference/data_types/aggregatefunction.md @@ -0,0 +1,71 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 52 +toc_title: "\u06A9\u0627\u0631\u06A9\u0631\u062F(\u0646\u0627\u0645 \u0648 \u0646\u0627\ + \u0645 \u062E\u0627\u0646\u0648\u0627\u062F\u06AF\u06CC..)" +--- + +# AggregateFunction(name, types\_of\_arguments…) {#data-type-aggregatefunction} + +Aggregate functions can have an implementation-defined intermediate state that can be serialized to an AggregateFunction(…) data type and stored in a table, usually, by means of [مشاهده محقق](../../sql_reference/statements/select.md#create-view). راه معمول برای تولید یک دولت تابع جمع است با فراخوانی تابع جمع با `-State` پسوند. برای دریافت نتیجه نهایی از تجمع در اینده, شما باید همان تابع کل با استفاده از `-Merge`پسوند. + +`AggregateFunction` — parametric data type. + +**پارامترها** + +- نام تابع جمع. + + If the function is parametric, specify its parameters too. + +- انواع استدلال تابع جمع. + +**مثال** + +``` sql +CREATE TABLE t +( + column1 AggregateFunction(uniq, UInt64), + column2 AggregateFunction(anyIf, String, UInt8), + column3 AggregateFunction(quantiles(0.5, 0.9), UInt64) +) ENGINE = ... +``` + +[دانشگاه](../../sql_reference/aggregate_functions/reference.md#agg_function-uniq). ([هر](../../sql_reference/aggregate_functions/reference.md#agg_function-any)+[اگر](../../sql_reference/aggregate_functions/combinators.md#agg-functions-combinator-if)) و [quantiles](../../sql_reference/aggregate_functions/reference.md) توابع مجموع پشتیبانی در خانه کلیک می باشد. + +## استفاده {#usage} + +### درج داده {#data-insertion} + +برای وارد کردن داده ها استفاده کنید `INSERT SELECT` با مجموع `-State`- توابع . + +**نمونه تابع** + +``` sql +uniqState(UserID) +quantilesState(0.5, 0.9)(SendTiming) +``` + +در مقایسه با توابع مربوطه `uniq` و `quantiles`, `-State`- توابع بازگشت به دولت, به جای ارزش نهایی. به عبارت دیگر ارزش بازگشت `AggregateFunction` نوع. + +در نتایج `SELECT` پرس و جو, ارزش `AggregateFunction` نوع اجرای خاص نمایندگی دودویی برای همه فرمت های خروجی کلیک کنید. اگر کمپرسی داده ها به, مثلا, `TabSeparated` قالب با `SELECT` پرس و جو, سپس این روگرفت را می توان با استفاده از لود `INSERT` پرس و جو. 
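+
+به عنوان یک طرح کوچک و فرضی، درج حالت‌ها با `INSERT SELECT` می‌تواند چنین باشد. نام جدول‌ها (`visits_src` و `visits_agg`) صرفاً برای نمونه انتخاب شده‌اند:
+
+``` sql
+-- جدول مبدا با داده‌های خام (نام فرضی)
+CREATE TABLE visits_src (RegionID UInt32, UserID UInt64) ENGINE = MergeTree ORDER BY RegionID;
+
+-- جدول مقصد که حالت‌های تجمیع را ذخیره می‌کند (نام فرضی)
+CREATE TABLE visits_agg
+(
+    RegionID UInt32,
+    Users AggregateFunction(uniq, UInt64)
+) ENGINE = AggregatingMergeTree() ORDER BY RegionID;
+
+-- درج حالت‌ها با تابع -State به جای مقدار نهایی
+INSERT INTO visits_agg
+SELECT RegionID, uniqState(UserID)
+FROM visits_src
+GROUP BY RegionID;
+```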
+ +### گزینش داده {#data-selection} + +هنگام انتخاب داده ها از `AggregatingMergeTree` جدول استفاده کنید `GROUP BY` بند و همان مجموع توابع به عنوان زمانی که قرار دادن داده اما با استفاده از `-Merge`پسوند. + +یک تابع جمع با `-Merge` پسوند مجموعه ای از ایالت ها را ترکیب می کند و نتیجه تجمع کامل داده ها را باز می گرداند. + +مثلا, دو نمایش داده شد زیر بازگشت به همان نتیجه: + +``` sql +SELECT uniq(UserID) FROM table + +SELECT uniqMerge(state) FROM (SELECT uniqState(UserID) AS state FROM table GROUP BY RegionID) +``` + +## مثال طریقه استفاده {#usage-example} + +ببینید [ریزدانه](../../engines/table_engines/mergetree_family/aggregatingmergetree.md) موتور باشرکت. + +[مقاله اصلی](https://clickhouse.tech/docs/en/data_types/nested_data_structures/aggregatefunction/) diff --git a/docs/fa/sql_reference/data_types/array.md b/docs/fa/sql_reference/data_types/array.md new file mode 100644 index 00000000000..322e806b8ba --- /dev/null +++ b/docs/fa/sql_reference/data_types/array.md @@ -0,0 +1,77 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 51 +toc_title: "& \u062A\u0648\u0631\u06CC)" +--- + +# & توری) {#data-type-array} + +مجموعه ای از `T`- نوع اقلام است. `T` می تواند هر نوع داده, از جمله مجموعه. + +## ایجاد یک مجموعه {#creating-an-array} + +شما می توانید یک تابع برای ایجاد مجموعه ای استفاده کنید: + +``` sql +array(T) +``` + +شما همچنین می توانید براکت مربع استفاده کنید. + +``` sql +[] +``` + +نمونه ای از ایجاد یک مجموعه: + +``` sql +SELECT array(1, 2) AS x, toTypeName(x) +``` + +``` text +┌─x─────┬─toTypeName(array(1, 2))─┐ +│ [1,2] │ Array(UInt8) │ +└───────┴─────────────────────────┘ +``` + +``` sql +SELECT [1, 2] AS x, toTypeName(x) +``` + +``` text +┌─x─────┬─toTypeName([1, 2])─┐ +│ [1,2] │ Array(UInt8) │ +└───────┴────────────────────┘ +``` + +## کار با انواع داده ها {#working-with-data-types} + +در هنگام ایجاد مجموعه ای در پرواز, خانه رعیتی به طور خودکار نوع استدلال به عنوان باریک ترین نوع داده است که می تواند تمام استدلال ذکر شده ذخیره تعریف. اگر وجود دارد [Nullable](nullable.md#data_type-nullable) یا تحت اللفظی [NULL](../../sql_reference/syntax.md#null-literal) ارزش, نوع عنصر مجموعه ای نیز می شود [Nullable](nullable.md). + +اگر فاحشه خانه می تواند نوع داده را تعیین نمی کند, این تولید یک استثنا. مثلا, این اتفاق می افتد زمانی که تلاش برای ایجاد مجموعه ای با رشته ها و اعداد به طور همزمان (`SELECT array(1, 'a')`). + +نمونه هایی از تشخیص نوع داده ها به صورت خودکار: + +``` sql +SELECT array(1, 2, NULL) AS x, toTypeName(x) +``` + +``` text +┌─x──────────┬─toTypeName(array(1, 2, NULL))─┐ +│ [1,2,NULL] │ Array(Nullable(UInt8)) │ +└────────────┴───────────────────────────────┘ +``` + +اگر شما سعی می کنید برای ایجاد یک آرایه از ناسازگار انواع داده ها clickhouse پرتاب یک استثنا: + +``` sql +SELECT array(1, 'a') +``` + +``` text +Received exception from server (version 1.1.54388): +Code: 386. DB::Exception: Received from localhost:9000, 127.0.0.1. DB::Exception: There is no supertype for types UInt8, String because some of them are String/FixedString and some of them are not. 
+``` + +[مقاله اصلی](https://clickhouse.tech/docs/en/data_types/array/) diff --git a/docs/fa/sql_reference/data_types/boolean.md b/docs/fa/sql_reference/data_types/boolean.md new file mode 100644 index 00000000000..a906de86943 --- /dev/null +++ b/docs/fa/sql_reference/data_types/boolean.md @@ -0,0 +1,12 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 43 +toc_title: "\u0628\u0648\u0644\u06CC" +--- + +# مقادیر بولی {#boolean-values} + +هیچ نوع جداگانه برای مقادیر بولی وجود دارد. استفاده از نوع اوینت8, محدود به ارزش 0 یا 1. + +[مقاله اصلی](https://clickhouse.tech/docs/en/data_types/boolean/) diff --git a/docs/fa/sql_reference/data_types/date.md b/docs/fa/sql_reference/data_types/date.md new file mode 100644 index 00000000000..2a9de86cfb5 --- /dev/null +++ b/docs/fa/sql_reference/data_types/date.md @@ -0,0 +1,15 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 47 +toc_title: "\u062A\u0627\u0631\u06CC\u062E" +--- + +# تاریخ {#date} + +قرار ذخیره شده در دو بایت به عنوان تعداد روز از 1970-01-01 (بدون علامت). اجازه می دهد تا ذخیره سازی مقادیر از درست بعد از شروع عصر یونیکس به حد بالایی تعریف شده توسط ثابت در مرحله تدوین (در حال حاضر, این است تا سال 2106, اما نهایی سال به طور کامل پشتیبانی شده است 2105). +حداقل مقدار خروجی به عنوان 0000-00-00. + +ارزش تاریخ بدون منطقه زمانی ذخیره می شود. + +[مقاله اصلی](https://clickhouse.tech/docs/en/data_types/date/) diff --git a/docs/fa/sql_reference/data_types/datetime.md b/docs/fa/sql_reference/data_types/datetime.md new file mode 100644 index 00000000000..d2620b35038 --- /dev/null +++ b/docs/fa/sql_reference/data_types/datetime.md @@ -0,0 +1,129 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 48 +toc_title: DateTime +--- + +# تاریخ ساعت {#data_type-datetime} + +اجازه می دهد تا برای ذخیره یک لحظه در زمان, است که می تواند به عنوان یک تاریخ تقویم و یک زمان از یک روز بیان. + +نحو: + +``` sql +DateTime([timezone]) +``` + +محدوده پشتیبانی شده از ارزش ها: \[1970-01-01 00:00:00, 2105-12-31 23:59:59\]. + +حل: 1 ثانیه. + +## استفاده از سخنان {#usage-remarks} + +نقطه در زمان به عنوان یک ذخیره می شود [برچسب زمان یونیکس](https://en.wikipedia.org/wiki/Unix_time), صرف نظر از منطقه زمانی و یا صرفه جویی در زمان نور روز. علاوه بر این `DateTime` نوع می توانید منطقه زمانی است که همین کار را برای کل ستون ذخیره, که تحت تاثیر قرار چگونه ارزش های `DateTime` مقادیر نوع در قالب متن نمایش داده می شود و چگونه مقادیر مشخص شده به عنوان رشته تجزیه می شوند (‘2020-01-01 05:00:01’). منطقه زمانی در ردیف جدول ذخیره نمی شود (و یا در نتیجه), اما در ابرداده ستون ذخیره می شود. +لیستی از مناطق زمانی پشتیبانی شده را می توان در [اانا پایگاه منطقه زمانی](https://www.iana.org/time-zones). +این `tzdata` بسته حاوی [اانا پایگاه منطقه زمانی](https://www.iana.org/time-zones), باید در سیستم نصب. استفاده از `timedatectl list-timezones` فرمان به لیست جغرافیایی شناخته شده توسط یک سیستم محلی. + +شما به صراحت می توانید یک منطقه زمانی برای `DateTime`- ستون نوع در هنگام ایجاد یک جدول. اگر منطقه زمانی تنظیم نشده است, خانه رعیتی با استفاده از ارزش [منطقهی زمانی](../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-timezone) پارامتر در تنظیمات سرور و یا تنظیمات سیستم عامل در حال حاضر از شروع سرور کلیک. + +این [کلیک مشتری](../../interfaces/cli.md) اعمال منطقه زمانی سرور به طور پیش فرض اگر یک منطقه زمانی است که به صراحت تنظیم نشده است که مقدار دهی اولیه نوع داده ها. 
برای استفاده از منطقه زمان مشتری اجرا کنید `clickhouse-client` با `--use_client_time_zone` پارامتر. + +خروجی کلیک ارزش در `YYYY-MM-DD hh:mm:ss` قالب متن به طور پیش فرض. شما می توانید خروجی را با تغییر [formatDateTime](../../sql_reference/functions/date_time_functions.md#formatdatetime) تابع. + +هنگام قرار دادن داده ها به تاتر, شما می توانید فرمت های مختلف تاریخ و زمان رشته استفاده, بسته به ارزش [تغییر \_شماره](../../operations/settings/settings.md#settings-date_time_input_format) تنظیمات. + +## مثالها {#examples} + +**1.** ایجاد یک جدول با یک `DateTime`- ستون را تایپ کنید و داده ها را وارد کنید: + +``` sql +CREATE TABLE dt +( + `timestamp` DateTime('Europe/Moscow'), + `event_id` UInt8 +) +ENGINE = TinyLog; +``` + +``` sql +INSERT INTO dt Values (1546300800, 1), ('2019-01-01 00:00:00', 2); +``` + +``` sql +SELECT * FROM dt; +``` + +``` text +┌───────────timestamp─┬─event_id─┐ +│ 2019-01-01 03:00:00 │ 1 │ +│ 2019-01-01 00:00:00 │ 2 │ +└─────────────────────┴──────────┘ +``` + +- هنگام قرار دادن تاریخ ساعت به عنوان یک عدد صحیح, این است که به عنوان برچسب زمان یونیکس درمان (مجموعه مقالات). `1546300800` نشان دهنده `'2019-01-01 00:00:00'` ادا کردن. با این حال, مانند `timestamp` ستون دارد `Europe/Moscow` (مجموعه مقالات+3) منطقه زمانی مشخص, در هنگام خروجی به عنوان رشته ارزش خواهد شد به عنوان نشان داده شده است `'2019-01-01 03:00:00'` +- هنگام قرار دادن مقدار رشته به عنوان تاریخ ساعت, این است که به عنوان بودن در منطقه زمانی ستون درمان. `'2019-01-01 00:00:00'` خواهد شد به عنوان در درمان `Europe/Moscow` منطقه زمانی و ذخیره به عنوان `1546290000`. + +**2.** پالایش بر روی `DateTime` مقادیر + +``` sql +SELECT * FROM dt WHERE timestamp = toDateTime('2019-01-01 00:00:00', 'Europe/Moscow') +``` + +``` text +┌───────────timestamp─┬─event_id─┐ +│ 2019-01-01 00:00:00 │ 2 │ +└─────────────────────┴──────────┘ +``` + +`DateTime` مقادیر ستون را می توان با استفاده از یک مقدار رشته در فیلتر `WHERE` مسندکردن. 
این تبدیل خواهد شد به `DateTime` به طور خودکار: + +``` sql +SELECT * FROM dt WHERE timestamp = '2019-01-01 00:00:00' +``` + +``` text +┌───────────timestamp─┬─event_id─┐ +│ 2019-01-01 03:00:00 │ 1 │ +└─────────────────────┴──────────┘ +``` + +**3.** گرفتن یک منطقه زمانی برای یک `DateTime`- نوع ستون: + +``` sql +SELECT toDateTime(now(), 'Europe/Moscow') AS column, toTypeName(column) AS x +``` + +``` text +┌──────────────column─┬─x─────────────────────────┐ +│ 2019-10-16 04:12:04 │ DateTime('Europe/Moscow') │ +└─────────────────────┴───────────────────────────┘ +``` + +**4.** تبدیل منطقه زمانی + +``` sql +SELECT +toDateTime(timestamp, 'Europe/London') as lon_time, +toDateTime(timestamp, 'Europe/Moscow') as mos_time +FROM dt +``` + +``` text +┌───────────lon_time──┬────────────mos_time─┐ +│ 2019-01-01 00:00:00 │ 2019-01-01 03:00:00 │ +│ 2018-12-31 21:00:00 │ 2019-01-01 00:00:00 │ +└─────────────────────┴─────────────────────┘ +``` + +## همچنین نگاه کنید به {#see-also} + +- [توابع تبدیل نوع](../../sql_reference/functions/type_conversion_functions.md) +- [توابع برای کار با تاریخ و زمان](../../sql_reference/functions/date_time_functions.md) +- [توابع کار با آرایه ها](../../sql_reference/functions/array_functions.md) +- [این `date_time_input_format` تنظیم](../../operations/settings/settings.md#settings-date_time_input_format) +- [این `timezone` پارامتر پیکربندی سرور](../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-timezone) +- [اپراتورها برای کار با تاریخ و زمان](../../sql_reference/operators.md#operators-datetime) +- [این `Date` نوع داده](date.md) + +[مقاله اصلی](https://clickhouse.tech/docs/en/data_types/datetime/) diff --git a/docs/fa/sql_reference/data_types/datetime64.md b/docs/fa/sql_reference/data_types/datetime64.md new file mode 100644 index 00000000000..6e2ddaf9447 --- /dev/null +++ b/docs/fa/sql_reference/data_types/datetime64.md @@ -0,0 +1,104 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 49 +toc_title: "\u0637\u0648\u0644 \u062A\u0627\u0631\u06CC\u062E 64" +--- + +# طول تاریخ 64 {#data_type-datetime64} + +اجازه می دهد تا برای ذخیره یک لحظه در زمان, است که می تواند به عنوان یک تاریخ تقویم و یک زمان از یک روز بیان, با دقت زیر دوم تعریف شده + +اندازه تیک (دقت): 10- دقت ثانیه + +نحو: + +``` sql +DateTime64(precision, [timezone]) +``` + +داخلی, ذخیره داده ها به عنوان تعدادی از ‘ticks’ از عصر شروع (1970-01-01 00:00:00 یو تی سی) به عنوان اینترنشنال64. وضوح تیک توسط پارامتر دقیق تعیین می شود. علاوه بر این `DateTime64` نوع می توانید منطقه زمانی است که همین کار را برای کل ستون ذخیره, که تحت تاثیر قرار چگونه ارزش های `DateTime64` مقادیر نوع در قالب متن نمایش داده می شود و چگونه مقادیر مشخص شده به عنوان رشته تجزیه می شوند (‘2020-01-01 05:00:01.000’). منطقه زمانی در ردیف جدول ذخیره نمی شود (و یا در نتیجه), اما در ابرداده ستون ذخیره می شود. مشاهده اطلاعات در [DateTime](datetime.md). 
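+
+به عنوان نمونه‌ای کوچک و صرفاً تصویری، پرس‌وجوی زیر نشان می‌دهد که پارامتر دقت چگونه تعداد ارقام کسری ثانیه را تعیین می‌کند (مقادیر ورودی فرضی‌اند):
+
+``` sql
+SELECT
+    toDateTime64('2019-01-01 00:00:00.123', 3) AS ms,
+    toDateTime64('2019-01-01 00:00:00.123456', 6) AS us
+```
+
+خروجی مورد انتظار تقریبا چنین است:
+
+``` text
+┌──────────────────────ms─┬─────────────────────────us─┐
+│ 2019-01-01 00:00:00.123 │ 2019-01-01 00:00:00.123456 │
+└─────────────────────────┴────────────────────────────┘
+```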
+ +## مثالها {#examples} + +**1.** ایجاد یک جدول با `DateTime64`- ستون را تایپ کنید و داده ها را وارد کنید: + +``` sql +CREATE TABLE dt +( + `timestamp` DateTime64(3, 'Europe/Moscow'), + `event_id` UInt8 +) +ENGINE = TinyLog +``` + +``` sql +INSERT INTO dt Values (1546300800000, 1), ('2019-01-01 00:00:00', 2) +``` + +``` sql +SELECT * FROM dt +``` + +``` text +┌───────────────timestamp─┬─event_id─┐ +│ 2019-01-01 03:00:00.000 │ 1 │ +│ 2019-01-01 00:00:00.000 │ 2 │ +└─────────────────────────┴──────────┘ +``` + +- هنگام قرار دادن تاریخ ساعت به عنوان یک عدد صحیح, این است که به عنوان یک مناسب کوچک برچسب زمان یونیکس درمان (مجموعه مقالات). `1546300800000` (با دقت 3) نشان دهنده `'2019-01-01 00:00:00'` ادا کردن. با این حال, مانند `timestamp` ستون دارد `Europe/Moscow` (مجموعه مقالات+3) منطقه زمانی مشخص, در هنگام خروجی به عنوان یک رشته ارزش خواهد شد به عنوان نشان داده شده است `'2019-01-01 03:00:00'` +- هنگام قرار دادن مقدار رشته به عنوان تاریخ ساعت, این است که به عنوان بودن در منطقه زمانی ستون درمان. `'2019-01-01 00:00:00'` خواهد شد به عنوان در درمان `Europe/Moscow` منطقه زمانی و ذخیره شده به عنوان `1546290000000`. + +**2.** پالایش بر روی `DateTime64` مقادیر + +``` sql +SELECT * FROM dt WHERE timestamp = toDateTime64('2019-01-01 00:00:00', 3, 'Europe/Moscow') +``` + +``` text +┌───────────────timestamp─┬─event_id─┐ +│ 2019-01-01 00:00:00.000 │ 2 │ +└─────────────────────────┴──────────┘ +``` + +برخلاف `DateTime`, `DateTime64` ارزش ها از تبدیل نمی `String` به طور خودکار + +**3.** گرفتن یک منطقه زمانی برای یک `DateTime64`- مقدار نوع: + +``` sql +SELECT toDateTime64(now(), 3, 'Europe/Moscow') AS column, toTypeName(column) AS x +``` + +``` text +┌──────────────────column─┬─x──────────────────────────────┐ +│ 2019-10-16 04:12:04.000 │ DateTime64(3, 'Europe/Moscow') │ +└─────────────────────────┴────────────────────────────────┘ +``` + +**4.** تبدیل منطقه زمانی + +``` sql +SELECT +toDateTime64(timestamp, 3, 'Europe/London') as lon_time, +toDateTime64(timestamp, 3, 'Europe/Moscow') as mos_time +FROM dt +``` + +``` text +┌───────────────lon_time──┬────────────────mos_time─┐ +│ 2019-01-01 00:00:00.000 │ 2019-01-01 03:00:00.000 │ +│ 2018-12-31 21:00:00.000 │ 2019-01-01 00:00:00.000 │ +└─────────────────────────┴─────────────────────────┘ +``` + +## همچنین نگاه کنید {#see-also} + +- [توابع تبدیل نوع](../../sql_reference/functions/type_conversion_functions.md) +- [توابع برای کار با تاریخ و زمان](../../sql_reference/functions/date_time_functions.md) +- [توابع برای کار با ارریس](../../sql_reference/functions/array_functions.md) +- [این `date_time_input_format` تنظیم](../../operations/settings/settings.md#settings-date_time_input_format) +- [این `timezone` پارامتر پیکربندی سرور](../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-timezone) +- [اپراتورها برای کار با تاریخ و زمان](../../sql_reference/operators.md#operators-datetime) +- [`Date` نوع داده](date.md) +- [`DateTime` نوع داده](datetime.md) diff --git a/docs/fa/sql_reference/data_types/decimal.md b/docs/fa/sql_reference/data_types/decimal.md new file mode 100644 index 00000000000..e2c822e76bd --- /dev/null +++ b/docs/fa/sql_reference/data_types/decimal.md @@ -0,0 +1,109 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 42 +toc_title: "\u062F\u0647\u062F\u0647\u06CC" +--- + +# Decimal(P, S) Decimal32(ع) Decimal64(ع) Decimal128(S) {#decimalp-s-decimal32s-decimal64s-decimal128s} + +امضا اعداد ثابت نقطه که دقت در طول اضافه کردن نگه دارید, تفریق و ضرب 
عملیات. برای تقسیم رقم حداقل قابل توجهی دور انداخته می شوند (گرد نیست). + +## پارامترها {#parameters} + +- ص دقیق. محدوده معتبر: \[ 1 : 38 \]. تعیین می کند که چگونه بسیاری از اعداد اعشاری می تواند داشته باشد (از جمله کسر). +- ص-مقیاس. محدوده معتبر: \[0: پ \]. تعیین می کند که چگونه بسیاری از رقم اعشاری کسر می تواند داشته باشد. + +بسته به فسفر اعشاری مقدار پارامتر (پ, بازدید کنندگان) مترادف برای است: +- پ از \[ 1 : 9 \] - برای اعشار (بازدید کنندگان) +- پ از \[ 10 : 18 \] - برای اعشار64(بازدید کنندگان) +- پ از \[ 19: 38 \] - برای اعشار128 (بازدید کنندگان) + +## محدوده مقدار دهدهی {#decimal-value-ranges} + +- دسیمال32 (بازدید کنندگان) - ( -1 \* 10^(9 - بازدید کنندگان), 1 \* 10^(9 بازدید کنندگان) ) +- اعشار64 (بازدید کنندگان) - ( -1 \* 10^(18 - س), 1 \* 10^(18 بازدید کنندگان) ) +- اعشار128 (بازدید کنندگان) - ( -1 \* 10^(38 - بازدید کنندگان), 1 \* 10^(38 بازدید کنندگان) ) + +برای مثال decimal32(4) می تواند شامل اعداد از -99999.9999 به 99999.9999 با 0.0001 گام. + +## نمایندگی داخلی {#internal-representation} + +داخلی داده ها به عنوان اعداد صحیح امضا نرمال با عرض بیت مربوطه نشان داده است. محدوده ارزش واقعی است که می تواند در حافظه ذخیره می شود کمی بزرگتر از بالا مشخص, که تنها در تبدیل از یک رشته بررسی. + +چرا که پردازنده مدرن اعداد صحیح 128 بیتی بومی را پشتیبانی نمی کند, عملیات بر روی اعشار128 شبیه سازی. از آنجا که این decimal128 با این نسخهها کار به طور قابل توجهی کندتر از decimal32/decimal64. + +## عملیات و نوع نتیجه {#operations-and-result-type} + +عملیات دودویی در نتیجه اعشاری در نوع نتیجه گسترده تر (با هر سفارش از استدلال). + +- اعشار64 (س1) Decimal32(S2) -\> Decimal64(S) +- اعشار128 (س1) Decimal32(S2) -\> Decimal128(S) +- اعشار128 (س1) Decimal64(S2) -\> Decimal128(S) + +قوانین برای مقیاس: + +- اضافه کردن به, تفریق کردن: بازدید کنندگان = حداکثر(بازدید کنندگان 1, بازدید کنندگان2). +- multuply: S = S1 + S2. +- تقسیم: S = S1. + +برای عملیات مشابه بین دهدهی و اعداد صحیح, نتیجه اعشاری از همان اندازه به عنوان یک استدلال است. + +عملیات بین دهدهی و float32/float64 تعریف نشده. اگر شما به آنها نیاز دارید, شما می توانید به صراحت بازیگران یکی از استدلال با استفاده از todecimal32, todecimal64, todecimal128 یا tofloat32, tofloat64 برنامهنویسی. به خاطر داشته باشید که نتیجه دقت از دست دادن و تبدیل نوع یک عملیات محاسباتی گران است. + +برخی از توابع در نتیجه بازگشت اعشاری به عنوان شناور64 (مثلا, ور یا استودف). محاسبات متوسط هنوز هم ممکن است در دهدهی انجام شود, که ممکن است به نتایج مختلف بین نت64 و ورودی اعشاری با ارزش های مشابه منجر شود. + +## بررسی سرریز {#overflow-checks} + +در طول محاسبات در اعشاری, عدد صحیح سرریز ممکن است رخ دهد. رقم بیش از حد در کسری دور انداخته می شوند (گرد نیست). رقم بیش از حد در بخش عدد صحیح به یک استثنا منجر شود. + +``` sql +SELECT toDecimal32(2, 4) AS x, x / 3 +``` + +``` text +┌──────x─┬─divide(toDecimal32(2, 4), 3)─┐ +│ 2.0000 │ 0.6666 │ +└────────┴──────────────────────────────┘ +``` + +``` sql +SELECT toDecimal32(4.2, 8) AS x, x * x +``` + +``` text +DB::Exception: Scale is out of bounds. +``` + +``` sql +SELECT toDecimal32(4.2, 8) AS x, 6 * x +``` + +``` text +DB::Exception: Decimal math overflow. +``` + +سرریز چک منجر به کاهش سرعت عملیات. اگر مشخص است که سرریزهای امکان پذیر نیست, حس می کند برای غیر فعال کردن چک با استفاده از `decimal_check_overflow` تنظیمات. 
هنگامی که چک غیر فعال هستند و سرریز اتفاق می افتد, نتیجه نادرست خواهد بود: + +``` sql +SET decimal_check_overflow = 0; +SELECT toDecimal32(4.2, 8) AS x, 6 * x +``` + +``` text +┌──────────x─┬─multiply(6, toDecimal32(4.2, 8))─┐ +│ 4.20000000 │ -17.74967296 │ +└────────────┴──────────────────────────────────┘ +``` + +چک سرریز اتفاق می افتد نه تنها در عملیات ریاضی بلکه در مقایسه ارزش: + +``` sql +SELECT toDecimal32(1, 8) < 100 +``` + +``` text +DB::Exception: Can't compare. +``` + +[مقاله اصلی](https://clickhouse.tech/docs/en/data_types/decimal/) diff --git a/docs/fa/sql_reference/data_types/domains/index.md b/docs/fa/sql_reference/data_types/domains/index.md new file mode 100644 index 00000000000..3a743f84290 --- /dev/null +++ b/docs/fa/sql_reference/data_types/domains/index.md @@ -0,0 +1,8 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_folder_title: Domains +toc_priority: 56 +--- + + diff --git a/docs/fa/data_types/domains/ipv4.md b/docs/fa/sql_reference/data_types/domains/ipv4.md similarity index 67% rename from docs/fa/data_types/domains/ipv4.md rename to docs/fa/sql_reference/data_types/domains/ipv4.md index 55be7ab4439..cd33df6acc9 100644 --- a/docs/fa/data_types/domains/ipv4.md +++ b/docs/fa/sql_reference/data_types/domains/ipv4.md @@ -1,12 +1,15 @@ --- -en_copy: true +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 59 +toc_title: IPv4 --- ## IPv4 {#ipv4} -`IPv4` is a domain based on `UInt32` type and serves as a typed replacement for storing IPv4 values. It provides compact storage with the human-friendly input-output format and column type information on inspection. +`IPv4` یک دامنه بر اساس `UInt32` نوع و به عنوان یک جایگزین تایپ شده برای ذخیره سازی مقادیر ایپو4 عمل می کند. این فراهم می کند ذخیره سازی جمع و جور با فرمت ورودی خروجی انسان پسند و نوع ستون اطلاعات در بازرسی. -### Basic Usage {#basic-usage} +### استفاده عمومی {#basic-usage} ``` sql CREATE TABLE hits (url String, from IPv4) ENGINE = MergeTree() ORDER BY url; @@ -21,13 +24,13 @@ DESCRIBE TABLE hits; └──────┴────────┴──────────────┴────────────────────┴─────────┴──────────────────┘ ``` -OR you can use IPv4 domain as a key: +یا شما می توانید از دامنه ایپو4 به عنوان یک کلید استفاده کنید: ``` sql CREATE TABLE hits (url String, from IPv4) ENGINE = MergeTree() ORDER BY from; ``` -`IPv4` domain supports custom input format as IPv4-strings: +`IPv4` دامنه پشتیبانی از فرمت ورودی سفارشی به عنوان ایپو4 رشته: ``` sql INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '116.253.40.133')('https://clickhouse.tech', '183.247.232.58')('https://clickhouse.yandex/docs/en/', '116.106.34.242'); @@ -43,7 +46,7 @@ SELECT * FROM hits; └────────────────────────────────────┴────────────────┘ ``` -Values are stored in compact binary form: +مقادیر به صورت باینری جمع و جور ذخیره می شود: ``` sql SELECT toTypeName(from), hex(from) FROM hits LIMIT 1; @@ -55,8 +58,8 @@ SELECT toTypeName(from), hex(from) FROM hits LIMIT 1; └──────────────────┴───────────┘ ``` -Domain values are not implicitly convertible to types other than `UInt32`. -If you want to convert `IPv4` value to a string, you have to do that explicitly with `IPv4NumToString()` function: +ارزش دامنه به طور ضمنی قابل تبدیل به انواع دیگر از `UInt32`. 
+اگر شما می خواهید برای تبدیل `IPv4` ارزش به یک رشته, شما باید برای انجام این کار به صراحت با `IPv4NumToString()` تابع: ``` sql SELECT toTypeName(s), IPv4NumToString(from) as s FROM hits LIMIT 1; @@ -66,7 +69,7 @@ SELECT toTypeName(s), IPv4NumToString(from) as s FROM hits LIMIT 1; │ String │ 183.247.232.58 │ └───────────────────────────────────┴────────────────┘ -Or cast to a `UInt32` value: +یا بازیگران به `UInt32` مقدار: ``` sql SELECT toTypeName(i), CAST(from as UInt32) as i FROM hits LIMIT 1; @@ -78,4 +81,4 @@ SELECT toTypeName(i), CAST(from as UInt32) as i FROM hits LIMIT 1; └──────────────────────────────────┴────────────┘ ``` -[Original article](https://clickhouse.tech/docs/en/data_types/domains/ipv4) +[مقاله اصلی](https://clickhouse.tech/docs/en/data_types/domains/ipv4) diff --git a/docs/fa/data_types/domains/ipv6.md b/docs/fa/sql_reference/data_types/domains/ipv6.md similarity index 70% rename from docs/fa/data_types/domains/ipv6.md rename to docs/fa/sql_reference/data_types/domains/ipv6.md index 4b856230ccc..8c2779777de 100644 --- a/docs/fa/data_types/domains/ipv6.md +++ b/docs/fa/sql_reference/data_types/domains/ipv6.md @@ -1,12 +1,15 @@ --- -en_copy: true +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 60 +toc_title: IPv6 --- ## IPv6 {#ipv6} -`IPv6` is a domain based on `FixedString(16)` type and serves as a typed replacement for storing IPv6 values. It provides compact storage with the human-friendly input-output format and column type information on inspection. +`IPv6` یک دامنه بر اساس `FixedString(16)` نوع و به عنوان یک جایگزین تایپ شده برای ذخیره سازی ارزش های ایپو6 عمل می کند. این فراهم می کند ذخیره سازی جمع و جور با فرمت ورودی خروجی انسان پسند و نوع ستون اطلاعات در بازرسی. -### Basic Usage {#basic-usage} +### استفاده عمومی {#basic-usage} ``` sql CREATE TABLE hits (url String, from IPv6) ENGINE = MergeTree() ORDER BY url; @@ -21,13 +24,13 @@ DESCRIBE TABLE hits; └──────┴────────┴──────────────┴────────────────────┴─────────┴──────────────────┘ ``` -OR you can use `IPv6` domain as a key: +یا شما می توانید استفاده کنید `IPv6` دامنه به عنوان یک کلید: ``` sql CREATE TABLE hits (url String, from IPv6) ENGINE = MergeTree() ORDER BY from; ``` -`IPv6` domain supports custom input as IPv6-strings: +`IPv6` دامنه پشتیبانی از ورودی های سفارشی به عنوان ایپو6 رشته: ``` sql INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '2a02:aa08:e000:3100::2')('https://clickhouse.tech', '2001:44c8:129:2632:33:0:252:2')('https://clickhouse.yandex/docs/en/', '2a02:e980:1e::1'); @@ -43,7 +46,7 @@ SELECT * FROM hits; └────────────────────────────────────┴───────────────────────────────┘ ``` -Values are stored in compact binary form: +مقادیر به صورت باینری جمع و جور ذخیره می شود: ``` sql SELECT toTypeName(from), hex(from) FROM hits LIMIT 1; @@ -55,8 +58,8 @@ SELECT toTypeName(from), hex(from) FROM hits LIMIT 1; └──────────────────┴──────────────────────────────────┘ ``` -Domain values are not implicitly convertible to types other than `FixedString(16)`. -If you want to convert `IPv6` value to a string, you have to do that explicitly with `IPv6NumToString()` function: +ارزش دامنه به طور ضمنی قابل تبدیل به انواع دیگر از `FixedString(16)`. 
+اگر شما می خواهید برای تبدیل `IPv6` ارزش به یک رشته, شما باید برای انجام این کار به صراحت با `IPv6NumToString()` تابع: ``` sql SELECT toTypeName(s), IPv6NumToString(from) as s FROM hits LIMIT 1; @@ -68,7 +71,7 @@ SELECT toTypeName(s), IPv6NumToString(from) as s FROM hits LIMIT 1; └───────────────────────────────────┴───────────────────────────────┘ ``` -Or cast to a `FixedString(16)` value: +یا بازیگران به یک `FixedString(16)` مقدار: ``` sql SELECT toTypeName(i), CAST(from as FixedString(16)) as i FROM hits LIMIT 1; @@ -80,4 +83,4 @@ SELECT toTypeName(i), CAST(from as FixedString(16)) as i FROM hits LIMIT 1; └───────────────────────────────────────────┴─────────┘ ``` -[Original article](https://clickhouse.tech/docs/en/data_types/domains/ipv6) +[مقاله اصلی](https://clickhouse.tech/docs/en/data_types/domains/ipv6) diff --git a/docs/fa/sql_reference/data_types/domains/overview.md b/docs/fa/sql_reference/data_types/domains/overview.md new file mode 100644 index 00000000000..ab6155caf64 --- /dev/null +++ b/docs/fa/sql_reference/data_types/domains/overview.md @@ -0,0 +1,32 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 58 +toc_title: "\u0628\u0631\u0631\u0633\u06CC \u0627\u062C\u0645\u0627\u0644\u06CC" +--- + +# دامنه {#domains} + +دامنه انواع خاصی است که اضافه کردن برخی از ویژگی های اضافی در بالای نوع پایه موجود, اما ترک بر روی سیم و بر روی دیسک فرمت از نوع داده اساسی دست نخورده. درحال حاضر, تاتر می کند دامنه تعریف شده توسط کاربر را پشتیبانی نمی کند. + +شما می توانید دامنه در هر نقطه نوع پایه مربوطه استفاده می شود, مثلا: + +- ایجاد یک ستون از یک نوع دامنه +- خواندن / نوشتن مقادیر از / به ستون دامنه +- اگر یک نوع پایه می تواند به عنوان یک شاخص استفاده می شود به عنوان شاخص استفاده می شود +- توابع تماس با مقادیر ستون دامنه + +### ویژگی های اضافی از دامنه {#extra-features-of-domains} + +- صریح نام نوع ستون در `SHOW CREATE TABLE` یا `DESCRIBE TABLE` +- ورودی از فرمت انسان دوستانه با `INSERT INTO domain_table(domain_column) VALUES(...)` +- خروجی به فرمت انسان دوستانه برای `SELECT domain_column FROM domain_table` +- بارگیری داده ها از یک منبع خارجی در قالب انسان دوستانه: `INSERT INTO domain_table FORMAT CSV ...` + +### محدودیت ها {#limitations} + +- می توانید ستون شاخص از نوع پایه به نوع دامنه از طریق تبدیل کنید `ALTER TABLE`. +- نمی تواند به طور ضمنی تبدیل مقادیر رشته به ارزش دامنه در هنگام قرار دادن داده ها از ستون یا جدول دیگر. +- دامنه می افزاید: هیچ محدودیتی در مقادیر ذخیره شده. + +[مقاله اصلی](https://clickhouse.tech/docs/en/data_types/domains/overview) diff --git a/docs/fa/sql_reference/data_types/enum.md b/docs/fa/sql_reference/data_types/enum.md new file mode 100644 index 00000000000..e468c910106 --- /dev/null +++ b/docs/fa/sql_reference/data_types/enum.md @@ -0,0 +1,132 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 50 +toc_title: "\u0634\u0645\u0627\u0631\u0634\u06CC" +--- + +# شمارشی {#enum} + +نوع شمارش متشکل از ارزش به نام. + +مقادیر نام شده باید به عنوان اعلام شود `'string' = integer` جفت انبار فروشگاه تنها اعداد, اما پشتیبانی از عملیات با ارزش از طریق نام خود را. + +پشتیبانی از کلیک: + +- 8 بیتی `Enum`. این می تواند تا 256 مقدار شمارش شده در `[-128, 127]` محدوده. +- 16 بیتی `Enum`. این می تواند تا 65536 مقدار شمارش شده در `[-32768, 32767]` محدوده. + +تاتر به طور خودکار انتخاب نوع `Enum` هنگامی که داده درج شده است. شما همچنین می توانید استفاده کنید `Enum8` یا `Enum16` انواع برای اطمینان در اندازه ذخیره سازی. 
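+
+به عنوان یک طرح فرضی، اعلان صریح اندازه می‌تواند چنین باشد (نام جدول و مقادیر صرفاً برای نمونه انتخاب شده‌اند):
+
+``` sql
+-- جدول و مقادیر فرضی؛ فقط برای نشان دادن انتخاب صریح اندازه
+CREATE TABLE t_enum_sized
+(
+    e8  Enum8('off' = 0, 'on' = 1),
+    e16 Enum16('low' = 1, 'high' = 1000)
+) ENGINE = TinyLog
+```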
+ +## نمونه های استفاده {#usage-examples} + +در اینجا ما یک جدول با یک ایجاد می کنیم `Enum8('hello' = 1, 'world' = 2)` نوع ستون: + +``` sql +CREATE TABLE t_enum +( + x Enum('hello' = 1, 'world' = 2) +) +ENGINE = TinyLog +``` + +ستون `x` فقط می توانید مقادیر که در تعریف نوع ذکر شده را ذخیره کنید: `'hello'` یا `'world'`. اگر شما سعی می کنید برای صرفه جویی در هر مقدار دیگر, کلیک یک استثنا بالا می برد. اندازه 8 بیتی برای این `Enum` به طور خودکار انتخاب شده است. + +``` sql +INSERT INTO t_enum VALUES ('hello'), ('world'), ('hello') +``` + +``` text +Ok. +``` + +``` sql +INSERT INTO t_enum values('a') +``` + +``` text +Exception on client: +Code: 49. DB::Exception: Unknown element 'a' for type Enum('hello' = 1, 'world' = 2) +``` + +هنگامی که شما پرس و جو داده ها را از جدول, تاتر خروجی مقادیر رشته از `Enum`. + +``` sql +SELECT * FROM t_enum +``` + +``` text +┌─x─────┐ +│ hello │ +│ world │ +│ hello │ +└───────┘ +``` + +اگر شما نیاز به دیدن معادل عددی از ردیف, شما باید بازیگران `Enum` ارزش به نوع صحیح. + +``` sql +SELECT CAST(x, 'Int8') FROM t_enum +``` + +``` text +┌─CAST(x, 'Int8')─┐ +│ 1 │ +│ 2 │ +│ 1 │ +└─────────────────┘ +``` + +برای ایجاد یک مقدار شمارشی در پرس و جو, شما همچنین نیاز به استفاده از `CAST`. + +``` sql +SELECT toTypeName(CAST('a', 'Enum(\'a\' = 1, \'b\' = 2)')) +``` + +``` text +┌─toTypeName(CAST('a', 'Enum(\'a\' = 1, \'b\' = 2)'))─┐ +│ Enum8('a' = 1, 'b' = 2) │ +└─────────────────────────────────────────────────────┘ +``` + +## قوانین عمومی و استفاده {#general-rules-and-usage} + +هر یک از مقادیر یک عدد در محدوده اختصاص داده شده است `-128 ... 127` برای `Enum8` یا در محدوده `-32768 ... 32767` برای `Enum16`. همه رشته ها و اعداد باید متفاوت باشد. یک رشته خالی مجاز است. اگر این نوع مشخص شده است (در یک تعریف جدول), اعداد را می توان در جهت دلخواه. با این حال, سفارش مهم نیست. + +نه رشته و نه مقدار عددی در یک `Enum` می تواند باشد [NULL](../../sql_reference/syntax.md). + +یک `Enum` می توان در [Nullable](nullable.md) نوع. بنابراین اگر شما یک جدول با استفاده از پرس و جو ایجاد کنید + +``` sql +CREATE TABLE t_enum_nullable +( + x Nullable( Enum8('hello' = 1, 'world' = 2) ) +) +ENGINE = TinyLog +``` + +این می تواند نه تنها ذخیره `'hello'` و `'world'` اما `NULL` همینطور + +``` sql +INSERT INTO t_enum_nullable Values('hello'),('world'),(NULL) +``` + +در رم `Enum` ستون به همان شیوه ذخیره می شود `Int8` یا `Int16` از مقادیر عددی مربوطه. + +هنگام خواندن در فرم متن, تاتر تجزیه ارزش به عنوان یک رشته و جستجو برای رشته مربوطه را از مجموعه ای از ارزش شمارشی. اگر یافت نشد, یک استثنا پرتاب می شود. هنگام خواندن در قالب متن, رشته به عنوان خوانده شده و مقدار عددی مربوطه نگاه کردن. یک استثنا پرتاب خواهد شد اگر یافت نشد. +هنگام نوشتن در فرم متن, این ارزش به عنوان رشته مربوطه می نویسد. اگر داده های ستون شامل زباله (اعداد است که از مجموعه معتبر نیست), یک استثنا پرتاب می شود. زمانی که خواندن و نوشتن در فایل باینری فرم آن را با این نسخهها کار به همان روش برای int8 و int16 انواع داده ها. +مقدار پیش فرض ضمنی ارزش با کمترین تعداد است. + +در طول `ORDER BY`, `GROUP BY`, `IN`, `DISTINCT` و به همین ترتیب, مادران رفتار به همان شیوه به عنوان اعداد مربوطه. مثلا, سفارش شده توسط انواع عددی. اپراتورهای برابری و مقایسه کار به همان شیوه در مادران به عنوان در مقادیر عددی اساسی انجام. + +مقادیر شمارشی را نمی توان با اعداد مقایسه شده است. شمارشی را می توان به یک رشته ثابت در مقایسه. اگر رشته در مقایسه با یک مقدار معتبر برای شمارشی نیست, یک استثنا پرتاب خواهد شد. اپراتور در با شمارشی در سمت چپ و مجموعه ای از رشته ها در سمت راست پشتیبانی می شود. رشته ارزش مربوط شمارشی هستند. 
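+
+برای نمونه، پرس‌وجوهای زیر (با جدول `t_enum` که بالاتر ساخته شد) مقایسه با رشتهٔ ثابت و اپراتور `IN` را نشان می‌دهند:
+
+``` sql
+SELECT x FROM t_enum WHERE x = 'world';
+SELECT x FROM t_enum WHERE x IN ('hello', 'world');
+```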
+ +Most numeric and string operations are not defined for Enum values, e.g. adding a number to an Enum or concatenating a string to an Enum. +با این حال, شمارشی طبیعی است `toString` تابع است که ارزش رشته خود را برمی گرداند. + +مقادیر شمارشی نیز قابل تبدیل به انواع عددی با استفاده از `toT` تابع, که تی یک نوع عددی است. هنگامی که تی مربوط به نوع عددی اساسی شمارشی است, این تبدیل صفر هزینه است. +نوع شمارشی را می توان بدون هزینه با استفاده از تغییر تغییر, اگر تنها مجموعه ای از ارزش تغییر است. ممکن است که به هر دو اضافه کردن و حذف اعضای شمارشی با استفاده از تغییر (از بین بردن امن است تنها در صورتی که مقدار حذف شده است هرگز در جدول استفاده می شود). به عنوان یک حفاظت, تغییر مقدار عددی از یک عضو شمارشی که قبلا تعریف یک استثنا پرتاب. + +با استفاده از تغییر آن را ممکن است برای تغییر یک enum8 به enum16 یا بالعکس فقط مانند تغییر یک int8 به int16. + +[مقاله اصلی](https://clickhouse.tech/docs/en/data_types/enum/) diff --git a/docs/fa/sql_reference/data_types/fixedstring.md b/docs/fa/sql_reference/data_types/fixedstring.md new file mode 100644 index 00000000000..e151008822c --- /dev/null +++ b/docs/fa/sql_reference/data_types/fixedstring.md @@ -0,0 +1,63 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 45 +toc_title: "\u0631\u0634\u062A\u0647 \u062B\u0627\u0628\u062A)" +--- + +# رشته ثابت {#fixedstring} + +یک رشته ثابت طول `N` بایت (نه شخصیت و نه نقاط کد). + +برای اعلام یک ستون از `FixedString` نوع, استفاده از نحو زیر: + +``` sql + FixedString(N) +``` + +کجا `N` یک عدد طبیعی است. + +این `FixedString` نوع زمانی موثر است که داده ها طول دقیق داشته باشند `N` بایت در تمام موارد دیگر, این احتمال وجود دارد به کاهش بهره وری. + +نمونه هایی از مقادیر است که می تواند موثر در ذخیره می شود `FixedString`- ستون های تایپ شده: + +- نمایندگی دودویی نشانی های اینترنتی (`FixedString(16)` برای ایپو6). +- Language codes (ru\_RU, en\_US … ). +- Currency codes (USD, RUB … ). +- نمایش دودویی رشته هش (`FixedString(16)` برای ام دی 5, `FixedString(32)` برای شی256). + +برای ذخیره مقادیر یوید از [UUID](uuid.md) نوع داده. + +هنگام قرار دادن داده ها, تاتر: + +- مکمل یک رشته با null بایت اگر رشته شامل کمتر از `N` بایت +- پرتاب `Too large value for FixedString(N)` استثنا اگر رشته شامل بیش از `N` بایت + +در هنگام انتخاب داده, تاتر می کند بایت پوچ در پایان رشته را حذف کنید. اگر شما استفاده از `WHERE` بند, شما باید بایت پوچ دستی اضافه برای مطابقت با `FixedString` ارزش. مثال زیر نشان میدهد که چگونه به استفاده از `WHERE` بند با `FixedString`. + +بیایید جدول زیر را با تک در نظر بگیریم `FixedString(2)` ستون: + +``` text +┌─name──┐ +│ b │ +└───────┘ +``` + +پرسوجو `SELECT * FROM FixedStringTable WHERE a = 'b'` هیچ داده به عنوان یک نتیجه نمی گرداند. ما باید الگوی فیلتر با بایت پوچ تکمیل. + +``` sql +SELECT * FROM FixedStringTable +WHERE a = 'b\0' +``` + +``` text +┌─a─┐ +│ b │ +└───┘ +``` + +این رفتار از خروجی زیر برای متفاوت `CHAR` نوع (جایی که رشته ها با فضاهای خالی, و فضاهای برای خروجی حذف). + +توجه داشته باشید که طول `FixedString(N)` ارزش ثابت است. این [طول](../../sql_reference/functions/array_functions.md#array_functions-length) بازده عملکرد `N` حتی اگر `FixedString(N)` ارزش تنها با بایت پوچ پر, اما [خالی](../../sql_reference/functions/string_functions.md#empty) بازده عملکرد `1` در این مورد. 
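+
+نمونهٔ کوچک زیر این رفتار را نشان می‌دهد؛ بر اساس توضیح بالا، خروجی زیر مورد انتظار است:
+
+``` sql
+SELECT
+    length(toFixedString('', 2)) AS len,      -- همیشه N را برمی‌گرداند
+    empty(toFixedString('', 2)) AS is_empty   -- برای مقدار پر از بایت پوچ 1 برمی‌گرداند
+```
+
+``` text
+┌─len─┬─is_empty─┐
+│   2 │        1 │
+└─────┴──────────┘
+```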
+ +[مقاله اصلی](https://clickhouse.tech/docs/en/data_types/fixedstring/) diff --git a/docs/fa/sql_reference/data_types/float.md b/docs/fa/sql_reference/data_types/float.md new file mode 100644 index 00000000000..99f25f4e810 --- /dev/null +++ b/docs/fa/sql_reference/data_types/float.md @@ -0,0 +1,87 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 41 +toc_title: Float32, Float64 +--- + +# Float32, Float64 {#float32-float64} + +[اعداد ممیز شناور](https://en.wikipedia.org/wiki/IEEE_754). + +انواع معادل انواع ج هستند: + +- `Float32` - `float` +- `Float64` - `double` + +ما توصیه می کنیم که شما ذخیره داده ها در فرم صحیح در هر زمان ممکن است. مثلا, تبدیل اعداد دقت ثابت به ارزش عدد صحیح, مانند مقدار پولی و یا بار بار صفحه در میلی ثانیه. + +## با استفاده از اعداد ممیز شناور {#using-floating-point-numbers} + +- محاسبات با اعداد ممیز شناور ممکن است یک خطای گرد کردن تولید. + + + +``` sql +SELECT 1 - 0.9 +``` + +``` text +┌───────minus(1, 0.9)─┐ +│ 0.09999999999999998 │ +└─────────────────────┘ +``` + +- نتیجه محاسبه بستگی به روش محاسبه (نوع پردازنده و معماری سیستم کامپیوتری). +- محاسبات ممیز شناور ممکن است در اعداد مانند بی نهایت منجر شود (`Inf`) و “not-a-number” (`NaN`). این را باید در نظر گرفته شود در هنگام پردازش نتایج محاسبات. +- هنگامی که تجزیه اعداد ممیز شناور از متن, نتیجه ممکن است نزدیکترین شماره ماشین نمایندگی. + +## هشدار داده می شود {#data_type-float-nan-inf} + +در مقابل به گذاشتن استاندارد, خانه رعیتی پشتیبانی از مقوله های زیر است از اعداد ممیز شناور: + +- `Inf` – Infinity. + + + +``` sql +SELECT 0.5 / 0 +``` + +``` text +┌─divide(0.5, 0)─┐ +│ inf │ +└────────────────┘ +``` + +- `-Inf` – Negative infinity. + + + +``` sql +SELECT -0.5 / 0 +``` + +``` text +┌─divide(-0.5, 0)─┐ +│ -inf │ +└─────────────────┘ +``` + +- `NaN` – Not a number. + + + +``` sql +SELECT 0 / 0 +``` + +``` text +┌─divide(0, 0)─┐ +│ nan │ +└──────────────┘ +``` + + See the rules for `NaN` sorting in the section [ORDER BY clause](../sql_reference/statements/select.md). + +[مقاله اصلی](https://clickhouse.tech/docs/en/data_types/float/) diff --git a/docs/fa/sql_reference/data_types/index.md b/docs/fa/sql_reference/data_types/index.md new file mode 100644 index 00000000000..cb5a702ddbd --- /dev/null +++ b/docs/fa/sql_reference/data_types/index.md @@ -0,0 +1,15 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_folder_title: Data Types +toc_priority: 37 +toc_title: "\u0645\u0639\u0631\u0641\u06CC \u0634\u0631\u06A9\u062A" +--- + +# انواع داده ها {#data_types} + +تاتر می توانید انواع مختلف داده ها در سلول های جدول ذخیره کنید. + +این بخش انواع داده های پشتیبانی شده و ملاحظات ویژه ای را برای استفاده و/یا در صورت وجود اجرا می کند. + +[مقاله اصلی](https://clickhouse.tech/docs/en/data_types/) diff --git a/docs/fa/sql_reference/data_types/int_uint.md b/docs/fa/sql_reference/data_types/int_uint.md new file mode 100644 index 00000000000..600ffbc56e9 --- /dev/null +++ b/docs/fa/sql_reference/data_types/int_uint.md @@ -0,0 +1,26 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 40 +toc_title: UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64 +--- + +# UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64 {#uint8-uint16-uint32-uint64-int8-int16-int32-int64} + +اعداد صحیح ثابت طول, با یا بدون نشانه. 
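+
+برای مثال، می‌توان نوع استنباط‌شده برای لیترال‌های عددی را با تابع `toTypeName` بررسی کرد؛ کلیک‌هاوس برای لیترال‌ها کوچک‌ترین نوع مناسب را برمی‌گزیند و خروجی زیر مورد انتظار است:
+
+``` sql
+SELECT toTypeName(0) AS a, toTypeName(-1) AS b, toTypeName(65535) AS c
+```
+
+``` text
+┌─a─────┬─b────┬─c──────┐
+│ UInt8 │ Int8 │ UInt16 │
+└───────┴──────┴────────┘
+```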
+ +## محدوده اعضای هیات {#int-ranges} + +- بین8- \[-128 : 127\] +- اینتر16- \[-32768 : 32767\] +- اینتر32 - \[-2147483648: 2147483647\] +- اینتر64- \[-9223372036854775808 : 9223372036854775807\] + +## محدوده دانشگاه تهران {#uint-ranges} + +- توینت8- \[0: 255\] +- اوینت16- \[0: 65535\] +- اوینت32- \[0: 4294967295\] +- اوت64 - \[0: 18446744073709551615\] + +[مقاله اصلی](https://clickhouse.tech/docs/en/data_types/int_uint/) diff --git a/docs/fa/sql_reference/data_types/nested_data_structures/index.md b/docs/fa/sql_reference/data_types/nested_data_structures/index.md new file mode 100644 index 00000000000..f2885dc8a16 --- /dev/null +++ b/docs/fa/sql_reference/data_types/nested_data_structures/index.md @@ -0,0 +1,12 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_folder_title: Nested Data Structures +toc_hidden: true +toc_priority: 54 +toc_title: "\u0645\u062E\u0641\u06CC" +--- + +# ساختارهای داده تو در تو {#nested-data-structures} + +[مقاله اصلی](https://clickhouse.tech/docs/en/data_types/nested_data_structures/) diff --git a/docs/fa/sql_reference/data_types/nested_data_structures/nested.md b/docs/fa/sql_reference/data_types/nested_data_structures/nested.md new file mode 100644 index 00000000000..8d5b9897781 --- /dev/null +++ b/docs/fa/sql_reference/data_types/nested_data_structures/nested.md @@ -0,0 +1,106 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 57 +toc_title: "\u062A\u0648 \u062F\u0631 \u062A\u0648(Name1 Type1, Name2 Type2, ...)" +--- + +# Nested(name1 Type1, Name2 Type2, …) {#nestedname1-type1-name2-type2} + +A nested data structure is like a table inside a cell. The parameters of a nested data structure – the column names and types – are specified the same way as in a [CREATE TABLE](../../../sql_reference/statements/create.md) پرس و جو. هر سطر جدول می تواند به هر تعداد از ردیف در یک ساختار داده تو در تو مطابقت. + +مثال: + +``` sql +CREATE TABLE test.visits +( + CounterID UInt32, + StartDate Date, + Sign Int8, + IsNew UInt8, + VisitID UInt64, + UserID UInt64, + ... + Goals Nested + ( + ID UInt32, + Serial UInt32, + EventTime DateTime, + Price Int64, + OrderID String, + CurrencyID UInt32 + ), + ... +) ENGINE = CollapsingMergeTree(StartDate, intHash32(UserID), (CounterID, StartDate, intHash32(UserID), VisitID), 8192, Sign) +``` + +این مثال اعلام کرد `Goals` ساختار داده های تو در تو, که شامل داده ها در مورد تبدیل (اهداف رسیده). هر سطر در ‘visits’ جدول می تواند به صفر یا هر تعداد از تبدیل مطابقت دارد. + +فقط یک سطح تودرتو تک پشتیبانی می شود. ستون های سازه های تو در تو حاوی ارریس معادل چندین بعدی هستند بنابراین پشتیبانی محدودی دارند (هیچ پشتیبانی ای برای ذخیره این ستون ها در جداول با موتور ادغام وجود ندارد). + +در بیشتر موارد, در هنگام کار با یک ساختار داده های تو در تو, ستون خود را با نام ستون جدا شده توسط یک نقطه مشخص. این ستون ها مجموعه ای از انواع تطبیق را تشکیل می دهند. تمام ستون ها از یک ساختار داده های تو در تو یکسان هستند. 
+ +مثال: + +``` sql +SELECT + Goals.ID, + Goals.EventTime +FROM test.visits +WHERE CounterID = 101500 AND length(Goals.ID) < 5 +LIMIT 10 +``` + +``` text +┌─Goals.ID───────────────────────┬─Goals.EventTime───────────────────────────────────────────────────────────────────────────┐ +│ [1073752,591325,591325] │ ['2014-03-17 16:38:10','2014-03-17 16:38:48','2014-03-17 16:42:27'] │ +│ [1073752] │ ['2014-03-17 00:28:25'] │ +│ [1073752] │ ['2014-03-17 10:46:20'] │ +│ [1073752,591325,591325,591325] │ ['2014-03-17 13:59:20','2014-03-17 22:17:55','2014-03-17 22:18:07','2014-03-17 22:18:51'] │ +│ [] │ [] │ +│ [1073752,591325,591325] │ ['2014-03-17 11:37:06','2014-03-17 14:07:47','2014-03-17 14:36:21'] │ +│ [] │ [] │ +│ [] │ [] │ +│ [591325,1073752] │ ['2014-03-17 00:46:05','2014-03-17 00:46:05'] │ +│ [1073752,591325,591325,591325] │ ['2014-03-17 13:28:33','2014-03-17 13:30:26','2014-03-17 18:51:21','2014-03-17 18:51:45'] │ +└────────────────────────────────┴───────────────────────────────────────────────────────────────────────────────────────────┘ +``` + +ساده ترین راه این است که از یک ساختار داده های تو در تو فکر می کنم به عنوان مجموعه ای از مجموعه ای از مجموعه های ستون های متعدد از همان طول. + +تنها جایی که پرس و جو را انتخاب کنید می توانید نام کل ساختار داده های تو در تو به جای ستون های فردی مشخص مجموعه ملحق بند. برای کسب اطلاعات بیشتر, دیدن “ARRAY JOIN clause”. مثال: + +``` sql +SELECT + Goal.ID, + Goal.EventTime +FROM test.visits +ARRAY JOIN Goals AS Goal +WHERE CounterID = 101500 AND length(Goals.ID) < 5 +LIMIT 10 +``` + +``` text +┌─Goal.ID─┬──────Goal.EventTime─┐ +│ 1073752 │ 2014-03-17 16:38:10 │ +│ 591325 │ 2014-03-17 16:38:48 │ +│ 591325 │ 2014-03-17 16:42:27 │ +│ 1073752 │ 2014-03-17 00:28:25 │ +│ 1073752 │ 2014-03-17 10:46:20 │ +│ 1073752 │ 2014-03-17 13:59:20 │ +│ 591325 │ 2014-03-17 22:17:55 │ +│ 591325 │ 2014-03-17 22:18:07 │ +│ 591325 │ 2014-03-17 22:18:51 │ +│ 1073752 │ 2014-03-17 11:37:06 │ +└─────────┴─────────────────────┘ +``` + +شما نمی توانید انتخاب کنید برای کل ساختار داده های تو در تو انجام دهد. شما فقط می توانید به صراحت ستون های فردی را که بخشی از این هستند لیست کنید. + +برای پرس و جو درج, شما باید تمام عناصر ستون مولفه از یک ساختار داده های تو در تو به طور جداگانه منتقل (در صورتی که مجموعه فردی بودند). در حین درج سیستم چک می کند که همان طول را دارند. + +برای پرس و جو توصیف, ستون در یک ساختار داده های تو در تو به طور جداگانه در همان راه ذکر شده. + +پرس و جو را تغییر دهید برای عناصر در یک ساختار داده های تو در تو دارای محدودیت. + +[مقاله اصلی](https://clickhouse.tech/docs/en/data_types/nested_data_structures/nested/) diff --git a/docs/fa/sql_reference/data_types/nullable.md b/docs/fa/sql_reference/data_types/nullable.md new file mode 100644 index 00000000000..987f338c07e --- /dev/null +++ b/docs/fa/sql_reference/data_types/nullable.md @@ -0,0 +1,46 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 54 +toc_title: Nullable +--- + +# Nullable(typename) {#data_type-nullable} + +اجازه می دهد تا برای ذخیره نشانگر ویژه ([NULL](../../sql_reference/syntax.md)) که نشان دهنده “missing value” در کنار مقادیر عادی مجاز `TypeName`. برای مثال یک `Nullable(Int8)` ستون نوع می تواند ذخیره شود `Int8` ارزش نوع و ردیف است که ارزش ذخیره خواهد شد `NULL`. + +برای یک `TypeName` شما نمی توانید از انواع داده های کامپوزیت استفاده کنید [& حذف](array.md) و [تاپل](tuple.md). انواع داده های کامپوزیت می تواند شامل `Nullable` مقادیر نوع مانند `Array(Nullable(Int8))`. + +A `Nullable` فیلد نوع را نمی توان در شاخص های جدول گنجانده شده است. 
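+
+برای نمونه، طرح فرضی زیر به دلیل استفاده از ستون `Nullable` در کلید مرتب‌سازی با خطا مواجه می‌شود (نام جدول فرضی است و متن دقیق خطا به نسخهٔ سرور بستگی دارد):
+
+``` sql
+CREATE TABLE t_bad (x Nullable(Int8)) ENGINE = MergeTree ORDER BY x
+-- انتظار می‌رود خطایی تقریبا مانند زیر رخ دهد:
+-- DB::Exception: Sorting key cannot contain nullable columns
+```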
+ +`NULL` مقدار پیش فرض برای هر `Nullable` نوع, مگر اینکه در غیر این صورت در پیکربندی سرور کلیک مشخص. + +## ویژگی های ذخیره سازی {#storage-features} + +برای ذخیره `Nullable` ارزش نوع در یک ستون جدول, تاتر با استفاده از یک فایل جداگانه با `NULL` ماسک علاوه بر فایل عادی با ارزش. مطالب در ماسک فایل اجازه می دهد خانه کلیک برای تشخیص بین `NULL` و یک مقدار پیش فرض از نوع داده مربوطه را برای هر سطر جدول. به دلیل یک فایل اضافی, `Nullable` ستون مصرف فضای ذخیره سازی اضافی در مقایسه با یک نرمال مشابه. + +!!! info "یادداشت" + با استفاده از `Nullable` تقریبا همیشه منفی تاثیر می گذارد عملکرد, نگه داشتن این در ذهن در هنگام طراحی پایگاه داده های خود را. + +## مثال طریقه استفاده {#usage-example} + +``` sql +CREATE TABLE t_null(x Int8, y Nullable(Int8)) ENGINE TinyLog +``` + +``` sql +INSERT INTO t_null VALUES (1, NULL), (2, 3) +``` + +``` sql +SELECT x + y FROM t_null +``` + +``` text +┌─plus(x, y)─┐ +│ ᴺᵁᴸᴸ │ +│ 5 │ +└────────────┘ +``` + +[مقاله اصلی](https://clickhouse.tech/docs/en/data_types/nullable/) diff --git a/docs/fa/sql_reference/data_types/special_data_types/expression.md b/docs/fa/sql_reference/data_types/special_data_types/expression.md new file mode 100644 index 00000000000..102136441b6 --- /dev/null +++ b/docs/fa/sql_reference/data_types/special_data_types/expression.md @@ -0,0 +1,12 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 58 +toc_title: "\u0639\u0628\u0627\u0631\u062A" +--- + +# عبارت {#expression} + +عبارات برای نمایندگی لامبداها در توابع بالا سفارش استفاده می شود. + +[مقاله اصلی](https://clickhouse.tech/docs/en/data_types/special_data_types/expression/) diff --git a/docs/fa/sql_reference/data_types/special_data_types/index.md b/docs/fa/sql_reference/data_types/special_data_types/index.md new file mode 100644 index 00000000000..d77e3bd93d2 --- /dev/null +++ b/docs/fa/sql_reference/data_types/special_data_types/index.md @@ -0,0 +1,14 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_folder_title: Special Data Types +toc_hidden: true +toc_priority: 55 +toc_title: "\u0645\u062E\u0641\u06CC" +--- + +# انواع داده های ویژه {#special-data-types} + +مقادیر ویژه نوع داده را نمی توان برای صرفه جویی در یک جدول یا خروجی در نتایج پرس و جو سریال, اما می تواند به عنوان یک نتیجه متوسط در طول اجرای پرس و جو مورد استفاده قرار. + +[مقاله اصلی](https://clickhouse.tech/docs/en/data_types/special_data_types/) diff --git a/docs/fa/sql_reference/data_types/special_data_types/interval.md b/docs/fa/sql_reference/data_types/special_data_types/interval.md new file mode 100644 index 00000000000..7c108a72641 --- /dev/null +++ b/docs/fa/sql_reference/data_types/special_data_types/interval.md @@ -0,0 +1,85 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 61 +toc_title: "\u0641\u0627\u0635\u0644\u0647" +--- + +# فاصله {#data-type-interval} + +خانواده از انواع داده ها به نمایندگی از فواصل زمان و تاریخ. انواع حاصل از [INTERVAL](../../../sql_reference/operators.md#operator-interval) اپراتور + +!!! warning "اخطار" + `Interval` مقادیر نوع داده را نمی توان در جداول ذخیره کرد. + +ساختار: + +- فاصله زمانی به عنوان یک مقدار عدد صحیح بدون علامت. +- نوع یک بازه ی زمانی. + +انواع فاصله پشتیبانی شده: + +- `SECOND` +- `MINUTE` +- `HOUR` +- `DAY` +- `WEEK` +- `MONTH` +- `QUARTER` +- `YEAR` + +برای هر نوع فاصله, یک نوع داده جداگانه وجود دارد. 
برای مثال `DAY` فاصله مربوط به `IntervalDay` نوع داده: + +``` sql +SELECT toTypeName(INTERVAL 4 DAY) +``` + +``` text +┌─toTypeName(toIntervalDay(4))─┐ +│ IntervalDay │ +└──────────────────────────────┘ +``` + +## اظهارات طریقه استفاده {#data-type-interval-usage-remarks} + +شما می توانید استفاده کنید `Interval`- ارزش نوع در عملیات ریاضی با [تاریخ](../../../sql_reference/data_types/date.md) و [DateTime](../../../sql_reference/data_types/datetime.md)- ارزش نوع . مثلا, شما می توانید اضافه کنید 4 روز به زمان فعلی: + +``` sql +SELECT now() as current_date_time, current_date_time + INTERVAL 4 DAY +``` + +``` text +┌───current_date_time─┬─plus(now(), toIntervalDay(4))─┐ +│ 2019-10-23 10:58:45 │ 2019-10-27 10:58:45 │ +└─────────────────────┴───────────────────────────────┘ +``` + +فواصل با انواع مختلف نمی تواند ترکیب شود. شما می توانید فواصل مانند استفاده کنید `4 DAY 1 HOUR`. تعیین فواصل در واحد هایی که کوچکتر یا مساوی به کوچکترین واحد از فاصله مثلا فاصله `1 day and an hour` فاصله را می توان به عنوان بیان شده است `25 HOUR` یا `90000 SECOND`. + +شما می توانید عملیات ریاضی با انجام نمی `Interval`- ارزش نوع, اما شما می توانید فواصل از انواع مختلف در نتیجه به ارزش در اضافه `Date` یا `DateTime` انواع داده ها. به عنوان مثال: + +``` sql +SELECT now() AS current_date_time, current_date_time + INTERVAL 4 DAY + INTERVAL 3 HOUR +``` + +``` text +┌───current_date_time─┬─plus(plus(now(), toIntervalDay(4)), toIntervalHour(3))─┐ +│ 2019-10-23 11:16:28 │ 2019-10-27 14:16:28 │ +└─────────────────────┴────────────────────────────────────────────────────────┘ +``` + +پرس و جوی زیر باعث می شود یک استثنا: + +``` sql +select now() AS current_date_time, current_date_time + (INTERVAL 4 DAY + INTERVAL 3 HOUR) +``` + +``` text +Received exception from server (version 19.14.1): +Code: 43. DB::Exception: Received from localhost:9000. DB::Exception: Wrong argument types for function plus: if one argument is Interval, then another must be Date or DateTime.. +``` + +## همچنین نگاه کنید به {#see-also} + +- [INTERVAL](../../../sql_reference/operators.md#operator-interval) اپراتور +- [توینتروال](../../../sql_reference/functions/type_conversion_functions.md#function-tointerval) توابع تبدیل نوع diff --git a/docs/fa/sql_reference/data_types/special_data_types/nothing.md b/docs/fa/sql_reference/data_types/special_data_types/nothing.md new file mode 100644 index 00000000000..eff72a116ea --- /dev/null +++ b/docs/fa/sql_reference/data_types/special_data_types/nothing.md @@ -0,0 +1,26 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 60 +toc_title: "\u0647\u06CC\u0686 \u0686\u06CC\u0632" +--- + +# هیچی {#nothing} + +تنها هدف از این نوع داده ها نشان دهنده مواردی است که انتظار نمی رود ارزش باشد. بنابراین شما می توانید یک ایجاد کنید `Nothing` نوع ارزش. + +مثلا, تحت اللفظی [NULL](../../../sql_reference/syntax.md#null-literal) دارای نوع `Nullable(Nothing)`. اطلاعات بیشتر در مورد [Nullable](../../../sql_reference/data_types/nullable.md). 
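This can be verified directly (a minimal sketch):

``` sql
SELECT toTypeName(NULL)   -- Nullable(Nothing)
```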
+ +این `Nothing` نوع نیز می تواند مورد استفاده برای نشان دادن خالی: + +``` sql +SELECT toTypeName(array()) +``` + +``` text +┌─toTypeName(array())─┐ +│ Array(Nothing) │ +└─────────────────────┘ +``` + +[مقاله اصلی](https://clickhouse.tech/docs/en/data_types/special_data_types/nothing/) diff --git a/docs/fa/sql_reference/data_types/special_data_types/set.md b/docs/fa/sql_reference/data_types/special_data_types/set.md new file mode 100644 index 00000000000..a3146c4a321 --- /dev/null +++ b/docs/fa/sql_reference/data_types/special_data_types/set.md @@ -0,0 +1,12 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 59 +toc_title: "\u062A\u0646\u0638\u06CC\u0645" +--- + +# تنظیم {#set} + +مورد استفاده برای نیمه راست یک [IN](../../../sql_reference/statements/select.md#select-in-operators) اصطلاح. + +[مقاله اصلی](https://clickhouse.tech/docs/en/data_types/special_data_types/set/) diff --git a/docs/fa/sql_reference/data_types/string.md b/docs/fa/sql_reference/data_types/string.md new file mode 100644 index 00000000000..4cd48b2a2f2 --- /dev/null +++ b/docs/fa/sql_reference/data_types/string.md @@ -0,0 +1,20 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 44 +toc_title: "\u0631\u0634\u062A\u0647" +--- + +# رشته {#string} + +رشته ها از طول دلخواه. طول محدود نمی شود. مقدار می تواند مجموعه دلخواه از بایت شامل, از جمله بایت پوچ. +نوع رشته جایگزین انواع لاکار, قطره, مسدود کردن, و دیگران را از دیگر سرویس بهداشتی. + +## کدگذاریها {#encodings} + +کلیکهاوس مفهوم کدگذاریها را ندارد. رشته ها می توانند شامل مجموعه دلخواه از بایت, که ذخیره می شود و خروجی به عنوان است. +اگر شما نیاز به ذخیره متون, توصیه می کنیم با استفاده از یونایتد-8 رمزگذاری. حداقل, اگر ترمینال خود را با استفاده از سخن گفتن-8 (به عنوان توصیه می شود), شما می توانید خواندن و نوشتن ارزش های خود را بدون ساخت تبدیل. +به طور مشابه, توابع خاص برای کار با رشته تغییرات جداگانه که با این فرض کار می کنند که رشته شامل مجموعه ای از بایت به نمایندگی از یک متن کد گذاری شده-8. +برای مثال ‘length’ تابع محاسبه طول رشته در بایت, در حالی که ‘lengthUTF8’ تابع محاسبه طول رشته در نقاط کد یونیکد, فرض کنید که ارزش است گفتن-8 کد گذاری. + +[مقاله اصلی](https://clickhouse.tech/docs/en/data_types/string/) diff --git a/docs/fa/sql_reference/data_types/tuple.md b/docs/fa/sql_reference/data_types/tuple.md new file mode 100644 index 00000000000..c971a5432fe --- /dev/null +++ b/docs/fa/sql_reference/data_types/tuple.md @@ -0,0 +1,52 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 53 +toc_title: "\u062A\u0627\u067E\u0644 (\u062A\u06CC1, \u062A\u06CC2,...)" +--- + +# Tuple(t1, T2, …) {#tuplet1-t2} + +یک تاپل از عناصر, هر یک با داشتن یک فرد [نوع](index.md#data_types). + +تاپل برای گروه بندی ستون موقت استفاده می شود. ستون ها را می توان گروه بندی کرد زمانی که یک عبارت در یک پرس و جو استفاده می شود, و برای مشخص کردن پارامترهای رسمی خاصی از توابع لامبدا. برای کسب اطلاعات بیشتر به بخش ها مراجعه کنید [در اپراتورها](../../sql_reference/statements/select.md) و [توابع سفارش بالاتر](../../sql_reference/functions/higher_order_functions.md). + +تاپل می تواند در نتیجه یک پرس و جو. در این مورد, برای فرمت های متنی غیر از جانسون, ارزش کاما از هم جدا در براکت. در فرمت های جوسون, تاپل خروجی به عنوان ارریس هستند (در براکت مربع). + +## ایجاد یک تاپل {#creating-a-tuple} + +شما می توانید یک تابع برای ایجاد یک تاپل استفاده کنید: + +``` sql +tuple(T1, T2, ...) 
+``` + +نمونه ای از ایجاد یک تاپل: + +``` sql +SELECT tuple(1,'a') AS x, toTypeName(x) +``` + +``` text +┌─x───────┬─toTypeName(tuple(1, 'a'))─┐ +│ (1,'a') │ Tuple(UInt8, String) │ +└─────────┴───────────────────────────┘ +``` + +## کار با انواع داده ها {#working-with-data-types} + +در هنگام ایجاد یک تاپل در پرواز, تاتر به طور خودکار نوع هر استدلال به عنوان حداقل از انواع که می تواند ارزش استدلال ذخیره تشخیص. اگر استدلال است [NULL](../../sql_reference/syntax.md#null-literal), نوع عنصر تاپل است [Nullable](nullable.md). + +نمونه ای از تشخیص نوع داده ها به صورت خودکار: + +``` sql +SELECT tuple(1, NULL) AS x, toTypeName(x) +``` + +``` text +┌─x────────┬─toTypeName(tuple(1, NULL))──────┐ +│ (1,NULL) │ Tuple(UInt8, Nullable(Nothing)) │ +└──────────┴─────────────────────────────────┘ +``` + +[مقاله اصلی](https://clickhouse.tech/docs/en/data_types/tuple/) diff --git a/docs/fa/sql_reference/data_types/uuid.md b/docs/fa/sql_reference/data_types/uuid.md new file mode 100644 index 00000000000..c8ed8a7cf79 --- /dev/null +++ b/docs/fa/sql_reference/data_types/uuid.md @@ -0,0 +1,77 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 46 +toc_title: UUID +--- + +# UUID {#uuid-data-type} + +شناسه جهانی منحصر به فرد (شناسه) یک عدد 16 بایت مورد استفاده برای شناسایی سوابق است. برای کسب اطلاعات دقیق در مورد شناسه, دیدن [ویکیپدیا](https://en.wikipedia.org/wiki/Universally_unique_identifier). + +نمونه ای از ارزش نوع شناسه در زیر نشان داده شده است: + +``` text +61f0c404-5cb3-11e7-907b-a6006ad3dba0 +``` + +اگر شما مقدار ستون شناسه مشخص نیست در هنگام قرار دادن یک رکورد جدید, ارزش شناسه با صفر پر: + +``` text +00000000-0000-0000-0000-000000000000 +``` + +## چگونه برای تولید {#how-to-generate} + +برای تولید ارزش شناسه, خانه فراهم می کند [جنراتیدو4](../../sql_reference/functions/uuid_functions.md) تابع. + +## مثال طریقه استفاده {#usage-example} + +**مثال 1** + +این مثال نشان می دهد ایجاد یک جدول با ستون نوع شناسه و قرار دادن یک مقدار به جدول. + +``` sql +CREATE TABLE t_uuid (x UUID, y String) ENGINE=TinyLog +``` + +``` sql +INSERT INTO t_uuid SELECT generateUUIDv4(), 'Example 1' +``` + +``` sql +SELECT * FROM t_uuid +``` + +``` text +┌────────────────────────────────────x─┬─y─────────┐ +│ 417ddc5d-e556-4d27-95dd-a34d84e46a50 │ Example 1 │ +└──────────────────────────────────────┴───────────┘ +``` + +**مثال 2** + +در این مثال مقدار ستون یوید هنگام وارد کردن یک رکورد جدید مشخص نشده است. + +``` sql +INSERT INTO t_uuid (y) VALUES ('Example 2') +``` + +``` sql +SELECT * FROM t_uuid +``` + +``` text +┌────────────────────────────────────x─┬─y─────────┐ +│ 417ddc5d-e556-4d27-95dd-a34d84e46a50 │ Example 1 │ +│ 00000000-0000-0000-0000-000000000000 │ Example 2 │ +└──────────────────────────────────────┴───────────┘ +``` + +## محدودیت ها {#restrictions} + +نوع داده شناسه تنها پشتیبانی از توابع که [رشته](string.md) نوع داده نیز پشتیبانی می کند (به عنوان مثال, [کمینه](../../sql_reference/aggregate_functions/reference.md#agg_function-min), [حداکثر](../../sql_reference/aggregate_functions/reference.md#agg_function-max) و [شمارش](../../sql_reference/aggregate_functions/reference.md#agg_function-count)). + +نوع داده یوید توسط عملیات ریاضی پشتیبانی نمی شود (به عنوان مثال, [شکم](../../sql_reference/functions/arithmetic_functions.md#arithm_func-abs)) و یا توابع دانه, مانند [جمع](../../sql_reference/aggregate_functions/reference.md#agg_function-sum) و [میانگین](../../sql_reference/aggregate_functions/reference.md#agg_function-avg). 
+ +[مقاله اصلی](https://clickhouse.tech/docs/en/data_types/uuid/) diff --git a/docs/fa/sql_reference/dictionaries/external_dictionaries/external_dicts.md b/docs/fa/sql_reference/dictionaries/external_dictionaries/external_dicts.md new file mode 100644 index 00000000000..0022f8186a6 --- /dev/null +++ b/docs/fa/sql_reference/dictionaries/external_dictionaries/external_dicts.md @@ -0,0 +1,56 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 39 +toc_title: "\u062A\u0648\u0636\u06CC\u062D\u0627\u062A \u06A9\u0644\u06CC" +--- + +# واژهنامهها خارجی {#dicts-external-dicts} + +شما می توانید لغت نامه خود را از منابع داده های مختلف اضافه کنید. منبع داده برای یک فرهنگ لغت می تواند یک متن محلی و یا فایل اجرایی, یک منبع اچتیتیپی(بازدید کنندگان), یا سندرم داون دیگر. برای کسب اطلاعات بیشتر, دیدن “[منابع لغت نامه های خارجی](external_dicts_dict_sources.md)”. + +فاحشه خانه: + +- به طور کامل و یا تا حدی فروشگاه لغت نامه در رم. +- دوره به روز رسانی لغت نامه ها و به صورت پویا بارهای ارزش از دست رفته. به عبارت دیگر, لغت نامه را می توان به صورت پویا لود. +- اجازه می دهد تا برای ایجاد لغت نامه های خارجی با فایل های میلی لیتر و یا [نمایش داده شد](../../statements/create.md#create-dictionary-query). + +پیکربندی لغت نامه های خارجی را می توان در یک یا چند میلی لیتر فایل واقع شده است. مسیر پیکربندی در مشخص [دیکشنامهای](../../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-dictionaries_config) پارامتر. + +واژهنامهها را می توان در هنگام راه اندازی سرور و یا در اولین استفاده لود, بسته به [\_بارگیری کامل](../../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-dictionaries_lazy_load) تنظیمات. + +فایل پیکربندی فرهنگ لغت دارای فرمت زیر است: + +``` xml + + An optional element with any content. Ignored by the ClickHouse server. + + + /etc/metrika.xml + + + + + + + + +``` + +شما می توانید [پیکربندی](external_dicts_dict.md) هر تعداد از لغت نامه ها در همان فایل. + +[نمایش داده شد دی ال برای لغت نامه](../../statements/create.md#create-dictionary-query) هیچ پرونده اضافی در پیکربندی سرور نیاز ندارد. اجازه می دهد برای کار با لغت نامه به عنوان نهادهای طبقه اول, مانند جداول و یا دیدگاه. + +!!! attention "توجه" + شما می توانید مقادیر را برای یک فرهنگ لغت کوچک با توصیف در یک تبدیل کنید `SELECT` پرسوجو (نگاه کنید به [تبدیل](../../../sql_reference/functions/other_functions.md) تابع). این قابلیت به لغت نامه های خارجی مربوط نیست. 
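For instance, a very small lookup can be inlined with `transform` instead of declaring a dictionary at all (a sketch with invented values):

``` sql
SELECT transform(2, [2, 3], ['Yandex', 'Google'], 'Other')   -- returns 'Yandex'
```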
+ +## همچنین نگاه کنید به {#ext-dicts-see-also} + +- [پیکربندی یک فرهنگ لغت خارجی](external_dicts_dict.md) +- [ذخیره واژهنامهها در حافظه](external_dicts_dict_layout.md) +- [به روز رسانی فرهنگ لغت](external_dicts_dict_lifetime.md) +- [منابع لغت نامه های خارجی](external_dicts_dict_sources.md) +- [کلید فرهنگ لغت و زمینه های](external_dicts_dict_structure.md) +- [توابع برای کار با لغت نامه های خارجی](../../../sql_reference/functions/ext_dict_functions.md) + +[مقاله اصلی](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts/) diff --git a/docs/fa/sql_reference/dictionaries/external_dictionaries/external_dicts_dict.md b/docs/fa/sql_reference/dictionaries/external_dictionaries/external_dicts_dict.md new file mode 100644 index 00000000000..a33d3f4c18d --- /dev/null +++ b/docs/fa/sql_reference/dictionaries/external_dictionaries/external_dicts_dict.md @@ -0,0 +1,54 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 40 +toc_title: "\u067E\u06CC\u06A9\u0631\u0628\u0646\u062F\u06CC \u06CC\u06A9 \u0641\u0631\ + \u0647\u0646\u06AF \u0644\u063A\u062A \u062E\u0627\u0631\u062C\u06CC" +--- + +# پیکربندی یک فرهنگ لغت خارجی {#dicts-external-dicts-dict} + +اگر فرهنگ لغت با استفاده از فایل میلی لیتر پیکربندی, از پیکربندی فرهنگ لغت دارای ساختار زیر: + +``` xml + + dict_name + + + + + + + + + + + + + + + + + +``` + +متناظر [توصیف](../../statements/create.md#create-dictionary-query) دارای ساختار زیر است: + +``` sql +CREATE DICTIONARY dict_name +( + ... -- attributes +) +PRIMARY KEY ... -- complex or single key configuration +SOURCE(...) -- Source configuration +LAYOUT(...) -- Memory layout configuration +LIFETIME(...) -- Lifetime of dictionary in memory +``` + +- `name` – The identifier that can be used to access the dictionary. Use the characters `[a-zA-Z0-9_\-]`. +- [متن](external_dicts_dict_sources.md) — Source of the dictionary. +- [طرحبندی](external_dicts_dict_layout.md) — Dictionary layout in memory. +- [ساختار](external_dicts_dict_structure.md) — Structure of the dictionary . A key and attributes that can be retrieved by this key. +- [طول عمر](external_dicts_dict_lifetime.md) — Frequency of dictionary updates. + +[مقاله اصلی](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict/) diff --git a/docs/fa/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_hierarchical.md b/docs/fa/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_hierarchical.md new file mode 100644 index 00000000000..15120b3e8c2 --- /dev/null +++ b/docs/fa/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_hierarchical.md @@ -0,0 +1,71 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 45 +toc_title: "\u0644\u063A\u062A\u0646\u0627\u0645\u0647\u0647\u0627 \u0633\u0644\u0633\ + \u0644\u0647 \u0645\u0631\u0627\u062A\u0628\u06CC" +--- + +# لغتنامهها سلسله مراتبی {#hierarchical-dictionaries} + +کلیک هاوس از لغت نامه های سلسله مراتبی با یک [کلید عددی](external_dicts_dict_structure.md#ext_dict-numeric-key). + +در ساختار سلسله مراتبی زیر نگاه کنید: + +``` text +0 (Common parent) +│ +├── 1 (Russia) +│ │ +│ └── 2 (Moscow) +│ │ +│ └── 3 (Center) +│ +└── 4 (Great Britain) + │ + └── 5 (London) +``` + +این سلسله مراتب را می توان به عنوان جدول فرهنگ لغت زیر بیان شده است. 
+ +| \_ورود | \_ نواحی | نام \_خانوادگی | +|--------|----------|----------------| +| 1 | 0 | روسیه | +| 2 | 1 | مسکو | +| 3 | 2 | مرکز | +| 4 | 0 | بریتانیا | +| 5 | 4 | لندن | + +این جدول شامل یک ستون است `parent_region` که شامل کلید نزدیکترین پدر و مادر برای عنصر. + +تاتر از [سلسله مراتبی](external_dicts_dict_structure.md#hierarchical-dict-attr) املاک برای [فرهنگ لغت خارجی](index.md) صفات. این ویژگی اجازه می دهد تا شما را به پیکربندی فرهنگ لغت سلسله مراتبی شبیه به بالا توضیح داده شد. + +این [حکومت دیکتاتوری](../../../sql_reference/functions/ext_dict_functions.md#dictgethierarchy) تابع اجازه می دهد تا شما را به زنجیره پدر و مادر از یک عنصر. + +برای مثال ما ساختار فرهنگ لغت می تواند به شرح زیر است: + +``` xml + + + + region_id + + + + parent_region + UInt64 + 0 + true + + + + region_name + String + + + + + +``` + +[مقاله اصلی](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_hierarchical/) diff --git a/docs/fa/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_layout.md b/docs/fa/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_layout.md new file mode 100644 index 00000000000..b1bbf407f7c --- /dev/null +++ b/docs/fa/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_layout.md @@ -0,0 +1,374 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 41 +toc_title: "\u0630\u062E\u06CC\u0631\u0647 \u0648\u0627\u0698\u0647\u0646\u0627\u0645\ + \u0647\u0647\u0627 \u062F\u0631 \u062D\u0627\u0641\u0638\u0647" +--- + +# ذخیره واژهنامهها در حافظه {#dicts-external-dicts-dict-layout} + +راه های مختلفی برای ذخیره لغت نامه ها در حافظه وجود دارد. + +ما توصیه می کنیم [تخت](#flat), [درهم](#dicts-external_dicts_dict_layout-hashed) و [\_ساخت مجتمع](#complex-key-hashed). که سرعت پردازش بهینه را فراهم می کند. + +ذخیره سازی به دلیل عملکرد بالقوه ضعیف و مشکلات در انتخاب پارامترهای مطلوب توصیه نمی شود. ادامه مطلب در بخش “[نهانگاه](#cache)”. + +راه های مختلفی برای بهبود عملکرد فرهنگ لغت وجود دارد: + +- پاسخ تابع برای کار با فرهنگ لغت پس از `GROUP BY`. +- علامت گذاری به عنوان ویژگی برای استخراج به عنوان تزریقی. یک ویژگی است که به نام عاطفی اگر مقادیر ویژگی های مختلف به کلید های مختلف مطابقت دارد. بنابراین زمانی که `GROUP BY` با استفاده از یک تابع است که بازخوانی ارزش ویژگی های کلیدی, این تابع به طور خودکار از گرفته `GROUP BY`. + +تاتر تولید یک استثنا برای خطا با لغت نامه. نمونه هایی از اشتباهات: + +- فرهنگ لغت در حال دسترسی نمی تواند لود شود. +- خطای پرسوجو یک `cached` فرهنگ لغت. + +شما می توانید لیستی از لغت نامه های خارجی و وضعیت خود را در `system.dictionaries` جدول + +پیکربندی به نظر می رسد مثل این: + +``` xml + + + ... + + + + + + ... + + +``` + +متناظر [توصیف](../../statements/create.md#create-dictionary-query): + +``` sql +CREATE DICTIONARY (...) +... +LAYOUT(LAYOUT_TYPE(param value)) -- layout settings +... +``` + +## راه هایی برای ذخیره لغت نامه ها در حافظه {#ways-to-store-dictionaries-in-memory} + +- [تخت](#flat) +- [درهم](#dicts-external_dicts_dict_layout-hashed) +- [فشردهسازی](#dicts-external_dicts_dict_layout-sparse_hashed) +- [نهانگاه](#cache) +- [رنگها](#range-hashed) +- [\_ساخت مجتمع](#complex-key-hashed) +- [\_پیچید\_چهای پیچیده](#complex-key-cache) +- [شمال اروپا](#ip-trie) + +### تخت {#flat} + +فرهنگ لغت به طور کامل در حافظه در قالب مجموعه تخت ذخیره می شود. چقدر حافظه استفاده از فرهنگ لغت? مقدار متناسب با اندازه بزرگترین کلید است (در فضا استفاده می شود). + +کلید فرهنگ لغت است `UInt64` نوع و ارزش محدود به 500,000 است. 
اگر یک کلید بزرگتر کشف شده است در هنگام ایجاد فرهنگ لغت, تاتر می اندازد یک استثنا و فرهنگ لغت ایجاد کنید. + +تمام انواع منابع پشتیبانی می شوند. هنگام به روز رسانی, داده ها (از یک فایل و یا از یک جدول) در تمامیت خود را به عنوان خوانده شده. + +این روش بهترین عملکرد را در میان تمام روش های موجود ذخیره سازی فرهنگ لغت فراهم می کند. + +مثال پیکربندی: + +``` xml + + + +``` + +یا + +``` sql +LAYOUT(FLAT()) +``` + +### درهم {#dicts-external_dicts_dict_layout-hashed} + +فرهنگ لغت به طور کامل در حافظه در قالب یک جدول هش ذخیره می شود. فرهنگ لغت می تواند شامل هر تعداد از عناصر با هر شناسه در عمل تعداد کلید ده ها میلیون نفر از اقلام برسد. + +تمام انواع منابع پشتیبانی می شوند. هنگام به روز رسانی, داده ها (از یک فایل و یا از یک جدول) در تمامیت خود را به عنوان خوانده شده. + +مثال پیکربندی: + +``` xml + + + +``` + +یا + +``` sql +LAYOUT(HASHED()) +``` + +### فشردهسازی {#dicts-external_dicts_dict_layout-sparse_hashed} + +مشابه به `hashed`, اما با استفاده از حافظه کمتر به نفع استفاده از پردازنده بیشتر. + +مثال پیکربندی: + +``` xml + + + +``` + +``` sql +LAYOUT(SPARSE_HASHED()) +``` + +### \_ساخت مجتمع {#complex-key-hashed} + +این نوع ذخیره سازی برای استفاده با کامپوزیت است [کلید](external_dicts_dict_structure.md). مشابه به `hashed`. + +مثال پیکربندی: + +``` xml + + + +``` + +``` sql +LAYOUT(COMPLEX_KEY_HASHED()) +``` + +### رنگها {#range-hashed} + +فرهنگ لغت در حافظه به شکل یک جدول هش با مجموعه ای مرتب از محدوده ها و مقادیر مربوطه ذخیره می شود. + +این روش ذخیره سازی کار می کند به همان شیوه به عنوان درهم و اجازه می دهد تا با استفاده از تاریخ/زمان (نوع عددی دلخواه) محدوده علاوه بر کلید. + +مثال: جدول شامل تخفیف برای هر تبلیغ در قالب: + +``` text ++---------|-------------|-------------|------+ +| advertiser id | discount start date | discount end date | amount | ++===============+=====================+===================+========+ +| 123 | 2015-01-01 | 2015-01-15 | 0.15 | ++---------|-------------|-------------|------+ +| 123 | 2015-01-16 | 2015-01-31 | 0.25 | ++---------|-------------|-------------|------+ +| 456 | 2015-01-01 | 2015-01-15 | 0.05 | ++---------|-------------|-------------|------+ +``` + +برای استفاده از یک نمونه برای محدوده تاریخ, تعریف `range_min` و `range_max` عناصر در [ساختار](external_dicts_dict_structure.md). این عناصر باید حاوی عناصر `name` و`type` (اگر `type` مشخص نشده است, نوع پیش فرض استفاده خواهد شد - تاریخ). `type` می تواند هر نوع عددی (تاریخ / DateTime / UInt64 / Int32 / دیگران). + +مثال: + +``` xml + + + Id + + + first + Date + + + last + Date + + ... +``` + +یا + +``` sql +CREATE DICTIONARY somedict ( + id UInt64, + first Date, + last Date +) +PRIMARY KEY id +LAYOUT(RANGE_HASHED()) +RANGE(MIN first MAX last) +``` + +برای کار با این لغت نامه, شما نیاز به تصویب یک استدلال اضافی به `dictGetT` تابع, که یک محدوده انتخاب شده است: + +``` sql +dictGetT('dict_name', 'attr_name', id, date) +``` + +این تابع ارزش برای مشخص گرداند `id`بازدید کنندگان و محدوده تاریخ که شامل تاریخ گذشت. + +اطلاعات از الگوریتم: + +- اگر `id` یافت نشد و یا یک محدوده برای یافت نشد `id` مقدار پیش فرض فرهنگ لغت را برمی گرداند. +- اگر با هم تداخل دارند محدوده وجود دارد, شما می توانید هر استفاده. +- اگر جداساز محدوده باشد `NULL` یا نامعتبر تاریخ (مانند 1900-01-01 یا 2039-01-01) طیف وسیعی است که در سمت چپ باز است. محدوده را می توان در هر دو طرف باز کرد. + +مثال پیکربندی: + +``` xml + + + + ... 
+ + + + + + + + Abcdef + + + StartTimeStamp + UInt64 + + + EndTimeStamp + UInt64 + + + XXXType + String + + + + + + +``` + +یا + +``` sql +CREATE DICTIONARY somedict( + Abcdef UInt64, + StartTimeStamp UInt64, + EndTimeStamp UInt64, + XXXType String DEFAULT '' +) +PRIMARY KEY Abcdef +RANGE(MIN StartTimeStamp MAX EndTimeStamp) +``` + +### نهانگاه {#cache} + +فرهنگ لغت در کش است که تعداد ثابتی از سلول های ذخیره می شود. این سلول ها حاوی عناصر اغلب استفاده می شود. + +هنگام جستجو برای یک فرهنگ لغت کش اول جستجو می شود. برای هر بلوک از داده ها, تمام کلید هایی که در کش یافت نشد و یا منسوخ شده از منبع با استفاده از درخواست `SELECT attrs... FROM db.table WHERE id IN (k1, k2, ...)`. داده های دریافت شده است و سپس به کش نوشته شده است. + +برای لغت نامه کش, انقضا [طول عمر](external_dicts_dict_lifetime.md) از داده ها در کش را می توان تنظیم کرد. اگر زمان بیشتری از `lifetime` از زمان بارگذاری داده ها در یک سلول گذشت, ارزش سلول استفاده نمی شود, و دوباره درخواست دفعه بعد که نیاز به استفاده می شود. +این حداقل موثر از تمام راه هایی برای ذخیره لغت نامه است. سرعت کش به شدت در تنظیمات صحیح و سناریوی استفاده بستگی دارد. فرهنگ لغت نوع کش به خوبی انجام تنها زمانی که نرخ ضربه به اندازه کافی بالا هستند (توصیه می شود 99% و بالاتر). شما می توانید میزان ضربه به طور متوسط در مشاهده `system.dictionaries` جدول + +برای بهبود عملکرد کش, استفاده از یک خرده فروشی با `LIMIT`, و پاسخ تابع با فرهنگ لغت خارجی. + +پشتیبانی [منابع](external_dicts_dict_sources.md) پردازشگر پشتیبانی شده: + +مثال تنظیمات: + +``` xml + + + + 1000000000 + + +``` + +یا + +``` sql +LAYOUT(CACHE(SIZE_IN_CELLS 1000000000)) +``` + +تنظیم اندازه کش به اندازه کافی بزرگ است. شما نیاز به تجربه برای انتخاب تعدادی از سلول های: + +1. تنظیم برخی از ارزش. +2. نمایش داده شد اجرا تا کش کاملا کامل است. +3. ارزیابی مصرف حافظه با استفاده از `system.dictionaries` جدول +4. افزایش یا کاهش تعداد سلول ها تا زمانی که مصرف حافظه مورد نیاز رسیده است. + +!!! warning "اخطار" + هنوز تاتر به عنوان یک منبع استفاده نمی, چرا که کند است برای پردازش نمایش داده شد با تصادفی می خواند. + +### \_پیچید\_چهای پیچیده {#complex-key-cache} + +این نوع ذخیره سازی برای استفاده با کامپوزیت است [کلید](external_dicts_dict_structure.md). مشابه به `cache`. + +### شمال اروپا {#ip-trie} + +این نوع ذخیره سازی برای پیشوندهای نقشه برداری شبکه (نشانی های اینترنتی) به فراداده مانند ان است. + +مثال: جدول شامل پیشوندهای شبکه و مربوط به خود را به عنوان شماره و کد کشور: + +``` text + +-----------|-----|------+ + | prefix | asn | cca2 | + +=================+=======+========+ + | 202.79.32.0/20 | 17501 | NP | + +-----------|-----|------+ + | 2620:0:870::/48 | 3856 | US | + +-----------|-----|------+ + | 2a02:6b8:1::/48 | 13238 | RU | + +-----------|-----|------+ + | 2001:db8::/32 | 65536 | ZZ | + +-----------|-----|------+ +``` + +هنگام استفاده از این نوع طرح, ساختار باید یک کلید کامپوزیت دارند. + +مثال: + +``` xml + + + + prefix + String + + + + asn + UInt32 + + + + cca2 + String + ?? + + ... +``` + +یا + +``` sql +CREATE DICTIONARY somedict ( + prefix String, + asn UInt32, + cca2 String DEFAULT '??' +) +PRIMARY KEY prefix +``` + +کلید باید تنها یک ویژگی نوع رشته ای داشته باشد که شامل یک پیشوند مجاز است. انواع دیگر هنوز پشتیبانی نمی شوند. + +برای نمایش داده شد, شما باید توابع مشابه استفاده کنید (`dictGetT` با یک تاپل) به لغت نامه ها با کلید های ترکیبی: + +``` sql +dictGetT('dict_name', 'attr_name', tuple(ip)) +``` + +تابع طول می کشد یا `UInt32` برای ایپو4 یا `FixedString(16)` برای IPv6: + +``` sql +dictGetString('prefix', 'asn', tuple(IPv6StringToNum('2001:db8::1'))) +``` + +انواع دیگر هنوز پشتیبانی نمی شوند. 
تابع ویژگی برای پیشوند که مربوط به این نشانی اینترنتی را برمی گرداند. اگر پیشوند با هم تداخل دارند وجود دارد, یکی از خاص ترین بازگشته است. + +داده ها در یک ذخیره می شود `trie`. این به طور کامل باید به رم مناسب. + +[مقاله اصلی](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_layout/) diff --git a/docs/fa/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_lifetime.md b/docs/fa/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_lifetime.md new file mode 100644 index 00000000000..cb315313a24 --- /dev/null +++ b/docs/fa/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_lifetime.md @@ -0,0 +1,87 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 42 +toc_title: "\u0628\u0647 \u0631\u0648\u0632 \u0631\u0633\u0627\u0646\u06CC \u0641\u0631\ + \u0647\u0646\u06AF \u0644\u063A\u062A" +--- + +# به روز رسانی فرهنگ لغت {#dictionary-updates} + +خانه کلیک دوره به روز رسانی لغت نامه. فاصله به روز رسانی برای لغت نامه به طور کامل دانلود و فاصله ابطال لغت نامه کش در تعریف `` برچسب در ثانیه. + +به روز رسانی فرهنگ لغت (به غیر از بارگذاری برای استفاده اول) نمایش داده شد را مسدود کند. در طول به روز رسانی, نسخه های قدیمی از یک فرهنگ لغت استفاده شده است. اگر یک خطا در طول به روز رسانی رخ می دهد, خطا به ورود به سیستم سرور نوشته شده, و نمایش داده شد ادامه استفاده از نسخه های قدیمی از لغت نامه. + +مثال تنظیمات: + +``` xml + + ... + 300 + ... + +``` + +``` sql +CREATE DICTIONARY (...) +... +LIFETIME(300) +... +``` + +تنظیم `0` (`LIFETIME(0)`) جلوگیری از لغت نامه از به روز رسانی . + +شما می توانید یک بازه زمانی برای ارتقا تنظیم, و تاتر یک زمان یکنواخت تصادفی در این محدوده را انتخاب کنید. این به منظور توزیع بار بر روی منبع فرهنگ لغت در هنگام به روز رسانی در تعداد زیادی از سرور لازم است. + +مثال تنظیمات: + +``` xml + + ... + + 300 + 360 + + ... + +``` + +یا + +``` sql +LIFETIME(MIN 300 MAX 360) +``` + +هنگام به روز رسانی لغت نامه, سرور کلیک اعمال منطق مختلف بسته به نوع [متن](external_dicts_dict_sources.md): + +- برای یک فایل متنی زمان اصلاح را بررسی می کند. اگر زمان از زمان قبلا ثبت شده متفاوت, فرهنگ لغت به روز شده است. +- برای جداول میثم, زمان اصلاح بررسی می شود با استفاده از یک `SHOW TABLE STATUS` پرس و جو. +- واژهنامهها از منابع دیگر در هر زمان به طور پیش فرض به روز شد. + +برای خروجی زیر (دیگر), ان بی سی و منابع فاحشه خانه, شما می توانید راه اندازی یک پرس و جو است که لغت نامه تنها در صورتی که واقعا تغییر به روز رسانی, به جای هر زمان. برای انجام این کار این مراحل را دنبال کنید: + +- جدول فرهنگ لغت باید یک میدان است که همیشه تغییر زمانی که داده های منبع به روز شده است. +- تنظیمات منبع باید پرس و جو که بازیابی زمینه در حال تغییر را مشخص کنید. سرور کلیک تفسیر نتیجه پرس و جو به عنوان یک ردیف, و اگر این ردیف نسبت به حالت قبلی خود تغییر کرده است, فرهنگ لغت به روز شده است. مشخص کردن پرسوجو در `` درست در تنظیمات برای [متن](external_dicts_dict_sources.md). + +مثال تنظیمات: + +``` xml + + ... + + ... + SELECT update_time FROM dictionary_source where id = 1 + + ... + +``` + +یا + +``` sql +... +SOURCE(ODBC(... invalidate_query 'SELECT update_time FROM dictionary_source where id = 1')) +... 
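-- Editorial note (an assumption restating the text above): the single-row
-- result of invalidate_query is compared with the result of the previous
-- check, and the dictionary is reloaded only when the two differ.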
+``` + +[مقاله اصلی](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_lifetime/) diff --git a/docs/fa/query_language/dicts/external_dicts_dict_sources.md b/docs/fa/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md similarity index 60% rename from docs/fa/query_language/dicts/external_dicts_dict_sources.md rename to docs/fa/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md index 37d050a8e72..398e4ceab39 100644 --- a/docs/fa/query_language/dicts/external_dicts_dict_sources.md +++ b/docs/fa/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md @@ -1,12 +1,16 @@ --- -en_copy: true +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 43 +toc_title: "\u0645\u0646\u0627\u0628\u0639 \u0644\u063A\u062A \u0646\u0627\u0645\u0647\ + \ \u0647\u0627\u06CC \u062E\u0627\u0631\u062C\u06CC" --- -# Sources of External Dictionaries {#dicts-external-dicts-dict-sources} +# منابع لغت نامه های خارجی {#dicts-external-dicts-dict-sources} -An external dictionary can be connected from many different sources. +فرهنگ لغت خارجی را می توان از بسیاری از منابع مختلف متصل می شود. -If dictionary is configured using xml-file, the configuration looks like this: +اگر فرهنگ لغت پیکربندی شده است با استفاده از فایل های فشرده, پیکربندی به نظر می رسد مثل این: ``` xml @@ -23,7 +27,7 @@ If dictionary is configured using xml-file, the configuration looks like this: ``` -In case of [DDL-query](../create.md#create-dictionary-query), equal configuration will looks like: +در صورت [توصیف](../../statements/create.md#create-dictionary-query), پیکربندی برابر خواهد شد مانند به نظر می رسد: ``` sql CREATE DICTIONARY dict_name (...) @@ -32,23 +36,23 @@ SOURCE(SOURCE_TYPE(param1 val1 ... paramN valN)) -- Source configuration ... ``` -The source is configured in the `source` section. +منبع در پیکربندی `source` بخش. -Types of sources (`source_type`): +انواع منابع (`source_type`): -- [Local file](#dicts-external_dicts_dict_sources-local_file) -- [Executable file](#dicts-external_dicts_dict_sources-executable) +- [پرونده محلی](#dicts-external_dicts_dict_sources-local_file) +- [پرونده اجرایی](#dicts-external_dicts_dict_sources-executable) - [HTTP(s)](#dicts-external_dicts_dict_sources-http) - DBMS - [ODBC](#dicts-external_dicts_dict_sources-odbc) - [MySQL](#dicts-external_dicts_dict_sources-mysql) - - [ClickHouse](#dicts-external_dicts_dict_sources-clickhouse) - - [MongoDB](#dicts-external_dicts_dict_sources-mongodb) - - [Redis](#dicts-external_dicts_dict_sources-redis) + - [فاحشه خانه](#dicts-external_dicts_dict_sources-clickhouse) + - [مانگودیبی](#dicts-external_dicts_dict_sources-mongodb) + - [ردیس](#dicts-external_dicts_dict_sources-redis) -## Local File {#dicts-external_dicts_dict_sources-local_file} +## پرونده محلی {#dicts-external_dicts_dict_sources-local_file} -Example of settings: +مثال تنظیمات: ``` xml @@ -59,22 +63,22 @@ Example of settings: ``` -or +یا ``` sql SOURCE(FILE(path '/opt/dictionaries/os.tsv' format 'TabSeparated')) ``` -Setting fields: +تنظیم فیلدها: - `path` – The absolute path to the file. -- `format` – The file format. All the formats described in “[Formats](../../interfaces/formats.md#formats)” are supported. +- `format` – The file format. All the formats described in “[فرشها](../../../interfaces/formats.md#formats)” پشتیبانی می شوند. 
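A hedged end-to-end sketch tying the file source to a layout and lifetime (the dictionary and attribute names are invented for illustration):

``` sql
CREATE DICTIONARY os_dict
(
    id UInt64,
    os_name String DEFAULT 'unknown'
)
PRIMARY KEY id
SOURCE(FILE(path '/opt/dictionaries/os.tsv' format 'TabSeparated'))
LAYOUT(FLAT())
LIFETIME(300)

-- Look up an attribute by key:
-- SELECT dictGetString('os_dict', 'os_name', toUInt64(1))
```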
-## Executable File {#dicts-external_dicts_dict_sources-executable} +## پرونده اجرایی {#dicts-external_dicts_dict_sources-executable} -Working with executable files depends on [how the dictionary is stored in memory](external_dicts_dict_layout.md). If the dictionary is stored using `cache` and `complex_key_cache`, ClickHouse requests the necessary keys by sending a request to the executable file’s STDIN. Otherwise, ClickHouse starts executable file and treats its output as dictionary data. +کار با فایل های اجرایی بستگی دارد [چگونه فرهنگ لغت در حافظه ذخیره می شود](external_dicts_dict_layout.md). اگر فرهنگ لغت با استفاده از ذخیره می شود `cache` و `complex_key_cache` کلیک هاوس کلید های لازم را با ارسال درخواست به فایل اجرایی درخواست می کند. در غیر این صورت, تاتر شروع می شود فایل اجرایی و خروجی خود را به عنوان داده فرهنگ لغت رفتار. -Example of settings: +مثال تنظیمات: ``` xml @@ -85,22 +89,22 @@ Example of settings: ``` -or +یا ``` sql SOURCE(EXECUTABLE(command 'cat /opt/dictionaries/os.tsv' format 'TabSeparated')) ``` -Setting fields: +تنظیم فیلدها: - `command` – The absolute path to the executable file, or the file name (if the program directory is written to `PATH`). -- `format` – The file format. All the formats described in “[Formats](../../interfaces/formats.md#formats)” are supported. +- `format` – The file format. All the formats described in “[فرشها](../../../interfaces/formats.md#formats)” پشتیبانی می شوند. -## HTTP(s) {#dicts-external_dicts_dict_sources-http} +## قام) {#dicts-external_dicts_dict_sources-http} -Working with an HTTP(s) server depends on [how the dictionary is stored in memory](external_dicts_dict_layout.md). If the dictionary is stored using `cache` and `complex_key_cache`, ClickHouse requests the necessary keys by sending a request via the `POST` method. +کار با سرور اچ تی پی بستگی دارد [چگونه فرهنگ لغت در حافظه ذخیره می شود](external_dicts_dict_layout.md). اگر فرهنگ لغت با استفاده از ذخیره می شود `cache` و `complex_key_cache`, کلیک درخواست کلید های لازم با ارسال یک درخواست از طریق `POST` روش. -Example of settings: +مثال تنظیمات: ``` xml @@ -121,7 +125,7 @@ Example of settings: ``` -or +یا ``` sql SOURCE(HTTP( @@ -132,12 +136,12 @@ SOURCE(HTTP( )) ``` -In order for ClickHouse to access an HTTPS resource, you must [configure openSSL](../../operations/server_settings/settings.md#server_settings-openssl) in the server configuration. +برای دسترسی به یک منبع اچ تی پی باید از اینجا کلیک کنید [پیکربندی اپنسسل](../../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-openssl) در پیکربندی سرور. -Setting fields: +تنظیم فیلدها: - `url` – The source URL. -- `format` – The file format. All the formats described in “[Formats](../../interfaces/formats.md#formats)” are supported. +- `format` – The file format. All the formats described in “[فرشها](../../../interfaces/formats.md#formats)” پشتیبانی می شوند. - `credentials` – Basic HTTP authentication. Optional parameter. - `user` – Username required for the authentication. - `password` – Password required for the authentication. @@ -148,9 +152,9 @@ Setting fields: ## ODBC {#dicts-external_dicts_dict_sources-odbc} -You can use this method to connect any database that has an ODBC driver. +شما می توانید از این روش برای اتصال هر پایگاه داده است که یک راننده بی سی استفاده کنید. -Example of settings: +مثال تنظیمات: ``` xml @@ -163,7 +167,7 @@ Example of settings: ``` -or +یا ``` sql SOURCE(ODBC( @@ -174,25 +178,25 @@ SOURCE(ODBC( )) ``` -Setting fields: +تنظیم فیلدها: -- `db` – Name of the database. 
Omit it if the database name is set in the `` parameters. +- `db` – Name of the database. Omit it if the database name is set in the `` پارامترها - `table` – Name of the table and schema if exists. - `connection_string` – Connection string. -- `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [Updating dictionaries](external_dicts_dict_lifetime.md). +- `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [بهروزرسانی واژهنامهها](external_dicts_dict_lifetime.md). -ClickHouse receives quoting symbols from ODBC-driver and quote all settings in queries to driver, so it’s necessary to set table name accordingly to table name case in database. +تاتر دریافت به نقل از علامت از او بی سی راننده و نقل قول تمام تنظیمات در نمایش داده شد به راننده, بنابراین لازم است به مجموعه ای از نام جدول بر این اساس به نام جدول مورد در پایگاه داده. -If you have a problems with encodings when using Oracle, see the corresponding [FAQ](../../faq/general.md#oracle-odbc-encodings) article. +اگر شما یک مشکل با کدگذاریها در هنگام استفاده از اوراکل, دیدن مربوطه [FAQ](../../../faq/general.md#oracle-odbc-encodings) مقاله. -### Known vulnerability of the ODBC dictionary functionality {#known-vulnerability-of-the-odbc-dictionary-functionality} +### قابلیت پذیری شناخته شده از قابلیت او بی سی فرهنگ لغت {#known-vulnerability-of-the-odbc-dictionary-functionality} -!!! attention "Attention" - When connecting to the database through the ODBC driver connection parameter `Servername` can be substituted. In this case values of `USERNAME` and `PASSWORD` from `odbc.ini` are sent to the remote server and can be compromised. +!!! attention "توجه" + هنگام اتصال به پایگاه داده از طریق پارامتر اتصال درایور او بی سی `Servername` می تواند جایگزین شود. در این مورد ارزش `USERNAME` و `PASSWORD` از `odbc.ini` به سرور از راه دور ارسال می شود و می تواند به خطر بیافتد. -**Example of insecure use** +**نمونه ای از استفاده نا امن** -Let’s configure unixODBC for PostgreSQL. Content of `/etc/odbc.ini`: +اجازه می دهد تا پیکربندی unixodbc برای postgresql. محتوای `/etc/odbc.ini`: ``` text [gregtest] @@ -205,25 +209,25 @@ USERNAME = test PASSWORD = test ``` -If you then make a query such as +اگر شما پس از ایجاد یک پرس و جو مانند ``` sql SELECT * FROM odbc('DSN=gregtest;Servername=some-server.com', 'test_db'); ``` -ODBC driver will send values of `USERNAME` and `PASSWORD` from `odbc.ini` to `some-server.com`. +درایور او بی سی خواهد ارزش ارسال `USERNAME` و `PASSWORD` از `odbc.ini` به `some-server.com`. -### Example of Connecting PostgreSQL {#example-of-connecting-postgresql} +### به عنوان مثال از اتصال شل {#example-of-connecting-postgresql} -Ubuntu OS. +سیستم عامل اوبونتو. -Installing unixODBC and the ODBC driver for PostgreSQL: +نصب unixodbc و odbc driver for postgresql: ``` bash $ sudo apt-get install -y unixodbc odbcinst odbc-postgresql ``` -Configuring `/etc/odbc.ini` (or `~/.odbc.ini`): +پیکربندی `/etc/odbc.ini` (یا `~/.odbc.ini`): ``` text [DEFAULT] @@ -244,7 +248,7 @@ Configuring `/etc/odbc.ini` (or `~/.odbc.ini`): ConnSettings = ``` -The dictionary configuration in ClickHouse: +پیکربندی فرهنگ لغت در کلیک: ``` xml @@ -279,7 +283,7 @@ The dictionary configuration in ClickHouse: ``` -or +یا ``` sql CREATE DICTIONARY table_name ( @@ -292,19 +296,19 @@ LAYOUT(HASHED()) LIFETIME(MIN 300 MAX 360) ``` -You may need to edit `odbc.ini` to specify the full path to the library with the driver `DRIVER=/usr/local/lib/psqlodbcw.so`. 
+شما ممکن است نیاز به ویرایش `odbc.ini` برای مشخص کردن مسیر کامل به کتابخانه با راننده `DRIVER=/usr/local/lib/psqlodbcw.so`. -### Example of Connecting MS SQL Server {#example-of-connecting-ms-sql-server} +### به عنوان مثال اتصال سرور کارشناسی ارشد گذاشتن {#example-of-connecting-ms-sql-server} -Ubuntu OS. +سیستم عامل اوبونتو. -Installing the driver: : +نصب درایور: : ``` bash $ sudo apt-get install tdsodbc freetds-bin sqsh ``` -Configuring the driver: +پیکربندی راننده: ``` bash $ cat /etc/freetds/freetds.conf @@ -339,7 +343,7 @@ Configuring the driver: Port = 1433 ``` -Configuring the dictionary in ClickHouse: +پیکربندی فرهنگ لغت در کلیک: ``` xml @@ -375,7 +379,7 @@ Configuring the dictionary in ClickHouse: ``` -or +یا ``` sql CREATE DICTIONARY test ( @@ -390,9 +394,9 @@ LIFETIME(MIN 300 MAX 360) ## DBMS {#dbms} -### MySQL {#dicts-external_dicts_dict_sources-mysql} +### Mysql {#dicts-external_dicts_dict_sources-mysql} -Example of settings: +مثال تنظیمات: ``` xml @@ -416,7 +420,7 @@ Example of settings: ``` -or +یا ``` sql SOURCE(MYSQL( @@ -432,7 +436,7 @@ SOURCE(MYSQL( )) ``` -Setting fields: +تنظیم فیلدها: - `port` – The port on the MySQL server. You can specify it for all replicas, or for each one individually (inside ``). @@ -449,13 +453,13 @@ Setting fields: - `table` – Name of the table. -- `where` – The selection criteria. The syntax for conditions is the same as for `WHERE` clause in MySQL, for example, `id > 10 AND id < 20`. Optional parameter. +- `where` – The selection criteria. The syntax for conditions is the same as for `WHERE` بند در خروجی زیر, مثلا, `id > 10 AND id < 20`. پارامتر اختیاری. -- `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [Updating dictionaries](external_dicts_dict_lifetime.md). +- `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [بهروزرسانی واژهنامهها](external_dicts_dict_lifetime.md). -MySQL can be connected on a local host via sockets. To do this, set `host` and `socket`. +خروجی زیر را می توان در یک میزبان محلی از طریق سوکت متصل. برای انجام این کار, تنظیم `host` و `socket`. -Example of settings: +مثال تنظیمات: ``` xml @@ -472,7 +476,7 @@ Example of settings: ``` -or +یا ``` sql SOURCE(MYSQL( @@ -487,9 +491,9 @@ SOURCE(MYSQL( )) ``` -### ClickHouse {#dicts-external_dicts_dict_sources-clickhouse} +### فاحشه خانه {#dicts-external_dicts_dict_sources-clickhouse} -Example of settings: +مثال تنظیمات: ``` xml @@ -505,7 +509,7 @@ Example of settings: ``` -or +یا ``` sql SOURCE(CLICKHOUSE( @@ -519,20 +523,20 @@ SOURCE(CLICKHOUSE( )) ``` -Setting fields: +تنظیم فیلدها: -- `host` – The ClickHouse host. If it is a local host, the query is processed without any network activity. To improve fault tolerance, you can create a [Distributed](../../operations/table_engines/distributed.md) table and enter it in subsequent configurations. +- `host` – The ClickHouse host. If it is a local host, the query is processed without any network activity. To improve fault tolerance, you can create a [توزیع شده](../../../engines/table_engines/special/distributed.md) جدول و در تنظیمات بعدی وارد کنید. - `port` – The port on the ClickHouse server. - `user` – Name of the ClickHouse user. - `password` – Password of the ClickHouse user. - `db` – Name of the database. - `table` – Name of the table. - `where` – The selection criteria. May be omitted. -- `invalidate_query` – Query for checking the dictionary status. Optional parameter. 
Read more in the section [Updating dictionaries](external_dicts_dict_lifetime.md). +- `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [بهروزرسانی واژهنامهها](external_dicts_dict_lifetime.md). -### MongoDB {#dicts-external_dicts_dict_sources-mongodb} +### مانگودیبی {#dicts-external_dicts_dict_sources-mongodb} -Example of settings: +مثال تنظیمات: ``` xml @@ -547,7 +551,7 @@ Example of settings: ``` -or +یا ``` sql SOURCE(MONGO( @@ -560,7 +564,7 @@ SOURCE(MONGO( )) ``` -Setting fields: +تنظیم فیلدها: - `host` – The MongoDB host. - `port` – The port on the MongoDB server. @@ -569,9 +573,9 @@ Setting fields: - `db` – Name of the database. - `collection` – Name of the collection. -### Redis {#dicts-external_dicts_dict_sources-redis} +### ردیس {#dicts-external_dicts_dict_sources-redis} -Example of settings: +مثال تنظیمات: ``` xml @@ -584,7 +588,7 @@ Example of settings: ``` -or +یا ``` sql SOURCE(REDIS( @@ -595,11 +599,11 @@ SOURCE(REDIS( )) ``` -Setting fields: +تنظیم فیلدها: - `host` – The Redis host. - `port` – The port on the Redis server. -- `storage_type` – The structure of internal Redis storage using for work with keys. `simple` is for simple sources and for hashed single key sources, `hash_map` is for hashed sources with two keys. Ranged sources and cache sources with complex key are unsupported. May be omitted, default value is `simple`. +- `storage_type` – The structure of internal Redis storage using for work with keys. `simple` برای منابع ساده و برای منابع تک کلیدی درهم, `hash_map` برای منابع درهم با دو کلید. منابع در بازه زمانی و منابع کش با کلید پیچیده پشتیبانی نشده است. ممکن است حذف شود, مقدار پیش فرض است `simple`. - `db_index` – The specific numeric index of Redis logical database. May be omitted, default value is 0. -[Original article](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_sources/) +[مقاله اصلی](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_sources/) diff --git a/docs/fa/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md b/docs/fa/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md new file mode 100644 index 00000000000..914e7968e5c --- /dev/null +++ b/docs/fa/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md @@ -0,0 +1,176 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 44 +toc_title: "\u06A9\u0644\u06CC\u062F \u0641\u0631\u0647\u0646\u06AF \u0644\u063A\u062A\ + \ \u0648 \u0632\u0645\u06CC\u0646\u0647 \u0647\u0627\u06CC" +--- + +# کلید فرهنگ لغت و زمینه های {#dictionary-key-and-fields} + +این `` بند توصیف کلید فرهنگ لغت و زمینه های موجود برای نمایش داده شد. + +توصیف: + +``` xml + + + + Id + + + + + + + ... + + + +``` + +صفات در عناصر شرح داده شده است: + +- `` — [ستون کلید](external_dicts_dict_structure.md#ext_dict_structure-key). +- `` — [ستون داده](external_dicts_dict_structure.md#ext_dict_structure-attributes). می تواند تعدادی از ویژگی های وجود دارد. + +پرسوجو: + +``` sql +CREATE DICTIONARY dict_name ( + Id UInt64, + -- attributes +) +PRIMARY KEY Id +... +``` + +صفات در بدن پرس و جو توصیف: + +- `PRIMARY KEY` — [ستون کلید](external_dicts_dict_structure.md#ext_dict_structure-key) +- `AttrName AttrType` — [ستون داده](external_dicts_dict_structure.md#ext_dict_structure-attributes). می تواند تعدادی از ویژگی های وجود دارد. + +## کلید {#ext_dict_structure-key} + +تاتر از انواع زیر از کلید: + +- کلید عددی. 
`UInt64`. تعریف شده در تگ `<id>` یا با استفاده از کلیدواژه `PRIMARY KEY`.
- کلید کامپوزیت. مجموعه ای از مقادیر از انواع مختلف. تعریف شده در تگ `<key>` یا با کلیدواژه `PRIMARY KEY`.

یک ساختار XML می تواند شامل `<id>` یا `<key>` باشد. پرسوجوی DDL باید شامل یک `PRIMARY KEY` واحد باشد.

!!! warning "اخطار"
    کلید را نباید به عنوان یک ویژگی توصیف کنید.

### کلید عددی {#ext_dict-numeric-key}

نوع: `UInt64`.

مثال پیکربندی:

``` xml
<id>
    <name>Id</name>
</id>
```

حوزههای پیکربندی:

- `name` – The name of the column with keys.

برای پرسوجوی DDL:

``` sql
CREATE DICTIONARY (
    Id UInt64,
    ...
)
PRIMARY KEY Id
...
```

- `PRIMARY KEY` – The name of the column with keys.

### کلید کامپوزیت {#composite-key}

کلید می تواند یک `tuple` از فیلدهایی با هر نوع باشد. [طرحبندی](external_dicts_dict_layout.md) در این حالت باید `complex_key_hashed` یا `complex_key_cache` باشد.

!!! tip "نکته"
    کلید کامپوزیت می تواند از یک عنصر واحد هم تشکیل شود. به این ترتیب می توان مثلا از یک رشته به عنوان کلید استفاده کرد.

ساختار کلید در عنصر `<key>` تنظیم می شود. فیلدهای کلید در همان قالب [خصیصه های](external_dicts_dict_structure.md) فرهنگ لغت مشخص می شوند. مثال:

``` xml
<structure>
    <key>
        <attribute>
            <name>field1</name>
            <type>String</type>
        </attribute>
        <attribute>
            <name>field2</name>
            <type>UInt32</type>
        </attribute>
        ...
    </key>
...
```

یا

``` sql
CREATE DICTIONARY (
    field1 String,
    field2 String
    ...
)
PRIMARY KEY field1, field2
...
```

در پرس و جو با تابع `dictGet*`، یک تاپل به عنوان کلید داده می شود. مثال: `dictGetString('dict_name', 'attr_name', tuple('string for field1', num_for_field2))`.

## خصیصهها {#ext_dict_structure-attributes}

مثال پیکربندی:

``` xml
<structure>
    ...
    <attribute>
        <name>Name</name>
        <type>ClickHouseDataType</type>
        <null_value></null_value>
        <expression>rand64()</expression>
        <hierarchical>true</hierarchical>
        <injective>true</injective>
        <is_object_id>true</is_object_id>
    </attribute>
</structure>
```

یا

``` sql
CREATE DICTIONARY somename (
    Name ClickHouseDataType DEFAULT '' EXPRESSION rand64() HIERARCHICAL INJECTIVE IS_OBJECT_ID
)
```

حوزههای پیکربندی:

| برچسب | توصیف | مورد نیاز |
|--------|-------|-----------|
| `name` | نام ستون. | بله |
| `type` | نوع داده ClickHouse.<br/>ClickHouse تلاش می کند مقدار فرهنگ لغت را به نوع داده مشخص شده تبدیل کند. مثلا برای MySQL، فیلد ممکن است در جدول منبع MySQL از نوع `TEXT`، `VARCHAR` یا `BLOB` باشد، اما در ClickHouse می تواند به صورت `String` بارگذاری شود.<br/>[Nullable](../../../sql_reference/data_types/nullable.md) پشتیبانی نمی شود. | بله |
| `null_value` | مقدار پیش فرض برای یک عنصر ناموجود.<br/>در این مثال، یک رشته خالی است. نمی توانید از `NULL` در این فیلد استفاده کنید. | بله |
| `expression` | [عبارتی](../../syntax.md#syntax-expressions) که ClickHouse روی مقدار اجرا می کند.<br/>عبارت می تواند نام یک ستون در پایگاه داده راه دور باشد؛ به این ترتیب می توانید برای ستون راه دور یک نام مستعار بسازید.<br/><br/>مقدار پیش فرض: بدون عبارت. | نه |
| `hierarchical` | اگر `true` باشد، ویژگی شامل مقدار کلید والد برای کلید فعلی است. ببینید [لغتنامههای سلسله مراتبی](external_dicts_dict_hierarchical.md).<br/><br/>مقدار پیش فرض: `false`. | نه |
| `injective` | پرچمی که نشان می دهد نگاشت `id -> attribute` [تزریقی](https://en.wikipedia.org/wiki/Injective_function) است.<br/>اگر `true` باشد، ClickHouse می تواند درخواست به لغت نامه های تزریقی را به طور خودکار بعد از بند `GROUP BY` قرار دهد؛ معمولا این کار تعداد چنین درخواست هایی را به طور قابل توجهی کاهش می دهد.<br/><br/>مقدار پیش فرض: `false`. | نه |
| `is_object_id` | پرچمی که نشان می دهد پرس و جو برای سند MongoDB با `ObjectID` اجرا می شود.<br/><br/>مقدار پیش فرض: `false`. | نه |

## همچنین نگاه کنید به {#see-also}

- [توابع برای کار با لغت نامه های خارجی](../../../sql_reference/functions/ext_dict_functions.md).

[مقاله اصلی](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_structure/) diff --git a/docs/fa/sql_reference/dictionaries/external_dictionaries/index.md b/docs/fa/sql_reference/dictionaries/external_dictionaries/index.md new file mode 100644 index 00000000000..b5c506f5d93 --- /dev/null +++ b/docs/fa/sql_reference/dictionaries/external_dictionaries/index.md @@ -0,0 +1,8 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_folder_title: External Dictionaries +toc_priority: 37 +--- + + diff --git a/docs/fa/sql_reference/dictionaries/index.md b/docs/fa/sql_reference/dictionaries/index.md new file mode 100644 index 00000000000..004dfa7718a --- /dev/null +++ b/docs/fa/sql_reference/dictionaries/index.md @@ -0,0 +1,22 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_folder_title: Dictionaries +toc_priority: 35 +toc_title: "\u0645\u0639\u0631\u0641\u06CC \u0634\u0631\u06A9\u062A"
---

# واژهنامهها {#dictionaries}

فرهنگ لغت یک نگاشت (`key -> attributes`) است که برای انواع مختلف لیست های مرجع مناسب است.

ClickHouse از توابع خاصی برای کار با لغت نامه ها پشتیبانی می کند که می توانند در پرس و جوها استفاده شوند. استفاده از لغت نامه ها با این توابع، ساده تر و موثرتر از `JOIN` با جداول مرجع است.

مقادیر [NULL](../syntax.md#null) را نمی توان در یک فرهنگ لغت ذخیره کرد.

ClickHouse پشتیبانی می کند از:

- [لغت نامه های داخلی](internal_dicts.md#internal_dicts) با یک [مجموعه توابع](../../sql_reference/functions/ym_dict_functions.md) خاص.
- [لغت نامه های افزودنی (خارجی)](external_dictionaries/external_dicts.md) با یک [مجموعه توابع](../../sql_reference/functions/ext_dict_functions.md) دیگر.

[مقاله اصلی](https://clickhouse.tech/docs/en/query_language/dicts/) diff --git a/docs/fa/sql_reference/dictionaries/internal_dicts.md b/docs/fa/sql_reference/dictionaries/internal_dicts.md new file mode 100644 index 00000000000..1b297c22a5a --- /dev/null +++ b/docs/fa/sql_reference/dictionaries/internal_dicts.md @@ -0,0 +1,56 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 39 +toc_title: "\u0648\u0627\u0698\u0647\u0646\u0627\u0645\u0647\u0647\u0627 \u062F\u0627\
  \u062E\u0644\u06CC"
---

# واژهنامههای داخلی {#internal_dicts}

ClickHouse دارای قابلیت داخلی برای کار با یک geobase است.

این به شما امکان می دهد:

- از شناسه یک منطقه برای دریافت نام آن در زبان مورد نظر استفاده کنید.
- از شناسه یک منطقه برای دریافت شناسه شهر، ناحیه، منطقه فدرال، کشور یا قاره استفاده کنید.
- بررسی کنید که آیا یک منطقه بخشی از منطقه دیگری است.
- زنجیره ای از مناطق والد را دریافت کنید.

تمام توابع از “translocality” پشتیبانی می کنند، یعنی توانایی استفاده همزمان از دیدگاه های مختلف درباره مالکیت مناطق. برای اطلاعات بیشتر به بخش “Functions for working with Yandex.Metrica dictionaries” مراجعه کنید.

واژهنامههای داخلی در بسته پیش فرض غیر فعال هستند.
برای فعال کردن آنها، پارامترهای `path_to_regions_hierarchy_file` و `path_to_regions_names_files` را در فایل پیکربندی سرور تنظیم کنید.

geobase از فایل های متنی بارگذاری می شود.

فایل های `regions_hierarchy*.txt` را در دایرکتوری `path_to_regions_hierarchy_file` قرار دهید. این پارامتر پیکربندی باید مسیر فایل `regions_hierarchy.txt` (سلسله مراتب منطقه ای پیش فرض) را مشخص کند و فایل های دیگر (`regions_hierarchy_ua.txt`) باید در همان دایرکتوری قرار داشته باشند.
+ +قرار دادن `regions_names_*.txt` فایل ها در `path_to_regions_names_files` فهرست راهنما. + +شما همچنین می توانید این فایل ها خود را ایجاد کنید. فرمت فایل به شرح زیر است: + +`regions_hierarchy*.txt`: ستون (بدون هدر): + +- شناسه منطقه (`UInt32`) +- شناسه منطقه والد (`UInt32`) +- نوع منطقه (`UInt8`): 1 - قاره, 3 - کشور, 4 - منطقه فدرال, 5 - منطقه, 6-شهرستان; انواع دیگر ارزش ندارد +- جمعیت (`UInt32`) — optional column + +`regions_names_*.txt`: ستون (بدون هدر): + +- شناسه منطقه (`UInt32`) +- نام منطقه (`String`) — Can't contain tabs or line feeds, even escaped ones. + +مجموعه تخت برای ذخیره سازی در رم استفاده می شود. به همین دلیل شناسه نباید بیش از یک میلیون. + +واژهنامهها را می توان بدون راه اندازی مجدد سرور به روز شد. با این حال, مجموعه ای از لغت نامه های موجود به روز نمی. +برای به روز رسانی بار اصلاح فایل بررسی می شود. اگر یک فایل تغییر کرده است, فرهنگ لغت به روز شده است. +فاصله برای بررسی تغییرات در پیکربندی `builtin_dictionaries_reload_interval` پارامتر. +به روز رسانی فرهنگ لغت (به غیر از بارگذاری در اولین استفاده) نمایش داده شد را مسدود کند. در طول به روز رسانی, نمایش داده شد با استفاده از نسخه های قدیمی از لغت نامه. اگر یک خطا در طول به روز رسانی رخ می دهد, خطا به ورود به سیستم سرور نوشته شده, و نمایش داده شد ادامه استفاده از نسخه های قدیمی از لغت نامه. + +ما توصیه می کنیم دوره به روز رسانی لغت نامه با پایگاه داده. در طول به روز رسانی, تولید فایل های جدید و ارسال به یک مکان جداگانه. وقتی همه چیز اماده است فایل های مورد استفاده توسط سرور را تغییر دهید. + +همچنین توابع برای کار با شناسه های سیستم عامل و یاندکس وجود دارد.موتورهای جستجو متریکا, اما نباید استفاده شود. + +[مقاله اصلی](https://clickhouse.tech/docs/en/query_language/dicts/internal_dicts/) diff --git a/docs/fa/sql_reference/functions/arithmetic_functions.md b/docs/fa/sql_reference/functions/arithmetic_functions.md new file mode 100644 index 00000000000..3d5af8b3536 --- /dev/null +++ b/docs/fa/sql_reference/functions/arithmetic_functions.md @@ -0,0 +1,87 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 35 +toc_title: "\u062D\u0633\u0627\u0628" +--- + +# توابع ریاضی {#arithmetic-functions} + +برای تمام توابع ریاضی, نوع نتیجه به عنوان کوچکترین نوع شماره که در نتیجه متناسب با محاسبه, اگر چنین نوع وجود دارد. حداقل به طور همزمان بر اساس تعداد بیت ها امضا شده است یا شناور است. اگر بیت به اندازه کافی وجود ندارد, بالاترین نوع بیت گرفته شده است. + +مثال: + +``` sql +SELECT toTypeName(0), toTypeName(0 + 0), toTypeName(0 + 0 + 0), toTypeName(0 + 0 + 0 + 0) +``` + +``` text +┌─toTypeName(0)─┬─toTypeName(plus(0, 0))─┬─toTypeName(plus(plus(0, 0), 0))─┬─toTypeName(plus(plus(plus(0, 0), 0), 0))─┐ +│ UInt8 │ UInt16 │ UInt32 │ UInt64 │ +└───────────────┴────────────────────────┴─────────────────────────────────┴──────────────────────────────────────────┘ +``` + +حساب توابع کار برای هر جفت از انواع از uint8, uint16, uint32, uint64, int8, int16, int32, int64, float32 یا float64. + +سرریز به همان شیوه که در ج تولید++. + +## به علاوه (یک, ب), + اپراتور ب {#plusa-b-a-b-operator} + +محاسبه مجموع اعداد. +شما همچنین می توانید اعداد صحیح را با یک تاریخ یا تاریخ و زمان اضافه کنید. در مورد یک تاریخ, اضافه کردن یک عدد صحیح به معنی اضافه کردن تعداد مربوط به روز. برای تاریخ با زمان, به این معنی اضافه کردن شماره مربوطه را از ثانیه. + +## منفی (یک, ب), اپراتور الف ب {#minusa-b-a-b-operator} + +محاسبه تفاوت. نتیجه همیشه امضا شده است. + +You can also calculate integer numbers from a date or date with time. The idea is the same – see above for ‘plus’. 
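For example (a minimal sketch), adding or subtracting an integer shifts a `Date` by that many days:

``` sql
SELECT toDate('2020-03-12') + 5 AS plus5, toDate('2020-03-12') - 5 AS minus5
-- 2020-03-17    2020-03-07
```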
+ +## ضرب (ب) اپراتور \* ب {#multiplya-b-a-b-operator} + +محاسبه محصول از اعداد. + +## تقسیم کردن (یک, ب), یک / اپراتور ب {#dividea-b-a-b-operator} + +محاسبه خارج قسمت از اعداد. نوع نتیجه همیشه یک نوع شناور است. +این تقسیم عدد صحیح نیست. برای تقسیم عدد صحیح, استفاده از ‘intDiv’ تابع. +هنگامی که تقسیم بر صفر می کنید ‘inf’, ‘-inf’ یا ‘nan’. + +## اینترنت) {#intdiva-b} + +محاسبه خارج قسمت از اعداد. تقسیم به اعداد صحیح, گرد کردن پایین (با ارزش مطلق). +یک استثنا در هنگام تقسیم بر صفر یا زمانی که تقسیم یک عدد منفی حداقل توسط منهای یک پرتاب می شود. + +## intDivOrZero(a, b) {#intdivorzeroa-b} + +متفاوت از ‘intDiv’ در این بازده صفر در هنگام تقسیم صفر و یا زمانی که تقسیم یک عدد منفی حداقل منهای یک. + +## مودولو (و, ب), یک % اپراتور ب {#moduloa-b-a-b-operator} + +محاسبه باقی مانده پس از تقسیم. +اگر استدلال اعداد ممیز شناور هستند قبل از تبدیل به اعداد صحیح با حذف بخش اعشاری هستند. +باقی مانده است به همان معنا که در ج گرفته++. تقسیم کوتاه برای اعداد منفی استفاده می شود. +یک استثنا در هنگام تقسیم بر صفر یا زمانی که تقسیم یک عدد منفی حداقل توسط منهای یک پرتاب می شود. + +## moduloOrZero(a, b) {#moduloorzeroa-b} + +متفاوت از ‘modulo’ در که باز می گردد صفر زمانی که مقسوم علیه صفر است. + +## نفی (),- اپراتور {#negatea-a-operator} + +محاسبه یک عدد با علامت معکوس. نتیجه همیشه امضا شده است. + +## شکم(یک) {#arithm_func-abs} + +محاسبه ارزش مطلق تعداد (). به این معنا که, اگر یک \< 0, باز می گردد یک. برای انواع عدد صحیح امضا, این برمی گرداند یک عدد بدون علامت. + +## گسیدی(یک, ب) {#gcda-b} + +بازگرداندن بزرگترین مقسوم علیه مشترک از اعداد. +یک استثنا در هنگام تقسیم بر صفر یا زمانی که تقسیم یک عدد منفی حداقل توسط منهای یک پرتاب می شود. + +## اندازه) {#lcma-b} + +بازگرداندن چند حداقل مشترک از اعداد. +یک استثنا در هنگام تقسیم بر صفر یا زمانی که تقسیم یک عدد منفی حداقل توسط منهای یک پرتاب می شود. + +[مقاله اصلی](https://clickhouse.tech/docs/en/query_language/functions/arithmetic_functions/) diff --git a/docs/fa/sql_reference/functions/array_functions.md b/docs/fa/sql_reference/functions/array_functions.md new file mode 100644 index 00000000000..76064d612a8 --- /dev/null +++ b/docs/fa/sql_reference/functions/array_functions.md @@ -0,0 +1,1057 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 46 +toc_title: "\u06A9\u0627\u0631 \u0628\u0627 \u0627\u0631\u0631\u06CC\u0633" +--- + +# توابع برای کار با ارریس {#functions-for-working-with-arrays} + +## خالی {#function-empty} + +بازده 1 برای یک مجموعه خالی, یا 0 برای یک مجموعه غیر خالی. +نتیجه این نوع uint8. +این تابع نیز برای رشته کار می کند. + +## notEmpty {#function-notempty} + +بازده 0 برای یک مجموعه خالی, یا 1 برای یک مجموعه غیر خالی. +نتیجه این نوع uint8. +این تابع نیز برای رشته کار می کند. + +## طول {#array_functions-length} + +بازگرداندن تعداد اقلام در مجموعه. +نتیجه این نوع uint64. +این تابع نیز برای رشته کار می کند. + +## emptyArrayUInt8, emptyArrayUInt16, emptyArrayUInt32, emptyArrayUInt64 {#emptyarrayuint8-emptyarrayuint16-emptyarrayuint32-emptyarrayuint64} + +## emptyArrayInt8, emptyArrayInt16, emptyArrayInt32, emptyArrayInt64 {#emptyarrayint8-emptyarrayint16-emptyarrayint32-emptyarrayint64} + +## emptyArrayFloat32, emptyArrayFloat64 {#emptyarrayfloat32-emptyarrayfloat64} + +## emptyArrayDate, emptyArrayDateTime {#emptyarraydate-emptyarraydatetime} + +## تخت خواب {#emptyarraystring} + +قبول صفر استدلال و مجموعه ای خالی از نوع مناسب را برمی گرداند. + +## خالی {#emptyarraytosingle} + +یک مجموعه خالی را می پذیرد و یک مجموعه یک عنصر را که برابر با مقدار پیش فرض است باز می گرداند. 
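+
+A minimal sketch of this behavior: applied to an empty `UInt8` array, it yields a one-element array holding that type's default value.
+
+``` sql
+SELECT emptyArrayToSingle(emptyArrayUInt8()) AS res
+```
+
+``` text
+┌─res─┐
+│ [0] │
+└─────┘
+```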
+ +## محدوده( پایان), دامنه (شروع, پایان \[, گام\]) {#rangeend-rangestart-end-step} + +بازگرداندن مجموعه ای از اعداد از ابتدا تا انتها-1 به گام. +اگر استدلال `start` مشخص نشده است, به طور پیش فرض به 0. +اگر استدلال `step` مشخص نشده است, به طور پیش فرض به 1. +این رفتار تقریبا مانند پیتون `range`. اما تفاوت این است که همه نوع استدلال باید باشد `UInt` اعداد. +فقط در مورد, یک استثنا پرتاب می شود اگر ارریس با طول کل بیش از 100,000,000 عناصر در یک بلوک داده ها ایجاد. + +## array(x1, …), operator \[x1, …\] {#arrayx1-operator-x1} + +ایجاد مجموعه ای از استدلال تابع. +استدلال باید ثابت باشد و انواع که کوچکترین نوع رایج. حداقل یک استدلال باید تصویب شود, چرا که در غیر این صورت مشخص نیست که چه نوع از مجموعه ای برای ایجاد. به این معنا که شما نمی توانید از این تابع برای ایجاد یک مجموعه خالی استفاده کنید (برای انجام این کار از ‘emptyArray\*’ تابع در بالا توضیح داده شد). +بازگشت یک ‘Array(T)’ نوع نتیجه, جایی که ‘T’ کوچکترین نوع رایج از استدلال گذشت. + +## موافقم {#arrayconcat} + +ترکیبی از ارریس به عنوان استدلال گذشت. + +``` sql +arrayConcat(arrays) +``` + +**پارامترها** + +- `arrays` – Arbitrary number of arguments of [& حذف](../../sql_reference/data_types/array.md) نوع. + **مثال** + + + +``` sql +SELECT arrayConcat([1, 2], [3, 4], [5, 6]) AS res +``` + +``` text +┌─res───────────┐ +│ [1,2,3,4,5,6] │ +└───────────────┘ +``` + +## هشدار داده می شود\] {#arrayelementarr-n-operator-arrn} + +عنصر را با شاخص دریافت کنید `n` از مجموعه `arr`. `n` باید هر نوع عدد صحیح باشد. +شاخص ها در مجموعه ای از یک شروع می شوند. +شاخص های منفی پشتیبانی می شوند. در این مورد آن را انتخاب می کند که عنصر مربوطه شماره از پایان. به عنوان مثال, `arr[-1]` اخرین وسیله ست + +اگر شاخص می افتد در خارج از مرزهای مجموعه, این گرداند برخی از مقدار پیش فرض (0 برای اعداد, یک رشته خالی برای رشته, و غیره.), به جز برای مورد با یک مجموعه غیر ثابت و یک شاخص ثابت 0 (در این مورد وجود خواهد داشت یک خطا `Array indices are 1-based`). + +## است (ورود, علم) {#hasarr-elem} + +بررسی اینکه ‘arr’ اری ‘elem’ عنصر. +بازده 0 اگر عنصر در مجموعه نیست, یا 1 اگر. + +`NULL` به عنوان یک ارزش پردازش شده است. + +``` sql +SELECT has([1, 2, NULL], NULL) +``` + +``` text +┌─has([1, 2, NULL], NULL)─┐ +│ 1 │ +└─────────────────────────┘ +``` + +## حصال {#hasall} + +بررسی اینکه یک مجموعه زیر مجموعه دیگری باشد. + +``` sql +hasAll(set, subset) +``` + +**پارامترها** + +- `set` – Array of any type with a set of elements. +- `subset` – Array of any type with elements that should be tested to be a subset of `set`. + +**مقادیر بازگشتی** + +- `1` اگر `set` شامل تمام عناصر از `subset`. +- `0` وگرنه + +**خواص عجیب و غریب** + +- مجموعه خالی زیر مجموعه ای از هر است. +- `Null` پردازش به عنوان یک ارزش. +- منظور از ارزش ها در هر دو ارریس مهم نیست. + +**مثالها** + +`SELECT hasAll([], [])` بازده 1. + +`SELECT hasAll([1, Null], [Null])` بازده 1. + +`SELECT hasAll([1.0, 2, 3, 4], [1, 3])` بازده 1. + +`SELECT hasAll(['a', 'b'], ['a'])` بازده 1. + +`SELECT hasAll([1], ['a'])` بازده 0. + +`SELECT hasAll([[1, 2], [3, 4]], [[1, 2], [3, 5]])` بازده 0. + +## hasAny {#hasany} + +بررسی اینکه دو بند چهار راه توسط برخی از عناصر. + +``` sql +hasAny(array1, array2) +``` + +**پارامترها** + +- `array1` – Array of any type with a set of elements. +- `array2` – Array of any type with a set of elements. + +**بازگشت ارزش** + +- `1` اگر `array1` و `array2` حداقل یک عنصر مشابه داشته باشید. +- `0` وگرنه + +**خواص عجیب و غریب** + +- `Null` پردازش به عنوان یک ارزش. +- منظور از ارزش ها در هر دو ارریس مهم نیست. + +**مثالها** + +`SELECT hasAny([1], [])` بازگشت `0`. + +`SELECT hasAny([Null], [Null, 1])` بازگشت `1`. 
+ +`SELECT hasAny([-128, 1., 512], [1])` بازگشت `1`. + +`SELECT hasAny([[1, 2], [3, 4]], ['a', 'c'])` بازگشت `0`. + +`SELECT hasAll([[1, 2], [3, 4]], [[1, 2], [1, 2]])` بازگشت `1`. + +## هشدار داده می شود) {#indexofarr-x} + +بازگرداندن شاخص از اولین ‘x’ عنصر (با شروع از 1) اگر در مجموعه ای است, یا 0 اگر نیست. + +مثال: + +``` sql +SELECT indexOf([1, 3, NULL, NULL], NULL) +``` + +``` text +┌─indexOf([1, 3, NULL, NULL], NULL)─┐ +│ 3 │ +└───────────────────────────────────┘ +``` + +عناصر را به `NULL` به عنوان مقادیر طبیعی انجام می شود. + +## هشدار داده می شود) {#countequalarr-x} + +بازده تعداد عناصر موجود در آرایه برابر با x. معادل arraycount (elem -\> elem = x arr). + +`NULL` عناصر به عنوان مقادیر جداگانه به کار گرفته. + +مثال: + +``` sql +SELECT countEqual([1, 2, NULL, NULL], NULL) +``` + +``` text +┌─countEqual([1, 2, NULL, NULL], NULL)─┐ +│ 2 │ +└──────────────────────────────────────┘ +``` + +## هشدار داده می شود) {#array_functions-arrayenumerate} + +Returns the array \[1, 2, 3, …, length (arr) \] + +این تابع به طور معمول با مجموعه ای استفاده می شود. این اجازه می دهد شمارش چیزی فقط یک بار برای هر مجموعه پس از استفاده از مجموعه پیوستن. مثال: + +``` sql +SELECT + count() AS Reaches, + countIf(num = 1) AS Hits +FROM test.hits +ARRAY JOIN + GoalsReached, + arrayEnumerate(GoalsReached) AS num +WHERE CounterID = 160656 +LIMIT 10 +``` + +``` text +┌─Reaches─┬──Hits─┐ +│ 95606 │ 31406 │ +└─────────┴───────┘ +``` + +در این مثال, می رسد تعداد تبدیل است (رشته دریافت پس از استفاده از مجموعه ملحق), و بازدید تعداد بازدید صفحات (رشته قبل از مجموعه ملحق). در این مورد خاص شما می توانید همان نتیجه را در یک راه ساده تر: + +``` sql +SELECT + sum(length(GoalsReached)) AS Reaches, + count() AS Hits +FROM test.hits +WHERE (CounterID = 160656) AND notEmpty(GoalsReached) +``` + +``` text +┌─Reaches─┬──Hits─┐ +│ 95606 │ 31406 │ +└─────────┴───────┘ +``` + +این تابع همچنین می توانید در توابع مرتبه بالاتر استفاده می شود. برای مثال می توانید از شاخص های مجموعه ای برای عناصری که با شرایط مطابقت دارند استفاده کنید. + +## arrayEnumerateUniq(arr, …) {#arrayenumerateuniqarr} + +بازگرداندن مجموعه ای به همان اندازه به عنوان مجموعه منبع, نشان می دهد برای هر عنصر چه موقعیت خود را در میان عناصر با همان مقدار. +به عنوان مثال: ارریینومراتونیک(\[10, 20, 10, 30\]) = \[1, 1, 2, 1\]. + +این تابع در هنگام استفاده از مجموعه ای پیوستن و تجمع عناصر مجموعه ای مفید است. +مثال: + +``` sql +SELECT + Goals.ID AS GoalID, + sum(Sign) AS Reaches, + sumIf(Sign, num = 1) AS Visits +FROM test.visits +ARRAY JOIN + Goals, + arrayEnumerateUniq(Goals.ID) AS num +WHERE CounterID = 160656 +GROUP BY GoalID +ORDER BY Reaches DESC +LIMIT 10 +``` + +``` text +┌──GoalID─┬─Reaches─┬─Visits─┐ +│ 53225 │ 3214 │ 1097 │ +│ 2825062 │ 3188 │ 1097 │ +│ 56600 │ 2803 │ 488 │ +│ 1989037 │ 2401 │ 365 │ +│ 2830064 │ 2396 │ 910 │ +│ 1113562 │ 2372 │ 373 │ +│ 3270895 │ 2262 │ 812 │ +│ 1084657 │ 2262 │ 345 │ +│ 56599 │ 2260 │ 799 │ +│ 3271094 │ 2256 │ 812 │ +└─────────┴─────────┴────────┘ +``` + +در این مثال هر هدف شناسه محاسبه تعداد تبدیل (هر عنصر در اهداف تو در تو ساختار داده ها یک هدف است که رسیده بود که ما اشاره به عنوان یک تبدیل) و تعداد جلسات. بدون مجموعه ملحق, ما می خواهیم تعداد جلسات به عنوان مجموع شمارش (امضا کردن). اما در این مورد خاص ردیف شد ضرب در تو در تو در اهداف و ساختار آن در سفارش به تعداد هر جلسه یک بار بعد از این ما اعمال یک شرط به ارزش arrayenumerateuniq(اهداف است.id) تابع. + +تابع ارریینومراتونیک می تواند چندین بار از همان اندازه به عنوان استدلال استفاده کند. 
در این مورد, منحصر به فرد است برای تاپل از عناصر در موقعیت های مشابه در تمام ارریس در نظر گرفته. + +``` sql +SELECT arrayEnumerateUniq([1, 1, 1, 2, 2, 2], [1, 1, 2, 1, 1, 2]) AS res +``` + +``` text +┌─res───────────┐ +│ [1,2,1,1,2,1] │ +└───────────────┘ +``` + +این در هنگام استفاده از مجموعه با یک ساختار داده های تو در تو و تجمع بیشتر در سراسر عناصر متعدد در این ساختار ملحق لازم است. + +## عقبگرد {#arraypopback} + +حذف مورد گذشته از مجموعه. + +``` sql +arrayPopBack(array) +``` + +**پارامترها** + +- `array` – Array. + +**مثال** + +``` sql +SELECT arrayPopBack([1, 2, 3]) AS res +``` + +``` text +┌─res───┐ +│ [1,2] │ +└───────┘ +``` + +## ساحل {#arraypopfront} + +اولین مورد را از مجموعه حذف می کند. + +``` sql +arrayPopFront(array) +``` + +**پارامترها** + +- `array` – Array. + +**مثال** + +``` sql +SELECT arrayPopFront([1, 2, 3]) AS res +``` + +``` text +┌─res───┐ +│ [2,3] │ +└───────┘ +``` + +## عقب نشینی {#arraypushback} + +یک مورد را به انتهای مجموعه اضافه می کند. + +``` sql +arrayPushBack(array, single_value) +``` + +**پارامترها** + +- `array` – Array. +- `single_value` – A single value. Only numbers can be added to an array with numbers, and only strings can be added to an array of strings. When adding numbers, ClickHouse automatically sets the `single_value` نوع داده مجموعه را تایپ کنید. برای کسب اطلاعات بیشتر در مورد انواع داده ها در خانه کلیک کنید “[انواع داده ها](../../sql_reference/data_types/index.md#data_types)”. می توان `NULL`. تابع می افزاید: `NULL` عنصر به مجموعه ای, و نوع عناصر مجموعه ای تبدیل به `Nullable`. + +**مثال** + +``` sql +SELECT arrayPushBack(['a'], 'b') AS res +``` + +``` text +┌─res───────┐ +│ ['a','b'] │ +└───────────┘ +``` + +## ساحلی {#arraypushfront} + +یک عنصر را به ابتدای مجموعه اضافه می کند. + +``` sql +arrayPushFront(array, single_value) +``` + +**پارامترها** + +- `array` – Array. +- `single_value` – A single value. Only numbers can be added to an array with numbers, and only strings can be added to an array of strings. When adding numbers, ClickHouse automatically sets the `single_value` نوع داده مجموعه را تایپ کنید. برای کسب اطلاعات بیشتر در مورد انواع داده ها در خانه کلیک کنید “[انواع داده ها](../../sql_reference/data_types/index.md#data_types)”. می تواند باشد `NULL`. این تابع می افزاید: `NULL` عنصر به مجموعه ای, و نوع عناصر مجموعه ای تبدیل به `Nullable`. + +**مثال** + +``` sql +SELECT arrayPushFront(['b'], 'a') AS res +``` + +``` text +┌─res───────┐ +│ ['a','b'] │ +└───────────┘ +``` + +## نمایش سایت {#arrayresize} + +طول مجموعه را تغییر می دهد. + +``` sql +arrayResize(array, size[, extender]) +``` + +**پارامترها:** + +- `array` — Array. +- `size` — Required length of the array. + - اگر `size` کمتر از اندازه اصلی مجموعه است, مجموعه ای از سمت راست کوتاه. +- اگر `size` مجموعه بزرگتر از اندازه اولیه مجموعه است که به سمت راست گسترش می یابد `extender` مقادیر یا مقادیر پیش فرض برای نوع داده از موارد مجموعه. +- `extender` — Value for extending an array. Can be `NULL`. + +**مقدار بازگشتی:** + +مجموعه ای از طول `size`. + +**نمونه هایی از تماس** + +``` sql +SELECT arrayResize([1], 3) +``` + +``` text +┌─arrayResize([1], 3)─┐ +│ [1,0,0] │ +└─────────────────────┘ +``` + +``` sql +SELECT arrayResize([1], 3, NULL) +``` + +``` text +┌─arrayResize([1], 3, NULL)─┐ +│ [1,NULL,NULL] │ +└───────────────────────────┘ +``` + +## arraySlice {#arrayslice} + +یک تکه از مجموعه را برمی گرداند. + +``` sql +arraySlice(array, offset[, length]) +``` + +**پارامترها** + +- `array` – Array of data. +- `offset` – Indent from the edge of the array. 
A positive value indicates an offset on the left, and a negative value is an indent on the right. Numbering of the array items begins with 1. +- `length` - طول قطعه مورد نیاز . اگر شما یک مقدار منفی مشخص, تابع یک تکه باز می گرداند `[offset, array_length - length)`. اگر شما حذف ارزش, تابع برش می گرداند `[offset, the_end_of_array]`. + +**مثال** + +``` sql +SELECT arraySlice([1, 2, NULL, 4, 5], 2, 3) AS res +``` + +``` text +┌─res────────┐ +│ [2,NULL,4] │ +└────────────┘ +``` + +عناصر مجموعه ای به `NULL` به عنوان مقادیر طبیعی انجام می شود. + +## arraySort(\[func,\] arr, …) {#array_functions-sort} + +عناصر را مرتب می کند `arr` صف در صعودی. اگر `func` تابع مشخص شده است, مرتب سازی سفارش توسط نتیجه تعیین `func` تابع اعمال شده به عناصر مجموعه. اگر `func` قبول استدلال های متعدد `arraySort` تابع به تصویب می رسد چند بند که استدلال `func` خواهد به مطابقت. نمونه های دقیق در پایان نشان داده شده است `arraySort` توصیف. + +نمونه ای از مقادیر صحیح مرتب سازی: + +``` sql +SELECT arraySort([1, 3, 3, 0]); +``` + +``` text +┌─arraySort([1, 3, 3, 0])─┐ +│ [0,1,3,3] │ +└─────────────────────────┘ +``` + +نمونه ای از مقادیر رشته مرتب سازی: + +``` sql +SELECT arraySort(['hello', 'world', '!']); +``` + +``` text +┌─arraySort(['hello', 'world', '!'])─┐ +│ ['!','hello','world'] │ +└────────────────────────────────────┘ +``` + +ترتیب مرتب سازی زیر را برای `NULL`, `NaN` و `Inf` مقادیر: + +``` sql +SELECT arraySort([1, nan, 2, NULL, 3, nan, -4, NULL, inf, -inf]); +``` + +``` text +┌─arraySort([1, nan, 2, NULL, 3, nan, -4, NULL, inf, -inf])─┐ +│ [-inf,-4,1,2,3,inf,nan,nan,NULL,NULL] │ +└───────────────────────────────────────────────────────────┘ +``` + +- `-Inf` مقادیر برای اولین بار در مجموعه هستند. +- `NULL` ارزشهای خبری عبارتند از: +- `NaN` مقادیر درست قبل هستند `NULL`. +- `Inf` مقادیر درست قبل هستند `NaN`. + +توجه داشته باشید که `arraySort` یک [عملکرد عالی مرتبه](higher_order_functions.md). شما می توانید یک تابع لامبدا را به عنوان اولین استدلال منتقل کنید. در این مورد مرتب سازی سفارش تعیین می شود در نتیجه از lambda تابع اعمال شده به عناصر آرایه است. + +بیایید مثال زیر را در نظر بگیریم: + +``` sql +SELECT arraySort((x) -> -x, [1, 2, 3]) as res; +``` + +``` text +┌─res─────┐ +│ [3,2,1] │ +└─────────┘ +``` + +For each element of the source array, the lambda function returns the sorting key, that is, \[1 –\> -1, 2 –\> -2, 3 –\> -3\]. Since the `arraySort` تابع انواع کلید به ترتیب صعودی, نتیجه این است \[3, 2, 1\]. بنابراین `(x) –> -x` عملکرد لامبدا مجموعه [ترتیب نزولی](#array_functions-reverse-sort) در یک مرتب سازی. + +تابع لامبدا می تواند استدلال های متعدد را قبول کند. در این مورد, شما نیاز به تصویب `arraySort` تابع چند بند از طول یکسان است که استدلال تابع لامبدا به مطابقت. مجموعه حاصل از عناصر از اولین مجموعه ورودی تشکیل شده است. به عنوان مثال: + +``` sql +SELECT arraySort((x, y) -> y, ['hello', 'world'], [2, 1]) as res; +``` + +``` text +┌─res────────────────┐ +│ ['world', 'hello'] │ +└────────────────────┘ +``` + +در اینجا عناصر موجود در مجموعه دوم (\[2, 1\]) تعریف یک کلید مرتب سازی برای عنصر مربوطه از مجموعه منبع (\[‘hello’, ‘world’\]), به این معنا که, \[‘hello’ –\> 2, ‘world’ –\> 1\]. Since the lambda function doesn't use `x` مقادیر واقعی مجموعه منبع بر نظم در نتیجه تاثیر نمی گذارد. پس, ‘hello’ خواهد بود که عنصر دوم در نتیجه, و ‘world’ خواهد بود که برای اولین بار. + +نمونه های دیگر در زیر نشان داده شده. 
+ +``` sql +SELECT arraySort((x, y) -> y, [0, 1, 2], ['c', 'b', 'a']) as res; +``` + +``` text +┌─res─────┐ +│ [2,1,0] │ +└─────────┘ +``` + +``` sql +SELECT arraySort((x, y) -> -y, [0, 1, 2], [1, 2, 3]) as res; +``` + +``` text +┌─res─────┐ +│ [2,1,0] │ +└─────────┘ +``` + +!!! note "یادداشت" + برای بهبود کارایی مرتب سازی [تبدیل شوارتز](https://en.wikipedia.org/wiki/Schwartzian_transform) استفاده شده است. + +## arrayReverseSort(\[func,\] arr, …) {#array_functions-reverse-sort} + +عناصر را مرتب می کند `arr` صف در نزولی. اگر `func` تابع مشخص شده است, `arr` بر اساس نتیجه طبقه بندی شده اند `func` عملکرد به عناصر مجموعه اعمال می شود و سپس مجموعه مرتب شده معکوس می شود. اگر `func` قبول استدلال های متعدد `arrayReverseSort` تابع به تصویب می رسد چند بند که استدلال `func` خواهد به مطابقت. نمونه های دقیق در پایان نشان داده شده است `arrayReverseSort` توصیف. + +نمونه ای از مقادیر صحیح مرتب سازی: + +``` sql +SELECT arrayReverseSort([1, 3, 3, 0]); +``` + +``` text +┌─arrayReverseSort([1, 3, 3, 0])─┐ +│ [3,3,1,0] │ +└────────────────────────────────┘ +``` + +نمونه ای از مقادیر رشته مرتب سازی: + +``` sql +SELECT arrayReverseSort(['hello', 'world', '!']); +``` + +``` text +┌─arrayReverseSort(['hello', 'world', '!'])─┐ +│ ['world','hello','!'] │ +└───────────────────────────────────────────┘ +``` + +ترتیب مرتب سازی زیر را برای `NULL`, `NaN` و `Inf` مقادیر: + +``` sql +SELECT arrayReverseSort([1, nan, 2, NULL, 3, nan, -4, NULL, inf, -inf]) as res; +``` + +``` text +┌─res───────────────────────────────────┐ +│ [inf,3,2,1,-4,-inf,nan,nan,NULL,NULL] │ +└───────────────────────────────────────┘ +``` + +- `Inf` مقادیر برای اولین بار در مجموعه هستند. +- `NULL` ارزشهای خبری عبارتند از: +- `NaN` مقادیر درست قبل هستند `NULL`. +- `-Inf` مقادیر درست قبل هستند `NaN`. + +توجه داشته باشید که `arrayReverseSort` یک [عملکرد عالی مرتبه](higher_order_functions.md). شما می توانید یک تابع لامبدا را به عنوان اولین استدلال منتقل کنید. مثال زیر نشان داده شده. + +``` sql +SELECT arrayReverseSort((x) -> -x, [1, 2, 3]) as res; +``` + +``` text +┌─res─────┐ +│ [1,2,3] │ +└─────────┘ +``` + +این مجموعه به روش زیر مرتب شده است: + +1. ابتدا مجموعه منبع (\[1, 2, 3\]) با توجه به نتیجه تابع لامبدا اعمال شده به عناصر مجموعه طبقه بندی شده اند. نتیجه یک مجموعه است \[3, 2, 1\]. +2. مجموعه ای است که در مرحله قبل به دست, معکوس شده است. بنابراین, نتیجه نهایی است \[1, 2, 3\]. + +تابع لامبدا می تواند استدلال های متعدد را قبول کند. در این مورد, شما نیاز به تصویب `arrayReverseSort` تابع چند بند از طول یکسان است که استدلال تابع لامبدا به مطابقت. مجموعه حاصل از عناصر از اولین مجموعه ورودی تشکیل شده است. به عنوان مثال: + +``` sql +SELECT arrayReverseSort((x, y) -> y, ['hello', 'world'], [2, 1]) as res; +``` + +``` text +┌─res───────────────┐ +│ ['hello','world'] │ +└───────────────────┘ +``` + +در این مثال مجموعه به روش زیر مرتب شده است: + +1. در ابتدا مجموعه منبع (\[‘hello’, ‘world’\]) با توجه به نتیجه تابع لامبدا اعمال شده به عناصر از ارریس طبقه بندی شده اند. عناصر که در مجموعه دوم به تصویب رسید (\[2, 1\]), تعریف کلید مرتب سازی برای عناصر مربوطه را از مجموعه منبع. نتیجه یک مجموعه است \[‘world’, ‘hello’\]. +2. مجموعه ای که در مرحله قبل طبقه بندی شده اند, معکوس شده است. بنابراین نتیجه نهایی این است \[‘hello’, ‘world’\]. + +نمونه های دیگر در زیر نشان داده شده. 
+ +``` sql +SELECT arrayReverseSort((x, y) -> y, [4, 3, 5], ['a', 'b', 'c']) AS res; +``` + +``` text +┌─res─────┐ +│ [5,3,4] │ +└─────────┘ +``` + +``` sql +SELECT arrayReverseSort((x, y) -> -y, [4, 3, 5], [1, 2, 3]) AS res; +``` + +``` text +┌─res─────┐ +│ [4,3,5] │ +└─────────┘ +``` + +## arrayUniq(arr, …) {#arrayuniqarr} + +اگر یک استدلال به تصویب می رسد, تعداد عناصر مختلف در مجموعه شمارش. +اگر استدلال های متعدد به تصویب می رسد, شمارش تعداد تاپل های مختلف از عناصر در موقعیت های مربوطه در مجموعه های متعدد. + +اگر شما می خواهید برای دریافت یک لیست از اقلام منحصر به فرد در مجموعه, شما می توانید از ارری راهاهن استفاده(‘groupUniqArray’, arr). + +## هشدار داده می شود) {#array-functions-join} + +یک تابع خاص. بخش را ببینید [“ArrayJoin function”](array_join.md#functions_arrayjoin). + +## کلیدواژه {#arraydifference} + +محاسبه تفاوت بین عناصر مجموعه مجاور. بازگرداندن مجموعه ای که عنصر اول خواهد بود 0, دوم تفاوت بین است `a[1] - a[0]`, etc. The type of elements in the resulting array is determined by the type inference rules for subtraction (e.g. `UInt8` - `UInt8` = `Int16`). + +**نحو** + +``` sql +arrayDifference(array) +``` + +**پارامترها** + +- `array` – [& حذف](https://clickhouse.yandex/docs/en/data_types/array/). + +**مقادیر بازگشتی** + +بازگرداندن مجموعه ای از تفاوت بین عناصر مجاور. + +نوع: [اینترنت\*](https://clickhouse.yandex/docs/en/data_types/int_uint/#uint-ranges), [Int\*](https://clickhouse.yandex/docs/en/data_types/int_uint/#int-ranges), [شناور\*](https://clickhouse.yandex/docs/en/data_types/float/). + +**مثال** + +پرسوجو: + +``` sql +SELECT arrayDifference([1, 2, 3, 4]) +``` + +نتیجه: + +``` text +┌─arrayDifference([1, 2, 3, 4])─┐ +│ [0,1,1,1] │ +└───────────────────────────────┘ +``` + +مثال سرریز به علت نوع نتیجه اینترن64: + +پرسوجو: + +``` sql +SELECT arrayDifference([0, 10000000000000000000]) +``` + +نتیجه: + +``` text +┌─arrayDifference([0, 10000000000000000000])─┐ +│ [0,-8446744073709551616] │ +└────────────────────────────────────────────┘ +``` + +## حوزه ارریددیست {#arraydistinct} + +مجموعه ای را می گیرد و تنها شامل عناصر مجزا می شود. + +**نحو** + +``` sql +arrayDistinct(array) +``` + +**پارامترها** + +- `array` – [& حذف](https://clickhouse.yandex/docs/en/data_types/array/). + +**مقادیر بازگشتی** + +بازگرداندن مجموعه ای حاوی عناصر متمایز. + +**مثال** + +پرسوجو: + +``` sql +SELECT arrayDistinct([1, 2, 2, 3, 1]) +``` + +نتیجه: + +``` text +┌─arrayDistinct([1, 2, 2, 3, 1])─┐ +│ [1,2,3] │ +└────────────────────────────────┘ +``` + +## هشدار داده می شود) {#array_functions-arrayenumeratedense} + +بازگرداندن مجموعه ای از همان اندازه به عنوان مجموعه منبع, نشان می دهد که هر عنصر برای اولین بار در مجموعه منبع به نظر می رسد. + +مثال: + +``` sql +SELECT arrayEnumerateDense([10, 20, 10, 30]) +``` + +``` text +┌─arrayEnumerateDense([10, 20, 10, 30])─┐ +│ [1,2,1,3] │ +└───────────────────────────────────────┘ +``` + +## هشدار داده می شود) {#array-functions-arrayintersect} + +طول می کشد مجموعه ای با عناصر که در تمام مجموعه منبع در حال حاضر می گرداند. عناصر سفارش در مجموعه حاصل همان است که در مجموعه اول است. + +مثال: + +``` sql +SELECT + arrayIntersect([1, 2], [1, 3], [2, 3]) AS no_intersect, + arrayIntersect([1, 2], [1, 3], [1, 4]) AS intersect +``` + +``` text +┌─no_intersect─┬─intersect─┐ +│ [] │ [1] │ +└──────────────┴───────────┘ +``` + +## نمایش سایت {#arrayreduce} + +یک تابع کلی برای عناصر مجموعه ای اعمال می شود و نتیجه خود را باز می گرداند. نام تابع تجمع به عنوان یک رشته در نقل قول های تک منتقل می شود `'max'`, `'sum'`. 
هنگام استفاده از توابع دانه پارامتری پارامتر پس از نام تابع در پرانتز نشان داده شده است `'uniqUpTo(6)'`. + +**نحو** + +``` sql +arrayReduce(agg_func, arr1, arr2, ..., arrN) +``` + +**پارامترها** + +- `agg_func` — The name of an aggregate function which should be a constant [رشته](../../sql_reference/data_types/string.md). +- `arr` — Any number of [& حذف](../../sql_reference/data_types/array.md) نوع ستون به عنوان پارامترهای تابع تجمع. + +**مقدار بازگشتی** + +**مثال** + +``` sql +SELECT arrayReduce('max', [1, 2, 3]) +``` + +``` text +┌─arrayReduce('max', [1, 2, 3])─┐ +│ 3 │ +└───────────────────────────────┘ +``` + +اگر یک تابع جمع استدلال های متعدد طول می کشد, سپس این تابع باید به مجموعه های متعدد از همان اندازه اعمال. + +``` sql +SELECT arrayReduce('maxIf', [3, 5], [1, 0]) +``` + +``` text +┌─arrayReduce('maxIf', [3, 5], [1, 0])─┐ +│ 3 │ +└──────────────────────────────────────┘ +``` + +به عنوان مثال با یک تابع جمع پارامتری: + +``` sql +SELECT arrayReduce('uniqUpTo(3)', [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) +``` + +``` text +┌─arrayReduce('uniqUpTo(3)', [1, 2, 3, 4, 5, 6, 7, 8, 9, 10])─┐ +│ 4 │ +└─────────────────────────────────────────────────────────────┘ +``` + +## تغییرات {#arrayreduceinranges} + +یک تابع کلی برای عناصر مجموعه ای در محدوده های داده شده اعمال می شود و مجموعه ای حاوی نتیجه مربوط به هر محدوده را باز می گرداند. تابع همان نتیجه به عنوان چند بازگشت `arrayReduce(agg_func, arraySlice(arr1, index, length), ...)`. + +**نحو** + +``` sql +arrayReduceInRanges(agg_func, ranges, arr1, arr2, ..., arrN) +``` + +**پارامترها** + +- `agg_func` — The name of an aggregate function which should be a constant [رشته](../../sql_reference/data_types/string.md). +- `ranges` — The ranges to aggretate which should be an [& حذف](../../sql_reference/data_types/array.md) از [توپلس](../../sql_reference/data_types/tuple.md) که شامل شاخص و طول هر محدوده. +- `arr` — Any number of [& حذف](../../sql_reference/data_types/array.md) نوع ستون به عنوان پارامترهای تابع تجمع. + +**مقدار بازگشتی** + +**مثال** + +``` sql +SELECT arrayReduceInRanges( + 'sum', + [(1, 5), (2, 3), (3, 4), (4, 4)], + [1000000, 200000, 30000, 4000, 500, 60, 7] +) AS res +``` + +``` text +┌─res─────────────────────────┐ +│ [1234500,234000,34560,4567] │ +└─────────────────────────────┘ +``` + +## هشدار داده می شود) {#arrayreverse} + +بازگرداندن مجموعه ای از همان اندازه به عنوان مجموعه اصلی حاوی عناصر در جهت معکوس. + +مثال: + +``` sql +SELECT arrayReverse([1, 2, 3]) +``` + +``` text +┌─arrayReverse([1, 2, 3])─┐ +│ [3,2,1] │ +└─────────────────────────┘ +``` + +## معکوس) {#array-functions-reverse} + +مترادف برای [“arrayReverse”](#array_functions-arrayreverse) + +## ارریفلاتتن {#arrayflatten} + +مجموعه ای از ارریس ها را به یک مجموعه صاف تبدیل می کند. + +تابع: + +- امر به هر عمق مجموعه های تو در تو. +- طعم هایی را که در حال حاضر مسطح هستند تغییر نمی دهد. + +مجموعه مسطح شامل تمام عناصر از تمام منابع است. + +**نحو** + +``` sql +flatten(array_of_arrays) +``` + +نام مستعار: `flatten`. + +**پارامترها** + +- `array_of_arrays` — [& حذف](../../sql_reference/data_types/array.md) ارریس به عنوان مثال, `[[1,2,3], [4,5]]`. + +**مثالها** + +``` sql +SELECT flatten([[[1]], [[2], [3]]]) +``` + +``` text +┌─flatten(array(array([1]), array([2], [3])))─┐ +│ [1,2,3] │ +└─────────────────────────────────────────────┘ +``` + +## اررایکمپکت {#arraycompact} + +عناصر تکراری متوالی را از یک مجموعه حذف می کند. ترتیب مقادیر نتیجه به ترتیب در مجموعه منبع تعیین می شود. 
+ +**نحو** + +``` sql +arrayCompact(arr) +``` + +**پارامترها** + +`arr` — The [& حذف](../../sql_reference/data_types/array.md) برای بازرسی. + +**مقدار بازگشتی** + +مجموعه ای بدون تکراری. + +نوع: `Array`. + +**مثال** + +پرسوجو: + +``` sql +SELECT arrayCompact([1, 1, nan, nan, 2, 3, 3, 3]) +``` + +نتیجه: + +``` text +┌─arrayCompact([1, 1, nan, nan, 2, 3, 3, 3])─┐ +│ [1,nan,nan,2,3] │ +└────────────────────────────────────────────┘ +``` + +## ارریزیپ {#arrayzip} + +Combine multiple Array type columns into one Array\[Tuple(…)\] column + +**نحو** + +``` sql +arrayZip(arr1, arr2, ..., arrN) +``` + +**پارامترها** + +`arr` — Any number of [& حذف](../../sql_reference/data_types/array.md) ستون نوع به ترکیب. + +**مقدار بازگشتی** + +The result of Array\[Tuple(…)\] type after the combination of these arrays + +**مثال** + +پرسوجو: + +``` sql +SELECT arrayZip(['a', 'b', 'c'], ['d', 'e', 'f']); +``` + +نتیجه: + +``` text +┌─arrayZip(['a', 'b', 'c'], ['d', 'e', 'f'])─┐ +│ [('a','d'),('b','e'),('c','f')] │ +└────────────────────────────────────────────┘ +``` + +## ارریایکو {#arrayauc} + +محاسبه حراج (منطقه تحت منحنی, که یک مفهوم در یادگیری ماشین است, مشاهده اطلاعات بیشتر: https://en.wikipedia.org/wiki/receiver\_operating\_characteristic\#area\_under\_the\_curve). + +**نحو** + +``` sql +arrayAUC(arr_scores, arr_labels) +``` + +**پارامترها** +- `arr_scores` — scores prediction model gives. +- `arr_labels` — labels of samples, usually 1 for positive sample and 0 for negtive sample. + +**مقدار بازگشتی** +را برمی گرداند ارزش حراج با نوع شناور64. + +**مثال** +پرسوجو: + +``` sql +select arrayAUC([0.1, 0.4, 0.35, 0.8], [0, 0, 1, 1]) +``` + +نتیجه: + +``` text +┌─arrayAUC([0.1, 0.4, 0.35, 0.8], [0, 0, 1, 1])─┐ +│ 0.75 │ +└────────────────────────────────────────---──┘ +``` + +[مقاله اصلی](https://clickhouse.tech/docs/en/query_language/functions/array_functions/) diff --git a/docs/fa/sql_reference/functions/array_join.md b/docs/fa/sql_reference/functions/array_join.md new file mode 100644 index 00000000000..9d36eba1b89 --- /dev/null +++ b/docs/fa/sql_reference/functions/array_join.md @@ -0,0 +1,37 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 61 +toc_title: "\u0627\u0631\u0631\u06CC\u062C\u06CC\u0646" +--- + +# تابع ارریجین {#functions_arrayjoin} + +این یک تابع بسیار غیر معمول است. + +توابع عادی انجام مجموعه ای از ردیف را تغییر دهید, اما فقط تغییر مقادیر در هر سطر (کوتاه). +توابع مجموع فشرده سازی مجموعه ای از ردیف (برابر یا کاهش). +این ‘arrayJoin’ تابع طول می کشد هر سطر و تولید مجموعه ای از ردیف (اشکار). + +این تابع یک مجموعه را به عنوان یک استدلال می گیرد و ردیف منبع را به چندین ردیف برای تعدادی از عناصر در مجموعه منتشر می کند. +تمام مقادیر ستون ها به سادگی کپی می شوند به جز مقادیر در ستون ای که این تابع اعمال می شود. + +پرس و جو می توانید چند استفاده کنید `arrayJoin` توابع. در این مورد تحول انجام شده است چندین بار. + +توجه داشته باشید که ترتیب پیوستن به نحو در پرس و جو را انتخاب کنید, فراهم می کند که امکانات گسترده تر. 
+ +مثال: + +``` sql +SELECT arrayJoin([1, 2, 3] AS src) AS dst, 'Hello', src +``` + +``` text +┌─dst─┬─\'Hello\'─┬─src─────┐ +│ 1 │ Hello │ [1,2,3] │ +│ 2 │ Hello │ [1,2,3] │ +│ 3 │ Hello │ [1,2,3] │ +└─────┴───────────┴─────────┘ +``` + +[مقاله اصلی](https://clickhouse.tech/docs/en/query_language/functions/array_join/) diff --git a/docs/fa/sql_reference/functions/bit_functions.md b/docs/fa/sql_reference/functions/bit_functions.md new file mode 100644 index 00000000000..5399bb3857a --- /dev/null +++ b/docs/fa/sql_reference/functions/bit_functions.md @@ -0,0 +1,255 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 48 +toc_title: "\u0628\u06CC\u062A" +--- + +# توابع بیت {#bit-functions} + +بیت توابع کار برای هر جفت از انواع از uint8, uint16, uint32, uint64, int8, int16, int32, int64, float32 یا float64. + +نوع نتیجه یک عدد صحیح با بیت به حداکثر بیت از استدلال خود را برابر است. اگر حداقل یکی از استدلال امضا شده است, نتیجه یک شماره امضا شده است. اگر استدلال یک عدد ممیز شناور است, این است که به درون بازیگران64. + +## بیت و ب) {#bitanda-b} + +## bitOr(a, b) {#bitora-b} + +## هشدار داده می شود) {#bitxora-b} + +## bitNot(یک) {#bitnota} + +## اطلاعات دقیق) {#bitshiftlefta-b} + +## باز کردن پنجره روی برنامههای دیگر) {#bitshiftrighta-b} + +## هشدار داده می شود) {#bitrotatelefta-b} + +## حفاظت از بیت) {#bitrotaterighta-b} + +## بیتترین {#bittest} + +طول می کشد هر عدد صحیح و تبدیل به [شکل دودویی](https://en.wikipedia.org/wiki/Binary_number), بازگرداندن ارزش کمی در موقعیت مشخص. شمارش معکوس از 0 از سمت راست به سمت چپ شروع می شود. + +**نحو** + +``` sql +SELECT bitTest(number, index) +``` + +**پارامترها** + +- `number` – integer number. +- `index` – position of bit. + +**مقادیر بازگشتی** + +بازگرداندن مقدار کمی در موقعیت مشخص. + +نوع: `UInt8`. + +**مثال** + +مثلا, تعداد 43 در پایه-2 (دودویی) سیستم اعداد است 101011. + +پرسوجو: + +``` sql +SELECT bitTest(43, 1) +``` + +نتیجه: + +``` text +┌─bitTest(43, 1)─┐ +│ 1 │ +└────────────────┘ +``` + +مثال دیگر: + +پرسوجو: + +``` sql +SELECT bitTest(43, 2) +``` + +نتیجه: + +``` text +┌─bitTest(43, 2)─┐ +│ 0 │ +└────────────────┘ +``` + +## تماس {#bittestall} + +بازده نتیجه [ساخت منطقی](https://en.wikipedia.org/wiki/Logical_conjunction) (و اپراتور) از تمام بیت در موقعیت های داده شده. شمارش معکوس از 0 از سمت راست به سمت چپ شروع می شود. + +ساخت و ساز برای عملیات بیتی: + +0 AND 0 = 0 + +0 AND 1 = 0 + +1 AND 0 = 0 + +1 AND 1 = 1 + +**نحو** + +``` sql +SELECT bitTestAll(number, index1, index2, index3, index4, ...) +``` + +**پارامترها** + +- `number` – integer number. +- `index1`, `index2`, `index3`, `index4` – positions of bit. For example, for set of positions (`index1`, `index2`, `index3`, `index4`) درست است اگر و تنها اگر تمام موقعیت خود را درست هستند (`index1` ⋀ `index2`, ⋀ `index3` ⋀ `index4`). + +**مقادیر بازگشتی** + +بازده نتیجه منطقی conjuction. + +نوع: `UInt8`. + +**مثال** + +مثلا, تعداد 43 در پایه-2 (دودویی) سیستم اعداد است 101011. + +پرسوجو: + +``` sql +SELECT bitTestAll(43, 0, 1, 3, 5) +``` + +نتیجه: + +``` text +┌─bitTestAll(43, 0, 1, 3, 5)─┐ +│ 1 │ +└────────────────────────────┘ +``` + +مثال دیگر: + +پرسوجو: + +``` sql +SELECT bitTestAll(43, 0, 1, 3, 5, 2) +``` + +نتیجه: + +``` text +┌─bitTestAll(43, 0, 1, 3, 5, 2)─┐ +│ 0 │ +└───────────────────────────────┘ +``` + +## بیتستانی {#bittestany} + +بازده نتیجه [حکم منطقی](https://en.wikipedia.org/wiki/Logical_disjunction) (یا اپراتور) از تمام بیت در موقعیت های داده شده. شمارش معکوس از 0 از سمت راست به سمت چپ شروع می شود. 
+ +دستور برای عملیات بیتی: + +0 OR 0 = 0 + +0 OR 1 = 1 + +1 OR 0 = 1 + +1 OR 1 = 1 + +**نحو** + +``` sql +SELECT bitTestAny(number, index1, index2, index3, index4, ...) +``` + +**پارامترها** + +- `number` – integer number. +- `index1`, `index2`, `index3`, `index4` – positions of bit. + +**مقادیر بازگشتی** + +بازده نتیجه ساخت و ساز منطقی. + +نوع: `UInt8`. + +**مثال** + +مثلا, تعداد 43 در پایه-2 (دودویی) سیستم اعداد است 101011. + +پرسوجو: + +``` sql +SELECT bitTestAny(43, 0, 2) +``` + +نتیجه: + +``` text +┌─bitTestAny(43, 0, 2)─┐ +│ 1 │ +└──────────────────────┘ +``` + +مثال دیگر: + +پرسوجو: + +``` sql +SELECT bitTestAny(43, 4, 2) +``` + +نتیجه: + +``` text +┌─bitTestAny(43, 4, 2)─┐ +│ 0 │ +└──────────────────────┘ +``` + +## شمارش {#bitcount} + +محاسبه تعداد بیت را به یکی در نمایندگی دودویی از یک عدد است. + +**نحو** + +``` sql +bitCount(x) +``` + +**پارامترها** + +- `x` — [عدد صحیح](../../sql_reference/data_types/int_uint.md) یا [شناور نقطه](../../sql_reference/data_types/float.md) شماره. تابع با استفاده از نمایندگی ارزش در حافظه. این اجازه می دهد تا حمایت از اعداد ممیز شناور. + +**مقدار بازگشتی** + +- تعداد بیت را به یکی در تعداد ورودی. + +تابع مقدار ورودی را به یک نوع بزرگتر تبدیل نمی کند ([ثبت نام پسوند](https://en.wikipedia.org/wiki/Sign_extension)). بنابراین, مثلا, `bitCount(toUInt8(-1)) = 8`. + +نوع: `UInt8`. + +**مثال** + +نگاهی به عنوان مثال تعداد 333. نمایندگی دودویی: 00000001001101. + +پرسوجو: + +``` sql +SELECT bitCount(333) +``` + +نتیجه: + +``` text +┌─bitCount(333)─┐ +│ 5 │ +└───────────────┘ +``` + +[مقاله اصلی](https://clickhouse.tech/docs/en/query_language/functions/bit_functions/) diff --git a/docs/fa/sql_reference/functions/bitmap_functions.md b/docs/fa/sql_reference/functions/bitmap_functions.md new file mode 100644 index 00000000000..e561baef9c0 --- /dev/null +++ b/docs/fa/sql_reference/functions/bitmap_functions.md @@ -0,0 +1,496 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 49 +toc_title: "\u0646\u06AF\u0627\u0634\u062A \u0628\u06CC\u062A" +--- + +# توابع نگاشت بیت {#bitmap-functions} + +توابع بیت مپ برای دو بیت مپ محاسبه ارزش شی کار, این است که بازگشت بیت مپ جدید و یا کارتیت در حالی که با استفاده از محاسبه فرمول, مانند و, یا, صخره نوردی, و نه, و غیره. + +2 نوع از روش های ساخت و ساز برای شی بیت مپ وجود دارد. یکی این است که توسط گروه بیت مپ تابع تجمع با دولت ساخته شود, دیگر این است که توسط شی مجموعه ای ساخته شود. این نیز برای تبدیل شی بیت مپ به مجموعه شی. + +نقشه شهری روارینگ به یک ساختار داده در حالی که ذخیره سازی واقعی از اجسام بیت مپ پیچیده شده است. هنگامی که کارتیت کمتر از یا برابر است 32, با استفاده از عینیت مجموعه. هنگامی که کارتیت بیشتر از است 32, با استفاده از شی نقشه شهری روارینگ. به همین دلیل است ذخیره سازی مجموعه کارتیت کم سریع تر است. + +برای کسب اطلاعات بیشتر در مورد نقشه شهری روارینگ: [پرورش دهنده](https://github.com/RoaringBitmap/CRoaring). + +## طراحی بیت مپ {#bitmap_functions-bitmapbuild} + +ساخت یک بیت مپ از مجموعه عدد صحیح بدون علامت. + +``` sql +bitmapBuild(array) +``` + +**پارامترها** + +- `array` – unsigned integer array. + +**مثال** + +``` sql +SELECT bitmapBuild([1, 2, 3, 4, 5]) AS res, toTypeName(res) +``` + +``` text +┌─res─┬─toTypeName(bitmapBuild([1, 2, 3, 4, 5]))─────┐ +│  │ AggregateFunction(groupBitmap, UInt8) │ +└─────┴──────────────────────────────────────────────┘ +``` + +## بیت مپوری {#bitmaptoarray} + +تبدیل بیت مپ به مجموعه عدد صحیح. + +``` sql +bitmapToArray(bitmap) +``` + +**پارامترها** + +- `bitmap` – bitmap object. 
+ +**مثال** + +``` sql +SELECT bitmapToArray(bitmapBuild([1, 2, 3, 4, 5])) AS res +``` + +``` text +┌─res─────────┐ +│ [1,2,3,4,5] │ +└─────────────┘ +``` + +## اطلاعات دقیق {#bitmap-functions-bitmapsubsetinrange} + +زیرمجموعه بازگشت در محدوده مشخص شده (دامنه را شامل نمی شود). + +``` sql +bitmapSubsetInRange(bitmap, range_start, range_end) +``` + +**پارامترها** + +- `bitmap` – [شی نگاشت بیت](#bitmap_functions-bitmapbuild). +- `range_start` – range start point. Type: [UInt32](../../sql_reference/data_types/int_uint.md). +- `range_end` – range end point(excluded). Type: [UInt32](../../sql_reference/data_types/int_uint.md). + +**مثال** + +``` sql +SELECT bitmapToArray(bitmapSubsetInRange(bitmapBuild([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,100,200,500]), toUInt32(30), toUInt32(200))) AS res +``` + +``` text +┌─res───────────────┐ +│ [30,31,32,33,100] │ +└───────────────────┘ +``` + +## نمایش سایت {#bitmapsubsetlimit} + +ایجاد یک زیر مجموعه از بیت مپ با عناصر نفر گرفته شده بین `range_start` و `cardinality_limit`. + +**نحو** + +``` sql +bitmapSubsetLimit(bitmap, range_start, cardinality_limit) +``` + +**پارامترها** + +- `bitmap` – [شی نگاشت بیت](#bitmap_functions-bitmapbuild). +- `range_start` – The subset starting point. Type: [UInt32](../../sql_reference/data_types/int_uint.md). +- `cardinality_limit` – The subset cardinality upper limit. Type: [UInt32](../../sql_reference/data_types/int_uint.md). + +**مقدار بازگشتی** + +زیرمجموعه. + +نوع: `Bitmap object`. + +**مثال** + +پرسوجو: + +``` sql +SELECT bitmapToArray(bitmapSubsetLimit(bitmapBuild([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,100,200,500]), toUInt32(30), toUInt32(200))) AS res +``` + +نتیجه: + +``` text +┌─res───────────────────────┐ +│ [30,31,32,33,100,200,500] │ +└───────────────────────────┘ +``` + +## اطلاعات دقیق {#bitmap_functions-bitmapcontains} + +بررسی اینکه نگاشت بیت شامل یک عنصر است. + +``` sql +bitmapContains(haystack, needle) +``` + +**پارامترها** + +- `haystack` – [شی نگاشت بیت](#bitmap_functions-bitmapbuild), جایی که تابع جستجو. +- `needle` – Value that the function searches. Type: [UInt32](../../sql_reference/data_types/int_uint.md). + +**مقادیر بازگشتی** + +- 0 — If `haystack` شامل نمی شود `needle`. +- 1 — If `haystack` شامل `needle`. + +نوع: `UInt8`. + +**مثال** + +``` sql +SELECT bitmapContains(bitmapBuild([1,5,7,9]), toUInt32(9)) AS res +``` + +``` text +┌─res─┐ +│ 1 │ +└─────┘ +``` + +## بیتمافاسانی {#bitmaphasany} + +بررسی اینکه دو بیت مپ دارند تقاطع توسط برخی از عناصر. + +``` sql +bitmapHasAny(bitmap1, bitmap2) +``` + +اگر شما اطمینان حاصل کنید که `bitmap2` حاوی شدت یک عنصر, در نظر با استفاده از [اطلاعات دقیق](#bitmap_functions-bitmapcontains) تابع. این کار موثر تر است. + +**پارامترها** + +- `bitmap*` – bitmap object. + +**بازگشت ارزش** + +- `1` اگر `bitmap1` و `bitmap2` حداقل یک عنصر مشابه داشته باشید. +- `0` وگرنه + +**مثال** + +``` sql +SELECT bitmapHasAny(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res +``` + +``` text +┌─res─┐ +│ 1 │ +└─────┘ +``` + +## بیتمافاسال {#bitmaphasall} + +مشابه به `hasAll(array, array)` بازده 1 اگر بیت مپ اول شامل تمام عناصر از یک ثانیه, 0 در غیر این صورت. +اگر استدلال دوم بیت مپ خالی است و سپس باز می گردد 1. + +``` sql +bitmapHasAll(bitmap,bitmap) +``` + +**پارامترها** + +- `bitmap` – bitmap object. 
+ +**مثال** + +``` sql +SELECT bitmapHasAll(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res +``` + +``` text +┌─res─┐ +│ 0 │ +└─────┘ +``` + +## هشدار داده می شود {#bitmapcardinality} + +Retrun بیت مپ cardinality از نوع UInt64. + +``` sql +bitmapCardinality(bitmap) +``` + +**پارامترها** + +- `bitmap` – bitmap object. + +**مثال** + +``` sql +SELECT bitmapCardinality(bitmapBuild([1, 2, 3, 4, 5])) AS res +``` + +``` text +┌─res─┐ +│ 5 │ +└─────┘ +``` + +## بیت مپمن {#bitmapmin} + +Retrun کوچکترین مقدار از نوع UInt64 در مجموعه UINT32\_MAX اگر این مجموعه خالی است. + + bitmapMin(bitmap) + +**پارامترها** + +- `bitmap` – bitmap object. + +**مثال** + +``` sql +SELECT bitmapMin(bitmapBuild([1, 2, 3, 4, 5])) AS res +``` + + ┌─res─┐ + │ 1 │ + └─────┘ + +## جرم اتمی {#bitmapmax} + +جابجایی بزرگترین ارزش نوع اوینت64 در مجموعه, 0 اگر مجموعه ای خالی است. + + bitmapMax(bitmap) + +**پارامترها** + +- `bitmap` – bitmap object. + +**مثال** + +``` sql +SELECT bitmapMax(bitmapBuild([1, 2, 3, 4, 5])) AS res +``` + + ┌─res─┐ + │ 5 │ + └─────┘ + +## ترجمههای بیت مپ {#bitmaptransform} + +تبدیل مجموعه ای از ارزش ها در بیت مپ به مجموعه ای دیگر از ارزش, نتیجه یک بیت مپ جدید است. + + bitmapTransform(bitmap, from_array, to_array) + +**پارامترها** + +- `bitmap` – bitmap object. +- `from_array` – UInt32 array. For idx in range \[0, from\_array.size()), if bitmap contains from\_array\[idx\], then replace it with to\_array\[idx\]. Note that the result depends on array ordering if there are common elements between from\_array and to\_array. +- `to_array` – UInt32 array, its size shall be the same to from\_array. + +**مثال** + +``` sql +SELECT bitmapToArray(bitmapTransform(bitmapBuild([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), cast([5,999,2] as Array(UInt32)), cast([2,888,20] as Array(UInt32)))) AS res +``` + + ┌─res───────────────────┐ + │ [1,3,4,6,7,8,9,10,20] │ + └───────────────────────┘ + +## بیت مپند {#bitmapand} + +دو بیت مپ و محاسبه, نتیجه یک بیت مپ جدید است. + +``` sql +bitmapAnd(bitmap,bitmap) +``` + +**پارامترها** + +- `bitmap` – bitmap object. + +**مثال** + +``` sql +SELECT bitmapToArray(bitmapAnd(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS res +``` + +``` text +┌─res─┐ +│ [3] │ +└─────┘ +``` + +## نگاشت بیت {#bitmapor} + +دو بیت مپ و یا محاسبه, نتیجه یک بیت مپ جدید است. + +``` sql +bitmapOr(bitmap,bitmap) +``` + +**پارامترها** + +- `bitmap` – bitmap object. + +**مثال** + +``` sql +SELECT bitmapToArray(bitmapOr(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS res +``` + +``` text +┌─res─────────┐ +│ [1,2,3,4,5] │ +└─────────────┘ +``` + +## بیت مپکسور {#bitmapxor} + +دو محاسبه گز بیت مپ, نتیجه یک بیت مپ جدید است. + +``` sql +bitmapXor(bitmap,bitmap) +``` + +**پارامترها** + +- `bitmap` – bitmap object. + +**مثال** + +``` sql +SELECT bitmapToArray(bitmapXor(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS res +``` + +``` text +┌─res───────┐ +│ [1,2,4,5] │ +└───────────┘ +``` + +## بیت مپندو {#bitmapandnot} + +دو محاسبه بیت مپ اندنوت, نتیجه یک بیت مپ جدید است. + +``` sql +bitmapAndnot(bitmap,bitmap) +``` + +**پارامترها** + +- `bitmap` – bitmap object. + +**مثال** + +``` sql +SELECT bitmapToArray(bitmapAndnot(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS res +``` + +``` text +┌─res───┐ +│ [1,2] │ +└───────┘ +``` + +## اطلاعات دقیق {#bitmapandcardinality} + +دو بیت مپ و محاسبه بازگشت cardinality از نوع uint64. + +``` sql +bitmapAndCardinality(bitmap,bitmap) +``` + +**پارامترها** + +- `bitmap` – bitmap object. 
+ +**مثال** + +``` sql +SELECT bitmapAndCardinality(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res; +``` + +``` text +┌─res─┐ +│ 1 │ +└─────┘ +``` + +## کمبود سیگار {#bitmaporcardinality} + +دو بیت مپ و یا محاسبه بازگشت cardinality از نوع uint64. + +``` sql +bitmapOrCardinality(bitmap,bitmap) +``` + +**پارامترها** + +- `bitmap` – bitmap object. + +**مثال** + +``` sql +SELECT bitmapOrCardinality(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res; +``` + +``` text +┌─res─┐ +│ 5 │ +└─────┘ +``` + +## هشدار داده می شود {#bitmapxorcardinality} + +دو بیت مپ xor محاسبه بازگشت cardinality از نوع uint64. + +``` sql +bitmapXorCardinality(bitmap,bitmap) +``` + +**پارامترها** + +- `bitmap` – bitmap object. + +**مثال** + +``` sql +SELECT bitmapXorCardinality(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res; +``` + +``` text +┌─res─┐ +│ 4 │ +└─────┘ +``` + +## اطلاعات دقیق {#bitmapandnotcardinality} + +دو بیت مپ andnot محاسبه بازگشت cardinality از نوع uint64. + +``` sql +bitmapAndnotCardinality(bitmap,bitmap) +``` + +**پارامترها** + +- `bitmap` – bitmap object. + +**مثال** + +``` sql +SELECT bitmapAndnotCardinality(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res; +``` + +``` text +┌─res─┐ +│ 2 │ +└─────┘ +``` + +[مقاله اصلی](https://clickhouse.tech/docs/en/query_language/functions/bitmap_functions/) diff --git a/docs/fa/sql_reference/functions/comparison_functions.md b/docs/fa/sql_reference/functions/comparison_functions.md new file mode 100644 index 00000000000..aee5cfe664c --- /dev/null +++ b/docs/fa/sql_reference/functions/comparison_functions.md @@ -0,0 +1,37 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 36 +toc_title: "\u0645\u0642\u0627\u06CC\u0633\u0647" +--- + +# توابع مقایسه {#comparison-functions} + +توابع مقایسه همیشه بازگشت 0 یا 1 (اوینت8). + +انواع زیر را می توان مقایسه کرد: + +- اعداد +- رشته ها و رشته های ثابت +- تاریخ +- تاریخ با زمان + +در هر گروه, اما نه بین گروه های مختلف. + +مثلا, شما می توانید یک تاریخ را با یک رشته مقایسه نیست. شما مجبور به استفاده از یک تابع برای تبدیل رشته به یک تاریخ, و یا بالعکس. + +رشته ها با بایت مقایسه. یک رشته کوتاه تر کوچکتر از همه رشته هایی است که با این کار شروع می شوند و شامل حداقل یک شخصیت دیگر می شوند. + +## برابر, = ب و = = ب اپراتور {#function-equals} + +## کار در حالت اضطراری, یک ! اپراتور = ب و \< \> ب {#function-notequals} + +## کمتر, \< اپراتور {#function-less} + +## بیشتر, \> اپراتور {#function-greater} + +## اطلاعات دقیق {#function-lessorequals} + +## در حال بارگذاری {#function-greaterorequals} + +[مقاله اصلی](https://clickhouse.tech/docs/en/query_language/functions/comparison_functions/) diff --git a/docs/fa/sql_reference/functions/conditional_functions.md b/docs/fa/sql_reference/functions/conditional_functions.md new file mode 100644 index 00000000000..a57820687dd --- /dev/null +++ b/docs/fa/sql_reference/functions/conditional_functions.md @@ -0,0 +1,207 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 43 +toc_title: "\u0634\u0631\u0637\u06CC " +--- + +# توابع شرطی {#conditional-functions} + +## اگر {#if} + +کنترل انشعاب مشروط. بر خلاف اکثر سیستم های تاتر همیشه هر دو عبارت را ارزیابی کنید `then` و `else`. + +**نحو** + +``` sql +SELECT if(cond, then, else) +``` + +اگر شرایط `cond` ارزیابی به یک مقدار غیر صفر, می گرداند در نتیجه بیان `then` و نتیجه بیان `else`, اگر در حال حاضر, قلم است. اگر `cond` صفر یا `NULL` سپس نتیجه `then` بیان نادیده گرفته شده است و در نتیجه `else` عبارت, در صورت حاضر, بازگشته است. 
+ +**پارامترها** + +- `cond` – The condition for evaluation that can be zero or not. The type is UInt8, Nullable(UInt8) or NULL. +- `then` - بیان به بازگشت اگر شرایط ملاقات کرده است. +- `else` - بیان به بازگشت اگر شرایط ملاقات نکرده است. + +**مقادیر بازگشتی** + +تابع اجرا می شود `then` و `else` عبارات و نتیجه خود را بر می گرداند, بسته به اینکه شرایط `cond` به پایان رسید تا صفر یا نه. + +**مثال** + +پرسوجو: + +``` sql +SELECT if(1, plus(2, 2), plus(2, 6)) +``` + +نتیجه: + +``` text +┌─plus(2, 2)─┐ +│ 4 │ +└────────────┘ +``` + +پرسوجو: + +``` sql +SELECT if(0, plus(2, 2), plus(2, 6)) +``` + +نتیجه: + +``` text +┌─plus(2, 6)─┐ +│ 8 │ +└────────────┘ +``` + +- `then` و `else` باید کمترین نوع مشترک دارند. + +**مثال:** + +اینو بگیر `LEFT_RIGHT` جدول: + +``` sql +SELECT * +FROM LEFT_RIGHT + +┌─left─┬─right─┐ +│ ᴺᵁᴸᴸ │ 4 │ +│ 1 │ 3 │ +│ 2 │ 2 │ +│ 3 │ 1 │ +│ 4 │ ᴺᵁᴸᴸ │ +└──────┴───────┘ +``` + +پرس و جو زیر مقایسه می کند `left` و `right` مقادیر: + +``` sql +SELECT + left, + right, + if(left < right, 'left is smaller than right', 'right is greater or equal than left') AS is_smaller +FROM LEFT_RIGHT +WHERE isNotNull(left) AND isNotNull(right) + +┌─left─┬─right─┬─is_smaller──────────────────────────┐ +│ 1 │ 3 │ left is smaller than right │ +│ 2 │ 2 │ right is greater or equal than left │ +│ 3 │ 1 │ right is greater or equal than left │ +└──────┴───────┴─────────────────────────────────────┘ +``` + +یادداشت: `NULL` ارزش ها در این مثال استفاده نمی شود, بررسی [ارزشهای پوچ در شرطی](#null-values-in-conditionals) بخش. + +## اپراتور سه تایی {#ternary-operator} + +این همان کار می کند `if` تابع. + +نحو: `cond ? then : else` + +بازگشت `then` اگر `cond` ارزیابی درست باشد (بیشتر از صفر), در غیر این صورت بازده `else`. + +- `cond` باید از نوع باشد `UInt8` و `then` و `else` باید کمترین نوع مشترک دارند. + +- `then` و `else` می تواند باشد `NULL` + +**همچنین نگاه کنید به** + +- [اطلاعات دقیق](other_functions.md#ifnotfinite). + +## چندف {#multiif} + +اجازه می دهد تا شما را به نوشتن [CASE](../operators.md#operator_case) اپراتور فشرده تر در پرس و جو. + +نحو: `multiIf(cond_1, then_1, cond_2, then_2, ..., else)` + +**پارامترها:** + +- `cond_N` — The condition for the function to return `then_N`. +- `then_N` — The result of the function when executed. +- `else` — The result of the function if none of the conditions is met. + +تابع می پذیرد `2N+1` پارامترها + +**مقادیر بازگشتی** + +تابع یکی از مقادیر را برمی گرداند `then_N` یا `else`, بسته به شرایط `cond_N`. + +**مثال** + +دوباره با استفاده از `LEFT_RIGHT` جدول + +``` sql +SELECT + left, + right, + multiIf(left < right, 'left is smaller', left > right, 'left is greater', left = right, 'Both equal', 'Null value') AS result +FROM LEFT_RIGHT + +┌─left─┬─right─┬─result──────────┐ +│ ᴺᵁᴸᴸ │ 4 │ Null value │ +│ 1 │ 3 │ left is smaller │ +│ 2 │ 2 │ Both equal │ +│ 3 │ 1 │ left is greater │ +│ 4 │ ᴺᵁᴸᴸ │ Null value │ +└──────┴───────┴─────────────────┘ +``` + +## با استفاده از نتایج شرطی به طور مستقیم {#using-conditional-results-directly} + +شرطی همیشه به نتیجه `0`, `1` یا `NULL`. بنابراین شما می توانید نتایج شرطی به طور مستقیم مثل این استفاده کنید: + +``` sql +SELECT left < right AS is_small +FROM LEFT_RIGHT + +┌─is_small─┐ +│ ᴺᵁᴸᴸ │ +│ 1 │ +│ 0 │ +│ 0 │ +│ ᴺᵁᴸᴸ │ +└──────────┘ +``` + +## ارزشهای پوچ در شرطی {#null-values-in-conditionals} + +زمانی که `NULL` ارزش ها در شرطی درگیر, نتیجه نیز خواهد بود `NULL`. 
+ +``` sql +SELECT + NULL < 1, + 2 < NULL, + NULL < NULL, + NULL = NULL + +┌─less(NULL, 1)─┬─less(2, NULL)─┬─less(NULL, NULL)─┬─equals(NULL, NULL)─┐ +│ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ +└───────────────┴───────────────┴──────────────────┴────────────────────┘ +``` + +بنابراین شما باید نمایش داده شد خود را با دقت ساخت اگر انواع هستند `Nullable`. + +مثال زیر نشان می دهد این شکست برای اضافه کردن شرایط برابر به `multiIf`. + +``` sql +SELECT + left, + right, + multiIf(left < right, 'left is smaller', left > right, 'right is smaller', 'Both equal') AS faulty_result +FROM LEFT_RIGHT + +┌─left─┬─right─┬─faulty_result────┐ +│ ᴺᵁᴸᴸ │ 4 │ Both equal │ +│ 1 │ 3 │ left is smaller │ +│ 2 │ 2 │ Both equal │ +│ 3 │ 1 │ right is smaller │ +│ 4 │ ᴺᵁᴸᴸ │ Both equal │ +└──────┴───────┴──────────────────┘ +``` + +[مقاله اصلی](https://clickhouse.tech/docs/en/query_language/functions/conditional_functions/) diff --git a/docs/fa/sql_reference/functions/date_time_functions.md b/docs/fa/sql_reference/functions/date_time_functions.md new file mode 100644 index 00000000000..8a34ad69b3f --- /dev/null +++ b/docs/fa/sql_reference/functions/date_time_functions.md @@ -0,0 +1,451 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 39 +toc_title: "\u06A9\u0627\u0631 \u0628\u0627 \u062A\u0627\u0631\u06CC\u062E \u0648\ + \ \u0632\u0645\u0627\u0646" +--- + +# توابع برای کار با تاریخ و زمان {#functions-for-working-with-dates-and-times} + +پشتیبانی از مناطق زمانی + +همه توابع برای کار با تاریخ و زمان است که یک استفاده منطقی برای منطقه زمانی می تواند یک زمان اختیاری استدلال منطقه دوم قبول. مثال: اسیا/یکاترینبورگ. در این مورد از منطقه زمانی مشخص شده به جای محلی (پیش فرض) استفاده می کنند. + +``` sql +SELECT + toDateTime('2016-06-15 23:00:00') AS time, + toDate(time) AS date_local, + toDate(time, 'Asia/Yekaterinburg') AS date_yekat, + toString(time, 'US/Samoa') AS time_samoa +``` + +``` text +┌────────────────time─┬─date_local─┬─date_yekat─┬─time_samoa──────────┐ +│ 2016-06-15 23:00:00 │ 2016-06-15 │ 2016-06-16 │ 2016-06-15 09:00:00 │ +└─────────────────────┴────────────┴────────────┴─────────────────────┘ +``` + +فقط مناطق زمانی که از مجموعه مقالات توسط تعداد کل ساعت متفاوت پشتیبانی می شوند. + +## توتیمزون {#totimezone} + +تبدیل زمان یا تاریخ و زمان به منطقه زمانی مشخص شده است. + +## اسباب بازی {#toyear} + +تبدیل یک تاریخ یا تاریخ با زمان به یک عدد اوینت16 حاوی شماره سال (میلادی). + +## فهرست توزیع جدید {#toquarter} + +تبدیل یک تاریخ یا تاریخ با زمان به یک عدد کوچک8 حاوی شماره سه ماهه. + +## تامونت {#tomonth} + +تبدیل یک تاریخ یا تاریخ با زمان به یک عدد کوچک8 حاوی شماره ماه (1-12). + +## سال {#todayofyear} + +تبدیل یک تاریخ و یا تاریخ با گذشت زمان به یک uint16 تعداد شامل تعداد روز از سال (1-366). + +## تودیفمون {#todayofmonth} + +تبدیل یک تاریخ یا تاریخ با زمان به یک عدد اوینت8 حاوی تعداد روز از ماه (1-31). + +## تدیفوک {#todayofweek} + +تبدیل یک تاریخ یا تاریخ با زمان به یک عدد اوینت8 حاوی تعداد روز هفته (دوشنبه است 1, و یکشنبه است 7). + +## تمام {#tohour} + +تبدیل تاریخ با هم به یک uint8 شماره حاوی تعداد ساعت در زمان 24 ساعته (0-23). +This function assumes that if clocks are moved ahead, it is by one hour and occurs at 2 a.m., and if clocks are moved back, it is by one hour and occurs at 3 a.m. (which is not always true – even in Moscow the clocks were twice changed at a different time). + +## تامینوت {#tominute} + +تبدیل تاریخ با هم به یک uint8 شماره حاوی تعداد دقیقه از ساعت (0-59). 
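+
+A small illustration of the extraction functions above (the timestamp is an arbitrary sample value):
+
+``` sql
+SELECT
+    toHour(toDateTime('2020-03-12 18:45:30')) AS hour,
+    toMinute(toDateTime('2020-03-12 18:45:30')) AS minute
+```
+
+``` text
+┌─hour─┬─minute─┐
+│   18 │     45 │
+└──────┴────────┘
+```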
+ +## جای خالی {#tosecond} + +تبدیل تاریخ با هم به یک uint8 شماره حاوی شماره دوم در دقیقه (0-59). +ثانیه جهش برای به حساب نمی. + +## تیونیتیمستمپ {#to-unix-timestamp} + +برای استدلال حسگر ناحیه رنگی: تبدیل ارزش به نمایندگی عددی داخلی خود (برچسب زمان یونیکس). +برای استدلال رشته: تاریخ ساعت پارسه از رشته با توجه به منطقه زمانی (بحث دوم اختیاری, منطقه زمانی سرور به طور پیش فرض استفاده می شود) و مربوط برچسب زمان یونیکس می گرداند. +برای استدلال تاریخ: رفتار نامشخص است. + +**نحو** + +``` sql +toUnixTimestamp(datetime) +toUnixTimestamp(str, [timezone]) +``` + +**مقدار بازگشتی** + +- زمان یونیکس را برمی گرداند. + +نوع: `UInt32`. + +**مثال** + +پرسوجو: + +``` sql +SELECT toUnixTimestamp('2017-11-05 08:07:47', 'Asia/Tokyo') AS unix_timestamp +``` + +نتیجه: + +``` text +┌─unix_timestamp─┐ +│ 1509836867 │ +└────────────────┘ +``` + +## سال نو {#tostartofyear} + +دور یک تاریخ یا تاریخ با زمان به روز اول سال. +تاریخ را برمی گرداند. + +## تاستارتوفیزیر {#tostartofisoyear} + +دور کردن تاریخ یا تاریخ با زمان به روز اول سال ایزو. +تاریخ را برمی گرداند. + +## toStartOfQuarter {#tostartofquarter} + +دور یک تاریخ یا تاریخ با زمان به روز اول سه ماهه. +اولین روز از سه ماهه است یا 1 ژانویه, 1 مارس, 1 جولای, یا 1 اکتبر. +تاریخ را برمی گرداند. + +## ماهی تابه {#tostartofmonth} + +دور پایین تاریخ یا تاریخ با زمان به روز اول ماه. +تاریخ را برمی گرداند. + +!!! attention "توجه" + رفتار تجزیه تاریخ نادرست اجرای خاص است. تاتر ممکن است صفر تاریخ بازگشت, پرتاب یک استثنا و یا انجام “natural” سرریز کردن. + +## روز قیامت {#tomonday} + +دور کردن یک تاریخ یا تاریخ با زمان به نزدیکترین دوشنبه. +تاریخ را برمی گرداند. + +## تستارتوفک (تی \[, حالت\]) {#tostartofweektmode} + +دور یک تاریخ یا تاریخ را با زمان به نزدیکترین یکشنبه یا دوشنبه با حالت. +تاریخ را برمی گرداند. +استدلال حالت کار می کند دقیقا مانند استدلال حالت به یدک کش (). برای نحو تک استدلال, ارزش حالت 0 استفاده شده است. + +## روزهای سه بعدی {#tostartofday} + +دور پایین تاریخ با زمان به شروع روز. + +## تاستارتوفهور {#tostartofhour} + +دور پایین تاریخ با زمان به شروع ساعت. + +## حفاظت {#tostartofminute} + +دور پایین تاریخ با زمان به شروع دقیقه. + +## تستارتوفیفومینوت {#tostartoffiveminute} + +دور پایین تاریخ با زمان به شروع فاصله پنج دقیقه. + +## حفاظت {#tostartoftenminutes} + +دور پایین تاریخ با زمان به شروع فاصله ده دقیقه. + +## toStartOfFifteenMinutes {#tostartoffifteenminutes} + +دور پایین تاریخ با زمان به شروع فاصله پانزده دقیقه. + +## toStartOfInterval(time\_or\_data فاصله x واحد \[, time\_zone\]) {#tostartofintervaltime-or-data-interval-x-unit-time-zone} + +این یک تعمیم توابع دیگر به نام است `toStartOf*`. به عنوان مثال, +`toStartOfInterval(t, INTERVAL 1 year)` همان را برمی گرداند `toStartOfYear(t)`, +`toStartOfInterval(t, INTERVAL 1 month)` همان را برمی گرداند `toStartOfMonth(t)`, +`toStartOfInterval(t, INTERVAL 1 day)` همان را برمی گرداند `toStartOfDay(t)`, +`toStartOfInterval(t, INTERVAL 15 minute)` همان را برمی گرداند `toStartOfFifteenMinutes(t)` و غیره + +## & تمام کردن {#totime} + +تبدیل یک تاریخ با زمان به یک تاریخ ثابت خاص, در حالی که حفظ زمان. + +## ترلتیویینوم {#torelativeyearnum} + +تبدیل یک تاریخ با زمان و یا تاریخ به تعداد سال, با شروع از یک نقطه ثابت خاص در گذشته. + +## ترلتیواارترن {#torelativequarternum} + +تبدیل یک تاریخ با زمان و یا تاریخ به تعداد سه ماهه, با شروع از یک نقطه ثابت خاص در گذشته. + +## ترلتیومنتنوم {#torelativemonthnum} + +تبدیل یک تاریخ با زمان و یا تاریخ به تعداد ماه, با شروع از یک نقطه ثابت خاص در گذشته. + +## ترلتیواکنام {#torelativeweeknum} + +تبدیل یک تاریخ با زمان و یا تاریخ به تعداد هفته, با شروع از یک نقطه ثابت خاص در گذشته. 
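+
+Because the fixed starting point is an implementation detail, the `toRelative*` functions are mostly useful for taking the difference of two values in the given unit. A sketch with arbitrary sample dates:
+
+``` sql
+SELECT toRelativeMonthNum(toDate('2020-03-12')) - toRelativeMonthNum(toDate('2019-12-31')) AS months_between
+```
+
+``` text
+┌─months_between─┐
+│              3 │
+└────────────────┘
+```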
+
+## toRelativeDayNum {#torelativedaynum}
+
+تبدیل یک تاریخ با زمان یا تاریخ به شماره روز, با شروع از یک نقطه ثابت خاص در گذشته.
+
+## toRelativeHourNum {#torelativehournum}
+
+تبدیل یک تاریخ با زمان یا تاریخ به شماره ساعت, با شروع از یک نقطه ثابت خاص در گذشته.
+
+## toRelativeMinuteNum {#torelativeminutenum}
+
+تبدیل یک تاریخ با زمان یا تاریخ به شماره دقیقه, با شروع از یک نقطه ثابت خاص در گذشته.
+
+## toRelativeSecondNum {#torelativesecondnum}
+
+تبدیل یک تاریخ با زمان یا تاریخ به شماره ثانیه, با شروع از یک نقطه ثابت خاص در گذشته.
+
+## toISOYear {#toisoyear}
+
+تبدیل یک تاریخ یا تاریخ با زمان به یک عدد UInt16 حاوی شماره سال ISO.
+
+## toISOWeek {#toisoweek}
+
+تبدیل یک تاریخ یا تاریخ با زمان به یک عدد UInt8 حاوی شماره هفته ISO.
+
+## toWeek(date\[, mode\]) {#toweekdatemode}
+
+این تابع شماره هفته را برای تاریخ یا تاریخ ساعت برمی گرداند. فرم دو ارگومانی toWeek() به شما امکان می دهد مشخص کنید که هفته از یکشنبه شروع شود یا دوشنبه و اینکه مقدار بازگشتی در محدوده 0 تا 53 باشد یا 1 تا 53. اگر ارگومان mode حذف شود، مقدار پیش فرض 0 است.
+`toISOWeek()` یک تابع سازگاری است که معادل `toWeek(date,3)` است.
+جدول زیر توضیح می دهد که ارگومان mode چگونه کار می کند.
+
+| حالت | اولین روز هفته | گستره | Week 1 is the first week …  |
+|------|----------------|-------|-----------------------------|
+| 0    | یکشنبه         | 0-53  | با یکشنبه در این سال        |
+| 1    | دوشنبه         | 0-53  | با 4 یا چند روز در سال جاری |
+| 2    | یکشنبه         | 1-53  | با یکشنبه در این سال        |
+| 3    | دوشنبه         | 1-53  | با 4 یا چند روز در سال جاری |
+| 4    | یکشنبه         | 0-53  | با 4 یا چند روز در سال جاری |
+| 5    | دوشنبه         | 0-53  | با دوشنبه در این سال        |
+| 6    | یکشنبه         | 1-53  | با 4 یا چند روز در سال جاری |
+| 7    | دوشنبه         | 1-53  | با دوشنبه در این سال        |
+| 8    | یکشنبه         | 1-53  | شامل ژانویه 1               |
+| 9    | دوشنبه         | 1-53  | شامل ژانویه 1               |
+
+برای مقادیر mode با معنی "with 4 or more days this year"، هفته ها طبق ISO 8601:1988 شماره گذاری می شوند:
+
+- اگر هفته ای که حاوی 1 ژانویه است 4 روز یا بیشتر در سال جدید داشته باشد, هفته 1 است.
+
+- در غیر این صورت, اخرین هفته سال قبل است و هفته بعدی هفته 1 است.
+
+برای مقادیر mode با معنی "contains January 1", هفته ای که حاوی 1 ژانویه است هفته 1 است. مهم نیست که این هفته چند روز در سال جدید داشته باشد, حتی اگر فقط یک روز داشته باشد.
+
+``` sql
+toWeek(date[, mode][, Timezone])
+```
+
+**پارامترها**
+
+- `date` – Date or DateTime.
+- `mode` – Optional parameter, Range of values is \[0,9\], default is 0.
+- `Timezone` – Optional parameter, it behaves like any other conversion function.
+
+**مثال**
+
+``` sql
+SELECT toDate('2016-12-27') AS date, toWeek(date) AS week0, toWeek(date,1) AS week1, toWeek(date,9) AS week9;
+```
+
+``` text
+┌───────date─┬─week0─┬─week1─┬─week9─┐
+│ 2016-12-27 │    52 │    52 │     1 │
+└────────────┴───────┴───────┴───────┘
+```
+
+## toYearWeek(date\[, mode\]) {#toyearweekdatemode}
+
+سال و هفته را برای یک تاریخ برمی گرداند. سال موجود در نتیجه ممکن است برای اولین و اخرین هفته سال با سال موجود در ارگومان تاریخ متفاوت باشد.
+
+ارگومان mode دقیقا مانند ارگومان mode در toWeek() کار می کند. برای نحو تک ارگومانی، مقدار mode برابر 0 استفاده می شود.
+
+`toISOYear()` یک تابع سازگاری است که معادل `intDiv(toYearWeek(date,3),100)` است.
+
+**مثال**
+
+``` sql
+SELECT toDate('2016-12-27') AS date, toYearWeek(date) AS yearWeek0, toYearWeek(date,1) AS yearWeek1, toYearWeek(date,9) AS yearWeek9;
+```
+
+``` text
+┌───────date─┬─yearWeek0─┬─yearWeek1─┬─yearWeek9─┐
+│ 2016-12-27 │    201652 │    201652 │    201701 │
+└────────────┴───────────┴───────────┴───────────┘
+```
+
+## حالا {#now}
+
+صفر ارگومان می پذیرد و زمان فعلی را در یکی از لحظات اجرای درخواست برمی گرداند.
+این تابع یک مقدار ثابت برمی گرداند، حتی اگر اجرای درخواست زمان زیادی طول بکشد.
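+
+مثال فرضی (مقادیر خروجی به لحظه اجرای درخواست و منطقه زمانی سرور بستگی دارند):
+
+``` sql
+SELECT now() AS current_time, toDate(now()) AS current_date
+```
+
+``` text
+┌────────current_time─┬─current_date─┐
+│ 2020-03-12 20:19:40 │   2020-03-12 │
+└─────────────────────┴──────────────┘
+```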
+
+## امروز {#today}
+
+صفر ارگومان می پذیرد و تاریخ جاری را در یکی از لحظات اجرای درخواست برمی گرداند.
+همان ‘toDate(now())’.
+
+## دیروز {#yesterday}
+
+صفر ارگومان می پذیرد و تاریخ دیروز را در یکی از لحظات اجرای درخواست برمی گرداند.
+همان ‘today() - 1’.
+
+## timeSlot {#timeslot}
+
+زمان را به پایین به نیم ساعت گرد می کند.
+این تابع مختص یاندکس.متریکا است, زیرا نیم ساعت حداقل مقدار زمان برای شکستن یک جلسه به دو جلسه است, اگر یک تگ ردیابی نمایش های متوالی یک کاربر را نشان دهد که فاصله زمانی انها اکیدا بیش از این مقدار باشد. این به این معنی است که تاپل ها (شناسه تگ, شناسه کاربر, و شکاف زمانی) را می توان برای جستجوی نمایش هایی که در جلسه مربوطه قرار دارند استفاده کرد.
+
+## toYYYYMM {#toyyyymm}
+
+تبدیل یک تاریخ یا تاریخ با زمان به یک عدد UInt32 حاوی شماره سال و ماه (YYYY \* 100 + MM).
+
+## toYYYYMMDD {#toyyyymmdd}
+
+تبدیل یک تاریخ یا تاریخ با زمان به یک عدد UInt32 حاوی شماره سال و ماه و روز (YYYY \* 10000 + MM \* 100 + DD).
+
+## toYYYYMMDDhhmmss {#toyyyymmddhhmmss}
+
+تبدیل یک تاریخ یا تاریخ با زمان به یک عدد UInt64 حاوی شماره سال, ماه, روز و زمان (YYYY \* 10000000000 + MM \* 100000000 + DD \* 1000000 + hh \* 10000 + mm \* 100 + ss).
+
+## addYears, addMonths, addWeeks, addDays, addHours, addMinutes, addSeconds, addQuarters {#addyears-addmonths-addweeks-adddays-addhours-addminutes-addseconds-addquarters}
+
+این تابع یک بازه تاریخ/زمان را به یک تاریخ/تاریخ ساعت اضافه می کند و سپس تاریخ/تاریخ ساعت را برمی گرداند. به عنوان مثال:
+
+``` sql
+WITH
+    toDate('2018-01-01') AS date,
+    toDateTime('2018-01-01 00:00:00') AS date_time
+SELECT
+    addYears(date, 1) AS add_years_with_date,
+    addYears(date_time, 1) AS add_years_with_date_time
+```
+
+``` text
+┌─add_years_with_date─┬─add_years_with_date_time─┐
+│          2019-01-01 │      2019-01-01 00:00:00 │
+└─────────────────────┴──────────────────────────┘
+```
+
+## subtractYears, subtractMonths, subtractWeeks, subtractDays, subtractHours, subtractMinutes, subtractSeconds, subtractQuarters {#subtractyears-subtractmonths-subtractweeks-subtractdays-subtracthours-subtractminutes-subtractseconds-subtractquarters}
+
+این تابع یک بازه تاریخ/زمان را از یک تاریخ/تاریخ ساعت کم می کند و سپس تاریخ/تاریخ ساعت را برمی گرداند. به عنوان مثال:
+
+``` sql
+WITH
+    toDate('2019-01-01') AS date,
+    toDateTime('2019-01-01 00:00:00') AS date_time
+SELECT
+    subtractYears(date, 1) AS subtract_years_with_date,
+    subtractYears(date_time, 1) AS subtract_years_with_date_time
+```
+
+``` text
+┌─subtract_years_with_date─┬─subtract_years_with_date_time─┐
+│               2018-01-01 │           2018-01-01 00:00:00 │
+└──────────────────────────┴───────────────────────────────┘
+```
+
+## dateDiff {#datediff}
+
+تفاوت بین دو مقدار تاریخ یا تاریخ ساعت را برمی گرداند.
+
+**نحو**
+
+``` sql
+dateDiff('unit', startdate, enddate, [timezone])
+```
+
+**پارامترها**
+
+- `unit` — Time unit, in which the returned value is expressed. [رشته](../syntax.md#syntax-string-literal).
+
+        Supported values:
+
+        | unit    |
+        | ------- |
+        | second  |
+        | minute  |
+        | hour    |
+        | day     |
+        | week    |
+        | month   |
+        | quarter |
+        | year    |
+
+- `startdate` — The first time value to compare. [تاریخ](../../sql_reference/data_types/date.md) یا [DateTime](../../sql_reference/data_types/datetime.md).
+
+- `enddate` — The second time value to compare. [تاریخ](../../sql_reference/data_types/date.md) یا [DateTime](../../sql_reference/data_types/datetime.md).
+
+- `timezone` — Optional parameter. If specified, it is applied to both `startdate` و `enddate`. اگر مشخص نشود، مناطق زمانی `startdate` و `enddate` استفاده می شوند. اگر یکسان نباشند، نتیجه نامشخص است.
+
+**مقدار بازگشتی**
+
+تفاوت بین `startdate` و `enddate` بیان شده در `unit`.
+
+نوع: `int`.
+
+**مثال**
+
+پرسوجو:
+
+``` sql
+SELECT dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'));
+```
+
+نتیجه:
+
+``` text
+┌─dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'))─┐
+│                                                                                      25 │
+└─────────────────────────────────────────────────────────────────────────────────────────┘
+```
+
+## timeSlots(StartTime, Duration\[, Size\]) {#timeslotsstarttime-duration-size}
+
+برای یک بازه زمانی که از ‘StartTime’ شروع می شود و ‘Duration’ ثانیه ادامه دارد, مجموعه ای از لحظات زمانی برمی گرداند که از نقاط این بازه, گرد شده به پایین به ‘Size’ ثانیه, تشکیل شده است. ‘Size’ یک پارامتر اختیاری است: یک ثابت UInt32 که به طور پیش فرض 1800 است.
+به عنوان مثال, `timeSlots(toDateTime('2012-01-01 12:20:00'), 600) = [toDateTime('2012-01-01 12:00:00'), toDateTime('2012-01-01 12:30:00')]`.
+این برای جستجوی نمایش صفحات در جلسه مربوطه ضروری است.
+
+## formatDateTime(Time, Format\[, Timezone\]) {#formatdatetime}
+
+Function formats a Time according to the given Format string. N.B.: Format is a constant expression, e.g. you cannot have multiple formats for a single result column.
+
+اصلاح کننده های پشتیبانی شده برای Format:
+(ستون "مثال" نتیجه قالب بندی را برای زمان `2018-01-02 22:33:44` نشان می دهد)
+
+| تغییردهنده | توصیف                                                                     | مثال       |
+|------------|---------------------------------------------------------------------------|------------|
+| %C         | سال تقسیم بر 100 و کوتاه شده به عدد صحیح (00-99)                           | 20         |
+| %d         | روز ماه, با صفر ابتدایی (01-31)                                            | 02         |
+| %D         | تاریخ کوتاه MM/DD/YY, معادل %m/%d/%y                                       | 01/02/18   |
+| %e         | روز ماه, با فاصله ابتدایی (1-31)                                           | 2          |
+| %F         | تاریخ کوتاه YYYY-MM-DD, معادل %Y-%m-%d                                     | 2018-01-02 |
+| %H         | ساعت در قالب 24 ساعته (00-23)                                              | 22         |
+| %I         | ساعت در قالب 12 ساعته (01-12)                                              | 10         |
+| %j         | روز سال (001-366)                                                          | 002        |
+| %m         | ماه به صورت عدد اعشاری (01-12)                                             | 01         |
+| %M         | دقیقه (00-59)                                                              | 33         |
+| %n         | نویسه خط جدید ('\n')                                                       |            |
+| %p         | نشانگر AM یا PM                                                            | PM         |
+| %R         | زمان 24 ساعته HH:MM, معادل %H:%M                                           | 22:33      |
+| %S         | ثانیه (00-59)                                                              | 44         |
+| %t         | نویسه تب افقی ('\t')                                                       |            |
+| %T         | قالب زمان ISO 8601 (HH:MM:SS), معادل %H:%M:%S                              | 22:33:44   |
+| %u         | روز هفته ISO 8601 به صورت عدد, با دوشنبه به عنوان 1 (1-7)                  | 2          |
+| %V         | شماره هفته ISO 8601 (01-53)                                                | 01         |
+| %w         | روز هفته به صورت عدد اعشاری, با یکشنبه به عنوان 0 (0-6)                    | 2          |
+| %y         | سال, دو رقم اخر (00-99)                                                    | 18         |
+| %Y         | سال                                                                        | 2018       |
+| %%         | یک علامت %                                                                 | %          |
+
+[مقاله اصلی](https://clickhouse.tech/docs/en/query_language/functions/date_time_functions/)
diff --git a/docs/fa/sql_reference/functions/encoding_functions.md b/docs/fa/sql_reference/functions/encoding_functions.md
new file mode 100644
index 00000000000..0c08d0cad86
--- /dev/null
+++ b/docs/fa/sql_reference/functions/encoding_functions.md
@@ -0,0 +1,175 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 52
+toc_title: "\u06A9\u062F\u0628\u0646\u062F\u06CC"
+---
+
+# توابع کدبندی {#encoding-functions}
+
+## char {#char}
+
+رشته ای برمی گرداند که طول ان برابر تعداد ارگومان های داده شده است و هر بایت ان مقدار ارگومان متناظر را دارد. چندین ارگومان از انواع عددی می پذیرد. اگر مقدار یک ارگومان خارج از محدوده نوع داده UInt8 باشد, با گرد کردن و سرریز احتمالی به UInt8 تبدیل می شود.
+ +**نحو** + +``` sql +char(number_1, [number_2, ..., number_n]); +``` + +**پارامترها** + +- `number_1, number_2, ..., number_n` — Numerical arguments interpreted as integers. Types: [Int](../../sql_reference/data_types/int_uint.md), [شناور](../../sql_reference/data_types/float.md). + +**مقدار بازگشتی** + +- یک رشته از بایت داده. + +نوع: `String`. + +**مثال** + +پرسوجو: + +``` sql +SELECT char(104.1, 101, 108.9, 108.9, 111) AS hello +``` + +نتیجه: + +``` text +┌─hello─┐ +│ hello │ +└───────┘ +``` + +شما می توانید یک رشته از رمزگذاری دلخواه با عبور از بایت مربوطه ساخت. در اینجا به عنوان مثال برای یونایتد-8 است: + +پرسوجو: + +``` sql +SELECT char(0xD0, 0xBF, 0xD1, 0x80, 0xD0, 0xB8, 0xD0, 0xB2, 0xD0, 0xB5, 0xD1, 0x82) AS hello; +``` + +نتیجه: + +``` text +┌─hello──┐ +│ привет │ +└────────┘ +``` + +پرسوجو: + +``` sql +SELECT char(0xE4, 0xBD, 0xA0, 0xE5, 0xA5, 0xBD) AS hello; +``` + +نتیجه: + +``` text +┌─hello─┐ +│ 你好 │ +└───────┘ +``` + +## هکس {#hex} + +بازگرداندن یک رشته حاوی نمایندگی هگزادسیمال استدلال. + +**نحو** + +``` sql +hex(arg) +``` + +تابع با استفاده از حروف بزرگ `A-F` و با استفاده از هیچ پیشوندها (مانند `0x`) یا پسوندها (مانند `h`). + +برای استدلال عدد صحیح رقم سحر و جادو را چاپ می کند (“nibbles”) از مهم ترین به حداقل قابل توجهی (اندی بزرگ یا “human readable” سفارش). این با مهم ترین بایت غیر صفر شروع می شود (پیشرو صفر بایت حذف می شوند) اما همیشه چاپ هر دو رقم از هر بایت حتی اگر رقم پیشرو صفر است. + +مثال: + +**مثال** + +پرسوجو: + +``` sql +SELECT hex(1); +``` + +نتیجه: + +``` text +01 +``` + +مقادیر نوع `Date` و `DateTime` به عنوان اعداد صحیح مربوطه فرمت (تعداد روز از عصر برای تاریخ و ارزش برچسب زمان یونیکس برای تاریخ ساعت داده). + +برای `String` و `FixedString`, تمام بایت به سادگی به عنوان دو عدد هگزادسیمال کد گذاری. صفر بایت حذف نشده است. + +ارزش های ممیز شناور و رقم اعشاری به عنوان نمایندگی خود را در حافظه کد گذاری. همانطور که ما از معماری اندیان کوچک حمایت می کنیم در اندی کوچک کد گذاری می شوند. صفر پیشرو / عقبی بایت حذف نشده است. + +**پارامترها** + +- `arg` — A value to convert to hexadecimal. Types: [رشته](../../sql_reference/data_types/string.md), [اینترنت](../../sql_reference/data_types/int_uint.md), [شناور](../../sql_reference/data_types/float.md), [دهدهی](../../sql_reference/data_types/decimal.md), [تاریخ](../../sql_reference/data_types/date.md) یا [DateTime](../../sql_reference/data_types/datetime.md). + +**مقدار بازگشتی** + +- یک رشته با نمایش هگزادسیمال استدلال. + +نوع: `String`. + +**مثال** + +پرسوجو: + +``` sql +SELECT hex(toFloat32(number)) as hex_presentation FROM numbers(15, 2); +``` + +نتیجه: + +``` text +┌─hex_presentation─┐ +│ 00007041 │ +│ 00008041 │ +└──────────────────┘ +``` + +پرسوجو: + +``` sql +SELECT hex(toFloat64(number)) as hex_presentation FROM numbers(15, 2); +``` + +نتیجه: + +``` text +┌─hex_presentation─┐ +│ 0000000000002E40 │ +│ 0000000000003040 │ +└──────────────────┘ +``` + +## unhex(str) {#unhexstr} + +می پذیرد یک رشته حاوی هر تعداد از رقم هگزادسیمال, و یک رشته حاوی بایت مربوطه را برمی گرداند. پشتیبانی از حروف بزرگ و کوچک تعداد ارقام هگزادسیمال ندارد به حتی. اگر عجیب و غریب است, رقم گذشته به عنوان نیمه حداقل قابل توجهی از بایت 00-0ف تفسیر. اگر رشته استدلال شامل هر چیزی غیر از رقم هگزادسیمال, برخی از نتیجه پیاده سازی تعریف بازگشته است (یک استثنا پرتاب نمی شود). +اگر شما می خواهید برای تبدیل نتیجه به یک عدد, شما می توانید با استفاده از ‘reverse’ و ‘reinterpretAsType’ توابع. 
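+
+یک طرح فرضی برای تبدیل نتیجه `unhex` به عدد با کمک `reverse` و `reinterpretAsUInt16` (بایت ها پس از معکوس شدن به صورت اندیان کوچک تفسیر می شوند):
+
+``` sql
+SELECT reinterpretAsUInt16(reverse(unhex('0100'))) AS num
+```
+
+``` text
+┌─num─┐
+│ 256 │
+└─────┘
+```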
+
+## UUIDStringToNum(str) {#uuidstringtonumstr}
+
+رشته ای حاوی 36 کاراکتر در قالب `123e4567-e89b-12d3-a456-426655440000` می پذیرد و ان را به صورت مجموعه ای از بایت ها در یک FixedString(16) برمی گرداند.
+
+## UUIDNumToString(str) {#uuidnumtostringstr}
+
+یک مقدار FixedString(16) می پذیرد. رشته ای حاوی 36 کاراکتر در قالب متنی برمی گرداند.
+
+## bitmaskToList(num) {#bitmasktolistnum}
+
+یک عدد صحیح می پذیرد. رشته ای حاوی لیست توان های دو برمی گرداند که در مجموع عدد منبع را می سازند. لیست با کاما و بدون فاصله در قالب متن, به ترتیب صعودی جدا شده است.
+
+## bitmaskToArray(num) {#bitmasktoarraynum}
+
+یک عدد صحیح می پذیرد. مجموعه ای از اعداد UInt64 برمی گرداند حاوی لیست توان های دو که در مجموع عدد منبع را می سازند. اعداد به ترتیب صعودی هستند.
+
+[مقاله اصلی](https://clickhouse.tech/docs/en/query_language/functions/encoding_functions/)
diff --git a/docs/fa/sql_reference/functions/ext_dict_functions.md b/docs/fa/sql_reference/functions/ext_dict_functions.md
new file mode 100644
index 00000000000..622344f789c
--- /dev/null
+++ b/docs/fa/sql_reference/functions/ext_dict_functions.md
@@ -0,0 +1,206 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 58
+toc_title: "\u06A9\u0627\u0631 \u0628\u0627 \u0648\u0627\u0698\u0647\u0646\u0627\u0645\
+  \u0647\u0647\u0627 \u062E\u0627\u0631\u062C\u06CC"
+---
+
+# توابع برای کار با لغت نامه های خارجی {#ext_dict_functions}
+
+برای اطلاعات در مورد اتصال و پیکربندی لغت نامه های خارجی, دیدن [واژهنامهها خارجی](../../sql_reference/dictionaries/external_dictionaries/external_dicts.md).
+
+## dictGet {#dictget}
+
+بازیابی یک مقدار از یک فرهنگ لغت خارجی.
+
+``` sql
+dictGet('dict_name', 'attr_name', id_expr)
+dictGetOrDefault('dict_name', 'attr_name', id_expr, default_value_expr)
+```
+
+**پارامترها**
+
+- `dict_name` — Name of the dictionary. [رشته تحت اللفظی](../syntax.md#syntax-string-literal).
+- `attr_name` — Name of the column of the dictionary. [رشته تحت اللفظی](../syntax.md#syntax-string-literal).
+- `id_expr` — Key value. [عبارت](../syntax.md#syntax-expressions) بازگشت یک [UInt64](../../sql_reference/data_types/int_uint.md) یا [تاپل](../../sql_reference/data_types/tuple.md)- نوع ارزش بسته به پیکربندی فرهنگ لغت.
+- `default_value_expr` — Value returned if the dictionary doesn't contain a row with the `id_expr` کلید [عبارت](../syntax.md#syntax-expressions) بازگشت ارزش در نوع داده پیکربندی شده برای `attr_name` صفت کردن.
+
+**مقدار بازگشتی**
+
+- اگر کلیک هاوس ویژگی را با موفقیت در [نوع داده خصیصه](../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md#ext_dict_structure-attributes) تجزیه کند, توابع مقدار ویژگی فرهنگ لغت مربوط به `id_expr` را برمی گردانند.
+
+- اگر هیچ کلیدی مربوط به `id_expr` در فرهنگ لغت وجود نداشته باشد, سپس:
+
+        - `dictGet` returns the content of the `<null_value>` element specified for the attribute in the dictionary configuration.
+        - `dictGetOrDefault` returns the value passed as the `default_value_expr` parameter.
+
+کلیک هاوس اگر نتواند مقدار ویژگی را تجزیه کند یا مقدار با نوع داده ویژگی مطابقت نداشته باشد, یک استثنا پرتاب می کند.
+
+**مثال**
+
+ایجاد یک فایل متنی `ext-dict-text.csv` حاوی موارد زیر:
+
+``` text
+1,1
+2,2
+```
+
+ستون اول `id` و ستون دوم `c1` است.
+ +پیکربندی واژهنامه خارجی: + +``` xml + + + ext-dict-test + + + /path-to/ext-dict-test.csv + CSV + + + + + + + + id + + + c1 + UInt32 + + + + 0 + + +``` + +انجام پرس و جو: + +``` sql +SELECT + dictGetOrDefault('ext-dict-test', 'c1', number + 1, toUInt32(number * 10)) AS val, + toTypeName(val) AS type +FROM system.numbers +LIMIT 3 +``` + +``` text +┌─val─┬─type───┐ +│ 1 │ UInt32 │ +│ 2 │ UInt32 │ +│ 20 │ UInt32 │ +└─────┴────────┘ +``` + +**همچنین نگاه کنید** + +- [واژهنامهها خارجی](../../sql_reference/dictionaries/external_dictionaries/external_dicts.md) + +## دیکتس {#dicthas} + +بررسی اینکه یک کلید در حال حاضر در یک فرهنگ لغت است. + +``` sql +dictHas('dict_name', id_expr) +``` + +**پارامترها** + +- `dict_name` — Name of the dictionary. [رشته تحت اللفظی](../syntax.md#syntax-string-literal). +- `id_expr` — Key value. [عبارت](../syntax.md#syntax-expressions) بازگشت یک [UInt64](../../sql_reference/data_types/int_uint.md)- نوع ارزش. + +**مقدار بازگشتی** + +- 0, اگر هیچ کلید وجود دارد. +- 1, اگر یک کلید وجود دارد. + +نوع: `UInt8`. + +## حکومت دیکتاتوری {#dictgethierarchy} + +یک مجموعه ای ایجاد می کند که شامل همه والدین یک کلید در [فرهنگ لغت سلسله مراتبی](../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_hierarchical.md). + +**نحو** + +``` sql +dictGetHierarchy('dict_name', key) +``` + +**پارامترها** + +- `dict_name` — Name of the dictionary. [رشته تحت اللفظی](../syntax.md#syntax-string-literal). +- `key` — Key value. [عبارت](../syntax.md#syntax-expressions) بازگشت یک [UInt64](../../sql_reference/data_types/int_uint.md)- نوع ارزش. + +**مقدار بازگشتی** + +- پدر و مادر برای کلید. + +نوع: [Array(UInt64)](../../sql_reference/data_types/array.md). + +## دیکتاتوری {#dictisin} + +جد یک کلید را از طریق کل زنجیره سلسله مراتبی در فرهنگ لغت بررسی می کند. + +``` sql +dictIsIn('dict_name', child_id_expr, ancestor_id_expr) +``` + +**پارامترها** + +- `dict_name` — Name of the dictionary. [رشته تحت اللفظی](../syntax.md#syntax-string-literal). +- `child_id_expr` — Key to be checked. [عبارت](../syntax.md#syntax-expressions) بازگشت یک [UInt64](../../sql_reference/data_types/int_uint.md)- نوع ارزش. +- `ancestor_id_expr` — Alleged ancestor of the `child_id_expr` کلید [عبارت](../syntax.md#syntax-expressions) بازگشت یک [UInt64](../../sql_reference/data_types/int_uint.md)- نوع ارزش. + +**مقدار بازگشتی** + +- 0 اگر `child_id_expr` یک کودک نیست `ancestor_id_expr`. +- 1 اگر `child_id_expr` یک کودک است `ancestor_id_expr` یا اگر `child_id_expr` یک `ancestor_id_expr`. + +نوع: `UInt8`. + +## توابع دیگر {#ext_dict_functions-other} + +تاتر پشتیبانی از توابع تخصصی است که تبدیل ارزش فرهنگ لغت ویژگی به یک نوع داده خاص بدون در نظر گرفتن پیکربندی فرهنگ لغت. + +توابع: + +- `dictGetInt8`, `dictGetInt16`, `dictGetInt32`, `dictGetInt64` +- `dictGetUInt8`, `dictGetUInt16`, `dictGetUInt32`, `dictGetUInt64` +- `dictGetFloat32`, `dictGetFloat64` +- `dictGetDate` +- `dictGetDateTime` +- `dictGetUUID` +- `dictGetString` + +همه این توابع `OrDefault` اصلاح. به عنوان مثال, `dictGetDateOrDefault`. + +نحو: + +``` sql +dictGet[Type]('dict_name', 'attr_name', id_expr) +dictGet[Type]OrDefault('dict_name', 'attr_name', id_expr, default_value_expr) +``` + +**پارامترها** + +- `dict_name` — Name of the dictionary. [رشته تحت اللفظی](../syntax.md#syntax-string-literal). +- `attr_name` — Name of the column of the dictionary. [رشته تحت اللفظی](../syntax.md#syntax-string-literal). +- `id_expr` — Key value. [عبارت](../syntax.md#syntax-expressions) بازگشت یک [UInt64](../../sql_reference/data_types/int_uint.md)- نوع ارزش. 
+- `default_value_expr` — Value which is returned if the dictionary doesn't contain a row with the `id_expr` کلید [عبارت](../syntax.md#syntax-expressions) بازگشت یک مقدار در نوع داده پیکربندی شده برای `attr_name` صفت کردن. + +**مقدار بازگشتی** + +- اگر تاتر تجزیه ویژگی موفقیت در [نوع داده خصیصه](../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md#ext_dict_structure-attributes), توابع بازگشت ارزش ویژگی فرهنگ لغت که مربوط به `id_expr`. + +- در صورتی که هیچ درخواست وجود دارد `id_expr` در فرهنگ لغت و سپس: + + - `dictGet[Type]` returns the content of the `` element specified for the attribute in the dictionary configuration. + - `dictGet[Type]OrDefault` returns the value passed as the `default_value_expr` parameter. + +کلیک هاوس می اندازد یک استثنا اگر می تواند ارزش ویژگی تجزیه و یا ارزش می کند نوع داده ویژگی مطابقت ندارد. + +[مقاله اصلی](https://clickhouse.tech/docs/en/query_language/functions/ext_dict_functions/) diff --git a/docs/fa/query_language/functions/functions_for_nulls.md b/docs/fa/sql_reference/functions/functions_for_nulls.md similarity index 56% rename from docs/fa/query_language/functions/functions_for_nulls.md rename to docs/fa/sql_reference/functions/functions_for_nulls.md index 5be0ddb21a5..02efe209696 100644 --- a/docs/fa/query_language/functions/functions_for_nulls.md +++ b/docs/fa/sql_reference/functions/functions_for_nulls.md @@ -1,29 +1,33 @@ --- -en_copy: true +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 63 +toc_title: "\u06A9\u0627\u0631 \u0628\u0627 Nullable \u0627\u0633\u062A\u062F\u0644\ + \u0627\u0644" --- -# Functions for working with Nullable aggregates {#functions-for-working-with-nullable-aggregates} +# توابع برای کار با nullable مصالح {#functions-for-working-with-nullable-aggregates} ## isNull {#isnull} -Checks whether the argument is [NULL](../syntax.md#null). +بررسی اینکه بحث چیست [NULL](../syntax.md#null). ``` sql isNull(x) ``` -**Parameters** +**پارامترها** - `x` — A value with a non-compound data type. -**Returned value** +**مقدار بازگشتی** -- `1` if `x` is `NULL`. -- `0` if `x` is not `NULL`. +- `1` اگر `x` هست `NULL`. +- `0` اگر `x` نیست `NULL`. -**Example** +**مثال** -Input table +جدول ورودی ``` text ┌─x─┬────y─┐ @@ -32,7 +36,7 @@ Input table └───┴──────┘ ``` -Query +پرسوجو ``` sql SELECT x FROM t_null WHERE isNull(y) @@ -44,26 +48,26 @@ SELECT x FROM t_null WHERE isNull(y) └───┘ ``` -## isNotNull {#isnotnull} +## اینترنت {#isnotnull} -Checks whether the argument is [NULL](../syntax.md#null). +بررسی اینکه بحث چیست [NULL](../syntax.md#null). ``` sql isNotNull(x) ``` -**Parameters:** +**پارامترها:** - `x` — A value with a non-compound data type. -**Returned value** +**مقدار بازگشتی** -- `0` if `x` is `NULL`. -- `1` if `x` is not `NULL`. +- `0` اگر `x` هست `NULL`. +- `1` اگر `x` نیست `NULL`. -**Example** +**مثال** -Input table +جدول ورودی ``` text ┌─x─┬────y─┐ @@ -72,7 +76,7 @@ Input table └───┴──────┘ ``` -Query +پرسوجو ``` sql SELECT x FROM t_null WHERE isNotNull(y) @@ -84,26 +88,26 @@ SELECT x FROM t_null WHERE isNotNull(y) └───┘ ``` -## coalesce {#coalesce} +## فلز کاری {#coalesce} -Checks from left to right whether `NULL` arguments were passed and returns the first non-`NULL` argument. +چک از چپ به راست چه `NULL` استدلال به تصویب رسید و اولین غیر گرداند-`NULL` استدلال کردن. ``` sql coalesce(x,...) ``` -**Parameters:** +**پارامترها:** -- Any number of parameters of a non-compound type. All parameters must be compatible by data type. 
+- هر تعداد از پارامترهای یک نوع غیر مرکب. تمام پارامترها باید با نوع داده سازگار باشند. -**Returned values** +**مقادیر بازگشتی** -- The first non-`NULL` argument. -- `NULL`, if all arguments are `NULL`. +- اولین غیر-`NULL` استدلال کردن. +- `NULL`, اگر همه استدلال ها `NULL`. -**Example** +**مثال** -Consider a list of contacts that may specify multiple ways to contact a customer. +یک لیست از مخاطبین است که ممکن است راه های متعدد برای تماس با مشتری مشخص را در نظر بگیرید. ``` text ┌─name─────┬─mail─┬─phone─────┬──icq─┐ @@ -112,9 +116,9 @@ Consider a list of contacts that may specify multiple ways to contact a customer └──────────┴──────┴───────────┴──────┘ ``` -The `mail` and `phone` fields are of type String, but the `icq` field is `UInt32`, so it needs to be converted to `String`. +این `mail` و `phone` زمینه های رشته نوع هستند, اما `icq` زمینه است `UInt32` بنابراین نیاز به تبدیل شدن به `String`. -Get the first available contact method for the customer from the contact list: +دریافت اولین روش تماس در دسترس برای مشتری از لیست تماس: ``` sql SELECT coalesce(mail, phone, CAST(icq,'Nullable(String)')) FROM aBook @@ -129,23 +133,23 @@ SELECT coalesce(mail, phone, CAST(icq,'Nullable(String)')) FROM aBook ## ifNull {#ifnull} -Returns an alternative value if the main argument is `NULL`. +بازگرداندن یک مقدار جایگزین اگر استدلال اصلی است `NULL`. ``` sql ifNull(x,alt) ``` -**Parameters:** +**پارامترها:** - `x` — The value to check for `NULL`. -- `alt` — The value that the function returns if `x` is `NULL`. +- `alt` — The value that the function returns if `x` هست `NULL`. -**Returned values** +**مقادیر بازگشتی** -- The value `x`, if `x` is not `NULL`. -- The value `alt`, if `x` is `NULL`. +- مقدار `x` اگر `x` نیست `NULL`. +- مقدار `alt` اگر `x` هست `NULL`. -**Example** +**مثال** ``` sql SELECT ifNull('a', 'b') @@ -169,22 +173,22 @@ SELECT ifNull(NULL, 'b') ## nullIf {#nullif} -Returns `NULL` if the arguments are equal. +بازگشت `NULL` اگر استدلال برابر هستند. ``` sql nullIf(x, y) ``` -**Parameters:** +**پارامترها:** `x`, `y` — Values for comparison. They must be compatible types, or ClickHouse will generate an exception. -**Returned values** +**مقادیر بازگشتی** -- `NULL`, if the arguments are equal. -- The `x` value, if the arguments are not equal. +- `NULL`, اگر استدلال برابر هستند. +- این `x` ارزش, اگر استدلال برابر نیست. -**Example** +**مثال** ``` sql SELECT nullIf(1, 1) @@ -206,26 +210,26 @@ SELECT nullIf(1, 2) └──────────────┘ ``` -## assumeNotNull {#assumenotnull} +## قابل قبول {#assumenotnull} -Results in a value of type [Nullable](../../data_types/nullable.md) for a non- `Nullable`, if the value is not `NULL`. +نتایج در ارزش نوع [Nullable](../../sql_reference/data_types/nullable.md) برای یک غیر- `Nullable`, اگر مقدار است `NULL`. ``` sql assumeNotNull(x) ``` -**Parameters:** +**پارامترها:** - `x` — The original value. -**Returned values** +**مقادیر بازگشتی** -- The original value from the non-`Nullable` type, if it is not `NULL`. -- The default value for the non-`Nullable` type if the original value was `NULL`. +- مقدار اصلی از غیر-`Nullable` نوع, اگر نیست `NULL`. +- مقدار پیش فرض برای غیر-`Nullable` نوع اگر مقدار اصلی بود `NULL`. -**Example** +**مثال** -Consider the `t_null` table. +در نظر بگیرید که `t_null` جدول ``` sql SHOW CREATE TABLE t_null @@ -244,7 +248,7 @@ SHOW CREATE TABLE t_null └───┴──────┘ ``` -Apply the `assumeNotNull` function to the `y` column. +درخواست `assumeNotNull` تابع به `y` ستون. 
``` sql SELECT assumeNotNull(y) FROM t_null @@ -268,23 +272,23 @@ SELECT toTypeName(assumeNotNull(y)) FROM t_null └──────────────────────────────┘ ``` -## toNullable {#tonullable} +## قابل تنظیم {#tonullable} -Converts the argument type to `Nullable`. +تبدیل نوع استدلال به `Nullable`. ``` sql toNullable(x) ``` -**Parameters:** +**پارامترها:** - `x` — The value of any non-compound type. -**Returned value** +**مقدار بازگشتی** -- The input value with a `Nullable` type. +- مقدار ورودی با یک `Nullable` نوع. -**Example** +**مثال** ``` sql SELECT toTypeName(10) @@ -306,4 +310,4 @@ SELECT toTypeName(toNullable(10)) └────────────────────────────┘ ``` -[Original article](https://clickhouse.tech/docs/en/query_language/functions/functions_for_nulls/) +[مقاله اصلی](https://clickhouse.tech/docs/en/query_language/functions/functions_for_nulls/) diff --git a/docs/fa/sql_reference/functions/geo.md b/docs/fa/sql_reference/functions/geo.md new file mode 100644 index 00000000000..cafe88281d9 --- /dev/null +++ b/docs/fa/sql_reference/functions/geo.md @@ -0,0 +1,511 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 62 +toc_title: "\u06A9\u0627\u0631 \u0628\u0627 \u0645\u062E\u062A\u0635\u0627\u062A \u062C\ + \u063A\u0631\u0627\u0641\u06CC\u0627\u06CC\u06CC" +--- + +# توابع برای کار با مختصات جغرافیایی {#functions-for-working-with-geographical-coordinates} + +## نمایش سایت {#greatcircledistance} + +محاسبه فاصله بین دو نقطه بر روی سطح زمین با استفاده از [فرمول دایره بزرگ](https://en.wikipedia.org/wiki/Great-circle_distance). + +``` sql +greatCircleDistance(lon1Deg, lat1Deg, lon2Deg, lat2Deg) +``` + +**پارامترهای ورودی** + +- `lon1Deg` — Longitude of the first point in degrees. Range: `[-180°, 180°]`. +- `lat1Deg` — Latitude of the first point in degrees. Range: `[-90°, 90°]`. +- `lon2Deg` — Longitude of the second point in degrees. Range: `[-180°, 180°]`. +- `lat2Deg` — Latitude of the second point in degrees. Range: `[-90°, 90°]`. + +مقادیر مثبت به طول و عرض جغرافیایی شمال شرق مطابقت, و مقادیر منفی به طول و عرض جغرافیایی جنوبی و طول جغرافیایی غرب مطابقت. + +**مقدار بازگشتی** + +فاصله بین دو نقطه بر روی سطح زمین, در متر. + +تولید یک استثنا زمانی که مقادیر پارامتر ورودی در خارج از محدوده قرار می گیرند. + +**مثال** + +``` sql +SELECT greatCircleDistance(55.755831, 37.617673, -55.755831, -37.617673) +``` + +``` text +┌─greatCircleDistance(55.755831, 37.617673, -55.755831, -37.617673)─┐ +│ 14132374.194975413 │ +└───────────────────────────────────────────────────────────────────┘ +``` + +## نقلقولهای جدید از این نویسنده {#pointinellipses} + +بررسی اینکه نقطه متعلق به حداقل یکی از بیضی. +مختصات هندسی در سیستم مختصات دکارتی هستند. + +``` sql +pointInEllipses(x, y, x₀, y₀, a₀, b₀,...,xₙ, yₙ, aₙ, bₙ) +``` + +**پارامترهای ورودی** + +- `x, y` — Coordinates of a point on the plane. +- `xᵢ, yᵢ` — Coordinates of the center of the `i`- حذف هفتم . +- `aᵢ, bᵢ` — Axes of the `i`- حذف ت در واحد های ایکس, و مختصات. + +پارامترهای ورودی باید باشد `2+4⋅n` کجا `n` تعداد بیضی است. + +**مقادیر بازگشتی** + +`1` اگر نقطه در داخل است حداقل یکی از بیضی; `0`اگر این طور نیست. + +**مثال** + +``` sql +SELECT pointInEllipses(10., 10., 10., 9.1, 1., 0.9999) +``` + +``` text +┌─pointInEllipses(10., 10., 10., 9.1, 1., 0.9999)─┐ +│ 1 │ +└─────────────────────────────────────────────────┘ +``` + +## نقطه چین {#pointinpolygon} + +بررسی اینکه نقطه متعلق به چند ضلعی در هواپیما. + +``` sql +pointInPolygon((x, y), [(a, b), (c, d) ...], ...) 
+``` + +**مقادیر ورودی** + +- `(x, y)` — Coordinates of a point on the plane. Data type — [تاپل](../../sql_reference/data_types/tuple.md) — A tuple of two numbers. +- `[(a, b), (c, d) ...]` — Polygon vertices. Data type — [& حذف](../../sql_reference/data_types/array.md). هر راس است که توسط یک جفت مختصات نشان داده شده است `(a, b)`. راس باید در جهت عقربه های ساعت و یا در خلاف جهت عقربه مشخص شده است. حداقل تعداد راس است 3. چند ضلعی باید ثابت باشد. +- این تابع همچنین از چند ضلعی با سوراخ (برش بخش). در این مورد, اضافه چند ضلعی است که تعریف بخش برش با استفاده از استدلال های اضافی از تابع. تابع چند ضلعی غیر به سادگی متصل را پشتیبانی نمی کند. + +**مقادیر بازگشتی** + +`1` اگر نقطه در داخل چند ضلعی باشد, `0` اگر این طور نیست. +اگر نقطه در مرز چند ضلعی, تابع ممکن است یا بازگشت 0 یا 1. + +**مثال** + +``` sql +SELECT pointInPolygon((3., 3.), [(6, 0), (8, 4), (5, 8), (0, 2)]) AS res +``` + +``` text +┌─res─┐ +│ 1 │ +└─────┘ +``` + +## کد جغرافیایی {#geohashencode} + +کد طول و عرض جغرافیایی به عنوان یک geohash-رشته مراجعه کنید (http://geohash.org/, https://en.wikipedia.org/wiki/geohash). + +``` sql +geohashEncode(longitude, latitude, [precision]) +``` + +**مقادیر ورودی** + +- طول جغرافیایی-طول جغرافیایی بخشی از مختصات شما می خواهید به رمز. شناور در محدوده`[-180°, 180°]` +- عرض جغرافیایی-عرض جغرافیایی بخشی از مختصات شما می خواهید به رمز. شناور در محدوده `[-90°, 90°]` +- دقت-اختیاری, طول رشته کد گذاری نتیجه, به طور پیش فرض به `12`. عدد صحیح در محدوده `[1, 12]`. هر مقدار کمتر از `1` یا بیشتر از `12` در سکوت به تبدیل `12`. + +**مقادیر بازگشتی** + +- عدد و الفبایی `String` مختصات کد گذاری شده (نسخه اصلاح شده از الفبای باس32 رمزگذاری استفاده شده است). + +**مثال** + +``` sql +SELECT geohashEncode(-5.60302734375, 42.593994140625, 0) AS res +``` + +``` text +┌─res──────────┐ +│ ezs42d000000 │ +└──────────────┘ +``` + +## کد جغرافیایی {#geohashdecode} + +هر رشته جغرافیایی کد گذاری به طول و عرض جغرافیایی را رمزگشایی می کند. + +**مقادیر ورودی** + +- رشته کد گذاری-رشته جغرافیایی کد گذاری. + +**مقادیر بازگشتی** + +- (طول جغرافیایی, عرض جغرافیایی) - 2-تاپل از `Float64` ارزش طول و عرض جغرافیایی. + +**مثال** + +``` sql +SELECT geohashDecode('ezs42') AS res +``` + +``` text +┌─res─────────────────────────────┐ +│ (-5.60302734375,42.60498046875) │ +└─────────────────────────────────┘ +``` + +## جغرافیایی 3 {#geotoh3} + +بازگشت [H3](https://uber.github.io/h3/#/documentation/overview/introduction) شاخص نقطه `(lon, lat)` با وضوح مشخص شده است. + +[H3](https://uber.github.io/h3/#/documentation/overview/introduction) یک سیستم نمایه سازی جغرافیایی است که سطح زمین به کاشی های شش ضلعی حتی تقسیم شده است. این سیستم سلسله مراتبی است, به عنوان مثال. هر شش گوش در سطح بالا را می توان به هفت حتی اما کوچکتر و به همین ترتیب تقسیم. + +این شاخص در درجه اول برای مکان های جفتک انداختن و دیگر دستکاری های جغرافیایی استفاده می شود. + +**نحو** + +``` sql +geoToH3(lon, lat, resolution) +``` + +**پارامترها** + +- `lon` — Longitude. Type: [جسم شناور64](../../sql_reference/data_types/float.md). +- `lat` — Latitude. Type: [جسم شناور64](../../sql_reference/data_types/float.md). +- `resolution` — Index resolution. Range: `[0, 15]`. نوع: [UInt8](../../sql_reference/data_types/int_uint.md). + +**مقادیر بازگشتی** + +- عدد شاخص شش گوش. +- 0 در صورت خطا. + +نوع: `UInt64`. 
+ +**مثال** + +پرسوجو: + +``` sql +SELECT geoToH3(37.79506683, 55.71290588, 15) as h3Index +``` + +نتیجه: + +``` text +┌────────────h3Index─┐ +│ 644325524701193974 │ +└────────────────────┘ +``` + +## جعبه جواهر {#geohashesinbox} + +بازگرداندن مجموعه ای از رشته های جغرافیایی کد گذاری شده از دقت داده شده است که در داخل و تقاطع مرزهای جعبه داده شده قرار می گیرند, اساسا یک شبکه 2د مسطح به مجموعه. + +**مقادیر ورودی** + +- طولی-دقیقه طول جغرافیایی, ارزش شناور در محدوده `[-180°, 180°]` +- عرضی-دقیقه عرض جغرافیایی, ارزش شناور در محدوده `[-90°, 90°]` +- طولی-حداکثر طول جغرافیایی, ارزش شناور در محدوده `[-180°, 180°]` +- عرضی-حداکثر عرض جغرافیایی, ارزش شناور در محدوده `[-90°, 90°]` +- دقت-جغرافیایی دقیق, `UInt8` در محدوده `[1, 12]` + +لطفا توجه داشته باشید که تمام پارامترهای هماهنگ باید از همان نوع باشد: هم `Float32` یا `Float64`. + +**مقادیر بازگشتی** + +- مجموعه ای از رشته های دقت طولانی از زمینهاش جعبه پوشش منطقه فراهم, شما باید به ترتیب از اقلام تکیه نمی. +- \[\]- مجموعه خالی اگر *کمینه* ارزش *عرض جغرافیایی* و *طول جغرافیایی* کمتر از متناظر نیست *حداکثر* ارزشهای خبری عبارتند از: + +لطفا توجه داشته باشید که عملکرد یک استثنا را پرتاب می کند اگر مجموعه ای بیش از 10'000'000 باشد. + +**مثال** + +``` sql +SELECT geohashesInBox(24.48, 40.56, 24.785, 40.81, 4) AS thasos +``` + +``` text +┌─thasos──────────────────────────────────────┐ +│ ['sx1q','sx1r','sx32','sx1w','sx1x','sx38'] │ +└─────────────────────────────────────────────┘ +``` + +## هد3گتاسکل {#h3getbasecell} + +بازگرداندن تعداد سلول پایه از شاخص. + +**نحو** + +``` sql +h3GetBaseCell(index) +``` + +**پارامترها** + +- `index` — Hexagon index number. Type: [UInt64](../../sql_reference/data_types/int_uint.md). + +**مقادیر بازگشتی** + +- شش گوش شماره سلول پایه. نوع: [UInt8](../../sql_reference/data_types/int_uint.md). + +**مثال** + +پرسوجو: + +``` sql +SELECT h3GetBaseCell(612916788725809151) as basecell +``` + +نتیجه: + +``` text +┌─basecell─┐ +│ 12 │ +└──────────┘ +``` + +## ه3حکسرام2 {#h3hexaream2} + +میانگین منطقه شش گوش در متر مربع در وضوح داده شده. + +**نحو** + +``` sql +h3HexAreaM2(resolution) +``` + +**پارامترها** + +- `resolution` — Index resolution. Range: `[0, 15]`. نوع: [UInt8](../../sql_reference/data_types/int_uint.md). + +**مقادیر بازگشتی** + +- Area in m². Type: [جسم شناور64](../../sql_reference/data_types/float.md). + +**مثال** + +پرسوجو: + +``` sql +SELECT h3HexAreaM2(13) as area +``` + +نتیجه: + +``` text +┌─area─┐ +│ 43.9 │ +└──────┘ +``` + +## در حال بارگذاری {#h3indexesareneighbors} + +بازده یا نه فراهم هیندکس همسایه هستند. + +**نحو** + +``` sql +h3IndexesAreNeighbors(index1, index2) +``` + +**پارامترها** + +- `index1` — Hexagon index number. Type: [UInt64](../../sql_reference/data_types/int_uint.md). +- `index2` — Hexagon index number. Type: [UInt64](../../sql_reference/data_types/int_uint.md). + +**مقادیر بازگشتی** + +- بازگشت `1` اگر شاخص همسایه هستند, `0` وگرنه نوع: [UInt8](../../sql_reference/data_types/int_uint.md). + +**مثال** + +پرسوجو: + +``` sql +SELECT h3IndexesAreNeighbors(617420388351344639, 617420388352655359) AS n +``` + +نتیجه: + +``` text +┌─n─┐ +│ 1 │ +└───┘ +``` + +## بچه گانه های 3 {#h3tochildren} + +بازگرداندن مجموعه ای با شاخص کودک از شاخص داده. + +**نحو** + +``` sql +h3ToChildren(index, resolution) +``` + +**پارامترها** + +- `index` — Hexagon index number. Type: [UInt64](../../sql_reference/data_types/int_uint.md). +- `resolution` — Index resolution. Range: `[0, 15]`. نوع: [UInt8](../../sql_reference/data_types/int_uint.md). + +**مقادیر بازگشتی** + +- با شاخص های اچ 3 کودک تنظیم کنید. 
مجموعه ای از نوع: [UInt64](../../sql_reference/data_types/int_uint.md). + +**مثال** + +پرسوجو: + +``` sql +SELECT h3ToChildren(599405990164561919, 6) AS children +``` + +نتیجه: + +``` text +┌─children───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐ +│ [603909588852408319,603909588986626047,603909589120843775,603909589255061503,603909589389279231,603909589523496959,603909589657714687] │ +└────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ +``` + +## ساعت 3 {#h3toparent} + +بازگرداندن پدر و مادر (درشت) شاخص حاوی شاخص داده. + +**نحو** + +``` sql +h3ToParent(index, resolution) +``` + +**پارامترها** + +- `index` — Hexagon index number. Type: [UInt64](../../sql_reference/data_types/int_uint.md). +- `resolution` — Index resolution. Range: `[0, 15]`. نوع: [UInt8](../../sql_reference/data_types/int_uint.md). + +**مقادیر بازگشتی** + +- شاخص اچ 3 پدر و مادر. نوع: [UInt64](../../sql_reference/data_types/int_uint.md). + +**مثال** + +پرسوجو: + +``` sql +SELECT h3ToParent(599405990164561919, 3) as parent +``` + +نتیجه: + +``` text +┌─────────────parent─┐ +│ 590398848891879423 │ +└────────────────────┘ +``` + +## اچ 3 {#h3tostring} + +تبدیل نمایندگی هیندکس از شاخص به نمایندگی رشته. + +``` sql +h3ToString(index) +``` + +**پارامترها** + +- `index` — Hexagon index number. Type: [UInt64](../../sql_reference/data_types/int_uint.md). + +**مقادیر بازگشتی** + +- نمایندگی رشته از شاخص اچ 3. نوع: [رشته](../../sql_reference/data_types/string.md). + +**مثال** + +پرسوجو: + +``` sql +SELECT h3ToString(617420388352917503) as h3_string +``` + +نتیجه: + +``` text +┌─h3_string───────┐ +│ 89184926cdbffff │ +└─────────────────┘ +``` + +## استراینگتوه3 {#stringtoh3} + +تبدیل رشته به نمایندگی h3index (uint64) نمایندگی. + +``` sql +stringToH3(index_str) +``` + +**پارامترها** + +- `index_str` — String representation of the H3 index. Type: [رشته](../../sql_reference/data_types/string.md). + +**مقادیر بازگشتی** + +- عدد شاخص شش گوش. بازده 0 در خطا. نوع: [UInt64](../../sql_reference/data_types/int_uint.md). + +**مثال** + +پرسوجو: + +``` sql +SELECT stringToH3('89184926cc3ffff') as index +``` + +نتیجه: + +``` text +┌──────────────index─┐ +│ 617420388351344639 │ +└────────────────────┘ +``` + +## انتقال انرژی 3 {#h3getresolution} + +بازگرداندن وضوح از شاخص. + +**نحو** + +``` sql +h3GetResolution(index) +``` + +**پارامترها** + +- `index` — Hexagon index number. Type: [UInt64](../../sql_reference/data_types/int_uint.md). + +**مقادیر بازگشتی** + +- وضوح صفحه اول. گستره: `[0, 15]`. نوع: [UInt8](../../sql_reference/data_types/int_uint.md). + +**مثال** + +پرسوجو: + +``` sql +SELECT h3GetResolution(617420388352917503) as res +``` + +نتیجه: + +``` text +┌─res─┐ +│ 9 │ +└─────┘ +``` + +[مقاله اصلی](https://clickhouse.tech/docs/en/query_language/functions/geo/) diff --git a/docs/fa/sql_reference/functions/hash_functions.md b/docs/fa/sql_reference/functions/hash_functions.md new file mode 100644 index 00000000000..71ca3660ccf --- /dev/null +++ b/docs/fa/sql_reference/functions/hash_functions.md @@ -0,0 +1,446 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 50 +toc_title: "\u0647\u0634" +--- + +# توابع هش {#hash-functions} + +توابع هش را می توان برای زدن شبه تصادفی قطعی از عناصر استفاده می شود. 
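+
+به عنوان یک طرح فرضی از زدن (شافل) شبه تصادفی قطعی, می توان ردیف ها را بر اساس هش یک ستون مرتب کرد؛ ترتیب حاصل برای ورودی یکسان همیشه یکسان است:
+
+``` sql
+SELECT number FROM numbers(5) ORDER BY cityHash64(number)
+```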
+ +## نیم مترد5 {#hash-functions-halfmd5} + +[تفسیر](../../sql_reference/functions/type_conversion_functions.md#type_conversion_functions-reinterpretAsString) تمام پارامترهای ورودی به عنوان رشته ها و محاسبه [MD5](https://en.wikipedia.org/wiki/MD5) ارزش هش برای هر یک از. سپس هش ها را ترکیب می کند و اولین بایت 8 هش رشته حاصل را می گیرد و به عنوان تفسیر می کند `UInt64` به ترتیب بایت بزرگ اندی. + +``` sql +halfMD5(par1, ...) +``` + +تابع نسبتا کند است (5 میلیون رشته کوتاه در هر ثانیه در هر هسته پردازنده). +در نظر بگیرید با استفاده از [سیفون64](#hash_functions-siphash64) تابع به جای. + +**پارامترها** + +تابع طول می کشد تعداد متغیر پارامترهای ورودی. پارامترها می توانند هر یک از [انواع داده های پشتیبانی شده](../../sql_reference/data_types/index.md). + +**مقدار بازگشتی** + +A [UInt64](../../sql_reference/data_types/int_uint.md) نوع داده مقدار هش. + +**مثال** + +``` sql +SELECT halfMD5(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS halfMD5hash, toTypeName(halfMD5hash) AS type +``` + +``` text +┌────────halfMD5hash─┬─type───┐ +│ 186182704141653334 │ UInt64 │ +└────────────────────┴────────┘ +``` + +## MD5 {#hash_functions-md5} + +محاسبه ام دی 5 از یک رشته و مجموعه ای در نتیجه از بایت به عنوان رشته ثابت را برمی گرداند(16). +اگر شما ام دی 5 به طور خاص نیاز ندارد, اما شما نیاز به یک رمزنگاری مناسب و معقول هش 128 بیتی, استفاده از ‘sipHash128’ تابع به جای. +اگر شما می خواهید برای دریافت همان نتیجه به عنوان خروجی توسط ابزار موسسه خدمات مالی, استفاده از پایین تر(سحر و جادو کردن(توسعه هزاره5(بازدید کنندگان))). + +## سیفون64 {#hash_functions-siphash64} + +تولید 64 بیتی [سیفون](https://131002.net/siphash/) مقدار هش. + +``` sql +sipHash64(par1,...) +``` + +این یک تابع هش رمزنگاری است. این کار حداقل سه بار سریع تر از [MD5](#hash_functions-md5) تابع. + +تابع [تفسیر](../../sql_reference/functions/type_conversion_functions.md#type_conversion_functions-reinterpretAsString) تمام پارامترهای ورودی به عنوان رشته و محاسبه مقدار هش برای هر یک از. سپس هش ها را با الگوریتم زیر ترکیب می کند: + +1. پس از هش کردن تمام پارامترهای ورودی, تابع می شود مجموعه ای از رشته هش. +2. تابع عناصر اول و دوم را می گیرد و هش را برای مجموعه ای از این موارد محاسبه می کند. +3. سپس تابع مقدار هش را محاسبه می کند که در مرحله قبل محاسبه می شود و عنصر سوم هش های اولیه را محاسبه می کند و هش را برای مجموعه ای از انها محاسبه می کند. +4. گام قبلی برای تمام عناصر باقی مانده از مجموعه هش اولیه تکرار شده است. + +**پارامترها** + +تابع طول می کشد تعداد متغیر پارامترهای ورودی. پارامترها می توانند هر یک از [انواع داده های پشتیبانی شده](../../sql_reference/data_types/index.md). + +**مقدار بازگشتی** + +A [UInt64](../../sql_reference/data_types/int_uint.md) نوع داده مقدار هش. + +**مثال** + +``` sql +SELECT sipHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS SipHash, toTypeName(SipHash) AS type +``` + +``` text +┌──────────────SipHash─┬─type───┐ +│ 13726873534472839665 │ UInt64 │ +└──────────────────────┴────────┘ +``` + +## سیفون128 {#hash_functions-siphash128} + +محاسبه سیفون از یک رشته. +می پذیرد استدلال رشته از نوع. را برمی گرداند رشته ثابت (16). +متفاوت از سیفون64 در که حالت نهایی صخره نوردی تاشو تنها تا 128 بیت انجام می شود. + +## 4تیهاش64 {#cityhash64} + +تولید 64 بیتی [هشدار داده می شود](https://github.com/google/cityhash) مقدار هش. + +``` sql +cityHash64(par1,...) +``` + +این یک تابع هش غیر رمزنگاری سریع است. با استفاده از الگوریتم سیتیاش برای پارامترهای رشته و اجرای خاص تابع هش غیر رمزنگاری سریع برای پارامترهای با انواع داده های دیگر. 
این تابع از ترکیب کننده سیتیاش برای دریافت نتایج نهایی استفاده می کند. + +**پارامترها** + +تابع طول می کشد تعداد متغیر پارامترهای ورودی. پارامترها می توانند هر یک از [انواع داده های پشتیبانی شده](../../sql_reference/data_types/index.md). + +**مقدار بازگشتی** + +A [UInt64](../../sql_reference/data_types/int_uint.md) نوع داده مقدار هش. + +**مثالها** + +مثال تماس: + +``` sql +SELECT cityHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS CityHash, toTypeName(CityHash) AS type +``` + +``` text +┌─────────────CityHash─┬─type───┐ +│ 12072650598913549138 │ UInt64 │ +└──────────────────────┴────────┘ +``` + +مثال زیر نشان می دهد که چگونه برای محاسبه کنترلی از کل جدول با دقت تا سفارش ردیف: + +``` sql +SELECT groupBitXor(cityHash64(*)) FROM table +``` + +## اینتش32 {#inthash32} + +محاسبه یک کد هش 32 بیتی از هر نوع عدد صحیح. +این یک تابع هش غیر رمزنگاری نسبتا سریع از کیفیت متوسط برای اعداد است. + +## اینتاش64 {#inthash64} + +محاسبه یک کد هش 64 بیتی از هر نوع عدد صحیح. +این کار سریع تر از اینتش32. میانگین کیفیت. + +## SHA1 {#sha1} + +## SHA224 {#sha224} + +## SHA256 {#sha256} + +محاسبه شا-1, شا-224, یا شا-256 از یک رشته و مجموعه ای در نتیجه از بایت به عنوان رشته گرداند(20), رشته ثابت(28), و یا رشته ثابت(32). +تابع کار می کند نسبتا کند (شا-1 پردازش در مورد 5 میلیون رشته کوتاه در هر ثانیه در هر هسته پردازنده, در حالی که شا-224 و شا-256 روند در مورد 2.2 میلیون). +ما توصیه می کنیم با استفاده از این تابع تنها در مواردی که شما نیاز به یک تابع هش خاص و شما می توانید انتخاب کنید. +حتی در این موارد توصیه می شود که از تابع به صورت افلاین استفاده کنید و مقادیر قبل از محاسبه هنگام وارد کردن جدول به جای استفاده در انتخاب ها. + +## نشانی اینترنتی\]) {#urlhashurl-n} + +یک تابع هش غیر رمزنگاری سریع و با کیفیت مناسب برای یک رشته از یک نشانی وب با استفاده از نوعی عادی سازی. +`URLHash(s)` – Calculates a hash from a string without one of the trailing symbols `/`,`?` یا `#` در پایان, اگر در حال حاضر. +`URLHash(s, N)` – Calculates a hash from a string up to the N level in the URL hierarchy, without one of the trailing symbols `/`,`?` یا `#` در پایان, اگر در حال حاضر. +سطح همان است که در هرج و مرج است. این تابع خاص به یاندکس است.متریکا + +## فرمان 64 {#farmhash64} + +تولید 64 بیتی [مزرعه دار](https://github.com/google/farmhash) مقدار هش. + +``` sql +farmHash64(par1, ...) +``` + +تابع با استفاده از `Hash64` روش از همه [روش های موجود](https://github.com/google/farmhash/blob/master/src/farmhash.h). + +**پارامترها** + +تابع طول می کشد تعداد متغیر پارامترهای ورودی. پارامترها می توانند هر یک از [انواع داده های پشتیبانی شده](../../sql_reference/data_types/index.md). + +**مقدار بازگشتی** + +A [UInt64](../../sql_reference/data_types/int_uint.md) نوع داده مقدار هش. + +**مثال** + +``` sql +SELECT farmHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS FarmHash, toTypeName(FarmHash) AS type +``` + +``` text +┌─────────────FarmHash─┬─type───┐ +│ 17790458267262532859 │ UInt64 │ +└──────────────────────┴────────┘ +``` + +## جواهاش {#hash_functions-javahash} + +محاسبه [جواهاش](http://hg.openjdk.java.net/jdk8u/jdk8u/jdk/file/478a4add975b/src/share/classes/java/lang/String.java#l1452) از یک رشته. این تابع هش نه سریع و نه با کیفیت خوب است. تنها دلیل استفاده از این است که این الگوریتم در حال حاضر در سیستم دیگری استفاده می شود و شما باید دقیقا همان نتیجه را محاسبه کنید. + +**نحو** + +``` sql +SELECT javaHash(''); +``` + +**مقدار بازگشتی** + +A `Int32` نوع داده مقدار هش. 
+ +**مثال** + +پرسوجو: + +``` sql +SELECT javaHash('Hello, world!'); +``` + +نتیجه: + +``` text +┌─javaHash('Hello, world!')─┐ +│ -1880044555 │ +└───────────────────────────┘ +``` + +## جواهرشوتف16 {#javahashutf16le} + +محاسبه [جواهاش](http://hg.openjdk.java.net/jdk8u/jdk8u/jdk/file/478a4add975b/src/share/classes/java/lang/String.java#l1452) از یک رشته, فرض کنید که شامل بایت به نمایندگی از یک رشته در رمزگذاری اوت-16ل. + +**نحو** + +``` sql +javaHashUTF16LE(stringUtf16le) +``` + +**پارامترها** + +- `stringUtf16le` — a string in UTF-16LE encoding. + +**مقدار بازگشتی** + +A `Int32` نوع داده مقدار هش. + +**مثال** + +درست پرس و جو با utf-16le کد گذاری رشته است. + +پرسوجو: + +``` sql +SELECT javaHashUTF16LE(convertCharset('test', 'utf-8', 'utf-16le')) +``` + +نتیجه: + +``` text +┌─javaHashUTF16LE(convertCharset('test', 'utf-8', 'utf-16le'))─┐ +│ 3556498 │ +└──────────────────────────────────────────────────────────────┘ +``` + +## هیوهاش {#hash-functions-hivehash} + +محاسبه `HiveHash` از یک رشته. + +``` sql +SELECT hiveHash(''); +``` + +این فقط [جواهاش](#hash_functions-javahash) با کمی نشانه صفر کردن. این تابع در استفاده [زنبورک کندو](https://en.wikipedia.org/wiki/Apache_Hive) برای نسخه های قبل از 3.0. این تابع هش نه سریع و نه با کیفیت خوب است. تنها دلیل استفاده از این است که این الگوریتم در حال حاضر در سیستم دیگری استفاده می شود و شما باید دقیقا همان نتیجه را محاسبه کنید. + +**مقدار بازگشتی** + +A `Int32` نوع داده مقدار هش. + +نوع: `hiveHash`. + +**مثال** + +پرسوجو: + +``` sql +SELECT hiveHash('Hello, world!'); +``` + +نتیجه: + +``` text +┌─hiveHash('Hello, world!')─┐ +│ 267439093 │ +└───────────────────────────┘ +``` + +## متروهاش64 {#metrohash64} + +تولید 64 بیتی [متروهاش](http://www.jandrewrogers.com/2015/05/27/metrohash/) مقدار هش. + +``` sql +metroHash64(par1, ...) +``` + +**پارامترها** + +تابع طول می کشد تعداد متغیر پارامترهای ورودی. پارامترها می توانند هر یک از [انواع داده های پشتیبانی شده](../../sql_reference/data_types/index.md). + +**مقدار بازگشتی** + +A [UInt64](../../sql_reference/data_types/int_uint.md) نوع داده مقدار هش. + +**مثال** + +``` sql +SELECT metroHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS MetroHash, toTypeName(MetroHash) AS type +``` + +``` text +┌────────────MetroHash─┬─type───┐ +│ 14235658766382344533 │ UInt64 │ +└──────────────────────┴────────┘ +``` + +## مورد احترام {#jumpconsistenthash} + +محاسبه jumpconsistenthash فرم uint64. +می پذیرد دو استدلال: یک کلید بین 64 نوع و تعداد سطل. بازده int32. +برای کسب اطلاعات بیشتر به لینک مراجعه کنید: [مورد احترام](https://arxiv.org/pdf/1406.2294.pdf) + +## سوفلش2\_32, سوفلشه2\_64 {#murmurhash2-32-murmurhash2-64} + +تولید یک [زمزمه 2](https://github.com/aappleby/smhasher) مقدار هش. + +``` sql +murmurHash2_32(par1, ...) +murmurHash2_64(par1, ...) +``` + +**پارامترها** + +هر دو توابع را به تعداد متغیر از پارامترهای ورودی. پارامترها می توانند هر یک از [انواع داده های پشتیبانی شده](../../sql_reference/data_types/index.md). + +**مقدار بازگشتی** + +- این `murmurHash2_32` تابع را برمی گرداند مقدار هش داشتن [UInt32](../../sql_reference/data_types/int_uint.md) نوع داده. +- این `murmurHash2_64` تابع را برمی گرداند مقدار هش داشتن [UInt64](../../sql_reference/data_types/int_uint.md) نوع داده. 
+ +**مثال** + +``` sql +SELECT murmurHash2_64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS MurmurHash2, toTypeName(MurmurHash2) AS type +``` + +``` text +┌──────────MurmurHash2─┬─type───┐ +│ 11832096901709403633 │ UInt64 │ +└──────────────────────┴────────┘ +``` + +## سوفلش3\_32, سوفلشه3\_64 {#murmurhash3-32-murmurhash3-64} + +تولید یک [سوفلهاش3](https://github.com/aappleby/smhasher) مقدار هش. + +``` sql +murmurHash3_32(par1, ...) +murmurHash3_64(par1, ...) +``` + +**پارامترها** + +هر دو توابع را به تعداد متغیر از پارامترهای ورودی. پارامترها می توانند هر یک از [انواع داده های پشتیبانی شده](../../sql_reference/data_types/index.md). + +**مقدار بازگشتی** + +- این `murmurHash3_32` تابع یک [UInt32](../../sql_reference/data_types/int_uint.md) نوع داده مقدار هش. +- این `murmurHash3_64` تابع یک [UInt64](../../sql_reference/data_types/int_uint.md) نوع داده مقدار هش. + +**مثال** + +``` sql +SELECT murmurHash3_32(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS MurmurHash3, toTypeName(MurmurHash3) AS type +``` + +``` text +┌─MurmurHash3─┬─type───┐ +│ 2152717 │ UInt32 │ +└─────────────┴────────┘ +``` + +## سوفلش3\_128 {#murmurhash3-128} + +تولید 128 بیتی [سوفلهاش3](https://github.com/aappleby/smhasher) مقدار هش. + +``` sql +murmurHash3_128( expr ) +``` + +**پارامترها** + +- `expr` — [عبارتها](../syntax.md#syntax-expressions) بازگشت یک [رشته](../../sql_reference/data_types/string.md)- نوع ارزش. + +**مقدار بازگشتی** + +A [رشته ثابت (16)](../../sql_reference/data_types/fixedstring.md) نوع داده مقدار هش. + +**مثال** + +``` sql +SELECT murmurHash3_128('example_string') AS MurmurHash3, toTypeName(MurmurHash3) AS type +``` + +``` text +┌─MurmurHash3──────┬─type────────────┐ +│ 6�1�4"S5KT�~~q │ FixedString(16) │ +└──────────────────┴─────────────────┘ +``` + +## بیست و 3264 {#hash-functions-xxhash32} + +محاسبه `xxHash` از یک رشته. این است که در دو طعم پیشنهاد, 32 و 64 بیت. + +``` sql +SELECT xxHash32(''); + +OR + +SELECT xxHash64(''); +``` + +**مقدار بازگشتی** + +A `Uint32` یا `Uint64` نوع داده مقدار هش. + +نوع: `xxHash`. + +**مثال** + +پرسوجو: + +``` sql +SELECT xxHash32('Hello, world!'); +``` + +نتیجه: + +``` text +┌─xxHash32('Hello, world!')─┐ +│ 834093149 │ +└───────────────────────────┘ +``` + +**همچنین نگاه کنید به** + +- [معلم](http://cyan4973.github.io/xxHash/). + +[مقاله اصلی](https://clickhouse.tech/docs/en/query_language/functions/hash_functions/) diff --git a/docs/fa/sql_reference/functions/higher_order_functions.md b/docs/fa/sql_reference/functions/higher_order_functions.md new file mode 100644 index 00000000000..44e7469a5f7 --- /dev/null +++ b/docs/fa/sql_reference/functions/higher_order_functions.md @@ -0,0 +1,264 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 57 +toc_title: "\u0628\u0627\u0644\u0627\u062A\u0631 \u0633\u0641\u0627\u0631\u0634" +--- + +# توابع مرتبه بالاتر {#higher-order-functions} + +## `->` اپراتور, لامبدا (پارامترهای, اکسپر) تابع {#operator-lambdaparams-expr-function} + +Allows describing a lambda function for passing to a higher-order function. The left side of the arrow has a formal parameter, which is any ID, or multiple formal parameters – any IDs in a tuple. The right side of the arrow has an expression that can use these formal parameters, as well as any table columns. + +مثالها: `x -> 2 * x, str -> str != Referer.` + +توابع مرتبه بالاتر تنها می تواند توابع لامبدا به عنوان استدلال عملکردی خود را قبول. 
+ +یک تابع لامبدا که استدلال های متعدد می پذیرد را می توان به یک تابع مرتبه بالاتر منتقل می شود. در این مورد, تابع مرتبه بالاتر به تصویب می رسد چند مجموعه ای از طول یکسان است که این استدلال به مطابقت. + +برای برخی از توابع مانند [پیاده کردن](#higher_order_functions-array-count) یا [ارریسوم](#higher_order_functions-array-count), استدلال اول (تابع لامبدا) را می توان حذف. در این مورد, نقشه برداری یکسان فرض شده است. + +یک تابع لامبدا را نمی توان برای توابع زیر حذف کرد: + +- [اررایماپ](#higher_order_functions-array-map) +- [شلوار لی](#higher_order_functions-array-filter) +- [شلوار لی](#higher_order_functions-array-fill) +- [نمایش سایت](#higher_order_functions-array-reverse-fill) +- [لرزش دستگاه](#higher_order_functions-array-split) +- [نمایش سایت](#higher_order_functions-array-reverse-split) +- [دریافیرست](#higher_order_functions-array-first) +- [نمایش سایت](#higher_order_functions-array-first-index) + +### arrayMap(func, arr1, …) {#higher_order_functions-array-map} + +بازگرداندن مجموعه ای از برنامه اصلی `func` عملکرد به هر عنصر در `arr` صف کردن. + +مثالها: + +``` sql +SELECT arrayMap(x -> (x + 2), [1, 2, 3]) as res; +``` + +``` text +┌─res─────┐ +│ [3,4,5] │ +└─────────┘ +``` + +مثال زیر نشان می دهد که چگونه برای ایجاد یک تاپل از عناصر از مجموعه های مختلف: + +``` sql +SELECT arrayMap((x, y) -> (x, y), [1, 2, 3], [4, 5, 6]) AS res +``` + +``` text +┌─res─────────────────┐ +│ [(1,4),(2,5),(3,6)] │ +└─────────────────────┘ +``` + +توجه داشته باشید که استدلال اول (تابع لامبدا) را نمی توان در حذف `arrayMap` تابع. + +### arrayFilter(func, arr1, …) {#higher_order_functions-array-filter} + +بازگرداندن مجموعه ای حاوی تنها عناصر در `arr1` برای کدام `func` چیزی غیر از 0 را برمی گرداند. + +مثالها: + +``` sql +SELECT arrayFilter(x -> x LIKE '%World%', ['Hello', 'abc World']) AS res +``` + +``` text +┌─res───────────┐ +│ ['abc World'] │ +└───────────────┘ +``` + +``` sql +SELECT + arrayFilter( + (i, x) -> x LIKE '%World%', + arrayEnumerate(arr), + ['Hello', 'abc World'] AS arr) + AS res +``` + +``` text +┌─res─┐ +│ [2] │ +└─────┘ +``` + +توجه داشته باشید که استدلال اول (تابع لامبدا) را نمی توان در حذف `arrayFilter` تابع. + +### arrayFill(func, arr1, …) {#higher_order_functions-array-fill} + +پویش از طریق `arr1` از عنصر اول به عنصر گذشته و جایگزین `arr1[i]` توسط `arr1[i - 1]` اگر `func` بازده 0. اولین عنصر `arr1` جایگزین نخواهد شد. + +مثالها: + +``` sql +SELECT arrayFill(x -> not isNull(x), [1, null, 3, 11, 12, null, null, 5, 6, 14, null, null]) AS res +``` + +``` text +┌─res──────────────────────────────┐ +│ [1,1,3,11,12,12,12,5,6,14,14,14] │ +└──────────────────────────────────┘ +``` + +توجه داشته باشید که استدلال اول (تابع لامبدا) را نمی توان در حذف `arrayFill` تابع. + +### arrayReverseFill(func, arr1, …) {#higher_order_functions-array-reverse-fill} + +پویش از طریق `arr1` از عنصر گذشته به عنصر اول و جایگزین `arr1[i]` توسط `arr1[i + 1]` اگر `func` بازده 0. عنصر گذشته از `arr1` جایگزین نخواهد شد. + +مثالها: + +``` sql +SELECT arrayReverseFill(x -> not isNull(x), [1, null, 3, 11, 12, null, null, 5, 6, 14, null, null]) AS res +``` + +``` text +┌─res────────────────────────────────┐ +│ [1,3,3,11,12,5,5,5,6,14,NULL,NULL] │ +└────────────────────────────────────┘ +``` + +توجه داشته باشید که استدلال اول (تابع لامبدا) را نمی توان در حذف `arrayReverseFill` تابع. + +### arraySplit(func, arr1, …) {#higher_order_functions-array-split} + +شکافتن `arr1` به چندین ردیف چه زمانی `func` بازگرداندن چیزی غیر از 0, مجموعه خواهد شد در سمت چپ عنصر تقسیم. مجموعه قبل از اولین عنصر تقسیم نخواهد شد. 
+ +مثالها: + +``` sql +SELECT arraySplit((x, y) -> y, [1, 2, 3, 4, 5], [1, 0, 0, 1, 0]) AS res +``` + +``` text +┌─res─────────────┐ +│ [[1,2,3],[4,5]] │ +└─────────────────┘ +``` + +توجه داشته باشید که استدلال اول (تابع لامبدا) را نمی توان در حذف `arraySplit` تابع. + +### arrayReverseSplit(func, arr1, …) {#higher_order_functions-array-reverse-split} + +شکافتن `arr1` به چندین ردیف چه زمانی `func` بازگرداندن چیزی غیر از 0, مجموعه خواهد شد در سمت راست عنصر تقسیم. مجموعه پس از عنصر گذشته تقسیم نخواهد شد. + +مثالها: + +``` sql +SELECT arrayReverseSplit((x, y) -> y, [1, 2, 3, 4, 5], [1, 0, 0, 1, 0]) AS res +``` + +``` text +┌─res───────────────┐ +│ [[1],[2,3,4],[5]] │ +└───────────────────┘ +``` + +توجه داشته باشید که استدلال اول (تابع لامبدا) را نمی توان در حذف `arraySplit` تابع. + +### arrayCount(\[func,\] arr1, …) {#higher_order_functions-array-count} + +بازگرداندن تعدادی از عناصر در مجموعه ارر که فارک چیزی غیر از گرداند 0. اگر ‘func’ مشخص نشده است, تعداد عناصر غیر صفر گرداند در مجموعه. + +### arrayExists(\[func,\] arr1, …) {#arrayexistsfunc-arr1} + +بازده 1 اگر حداقل یک عنصر در وجود دارد ‘arr’ برای کدام ‘func’ چیزی غیر از 0 را برمی گرداند. در غیر این صورت, باز می گردد 0. + +### arrayAll(\[func,\] arr1, …) {#arrayallfunc-arr1} + +بازده 1 اگر ‘func’ چیزی غیر از 0 را برای تمام عناصر در ‘arr’. در غیر این صورت, باز می گردد 0. + +### arraySum(\[func,\] arr1, …) {#higher-order-functions-array-sum} + +بازگرداندن مجموع ‘func’ ارزشهای خبری عبارتند از: اگر تابع حذف شده است, فقط می گرداند مجموع عناصر مجموعه. + +### arrayFirst(func, arr1, …) {#higher_order_functions-array-first} + +بازگرداندن اولین عنصر در ‘arr1’ تنظیم برای کدام ‘func’ چیزی غیر از 0 را برمی گرداند. + +توجه داشته باشید که استدلال اول (تابع لامبدا) را نمی توان در حذف `arrayFirst` تابع. + +### arrayFirstIndex(func, arr1, …) {#higher_order_functions-array-first-index} + +بازگرداندن شاخص عنصر اول در ‘arr1’ تنظیم برای کدام ‘func’ چیزی غیر از 0 را برمی گرداند. + +توجه داشته باشید که استدلال اول (تابع لامبدا) را نمی توان در حذف `arrayFirstIndex` تابع. + +### arrayCumSum(\[func,\] arr1, …) {#arraycumsumfunc-arr1} + +بازگرداندن مجموعه ای از مبالغ بخشی از عناصر در مجموعه منبع (مجموع در حال اجرا). اگر `func` تابع مشخص شده است, سپس ارزش عناصر مجموعه توسط این تابع قبل از جمع تبدیل. + +مثال: + +``` sql +SELECT arrayCumSum([1, 1, 1, 1]) AS res +``` + +``` text +┌─res──────────┐ +│ [1, 2, 3, 4] │ +└──────────────┘ +``` + +### هشدار داده می شود) {#arraycumsumnonnegativearr} + +مثل `arrayCumSum`, بازگرداندن مجموعه ای از مبالغ بخشی از عناصر در مجموعه منبع (مجموع در حال اجرا). متفاوت `arrayCumSum`, هنگامی که ارزش سپس بازگشت شامل یک مقدار کمتر از صفر, ارزش است جایگزین با صفر و محاسبه بعدی با صفر پارامترهای انجام. به عنوان مثال: + +``` sql +SELECT arrayCumSumNonNegative([1, 1, -4, 1]) AS res +``` + +``` text +┌─res───────┐ +│ [1,2,0,1] │ +└───────────┘ +``` + +### arraySort(\[func,\] arr1, …) {#arraysortfunc-arr1} + +بازگرداندن مجموعه ای به عنوان نتیجه مرتب سازی عناصر `arr1` به ترتیب صعودی. اگر `func` تابع مشخص شده است, مرتب سازی سفارش توسط نتیجه تابع تعیین `func` اعمال شده به عناصر مجموعه (ارریس) + +این [تبدیل شوارتز](https://en.wikipedia.org/wiki/Schwartzian_transform) برای بهبود کارایی مرتب سازی استفاده می شود. + +مثال: + +``` sql +SELECT arraySort((x, y) -> y, ['hello', 'world'], [2, 1]); +``` + +``` text +┌─res────────────────┐ +│ ['world', 'hello'] │ +└────────────────────┘ +``` + +برای کسب اطلاعات بیشتر در مورد `arraySort` روش, دیدن [توابع برای کار با ارریس](array_functions.md#array_functions-sort) بخش. 
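نمونهٔ کوچک دیگری از `arraySort` با لامبدای تک‌آرگومانی: مرتب‌سازی صعودی بر اساس `-x` عملاً ترتیب نزولی بر اساس `x` می‌دهد (خروجی، نتیجهٔ مورد انتظار است):

``` sql
SELECT arraySort(x -> -x, [1, 3, 2]) AS res;
```

``` text
┌─res─────┐
│ [3,2,1] │
└─────────┘
```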
+

### arrayReverseSort(\[func,\] arr1, …) {#arrayreversesortfunc-arr1}

مجموعه‌ای را به عنوان نتیجهٔ مرتب‌سازی عناصر `arr1` به ترتیب نزولی برمی‌گرداند. اگر تابع `func` مشخص شده باشد، ترتیب مرتب‌سازی بر اساس نتیجهٔ اعمال `func` بر عناصر آرایه(ها) تعیین می‌شود.

مثال:

``` sql
SELECT arrayReverseSort((x, y) -> y, ['hello', 'world'], [2, 1]) as res;
```

``` text
┌─res───────────────┐
│ ['hello','world'] │
└───────────────────┘
```

برای کسب اطلاعات بیشتر دربارهٔ روش `arrayReverseSort`، بخش [توابع برای کار با آرایه‌ها](array_functions.md#array_functions-reverse-sort) را ببینید.

[مقاله اصلی](https://clickhouse.tech/docs/en/query_language/functions/higher_order_functions/) diff --git a/docs/fa/sql_reference/functions/in_functions.md b/docs/fa/sql_reference/functions/in_functions.md new file mode 100644 index 00000000000..5beb072fad6 --- /dev/null +++ b/docs/fa/sql_reference/functions/in_functions.md @@ -0,0 +1,27 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 60 +toc_title: "\u0627\u062C\u0631\u0627\u06CC \u0627\u067E\u0631\u0627\u062A\u0648\u0631\
 \ \u062F\u0631"
---

# توابع برای پیاده‌سازی اپراتور IN {#functions-for-implementing-the-in-operator}

## in, notIn, globalIn, globalNotIn {#in-functions}

بخش [اپراتورهای IN](../statements/select.md#select-in-operators) را ببینید.

## tuple(x, y, …), operator (x, y, …) {#tuplex-y-operator-x-y}

تابعی که امکان گروه‌بندی چند ستون را فراهم می‌کند.
For columns with the types T1, T2, …, it returns a Tuple(T1, T2, …) type tuple containing these columns. There is no cost to execute the function.
تاپل‌ها معمولاً به عنوان مقادیر میانی برای آرگومان اپراتورهای IN یا برای ساختن فهرست پارامترهای رسمی توابع لامبدا به کار می‌روند. تاپل‌ها را نمی‌توان در جدول نوشت.

## tupleElement(tuple, n), اپراتور x.N {#tupleelementtuple-n-operator-x-n}

تابعی که امکان گرفتن یک ستون از تاپل را فراهم می‌کند.
‘N’ شاخص ستون است و از 1 شروع می‌شود. ‘N’ باید ثابت و یک عدد صحیح مثبت نه بزرگ‌تر از اندازهٔ تاپل باشد.
هیچ هزینه‌ای برای اجرای این تابع وجود ندارد.

[مقاله اصلی](https://clickhouse.tech/docs/en/query_language/functions/in_functions/) diff --git a/docs/fa/sql_reference/functions/index.md b/docs/fa/sql_reference/functions/index.md new file mode 100644 index 00000000000..9d9679fca26 --- /dev/null +++ b/docs/fa/sql_reference/functions/index.md @@ -0,0 +1,74 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_folder_title: Functions +toc_priority: 32 +toc_title: "\u0645\u0639\u0631\u0641\u06CC" +---

# توابع {#functions}

حداقل\* دو نوع تابع وجود دارد: توابع معمولی (که به‌سادگی “functions” نامیده می‌شوند) and aggregate functions. These are completely different concepts. Regular functions work as if they are applied to each row separately (for each row, the result of the function doesn't depend on the other rows). Aggregate functions accumulate a set of values from various rows (i.e. they depend on the entire set of rows).

در این بخش دربارهٔ توابع معمولی صحبت می‌کنیم. برای توابع تجمعی، بخش “Aggregate functions” را ببینید.

\* - نوع سومی از تابع نیز وجود دارد که تابع ‘arrayJoin’ به آن تعلق دارد؛ توابع جدول را نیز می‌توان جداگانه ذکر کرد.\*

## تایپ قوی {#strong-typing}

در مقابل SQL استاندارد، ClickHouse تایپ قوی دارد؛ به عبارت دیگر، تبدیل ضمنی بین انواع انجام نمی‌دهد. هر تابع برای مجموعهٔ خاصی از انواع کار می‌کند. این یعنی گاهی لازم است از توابع تبدیل نوع استفاده کنید.
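به عنوان یک طرح کوچک، جمع یک رشته با عدد بدون تبدیل صریح خطا می‌دهد، ولی با یک تابع تبدیل نوع مانند `toUInt8` کار می‌کند (خروجی، نتیجهٔ مورد انتظار است):

``` sql
-- SELECT '1' + 1;  -- خطا می‌دهد، چون تبدیل ضمنی String به عدد وجود ندارد
SELECT toUInt8('1') + 1 AS res;
```

``` text
┌─res─┐
│   2 │
└─────┘
```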
+

## حذف زیرعبارت‌های مشترک {#common-subexpression-elimination}

همهٔ عبارت‌های یک پرس‌وجو که AST یکسان دارند (همان رکورد یا همان نتیجهٔ تجزیهٔ نحوی) دارای مقدار یکسان در نظر گرفته می‌شوند. چنین عبارت‌هایی یک بار ارزیابی و اجرا می‌شوند. زیرپرس‌وجوهای یکسان نیز به همین روش حذف می‌شوند.

## انواع نتایج {#types-of-results}

همهٔ توابع فقط یک مقدار را به عنوان نتیجه برمی‌گردانند (نه چند مقدار و نه صفر مقدار). نوع نتیجه معمولاً فقط با انواع آرگومان‌ها تعریف می‌شود، نه با مقادیر آن‌ها. استثناها تابع tupleElement (اپراتور a.N) و تابع toFixedString هستند.

## ثابتها {#constants}

برای سادگی، برخی توابع فقط می‌توانند برای بعضی آرگومان‌ها با ثابت‌ها کار کنند. برای مثال، آرگومان راست اپراتور LIKE باید ثابت باشد.
تقریباً همهٔ توابع برای آرگومان‌های ثابت، یک ثابت برمی‌گردانند. استثنا توابعی هستند که اعداد تصادفی تولید می‌کنند.
تابع ‘now’ برای پرس‌وجوهایی که در زمان‌های مختلف اجرا شده‌اند مقادیر متفاوتی برمی‌گرداند، اما نتیجه ثابت در نظر گرفته می‌شود، چون ثابت بودن فقط در محدودهٔ یک پرس‌وجو اهمیت دارد.
یک عبارت ثابت نیز ثابت در نظر گرفته می‌شود (برای مثال، نیمهٔ راست اپراتور LIKE را می‌توان از چند ثابت ساخت).

توابع ممکن است برای آرگومان‌های ثابت و غیرثابت به روش‌های متفاوتی پیاده‌سازی شده باشند (کد متفاوتی اجرا می‌شود)، اما نتیجه برای یک ثابت و برای یک ستون واقعی که فقط همان مقدار را دارد باید با هم مطابقت داشته باشد.

## پردازش پوچ {#null-processing}

توابع رفتارهای زیر را دارند:

- اگر حداقل یکی از آرگومان‌های تابع `NULL` باشد، نتیجهٔ تابع نیز `NULL` است.
- رفتار ویژه‌ای که جداگانه در توضیحات هر تابع مشخص می‌شود. در کد منبع ClickHouse این توابع `UseDefaultImplementationForNulls=false` دارند.

## پایداری {#constancy}

Functions can't change the values of their arguments – any changes are returned as the result. Thus, the result of calculating separate functions does not depend on the order in which the functions are written in the query.

## مدیریت خطا {#error-handling}

برخی توابع در صورت نامعتبر بودن داده ممکن است استثنا پرتاب کنند. در این صورت پرس‌وجو لغو می‌شود و متن خطا به کلاینت برگردانده می‌شود. در پردازش توزیع‌شده، وقتی استثنایی در یکی از سرورها رخ دهد، سرورهای دیگر نیز تلاش می‌کنند پرس‌وجو را لغو کنند.

## ارزیابی عبارت‌های آرگومان {#evaluation-of-argument-expressions}

تقریباً در همهٔ زبان‌های برنامه‌نویسی، ممکن است یکی از آرگومان‌ها برای اپراتورهای خاص ارزیابی نشود؛ این معمولاً دربارهٔ اپراتورهای `&&`، `||` و `?:` صادق است.
اما در ClickHouse، آرگومان‌های توابع (اپراتورها) همیشه ارزیابی می‌شوند. دلیلش این است که کل بخش‌های ستون‌ها یک‌جا ارزیابی می‌شوند، نه اینکه هر سطر جداگانه محاسبه شود.

## انجام توابع برای پردازش پرس و جو توزیع شده {#performing-functions-for-distributed-query-processing}

در پردازش پرس‌وجوی توزیع‌شده، تا حد ممکن مراحل پردازش پرس‌وجو روی سرورهای راه دور انجام می‌شود و بقیهٔ مراحل (ادغام نتایج میانی و هر چه پس از آن) روی سرور درخواست‌کننده اجرا می‌شود.

این یعنی توابع ممکن است روی سرورهای مختلف اجرا شوند.
برای مثال در پرس‌وجوی `SELECT f(sum(g(x))) FROM distributed_table GROUP BY h(y)`:

- اگر `distributed_table` حداقل دو شارد داشته باشد، توابع ‘g’ و ‘h’ روی سرورهای راه دور و تابع ‘f’ روی سرور درخواست‌کننده اجرا می‌شوند.
- اگر `distributed_table` فقط یک شارد داشته باشد، همهٔ توابع ‘f’، ‘g’ و ‘h’ روی سرور همان شارد اجرا می‌شوند.

نتیجهٔ یک تابع معمولاً به این که روی کدام سرور اجرا شده بستگی ندارد، اما گاهی این موضوع اهمیت دارد.
برای مثال، توابعی که با دیکشنری‌ها کار می‌کنند از دیکشنری موجود روی همان سروری استفاده می‌کنند که رویش اجرا می‌شوند.
مثال دیگر تابع `hostName` است که نام سروری را برمی‌گرداند که رویش اجرا می‌شود، تا بتوان در یک پرس‌وجوی `SELECT` بر اساس سرور `GROUP BY` کرد.
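یک طرح فرضی از چنین پرس‌وجویی در ادامه آمده است (نام جدول `distributed_table` فرضی است):

``` sql
-- hostName() روی هر شارد اجرا می‌شود و تعداد سطرهای هر سرور را نشان می‌دهد
SELECT hostName() AS h, count()
FROM distributed_table
GROUP BY h;
```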
+

اگر تابعی در پرس‌وجو روی سرور درخواست‌کننده اجرا می‌شود ولی لازم است روی سرورهای راه دور اجرا شود، می‌توانید آن را درون تابع تجمعی ‘any’ بپیچید یا به یکی از کلیدهای `GROUP BY` اضافه کنید.

[مقاله اصلی](https://clickhouse.tech/docs/en/query_language/functions/) diff --git a/docs/fa/query_language/functions/introspection.md b/docs/fa/sql_reference/functions/introspection.md similarity index 58% rename from docs/fa/query_language/functions/introspection.md rename to docs/fa/sql_reference/functions/introspection.md index bb1d884d15b..6eb75456f08 100644 --- a/docs/fa/query_language/functions/introspection.md +++ b/docs/fa/sql_reference/functions/introspection.md @@ -1,61 +1,64 @@ --- -en_copy: true +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 65 +toc_title: "\u062F\u0631\u0648\u0646 \u0646\u06AF\u0631\u06CC" ---

-# Introspection Functions {#introspection-functions} +# توابع درون‌نگری {#introspection-functions}

-You can use functions described in this chapter to introspect [ELF](https://en.wikipedia.org/wiki/Executable_and_Linkable_Format) and [DWARF](https://en.wikipedia.org/wiki/DWARF) for query profiling. +از توابع شرح‌داده‌شده در این فصل می‌توانید برای درون‌نگری در [ELF](https://en.wikipedia.org/wiki/Executable_and_Linkable_Format) و [DWARF](https://en.wikipedia.org/wiki/DWARF) به منظور پروفایل‌گیری از پرس‌وجو استفاده کنید.

-!!! warning "Warning" - These functions are slow and may impose security considerations. +!!! warning "اخطار" + این توابع کند هستند و ممکن است ملاحظات امنیتی به همراه داشته باشند.

-For proper operation of introspection functions: +برای کارکرد درست توابع درون‌نگری:

-- Install the `clickhouse-common-static-dbg` package. +- بستهٔ `clickhouse-common-static-dbg` را نصب کنید.

-- Set the [allow\_introspection\_functions](../../operations/settings/settings.md#settings-allow_introspection_functions) setting to 1. +- تنظیم [allow\_introspection\_functions](../../operations/settings/settings.md#settings-allow_introspection_functions) را برابر 1 قرار دهید. For security reasons introspection functions are disabled by default.

-ClickHouse saves profiler reports to the [trace\_log](../../operations/system_tables.md#system_tables-trace_log) system table. Make sure the table and profiler are configured properly. +ClickHouse گزارش‌های پروفایلر را در جدول سیستمی [trace\_log](../../operations/system_tables.md#system_tables-trace_log) ذخیره می‌کند. مطمئن شوید جدول و پروفایلر به‌درستی پیکربندی شده‌اند.

-## addressToLine {#addresstoline} +## addressToLine {#addresstoline}

-Converts virtual memory address inside ClickHouse server process to the filename and the line number in ClickHouse source code. +آدرس حافظهٔ مجازی در فرایند سرور ClickHouse را به نام فایل و شماره خط در کد منبع ClickHouse تبدیل می‌کند.

-If you use official ClickHouse packages, you need to install the `clickhouse-common-static-dbg` package. +اگر از بسته‌های رسمی ClickHouse استفاده می‌کنید، باید بستهٔ `clickhouse-common-static-dbg` را نصب کنید.

-**Syntax** +**نحو**

``` sql
addressToLine(address_of_binary_instruction)
```

-**Parameters** +**پارامترها**

-- `address_of_binary_instruction` ([UInt64](../../data_types/int_uint.md)) — Address of instruction in a running process. +- `address_of_binary_instruction` ([UInt64](../../sql_reference/data_types/int_uint.md)) — Address of instruction in a running process.

-**Returned value** +**مقدار بازگشتی**

-- Source code filename and the line number in this file delimited by colon. +- نام فایل کد منبع و شماره خط در آن فایل، جداشده با دونقطه.
For example, `/build/obj-x86_64-linux-gnu/../dbms/Common/ThreadPool.cpp:199`, where `199` is a line number. -- Name of a binary, if the function couldn’t find the debug information. +- نام یک باینری, اگر تابع می تواند اطلاعات اشکال زدایی پیدا کنید. -- Empty string, if the address is not valid. +- رشته خالی, اگر نشانی معتبر نیست. -Type: [String](../../data_types/string.md). +نوع: [رشته](../../sql_reference/data_types/string.md). -**Example** +**مثال** -Enabling introspection functions: +فعال کردن توابع درون گرایی: ``` sql SET allow_introspection_functions=1 ``` -Selecting the first string from the `trace_log` system table: +انتخاب رشته اول از `trace_log` جدول سیستم: ``` sql SELECT * FROM system.trace_log LIMIT 1 \G @@ -73,9 +76,9 @@ query_id: 421b6855-1858-45a5-8f37-f383409d6d72 trace: [140658411141617,94784174532828,94784076370703,94784076372094,94784076361020,94784175007680,140658411116251,140658403895439] ``` -The `trace` field contains the stack trace at the moment of sampling. +این `trace` درست شامل ردیابی پشته در حال حاضر نمونه برداری. -Getting the source code filename and the line number for a single address: +گرفتن نام فایل کد منبع و شماره خط برای یک نشانی واحد: ``` sql SELECT addressToLine(94784076370703) \G @@ -87,7 +90,7 @@ Row 1: addressToLine(94784076370703): /build/obj-x86_64-linux-gnu/../dbms/Common/ThreadPool.cpp:199 ``` -Applying the function to the whole stack trace: +استفاده از تابع به ردیابی کل پشته: ``` sql SELECT @@ -97,7 +100,7 @@ LIMIT 1 \G ``` -The [arrayMap](higher_order_functions.md#higher_order_functions-array-map) function allows to process each individual element of the `trace` array by the `addressToLine` function. The result of this processing you see in the `trace_source_code_lines` column of output. +این [اررایماپ](higher_order_functions.md#higher_order_functions-array-map) تابع اجازه می دهد تا برای پردازش هر عنصر منحصر به فرد از `trace` تنظیم توسط `addressToLine` تابع. در نتیجه این پردازش می بینید در `trace_source_code_lines` ستون خروجی. ``` text Row 1: @@ -112,36 +115,36 @@ trace_source_code_lines: /lib/x86_64-linux-gnu/libpthread-2.27.so /build/glibc-OTsEL5/glibc-2.27/misc/../sysdeps/unix/sysv/linux/x86_64/clone.S:97 ``` -## addressToSymbol {#addresstosymbol} +## افزودن موقعیت {#addresstosymbol} -Converts virtual memory address inside ClickHouse server process to the symbol from ClickHouse object files. +تبدیل آدرس حافظه مجازی در داخل clickhouse سرور روند به نمادی از clickhouse شی فایل های. -**Syntax** +**نحو** ``` sql addressToSymbol(address_of_binary_instruction) ``` -**Parameters** +**پارامترها** -- `address_of_binary_instruction` ([UInt64](../../data_types/int_uint.md)) — Address of instruction in a running process. +- `address_of_binary_instruction` ([UInt64](../../sql_reference/data_types/int_uint.md)) — Address of instruction in a running process. -**Returned value** +**مقدار بازگشتی** -- Symbol from ClickHouse object files. -- Empty string, if the address is not valid. +- نمادی از clickhouse شی فایل های. +- رشته خالی, اگر نشانی معتبر نیست. -Type: [String](../../data_types/string.md). +نوع: [رشته](../../sql_reference/data_types/string.md). 
-**Example** +**مثال** -Enabling introspection functions: +فعال کردن توابع درون گرایی: ``` sql SET allow_introspection_functions=1 ``` -Selecting the first string from the `trace_log` system table: +انتخاب رشته اول از `trace_log` جدول سیستم: ``` sql SELECT * FROM system.trace_log LIMIT 1 \G @@ -159,9 +162,9 @@ query_id: 724028bf-f550-45aa-910d-2af6212b94ac trace: [94138803686098,94138815010911,94138815096522,94138815101224,94138815102091,94138814222988,94138806823642,94138814457211,94138806823642,94138814457211,94138806823642,94138806795179,94138806796144,94138753770094,94138753771646,94138753760572,94138852407232,140399185266395,140399178045583] ``` -The `trace` field contains the stack trace at the moment of sampling. +این `trace` درست شامل ردیابی پشته در حال حاضر نمونه برداری. -Getting a symbol for a single address: +گرفتن نماد برای یک نشانی واحد: ``` sql SELECT addressToSymbol(94138803686098) \G @@ -173,7 +176,7 @@ Row 1: addressToSymbol(94138803686098): _ZNK2DB24IAggregateFunctionHelperINS_20AggregateFunctionSumImmNS_24AggregateFunctionSumDataImEEEEE19addBatchSinglePlaceEmPcPPKNS_7IColumnEPNS_5ArenaE ``` -Applying the function to the whole stack trace: +استفاده از تابع به ردیابی کل پشته: ``` sql SELECT @@ -183,7 +186,7 @@ LIMIT 1 \G ``` -The [arrayMap](higher_order_functions.md#higher_order_functions-array-map) function allows to process each individual element of the `trace` array by the `addressToSymbols` function. The result of this processing you see in the `trace_symbols` column of output. +این [اررایماپ](higher_order_functions.md#higher_order_functions-array-map) تابع اجازه می دهد تا برای پردازش هر عنصر منحصر به فرد از `trace` تنظیم توسط `addressToSymbols` تابع. نتیجه این پردازش شما در دیدن `trace_symbols` ستون خروجی. ``` text Row 1: @@ -209,36 +212,36 @@ start_thread clone ``` -## demangle {#demangle} +## درهم و برهم کردن {#demangle} -Converts a symbol that you can get using the [addressToSymbol](#addresstosymbol) function to the C++ function name. +تبدیل یک نماد است که شما می توانید با استفاده از [افزودن موقعیت](#addresstosymbol) تابع به ج++ نام تابع. -**Syntax** +**نحو** ``` sql demangle(symbol) ``` -**Parameters** +**پارامترها** -- `symbol` ([String](../../data_types/string.md)) — Symbol from an object file. +- `symbol` ([رشته](../../sql_reference/data_types/string.md)) — Symbol from an object file. -**Returned value** +**مقدار بازگشتی** -- Name of the C++ function. -- Empty string if a symbol is not valid. +- نام تابع ج++. +- رشته خالی اگر یک نماد معتبر نیست. -Type: [String](../../data_types/string.md). +نوع: [رشته](../../sql_reference/data_types/string.md). -**Example** +**مثال** -Enabling introspection functions: +فعال کردن توابع درون گرایی: ``` sql SET allow_introspection_functions=1 ``` -Selecting the first string from the `trace_log` system table: +انتخاب رشته اول از `trace_log` جدول سیستم: ``` sql SELECT * FROM system.trace_log LIMIT 1 \G @@ -256,9 +259,9 @@ query_id: 724028bf-f550-45aa-910d-2af6212b94ac trace: [94138803686098,94138815010911,94138815096522,94138815101224,94138815102091,94138814222988,94138806823642,94138814457211,94138806823642,94138814457211,94138806823642,94138806795179,94138806796144,94138753770094,94138753771646,94138753760572,94138852407232,140399185266395,140399178045583] ``` -The `trace` field contains the stack trace at the moment of sampling. +این `trace` زمینه شامل ردیابی پشته در لحظه نمونه برداری. 
-Getting a function name for a single address: +گرفتن نام تابع برای یک نشانی واحد: ``` sql SELECT demangle(addressToSymbol(94138803686098)) \G @@ -270,7 +273,7 @@ Row 1: demangle(addressToSymbol(94138803686098)): DB::IAggregateFunctionHelper > >::addBatchSinglePlace(unsigned long, char*, DB::IColumn const**, DB::Arena*) const ``` -Applying the function to the whole stack trace: +استفاده از تابع به ردیابی کل پشته: ``` sql SELECT @@ -280,7 +283,7 @@ LIMIT 1 \G ``` -The [arrayMap](higher_order_functions.md#higher_order_functions-array-map) function allows to process each individual element of the `trace` array by the `demangle` function. The result of this processing you see in the `trace_functions` column of output. +این [اررایماپ](higher_order_functions.md#higher_order_functions-array-map) تابع اجازه می دهد تا برای پردازش هر عنصر منحصر به فرد از `trace` تنظیم توسط `demangle` تابع. در نتیجه این پردازش می بینید در `trace_functions` ستون خروجی. ``` text Row 1: diff --git a/docs/fa/query_language/functions/ip_address_functions.md b/docs/fa/sql_reference/functions/ip_address_functions.md similarity index 62% rename from docs/fa/query_language/functions/ip_address_functions.md rename to docs/fa/sql_reference/functions/ip_address_functions.md index 23951926f69..fe4a6c7828e 100644 --- a/docs/fa/query_language/functions/ip_address_functions.md +++ b/docs/fa/sql_reference/functions/ip_address_functions.md @@ -1,22 +1,26 @@ --- -en_copy: true +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 55 +toc_title: "\u06A9\u0627\u0631 \u0628\u0627 \u0646\u0634\u0627\u0646\u06CC\u0647\u0627\ + \u06CC \u0627\u06CC\u0646\u062A\u0631\u0646\u062A\u06CC" --- -# Functions for working with IP addresses {#functions-for-working-with-ip-addresses} +# توابع برای کار با نشانی های اینترنتی {#functions-for-working-with-ip-addresses} -## IPv4NumToString(num) {#ipv4numtostringnum} +## اطلاعات دقیق) {#ipv4numtostringnum} -Takes a UInt32 number. Interprets it as an IPv4 address in big endian. Returns a string containing the corresponding IPv4 address in the format A.B.C.d (dot-separated numbers in decimal form). +طول می کشد یک uint32 شماره. به عنوان یک نشانی اینترنتی 4 در اندی بزرگ تفسیر می کند. بازده یک رشته حاوی مربوطه آدرس ipv4 در قالب a. b. c. d (نقطه جدا کردن اعداد در شکل اعشاری). -## IPv4StringToNum(s) {#ipv4stringtonums} +## مدت 4 ساعت) {#ipv4stringtonums} -The reverse function of IPv4NumToString. If the IPv4 address has an invalid format, it returns 0. +عملکرد معکوس ایپو4نومتوسترینگ. اگر نشانی اینترنتی4 دارای یک فرمت نامعتبر, باز می گردد 0. -## IPv4NumToStringClassC(num) {#ipv4numtostringclasscnum} +## اطلاعات دقیق) {#ipv4numtostringclasscnum} -Similar to IPv4NumToString, but using xxx instead of the last octet. +شبیه به ipv4numtostring اما با استفاده از \ به جای گذشته هشت تایی. -Example: +مثال: ``` sql SELECT @@ -43,12 +47,12 @@ LIMIT 10 └────────────────┴───────┘ ``` -Since using ‘xxx’ is highly unusual, this may be changed in the future. We recommend that you don’t rely on the exact format of this fragment. +از زمان استفاده ‘xxx’ بسیار غیر معمول است, این ممکن است در اینده تغییر. ما توصیه می کنیم که شما در قالب دقیق این قطعه تکیه نمی. -### IPv6NumToString(x) {#ipv6numtostringx} +### اطلاعات دقیق) {#ipv6numtostringx} -Accepts a FixedString(16) value containing the IPv6 address in binary format. Returns a string containing this address in text format. -IPv6-mapped IPv4 addresses are output in the format ::ffff:111.222.33.44. 
Examples: +یک رشته ثابت(16) مقدار حاوی نشانی اینترنتی6 را در قالب باینری می پذیرد. بازگرداندن یک رشته حاوی این نشانی در قالب متن. +نشانی های ایپو6-نقشه برداری ایپو4 خروجی در قالب هستند:: افف:111.222.33.44. مثالها: ``` sql SELECT IPv6NumToString(toFixedString(unhex('2A0206B8000000000000000000000011'), 16)) AS addr @@ -112,14 +116,14 @@ LIMIT 10 └────────────────────────────┴────────┘ ``` -## IPv6StringToNum(s) {#ipv6stringtonums} +## مدت 6 ساعت) {#ipv6stringtonums} -The reverse function of IPv6NumToString. If the IPv6 address has an invalid format, it returns a string of null bytes. -HEX can be uppercase or lowercase. +عملکرد معکوس ایپو6نومتوسترینگ. اگر نشانی اینترنتی6 دارای یک فرمت نامعتبر, یک رشته از بایت پوچ را برمی گرداند. +سحر و جادو می تواند بزرگ یا کوچک. ## IPv4ToIPv6(x) {#ipv4toipv6x} -Takes a `UInt32` number. Interprets it as an IPv4 address in [big endian](https://en.wikipedia.org/wiki/Endianness). Returns a `FixedString(16)` value containing the IPv6 address in binary format. Examples: +طول می کشد یک `UInt32` شماره. تفسیر به عنوان یک نشانی اینترنتی4 در [اندی بزرگ](https://en.wikipedia.org/wiki/Endianness). بازگرداندن یک `FixedString(16)` مقدار حاوی نشانی اینترنتی6 در قالب دودویی. مثالها: ``` sql SELECT IPv6NumToString(IPv4ToIPv6(IPv4StringToNum('192.168.0.1'))) AS addr @@ -131,9 +135,9 @@ SELECT IPv6NumToString(IPv4ToIPv6(IPv4StringToNum('192.168.0.1'))) AS addr └────────────────────┘ ``` -## cutIPv6(x, bitsToCutForIPv6, bitsToCutForIPv4) {#cutipv6x-bitstocutforipv6-bitstocutforipv4} +## cutIPv6(x bitsToCutForIPv6, bitsToCutForIPv4) {#cutipv6x-bitstocutforipv6-bitstocutforipv4} -Accepts a FixedString(16) value containing the IPv6 address in binary format. Returns a string containing the address of the specified number of bits removed in text format. For example: +یک رشته ثابت(16) مقدار حاوی نشانی اینترنتی6 را در قالب باینری می پذیرد. بازگرداندن یک رشته حاوی نشانی از تعداد مشخصی از بیت در قالب متن حذف. به عنوان مثال: ``` sql WITH @@ -152,7 +156,7 @@ SELECT ## IPv4CIDRToRange(ipv4, cidr), {#ipv4cidrtorangeipv4-cidr} -Accepts an IPv4 and an UInt8 value containing the [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing). Return a tuple with two IPv4 containing the lower range and the higher range of the subnet. +قبول یک ipv4 و uint8 ارزش شامل [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing). یک تاپل را با دو لیگ4 حاوی محدوده پایین تر و محدوده بالاتر زیر شبکه باز کنید. ``` sql SELECT IPv4CIDRToRange(toIPv4('192.168.5.2'), 16) @@ -164,9 +168,9 @@ SELECT IPv4CIDRToRange(toIPv4('192.168.5.2'), 16) └────────────────────────────────────────────┘ ``` -## IPv6CIDRToRange(ipv6, cidr), {#ipv6cidrtorangeipv6-cidr} +## IPv6CIDRToRange(ipv6 cidr), {#ipv6cidrtorangeipv6-cidr} -Accepts an IPv6 and an UInt8 value containing the CIDR. Return a tuple with two IPv6 containing the lower range and the higher range of the subnet. +قبول یک ipv6 و uint8 ارزش حاوی cidr. یک تاپل را با دو ایپو6 حاوی محدوده پایین تر و محدوده بالاتر زیر شبکه باز کنید. 
``` sql SELECT IPv6CIDRToRange(toIPv6('2001:0db8:0000:85a3:0000:0000:ac1f:8001'), 32); @@ -178,9 +182,9 @@ SELECT IPv6CIDRToRange(toIPv6('2001:0db8:0000:85a3:0000:0000:ac1f:8001'), 32); └────────────────────────────────────────────────────────────────────────┘ ``` -## toIPv4(string) {#toipv4string} +## تایپه 4 (رشته) {#toipv4string} -An alias to `IPv4StringToNum()` that takes a string form of IPv4 address and returns value of [IPv4](../../data_types/domains/ipv4.md) type, which is binary equal to value returned by `IPv4StringToNum()`. +یک نام مستعار برای `IPv4StringToNum()` که طول می کشد یک شکل رشته ای از ایپو4 نشانی و ارزش را برمی گرداند [IPv4](../../sql_reference/data_types/domains/ipv4.md) نوع باینری برابر با مقدار بازگشتی است `IPv4StringToNum()`. ``` sql WITH @@ -210,9 +214,9 @@ SELECT └───────────────────────────────────┴──────────────────────────┘ ``` -## toIPv6(string) {#toipv6string} +## تیپو6 (رشته) {#toipv6string} -An alias to `IPv6StringToNum()` that takes a string form of IPv6 address and returns value of [IPv6](../../data_types/domains/ipv6.md) type, which is binary equal to value returned by `IPv6StringToNum()`. +یک نام مستعار برای `IPv6StringToNum()` که طول می کشد یک شکل رشته ای از ایپو6 نشانی و ارزش را برمی گرداند [IPv6](../../sql_reference/data_types/domains/ipv6.md) نوع باینری برابر با مقدار بازگشتی است `IPv6StringToNum()`. ``` sql WITH @@ -242,4 +246,4 @@ SELECT └───────────────────────────────────┴──────────────────────────────────┘ ``` -[Original article](https://clickhouse.tech/docs/en/query_language/functions/ip_address_functions/) +[مقاله اصلی](https://clickhouse.tech/docs/en/query_language/functions/ip_address_functions/) diff --git a/docs/fa/sql_reference/functions/json_functions.md b/docs/fa/sql_reference/functions/json_functions.md new file mode 100644 index 00000000000..fd5d30eaade --- /dev/null +++ b/docs/fa/sql_reference/functions/json_functions.md @@ -0,0 +1,231 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 56 +toc_title: "\u06A9\u0627\u0631 \u0628\u0627 \u062C\u0627\u0646\u0633\u0648\u0646." +--- + +# توابع برای کار با جانسون {#functions-for-working-with-json} + +در یاندکسمتریکا جیسون توسط کاربران به عنوان پارامترهای جلسه منتقل می شود. برخی از توابع خاص برای کار با این جانسون وجود دارد. (اگر چه در بسیاری از موارد jsons هستند علاوه بر این قبل از پردازش و در نتیجه ارزش ها قرار داده و در ستون جداگانه در خود پردازش فرمت.) همه این توابع در فرضیات قوی در مورد چه جانسون می تواند بر اساس, اما سعی می کنند به عنوان کوچک که ممکن است به کار انجام می شود. + +مفروضات زیر ساخته شده است: + +1. نام فیلد (استدلال تابع) باید ثابت باشد. +2. نام فیلد به نحوی می تواند در جیسون کد گذاری شود. به عنوان مثال: `visitParamHas('{"abc":"def"}', 'abc') = 1` اما `visitParamHas('{"\\u0061\\u0062\\u0063":"def"}', 'abc') = 0` +3. زمینه ها برای در هر سطح تودرتو جستجو, یکسره. اگر زمینه های تطبیق های متعدد وجود دارد, وقوع اول استفاده شده است. +4. JSON ندارد کاراکتر فضای خارج از string literals. + +## ویسیتپراماس (پارامز, نام) {#visitparamhasparams-name} + +بررسی اینکه یک میدان با وجود ‘name’ اسم. + +## ویسیتپرامستراکتینت (پارامز, نام) {#visitparamextractuintparams-name} + +تجزیه ظاهری64 از ارزش این زمینه به نام ‘name’. اگر این یک رشته رشته زمینه, تلاش می کند به تجزیه یک عدد از ابتدای رشته. اگر میدان وجود ندارد, و یا وجود دارد اما حاوی یک عدد نیست, باز می گردد 0. + +## ویزیتپرامستراکتینت (پارامز, نام) {#visitparamextractintparams-name} + +همان int64. 
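این تابع مانند visitParamExtractUInt عمل می‌کند اما مقدار Int64 برمی‌گرداند. نمونهٔ کوچکی از این دو تابع (مقادیر بازگشتی مورد انتظار در کامنت آمده است):

``` sql
SELECT visitParamExtractUInt('{"abc":123}', 'abc');  -- 123
SELECT visitParamExtractInt('{"abc":-123}', 'abc');  -- -123
```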
+ +## اطلاعات دقیق) {#visitparamextractfloatparams-name} + +همان است که برای شناور64. + +## ویسیتپرامسترکتبولبولول (پارامز, نام) {#visitparamextractboolparams-name} + +تجزیه واقعی / ارزش کاذب. نتیجه این است uint8. + +## ویسیتپرمککتراو (پارامز, نام) {#visitparamextractrawparams-name} + +بازگرداندن ارزش یک میدان, از جمله جدا. + +مثالها: + +``` sql +visitParamExtractRaw('{"abc":"\\n\\u0000"}', 'abc') = '"\\n\\u0000"' +visitParamExtractRaw('{"abc":{"def":[1,2,3]}}', 'abc') = '{"def":[1,2,3]}' +``` + +## نام و نام خانوادگی) {#visitparamextractstringparams-name} + +تجزیه رشته در نقل قول دو. ارزش بی نتیجه است. اگر بیم شکست خورده, این یک رشته خالی می گرداند. + +مثالها: + +``` sql +visitParamExtractString('{"abc":"\\n\\u0000"}', 'abc') = '\n\0' +visitParamExtractString('{"abc":"\\u263a"}', 'abc') = '☺' +visitParamExtractString('{"abc":"\\u263"}', 'abc') = '' +visitParamExtractString('{"abc":"hello}', 'abc') = '' +``` + +در حال حاضر هیچ پشتیبانی برای نقاط کد در قالب وجود دارد `\uXXXX\uYYYY` این از هواپیما چند زبانه پایه نیست (به جای اوتو-8 تبدیل می شود). + +توابع زیر بر اساس [سیمدجسون](https://github.com/lemire/simdjson) طراحی شده برای نیازهای پیچیده تر جسون تجزیه. فرض 2 ذکر شده در بالا هنوز هم صدق. + +## هشدار داده می شود) {#isvalidjsonjson} + +چک که رشته گذشت جانسون معتبر است. + +مثالها: + +``` sql +SELECT isValidJSON('{"a": "hello", "b": [-100, 200.0, 300]}') = 1 +SELECT isValidJSON('not a json') = 0 +``` + +## JSONHas(json\[, indices\_or\_keys\]…) {#jsonhasjson-indices-or-keys} + +اگر مقدار در سند جسون وجود داشته باشد, `1` برگردانده خواهد شد. + +اگر این مقدار وجود ندارد, `0` برگردانده خواهد شد. + +مثالها: + +``` sql +SELECT JSONHas('{"a": "hello", "b": [-100, 200.0, 300]}', 'b') = 1 +SELECT JSONHas('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 4) = 0 +``` + +`indices_or_keys` لیستی از استدلال های صفر یا بیشتر است که هر کدام می توانند رشته یا عدد صحیح باشند. + +- رشته = عضو شی دسترسی های کلیدی. +- عدد صحیح مثبت = از ابتدا به عضو / کلید نفر دسترسی پیدا کنید. +- عدد صحیح منفی = دسترسی به عضو / کلید نفر از پایان. + +حداقل شاخص عنصر 1 است. بنابراین عنصر 0 وجود ندارد. + +شما می توانید از اعداد صحیح برای دسترسی به هر دو اشیای جسون ارریس و جسون استفاده کنید. + +بنابراین, مثلا: + +``` sql +SELECT JSONExtractKey('{"a": "hello", "b": [-100, 200.0, 300]}', 1) = 'a' +SELECT JSONExtractKey('{"a": "hello", "b": [-100, 200.0, 300]}', 2) = 'b' +SELECT JSONExtractKey('{"a": "hello", "b": [-100, 200.0, 300]}', -1) = 'b' +SELECT JSONExtractKey('{"a": "hello", "b": [-100, 200.0, 300]}', -2) = 'a' +SELECT JSONExtractString('{"a": "hello", "b": [-100, 200.0, 300]}', 1) = 'hello' +``` + +## JSONLength(json\[, indices\_or\_keys\]…) {#jsonlengthjson-indices-or-keys} + +بازگشت طول یک مجموعه جانسون یا یک شی جانسون. + +اگر مقدار وجود ندارد و یا دارای یک نوع اشتباه, `0` برگردانده خواهد شد. + +مثالها: + +``` sql +SELECT JSONLength('{"a": "hello", "b": [-100, 200.0, 300]}', 'b') = 3 +SELECT JSONLength('{"a": "hello", "b": [-100, 200.0, 300]}') = 2 +``` + +## JSONType(json\[, indices\_or\_keys\]…) {#jsontypejson-indices-or-keys} + +بازگشت به نوع یک مقدار جانسون. + +اگر این مقدار وجود ندارد, `Null` برگردانده خواهد شد. 
+ +مثالها: + +``` sql +SELECT JSONType('{"a": "hello", "b": [-100, 200.0, 300]}') = 'Object' +SELECT JSONType('{"a": "hello", "b": [-100, 200.0, 300]}', 'a') = 'String' +SELECT JSONType('{"a": "hello", "b": [-100, 200.0, 300]}', 'b') = 'Array' +``` + +## JSONExtractUInt(json\[, indices\_or\_keys\]…) {#jsonextractuintjson-indices-or-keys} + +## JSONExtractInt(json\[, indices\_or\_keys\]…) {#jsonextractintjson-indices-or-keys} + +## JSONExtractFloat(json\[, indices\_or\_keys\]…) {#jsonextractfloatjson-indices-or-keys} + +## JSONExtractBool(json\[, indices\_or\_keys\]…) {#jsonextractbooljson-indices-or-keys} + +تجزیه جانسون و استخراج ارزش. این توابع شبیه به `visitParam` توابع. + +اگر مقدار وجود ندارد و یا دارای یک نوع اشتباه, `0` برگردانده خواهد شد. + +مثالها: + +``` sql +SELECT JSONExtractInt('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 1) = -100 +SELECT JSONExtractFloat('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 2) = 200.0 +SELECT JSONExtractUInt('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', -1) = 300 +``` + +## JSONExtractString(json\[, indices\_or\_keys\]…) {#jsonextractstringjson-indices-or-keys} + +تجزیه جانسون و استخراج یک رشته. این تابع شبیه به `visitParamExtractString` توابع. + +اگر مقدار وجود ندارد و یا دارای یک نوع اشتباه, یک رشته خالی بازگردانده خواهد شد. + +ارزش بی نتیجه است. اگر بیم شکست خورده, این یک رشته خالی می گرداند. + +مثالها: + +``` sql +SELECT JSONExtractString('{"a": "hello", "b": [-100, 200.0, 300]}', 'a') = 'hello' +SELECT JSONExtractString('{"abc":"\\n\\u0000"}', 'abc') = '\n\0' +SELECT JSONExtractString('{"abc":"\\u263a"}', 'abc') = '☺' +SELECT JSONExtractString('{"abc":"\\u263"}', 'abc') = '' +SELECT JSONExtractString('{"abc":"hello}', 'abc') = '' +``` + +## JSONExtract(json\[, indices\_or\_keys…\], return\_type) {#jsonextractjson-indices-or-keys-return-type} + +تجزیه یک جسون و استخراج یک مقدار از نوع داده داده داده کلیک. + +این یک تعمیم قبلی است `JSONExtract` توابع. +این به این معنی است +`JSONExtract(..., 'String')` بازده دقیقا همان `JSONExtractString()`, +`JSONExtract(..., 'Float64')` بازده دقیقا همان `JSONExtractFloat()`. + +مثالها: + +``` sql +SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'Tuple(String, Array(Float64))') = ('hello',[-100,200,300]) +SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'Tuple(b Array(Float64), a String)') = ([-100,200,300],'hello') +SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 'Array(Nullable(Int8))') = [-100, NULL, NULL] +SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 4, 'Nullable(Int64)') = NULL +SELECT JSONExtract('{"passed": true}', 'passed', 'UInt8') = 1 +SELECT JSONExtract('{"day": "Thursday"}', 'day', 'Enum8(\'Sunday\' = 0, \'Monday\' = 1, \'Tuesday\' = 2, \'Wednesday\' = 3, \'Thursday\' = 4, \'Friday\' = 5, \'Saturday\' = 6)') = 'Thursday' +SELECT JSONExtract('{"day": 5}', 'day', 'Enum8(\'Sunday\' = 0, \'Monday\' = 1, \'Tuesday\' = 2, \'Wednesday\' = 3, \'Thursday\' = 4, \'Friday\' = 5, \'Saturday\' = 6)') = 'Friday' +``` + +## JSONExtractKeysAndValues(json\[, indices\_or\_keys…\], value\_type) {#jsonextractkeysandvaluesjson-indices-or-keys-value-type} + +پارسه جفت کلید ارزش از یک جانسون که ارزش از نوع داده داده خانه رعیتی هستند. + +مثال: + +``` sql +SELECT JSONExtractKeysAndValues('{"x": {"a": 5, "b": 7, "c": 11}}', 'x', 'Int8') = [('a',5),('b',7),('c',11)]; +``` + +## JSONExtractRaw(json\[, indices\_or\_keys\]…) {#jsonextractrawjson-indices-or-keys} + +بازگرداندن بخشی از جانسون. 
+ +اگر بخش وجود ندارد و یا دارای یک نوع اشتباه, یک رشته خالی بازگردانده خواهد شد. + +مثال: + +``` sql +SELECT JSONExtractRaw('{"a": "hello", "b": [-100, 200.0, 300]}', 'b') = '[-100, 200.0, 300]' +``` + +## JSONExtractArrayRaw(json\[, indices\_or\_keys\]…) {#jsonextractarrayrawjson-indices-or-keys} + +بازگرداندن مجموعه ای با عناصر از مجموعه جانسون,هر یک به عنوان رشته نامحدود نشان. + +اگر بخش وجود ندارد و یا مجموعه ای نیست, مجموعه ای خالی بازگردانده خواهد شد. + +مثال: + +``` sql +SELECT JSONExtractArrayRaw('{"a": "hello", "b": [-100, 200.0, "hello"]}', 'b') = ['-100', '200.0', '"hello"']' +``` + +[مقاله اصلی](https://clickhouse.tech/docs/en/query_language/functions/json_functions/) diff --git a/docs/fa/sql_reference/functions/logical_functions.md b/docs/fa/sql_reference/functions/logical_functions.md new file mode 100644 index 00000000000..47a7de029f0 --- /dev/null +++ b/docs/fa/sql_reference/functions/logical_functions.md @@ -0,0 +1,22 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 37 +toc_title: "\u0645\u0646\u0637\u0642\u06CC" +--- + +# توابع منطقی {#logical-functions} + +توابع منطقی قبول هر نوع عددی, اما بازگشت یک عدد توینت8 برابر 0 یا 1. + +صفر به عنوان یک استدلال در نظر گرفته شده است “false,” در حالی که هر مقدار غیر صفر در نظر گرفته شده است “true”. + +## و, و اپراتور {#and-and-operator} + +## یا اپراتور {#or-or-operator} + +## نه, اپراتور نیست {#not-not-operator} + +## خور {#xor} + +[مقاله اصلی](https://clickhouse.tech/docs/en/query_language/functions/logical_functions/) diff --git a/docs/fa/sql_reference/functions/machine_learning_functions.md b/docs/fa/sql_reference/functions/machine_learning_functions.md new file mode 100644 index 00000000000..34b069cdebd --- /dev/null +++ b/docs/fa/sql_reference/functions/machine_learning_functions.md @@ -0,0 +1,21 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 64 +toc_title: "\u062A\u0648\u0627\u0628\u0639 \u06CC\u0627\u062F\u06AF\u06CC\u0631\u06CC\ + \ \u0645\u0627\u0634\u06CC\u0646" +--- + +# توابع یادگیری ماشین {#machine-learning-functions} + +## ارزیابی (پیش بینی) {#machine_learning_methods-evalmlmethod} + +پیش بینی با استفاده از مدل های رگرسیون نصب شده `evalMLMethod` تابع. مشاهده لینک در `linearRegression`. + +### رگرسیون خطی تصادفی {#stochastic-linear-regression} + +این [تنظیم مقررات](../../sql_reference/aggregate_functions/reference.md#agg_functions-stochasticlinearregression) تابع جمع پیاده سازی روش گرادیان نزولی تصادفی با استفاده از مدل خطی و تابع از دست دادن مرتبه اول. استفاده `evalMLMethod` برای پیش بینی در داده های جدید. + +### رگرسیون لجستیک تصادفی {#stochastic-logistic-regression} + +این [سرکوب مقررات عمومی](../../sql_reference/aggregate_functions/reference.md#agg_functions-stochasticlogisticregression) تابع جمع پیاده سازی روش گرادیان نزولی برای مشکل طبقه بندی دودویی. استفاده `evalMLMethod` برای پیش بینی در داده های جدید. diff --git a/docs/fa/sql_reference/functions/math_functions.md b/docs/fa/sql_reference/functions/math_functions.md new file mode 100644 index 00000000000..37d6ca94c2f --- /dev/null +++ b/docs/fa/sql_reference/functions/math_functions.md @@ -0,0 +1,116 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 44 +toc_title: "\u0631\u06CC\u0627\u0636\u06CC" +--- + +# توابع ریاضی {#mathematical-functions} + +همه توابع بازگشت یک عدد شناور64. 
دقت و صحت نتیجه به حداکثر دقت ممکن است اما نتیجه ممکن است همزمان با ماشین representable شماره نزدیکترین مربوطه عدد حقیقی است. + +## ا() {#e} + +بازگرداندن یک عدد شناور64 است که نزدیک به تعداد الکترونیکی. + +## پی() {#pi} + +Returns a Float64 number that is close to the number π. + +## واردات) {#expx} + +می پذیرد یک استدلال عددی و یک عدد شناور64 نزدیک به توان استدلال را برمی گرداند. + +## هشدار داده می شود) {#logx-lnx} + +یک استدلال عددی را می پذیرد و یک عدد شناور64 را نزدیک به لگاریتم طبیعی استدلال می کند. + +## exp2(x) {#exp2x} + +می پذیرد یک استدلال عددی و یک عدد شناور می گرداند نزدیک به 2 به قدرت ایکس. + +## log2(x) {#log2x} + +می پذیرد یک استدلال عددی و یک عدد جسم شناور64 را برمی گرداند نزدیک به لگاریتم باینری از استدلال. + +## exp10(x) {#exp10x} + +می پذیرد یک استدلال عددی و یک عدد شناور می گرداند نزدیک به 10 به قدرت ایکس. + +## ثبت 10 (ایکس) {#log10x} + +می پذیرد یک استدلال عددی و یک عدد جسم شناور64 نزدیک به لگاریتم اعشاری از استدلال را برمی گرداند. + +## هشدار داده می شود) {#sqrtx} + +می پذیرد یک استدلال عددی و یک عدد شناور می گرداند نزدیک به ریشه مربع از استدلال. + +## هشدار داده می شود) {#cbrtx} + +می پذیرد یک استدلال عددی و یک عدد جسم شناور را برمی گرداند نزدیک به ریشه مکعب استدلال. + +## عارف) {#erfx} + +اگر ‘x’ is non-negative, then erf(x / σ√2) احتمال این که یک متغیر تصادفی داشتن یک توزیع نرمال با انحراف استاندارد است ‘σ’ طول می کشد ارزش است که از مقدار مورد انتظار توسط بیش از هم جدا ‘x’. + +مثال (قانون سه سیگما): + +``` sql +SELECT erf(3 / sqrt(2)) +``` + +``` text +┌─erf(divide(3, sqrt(2)))─┐ +│ 0.9973002039367398 │ +└─────────────────────────┘ +``` + +## erfc(x) {#erfcx} + +قبول یک استدلال عددی و یک عدد شناور را برمی گرداند نزدیک به 1 - دوره (ایکس), اما بدون از دست دادن دقت برای بزرگ ‘x’ ارزشهای خبری عبارتند از: + +## هشدار داده می شود) {#lgammax} + +لگاریتم تابع گاما. + +## ترجما) {#tgammax} + +تابع گاما + +## گناه) {#sinx} + +سینوس. + +## کسینوس (ایکس) {#cosx} + +کسینوس. + +## قهوهای مایل به زرد(ایکس) {#tanx} + +خط مماس. + +## اطلاعات دقیق) {#asinx} + +سینوسی قوس. + +## acos(x) {#acosx} + +قوس کسینوس. + +## هشدار داده می شود) {#atanx} + +مماس قوس. + +## صدای انفجار (ایکس, و), قدرت (ایکس, و) {#powx-y-powerx-y} + +طول می کشد دو استدلال عددی ایکس و و و. گرداند یک عدد جسم شناور64 نزدیک به ایکس به قدرت و. + +## اینتکسپ2 {#intexp2} + +می پذیرد یک استدلال عددی و باز می گردد یک عدد اوینت64 نزدیک به 2 به قدرت ایکس. + +## اینتکسپ10 {#intexp10} + +می پذیرد یک استدلال عددی و باز می گردد یک عدد اوینت64 نزدیک به 10 به قدرت ایکس. + +[مقاله اصلی](https://clickhouse.tech/docs/en/query_language/functions/math_functions/) diff --git a/docs/fa/sql_reference/functions/other_functions.md b/docs/fa/sql_reference/functions/other_functions.md new file mode 100644 index 00000000000..55c44241615 --- /dev/null +++ b/docs/fa/sql_reference/functions/other_functions.md @@ -0,0 +1,1079 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 66 +toc_title: "\u063A\u06CC\u0631\u0647" +--- + +# توابع دیگر {#other-functions} + +## نام میزبان() {#hostname} + +بازگرداندن یک رشته با نام میزبان که این تابع در انجام شد. برای پردازش توزیع شده, این نام میزبان سرور از راه دور است, اگر تابع بر روی یک سرور از راه دور انجام. + +## FQDN {#fqdn} + +بازگرداندن نام دامنه به طور کامل واجد شرایط. + +**نحو** + +``` sql +fqdn(); +``` + +این تابع غیر حساس است. + +**مقدار بازگشتی** + +- رشته با نام دامنه به طور کامل واجد شرایط. + +نوع: `String`. 
+ +**مثال** + +پرسوجو: + +``` sql +SELECT FQDN(); +``` + +نتیجه: + +``` text +┌─FQDN()──────────────────────────┐ +│ clickhouse.ru-central1.internal │ +└─────────────────────────────────┘ +``` + +## basename {#basename} + +عصاره قسمت انتهایی یک رشته پس از بریده بریده و یا ممیز گذشته. این تابع اگر اغلب مورد استفاده برای استخراج نام فایل از یک مسیر. + +``` sql +basename( expr ) +``` + +**پارامترها** + +- `expr` — Expression resulting in a [رشته](../../sql_reference/data_types/string.md) نوع ارزش. همه بک اسلش باید در ارزش حاصل فرار. + +**مقدار بازگشتی** + +یک رشته که شامل: + +- قسمت انتهایی یک رشته پس از بریده بریده و یا ممیز گذشته. + + If the input string contains a path ending with slash or backslash, for example, `/` or `c:\`, the function returns an empty string. + +- رشته اصلی اگر هیچ اسلش یا بک اسلش وجود دارد. + +**مثال** + +``` sql +SELECT 'some/long/path/to/file' AS a, basename(a) +``` + +``` text +┌─a──────────────────────┬─basename('some\\long\\path\\to\\file')─┐ +│ some\long\path\to\file │ file │ +└────────────────────────┴────────────────────────────────────────┘ +``` + +``` sql +SELECT 'some\\long\\path\\to\\file' AS a, basename(a) +``` + +``` text +┌─a──────────────────────┬─basename('some\\long\\path\\to\\file')─┐ +│ some\long\path\to\file │ file │ +└────────────────────────┴────────────────────────────────────────┘ +``` + +``` sql +SELECT 'some-file-name' AS a, basename(a) +``` + +``` text +┌─a──────────────┬─basename('some-file-name')─┐ +│ some-file-name │ some-file-name │ +└────────────────┴────────────────────────────┘ +``` + +## عریض) {#visiblewidthx} + +محاسبه عرض تقریبی در هنگام خروجی ارزش به کنسول در قالب متن (تب از هم جدا). +این تابع توسط سیستم برای اجرای فرمت های زیبا استفاده می شود. + +`NULL` به عنوان یک رشته مربوط به نمایندگی `NULL` داخل `Pretty` فرمتها. + +``` sql +SELECT visibleWidth(NULL) +``` + +``` text +┌─visibleWidth(NULL)─┐ +│ 4 │ +└────────────────────┘ +``` + +## نام کامل) {#totypenamex} + +بازگرداندن یک رشته حاوی نام نوع استدلال گذشت. + +اگر `NULL` به عنوان ورودی به عملکرد منتقل می شود و سپس باز می گردد `Nullable(Nothing)` نوع, که مربوط به داخلی `NULL` نمایندگی در فاحشه خانه. + +## blockSize() {#function-blocksize} + +می شود به اندازه بلوک. +در خانه, نمایش داده شد همیشه در بلوک های اجرا (مجموعه ای از قطعات ستون). این تابع اجازه می دهد تا اندازه بلوک را که شما برای نام برد دریافت کنید. + +## تحقق (ایکس) {#materializex} + +تبدیل ثابت به یک ستون کامل حاوی فقط یک مقدار. +در خانه, ستون کامل و ثابت متفاوت در حافظه نشان. توابع کار متفاوت برای استدلال ثابت و استدلال طبیعی (کد های مختلف اجرا شده است), اگر چه نتیجه این است که تقریبا همیشه همان. این تابع برای اشکال زدایی این رفتار. + +## ignore(…) {#ignore} + +می پذیرد هر استدلال, محتوی `NULL`. همیشه برمی گرداند 0. +با این حال, استدلال هنوز ارزیابی. این را می توان برای معیار استفاده می شود. + +## خواب (ثانیه) {#sleepseconds} + +خواب ‘seconds’ ثانیه در هر بلوک داده ها. شما می توانید یک عدد صحیح یا عدد ممیز شناور را مشخص کنید. + +## خواب (ثانیه) {#sleepeachrowseconds} + +خواب ‘seconds’ ثانیه در هر سطر. شما می توانید یک عدد صحیح یا عدد ممیز شناور را مشخص کنید. + +## متن() {#currentdatabase} + +بازگرداندن نام پایگاه داده فعلی. +شما می توانید این تابع در پارامترهای موتور جدول در یک پرس و جو جدول ایجاد جایی که شما نیاز به مشخص کردن پایگاه داده استفاده. + +## currentUser() {#other-function-currentuser} + +بازده ورود به سایت از کاربر فعلی. ورود کاربر که آغاز پرس و جو بازگردانده خواهد شد در مورد distibuted پرس و جو. + +``` sql +SELECT currentUser(); +``` + +نام مستعار: `user()`, `USER()`. 
+ +**مقادیر بازگشتی** + +- ورود کاربر فعلی. +- ورود کاربر که پرس و جو در صورت پرس و جو از کار افتاده است. + +نوع: `String`. + +**مثال** + +پرسوجو: + +``` sql +SELECT currentUser(); +``` + +نتیجه: + +``` text +┌─currentUser()─┐ +│ default │ +└───────────────┘ +``` + +## اطلاعات دقیق) {#isfinitex} + +قبول float32 و float64 و بازده uint8 برابر با 1 اگر این استدلال بی نهایت است و نه یک نان در غیر این صورت 0 است. + +## اطلاعات دقیق) {#isinfinitex} + +قبول float32 و float64 و بازده uint8 برابر با 1 اگر این استدلال بی نهایت است در غیر این صورت 0 است. توجه داشته باشید که 0 برای نان بازگشت. + +## اطلاعات دقیق {#ifnotfinite} + +بررسی اینکه مقدار ممیز شناور محدود است. + +**نحو** + + ifNotFinite(x,y) + +**پارامترها** + +- `x` — Value to be checked for infinity. Type: [شناور\*](../../sql_reference/data_types/float.md). +- `y` — Fallback value. Type: [شناور\*](../../sql_reference/data_types/float.md). + +**مقدار بازگشتی** + +- `x` اگر `x` محدود است. +- `y` اگر `x` محدود نیست. + +**مثال** + +پرسوجو: + + SELECT 1/0 as infimum, ifNotFinite(infimum,42) + +نتیجه: + + ┌─infimum─┬─ifNotFinite(divide(1, 0), 42)─┐ + │ inf │ 42 │ + └─────────┴───────────────────────────────┘ + +شما می توانید نتیجه مشابه با استفاده از [اپراتور سه تایی](conditional_functions.md#ternary-operator): `isFinite(x) ? x : y`. + +## اطلاعات دقیق) {#isnanx} + +قبول float32 و float64 و بازده uint8 برابر با 1 اگر استدلال این است که یک نان در غیر این صورت 0 است. + +## قابل تنظیم(\[‘hostname’\[, ‘username’\[, ‘password’\]\],\] ‘database’, ‘table’, ‘column’) {#hascolumnintablehostname-username-password-database-table-column} + +می پذیرد رشته ثابت: نام پایگاه داده, نام جدول, و نام ستون. بازگرداندن یک بیان ثابت سنت8 برابر 1 اگر یک ستون وجود دارد, در غیر این صورت 0. اگر پارامتر نام میزبان تنظیم شده است, تست بر روی یک سرور از راه دور اجرا خواهد شد. +تابع می اندازد یک استثنا اگر جدول وجود ندارد. +برای عناصر در یک ساختار داده های تو در تو, تابع چک برای وجود یک ستون. برای ساختار داده های تو در تو خود, بازده تابع 0. + +## بار {#function-bar} + +اجازه می دهد تا ساخت یک نمودار یونیکد هنر. + +`bar(x, min, max, width)` تساوی یک گروه با عرض متناسب با `(x - min)` و برابر با `width` شخصیت زمانی که `x = max`. + +پارامترها: + +- `x` — Size to display. +- `min, max` — Integer constants. The value must fit in `Int64`. +- `width` — Constant, positive integer, can be fractional. + +گروه با دقت به یک هشتم نماد کشیده شده است. + +مثال: + +``` sql +SELECT + toHour(EventTime) AS h, + count() AS c, + bar(c, 0, 600000, 20) AS bar +FROM test.hits +GROUP BY h +ORDER BY h ASC +``` + +``` text +┌──h─┬──────c─┬─bar────────────────┐ +│ 0 │ 292907 │ █████████▋ │ +│ 1 │ 180563 │ ██████ │ +│ 2 │ 114861 │ ███▋ │ +│ 3 │ 85069 │ ██▋ │ +│ 4 │ 68543 │ ██▎ │ +│ 5 │ 78116 │ ██▌ │ +│ 6 │ 113474 │ ███▋ │ +│ 7 │ 170678 │ █████▋ │ +│ 8 │ 278380 │ █████████▎ │ +│ 9 │ 391053 │ █████████████ │ +│ 10 │ 457681 │ ███████████████▎ │ +│ 11 │ 493667 │ ████████████████▍ │ +│ 12 │ 509641 │ ████████████████▊ │ +│ 13 │ 522947 │ █████████████████▍ │ +│ 14 │ 539954 │ █████████████████▊ │ +│ 15 │ 528460 │ █████████████████▌ │ +│ 16 │ 539201 │ █████████████████▊ │ +│ 17 │ 523539 │ █████████████████▍ │ +│ 18 │ 506467 │ ████████████████▊ │ +│ 19 │ 520915 │ █████████████████▎ │ +│ 20 │ 521665 │ █████████████████▍ │ +│ 21 │ 542078 │ ██████████████████ │ +│ 22 │ 493642 │ ████████████████▍ │ +│ 23 │ 400397 │ █████████████▎ │ +└────┴────────┴────────────────────┘ +``` + +## تبدیل {#transform} + +تبدیل یک ارزش با توجه به نقشه برداری به صراحت تعریف شده از برخی از عناصر به دیگر. 
+دو نوع از این تابع وجود دارد: + +### تبدیل(x array\_from, array\_to به طور پیش فرض) {#transformx-array-from-array-to-default} + +`x` – What to transform. + +`array_from` – Constant array of values for converting. + +`array_to` – Constant array of values to convert the values in ‘from’ به. + +`default` – Which value to use if ‘x’ برابر است با هر یک از مقادیر در ‘from’. + +`array_from` و `array_to` – Arrays of the same size. + +انواع: + +`transform(T, Array(T), Array(U), U) -> U` + +`T` و `U` می تواند عددی, رشته,یا تاریخ و یا انواع تاریخ ساعت. +از کجا همان نامه نشان داده شده است (تی یا تو), برای انواع عددی این ممکن است تطبیق انواع, اما انواع که یک نوع رایج. +برای مثال استدلال می توانید نوع int64 در حالی که دوم آرایه(uint16) نوع. + +اگر ‘x’ ارزش به یکی از عناصر در برابر است ‘array\_from’ مجموعه, این بازگرداندن عنصر موجود (که شماره همان) از ‘array\_to’ صف کردن. در غیر این صورت, باز می گردد ‘default’. اگر عناصر تطبیق های متعدد در وجود دارد ‘array\_from’ این یکی از مسابقات را برمی گرداند. + +مثال: + +``` sql +SELECT + transform(SearchEngineID, [2, 3], ['Yandex', 'Google'], 'Other') AS title, + count() AS c +FROM test.hits +WHERE SearchEngineID != 0 +GROUP BY title +ORDER BY c DESC +``` + +``` text +┌─title─────┬──────c─┐ +│ Yandex │ 498635 │ +│ Google │ 229872 │ +│ Other │ 104472 │ +└───────────┴────────┘ +``` + +### تبدیل) {#transformx-array-from-array-to} + +متفاوت از تنوع برای اولین بار در که ‘default’ استدلال حذف شده است. +اگر ‘x’ ارزش به یکی از عناصر در برابر است ‘array\_from’ مجموعه, این بازگرداندن عنصر تطبیق (که شماره همان) از ‘array\_to’ صف کردن. در غیر این صورت, باز می گردد ‘x’. + +انواع: + +`transform(T, Array(T), Array(T)) -> T` + +مثال: + +``` sql +SELECT + transform(domain(Referer), ['yandex.ru', 'google.ru', 'vk.com'], ['www.yandex', 'example.com']) AS s, + count() AS c +FROM test.hits +GROUP BY domain(Referer) +ORDER BY count() DESC +LIMIT 10 +``` + +``` text +┌─s──────────────┬───────c─┐ +│ │ 2906259 │ +│ www.yandex │ 867767 │ +│ ███████.ru │ 313599 │ +│ mail.yandex.ru │ 107147 │ +│ ██████.ru │ 100355 │ +│ █████████.ru │ 65040 │ +│ news.yandex.ru │ 64515 │ +│ ██████.net │ 59141 │ +│ example.com │ 57316 │ +└────────────────┴─────────┘ +``` + +## قالببندی) ایکس() {#formatreadablesizex} + +می پذیرد اندازه (تعداد بایت). بازگرداندن اندازه گرد با پسوند (کیلوبایت, مگابایت, و غیره.) به عنوان یک رشته . + +مثال: + +``` sql +SELECT + arrayJoin([1, 1024, 1024*1024, 192851925]) AS filesize_bytes, + formatReadableSize(filesize_bytes) AS filesize +``` + +``` text +┌─filesize_bytes─┬─filesize───┐ +│ 1 │ 1.00 B │ +│ 1024 │ 1.00 KiB │ +│ 1048576 │ 1.00 MiB │ +│ 192851925 │ 183.92 MiB │ +└────────────────┴────────────┘ +``` + +## کمترین) {#leasta-b} + +بازگرداندن کوچکترین ارزش از یک و ب. + +## بزرگترین (و, ب) {#greatesta-b} + +بازگرداندن بزرگترین ارزش یک و ب. + +## زمان بالا() {#uptime} + +بازگرداندن زمان انجام کار سرور در ثانیه. + +## نسخه() {#version} + +بازگرداندن نسخه از سرور به عنوان یک رشته. + +## منطقهی زمانی() {#timezone} + +بازگرداندن منطقه زمانی از سرور. + +## blockNumber {#blocknumber} + +بازگرداندن تعداد دنباله ای از بلوک داده که در ردیف واقع شده است. + +## رفع موانع {#function-rownumberinblock} + +بازگرداندن تعداد ترتیبی از ردیف در بلوک داده. بلوک های داده های مختلف همیشه محاسبه. + +## بلوک های رونمبرینالیک() {#rownumberinallblocks} + +بازگرداندن تعداد ترتیبی از ردیف در بلوک داده. این تابع تنها بلوک های داده تحت تاثیر قرار می گیرد. + +## همسایه {#neighbor} + +تابع پنجره که دسترسی به یک ردیف در یک افست مشخص شده است که قبل یا بعد از ردیف فعلی یک ستون داده می شود فراهم می کند. 
+ +**نحو** + +``` sql +neighbor(column, offset[, default_value]) +``` + +نتیجه عملکرد بستگی به بلوک های داده تحت تاثیر قرار و منظور از داده ها در بلوک. +اگر شما یک خرده فروشی با سفارش و پاسخ تابع از خارج از خرده فروشی, شما می توانید نتیجه مورد انتظار از. + +**پارامترها** + +- `column` — A column name or scalar expression. +- `offset` — The number of rows forwards or backwards from the current row of `column`. [Int64](../../sql_reference/data_types/int_uint.md). +- `default_value` — Optional. The value to be returned if offset goes beyond the scope of the block. Type of data blocks affected. + +**مقادیر بازگشتی** + +- مقدار برای `column` داخل `offset` فاصله از ردیف فعلی اگر `offset` ارزش خارج از مرزهای بلوک نیست. +- مقدار پیشفرض برای `column` اگر `offset` ارزش مرزهای بلوک خارج است. اگر `default_value` داده می شود و سپس از آن استفاده خواهد شد. + +نوع: نوع بلوک های داده را تحت تاثیر قرار و یا نوع مقدار پیش فرض. + +**مثال** + +پرسوجو: + +``` sql +SELECT number, neighbor(number, 2) FROM system.numbers LIMIT 10; +``` + +نتیجه: + +``` text +┌─number─┬─neighbor(number, 2)─┐ +│ 0 │ 2 │ +│ 1 │ 3 │ +│ 2 │ 4 │ +│ 3 │ 5 │ +│ 4 │ 6 │ +│ 5 │ 7 │ +│ 6 │ 8 │ +│ 7 │ 9 │ +│ 8 │ 0 │ +│ 9 │ 0 │ +└────────┴─────────────────────┘ +``` + +پرسوجو: + +``` sql +SELECT number, neighbor(number, 2, 999) FROM system.numbers LIMIT 10; +``` + +نتیجه: + +``` text +┌─number─┬─neighbor(number, 2, 999)─┐ +│ 0 │ 2 │ +│ 1 │ 3 │ +│ 2 │ 4 │ +│ 3 │ 5 │ +│ 4 │ 6 │ +│ 5 │ 7 │ +│ 6 │ 8 │ +│ 7 │ 9 │ +│ 8 │ 999 │ +│ 9 │ 999 │ +└────────┴──────────────────────────┘ +``` + +این تابع می تواند مورد استفاده قرار گیرد برای محاسبه ارزش متریک در سال بیش از سال: + +پرسوجو: + +``` sql +WITH toDate('2018-01-01') AS start_date +SELECT + toStartOfMonth(start_date + (number * 32)) AS month, + toInt32(month) % 100 AS money, + neighbor(money, -12) AS prev_year, + round(prev_year / money, 2) AS year_over_year +FROM numbers(16) +``` + +نتیجه: + +``` text +┌──────month─┬─money─┬─prev_year─┬─year_over_year─┐ +│ 2018-01-01 │ 32 │ 0 │ 0 │ +│ 2018-02-01 │ 63 │ 0 │ 0 │ +│ 2018-03-01 │ 91 │ 0 │ 0 │ +│ 2018-04-01 │ 22 │ 0 │ 0 │ +│ 2018-05-01 │ 52 │ 0 │ 0 │ +│ 2018-06-01 │ 83 │ 0 │ 0 │ +│ 2018-07-01 │ 13 │ 0 │ 0 │ +│ 2018-08-01 │ 44 │ 0 │ 0 │ +│ 2018-09-01 │ 75 │ 0 │ 0 │ +│ 2018-10-01 │ 5 │ 0 │ 0 │ +│ 2018-11-01 │ 36 │ 0 │ 0 │ +│ 2018-12-01 │ 66 │ 0 │ 0 │ +│ 2019-01-01 │ 97 │ 32 │ 0.33 │ +│ 2019-02-01 │ 28 │ 63 │ 2.25 │ +│ 2019-03-01 │ 56 │ 91 │ 1.62 │ +│ 2019-04-01 │ 87 │ 22 │ 0.25 │ +└────────────┴───────┴───────────┴────────────────┘ +``` + +## تغییر تنظیمات صدا) {#other_functions-runningdifference} + +Calculates the difference between successive row values ​​in the data block. +بازده 0 برای ردیف اول و تفاوت از ردیف قبلی برای هر سطر بعدی. + +نتیجه عملکرد بستگی به بلوک های داده تحت تاثیر قرار و منظور از داده ها در بلوک. +اگر شما یک خرده فروشی با سفارش و پاسخ تابع از خارج از خرده فروشی, شما می توانید نتیجه مورد انتظار از. + +مثال: + +``` sql +SELECT + EventID, + EventTime, + runningDifference(EventTime) AS delta +FROM +( + SELECT + EventID, + EventTime + FROM events + WHERE EventDate = '2016-11-24' + ORDER BY EventTime ASC + LIMIT 5 +) +``` + +``` text +┌─EventID─┬───────────EventTime─┬─delta─┐ +│ 1106 │ 2016-11-24 00:00:04 │ 0 │ +│ 1107 │ 2016-11-24 00:00:05 │ 1 │ +│ 1108 │ 2016-11-24 00:00:05 │ 0 │ +│ 1109 │ 2016-11-24 00:00:09 │ 4 │ +│ 1110 │ 2016-11-24 00:00:10 │ 1 │ +└─────────┴─────────────────────┴───────┘ +``` + +لطفا توجه داشته باشید-اندازه بلوک بر نتیجه تاثیر می گذارد. با هر بلوک جدید `runningDifference` دولت تنظیم مجدد است. 
+ +``` sql +SELECT + number, + runningDifference(number + 1) AS diff +FROM numbers(100000) +WHERE diff != 1 +``` + +``` text +┌─number─┬─diff─┐ +│ 0 │ 0 │ +└────────┴──────┘ +┌─number─┬─diff─┐ +│ 65536 │ 0 │ +└────────┴──────┘ +``` + +``` sql +set max_block_size=100000 -- default value is 65536! + +SELECT + number, + runningDifference(number + 1) AS diff +FROM numbers(100000) +WHERE diff != 1 +``` + +``` text +┌─number─┬─diff─┐ +│ 0 │ 0 │ +└────────┴──────┘ +``` + +## در حال بارگذاری {#runningdifferencestartingwithfirstvalue} + +همان است که برای [عدم پذیرش](./other_functions.md#other_functions-runningdifference), تفاوت ارزش ردیف اول است, ارزش سطر اول بازگشت, و هر سطر بعدی تفاوت از ردیف قبلی را برمی گرداند. + +## هشدار داده می شود) {#macnumtostringnum} + +قبول uint64 شماره. تفسیر به عنوان نشانی مک در اندی بزرگ. بازگرداندن یک رشته حاوی نشانی مک مربوطه را در قالب قلمی: ب: ر. ن:دکتر: ف.ا: ف. ف. (تعداد کولون جدا شده در فرم هگزادسیمال). + +## MACStringToNum(s) {#macstringtonums} + +عملکرد معکوس مک نومتوسترینگ. اگر نشانی مک دارای یک فرمت نامعتبر, باز می گردد 0. + +## درشتنمایی) {#macstringtoouis} + +می پذیرد یک نشانی مک در فرمت قلمی:بیت:ر.ن:دی. دی:اف (تعداد روده بزرگ از هم جدا در فرم هگزادسیمال). بازگرداندن سه هشت تایی اول به عنوان یک عدد ظاهری64. اگر نشانی مک دارای یک فرمت نامعتبر, باز می گردد 0. + +## نوع گیرنده {#getsizeofenumtype} + +بازگرداندن تعدادی از زمینه های در [شمارشی](../../sql_reference/data_types/enum.md). + +``` sql +getSizeOfEnumType(value) +``` + +**پارامترها:** + +- `value` — Value of type `Enum`. + +**مقادیر بازگشتی** + +- تعدادی از زمینه های با `Enum` مقادیر ورودی. +- یک استثنا پرتاب می شود اگر نوع نیست `Enum`. + +**مثال** + +``` sql +SELECT getSizeOfEnumType( CAST('a' AS Enum8('a' = 1, 'b' = 2) ) ) AS x +``` + +``` text +┌─x─┐ +│ 2 │ +└───┘ +``` + +## بلوک سازی {#blockserializedsize} + +بازده اندازه بر روی دیسک (بدون در نظر گرفتن فشرده سازی حساب). + +``` sql +blockSerializedSize(value[, value[, ...]]) +``` + +**پارامترها:** + +- `value` — Any value. + +**مقادیر بازگشتی** + +- تعداد بایت خواهد شد که به دیسک برای بلوک از ارزش های نوشته شده (بدون فشرده سازی). + +**مثال** + +``` sql +SELECT blockSerializedSize(maxState(1)) as x +``` + +``` text +┌─x─┐ +│ 2 │ +└───┘ +``` + +## بدون نام {#tocolumntypename} + +بازگرداندن نام کلاس است که نشان دهنده نوع داده ها از ستون در رم. + +``` sql +toColumnTypeName(value) +``` + +**پارامترها:** + +- `value` — Any type of value. + +**مقادیر بازگشتی** + +- یک رشته با نام کلاس است که برای نمایندگی از استفاده `value` نوع داده در رم. + +**نمونه ای از تفاوت بین`toTypeName ' and ' toColumnTypeName`** + +``` sql +SELECT toTypeName(CAST('2018-01-01 01:02:03' AS DateTime)) +``` + +``` text +┌─toTypeName(CAST('2018-01-01 01:02:03', 'DateTime'))─┐ +│ DateTime │ +└─────────────────────────────────────────────────────┘ +``` + +``` sql +SELECT toColumnTypeName(CAST('2018-01-01 01:02:03' AS DateTime)) +``` + +``` text +┌─toColumnTypeName(CAST('2018-01-01 01:02:03', 'DateTime'))─┐ +│ Const(UInt32) │ +└───────────────────────────────────────────────────────────┘ +``` + +مثال نشان می دهد که `DateTime` نوع داده در حافظه ذخیره می شود به عنوان `Const(UInt32)`. + +## روبنا دامپکول {#dumpcolumnstructure} + +خروجی شرح مفصلی از ساختارهای داده در رم + +``` sql +dumpColumnStructure(value) +``` + +**پارامترها:** + +- `value` — Any type of value. + +**مقادیر بازگشتی** + +- یک رشته توصیف ساختار است که برای نمایندگی از استفاده `value` نوع داده در رم. 
+ +**مثال** + +``` sql +SELECT dumpColumnStructure(CAST('2018-01-01 01:02:03', 'DateTime')) +``` + +``` text +┌─dumpColumnStructure(CAST('2018-01-01 01:02:03', 'DateTime'))─┐ +│ DateTime, Const(size = 1, UInt32(size = 1)) │ +└──────────────────────────────────────────────────────────────┘ +``` + +## نوع قراردادی {#defaultvalueofargumenttype} + +خروجی مقدار پیش فرض برای نوع داده. + +مقادیر پیش فرض برای ستون های سفارشی تعیین شده توسط کاربر را شامل نمی شود. + +``` sql +defaultValueOfArgumentType(expression) +``` + +**پارامترها:** + +- `expression` — Arbitrary type of value or an expression that results in a value of an arbitrary type. + +**مقادیر بازگشتی** + +- `0` برای اعداد. +- رشته خالی برای رشته. +- `ᴺᵁᴸᴸ` برای [Nullable](../../sql_reference/data_types/nullable.md). + +**مثال** + +``` sql +SELECT defaultValueOfArgumentType( CAST(1 AS Int8) ) +``` + +``` text +┌─defaultValueOfArgumentType(CAST(1, 'Int8'))─┐ +│ 0 │ +└─────────────────────────────────────────────┘ +``` + +``` sql +SELECT defaultValueOfArgumentType( CAST(1 AS Nullable(Int8) ) ) +``` + +``` text +┌─defaultValueOfArgumentType(CAST(1, 'Nullable(Int8)'))─┐ +│ ᴺᵁᴸᴸ │ +└───────────────────────────────────────────────────────┘ +``` + +## تکرار {#other-functions-replicate} + +ایجاد مجموعه ای با یک مقدار واحد. + +مورد استفاده برای اجرای داخلی [ارریجین](array_join.md#functions_arrayjoin). + +``` sql +SELECT replicate(x, arr); +``` + +**پارامترها:** + +- `arr` — Original array. ClickHouse creates a new array of the same length as the original and fills it with the value `x`. +- `x` — The value that the resulting array will be filled with. + +**مقدار بازگشتی** + +مجموعه ای پر از ارزش `x`. + +نوع: `Array`. + +**مثال** + +پرسوجو: + +``` sql +SELECT replicate(1, ['a', 'b', 'c']) +``` + +نتیجه: + +``` text +┌─replicate(1, ['a', 'b', 'c'])─┐ +│ [1,1,1] │ +└───────────────────────────────┘ +``` + +## رشته های قابل استفاده {#filesystemavailable} + +بازگرداندن مقدار فضای باقی مانده بر روی سیستم فایل که فایل های پایگاه داده واقع. این است که همیشه کوچکتر از فضای کل رایگان ([بدون پرونده](#filesystemfree)) چرا که برخی از فضا برای سیستم عامل محفوظ می باشد. + +**نحو** + +``` sql +filesystemAvailable() +``` + +**مقدار بازگشتی** + +- مقدار فضای باقی مانده موجود در بایت. + +نوع: [UInt64](../../sql_reference/data_types/int_uint.md). + +**مثال** + +پرسوجو: + +``` sql +SELECT formatReadableSize(filesystemAvailable()) AS "Available space", toTypeName(filesystemAvailable()) AS "Type"; +``` + +نتیجه: + +``` text +┌─Available space─┬─Type───┐ +│ 30.75 GiB │ UInt64 │ +└─────────────────┴────────┘ +``` + +## بدون پرونده {#filesystemfree} + +بازگرداندن مقدار کل فضای رایگان بر روی سیستم فایل که فایل های پایگاه داده واقع. همچنین نگاه کنید به `filesystemAvailable` + +**نحو** + +``` sql +filesystemFree() +``` + +**مقدار بازگشتی** + +- مقدار فضای رایگان در بایت. + +نوع: [UInt64](../../sql_reference/data_types/int_uint.md). + +**مثال** + +پرسوجو: + +``` sql +SELECT formatReadableSize(filesystemFree()) AS "Free space", toTypeName(filesystemFree()) AS "Type"; +``` + +نتیجه: + +``` text +┌─Free space─┬─Type───┐ +│ 32.39 GiB │ UInt64 │ +└────────────┴────────┘ +``` + +## سختی پرونده {#filesystemcapacity} + +بازگرداندن ظرفیت فایل سیستم در بایت. برای ارزیابی [مسیر](../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-path) به دایرکتوری داده ها باید پیکربندی شود. + +**نحو** + +``` sql +filesystemCapacity() +``` + +**مقدار بازگشتی** + +- اطلاعات ظرفیت سیستم فایل در بایت. + +نوع: [UInt64](../../sql_reference/data_types/int_uint.md). 
+ +**مثال** + +پرسوجو: + +``` sql +SELECT formatReadableSize(filesystemCapacity()) AS "Capacity", toTypeName(filesystemCapacity()) AS "Type" +``` + +نتیجه: + +``` text +┌─Capacity──┬─Type───┐ +│ 39.32 GiB │ UInt64 │ +└───────────┴────────┘ +``` + +## پلاکتی {#function-finalizeaggregation} + +طول می کشد دولت از تابع جمع. بازده نتیجه تجمع (دولت نهایی). + +## خرابی اجرا {#function-runningaccumulate} + +طول می کشد کشورهای تابع جمع و یک ستون با ارزش را برمی گرداند, در نتیجه تجمع این کشورها برای مجموعه ای از خطوط بلوک هستند, از اول به خط فعلی. +برای مثال طول می کشد state of aggregate function (به عنوان مثال runningaccumulate(uniqstate(userid))) و برای هر ردیف از بلوک بازگشت نتیجه از مجموع عملکرد در ادغام دولت قبلی تمام ردیف و ردیف جاری است. +بنابراین نتیجه عملکرد بستگی به پارتیشن داده ها به بلوک ها و به ترتیب داده ها در بلوک دارد. + +## جوینت {#joinget} + +تابع شما اجازه می دهد استخراج داده ها از جدول به همان شیوه به عنوان از یک [واژهنامه](../../sql_reference/dictionaries/index.md). + +می شود داده ها از [پیوستن](../../engines/table_engines/special/join.md#creating-a-table) جداول با استفاده از کلید ملحق مشخص. + +فقط پشتیبانی از جداول ایجاد شده با `ENGINE = Join(ANY, LEFT, )` بیانیه. + +**نحو** + +``` sql +joinGet(join_storage_table_name, `value_column`, join_keys) +``` + +**پارامترها** + +- `join_storage_table_name` — an [شناسه](../syntax.md#syntax-identifiers) نشان می دهد که جستجو انجام شده است. شناسه در پایگاه داده به طور پیش فرض جستجو (پارامتر را ببینید `default_database` در فایل پیکربندی). برای نادیده گرفتن پایگاه داده پیش فرض از `USE db_name` یا پایگاه داده و جدول را از طریق جداساز مشخص کنید `db_name.db_table`, مثال را ببینید. +- `value_column` — name of the column of the table that contains required data. +- `join_keys` — list of keys. + +**مقدار بازگشتی** + +را برمی گرداند لیستی از ارزش مطابقت دارد به لیست کلید. + +اگر برخی در جدول منبع وجود ندارد و سپس `0` یا `null` خواهد شد بر اساس بازگشت [ارزشهای خبری عبارتند از:](../../operations/settings/settings.md#join_use_nulls) تنظیمات. + +اطلاعات بیشتر در مورد `join_use_nulls` داخل [پیوستن به عملیات](../../engines/table_engines/special/join.md). + +**مثال** + +جدول ورودی: + +``` sql +CREATE DATABASE db_test +CREATE TABLE db_test.id_val(`id` UInt32, `val` UInt32) ENGINE = Join(ANY, LEFT, id) SETTINGS join_use_nulls = 1 +INSERT INTO db_test.id_val VALUES (1,11)(2,12)(4,13) +``` + +``` text +┌─id─┬─val─┐ +│ 4 │ 13 │ +│ 2 │ 12 │ +│ 1 │ 11 │ +└────┴─────┘ +``` + +پرسوجو: + +``` sql +SELECT joinGet(db_test.id_val,'val',toUInt32(number)) from numbers(4) SETTINGS join_use_nulls = 1 +``` + +نتیجه: + +``` text +┌─joinGet(db_test.id_val, 'val', toUInt32(number))─┐ +│ 0 │ +│ 11 │ +│ 12 │ +│ 0 │ +└──────────────────────────────────────────────────┘ +``` + +## modelEvaluate(model\_name, …) {#function-modelevaluate} + +ارزیابی مدل خارجی. +می پذیرد نام مدل و استدلال مدل. را برمی گرداند شناور64. + +## throwIf(x\[, custom\_message\]) {#throwifx-custom-message} + +پرتاب یک استثنا اگر استدلال غیر صفر است. +\_پیغام سفارشی-پارامتر اختیاری است: یک رشته ثابت, فراهم می کند یک پیغام خطا + +``` sql +SELECT throwIf(number = 3, 'Too many') FROM numbers(10); +``` + +``` text +↙ Progress: 0.00 rows, 0.00 B (0.00 rows/s., 0.00 B/s.) Received exception from server (version 19.14.1): +Code: 395. DB::Exception: Received from localhost:9000. DB::Exception: Too many. +``` + +## هویت {#identity} + +بازگرداندن همان مقدار که به عنوان استدلال خود مورد استفاده قرار گرفت. مورد استفاده برای اشکال زدایی و تست, اجازه می دهد تا به لغو با استفاده از شاخص, و عملکرد پرس و جو از یک اسکن کامل. 
هنگامی که پرس و جو برای استفاده احتمالی از شاخص تجزیه و تحلیل, تجزیه و تحلیل می کند در داخل نگاه نمی `identity` توابع. + +**نحو** + +``` sql +identity(x) +``` + +**مثال** + +پرسوجو: + +``` sql +SELECT identity(42) +``` + +نتیجه: + +``` text +┌─identity(42)─┐ +│ 42 │ +└──────────────┘ +``` + +## درباره ما {#randomascii} + +تولید یک رشته با مجموعه ای تصادفی از [ASCII](https://en.wikipedia.org/wiki/ASCII#Printable_characters) شخصیت های قابل چاپ. + +**نحو** + +``` sql +randomPrintableASCII(length) +``` + +**پارامترها** + +- `length` — Resulting string length. Positive integer. + + If you pass `length < 0`, behavior of the function is undefined. + +**مقدار بازگشتی** + +- رشته با مجموعه ای تصادفی از [ASCII](https://en.wikipedia.org/wiki/ASCII#Printable_characters) شخصیت های قابل چاپ. + +نوع: [رشته](../../sql_reference/data_types/string.md) + +**مثال** + +``` sql +SELECT number, randomPrintableASCII(30) as str, length(str) FROM system.numbers LIMIT 3 +``` + +``` text +┌─number─┬─str────────────────────────────┬─length(randomPrintableASCII(30))─┐ +│ 0 │ SuiCOSTvC0csfABSw=UcSzp2.`rv8x │ 30 │ +│ 1 │ 1Ag NlJ &RCN:*>HVPG;PE-nO"SUFD │ 30 │ +│ 2 │ /"+<"wUTh:=LjJ Vm!c&hI*m#XTfzz │ 30 │ +└────────┴────────────────────────────────┴──────────────────────────────────┘ +``` + +[مقاله اصلی](https://clickhouse.tech/docs/en/query_language/functions/other_functions/) diff --git a/docs/fa/sql_reference/functions/random_functions.md b/docs/fa/sql_reference/functions/random_functions.md new file mode 100644 index 00000000000..fd98d228bc5 --- /dev/null +++ b/docs/fa/sql_reference/functions/random_functions.md @@ -0,0 +1,31 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 51 +toc_title: "\u062A\u0648\u0644\u06CC\u062F \u0627\u0639\u062F\u0627\u062F \u0634\u0628\ + \u0647 \u062A\u0635\u0627\u062F\u0641\u06CC" +--- + +# توابع برای تولید اعداد شبه تصادفی {#functions-for-generating-pseudo-random-numbers} + +ژنراتور غیر رمزنگاری اعداد شبه تصادفی استفاده می شود. + +تمام توابع قبول استدلال صفر و یا یک استدلال. +اگر استدلال به تصویب می رسد, این می تواند هر نوع, و ارزش خود را برای هر چیزی استفاده نمی شود. +تنها هدف از این استدلال این است که برای جلوگیری از حذف خشونت مشترک, به طوری که دو نمونه مختلف از همان تابع بازگشت ستون های مختلف با شماره های مختلف تصادفی. + +## رند {#rand} + +بازده یک شبه تصادفی uint32 شماره به طور مساوی توزیع شده در میان تمام uint32 از نوع اعداد است. +با استفاده از یک خطی congruential ژنراتور. + +## رند64 {#rand64} + +بازده یک شبه تصادفی uint64 شماره به طور مساوی توزیع شده در میان تمام uint64 از نوع اعداد است. +با استفاده از یک ژنراتور همخوان خطی. + +## شرکت رندکونستانت {#randconstant} + +بازگرداندن یک عدد اوینت32 شبه تصادفی, ارزش یکی برای بلوک های مختلف است. + +[مقاله اصلی](https://clickhouse.tech/docs/en/query_language/functions/random_functions/) diff --git a/docs/fa/sql_reference/functions/rounding_functions.md b/docs/fa/sql_reference/functions/rounding_functions.md new file mode 100644 index 00000000000..41ff2de5d3a --- /dev/null +++ b/docs/fa/sql_reference/functions/rounding_functions.md @@ -0,0 +1,190 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 45 +toc_title: "\u06AF\u0631\u062F \u06A9\u0631\u062F\u0646" +--- + +# گرد کردن توابع {#rounding-functions} + +## طبقه (ایکس\]) {#floorx-n} + +بازگرداندن بیشترین تعداد دور است که کمتر از یا مساوی `x`. تعداد دور چند تن از 1/10 و یا نزدیکترین تعداد داده های مناسب نوع اگر 1 / 10 دقیق نیست. 
+‘N’ ثابت عدد صحیح است, پارامتر اختیاری. به طور پیش فرض صفر است, که به معنی به دور به یک عدد صحیح. +‘N’ ممکن است منفی باشد. + +مثالها: `floor(123.45, 1) = 123.4, floor(123.45, -1) = 120.` + +`x` هر نوع عددی است. نتیجه تعدادی از همان نوع است. +برای استدلال عدد صحیح را حس می کند به دور با منفی `N` ارزش (برای غیر منفی `N` تابع هیچ کاری نمی کند). +اگر گرد باعث سرریز (مثلا, کف سازی(-128, -1)), نتیجه اجرای خاص بازگشته است. + +## هشدار داده می شود\]) {#ceilx-n-ceilingx-n} + +بازگرداندن کوچکترین عدد دور است که بیشتر از یا مساوی `x`. در هر راه دیگر, این همان است که `floor` تابع (بالا را ببینید). + +## هشدار داده می شود\]) {#truncx-n-truncatex-n} + +بازگرداندن تعداد دور با بزرگترین ارزش مطلق است که ارزش مطلق کمتر یا مساوی `x`‘s. In every other way, it is the same as the ’floor’ تابع (بالا را ببینید). + +## دور (ایکس\]) {#rounding_functions-round} + +دور یک مقدار به تعداد مشخصی از رقم اعشار. + +تابع نزدیکترین تعداد از سفارش مشخص شده را برمی گرداند. در صورتی که تعداد داده شده است فاصله برابر با شماره های اطراف, تابع با استفاده از گرد کردن بانکدار برای انواع شماره شناور و دور به دور از صفر برای انواع شماره های دیگر. + +``` sql +round(expression [, decimal_places]) +``` + +**پارامترها:** + +- `expression` — A number to be rounded. Can be any [عبارت](../syntax.md#syntax-expressions) بازگشت عددی [نوع داده](../../sql_reference/data_types/index.md#data_types). +- `decimal-places` — An integer value. + - اگر `decimal-places > 0` سپس تابع دور ارزش به سمت راست از نقطه اعشار. + - اگر `decimal-places < 0` سپس تابع دور ارزش به سمت چپ نقطه اعشار. + - اگر `decimal-places = 0` سپس تابع دور ارزش به عدد صحیح. در این مورد استدلال را می توان حذف. + +**مقدار بازگشتی:** + +گرد شماره از همان نوع به عنوان ورودی شماره. + +### مثالها {#examples} + +**مثال استفاده** + +``` sql +SELECT number / 2 AS x, round(x) FROM system.numbers LIMIT 3 +``` + +``` text +┌───x─┬─round(divide(number, 2))─┐ +│ 0 │ 0 │ +│ 0.5 │ 0 │ +│ 1 │ 1 │ +└─────┴──────────────────────────┘ +``` + +**نمونه هایی از گرد کردن** + +گرد کردن به نزدیکترین شماره. + +``` text +round(3.2, 0) = 3 +round(4.1267, 2) = 4.13 +round(22,-1) = 20 +round(467,-2) = 500 +round(-467,-2) = -500 +``` + +گرد کردن بانکدار. + +``` text +round(3.5) = 4 +round(4.5) = 4 +round(3.55, 1) = 3.6 +round(3.65, 1) = 3.6 +``` + +**همچنین نگاه کنید به** + +- [roundBankers](#roundbankers) + +## سرباز {#roundbankers} + +دور یک عدد به یک موقعیت دهدهی مشخص شده است. + +- اگر تعداد گرد کردن در نیمه راه بین دو عدد است, تابع با استفاده از گرد کردن بانکدار. + + Banker's rounding is a method of rounding fractional numbers. When the rounding number is halfway between two numbers, it's rounded to the nearest even digit at the specified decimal position. For example: 3.5 rounds up to 4, 2.5 rounds down to 2. + + It's the default rounding method for floating point numbers defined in [IEEE 754](https://en.wikipedia.org/wiki/IEEE_754#Roundings_to_nearest). The [round](#rounding_functions-round) function performs the same rounding for floating point numbers. The `roundBankers` function also rounds integers the same way, for example, `roundBankers(45, -1) = 40`. + +- در موارد دیگر تابع دور اعداد به نزدیکترین عدد صحیح. + +با استفاده از گرد کردن بانکدار, شما می توانید اثر است که گرد کردن اعداد در نتایج حاصل از جمع و یا کم کردن این اعداد را کاهش می دهد. + +برای مثال مجموع اعداد 1.5, 2.5, 3.5, 4.5 مختلف گرد: + +- بدون گرد کردن: 1.5 + 2.5 + 3.5 + 4.5 = 12. +- گرد کردن بانکدار: 2 + 2 + 4 + 4 = 12. +- گرد کردن به نزدیکترین عدد صحیح: 2 + 3 + 4 + 5 = 14. 
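+
+As a minimal sketch of the comparison above (assuming the standard ClickHouse `round`, `roundBankers`, `floor` and `arrayJoin` functions), the three totals can be reproduced in one query; `floor(x + 0.5)` is used here only to emulate half-up rounding, since `round` itself applies banker's rounding to floating-point values:
+
+``` sql
+SELECT
+    sum(x)               AS exact,    -- 1.5 + 2.5 + 3.5 + 4.5 = 12
+    sum(roundBankers(x)) AS bankers,  -- 2 + 2 + 4 + 4 = 12
+    sum(floor(x + 0.5))  AS half_up   -- 2 + 3 + 4 + 5 = 14
+FROM (SELECT arrayJoin([1.5, 2.5, 3.5, 4.5]) AS x)
+```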
+ +**نحو** + +``` sql +roundBankers(expression [, decimal_places]) +``` + +**پارامترها** + +- `expression` — A number to be rounded. Can be any [عبارت](../syntax.md#syntax-expressions) بازگشت عددی [نوع داده](../../sql_reference/data_types/index.md#data_types). +- `decimal-places` — Decimal places. An integer number. + - `decimal-places > 0` — The function rounds the number to the given position right of the decimal point. Example: `roundBankers(3.55, 1) = 3.6`. + - `decimal-places < 0` — The function rounds the number to the given position left of the decimal point. Example: `roundBankers(24.55, -1) = 20`. + - `decimal-places = 0` — The function rounds the number to an integer. In this case the argument can be omitted. Example: `roundBankers(2.5) = 2`. + +**مقدار بازگشتی** + +ارزش گرد شده توسط روش گرد کردن بانکدار. + +### مثالها {#examples-1} + +**مثال استفاده** + +پرسوجو: + +``` sql + SELECT number / 2 AS x, roundBankers(x, 0) AS b fROM system.numbers limit 10 +``` + +نتیجه: + +``` text +┌───x─┬─b─┐ +│ 0 │ 0 │ +│ 0.5 │ 0 │ +│ 1 │ 1 │ +│ 1.5 │ 2 │ +│ 2 │ 2 │ +│ 2.5 │ 2 │ +│ 3 │ 3 │ +│ 3.5 │ 4 │ +│ 4 │ 4 │ +│ 4.5 │ 4 │ +└─────┴───┘ +``` + +**نمونه هایی از گرد کردن بانکدار** + +``` text +roundBankers(0.4) = 0 +roundBankers(-3.5) = -4 +roundBankers(4.5) = 4 +roundBankers(3.55, 1) = 3.6 +roundBankers(3.65, 1) = 3.6 +roundBankers(10.35, 1) = 10.4 +roundBankers(10.755, 2) = 11,76 +``` + +**همچنین نگاه کنید به** + +- [گرد](#rounding_functions-round) + +## توسعه پایدار2) {#roundtoexp2num} + +می پذیرد تعداد. اگر تعداد کمتر از یک است, باز می گردد 0. در غیر این صورت, این دور تعداد پایین به نزدیکترین (مجموع غیر منفی) درجه دو. + +## طول عمر (تعداد) {#rounddurationnum} + +می پذیرد تعداد. اگر تعداد کمتر از یک است, باز می گردد 0. در غیر این صورت, این دور تعداد را به اعداد از مجموعه: 1, 10, 30, 60, 120, 180, 240, 300, 600, 1200, 1800, 3600, 7200, 18000, 36000. این تابع خاص به یاندکس است.متریکا و مورد استفاده برای اجرای گزارش در طول جلسه. + +## عدد) {#roundagenum} + +می پذیرد تعداد. اگر تعداد کمتر از است 18, باز می گردد 0. در غیر این صورت, این دور تعداد را به یک عدد از مجموعه: 18, 25, 35, 45, 55. این تابع خاص به یاندکس است.متریکا و مورد استفاده برای اجرای گزارش در سن کاربر. + +## roundDown(num arr) {#rounddownnum-arr} + +یک عدد را می پذیرد و به یک عنصر در مجموعه مشخص شده منتقل می کند. اگر مقدار کمتر از پایین ترین حد محدود است, پایین ترین حد بازگشته است. + +[مقاله اصلی](https://clickhouse.tech/docs/en/query_language/functions/rounding_functions/) diff --git a/docs/fa/sql_reference/functions/splitting_merging_functions.md b/docs/fa/sql_reference/functions/splitting_merging_functions.md new file mode 100644 index 00000000000..fdcb7bbac56 --- /dev/null +++ b/docs/fa/sql_reference/functions/splitting_merging_functions.md @@ -0,0 +1,117 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 47 +toc_title: "\u062A\u0642\u0633\u06CC\u0645 \u0648 \u0627\u062F\u063A\u0627\u0645 \u0631\ + \u0634\u062A\u0647 \u0647\u0627 \u0648 \u0627\u0631\u0631\u06CC\u0633" +--- + +# توابع برای تقسیم و ادغام رشته ها و ارریس {#functions-for-splitting-and-merging-strings-and-arrays} + +## اسپلیت بیچار (جداساز) {#splitbycharseparator-s} + +انشعابات یک رشته به بسترهای جدا شده توسط یک شخصیت مشخص شده است. با استفاده از یک رشته ثابت `separator` که متشکل از دقیقا یک شخصیت. +بازگرداندن مجموعه ای از بسترهای انتخاب. بسترهای خالی ممکن است انتخاب شود اگر جدا در ابتدا یا انتهای رشته رخ می دهد, و یا اگر چند جداکننده متوالی وجود دارد. 
+
+**نحو**
+
+``` sql
+splitByChar(separator, s)
+```
+
+**پارامترها**
+
+- `separator` — The separator which should contain exactly one character. [رشته](../../sql_reference/data_types/string.md).
+- `s` — The string to split. [رشته](../../sql_reference/data_types/string.md).
+
+**مقدار بازگشتی**
+
+بازگرداندن آرایه ای از زیررشته های انتخاب شده. زیررشته های خالی ممکن است انتخاب شوند که:
+
+- جداساز در ابتدا یا انتهای رشته رخ می دهد;
+- چندین جداکننده متوالی وجود دارد;
+- رشته اصلی `s` خالی است.
+
+نوع: [Array](../../sql_reference/data_types/array.md) از [رشته](../../sql_reference/data_types/string.md).
+
+**مثال**
+
+``` sql
+SELECT splitByChar(',', '1,2,3,abcde')
+```
+
+``` text
+┌─splitByChar(',', '1,2,3,abcde')─┐
+│ ['1','2','3','abcde'] │
+└─────────────────────────────────┘
+```
+
+## splitByString(separator, s) {#splitbystringseparator-s}
+
+یک رشته را به زیررشته های جدا شده توسط یک رشته تقسیم می کند. از یک رشته ثابت `separator` متشکل از چند شخصیت به عنوان جدا کننده استفاده می کند. اگر رشته `separator` خالی باشد, رشته `s` را به آرایه ای از شخصیت های تک تقسیم می کند.
+
+**نحو**
+
+``` sql
+splitByString(separator, s)
+```
+
+**پارامترها**
+
+- `separator` — The separator. [رشته](../../sql_reference/data_types/string.md).
+- `s` — The string to split. [رشته](../../sql_reference/data_types/string.md).
+
+**مقدار بازگشتی**
+
+بازگرداندن آرایه ای از زیررشته های انتخاب شده. زیررشته های خالی ممکن است انتخاب شوند که:
+
+- جدا کننده غیر خالی در ابتدا یا انتهای رشته رخ می دهد;
+- چند جدا کننده متوالی غیر خالی وجود دارد;
+- رشته اصلی `s` خالی است در حالی که جدا کننده خالی نیست.
+
+نوع: [Array](../../sql_reference/data_types/array.md) از [رشته](../../sql_reference/data_types/string.md).
+
+**مثال**
+
+``` sql
+SELECT splitByString(', ', '1, 2 3, 4,5, abcde')
+```
+
+``` text
+┌─splitByString(', ', '1, 2 3, 4,5, abcde')─┐
+│ ['1','2 3','4,5','abcde'] │
+└───────────────────────────────────────────┘
+```
+
+``` sql
+SELECT splitByString('', 'abcde')
+```
+
+``` text
+┌─splitByString('', 'abcde')─┐
+│ ['a','b','c','d','e'] │
+└────────────────────────────┘
+```
+
+## arrayStringConcat(arr\[, separator\]) {#arraystringconcatarr-separator}
+
+رشته های ذکر شده در آرایه را با جدا کننده به هم می چسباند. ‘separator’ پارامتر اختیاری است: یک رشته ثابت, که به طور پیش فرض یک رشته خالی است.
+رشته را برمی گرداند.
+
+## alphaTokens(s) {#alphatokenss}
+
+زیررشته های متوالی از بایت های محدوده a-z و A-Z را انتخاب می کند. آرایه ای از زیررشته ها را برمی گرداند.
+
+**مثال**
+
+``` sql
+SELECT alphaTokens('abca1abc')
+```
+
+``` text
+┌─alphaTokens('abca1abc')─┐
+│ ['abca','abc'] │
+└─────────────────────────┘
+```
+
+[مقاله اصلی](https://clickhouse.tech/docs/en/query_language/functions/splitting_merging_functions/) diff --git a/docs/fa/sql_reference/functions/string_functions.md b/docs/fa/sql_reference/functions/string_functions.md new file mode 100644 index 00000000000..9a2bedf584e --- /dev/null +++ b/docs/fa/sql_reference/functions/string_functions.md @@ -0,0 +1,489 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 40 +toc_title: "\u06A9\u0627\u0631 \u0628\u0627 \u0631\u0634\u062A\u0647 \u0647\u0627" +---
+
+# توابع برای کار با رشته {#functions-for-working-with-strings}
+
+## خالی {#empty}
+
+بازده 1 برای یک رشته خالی و یا 0 برای یک رشته غیر خالی.
+نتیجه از نوع UInt8 است.
+یک رشته غیر خالی در نظر گرفته می شود اگر شامل حداقل یک بایت باشد, حتی اگر این یک فضا یا یک بایت پوچ باشد.
+این تابع همچنین برای آرایه ها کار می کند.
+
+## notEmpty {#notempty}
+
+بازده 0 برای یک رشته خالی یا 1 برای یک رشته غیر خالی.
+نتیجه از نوع UInt8 است.
+این تابع همچنین برای آرایه ها کار می کند.
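+
+A minimal sketch of the behaviour described above (assuming the documented `empty`/`notEmpty` semantics, including the array overloads):
+
+``` sql
+SELECT
+    empty('')        AS e1,  -- 1: the string has no bytes
+    empty(' ')       AS e2,  -- 0: a space is a byte, so the string is non-empty
+    notEmpty('')     AS n1,  -- 0
+    notEmpty([1, 2]) AS n2   -- 1: the functions also work for arrays
+```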
+ +## طول {#length} + +بازگرداندن طول یک رشته در بایت (نه در شخصیت, و نه در نقاط کد). +نتیجه این نوع uint64. +این تابع همچنین برای ارریس کار می کند. + +## طول 8 {#lengthutf8} + +بازگرداندن طول یک رشته در نقاط کد یونیکد (نه در شخصیت), فرض کنید که رشته شامل مجموعه ای از بایت است که متن کد گذاری شده را تشکیل می دهند. اگر این فرض ملاقات نکرده است, این گرداند برخی از نتیجه (این یک استثنا پرتاب نمی کند). +نتیجه این نوع uint64. + +## \_شروع مجدد {#char-length} + +بازگرداندن طول یک رشته در نقاط کد یونیکد (نه در شخصیت), فرض کنید که رشته شامل مجموعه ای از بایت است که متن کد گذاری شده را تشکیل می دهند. اگر این فرض ملاقات نکرده است, این گرداند برخی از نتیجه (این یک استثنا پرتاب نمی کند). +نتیجه این نوع uint64. + +## \_شخصیت شناسی {#character-length} + +بازگرداندن طول یک رشته در نقاط کد یونیکد (نه در شخصیت), فرض کنید که رشته شامل مجموعه ای از بایت است که متن کد گذاری شده را تشکیل می دهند. اگر این فرض ملاقات نکرده است, این گرداند برخی از نتیجه (این یک استثنا پرتاب نمی کند). +نتیجه این نوع uint64. + +## پایین تر {#lower} + +تبدیل نمادهای اسکی لاتین در یک رشته به حروف کوچک. + +## بالارفتن {#upper} + +تبدیل نمادهای اسکی لاتین در یک رشته به حروف بزرگ. + +## لوراتف8 {#lowerutf8} + +تبدیل یک رشته به حروف کوچک, فرض رشته شامل مجموعه ای از بایت که یک متن کد گذاری شده-8 را تشکیل می دهند. +این زبان را تشخیص نمی دهد. بنابراین برای ترکیه نتیجه ممکن است دقیقا درست باشد. +اگر طول توالی یونایتد-8 بایت برای مورد بالا و پایین تر از یک نقطه کد متفاوت است, نتیجه ممکن است برای این نقطه کد نادرست. +اگر رشته شامل مجموعه ای از بایت است که سخن گفتن نیست-8, سپس رفتار تعریف نشده است. + +## یوتف8 {#upperutf8} + +تبدیل یک رشته به حروف بزرگ, فرض رشته شامل مجموعه ای از بایت که یک متن کد گذاری شده-8 را تشکیل می دهند. +این زبان را تشخیص نمی دهد. بنابراین برای ترکیه نتیجه ممکن است دقیقا درست باشد. +اگر طول توالی یونایتد-8 بایت برای مورد بالا و پایین تر از یک نقطه کد متفاوت است, نتیجه ممکن است برای این نقطه کد نادرست. +اگر رشته شامل مجموعه ای از بایت است که سخن گفتن نیست-8, سپس رفتار تعریف نشده است. + +## اسوالدیدوتف8 {#isvalidutf8} + +بازده 1, اگر مجموعه ای از کلمه در ادامه متن معتبر است-8 کد گذاری, در غیر این صورت 0. + +## تولدیدوتف8 {#tovalidutf8} + +8 کاراکتر نامعتبر را جایگزین می کند `�` اطلاعات دقیق همه در حال اجرا در یک ردیف شخصیت نامعتبر را به یک شخصیت جایگزین فرو ریخت. + +``` sql +toValidUTF8( input_string ) +``` + +پارامترها: + +- input\_string — Any set of bytes represented as the [رشته](../../sql_reference/data_types/string.md) شی نوع داده. + +مقدار بازگشتی: معتبر یونایتد-8 رشته. + +**مثال** + +``` sql +SELECT toValidUTF8('\x61\xF0\x80\x80\x80b') +``` + +``` text +┌─toValidUTF8('a����b')─┐ +│ a�b │ +└───────────────────────┘ +``` + +## تکرار {#repeat} + +تکرار یک رشته را به عنوان چند بار به عنوان مشخص شده و concatenates تکراری ارزش به عنوان یک رشته است. + +**نحو** + +``` sql +repeat(s, n) +``` + +**پارامترها** + +- `s` — The string to repeat. [رشته](../../sql_reference/data_types/string.md). +- `n` — The number of times to repeat the string. [اینترنت](../../sql_reference/data_types/int_uint.md). + +**مقدار بازگشتی** + +تک رشته ای که حاوی رشته است `s` تکرار `n` زمان. اگر `n` \< 1, تابع رشته خالی می گرداند. + +نوع: `String`. + +**مثال** + +پرسوجو: + +``` sql +SELECT repeat('abc', 10) +``` + +نتیجه: + +``` text +┌─repeat('abc', 10)──────────────┐ +│ abcabcabcabcabcabcabcabcabcabc │ +└────────────────────────────────┘ +``` + +## معکوس {#reverse} + +معکوس رشته (به عنوان یک دنباله از بایت). + +## معکوس کردن8 {#reverseutf8} + +معکوس دنباله ای از نقاط کد یونیکد, فرض کنید که رشته شامل مجموعه ای از بایت به نمایندگی از یک متن گفته-8. 
در غیر این صورت, این کار چیز دیگری (این یک استثنا پرتاب نمی). + +## format(pattern, s0, s1, …) {#format} + +قالب بندی الگوی ثابت با رشته ذکر شده در استدلال. `pattern` یک الگوی فرمت پایتون ساده شده است. رشته فرمت شامل “replacement fields” احاطه شده توسط پرانتز فرفری `{}`. هر چیزی که در پرانتز موجود نیست در نظر گرفته شده است متن تحت اللفظی است که بدون تغییر به خروجی کپی شده است. اگر شما نیاز به شامل یک شخصیت بند در متن تحت اللفظی, این را می توان با دو برابر فرار: `{{ '{{' }}` و `{{ '}}' }}`. نام فیلد می تواند اعداد (با شروع از صفر) یا خالی (سپس به عنوان شماره نتیجه درمان می شوند). + +``` sql +SELECT format('{1} {0} {1}', 'World', 'Hello') +``` + +``` text +┌─format('{1} {0} {1}', 'World', 'Hello')─┐ +│ Hello World Hello │ +└─────────────────────────────────────────┘ +``` + +``` sql +SELECT format('{} {}', 'Hello', 'World') +``` + +``` text +┌─format('{} {}', 'Hello', 'World')─┐ +│ Hello World │ +└───────────────────────────────────┘ +``` + +## الحاق {#concat} + +رشته های ذکر شده در استدلال بدون جدا کننده را تصدیق می کند. + +**نحو** + +``` sql +concat(s1, s2, ...) +``` + +**پارامترها** + +ارزش رشته نوع و یا رشته ثابت. + +**مقادیر بازگشتی** + +را برمی گرداند رشته ای که منجر به از الحاق استدلال. + +اگر هر یک از مقادیر استدلال است `NULL`, `concat` بازگشت `NULL`. + +**مثال** + +پرسوجو: + +``` sql +SELECT concat('Hello, ', 'World!') +``` + +نتیجه: + +``` text +┌─concat('Hello, ', 'World!')─┐ +│ Hello, World! │ +└─────────────────────────────┘ +``` + +## همبسته {#concatassumeinjective} + +مثل [الحاق](#concat) تفاوت این است که شما نیاز به اطمینان حاصل شود که `concat(s1, s2, ...) → sn` این برای بهینه سازی گروه توسط استفاده می شود. + +تابع به نام “injective” اگر همیشه نتیجه های مختلف برای مقادیر مختلف استدلال می گرداند. به عبارت دیگر: استدلال های مختلف هرگز نتیجه یکسان عملکرد. + +**نحو** + +``` sql +concatAssumeInjective(s1, s2, ...) +``` + +**پارامترها** + +ارزش رشته نوع و یا رشته ثابت. + +**مقادیر بازگشتی** + +را برمی گرداند رشته ای که منجر به از الحاق استدلال. + +اگر هر یک از مقادیر استدلال است `NULL`, `concatAssumeInjective` بازگشت `NULL`. + +**مثال** + +جدول ورودی: + +``` sql +CREATE TABLE key_val(`key1` String, `key2` String, `value` UInt32) ENGINE = TinyLog; +INSERT INTO key_val VALUES ('Hello, ','World',1), ('Hello, ','World',2), ('Hello, ','World!',3), ('Hello',', World!',2); +SELECT * from key_val; +``` + +``` text +┌─key1────┬─key2─────┬─value─┐ +│ Hello, │ World │ 1 │ +│ Hello, │ World │ 2 │ +│ Hello, │ World! │ 3 │ +│ Hello │ , World! │ 2 │ +└─────────┴──────────┴───────┘ +``` + +پرسوجو: + +``` sql +SELECT concat(key1, key2), sum(value) FROM key_val GROUP BY concatAssumeInjective(key1, key2) +``` + +نتیجه: + +``` text +┌─concat(key1, key2)─┬─sum(value)─┐ +│ Hello, World! │ 3 │ +│ Hello, World! │ 2 │ +│ Hello, World │ 3 │ +└────────────────────┴────────────┘ +``` + +## زیر رشته(بازدید کنندگان, انحراف, طول), اواسط(بازدید کنندگان, انحراف, طول), عام (بازدید کنندگان, انحراف, طول) {#substring} + +بازگرداندن یک رشته شروع با بایت از ‘offset’ شاخص این است ‘length’ کلمه در ادامه متن طولانی. نمایه سازی شخصیت از یک شروع می شود (همانطور که در گذاشتن استاندارد). این ‘offset’ و ‘length’ استدلال باید ثابت باشد. + +## زیر بغل کردن 8(بازدید کنندگان, انحراف, طول) {#substringutf8} + +همان ‘substring’, اما برای نقاط کد یونیکد. این نسخهها کار میکند با این فرض که رشته شامل مجموعه ای از بایت به نمایندگی از یک متن کد گذاری شده وزارت مخابرات 8. اگر این فرض ملاقات نکرده است, این گرداند برخی از نتیجه (این یک استثنا پرتاب نمی کند). 
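+
+A short sketch contrasting the two functions (assuming the byte vs. code-point semantics described above):
+
+``` sql
+SELECT
+    substring('ClickHouse', 6, 5) AS byte_slice,  -- 'House': offset and length are counted in bytes, starting from 1
+    substringUTF8('Привет', 1, 2) AS utf8_slice   -- 'Пр': offset and length are counted in Unicode code points
+```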
+ +## appendTrailingCharIfAbsent(s, c) {#appendtrailingcharifabsent} + +اگر ‘s’ رشته غیر خالی است و حاوی نیست ‘c’ شخصیت در پایان این برنامه ‘c’ شخصیت به پایان. + +## تبدیل(بازدید کنندگان, از, به) {#convertcharset} + +بازگرداندن رشته ‘s’ که از رمزگذاری در تبدیل شد ‘from’ به رمزگذاری در ‘to’. + +## کد زیر 64) {#base64encode} + +کدگذاریها ‘s’ رشته به پایگاه64 + +## کد زیر 64) {#base64decode} + +رمزگشایی پایگاه64-رشته کد گذاری شده ‘s’ به رشته اصلی. در صورت شکست را افزایش می دهد یک استثنا. + +## تریباس64دسیدی) {#trybase64decode} + +شبیه به حالت کد باس64, اما در صورت خطا یک رشته خالی می شود بازگشت. + +## endsWith(s, پسوند) {#endswith} + +بازگرداندن اینکه با پسوند مشخص شده پایان یابد. بازده 1 اگر رشته به پایان می رسد با پسوند مشخص, در غیر این صورت باز می گردد 0. + +## startsWith(str, پیشوند) {#startswith} + +بازده 1 اینکه رشته با پیشوند مشخص شروع می شود, در غیر این صورت باز می گردد 0. + +``` sql +SELECT startsWith('Spider-Man', 'Spi'); +``` + +**مقادیر بازگشتی** + +- 1, اگر رشته با پیشوند مشخص شروع می شود. +- 0, اگر رشته با پیشوند مشخص شروع نشد. + +**مثال** + +پرسوجو: + +``` sql +SELECT startsWith('Hello, world!', 'He'); +``` + +نتیجه: + +``` text +┌─startsWith('Hello, world!', 'He')─┐ +│ 1 │ +└───────────────────────────────────┘ +``` + +## تر و تمیز {#trim} + +حذف تمام شخصیت های مشخص شده از شروع یا پایان یک رشته. +به طور پیش فرض حذف همه وقوع متوالی از فضای سفید مشترک (شخصیت اسکی 32) از هر دو به پایان می رسد از یک رشته. + +**نحو** + +``` sql +trim([[LEADING|TRAILING|BOTH] trim_character FROM] input_string) +``` + +**پارامترها** + +- `trim_character` — specified characters for trim. [رشته](../../sql_reference/data_types/string.md). +- `input_string` — string for trim. [رشته](../../sql_reference/data_types/string.md). + +**مقدار بازگشتی** + +یک رشته بدون پیشرو و (یا) انتهایی شخصیت مشخص شده است. + +نوع: `String`. + +**مثال** + +پرسوجو: + +``` sql +SELECT trim(BOTH ' ()' FROM '( Hello, world! )') +``` + +نتیجه: + +``` text +┌─trim(BOTH ' ()' FROM '( Hello, world! )')─┐ +│ Hello, world! │ +└───────────────────────────────────────────────┘ +``` + +## trimLeft {#trimleft} + +حذف تمام رخدادهای متوالی از فضای سفید مشترک (شخصیت اسکی 32) از ابتدای یک رشته. آن را نمی کند, حذف انواع دیگر از کاراکترهای فضای سفید (برگه بدون شکستن فضا و غیره.). + +**نحو** + +``` sql +trimLeft(input_string) +``` + +نام مستعار: `ltrim(input_string)`. + +**پارامترها** + +- `input_string` — string to trim. [رشته](../../sql_reference/data_types/string.md). + +**مقدار بازگشتی** + +یک رشته بدون پیشرو فضاهای سفید مشترک. + +نوع: `String`. + +**مثال** + +پرسوجو: + +``` sql +SELECT trimLeft(' Hello, world! ') +``` + +نتیجه: + +``` text +┌─trimLeft(' Hello, world! ')─┐ +│ Hello, world! │ +└─────────────────────────────────────┘ +``` + +## trimRight {#trimright} + +حذف همه متوالی تکرار مشترک فضای خالی (ascii شخصیت 32) از پایان یک رشته است. آن را نمی کند, حذف انواع دیگر از کاراکترهای فضای سفید (برگه بدون شکستن فضا و غیره.). + +**نحو** + +``` sql +trimRight(input_string) +``` + +نام مستعار: `rtrim(input_string)`. + +**پارامترها** + +- `input_string` — string to trim. [رشته](../../sql_reference/data_types/string.md). + +**مقدار بازگشتی** + +یک رشته بدون انتهایی فضاهای خالی مشترک. + +نوع: `String`. + +**مثال** + +پرسوجو: + +``` sql +SELECT trimRight(' Hello, world! ') +``` + +نتیجه: + +``` text +┌─trimRight(' Hello, world! ')─┐ +│ Hello, world! │ +└──────────────────────────────────────┘ +``` + +## اصلاح {#trimboth} + +حذف تمام رخدادهای متوالی از فضای سفید مشترک (شخصیت اسکی 32) از هر دو به پایان می رسد از یک رشته. 
این کار انواع دیگر از شخصیت های فضای سفید را حذف کنید (باریکه, فضای بدون استراحت, و غیره.). + +**نحو** + +``` sql +trimBoth(input_string) +``` + +نام مستعار: `trim(input_string)`. + +**پارامترها** + +- `input_string` — string to trim. [رشته](../../sql_reference/data_types/string.md). + +**مقدار بازگشتی** + +یک رشته بدون پیشرو و انتهایی فضاهای سفید مشترک. + +نوع: `String`. + +**مثال** + +پرسوجو: + +``` sql +SELECT trimBoth(' Hello, world! ') +``` + +نتیجه: + +``` text +┌─trimBoth(' Hello, world! ')─┐ +│ Hello, world! │ +└─────────────────────────────────────┘ +``` + +## CRC32(s) {#crc32} + +بازگرداندن کنترلی از یک رشته, با استفاده از کروک-32-یی 802.3 چند جملهای و مقدار اولیه `0xffffffff` هشدار داده می شود + +نتیجه این نوع uint32. + +## CRC32IEEE(s) {#crc32ieee} + +را برمی گرداند کنترل از یک رشته, با استفاده از کروم-32-یی 802.3 چند جملهای. + +نتیجه این نوع uint32. + +## CRC64(s) {#crc64} + +بازده crc64 کنترلی از یک رشته با استفاده از crc-64-ecma چند جملهای. + +نتیجه این نوع uint64. + +[مقاله اصلی](https://clickhouse.tech/docs/en/query_language/functions/string_functions/) diff --git a/docs/fa/sql_reference/functions/string_replace_functions.md b/docs/fa/sql_reference/functions/string_replace_functions.md new file mode 100644 index 00000000000..311c89c9a45 --- /dev/null +++ b/docs/fa/sql_reference/functions/string_replace_functions.md @@ -0,0 +1,95 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 42 +toc_title: "\u0628\u0631\u0627\u06CC \u062C\u0627\u06CC\u06AF\u0632\u06CC\u0646\u06CC\ + \ \u062F\u0631 \u0631\u0634\u062A\u0647\u0647\u0627" +--- + +# توابع برای جستجو و جایگزینی در رشته ها {#functions-for-searching-and-replacing-in-strings} + +## جایگزینی جایگزین) {#replaceonehaystack-pattern-replacement} + +جایگزین وقوع اول, در صورت وجود, از ‘pattern’ زیر رشته در ‘haystack’ با ‘replacement’ زیر رشته. +از این پس, ‘pattern’ و ‘replacement’ حتما ثابته + +## replaceAll(haystack, الگوی جایگزینی) جایگزین(haystack, الگوی جایگزینی) {#replaceallhaystack-pattern-replacement-replacehaystack-pattern-replacement} + +جایگزین تمام اتفاقات ‘pattern’ زیر رشته در ‘haystack’ با ‘replacement’ زیر رشته. + +## جایگزین کردن الگوی جایگزین) {#replaceregexponehaystack-pattern-replacement} + +جایگزینی با استفاده از ‘pattern’ عبارت منظم. دوباره2 عبارت منظم. +جایگزین تنها وقوع اول, در صورت وجود. +یک الگو را می توان به عنوان ‘replacement’. این الگو می تواند شامل تعویض `\0-\9`. +جایگزینی `\0` شامل کل عبارت منظم. درحال جایگزینی `\1-\9` مربوط به زیرخط numbers.To استفاده از `\` شخصیت در قالب, فرار با استفاده از `\`. +همچنین در نظر داشته باشید که یک رشته تحت اللفظی نیاز به فرار اضافی نگه دارید. + +مثال 1. تبدیل تاریخ به فرمت امریکایی: + +``` sql +SELECT DISTINCT + EventDate, + replaceRegexpOne(toString(EventDate), '(\\d{4})-(\\d{2})-(\\d{2})', '\\2/\\3/\\1') AS res +FROM test.hits +LIMIT 7 +FORMAT TabSeparated +``` + +``` text +2014-03-17 03/17/2014 +2014-03-18 03/18/2014 +2014-03-19 03/19/2014 +2014-03-20 03/20/2014 +2014-03-21 03/21/2014 +2014-03-22 03/22/2014 +2014-03-23 03/23/2014 +``` + +مثال 2. کپی کردن یک رشته ده بار: + +``` sql +SELECT replaceRegexpOne('Hello, World!', '.*', '\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0') AS res +``` + +``` text +┌─res────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐ +│ Hello, World!Hello, World!Hello, World!Hello, World!Hello, World!Hello, World!Hello, World!Hello, World!Hello, World!Hello, World! 
│ +└────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ +``` + +## جایگزین کردن الگوی جایگزین) {#replaceregexpallhaystack-pattern-replacement} + +این کار همان چیزی, اما جایگزین همه وقوع. مثال: + +``` sql +SELECT replaceRegexpAll('Hello, World!', '.', '\\0\\0') AS res +``` + +``` text +┌─res────────────────────────┐ +│ HHeelllloo,, WWoorrlldd!! │ +└────────────────────────────┘ +``` + +به عنوان یک استثنا, اگر یک عبارت منظم در زیر رشته خالی کار می کرد, جایگزینی بیش از یک بار ساخته شده است. +مثال: + +``` sql +SELECT replaceRegexpAll('Hello, World!', '^', 'here: ') AS res +``` + +``` text +┌─res─────────────────┐ +│ here: Hello, World! │ +└─────────────────────┘ +``` + +## سرویس پرداخت درونبرنامهای پلی) {#regexpquotemetas} + +تابع می افزاید: یک بک اسلش قبل از برخی از شخصیت های از پیش تعریف شده در رشته. +نویسههای از پیش تعریفشده: ‘0’, ‘\\’, ‘\|’, ‘(’, ‘)’, ‘^’, ‘$’, ‘.’, ‘\[’, '\]', ‘?’, '\*‘,’+‘,’{‘,’:‘,’-'. +این اجرای کمی از دوباره متفاوت2::پاسخ2:: نقل قول. این فرار صفر بایت به عنوان \\ 0 بجای 00 و فرار شخصیت تنها مورد نیاز. +برای کسب اطلاعات بیشتر به لینک مراجعه کنید: [RE2](https://github.com/google/re2/blob/master/re2/re2.cc#L473) + +[مقاله اصلی](https://clickhouse.tech/docs/en/query_language/functions/string_replace_functions/) diff --git a/docs/fa/sql_reference/functions/string_search_functions.md b/docs/fa/sql_reference/functions/string_search_functions.md new file mode 100644 index 00000000000..9c18958d48f --- /dev/null +++ b/docs/fa/sql_reference/functions/string_search_functions.md @@ -0,0 +1,380 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 41 +toc_title: "\u0628\u0631\u0627\u06CC \u062C\u0633\u062A\u062C\u0648\u06CC \u0631\u0634\ + \u062A\u0647\u0647\u0627" +--- + +# توابع برای جستجوی رشته ها {#functions-for-searching-strings} + +جستجو به طور پیش فرض در تمام این توابع حساس به حروف است. انواع جداگانه ای برای جستجوی غیر حساس مورد وجود دارد. + +## موقعیت (انبار کاه, سوزن), تعیین محل (انبار کاه, سوزن) {#position} + +بازگرداندن موقعیت (به بایت) از رشته پیدا شده است در رشته, با شروع از 1. + +این نسخهها کار میکند با این فرض که رشته شامل مجموعه ای از بایت به نمایندگی از یک متن کد گذاری تک بایت. اگر این فرض ملاقات کرد و یک شخصیت نمی تواند با استفاده از یک بایت تنها نشان داده شود, تابع یک استثنا پرتاب نمی کند و برخی از نتیجه غیر منتظره را برمی گرداند. اگر شخصیت را می توان با استفاده از دو بایت نشان, این دو بایت و غیره استفاده. + +برای یک جستجو غیر حساس به حالت, استفاده از تابع [حساس به حالت](#positioncaseinsensitive). + +**نحو** + +``` sql +position(haystack, needle) +``` + +نام مستعار: `locate(haystack, needle)`. + +**پارامترها** + +- `haystack` — string, in which substring will to be searched. [رشته](../syntax.md#syntax-string-literal). +- `needle` — substring to be searched. [رشته](../syntax.md#syntax-string-literal). + +**مقادیر بازگشتی** + +- موقعیت شروع در بایت (شمارش از 1), اگر زیر رشته پیدا شد. +- 0, اگر زیر رشته یافت نشد. + +نوع: `Integer`. + +**مثالها** + +عبارت “Hello, world!” شامل مجموعه ای از بایت به نمایندگی از یک متن کد گذاری تک بایت. تابع بازده برخی از نتیجه انتظار می رود: + +پرسوجو: + +``` sql +SELECT position('Hello, world!', '!') +``` + +نتیجه: + +``` text +┌─position('Hello, world!', '!')─┐ +│ 13 │ +└────────────────────────────────┘ +``` + +همان عبارت در روسیه شامل شخصیت های که نمی تواند با استفاده از یک بایت نشان داده شود. 
تابع بازده برخی از نتیجه غیر منتظره (استفاده [موقعیت 8](#positionutf8) تابع برای متن چند بایت کد گذاری): + +پرسوجو: + +``` sql +SELECT position('Привет, мир!', '!') +``` + +نتیجه: + +``` text +┌─position('Привет, мир!', '!')─┐ +│ 21 │ +└───────────────────────────────┘ +``` + +## حساس به حالت {#positioncaseinsensitive} + +همان [موقعیت](#position) بازگرداندن موقعیت (به بایت) از رشته پیدا شده است در رشته, با شروع از 1. استفاده از تابع برای یک جستجو غیر حساس به حالت. + +این نسخهها کار میکند با این فرض که رشته شامل مجموعه ای از بایت به نمایندگی از یک متن کد گذاری تک بایت. اگر این فرض ملاقات کرد و یک شخصیت نمی تواند با استفاده از یک بایت تنها نشان داده شود, تابع یک استثنا پرتاب نمی کند و برخی از نتیجه غیر منتظره را برمی گرداند. اگر شخصیت را می توان با استفاده از دو بایت نشان, این دو بایت و غیره استفاده. + +**نحو** + +``` sql +positionCaseInsensitive(haystack, needle) +``` + +**پارامترها** + +- `haystack` — string, in which substring will to be searched. [رشته](../syntax.md#syntax-string-literal). +- `needle` — substring to be searched. [رشته](../syntax.md#syntax-string-literal). + +**مقادیر بازگشتی** + +- موقعیت شروع در بایت (شمارش از 1), اگر زیر رشته پیدا شد. +- 0, اگر زیر رشته یافت نشد. + +نوع: `Integer`. + +**مثال** + +پرسوجو: + +``` sql +SELECT positionCaseInsensitive('Hello, world!', 'hello') +``` + +نتیجه: + +``` text +┌─positionCaseInsensitive('Hello, world!', 'hello')─┐ +│ 1 │ +└───────────────────────────────────────────────────┘ +``` + +## موقعیت 8 {#positionutf8} + +بازگرداندن موقعیت (در نقاط یونیکد) از رشته پیدا شده است در رشته, با شروع از 1. + +این نسخهها کار میکند با این فرض که رشته شامل مجموعه ای از بایت به نمایندگی از یک متن کد گذاری شده وزارت مخابرات 8. اگر این فرض ملاقات نمی, تابع یک استثنا پرتاب نمی کند و برخی از نتیجه غیر منتظره را برمی گرداند. اگر شخصیت را می توان با استفاده از دو نقطه یونیکد نشان, این دو و غیره استفاده. + +برای یک جستجو غیر حساس به حالت, استفاده از تابع [در حال بارگذاری](#positioncaseinsensitiveutf8). + +**نحو** + +``` sql +positionUTF8(haystack, needle) +``` + +**پارامترها** + +- `haystack` — string, in which substring will to be searched. [رشته](../syntax.md#syntax-string-literal). +- `needle` — substring to be searched. [رشته](../syntax.md#syntax-string-literal). + +**مقادیر بازگشتی** + +- موقعیت شروع در یونیکد امتیاز (شمارش از 1), اگر زیر رشته پیدا شد. +- 0, اگر زیر رشته یافت نشد. + +نوع: `Integer`. + +**مثالها** + +عبارت “Hello, world!” در روسیه شامل مجموعه ای از نقاط یونیکد نمایندگی یک متن کد گذاری تک نقطه. تابع بازده برخی از نتیجه انتظار می رود: + +پرسوجو: + +``` sql +SELECT positionUTF8('Привет, мир!', '!') +``` + +نتیجه: + +``` text +┌─positionUTF8('Привет, мир!', '!')─┐ +│ 12 │ +└───────────────────────────────────┘ +``` + +عبارت “Salut, étudiante!”, جایی که شخصیت `é` می توان با استفاده از یک نقطه نشان داد (`U+00E9`) یا دو نقطه (`U+0065U+0301`) تابع را می توان بازگشت برخی از نتیجه غیر منتظره: + +پرسوجو برای نامه `é` که نشان داده شده است یک نقطه یونیکد `U+00E9`: + +``` sql +SELECT positionUTF8('Salut, étudiante!', '!') +``` + +نتیجه: + +``` text +┌─positionUTF8('Salut, étudiante!', '!')─┐ +│ 17 │ +└────────────────────────────────────────┘ +``` + +پرسوجو برای نامه `é` که به نمایندگی از دو نقطه یونیکد `U+0065U+0301`: + +``` sql +SELECT positionUTF8('Salut, étudiante!', '!') +``` + +نتیجه: + +``` text +┌─positionUTF8('Salut, étudiante!', '!')─┐ +│ 18 │ +└────────────────────────────────────────┘ +``` + +## در حال بارگذاری {#positioncaseinsensitiveutf8} + +همان [موقعیت 8](#positionutf8), اما غیر حساس به حروف است. 
بازگرداندن موقعیت (در نقاط یونیکد) از رشته پیدا شده است در رشته, با شروع از 1. + +این نسخهها کار میکند با این فرض که رشته شامل مجموعه ای از بایت به نمایندگی از یک متن کد گذاری شده وزارت مخابرات 8. اگر این فرض ملاقات نمی, تابع یک استثنا پرتاب نمی کند و برخی از نتیجه غیر منتظره را برمی گرداند. اگر شخصیت را می توان با استفاده از دو نقطه یونیکد نشان, این دو و غیره استفاده. + +**نحو** + +``` sql +positionCaseInsensitiveUTF8(haystack, needle) +``` + +**پارامترها** + +- `haystack` — string, in which substring will to be searched. [رشته](../syntax.md#syntax-string-literal). +- `needle` — substring to be searched. [رشته](../syntax.md#syntax-string-literal). + +**مقدار بازگشتی** + +- موقعیت شروع در یونیکد امتیاز (شمارش از 1), اگر زیر رشته پیدا شد. +- 0, اگر زیر رشته یافت نشد. + +نوع: `Integer`. + +**مثال** + +پرسوجو: + +``` sql +SELECT positionCaseInsensitiveUTF8('Привет, мир!', 'Мир') +``` + +نتیجه: + +``` text +┌─positionCaseInsensitiveUTF8('Привет, мир!', 'Мир')─┐ +│ 9 │ +└────────────────────────────────────────────────────┘ +``` + +## چند ضلعی {#multisearchallpositions} + +همان [موقعیت](string_search_functions.md#position) اما بازگشت `Array` از موقعیت (به بایت) از بسترهای مربوطه موجود در رشته. موقعیت ها با شروع از نمایه 1. + +جستجو در دنباله ای از بایت بدون توجه به رمزگذاری رشته و میترا انجام می شود. + +- برای جستجو اسکی غیر حساس به حالت, استفاده از تابع `multiSearchAllPositionsCaseInsensitive`. +- برای جستجو در یوتف-8, استفاده از تابع [چند ضلعی پایگاه داده های8](#multiSearchAllPositionsUTF8). +- برای غیر حساس به حالت جستجو-8, استفاده از تابع چند تخصیص چندگانه 18. + +**نحو** + +``` sql +multiSearchAllPositions(haystack, [needle1, needle2, ..., needlen]) +``` + +**پارامترها** + +- `haystack` — string, in which substring will to be searched. [رشته](../syntax.md#syntax-string-literal). +- `needle` — substring to be searched. [رشته](../syntax.md#syntax-string-literal). + +**مقادیر بازگشتی** + +- مجموعه ای از موقعیت های شروع در بایت (شمارش از 1), اگر زیر رشته مربوطه پیدا شد و 0 اگر یافت نشد. + +**مثال** + +پرسوجو: + +``` sql +SELECT multiSearchAllPositions('Hello, World!', ['hello', '!', 'world']) +``` + +نتیجه: + +``` text +┌─multiSearchAllPositions('Hello, World!', ['hello', '!', 'world'])─┐ +│ [0,13,0] │ +└───────────────────────────────────────────────────────────────────┘ +``` + +## چند ضلعی پایگاه داده های8 {#multiSearchAllPositionsUTF8} + +ببینید `multiSearchAllPositions`. + +## ترکیب چندجفتاری (هیستک, \[سوزن1 سوزن2, …, needleنه\]) {#multisearchfirstposition} + +همان `position` اما بازده سمت چپ افست از رشته `haystack` که به برخی از سوزن همسان. + +برای یک جستجوی غیر حساس مورد و یا / و در توابع استفاده از فرمت جی تی اف 8 `multiSearchFirstPositionCaseInsensitive, multiSearchFirstPositionUTF8, multiSearchFirstPositionCaseInsensitiveUTF8`. + +## مقالههای جدید مرتبط با تحقیق این نویسنده1 سوزن2, …, needleنه\]) {#multisearchfirstindexhaystack-needle1-needle2-needlen} + +بازگرداندن شاخص `i` (شروع از 1) سوزن چپ پیدا شده استمن ... در رشته `haystack` و 0 در غیر این صورت. + +برای یک جستجوی غیر حساس مورد و یا / و در توابع استفاده از فرمت جی تی اف 8 `multiSearchFirstIndexCaseInsensitive, multiSearchFirstIndexUTF8, multiSearchFirstIndexCaseInsensitiveUTF8`. + +## مولسیرچانی (هیستک, \[سوزن1 سوزن2, …, needleنه\]) {#function-multisearchany} + +بازده 1, اگر حداقل یک سوزن رشتهمن ... مسابقات رشته `haystack` و 0 در غیر این صورت. + +برای یک جستجوی غیر حساس مورد و یا / و در توابع استفاده از فرمت جی تی اف 8 `multiSearchAnyCaseInsensitive, multiSearchAnyUTF8, multiSearchAnyCaseInsensitiveUTF8`. + +!!! 
note "یادداشت" + در همه `multiSearch*` توابع تعداد سوزن ها باید کمتر از 2 باشد8 به دلیل مشخصات پیاده سازی. + +## همخوانی داشتن (کومه علف خشک, الگو) {#matchhaystack-pattern} + +بررسی اینکه رشته با `pattern` عبارت منظم. یک `re2` عبارت منظم. این [نحو](https://github.com/google/re2/wiki/Syntax) از `re2` عبارات منظم محدود تر از نحو عبارات منظم پرل است. + +بازده 0 اگر مطابقت ندارد, یا 1 اگر منطبق. + +توجه داشته باشید که نماد ممیز (`\`) برای فرار در عبارت منظم استفاده می شود. همان نماد برای فرار در لیتر رشته استفاده می شود. بنابراین به منظور فرار از نماد در یک عبارت منظم, شما باید دو بک اسلش ارسال (\\) در یک رشته تحت اللفظی. + +عبارت منظم با رشته کار می کند به عنوان اگر مجموعه ای از بایت است. عبارت منظم می تواند بایت پوچ نیست. +برای الگوهای به جستجو برای بسترهای در یک رشته, بهتر است به استفاده از مانند و یا ‘position’ از اونجایی که خیلی سریعتر کار میکنن + +## ملتمتچانی (کومه علف خشک, \[الگو1 الگو2, …, patternنه\]) {#multimatchanyhaystack-pattern1-pattern2-patternn} + +همان `match`, اما بازده 0 اگر هیچ یک از عبارات منظم همسان و 1 اگر هر یک از الگوهای مسابقات. این استفاده می کند [hyperscan](https://github.com/intel/hyperscan) کتابخونه. برای الگوهای به جستجو بسترهای در یک رشته, بهتر است به استفاده از `multiSearchAny` چون خیلی سریعتر کار میکنه + +!!! note "یادداشت" + طول هر یک از `haystack` رشته باید کمتر از 2 باشد32 بایت در غیر این صورت استثنا پرتاب می شود. این محدودیت صورت می گیرد به دلیل hyperscan API. + +## مقالههای جدید مرتبط با تحقیق این نویسنده1 الگو2, …, patternنه\]) {#multimatchanyindexhaystack-pattern1-pattern2-patternn} + +همان `multiMatchAny`, اما بازگرداندن هر شاخص که منطبق بر انبار کاه. + +## اطلاعات دقیق1 الگو2, …, patternنه\]) {#multimatchallindiceshaystack-pattern1-pattern2-patternn} + +همان `multiMatchAny`, اما بازگرداندن مجموعه ای از تمام شاخص که مطابقت انبار کاه در هر سفارش. + +## چندبازیماتچانی (هیستک, فاصله, \[الگو1 الگو2, …, patternنه\]) {#multifuzzymatchanyhaystack-distance-pattern1-pattern2-patternn} + +همان `multiMatchAny`, اما بازده 1 اگر هر الگوی منطبق انبار کاه در یک ثابت [ویرایش فاصله](https://en.wikipedia.org/wiki/Edit_distance). این تابع نیز در حالت تجربی است و می تواند بسیار کند باشد. برای اطلاعات بیشتر نگاه کنید به [hyperscan مستندات](https://intel.github.io/hyperscan/dev-reference/compilation.html#approximate-matching). + +## چند شکلی (هیستاک, فاصله, \[الگو1 الگو2, …, patternنه\]) {#multifuzzymatchanyindexhaystack-distance-pattern1-pattern2-patternn} + +همان `multiFuzzyMatchAny`, اما می گرداند هر شاخص که منطبق بر انبار کاه در فاصله ویرایش ثابت. + +## بازهای چندگانه (انبار کاه, فاصله, \[الگو1 الگو2, …, patternنه\]) {#multifuzzymatchallindiceshaystack-distance-pattern1-pattern2-patternn} + +همان `multiFuzzyMatchAny`, اما بازگرداندن مجموعه ای از تمام شاخص در هر منظور که مطابقت با انبار کاه در فاصله ویرایش ثابت. + +!!! note "یادداشت" + `multiFuzzyMatch*` توابع از عبارات منظم یونایتد-8 پشتیبانی نمی کنند و چنین عبارات به عنوان بایت به دلیل محدودیت هیپراکسان درمان می شوند. + +!!! note "یادداشت" + برای خاموش کردن تمام توابع است که با استفاده از بیش از حد اسکان, استفاده از تنظیمات `SET allow_hyperscan = 0;`. + +## عصاره (انبار کاه, الگو) {#extracthaystack-pattern} + +عصاره یک قطعه از یک رشته با استفاده از یک عبارت منظم. اگر ‘haystack’ با ‘pattern’ عبارت منظم, یک رشته خالی بازگشته است. اگر عبارت منظم حاوی وسترن نیست, طول می کشد قطعه که منطبق بر کل عبارت منظم. در غیر این صورت قطعه ای را می گیرد که با اولین زیر دست ساز مطابقت دارد. + +## خارج تماس بگیرید) {#extractallhaystack-pattern} + +عصاره تمام قطعات از یک رشته با استفاده از یک عبارت منظم. 
اگر ‘haystack’ با ‘pattern’ عبارت منظم, یک رشته خالی بازگشته است. بازگرداندن مجموعه ای از رشته متشکل از تمام مسابقات به عبارت منظم. به طور کلی, رفتار همان است که ‘extract’ تابع (در آن طول می کشد برای اولین بار subpattern یا کل بیان اگر وجود ندارد subpattern). + +## مانند (کومه علف خشک, الگو), کومه علف خشک مانند اپراتور الگوی {#function-like} + +بررسی اینکه یک رشته منطبق یک عبارت ساده به طور منظم. +عبارت منظم می تواند حاوی متسیمبلس باشد `%` و `_`. + +`%` نشان می دهد هر مقدار از هر بایت (از جمله صفر شخصیت). + +`_` نشان می دهد هر یک بایت. + +از بک اسلش استفاده کنید (`\`) برای فرار از متسیمبلس . توجه داشته باشید در فرار در شرح ‘match’ تابع. + +برای عبارات منظم مانند `%needle%`, کد مطلوب تر است و کار می کند به همان سرعتی که `position` تابع. +برای دیگر عبارات منظم, کد همان است که برای است ‘match’ تابع. + +## notLike(انبار کاه pattern), انبار کاه نیست مانند الگوی اپراتور {#function-notlike} + +همان چیزی که به عنوان ‘like’, اما منفی. + +## نمک زدایی (انبار کاه, سوزن) {#ngramdistancehaystack-needle} + +محاسبه فاصله 4 گرم بین `haystack` و `needle`: counts the symmetric difference between two multisets of 4-grams and normalizes it by the sum of their cardinalities. Returns float number from 0 to 1 – the closer to zero, the more strings are similar to each other. If the constant `needle` یا `haystack` بیش از 32 کیلوبایت است, می اندازد یک استثنا. اگر برخی از غیر ثابت `haystack` یا `needle` رشته ها بیش از 32 کیلوبایت, فاصله است که همیشه یکی. + +برای جستجوی غیر حساس به حالت یا / و در توابع استفاده از فرمت جی تی اف 8 `ngramDistanceCaseInsensitive, ngramDistanceUTF8, ngramDistanceCaseInsensitiveUTF8`. + +## نگراماسراچ (هیستک سوزن) {#ngramsearchhaystack-needle} + +مثل `ngramDistance` اما محاسبه تفاوت غیر متقارن بین `needle` و `haystack` – the number of n-grams from needle minus the common number of n-grams normalized by the number of `needle` مامان بزرگ نزدیک تر به یک, بیشتر احتمال دارد `needle` در `haystack`. می تواند برای جستجو رشته فازی مفید باشد. + +برای جستجوی غیر حساس به حالت یا / و در توابع استفاده از فرمت جی تی اف 8 `ngramSearchCaseInsensitive, ngramSearchUTF8, ngramSearchCaseInsensitiveUTF8`. + +!!! note "یادداشت" + For UTF-8 case we use 3-gram distance. All these are not perfectly fair n-gram distances. We use 2-byte hashes to hash n-grams and then calculate the (non-)symmetric difference between these hash tables – collisions may occur. With UTF-8 case-insensitive format we do not use fair `tolower` function – we zero the 5-th bit (starting from zero) of each codepoint byte and first bit of zeroth byte if bytes more than one – this works for Latin and mostly for all Cyrillic letters. + +[مقاله اصلی](https://clickhouse.tech/docs/en/query_language/functions/string_search_functions/) diff --git a/docs/fa/sql_reference/functions/type_conversion_functions.md b/docs/fa/sql_reference/functions/type_conversion_functions.md new file mode 100644 index 00000000000..ed3c83b3031 --- /dev/null +++ b/docs/fa/sql_reference/functions/type_conversion_functions.md @@ -0,0 +1,534 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 38 +toc_title: "\u062A\u0628\u062F\u06CC\u0644 \u0646\u0648\u0639" +--- + +# توابع تبدیل نوع {#type-conversion-functions} + +## مشکلات متداول تبدیل عددی {#numeric-conversion-issues} + +هنگامی که شما یک مقدار تبدیل از یک به نوع داده های دیگر, شما باید به یاد داشته باشید که در مورد مشترک, این یک عملیات نا امن است که می تواند به از دست دادن داده ها منجر شود. 
از دست دادن داده ها می تواند رخ دهد اگر شما سعی می کنید به جا ارزش از یک نوع داده بزرگتر به یک نوع داده کوچکتر, و یا اگر شما ارزش بین انواع داده های مختلف تبدیل. + +کلیک کرده است [همان رفتار به عنوان ج++ برنامه](https://en.cppreference.com/w/cpp/language/implicit_conversion). + +## toInt(8/16/32/64) {#toint8163264} + +تبدیل یک مقدار ورودی به [Int](../../sql_reference/data_types/int_uint.md) نوع داده. این خانواده تابع شامل: + +- `toInt8(expr)` — Results in the `Int8` نوع داده. +- `toInt16(expr)` — Results in the `Int16` نوع داده. +- `toInt32(expr)` — Results in the `Int32` نوع داده. +- `toInt64(expr)` — Results in the `Int64` نوع داده. + +**پارامترها** + +- `expr` — [عبارت](../syntax.md#syntax-expressions) بازگشت یک عدد یا یک رشته با نمایش اعشاری یک عدد. دودویی, مبنای هشت, و بازنمایی هگزادسیمال از اعداد پشتیبانی نمی شوند. صفر منجر محروم هستند. + +**مقدار بازگشتی** + +مقدار صحیح در `Int8`, `Int16`, `Int32` یا `Int64` نوع داده. + +توابع استفاده [گرد کردن به سمت صفر](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero) یعنی عدد کسری از اعداد را کوتاه می کنند. + +رفتار توابع برای [هشدار داده می شود](../../sql_reference/data_types/float.md#data_type-float-nan-inf) استدلال تعریف نشده است. به یاد داشته باشید در مورد [مسایل همگرایی عددی](#numeric-conversion-issues), هنگام استفاده از توابع. + +**مثال** + +``` sql +SELECT toInt64(nan), toInt32(32), toInt16('16'), toInt8(8.8) +``` + +``` text +┌─────────toInt64(nan)─┬─toInt32(32)─┬─toInt16('16')─┬─toInt8(8.8)─┐ +│ -9223372036854775808 │ 32 │ 16 │ 8 │ +└──────────────────────┴─────────────┴───────────────┴─────────────┘ +``` + +## toInt(8/16/32/64)OrZero {#toint8163264orzero} + +این استدلال از نوع رشته طول می کشد و تلاش می کند تا به اعضای هیات تجزیه (8 \| 16 \| 32 \| 64). اگر شکست خورده, بازده 0. + +**مثال** + +``` sql +select toInt64OrZero('123123'), toInt8OrZero('123qwe123') +``` + +``` text +┌─toInt64OrZero('123123')─┬─toInt8OrZero('123qwe123')─┐ +│ 123123 │ 0 │ +└─────────────────────────┴───────────────────────────┘ +``` + +## toInt(8/16/32/64)OrNull {#toint8163264ornull} + +این استدلال از نوع رشته طول می کشد و تلاش می کند تا به اعضای هیات تجزیه (8 \| 16 \| 32 \| 64). اگر شکست خورده, تهی گرداند. + +**مثال** + +``` sql +select toInt64OrNull('123123'), toInt8OrNull('123qwe123') +``` + +``` text +┌─toInt64OrNull('123123')─┬─toInt8OrNull('123qwe123')─┐ +│ 123123 │ ᴺᵁᴸᴸ │ +└─────────────────────────┴───────────────────────────┘ +``` + +## toUInt(8/16/32/64) {#touint8163264} + +تبدیل یک مقدار ورودی به [اینترنت](../../sql_reference/data_types/int_uint.md) نوع داده. این خانواده تابع شامل: + +- `toUInt8(expr)` — Results in the `UInt8` نوع داده. +- `toUInt16(expr)` — Results in the `UInt16` نوع داده. +- `toUInt32(expr)` — Results in the `UInt32` نوع داده. +- `toUInt64(expr)` — Results in the `UInt64` نوع داده. + +**پارامترها** + +- `expr` — [عبارت](../syntax.md#syntax-expressions) بازگشت یک عدد یا یک رشته با نمایش اعشاری یک عدد. دودویی, مبنای هشت, و بازنمایی هگزادسیمال از اعداد پشتیبانی نمی شوند. صفر منجر محروم هستند. + +**مقدار بازگشتی** + +مقدار صحیح در `UInt8`, `UInt16`, `UInt32` یا `UInt64` نوع داده. + +توابع استفاده [گرد کردن به سمت صفر](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero) یعنی عدد کسری از اعداد را کوتاه می کنند. + +رفتار توابع برای کشاورزی منفی و برای [هشدار داده می شود](../../sql_reference/data_types/float.md#data_type-float-nan-inf) استدلال تعریف نشده است. اگر شما یک رشته عبور با تعداد منفی, مثلا `'-32'`, خانه را افزایش می دهد یک استثنا. 
به یاد داشته باشید در مورد [مسایل همگرایی عددی](#numeric-conversion-issues), هنگام استفاده از توابع. + +**مثال** + +``` sql +SELECT toUInt64(nan), toUInt32(-32), toUInt16('16'), toUInt8(8.8) +``` + +``` text +┌───────toUInt64(nan)─┬─toUInt32(-32)─┬─toUInt16('16')─┬─toUInt8(8.8)─┐ +│ 9223372036854775808 │ 4294967264 │ 16 │ 8 │ +└─────────────────────┴───────────────┴────────────────┴──────────────┘ +``` + +## toUInt(8/16/32/64)OrZero {#touint8163264orzero} + +## toUInt(8/16/32/64)OrNull {#touint8163264ornull} + +## توفلوات (32/64) {#tofloat3264} + +## toFloat(32/64)OrZero {#tofloat3264orzero} + +## toFloat(32/64)OrNull {#tofloat3264ornull} + +## toDate {#todate} + +## تودارزرو {#todateorzero} + +## طول عمر {#todateornull} + +## toDateTime {#todatetime} + +## به اندازه تو {#todatetimeorzero} + +## طول عمر ترنول {#todatetimeornull} + +## toDecimal(32/64/128) {#todecimal3264128} + +تبدیل `value` به [دهدهی](../../sql_reference/data_types/decimal.md) نوع داده با دقت `S`. این `value` می تواند یک عدد یا یک رشته. این `S` (مقیاس) پارامتر تعداد رقم اعشار را مشخص می کند. + +- `toDecimal32(value, S)` +- `toDecimal64(value, S)` +- `toDecimal128(value, S)` + +## toDecimal(32/64/128)OrNull {#todecimal3264128ornull} + +تبدیل یک رشته ورودی به یک [Nullable(اعشاری(P,S))](../../sql_reference/data_types/decimal.md) مقدار نوع داده. این خانواده از توابع عبارتند از: + +- `toDecimal32OrNull(expr, S)` — Results in `Nullable(Decimal32(S))` نوع داده. +- `toDecimal64OrNull(expr, S)` — Results in `Nullable(Decimal64(S))` نوع داده. +- `toDecimal128OrNull(expr, S)` — Results in `Nullable(Decimal128(S))` نوع داده. + +این توابع باید به جای استفاده `toDecimal*()` توابع, اگر شما ترجیح می دهید برای دریافت یک `NULL` ارزش به جای یک استثنا در صورت خطا تجزیه ارزش ورودی. + +**پارامترها** + +- `expr` — [عبارت](../syntax.md#syntax-expressions), بازگرداندن یک مقدار در [رشته](../../sql_reference/data_types/string.md) نوع داده. تاتر انتظار نمایندگی متنی از عدد اعشاری. به عنوان مثال, `'1.111'`. +- `S` — Scale, the number of decimal places in the resulting value. + +**مقدار بازگشتی** + +یک مقدار در `Nullable(Decimal(P,S))` نوع داده. مقدار شامل: + +- شماره با `S` اعشار, اگر تاتر تفسیر رشته ورودی به عنوان یک عدد. +- `NULL`, اگر تاتر می توانید رشته ورودی به عنوان یک عدد تفسیر نمی کند و یا اگر تعداد ورودی شامل بیش از `S` رقم اعشار. + +**مثالها** + +``` sql +SELECT toDecimal32OrNull(toString(-1.111), 5) AS val, toTypeName(val) +``` + +``` text +┌──────val─┬─toTypeName(toDecimal32OrNull(toString(-1.111), 5))─┐ +│ -1.11100 │ Nullable(Decimal(9, 5)) │ +└──────────┴────────────────────────────────────────────────────┘ +``` + +``` sql +SELECT toDecimal32OrNull(toString(-1.111), 2) AS val, toTypeName(val) +``` + +``` text +┌──val─┬─toTypeName(toDecimal32OrNull(toString(-1.111), 2))─┐ +│ ᴺᵁᴸᴸ │ Nullable(Decimal(9, 2)) │ +└──────┴────────────────────────────────────────────────────┘ +``` + +## toDecimal(32/64/128)OrZero {#todecimal3264128orzero} + +تبدیل یک مقدار ورودی به [دهدهی)](../../sql_reference/data_types/decimal.md) نوع داده. این خانواده از توابع عبارتند از: + +- `toDecimal32OrZero( expr, S)` — Results in `Decimal32(S)` نوع داده. +- `toDecimal64OrZero( expr, S)` — Results in `Decimal64(S)` نوع داده. +- `toDecimal128OrZero( expr, S)` — Results in `Decimal128(S)` نوع داده. + +این توابع باید به جای استفاده `toDecimal*()` توابع, اگر شما ترجیح می دهید برای دریافت یک `0` ارزش به جای یک استثنا در صورت خطا تجزیه ارزش ورودی. 
+ +**پارامترها** + +- `expr` — [عبارت](../syntax.md#syntax-expressions), بازگرداندن یک مقدار در [رشته](../../sql_reference/data_types/string.md) نوع داده. تاتر انتظار نمایندگی متنی از عدد اعشاری. به عنوان مثال, `'1.111'`. +- `S` — Scale, the number of decimal places in the resulting value. + +**مقدار بازگشتی** + +یک مقدار در `Nullable(Decimal(P,S))` نوع داده. مقدار شامل: + +- شماره با `S` اعشار, اگر تاتر تفسیر رشته ورودی به عنوان یک عدد. +- 0 با `S` رقم اعشار, اگر تاتر می توانید رشته ورودی به عنوان یک عدد تفسیر نمی کند و یا اگر تعداد ورودی شامل بیش از `S` رقم اعشار. + +**مثال** + +``` sql +SELECT toDecimal32OrZero(toString(-1.111), 5) AS val, toTypeName(val) +``` + +``` text +┌──────val─┬─toTypeName(toDecimal32OrZero(toString(-1.111), 5))─┐ +│ -1.11100 │ Decimal(9, 5) │ +└──────────┴────────────────────────────────────────────────────┘ +``` + +``` sql +SELECT toDecimal32OrZero(toString(-1.111), 2) AS val, toTypeName(val) +``` + +``` text +┌──val─┬─toTypeName(toDecimal32OrZero(toString(-1.111), 2))─┐ +│ 0.00 │ Decimal(9, 2) │ +└──────┴────────────────────────────────────────────────────┘ +``` + +## toString {#tostring} + +توابع برای تبدیل بین اعداد, رشته ها (اما رشته ثابت نیست), تاریخ, و تاریخ با زمان. +همه این توابع قبول یک استدلال. + +در هنگام تبدیل به و یا از یک رشته, ارزش فرمت شده و یا تجزیه با استفاده از قوانین مشابه برای فرمت جدولبندیشده (و تقریبا تمام فرمت های متنی دیگر). اگر رشته را نمی توان تجزیه, یک استثنا پرتاب می شود و درخواست لغو شده است. + +هنگام تبدیل تاریخ به اعداد و یا بالعکس, تاریخ مربوط به تعداد روز از ابتدای عصر یونیکس. +هنگام تبدیل تاریخ با بار به اعداد و یا بالعکس, تاریخ با زمان مربوط به تعداد ثانیه از ابتدای عصر یونیکس. + +تاریخ و تاریخ-با-فرمت زمان برای todate/todatetime توابع تعریف شده به شرح زیر است: + +``` text +YYYY-MM-DD +YYYY-MM-DD hh:mm:ss +``` + +به عنوان یک استثنا اگر تبدیل از uint32, int32, uint64 یا int64 عددی انواع, به, تاریخ, و اگر عدد بزرگتر یا مساوی به 65536 تعدادی را به عنوان تفسیر یک زمان یونیکس (و نه به عنوان تعداد روز) و گرد است به تاریخ. این اجازه می دهد تا پشتیبانی از وقوع مشترک نوشتن ‘toDate(unix\_timestamp)’ که در غیر این صورت یک خطا خواهد بود و نیاز به نوشتن بیشتر دست و پا گیر ‘toDate(toDateTime(unix\_timestamp))’. + +تبدیل بین تاریخ و تاریخ با زمان انجام شده است راه طبیعی: با اضافه کردن یک زمان خالی و یا حذف زمان. + +تبدیل بین انواع عددی با استفاده از قوانین مشابه به عنوان تکالیف بین انواع مختلف عددی در ج++. + +علاوه بر این, تابع حول از استدلال حسگر ناحیه رنگی می توانید یک استدلال رشته دوم حاوی نام منطقه زمانی را. مثال: `Asia/Yekaterinburg` در این مورد, زمان با توجه به منطقه زمانی مشخص فرمت. + +``` sql +SELECT + now() AS now_local, + toString(now(), 'Asia/Yekaterinburg') AS now_yekat +``` + +``` text +┌───────────now_local─┬─now_yekat───────────┐ +│ 2016-06-15 00:11:21 │ 2016-06-15 02:11:21 │ +└─────────────────────┴─────────────────────┘ +``` + +همچنین `toUnixTimestamp` تابع. + +## وضعیت زیستشناختی رکورد) {#tofixedstrings-n} + +تبدیل یک استدلال نوع رشته به یک رشته (نفر) نوع (یک رشته با طول ثابت نفر). نفر باید ثابت باشد. +اگر رشته دارای بایت کمتر از نفر, این است که با بایت پوچ به سمت راست منتقل. اگر رشته دارای بایت بیش از نفر, یک استثنا پرتاب می شود. + +## در حال بارگذاری) {#tostringcuttozeros} + +می پذیرد یک رشته یا رشته ثابت استدلال. بازگرداندن رشته با محتوای کوتاه در اولین صفر بایت پیدا شده است. 
+ +مثال: + +``` sql +SELECT toFixedString('foo', 8) AS s, toStringCutToZero(s) AS s_cut +``` + +``` text +┌─s─────────────┬─s_cut─┐ +│ foo\0\0\0\0\0 │ foo │ +└───────────────┴───────┘ +``` + +``` sql +SELECT toFixedString('foo\0bar', 8) AS s, toStringCutToZero(s) AS s_cut +``` + +``` text +┌─s──────────┬─s_cut─┐ +│ foo\0bar\0 │ foo │ +└────────────┴───────┘ +``` + +## reinterpretAsUInt(8/16/32/64) {#reinterpretasuint8163264} + +## reinterpretAsInt(8/16/32/64) {#reinterpretasint8163264} + +## تفسیر مجدد (32/64) {#reinterpretasfloat3264} + +## بازخوانی مجدد {#reinterpretasdate} + +## حسگر ناحیه رنگی {#reinterpretasdatetime} + +این توابع یک رشته را قبول می کنند و بایت هایی را که در ابتدای رشته قرار می گیرند به عنوان یک عدد در دستور میزبان (اندی کوچک) تفسیر می کنند. اگر رشته به اندازه کافی بلند نیست, توابع کار به عنوان اگر رشته با تعداد لازم از بایت پوچ خالی. اگر رشته طولانی تر از مورد نیاز است, بایت اضافی نادیده گرفته می شوند. تاریخ به عنوان تعداد روز از ابتدای عصر یونیکس تفسیر و تاریخ با زمان به عنوان تعداد ثانیه از ابتدای عصر یونیکس تفسیر شده است. + +## رشته مجدد {#type_conversion_functions-reinterpretAsString} + +این تابع یک شماره یا تاریخ و یا تاریخ با زمان می پذیرد, و یک رشته حاوی بایت به نمایندگی از ارزش مربوطه را در سفارش میزبان را برمی گرداند (اندی کمی). بایت پوچ از پایان کاهش یافته است. مثلا, ارزش نوع اوینت32 255 یک رشته است که یک بایت طولانی است. + +## رشته مجدد {#reinterpretasfixedstring} + +این تابع یک شماره یا تاریخ و یا تاریخ با زمان می پذیرد, و یک رشته ثابت حاوی بایت به نمایندگی از ارزش مربوطه را در سفارش میزبان را برمی گرداند (اندی کمی). بایت پوچ از پایان کاهش یافته است. مثلا, ارزش نوع اوینت32 255 رشته ثابت است که یک بایت طولانی است. + +## قالب (ایکس تی) {#type_conversion_function-cast} + +تبدیل ‘x’ به ‘t’ نوع داده. بازیگران نحو (ایکس به عنوان تی) نیز پشتیبانی می کند. + +مثال: + +``` sql +SELECT + '2016-06-15 23:00:00' AS timestamp, + CAST(timestamp AS DateTime) AS datetime, + CAST(timestamp AS Date) AS date, + CAST(timestamp, 'String') AS string, + CAST(timestamp, 'FixedString(22)') AS fixed_string +``` + +``` text +┌─timestamp───────────┬────────────datetime─┬───────date─┬─string──────────────┬─fixed_string──────────────┐ +│ 2016-06-15 23:00:00 │ 2016-06-15 23:00:00 │ 2016-06-15 │ 2016-06-15 23:00:00 │ 2016-06-15 23:00:00\0\0\0 │ +└─────────────────────┴─────────────────────┴────────────┴─────────────────────┴───────────────────────────┘ +``` + +تبدیل به fixedstring(n) تنها با این نسخهها کار برای استدلال از نوع string یا fixedstring(n). + +تبدیل نوع به [Nullable](../../sql_reference/data_types/nullable.md) و پشت پشتیبانی می شود. مثال: + +``` sql +SELECT toTypeName(x) FROM t_null +``` + +``` text +┌─toTypeName(x)─┐ +│ Int8 │ +│ Int8 │ +└───────────────┘ +``` + +``` sql +SELECT toTypeName(CAST(x, 'Nullable(UInt16)')) FROM t_null +``` + +``` text +┌─toTypeName(CAST(x, 'Nullable(UInt16)'))─┐ +│ Nullable(UInt16) │ +│ Nullable(UInt16) │ +└─────────────────────────────────────────┘ +``` + +## توینتال (سال / سه ماهه / ماه / هفته / روز / ساعت / دقیقه / ثانیه) {#function-tointerval} + +تبدیل یک استدلال نوع شماره به یک [فاصله](../../sql_reference/data_types/special_data_types/interval.md) نوع داده. + +**نحو** + +``` sql +toIntervalSecond(number) +toIntervalMinute(number) +toIntervalHour(number) +toIntervalDay(number) +toIntervalWeek(number) +toIntervalMonth(number) +toIntervalQuarter(number) +toIntervalYear(number) +``` + +**پارامترها** + +- `number` — Duration of interval. Positive integer number. + +**مقادیر بازگشتی** + +- مقدار در `Interval` نوع داده. 
+ +**مثال** + +``` sql +WITH + toDate('2019-01-01') AS date, + INTERVAL 1 WEEK AS interval_week, + toIntervalWeek(1) AS interval_to_week +SELECT + date + interval_week, + date + interval_to_week +``` + +``` text +┌─plus(date, interval_week)─┬─plus(date, interval_to_week)─┐ +│ 2019-01-08 │ 2019-01-08 │ +└───────────────────────────┴──────────────────────────────┘ +``` + +## پردازش زمان {#parsedatetimebesteffort} + +تبدیل یک تاریخ و زمان در [رشته](../../sql_reference/data_types/string.md) نمایندگی به [DateTime](../../sql_reference/data_types/datetime.md#data_type-datetime) نوع داده. + +تابع تجزیه می کند [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601), [1123-5.2.14 تومان-822 تاریخ و زمان مشخصات](https://tools.ietf.org/html/rfc1123#page-55), را کلیک کنید و برخی از فرمت های تاریخ و زمان دیگر. + +**نحو** + +``` sql +parseDateTimeBestEffort(time_string [, time_zone]); +``` + +**پارامترها** + +- `time_string` — String containing a date and time to convert. [رشته](../../sql_reference/data_types/string.md). +- `time_zone` — Time zone. The function parses `time_string` با توجه به منطقه زمانی. [رشته](../../sql_reference/data_types/string.md). + +**فرمت های غیر استاندارد پشتیبانی شده** + +- یک رشته حاوی 9..10 رقمی [برچسب زمان یونیکس](https://en.wikipedia.org/wiki/Unix_time). +- یک رشته با یک تاریخ و یک مولفه زمان: `YYYYMMDDhhmmss`, `DD/MM/YYYY hh:mm:ss`, `DD-MM-YY hh:mm`, `YYYY-MM-DD hh:mm:ss`, و غیره. +- یک رشته با یک تاریخ, اما هیچ مولفه زمان: `YYYY`, `YYYYMM`, `YYYY*MM`, `DD/MM/YYYY`, `DD-MM-YY` و غیره +- یک رشته با یک روز و زمان: `DD`, `DD hh`, `DD hh:mm`. در این مورد `YYYY-MM` به عنوان جایگزین `2000-01`. +- یک رشته است که شامل تاریخ و زمان همراه با منطقه زمانی اطلاعات افست: `YYYY-MM-DD hh:mm:ss ±h:mm`, و غیره. به عنوان مثال, `2020-12-12 17:36:00 -5:00`. + +برای همه فرمت های با جدا تابع تجزیه نام ماه بیان شده توسط نام کامل خود و یا با سه حرف اول یک نام ماه. مثالها: `24/DEC/18`, `24-Dec-18`, `01-September-2018`. + +**مقدار بازگشتی** + +- `time_string` تبدیل به `DateTime` نوع داده. + +**مثالها** + +پرسوجو: + +``` sql +SELECT parseDateTimeBestEffort('12/12/2020 12:12:57') +AS parseDateTimeBestEffort; +``` + +نتیجه: + +``` text +┌─parseDateTimeBestEffort─┐ +│ 2020-12-12 12:12:57 │ +└─────────────────────────┘ +``` + +پرسوجو: + +``` sql +SELECT parseDateTimeBestEffort('Sat, 18 Aug 2018 07:22:16 GMT', 'Europe/Moscow') +AS parseDateTimeBestEffort +``` + +نتیجه: + +``` text +┌─parseDateTimeBestEffort─┐ +│ 2018-08-18 10:22:16 │ +└─────────────────────────┘ +``` + +پرسوجو: + +``` sql +SELECT parseDateTimeBestEffort('1284101485') +AS parseDateTimeBestEffort +``` + +نتیجه: + +``` text +┌─parseDateTimeBestEffort─┐ +│ 2015-07-07 12:04:41 │ +└─────────────────────────┘ +``` + +پرسوجو: + +``` sql +SELECT parseDateTimeBestEffort('2018-12-12 10:12:12') +AS parseDateTimeBestEffort +``` + +نتیجه: + +``` text +┌─parseDateTimeBestEffort─┐ +│ 2018-12-12 10:12:12 │ +└─────────────────────────┘ +``` + +پرسوجو: + +``` sql +SELECT parseDateTimeBestEffort('10 20:19') +``` + +نتیجه: + +``` text +┌─parseDateTimeBestEffort('10 20:19')─┐ +│ 2000-01-10 20:19:00 │ +└─────────────────────────────────────┘ +``` + +**همچنین نگاه کنید** + +- \[ISO 8601 announcement by @xkcd\])https://xkcd.com/1179/) +- [RFC 1123](https://tools.ietf.org/html/rfc1123) +- [toDate](#todate) +- [toDateTime](#todatetime) + +## - ترجمه نشده - {#parsedatetimebesteffortornull} + +همان است که برای [پردازش زمان](#parsedatetimebesteffort) با این تفاوت که وقتی با فرمت تاریخ مواجه می شود که نمی تواند پردازش شود تهی می شود. 
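As a usage sketch (the input string below is deliberately unparseable, and the rendering assumes the default pretty output format), the function is expected to return `NULL` instead of throwing an exception:

``` sql
SELECT parseDateTimeBestEffortOrNull('not a datetime') AS res
```

``` text
┌──res─┐
│ ᴺᵁᴸᴸ │
└──────┘
```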
+ +## - ترجمه نشده - {#parsedatetimebesteffortorzero} + +همان است که برای [پردازش زمان](#parsedatetimebesteffort) با این تفاوت که تاریخ صفر یا زمان صفر را باز می گرداند زمانی که یک فرمت تاریخ مواجه است که نمی تواند پردازش شود. + +[مقاله اصلی](https://clickhouse.tech/docs/en/query_language/functions/type_conversion_functions/) diff --git a/docs/fa/sql_reference/functions/url_functions.md b/docs/fa/sql_reference/functions/url_functions.md new file mode 100644 index 00000000000..61e3b5dddf3 --- /dev/null +++ b/docs/fa/sql_reference/functions/url_functions.md @@ -0,0 +1,210 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 54 +toc_title: "\u06A9\u0627\u0631 \u0628\u0627 \u0646\u0634\u0627\u0646\u06CC\u0647\u0627\ + \u06CC \u0627\u06CC\u0646\u062A\u0631\u0646\u062A\u06CC" +--- + +# توابع برای کار با نشانیهای اینترنتی {#functions-for-working-with-urls} + +همه این توابع را دنبال کنید. حداکثر برای بهبود عملکرد ساده شده اند. + +## توابع استخراج بخشهایی از نشانی وب {#functions-that-extract-parts-of-a-url} + +اگر قسمت مربوطه در نشانی وب وجود نداشته باشد یک رشته خالی برگردانده میشود. + +### قرارداد {#protocol} + +پروتکل را از نشانی اینترنتی خارج میکند. + +Examples of typical returned values: http, https, ftp, mailto, tel, magnet… + +### دامنه {#domain} + +نام میزبان را از یک نشانی اینترنتی استخراج می کند. + +``` sql +domain(url) +``` + +**پارامترها** + +- `url` — URL. Type: [رشته](../../sql_reference/data_types/string.md). + +نشانی وب را می توان با یا بدون یک طرح مشخص شده است. مثالها: + +``` text +svn+ssh://some.svn-hosting.com:80/repo/trunk +some.svn-hosting.com:80/repo/trunk +https://yandex.com/time/ +``` + +برای این نمونه ها `domain` تابع نتایج زیر را برمی گرداند: + +``` text +some.svn-hosting.com +some.svn-hosting.com +yandex.com +``` + +**مقادیر بازگشتی** + +- نام میزبان. اگر کلیک هاوس می تواند رشته ورودی را به عنوان نشانی وب تجزیه کند. +- رشته خالی. اگر کلیکهاوس نمیتواند رشته ورودی را به عنوان نشانی وب تجزیه کند. + +نوع: `String`. + +**مثال** + +``` sql +SELECT domain('svn+ssh://some.svn-hosting.com:80/repo/trunk') +``` + +``` text +┌─domain('svn+ssh://some.svn-hosting.com:80/repo/trunk')─┐ +│ some.svn-hosting.com │ +└────────────────────────────────────────────────────────┘ +``` + +### دامینویتهویتوو {#domainwithoutwww} + +بازگرداندن دامنه و حذف بیش از یک ‘www.’ اگه از اول شروع بشه + +### توپولدومین {#topleveldomain} + +عصاره این دامنه سطح بالا از یک url. + +``` sql +topLevelDomain(url) +``` + +**پارامترها** + +- `url` — URL. Type: [رشته](../../sql_reference/data_types/string.md). + +نشانی وب را می توان با یا بدون یک طرح مشخص شده است. مثالها: + +``` text +svn+ssh://some.svn-hosting.com:80/repo/trunk +some.svn-hosting.com:80/repo/trunk +https://yandex.com/time/ +``` + +**مقادیر بازگشتی** + +- نام دامنه. اگر کلیک هاوس می تواند رشته ورودی را به عنوان نشانی وب تجزیه کند. +- رشته خالی. اگر کلیکهاوس نمیتواند رشته ورودی را به عنوان نشانی وب تجزیه کند. + +نوع: `String`. + +**مثال** + +``` sql +SELECT topLevelDomain('svn+ssh://www.some.svn-hosting.com:80/repo/trunk') +``` + +``` text +┌─topLevelDomain('svn+ssh://www.some.svn-hosting.com:80/repo/trunk')─┐ +│ com │ +└────────────────────────────────────────────────────────────────────┘ +``` + +### در حال بارگذاری {#firstsignificantsubdomain} + +بازگرداندن “first significant subdomain”. این یک مفهوم غیر استاندارد خاص به یاندکس است.متریکا اولین زیر دامنه قابل توجهی یک دامنه سطح دوم است ‘com’, ‘net’, ‘org’ یا ‘co’. در غیر این صورت, این یک دامنه سطح سوم است. 
به عنوان مثال, `firstSignificantSubdomain (‘https://news.yandex.ru/’) = ‘yandex’, firstSignificantSubdomain (‘https://news.yandex.com.tr/’) = ‘yandex’`. فهرست “insignificant” دامنه های سطح دوم و سایر اطلاعات پیاده سازی ممکن است در اینده تغییر کند. + +### در حال بارگذاری {#cuttofirstsignificantsubdomain} + +بازگرداندن بخشی از دامنه است که شامل زیر دامنه سطح بالا تا “first significant subdomain” (توضیح بالا را ببینید). + +به عنوان مثال, `cutToFirstSignificantSubdomain('https://news.yandex.com.tr/') = 'yandex.com.tr'`. + +### مسیر {#path} + +مسیر را برمی گرداند. مثال: `/top/news.html` مسیر رشته پرس و جو را شامل نمی شود. + +### مسیر {#pathfull} + +همان بالا, اما از جمله رشته پرس و جو و قطعه. مثال:/بالا / اخبار.زنگامصفحه = 2 \# نظرات + +### رشته {#querystring} + +بازگرداندن رشته پرس و جو. مثال: صفحه=1&چاپی=213. پرس و جو رشته علامت سوال اولیه را شامل نمی شود, و همچنین \# و همه چیز بعد از \#. + +### قطعه {#fragment} + +بازگرداندن شناسه قطعه. قطعه می کند نماد هش اولیه را شامل نمی شود. + +### وضعیت زیستشناختی رکورد {#querystringandfragment} + +بازگرداندن رشته پرس و جو و شناسه قطعه. مثال: صفحه=1\#29390. + +### نام) {#extracturlparameterurl-name} + +بازگرداندن ارزش ‘name’ پارامتر در نشانی وب, در صورت وجود. در غیر این صورت, یک رشته خالی. اگر پارامترهای بسیاری با این نام وجود دارد, این اولین رخداد را برمی گرداند. این تابع با این فرض کار می کند که نام پارامتر در نشانی اینترنتی دقیقا به همان شیوه در استدلال گذشت کد گذاری شده است. + +### شمارش معکوس) {#extracturlparametersurl} + +مجموعه ای از نام=رشته ارزش مربوط به پارامترهای نشانی وب را برمی گرداند. ارزش ها به هیچ وجه رمزگشایی نمی. + +### extractURLParameterNames(URL) {#extracturlparameternamesurl} + +مجموعه ای از نام رشته های مربوط به نام پارامترهای نشانی وب را باز می گرداند. ارزش ها به هیچ وجه رمزگشایی نمی. + +### URLHierarchy(URL) {#urlhierarchyurl} + +را برمی گرداند مجموعه ای حاوی نشانی اینترنتی, کوتاه در پایان توسط علامت /,? در مسیر و پرس و جو-رشته. کاراکتر جداکننده متوالی به عنوان یکی شمارش می شود. برش در موقعیت بعد از تمام کاراکتر جدا متوالی ساخته شده است. + +### URLPathHierarchy(URL) {#urlpathhierarchyurl} + +همانطور که در بالا, اما بدون پروتکل و میزبان در نتیجه. / عنصر (ریشه) گنجانده نشده است. به عنوان مثال: تابع استفاده می شود برای پیاده سازی درخت گزارش نشانی اینترنتی در یاندکس. متریک. + +``` text +URLPathHierarchy('https://example.com/browse/CONV-6788') = +[ + '/browse/', + '/browse/CONV-6788' +] +``` + +### نما & نشانی وب) {#decodeurlcomponenturl} + +را برمی گرداند نشانی اینترنتی رمزگشایی. +مثال: + +``` sql +SELECT decodeURLComponent('http://127.0.0.1:8123/?query=SELECT%201%3B') AS DecodedURL; +``` + +``` text +┌─DecodedURL─────────────────────────────┐ +│ http://127.0.0.1:8123/?query=SELECT 1; │ +└────────────────────────────────────────┘ +``` + +## توابع که حذف بخشی از یک نشانی وب. {#functions-that-remove-part-of-a-url} + +اگر نشانی وب هیچ چیز مشابه ندارد, نشانی اینترنتی بدون تغییر باقی می ماند. + +### بریدن {#cutwww} + +حذف بیش از یک ‘www.’ از ابتدای دامنه نشانی اینترنتی در صورت وجود. + +### & رشته {#cutquerystring} + +حذف رشته پرس و جو. علامت سوال نیز حذف شده است. + +### تقسیم {#cutfragment} + +حذف شناسه قطعه. علامت شماره نیز حذف شده است. + +### هشدار داده می شود {#cutquerystringandfragment} + +حذف رشته پرس و جو و شناسه قطعه. علامت سوال و علامت شماره نیز حذف شده است. + +### cutURLParameter(URL, نام) {#cuturlparameterurl-name} + +حذف ‘name’ پارامتر نشانی وب, در صورت وجود. این تابع با این فرض کار می کند که نام پارامتر در نشانی اینترنتی دقیقا به همان شیوه در استدلال گذشت کد گذاری شده است. 
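A short usage sketch (the URL is hypothetical); the `a=1` pair is expected to be removed while the rest of the query string is kept:

``` sql
SELECT cutURLParameter('http://example.com/?a=1&b=2', 'a') AS url
```

``` text
┌─url─────────────────────┐
│ http://example.com/?b=2 │
└─────────────────────────┘
```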
+ +[مقاله اصلی](https://clickhouse.tech/docs/en/query_language/functions/url_functions/) diff --git a/docs/fa/query_language/functions/uuid_functions.md b/docs/fa/sql_reference/functions/uuid_functions.md similarity index 51% rename from docs/fa/query_language/functions/uuid_functions.md rename to docs/fa/sql_reference/functions/uuid_functions.md index e865f92f639..d4c6f325b06 100644 --- a/docs/fa/query_language/functions/uuid_functions.md +++ b/docs/fa/sql_reference/functions/uuid_functions.md @@ -1,26 +1,29 @@ --- -en_copy: true +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 53 +toc_title: "\u06A9\u0627\u0631 \u0628\u0627 \u06CC\u0648\u06CC\u062F" --- -# Functions for working with UUID {#functions-for-working-with-uuid} +# توابع برای کار با یوید {#functions-for-working-with-uuid} -The functions for working with UUID are listed below. +توابع برای کار با شناسه به شرح زیر است. -## generateUUIDv4 {#uuid-function-generate} +## جنراتیدو4 {#uuid-function-generate} -Generates the [UUID](../../data_types/uuid.md) of [version 4](https://tools.ietf.org/html/rfc4122#section-4.4). +تولید [UUID](../../sql_reference/data_types/uuid.md) از [نسخه 4](https://tools.ietf.org/html/rfc4122#section-4.4). ``` sql generateUUIDv4() ``` -**Returned value** +**مقدار بازگشتی** -The UUID type value. +مقدار نوع شناسه. -**Usage example** +**مثال طریقه استفاده** -This example demonstrates creating a table with the UUID type column and inserting a value into the table. +این مثال نشان می دهد ایجاد یک جدول با ستون نوع شناسه و قرار دادن یک مقدار به جدول. ``` sql CREATE TABLE t_uuid (x UUID) ENGINE=TinyLog @@ -36,19 +39,19 @@ SELECT * FROM t_uuid └──────────────────────────────────────┘ ``` -## toUUID (x) {#touuid-x} +## شناسه بسته:) {#touuid-x} -Converts String type value to UUID type. +تبدیل مقدار نوع رشته به نوع شناسه. ``` sql toUUID(String) ``` -**Returned value** +**مقدار بازگشتی** -The UUID type value. +این uuid نوع ارزش است. -**Usage example** +**مثال طریقه استفاده** ``` sql SELECT toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba0') AS uuid @@ -60,19 +63,19 @@ SELECT toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba0') AS uuid └──────────────────────────────────────┘ ``` -## UUIDStringToNum {#uuidstringtonum} +## وضعیت زیستشناختی رکورد {#uuidstringtonum} -Accepts a string containing 36 characters in the format `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`, and returns it as a set of bytes in a [FixedString(16)](../../data_types/fixedstring.md). +می پذیرد یک رشته حاوی 36 کاراکتر در قالب `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx` و به عنوان مجموعه ای از بایت ها در یک [رشته ثابت (16)](../../sql_reference/data_types/fixedstring.md). ``` sql UUIDStringToNum(String) ``` -**Returned value** +**مقدار بازگشتی** -FixedString(16) +رشته ثابت (16) -**Usage examples** +**نمونه های استفاده** ``` sql SELECT @@ -81,25 +84,24 @@ SELECT ``` ``` text - ┌─uuid─────────────────────────────────┬─bytes────────────┐ │ 612f3c40-5d3b-217e-707b-6a546a3d7b29 │ a/<@];!~p{jTj={) │ └──────────────────────────────────────┴──────────────────┘ ``` -## UUIDNumToString {#uuidnumtostring} +## هشدار داده می شود {#uuidnumtostring} -Accepts a [FixedString(16)](../../data_types/fixedstring.md) value, and returns a string containing 36 characters in text format. +می پذیرد [رشته ثابت (16)](../../sql_reference/data_types/fixedstring.md) ارزش, و یک رشته حاوی گرداند 36 شخصیت در قالب متن. ``` sql UUIDNumToString(FixedString(16)) ``` -**Returned value** +**مقدار بازگشتی** -String. +رشته. 
-**Usage example** +**مثال طریقه استفاده** ``` sql SELECT @@ -113,8 +115,8 @@ SELECT └──────────────────┴──────────────────────────────────────┘ ``` -## See also {#see-also} +## همچنین نگاه کنید به {#see-also} -- [dictGetUUID](ext_dict_functions.md#ext_dict_functions-other) +- [دیکتاتوری](ext_dict_functions.md#ext_dict_functions-other) -[Original article](https://clickhouse.tech/docs/en/query_language/functions/uuid_function/) +[مقاله اصلی](https://clickhouse.tech/docs/en/query_language/functions/uuid_function/) diff --git a/docs/fa/sql_reference/functions/ym_dict_functions.md b/docs/fa/sql_reference/functions/ym_dict_functions.md new file mode 100644 index 00000000000..25a31cd1020 --- /dev/null +++ b/docs/fa/sql_reference/functions/ym_dict_functions.md @@ -0,0 +1,157 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 59 +toc_title: "\u06A9\u0627\u0631 \u0628\u0627 \u06CC\u0627\u0646\u062F\u06A9\u0633.\u0648\ + \u0627\u0698\u0647\u0646\u0627\u0645\u0647\u0647\u0627 \u0645\u062A\u0631\u06CC\u06A9\ + \u0627" +--- + +# توابع برای کار با یاندکس.واژهنامهها متریکا {#functions-for-working-with-yandex-metrica-dictionaries} + +به منظور توابع زیر به کار پیکربندی سرور باید مشخص مسیر و نشانی برای گرفتن تمام یاندکس.واژهنامهها متریکا. لغت نامه ها در اولین تماس از هر یک از این توابع لود می شود. اگر لیست مرجع نمی تواند لود شود, یک استثنا پرتاب می شود. + +برای اطلاعات در مورد ایجاد لیست مرجع, بخش را ببینید “Dictionaries”. + +## موقعیت جغرافیایی چندگانه {#multiple-geobases} + +ClickHouse پشتیبانی از کار با چند جایگزین geobases (منطقه ای سلسله مراتب) به طور همزمان به منظور حمایت از دیدگاه های مختلف که در آن کشورهای خاص مناطق متعلق به. + +این ‘clickhouse-server’ پیکربندی فایل را با سلسله مراتب منطقه ای مشخص می کند::`/opt/geo/regions_hierarchy.txt` + +علاوه بر این فایل, همچنین برای فایل های جستجو در این نزدیکی هست که نماد \_ و هر پسوند اضافه به نام (قبل از پسوند فایل). +مثلا, همچنین فایل را پیدا خواهد کرد `/opt/geo/regions_hierarchy_ua.txt`, اگر در حال حاضر. + +`ua` کلید فرهنگ لغت نامیده می شود. برای یک فرهنگ لغت بدون پسوند, کلید یک رشته خالی است. + +تمام واژهنامهها دوباره لود شده در زمان اجرا (یک بار در هر تعداد معینی از ثانیه, همانطور که در دستور داخلی تعریف شده \_فرهنگ\_ پارامتر پیکربندی, و یا یک بار در ساعت به طور پیش فرض). با این حال, لیست لغت نامه های موجود تعریف شده است یک بار, زمانی که سرور شروع می شود. + +All functions for working with regions have an optional argument at the end – the dictionary key. It is referred to as the geobase. +مثال: + +``` sql +regionToCountry(RegionID) – Uses the default dictionary: /opt/geo/regions_hierarchy.txt +regionToCountry(RegionID, '') – Uses the default dictionary: /opt/geo/regions_hierarchy.txt +regionToCountry(RegionID, 'ua') – Uses the dictionary for the 'ua' key: /opt/geo/regions_hierarchy_ua.txt +``` + +### regionToCity(id\[, geobase\]) {#regiontocityid-geobase} + +Accepts a UInt32 number – the region ID from the Yandex geobase. If this region is a city or part of a city, it returns the region ID for the appropriate city. Otherwise, returns 0. + +### regionToArea(id\[, geobase\]) {#regiontoareaid-geobase} + +تبدیل یک منطقه به یک منطقه (نوع 5 در پایگاه داده). در هر راه دیگر, این تابع همان است که ‘regionToCity’. + +``` sql +SELECT DISTINCT regionToName(regionToArea(toUInt32(number), 'ua')) +FROM system.numbers +LIMIT 15 +``` + +``` text +┌─regionToName(regionToArea(toUInt32(number), \'ua\'))─┐ +│ │ +│ Moscow and Moscow region │ +│ St. 
Petersburg and Leningrad region │ +│ Belgorod region │ +│ Ivanovsk region │ +│ Kaluga region │ +│ Kostroma region │ +│ Kursk region │ +│ Lipetsk region │ +│ Orlov region │ +│ Ryazan region │ +│ Smolensk region │ +│ Tambov region │ +│ Tver region │ +│ Tula region │ +└──────────────────────────────────────────────────────┘ +``` + +### regionToDistrict(id\[, geobase\]) {#regiontodistrictid-geobase} + +تبدیل یک منطقه به یک منطقه فدرال (نوع 4 در پایگاه داده). در هر راه دیگر, این تابع همان است که ‘regionToCity’. + +``` sql +SELECT DISTINCT regionToName(regionToDistrict(toUInt32(number), 'ua')) +FROM system.numbers +LIMIT 15 +``` + +``` text +┌─regionToName(regionToDistrict(toUInt32(number), \'ua\'))─┐ +│ │ +│ Central federal district │ +│ Northwest federal district │ +│ South federal district │ +│ North Caucases federal district │ +│ Privolga federal district │ +│ Ural federal district │ +│ Siberian federal district │ +│ Far East federal district │ +│ Scotland │ +│ Faroe Islands │ +│ Flemish region │ +│ Brussels capital region │ +│ Wallonia │ +│ Federation of Bosnia and Herzegovina │ +└──────────────────────────────────────────────────────────┘ +``` + +### regionToCountry(id\[, geobase\]) {#regiontocountryid-geobase} + +تبدیل یک منطقه به یک کشور. در هر راه دیگر, این تابع همان است که ‘regionToCity’. +مثال: `regionToCountry(toUInt32(213)) = 225` تبدیل مسکو (213) به روسیه (225). + +### regionToContinent(id\[, geobase\]) {#regiontocontinentid-geobase} + +تبدیل یک منطقه به یک قاره. در هر راه دیگر, این تابع همان است که ‘regionToCity’. +مثال: `regionToContinent(toUInt32(213)) = 10001` تبدیل مسکو (213) به اوراسیا (10001). + +### نقلقولهای جدید از این نویسنده) {#regiontotopcontinent-regiontotopcontinent} + +بالاترین قاره را در سلسله مراتب منطقه پیدا می کند. + +**نحو** + +``` sql +regionToTopContinent(id[, geobase]); +``` + +**پارامترها** + +- `id` — Region ID from the Yandex geobase. [UInt32](../../sql_reference/data_types/int_uint.md). +- `geobase` — Dictionary key. See [موقعیت جغرافیایی چندگانه](#multiple-geobases). [رشته](../../sql_reference/data_types/string.md). اختیاری. + +**مقدار بازگشتی** + +- شناسه قاره سطح بالا (دومی زمانی که شما صعود سلسله مراتب مناطق). +- 0, اگر هیچ کدام وجود دارد. + +نوع: `UInt32`. + +### regionToPopulation(id\[, geobase\]) {#regiontopopulationid-geobase} + +می شود جمعیت برای یک منطقه. +جمعیت را می توان در فایل های با پایگاه داده ثبت شده است. بخش را ببینید “External dictionaries”. +اگر جمعیت برای منطقه ثبت نشده, باز می گردد 0. +در یاندکس پایگاه جغرافیایی, جمعیت ممکن است برای مناطق کودک ثبت, اما نه برای مناطق پدر و مادر. + +### regionIn(lhs, rhs\[, geobase\]) {#regioninlhs-rhs-geobase} + +بررسی اینکه یک ‘lhs’ منطقه متعلق به ‘rhs’ منطقه. بازگرداندن یک عدد 18 برابر 1 اگر متعلق, یا 0 اگر تعلق ندارد. +The relationship is reflexive – any region also belongs to itself. + +### regionHierarchy(id\[, geobase\]) {#regionhierarchyid-geobase} + +Accepts a UInt32 number – the region ID from the Yandex geobase. Returns an array of region IDs consisting of the passed region and all parents along the chain. +مثال: `regionHierarchy(toUInt32(213)) = [213,1,3,225,10001,10000]`. + +### شناسه بسته:\]) {#regiontonameid-lang} + +Accepts a UInt32 number – the region ID from the Yandex geobase. A string with the name of the language can be passed as a second argument. Supported languages are: ru, en, ua, uk, by, kz, tr. If the second argument is omitted, the language ‘ru’ is used. If the language is not supported, an exception is thrown. 
Returns a string – the name of the region in the corresponding language. If the region with the specified ID doesn't exist, an empty string is returned. + +`ua` و `uk` هر دو به معنای اوکراین. + +[مقاله اصلی](https://clickhouse.tech/docs/en/query_language/functions/ym_dict_functions/) diff --git a/docs/fa/sql_reference/index.md b/docs/fa/sql_reference/index.md new file mode 100644 index 00000000000..bfee52fe239 --- /dev/null +++ b/docs/fa/sql_reference/index.md @@ -0,0 +1,18 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_folder_title: SQL Reference +toc_hidden: true +toc_priority: 28 +toc_title: "\u0645\u062E\u0641\u06CC" +--- + +# مرجع مربع {#sql-reference} + +- [SELECT](statements/select.md) +- [INSERT INTO](statements/insert_into.md) +- [CREATE](statements/create.md) +- [ALTER](statements/alter.md#query_language_queries_alter) +- [انواع دیگر نمایش داده شد](statements/misc.md) + +[مقاله اصلی](https://clickhouse.tech/docs/en/query_language/) diff --git a/docs/fa/sql_reference/operators.md b/docs/fa/sql_reference/operators.md new file mode 100644 index 00000000000..577b9079998 --- /dev/null +++ b/docs/fa/sql_reference/operators.md @@ -0,0 +1,278 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 37 +toc_title: "\u0627\u067E\u0631\u0627\u062A\u0648\u0631\u0647\u0627" +--- + +# اپراتورها {#operators} + +همه اپراتورها در حال تبدیل به توابع مربوط به خود را در پرس و جو و تجزیه مرحله مطابق با اولویت و associativity. +گروه اپراتورهای به ترتیب اولویت ذکر شده (بالاتر در لیست است, زودتر اپراتور به استدلال خود متصل). + +## اپراتورهای دسترسی {#access-operators} + +`a[N]` – Access to an element of an array. The `arrayElement(a, N)` تابع. + +`a.N` – Access to a tuple element. The `tupleElement(a, N)` تابع. + +## اپراتور نفی عددی {#numeric-negation-operator} + +`-a` – The `negate (a)` تابع. + +## اپراتورهای ضرب و تقسیم {#multiplication-and-division-operators} + +`a * b` – The `multiply (a, b)` تابع. + +`a / b` – The `divide(a, b)` تابع. + +`a % b` – The `modulo(a, b)` تابع. + +## اپراتورهای جمع و تفریق {#addition-and-subtraction-operators} + +`a + b` – The `plus(a, b)` تابع. + +`a - b` – The `minus(a, b)` تابع. + +## مقایسه اپراتورها {#comparison-operators} + +`a = b` – The `equals(a, b)` تابع. + +`a == b` – The `equals(a, b)` تابع. + +`a != b` – The `notEquals(a, b)` تابع. + +`a <> b` – The `notEquals(a, b)` تابع. + +`a <= b` – The `lessOrEquals(a, b)` تابع. + +`a >= b` – The `greaterOrEquals(a, b)` تابع. + +`a < b` – The `less(a, b)` تابع. + +`a > b` – The `greater(a, b)` تابع. + +`a LIKE s` – The `like(a, b)` تابع. + +`a NOT LIKE s` – The `notLike(a, b)` تابع. + +`a BETWEEN b AND c` – The same as `a >= b AND a <= c`. + +`a NOT BETWEEN b AND c` – The same as `a < b OR a > c`. + +## اپراتورها برای کار با مجموعه داده ها {#operators-for-working-with-data-sets} + +*ببینید [در اپراتورها](statements/select.md#select-in-operators).* + +`a IN ...` – The `in(a, b)` تابع. + +`a NOT IN ...` – The `notIn(a, b)` تابع. + +`a GLOBAL IN ...` – The `globalIn(a, b)` تابع. + +`a GLOBAL NOT IN ...` – The `globalNotIn(a, b)` تابع. + +## اپراتورها برای کار با تاریخ و زمان {#operators-datetime} + +### EXTRACT {#operator-extract} + +``` sql +EXTRACT(part FROM date); +``` + +عصاره بخشی از یک تاریخ معین. مثلا, شما می توانید یک ماه از یک تاریخ معین بازیابی, یا یک ثانیه از یک زمان. + +این `part` پارامتر مشخص می کند که بخشی از تاریخ برای بازیابی. مقادیر زیر در دسترس هستند: + +- `DAY` — The day of the month. 
Possible values: 1–31. +- `MONTH` — The number of a month. Possible values: 1–12. +- `YEAR` — The year. +- `SECOND` — The second. Possible values: 0–59. +- `MINUTE` — The minute. Possible values: 0–59. +- `HOUR` — The hour. Possible values: 0–23. + +این `part` پارامتر غیر حساس به حروف است. + +این `date` پارامتر تاریخ یا زمان پردازش را مشخص می کند. هر دو [تاریخ](../sql_reference/data_types/date.md) یا [DateTime](../sql_reference/data_types/datetime.md) نوع پشتیبانی می شود. + +مثالها: + +``` sql +SELECT EXTRACT(DAY FROM toDate('2017-06-15')); +SELECT EXTRACT(MONTH FROM toDate('2017-06-15')); +SELECT EXTRACT(YEAR FROM toDate('2017-06-15')); +``` + +در مثال زیر ما ایجاد یک جدول و قرار دادن به آن یک مقدار با `DateTime` نوع. + +``` sql +CREATE TABLE test.Orders +( + OrderId UInt64, + OrderName String, + OrderDate DateTime +) +ENGINE = Log; +``` + +``` sql +INSERT INTO test.Orders VALUES (1, 'Jarlsberg Cheese', toDateTime('2008-10-11 13:23:44')); +``` + +``` sql +SELECT + toYear(OrderDate) AS OrderYear, + toMonth(OrderDate) AS OrderMonth, + toDayOfMonth(OrderDate) AS OrderDay, + toHour(OrderDate) AS OrderHour, + toMinute(OrderDate) AS OrderMinute, + toSecond(OrderDate) AS OrderSecond +FROM test.Orders; +``` + +``` text +┌─OrderYear─┬─OrderMonth─┬─OrderDay─┬─OrderHour─┬─OrderMinute─┬─OrderSecond─┐ +│ 2008 │ 10 │ 11 │ 13 │ 23 │ 44 │ +└───────────┴────────────┴──────────┴───────────┴─────────────┴─────────────┘ +``` + +شما می توانید نمونه های بیشتری را در [تستها](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00619_extract.sql). + +### INTERVAL {#operator-interval} + +ایجاد یک [فاصله](../sql_reference/data_types/special_data_types/interval.md)- ارزش نوع است که باید در عملیات ریاضی با استفاده [تاریخ](../sql_reference/data_types/date.md) و [DateTime](../sql_reference/data_types/datetime.md)- ارزش نوع . + +انواع فواصل: +- `SECOND` +- `MINUTE` +- `HOUR` +- `DAY` +- `WEEK` +- `MONTH` +- `QUARTER` +- `YEAR` + +!!! warning "اخطار" + فواصل با انواع مختلف نمی تواند ترکیب شود. شما می توانید عبارات مانند استفاده کنید `INTERVAL 4 DAY 1 HOUR`. بیان فواصل در واحد است که کوچکتر یا مساوی کوچکترین واحد فاصله برای مثال `INTERVAL 25 HOUR`. شما می توانید عملیات تبعی مانند مثال زیر استفاده کنید. + +مثال: + +``` sql +SELECT now() AS current_date_time, current_date_time + INTERVAL 4 DAY + INTERVAL 3 HOUR +``` + +``` text +┌───current_date_time─┬─plus(plus(now(), toIntervalDay(4)), toIntervalHour(3))─┐ +│ 2019-10-23 11:16:28 │ 2019-10-27 14:16:28 │ +└─────────────────────┴────────────────────────────────────────────────────────┘ +``` + +**همچنین نگاه کنید** + +- [فاصله](../sql_reference/data_types/special_data_types/interval.md) نوع داده +- [توینتروال](../sql_reference/functions/type_conversion_functions.md#function-tointerval) توابع تبدیل نوع + +## اپراتور نفی منطقی {#logical-negation-operator} + +`NOT a` – The `not(a)` تابع. + +## منطقی و اپراتور {#logical-and-operator} + +`a AND b` – The`and(a, b)` تابع. + +## منطقی یا اپراتور {#logical-or-operator} + +`a OR b` – The `or(a, b)` تابع. + +## اپراتور شرطی {#conditional-operator} + +`a ? b : c` – The `if(a, b, c)` تابع. + +یادداشت: + +اپراتور مشروط محاسبه ارزش ب و ج, سپس چک چه شرایط ملاقات کرده است, و سپس مقدار مربوطه را برمی گرداند. اگر `b` یا `C` یک [ارریجین()](../sql_reference/functions/array_join.md#functions_arrayjoin) عملکرد هر سطر خواهد بود تکرار صرف نظر از “a” شرط. + +## عبارت شرطی {#operator_case} + +``` sql +CASE [x] + WHEN a THEN b + [WHEN ... THEN ...] 
+ [ELSE c] +END +``` + +اگر `x` مشخص شده است, سپس `transform(x, [a, ...], [b, ...], c)` function is used. Otherwise – `multiIf(a, b, ..., c)`. + +اگر وجود ندارد `ELSE c` بند در بیان, مقدار پیش فرض است `NULL`. + +این `transform` تابع با کار نمی کند `NULL`. + +## اپراتور الحاق {#concatenation-operator} + +`s1 || s2` – The `concat(s1, s2) function.` + +## لامبدا اپراتور ایجاد {#lambda-creation-operator} + +`x -> expr` – The `lambda(x, expr) function.` + +اپراتورهای زیر یک اولویت ندارد, از براکت هستند: + +## اپراتور ایجاد مجموعه {#array-creation-operator} + +`[x1, ...]` – The `array(x1, ...) function.` + +## اپراتور ایجاد تاپل {#tuple-creation-operator} + +`(x1, x2, ...)` – The `tuple(x2, x2, ...) function.` + +## Associativity {#associativity} + +همه اپراتورهای دودویی انجمن را ترک کرده اند. به عنوان مثال, `1 + 2 + 3` تبدیل به `plus(plus(1, 2), 3)`. +گاهی اوقات این راه شما انتظار می رود کار نمی کند. به عنوان مثال, `SELECT 4 > 2 > 3` در نتیجه 0. + +برای بهره وری `and` و `or` توابع قبول هر تعداد از استدلال. زنجیره های مربوطه از `AND` و `OR` اپراتورها به یک تماس از این توابع تبدیل شده است. + +## در حال بررسی برای `NULL` {#checking-for-null} + +تاتر از `IS NULL` و `IS NOT NULL` اپراتورها. + +### IS NULL {#operator-is-null} + +- برای [Nullable](../sql_reference/data_types/nullable.md) مقادیر نوع `IS NULL` بازگشت اپراتور: + - `1` اگر مقدار باشد `NULL`. + - `0` وگرنه +- برای ارزش های دیگر `IS NULL` اپراتور همیشه باز می گردد `0`. + + + +``` sql +SELECT x+100 FROM t_null WHERE y IS NULL +``` + +``` text +┌─plus(x, 100)─┐ +│ 101 │ +└──────────────┘ +``` + +### IS NOT NULL {#is-not-null} + +- برای [Nullable](../sql_reference/data_types/nullable.md) مقادیر نوع `IS NOT NULL` بازگشت اپراتور: + - `0` اگر مقدار باشد `NULL`. + - `1` وگرنه +- برای ارزش های دیگر `IS NOT NULL` اپراتور همیشه باز می گردد `1`. + + + +``` sql +SELECT * FROM t_null WHERE y IS NOT NULL +``` + +``` text +┌─x─┬─y─┐ +│ 2 │ 3 │ +└───┴───┘ +``` + +[مقاله اصلی](https://clickhouse.tech/docs/en/query_language/operators/) diff --git a/docs/fa/sql_reference/statements/alter.md b/docs/fa/sql_reference/statements/alter.md new file mode 100644 index 00000000000..c2b8561c299 --- /dev/null +++ b/docs/fa/sql_reference/statements/alter.md @@ -0,0 +1,505 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 36 +toc_title: ALTER +--- + +## ALTER {#query_language_queries_alter} + +این `ALTER` پرسوجو فقط برای پشتیبانی `*MergeTree` جداول و همچنین `Merge`و`Distributed`. پرس و جو دارای چندین تغییرات. + +### دستکاری ستون {#column-manipulations} + +تغییر ساختار جدول. + +``` sql +ALTER TABLE [db].name [ON CLUSTER cluster] ADD|DROP|CLEAR|COMMENT|MODIFY COLUMN ... +``` + +در پرس و جو, مشخص یک لیست از یک یا چند اقدامات کاما از هم جدا. +هر عمل یک عملیات بر روی یک ستون است. + +اقدامات زیر پشتیبانی می شوند: + +- [ADD COLUMN](#alter_add-column) — Adds a new column to the table. +- [DROP COLUMN](#alter_drop-column) — Deletes the column. +- [CLEAR COLUMN](#alter_clear-column) — Resets column values. +- [COMMENT COLUMN](#alter_comment-column) — Adds a text comment to the column. +- [MODIFY COLUMN](#alter_modify-column) — Changes column's type, default expression and TTL. + +این اقدامات در زیر توضیح داده شده است. 
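As a quick illustration of the comma-separated form (a sketch; `visits`, `browser`, and `duration` are hypothetical names), several actions can go into a single query; each action is described in detail below:

``` sql
ALTER TABLE visits
    ADD COLUMN browser String AFTER user_id,
    COMMENT COLUMN browser 'Browser used for the visit',
    MODIFY COLUMN duration UInt64
```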
+ +#### ADD COLUMN {#alter_add-column} + +``` sql +ADD COLUMN [IF NOT EXISTS] name [type] [default_expr] [codec] [AFTER name_after] +``` + +یک ستون جدید به جدول با مشخص اضافه می کند `name`, `type`, [`codec`](create.md#codecs) و `default_expr` (نگاه کنید به بخش [عبارتهای پیشفرض](create.md#create-default-values)). + +اگر `IF NOT EXISTS` بند گنجانده شده است, پرس و جو یک خطا بازگشت نیست اگر ستون در حال حاضر وجود دارد. اگر شما مشخص کنید `AFTER name_after` (نام ستون دیگر), ستون پس از یک مشخص شده در لیست ستون جدول اضافه. در غیر این صورت, ستون به پایان جدول اضافه. توجه داشته باشید که هیچ راهی برای اضافه کردن یک ستون به ابتدای جدول وجود دارد. برای زنجیره ای از اقدامات, `name_after` می تواند نام یک ستون است که در یکی از اقدامات قبلی اضافه شده است. + +اضافه کردن یک ستون فقط تغییر ساختار جدول, بدون انجام هر گونه اقدامات با داده. داده ها بر روی دیسک پس از ظاهر نمی شود `ALTER`. اگر داده ها برای یک ستون از دست رفته در هنگام خواندن از جدول, این است که در با مقادیر پیش فرض پر (با انجام عبارت پیش فرض اگر یکی وجود دارد, و یا با استفاده از صفر یا رشته های خالی). ستون بر روی دیسک به نظر می رسد پس از ادغام قطعات داده (دیدن [ادغام](../../engines/table_engines/mergetree_family/mergetree.md)). + +این رویکرد به ما اجازه می دهد برای تکمیل `ALTER` پرس و جو فورا, بدون افزایش حجم داده های قدیمی. + +مثال: + +``` sql +ALTER TABLE visits ADD COLUMN browser String AFTER user_id +``` + +#### DROP COLUMN {#alter_drop-column} + +``` sql +DROP COLUMN [IF EXISTS] name +``` + +ستون را با نام حذف می کند `name`. اگر `IF EXISTS` بند مشخص شده است, پرس و جو یک خطا بازگشت نیست اگر ستون وجود ندارد. + +حذف داده ها از سیستم فایل. از این حذف تمام فایل های پرس و جو تقریبا بلافاصله تکمیل شده است. + +مثال: + +``` sql +ALTER TABLE visits DROP COLUMN browser +``` + +#### CLEAR COLUMN {#alter_clear-column} + +``` sql +CLEAR COLUMN [IF EXISTS] name IN PARTITION partition_name +``` + +بازنشانی تمام داده ها در یک ستون برای یک پارتیشن مشخص. اطلاعات بیشتر در مورد تنظیم نام پارتیشن در بخش [نحوه مشخص کردن عبارت پارتیشن](#alter-how-to-specify-part-expr). + +اگر `IF EXISTS` بند مشخص شده است, پرس و جو یک خطا بازگشت نیست اگر ستون وجود ندارد. + +مثال: + +``` sql +ALTER TABLE visits CLEAR COLUMN browser IN PARTITION tuple() +``` + +#### COMMENT COLUMN {#alter_comment-column} + +``` sql +COMMENT COLUMN [IF EXISTS] name 'comment' +``` + +می افزاید: نظر به ستون. اگر `IF EXISTS` بند مشخص شده است, پرس و جو یک خطا بازگشت نیست اگر ستون وجود ندارد. + +هر ستون می تواند یک نظر داشته باشد. اگر یک نظر در حال حاضر برای ستون وجود دارد, یک نظر جدید رونویسی نظر قبلی. + +نظرات در ذخیره می شود `comment_expression` ستون توسط [DESCRIBE TABLE](misc.md#misc-describe-table) پرس و جو. + +مثال: + +``` sql +ALTER TABLE visits COMMENT COLUMN browser 'The table shows the browser used for accessing the site.' +``` + +#### MODIFY COLUMN {#alter_modify-column} + +``` sql +MODIFY COLUMN [IF EXISTS] name [type] [default_expr] [TTL] +``` + +این پرسوجو تغییر میکند `name` ویژگیهای ستون: + +- نوع + +- عبارت پیشفرض + +- TTL + + For examples of columns TTL modifying, see [Column TTL](../engines/table_engines/mergetree_family/mergetree.md#mergetree-column-ttl). + +اگر `IF EXISTS` بند مشخص شده است, پرس و جو یک خطا بازگشت نیست اگر ستون وجود ندارد. + +هنگام تغییر نوع, ارزش ها به عنوان اگر تبدیل [نوع](../../sql_reference/functions/type_conversion_functions.md) توابع به کار گرفته شد. اگر تنها عبارت پیش فرض تغییر می کند, پرس و جو می کند هر چیزی پیچیده نیست, و تقریبا بلافاصله تکمیل. 
+ +مثال: + +``` sql +ALTER TABLE visits MODIFY COLUMN browser Array(String) +``` + +Changing the column type is the only complex action – it changes the contents of files with data. For large tables, this may take a long time. + +چندین مرحله پردازش وجود دارد: + +- تهیه فایل های موقت (جدید) با داده های اصلاح شده. +- تغییر نام فایل های قدیمی. +- تغییر نام فایل های موقت (جدید) به نام های قدیمی. +- حذف فایل های قدیمی. + +فقط مرحله اول زمان طول می کشد. در صورتی که یک شکست در این مرحله وجود دارد, داده ها تغییر نکرده است. +در صورتی که یک شکست در یکی از مراحل پی در پی وجود دارد, داده ها را می توان به صورت دستی ترمیم. استثنا است اگر فایل های قدیمی از سیستم فایل حذف شد اما داده ها را برای فایل های جدید را به دیسک نوشته شده است و از دست داده بود. + +این `ALTER` پرس و جو برای تغییر ستون تکرار شده است. دستورالعمل ها در باغ وحش ذخیره می شوند و سپس هر ماکت اعمال می شود. همه `ALTER` نمایش داده شد در همان جهت اجرا شود. پرس و جو منتظر اقدامات مناسب برای در کپی دیگر تکمیل شود. با این حال, پرس و جو برای تغییر ستون در یک جدول تکرار می تواند قطع, و تمام اقدامات غیر همزمان انجام خواهد شد. + +#### تغییر محدودیت پرس و جو {#alter-query-limitations} + +این `ALTER` پرس و جو به شما امکان ایجاد و حذف عناصر جداگانه (ستون) در ساختارهای داده های تو در تو, اما نه کل ساختارهای داده تو در تو. برای اضافه کردن یک ساختار داده های تو در تو, شما می توانید ستون با یک نام مانند اضافه `name.nested_name` و نوع `Array(T)`. ساختار داده های تو در تو معادل ستون های چندگانه با یک نام است که پیشوند مشابه قبل از نقطه است. + +هیچ پشتیبانی برای حذف ستون ها در کلید اصلی یا کلید نمونه برداری (ستون هایی که در استفاده می شود) وجود ندارد. `ENGINE` عبارت). تغییر نوع ستون که در کلید اصلی گنجانده شده است تنها ممکن است اگر این تغییر باعث نمی شود داده ها به اصلاح شود (مثلا, شما مجاز به اضافه کردن مقادیر به شمارشی و یا برای تغییر یک نوع از `DateTime` به `UInt32`). + +اگر `ALTER` پرس و جو برای ایجاد تغییرات جدول مورد نیاز کافی نیست شما می توانید یک جدول جدید ایجاد کنید و داده ها را با استفاده از داده ها کپی کنید [INSERT SELECT](insert_into.md#insert_query_insert-select) پرس و جو, سپس جداول با استفاده از تغییر [RENAME](misc.md#misc_operations-rename) پرس و جو و حذف جدول قدیمی. شما می توانید از [تاتر-کپی](../../operations/utilities/clickhouse-copier.md) به عنوان یک جایگزین برای `INSERT SELECT` پرس و جو. + +این `ALTER` بلوک پرس و جو همه می خواند و می نویسد برای جدول. به عبارت دیگر, اگر طولانی `SELECT` در حال اجرا است در زمان `ALTER` پرس و جو `ALTER` پرس و جو منتظر خواهد ماند تا کامل شود. همزمان, تمام نمایش داده شد جدید به همان جدول صبر کنید در حالی که این `ALTER` در حال اجرا است. + +برای جداول که داده های خود را ذخیره کنید (مانند `Merge` و `Distributed`), `ALTER` فقط ساختار جدول را تغییر می دهد و ساختار جداول تابع را تغییر نمی دهد. مثلا, زمانی که در حال اجرا را تغییر دهید برای یک `Distributed` جدول, شما همچنین نیاز به اجرا `ALTER` برای جداول در تمام سرور از راه دور. + +### دستکاری با عبارات کلیدی {#manipulations-with-key-expressions} + +دستور زیر پشتیبانی می شود: + +``` sql +MODIFY ORDER BY new_expression +``` + +این فقط برای جداول در کار می کند [`MergeTree`](../../engines/table_engines/mergetree_family/mergetree.md) خانواده (از جمله +[تکرار](../../engines/table_engines/mergetree_family/replication.md) جدول). فرمان تغییر +[کلید مرتب سازی](../../engines/table_engines/mergetree_family/mergetree.md) از جدول +به `new_expression` (بیان و یا یک تاپل از عبارات). کلید اصلی یکسان باقی می ماند. + +فرمان سبک وزن به این معنا است که تنها ابرداده را تغییر می دهد. 
برای حفظ اموال که بخش داده ها +ردیف ها توسط عبارت کلیدی مرتب سازی شما می توانید عبارات حاوی ستون های موجود اضافه کنید دستور داد +به کلید مرتب سازی (فقط ستون اضافه شده توسط `ADD COLUMN` فرمان در همان `ALTER` پرسوجو). + +### دستکاری با شاخص های پرش داده {#manipulations-with-data-skipping-indices} + +این فقط برای جداول در کار می کند [`*MergeTree`](../../engines/table_engines/mergetree_family/mergetree.md) خانواده (از جمله +[تکرار](../../engines/table_engines/mergetree_family/replication.md) جدول). عملیات زیر +در دسترس هستند: + +- `ALTER TABLE [db].name ADD INDEX name expression TYPE type GRANULARITY value AFTER name [AFTER name2]` - می افزاید توضیحات شاخص به ابرداده جداول . + +- `ALTER TABLE [db].name DROP INDEX name` - حذف شرح شاخص از ابرداده جداول و حذف فایل های شاخص از دیسک. + +این دستورات سبک وزن هستند به این معنا که فقط فراداده را تغییر می دهند یا فایل ها را حذف می کنند. +همچنین تکرار میشوند (همگامسازی فرادادههای شاخص از طریق باغ وحش). + +### دستکاری با محدودیت {#manipulations-with-constraints} + +مشاهده بیشتر در [قیدها](create.md#constraints) + +محدودیت ها می توانند با استفاده از نحو زیر اضافه یا حذف شوند: + +``` sql +ALTER TABLE [db].name ADD CONSTRAINT constraint_name CHECK expression; +ALTER TABLE [db].name DROP CONSTRAINT constraint_name; +``` + +نمایش داده شد اضافه خواهد شد و یا حذف ابرداده در مورد محدودیت از جدول به طوری که بلافاصله پردازش شده است. + +بررسی قید *اعدام نخواهد شد* در داده های موجود اگر اضافه شد. + +همه تغییرات در جداول تکرار در حال پخش به باغ وحش بنابراین خواهد شد در دیگر کپی اعمال می شود. + +### دستکاری با پارتیشن ها و قطعات {#alter_manipulations-with-partitions} + +عملیات زیر را با [پارتیشن ها](../../engines/table_engines/mergetree_family/custom_partitioning_key.md) در دسترس هستند: + +- [DETACH PARTITION](#alter_detach-partition) – Moves a partition to the `detached` دایرکتوری و فراموش کرده ام. +- [DROP PARTITION](#alter_drop-partition) – Deletes a partition. +- [ATTACH PART\|PARTITION](#alter_attach-partition) – Adds a part or partition from the `detached` دایرکتوری به جدول. +- [REPLACE PARTITION](#alter_replace-partition) - پارتیشن داده ها را از یک جدول به دیگری کپی می کند. +- [ATTACH PARTITION FROM](#alter_attach-partition-from) – Copies the data partition from one table to another and adds. +- [REPLACE PARTITION](#alter_replace-partition) - پارتیشن داده ها را از یک جدول به دیگری کپی می کند و جایگزین می شود. +- [MOVE PARTITION TO TABLE](#alter_move_to_table-partition) (\#تغییر\_موف\_ قابل تنظیم-پارتیشن) - پارتیشن داده را از یک جدول به دیگری حرکت دهید. +- [CLEAR COLUMN IN PARTITION](#alter_clear-column-partition) - بازنشانی ارزش یک ستون مشخص شده در یک پارتیشن. +- [CLEAR INDEX IN PARTITION](#alter_clear-index-partition) - بازنشانی شاخص ثانویه مشخص شده در یک پارتیشن. +- [FREEZE PARTITION](#alter_freeze-partition) – Creates a backup of a partition. +- [FETCH PARTITION](#alter_fetch-partition) – Downloads a partition from another server. +- [MOVE PARTITION\|PART](#alter_move-partition) – Move partition/data part to another disk or volume. + + + +#### جدا پارتیشن {\#alter\_detach-پارتیشن} {#detach-partition-alter-detach-partition} + +``` sql +ALTER TABLE table_name DETACH PARTITION partition_expr +``` + +تمام داده ها را برای پارتیشن مشخص شده به `detached` فهرست راهنما. سرور فراموش در مورد پارتیشن داده جدا به عنوان اگر وجود ندارد. سرور نمی خواهد در مورد این داده ها می دانم تا زمانی که شما را به [ATTACH](#alter_attach-partition) پرس و جو. 
+ +مثال: + +``` sql +ALTER TABLE visits DETACH PARTITION 201901 +``` + +اطلاعات بیشتر در مورد تنظیم بیان پارتیشن در یک بخش [نحوه مشخص کردن عبارت پارتیشن](#alter-how-to-specify-part-expr). + +پس از پرس و جو اجرا شده است, شما می توانید هر کاری که می خواهید با داده ها در انجام `detached` directory — delete it from the file system, or just leave it. + +This query is replicated – it moves the data to the `detached` دایرکتوری در تمام کپی. توجه داشته باشید که شما می توانید این پرس و جو تنها در یک ماکت رهبر را اجرا کند. برای پیدا کردن اگر یک ماکت یک رهبر است, انجام `SELECT` پرسوجو به [سیستم.تکرار](../../operations/system_tables.md#system_tables-replicas) جدول متناوبا, راحت تر به یک است `DETACH` پرس و جو در تمام کپی - همه کپی پرتاب یک استثنا, به جز ماکت رهبر. + +#### DROP PARTITION {#alter_drop-partition} + +``` sql +ALTER TABLE table_name DROP PARTITION partition_expr +``` + +پارتیشن مشخص شده را از جدول حذف می کند. این برچسب ها پرس و جو پارتیشن به عنوان غیر فعال و حذف داده ها به طور کامل, حدود در 10 دقیقه. + +اطلاعات بیشتر در مورد تنظیم بیان پارتیشن در یک بخش [نحوه مشخص کردن عبارت پارتیشن](#alter-how-to-specify-part-expr). + +The query is replicated – it deletes data on all replicas. + +#### DROP DETACHED PARTITION\|PART {#alter_drop-detached} + +``` sql +ALTER TABLE table_name DROP DETACHED PARTITION|PART partition_expr +``` + +بخش مشخص شده یا تمام قسمت های پارتیشن مشخص شده را از بین می برد `detached`. +اطلاعات بیشتر در مورد تنظیم بیان پارتیشن در یک بخش [نحوه مشخص کردن عبارت پارتیشن](#alter-how-to-specify-part-expr). + +#### ATTACH PARTITION\|PART {#alter_attach-partition} + +``` sql +ALTER TABLE table_name ATTACH PARTITION|PART partition_expr +``` + +می افزاید داده ها به جدول از `detached` فهرست راهنما. ممکن است که به اضافه کردن داده ها برای کل پارتیشن و یا برای یک بخش جداگانه. مثالها: + +``` sql +ALTER TABLE visits ATTACH PARTITION 201901; +ALTER TABLE visits ATTACH PART 201901_2_2_0; +``` + +اطلاعات بیشتر در مورد تنظیم بیان پارتیشن در یک بخش [نحوه مشخص کردن عبارت پارتیشن](#alter-how-to-specify-part-expr). + +این پرس و جو تکرار شده است. چک المثنی-ابتکار چه داده ها در وجود دارد `detached` فهرست راهنما. اگر داده وجود دارد, پرس و جو چک یکپارچگی خود را. اگر همه چیز درست است, پرس و جو می افزاید: داده ها به جدول. همه کپی دیگر دانلود داده ها از ماکت-مبتکر. + +بنابراین شما می توانید داده ها را به `detached` دایرکتوری در یک ماکت, و استفاده از `ALTER ... ATTACH` پرس و جو برای اضافه کردن به جدول در تمام کپی. + +#### ATTACH PARTITION FROM {#alter_attach-partition-from} + +``` sql +ALTER TABLE table2 ATTACH PARTITION partition_expr FROM table1 +``` + +این پرس و جو پارتیشن داده را از `table1` به `table2` می افزاید داده ها به لیست موجود در `table2`. توجه داشته باشید که داده ها از حذف نمی شود `table1`. + +برای پرس و جو به اجرا با موفقیت, شرایط زیر باید رعایت شود: + +- هر دو جدول باید همان ساختار دارند. +- هر دو جدول باید کلید پارتیشن همان. + +#### REPLACE PARTITION {#alter_replace-partition} + +``` sql +ALTER TABLE table2 REPLACE PARTITION partition_expr FROM table1 +``` + +این پرس و جو پارتیشن داده را از `table1` به `table2` و جایگزین پارتیشن موجود در `table2`. توجه داشته باشید که داده ها از حذف نمی شود `table1`. + +برای پرس و جو به اجرا با موفقیت, شرایط زیر باید رعایت شود: + +- هر دو جدول باید همان ساختار دارند. +- هر دو جدول باید کلید پارتیشن همان. + +#### MOVE PARTITION TO TABLE {#alter_move_to_table-partition} + +``` sql +ALTER TABLE table_source MOVE PARTITION partition_expr TO TABLE table_dest +``` + +این پرس و جو انتقال پارتیشن داده ها از `table_source` به `table_dest` با حذف داده ها از `table_source`. 
+ +برای پرس و جو به اجرا با موفقیت, شرایط زیر باید رعایت شود: + +- هر دو جدول باید همان ساختار دارند. +- هر دو جدول باید کلید پارتیشن همان. +- هر دو جدول باید همان خانواده موتور باشد. (تکرار و یا غیر تکرار) +- هر دو جدول باید سیاست ذخیره سازی همان. + +#### CLEAR COLUMN IN PARTITION {#alter_clear-column-partition} + +``` sql +ALTER TABLE table_name CLEAR COLUMN column_name IN PARTITION partition_expr +``` + +بازنشانی تمام مقادیر در ستون مشخص شده در یک پارتیشن. اگر `DEFAULT` بند هنگام ایجاد یک جدول تعیین شد, این پرس و جو مجموعه مقدار ستون به یک مقدار پیش فرض مشخص. + +مثال: + +``` sql +ALTER TABLE visits CLEAR COLUMN hour in PARTITION 201902 +``` + +#### FREEZE PARTITION {#alter_freeze-partition} + +``` sql +ALTER TABLE table_name FREEZE [PARTITION partition_expr] +``` + +این پرس و جو یک نسخه پشتیبان تهیه محلی از یک پارتیشن مشخص شده ایجاد می کند. اگر `PARTITION` بند حذف شده است, پرس و جو ایجاد پشتیبان گیری از تمام پارتیشن در یک بار. + +!!! note "یادداشت" + تمامی مراحل پشتیبان گیری بدون توقف سرور انجام می شود. + +توجه داشته باشید که برای جداول قدیمی مدل دهید شما می توانید پیشوند نام پارتیشن را مشخص کنید (به عنوان مثال, ‘2019’)- سپس پرس و جو پشتیبان گیری برای تمام پارتیشن مربوطه ایجاد می کند. اطلاعات بیشتر در مورد تنظیم بیان پارتیشن در یک بخش [نحوه مشخص کردن عبارت پارتیشن](#alter-how-to-specify-part-expr). + +در زمان اجرای, برای یک تصویر لحظهای داده, پرس و جو ایجاد لینک به داده های جدول. پیوندهای سخت در دایرکتوری قرار می گیرند `/var/lib/clickhouse/shadow/N/...` کجا: + +- `/var/lib/clickhouse/` است دایرکتوری محل کلیک کار مشخص شده در پیکربندی. +- `N` تعداد افزایشی از نسخه پشتیبان تهیه شده است. + +!!! note "یادداشت" + در صورت استفاده [مجموعه ای از دیسک برای ذخیره سازی داده ها در یک جدول](../../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-multiple-volumes) این `shadow/N` دایرکتوری به نظر می رسد در هر دیسک, ذخیره سازی قطعات داده که توسط همسان `PARTITION` اصطلاح. + +همان ساختار دایرکتوری ها در داخل پشتیبان به عنوان داخل ایجاد شده است `/var/lib/clickhouse/`. پرس و جو انجام می شود ‘chmod’ برای همه پروندهها نوشتن رو ممنوع کردم + +پس از ایجاد نسخه پشتیبان تهیه, شما می توانید داده ها را از کپی `/var/lib/clickhouse/shadow/` به سرور از راه دور و سپس از سرور محلی حذف کنید. توجه داشته باشید که `ALTER t FREEZE PARTITION` پرس و جو تکرار نشده است. این یک نسخه پشتیبان تهیه محلی تنها بر روی سرور محلی ایجاد می کند. + +پرس و جو ایجاد نسخه پشتیبان تهیه تقریبا بلافاصله (اما برای اولین بار برای نمایش داده شد در حال حاضر به جدول مربوطه منتظر را به پایان برساند در حال اجرا). + +`ALTER TABLE t FREEZE PARTITION` نسخه تنها داده, ابرداده جدول نیست. برای تهیه نسخه پشتیبان از فراداده های جدول فایل را کپی کنید `/var/lib/clickhouse/metadata/database/table.sql` + +برای بازیابی اطلاعات از یک نسخه پشتیبان تهیه زیر را انجام دهید: + +1. ایجاد جدول اگر وجود ندارد. برای مشاهده پرس و جو, استفاده از .پرونده جدید `ATTACH` در این با `CREATE`). +2. رونوشت داده از `data/database/table/` دایرکتوری در داخل پشتیبان گیری به `/var/lib/clickhouse/data/database/table/detached/` فهرست راهنما. +3. بدو `ALTER TABLE t ATTACH PARTITION` نمایش داده شد برای اضافه کردن داده ها به یک جدول. + +بازگرداندن از یک نسخه پشتیبان تهیه می کند نیاز به متوقف کردن سرور نیست. + +برای کسب اطلاعات بیشتر در مورد پشتیبان گیری و بازیابی اطلاعات, دیدن [پشتیبان گیری داده ها](../../operations/backup.md) بخش. + +#### CLEAR INDEX IN PARTITION {#alter_clear-index-partition} + +``` sql +ALTER TABLE table_name CLEAR INDEX index_name IN PARTITION partition_expr +``` + +پرس و جو کار می کند شبیه به `CLEAR COLUMN`, اما بازنشانی شاخص به جای یک داده ستون. 
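For instance (a sketch; the table, index name, and partition value are hypothetical):

``` sql
ALTER TABLE visits CLEAR INDEX browser_idx IN PARTITION 201902
```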
#### FETCH PARTITION {#alter_fetch-partition}

``` sql
ALTER TABLE table_name FETCH PARTITION partition_expr FROM 'path-in-zookeeper'
```

Downloads a partition from another server. This query only works for replicated tables.

The query does the following:

1. Downloads the partition from the specified shard. In ‘path-in-zookeeper’ you must specify a path to the shard in ZooKeeper.
2. Then the query puts the downloaded data to the `detached` directory of the `table_name` table. Use the [ATTACH PARTITION\|PART](#alter_attach-partition) query to add the data to the table.

For example:

``` sql
ALTER TABLE users FETCH PARTITION 201902 FROM '/clickhouse/tables/01-01/visits';
ALTER TABLE users ATTACH PARTITION 201902;
```

Note that:

- The `ALTER ... FETCH PARTITION` query isn't replicated. It places the partition in the `detached` directory only on the local server.
- The `ALTER TABLE ... ATTACH` query is replicated. It adds the data to all replicas. The data is added to one of the replicas from the `detached` directory, and to the others – from neighboring replicas.

Before downloading, the system checks if the partition exists and the table structure matches. The most appropriate replica is selected automatically from the healthy replicas.

Although the query is called `ALTER TABLE`, it does not change the table structure and does not immediately change the data available in the table.

#### How To Set the Partition Expression {#alter-how-to-specify-part-expr}

You can specify the partition expression in `ALTER ... PARTITION` queries in different ways:

- As a value from the `partition` column of the `system.parts` table. For example, `ALTER TABLE visits DETACH PARTITION 201901`.
- As an expression from the table column. Constants and constant expressions are supported. For example, `ALTER TABLE visits DETACH PARTITION toYYYYMM(toDate('2019-01-25'))`.
- Using the partition ID. The partition ID is a string identifier of the partition (human-readable, if possible) that is used as the name of the partition in the file system and in ZooKeeper. The partition ID must be specified in the `PARTITION ID` clause, in single quotes. For example, `ALTER TABLE visits DETACH PARTITION ID '201901'`.
- In the [ALTER ATTACH PART](#alter_attach-partition) and [DROP DETACHED PART](#alter_drop-detached) queries, to specify the name of a part, use a string literal with a value from the `name` column of the [system.detached_parts](../../operations/system_tables.md#system_tables-detached_parts) table. For example, `ALTER TABLE visits ATTACH PART '201901_1_1_0'`.
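To look up partition IDs before using the `PARTITION ID` form, you can query the `system.parts` table; a minimal sketch (the table name is hypothetical):

``` sql
SELECT DISTINCT partition, partition_id
FROM system.parts
WHERE table = 'visits' AND active
```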
Usage of quotes when specifying the partition depends on the type of the partition expression. For example, for the `String` type, you have to specify its name in quotes (`'`). For the `Date` and `Int*` types, no quotes are needed.

For old-style tables, you can specify the partition either as a number `201901` or as a string `'201901'`. The syntax for new-style tables is stricter with types (similar to the parser for the VALUES input format).

All the rules above are also true for the [OPTIMIZE](misc.md#misc_operations-optimize) query. If you need to specify the only partition when optimizing a non-partitioned table, set the expression `PARTITION tuple()`. For example:

``` sql
OPTIMIZE TABLE table_not_partitioned PARTITION tuple() FINAL;
```

Examples of `ALTER ... PARTITION` queries are demonstrated in the tests [`00502_custom_partitioning_local`](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00502_custom_partitioning_local.sql) and [`00502_custom_partitioning_replicated_zookeeper`](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper.sql).

### Manipulations with Table TTL {#manipulations-with-table-ttl}

You can change the [table TTL](../../engines/table_engines/mergetree_family/mergetree.md#mergetree-table-ttl) with a request of the following form:

``` sql
ALTER TABLE table-name MODIFY TTL ttl-expression
```

### Synchronicity of ALTER Queries {#synchronicity-of-alter-queries}

For non-replicatable tables, all `ALTER` queries are performed synchronously. For replicatable tables, the query just adds instructions for the appropriate actions to `ZooKeeper`, and the actions themselves are performed as soon as possible. However, the query can wait for these actions to be completed on all the replicas.

For `ALTER ... ATTACH|DETACH|DROP` queries, you can use the `replication_alter_partitions_sync` setting to set up waiting.
Possible values: `0` – do not wait; `1` – only wait for own execution (default); `2` – wait for all.

### Mutations {#alter-mutations}

Mutations are an ALTER query variant that allows changing or deleting rows in a table. In contrast to the standard `UPDATE` and `DELETE` queries that are intended for point data changes, mutations are intended for heavy operations that change a lot of rows in a table. Supported for the `MergeTree` family of table engines, including the engines with replication support.

Existing tables are ready for mutations as-is (no conversion necessary), but after the first mutation is applied to a table, its metadata format becomes incompatible with previous server versions, and falling back to a previous version becomes impossible.

Currently available commands:

``` sql
ALTER TABLE [db.]table DELETE WHERE filter_expr
```

The `filter_expr` must be of type `UInt8`. The query deletes rows in the table for which this expression takes a non-zero value.

``` sql
ALTER TABLE [db.]table UPDATE column1 = expr1 [, ...] WHERE filter_expr
```

The `filter_expr` must be of type `UInt8`. This query updates the values of the specified columns to the values of the corresponding expressions in rows for which the `filter_expr` takes a non-zero value. Values are cast to the column type using the `CAST` operator. Updating columns that are used in the calculation of the primary or the partition key is not supported.

``` sql
ALTER TABLE [db.]table MATERIALIZE INDEX name IN PARTITION partition_name
```

The query rebuilds the secondary index `name` in the partition `partition_name`.

One query can contain several commands separated by commas.
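For example, a hedged sketch combining two mutation commands in one query (the table and column names are hypothetical):

``` sql
ALTER TABLE visits
    DELETE WHERE EventDate < '2018-01-01',
    UPDATE Duration = 0 WHERE UserID = 0
```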
For \*MergeTree tables mutations execute by rewriting whole data parts. There is no atomicity – parts are substituted for mutated parts as soon as they are ready, and a `SELECT` query that started executing during a mutation will see data from parts that have already been mutated along with data from parts that have not been mutated yet.

Mutations are totally ordered by their creation order and are applied to each part in that order. Mutations are also partially ordered with INSERTs – data that was inserted into the table before the mutation was submitted will be mutated, and data that was inserted after that will not be mutated. Note that mutations do not block inserts in any way.

A mutation query returns immediately after the mutation entry is added (in case of replicated tables, to ZooKeeper; for non-replicated tables, to the filesystem). The mutation itself executes asynchronously using the system profile settings. To track the progress of mutations you can use the [`system.mutations`](../../operations/system_tables.md#system_tables-mutations) table. A mutation that was successfully submitted will continue to execute even if ClickHouse servers are restarted. There is no way to roll back the mutation once it is submitted, but if the mutation is stuck for some reason it can be cancelled with the [`KILL MUTATION`](misc.md#kill-mutation) query.

Entries for finished mutations are not deleted right away (the number of preserved entries is determined by the `finished_mutations_to_keep` storage engine parameter). Older mutation entries are deleted.

[Original article](https://clickhouse.tech/docs/en/query_language/alter/)

diff --git a/docs/fa/sql_reference/statements/create.md b/docs/fa/sql_reference/statements/create.md
new file mode 100644
index 00000000000..947239c39ce
--- /dev/null
+++ b/docs/fa/sql_reference/statements/create.md
@@ -0,0 +1,309 @@
---
machine_translated: true
machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
toc_priority: 35
toc_title: CREATE
---

# CREATE Queries {#create-queries}

## CREATE DATABASE {#query-language-create-database}

Creates a database.

``` sql
CREATE DATABASE [IF NOT EXISTS] db_name [ON CLUSTER cluster] [ENGINE = engine(...)]
```

### Clauses {#clauses}

- `IF NOT EXISTS`

    If the `db_name` database already exists, then ClickHouse doesn't create a new database and:

    - Doesn't throw an exception if clause is specified.
    - Throws an exception if clause isn't specified.

- `ON CLUSTER`

    ClickHouse creates the `db_name` database on all the servers of a specified cluster.

- `ENGINE`

    - [MySQL](../engines/database_engines/mysql.md)

        Allows you to retrieve data from the remote MySQL server.

        By default, ClickHouse uses its own [database engine](../engines/database_engines/index.md).

## CREATE TABLE {#create-table-query}

The `CREATE TABLE` query can have several forms.

``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
(
    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [compression_codec] [TTL expr1],
    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2] [compression_codec] [TTL expr2],
    ...
) ENGINE = engine
```

Creates a table named ‘name’ in the ‘db’ database, or the current database if ‘db’ is not set, with the structure specified in brackets and the ‘engine’ engine.
The structure of the table is a list of column descriptions. If indexes are supported by the engine, they are indicated as parameters for the table engine.

A column description is `name type` in the simplest case. Example: `RegionID UInt32`.
Expressions can also be defined for default values (see below).

``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name AS [db2.]name2 [ENGINE = engine]
```

Creates a table with the same structure as another table. You can specify a different engine for the table. If the engine is not specified, the same engine will be used as for the `db2.name2` table.
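A minimal sketch (the table names are hypothetical):

``` sql
CREATE TABLE hits_copy AS hits ENGINE = TinyLog
```

Here `hits_copy` gets the column definitions of `hits`, but stores its data with the `TinyLog` engine.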
``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name AS table_function()
```

Creates a table with the structure and data returned by a [table function](../table_functions/index.md).

``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name ENGINE = engine AS SELECT ...
```

Creates a table with a structure like the result of the `SELECT` query, with the ‘engine’ engine, and fills it with data from SELECT.

In all cases, if `IF NOT EXISTS` is specified, the query won't return an error if the table already exists. In this case, the query won't do anything.

There can be other clauses after the `ENGINE` clause in the query. See the detailed documentation on how to create tables in the descriptions of [table engines](../../engines/table_engines/index.md#table_engines).

### Default Values {#create-default-values}

The column description can specify an expression for a default value, in one of the following ways: `DEFAULT expr`, `MATERIALIZED expr`, `ALIAS expr`.
Example: `URLDomain String DEFAULT domain(URL)`.

If an expression for the default value is not defined, the default values will be set to zeros for numbers, empty strings for strings, empty arrays for arrays, and `0000-00-00` for dates or `0000-00-00 00:00:00` for dates with time. NULLs are not supported.

If the default expression is defined, the column type is optional. If there isn't an explicitly defined type, the default expression type is used. Example: `EventDate DEFAULT toDate(EventTime)` – the ‘Date’ type will be used for the ‘EventDate’ column.

If the data type and default expression are defined explicitly, this expression will be cast to the specified type using type casting functions. Example: `Hits UInt32 DEFAULT 0` means the same thing as `Hits UInt32 DEFAULT toUInt32(0)`.

Default expressions may be defined as an arbitrary expression from table constants and columns. When creating and changing the table structure, it checks that expressions don't contain loops. For INSERT, it checks that expressions are resolvable – that all columns they can be calculated from have been passed.

`DEFAULT expr`

Normal default value. If the INSERT query doesn't specify the corresponding column, it will be filled in by computing the corresponding expression.

`MATERIALIZED expr`

Materialized expression. Such a column can't be specified for INSERT, because it is always calculated.
For an INSERT without a list of columns, these columns are not considered.
In addition, this column is not substituted when using an asterisk in a SELECT query. This is to preserve the invariant that the dump obtained using `SELECT *` can be inserted back into the table using INSERT without specifying the list of columns.

`ALIAS expr`

Synonym. Such a column isn't stored in the table at all.
Its values can't be inserted into a table, and it is not substituted when using an asterisk in a SELECT query.
It can be used in SELECTs if the alias is expanded during query parsing.
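A hedged sketch contrasting the three kinds of default expressions (the table and column names are hypothetical; `domain`, `cityHash64` and `protocol` are built-in ClickHouse functions):

``` sql
CREATE TABLE default_example
(
    URL String,
    Domain String DEFAULT domain(URL),            -- stored; computed when omitted from INSERT
    URLHash UInt64 MATERIALIZED cityHash64(URL),  -- always computed; hidden from SELECT *
    Proto String ALIAS protocol(URL)              -- not stored; computed on read
) ENGINE = MergeTree()
ORDER BY URL
```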
When using the ALTER query to add new columns, old data for these columns is not written. Instead, when reading old data that does not have values for the new columns, expressions are computed on the fly by default. However, if running the expressions requires different columns that are not indicated in the query, these columns will additionally be read, but only for the blocks of data that need it.

If you add a new column to a table but later change its default expression, the values used for old data will change (for data for which values were not stored on disk). Note that when running background merges, data for columns that are missing in one of the merging parts is written to the merged part.

It is not possible to set default values for elements in nested data structures.

### Constraints {#constraints}

Along with columns descriptions, constraints could be defined:

``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
(
    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [compression_codec] [TTL expr1],
    ...
    CONSTRAINT constraint_name_1 CHECK boolean_expr_1,
    ...
) ENGINE = engine
```

`boolean_expr_1` could be any boolean expression. If constraints are defined for the table, each of them will be checked for every row in an `INSERT` query. If any constraint is not satisfied — server will raise an exception with constraint name and checking expression.

Adding a large amount of constraints can negatively affect performance of big `INSERT` queries.
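A minimal sketch (the table, column and constraint names are hypothetical):

``` sql
CREATE TABLE constraint_example
(
    id UInt64,
    price Float64,
    CONSTRAINT price_is_non_negative CHECK price >= 0
) ENGINE = MergeTree()
ORDER BY id
```

With this definition, `INSERT INTO constraint_example VALUES (1, -5)` is rejected with an exception naming `price_is_non_negative`.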
### TTL Expression {#ttl-expression}

Defines the storage time for values. Can be specified only for MergeTree-family tables. For a detailed description, see [TTL for columns and tables](../../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-ttl).

### Column Compression Codecs {#codecs}

By default, ClickHouse applies the `lz4` compression method. For the `MergeTree`-engine family you can change the default compression method in the [compression](../../operations/server_configuration_parameters/settings.md#server-settings-compression) section of the server configuration. You can also define the compression method for each individual column in the `CREATE TABLE` query.

``` sql
CREATE TABLE codec_example
(
    dt Date CODEC(ZSTD),
    ts DateTime CODEC(LZ4HC),
    float_value Float32 CODEC(NONE),
    double_value Float64 CODEC(LZ4HC(9)),
    value Float32 CODEC(Delta, ZSTD)
)
ENGINE =
...
```

If a codec is specified, the default codec doesn't apply. Codecs can be combined in a pipeline, for example, `CODEC(Delta, ZSTD)`. To select the best codec combination for your project, pass benchmarks similar to those described in the Altinity [New Encodings to Improve ClickHouse Efficiency](https://www.altinity.com/blog/2019/7/new-encodings-to-improve-clickhouse) article.

!!! warning "Warning"
    You can't decompress ClickHouse database files with external utilities like `lz4`. Instead, use the special [clickhouse-compressor](https://github.com/ClickHouse/ClickHouse/tree/master/programs/compressor) utility.

Compression is supported for the following table engines:

- [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md) family. Supports column compression codecs and selecting the default compression method by [compression](../../operations/server_configuration_parameters/settings.md#server-settings-compression) settings.
- [Log](../../engines/table_engines/log_family/log_family.md) family. Uses the `lz4` compression method by default and supports column compression codecs.
- [Set](../../engines/table_engines/special/set.md). Only supports the default compression.
- [Join](../../engines/table_engines/special/join.md). Only supports the default compression.

ClickHouse supports common-purpose codecs and specialized codecs.

#### Specialized Codecs {#create-query-specialized-codecs}

These codecs are designed to make compression more effective by using specific features of data. Some of these codecs don't compress data themselves. Instead, they prepare the data for a common-purpose codec, which compresses it better than without this preparation.

Specialized codecs:

- `Delta(delta_bytes)` — Compression approach in which raw values are replaced by the difference of two neighboring values, except for the first value that stays unchanged. Up to `delta_bytes` are used for storing delta values, so `delta_bytes` is the maximum size of raw values. Possible `delta_bytes` values: 1, 2, 4, 8. The default value for `delta_bytes` is `sizeof(type)` if equal to 1, 2, 4, or 8. In all other cases, it's 1.
- `DoubleDelta` — Calculates delta of deltas and writes it in compact binary form. Optimal compression rates are achieved for monotonic sequences with a constant stride, such as time series data. Can be used with any fixed-width type. Implements the algorithm used in Gorilla TSDB, extending it to support 64-bit types. Uses 1 extra bit for 32-byte deltas: 5-bit prefixes instead of 4-bit prefixes. For additional information, see Compressing Time Stamps in [Gorilla: A Fast, Scalable, In-Memory Time Series Database](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf).
- `Gorilla` — Calculates XOR between current and previous value and writes it in compact binary form. Efficient when storing a series of floating point values that change slowly, because the best compression rate is achieved when neighboring values are binary equal. Implements the algorithm used in Gorilla TSDB, extending it to support 64-bit types. For additional information, see Compressing Values in [Gorilla: A Fast, Scalable, In-Memory Time Series Database](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf).
- `T64` — Compression approach that crops unused high bits of values in integer data types (including `Enum`, `Date` and `DateTime`). At each step of its algorithm, the codec takes a block of 64 values, puts them into a 64x64 bit matrix, transposes it, crops the unused bits of the values and returns the rest as a sequence. Unused bits are the bits that don't differ between maximum and minimum values in the whole data part for which the compression is used.

The `DoubleDelta` and `Gorilla` codecs are used in Gorilla TSDB as the components of its compressing algorithm. The Gorilla approach is effective in scenarios when there is a sequence of slowly changing values with their timestamps. Timestamps are effectively compressed by the `DoubleDelta` codec, and values are effectively compressed by the `Gorilla` codec. For example, to get an effectively stored table, you can create it in the following configuration:

``` sql
CREATE TABLE codec_example
(
    timestamp DateTime CODEC(DoubleDelta),
    slow_values Float32 CODEC(Gorilla)
)
ENGINE = MergeTree()
```

#### Common Purpose Codecs {#create-query-common-purpose-codecs}

Codecs:

- `NONE` — No compression.
- `LZ4` — Lossless [data compression algorithm](https://github.com/lz4/lz4) used by default. Applies LZ4 fast compression.
- `LZ4HC[(level)]` — LZ4 HC (high compression) algorithm with configurable level. Default level: 9. Setting `level <= 0` applies the default level. Possible levels: \[1, 12\]. Recommended level range: \[4, 9\].
- `ZSTD[(level)]` — [ZSTD compression algorithm](https://en.wikipedia.org/wiki/Zstandard) with configurable `level`. Possible levels: \[1, 22\]. Default value: 1.

High compression levels are useful for asymmetric scenarios, like compress once, decompress repeatedly. Higher levels mean better compression and higher CPU usage.

## Temporary Tables {#temporary-tables}

ClickHouse supports temporary tables which have the following characteristics:

- Temporary tables disappear when the session ends, including if the connection is lost.
- A temporary table uses the Memory engine only.
- The DB can't be specified for a temporary table. It is created outside of databases.
- It is impossible to create a temporary table with a distributed DDL query on all cluster servers (by using `ON CLUSTER`): this table exists only in the current session.
- If a temporary table has the same name as another one and a query specifies the table name without specifying the DB, the temporary table will be used.
- For distributed query processing, temporary tables used in a query are passed to remote servers.

To create a temporary table, use the following syntax:

``` sql
CREATE TEMPORARY TABLE [IF NOT EXISTS] table_name
(
    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
    ...
)
```

In most cases, temporary tables are not created manually, but when using external data for a query, or for distributed `(GLOBAL) IN`. For more information, see the appropriate sections.

It's possible to use tables with [ENGINE = Memory](../../engines/table_engines/special/memory.md) instead of temporary tables.

## Distributed DDL Queries (ON CLUSTER Clause) {#distributed-ddl-queries-on-cluster-clause}

The `CREATE`, `DROP`, `ALTER`, and `RENAME` queries support distributed execution on a cluster.
For example, the following query creates the `all_hits` `Distributed` table on each host in `cluster`:

``` sql
CREATE TABLE IF NOT EXISTS all_hits ON CLUSTER cluster (p Date, i Int32) ENGINE = Distributed(cluster, default, hits)
```

In order to run these queries correctly, each host must have the same cluster definition (to simplify syncing configs, you can use substitutions from ZooKeeper). They must also connect to the ZooKeeper servers.
The local version of the query will eventually be executed on each host in the cluster, even if some hosts are currently not available. The order for executing queries within a single host is guaranteed.

## CREATE VIEW {#create-view}

``` sql
CREATE [MATERIALIZED] VIEW [IF NOT EXISTS] [db.]table_name [TO[db.]name] [ENGINE = engine] [POPULATE] AS SELECT ...
```

Creates a view. There are two types of views: normal and materialized.

Normal views don't store any data, but just perform a read from another table. In other words, a normal view is nothing more than a saved query. When reading from a view, this saved query is used as a subquery in the FROM clause.

As an example, assume you've created a view:

``` sql
CREATE VIEW view AS SELECT ...
```

and written a query:

``` sql
SELECT a, b, c FROM view
```

This query is fully equivalent to using the subquery:

``` sql
SELECT a, b, c FROM (SELECT ...)
```

Materialized views store data transformed by the corresponding SELECT query.

When creating a materialized view without `TO [db].[table]`, you must specify ENGINE – the table engine for storing data.

When creating a materialized view with `TO [db].[table]`, you must not use `POPULATE`.

A materialized view is arranged as follows: when inserting data into the table specified in SELECT, part of the inserted data is converted by this SELECT query, and the result is inserted into the view.

If you specify POPULATE, the existing table data is inserted into the view when creating it, as if making a `CREATE TABLE ... AS SELECT ...` query. Otherwise, the query contains only the data inserted into the table after creating the view. We don't recommend using POPULATE, since data inserted into the table during the view creation will not be inserted into it.
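A hedged sketch of a materialized view that pre-aggregates on insert (all names are hypothetical):

``` sql
CREATE MATERIALIZED VIEW visits_per_day
ENGINE = SummingMergeTree()
ORDER BY StartDate
POPULATE
AS SELECT StartDate, count() AS visits
FROM visits
GROUP BY StartDate
```

Each block inserted into `visits` is aggregated by the SELECT and the result is appended to the view; `SummingMergeTree` later collapses rows with the same `StartDate`.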
A `SELECT` query can contain `DISTINCT`, `GROUP BY`, `ORDER BY`, `LIMIT`… Note that the corresponding conversions are performed independently on each block of inserted data. For example, if `GROUP BY` is set, data is aggregated during insertion, but only within a single packet of inserted data. The data won't be further aggregated. The exception is when using an ENGINE that independently performs data aggregation, such as `SummingMergeTree`.

The execution of `ALTER` queries on materialized views has not been fully developed, so they might be inconvenient. If the materialized view uses the construction `TO [db.]name`, you can `DETACH` the view, run `ALTER` for the target table, and then `ATTACH` the previously detached (`DETACH`) view.

Views look the same as normal tables. For example, they are listed in the result of the `SHOW TABLES` query.

There isn't a separate query for deleting views. To delete a view, use `DROP TABLE`.

## CREATE DICTIONARY {#create-dictionary-query}

``` sql
CREATE DICTIONARY [IF NOT EXISTS] [db.]dictionary_name [ON CLUSTER cluster]
(
    key1 type1  [DEFAULT|EXPRESSION expr1] [HIERARCHICAL|INJECTIVE|IS_OBJECT_ID],
    key2 type2  [DEFAULT|EXPRESSION expr2] [HIERARCHICAL|INJECTIVE|IS_OBJECT_ID],
    attr1 type2 [DEFAULT|EXPRESSION expr3],
    attr2 type2 [DEFAULT|EXPRESSION expr4]
)
PRIMARY KEY key1, key2
SOURCE(SOURCE_NAME([param1 value1 ... paramN valueN]))
LAYOUT(LAYOUT_NAME([param_name param_value]))
LIFETIME([MIN val1] MAX val2)
```

Creates an [external dictionary](../../sql_reference/dictionaries/external_dictionaries/external_dicts.md) with the given [structure](../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md), [source](../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md), [layout](../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_layout.md) and [lifetime](../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_lifetime.md).

The external dictionary structure consists of attributes. Dictionary attributes are specified similarly to table columns. The only required attribute property is its type; all other properties may have default values.

Depending on the dictionary [layout](../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_layout.md), one or more attributes can be specified as dictionary keys.

For more information, see the [External Dictionaries](../../sql_reference/dictionaries/external_dictionaries/external_dicts.md) section.

[Original article](https://clickhouse.tech/docs/en/query_language/create/)

diff --git a/docs/fa/sql_reference/statements/index.md b/docs/fa/sql_reference/statements/index.md
new file mode 100644
index 00000000000..a2246679149
--- /dev/null
+++ b/docs/fa/sql_reference/statements/index.md
@@ -0,0 +1,8 @@
---
machine_translated: true
machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
toc_folder_title: Statements
toc_priority: 31
---


diff --git a/docs/fa/sql_reference/statements/insert_into.md b/docs/fa/sql_reference/statements/insert_into.md
new file mode 100644
index 00000000000..25709b2ee67
--- /dev/null
+++ b/docs/fa/sql_reference/statements/insert_into.md
@@ -0,0 +1,80 @@
---
machine_translated: true
machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
toc_priority: 34
toc_title: INSERT INTO
---

## INSERT {#insert}

Adding data.

Basic query format:

``` sql
INSERT INTO [db.]table [(c1, c2, c3)] VALUES (v11, v12, v13), (v21, v22, v23), ...
```

The query can specify a list of columns to insert `[(c1, c2, c3)]`. In this case, the rest of the columns are filled with:

- The values calculated from the `DEFAULT` expressions specified in the table definition.
- Zeros and empty strings, if `DEFAULT` expressions are not defined.
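A minimal sketch of this column-list behavior (the table and column names are hypothetical):

``` sql
CREATE TABLE insert_example (a Int32, b String DEFAULT 'unknown') ENGINE = Memory;
INSERT INTO insert_example (a) VALUES (1);
SELECT * FROM insert_example;  -- returns: 1, 'unknown'
```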
If [strict_insert_defaults=1](../../operations/settings/settings.md), columns that do not have `DEFAULT` defined must be listed in the query.

Data can be passed to the INSERT in any [format](../../interfaces/formats.md#formats) supported by ClickHouse. The format must be specified explicitly in the query:

``` sql
INSERT INTO [db.]table [(c1, c2, c3)] FORMAT format_name data_set
```

For example, the following query format is identical to the basic version of INSERT … VALUES:

``` sql
INSERT INTO [db.]table [(c1, c2, c3)] FORMAT Values (v11, v12, v13), (v21, v22, v23), ...
```

ClickHouse removes all spaces and one line feed (if there is one) before the data. When forming a query, we recommend putting the data on a new line after the query operators (this is important if the data begins with spaces).

Example:

``` sql
INSERT INTO t FORMAT TabSeparated
11 Hello, world!
22 Qwerty
```

You can insert data separately from the query by using the command-line client or the HTTP interface. For more information, see the section “[Interfaces](../../interfaces/index.md#interfaces)”.

### Constraints {#constraints}

If a table has [constraints](create.md#constraints), their expressions will be checked for each row of inserted data. If any of those constraints is not satisfied — server will raise an exception containing constraint name and expression, the query will be stopped.

### Inserting the Results of `SELECT` {#insert_query_insert-select}

``` sql
INSERT INTO [db.]table [(c1, c2, c3)] SELECT ...
```

Columns are mapped according to their position in the SELECT clause. However, their names in the SELECT expression and the table for INSERT may differ. If necessary, type casting is performed.

None of the data formats except Values allow setting values to expressions such as `now()`, `1 + 2`, and so on. The Values format allows limited use of expressions, but this is not recommended, because in this case inefficient code is used for their execution.

Other queries for modifying data parts are not supported: `UPDATE`, `DELETE`, `REPLACE`, `MERGE`, `UPSERT`, `INSERT UPDATE`.
However, you can delete old data using `ALTER TABLE ... DROP PARTITION`.

The `FORMAT` clause must be specified at the end of the query if the `SELECT` clause contains the table function [input()](../table_functions/input.md).

### Performance Considerations {#performance-considerations}

`INSERT` sorts the input data by primary key and splits them into partitions by a partition key. If you insert data into several partitions at once, it can significantly reduce the performance of the `INSERT` query. To avoid this:

- Add data in fairly large batches, such as 100,000 rows at a time.
- Group data by a partition key before uploading it to ClickHouse.

Performance will not decrease if:

- Data is added in real time.
- You upload data that is usually sorted by time.

[Original article](https://clickhouse.tech/docs/en/query_language/insert_into/)

diff --git a/docs/fa/sql_reference/statements/misc.md b/docs/fa/sql_reference/statements/misc.md
new file mode 100644
index 00000000000..113c3e7173d
--- /dev/null
+++ b/docs/fa/sql_reference/statements/misc.md
@@ -0,0 +1,252 @@
---
machine_translated: true
machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
toc_priority: 39
toc_title: "\u063A\u06CC\u0631\u0647"
---

# Miscellaneous Queries {#miscellaneous-queries}

## ATTACH {#attach}

This query is exactly the same as `CREATE`, but

- Instead of the word `CREATE` it uses the word `ATTACH`.
- The query does not create data on the disk, but assumes that data is already in the appropriate places, and just adds information about the table to the server.
    After executing an ATTACH query, the server will know about the existence of the table.

If the table was previously detached (`DETACH`), meaning that its structure is known, you can use shorthand without defining the structure.

``` sql
ATTACH TABLE [IF NOT EXISTS] [db.]name [ON CLUSTER cluster]
```

This query is used when starting the server. The server stores table metadata as files with `ATTACH` queries, which it simply runs at launch (with the exception of system tables, which are explicitly created on the server).

## CHECK TABLE {#check-table}

Checks if the data in the table is corrupted.

``` sql
CHECK TABLE [db.]name
```

The `CHECK TABLE` query compares actual file sizes with the expected values which are stored on the server. If the file sizes do not match the stored values, it means the data is corrupted. This can be caused, for example, by a system crash during query execution.

The query response contains the `result` column with a single row. The row has a value of
[Boolean](../../sql_reference/data_types/boolean.md) type:

- 0 - The data in the table is corrupted.
- 1 - The data maintains integrity.

The `CHECK TABLE` query supports the following table engines:

- [Log](../../engines/table_engines/log_family/log.md)
- [TinyLog](../../engines/table_engines/log_family/tinylog.md)
- [StripeLog](../../engines/table_engines/log_family/stripelog.md)
- [MergeTree family](../../engines/table_engines/mergetree_family/mergetree.md)

Performed over the tables with other table engines, it causes an exception.

Engines from the `*Log` family don't provide automatic data recovery on failure. Use the `CHECK TABLE` query to track data loss in a timely manner.

For `MergeTree` family engines, the `CHECK TABLE` query shows a check status for every individual data part of a table on the local server.

**If the data is corrupted**

If the table is corrupted, you can copy the non-corrupted data to another table. To do this:

1. Create a new table with the same structure as the damaged table. To do this, execute the query `CREATE TABLE <new_table_name> AS <damaged_table_name>`.
2. Set the [max_threads](../../operations/settings/settings.md#settings-max_threads) value to 1 to process the next query in a single thread. To do this, run the query `SET max_threads = 1`.
3. Execute the query `INSERT INTO <new_table_name> SELECT * FROM <damaged_table_name>`. This request copies the non-corrupted data from the damaged table to another table. Only the data before the corrupted part will be copied.
4. Restart the `clickhouse-client` to reset the `max_threads` value.

## DESCRIBE TABLE {#misc-describe-table}

``` sql
DESC|DESCRIBE TABLE [db.]table [INTO OUTFILE filename] [FORMAT format]
```

Returns the following `String` type columns:

- `name` — Column name.
- `type`— Column type.
- `default_type` — Clause that is used in [default expression](create.md#create-default-values) (`DEFAULT`, `MATERIALIZED` or `ALIAS`). The column contains an empty string, if the default expression isn't specified.
- `default_expression` — Value specified in the `DEFAULT` clause.
- `comment_expression` — Comment text.

Nested data structures are output in “expanded” format. Each column is shown separately, with the name after a dot.
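For example, describing a built-in system table:

``` sql
DESCRIBE TABLE system.one
```

returns a single row for the `dummy UInt8` column.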
## DETACH {#detach}

Deletes information about the ‘name’ table from the server. The server stops knowing about the table's existence.

``` sql
DETACH TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster]
```

This does not delete the table's data or metadata. On the next server launch, the server will read the metadata and find out about the table again.
Similarly, a “detached” table can be re-attached using the `ATTACH` query (with the exception of system tables, which do not have metadata stored for them).

There is no `DETACH DATABASE` query.

## DROP {#drop}

This query has two types: `DROP DATABASE` and `DROP TABLE`.

``` sql
DROP DATABASE [IF EXISTS] db [ON CLUSTER cluster]
```

Deletes all tables inside the ‘db’ database, then deletes the ‘db’ database itself.
If `IF EXISTS` is specified, it doesn't return an error if the database doesn't exist.

``` sql
DROP [TEMPORARY] TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster]
```

Deletes the table.
If `IF EXISTS` is specified, it doesn't return an error if the table doesn't exist or the database doesn't exist.

    DROP DICTIONARY [IF EXISTS] [db.]name

Deletes the dictionary.
If `IF EXISTS` is specified, it doesn't return an error if the dictionary doesn't exist or the database doesn't exist.

## EXISTS {#exists}

``` sql
EXISTS [TEMPORARY] [TABLE|DICTIONARY] [db.]name [INTO OUTFILE filename] [FORMAT format]
```

Returns a single `UInt8`-type column, which contains the single value `0` if the table or database doesn't exist, or `1` if the table exists in the specified database.

## KILL QUERY {#kill-query}

``` sql
KILL QUERY [ON CLUSTER cluster]
  WHERE <where expression to SELECT FROM system.processes query>
  [SYNC|ASYNC|TEST]
  [FORMAT format]
```

Attempts to forcibly terminate the currently running queries.
The queries to terminate are selected from the system.processes table using the criteria defined in the `WHERE` clause of the `KILL` query.

Examples:

``` sql
-- Forcibly terminates all queries with the specified query_id:
KILL QUERY WHERE query_id='2-857d-4a57-9ee0-327da5d60a90'

-- Synchronously terminates all queries run by 'username':
KILL QUERY WHERE user='username' SYNC
```

Read-only users can only stop their own queries.

By default, the asynchronous version of queries is used (`ASYNC`), which doesn't wait for confirmation that queries have stopped.

The synchronous version (`SYNC`) waits for all queries to stop and displays information about each process as it stops.
The response contains the `kill_status` column, which can take the following values:

1. ‘finished’ – The query was terminated successfully.
2. ‘waiting’ – Waiting for the query to end after sending it a signal to terminate.
3. The other values ​​explain why the query can't be stopped.

A test query (`TEST`) only checks the user's rights and displays a list of queries to stop.

## KILL MUTATION {#kill-mutation}

``` sql
KILL MUTATION [ON CLUSTER cluster]
  WHERE <where expression to SELECT FROM system.mutations query>
  [TEST]
  [FORMAT format]
```

Tries to cancel and remove [mutations](alter.md#alter-mutations) that are currently executing. Mutations to cancel are selected from the [`system.mutations`](../../operations/system_tables.md#system_tables-mutations) table using the filter specified by the `WHERE` clause of the `KILL` query.

A test query (`TEST`) only checks the user's rights and displays a list of queries to stop.

Examples:

``` sql
-- Cancel and remove all mutations of the single table:
KILL MUTATION WHERE database = 'default' AND table = 'table'

-- Cancel the specific mutation:
KILL MUTATION WHERE database = 'default' AND table = 'table' AND mutation_id = 'mutation_3.txt'
```

The query is useful when a mutation is stuck and cannot finish (e.g. if some function in the mutation query throws an exception when applied to the data contained in the table).

Changes already made by the mutation are not rolled back.

## OPTIMIZE {#misc_operations-optimize}

``` sql
OPTIMIZE TABLE [db.]name [ON CLUSTER cluster] [PARTITION partition | PARTITION ID 'partition_id'] [FINAL] [DEDUPLICATE]
```

This query tries to initialize an unscheduled merge of data parts for tables with a table engine from the [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md) family.

The `OPTMIZE` query is also supported for the [MaterializedView](../../engines/table_engines/special/materializedview.md) and the [Buffer](../../engines/table_engines/special/buffer.md) engines. Other table engines aren't supported.

When `OPTIMIZE` is used with the [ReplicatedMergeTree](../../engines/table_engines/mergetree_family/replication.md) family of table engines, ClickHouse creates a task for merging and waits for execution on all nodes (if the `replication_alter_partitions_sync` setting is enabled).

- If `OPTIMIZE` doesn't perform a merge for any reason, it doesn't notify the client. To enable notifications, use the [optimize_throw_if_noop](../../operations/settings/settings.md#setting-optimize_throw_if_noop) setting.
- If you specify a `PARTITION`, only the specified partition is optimized. [How to set the partition expression](alter.md#alter-how-to-specify-part-expr).
- If you specify `FINAL`, optimization is performed even when all the data is already in one part.
- If you specify `DEDUPLICATE`, then completely identical rows will be deduplicated (all columns are compared); it makes sense only for the MergeTree engine.

!!! warning "Warning"
    `OPTIMIZE` can't fix the “Too many parts” error.
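A hedged example (the table name is hypothetical):

``` sql
OPTIMIZE TABLE visits PARTITION 201902 FINAL
```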
## RENAME {#misc_operations-rename}

Renames one or more tables.

``` sql
RENAME TABLE [db11.]name11 TO [db12.]name12, [db21.]name21 TO [db22.]name22, ... [ON CLUSTER cluster]
```

All tables are renamed under global locking. Renaming tables is a light operation. If you indicated another database after TO, the table will be moved to this database. However, the directories with databases must reside in the same file system (otherwise, an error is returned).

## SET {#query-set}

``` sql
SET param = value
```

Assigns `value` to the `param` [setting](../../operations/settings/index.md) for the current session. You cannot change [server settings](../../operations/server_configuration_parameters/index.md) this way.

You can also set all the values from the specified settings profile in a single query.

``` sql
SET profile = 'profile-name-from-the-settings-file'
```

For more information, see [Settings](../../operations/settings/settings.md).

## TRUNCATE {#truncate}

``` sql
TRUNCATE TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster]
```

Removes all data from a table. When the clause `IF EXISTS` is omitted, the query returns an error if the table does not exist.

The `TRUNCATE` query is not supported for the [View](../../engines/table_engines/special/view.md), [File](../../engines/table_engines/special/file.md), [URL](../../engines/table_engines/special/url.md) and [Null](../../engines/table_engines/special/null.md) table engines.

## USE {#use}

``` sql
USE db
```

Lets you set the current database for the session.
The current database is used for searching for tables if the database is not explicitly defined in the query with a dot before the table name.
This query can't be made when using the HTTP protocol, since there is no concept of a session.
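For instance:

``` sql
USE system;
SELECT * FROM numbers LIMIT 3;  -- resolves to system.numbers
```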
[Original article](https://clickhouse.tech/docs/en/query_language/misc/)

diff --git a/docs/fa/sql_reference/statements/select.md b/docs/fa/sql_reference/statements/select.md
new file mode 100644
index 00000000000..a1f0a833e3f
--- /dev/null
+++ b/docs/fa/sql_reference/statements/select.md
@@ -0,0 +1,610 @@
---
machine_translated: true
machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
toc_priority: 33
toc_title: SELECT
---

# SELECT Queries Syntax {#select-queries-syntax}

`SELECT` performs data retrieval.

``` sql
[WITH expr_list|(subquery)]
SELECT [DISTINCT] expr_list
[FROM [db.]table | (subquery) | table_function] [FINAL]
[SAMPLE sample_coeff]
[ARRAY JOIN ...]
[GLOBAL] [ANY|ALL] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER] JOIN (subquery)|table USING columns_list
[PREWHERE expr]
[WHERE expr]
[GROUP BY expr_list] [WITH TOTALS]
[HAVING expr]
[ORDER BY expr_list]
[LIMIT [offset_value, ]n BY columns]
[LIMIT [n, ]m]
[UNION ALL ...]
[INTO OUTFILE filename]
[FORMAT format]
```

All the clauses are optional, except for the required list of expressions immediately after SELECT.
The clauses below are described in almost the same order as in the query execution conveyor.

If the query omits the `DISTINCT`, `GROUP BY` and `ORDER BY` clauses and the `IN` and `JOIN` subqueries, the query will be completely stream processed, using O(1) amount of RAM.
Otherwise, the query might consume a lot of RAM if the appropriate restrictions aren't specified: `max_memory_usage`, `max_rows_to_group_by`, `max_rows_to_sort`, `max_rows_in_distinct`, `max_bytes_in_distinct`, `max_rows_in_set`, `max_bytes_in_set`, `max_rows_in_join`, `max_bytes_in_join`, `max_bytes_before_external_sort`, `max_bytes_before_external_group_by`. For more information, see the section “Settings”. It is possible to use external sorting (saving temporary tables to a disk) and external aggregation. `The system does not have "merge join"`.

### WITH Clause {#with-clause}

This section provides support for Common Table Expressions ([CTE](https://en.wikipedia.org/wiki/Hierarchical_and_recursive_queries_in_SQL)), with some limitations:
1. Recursive queries are not supported
2. When a subquery is used inside the WITH section, its result should be scalar with exactly one row
3. Expression's results are not available in subqueries
Results of WITH clause expressions can be used inside the SELECT clause.

Example 1: Using a constant expression as a “variable”

``` sql
WITH '2019-08-01 15:23:00' as ts_upper_bound
SELECT *
FROM hits
WHERE
    EventDate = toDate(ts_upper_bound) AND
    EventTime <= ts_upper_bound
```

Example 2: Evicting the sum(bytes) expression result from the SELECT clause column list

``` sql
WITH sum(bytes) as s
SELECT
    formatReadableSize(s),
    table
FROM system.parts
GROUP BY table
ORDER BY s
```

Example 3: Using the results of a scalar subquery

``` sql
/* this example would return TOP 10 of most huge tables */
WITH
    (
        SELECT sum(bytes)
        FROM system.parts
        WHERE active
    ) AS total_disk_usage
SELECT
    (sum(bytes) / total_disk_usage) * 100 AS table_disk_usage,
    table
FROM system.parts
GROUP BY table
ORDER BY table_disk_usage DESC
LIMIT 10
```

Example 4: Re-using an expression in a subquery
As a workaround for the current restriction on expression usage in subqueries, you may duplicate it.
``` sql
WITH ['hello'] AS hello
SELECT
    hello,
    *
FROM
(
    WITH ['hello'] AS hello
    SELECT hello
)
```

``` text
┌─hello─────┬─hello─────┐
│ ['hello'] │ ['hello'] │
└───────────┴───────────┘
```

### FROM Clause {#select-from}

If the FROM clause is omitted, data will be read from the `system.one` table.
The `system.one` table contains exactly one row (this table fulfills the same purpose as the DUAL table found in other DBMSs).

The `FROM` clause specifies the source to read data from:

- Table
- Subquery
- [Table function](../table_functions/index.md)

`ARRAY JOIN` and the regular `JOIN` may also be included (see below).

Instead of a table, the `SELECT` subquery may be specified in parenthesis.
In contrast to standard SQL, a synonym does not need to be specified after a subquery.

To execute a query, all the columns listed in the query are extracted from the appropriate table. Any columns not needed for the external query are thrown out of the subqueries.
If a query does not list any columns (for example, `SELECT count() FROM t`), some column is extracted from the table anyway (the smallest one is preferred), in order to calculate the number of rows.

#### FINAL Modifier {#select-from-final}

Applicable when selecting data from tables from the [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md)-engine family other than `GraphiteMergeTree`. When `FINAL` is specified, ClickHouse fully merges the data before returning the result and thus performs all data transformations that happen during merges for the given table engine.

Also supported for:
- [Replicated](../../engines/table_engines/mergetree_family/replication.md) versions of `MergeTree` engines.
- [View](../../engines/table_engines/special/view.md), [Buffer](../../engines/table_engines/special/buffer.md), [Distributed](../../engines/table_engines/special/distributed.md), and [MaterializedView](../../engines/table_engines/special/materializedview.md) engines that operate over other engines, provided they were created over `MergeTree`-engine tables.

Queries that use `FINAL` are executed not as fast as similar queries that don't, because:

- The query is executed in a single thread and data is merged during query execution.
- Queries with `FINAL` read primary key columns in addition to the columns specified in the query.

In most cases, avoid using `FINAL`.

### SAMPLE Clause {#select-sample-clause}

The `SAMPLE` clause allows for approximated query processing.

When data sampling is enabled, the query is not performed on all the data, but only on a certain fraction of data (sample). For example, if you need to calculate statistics for all the visits, it is enough to execute the query on the 1/10 fraction of all the visits and then multiply the result by 10.

Approximated query processing can be useful in the following cases:

- When you have strict timing requirements (like \<100ms) but you can't justify the cost of additional hardware resources to meet them.
- When your raw data is not accurate, so approximation doesn't noticeably degrade the quality.
- Business requirements target approximate results (for cost-effectiveness, or in order to market exact results to premium users).

!!! note "Note"
    You can only use sampling with the tables in the [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md) family, and only if the sampling expression was specified during table creation (see [MergeTree engine](../../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-creating-a-table)).

The features of data sampling are listed below:

- Data sampling is a deterministic mechanism. The result of the same `SELECT .. SAMPLE` query is always the same.
- Sampling works consistently for different tables. For tables with a single sampling key, a sample with the same coefficient always selects the same subset of possible data. For example, a sample of user IDs takes rows with the same subset of all the possible user IDs from different tables. This means that you can use the sample in subqueries in the [IN](#select-in-operators) clause. Also, you can join samples using the [JOIN](#select-join) clause.
- Sampling allows reading less data from a disk. Note that you must specify the sampling key correctly. For more information, see [Creating a MergeTree Table](../../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-creating-a-table).

For the `SAMPLE` clause, the following syntax is supported:

| SAMPLE Clause Syntax | Description                                                                                                                                                                                                                                                |
|----------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `SAMPLE k`           | Here `k` is the number from 0 to 1.<br/>The query is executed on `k` fraction of data. For example, `SAMPLE 0.1` runs the query on 10% of data. [Read more](#select-sample-k)                                                                                |
| `SAMPLE n`           | Here `n` is a sufficiently large integer.<br/>The query is executed on a sample of at least `n` rows (but not significantly more than this). For example, `SAMPLE 10000000` runs the query on a minimum of 10,000,000 rows. [Read more](#select-sample-n)    |
| `SAMPLE k OFFSET m`  | Here `k` and `m` are the numbers from 0 to 1.<br/>The query is executed on a sample of `k` fraction of the data. The data used for the sample is offset by `m` fraction. [Read more](#select-sample-offset)                                                  |

#### SAMPLE K {#select-sample-k}

Here `k` is the number from 0 to 1 (both fractional and decimal notations are supported). For example, `SAMPLE 1/2` or `SAMPLE 0.5`.

In a `SAMPLE k` clause, the sample is taken from the `k` fraction of data. The example is shown below:

``` sql
SELECT
    Title,
    count() * 10 AS PageViews
FROM hits_distributed
SAMPLE 0.1
WHERE
    CounterID = 34
GROUP BY Title
ORDER BY PageViews DESC LIMIT 1000
```

In this example, the query is executed on a sample from 0.1 (10%) of data. Values of aggregate functions are not corrected automatically, so to get an approximate result, the value `count()` is manually multiplied by 10.

#### SAMPLE N {#select-sample-n}

Here `n` is a sufficiently large integer. For example, `SAMPLE 10000000`.

In this case, the query is executed on a sample of at least `n` rows (but not significantly more than this). For example, `SAMPLE 10000000` runs the query on a minimum of 10,000,000 rows.

Since the minimum unit for data reading is one granule (its size is set by the `index_granularity` setting), it makes sense to set a sample that is much larger than the size of the granule.

When using the `SAMPLE n` clause, you don't know which relative percent of data was processed. So you don't know the coefficient the aggregate functions should be multiplied by. Use the `_sample_factor` virtual column to get the approximate result.

The `_sample_factor` column contains relative coefficients that are calculated dynamically. This column is created automatically when you [create](../../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-creating-a-table) a table with the specified sampling key. The usage examples of the `_sample_factor` column are shown below.

Let's consider the table `visits`, which contains the statistics about site visits. The first example shows how to calculate the number of page views:

``` sql
SELECT sum(PageViews * _sample_factor)
FROM visits
SAMPLE 10000000
```

The next example shows how to calculate the total number of visits:

``` sql
SELECT sum(_sample_factor)
FROM visits
SAMPLE 10000000
```

The example below shows how to calculate the average session duration. Note that you don't need to use the relative coefficient to calculate the average values.

``` sql
SELECT avg(Duration)
FROM visits
SAMPLE 10000000
```

#### SAMPLE K OFFSET M {#select-sample-offset}

Here `k` and `m` are numbers from 0 to 1. Examples are shown below.

**Example 1**

``` sql
SAMPLE 1/10
```

In this example, the sample is 1/10th of all data:

`[++------------]`

**Example 2**

``` sql
SAMPLE 1/10 OFFSET 1/2
```

Here, a sample of 10% is taken from the second half of the data.

`[------++------]`

### ARRAY JOIN Clause {#select-array-join-clause}

Allows executing `JOIN` with an array or nested data structure. The intent is similar to the [arrayJoin](../../sql_reference/functions/array_join.md#functions_arrayjoin) function, but its functionality is broader.

``` sql
SELECT <expr_list>
FROM <left_subquery>
[LEFT] ARRAY JOIN <array>
[WHERE|PREWHERE <expr>]
...
```

You can specify only a single `ARRAY JOIN` clause in a query.

The query execution order is optimized when running `ARRAY JOIN`. Although `ARRAY JOIN` must always be specified before the `WHERE/PREWHERE` clause, it can be performed either before `WHERE/PREWHERE` (if the result is needed in this clause), or after completing it (to reduce the volume of calculations). The processing order is controlled by the query optimizer.
The supported types of `ARRAY JOIN` are listed below:

- `ARRAY JOIN` - In this case, empty arrays are not included in the result of `JOIN`.
- `LEFT ARRAY JOIN` - The result of `JOIN` contains rows with empty arrays. The value for an empty array is set to the default value for the array element type (usually 0, empty string or NULL).

The examples below demonstrate the usage of the `ARRAY JOIN` and `LEFT ARRAY JOIN` clauses. Let's create a table with an [Array](../../sql_reference/data_types/array.md) type column and insert values into it:

``` sql
CREATE TABLE arrays_test
(
    s String,
    arr Array(UInt8)
) ENGINE = Memory;

INSERT INTO arrays_test
VALUES ('Hello', [1,2]), ('World', [3,4,5]), ('Goodbye', []);
```

``` text
┌─s───────────┬─arr─────┐
│ Hello       │ [1,2]   │
│ World       │ [3,4,5] │
│ Goodbye     │ []      │
└─────────────┴─────────┘
```

The example below uses the `ARRAY JOIN` clause:

``` sql
SELECT s, arr
FROM arrays_test
ARRAY JOIN arr;
```

``` text
┌─s─────┬─arr─┐
│ Hello │   1 │
│ Hello │   2 │
│ World │   3 │
│ World │   4 │
│ World │   5 │
└───────┴─────┘
```

The next example uses the `LEFT ARRAY JOIN` clause:

``` sql
SELECT s, arr
FROM arrays_test
LEFT ARRAY JOIN arr;
```

``` text
┌─s───────────┬─arr─┐
│ Hello       │   1 │
│ Hello       │   2 │
│ World       │   3 │
│ World       │   4 │
│ World       │   5 │
│ Goodbye     │   0 │
└─────────────┴─────┘
```

#### Using Aliases {#using-aliases}

An alias can be specified for an array in the `ARRAY JOIN` clause. In this case, an array item can be accessed by this alias, but the array itself is accessed by the original name. Example:

``` sql
SELECT s, arr, a
FROM arrays_test
ARRAY JOIN arr AS a;
```

``` text
┌─s─────┬─arr─────┬─a─┐
│ Hello │ [1,2]   │ 1 │
│ Hello │ [1,2]   │ 2 │
│ World │ [3,4,5] │ 3 │
│ World │ [3,4,5] │ 4 │
│ World │ [3,4,5] │ 5 │
└───────┴─────────┴───┘
```

Using aliases, you can perform `ARRAY JOIN` with an external array. For example:

``` sql
SELECT s, arr_external
FROM arrays_test
ARRAY JOIN [1, 2, 3] AS arr_external;
```

``` text
┌─s───────────┬─arr_external─┐
│ Hello       │            1 │
│ Hello       │            2 │
│ Hello       │            3 │
│ World       │            1 │
│ World       │            2 │
│ World       │            3 │
│ Goodbye     │            1 │
│ Goodbye     │            2 │
│ Goodbye     │            3 │
└─────────────┴──────────────┘
```

Multiple arrays can be comma-separated in the `ARRAY JOIN` clause. In this case, `JOIN` is performed with them simultaneously (the direct sum, not the cartesian product). Note that all the arrays must have the same size.
Example:

``` sql
SELECT s, arr, a, num, mapped
FROM arrays_test
ARRAY JOIN arr AS a, arrayEnumerate(arr) AS num, arrayMap(x -> x + 1, arr) AS mapped;
```

``` text
┌─s─────┬─arr─────┬─a─┬─num─┬─mapped─┐
│ Hello │ [1,2]   │ 1 │   1 │      2 │
│ Hello │ [1,2]   │ 2 │   2 │      3 │
│ World │ [3,4,5] │ 3 │   1 │      4 │
│ World │ [3,4,5] │ 4 │   2 │      5 │
│ World │ [3,4,5] │ 5 │   3 │      6 │
└───────┴─────────┴───┴─────┴────────┘
```

The example below uses the [arrayEnumerate](../../sql_reference/functions/array_functions.md#array_functions-arrayenumerate) function:

``` sql
SELECT s, arr, a, num, arrayEnumerate(arr)
FROM arrays_test
ARRAY JOIN arr AS a, arrayEnumerate(arr) AS num;
```

``` text
┌─s─────┬─arr─────┬─a─┬─num─┬─arrayEnumerate(arr)─┐
│ Hello │ [1,2]   │ 1 │   1 │ [1,2]               │
│ Hello │ [1,2]   │ 2 │   2 │ [1,2]               │
│ World │ [3,4,5] │ 3 │   1 │ [1,2,3]             │
│ World │ [3,4,5] │ 4 │   2 │ [1,2,3]             │
│ World │ [3,4,5] │ 5 │   3 │ [1,2,3]             │
└───────┴─────────┴───┴─────┴─────────────────────┘
```

#### ARRAY JOIN With Nested Data Structure {#array-join-with-nested-data-structure}

`ARRAY JOIN` also works with [nested data structures](../../sql_reference/data_types/nested_data_structures/nested.md). Example:

``` sql
CREATE TABLE nested_test
(
    s String,
    nest Nested(
    x UInt8,
    y UInt32)
) ENGINE = Memory;

INSERT INTO nested_test
VALUES ('Hello', [1,2], [10,20]), ('World', [3,4,5], [30,40,50]), ('Goodbye', [], []);
```

``` text
┌─s───────┬─nest.x──┬─nest.y─────┐
│ Hello   │ [1,2]   │ [10,20]    │
│ World   │ [3,4,5] │ [30,40,50] │
│ Goodbye │ []      │ []         │
└─────────┴─────────┴────────────┘
```

``` sql
SELECT s, `nest.x`, `nest.y`
FROM nested_test
ARRAY JOIN nest;
```

``` text
┌─s─────┬─nest.x─┬─nest.y─┐
│ Hello │      1 │     10 │
│ Hello │      2 │     20 │
│ World │      3 │     30 │
│ World │      4 │     40 │
│ World │      5 │     50 │
└───────┴────────┴────────┘
```

When specifying names of nested data structures in `ARRAY JOIN`, the meaning is the same as `ARRAY JOIN` with all the array elements that it consists of. An example is listed below:

``` sql
SELECT s, `nest.x`, `nest.y`
FROM nested_test
ARRAY JOIN `nest.x`, `nest.y`;
```

``` text
┌─s─────┬─nest.x─┬─nest.y─┐
│ Hello │      1 │     10 │
│ Hello │      2 │     20 │
│ World │      3 │     30 │
│ World │      4 │     40 │
│ World │      5 │     50 │
└───────┴────────┴────────┘
```

This variation also makes sense:

``` sql
SELECT s, `nest.x`, `nest.y`
FROM nested_test
ARRAY JOIN `nest.x`;
```

``` text
┌─s─────┬─nest.x─┬─nest.y─────┐
│ Hello │      1 │ [10,20]    │
│ Hello │      2 │ [10,20]    │
│ World │      3 │ [30,40,50] │
│ World │      4 │ [30,40,50] │
│ World │      5 │ [30,40,50] │
└───────┴────────┴────────────┘
```

An alias may be used for a nested data structure, in order to select either the `JOIN` result or the source array.
+
+``` sql
+SELECT s, `n.x`, `n.y`, `nest.x`, `nest.y`
+FROM nested_test
+ARRAY JOIN nest AS n;
+```
+
+``` text
+┌─s─────┬─n.x─┬─n.y─┬─nest.x──┬─nest.y─────┐
+│ Hello │   1 │  10 │ [1,2]   │ [10,20]    │
+│ Hello │   2 │  20 │ [1,2]   │ [10,20]    │
+│ World │   3 │  30 │ [3,4,5] │ [30,40,50] │
+│ World │   4 │  40 │ [3,4,5] │ [30,40,50] │
+│ World │   5 │  50 │ [3,4,5] │ [30,40,50] │
+└───────┴─────┴─────┴─────────┴────────────┘
+```
+
+An example of using the [arrayEnumerate](../../sql_reference/functions/array_functions.md#array_functions-arrayenumerate) function:
+
+``` sql
+SELECT s, `n.x`, `n.y`, `nest.x`, `nest.y`, num
+FROM nested_test
+ARRAY JOIN nest AS n, arrayEnumerate(`nest.x`) AS num;
+```
+
+``` text
+┌─s─────┬─n.x─┬─n.y─┬─nest.x──┬─nest.y─────┬─num─┐
+│ Hello │   1 │  10 │ [1,2]   │ [10,20]    │   1 │
+│ Hello │   2 │  20 │ [1,2]   │ [10,20]    │   2 │
+│ World │   3 │  30 │ [3,4,5] │ [30,40,50] │   1 │
+│ World │   4 │  40 │ [3,4,5] │ [30,40,50] │   2 │
+│ World │   5 │  50 │ [3,4,5] │ [30,40,50] │   3 │
+└───────┴─────┴─────┴─────────┴────────────┴─────┘
+```
+
+### JOIN Clause {#select-join}
+
+Joins the data in the normal [SQL JOIN](https://en.wikipedia.org/wiki/Join_(SQL)) sense.
+
+!!! info "Note"
+    Not related to [ARRAY JOIN](#select-array-join-clause).
+
+``` sql
+SELECT <expr_list>
+FROM <left_subquery>
+[GLOBAL] [ANY|ALL] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER] JOIN <right_subquery>
+(ON <expr_list>)|(USING <column_list>) ...
+```
+
+Table names can be specified instead of `<left_subquery>` and `<right_subquery>`. This is equivalent to the `SELECT * FROM table` subquery, except in a special case when the table has the [Join](../../engines/table_engines/special/join.md) engine – an array prepared for joining.
+
+#### Supported Types of `JOIN` {#select-join-types}
+
+- `INNER JOIN` (or `JOIN`)
+- `LEFT JOIN` (or `LEFT OUTER JOIN`)
+- `RIGHT JOIN` (or `RIGHT OUTER JOIN`)
+- `FULL JOIN` (or `FULL OUTER JOIN`)
+- `CROSS JOIN` (or `,` )
+
+See the standard [SQL JOIN](https://en.wikipedia.org/wiki/Join_(SQL)) description.
+
+#### Multiple JOIN {#multiple-join}
+
+Performing queries, ClickHouse rewrites multi-table joins into the sequence of two-table joins. For example, if there are four tables to join, ClickHouse joins the first and the second, then joins the result with the third table, and at the last step, it joins the fourth one.
+
+If a query contains the `WHERE` clause, ClickHouse tries to push down filters from this clause through the intermediate join. If it cannot apply the filter to each intermediate join, ClickHouse applies the filters after all joins are completed.
+
+We recommend the `JOIN ON` or `JOIN USING` syntax for creating queries. For example:
+
+``` sql
+SELECT * FROM t1 JOIN t2 ON t1.a = t2.a JOIN t3 ON t1.a = t3.a
+```
+
+You can use comma-separated lists of tables in the `FROM` clause. For example:
+
+``` sql
+SELECT * FROM t1, t2, t3 WHERE t1.a = t2.a AND t1.a = t3.a
+```
+
+Don't mix these syntaxes.
+
+ClickHouse doesn't directly support syntax with commas, so we don't recommend using them. The algorithm tries to rewrite the query in terms of `CROSS JOIN` and `INNER JOIN` clauses and then proceeds to query processing. When rewriting the query, ClickHouse tries to optimize performance and memory consumption. By default, ClickHouse treats commas as an `INNER JOIN` clause and converts `INNER JOIN` to `CROSS JOIN` when the algorithm cannot guarantee that `INNER JOIN` returns the required data.
+
+#### Strictness {#select-join-strictness}
+
+- `ALL` — If the right table has several matching rows, ClickHouse creates a [Cartesian product](https://en.wikipedia.org/wiki/Cartesian_product) from matching rows. This is the standard `JOIN` behavior in SQL.
+- `ANY` — If the right table has several matching rows, only the first one found is joined. If the right table has only one matching row, the results of queries with `ANY` and `ALL` keywords are the same.
+- `ASOF` — For joining sequences with a non-exact match. `ASOF JOIN` usage is described below.
+
+**ASOF JOIN Usage**
+
+`ASOF JOIN` is useful when you need to join records that have no exact match.
+
+Tables for `ASOF JOIN` must have an ordered sequence column. This column cannot be alone in a table, and should be one of the data types: `UInt32`, `UInt64`, `Float32`, `Float64`, `Date`, and `DateTime`.
+
+Syntax `ASOF JOIN ... ON`:
+
+``` sql
+SELECT expressions_list
+FROM table_1
+ASOF LEFT JOIN table_2
+ON equi_cond AND closest_match_cond
+```
+
+You can use any number of equality conditions and exactly one closest match condition. For example, `SELECT count() FROM table_1 ASOF LEFT JOIN table_2 ON table_1.a == table_2.b AND table_2.t <= table_1.t`.
+
+Conditions supported for the closest match: `>`, `>=`, `<`, `<=`.
+
+Syntax `ASOF JOIN ... USING`:
+
+``` sql
+SELECT expressions_list
+FROM table_1
+ASOF JOIN table_2
+USING (equi_column1, ... equi_columnN, asof_column)
+```
+
+`ASOF JOIN` uses `equi_columnX` for joining on equality and `asof_column` for joining on the closest match with the `table_1.asof_column >= table_2.asof_column` condition. The `asof_column` column is always the last one in the `USING` clause.
+
+For example, consider the following tables:
+
+``` text
+     table_1                           table_2
+  event   | ev_time | user_id       event   | ev_time | user_id
+```
diff --git a/docs/fa/sql_reference/statements/show.md b/docs/fa/sql_reference/statements/show.md
new file mode 100644
index 00000000000..19b6fa5b622
--- /dev/null
+++ b/docs/fa/sql_reference/statements/show.md
@@ -0,0 +1,105 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 38
+toc_title: SHOW
+---
+
+# SHOW Queries {#show-queries}
+
+## SHOW CREATE TABLE {#show-create-table}
+
+``` sql
+SHOW CREATE [TEMPORARY] [TABLE|DICTIONARY] [db.]table [INTO OUTFILE filename] [FORMAT format]
+```
+
+Returns a single `String`-type ‘statement’ column, which contains a single value – the `CREATE` query used for creating the specified object.
+
+## SHOW DATABASES {#show-databases}
+
+``` sql
+SHOW DATABASES [INTO OUTFILE filename] [FORMAT format]
+```
+
+Prints a list of all databases.
+This query is identical to `SELECT name FROM system.databases [INTO OUTFILE filename] [FORMAT format]`.
+
+## SHOW PROCESSLIST {#show-processlist}
+
+``` sql
+SHOW PROCESSLIST [INTO OUTFILE filename] [FORMAT format]
+```
+
+Outputs the content of the [system.processes](../../operations/system_tables.md#system_tables-processes) table, that contains a list of queries that are being processed at the moment, excepting `SHOW PROCESSLIST` queries.
+
+The `SELECT * FROM system.processes` query returns data about all the current queries.
+
+Tip (execute in the console):
+
+``` bash
+$ watch -n1 "clickhouse-client --query='SHOW PROCESSLIST'"
+```
+
+## SHOW TABLES {#show-tables}
+
+Displays a list of tables.
+
+``` sql
+SHOW [TEMPORARY] TABLES [{FROM | IN} <db>] [LIKE '<pattern>' | WHERE expr] [LIMIT <N>] [INTO OUTFILE <filename>] [FORMAT <format>]
+```
+
+If the `FROM` clause is not specified, the query returns the list of tables from the current database.
+
+You can get the same results as the `SHOW TABLES` query in the following way:
+
+``` sql
+SELECT name FROM system.tables WHERE database = <db> [AND name LIKE <pattern>] [LIMIT <N>] [INTO OUTFILE <filename>] [FORMAT <format>]
+```
+
+**Example**
+
+The following query selects the first two rows from the list of tables in the `system` database, whose names contain `co`.
+
+``` sql
+SHOW TABLES FROM system LIKE '%co%' LIMIT 2
+```
+
+``` text
+┌─name───────────────────────────┐
+│ aggregate_function_combinators │
+│ collations                     │
+└────────────────────────────────┘
+```
+
+## SHOW DICTIONARIES {#show-dictionaries}
+
+Displays a list of [external dictionaries](../../sql_reference/dictionaries/external_dictionaries/external_dicts.md).
+
+``` sql
+SHOW DICTIONARIES [FROM <db>] [LIKE '<pattern>'] [LIMIT <N>] [INTO OUTFILE <filename>] [FORMAT <format>]
+```
+
+If the `FROM` clause is not specified, the query returns the list of dictionaries from the current database.
+
+You can get the same results as the `SHOW DICTIONARIES` query in the following way:
+
+``` sql
+SELECT name FROM system.dictionaries WHERE database = <db> [AND name LIKE <pattern>] [LIMIT <N>] [INTO OUTFILE <filename>] [FORMAT <format>]
+```
+
+**Example**
+
+The following query selects the first two rows from the list of dictionaries in the `db` database, whose names contain `reg`.
+
+``` sql
+SHOW DICTIONARIES FROM db LIKE '%reg%' LIMIT 2
+```
+
+``` text
+┌─name─────────┐
+│ regions      │
+│ region_names │
+└──────────────┘
+```
+
+[Original article](https://clickhouse.tech/docs/en/query_language/show/)
diff --git a/docs/fa/sql_reference/statements/system.md b/docs/fa/sql_reference/statements/system.md
new file mode 100644
index 00000000000..6dfedc03cc1
--- /dev/null
+++ b/docs/fa/sql_reference/statements/system.md
@@ -0,0 +1,113 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 37
+toc_title: SYSTEM
+---
+
+# SYSTEM Queries {#query-language-system}
+
+- [RELOAD DICTIONARIES](#query_language-system-reload-dictionaries)
+- [RELOAD DICTIONARY](#query_language-system-reload-dictionary)
+- [DROP DNS CACHE](#query_language-system-drop-dns-cache)
+- [DROP MARK CACHE](#query_language-system-drop-mark-cache)
+- [FLUSH LOGS](#query_language-system-flush_logs)
+- [RELOAD CONFIG](#query_language-system-reload-config)
+- [SHUTDOWN](#query_language-system-shutdown)
+- [KILL](#query_language-system-kill)
+- [STOP DISTRIBUTED SENDS](#query_language-system-stop-distributed-sends)
+- [FLUSH DISTRIBUTED](#query_language-system-flush-distributed)
+- [START DISTRIBUTED SENDS](#query_language-system-start-distributed-sends)
+- [STOP MERGES](#query_language-system-stop-merges)
+- [START MERGES](#query_language-system-start-merges)
+
+## RELOAD DICTIONARIES {#query_language-system-reload-dictionaries}
+
+Reloads all dictionaries that have been successfully loaded before.
+By default, dictionaries are loaded lazily (see [dictionaries\_lazy\_load](../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-dictionaries_lazy_load)), so instead of being loaded automatically at startup, they are initialized on first access through the dictGet function or SELECT from tables with ENGINE = Dictionary. The `SYSTEM RELOAD DICTIONARIES` query reloads such (LOADED) dictionaries.
+Always returns `Ok.` regardless of the result of the dictionary update.
+
+## RELOAD DICTIONARY {#query_language-system-reload-dictionary}
+
+Completely reloads the dictionary `dictionary_name`, regardless of the state of the dictionary (LOADED / NOT\_LOADED / FAILED).
+Always returns `Ok.` regardless of the result of updating the dictionary.
+The status of the dictionary can be checked by querying the `system.dictionaries` table.
+
+``` sql
+SELECT name, status FROM system.dictionaries;
+```
+
+## DROP DNS CACHE {#query_language-system-drop-dns-cache}
+
+Resets ClickHouse's internal DNS cache. Sometimes (for old ClickHouse versions) it is necessary to use this command when changing the infrastructure (changing the IP address of another ClickHouse server or the server used by dictionaries).
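+
+The statement takes no arguments; a minimal invocation is simply:
+
+``` sql
+SYSTEM DROP DNS CACHE
+```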
+
+For more convenient (automatic) cache management, see the disable\_internal\_dns\_cache, dns\_cache\_update\_period parameters.
+
+## DROP MARK CACHE {#query_language-system-drop-mark-cache}
+
+Resets the mark cache. Used in development of ClickHouse and performance tests.
+
+## FLUSH LOGS {#query_language-system-flush_logs}
+
+Flushes buffers of log messages to system tables (e.g. system.query\_log). Allows you to not wait 7.5 seconds when debugging.
+
+## RELOAD CONFIG {#query_language-system-reload-config}
+
+Reloads the ClickHouse configuration. Used when the configuration is stored in ZooKeeper.
+
+## SHUTDOWN {#query_language-system-shutdown}
+
+Normally shuts down ClickHouse (like `service clickhouse-server stop` / `kill {$pid_clickhouse-server}`)
+
+## KILL {#query_language-system-kill}
+
+Aborts the ClickHouse process (like `kill -9 {$ pid_clickhouse-server}`)
+
+## Managing Distributed Tables {#query-language-system-distributed}
+
+ClickHouse can manage [distributed](../../engines/table_engines/special/distributed.md) tables. When a user inserts data into these tables, ClickHouse first creates a queue of the data that should be sent to cluster nodes, then asynchronously sends it. You can manage queue processing with the [STOP DISTRIBUTED SENDS](#query_language-system-stop-distributed-sends), [FLUSH DISTRIBUTED](#query_language-system-flush-distributed), and [START DISTRIBUTED SENDS](#query_language-system-start-distributed-sends) queries. You can also synchronously insert distributed data with the `insert_distributed_sync` setting.
+
+### STOP DISTRIBUTED SENDS {#query_language-system-stop-distributed-sends}
+
+Disables background data distribution when inserting data into distributed tables.
+
+``` sql
+SYSTEM STOP DISTRIBUTED SENDS [db.]<distributed_table_name>
+```
+
+### FLUSH DISTRIBUTED {#query_language-system-flush-distributed}
+
+Forces ClickHouse to send data to cluster nodes synchronously. If any nodes are unavailable, ClickHouse throws an exception and stops query execution. You can retry the query until it succeeds, which will happen when all nodes are back online.
+
+``` sql
+SYSTEM FLUSH DISTRIBUTED [db.]<distributed_table_name>
+```
+
+### START DISTRIBUTED SENDS {#query_language-system-start-distributed-sends}
+
+Enables background data distribution when inserting data into distributed tables.
+
+``` sql
+SYSTEM START DISTRIBUTED SENDS [db.]<distributed_table_name>
+```
+
+### STOP MERGES {#query_language-system-stop-merges}
+
+Provides the possibility to stop background merges for tables in the MergeTree family:
+
+``` sql
+SYSTEM STOP MERGES [[db.]merge_tree_family_table_name]
+```
+
+!!! note "Note"
+    `DETACH / ATTACH` table will start background merges for the table even in case when merges have been stopped for all MergeTree tables before.
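+
+For example, to pause merges for a single table during maintenance and later resume them with `START MERGES` (described next); the table `db.visits` here is hypothetical:
+
+``` sql
+SYSTEM STOP MERGES db.visits;
+-- ... perform maintenance while no new background merges are assigned for db.visits ...
+SYSTEM START MERGES db.visits;
+```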
+
+### START MERGES {#query_language-system-start-merges}
+
+Provides the possibility to start background merges for tables in the MergeTree family:
+
+``` sql
+SYSTEM START MERGES [[db.]merge_tree_family_table_name]
+```
+
+[Original article](https://clickhouse.tech/docs/en/query_language/system/)
diff --git a/docs/fa/sql_reference/syntax.md b/docs/fa/sql_reference/syntax.md
new file mode 100644
index 00000000000..250b6c6aa5f
--- /dev/null
+++ b/docs/fa/sql_reference/syntax.md
@@ -0,0 +1,187 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 31
+toc_title: Syntax
+---
+
+# Syntax {#syntax}
+
+There are two types of parsers in the system: the full SQL parser (a recursive descent parser), and the data format parser (a fast stream parser).
+In all cases except the `INSERT` query, only the full SQL parser is used.
+The `INSERT` query uses both parsers:
+
+``` sql
+INSERT INTO t VALUES (1, 'Hello, world'), (2, 'abc'), (3, 'def')
+```
+
+The `INSERT INTO t VALUES` fragment is parsed by the full parser, and the data `(1, 'Hello, world'), (2, 'abc'), (3, 'def')` is parsed by the fast stream parser. You can also turn on the full parser for the data by using the [input\_format\_values\_interpret\_expressions](../operations/settings/settings.md#settings-input_format_values_interpret_expressions) setting. When `input_format_values_interpret_expressions = 1`, ClickHouse first tries to parse values with the fast stream parser. If it fails, ClickHouse tries to use the full parser for the data, treating it like an SQL [expression](#syntax-expressions).
+
+Data can have any format. When a query is received, the server calculates no more than [max\_query\_size](../operations/settings/settings.md#settings-max_query_size) bytes of the request in RAM (by default, 1 MB), and the rest is stream parsed.
+This means the system doesn't have problems with large `INSERT` queries, like MySQL does.
+
+When using the `Values` format in an `INSERT` query, it may seem that data is parsed the same as expressions in a `SELECT` query, but this is not true. The `Values` format is much more limited.
+
+Next we will cover the full parser. For more information about format parsers, see the [Formats](../interfaces/formats.md) section.
+
+## Spaces {#spaces}
+
+There may be any number of space symbols between syntactical constructions (including the beginning and end of a query). Space symbols include the space, tab, line feed, CR, and form feed.
+
+## Comments {#comments}
+
+SQL-style and C-style comments are supported.
+SQL-style comments: from `--` to the end of the line. The space after `--` can be omitted.
+Comments in C-style: from `/*` to `*/`. These comments can be multiline. Spaces are not required here, either.
+
+## Keywords {#syntax-keywords}
+
+Keywords are case-insensitive when they correspond to:
+
+- SQL standard. For example, `SELECT`, `select` and `SeLeCt` are all valid.
+- Implementation in some popular DBMS (MySQL or Postgres). For example, `DateTime` is the same as `datetime`.
+
+Whether a data type name is case-sensitive can be checked in the `system.data_type_families` table.
+
+In contrast to standard SQL, all other keywords (including function names) are **case-sensitive**.
+
+Keywords are not reserved (they are just parsed as keywords in the corresponding context). If you use [identifiers](#syntax-identifiers) that are the same as keywords, enclose them in quotes. For example, the query `SELECT "FROM" FROM table_name` is valid if the table `table_name` has a column with the name `"FROM"`.
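+
+A minimal sketch of that last point (the table and its `FROM` column here are hypothetical):
+
+``` sql
+CREATE TABLE table_name (`FROM` String) ENGINE = Memory;
+-- The double-quoted "FROM" is parsed as an identifier, not as the keyword
+SELECT "FROM" FROM table_name;
+```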
+
+## Identifiers {#syntax-identifiers}
+
+Identifiers are:
+
+- Cluster, database, table, partition, and column names.
+- Functions.
+- Data types.
+- [Expression aliases](#syntax-expression_aliases).
+
+Identifiers can be quoted or non-quoted. It is recommended to use non-quoted identifiers.
+
+Non-quoted identifiers must match the regex `^[a-zA-Z_][0-9a-zA-Z_]*$` and cannot be equal to [keywords](#syntax-keywords). Examples: `x, _1, X_y__Z123_.`
+
+If you want to use identifiers that are the same as keywords or you want to use other symbols in identifiers, quote them using double quotes or backticks, for example, `"id"`, `` `id` ``.
+
+## Literals {#literals}
+
+There are numeric, string, compound, and `NULL` literals.
+
+### Numeric {#numeric}
+
+A numeric literal tries to be parsed:
+
+- First, as a 64-bit signed number, using the [strtoull](https://en.cppreference.com/w/cpp/string/byte/strtoul) function.
+- If unsuccessful, as a 64-bit unsigned number, using the [strtoll](https://en.cppreference.com/w/cpp/string/byte/strtol) function.
+- If unsuccessful, as a floating-point number, using the [strtod](https://en.cppreference.com/w/cpp/string/byte/strtof) function.
+- Otherwise, an error is returned.
+
+The corresponding value will have the smallest type that the value fits in.
+For example, 1 is parsed as `UInt8`, but 256 is parsed as `UInt16`. For more information, see [Data types](../sql_reference/data_types/index.md).
+
+Examples: `1`, `18446744073709551615`, `0xDEADBEEF`, `01`, `0.1`, `1e100`, `-1e-100`, `inf`, `nan`.
+
+### String {#syntax-string-literal}
+
+Only string literals in single quotes are supported. The enclosed characters can be backslash-escaped. The following escape sequences have a corresponding special value: `\b`, `\f`, `\r`, `\n`, `\t`, `\0`, `\a`, `\v`, `\xHH`. In all other cases, escape sequences in the format `\c`, where `c` is any character, are converted to `c`. This means that you can use the sequences `\'` and `\\`. The value will have the [String](../sql_reference/data_types/string.md) type.
+
+The minimum set of characters that you need to escape in string literals: `'` and `\`. A single quote can be escaped with a single quote, so the literals `'It\'s'` and `'It''s'` are equal.
+
+### Compound {#compound}
+
+Constructions are supported for arrays: `[1, 2, 3]` and tuples: `(1, 'Hello, world!', 2)`.
+Actually, these are not literals, but expressions with the array creation operator and the tuple creation operator, respectively.
+An array must consist of at least one item, and a tuple must have at least two items.
+Tuples have a special purpose for use in the `IN` clause of a `SELECT` query. Tuples can be obtained as the result of a query, but they can't be saved to a database (with the exception of [Memory](../engines/table_engines/special/memory.md) tables).
+
+### NULL {#null-literal}
+
+Indicates that the value is missing.
+
+In order to store `NULL` in a table field, it must be of the [Nullable](../sql_reference/data_types/nullable.md) type.
+
+Depending on the data format (input or output), `NULL` may have a different representation. For more information, see the documentation for [data formats](../interfaces/formats.md#formats).
+
+There are many nuances to processing `NULL`. For example, if at least one of the arguments of a comparison operation is `NULL`, the result of this operation is also `NULL`. The same is true for multiplication, addition, and other operations. For more information, read the documentation for each operation.
+
+In queries, you can check `NULL` using the [IS NULL](operators.md#operator-is-null) and [IS NOT NULL](operators.md) operators and the related functions `isNull` and `isNotNull`.
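+
+A quick sketch of such checks; both the operators and the functions can be used in any expression:
+
+``` sql
+SELECT NULL IS NULL, 1 IS NOT NULL, isNull(NULL), isNotNull(1)
+```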
+
+## Functions {#functions}
+
+Functions are written like an identifier with a list of arguments (possibly empty) in brackets. In contrast to standard SQL, the brackets are required, even for an empty argument list. Example: `now()`.
+There are regular and aggregate functions (see the section “Aggregate functions”). Some aggregate functions can contain two lists of arguments in brackets. Example: `quantile (0.9) (x)`. These aggregate functions are called “parametric” functions, and the arguments in the first list are called “parameters”. The syntax of aggregate functions without parameters is the same as for regular functions.
+
+## Operators {#operators}
+
+Operators are converted to their corresponding functions during query parsing, taking their priority and associativity into account.
+For example, the expression `1 + 2 * 3 + 4` is transformed to `plus(plus(1, multiply(2, 3)), 4)`.
+
+## Data Types and Database Table Engines {#data_types-and-database-table-engines}
+
+Data types and table engines in the `CREATE` query are written the same way as identifiers or functions. In other words, they may or may not contain an argument list in brackets. For more information, see the sections “Data types,” “Table engines,” and “CREATE”.
+
+## Expression Aliases {#syntax-expression_aliases}
+
+An alias is a user-defined name for an expression in a query.
+
+``` sql
+expr AS alias
+```
+
+- `AS` — The keyword for defining aliases. You can define the alias for a table name or a column name in a `SELECT` clause without using the `AS` keyword.
+
+    For example, `SELECT table_name_alias.column_name FROM table_name table_name_alias`.
+
+    In the [CAST](sql_reference/functions/type_conversion_functions.md#type_conversion_function-cast) function, the `AS` keyword has another meaning. See the description of the function.
+
+- `expr` — Any expression supported by ClickHouse.
+
+    For example, `SELECT column_name * 2 AS double FROM some_table`.
+
+- `alias` — Name for `expr`. Aliases should comply with the [identifiers](#syntax-identifiers) syntax.
+
+    For example, `SELECT "table t".column_name FROM table_name AS "table t"`.
+
+### Notes on Usage {#notes-on-usage}
+
+Aliases are global for a query or subquery, and you can define an alias in any part of a query for any expression. For example, `SELECT (1 AS n) + 2, n`.
+
+Aliases are not visible in subqueries and between subqueries. For example, while executing the query `SELECT (SELECT sum(b.a) + num FROM b) - a.a AS num FROM a` ClickHouse generates the exception `Unknown identifier: num`.
+
+If an alias is defined for the result columns in the `SELECT` clause of a subquery, these columns are visible in the outer query. For example, `SELECT n + m FROM (SELECT 1 AS n, 2 AS m)`.
+
+Be careful with aliases that are the same as column or table names. Let's consider the following example:
+
+``` sql
+CREATE TABLE t
+(
+    a Int,
+    b Int
+)
+ENGINE = TinyLog()
+```
+
+``` sql
+SELECT
+    argMax(a, b),
+    sum(b) AS b
+FROM t
+```
+
+``` text
+Received exception from server (version 18.14.17):
+Code: 184. DB::Exception: Received from localhost:9000, 127.0.0.1. DB::Exception: Aggregate function sum(b) is found inside another aggregate function in query.
+```
+
+In this example, we declared table `t` with column `b`. Then, when selecting data, we defined the `sum(b) AS b` alias. As aliases are global, ClickHouse substituted the literal `b` in the expression `argMax(a, b)` with the expression `sum(b)`. This substitution caused the exception.
+
+## Asterisk {#asterisk}
+
+In a `SELECT` query, an asterisk can replace the expression. For more information, see the section “SELECT”.
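+
+For instance, reusing the table `t` declared above, the following two queries are equivalent, since the asterisk expands to the full column list:
+
+``` sql
+SELECT * FROM t;
+SELECT a, b FROM t;
+```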
+
+## Expressions {#syntax-expressions}
+
+An expression is a function, identifier, literal, application of an operator, expression in brackets, subquery, or asterisk. It can also contain an alias.
+A list of expressions is one or more expressions separated by commas.
+Functions and operators, in turn, can have expressions as arguments.
+
+[Original article](https://clickhouse.tech/docs/en/query_language/syntax/)
diff --git a/docs/fa/sql_reference/table_functions/file.md b/docs/fa/sql_reference/table_functions/file.md
new file mode 100644
index 00000000000..62e49c8cd80
--- /dev/null
+++ b/docs/fa/sql_reference/table_functions/file.md
@@ -0,0 +1,121 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 37
+toc_title: file
+---
+
+# file {#file}
+
+Creates a table from a file. This table function is similar to the [url](url.md) and [hdfs](hdfs.md) ones.
+
+``` sql
+file(path, format, structure)
+```
+
+**Input parameters**
+
+- `path` — The relative path to the file from [user\_files\_path](../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-user_files_path). Path to file supports the following globs in read-only mode: `*`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc', 'def'` — strings.
+- `format` — The [format](../../interfaces/formats.md#formats) of the file.
+- `structure` — Structure of the table. Format `'column1_name column1_type, column2_name column2_type, ...'`.
+
+**Returned value**
+
+A table with the specified structure, for reading or writing data in the specified file.
+
+**Example**
+
+Setting `user_files_path` and the contents of the file `test.csv`:
+
+``` bash
+$ grep user_files_path /etc/clickhouse-server/config.xml
+    /var/lib/clickhouse/user_files/
+
+$ cat /var/lib/clickhouse/user_files/test.csv
+    1,2,3
+    3,2,1
+    78,43,45
+```
+
+Table from `test.csv` and selection of the first two rows from it:
+
+``` sql
+SELECT *
+FROM file('test.csv', 'CSV', 'column1 UInt32, column2 UInt32, column3 UInt32')
+LIMIT 2
+```
+
+``` text
+┌─column1─┬─column2─┬─column3─┐
+│       1 │       2 │       3 │
+│       3 │       2 │       1 │
+└─────────┴─────────┴─────────┘
+```
+
+``` sql
+-- getting the first 10 lines of a table that contains 3 columns of UInt32 type from a CSV file
+SELECT * FROM file('test.csv', 'CSV', 'column1 UInt32, column2 UInt32, column3 UInt32') LIMIT 10
+```
+
+**Globs in path**
+
+Multiple path components can have globs. For being processed, a file should exist and match the whole path pattern (not only the suffix or prefix).
+
+- `*` — Substitutes any number of any characters except `/` including the empty string.
+- `?` — Substitutes any single character.
+- `{some_string,another_string,yet_another_one}` — Substitutes any of strings `'some_string', 'another_string', 'yet_another_one'`.
+- `{N..M}` — Substitutes any number in range from N to M including both borders.
+
+Constructions with `{}` are similar to the [remote](../../sql_reference/table_functions/remote.md) table function.
+
+**Example**
+
+1.  Suppose we have several files with the following relative paths:
+
+- ‘some\_dir/some\_file\_1’
+- ‘some\_dir/some\_file\_2’
+- ‘some\_dir/some\_file\_3’
+- ‘another\_dir/some\_file\_1’
+- ‘another\_dir/some\_file\_2’
+- ‘another\_dir/some\_file\_3’
+
+2.  Query the amount of rows in these files:
+
+``` sql
+SELECT count(*)
+FROM file('{some,another}_dir/some_file_{1..3}', 'TSV', 'name String, value UInt32')
+```
+
+3.  Query the amount of rows in all files of these two directories:
+
+``` sql
+SELECT count(*)
+FROM file('{some,another}_dir/*', 'TSV', 'name String, value UInt32')
+```
+
+!!! warning "Warning"
+    If your listing of files contains number ranges with leading zeros, use the construction with braces for each digit separately or use `?`.
+
+**Example**
+
+Query the data from files named `file000`, `file001`, … , `file999`:
+
+``` sql
+SELECT count(*)
+FROM file('big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name String, value UInt32')
+```
+
+## Virtual Columns {#virtual-columns}
+
+- `_path` — Path to the file.
+- `_file` — Name of the file.
+
+**See Also**
+
+- [Virtual columns](https://clickhouse.tech/docs/en/operations/table_engines/#table_engines-virtual_columns)
+
+[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/file/)
diff --git a/docs/es/query_language/table_functions/generate.md b/docs/fa/sql_reference/table_functions/generate.md
similarity index 50%
rename from docs/es/query_language/table_functions/generate.md
rename to docs/fa/sql_reference/table_functions/generate.md
index 5192b0563f3..0236d99cf57 100644
--- a/docs/es/query_language/table_functions/generate.md
+++ b/docs/fa/sql_reference/table_functions/generate.md
@@ -1,31 +1,34 @@
 ---
 machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 47
+toc_title: generateRandom
 ---
 
-# generateRandom {#generaterandom}
+# generateRandom {#generaterandom}
 
-Generates random data with the given schema.
-Allows populating test tables with data.
-Supports all data types that can be stored in a table, except `LowCardinality` and `AggregateFunction`.
+Generates random data with the given schema.
+Allows populating test tables with data.
+Supports all data types that can be stored in a table, except `LowCardinality` and `AggregateFunction`.
 
 ``` sql
 generateRandom('name TypeName[, name TypeName]...', [, 'random_seed'[, 'max_string_length'[, 'max_array_length']]]);
 ```
 
-**Parameters**
+**Parameters**
 
-- `name` — Name of the corresponding column.
-- `TypeName` — Type of the corresponding column.
-- `limit` — Number of rows to generate.
-- `max_array_length` — Maximum array length for all generated arrays. Defaults to `10`.
-- `max_string_length` — Maximum string length for all generated strings. Defaults to `10`.
-- `random_seed` — Specify the random seed manually to produce stable results. If NULL — the seed is randomly generated.
+- `name` — Name of corresponding column.
+- `TypeName` — Type of corresponding column.
+- `limit` — Number of rows to generate.
+- `max_array_length` — Maximum array length for all generated arrays. Defaults to `10`.
+- `max_string_length` — Maximum string length for all generated strings. Defaults to `10`.
+- `random_seed` — Specify random seed manually to produce stable results. If NULL — seed is randomly generated.
 
-**Returned value**
+**Returned value**
 
-A table object with the requested schema.
+A table object with the requested schema.
-## Usage example {#usage-example}
+## Usage example {#usage-example}
 
 ``` sql
 SELECT * FROM generateRandom('a Array(Int8), d Decimal32(4), c Tuple(DateTime64(3), UUID)', 1, 10, 2);
 ```
 
@@ -39,4 +42,4 @@ SELECT * FROM generateRandom('a Array(Int8), d Decimal32(4), c Tuple(DateTime64(
 └──────────┴──────────────┴────────────────────────────────────────────────────────────────────┘
 ```
 
-[Original article](https://clickhouse.tech/docs/es/query_language/table_functions/generate/)
+[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/generate/)
diff --git a/docs/fa/sql_reference/table_functions/hdfs.md b/docs/fa/sql_reference/table_functions/hdfs.md
new file mode 100644
index 00000000000..44e9c1d627b
--- /dev/null
+++ b/docs/fa/sql_reference/table_functions/hdfs.md
@@ -0,0 +1,104 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 45
+toc_title: hdfs
+---
+
+# hdfs {#hdfs}
+
+Creates a table from files in HDFS. This table function is similar to the [url](url.md) and [file](file.md) ones.
+
+``` sql
+hdfs(URI, format, structure)
+```
+
+**Input parameters**
+
+- `URI` — The relative URI to the file in HDFS. Path to file supports the following globs in read-only mode: `*`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc', 'def'` — strings.
+- `format` — The [format](../../interfaces/formats.md#formats) of the file.
+- `structure` — Structure of the table. Format `'column1_name column1_type, column2_name column2_type, ...'`.
+
+**Returned value**
+
+A table with the specified structure, for reading or writing data in the specified file.
+
+**Example**
+
+Table from `hdfs://hdfs1:9000/test` and selection of the first two rows from it:
+
+``` sql
+SELECT *
+FROM hdfs('hdfs://hdfs1:9000/test', 'TSV', 'column1 UInt32, column2 UInt32, column3 UInt32')
+LIMIT 2
+```
+
+``` text
+┌─column1─┬─column2─┬─column3─┐
+│       1 │       2 │       3 │
+│       3 │       2 │       1 │
+└─────────┴─────────┴─────────┘
+```
+
+**Globs in path**
+
+Multiple path components can have globs. For being processed, a file should exist and match the whole path pattern (not only the suffix or prefix).
+
+- `*` — Substitutes any number of any characters except `/` including the empty string.
+- `?` — Substitutes any single character.
+- `{some_string,another_string,yet_another_one}` — Substitutes any of strings `'some_string', 'another_string', 'yet_another_one'`.
+- `{N..M}` — Substitutes any number in range from N to M including both borders.
+
+Constructions with `{}` are similar to the [remote](../../sql_reference/table_functions/remote.md) table function.
+
+**Example**
+
+1.  Suppose that we have several files with the following URIs on HDFS:
+
+- ‘hdfs://hdfs1:9000/some\_dir/some\_file\_1’
+- ‘hdfs://hdfs1:9000/some\_dir/some\_file\_2’
+- ‘hdfs://hdfs1:9000/some\_dir/some\_file\_3’
+- ‘hdfs://hdfs1:9000/another\_dir/some\_file\_1’
+- ‘hdfs://hdfs1:9000/another\_dir/some\_file\_2’
+- ‘hdfs://hdfs1:9000/another\_dir/some\_file\_3’
+
+2.  Query the amount of rows in these files:
+
+``` sql
+SELECT count(*)
+FROM hdfs('hdfs://hdfs1:9000/{some,another}_dir/some_file_{1..3}', 'TSV', 'name String, value UInt32')
+```
+
+3.  Query the amount of rows in all files of these two directories:
+
+``` sql
+SELECT count(*)
+FROM hdfs('hdfs://hdfs1:9000/{some,another}_dir/*', 'TSV', 'name String, value UInt32')
+```
+
+!!! warning "Warning"
+    If your listing of files contains number ranges with leading zeros, use the construction with braces for each digit separately or use `?`.
+
+**Example**
+
+Query the data from files named `file000`, `file001`, … , `file999`:
+
+``` sql
+SELECT count(*)
+FROM hdfs('hdfs://hdfs1:9000/big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name String, value UInt32')
+```
+
+## Virtual Columns {#virtual-columns}
+
+- `_path` — Path to the file.
+- `_file` — Name of the file.
+
+**See Also**
+
+- [Virtual columns](https://clickhouse.tech/docs/en/operations/table_engines/#table_engines-virtual_columns)
+
+[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/hdfs/)
diff --git a/docs/fa/sql_reference/table_functions/index.md b/docs/fa/sql_reference/table_functions/index.md
new file mode 100644
index 00000000000..58143c6a5dd
--- /dev/null
+++ b/docs/fa/sql_reference/table_functions/index.md
@@ -0,0 +1,38 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_folder_title: Table Functions
+toc_priority: 34
+toc_title: Introduction
+---
+
+# Table Functions {#table-functions}
+
+Table functions are methods for constructing tables.
+
+You can use table functions in:
+
+- [FROM](../statements/select.md#select-from) clause of the `SELECT` query.
+
+    The method for creating a temporary table that is available only in the current query. The table is deleted when the query finishes.
+
+- [CREATE TABLE AS \<table\_function()\>](../statements/create.md#create-table-query) query.
+
+    It's one of the methods of creating a table.
+
+!!! warning "Warning"
+    You can't use table functions if the [allow\_ddl](../../operations/settings/permissions_for_queries.md#settings_allow_ddl) setting is disabled.
+
+| Function              | Description                                                                                                                             |
+|-----------------------|-----------------------------------------------------------------------------------------------------------------------------------------|
+| [file](file.md)       | Creates a [File](../../engines/table_engines/special/file.md)-engine table.                                                               |
+| [merge](merge.md)     | Creates a [Merge](../../engines/table_engines/special/merge.md)-engine table.                                                             |
+| [numbers](numbers.md) | Creates a table with a single column filled with integer numbers.                                                                         |
+| [remote](remote.md)   | Allows you to access remote servers without creating a [Distributed](../../engines/table_engines/special/distributed.md)-engine table.    |
+| [url](url.md)         | Creates a [URL](../../engines/table_engines/special/url.md)-engine table.                                                                 |
+| [mysql](mysql.md)     | Creates a [MySQL](../../engines/table_engines/integrations/mysql.md)-engine table.                                                        |
+| [jdbc](jdbc.md)       | Creates a [JDBC](../../engines/table_engines/integrations/jdbc.md)-engine table.                                                          |
+| [odbc](odbc.md)       | Creates an [ODBC](../../engines/table_engines/integrations/odbc.md)-engine table.                                                         |
+| [hdfs](hdfs.md)       | Creates an [HDFS](../../engines/table_engines/integrations/hdfs.md)-engine table.                                                         |
+
+[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/)
diff --git a/docs/fa/sql_reference/table_functions/input.md b/docs/fa/sql_reference/table_functions/input.md
new file mode 100644
index 00000000000..0ab23171f73
--- /dev/null
+++ b/docs/fa/sql_reference/table_functions/input.md
@@ -0,0 +1,47 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 46
+toc_title: input
+---
+
+# input {#input}
+
+`input(structure)` - table function that allows effectively convert and insert data sent to the
+server with the given structure to the table with another structure.
+
+`structure` - structure of data sent to the server in the following format `'column1_name column1_type, column2_name column2_type, ...'`.
+For example, `'id UInt32, name String'`.
+
+This function can be used only in `INSERT SELECT` query and only once but otherwise behaves like an ordinary table function
+(for example, it can be used in a subquery, etc.).
+
+Data can be sent in any way like for an ordinary `INSERT` query and passed in any available [format](../../interfaces/formats.md#formats)
+that must be specified at the end of the query (unlike an ordinary `INSERT SELECT`).
+
+The main feature of this function is that when the server receives data from the client it converts it simultaneously
+according to the list of expressions in the `SELECT` clause and inserts into the target table. A temporary table
+with all the transferred data is not created.
+
+**Examples**
+
+- Let the `test` table have the following structure `(a String, b String)`
+  and data in `data.csv` has a different structure `(col1 String, col2 Date, col3 Int32)`. The query to insert
+  data from `data.csv` into the `test` table with simultaneous conversion looks like this:
+
+``` bash
+$ cat data.csv | clickhouse-client --query="INSERT INTO test SELECT lower(col1), col3 * col3 FROM input('col1 String, col2 Date, col3 Int32') FORMAT CSV";
+```
+
+- If `data.csv` contains data of the same structure `test_structure` as the table `test` then these two queries are equal:
+
+``` bash
+$ cat data.csv | clickhouse-client --query="INSERT INTO test FORMAT CSV"
+$ cat data.csv | clickhouse-client --query="INSERT INTO test SELECT * FROM input('test_structure') FORMAT CSV"
+```
+
+[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/input/)
diff --git a/docs/fa/sql_reference/table_functions/jdbc.md b/docs/fa/sql_reference/table_functions/jdbc.md
new file mode 100644
index 00000000000..4fd500df33c
--- /dev/null
+++ b/docs/fa/sql_reference/table_functions/jdbc.md
@@ -0,0 +1,29 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 43
+toc_title: jdbc
+---
+
+# jdbc {#table-function-jdbc}
+
+`jdbc(jdbc_connection_uri, schema, table)` - returns a table that is connected via the JDBC driver.
+
+This table function requires a separate `clickhouse-jdbc-bridge` program to be running.
+It supports Nullable types (based on the DDL of the remote table that is queried).
+
+**Examples**
+
+``` sql
+SELECT * FROM jdbc('jdbc:mysql://localhost:3306/?user=root&password=root', 'schema', 'table')
+```
+
+``` sql
+SELECT * FROM jdbc('mysql://localhost:3306/?user=root&password=root', 'schema', 'table')
+```
+
+``` sql
+SELECT * FROM jdbc('datasource://mysql-local', 'schema', 'table')
+```
+
+[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/jdbc/)
diff --git a/docs/fa/sql_reference/table_functions/merge.md b/docs/fa/sql_reference/table_functions/merge.md
new file mode 100644
index 00000000000..5e843f4e460
--- /dev/null
+++ b/docs/fa/sql_reference/table_functions/merge.md
@@ -0,0 +1,14 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 38
+toc_title: merge
+---
+
+# merge {#merge}
+
+`merge(db_name, 'tables_regexp')` – Creates a temporary Merge table. For more information, see the section “Table engines, Merge”.
+
+The table structure is taken from the first table encountered that matches the regular expression.
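+
+A minimal sketch (the `default` database and the tables `hits_2019` and `hits_2020` are hypothetical; every table whose name matches the regular expression is read as one):
+
+``` sql
+-- counts rows across default.hits_2019, default.hits_2020, and any other table matching '^hits_'
+SELECT count() FROM merge(default, '^hits_')
+```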
+
+[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/merge/)
diff --git a/docs/fa/sql_reference/table_functions/mysql.md b/docs/fa/sql_reference/table_functions/mysql.md
new file mode 100644
index 00000000000..2c00529040c
--- /dev/null
+++ b/docs/fa/sql_reference/table_functions/mysql.md
@@ -0,0 +1,86 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 42
+toc_title: mysql
+---
+
+# mysql {#mysql}
+
+Allows `SELECT` queries to be performed on data that is stored on a remote MySQL server.
+
+``` sql
+mysql('host:port', 'database', 'table', 'user', 'password'[, replace_query, 'on_duplicate_clause']);
+```
+
+**Parameters**
+
+- `host:port` — MySQL server address.
+
+- `database` — Remote database name.
+
+- `table` — Remote table name.
+
+- `user` — MySQL user.
+
+- `password` — User password.
+
+- `replace_query` — Flag that converts `INSERT INTO` queries to `REPLACE INTO`. If `replace_query=1`, the query is substituted.
+
+- `on_duplicate_clause` — The `ON DUPLICATE KEY on_duplicate_clause` expression that is added to the `INSERT` query.
+
+    Example: `INSERT INTO t (c1,c2) VALUES ('a', 2) ON DUPLICATE KEY UPDATE c2 = c2 + 1`, where `on_duplicate_clause` is `UPDATE c2 = c2 + 1`. See the MySQL documentation to find which `on_duplicate_clause` you can use with the `ON DUPLICATE KEY` clause.
+
+    To specify `on_duplicate_clause` you need to pass `0` to the `replace_query` parameter. If you simultaneously pass `replace_query = 1` and `on_duplicate_clause`, ClickHouse generates an exception.
+
+Simple `WHERE` clauses such as `=, !=, >, >=, <, <=` are currently executed on the MySQL server.
+
+The rest of the conditions and the `LIMIT` sampling constraint are executed in ClickHouse only after the query to MySQL finishes.
+
+**Returned value**
+
+A table object with the same columns as the original MySQL table.
+
+## Usage Example {#usage-example}
+
+Table in MySQL:
+
+``` text
+mysql> CREATE TABLE `test`.`test` (
+    ->   `int_id` INT NOT NULL AUTO_INCREMENT,
+    ->   `int_nullable` INT NULL DEFAULT NULL,
+    ->   `float` FLOAT NOT NULL,
+    ->   `float_nullable` FLOAT NULL DEFAULT NULL,
+    ->   PRIMARY KEY (`int_id`));
+Query OK, 0 rows affected (0,09 sec)
+
+mysql> insert into test (`int_id`, `float`) VALUES (1,2);
+Query OK, 1 row affected (0,00 sec)
+
+mysql> select * from test;
++--------+--------------+-------+----------------+
+| int_id | int_nullable | float | float_nullable |
++--------+--------------+-------+----------------+
+|      1 |         NULL |     2 |           NULL |
++--------+--------------+-------+----------------+
+1 row in set (0,00 sec)
+```
+
+Selecting data from ClickHouse:
+
+``` sql
+SELECT * FROM mysql('localhost:3306', 'test', 'test', 'bayonet', '123')
+```
+
+``` text
+┌─int_id─┬─int_nullable─┬─float─┬─float_nullable─┐
+│      1 │         ᴺᵁᴸᴸ │     2 │           ᴺᵁᴸᴸ │
+└────────┴──────────────┴───────┴────────────────┘
+```
+
+## See Also {#see-also}
+
+- [The ‘MySQL’ table engine](../../engines/table_engines/integrations/mysql.md)
+- [Using MySQL as a source of external dictionary](../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md#dicts-external_dicts_dict_sources-mysql)
+
+[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/mysql/)
diff --git a/docs/fa/sql_reference/table_functions/numbers.md b/docs/fa/sql_reference/table_functions/numbers.md
new file mode 100644
index 00000000000..86a4829ef72
--- /dev/null
+++ b/docs/fa/sql_reference/table_functions/numbers.md
@@ -0,0 +1,30 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 39
+toc_title: numbers
+---
+
+# numbers {#numbers}
+
+`numbers(N)` – Returns a table with the single ‘number’ column (UInt64) that contains integers from 0 to N-1.
+`numbers(N, M)` - Returns a table with the single ‘number’ column (UInt64) that contains integers from N to (N + M - 1).
+
+Similar to the `system.numbers` table, it can be used for testing and generating successive values, with `numbers(N, M)` being more efficient than `system.numbers`.
+
+The following queries are equivalent:
+
+``` sql
+SELECT * FROM numbers(10);
+SELECT * FROM numbers(0, 10);
+SELECT * FROM system.numbers LIMIT 10;
+```
+
+Examples:
+
+``` sql
+-- Generate a sequence of dates from 2010-01-01 to 2010-12-31
+select toDate('2010-01-01') + number as d FROM numbers(365);
+```
+
+[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/numbers/)
diff --git a/docs/fa/sql_reference/table_functions/odbc.md b/docs/fa/sql_reference/table_functions/odbc.md
new file mode 100644
index 00000000000..088d1b9bd02
--- /dev/null
+++ b/docs/fa/sql_reference/table_functions/odbc.md
@@ -0,0 +1,108 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 44
+toc_title: odbc
+---
+
+# odbc {#table-functions-odbc}
+
+Returns the table that is connected via [ODBC](https://en.wikipedia.org/wiki/Open_Database_Connectivity).
+
+``` sql
+odbc(connection_settings, external_database, external_table)
+```
+
+Parameters:
+
+- `connection_settings` — Name of the section with connection settings in the `odbc.ini` file.
+- `external_database` — Name of a database in an external DBMS.
+- `external_table` — Name of a table in the `external_database`.
+
+To safely implement ODBC connections, ClickHouse uses a separate program `clickhouse-odbc-bridge`. If the ODBC driver is loaded directly from `clickhouse-server`, driver problems can crash the ClickHouse server. ClickHouse automatically starts `clickhouse-odbc-bridge` when it is required. The ODBC bridge program is installed from the same package as the `clickhouse-server`.
+
+Fields with the `NULL` values from the external table are converted into the default values for the base data type. For example, if a remote MySQL table field has the `INT NULL` type it is converted to 0 (the default value for the ClickHouse `Int32` data type).
+
+## Usage Example {#usage-example}
+
+**Getting data from the local MySQL installation via ODBC**
+
+This example is checked for Ubuntu Linux 18.04 and MySQL server 5.7.
+
+Ensure that unixODBC and MySQL Connector are installed.
+
+By default (if installed from packages), ClickHouse starts as user `clickhouse`. Thus, you need to create and configure this user in the MySQL server.
+
+``` bash
+$ sudo mysql
+```
+
+``` sql
+mysql> CREATE USER 'clickhouse'@'localhost' IDENTIFIED BY 'clickhouse';
+mysql> GRANT ALL PRIVILEGES ON *.* TO 'clickhouse'@'clickhouse' WITH GRANT OPTION;
+```
+
+Then configure the connection in `/etc/odbc.ini`.
+
+``` bash
+$ cat /etc/odbc.ini
+[mysqlconn]
+DRIVER = /usr/local/lib/libmyodbc5w.so
+SERVER = 127.0.0.1
+PORT = 3306
+DATABASE = test
+USERNAME = clickhouse
+PASSWORD = clickhouse
+```
+
+You can check the connection using the `isql` utility from the unixODBC installation.
+
+``` bash
+$ isql -v mysqlconn
++-------------------------+
+| Connected!              |
+|                         |
+...
+```
+
+Table in MySQL:
+
+``` text
+mysql> CREATE TABLE `test`.`test` (
+    ->   `int_id` INT NOT NULL AUTO_INCREMENT,
+    ->   `int_nullable` INT NULL DEFAULT NULL,
+    ->   `float` FLOAT NOT NULL,
+    ->   `float_nullable` FLOAT NULL DEFAULT NULL,
+    ->   PRIMARY KEY (`int_id`));
+Query OK, 0 rows affected (0,09 sec)
+
+mysql> insert into test (`int_id`, `float`) VALUES (1,2);
+Query OK, 1 row affected (0,00 sec)
+
+mysql> select * from test;
++--------+--------------+-------+----------------+
+| int_id | int_nullable | float | float_nullable |
++--------+--------------+-------+----------------+
+|      1 |         NULL |     2 |           NULL |
++--------+--------------+-------+----------------+
+1 row in set (0,00 sec)
+```
+
+Retrieving data from the MySQL table in ClickHouse:
+
+``` sql
+SELECT * FROM odbc('DSN=mysqlconn', 'test', 'test')
+```
+
+``` text
+┌─int_id─┬─int_nullable─┬─float─┬─float_nullable─┐
+│      1 │            0 │     2 │              0 │
+└────────┴──────────────┴───────┴────────────────┘
+```
+
+## See Also {#see-also}
+
+- [ODBC external dictionaries](../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md#dicts-external_dicts_dict_sources-odbc)
+- [ODBC table engine](../../engines/table_engines/integrations/odbc.md).
+
+[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/jdbc/)
diff --git a/docs/fa/sql_reference/table_functions/remote.md b/docs/fa/sql_reference/table_functions/remote.md
new file mode 100644
index 00000000000..8763779d9d4
--- /dev/null
+++ b/docs/fa/sql_reference/table_functions/remote.md
@@ -0,0 +1,83 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 40
+toc_title: remote
+---
+
+# remote, remoteSecure {#remote-remotesecure}
+
+Allows you to access remote servers without creating a `Distributed` table.
+
+Signatures:
+
+``` sql
+remote('addresses_expr', db, table[, 'user'[, 'password']])
+remote('addresses_expr', db.table[, 'user'[, 'password']])
+```
+
+`addresses_expr` – An expression that generates addresses of remote servers. This may be just one server address. The server address is `host:port`, or just `host`. The host can be specified as the server name, or as the IPv4 or IPv6 address. An IPv6 address is specified in square brackets. The port is the TCP port on the remote server. If the port is omitted, it uses `tcp_port` from the server's config file (by default, 9000).
+
+!!! important "Important"
+    The port is required for an IPv6 address.
+
+Examples:
+
+``` text
+example01-01-1
+example01-01-1:9000
+localhost
+127.0.0.1
+[::]:9000
+[2a02:6b8:0:1111::11]:9000
+```
+
+Multiple addresses can be comma-separated. In this case, ClickHouse will use distributed processing, so it will send the query to all specified addresses (like to shards with different data).
+
+Example:
+
+``` text
+example01-01-1,example01-02-1
+```
+
+Part of the expression can be specified in curly brackets. The previous example can be written as follows:
+
+``` text
+example01-0{1,2}-1
+```
+
+Curly brackets can contain a range of numbers separated by two dots (non-negative integers). In this case, the range is expanded to a set of values that generate shard addresses. If the first number starts with zero, the values are formed with the same zero alignment. The previous example can be written as follows:
+
+``` text
+example01-{01..02}-1
+```
+
+If you have multiple pairs of curly brackets, it generates the direct product of the corresponding sets.
+
+Addresses and parts of addresses in curly brackets can be separated by the pipe symbol (\|). In this case, the corresponding sets of addresses are interpreted as replicas, and the query will be sent to the first healthy replica. However, the replicas are iterated in the order currently set in the [load\_balancing](../../operations/settings/settings.md) setting.
+
+Example:
+
+``` text
+example01-{01..02}-{1|2}
+```
+
+This example specifies two shards that each have two replicas.
+
+The number of addresses generated is limited by a constant. Right now this is 1000 addresses.
+
+Using the `remote` table function is less optimal than creating a `Distributed` table, because in this case, the server connection is re-established for every request. In addition, if host names are set, the names are resolved, and errors are not counted when working with various replicas. When processing a large number of queries, always create the `Distributed` table ahead of time, and don't use the `remote` table function.
+
+The `remote` table function can be useful in the following cases:
+
+- Accessing a specific server for data comparison, debugging, and testing.
+- Queries between various ClickHouse clusters for research purposes.
+- Infrequent distributed requests that are made manually.
+- Distributed requests where the set of servers is re-defined each time.
+
+If the user is not specified, `default` is used.
+If the password is not specified, an empty password is used.
+
+`remoteSecure` - same as `remote` but with secured connection. Default port — [tcp\_port\_secure](../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-tcp_port_secure) from config or 9440.
+
+[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/remote/)
diff --git a/docs/fa/sql_reference/table_functions/url.md b/docs/fa/sql_reference/table_functions/url.md
new file mode 100644
index 00000000000..0b282bf6633
--- /dev/null
+++ b/docs/fa/sql_reference/table_functions/url.md
@@ -0,0 +1,26 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 41
+toc_title: url
+---
+
+# url {#url}
+
+`url(URL, format, structure)` - returns a table created from the `URL` with the given
+`format` and `structure`.
+
+URL - HTTP or HTTPS server address, which can accept `GET` and/or `POST` requests.
+
+format - [format](../../interfaces/formats.md#formats) of the data.
+
+structure - table structure in `'UserID UInt64, Name String'` format. Determines column names and types.
+
+**Example**
+
+``` sql
+-- getting the first 3 lines of a table that contains columns of String and UInt32 type from HTTP-server which answers in CSV format.
+SELECT * FROM url('http://127.0.0.1:12345/', CSV, 'column1 String, column2 UInt32') LIMIT 3
+```
+
+[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/url/)
diff --git a/docs/fa/whats_new/changelog/2017.md b/docs/fa/whats_new/changelog/2017.md
new file mode 100644
index 00000000000..da41e1828b2
--- /dev/null
+++ b/docs/fa/whats_new/changelog/2017.md
@@ -0,0 +1,268 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 79
+toc_title: '2017'
+---
+
+### ClickHouse release 1.1.54327, 2017-12-21 {#clickhouse-release-1-1-54327-2017-12-21}
+
+This release contains bug fixes for the previous release 1.1.54318:
+
+- Fixed a bug with a possible race condition in replication that could lead to data loss. This issue affects versions 1.1.54310 and 1.1.54318. If you use one of these versions with Replicated tables, the update is strongly recommended. This issue shows up in logs in Warning messages like `Part ... from own log doesn't exist.` The issue is relevant even if you don't see these messages in logs.
+
+### ClickHouse release 1.1.54318, 2017-11-30 {#clickhouse-release-1-1-54318-2017-11-30}
+
+This release contains bug fixes for the previous release 1.1.54310:
+
+- Fixed incorrect row deletions during merges in the SummingMergeTree engine
+- Fixed a memory leak in unreplicated MergeTree engines
+- Fixed performance degradation with frequent inserts in MergeTree engines
+- Fixed an issue that was causing the replication queue to stop running
+- Fixed rotation and archiving of server logs
+
+### ClickHouse release 1.1.54310, 2017-11-01 {#clickhouse-release-1-1-54310-2017-11-01}
+
+#### New features: {#new-features}
+
+- Custom partitioning key for the MergeTree family of table engines.
+- [Kafka](https://clickhouse.yandex/docs/en/operations/table_engines/kafka/) table engine.
+- Added support for loading [CatBoost](https://catboost.yandex/) models and applying them to data stored in ClickHouse.
+- Added support for time zones with non-integer offsets from UTC.
+- Added support for arithmetic operations with time intervals.
+- The range of values for the Date and DateTime types is extended to the year 2105.
+- Added the `CREATE MATERIALIZED VIEW x TO y` query (specifies an existing table for storing the data of a materialized view).
+- Added the `ATTACH TABLE` query without arguments.
+- The processing logic for Nested columns with names ending in -Map in a SummingMergeTree table was extracted to the sumMap aggregate function. You can now specify such columns explicitly.
+- Max size of the IP trie dictionary is increased to 128M entries.
+- Added the getSizeOfEnumType function.
+- Added the sumWithOverflow aggregate function.
+- Added support for the Cap'n Proto input format.
+- You can now customize the compression level when using the zstd algorithm.
+
+#### Backward incompatible changes: {#backward-incompatible-changes}
+
+- Creation of temporary tables with an engine other than Memory is not allowed.
+- Explicit creation of tables with the View or MaterializedView engine is not allowed.
+- During table creation, a new check verifies that the sampling key expression is included in the primary key.
+
+#### Bug fixes: {#bug-fixes}
+
+- Fixed hangups when synchronously inserting into a Distributed table.
+- Fixed nonatomic adding and removing of parts in Replicated tables.
+- Data inserted into a materialized view is not subjected to unnecessary deduplication.
+- Executing a query to a Distributed table for which the local replica is lagging and remote replicas are unavailable does not result in an error anymore.
+- Users don't need access permissions to the `default` database to create temporary tables anymore.
+- Fixed crashing when specifying the Array type without arguments.
+- Fixed hangups when the disk volume containing server logs is full.
+- Fixed an overflow in the toRelativeWeekNum function for the first week of the Unix epoch.
+
+#### Build improvements: {#build-improvements}
+
+- Several third-party libraries (notably Poco) were updated and converted to git submodules.
+
+### ClickHouse release 1.1.54304, 2017-10-19 {#clickhouse-release-1-1-54304-2017-10-19}
+
+#### New features: {#new-features-1}
+
+- TLS support in the native protocol (to enable, set `tcp_ssl_port` in `config.xml` ).
+
+#### Bug fixes: {#bug-fixes-1}
+
+- `ALTER` for replicated tables now tries to start running as soon as possible.
+- Fixed crashing when reading data with the setting `preferred_block_size_bytes=0.`
+- Fixed crashes of `clickhouse-client` when pressing `Page Down`
+- Correct interpretation of certain complex queries with `GLOBAL IN` and `UNION ALL`
+- `FREEZE PARTITION` always works atomically now.
+- Empty POST requests now return a response with code 411.
+- Fixed interpretation errors for expressions like `CAST(1 AS Nullable(UInt8)).`
+- Fixed an error when reading `Array(Nullable(String))` columns from `MergeTree` tables.
+- Fixed crashing when parsing queries like `SELECT dummy AS dummy, dummy AS b`
+- Users are updated correctly with an invalid `users.xml`
+- Correct handling when an executable dictionary returns a non-zero response code.
+
+### ClickHouse release 1.1.54292, 2017-09-20 {#clickhouse-release-1-1-54292-2017-09-20}
+
+#### New features: {#new-features-2}
+
+- Added the `pointInPolygon` function for working with coordinates on a coordinate plane.
+- Added the `sumMap` aggregate function for calculating the sum of arrays, similar to `SummingMergeTree`.
+- Added the `trunc` function. Improved performance of the rounding functions (`round`, `floor`, `ceil`, `roundToExp2`) and corrected the logic of how they work. Changed the logic of the `roundToExp2` function for fractions and negative numbers.
+- The ClickHouse executable file is now less dependent on the libc version. The same ClickHouse executable file can run on a wide variety of Linux systems. There is still a dependency when using compiled queries (with the setting `compile = 1`, which is not used by default).
+- Reduced the time needed for dynamic compilation of queries.
+
+#### Bug fixes: {#bug-fixes-2}
+
+- Fixed an error that sometimes produced `part ... intersects previous part` messages and weakened replica consistency.
+- Fixed an error that caused the server to lock up if ZooKeeper was unavailable during shutdown.
+- Removed excessive logging when restoring replicas.
+- Fixed an error in the UNION ALL implementation.
+- Fixed an error in the concat function that occurred if the first column in a block has the Array type.
+- Progress is now displayed correctly in the system.merges table.
+
+### ClickHouse release 1.1.54289, 2017-09-13 {#clickhouse-release-1-1-54289-2017-09-13}
+
+#### New features: {#new-features-3}
+
+- `SYSTEM` queries for server administration: `SYSTEM RELOAD DICTIONARY`, `SYSTEM RELOAD DICTIONARIES`, `SYSTEM DROP DNS CACHE`, `SYSTEM SHUTDOWN`, `SYSTEM KILL`.
+- Added functions for working with arrays: `concat`, `arraySlice`, `arrayPushBack`, `arrayPushFront`, `arrayPopBack`, `arrayPopFront`.
+- Added the `root` and `identity` parameters for the ZooKeeper configuration. This allows you to isolate individual users on the same ZooKeeper cluster.
+- Added the aggregate functions `groupBitAnd`, `groupBitOr`, and `groupBitXor` (for compatibility, they are also available under the names `BIT_AND`, `BIT_OR`, and `BIT_XOR`).
+- External dictionaries can be loaded from MySQL by specifying a socket in the filesystem.
+- External dictionaries can be loaded from MySQL over SSL (the `ssl_cert`, `ssl_key`, and `ssl_ca` parameters).
+- Added the `max_network_bandwidth_for_user` setting to restrict the overall bandwidth use for queries per user.
+- Support for `DROP TABLE` for temporary tables.
+- Support for reading `DateTime` values in Unix timestamp format from the `CSV` and `JSONEachRow` formats.
+- Lagging replicas in distributed queries are now excluded by default (the default threshold is 5 minutes).
+- FIFO locking is used during ALTER: an ALTER query isn't blocked indefinitely for continuously running queries.
+- Option to set `umask` in the config file.
+- Improved performance for queries with `DISTINCT`.
+
+#### Bug fixes: {#bug-fixes-3}
+
+- Improved the process for deleting old nodes in ZooKeeper. Previously, old nodes sometimes didn't get deleted if there were very frequent inserts, which caused the server to be slow to shut down, among other things.
+- Fixed randomization when choosing hosts for the connection to ZooKeeper.
+- Fixed the exclusion of lagging replicas in distributed queries if the replica is localhost.
+- Fixed an error where a data part in a `ReplicatedMergeTree` table could be broken after running `ALTER MODIFY` on an element in a `Nested` structure.
+- Fixed an error that could cause SELECT queries to "hang".
+- Improvements to distributed DDL queries.
+- Fixed the query `CREATE TABLE ... AS <materialized view>`.
+- Resolved the deadlock in the `ALTER ... CLEAR COLUMN IN PARTITION` query for `Buffer` tables.
+- Fixed the invalid default value for `Enum`s (0 instead of the minimum) when using the `JSONEachRow` and `TSKV` formats.
+- Resolved the appearance of zombie processes when using a dictionary with an `executable` source.
+- Fixed a segfault for the HEAD query.
+
+#### Improved workflow for developing and assembling ClickHouse: {#improved-workflow-for-developing-and-assembling-clickhouse}
+
+- You can use `pbuilder` to build ClickHouse.
+- You can use `libc++` instead of `libstdc++` for builds on Linux.
+- Added instructions for using static code analysis tools: `Coverage`, `clang-tidy`, `cppcheck`.
+
+#### Please note when upgrading: {#please-note-when-upgrading}
+
+- There is now a higher default value for the MergeTree setting `max_bytes_to_merge_at_max_space_in_pool` (the maximum total size of data parts to merge, in bytes): it has been raised from 100 GiB to 150 GiB. This might result in large merges running after the server upgrade, which could increase the load on the disk subsystem. If the free space available on the server is less than twice the total amount of the merges that are running, this will cause all other merges to stop running, including merges of small data parts. As a result, INSERT queries will fail with the message "Merges are processing significantly slower than inserts." Use the `SELECT * FROM system.merges` query to monitor the situation (see the sketch after this section). You can also check the `DiskSpaceReservedForMerge` metric in the `system.metrics` table, or in Graphite. You don't need to do anything to fix this, since the issue will resolve itself once the large merges finish. If you find this unacceptable, you can restore the previous value for the `max_bytes_to_merge_at_max_space_in_pool` setting. To do this, set `<max_bytes_to_merge_at_max_space_in_pool>107374182400</max_bytes_to_merge_at_max_space_in_pool>` in the `<merge_tree>` section of config.xml and restart the server.
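+
+As a hedged illustration of the monitoring advice above (these are plain reads of system tables, not new syntax from this release):
+
+``` sql
+-- Watch the merges that are currently running.
+SELECT * FROM system.merges;
+
+-- Check how much disk space is currently reserved for running merges.
+SELECT value FROM system.metrics WHERE metric = 'DiskSpaceReservedForMerge';
+```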
+
+### ClickHouse release 1.1.54284, 2017-08-29 {#clickhouse-release-1-1-54284-2017-08-29}
+
+- This is a bugfix release for the previous 1.1.54282 release. It fixes leaks in the parts directory in ZooKeeper.
+
+### ClickHouse release 1.1.54282, 2017-08-23 {#clickhouse-release-1-1-54282-2017-08-23}
+
+This release contains bug fixes for the previous release 1.1.54276:
+
+- Fixed `DB::Exception: Assertion violation: !_path.empty()` when inserting into a Distributed table.
+- Fixed parsing when inserting in RowBinary format if the input data starts with `;`.
+- Errors during runtime compilation of certain aggregate functions (e.g. `groupArray()`).
+
+### ClickHouse release 1.1.54276, 2017-08-16 {#clickhouse-release-1-1-54276-2017-08-16}
+
+#### New features: {#new-features-4}
+
+- Added an optional WITH section for a SELECT query. Example query: `WITH 1+1 AS a SELECT a, a*a`
+- INSERT can be performed synchronously in a Distributed table: OK is returned only after all the data is saved on all the shards. This is activated by the setting insert\_distributed\_sync=1.
+- Added the UUID data type for working with 16-byte identifiers.
+- Added aliases of CHAR, FLOAT, and other types for compatibility with Tableau.
+- Added the functions toYYYYMM, toYYYYMMDD, and toYYYYMMDDhhmmss for converting time into numbers.
+- You can use IP addresses (together with the hostname) to identify servers for clustered DDL queries.
+- Added support for non-constant arguments and negative offsets in the function `substring(str, pos, len).`
+- Added the max\_size parameter for the `groupArray(max_size)(column)` aggregate function, and optimized its performance.
+
+#### Main changes: {#main-changes}
+
+- Security improvements: all server files are created with 0640 permissions (can be changed via the umask config parameter).
+- Improved error messages for queries with invalid syntax.
+- Significantly reduced memory consumption and improved performance when merging large sections of MergeTree data.
+- Significantly increased the performance of data merges for the ReplacingMergeTree engine.
+- Improved performance for asynchronous inserts from a Distributed table by combining multiple source inserts. To enable this functionality, use the setting distributed\_directory\_monitor\_batch\_inserts=1.
+
+#### Backward incompatible changes: {#backward-incompatible-changes-1}
+
+- Changed the binary format of aggregate states of `groupArray(array_column)` functions for arrays.
+
+#### Complete list of changes: {#complete-list-of-changes}
+
+- Added the `output_format_json_quote_denormals` setting, which enables outputting nan and inf values in JSON format.
+- Optimized stream allocation when reading from a Distributed table.
+- Settings can be configured in readonly mode if the value doesn't change.
+- Added the ability to retrieve non-integer granules of the MergeTree engine in order to meet restrictions on the block size specified in the preferred\_block\_size\_bytes setting. The purpose is to reduce the consumption of RAM and increase cache locality when processing queries from tables with large columns.
+- Efficient use of indexes that contain expressions like `toStartOfHour(x)` for conditions like `toStartOfHour(x) op сonstexpr.`
+- Added new settings for MergeTree engines (the merge\_tree section in config.xml):
+  - replicated\_deduplication\_window\_seconds sets the number of seconds allowed for deduplicating inserts in Replicated tables.
+  - cleanup\_delay\_period sets how often to start cleanup to remove outdated data.
+  - replicated\_can\_become\_leader can prevent a replica from becoming the leader (and assigning merges).
+- Accelerated cleanup to remove outdated data from ZooKeeper.
+- Multiple improvements and fixes for clustered DDL queries.
+Of particular interest is the new setting distributed\_ddl\_task\_timeout, which limits the time to wait for a response from the servers in the cluster. If a DDL request has not been performed on all hosts, a response will contain a timeout error and the request will be executed in an async mode.
+- Improved display of stack traces in the server logs.
+- Added the "none" value for the compression method.
+- You can use multiple dictionaries\_config sections in config.xml.
+- It is possible to connect to MySQL through a socket in the file system.
+- The system.parts table has a new column with information about the size of marks, in bytes.
+
+#### Bug fixes: {#bug-fixes-4}
+
+- Distributed tables using a Merge table now work correctly for a SELECT query with a condition on the `_table` field.
+- Fixed a rare race condition in ReplicatedMergeTree when checking data parts.
+- Fixed possible freezing on "leader election" when starting a server.
+- The max\_replica\_delay\_for\_distributed\_queries setting was ignored when using a local replica of the data source. This has been fixed.
+- Fixed incorrect behavior of `ALTER TABLE CLEAR COLUMN IN PARTITION` when attempting to clean a non-existing column.
+- Fixed an exception in the multiIf function when using empty arrays or strings.
+- Fixed excessive memory allocations when deserializing the Native format.
+- Fixed incorrect auto-update of Trie dictionaries.
+- Fixed an exception when running queries with a GROUP BY clause from a Merge table when using SAMPLE.
+- Fixed a crash of GROUP BY when using distributed\_aggregation\_memory\_efficient=1.
+- Now you can specify the database.table in the right side of IN and JOIN.
+- Too many threads were used for parallel aggregation. This has been fixed.
+- Fixed how the "if" function works with FixedString arguments.
+- SELECT worked incorrectly from a Distributed table for shards with a weight of 0. This has been fixed.
+- Running `CREATE VIEW IF EXISTS no longer causes crashes.`
+- Fixed incorrect behavior when input\_format\_skip\_unknown\_fields=1 is set and there are negative numbers.
+- Fixed an infinite loop in the `dictGetHierarchy()` function if there is some invalid data in the dictionary.
+- Fixed `Syntax error: unexpected (...)` errors when running distributed queries with subqueries in an IN or JOIN clause and Merge tables.
+- Fixed an incorrect interpretation of a SELECT query from Dictionary tables.
+- Fixed the "Cannot mremap" error when using arrays in IN and JOIN clauses with more than 2 billion elements.
+- Fixed the failover for dictionaries with MySQL as the source.
+
+#### Improved workflow for developing and assembling ClickHouse: {#improved-workflow-for-developing-and-assembling-clickhouse-1}
+
+- Builds can be assembled in Arcadia.
+- You can use gcc 7 to compile ClickHouse.
+- Parallel builds using ccache+distcc are faster now.
+
+### ClickHouse release 1.1.54245, 2017-07-04 {#clickhouse-release-1-1-54245-2017-07-04}
+
+#### New features: {#new-features-5}
+
+- Distributed DDL (for example, `CREATE TABLE ON CLUSTER`); see the sketch after this list.
+- The replicated query `ALTER TABLE CLEAR COLUMN IN PARTITION.`
+- The engine for Dictionary tables (access to dictionary data in the form of a table).
+- Dictionary database engine (this type of database automatically has Dictionary tables available for all the connected external dictionaries).
+- You can check for updates to the dictionary by sending a request to the source.
+- Qualified column names
+- Quoting identifiers using double quotation marks.
+- Sessions in the HTTP interface.
+- The OPTIMIZE query for a Replicated table can run not only on the leader.
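+
+A minimal hedged sketch of the distributed DDL syntax from the first item above. The cluster name `my_cluster` is hypothetical and must match a cluster defined in the server configuration; the old-style MergeTree engine arguments of this era are assumed.
+
+``` sql
+-- Runs the same CREATE on every host of the cluster.
+CREATE TABLE IF NOT EXISTS default.events ON CLUSTER my_cluster
+(
+    EventDate Date,
+    EventID UInt64
+) ENGINE = MergeTree(EventDate, (EventDate, EventID), 8192);
+```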
+
+#### Backward incompatible changes: {#backward-incompatible-changes-2}
+
+- Removed SET GLOBAL.
+
+#### Minor changes: {#minor-changes}
+
+- Now after an alert is triggered, the log prints the full stack trace.
+- Relaxed the verification of the number of damaged/extra data parts at startup (there were too many false positives).
+
+#### Bug fixes: {#bug-fixes-5}
+
+- Fixed a bad connection "sticking" when inserting into a Distributed table.
+- GLOBAL IN now works for a query from a Merge table that looks at a Distributed table.
+- The incorrect number of cores was detected on a Google Compute Engine virtual machine. This has been fixed.
+- Changes in how an executable source of cached external dictionaries works.
+- Fixed the comparison of strings containing null characters.
+- Fixed the comparison of Float32 primary key fields with constants.
+- Previously, an incorrect estimate of the size of a field could lead to overly large allocations.
+- Fixed a crash when querying a Nullable column added to a table using ALTER.
+- Fixed a crash when sorting by a Nullable column, if the number of rows is less than LIMIT.
+- Fixed an ORDER BY subquery consisting of only constant values.
+- Previously, a Replicated table could remain in the invalid state after a failed DROP TABLE.
+- Aliases for scalar subqueries with empty results are no longer lost.
+- Now a query that used compilation does not fail with an error if the .so file gets damaged.
diff --git a/docs/fa/whats_new/changelog/2018.md b/docs/fa/whats_new/changelog/2018.md
new file mode 100644
index 00000000000..65d9b37dfc1
--- /dev/null
+++ b/docs/fa/whats_new/changelog/2018.md
@@ -0,0 +1,1063 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 78
+toc_title: '2018'
+---
+
+## ClickHouse release 18.16 {#clickhouse-release-18-16}
+
+### ClickHouse release 18.16.1, 2018-12-21 {#clickhouse-release-18-16-1-2018-12-21}
+
+#### Bug fixes: {#bug-fixes}
+
+- Fixed an error that led to problems with updating dictionaries with the ODBC source. [\#3825](https://github.com/ClickHouse/ClickHouse/issues/3825), [\#3829](https://github.com/ClickHouse/ClickHouse/issues/3829)
+- JIT compilation of aggregate functions now works with LowCardinality columns. [\#3838](https://github.com/ClickHouse/ClickHouse/issues/3838)
+
+#### Improvements: {#improvements}
+
+- Added the `low_cardinality_allow_in_native_format` setting (enabled by default). When disabled, LowCardinality columns will be converted to ordinary columns for SELECT queries and ordinary columns will be expected for INSERT queries. [\#3879](https://github.com/ClickHouse/ClickHouse/pull/3879)
+
+#### Build improvements: {#build-improvements}
+
+- Fixes for builds on macOS and ARM.
+
+### ClickHouse release 18.16.0, 2018-12-14 {#clickhouse-release-18-16-0-2018-12-14}
+
+#### New features: {#new-features}
+
+- `DEFAULT` expressions are evaluated for missing fields when loading data in semi-structured input formats (`JSONEachRow`, `TSKV`). The feature is enabled with the `insert_sample_with_metadata` setting. [\#3555](https://github.com/ClickHouse/ClickHouse/pull/3555)
+- The `ALTER TABLE` query now has the `MODIFY ORDER BY` action for changing the sorting key when adding or removing a table column. This is useful for tables in the `MergeTree` family that perform additional tasks when merging based on this sorting key, such as `SummingMergeTree`, `AggregatingMergeTree`, and so on; see the sketch after this list.
+[\#3581](https://github.com/ClickHouse/ClickHouse/pull/3581) [\#3755](https://github.com/ClickHouse/ClickHouse/pull/3755)
+- For tables in the `MergeTree` family, you can now specify a different sorting key (`ORDER BY`) and index (`PRIMARY KEY`). The sorting key can be longer than the index; see the sketch after this list. [\#3581](https://github.com/ClickHouse/ClickHouse/pull/3581)
+- Added the `hdfs` table function and the `HDFS` table engine for importing and exporting data to HDFS. [chenxing-xc](https://github.com/ClickHouse/ClickHouse/pull/3617)
+- Added functions for working with base64: `base64Encode`, `base64Decode`, `tryBase64Decode`. [Alexander Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/3350)
+- Now you can use a parameter to configure the precision of the `uniqCombined` aggregate function (select the number of HyperLogLog cells). [\#3406](https://github.com/ClickHouse/ClickHouse/pull/3406)
+- Added the `system.contributors` table that contains the names of everyone who made commits in ClickHouse. [\#3452](https://github.com/ClickHouse/ClickHouse/pull/3452)
+- Added the ability to omit the partition for the `ALTER TABLE ... FREEZE` query in order to back up all partitions at once. [\#3514](https://github.com/ClickHouse/ClickHouse/pull/3514)
+- Added the `dictGet` and `dictGetOrDefault` functions that don't require specifying the type of the return value. The type is determined automatically from the dictionary description. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3564)
+- Now you can specify comments for a column in the table description and change them using `ALTER`. [\#3377](https://github.com/ClickHouse/ClickHouse/pull/3377)
+- Reading is supported for `Join` type tables with simple keys. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3728)
+- Now you can specify the options `join_use_nulls`, `max_rows_in_join`, `max_bytes_in_join`, and `join_overflow_mode` when creating a `Join` type table. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3728)
+- Added the `joinGet` function that allows you to use a `Join` type table like a dictionary. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3728)
+- Added the `partition_key`, `sorting_key`, `primary_key`, and `sampling_key` columns to the `system.tables` table in order to provide information about table keys. [\#3609](https://github.com/ClickHouse/ClickHouse/pull/3609)
+- Added the `is_in_partition_key`, `is_in_sorting_key`, `is_in_primary_key`, and `is_in_sampling_key` columns to the `system.columns` table. [\#3609](https://github.com/ClickHouse/ClickHouse/pull/3609)
+- Added the `min_time` and `max_time` columns to the `system.parts` table. These columns are populated when the partitioning key is an expression consisting of `DateTime` columns. [Emmanuel Donin de Rosière](https://github.com/ClickHouse/ClickHouse/pull/3800)
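+
+A hedged sketch of the two sorting-key features above; the table `t` and its columns are hypothetical:
+
+``` sql
+-- The index (PRIMARY KEY) may be a prefix of a longer sorting key (ORDER BY).
+CREATE TABLE t (d Date, user UInt64, val UInt64)
+ENGINE = SummingMergeTree()
+ORDER BY (d, user)
+PRIMARY KEY d;
+
+-- Extend the sorting key together with the newly added column in one ALTER.
+ALTER TABLE t ADD COLUMN tag String, MODIFY ORDER BY (d, user, tag);
+```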
+
+#### Bug fixes: {#bug-fixes-1}
+
+- Fixes and performance improvements for the `LowCardinality` data type. `GROUP BY` using `LowCardinality(Nullable(...))`. Getting the values of `extremes`. Processing high-order functions. `LEFT ARRAY JOIN`. Distributed `GROUP BY`. Functions that return `Array`. Execution of `ORDER BY`. Writing to `Distributed` tables (nicelulu). Backward compatibility for `INSERT` queries from old clients that implement the `Native` protocol. Support for `LowCardinality` for `JOIN`. Improved performance when working in a single stream.
+[\#3823](https://github.com/ClickHouse/ClickHouse/pull/3823) [\#3803](https://github.com/ClickHouse/ClickHouse/pull/3803) [\#3799](https://github.com/ClickHouse/ClickHouse/pull/3799) [\#3769](https://github.com/ClickHouse/ClickHouse/pull/3769) [\#3744](https://github.com/ClickHouse/ClickHouse/pull/3744) [\#3681](https://github.com/ClickHouse/ClickHouse/pull/3681) [\#3651](https://github.com/ClickHouse/ClickHouse/pull/3651) [\#3649](https://github.com/ClickHouse/ClickHouse/pull/3649) [\#3641](https://github.com/ClickHouse/ClickHouse/pull/3641) [\#3632](https://github.com/ClickHouse/ClickHouse/pull/3632) [\#3568](https://github.com/ClickHouse/ClickHouse/pull/3568) [\#3523](https://github.com/ClickHouse/ClickHouse/pull/3523) [\#3518](https://github.com/ClickHouse/ClickHouse/pull/3518)
+- Fixed how the `select_sequential_consistency` option works. Previously, when this setting was enabled, an incomplete result was sometimes returned after beginning to write to a new partition. [\#2863](https://github.com/ClickHouse/ClickHouse/pull/2863)
+- Databases are correctly specified when executing DDL `ON CLUSTER` queries and `ALTER UPDATE/DELETE`. [\#3772](https://github.com/ClickHouse/ClickHouse/pull/3772) [\#3460](https://github.com/ClickHouse/ClickHouse/pull/3460)
+- Databases are correctly specified for subqueries inside a VIEW. [\#3521](https://github.com/ClickHouse/ClickHouse/pull/3521)
+- Fixed a bug in `PREWHERE` with `FINAL` for `VersionedCollapsingMergeTree`. [7167bfd7](https://github.com/ClickHouse/ClickHouse/commit/7167bfd7b365538f7a91c4307ad77e552ab4e8c1)
+- Now you can use `KILL QUERY` to cancel queries that have not started yet because they are waiting for the table to be locked. [\#3517](https://github.com/ClickHouse/ClickHouse/pull/3517)
+- Corrected date and time calculations if the clocks were moved back at midnight (this happens in Iran, and happened in Moscow from 1981 to 1983). Previously, this led to the time being reset a day earlier than necessary, and also caused incorrect formatting of the date and time in text format. [\#3819](https://github.com/ClickHouse/ClickHouse/pull/3819)
+- Fixed bugs in some cases of `VIEW` and subqueries that omit the database. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3521)
+- Fixed a race condition when simultaneously reading from a `MATERIALIZED VIEW` and deleting a `MATERIALIZED VIEW` due to not locking the internal `MATERIALIZED VIEW`. [\#3404](https://github.com/ClickHouse/ClickHouse/pull/3404) [\#3694](https://github.com/ClickHouse/ClickHouse/pull/3694)
+- Fixed the error `Lock handler cannot be nullptr.` [\#3689](https://github.com/ClickHouse/ClickHouse/pull/3689)
+- Fixed query processing when the `compile_expressions` option is enabled (it's enabled by default). Nondeterministic constant expressions like the `now` function are no longer unfolded. [\#3457](https://github.com/ClickHouse/ClickHouse/pull/3457)
+- Fixed a crash when specifying a non-constant scale argument in the `toDecimal32/64/128` functions.
+- Fixed an error when trying to insert an array with `NULL` elements in the `Values` format into a column of type `Array` without `Nullable` (if `input_format_values_interpret_expressions` = 1). [\#3487](https://github.com/ClickHouse/ClickHouse/pull/3487) [\#3503](https://github.com/ClickHouse/ClickHouse/pull/3503)
+- Fixed continuous error logging in `DDLWorker` if ZooKeeper is not available. [8f50c620](https://github.com/ClickHouse/ClickHouse/commit/8f50c620334988b28018213ec0092fe6423847e2)
+- Fixed the return type for `quantile*` functions from `Date` and `DateTime` types of arguments.
+[\#3580](https://github.com/ClickHouse/ClickHouse/pull/3580)
+- Fixed the `WITH` clause if it specifies a simple alias without expressions. [\#3570](https://github.com/ClickHouse/ClickHouse/pull/3570)
+- Fixed processing of queries with named subqueries and qualified column names when `enable_optimize_predicate_expression` is enabled. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3588)
+- Fixed the error `Attempt to attach to nullptr thread group` when working with materialized views. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3623)
+- Fixed a crash when passing certain incorrect arguments to the `arrayReverse` function. [73e3a7b6](https://github.com/ClickHouse/ClickHouse/commit/73e3a7b662161d6005e7727d8a711b930386b871)
+- Fixed the buffer overflow in the `extractURLParameter` function. Improved performance. Added correct processing of strings containing zero bytes. [141e9799](https://github.com/ClickHouse/ClickHouse/commit/141e9799e49201d84ea8e951d1bed4fb6d3dacb5)
+- Fixed buffer overflow in the `lowerUTF8` and `upperUTF8` functions. Removed the ability to execute these functions over `FixedString` type arguments. [\#3662](https://github.com/ClickHouse/ClickHouse/pull/3662)
+- Fixed a rare race condition when deleting `MergeTree` tables. [\#3680](https://github.com/ClickHouse/ClickHouse/pull/3680)
+- Fixed a race condition when reading from `Buffer` tables and simultaneously performing `ALTER` or `DROP` on the target tables. [\#3719](https://github.com/ClickHouse/ClickHouse/pull/3719)
+- Fixed a segfault if the `max_temporary_non_const_columns` limit was exceeded. [\#3788](https://github.com/ClickHouse/ClickHouse/pull/3788)
+
+#### Improvements: {#improvements-1}
+
+- The server does not write the processed configuration files to the `/etc/clickhouse-server/` directory. Instead, it saves them in the `preprocessed_configs` directory inside `path`. This means that the `/etc/clickhouse-server/` directory doesn't need write access for the `clickhouse` user, which improves security. [\#2443](https://github.com/ClickHouse/ClickHouse/pull/2443)
+- The `min_merge_bytes_to_use_direct_io` option is set to 10 GiB by default. A merge that forms large parts of tables from the MergeTree family will be performed in `O_DIRECT` mode, which prevents excessive page cache eviction. [\#3504](https://github.com/ClickHouse/ClickHouse/pull/3504)
+- Accelerated server start when there is a very large number of tables. [\#3398](https://github.com/ClickHouse/ClickHouse/pull/3398)
+- Added a connection pool and HTTP `Keep-Alive` for connections between replicas. [\#3594](https://github.com/ClickHouse/ClickHouse/pull/3594)
+- If the query syntax is invalid, the `400 Bad Request` code is returned in the `HTTP` interface (500 was returned previously). [31bc680a](https://github.com/ClickHouse/ClickHouse/commit/31bc680ac5f4bb1d0360a8ba4696fa84bb47d6ab)
+- The `join_default_strictness` option is set to `ALL` by default for compatibility. [120e2cbe](https://github.com/ClickHouse/ClickHouse/commit/120e2cbe2ff4fbad626c28042d9b28781c805afe)
+- Removed logging to `stderr` from the `re2` library for invalid or complex regular expressions. [\#3723](https://github.com/ClickHouse/ClickHouse/pull/3723)
+- Added for the `Kafka` table engine: checks for subscriptions before beginning to read from Kafka; the kafka\_max\_block\_size setting for the table. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3396)
+- The `cityHash64`, `farmHash64`, `metroHash64`, `sipHash64`, `halfMD5`, `murmurHash2_32`, `murmurHash2_64`, `murmurHash3_32`, and `murmurHash3_64` functions now work for any number of arguments and for arguments in the form of tuples.
+[\#3451](https://github.com/ClickHouse/ClickHouse/pull/3451) [\#3519](https://github.com/ClickHouse/ClickHouse/pull/3519)
+- The `arrayReverse` function now works with any types of arrays. [73e3a7b6](https://github.com/ClickHouse/ClickHouse/commit/73e3a7b662161d6005e7727d8a711b930386b871)
+- Added an optional parameter: the slot size for the `timeSlots` function. [Kirill Shvakov](https://github.com/ClickHouse/ClickHouse/pull/3724)
+- For `FULL` and `RIGHT JOIN`, the `max_block_size` setting is used for the stream of non-joined data from the right table. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3699)
+- Added the `--secure` command-line parameter in `clickhouse-benchmark` and `clickhouse-performance-test` to enable TLS. [\#3688](https://github.com/ClickHouse/ClickHouse/pull/3688) [\#3690](https://github.com/ClickHouse/ClickHouse/pull/3690)
+- Type conversion when the structure of a `Buffer` type table does not match the structure of the destination table. [Vitaly Baranov](https://github.com/ClickHouse/ClickHouse/pull/3603)
+- Added the `tcp_keep_alive_timeout` option to enable keep-alive packets after inactivity for the specified time interval. [\#3441](https://github.com/ClickHouse/ClickHouse/pull/3441)
+- Removed unnecessary quoting of values for the partition key in the `system.parts` table if it consists of a single column. [\#3652](https://github.com/ClickHouse/ClickHouse/pull/3652)
+- The modulo function works for the `Date` and `DateTime` data types. [\#3385](https://github.com/ClickHouse/ClickHouse/pull/3385)
+- Added synonyms for the `POWER`, `LN`, `LCASE`, `UCASE`, `REPLACE`, `LOCATE`, `SUBSTR`, and `MID` functions. [\#3774](https://github.com/ClickHouse/ClickHouse/pull/3774) [\#3763](https://github.com/ClickHouse/ClickHouse/pull/3763) Some function names are case-insensitive for compatibility with the SQL standard. Added syntactic sugar `SUBSTRING(expr FROM start FOR length)` for compatibility with SQL. [\#3804](https://github.com/ClickHouse/ClickHouse/pull/3804)
+- Added the ability to `mlock` memory pages corresponding to `clickhouse-server` executable code to prevent it from being forced out of memory. This feature is disabled by default. [\#3553](https://github.com/ClickHouse/ClickHouse/pull/3553)
+- Improved performance when reading from `O_DIRECT` (with the `min_bytes_to_use_direct_io` option enabled). [\#3405](https://github.com/ClickHouse/ClickHouse/pull/3405)
+- Improved performance of the `dictGet...OrDefault` function for a constant key argument and a non-constant default argument. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3563)
+- The `firstSignificantSubdomain` function now processes the domains `gov`, `mil`, and `edu`. [Igor Hatarist](https://github.com/ClickHouse/ClickHouse/pull/3601) Improved performance. [\#3628](https://github.com/ClickHouse/ClickHouse/pull/3628)
+- Ability to specify custom environment variables for starting `clickhouse-server` using the `SYS-V init.d` script by defining `CLICKHOUSE_PROGRAM_ENV` in `/etc/default/clickhouse`.
+  [Pavlo Bashynskyi](https://github.com/ClickHouse/ClickHouse/pull/3612)
+- Correct return code for the clickhouse-server init script. [\#3516](https://github.com/ClickHouse/ClickHouse/pull/3516)
+- The `system.metrics` table now has the `VersionInteger` metric, and `system.build_options` has the added line `VERSION_INTEGER`, which contains the numeric form of the ClickHouse version, such as `18016000`. [\#3644](https://github.com/ClickHouse/ClickHouse/pull/3644)
+- Removed the ability to compare the `Date` type with a number to avoid potential errors like `date = 2018-12-17`, where quotes around the date are omitted by mistake. [\#3687](https://github.com/ClickHouse/ClickHouse/pull/3687)
+- Fixed the behavior of stateful functions like `rowNumberInAllBlocks`.
+Previously, they output a result that was one number larger due to starting during query analysis. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3729)
+- If the `force_restore_data` file can't be deleted, an error message is displayed. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3794)
+
+#### Build improvements: {#build-improvements-1}
+
+- Updated the `jemalloc` library, which fixes a potential memory leak. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3557)
+- Profiling with `jemalloc` is enabled by default for debug builds. [2cc82f5c](https://github.com/ClickHouse/ClickHouse/commit/2cc82f5cbe266421cd4c1165286c2c47e5ffcb15)
+- Added the ability to run integration tests when only `Docker` is installed on the system. [\#3650](https://github.com/ClickHouse/ClickHouse/pull/3650)
+- Added the fuzz expression test in SELECT queries. [\#3442](https://github.com/ClickHouse/ClickHouse/pull/3442)
+- Added a stress test for commits, which performs functional tests in parallel and in random order to detect more race conditions. [\#3438](https://github.com/ClickHouse/ClickHouse/pull/3438)
+- Improved the method for starting clickhouse-server in a Docker image. [Elghazal Ahmed](https://github.com/ClickHouse/ClickHouse/pull/3663)
+- For a Docker image, added support for initializing databases using files in the `/docker-entrypoint-initdb.d` directory. [Konstantin Lebedev](https://github.com/ClickHouse/ClickHouse/pull/3695)
+- Fixes for builds on ARM. [\#3709](https://github.com/ClickHouse/ClickHouse/pull/3709)
+
+#### Backward incompatible changes: {#backward-incompatible-changes}
+
+- Removed the ability to compare the `Date` type with a number. Instead of `toDate('2018-12-18') = 17883`, you must use explicit type conversion `= toDate(17883)` [\#3687](https://github.com/ClickHouse/ClickHouse/pull/3687)
+
+## ClickHouse release 18.14 {#clickhouse-release-18-14}
+
+### ClickHouse release 18.14.19, 2018-12-19 {#clickhouse-release-18-14-19-2018-12-19}
+
+#### Bug fixes: {#bug-fixes-2}
+
+- Fixed an error that led to problems with updating dictionaries with the ODBC source. [\#3825](https://github.com/ClickHouse/ClickHouse/issues/3825), [\#3829](https://github.com/ClickHouse/ClickHouse/issues/3829)
+- Databases are correctly specified when executing DDL `ON CLUSTER` queries. [\#3460](https://github.com/ClickHouse/ClickHouse/pull/3460)
+- Fixed a segfault if the `max_temporary_non_const_columns` limit was exceeded. [\#3788](https://github.com/ClickHouse/ClickHouse/pull/3788)
+
+#### Build improvements: {#build-improvements-2}
+
+- Fixes for builds on ARM.
+
+### ClickHouse release 18.14.18, 2018-12-04 {#clickhouse-release-18-14-18-2018-12-04}
+
+#### Bug fixes: {#bug-fixes-3}
+
+- Fixed an error in the `dictGet...` function for dictionaries of type `range`, if one of the arguments is constant and the other is not. [\#3751](https://github.com/ClickHouse/ClickHouse/pull/3751)
+- Fixed an error that caused messages `netlink: '...': attribute type 1 has an invalid length` to be printed in the Linux kernel log, which was happening only on fresh enough versions of the Linux kernel. [\#3749](https://github.com/ClickHouse/ClickHouse/pull/3749)
+- Fixed a segfault in the function `empty` for arguments of `FixedString` type. [Daniel Dao Quang Minh](https://github.com/ClickHouse/ClickHouse/pull/3703)
+- Fixed excessive memory allocation when using a large value of the `max_query_size` setting (a memory chunk of `max_query_size` bytes was preallocated at once).
+[\#3720](https://github.com/ClickHouse/ClickHouse/pull/3720)
+
+#### Build changes: {#build-changes}
+
+- Fixed the build with LLVM/Clang libraries of version 7 from the OS packages (these libraries are used for runtime query compilation). [\#3582](https://github.com/ClickHouse/ClickHouse/pull/3582)
+
+### ClickHouse release 18.14.17, 2018-11-30 {#clickhouse-release-18-14-17-2018-11-30}
+
+#### Bug fixes: {#bug-fixes-4}
+
+- Fixed cases when the ODBC bridge process did not terminate with the main server process. [\#3642](https://github.com/ClickHouse/ClickHouse/pull/3642)
+- Fixed synchronous insertion into the `Distributed` table with a columns list that differs from the column list of the remote table. [\#3673](https://github.com/ClickHouse/ClickHouse/pull/3673)
+- Fixed a rare race condition that could lead to a crash when dropping a MergeTree table. [\#3643](https://github.com/ClickHouse/ClickHouse/pull/3643)
+- Fixed a query deadlock in the case when query thread creation fails with the `Resource temporarily unavailable` error. [\#3643](https://github.com/ClickHouse/ClickHouse/pull/3643)
+- Fixed parsing of the `ENGINE` clause when the `CREATE AS table` syntax was used and the `ENGINE` clause was specified before the `AS table` (the error resulted in ignoring the specified engine). [\#3692](https://github.com/ClickHouse/ClickHouse/pull/3692)
+
+### ClickHouse release 18.14.15, 2018-11-21 {#clickhouse-release-18-14-15-2018-11-21}
+
+#### Bug fixes: {#bug-fixes-5}
+
+- The size of a memory chunk was overestimated while deserializing columns of type `Array(String)`, which led to "Memory limit exceeded" errors. The issue appeared in version 18.12.13. [\#3589](https://github.com/ClickHouse/ClickHouse/issues/3589)
+
+### ClickHouse release 18.14.14, 2018-11-20 {#clickhouse-release-18-14-14-2018-11-20}
+
+#### Bug fixes: {#bug-fixes-6}
+
+- Fixed `ON CLUSTER` queries when the cluster is configured as secure (the `<secure>` flag). [\#3599](https://github.com/ClickHouse/ClickHouse/pull/3599)
+
+#### Build changes: {#build-changes-1}
+
+- Fixed problems (llvm-7 from the system, macOS). [\#3582](https://github.com/ClickHouse/ClickHouse/pull/3582)
+
+### ClickHouse release 18.14.13, 2018-11-08 {#clickhouse-release-18-14-13-2018-11-08}
+
+#### Bug fixes: {#bug-fixes-7}
+
+- Fixed the `Block structure mismatch in MergingSorted stream` error. [\#3162](https://github.com/ClickHouse/ClickHouse/issues/3162)
+- Fixed `ON CLUSTER` queries in case secure connections were turned on in the cluster config (the `<secure>` flag). [\#3465](https://github.com/ClickHouse/ClickHouse/pull/3465)
+- Fixed an error in queries that used `SAMPLE`, `PREWHERE`, and alias columns. [\#3543](https://github.com/ClickHouse/ClickHouse/pull/3543)
+- Fixed a rare `unknown compression method` error when the `min_bytes_to_use_direct_io` setting was enabled. [3544](https://github.com/ClickHouse/ClickHouse/pull/3544)
+
+#### Performance improvements: {#performance-improvements}
+
+- Fixed a performance regression of queries with `GROUP BY` of columns of UInt16 or Date type when executing on AMD EPYC processors. [Igor Lapko](https://github.com/ClickHouse/ClickHouse/pull/3512)
+- Fixed a performance regression of queries that process long strings. [\#3530](https://github.com/ClickHouse/ClickHouse/pull/3530)
+
+#### Build improvements: {#build-improvements-3}
+
+- Improvements for simplifying the Arcadia build. [\#3475](https://github.com/ClickHouse/ClickHouse/pull/3475), [\#3535](https://github.com/ClickHouse/ClickHouse/pull/3535)
+
+### ClickHouse release 18.14.12, 2018-11-02 {#clickhouse-release-18-14-12-2018-11-02}
+
+#### Bug fixes: {#bug-fixes-8}
+
+- Fixed a crash when joining two unnamed subqueries.
+[\#3505](https://github.com/ClickHouse/ClickHouse/pull/3505)
+- Fixed generating incorrect queries (with an empty `WHERE` clause) when querying external databases. [hotid](https://github.com/ClickHouse/ClickHouse/pull/3477)
+- Fixed using an incorrect timeout value in ODBC dictionaries. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3511)
+
+### ClickHouse release 18.14.11, 2018-10-29 {#clickhouse-release-18-14-11-2018-10-29}
+
+#### Bug fixes: {#bug-fixes-9}
+
+- Fixed the error `Block structure mismatch in UNION stream: different number of columns` in LIMIT queries. [\#2156](https://github.com/ClickHouse/ClickHouse/issues/2156)
+- Fixed errors when merging data in tables containing arrays inside Nested structures. [\#3397](https://github.com/ClickHouse/ClickHouse/pull/3397)
+- Fixed incorrect query results if the `merge_tree_uniform_read_distribution` setting is disabled (it is enabled by default). [\#3429](https://github.com/ClickHouse/ClickHouse/pull/3429)
+- Fixed an error on inserts to a Distributed table in Native format. [\#3411](https://github.com/ClickHouse/ClickHouse/issues/3411)
+
+### ClickHouse release 18.14.10, 2018-10-23 {#clickhouse-release-18-14-10-2018-10-23}
+
+- The `compile_expressions` setting (JIT compilation of expressions) is disabled by default. [\#3410](https://github.com/ClickHouse/ClickHouse/pull/3410)
+- The `enable_optimize_predicate_expression` setting is disabled by default.
+
+### ClickHouse release 18.14.9, 2018-10-16 {#clickhouse-release-18-14-9-2018-10-16}
+
+#### New features: {#new-features-1}
+
+- The `WITH CUBE` modifier for `GROUP BY` (the alternative syntax `GROUP BY CUBE(...)` is also available); see the sketch after this list. [\#3172](https://github.com/ClickHouse/ClickHouse/pull/3172)
+- Added the `formatDateTime` function. [Alexander Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/2770)
+- Added the `JDBC` table engine and `jdbc` table function (requires installing clickhouse-jdbc-bridge). [Alexander Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/3210)
+- Added functions for working with the ISO week number: `toISOWeek`, `toISOYear`, `toStartOfISOYear`, and `toDayOfYear`. [\#3146](https://github.com/ClickHouse/ClickHouse/pull/3146)
+- Now you can use `Nullable` columns for `MySQL` and `ODBC` tables. [\#3362](https://github.com/ClickHouse/ClickHouse/pull/3362)
+- Nested data structures can be read as nested objects in the `JSONEachRow` format. Added the `input_format_import_nested_json` setting. [Veloman Yunkan](https://github.com/ClickHouse/ClickHouse/pull/3144)
+- Parallel processing is available for many `MATERIALIZED VIEW`s when inserting data. See the `parallel_view_processing` setting. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3208)
+- Added the `SYSTEM FLUSH LOGS` query (forced log flushes to system tables such as `query_log`) [\#3321](https://github.com/ClickHouse/ClickHouse/pull/3321)
+- Now you can use pre-defined `database` and `table` macros when declaring `Replicated` tables. [\#3251](https://github.com/ClickHouse/ClickHouse/pull/3251)
+- Added the ability to read `Decimal` type values in engineering notation (indicating powers of ten). [\#3153](https://github.com/ClickHouse/ClickHouse/pull/3153)
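+
+A hedged sketch of the two equivalent `CUBE` spellings from the first item above; the table `t` and the columns `k1`, `k2`, `x` are hypothetical:
+
+``` sql
+-- Both forms produce subtotals for every combination of the grouping keys.
+SELECT k1, k2, sum(x) FROM t GROUP BY k1, k2 WITH CUBE;
+SELECT k1, k2, sum(x) FROM t GROUP BY CUBE(k1, k2);
+```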
+
+#### Experimental features: {#experimental-features}
+
+- Optimization of the GROUP BY clause for `LowCardinality data types.` [\#3138](https://github.com/ClickHouse/ClickHouse/pull/3138)
+- Optimized calculation of expressions for `LowCardinality data types.` [\#3200](https://github.com/ClickHouse/ClickHouse/pull/3200)
+
+#### Improvements: {#improvements-2}
+
+- Significantly reduced memory consumption for queries with `ORDER BY` and `LIMIT`. See the `max_bytes_before_remerge_sort` setting. [\#3205](https://github.com/ClickHouse/ClickHouse/pull/3205)
+- In the absence of `JOIN` (`LEFT`, `INNER`, …), `INNER JOIN` is assumed. [\#3147](https://github.com/ClickHouse/ClickHouse/pull/3147)
+- Qualified asterisks work correctly in queries with `JOIN`. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3202)
+- The `ODBC` table engine correctly chooses the method for quoting identifiers in the SQL dialect of a remote database. [Alexander Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/3210)
+- The `compile_expressions` setting (JIT compilation of expressions) is enabled by default.
+- Fixed behavior for simultaneous DROP DATABASE/TABLE IF EXISTS and CREATE DATABASE/TABLE IF NOT EXISTS. Previously, a `CREATE DATABASE ... IF NOT EXISTS` query could return the error message "File ... already exists", and the `CREATE TABLE ... IF NOT EXISTS` and `DROP TABLE IF EXISTS` queries could return `Table ... is creating or attaching right now`. [\#3101](https://github.com/ClickHouse/ClickHouse/pull/3101)
+- LIKE and IN expressions with a constant right half are passed to the remote server when querying from MySQL or ODBC tables. [\#3182](https://github.com/ClickHouse/ClickHouse/pull/3182)
+- Comparisons with constant expressions in a WHERE clause are passed to the remote server when querying from MySQL and ODBC tables. Previously, only comparisons with constants were passed. [\#3182](https://github.com/ClickHouse/ClickHouse/pull/3182)
+- Correct calculation of row width in the terminal for `Pretty` formats, including strings with hieroglyphs. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3257).
+- `ON CLUSTER` can be specified for `ALTER UPDATE` queries.
+- Improved performance for reading data in the `JSONEachRow` format. [\#3332](https://github.com/ClickHouse/ClickHouse/pull/3332)
+- Added synonyms for the `LENGTH` and `CHARACTER_LENGTH` functions for compatibility. The `CONCAT` function is no longer case-sensitive. [\#3306](https://github.com/ClickHouse/ClickHouse/pull/3306)
+- Added the `TIMESTAMP` synonym for the `DateTime` type. [\#3390](https://github.com/ClickHouse/ClickHouse/pull/3390)
+- There is always space reserved for query\_id in the server logs, even if the log line is not related to a query. This makes it easier to parse server text logs with third-party tools.
+- Memory consumption by a query is logged when it exceeds the next level of an integer number of gigabytes. [\#3205](https://github.com/ClickHouse/ClickHouse/pull/3205)
+- Added compatibility mode for the case when the client library that uses the Native protocol sends fewer columns by mistake than the server expects for the INSERT query. This scenario was possible when using the clickhouse-cpp library. Previously, this scenario caused the server to crash. [\#3171](https://github.com/ClickHouse/ClickHouse/pull/3171)
+- In a user-defined WHERE expression in `clickhouse-copier`, you can now use a `partition_key` alias (for additional filtering by source table partition). This is useful if the partitioning scheme changes during copying, but only changes slightly.
+[\#3166](https://github.com/ClickHouse/ClickHouse/pull/3166)
+- The workflow of the `Kafka` engine has been moved to a background thread pool in order to automatically reduce the speed of data reading at high loads. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3215).
+- Support for reading `Tuple` and `Nested` values of structures like `struct` in the `Cap'n'Proto format`. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3216)
+- The list of top-level domains for the `firstSignificantSubdomain` function now includes the domain `biz`. [decaseal](https://github.com/ClickHouse/ClickHouse/pull/3219)
+- In the configuration of external dictionaries, `null_value` is interpreted as the value of the default data type. [\#3330](https://github.com/ClickHouse/ClickHouse/pull/3330)
+- Support for the `intDiv` and `intDivOrZero` functions for `Decimal`. [b48402e8](https://github.com/ClickHouse/ClickHouse/commit/b48402e8712e2b9b151e0eef8193811d433a1264)
+- Support for the `Date`, `DateTime`, `UUID`, and `Decimal` types as a key for the `sumMap` aggregate function. [\#3281](https://github.com/ClickHouse/ClickHouse/pull/3281)
+- Support for the `Decimal` data type in external dictionaries. [\#3324](https://github.com/ClickHouse/ClickHouse/pull/3324)
+- Support for the `Decimal` data type in `SummingMergeTree` tables. [\#3348](https://github.com/ClickHouse/ClickHouse/pull/3348)
+- Added specializations for `UUID` in `if`. [\#3366](https://github.com/ClickHouse/ClickHouse/pull/3366)
+- Reduced the number of `open` and `close` system calls when reading from a `MergeTree table`. [\#3283](https://github.com/ClickHouse/ClickHouse/pull/3283)
+- A `TRUNCATE TABLE` query can be executed on any replica (the query is passed to the leader replica). [Kirill Shvakov](https://github.com/ClickHouse/ClickHouse/pull/3375)
+
+#### Bug fixes: {#bug-fixes-10}
+
+- Fixed an issue with `Dictionary` tables for `range_hashed` dictionaries. This error occurred in version 18.12.17. [\#1702](https://github.com/ClickHouse/ClickHouse/pull/1702)
+- Fixed an error when loading `range_hashed` dictionaries (the message `Unsupported type Nullable (...)`). This error occurred in version 18.12.17. [\#3362](https://github.com/ClickHouse/ClickHouse/pull/3362)
+- Fixed errors in the `pointInPolygon` function due to the accumulation of inaccurate calculations for polygons with a large number of vertices located close to each other. [\#3331](https://github.com/ClickHouse/ClickHouse/pull/3331) [\#3341](https://github.com/ClickHouse/ClickHouse/pull/3341)
+- If after merging data parts the checksum for the resulting part differs from the result of the same merge in another replica, the result of the merge is deleted and the data part is downloaded from the other replica (this is the correct behavior). But after downloading the data part, it couldn't be added to the working set because of an error that the part already exists (because the data part was deleted with some delay after the merge). This led to cyclical attempts to download the same data. [\#3194](https://github.com/ClickHouse/ClickHouse/pull/3194)
+- Fixed incorrect calculation of total memory consumption by queries (because of incorrect calculation, the `max_memory_usage_for_all_queries` setting worked incorrectly and the `MemoryTracking` metric had an incorrect value). This error occurred in version 18.12.13. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3344)
+- Fixed the functionality of `CREATE TABLE ... ON CLUSTER ... AS SELECT ...` This error occurred in version 18.12.13. [\#3247](https://github.com/ClickHouse/ClickHouse/pull/3247)
+- Fixed unnecessary preparation of data structures for `JOIN`s on the server that initiates the query if the `JOIN` is only performed on remote servers.
+[\#3340](https://github.com/ClickHouse/ClickHouse/pull/3340)
+- Fixed bugs in the `Kafka` engine: deadlocks after exceptions when starting to read data, and locks upon completion. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3215).
+- For `Kafka` tables, the optional `schema` parameter was not passed (the schema of the `Cap'n'Proto` format). [Vojtech Splichal](https://github.com/ClickHouse/ClickHouse/pull/3150)
+- If the ensemble of ZooKeeper servers has servers that accept the connection but then immediately close it instead of responding to the handshake, ClickHouse chooses to connect another server. Previously, this produced the error `Cannot read all data. Bytes read: 0. Bytes expected: 4.` and the server couldn't start. [8218cf3a](https://github.com/ClickHouse/ClickHouse/commit/8218cf3a5f39a43401953769d6d12a0bb8d29da9)
+- If the ensemble of ZooKeeper servers contains servers for which the DNS query returns an error, these servers are ignored. [17b8e209](https://github.com/ClickHouse/ClickHouse/commit/17b8e209221061325ad7ba0539f03c6e65f87f29)
+- Fixed type conversion between `Date` and `DateTime` when inserting data in the `VALUES` format (if `input_format_values_interpret_expressions = 1`). Previously, the conversion was performed between the numerical value of the number of days in Unix Epoch time and the Unix timestamp, which led to unexpected results. [\#3229](https://github.com/ClickHouse/ClickHouse/pull/3229)
+- Corrected type conversion between `Decimal` and integer numbers. [\#3211](https://github.com/ClickHouse/ClickHouse/pull/3211)
+- Fixed errors in the `enable_optimize_predicate_expression` setting. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3231)
+- Fixed a parser error in CSV format with floating-point numbers if a non-default CSV separator is used, such as `;` [\#3155](https://github.com/ClickHouse/ClickHouse/pull/3155)
+- Fixed the `arrayCumSumNonNegative` function (it does not accumulate negative values if the accumulator is less than zero). [Aleksey Studnev](https://github.com/ClickHouse/ClickHouse/pull/3163)
+- Fixed how `Merge` tables work on top of `Distributed` tables when using `PREWHERE`. [\#3165](https://github.com/ClickHouse/ClickHouse/pull/3165)
+- Bug fixes in the `ALTER UPDATE` query.
+- Fixed bugs in the `odbc` table function that appeared in version 18.12. [\#3197](https://github.com/ClickHouse/ClickHouse/pull/3197)
+- Fixed the operation of aggregate functions with `StateArray` combinators. [\#3188](https://github.com/ClickHouse/ClickHouse/pull/3188)
+- Fixed a crash when dividing a `Decimal` value by zero. [69dd6609](https://github.com/ClickHouse/ClickHouse/commit/69dd6609193beb4e7acd3e6ad216eca0ccfb8179)
+- Fixed output of types for operations using `Decimal` and integer arguments. [\#3224](https://github.com/ClickHouse/ClickHouse/pull/3224)
+- Fixed a segfault during `GROUP BY` on `Decimal128`. [3359ba06](https://github.com/ClickHouse/ClickHouse/commit/3359ba06c39fcd05bfdb87d6c64154819621e13a)
+- The `log_query_threads` setting (logging information about each thread of query execution) now takes effect only if the `log_queries` option (logging information about queries) is set to 1. Since the `log_query_threads` option is enabled by default, information about threads was previously logged even if query logging was disabled. [\#3241](https://github.com/ClickHouse/ClickHouse/pull/3241)
+- Fixed an error in the distributed operation of the quantiles aggregate function (the error message `Not found column quantile...`). [292a8855](https://github.com/ClickHouse/ClickHouse/commit/292a885533b8e3b41ce8993867069d14cbd5a664)
+- Fixed a compatibility problem when working on a cluster of version 18.12.17 servers and older servers at the same time.
+For distributed queries with GROUP BY keys of both fixed and non-fixed length, if there was a large amount of data to aggregate, the returned data was not always fully aggregated (two different rows contained the same aggregation keys). [\#3254](https://github.com/ClickHouse/ClickHouse/pull/3254)
+- Fixed handling of substitutions in `clickhouse-performance-test` if the query contains only part of the substitutions declared in the test. [\#3263](https://github.com/ClickHouse/ClickHouse/pull/3263)
+- Fixed an error when using `FINAL` with `PREWHERE`. [\#3298](https://github.com/ClickHouse/ClickHouse/pull/3298)
+- Fixed an error when using `PREWHERE` over columns that were added during `ALTER`. [\#3298](https://github.com/ClickHouse/ClickHouse/pull/3298)
+- Added a check for the absence of `arrayJoin` for `DEFAULT` and `MATERIALIZED` expressions. Previously, `arrayJoin` led to an error when inserting data. [\#3337](https://github.com/ClickHouse/ClickHouse/pull/3337)
+- Added a check for the absence of `arrayJoin` in a `PREWHERE` clause. Previously, this led to messages like `Size ... doesn't match` or `Unknown compression method` when executing queries. [\#3357](https://github.com/ClickHouse/ClickHouse/pull/3357)
+- Fixed a segfault that could occur in rare cases after an optimization that replaced AND chains from equality evaluations with the corresponding IN expression. [liuyimin-bytedance](https://github.com/ClickHouse/ClickHouse/pull/3339)
+- Minor corrections to `clickhouse-benchmark`: previously, client information was not sent to the server; now the number of queries executed is calculated more accurately when shutting down and for limiting the number of iterations. [\#3351](https://github.com/ClickHouse/ClickHouse/pull/3351) [\#3352](https://github.com/ClickHouse/ClickHouse/pull/3352)
+
+#### Backward incompatible changes: {#backward-incompatible-changes-1}
+
+- Removed the `allow_experimental_decimal_type` option. The `Decimal` data type is available for default use. [\#3329](https://github.com/ClickHouse/ClickHouse/pull/3329)
+
+## ClickHouse release 18.12 {#clickhouse-release-18-12}
+
+### ClickHouse release 18.12.17, 2018-09-16 {#clickhouse-release-18-12-17-2018-09-16}
+
+#### New features: {#new-features-2}
+
+- `invalidate_query` (the ability to specify a query to check whether an external dictionary needs to be updated) is implemented for the `clickhouse` source. [\#3126](https://github.com/ClickHouse/ClickHouse/pull/3126)
+- Added the ability to use the `UInt*`, `Int*`, and `DateTime` data types (along with the `Date` type) as a `range_hashed` external dictionary key that defines the boundaries of the ranges. Now `NULL` can be used to designate an open range. [Vasily Nemkov](https://github.com/ClickHouse/ClickHouse/pull/3123)
+- The `Decimal` type now supports `var*` and `stddev*` aggregate functions. [\#3129](https://github.com/ClickHouse/ClickHouse/pull/3129)
+- The `Decimal` type now supports mathematical functions (`exp`, `sin`, and so on.) [\#3129](https://github.com/ClickHouse/ClickHouse/pull/3129)
+- The `system.part_log` table now has the `partition_id` column. [\#3089](https://github.com/ClickHouse/ClickHouse/pull/3089)
+
+#### Bug fixes: {#bug-fixes-11}
+
+- `Merge` now works correctly on `Distributed` tables. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3159)
+- Fixed incompatibility (an unnecessary dependency on the `glibc` version) that made it impossible to run ClickHouse on `Ubuntu Precise` and older versions. The incompatibility arose in version 18.12.13. [\#3130](https://github.com/ClickHouse/ClickHouse/pull/3130)
+- Fixed errors in the `enable_optimize_predicate_expression` setting.
+[Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3107)
+- Fixed a minor issue with backward compatibility that appeared when working with a cluster of replicas on versions earlier than 18.12.13 and simultaneously creating a new replica of a table on a server with a newer version (shown in the message `Can not clone replica, because the ... updated to new ClickHouse version`, which is logical, but shouldn't happen). [\#3122](https://github.com/ClickHouse/ClickHouse/pull/3122)
+
+#### Backward incompatible changes: {#backward-incompatible-changes-2}
+
+- The `enable_optimize_predicate_expression` option is enabled by default (which is rather optimistic). If query analysis errors occur that are related to searching for column names, set `enable_optimize_predicate_expression` to 0. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3107)
+
+### ClickHouse release 18.12.14, 2018-09-13 {#clickhouse-release-18-12-14-2018-09-13}
+
+#### New features: {#new-features-3}
+
+- Added support for `ALTER UPDATE` queries. [\#3035](https://github.com/ClickHouse/ClickHouse/pull/3035)
+- Added the `allow_ddl` option, which restricts the user's access to DDL queries. [\#3104](https://github.com/ClickHouse/ClickHouse/pull/3104)
+- Added the `min_merge_bytes_to_use_direct_io` option for `MergeTree` engines, which allows you to set a threshold for the total size of a merge (when above the threshold, data part files will be handled using O\_DIRECT). [\#3117](https://github.com/ClickHouse/ClickHouse/pull/3117)
+- The `system.merges` system table now contains the `partition_id` column. [\#3099](https://github.com/ClickHouse/ClickHouse/pull/3099)
+
+#### Improvements {#improvements-3}
+
+- If a data part remains unchanged during a mutation, it isn't downloaded by replicas. [\#3103](https://github.com/ClickHouse/ClickHouse/pull/3103)
+- Autocomplete is available for names of settings when working with `clickhouse-client`. [\#3106](https://github.com/ClickHouse/ClickHouse/pull/3106)
+
+#### Bug fixes: {#bug-fixes-12}
+
+- Added a check for the sizes of arrays that are elements of `Nested` type fields when inserting. [\#3118](https://github.com/ClickHouse/ClickHouse/pull/3118)
+- Fixed an error updating external dictionaries with the `ODBC` source and `hashed` storage. This error occurred in version 18.12.13.
+- Fixed a crash when creating a temporary table from a query with an `IN` condition. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3098)
+- Fixed an error in aggregate functions for arrays that can have `NULL` elements. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3097)
+
+### ClickHouse release 18.12.13, 2018-09-10 {#clickhouse-release-18-12-13-2018-09-10}
+
+#### New features: {#new-features-4}
+
+- Added the `DECIMAL(digits, scale)` data type (`Decimal32(scale)`, `Decimal64(scale)`, `Decimal128(scale)`). To enable it, use the setting `allow_experimental_decimal_type`. [\#2846](https://github.com/ClickHouse/ClickHouse/pull/2846) [\#2970](https://github.com/ClickHouse/ClickHouse/pull/2970) [\#3008](https://github.com/ClickHouse/ClickHouse/pull/3008) [\#3047](https://github.com/ClickHouse/ClickHouse/pull/3047)
+- New `WITH ROLLUP` modifier for `GROUP BY` (alternative syntax: `GROUP BY ROLLUP(...)`); see the sketch after this list. [\#2948](https://github.com/ClickHouse/ClickHouse/pull/2948)
+- In queries with JOIN, the star character expands to a list of columns in all tables, in compliance with the SQL standard. You can restore the old behavior by setting `asterisk_left_columns_only` to 1 on the user configuration level.
+[Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2787)
+- Added support for JOIN with table functions. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2907)
+- Autocomplete by pressing Tab in clickhouse-client. [Sergey Shcherbin](https://github.com/ClickHouse/ClickHouse/pull/2447)
+- Ctrl+C in clickhouse-client clears a query that was entered. [\#2877](https://github.com/ClickHouse/ClickHouse/pull/2877)
+- Added the `join_default_strictness` setting (values: `"`, `'any'`, `'all'`). This allows you to not specify `ANY` or `ALL` for `JOIN`. [\#2982](https://github.com/ClickHouse/ClickHouse/pull/2982)
+- Each line of the server log related to query processing shows the query ID. [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482)
+- Now you can get query execution logs in clickhouse-client (use the `send_logs_level` setting). With distributed query processing, logs are cascaded from all the servers. [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482)
+- The `system.query_log` and `system.processes` (`SHOW PROCESSLIST`) tables now have information about all changed settings when you run a query (the nested structure of the `Settings` data). Added the `log_query_settings` setting. [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482)
+- The `system.query_log` and `system.processes` tables now show information about the number of threads that are participating in query execution (see the `thread_numbers` column). [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482)
+- Added `ProfileEvents` counters that measure the time spent on reading and writing over the network and reading and writing to disk, the number of network errors, and the time spent waiting when network bandwidth is limited. [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482)
+- Added `ProfileEvents` counters that include the system metrics from rusage (you can use them to get information about CPU usage in userspace and the kernel, page faults, and context switches), as well as taskstats metrics (use these to obtain information about I/O wait time, CPU wait time, and the amount of data read and recorded, both with and without page cache). [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482)
+- The `ProfileEvents` counters are applied globally and for each query, as well as for each query execution thread, which allows you to profile resource consumption by queries in detail. [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482)
+- Added the `system.query_thread_log` table, which contains information about each query execution thread. Added the `log_query_threads` setting. [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482)
+- The `system.metrics` and `system.events` tables now have built-in documentation. [\#3016](https://github.com/ClickHouse/ClickHouse/pull/3016)
+- Added the `arrayEnumerateDense` function. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2975)
+- Added the `arrayCumSumNonNegative` and `arrayDifference` functions. [Aleksey Studnev](https://github.com/ClickHouse/ClickHouse/pull/2942)
+- Added the `retention` aggregate function. [Sundy Li](https://github.com/ClickHouse/ClickHouse/pull/2887)
+- Now you can add (merge) states of aggregate functions by using the plus operator, and multiply the states of aggregate functions by a nonnegative constant. [\#3062](https://github.com/ClickHouse/ClickHouse/pull/3062) [\#3034](https://github.com/ClickHouse/ClickHouse/pull/3034)
+- Tables in the MergeTree family now have the virtual column `_partition_id`. [\#3089](https://github.com/ClickHouse/ClickHouse/pull/3089)
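+
+A hedged sketch of the `WITH ROLLUP` modifier from the list above; the table `t` and the columns `k1`, `k2` are hypothetical:
+
+``` sql
+-- ROLLUP adds subtotal rows for each prefix of the grouping keys plus a
+-- grand-total row; the alternative spelling is GROUP BY ROLLUP(k1, k2).
+SELECT k1, k2, count() FROM t GROUP BY k1, k2 WITH ROLLUP;
+```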
+
+#### Experimental features: {#experimental-features-1}
+
+- Added the `LowCardinality(T)` data type. This data type automatically creates a local dictionary of values and allows data processing without unpacking the dictionary. [\#2830](https://github.com/ClickHouse/ClickHouse/pull/2830)
+- Added a cache of JIT-compiled functions and a counter for the number of uses before compiling. To JIT compile expressions, enable the `compile_expressions` setting. [\#2990](https://github.com/ClickHouse/ClickHouse/pull/2990) [\#3077](https://github.com/ClickHouse/ClickHouse/pull/3077)
+
+#### Improvements: {#improvements-4}
+
+- Fixed the problem with unlimited accumulation of the replication log when there are abandoned replicas. Added an effective recovery mode for replicas with a long lag.
+- Improved performance of `GROUP BY` with multiple aggregation fields when one of them is string and the others are fixed length.
+- Improved performance when using `PREWHERE` and with implicit transfer of expressions in `PREWHERE`.
+- Improved parsing performance for text formats (`CSV`, `TSV`). [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2977) [\#2980](https://github.com/ClickHouse/ClickHouse/pull/2980)
+- Improved performance of reading strings and arrays in binary formats. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2955)
+- Increased performance and reduced memory consumption for queries to `system.tables` and `system.columns` when there is a very large number of tables on a single server. [\#2953](https://github.com/ClickHouse/ClickHouse/pull/2953)
+- Fixed a performance problem in the case of a large stream of queries that result in an error (the `_dl_addr` function is visible in `perf top`, but the server isn't using much CPU). [\#2938](https://github.com/ClickHouse/ClickHouse/pull/2938)
+- Conditions are passed into the view (when `enable_optimize_predicate_expression` is enabled). [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2907)
+- Improvements to the functionality for the `UUID` data type. [\#3074](https://github.com/ClickHouse/ClickHouse/pull/3074) [\#2985](https://github.com/ClickHouse/ClickHouse/pull/2985)
+- The `UUID` data type is supported in The-Alchemist dictionaries. [\#2822](https://github.com/ClickHouse/ClickHouse/pull/2822)
+- The `visitParamExtractRaw` function works correctly with nested structures. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2974)
+- When the `input_format_skip_unknown_fields` setting is enabled, object fields in `JSONEachRow` format are skipped correctly. [BlahGeek](https://github.com/ClickHouse/ClickHouse/pull/2958)
+- For a `CASE` expression with conditions, you can now omit `ELSE`, which is equivalent to `ELSE NULL`. [\#2920](https://github.com/ClickHouse/ClickHouse/pull/2920)
+- The operation timeout can now be configured when working with ZooKeeper. [urykhy](https://github.com/ClickHouse/ClickHouse/pull/2971)
+- You can specify an offset for `LIMIT n, m` as `LIMIT n OFFSET m`; see the sketch after this list. [\#2840](https://github.com/ClickHouse/ClickHouse/pull/2840)
+- You can use the `SELECT TOP n` syntax as an alternative for `LIMIT`. [\#2840](https://github.com/ClickHouse/ClickHouse/pull/2840)
+- Increased the size of the queue to write to system tables, so the `SystemLog parameter queue is full` error doesn't happen as often.
+- The `windowFunnel` aggregate function now supports events that meet multiple conditions. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2801)
+- Duplicate columns can be used in a `USING` clause for `JOIN`. [\#3006](https://github.com/ClickHouse/ClickHouse/pull/3006)
+- `Pretty` formats now have a limit on column alignment by width. Use the `output_format_pretty_max_column_pad_width` setting. If a value is wider, it will still be displayed in its entirety, but the other cells in the table will not be overly wide. [\#3003](https://github.com/ClickHouse/ClickHouse/pull/3003)
+- The `odbc` table function now allows you to specify the name of a database/schema. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2885)
+- Added the ability to use a username specified in the `clickhouse-client` config file. [Vladimir Kozbin](https://github.com/ClickHouse/ClickHouse/pull/2909)
+- The `ZooKeeperExceptions` counter has been split into three counters: `ZooKeeperUserExceptions`, `ZooKeeperHardwareExceptions`, and `ZooKeeperOtherExceptions`.
+- `ALTER DELETE` queries work for materialized views.
+- Added randomization when running the cleanup thread periodically for `ReplicatedMergeTree` tables in order to avoid periodic load spikes when there is a very large number of `ReplicatedMergeTree` tables.
+- Support for `ATTACH TABLE ... ON CLUSTER` queries. [\#3025](https://github.com/ClickHouse/ClickHouse/pull/3025)
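+
+As an illustration of the two `LIMIT` alternatives mentioned in the list above (a sketch only, using the built-in `system.numbers` table):
+
+``` sql
+-- Classic form LIMIT <offset>, <count>: skip 5 rows, return 10
+SELECT number FROM system.numbers LIMIT 5, 10;
+-- The same result with the OFFSET keyword
+SELECT number FROM system.numbers LIMIT 10 OFFSET 5;
+-- SELECT TOP n is an alternative spelling of LIMIT n (no offset)
+SELECT TOP 10 number FROM system.numbers;
+```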
+
+#### Bug fixes: {#bug-fixes-13}
+
+- Fixed an issue with `Dictionary` tables (which throw the `Size of offsets doesn't match size of column` or `Unknown compression method` exception). This bug appeared in version 18.10.3. [\#2913](https://github.com/ClickHouse/ClickHouse/issues/2913)
+- Fixed a bug when merging `CollapsingMergeTree` tables if one of the data parts is empty (these parts are formed during a merge or `ALTER DELETE` if all data was deleted), and the `vertical` algorithm was used for the merge. [\#3049](https://github.com/ClickHouse/ClickHouse/pull/3049)
+- Fixed a race condition during `DROP` or `TRUNCATE` for `Memory` tables with a simultaneous `SELECT`, which could lead to server crashes. This bug appeared in version 1.1.54388. [\#3038](https://github.com/ClickHouse/ClickHouse/pull/3038)
+- Fixed the possibility of data loss when inserting into `Replicated` tables if the `Session is expired` error is returned (data loss can be detected by the `ReplicatedDataLoss` metric). This error occurred in version 1.1.54378. [\#2939](https://github.com/ClickHouse/ClickHouse/pull/2939) [\#2949](https://github.com/ClickHouse/ClickHouse/pull/2949) [\#2964](https://github.com/ClickHouse/ClickHouse/pull/2964)
+- Fixed a segfault during `JOIN ... ON`. [\#3000](https://github.com/ClickHouse/ClickHouse/pull/3000)
+- Fixed the error of searching for column names when the `WHERE` expression consists entirely of a qualified column name, such as `WHERE table.column`. [\#2994](https://github.com/ClickHouse/ClickHouse/pull/2994)
+- Fixed the “Not found column” error that occurred when executing distributed queries if a single column consisting of an IN expression with a subquery is requested from a remote server. [\#3087](https://github.com/ClickHouse/ClickHouse/pull/3087)
+- Fixed the `Block structure mismatch in UNION stream: different number of columns` error that occurred for distributed queries if one of the shards is local and the other is not, and the optimization of moving to `PREWHERE` is triggered. [\#2226](https://github.com/ClickHouse/ClickHouse/pull/2226) [\#3037](https://github.com/ClickHouse/ClickHouse/pull/3037) [\#3055](https://github.com/ClickHouse/ClickHouse/pull/3055) [\#3065](https://github.com/ClickHouse/ClickHouse/pull/3065) [\#3073](https://github.com/ClickHouse/ClickHouse/pull/3073) [\#3090](https://github.com/ClickHouse/ClickHouse/pull/3090) [\#3093](https://github.com/ClickHouse/ClickHouse/pull/3093)
+- Fixed the `pointInPolygon` function for certain cases of non-convex polygons. [\#2910](https://github.com/ClickHouse/ClickHouse/pull/2910)
+- Fixed the incorrect result when comparing `nan` with integers. [\#3024](https://github.com/ClickHouse/ClickHouse/pull/3024)
+- Fixed an error in the `zlib-ng` library that could lead to a segfault in rare cases. [\#2854](https://github.com/ClickHouse/ClickHouse/pull/2854)
+- Fixed a memory leak when inserting into a table with `AggregateFunction` columns, if the state of the aggregate function is not simple (allocates memory separately), and if a single insertion request results in multiple small blocks. [\#3084](https://github.com/ClickHouse/ClickHouse/pull/3084)
+- Fixed a race condition when creating and deleting the same `Buffer` or `MergeTree` table simultaneously.
+- Fixed the possibility of a segfault when comparing tuples made up of certain non-trivial types, such as tuples. [\#2989](https://github.com/ClickHouse/ClickHouse/pull/2989)
+- Fixed the possibility of a segfault when running certain `ON CLUSTER` queries. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2960)
+- Fixed an error in the `arrayDistinct` function for `Nullable` array elements. [\#2845](https://github.com/ClickHouse/ClickHouse/pull/2845) [\#2937](https://github.com/ClickHouse/ClickHouse/pull/2937)
+- The `enable_optimize_predicate_expression` option now correctly supports cases with `SELECT *`. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2929)
+- Fixed a segfault when reinitializing the ZooKeeper session. [\#2917](https://github.com/ClickHouse/ClickHouse/pull/2917)
+- Fixed potential blocking when working with ZooKeeper.
+- Fixed incorrect code for adding nested data structures in a `SummingMergeTree`.
+- When allocating memory for states of aggregate functions, alignment is correctly taken into account, which makes it possible to use operations that require alignment when implementing states of aggregate functions. [chenxing-xc](https://github.com/ClickHouse/ClickHouse/pull/2808)
+
+#### Security fix: {#security-fix}
+
+- Safe use of ODBC data sources. Interaction with ODBC drivers uses a separate `clickhouse-odbc-bridge` process. Errors in third-party ODBC drivers no longer cause problems with server stability or vulnerabilities. [\#2828](https://github.com/ClickHouse/ClickHouse/pull/2828) [\#2879](https://github.com/ClickHouse/ClickHouse/pull/2879) [\#2886](https://github.com/ClickHouse/ClickHouse/pull/2886) [\#2893](https://github.com/ClickHouse/ClickHouse/pull/2893) [\#2921](https://github.com/ClickHouse/ClickHouse/pull/2921)
+- Fixed incorrect validation of the file path in the `catBoostPool` table function. [\#2894](https://github.com/ClickHouse/ClickHouse/pull/2894)
+- The contents of system tables (`tables`, `databases`, `parts`, `columns`, `parts_columns`, `merges`, `mutations`, `replicas`, and `replication_queue`) are filtered according to the user's configured access to databases (`allow_databases`). [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2856)
+
+#### Backward incompatible changes: {#backward-incompatible-changes-3}
+
+- In queries with JOIN, the star character expands to a list of columns in all tables, in compliance with the SQL standard. You can restore the old behavior by setting `asterisk_left_columns_only` to 1 on the user configuration level.
+
+#### Build changes: {#build-changes-2}
+
+- Most integration tests can now be run by commit.
+- Code style checks can also be run by commit.
+- The `memcpy` implementation is chosen correctly when building on CentOS7/Fedora. [Etienne Champetier](https://github.com/ClickHouse/ClickHouse/pull/2912)
+- When using clang to build, some warnings from `-Weverything` have been added, in addition to the regular `-Wall -Wextra -Werror`. [\#2957](https://github.com/ClickHouse/ClickHouse/pull/2957)
+- Debug builds use the `jemalloc` debug option.
+- The interface of the library for interacting with ZooKeeper is declared abstract. [\#2950](https://github.com/ClickHouse/ClickHouse/pull/2950)
+
+## ClickHouse release 18.10 {#clickhouse-release-18-10}
+
+### ClickHouse release 18.10.3, 2018-08-13 {#clickhouse-release-18-10-3-2018-08-13}
+
+#### New features: {#new-features-5}
+
+- HTTPS can be used for replication. [\#2760](https://github.com/ClickHouse/ClickHouse/pull/2760)
+- Added the functions `murmurHash2_64`, `murmurHash3_32`, `murmurHash3_64`, and `murmurHash3_128` in addition to the existing `murmurHash2_32`. [\#2791](https://github.com/ClickHouse/ClickHouse/pull/2791)
+- Support for Nullable types in the ClickHouse ODBC driver (`ODBCDriver2` output format). [\#2834](https://github.com/ClickHouse/ClickHouse/pull/2834)
+- Support for `UUID` in the key columns.
+
+#### Improvements: {#improvements-5}
+
+- Clusters can be removed without restarting the server when they are deleted from the config files. [\#2777](https://github.com/ClickHouse/ClickHouse/pull/2777)
+- External dictionaries can be removed without restarting the server when they are removed from config files. [\#2779](https://github.com/ClickHouse/ClickHouse/pull/2779)
+- Added `SETTINGS` support for the `Kafka` table engine. [Alexander Marshalov](https://github.com/ClickHouse/ClickHouse/pull/2781)
+- Improvements for the `UUID` data type (not yet complete). [\#2618](https://github.com/ClickHouse/ClickHouse/pull/2618)
+- Support for empty parts after merges in the `SummingMergeTree`, `CollapsingMergeTree` and `VersionedCollapsingMergeTree` engines. [\#2815](https://github.com/ClickHouse/ClickHouse/pull/2815)
+- Old records of completed mutations are deleted (`ALTER DELETE`). [\#2784](https://github.com/ClickHouse/ClickHouse/pull/2784)
+- Added the `system.merge_tree_settings` table. [Kirill Shvakov](https://github.com/ClickHouse/ClickHouse/pull/2841)
+- The `system.tables` table now has dependency columns: `dependencies_database` and `dependencies_table`. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2851)
+- Added the `max_partition_size_to_drop` config option. [\#2782](https://github.com/ClickHouse/ClickHouse/pull/2782)
+- Added the `output_format_json_escape_forward_slashes` option. [Alexander Bocharov](https://github.com/ClickHouse/ClickHouse/pull/2812)
+- Added the `max_fetch_partition_retries_count` setting. [\#2831](https://github.com/ClickHouse/ClickHouse/pull/2831)
+- Added the `prefer_localhost_replica` setting for disabling the preference for a local replica and going to a local replica without inter-process interaction. [\#2832](https://github.com/ClickHouse/ClickHouse/pull/2832)
+- The `quantileExact` aggregate function returns `nan` in the case of aggregation on an empty `Float32` or `Float64` set. [Sundy Li](https://github.com/ClickHouse/ClickHouse/pull/2855)
+
+#### Bug fixes: {#bug-fixes-14}
+
+- Removed unnecessary escaping of the connection string parameters for ODBC, which made it impossible to establish a connection. This error occurred in version 18.6.0.
+- Fixed the logic for processing `REPLACE PARTITION` commands in the replication queue. If there are two `REPLACE` commands for the same partition, the incorrect logic could cause one of them to remain in the replication queue and not be executed. [\#2814](https://github.com/ClickHouse/ClickHouse/pull/2814)
+- Fixed a merge bug when all data parts were empty (parts that were formed from a merge or from `ALTER DELETE` if all data was deleted). This bug appeared in version 18.1.0. [\#2930](https://github.com/ClickHouse/ClickHouse/pull/2930)
+- Fixed an error for concurrent `Set` or `Join`. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2823)
+- Fixed the `Block structure mismatch in UNION stream: different number of columns` error that occurred for `UNION ALL` queries inside a subquery if one of the `SELECT` queries contains duplicate column names. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2094)
+- Fixed a memory leak if an exception occurred when connecting to a MySQL server.
+- Fixed incorrect clickhouse-client response code in the case of a query error.
+- Fixed incorrect behavior of materialized views containing DISTINCT. [\#2795](https://github.com/ClickHouse/ClickHouse/issues/2795)
+
+#### Backward incompatible changes {#backward-incompatible-changes-4}
+
+- Removed support for CHECK TABLE queries for Distributed tables.
+
+#### Build changes: {#build-changes-3}
+
+- The allocator has been replaced: `jemalloc` is now used instead of `tcmalloc`. In some scenarios, this increases speed up to 20%. However, there are queries that have slowed down by up to 20%. Memory consumption has been reduced by approximately 10% in some scenarios, with improved stability. With highly competitive loads, CPU usage in userspace and in system shows just a slight increase. [\#2773](https://github.com/ClickHouse/ClickHouse/pull/2773)
+- Use of libressl from a submodule. [\#1983](https://github.com/ClickHouse/ClickHouse/pull/1983) [\#2807](https://github.com/ClickHouse/ClickHouse/pull/2807)
+- Use of unixodbc from a submodule. [\#2789](https://github.com/ClickHouse/ClickHouse/pull/2789)
+- Use of mariadb-connector-c from a submodule. [\#2785](https://github.com/ClickHouse/ClickHouse/pull/2785)
+- Added functional test files to the repository that depend on the availability of test data (for the time being, without the test data itself).
+
+## ClickHouse release 18.6 {#clickhouse-release-18-6}
+
+### ClickHouse release 18.6.0, 2018-08-02 {#clickhouse-release-18-6-0-2018-08-02}
+
+#### New features: {#new-features-6}
+
+- Added support for ON expressions for the JOIN ON syntax (see the sketch after this list):
+ `JOIN ON Expr([table.]column ...) = Expr([table.]column, ...) [AND Expr([table.]column, ...) = Expr([table.]column, ...) ...]`
+ The expression must be a chain of equalities joined by the AND operator. Each side of the equality can be an arbitrary expression over the columns of one of the tables. The use of fully qualified column names is supported (`table.name`, `database.table.name`, `table_alias.name`, `subquery_alias.name`) for the right table. [\#2742](https://github.com/ClickHouse/ClickHouse/pull/2742)
+- HTTPS can be enabled for replication. [\#2760](https://github.com/ClickHouse/ClickHouse/pull/2760)
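+
+A minimal sketch of the extended `JOIN ON` syntax described above, assuming two hypothetical tables `t1` and `t2`; each side of an equality may be an expression over columns of one of the tables, and several equalities are chained with `AND`:
+
+``` sql
+-- lower(...) on both sides shows an arbitrary expression per side
+SELECT t1.id, t2.value
+FROM t1
+JOIN t2 ON t1.id = t2.ref_id AND lower(t1.name) = lower(t2.name)
+```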
+
+#### Improvements: {#improvements-6}
+
+- The server passes the patch component of its version to the client. Data about the patch version component is in `system.processes` and `query_log`. [\#2646](https://github.com/ClickHouse/ClickHouse/pull/2646)
+
+## ClickHouse release 18.5 {#clickhouse-release-18-5}
+
+### ClickHouse release 18.5.1, 2018-07-31 {#clickhouse-release-18-5-1-2018-07-31}
+
+#### New features: {#new-features-7}
+
+- Added the hash function `murmurHash2_32` [\#2756](https://github.com/ClickHouse/ClickHouse/pull/2756).
+
+#### Improvements: {#improvements-7}
+
+- Now you can use the `from_env` [\#2741](https://github.com/ClickHouse/ClickHouse/pull/2741) attribute to set values in config files from environment variables.
+- Added case-insensitive versions of the `coalesce`, `ifNull`, and `nullIf functions` [\#2752](https://github.com/ClickHouse/ClickHouse/pull/2752).
+
+#### Bug fixes: {#bug-fixes-15}
+
+- Fixed a possible bug when starting a replica [\#2759](https://github.com/ClickHouse/ClickHouse/pull/2759).
+
+## ClickHouse release 18.4 {#clickhouse-release-18-4}
+
+### ClickHouse release 18.4.0, 2018-07-28 {#clickhouse-release-18-4-0-2018-07-28}
+
+#### New features: {#new-features-8}
+
+- Added system tables: `formats`, `data_type_families`, `aggregate_function_combinators`, `table_functions`, `table_engines`, `collations` [\#2721](https://github.com/ClickHouse/ClickHouse/pull/2721).
+- Added the ability to use a table function instead of a table as an argument of a `remote` or `cluster table function` [\#2708](https://github.com/ClickHouse/ClickHouse/pull/2708).
+- Support for `HTTP Basic` authentication in the replication protocol [\#2727](https://github.com/ClickHouse/ClickHouse/pull/2727).
+- The `has` function now allows searching for a numeric value in an array of `Enum` values [Maxim Khrisanfov](https://github.com/ClickHouse/ClickHouse/pull/2699).
+- Support for adding arbitrary message separators when reading from `Kafka` [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2701).
+
+#### Improvements: {#improvements-8}
+
+- The `ALTER TABLE t DELETE WHERE` query does not rewrite data parts that were not affected by the WHERE condition [\#2694](https://github.com/ClickHouse/ClickHouse/pull/2694).
+- The `use_minimalistic_checksums_in_zookeeper` option for `ReplicatedMergeTree` tables is enabled by default. This setting was added in version 1.1.54378, 2018-04-16. Versions that are older than 1.1.54378 can no longer be installed.
+- Support for running `KILL` and `OPTIMIZE` queries that specify `ON CLUSTER` [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2689).
+
+#### Bug fixes: {#bug-fixes-16}
+
+- Fixed the error `Column ... is not under an aggregate function and not in GROUP BY` for aggregation with an IN expression. This bug appeared in version 18.1.0. ([bbdd780b](https://github.com/ClickHouse/ClickHouse/commit/bbdd780be0be06a0f336775941cdd536878dd2c2))
+- Fixed a bug in the `windowFunnel aggregate function` [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2735).
+- Fixed a bug in the `anyHeavy` aggregate function ([a2101df2](https://github.com/ClickHouse/ClickHouse/commit/a2101df25a6a0fba99aa71f8793d762af2b801ee))
+- Fixed server crash when using the `countArray()` aggregate function.
+
+#### Backward incompatible changes: {#backward-incompatible-changes-5}
+
+- Parameters for the `Kafka` engine were changed from `Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format[, kafka_schema, kafka_num_consumers])` to `Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format[, kafka_row_delimiter, kafka_schema, kafka_num_consumers])`. If your tables use the `kafka_schema` or `kafka_num_consumers` parameters, you have to manually edit the metadata files `path/metadata/database/table.sql` and add the `kafka_row_delimiter` parameter with the `''` value, as sketched below.
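+
+A hypothetical before/after sketch of such a metadata edit; the table name, broker, topic, and other parameter values are invented, and the column list is elided:
+
+``` sql
+-- Before, with the old parameter order:
+ATTACH TABLE queue (...) ENGINE = Kafka('broker:9092', 'topic', 'group', 'JSONEachRow', 'schema.proto', 2)
+-- After, with kafka_row_delimiter inserted as '' before kafka_schema:
+ATTACH TABLE queue (...) ENGINE = Kafka('broker:9092', 'topic', 'group', 'JSONEachRow', '', 'schema.proto', 2)
+```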
+
+## ClickHouse release 18.1 {#clickhouse-release-18-1}
+
+### ClickHouse release 18.1.0, 2018-07-23 {#clickhouse-release-18-1-0-2018-07-23}
+
+#### New features: {#new-features-9}
+
+- Support for the `ALTER TABLE t DELETE WHERE` query for non-replicated MergeTree tables ([\#2634](https://github.com/ClickHouse/ClickHouse/pull/2634)).
+- Support for arbitrary types for the `uniq*` family of aggregate functions ([\#2010](https://github.com/ClickHouse/ClickHouse/issues/2010)).
+- Support for arbitrary types in comparison operators ([\#2026](https://github.com/ClickHouse/ClickHouse/issues/2026)).
+- The `users.xml` file allows setting a subnet mask in the format `10.0.0.1/255.255.255.0`. This is necessary for using masks for IPv6 networks with zeros in the middle ([\#2637](https://github.com/ClickHouse/ClickHouse/pull/2637)).
+- Added the `arrayDistinct` function ([\#2670](https://github.com/ClickHouse/ClickHouse/pull/2670)).
+- The SummingMergeTree engine can now work with AggregateFunction type columns ([Constantin S. Pan](https://github.com/ClickHouse/ClickHouse/pull/2566)).
+
+#### Improvements: {#improvements-9}
+
+- The numbering scheme for release versions has changed. Now the first part contains the year of the release (A.D., Moscow time zone, minus 2000), the second part contains the number of major changes (increases for most releases), and the third part is the patch version. Releases are still backward compatible, unless otherwise stated in the changelog.
+- Faster conversion of floating-point numbers to a string ([Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2664)).
+- If some rows were skipped during an insert due to parsing errors (this is possible with the `input_allow_errors_num` and `input_allow_errors_ratio` settings enabled), the number of skipped rows is now written to the server log ([Leonardo Cecchi](https://github.com/ClickHouse/ClickHouse/pull/2669)).
+
+#### Bug fixes: {#bug-fixes-17}
+
+- Fixed the TRUNCATE command for temporary tables ([Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2624)).
+- Fixed a rare deadlock in the ZooKeeper client library that occurred when there was a network error while reading the response ([c315200](https://github.com/ClickHouse/ClickHouse/commit/c315200e64b87e44bdf740707fc857d1fdf7e947)).
+- Fixed an error during a CAST to Nullable types ([\#1322](https://github.com/ClickHouse/ClickHouse/issues/1322)).
+- Fixed the incorrect result of the `maxIntersection()` function when the boundaries of intervals coincided ([Michael Furmur](https://github.com/ClickHouse/ClickHouse/pull/2657)).
+- Fixed incorrect transformation of the OR expression chain in a function argument ([chenxing-xc](https://github.com/ClickHouse/ClickHouse/pull/2663)).
+- Fixed performance degradation for queries containing `IN (subquery)` expressions inside another subquery ([\#2571](https://github.com/ClickHouse/ClickHouse/issues/2571)).
+- Fixed incompatibility between servers with different versions in distributed queries that use a `CAST` function that isn't in uppercase letters ([fe8c4d6](https://github.com/ClickHouse/ClickHouse/commit/fe8c4d64e434cacd4ceef34faa9005129f2190a5)).
+- Added missing quoting of identifiers for queries to an external DBMS ([\#2635](https://github.com/ClickHouse/ClickHouse/issues/2635)).
+
+#### Backward incompatible changes: {#backward-incompatible-changes-6}
+
+- Converting a string containing the number zero to DateTime does not work. Example: `SELECT toDateTime('0')`. This is also the reason that `DateTime DEFAULT '0'` does not work in tables, as well as `0` in dictionaries. Solution: replace `0` with `0000-00-00 00:00:00`.
+
+## ClickHouse release 1.1 {#clickhouse-release-1-1}
+
+### ClickHouse release 1.1.54394, 2018-07-12 {#clickhouse-release-1-1-54394-2018-07-12}
+
+#### New features: {#new-features-10}
+
+- Added the `histogram` aggregate function ([Mikhail Surin](https://github.com/ClickHouse/ClickHouse/pull/2521)).
+- Now `OPTIMIZE TABLE ... FINAL` can be used without specifying partitions for `ReplicatedMergeTree` ([Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2600)).
+
+#### Bug fixes: {#bug-fixes-18}
+
+- Fixed a problem with a very small timeout for sockets (one second) for reading and writing when sending and downloading replicated data, which made it impossible to download larger parts if there is a load on the network or disk (it resulted in cyclical attempts to download parts). This error occurred in version 1.1.54388.
+- Fixed issues when using chroot in ZooKeeper if you inserted duplicate data blocks in the table.
+- The `has` function now works correctly for an array with Nullable elements ([\#2115](https://github.com/ClickHouse/ClickHouse/issues/2115)).
+- The `system.tables` table now works correctly when used in distributed queries. The `metadata_modification_time` and `engine_full` columns are now non-virtual. Fixed an error that occurred if only these columns were queried from the table.
+- Fixed how an empty `TinyLog` table works after inserting an empty data block ([\#2563](https://github.com/ClickHouse/ClickHouse/issues/2563)).
+- The `system.zookeeper` table works if the value of the node in ZooKeeper is NULL.
+
+### ClickHouse release 1.1.54390, 2018-07-06 {#clickhouse-release-1-1-54390-2018-07-06}
+
+#### New features: {#new-features-11}
+
+- Queries can be sent in `multipart/form-data` format (in the `query` field), which is useful if external data is also sent for query processing ([Olga Khvostikova](https://github.com/ClickHouse/ClickHouse/pull/2490)).
+- Added the ability to enable or disable processing of single or double quotes when reading data in CSV format. You can configure this in the `format_csv_allow_single_quotes` and `format_csv_allow_double_quotes` settings ([Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2574)).
+- Now `OPTIMIZE TABLE ... FINAL` can be used without specifying the partition for non-replicated variants of `MergeTree` ([Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2599)).
+
+#### Improvements: {#improvements-10}
+
+- Improved performance, reduced memory consumption, and correct memory consumption tracking with use of the IN operator when a table index could be used ([\#2584](https://github.com/ClickHouse/ClickHouse/pull/2584)).
+- Removed redundant checking of checksums when adding a data part. This is important when there is a large number of replicas, because in these cases the total number of checks was equal to N^2.
+- Added support for `Array(Tuple(...))` arguments for the `arrayEnumerateUniq` function ([\#2573](https://github.com/ClickHouse/ClickHouse/pull/2573)).
+- Added `Nullable` support for the `runningDifference` function ([\#2594](https://github.com/ClickHouse/ClickHouse/pull/2594)).
+- Improved query analysis performance when there is a very large number of expressions ([\#2572](https://github.com/ClickHouse/ClickHouse/pull/2572)).
+- Faster selection of data parts to merge in `ReplicatedMergeTree` tables. Faster recovery of the ZooKeeper session ([\#2597](https://github.com/ClickHouse/ClickHouse/pull/2597)).
+- The `format_version.txt` file for `MergeTree` tables is re-created if it is missing, which makes sense if ClickHouse is launched after copying the directory structure without files ([Ciprian Hacman](https://github.com/ClickHouse/ClickHouse/pull/2593)).
+
+#### Bug fixes: {#bug-fixes-19}
+
+- Fixed a bug when working with ZooKeeper that could make it impossible to recover the session and readonly states of tables before restarting the server.
+- Fixed a bug when working with ZooKeeper that could result in old nodes not being deleted if the session is interrupted.
+- Fixed an error in the `quantileTDigest` function for Float arguments (this bug was introduced in version 1.1.54388) ([Mikhail Surin](https://github.com/ClickHouse/ClickHouse/pull/2553)).
+- Fixed a bug in the index for MergeTree tables if the primary key column is located inside a function for converting types between signed and unsigned integers of the same size ([\#2603](https://github.com/ClickHouse/ClickHouse/pull/2603)).
+- Fixed a segfault if `macros` are used but they aren't in the config file ([\#2570](https://github.com/ClickHouse/ClickHouse/pull/2570)).
+- Fixed switching to the default database when reconnecting the client ([\#2583](https://github.com/ClickHouse/ClickHouse/pull/2583)).
+- Fixed a bug that occurred when the `use_index_for_in_with_subqueries` setting was disabled.
+
+#### Security fix: {#security-fix-1}
+
+- Sending files is no longer possible when connected to MySQL (`LOAD DATA LOCAL INFILE`).
+
+### ClickHouse release 1.1.54388, 2018-06-28 {#clickhouse-release-1-1-54388-2018-06-28}
+
+#### New features: {#new-features-12}
+
+- Support for the `ALTER TABLE t DELETE WHERE` query for replicated tables. Added the `system.mutations` table to track the progress of this type of queries.
+- Support for the `ALTER TABLE t [REPLACE|ATTACH] PARTITION` query for \*MergeTree tables.
+- Support for the `TRUNCATE TABLE` query ([Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2260))
+- Several new `SYSTEM` queries for replicated tables (`RESTART REPLICAS`, `SYNC REPLICA`, `[STOP|START] [MERGES|FETCHES|SENDS REPLICATED|REPLICATION QUEUES]`).
+- Added the ability to write to a table with the MySQL engine and the corresponding table function ([sundy-li](https://github.com/ClickHouse/ClickHouse/pull/2294)).
+- Added the `url()` table function and the `URL` table engine ([Alexander Sapin](https://github.com/ClickHouse/ClickHouse/pull/2501)).
+- Added the `windowFunnel` aggregate function ([sundy-li](https://github.com/ClickHouse/ClickHouse/pull/2352)).
+- New `startsWith` and `endsWith` functions for strings ([Vadim Plakhtinsky](https://github.com/ClickHouse/ClickHouse/pull/2429)).
+- The `numbers()` table function now allows you to specify the offset ([Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2535)); see the sketch after this list.
+- The password to `clickhouse-client` can be entered interactively.
+- Server logs can now be sent to syslog ([Alexander Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/2459)).
+- Support for logging in dictionaries with a shared library source ([Alexander Sapin](https://github.com/ClickHouse/ClickHouse/pull/2472)).
+- Support for custom CSV delimiters ([Ivan Zhukov](https://github.com/ClickHouse/ClickHouse/pull/2263))
+- Added the `date_time_input_format` setting. If you switch this setting to `'best_effort'`, DateTime values will be read in a wide range of formats.
+- Added the `clickhouse-obfuscator` utility for data obfuscation. Usage example: publishing data used in performance tests.
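+
+A quick sketch of the `numbers()` offset form mentioned in the list above: with two arguments, the first is assumed to be the offset and the second the number of rows:
+
+``` sql
+SELECT * FROM numbers(10);    -- returns 0 .. 9
+SELECT * FROM numbers(10, 5); -- returns 10 .. 14 (offset 10, 5 rows)
+```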
+
+#### Experimental features: {#experimental-features-2}
+
+- Added the ability to calculate `and` arguments only where they are needed ([Anastasia Tsarkova](https://github.com/ClickHouse/ClickHouse/pull/2272))
+- JIT compilation to native code is now available for some expressions ([pyos](https://github.com/ClickHouse/ClickHouse/pull/2277)).
+
+#### Bug fixes: {#bug-fixes-20}
+
+- Duplicates no longer appear for a query with `DISTINCT` and `ORDER BY`.
+- Queries with `ARRAY JOIN` and `arrayFilter` no longer return an incorrect result.
+- Fixed an error when reading an individual column from a Nested structure ([\#2066](https://github.com/ClickHouse/ClickHouse/issues/2066)).
+- Fixed an error when analyzing queries with a HAVING clause like `HAVING tuple IN (...)`.
+- Fixed an error when analyzing queries with recursive aliases.
+- Fixed an error when reading from ReplacingMergeTree with a condition in PREWHERE that filters all rows ([\#2525](https://github.com/ClickHouse/ClickHouse/issues/2525)).
+- User profile settings were not applied when using sessions in the HTTP interface.
+- Fixed how settings are applied from the command line parameters in clickhouse-local.
+- The ZooKeeper client library now uses the session timeout received from the server.
+- Fixed a bug in the ZooKeeper client library when the client waited for the server response longer than the timeout.
+- Fixed pruning of parts for queries with conditions on partition key columns ([\#2342](https://github.com/ClickHouse/ClickHouse/issues/2342)).
+- Merges are now possible after `CLEAR COLUMN IN PARTITION` ([\#2315](https://github.com/ClickHouse/ClickHouse/issues/2315)).
+- Type mapping in the ODBC table function has been fixed ([sundy-li](https://github.com/ClickHouse/ClickHouse/pull/2268)).
+- Type comparisons have been fixed for `DateTime` with and without the time zone ([Alexander Bocharov](https://github.com/ClickHouse/ClickHouse/pull/2400)).
+- Fixed syntactic parsing and formatting of the `CAST` operator.
+- Fixed insertion into a materialized view for the Distributed table engine ([Babacar Diassé](https://github.com/ClickHouse/ClickHouse/pull/2411)).
+- Fixed a race condition when writing data from the `Kafka` engine to materialized views ([Yangkuan Liu](https://github.com/ClickHouse/ClickHouse/pull/2448)).
+- Fixed SSRF in the remote() table function.
+- Fixed exit behavior of `clickhouse-client` in multiline mode ([\#2510](https://github.com/ClickHouse/ClickHouse/issues/2510)).
+
+#### Improvements: {#improvements-11}
+
+- Background tasks in replicated tables are now performed in a thread pool instead of in separate threads ([Silviu Caragea](https://github.com/ClickHouse/ClickHouse/pull/1722)).
+- Improved LZ4 compression performance.
+- Faster analysis for queries with a large number of JOINs and subqueries.
+- The DNS cache is now updated automatically when there are too many network errors.
+- Table inserts no longer occur if the insert into one of the materialized views is not possible because it has too many parts.
+- Corrected the discrepancy in the event counters `Query`, `SelectQuery`, and `InsertQuery`.
+- Expressions like `tuple IN (SELECT tuple)` are allowed if the tuple types match.
+- A server with replicated tables can start even if you haven't configured ZooKeeper.
+- When calculating the number of available CPU cores, limits on cgroups are now taken into account ([Atri Sharma](https://github.com/ClickHouse/ClickHouse/pull/2325)).
+- Added chown for config directories in the systemd config file ([Mikhail Shiryaev](https://github.com/ClickHouse/ClickHouse/pull/2421)).
+
+#### Build changes: {#build-changes-4}
+
+- The gcc8 compiler can be used for builds.
+- Added the ability to build llvm from a submodule.
+- The version of the librdkafka library has been updated to v0.11.4.
+- Added the ability to use the system libcpuid library. The library version has been updated to 0.4.0.
+- Fixed the build using the vectorclass library ([Babacar Diassé](https://github.com/ClickHouse/ClickHouse/pull/2274)).
+- CMake now generates files for ninja by default (like when using `-G Ninja`).
+- Added the ability to use the libtinfo library instead of libtermcap ([Georgy Kondratiev](https://github.com/ClickHouse/ClickHouse/pull/2519)).
+- Fixed a header file conflict in Fedora Rawhide ([\#2520](https://github.com/ClickHouse/ClickHouse/issues/2520)).
+
+#### Backward incompatible changes: {#backward-incompatible-changes-7}
+
+- Removed escaping in `Vertical` and `Pretty*` formats and deleted the `VerticalRaw` format.
+- If servers with version 1.1.54388 (or newer) and servers with an older version are used simultaneously in a distributed query, and the query has the `cast(x, 'Type')` expression without the `AS` keyword and doesn't have the word `cast` in uppercase, an exception will be thrown with a message like `Not found column cast(0, 'UInt8') in block`. Solution: update the server on the entire cluster.
+
+### ClickHouse release 1.1.54385, 2018-06-01 {#clickhouse-release-1-1-54385-2018-06-01}
+
+#### Bug fixes: {#bug-fixes-21}
+
+- Fixed an error that in some cases caused ZooKeeper operations to block.
+
+### ClickHouse release 1.1.54383, 2018-05-22 {#clickhouse-release-1-1-54383-2018-05-22}
+
+#### Bug fixes: {#bug-fixes-22}
+
+- Fixed a slowdown of the replication queue if a table has many replicas.
+
+### ClickHouse release 1.1.54381, 2018-05-14 {#clickhouse-release-1-1-54381-2018-05-14}
+
+#### Bug fixes: {#bug-fixes-23}
+
+- Fixed a nodes leak in ZooKeeper when ClickHouse loses the connection to the ZooKeeper server.
+
+### ClickHouse release 1.1.54380, 2018-04-21 {#clickhouse-release-1-1-54380-2018-04-21}
+
+#### New features: {#new-features-13}
+
+- Added the table function `file(path, format, structure)`. An example reading bytes from `/dev/urandom`: `ln -s /dev/urandom /var/lib/clickhouse/user_files/random` and then `clickhouse-client -q "SELECT * FROM file('random', 'RowBinary', 'd UInt8') LIMIT 10"`.
+
+#### Improvements: {#improvements-12}
+
+- Subqueries can be wrapped in `()` brackets to enhance query readability. For example: `(SELECT 1) UNION ALL (SELECT 1)`.
+- Simple `SELECT` queries from the `system.processes` table are not included in the `max_concurrent_queries` limit.
+
+#### Bug fixes: {#bug-fixes-24}
+
+- Fixed incorrect behavior of the `IN` operator when selecting from `MATERIALIZED VIEW`.
+- Fixed incorrect filtering by partition index in expressions like `partition_key_column IN (...)`.
+- Fixed the inability to execute an `OPTIMIZE` query on a non-leader replica if `RENAME` was performed on the table.
+- Fixed the authorization error when executing `OPTIMIZE` or `ALTER` queries on a non-leader replica.
+- Fixed freezing of `KILL QUERY`.
+- Fixed an error in the ZooKeeper client library which led to loss of watches, freezing of the distributed DDL queue, and slowdowns in the replication queue if a non-empty `chroot` prefix is used in the ZooKeeper configuration.
+
+#### Backward incompatible changes: {#backward-incompatible-changes-8}
+
+- Removed support for expressions like `(a, b) IN (SELECT (a, b))` (you can use the equivalent expression `(a, b) IN (SELECT a, b)`). In previous releases, these expressions led to undetermined `WHERE` filtering or caused errors.
+
+### ClickHouse release 1.1.54378, 2018-04-16 {#clickhouse-release-1-1-54378-2018-04-16}
+
+#### New features: {#new-features-14}
+
+- The logging level can be changed without restarting the server.
+- Added the `SHOW CREATE DATABASE` query.
+- The `query_id` can be passed to `clickhouse-client`.
+- New setting: `max_network_bandwidth_for_all_users`.
+- Added support for `ALTER TABLE ... PARTITION ...` for `MATERIALIZED VIEW`.
+- Added information about the size of data parts in uncompressed form in the system table.
+- Server-to-server encryption support for distributed tables (`<secure>1</secure>` in the replica config in `<remote_servers>`).
+- Configuration of the table level for the `ReplicatedMergeTree` family in order to minimize the amount of data stored in ZooKeeper: `use_minimalistic_checksums_in_zookeeper = 1`
+- Configuration of the `clickhouse-client` prompt. By default, server names are now output to the prompt. The server's display name can be changed. It's also sent in the `X-ClickHouse-Display-Name` HTTP header (Kirill Shvakov).
+- Multiple comma-separated `topics` can be specified for the `Kafka` engine (Tobias Adamson)
+- When a query is stopped by `KILL QUERY` or `replace_running_query`, the client receives the `Query was canceled` exception instead of an incomplete result.
+
+#### Improvements: {#improvements-13}
+
+- `ALTER TABLE ... DROP/DETACH PARTITION` queries are run at the front of the replication queue.
+- `SELECT ... FINAL` and `OPTIMIZE ... FINAL` can be used even when the table has a single data part.
+- A `query_log` table is recreated on the fly if it was deleted manually (Kirill Shvakov).
+- The `lengthUTF8` function runs faster (zhang2014).
+- Improved performance of synchronous inserts in `Distributed` tables (`insert_distributed_sync = 1`) when there is a very large number of shards.
+- The server accepts the `send_timeout` and `receive_timeout` settings from the client and applies them when connecting to the client (they are applied in reverse order: the server socket's `send_timeout` is set to the `receive_timeout` value received from the client, and vice versa).
+- More robust crash recovery for asynchronous inserts into `Distributed` tables.
+- The return type of the `countEqual` function changed from `UInt32` to `UInt64` (谢磊).
+
+#### Bug fixes: {#bug-fixes-25}
+
+- Fixed an error with `IN` when the left side of the expression is `Nullable`.
+- Correct results are now returned when using tuples with `IN` when some of the tuple components are in the table index.
+- The `max_execution_time` limit now works correctly with distributed queries.
+- Fixed errors when calculating the size of composite columns in the `system.columns` table.
+- Fixed an error when creating a temporary table `CREATE TEMPORARY TABLE IF NOT EXISTS.`
+- Fixed errors in `StorageKafka` (\#\#2075)
+- Fixed server crashes from invalid arguments of certain aggregate functions.
+- Fixed the error that prevented the `DETACH DATABASE` query from stopping background tasks for `ReplicatedMergeTree` tables.
+- The `Too many parts` state is less likely to happen when inserting into aggregating materialized views (\#2084).
+- Corrected recursive handling of substitutions in the config if a substitution must be followed by another substitution on the same level.
+- Corrected the syntax in the metadata file when creating a `VIEW` that uses a query with `UNION ALL`.
+- `SummingMergeTree` now works correctly for summation of nested data structures with a composite key.
+- Fixed the possibility of a race condition when choosing the leader for `ReplicatedMergeTree` tables.
+
+#### Build changes: {#build-changes-5}
+
+- The build supports `ninja` instead of `make` and uses `ninja` by default for building releases.
+- Renamed packages: `clickhouse-server-base` to `clickhouse-common-static`; `clickhouse-server-common` to `clickhouse-server`; `clickhouse-common-dbg` to `clickhouse-common-static-dbg`. To install, use `clickhouse-server clickhouse-client`. Packages with the old names will still load in the repositories for backward compatibility.
+
+#### Backward incompatible changes: {#backward-incompatible-changes-9}
+
+- Removed the special interpretation of an IN expression if an array is specified on the left side. Previously, the expression `arr IN (set)` was interpreted as “at least one `arr` element belongs to the `set`”. To get the same behavior in the new version, write `arrayExists(x -> x IN (set), arr)`; see the sketch after this list.
+- Disabled the incorrect use of the socket option `SO_REUSEPORT`, which was incorrectly enabled by default in the Poco library. Note that on Linux there is no longer any reason to simultaneously specify the addresses `::` and `0.0.0.0` for listen – use just `::`, which allows listening to the connection both over IPv4 and IPv6 (with the default kernel config settings). You can also revert to the behavior from previous versions by specifying `<listen_reuse_port>1</listen_reuse_port>` in the config.
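+
+A sketch of the rewrite for the removed `arr IN (set)` interpretation mentioned above; the table name `t` and the array column `arr` are hypothetical:
+
+``` sql
+-- Previously, `arr IN (1, 2, 3)` meant "at least one element of arr
+-- belongs to the set". The equivalent in the new version:
+SELECT arrayExists(x -> x IN (1, 2, 3), arr) FROM t
+```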
+
+### ClickHouse release 1.1.54370, 2018-03-16 {#clickhouse-release-1-1-54370-2018-03-16}
+
+#### New features: {#new-features-15}
+
+- Added the `system.macros` table and auto updating of macros when the config file is changed.
+- Added the `SYSTEM RELOAD CONFIG` query.
+- Added the `maxIntersections(left_col, right_col)` aggregate function, which returns the maximum number of simultaneously intersecting intervals `[left; right]`. The `maxIntersectionsPosition(left, right)` function returns the beginning of the “maximum” interval. ([Michael Furmur](https://github.com/ClickHouse/ClickHouse/pull/2012)).
+
+#### Improvements: {#improvements-14}
+
+- When inserting data in a `Replicated` table, fewer requests are made to `ZooKeeper` (and most of the user-level errors have disappeared from the `ZooKeeper` log).
+- Added the ability to create aliases for data sets. Example: `WITH (1, 2, 3) AS set SELECT number IN set FROM system.numbers LIMIT 10`.
+
+#### Bug fixes: {#bug-fixes-26}
+
+- Fixed the `Illegal PREWHERE` error when reading from Merge tables over `Distributed` tables.
+- Added fixes that allow starting clickhouse-server in IPv4-only Docker containers.
+- Fixed a race condition when reading from the system `system.parts_columns tables.`
+- Removed double buffering during a synchronous insert to a `Distributed` table, which could have caused the connection to time out.
+- Fixed a bug that caused excessively long waits for an unavailable replica before beginning a `SELECT` query.
+- Fixed incorrect dates in the `system.parts` table.
+- Fixed a bug that made it impossible to insert data in a `Replicated` table if `chroot` was non-empty in the configuration of the `ZooKeeper` cluster.
+- Fixed the vertical merging algorithm for an empty `ORDER BY` table.
+- Restored the ability to use dictionaries in queries to remote tables, even if these dictionaries are not present on the requestor server. This functionality was lost in release 1.1.54362.
+- Restored the behavior for queries like `SELECT * FROM remote('server2', default.table) WHERE col IN (SELECT col2 FROM default.table)` when the right side of the `IN` should use a remote `default.table` instead of a local one. This behavior was broken in version 1.1.54358.
+- Removed extraneous error-level logging of `Not found column ... in block`.
+
+### ClickHouse release 1.1.54362, 2018-03-11 {#clickhouse-release-1-1-54362-2018-03-11}
+
+#### New features: {#new-features-16}
+
+- Aggregation without `GROUP BY` for an empty set (such as `SELECT count(*) FROM table WHERE 0`) now returns a result with one row with null values for aggregate functions, in compliance with the SQL standard. To restore the old behavior (return an empty result), set `empty_result_for_aggregation_by_empty_set` to 1.
+- Added type conversion for `UNION ALL`. Different alias names are allowed in `SELECT` positions in `UNION ALL`, in compliance with the SQL standard.
+- Arbitrary expressions are supported in `LIMIT BY` clauses. Previously, it was only possible to use columns resulting from `SELECT`.
+- An index of `MergeTree` tables is used when `IN` is applied to a tuple of expressions from the columns of the primary key. Example: `WHERE (UserID, EventDate) IN ((123, '2000-01-01'), ...)`.
+- Added the `clickhouse-copier` tool for copying between clusters and resharding data (beta).
+- Added consistent hashing functions: `yandexConsistentHash`, `jumpConsistentHash`, `sumburConsistentHash`. They can be used as a sharding key in order to reduce the amount of network traffic during subsequent reshardings.
+- Added functions: `arrayAny`, `arrayAll`, `hasAny`, `hasAll`, `arrayIntersect`, `arrayResize`.
+- Added the `arrayCumSum` function (Javi Santana).
+- Added the `parseDateTimeBestEffort`, `parseDateTimeBestEffortOrZero`, and `parseDateTimeBestEffortOrNull` functions to read DateTime from a string containing text in a wide variety of possible formats.
+- Data can be partially reloaded from external dictionaries during updating (load just the records in which the value of the specified field is greater than in the previous download) (Arsen Hakobyan).
+- Added the `cluster` table function. Example: `cluster(cluster_name, db, table)`. The `remote` table function can accept the cluster name as the first argument, if it is specified as an identifier.
+- The `remote` and `cluster` table functions can be used in `INSERT` queries.
+- Added the `create_table_query` and `engine_full` virtual columns to the `system.tables` table. The `metadata_modification_time` column is virtual.
+- Added the `data_path` and `metadata_path` columns to the `system.tables` and `system.databases` tables, and added the `path` column to the `system.parts` and `system.parts_columns` tables.
+- Added additional information about merges in the `system.part_log` table.
+- An arbitrary partitioning key can be used for the `system.query_log` table (Kirill Shvakov).
+- The `SHOW TABLES` query now also shows temporary tables. Added temporary tables and the `is_temporary` column to `system.tables` (zhang2014).
+- Added the `DROP TEMPORARY TABLE` and `EXISTS TEMPORARY TABLE` queries (zhang2014).
+- Support for `SHOW CREATE TABLE` for temporary tables (zhang2014).
+- Added the `system_profile` configuration parameter for the settings used by internal processes.
+- Support for loading `object_id` as an attribute in `MongoDB` dictionaries (Pavel Litvinenko).
+- Reading `null` as the default value when loading data for an external dictionary with the `MongoDB` source (Pavel Litvinenko).
+- Reading `DateTime` values in the `Values` format from a Unix timestamp without single quotes.
+- Failover is supported in `remote` table functions for cases when some of the replicas are missing the requested table.
+- Configuration settings can be overridden in the command line when you run `clickhouse-server`. Example: `clickhouse-server -- --logger.level=information`.
+- Implemented the `empty` function for a `FixedString` argument: the function returns 1 if the string consists entirely of null bytes (zhang2014).
+- Added the `listen_try` configuration parameter for listening to at least one of the listen addresses without quitting, if some of the addresses can't be listened to (useful for systems with disabled support for IPv4 or IPv6).
+- Added the `VersionedCollapsingMergeTree` table engine.
+- Support for rows and arbitrary numeric types for the `library` dictionary source.
+- `MergeTree` tables can be used without a primary key (you need to specify `ORDER BY tuple()`); see the sketch after this list.
+- A `Nullable` type can be `CAST` to a non-`Nullable` type if the argument is not `NULL`.
+- `RENAME TABLE` can be performed for `VIEW`.
+- Added the `throwIf` function.
+- Added the `odbc_default_field_size` option, which allows you to extend the maximum size of the value loaded from an ODBC source (by default, it is 1024).
+- The `system.processes` table and `SHOW PROCESSLIST` now have the `is_cancelled` and `peak_memory_usage` columns.
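+
+A minimal sketch of a primary-key-less `MergeTree` as mentioned in the list above; the table and column names are invented:
+
+``` sql
+-- An empty sorting key stands in for the primary key
+CREATE TABLE t_unsorted (x UInt64, s String)
+ENGINE = MergeTree
+ORDER BY tuple()
+```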
+
+#### Improvements: {#improvements-15}
+
+- Limits and quotas on the result are no longer applied to intermediate data for `INSERT SELECT` queries or for `SELECT` subqueries.
+- Fewer false triggers of `force_restore_data` when checking the status of `Replicated` tables when the server starts.
+- Added the `allow_distributed_ddl` option.
+- Nondeterministic functions are not allowed in expressions for `MergeTree` table keys.
+- Files with substitutions from `config.d` directories are loaded in alphabetical order.
+- Improved performance of the `arrayElement` function in the case of a constant multidimensional array with an empty array as one of the elements. Example: `[[1], []][x]`.
+- The server starts faster now when using configuration files with very large substitutions (for instance, very large lists of IP networks).
+- When running a query, table valued functions run once. Previously, `remote` and `mysql` table valued functions performed the same query twice to retrieve the table structure from a remote server.
+- The `MkDocs` documentation generator is used.
+- When you try to delete a table column that `DEFAULT`/`MATERIALIZED` expressions of other columns depend on, an exception is thrown (zhang2014).
+- Added the ability to parse an empty line in text formats as the number 0 for `Float` data types. This feature was previously available but was lost in release 1.1.54342.
+- `Enum` values can be used in `min`, `max`, `sum` and some other functions. In these cases, it uses the corresponding numeric values. This feature was previously available but was lost in release 1.1.54337.
+- Added `max_expanded_ast_elements` to restrict the size of the AST after recursively expanding aliases.
+
+#### Bug fixes: {#bug-fixes-27}
+
+- Fixed cases when unnecessary columns were removed from subqueries in error, or not removed from subqueries containing `UNION ALL`.
+- Fixed a bug in merges for `ReplacingMergeTree` tables.
+- Fixed synchronous insertions in `Distributed` tables (`insert_distributed_sync = 1`).
+- Fixed a segfault for certain uses of `FULL` and `RIGHT JOIN` with duplicate columns in subqueries.
+- Fixed a segfault for certain uses of `replace_running_query` and `KILL QUERY`.
+- Fixed the order of the `source` and `last_exception` columns in the `system.dictionaries` table.
+- Fixed a bug when the `DROP DATABASE` query did not delete the file with metadata.
+- Fixed the `DROP DATABASE` query for `Dictionary` databases.
+- Fixed the low precision of the `uniqHLL12` and `uniqCombined` functions for cardinalities greater than 100 million items (Alex Bocharov).
+- Fixed the calculation of implicit default values when necessary to simultaneously calculate default explicit expressions in `INSERT` queries (zhang2014).
+- Fixed a rare case when a query to a `MergeTree` table couldn't finish (chenxing-xc).
+- Fixed a crash that occurred when running a `CHECK` query for `Distributed` tables if all shards are local (chenxing.xc).
+- Fixed a slight performance regression with functions that use regular expressions.
+- Fixed a performance regression when creating multidimensional arrays from complex expressions.
+- Fixed a bug that could cause an extra `FORMAT` section to appear in an `.sql` file with metadata.
+- Fixed a bug that caused the `max_table_size_to_drop` limit to apply when trying to delete a `MATERIALIZED VIEW` looking at an explicitly specified table.
+- Fixed incompatibility with old clients (old clients were sometimes sent data with the `DateTime('timezone')` type, which they do not understand).
+- Fixed a bug when reading `Nested` column elements of structures that were added using `ALTER` but that are empty for the old partitions, when the conditions for these columns moved to `PREWHERE`.
+- Fixed a bug when filtering tables by the virtual `_table` column in queries to `Merge` tables.
+- Fixed a bug when using `ALIAS` columns in `Distributed` tables.
+- Fixed a bug that made dynamic compilation impossible for queries with aggregate functions from the `quantile` family.
+- Fixed a race condition in the query execution pipeline that occurred in very rare cases when using `Merge` tables with a large number of tables, and when using `GLOBAL` subqueries.
+- Fixed a crash when passing arrays of different sizes to an `arrayReduce` function when using aggregate functions from multiple arguments.
+- Prohibited the use of queries with `UNION ALL` in a `MATERIALIZED VIEW`.
+- Fixed an error during initialization of the `part_log` system table when the server starts (by default, `part_log` is disabled).
+
+#### Backward incompatible changes: {#backward-incompatible-changes-10}
+
+- Removed the `distributed_ddl_allow_replicated_alter` option. This behavior is enabled by default.
+- Removed the `strict_insert_defaults` setting. If you were using this functionality, write to `clickhouse-feedback@yandex-team.com`.
+- Removed the `UnsortedMergeTree` engine.
+
+### ClickHouse release 1.1.54343, 2018-02-05 {#clickhouse-release-1-1-54343-2018-02-05}
+
+- Added macros support for defining cluster names in distributed DDL queries and constructors of Distributed tables: `CREATE TABLE distr ON CLUSTER '{cluster}' (...) ENGINE = Distributed('{cluster}', 'db', 'table')`.
+- Now queries like `SELECT ... FROM table WHERE expr IN (subquery)` are processed using the `table` index.
+- Improved processing of duplicates when inserting to Replicated tables, so they no longer slow down execution of the replication queue.
+
+### ClickHouse release 1.1.54342, 2018-01-22 {#clickhouse-release-1-1-54342-2018-01-22}
+
+This release contains bug fixes for the previous release 1.1.54337:
+
+- Fixed a regression in 1.1.54337: if the default user has readonly access, then the server refuses to start up with the message `Cannot create database in readonly mode`.
+- Fixed a regression in 1.1.54337: on systems with systemd, logs are always written to syslog regardless of the configuration; the watchdog script still uses init.d.
+- Fixed a regression in 1.1.54337: wrong default configuration in the Docker image.
+- Fixed nondeterministic behavior of GraphiteMergeTree (you can see it in log messages `Data after merge is not byte-identical to the data on another replicas`).
+- Fixed a bug that may lead to inconsistent merges after an OPTIMIZE query to Replicated tables (you may see it in log messages `Part ... intersects the previous part`).
+- Buffer tables now work correctly when MATERIALIZED columns are present in the destination table (by zhang2014).
+- Fixed a bug in the implementation of NULL.
+
+### ClickHouse release 1.1.54337, 2018-01-18 {#clickhouse-release-1-1-54337-2018-01-18}
+
+#### New features: {#new-features-17}
+
+- Added support for storage of multi-dimensional arrays and tuples (`Tuple` data type) in tables.
+- Support for table functions for `DESCRIBE` and `INSERT` queries. Added support for subqueries in `DESCRIBE`. Examples: `DESC TABLE remote('host', default.hits)`; `DESC TABLE (SELECT 1)`; `INSERT INTO TABLE FUNCTION remote('host', default.hits)`. Support for `INSERT INTO TABLE` in addition to `INSERT INTO`.
+- Improved support for time zones. The `DateTime` data type can be annotated with the time zone that is used for parsing and formatting in text formats. Example: `DateTime('Europe/Moscow')`. When time zones are specified in functions for `DateTime` arguments, the return type will track the time zone, and the value will be displayed as expected.
+- Added the functions `toTimeZone`, `timeDiff`, `toQuarter`, `toRelativeQuarterNum`. The `toRelativeHour`/`Minute`/`Second` functions can take a value of type `Date` as an argument. The `now` function name is case-sensitive.
+- Added the `toStartOfFifteenMinutes` function (Kirill Shvakov).
+- Added the `clickhouse format` tool for formatting queries.
+- Added the `format_schema_path` configuration parameter (Marek Vavruša). It is used for specifying a schema in `Cap'n Proto` format. Schema files can be located only in the specified directory.
+- Added support for config substitutions (`incl` and `conf.d`) for configuration of external dictionaries and models (Pavel Yakunin).
+- Added a column with documentation for the `system.settings` table (Kirill Shvakov).
+- Added the `system.parts_columns` table with information about column sizes in each data part of `MergeTree` tables.
+- Added the `system.models` table with information about loaded `CatBoost` machine learning models.
+- Added the `mysql` and `odbc` table functions and corresponding `MySQL` and `ODBC` table engines for accessing remote databases. This functionality is in the beta stage.
+- Added the possibility to pass an argument of type `AggregateFunction` for the `groupArray` aggregate function (so you can create an array of states of some aggregate function).
+- Removed restrictions on various combinations of aggregate function combinators. For example, you can use `avgForEachIf` as well as `avgIfForEach` aggregate functions, which have different behaviors.
+- The `-ForEach` aggregate function combinator is extended for the case of aggregate functions of multiple arguments.
+- Added support for aggregate functions of `Nullable` arguments even for cases when the function returns a non-`Nullable` result (added with the contribution of Silviu Caragea). Example: `groupArray`, `groupUniqArray`, `topK`.
+- Added `max_client_network_bandwidth` for `clickhouse-client` (Kirill Shvakov).
+- Users with the `readonly = 2` setting are allowed to work with TEMPORARY tables (CREATE, DROP, INSERT…) (Kirill Shvakov).
+- Added support for using multiple consumers with the `Kafka` engine. Extended configuration options for `Kafka` (Marek Vavruša).
+- Added the `intExp3` and `intExp4` functions.
+- Added the `sumKahan` aggregate function.
+- Added the to\*Number\*OrNull functions, where \*Number\* is a numeric type.
+- Added support for `WITH` clauses for an `INSERT SELECT` query (author: zhang2014).
+- Added settings: `http_connection_timeout`, `http_send_timeout`, `http_receive_timeout`. In particular, these settings are used for downloading data parts for replication. Changing these settings allows for faster failover if the network is overloaded.
+- Added support for `ALTER` for tables of type `Null` (Anastasiya Tsarkova).
+- The `reinterpretAsString` function is extended for all data types that are stored contiguously in memory.
+- Added the `--silent` option for the `clickhouse-local` tool. It suppresses printing query execution info in stderr.
+- Added support for reading values of type `Date` from text in a format where the month and/or day of the month is specified using a single digit instead of two digits (Amos Bird).
+
+#### Performance optimizations: {#performance-optimizations}
+
+- Improved performance of the aggregate functions `min`, `max`, `any`, `anyLast`, `anyHeavy`, `argMin`, `argMax` for string arguments.
+- Improved performance of the functions `isInfinite`, `isFinite`, `isNaN`, `roundToExp2`.
+- Improved performance of parsing and formatting `Date` and `DateTime` type values in text format.
+- Improved performance and precision of parsing floating point numbers.
+- Lowered memory usage for `JOIN` in the case when the left and right parts have columns with identical names that are not contained in `USING`.
+- Improved performance of the aggregate functions `varSamp`, `varPop`, `stddevSamp`, `stddevPop`, `covarSamp`, `covarPop`, `corr` by reducing computational stability. The old functions are available under the names `varSampStable`, `varPopStable`, `stddevSampStable`, `stddevPopStable`, `covarSampStable`, `covarPopStable`, `corrStable`.
+
+#### Bug fixes: {#bug-fixes-28}
+
+- Fixed data deduplication after running a `DROP` or `DETACH PARTITION` query. In the previous version, dropping a partition and inserting the same data again was not working because inserted blocks were considered duplicates.
+- رفع اشکال که می تواند به تفسیر نادرست از منجر شود `WHERE` بند برای `CREATE MATERIALIZED VIEW` نمایش داده شد با `POPULATE` . +- رفع اشکال در استفاده از `root_path` پارامتر در `zookeeper_servers` پیکربندی. +- نتایج غیر منتظره ثابت از عبور از `Date` نشانوند به `toStartOfDay` . +- ثابت `addMonths` و `subtractMonths` توابع و حساب برای `INTERVAL n MONTH` در مواردی که نتیجه سال گذشته است. +- اضافه شدن پشتیبانی از دست رفته برای `UUID` نوع داده برای `DISTINCT` , `JOIN` و `uniq` توابع جمع و لغت نامه های خارجی (اوگنی ایوانف). پشتیبانی از `UUID` هنوز ناقصه +- ثابت `SummingMergeTree` رفتار در مواردی که ردیف خلاصه به صفر است. +- رفع مختلف برای `Kafka` engine (Marek Vavruša). +- رفتار نادرست ثابت از `Join` موتور جدول (پرنده ایموس). +- رفتار تخصیص نادرست ثابت تحت بورس و سیستم عامل ایکس. +- این `extractAll` تابع در حال حاضر مسابقات خالی پشتیبانی می کند. +- ثابت خطا که استفاده از مسدود `libressl` به جای `openssl` . +- ثابت `CREATE TABLE AS SELECT` پرس و جو از جداول موقت. +- ثابت غیر atomicity از به روز رسانی تکرار صف. این می تواند منجر به کپی بودن از همگام سازی تا سرور ری استارت. +- سرریز ممکن ثابت در `gcd` , `lcm` و `modulo` (`%` اپراتور) (ماکس اسکروخد). +- `-preprocessed` فایل ها در حال حاضر پس از تغییر ایجاد شده است `umask` (`umask` را می توان در پیکربندی تغییر). +- رفع اشکال در چک پس زمینه از قطعات (`MergeTreePartChecker` )هنگام استفاده از یک کلید پارتیشن سفارشی . +- تجزیه ثابت از تاپل (ارزش های `Tuple` نوع داده) در فرمت های متن. +- پیام های خطا بهبود یافته در مورد انواع ناسازگار منتقل شده به `multiIf` , `array` و برخی دیگر توابع. +- پشتیبانی دوباره طراحی شده برای `Nullable` انواع. اشکالات ثابت که ممکن است به یک تصادف سرور منجر شود. ثابت تقریبا تمام اشکالات دیگر مربوط به `NULL` پشتیبانی: نادرست نوع تبدیل در وارد کردن را انتخاب کنید کافی برای حمایت از Nullable در داشتن و PREWHERE, `join_use_nulls` حالت, انواع قابل ابطال به عنوان استدلال `OR` اپراتور و غیره +- اشکالات مختلف ثابت مربوط به معانی داخلی انواع داده ها. نمونه: جمع غیر ضروری از `Enum` فیلدهای تایپ شده `SummingMergeTree` ; تراز دلخواه `Enum` انواع در `Pretty` فرمت, و غیره. +- چک سختگیرانه تر برای ترکیب مجاز از ستون کامپوزیت. +- ثابت سرریز در هنگام تعیین یک پارامتر بسیار بزرگ برای `FixedString` نوع داده. +- رفع اشکال در `topK` تابع جمع در یک مورد عمومی. +- اضافه شدن چک از دست رفته برای برابری اندازه مجموعه ای در استدلال از انواع ن-عرایی از توابع کل با `-Array` ترکیب کننده. +- رفع اشکال در `--pager` برای `clickhouse-client` (نویسنده: کس1322). +- ثابت دقت از `exp10` تابع. +- ثابت رفتار `visitParamExtract` تابع برای انطباق بهتر با اسناد و مدارک. +- ثابت تصادف زمانی که انواع داده های نادرست مشخص شده است. +- رفتار را ثابت کرد `DISTINCT` در مورد زمانی که همه ستون ثابت هستند. +- قالب بندی پرس و جو ثابت در مورد استفاده از `tupleElement` تابع با یک عبارت ثابت پیچیده به عنوان شاخص عنصر تاپل. +- رفع اشکال در `Dictionary` جداول برای `range_hashed` واژهنامهها. +- رفع اشکال که منجر به ردیف بیش از حد در نتیجه `FULL` و `RIGHT JOIN` (پرنده ایموس). +- ثابت سقوط سرور در هنگام ایجاد و از بین بردن فایل های موقت در `config.d` دایرکتوری در طول بازنگری پیکربندی. +- ثابت `SYSTEM DROP DNS CACHE` پرس و جو: کش سرخ شد اما نشانی از گره های خوشه ای به روز شد. +- رفتار را ثابت کرد `MATERIALIZED VIEW` پس از اجرای `DETACH TABLE` for the table under the view (Marek Vavruša). + +#### بهبود ساخت: {#build-improvements-4} + +- این `pbuilder` ابزار برای ساخت استفاده می شود. روند ساخت تقریبا به طور کامل مستقل از محیط میزبان ساخت است. +- ساخت تک برای نسخه های سیستم عامل های مختلف استفاده می شود. بسته ها و فایل های باینری سازگار با طیف گسترده ای از سیستم های لینوکس ساخته شده است. 
+- اضافه شدن `clickhouse-test` بسته این می تواند مورد استفاده قرار گیرد برای اجرای تست های کاربردی. +- قطار سریع السیر منبع هم اکنون می توانید به مخزن منتشر شود. این می تواند مورد استفاده قرار گیرد به تولید مثل ساخت بدون استفاده از گیتهاب. +- اضافه شده ادغام محدود با تراویس سی. با توجه به محدودیت در زمان ساخت در تراویس, تنها ساخت اشکال زدایی تست شده است و یک زیر مجموعه محدود از تست اجرا می شوند. +- اضافه شدن پشتیبانی برای `Cap'n'Proto` در ساخت به طور پیش فرض. +- فرمت منابع اسناد را تغییر داد `Restricted Text` به `Markdown`. +- اضافه شدن پشتیبانی برای `systemd` (ولادیمیر اسمیرنوف). این است که به طور پیش فرض به دلیل ناسازگاری با برخی از تصاویر سیستم عامل غیر فعال است و می تواند به صورت دستی فعال کنید. +- برای تولید کد پویا, `clang` و `lld` به جاسازی شده `clickhouse` دودویی. همچنین می توانند به عنوان `clickhouse clang` و `clickhouse lld` . +- استفاده از پسوندهای گنو از کد حذف شده است. فعال کردن `-Wextra` انتخاب هنگام ساخت با `clang` به طور پیش فرض است `libc++` به جای `libstdc++`. +- استخراج شده `clickhouse_parsers` و `clickhouse_common_io` کتابخانه ها برای سرعت بخشیدن به ایجاد ابزارهای مختلف. + +#### تغییرات ناسازگار به عقب: {#backward-incompatible-changes-11} + +- قالب برای علامت در `Log` جداول نوع که شامل `Nullable` ستون در راه ناسازگار به عقب تغییر یافت. اگر شما این جداول, شما باید به تبدیل `TinyLog` قبل از شروع نسخه سرور جدید تایپ کنید. برای انجام این کار جایگزین کنید `ENGINE = Log` با `ENGINE = TinyLog` در مربوطه `.sql` پرونده در `metadata` فهرست راهنما. اگر جدول شما ندارد `Nullable` ستون و یا اگر نوع جدول خود را نمی `Log` پس نیازی نیست کاری بکنی +- حذف `experimental_allow_extended_storage_definition_syntax` تنظیمات. در حال حاضر این ویژگی به طور پیش فرض فعال است. +- این `runningIncome` تابع به تغییر نام داد `runningDifferenceStartingWithFirstvalue` برای جلوگیری از سردرگمی. +- حذف `FROM ARRAY JOIN arr` نحو زمانی که مجموعه اضافه کردن به طور مستقیم پس از با هیچ جدول مشخص (پرنده ایموس). +- حذف `BlockTabSeparated` فرمت که صرفا برای اهداف تظاهرات مورد استفاده قرار گرفت. +- فرمت دولت برای توابع کل تغییر `varSamp`, `varPop`, `stddevSamp`, `stddevPop`, `covarSamp`, `covarPop`, `corr`. اگر شما ایالات از این توابع کل در جداول ذخیره شده اند (با استفاده از `AggregateFunction` نوع داده و یا نمایش تحقق با کشورهای مربوطه), لطفا به ارسال clickhouse-feedback@yandex-team.com. +- در نسخه های سرور قبلی یک ویژگی مستند نشده وجود داشت: اگر یک تابع جمع شده به پارامترها بستگی داشته باشد هنوز هم می توانید بدون پارامتر در نوع داده قابلیت کارکرد مشخص کنید. مثال: `AggregateFunction(quantiles, UInt64)` به جای `AggregateFunction(quantiles(0.5, 0.9), UInt64)`. این ویژگی از دست داده بود. ما قصد داریم دوباره در نسخه های بعدی پشتیبانی کنیم. +- انواع داده شمارشی را نمی توان در توابع جمع دقیقه/حداکثر استفاده می شود. این توانایی خواهد شد در نسخه بعدی بازگشت. + +#### لطفا توجه داشته باشید در هنگام به روز رسانی: {#please-note-when-upgrading} + +- هنگام انجام یک به روز رسانی نورد در یک خوشه, در نقطه ای که برخی از کپی در حال اجرا هستند نسخه های قدیمی از تاتر و برخی در حال اجرا هستند نسخه جدید, تکرار است به طور موقت متوقف و پیام `unknown parameter 'shard'` به نظر می رسد در ورود به سیستم. تکرار ادامه خواهد داد پس از همه کپی از خوشه به روز می شوند. +- اگر نسخه های مختلف از تاتر در حال اجرا بر روی سرورهای خوشه, ممکن است که نمایش داده شد توزیع با استفاده از توابع زیر نتایج نادرست داشته باشد: `varSamp`, `varPop`, `stddevSamp`, `stddevPop`, `covarSamp`, `covarPop`, `corr`. شما باید تمام گره های خوشه ای به روز رسانی. 
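+
+For illustration, the `Log` to `TinyLog` conversion described in the backward incompatible changes above amounts to editing one line of the table's attach script in the `metadata` directory. A minimal sketch (the database, table and column names here are hypothetical, not taken from the changelog):
+
+``` sql
+-- metadata/default/visits_log.sql, edited before starting the new server version.
+-- Before the edit, the last line read: ENGINE = Log
+ATTACH TABLE visits_log
+(
+    `message` Nullable(String)
+)
+ENGINE = TinyLog
+```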
+
+## [Changelog for 2017](https://github.com/ClickHouse/ClickHouse/blob/master/docs/en/changelog/2017.md) {#changelog-for-2017}
diff --git a/docs/fa/whats_new/changelog/2019.md b/docs/fa/whats_new/changelog/2019.md
new file mode 100644
index 00000000000..60e34307c25
--- /dev/null
+++ b/docs/fa/whats_new/changelog/2019.md
@@ -0,0 +1,2074 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 77
+toc_title: '2019'
+---
+
+## ClickHouse release v19.17 {#clickhouse-release-v19-17}
+
+### ClickHouse release v19.17.6.36, 2019-12-27 {#clickhouse-release-v19-17-6-36-2019-12-27}
+
+#### Bug Fix {#bug-fix}
+
+- Fixed potential buffer overflow in decompress. A malicious user could pass fabricated compressed data that causes a read past the buffer. This issue was found by Eldar Zaitov from the Yandex information security team. [\#8404](https://github.com/ClickHouse/ClickHouse/pull/8404) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed possible server crash (`std::terminate`) when the server cannot send or write data in JSON or XML format with values of the String data type (that require UTF-8 validation), or when compressing result data with the Brotli algorithm, or in some other rare cases. [\#8384](https://github.com/ClickHouse/ClickHouse/pull/8384) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed dictionaries with a source from a ClickHouse `VIEW`: now reading such dictionaries doesn't cause the error `There is no query`. [\#8351](https://github.com/ClickHouse/ClickHouse/pull/8351) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed checking whether a client host is allowed by the host_regexp specified in users.xml. [\#8241](https://github.com/ClickHouse/ClickHouse/pull/8241), [\#8342](https://github.com/ClickHouse/ClickHouse/pull/8342) ([Vitaly Baranov](https://github.com/vitlibar))
+- `RENAME TABLE` for a distributed table now renames the folder containing inserted data before sending it to shards. This fixes an issue with successive renames `tableA->tableB`, `tableC->tableA`. [\#8306](https://github.com/ClickHouse/ClickHouse/pull/8306) ([tavplubix](https://github.com/tavplubix))
+- `range_hashed` external dictionaries created by DDL queries now allow ranges of arbitrary numeric types. [\#8275](https://github.com/ClickHouse/ClickHouse/pull/8275) ([alesapin](https://github.com/alesapin))
+- Fixed the `INSERT INTO table SELECT ... FROM mysql(...)` table function. [\#8234](https://github.com/ClickHouse/ClickHouse/pull/8234) ([tavplubix](https://github.com/tavplubix))
+- Fixed a segfault in `INSERT INTO TABLE FUNCTION file()` while inserting into a file that doesn't exist. Now in this case the file is created and then the insert is processed. [\#8177](https://github.com/ClickHouse/ClickHouse/pull/8177) ([Olga Khvostikova](https://github.com/stavrolia))
+- Fixed a bitmapAnd error when intersecting an aggregated bitmap and a scalar bitmap. [\#8082](https://github.com/ClickHouse/ClickHouse/pull/8082) ([Yue Huang](https://github.com/moon03432))
+- Fixed a segfault when an `EXISTS` query was used without the `TABLE` or `DICTIONARY` qualifier, just like `EXISTS t`. [\#8213](https://github.com/ClickHouse/ClickHouse/pull/8213) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed the return type for the functions `rand` and `randConstant` in case of a nullable argument. Now the functions always return `UInt32` and never `Nullable(UInt32)`. [\#8204](https://github.com/ClickHouse/ClickHouse/pull/8204) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed `DROP DICTIONARY IF EXISTS db.dict`: now it doesn't throw an exception if `db` doesn't exist. [\#8185](https://github.com/ClickHouse/ClickHouse/pull/8185) ([Vitaly Baranov](https://github.com/vitlibar))
+- If a table wasn't completely dropped because of a server crash, the server will try to restore and load it. [\#8176](https://github.com/ClickHouse/ClickHouse/pull/8176) ([tavplubix](https://github.com/tavplubix))
+- Fixed a trivial count query for a distributed table if there are more than two shard local tables. [\#8164](https://github.com/ClickHouse/ClickHouse/pull/8164) ([小路](https://github.com/nicelulu))
+- Fixed a bug that led to a data race in DB::BlockStreamProfileInfo::calculateRowsBeforeLimit(). [\#8143](https://github.com/ClickHouse/ClickHouse/pull/8143) ([Alexander Kazakov](https://github.com/Akazz))
+- Fixed `ALTER table MOVE part` executed immediately after merging the specified part, which could cause moving a part that the specified part was merged into. Now it correctly moves the specified part. [\#8104](https://github.com/ClickHouse/ClickHouse/pull/8104) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Expressions for dictionaries can be specified as strings now. This is useful for calculating attributes while extracting data from non-ClickHouse sources, because it allows using non-ClickHouse syntax for those expressions. [\#8098](https://github.com/ClickHouse/ClickHouse/pull/8098) ([alesapin](https://github.com/alesapin))
+- Fixed a very rare race in `clickhouse-copier` because of an overflow in ZXid. [\#8088](https://github.com/ClickHouse/ClickHouse/pull/8088) ([Ding Xiang Fei](https://github.com/dingxiangfei2009))
+- Fixed the bug when, after a query failed (due to “Too many simultaneous queries” for example), it would not read the external tables info, and the next request would interpret this info as the beginning of the next query, causing an error like `Unknown packet from client`. [\#8084](https://github.com/ClickHouse/ClickHouse/pull/8084) ([Azat Khuzhin](https://github.com/azat))
+- Avoid null dereference after “Unknown packet X from server”. [\#8071](https://github.com/ClickHouse/ClickHouse/pull/8071) ([Azat Khuzhin](https://github.com/azat))
+- Restored support of all ICU locales, added the ability to apply collations for constant expressions, and added the language name to the system.collations table. [\#8051](https://github.com/ClickHouse/ClickHouse/pull/8051) ([alesapin](https://github.com/alesapin))
+- The number of streams for reading from `StorageFile` and `StorageHDFS` is now limited, to avoid exceeding the memory limit. [\#7981](https://github.com/ClickHouse/ClickHouse/pull/7981) ([alesapin](https://github.com/alesapin))
+- Fixed the `CHECK TABLE` query for `*MergeTree` tables without a key. [\#7979](https://github.com/ClickHouse/ClickHouse/pull/7979) ([alesapin](https://github.com/alesapin))
+- Removed the mutation number from a part name in case there were no mutations. This removal improved compatibility with older versions. [\#8250](https://github.com/ClickHouse/ClickHouse/pull/8250) ([alesapin](https://github.com/alesapin))
+- Fixed the bug that mutations are skipped for some attached parts due to their data_version being larger than the table mutation version. [\#7812](https://github.com/ClickHouse/ClickHouse/pull/7812) ([Zhichang Yu](https://github.com/yuzhichang))
+- Allow starting the server with redundant copies of parts after moving them to another device. [\#7810](https://github.com/ClickHouse/ClickHouse/pull/7810) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fixed the error “Sizes of columns doesn’t match” that might appear when using aggregate function columns. [\#7790](https://github.com/ClickHouse/ClickHouse/pull/7790) ([Boris Granveaud](https://github.com/bgranvea))
+- Now an exception is thrown in case of using WITH TIES alongside LIMIT BY. And now it's possible to use TOP with LIMIT BY. [\#7637](https://github.com/ClickHouse/ClickHouse/pull/7637) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Fixed dictionary reload if it has an `invalidate_query`, which stopped updates after some exception on previous update tries. [\#8029](https://github.com/ClickHouse/ClickHouse/pull/8029) ([alesapin](https://github.com/alesapin))
+
+### ClickHouse release v19.17.4.11, 2019-11-22 {#clickhouse-release-v19-17-4-11-2019-11-22}
+
+#### Backward Incompatible Change {#backward-incompatible-change}
+
+- Using a column instead of AST to store scalar subquery results for better performance. The setting `enable_scalar_subquery_optimization` was added in 19.17 and it was enabled by default. It leads to errors like [this](https://github.com/ClickHouse/ClickHouse/issues/7851) during upgrade to 19.17.2 or 19.17.3 from previous versions. This setting was disabled by default in 19.17.4, to make upgrading from 19.16 and older versions possible without errors. [\#7392](https://github.com/ClickHouse/ClickHouse/pull/7392) ([Amos Bird](https://github.com/amosbird))
+
+#### New Feature {#new-feature}
+
+- Added the ability to create dictionaries with DDL queries. [\#7360](https://github.com/ClickHouse/ClickHouse/pull/7360) ([alesapin](https://github.com/alesapin))
+- Made the `bloom_filter` type of index support `LowCardinality` and `Nullable`. [\#7363](https://github.com/ClickHouse/ClickHouse/issues/7363) [\#7561](https://github.com/ClickHouse/ClickHouse/pull/7561) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Added the function `isValidJSON` to check that the passed string is valid JSON. [\#5910](https://github.com/ClickHouse/ClickHouse/issues/5910) [\#7293](https://github.com/ClickHouse/ClickHouse/pull/7293) ([Vdimir](https://github.com/Vdimir))
+- Implemented the `arrayCompact` function. [\#7328](https://github.com/ClickHouse/ClickHouse/pull/7328) ([Memo](https://github.com/Joeywzr))
+- Created the function `hex` for Decimal numbers. It works like `hex(reinterpretAsString())`, but doesn't delete last zero bytes. [\#7355](https://github.com/ClickHouse/ClickHouse/pull/7355) ([Mikhail Korotov](https://github.com/millb))
+- Added the `arrayFill` and `arrayReverseFill` functions, which replace elements by other elements in front/back of them in the array. [\#7380](https://github.com/ClickHouse/ClickHouse/pull/7380) ([hcz](https://github.com/hczhcz))
+- Added `CRC32IEEE()`/`CRC64()` support. [\#7480](https://github.com/ClickHouse/ClickHouse/pull/7480) ([Azat Khuzhin](https://github.com/azat))
+- Implemented the `char` function similar to the one in [mysql](https://dev.mysql.com/doc/refman/8.0/en/string-functions.html#function_char). [\#7486](https://github.com/ClickHouse/ClickHouse/pull/7486) ([sundyli](https://github.com/sundy-li))
+- Added the `bitmapTransform` function. It transforms an array of values in a bitmap to another array of values, the result being a new bitmap. [\#7598](https://github.com/ClickHouse/ClickHouse/pull/7598) ([Zhichang Yu](https://github.com/yuzhichang))
+- Implemented the `javaHashUTF16LE()` function. [\#7651](https://github.com/ClickHouse/ClickHouse/pull/7651) ([achimbab](https://github.com/achimbab))
+- Added the `_shard_num` virtual column for the Distributed engine. [\#7624](https://github.com/ClickHouse/ClickHouse/pull/7624) ([Azat Khuzhin](https://github.com/azat))
+
+#### Experimental Feature {#experimental-feature}
+
+- Support for processors (the new query execution pipeline) in `MergeTree`. [\#7181](https://github.com/ClickHouse/ClickHouse/pull/7181) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+
+#### Bug Fix {#bug-fix-1}
+
+- Fixed incorrect float parsing in `Values`. [\#7817](https://github.com/ClickHouse/ClickHouse/issues/7817) [\#7870](https://github.com/ClickHouse/ClickHouse/pull/7870) ([tavplubix](https://github.com/tavplubix))
+- Fixed a rare deadlock which can happen when trace_log is enabled. [\#7838](https://github.com/ClickHouse/ClickHouse/pull/7838) ([filimonov](https://github.com/filimonov))
+- Prevent message duplication when a producing Kafka table has any MVs selecting from it. [\#7265](https://github.com/ClickHouse/ClickHouse/pull/7265) ([Ivan](https://github.com/abyss7))
+- Support for `Array(LowCardinality(Nullable(String)))` in `IN`. Resolves [\#7364](https://github.com/ClickHouse/ClickHouse/issues/7364) [\#7366](https://github.com/ClickHouse/ClickHouse/pull/7366) ([achimbab](https://github.com/achimbab))
+- Added handling of `SQL_TINYINT` and `SQL_BIGINT`, and fixed handling of `SQL_FLOAT` data source types in the ODBC Bridge. [\#7491](https://github.com/ClickHouse/ClickHouse/pull/7491) ([Denis Glazachev](https://github.com/traceon))
+- Fixed aggregation (`avg` and quantiles) over empty decimal columns. [\#7431](https://github.com/ClickHouse/ClickHouse/pull/7431) ([Andrey Konyaev](https://github.com/akonyaev90))
+- Fixed `INSERT` into Distributed with `MATERIALIZED` columns. [\#7377](https://github.com/ClickHouse/ClickHouse/pull/7377) ([Azat Khuzhin](https://github.com/azat))
+- Made `MOVE PARTITION` work if some parts of the partition are already on the destination disk or volume. [\#7434](https://github.com/ClickHouse/ClickHouse/pull/7434) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fixed a bug with hardlinks failing to be created during mutations in `ReplicatedMergeTree` in multi-disk configurations. [\#7558](https://github.com/ClickHouse/ClickHouse/pull/7558) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fixed a bug with a mutation on a MergeTree when the whole part remains unchanged and the best space is found on another disk. [\#7602](https://github.com/ClickHouse/ClickHouse/pull/7602) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fixed a bug with `keep_free_space_ratio` not being read from the disks configuration. [\#7645](https://github.com/ClickHouse/ClickHouse/pull/7645) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fixed a bug with tables containing only `Tuple` columns or columns with complex paths. Fixes [7541](https://github.com/ClickHouse/ClickHouse/issues/7541). [\#7545](https://github.com/ClickHouse/ClickHouse/pull/7545) ([alesapin](https://github.com/alesapin))
+- Do not account memory for the Buffer engine in the max_memory_usage limit. [\#7552](https://github.com/ClickHouse/ClickHouse/pull/7552) ([Azat Khuzhin](https://github.com/azat))
+- Fixed final mark usage in `MergeTree` tables ordered by `tuple()`. In rare cases it could lead to a `Can't adjust last granule` error while selecting. [\#7639](https://github.com/ClickHouse/ClickHouse/pull/7639) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed a bug in mutations that have a predicate with actions that require context (for example, functions for JSON), which could lead to crashes or strange exceptions. [\#7664](https://github.com/ClickHouse/ClickHouse/pull/7664) ([alesapin](https://github.com/alesapin))
+- Fixed a mismatch of database and table name escaping in the `data/` and `shadow/` directories. [\#7575](https://github.com/ClickHouse/ClickHouse/pull/7575) ([Alexander Burmak](https://github.com/Alex-Burmak))
+- Support duplicated keys in RIGHT\|FULL JOINs, e.g. `ON t.x = u.x AND t.x = u.y`. Fixes a crash in this case. [\#7586](https://github.com/ClickHouse/ClickHouse/pull/7586) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed `Not found column in block` when joining on an expression with a RIGHT or FULL JOIN. [\#7641](https://github.com/ClickHouse/ClickHouse/pull/7641) ([Artem Zuikov](https://github.com/4ertus2))
+- One more attempt to fix the infinite loop in the `PrettySpace` format. [\#7591](https://github.com/ClickHouse/ClickHouse/pull/7591) ([Olga Khvostikova](https://github.com/stavrolia))
+- Fixed a bug in the `concat` function when all arguments were `FixedString` of the same size. [\#7635](https://github.com/ClickHouse/ClickHouse/pull/7635) ([alesapin](https://github.com/alesapin))
+- Fixed an exception in case of using 1 argument while defining S3, URL and HDFS storages. [\#7618](https://github.com/ClickHouse/ClickHouse/pull/7618) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fixed the scope of the InterpreterSelectQuery for views with a query. [\#7601](https://github.com/ClickHouse/ClickHouse/pull/7601) ([Azat Khuzhin](https://github.com/azat))
+
+#### Improvement {#improvement}
+
+- `Nullable` columns are recognized and NULL values are handled correctly by the ODBC bridge. [\#7402](https://github.com/ClickHouse/ClickHouse/pull/7402) ([Vasily Nemkov](https://github.com/Enmk))
+- The current batch for distributed sends is now written atomically. [\#7600](https://github.com/ClickHouse/ClickHouse/pull/7600) ([Azat Khuzhin](https://github.com/azat))
+- Throw an exception if we cannot detect the table for a column name in the query. [\#7358](https://github.com/ClickHouse/ClickHouse/pull/7358) ([Artem Zuikov](https://github.com/4ertus2))
+- Added the `merge_max_block_size` setting to `MergeTreeSettings`. [\#7412](https://github.com/ClickHouse/ClickHouse/pull/7412) ([Artem Zuikov](https://github.com/4ertus2))
+- Queries with `HAVING` and without `GROUP BY` assume grouping by constant. So, `SELECT 1 HAVING 1` now returns a result. [\#7496](https://github.com/ClickHouse/ClickHouse/pull/7496) ([Amos Bird](https://github.com/amosbird))
+- Support parsing `(X,)` as a tuple, similar to Python. [\#7501](https://github.com/ClickHouse/ClickHouse/pull/7501), [\#7562](https://github.com/ClickHouse/ClickHouse/pull/7562) ([Amos Bird](https://github.com/amosbird))
+- Made the `range` function behave almost like a pythonic one. [\#7518](https://github.com/ClickHouse/ClickHouse/pull/7518) ([sundyli](https://github.com/sundy-li))
+- Added `constraints` columns to the table `system.settings`. [\#7553](https://github.com/ClickHouse/ClickHouse/pull/7553) ([Vitaly Baranov](https://github.com/vitlibar))
+- Better Null format for the TCP handler, so that it's possible to use `select ignore() from table format Null` for perf measurements via clickhouse-client. [\#7606](https://github.com/ClickHouse/ClickHouse/pull/7606) ([Amos Bird](https://github.com/amosbird))
+- Queries like `CREATE TABLE ... AS (SELECT (1, 2))` are parsed correctly. [\#7542](https://github.com/ClickHouse/ClickHouse/pull/7542) ([hcz](https://github.com/hczhcz))
+
+#### Performance Improvement {#performance-improvement}
+
+- The performance of aggregation over short string keys is improved. [\#6243](https://github.com/ClickHouse/ClickHouse/pull/6243) ([Alexander Kuzmenkov](https://github.com/akuzm), [Amos Bird](https://github.com/amosbird))
+- Run another pass of syntax/expression analysis to get potential optimizations after constant predicates are folded. [\#7497](https://github.com/ClickHouse/ClickHouse/pull/7497) ([Amos Bird](https://github.com/amosbird))
+- Use storage meta info to evaluate trivial `SELECT count() FROM table;`. [\#7510](https://github.com/ClickHouse/ClickHouse/pull/7510) ([Amos Bird](https://github.com/amosbird), [alexey-milovidov](https://github.com/alexey-milovidov))
+- Vectorized processing of `arrayReduce` similar to the Aggregator's `addBatch`. [\#7608](https://github.com/ClickHouse/ClickHouse/pull/7608) ([Amos Bird](https://github.com/amosbird))
+- Minor improvements in performance of `Kafka` consumption. [\#7475](https://github.com/ClickHouse/ClickHouse/pull/7475) ([Ivan](https://github.com/abyss7))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement}
+
+- Added support for cross-compiling to the AARCH64 CPU architecture. Refactored the packager script. [\#7370](https://github.com/ClickHouse/ClickHouse/pull/7370) [\#7539](https://github.com/ClickHouse/ClickHouse/pull/7539) ([Ivan](https://github.com/abyss7))
+- Unpack the darwin-x86_64 and linux-aarch64 toolchains into the mounted Docker volume when building packages. [\#7534](https://github.com/ClickHouse/ClickHouse/pull/7534) ([Ivan](https://github.com/abyss7))
+- Updated the Docker image for the binary packager. [\#7474](https://github.com/ClickHouse/ClickHouse/pull/7474) ([Ivan](https://github.com/abyss7))
+- Fixed compile errors on MacOS Catalina. [\#7585](https://github.com/ClickHouse/ClickHouse/pull/7585) ([Ernest Poletaev](https://github.com/ernestp))
+- Some refactoring in the query analysis logic: split a complex class into several simple ones. [\#7454](https://github.com/ClickHouse/ClickHouse/pull/7454) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed the build without submodules. [\#7295](https://github.com/ClickHouse/ClickHouse/pull/7295) ([proller](https://github.com/proller))
+- Better `add_globs` in CMake files. [\#7418](https://github.com/ClickHouse/ClickHouse/pull/7418) ([Amos Bird](https://github.com/amosbird))
+- Removed hardcoded paths in the `unwind` target. [\#7460](https://github.com/ClickHouse/ClickHouse/pull/7460) ([Konstantin Podshumok](https://github.com/podshumok))
+- Allow using the MySQL format without SSL. [\#7524](https://github.com/ClickHouse/ClickHouse/pull/7524) ([proller](https://github.com/proller))
+
+#### Other {#other}
+
+- Added an ANTLR4 grammar for the ClickHouse SQL dialect. [\#7595](https://github.com/ClickHouse/ClickHouse/issues/7595) [\#7596](https://github.com/ClickHouse/ClickHouse/pull/7596) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+## ClickHouse release v19.16 {#clickhouse-release-v19-16}
+
+#### ClickHouse release v19.16.14.65, 2020-03-25 {#clickhouse-release-v19-16-14-65-2020-03-25}
+
+- Fixed a bug in batched calculations of ternary logical operations on multiple arguments (more than 10). [\#8718](https://github.com/ClickHouse/ClickHouse/pull/8718) ([Alexander Kazakov](https://github.com/Akazz)) This bugfix was backported to version 19.16 by a special request from Altinity. The sketch just below illustrates the ternary `NULL` semantics involved.
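+
+As a minimal illustration of the ternary-logic fix above (the queries are hypothetical, not taken from the changelog; the expected results follow ClickHouse's three-valued logic for `NULL`):
+
+``` sql
+-- More than 10 arguments, so the batched calculation path is exercised:
+SELECT or(NULL, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1); -- 1: at least one argument is true
+SELECT or(NULL, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); -- NULL: unknown OR false stays unknown
+```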
+ +#### Clickhouse انتشار v19.16.14.65 و 2020-03-05 {#clickhouse-release-v19-16-14-65-2020-03-05} + +- رفع ناسازگاری خرده فروشی توزیع با نسخه های قدیمی تر کانال. رفع [\#7851](https://github.com/ClickHouse/ClickHouse/issues/7851) + [(تبلوبیکس)](https://github.com/tavplubix) +- هنگام اجرای `CREATE` پرس و جو, برابر عبارات ثابت در استدلال موتور ذخیره سازی. جایگزین کردن نام دادگان خالی با دادگان فعلی. رفع [\#6508](https://github.com/ClickHouse/ClickHouse/issues/6508), [\#3492](https://github.com/ClickHouse/ClickHouse/issues/3492). همچنین رفع بررسی برای نشانی محلی در `ClickHouseDictionarySource`. + [\#9262](https://github.com/ClickHouse/ClickHouse/pull/9262) [(تبلوبیکس)](https://github.com/tavplubix) +- در حال حاضر پس زمینه ادغام در `*MergeTree` موتورهای جدول خانواده حفظ سیاست ذخیره سازی حجم سفارش دقیق تر. + [\#8549](https://github.com/ClickHouse/ClickHouse/pull/8549) ([ولادیمیر چبوتراف](https://github.com/excitoon)) +- جلوگیری از از دست دادن داده ها در `Kafka` در موارد نادر زمانی که استثنا اتفاق می افتد پس از خواندن پسوند اما قبل از ارتکاب. رفع [\#9378](https://github.com/ClickHouse/ClickHouse/issues/9378). مرتبط: [\#7175](https://github.com/ClickHouse/ClickHouse/issues/7175) + [\#9507](https://github.com/ClickHouse/ClickHouse/pull/9507) [(فیلیمونف)](https://github.com/filimonov) +- رفع اشکال منجر به ختم سرور در هنگام تلاش برای استفاده / رها کردن `Kafka` جدول ایجاد شده با پارامترهای اشتباه. رفع [\#9494](https://github.com/ClickHouse/ClickHouse/issues/9494). دارای [\#9507](https://github.com/ClickHouse/ClickHouse/issues/9507). + [\#9513](https://github.com/ClickHouse/ClickHouse/pull/9513) [(فیلیمونف)](https://github.com/filimonov) +- اجازه استفاده `MaterializedView` با subqueries بالا `Kafka` میز + [\#8197](https://github.com/ClickHouse/ClickHouse/pull/8197) ([فیلیمونف](https://github.com/filimonov)) + +#### ویژگی جدید {#new-feature-1} + +- افزودن `deduplicate_blocks_in_dependent_materialized_views` گزینه ای برای کنترل رفتار درج ژولیده به جداول با نمایش محقق. این ویژگی جدید توسط یک درخواست ویژه از التیت به نسخه رفع اشکال اضافه شد. + [\#9070](https://github.com/ClickHouse/ClickHouse/pull/9070) [.)](https://github.com/urykhy) + +### ClickHouse انتشار v19.16.2.2, 2019-10-30 {#clickhouse-release-v19-16-2-2-2019-10-30} + +#### تغییر ناسازگار به عقب {#backward-incompatible-change-1} + +- اضافه کردن گم شده arity اعتبار برای تعداد/counif. + [\#7095](https://github.com/ClickHouse/ClickHouse/issues/7095) + [\#7298](https://github.com/ClickHouse/ClickHouse/pull/7298) ([ولادیمیر](https://github.com/Vdimir)) +- حذف میراث `asterisk_left_columns_only` تنظیم (به طور پیش فرض غیر فعال شد). + [\#7335](https://github.com/ClickHouse/ClickHouse/pull/7335) ([علم هنر + زویکوف](https://github.com/4ertus2)) +- رشته فرمت برای قالب فرمت داده در حال حاضر در فایل های مشخص شده است. + [\#7118](https://github.com/ClickHouse/ClickHouse/pull/7118) + ([تاولوبیکس](https://github.com/tavplubix)) + +#### ویژگی جدید {#new-feature-2} + +- معرفی uniqcombined64() برای محاسبه cardinality بیشتر از uint\_max. + [\#7213](https://github.com/ClickHouse/ClickHouse/pull/7213), + [\#7222](https://github.com/ClickHouse/ClickHouse/pull/7222) ([ازات + خوژین](https://github.com/azat)) +- پشتیبانی از شاخص های فیلتر بلوم در ستون های مجموعه. + [\#6984](https://github.com/ClickHouse/ClickHouse/pull/6984) + ([ایشیمب](https://github.com/achimbab)) +- اضافه کردن یک تابع `getMacro(name)` که رشته را برمی گرداند با ارزش مربوطه `` + از پیکربندی سرور. 
[\#7240](https://github.com/ClickHouse/ClickHouse/pull/7240) + ([الکسی میلویدو](https://github.com/alexey-milovidov)) +- تنظیم دو گزینه پیکربندی برای یک فرهنگ لغت بر اساس یک منبع قام: `credentials` و + `http-headers`. [\#7092](https://github.com/ClickHouse/ClickHouse/pull/7092) ([گیمه + کاسری](https://github.com/YiuRULE)) +- افزودن سابقه جدید `Merge` که تعداد پس زمینه راه اندازی ادغام. + [\#7093](https://github.com/ClickHouse/ClickHouse/pull/7093) ([میخیل + کوروتف](https://github.com/millb)) +- اضافه کردن تابع نام کامل است که یک نام دامنه به طور کامل واجد شرایط را برمی گرداند. + [\#7263](https://github.com/ClickHouse/ClickHouse/issues/7263) + [\#7291](https://github.com/ClickHouse/ClickHouse/pull/7291) ([بستنی و مغز گردو](https://github.com/sundy-li)) +- افزودن تابع `arraySplit` و `arrayReverseSplit` که یک مجموعه تقسیم شده توسط “cut off” + شرایط. در دست زدن به توالی زمان مفید هستند. + [\#7294](https://github.com/ClickHouse/ClickHouse/pull/7294) ([هکز](https://github.com/hczhcz)) +- اضافه کردن توابع جدید است که بازگشت مجموعه ای از تمام شاخص های همسان در خانواده چند از توابع. + [\#7299](https://github.com/ClickHouse/ClickHouse/pull/7299) ([دنیلا + کوتنین](https://github.com/danlark1)) +- اضافه کردن یک موتور پایگاه داده جدید `Lazy` این است که برای ذخیره سازی تعداد زیادی از کوچک ورود به سیستم بهینه شده است + میز [\#7171](https://github.com/ClickHouse/ClickHouse/pull/7171) ([نیکیتا + واسیلیف](https://github.com/nikvas0)) +- اضافه کردن توابع جمع گروهبیت مپند,- یا, - صخره نوردی برای ستون بیت مپ. [\#7109](https://github.com/ClickHouse/ClickHouse/pull/7109) ([ژیچنگ + یو](https://github.com/yuzhichang)) +- اضافه کردن مجموع عملکرد combinators -ornull و ordefault که بازگشت تهی + یا مقادیر پیش فرض زمانی که هیچ چیز به جمع وجود دارد. + [\#7331](https://github.com/ClickHouse/ClickHouse/pull/7331) + ([هکز](https://github.com/hczhcz)) +- قالب داده های سفارشی را معرفی کنید که از فرار سفارشی پشتیبانی می کند و + قوانین جداساز. [\#7118](https://github.com/ClickHouse/ClickHouse/pull/7118) + ([تاولوبیکس](https://github.com/tavplubix)) +- پشتیبانی ردیس به عنوان منبع فرهنگ لغت خارجی. [\#4361](https://github.com/ClickHouse/ClickHouse/pull/4361) [\#6962](https://github.com/ClickHouse/ClickHouse/pull/6962) ([کموندی](https://github.com/comunodi), [انتون + پوپوف](https://github.com/CurtizJ)) + +#### رفع اشکال {#bug-fix-2} + +- رفع نتیجه پرس و جو اشتباه در صورتی که `WHERE IN (SELECT ...)` بخش و `optimize_read_in_order` هست + استفاده می شود. [\#7371](https://github.com/ClickHouse/ClickHouse/pull/7371) ([انتون + پوپوف](https://github.com/CurtizJ)) +- پلاگین احراز هویت ماریادب غیر فعال, که بستگی به فایل های خارج از پروژه. + [\#7140](https://github.com/ClickHouse/ClickHouse/pull/7140) ([یوری + Baranov](https://github.com/yurriy)) +- رفع استثنا `Cannot convert column ... because it is constant but values of constants are different in source and result` که به ندرت می تواند رخ دهد زمانی که توابع `now()`, `today()`, + `yesterday()`, `randConstant()` استفاده می شود. + [\#7156](https://github.com/ClickHouse/ClickHouse/pull/7156) ([نیکولای + کوچتو](https://github.com/KochetovNicolai)) +- شماره ثابت با استفاده از اچ.تی. پی را نگه دارید ایست زنده به جای تی. پی نگه داشتن ایست زنده است. + [\#7351](https://github.com/ClickHouse/ClickHouse/pull/7351) ([واسیلی + نمکو](https://github.com/Enmk)) +- گسل تقسیم بندی را در گروهبیتماپور ثابت کرد (شماره [\#7109](https://github.com/ClickHouse/ClickHouse/issues/7109)). 
+ [\#7289](https://github.com/ClickHouse/ClickHouse/pull/7289) ([ژیچنگ + یو](https://github.com/yuzhichang)) +- برای نمایش تحقق مرتکب کافکا نامیده می شود پس از تمام داده ها نوشته شده است. + [\#7175](https://github.com/ClickHouse/ClickHouse/pull/7175) ([ایوان](https://github.com/abyss7)) +- اشتباه ثابت `duration_ms` مقدار در `system.part_log` جدول ده بار خاموش بود. + [\#7172](https://github.com/ClickHouse/ClickHouse/pull/7172) ([ولادیمیر + Chebotarev](https://github.com/excitoon)) +- رفع سریع برای حل و فصل سقوط در جدول نمایش زنده و دوباره قادر می سازد تمام تست نمایش زنده. + [\#7201](https://github.com/ClickHouse/ClickHouse/pull/7201) + ([vzakaznikov](https://github.com/vzakaznikov)) +- مرتب کردن مقادیر صفر به درستی در شاخص دقیقه/حداکثر از قطعات ادغام. + [\#7234](https://github.com/ClickHouse/ClickHouse/pull/7234) ([الکساندر + کوزمنکوف](https://github.com/akuzm)) +- قرار نیست ستون مجازی به .sql ابرداده هنگامی که جدول ایجاد شده است `CREATE TABLE AS`. + [\#7183](https://github.com/ClickHouse/ClickHouse/pull/7183) ([ایوان](https://github.com/abyss7)) +- رفع گسل تقسیم بندی در `ATTACH PART` پرس و جو. + [\#7185](https://github.com/ClickHouse/ClickHouse/pull/7185) + ([الساپین](https://github.com/alesapin)) +- رفع نتیجه اشتباه برای برخی از نمایش داده شد داده شده توسط بهینه سازی خالی در کارخانه های فرعی و خالی + INNER/RIGHT JOIN. [\#7284](https://github.com/ClickHouse/ClickHouse/pull/7284) ([نیکولای + کوچتو](https://github.com/KochetovNicolai)) +- رفع addresssanitizer خطا در نمایش زنده getheader روش (). + [\#7271](https://github.com/ClickHouse/ClickHouse/pull/7271) + ([vzakaznikov](https://github.com/vzakaznikov)) + +#### بهبود {#improvement-1} + +- اضافه کردن یک پیام در صورت صبر کردن \_موا\_مایش صورت می گیرد. + [\#7390](https://github.com/ClickHouse/ClickHouse/pull/7390) ([ازات + خوژین](https://github.com/azat)) +- تنظیم ساخته شده `s3_min_upload_part_size` جدول سطح. + [\#7059](https://github.com/ClickHouse/ClickHouse/pull/7059) ([ولادیمیر + Chebotarev](https://github.com/excitoon)) +- در حال بارگذاری [\#7304](https://github.com/ClickHouse/ClickHouse/pull/7304) + ([بستنی و مغز گردو](https://github.com/sundy-li)) +- بلوک کدو دست چپ در ادغام بخشی ملحق (بهینه سازی). + [\#7122](https://github.com/ClickHouse/ClickHouse/pull/7122) ([علم هنر + زویکوف](https://github.com/4ertus2)) +- اجازه ندهید که توابع غیر قطعی در جهش موتورهای جدول تکرار, چرا که این + می توانید تناقضات بین کپی معرفی. + [\#7247](https://github.com/ClickHouse/ClickHouse/pull/7247) ([الکساندر + کازاکوف](https://github.com/Akazz)) +- غیر فعال کردن ردیاب حافظه در حالی که تبدیل ردیابی پشته استثنا به رشته. این می تواند از دست دادن جلوگیری کند + از پیغام خطا از نوع `Memory limit exceeded` بر روی سرور, که باعث `Attempt to read after eof` استثنا در مشتری. [\#7264](https://github.com/ClickHouse/ClickHouse/pull/7264) + ([نیکولای کوچتو](https://github.com/KochetovNicolai)) +- بهبود فرمت های دیگر. برطرف + [\#6033](https://github.com/ClickHouse/ClickHouse/issues/6033), + [\#2633](https://github.com/ClickHouse/ClickHouse/issues/2633), + [\#6611](https://github.com/ClickHouse/ClickHouse/issues/6611), + [\#6742](https://github.com/ClickHouse/ClickHouse/issues/6742) + [\#7215](https://github.com/ClickHouse/ClickHouse/pull/7215) + ([تاولوبیکس](https://github.com/tavplubix)) +- خانه را نادیده می گیرد ارزش در سمت راست در اپراتور که قابل تبدیل به سمت چپ نیست + side type. Make it work properly for compound types – Array and Tuple. 
+ [\#7283](https://github.com/ClickHouse/ClickHouse/pull/7283) ([الکساندر + کوزمنکوف](https://github.com/akuzm)) +- پشتیبانی از نابرابری های از دست رفته برای عضویت. این ممکن است برای پیوستن به نوع کمتر یا برابر و سخت + انواع بیشتر و کمتر برای ستون اسوف در نحو. + [\#7282](https://github.com/ClickHouse/ClickHouse/pull/7282) ([علم هنر + Zuikov](https://github.com/4ertus2)) +- بهینه سازی بخشی ادغام اضافه کردن. [\#7070](https://github.com/ClickHouse/ClickHouse/pull/7070) + ([زویکوف](https://github.com/4ertus2)) +- آیا استفاده از بیش از 98k حافظه در uniqcombined توابع. + [\#7236](https://github.com/ClickHouse/ClickHouse/pull/7236), + [\#7270](https://github.com/ClickHouse/ClickHouse/pull/7270) ([ازات + خوژین](https://github.com/azat)) +- قطعات خیط و پیت کردن از جدول پیوستن به دست راست بر روی دیسک در پارتیالمگرمین (اگر کافی نیست + حافظه). بارگیری داده ها در صورت نیاز. [\#7186](https://github.com/ClickHouse/ClickHouse/pull/7186) + ([زویکوف](https://github.com/4ertus2)) + +#### بهبود عملکرد {#performance-improvement-1} + +- سرعت جوینت با استدلال توایع با اجتناب از تکرار داده ها. + [\#7359](https://github.com/ClickHouse/ClickHouse/pull/7359) ([ایموس + پرنده](https://github.com/amosbird)) +- بازگشت اولیه اگر زیرخاکری خالی است. + [\#7007](https://github.com/ClickHouse/ClickHouse/pull/7007) ([小路](https://github.com/nicelulu)) +- بهینه سازی تجزیه بیان گذاشتن در مقادیر. + [\#6781](https://github.com/ClickHouse/ClickHouse/pull/6781) + ([تاولوبیکس](https://github.com/tavplubix)) + +#### ساخت/تست / بهبود بسته بندی {#buildtestingpackaging-improvement-1} + +- غیر فعال کردن برخی از موارد برای متقابل تلفیقی به سیستم عامل مک. + [\#7101](https://github.com/ClickHouse/ClickHouse/pull/7101) ([ایوان](https://github.com/abyss7)) +- اضافه کردن گم شده ارتباط با pocoxml برای clickhouse\_common\_io. + [\#7200](https://github.com/ClickHouse/ClickHouse/pull/7200) ([ازات + خوژین](https://github.com/azat)) +- قبول متعدد تست فیلتر استدلال در clickhouse آزمون. + [\#7226](https://github.com/ClickHouse/ClickHouse/pull/7226) ([الکساندر + کوزمنکوف](https://github.com/akuzm)) +- فعال musl و jemalloc برای arm. [\#7300](https://github.com/ClickHouse/ClickHouse/pull/7300) + ([ایموس پرنده](https://github.com/amosbird)) +- اضافه شده `--client-option` پارامتر به `clickhouse-test` به تصویب پارامترهای اضافی به مشتری. + [\#7277](https://github.com/ClickHouse/ClickHouse/pull/7277) ([نیکولای + کوچتو](https://github.com/KochetovNicolai)) +- حفظ تنظیمات موجود در دور در دقیقه ارتقا بسته. + [\#7103](https://github.com/ClickHouse/ClickHouse/pull/7103) + ([فیلیمونف](https://github.com/filimonov)) +- رفع خطاهای شناسایی شده توسط پوس. [\#7153](https://github.com/ClickHouse/ClickHouse/pull/7153) ([علم هنر + Zuikov](https://github.com/4ertus2)) +- رفع ساخت برای داروین. [\#7149](https://github.com/ClickHouse/ClickHouse/pull/7149) + ([ایوان](https://github.com/abyss7)) +- glibc 2.29 سازگاری. [\#7142](https://github.com/ClickHouse/ClickHouse/pull/7142) ([ایموس + پرنده](https://github.com/amosbird)) +- اطمینان حاصل کنید که دکلین می کند فایل های منبع بالقوه را لمس نمی. + [\#7205](https://github.com/ClickHouse/ClickHouse/pull/7205) ([ایموس + پرنده](https://github.com/amosbird)) +- تلاش برای جلوگیری از درگیری در هنگام به روز رسانی از دور در دقیقه التیت - فایل پیکربندی بسته بندی شده به طور جداگانه + در کلیک-سرور-مشترک. [\#7073](https://github.com/ClickHouse/ClickHouse/pull/7073) + ([فیلیمونف](https://github.com/filimonov)) +- بهینه سازی برخی از فایل های هدر برای بازسازی سریع تر. 
+ [\#7212](https://github.com/ClickHouse/ClickHouse/pull/7212), + [\#7231](https://github.com/ClickHouse/ClickHouse/pull/7231) ([الکساندر + کوزمنکوف](https://github.com/akuzm)) +- اضافه کردن تست عملکرد برای تاریخ و تاریخ ساعت. [\#7332](https://github.com/ClickHouse/ClickHouse/pull/7332) ([واسیلی + نمکو](https://github.com/Enmk)) +- رفع برخی از تست هایی که حاوی جهش های غیر قطعی هستند. + [\#7132](https://github.com/ClickHouse/ClickHouse/pull/7132) ([الکساندر + کازاکوف](https://github.com/Akazz)) +- اضافه کردن ساخت با حفظ به سی. [\#7066](https://github.com/ClickHouse/ClickHouse/pull/7066) + ([الکساندر کوزمنکوف](https://github.com/akuzm)) +- اجتناب از استفاده از آماده سازی نشده ارزش در metricstransmitter. + [\#7158](https://github.com/ClickHouse/ClickHouse/pull/7158) ([ازات + خوژین](https://github.com/azat)) +- رفع برخی از مشکلات در زمینه های پیدا شده توسط حفظ کننده. + [\#7135](https://github.com/ClickHouse/ClickHouse/pull/7135), + [\#7179](https://github.com/ClickHouse/ClickHouse/pull/7179) ([الکساندر + کوزمنکوف](https://github.com/akuzm)), [\#7376](https://github.com/ClickHouse/ClickHouse/pull/7376) + ([ایموس پرنده](https://github.com/amosbird)) +- رفع رفتار تعریف نشده در سوفلش32. [\#7388](https://github.com/ClickHouse/ClickHouse/pull/7388) ([ایموس + پرنده](https://github.com/amosbird)) +- رفع رفتار تعریف نشده در استورگیسنفستولینگ. [\#7384](https://github.com/ClickHouse/ClickHouse/pull/7384) + ([تاولوبیکس](https://github.com/tavplubix)) +- عبارات ثابت ثابت تاشو برای موتورهای پایگاه داده خارجی (خروجی زیر ,او بی سی, ال بی سی). در گذشته + نسخه این بود برای عبارات ثابت متعدد کار نمی کند و در همه برای تاریخ کار نمی کند, + تاریخ ساعت و امید. این رفع [\#7245](https://github.com/ClickHouse/ClickHouse/issues/7245) + [\#7252](https://github.com/ClickHouse/ClickHouse/pull/7252) + ([الکسی میلویدو](https://github.com/alexey-milovidov)) +- رفع threadsanitizer اطلاعات مسابقه خطا در نمایش زنده در هنگام دسترسی به no\_users\_thread متغیر است. + [\#7353](https://github.com/ClickHouse/ClickHouse/pull/7353) + ([وزکازنیکوف](https://github.com/vzakaznikov)) +- خلاص شدن از شر نمادها مالوک در لیبکمون + [\#7134](https://github.com/ClickHouse/ClickHouse/pull/7134), + [\#7065](https://github.com/ClickHouse/ClickHouse/pull/7065) ([ایموس + پرنده](https://github.com/amosbird)) +- اضافه کردن توانمندسازهای پرچم جهانی برای غیر فعال کردن تمام کتابخانه ها. + [\#7063](https://github.com/ClickHouse/ClickHouse/pull/7063) + ([پرولر](https://github.com/proller)) + +#### پاکسازی کد {#code-cleanup} + +- تعمیم مخزن پیکربندی برای تهیه دی ال برای لغت نامه. [\#7155](https://github.com/ClickHouse/ClickHouse/pull/7155) + ([الساپین](https://github.com/alesapin)) +- تجزیه کننده برای لغت نامه دی ال بدون هیچ معنایی. + [\#7209](https://github.com/ClickHouse/ClickHouse/pull/7209) + ([الساپین](https://github.com/alesapin)) +- تقسیم پارسرکری به تجزیه کننده های مختلف کوچکتر. + [\#7253](https://github.com/ClickHouse/ClickHouse/pull/7253) + ([الساپین](https://github.com/alesapin)) +- فاکتورگیری مجدد کوچک و تغییر نام در نزدیکی لغت نامه های خارجی. + [\#7111](https://github.com/ClickHouse/ClickHouse/pull/7111) + ([الساپین](https://github.com/alesapin)) +- گیرنده برخی از کد برای تهیه برای کنترل دسترسی مبتنی بر نقش. [\#7235](https://github.com/ClickHouse/ClickHouse/pull/7235) ([ویتالی + بارانوف](https://github.com/vitlibar)) +- برخی از پیشرفت های در کد بانک اطلاعاتی. + [\#7086](https://github.com/ClickHouse/ClickHouse/pull/7086) ([نیکیتا + واسیلیف](https://github.com/nikvas0)) +- آیا استفاده از iterators در پیدا کردن() و emplace() روش جداول هش. 
+ [\#7026](https://github.com/ClickHouse/ClickHouse/pull/7026) ([الکساندر + کوزمنکوف](https://github.com/akuzm)) +- رفع گیرنده در صورتی که ریشه پارامتر خالی نیست. [\#7374](https://github.com/ClickHouse/ClickHouse/pull/7374) + ([میخیل کوروتف](https://github.com/millb)) +- حذف برخی از کپی و چسباندن (temporaryfile و temporaryfilestream) + [\#7166](https://github.com/ClickHouse/ClickHouse/pull/7166) ([علم هنر + زویکوف](https://github.com/4ertus2)) +- بهبود خوانایی کد کمی (`MergeTreeData::getActiveContainingPart`). + [\#7361](https://github.com/ClickHouse/ClickHouse/pull/7361) ([ولادیمیر + Chebotarev](https://github.com/excitoon)) +- منتظر تمام مشاغل برنامه ریزی شده باشید که از اشیا محلی استفاده می کنند `ThreadPool::schedule(...)` پرت + یک استثنا. تغییر نام `ThreadPool::schedule(...)` به `ThreadPool::scheduleOrThrowOnError(...)` و + رفع نظرات برای ایجاد واضح است که ممکن است پرتاب. + [\#7350](https://github.com/ClickHouse/ClickHouse/pull/7350) + ([تاولوبیکس](https://github.com/tavplubix)) + +## انتشار کلیک 19.15 {#clickhouse-release-19-15} + +### انتشار کلیک خانه 19.15.4.10, 2019-10-31 {#clickhouse-release-19-15-4-10-2019-10-31} + +#### رفع اشکال {#bug-fix-3} + +- اضافه شده دست زدن به sql\_tinyint و sql\_bigint و ثابت دست زدن به sql\_float منبع داده در انواع odbc پل. + [\#7491](https://github.com/ClickHouse/ClickHouse/pull/7491) ([دنیس گلازاشف](https://github.com/traceon)) +- مجاز به برخی از قطعات بر روی دیسک مقصد و یا حجم در پارتیشن حرکت. + [\#7434](https://github.com/ClickHouse/ClickHouse/pull/7434) ([ولادیمیر چبوتراف](https://github.com/excitoon)) +- ثابت null-ارزش در nullable ستون از طریق odbc-پل. + [\#7402](https://github.com/ClickHouse/ClickHouse/pull/7402) ([واسیلی نمکو](https://github.com/Enmk)) +- درج ثابت به گره غیر محلی توزیع شده با ستون محقق. + [\#7377](https://github.com/ClickHouse/ClickHouse/pull/7377) ([ازات خوژین](https://github.com/azat)) +- تابع ثابت دریافت می کند. + [\#7374](https://github.com/ClickHouse/ClickHouse/pull/7374) ([میخیل کوروتف](https://github.com/millb)) +- شماره ثابت با استفاده از اچ.تی. پی را نگه دارید ایست زنده به جای تی. پی نگه داشتن ایست زنده است. + [\#7351](https://github.com/ClickHouse/ClickHouse/pull/7351) ([واسیلی نمکو](https://github.com/Enmk)) +- صبر کنید برای همه مشاغل را به پایان برساند در استثنا (رفع حملات نادر). + [\#7350](https://github.com/ClickHouse/ClickHouse/pull/7350) ([تاولوبیکس](https://github.com/tavplubix)) +- هنگام وارد کردن به میز کافکا به موفکا فشار ندهید. + [\#7265](https://github.com/ClickHouse/ClickHouse/pull/7265) ([ایوان](https://github.com/abyss7)) +- غیر فعال کردن ردیاب حافظه برای استثنا پشته. + [\#7264](https://github.com/ClickHouse/ClickHouse/pull/7264) ([نیکولای کوچتو](https://github.com/KochetovNicolai)) +- کد بد ثابت در تبدیل پرس و جو برای پایگاه داده خارجی. + [\#7252](https://github.com/ClickHouse/ClickHouse/pull/7252) ([الکسی میلویدو](https://github.com/alexey-milovidov)) +- اجتناب از استفاده از ارزش های بی قید و شرط در مترسستر. + [\#7158](https://github.com/ClickHouse/ClickHouse/pull/7158) ([ازات خوژین](https://github.com/azat)) +- اضافه شده به عنوان مثال پیکربندی با ماکروها برای تست ([الکسی میلویدو](https://github.com/alexey-milovidov)) + +### انتشار کلیک خانه 19.15.3.6, 2019-10-09 {#clickhouse-release-19-15-3-6-2019-10-09} + +#### رفع اشکال {#bug-fix-4} + +- بد\_واریانت ثابت در درهم فرهنگ لغت. + ([الساپین](https://github.com/alesapin)) +- ثابت کردن اشکال با گسل تقسیم بندی در ضمیمه پرس و جو بخش. + ([الساپین](https://github.com/alesapin)) +- محاسبه زمان ثابت در `MergeTreeData`. 
+ ([ولادیمیر چبوتراف](https://github.com/excitoon)) +- متعهد به کافکا به صراحت پس از نوشتن نهایی است. + [\#7175](https://github.com/ClickHouse/ClickHouse/pull/7175) ([ایوان](https://github.com/abyss7)) +- مرتب کردن مقادیر صفر به درستی در شاخص دقیقه/حداکثر از قطعات ادغام. + [\#7234](https://github.com/ClickHouse/ClickHouse/pull/7234) ([الکساندر کوزمنکوف](https://github.com/akuzm)) + +### انتشار کلیک خانه 19.15.2.2, 2019-10-01 {#clickhouse-release-19-15-2-2-2019-10-01} + +#### ویژگی جدید {#new-feature-3} + +- ذخیره سازی چند طبقه: پشتیبانی از استفاده از حجم ذخیره سازی های متعدد برای جداول با موتور ادغام. امکان ذخیره داده های تازه بر روی اس اس دی و انتقال خودکار داده های قدیمی به هارد وجود دارد. ([مثال](https://clickhouse.github.io/clickhouse-presentations/meetup30/new_features/#12)). [\#4918](https://github.com/ClickHouse/ClickHouse/pull/4918) ([ایگر](https://github.com/ObjatieGroba)) [\#6489](https://github.com/ClickHouse/ClickHouse/pull/6489) ([الساپین](https://github.com/alesapin)) +- اضافه کردن تابع جدول `input` برای خواندن داده های ورودی در `INSERT SELECT` پرس و جو. [\#5450](https://github.com/ClickHouse/ClickHouse/pull/5450) ([هشدار داده می شود](https://github.com/palasonic1)) [\#6832](https://github.com/ClickHouse/ClickHouse/pull/6832) ([انتون پوپوف](https://github.com/CurtizJ)) +- افزودن یک `sparse_hashed` طرح فرهنگ لغت, که عملکرد به معادل `hashed` طرح, اما حافظه بیشتر موثر است. این در مورد حافظه دو برابر کمتر با هزینه بازیابی ارزش کندتر استفاده می کند. [\#6894](https://github.com/ClickHouse/ClickHouse/pull/6894) ([ازات خوژین](https://github.com/azat)) +- پیاده سازی توانایی تعریف لیستی از کاربران برای دسترسی به لغت نامه. فقط پایگاه داده متصل فعلی با استفاده از. [\#6907](https://github.com/ClickHouse/ClickHouse/pull/6907) ([کارخانه شراب سازی گیوم](https://github.com/YiuRULE)) +- افزودن `LIMIT` گزینه ای برای `SHOW` پرس و جو. [\#6944](https://github.com/ClickHouse/ClickHouse/pull/6944) ([فیلیپ مالکوفسکی](https://github.com/malkfilipp)) +- افزودن `bitmapSubsetLimit(bitmap, range_start, limit)` تابع, که زیر مجموعه از کوچکترین گرداند `limit` ارزش ها در مجموعه ای است که هیچ کوچکتر از `range_start`. [\#6957](https://github.com/ClickHouse/ClickHouse/pull/6957) ([ژیچنگ یو](https://github.com/yuzhichang)) +- افزودن `bitmapMin` و `bitmapMax` توابع. [\#6970](https://github.com/ClickHouse/ClickHouse/pull/6970) ([ژیچنگ یو](https://github.com/yuzhichang)) +- افزودن تابع `repeat` مربوط به [شماره-6648](https://github.com/ClickHouse/ClickHouse/issues/6648) [\#6999](https://github.com/ClickHouse/ClickHouse/pull/6999) ([فلین](https://github.com/ucasFL)) + +#### ویژگی تجربی {#experimental-feature-1} + +- پیاده سازی (در حافظه) ادغام پیوستن به نوع که خط لوله فعلی را تغییر دهید. نتیجه تا حدی توسط کلید ادغام طبقه بندی شده اند. تنظیم `partial_merge_join = 1` برای استفاده از این ویژگی. ادغام پیوستن هنوز در حال توسعه است. [\#6940](https://github.com/ClickHouse/ClickHouse/pull/6940) ([زویکوف](https://github.com/4ertus2)) +- افزودن `S3` موتور و عملکرد جدول. هنوز در حال توسعه است (هنوز پشتیبانی احراز هویت وجود ندارد). [\#5596](https://github.com/ClickHouse/ClickHouse/pull/5596) ([ولادیمیر چبوتراف](https://github.com/excitoon)) + +#### بهبود {#improvement-2} + +- هر پیامی که از کافکا خوانده می شود به صورت عام وارد می شود. این حل مشکلات تقریبا همه شناخته شده با موتور کافکا. [\#6950](https://github.com/ClickHouse/ClickHouse/pull/6950) ([ایوان](https://github.com/abyss7)) +- بهبود برای عدم موفقیت نمایش داده شد توزیع شده است. کوتاه شدن زمان بازیابی, همچنین در حال حاضر قابل تنظیم است و می تواند در دیده `system.clusters`. 
[\#6399](https://github.com/ClickHouse/ClickHouse/pull/6399) ([واسیلی نمکو](https://github.com/Enmk)) +- پشتیبانی از مقادیر عددی برای شمارشی به طور مستقیم در `IN` بخش. \#6766 [\#6941](https://github.com/ClickHouse/ClickHouse/pull/6941) ([دیماروب2000](https://github.com/dimarub2000)) +- پشتیبانی (اختیاری, غیر فعال به طور پیش فرض) تغییرمسیر در ذخیره سازی نشانی وب. [\#6914](https://github.com/ClickHouse/ClickHouse/pull/6914) ([رول](https://github.com/maqroll)) +- اضافه کردن پیام اطلاعات زمانی که مشتری با نسخه های قدیمی تر متصل به یک سرور. [\#6893](https://github.com/ClickHouse/ClickHouse/pull/6893) ([فیلیپ مالکوفسکی](https://github.com/malkfilipp)) +- حذف حداکثر محدودیت زمانی خواب برای ارسال داده ها در جداول توزیع شده [\#6895](https://github.com/ClickHouse/ClickHouse/pull/6895) ([ازات خوژین](https://github.com/azat)) +- اضافه کردن توانایی ارسال رویدادهای پروفایل (شمارنده) با ارزش تجمعی به گرافیت. این را می توان تحت فعال `` در کارساز `config.xml`. [\#6969](https://github.com/ClickHouse/ClickHouse/pull/6969) ([ازات خوژین](https://github.com/azat)) +- اضافه کردن نوع بازیگران به طور خودکار `T` به `LowCardinality(T)` در حالی که قرار دادن داده ها در ستون نوع `LowCardinality(T)` در قالب بومی از طریق قام. [\#6891](https://github.com/ClickHouse/ClickHouse/pull/6891) ([نیکولای کوچتو](https://github.com/KochetovNicolai)) +- اضافه کردن توانایی استفاده از تابع `hex` بدون استفاده از `reinterpretAsString` برای `Float32`, `Float64`. [\#7024](https://github.com/ClickHouse/ClickHouse/pull/7024) ([میخیل کوروتف](https://github.com/millb)) + +#### ساخت/تست / بهبود بسته بندی {#buildtestingpackaging-improvement-2} + +- اضافه کردن گدی شاخص به کلیک باینری با اطلاعات اشکال زدایی. این سرعت زمان راه اندازی را افزایش می دهد `gdb`. [\#6947](https://github.com/ClickHouse/ClickHouse/pull/6947) ([الساپین](https://github.com/alesapin)) +- بسته بندی با سرعت بالا با بسته بندی پچ پچ که با استفاده از `pigz`. [\#6960](https://github.com/ClickHouse/ClickHouse/pull/6960) ([الساپین](https://github.com/alesapin)) +- تنظیم `enable_fuzzing = 1` برای فعال کردن ابزار دقیق رایگان از تمام کد پروژه. [\#7042](https://github.com/ClickHouse/ClickHouse/pull/7042) ([کیپریزل](https://github.com/kyprizel)) +- اضافه کردن تست دود ساخت تقسیم در سی. [\#7061](https://github.com/ClickHouse/ClickHouse/pull/7061) ([الساپین](https://github.com/alesapin)) +- اضافه کردن ساخت با حفظ به سی. [\#7066](https://github.com/ClickHouse/ClickHouse/pull/7066) ([الکساندر کوزمنکوف](https://github.com/akuzm)) +- جایگزینی `libsparsehash` با `sparsehash-c11` [\#6965](https://github.com/ClickHouse/ClickHouse/pull/6965) ([ازات خوژین](https://github.com/azat)) + +#### رفع اشکال {#bug-fix-5} + +- تجزیه عملکرد ثابت تجزیه و تحلیل شاخص بر روی کلید های پیچیده در جداول بزرگ. این رفع \# 6924. [\#7075](https://github.com/ClickHouse/ClickHouse/pull/7075) ([الکسی میلویدو](https://github.com/alexey-milovidov)) +- رفع خطا منطقی باعث حملات در هنگام انتخاب از کافکا موضوع خالی. [\#6909](https://github.com/ClickHouse/ClickHouse/pull/6909) ([ایوان](https://github.com/abyss7)) +- رفع خیلی زود خروجی زیر اتصال نزدیک در `MySQLBlockInputStream.cpp`. [\#6882](https://github.com/ClickHouse/ClickHouse/pull/6882) ([Clément Rodriguez](https://github.com/clemrodriguez)) +- پشتیبانی از دانه های لینوکس بسیار قدیمی (ثابت [\#6841](https://github.com/ClickHouse/ClickHouse/issues/6841)) [\#6853](https://github.com/ClickHouse/ClickHouse/pull/6853) ([الکسی میلویدو](https://github.com/alexey-milovidov)) +- رفع از دست دادن داده ها ممکن است در `insert select` پرس و جو در صورت بلوک خالی در جریان ورودی. 
\#6834 \#6862 [\#6911](https://github.com/ClickHouse/ClickHouse/pull/6911) ([نیکولای کوچتو](https://github.com/KochetovNicolai)) +- ثابت برای عملکرد `АrrayEnumerateUniqRanked` با بند خالی در پارامز [\#6928](https://github.com/ClickHouse/ClickHouse/pull/6928) ([پرولر](https://github.com/proller)) +- رفع نمایش داده شد پیچیده با مجموعه ای می پیوندد و فرعی جهانی است. [\#6934](https://github.com/ClickHouse/ClickHouse/pull/6934) ([ایوان](https://github.com/abyss7)) +- ثابت `Unknown identifier` خطا در ترتیب و گروه با چند می پیوندد [\#7022](https://github.com/ClickHouse/ClickHouse/pull/7022) ([زویکوف](https://github.com/4ertus2)) +- ثابت `MSan` هشدار هنگام اجرای تابع با `LowCardinality` استدلال کردن. [\#7062](https://github.com/ClickHouse/ClickHouse/pull/7062) ([نیکولای کوچتو](https://github.com/KochetovNicolai)) + +#### تغییر ناسازگار به عقب {#backward-incompatible-change-2} + +- تغییر فرمت ترتیب از بیت مپ \* تابع مجموع می گوید برای بهبود عملکرد. ایالات سریال بیت مپ\* از نسخه های قبلی را نمی توان به عنوان خوانده شده. [\#6908](https://github.com/ClickHouse/ClickHouse/pull/6908) ([ژیچنگ یو](https://github.com/yuzhichang)) + +## انتشار کلیک 19.14 {#clickhouse-release-19-14} + +### انتشار کلیک کنیدهاوس 19.14.7.15, 2019-10-02 {#clickhouse-release-19-14-7-15-2019-10-02} + +#### رفع اشکال {#bug-fix-6} + +- این نسخه همچنین شامل تمام رفع اشکال از 19.11.12.69. +- سازگاری ثابت برای نمایش داده شد توزیع بین 19.14 و نسخه های قبلی. این رفع [\#7068](https://github.com/ClickHouse/ClickHouse/issues/7068). [\#7069](https://github.com/ClickHouse/ClickHouse/pull/7069) ([الکسی میلویدو](https://github.com/alexey-milovidov)) + +### انتشار کلیک کنیدهاوس 19.14.6.12, 2019-09-19 {#clickhouse-release-19-14-6-12-2019-09-19} + +#### رفع اشکال {#bug-fix-7} + +- ثابت برای عملکرد `АrrayEnumerateUniqRanked` با بند خالی در پارامز. [\#6928](https://github.com/ClickHouse/ClickHouse/pull/6928) ([پرولر](https://github.com/proller)) +- نام زیرخاکی ثابت در نمایش داده شد با `ARRAY JOIN` و `GLOBAL IN subquery` با نام مستعار. استفاده از نام مستعار زیرخاکی برای نام جدول خارجی اگر مشخص شده است. [\#6934](https://github.com/ClickHouse/ClickHouse/pull/6934) ([ایوان](https://github.com/abyss7)) + +#### ساخت/تست / بهبود بسته بندی {#buildtestingpackaging-improvement-3} + +- ثابت [زدن](https://clickhouse-test-reports.s3.yandex.net/6944/aab95fd5175a513413c7395a73a82044bdafb906/functional_stateless_tests_(debug).html) تست `00715_fetch_merged_or_mutated_part_zookeeper` با بازنویسی به اسکریپت پوسته چون نیاز به صبر برای جهش به درخواست. [\#6977](https://github.com/ClickHouse/ClickHouse/pull/6977) ([الکساندر کازاکوف](https://github.com/Akazz)) +- شکست ثابت اوبسان و ممسان در عملکرد `groupUniqArray` با استدلال امتپی ار این با قرار دادن خالی ایجاد شد `PaddedPODArray` به هش جدول صفر سلول به دلیل سازنده برای ارزش سلول صفر نامیده می شد. [\#6937](https://github.com/ClickHouse/ClickHouse/pull/6937) ([ایموس پرنده](https://github.com/amosbird)) + +### انتشار کلیک خانه 19.14.3.3, 2019-09-10 {#clickhouse-release-19-14-3-3-2019-09-10} + +#### ویژگی جدید {#new-feature-4} + +- `WITH FILL` تغییردهنده برای `ORDER BY`. (ادامه [\#5069](https://github.com/ClickHouse/ClickHouse/issues/5069)) [\#6610](https://github.com/ClickHouse/ClickHouse/pull/6610) ([انتون پوپوف](https://github.com/CurtizJ)) +- `WITH TIES` تغییردهنده برای `LIMIT`. 
+- Parse unquoted `NULL` literal as NULL (if setting `format_csv_unquoted_null_literal_as_null=1`). Initialize null fields with default values if the data type of this field is not nullable (if setting `input_format_null_as_default=1`). [\#5990](https://github.com/ClickHouse/ClickHouse/issues/5990) [\#6055](https://github.com/ClickHouse/ClickHouse/pull/6055) ([tavplubix](https://github.com/tavplubix))
+- Support wildcards in paths of table functions `file` and `hdfs`. If the path contains wildcards, the table will be read-only. Example of usage: `select * from hdfs('hdfs://hdfs1:9000/some_dir/another_dir/*/file{0..9}{0..9}')` and `select * from file('some_dir/{some_file,another_file,yet_another}.tsv', 'TSV', 'value UInt32')`. [\#6092](https://github.com/ClickHouse/ClickHouse/pull/6092) ([Olga Khvostikova](https://github.com/stavrolia))
+- New `system.metric_log` table which stores values of `system.events` and `system.metrics` with a specified time interval. [\#6363](https://github.com/ClickHouse/ClickHouse/issues/6363) [\#6467](https://github.com/ClickHouse/ClickHouse/pull/6467) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) [\#6530](https://github.com/ClickHouse/ClickHouse/pull/6530) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Allow to write ClickHouse text logs to the `system.text_log` table. [\#6037](https://github.com/ClickHouse/ClickHouse/issues/6037) [\#6103](https://github.com/ClickHouse/ClickHouse/pull/6103) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) [\#6164](https://github.com/ClickHouse/ClickHouse/pull/6164) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Show private symbols in stack traces (this is done via parsing symbol tables of ELF files). Added information about file and line number in stack traces if debug info is present. Sped up symbol name lookup with indexing of symbols present in the program. Added new SQL functions for introspection: `demangle` and `addressToLine`. Renamed function `symbolizeAddress` to `addressToSymbol` for consistency. Function `addressToSymbol` will return the mangled name for performance reasons and you have to apply `demangle`. Added setting `allow_introspection_functions` which is turned off by default. [\#6201](https://github.com/ClickHouse/ClickHouse/pull/6201) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Table function `values` (the name is case-insensitive). It allows to read from a `VALUES` list as proposed in [\#5984](https://github.com/ClickHouse/ClickHouse/issues/5984). Example: `SELECT * FROM VALUES('a UInt64, s String', (1, 'one'), (2, 'two'), (3, 'three'))`. [\#6217](https://github.com/ClickHouse/ClickHouse/issues/6217). [\#6209](https://github.com/ClickHouse/ClickHouse/pull/6209) ([dimarub2000](https://github.com/dimarub2000))
+- Added an ability to alter storage settings. Syntax: `ALTER TABLE <table> MODIFY SETTING <setting> = <value>`. [\#6366](https://github.com/ClickHouse/ClickHouse/pull/6366) [\#6669](https://github.com/ClickHouse/ClickHouse/pull/6669) [\#6685](https://github.com/ClickHouse/ClickHouse/pull/6685) ([alesapin](https://github.com/alesapin))
+- Support for removing of detached parts. Syntax: `ALTER TABLE <table_name> DROP DETACHED PART '<part_id>'`. [\#6158](https://github.com/ClickHouse/ClickHouse/pull/6158) ([tavplubix](https://github.com/tavplubix))
+- Table constraints. Allows to add a constraint to a table definition which will be checked at insert. [\#5273](https://github.com/ClickHouse/ClickHouse/pull/5273) ([Gleb Novikov](https://github.com/NanoBjorn)) [\#6652](https://github.com/ClickHouse/ClickHouse/pull/6652) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Support for cascaded materialized views. [\#6324](https://github.com/ClickHouse/ClickHouse/pull/6324) ([Amos Bird](https://github.com/amosbird))
+- Turn on the query profiler by default to sample every query execution thread once a second. [\#6283](https://github.com/ClickHouse/ClickHouse/pull/6283) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Input format `ORC`. [\#6454](https://github.com/ClickHouse/ClickHouse/pull/6454) [\#6703](https://github.com/ClickHouse/ClickHouse/pull/6703) ([akonyaev90](https://github.com/akonyaev90))
+- Added two new functions: `sigmoid` and `tanh` (that are useful for machine learning applications). [\#6254](https://github.com/ClickHouse/ClickHouse/pull/6254) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Functions `hasToken(haystack, token)`, `hasTokenCaseInsensitive(haystack, token)` to check if the given token is in the haystack. A token is a maximal-length substring between two non-alphanumeric ASCII characters (or the boundaries of the haystack). The token must be a constant string. Supported by the tokenbf_v1 index specialization. [\#6596](https://github.com/ClickHouse/ClickHouse/pull/6596), [\#6662](https://github.com/ClickHouse/ClickHouse/pull/6662) ([Vasily Nemkov](https://github.com/Enmk))
+- New function `neighbor(value, offset[, default_value])`. Allows to reach the prev/next value within a column in a block of data. [\#5925](https://github.com/ClickHouse/ClickHouse/pull/5925) ([Alex Krash](https://github.com/alex-krash)) [6685365ab8c5b74f9650492c88a012596eb1b0c6](https://github.com/ClickHouse/ClickHouse/commit/6685365ab8c5b74f9650492c88a012596eb1b0c6) [341e2e4587a18065c2da1ca888c73389f48ce36c](https://github.com/ClickHouse/ClickHouse/commit/341e2e4587a18065c2da1ca888c73389f48ce36c) [alexey-milovidov](https://github.com/alexey-milovidov)
+- Created a function `currentUser()`, returning the login of the authorized user. Added the alias `user()` for compatibility with MySQL. [\#6470](https://github.com/ClickHouse/ClickHouse/pull/6470) ([Alex Krash](https://github.com/alex-krash))
+- New aggregate functions `quantilesExactInclusive` and `quantilesExactExclusive` which were proposed in [\#5885](https://github.com/ClickHouse/ClickHouse/issues/5885). [\#6477](https://github.com/ClickHouse/ClickHouse/pull/6477) ([dimarub2000](https://github.com/dimarub2000))
+- Function `bitmapRange(bitmap, range_begin, range_end)` which returns a new set with the specified range (not including `range_end`). [\#6314](https://github.com/ClickHouse/ClickHouse/pull/6314) ([Zhichang Yu](https://github.com/yuzhichang))
+- Function `geohashesInBox(longitude_min, latitude_min, longitude_max, latitude_max, precision)` which creates an array of precision-long strings of geohash boxes covering the provided area. [\#6127](https://github.com/ClickHouse/ClickHouse/pull/6127) ([Vasily Nemkov](https://github.com/Enmk))
+- Implement support for INSERT query with `Kafka` tables. [\#6012](https://github.com/ClickHouse/ClickHouse/pull/6012) ([Ivan](https://github.com/abyss7))
+- Added support for `_partition` and `_timestamp` virtual columns to the Kafka engine. [\#6400](https://github.com/ClickHouse/ClickHouse/pull/6400) ([Ivan](https://github.com/abyss7))
+- Possibility to remove sensitive data from `query_log`, server logs, and the process list with regexp-based rules. [\#5710](https://github.com/ClickHouse/ClickHouse/pull/5710) ([filimonov](https://github.com/filimonov))
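+
+A combined sketch of a few of the new functions and modifiers above (hypothetical queries for illustration, not taken from the linked pull requests):
+
+``` sql
+-- WITH FILL inserts the missing values of the ORDER BY expression.
+SELECT number FROM numbers(10) WHERE number % 3 = 1 ORDER BY number WITH FILL;
+
+-- hasToken matches a whole token between non-alphanumeric boundaries.
+SELECT hasToken('Hello, world', 'world');
+
+-- neighbor reaches the next value within the current block.
+SELECT number, neighbor(number, 1) FROM numbers(4);
+```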
+
+#### Experimental Feature {#experimental-feature-2}
+
+- Input and output data format `Template`. It allows to specify a custom format string for input and output. [\#4354](https://github.com/ClickHouse/ClickHouse/issues/4354) [\#6727](https://github.com/ClickHouse/ClickHouse/pull/6727) ([tavplubix](https://github.com/tavplubix))
+- Implementation of `LIVE VIEW` tables that were originally proposed in [\#2898](https://github.com/ClickHouse/ClickHouse/pull/2898), prepared in [\#3925](https://github.com/ClickHouse/ClickHouse/issues/3925), and then updated in [\#5541](https://github.com/ClickHouse/ClickHouse/issues/5541). See [\#5541](https://github.com/ClickHouse/ClickHouse/issues/5541) for a detailed description. [\#5541](https://github.com/ClickHouse/ClickHouse/issues/5541) ([vzakaznikov](https://github.com/vzakaznikov)) [\#6425](https://github.com/ClickHouse/ClickHouse/pull/6425) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) [\#6656](https://github.com/ClickHouse/ClickHouse/pull/6656) ([vzakaznikov](https://github.com/vzakaznikov)) Note that the `LIVE VIEW` feature may be removed in next versions.
+
+#### Bug Fix {#bug-fix-8}
+
+- This release also contains all bug fixes from 19.13 and 19.11.
+- Fix segmentation fault when the table has skip indices and vertical merge happens. [\#6723](https://github.com/ClickHouse/ClickHouse/pull/6723) ([alesapin](https://github.com/alesapin))
+- Fix per-column TTL with non-trivial column defaults. Previously, in case of a forced TTL merge with the `OPTIMIZE ... FINAL` query, expired values were replaced by type defaults instead of user-specified column defaults. [\#6796](https://github.com/ClickHouse/ClickHouse/pull/6796) ([Anton Popov](https://github.com/CurtizJ))
+- Fix Kafka messages duplication problem on normal server restart. [\#6597](https://github.com/ClickHouse/ClickHouse/pull/6597) ([Ivan](https://github.com/abyss7))
+- Fixed infinite loop when reading Kafka messages. Do not pause/resume the consumer on subscription at all - otherwise it may get paused indefinitely in some scenarios. [\#6354](https://github.com/ClickHouse/ClickHouse/pull/6354) ([Ivan](https://github.com/abyss7))
+- Fix `Key expression contains comparison between inconvertible types` exception in `bitmapContains` function. [\#6136](https://github.com/ClickHouse/ClickHouse/issues/6136) [\#6146](https://github.com/ClickHouse/ClickHouse/issues/6146) [\#6156](https://github.com/ClickHouse/ClickHouse/pull/6156) ([dimarub2000](https://github.com/dimarub2000))
+- Fix segfault with enabled `optimize_skip_unused_shards` and a missing sharding key. [\#6384](https://github.com/ClickHouse/ClickHouse/pull/6384) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed wrong code in mutations that may lead to memory corruption. Fixed segfault with read of address `0x14c0` that may happen due to concurrent `DROP TABLE` and `SELECT` from `system.parts` or `system.parts_columns`. Fixed race condition in preparation of mutation queries. Fixed deadlock caused by `OPTIMIZE` of Replicated tables and concurrent modification operations like ALTERs. [\#6514](https://github.com/ClickHouse/ClickHouse/pull/6514) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Removed extra verbose logging in the MySQL interface [\#6389](https://github.com/ClickHouse/ClickHouse/pull/6389) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Return the ability to parse boolean settings from ‘true’ and ‘false’ in the configuration file. [\#6278](https://github.com/ClickHouse/ClickHouse/pull/6278) ([alesapin](https://github.com/alesapin))
+- Fix crash in `quantile` and `median` functions over `Nullable(Decimal128)`. [\#6378](https://github.com/ClickHouse/ClickHouse/pull/6378) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed possible incomplete result returned by a `SELECT` query with a `WHERE` condition on the primary key that contained a conversion to Float type. It was caused by incorrect checking of monotonicity in the `toFloat` function. [\#6248](https://github.com/ClickHouse/ClickHouse/issues/6248) [\#6374](https://github.com/ClickHouse/ClickHouse/pull/6374) ([dimarub2000](https://github.com/dimarub2000))
+- Check the `max_expanded_ast_elements` setting for mutations. Clear mutations after `TRUNCATE TABLE`. [\#6205](https://github.com/ClickHouse/ClickHouse/pull/6205) ([Winter Zhang](https://github.com/zhang2014))
+- Fix JOIN results for key columns when used with `join_use_nulls`. Attach Nulls instead of column defaults. [\#6249](https://github.com/ClickHouse/ClickHouse/pull/6249) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix for skip indices with vertical merge and alter. Fix for the `Bad size of marks file` exception. [\#6594](https://github.com/ClickHouse/ClickHouse/issues/6594) [\#6713](https://github.com/ClickHouse/ClickHouse/pull/6713) ([alesapin](https://github.com/alesapin))
+- Fix rare crash in `ALTER MODIFY COLUMN` and vertical merge when one of the merged/altered parts is empty (0 rows) [\#6746](https://github.com/ClickHouse/ClickHouse/issues/6746) [\#6780](https://github.com/ClickHouse/ClickHouse/pull/6780) ([alesapin](https://github.com/alesapin))
+- Fixed bug in conversion of `LowCardinality` types in `AggregateFunctionFactory`. This fixes [\#6257](https://github.com/ClickHouse/ClickHouse/issues/6257). [\#6281](https://github.com/ClickHouse/ClickHouse/pull/6281) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix wrong behavior and possible segfaults in `topK` and `topKWeighted` aggregate functions. [\#6404](https://github.com/ClickHouse/ClickHouse/pull/6404) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed unsafe code around the `getIdentifier` function. [\#6401](https://github.com/ClickHouse/ClickHouse/issues/6401) [\#6409](https://github.com/ClickHouse/ClickHouse/pull/6409) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed bug in the MySQL wire protocol (used while connecting to ClickHouse from a MySQL client). Caused by a heap buffer overflow in `PacketPayloadWriteBuffer`. [\#6212](https://github.com/ClickHouse/ClickHouse/pull/6212) ([Yuriy Baranov](https://github.com/yurriy))
+- Fixed memory leak in `bitmapSubsetInRange` function. [\#6819](https://github.com/ClickHouse/ClickHouse/pull/6819) ([Zhichang Yu](https://github.com/yuzhichang))
+- Fix rare bug when a mutation is executed after a granularity change. [\#6816](https://github.com/ClickHouse/ClickHouse/pull/6816) ([alesapin](https://github.com/alesapin))
+- Allow protobuf messages with all fields by default. [\#6132](https://github.com/ClickHouse/ClickHouse/pull/6132) ([Vitaly Baranov](https://github.com/vitlibar))
+- Resolve a bug with the `nullIf` function when we send a `NULL` argument as the second argument. [\#6446](https://github.com/ClickHouse/ClickHouse/pull/6446) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Fix rare bug with wrong memory allocation/deallocation in complex key cache dictionaries with string fields, which leads to infinite memory consumption (looks like a memory leak). The bug reproduces when the string size is a power of two starting from eight (8, 16, 32, etc). [\#6447](https://github.com/ClickHouse/ClickHouse/pull/6447) ([alesapin](https://github.com/alesapin))
+- Fixed Gorilla encoding on small sequences which caused the exception `Cannot write after end of buffer`. [\#6398](https://github.com/ClickHouse/ClickHouse/issues/6398) [\#6444](https://github.com/ClickHouse/ClickHouse/pull/6444) ([Vasily Nemkov](https://github.com/Enmk))
+- Allow to use not nullable types in JOINs with `join_use_nulls` enabled [\#6705](https://github.com/ClickHouse/ClickHouse/pull/6705) ([Artem Zuikov](https://github.com/4ertus2))
+- Disable `Poco::AbstractConfiguration` substitutions in queries in `clickhouse-client`. [\#6706](https://github.com/ClickHouse/ClickHouse/pull/6706) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Avoid deadlock in `REPLACE PARTITION`. [\#6677](https://github.com/ClickHouse/ClickHouse/pull/6677) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Using `arrayReduce` for constant arguments may lead to segfault. [\#6242](https://github.com/ClickHouse/ClickHouse/issues/6242) [\#6326](https://github.com/ClickHouse/ClickHouse/pull/6326) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix inconsistent parts which can appear if a replica was restored after `DROP PARTITION`. [\#6522](https://github.com/ClickHouse/ClickHouse/issues/6522) [\#6523](https://github.com/ClickHouse/ClickHouse/pull/6523) ([tavplubix](https://github.com/tavplubix))
+- Fixed hang in `JSONExtractRaw` function. [\#6195](https://github.com/ClickHouse/ClickHouse/issues/6195) [\#6198](https://github.com/ClickHouse/ClickHouse/pull/6198) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix bug with incorrect skip indices serialization and aggregation with adaptive granularity. [\#6594](https://github.com/ClickHouse/ClickHouse/issues/6594). [\#6748](https://github.com/ClickHouse/ClickHouse/pull/6748) ([alesapin](https://github.com/alesapin))
+- Fixed `WITH ROLLUP` and `WITH CUBE` modifiers of `GROUP BY` with two-level aggregation. [\#6225](https://github.com/ClickHouse/ClickHouse/pull/6225) ([Anton Popov](https://github.com/CurtizJ))
+- Fix bug with writing secondary index marks with adaptive granularity. [\#6126](https://github.com/ClickHouse/ClickHouse/pull/6126) ([alesapin](https://github.com/alesapin))
+- Fix initialization order during server startup. Since `StorageMergeTree::background_task_handle` is initialized in `startup()`, `MergeTreeBlockOutputStream::write()` may try to use it before initialization. Just check if it is initialized. [\#6080](https://github.com/ClickHouse/ClickHouse/pull/6080) ([Ivan](https://github.com/abyss7))
+- Clearing the data buffer from the previous read operation that was completed with an error. [\#6026](https://github.com/ClickHouse/ClickHouse/pull/6026) ([Nikolay](https://github.com/bopohaa))
+- Fix bug with enabling adaptive granularity when creating a new replica for a Replicated\*MergeTree table. [\#6394](https://github.com/ClickHouse/ClickHouse/issues/6394) [\#6452](https://github.com/ClickHouse/ClickHouse/pull/6452) ([alesapin](https://github.com/alesapin))
+- Fixed possible crash during server startup in case an exception happened in `libunwind` during an exception at access to the uninitialized `ThreadStatus` structure. [\#6456](https://github.com/ClickHouse/ClickHouse/pull/6456) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Fix crash in `yandexConsistentHash` function. Found by fuzz test. [\#6304](https://github.com/ClickHouse/ClickHouse/issues/6304) [\#6305](https://github.com/ClickHouse/ClickHouse/pull/6305) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed the possibility of hanging queries when the server is overloaded and the global thread pool becomes nearly full. This has a higher chance to happen on clusters with a large number of shards (hundreds), because distributed queries allocate a thread per connection to each shard. For example, this issue may reproduce if a cluster of 330 shards is processing 30 concurrent distributed queries. This issue affects all versions starting from 19.2. [\#6301](https://github.com/ClickHouse/ClickHouse/pull/6301) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed logic of `arrayEnumerateUniqRanked` function. [\#6423](https://github.com/ClickHouse/ClickHouse/pull/6423) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed symbol table decoding. [\#6603](https://github.com/ClickHouse/ClickHouse/pull/6603) ([Amos Bird](https://github.com/amosbird))
+- Fixed irrelevant exception in cast of `LowCardinality(Nullable)` to not-Nullable column in case if it doesn't contain Nulls (e.g. in a query like `SELECT CAST(CAST('Hello' AS LowCardinality(Nullable(String))) AS String)`. [\#6094](https://github.com/ClickHouse/ClickHouse/issues/6094) [\#6119](https://github.com/ClickHouse/ClickHouse/pull/6119) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Removed extra quoting of descriptions in the `system.settings` table [\#6696](https://github.com/ClickHouse/ClickHouse/issues/6696) [\#6699](https://github.com/ClickHouse/ClickHouse/pull/6699) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Avoid possible deadlock in `TRUNCATE` of a Replicated table. [\#6695](https://github.com/ClickHouse/ClickHouse/pull/6695) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix reading in order of sorting key. [\#6189](https://github.com/ClickHouse/ClickHouse/pull/6189) ([Anton Popov](https://github.com/CurtizJ))
+- Fix `ALTER TABLE ... UPDATE` query for tables with `enable_mixed_granularity_parts=1`. [\#6543](https://github.com/ClickHouse/ClickHouse/pull/6543) ([alesapin](https://github.com/alesapin))
+- Fix bug opened by [\#4405](https://github.com/ClickHouse/ClickHouse/pull/4405) (since 19.4.0). Reproduces in queries to Distributed tables over MergeTree tables when we don't query any columns (`SELECT 1`). [\#6236](https://github.com/ClickHouse/ClickHouse/pull/6236) ([alesapin](https://github.com/alesapin))
+- Fixed overflow in integer division of a signed type by an unsigned type. The behaviour was exactly as in the C or C++ language (integer promotion rules), which may be surprising. Please note that the overflow is still possible when dividing a large signed number by a large unsigned number or vice-versa (but that case is less usual). The issue existed in all server versions. [\#6214](https://github.com/ClickHouse/ClickHouse/issues/6214) [\#6233](https://github.com/ClickHouse/ClickHouse/pull/6233) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Limit maximum sleep time for throttling when `max_execution_speed` or `max_execution_speed_bytes` is set. Fixed false errors like `Estimated query execution time (inf seconds) is too long`. [\#5547](https://github.com/ClickHouse/ClickHouse/issues/5547) [\#6232](https://github.com/ClickHouse/ClickHouse/pull/6232) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed issues with using `MATERIALIZED` columns and aliases in `MaterializedView`. [\#448](https://github.com/ClickHouse/ClickHouse/issues/448) [\#3484](https://github.com/ClickHouse/ClickHouse/issues/3484) [\#3450](https://github.com/ClickHouse/ClickHouse/issues/3450) [\#2878](https://github.com/ClickHouse/ClickHouse/issues/2878) [\#2285](https://github.com/ClickHouse/ClickHouse/issues/2285) [\#3796](https://github.com/ClickHouse/ClickHouse/pull/3796) ([Amos Bird](https://github.com/amosbird)) [\#6316](https://github.com/ClickHouse/ClickHouse/pull/6316) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed `FormatFactory` behaviour for input streams which are not implemented as processors. [\#6495](https://github.com/ClickHouse/ClickHouse/pull/6495) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed typo. [\#6631](https://github.com/ClickHouse/ClickHouse/pull/6631) ([Alex Ryndin](https://github.com/alexryndin))
+- Typo in the error message (is -\> are). [\#6839](https://github.com/ClickHouse/ClickHouse/pull/6839) ([Denis Zhuravlev](https://github.com/den-crane))
+- Fixed error while parsing a column list from a string if the type contained a comma (this issue was relevant for `File`, `URL`, `HDFS` storages) [\#6217](https://github.com/ClickHouse/ClickHouse/issues/6217). [\#6209](https://github.com/ClickHouse/ClickHouse/pull/6209) ([dimarub2000](https://github.com/dimarub2000))
+
+#### Security Fix {#security-fix}
+
+- This release also contains all security bug fixes from 19.13 and 19.11.
+- Fixed the possibility of a fabricated query causing a server crash due to stack overflow in the SQL parser. Fixed the possibility of stack overflow in Merge and Distributed tables, materialized views and conditions for row-level security that involve subqueries. [\#6433](https://github.com/ClickHouse/ClickHouse/pull/6433) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Improvement {#improvement-3}
+
+- Correct implementation of ternary logic for `AND/OR`. [\#6048](https://github.com/ClickHouse/ClickHouse/pull/6048) ([Alexander Kazakov](https://github.com/Akazz))
+- Now values and rows with expired TTL will be removed after `OPTIMIZE ... FINAL` query from old parts without TTL infos or with outdated TTL infos, e.g. after `ALTER ... MODIFY TTL` query. Added queries `SYSTEM STOP/START TTL MERGES` to disallow/allow assigning merges with TTL and filtering expired values in all merges. [\#6274](https://github.com/ClickHouse/ClickHouse/pull/6274) ([Anton Popov](https://github.com/CurtizJ))
+- Possibility to change the location of the ClickHouse history file for the client using the `CLICKHOUSE_HISTORY_FILE` env. [\#6840](https://github.com/ClickHouse/ClickHouse/pull/6840) ([filimonov](https://github.com/filimonov))
+- Remove `dry_run` flag from `InterpreterSelectQuery`. … [\#6375](https://github.com/ClickHouse/ClickHouse/pull/6375) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Support `ASOF JOIN` with the `ON` section (see the sketch after this list). [\#6211](https://github.com/ClickHouse/ClickHouse/pull/6211) ([Artem Zuikov](https://github.com/4ertus2))
+- Better support of skip indexes for mutations and replication. Support for `MATERIALIZE/CLEAR INDEX ... IN PARTITION` query. `UPDATE x = x` recalculates all indices that use column `x`. [\#5053](https://github.com/ClickHouse/ClickHouse/pull/5053) ([Nikita Vasilev](https://github.com/nikvas0))
+- Allow to `ATTACH` live views (for example, at server startup) regardless of the `allow_experimental_live_view` setting. [\#6754](https://github.com/ClickHouse/ClickHouse/pull/6754) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- For stack traces gathered by the query profiler, do not include stack frames generated by the query profiler itself. [\#6250](https://github.com/ClickHouse/ClickHouse/pull/6250) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Now table functions `values`, `file`, `url`, `hdfs` have support for ALIAS columns. [\#6255](https://github.com/ClickHouse/ClickHouse/pull/6255) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Throw an exception if a `config.d` file doesn't have the corresponding root element as the config file. [\#6123](https://github.com/ClickHouse/ClickHouse/pull/6123) ([dimarub2000](https://github.com/dimarub2000))
+- Print extra info in the exception message for `no space left on device`. [\#6182](https://github.com/ClickHouse/ClickHouse/issues/6182), [\#6252](https://github.com/ClickHouse/ClickHouse/issues/6252) [\#6352](https://github.com/ClickHouse/ClickHouse/pull/6352) ([tavplubix](https://github.com/tavplubix))
+- When determining shards of a `Distributed` table to be covered by a read query (for `optimize_skip_unused_shards` = 1), ClickHouse now checks conditions from both the `prewhere` and `where` clauses of the select statement. [\#6521](https://github.com/ClickHouse/ClickHouse/pull/6521) ([Alexander Kazakov](https://github.com/Akazz))
+- Enabled `SIMDJSON` for machines without AVX2 but with the SSE 4.2 and PCLMUL instruction set. [\#6285](https://github.com/ClickHouse/ClickHouse/issues/6285) [\#6320](https://github.com/ClickHouse/ClickHouse/pull/6320) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- ClickHouse can work on filesystems without `O_DIRECT` support: [\#4449](https://github.com/ClickHouse/ClickHouse/issues/4449) [\#6730](https://github.com/ClickHouse/ClickHouse/pull/6730) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Support push-down predicate for the final subquery. [\#6120](https://github.com/ClickHouse/ClickHouse/pull/6120) ([TCeason](https://github.com/TCeason)) [\#6162](https://github.com/ClickHouse/ClickHouse/pull/6162) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Better `JOIN ON` keys extraction [\#6131](https://github.com/ClickHouse/ClickHouse/pull/6131) ([Artem Zuikov](https://github.com/4ertus2))
+- Updated `SIMDJSON`. [\#6285](https://github.com/ClickHouse/ClickHouse/issues/6285). [\#6306](https://github.com/ClickHouse/ClickHouse/pull/6306) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Optimize selecting of the smallest column for `SELECT count()` query. [\#6344](https://github.com/ClickHouse/ClickHouse/pull/6344) ([Amos Bird](https://github.com/amosbird))
+- Added `strict` parameter in `windowFunnel()`. When `strict` is set, `windowFunnel()` applies conditions only to unique values. [\#6548](https://github.com/ClickHouse/ClickHouse/pull/6548) ([achimbab](https://github.com/achimbab))
+- Safer interface of `mysqlxx::Pool`. [\#6150](https://github.com/ClickHouse/ClickHouse/pull/6150) ([avasiliev](https://github.com/avasiliev))
+- The size of the options line when running with the `--help` option now corresponds to the terminal size. [\#6590](https://github.com/ClickHouse/ClickHouse/pull/6590) ([dimarub2000](https://github.com/dimarub2000))
+- Disable “read in order” optimization for aggregation without keys. [\#6599](https://github.com/ClickHouse/ClickHouse/pull/6599) ([Anton Popov](https://github.com/CurtizJ))
+- HTTP status code for `INCORRECT_DATA` and `TYPE_MISMATCH` error codes was changed from the default `500 Internal Server Error` to `400 Bad Request`. [\#6271](https://github.com/ClickHouse/ClickHouse/pull/6271) ([Alexander Rodin](https://github.com/a-rodin))
+- Move the Join object from `ExpressionAction` into `AnalyzedJoin`. `ExpressionAnalyzer` and `ExpressionAction` do not know about the `Join` class anymore. Its logic is hidden by the `AnalyzedJoin` iface. [\#6801](https://github.com/ClickHouse/ClickHouse/pull/6801) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed possible deadlock of distributed queries when one of the shards is localhost but the query is sent via a network connection. [\#6759](https://github.com/ClickHouse/ClickHouse/pull/6759) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Changed semantics of multiple-table `RENAME` to avoid possible deadlocks. [\#6757](https://github.com/ClickHouse/ClickHouse/issues/6757). [\#6756](https://github.com/ClickHouse/ClickHouse/pull/6756) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Rewritten MySQL compatibility server to prevent loading the full packet payload in memory. Decreased memory consumption for each connection to approximately `2 * DBMS_DEFAULT_BUFFER_SIZE` (read/write buffers). [\#5811](https://github.com/ClickHouse/ClickHouse/pull/5811) ([Yuriy Baranov](https://github.com/yurriy))
+- Move the AST alias interpreting logic out of the parser, which doesn't have to know anything about query semantics. [\#6108](https://github.com/ClickHouse/ClickHouse/pull/6108) ([Artem Zuikov](https://github.com/4ertus2))
+- Slightly safer parsing of `NamesAndTypesList`. [\#6408](https://github.com/ClickHouse/ClickHouse/issues/6408). [\#6410](https://github.com/ClickHouse/ClickHouse/pull/6410) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `clickhouse-copier`: Allow using `where_condition` from config with the `partition_key` alias in the query for checking partition existence (earlier it was used only in data-reading queries). [\#6577](https://github.com/ClickHouse/ClickHouse/pull/6577) ([proller](https://github.com/proller))
+- Added an optional message argument in `throwIf`. ([\#5772](https://github.com/ClickHouse/ClickHouse/issues/5772)) [\#6329](https://github.com/ClickHouse/ClickHouse/pull/6329) ([Vladimir](https://github.com/Vdimir))
+- A server exception received while sending insertion data is now being processed in the client as well. [\#5891](https://github.com/ClickHouse/ClickHouse/issues/5891) [\#6711](https://github.com/ClickHouse/ClickHouse/pull/6711) ([dimarub2000](https://github.com/dimarub2000))
+- Added a metric `DistributedFilesToInsert` that shows the total number of files in the filesystem that are selected to be sent to remote servers by Distributed tables. The number is summed across all shards. [\#6600](https://github.com/ClickHouse/ClickHouse/pull/6600) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Move most of the JOINs prepare logic from `ExpressionAction/ExpressionAnalyzer` to `AnalyzedJoin`. [\#6785](https://github.com/ClickHouse/ClickHouse/pull/6785) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix TSan [warning](https://clickhouse-test-reports.s3.yandex.net/6399/c1c1d1daa98e199e620766f1bd06a5921050a00d/functional_stateful_tests_(thread).html) ‘lock-order-inversion’. [\#6740](https://github.com/ClickHouse/ClickHouse/pull/6740) ([Vasily Nemkov](https://github.com/Enmk))
+- Better information messages about lack of Linux capabilities. Logging fatal errors with “fatal” level, which will make them easier to find in `system.text_log`. [\#6441](https://github.com/ClickHouse/ClickHouse/pull/6441) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- When dumping temporary data to disk to restrict memory usage during `GROUP BY`, `ORDER BY` is enabled, the free disk space was not checked. The fix adds a new setting `min_free_disk_space`; when the free disk space is smaller than the threshold, the query will stop and throw `ErrorCodes::NOT_ENOUGH_SPACE`. [\#6678](https://github.com/ClickHouse/ClickHouse/pull/6678) ([Weiqing Xu](https://github.com/weiqxu)) [\#6691](https://github.com/ClickHouse/ClickHouse/pull/6691) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Removed recursive rwlock by thread. It makes no sense, because threads are reused between queries. A `SELECT` query may acquire a lock in one thread, hold a lock from another thread and exit from the first thread. At the same time, the first thread can be reused by a `DROP` query. This will lead to false “Attempt to acquire exclusive lock recursively” messages. [\#6771](https://github.com/ClickHouse/ClickHouse/pull/6771) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Split `ExpressionAnalyzer.appendJoin()`. Prepare a place in `ExpressionAnalyzer` for `MergeJoin`. [\#6524](https://github.com/ClickHouse/ClickHouse/pull/6524) ([Artem Zuikov](https://github.com/4ertus2))
+- Added the `mysql_native_password` authentication plugin to the MySQL compatibility server. [\#6194](https://github.com/ClickHouse/ClickHouse/pull/6194) ([Yuriy Baranov](https://github.com/yurriy))
+- Fewer `clock_gettime` calls; fixed ABI compatibility between debug/release in `Allocator` (insignificant issue). [\#6197](https://github.com/ClickHouse/ClickHouse/pull/6197) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Move `collectUsedColumns` from `ExpressionAnalyzer` to `SyntaxAnalyzer`. `SyntaxAnalyzer` makes `required_source_columns` itself now. [\#6416](https://github.com/ClickHouse/ClickHouse/pull/6416) ([Artem Zuikov](https://github.com/4ertus2))
+- Add setting `joined_subquery_requires_alias` to require aliases for subselects and table functions in `FROM` when more than one table is present (i.e. queries with JOINs). [\#6733](https://github.com/ClickHouse/ClickHouse/pull/6733) ([Artem Zuikov](https://github.com/4ertus2))
+- Extract `GetAggregatesVisitor` class from `ExpressionAnalyzer`. [\#6458](https://github.com/ClickHouse/ClickHouse/pull/6458) ([Artem Zuikov](https://github.com/4ertus2))
+- `system.query_log`: change the data type of the `type` column to `Enum`. [\#6265](https://github.com/ClickHouse/ClickHouse/pull/6265) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Static linking of the `sha256_password` authentication plugin. [\#6512](https://github.com/ClickHouse/ClickHouse/pull/6512) ([Yuriy Baranov](https://github.com/yurriy))
+- Avoid an extra dependency for the setting `compile` to work. In previous versions, the user might get errors like `cannot open crti.o`, `unable to find library -lc` etc. [\#6309](https://github.com/ClickHouse/ClickHouse/pull/6309) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- More validation of input that may come from a malicious replica. [\#6303](https://github.com/ClickHouse/ClickHouse/pull/6303) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Now the `clickhouse-obfuscator` file is available in the `clickhouse-client` package. In previous versions it was available as `clickhouse obfuscator` (with whitespace). [\#5816](https://github.com/ClickHouse/ClickHouse/issues/5816) [\#6609](https://github.com/ClickHouse/ClickHouse/pull/6609) ([dimarub2000](https://github.com/dimarub2000))
+- Fixed deadlock when we have at least two queries that read at least two tables in different order and another query that performs a DDL operation on one of the tables. Fixed another very rare deadlock. [\#6764](https://github.com/ClickHouse/ClickHouse/pull/6764) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added `os_thread_ids` column to `system.processes` and `system.query_log` for better debugging possibilities. [\#6763](https://github.com/ClickHouse/ClickHouse/pull/6763) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- A workaround for PHP mysqlnd extension bugs which occur when `sha256_password` is used as the default authentication plugin (described in [\#6031](https://github.com/ClickHouse/ClickHouse/issues/6031)). [\#6113](https://github.com/ClickHouse/ClickHouse/pull/6113) ([Yuriy Baranov](https://github.com/yurriy))
+- Removed an unneeded place with changed nullability columns. [\#6693](https://github.com/ClickHouse/ClickHouse/pull/6693) ([Artem Zuikov](https://github.com/4ertus2))
+- Set the default value of `queue_max_wait_ms` to zero, because the current value (five seconds) makes no sense. There are rare circumstances when this setting has any use. Added settings `replace_running_query_max_wait_ms`, `kafka_max_wait_ms` and `connection_pool_max_wait_ms` for disambiguation. [\#6692](https://github.com/ClickHouse/ClickHouse/pull/6692) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Extract `SelectQueryExpressionAnalyzer` from `ExpressionAnalyzer`. Keep the last one for non-select queries. [\#6499](https://github.com/ClickHouse/ClickHouse/pull/6499) ([Artem Zuikov](https://github.com/4ertus2))
+- Removed duplicated input and output formats. [\#6239](https://github.com/ClickHouse/ClickHouse/pull/6239) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Allow the user to override the `poll_interval` and `idle_connection_timeout` settings on connection. [\#6230](https://github.com/ClickHouse/ClickHouse/pull/6230) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `MergeTree` now has an additional option `ttl_only_drop_parts` (disabled by default) to avoid partial pruning of parts, so that they are dropped completely when all the rows in a part are expired. [\#6191](https://github.com/ClickHouse/ClickHouse/pull/6191) ([Sergi Vladykin](https://github.com/svladykin))
+- Type checks for set index functions. Throw an exception if a function got a wrong type. This fixes a fuzz test with UBSan. [\#6511](https://github.com/ClickHouse/ClickHouse/pull/6511) ([Nikita Vasilev](https://github.com/nikvas0))
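+
+A minimal sketch of the `ASOF JOIN ... ON` form mentioned in the improvements above (the `trades` and `quotes` tables are hypothetical, for illustration only):
+
+``` sql
+-- For every trade, pick the closest quote at or before the trade time.
+SELECT t.symbol, t.time, q.bid
+FROM trades AS t
+ASOF JOIN quotes AS q ON t.symbol = q.symbol AND t.time >= q.time;
+```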
+
+#### Performance Improvement {#performance-improvement-2}
+
+- Optimize queries with an `ORDER BY expressions` clause, where `expressions` have a coinciding prefix with the sorting key in `MergeTree` tables. This optimization is controlled by the `optimize_read_in_order` setting (see the sketch after this list). [\#6054](https://github.com/ClickHouse/ClickHouse/pull/6054) [\#6629](https://github.com/ClickHouse/ClickHouse/pull/6629) ([Anton Popov](https://github.com/CurtizJ))
+- Allow to use multiple threads during parts loading and removal. [\#6372](https://github.com/ClickHouse/ClickHouse/issues/6372) [\#6074](https://github.com/ClickHouse/ClickHouse/issues/6074) [\#6438](https://github.com/ClickHouse/ClickHouse/pull/6438) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Implemented a batch variant of updating aggregate function states. It may lead to performance benefits. [\#6435](https://github.com/ClickHouse/ClickHouse/pull/6435) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Using the `FastOps` library for functions `exp`, `log`, `sigmoid`, `tanh`. FastOps is a fast vector math library from Michael Parakhin (Yandex CTO). Improved performance of `exp` and `log` functions more than 6 times. The functions `exp` and `log` from a `Float32` argument will return `Float32` (in previous versions they always returned `Float64`). Now `exp(nan)` may return `inf`. The result of the `exp` and `log` functions may be not the nearest machine-representable number to the true answer. [\#6254](https://github.com/ClickHouse/ClickHouse/pull/6254) ([alexey-milovidov](https://github.com/alexey-milovidov)) Using the Danila Kutenin variant to make fastops work [\#6317](https://github.com/ClickHouse/ClickHouse/pull/6317) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Disable consecutive key optimization for `UInt8/16`. [\#6298](https://github.com/ClickHouse/ClickHouse/pull/6298) [\#6701](https://github.com/ClickHouse/ClickHouse/pull/6701) ([akuzm](https://github.com/akuzm))
+- Improved performance of the `simdjson` library by getting rid of dynamic allocation in `ParsedJson::Iterator`. [\#6479](https://github.com/ClickHouse/ClickHouse/pull/6479) ([Vitaly Baranov](https://github.com/vitlibar))
+- Pre-fault pages when allocating memory with `mmap()`. [\#6667](https://github.com/ClickHouse/ClickHouse/pull/6667) ([akuzm](https://github.com/akuzm))
+- Fix performance bug in `Decimal` comparison. [\#6380](https://github.com/ClickHouse/ClickHouse/pull/6380) ([Artem Zuikov](https://github.com/4ertus2))
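+
+A sketch of when the read-in-order optimization applies (the `hits` table and its sorting key are assumptions made for illustration):
+
+``` sql
+-- If the table's sorting key starts with (CounterID, EventDate), this
+-- ORDER BY matches a prefix of it, so the server can read parts in
+-- order and avoid a full sort.
+SELECT CounterID, EventDate
+FROM hits
+ORDER BY CounterID, EventDate
+LIMIT 10
+SETTINGS optimize_read_in_order = 1;
+```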
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-4}
+
+- Remove the Compiler (runtime template instantiation) because we’ve won over its performance. [\#6646](https://github.com/ClickHouse/ClickHouse/pull/6646) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added a performance test to show the degradation of performance in gcc-9 in a more isolated way. [\#6302](https://github.com/ClickHouse/ClickHouse/pull/6302) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added table function `numbers_mt`, which is a multithreaded version of `numbers`. Updated performance tests with hash functions. [\#6554](https://github.com/ClickHouse/ClickHouse/pull/6554) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Comparison mode in `clickhouse-benchmark` [\#6220](https://github.com/ClickHouse/ClickHouse/issues/6220) [\#6343](https://github.com/ClickHouse/ClickHouse/pull/6343) ([dimarub2000](https://github.com/dimarub2000))
+- Best effort for printing stack traces. Also added `SIGPROF` as a debugging signal to print the stack trace of a running thread. [\#6529](https://github.com/ClickHouse/ClickHouse/pull/6529) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Every function in its own file, part 10. [\#6321](https://github.com/ClickHouse/ClickHouse/pull/6321) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Remove doubled const `TABLE_IS_READ_ONLY`. [\#6566](https://github.com/ClickHouse/ClickHouse/pull/6566) ([filimonov](https://github.com/filimonov))
+- Formatting changes for `StringHashMap` PR [\#5417](https://github.com/ClickHouse/ClickHouse/issues/5417). [\#6700](https://github.com/ClickHouse/ClickHouse/pull/6700) ([akuzm](https://github.com/akuzm))
+- Better subquery for join creation in `ExpressionAnalyzer`. [\#6824](https://github.com/ClickHouse/ClickHouse/pull/6824) ([Artem Zuikov](https://github.com/4ertus2))
+- Removed a redundant condition (found by PVS Studio). [\#6775](https://github.com/ClickHouse/ClickHouse/pull/6775) ([akuzm](https://github.com/akuzm))
+- Separate the hash table interface for `ReverseIndex`. [\#6672](https://github.com/ClickHouse/ClickHouse/pull/6672) ([akuzm](https://github.com/akuzm))
+- Refactoring of settings. [\#6689](https://github.com/ClickHouse/ClickHouse/pull/6689) ([alesapin](https://github.com/alesapin))
+- Add comments for `set` index functions. [\#6319](https://github.com/ClickHouse/ClickHouse/pull/6319) ([Nikita Vasilev](https://github.com/nikvas0))
+- Increase OOM score in the debug version on Linux. [\#6152](https://github.com/ClickHouse/ClickHouse/pull/6152) ([akuzm](https://github.com/akuzm))
+- HDFS HA now works in debug build. [\#6650](https://github.com/ClickHouse/ClickHouse/pull/6650) ([Weiqing Xu](https://github.com/weiqxu))
+- Added a test to `transform_query_for_external_database`. [\#6388](https://github.com/ClickHouse/ClickHouse/pull/6388) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add a test for multiple materialized views for a Kafka table. [\#6509](https://github.com/ClickHouse/ClickHouse/pull/6509) ([Ivan](https://github.com/abyss7))
+- Make a better build scheme. [\#6500](https://github.com/ClickHouse/ClickHouse/pull/6500) ([Ivan](https://github.com/abyss7))
+- Fixed `test_external_dictionaries` integration in case it was executed under a non-root user. [\#6507](https://github.com/ClickHouse/ClickHouse/pull/6507) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- The bug reproduces when the total size of written packets exceeds `DBMS_DEFAULT_BUFFER_SIZE`. [\#6204](https://github.com/ClickHouse/ClickHouse/pull/6204) ([Yuriy Baranov](https://github.com/yurriy))
+- Added a test for the `RENAME` table race condition [\#6752](https://github.com/ClickHouse/ClickHouse/pull/6752) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Avoid data race on Settings in `KILL QUERY`. [\#6753](https://github.com/ClickHouse/ClickHouse/pull/6753) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add an integration test for handling errors by a cache dictionary. [\#6755](https://github.com/ClickHouse/ClickHouse/pull/6755) ([Vitaly Baranov](https://github.com/vitlibar))
+- Disable parsing of ELF object files on Mac OS, because it makes no sense. [\#6578](https://github.com/ClickHouse/ClickHouse/pull/6578) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Attempt to make the changelog generator better. [\#6327](https://github.com/ClickHouse/ClickHouse/pull/6327) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Adding the `-Wshadow` switch to GCC. [\#6325](https://github.com/ClickHouse/ClickHouse/pull/6325) ([kreuzerkrieg](https://github.com/kreuzerkrieg))
+- Removed obsolete code for `mimalloc` support [\#6715](https://github.com/ClickHouse/ClickHouse/pull/6715) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `zlib-ng` determines x86 capabilities and saves this info to global variables. This is done in the deflateInit call, which may be made by different threads simultaneously. To avoid multithreaded writes, do it on library startup. [\#6141](https://github.com/ClickHouse/ClickHouse/pull/6141) ([akuzm](https://github.com/akuzm))
+- Regression test for a bug in JOIN which was fixed in [\#5192](https://github.com/ClickHouse/ClickHouse/issues/5192). [\#6147](https://github.com/ClickHouse/ClickHouse/pull/6147) ([Bakhtiyor Ruziev](https://github.com/theruziev))
+- Fixed MSan report. [\#6144](https://github.com/ClickHouse/ClickHouse/pull/6144) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix flapping TTL test. [\#6782](https://github.com/ClickHouse/ClickHouse/pull/6782) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed false data race in `MergeTreeDataPart::is_frozen` field. [\#6583](https://github.com/ClickHouse/ClickHouse/pull/6583) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed timeouts in fuzz test. In the previous version, it managed to find a false hangup in the query `SELECT * FROM numbers_mt(gccMurmurHash(''))`. [\#6582](https://github.com/ClickHouse/ClickHouse/pull/6582) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added debug checks to `static_cast` of columns. [\#6581](https://github.com/ClickHouse/ClickHouse/pull/6581) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Support for Oracle Linux in official RPM packages. [\#6356](https://github.com/ClickHouse/ClickHouse/issues/6356) [\#6585](https://github.com/ClickHouse/ClickHouse/pull/6585) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Changed json perftests from `once` to `loop` type. [\#6536](https://github.com/ClickHouse/ClickHouse/pull/6536) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- `odbc-bridge.cpp` defines `main()` so it should not be included in `clickhouse-lib`. [\#6538](https://github.com/ClickHouse/ClickHouse/pull/6538) ([Orivej Desh](https://github.com/orivej))
+- Test for crash in `FULL|RIGHT JOIN` with nulls in the right table’s keys. [\#6362](https://github.com/ClickHouse/ClickHouse/pull/6362) ([Artem Zuikov](https://github.com/4ertus2))
+- Added a test for the limit on expansion of aliases, just in case. [\#6442](https://github.com/ClickHouse/ClickHouse/pull/6442) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Switched from `boost::filesystem` to `std::filesystem` where appropriate. [\#6253](https://github.com/ClickHouse/ClickHouse/pull/6253) [\#6385](https://github.com/ClickHouse/ClickHouse/pull/6385) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added RPM packages to the website. [\#6251](https://github.com/ClickHouse/ClickHouse/pull/6251) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add a test for the fixed `Unknown identifier` exception in the `IN` section. [\#6708](https://github.com/ClickHouse/ClickHouse/pull/6708) ([Artem Zuikov](https://github.com/4ertus2))
+- Simplify `shared_ptr_helper` because people are facing difficulties understanding it. [\#6675](https://github.com/ClickHouse/ClickHouse/pull/6675) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added performance tests for the fixed Gorilla and DoubleDelta codecs. [\#6179](https://github.com/ClickHouse/ClickHouse/pull/6179) ([Vasily Nemkov](https://github.com/Enmk))
+- Split the integration test `test_dictionaries` into 4 separate tests. [\#6776](https://github.com/ClickHouse/ClickHouse/pull/6776) ([Vitaly Baranov](https://github.com/vitlibar))
+- Processors support: `PipelineExecutor`. [\#6777](https://github.com/ClickHouse/ClickHouse/pull/6777) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Allow to use the `library` dictionary source with ASan. [\#6482](https://github.com/ClickHouse/ClickHouse/pull/6482) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added an option to generate a changelog from a list of PRs. [\#6350](https://github.com/ClickHouse/ClickHouse/pull/6350) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Lock the `TinyLog` storage when reading. [\#6226](https://github.com/ClickHouse/ClickHouse/pull/6226) ([akuzm](https://github.com/akuzm))
+- Check for broken symlinks in CI. [\#6634](https://github.com/ClickHouse/ClickHouse/pull/6634) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Increase the timeout for the “stack overflow” test because it may take a long time in debug build. [\#6637](https://github.com/ClickHouse/ClickHouse/pull/6637) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added a check for double whitespaces. [\#6643](https://github.com/ClickHouse/ClickHouse/pull/6643) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix `new/delete` memory tracking when building with sanitizers. Tracking is not clear. It only prevents memory limit exceptions in tests. [\#6450](https://github.com/ClickHouse/ClickHouse/pull/6450) ([Artem Zuikov](https://github.com/4ertus2))
+- Enable back the check of undefined symbols while linking. [\#6453](https://github.com/ClickHouse/ClickHouse/pull/6453) ([Ivan](https://github.com/abyss7))
+- Avoid rebuilding `hyperscan` every day. [\#6307](https://github.com/ClickHouse/ClickHouse/pull/6307) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed UBSan report in `ProtobufWriter`. [\#6163](https://github.com/ClickHouse/ClickHouse/pull/6163) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Don’t allow using the query profiler with sanitizers because it is not compatible. [\#6769](https://github.com/ClickHouse/ClickHouse/pull/6769) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add a test for reloading a dictionary after a failure by timer. [\#6114](https://github.com/ClickHouse/ClickHouse/pull/6114) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fix inconsistency in `PipelineExecutor::prepareProcessor` argument type. [\#6494](https://github.com/ClickHouse/ClickHouse/pull/6494) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Added a test for bad URIs. [\#6493](https://github.com/ClickHouse/ClickHouse/pull/6493) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added more checks to the `CAST` function. This should get more information about segmentation faults in fuzzy tests. [\#6346](https://github.com/ClickHouse/ClickHouse/pull/6346) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Added `gcc-9` support to the `docker/builder` container that builds the image locally. [\#6333](https://github.com/ClickHouse/ClickHouse/pull/6333) ([Gleb Novikov](https://github.com/NanoBjorn))
+- Test for primary key with `LowCardinality(String)`. [\#5044](https://github.com/ClickHouse/ClickHouse/issues/5044) [\#6219](https://github.com/ClickHouse/ClickHouse/pull/6219) ([dimarub2000](https://github.com/dimarub2000))
+- Fixed tests affected by slow stack trace printing. [\#6315](https://github.com/ClickHouse/ClickHouse/pull/6315) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add a test case for the crash in `groupUniqArray` fixed in [\#6029](https://github.com/ClickHouse/ClickHouse/pull/6029). [\#4402](https://github.com/ClickHouse/ClickHouse/issues/4402) [\#6129](https://github.com/ClickHouse/ClickHouse/pull/6129) ([akuzm](https://github.com/akuzm))
+- Fixed indices mutations tests. [\#6645](https://github.com/ClickHouse/ClickHouse/pull/6645) ([Nikita Vasilev](https://github.com/nikvas0))
+- In the performance test, do not read the query log for queries we didn’t run. [\#6427](https://github.com/ClickHouse/ClickHouse/pull/6427) ([akuzm](https://github.com/akuzm))
+- A materialized view now can be created with any low cardinality types regardless of the setting about suspicious low cardinality types. [\#6428](https://github.com/ClickHouse/ClickHouse/pull/6428) ([Olga Khvostikova](https://github.com/stavrolia))
+- Updated tests for the `send_logs_level` setting. [\#6207](https://github.com/ClickHouse/ClickHouse/pull/6207) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix build under gcc-8.2. [\#6196](https://github.com/ClickHouse/ClickHouse/pull/6196) ([Max Akhmedov](https://github.com/zlobober))
+- Fix build with internal libc++. [\#6724](https://github.com/ClickHouse/ClickHouse/pull/6724) ([Ivan](https://github.com/abyss7))
+- Fix shared build with the `rdkafka` library [\#6101](https://github.com/ClickHouse/ClickHouse/pull/6101) ([Ivan](https://github.com/abyss7))
+- Fixes for Mac OS build (incomplete). [\#6390](https://github.com/ClickHouse/ClickHouse/pull/6390) ([alexey-milovidov](https://github.com/alexey-milovidov)) [\#6429](https://github.com/ClickHouse/ClickHouse/pull/6429) ([Alex Zaitsev](https://github.com/alex-zaitsev))
+- Fix “splitted” build. [\#6618](https://github.com/ClickHouse/ClickHouse/pull/6618) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Other build fixes: [\#6186](https://github.com/ClickHouse/ClickHouse/pull/6186) ([Amos Bird](https://github.com/amosbird)) [\#6486](https://github.com/ClickHouse/ClickHouse/pull/6486) [\#6348](https://github.com/ClickHouse/ClickHouse/pull/6348) ([Vxider](https://github.com/Vxider)) [\#6744](https://github.com/ClickHouse/ClickHouse/pull/6744) ([Ivan](https://github.com/abyss7)) [\#6016](https://github.com/ClickHouse/ClickHouse/pull/6016) [\#6421](https://github.com/ClickHouse/ClickHouse/pull/6421) [\#6491](https://github.com/ClickHouse/ClickHouse/pull/6491) ([proller](https://github.com/proller))
+
+#### Backward Incompatible Change {#backward-incompatible-change-3}
+
+- Removed the rarely used table function `catBoostPool` and the storage `CatBoostPool`. If you have used this table function, please write an email to `clickhouse-feedback@yandex-team.com`. Note that CatBoost integration remains and will be supported. [\#6279](https://github.com/ClickHouse/ClickHouse/pull/6279) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Disable `ANY RIGHT JOIN` and `ANY FULL JOIN` by default. Set the `any_join_distinct_right_table_keys` setting to enable them (a usage snippet follows). [\#5126](https://github.com/ClickHouse/ClickHouse/issues/5126) [\#6351](https://github.com/ClickHouse/ClickHouse/pull/6351) ([Artem Zuikov](https://github.com/4ertus2))
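+
+If the old behavior is needed, it can be restored per the last item above (a usage sketch):
+
+``` sql
+SET any_join_distinct_right_table_keys = 1; -- re-enables ANY RIGHT/FULL JOIN
+```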
+
+## ClickHouse release 19.13 {#clickhouse-release-19-13}
+
+### ClickHouse release 19.13.6.51, 2019-10-02 {#clickhouse-release-19-13-6-51-2019-10-02}
+
+#### Bug Fix {#bug-fix-9}
+
+- This release also contains all bug fixes from 19.11.12.69.
+
+### ClickHouse release 19.13.5.44, 2019-09-20 {#clickhouse-release-19-13-5-44-2019-09-20}
+
+#### Bug Fix {#bug-fix-10}
+
+- This release also contains all bug fixes from 19.14.6.12.
+- Fixed a possible inconsistent state of a table while executing a `DROP` query for a replicated table while ZooKeeper is not accessible. [\#6045](https://github.com/ClickHouse/ClickHouse/issues/6045) [\#6413](https://github.com/ClickHouse/ClickHouse/pull/6413) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Fix for data race in StorageMerge [\#6717](https://github.com/ClickHouse/ClickHouse/pull/6717) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix bug introduced in the query profiler which leads to endless recv from socket. [\#6386](https://github.com/ClickHouse/ClickHouse/pull/6386) ([alesapin](https://github.com/alesapin))
+- Fix excessive CPU usage while executing the `JSONExtractRaw` function over a boolean value. [\#6208](https://github.com/ClickHouse/ClickHouse/pull/6208) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fixes the regression while pushing to a materialized view. [\#6415](https://github.com/ClickHouse/ClickHouse/pull/6415) ([Ivan](https://github.com/abyss7))
+- Table function `url` had the vulnerability that allowed an attacker to inject arbitrary HTTP headers in the request. This issue was found by [Nikita Tikhomirov](https://github.com/NSTikhomirov). [\#6466](https://github.com/ClickHouse/ClickHouse/pull/6466) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix useless `AST` check in Set index. [\#6510](https://github.com/ClickHouse/ClickHouse/issues/6510) [\#6651](https://github.com/ClickHouse/ClickHouse/pull/6651) ([Nikita Vasilev](https://github.com/nikvas0))
+- Fixed parsing of `AggregateFunction` values embedded in a query. [\#6575](https://github.com/ClickHouse/ClickHouse/issues/6575) [\#6773](https://github.com/ClickHouse/ClickHouse/pull/6773) ([Zhichang Yu](https://github.com/yuzhichang))
+- Fixed wrong behaviour of the `trim` functions family. [\#6647](https://github.com/ClickHouse/ClickHouse/pull/6647) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse release 19.13.4.32, 2019-09-10 {#clickhouse-release-19-13-4-32-2019-09-10}
+
+#### Bug Fix {#bug-fix-11}
+
+- This release also contains all security bug fixes from 19.11.9.52 and 19.11.10.54.
+- Fixed data race in `system.parts` table and `ALTER` query. [\#6245](https://github.com/ClickHouse/ClickHouse/issues/6245) [\#6513](https://github.com/ClickHouse/ClickHouse/pull/6513) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed mismatched header in streams that happened in case of reading from an empty distributed table with sample and prewhere. [\#6167](https://github.com/ClickHouse/ClickHouse/issues/6167) ([Lixiang Qian](https://github.com/fancyqlx)) [\#6823](https://github.com/ClickHouse/ClickHouse/pull/6823) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed crash when using `IN` clause with a subquery with a tuple. [\#6125](https://github.com/ClickHouse/ClickHouse/issues/6125) [\#6550](https://github.com/ClickHouse/ClickHouse/pull/6550) ([tavplubix](https://github.com/tavplubix))
+- Fix case with same column names in `GLOBAL JOIN ON` section. [\#6181](https://github.com/ClickHouse/ClickHouse/pull/6181) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix crash when casting types to `Decimal` that do not support it. Throw an exception instead. [\#6297](https://github.com/ClickHouse/ClickHouse/pull/6297) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed crash in `extractAll()` function. [\#6644](https://github.com/ClickHouse/ClickHouse/pull/6644) ([Artem Zuikov](https://github.com/4ertus2))
+- Query transformation for `MySQL`, `ODBC`, `JDBC` table functions now works properly for `SELECT WHERE` queries with multiple `AND` expressions. [\#6381](https://github.com/ClickHouse/ClickHouse/issues/6381) [\#6676](https://github.com/ClickHouse/ClickHouse/pull/6676) ([dimarub2000](https://github.com/dimarub2000))
+
+#### Security Fix {#security-fix-1}
+
+- Fixed two vulnerabilities in codecs in the decompression phase (a malicious user could fabricate compressed data that would lead to a buffer overflow in decompression). [\#6670](https://github.com/ClickHouse/ClickHouse/pull/6670) ([Artem Zuikov](https://github.com/4ertus2))
+
+### ClickHouse release 19.13.3.26, 2019-08-22 {#clickhouse-release-19-13-3-26-2019-08-22}
+
+#### Bug Fix {#bug-fix-12}
+
+- Fixed the `ALTER TABLE ... UPDATE` query for tables with `enable_mixed_granularity_parts=1`. [\#6543](https://github.com/ClickHouse/ClickHouse/pull/6543) ([alesapin](https://github.com/alesapin))
+- Fixed an NPE when using an IN clause with a subquery with a tuple. [\#6125](https://github.com/ClickHouse/ClickHouse/issues/6125) [\#6550](https://github.com/ClickHouse/ClickHouse/pull/6550) ([tavplubix](https://github.com/tavplubix))
+- Fixed an issue where, if a stale replica became alive, it might still have data parts that were removed by DROP PARTITION. [\#6522](https://github.com/ClickHouse/ClickHouse/issues/6522) [\#6523](https://github.com/ClickHouse/ClickHouse/pull/6523) ([tavplubix](https://github.com/tavplubix))
+- Fixed an issue with parsing CSV. [\#6426](https://github.com/ClickHouse/ClickHouse/issues/6426) [\#6559](https://github.com/ClickHouse/ClickHouse/pull/6559) ([tavplubix](https://github.com/tavplubix))
+- Fixed a data race in the system.parts table and the ALTER query. This fixes [\#6245](https://github.com/ClickHouse/ClickHouse/issues/6245). [\#6513](https://github.com/ClickHouse/ClickHouse/pull/6513) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed wrong code in mutations that may lead to memory corruption. Fixed a segfault with read of address `0x14c0` that could happen due to concurrent `DROP TABLE` and `SELECT` from `system.parts` or `system.parts_columns`. Fixed a race condition in the preparation of mutation queries. Fixed a deadlock caused by `OPTIMIZE` of replicated tables and concurrent modification operations like ALTERs. [\#6514](https://github.com/ClickHouse/ClickHouse/pull/6514) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed possible data loss after an `ALTER DELETE` query on a table with a skipping index. [\#6224](https://github.com/ClickHouse/ClickHouse/issues/6224) [\#6282](https://github.com/ClickHouse/ClickHouse/pull/6282) ([Nikita Vasilev](https://github.com/nikvas0))
+
+#### Security Fix {#security-fix-2}
+
+- If an attacker has write access to ZooKeeper and is able to run a custom server available from the network where ClickHouse runs, they can create a custom-built malicious server that will act as a ClickHouse replica and register it in ZooKeeper. When another replica fetches a data part from the malicious replica, it can force clickhouse-server to write to an arbitrary path on the filesystem. Found by Eldar Zaitov, information security team at Yandex. [\#6247](https://github.com/ClickHouse/ClickHouse/pull/6247) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse release 19.13.2.19, 2019-08-14 {#clickhouse-release-19-13-2-19-2019-08-14}
+
+#### New Feature {#new-feature-5}
+
+- Sampling profiler on query level. [Example](https://gist.github.com/alexey-milovidov/92758583dd41c24c360fdb8d6a4da194).
[\#4247](https://github.com/ClickHouse/ClickHouse/issues/4247) ([laplab](https://github.com/laplab)) [\#6124](https://github.com/ClickHouse/ClickHouse/pull/6124) ([alexey-milovidov](https://github.com/alexey-milovidov)) [\#6250](https://github.com/ClickHouse/ClickHouse/pull/6250) [\#6283](https://github.com/ClickHouse/ClickHouse/pull/6283) [\#6386](https://github.com/ClickHouse/ClickHouse/pull/6386)
+- Allow specifying a list of columns with the `COLUMNS('regexp')` expression, which works like a more sophisticated variant of the `*` asterisk. [\#5951](https://github.com/ClickHouse/ClickHouse/pull/5951) ([mfridental](https://github.com/mfridental)), ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `CREATE TABLE AS table_function()` is now possible. [\#6057](https://github.com/ClickHouse/ClickHouse/pull/6057) ([dimarub2000](https://github.com/dimarub2000))
+- The Adam optimizer for stochastic gradient descent is used by default in the `stochasticLinearRegression()` and `stochasticLogisticRegression()` aggregate functions, because it shows good quality with almost no tuning. [\#6000](https://github.com/ClickHouse/ClickHouse/pull/6000) ([Quid37](https://github.com/Quid37))
+- Added functions for working with the custom week number. [\#5212](https://github.com/ClickHouse/ClickHouse/pull/5212) ([Andy Yang](https://github.com/andyyzh))
+- `RENAME` queries now work with all storages. [\#5953](https://github.com/ClickHouse/ClickHouse/pull/5953) ([Ivan](https://github.com/abyss7))
+- Now the client receives logs from the server at any desired level by setting `send_logs_level`, regardless of the log level specified in the server settings. [\#5964](https://github.com/ClickHouse/ClickHouse/pull/5964) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+
+#### Backward Incompatible Change {#backward-incompatible-change-4}
+
+- The setting `input_format_defaults_for_omitted_fields` is enabled by default. Inserts into distributed tables need this setting to be the same across the cluster (you need to set it before a rolling update). It enables the calculation of complex default expressions for omitted fields in the `JSONEachRow` and `CSV*` formats. It should be the expected behaviour, but may lead to a negligible performance difference. [\#6043](https://github.com/ClickHouse/ClickHouse/pull/6043) ([Artem Zuikov](https://github.com/4ertus2)), [\#5625](https://github.com/ClickHouse/ClickHouse/pull/5625) ([akuzm](https://github.com/akuzm))
+
+#### Experimental Features {#experimental-features}
+
+- New query processing pipeline. Use the `experimental_use_processors=1` option to enable it. Use at your own risk. [\#4914](https://github.com/ClickHouse/ClickHouse/pull/4914) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+
+#### Bug Fix {#bug-fix-13}
+
+- Kafka integration has been fixed in this version.
+- Fixed `DoubleDelta` encoding of `Int64` for large `DoubleDelta` values, improved `DoubleDelta` encoding for random data for `Int32`. [\#5998](https://github.com/ClickHouse/ClickHouse/pull/5998) ([Vasily Nemkov](https://github.com/Enmk))
+- Fixed overestimation of `max_rows_to_read` if the setting `merge_tree_uniform_read_distribution` is set to 0. [\#6019](https://github.com/ClickHouse/ClickHouse/pull/6019) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Improvement {#improvement-4}
+
+- Throws an exception if a `config.d` file doesn't have the corresponding root element as the config file. [\#6123](https://github.com/ClickHouse/ClickHouse/pull/6123) ([dimarub2000](https://github.com/dimarub2000))
+
+#### Performance Improvement {#performance-improvement-3}
+
+- Optimized `count()`. Now it uses the smallest column (if possible); see the sketch after this list. [\#6028](https://github.com/ClickHouse/ClickHouse/pull/6028) ([Amos Bird](https://github.com/amosbird))
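+
+A minimal illustration of what the `count()` optimization means (table and column names are hypothetical): when no argument is given, the server is free to materialize whichever column is cheapest to read.
+
+```sql
+CREATE TABLE events (id UInt64, payload String, flag UInt8)
+ENGINE = MergeTree ORDER BY id;
+
+SELECT count() FROM events;  -- may scan only `flag` (1 byte per row) instead of the wide `payload`
+```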
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-5}
+
+- Report memory usage in performance tests. [\#5899](https://github.com/ClickHouse/ClickHouse/pull/5899) ([akuzm](https://github.com/akuzm))
+- Fixed build with external `libcxx`. [\#6010](https://github.com/ClickHouse/ClickHouse/pull/6010) ([Ivan](https://github.com/abyss7))
+- Fixed shared build with the `rdkafka` library. [\#6101](https://github.com/ClickHouse/ClickHouse/pull/6101) ([Ivan](https://github.com/abyss7))
+
+## ClickHouse release 19.11 {#clickhouse-release-19-11}
+
+### ClickHouse release 19.11.13.74, 2019-11-01 {#clickhouse-release-19-11-13-74-2019-11-01}
+
+#### Bug Fix {#bug-fix-14}
+
+- Fixed a rare crash in `ALTER MODIFY COLUMN` and vertical merge when one of the merged/altered parts is empty (0 rows). [\#6780](https://github.com/ClickHouse/ClickHouse/pull/6780) ([alesapin](https://github.com/alesapin))
+- Manual update of `SIMDJSON`. This fixes possible flooding of stderr files with bogus json diagnostic messages. [\#7548](https://github.com/ClickHouse/ClickHouse/pull/7548) ([Alexander Kazakov](https://github.com/Akazz))
+- Fixed a bug with the `mrk` file extension for mutations ([alesapin](https://github.com/alesapin))
+
+### ClickHouse release 19.11.12.69, 2019-10-02 {#clickhouse-release-19-11-12-69-2019-10-02}
+
+#### Bug Fix {#bug-fix-15}
+
+- Fixed performance degradation of index analysis on complex keys on large tables. This fixes [\#6924](https://github.com/ClickHouse/ClickHouse/issues/6924). [\#7075](https://github.com/ClickHouse/ClickHouse/pull/7075) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Avoid rare SIGSEGV while sending data in tables with the Distributed engine (`Failed to send batch: file with index XXXXX is absent`). [\#7032](https://github.com/ClickHouse/ClickHouse/pull/7032) ([Azat Khuzhin](https://github.com/azat))
+- Fixed `Unknown identifier` with multiple joins. This fixes [\#5254](https://github.com/ClickHouse/ClickHouse/issues/5254). [\#7022](https://github.com/ClickHouse/ClickHouse/pull/7022) ([Artem Zuikov](https://github.com/4ertus2))
+
+### ClickHouse release 19.11.11.57, 2019-09-13 {#clickhouse-release-19-11-11-57-2019-09-13}
+
+- Fixed a logical error causing segfaults when selecting from an empty Kafka topic. [\#6902](https://github.com/ClickHouse/ClickHouse/issues/6902) [\#6909](https://github.com/ClickHouse/ClickHouse/pull/6909) ([Ivan](https://github.com/abyss7))
+- Fixed the function `arrayEnumerateUniqRanked` with empty arrays in params. [\#6928](https://github.com/ClickHouse/ClickHouse/pull/6928) ([proller](https://github.com/proller))
+
+### ClickHouse release 19.11.10.54, 2019-09-10 {#clickhouse-release-19-11-10-54-2019-09-10}
+
+#### Bug Fix {#bug-fix-16}
+
+- Store offsets for Kafka messages manually to be able to commit them all at once for all partitions. Fixes potential duplication in the “one consumer - many partitions” scenario. [\#6872](https://github.com/ClickHouse/ClickHouse/pull/6872) ([Ivan](https://github.com/abyss7))
+
+### ClickHouse release 19.11.9.52, 2019-09-6 {#clickhouse-release-19-11-9-52-2019-09-6}
+
+- Improved error handling in cache dictionaries. [\#6737](https://github.com/ClickHouse/ClickHouse/pull/6737) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fixed a bug in the function `arrayEnumerateUniqRanked`.
[\#6779](https://github.com/ClickHouse/ClickHouse/pull/6779) ([proller](https://github.com/proller))
+- Fixed the `JSONExtract` function while extracting a `Tuple` from JSON. [\#6718](https://github.com/ClickHouse/ClickHouse/pull/6718) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fixed possible data loss after an `ALTER DELETE` query on a table with a skipping index. [\#6224](https://github.com/ClickHouse/ClickHouse/issues/6224) [\#6282](https://github.com/ClickHouse/ClickHouse/pull/6282) ([Nikita Vasilev](https://github.com/nikvas0))
+- Fixed performance test. [\#6392](https://github.com/ClickHouse/ClickHouse/pull/6392) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Parquet: fixed reading boolean columns. [\#6579](https://github.com/ClickHouse/ClickHouse/pull/6579) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed wrong behaviour of the `nullIf` function for constant arguments. [\#6518](https://github.com/ClickHouse/ClickHouse/pull/6518) ([Guillaume Tassery](https://github.com/YiuRULE)) [\#6580](https://github.com/ClickHouse/ClickHouse/pull/6580) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed the Kafka message duplication problem on normal server restart. [\#6597](https://github.com/ClickHouse/ClickHouse/pull/6597) ([Ivan](https://github.com/abyss7))
+- Fixed an issue where a long `ALTER UPDATE` or `ALTER DELETE` could prevent regular merges from running. Prevent mutations from executing if there are not enough free threads available. [\#6502](https://github.com/ClickHouse/ClickHouse/issues/6502) [\#6617](https://github.com/ClickHouse/ClickHouse/pull/6617) ([tavplubix](https://github.com/tavplubix))
+- Fixed an error with processing “timezone” in the server configuration file. [\#6709](https://github.com/ClickHouse/ClickHouse/pull/6709) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed Kafka tests. [\#6805](https://github.com/ClickHouse/ClickHouse/pull/6805) ([Ivan](https://github.com/abyss7))
+
+#### Security Fix {#security-fix-3}
+
+- If an attacker has write access to ZooKeeper and is able to run a custom server available from the network where ClickHouse runs, they can create a custom-built malicious server that will act as a ClickHouse replica and register it in ZooKeeper. When another replica fetches a data part from the malicious replica, it can force clickhouse-server to write to an arbitrary path on the filesystem. Found by Eldar Zaitov, information security team at Yandex. [\#6247](https://github.com/ClickHouse/ClickHouse/pull/6247) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse release 19.11.8.46, 2019-08-22 {#clickhouse-release-19-11-8-46-2019-08-22}
+
+#### Bug Fix {#bug-fix-17}
+
+- Fixed the `ALTER TABLE ... UPDATE` query for tables with `enable_mixed_granularity_parts=1`. [\#6543](https://github.com/ClickHouse/ClickHouse/pull/6543) ([alesapin](https://github.com/alesapin))
+- Fixed an NPE when using an IN clause with a subquery with a tuple. [\#6125](https://github.com/ClickHouse/ClickHouse/issues/6125) [\#6550](https://github.com/ClickHouse/ClickHouse/pull/6550) ([tavplubix](https://github.com/tavplubix))
+- Fixed an issue where, if a stale replica became alive, it might still have data parts that were removed by DROP PARTITION.
[\#6522](https://github.com/ClickHouse/ClickHouse/issues/6522) [\#6523](https://github.com/ClickHouse/ClickHouse/pull/6523) ([tavplubix](https://github.com/tavplubix))
+- Fixed an issue with parsing CSV. [\#6426](https://github.com/ClickHouse/ClickHouse/issues/6426) [\#6559](https://github.com/ClickHouse/ClickHouse/pull/6559) ([tavplubix](https://github.com/tavplubix))
+- Fixed a data race in the system.parts table and the ALTER query. This fixes [\#6245](https://github.com/ClickHouse/ClickHouse/issues/6245). [\#6513](https://github.com/ClickHouse/ClickHouse/pull/6513) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed wrong code in mutations that may lead to memory corruption. Fixed a segfault with read of address `0x14c0` that could happen due to concurrent `DROP TABLE` and `SELECT` from `system.parts` or `system.parts_columns`. Fixed a race condition in the preparation of mutation queries. Fixed a deadlock caused by `OPTIMIZE` of replicated tables and concurrent modification operations like ALTERs. [\#6514](https://github.com/ClickHouse/ClickHouse/pull/6514) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse release 19.11.7.40, 2019-08-14 {#clickhouse-release-19-11-7-40-2019-08-14}
+
+#### Bug Fix {#bug-fix-18}
+
+- Kafka integration has been fixed in this version.
+- Fixed a segfault when using `arrayReduce` for constant arguments. [\#6326](https://github.com/ClickHouse/ClickHouse/pull/6326) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed `toFloat()` monotonicity. [\#6374](https://github.com/ClickHouse/ClickHouse/pull/6374) ([dimarub2000](https://github.com/dimarub2000))
+- Fixed a segfault with enabled `optimize_skip_unused_shards` and a missing sharding key. [\#6384](https://github.com/ClickHouse/ClickHouse/pull/6384) ([CurtizJ](https://github.com/CurtizJ))
+- Fixed the logic of the `arrayEnumerateUniqRanked` function. [\#6423](https://github.com/ClickHouse/ClickHouse/pull/6423) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Removed extra verbose logging from the MySQL handler. [\#6389](https://github.com/ClickHouse/ClickHouse/pull/6389) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed wrong behavior and possible segfaults in the `topK` and `topKWeighted` aggregate functions. [\#6404](https://github.com/ClickHouse/ClickHouse/pull/6404) ([CurtizJ](https://github.com/CurtizJ))
+- Do not expose virtual columns in the `system.columns` table. This is required for backward compatibility. [\#6406](https://github.com/ClickHouse/ClickHouse/pull/6406) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a bug with memory allocation for string fields in a complex key cache dictionary. [\#6447](https://github.com/ClickHouse/ClickHouse/pull/6447) ([alesapin](https://github.com/alesapin))
+- Fixed a bug with enabling adaptive granularity when creating a new replica for a `Replicated*MergeTree` table. [\#6452](https://github.com/ClickHouse/ClickHouse/pull/6452) ([alesapin](https://github.com/alesapin))
+- Fixed an infinite loop when reading Kafka messages. [\#6354](https://github.com/ClickHouse/ClickHouse/pull/6354) ([abyss7](https://github.com/abyss7))
+- Fixed the possibility of a fabricated query causing a server crash due to stack overflow in the SQL parser, and the possibility of stack overflow in `Merge` and `Distributed` tables. [\#6433](https://github.com/ClickHouse/ClickHouse/pull/6433) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a Gorilla encoding error on small sequences. [\#6444](https://github.com/ClickHouse/ClickHouse/pull/6444) ([Enmk](https://github.com/Enmk))
+
+#### Improvement {#improvement-5}
+
+- Allow the user to override the `poll_interval` and `idle_connection_timeout` settings on connection; a sketch follows this list. [\#6230](https://github.com/ClickHouse/ClickHouse/pull/6230) ([alexey-milovidov](https://github.com/alexey-milovidov))
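+
+A hedged sketch of what such a per-connection override could look like, assuming both names are ordinary user-level settings (the values are arbitrary):
+
+```sql
+SET poll_interval = 5;              -- seconds between wake-ups while waiting on a socket
+SET idle_connection_timeout = 7200; -- close the connection after two idle hours
+```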
+
+### ClickHouse release 19.11.5.28, 2019-08-05 {#clickhouse-release-19-11-5-28-2019-08-05}
+
+#### Bug Fix {#bug-fix-19}
+
+- Fixed the possibility of hanging queries when the server is overloaded. [\#6301](https://github.com/ClickHouse/ClickHouse/pull/6301) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed an FPE in the yandexConsistentHash function. This fixes [\#6304](https://github.com/ClickHouse/ClickHouse/issues/6304). [\#6126](https://github.com/ClickHouse/ClickHouse/pull/6126) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a bug in the conversion of `LowCardinality` types in `AggregateFunctionFactory`. This fixes [\#6257](https://github.com/ClickHouse/ClickHouse/issues/6257). [\#6281](https://github.com/ClickHouse/ClickHouse/pull/6281) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed parsing of `bool` settings from `true` and `false` strings in configuration files. [\#6278](https://github.com/ClickHouse/ClickHouse/pull/6278) ([alesapin](https://github.com/alesapin))
+- Fixed a rare bug with incompatible stream headers in queries to a `Distributed` table over a `MergeTree` table when part of `WHERE` moves to `PREWHERE`. [\#6236](https://github.com/ClickHouse/ClickHouse/pull/6236) ([alesapin](https://github.com/alesapin))
+- Fixed an overflow in the integer division of a signed type by an unsigned type. This fixes [\#6214](https://github.com/ClickHouse/ClickHouse/issues/6214). [\#6233](https://github.com/ClickHouse/ClickHouse/pull/6233) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Backward Incompatible Change {#backward-incompatible-change-5}
+
+- `Kafka` is still broken.
+
+### ClickHouse release 19.11.4.24, 2019-08-01 {#clickhouse-release-19-11-4-24-2019-08-01}
+
+#### Bug Fix {#bug-fix-20}
+
+- Fixed a bug with writing secondary index marks with adaptive granularity. [\#6126](https://github.com/ClickHouse/ClickHouse/pull/6126) ([alesapin](https://github.com/alesapin))
+- Fixed the `WITH ROLLUP` and `WITH CUBE` modifiers of `GROUP BY` with two-level aggregation. [\#6225](https://github.com/ClickHouse/ClickHouse/pull/6225) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed a hang in the `JSONExtractRaw` function. Fixed [\#6195](https://github.com/ClickHouse/ClickHouse/issues/6195). [\#6198](https://github.com/ClickHouse/ClickHouse/pull/6198) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a segfault in ExternalLoader::reloadOutdated(). [\#6082](https://github.com/ClickHouse/ClickHouse/pull/6082) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fixed the case when the server may close listening sockets but not shut down, and continue serving remaining queries. You may end up with two running clickhouse-server processes. Sometimes, the server may return the error `bad_function_call` for remaining queries. [\#6231](https://github.com/ClickHouse/ClickHouse/pull/6231) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a useless and incorrect condition on the update field for initial loading of external dictionaries via ODBC, MySQL, ClickHouse and HTTP. This fixes [\#6069](https://github.com/ClickHouse/ClickHouse/issues/6069). [\#6083](https://github.com/ClickHouse/ClickHouse/pull/6083) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed an irrelevant exception in a cast of `LowCardinality(Nullable)` to not-Nullable column in case if it doesn't contain Nulls (e.g. in a query like `SELECT CAST(CAST('Hello' AS LowCardinality(Nullable(String))) AS String)`.
[\#6094](https://github.com/ClickHouse/ClickHouse/issues/6094) [\#6119](https://github.com/ClickHouse/ClickHouse/pull/6119) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed a non-deterministic result of the “uniq” aggregate function in extremely rare cases. The bug was present in all ClickHouse versions. [\#6058](https://github.com/ClickHouse/ClickHouse/pull/6058) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a segfault when we set a little bit too high a CIDR in the function `IPv6CIDRToRange`. [\#6068](https://github.com/ClickHouse/ClickHouse/pull/6068) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Fixed a small memory leak when the server throws many exceptions from many different contexts. [\#6144](https://github.com/ClickHouse/ClickHouse/pull/6144) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed the situation when the consumer got paused before subscription and was not resumed afterwards. [\#6075](https://github.com/ClickHouse/ClickHouse/pull/6075) ([Ivan](https://github.com/abyss7)) Note that Kafka is broken in this version.
+- Clearing the Kafka data buffer from a previous read operation that completed with an error. [\#6026](https://github.com/ClickHouse/ClickHouse/pull/6026) ([Nikolay](https://github.com/bopohaa)) Note that Kafka is broken in this version.
+- Since `StorageMergeTree::background_task_handle` is initialized in `startup()`, `MergeTreeBlockOutputStream::write()` may try to use it before initialization. Just check if it is initialized. [\#6080](https://github.com/ClickHouse/ClickHouse/pull/6080) ([Ivan](https://github.com/abyss7))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-6}
+
+- Added official `rpm` packages. [\#5740](https://github.com/ClickHouse/ClickHouse/pull/5740) ([proller](https://github.com/proller)) ([alesapin](https://github.com/alesapin))
+- Added the ability to build `.rpm` and `.tgz` packages with the `packager` script. [\#5769](https://github.com/ClickHouse/ClickHouse/pull/5769) ([alesapin](https://github.com/alesapin))
+- Fixes for the “Arcadia” build system. [\#6223](https://github.com/ClickHouse/ClickHouse/pull/6223) ([proller](https://github.com/proller))
+
+#### Backward Incompatible Change {#backward-incompatible-change-6}
+
+- `Kafka` is broken in this version.
+
+### ClickHouse release 19.11.3.11, 2019-07-18 {#clickhouse-release-19-11-3-11-2019-07-18}
+
+#### New Feature {#new-feature-6}
+
+- Added support for prepared statements. [\#5331](https://github.com/ClickHouse/ClickHouse/pull/5331/) ([Alexander](https://github.com/sanych73)) [\#5630](https://github.com/ClickHouse/ClickHouse/pull/5630) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `DoubleDelta` and `Gorilla` column codecs. [\#5600](https://github.com/ClickHouse/ClickHouse/pull/5600) ([Vasily Nemkov](https://github.com/Enmk))
+- Added the `os_thread_priority` setting that allows controlling the “nice” value of query processing threads that is used by the OS to adjust dynamic scheduling priority. It requires the `CAP_SYS_NICE` capability to work. This implements [\#5858](https://github.com/ClickHouse/ClickHouse/issues/5858). [\#5909](https://github.com/ClickHouse/ClickHouse/pull/5909) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Implemented `_topic`, `_offset`, `_key` columns for the Kafka engine. [\#5382](https://github.com/ClickHouse/ClickHouse/pull/5382) ([Ivan](https://github.com/abyss7)) Note that Kafka is broken in this version.
+- Added the aggregate function combinator `-Resample`. [\#5590](https://github.com/ClickHouse/ClickHouse/pull/5590) ([hcz](https://github.com/hczhcz))
+- Aggregate functions `groupArrayMovingSum(win_size)(x)` and `groupArrayMovingAvg(win_size)(x)`, which calculate a moving sum/avg with or without window-size limitation; see the sketch after this list. [\#5595](https://github.com/ClickHouse/ClickHouse/pull/5595) ([inv2004](https://github.com/inv2004))
+- Added the synonym `arrayFlatten` \<-\> `flatten`. [\#5764](https://github.com/ClickHouse/ClickHouse/pull/5764) ([hcz](https://github.com/hczhcz))
+- Integrated the H3 function `geoToH3` from Uber. [\#4724](https://github.com/ClickHouse/ClickHouse/pull/4724) ([Ivan Remen](https://github.com/BHYCHIK)) [\#5805](https://github.com/ClickHouse/ClickHouse/pull/5805) ([alexey-milovidov](https://github.com/alexey-milovidov))
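+
+A small, self-contained sketch of the new moving-window aggregates (the `fn(win_size)(x)` signature is the one stated above; the input values are arbitrary):
+
+```sql
+SELECT
+    groupArrayMovingSum(3)(value) AS moving_sum,  -- expected: [1, 3, 6, 9, 12]
+    groupArrayMovingAvg(3)(value) AS moving_avg
+FROM (SELECT arrayJoin([1, 2, 3, 4, 5]) AS value);
+```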
+
+#### Bug Fix {#bug-fix-21}
+
+- Implemented a DNS cache with asynchronous update. A separate thread resolves all hosts and updates the DNS cache periodically (setting `dns_cache_update_period`). It should help when the IPs of hosts change frequently. [\#5857](https://github.com/ClickHouse/ClickHouse/pull/5857) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed a segfault in the `Delta` codec which affects columns with values less than 32 bits in size. The bug led to random memory corruption. [\#5786](https://github.com/ClickHouse/ClickHouse/pull/5786) ([alesapin](https://github.com/alesapin))
+- Fixed a bug in the merging of non-physical columns in a block. [\#5819](https://github.com/ClickHouse/ClickHouse/pull/5819) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed a rare bug in checking a part with a `LowCardinality` column. Previously `checkDataPart` always failed for a part with a `LowCardinality` column. [\#5832](https://github.com/ClickHouse/ClickHouse/pull/5832) ([alesapin](https://github.com/alesapin))
+- Avoid hanging connections when the server thread pool is full. This is important for connections from the `remote` table function, or connections to a shard without replicas when there is a long connection timeout. This fixes [\#5878](https://github.com/ClickHouse/ClickHouse/issues/5878). [\#5881](https://github.com/ClickHouse/ClickHouse/pull/5881) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Support for constant arguments to the `evalMLModel` function. This fixes [\#5817](https://github.com/ClickHouse/ClickHouse/issues/5817). [\#5820](https://github.com/ClickHouse/ClickHouse/pull/5820) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed the issue when ClickHouse determined the default time zone as `UCT` instead of `UTC`. This fixes [\#5804](https://github.com/ClickHouse/ClickHouse/issues/5804). [\#5828](https://github.com/ClickHouse/ClickHouse/pull/5828) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a buffer underflow in `visitParamExtractRaw`. This fixes [\#5901](https://github.com/ClickHouse/ClickHouse/issues/5901). [\#5902](https://github.com/ClickHouse/ClickHouse/pull/5902) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Now distributed `DROP/ALTER/TRUNCATE/OPTIMIZE ON CLUSTER` queries will be executed directly on the leader replica. [\#5757](https://github.com/ClickHouse/ClickHouse/pull/5757) ([alesapin](https://github.com/alesapin))
+- Fixed `coalesce` for `ColumnConst` with `ColumnNullable` + related changes. [\#5755](https://github.com/ClickHouse/ClickHouse/pull/5755) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed `ReadBufferFromKafkaConsumer` so that it keeps reading new messages after `commit()` even if it was stalled before. [\#5852](https://github.com/ClickHouse/ClickHouse/pull/5852) ([Ivan](https://github.com/abyss7))
+- Fixed `FULL` and `RIGHT` JOIN results when joining on `Nullable` keys in the right table. [\#5859](https://github.com/ClickHouse/ClickHouse/pull/5859) ([Artem Zuikov](https://github.com/4ertus2))
+- Possible fix of infinite sleeping of low-priority queries. [\#5842](https://github.com/ClickHouse/ClickHouse/pull/5842) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a race condition which caused some queries to possibly not appear in query_log after a `SYSTEM FLUSH LOGS` query. [\#5456](https://github.com/ClickHouse/ClickHouse/issues/5456) [\#5685](https://github.com/ClickHouse/ClickHouse/pull/5685) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed a `heap-use-after-free` ASan warning in ClusterCopier caused by a watch that tried to use an already removed copier object. [\#5871](https://github.com/ClickHouse/ClickHouse/pull/5871) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed a wrong `StringRef` pointer returned by some implementations of `IColumn::deserializeAndInsertFromArena`. This bug affected only unit tests. [\#5973](https://github.com/ClickHouse/ClickHouse/pull/5973) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Prevent source and intermediate array join columns from masking same-name columns. [\#5941](https://github.com/ClickHouse/ClickHouse/pull/5941) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed insert and select queries to the MySQL engine with MySQL-style identifier quoting. [\#5704](https://github.com/ClickHouse/ClickHouse/pull/5704) ([Winter Zhang](https://github.com/zhang2014))
+- Now the `CHECK TABLE` query can work with the MergeTree engine family. It returns check status and a message, if any, for each part (or file in the case of simpler engines). Also, fixed a bug in fetching a broken part. [\#5865](https://github.com/ClickHouse/ClickHouse/pull/5865) ([alesapin](https://github.com/alesapin))
+- Fixed the SPLIT_SHARED_LIBRARIES runtime. [\#5793](https://github.com/ClickHouse/ClickHouse/pull/5793) ([Danila Kutenin](https://github.com/danlark1))
+- Fixed time zone initialization when `/etc/localtime` is a relative symlink like `../usr/share/zoneinfo/Europe/Moscow`. [\#5922](https://github.com/ClickHouse/ClickHouse/pull/5922) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- clickhouse-copier: fixed use-after-free on shutdown. [\#5752](https://github.com/ClickHouse/ClickHouse/pull/5752) ([proller](https://github.com/proller))
+- Updated `simdjson`. Fixed the issue that some invalid JSONs with zero bytes parsed successfully. [\#5938](https://github.com/ClickHouse/ClickHouse/pull/5938) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed the shutdown of SystemLogs. [\#5802](https://github.com/ClickHouse/ClickHouse/pull/5802) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed hanging when the condition in invalidate_query depends on a dictionary. [\#6011](https://github.com/ClickHouse/ClickHouse/pull/6011) ([Vitaly Baranov](https://github.com/vitlibar))
+
+#### Improvement {#improvement-6}
+
+- Allow unresolvable addresses in the cluster configuration. They will be considered unavailable and tried to resolve at every connection attempt. This is especially useful for Kubernetes.
This fixes [\#5714](https://github.com/ClickHouse/ClickHouse/issues/5714). [\#5924](https://github.com/ClickHouse/ClickHouse/pull/5924) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Close idle TCP connections (with a one-hour timeout by default). This is especially important for large clusters with multiple distributed tables on every server, because every server can possibly keep a connection pool to every other server, and after peak query concurrency, connections will stall. This fixes [\#5879](https://github.com/ClickHouse/ClickHouse/issues/5879). [\#5880](https://github.com/ClickHouse/ClickHouse/pull/5880) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Better quality of the `topK` function. Changed the SavingSpace set behavior to remove the last element if the new element has a bigger weight. [\#5833](https://github.com/ClickHouse/ClickHouse/issues/5833) [\#5850](https://github.com/ClickHouse/ClickHouse/pull/5850) ([Guillaume Tassery](https://github.com/YiuRULE))
+- URL functions that work with domains now also work for incomplete URLs without a scheme. [\#5725](https://github.com/ClickHouse/ClickHouse/pull/5725) ([alesapin](https://github.com/alesapin))
+- Checksums added to the `system.parts_columns` table. [\#5874](https://github.com/ClickHouse/ClickHouse/pull/5874) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Added the `Enum` data type as a synonym for `Enum8` or `Enum16`. [\#5886](https://github.com/ClickHouse/ClickHouse/pull/5886) ([dimarub2000](https://github.com/dimarub2000))
+- Full-bit transpose variant for the `T64` codec. Could lead to better compression with `zstd`. [\#5742](https://github.com/ClickHouse/ClickHouse/pull/5742) ([Artem Zuikov](https://github.com/4ertus2))
+- A condition on the `startsWith` function can now use the primary key; see the sketch after this list. This fixes [\#5310](https://github.com/ClickHouse/ClickHouse/issues/5310) and [\#5882](https://github.com/ClickHouse/ClickHouse/issues/5882). [\#5919](https://github.com/ClickHouse/ClickHouse/pull/5919) ([dimarub2000](https://github.com/dimarub2000))
+- Allow using `clickhouse-copier` with a cross-replication cluster topology by permitting an empty database name. [\#5745](https://github.com/ClickHouse/ClickHouse/pull/5745) ([nvartolomei](https://github.com/nvartolomei))
+- Use `UTC` as the default timezone on a system without `tzdata` (e.g. a bare Docker container). Before this patch, the error message `Could not determine local time zone` was printed, and the server or client refused to start. [\#5827](https://github.com/ClickHouse/ClickHouse/pull/5827) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Returned back support for a floating point argument in the function `quantileTiming` for backward compatibility. [\#5911](https://github.com/ClickHouse/ClickHouse/pull/5911) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Show which table is missing the column in error messages. [\#5768](https://github.com/ClickHouse/ClickHouse/pull/5768) ([Ivan](https://github.com/abyss7))
+- Disallow running a query with the same query_id by various users. [\#5430](https://github.com/ClickHouse/ClickHouse/pull/5430) ([proller](https://github.com/proller))
+- More robust code for sending metrics to Graphite. It will work even during long multiple `RENAME TABLE` operations. [\#5875](https://github.com/ClickHouse/ClickHouse/pull/5875) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- More informative error messages will be displayed when ThreadPool can't schedule a task for execution. This fixes [\#5305](https://github.com/ClickHouse/ClickHouse/issues/5305). [\#5801](https://github.com/ClickHouse/ClickHouse/pull/5801) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Inverted ngramSearch to be more intuitive. [\#5807](https://github.com/ClickHouse/ClickHouse/pull/5807) ([Danila Kutenin](https://github.com/danlark1))
+- Added user parsing in the HDFS engine builder. [\#5946](https://github.com/ClickHouse/ClickHouse/pull/5946) ([akonyaev90](https://github.com/akonyaev90))
+- Updated the default value of the `max_ast_elements` parameter. [\#5933](https://github.com/ClickHouse/ClickHouse/pull/5933) ([Artem Konovalov](https://github.com/izebit))
+- Added a notion of obsolete settings. The obsolete setting `allow_experimental_low_cardinality_type` can be used with no effect. [0f15c01c6802f7ce1a1494c12c846be8c98944cd](https://github.com/ClickHouse/ClickHouse/commit/0f15c01c6802f7ce1a1494c12c846be8c98944cd) ([alexey-milovidov](https://github.com/alexey-milovidov))
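+
+A minimal sketch of the `startsWith` improvement above (table and data are hypothetical): when the argument is the leading primary-key column, the predicate can be answered with an index range scan rather than a full scan.
+
+```sql
+CREATE TABLE urls (url String) ENGINE = MergeTree ORDER BY url;
+
+SELECT count() FROM urls WHERE startsWith(url, 'https://');  -- can use the primary key on `url`
+```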
+
+#### Performance Improvement {#performance-improvement-4}
+
+- Increased the number of streams to SELECT from a Merge table for a more uniform distribution of threads. Added the setting `max_streams_multiplier_for_merge_tables`. This fixes [\#5797](https://github.com/ClickHouse/ClickHouse/issues/5797). [\#5915](https://github.com/ClickHouse/ClickHouse/pull/5915) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-7}
+
+- Added a backward compatibility test for client-server interaction with different versions of ClickHouse. [\#5868](https://github.com/ClickHouse/ClickHouse/pull/5868) ([alesapin](https://github.com/alesapin))
+- Test coverage information in every commit and pull request. [\#5896](https://github.com/ClickHouse/ClickHouse/pull/5896) ([alesapin](https://github.com/alesapin))
+- Cooperate with the address sanitizer to support our custom allocators (`Arena` and `ArenaWithFreeLists`) for better debugging of “use-after-free” errors. [\#5728](https://github.com/ClickHouse/ClickHouse/pull/5728) ([akuzm](https://github.com/akuzm))
+- Switched to the [LLVM libunwind implementation](https://github.com/llvm-mirror/libunwind) for C++ exception handling and for stack trace printing. [\#4828](https://github.com/ClickHouse/ClickHouse/pull/4828) ([Nikita Lapkov](https://github.com/laplab))
+- Added two more warnings from -Weverything. [\#5923](https://github.com/ClickHouse/ClickHouse/pull/5923) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Allow building ClickHouse with Memory Sanitizer. [\#3949](https://github.com/ClickHouse/ClickHouse/pull/3949) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a ubsan report about the `bitTest` function in a fuzz test. [\#5943](https://github.com/ClickHouse/ClickHouse/pull/5943) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Docker: added the possibility to init a ClickHouse instance that requires authentication. [\#5727](https://github.com/ClickHouse/ClickHouse/pull/5727) ([shurshun](https://github.com/shurshun))
+- Updated librdkafka to version 1.1.0. [\#5872](https://github.com/ClickHouse/ClickHouse/pull/5872) ([Ivan](https://github.com/abyss7))
+- Added a global timeout for integration tests and disabled some of them in the test code. [\#5741](https://github.com/ClickHouse/ClickHouse/pull/5741) ([alesapin](https://github.com/alesapin))
+- Fixed some ThreadSanitizer failures. [\#5854](https://github.com/ClickHouse/ClickHouse/pull/5854) ([akuzm](https://github.com/akuzm))
+- The `--no-undefined` option forces the linker to check all external names for existence while linking. It's very useful to track real dependencies between libraries in the split build mode. [\#5855](https://github.com/ClickHouse/ClickHouse/pull/5855) ([Ivan](https://github.com/abyss7))
+- Added a performance test for [\#5797](https://github.com/ClickHouse/ClickHouse/issues/5797). [\#5914](https://github.com/ClickHouse/ClickHouse/pull/5914) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed compatibility with gcc-7. [\#5840](https://github.com/ClickHouse/ClickHouse/pull/5840) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added support for gcc-9. This fixes [\#5717](https://github.com/ClickHouse/ClickHouse/issues/5717). [\#5774](https://github.com/ClickHouse/ClickHouse/pull/5774) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed an error when libunwind could be linked incorrectly. [\#5948](https://github.com/ClickHouse/ClickHouse/pull/5948) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a few warnings found by PVS-Studio. [\#5921](https://github.com/ClickHouse/ClickHouse/pull/5921) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added initial support for the `clang-tidy` static analyzer. [\#5806](https://github.com/ClickHouse/ClickHouse/pull/5806) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Converted BSD/Linux endian macros (‘be64toh’ and ‘htobe64’) to the Mac OS X equivalents. [\#5785](https://github.com/ClickHouse/ClickHouse/pull/5785) ([Fu Chen](https://github.com/fredchenbj))
+- Improved the integration tests guide. [\#5796](https://github.com/ClickHouse/ClickHouse/pull/5796) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fixed the build on macOS. [\#5822](https://github.com/ClickHouse/ClickHouse/pull/5822) ([filimonov](https://github.com/filimonov))
+- Fixed a hard-to-spot typo: aggreAGte -> aggregate. [\#5753](https://github.com/ClickHouse/ClickHouse/pull/5753) ([akuzm](https://github.com/akuzm))
+- Fixed the FreeBSD build. [\#5760](https://github.com/ClickHouse/ClickHouse/pull/5760) ([proller](https://github.com/proller))
+- Added a link to the experimental YouTube channel to the website. [\#5845](https://github.com/ClickHouse/ClickHouse/pull/5845) ([Ivan Blinkov](https://github.com/blinkov))
+- CMake: added an option for coverage flags: WITH_COVERAGE. [\#5776](https://github.com/ClickHouse/ClickHouse/pull/5776) ([proller](https://github.com/proller))
+- Fixed the initial size of some inline PODArrays. [\#5787](https://github.com/ClickHouse/ClickHouse/pull/5787) ([akuzm](https://github.com/akuzm))
+- clickhouse-server.postinst: fixed OS detection for CentOS 6. [\#5788](https://github.com/ClickHouse/ClickHouse/pull/5788) ([proller](https://github.com/proller))
+- Added Arch Linux package generation. [\#5719](https://github.com/ClickHouse/ClickHouse/pull/5719) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Split Common/config.h by libs (dbms). [\#5715](https://github.com/ClickHouse/ClickHouse/pull/5715) ([proller](https://github.com/proller))
+- Fixes for the “Arcadia” build platform. [\#5795](https://github.com/ClickHouse/ClickHouse/pull/5795) ([proller](https://github.com/proller))
+- Fixes for an unconventional build (gcc9, no submodules). [\#5792](https://github.com/ClickHouse/ClickHouse/pull/5792) ([proller](https://github.com/proller))
+- Require an explicit type in unalignedStore because it was proven to be bug-prone. [\#5791](https://github.com/ClickHouse/ClickHouse/pull/5791) ([akuzm](https://github.com/akuzm))
+- Fixed the macOS build. [\#5830](https://github.com/ClickHouse/ClickHouse/pull/5830) ([filimonov](https://github.com/filimonov))
+- Performance test concerning the new JIT feature with a bigger dataset, as requested in [\#5263](https://github.com/ClickHouse/ClickHouse/issues/5263). [\#5887](https://github.com/ClickHouse/ClickHouse/pull/5887) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Run stateful tests in the stress test. [12693e568722f11e19859742f56428455501fd2a](https://github.com/ClickHouse/ClickHouse/commit/12693e568722f11e19859742f56428455501fd2a) ([alesapin](https://github.com/alesapin))
+
+#### Backward Incompatible Change {#backward-incompatible-change-7}
+
+- `Kafka` is broken in this version.
+- Enabled `adaptive_index_granularity` = 10MB by default for new `MergeTree` tables. If you created new MergeTree tables on version 19.11+, a downgrade to versions prior to 19.6 will be impossible. [\#5628](https://github.com/ClickHouse/ClickHouse/pull/5628) ([alesapin](https://github.com/alesapin))
+- Removed obsolete undocumented embedded dictionaries that were used by Yandex.Metrica. The functions `OSIn`, `SEIn`, `OSToRoot`, `SEToRoot`, `OSHierarchy`, `SEHierarchy` are no longer available. If you are using these functions, write an email to clickhouse-feedback@yandex-team.com. Note: at the last moment we decided to keep these functions for a while. [\#5780](https://github.com/ClickHouse/ClickHouse/pull/5780) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+## ClickHouse release 19.10 {#clickhouse-release-19-10}
+
+### ClickHouse release 19.10.1.5, 2019-07-12 {#clickhouse-release-19-10-1-5-2019-07-12}
+
+#### New Feature {#new-feature-7}
+
+- Added a new column codec: `T64`. Made for (U)IntX/EnumX/Date(Time)/DecimalX columns. It should be good for columns with constant or small-range values. The codec itself allows enlarging or shrinking the data type without re-compression; see the sketch after this list. [\#5557](https://github.com/ClickHouse/ClickHouse/pull/5557) ([Artem Zuikov](https://github.com/4ertus2))
+- Added the database engine `MySQL` that allows viewing all the tables in a remote MySQL server. [\#5599](https://github.com/ClickHouse/ClickHouse/pull/5599) ([Winter Zhang](https://github.com/zhang2014))
+- `bitmapContains` implementation. It's 2x faster than `bitmapHasAny` if the second bitmap contains one element. [\#5535](https://github.com/ClickHouse/ClickHouse/pull/5535) ([Zhichang Yu](https://github.com/yuzhichang))
+- Support for the `crc32` function (with behaviour exactly as in MySQL or PHP). Do not use it if you need a hash function. [\#5661](https://github.com/ClickHouse/ClickHouse/pull/5661) ([Ivan Remen](https://github.com/BHYCHIK))
+- Implemented `SYSTEM START/STOP DISTRIBUTED SENDS` queries to control asynchronous inserts into `Distributed` tables. [\#4935](https://github.com/ClickHouse/ClickHouse/pull/4935) ([Winter Zhang](https://github.com/zhang2014))
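+
+A hedged sketch of the new `T64` codec in a table definition (the schema is hypothetical; chaining with `ZSTD` is optional):
+
+```sql
+CREATE TABLE metrics
+(
+    ts   DateTime,
+    code UInt16 CODEC(T64, ZSTD)  -- T64 bit-transposes blocks of values; ZSTD then compresses the result
+)
+ENGINE = MergeTree ORDER BY ts;
+```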
+
+#### Bug Fix {#bug-fix-22}
+
+- Ignore query execution limits and max parts size for merge limits while executing mutations. [\#5659](https://github.com/ClickHouse/ClickHouse/pull/5659) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed a bug which may lead to deduplication of normal blocks (extremely rare) and insertion of duplicate blocks (more often). [\#5549](https://github.com/ClickHouse/ClickHouse/pull/5549) ([alesapin](https://github.com/alesapin))
+- Fixed the function `arrayEnumerateUniqRanked` for arguments with empty arrays. [\#5559](https://github.com/ClickHouse/ClickHouse/pull/5559) ([proller](https://github.com/proller))
+- Don't subscribe to Kafka topics without the intent to poll any messages. [\#5698](https://github.com/ClickHouse/ClickHouse/pull/5698) ([Ivan](https://github.com/abyss7))
+- Made the setting `join_use_nulls` get no effect for types that cannot be inside Nullable. [\#5700](https://github.com/ClickHouse/ClickHouse/pull/5700) ([Olga Khvostikova](https://github.com/stavrolia))
+- Fixed `Incorrect size of index granularity` errors. [\#5720](https://github.com/ClickHouse/ClickHouse/pull/5720) ([coraxster](https://github.com/coraxster))
+- Fixed a Float-to-Decimal conversion overflow. [\#5607](https://github.com/ClickHouse/ClickHouse/pull/5607) ([coraxster](https://github.com/coraxster))
+- Flush the buffer when the `WriteBufferFromHDFS` destructor is called. This fixes writing into `HDFS`. [\#5684](https://github.com/ClickHouse/ClickHouse/pull/5684) ([Xindong Peng](https://github.com/eejoin))
+
+#### Improvement {#improvement-7}
+
+- Treat empty cells in `CSV` as default values when the setting `input_format_defaults_for_omitted_fields` is enabled. [\#5625](https://github.com/ClickHouse/ClickHouse/pull/5625) ([akuzm](https://github.com/akuzm))
+- Non-blocking loading of external dictionaries. [\#5567](https://github.com/ClickHouse/ClickHouse/pull/5567) ([Vitaly Baranov](https://github.com/vitlibar))
+- Network timeouts can be dynamically changed for already established connections according to the settings. [\#4558](https://github.com/ClickHouse/ClickHouse/pull/4558) ([Konstantin Podshumok](https://github.com/podshumok))
+- Using “public_suffix_list” for the functions `firstSignificantSubdomain`, `cutToFirstSignificantSubdomain`; see the sketch after this list. It uses a perfect hash table generated by `gperf` from a list generated from the file https://publicsuffix.org/list/public_suffix_list.dat (for example, now we recognize the domain `ac.uk` as non-significant). [\#5030](https://github.com/ClickHouse/ClickHouse/pull/5030) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Adopted the `IPv6` data type in system tables: `system.processes` and `system.query_log`. [\#5640](https://github.com/ClickHouse/ClickHouse/pull/5640) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Using sessions for connections with the MySQL compatibility protocol. \#5476 [\#5646](https://github.com/ClickHouse/ClickHouse/pull/5646) ([Yuriy Baranov](https://github.com/yurriy))
+- Support more `ALTER` queries `ON CLUSTER`. [\#5593](https://github.com/ClickHouse/ClickHouse/pull/5593) [\#5613](https://github.com/ClickHouse/ClickHouse/pull/5613) ([sundyli](https://github.com/sundy-li))
+- Support the `` section in the `clickhouse-local` config file. [\#5540](https://github.com/ClickHouse/ClickHouse/pull/5540) ([proller](https://github.com/proller))
+- Allow running a query with the `remote` table function in `clickhouse-local`. [\#5627](https://github.com/ClickHouse/ClickHouse/pull/5627) ([proller](https://github.com/proller))
+
+#### Performance Improvement {#performance-improvement-5}
+
+- Added the possibility to write the final mark at the end of MergeTree columns. It allows avoiding useless reads for keys that are out of the table data range. It is enabled only if adaptive index granularity is in use. [\#5624](https://github.com/ClickHouse/ClickHouse/pull/5624) ([alesapin](https://github.com/alesapin))
+- Improved the performance of MergeTree tables on very slow filesystems by reducing the number of `stat` syscalls. [\#5648](https://github.com/ClickHouse/ClickHouse/pull/5648) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a performance degradation in reading from MergeTree tables that was introduced in version 19.6. Fixes \#5631. [\#5633](https://github.com/ClickHouse/ClickHouse/pull/5633) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-8}
+
+- Implemented `TestKeeper` as an implementation of the ZooKeeper interface used for testing. [\#5643](https://github.com/ClickHouse/ClickHouse/pull/5643) ([alexey-milovidov](https://github.com/alexey-milovidov)) ([levushkin aleksej](https://github.com/alexey-milovidov))
+- From now on, `.sql` tests can be run isolated by server, in parallel, with a random database. This allows running them faster, adding new tests with custom server configurations, and being sure that different tests don't affect each other. [\#5554](https://github.com/ClickHouse/ClickHouse/pull/5554) ([Ivan](https://github.com/abyss7))
+- Remove `` and `` from performance tests. [\#5672](https://github.com/ClickHouse/ClickHouse/pull/5672) ([Olga Khvostikova](https://github.com/stavrolia))
+- Fixed the “select_format” performance test for `Pretty` formats. [\#5642](https://github.com/ClickHouse/ClickHouse/pull/5642) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+## ClickHouse release 19.9 {#clickhouse-release-19-9}
+
+### ClickHouse release 19.9.3.31, 2019-07-05 {#clickhouse-release-19-9-3-31-2019-07-05}
+
+#### Bug Fix {#bug-fix-23}
+
+- Fixed a segfault in the Delta codec which affects columns with values less than 32 bits in size. The bug led to random memory corruption. [\#5786](https://github.com/ClickHouse/ClickHouse/pull/5786) ([alesapin](https://github.com/alesapin))
+- Fixed a rare bug in checking a part with a LowCardinality column. [\#5832](https://github.com/ClickHouse/ClickHouse/pull/5832) ([alesapin](https://github.com/alesapin))
+- Fixed a bug in the merging of non-physical columns in a block. [\#5819](https://github.com/ClickHouse/ClickHouse/pull/5819) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed potential infinite sleeping of low-priority queries. [\#5842](https://github.com/ClickHouse/ClickHouse/pull/5842) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed how ClickHouse determined the default time zone as UCT instead of UTC. [\#5828](https://github.com/ClickHouse/ClickHouse/pull/5828) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a bug about executing distributed DROP/ALTER/TRUNCATE/OPTIMIZE ON CLUSTER queries on the follower replica before the leader replica. Now they will be executed directly on the leader replica.
[\#5757](https://github.com/ClickHouse/ClickHouse/pull/5757) ([alesapin](https://github.com/alesapin))
+- Fixed a race condition which caused some queries to possibly not appear in query_log immediately after a SYSTEM FLUSH LOGS query. [\#5685](https://github.com/ClickHouse/ClickHouse/pull/5685) ([Anton Popov](https://github.com/CurtizJ))
+- Added missing support for constant arguments to the `evalMLModel` function. [\#5820](https://github.com/ClickHouse/ClickHouse/pull/5820) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse release 19.9.2.4, 2019-06-24 {#clickhouse-release-19-9-2-4-2019-06-24}
+
+#### New Feature {#new-feature-8}
+
+- Print information about frozen parts in the `system.parts` table. [\#5471](https://github.com/ClickHouse/ClickHouse/pull/5471) ([proller](https://github.com/proller))
+- Ask for the client password on clickhouse-client start on a tty if it is not set in the arguments. [\#5092](https://github.com/ClickHouse/ClickHouse/pull/5092) ([proller](https://github.com/proller))
+- Implemented the `dictGet` and `dictGetOrDefault` functions for Decimal types. [\#5394](https://github.com/ClickHouse/ClickHouse/pull/5394) ([Artem Zuikov](https://github.com/4ertus2))
+
+#### Improvement {#improvement-8}
+
+- Debian init: add a service stop timeout. [\#5522](https://github.com/ClickHouse/ClickHouse/pull/5522) ([proller](https://github.com/proller))
+- Added a setting, forbidden by default, to create a table with suspicious types for LowCardinality. [\#5448](https://github.com/ClickHouse/ClickHouse/pull/5448) ([Olga Khvostikova](https://github.com/stavrolia))
+- Regression functions return model weights when not used as State in the function `evalMLMethod`. [\#5411](https://github.com/ClickHouse/ClickHouse/pull/5411) ([Quid37](https://github.com/Quid37))
+- Renamed and improved regression methods. [\#5492](https://github.com/ClickHouse/ClickHouse/pull/5492) ([Quid37](https://github.com/Quid37))
+- Clearer interfaces of string searchers. [\#5586](https://github.com/ClickHouse/ClickHouse/pull/5586) ([Danila Kutenin](https://github.com/danlark1))
+
+#### Bug Fix {#bug-fix-24}
+
+- Fixed potential data loss in Kafka. [\#5445](https://github.com/ClickHouse/ClickHouse/pull/5445) ([Ivan](https://github.com/abyss7))
+- Fixed a potential infinite loop in the `PrettySpace` format when called with zero columns. [\#5560](https://github.com/ClickHouse/ClickHouse/pull/5560) ([Olga Khvostikova](https://github.com/stavrolia))
+- Fixed an overflow bug in linear models. Allow an eval ML model for a non-const model argument. [\#5516](https://github.com/ClickHouse/ClickHouse/pull/5516) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- `ALTER TABLE ... DROP INDEX IF EXISTS ...` should not raise an exception if the provided index does not exist. [\#5524](https://github.com/ClickHouse/ClickHouse/pull/5524) ([Gleb Novikov](https://github.com/NanoBjorn))
+- Fixed a segfault with `bitmapHasAny` in a scalar subquery. [\#5528](https://github.com/ClickHouse/ClickHouse/pull/5528) ([Zhichang Yu](https://github.com/yuzhichang))
+- Fixed an error when the replication connection pool doesn't retry to resolve the host, even when the DNS cache was dropped. [\#5534](https://github.com/ClickHouse/ClickHouse/pull/5534) ([alesapin](https://github.com/alesapin))
+- Fixed `ALTER ... MODIFY TTL` on ReplicatedMergeTree.
[\#5539](https://github.com/ClickHouse/ClickHouse/pull/5539) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed INSERT into a Distributed table with a MATERIALIZED column. [\#5429](https://github.com/ClickHouse/ClickHouse/pull/5429) ([Azat Khuzhin](https://github.com/azat))
+- Fixed a bad alloc when truncating Join storage. [\#5437](https://github.com/ClickHouse/ClickHouse/pull/5437) ([TCeason](https://github.com/TCeason))
+- In recent versions of the package tzdata, some files are symlinks now. The current mechanism for detecting the default timezone got broken and gave wrong names for some timezones. Now at least we force the timezone name to the contents of TZ if provided. [\#5443](https://github.com/ClickHouse/ClickHouse/pull/5443) ([Ivan](https://github.com/abyss7))
+- Fixed some extremely rare cases with the MultiVolnitsky searcher when the constant needles in sum are at least 16KB long. The algorithm missed or overwrote the previous results, which can lead to an incorrect result of `multiSearchAny`. [\#5588](https://github.com/ClickHouse/ClickHouse/pull/5588) ([Danila Kutenin](https://github.com/danlark1))
+- Fixed the issue when settings for ExternalData requests couldn't use ClickHouse settings. Also, for now, the settings `date_time_input_format` and `low_cardinality_allow_in_native_format` cannot be used because of the ambiguity of names (in external data they can be interpreted as a table format, and in the query they can be a setting). [\#5455](https://github.com/ClickHouse/ClickHouse/pull/5455) ([Danila Kutenin](https://github.com/danlark1))
+- Fixed a bug when parts were removed only from the filesystem without dropping them from ZooKeeper. [\#5520](https://github.com/ClickHouse/ClickHouse/pull/5520) ([alesapin](https://github.com/alesapin))
+- Removed debug logging from the MySQL protocol. [\#5478](https://github.com/ClickHouse/ClickHouse/pull/5478) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Skip ZNONODE during DDL query processing. [\#5489](https://github.com/ClickHouse/ClickHouse/pull/5489) ([Azat Khuzhin](https://github.com/azat))
+- Fixed mixing `UNION ALL` result column types. There were cases with inconsistent data and column types of result columns. [\#5503](https://github.com/ClickHouse/ClickHouse/pull/5503) ([Artem Zuikov](https://github.com/4ertus2))
+- Throw an exception on wrong integers in the `dictGetT` functions instead of a crash. [\#5446](https://github.com/ClickHouse/ClickHouse/pull/5446) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed wrong element_count and load_factor for a hashed dictionary in the `system.dictionaries` table. [\#5440](https://github.com/ClickHouse/ClickHouse/pull/5440) ([Azat Khuzhin](https://github.com/azat))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-9}
+
+- Fixed build without `Brotli` HTTP compression support (`ENABLE_BROTLI=OFF` cmake variable). [\#5521](https://github.com/ClickHouse/ClickHouse/pull/5521) ([Anton Yuzhaninov](https://github.com/citrin))
+- Include roaring.h as roaring/roaring.h. [\#5523](https://github.com/ClickHouse/ClickHouse/pull/5523) ([Orivej Desh](https://github.com/orivej))
+- Fixed gcc9 warnings in hyperscan (the \#line directive is evil!). [\#5546](https://github.com/ClickHouse/ClickHouse/pull/5546) ([Danila Kutenin](https://github.com/danlark1))
+- Fixed all warnings when compiling with gcc-9. Fixed some contrib issues.
Fixed the gcc9 ICE and submitted it to bugzilla. [\#5498](https://github.com/ClickHouse/ClickHouse/pull/5498) ([Danila Kutenin](https://github.com/danlark1))
+- Fixed linking with lld. [\#5477](https://github.com/ClickHouse/ClickHouse/pull/5477) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Removed unused specializations in dictionaries. [\#5452](https://github.com/ClickHouse/ClickHouse/pull/5452) ([Artem Zuikov](https://github.com/4ertus2))
+- Improved performance tests for formatting and parsing tables for different types of files. [\#5497](https://github.com/ClickHouse/ClickHouse/pull/5497) ([Olga Khvostikova](https://github.com/stavrolia))
+- Fixes for parallel test runs. [\#5506](https://github.com/ClickHouse/ClickHouse/pull/5506) ([proller](https://github.com/proller))
+- Docker: use configs from clickhouse-test. [\#5531](https://github.com/ClickHouse/ClickHouse/pull/5531) ([proller](https://github.com/proller))
+- Fixed compilation for FreeBSD. [\#5447](https://github.com/ClickHouse/ClickHouse/pull/5447) ([proller](https://github.com/proller))
+- Upgraded boost to 1.70. [\#5570](https://github.com/ClickHouse/ClickHouse/pull/5570) ([proller](https://github.com/proller))
+- Fixed building ClickHouse as a submodule. [\#5574](https://github.com/ClickHouse/ClickHouse/pull/5574) ([proller](https://github.com/proller))
+- Improved JSONExtract performance tests. [\#5444](https://github.com/ClickHouse/ClickHouse/pull/5444) ([Vitaly Baranov](https://github.com/vitlibar))
+
+## ClickHouse release 19.8 {#clickhouse-release-19-8}
+
+### ClickHouse release 19.8.3.8, 2019-06-11 {#clickhouse-release-19-8-3-8-2019-06-11}
+
+#### New Features {#new-features}
+
+- Added functions to work with JSON. [\#4686](https://github.com/ClickHouse/ClickHouse/pull/4686) ([hcz](https://github.com/hczhcz)) [\#5124](https://github.com/ClickHouse/ClickHouse/pull/5124). ([Vitaly Baranov](https://github.com/vitlibar))
+- Added a function basename, with a behaviour similar to the basename function that exists in a lot of languages (`os.path.basename` in Python, `basename` in PHP, etc…). It works with both a UNIX-like path and a Windows path. [\#5136](https://github.com/ClickHouse/ClickHouse/pull/5136) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Added `LIMIT n, m BY` or `LIMIT m OFFSET n BY` syntax to set an offset of n for the LIMIT BY clause; see the sketch after this list. [\#5138](https://github.com/ClickHouse/ClickHouse/pull/5138) ([Anton Popov](https://github.com/CurtizJ))
+- Added a new data type, `SimpleAggregateFunction`, which allows having columns with light aggregation in an `AggregatingMergeTree`. It can only be used with simple functions like `any`, `anyLast`, `sum`, `min`, `max`. [\#4629](https://github.com/ClickHouse/ClickHouse/pull/4629) ([Boris Granveaud](https://github.com/bgranvea))
+- Added support for non-constant arguments in the function `ngramDistance`. [\#5198](https://github.com/ClickHouse/ClickHouse/pull/5198) ([Danila Kutenin](https://github.com/danlark1))
+- Added the functions `skewPop`, `skewSamp`, `kurtPop` and `kurtSamp` to compute sequence skewness, sample skewness, kurtosis and sample kurtosis respectively. [\#5200](https://github.com/ClickHouse/ClickHouse/pull/5200) ([hcz](https://github.com/hczhcz))
+- Support the rename operation for `MaterializeView` storage. [\#5209](https://github.com/ClickHouse/ClickHouse/pull/5209) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Added a server which allows connecting to ClickHouse using a MySQL client. [\#4715](https://github.com/ClickHouse/ClickHouse/pull/4715) ([Yuriy Baranov](https://github.com/yurriy))
+- Added `toDecimal*OrZero` and `toDecimal*OrNull` functions.
+- Support Decimal types in the functions `quantile`, `quantiles`, `median`, `quantileExactWeighted`, `quantilesExactWeighted`, `medianExactWeighted`. [\#5304](https://github.com/ClickHouse/ClickHouse/pull/5304) ([Artem Zuikov](https://github.com/4ertus2))
+- Added the `toValidUTF8` function, which replaces all invalid UTF-8 characters by the replacement character � (U+FFFD). [\#5322](https://github.com/ClickHouse/ClickHouse/pull/5322) ([Danila Kutenin](https://github.com/danlark1))
+- Added the `format` function. Formats a constant pattern (a simplified Python format pattern) with the strings listed in the arguments. [\#5330](https://github.com/ClickHouse/ClickHouse/pull/5330) ([Danila Kutenin](https://github.com/danlark1))
+- Added the `system.detached_parts` table containing information about detached parts of `MergeTree` tables. [\#5353](https://github.com/ClickHouse/ClickHouse/pull/5353) ([akuzm](https://github.com/akuzm))
+- Added the `ngramSearch` function to calculate the non-symmetric difference between needle and haystack. [\#5418](https://github.com/ClickHouse/ClickHouse/pull/5418)[\#5422](https://github.com/ClickHouse/ClickHouse/pull/5422) ([Danila Kutenin](https://github.com/danlark1))
+- Implementation of basic machine learning methods (stochastic linear regression and logistic regression) using the aggregate functions interface. Has different strategies for updating model weights (simple gradient descent, momentum method, Nesterov method). Also supports mini-batches of custom size. [\#4943](https://github.com/ClickHouse/ClickHouse/pull/4943) ([Quid37](https://github.com/Quid37))
+- Implementation of the `geohashEncode` and `geohashDecode` functions. [\#5003](https://github.com/ClickHouse/ClickHouse/pull/5003) ([Vasily Nemkov](https://github.com/Enmk))
+- Added the aggregate function `timeSeriesGroupSum`, which can aggregate different time series whose sample timestamps are not aligned. It uses linear interpolation between two sample timestamps and then sums the time series together. Added the aggregate function `timeSeriesGroupRateSum`, which calculates the rate of time series and then sums the rates together. [\#4542](https://github.com/ClickHouse/ClickHouse/pull/4542) ([LiuYangkuan](https://github.com/LiuYangkuan))
+- Added the functions `IPv4CIDRtoIPv4Range` and `IPv6CIDRtoIPv6Range` to calculate the lower and higher bounds for an IP in a subnet using a CIDR. [\#5095](https://github.com/ClickHouse/ClickHouse/pull/5095) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Add an X-ClickHouse-Summary header when we send a query over HTTP with the setting `send_progress_in_http_headers` enabled. Returns the usual information of X-ClickHouse-Progress, with additional information such as how many rows and bytes were inserted in the query. [\#5116](https://github.com/ClickHouse/ClickHouse/pull/5116) ([Guillaume Tassery](https://github.com/YiuRULE))
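+A few of the additions above are easiest to see in queries. A minimal sketch (the `site_hits` table and its columns are illustrative, not part of these changes):
+
+```sql
+-- LIMIT m OFFSET n BY: keep the 2nd and 3rd top pages per domain
+SELECT domain, page, hits
+FROM site_hits
+ORDER BY domain, hits DESC
+LIMIT 2 OFFSET 1 BY domain;
+
+SELECT format('{} world', 'hello');          -- 'hello world'
+SELECT toValidUTF8('a\xF0\x28b');            -- invalid bytes become U+FFFD
+SELECT toDecimal32OrNull('not a number', 2); -- NULL instead of an exception
+```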
+#### Improvements {#improvements}
+
+- Added the `max_parts_in_total` setting for the MergeTree family of tables (default: 100 000), which prevents unsafe specification of the partition key \#5166. [\#5171](https://github.com/ClickHouse/ClickHouse/pull/5171) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `clickhouse-obfuscator`: derive the seed for individual columns by combining the initial seed with the column name, not the column position. This is intended to transform datasets with multiple related tables, so that tables remain JOINable after the transformation. [\#5178](https://github.com/ClickHouse/ClickHouse/pull/5178) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added the functions `JSONExtractRaw`, `JSONExtractKeyAndValues`. Renamed the functions `jsonExtract<type>` to `JSONExtract<type>`. When something goes wrong, these functions return the corresponding values, not `NULL`. Modified the function `JSONExtract`: now it gets the return type from its last parameter and doesn't inject nullables. Implemented a fallback to RapidJSON in case AVX2 instructions are not available. The simdjson library was updated to a new version. [\#5235](https://github.com/ClickHouse/ClickHouse/pull/5235) ([Vitaly Baranov](https://github.com/vitlibar))
+- Now the `if` and `multiIf` functions don't rely on the condition's `Nullable`, but rely on the branches, for SQL compatibility. [\#5238](https://github.com/ClickHouse/ClickHouse/pull/5238) ([Jian Wu](https://github.com/janplus))
+- The `In` predicate now generates a `Null` result from `Null` input, like the `Equal` function. [\#5152](https://github.com/ClickHouse/ClickHouse/pull/5152) ([Jian Wu](https://github.com/janplus))
+- Check the time limit every (flush_interval / poll_timeout) number of rows from Kafka. This allows breaking the reading from the Kafka consumer more frequently and checking the time limits for the top-level streams. [\#5249](https://github.com/ClickHouse/ClickHouse/pull/5249) ([Ivan](https://github.com/abyss7))
+- Link rdkafka with bundled SASL. It should allow using SASL SCRAM authentication. [\#5253](https://github.com/ClickHouse/ClickHouse/pull/5253) ([Ivan](https://github.com/abyss7))
+- Batched version of RowRefList for ALL JOINS. [\#5267](https://github.com/ClickHouse/ClickHouse/pull/5267) ([Artem Zuikov](https://github.com/4ertus2))
+- clickhouse-server: more informative listen error messages. [\#5268](https://github.com/ClickHouse/ClickHouse/pull/5268) ([proller](https://github.com/proller))
+- Support dictionaries in clickhouse-copier for functions in `<sharding_key>`. [\#5270](https://github.com/ClickHouse/ClickHouse/pull/5270) ([proller](https://github.com/proller))
+- Add a new setting `kafka_commit_every_batch` to regulate the Kafka committing policy. It allows setting the commit mode: after every batch of messages is handled, or after the whole block is written to the storage. It's a trade-off between losing some messages or reading them twice in some extreme situations (see the engine sketch after this list). [\#5308](https://github.com/ClickHouse/ClickHouse/pull/5308) ([Ivan](https://github.com/abyss7))
+- Make `windowFunnel` support other unsigned integer types. [\#5320](https://github.com/ClickHouse/ClickHouse/pull/5320) ([sundy-li](https://github.com/sundy-li))
+- Allow shadowing the virtual column `_table` in the Merge engine. [\#5325](https://github.com/ClickHouse/ClickHouse/pull/5325) ([Ivan](https://github.com/abyss7))
+- Make `sequenceMatch` aggregate functions support other unsigned integer types. [\#5339](https://github.com/ClickHouse/ClickHouse/pull/5339) ([sundy-li](https://github.com/sundy-li))
+- Better error messages if a checksum mismatch is most likely caused by hardware failures. [\#5355](https://github.com/ClickHouse/ClickHouse/pull/5355) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Check that underlying tables support sampling for `StorageMerge`. [\#5366](https://github.com/ClickHouse/ClickHouse/pull/5366) ([Ivan](https://github.com/abyss7))
+- Сlose MySQL connections after their usage in external dictionaries. It is related to issue \#893. [\#5395](https://github.com/ClickHouse/ClickHouse/pull/5395) ([Clément Rodriguez](https://github.com/clemrodriguez))
+- Improvements of the MySQL wire protocol. Changed the name of the format to MySQLWire. Using RAII for calling RSA_free. Disabling SSL if the context cannot be created. [\#5419](https://github.com/ClickHouse/ClickHouse/pull/5419) ([Yuriy Baranov](https://github.com/yurriy))
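+The new `kafka_commit_every_batch` setting is applied like the other Kafka engine settings; a hedged sketch (broker, topic and schema below are placeholders):
+
+```sql
+CREATE TABLE queue (ts DateTime, message String)
+ENGINE = Kafka
+SETTINGS kafka_broker_list = 'localhost:9092',
+         kafka_topic_list = 'events',
+         kafka_group_name = 'ch-consumer',
+         kafka_format = 'JSONEachRow',
+         kafka_commit_every_batch = 1; -- commit per handled batch instead of per written block
+```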
+- clickhouse-client: allow running with an inaccessible history file (read-only, no disk space, file is a directory, …). [\#5431](https://github.com/ClickHouse/ClickHouse/pull/5431) ([proller](https://github.com/proller))
+- Respect query settings in asynchronous INSERTs into Distributed tables. [\#4936](https://github.com/ClickHouse/ClickHouse/pull/4936) ([TCeason](https://github.com/TCeason))
+- Renamed the functions `leastSqr` to `simpleLinearRegression`, `LinearRegression` to `linearRegression`, `LogisticRegression` to `logisticRegression`. [\#5391](https://github.com/ClickHouse/ClickHouse/pull/5391) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+
+#### Performance Improvements {#performance-improvements}
+
+- Parallelize the processing of parts of non-replicated MergeTree tables in the ALTER MODIFY query. [\#4639](https://github.com/ClickHouse/ClickHouse/pull/4639) ([Ivan Kush](https://github.com/IvanKush))
+- Optimizations in regular expression extraction. [\#5193](https://github.com/ClickHouse/ClickHouse/pull/5193) [\#5191](https://github.com/ClickHouse/ClickHouse/pull/5191) ([Danila Kutenin](https://github.com/danlark1))
+- Add right join key columns to the join result if they are used only in the JOIN ON section. [\#5260](https://github.com/ClickHouse/ClickHouse/pull/5260) ([Artem Zuikov](https://github.com/4ertus2))
+- Freeze the Kafka buffer after the first empty response. It avoids multiple invocations of `ReadBuffer::next()` for an empty result in some row-parsing streams. [\#5283](https://github.com/ClickHouse/ClickHouse/pull/5283) ([Ivan](https://github.com/abyss7))
+- `concat` function optimization for multiple arguments. [\#5357](https://github.com/ClickHouse/ClickHouse/pull/5357) ([Danila Kutenin](https://github.com/danlark1))
+- Query optimisation. Allow pushing down the IN statement while rewriting comma/cross join into an inner one. [\#5396](https://github.com/ClickHouse/ClickHouse/pull/5396) ([Artem Zuikov](https://github.com/4ertus2))
+- Upgrade our LZ4 implementation with the reference one to have faster decompression. [\#5070](https://github.com/ClickHouse/ClickHouse/pull/5070) ([Danila Kutenin](https://github.com/danlark1))
+- Implemented MSD radix sort (based on kxsort), and partial sorting. [\#5129](https://github.com/ClickHouse/ClickHouse/pull/5129) ([Evgenii Pravda](https://github.com/kvinty))
+
+#### Bug Fixes {#bug-fixes}
+
+- Fix push of required columns with join. [\#5192](https://github.com/ClickHouse/ClickHouse/pull/5192) ([Winter Zhang](https://github.com/zhang2014))
+- Fixed a bug where, when ClickHouse is run by systemd, the command `sudo service clickhouse-server forcerestart` was not working as expected. [\#5204](https://github.com/ClickHouse/ClickHouse/pull/5204) ([proller](https://github.com/proller))
+- Fix HTTP error codes in DataPartsExchange (the interserver HTTP server on port 9009 always returned code 200, even on errors). [\#5216](https://github.com/ClickHouse/ClickHouse/pull/5216) ([proller](https://github.com/proller))
+- Fix SimpleAggregateFunction for Strings longer than MAX_SMALL_STRING_SIZE. [\#5311](https://github.com/ClickHouse/ClickHouse/pull/5311) ([Azat Khuzhin](https://github.com/azat))
+- Fix an error for `Decimal` to `Nullable(Decimal)` conversion in IN. Support other Decimal-to-Decimal conversions (including different scales). [\#5350](https://github.com/ClickHouse/ClickHouse/pull/5350) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed FPU clobbering in the simdjson library that led to wrong calculation of the `uniqHLL` and `uniqCombined` aggregate functions and math functions such as `log`. [\#5354](https://github.com/ClickHouse/ClickHouse/pull/5354) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed handling of mixed const/nonconst cases in JSON functions. [\#5435](https://github.com/ClickHouse/ClickHouse/pull/5435) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fix the `retention` function. Now all conditions that are satisfied in a row of data are added to the data state. [\#5119](https://github.com/ClickHouse/ClickHouse/pull/5119) ([小路](https://github.com/nicelulu))
+- Fix the result type for `quantileExact` with Decimals. [\#5304](https://github.com/ClickHouse/ClickHouse/pull/5304) ([Artem Zuikov](https://github.com/4ertus2))
+
+#### Documentation {#documentation}
+
+- Translate documentation for `CollapsingMergeTree` to Chinese. [\#5168](https://github.com/ClickHouse/ClickHouse/pull/5168) ([张风啸](https://github.com/AlexZFX))
+- Translate some documentation about table engines to Chinese. [\#5134](https://github.com/ClickHouse/ClickHouse/pull/5134) [\#5328](https://github.com/ClickHouse/ClickHouse/pull/5328) ([never lee](https://github.com/neverlee))
+
+#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements}
+
+- Fix some sanitizer reports that show probable use-after-free. [\#5139](https://github.com/ClickHouse/ClickHouse/pull/5139) [\#5143](https://github.com/ClickHouse/ClickHouse/pull/5143) [\#5393](https://github.com/ClickHouse/ClickHouse/pull/5393) ([Ivan](https://github.com/abyss7))
+- Move performance tests out of separate directories for convenience. [\#5158](https://github.com/ClickHouse/ClickHouse/pull/5158) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix incorrect performance tests. [\#5255](https://github.com/ClickHouse/ClickHouse/pull/5255) ([alesapin](https://github.com/alesapin))
+- Added a tool to calculate checksums caused by bit flips to debug hardware issues. [\#5334](https://github.com/ClickHouse/ClickHouse/pull/5334) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Make the runner script more usable. [\#5340](https://github.com/ClickHouse/ClickHouse/pull/5340)[\#5360](https://github.com/ClickHouse/ClickHouse/pull/5360) ([filimonov](https://github.com/filimonov))
+- Add a small instruction on how to write performance tests. [\#5408](https://github.com/ClickHouse/ClickHouse/pull/5408) ([alesapin](https://github.com/alesapin))
+- Add the ability to make substitutions in create, fill and drop queries in performance tests. [\#5367](https://github.com/ClickHouse/ClickHouse/pull/5367) ([Olga Khvostikova](https://github.com/stavrolia))
+
+## ClickHouse release 19.7 {#clickhouse-release-19-7}
+
+### ClickHouse release 19.7.5.29, 2019-07-05 {#clickhouse-release-19-7-5-29-2019-07-05}
+
+#### Bug Fix {#bug-fix-25}
+
+- Fix performance regression in some queries with JOIN. [\#5192](https://github.com/ClickHouse/ClickHouse/pull/5192) ([Winter Zhang](https://github.com/zhang2014))
+
+### ClickHouse release 19.7.5.27, 2019-06-09 {#clickhouse-release-19-7-5-27-2019-06-09}
+
+#### New features {#new-features-1}
+
+- Added the bitmap-related functions `bitmapHasAny` and `bitmapHasAll`, analogous to the `hasAny` and `hasAll` functions for arrays (see the sketch below). [\#5279](https://github.com/ClickHouse/ClickHouse/pull/5279) ([Sergi Vladykin](https://github.com/svladykin))
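+A minimal illustration of the two new functions (the values are arbitrary):
+
+```sql
+SELECT bitmapHasAny(bitmapBuild([1, 2, 3]), bitmapBuild([3, 4, 5])); -- 1: the bitmaps intersect
+SELECT bitmapHasAll(bitmapBuild([1, 2, 3]), bitmapBuild([2, 4]));    -- 0: 4 is missing from the first bitmap
+```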
+#### Bug Fixes {#bug-fixes-1}
+
+- Fix segfault on a `minmax` INDEX with a Null value. [\#5246](https://github.com/ClickHouse/ClickHouse/pull/5246) ([Nikita Vasilev](https://github.com/nikvas0))
+- Mark all input columns in LIMIT BY as required output. It fixes the ‘Not found column’ error in some distributed queries. [\#5407](https://github.com/ClickHouse/ClickHouse/pull/5407) ([Constantin S. Pan](https://github.com/kvap))
+- Fix the “Column ‘0’ already exists” error in `SELECT .. PREWHERE` on a column with DEFAULT. [\#5397](https://github.com/ClickHouse/ClickHouse/pull/5397) ([proller](https://github.com/proller))
+- Fix the `ALTER MODIFY TTL` query on `ReplicatedMergeTree`. [\#5539](https://github.com/ClickHouse/ClickHouse/pull/5539/commits) ([Anton Popov](https://github.com/CurtizJ))
+- Don't crash the server when Kafka consumers have failed to start. [\#5285](https://github.com/ClickHouse/ClickHouse/pull/5285) ([Ivan](https://github.com/abyss7))
+- Fixed bitmap functions producing a wrong result. [\#5359](https://github.com/ClickHouse/ClickHouse/pull/5359) ([Andy Yang](https://github.com/andyyzh))
+- Fix element_count for hashed dictionaries (do not include duplicates). [\#5440](https://github.com/ClickHouse/ClickHouse/pull/5440) ([Azat Khuzhin](https://github.com/azat))
+- Use the contents of the environment variable TZ as the name for the timezone. It helps to correctly detect the default timezone in some cases. [\#5443](https://github.com/ClickHouse/ClickHouse/pull/5443) ([Ivan](https://github.com/abyss7))
+- Do not try to convert integers in `dictGetT` functions, because it doesn't work correctly. Throw an exception instead. [\#5446](https://github.com/ClickHouse/ClickHouse/pull/5446) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix settings in the ExternalData HTTP request. [\#5455](https://github.com/ClickHouse/ClickHouse/pull/5455) ([Danila Kutenin](https://github.com/danlark1))
+- Fix a bug where parts were removed only from FS without dropping them from ZooKeeper. [\#5520](https://github.com/ClickHouse/ClickHouse/pull/5520) ([alesapin](https://github.com/alesapin))
+- Fix segmentation fault in the `bitmapHasAny` function. [\#5528](https://github.com/ClickHouse/ClickHouse/pull/5528) ([Zhichang Yu](https://github.com/yuzhichang))
+- Fixed an error where the replication connection pool doesn't retry to resolve hosts, even when the DNS cache was dropped. [\#5534](https://github.com/ClickHouse/ClickHouse/pull/5534) ([alesapin](https://github.com/alesapin))
+- Fixed the `DROP INDEX IF EXISTS` query. Now the `ALTER TABLE ... DROP INDEX IF EXISTS ...` query doesn't raise an exception if the index does not exist. [\#5524](https://github.com/ClickHouse/ClickHouse/pull/5524) ([Gleb Novikov](https://github.com/NanoBjorn))
+- Fix UNION ALL supertype column. There were cases with inconsistent data and column types of resulting columns. [\#5503](https://github.com/ClickHouse/ClickHouse/pull/5503) ([Artem Zuikov](https://github.com/4ertus2))
+- Skip ZNONODE during DDL query processing. Before, if another node removed the znode in the task queue, the one that did not process it, but had already got the list of children, would terminate the DDLWorker thread. [\#5489](https://github.com/ClickHouse/ClickHouse/pull/5489) ([Azat Khuzhin](https://github.com/azat))
+- Fix INSERT into a Distributed() table with a MATERIALIZED column. [\#5429](https://github.com/ClickHouse/ClickHouse/pull/5429) ([Azat Khuzhin](https://github.com/azat))
+
+### ClickHouse release 19.7.3.9, 2019-05-30 {#clickhouse-release-19-7-3-9-2019-05-30}
+
+#### New Features {#new-features-2}
+
+- Allow limiting the range of a setting that can be specified by the user. These constraints can be set up in the user settings profile. [\#4931](https://github.com/ClickHouse/ClickHouse/pull/4931) ([Vitaly Baranov](https://github.com/vitlibar))
+- Add a second version of the function `groupUniqArray` with an optional `max_size` parameter that limits the size of the resulting array. This behavior is similar to the `groupArray(max_size)(x)` function (see the sketch after this list). [\#5026](https://github.com/ClickHouse/ClickHouse/pull/5026) ([Guillaume Tassery](https://github.com/YiuRULE))
+- For the TSVWithNames/CSVWithNames input file formats, the column order can now be determined from the file header. This is controlled by the `input_format_with_names_use_header` parameter. [\#5081](https://github.com/ClickHouse/ClickHouse/pull/5081) ([Alexander](https://github.com/Akazz))
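+A sketch of the capped `groupUniqArray` (the `visits` table and its columns are hypothetical):
+
+```sql
+-- at most 3 distinct urls are kept per group; like groupArray(max_size)(x), but over unique values
+SELECT user_id, groupUniqArray(3)(url) AS sample_urls
+FROM visits
+GROUP BY user_id;
+```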
+#### Bug Fixes {#bug-fixes-2}
+
+- Crash with uncompressed_cache + JOIN during merge (\#5197). [\#5133](https://github.com/ClickHouse/ClickHouse/pull/5133) ([Danila Kutenin](https://github.com/danlark1))
+- Segmentation fault on a clickhouse-client query to system tables. \#5066. [\#5127](https://github.com/ClickHouse/ClickHouse/pull/5127) ([Ivan](https://github.com/abyss7))
+- Data loss on heavy load via KafkaEngine (\#4736). [\#5080](https://github.com/ClickHouse/ClickHouse/pull/5080) ([Ivan](https://github.com/abyss7))
+- Fixed a very rare data race condition that could happen when executing a query with UNION ALL involving at least two SELECTs from system.columns, system.tables, system.parts, system.part_tables or tables of the Merge family, while performing ALTER of columns of the related tables concurrently. [\#5189](https://github.com/ClickHouse/ClickHouse/pull/5189) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Performance Improvements {#performance-improvements-1}
+
+- Use radix sort for sorting by a single numeric column in `ORDER BY` without `LIMIT`. [\#5106](https://github.com/ClickHouse/ClickHouse/pull/5106), [\#4439](https://github.com/ClickHouse/ClickHouse/pull/4439) ([Evgenii Pravda](https://github.com/kvinty), [alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Documentation {#documentation-1}
+
+- Translate documentation for some table engines to Chinese. [\#5107](https://github.com/ClickHouse/ClickHouse/pull/5107), [\#5094](https://github.com/ClickHouse/ClickHouse/pull/5094), [\#5087](https://github.com/ClickHouse/ClickHouse/pull/5087) ([张风啸](https://github.com/AlexZFX)), [\#5068](https://github.com/ClickHouse/ClickHouse/pull/5068) ([never lee](https://github.com/neverlee))
+
+#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-1}
+
+- Print UTF-8 characters properly in `clickhouse-test`. [\#5084](https://github.com/ClickHouse/ClickHouse/pull/5084) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add a command line parameter for clickhouse-client to always load suggestion data. [\#5102](https://github.com/ClickHouse/ClickHouse/pull/5102) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Resolve some of the PVS-Studio warnings. [\#5082](https://github.com/ClickHouse/ClickHouse/pull/5082) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Update LZ4. [\#5040](https://github.com/ClickHouse/ClickHouse/pull/5040) ([Danila Kutenin](https://github.com/danlark1))
+- Add gperf to the build requirements for the upcoming pull request \#5030. [\#5110](https://github.com/ClickHouse/ClickHouse/pull/5110) ([proller](https://github.com/proller))
+
+## ClickHouse release 19.6 {#clickhouse-release-19-6}
+
+### ClickHouse release 19.6.3.18, 2019-06-13 {#clickhouse-release-19-6-3-18-2019-06-13}
+
+#### Bug Fixes {#bug-fixes-3}
+
+- Fixed IN condition pushdown for queries from the table functions `mysql` and `odbc` and the corresponding table engines. This fixes \#3540 and \#2384. [\#5313](https://github.com/ClickHouse/ClickHouse/pull/5313) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix deadlock in ZooKeeper. [\#5297](https://github.com/ClickHouse/ClickHouse/pull/5297) ([github1youlc](https://github.com/github1youlc))
+- Allow quoted decimals in CSV. [\#5284](https://github.com/ClickHouse/ClickHouse/pull/5284) ([Artem Zuikov](https://github.com/4ertus2))
+- Disallow conversion from float Inf/NaN into Decimals (throw an exception). [\#5282](https://github.com/ClickHouse/ClickHouse/pull/5282) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix data race in the rename query. [\#5247](https://github.com/ClickHouse/ClickHouse/pull/5247) ([Winter Zhang](https://github.com/zhang2014))
+- Temporarily disable LFAlloc. Usage of LFAlloc might lead to a lot of MAP_FAILED in allocating UncompressedCache and, as a result, to crashes of queries on highly loaded servers. [cfdba93](https://github.com/ClickHouse/ClickHouse/commit/cfdba938ce22f16efeec504f7f90206a515b1280) ([Danila Kutenin](https://github.com/danlark1))
+
+### ClickHouse release 19.6.2.11, 2019-05-13 {#clickhouse-release-19-6-2-11-2019-05-13}
+
+#### New Features {#new-features-3}
+
+- TTL expressions for columns and tables (a sketch follows the Improvements list below). [\#4212](https://github.com/ClickHouse/ClickHouse/pull/4212) ([Anton Popov](https://github.com/CurtizJ))
+- Added support for `brotli` compression for HTTP responses (Accept-Encoding: br). [\#4388](https://github.com/ClickHouse/ClickHouse/pull/4388) ([Mikhail](https://github.com/fandyushin))
+- Added the new function `isValidUTF8` for checking whether a set of bytes is correctly UTF-8 encoded. [\#4934](https://github.com/ClickHouse/ClickHouse/pull/4934) ([Danila Kutenin](https://github.com/danlark1))
+- Add the new load balancing policy `first_or_random`, which sends queries to the first specified host and, if it is inaccessible, sends queries to random hosts of the shard. Useful for cross-replication topology setups. [\#5012](https://github.com/ClickHouse/ClickHouse/pull/5012) ([nvartolomei](https://github.com/nvartolomei))
+
+#### Experimental Features {#experimental-features-1}
+
+- Add the setting `index_granularity_bytes` (adaptive index granularity) for the MergeTree\* family of tables. [\#4826](https://github.com/ClickHouse/ClickHouse/pull/4826) ([alesapin](https://github.com/alesapin))
+
+#### Improvements {#improvements-1}
+
+- Added support for non-constant and negative size and length arguments for the function `substringUTF8`. [\#4989](https://github.com/ClickHouse/ClickHouse/pull/4989) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Disable push-down to the right table in left join, the left table in right join, and both tables in full join. This fixes wrong JOIN results in some cases. [\#4846](https://github.com/ClickHouse/ClickHouse/pull/4846) ([Ivan](https://github.com/abyss7))
+- `clickhouse-copier`: auto-upload the task configuration from the `--task-file` option. [\#4876](https://github.com/ClickHouse/ClickHouse/pull/4876) ([proller](https://github.com/proller))
+- Added a typos handler for the storage factory and table functions factory. [\#4891](https://github.com/ClickHouse/ClickHouse/pull/4891) ([Danila Kutenin](https://github.com/danlark1))
+- Support asterisks and qualified asterisks for multiple joins without subqueries. [\#4898](https://github.com/ClickHouse/ClickHouse/pull/4898) ([Artem Zuikov](https://github.com/4ertus2))
+- Make the missing-column error message more user friendly. [\#4915](https://github.com/ClickHouse/ClickHouse/pull/4915) ([Artem Zuikov](https://github.com/4ertus2))
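+The TTL expressions feature introduced above can be sketched as follows (the table layout is illustrative, not from the changelog):
+
+```sql
+CREATE TABLE events
+(
+    d Date,
+    user_id UInt64,
+    raw_payload String TTL d + INTERVAL 7 DAY -- column TTL: values are cleared after a week
+)
+ENGINE = MergeTree
+ORDER BY (d, user_id)
+TTL d + INTERVAL 30 DAY; -- table TTL: whole rows expire after a month
+```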
+#### Performance Improvements {#performance-improvements-2}
+
+- Significant speedup of JOIN. [\#4924](https://github.com/ClickHouse/ClickHouse/pull/4924) ([Martijn Bakker](https://github.com/Gladdy))
+
+#### Backward Incompatible Changes {#backward-incompatible-changes}
+
+- The HTTP header `Query-Id` was renamed to `X-ClickHouse-Query-Id` for consistency. [\#4972](https://github.com/ClickHouse/ClickHouse/pull/4972) ([Mikhail](https://github.com/fandyushin))
+
+#### Bug Fixes {#bug-fixes-4}
+
+- Fixed a potential null pointer dereference in `clickhouse-copier`. [\#4900](https://github.com/ClickHouse/ClickHouse/pull/4900) ([proller](https://github.com/proller))
+- Fixed an error in a query with JOIN + ARRAY JOIN. [\#4938](https://github.com/ClickHouse/ClickHouse/pull/4938) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed hanging on start of the server when a dictionary depends on another dictionary via a database with engine=Dictionary. [\#4962](https://github.com/ClickHouse/ClickHouse/pull/4962) ([Vitaly Baranov](https://github.com/vitlibar))
+- Partially fix distributed_product_mode = local. It's possible to allow columns of local tables in where/having/order by/… via table aliases. Throw an exception if the table does not have an alias. It is not possible to access the columns without table aliases yet. [\#4986](https://github.com/ClickHouse/ClickHouse/pull/4986) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix a potentially wrong result for `SELECT DISTINCT` with `JOIN`. [\#5001](https://github.com/ClickHouse/ClickHouse/pull/5001) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed a very rare data race condition that could happen when executing a query with UNION ALL involving at least two SELECTs from system.columns, system.tables, system.parts, system.part_tables or tables of the Merge family, while performing ALTER of columns of the related tables concurrently. [\#5189](https://github.com/ClickHouse/ClickHouse/pull/5189) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-2}
+
+- Fixed test failures when running clickhouse-server on a different host. [\#4713](https://github.com/ClickHouse/ClickHouse/pull/4713) ([Vasily Nemkov](https://github.com/Enmk))
+- clickhouse-test: disable color control sequences in a non-tty environment. [\#4937](https://github.com/ClickHouse/ClickHouse/pull/4937) ([alesapin](https://github.com/alesapin))
+- clickhouse-test: allow using any test database (remove the `test.` qualification where possible). [\#5008](https://github.com/ClickHouse/ClickHouse/pull/5008) ([proller](https://github.com/proller))
+- Fix UBSan errors. [\#5037](https://github.com/ClickHouse/ClickHouse/pull/5037) ([Vitaly Baranov](https://github.com/vitlibar))
+- Yandex LFAlloc was added to ClickHouse to allocate MarkCache and UncompressedCache data in different ways to catch segfaults more reliably. [\#4995](https://github.com/ClickHouse/ClickHouse/pull/4995) ([Danila Kutenin](https://github.com/danlark1))
+- Python util to help with backports and changelogs. [\#4949](https://github.com/ClickHouse/ClickHouse/pull/4949) ([Ivan](https://github.com/abyss7))
+## ClickHouse release 19.5 {#clickhouse-release-19-5}
+
+### ClickHouse release 19.5.4.22, 2019-05-13 {#clickhouse-release-19-5-4-22-2019-05-13}
+
+#### Bug Fixes {#bug-fixes-5}
+
+- Fixed a possible crash in bitmap\* functions. [\#5220](https://github.com/ClickHouse/ClickHouse/pull/5220) [\#5228](https://github.com/ClickHouse/ClickHouse/pull/5228) ([Andy Yang](https://github.com/andyyzh))
+- Fixed a very rare data race condition that could happen when executing a query with UNION ALL involving at least two SELECTs from system.columns, system.tables, system.parts, system.part_tables or tables of the Merge family, while performing ALTER of columns of the related tables concurrently. [\#5189](https://github.com/ClickHouse/ClickHouse/pull/5189) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed the error `Set for IN is not created yet in case of using single LowCardinality column in the left part of IN`. This error happened if a LowCardinality column was part of the primary key. \#5031 [\#5154](https://github.com/ClickHouse/ClickHouse/pull/5154) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Modification of the retention function: if a row satisfied both the first and the Nth condition, only the first satisfied condition was added to the data state. Now all conditions that are satisfied in a row of data are added to the data state. [\#5119](https://github.com/ClickHouse/ClickHouse/pull/5119) ([小路](https://github.com/nicelulu))
+
+### ClickHouse release 19.5.3.8, 2019-04-18 {#clickhouse-release-19-5-3-8-2019-04-18}
+
+#### Bug Fixes {#bug-fixes-6}
+
+- Fixed the type of the setting `max_partitions_per_insert_block` from boolean to UInt64. [\#5028](https://github.com/ClickHouse/ClickHouse/pull/5028) ([Mohammad Hossein Sekhavat](https://github.com/mhsekhavat))
+
+### ClickHouse release 19.5.2.6, 2019-04-15 {#clickhouse-release-19-5-2-6-2019-04-15}
+
+#### New Features {#new-features-4}
+
+- [Hyperscan](https://github.com/intel/hyperscan) multiple regular expression matching was added (functions `multiMatchAny`, `multiMatchAnyIndex`, `multiFuzzyMatchAny`, `multiFuzzyMatchAnyIndex`); see the sketch after this list. [\#4780](https://github.com/ClickHouse/ClickHouse/pull/4780), [\#4841](https://github.com/ClickHouse/ClickHouse/pull/4841) ([Danila Kutenin](https://github.com/danlark1))
+- The `multiSearchFirstPosition` function was added. [\#4780](https://github.com/ClickHouse/ClickHouse/pull/4780) ([Danila Kutenin](https://github.com/danlark1))
+- Implement a predefined per-row expression filter for tables. [\#4792](https://github.com/ClickHouse/ClickHouse/pull/4792) ([Ivan](https://github.com/abyss7))
+- A new type of data skipping indices based on bloom filters (can be used for the `equal`, `in` and `like` functions). [\#4499](https://github.com/ClickHouse/ClickHouse/pull/4499) ([Nikita Vasilev](https://github.com/nikvas0))
+- Added `ASOF JOIN`, which allows running queries that join to the most recent value known. [\#4774](https://github.com/ClickHouse/ClickHouse/pull/4774) [\#4867](https://github.com/ClickHouse/ClickHouse/pull/4867) [\#4863](https://github.com/ClickHouse/ClickHouse/pull/4863) [\#4875](https://github.com/ClickHouse/ClickHouse/pull/4875) ([Martijn Bakker](https://github.com/Gladdy), [Artem Zuikov](https://github.com/4ertus2))
+- Rewrite multiple `COMMA JOIN` to `CROSS JOIN`, then rewrite them to `INNER JOIN` if possible. [\#4661](https://github.com/ClickHouse/ClickHouse/pull/4661) ([Artem Zuikov](https://github.com/4ertus2))
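+Two of the new features above, sketched in queries (the regexes and the `trades`/`quotes` tables are illustrative; ASOF JOIN here uses the USING form, where the last column is the inequality column):
+
+```sql
+-- Hyperscan-backed: 1 if any of the patterns matches
+SELECT multiMatchAny('ClickHouse 19.5.2.6', ['\\d+\\.\\d+', '^foo']); -- 1
+
+-- ASOF JOIN: for each trade, take the closest earlier-or-equal quote timestamp
+SELECT t.symbol, t.ts, q.price
+FROM trades AS t
+ASOF JOIN quotes AS q USING (symbol, ts);
+```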
+#### Improvement {#improvement-9}
+
+- `topK` and `topKWeighted` now support a custom `loadFactor` (fixes issue [\#4252](https://github.com/ClickHouse/ClickHouse/issues/4252)). [\#4634](https://github.com/ClickHouse/ClickHouse/pull/4634) ([Kirill Danshin](https://github.com/kirillDanshin))
+- Allow using `parallel_replicas_count > 1` even for tables without sampling (the setting is simply ignored for them). In previous versions it led to an exception. [\#4637](https://github.com/ClickHouse/ClickHouse/pull/4637) ([Alexey Elymanov](https://github.com/digitalist))
+- Support for `CREATE OR REPLACE VIEW`. Allows creating a view or setting a new definition in a single statement (see the sketch after this list). [\#4654](https://github.com/ClickHouse/ClickHouse/pull/4654) ([Boris Granveaud](https://github.com/bgranvea))
+- The `Buffer` table engine now supports `PREWHERE`. [\#4671](https://github.com/ClickHouse/ClickHouse/pull/4671) ([LiuYangkuan](https://github.com/LiuYangkuan))
+- Add the ability to start a replicated table without metadata in ZooKeeper in `readonly` mode. [\#4691](https://github.com/ClickHouse/ClickHouse/pull/4691) ([alesapin](https://github.com/alesapin))
+- Fixed flicker of the progress bar in clickhouse-client. The issue was most noticeable when using `FORMAT Null` with streaming queries. [\#4811](https://github.com/ClickHouse/ClickHouse/pull/4811) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Allow disabling functions with the `hyperscan` library on a per-user basis to limit potentially excessive and uncontrolled resource usage. [\#4816](https://github.com/ClickHouse/ClickHouse/pull/4816) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add version number logging in all errors. [\#4824](https://github.com/ClickHouse/ClickHouse/pull/4824) ([proller](https://github.com/proller))
+- Added a restriction to the `multiMatch` functions that requires the string size to fit into `unsigned int`. Also added a number-of-arguments limit to the `multiSearch` functions. [\#4834](https://github.com/ClickHouse/ClickHouse/pull/4834) ([Danila Kutenin](https://github.com/danlark1))
+- Improved usage of scratch space and error handling in Hyperscan. [\#4866](https://github.com/ClickHouse/ClickHouse/pull/4866) ([Danila Kutenin](https://github.com/danlark1))
+- Fill `system.graphite_retentions` from the table config of `*GraphiteMergeTree` engine tables. [\#4584](https://github.com/ClickHouse/ClickHouse/pull/4584) ([Mikhail f. Shiryaev](https://github.com/Felixoid))
+- Rename the `trigramDistance` function to `ngramDistance` and add more functions with `CaseInsensitive` and `UTF`. [\#4602](https://github.com/ClickHouse/ClickHouse/pull/4602) ([Danila Kutenin](https://github.com/danlark1))
+- Improved data skipping indices calculation. [\#4640](https://github.com/ClickHouse/ClickHouse/pull/4640) ([Nikita Vasilev](https://github.com/nikvas0))
+- Keep ordinary, `DEFAULT`, `MATERIALIZED` and `ALIAS` columns in a single list (fixes issue [\#2867](https://github.com/ClickHouse/ClickHouse/issues/2867)). [\#4707](https://github.com/ClickHouse/ClickHouse/pull/4707) ([Alex Zatelepin](https://github.com/ztlpn))
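+`CREATE OR REPLACE VIEW` in a single statement (the view and table are hypothetical):
+
+```sql
+CREATE VIEW total_hits AS SELECT sum(hits) AS h FROM site_hits;
+CREATE OR REPLACE VIEW total_hits AS SELECT sum(hits) / 2 AS h FROM site_hits; -- swaps the definition, no separate DROP needed
+```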
+#### Bug Fix {#bug-fix-26}
+
+- Avoid `std::terminate` in case of memory allocation failure. Now the `std::bad_alloc` exception is thrown as expected. [\#4665](https://github.com/ClickHouse/ClickHouse/pull/4665) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixes capnproto reading from a buffer. Sometimes files weren't loaded successfully over HTTP. [\#4674](https://github.com/ClickHouse/ClickHouse/pull/4674) ([Vladislav](https://github.com/smirnov-vs))
+- Fix the error `Unknown log entry type: 0` after an `OPTIMIZE TABLE FINAL` query. [\#4683](https://github.com/ClickHouse/ClickHouse/pull/4683) ([Amos Bird](https://github.com/amosbird))
+- Wrong arguments to the `hasAny` or `hasAll` functions could lead to a segfault. [\#4698](https://github.com/ClickHouse/ClickHouse/pull/4698) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- A deadlock could happen while executing a `DROP DATABASE dictionary` query. [\#4701](https://github.com/ClickHouse/ClickHouse/pull/4701) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix undefined behavior in the `median` and `quantile` functions. [\#4702](https://github.com/ClickHouse/ClickHouse/pull/4702) ([hcz](https://github.com/hczhcz))
+- Fix compression level detection when `network_compression_method` is in lowercase. Broken in v19.1. [\#4706](https://github.com/ClickHouse/ClickHouse/pull/4706) ([proller](https://github.com/proller))
+- Fixed ignorance of the `UTC` setting (fixes issue [\#4658](https://github.com/ClickHouse/ClickHouse/issues/4658)). [\#4718](https://github.com/ClickHouse/ClickHouse/pull/4718) ([proller](https://github.com/proller))
+- Fix `histogram` function behaviour with `Distributed` tables. [\#4741](https://github.com/ClickHouse/ClickHouse/pull/4741) ([Oleg](https://github.com/olegkv))
+- Fixed the TSan report `destroy of a locked mutex`. [\#4742](https://github.com/ClickHouse/ClickHouse/pull/4742) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a TSan report on shutdown due to a race condition in system logs usage. Fixed a potential use-after-free on shutdown when part_log is enabled. [\#4758](https://github.com/ClickHouse/ClickHouse/pull/4758) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix rechecking parts in `ReplicatedMergeTreeAlterThread` in case of error. [\#4772](https://github.com/ClickHouse/ClickHouse/pull/4772) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Arithmetic operations on intermediate aggregate function states were not working for constant arguments (such as subquery results). [\#4776](https://github.com/ClickHouse/ClickHouse/pull/4776) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Always backquote column names in metadata. Otherwise it's impossible to create a table with a column named `index` (the server won't restart due to a malformed `ATTACH` query in metadata). [\#4782](https://github.com/ClickHouse/ClickHouse/pull/4782) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix crash in `ALTER ... MODIFY ORDER BY` on a `Distributed` table. [\#4790](https://github.com/ClickHouse/ClickHouse/pull/4790) ([TCeason](https://github.com/TCeason))
+- Fix segfault in `JOIN ON` with enabled `enable_optimize_predicate_expression`. [\#4794](https://github.com/ClickHouse/ClickHouse/pull/4794) ([Winter Zhang](https://github.com/zhang2014))
+- Fix a bug with adding an extraneous row after consuming a protobuf message from Kafka. [\#4808](https://github.com/ClickHouse/ClickHouse/pull/4808) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fix crash of `JOIN` on a not-nullable vs nullable column. Fix `NULLs` in right keys in `ANY JOIN` + `join_use_nulls`. [\#4815](https://github.com/ClickHouse/ClickHouse/pull/4815) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix segmentation fault in `clickhouse-copier`. [\#4835](https://github.com/ClickHouse/ClickHouse/pull/4835) ([proller](https://github.com/proller))
+- Fixed a race condition in `SELECT` from `system.tables` if the table is renamed or altered concurrently. [\#4836](https://github.com/ClickHouse/ClickHouse/pull/4836) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a data race when fetching a data part that is already obsolete. [\#4839](https://github.com/ClickHouse/ClickHouse/pull/4839) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a rare data race that could happen during `RENAME` of a MergeTree family table. [\#4844](https://github.com/ClickHouse/ClickHouse/pull/4844) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a segmentation fault in the function `arrayIntersect`. It could happen if the function was called with mixed constant and ordinary arguments. [\#4847](https://github.com/ClickHouse/ClickHouse/pull/4847) ([Lixiang Qian](https://github.com/fancyqlx))
+- Fixed reading from an `Array(LowCardinality)` column in the rare case when the column contains a long sequence of empty arrays. [\#4850](https://github.com/ClickHouse/ClickHouse/pull/4850) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix crash in `FULL/RIGHT JOIN` when joining on nullable vs not nullable. [\#4855](https://github.com/ClickHouse/ClickHouse/pull/4855) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed a `No message received` exception while fetching parts between replicas. [\#4856](https://github.com/ClickHouse/ClickHouse/pull/4856) ([alesapin](https://github.com/alesapin))
+- Fixed the `arrayIntersect` function giving a wrong result in case of several repeated values in a single array. [\#4871](https://github.com/ClickHouse/ClickHouse/pull/4871) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix a race condition during concurrent `ALTER COLUMN` queries that could lead to a server crash (fixes issue [\#3421](https://github.com/ClickHouse/ClickHouse/issues/3421)). [\#4592](https://github.com/ClickHouse/ClickHouse/pull/4592) ([Alex Zatelepin](https://github.com/ztlpn))
+- Fix an incorrect result in `FULL/RIGHT JOIN` with a const column. [\#4723](https://github.com/ClickHouse/ClickHouse/pull/4723) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix duplicates in `GLOBAL JOIN` with an asterisk. [\#4705](https://github.com/ClickHouse/ClickHouse/pull/4705) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix parameter deduction in `ALTER MODIFY` of a column `CODEC` when the column type is not specified. [\#4883](https://github.com/ClickHouse/ClickHouse/pull/4883) ([alesapin](https://github.com/alesapin))
+- The functions `cutQueryStringAndFragment()` and `queryStringAndFragment()` now work correctly when the `URL` contains a fragment and no query. [\#4894](https://github.com/ClickHouse/ClickHouse/pull/4894) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fix a rare bug when the setting `min_bytes_to_use_direct_io` is greater than zero, which occurs when a thread has to seek backward in a column file. [\#4897](https://github.com/ClickHouse/ClickHouse/pull/4897) ([alesapin](https://github.com/alesapin))
+- Fix wrong argument types for aggregate functions with `LowCardinality` arguments (fixes issue [\#4919](https://github.com/ClickHouse/ClickHouse/issues/4919)). [\#4922](https://github.com/ClickHouse/ClickHouse/pull/4922) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix wrong name qualification in `GLOBAL JOIN`. [\#4969](https://github.com/ClickHouse/ClickHouse/pull/4969) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed the function `toISOWeek` result for year 1970. [\#4988](https://github.com/ClickHouse/ClickHouse/pull/4988) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed `DROP`, `TRUNCATE` and `OPTIMIZE` query duplication when executed `ON CLUSTER` for the `ReplicatedMergeTree*` family of tables. [\#4991](https://github.com/ClickHouse/ClickHouse/pull/4991) ([alesapin](https://github.com/alesapin))
+#### Backward Incompatible Change {#backward-incompatible-change-8}
+
+- Rename the setting `insert_sample_with_metadata` to `input_format_defaults_for_omitted_fields`. [\#4771](https://github.com/ClickHouse/ClickHouse/pull/4771) ([Artem Zuikov](https://github.com/4ertus2))
+- Added the setting `max_partitions_per_insert_block` (with value 100 by default). If an inserted block contains a larger number of partitions, an exception is thrown. Set it to 0 if you want to remove the limit (not recommended). [\#4845](https://github.com/ClickHouse/ClickHouse/pull/4845) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Multi-search functions were renamed (`multiPosition` to `multiSearchAllPositions`, `multiSearch` to `multiSearchAny`, `firstMatch` to `multiSearchFirstIndex`). [\#4780](https://github.com/ClickHouse/ClickHouse/pull/4780) ([Danila Kutenin](https://github.com/danlark1))
+
+#### Performance Improvement {#performance-improvement-6}
+
+- Optimize the Volnitsky searcher by inlining, giving about a 5-10% search improvement for queries with many needles or many similar bigrams. [\#4862](https://github.com/ClickHouse/ClickHouse/pull/4862) ([Danila Kutenin](https://github.com/danlark1))
+- Fix a performance issue when the setting `use_uncompressed_cache` is greater than zero, which appeared when all read data was contained in the cache. [\#4913](https://github.com/ClickHouse/ClickHouse/pull/4913) ([alesapin](https://github.com/alesapin))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-10}
+
+- Hardening the debug build: more granular memory mappings and ASLR; add memory protection for the mark cache and index. This allows finding more memory stomping bugs in case ASan and MSan cannot do it. [\#4632](https://github.com/ClickHouse/ClickHouse/pull/4632) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add support for the cmake variables `ENABLE_PROTOBUF`, `ENABLE_PARQUET` and `ENABLE_BROTLI`, which allow enabling/disabling the above features (same as we can do for librdkafka, mysql, etc). [\#4669](https://github.com/ClickHouse/ClickHouse/pull/4669) ([Silviu Caragea](https://github.com/silviucpp))
+- Add the ability to print the process list and stacktraces of all threads if some queries are hung after a test run. [\#4675](https://github.com/ClickHouse/ClickHouse/pull/4675) ([alesapin](https://github.com/alesapin))
+- Add retries on the `Connection loss` error in `clickhouse-test`. [\#4682](https://github.com/ClickHouse/ClickHouse/pull/4682) ([alesapin](https://github.com/alesapin))
+- Add a FreeBSD build with vagrant and a build with the thread sanitizer to the packager script. [\#4712](https://github.com/ClickHouse/ClickHouse/pull/4712) [\#4748](https://github.com/ClickHouse/ClickHouse/pull/4748) ([alesapin](https://github.com/alesapin))
+- Now the user is asked for a password for the user `'default'` during installation. [\#4725](https://github.com/ClickHouse/ClickHouse/pull/4725) ([proller](https://github.com/proller))
+- Suppress a warning in the `rdkafka` library. [\#4740](https://github.com/ClickHouse/ClickHouse/pull/4740) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Allow the ability to build without SSL. [\#4750](https://github.com/ClickHouse/ClickHouse/pull/4750) ([proller](https://github.com/proller))
+- Add a way to launch the clickhouse-server image from a custom user. [\#4753](https://github.com/ClickHouse/ClickHouse/pull/4753) ([Mikhail f. Shiryaev](https://github.com/Felixoid))
+- Upgrade contrib boost to 1.69. [\#4793](https://github.com/ClickHouse/ClickHouse/pull/4793) ([proller](https://github.com/proller))
+- Disable usage of `mremap` when compiled with the thread sanitizer. Surprisingly enough, TSan does not intercept `mremap` (though it does intercept `mmap`, `munmap`), which leads to false positives. Fixed a TSan report in stateful tests. [\#4859](https://github.com/ClickHouse/ClickHouse/pull/4859) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add a test checking the use of a format schema via the HTTP interface. [\#4864](https://github.com/ClickHouse/ClickHouse/pull/4864) ([Vitaly Baranov](https://github.com/vitlibar))
+
+## ClickHouse release 19.4 {#clickhouse-release-19-4}
+
+### ClickHouse release 19.4.4.33, 2019-04-17 {#clickhouse-release-19-4-4-33-2019-04-17}
+
+#### Bug Fixes {#bug-fixes-7}
+
+- Avoid `std::terminate` in case of memory allocation failure. Now the `std::bad_alloc` exception is thrown as expected. [\#4665](https://github.com/ClickHouse/ClickHouse/pull/4665) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixes capnproto reading from a buffer. Sometimes files weren't loaded successfully over HTTP. [\#4674](https://github.com/ClickHouse/ClickHouse/pull/4674) ([Vladislav](https://github.com/smirnov-vs))
+- Fix the error `Unknown log entry type: 0` after an `OPTIMIZE TABLE FINAL` query. [\#4683](https://github.com/ClickHouse/ClickHouse/pull/4683) ([Amos Bird](https://github.com/amosbird))
+- Wrong arguments to the `hasAny` or `hasAll` functions could lead to a segfault. [\#4698](https://github.com/ClickHouse/ClickHouse/pull/4698) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- A deadlock could happen while executing a `DROP DATABASE dictionary` query. [\#4701](https://github.com/ClickHouse/ClickHouse/pull/4701) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix undefined behavior in the `median` and `quantile` functions. [\#4702](https://github.com/ClickHouse/ClickHouse/pull/4702) ([hcz](https://github.com/hczhcz))
+- Fix compression level detection when `network_compression_method` is in lowercase. Broken in v19.1. [\#4706](https://github.com/ClickHouse/ClickHouse/pull/4706) ([proller](https://github.com/proller))
+- Fixed ignorance of the `UTC` setting (fixes issue [\#4658](https://github.com/ClickHouse/ClickHouse/issues/4658)). [\#4718](https://github.com/ClickHouse/ClickHouse/pull/4718) ([proller](https://github.com/proller))
+- Fix `histogram` function behaviour with `Distributed` tables. [\#4741](https://github.com/ClickHouse/ClickHouse/pull/4741) ([Oleg](https://github.com/olegkv))
+- Fixed the TSan report `destroy of a locked mutex`. [\#4742](https://github.com/ClickHouse/ClickHouse/pull/4742) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a TSan report on shutdown due to a race condition in system logs usage. Fixed a potential use-after-free on shutdown when part_log is enabled. [\#4758](https://github.com/ClickHouse/ClickHouse/pull/4758) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix rechecking parts in `ReplicatedMergeTreeAlterThread` in case of error. [\#4772](https://github.com/ClickHouse/ClickHouse/pull/4772) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Arithmetic operations on intermediate aggregate function states were not working for constant arguments (such as subquery results). [\#4776](https://github.com/ClickHouse/ClickHouse/pull/4776) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Always backquote column names in metadata. Otherwise it's impossible to create a table with a column named `index` (the server won't restart due to a malformed `ATTACH` query in metadata). [\#4782](https://github.com/ClickHouse/ClickHouse/pull/4782) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix crash in `ALTER ... MODIFY ORDER BY` on a `Distributed` table. [\#4790](https://github.com/ClickHouse/ClickHouse/pull/4790) ([TCeason](https://github.com/TCeason))
+- Fix segfault in `JOIN ON` with enabled `enable_optimize_predicate_expression`. [\#4794](https://github.com/ClickHouse/ClickHouse/pull/4794) ([Winter Zhang](https://github.com/zhang2014))
+- Fix a bug with adding an extraneous row after consuming a protobuf message from Kafka. [\#4808](https://github.com/ClickHouse/ClickHouse/pull/4808) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fix segmentation fault in `clickhouse-copier`. [\#4835](https://github.com/ClickHouse/ClickHouse/pull/4835) ([proller](https://github.com/proller))
+- Fixed a race condition in `SELECT` from `system.tables` if the table is renamed or altered concurrently. [\#4836](https://github.com/ClickHouse/ClickHouse/pull/4836) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a data race when fetching a data part that is already obsolete. [\#4839](https://github.com/ClickHouse/ClickHouse/pull/4839) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a rare data race that could happen during `RENAME` of a MergeTree family table. [\#4844](https://github.com/ClickHouse/ClickHouse/pull/4844) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a segmentation fault in the function `arrayIntersect`. It could happen if the function was called with mixed constant and ordinary arguments. [\#4847](https://github.com/ClickHouse/ClickHouse/pull/4847) ([Lixiang Qian](https://github.com/fancyqlx))
+- Fixed reading from an `Array(LowCardinality)` column in the rare case when the column contains a long sequence of empty arrays. [\#4850](https://github.com/ClickHouse/ClickHouse/pull/4850) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed a `No message received` exception while fetching parts between replicas. [\#4856](https://github.com/ClickHouse/ClickHouse/pull/4856) ([alesapin](https://github.com/alesapin))
+- Fixed the `arrayIntersect` function giving a wrong result in case of several repeated values in a single array. [\#4871](https://github.com/ClickHouse/ClickHouse/pull/4871) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix a race condition during concurrent `ALTER COLUMN` queries that could lead to a server crash (fixes issue [\#3421](https://github.com/ClickHouse/ClickHouse/issues/3421)). [\#4592](https://github.com/ClickHouse/ClickHouse/pull/4592) ([Alex Zatelepin](https://github.com/ztlpn))
+- Fix parameter deduction in `ALTER MODIFY` of a column `CODEC` when the column type is not specified. [\#4883](https://github.com/ClickHouse/ClickHouse/pull/4883) ([alesapin](https://github.com/alesapin))
+- The functions `cutQueryStringAndFragment()` and `queryStringAndFragment()` now work correctly when the `URL` contains a fragment and no query. [\#4894](https://github.com/ClickHouse/ClickHouse/pull/4894) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fix a rare bug when the setting `min_bytes_to_use_direct_io` is greater than zero, which occurs when a thread has to seek backward in a column file. [\#4897](https://github.com/ClickHouse/ClickHouse/pull/4897) ([alesapin](https://github.com/alesapin))
+- Fix wrong argument types for aggregate functions with `LowCardinality` arguments (fixes issue [\#4919](https://github.com/ClickHouse/ClickHouse/issues/4919)). [\#4922](https://github.com/ClickHouse/ClickHouse/pull/4922) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed the function `toISOWeek` result for year 1970. [\#4988](https://github.com/ClickHouse/ClickHouse/pull/4988) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed `DROP`, `TRUNCATE` and `OPTIMIZE` query duplication when executed `ON CLUSTER` for the `ReplicatedMergeTree*` family of tables. [\#4991](https://github.com/ClickHouse/ClickHouse/pull/4991) ([alesapin](https://github.com/alesapin))
+
+#### Improvements {#improvements-2}
+
+- Keep ordinary, `DEFAULT`, `MATERIALIZED` and `ALIAS` columns in a single list (fixes issue [\#2867](https://github.com/ClickHouse/ClickHouse/issues/2867)). [\#4707](https://github.com/ClickHouse/ClickHouse/pull/4707) ([Alex Zatelepin](https://github.com/ztlpn))
+
+### ClickHouse release 19.4.3.11, 2019-04-02 {#clickhouse-release-19-4-3-11-2019-04-02}
+
+#### Bug Fixes {#bug-fixes-8}
+
+- Fix crash in `FULL/RIGHT JOIN` when joining on nullable vs not nullable. [\#4855](https://github.com/ClickHouse/ClickHouse/pull/4855) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix segmentation fault in `clickhouse-copier`. [\#4835](https://github.com/ClickHouse/ClickHouse/pull/4835) ([proller](https://github.com/proller))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-11}
+
+- Add a way to launch the clickhouse-server image from a custom user. [\#4753](https://github.com/ClickHouse/ClickHouse/pull/4753) ([Mikhail f. Shiryaev](https://github.com/Felixoid))
+
+### ClickHouse release 19.4.2.7, 2019-03-30 {#clickhouse-release-19-4-2-7-2019-03-30}
+
+#### Bug Fixes {#bug-fixes-9}
+
+- Fixed reading from an `Array(LowCardinality)` column in the rare case when the column contains a long sequence of empty arrays. [\#4850](https://github.com/ClickHouse/ClickHouse/pull/4850) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+
+### ClickHouse release 19.4.1.3, 2019-03-19 {#clickhouse-release-19-4-1-3-2019-03-19}
+
+#### Bug Fixes {#bug-fixes-10}
+
+- Fixed remote queries that contain both `LIMIT BY` and `LIMIT`. Previously, if `LIMIT BY` and `LIMIT` were used for a remote query, `LIMIT` could happen before `LIMIT BY`, which led to a too-filtered result. [\#4708](https://github.com/ClickHouse/ClickHouse/pull/4708) ([Constantin S. Pan](https://github.com/kvap))
+
+### ClickHouse release 19.4.0.49, 2019-03-09 {#clickhouse-release-19-4-0-49-2019-03-09}
+
+#### New Features {#new-features-5}
+
+- Added full support for the `Protobuf` format (input and output, nested data structures). [\#4174](https://github.com/ClickHouse/ClickHouse/pull/4174) [\#4493](https://github.com/ClickHouse/ClickHouse/pull/4493) ([Vitaly Baranov](https://github.com/vitlibar))
+- Added bitmap functions with Roaring Bitmaps. [\#4207](https://github.com/ClickHouse/ClickHouse/pull/4207) ([Andy Yang](https://github.com/andyyzh)) [\#4568](https://github.com/ClickHouse/ClickHouse/pull/4568) ([Vitaly Baranov](https://github.com/vitlibar))
+- Parquet format support. [\#4448](https://github.com/ClickHouse/ClickHouse/pull/4448) ([proller](https://github.com/proller))
+- N-gram distance was added for fuzzy string comparison. It is similar to q-gram metrics in the R language. [\#4466](https://github.com/ClickHouse/ClickHouse/pull/4466) ([Danila Kutenin](https://github.com/danlark1))
+- Combine rules for graphite rollup from dedicated aggregation and retention patterns. [\#4426](https://github.com/ClickHouse/ClickHouse/pull/4426) ([Mikhail f. Shiryaev](https://github.com/Felixoid))
+- Added `max_execution_speed` and `max_execution_speed_bytes` to limit resource usage. Added the `min_execution_speed_bytes` setting to complement `min_execution_speed`. [\#4430](https://github.com/ClickHouse/ClickHouse/pull/4430) ([Winter Zhang](https://github.com/zhang2014))
+- Implemented the function `flatten`. [\#4555](https://github.com/ClickHouse/ClickHouse/pull/4555) [\#4409](https://github.com/ClickHouse/ClickHouse/pull/4409) ([alexey-milovidov](https://github.com/alexey-milovidov), [kzon](https://github.com/kzon))
+- Added functions `arrayEnumerateDenseRanked` and `arrayEnumerateUniqRanked` (it's like `arrayEnumerateUniq`, but allows fine-tuning the array depth to look inside multidimensional arrays). [\#4475](https://github.com/ClickHouse/ClickHouse/pull/4475) ([proller](https://github.com/proller)) [\#4601](https://github.com/ClickHouse/ClickHouse/pull/4601) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Multiple JOINS with some restrictions: no asterisks, no complex aliases in ON/WHERE/GROUP BY/… [\#4462](https://github.com/ClickHouse/ClickHouse/pull/4462) ([Artem Zuikov](https://github.com/4ertus2))
+
+#### Bug Fixes {#bug-fixes-11}
+
+- This release also contains all bug fixes from 19.3 and 19.1.
+- Fixed a bug in data skipping indices: the order of granules after INSERT was incorrect. [\#4407](https://github.com/ClickHouse/ClickHouse/pull/4407) ([Nikita Vasilev](https://github.com/nikvas0))
+- Fixed the `set` index for `Nullable` and `LowCardinality` columns. Before it, a `set` index with a `Nullable` or `LowCardinality` column led to the error `Data type must be deserialized with multiple streams` while selecting. [\#4594](https://github.com/ClickHouse/ClickHouse/pull/4594) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Correctly set update_time on a full `executable` dictionary update. [\#4551](https://github.com/ClickHouse/ClickHouse/pull/4551) ([Tema Novikov](https://github.com/temoon))
+- Fix the broken progress bar in 19.3. [\#4627](https://github.com/ClickHouse/ClickHouse/pull/4627) ([filimonov](https://github.com/filimonov))
+- Fixed inconsistent values of MemoryTracker when a memory region was shrunk, in certain cases. [\#4619](https://github.com/ClickHouse/ClickHouse/pull/4619) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed undefined behaviour in ThreadPool. [\#4612](https://github.com/ClickHouse/ClickHouse/pull/4612) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a very rare crash with the message `mutex lock failed: Invalid argument` that could happen when a MergeTree table was dropped concurrently with a SELECT. [\#4608](https://github.com/ClickHouse/ClickHouse/pull/4608) ([Alex Zatelepin](https://github.com/ztlpn))
+- ODBC driver compatibility with the `LowCardinality` data type. [\#4381](https://github.com/ClickHouse/ClickHouse/pull/4381) ([proller](https://github.com/proller))
+- FreeBSD: Fixup for the `AIOcontextPool: Found io_event with unknown id 0` error. [\#4438](https://github.com/ClickHouse/ClickHouse/pull/4438) ([urgordeadbeef](https://github.com/urgordeadbeef))
+- The `system.part_log` table was created regardless of the configuration. [\#4483](https://github.com/ClickHouse/ClickHouse/pull/4483) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix undefined behaviour in the `dictIsIn` function for cache dictionaries. [\#4515](https://github.com/ClickHouse/ClickHouse/pull/4515) ([alesapin](https://github.com/alesapin))
+- Fixed a deadlock when a SELECT query locks the same table multiple times (e.g. from different threads or when executing multiple subqueries) and there is a concurrent DDL query. [\#4535](https://github.com/ClickHouse/ClickHouse/pull/4535) ([Alex Zatelepin](https://github.com/ztlpn))
+
+#### Bug Fixes {#bug-fixes-11}
+
+- This release also contains all bug fixes from 19.3 and 19.1.
+- Fixed bug in data skipping indices: order of granules after INSERT was incorrect. [\#4407](https://github.com/ClickHouse/ClickHouse/pull/4407) ([Nikita Vasilev](https://github.com/nikvas0))
+- Fixed `set` index for `Nullable` and `LowCardinality` columns. Before it, `set` index with `Nullable` or `LowCardinality` column led to error `Data type must be deserialized with multiple streams` while selecting. [\#4594](https://github.com/ClickHouse/ClickHouse/pull/4594) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Correctly set update\_time on full `executable` dictionary update. [\#4551](https://github.com/ClickHouse/ClickHouse/pull/4551) ([Tema Novikov](https://github.com/temoon))
+- Fixed broken progress bar in 19.3. [\#4627](https://github.com/ClickHouse/ClickHouse/pull/4627) ([filimonov](https://github.com/filimonov))
+- Fixed inconsistent values of MemoryTracker when memory region was shrinked, in certain cases. [\#4619](https://github.com/ClickHouse/ClickHouse/pull/4619) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed undefined behaviour in ThreadPool. [\#4612](https://github.com/ClickHouse/ClickHouse/pull/4612) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a very rare crash with the message `mutex lock failed: Invalid argument` that could happen when a MergeTree table was dropped concurrently with a SELECT. [\#4608](https://github.com/ClickHouse/ClickHouse/pull/4608) ([Alex Zatelepin](https://github.com/ztlpn))
+- ODBC driver compatibility with `LowCardinality` data type. [\#4381](https://github.com/ClickHouse/ClickHouse/pull/4381) ([proller](https://github.com/proller))
+- FreeBSD: fixup for `AIOcontextPool: Found io_event with unknown id 0` error. [\#4438](https://github.com/ClickHouse/ClickHouse/pull/4438) ([urgordeadbeef](https://github.com/urgordeadbeef))
+- `system.part_log` table was created regardless of configuration. [\#4483](https://github.com/ClickHouse/ClickHouse/pull/4483) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed undefined behaviour in `dictIsIn` function for cache dictionaries. [\#4515](https://github.com/ClickHouse/ClickHouse/pull/4515) ([alesapin](https://github.com/alesapin))
+- Fixed a deadlock when a SELECT query locks the same table multiple times (e.g. from different threads or when executing multiple subqueries) and there is a concurrent DDL query. [\#4535](https://github.com/ClickHouse/ClickHouse/pull/4535) ([Alex Zatelepin](https://github.com/ztlpn))
+- Disabled compile\_expressions by default until we get our own `llvm` contrib and can test it with `clang` and `asan`. [\#4579](https://github.com/ClickHouse/ClickHouse/pull/4579) ([alesapin](https://github.com/alesapin))
+- Prevent `std::terminate` when `invalidate_query` for a `clickhouse` external dictionary source has returned a wrong resultset (empty, or more than one row, or more than one column). Fixed the issue when `invalidate_query` was performed every five seconds regardless of the `lifetime`. [\#4583](https://github.com/ClickHouse/ClickHouse/pull/4583) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Avoid deadlock when the `invalidate_query` for a dictionary with `clickhouse` source was involving the `system.dictionaries` table or `Dictionaries` database (rare case). [\#4599](https://github.com/ClickHouse/ClickHouse/pull/4599) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed CROSS JOIN with empty WHERE. [\#4598](https://github.com/ClickHouse/ClickHouse/pull/4598) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed segfault in function “replicate” when a constant argument is passed. [\#4603](https://github.com/ClickHouse/ClickHouse/pull/4603) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed lambda function with predicate optimizer. [\#4408](https://github.com/ClickHouse/ClickHouse/pull/4408) ([Winter Zhang](https://github.com/zhang2014))
+- Multiple JOINs multiple fixes. [\#4595](https://github.com/ClickHouse/ClickHouse/pull/4595) ([Artem Zuikov](https://github.com/4ertus2))
+
+#### Improvements {#improvements-3}
+
+- Support aliases in the JOIN ON section for right table columns. [\#4412](https://github.com/ClickHouse/ClickHouse/pull/4412) ([Artem Zuikov](https://github.com/4ertus2))
+- Result of multiple JOINs needs correct result names to be used in subselects. Replace flat aliases with source names in the result. [\#4474](https://github.com/ClickHouse/ClickHouse/pull/4474) ([Artem Zuikov](https://github.com/4ertus2))
+- Improve push-down logic for joined statements. [\#4387](https://github.com/ClickHouse/ClickHouse/pull/4387) ([Ivan](https://github.com/abyss7))
+
+#### Performance Improvements {#performance-improvements-3}
+
+- Improved heuristics of the “move to PREWHERE” optimization. [\#4405](https://github.com/ClickHouse/ClickHouse/pull/4405) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Use proper lookup tables that use HashTable’s API for 8-bit and 16-bit keys. [\#4536](https://github.com/ClickHouse/ClickHouse/pull/4536) ([Amos Bird](https://github.com/amosbird))
+- Improved performance of string comparison. [\#4564](https://github.com/ClickHouse/ClickHouse/pull/4564) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Cleanup the distributed DDL queue in a separate thread so that it doesn’t slow down the main loop that processes distributed DDL tasks. [\#4502](https://github.com/ClickHouse/ClickHouse/pull/4502) ([Alex Zatelepin](https://github.com/ztlpn))
+- When `min_bytes_to_use_direct_io` is set to 1, not every file was opened with O\_DIRECT mode because the data size to read was sometimes underestimated by the size of one compressed block. [\#4526](https://github.com/ClickHouse/ClickHouse/pull/4526) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-12}
+
+- Added support for clang-9. [\#4604](https://github.com/ClickHouse/ClickHouse/pull/4604) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed wrong `__asm__` instructions (again). [\#4621](https://github.com/ClickHouse/ClickHouse/pull/4621) ([Konstantin Podshumok](https://github.com/podshumok))
+- Added ability to specify settings for `clickhouse-performance-test` from the command line. [\#4437](https://github.com/ClickHouse/ClickHouse/pull/4437) ([alesapin](https://github.com/alesapin))
+- Added dictionaries tests to integration tests. [\#4477](https://github.com/ClickHouse/ClickHouse/pull/4477) ([alesapin](https://github.com/alesapin))
+- Added queries from the benchmark on the website to automated performance tests. [\#4496](https://github.com/ClickHouse/ClickHouse/pull/4496) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `xxhash.h` does not exist in external lz4 because it is an implementation detail and its symbols are namespaced with the `XXH_NAMESPACE` macro. When lz4 is external, xxHash has to be external too, and the dependents have to link to it. [\#4495](https://github.com/ClickHouse/ClickHouse/pull/4495) ([Orivej Desh](https://github.com/orivej))
+- Fixed a case when the `quantileTiming` aggregate function can be called with a negative or floating point argument (this fixes a fuzz test with undefined behaviour sanitizer). [\#4506](https://github.com/ClickHouse/ClickHouse/pull/4506) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Spelling error correction. [\#4531](https://github.com/ClickHouse/ClickHouse/pull/4531) ([sdk2](https://github.com/sdk2))
+- Fixed compilation on Mac. [\#4371](https://github.com/ClickHouse/ClickHouse/pull/4371) ([Vitaly Baranov](https://github.com/vitlibar))
+- Build fixes for FreeBSD and various unusual build configurations. [\#4444](https://github.com/ClickHouse/ClickHouse/pull/4444) ([proller](https://github.com/proller))
+
+## ClickHouse release 19.3 {#clickhouse-release-19-3}
+
+### ClickHouse release 19.3.9.1, 2019-04-02 {#clickhouse-release-19-3-9-1-2019-04-02}
+
+#### Bug Fixes {#bug-fixes-12}
+
+- Fixed crash in `FULL/RIGHT JOIN` when joining on nullable vs not nullable. [\#4855](https://github.com/ClickHouse/ClickHouse/pull/4855) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed segmentation fault in `clickhouse-copier`. [\#4835](https://github.com/ClickHouse/ClickHouse/pull/4835) ([proller](https://github.com/proller))
+- Fixed reading from `Array(LowCardinality)` column in a rare case when the column contained a long sequence of empty arrays. [\#4850](https://github.com/ClickHouse/ClickHouse/pull/4850) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-13}
+
+- Added a way to launch the clickhouse-server image from a custom user [\#4753](https://github.com/ClickHouse/ClickHouse/pull/4753) ([Mikhail f. Shiryaev](https://github.com/Felixoid))
+
+### ClickHouse release 19.3.7, 2019-03-12 {#clickhouse-release-19-3-7-2019-03-12}
+
+#### Bug Fixes {#bug-fixes-13}
+
+- Fixed error in \#3920. This error manifests itself as random cache corruption (messages `Unknown codec family code`, `Cannot seek through file`) and segfaults. This bug first appeared in version 19.1 and is present in versions up to 19.1.10 and 19.3.6. [\#4623](https://github.com/ClickHouse/ClickHouse/pull/4623) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse release 19.3.6, 2019-03-02 {#clickhouse-release-19-3-6-2019-03-02}
+
+#### Bug Fixes {#bug-fixes-14}
+
+- When there are more than 1000 threads in a thread pool, `std::terminate` may happen on thread exit. [Azat Khuzhin](https://github.com/azat) [\#4485](https://github.com/ClickHouse/ClickHouse/pull/4485) [\#4505](https://github.com/ClickHouse/ClickHouse/pull/4505) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Now it’s possible to create `ReplicatedMergeTree*` tables with comments on columns without defaults, and tables with column codecs without comments and defaults. Also fixed comparison of codecs. [\#4523](https://github.com/ClickHouse/ClickHouse/pull/4523) ([alesapin](https://github.com/alesapin))
+- Fixed crash on JOIN with array or tuple. [\#4552](https://github.com/ClickHouse/ClickHouse/pull/4552) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed crash in clickhouse-copier with the message `ThreadStatus not created`. [\#4540](https://github.com/ClickHouse/ClickHouse/pull/4540) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed hangup on server shutdown if distributed DDLs were used. [\#4472](https://github.com/ClickHouse/ClickHouse/pull/4472) ([Alex Zatelepin](https://github.com/ztlpn))
+- Incorrect column numbers were printed in the error message about text format parsing for columns with numbers greater than 10. [\#4484](https://github.com/ClickHouse/ClickHouse/pull/4484) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-3}
+
+- Fixed build with AVX enabled. [\#4527](https://github.com/ClickHouse/ClickHouse/pull/4527) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Enable extended accounting and IO accounting based on a known good version instead of the kernel under which it is compiled. [\#4541](https://github.com/ClickHouse/ClickHouse/pull/4541) ([nvartolomei](https://github.com/nvartolomei))
+- Allow to skip setting of core\_dump.size\_limit, warning instead of throw if setting the limit fails. [\#4473](https://github.com/ClickHouse/ClickHouse/pull/4473) ([proller](https://github.com/proller))
+- Removed the `inline` tags of `void readBinary(...)` in `Field.cpp`. Also merged redundant `namespace DB` blocks. [\#4530](https://github.com/ClickHouse/ClickHouse/pull/4530) ([hcz](https://github.com/hczhcz))
+
+### ClickHouse release 19.3.5, 2019-02-21 {#clickhouse-release-19-3-5-2019-02-21}
+
+#### Bug Fixes {#bug-fixes-15}
+
+- Fixed bug with large http insert queries processing. [\#4454](https://github.com/ClickHouse/ClickHouse/pull/4454) ([alesapin](https://github.com/alesapin))
+- Fixed backward incompatibility with old versions due to wrong implementation of the `send_logs_level` setting. [\#4445](https://github.com/ClickHouse/ClickHouse/pull/4445) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed backward incompatibility of table function `remote` introduced with column comments. [\#4446](https://github.com/ClickHouse/ClickHouse/pull/4446) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse release 19.3.4, 2019-02-16 {#clickhouse-release-19-3-4-2019-02-16}
+
+#### Improvements {#improvements-4}
+
+- Table index size is not accounted for memory limits when doing an `ATTACH TABLE` query. Avoided the possibility that a table cannot be attached after being detached. [\#4396](https://github.com/ClickHouse/ClickHouse/pull/4396) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Slightly raised the limit on max string and array sizes received from ZooKeeper. It allows to continue working with an increased `CLIENT_JVMFLAGS=-Djute.maxbuffer=...` size on ZooKeeper. [\#4398](https://github.com/ClickHouse/ClickHouse/pull/4398) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Allow to repair an abandoned replica even if it already has a huge number of nodes in its queue. [\#4399](https://github.com/ClickHouse/ClickHouse/pull/4399) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added one required argument to the `SET` index (max stored rows number); see the sketch after this list. [\#4386](https://github.com/ClickHouse/ClickHouse/pull/4386) ([Nikita Vasilev](https://github.com/nikvas0))
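+
+A minimal sketch of a data-skipping `set` index with the now-required argument; the table layout is hypothetical:
+
+```sql
+CREATE TABLE example
+(
+    dt Date,
+    user_id UInt64,
+    -- set(100): keep at most 100 distinct values per indexed block
+    -- (data skipping indices were experimental at the time and may require enabling a setting)
+    INDEX user_set user_id TYPE set(100) GRANULARITY 4
+)
+ENGINE = MergeTree
+ORDER BY dt;
+```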
+
+#### Bug Fixes {#bug-fixes-16}
+
+- Fixed `WITH ROLLUP` result for group by single `LowCardinality` key. [\#4384](https://github.com/ClickHouse/ClickHouse/pull/4384) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed bug in the set index (dropping a granule if it contains more than `max_rows` rows). [\#4386](https://github.com/ClickHouse/ClickHouse/pull/4386) ([Nikita Vasilev](https://github.com/nikvas0))
+- A lot of FreeBSD build fixes. [\#4397](https://github.com/ClickHouse/ClickHouse/pull/4397) ([proller](https://github.com/proller))
+- Fixed aliases substitution in queries with a subquery containing the same alias (issue [\#4110](https://github.com/ClickHouse/ClickHouse/issues/4110)). [\#4351](https://github.com/ClickHouse/ClickHouse/pull/4351) ([Artem Zuikov](https://github.com/4ertus2))
+
+#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-4}
+
+- Added ability to run `clickhouse-server` for stateless tests in a docker image. [\#4347](https://github.com/ClickHouse/ClickHouse/pull/4347) ([Vasily Nemkov](https://github.com/Enmk))
+
+### ClickHouse release 19.3.3, 2019-02-13 {#clickhouse-release-19-3-3-2019-02-13}
+
+#### New Features {#new-features-6}
+
+- Added the `KILL MUTATION` statement that allows removing mutations that are for some reasons stuck. Added `latest_failed_part`, `latest_fail_time`, `latest_fail_reason` fields to the `system.mutations` table for easier troubleshooting. [\#4287](https://github.com/ClickHouse/ClickHouse/pull/4287) ([Alex Zatelepin](https://github.com/ztlpn))
+- Added aggregate function `entropy` which computes Shannon entropy. [\#4238](https://github.com/ClickHouse/ClickHouse/pull/4238) ([Quid37](https://github.com/Quid37))
+- Added ability to send queries `INSERT INTO tbl VALUES (....` to the server without splitting on `query` and `data` parts. [\#4301](https://github.com/ClickHouse/ClickHouse/pull/4301) ([alesapin](https://github.com/alesapin))
+- Generic implementation of the `arrayWithConstant` function was added. [\#4322](https://github.com/ClickHouse/ClickHouse/pull/4322) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Implemented the `NOT BETWEEN` comparison operator. [\#4228](https://github.com/ClickHouse/ClickHouse/pull/4228) ([Dmitry Naumov](https://github.com/nezed))
+- Implemented `sumMapFiltered` in order to be able to limit the number of keys for which values will be summed by `sumMap` (see the sketch after this list). [\#4129](https://github.com/ClickHouse/ClickHouse/pull/4129) ([Léo Ercolanelli](https://github.com/ercolanelli-leo))
+- Added support of `Nullable` types in the `mysql` table function. [\#4198](https://github.com/ClickHouse/ClickHouse/pull/4198) ([Emmanuel Donin de Rosière](https://github.com/edonin))
+- Support for arbitrary constant expressions in the `LIMIT` clause. [\#4246](https://github.com/ClickHouse/ClickHouse/pull/4246) ([k3box](https://github.com/k3box))
+- Added the `topKWeighted` aggregate function that takes an additional argument with (unsigned integer) weight. [\#4245](https://github.com/ClickHouse/ClickHouse/pull/4245) ([Andrew Golman](https://github.com/andrewgolman))
+- `StorageJoin` now supports the `join_any_take_last_row` setting that allows overwriting existing values of the same key. [\#3973](https://github.com/ClickHouse/ClickHouse/pull/3973) ([Amos Bird](https://github.com/amosbird))
+- Added function `toStartOfInterval`. [\#4304](https://github.com/ClickHouse/ClickHouse/pull/4304) ([Vitaly Baranov](https://github.com/vitlibar))
+- Added `RowBinaryWithNamesAndTypes` format. [\#4200](https://github.com/ClickHouse/ClickHouse/pull/4200) ([Oleg V. Kozlyuk](https://github.com/DarkWanderer))
+- Added `IPv4` and `IPv6` data types. More effective implementations of `IPv*` functions. [\#3669](https://github.com/ClickHouse/ClickHouse/pull/3669) ([Vasily Nemkov](https://github.com/Enmk))
+- Added function `toStartOfTenMinutes()`. [\#4298](https://github.com/ClickHouse/ClickHouse/pull/4298) ([Vitaly Baranov](https://github.com/vitlibar))
+- Added `Protobuf` output format. [\#4005](https://github.com/ClickHouse/ClickHouse/pull/4005) [\#4158](https://github.com/ClickHouse/ClickHouse/pull/4158) ([Vitaly Baranov](https://github.com/vitlibar))
+- Added brotli support for the HTTP interface for data import (INSERTs). [\#4235](https://github.com/ClickHouse/ClickHouse/pull/4235) ([Mikhail](https://github.com/fandyushin))
+- Added hints while the user makes a typo in a function name or type in the command line client. [\#4239](https://github.com/ClickHouse/ClickHouse/pull/4239) ([Danila Kutenin](https://github.com/danlark1))
+- Added `Query-Id` to the Server’s HTTP Response header. [\#4231](https://github.com/ClickHouse/ClickHouse/pull/4231) ([Mikhail](https://github.com/fandyushin))
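+
+A minimal sketch of `sumMapFiltered`; the parametric key list and column names are illustrative, not from the release notes:
+
+```sql
+-- Sum values only for keys 1, 4 and 8; all other keys are ignored.
+SELECT sumMapFiltered([1, 4, 8])(status_keys, request_counts)
+FROM requests_by_status;
+```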
+
+#### Experimental Features {#experimental-features-2}
+
+- Added `minmax` and `set` data skipping indices for the MergeTree table engines family. [\#4143](https://github.com/ClickHouse/ClickHouse/pull/4143) ([Nikita Vasilev](https://github.com/nikvas0))
+- Added conversion of `CROSS JOIN` to `INNER JOIN` if possible. [\#4221](https://github.com/ClickHouse/ClickHouse/pull/4221) [\#4266](https://github.com/ClickHouse/ClickHouse/pull/4266) ([Artem Zuikov](https://github.com/4ertus2))
+
+#### Bug Fixes {#bug-fixes-17}
+
+- Fixed `Not found column` for duplicate columns in the `JOIN ON` section. [\#4279](https://github.com/ClickHouse/ClickHouse/pull/4279) ([Artem Zuikov](https://github.com/4ertus2))
+- Made the `START REPLICATED SENDS` command start replicated sends. [\#4229](https://github.com/ClickHouse/ClickHouse/pull/4229) ([nvartolomei](https://github.com/nvartolomei))
+- Fixed aggregate functions execution with `Array(LowCardinality)` arguments. [\#4055](https://github.com/ClickHouse/ClickHouse/pull/4055) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Fixed wrong behaviour when doing an `INSERT ... SELECT ... FROM file(...)` query and the file has `CSVWithNames` or `TSVWithNames` format and the first data row is missing. [\#4297](https://github.com/ClickHouse/ClickHouse/pull/4297) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed crash on dictionary reload if the dictionary is not available. This bug appeared in 19.1.6. [\#4188](https://github.com/ClickHouse/ClickHouse/pull/4188) ([proller](https://github.com/proller))
+- Fixed `ALL JOIN` with duplicates in the right table. [\#4184](https://github.com/ClickHouse/ClickHouse/pull/4184) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed segmentation fault with `use_uncompressed_cache=1` and exception with wrong uncompressed size. This bug appeared in 19.1.6. [\#4186](https://github.com/ClickHouse/ClickHouse/pull/4186) ([alesapin](https://github.com/alesapin))
+- Fixed `compile_expressions` bug with comparison of big (more than int16) dates. [\#4341](https://github.com/ClickHouse/ClickHouse/pull/4341) ([alesapin](https://github.com/alesapin))
+- Fixed infinite loop when selecting from table function `numbers(0)`. [\#4280](https://github.com/ClickHouse/ClickHouse/pull/4280) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Temporarily disabled predicate optimization for `ORDER BY`. [\#3890](https://github.com/ClickHouse/ClickHouse/pull/3890) ([Winter Zhang](https://github.com/zhang2014))
+- Fixed `Illegal instruction` error when using base64 functions on old CPUs. This error has been reproduced only when ClickHouse was compiled with gcc-8. [\#4275](https://github.com/ClickHouse/ClickHouse/pull/4275) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed `No message received` error when interacting with PostgreSQL ODBC Driver through TLS connection. Also fixes segfault when using MySQL ODBC Driver. [\#4170](https://github.com/ClickHouse/ClickHouse/pull/4170) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed incorrect result when `Date` and `DateTime` arguments are used in branches of the conditional operator (function `if`). Added generic case for function `if`. [\#4243](https://github.com/ClickHouse/ClickHouse/pull/4243) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- ClickHouse dictionaries now load within the `clickhouse` process. [\#4166](https://github.com/ClickHouse/ClickHouse/pull/4166) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed deadlock when a `SELECT` from a table with `File` engine was retried after a `No such file or directory` error. [\#4161](https://github.com/ClickHouse/ClickHouse/pull/4161) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed race condition when selecting from `system.tables` may give a `table doesn't exist` error. [\#4313](https://github.com/ClickHouse/ClickHouse/pull/4313) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `clickhouse-client` can segfault on exit while loading data for command line suggestions if it was run in interactive mode. [\#4317](https://github.com/ClickHouse/ClickHouse/pull/4317) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed bug when the execution of mutations containing `IN` operators was producing incorrect results. [\#4099](https://github.com/ClickHouse/ClickHouse/pull/4099) ([Alex Zatelepin](https://github.com/ztlpn))
+- Fixed error: if there is a database with a `Dictionary` engine, all dictionaries are forced to load at server startup, and if there is a dictionary with ClickHouse source from localhost, the dictionary cannot load. [\#4255](https://github.com/ClickHouse/ClickHouse/pull/4255) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed error when system logs are tried to be created again at server shutdown. [\#4254](https://github.com/ClickHouse/ClickHouse/pull/4254) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Correctly return the right type and properly handle locks in the `joinGet` function. [\#4153](https://github.com/ClickHouse/ClickHouse/pull/4153) ([Amos Bird](https://github.com/amosbird))
+- Added `sumMapWithOverflow` function. [\#4151](https://github.com/ClickHouse/ClickHouse/pull/4151) ([Léo Ercolanelli](https://github.com/ercolanelli-leo))
+- Fixed segfault with `allow_experimental_multiple_joins_emulation`. [52de2cd9](https://github.com/ClickHouse/ClickHouse/commit/52de2cd927f7b5257dd67e175f0a5560a48840d0) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed bug with incorrect `Date` and `DateTime` comparison. [\#4237](https://github.com/ClickHouse/ClickHouse/pull/4237) ([valexey](https://github.com/valexey))
+- Fixed fuzz test under undefined behavior sanitizer: added parameter type check for the `quantile*Weighted` family of functions. [\#4145](https://github.com/ClickHouse/ClickHouse/pull/4145) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed rare race condition when removal of old data parts could fail with a `File not found` error. [\#4378](https://github.com/ClickHouse/ClickHouse/pull/4378) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed install package with missing /etc/clickhouse-server/config.xml. [\#4343](https://github.com/ClickHouse/ClickHouse/pull/4343) ([proller](https://github.com/proller))
+
+#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-5}
+
+- Debian package: correct the /etc/clickhouse-server/preprocessed link according to config. [\#4205](https://github.com/ClickHouse/ClickHouse/pull/4205) ([proller](https://github.com/proller))
+- Various build fixes for FreeBSD. [\#4225](https://github.com/ClickHouse/ClickHouse/pull/4225) ([proller](https://github.com/proller))
+- Added ability to create, fill and drop tables in perftest. [\#4220](https://github.com/ClickHouse/ClickHouse/pull/4220) ([alesapin](https://github.com/alesapin))
+- Added a script to check for duplicate includes. [\#4326](https://github.com/ClickHouse/ClickHouse/pull/4326) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added ability to run queries by index in performance test. [\#4264](https://github.com/ClickHouse/ClickHouse/pull/4264) ([alesapin](https://github.com/alesapin))
+- Package with debug symbols is suggested to be installed. [\#4274](https://github.com/ClickHouse/ClickHouse/pull/4274) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Refactoring of performance-test. Better logging and signals handling. [\#4171](https://github.com/ClickHouse/ClickHouse/pull/4171) ([alesapin](https://github.com/alesapin))
+- Added docs to the anonymized Yandex.Metrica datasets. [\#4164](https://github.com/ClickHouse/ClickHouse/pull/4164) ([alesapin](https://github.com/alesapin))
+- Added a tool for converting an old month-partitioned part to the custom-partitioned format. [\#4195](https://github.com/ClickHouse/ClickHouse/pull/4195) ([Alex Zatelepin](https://github.com/ztlpn))
+- Added docs about two datasets in s3. [\#4144](https://github.com/ClickHouse/ClickHouse/pull/4144) ([alesapin](https://github.com/alesapin))
+- Added a script which creates the changelog from pull requests descriptions. [\#4169](https://github.com/ClickHouse/ClickHouse/pull/4169) [\#4173](https://github.com/ClickHouse/ClickHouse/pull/4173) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Added a puppet module for ClickHouse. [\#4182](https://github.com/ClickHouse/ClickHouse/pull/4182) ([Maxim Fedotov](https://github.com/MaxFedotov))
+- Added docs for a group of undocumented functions. [\#4168](https://github.com/ClickHouse/ClickHouse/pull/4168) ([Winter Zhang](https://github.com/zhang2014))
+- ARM build fixes. [\#4210](https://github.com/ClickHouse/ClickHouse/pull/4210) [\#4306](https://github.com/ClickHouse/ClickHouse/pull/4306) [\#4291](https://github.com/ClickHouse/ClickHouse/pull/4291) ([proller](https://github.com/proller))
+- Dictionary tests are now able to run from `ctest`. [\#4189](https://github.com/ClickHouse/ClickHouse/pull/4189) ([proller](https://github.com/proller))
+- Now `/etc/ssl` is used as the default directory with SSL certificates. [\#4167](https://github.com/ClickHouse/ClickHouse/pull/4167) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added checking SSE and AVX instructions at start. [\#4234](https://github.com/ClickHouse/ClickHouse/pull/4234) ([igor](https://github.com/igron99))
+- Init script will wait for the server until start. [\#4281](https://github.com/ClickHouse/ClickHouse/pull/4281) ([proller](https://github.com/proller))
+
+#### Backward Incompatible Changes {#backward-incompatible-changes-1}
+
+- Removed the `allow_experimental_low_cardinality_type` setting. `LowCardinality` data types are production ready. [\#4323](https://github.com/ClickHouse/ClickHouse/pull/4323) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Reduced mark cache size and uncompressed cache size accordingly to the available memory amount. [\#4240](https://github.com/ClickHouse/ClickHouse/pull/4240) ([Lopatin Konstantin](https://github.com/k-lopatin))
+- Added keyword `INDEX` in the `CREATE TABLE` query. A column with name `index` must be quoted with backticks or double quotes: `` `index` ``. [\#4143](https://github.com/ClickHouse/ClickHouse/pull/4143) ([Nikita Vasilev](https://github.com/nikvas0))
+- `sumMap` now promotes the result type instead of overflowing. The old `sumMap` behavior can be obtained by using the `sumMapWithOverflow` function. [\#4151](https://github.com/ClickHouse/ClickHouse/pull/4151) ([Léo Ercolanelli](https://github.com/ercolanelli-leo))
+
+#### Performance Improvements {#performance-improvements-4}
+
+- `std::sort` replaced by `pdqsort` for queries without `LIMIT`. [\#4236](https://github.com/ClickHouse/ClickHouse/pull/4236) ([Evgenii Pravda](https://github.com/kvinty))
+- Now the server reuses threads from the global thread pool. This affects performance in some corner cases. [\#4150](https://github.com/ClickHouse/ClickHouse/pull/4150) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Improvements {#improvements-5}
+
+- Implemented AIO support for FreeBSD. [\#4305](https://github.com/ClickHouse/ClickHouse/pull/4305) ([urgordeadbeef](https://github.com/urgordeadbeef))
+- `SELECT * FROM a JOIN b USING a, b` now returns `a` and `b` columns only from the left table (see the sketch after this list). [\#4141](https://github.com/ClickHouse/ClickHouse/pull/4141) ([Artem Zuikov](https://github.com/4ertus2))
+- Allow the `-C` option of the client to work as the `-c` option. [\#4232](https://github.com/ClickHouse/ClickHouse/pull/4232) ([syominsergey](https://github.com/syominsergey))
+- Now the option `--password` used without a value requires the password from stdin. [\#4230](https://github.com/ClickHouse/ClickHouse/pull/4230) ([bsd-conqueror](https://github.com/bsd-conqueror))
+- Added highlighting of unescaped metacharacters in string literals that contain `LIKE` expressions or regexps. [\#4327](https://github.com/ClickHouse/ClickHouse/pull/4327) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added cancelling of HTTP read-only queries if the client socket goes away. [\#4213](https://github.com/ClickHouse/ClickHouse/pull/4213) ([nvartolomei](https://github.com/nvartolomei))
+- Now the server reports progress to keep client connections alive. [\#4215](https://github.com/ClickHouse/ClickHouse/pull/4215) ([Ivan](https://github.com/abyss7))
+- Slightly better message with the reason for an OPTIMIZE query with the `optimize_throw_if_noop` setting enabled. [\#4294](https://github.com/ClickHouse/ClickHouse/pull/4294) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added support of the `--version` option for the ClickHouse server. [\#4251](https://github.com/ClickHouse/ClickHouse/pull/4251) ([Lopatin Konstantin](https://github.com/k-lopatin))
+- Added the `--help/-h` option to `clickhouse-server`. [\#4233](https://github.com/ClickHouse/ClickHouse/pull/4233) ([Yuriy Baranov](https://github.com/yurriy))
+- Added support for scalar subqueries with an aggregate function state result. [\#4348](https://github.com/ClickHouse/ClickHouse/pull/4348) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Improved server shutdown time and ALTERs waiting time. [\#4372](https://github.com/ClickHouse/ClickHouse/pull/4372) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added info about the replicated\_can\_become\_leader setting to system.replicas and added logging if the replica won’t try to become leader. [\#4379](https://github.com/ClickHouse/ClickHouse/pull/4379) ([Alex Zatelepin](https://github.com/ztlpn))
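+
+A minimal sketch of the new `USING` semantics; the tables are hypothetical:
+
+```sql
+CREATE TABLE t1 (a UInt32, b UInt32, x String) ENGINE = Memory;
+CREATE TABLE t2 (a UInt32, b UInt32, y String) ENGINE = Memory;
+
+-- Returns columns a, b, x, y; a and b come only from the left table t1.
+SELECT * FROM t1 JOIN t2 USING a, b;
+```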
+
+## ClickHouse release 19.1 {#clickhouse-release-19-1}
+
+### ClickHouse release 19.1.14, 2019-03-14 {#clickhouse-release-19-1-14-2019-03-14}
+
+- Fixed error `Column ... queried more than once` that may happen if the setting `asterisk_left_columns_only` is set to 1 in case of using `GLOBAL JOIN` with `SELECT *` (rare case). The issue does not exist in 19.3 and newer. [6bac7d8d](https://github.com/ClickHouse/ClickHouse/pull/4692/commits/6bac7d8d11a9b0d6de0b32b53c47eb2f6f8e7062) ([Artem Zuikov](https://github.com/4ertus2))
+
+### ClickHouse release 19.1.13, 2019-03-12 {#clickhouse-release-19-1-13-2019-03-12}
+
+This release contains exactly the same set of patches as 19.3.7.
+
+### ClickHouse release 19.1.10, 2019-03-03 {#clickhouse-release-19-1-10-2019-03-03}
+
+This release contains exactly the same set of patches as 19.3.6.
+
+## ClickHouse release 19.1 {#clickhouse-release-19-1-1}
+
+### ClickHouse release 19.1.9, 2019-02-21 {#clickhouse-release-19-1-9-2019-02-21}
+
+#### Bug Fixes {#bug-fixes-18}
+
+- Fixed backward incompatibility with old versions due to wrong implementation of the `send_logs_level` setting. [\#4445](https://github.com/ClickHouse/ClickHouse/pull/4445) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed backward incompatibility of table function `remote` introduced with column comments. [\#4446](https://github.com/ClickHouse/ClickHouse/pull/4446) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse release 19.1.8, 2019-02-16 {#clickhouse-release-19-1-8-2019-02-16}
+
+#### Bug Fixes {#bug-fixes-19}
+
+- Fixed install package with missing /etc/clickhouse-server/config.xml. [\#4343](https://github.com/ClickHouse/ClickHouse/pull/4343) ([proller](https://github.com/proller))
+
+## ClickHouse release 19.1 {#clickhouse-release-19-1-2}
+
+### ClickHouse release 19.1.7, 2019-02-15 {#clickhouse-release-19-1-7-2019-02-15}
+
+#### Bug Fixes {#bug-fixes-20}
+
+- Correctly return the right type and properly handle locks in the `joinGet` function. [\#4153](https://github.com/ClickHouse/ClickHouse/pull/4153) ([Amos Bird](https://github.com/amosbird))
+- Fixed error when system logs are tried to be created again at server shutdown. [\#4254](https://github.com/ClickHouse/ClickHouse/pull/4254) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed error: if there is a database with a `Dictionary` engine, all dictionaries are forced to load at server startup, and if there is a dictionary with ClickHouse source from localhost, the dictionary cannot load. [\#4255](https://github.com/ClickHouse/ClickHouse/pull/4255) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed bug when the execution of mutations containing `IN` operators was producing incorrect results. [\#4099](https://github.com/ClickHouse/ClickHouse/pull/4099) ([Alex Zatelepin](https://github.com/ztlpn))
+- `clickhouse-client` can segfault on exit while loading data for command line suggestions if it was run in interactive mode. [\#4317](https://github.com/ClickHouse/ClickHouse/pull/4317) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed race condition when selecting from `system.tables` may give a `table doesn't exist` error. [\#4313](https://github.com/ClickHouse/ClickHouse/pull/4313) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed deadlock when a `SELECT` from a table with `File` engine was retried after a `No such file or directory` error. [\#4161](https://github.com/ClickHouse/ClickHouse/pull/4161) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed an issue: local ClickHouse dictionaries were loaded via TCP, but should load within the process. [\#4166](https://github.com/ClickHouse/ClickHouse/pull/4166) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed `No message received` error when interacting with PostgreSQL ODBC Driver through TLS connection. Also fixes segfault when using MySQL ODBC Driver. [\#4170](https://github.com/ClickHouse/ClickHouse/pull/4170) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Temporarily disabled predicate optimization for `ORDER BY`. [\#3890](https://github.com/ClickHouse/ClickHouse/pull/3890) ([Winter Zhang](https://github.com/zhang2014))
+- Fixed infinite loop when selecting from table function `numbers(0)`. [\#4280](https://github.com/ClickHouse/ClickHouse/pull/4280) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed `compile_expressions` bug with comparison of big (more than int16) dates. [\#4341](https://github.com/ClickHouse/ClickHouse/pull/4341) ([alesapin](https://github.com/alesapin))
+- Fixed segmentation fault with `uncompressed_cache=1` and exception with wrong uncompressed size. [\#4186](https://github.com/ClickHouse/ClickHouse/pull/4186) ([alesapin](https://github.com/alesapin))
+- Fixed `ALL JOIN` with duplicates in the right table. [\#4184](https://github.com/ClickHouse/ClickHouse/pull/4184) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed wrong behaviour when doing an `INSERT ... SELECT ... FROM file(...)` query and the file has `CSVWithNames` or `TSVWithNames` format and the first data row is missing. [\#4297](https://github.com/ClickHouse/ClickHouse/pull/4297) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed aggregate functions execution with `Array(LowCardinality)` arguments. [\#4055](https://github.com/ClickHouse/ClickHouse/pull/4055) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Debian package: correct the /etc/clickhouse-server/preprocessed link according to config. [\#4205](https://github.com/ClickHouse/ClickHouse/pull/4205) ([proller](https://github.com/proller))
+- Fixed fuzz test under undefined behavior sanitizer: added parameter type check for the `quantile*Weighted` family of functions. [\#4145](https://github.com/ClickHouse/ClickHouse/pull/4145) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Made the `START REPLICATED SENDS` command start replicated sends. [\#4229](https://github.com/ClickHouse/ClickHouse/pull/4229) ([nvartolomei](https://github.com/nvartolomei))
+- Fixed `Not found column` for duplicate columns in the JOIN ON section. [\#4279](https://github.com/ClickHouse/ClickHouse/pull/4279) ([Artem Zuikov](https://github.com/4ertus2))
+- Now `/etc/ssl` is used as the default directory with SSL certificates. [\#4167](https://github.com/ClickHouse/ClickHouse/pull/4167) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed crash on dictionary reload if the dictionary is not available. [\#4188](https://github.com/ClickHouse/ClickHouse/pull/4188) ([proller](https://github.com/proller))
+- Fixed bug with incorrect `Date` and `DateTime` comparison. [\#4237](https://github.com/ClickHouse/ClickHouse/pull/4237) ([valexey](https://github.com/valexey))
+- Fixed incorrect result when `Date` and `DateTime` arguments are used in branches of the conditional operator (function `if`). Added generic case for function `if`. [\#4243](https://github.com/ClickHouse/ClickHouse/pull/4243) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse release 19.1.6, 2019-01-24 {#clickhouse-release-19-1-6-2019-01-24}
+
+#### New Features {#new-features-7}
+
+- Custom per column compression codecs for tables. [\#3899](https://github.com/ClickHouse/ClickHouse/pull/3899) [\#4111](https://github.com/ClickHouse/ClickHouse/pull/4111) ([alesapin](https://github.com/alesapin), [Winter Zhang](https://github.com/zhang2014), [Anatoly](https://github.com/Sindbag))
+- Added compression codec `Delta`. [\#4052](https://github.com/ClickHouse/ClickHouse/pull/4052) ([alesapin](https://github.com/alesapin))
+- Allow to `ALTER` compression codecs. [\#4054](https://github.com/ClickHouse/ClickHouse/pull/4054) ([alesapin](https://github.com/alesapin))
+- Added functions `left`, `right`, `trim`, `ltrim`, `rtrim`, `timestampadd`, `timestampsub` for SQL standard compatibility. [\#3826](https://github.com/ClickHouse/ClickHouse/pull/3826) ([Ivan Blinkov](https://github.com/blinkov))
+- Support for writing to `HDFS` tables and the `hdfs` table function. [\#4084](https://github.com/ClickHouse/ClickHouse/pull/4084) ([alesapin](https://github.com/alesapin))
+- Added functions to search for multiple constant strings from a big haystack: `multiPosition`, `multiSearch`, `firstMatch`, also with `-UTF8`, `-CaseInsensitive` and `-CaseInsensitiveUTF8` variants. [\#4053](https://github.com/ClickHouse/ClickHouse/pull/4053) ([Danila Kutenin](https://github.com/danlark1))
+- Pruning of unused shards if the `SELECT` query filters by sharding key (setting `optimize_skip_unused_shards`). [\#3851](https://github.com/ClickHouse/ClickHouse/pull/3851) ([Gleb Kanterov](https://github.com/kanterov), [Ivan](https://github.com/abyss7))
+- Allow the `Kafka` engine to ignore some number of parsing errors per block. [\#4094](https://github.com/ClickHouse/ClickHouse/pull/4094) ([Ivan](https://github.com/abyss7))
+- Added support for `CatBoost` multiclass models evaluation. Function `modelEvaluate` returns a tuple with per-class raw predictions for multiclass models. `libcatboostmodel.so` should be built with [\#607](https://github.com/catboost/catboost/pull/607). [\#3959](https://github.com/ClickHouse/ClickHouse/pull/3959) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Added functions `filesystemAvailable`, `filesystemFree`, `filesystemCapacity` (see the sketch after this list). [\#4097](https://github.com/ClickHouse/ClickHouse/pull/4097) ([Boris Granveaud](https://github.com/bgranvea))
+- Added hashing functions `xxHash64` and `xxHash32`. [\#3905](https://github.com/ClickHouse/ClickHouse/pull/3905) ([filimonov](https://github.com/filimonov))
+- Added the `gccMurmurHash` hashing function (GCC flavoured Murmur hash) which uses the same hash seed as [gcc](https://github.com/gcc-mirror/gcc/blob/41d6b10e96a1de98e90a7c0378437c3255814b16/libstdc%2B%2B-v3/include/bits/functional_hash.h#L191) [\#4000](https://github.com/ClickHouse/ClickHouse/pull/4000) ([sundy-li](https://github.com/sundy-li))
+- Added hashing functions `javaHash`, `hiveHash`. [\#3811](https://github.com/ClickHouse/ClickHouse/pull/3811) ([shangshujie365](https://github.com/shangshujie365))
+- Added table function `remoteSecure`. The function works like `remote`, but uses a secure connection. [\#4088](https://github.com/ClickHouse/ClickHouse/pull/4088) ([proller](https://github.com/proller))
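+
+A minimal sketch of the new introspection and hashing functions:
+
+```sql
+-- Available and total space of the filesystem holding the data directory.
+SELECT
+    formatReadableSize(filesystemAvailable()) AS available,
+    formatReadableSize(filesystemCapacity()) AS capacity;
+
+SELECT xxHash64('ClickHouse') AS h64, xxHash32('ClickHouse') AS h32;
+```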
+
+#### Experimental Features {#experimental-features-3}
+
+- Added multiple JOINs emulation (`allow_experimental_multiple_joins_emulation` setting). [\#3946](https://github.com/ClickHouse/ClickHouse/pull/3946) ([Artem Zuikov](https://github.com/4ertus2))
+
+#### Bug Fixes {#bug-fixes-21}
+
+- Made the `compiled_expression_cache_size` setting limited by default to lower memory consumption. [\#4041](https://github.com/ClickHouse/ClickHouse/pull/4041) ([alesapin](https://github.com/alesapin))
+- Fixed a bug that led to hangups in threads that perform ALTERs of Replicated tables and in the thread that updates configuration from ZooKeeper. [\#2947](https://github.com/ClickHouse/ClickHouse/issues/2947) [\#3891](https://github.com/ClickHouse/ClickHouse/issues/3891) [\#3934](https://github.com/ClickHouse/ClickHouse/pull/3934) ([Alex Zatelepin](https://github.com/ztlpn))
+- Fixed a race condition when executing a distributed ALTER task. The race condition led to more than one replica trying to execute the task and all replicas except one failing with a ZooKeeper error. [\#3904](https://github.com/ClickHouse/ClickHouse/pull/3904) ([Alex Zatelepin](https://github.com/ztlpn))
+- Fixed a bug when `from_zk` config elements weren’t refreshed after a request to ZooKeeper timed out. [\#2947](https://github.com/ClickHouse/ClickHouse/issues/2947) [\#3947](https://github.com/ClickHouse/ClickHouse/pull/3947) ([Alex Zatelepin](https://github.com/ztlpn))
+- Fixed bug with a wrong prefix for IPv4 subnet masks. [\#3945](https://github.com/ClickHouse/ClickHouse/pull/3945) ([alesapin](https://github.com/alesapin))
+- Fixed crash (`std::terminate`) in rare cases when a new thread cannot be created due to exhausted resources. [\#3956](https://github.com/ClickHouse/ClickHouse/pull/3956) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed bug in `remote` table function execution when wrong restrictions were used for `getStructureOfRemoteTable`. [\#4009](https://github.com/ClickHouse/ClickHouse/pull/4009) ([alesapin](https://github.com/alesapin))
+- Fixed a leak of netlink sockets. They were placed in a pool where they were never deleted, and new sockets were created at the start of a new thread when all current sockets were in use. [\#4017](https://github.com/ClickHouse/ClickHouse/pull/4017) ([Alex Zatelepin](https://github.com/ztlpn))
+- Fixed bug with closing the `/proc/self/fd` directory earlier than all fds were read from `/proc` after forking the `odbc-bridge` subprocess. [\#4120](https://github.com/ClickHouse/ClickHouse/pull/4120) ([alesapin](https://github.com/alesapin))
+- Fixed String to UInt monotonic conversion in case of usage of String in primary key. [\#3870](https://github.com/ClickHouse/ClickHouse/pull/3870) ([Winter Zhang](https://github.com/zhang2014))
+- Fixed error in calculation of integer conversion function monotonicity. [\#3921](https://github.com/ClickHouse/ClickHouse/pull/3921) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed segfault in `arrayEnumerateUniq`, `arrayEnumerateDense` functions in case of some invalid arguments. [\#3909](https://github.com/ClickHouse/ClickHouse/pull/3909) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix UB in StorageMerge. [\#3910](https://github.com/ClickHouse/ClickHouse/pull/3910) ([Amos Bird](https://github.com/amosbird))
+- Fixed segfault in functions `addDays`, `subtractDays`. [\#3913](https://github.com/ClickHouse/ClickHouse/pull/3913) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed error: functions `round`, `floor`, `trunc`, `ceil` may return bogus results when executed on an integer argument and big negative scale. [\#3914](https://github.com/ClickHouse/ClickHouse/pull/3914) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a bug induced by ‘kill query sync’ which led to a core dump. [\#3916](https://github.com/ClickHouse/ClickHouse/pull/3916) ([fancyqlx](https://github.com/fancyqlx))
+- Fixed bug with long delay after an empty replication queue. [\#3928](https://github.com/ClickHouse/ClickHouse/pull/3928) [\#3932](https://github.com/ClickHouse/ClickHouse/pull/3932) ([alesapin](https://github.com/alesapin))
+- Fixed excessive memory usage in case of inserting into a table with a `LowCardinality` primary key. [\#3955](https://github.com/ClickHouse/ClickHouse/pull/3955) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Fixed `LowCardinality` serialization for `Native` format in case of empty arrays. [\#3907](https://github.com/ClickHouse/ClickHouse/issues/3907) [\#4011](https://github.com/ClickHouse/ClickHouse/pull/4011) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Fixed incorrect result while using distinct by a single LowCardinality numeric column. [\#3895](https://github.com/ClickHouse/ClickHouse/issues/3895) [\#4012](https://github.com/ClickHouse/ClickHouse/pull/4012) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Fixed specialized aggregation with a LowCardinality key (in case when the `compile` setting is enabled). [\#3886](https://github.com/ClickHouse/ClickHouse/pull/3886) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Fixed user and password forwarding for replicated tables queries. [\#3957](https://github.com/ClickHouse/ClickHouse/pull/3957) ([alesapin](https://github.com/alesapin)) ([小路](https://github.com/nicelulu))
+- Fixed very rare race condition that can happen when listing tables in a Dictionary database while reloading dictionaries. [\#3970](https://github.com/ClickHouse/ClickHouse/pull/3970) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed incorrect result when ROLLUP or CUBE was used. [\#3756](https://github.com/ClickHouse/ClickHouse/issues/3756) [\#3837](https://github.com/ClickHouse/ClickHouse/pull/3837) ([Sam Chou](https://github.com/reflection))
+- Fixed column aliases for queries with `JOIN ON` syntax and distributed tables. [\#3980](https://github.com/ClickHouse/ClickHouse/pull/3980) ([Winter Zhang](https://github.com/zhang2014))
+- Fixed error in the internal implementation of `quantileTDigest` (found by Artem Vakhrushev). This error never happens in ClickHouse and was relevant only for those who use the ClickHouse codebase as a library directly. [\#3935](https://github.com/ClickHouse/ClickHouse/pull/3935) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Improvements {#improvements-6}
+
+- Support for `IF NOT EXISTS` in `ALTER TABLE ADD COLUMN` statements along with `IF EXISTS` in `DROP/MODIFY/CLEAR/COMMENT COLUMN`. [\#3900](https://github.com/ClickHouse/ClickHouse/pull/3900) ([Boris Granveaud](https://github.com/bgranvea))
+- Function `parseDateTimeBestEffort`: support for formats `DD.MM.YYYY`, `DD.MM.YY`, `DD-MM-YYYY`, `DD-Mon-YYYY`, `DD/Month/YYYY` and similar (see the sketch after this list). [\#3922](https://github.com/ClickHouse/ClickHouse/pull/3922) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `CapnProtoInputStream` now supports jagged structures. [\#4063](https://github.com/ClickHouse/ClickHouse/pull/4063) ([Odin Hultgren Van Der Horst](https://github.com/Miniwoffer))
+- Usability improvement: added a check that the server process is started from the data directory’s owner. Do not allow to start the server from root if the data belongs to a non-root user. [\#3785](https://github.com/ClickHouse/ClickHouse/pull/3785) ([sergey-v-galtsev](https://github.com/sergey-v-galtsev))
+- Better logic of checking required columns during analysis of queries with JOINs. [\#3930](https://github.com/ClickHouse/ClickHouse/pull/3930) ([Artem Zuikov](https://github.com/4ertus2))
+- Decreased the number of connections in case of a large number of Distributed tables on a single server. [\#3726](https://github.com/ClickHouse/ClickHouse/pull/3726) ([Winter Zhang](https://github.com/zhang2014))
+- Supported totals row for `WITH TOTALS` query for the ODBC driver. [\#3836](https://github.com/ClickHouse/ClickHouse/pull/3836) ([Maxim Koritckiy](https://github.com/nightweb))
+- Allowed to use `Enum`s as integers inside the if function. [\#3875](https://github.com/ClickHouse/ClickHouse/pull/3875) ([Ivan](https://github.com/abyss7))
+- Added the `low_cardinality_allow_in_native_format` setting. If disabled, do not use the `LowCardinality` type in `Native` format. [\#3879](https://github.com/ClickHouse/ClickHouse/pull/3879) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Removed some redundant objects from the compiled expressions cache to lower memory usage. [\#4042](https://github.com/ClickHouse/ClickHouse/pull/4042) ([alesapin](https://github.com/alesapin))
+- Added a check that the `SET send_logs_level = 'value'` query accepts an appropriate value. [\#3873](https://github.com/ClickHouse/ClickHouse/pull/3873) ([Sabyanin Maxim](https://github.com/s-mx))
+- Fixed data type check in type conversion functions. [\#3896](https://github.com/ClickHouse/ClickHouse/pull/3896) ([Winter Zhang](https://github.com/zhang2014))
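+
+A minimal sketch of the newly supported date formats:
+
+```sql
+SELECT
+    parseDateTimeBestEffort('24.12.2018')       AS dd_mm_yyyy,
+    parseDateTimeBestEffort('24-Dec-2018')      AS dd_mon_yyyy,
+    parseDateTimeBestEffort('24/December/2018') AS dd_month_yyyy;
+```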
+
+#### Performance Improvements {#performance-improvements-5}
+
+- Added the MergeTree setting `use_minimalistic_part_header_in_zookeeper`. If enabled, Replicated tables will store compact part metadata in a single part znode. This can dramatically reduce ZooKeeper snapshot size (especially if the tables have a lot of columns). Note that after enabling this setting you will not be able to downgrade to a version that doesn’t support it. [\#3960](https://github.com/ClickHouse/ClickHouse/pull/3960) ([Alex Zatelepin](https://github.com/ztlpn))
+- Added a DFA-based implementation for functions `sequenceMatch` and `sequenceCount` in case the pattern doesn’t contain time. [\#4004](https://github.com/ClickHouse/ClickHouse/pull/4004) ([Léo Ercolanelli](https://github.com/ercolanelli-leo))
+- Performance improvement for integer numbers serialization. [\#3968](https://github.com/ClickHouse/ClickHouse/pull/3968) ([Amos Bird](https://github.com/amosbird))
+- Zero left padding of PODArray so that the -1 element is always valid and zeroed. It’s used for branchless calculation of offsets. [\#3920](https://github.com/ClickHouse/ClickHouse/pull/3920) ([Amos Bird](https://github.com/amosbird))
+- Reverted the `jemalloc` version which led to performance degradation. [\#4018](https://github.com/ClickHouse/ClickHouse/pull/4018) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Backward Incompatible Changes {#backward-incompatible-changes-2}
+
+- Removed the undocumented feature `ALTER MODIFY PRIMARY KEY` because it was superseded by the `ALTER MODIFY ORDER BY` command (see the sketch below). [\#3887](https://github.com/ClickHouse/ClickHouse/pull/3887) ([Alex Zatelepin](https://github.com/ztlpn))
+- Removed function `shardByHash`. [\#3833](https://github.com/ClickHouse/ClickHouse/pull/3833) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Forbid using scalar subqueries with result of type `AggregateFunction`. [\#3865](https://github.com/ClickHouse/ClickHouse/pull/3865) ([Ivan](https://github.com/abyss7))
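+
+A minimal sketch of the replacement command; the table and column names are hypothetical:
+
+```sql
+-- Instead of the removed ALTER MODIFY PRIMARY KEY.
+-- Note: the new sorting key may only extend the old one with newly added columns.
+ALTER TABLE events MODIFY ORDER BY (event_date, user_id);
+```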
+
+#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-6}
+
+- Added support for PowerPC (`ppc64le`) build. [\#4132](https://github.com/ClickHouse/ClickHouse/pull/4132) ([Danila Kutenin](https://github.com/danlark1))
+- Stateful functional tests are run on publicly available datasets. [\#3969](https://github.com/ClickHouse/ClickHouse/pull/3969) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed error when the server cannot start with the `bash: /usr/bin/clickhouse-extract-from-config: Operation not permitted` message within Docker or systemd-nspawn. [\#4136](https://github.com/ClickHouse/ClickHouse/pull/4136) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Updated the `rdkafka` library to v1.0.0-RC5. Used cppkafka instead of the raw C interface. [\#4025](https://github.com/ClickHouse/ClickHouse/pull/4025) ([Ivan](https://github.com/abyss7))
+- Updated the `mariadb-client` library. Fixed one of the issues found by UBSan. [\#3924](https://github.com/ClickHouse/ClickHouse/pull/3924) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Some fixes for UBSan builds. [\#3926](https://github.com/ClickHouse/ClickHouse/pull/3926) [\#3021](https://github.com/ClickHouse/ClickHouse/pull/3021) [\#3948](https://github.com/ClickHouse/ClickHouse/pull/3948) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added per-commit runs of tests with UBSan build.
+- Added per-commit runs of PVS-Studio static analyzer.
+- Fixed bugs found by PVS-Studio. [\#4013](https://github.com/ClickHouse/ClickHouse/pull/4013) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed glibc compatibility issues. [\#4100](https://github.com/ClickHouse/ClickHouse/pull/4100) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Moved Docker images to 18.10 and added a compatibility file for glibc \>= 2.28 [\#3965](https://github.com/ClickHouse/ClickHouse/pull/3965) ([alesapin](https://github.com/alesapin))
+- Added an env variable if the user doesn’t want to chown directories in the server Docker image. [\#3967](https://github.com/ClickHouse/ClickHouse/pull/3967) ([alesapin](https://github.com/alesapin))
+- Enabled most of the warnings from `-Weverything` in clang. Enabled `-Wpedantic`. [\#3986](https://github.com/ClickHouse/ClickHouse/pull/3986) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added a few more warnings that are available only in clang 8. [\#3993](https://github.com/ClickHouse/ClickHouse/pull/3993) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Link to `libLLVM` rather than to individual LLVM libs when using shared linking. [\#3989](https://github.com/ClickHouse/ClickHouse/pull/3989) ([Orivej Desh](https://github.com/orivej))
+- Added sanitizer variables for test images. [\#4072](https://github.com/ClickHouse/ClickHouse/pull/4072) ([alesapin](https://github.com/alesapin))
+- The `clickhouse-server` debian package will recommend the `libcap2-bin` package to use the `setcap` tool for setting capabilities. This is optional. [\#4093](https://github.com/ClickHouse/ClickHouse/pull/4093) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Improved compilation time, fixed includes. [\#3898](https://github.com/ClickHouse/ClickHouse/pull/3898) ([proller](https://github.com/proller))
+- Added performance tests for hash functions. [\#3918](https://github.com/ClickHouse/ClickHouse/pull/3918) ([filimonov](https://github.com/filimonov))
+- Fixed cyclic library dependencies. [\#3958](https://github.com/ClickHouse/ClickHouse/pull/3958) ([proller](https://github.com/proller))
+- Improved compilation with low available memory. [\#4030](https://github.com/ClickHouse/ClickHouse/pull/4030) ([proller](https://github.com/proller))
+- Added a test script to reproduce performance degradation in `jemalloc`. [\#4036](https://github.com/ClickHouse/ClickHouse/pull/4036) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed misspells in comments and string literals under `dbms`. [\#4122](https://github.com/ClickHouse/ClickHouse/pull/4122) ([maiha](https://github.com/maiha))
+- Fixed typos in comments. [\#4089](https://github.com/ClickHouse/ClickHouse/pull/4089) ([Evgenii Pravda](https://github.com/kvinty))
+
+## [Changelog for 2018](https://github.com/ClickHouse/ClickHouse/blob/master/docs/en/changelog/2018.md) {#changelog-for-2018}
diff --git a/docs/fa/whats_new/changelog/index.md b/docs/fa/whats_new/changelog/index.md
new file mode 100644
index 00000000000..c7eb257e5a9
--- /dev/null
+++ b/docs/fa/whats_new/changelog/index.md
@@ -0,0 +1,668 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_folder_title: Changelog
+toc_priority: 74
+toc_title: '2020'
+---
+
+## ClickHouse release v20.3 {#clickhouse-release-v20-3}
+
+### ClickHouse release v20.3.4.10, 2020-03-20 {#clickhouse-release-v20-3-4-10-2020-03-20}
+
+#### Bug Fix {#bug-fix}
+
+- This release also contains all bug fixes from 20.1.8.41
+- Fix missing `rows_before_limit_at_least` for queries over http (with processors pipeline). This fixes [\#9730](https://github.com/ClickHouse/ClickHouse/issues/9730). [\#9757](https://github.com/ClickHouse/ClickHouse/pull/9757) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+
+### ClickHouse release v20.3.3.6, 2020-03-17 {#clickhouse-release-v20-3-3-6-2020-03-17}
+
+#### Bug Fix {#bug-fix-1}
+
+- This release also contains all bug fixes from 20.1.7.38
+- Fix bug in replication that doesn’t allow replication to work if the user has executed mutations on the previous version. This fixes [\#9645](https://github.com/ClickHouse/ClickHouse/issues/9645). [\#9652](https://github.com/ClickHouse/ClickHouse/pull/9652) ([alesapin](https://github.com/alesapin)). It makes version 20.3 backward compatible again.
+- Add setting `use_compact_format_in_distributed_parts_names` which allows to write files for `INSERT` queries into a `Distributed` table with a more compact format. This fixes [\#9647](https://github.com/ClickHouse/ClickHouse/issues/9647). [\#9653](https://github.com/ClickHouse/ClickHouse/pull/9653) ([alesapin](https://github.com/alesapin)). It makes version 20.3 backward compatible again.
+
+### ClickHouse release v20.3.2.1, 2020-03-12 {#clickhouse-release-v20-3-2-1-2020-03-12}
+
+#### Backward Incompatible Change {#backward-incompatible-change}
+
+- Fixed the issue `file name too long` when sending data for `Distributed` tables for a large number of replicas. Fixed the issue that replica credentials were exposed in the server log. The format of the directory name on disk was changed to `[shard{shard_index}[_replica{replica_index}]]`. [\#8911](https://github.com/ClickHouse/ClickHouse/pull/8911) ([Mikhail Korotov](https://github.com/millb)) After you upgrade to the new version, you will not be able to downgrade without manual intervention, because old server versions do not recognize the new directory format. If you want to downgrade, you have to manually rename the corresponding directories to the old format. This change is relevant only if you have used asynchronous `INSERT`s to `Distributed` tables. In version 20.3.3 we will introduce a setting that will allow you to enable the new format gradually.
+- Changed the format of replication log entries for mutation commands. You have to wait for old mutations to process before installing the new version.
+- Implemented a simple memory profiler that dumps stacktraces to `system.trace_log` every N bytes over the soft allocation limit [\#8765](https://github.com/ClickHouse/ClickHouse/pull/8765) ([Ivan](https://github.com/abyss7)) [\#9472](https://github.com/ClickHouse/ClickHouse/pull/9472) ([alexey-milovidov](https://github.com/alexey-milovidov)) The column of `system.trace_log` was renamed from `timer_type` to `trace_type`. This will require changes in third-party performance analysis and flamegraph processing tools.
+- Use OS thread id everywhere instead of internal thread number. This fixes [\#7477](https://github.com/ClickHouse/ClickHouse/issues/7477) Old `clickhouse-client` cannot receive logs that are sent from the server when the setting `send_logs_level` is enabled, because the names and types of the structured log messages were changed. On the other hand, different server versions can send logs with different types to each other. When you don’t use the `send_logs_level` setting, you should not care. [\#8954](https://github.com/ClickHouse/ClickHouse/pull/8954) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Remove the `indexHint` function [\#9542](https://github.com/ClickHouse/ClickHouse/pull/9542) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Remove the `findClusterIndex`, `findClusterValue` functions. This fixes [\#8641](https://github.com/ClickHouse/ClickHouse/issues/8641). If you were using these functions, send an email to `clickhouse-feedback@yandex-team.com` [\#9543](https://github.com/ClickHouse/ClickHouse/pull/9543) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Now it’s not allowed to create columns or add columns with a `SELECT` subquery as default expression. [\#9481](https://github.com/ClickHouse/ClickHouse/pull/9481) ([alesapin](https://github.com/alesapin))
+- Require aliases for subqueries in JOIN. [\#9274](https://github.com/ClickHouse/ClickHouse/pull/9274) ([Artem Zuikov](https://github.com/4ertus2))
+- Improved `ALTER MODIFY/ADD` queries logic. Now you cannot `ADD` a column without type, `MODIFY` default expression doesn’t change the type of the column and `MODIFY` type doesn’t lose the default expression value. Fixes [\#8669](https://github.com/ClickHouse/ClickHouse/issues/8669). [\#9227](https://github.com/ClickHouse/ClickHouse/pull/9227) ([alesapin](https://github.com/alesapin))
+- Require the server to be restarted to apply changes in the logging configuration. This is a temporary workaround to avoid the bug where the server logs to a deleted log file (see [\#8696](https://github.com/ClickHouse/ClickHouse/issues/8696)). [\#8707](https://github.com/ClickHouse/ClickHouse/pull/8707) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- The setting `experimental_use_processors` is enabled by default. This setting enables usage of the new query pipeline. This is internal refactoring and we expect no visible changes. If you see any issues, set it back to zero. [\#8768](https://github.com/ClickHouse/ClickHouse/pull/8768) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### New Feature {#new-feature}
+
+- Add `Avro` and `AvroConfluent` input/output formats [\#8571](https://github.com/ClickHouse/ClickHouse/pull/8571) ([Andrew Onyshchuk](https://github.com/oandrew)) [\#8957](https://github.com/ClickHouse/ClickHouse/pull/8957) ([Andrew Onyshchuk](https://github.com/oandrew)) [\#8717](https://github.com/ClickHouse/ClickHouse/pull/8717) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Multi-threaded and non-blocking updates of expired keys in `cache` dictionaries (with optional permission to read the old ones). [\#8303](https://github.com/ClickHouse/ClickHouse/pull/8303) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Add query `ALTER ... MATERIALIZE TTL`. It runs a mutation that forces removal of expired data by TTL and recalculates meta-information about TTL in all parts. [\#8775](https://github.com/ClickHouse/ClickHouse/pull/8775) ([Anton Popov](https://github.com/CurtizJ))
+- Switch from HashJoin to MergeJoin (on disk) if needed [\#9082](https://github.com/ClickHouse/ClickHouse/pull/9082) ([Artem Zuikov](https://github.com/4ertus2))
+- Added the `MOVE PARTITION` command for `ALTER TABLE` [\#4729](https://github.com/ClickHouse/ClickHouse/issues/4729) [\#6168](https://github.com/ClickHouse/ClickHouse/pull/6168) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Reloading the storage configuration from the configuration file on the fly. [\#8594](https://github.com/ClickHouse/ClickHouse/pull/8594) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Allowed changing `storage_policy` to a not less rich one. [\#8107](https://github.com/ClickHouse/ClickHouse/pull/8107) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Added support for globs/wildcards for S3 storage and table function. [\#8851](https://github.com/ClickHouse/ClickHouse/pull/8851) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Implement `bitAnd`, `bitOr`, `bitXor`, `bitNot` for the `FixedString(N)` data type. [\#9091](https://github.com/ClickHouse/ClickHouse/pull/9091) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Added the function `bitCount`. This fixes [\#8702](https://github.com/ClickHouse/ClickHouse/issues/8702). [\#8708](https://github.com/ClickHouse/ClickHouse/pull/8708) ([alexey-milovidov](https://github.com/alexey-milovidov)) [\#8749](https://github.com/ClickHouse/ClickHouse/pull/8749) ([ikopylov](https://github.com/ikopylov))
+- Add the `generateRandom` table function to generate random rows with a given schema. Allows populating an arbitrary test table with data. [\#8994](https://github.com/ClickHouse/ClickHouse/pull/8994) ([Ilya Yatsishin](https://github.com/qoega))
+- `JSONEachRowFormat`: support the special case when objects are enclosed in a top-level array. [\#8860](https://github.com/ClickHouse/ClickHouse/pull/8860) ([Kruglov Pavel](https://github.com/Avogar))
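+
+A sketch of the two input shapes the `JSONEachRow` parser accepts after this change (the table and data are made up):
+
+``` sql
+CREATE TABLE t (a Int32) ENGINE = Memory;
+INSERT INTO t FORMAT JSONEachRow {"a": 1} {"a": 2}
+INSERT INTO t FORMAT JSONEachRow [{"a": 3}, {"a": 4}]
+```
+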
+- Now it's possible to create a column with a `DEFAULT` expression which depends on a column with a default `ALIAS` expression. [\#9489](https://github.com/ClickHouse/ClickHouse/pull/9489) ([alesapin](https://github.com/alesapin))
+- Allow specifying `--limit` larger than the source data size in `clickhouse-obfuscator`. The data will repeat itself with a different random seed. [\#9155](https://github.com/ClickHouse/ClickHouse/pull/9155) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added the `groupArraySample` function (similar to `groupArray`) with a reservoir sampling algorithm. [\#8286](https://github.com/ClickHouse/ClickHouse/pull/8286) ([Amos Bird](https://github.com/amosbird))
+- Now you can monitor the size of the update queue in `cache`/`complex_key_cache` dictionaries via system metrics. [\#9413](https://github.com/ClickHouse/ClickHouse/pull/9413) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Allow using CRLF as a line separator in CSV output format with the setting `output_format_csv_crlf_end_of_line` set to 1 [\#8934](https://github.com/ClickHouse/ClickHouse/pull/8934) [\#8935](https://github.com/ClickHouse/ClickHouse/pull/8935) [\#8963](https://github.com/ClickHouse/ClickHouse/pull/8963) ([Mikhail Korotov](https://github.com/millb))
+- Implement more functions of the [H3](https://github.com/uber/h3) API: `h3GetBaseCell`, `h3HexAreaM2`, `h3IndexesAreNeighbors`, `h3ToChildren`, `h3ToString` and `stringToH3` [\#8938](https://github.com/ClickHouse/ClickHouse/pull/8938) ([Nico Mandery](https://github.com/nmandery))
+- New setting introduced: `max_parser_depth` to control the maximum stack size and allow large complex queries. This fixes [\#6681](https://github.com/ClickHouse/ClickHouse/issues/6681) and [\#7668](https://github.com/ClickHouse/ClickHouse/issues/7668). [\#8647](https://github.com/ClickHouse/ClickHouse/pull/8647) ([Maxim Smirnov](https://github.com/qMBQx8GH))
+- Add a setting `force_optimize_skip_unused_shards` to throw if skipping of unused shards is not possible [\#8805](https://github.com/ClickHouse/ClickHouse/pull/8805) ([Azat Khuzhin](https://github.com/azat))
+- Allow configuring multiple disks/volumes for storing data for send in the `Distributed` engine [\#8756](https://github.com/ClickHouse/ClickHouse/pull/8756) ([Azat Khuzhin](https://github.com/azat))
+- Support storage policy (``) for storing temporary data. [\#8750](https://github.com/ClickHouse/ClickHouse/pull/8750) ([Azat Khuzhin](https://github.com/azat))
+- Added the `X-ClickHouse-Exception-Code` HTTP header that is set if an exception was thrown before sending data. This implements [\#4971](https://github.com/ClickHouse/ClickHouse/issues/4971). [\#8786](https://github.com/ClickHouse/ClickHouse/pull/8786) ([Mikhail Korotov](https://github.com/millb))
+- Added the function `ifNotFinite`. It is just syntactic sugar: `ifNotFinite(x, y) = isFinite(x) ? x : y`. [\#8710](https://github.com/ClickHouse/ClickHouse/pull/8710) ([alexey-milovidov](https://github.com/alexey-milovidov))
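+
+For illustration (the values are just examples):
+
+``` sql
+SELECT ifNotFinite(1 / 0, -1);  -- 1/0 is +inf for Float64, so this returns -1
+SELECT ifNotFinite(2.5, -1);    -- 2.5 is finite, so this returns 2.5
+```
+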
+- Added the `last_successful_update_time` column to the `system.dictionaries` table [\#9394](https://github.com/ClickHouse/ClickHouse/pull/9394) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Add the `blockSerializedSize` function (size on disk without compression) [\#8952](https://github.com/ClickHouse/ClickHouse/pull/8952) ([Azat Khuzhin](https://github.com/azat))
+- Add the function `moduloOrZero` [\#9358](https://github.com/ClickHouse/ClickHouse/pull/9358) ([hcz](https://github.com/hczhcz))
+- Added system tables `system.zeros` and `system.zeros_mt` as well as table functions `zeros()` and `zeros_mt()`. The tables (and table functions) contain a single column with name `zero` and type `UInt8`. This column contains zeros. It is needed for test purposes as the fastest method to generate many rows. This fixes [\#6604](https://github.com/ClickHouse/ClickHouse/issues/6604) [\#9593](https://github.com/ClickHouse/ClickHouse/pull/9593) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+
+#### Experimental Feature {#experimental-feature}
+
+- Added a new compact format of parts in `MergeTree`-family tables in which all columns are stored in one file. It helps to increase performance of small and frequent inserts. The old format (one file per column) is now called wide. The data storing format is controlled by the settings `min_bytes_for_wide_part` and `min_rows_for_wide_part`. [\#8290](https://github.com/ClickHouse/ClickHouse/pull/8290) ([Anton Popov](https://github.com/CurtizJ))
+- Support for S3 storage for `Log`, `TinyLog` and `StripeLog` tables. [\#8862](https://github.com/ClickHouse/ClickHouse/pull/8862) ([Pavel Kovalenko](https://github.com/Jokser))
+
+#### Bug Fix {#bug-fix-2}
+
+- Fixed inconsistent whitespaces in log messages. [\#9322](https://github.com/ClickHouse/ClickHouse/pull/9322) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix a bug in which arrays of unnamed tuples were flattened as Nested structures on table creation. [\#8866](https://github.com/ClickHouse/ClickHouse/pull/8866) ([achulkov2](https://github.com/achulkov2))
+- Fixed the issue when the “Too many open files” error may happen if there are too many files matching the glob pattern in a `File` table or `file` table function. Now files are opened lazily. This fixes [\#8857](https://github.com/ClickHouse/ClickHouse/issues/8857) [\#8861](https://github.com/ClickHouse/ClickHouse/pull/8861) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- DROP TEMPORARY TABLE now drops only the temporary table. [\#8907](https://github.com/ClickHouse/ClickHouse/pull/8907) ([Vitaly Baranov](https://github.com/vitlibar))
+- Remove the outdated partition when we shut down the server or DETACH/ATTACH a table. [\#8602](https://github.com/ClickHouse/ClickHouse/pull/8602) ([Guillaume Tassery](https://github.com/YiuRULE))
+- For the default disk, calculate the free space from the `data` subdirectory. Fixed the issue when the amount of free space is not calculated correctly if the `data` directory is mounted to a separate device (rare case). This fixes [\#7441](https://github.com/ClickHouse/ClickHouse/issues/7441) [\#9257](https://github.com/ClickHouse/ClickHouse/pull/9257) ([Mikhail Korotov](https://github.com/millb))
+- Allow comma (cross) join with IN () inside. [\#9251](https://github.com/ClickHouse/ClickHouse/pull/9251) ([Artem Zuikov](https://github.com/4ertus2))
+- Allow rewriting CROSS to INNER JOIN if there's a \[NOT\] LIKE operator in the WHERE section. [\#9229](https://github.com/ClickHouse/ClickHouse/pull/9229) ([Artem Zuikov](https://github.com/4ertus2))
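+
+A sketch of the kind of query this affects (tables and columns are hypothetical); the equality condition allows the CROSS-to-INNER rewrite, and a `[NOT] LIKE` beside it no longer prevents it:
+
+``` sql
+SELECT *
+FROM t1
+CROSS JOIN t2
+WHERE t1.id = t2.id AND t1.name NOT LIKE '%test%';
+```
+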
+- Fix possible wrong result after `GROUP BY` with the enabled setting `distributed_aggregation_memory_efficient`. Fixes [\#9134](https://github.com/ClickHouse/ClickHouse/issues/9134). [\#9289](https://github.com/ClickHouse/ClickHouse/pull/9289) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Found keys were counted as missed in metrics of cache dictionaries. [\#9411](https://github.com/ClickHouse/ClickHouse/pull/9411) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Fix the replication protocol incompatibility introduced in [\#8598](https://github.com/ClickHouse/ClickHouse/issues/8598). [\#9412](https://github.com/ClickHouse/ClickHouse/pull/9412) ([alesapin](https://github.com/alesapin))
+- Fixed race condition on `queue_task_handle` at the startup of `ReplicatedMergeTree` tables. [\#9552](https://github.com/ClickHouse/ClickHouse/pull/9552) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- The token `NOT` didn't work in the `SHOW TABLES NOT LIKE` query [\#8727](https://github.com/ClickHouse/ClickHouse/issues/8727) [\#8940](https://github.com/ClickHouse/ClickHouse/pull/8940) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added a range check to the function `h3EdgeLengthM`. Without this check, buffer overflow is possible. [\#8945](https://github.com/ClickHouse/ClickHouse/pull/8945) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a bug in batched calculations of ternary logical OPs on multiple arguments (more than 10). [\#8718](https://github.com/ClickHouse/ClickHouse/pull/8718) ([Alexander Kazakov](https://github.com/Akazz))
+- Fix an error of PREWHERE optimization, which could lead to segfaults or the `Inconsistent number of columns got from MergeTreeRangeReader` exception. [\#9024](https://github.com/ClickHouse/ClickHouse/pull/9024) ([Anton Popov](https://github.com/CurtizJ))
+- Fix the unexpected `Timeout exceeded while reading from socket` exception, which randomly happens on secure connections before the timeout is actually exceeded and when the query profiler is enabled. Also add the `connect_timeout_with_failover_secure_ms` setting (default 100ms), which is similar to `connect_timeout_with_failover_ms`, but is used for secure connections (because the SSL handshake is slower than an ordinary TCP connection) [\#9026](https://github.com/ClickHouse/ClickHouse/pull/9026) ([tavplubix](https://github.com/tavplubix))
+- Fix a bug with mutations finalization, when a mutation may hang in a state with `parts_to_do=0` and `is_done=0`. [\#9022](https://github.com/ClickHouse/ClickHouse/pull/9022) ([alesapin](https://github.com/alesapin))
+- Use the new ANY JOIN logic with the `partial_merge_join` setting. It's possible to make `ANY|ALL|SEMI LEFT` and `ALL INNER` joins with `partial_merge_join=1` now. [\#8932](https://github.com/ClickHouse/ClickHouse/pull/8932) ([Artem Zuikov](https://github.com/4ertus2))
+- The shard now clamps the settings received from the initiator to the shard's constraints instead of throwing an exception. This fix allows sending queries to a shard with other constraints. [\#9447](https://github.com/ClickHouse/ClickHouse/pull/9447) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fixed a memory management problem in `MergeTreeReadPool`. [\#8791](https://github.com/ClickHouse/ClickHouse/pull/8791) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fixed the `toDecimal*OrNull()` functions family when called with the string `e`. Fixes [\#8312](https://github.com/ClickHouse/ClickHouse/issues/8312) [\#8764](https://github.com/ClickHouse/ClickHouse/pull/8764) ([Artem Zuikov](https://github.com/4ertus2))
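+
+A quick illustration of the fixed behaviour (outputs assumed from the functions' documented contract):
+
+``` sql
+SELECT toDecimal32OrNull('e', 4);     -- unparsable input now yields NULL instead of an error
+SELECT toDecimal32OrNull('3.14', 4);  -- 3.14
+```
+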
+- Make sure that `FORMAT Null` sends no data to the client. [\#8767](https://github.com/ClickHouse/ClickHouse/pull/8767) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Fix a bug that the timestamp in `LiveViewBlockInputStream` was not updated. `LIVE VIEW` is an experimental feature. [\#8644](https://github.com/ClickHouse/ClickHouse/pull/8644) ([Vxider](https://github.com/Vxider)) [\#8625](https://github.com/ClickHouse/ClickHouse/pull/8625) ([Vxider](https://github.com/Vxider))
+- Fixed `ALTER MODIFY TTL` wrong behavior which didn't allow deleting old TTL expressions. [\#8422](https://github.com/ClickHouse/ClickHouse/pull/8422) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fixed UBSan report in `MergeTreeIndexSet`. This fixes [\#9250](https://github.com/ClickHouse/ClickHouse/issues/9250) [\#9365](https://github.com/ClickHouse/ClickHouse/pull/9365) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed the behaviour of the `match` and `extract` functions when the haystack has zero bytes. The behaviour was wrong when the haystack was constant. This fixes [\#9160](https://github.com/ClickHouse/ClickHouse/issues/9160) [\#9163](https://github.com/ClickHouse/ClickHouse/pull/9163) ([alexey-milovidov](https://github.com/alexey-milovidov)) [\#9345](https://github.com/ClickHouse/ClickHouse/pull/9345) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Avoid throwing from the destructor in the Apache Avro 3rd-party library. [\#9066](https://github.com/ClickHouse/ClickHouse/pull/9066) ([Andrew Onyshchuk](https://github.com/oandrew))
+- Don't commit a batch polled from `Kafka` partially as it can lead to holes in data. [\#8876](https://github.com/ClickHouse/ClickHouse/pull/8876) ([filimonov](https://github.com/filimonov))
+- Fix `joinGet` with nullable return types. https://github.com/ClickHouse/ClickHouse/issues/8919 [\#9014](https://github.com/ClickHouse/ClickHouse/pull/9014) ([Amos Bird](https://github.com/amosbird))
+- Fix data incompatibility when compressed with the `T64` codec. [\#9016](https://github.com/ClickHouse/ClickHouse/pull/9016) ([Artem Zuikov](https://github.com/4ertus2)) Fix the data type ids in the `T64` compression codec that lead to wrong (de)compression in affected versions. [\#9033](https://github.com/ClickHouse/ClickHouse/pull/9033) ([Artem Zuikov](https://github.com/4ertus2))
+- Add the setting `enable_early_constant_folding` and disable it in some cases that lead to errors. [\#9010](https://github.com/ClickHouse/ClickHouse/pull/9010) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix the pushdown predicate optimizer with VIEW and enable the test [\#9011](https://github.com/ClickHouse/ClickHouse/pull/9011) ([Winter Zhang](https://github.com/zhang2014))
+- Fix segfault in `Merge` tables, which can happen when reading from `File` storages [\#9387](https://github.com/ClickHouse/ClickHouse/pull/9387) ([tavplubix](https://github.com/tavplubix))
+- Added a check for the storage policy in `ATTACH PARTITION FROM`, `REPLACE PARTITION`, `MOVE TO TABLE`. Otherwise it could make data of the part inaccessible after restart and prevent ClickHouse from starting. [\#9383](https://github.com/ClickHouse/ClickHouse/pull/9383) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fix alters if there is a TTL set for the table. [\#8800](https://github.com/ClickHouse/ClickHouse/pull/8800) ([Anton Popov](https://github.com/CurtizJ))
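+
+A sketch of the affected scenario with a hypothetical table: an `ALTER` on a table that has a table-level TTL works again:
+
+``` sql
+CREATE TABLE ttl_t (d Date, x UInt32)
+ENGINE = MergeTree
+ORDER BY d
+TTL d + INTERVAL 1 MONTH;
+
+ALTER TABLE ttl_t ADD COLUMN y UInt8;  -- no longer broken by the table-level TTL
+```
+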
+- Fix a race condition that can happen when `SYSTEM RELOAD ALL DICTIONARIES` is executed while some dictionary is being modified/added/removed. [\#8801](https://github.com/ClickHouse/ClickHouse/pull/8801) ([Vitaly Baranov](https://github.com/vitlibar))
+- In previous versions the `Memory` database engine used an empty data path, so tables were created in the `path` directory (e.g. `/var/lib/clickhouse/`), not in the data directory of the database (e.g. `/var/lib/clickhouse/db_name`). [\#8753](https://github.com/ClickHouse/ClickHouse/pull/8753) ([tavplubix](https://github.com/tavplubix))
+- Fixed wrong log messages about a missing default disk or policy. [\#9530](https://github.com/ClickHouse/ClickHouse/pull/9530) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fix not(has()) for the bloom\_filter index of array types. [\#9407](https://github.com/ClickHouse/ClickHouse/pull/9407) ([achimbab](https://github.com/achimbab))
+- Allow the first column(s) in a table with the `Log` engine to be an alias [\#9231](https://github.com/ClickHouse/ClickHouse/pull/9231) ([Ivan](https://github.com/abyss7))
+- Fix the order of ranges while reading from a `MergeTree` table in one thread. It could lead to exceptions from `MergeTreeRangeReader` or wrong query results. [\#9050](https://github.com/ClickHouse/ClickHouse/pull/9050) ([Anton Popov](https://github.com/CurtizJ))
+- Make `reinterpretAsFixedString` return `FixedString` instead of `String`. [\#9052](https://github.com/ClickHouse/ClickHouse/pull/9052) ([Andrew Onyshchuk](https://github.com/oandrew))
+- Avoid extremely rare cases when the user can get a wrong error message (`Success` instead of a detailed error description). [\#9457](https://github.com/ClickHouse/ClickHouse/pull/9457) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Do not crash when using the `Template` format with an empty row template. [\#8785](https://github.com/ClickHouse/ClickHouse/pull/8785) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Metadata files for system tables could be created in the wrong place [\#8653](https://github.com/ClickHouse/ClickHouse/pull/8653) ([tavplubix](https://github.com/tavplubix)) Fixes [\#8581](https://github.com/ClickHouse/ClickHouse/issues/8581).
+- Fix data race on exceptions in the cache dictionary [\#8303](https://github.com/ClickHouse/ClickHouse/issues/8303). [\#9379](https://github.com/ClickHouse/ClickHouse/pull/9379) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Do not throw an exception for the query `ATTACH TABLE IF NOT EXISTS`. Previously it was thrown if the table already exists, despite the `IF NOT EXISTS` clause. [\#8967](https://github.com/ClickHouse/ClickHouse/pull/8967) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed a missing closing paren in an exception message. [\#8811](https://github.com/ClickHouse/ClickHouse/pull/8811) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Avoid the message `Possible deadlock avoided` at the startup of clickhouse-client in interactive mode. [\#9455](https://github.com/ClickHouse/ClickHouse/pull/9455) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed the issue when padding at the end of a base64 encoded value can be malformed. Updated the base64 library. This fixes [\#9491](https://github.com/ClickHouse/ClickHouse/issues/9491), closes [\#9492](https://github.com/ClickHouse/ClickHouse/issues/9492) [\#9500](https://github.com/ClickHouse/ClickHouse/pull/9500) ([alexey-milovidov](https://github.com/alexey-milovidov))
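+
+For reference, well-formed padding looks like this (standard Base64 behaviour, values are examples):
+
+``` sql
+SELECT base64Encode('f');     -- 'Zg==' : both '=' padding characters must be intact
+SELECT base64Decode('Zg==');  -- 'f'
+```
+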
+- Prevent losing data in `Kafka` in rare cases when an exception happens after reading the suffix but before commit. Fixes [\#9378](https://github.com/ClickHouse/ClickHouse/issues/9378) [\#9507](https://github.com/ClickHouse/ClickHouse/pull/9507) ([filimonov](https://github.com/filimonov))
+- Fixed an exception in `DROP TABLE IF EXISTS` [\#8663](https://github.com/ClickHouse/ClickHouse/pull/8663) ([Nikita Vasilev](https://github.com/nikvas0))
+- Fix a crash when a user tries to `ALTER MODIFY SETTING` for the old-formatted `MergeTree` table engines family. [\#9435](https://github.com/ClickHouse/ClickHouse/pull/9435) ([alesapin](https://github.com/alesapin))
+- Support for UInt64 numbers that don't fit in Int64 in JSON-related functions. Update SIMDJSON to master. This fixes [\#9209](https://github.com/ClickHouse/ClickHouse/issues/9209) [\#9344](https://github.com/ClickHouse/ClickHouse/pull/9344) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed execution of inversed predicates when a non-strictly monotinic functional index is used. [\#9223](https://github.com/ClickHouse/ClickHouse/pull/9223) ([Alexander Kazakov](https://github.com/Akazz))
+- Don't try to fold the `IN` constant in `GROUP BY` [\#8868](https://github.com/ClickHouse/ClickHouse/pull/8868) ([Amos Bird](https://github.com/amosbird))
+- Fix a bug in `ALTER DELETE` mutations which leads to index corruption. This fixes [\#9019](https://github.com/ClickHouse/ClickHouse/issues/9019) and [\#8982](https://github.com/ClickHouse/ClickHouse/issues/8982). Additionally fix extremely rare race conditions in `ReplicatedMergeTree` `ALTER` queries. [\#9048](https://github.com/ClickHouse/ClickHouse/pull/9048) ([alesapin](https://github.com/alesapin))
+- When the setting `compile_expressions` is enabled, you can get `unexpected column` in `LLVMExecutableFunction` when we use the `Nullable` type [\#8910](https://github.com/ClickHouse/ClickHouse/pull/8910) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Multiple fixes for the `Kafka` engine: 1) fix duplicates that were appearing during consumer group rebalance. 2) Fix rare ‘holes’ that appeared when data were polled from several partitions with one poll and committed partially (now we always process/commit the whole polled block of messages). 3) Fix flushes by block size (before that only flushing by timeout was working properly). 4) Better subscription procedure (with assignment feedback). 5) Make tests work faster (with default intervals and timeouts). Due to the fact that data was not flushed by block size before (as it should according to the documentation), that PR may lead to some performance degradation with default settings (due to more often & tinier flushes which are less optimal). If you encounter a performance issue after that change, please increase `kafka_max_block_size` in the table to a bigger value (for example `CREATE TABLE ...Engine=Kafka ... SETTINGS ... kafka_max_block_size=524288`). Fixes [\#7259](https://github.com/ClickHouse/ClickHouse/issues/7259) [\#8917](https://github.com/ClickHouse/ClickHouse/pull/8917) ([filimonov](https://github.com/filimonov))
+- Fix the `Parameter out of bound` exception in some queries after PREWHERE optimizations.
[\#8914](https://github.com/ClickHouse/ClickHouse/pull/8914) ([Baudouin Giard](https://github.com/bgiard))
+- Fixed the case of mixed-constness of arguments of the function `arrayZip`. [\#8705](https://github.com/ClickHouse/ClickHouse/pull/8705) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- When executing a `CREATE` query, fold constant expressions in storage engine arguments. Replace the empty database name with the current database. Fixes [\#6508](https://github.com/ClickHouse/ClickHouse/issues/6508), [\#3492](https://github.com/ClickHouse/ClickHouse/issues/3492) [\#9262](https://github.com/ClickHouse/ClickHouse/pull/9262) ([tavplubix](https://github.com/tavplubix))
+- Now it's not possible to create or add columns with simple cyclic aliases like `a DEFAULT b, b DEFAULT a`. [\#9603](https://github.com/ClickHouse/ClickHouse/pull/9603) ([alesapin](https://github.com/alesapin))
+- Fixed a bug with a double move which may corrupt the original part. This is relevant if you use `ALTER TABLE MOVE` [\#8680](https://github.com/ClickHouse/ClickHouse/pull/8680) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Allow the `interval` identifier to be correctly parsed without backticks. Fixed the issue when a query cannot be executed even if the `interval` identifier is enclosed in backticks or double quotes. This fixes [\#9124](https://github.com/ClickHouse/ClickHouse/issues/9124). [\#9142](https://github.com/ClickHouse/ClickHouse/pull/9142) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed fuzz test and incorrect behaviour of the `bitTestAll`/`bitTestAny` functions. [\#9143](https://github.com/ClickHouse/ClickHouse/pull/9143) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix possible crash/wrong number of rows in `LIMIT n WITH TIES` when there are a lot of rows equal to the n-th row. [\#9464](https://github.com/ClickHouse/ClickHouse/pull/9464) ([tavplubix](https://github.com/tavplubix))
+- Fix mutations with parts written with the enabled `insert_quorum`. [\#9463](https://github.com/ClickHouse/ClickHouse/pull/9463) ([alesapin](https://github.com/alesapin))
+- Fix a data race at destruction of `Poco::HTTPServer`. It could happen when the server is started and immediately shut down. [\#9468](https://github.com/ClickHouse/ClickHouse/pull/9468) ([Anton Popov](https://github.com/CurtizJ))
+- Fix a bug in which a misleading error message was shown when running `SHOW CREATE TABLE a_table_that_does_not_exist`. [\#8899](https://github.com/ClickHouse/ClickHouse/pull/8899) ([achulkov2](https://github.com/achulkov2))
+- Fixed the `Parameters are out of bound` exception in some rare cases when we have a constant in the `SELECT` clause when we have an `ORDER BY` and a `LIMIT` clause. [\#8892](https://github.com/ClickHouse/ClickHouse/pull/8892) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Fix mutations finalization, when an already done mutation can have the status `is_done=0`. [\#9217](https://github.com/ClickHouse/ClickHouse/pull/9217) ([alesapin](https://github.com/alesapin))
+- Prevent executing `ALTER ADD INDEX` for MergeTree tables with the old syntax, because it doesn't work. [\#8822](https://github.com/ClickHouse/ClickHouse/pull/8822) ([Mikhail Korotov](https://github.com/millb))
+- During server startup do not access the table which `LIVE VIEW` depends on, so the server will be able to start. Also remove `LIVE VIEW` dependencies when detaching a `LIVE VIEW`. `LIVE VIEW` is an experimental feature. [\#8824](https://github.com/ClickHouse/ClickHouse/pull/8824) ([tavplubix](https://github.com/tavplubix))
+- Fix a possible segfault in `MergeTreeRangeReader` while executing `PREWHERE`.
[\#9106](https://github.com/ClickHouse/ClickHouse/pull/9106) ([Anton Popov](https://github.com/CurtizJ))
+- Fix possible mismatched checksums with column TTLs. [\#9451](https://github.com/ClickHouse/ClickHouse/pull/9451) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed a bug when parts were not moved in the background by TTL rules in the case when there is only one volume. [\#8672](https://github.com/ClickHouse/ClickHouse/pull/8672) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fixed the issue `Method createColumn() is not implemented for data type Set`. This fixes [\#7799](https://github.com/ClickHouse/ClickHouse/issues/7799). [\#8674](https://github.com/ClickHouse/ClickHouse/pull/8674) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Now we will try to finalize mutations more frequently. [\#9427](https://github.com/ClickHouse/ClickHouse/pull/9427) ([alesapin](https://github.com/alesapin))
+- Fixed `intDiv` by a minus one constant [\#9351](https://github.com/ClickHouse/ClickHouse/pull/9351) ([hcz](https://github.com/hczhcz))
+- Fix a possible race condition in `BlockIO`. [\#9356](https://github.com/ClickHouse/ClickHouse/pull/9356) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix a bug leading to server termination when trying to use / drop a `Kafka` table created with wrong parameters. [\#9513](https://github.com/ClickHouse/ClickHouse/pull/9513) ([filimonov](https://github.com/filimonov))
+- Added a workaround if the OS returns a wrong result for the `timer_create` function. [\#8837](https://github.com/ClickHouse/ClickHouse/pull/8837) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed an error in the usage of the `min_marks_for_seek` parameter. Fixed the error message when there is no sharding key in the Distributed table and we try to skip unused shards. [\#8908](https://github.com/ClickHouse/ClickHouse/pull/8908) ([Azat Khuzhin](https://github.com/azat))
+
+#### Improvement {#improvement}
+
+- Implement `ALTER MODIFY/DROP` queries on top of mutations for the `ReplicatedMergeTree*` engines family. Now `ALTERS` blocks only at the metadata update stage, and doesn't block after that. [\#8701](https://github.com/ClickHouse/ClickHouse/pull/8701) ([alesapin](https://github.com/alesapin))
+- Add the ability to rewrite CROSS to INNER JOINs with a `WHERE` section containing unqualified names. [\#9512](https://github.com/ClickHouse/ClickHouse/pull/9512) ([Artem Zuikov](https://github.com/4ertus2))
+- Make `SHOW TABLES` and `SHOW DATABASES` queries support `WHERE` expressions and `FROM`/`IN` (see the example below) [\#9076](https://github.com/ClickHouse/ClickHouse/pull/9076) ([sundy-li](https://github.com/sundy-li))
+- Added a setting `deduplicate_blocks_in_dependent_materialized_views`. [\#9070](https://github.com/ClickHouse/ClickHouse/pull/9070) ([urykhy](https://github.com/urykhy))
+- After recent changes the MySQL client started to print binary strings in hex, thereby making them not readable ([\#9032](https://github.com/ClickHouse/ClickHouse/issues/9032)). The workaround in ClickHouse is to mark string columns as UTF-8, which is not always, but usually the case. [\#9079](https://github.com/ClickHouse/ClickHouse/pull/9079) ([Yuriy Baranov](https://github.com/yurriy))
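+
+A sketch of the extended `SHOW` syntax from the list above (the filter values are arbitrary examples):
+
+``` sql
+SHOW TABLES FROM system WHERE name LIKE '%log%';
+SHOW TABLES IN system WHERE name = 'query_log';
+```
+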
+- Add support of String and FixedString keys for `sumMap` [\#8903](https://github.com/ClickHouse/ClickHouse/pull/8903) ([Baudouin Giard](https://github.com/bgiard))
+- Support string keys in SummingMergeTree maps [\#8933](https://github.com/ClickHouse/ClickHouse/pull/8933) ([Baudouin Giard](https://github.com/bgiard))
+- Signal termination of a thread to the thread pool even if the thread has thrown an exception [\#8736](https://github.com/ClickHouse/ClickHouse/pull/8736) ([Ding Xiang Fei](https://github.com/dingxiangfei2009))
+- Allow setting `query_id` in `clickhouse-benchmark` [\#9416](https://github.com/ClickHouse/ClickHouse/pull/9416) ([Anton Popov](https://github.com/CurtizJ))
+- Don't allow strange expressions in the `ALTER TABLE ... PARTITION partition` query. This addresses [\#7192](https://github.com/ClickHouse/ClickHouse/issues/7192) [\#8835](https://github.com/ClickHouse/ClickHouse/pull/8835) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- The table `system.table_engines` now provides information about feature support (like `supports_ttl` or `supports_sort_order`). [\#8830](https://github.com/ClickHouse/ClickHouse/pull/8830) ([Max Akhmedov](https://github.com/zlobober))
+- Enable `system.metric_log` by default. It will contain rows with values of ProfileEvents and CurrentMetrics collected with the “collect\_interval\_milliseconds” interval (one second by default). The table is very small (usually in the order of megabytes) and collecting this data by default is reasonable (see the query sketch below). [\#9225](https://github.com/ClickHouse/ClickHouse/pull/9225) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Initialize query profiler for all threads in a group, e.g. it allows to fully profile insert-queries. Fixes [\#6964](https://github.com/ClickHouse/ClickHouse/issues/6964) [\#8874](https://github.com/ClickHouse/ClickHouse/pull/8874) ([Ivan](https://github.com/abyss7))
+- Now a temporary `LIVE VIEW` is created by `CREATE LIVE VIEW name WITH TIMEOUT [42] ...` instead of `CREATE TEMPORARY LIVE VIEW ...`, because the previous syntax was not consistent with `CREATE TEMPORARY TABLE ...` [\#9131](https://github.com/ClickHouse/ClickHouse/pull/9131) ([tavplubix](https://github.com/tavplubix))
+- Add the `text_log.level` configuration parameter to limit entries that go to the `system.text_log` table [\#8809](https://github.com/ClickHouse/ClickHouse/pull/8809) ([Azat Khuzhin](https://github.com/azat))
+- Allow putting the downloaded part to a disk/volume according to TTL rules [\#8598](https://github.com/ClickHouse/ClickHouse/pull/8598) ([Vladimir Chebotarev](https://github.com/excitoon))
+- For external MySQL dictionaries, allow mutualizing the MySQL connection pool to “share” it among dictionaries. This option significantly reduces the number of connections to MySQL servers. [\#9409](https://github.com/ClickHouse/ClickHouse/pull/9409) ([Clément Rodriguez](https://github.com/clemrodriguez))
+- Show the nearest query execution time for quantiles in `clickhouse-benchmark` output instead of interpolated values. It's better to show values that correspond to the execution time of some queries. [\#8712](https://github.com/ClickHouse/ClickHouse/pull/8712) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Possibility to add a key and a timestamp for the message when inserting data into Kafka. Fixes [\#7198](https://github.com/ClickHouse/ClickHouse/issues/7198) [\#8969](https://github.com/ClickHouse/ClickHouse/pull/8969) ([filimonov](https://github.com/filimonov))
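+
+A sketch of reading the new `system.metric_log` table mentioned in the list above (the exact metric column chosen here is an assumption; columns follow the `ProfileEvent_*`/`CurrentMetric_*` naming pattern):
+
+``` sql
+SELECT event_time, CurrentMetric_Query
+FROM system.metric_log
+ORDER BY event_time DESC
+LIMIT 3;
+```
+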
+- If the server is run from a terminal, highlight the thread number, query id and log priority with colors. This is for improved readability of correlated log messages for developers. [\#8961](https://github.com/ClickHouse/ClickHouse/pull/8961) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Better exception message while loading tables for the `Ordinary` database. [\#9527](https://github.com/ClickHouse/ClickHouse/pull/9527) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Implement `arraySlice` for arrays with aggregate function states. This fixes [\#9388](https://github.com/ClickHouse/ClickHouse/issues/9388) [\#9391](https://github.com/ClickHouse/ClickHouse/pull/9391) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Allow constant functions and constant arrays to be used on the right side of the IN operator (see the example below). [\#8813](https://github.com/ClickHouse/ClickHouse/pull/8813) ([Anton Popov](https://github.com/CurtizJ))
+- If a zookeeper exception has happened while fetching data for system.replicas, display it in a separate column. This implements [\#9137](https://github.com/ClickHouse/ClickHouse/issues/9137) [\#9138](https://github.com/ClickHouse/ClickHouse/pull/9138) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Atomically remove MergeTree data parts on destroy. [\#8402](https://github.com/ClickHouse/ClickHouse/pull/8402) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Support row-level security for Distributed tables. [\#8926](https://github.com/ClickHouse/ClickHouse/pull/8926) ([Ivan](https://github.com/abyss7))
+- Now we recognize suffix (like KB, KiB…) in settings values. [\#8072](https://github.com/ClickHouse/ClickHouse/pull/8072) ([Mikhail Korotov](https://github.com/millb))
+- Prevent out of memory while constructing the result of a large JOIN. [\#8637](https://github.com/ClickHouse/ClickHouse/pull/8637) ([Artem Zuikov](https://github.com/4ertus2))
+- Added names of clusters to suggestions in interactive mode in `clickhouse-client`. [\#8709](https://github.com/ClickHouse/ClickHouse/pull/8709) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Initialize query profiler for all threads in a group, e.g. it allows to fully profile insert-queries [\#8820](https://github.com/ClickHouse/ClickHouse/pull/8820) ([Ivan](https://github.com/abyss7))
+- Added the column `exception_code` in the `system.query_log` table [\#8770](https://github.com/ClickHouse/ClickHouse/pull/8770) ([Mikhail Korotov](https://github.com/millb))
+- Enabled the MySQL compatibility server on port `9004` in the default server configuration file. Fixed the password generation command in the example in the configuration. [\#8771](https://github.com/ClickHouse/ClickHouse/pull/8771) ([Yuriy Baranov](https://github.com/yurriy))
+- Prevent abort on shutdown if the filesystem is readonly. This fixes [\#9094](https://github.com/ClickHouse/ClickHouse/issues/9094) [\#9100](https://github.com/ClickHouse/ClickHouse/pull/9100) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Better exception message when length is required in an HTTP POST query. [\#9453](https://github.com/ClickHouse/ClickHouse/pull/9453) ([alexey-milovidov](https://github.com/alexey-milovidov))
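+
+A sketch of the relaxed `IN` operator from the list above:
+
+``` sql
+SELECT number FROM numbers(10) WHERE number IN range(5);  -- constant function result on the right side
+SELECT 3 IN arrayConcat([1, 2], [3, 4]);                  -- a constant array expression also works
+```
+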
+- Add `_path` and `_file` virtual columns to the `HDFS` and `File` engines and the `hdfs` and `file` table functions [\#8489](https://github.com/ClickHouse/ClickHouse/pull/8489) ([Olga Khvostikova](https://github.com/stavrolia))
+- Fix the error `Cannot find column` while inserting into `MATERIALIZED VIEW` in the case when a new column was added to the view's internal table. [\#8766](https://github.com/ClickHouse/ClickHouse/pull/8766) [\#8788](https://github.com/ClickHouse/ClickHouse/pull/8788) ([vzakaznikov](https://github.com/vzakaznikov)) [\#8788](https://github.com/ClickHouse/ClickHouse/issues/8788) [\#8806](https://github.com/ClickHouse/ClickHouse/pull/8806) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) [\#8803](https://github.com/ClickHouse/ClickHouse/pull/8803) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix progress over the native client-server protocol by sending progress after the final update (like logs). This may be relevant only to some third-party tools that use the native protocol. [\#9495](https://github.com/ClickHouse/ClickHouse/pull/9495) ([Azat Khuzhin](https://github.com/azat))
+- Add a system metric tracking the number of client connections using the MySQL protocol ([\#9013](https://github.com/ClickHouse/ClickHouse/issues/9013)). [\#9015](https://github.com/ClickHouse/ClickHouse/pull/9015) ([Eugene Klimov](https://github.com/Slach))
+- From now on, HTTP responses will have the `X-ClickHouse-Timezone` header set to the same timezone value that `SELECT timezone()` would report. [\#9493](https://github.com/ClickHouse/ClickHouse/pull/9493) ([Denis Glazachev](https://github.com/traceon))
+
+#### Performance Improvement {#performance-improvement}
+
+- Improve performance of analysing index with IN [\#9261](https://github.com/ClickHouse/ClickHouse/pull/9261) ([Anton Popov](https://github.com/CurtizJ))
+- Simpler and more efficient code in logical functions + code cleanups. A followup to [\#8718](https://github.com/ClickHouse/ClickHouse/issues/8718) [\#8728](https://github.com/ClickHouse/ClickHouse/pull/8728) ([Alexander Kazakov](https://github.com/Akazz))
+- Overall performance improvement (in the range of 5%..200% for affected queries) by ensuring even stricter aliasing with C++20 features. [\#9304](https://github.com/ClickHouse/ClickHouse/pull/9304) ([Amos Bird](https://github.com/amosbird))
+- Stricter aliasing for the inner loops of comparison functions. [\#9327](https://github.com/ClickHouse/ClickHouse/pull/9327) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Stricter aliasing for the inner loops of arithmetic functions. [\#9325](https://github.com/ClickHouse/ClickHouse/pull/9325) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- A ~3 times faster implementation of ColumnVector::replicate(), via which ColumnConst::convertToFullColumn() is implemented. It will also be useful in tests when materializing constants. [\#9293](https://github.com/ClickHouse/ClickHouse/pull/9293) ([Alexander Kazakov](https://github.com/Akazz))
+- Another minor performance improvement to `ColumnVector::replicate()` (this speeds up the `materialize` function and higher order functions), an even further improvement to [\#9293](https://github.com/ClickHouse/ClickHouse/issues/9293) [\#9442](https://github.com/ClickHouse/ClickHouse/pull/9442) ([Alexander Kazakov](https://github.com/Akazz))
+- Improved performance of the `stochasticLinearRegression` aggregate function. This patch is contributed by Intel. [\#8652](https://github.com/ClickHouse/ClickHouse/pull/8652) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Improve performance of the `reinterpretAsFixedString` function.
[\#9342](https://github.com/ClickHouse/ClickHouse/pull/9342) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Do not send blocks to the client for the `Null` format in the processors pipeline. [\#8797](https://github.com/ClickHouse/ClickHouse/pull/8797) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) [\#8767](https://github.com/ClickHouse/ClickHouse/pull/8767) ([Alexander Kuzmenkov](https://github.com/akuzm))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement}
+
+- Exception handling now works correctly on the Windows Subsystem for Linux. See https://github.com/clickhouse-extras/libunwind/pull/3 This fixes [\#6480](https://github.com/ClickHouse/ClickHouse/issues/6480) [\#9564](https://github.com/ClickHouse/ClickHouse/pull/9564) ([sobolevsv](https://github.com/sobolevsv))
+- Replace `readline` with `replxx` for interactive line editing in `clickhouse-client` [\#8416](https://github.com/ClickHouse/ClickHouse/pull/8416) ([Ivan](https://github.com/abyss7))
+- Better build time and fewer template instantiations in FunctionsComparison. [\#9324](https://github.com/ClickHouse/ClickHouse/pull/9324) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added integration with `clang-tidy` in CI. See also [\#6044](https://github.com/ClickHouse/ClickHouse/issues/6044) [\#9566](https://github.com/ClickHouse/ClickHouse/pull/9566) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Now we link ClickHouse in CI using `lld` even for `gcc`. [\#9049](https://github.com/ClickHouse/ClickHouse/pull/9049) ([alesapin](https://github.com/alesapin))
+- Allow randomizing thread scheduling and inserting glitches when the `THREAD_FUZZER_*` environment variables are set. This helps testing. [\#9459](https://github.com/ClickHouse/ClickHouse/pull/9459) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Enable secure sockets in stateless tests [\#9288](https://github.com/ClickHouse/ClickHouse/pull/9288) ([tavplubix](https://github.com/tavplubix))
+- Make SPLIT\_SHARED\_LIBRARIES=OFF more robust [\#9156](https://github.com/ClickHouse/ClickHouse/pull/9156) ([Azat Khuzhin](https://github.com/azat))
+- Make the “performance\_introspection\_and\_logging” test reliable against the server randomly getting stuck. This may happen in a CI environment. See also [\#9515](https://github.com/ClickHouse/ClickHouse/issues/9515) [\#9528](https://github.com/ClickHouse/ClickHouse/pull/9528) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Validate XML in style check. [\#9550](https://github.com/ClickHouse/ClickHouse/pull/9550) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a race condition in test `00738_lock_for_inner_table`. This test relied on sleep. [\#9555](https://github.com/ClickHouse/ClickHouse/pull/9555) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Remove performance tests of type `once`. This is needed to run all performance tests in statistical comparison mode (more reliable). [\#9557](https://github.com/ClickHouse/ClickHouse/pull/9557) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added performance test for arithmetic functions. [\#9326](https://github.com/ClickHouse/ClickHouse/pull/9326) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added performance test for the `sumMap` and `sumMapWithOverflow` aggregate functions. A follow-up for [\#8933](https://github.com/ClickHouse/ClickHouse/issues/8933) [\#8947](https://github.com/ClickHouse/ClickHouse/pull/8947) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Ensure the style of ErrorCodes by style check.
[\#9370](https://github.com/ClickHouse/ClickHouse/pull/9370) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add a script for tests history. [\#8796](https://github.com/ClickHouse/ClickHouse/pull/8796) ([alesapin](https://github.com/alesapin))
+- Add the GCC warning `-Wsuggest-override` to locate and fix all places where the `override` keyword must be used. [\#8760](https://github.com/ClickHouse/ClickHouse/pull/8760) ([kreuzerkrieg](https://github.com/kreuzerkrieg))
+- Ignore a weak symbol under Mac OS X because it must be defined [\#9538](https://github.com/ClickHouse/ClickHouse/pull/9538) ([Deleted user](https://github.com/ghost))
+- Normalize the running time of some queries in performance tests. This is done in preparation for running all the performance tests in comparison mode. [\#9565](https://github.com/ClickHouse/ClickHouse/pull/9565) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix some tests to support query tests [\#9062](https://github.com/ClickHouse/ClickHouse/pull/9062) ([Ivan](https://github.com/abyss7))
+- Enable SSL in the build with MSan, so the server will not fail at startup when running stateless tests [\#9531](https://github.com/ClickHouse/ClickHouse/pull/9531) ([tavplubix](https://github.com/tavplubix))
+- Fix database substitution in test results [\#9384](https://github.com/ClickHouse/ClickHouse/pull/9384) ([Ilya Yatsishin](https://github.com/qoega))
+- Build fixes for miscellaneous platforms [\#9381](https://github.com/ClickHouse/ClickHouse/pull/9381) ([proller](https://github.com/proller)) [\#8755](https://github.com/ClickHouse/ClickHouse/pull/8755) ([proller](https://github.com/proller)) [\#8631](https://github.com/ClickHouse/ClickHouse/pull/8631) ([proller](https://github.com/proller))
+- Added a disks section to the stateless-with-coverage test docker image [\#9213](https://github.com/ClickHouse/ClickHouse/pull/9213) ([Pavel Kovalenko](https://github.com/Jokser))
+- Get rid of in-source-tree files during the build [\#9588](https://github.com/ClickHouse/ClickHouse/pull/9588) ([Amos Bird](https://github.com/amosbird))
+- Slightly faster build time after removing unneeded parts from Context. Make the code simpler. [\#9232](https://github.com/ClickHouse/ClickHouse/pull/9232) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Updated the checking for hung queries in the clickhouse-test script [\#8858](https://github.com/ClickHouse/ClickHouse/pull/8858) ([Alexander Kazakov](https://github.com/Akazz))
+- Removed some useless files from the repository. [\#8843](https://github.com/ClickHouse/ClickHouse/pull/8843) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Changed the type of math perftests from `once` to `loop`. [\#8783](https://github.com/ClickHouse/ClickHouse/pull/8783) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Add a docker image which allows building an interactive code browser HTML report for our codebase. [\#8781](https://github.com/ClickHouse/ClickHouse/pull/8781) ([alesapin](https://github.com/alesapin)) See the [Woboq Code Browser](https://clickhouse-test-reports.s3.yandex.net/codebrowser/html_report///ClickHouse/dbms/src/index.html)
+- Suppress some test failures under MSan. [\#8780](https://github.com/ClickHouse/ClickHouse/pull/8780) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Speed up the “exception while insert” test. This test often times out in the debug-with-coverage build.
[\#8711](https://github.com/ClickHouse/ClickHouse/pull/8711) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Updated `libcxx` and `libcxxabi` to master. In preparation for [\#9304](https://github.com/ClickHouse/ClickHouse/issues/9304) [\#9308](https://github.com/ClickHouse/ClickHouse/pull/9308) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix the flaky test `00910_zookeeper_test_alter_compression_codecs`. [\#9525](https://github.com/ClickHouse/ClickHouse/pull/9525) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Clean up duplicated linker flags. Make sure the linker won't look up an unexpected symbol. [\#9433](https://github.com/ClickHouse/ClickHouse/pull/9433) ([Amos Bird](https://github.com/amosbird))
+- Add the `clickhouse-odbc` driver into the test images. This allows testing the interaction of ClickHouse with ClickHouse via its own ODBC driver. [\#9348](https://github.com/ClickHouse/ClickHouse/pull/9348) ([filimonov](https://github.com/filimonov))
+- Fix several bugs in unit tests. [\#9047](https://github.com/ClickHouse/ClickHouse/pull/9047) ([alesapin](https://github.com/alesapin))
+- Enable the `-Wmissing-include-dirs` GCC warning to eliminate all non-existing includes, mostly a result of CMake scripting errors [\#8704](https://github.com/ClickHouse/ClickHouse/pull/8704) ([kreuzerkrieg](https://github.com/kreuzerkrieg))
+- Describe the reasons if the query profiler cannot work. This is intended for [\#9049](https://github.com/ClickHouse/ClickHouse/issues/9049) [\#9144](https://github.com/ClickHouse/ClickHouse/pull/9144) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Update OpenSSL to upstream master. Fixed the issue when TLS connections may fail with the messages `OpenSSL SSL_read: error:14094438:SSL routines:ssl3_read_bytes:tlsv1 alert internal error` and `SSL Exception: error:2400006E:random number generator::error retrieving entropy`. The issue was present in version 20.1. [\#8956](https://github.com/ClickHouse/ClickHouse/pull/8956) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Update the management page for the server [\#8893](https://github.com/ClickHouse/ClickHouse/pull/8893) ([Ilya Mazaev](https://github.com/ne-ray))
+- Minor fixes in the build-gcc-from-sources script [\#8774](https://github.com/ClickHouse/ClickHouse/pull/8774) ([Michael Nacharov](https://github.com/mnach))
+- Replace `numbers` with `zeros` in perftests where the `number` column is not used. This will lead to cleaner test results. [\#9600](https://github.com/ClickHouse/ClickHouse/pull/9600) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix a stack overflow issue when using an initializer list in Column constructors. [\#9367](https://github.com/ClickHouse/ClickHouse/pull/9367) ([Deleted user](https://github.com/ghost))
+- Upgrade librdkafka to v1.3.0. Enable the bundled `rdkafka` and `gsasl` libraries on Mac OS X. [\#9000](https://github.com/ClickHouse/ClickHouse/pull/9000) ([Andrew Onyshchuk](https://github.com/oandrew))
+- Build fix on GCC 9.2.0 [\#9306](https://github.com/ClickHouse/ClickHouse/pull/9306) ([Vxider](https://github.com/Vxider))
+
+## ClickHouse release v20.1 {#clickhouse-release-v20-1}
+
+### ClickHouse release v20.1.8.41, 2020-03-20 {#clickhouse-release-v20-1-8-41-2020-03-20}
+
+#### Bug Fix {#bug-fix-3}
+
+- Fix a possible permanent `Cannot schedule a task` error (due to an unhandled exception in `ParallelAggregatingBlockInputStream::Handler::onFinish/onFinishThread`).
This fixes [\#6833](https://github.com/ClickHouse/ClickHouse/issues/6833). [\#9154](https://github.com/ClickHouse/ClickHouse/pull/9154) ([Azat Khuzhin](https://github.com/azat))
+- Fix excessive memory consumption in `ALTER` queries (mutations). This fixes [\#9533](https://github.com/ClickHouse/ClickHouse/issues/9533) and [\#9670](https://github.com/ClickHouse/ClickHouse/issues/9670). [\#9754](https://github.com/ClickHouse/ClickHouse/pull/9754) ([alesapin](https://github.com/alesapin))
+- Fix a bug with backquoting in external dictionaries. This fixes [\#9619](https://github.com/ClickHouse/ClickHouse/issues/9619). [\#9734](https://github.com/ClickHouse/ClickHouse/pull/9734) ([alesapin](https://github.com/alesapin))
+
+### ClickHouse release v20.1.7.38, 2020-03-18 {#clickhouse-release-v20-1-7-38-2020-03-18}
+
+#### Bug Fix {#bug-fix-4}
+
+- Fixed incorrect internal function names for `sumKahan` and `sumWithOverflow`. It led to an exception while using these functions in remote queries. [\#9636](https://github.com/ClickHouse/ClickHouse/pull/9636) ([Azat Khuzhin](https://github.com/azat)). This issue was in all ClickHouse releases.
+- Allow `ALTER ON CLUSTER` of `Distributed` tables with internal replication. This fixes [\#3268](https://github.com/ClickHouse/ClickHouse/issues/3268). [\#9617](https://github.com/ClickHouse/ClickHouse/pull/9617) ([shinoi2](https://github.com/shinoi2)). This issue was in all ClickHouse releases.
+- Fix possible exceptions `Size of filter doesn't match size of column` and `Invalid number of rows in Chunk` in `MergeTreeRangeReader`. They could appear while executing `PREWHERE` in some cases. Fixes [\#9132](https://github.com/ClickHouse/ClickHouse/issues/9132). [\#9612](https://github.com/ClickHouse/ClickHouse/pull/9612) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed the issue: the timezone was not preserved if you write a simple arithmetic expression like `time + 1` (in contrast to an expression like `time + INTERVAL 1 SECOND`). This fixes [\#5743](https://github.com/ClickHouse/ClickHouse/issues/5743). [\#9323](https://github.com/ClickHouse/ClickHouse/pull/9323) ([alexey-milovidov](https://github.com/alexey-milovidov)). This issue was in all ClickHouse releases.
+- Now it's not possible to create or add columns with simple cyclic aliases like `a DEFAULT b, b DEFAULT a`. [\#9603](https://github.com/ClickHouse/ClickHouse/pull/9603) ([alesapin](https://github.com/alesapin))
+- Fixed the issue when padding at the end of a base64 encoded value can be malformed. Updated the base64 library. This fixes [\#9491](https://github.com/ClickHouse/ClickHouse/issues/9491), closes [\#9492](https://github.com/ClickHouse/ClickHouse/issues/9492) [\#9500](https://github.com/ClickHouse/ClickHouse/pull/9500) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix a data race at destruction of `Poco::HTTPServer`. It could happen when the server is started and immediately shut down. [\#9468](https://github.com/ClickHouse/ClickHouse/pull/9468) ([Anton Popov](https://github.com/CurtizJ))
+- Fix possible crash/wrong number of rows in `LIMIT n WITH TIES` when there are a lot of rows equal to the n-th row. [\#9464](https://github.com/ClickHouse/ClickHouse/pull/9464) ([tavplubix](https://github.com/tavplubix))
+- Fix possible mismatched checksums with column TTLs. [\#9451](https://github.com/ClickHouse/ClickHouse/pull/9451) ([Anton Popov](https://github.com/CurtizJ))
+- Fix a crash when a user tries to `ALTER MODIFY SETTING` for the old-formatted `MergeTree` table engines family.
[\#9435](https://github.com/ClickHouse/ClickHouse/pull/9435) ([alesapin](https://github.com/alesapin))
+- Now we will try to finalize mutations more frequently. [\#9427](https://github.com/ClickHouse/ClickHouse/pull/9427) ([alesapin](https://github.com/alesapin))
+- Fix the replication protocol incompatibility introduced in [\#8598](https://github.com/ClickHouse/ClickHouse/issues/8598). [\#9412](https://github.com/ClickHouse/ClickHouse/pull/9412) ([alesapin](https://github.com/alesapin))
+- Fix not(has()) for the bloom\_filter index of array types. [\#9407](https://github.com/ClickHouse/ClickHouse/pull/9407) ([achimbab](https://github.com/achimbab))
+- Fixed the behaviour of the `match` and `extract` functions when the haystack has zero bytes. The behaviour was wrong when the haystack was constant. This fixes [\#9160](https://github.com/ClickHouse/ClickHouse/issues/9160) [\#9163](https://github.com/ClickHouse/ClickHouse/pull/9163) ([alexey-milovidov](https://github.com/alexey-milovidov)) [\#9345](https://github.com/ClickHouse/ClickHouse/pull/9345) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-1}
+
+- Exception handling now works correctly on the Windows Subsystem for Linux. See https://github.com/clickhouse-extras/libunwind/pull/3 This fixes [\#6480](https://github.com/ClickHouse/ClickHouse/issues/6480) [\#9564](https://github.com/ClickHouse/ClickHouse/pull/9564) ([sobolevsv](https://github.com/sobolevsv))
+
+### ClickHouse release v20.1.6.30, 2020-03-05 {#clickhouse-release-v20-1-6-30-2020-03-05}
+
+#### Bug Fix {#bug-fix-5}
+
+- Fix data incompatibility when compressed with the `T64` codec.
+ [\#9039](https://github.com/ClickHouse/ClickHouse/pull/9039) [(abyss7)](https://github.com/abyss7)
+- Fix the order of ranges while reading from a MergeTree table in one thread. Fixes [\#8964](https://github.com/ClickHouse/ClickHouse/issues/8964).
+ [\#9050](https://github.com/ClickHouse/ClickHouse/pull/9050) [(CurtizJ)](https://github.com/CurtizJ)
+- Fix a possible segfault in `MergeTreeRangeReader` while executing `PREWHERE`. Fixes [\#9064](https://github.com/ClickHouse/ClickHouse/issues/9064).
+ [\#9106](https://github.com/ClickHouse/ClickHouse/pull/9106) [(CurtizJ)](https://github.com/CurtizJ)
+- Fix `reinterpretAsFixedString` to return `FixedString` instead of `String`.
+ [\#9052](https://github.com/ClickHouse/ClickHouse/pull/9052) [(oandrew)](https://github.com/oandrew)
+- Fix `joinGet` with nullable return types. Fixes [\#8919](https://github.com/ClickHouse/ClickHouse/issues/8919)
+ [\#9014](https://github.com/ClickHouse/ClickHouse/pull/9014) [(amosbird)](https://github.com/amosbird)
+- Fix fuzz test and incorrect behaviour of the bitTestAll/bitTestAny functions.
+ [\#9143](https://github.com/ClickHouse/ClickHouse/pull/9143) [(alexey-milovidov)](https://github.com/alexey-milovidov)
+- Fix the behaviour of the match and extract functions when the haystack has zero bytes. The behaviour was wrong when the haystack was constant. Fixes [\#9160](https://github.com/ClickHouse/ClickHouse/issues/9160)
+ [\#9163](https://github.com/ClickHouse/ClickHouse/pull/9163) [(alexey-milovidov)](https://github.com/alexey-milovidov)
+- Fixed execution of inversed predicates when a non-strictly monotinic functional index is used. Fixes [\#9034](https://github.com/ClickHouse/ClickHouse/issues/9034)
+ [\#9223](https://github.com/ClickHouse/ClickHouse/pull/9223) [(Akazz)](https://github.com/Akazz)
+- Allow rewriting `CROSS` to `INNER JOIN` if there's a `[NOT] LIKE` operator in the `WHERE` section.
+- Allow first column(s) in a table with `Log` engine to be an alias.
+  [\#9231](https://github.com/ClickHouse/ClickHouse/pull/9231) [(abyss7)](https://github.com/abyss7)
+- Allow comma join with `IN()` inside. Fixes [\#7314](https://github.com/ClickHouse/ClickHouse/issues/7314).
+  [\#9251](https://github.com/ClickHouse/ClickHouse/pull/9251) [(4ertus2)](https://github.com/4ertus2)
+- Improve `ALTER MODIFY/ADD` queries logic. Now you cannot `ADD` a column without a type, `MODIFY` of a default expression doesn't change the type of the column, and `MODIFY` of the type doesn't lose the default expression value (see the example after this list). Fixes [\#8669](https://github.com/ClickHouse/ClickHouse/issues/8669).
+  [\#9227](https://github.com/ClickHouse/ClickHouse/pull/9227) [(alesapin)](https://github.com/alesapin)
+- Fix mutations finalization, when an already done mutation could have status is\_done=0.
+  [\#9217](https://github.com/ClickHouse/ClickHouse/pull/9217) [(alesapin)](https://github.com/alesapin)
+- Support “Processors” pipeline for system.numbers and system.numbers\_mt. This also fixes the bug when `max_execution_time` is not respected.
+  [\#7796](https://github.com/ClickHouse/ClickHouse/pull/7796) [(KochetovNicolai)](https://github.com/KochetovNicolai)
+- Fix wrong counting of the `DictCacheKeysRequestedFound` metric.
+  [\#9411](https://github.com/ClickHouse/ClickHouse/pull/9411) [(nikitamikhaylov)](https://github.com/nikitamikhaylov)
+- Added a check for storage policy in `ATTACH PARTITION FROM`, `REPLACE PARTITION`, `MOVE TO TABLE` which otherwise could make data of a part inaccessible after restart and prevent ClickHouse from starting.
+  [\#9383](https://github.com/ClickHouse/ClickHouse/pull/9383) [(excitoon)](https://github.com/excitoon)
+- Fixed UBSan report in `MergeTreeIndexSet`. This fixes [\#9250](https://github.com/ClickHouse/ClickHouse/issues/9250)
+  [\#9365](https://github.com/ClickHouse/ClickHouse/pull/9365) [(alexey-milovidov)](https://github.com/alexey-milovidov)
+- Fix possible data race in BlockIO.
+  [\#9356](https://github.com/ClickHouse/ClickHouse/pull/9356) [(KochetovNicolai)](https://github.com/KochetovNicolai)
+- Support `UInt64` numbers that don't fit in Int64 in JSON-related functions. Update `SIMDJSON` to master. This fixes [\#9209](https://github.com/ClickHouse/ClickHouse/issues/9209)
+  [\#9344](https://github.com/ClickHouse/ClickHouse/pull/9344) [(alexey-milovidov)](https://github.com/alexey-milovidov)
+- Fix the issue when the amount of free space is not calculated correctly if the data directory is mounted to a separate device. For the default disk, calculate the free space from the data subdirectory. This fixes [\#7441](https://github.com/ClickHouse/ClickHouse/issues/7441)
+  [\#9257](https://github.com/ClickHouse/ClickHouse/pull/9257) [(millb)](https://github.com/millb)
+- Fix the issue when TLS connections may fail with the message `OpenSSL SSL_read: error:14094438:SSL routines:ssl3_read_bytes:tlsv1 alert internal error and SSL Exception: error:2400006E:random number generator::error retrieving entropy.` Update OpenSSL to upstream master.
+  [\#8956](https://github.com/ClickHouse/ClickHouse/pull/8956) [(alexey-milovidov)](https://github.com/alexey-milovidov)
+- When executing a `CREATE` query, fold constant expressions in storage engine arguments. Replace an empty database name with the current database. Fixes [\#6508](https://github.com/ClickHouse/ClickHouse/issues/6508), [\#3492](https://github.com/ClickHouse/ClickHouse/issues/3492). Also fix check for local addresses in ClickHouseDictionarySource.
+  [\#9262](https://github.com/ClickHouse/ClickHouse/pull/9262) [(tavplubix)](https://github.com/tavplubix)
+- Fix segfault in `StorageMerge`, which can happen when reading from StorageFile.
+  [\#9387](https://github.com/ClickHouse/ClickHouse/pull/9387) [(tavplubix)](https://github.com/tavplubix)
+- Prevent losing data in `Kafka` in rare cases when an exception happens after reading the suffix but before commit. Fixes [\#9378](https://github.com/ClickHouse/ClickHouse/issues/9378). Related: [\#7175](https://github.com/ClickHouse/ClickHouse/issues/7175)
+  [\#9507](https://github.com/ClickHouse/ClickHouse/pull/9507) [(filimonov)](https://github.com/filimonov)
+- Fix bug leading to server termination when trying to use / drop a `Kafka` table created with wrong parameters. Fixes [\#9494](https://github.com/ClickHouse/ClickHouse/issues/9494). Incorporates [\#9507](https://github.com/ClickHouse/ClickHouse/issues/9507).
+  [\#9513](https://github.com/ClickHouse/ClickHouse/pull/9513) [(filimonov)](https://github.com/filimonov)
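+
+A sketch of the improved `ALTER MODIFY/ADD` logic described above (table and column names are hypothetical):
+
+``` sql
+ALTER TABLE t ADD COLUMN c String DEFAULT 'x';        -- an explicit type is now required
+ALTER TABLE t MODIFY COLUMN c DEFAULT 'y';            -- changes only the default; the type String is kept
+ALTER TABLE t MODIFY COLUMN c LowCardinality(String); -- changes only the type; DEFAULT 'y' is kept
+```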
+#### New Feature {#new-feature-1}
+
+- Add `deduplicate_blocks_in_dependent_materialized_views` option to control the behaviour of idempotent inserts into tables with materialized views (see the example after this list). This new feature was added to the bugfix release by a special request from Altinity.
+  [\#9070](https://github.com/ClickHouse/ClickHouse/pull/9070) [(urykhy)](https://github.com/urykhy)
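+
+A minimal sketch of the setting above (the table name is hypothetical, and insert deduplication itself assumes a `Replicated*` table):
+
+``` sql
+SET deduplicate_blocks_in_dependent_materialized_views = 1;
+-- Retrying the exact same insert is now deduplicated in the dependent
+-- materialized views as well, not only in the source table:
+INSERT INTO source_table VALUES (1, 'a');
+INSERT INTO source_table VALUES (1, 'a');
+```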
+### ClickHouse release v20.1.2.4, 2020-01-22 {#clickhouse-release-v20-1-2-4-2020-01-22}
+
+#### Backward Incompatible Change {#backward-incompatible-change-1}
+
+- Make the setting `merge_tree_uniform_read_distribution` obsolete. The server still recognizes this setting but it has no effect. [\#8308](https://github.com/ClickHouse/ClickHouse/pull/8308) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Changed the return type of the function `greatCircleDistance` to `Float32` because now the result of the calculation is `Float32`. [\#7993](https://github.com/ClickHouse/ClickHouse/pull/7993) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Now it's expected that query parameters are represented in “escaped” format. For example, to pass the string `a<tab>b` you have to write `a\tb` or `a\b` and respectively, `a%5Ctb` or `a%5C%09b` in a URL. This is needed to add the possibility to pass NULL as `\N` (see the example after this list). This fixes [\#7488](https://github.com/ClickHouse/ClickHouse/issues/7488). [\#8517](https://github.com/ClickHouse/ClickHouse/pull/8517) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Enable the `use_minimalistic_part_header_in_zookeeper` setting for `ReplicatedMergeTree` by default. This will significantly reduce the amount of data stored in ZooKeeper. This setting is supported since version 19.1 and we already use it in production in multiple services without any issues for more than half a year. Disable this setting if you have a chance to downgrade to versions older than 19.1. [\#6850](https://github.com/ClickHouse/ClickHouse/pull/6850) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Data skipping indices are production ready and enabled by default. The settings `allow_experimental_data_skipping_indices`, `allow_experimental_cross_to_join_conversion` and `allow_experimental_multiple_joins_emulation` are now obsolete and do nothing. [\#7974](https://github.com/ClickHouse/ClickHouse/pull/7974) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add new `ANY JOIN` logic for `StorageJoin` consistent with the `JOIN` operation. To upgrade without changes in behaviour you need to add `SETTINGS any_join_distinct_right_table_keys = 1` to Engine Join tables metadata or recreate these tables after the upgrade. [\#8400](https://github.com/ClickHouse/ClickHouse/pull/8400) ([Artem Zuikov](https://github.com/4ertus2))
+- Require the server to be restarted to apply changes in the logging configuration. This is a temporary workaround to avoid the bug where the server logs to a deleted log file (see [\#8696](https://github.com/ClickHouse/ClickHouse/issues/8696)). [\#8707](https://github.com/ClickHouse/ClickHouse/pull/8707) ([Alexander Kuzmenkov](https://github.com/akuzm))
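+
+A sketch of the “escaped” query-parameter format described above (the parameter name `phrase` and the query are hypothetical):
+
+``` sql
+-- A query with a typed parameter placeholder:
+SELECT {phrase:String} AS value;
+-- Over HTTP the value is now passed in escaped form, e.g.:
+--   ...?param_phrase=a%5Ctb   -- decodes to a\tb, i.e. the string "a<tab>b"
+```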
+#### New Feature {#new-feature-2}
+
+- Added information about part paths to `system.merges`. [\#8043](https://github.com/ClickHouse/ClickHouse/pull/8043) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Add ability to execute the `SYSTEM RELOAD DICTIONARY` query in `ON CLUSTER` mode. [\#8288](https://github.com/ClickHouse/ClickHouse/pull/8288) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Add ability to execute `CREATE DICTIONARY` queries in `ON CLUSTER` mode. [\#8163](https://github.com/ClickHouse/ClickHouse/pull/8163) ([alesapin](https://github.com/alesapin))
+- Now a user's profile in `users.xml` can inherit multiple profiles. [\#8343](https://github.com/ClickHouse/ClickHouse/pull/8343) ([Mikhail f. Shiryaev](https://github.com/Felixoid))
+- Added the `system.stack_trace` table that allows to look at stack traces of all server threads. This is useful for developers to introspect server state. This fixes [\#7576](https://github.com/ClickHouse/ClickHouse/issues/7576). [\#8344](https://github.com/ClickHouse/ClickHouse/pull/8344) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add the `DateTime64` data type with configurable sub-second precision. [\#7170](https://github.com/ClickHouse/ClickHouse/pull/7170) ([Vasily Nemkov](https://github.com/Enmk))
+- Add table function `clusterAllReplicas` which allows to query all the nodes in the cluster. [\#8493](https://github.com/ClickHouse/ClickHouse/pull/8493) ([Kiran Sunkari](https://github.com/kiransunkari))
+- Add aggregate function `categoricalInformationValue` which calculates the information value of a discrete feature. [\#8117](https://github.com/ClickHouse/ClickHouse/pull/8117) ([hcz](https://github.com/hczhcz))
+- Speed up parsing of data files in `CSV`, `TSV` and `JSONEachRow` formats by doing it in parallel. [\#7780](https://github.com/ClickHouse/ClickHouse/pull/7780) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Add function `bankerRound` which performs banker's rounding. [\#8112](https://github.com/ClickHouse/ClickHouse/pull/8112) ([hcz](https://github.com/hczhcz))
+- Support more languages in the embedded dictionary for region names: ‘ru’, ‘en’, ‘ua’, ‘uk’, ‘by’, ‘kz’, ‘tr’, ‘de’, ‘uz’, ‘lv’, ‘lt’, ‘et’, ‘pt’, ‘he’, ‘vi’. [\#8189](https://github.com/ClickHouse/ClickHouse/pull/8189) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Improvements in consistency of `ANY JOIN` logic. Now `t1 ANY LEFT JOIN t2` equals `t2 ANY RIGHT JOIN t1`. [\#7665](https://github.com/ClickHouse/ClickHouse/pull/7665) ([Artem Zuikov](https://github.com/4ertus2))
+- Add setting `any_join_distinct_right_table_keys` which enables the old behaviour for `ANY INNER JOIN`. [\#7665](https://github.com/ClickHouse/ClickHouse/pull/7665) ([Artem Zuikov](https://github.com/4ertus2))
+- Add new `SEMI` and `ANTI JOIN`. The old `ANY INNER JOIN` behaviour is now available as `SEMI LEFT JOIN`. [\#7665](https://github.com/ClickHouse/ClickHouse/pull/7665) ([Artem Zuikov](https://github.com/4ertus2))
+- Added `Distributed` format for the `File` engine and the `file` table function which allows to read from `.bin` files generated by asynchronous inserts into a `Distributed` table. [\#8535](https://github.com/ClickHouse/ClickHouse/pull/8535) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Add an optional reset-column argument for `runningAccumulate` which allows to reset aggregation results for each new key value. [\#8326](https://github.com/ClickHouse/ClickHouse/pull/8326) ([Sergey Kononenko](https://github.com/kononencheg))
+- Add ability to use ClickHouse as a Prometheus endpoint. [\#7900](https://github.com/ClickHouse/ClickHouse/pull/7900) ([Vladimir](https://github.com/Vdimir))
+- Add section `<remote_url_allow_hosts>` in `config.xml` which restricts allowed hosts for remote table engines and table functions `URL`, `S3`, `HDFS`. [\#7154](https://github.com/ClickHouse/ClickHouse/pull/7154) ([Mikhail Korotov](https://github.com/millb))
+- Added function `greatCircleAngle` which calculates the distance on a sphere in degrees. [\#8105](https://github.com/ClickHouse/ClickHouse/pull/8105) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Changed the Earth radius to be consistent with the H3 library. [\#8105](https://github.com/ClickHouse/ClickHouse/pull/8105) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added `JSONCompactEachRow` and `JSONCompactEachRowWithNamesAndTypes` formats for input and output. [\#7841](https://github.com/ClickHouse/ClickHouse/pull/7841) ([Mikhail Korotov](https://github.com/millb))
+- Added a feature for file-related table engines and table functions (`File`, `S3`, `URL`, `HDFS`) which allows to read and write `gzip` files based on an additional engine parameter or file extension. [\#7840](https://github.com/ClickHouse/ClickHouse/pull/7840) ([Andrey Bodrov](https://github.com/apbodrov))
+- Added the `randomASCII(length)` function, generating a string with a random set of [ASCII](https://en.wikipedia.org/wiki/ASCII#Printable_characters) printable characters. [\#8401](https://github.com/ClickHouse/ClickHouse/pull/8401) ([BayoNet](https://github.com/BayoNet))
+- Added function `JSONExtractArrayRaw` which returns an array of unparsed JSON array elements from a `JSON` string (see the example after this list). [\#8081](https://github.com/ClickHouse/ClickHouse/pull/8081) ([Oleg Matrokhin](https://github.com/errx))
+- Add `arrayZip` function which allows to combine multiple arrays of equal lengths into one array of tuples (see the example after this list). [\#8149](https://github.com/ClickHouse/ClickHouse/pull/8149) ([Winter Zhang](https://github.com/zhang2014))
+- Add ability to move data between disks according to configured `TTL`-expressions for the `*MergeTree` table engines family. [\#8140](https://github.com/ClickHouse/ClickHouse/pull/8140) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Added new aggregate function `avgWeighted` which allows to calculate the weighted average. [\#7898](https://github.com/ClickHouse/ClickHouse/pull/7898) ([Andrey Bodrov](https://github.com/apbodrov))
+- Now parallel parsing is enabled by default for `TSV`, `TSKV`, `CSV` and `JSONEachRow` formats. [\#7894](https://github.com/ClickHouse/ClickHouse/pull/7894) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Add several geo functions from the `H3` library: `h3GetResolution`, `h3EdgeAngle`, `h3EdgeLength`, `h3IsValid` and `h3kRing`. [\#8034](https://github.com/ClickHouse/ClickHouse/pull/8034) ([Konstantin Malanchev](https://github.com/hombit))
+- Added support for brotli (`br`) compression in file-related storages and table functions. This fixes [\#8156](https://github.com/ClickHouse/ClickHouse/issues/8156). [\#8526](https://github.com/ClickHouse/ClickHouse/pull/8526) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add `groupBit*` functions for the `SimpleAggregationFunction` type. [\#8485](https://github.com/ClickHouse/ClickHouse/pull/8485) ([Guillaume Tassery](https://github.com/YiuRULE))
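+
+A few of the new functions above in action (a minimal sketch; the values are arbitrary):
+
+``` sql
+SELECT arrayZip(['a', 'b', 'c'], [1, 2, 3]);          -- [('a',1),('b',2),('c',3)]
+SELECT JSONExtractArrayRaw('{"a": [1, 2, 3]}', 'a');  -- ['1','2','3'], elements left unparsed
+SELECT avgWeighted(x, w)                              -- sum(x*w)/sum(w) = 14/6 = 2.33...
+FROM (SELECT arrayJoin([1, 2, 3]) AS x, x AS w);
+```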
+#### Bug Fix {#bug-fix-6}
+
+- Fix rename of tables with `Distributed` engine. Fixes issue [\#7868](https://github.com/ClickHouse/ClickHouse/issues/7868). [\#8306](https://github.com/ClickHouse/ClickHouse/pull/8306) ([tavplubix](https://github.com/tavplubix))
+- Now dictionaries support `EXPRESSION` for attributes in an arbitrary string in non-ClickHouse SQL dialect. [\#8098](https://github.com/ClickHouse/ClickHouse/pull/8098) ([alesapin](https://github.com/alesapin))
+- Fix broken `INSERT SELECT FROM mysql(...)` query. This fixes [\#8070](https://github.com/ClickHouse/ClickHouse/issues/8070) and [\#7960](https://github.com/ClickHouse/ClickHouse/issues/7960). [\#8234](https://github.com/ClickHouse/ClickHouse/pull/8234) ([tavplubix](https://github.com/tavplubix))
+- Fix error “Mismatch column sizes” when inserting default `Tuple` from `JSONEachRow`. This fixes [\#5653](https://github.com/ClickHouse/ClickHouse/issues/5653). [\#8606](https://github.com/ClickHouse/ClickHouse/pull/8606) ([tavplubix](https://github.com/tavplubix))
+- Now an exception will be thrown in case of using `WITH TIES` alongside `LIMIT BY`. Also add the ability to use `TOP` with `LIMIT BY`. This fixes [\#7472](https://github.com/ClickHouse/ClickHouse/issues/7472). [\#7637](https://github.com/ClickHouse/ClickHouse/pull/7637) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Fix unintended dependency on a fresh glibc version in the `clickhouse-odbc-bridge` binary. [\#8046](https://github.com/ClickHouse/ClickHouse/pull/8046) ([Amos Bird](https://github.com/amosbird))
+- Fix bug in the check function of the `*MergeTree` engines family. Now it doesn't fail in the case when we have an equal amount of rows in the last granule and the last mark (non-final). [\#8047](https://github.com/ClickHouse/ClickHouse/pull/8047) ([alesapin](https://github.com/alesapin))
+- Fix insert into `Enum*` columns after an `ALTER` query, when the underlying numeric type is equal to the table specified type. This fixes [\#7836](https://github.com/ClickHouse/ClickHouse/issues/7836). [\#7908](https://github.com/ClickHouse/ClickHouse/pull/7908) ([Anton Popov](https://github.com/CurtizJ))
+- Allowed non-constant negative “size” argument for function `substring`. It was not allowed by mistake. This fixes [\#4832](https://github.com/ClickHouse/ClickHouse/issues/4832). [\#7703](https://github.com/ClickHouse/ClickHouse/pull/7703) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix parsing bug when a wrong number of arguments is passed to the `(O|J)DBC` table engine. [\#7709](https://github.com/ClickHouse/ClickHouse/pull/7709) ([alesapin](https://github.com/alesapin))
+- Use the command name of the running clickhouse process when sending logs to syslog. In previous versions, an empty string was used instead of the command name. [\#8460](https://github.com/ClickHouse/ClickHouse/pull/8460) ([Michael Nacharov](https://github.com/mnach))
+- Fix check of allowed hosts for `localhost`. This PR fixes the solution provided in [\#8241](https://github.com/ClickHouse/ClickHouse/pull/8241). [\#8342](https://github.com/ClickHouse/ClickHouse/pull/8342) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fix rare crash in `argMin` and `argMax` functions for long string arguments, when the result is used in the `runningAccumulate` function. This fixes [\#8325](https://github.com/ClickHouse/ClickHouse/issues/8325) [\#8341](https://github.com/ClickHouse/ClickHouse/pull/8341) ([769344359](https://github.com/769344359))
+- Fix memory overcommit for tables with `Buffer` engine. [\#8345](https://github.com/ClickHouse/ClickHouse/pull/8345) ([Azat Khuzhin](https://github.com/azat))
+- Fixed a potential bug in functions that can take `NULL` as one of the arguments and return non-NULL. [\#8196](https://github.com/ClickHouse/ClickHouse/pull/8196) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Better metrics calculations in the thread pool for background processes for `MergeTree` table engines. [\#8194](https://github.com/ClickHouse/ClickHouse/pull/8194) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fix function `IN` inside a `WHERE` statement when a row-level table filter is present. Fixes [\#6687](https://github.com/ClickHouse/ClickHouse/issues/6687) [\#8357](https://github.com/ClickHouse/ClickHouse/pull/8357) ([Ivan](https://github.com/abyss7))
+- Now an exception is thrown if an integral value is not parsed completely for settings values. [\#7678](https://github.com/ClickHouse/ClickHouse/pull/7678) ([Mikhail Korotov](https://github.com/millb))
+- Fix exception when an aggregate function is used in a query to a distributed table with more than two local shards. [\#8164](https://github.com/ClickHouse/ClickHouse/pull/8164) ([小路](https://github.com/nicelulu))
+- Now the bloom filter can handle zero-length arrays and doesn't perform redundant calculations. [\#8242](https://github.com/ClickHouse/ClickHouse/pull/8242) ([achimbab](https://github.com/achimbab))
+- Fixed checking if a client host is allowed by matching the client host to `host_regexp` specified in `users.xml`. [\#8241](https://github.com/ClickHouse/ClickHouse/pull/8241) ([Vitaly Baranov](https://github.com/vitlibar))
+- Relax the ambiguous column check that leads to false positives in multiple `JOIN ON` sections. [\#8385](https://github.com/ClickHouse/ClickHouse/pull/8385) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed possible server crash (`std::terminate`) when the server cannot send or write data in `JSON` or `XML` format with values of `String` data type (that require `UTF-8` validation), or when compressing result data with the Brotli algorithm, or in some other rare cases. This fixes [\#7603](https://github.com/ClickHouse/ClickHouse/issues/7603) [\#8384](https://github.com/ClickHouse/ClickHouse/pull/8384) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix race condition in `StorageDistributedDirectoryMonitor` found by CI. This fixes [\#8364](https://github.com/ClickHouse/ClickHouse/issues/8364). [\#8383](https://github.com/ClickHouse/ClickHouse/pull/8383) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Now background merges in the `*MergeTree` table engines family preserve the storage policy volume order more accurately. [\#8549](https://github.com/ClickHouse/ClickHouse/pull/8549) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Now the table engine `Kafka` works properly with the `Native` format. This fixes [\#6731](https://github.com/ClickHouse/ClickHouse/issues/6731) [\#7337](https://github.com/ClickHouse/ClickHouse/issues/7337) [\#8003](https://github.com/ClickHouse/ClickHouse/issues/8003). [\#8016](https://github.com/ClickHouse/ClickHouse/pull/8016) ([filimonov](https://github.com/filimonov))
+- Fixed formats with headers (like `CSVWithNames`) which were throwing an exception about EOF for the table engine `Kafka`. [\#8016](https://github.com/ClickHouse/ClickHouse/pull/8016) ([filimonov](https://github.com/filimonov))
+- Fixed a bug with making a set from a subquery in the right part of an `IN` section. This fixes [\#5767](https://github.com/ClickHouse/ClickHouse/issues/5767) and [\#2542](https://github.com/ClickHouse/ClickHouse/issues/2542). [\#7755](https://github.com/ClickHouse/ClickHouse/pull/7755) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Fix possible crash while reading from storage `File`. [\#7756](https://github.com/ClickHouse/ClickHouse/pull/7756) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed reading of files in `Parquet` format containing columns of type `list`. [\#8334](https://github.com/ClickHouse/ClickHouse/pull/8334) ([maxulan](https://github.com/maxulan))
+- Fix error `Not found column` for distributed queries with a `PREWHERE` condition dependent on the sampling key if `max_parallel_replicas > 1`. [\#7913](https://github.com/ClickHouse/ClickHouse/pull/7913) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix error `Not found column` if a query used `PREWHERE` dependent on a table's alias and the result set was empty because of the primary key condition. [\#7911](https://github.com/ClickHouse/ClickHouse/pull/7911) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed the return type for functions `rand` and `randConstant` in case of a `Nullable` argument. Now the functions always return `UInt32` and never `Nullable(UInt32)`. [\#8204](https://github.com/ClickHouse/ClickHouse/pull/8204) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Disabled predicate push-down for `WITH FILL` expressions. This fixes [\#7784](https://github.com/ClickHouse/ClickHouse/issues/7784). [\#7789](https://github.com/ClickHouse/ClickHouse/pull/7789) ([Winter Zhang](https://github.com/zhang2014))
+- Fixed incorrect `count()` result for `SummingMergeTree` when the `FINAL` section is used. [\#3280](https://github.com/ClickHouse/ClickHouse/issues/3280) [\#7786](https://github.com/ClickHouse/ClickHouse/pull/7786) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Fix possible incorrect result for constant functions from remote servers. It happened for queries with functions like `version()`, `uptime()`, etc. which return different constant values for different servers. This fixes [\#7666](https://github.com/ClickHouse/ClickHouse/issues/7666). [\#7689](https://github.com/ClickHouse/ClickHouse/pull/7689) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix a complicated bug in push-down predicate optimization which led to wrong results. This fixes a lot of issues in push-down predicate optimization. [\#8503](https://github.com/ClickHouse/ClickHouse/pull/8503) ([Winter Zhang](https://github.com/zhang2014))
+- Fix crash in `CREATE TABLE .. AS dictionary` query. [\#8508](https://github.com/ClickHouse/ClickHouse/pull/8508) ([Azat Khuzhin](https://github.com/azat))
+- Several improvements to the ClickHouse grammar in the `.g4` file. [\#8294](https://github.com/ClickHouse/ClickHouse/pull/8294) ([taiyang-li](https://github.com/taiyang-li))
+- Fix bug that leads to crashes in `JOIN`s with tables with engine `Join`. This fixes [\#7556](https://github.com/ClickHouse/ClickHouse/issues/7556) [\#8254](https://github.com/ClickHouse/ClickHouse/issues/8254) [\#7915](https://github.com/ClickHouse/ClickHouse/issues/7915) [\#8100](https://github.com/ClickHouse/ClickHouse/issues/8100). [\#8298](https://github.com/ClickHouse/ClickHouse/pull/8298) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix redundant dictionary reloads on `CREATE DATABASE`. [\#7916](https://github.com/ClickHouse/ClickHouse/pull/7916) ([Azat Khuzhin](https://github.com/azat))
+- Limit the maximum number of streams for reading from `StorageFile` and `StorageHDFS`. Fixes https://github.com/ClickHouse/ClickHouse/issues/7650. [\#7981](https://github.com/ClickHouse/ClickHouse/pull/7981) ([alesapin](https://github.com/alesapin))
+- Fix bug in the `ALTER ... MODIFY ... CODEC` query, when the user specifies both a default expression and a codec. Fixes [8593](https://github.com/ClickHouse/ClickHouse/issues/8593). [\#8614](https://github.com/ClickHouse/ClickHouse/pull/8614) ([alesapin](https://github.com/alesapin))
+- Fix error in background merge of columns with `SimpleAggregateFunction(LowCardinality)` type. [\#8613](https://github.com/ClickHouse/ClickHouse/pull/8613) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed type check in function `toDateTime64`. [\#8375](https://github.com/ClickHouse/ClickHouse/pull/8375) ([Vasily Nemkov](https://github.com/Enmk))
+- Now the server does not crash on `LEFT` or `FULL JOIN` with the Join engine and unsupported `join_use_nulls` settings. [\#8479](https://github.com/ClickHouse/ClickHouse/pull/8479) ([Artem Zuikov](https://github.com/4ertus2))
+- Now the `DROP DICTIONARY IF EXISTS db.dict` query doesn't throw an exception if `db` doesn't exist (see the example after this list). [\#8185](https://github.com/ClickHouse/ClickHouse/pull/8185) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fix possible crashes in table functions (`file`, `mysql`, `remote`) caused by usage of a reference to a removed `IStorage` object. Fix incorrect parsing of columns specified at insertion into a table function. [\#7762](https://github.com/ClickHouse/ClickHouse/pull/7762) ([tavplubix](https://github.com/tavplubix))
+- Ensure the network is up before starting `clickhouse-server`. This fixes [\#7507](https://github.com/ClickHouse/ClickHouse/issues/7507). [\#8570](https://github.com/ClickHouse/ClickHouse/pull/8570) ([Zhichang Yu](https://github.com/yuzhichang))
+- Fix timeouts handling for secure connections, so queries don't hang indefinitely. This fixes [\#8126](https://github.com/ClickHouse/ClickHouse/issues/8126). [\#8128](https://github.com/ClickHouse/ClickHouse/pull/8128) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix `clickhouse-copier`'s redundant contention between concurrent workers. [\#7816](https://github.com/ClickHouse/ClickHouse/pull/7816) ([Ding Xiang Fei](https://github.com/dingxiangfei2009))
+- Now mutations don't skip attached parts, even if their mutation version is larger than the current mutation version. [\#7812](https://github.com/ClickHouse/ClickHouse/pull/7812) ([Zhichang Yu](https://github.com/yuzhichang)) [\#8250](https://github.com/ClickHouse/ClickHouse/pull/8250) ([alesapin](https://github.com/alesapin))
+- Ignore redundant copies of `*MergeTree` data parts after a move to another disk and a server restart. [\#7810](https://github.com/ClickHouse/ClickHouse/pull/7810) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fix crash in `FULL JOIN` with `LowCardinality` in the `JOIN` key. [\#8252](https://github.com/ClickHouse/ClickHouse/pull/8252) ([Artem Zuikov](https://github.com/4ertus2))
+- Forbid using a column name more than once in an insert query like `INSERT INTO tbl (x, y, x)` (see the example after this list). This fixes [\#5465](https://github.com/ClickHouse/ClickHouse/issues/5465), [\#7681](https://github.com/ClickHouse/ClickHouse/issues/7681). [\#7685](https://github.com/ClickHouse/ClickHouse/pull/7685) ([alesapin](https://github.com/alesapin))
+- Added a fallback for detecting the number of physical CPU cores for unknown CPUs (using the number of logical CPU cores). This fixes [\#5239](https://github.com/ClickHouse/ClickHouse/issues/5239). [\#7726](https://github.com/ClickHouse/ClickHouse/pull/7726) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed the `There's no column` error for materialized and alias columns. [\#8210](https://github.com/ClickHouse/ClickHouse/pull/8210) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed a severe crash when an `EXISTS` query was used without a `TABLE` or `DICTIONARY` qualifier, just like `EXISTS t`. This fixes [\#8172](https://github.com/ClickHouse/ClickHouse/issues/8172). This bug was introduced in version 19.17. [\#8213](https://github.com/ClickHouse/ClickHouse/pull/8213) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix rare bug with the error `"Sizes of columns doesn't match"` that might appear when using a `SimpleAggregateFunction` column. [\#7790](https://github.com/ClickHouse/ClickHouse/pull/7790) ([Boris Granveaud](https://github.com/bgranvea))
+- Fix bug where a user with an empty `allow_databases` got access to all databases (and the same for `allow_dictionaries`). [\#7793](https://github.com/ClickHouse/ClickHouse/pull/7793) ([DeifyTheGod](https://github.com/DeifyTheGod))
+- Fix client crash when the server is already disconnected from the client. [\#8071](https://github.com/ClickHouse/ClickHouse/pull/8071) ([Azat Khuzhin](https://github.com/azat))
+- Fixed `ORDER BY` behaviour in case of sorting by a primary key prefix and a non-primary-key suffix. [\#7759](https://github.com/ClickHouse/ClickHouse/pull/7759) ([Anton Popov](https://github.com/CurtizJ))
+- Check if a qualified column is present in the table. This fixes [\#6836](https://github.com/ClickHouse/ClickHouse/issues/6836). [\#7758](https://github.com/ClickHouse/ClickHouse/pull/7758) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed behaviour with `ALTER MOVE` run immediately after a merge finishes, which moves the superpart of the specified one. Fixes [\#8103](https://github.com/ClickHouse/ClickHouse/issues/8103). [\#8104](https://github.com/ClickHouse/ClickHouse/pull/8104) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fix possible server crash while using `UNION` with different numbers of columns. Fixes [\#7279](https://github.com/ClickHouse/ClickHouse/issues/7279). [\#7929](https://github.com/ClickHouse/ClickHouse/pull/7929) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix the size of the result substring for function `substr` with negative size. [\#8589](https://github.com/ClickHouse/ClickHouse/pull/8589) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Now the server does not execute part mutations in `MergeTree` if there are not enough free threads in the background pool. [\#8588](https://github.com/ClickHouse/ClickHouse/pull/8588) ([tavplubix](https://github.com/tavplubix))
+- Fix a minor typo in formatting the `UNION ALL` AST. [\#7999](https://github.com/ClickHouse/ClickHouse/pull/7999) ([litao91](https://github.com/litao91))
+- Fixed incorrect bloom filter results for negative numbers. This fixes [\#8317](https://github.com/ClickHouse/ClickHouse/issues/8317).
[\#8566](https://github.com/ClickHouse/ClickHouse/pull/8566) ([Winter Zhang](https://github.com/zhang2014))
+- Fixed potential buffer overflow in decompression. A malicious user could pass fabricated compressed data that would cause a read past the buffer. This issue was found by Eldar Zaitov from the Yandex information security team. [\#8404](https://github.com/ClickHouse/ClickHouse/pull/8404) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix incorrect result because of integer overflow in `arrayIntersect`. [\#7777](https://github.com/ClickHouse/ClickHouse/pull/7777) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Now the `OPTIMIZE TABLE` query will not wait for offline replicas to perform the operation. [\#8314](https://github.com/ClickHouse/ClickHouse/pull/8314) ([Javi Santana](https://github.com/javisantana))
+- Fixed the `ALTER TTL` parser for `Replicated*MergeTree` tables. [\#8318](https://github.com/ClickHouse/ClickHouse/pull/8318) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fix communication between server and client, so the server reads temporary tables info after a query failure. [\#8084](https://github.com/ClickHouse/ClickHouse/pull/8084) ([Azat Khuzhin](https://github.com/azat))
+- Fixed a `bitmapAnd` function error when intersecting an aggregated bitmap and a scalar bitmap. [\#8082](https://github.com/ClickHouse/ClickHouse/pull/8082) ([Yu Huang](https://github.com/moon03432))
+- Refine the definition of `ZXid` according to the ZooKeeper Programmer's Guide, which fixes a bug in `clickhouse-cluster-copier`. [\#8088](https://github.com/ClickHouse/ClickHouse/pull/8088) ([Ding Xiang Fei](https://github.com/dingxiangfei2009))
+- The `odbc` table function now respects the `external_table_functions_use_nulls` setting. [\#7506](https://github.com/ClickHouse/ClickHouse/pull/7506) ([Vasily Nemkov](https://github.com/Enmk))
+- Fixed a bug that led to a rare data race. [\#8143](https://github.com/ClickHouse/ClickHouse/pull/8143) ([Alexander Kazakov](https://github.com/Akazz))
+- Now `SYSTEM RELOAD DICTIONARY` reloads a dictionary completely, ignoring `update_field`. This fixes [\#7440](https://github.com/ClickHouse/ClickHouse/issues/7440). [\#8037](https://github.com/ClickHouse/ClickHouse/pull/8037) ([Vitaly Baranov](https://github.com/vitlibar))
+- Add ability to check if a dictionary exists in a create query. [\#8032](https://github.com/ClickHouse/ClickHouse/pull/8032) ([alesapin](https://github.com/alesapin))
+- Fixed `Float*` parsing in the `Values` format. This fixes [\#7817](https://github.com/ClickHouse/ClickHouse/issues/7817). [\#7870](https://github.com/ClickHouse/ClickHouse/pull/7870) ([tavplubix](https://github.com/tavplubix))
+- Fix crash when we cannot reserve space in some background operations of the `*MergeTree` table engines family. [\#7873](https://github.com/ClickHouse/ClickHouse/pull/7873) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fix crash of the merge operation when the table contains a `SimpleAggregateFunction(LowCardinality)` column. This fixes [\#8515](https://github.com/ClickHouse/ClickHouse/issues/8515). [\#8522](https://github.com/ClickHouse/ClickHouse/pull/8522) ([Azat Khuzhin](https://github.com/azat))
+- Restore support of all ICU locales and add the ability to apply collations for constant expressions. Also add the language name to the `system.collations` table. [\#8051](https://github.com/ClickHouse/ClickHouse/pull/8051) ([alesapin](https://github.com/alesapin))
+- Fix bug when external dictionaries with zero minimal lifetime (`LIFETIME(MIN 0 MAX N)`, `LIFETIME(N)`) don't update in the background.
[\#7983](https://github.com/ClickHouse/ClickHouse/pull/7983) ([alesapin](https://github.com/alesapin))
+- Fix crash when an external dictionary with a ClickHouse source has a subquery in its query. [\#8351](https://github.com/ClickHouse/ClickHouse/pull/8351) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix incorrect parsing of the file extension in a table with engine `URL`. This fixes [\#8157](https://github.com/ClickHouse/ClickHouse/issues/8157). [\#8419](https://github.com/ClickHouse/ClickHouse/pull/8419) ([Andrey Bodrov](https://github.com/apbodrov))
+- Fixed the `CHECK TABLE` query for `*MergeTree` tables without a key. Fixes [\#7543](https://github.com/ClickHouse/ClickHouse/issues/7543). [\#7979](https://github.com/ClickHouse/ClickHouse/pull/7979) ([alesapin](https://github.com/alesapin))
+- Fixed conversion of `Float64` to the MySQL type. [\#8079](https://github.com/ClickHouse/ClickHouse/pull/8079) ([Yuriy Baranov](https://github.com/yurriy))
+- Now if a table was not completely dropped because of a server crash, the server will try to restore and load it. [\#8176](https://github.com/ClickHouse/ClickHouse/pull/8176) ([tavplubix](https://github.com/tavplubix))
+- Fixed crash in the table function `file` while inserting into a file that doesn't exist. Now in this case the file is created and then the insert is processed. [\#8177](https://github.com/ClickHouse/ClickHouse/pull/8177) ([Olga Khvostikova](https://github.com/stavrolia))
+- Fix a rare deadlock which can happen when `trace_log` is enabled. [\#7838](https://github.com/ClickHouse/ClickHouse/pull/7838) ([filimonov](https://github.com/filimonov))
+- Add ability to work with different types besides `Date` in a `RangeHashed` external dictionary created from a DDL query. Fixes [7899](https://github.com/ClickHouse/ClickHouse/issues/7899). [\#8275](https://github.com/ClickHouse/ClickHouse/pull/8275) ([alesapin](https://github.com/alesapin))
+- Fix crash when `now64()` is called with the result of another function. [\#8270](https://github.com/ClickHouse/ClickHouse/pull/8270) ([Vasily Nemkov](https://github.com/Enmk))
+- Fixed a bug with detecting the client IP for connections through the MySQL wire protocol. [\#7743](https://github.com/ClickHouse/ClickHouse/pull/7743) ([Dmitry Muzyka](https://github.com/dmitriy-myz))
+- Fix empty array handling in the `arraySplit` function. This fixes [\#7708](https://github.com/ClickHouse/ClickHouse/issues/7708). [\#7747](https://github.com/ClickHouse/ClickHouse/pull/7747) ([hcz](https://github.com/hczhcz))
+- Fixed the issue when the `pid-file` of another running `clickhouse-server` may be deleted. [\#8487](https://github.com/ClickHouse/ClickHouse/pull/8487) ([Weiqing Xu](https://github.com/weiqxu))
+- Fix dictionary reload if it has `invalidate_query`, which stopped updates after some exceptions on previous update tries. [\#8029](https://github.com/ClickHouse/ClickHouse/pull/8029) ([alesapin](https://github.com/alesapin))
+- Fixed an error in the function `arrayReduce` that may lead to “double free” and an error in the aggregate function combinator `Resample` that may lead to a memory leak. Added aggregate function `aggThrow`. This function can be used for testing purposes. [\#8446](https://github.com/ClickHouse/ClickHouse/pull/8446) ([alexey-milovidov](https://github.com/alexey-milovidov))
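+
+A short sketch of two of the fixes above (`tbl` and `db.dict` are hypothetical names):
+
+``` sql
+-- Duplicate column names in the insert column list are now rejected:
+INSERT INTO tbl (x, y, x) VALUES (1, 2, 3);  -- throws an exception
+-- No exception anymore when the database itself doesn't exist:
+DROP DICTIONARY IF EXISTS db.dict;
+```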
+
+#### Improvement {#improvement-1}
+
+- Improved logging when working with the `S3` table engine. [\#8251](https://github.com/ClickHouse/ClickHouse/pull/8251) ([Grigory Pervakov](https://github.com/GrigoryPervakov))
+- A help message is now printed when no arguments are passed when calling `clickhouse-local`. This fixes [\#5335](https://github.com/ClickHouse/ClickHouse/issues/5335). [\#8230](https://github.com/ClickHouse/ClickHouse/pull/8230) ([Melancholic](https://github.com/Melancholic))
+- Add setting `mutations_sync` which allows waiting for `ALTER UPDATE/DELETE` queries synchronously (see the example after this list). [\#8237](https://github.com/ClickHouse/ClickHouse/pull/8237) ([alesapin](https://github.com/alesapin))
+- Allow setting up a relative `user_files_path` in `config.xml` (in a way similar to `format_schema_path`). [\#7632](https://github.com/ClickHouse/ClickHouse/pull/7632) ([hcz](https://github.com/hczhcz))
+- Add exception for illegal types for conversion functions with the `-OrZero` postfix. [\#7880](https://github.com/ClickHouse/ClickHouse/pull/7880) ([Andrey Konyaev](https://github.com/akonyaev90))
+- Simplify the format of the header of data sent to a shard in a distributed query. [\#8044](https://github.com/ClickHouse/ClickHouse/pull/8044) ([Vitaly Baranov](https://github.com/vitlibar))
+- `Live View` table engine refactoring. [\#8519](https://github.com/ClickHouse/ClickHouse/pull/8519) ([vzakaznikov](https://github.com/vzakaznikov))
+- Add additional checks for external dictionaries created from DDL queries. [\#8127](https://github.com/ClickHouse/ClickHouse/pull/8127) ([alesapin](https://github.com/alesapin))
+- Fix error `Column ... already exists` while using `FINAL` and `SAMPLE` together, e.g. `select count() from table final sample 1/2`. Fixes [\#5186](https://github.com/ClickHouse/ClickHouse/issues/5186). [\#7907](https://github.com/ClickHouse/ClickHouse/pull/7907) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Now the first argument of the `joinGet` function can be a table identifier. [\#7707](https://github.com/ClickHouse/ClickHouse/pull/7707) ([Amos Bird](https://github.com/amosbird))
+- Allow using `MaterializedView` with subqueries above `Kafka` tables. [\#8197](https://github.com/ClickHouse/ClickHouse/pull/8197) ([filimonov](https://github.com/filimonov))
+- Now background moves between disks run in a separate thread pool. [\#7670](https://github.com/ClickHouse/ClickHouse/pull/7670) ([Vladimir Chebotarev](https://github.com/excitoon))
+- `SYSTEM RELOAD DICTIONARY` now executes synchronously. [\#8240](https://github.com/ClickHouse/ClickHouse/pull/8240) ([Vitaly Baranov](https://github.com/vitlibar))
+- Stack traces now display physical addresses (offsets in the object file) instead of virtual memory addresses (where the object file was loaded). That allows the use of `addr2line` when the binary is position independent and ASLR is active. This fixes [\#8360](https://github.com/ClickHouse/ClickHouse/issues/8360). [\#8387](https://github.com/ClickHouse/ClickHouse/pull/8387) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Support new syntax for row-level security filters: `<table name=…>…</table>`.
Fixes [\#5779](https://github.com/ClickHouse/ClickHouse/issues/5779). [\#8381](https://github.com/ClickHouse/ClickHouse/pull/8381) ([Ivan](https://github.com/abyss7))
+- Now the `cityHash` function can work with `Decimal` and `UUID` types. Fixes [\#5184](https://github.com/ClickHouse/ClickHouse/issues/5184). [\#7693](https://github.com/ClickHouse/ClickHouse/pull/7693) ([Mikhail Korotov](https://github.com/millb))
+- Removed fixed index granularity (it was 1024) from system logs because it's obsolete after the implementation of adaptive granularity. [\#7698](https://github.com/ClickHouse/ClickHouse/pull/7698) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Enabled the MySQL compatibility server when ClickHouse is compiled without SSL. [\#7852](https://github.com/ClickHouse/ClickHouse/pull/7852) ([Yuriy Baranov](https://github.com/yurriy))
+- Now the server checksums distributed batches, which gives more verbose errors in case of corrupted data in a batch. [\#7914](https://github.com/ClickHouse/ClickHouse/pull/7914) ([Azat Khuzhin](https://github.com/azat))
+- Support `DROP DATABASE`, `DETACH TABLE`, `DROP TABLE` and `ATTACH TABLE` for the `MySQL` database engine. [\#8202](https://github.com/ClickHouse/ClickHouse/pull/8202) ([Winter Zhang](https://github.com/zhang2014))
+- Add authentication in the S3 table function and table engine. [\#7623](https://github.com/ClickHouse/ClickHouse/pull/7623) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Added a check for extra parts of `MergeTree` on different disks, in order not to allow missing data parts on undefined disks. [\#8118](https://github.com/ClickHouse/ClickHouse/pull/8118) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Enable SSL support for the Mac client and server. [\#8297](https://github.com/ClickHouse/ClickHouse/pull/8297) ([Ivan](https://github.com/abyss7))
+- Now ClickHouse can work as a MySQL federated server (see https://dev.mysql.com/doc/refman/5.7/en/federated-create-server.html). [\#7717](https://github.com/ClickHouse/ClickHouse/pull/7717) ([Maxim Fedotov](https://github.com/MaxFedotov))
+- `clickhouse-client` now only enables `bracketed-paste` when multiquery is on and multiline is off. This fixes (\#7757) \[https://github.com/ClickHouse/ClickHouse/issues/7757\]. [\#7761](https://github.com/ClickHouse/ClickHouse/pull/7761) ([Amos Bird](https://github.com/amosbird))
+- Support `Array(Decimal)` in the `if` function. [\#7721](https://github.com/ClickHouse/ClickHouse/pull/7721) ([Artem Zuikov](https://github.com/4ertus2))
+- Support Decimals in the `arrayDifference`, `arrayCumSum` and `arrayCumSumNegative` functions. [\#7724](https://github.com/ClickHouse/ClickHouse/pull/7724) ([Artem Zuikov](https://github.com/4ertus2))
+- Added a `lifetime` column to the `system.dictionaries` table. [\#6820](https://github.com/ClickHouse/ClickHouse/issues/6820) [\#7727](https://github.com/ClickHouse/ClickHouse/pull/7727) ([kekekekule](https://github.com/kekekekule))
+- Improved check for existing parts on different disks for `*MergeTree` table engines. Addresses [\#7660](https://github.com/ClickHouse/ClickHouse/issues/7660). [\#8440](https://github.com/ClickHouse/ClickHouse/pull/8440) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Integration with the `AWS SDK` for `S3` interactions which allows using all S3 features out of the box.
[\#8011](https://github.com/ClickHouse/ClickHouse/pull/8011) ([Pavel Kovalenko](https://github.com/Jokser))
+- Added support for subqueries in `Live View` tables. [\#7792](https://github.com/ClickHouse/ClickHouse/pull/7792) ([vzakaznikov](https://github.com/vzakaznikov))
+- The check for using `Date` or `DateTime` columns from `TTL` expressions was removed. [\#7920](https://github.com/ClickHouse/ClickHouse/pull/7920) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Information about the disk was added to the `system.detached_parts` table. [\#7833](https://github.com/ClickHouse/ClickHouse/pull/7833) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Now the settings `max_(table|partition)_size_to_drop` can be changed without a restart. [\#7779](https://github.com/ClickHouse/ClickHouse/pull/7779) ([Grigory Pervakov](https://github.com/GrigoryPervakov))
+- Slightly better usability of error messages. Ask the user not to remove the lines below `Stack trace:`. [\#7897](https://github.com/ClickHouse/ClickHouse/pull/7897) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Better reading of messages from the `Kafka` engine in various formats after [\#7935](https://github.com/ClickHouse/ClickHouse/issues/7935). [\#8035](https://github.com/ClickHouse/ClickHouse/pull/8035) ([Ivan](https://github.com/abyss7))
+- Better compatibility with MySQL clients which don't support the `sha2_password` auth plugin. [\#8036](https://github.com/ClickHouse/ClickHouse/pull/8036) ([Yuriy Baranov](https://github.com/yurriy))
+- Support more column types in the MySQL compatibility server. [\#7975](https://github.com/ClickHouse/ClickHouse/pull/7975) ([Yuriy Baranov](https://github.com/yurriy))
+- Implement `ORDER BY` optimization for `Merge`, `Buffer` and `Materialized View` storages with underlying `MergeTree` tables. [\#8130](https://github.com/ClickHouse/ClickHouse/pull/8130) ([Anton Popov](https://github.com/CurtizJ))
+- Now we always use the POSIX implementation of `getrandom` for better compatibility with old kernels (\< 3.17). [\#7940](https://github.com/ClickHouse/ClickHouse/pull/7940) ([Amos Bird](https://github.com/amosbird))
+- Better check for a valid destination in a TTL move rule. [\#8410](https://github.com/ClickHouse/ClickHouse/pull/8410) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Better checks for broken insert batches for the `Distributed` table engine. [\#7933](https://github.com/ClickHouse/ClickHouse/pull/7933) ([Azat Khuzhin](https://github.com/azat))
+- Add a column with an array of part names which mutations must process in the future to the `system.mutations` table. [\#8179](https://github.com/ClickHouse/ClickHouse/pull/8179) ([alesapin](https://github.com/alesapin))
+- Parallel merge sort optimization for processors. [\#8552](https://github.com/ClickHouse/ClickHouse/pull/8552) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- The setting `mark_cache_min_lifetime` is now obsolete and does nothing. In previous versions, the mark cache could grow in memory larger than `mark_cache_size` to accommodate data within `mark_cache_min_lifetime` seconds, which was leading to confusion and higher memory usage than expected; that is especially bad on memory-constrained systems. If you see performance degradation after installing this release, you should increase `mark_cache_size`. [\#8484](https://github.com/ClickHouse/ClickHouse/pull/8484) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Preparation to use `tid` everywhere. This is needed for [\#7477](https://github.com/ClickHouse/ClickHouse/issues/7477). [\#8276](https://github.com/ClickHouse/ClickHouse/pull/8276) ([alexey-milovidov](https://github.com/alexey-milovidov))
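+
+A sketch of the `mutations_sync` setting mentioned in this list (`t` is a hypothetical table):
+
+``` sql
+-- 0 = asynchronous (default), 1 = wait on the current server, 2 = wait on all replicas
+SET mutations_sync = 2;
+ALTER TABLE t DELETE WHERE x = 0;  -- now returns only after the mutation finishes
+```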
+
+#### Performance Improvement {#performance-improvement-1}
+
+- Performance optimization in the processors pipeline. [\#7988](https://github.com/ClickHouse/ClickHouse/pull/7988) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Non-blocking updates of expired keys in cache dictionaries (with permission to read old ones). [\#8303](https://github.com/ClickHouse/ClickHouse/pull/8303) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Compile ClickHouse without `-fno-omit-frame-pointer` globally to spare one more register. [\#8097](https://github.com/ClickHouse/ClickHouse/pull/8097) ([Amos Bird](https://github.com/amosbird))
+- Speed up the `greatCircleDistance` function and add performance tests for it. [\#7307](https://github.com/ClickHouse/ClickHouse/pull/7307) ([Olga Khvostikova](https://github.com/stavrolia))
+- Improved performance of the function `roundDown`. [\#8465](https://github.com/ClickHouse/ClickHouse/pull/8465) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Improved performance of `max`, `min`, `argMin`, `argMax` for the `DateTime64` data type. [\#8199](https://github.com/ClickHouse/ClickHouse/pull/8199) ([Vasily Nemkov](https://github.com/Enmk))
+- Improved performance of sorting without a limit or with a big limit and of external sorting. [\#8545](https://github.com/ClickHouse/ClickHouse/pull/8545) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Improved performance of formatting floating point numbers by up to 6 times. [\#8542](https://github.com/ClickHouse/ClickHouse/pull/8542) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Improved performance of the `modulo` function. [\#7750](https://github.com/ClickHouse/ClickHouse/pull/7750) ([Amos Bird](https://github.com/amosbird))
+- Optimized `ORDER BY` and merging with a single column key. [\#8335](https://github.com/ClickHouse/ClickHouse/pull/8335) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Better implementation for `arrayReduce`, `-Array` and `-State` combinators. [\#7710](https://github.com/ClickHouse/ClickHouse/pull/7710) ([Amos Bird](https://github.com/amosbird))
+- Now `PREWHERE` should be optimized to be at least as efficient as `WHERE`. [\#7769](https://github.com/ClickHouse/ClickHouse/pull/7769) ([Amos Bird](https://github.com/amosbird))
+- Improve the way `round` and `roundBankers` handle negative numbers. [\#8229](https://github.com/ClickHouse/ClickHouse/pull/8229) ([hcz](https://github.com/hczhcz))
+- Improved decoding performance of the `DoubleDelta` and `Gorilla` codecs by roughly 30-40%. This fixes [\#7082](https://github.com/ClickHouse/ClickHouse/issues/7082). [\#8019](https://github.com/ClickHouse/ClickHouse/pull/8019) ([Vasily Nemkov](https://github.com/Enmk))
+- Improved performance of `base64`-related functions. [\#8444](https://github.com/ClickHouse/ClickHouse/pull/8444) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added a function `geoDistance`. It is similar to `greatCircleDistance` but uses an approximation to the WGS-84 ellipsoid model. The performance of both functions is nearly the same (see the example after this list). [\#8086](https://github.com/ClickHouse/ClickHouse/pull/8086) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Faster `min` and `max` aggregation functions for the `Decimal` data type. [\#8144](https://github.com/ClickHouse/ClickHouse/pull/8144) ([Artem Zuikov](https://github.com/4ertus2))
+- Vectorize processing of `arrayReduce`. [\#7608](https://github.com/ClickHouse/ClickHouse/pull/7608) ([Amos Bird](https://github.com/amosbird))
+- `if` chains are now optimized as `multiIf`.
[\#8355](https://github.com/ClickHouse/ClickHouse/pull/8355) ([Kamalov Ruslan](https://github.com/kamalov-ruslan))
+- Fix the performance regression of the `Kafka` table engine introduced in 19.15. This fixes [\#7261](https://github.com/ClickHouse/ClickHouse/issues/7261). [\#7935](https://github.com/ClickHouse/ClickHouse/pull/7935) ([filimonov](https://github.com/filimonov))
+- Removed “pie” code generation that `gcc` from Debian packages occasionally brings by default. [\#8483](https://github.com/ClickHouse/ClickHouse/pull/8483) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Parallel parsing of data formats. [\#6553](https://github.com/ClickHouse/ClickHouse/pull/6553) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Enable the optimized parser of `Values` with expressions by default (`input_format_values_deduce_templates_of_expressions=1`). [\#8231](https://github.com/ClickHouse/ClickHouse/pull/8231) ([tavplubix](https://github.com/tavplubix))
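+
+A sketch of the two distance functions mentioned in this list (the coordinates are arbitrary; both take `(lon1, lat1, lon2, lat2)` in degrees and return meters):
+
+``` sql
+SELECT
+    greatCircleDistance(-0.1278, 51.5074, 2.3522, 48.8566) AS sphere_m,  -- spherical model
+    geoDistance(-0.1278, 51.5074, 2.3522, 48.8566) AS ellipsoid_m;       -- WGS-84 approximation
+```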
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-2}
+
+- Build fixes for `ARM` and in minimal mode. [\#8304](https://github.com/ClickHouse/ClickHouse/pull/8304) ([proller](https://github.com/proller))
+- Add coverage file flush for `clickhouse-server` when std::atexit is not called. Also slightly improved logging in stateless tests with coverage. [\#8267](https://github.com/ClickHouse/ClickHouse/pull/8267) ([alesapin](https://github.com/alesapin))
+- Update the LLVM library in contrib. Avoid using LLVM from OS packages. [\#8258](https://github.com/ClickHouse/ClickHouse/pull/8258) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Make the bundled `curl` build fully quiet. [\#8232](https://github.com/ClickHouse/ClickHouse/pull/8232) [\#8203](https://github.com/ClickHouse/ClickHouse/pull/8203) ([Pavel Kovalenko](https://github.com/Jokser))
+- Fix some `MemorySanitizer` warnings. [\#8235](https://github.com/ClickHouse/ClickHouse/pull/8235) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Use `add_warning` and `no_warning` macros in `CMakeLists.txt`. [\#8604](https://github.com/ClickHouse/ClickHouse/pull/8604) ([Ivan](https://github.com/abyss7))
+- Add support of the MinIO S3-compatible object storage (https://min.io/) for better integration tests. [\#7863](https://github.com/ClickHouse/ClickHouse/pull/7863) [\#7875](https://github.com/ClickHouse/ClickHouse/pull/7875) ([Pavel Kovalenko](https://github.com/Jokser))
+- Imported `libc` headers to contrib. It allows making builds more consistent across various systems (only for `x86_64-linux-gnu`). [\#5773](https://github.com/ClickHouse/ClickHouse/pull/5773) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Remove `-fPIC` from some libraries. [\#8464](https://github.com/ClickHouse/ClickHouse/pull/8464) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Clean `CMakeLists.txt` for curl. See https://github.com/ClickHouse/ClickHouse/pull/8011\#issuecomment-569478910 [\#8459](https://github.com/ClickHouse/ClickHouse/pull/8459) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Silence warnings in the `CapNProto` library. [\#8220](https://github.com/ClickHouse/ClickHouse/pull/8220) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add performance tests for short-string optimized hash tables. [\#7679](https://github.com/ClickHouse/ClickHouse/pull/7679) ([Amos Bird](https://github.com/amosbird))
+- Now ClickHouse will build on `AArch64` even if `MADV_FREE` is not available. This fixes [\#8027](https://github.com/ClickHouse/ClickHouse/issues/8027). [\#8243](https://github.com/ClickHouse/ClickHouse/pull/8243) ([Amos Bird](https://github.com/amosbird))
+- Update `zlib-ng` to fix memory sanitizer problems. [\#7182](https://github.com/ClickHouse/ClickHouse/pull/7182) [\#8206](https://github.com/ClickHouse/ClickHouse/pull/8206) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Enable the internal MySQL library on non-Linux systems, because usage of OS packages is very fragile and usually doesn't work at all. This fixes [\#5765](https://github.com/ClickHouse/ClickHouse/issues/5765). [\#8426](https://github.com/ClickHouse/ClickHouse/pull/8426) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed build on some systems after enabling `libc++`. This supersedes [\#8374](https://github.com/ClickHouse/ClickHouse/issues/8374). [\#8380](https://github.com/ClickHouse/ClickHouse/pull/8380) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Make `Field` methods more type-safe to find more errors. [\#7386](https://github.com/ClickHouse/ClickHouse/pull/7386) [\#8209](https://github.com/ClickHouse/ClickHouse/pull/8209) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Added missing files to the `libc-headers` submodule. [\#8507](https://github.com/ClickHouse/ClickHouse/pull/8507) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix wrong `JSON` quoting in performance test output. [\#8497](https://github.com/ClickHouse/ClickHouse/pull/8497) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Now the stack trace is displayed for `std::exception` and `Poco::Exception`. In previous versions it was available only for `DB::Exception`. This improves diagnostics. [\#8501](https://github.com/ClickHouse/ClickHouse/pull/8501) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Baring `clock_gettime` and `clock_nanosleep` for fresh glibc versions. [\#8054](https://github.com/ClickHouse/ClickHouse/pull/8054) ([Amos Bird](https://github.com/amosbird))
+- Enable `part_log` in the example config for developers. [\#8609](https://github.com/ClickHouse/ClickHouse/pull/8609) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix the async nature of reload in `01036_no_superfluous_dict_reload_on_create_database*`. [\#8111](https://github.com/ClickHouse/ClickHouse/pull/8111) ([Azat Khuzhin](https://github.com/azat))
+- Fixed codec performance tests. [\#8615](https://github.com/ClickHouse/ClickHouse/pull/8615) ([Vasily Nemkov](https://github.com/Enmk))
+- Add install scripts for the `.tgz` build and documentation for them. [\#8612](https://github.com/ClickHouse/ClickHouse/pull/8612) [\#8591](https://github.com/ClickHouse/ClickHouse/pull/8591) ([alesapin](https://github.com/alesapin))
+- Removed the old `ZSTD` test (it was created in 2016 to reproduce a bug that pre-1.0 versions of ZSTD had). This fixes [\#8618](https://github.com/ClickHouse/ClickHouse/issues/8618). [\#8619](https://github.com/ClickHouse/ClickHouse/pull/8619) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed build on Mac OS Catalina. [\#8600](https://github.com/ClickHouse/ClickHouse/pull/8600) ([meo](https://github.com/meob))
+- Increased the number of rows in codec performance tests to make results noticeable. [\#8574](https://github.com/ClickHouse/ClickHouse/pull/8574) ([Vasily Nemkov](https://github.com/Enmk))
+- In debug builds, treat `LOGICAL_ERROR` exceptions as assertion failures, so that they are easier to notice. [\#8475](https://github.com/ClickHouse/ClickHouse/pull/8475) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Make the formats-related performance test more deterministic.
[\#8477](https://github.com/ClickHouse/ClickHouse/pull/8477) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Update `lz4` to fix a MemorySanitizer failure. [\#8181](https://github.com/ClickHouse/ClickHouse/pull/8181) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Suppress a known MemorySanitizer false positive in exception handling. [\#8182](https://github.com/ClickHouse/ClickHouse/pull/8182) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Update `gcc` and `g++` to version 9 in `build/docker/build.sh`. [\#7766](https://github.com/ClickHouse/ClickHouse/pull/7766) ([tlightsky](https://github.com/tlightsky))
+- Added a performance test case to test that `PREWHERE` is worse than `WHERE`. [\#7768](https://github.com/ClickHouse/ClickHouse/pull/7768) ([Amos Bird](https://github.com/amosbird))
+- Progress towards fixing one flaky test. [\#8621](https://github.com/ClickHouse/ClickHouse/pull/8621) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Avoid MemorySanitizer reports for data from `libunwind`. [\#8539](https://github.com/ClickHouse/ClickHouse/pull/8539) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Updated `libc++` to the latest version. [\#8324](https://github.com/ClickHouse/ClickHouse/pull/8324) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Build the ICU library from sources. This fixes [\#6460](https://github.com/ClickHouse/ClickHouse/issues/6460). [\#8219](https://github.com/ClickHouse/ClickHouse/pull/8219) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Switched from `libressl` to `openssl`. ClickHouse should support TLS 1.3 and SNI after this change. This fixes [\#8171](https://github.com/ClickHouse/ClickHouse/issues/8171). [\#8218](https://github.com/ClickHouse/ClickHouse/pull/8218) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a UBSan report when using `chacha20_poly1305` from SSL (happens on connect to https://yandex.ru/). [\#8214](https://github.com/ClickHouse/ClickHouse/pull/8214) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix the mode of the default password file for `.deb` Linux distros. [\#8075](https://github.com/ClickHouse/ClickHouse/pull/8075) ([proller](https://github.com/proller))
+- Improved the expression for getting the `clickhouse-server` PID in `clickhouse-test`. [\#8063](https://github.com/ClickHouse/ClickHouse/pull/8063) ([Alexander Kazakov](https://github.com/Akazz))
+- Updated contrib/googletest to v1.10.0. [\#8587](https://github.com/ClickHouse/ClickHouse/pull/8587) ([Alexander Burmak](https://github.com/Alex-Burmak))
+- Fixed a ThreadSanitizer report in the `base64` library. Also updated this library to the latest version, but it doesn't matter. This fixes [\#8397](https://github.com/ClickHouse/ClickHouse/issues/8397). [\#8403](https://github.com/ClickHouse/ClickHouse/pull/8403) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed `00600_replace_running_query` for processors. [\#8272](https://github.com/ClickHouse/ClickHouse/pull/8272) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Removed support for `tcmalloc` to make `CMakeLists.txt` simpler. [\#8310](https://github.com/ClickHouse/ClickHouse/pull/8310) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Release gcc builds now use `libc++` instead of `libstdc++`. Recently `libc++` was used only with clang. This will improve consistency of build configurations and portability. [\#8311](https://github.com/ClickHouse/ClickHouse/pull/8311) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Enable the ICU library for builds with MemorySanitizer.
[\#8222](https://github.com/ClickHouse/ClickHouse/pull/8222) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Suppress warnings from the `CapNProto` library. [\#8224](https://github.com/ClickHouse/ClickHouse/pull/8224) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Removed special cases of code for `tcmalloc`, because it's no longer supported. [\#8225](https://github.com/ClickHouse/ClickHouse/pull/8225) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- In the CI coverage task, kill the server gracefully to allow it to save the coverage report. This fixes the incomplete coverage reports we've been seeing lately. [\#8142](https://github.com/ClickHouse/ClickHouse/pull/8142) ([alesapin](https://github.com/alesapin))
+- Performance tests for all codecs against `Float64` and `UInt64` values. [\#8349](https://github.com/ClickHouse/ClickHouse/pull/8349) ([Vasily Nemkov](https://github.com/Enmk))
+- `termcap` is very much deprecated and leads to various problems (e.g. a missing "up" cap and echoing `^J` instead of multi line). Favor `terminfo` or the bundled `ncurses`. [\#7737](https://github.com/ClickHouse/ClickHouse/pull/7737) ([Amos Bird](https://github.com/amosbird))
+- Fixed the `test_storage_s3` integration test. [\#7734](https://github.com/ClickHouse/ClickHouse/pull/7734) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Support `StorageFile(<format>, null)` to insert a block into the given format file without actually writing to disk. This is required for performance tests. [\#8455](https://github.com/ClickHouse/ClickHouse/pull/8455) ([Amos Bird](https://github.com/amosbird))
+- Added the argument `--print-time` to functional tests, which prints the execution time per test. [\#8001](https://github.com/ClickHouse/ClickHouse/pull/8001) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Added asserts to `KeyCondition` while evaluating RPN. This will fix a warning from gcc-9. [\#8279](https://github.com/ClickHouse/ClickHouse/pull/8279) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Dump cmake options in CI builds. [\#8273](https://github.com/ClickHouse/ClickHouse/pull/8273) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Don't generate debug info for some fat libraries. [\#8271](https://github.com/ClickHouse/ClickHouse/pull/8271) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Make `log_to_console.xml` always log to stderr, regardless of whether it is interactive or not. [\#8395](https://github.com/ClickHouse/ClickHouse/pull/8395) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Removed some unused features from the `clickhouse-performance-test` tool. [\#8555](https://github.com/ClickHouse/ClickHouse/pull/8555) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Now we will also search for `lld-X` with the corresponding `clang-X` version. [\#8092](https://github.com/ClickHouse/ClickHouse/pull/8092) ([alesapin](https://github.com/alesapin))
+- Parquet build improvement. [\#8421](https://github.com/ClickHouse/ClickHouse/pull/8421) ([maxulan](https://github.com/maxulan))
+- More GCC warnings. [\#8221](https://github.com/ClickHouse/ClickHouse/pull/8221) ([kreuzerkrieg](https://github.com/kreuzerkrieg))
+- The package for Arch Linux now allows running the ClickHouse server, and not only the client. [\#8534](https://github.com/ClickHouse/ClickHouse/pull/8534) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fixed tests with processors. Tiny performance fixes.
[\#7672](https://github.com/ClickHouse/ClickHouse/pull/7672) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Update contrib/protobuf. [\#8256](https://github.com/ClickHouse/ClickHouse/pull/8256) ([Matwey V. Kornilov](https://github.com/matwey))
+- In preparation for the switch to C++20, as a New Year celebration. "May the C++ force be with ClickHouse." [\#8447](https://github.com/ClickHouse/ClickHouse/pull/8447) ([Amos Bird](https://github.com/amosbird))
+
+#### Experimental Feature {#experimental-feature-1}
+
+- Added the experimental setting `min_bytes_to_use_mmap_io`. It allows reading big files without copying data from the kernel to userspace. The setting is disabled by default. The recommended threshold is about 64 MB, because mmap/munmap is slow. [\#8520](https://github.com/ClickHouse/ClickHouse/pull/8520) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Reworked quotas as a part of the access control system. Added a new table `system.quotas`, new functions `currentQuota`, `currentQuotaKey`, and new SQL syntax `CREATE QUOTA`, `ALTER QUOTA`, `DROP QUOTA`, `SHOW QUOTA`. [\#7257](https://github.com/ClickHouse/ClickHouse/pull/7257) ([Vitaly Baranov](https://github.com/vitlibar))
+- Allow skipping unknown settings with warnings instead of throwing exceptions. [\#7653](https://github.com/ClickHouse/ClickHouse/pull/7653) ([Vitaly Baranov](https://github.com/vitlibar))
+- Reworked row policies as a part of the access control system. Added a new table `system.row_policies`, a new function `currentRowPolicies()`, and new SQL syntax `CREATE POLICY`, `ALTER POLICY`, `DROP POLICY`, `SHOW CREATE POLICY`, `SHOW POLICIES`. [\#7808](https://github.com/ClickHouse/ClickHouse/pull/7808) ([Vitaly Baranov](https://github.com/vitlibar))
+
+#### Security Fix {#security-fix}
+
+- Fixed the possibility of reading the directory structure in tables with the `File` table engine. This fixes [\#8536](https://github.com/ClickHouse/ClickHouse/issues/8536).
[\#8537](https://github.com/ClickHouse/ClickHouse/pull/8537) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+## [Changelog for 2019](https://github.com/ClickHouse/ClickHouse/blob/master/docs/en/changelog/2019.md) {#changelog-for-2019}
diff --git a/docs/fa/whats_new/index.md b/docs/fa/whats_new/index.md
new file mode 100644
index 00000000000..ac27b70b8bd
--- /dev/null
+++ b/docs/fa/whats_new/index.md
@@ -0,0 +1,8 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_folder_title: What's New
+toc_priority: 72
+---
+
+
diff --git a/docs/fa/whats_new/roadmap.md b/docs/fa/whats_new/roadmap.md
new file mode 100644
index 00000000000..f51b57e3a2a
--- /dev/null
+++ b/docs/fa/whats_new/roadmap.md
@@ -0,0 +1,19 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 74
+toc_title: Roadmap
+---
+
+# Roadmap {#roadmap}
+
+## Q1 2020 {#q1-2020}
+
+- Role-based access control
+
+## Q2 2020 {#q2-2020}
+
+- Integration with external authentication services
+- Resource pools for more precise distribution of cluster capacity between users
+
+{## [Original article](https://clickhouse.tech/docs/en/roadmap/) ##}
diff --git a/docs/fa/whats_new/security_changelog.md b/docs/fa/whats_new/security_changelog.md
new file mode 100644
index 00000000000..79eb2230b03
--- /dev/null
+++ b/docs/fa/whats_new/security_changelog.md
@@ -0,0 +1,77 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 76
+toc_title: Security Changelog
+---
+
+## Fixed in ClickHouse Release 19.14.3.3, 2019-09-10 {#fixed-in-clickhouse-release-19-14-3-3-2019-09-10}
+
+### CVE-2019-15024 {#cve-2019-15024}
+
+An attacker that has write access to ZooKeeper and who can run a custom server available from the network where ClickHouse runs, can create a custom-built malicious server that will act as a ClickHouse replica and register it in ZooKeeper. When another replica fetches a data part from the malicious replica, it can force clickhouse-server to write to an arbitrary path on the filesystem.
+
+Credits: Eldar Zaitov of Yandex Information Security Team
+
+### CVE-2019-16535 {#cve-2019-16535}
+
+An OOB read, OOB write and integer underflow in decompression algorithms can be used to achieve RCE or DoS via the native protocol.
+
+Credits: Eldar Zaitov of Yandex Information Security Team
+
+### CVE-2019-16536 {#cve-2019-16536}
+
+A stack overflow leading to DoS can be triggered by a malicious authenticated client.
+
+Credits: Eldar Zaitov of Yandex Information Security Team
+
+## Fixed in ClickHouse Release 19.13.6.1, 2019-09-20 {#fixed-in-clickhouse-release-19-13-6-1-2019-09-20}
+
+### CVE-2019-18657 {#cve-2019-18657}
+
+The table function `url` had a vulnerability that allowed the attacker to inject arbitrary HTTP headers in the request.
+
+Credits: [Nikita Tikhomirov](https://github.com/NSTikhomirov)
+
+## Fixed in ClickHouse Release 18.12.13, 2018-09-10 {#fixed-in-clickhouse-release-18-12-13-2018-09-10}
+
+### CVE-2018-14672 {#cve-2018-14672}
+
+Functions for loading CatBoost models allowed path traversal and reading arbitrary files through error messages.
+
+Credits: Andrey Krasichkov of Yandex Information Security Team
+
+## Fixed in ClickHouse Release 18.10.3, 2018-08-13 {#fixed-in-clickhouse-release-18-10-3-2018-08-13}
+
+### CVE-2018-14671 {#cve-2018-14671}
+
+unixODBC allowed loading arbitrary shared objects from the file system, which led to a Remote Code Execution vulnerability.
+
+Credits: Andrey Krasichkov and Evgeny Sidorov of Yandex Information Security Team
+
+## Fixed in ClickHouse Release 1.1.54388, 2018-06-28 {#fixed-in-clickhouse-release-1-1-54388-2018-06-28}
+
+### CVE-2018-14668 {#cve-2018-14668}
+
+The "remote" table function allowed arbitrary symbols in the "user", "password" and "default\_database" fields, which led to Cross Protocol Request Forgery attacks.
+
+Credits: Andrey Krasichkov of Yandex Information Security Team
+
+## Fixed in ClickHouse Release 1.1.54390, 2018-07-06 {#fixed-in-clickhouse-release-1-1-54390-2018-07-06}
+
+### CVE-2018-14669 {#cve-2018-14669}
+
+The ClickHouse MySQL client had "LOAD DATA LOCAL INFILE" functionality enabled, which allowed a malicious MySQL database to read arbitrary files from the connected ClickHouse server.
+
+Credits: Andrey Krasichkov and Evgeny Sidorov of Yandex Information Security Team
+
+## Fixed in ClickHouse Release 1.1.54131, 2017-01-10 {#fixed-in-clickhouse-release-1-1-54131-2017-01-10}
+
+### CVE-2018-14670 {#cve-2018-14670}
+
+Incorrect configuration in the deb package could lead to unauthorized use of the database.
+
+Credits: the UK's National Cyber Security Centre (NCSC)
+
+{## [Original article](https://clickhouse.tech/docs/en/security_changelog/) ##}
diff --git a/docs/fr/changelog/2019.md b/docs/fr/changelog/2019.md
deleted file mode 120000
index f9081e1f66b..00000000000
--- a/docs/fr/changelog/2019.md
+++ /dev/null
@@ -1 +0,0 @@
-../../en/whats_new/changelog/2019.md
\ No newline at end of file
diff --git a/docs/fr/commercial/cloud.md b/docs/fr/commercial/cloud.md
index ec592154f52..3a04f95fc9a 100644
--- a/docs/fr/commercial/cloud.md
+++ b/docs/fr/commercial/cloud.md
@@ -1,5 +1,6 @@
 ---
 machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
 ---
 
 # ClickHouse Cloud Service Providers {#clickhouse-cloud-service-providers}
@@ -11,7 +12,7 @@ machine_translated: true
 [Yandex Managed Service for ClickHouse](https://cloud.yandex.com/services/managed-clickhouse?utm_source=referrals&utm_medium=clickhouseofficialsite&utm_campaign=link3) offers the following features:
 
-- Fully managed ZooKeeper service for [ClickHouse replication](../operations/table_engines/replication.md)
+- Fully managed ZooKeeper service for [ClickHouse replication](../engines/table_engines/mergetree_family/replication.md)
- Multiple choices of storage type
- Replicas in different availability zones
- Encryption and isolation
diff --git a/docs/fr/commercial/index.md b/docs/fr/commercial/index.md
new file mode 100644
index 00000000000..296ea571a59
--- /dev/null
+++ b/docs/fr/commercial/index.md
@@ -0,0 +1,8 @@
+---
+machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_folder_title: Commercial
+toc_priority: 70
+---
+
+
diff --git a/docs/fr/data_types/special_data_types/set.md b/docs/fr/data_types/special_data_types/set.md
deleted file mode 100644
index 82dcef10abe..00000000000
--- a/docs/fr/data_types/special_data_types/set.md
+++ /dev/null
@@ -1,9 +0,0 @@
----
-machine_translated: true
----
-
-# Set {#set}
-
-Used for the right half of an [IN](../../query_language/select.md#select-in-operators) expression.
-
-[Original article](https://clickhouse.tech/docs/en/data_types/special_data_types/set/)
diff --git a/docs/fr/development/architecture.md b/docs/fr/development/architecture.md
index ad19a0d4231..eb9cab7ecf9 100644
--- a/docs/fr/development/architecture.md
+++ b/docs/fr/development/architecture.md
@@ -1,5 +1,8 @@
 ---
 machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_priority: 62
+toc_title: Overview of ClickHouse Architecture
 ---
 
 # Overview of ClickHouse Architecture {#overview-of-clickhouse-architecture}
@@ -119,7 +122,7 @@ There are ordinary functions and aggregate functions. For ordinary functions
 
 Ordinary functions don't change the number of rows – they work as if they are processing each row independently. In fact, functions are not called for individual rows, but for `Block`'s of data to implement vectorized query execution.
 
-There are some miscellaneous functions, like [blockSize](../query_language/functions/other_functions.md#function-blocksize), [rowNumberInBlock](../query_language/functions/other_functions.md#function-rownumberinblock), and [runningAccumulate](../query_language/functions/other_functions.md#function-runningaccumulate), that exploit block processing and violate the independence of rows.
+There are some miscellaneous functions, like [blockSize](../sql_reference/functions/other_functions.md#function-blocksize), [rowNumberInBlock](../sql_reference/functions/other_functions.md#function-rownumberinblock), and [runningAccumulate](../sql_reference/functions/other_functions.md#function-runningaccumulate), that exploit block processing and violate the independence of rows.
 
 ClickHouse has strong typing, so there's no implicit type conversion. If a function doesn't support a specific combination of types, it throws an exception. But functions can work (be overloaded) for many different combinations of types. For example, the `plus` function (to implement the `+` operator) works for any combination of numeric types: `UInt8` + `Float32`, `UInt16` + `Int8`, and so on. Also, some variadic functions can accept any number of arguments, such as the `concat` function.
diff --git a/docs/fr/development/browse_code.md b/docs/fr/development/browse_code.md
index 664f0dc9d48..62caa530b5d 100644
--- a/docs/fr/development/browse_code.md
+++ b/docs/fr/development/browse_code.md
@@ -1,10 +1,13 @@
 ---
 machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_priority: 63
+toc_title: Browse ClickHouse Source Code
 ---
 
 # Browse ClickHouse Source Code {#browse-clickhouse-source-code}
 
-You can use the **Woboq** online code browser available [here](https://clickhouse-test-reports.s3.yandex.net/codebrowser/html_report///ClickHouse/src/src/index.html). It provides code navigation and semantic highlighting, search and indexing. The code snapshot is updated daily.
+You can use the **Woboq** online code browser available [here](https://clickhouse-test-reports.s3.yandex.net/codebrowser/html_report///ClickHouse/dbms/index.html). It provides code navigation and semantic highlighting, search and indexing. The code snapshot is updated daily.
Also, you can browse sources on [GitHub](https://github.com/ClickHouse/ClickHouse) as usual.
diff --git a/docs/fr/development/build.md b/docs/fr/development/build.md
index 5bf9e439849..0a5e5838d66 100644
--- a/docs/fr/development/build.md
+++ b/docs/fr/development/build.md
@@ -1,5 +1,8 @@
 ---
 machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_priority: 64
+toc_title: How to Build ClickHouse on Linux
 ---
 
 # How to Build ClickHouse for Development {#how-to-build-clickhouse-for-development}
diff --git a/docs/fr/development/build_cross_arm.md b/docs/fr/development/build_cross_arm.md
index c7ec1857151..7da37869583 100644
--- a/docs/fr/development/build_cross_arm.md
+++ b/docs/fr/development/build_cross_arm.md
@@ -1,5 +1,8 @@
 ---
 machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_priority: 67
+toc_title: How to Build ClickHouse on Linux for AARCH64 (ARM64)
 ---
 
 # How to Build ClickHouse on Linux for the AARCH64 (ARM64) Architecture {#how-to-build-clickhouse-on-linux-for-aarch64-arm64-architecture}
diff --git a/docs/fr/development/build_cross_osx.md b/docs/fr/development/build_cross_osx.md
index f95663992c3..809b6ef0dd6 100644
--- a/docs/fr/development/build_cross_osx.md
+++ b/docs/fr/development/build_cross_osx.md
@@ -1,5 +1,8 @@
 ---
 machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_priority: 66
+toc_title: How to Build ClickHouse on Linux for Mac OS X
 ---
 
 # How to Build ClickHouse on Linux for Mac OS X {#how-to-build-clickhouse-on-linux-for-mac-os-x}
diff --git a/docs/fr/development/build_osx.md b/docs/fr/development/build_osx.md
index 9730a864726..b2122c2117f 100644
--- a/docs/fr/development/build_osx.md
+++ b/docs/fr/development/build_osx.md
@@ -1,5 +1,8 @@
 ---
 machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_priority: 65
+toc_title: How to Build ClickHouse on Mac OS X
 ---
 
 # How to Build ClickHouse on Mac OS X {#how-to-build-clickhouse-on-mac-os-x}
diff --git a/docs/fr/development/contrib.md b/docs/fr/development/contrib.md
index 2d06428dba0..9750b348541 100644
--- a/docs/fr/development/contrib.md
+++ b/docs/fr/development/contrib.md
@@ -1,5 +1,8 @@
 ---
 machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_priority: 70
+toc_title: Third-Party Libraries Used
 ---
 
 # Third-Party Libraries Used {#third-party-libraries-used}
diff --git a/docs/fr/development/developer_instruction.md b/docs/fr/development/developer_instruction.md
index d5082869b9b..cf15f8f36ac 100644
--- a/docs/fr/development/developer_instruction.md
+++ b/docs/fr/development/developer_instruction.md
@@ -1,5 +1,8 @@
 ---
 machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_priority: 61
+toc_title: The Beginner ClickHouse Developer Instruction
 ---
 
 Building of ClickHouse is supported on Linux, FreeBSD and Mac OS X.
@@ -246,7 +249,7 @@ The Code Style Guide: https://clickhouse.tech/docs/fr/développement/style/
 
 Writing tests: https://clickhouse.tech/docs/fr/développement/tests/
 
-List of tasks: https://github.com/ClickHouse/ClickHouse/blob/master/src/tests/instructions/easy\_tasks\_sorted\_en.md
+List of tasks: https://github.com/ClickHouse/ClickHouse/blob/master/tests/instructions/easy\_tasks\_sorted\_en.md
 
 # Test Data {#test-data}
diff --git a/docs/fr/development/index.md b/docs/fr/development/index.md
index a888dbc7d14..1461fcae7bb 100644
--- a/docs/fr/development/index.md
+++ b/docs/fr/development/index.md
@@ -1,5 +1,10 @@
 ---
 machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_folder_title: Development
+toc_hidden: true
+toc_priority: 58
+toc_title: hidden
 ---
 
 # ClickHouse Development {#clickhouse-development}
diff --git a/docs/fr/development/style.md b/docs/fr/development/style.md
index 782aca7d1d3..606e6af920e 100644
--- a/docs/fr/development/style.md
+++ b/docs/fr/development/style.md
@@ -1,5 +1,8 @@
 ---
 machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_priority: 68
+toc_title: How to Write C++ Code
 ---
 
 # How to Write C++ Code {#how-to-write-c-code}
diff --git a/docs/fr/development/tests.md b/docs/fr/development/tests.md
index b4f746e46e0..9c79c65ba9d 100644
--- a/docs/fr/development/tests.md
+++ b/docs/fr/development/tests.md
@@ -1,5 +1,8 @@
 ---
 machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_priority: 69
+toc_title: How to Run ClickHouse Tests
 ---
 
 # ClickHouse Testing {#clickhouse-testing}
@@ -10,15 +13,15 @@ Functional tests are the simplest and most convenient to use. Most
 
 Each functional test sends one or several queries to the running ClickHouse server and compares the result with a reference.
 
-Tests are located in the `src/tests/queries` directory. There are two subdirectories: `stateless` and `stateful`. Stateless tests run queries without any preloaded test data – they often create small synthetic datasets on the fly, within the test itself. Stateful tests require preloaded test data from Yandex.Metrica, which is not available to the general public. We tend to use only `stateless` tests and avoid adding new `stateful` tests.
+Tests are located in the `tests/queries` directory. There are two subdirectories: `stateless` and `stateful`. Stateless tests run queries without any preloaded test data – they often create small synthetic datasets on the fly, within the test itself. Stateful tests require preloaded test data from Yandex.Metrica, which is not available to the general public. We tend to use only `stateless` tests and avoid adding new `stateful` tests.
 
 Each test can be one of two types: `.sql` and `.sh`. A `.sql` test is a simple SQL script that is piped to `clickhouse-client --multiquery --testmode`. A `.sh` test is a script that is run by itself.
 
-To run all tests, use the `src/tests/clickhouse-test` tool. Look at `--help` for the list of possible options. You can simply run all tests or run a subset of tests filtered by a substring in the test name: `./clickhouse-test substring`.
+To run all tests, use the `tests/clickhouse-test` tool.
Look at `--help` for the list of possible options. You can simply run all tests or run a subset of tests filtered by a substring in the test name: `./clickhouse-test substring`.
 
 The easiest way to invoke functional tests is to copy `clickhouse-client` to `/usr/bin/`, run `clickhouse-server` and then run `./clickhouse-test` from its own directory.
 
-To add a new test, create a `.sql` or `.sh` file in the `src/tests/queries/0_stateless` directory, check it manually, and then generate the `.reference` file in the following way: `clickhouse-client -n --testmode < 00000_test.sql > 00000_test.reference` or `./00000_test.sh > ./00000_test.reference`.
+To add a new test, create a `.sql` or `.sh` file in the `tests/queries/0_stateless` directory, check it manually, and then generate the `.reference` file in the following way: `clickhouse-client -n --testmode < 00000_test.sql > 00000_test.reference` or `./00000_test.sh > ./00000_test.reference`.
 
 Tests should use (create, drop, etc.) only tables in the `test` database, which is assumed to be created beforehand; tests can also use temporary tables.
 
@@ -33,13 +36,13 @@ disable these groups of tests using `--no-zookeeper`, `--no-shard` and
 
 ## Known Bugs {#known-bugs}
 
-If we know some bugs that can be easily reproduced by functional tests, we place prepared functional tests in the `src/tests/queries/bugs` directory. These tests will be moved to `src/tests/queries/0_stateless` when the bugs are fixed.
+If we know some bugs that can be easily reproduced by functional tests, we place prepared functional tests in the `tests/queries/bugs` directory. These tests will be moved to `tests/queries/0_stateless` when the bugs are fixed.
 
 ## Integration Tests {#integration-tests}
 
 Integration tests allow testing ClickHouse in a clustered configuration, as well as ClickHouse interaction with other servers like MySQL, Postgres, MongoDB. They are useful for emulating network splits, packet drops, and so on. These tests are run under Docker and create multiple containers with various software.
 
-See `src/tests/integration/README.md` on how to run these tests.
+See `tests/integration/README.md` on how to run these tests.
 
 Note that integration of ClickHouse with third-party drivers is not tested. Also, we currently don't have integration tests with our JDBC and ODBC drivers.
 
@@ -51,7 +54,7 @@ It is not necessary to have unit tests if the code is already
 
 ## Performance Tests {#performance-tests}
 
-Performance tests allow measuring and comparing the performance of some isolated part of ClickHouse on synthetic queries. Tests are located at `src/tests/performance`. Each test is represented by an `.xml` file with a description of the test case. Tests are run with the `clickhouse performance-test` tool (which is embedded in the `clickhouse` binary). See `--help` for invocation.
+Performance tests allow measuring and comparing the performance of some isolated part of ClickHouse on synthetic queries. Tests are located at `tests/performance`. Each test is represented by an `.xml` file with a description of the test case. Tests are run with the `clickhouse performance-test` tool (which is embedded in the `clickhouse` binary).
See `--help` for invocation.
 
 Each test runs one or multiple queries (possibly with combinations of parameters) in a loop with some conditions for stopping (like "maximum execution speed is not changing in three seconds") and measures some metrics about query performance (like "maximum execution speed"). Some tests can contain preconditions on a preloaded test dataset.
 
@@ -59,13 +62,13 @@ If you want to improve the performance of ClickHouse in some scenario
 
 ## Test Tools and Scripts {#test-tools-and-scripts}
 
-Some programs in the `tests` directory are not prepared tests, but test tools. For example, for `Lexer` there is a tool `src/src/Parsers/tests/lexer` that just does tokenization of stdin and writes the colorized result to stdout. You can use these kinds of tools as code examples and for exploration and manual testing.
+Some programs in the `tests` directory are not prepared tests, but test tools. For example, for `Lexer` there is a tool `dbms/Parsers/tests/lexer` that just does tokenization of stdin and writes the colorized result to stdout. You can use these kinds of tools as code examples and for exploration and manual testing.
 
 You can also place a pair of files `.sh` and `.reference` along with the tool to run it on some predefined input – then the script result can be compared to the `.reference` file. These kinds of tests are not automated.
 
 ## Miscellaneous Tests {#miscellanous-tests}
 
-There are tests for external dictionaries located at `src/tests/external_dictionaries` and for machine learned models in `src/tests/external_models`. These tests are not updated and must be transferred to integration tests.
+There are tests for external dictionaries located at `tests/external_dictionaries` and for machine learned models in `tests/external_models`. These tests are not updated and must be transferred to integration tests.
 
 There is a separate test for quorum inserts. This test runs a ClickHouse cluster on separate servers and emulates various failure cases: network split, packet drop (between ClickHouse nodes, between ClickHouse and ZooKeeper, between the ClickHouse server and the client, etc.), `kill -9`, `kill -STOP` and `kill -CONT`, like [Jepsen](https://aphyr.com/tags/Jepsen). Then the test checks that all acknowledged inserts were written and all rejected inserts were not.
 
@@ -75,9 +78,9 @@ The quorum test was written by a separate team before ClickHouse
 
 When you develop a new feature, it is reasonable to also test it manually. You can do it with the following steps:
 
-Build ClickHouse. Run ClickHouse from the terminal: change the directory to `src/src/programs/clickhouse-server` and run it with `./clickhouse-server`. It will use the configuration (`config.xml`, `users.xml` and the files within the `config.d` and `users.d` directories) from the current directory by default. To connect to the ClickHouse server, run `src/src/programs/clickhouse-client/clickhouse-client`.
+Build ClickHouse. Run ClickHouse from the terminal: change the directory to `programs/clickhouse-server` and run it with `./clickhouse-server`.
It will use the configuration (`config.xml`, `users.xml` and the files within the `config.d` and `users.d` directories) from the current directory by default. To connect to the ClickHouse server, run `programs/clickhouse-client/clickhouse-client`.
 
-Note that all ClickHouse tools (server, client, etc.) are just symlinks to a single binary named `clickhouse`. You can find this binary at `src/src/programs/clickhouse`. All tools can also be invoked as `clickhouse tool` instead of `clickhouse-tool`.
+Note that all ClickHouse tools (server, client, etc.) are just symlinks to a single binary named `clickhouse`. You can find this binary at `programs/clickhouse`. All tools can also be invoked as `clickhouse tool` instead of `clickhouse-tool`.
 
 Alternatively, you can install the ClickHouse package: either a stable release from the Yandex repository, or you can build a package for yourself with `./release` in the ClickHouse sources root. Then start the server with `sudo service clickhouse-server start` (or stop to stop the server). Look for logs at `/etc/clickhouse-server/clickhouse-server.log`.
 
@@ -206,7 +209,7 @@ People from the Yandex Cloud department do some basic overview of capabilities
 
 ## Static Analyzers {#static-analyzers}
 
-We run `PVS-Studio` on a per-commit basis. We have evaluated `clang-tidy`, `Coverity`, `cppcheck`, `PVS-Studio`, `tscancode`. You will find instructions for usage in the `src/tests/instructions/` directory. Also, you can read [the article in Russian](https://habr.com/company/yandex/blog/342018/).
+We run `PVS-Studio` on a per-commit basis. We have evaluated `clang-tidy`, `Coverity`, `cppcheck`, `PVS-Studio`, `tscancode`. You will find instructions for usage in the `tests/instructions/` directory. Also, you can read [the article in Russian](https://habr.com/company/yandex/blog/342018/).
 
 If you use `CLion` as an IDE, you can leverage some `clang-tidy` checks out of the box.
 
@@ -246,3 +249,4 @@ We don't use Travis CI due to the limit on time and computational power
 We don't use Jenkins. It was used before and now we are happy we are not using Jenkins.
 
 [Original article](https://clickhouse.tech/docs/en/development/tests/)
diff --git a/docs/fr/database_engines/index.md b/docs/fr/engines/database_engines/index.md
similarity index 61%
rename from docs/fr/database_engines/index.md
rename to docs/fr/engines/database_engines/index.md
index 6bd365428e0..5b019ef4e75 100644
--- a/docs/fr/database_engines/index.md
+++ b/docs/fr/engines/database_engines/index.md
@@ -1,12 +1,16 @@
 ---
 machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_folder_title: Database Engines
+toc_priority: 27
+toc_title: Introduction
 ---
 
 # Database Engines {#database-engines}
 
 Database engines let you work with tables.
 
-By default, ClickHouse uses its native database engine, which provides [table engines](../operations/table_engines/index.md) and an [SQL dialect](../query_language/syntax.md).
+By default, ClickHouse uses its native database engine, which provides [table engines](../../engines/table_engines/index.md) and an [SQL dialect](../../sql_reference/syntax.md).
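As a minimal, hypothetical sketch of the difference (the database names and MySQL connection parameters below are made up for illustration):

``` sql
-- Native engine: no ENGINE clause is needed; tables are managed by ClickHouse itself.
CREATE DATABASE analytics;

-- A database backed by a remote MySQL server: its tables act as proxies to MySQL tables.
-- 'mysql-host:3306', 'shop', 'reader' and 'secret' are illustrative connection parameters.
CREATE DATABASE shop_mirror ENGINE = MySQL('mysql-host:3306', 'shop', 'reader', 'secret');
```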
You can also use the following database engines:
diff --git a/docs/fr/database_engines/lazy.md b/docs/fr/engines/database_engines/lazy.md
similarity index 84%
rename from docs/fr/database_engines/lazy.md
rename to docs/fr/engines/database_engines/lazy.md
index 203c62d815b..77a2cb15dde 100644
--- a/docs/fr/database_engines/lazy.md
+++ b/docs/fr/engines/database_engines/lazy.md
@@ -1,5 +1,8 @@
 ---
 machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_priority: 31
+toc_title: Lazy
 ---
 
 # Lazy {#lazy}
diff --git a/docs/fr/database_engines/mysql.md b/docs/fr/engines/database_engines/mysql.md
similarity index 60%
rename from docs/fr/database_engines/mysql.md
rename to docs/fr/engines/database_engines/mysql.md
index c56771e34c2..ebe50498967 100644
--- a/docs/fr/database_engines/mysql.md
+++ b/docs/fr/engines/database_engines/mysql.md
@@ -1,8 +1,11 @@
 ---
 machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_priority: 30
+toc_title: MySQL
 ---
 
-# MySQL {#mysql}
+# MySQL {#mysql}
 
 Allows connecting to databases on a remote MySQL server, and performing `INSERT` and `SELECT` queries to exchange data between ClickHouse and MySQL.
 
@@ -30,27 +33,27 @@ ENGINE = MySQL('host:port', 'database', 'user', 'password')
 
 ## Data Types Support {#data_types-support}
 
-| MySQL | ClickHouse |
-|----------------------------------|---------------------------------------------|
-| UNSIGNED TINYINT | [UInt8](../data_types/int_uint.md) |
-| TINYINT | [Int8](../data_types/int_uint.md) |
-| UNSIGNED SMALLINT | [UInt16](../data_types/int_uint.md) |
-| SMALLINT | [Int16](../data_types/int_uint.md) |
-| UNSIGNED INT, UNSIGNED MEDIUMINT | [UInt32](../data_types/int_uint.md) |
-| INT, MEDIUMINT | [Int32](../data_types/int_uint.md) |
-| UNSIGNED BIGINT | [UInt64](../data_types/int_uint.md) |
-| BIGINT | [Int64](../data_types/int_uint.md) |
-| FLOAT | [Float32](../data_types/float.md) |
-| DOUBLE | [Float64](../data_types/float.md) |
-| DATE | [Date](../data_types/date.md) |
-| DATETIME, TIMESTAMP | [DateTime](../data_types/datetime.md) |
-| BINARY | [FixedString](../data_types/fixedstring.md) |
+| MySQL | ClickHouse |
+|----------------------------------|--------------------------------------------------------------|
+| UNSIGNED TINYINT | [UInt8](../../sql_reference/data_types/int_uint.md) |
+| TINYINT | [Int8](../../sql_reference/data_types/int_uint.md) |
+| UNSIGNED SMALLINT | [UInt16](../../sql_reference/data_types/int_uint.md) |
+| SMALLINT | [Int16](../../sql_reference/data_types/int_uint.md) |
+| UNSIGNED INT, UNSIGNED MEDIUMINT | [UInt32](../../sql_reference/data_types/int_uint.md) |
+| INT, MEDIUMINT | [Int32](../../sql_reference/data_types/int_uint.md) |
+| UNSIGNED BIGINT | [UInt64](../../sql_reference/data_types/int_uint.md) |
+| BIGINT | [Int64](../../sql_reference/data_types/int_uint.md) |
+| FLOAT | [Float32](../../sql_reference/data_types/float.md) |
+| DOUBLE | [Float64](../../sql_reference/data_types/float.md) |
+| DATE | [Date](../../sql_reference/data_types/date.md) |
+| DATETIME, TIMESTAMP | [DateTime](../../sql_reference/data_types/datetime.md) |
+| BINARY | [FixedString](../../sql_reference/data_types/fixedstring.md) |
 
-All other MySQL data types are converted into [String](../data_types/string.md).
+All other MySQL data types are converted into [String](../../sql_reference/data_types/string.md).
 
-[Nullable](../data_types/nullable.md) is supported.
+[Nullable](../../sql_reference/data_types/nullable.md) is supported.
 
-## Examples of Use {#examples-of-use}
+## Examples of Use {#examples-of-use}
 
 Table in MySQL:
 
``` text
mysql> USE test;
Database changed

mysql> insert into mysql_table (`int_id`, `float`) VALUES (1,2);
Query OK, 1 row affected (0,00 sec)

mysql> select * from mysql_table;
-+--------+-------+
++--------+-------+
| int_id | value |
-+--------+-------+
++--------+-------+
|      1 |     2 |
-+--------+-------+
++--------+-------+
1 row in set (0,00 sec)
```
diff --git a/docs/fr/engines/index.md b/docs/fr/engines/index.md
new file mode 100644
index 00000000000..af36619876c
--- /dev/null
+++ b/docs/fr/engines/index.md
@@ -0,0 +1,8 @@
+---
+machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_folder_title: Engines
+toc_priority: 25
+---
+
+
diff --git a/docs/fr/operations/table_engines/index.md b/docs/fr/engines/table_engines/index.md
similarity index 51%
rename from docs/fr/operations/table_engines/index.md
rename to docs/fr/engines/table_engines/index.md
index 74401386322..3e199de8ebd 100644
--- a/docs/fr/operations/table_engines/index.md
+++ b/docs/fr/engines/table_engines/index.md
@@ -1,8 +1,12 @@
 ---
 machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_folder_title: Table Engines
+toc_priority: 26
+toc_title: Introduction
 ---
 
-# Table Engines {#table_engines}
+# Table Engines {#table_engines}
 
 The table engine (type of table) determines:
 
@@ -15,60 +19,60 @@ The table engine (type of table) determines:
 
 ## Engine Families {#engine-families}
 
-### MergeTree {#mergetree}
+### MergeTree {#mergetree}
 
-The most universal and functional table engines for high-load tasks. The property shared by these engines is quick data insertion with subsequent background data processing. `MergeTree` family engines support data replication (with [Replicated\*](replication.md) versions of engines), partitioning, and other features not supported in other engines.
+The most universal and functional table engines for high-load tasks. The property shared by these engines is quick data insertion with subsequent background data processing. `MergeTree` family engines support data replication (with [Replicated\*](mergetree_family/replication.md) versions of engines), partitioning, and other features not supported in other engines.
 
 Engines in the family:
 
-- [MergeTree](mergetree.md)
-- [ReplacingMergeTree](replacingmergetree.md)
-- [SummingMergeTree](summingmergetree.md)
-- [AggregatingMergeTree](aggregatingmergetree.md)
-- [CollapsingMergeTree](collapsingmergetree.md)
-- [VersionedCollapsingMergeTree](versionedcollapsingmergetree.md)
-- [GraphiteMergeTree](graphitemergetree.md)
+- [MergeTree](mergetree_family/mergetree.md)
+- [ReplacingMergeTree](mergetree_family/replacingmergetree.md)
+- [SummingMergeTree](mergetree_family/summingmergetree.md)
+- [AggregatingMergeTree](mergetree_family/aggregatingmergetree.md)
+- [CollapsingMergeTree](mergetree_family/collapsingmergetree.md)
+- [VersionedCollapsingMergeTree](mergetree_family/versionedcollapsingmergetree.md)
+- [GraphiteMergeTree](mergetree_family/graphitemergetree.md)
 
 ### Log {#log}
 
-Lightweight [engines](log_family.md) with minimum functionality.
They're the most effective when you need to quickly write many small tables (up to approximately 1 million rows) and read them later as a whole.
+Lightweight [engines](log_family/index.md) with minimum functionality. They're the most effective when you need to quickly write many small tables (up to approximately 1 million rows) and read them later as a whole.
 
 Engines in the family:
 
-- [TinyLog](tinylog.md)
-- [StripeLog](stripelog.md)
-- [Log](log.md)
+- [TinyLog](log_family/tinylog.md)
+- [StripeLog](log_family/stripelog.md)
+- [Log](log_family/log.md)
 
-### Integration Engines {#integration-engines}
+### Integration Engines {#integration-engines}
 
 Engines for communicating with other data storage and processing systems.
 
 Engines in the family:
 
-- [Kafka](kafka.md)
-- [MySQL](mysql.md)
-- [ODBC](odbc.md)
-- [JDBC](jdbc.md)
-- [HDFS](hdfs.md)
+- [Kafka](integrations/kafka.md)
+- [MySQL](integrations/mysql.md)
+- [ODBC](integrations/odbc.md)
+- [JDBC](integrations/jdbc.md)
+- [HDFS](integrations/hdfs.md)
 
-### Special Engines {#special-engines}
+### Special Engines {#special-engines}
 
 Engines in the family:
 
-- [Distributed](distributed.md)
-- [MaterializedView](materializedview.md)
-- [Dictionary](dictionary.md)
-- [Merge](merge.md)
-- [File](file.md)
-- [Null](null.md)
-- [Set](set.md)
-- [Join](join.md)
-- [URL](url.md)
-- [View](view.md)
-- [Memory](memory.md)
-- [Buffer](buffer.md)
+- [Distributed](special/distributed.md)
+- [MaterializedView](special/materializedview.md)
+- [Dictionary](special/dictionary.md)
+- [Merge](special/merge.md)
+- [File](special/file.md)
+- [Null](special/null.md)
+- [Set](special/set.md)
+- [Join](special/join.md)
+- [URL](special/url.md)
+- [View](special/view.md)
+- [Memory](special/memory.md)
+- [Buffer](special/buffer.md)
 
-## Virtual Columns {#table_engines-virtual-columns}
+## Virtual Columns {#table_engines-virtual-columns}
 
 A virtual column is an integral table engine attribute that is defined in the engine source code.
diff --git a/docs/fr/operations/table_engines/hdfs.md b/docs/fr/engines/table_engines/integrations/hdfs.md
similarity index 90%
rename from docs/fr/operations/table_engines/hdfs.md
rename to docs/fr/engines/table_engines/integrations/hdfs.md
index 7fe493861d7..0cc1b423983 100644
--- a/docs/fr/operations/table_engines/hdfs.md
+++ b/docs/fr/engines/table_engines/integrations/hdfs.md
@@ -1,11 +1,14 @@
 ---
 machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_priority: 36
+toc_title: HDFS
 ---
 
 # HDFS {#table_engines-hdfs}
 
 This engine provides integration with the [Apache Hadoop](https://en.wikipedia.org/wiki/Apache_Hadoop) ecosystem by allowing to manage data on [HDFS](https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html) via ClickHouse. This engine is similar
-to the [File](file.md) and [URL](url.md) engines, but provides Hadoop-specific features.
+to the [File](../special/file.md) and [URL](../special/url.md) engines, but provides Hadoop-specific features.
 
 ## Usage {#usage}
 
 The `URI` parameter is the whole file URI in HDFS. The `format` parameter specifies one of the available file formats. To perform `SELECT` queries, the format must be supported for input, and to perform `INSERT` queries – for output.
The available formats are listed in the
-[Formats](../../interfaces/formats.md#formats) section.
+[Formats](../../../interfaces/formats.md#formats) section.
 
 The path part of `URI` may contain globs. In this case the table would be read-only.
 
 **Example:**
 
@@ -64,7 +67,7 @@ Multiple path components can have globs. To be processed, the file
 
- `{some_string,another_string,yet_another_one}` — Substitutes any of strings `'some_string', 'another_string', 'yet_another_one'`.
- `{N..M}` — Substitutes any number in range from N to M including both borders.
 
-Constructions with `{}` are similar to the [remote](../../query_language/table_functions/remote.md) table function.
+Constructions with `{}` are similar to the [remote](../../../sql_reference/table_functions/remote.md) table function.
 
 **Example**
 
@@ -115,6 +118,6 @@ CREATE TABLE big_table (name String, value UInt32) ENGINE = HDFS('hdfs://hdfs1:9
 
 **See Also**
 
-- [Virtual columns](https://clickhouse.tech/docs/en/operations/table_engines/#table_engines-virtual_columns)
+- [Virtual columns](../index.md#table_engines-virtual_columns)
 
 [Original article](https://clickhouse.tech/docs/en/operations/table_engines/hdfs/)
diff --git a/docs/fr/engines/table_engines/integrations/index.md b/docs/fr/engines/table_engines/integrations/index.md
new file mode 100644
index 00000000000..705aa507d6a
--- /dev/null
+++ b/docs/fr/engines/table_engines/integrations/index.md
@@ -0,0 +1,8 @@
+---
+machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_folder_title: Integrations
+toc_priority: 30
+---
+
+
diff --git a/docs/fr/operations/table_engines/jdbc.md b/docs/fr/engines/table_engines/integrations/jdbc.md
similarity index 86%
rename from docs/fr/operations/table_engines/jdbc.md
rename to docs/fr/engines/table_engines/integrations/jdbc.md
index 4987ca290d3..de636f70abc 100644
--- a/docs/fr/operations/table_engines/jdbc.md
+++ b/docs/fr/engines/table_engines/integrations/jdbc.md
@@ -1,5 +1,8 @@
 ---
 machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_priority: 34
+toc_title: JDBC
 ---
 
 # JDBC {#table-engine-jdbc}
 
 Allows ClickHouse to connect to external databases via [JDBC](https://en.wikipedia.org/wiki/Java_Database_Connectivity).
 
 To implement the JDBC connection, ClickHouse uses the separate program [clickhouse-jdbc-bridge](https://github.com/alex-krash/clickhouse-jdbc-bridge) that should run as a daemon.
 
-This engine supports the [Nullable](../../data_types/nullable.md) data type.
+This engine supports the [Nullable](../../../sql_reference/data_types/nullable.md) data type.
 
 ## Creating a Table {#creating-a-table}
 
@@ -48,11 +51,11 @@ mysql> insert into test (`int_id`, `float`) VALUES (1,2);
 Query OK, 1 row affected (0,00 sec)
 
 mysql> select * from test;
-+--------+--------------+-------+----------------+
++--------+--------------+-------+----------------+
| int_id | int_nullable | float | float_nullable |
-+--------+--------------+-------+----------------+
++--------+--------------+-------+----------------+
|      1 |         NULL |     2 |           NULL |
-+--------+--------------+-------+----------------+
++--------+--------------+-------+----------------+
1 row in set (0,00 sec)
 
@@ -82,6 +85,6 @@ FROM jdbc_table
 
 ## See Also {#see-also}
 
-- [JDBC table function](../../query_language/table_functions/jdbc.md).
+- [JDBC table function](../../../sql_reference/table_functions/jdbc.md).
[Original article](https://clickhouse.tech/docs/en/operations/table_engines/jdbc/)
diff --git a/docs/fr/operations/table_engines/kafka.md b/docs/fr/engines/table_engines/integrations/kafka.md
similarity index 92%
rename from docs/fr/operations/table_engines/kafka.md
rename to docs/fr/engines/table_engines/integrations/kafka.md
index 2c9d3abff9d..b7a538f27dc 100644
--- a/docs/fr/operations/table_engines/kafka.md
+++ b/docs/fr/engines/table_engines/integrations/kafka.md
@@ -1,5 +1,8 @@
 ---
 machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_priority: 32
+toc_title: Kafka
 ---
 
 # Kafka {#kafka}
 
@@ -37,7 +40,7 @@ Required parameters:
 
- `kafka_broker_list` – A comma-separated list of brokers (for example, `localhost:9092`).
- `kafka_topic_list` – A list of Kafka topics.
- `kafka_group_name` – A group of Kafka consumers. Reading margins are tracked for each group separately. If you don't want messages to be duplicated in the cluster, use the same group name everywhere.
-- `kafka_format` – Message format. Uses the same notation as the SQL `FORMAT` function, such as `JSONEachRow`. For more information, see the [Formats](../../interfaces/formats.md) section.
+- `kafka_format` – Message format. Uses the same notation as the SQL `FORMAT` function, such as `JSONEachRow`. For more information, see the [Formats](../../../interfaces/formats.md) section.
 
 Optional parameters:
 
@@ -127,7 +130,7 @@ Example:
 
 SELECT level, sum(total) FROM daily GROUP BY level;
 
-To improve performance, received messages are grouped into blocks the size of [max\_insert\_block\_size](../settings/settings.md#settings-max_insert_block_size). If the block wasn't formed within [stream\_flush\_interval\_ms](../settings/settings.md) milliseconds, the data will be flushed to the table regardless of the completeness of the block.
+To improve performance, received messages are grouped into blocks the size of [max\_insert\_block\_size](../../../operations/server_configuration_parameters/settings.md#settings-max_insert_block_size). If the block wasn't formed within [stream\_flush\_interval\_ms](../../../operations/server_configuration_parameters/settings.md) milliseconds, the data will be flushed to the table regardless of the completeness of the block.
 
 To stop receiving topic data or to change the conversion logic, detach the materialized view:
 
@@ -168,6 +171,6 @@ For a list of possible configuration options, see the [librdkafka
 
 **See Also**
 
-- [Virtual columns](index.md#table_engines-virtual_columns)
+- [Virtual columns](../index.md#table_engines-virtual_columns)
 
 [Original article](https://clickhouse.tech/docs/en/operations/table_engines/kafka/)
diff --git a/docs/fr/operations/table_engines/mysql.md b/docs/fr/engines/table_engines/integrations/mysql.md
similarity index 81%
rename from docs/fr/operations/table_engines/mysql.md
rename to docs/fr/engines/table_engines/integrations/mysql.md
index 1ae36126221..5cec6701228 100644
--- a/docs/fr/operations/table_engines/mysql.md
+++ b/docs/fr/engines/table_engines/integrations/mysql.md
@@ -1,8 +1,11 @@
 ---
 machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_priority: 33
+toc_title: MySQL
 ---
 
-# MySQL {#mysql}
+# MySQL {#mysql}
 
 The MySQL engine allows you to perform `SELECT` queries on data that is stored on a remote MySQL server.
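Before the formal syntax below, a minimal hypothetical sketch may help; the server address, database, table, and credentials here are made up for illustration:

``` sql
-- Expose the remote MySQL table shop.orders as a ClickHouse table
-- (all identifiers and credentials below are illustrative only).
CREATE TABLE mysql_orders
(
    id UInt64,
    amount Float64
)
ENGINE = MySQL('mysql-host:3306', 'shop', 'orders', 'reader', 'secret');

-- The query is forwarded to MySQL and the result is returned to ClickHouse.
SELECT count() FROM mysql_orders;
```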
@@ -17,12 +20,12 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
 ) ENGINE = MySQL('host:port', 'database', 'table', 'user', 'password'[, replace_query, 'on_duplicate_clause']);
 
-See a detailed description of the [CREATE TABLE](../../query_language/create.md#create-table-query) query.
+See a detailed description of the [CREATE TABLE](../../../sql_reference/statements/create.md#create-table-query) query.
 
 The table structure can differ from the original MySQL table structure:
 
- Column names should be the same as in the original MySQL table, but you can use just some of these columns, and in any order.
-- Column types may differ from those in the original MySQL table. ClickHouse tries to [cast](../../query_language/functions/type_conversion_functions.md#type_conversion_function-cast) values to the ClickHouse data types.
+- Column types may differ from those in the original MySQL table. ClickHouse tries to [cast](../../../sql_reference/functions/type_conversion_functions.md#type_conversion_function-cast) values to the ClickHouse data types.
 
 **Engine Parameters**
 
@@ -65,11 +68,11 @@ mysql> insert into test (`int_id`, `float`) VALUES (1,2);
 Query OK, 1 row affected (0,00 sec)
 
 mysql> select * from test;
-+--------+--------------+-------+----------------+
++--------+--------------+-------+----------------+
| int_id | int_nullable | float | float_nullable |
-+--------+--------------+-------+----------------+
++--------+--------------+-------+----------------+
|      1 |         NULL |     2 |           NULL |
-+--------+--------------+-------+----------------+
++--------+--------------+-------+----------------+
1 row in set (0,00 sec)
 
@@ -96,7 +99,7 @@ SELECT * FROM mysql_table
 
 ## See Also {#see-also}
 
-- [The 'mysql' table function](../../query_language/table_functions/mysql.md)
-- [Using MySQL as a source of external dictionary](../../query_language/dicts/external_dicts_dict_sources.md#dicts-external_dicts_dict_sources-mysql)
+- [The 'mysql' table function](../../../sql_reference/table_functions/mysql.md)
+- [Using MySQL as a source of external dictionary](../../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md#dicts-external_dicts_dict_sources-mysql)
 
 [Original article](https://clickhouse.tech/docs/en/operations/table_engines/mysql/)
diff --git a/docs/fr/operations/table_engines/odbc.md b/docs/fr/engines/table_engines/integrations/odbc.md
similarity index 80%
rename from docs/fr/operations/table_engines/odbc.md
rename to docs/fr/engines/table_engines/integrations/odbc.md
index f0433114ea1..22db868bdad 100644
--- a/docs/fr/operations/table_engines/odbc.md
+++ b/docs/fr/engines/table_engines/integrations/odbc.md
@@ -1,5 +1,8 @@
 ---
 machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_priority: 35
+toc_title: ODBC
 ---
 
 # ODBC {#table-engine-odbc}
 
 Allows ClickHouse to connect to external databases via [ODBC](https://en.wikipedia.org/wiki/Open_Database_Connectivity).
 
 To safely implement ODBC connections, ClickHouse uses a separate program, `clickhouse-odbc-bridge`. If the ODBC driver is loaded directly from `clickhouse-server`, driver problems can crash the ClickHouse server. ClickHouse automatically starts `clickhouse-odbc-bridge` when it is required. The ODBC bridge program is installed from the same package as `clickhouse-server`.
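As a quick, hedged sketch of what this looks like in practice (the DSN, database, and table names below are hypothetical and must match your own unixODBC configuration):

``` sql
-- 'DSN=mysqlconn' refers to a data source defined in ~/.odbc.ini or /etc/odbc.ini;
-- 'shop' and 'customers' are illustrative external database and table names.
CREATE TABLE odbc_customers
(
    id UInt64,
    name String
)
ENGINE = ODBC('DSN=mysqlconn', 'shop', 'customers');

SELECT * FROM odbc_customers LIMIT 10;
```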
-Ce moteur prend en charge le [Nullable](../../data_types/nullable.md) type de données. +Ce moteur prend en charge le [Nullable](../../../sql_reference/data_types/nullable.md) type de données. ## Création d'une Table {#creating-a-table} @@ -22,12 +25,12 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] ENGINE = ODBC(connection_settings, external_database, external_table) ``` -Voir une description détaillée de la [CREATE TABLE](../../query_language/create.md#create-table-query) requête. +Voir une description détaillée de la [CREATE TABLE](../../../sql_reference/statements/create.md#create-table-query) requête. La structure de la table peut différer de la structure de la table source: - Les noms de colonnes doivent être les mêmes que dans la table source, mais vous pouvez utiliser quelques-unes de ces colonnes et dans n'importe quel ordre. -- Les types de colonnes peuvent différer de ceux de la table source. ClickHouse essaie de [jeter](../../query_language/functions/type_conversion_functions.md#type_conversion_function-cast) valeurs des types de données ClickHouse. +- Les types de colonnes peuvent différer de ceux de la table source. ClickHouse essaie de [jeter](../../../sql_reference/functions/type_conversion_functions.md#type_conversion_function-cast) valeurs des types de données ClickHouse. **Les Paramètres Du Moteur** @@ -71,7 +74,7 @@ Vous pouvez vérifier la connexion en utilisant le `isql` utilitaire de l'instal ``` bash $ isql -v mysqlconn -+---------------------------------------+ ++-------------------------+ | Connected! | | | ... @@ -92,11 +95,11 @@ mysql> insert into test (`int_id`, `float`) VALUES (1,2); Query OK, 1 row affected (0,00 sec) mysql> select * from test; -+--------+--------------+-------+----------------+ ++------+----------+-----+----------+ | int_id | int_nullable | float | float_nullable | -+--------+--------------+-------+----------------+ ++------+----------+-----+----------+ | 1 | NULL | 2 | NULL | -+--------+--------------+-------+----------------+ ++------+----------+-----+----------+ 1 row in set (0,00 sec) ``` @@ -123,7 +126,7 @@ SELECT * FROM odbc_t ## Voir Aussi {#see-also} -- [Dictionnaires externes ODBC](../../query_language/dicts/external_dicts_dict_sources.md#dicts-external_dicts_dict_sources-odbc) -- [Fonction de table ODBC](../../query_language/table_functions/odbc.md) +- [Dictionnaires externes ODBC](../../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md#dicts-external_dicts_dict_sources-odbc) +- [Fonction de table ODBC](../../../sql_reference/table_functions/odbc.md) [Article Original](https://clickhouse.tech/docs/en/operations/table_engines/odbc/) diff --git a/docs/fr/engines/table_engines/log_family/index.md b/docs/fr/engines/table_engines/log_family/index.md new file mode 100644 index 00000000000..c698303a8ac --- /dev/null +++ b/docs/fr/engines/table_engines/log_family/index.md @@ -0,0 +1,8 @@ +--- +machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_folder_title: Log Family +toc_priority: 29 +--- + + diff --git a/docs/fr/operations/table_engines/log.md b/docs/fr/engines/table_engines/log_family/log.md similarity index 92% rename from docs/fr/operations/table_engines/log.md rename to docs/fr/engines/table_engines/log_family/log.md index 799976e66c1..9adcd4cf9fa 100644 --- a/docs/fr/operations/table_engines/log.md +++ b/docs/fr/engines/table_engines/log_family/log.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: 
f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 33 +toc_title: Journal --- # Journal {#log} diff --git a/docs/fr/operations/table_engines/log_family.md b/docs/fr/engines/table_engines/log_family/log_family.md similarity index 90% rename from docs/fr/operations/table_engines/log_family.md rename to docs/fr/engines/table_engines/log_family/log_family.md index 99b822b5911..e16f9e6639a 100644 --- a/docs/fr/operations/table_engines/log_family.md +++ b/docs/fr/engines/table_engines/log_family/log_family.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 31 +toc_title: Introduction --- # Famille De Moteurs En Rondins {#log-engine-family} @@ -12,7 +15,7 @@ Les moteurs de la famille: - [Journal](log.md) - [TinyLog](tinylog.md) -## Propriétés communes {#common-properties} +## Propriétés Communes {#common-properties} Moteur: @@ -24,7 +27,7 @@ Moteur: Lors `INSERT` requêtes, la table est verrouillée, et d'autres requêtes pour la lecture et l'écriture de données attendent que la table se déverrouille. S'il n'y a pas de requêtes d'écriture de données, un certain nombre de requêtes de lecture de données peuvent être effectuées simultanément. -- Ne prennent pas en charge [mutation](../../query_language/alter.md#alter-mutations) opérations. +- Ne prennent pas en charge [mutation](../../../sql_reference/statements/alter.md#alter-mutations) opérations. - Ne prennent pas en charge les index. diff --git a/docs/fr/operations/table_engines/stripelog.md b/docs/fr/engines/table_engines/log_family/stripelog.md similarity index 92% rename from docs/fr/operations/table_engines/stripelog.md rename to docs/fr/engines/table_engines/log_family/stripelog.md index 969569c139b..35d4706c592 100644 --- a/docs/fr/operations/table_engines/stripelog.md +++ b/docs/fr/engines/table_engines/log_family/stripelog.md @@ -1,8 +1,11 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 32 +toc_title: StripeLog --- -# StripeLog {#stripelog} +# Stripelog {#stripelog} Ce moteur appartient à la famille des moteurs en rondins. Voir les propriétés communes des moteurs de journal et leurs différences dans le [Famille De Moteurs En Rondins](log_family.md) article. @@ -19,7 +22,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] ) ENGINE = StripeLog ``` -Voir la description détaillée de la [CREATE TABLE](../../query_language/create.md#create-table-query) requête. +Voir la description détaillée de la [CREATE TABLE](../../../sql_reference/statements/create.md#create-table-query) requête. ## L'écriture des Données {#table_engines-stripelog-writing-the-data} @@ -36,7 +39,7 @@ Le `StripeLog` moteur ne prend pas en charge la `ALTER UPDATE` et `ALTER DELETE` Le fichier avec des marques permet à ClickHouse de paralléliser la lecture des données. Cela signifie qu'une `SELECT` la requête renvoie des lignes dans un ordre imprévisible. L'utilisation de la `ORDER BY` clause pour trier les lignes. 
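Par exemple (esquisse ; les noms de table et de colonnes sont hypothétiques) :

``` sql
CREATE TABLE stripe_log_example
(
    timestamp DateTime,
    message String
)
ENGINE = StripeLog;

-- l'ordre de restitution n'étant pas garanti, on trie explicitement
SELECT * FROM stripe_log_example ORDER BY timestamp;
```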
-## Exemple D'utilisation {#table_engines-stripelog-example-of-use} +## Exemple D'Utilisation {#table_engines-stripelog-example-of-use} Création d'une table: diff --git a/docs/fr/operations/table_engines/tinylog.md b/docs/fr/engines/table_engines/log_family/tinylog.md similarity index 91% rename from docs/fr/operations/table_engines/tinylog.md rename to docs/fr/engines/table_engines/log_family/tinylog.md index d2f528e07ac..ddf935ba789 100644 --- a/docs/fr/operations/table_engines/tinylog.md +++ b/docs/fr/engines/table_engines/log_family/tinylog.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 34 +toc_title: TinyLog --- # TinyLog {#tinylog} diff --git a/docs/fr/operations/table_engines/aggregatingmergetree.md b/docs/fr/engines/table_engines/mergetree_family/aggregatingmergetree.md similarity index 89% rename from docs/fr/operations/table_engines/aggregatingmergetree.md rename to docs/fr/engines/table_engines/mergetree_family/aggregatingmergetree.md index f0585261d2b..307e9ac3aff 100644 --- a/docs/fr/operations/table_engines/aggregatingmergetree.md +++ b/docs/fr/engines/table_engines/mergetree_family/aggregatingmergetree.md @@ -1,14 +1,17 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 35 +toc_title: AggregatingMergeTree --- -# AggregatingMergeTree {#aggregatingmergetree} +# Aggregatingmergetree {#aggregatingmergetree} Le moteur hérite de [MergeTree](mergetree.md#table_engines-mergetree), modifier la logique pour les parties de données Fusion. ClickHouse remplace toutes les lignes avec la même clé primaire (ou, plus précisément, avec la même [clé de tri](mergetree.md)) avec une seule ligne (dans un rayon d'une partie des données) qui stocke une combinaison d'états de fonctions d'agrégation. Vous pouvez utiliser `AggregatingMergeTree` tables pour l'agrégation incrémentielle des données, y compris pour les vues matérialisées agrégées. -Le moteur traite toutes les colonnes avec [AggregateFunction](../../data_types/nested_data_structures/aggregatefunction.md) type. +Le moteur traite toutes les colonnes avec [AggregateFunction](../../../sql_reference/data_types/aggregatefunction.md) type. Il est approprié d'utiliser `AggregatingMergeTree` si elle réduit le nombre de lignes par commande. @@ -28,7 +31,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] [SETTINGS name=value, ...] ``` -Pour une description des paramètres de requête, voir [demande de description](../../query_language/create.md). +Pour une description des paramètres de requête, voir [demande de description](../../../sql_reference/statements/create.md). **Les clauses de requête** @@ -55,7 +58,7 @@ Tous les paramètres ont la même signification que dans `MergeTree`. ## Sélectionner et insérer {#select-and-insert} -Pour insérer des données, utilisez [INSERT SELECT](../../query_language/insert_into.md) requête avec l'ensemble-l'État des fonctions. +Pour insérer des données, utilisez [INSERT SELECT](../../../sql_reference/statements/insert_into.md) requête avec l'ensemble-l'État des fonctions. Lors de la sélection des données `AggregatingMergeTree` table, utilisez `GROUP BY` et les mêmes fonctions d'agrégat que lors de l'insertion de données, mais en utilisant `-Merge` suffixe. Dans les résultats de `SELECT` requête, les valeurs de `AggregateFunction` type ont une représentation binaire spécifique à l'implémentation pour tous les formats de sortie ClickHouse. 
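Esquisse de l'aller-retour `-State`/`-Merge` décrit ci-dessus (la source `test.visits` reprend l'exemple habituel de la documentation ; noms hypothétiques) :

``` sql
CREATE MATERIALIZED VIEW test.basic
ENGINE = AggregatingMergeTree() PARTITION BY toYYYYMM(StartDate) ORDER BY (CounterID, StartDate)
AS SELECT
    CounterID,
    StartDate,
    sumState(Sign)    AS Visits, -- état intermédiaire, pas une valeur finale
    uniqState(UserID) AS Users
FROM test.visits
GROUP BY CounterID, StartDate;

-- à la lecture, les états sont finalisés avec le suffixe -Merge
SELECT
    StartDate,
    sumMerge(Visits) AS Visits,
    uniqMerge(Users) AS Users
FROM test.basic
GROUP BY StartDate
ORDER BY StartDate;
```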
Si les données de vidage dans, par exemple, `TabSeparated` format avec `SELECT` requête alors ce vidage peut être chargé en utilisant `INSERT` requête. diff --git a/docs/fr/operations/table_engines/collapsingmergetree.md b/docs/fr/engines/table_engines/mergetree_family/collapsingmergetree.md similarity index 97% rename from docs/fr/operations/table_engines/collapsingmergetree.md rename to docs/fr/engines/table_engines/mergetree_family/collapsingmergetree.md index 9c769b4a156..b58a4e7ebe8 100644 --- a/docs/fr/operations/table_engines/collapsingmergetree.md +++ b/docs/fr/engines/table_engines/mergetree_family/collapsingmergetree.md @@ -1,8 +1,11 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 36 +toc_title: CollapsingMergeTree --- -# CollapsingMergeTree {#table_engine-collapsingmergetree} +# Collapsingmergetree {#table_engine-collapsingmergetree} Le moteur hérite de [MergeTree](mergetree.md) et ajoute la logique de l'effondrement des lignes de données de pièces algorithme de fusion. @@ -25,7 +28,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] [SETTINGS name=value, ...] ``` -Pour une description des paramètres de requête, voir [description de la requête](../../query_language/create.md). +Pour une description des paramètres de requête, voir [description de la requête](../../../sql_reference/statements/create.md). **Paramètres CollapsingMergeTree** @@ -136,7 +139,7 @@ Aggregate `count`, `sum` et `avg` pourrait être calculée de cette manière. Ag Si vous avez besoin d'extraire des données sans agrégation (par exemple, pour vérifier si des lignes sont présentes dont les valeurs les plus récentes correspondent à certaines conditions), vous pouvez utiliser `FINAL` le modificateur du `FROM` clause. Cette approche est nettement moins efficace. -## Exemple d'utilisation {#example-of-use} +## Exemple D'Utilisation {#example-of-use} Les données de l'exemple: @@ -226,7 +229,7 @@ SELECT * FROM UAct FINAL Cette façon de sélectionner les données est très inefficace. Ne l'utilisez pas pour les grandes tables. -## Exemple d'une autre approche {#example-of-another-approach} +## Exemple D'Une Autre Approche {#example-of-another-approach} Les données de l'exemple: diff --git a/docs/fr/operations/table_engines/custom_partitioning_key.md b/docs/fr/engines/table_engines/mergetree_family/custom_partitioning_key.md similarity index 89% rename from docs/fr/operations/table_engines/custom_partitioning_key.md rename to docs/fr/engines/table_engines/mergetree_family/custom_partitioning_key.md index cd850f3595c..499d71f3f00 100644 --- a/docs/fr/operations/table_engines/custom_partitioning_key.md +++ b/docs/fr/engines/table_engines/mergetree_family/custom_partitioning_key.md @@ -1,10 +1,13 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 32 +toc_title: "Cl\xE9 De Partitionnement Personnalis\xE9e" --- # Clé De Partitionnement Personnalisée {#custom-partitioning-key} -Le partitionnement est disponible pour [MergeTree](mergetree.md) table de famille (y compris les [répliqué](replication.md) table). [Les vues matérialisées](materializedview.md) basé sur les tables MergeTree prennent également en charge le partitionnement. +Le partitionnement est disponible pour [MergeTree](mergetree.md) table de famille (y compris les [répliqué](replication.md) table). 
[Les vues matérialisées](../special/materializedview.md) basé sur les tables MergeTree prennent également en charge le partitionnement. Une partition est une combinaison logique d'enregistrements dans une table selon un critère spécifié. Vous pouvez définir une partition par un critère arbitraire, comme, par mois, par jour, ou par type d'événement. Chaque partition est stockée séparément pour simplifier les manipulations de ces données. Lors de l'accès aux données, ClickHouse utilise le plus petit sous-ensemble de partitions possible. @@ -37,7 +40,7 @@ Lors de l'insertion de nouvelles données dans une table, ces données sont stoc !!! info "Info" Une fusion ne fonctionne que pour les parties de données qui ont la même valeur pour l'expression de partitionnement. Cela signifie **vous ne devriez pas faire des partitions trop granulaires** (plus d'un millier de partitions). Sinon, l' `SELECT` la requête fonctionne mal en raison d'un nombre déraisonnablement élevé de fichiers dans le système de fichiers et des descripteurs de fichiers ouverts. -L'utilisation de la [système.partie](../system_tables.md#system_tables-parts) table pour afficher les parties et les partitions de la table. Par exemple, supposons que nous avons une `visits` table avec partitionnement par mois. Nous allons effectuer le `SELECT` la requête pour l' `system.parts` table: +L'utilisation de la [système.partie](../../../operations/system_tables.md#system_tables-parts) table pour afficher les parties et les partitions de la table. Par exemple, supposons que nous avons une `visits` table avec partitionnement par mois. Nous allons effectuer le `SELECT` la requête pour l' `system.parts` table: ``` sql SELECT @@ -76,7 +79,7 @@ Décomposons le nom de la première partie: `201901_1_3_1`: Le `active` colonne indique le statut de la partie. `1` est active; `0` est inactif. Les parties inactives sont, par exemple, des parties source restant après la fusion à une partie plus grande. Les parties de données corrompues sont également indiquées comme inactives. -Comme vous pouvez le voir dans l'exemple, il y a plusieurs parties séparées de la même partition (par exemple, `201901_1_3_1` et `201901_1_9_2`). Cela signifie que ces parties ne sont pas encore fusionnées. Clickhouse fusionne les parties insérées des données périodiquement, environ 15 minutes après l'insertion. En outre, vous pouvez effectuer une fusion non planifiée en utilisant [OPTIMIZE](../../query_language/misc.md#misc_operations-optimize) requête. Exemple: +Comme vous pouvez le voir dans l'exemple, il y a plusieurs parties séparées de la même partition (par exemple, `201901_1_3_1` et `201901_1_9_2`). Cela signifie que ces parties ne sont pas encore fusionnées. Clickhouse fusionne les parties insérées des données périodiquement, environ 15 minutes après l'insertion. En outre, vous pouvez effectuer une fusion non planifiée en utilisant [OPTIMIZE](../../../sql_reference/statements/misc.md#misc_operations-optimize) requête. Exemple: ``` sql OPTIMIZE TABLE visits PARTITION 201902; @@ -115,10 +118,10 @@ drwxr-xr-x 2 clickhouse clickhouse 4096 Feb 1 16:48 detached Dossier ‘201901\_1\_1\_0’, ‘201901\_1\_7\_1’ et ainsi de suite sont les répertoires des parties. Chaque partie se rapporte à une partition correspondante et contient des données juste pour un certain mois (la table dans cet exemple a partitionnement par mois). -Le `detached` le répertoire contient des parties qui ont été détachées de la table à l'aide [DETACH](#alter_detach-partition) requête. 
Les parties corrompues sont également déplacées dans ce répertoire, au lieu d'être supprimées. Le serveur n'utilise pas les pièces de la `detached` directory. You can add, delete, or modify the data in this directory at any time – the server will not know about this until you run the [ATTACH](../../query_language/alter.md#alter_attach-partition) requête. +Le `detached` le répertoire contient des parties qui ont été détachées de la table à l'aide [DETACH](#alter_detach-partition) requête. Les parties corrompues sont également déplacées dans ce répertoire, au lieu d'être supprimées. Le serveur n'utilise pas les pièces de la `detached` directory. You can add, delete, or modify the data in this directory at any time – the server will not know about this until you run the [ATTACH](../../../sql_reference/statements/alter.md#alter_attach-partition) requête. Notez que sur le serveur d'exploitation, vous ne pouvez pas modifier manuellement l'ensemble de pièces ou leurs données sur le système de fichiers, car le serveur ne le saura pas. Pour les tables non répliquées, vous pouvez le faire lorsque le serveur est arrêté, mais ce n'est pas recommandé. Pour les tables répliquées, l'ensemble de pièces ne peut en aucun cas être modifié. -ClickHouse vous permet d'effectuer des opérations avec les partitions: les supprimer, copier d'une table à une autre, ou créer une sauvegarde. Voir la liste de toutes les opérations de la section [Manipulations avec des Partitions et des pièces](../../query_language/alter.md#alter_manipulations-with-partitions). +ClickHouse vous permet d'effectuer des opérations avec les partitions: les supprimer, copier d'une table à une autre, ou créer une sauvegarde. Voir la liste de toutes les opérations de la section [Manipulations avec des Partitions et des pièces](../../../sql_reference/statements/alter.md#alter_manipulations-with-partitions). [Article Original](https://clickhouse.tech/docs/en/operations/table_engines/custom_partitioning_key/) diff --git a/docs/fr/operations/table_engines/graphitemergetree.md b/docs/fr/engines/table_engines/mergetree_family/graphitemergetree.md similarity index 90% rename from docs/fr/operations/table_engines/graphitemergetree.md rename to docs/fr/engines/table_engines/mergetree_family/graphitemergetree.md index 584bb5acd5c..e0cb60c088f 100644 --- a/docs/fr/operations/table_engines/graphitemergetree.md +++ b/docs/fr/engines/table_engines/mergetree_family/graphitemergetree.md @@ -1,8 +1,11 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 38 +toc_title: GraphiteMergeTree --- -# GraphiteMergeTree {#graphitemergetree} +# Graphitemergetree {#graphitemergetree} Ce moteur est conçu pour l'amincissement et l'agrégation / moyenne (cumul) [Graphite](http://graphite.readthedocs.io/en/latest/index.html) données. Il peut être utile aux développeurs qui veulent utiliser ClickHouse comme un magasin de données pour Graphite. @@ -27,7 +30,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] [SETTINGS name=value, ...] ``` -Voir une description détaillée de la [CREATE TABLE](../../query_language/create.md#create-table-query) requête. +Voir une description détaillée de la [CREATE TABLE](../../../sql_reference/statements/create.md#create-table-query) requête. 
Un tableau pour les données de Graphite devrait avoir les colonnes suivantes pour les données suivantes: @@ -76,9 +79,9 @@ Tous les paramètres excepté `config_section` ont la même signification que da -## Configuration de cumul {#rollup-configuration} +## Configuration De Cumul {#rollup-configuration} -Les paramètres de cumul sont définis par [graphite\_rollup](../server_settings/settings.md#server_settings-graphite_rollup) paramètre dans la configuration du serveur. Le nom du paramètre pourrait être tout. Vous pouvez créer plusieurs configurations et les utiliser pour différentes tables. +Les paramètres de cumul sont définis par [graphite\_rollup](../../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-graphite_rollup) paramètre dans la configuration du serveur. Le nom du paramètre pourrait être tout. Vous pouvez créer plusieurs configurations et les utiliser pour différentes tables. Structure de configuration de cumul: diff --git a/docs/fr/engines/table_engines/mergetree_family/index.md b/docs/fr/engines/table_engines/mergetree_family/index.md new file mode 100644 index 00000000000..82c36cfc29f --- /dev/null +++ b/docs/fr/engines/table_engines/mergetree_family/index.md @@ -0,0 +1,8 @@ +--- +machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_folder_title: MergeTree Family +toc_priority: 28 +--- + + diff --git a/docs/fr/operations/table_engines/mergetree.md b/docs/fr/engines/table_engines/mergetree_family/mergetree.md similarity index 82% rename from docs/fr/operations/table_engines/mergetree.md rename to docs/fr/engines/table_engines/mergetree_family/mergetree.md index f44a5f596a2..ada3437d714 100644 --- a/docs/fr/operations/table_engines/mergetree.md +++ b/docs/fr/engines/table_engines/mergetree_family/mergetree.md @@ -1,8 +1,11 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 30 +toc_title: MergeTree --- -# MergeTree {#table_engines-mergetree} +# Mergetree {#table_engines-mergetree} Le `MergeTree` moteur et autres moteurs de cette famille (`*MergeTree`) sont les moteurs de table ClickHouse les plus robustes. @@ -27,7 +30,7 @@ Principales caractéristiques: Si nécessaire, vous pouvez définir la méthode d'échantillonnage des données dans le tableau. !!! info "Info" - Le [Fusionner](merge.md) le moteur n'appartient pas à la `*MergeTree` famille. + Le [Fusionner](../special/merge.md) le moteur n'appartient pas à la `*MergeTree` famille. ## Création d'une Table {#table_engine-mergetree-creating-a-table} @@ -48,7 +51,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] [SETTINGS name=value, ...] ``` -Pour une description des paramètres, voir [Créer une description de requête](../../query_language/create.md). +Pour une description des paramètres, voir [Créer une description de requête](../../../sql_reference/statements/create.md). !!! note "Note" `INDEX` est une fonctionnalité expérimentale, voir [Index De Saut De Données](#table_engine-mergetree-data_skipping-indexes). @@ -59,7 +62,7 @@ Pour une description des paramètres, voir [Créer une description de requête]( - `PARTITION BY` — The [clé de partitionnement](custom_partitioning_key.md). - Pour le partitionnement par mois, utilisez les `toYYYYMM(date_column)` l'expression, où `date_column` est une colonne avec une date du type [Date](../../data_types/date.md). Les noms de partition ici ont le `"YYYYMM"` format. 
+ Pour le partitionnement par mois, utilisez les `toYYYYMM(date_column)` l'expression, où `date_column` est une colonne avec une date du type [Date](../../../sql_reference/data_types/date.md). Les noms de partition ici ont le `"YYYYMM"` format. - `ORDER BY` — The sorting key. @@ -87,7 +90,7 @@ Pour une description des paramètres, voir [Créer une description de requête]( - `index_granularity` — Maximum number of data rows between the marks of an index. Default value: 8192. See [Le Stockage De Données](#mergetree-data-storage). - `index_granularity_bytes` — Maximum size of data granules in bytes. Default value: 10Mb. To restrict the granule size only by number of rows, set to 0 (not recommended). See [Le Stockage De Données](#mergetree-data-storage). - `enable_mixed_granularity_parts` — Enables or disables transitioning to control the granule size with the `index_granularity_bytes` paramètre. Avant la version 19.11, il n'y avait que le `index_granularity` réglage pour restreindre la taille des granules. Le `index_granularity_bytes` le paramètre améliore les performances de ClickHouse lors de la sélection de données à partir de tables avec de grandes lignes (des dizaines et des centaines de mégaoctets). Si vous avez des tables avec de grandes lignes, vous pouvez activer ce paramètre pour les tables d'améliorer l'efficacité de `SELECT` requête. - - `use_minimalistic_part_header_in_zookeeper` — Storage method of the data parts headers in ZooKeeper. If `use_minimalistic_part_header_in_zookeeper=1`, puis Zookeeper stocke moins de données. Pour plus d'informations, voir le [Description du réglage](../server_settings/settings.md#server-settings-use_minimalistic_part_header_in_zookeeper) dans “Server configuration parameters”. + - `use_minimalistic_part_header_in_zookeeper` — Storage method of the data parts headers in ZooKeeper. If `use_minimalistic_part_header_in_zookeeper=1`, puis Zookeeper stocke moins de données. Pour plus d'informations, voir le [Description du réglage](../../../operations/server_configuration_parameters/settings.md#server-settings-use_minimalistic_part_header_in_zookeeper) dans “Server configuration parameters”. - `min_merge_bytes_to_use_direct_io` — The minimum data volume for merge operation that is required for using direct I/O access to the storage disk. When merging data parts, ClickHouse calculates the total storage volume of all the data to be merged. If the volume exceeds `min_merge_bytes_to_use_direct_io` octets, ClickHouse lit et écrit les données sur le disque de stockage en utilisant l'interface d'E/S directe (`O_DIRECT` option). Si `min_merge_bytes_to_use_direct_io = 0`, puis les e/s directes sont désactivées. Valeur par défaut: `10 * 1024 * 1024 * 1024` octet. - `merge_with_ttl_timeout` — Minimum delay in seconds before repeating a merge with TTL. Default value: 86400 (1 day). @@ -103,7 +106,7 @@ ENGINE MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDa Dans l'exemple, nous définissons le partitionnement par mois. -Nous définissons également une expression pour l'échantillonnage en tant que hachage par l'ID utilisateur. Cela vous permet de pseudorandomiser les données dans la table pour chaque `CounterID` et `EventDate`. Si vous définissez un [SAMPLE](../../query_language/select.md#select-sample-clause) clause lors de la sélection des données, ClickHouse retournera un échantillon de données uniformément pseudo-aléatoire pour un sous-ensemble d'utilisateurs. 
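Par exemple (esquisse ; la table `hits_distributed` et la condition `CounterID = 34` sont hypothétiques) :

``` sql
SELECT Title, count() * 10 AS PageViews
FROM hits_distributed
SAMPLE 0.1 -- lit environ 10 % des données ; count() est multiplié par 10 pour compenser
WHERE CounterID = 34
GROUP BY Title
ORDER BY PageViews DESC
LIMIT 1000
```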
+Nous définissons également une expression pour l'échantillonnage en tant que hachage par l'ID utilisateur. Cela vous permet de pseudorandomiser les données dans la table pour chaque `CounterID` et `EventDate`. Si vous définissez un [SAMPLE](../../../sql_reference/statements/select.md#select-sample-clause) clause lors de la sélection des données, ClickHouse retournera un échantillon de données uniformément pseudo-aléatoire pour un sous-ensemble d'utilisateurs. Le `index_granularity` paramètre peut être omis, car 8192 est la valeur par défaut. @@ -125,9 +128,9 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] **Paramètres MergeTree ()** -- `date-column` — The name of a column of the [Date](../../data_types/date.md) type. ClickHouse crée automatiquement des partitions par mois en fonction de cette colonne. Les noms de partition sont dans le `"YYYYMM"` format. +- `date-column` — The name of a column of the [Date](../../../sql_reference/data_types/date.md) type. ClickHouse crée automatiquement des partitions par mois en fonction de cette colonne. Les noms de partition sont dans le `"YYYYMM"` format. - `sampling_expression` — An expression for sampling. -- `(primary, key)` — Primary key. Type: [Tuple()](../../data_types/tuple.md) +- `(primary, key)` — Primary key. Type: [Tuple()](../../../sql_reference/data_types/tuple.md) - `index_granularity` — The granularity of an index. The number of data rows between the “marks” d'un index. La valeur 8192 est appropriée pour la plupart des tâches. **Exemple** @@ -155,7 +158,7 @@ La taille de granule est limitée par `index_granularity` et `index_granularity_ Prendre la `(CounterID, Date)` clé primaire comme un exemple. Dans ce cas, le tri et l'index peuvent être illustrés comme suit: - Whole data: [-------------------------------------------------------------------------] + Whole data: [---------------------------------------------] CounterID: [aaaaaaaaaaaaaaaaaabbbbcdeeeeeeeeeeeeefgggggggghhhhhhhhhiiiiiiiiikllllllll] Date: [1111111222222233331233211111222222333211111112122222223111112223311122333] Marks: | | | | | | | | | | | @@ -206,7 +209,7 @@ Cette fonctionnalité est utile lorsque vous utilisez le [SummingMergeTree](summ Dans ce cas, il est logique de ne laisser que quelques colonnes dans la clé primaire qui fourniront des analyses de plage efficaces et ajouteront les colonnes de dimension restantes au tuple de clé de tri. -[ALTER](../../query_language/alter.md) la clé de tri est une opération légère car lorsqu'une nouvelle colonne est ajoutée simultanément à la table et à la clé de tri, les parties de données existantes n'ont pas besoin d'être modifiées. Comme l'ancienne clé de tri est un préfixe de la nouvelle clé de tri et qu'il n'y a pas de données dans la colonne nouvellement ajoutée, les données sont triées à la fois par l'ancienne et la nouvelle clé de tri au moment de la modification de la table. +[ALTER](../../../sql_reference/statements/alter.md) la clé de tri est une opération légère car lorsqu'une nouvelle colonne est ajoutée simultanément à la table et à la clé de tri, les parties de données existantes n'ont pas besoin d'être modifiées. Comme l'ancienne clé de tri est un préfixe de la nouvelle clé de tri et qu'il n'y a pas de données dans la colonne nouvellement ajoutée, les données sont triées à la fois par l'ancienne et la nouvelle clé de tri au moment de la modification de la table. 
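Concrètement (esquisse ; la table `hits` et la colonne `browser` sont hypothétiques) :

``` sql
-- opération légère : les parties de données existantes ne sont pas réécrites,
-- car la nouvelle colonne est ajoutée en fin de clé de tri dans le même ALTER
ALTER TABLE hits
    ADD COLUMN browser String,
    MODIFY ORDER BY (CounterID, EventDate, browser)
```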
### Utilisation D'Index et de Partitions dans les requêtes {#use-of-indexes-and-partitions-in-queries} @@ -236,7 +239,7 @@ Dans l'exemple ci-dessous, l'index ne peut pas être utilisé. SELECT count() FROM table WHERE CounterID = 34 OR URL LIKE '%upyachka%' ``` -Pour vérifier si ClickHouse pouvez utiliser l'index lors de l'exécution d'une requête, utilisez les paramètres [force\_index\_by\_date](../settings/settings.md#settings-force_index_by_date) et [force\_primary\_key](../settings/settings.md). +Pour vérifier si ClickHouse pouvez utiliser l'index lors de l'exécution d'une requête, utilisez les paramètres [force\_index\_by\_date](../../../operations/settings/settings.md#settings-force_index_by_date) et [force\_primary\_key](../../../operations/settings/settings.md). La clé de partitionnement par mois permet de lire uniquement les blocs de données qui contiennent des dates de la plage appropriée. Dans ce cas, le bloc de données peut contenir des données pour plusieurs dates (jusqu'à un mois entier). Dans un bloc, les données sont triées par clé primaire, qui peut ne pas contenir la date comme première colonne. Pour cette raison, l'utilisation d'une requête avec seulement une condition de date qui ne spécifie pas le préfixe de clé primaire entraînera la lecture de plus de données que pour une seule date. @@ -248,7 +251,7 @@ ClickHouse ne peut pas utiliser un index si les valeurs de la clé primaire dans ClickHouse utilise cette logique non seulement pour les séquences de jours du mois, mais pour toute clé primaire qui représente une séquence partiellement monotone. -### Index De Saut De Données (Expérimental) {#table_engine-mergetree-data_skipping-indexes} +### Index de saut de données (expérimental) {#table_engine-mergetree-data_skipping-indexes} La déclaration d'index se trouve dans la section colonnes du `CREATE` requête. @@ -282,7 +285,7 @@ SELECT count() FROM table WHERE s < 'z' SELECT count() FROM table WHERE u64 * i32 == 10 AND u64 * length(s) >= 1234 ``` -#### Types d'Indices disponibles {#available-types-of-indices} +#### Types D'Indices Disponibles {#available-types-of-indices} - `minmax` @@ -311,7 +314,7 @@ SELECT count() FROM table WHERE u64 * i32 == 10 AND u64 * length(s) >= 1234 Types de données pris en charge: `Int*`, `UInt*`, `Float*`, `Enum`, `Date`, `DateTime`, `String`, `FixedString`, `Array`, `LowCardinality`, `Nullable`. - Les fonctions suivantes peuvent l'utiliser: [égal](../../query_language/functions/comparison_functions.md), [notEquals](../../query_language/functions/comparison_functions.md), [dans](../../query_language/functions/in_functions.md), [notIn](../../query_language/functions/in_functions.md), [avoir](../../query_language/functions/array_functions.md). + Les fonctions suivantes peuvent l'utiliser: [égal](../../../sql_reference/functions/comparison_functions.md), [notEquals](../../../sql_reference/functions/comparison_functions.md), [dans](../../../sql_reference/functions/in_functions.md), [notIn](../../../sql_reference/functions/in_functions.md), [avoir](../../../sql_reference/functions/array_functions.md). @@ -327,24 +330,24 @@ Les Conditions dans le `WHERE` la clause contient des appels des fonctions qui f Le `set` l'indice peut être utilisé avec toutes les fonctions. Les sous-ensembles de fonctions pour les autres index sont présentés dans le tableau ci-dessous. 
-| Fonction (opérateur) / Indice de | clé primaire | minmax | ngrambf\_v1 | tokenbf\_v1 | bloom\_filter | -|------------------------------------------------------------------------------------------------------------|--------------|--------|-------------|-------------|---------------| -| [égal (=, ==)](../../query_language/functions/comparison_functions.md#function-equals) | ✔ | ✔ | ✔ | ✔ | ✔ | -| [notEquals (!= , \<\>)](../../query_language/functions/comparison_functions.md#function-notequals) | ✔ | ✔ | ✔ | ✔ | ✔ | -| [comme](../../query_language/functions/string_search_functions.md#function-like) | ✔ | ✔ | ✔ | ✗ | ✗ | -| [notLike](../../query_language/functions/string_search_functions.md#function-notlike) | ✔ | ✔ | ✔ | ✗ | ✗ | -| [startsWith](../../query_language/functions/string_functions.md#startswith) | ✔ | ✔ | ✔ | ✔ | ✗ | -| [endsWith](../../query_language/functions/string_functions.md#endswith) | ✗ | ✗ | ✔ | ✔ | ✗ | -| [multiSearchAny](../../query_language/functions/string_search_functions.md#function-multisearchany) | ✗ | ✗ | ✔ | ✗ | ✗ | -| [dans](../../query_language/functions/in_functions.md#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | -| [notIn](../../query_language/functions/in_functions.md#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | -| [peu (\<)](../../query_language/functions/comparison_functions.md#function-less) | ✔ | ✔ | ✗ | ✗ | ✗ | -| [grand (\>)](../../query_language/functions/comparison_functions.md#function-greater) | ✔ | ✔ | ✗ | ✗ | ✗ | -| [lessOrEquals (\<=)](../../query_language/functions/comparison_functions.md#function-lessorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | -| [greaterOrEquals ( \> =)](../../query_language/functions/comparison_functions.md#function-greaterorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | -| [vide](../../query_language/functions/array_functions.md#function-empty) | ✔ | ✔ | ✗ | ✗ | ✗ | -| [notEmpty](../../query_language/functions/array_functions.md#function-notempty) | ✔ | ✔ | ✗ | ✗ | ✗ | -| hasToken | ✗ | ✗ | ✗ | ✔ | ✗ | +| Fonction (opérateur) / Indice de | clé primaire | minmax | ngrambf\_v1 | tokenbf\_v1 | bloom\_filter | +|--------------------------------------------------------------------------------------------------------------|--------------|--------|-------------|-------------|---------------| +| [égal (=, ==)](../../../sql_reference/functions/comparison_functions.md#function-equals) | ✔ | ✔ | ✔ | ✔ | ✔ | +| [notEquals (!=, \<\>)](../../../sql_reference/functions/comparison_functions.md#function-notequals) | ✔ | ✔ | ✔ | ✔ | ✔ | +| [comme](../../../sql_reference/functions/string_search_functions.md#function-like) | ✔ | ✔ | ✔ | ✗ | ✗ | +| [notLike](../../../sql_reference/functions/string_search_functions.md#function-notlike) | ✔ | ✔ | ✔ | ✗ | ✗ | +| [startsWith](../../../sql_reference/functions/string_functions.md#startswith) | ✔ | ✔ | ✔ | ✔ | ✗ | +| [endsWith](../../../sql_reference/functions/string_functions.md#endswith) | ✗ | ✗ | ✔ | ✔ | ✗ | +| [multiSearchAny](../../../sql_reference/functions/string_search_functions.md#function-multisearchany) | ✗ | ✗ | ✔ | ✗ | ✗ | +| [dans](../../../sql_reference/functions/in_functions.md#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | +| [notIn](../../../sql_reference/functions/in_functions.md#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | +| [peu (\<)](../../../sql_reference/functions/comparison_functions.md#function-less) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [grand (\>)](../../../sql_reference/functions/comparison_functions.md#function-greater) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [lessOrEquals (\<=)](../../../sql_reference/functions/comparison_functions.md#function-lessorequals) | 
✔ | ✔ | ✗ | ✗ | ✗ | +| [greaterOrEquals ( \> =)](../../../sql_reference/functions/comparison_functions.md#function-greaterorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [vide](../../../sql_reference/functions/array_functions.md#function-empty) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [notEmpty](../../../sql_reference/functions/array_functions.md#function-notempty) | ✔ | ✔ | ✗ | ✗ | ✗ | +| hasToken | ✗ | ✗ | ✗ | ✔ | ✗ | Les fonctions avec un argument constant inférieur à la taille ngram ne peuvent pas être utilisées par `ngrambf_v1` pour l'optimisation de la requête. @@ -375,7 +378,7 @@ Détermine la durée de vie de des valeurs. Le `TTL` clause peut être définie pour la table entière et pour chaque colonne individuelle. Ttl de niveau Table peut également spécifier la logique de déplacement automatique des données entre les disques et les volumes. -Les Expressions doivent évaluer pour [Date](../../data_types/date.md) ou [DateTime](../../data_types/datetime.md) type de données. +Les Expressions doivent évaluer pour [Date](../../../sql_reference/data_types/date.md) ou [DateTime](../../../sql_reference/data_types/datetime.md) type de données. Exemple: @@ -384,7 +387,7 @@ TTL time_column TTL time_column + interval ``` -Définir `interval`, utiliser [intervalle](../../query_language/operators.md#operators-datetime) opérateur. +Définir `interval`, utiliser [intervalle](../../../sql_reference/operators.md#operators-datetime) opérateur. ``` sql TTL date_time + INTERVAL 1 MONTH @@ -475,26 +478,24 @@ Les données avec un TTL expiré sont supprimées lorsque ClickHouse fusionne de Lorsque ClickHouse voit que les données sont expirées, il effectue une fusion hors calendrier. Pour contrôler la fréquence de ces fusions, vous pouvez définir [merge\_with\_ttl\_timeout](#mergetree_setting-merge_with_ttl_timeout). Si la valeur est trop faible, il effectuera de nombreuses fusions hors calendrier qui peuvent consommer beaucoup de ressources. -Si vous effectuez la `SELECT` requête entre les fusionne, vous pouvez obtenir des données expirées. Pour éviter cela, utilisez la [OPTIMIZE](../../query_language/misc.md#misc_operations-optimize) requête avant de l' `SELECT`. +Si vous effectuez la `SELECT` requête entre les fusionne, vous pouvez obtenir des données expirées. Pour éviter cela, utilisez la [OPTIMIZE](../../../sql_reference/statements/misc.md#misc_operations-optimize) requête avant de l' `SELECT`. -[Article Original](https://clickhouse.tech/docs/en/operations/table_engines/mergetree/) - -## Utilisation de plusieurs périphériques de bloc pour le stockage de données {#table_engine-mergetree-multiple-volumes} +## Utilisation De Plusieurs Périphériques De Bloc Pour Le Stockage De Données {#table_engine-mergetree-multiple-volumes} ### Introduction {#introduction} `MergeTree` les moteurs de table de famille peuvent stocker des données sur plusieurs périphériques de bloc. Par exemple, il peut être utile lorsque les données d'un tableau sont implicitement divisé en “hot” et “cold”. Les données les plus récentes sont régulièrement demandées mais ne nécessitent qu'une petite quantité d'espace. Au contraire, les données historiques à queue grasse sont rarement demandées. Si plusieurs disques sont disponibles, la “hot” les données peuvent être situées sur des disques rapides (par exemple, SSD NVMe ou en mémoire), tandis que le “cold” des données relativement lente (par exemple, disque dur). -La partie de données est l'unité mobile minimum pour `MergeTree`-tables de moteur. Les données appartenant à une partie sont stockées sur un disque. 
Les parties de données peuvent être déplacées entre les disques en arrière-plan (selon les paramètres de l'utilisateur) ainsi qu'au moyen du [ALTER](../../query_language/alter.md#alter_move-partition) requête. +La partie de données est l'unité mobile minimum pour `MergeTree`-tables de moteur. Les données appartenant à une partie sont stockées sur un disque. Les parties de données peuvent être déplacées entre les disques en arrière-plan (selon les paramètres de l'utilisateur) ainsi qu'au moyen du [ALTER](../../../sql_reference/statements/alter.md#alter_move-partition) requête. ### Terme {#terms} - Disk — Block device mounted to the filesystem. -- Default disk — Disk that stores the path specified in the [chemin](../server_settings/settings.md#server_settings-path) paramètre de serveur. +- Default disk — Disk that stores the path specified in the [chemin](../../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-path) paramètre de serveur. - Volume — Ordered set of equal disks (similar to [JBOD](https://en.wikipedia.org/wiki/Non-RAID_drive_architectures)). - Storage policy — Set of volumes and the rules for moving data between them. -Les noms donnés aux entités décrites peuvent être trouvés dans les tables système, [système.storage\_policies](../system_tables.md#system_tables-storage_policies) et [système.disque](../system_tables.md#system_tables-disks). Pour appliquer l'une des stratégies de stockage configurées pour une table, utilisez `storage_policy` réglage de `MergeTree`-moteur de table de famille. +Les noms donnés aux entités décrites peuvent être trouvés dans les tables système, [système.storage\_policies](../../../operations/system_tables.md#system_tables-storage_policies) et [système.disque](../../../operations/system_tables.md#system_tables-disks). Pour appliquer l'une des stratégies de stockage configurées pour une table, utilisez `storage_policy` réglage de `MergeTree`-moteur de table de famille. ### Configuration {#table_engine-mergetree-multiple-volumes-configure} @@ -629,9 +630,9 @@ Le `default` la Politique de stockage implique d'utiliser un seul volume, qui se Dans le cas de `MergeTree` les tableaux, les données sont sur le disque de différentes façons: - En tant que résultat d'un insert (`INSERT` requête). -- En arrière-plan fusionne et [mutation](../../query_language/alter.md#alter-mutations). +- En arrière-plan fusionne et [mutation](../../../sql_reference/statements/alter.md#alter-mutations). - Lors du téléchargement à partir d'une autre réplique. -- À la suite du gel de la partition [ALTER TABLE … FREEZE PARTITION](../../query_language/alter.md#alter_freeze-partition). +- À la suite du gel de la partition [ALTER TABLE … FREEZE PARTITION](../../../sql_reference/statements/alter.md#alter_freeze-partition). Dans tous ces cas, à l'exception des mutations et du gel de partition, une pièce est stockée sur un volume et un disque selon la Politique de stockage donnée: @@ -641,9 +642,9 @@ Dans tous ces cas, à l'exception des mutations et du gel de partition, une piè Sous le capot, les mutations et la congélation des cloisons utilisent [des liens en dur](https://en.wikipedia.org/wiki/Hard_link). Les liens durs entre différents disques ne sont pas pris en charge, donc dans de tels cas, les pièces résultantes sont stockées sur les mêmes disques que les disques initiaux. 
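Outre les déplacements automatiques décrits ci-après, un déplacement peut être forcé manuellement ; esquisse (la table `hits`, la partition, le volume et le disque sont hypothétiques) :

``` sql
-- esquisse : noms de volume et de disque tels que définis dans la politique de stockage
ALTER TABLE hits MOVE PARTITION 201902 TO VOLUME 'cold';
ALTER TABLE hits MOVE PART 'all_1_1_0' TO DISK 'fast_ssd';
```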
En arrière - plan, les pièces sont déplacées entre les volumes en fonction de la quantité d'espace libre (`move_factor` paramètre) selon l'ordre les volumes sont déclarées dans le fichier de configuration. -Les données ne sont jamais transférées du dernier et dans le premier. On peut utiliser des tables système [système.part\_log](../system_tables.md#system_tables-part-log) (champ `type = MOVE_PART`) et [système.partie](../system_tables.md#system_tables-parts) (Fields `path` et `disk`) pour surveiller l'arrière-plan se déplace. Aussi, les informations détaillées peuvent être trouvées dans les journaux du serveur. +Les données ne sont jamais transférées du dernier et dans le premier. On peut utiliser des tables système [système.part\_log](../../../operations/system_tables.md#system_tables-part-log) (champ `type = MOVE_PART`) et [système.partie](../../../operations/system_tables.md#system_tables-parts) (Fields `path` et `disk`) pour surveiller l'arrière-plan se déplace. Aussi, les informations détaillées peuvent être trouvées dans les journaux du serveur. -L'utilisateur peut forcer le déplacement d'une partie ou d'une partition d'un volume à l'autre à l'aide de la requête [ALTER TABLE … MOVE PART\|PARTITION … TO VOLUME\|DISK …](../../query_language/alter.md#alter_move-partition), toutes les restrictions pour les opérations de fond sont prises en compte. La requête initie un mouvement seul et n'attend pas que les opérations d'arrière-plan soient terminées. L'utilisateur recevra un message d'erreur si pas assez d'espace libre est disponible ou si l'une des conditions requises ne sont pas remplies. +L'utilisateur peut forcer le déplacement d'une partie ou d'une partition d'un volume à l'autre à l'aide de la requête [ALTER TABLE … MOVE PART\|PARTITION … TO VOLUME\|DISK …](../../../sql_reference/statements/alter.md#alter_move-partition), toutes les restrictions pour les opérations de fond sont prises en compte. La requête initie un mouvement seul et n'attend pas que les opérations d'arrière-plan soient terminées. L'utilisateur recevra un message d'erreur si pas assez d'espace libre est disponible ou si l'une des conditions requises ne sont pas remplies. Le déplacement des données n'interfère pas avec la réplication des données. Par conséquent, différentes stratégies de stockage peuvent être spécifiées pour la même table sur différents réplicas. diff --git a/docs/fr/operations/table_engines/replacingmergetree.md b/docs/fr/engines/table_engines/mergetree_family/replacingmergetree.md similarity index 92% rename from docs/fr/operations/table_engines/replacingmergetree.md rename to docs/fr/engines/table_engines/mergetree_family/replacingmergetree.md index c947dd51b42..8051bf5bfcc 100644 --- a/docs/fr/operations/table_engines/replacingmergetree.md +++ b/docs/fr/engines/table_engines/mergetree_family/replacingmergetree.md @@ -1,8 +1,11 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 33 +toc_title: ReplacingMergeTree --- -# ReplacingMergeTree {#replacingmergetree} +# Replacingmergetree {#replacingmergetree} Le moteur diffère de [MergeTree](mergetree.md#table_engines-mergetree) en ce qu'il supprime les doublons avec la même valeur de clé primaire (ou, plus précisément, avec la même [clé de tri](mergetree.md) valeur). @@ -26,7 +29,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] [SETTINGS name=value, ...] ``` -Pour une description des paramètres de requête, voir [demande de description](../../query_language/create.md). 
+Pour une description des paramètres de requête, voir [demande de description](../../../sql_reference/statements/create.md). **ReplacingMergeTree Paramètres** diff --git a/docs/fr/operations/table_engines/replication.md b/docs/fr/engines/table_engines/mergetree_family/replication.md similarity index 94% rename from docs/fr/operations/table_engines/replication.md rename to docs/fr/engines/table_engines/mergetree_family/replication.md index f3e485fceb9..cf02e3da217 100644 --- a/docs/fr/operations/table_engines/replication.md +++ b/docs/fr/engines/table_engines/mergetree_family/replication.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 31 +toc_title: "R\xE9plication Des Donn\xE9es" --- # Réplication Des Données {#table_engines-replication} @@ -18,7 +21,7 @@ La réplication fonctionne au niveau d'une table individuelle, Pas du serveur en La réplication ne dépend pas de la fragmentation. Chaque fragment a sa propre réplication indépendante. -Données compressées pour `INSERT` et `ALTER` les requêtes sont répliquées (pour plus d'informations, consultez la documentation de [ALTER](../../query_language/alter.md#query_language_queries_alter)). +Données compressées pour `INSERT` et `ALTER` les requêtes sont répliquées (pour plus d'informations, consultez la documentation de [ALTER](../../../sql_reference/statements/alter.md#query_language_queries_alter)). `CREATE`, `DROP`, `ATTACH`, `DETACH` et `RENAME` les requêtes sont exécutées sur un seul serveur et ne sont pas répliquées: @@ -28,7 +31,7 @@ Données compressées pour `INSERT` et `ALTER` les requêtes sont répliquées ( Clickhouse utilise [Apache ZooKeeper](https://zookeeper.apache.org) pour stocker des informations méta répliques. Utilisez ZooKeeper version 3.4.5 ou plus récente. -Pour utiliser la réplication, définissez les paramètres [zookeeper](../server_settings/settings.md#server-settings_zookeeper) section de configuration du serveur. +Pour utiliser la réplication, définissez les paramètres [zookeeper](../../../operations/server_configuration_parameters/settings.md#server-settings_zookeeper) section de configuration du serveur. !!! attention "Attention" Ne négligez pas la sécurité. Clickhouse soutient le `digest` [Schéma ACL](https://zookeeper.apache.org/doc/current/zookeeperProgrammers.html#sc_ZooKeeperAccessControl) du sous-système de sécurité ZooKeeper. @@ -56,7 +59,7 @@ Vous pouvez spécifier N'importe quel cluster Zookeeper existant et le système Si ZooKeeper n'est pas défini dans le fichier de configuration, vous ne pouvez pas créer de tables répliquées et toutes les tables répliquées existantes seront en lecture seule. -La gardienne n'est pas utilisé dans `SELECT` requêtes car la réplication n'affecte pas les performances de `SELECT` et les requêtes s'exécutent aussi vite que pour les tables non répliquées. Lors de l'interrogation de tables répliquées distribuées, le comportement de ClickHouse est contrôlé par les paramètres [max\_replica\_delay\_for\_distributed\_queries](../settings/settings.md#settings-max_replica_delay_for_distributed_queries) et [fallback\_to\_stale\_replicas\_for\_distributed\_queries](../settings/settings.md#settings-fallback_to_stale_replicas_for_distributed_queries). +La gardienne n'est pas utilisé dans `SELECT` requêtes car la réplication n'affecte pas les performances de `SELECT` et les requêtes s'exécutent aussi vite que pour les tables non répliquées. 
Lors de l'interrogation de tables répliquées distribuées, le comportement de ClickHouse est contrôlé par les paramètres [max\_replica\_delay\_for\_distributed\_queries](../../../operations/settings/settings.md#settings-max_replica_delay_for_distributed_queries) et [fallback\_to\_stale\_replicas\_for\_distributed\_queries](../../../operations/settings/settings.md#settings-fallback_to_stale_replicas_for_distributed_queries). Pour chaque `INSERT` requête, environ dix entrées sont ajoutées à ZooKeeper par le biais de plusieurs transactions. (Pour être plus précis, c'est pour chaque bloc de données inséré; une requête D'insertion contient un bloc ou un bloc par `max_insert_block_size = 1048576` rangée.) Cela conduit à des latences légèrement plus longues pour `INSERT` par rapport aux tables non répliquées. Mais si vous suivez les recommandations pour insérer des données dans des lots de pas plus d'un `INSERT` par seconde, cela ne crée aucun problème. L'ensemble du cluster clickhouse utilisé pour coordonner un cluster ZooKeeper a un total de plusieurs centaines `INSERTs` par seconde. Le débit sur les insertions de données (le nombre de lignes par seconde) est aussi élevé que pour les non-données répliquées. @@ -68,7 +71,7 @@ Par défaut, une requête INSERT attend la confirmation de l'écriture des donn Chaque bloc de données est écrit de manière atomique. La requête D'insertion est divisée en blocs jusqu'à `max_insert_block_size = 1048576` rangée. En d'autres termes, si l' `INSERT` la requête a moins de 1048576 lignes, elle est faite de manière atomique. -Les blocs de données sont dédupliquées. Pour plusieurs écritures du même bloc de données (blocs de données de même taille contenant les mêmes lignes dans le même ordre), le bloc n'est écrit qu'une seule fois. La raison en est en cas de défaillance du réseau lorsque l'application cliente ne sait pas si les données ont été écrites dans la base de données, de sorte que le `INSERT` requête peut simplement être répété. Peu importe à quelles insertions de réplica ont été envoyées avec des données identiques. `INSERTs` sont idempotents. Les paramètres de déduplication sont contrôlés par [merge\_tree](../server_settings/settings.md#server_settings-merge_tree) les paramètres du serveur. +Les blocs de données sont dédupliquées. Pour plusieurs écritures du même bloc de données (blocs de données de même taille contenant les mêmes lignes dans le même ordre), le bloc n'est écrit qu'une seule fois. La raison en est en cas de défaillance du réseau lorsque l'application cliente ne sait pas si les données ont été écrites dans la base de données, de sorte que le `INSERT` requête peut simplement être répété. Peu importe à quelles insertions de réplica ont été envoyées avec des données identiques. `INSERTs` sont idempotents. Les paramètres de déduplication sont contrôlés par [merge\_tree](../../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-merge_tree) les paramètres du serveur. Pendant la réplication, seules les données source à insérer sont transférées sur le réseau. D'autres transformations de données (fusion) sont coordonnées et effectuées sur toutes les répliques de la même manière. Cela minimise l'utilisation du réseau, ce qui signifie que la réplication fonctionne bien lorsque les répliques résident dans différents centres de données. (Notez que la duplication de données dans différents centres de données est l'objectif principal de la réplication.) 
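Esquisse de création d'une table répliquée avec les substitutions `{shard}` et `{replica}` (le chemin ZooKeeper et les colonnes reprennent l'exemple de cette page et restent hypothétiques) :

``` sql
CREATE TABLE table_name
(
    EventDate DateTime,
    CounterID UInt32,
    UserID UInt32
)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/table_name', '{replica}')
PARTITION BY toYYYYMM(EventDate)
ORDER BY (CounterID, EventDate, intHash32(UserID))
SAMPLE BY intHash32(UserID)
```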
@@ -185,7 +188,7 @@ Une autre option de récupération consiste à supprimer des informations sur la Il n'y a aucune restriction sur la bande passante réseau pendant la récupération. Gardez cela à l'esprit si vous restaurez de nombreuses répliques à la fois. -## Conversion de MergeTree en ReplicatedMergeTree {#converting-from-mergetree-to-replicatedmergetree} +## Conversion De Mergetree En Replicatedmergetree {#converting-from-mergetree-to-replicatedmergetree} Nous utilisons le terme `MergeTree` pour consulter tous les moteurs de la `MergeTree family` le même que pour `ReplicatedMergeTree`. @@ -197,7 +200,7 @@ Renommez la table mergetree existante, puis créez un `ReplicatedMergeTree` tabl Déplacez les données de l'ancienne table vers `detached` sous-répertoire à l'intérieur du répertoire avec les nouvelles données de la table (`/var/lib/clickhouse/data/db_name/table_name/`). Ensuite, exécutez `ALTER TABLE ATTACH PARTITION` sur l'une des répliques d'ajouter ces données à des parties de l'ensemble de travail. -## Conversion de ReplicatedMergeTree en MergeTree {#converting-from-replicatedmergetree-to-mergetree} +## Conversion De Replicatedmergetree En Mergetree {#converting-from-replicatedmergetree-to-mergetree} Créez une table MergeTree avec un nom différent. Déplacez toutes les données du répertoire avec le `ReplicatedMergeTree` données de la table dans le répertoire de données de la nouvelle table. Ensuite, supprimer le `ReplicatedMergeTree` table et redémarrez le serveur. @@ -208,7 +211,7 @@ Si vous voulez vous débarrasser d'un `ReplicatedMergeTree` table sans lancer le Après cela, vous pouvez lancer le serveur, créer un `MergeTree` tableau, déplacer les données de son répertoire, puis redémarrez le serveur. -## Récupération lorsque les métadonnées du Cluster ZooKeeper sont perdues ou endommagées {#recovery-when-metadata-in-the-zookeeper-cluster-is-lost-or-damaged} +## Récupération lorsque les métadonnées du Cluster Zookeeper sont perdues ou endommagées {#recovery-when-metadata-in-the-zookeeper-cluster-is-lost-or-damaged} Si les données de ZooKeeper ont été perdues ou endommagées, vous pouvez les enregistrer en les déplaçant dans une table non compliquée comme décrit ci-dessus. diff --git a/docs/fr/operations/table_engines/summingmergetree.md b/docs/fr/engines/table_engines/mergetree_family/summingmergetree.md similarity index 87% rename from docs/fr/operations/table_engines/summingmergetree.md rename to docs/fr/engines/table_engines/mergetree_family/summingmergetree.md index 098a51a6408..52bfaee15ca 100644 --- a/docs/fr/operations/table_engines/summingmergetree.md +++ b/docs/fr/engines/table_engines/mergetree_family/summingmergetree.md @@ -1,8 +1,11 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 34 +toc_title: SummingMergeTree --- -# SummingMergeTree {#summingmergetree} +# Summingmergetree {#summingmergetree} Le moteur hérite de [MergeTree](mergetree.md#table_engines-mergetree). La différence est que lors de la fusion de parties de données pour `SummingMergeTree` tables ClickHouse remplace toutes les lignes avec la même clé primaire (ou, plus précisément, avec la même [clé de tri](mergetree.md)) avec une ligne qui contient des valeurs résumées pour les colonnes avec le type de données numériques. Si la clé de tri est composée de telle sorte qu'une seule valeur de clé correspond à un grand nombre de lignes, cela réduit considérablement le volume de stockage et accélère la sélection des données. 
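À titre d'illustration, la table `summtt` utilisée dans le hunk suivant peut être créée ainsi (esquisse) :

``` sql
CREATE TABLE summtt
(
    key UInt32,
    value UInt32
)
ENGINE = SummingMergeTree()
ORDER BY key
```

Deux insertions avec la même `key` finissent, après fusion, par une seule ligne dont `value` est la somme.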
@@ -23,7 +26,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
[SETTINGS name=value, ...]
```

-Pour une description des paramètres de requête, voir la [description de la requête](../../query_language/create.md).
+Pour une description des paramètres de requête, voir la [description de la requête](../../../sql_reference/statements/create.md).

**Paramètres de SummingMergeTree**

@@ -95,9 +98,9 @@ SELECT key, sum(value) FROM summtt GROUP BY key

Lorsque les données sont insérées dans une table, elles sont enregistrées telles quelles. ClickHouse fusionne périodiquement les parties de données insérées et c'est à ce moment que les lignes avec la même clé primaire sont additionnées et remplacées par une seule pour chaque partie de données résultante.

-ClickHouse can merge the data parts so that different resulting parts of data can consist of rows with the same primary key, i.e. the summation will be incomplete. Therefore (`SELECT`) une fonction d'agrégation [somme()](../../query_language/agg_functions/reference.md#agg_function-sum) et une clause `GROUP BY` doivent être utilisées dans une requête, comme décrit dans l'exemple ci-dessus.
+ClickHouse can merge the data parts so that different resulting parts of data can consist of rows with the same primary key, i.e. the summation will be incomplete. Therefore (`SELECT`) une fonction d'agrégation [somme()](../../../sql_reference/aggregate_functions/reference.md#agg_function-sum) et une clause `GROUP BY` doivent être utilisées dans une requête, comme décrit dans l'exemple ci-dessus.

-### Règles communes pour la sommation {#common-rules-for-summation}
+### Règles Communes Pour La Sommation {#common-rules-for-summation}

Les valeurs dans les colonnes avec le type de données numériques sont résumées. L'ensemble des colonnes est défini par le paramètre `columns`.

@@ -107,9 +110,9 @@ Si la colonne n'est pas dans la clé primaire et n'est pas résumée, une valeur

Les valeurs des colonnes de la clé primaire ne sont pas résumées.

-### La somme dans les colonnes AggregateFunction {#the-summation-in-the-aggregatefunction-columns}
+### La somme dans les colonnes Aggregatefunction {#the-summation-in-the-aggregatefunction-columns}

-Pour les colonnes de [Type AggregateFunction](../../data_types/nested_data_structures/aggregatefunction.md) ClickHouse se comporte comme le moteur [AggregatingMergeTree](aggregatingmergetree.md), en agrégeant selon la fonction.
+Pour les colonnes de [Type AggregateFunction](../../../sql_reference/data_types/aggregatefunction.md) ClickHouse se comporte comme le moteur [AggregatingMergeTree](aggregatingmergetree.md), en agrégeant selon la fonction.

### Structures Imbriquées {#nested-structures}

@@ -131,7 +134,7 @@ Exemple:
[(1, 100), (2, 150)] + [(1, -100)] -> [(2, 150)]
```

-Lorsque vous demandez des données, utilisez la fonction [sumMap (clé, valeur)](../../query_language/agg_functions/reference.md) pour l'agrégation de `Map`.
+Lorsque vous demandez des données, utilisez la fonction [sumMap (clé, valeur)](../../../sql_reference/aggregate_functions/reference.md) pour l'agrégation de `Map`.

Pour la structure de données imbriquée, vous n'avez pas besoin de spécifier ses colonnes dans le tuple de colonnes pour la sommation.
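Esquisse hypothétique (table et colonnes inventées pour l'illustration) combinant une structure imbriquée `Map` et `sumMap` à la lecture:

``` sql
CREATE TABLE nested_sums
(
    `key` UInt32,
    `Map` Nested(
        `id` UInt32,
        `value` UInt64
    )
)
ENGINE = SummingMergeTree()
ORDER BY key;

-- Les sous-colonnes imbriquées s'insèrent sous forme de tableaux parallèles.
INSERT INTO nested_sums VALUES (1, [1, 2], [100, 150]);
INSERT INTO nested_sums VALUES (1, [2], [50]);

-- Donne ([1, 2], [100, 200]) pour key = 1, que les parties aient été fusionnées ou non.
SELECT key, sumMap(Map.id, Map.value) FROM nested_sums GROUP BY key;
```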
diff --git a/docs/fr/operations/table_engines/versionedcollapsingmergetree.md b/docs/fr/engines/table_engines/mergetree_family/versionedcollapsingmergetree.md similarity index 97% rename from docs/fr/operations/table_engines/versionedcollapsingmergetree.md rename to docs/fr/engines/table_engines/mergetree_family/versionedcollapsingmergetree.md index dadd4b42858..9f80a1683bd 100644 --- a/docs/fr/operations/table_engines/versionedcollapsingmergetree.md +++ b/docs/fr/engines/table_engines/mergetree_family/versionedcollapsingmergetree.md @@ -1,8 +1,11 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 37 +toc_title: VersionedCollapsingMergeTree --- -# VersionedCollapsingMergeTree {#versionedcollapsingmergetree} +# Versionedcollapsingmergetree {#versionedcollapsingmergetree} Ce moteur: @@ -28,7 +31,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] [SETTINGS name=value, ...] ``` -Pour une description des paramètres de requête, voir les [description de la requête](../../query_language/create.md). +Pour une description des paramètres de requête, voir les [description de la requête](../../../sql_reference/statements/create.md). **Les Paramètres Du Moteur** @@ -140,7 +143,7 @@ Aggregate `count`, `sum` et `avg` peut être calculée de cette manière. Aggreg Si vous avez besoin d'extraire les données avec “collapsing” mais sans agrégation (par exemple, pour vérifier si des lignes sont présentes dont les valeurs les plus récentes correspondent à certaines conditions), vous pouvez utiliser `FINAL` le modificateur du `FROM` clause. Cette approche est inefficace et ne devrait pas être utilisée avec de grandes tables. -## Exemple D'utilisation {#example-of-use} +## Exemple D'Utilisation {#example-of-use} Les données de l'exemple: diff --git a/docs/fr/operations/table_engines/buffer.md b/docs/fr/engines/table_engines/special/buffer.md similarity index 98% rename from docs/fr/operations/table_engines/buffer.md rename to docs/fr/engines/table_engines/special/buffer.md index 2b7064cfc42..5ffc6e6724c 100644 --- a/docs/fr/operations/table_engines/buffer.md +++ b/docs/fr/engines/table_engines/special/buffer.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 45 +toc_title: Tampon --- # Tampon {#buffer} diff --git a/docs/fr/operations/table_engines/dictionary.md b/docs/fr/engines/table_engines/special/dictionary.md similarity index 85% rename from docs/fr/operations/table_engines/dictionary.md rename to docs/fr/engines/table_engines/special/dictionary.md index 6a517449863..79b05a5c855 100644 --- a/docs/fr/operations/table_engines/dictionary.md +++ b/docs/fr/engines/table_engines/special/dictionary.md @@ -1,10 +1,13 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 35 +toc_title: Dictionnaire --- # Dictionnaire {#dictionary} -Le `Dictionary` le moteur affiche le [dictionnaire](../../query_language/dicts/external_dicts.md) données comme une table ClickHouse. +Le `Dictionary` le moteur affiche le [dictionnaire](../../../sql_reference/dictionaries/external_dictionaries/external_dicts.md) données comme une table ClickHouse. 
À titre d'exemple, considérons un dictionnaire de `products` avec la configuration suivante: @@ -61,7 +64,7 @@ WHERE name = 'products' └──────────┴──────┴────────┴─────────────────┴─────────────────┴─────────────────┴───────────────┴─────────────────┘ ``` -Vous pouvez utiliser l' [dictGet\*](../../query_language/functions/ext_dict_functions.md#ext_dict_functions) fonction pour obtenir les données du dictionnaire dans ce format. +Vous pouvez utiliser l' [dictGet\*](../../../sql_reference/functions/ext_dict_functions.md#ext_dict_functions) fonction pour obtenir les données du dictionnaire dans ce format. Cette vue n'est pas utile lorsque vous avez besoin d'obtenir des données brutes ou `JOIN` opération. Pour ces cas, vous pouvez utiliser le `Dictionary` moteur, qui affiche les données du dictionnaire dans une table. diff --git a/docs/fr/operations/table_engines/distributed.md b/docs/fr/engines/table_engines/special/distributed.md similarity index 87% rename from docs/fr/operations/table_engines/distributed.md rename to docs/fr/engines/table_engines/special/distributed.md index 60eecc8481b..39ce3d72e3e 100644 --- a/docs/fr/operations/table_engines/distributed.md +++ b/docs/fr/engines/table_engines/special/distributed.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 33 +toc_title: "Distribu\xE9" --- # Distribué {#distributed} @@ -22,7 +25,7 @@ Le moteur distribué accepte les paramètres: Voir aussi: - `insert_distributed_sync` paramètre - - [MergeTree](mergetree.md#table_engine-mergetree-multiple-volumes) pour les exemples + - [MergeTree](../mergetree_family/mergetree.md#table_engine-mergetree-multiple-volumes) pour les exemples Exemple: @@ -83,12 +86,12 @@ Les noms de Cluster ne doivent pas contenir de points. Paramètre `host`, `port` et , éventuellement, `user`, `password`, `secure`, `compression` sont spécifiés pour chaque serveur: - `host` – The address of the remote server. You can use either the domain or the IPv4 or IPv6 address. If you specify the domain, the server makes a DNS request when it starts, and the result is stored as long as the server is running. If the DNS request fails, the server doesn't start. If you change the DNS record, restart the server. - `port` – The TCP port for messenger activity (‘tcp\_port’ dans la configuration, généralement définie sur 9000). Ne le confondez pas avec http\_port. -- `user` – Name of the user for connecting to a remote server. Default value: default. This user must have access to connect to the specified server. Access is configured in the users.xml file. For more information, see the section [Les droits d'accès](../../operations/access_rights.md). +- `user` – Name of the user for connecting to a remote server. Default value: default. This user must have access to connect to the specified server. Access is configured in the users.xml file. For more information, see the section [Les droits d'accès](../../../operations/access_rights.md). - `password` – The password for connecting to a remote server (not masked). Default value: empty string. - `secure` - Utilisez ssl pour la connexion, généralement vous devez également définir `port` = 9440. Le serveur doit écouter 9440 et avoir des certificats corrects. - `compression` - Utiliser la compression de données. Valeur par défaut: true. -When specifying replicas, one of the available replicas will be selected for each of the shards when reading. 
You can configure the algorithm for load balancing (the preference for which replica to access) – see the [équilibrage](../settings/settings.md#settings-load_balancing) paramètre. +When specifying replicas, one of the available replicas will be selected for each of the shards when reading. You can configure the algorithm for load balancing (the preference for which replica to access) – see the [équilibrage](../../../operations/settings/settings.md#settings-load_balancing) paramètre. Si la connexion avec le serveur n'est pas établie, il y aura une tentative de connexion avec un court délai. Si la connexion échoue, la réplique suivante sera sélectionnée, et ainsi de suite pour toutes les répliques. Si la tentative de connexion a échoué pour toutes les répliques, la tentative sera répété de la même façon, plusieurs fois. Cela fonctionne en faveur de la résilience, mais ne fournit pas de tolérance aux pannes complète: un serveur distant peut accepter la connexion, mais peut ne pas fonctionner ou fonctionner mal. @@ -100,9 +103,7 @@ Pour afficher vos clusters, utilisez ‘system.clusters’ table. Le moteur distribué permet de travailler avec un cluster comme un serveur local. Cependant, le cluster est inextensible: vous devez écrire sa configuration dans le fichier de configuration du serveur (encore mieux, pour tous les serveurs du cluster). -Il n'y a pas de support pour les tables distribuées qui regardent d'autres tables distribuées (sauf dans les cas où une table distribuée n'a qu'un seul fragment). Comme alternative, faites en sorte que la table distribuée regarde le “final” table. - -The Distributed engine requires writing clusters to the config file. Clusters from the config file are updated on the fly, without restarting the server. If you need to send a query to an unknown set of shards and replicas each time, you don't need to create a Distributed table – use the ‘remote’ fonction de table à la place. Voir la section [Les fonctions de Table](../../query_language/table_functions/index.md). +The Distributed engine requires writing clusters to the config file. Clusters from the config file are updated on the fly, without restarting the server. If you need to send a query to an unknown set of shards and replicas each time, you don't need to create a Distributed table – use the ‘remote’ fonction de table à la place. Voir la section [Les fonctions de Table](../../../sql_reference/table_functions/index.md). Il existe deux méthodes pour écrire des données dans un cluster: @@ -131,21 +132,21 @@ Vous devriez être préoccupé par le système de sharding dans les cas suivants - Les requêtes sont utilisées qui nécessitent des données de jointure (IN ou JOIN) par une clé spécifique. Si les données sont partagées par cette clé, vous pouvez utiliser local in ou JOIN au lieu de GLOBAL IN ou global JOIN, ce qui est beaucoup plus efficace. - Un grand nombre de serveurs est utilisé (des centaines ou plus) avec un grand nombre de petites requêtes (requêtes de clients individuels - sites Web, annonceurs ou partenaires). Pour que les petites requêtes n'affectent pas l'ensemble du cluster, il est logique de localiser les données d'un seul client sur un seul fragment. Alternativement, comme nous l'avons fait dans Yandex.Metrica, vous pouvez configurer le sharding à deux niveaux: divisez le cluster entier en “layers”, où une couche peut être constituée de plusieurs éclats. 
Les données d'un seul client sont situées sur une seule couche, mais des fragments peuvent être ajoutés à une couche si nécessaire, et les données sont distribuées aléatoirement à l'intérieur de celles-ci. Des tables distribuées sont créées pour chaque couche et une seule table distribuée partagée est créée pour les requêtes globales. -Les données sont écrites de manière asynchrone. Lorsqu'il est inséré dans la table, le bloc de données est simplement écrit dans le système de fichiers local. Les données sont envoyées aux serveurs distants en arrière-plan dès que possible. La période d'envoi des données est gérée par [distributed\_directory\_monitor\_sleep\_time\_ms](../settings/settings.md#distributed_directory_monitor_sleep_time_ms) et [distributed\_directory\_monitor\_max\_sleep\_time\_ms](../settings/settings.md#distributed_directory_monitor_max_sleep_time_ms) paramètre. Le `Distributed` moteur envoie chaque fichier de données insérées séparément, mais vous pouvez activer le lot envoi de fichiers avec l' [distributed\_directory\_monitor\_batch\_inserts](../settings/settings.md#distributed_directory_monitor_batch_inserts) paramètre. Ce paramètre améliore les performances du cluster en utilisant mieux les ressources réseau et serveur local. Vous devriez vérifier si les données sont envoyées avec succès en vérifiant la liste des fichiers (données en attente d'envoi) dans le répertoire de la table: `/var/lib/clickhouse/data/database/table/`. +Les données sont écrites de manière asynchrone. Lorsqu'il est inséré dans la table, le bloc de données est simplement écrit dans le système de fichiers local. Les données sont envoyées aux serveurs distants en arrière-plan dès que possible. La période d'envoi des données est gérée par [distributed\_directory\_monitor\_sleep\_time\_ms](../../../operations/settings/settings.md#distributed_directory_monitor_sleep_time_ms) et [distributed\_directory\_monitor\_max\_sleep\_time\_ms](../../../operations/settings/settings.md#distributed_directory_monitor_max_sleep_time_ms) paramètre. Le `Distributed` moteur envoie chaque fichier de données insérées séparément, mais vous pouvez activer le lot envoi de fichiers avec l' [distributed\_directory\_monitor\_batch\_inserts](../../../operations/settings/settings.md#distributed_directory_monitor_batch_inserts) paramètre. Ce paramètre améliore les performances du cluster en utilisant mieux les ressources réseau et serveur local. Vous devriez vérifier si les données sont envoyées avec succès en vérifiant la liste des fichiers (données en attente d'envoi) dans le répertoire de la table: `/var/lib/clickhouse/data/database/table/`. Si le serveur a cessé d'exister ou a subi un redémarrage Brutal (par exemple, après une panne de périphérique) après une insertion dans une table distribuée, les données insérées peuvent être perdues. Si une partie de données endommagée est détectée dans le répertoire de la table, elle est transférée ‘broken’ sous-répertoire et n'est plus utilisé. -Lorsque l'option max\_parallel\_replicas est activée, le traitement des requêtes est parallélisé entre toutes les répliques d'un seul fragment. Pour plus d'informations, consultez la section [max\_parallel\_replicas](../settings/settings.md#settings-max_parallel_replicas). +Lorsque l'option max\_parallel\_replicas est activée, le traitement des requêtes est parallélisé entre toutes les répliques d'un seul fragment. 
Pour plus d'informations, consultez la section [max\_parallel\_replicas](../../../operations/settings/settings.md#settings-max_parallel_replicas). ## Les Colonnes Virtuelles {#virtual-columns} -- `_shard_num` — Contains the `shard_num` (de `system.clusters`). Type: [UInt32](../../data_types/int_uint.md). +- `_shard_num` — Contains the `shard_num` (de `system.clusters`). Type: [UInt32](../../../sql_reference/data_types/int_uint.md). !!! note "Note" - Depuis [`remote`](../../query_language/table_functions/remote.md)/`cluster` les fonctions de table créent en interne une instance temporaire du même moteur distribué, `_shard_num` est disponible là-bas aussi. + Depuis [`remote`](../../../sql_reference/table_functions/remote.md)/`cluster` les fonctions de table créent en interne une instance temporaire du même moteur distribué, `_shard_num` est disponible là-bas aussi. **Voir Aussi** -- [Les colonnes virtuelles](index.md#table_engines-virtual_columns) +- [Les colonnes virtuelles](../index.md#table_engines-virtual_columns) [Article Original](https://clickhouse.tech/docs/en/operations/table_engines/distributed/) diff --git a/docs/fr/operations/table_engines/external_data.md b/docs/fr/engines/table_engines/special/external_data.md similarity index 97% rename from docs/fr/operations/table_engines/external_data.md rename to docs/fr/engines/table_engines/special/external_data.md index 79b3b32076b..2c1f1633e3e 100644 --- a/docs/fr/operations/table_engines/external_data.md +++ b/docs/fr/engines/table_engines/special/external_data.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 34 +toc_title: "De donn\xE9es externes" --- # Données externes pour le traitement des requêtes {#external-data-for-query-processing} diff --git a/docs/fr/operations/table_engines/file.md b/docs/fr/engines/table_engines/special/file.md similarity index 73% rename from docs/fr/operations/table_engines/file.md rename to docs/fr/engines/table_engines/special/file.md index 8b2f0ada797..c24444e09f2 100644 --- a/docs/fr/operations/table_engines/file.md +++ b/docs/fr/engines/table_engines/special/file.md @@ -1,11 +1,14 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 37 +toc_title: Fichier --- # Fichier {#table_engines-file} Le moteur de table de fichiers conserve les données dans un fichier dans l'un des [fichier -format](../../interfaces/formats.md#formats) (TabSeparated, Native, etc.). +format](../../../interfaces/formats.md#formats) (TabSeparated, Native, etc.). Exemples d'utilisation: @@ -13,7 +16,7 @@ Exemples d'utilisation: - Convertir des données d'un format à un autre. - Mise à jour des données dans ClickHouse via l'édition d'un fichier sur un disque. -## Utilisation dans le serveur ClickHouse {#usage-in-clickhouse-server} +## Utilisation Dans Le Serveur Clickhouse {#usage-in-clickhouse-server} ``` sql File(Format) @@ -22,13 +25,13 @@ File(Format) Le `Format` paramètre spécifie l'un des formats de fichier disponibles. Effectuer `SELECT` requêtes, le format doit être pris en charge pour l'entrée, et à effectuer `INSERT` queries – for output. The available formats are listed in the -[Format](../../interfaces/formats.md#formats) section. +[Format](../../../interfaces/formats.md#formats) section. -ClickHouse ne permet pas de spécifier le chemin du système de fichiers pour`File`. 
Il utilisera le dossier défini par [chemin](../server_settings/settings.md) réglage dans la configuration du serveur. +ClickHouse ne permet pas de spécifier le chemin du système de fichiers pour`File`. Il utilisera le dossier défini par [chemin](../../../operations/server_configuration_parameters/settings.md) réglage dans la configuration du serveur. Lors de la création de la table en utilisant `File(Format)` il crée sous-répertoire vide dans ce dossier. Lorsque les données sont écrites dans cette table, elles sont mises dans `data.Format` fichier dans ce répertoire. -Vous pouvez créer manuellement ce sous dossier et ce fichier dans le système de fichiers [ATTACH](../../query_language/misc.md) il à la table des informations avec le nom correspondant, de sorte que vous pouvez interroger les données de ce fichier. +Vous pouvez créer manuellement ce sous dossier et ce fichier dans le système de fichiers [ATTACH](../../../sql_reference/statements/misc.md) il à la table des informations avec le nom correspondant, de sorte que vous pouvez interroger les données de ce fichier. !!! warning "Avertissement" Soyez prudent avec cette fonctionnalité, car ClickHouse ne garde pas trace des modifications externes apportées à ces fichiers. Le résultat des Écritures simultanées via ClickHouse et en dehors de ClickHouse n'est pas défini. @@ -66,14 +69,14 @@ SELECT * FROM file_engine_table ## Utilisation dans Clickhouse-local {#usage-in-clickhouse-local} -Dans [clickhouse-local](../utils/clickhouse-local.md) Fichier moteur accepte chemin d'accès au fichier en plus `Format`. Les flux d'entrée / sortie par défaut peuvent être spécifiés en utilisant des noms numériques ou lisibles par l'homme comme `0` ou `stdin`, `1` ou `stdout`. +Dans [clickhouse-local](../../../operations/utilities/clickhouse-local.md) Fichier moteur accepte chemin d'accès au fichier en plus `Format`. Les flux d'entrée / sortie par défaut peuvent être spécifiés en utilisant des noms numériques ou lisibles par l'homme comme `0` ou `stdin`, `1` ou `stdout`. **Exemple:** ``` bash $ echo -e "1,2\n3,4" | clickhouse-local -q "CREATE TABLE table (a Int64, b Int64) ENGINE = File(CSV, stdin); SELECT a, b FROM table; DROP TABLE table" ``` -## Les détails de mise en Œuvre {#details-of-implementation} +## Les Détails De Mise En Œuvre {#details-of-implementation} - Plusieurs `SELECT` les requêtes peuvent être effectuées simultanément, mais `INSERT` requêtes s'attendre les uns les autres. - Prise en charge de la création d'un nouveau fichier par `INSERT` requête. diff --git a/docs/fr/operations/table_engines/generate.md b/docs/fr/engines/table_engines/special/generate.md similarity index 74% rename from docs/fr/operations/table_engines/generate.md rename to docs/fr/engines/table_engines/special/generate.md index 4b418f39734..43709c54af1 100644 --- a/docs/fr/operations/table_engines/generate.md +++ b/docs/fr/engines/table_engines/special/generate.md @@ -1,8 +1,11 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 46 +toc_title: GenerateRandom --- -# GenerateRandom {#table_engines-generate} +# Generaterandom {#table_engines-generate} Le moteur de table GenerateRandom produit des données aléatoires pour un schéma de table donné. @@ -11,7 +14,7 @@ Exemples d'utilisation: - Utiliser dans le test pour remplir une grande table reproductible. - Générer une entrée aléatoire pour les tests de fuzzing. 
-## Utilisation dans le serveur ClickHouse {#usage-in-clickhouse-server} +## Utilisation Dans Le Serveur Clickhouse {#usage-in-clickhouse-server} ``` sql ENGINE = GenerateRandom(random_seed, max_string_length, max_array_length) @@ -22,7 +25,7 @@ colonnes et chaînes de tableau en conséquence dans les données générées. Générer le moteur de table prend en charge uniquement `SELECT` requête. -Il prend en charge tous les [Les types de données](../../data_types/index.md) cela peut être stocké dans une table sauf `LowCardinality` et `AggregateFunction`. +Il prend en charge tous les [Les types de données](../../../sql_reference/data_types/index.md) cela peut être stocké dans une table sauf `LowCardinality` et `AggregateFunction`. **Exemple:** @@ -46,7 +49,7 @@ SELECT * FROM generate_engine_table LIMIT 3 └──────┴────────────┘ ``` -## Les détails de mise en Œuvre {#details-of-implementation} +## Les Détails De Mise En Œuvre {#details-of-implementation} - Pas pris en charge: - `ALTER` diff --git a/docs/fr/engines/table_engines/special/index.md b/docs/fr/engines/table_engines/special/index.md new file mode 100644 index 00000000000..481be47314c --- /dev/null +++ b/docs/fr/engines/table_engines/special/index.md @@ -0,0 +1,8 @@ +--- +machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_folder_title: Special +toc_priority: 31 +--- + + diff --git a/docs/fr/operations/table_engines/join.md b/docs/fr/engines/table_engines/special/join.md similarity index 66% rename from docs/fr/operations/table_engines/join.md rename to docs/fr/engines/table_engines/special/join.md index a5210598195..946b9464cdb 100644 --- a/docs/fr/operations/table_engines/join.md +++ b/docs/fr/engines/table_engines/special/join.md @@ -1,10 +1,13 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 40 +toc_title: Rejoindre --- # Rejoindre {#join} -Structure de données préparée pour l'utilisation dans [JOIN](../../query_language/select.md#select-join) opérations. +Structure de données préparée pour l'utilisation dans [JOIN](../../../sql_reference/statements/select.md#select-join) opérations. ## Création d'une Table {#creating-a-table} @@ -16,12 +19,12 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] ) ENGINE = Join(join_strictness, join_type, k1[, k2, ...]) ``` -Voir la description détaillée de la [CREATE TABLE](../../query_language/create.md#create-table-query) requête. +Voir la description détaillée de la [CREATE TABLE](../../../sql_reference/statements/create.md#create-table-query) requête. **Les Paramètres Du Moteur** -- `join_strictness` – [ADHÉRER à la rigueur](../../query_language/select.md#select-join-strictness). -- `join_type` – [Type de jointure](../../query_language/select.md#select-join-types). +- `join_strictness` – [ADHÉRER à la rigueur](../../../sql_reference/statements/select.md#select-join-strictness). +- `join_type` – [Type de jointure](../../../sql_reference/statements/select.md#select-join-types). - `k1[, k2, ...]` – Key columns from the `USING` la clause que l' `JOIN` l'opération est faite avec de la. Entrer `join_strictness` et `join_type` paramètres sans guillemets, par exemple, `Join(ANY, LEFT, col1)`. Ils doivent correspondre à la `JOIN` fonctionnement que le tableau sera utilisé pour. Si les paramètres ne correspondent pas, ClickHouse ne lance pas d'exception et peut renvoyer des données incorrectes. 
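Esquisse minimale (noms hypothétiques) montrant cette correspondance entre les paramètres du moteur et le `JOIN` qui utilise la table:

``` sql
CREATE TABLE id_val_join
(
    `id` UInt32,
    `val` UInt8
)
ENGINE = Join(ANY, LEFT, id);

INSERT INTO id_val_join VALUES (1, 11), (2, 12);

-- La requête doit employer la même rigueur (ANY) et le même type (LEFT) que le moteur:
SELECT *
FROM (SELECT toUInt32(number) AS id FROM numbers(3))
ANY LEFT JOIN id_val_join USING (id);
```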
@@ -83,21 +86,21 @@ Vous pouvez utiliser `INSERT` requêtes pour ajouter des données au `Join`-tabl

Vous ne pouvez pas effectuer une requête `SELECT` directement à partir de la table. Au lieu de cela, utilisez l'une des méthodes suivantes:

- Placez la table sur le côté droit dans une clause `JOIN`.
-- Appelez le [joinGet](../../query_language/functions/other_functions.md#joinget) fonction, qui vous permet d'extraire des données de la table de la même manière que d'un dictionnaire.
+- Appelez le [joinGet](../../../sql_reference/functions/other_functions.md#joinget) fonction, qui vous permet d'extraire des données de la table de la même manière que d'un dictionnaire.

### Limitations et paramètres {#join-limitations-and-settings}

Lors de la création d'une table, les paramètres suivants sont appliqués:

-- [join\_use\_nulls](../settings/settings.md#join_use_nulls)
-- [max\_rows\_in\_join](../settings/query_complexity.md#settings-max_rows_in_join)
-- [max\_bytes\_in\_join](../settings/query_complexity.md#settings-max_bytes_in_join)
-- [join\_overflow\_mode](../settings/query_complexity.md#settings-join_overflow_mode)
-- [join\_any\_take\_last\_row](../settings/settings.md#settings-join_any_take_last_row)
+- [join\_use\_nulls](../../../operations/settings/settings.md#join_use_nulls)
+- [max\_rows\_in\_join](../../../operations/settings/query_complexity.md#settings-max_rows_in_join)
+- [max\_bytes\_in\_join](../../../operations/settings/query_complexity.md#settings-max_bytes_in_join)
+- [join\_overflow\_mode](../../../operations/settings/query_complexity.md#settings-join_overflow_mode)
+- [join\_any\_take\_last\_row](../../../operations/settings/settings.md#settings-join_any_take_last_row)

Le `Join`- les tables de moteur ne peuvent pas être utilisées dans les opérations `GLOBAL JOIN`.

-Le `Join`-moteur permet d'utiliser le réglage [join\_use\_nulls](../settings/settings.md#join_use_nulls) dans la déclaration `CREATE TABLE`. La requête [SELECT](../../query_language/select.md) permet aussi d'utiliser `join_use_nulls`. Si vous avez des réglages `join_use_nulls` différents, vous pouvez obtenir une erreur lors de la jointure des tables. Cela dépend du type de JOINTURE. Lorsque vous utilisez la fonction [joinGet](../../query_language/functions/other_functions.md#joinget), vous devez utiliser le même réglage `join_use_nulls` dans les déclarations `CREATE TABLE` et `SELECT`.
+Le `Join`-moteur permet d'utiliser le réglage [join\_use\_nulls](../../../operations/settings/settings.md#join_use_nulls) dans la déclaration `CREATE TABLE`. La requête [SELECT](../../../sql_reference/statements/select.md) permet aussi d'utiliser `join_use_nulls`. Si vous avez des réglages `join_use_nulls` différents, vous pouvez obtenir une erreur lors de la jointure des tables. Cela dépend du type de JOINTURE. Lorsque vous utilisez la fonction [joinGet](../../../sql_reference/functions/other_functions.md#joinget), vous devez utiliser le même réglage `join_use_nulls` dans les déclarations `CREATE TABLE` et `SELECT`.
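Par exemple (esquisse aux noms hypothétiques), le même réglage est appliqué des deux côtés:

``` sql
CREATE TABLE id_val_nullable
(
    `id` UInt32,
    `val` UInt8
)
ENGINE = Join(ANY, LEFT, id)
SETTINGS join_use_nulls = 1;

INSERT INTO id_val_nullable VALUES (1, 11);

-- joinGet doit s'exécuter avec le même join_use_nulls que celui du CREATE TABLE:
SELECT joinGet('id_val_nullable', 'val', toUInt32(1))
SETTINGS join_use_nulls = 1;
```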
## Le Stockage De Données {#data-storage} diff --git a/docs/fr/engines/table_engines/special/materializedview.md b/docs/fr/engines/table_engines/special/materializedview.md new file mode 100644 index 00000000000..bd9177e7203 --- /dev/null +++ b/docs/fr/engines/table_engines/special/materializedview.md @@ -0,0 +1,12 @@ +--- +machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 43 +toc_title: MaterializedView +--- + +# Materializedview {#materializedview} + +Utilisé pour implémenter des vues matérialisées (pour plus d'informations, voir [CREATE TABLE](../../../sql_reference/statements/create.md)). Pour stocker des données, il utilise un moteur différent qui a été spécifié lors de la création de la vue. Lors de la lecture d'une table, il utilise juste ce moteur. + +[Article Original](https://clickhouse.tech/docs/en/operations/table_engines/materializedview/) diff --git a/docs/fr/operations/table_engines/memory.md b/docs/fr/engines/table_engines/special/memory.md similarity index 93% rename from docs/fr/operations/table_engines/memory.md rename to docs/fr/engines/table_engines/special/memory.md index 6bd043d02ee..4f8411188ee 100644 --- a/docs/fr/operations/table_engines/memory.md +++ b/docs/fr/engines/table_engines/special/memory.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 44 +toc_title: "M\xE9moire" --- # Mémoire {#memory} diff --git a/docs/fr/operations/table_engines/merge.md b/docs/fr/engines/table_engines/special/merge.md similarity index 95% rename from docs/fr/operations/table_engines/merge.md rename to docs/fr/engines/table_engines/special/merge.md index 5a751251b0d..4f879c5e76e 100644 --- a/docs/fr/operations/table_engines/merge.md +++ b/docs/fr/engines/table_engines/special/merge.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 36 +toc_title: Fusionner --- # Fusionner {#merge} @@ -56,7 +59,7 @@ FROM WatchLog ## Les Colonnes Virtuelles {#virtual-columns} -- `_table` — Contains the name of the table from which data was read. Type: [Chaîne](../../data_types/string.md). +- `_table` — Contains the name of the table from which data was read. Type: [Chaîne](../../../sql_reference/data_types/string.md). Vous pouvez définir les conditions constantes sur `_table` dans le `WHERE/PREWHERE` clause (par exemple, `WHERE _table='xyz'`). Dans ce cas l'opération de lecture est effectuée uniquement pour les tables où la condition sur `_table` est satisfaite, pour le `_table` colonne agit comme un index. 
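Esquisse reprenant les tables `WatchLog` évoquées ci-dessus (schéma simplifié et hypothétique):

``` sql
CREATE TABLE WatchLog_old (`date` Date, `UserId` Int64) ENGINE = MergeTree() ORDER BY date;
CREATE TABLE WatchLog_new (`date` Date, `UserId` Int64) ENGINE = MergeTree() ORDER BY date;

CREATE TABLE WatchLog AS WatchLog_old
ENGINE = Merge(currentDatabase(), '^WatchLog');

-- La condition constante sur _table restreint la lecture à WatchLog_new uniquement:
SELECT _table, count()
FROM WatchLog
WHERE _table = 'WatchLog_new'
GROUP BY _table;
```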
diff --git a/docs/fr/operations/table_engines/null.md b/docs/fr/engines/table_engines/special/null.md similarity index 80% rename from docs/fr/operations/table_engines/null.md rename to docs/fr/engines/table_engines/special/null.md index 99e0a5c7126..8122f91ae7c 100644 --- a/docs/fr/operations/table_engines/null.md +++ b/docs/fr/engines/table_engines/special/null.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 38 +toc_title: 'NULL' --- # NULL {#null} diff --git a/docs/fr/operations/table_engines/set.md b/docs/fr/engines/table_engines/special/set.md similarity index 91% rename from docs/fr/operations/table_engines/set.md rename to docs/fr/engines/table_engines/special/set.md index f564f146499..44b03fd89d4 100644 --- a/docs/fr/operations/table_engines/set.md +++ b/docs/fr/engines/table_engines/special/set.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 39 +toc_title: "D\xE9finir" --- # Définir {#set} diff --git a/docs/fr/operations/table_engines/url.md b/docs/fr/engines/table_engines/special/url.md similarity index 84% rename from docs/fr/operations/table_engines/url.md rename to docs/fr/engines/table_engines/special/url.md index a7b58c4ad5d..a32c49a180c 100644 --- a/docs/fr/operations/table_engines/url.md +++ b/docs/fr/engines/table_engines/special/url.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 41 +toc_title: URL --- # URL (URL, Format) {#table_engines-url} @@ -7,11 +10,11 @@ machine_translated: true Gère les données sur un serveur HTTP / HTTPS distant. Ce moteur est similaire à l' [Fichier](file.md) moteur. -## Utilisation du moteur dans le serveur ClickHouse {#using-the-engine-in-the-clickhouse-server} +## Utilisation du moteur dans le serveur Clickhouse {#using-the-engine-in-the-clickhouse-server} Le `format` doit être celui que ClickHouse peut utiliser dans `SELECT` les requêtes et, si nécessaire, en `INSERTs`. Pour la liste complète des formats pris en charge, voir -[Format](../../interfaces/formats.md#formats). +[Format](../../../interfaces/formats.md#formats). Le `URL` doit être conforme à la structure D'un Localisateur de ressources uniforme. L'URL spécifiée doit pointer vers un serveur qui utilise le protocole HTTP ou HTTPS. Cela ne nécessite pas de @@ -21,7 +24,7 @@ en-têtes supplémentaires pour obtenir une réponse du serveur. respectivement. Pour le traitement `POST` demandes, le serveur distant doit prendre en charge [Encodage de transfert en morceaux](https://en.wikipedia.org/wiki/Chunked_transfer_encoding). -Vous pouvez limiter le nombre maximal de sauts de redirection HTTP GET en utilisant [max\_http\_get\_redirects](../settings/settings.md#setting-max_http_get_redirects) paramètre. +Vous pouvez limiter le nombre maximal de sauts de redirection HTTP GET en utilisant [max\_http\_get\_redirects](../../../operations/settings/settings.md#setting-max_http_get_redirects) paramètre. 
**Exemple:** @@ -68,7 +71,7 @@ SELECT * FROM url_engine_table └───────┴───────┘ ``` -## Les détails de mise en Œuvre {#details-of-implementation} +## Les Détails De Mise En Œuvre {#details-of-implementation} - Les lectures et les écritures peuvent être parallèles - Pas pris en charge: diff --git a/docs/fr/operations/table_engines/view.md b/docs/fr/engines/table_engines/special/view.md similarity index 82% rename from docs/fr/operations/table_engines/view.md rename to docs/fr/engines/table_engines/special/view.md index e6650a20399..4898e7ba41f 100644 --- a/docs/fr/operations/table_engines/view.md +++ b/docs/fr/engines/table_engines/special/view.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 42 +toc_title: Vue --- # Vue {#table_engines-view} diff --git a/docs/fr/faq/general.md b/docs/fr/faq/general.md index 51193676500..d42e1432f99 100644 --- a/docs/fr/faq/general.md +++ b/docs/fr/faq/general.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 78 +toc_title: "Questions G\xE9n\xE9rales" --- # Questions Générales {#general-questions} @@ -44,7 +47,7 @@ SELECT * FROM table INTO OUTFILE 'file' FORMAT CSV ### Utilisation d'une Table de moteur de fichiers {#using-a-file-engine-table} -Voir [Fichier](../operations/table_engines/file.md). +Voir [Fichier](../engines/table_engines/special/file.md). ### Utilisation De La Redirection En Ligne De Commande {#using-command-line-redirection} diff --git a/docs/fr/faq/index.md b/docs/fr/faq/index.md new file mode 100644 index 00000000000..062df2298ea --- /dev/null +++ b/docs/fr/faq/index.md @@ -0,0 +1,8 @@ +--- +machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_folder_title: F.A.Q. 
+toc_priority: 76
+---
+
+
diff --git a/docs/fr/getting_started/example_datasets/amplab_benchmark.md b/docs/fr/getting_started/example_datasets/amplab_benchmark.md
index 21b27e23615..5e552480e01 100644
--- a/docs/fr/getting_started/example_datasets/amplab_benchmark.md
+++ b/docs/fr/getting_started/example_datasets/amplab_benchmark.md
@@ -1,5 +1,8 @@
---
machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_priority: 17
+toc_title: AMPLab Big Data Benchmark
---

# AMPLab Big Data Benchmark {#amplab-big-data-benchmark}
diff --git a/docs/fr/getting_started/example_datasets/criteo.md b/docs/fr/getting_started/example_datasets/criteo.md
index 610ed78febd..7669209d032 100644
--- a/docs/fr/getting_started/example_datasets/criteo.md
+++ b/docs/fr/getting_started/example_datasets/criteo.md
@@ -1,5 +1,8 @@
---
machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_priority: 19
+toc_title: "T\xE9raoctet click Logs de Criteo"
---

# Téraoctet de journaux de clics de Criteo {#terabyte-of-click-logs-from-criteo}
diff --git a/docs/fr/getting_started/example_datasets/index.md b/docs/fr/getting_started/example_datasets/index.md
index 584214d5718..f3aaf473527 100644
--- a/docs/fr/getting_started/example_datasets/index.md
+++ b/docs/fr/getting_started/example_datasets/index.md
@@ -1,5 +1,9 @@
---
machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_folder_title: Example Datasets
+toc_priority: 12
+toc_title: Introduction
---

# Exemple De Jeux De Données {#example-datasets}
diff --git a/docs/fr/getting_started/example_datasets/metrica.md b/docs/fr/getting_started/example_datasets/metrica.md
index 01af9b4fdd6..7b148781e3a 100644
--- a/docs/fr/getting_started/example_datasets/metrica.md
+++ b/docs/fr/getting_started/example_datasets/metrica.md
@@ -1,5 +1,8 @@
---
machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_priority: 21
+toc_title: "Yandex.Metrica De Donn\xE9es"
---

# Yandex Anonymisé.Metrica De Données {#anonymized-yandex-metrica-data}
@@ -64,4 +67,4 @@ clickhouse-client --query "SELECT COUNT(*) FROM datasets.visits_v1"

[Tutoriel ClickHouse](../../getting_started/tutorial.md) est basé sur le jeu de données Yandex.Metrica, et la façon recommandée de commencer avec cet ensemble de données est simplement de suivre le tutoriel.

-D'autres exemples de requêtes pour ces tables peuvent être trouvés parmi les [tests avec État](https://github.com/ClickHouse/ClickHouse/tree/master/src/tests/queries/1_stateful) de ClickHouse (ils y sont nommés `test.hits` et `test.visits`).
+D'autres exemples de requêtes pour ces tables peuvent être trouvés parmi les [tests avec État](https://github.com/ClickHouse/ClickHouse/tree/master/tests/queries/1_stateful) de ClickHouse (ils y sont nommés `test.hits` et `test.visits`).
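À titre d'illustration, voici une requête (esquisse basée sur le schéma publié de `visits_v1`) que l'on peut exécuter une fois les tables chargées dans la base `datasets` comme ci-dessus:

``` sql
SELECT
    StartURL AS URL,
    AVG(Duration) AS AvgDuration
FROM datasets.visits_v1
WHERE StartDate BETWEEN '2014-03-23' AND '2014-03-30'
GROUP BY URL
ORDER BY AvgDuration DESC
LIMIT 10
```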
diff --git a/docs/fr/getting_started/example_datasets/nyc_taxi.md b/docs/fr/getting_started/example_datasets/nyc_taxi.md index a1179c22d37..6f002ecec9a 100644 --- a/docs/fr/getting_started/example_datasets/nyc_taxi.md +++ b/docs/fr/getting_started/example_datasets/nyc_taxi.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 16 +toc_title: "New York Taxi Donn\xE9es" --- # New York Taxi Données {#new-york-taxi-data} diff --git a/docs/fr/getting_started/example_datasets/ontime.md b/docs/fr/getting_started/example_datasets/ontime.md index ffb3845a49c..2ee93d3d556 100644 --- a/docs/fr/getting_started/example_datasets/ontime.md +++ b/docs/fr/getting_started/example_datasets/ontime.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 15 +toc_title: OnTime --- # OnTime {#ontime} diff --git a/docs/fr/getting_started/example_datasets/star_schema.md b/docs/fr/getting_started/example_datasets/star_schema.md index 5c98762648b..f39d810623b 100644 --- a/docs/fr/getting_started/example_datasets/star_schema.md +++ b/docs/fr/getting_started/example_datasets/star_schema.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 20 +toc_title: "R\xE9f\xE9rence Du Sch\xE9ma En \xC9toile" --- # Référence Du Schéma En Étoile {#star-schema-benchmark} diff --git a/docs/fr/getting_started/example_datasets/wikistat.md b/docs/fr/getting_started/example_datasets/wikistat.md index 6b8ab4ae28d..e3b8a322f09 100644 --- a/docs/fr/getting_started/example_datasets/wikistat.md +++ b/docs/fr/getting_started/example_datasets/wikistat.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 18 +toc_title: WikiStat --- # WikiStat {#wikistat} diff --git a/docs/fr/getting_started/index.md b/docs/fr/getting_started/index.md index 62f11602e64..375c87b112c 100644 --- a/docs/fr/getting_started/index.md +++ b/docs/fr/getting_started/index.md @@ -1,5 +1,10 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_folder_title: Getting Started +toc_hidden: true +toc_priority: 8 +toc_title: "cach\xE9s" --- # Prise En Main {#getting-started} diff --git a/docs/fr/getting_started/install.md b/docs/fr/getting_started/install.md index f158d9ce629..72e59b279f1 100644 --- a/docs/fr/getting_started/install.md +++ b/docs/fr/getting_started/install.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 11 +toc_title: Installation --- # Installation {#installation} @@ -111,7 +114,7 @@ Vous pouvez compiler des paquets et les installer ou utiliser des programmes san Client: programs/clickhouse-client Server: programs/clickhouse-server -Vous devrez créer un dossier de données et de métadonnées et `chown` pour l'utilisateur souhaité. Leurs chemins peuvent être modifiés dans la configuration du serveur (src / SGBD / programs / server / config.xml), par défaut, ils sont: +Vous devrez créer un dossier de données et de métadonnées et `chown` pour l'utilisateur souhaité. 
Leurs chemins peuvent être modifiés dans la configuration du serveur (src / programs / server / config.xml), par défaut, ils sont: /opt/clickhouse/data/default/ /opt/clickhouse/metadata/default/ diff --git a/docs/fr/getting_started/playground.md b/docs/fr/getting_started/playground.md index 99cbd7b5747..57993309923 100644 --- a/docs/fr/getting_started/playground.md +++ b/docs/fr/getting_started/playground.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 14 +toc_title: "R\xE9cr\xE9ation" --- # Clickhouse Aire De Jeux {#clickhouse-playground} diff --git a/docs/fr/getting_started/tutorial.md b/docs/fr/getting_started/tutorial.md index bb36508efd0..1806268c1b4 100644 --- a/docs/fr/getting_started/tutorial.md +++ b/docs/fr/getting_started/tutorial.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 12 +toc_title: Tutoriel --- # Tutoriel ClickHouse {#clickhouse-tutorial} @@ -103,11 +106,11 @@ Comme dans la plupart des systèmes de gestion de bases de données, clickhouse clickhouse-client --query "CREATE DATABASE IF NOT EXISTS tutorial" ``` -La syntaxe pour créer des tables est beaucoup plus compliquée par rapport aux bases de données (voir [référence](../query_language/create.md). En général `CREATE TABLE` déclaration doit spécifier trois choses clés: +La syntaxe pour créer des tables est beaucoup plus compliquée par rapport aux bases de données (voir [référence](../sql_reference/statements/create.md). En général `CREATE TABLE` déclaration doit spécifier trois choses clés: 1. Nom de la table à créer. -2. Table schema, i.e. list of columns and their [types de données](../data_types/index.md). -3. [Tableau moteur](../operations/table_engines/index.md) et ce sont les paramètres, qui déterminent tous les détails sur la façon dont les requêtes à cette table seront physiquement exécutées. +2. Table schema, i.e. list of columns and their [types de données](../sql_reference/data_types/index.md). +3. [Tableau moteur](../engines/table_engines/index.md) et ce sont les paramètres, qui déterminent tous les détails sur la façon dont les requêtes à cette table seront physiquement exécutées. Yandex.Metrica est un service d'analyse web, et l'exemple de jeu de données ne couvre pas toutes ses fonctionnalités, il n'y a donc que deux tables à créer: @@ -459,11 +462,11 @@ SETTINGS index_granularity = 8192 Vous pouvez exécuter ces requêtes en utilisant le mode interactif de `clickhouse-client` (lancez - le simplement dans un terminal sans spécifier une requête à l'avance) ou essayez-en [interface de rechange](../interfaces/index.md) Si tu veux. -Comme nous pouvons le voir, `hits_v1` utilise la [moteur MergeTree de base](../operations/table_engines/mergetree.md) tandis que le `visits_v1` utilise la [Effondrer](../operations/table_engines/collapsingmergetree.md) variante. +Comme nous pouvons le voir, `hits_v1` utilise la [moteur MergeTree de base](../engines/table_engines/mergetree_family/mergetree.md) tandis que le `visits_v1` utilise la [Effondrer](../engines/table_engines/mergetree_family/collapsingmergetree.md) variante. ### Importer Des Données {#import-data} -L'importation de données vers ClickHouse se fait via [INSERT INTO](../query_language/insert_into.md) requête comme dans de nombreuses autres bases de données SQL. 
Toutefois, les données sont généralement fournies dans l'une des [formats de sérialisation pris en charge](../interfaces/formats.md) plutôt `VALUES` clause (qui est également pris en charge). +L'importation de données vers ClickHouse se fait via [INSERT INTO](../sql_reference/statements/insert_into.md) requête comme dans de nombreuses autres bases de données SQL. Toutefois, les données sont généralement fournies dans l'une des [formats de sérialisation pris en charge](../interfaces/formats.md) plutôt `VALUES` clause (qui est également pris en charge). Les fichiers que nous avons téléchargés plus tôt sont au format séparé par des onglets, alors voici comment les importer via le client console: @@ -528,9 +531,9 @@ Clickhouse cluster est un cluster homogène. Étapes pour configurer: 1. Installer clickhouse server sur toutes les machines du cluster 2. Configurer les configurations de cluster dans les fichiers de configuration 3. Créer des tables locales sur chaque instance -4. Créer un [Distribué table](../operations/table_engines/distributed.md) +4. Créer un [Distribué table](../engines/table_engines/special/distributed.md) -[Distribué table](../operations/table_engines/distributed.md) est en fait une sorte de “view” aux tables locales du cluster ClickHouse. SELECT query from a distributed table s'exécute à l'aide des ressources de tous les fragments du cluster. Vous pouvez spécifier des configurations pour plusieurs clusters et créer plusieurs tables distribuées fournissant des vues à différents clusters. +[Distribué table](../engines/table_engines/special/distributed.md) est en fait une sorte de “view” aux tables locales du cluster ClickHouse. SELECT query from a distributed table s'exécute à l'aide des ressources de tous les fragments du cluster. Vous pouvez spécifier des configurations pour plusieurs clusters et créer plusieurs tables distribuées fournissant des vues à différents clusters. Exemple de configuration pour un cluster avec trois fragments, une réplique chacun: @@ -572,16 +575,16 @@ CREATE TABLE tutorial.hits_all AS tutorial.hits_local ENGINE = Distributed(perftest_3shards_1replicas, tutorial, hits_local, rand()); ``` -Une pratique courante consiste à créer des tables distribuées similaires sur toutes les machines du cluster. Il permet d'exécuter des requêtes distribuées sur n'importe quelle machine du cluster. Il existe également une autre option pour créer une table distribuée temporaire pour une requête SELECT donnée en utilisant [distant](../query_language/table_functions/remote.md) table de fonction. +Une pratique courante consiste à créer des tables distribuées similaires sur toutes les machines du cluster. Il permet d'exécuter des requêtes distribuées sur n'importe quelle machine du cluster. Il existe également une autre option pour créer une table distribuée temporaire pour une requête SELECT donnée en utilisant [distant](../sql_reference/table_functions/remote.md) table de fonction. -Passons à l'exécution de [INSERT SELECT](../query_language/insert_into.md) dans les Distribué table la table à plusieurs serveurs. +Passons à l'exécution de [INSERT SELECT](../sql_reference/statements/insert_into.md) dans les Distribué table la table à plusieurs serveurs. ``` sql INSERT INTO tutorial.hits_all SELECT * FROM tutorial.hits_v1; ``` !!! warning "Avis" - Cette approche ne convient pas au sharding de grandes tables. Il y a un outil séparé [clickhouse-copieur](../operations/utils/clickhouse-copier.md) cela peut re-fragmenter de grandes tables arbitraires. 
+ Cette approche ne convient pas au sharding de grandes tables. Il y a un outil séparé [clickhouse-copieur](../operations/utilities/clickhouse-copier.md) qui peut re-fragmenter de grandes tables arbitraires.

Comme vous pouvez vous y attendre, les requêtes lourdes de calcul s'exécutent N fois plus vite si elles utilisent 3 serveurs au lieu d'un.

@@ -657,7 +660,7 @@ ENGINE = ReplicatedMergeTree(
...
```

-Ici, nous utilisons le moteur de table [ReplicatedMergeTree](../operations/table_engines/replication.md). Dans les paramètres, nous spécifions le chemin ZooKeeper contenant les identificateurs du fragment et du réplica.
+Ici, nous utilisons le moteur de table [ReplicatedMergeTree](../engines/table_engines/mergetree_family/replication.md). Dans les paramètres, nous spécifions le chemin ZooKeeper contenant les identificateurs du fragment et du réplica.

``` sql
INSERT INTO tutorial.hits_replica SELECT * FROM tutorial.hits_local;
diff --git a/docs/fr/guides/apply_catboost_model.md b/docs/fr/guides/apply_catboost_model.md
index 613de6371cb..698f26d10d2 100644
--- a/docs/fr/guides/apply_catboost_model.md
+++ b/docs/fr/guides/apply_catboost_model.md
@@ -1,5 +1,8 @@
---
machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_priority: 41
+toc_title: "Application Des Mod\xE8les CatBoost"
---

# Application D'un modèle Catboost dans ClickHouse {#applying-catboost-model-in-clickhouse}

@@ -113,7 +116,7 @@ FROM amazon_train
+-count()-+
| 65538 |
-+---------+
++-------+

## 3. Intégrer CatBoost dans ClickHouse {#integrate-catboost-into-clickhouse}

@@ -182,7 +185,7 @@ LIMIT 10
```

!!! note "Note"
- Fonction [modelEvaluate](../query_language/functions/other_functions.md#function-modelevaluate) retourne un tuple avec des prédictions brutes par classe pour les modèles multiclasses.
+ Fonction [modelEvaluate](../sql_reference/functions/other_functions.md#function-modelevaluate) retourne un tuple avec des prédictions brutes par classe pour les modèles multiclasses.

Prédisons la probabilité:

@@ -205,7 +208,7 @@ LIMIT 10
```

!!! note "Note"
- Plus d'infos sur la fonction [exp()](../query_language/functions/math_functions.md).
+ Plus d'infos sur la fonction [exp()](../sql_reference/functions/math_functions.md).

Calculons LogLoss sur l'échantillon:

@@ -231,6 +234,6 @@ FROM
```

!!! note "Note"
- Plus d'infos sur les fonctions [avg()](../query_language/agg_functions/reference.md#agg_function-avg) et [log()](../query_language/functions/math_functions.md).
+ Plus d'infos sur les fonctions [avg()](../sql_reference/aggregate_functions/reference.md#agg_function-avg) et [log()](../sql_reference/functions/math_functions.md).

[Article Original](https://clickhouse.tech/docs/en/guides/apply_catboost_model/)
diff --git a/docs/fr/guides/index.md b/docs/fr/guides/index.md
index 57a705f7e86..1149406b662 100644
--- a/docs/fr/guides/index.md
+++ b/docs/fr/guides/index.md
@@ -1,5 +1,9 @@
---
machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_folder_title: Guides
+toc_priority: 38
+toc_title: "Aper\xE7u"
---

# ClickHouse Guides {#clickhouse-guides}

diff --git a/docs/fr/index.md b/docs/fr/index.md
index 75cb70e21e6..0e53167306e 100644
--- a/docs/fr/index.md
+++ b/docs/fr/index.md
@@ -1,5 +1,8 @@
---
machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_priority: 3
+toc_title: "Aper\xE7u"
---

# Qu'est-ce que ClickHouse?
{#what-is-clickhouse} @@ -17,7 +20,7 @@ Dans un “normal” SGBD orienté ligne, les données sont stockées dans cet o En d'autres termes, toutes les valeurs liées à une ligne sont physiquement stockées l'une à côté de l'autre. -Des exemples d'un SGBD orienté ligne sont MySQL, Postgres et MS SQL Server. {: .gris } +Des exemples d'un SGBD orienté ligne sont MySQL, Postgres et MS SQL Server. Dans un SGBD orienté colonne, les données sont stockées comme ceci: @@ -31,7 +34,7 @@ Dans un SGBD orienté colonne, les données sont stockées comme ceci: Ces exemples montrent l'ordre que les données sont organisées en. Les valeurs de différentes colonnes sont stockés séparément, et les données de la même colonne sont stockées ensemble. -Exemples D'un SGBD orienté colonne: Vertica, Paraccel (matrice Actian et Amazon Redshift), Sybase IQ, Exasol, Infobright, InfiniDB, MonetDB (VectorWise et Actian Vector), LucidDB, SAP HANA, Google Dremel, Google PowerDrill, Druid et kdb+. {: .gris } +Exemples D'un SGBD orienté colonne: Vertica, Paraccel (matrice Actian et Amazon Redshift), Sybase IQ, Exasol, Infobright, InfiniDB, MonetDB (VectorWise et Actian Vector), LucidDB, SAP HANA, Google Dremel, Google PowerDrill, Druid et kdb+. Different orders for storing data are better suited to different scenarios. The data access scenario refers to what queries are made, how often, and in what proportion; how much data is read for each type of query – rows, columns, and bytes; the relationship between reading and updating data; the working size of the data and how locally it is used; whether transactions are used, and how isolated they are; requirements for data replication and logical integrity; requirements for latency and throughput for each type of query, and so on. diff --git a/docs/fr/interfaces/cli.md b/docs/fr/interfaces/cli.md index 84318bbb50a..118dc40ce7d 100644 --- a/docs/fr/interfaces/cli.md +++ b/docs/fr/interfaces/cli.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 17 +toc_title: Client De Ligne De Commande --- # Client de ligne de commande {#command-line-client} @@ -86,7 +89,7 @@ Formatez une requête comme d'habitude, puis placez les valeurs que vous souhait ``` - `name` — Placeholder identifier. In the console client it should be used in app parameters as `--param_ = value`. -- `data type` — [Type de données](../data_types/index.md) de l'application valeur de paramètre. Par exemple, une structure de données comme `(integer, ('string', integer))` peut avoir la `Tuple(UInt8, Tuple(String, UInt8))` type de données (vous pouvez également utiliser un autre [entier](../data_types/int_uint.md) type). +- `data type` — [Type de données](../sql_reference/data_types/index.md) de l'application valeur de paramètre. Par exemple, une structure de données comme `(integer, ('string', integer))` peut avoir la `Tuple(UInt8, Tuple(String, UInt8))` type de données (vous pouvez également utiliser un autre [entier](../sql_reference/data_types/int_uint.md) type). 
#### Exemple {#example} diff --git a/docs/fr/interfaces/cpp.md b/docs/fr/interfaces/cpp.md index ecf3ca349c4..7863e448ad6 100644 --- a/docs/fr/interfaces/cpp.md +++ b/docs/fr/interfaces/cpp.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 24 +toc_title: "Biblioth\xE8que Client C++ " --- # Bibliothèque Client C++ {#c-client-library} diff --git a/docs/fr/interfaces/formats.md b/docs/fr/interfaces/formats.md index 5f5b693b550..590812fe59c 100644 --- a/docs/fr/interfaces/formats.md +++ b/docs/fr/interfaces/formats.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 21 +toc_title: "Formats d'entr\xE9e et de sortie" --- # Formats pour les données D'entrée et de sortie {#formats} @@ -107,9 +110,9 @@ Seul un petit ensemble de symboles sont échappés. Vous pouvez facilement tombe Les tableaux sont écrits sous la forme d'une liste de valeurs séparées par des virgules entre crochets. Le nombre d'éléments dans le tableau sont formatés comme normalement. `Date` et `DateTime` les types sont écrits entre guillemets simples. Les chaînes sont écrites entre guillemets simples avec les mêmes règles d'échappement que ci-dessus. -[NULL](../query_language/syntax.md) est formaté en tant qu' `\N`. +[NULL](../sql_reference/syntax.md) est formaté en tant qu' `\N`. -Chaque élément de [Imbriqué](../data_types/nested_data_structures/nested.md) structures est représenté sous forme de tableau. +Chaque élément de [Imbriqué](../sql_reference/data_types/nested_data_structures/nested.md) structures est représenté sous forme de tableau. Exemple: @@ -329,7 +332,7 @@ SearchPhrase=curtain designs count()=1064 SearchPhrase=baku count()=1000 ``` -[NULL](../query_language/syntax.md) est formaté en tant qu' `\N`. +[NULL](../sql_reference/syntax.md) est formaté en tant qu' `\N`. ``` sql SELECT * FROM t_null FORMAT TSKV @@ -461,7 +464,7 @@ Si la requête contient GROUP BY, rows\_before\_limit\_at\_least est le nombre e Ce format n'est approprié que pour la sortie d'un résultat de requête, mais pas pour l'analyse (récupération des données à insérer dans une table). -Supports ClickHouse [NULL](../query_language/syntax.md) s'affiche à l'écran `null` dans la sortie JSON. +Supports ClickHouse [NULL](../sql_reference/syntax.md) s'affiche à l'écran `null` dans la sortie JSON. Voir aussi l' [JSONEachRow](#jsoneachrow) format. @@ -538,7 +541,7 @@ ClickHouse ignore les espaces entre les éléments et les virgules après les ob **Valeurs omises traitement** -Clickhouse remplace les valeurs omises par les valeurs par défaut pour le [types de données](../data_types/index.md). +Clickhouse remplace les valeurs omises par les valeurs par défaut pour le [types de données](../sql_reference/data_types/index.md). Si `DEFAULT expr` clickhouse utilise différentes règles de substitution en fonction de [input\_format\_defaults\_for\_omitted\_fields](../operations/settings/settings.md#session_settings-input_format_defaults_for_omitted_fields) paramètre. @@ -583,7 +586,7 @@ Contrairement à l' [JSON](#json) format, il n'y a pas de substitution de séque ### Utilisation de Structures imbriquées {#jsoneachrow-nested} -Si vous avez une table avec [Imbriqué](../data_types/nested_data_structures/nested.md) colonnes de type de données, vous pouvez insérer des données JSON avec la même structure. 
Activer cette fonctionnalité avec le [input\_format\_import\_nested\_json](../operations/settings/settings.md#settings-input_format_import_nested_json) paramètre. +Si vous avez une table avec [Imbriqué](../sql_reference/data_types/nested_data_structures/nested.md) colonnes de type de données, vous pouvez insérer des données JSON avec la même structure. Activer cette fonctionnalité avec le [input\_format\_import\_nested\_json](../operations/settings/settings.md#settings-input_format_import_nested_json) paramètre. Par exemple, considérez le tableau suivant: @@ -657,7 +660,7 @@ Affiche les données sous forme de tables Unicode-art, en utilisant également d Une grille complète de la table est dessinée, et chaque ligne occupe deux lignes dans le terminal. Chaque bloc de résultat est sorti sous la forme d'une table séparée. Ceci est nécessaire pour que les blocs puissent être sortis sans résultats de mise en mémoire tampon (la mise en mémoire tampon serait nécessaire pour pré-calculer la largeur visible de toutes les valeurs). -[NULL](../query_language/syntax.md) est sortie `ᴺᵁᴸᴸ`. +[NULL](../sql_reference/syntax.md) est sortie `ᴺᵁᴸᴸ`. Exemple (montré pour le [PrettyCompact](#prettycompact) format): @@ -761,7 +764,7 @@ FixedString est représenté simplement comme une séquence d'octets. Le tableau est représenté sous la forme d'une longueur varint (non signée [LEB128](https://en.wikipedia.org/wiki/LEB128)), suivie par les éléments de la matrice. -Pour [NULL](../query_language/syntax.md#null-literal) un soutien, un octet supplémentaire contenant 1 ou 0 est ajouté avant chaque [Nullable](../data_types/nullable.md) valeur. Si la valeur est 1, alors la valeur est `NULL` et cet octet est interprétée comme une valeur distincte. Si 0, la valeur après l'octet n'est pas `NULL`. +Pour [NULL](../sql_reference/syntax.md#null-literal) un soutien, un octet supplémentaire contenant 1 ou 0 est ajouté avant chaque [Nullable](../sql_reference/data_types/nullable.md) valeur. Si la valeur est 1, alors la valeur est `NULL` et cet octet est interprétée comme une valeur distincte. Si 0, la valeur après l'octet n'est pas `NULL`. ## Rowbinarywithnamesettypes {#rowbinarywithnamesandtypes} @@ -773,7 +776,7 @@ Semblable à [RowBinary](#rowbinary) mais avec l'ajout de l'en-tête: ## Valeur {#data-format-values} -Imprime chaque ligne entre parenthèses. Les lignes sont séparées par des virgules. Il n'y a pas de virgule après la dernière ligne. Les valeurs entre parenthèses sont également séparées par des virgules. Les nombres sont produits dans un format décimal sans guillemets. Les tableaux sont affichés entre crochets. Les chaînes, les dates et les dates avec des heures sont affichées entre guillemets. Les règles d'échappement et l'analyse sont similaires à [TabSeparated](#tabseparated) format. Pendant le formatage, les espaces supplémentaires ne sont pas insérés, mais pendant l'analyse, ils sont autorisés et ignorés (sauf pour les espaces à l'intérieur des valeurs de tableau, qui ne sont pas autorisés). [NULL](../query_language/syntax.md) est représentée comme `NULL`. +Imprime chaque ligne entre parenthèses. Les lignes sont séparées par des virgules. Il n'y a pas de virgule après la dernière ligne. Les valeurs entre parenthèses sont également séparées par des virgules. Les nombres sont produits dans un format décimal sans guillemets. Les tableaux sont affichés entre crochets. Les chaînes, les dates et les dates avec des heures sont affichées entre guillemets. 
Les règles d'échappement et l'analyse sont similaires à [TabSeparated](#tabseparated) format. Pendant le formatage, les espaces supplémentaires ne sont pas insérés, mais pendant l'analyse, ils sont autorisés et ignorés (sauf pour les espaces à l'intérieur des valeurs de tableau, qui ne sont pas autorisés). [NULL](../sql_reference/syntax.md) est représentée comme `NULL`. The minimum set of characters that you need to escape when passing data in Values ​​format: single quotes and backslashes. @@ -785,7 +788,7 @@ Voir aussi: [input\_format\_values\_interpret\_expressions](../operations/settin Imprime chaque valeur sur une ligne distincte avec le nom de colonne spécifié. Ce format est pratique pour imprimer une ou plusieurs lignes si chaque ligne est constituée d'un grand nombre de colonnes. -[NULL](../query_language/syntax.md) est sortie `ᴺᵁᴸᴸ`. +[NULL](../sql_reference/syntax.md) est sortie `ᴺᵁᴸᴸ`. Exemple: @@ -964,7 +967,7 @@ message MessageType { ``` ClickHouse tente de trouver une colonne nommée `x.y.z` (ou `x_y_z` ou `X.y_Z` et ainsi de suite). -Les messages imbriqués conviennent à l'entrée ou à la sortie d'un [structures de données imbriquées](../data_types/nested_data_structures/nested.md). +Les messages imbriqués conviennent à l'entrée ou à la sortie d'un [structures de données imbriquées](../sql_reference/data_types/nested_data_structures/nested.md). Valeurs par défaut définies dans un schéma protobuf comme ceci @@ -976,7 +979,7 @@ message MessageType { } ``` -ne sont pas appliquées; la [valeurs par défaut de la table](../query_language/create.md#create-default-values) sont utilisés à leur place. +ne sont pas appliquées; la [valeurs par défaut de la table](../sql_reference/statements/create.md#create-default-values) sont utilisés à leur place. Clickhouse entrées et sorties messages protobuf dans le `length-delimited` format. Cela signifie avant que chaque message devrait être écrit sa longueur comme un [varint](https://developers.google.com/protocol-buffers/docs/encoding#varints). @@ -990,23 +993,23 @@ ClickHouse Avro format prend en charge la lecture et l'écriture [Fichiers de do ### Types De Données Correspondant {#data_types-matching} -Le tableau ci-dessous montre les types de données pris en charge et comment ils correspondent à ClickHouse [types de données](../data_types/index.md) dans `INSERT` et `SELECT` requête. +Le tableau ci-dessous montre les types de données pris en charge et comment ils correspondent à ClickHouse [types de données](../sql_reference/data_types/index.md) dans `INSERT` et `SELECT` requête. 
-| Type de données Avro `INSERT` | Type de données ClickHouse | Type de données Avro `SELECT` | -|---------------------------------------------|-----------------------------------------------------------------------------------------|-------------------------------| -| `boolean`, `int`, `long`, `float`, `double` | [Int (8/16/32)](../data_types/int_uint.md), [UInt (8/16/32)](../data_types/int_uint.md) | `int` | -| `boolean`, `int`, `long`, `float`, `double` | [Int64](../data_types/int_uint.md), [UInt64](../data_types/int_uint.md) | `long` | -| `boolean`, `int`, `long`, `float`, `double` | [Float32](../data_types/float.md) | `float` | -| `boolean`, `int`, `long`, `float`, `double` | [Float64](../data_types/float.md) | `double` | -| `bytes`, `string`, `fixed`, `enum` | [Chaîne](../data_types/string.md) | `bytes` | -| `bytes`, `string`, `fixed` | [FixedString (N)](../data_types/fixedstring.md) | `fixed(N)` | -| `enum` | [Enum (8/16)](../data_types/enum.md) | `enum` | -| `array(T)` | [Array(T)](../data_types/array.md) | `array(T)` | -| `union(null, T)`, `union(T, null)` | [Nullable (T)](../data_types/date.md) | `union(null, T)` | -| `null` | [Les Valeurs Null(Nothing)](../data_types/special_data_types/nothing.md) | `null` | -| `int (date)` \* | [Date](../data_types/date.md) | `int (date)` \* | -| `long (timestamp-millis)` \* | [DateTime64 (3)](../data_types/datetime.md) | `long (timestamp-millis)` \* | -| `long (timestamp-micros)` \* | [DateTime64 (6)](../data_types/datetime.md) | `long (timestamp-micros)` \* | +| Type de données Avro `INSERT` | Type de données ClickHouse | Type de données Avro `SELECT` | +|---------------------------------------------|---------------------------------------------------------------------------------------------------------------------|-------------------------------| +| `boolean`, `int`, `long`, `float`, `double` | [Int (8/16/32)](../sql_reference/data_types/int_uint.md), [UInt (8/16/32)](../sql_reference/data_types/int_uint.md) | `int` | +| `boolean`, `int`, `long`, `float`, `double` | [Int64](../sql_reference/data_types/int_uint.md), [UInt64](../sql_reference/data_types/int_uint.md) | `long` | +| `boolean`, `int`, `long`, `float`, `double` | [Float32](../sql_reference/data_types/float.md) | `float` | +| `boolean`, `int`, `long`, `float`, `double` | [Float64](../sql_reference/data_types/float.md) | `double` | +| `bytes`, `string`, `fixed`, `enum` | [Chaîne](../sql_reference/data_types/string.md) | `bytes` | +| `bytes`, `string`, `fixed` | [FixedString (N)](../sql_reference/data_types/fixedstring.md) | `fixed(N)` | +| `enum` | [Enum (8/16)](../sql_reference/data_types/enum.md) | `enum` | +| `array(T)` | [Array(T)](../sql_reference/data_types/array.md) | `array(T)` | +| `union(null, T)`, `union(T, null)` | [Nullable (T)](../sql_reference/data_types/date.md) | `union(null, T)` | +| `null` | [Les Valeurs Null(Nothing)](../sql_reference/data_types/special_data_types/nothing.md) | `null` | +| `int (date)` \* | [Date](../sql_reference/data_types/date.md) | `int (date)` \* | +| `long (timestamp-millis)` \* | [DateTime64 (3)](../sql_reference/data_types/datetime.md) | `long (timestamp-millis)` \* | +| `long (timestamp-micros)` \* | [DateTime64 (6)](../sql_reference/data_types/datetime.md) | `long (timestamp-micros)` \* | \* [Types logiques Avro](http://avro.apache.org/docs/current/spec.html#Logical+Types) @@ -1060,7 +1063,7 @@ Même que [Avro](#data-format-avro) ### Utilisation {#usage} -Pour vérifier rapidement la résolution du schéma, vous pouvez utiliser 
[kafkacat](https://github.com/edenhill/kafkacat) avec [clickhouse-local](../operations/utils/clickhouse-local.md): +Pour vérifier rapidement la résolution du schéma, vous pouvez utiliser [kafkacat](https://github.com/edenhill/kafkacat) avec [clickhouse-local](../operations/utilities/clickhouse-local.md): ``` bash $ kafkacat -b kafka-broker -C -t topic1 -o beginning -f '%s' -c 3 | clickhouse-local --input-format AvroConfluent --format_avro_schema_registry_url 'http://schema-registry' -S "field1 Int64, field2 String" -q 'select * from table' @@ -1069,7 +1072,7 @@ $ kafkacat -b kafka-broker -C -t topic1 -o beginning -f '%s' -c 3 | clickhouse- 3 c ``` -Utiliser `AvroConfluent` avec [Kafka](../operations/table_engines/kafka.md): +Utiliser `AvroConfluent` avec [Kafka](../engines/table_engines/integrations/kafka.md): ``` sql CREATE TABLE topic1_stream @@ -1098,25 +1101,25 @@ SELECT * FROM topic1_stream; ### Types De Données Correspondant {#data_types-matching-2} -Le tableau ci-dessous montre les types de données pris en charge et comment ils correspondent à ClickHouse [types de données](../data_types/index.md) dans `INSERT` et `SELECT` requête. +Le tableau ci-dessous montre les types de données pris en charge et comment ils correspondent à ClickHouse [types de données](../sql_reference/data_types/index.md) dans `INSERT` et `SELECT` requête. -| Type de données Parquet (`INSERT`) | Type de données ClickHouse | Type de données Parquet (`SELECT`) | -|------------------------------------|---------------------------------------------|------------------------------------| -| `UINT8`, `BOOL` | [UInt8](../data_types/int_uint.md) | `UINT8` | -| `INT8` | [Int8](../data_types/int_uint.md) | `INT8` | -| `UINT16` | [UInt16](../data_types/int_uint.md) | `UINT16` | -| `INT16` | [Int16](../data_types/int_uint.md) | `INT16` | -| `UINT32` | [UInt32](../data_types/int_uint.md) | `UINT32` | -| `INT32` | [Int32](../data_types/int_uint.md) | `INT32` | -| `UINT64` | [UInt64](../data_types/int_uint.md) | `UINT64` | -| `INT64` | [Int64](../data_types/int_uint.md) | `INT64` | -| `FLOAT`, `HALF_FLOAT` | [Float32](../data_types/float.md) | `FLOAT` | -| `DOUBLE` | [Float64](../data_types/float.md) | `DOUBLE` | -| `DATE32` | [Date](../data_types/date.md) | `UINT16` | -| `DATE64`, `TIMESTAMP` | [DateTime](../data_types/datetime.md) | `UINT32` | -| `STRING`, `BINARY` | [Chaîne](../data_types/string.md) | `STRING` | -| — | [FixedString](../data_types/fixedstring.md) | `STRING` | -| `DECIMAL` | [Décimal](../data_types/decimal.md) | `DECIMAL` | +| Type de données Parquet (`INSERT`) | Type de données ClickHouse | Type de données Parquet (`SELECT`) | +|------------------------------------|-----------------------------------------------------------|------------------------------------| +| `UINT8`, `BOOL` | [UInt8](../sql_reference/data_types/int_uint.md) | `UINT8` | +| `INT8` | [Int8](../sql_reference/data_types/int_uint.md) | `INT8` | +| `UINT16` | [UInt16](../sql_reference/data_types/int_uint.md) | `UINT16` | +| `INT16` | [Int16](../sql_reference/data_types/int_uint.md) | `INT16` | +| `UINT32` | [UInt32](../sql_reference/data_types/int_uint.md) | `UINT32` | +| `INT32` | [Int32](../sql_reference/data_types/int_uint.md) | `INT32` | +| `UINT64` | [UInt64](../sql_reference/data_types/int_uint.md) | `UINT64` | +| `INT64` | [Int64](../sql_reference/data_types/int_uint.md) | `INT64` | +| `FLOAT`, `HALF_FLOAT` | [Float32](../sql_reference/data_types/float.md) | `FLOAT` | +| `DOUBLE` | [Float64](../sql_reference/data_types/float.md) | 
`DOUBLE` | +| `DATE32` | [Date](../sql_reference/data_types/date.md) | `UINT16` | +| `DATE64`, `TIMESTAMP` | [DateTime](../sql_reference/data_types/datetime.md) | `UINT32` | +| `STRING`, `BINARY` | [Chaîne](../sql_reference/data_types/string.md) | `STRING` | +| — | [FixedString](../sql_reference/data_types/fixedstring.md) | `STRING` | +| `DECIMAL` | [Décimal](../sql_reference/data_types/decimal.md) | `DECIMAL` | Clickhouse prend en charge la précision configurable de `Decimal` type. Le `INSERT` requête traite le Parquet `DECIMAL` tapez comme le ClickHouse `Decimal128` type. @@ -1138,7 +1141,7 @@ Vous pouvez sélectionner des données à partir d'une table de ClickHouse et le $ clickhouse-client --query="SELECT * FROM {some_table} FORMAT Parquet" > {some_file.pq} ``` -Pour échanger des données avec Hadoop, vous pouvez utiliser [Moteur de table HDFS](../operations/table_engines/hdfs.md). +Pour échanger des données avec Hadoop, vous pouvez utiliser [Moteur de table HDFS](../engines/table_engines/integrations/hdfs.md). ## ORC {#data-format-orc} @@ -1146,24 +1149,24 @@ Pour échanger des données avec Hadoop, vous pouvez utiliser [Moteur de table H ### Types De Données Correspondant {#data_types-matching-3} -Le tableau ci-dessous montre les types de données pris en charge et comment ils correspondent à ClickHouse [types de données](../data_types/index.md) dans `INSERT` requête. +Le tableau ci-dessous montre les types de données pris en charge et comment ils correspondent à ClickHouse [types de données](../sql_reference/data_types/index.md) dans `INSERT` requête. -| Type de données ORC (`INSERT`) | Type de données ClickHouse | -|--------------------------------|---------------------------------------| -| `UINT8`, `BOOL` | [UInt8](../data_types/int_uint.md) | -| `INT8` | [Int8](../data_types/int_uint.md) | -| `UINT16` | [UInt16](../data_types/int_uint.md) | -| `INT16` | [Int16](../data_types/int_uint.md) | -| `UINT32` | [UInt32](../data_types/int_uint.md) | -| `INT32` | [Int32](../data_types/int_uint.md) | -| `UINT64` | [UInt64](../data_types/int_uint.md) | -| `INT64` | [Int64](../data_types/int_uint.md) | -| `FLOAT`, `HALF_FLOAT` | [Float32](../data_types/float.md) | -| `DOUBLE` | [Float64](../data_types/float.md) | -| `DATE32` | [Date](../data_types/date.md) | -| `DATE64`, `TIMESTAMP` | [DateTime](../data_types/datetime.md) | -| `STRING`, `BINARY` | [Chaîne](../data_types/string.md) | -| `DECIMAL` | [Décimal](../data_types/decimal.md) | +| Type de données ORC (`INSERT`) | Type de données ClickHouse | +|--------------------------------|-----------------------------------------------------| +| `UINT8`, `BOOL` | [UInt8](../sql_reference/data_types/int_uint.md) | +| `INT8` | [Int8](../sql_reference/data_types/int_uint.md) | +| `UINT16` | [UInt16](../sql_reference/data_types/int_uint.md) | +| `INT16` | [Int16](../sql_reference/data_types/int_uint.md) | +| `UINT32` | [UInt32](../sql_reference/data_types/int_uint.md) | +| `INT32` | [Int32](../sql_reference/data_types/int_uint.md) | +| `UINT64` | [UInt64](../sql_reference/data_types/int_uint.md) | +| `INT64` | [Int64](../sql_reference/data_types/int_uint.md) | +| `FLOAT`, `HALF_FLOAT` | [Float32](../sql_reference/data_types/float.md) | +| `DOUBLE` | [Float64](../sql_reference/data_types/float.md) | +| `DATE32` | [Date](../sql_reference/data_types/date.md) | +| `DATE64`, `TIMESTAMP` | [DateTime](../sql_reference/data_types/datetime.md) | +| `STRING`, `BINARY` | [Chaîne](../sql_reference/data_types/string.md) | +| `DECIMAL` | 
[Décimal](../sql_reference/data_types/decimal.md) | Clickhouse prend en charge la précision configurable de la `Decimal` type. Le `INSERT` requête traite de l'ORC `DECIMAL` tapez comme le ClickHouse `Decimal128` type. @@ -1179,7 +1182,7 @@ Vous pouvez insérer des données ORC à partir d'un fichier dans la table Click $ cat filename.orc | clickhouse-client --query="INSERT INTO some_table FORMAT ORC" ``` -Pour échanger des données avec Hadoop, vous pouvez utiliser [Moteur de table HDFS](../operations/table_engines/hdfs.md). +Pour échanger des données avec Hadoop, vous pouvez utiliser [Moteur de table HDFS](../engines/table_engines/integrations/hdfs.md). ## Schéma De Format {#formatschema} @@ -1195,7 +1198,7 @@ peut contenir un chemin absolu, soit un chemin relatif au répertoire courant su Si vous utilisez le client dans le [mode batch](../interfaces/cli.md#cli_usage), le chemin d'accès au schéma doit être relatif pour des raisons de sécurité. Si vous entrez ou sortez des données via le [Interface HTTP](../interfaces/http.md) le nom de fichier spécifié dans le format de schéma -doit être situé dans le répertoire spécifié dans [format\_schema\_path](../operations/server_settings/settings.md#server_settings-format_schema_path) +doit être situé dans le répertoire spécifié dans [format\_schema\_path](../operations/server_configuration_parameters/settings.md#server_configuration_parameters-format_schema_path) dans la configuration du serveur. ## Sauter Les Erreurs {#skippingerrors} diff --git a/docs/fr/interfaces/http.md b/docs/fr/interfaces/http.md index a8a6bc5f37d..bd90f511841 100644 --- a/docs/fr/interfaces/http.md +++ b/docs/fr/interfaces/http.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 19 +toc_title: Interface HTTP --- # Interface HTTP {#http-interface} @@ -8,7 +11,7 @@ L'interface HTTP vous permet D'utiliser ClickHouse sur n'importe quelle plate-fo Par défaut, clickhouse-server écoute HTTP sur le port 8123 (cela peut être modifié dans la configuration). -Si vous faites une requête GET / sans Paramètres, elle renvoie le code de réponse 200 et la chaîne définie dans [http\_server\_default\_response](../operations/server_settings/settings.md#server_settings-http_server_default_response) valeur par défaut “Ok.” (avec un saut de ligne à la fin) +Si vous faites une requête GET / sans Paramètres, elle renvoie le code de réponse 200 et la chaîne définie dans [http\_server\_default\_response](../operations/server_configuration_parameters/settings.md#server_configuration_parameters-http_server_default_response) valeur par défaut “Ok.” (avec un saut de ligne à la fin) ``` bash $ curl 'http://localhost:8123/' @@ -282,4 +285,227 @@ Vous pouvez créer une requête avec paramètres et transmettre des valeurs des $ curl -sS "
<address>?param_id=2&param_phrase=test" -d "SELECT * FROM table WHERE int_column = {id:UInt8} and string_column = {phrase:String}"
 ```
 
+## Interface HTTP prédéfinie {#predefined_http_interface}
+
+ClickHouse prend en charge des requêtes spécifiques via L'interface HTTP. Par exemple, vous pouvez écrire des données dans un tableau comme suit:
+
+``` bash
+$ echo '(4),(5),(6)' | curl 'http://localhost:8123/?query=INSERT%20INTO%20t%20VALUES' --data-binary @-
+```
+
+ClickHouse prend également en charge L'Interface HTTP prédéfinie qui peut vous aider à une intégration plus facile avec des outils tiers tels que [Prometheus exportateur](https://github.com/percona-lab/clickhouse_exporter).
+
+Exemple:
+
+- Tout d'abord, ajoutez cette section au fichier de configuration du serveur:
+
+<!-- -->
+
+``` xml
+<http_handlers>
+    <predefined_query_handler>
+        <url>/metrics</url>
+        <method>GET</method>
+        <queries>
+            <query>SELECT * FROM system.metrics LIMIT 5 FORMAT Template SETTINGS format_template_resultset = 'prometheus_template_output_format_resultset', format_template_row = 'prometheus_template_output_format_row', format_template_rows_between_delimiter = '\n'</query>
+        </queries>
+    </predefined_query_handler>
+</http_handlers>
+```
+
+- Vous pouvez maintenant demander l'url directement pour les données au format Prometheus:
+
+<!-- -->
+
+``` bash
+curl -vvv 'http://localhost:8123/metrics'
+* Trying ::1...
+* Connected to localhost (::1) port 8123 (#0)
+> GET /metrics HTTP/1.1
+> Host: localhost:8123
+> User-Agent: curl/7.47.0
+> Accept: */*
+>
+< HTTP/1.1 200 OK
+< Date: Wed, 27 Nov 2019 08:54:25 GMT
+< Connection: Keep-Alive
+< Content-Type: text/plain; charset=UTF-8
+< X-ClickHouse-Server-Display-Name: i-tl62qd0o
+< Transfer-Encoding: chunked
+< X-ClickHouse-Query-Id: f39235f6-6ed7-488c-ae07-c7ceafb960f6
+< Keep-Alive: timeout=3
+< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0"}
+<
+# HELP "Query" "Number of executing queries"
+# TYPE "Query" counter
+"Query" 1
+
+# HELP "Merge" "Number of executing background merges"
+# TYPE "Merge" counter
+"Merge" 0
+
+# HELP "PartMutation" "Number of mutations (ALTER DELETE/UPDATE)"
+# TYPE "PartMutation" counter
+"PartMutation" 0
+
+# HELP "ReplicatedFetch" "Number of data parts being fetched from replica"
+# TYPE "ReplicatedFetch" counter
+"ReplicatedFetch" 0
+
+# HELP "ReplicatedSend" "Number of data parts being sent to replicas"
+# TYPE "ReplicatedSend" counter
+"ReplicatedSend" 0
+
+* Connection #0 to host localhost left intact
+```
+
+Comme vous pouvez le voir dans l'exemple, si `<http_handlers>` est configuré dans le fichier config.xml, ClickHouse fera correspondre les requêtes HTTP reçues au type prédéfini dans `<http_handlers>`, puis ClickHouse exécutera la requête prédéfinie correspondante si la correspondance est réussie.
+
+Maintenant, `<http_handlers>` peut configurer `<root_handler>`, `<ping_handler>`, `<replicas_status_handler>`, `<predefined_query_handler>` et `<dynamic_query_handler>`.
+
+## root\_handler {#root_handler}
+
+`<root_handler>` renvoie le contenu spécifié pour la requête de chemin racine. Le contenu de retour spécifique est configuré par `http_server_default_response` dans config.xml. Si non spécifié, le retour est **OK.**
+
+`http_server_default_response` n'est pas défini et une requête HTTP est envoyée à ClickHouse. Le résultat est comme suit:
+
+``` xml
+<http_handlers>
+    <root_handler/>
+</http_handlers>
+```
+
+    $ curl 'http://localhost:8123'
+    Ok.
+
+`http_server_default_response` est défini et une requête HTTP est envoyée à ClickHouse. Le résultat est comme suit:
+
+``` xml
+<http_server_default_response><![CDATA[<html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>]]></http_server_default_response>
+
+<http_handlers>
+    <root_handler/>
+</http_handlers>
+```
+
+    $ curl 'http://localhost:8123'
+    <html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>%
+
+## ping\_handler {#ping_handler}
+
+`<ping_handler>` peut être utilisé pour sonder la santé du serveur clickhouse actuel. Lorsque le serveur HTTP ClickHouse est normal, l'accès à ClickHouse via `<ping_handler>` sera de retour **OK.**.
+
+Exemple:
+
+``` xml
+<http_handlers>
+    <ping_handler>/ping</ping_handler>
+</http_handlers>
+```
+
+``` bash
+$ curl 'http://localhost:8123/ping'
+Ok.
+```
+
+## replicas\_status\_handler {#replicas_status_handler}
+
+`<replicas_status_handler>` est utilisé pour détecter l'état du nœud de réplica et le retour **OK.** si le nœud réplique n'a pas de délai. S'il y a un retard, renvoyez le retard spécifique. La valeur de `<replicas_status_handler>` prend en charge la personnalisation. Si vous ne spécifiez pas `<replicas_status_handler>`, ClickHouse utilise par défaut **/replicas\_status**.
+
+Exemple:
+
+``` xml
+<http_handlers>
+    <replicas_status_handler>/replicas_status</replicas_status_handler>
+</http_handlers>
+```
+
+Aucun retard de cas:
+
+``` bash
+$ curl 'http://localhost:8123/replicas_status'
+Ok.
+```
+
+Retard de cas:
+
+``` bash
+$ curl 'http://localhost:8123/replicas_status'
+db.stats: Absolute delay: 22. Relative delay: 22.
+```
+
+## predefined\_query\_handler {#predefined_query_handler}
+
+Vous pouvez configurer `<method>`, `<headers>`, `<url>` et `<queries>` dans `<predefined_query_handler>`.
+
+`<method>` est responsable de la correspondance de la partie méthode de la requête HTTP. `<method>` est entièrement conforme à la définition de [méthode](https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods) dans le protocole HTTP. C'est une option de configuration. S'il n'est pas défini dans le fichier de configuration, il ne correspond pas à la partie méthode de la requête HTTP
+
+`<url>` est responsable de la correspondance de la partie url de la requête HTTP. Il est compatible avec les expressions régulières de [RE2](https://github.com/google/re2). C'est une option de configuration. S'il n'est pas défini dans le fichier de configuration, il ne correspond pas à la partie url de la requête HTTP
+
+`<headers>` est responsable de la correspondance de la partie d'en-tête de la requête HTTP. Il est compatible avec les expressions régulières de RE2. C'est une option de configuration. S'il n'est pas défini dans le fichier de configuration, il ne correspond pas à la partie d'en-tête de la requête HTTP
+
+`<queries>` la valeur est une requête prédéfinie de `<predefined_query_handler>`, qui est exécutée par ClickHouse lorsqu'une requête HTTP est mise en correspondance et que le résultat de la requête est renvoyé. C'est une configuration incontournable.
+
+`<predefined_query_handler>` prend en charge les paramètres de réglage et les valeurs query\_params.
+
+L'exemple suivant définit les valeurs des paramètres `max_threads` et `max_alter_threads`, puis interroge la table système pour vérifier si ces paramètres ont été définis avec succès.
+
+Exemple:
+
+``` xml
+<http_handlers>
+    <predefined_query_handler>
+        <method>GET</method>
+        <headers>
+            <XXX>TEST_HEADER_VALUE</XXX>
+            <PARAMS_XXX><![CDATA[(?P<name_1>[^/]+)(/(?P<name_2>[^/]+))?]]></PARAMS_XXX>
+        </headers>
+        <url><![CDATA[/query_param_with_url/\w+/(?P<name_1>[^/]+)(/(?P<name_2>[^/]+))?]]></url>
+        <queries>
+            <query>SELECT value FROM system.settings WHERE name = {name_1:String}</query>
+            <query>SELECT name, value FROM system.settings WHERE name = {name_2:String}</query>
+        </queries>
+    </predefined_query_handler>
+</http_handlers>
+```
+
+``` bash
+$ curl -H 'XXX:TEST_HEADER_VALUE' -H 'PARAMS_XXX:max_threads' 'http://localhost:8123/query_param_with_url/1/max_threads/max_alter_threads?max_threads=1&max_alter_threads=2'
+1
+max_alter_threads 2
+```
+
+!!! note "Note"
+    Dans un `<predefined_query_handler>`, un `<queries>` ne prend en charge qu'un seul `<query>` de type insertion.
+
+## dynamic\_query\_handler {#dynamic_query_handler}
+
+`<dynamic_query_handler>` ajoute `<query_param_name>` par rapport à `<predefined_query_handler>`.
+
+Clickhouse extrait et exécute la valeur correspondant au `<query_param_name>` dans l'url de la requête HTTP.
+La valeur par défaut de `<query_param_name>` dans ClickHouse est `/query`. C'est une option de configuration. S'il n'y a pas de définition dans le fichier de configuration, le paramètre n'est pas passé.
+
+Pour expérimenter cette fonctionnalité, l'exemple définit les valeurs de max\_threads et max\_alter\_threads et demande si les paramètres ont été définis avec succès.
+La différence est que dans `<predefined_query_handler>`, la requête est écrite dans le fichier de configuration. Mais dans `<dynamic_query_handler>`, la requête est écrite sous la forme de param de la requête HTTP.
+
+Exemple:
+
+``` xml
+<http_handlers>
+    <dynamic_query_handler>
+        <headers>
+            <XXX>TEST_HEADER_VALUE_DYNAMIC</XXX>
+            <PARAMS_XXX><![CDATA[(?P<param_name_1>[^/]+)(/(?P<param_name_2>[^/]+))?]]></PARAMS_XXX>
+        </headers>
+        <query_param_name>query_param</query_param_name>
+    </dynamic_query_handler>
+</http_handlers>
+```
+
+``` bash
+$ curl -H 'XXX:TEST_HEADER_VALUE_DYNAMIC' -H 'PARAMS_XXX:max_threads' 'http://localhost:8123/?query_param=SELECT%20value%20FROM%20system.settings%20where%20name%20=%20%7Bname_1:String%7D%20OR%20name%20=%20%7Bname_2:String%7D&max_threads=1&max_alter_threads=2&param_name_2=max_alter_threads'
+1
+2
+```
+
 [Article Original](https://clickhouse.tech/docs/en/interfaces/http_interface/)
diff --git a/docs/fr/interfaces/index.md b/docs/fr/interfaces/index.md
index 99c81e26e1b..d85ce9cdbe3 100644
--- a/docs/fr/interfaces/index.md
+++ b/docs/fr/interfaces/index.md
@@ -1,5 +1,9 @@
 ---
 machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_folder_title: Interfaces
+toc_priority: 14
+toc_title: Introduction
 ---
 
 # Interface {#interfaces}
diff --git a/docs/fr/interfaces/jdbc.md b/docs/fr/interfaces/jdbc.md
index 8912124ff39..1bd9dfe8e9f 100644
--- a/docs/fr/interfaces/jdbc.md
+++ b/docs/fr/interfaces/jdbc.md
@@ -1,5 +1,8 @@
 ---
 machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_priority: 22
+toc_title: JDBC
 ---
 
 # JDBC {#jdbc-driver}
diff --git a/docs/fr/interfaces/mysql.md b/docs/fr/interfaces/mysql.md
index 56f798517e7..ac57deefaba 100644
--- a/docs/fr/interfaces/mysql.md
+++ b/docs/fr/interfaces/mysql.md
@@ -1,10 +1,13 @@
 ---
 machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_priority: 20
+toc_title: Interface MySQL
 ---
 
 # Interface MySQL {#mysql-interface}
 
-ClickHouse prend en charge le protocole de fil MySQL. Il peut être activé par [mysql\_port](../operations/server_settings/settings.md#server_settings-mysql_port) paramètre dans le fichier de configuration:
+ClickHouse prend en charge le protocole de fil MySQL. Il peut être activé par [mysql\_port](../operations/server_configuration_parameters/settings.md#server_configuration_parameters-mysql_port) paramètre dans le fichier de configuration:
 
 ``` xml
 <mysql_port>9004</mysql_port>
diff --git a/docs/fr/interfaces/odbc.md b/docs/fr/interfaces/odbc.md
index 52b9fc7905e..1f366f77d51 100644
--- a/docs/fr/interfaces/odbc.md
+++ b/docs/fr/interfaces/odbc.md
@@ -1,5 +1,8 @@
 ---
 machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_priority: 23
+toc_title: Pilote ODBC
 ---
 
 # Pilote ODBC {#odbc-driver}
diff --git a/docs/fr/interfaces/tcp.md b/docs/fr/interfaces/tcp.md
index 7678cdc2878..45e89776547 100644
--- a/docs/fr/interfaces/tcp.md
+++ b/docs/fr/interfaces/tcp.md
@@ -1,9 +1,12 @@
 ---
 machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_priority: 18
+toc_title: Interface Native (TCP)
 ---
 
 # Interface Native (TCP) {#native-interface-tcp}
 
-Le protocole natif est utilisé dans le [client de ligne de commande](cli.md), pour la communication inter-serveur pendant le traitement de requête distribué, et également dans d'autres programmes C++. 
Malheureusement, le protocole clickhouse natif n'a pas encore de spécification formelle, mais il peut être rétro-conçu à partir du code source ClickHouse (démarrage [ici](https://github.com/ClickHouse/ClickHouse/tree/master/src/src/Client)) et/ou en interceptant et en analysant le trafic TCP. +Le protocole natif est utilisé dans le [client de ligne de commande](cli.md), pour la communication inter-serveur pendant le traitement de requête distribué, et également dans d'autres programmes C++. Malheureusement, le protocole clickhouse natif n'a pas encore de spécification formelle, mais il peut être rétro-conçu à partir du code source ClickHouse (démarrage [ici](https://github.com/ClickHouse/ClickHouse/tree/master/dbms/Client)) et/ou en interceptant et en analysant le trafic TCP. [Article Original](https://clickhouse.tech/docs/en/interfaces/tcp/) diff --git a/docs/fr/interfaces/third-party/client_libraries.md b/docs/fr/interfaces/third-party/client_libraries.md index 7c5e51c6e02..940887e4010 100644 --- a/docs/fr/interfaces/third-party/client_libraries.md +++ b/docs/fr/interfaces/third-party/client_libraries.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 26 +toc_title: "Biblioth\xE8ques Clientes" --- # Bibliothèques clientes de développeurs tiers {#client-libraries-from-third-party-developers} diff --git a/docs/fr/interfaces/third-party/gui.md b/docs/fr/interfaces/third-party/gui.md index 3e6b039dea0..1682c78fdbd 100644 --- a/docs/fr/interfaces/third-party/gui.md +++ b/docs/fr/interfaces/third-party/gui.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 28 +toc_title: Les Interfaces Visuelles --- # Interfaces visuelles de développeurs tiers {#visual-interfaces-from-third-party-developers} diff --git a/docs/fr/interfaces/third-party/index.md b/docs/fr/interfaces/third-party/index.md new file mode 100644 index 00000000000..75062f3bad4 --- /dev/null +++ b/docs/fr/interfaces/third-party/index.md @@ -0,0 +1,8 @@ +--- +machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_folder_title: Third-Party +toc_priority: 24 +--- + + diff --git a/docs/fr/interfaces/third-party/integrations.md b/docs/fr/interfaces/third-party/integrations.md index fba357772a6..76648d8613d 100644 --- a/docs/fr/interfaces/third-party/integrations.md +++ b/docs/fr/interfaces/third-party/integrations.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 27 +toc_title: "Int\xE9gration" --- # Bibliothèques d'intégration de développeurs tiers {#integration-libraries-from-third-party-developers} @@ -39,7 +42,7 @@ machine_translated: true - [graphouse](https://github.com/yandex/graphouse) - [carbone-clickhouse](https://github.com/lomik/carbon-clickhouse) + - [graphite-clickhouse](https://github.com/lomik/graphite-clickhouse) - - [graphite-CH-optimizer](https://github.com/innogames/graphite-ch-optimizer) - optimise les partitions calées dans [\* GraphiteMergeTree](../../operations/table_engines/graphitemergetree.md#graphitemergetree) si les règles de [configuration de cumul](../../operations/table_engines/graphitemergetree.md#rollup-configuration) pourrait être appliquée + - [graphite-CH-optimizer](https://github.com/innogames/graphite-ch-optimizer) - optimise les partitions calées dans [\* 
GraphiteMergeTree](../../engines/table_engines/mergetree_family/graphitemergetree.md#graphitemergetree) si les règles de [configuration de cumul](../../engines/table_engines/mergetree_family/graphitemergetree.md#rollup-configuration) pourrait être appliquée - [Grafana](https://grafana.com/) - [clickhouse-grafana](https://github.com/Vertamedia/clickhouse-grafana) - [Prometheus](https://prometheus.io/) @@ -76,7 +79,7 @@ machine_translated: true - [RClickhouse](https://github.com/IMSMWU/RClickhouse) (utiliser [clickhouse-cpp](https://github.com/artpaul/clickhouse-cpp)) - Java - [Hadoop](http://hadoop.apache.org) - - [clickhouse-HDFS-chargeur](https://github.com/jaykelin/clickhouse-hdfs-loader) (utiliser [JDBC](../../query_language/table_functions/jdbc.md)) + - [clickhouse-HDFS-chargeur](https://github.com/jaykelin/clickhouse-hdfs-loader) (utiliser [JDBC](../../sql_reference/table_functions/jdbc.md)) - Scala - [Akka](https://akka.io) - [clickhouse-Scala-client](https://github.com/crobox/clickhouse-scala-client) diff --git a/docs/fr/interfaces/third-party/proxy.md b/docs/fr/interfaces/third-party/proxy.md index 84eea4fe78d..154c0b8efc1 100644 --- a/docs/fr/interfaces/third-party/proxy.md +++ b/docs/fr/interfaces/third-party/proxy.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 29 +toc_title: Proxy --- # Serveurs Proxy de développeurs tiers {#proxy-servers-from-third-party-developers} diff --git a/docs/fr/introduction/adopters.md b/docs/fr/introduction/adopters.md index 9f0e2eec474..f03f0242c72 100644 --- a/docs/fr/introduction/adopters.md +++ b/docs/fr/introduction/adopters.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 8 +toc_title: Adoptant --- # Clickhouse Adopteurs {#clickhouse-adopters} diff --git a/docs/fr/introduction/distinctive_features.md b/docs/fr/introduction/distinctive_features.md index 8ae1607d63c..dcea4046fcd 100644 --- a/docs/fr/introduction/distinctive_features.md +++ b/docs/fr/introduction/distinctive_features.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 4 +toc_title: "particularit\xE9" --- # Caractéristiques distinctives de ClickHouse {#distinctive-features-of-clickhouse} @@ -63,6 +66,6 @@ ClickHouse offre différentes façons d'échanger la précision pour la performa ClickHouse utilise la réplication multi-maître asynchrone. Après avoir été écrit dans n'importe quelle réplique disponible, toutes les répliques restantes récupèrent leur copie en arrière-plan. Le système conserve des données identiques sur différentes répliques. La récupération après la plupart des échecs est effectuée automatiquement ou semi-automatiquement dans les cas complexes. -Pour plus d'informations, consultez la section [Réplication des données](../operations/table_engines/replication.md). +Pour plus d'informations, consultez la section [Réplication des données](../engines/table_engines/mergetree_family/replication.md). 
[Article Original](https://clickhouse.tech/docs/en/introduction/distinctive_features/) diff --git a/docs/fr/introduction/features_considered_disadvantages.md b/docs/fr/introduction/features_considered_disadvantages.md index d6a3886d489..dc9fe708fef 100644 --- a/docs/fr/introduction/features_considered_disadvantages.md +++ b/docs/fr/introduction/features_considered_disadvantages.md @@ -1,5 +1,9 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 5 +toc_title: "Caract\xE9ristiques de ClickHouse qui peuvent \xEAtre consid\xE9r\xE9\ + es comme des inconv\xE9nients" --- # Caractéristiques de ClickHouse qui peuvent être considérées comme des inconvénients {#clickhouse-features-that-can-be-considered-disadvantages} diff --git a/docs/fr/introduction/history.md b/docs/fr/introduction/history.md index c7a07eb64d0..f40ae1eda52 100644 --- a/docs/fr/introduction/history.md +++ b/docs/fr/introduction/history.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 7 +toc_title: Histoire --- # Histoire De ClickHouse {#clickhouse-history} diff --git a/docs/fr/introduction/index.md b/docs/fr/introduction/index.md new file mode 100644 index 00000000000..365784abfca --- /dev/null +++ b/docs/fr/introduction/index.md @@ -0,0 +1,8 @@ +--- +machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_folder_title: Introduction +toc_priority: 1 +--- + + diff --git a/docs/fr/introduction/performance.md b/docs/fr/introduction/performance.md index 927579f8d31..dcda1878d61 100644 --- a/docs/fr/introduction/performance.md +++ b/docs/fr/introduction/performance.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 6 +toc_title: Performance --- # Performance {#performance} diff --git a/docs/fr/operations/access_rights.md b/docs/fr/operations/access_rights.md index 1b7bf384518..e01f8428209 100644 --- a/docs/fr/operations/access_rights.md +++ b/docs/fr/operations/access_rights.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 48 +toc_title: "Les Droits D'Acc\xE8s" --- # Les Droits D'Accès {#access-rights} @@ -69,7 +72,7 @@ Les utilisateurs sont enregistrés dans le `users` section. Voici un fragment de Vous pouvez voir une déclaration de deux utilisateurs: `default`et`web`. Nous avons ajouté l' `web` utilisateur séparément. -Le `default` l'utilisateur est choisi dans les cas où le nom d'utilisateur n'est pas passé. Le `default` l'utilisateur est également utilisé pour le traitement des requêtes distribuées, si la configuration du serveur ou du cluster `user` et `password` (voir la section sur les [Distribué](../operations/table_engines/distributed.md) moteur). +Le `default` l'utilisateur est choisi dans les cas où le nom d'utilisateur n'est pas passé. Le `default` l'utilisateur est également utilisé pour le traitement des requêtes distribuées, si la configuration du serveur ou du cluster `user` et `password` (voir la section sur les [Distribué](../engines/table_engines/special/distributed.md) moteur). The user that is used for exchanging information between servers combined in a cluster must not have substantial restrictions or quotas – otherwise, distributed queries will fail. 
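
The access_rights.md hunk above notes that the user exchanging data between cluster servers must not carry restrictive quotas or profiles. A hedged way to sanity-check this is to run the shard-local part of a distributed query explicitly as that user; the sketch below is illustrative only and not part of this patch (`web` is the example user mentioned in the doc, while the host `shard-01:9000` and table `default.hits` are hypothetical placeholders):

``` bash
# Sketch: execute a query on a remote shard as the inter-server user.
# Any quota or profile limit on this user would make the equivalent query
# through a Distributed table fail in the same way.
clickhouse-client --user web --query \
    "SELECT count() FROM remote('shard-01:9000', default.hits, 'web', '')"
```
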
diff --git a/docs/fr/operations/backup.md b/docs/fr/operations/backup.md index 74d6a90afb1..6d66e5a609c 100644 --- a/docs/fr/operations/backup.md +++ b/docs/fr/operations/backup.md @@ -1,10 +1,13 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 49 +toc_title: "La Sauvegarde Des Donn\xE9es" --- # La Sauvegarde Des Données {#data-backup} -Alors [réplication](table_engines/replication.md) provides protection from hardware failures, it does not protect against human errors: accidental deletion of data, deletion of the wrong table or a table on the wrong cluster, and software bugs that result in incorrect data processing or data corruption. In many cases mistakes like these will affect all replicas. ClickHouse has built-in safeguards to prevent some types of mistakes — for example, by default [vous ne pouvez pas simplement supprimer des tables avec un moteur de type MergeTree contenant plus de 50 Go de données](https://github.com/ClickHouse/ClickHouse/blob/v18.14.18-stable/programs/server/config.xml#L322-L330). Toutefois, ces garanties ne couvrent pas tous les cas possibles et peuvent être contournés. +Alors [réplication](../engines/table_engines/mergetree_family/replication.md) provides protection from hardware failures, it does not protect against human errors: accidental deletion of data, deletion of the wrong table or a table on the wrong cluster, and software bugs that result in incorrect data processing or data corruption. In many cases mistakes like these will affect all replicas. ClickHouse has built-in safeguards to prevent some types of mistakes — for example, by default [vous ne pouvez pas simplement supprimer des tables avec un moteur de type MergeTree contenant plus de 50 Go de données](https://github.com/ClickHouse/ClickHouse/blob/v18.14.18-stable/programs/server/config.xml#L322-L330). Toutefois, ces garanties ne couvrent pas tous les cas possibles et peuvent être contournés. Afin d'atténuer efficacement les erreurs humaines possibles, vous devez préparer soigneusement une stratégie de sauvegarde et de restauration de vos données **préalablement**. @@ -19,19 +22,19 @@ Souvent, les données qui sont ingérées dans ClickHouse sont livrées via une ## Instantanés Du Système De Fichiers {#filesystem-snapshots} -Certains systèmes de fichiers locaux fournissent des fonctionnalités d'instantané (par exemple, [ZFS](https://en.wikipedia.org/wiki/ZFS)), mais ils pourraient ne pas être le meilleur choix pour servir les requêtes actives. Une solution possible consiste à créer des répliques supplémentaires avec ce type de système de fichiers et à les exclure du [Distribué](table_engines/distributed.md) les tables qui sont utilisés pour `SELECT` requête. Les instantanés sur ces répliques seront hors de portée des requêtes qui modifient les données. En prime, ces répliques pourraient avoir des configurations matérielles spéciales avec plus de disques attachés par serveur, ce qui serait rentable. +Certains systèmes de fichiers locaux fournissent des fonctionnalités d'instantané (par exemple, [ZFS](https://en.wikipedia.org/wiki/ZFS)), mais ils pourraient ne pas être le meilleur choix pour servir les requêtes actives. Une solution possible consiste à créer des répliques supplémentaires avec ce type de système de fichiers et à les exclure du [Distribué](../engines/table_engines/special/distributed.md) les tables qui sont utilisés pour `SELECT` requête. 
Les instantanés sur ces répliques seront hors de portée des requêtes qui modifient les données. En prime, ces répliques pourraient avoir des configurations matérielles spéciales avec plus de disques attachés par serveur, ce qui serait rentable. ## clickhouse-copieur {#clickhouse-copier} -[clickhouse-copieur](utils/clickhouse-copier.md) est un outil polyvalent qui a été initialement créé pour re-shard petaoctet - sized tables. Il peut également être utilisé à des fins de sauvegarde et de restauration car il copie de manière fiable les données entre les tables ClickHouse et les clusters. +[clickhouse-copieur](utilities/clickhouse-copier.md) est un outil polyvalent qui a été initialement créé pour re-shard petaoctet - sized tables. Il peut également être utilisé à des fins de sauvegarde et de restauration car il copie de manière fiable les données entre les tables ClickHouse et les clusters. Pour de plus petits volumes de données, un simple `INSERT INTO ... SELECT ...` les tables distantes peuvent également fonctionner. -## Manipulations avec des pièces {#manipulations-with-parts} +## Manipulations Avec Des Pièces {#manipulations-with-parts} ClickHouse permet d'utiliser le `ALTER TABLE ... FREEZE PARTITION ...` requête pour créer une copie locale des partitions de table. Ceci est implémenté en utilisant des liens durs vers le `/var/lib/clickhouse/shadow/` dossier, donc il ne consomme généralement pas d'espace disque supplémentaire pour les anciennes données. Les copies créées des fichiers ne sont pas gérées par clickhouse server, vous pouvez donc les laisser là: vous aurez une sauvegarde simple qui ne nécessite aucun système externe supplémentaire, mais elle sera toujours sujette à des problèmes matériels. Pour cette raison, il est préférable de les copier à distance vers un autre emplacement, puis de supprimer les copies locales. Les systèmes de fichiers distribués et les magasins d'objets sont toujours une bonne option pour cela, mais les serveurs de fichiers attachés normaux avec une capacité suffisante peuvent également fonctionner (dans ce cas, le transfert se fera via le système de fichiers réseau ou peut-être [rsync](https://en.wikipedia.org/wiki/Rsync)). -Pour plus d'informations sur les requêtes liées aux manipulations de [Modifier la documentation](../query_language/alter.md#alter_manipulations-with-partitions). +Pour plus d'informations sur les requêtes liées aux manipulations de [Modifier la documentation](../sql_reference/statements/alter.md#alter_manipulations-with-partitions). Un outil tiers est disponible pour automatiser cette approche: [clickhouse-sauvegarde](https://github.com/AlexAkulov/clickhouse-backup). diff --git a/docs/fr/operations/configuration_files.md b/docs/fr/operations/configuration_files.md index 4ead122eb41..0322551ab63 100644 --- a/docs/fr/operations/configuration_files.md +++ b/docs/fr/operations/configuration_files.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 50 +toc_title: Fichiers De Configuration --- # Fichiers De Configuration {#configuration_files} @@ -17,7 +20,7 @@ Si `replace` est spécifié, il remplace l'élément entier par celui spécifié Si `remove` est spécifié, il supprime l'élément. -La configuration peut également définir “substitutions”. Si un élément a le `incl` attribut, la substitution correspondante du fichier sera utilisée comme valeur. Par défaut, le chemin d'accès au fichier avec des substitutions est `/etc/metrika.xml`. 
Ceci peut être changé dans le [include\_from](server_settings/settings.md#server_settings-include_from) élément dans la configuration du serveur. Les valeurs de substitution sont spécifiées dans `/yandex/substitution_name` les éléments de ce fichier. Si une substitution spécifiée dans `incl` n'existe pas, il est enregistré dans le journal. Pour empêcher ClickHouse de consigner les substitutions manquantes, spécifiez `optional="true"` attribut (par exemple, les paramètres de [macro](server_settings/settings.md)).
+La configuration peut également définir “substitutions”. Si un élément a le `incl` attribut, la substitution correspondante du fichier sera utilisée comme valeur. Par défaut, le chemin d'accès au fichier avec des substitutions est `/etc/metrika.xml`. Ceci peut être changé dans le [include\_from](server_configuration_parameters/settings.md#server_configuration_parameters-include_from) élément dans la configuration du serveur. Les valeurs de substitution sont spécifiées dans `/yandex/substitution_name` les éléments de ce fichier. Si une substitution spécifiée dans `incl` n'existe pas, il est enregistré dans le journal. Pour empêcher ClickHouse de consigner les substitutions manquantes, spécifiez `optional="true"` attribut (par exemple, les paramètres de [macro](server_configuration_parameters/settings.md)).
 
 Les Substitutions peuvent également être effectuées à partir de ZooKeeper. Pour ce faire, spécifiez l'attribut `from_zk = "/path/to/node"`. La valeur de l'élément est remplacé par le contenu du noeud au `/path/to/node` dans ZooKeeper. Vous pouvez également placer un sous-arbre XML entier sur le nœud ZooKeeper et il sera entièrement inséré dans l'élément source.
diff --git a/docs/fr/operations/index.md b/docs/fr/operations/index.md
index a6c0a82ef73..92265ce9fcf 100644
--- a/docs/fr/operations/index.md
+++ b/docs/fr/operations/index.md
@@ -1,5 +1,9 @@
 ---
 machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_folder_title: Operations
+toc_priority: 41
+toc_title: Introduction
 ---
 
 # Opérations {#operations}
@@ -16,9 +20,9 @@ Le manuel d'exploitation de ClickHouse comprend les principales sections suivant
 - [Fichiers De Configuration](configuration_files.md)
 - [Quota](quotas.md)
 - [Les Tables Système](system_tables.md)
-- [Paramètres De Configuration Du Serveur](server_settings/index.md)
+- [Paramètres De Configuration Du Serveur](server_configuration_parameters/index.md)
 - [Comment Tester Votre Matériel Avec ClickHouse](performance_test.md)
 - [Paramètre](settings/index.md)
-- [Utilitaire](utils/index.md)
+- [Utilitaire](utilities/index.md)
 
 [Article Original](https://clickhouse.tech/docs/en/operations/)
diff --git a/docs/fr/operations/monitoring.md b/docs/fr/operations/monitoring.md
index 7921447910c..851f3c73ce8 100644
--- a/docs/fr/operations/monitoring.md
+++ b/docs/fr/operations/monitoring.md
@@ -1,5 +1,8 @@
 ---
 machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_priority: 45
+toc_title: Surveiller
 ---
 
 # Surveiller {#monitoring}
@@ -21,20 +24,20 @@ Il est fortement recommandé de configurer la surveillance de:
 
 - Utilisation du système de stockage, de la RAM et du réseau.
 
-## Métriques Du Serveur ClickHouse {#clickhouse-server-metrics}
+## Métriques Du Serveur Clickhouse {#clickhouse-server-metrics}
 
 Clickhouse server a des instruments embarqués pour la surveillance de l'auto-état.
 
-Pour suivre les événements du serveur, utilisez les journaux du serveur. Voir la [enregistreur](server_settings/settings.md#server_settings-logger) section du fichier de configuration.
+Pour suivre les événements du serveur, utilisez les journaux du serveur. Voir la [enregistreur](server_configuration_parameters/settings.md#server_configuration_parameters-logger) section du fichier de configuration.
 
 Clickhouse recueille:
 
 - Différentes mesures de la façon dont le serveur utilise les ressources de calcul.
 - Statistiques communes sur le traitement des requêtes.
 
-Vous pouvez trouver des mesures dans le [système.métrique](system_tables.md#system_tables-metrics), [système.événement](system_tables.md#system_tables-events), et [système.asynchronous\_metrics](system_tables.md#system_tables-asynchronous_metrics) table.
+Vous pouvez trouver des mesures dans le [système.métrique](../operations/system_tables.md#system_tables-metrics), [système.événement](../operations/system_tables.md#system_tables-events), et [système.asynchronous\_metrics](../operations/system_tables.md#system_tables-asynchronous_metrics) table.
 
-Vous pouvez configurer ClickHouse pour exporter des métriques vers [Graphite](https://github.com/graphite-project). Voir la [Graphite section](server_settings/settings.md#server_settings-graphite) dans le fichier de configuration du serveur ClickHouse. Avant de configurer l'exportation des métriques, vous devez configurer Graphite en suivant leur [guide](https://graphite.readthedocs.io/en/latest/install.html).
+Vous pouvez configurer ClickHouse pour exporter des métriques vers [Graphite](https://github.com/graphite-project). Voir la [Graphite section](server_configuration_parameters/settings.md#server_configuration_parameters-graphite) dans le fichier de configuration du serveur ClickHouse. Avant de configurer l'exportation des métriques, vous devez configurer Graphite en suivant leur [guide](https://graphite.readthedocs.io/en/latest/install.html).
 
 De plus, vous pouvez surveiller la disponibilité du serveur via L'API HTTP. Envoyer la `HTTP GET` demande à `/ping`. Si le serveur est disponible, il répond avec `200 OK`.
diff --git a/docs/fr/operations/optimizing_performance/index.md b/docs/fr/operations/optimizing_performance/index.md
new file mode 100644
index 00000000000..a596d617d8f
--- /dev/null
+++ b/docs/fr/operations/optimizing_performance/index.md
@@ -0,0 +1,8 @@
+---
+machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_folder_title: Optimizing Performance
+toc_priority: 52
+---
+
+
diff --git a/docs/fr/operations/performance/sampling_query_profiler.md b/docs/fr/operations/optimizing_performance/sampling_query_profiler.md
similarity index 74%
rename from docs/fr/operations/performance/sampling_query_profiler.md
rename to docs/fr/operations/optimizing_performance/sampling_query_profiler.md
index cc64ed72b68..819569616f8 100644
--- a/docs/fr/operations/performance/sampling_query_profiler.md
+++ b/docs/fr/operations/optimizing_performance/sampling_query_profiler.md
@@ -1,5 +1,8 @@
 ---
 machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_priority: 54
+toc_title: "Profilage De Requ\xEAte"
 ---
 
 # Échantillonnage Du Profileur De Requête {#sampling-query-profiler}
@@ -8,9 +11,9 @@ ClickHouse exécute un profileur d'échantillonnage qui permet d'analyser l'exé
 
 Utilisation du générateur de profils:
 
-- Installation de la [trace\_log](../server_settings/settings.md#server_settings-trace_log) la section de la configuration du serveur.
+- Installation de la [trace\_log](../server_configuration_parameters/settings.md#server_configuration_parameters-trace_log) la section de la configuration du serveur.
 
-    Cette section configure le [trace\_log](../system_tables.md#system_tables-trace_log) tableau système contenant les résultats du fonctionnement du profileur. Il est configuré par défaut. Rappelez-vous que les données de ce tableau est valable que pour un serveur en cours d'exécution. Après le redémarrage du serveur, ClickHouse ne nettoie pas la table et toute l'adresse de mémoire virtuelle stockée peut devenir invalide.
+    Cette section configure le [trace\_log](../../operations/system_tables.md#system_tables-trace_log) tableau système contenant les résultats du fonctionnement du profileur. Il est configuré par défaut. Rappelez-vous que les données de ce tableau est valable que pour un serveur en cours d'exécution. Après le redémarrage du serveur, ClickHouse ne nettoie pas la table et toute l'adresse de mémoire virtuelle stockée peut devenir invalide.
 
 - Installation de la [query\_profiler\_cpu\_time\_period\_ns](../settings/settings.md#query_profiler_cpu_time_period_ns) ou [query\_profiler\_real\_time\_period\_ns](../settings/settings.md#query_profiler_real_time_period_ns) paramètre. Les deux paramètres peuvent être utilisés simultanément.
@@ -26,7 +29,7 @@ Pour analyser les `trace_log` système de table:
 
     Pour des raisons de sécurité, les fonctions d'introspection sont désactivées par défaut.
 
-- L'utilisation de la `addressToLine`, `addressToSymbol` et `demangle` [fonctions d'introspection](../../query_language/functions/introspection.md) pour obtenir les noms de fonctions et leurs positions dans le code ClickHouse. Pour obtenir un profil pour une requête, vous devez agréger les données du `trace_log` table. Vous pouvez agréger des données par des fonctions individuelles ou par l'ensemble des traces de la pile.
+- L'utilisation de la `addressToLine`, `addressToSymbol` et `demangle` [fonctions d'introspection](../../sql_reference/functions/introspection.md) pour obtenir les noms de fonctions et leurs positions dans le code ClickHouse. Pour obtenir un profil pour une requête, vous devez agréger les données du `trace_log` table. Vous pouvez agréger des données par des fonctions individuelles ou par l'ensemble des traces de la pile.
 
 Si vous avez besoin de visualiser `trace_log` info, essayez [flamegraph](../../interfaces/third-party/gui/#clickhouse-flamegraph) et [speedscope](https://github.com/laplab/clickhouse-speedscope).
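Since the hunk above documents enabling the profiler, a minimal sketch of the `trace_log` server section it refers to may help readers of this patch. The four key names come from the `trace_log` section documented in the `settings.md` changes later in this same patch; the concrete values shown are assumptions based on typical defaults, not part of the patch itself:

```xml
<!-- Sketch only: key names are from the trace_log section of
     server_configuration_parameters/settings.md; values are assumed defaults. -->
<trace_log>
    <database>system</database>
    <table>trace_log</table>
    <partition_by>toYYYYMM(event_date)</partition_by>
    <flush_interval_milliseconds>7500</flush_interval_milliseconds>
</trace_log>
```

With such a section present in `config.xml`, samples collected under `query_profiler_real_time_period_ns` / `query_profiler_cpu_time_period_ns` become queryable in `system.trace_log`.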
diff --git a/docs/fr/operations/performance/sampling_query_profiler_example_result.txt b/docs/fr/operations/performance/sampling_query_profiler_example_result.txt
deleted file mode 120000
index 58c5abe7122..00000000000
--- a/docs/fr/operations/performance/sampling_query_profiler_example_result.txt
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/operations/performance/sampling_query_profiler_example_result.txt
\ No newline at end of file
diff --git a/docs/fr/operations/performance/sampling_query_profiler_example_result.txt b/docs/fr/operations/performance/sampling_query_profiler_example_result.txt
new file mode 100644
index 00000000000..8a38ef927f9
--- /dev/null
+++ b/docs/fr/operations/performance/sampling_query_profiler_example_result.txt
@@ -0,0 +1,542 @@
+---
+machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+---
+
+La ligne 1:
+──────
+nombre (): 6344
+sym: StackTrace:: StackTrace(ucontext\_t const&)
+/accueil / milovidov / ClickHouse / build\_gcc9/../ SGBD / commun / StackTrace.rpc:208
+DB:: (espace de noms anonyme):: writeTraceInfo(DB:: TimerType, int, siginfo\_t*, vide*) \[clone .l'isra.0\]
+/accueil / milovidov / ClickHouse / build\_gcc9/../ SGBD / IO / BufferBase.h: 99
+
+lire
+
[… the remaining ~525 added lines of this file are omitted here: ten machine-translated stack traces, with count() values 6344, 3295, 1978, 1913, 1672, 1531, 1034, 989, 779 and 666, descending through the MergeTree read, decompression and expression-evaluation paths, each ending in thread start / __clone; the translated paths and symbols carry no information that survives the machine translation …]
diff --git a/docs/fr/operations/performance_test.md b/docs/fr/operations/performance_test.md
index 9c0424b4c22..9b4df8e7fd4 100644
--- a/docs/fr/operations/performance_test.md
+++ b/docs/fr/operations/performance_test.md
@@ -1,5 +1,8 @@
 ---
 machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_priority: 54
+toc_title: "Tester Le Mat\xE9riel"
 ---
 
 # Comment Tester Votre Matériel Avec ClickHouse {#how-to-test-your-hardware-with-clickhouse}
@@ -37,9 +40,9 @@ Avec cette instruction, vous pouvez exécuter le test de performance clickhouse
 
-      wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/src/benchmark/clickhouse/benchmark-new.sh
+      wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/clickhouse/benchmark-new.sh
       chmod a+x benchmark-new.sh
-      wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/src/benchmark/clickhouse/queries.sql
+      wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/clickhouse/queries.sql
 
 1. Télécharger les données de test selon le [Yandex.Metrica dataset](../getting_started/example_datasets/metrica.md) instruction (“hits” tableau contenant 100 millions de lignes).
diff --git a/docs/fr/operations/quotas.md b/docs/fr/operations/quotas.md
index 6b921c5c665..d81ff33c403 100644
--- a/docs/fr/operations/quotas.md
+++ b/docs/fr/operations/quotas.md
@@ -1,5 +1,8 @@
 ---
 machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_priority: 51
+toc_title: Quota
 ---
 
 # Quota {#quotas}
diff --git a/docs/fr/operations/requirements.md b/docs/fr/operations/requirements.md
index b8baa200f92..56730105760 100644
--- a/docs/fr/operations/requirements.md
+++ b/docs/fr/operations/requirements.md
@@ -1,5 +1,8 @@
 ---
 machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_priority: 44
+toc_title: Exigence
 ---
 
 # Exigence {#requirements}
@@ -21,9 +24,9 @@ Le volume de RAM requis dépend de:
 
 - La complexité des requêtes.
 - La quantité de données traitées dans les requêtes.
 
-Pour calculer le volume de RAM requis, vous devez estimer la taille des données temporaires pour [GROUP BY](../query_language/select.md#select-group-by-clause), [DISTINCT](../query_language/select.md#select-distinct), [JOIN](../query_language/select.md#select-join) et d'autres opérations que vous utilisez.
+Pour calculer le volume de RAM requis, vous devez estimer la taille des données temporaires pour [GROUP BY](../sql_reference/statements/select.md#select-group-by-clause), [DISTINCT](../sql_reference/statements/select.md#select-distinct), [JOIN](../sql_reference/statements/select.md#select-join) et d'autres opérations que vous utilisez.
 
-ClickHouse peut utiliser la mémoire externe pour les données temporaires. Voir [Groupe par dans la mémoire externe](../query_language/select.md#select-group-by-in-external-memory) pour plus de détails.
+ClickHouse peut utiliser la mémoire externe pour les données temporaires. Voir [Groupe par dans la mémoire externe](../sql_reference/statements/select.md#select-group-by-in-external-memory) pour plus de détails.
 
 ## Fichier D'Échange {#swap-file}
diff --git a/docs/fr/operations/server_settings/index.md b/docs/fr/operations/server_configuration_parameters/index.md
similarity index 77%
rename from docs/fr/operations/server_settings/index.md
rename to docs/fr/operations/server_configuration_parameters/index.md
index 02e7d5cb305..04581bc4f17 100644
--- a/docs/fr/operations/server_settings/index.md
+++ b/docs/fr/operations/server_configuration_parameters/index.md
@@ -1,5 +1,9 @@
 ---
 machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_folder_title: Server Configuration Parameters
+toc_priority: 54
+toc_title: Introduction
 ---
 
 # Paramètres de configuration du serveur {#server-settings}
@@ -12,4 +16,4 @@ D'autres paramètres sont décrits dans le “[Paramètre](../settings/index.md#
 
 Avant d'étudier les paramètres, lire la [Fichiers de Configuration](../configuration_files.md#configuration_files) section et notez l'utilisation de substitutions (le `incl` et `optional` attribut).
 
-[Article Original](https://clickhouse.tech/docs/en/operations/server_settings/)
+[Article Original](https://clickhouse.tech/docs/en/operations/server_configuration_parameters/)
diff --git a/docs/fr/operations/server_settings/settings.md b/docs/fr/operations/server_configuration_parameters/settings.md
similarity index 81%
rename from docs/fr/operations/server_settings/settings.md
rename to docs/fr/operations/server_configuration_parameters/settings.md
index fb5c5c11261..eda744c384b 100644
--- a/docs/fr/operations/server_settings/settings.md
+++ b/docs/fr/operations/server_configuration_parameters/settings.md
@@ -1,5 +1,8 @@
 ---
 machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_priority: 57
+toc_title: "Les Param\xE8tres Du Serveur"
 ---
 
 # Les Paramètres Du Serveur {#server-settings}
@@ -20,7 +23,7 @@ Valeur par défaut: 3600.
 
 ## compression {#server-settings-compression}
 
-Paramètres de compression de données pour [MergeTree](../table_engines/mergetree.md)-tables de moteur.
+Paramètres de compression de données pour [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md)-tables de moteur.
 
 !!! warning "Avertissement"
     Ne l'utilisez pas si vous venez de commencer à utiliser ClickHouse.
@@ -69,7 +72,7 @@ Si aucune condition n'est remplie pour une partie de données, ClickHouse utilis
 
 La base de données par défaut.
 
-Pour obtenir une liste de bases de données, utilisez la [SHOW DATABASES](../../query_language/show.md#show-databases) requête.
+Pour obtenir une liste de bases de données, utilisez la [SHOW DATABASES](../../sql_reference/statements/show.md#show-databases) requête.
 
 **Exemple**
 
@@ -89,7 +92,7 @@ Les paramètres des profils sont situés dans le fichier spécifié dans le para
 <default_profile>default</default_profile>
 ```
 
-## dictionaries\_config {#server_settings-dictionaries_config}
+## dictionaries\_config {#server_configuration_parameters-dictionaries_config}
 
 Chemin d'accès au fichier de configuration des dictionnaires externes.
 
 Chemin:
 
 - Spécifiez le chemin absolu ou le chemin relatif au fichier de configuration du serveur.
 - Le chemin peut contenir des caractères génériques \* et ?.
 
-Voir aussi “[Dictionnaires externes](../../query_language/dicts/external_dicts.md)”.
+Voir aussi “[Dictionnaires externes](../../sql_reference/dictionaries/external_dictionaries/external_dicts.md)”.
 
 **Exemple**
 
 ```
 <dictionaries_config>*_dictionary.xml</dictionaries_config>
 ```
 
-## dictionaries\_lazy\_load {#server_settings-dictionaries_lazy_load}
+## dictionaries\_lazy\_load {#server_configuration_parameters-dictionaries_lazy_load}
 
 Chargement paresseux des dictionnaires.
 
 La valeur par défaut est `true`.
 
 ```
 <dictionaries_lazy_load>true</dictionaries_lazy_load>
 ```
 
-## format\_schema\_path {#server_settings-format_schema_path}
+## format\_schema\_path {#server_configuration_parameters-format_schema_path}
 
 Le chemin d'accès au répertoire avec des régimes pour l'entrée de données, tels que les schémas pour l' [CapnProto](../../interfaces/formats.md#capnproto) format.
 
 ```
 <format_schema_path>format_schemas/</format_schema_path>
 ```
 
-## graphite {#server_settings-graphite}
+## graphite {#server_configuration_parameters-graphite}
 
 Envoi de données à [Graphite](https://github.com/graphite-project).
 
 Paramètre:
 
 - interval – The interval for sending, in seconds.
 - timeout – The timeout for sending data, in seconds.
 - root\_path – Prefix for keys.
-- metrics – Sending data from the [système.métrique](../system_tables.md#system_tables-metrics) table.
-- events – Sending deltas data accumulated for the time period from the [système.événement](../system_tables.md#system_tables-events) table.
-- events\_cumulative – Sending cumulative data from the [système.événement](../system_tables.md#system_tables-events) table.
-- asynchronous\_metrics – Sending data from the [système.asynchronous\_metrics](../system_tables.md#system_tables-asynchronous_metrics) table.
+- metrics – Sending data from the [système.métrique](../../operations/system_tables.md#system_tables-metrics) table.
+- events – Sending deltas data accumulated for the time period from the [système.événement](../../operations/system_tables.md#system_tables-events) table.
+- events\_cumulative – Sending cumulative data from the [système.événement](../../operations/system_tables.md#system_tables-events) table.
+- asynchronous\_metrics – Sending data from the [système.asynchronous\_metrics](../../operations/system_tables.md#system_tables-asynchronous_metrics) table.
 
 Vous pouvez configurer plusieurs `<graphite>` clause. Par exemple, vous pouvez l'utiliser pour envoyer des données différentes à différents intervalles.
@@ -167,11 +170,11 @@ Vous pouvez configurer plusieurs `<graphite>` clause. Par exemple, vous pouvez l
 ```
 
-## graphite\_rollup {#server_settings-graphite-rollup}
+## graphite\_rollup {#server_configuration_parameters-graphite-rollup}
 
 Paramètres pour l'amincissement des données pour le Graphite.
 
-Pour plus de détails, voir [GraphiteMergeTree](../table_engines/graphitemergetree.md).
+Pour plus de détails, voir [GraphiteMergeTree](../../engines/table_engines/mergetree_family/graphitemergetree.md).
 
 **Exemple**
 
 ```
@@ -199,7 +202,7 @@ Pour plus de détails, voir [GraphiteMergeTree](../table_engines/graphitemergetr
 
 Port de connexion au serveur via HTTP(S).
 
-Si `https_port` est spécifié, [openSSL](#server_settings-openssl) doit être configuré.
+Si `https_port` est spécifié, [openSSL](#server_configuration_parameters-openssl) doit être configuré.
 
 Si `http_port` est spécifié, la configuration OpenSSL est ignorée même si elle est définie.
@@ -209,7 +212,7 @@ Si `http_port` est spécifié, la configuration OpenSSL est ignorée même si el
 
 0000
 ```
 
-## http\_server\_default\_response {#server_settings-http_server_default_response}
+## http\_server\_default\_response {#server_configuration_parameters-http_server_default_response}
 
 Page affichée par défaut lorsque vous accédez au serveur HTTP(S) ClickHouse.
 La valeur par défaut est “Ok.” (avec un saut de ligne à la fin)
@@ -224,7 +227,7 @@ Ouvrir `https://tabix.io/` lors de l'accès à `http://localhost: http_port`.
 ```
 
-## include\_from {#server_settings-include_from}
+## include\_from {#server_configuration_parameters-include_from}
 
 Le chemin d'accès au fichier avec des substitutions.
@@ -262,7 +265,7 @@ Utile pour rompre avec une interface réseau spécifique.
 
 ## interserver\_http\_credentials {#server-settings-interserver-http-credentials}
 
-Le nom d'utilisateur et le mot de passe utilisés pour [réplication](../table_engines/replication.md) avec les moteurs \* répliqués. Ces informations d'identification sont utilisées uniquement pour la communication entre les répliques et ne sont pas liées aux informations d'identification des clients ClickHouse. Le serveur vérifie ces informations d'identification pour la connexion de répliques et utilise les mêmes informations d'identification lors de la connexion à d'autres répliques. Donc, ces informations d'identification doivent être identiques pour tous les réplicas dans un cluster.
+Le nom d'utilisateur et le mot de passe utilisés pour [réplication](../../engines/table_engines/mergetree_family/replication.md) avec les moteurs \* répliqués. Ces informations d'identification sont utilisées uniquement pour la communication entre les répliques et ne sont pas liées aux informations d'identification des clients ClickHouse. Le serveur vérifie ces informations d'identification pour la connexion de répliques et utilise les mêmes informations d'identification lors de la connexion à d'autres répliques. Donc, ces informations d'identification doivent être identiques pour tous les réplicas dans un cluster.
 
 Par défaut, l'authentification n'est pas utilisé.
 
 Cette section contient les paramètres suivants:
@@ -289,7 +292,7 @@ Le nombre de secondes que ClickHouse attend pour les demandes entrantes avant de
 
 <keep_alive_timeout>3</keep_alive_timeout>
 ```
 
-## listen\_host {#server_settings-listen_host}
+## listen\_host {#server_configuration_parameters-listen_host}
 
 Restriction sur les hôtes dont les demandes peuvent provenir. Si vous voulez que le serveur réponde à tous, spécifiez `::`.
 
 Exemple:
 
 <listen_host>127.0.0.1</listen_host>
 ```
 
-## enregistreur {#server_settings-logger}
+## enregistreur {#server_configuration_parameters-logger}
 
 Paramètres de journalisation.
@@ -353,7 +356,7 @@ Substitutions de paramètres pour les tables répliquées.
 
 Peut être omis si les tables répliquées ne sont pas utilisées.
 
-Pour plus d'informations, consultez la section “[Création de tables répliquées](../../operations/table_engines/replication.md)”.
+Pour plus d'informations, consultez la section “[Création de tables répliquées](../../engines/table_engines/mergetree_family/replication.md)”.
 
 **Exemple**
 
 ```
 <macros incl="macros" optional="true" />
 ```
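Since the `macros` hunk above shows only the prose around the substitution block, a sketch of what the substituted content for replicated tables conventionally looks like may be useful; the `shard`/`replica` names and their values are assumptions for illustration, not taken from this patch:

```xml
<!-- Sketch only: shard/replica are the conventional macro names expanded in
     Replicated* table ZooKeeper paths; the values are illustrative. -->
<macros>
    <shard>01</shard>
    <replica>example01-01-1</replica>
</macros>
```

These macros are what `{shard}` and `{replica}` would expand to in a replicated table's ZooKeeper path.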
 ## mark\_cache\_size {#server-mark-cache-size}
 
-Taille approximative (en octets) du cache des marques utilisées par les [MergeTree](../table_engines/mergetree.md) famille.
+Taille approximative (en octets) du cache des marques utilisées par les [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md) famille.
 
 Le cache est partagé pour le serveur et la mémoire est allouée au besoin. La taille du cache doit être d'au moins 5368709120.
@@ -411,7 +414,7 @@ Nous vous recommandons d'utiliser cette option sous Mac OS X depuis le `getrlimi
 
 Restriction sur la suppression de tables.
 
-Si la taille d'un [MergeTree](../table_engines/mergetree.md) table dépasse `max_table_size_to_drop` (en octets), vous ne pouvez pas le supprimer à l'aide d'une requête DROP.
+Si la taille d'un [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md) table dépasse `max_table_size_to_drop` (en octets), vous ne pouvez pas le supprimer à l'aide d'une requête DROP.
 
 Si vous devez toujours supprimer la table sans redémarrer le serveur ClickHouse, créez le `/flags/force_drop_table` fichier et exécutez la requête DROP.
@@ -425,9 +428,9 @@ La valeur 0 signifie que vous pouvez supprimer toutes les tables sans aucune res
 
 <max_table_size_to_drop>0</max_table_size_to_drop>
 ```
 
-## merge\_tree {#server_settings-merge_tree}
+## merge\_tree {#server_configuration_parameters-merge_tree}
 
-Réglage fin des tables dans le [MergeTree](../table_engines/mergetree.md).
+Réglage fin des tables dans le [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md).
 
 Pour plus d'informations, consultez MergeTreeSettings.h fichier d'en-tête.
 
 **Exemple**
 
 ```
 <merge_tree>
     <max_suspicious_broken_parts>5</max_suspicious_broken_parts>
 </merge_tree>
 ```
 
-## openSSL {#server_settings-openssl}
+## openSSL {#server_configuration_parameters-openssl}
 
 Configuration client/serveur SSL.
@@ -498,17 +501,17 @@ Clés pour les paramètres Serveur/client:
 ```
 
-## part\_log {#server_settings-part-log}
+## part\_log {#server_configuration_parameters-part-log}
 
-Journalisation des événements associés à [MergeTree](../table_engines/mergetree.md). Par exemple, ajouter ou fusionner des données. Vous pouvez utiliser le journal pour simuler des algorithmes de fusion et comparer leurs caractéristiques. Vous pouvez visualiser le processus de fusion.
+Journalisation des événements associés à [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md). Par exemple, ajouter ou fusionner des données. Vous pouvez utiliser le journal pour simuler des algorithmes de fusion et comparer leurs caractéristiques. Vous pouvez visualiser le processus de fusion.
 
-Les requêtes sont enregistrées dans le [système.part\_log](../system_tables.md#system_tables-part-log) table, pas dans un fichier séparé. Vous pouvez configurer le nom de cette table dans le `table` paramètre (voir ci-dessous).
+Les requêtes sont enregistrées dans le [système.part\_log](../../operations/system_tables.md#system_tables-part-log) table, pas dans un fichier séparé. Vous pouvez configurer le nom de cette table dans le `table` paramètre (voir ci-dessous).
 
 Utilisez les paramètres suivants pour configurer la journalisation:
 
 - `database` – Name of the database.
 - `table` – Name of the system table.
-- `partition_by` – Sets a [partitionnement personnalisé clé](../../operations/table_engines/custom_partitioning_key.md).
+- `partition_by` – Sets a [partitionnement personnalisé clé](../../engines/table_engines/mergetree_family/custom_partitioning_key.md).
 - `flush_interval_milliseconds` – Interval for flushing data from the buffer in memory to the table.
 
 **Exemple**
 
 ```
 ```
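The `part_log` parameters listed in the hunk above (`database`, `table`, `partition_by`, `flush_interval_milliseconds`) map directly onto a config section whose example block did not survive extraction; a minimal sketch, in which all concrete values are assumptions:

```xml
<!-- Sketch only: the four keys are the ones documented in the part_log
     section above; the values are illustrative. -->
<part_log>
    <database>system</database>
    <table>part_log</table>
    <partition_by>toMonday(event_date)</partition_by>
    <flush_interval_milliseconds>7500</flush_interval_milliseconds>
</part_log>
```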
@@ -535,17 +538,17 @@ Chemin d'accès au répertoire contenant des données. /var/lib/clickhouse/ ``` -## query\_log {#server_settings-query-log} +## query\_log {#server_configuration_parameters-query-log} Réglage de la journalisation des requêtes reçues avec [log\_queries=1](../settings/settings.md) paramètre. -Les requêtes sont enregistrées dans le [système.query\_log](../system_tables.md#system_tables-query_log) table, pas dans un fichier séparé. Vous pouvez modifier le nom de la table dans le `table` paramètre (voir ci-dessous). +Les requêtes sont enregistrées dans le [système.query\_log](../../operations/system_tables.md#system_tables-query_log) table, pas dans un fichier séparé. Vous pouvez modifier le nom de la table dans le `table` paramètre (voir ci-dessous). Utilisez les paramètres suivants pour configurer la journalisation: - `database` – Name of the database. - `table` – Name of the system table the queries will be logged in. -- `partition_by` – Sets a [partitionnement personnalisé clé](../../operations/table_engines/custom_partitioning_key.md) pour une table. +- `partition_by` – Sets a [partitionnement personnalisé clé](../../engines/table_engines/mergetree_family/custom_partitioning_key.md) pour une table. - `flush_interval_milliseconds` – Interval for flushing data from the buffer in memory to the table. Si la table n'existe pas, ClickHouse la créera. Si la structure du journal des requêtes a été modifiée lors de la mise à jour du serveur ClickHouse, la table avec l'ancienne structure est renommée et une nouvelle table est créée automatiquement. @@ -561,17 +564,17 @@ Si la table n'existe pas, ClickHouse la créera. Si la structure du journal des ``` -## query\_thread\_log {#server_settings-query-thread-log} +## query\_thread\_log {#server_configuration_parameters-query-thread-log} Réglage de la journalisation des threads de requêtes reçues avec [log\_query\_threads=1](../settings/settings.md#settings-log-query-threads) paramètre. -Les requêtes sont enregistrées dans le [système.query\_thread\_log](../system_tables.md#system_tables-query-thread-log) table, pas dans un fichier séparé. Vous pouvez modifier le nom de la table dans le `table` paramètre (voir ci-dessous). +Les requêtes sont enregistrées dans le [système.query\_thread\_log](../../operations/system_tables.md#system_tables-query-thread-log) table, pas dans un fichier séparé. Vous pouvez modifier le nom de la table dans le `table` paramètre (voir ci-dessous). Utilisez les paramètres suivants pour configurer la journalisation: - `database` – Name of the database. - `table` – Name of the system table the queries will be logged in. -- `partition_by` – Sets a [partitionnement personnalisé clé](../../operations/table_engines/custom_partitioning_key.md) pour un système de tableau. +- `partition_by` – Sets a [partitionnement personnalisé clé](../../engines/table_engines/mergetree_family/custom_partitioning_key.md) pour un système de tableau. - `flush_interval_milliseconds` – Interval for flushing data from the buffer in memory to the table. Si la table n'existe pas, ClickHouse la créera. Si la structure du journal des threads de requête a été modifiée lors de la mise à jour du serveur ClickHouse, la table avec l'ancienne structure est renommée et une nouvelle table est créée automatiquement. @@ -587,15 +590,15 @@ Si la table n'existe pas, ClickHouse la créera. 
Si la structure du journal des ``` -## trace\_log {#server_settings-trace_log} +## trace\_log {#server_configuration_parameters-trace_log} -Paramètres pour le [trace\_log](../system_tables.md#system_tables-trace_log) opération de table de système. +Paramètres pour le [trace\_log](../../operations/system_tables.md#system_tables-trace_log) opération de table de système. Paramètre: - `database` — Database for storing a table. - `table` — Table name. -- `partition_by` — [Partitionnement personnalisé clé](../../operations/table_engines/custom_partitioning_key.md) pour un système de tableau. +- `partition_by` — [Partitionnement personnalisé clé](../../engines/table_engines/mergetree_family/custom_partitioning_key.md) pour un système de tableau. - `flush_interval_milliseconds` — Interval for flushing data from the buffer in memory to the table. Le fichier de configuration du serveur par défaut `config.xml` contient la section Paramètres suivante: @@ -642,7 +645,7 @@ les nœuds seront stockés sans masquage. ## remote\_servers {#server-settings-remote-servers} -Configuration des clusters utilisés par le [Distribué](../../operations/table_engines/distributed.md) moteur de table et par le `cluster` table de fonction. +Configuration des clusters utilisés par le [Distribué](../../engines/table_engines/special/distributed.md) moteur de table et par le `cluster` table de fonction. **Exemple** @@ -656,7 +659,7 @@ Pour la valeur de l' `incl` attribut, voir la section “[Fichiers de Configurat - [skip\_unavailable\_shards](../settings/settings.md#settings-skip_unavailable_shards) -## fuseau {#server_settings-timezone} +## fuseau {#server_configuration_parameters-timezone} Le fuseau horaire du serveur. @@ -670,7 +673,7 @@ Le fuseau horaire est nécessaire pour les conversions entre les formats String Europe/Moscow ``` -## tcp\_port {#server_settings-tcp_port} +## tcp\_port {#server_configuration_parameters-tcp_port} Port pour communiquer avec les clients via le protocole TCP. @@ -680,9 +683,9 @@ Port pour communiquer avec les clients via le protocole TCP. 9000 ``` -## tcp\_port\_secure {#server_settings-tcp_port-secure} +## tcp\_port\_secure {#server_configuration_parameters-tcp_port-secure} -Port TCP pour une communication sécurisée avec les clients. Utilisez le avec [OpenSSL](#server_settings-openssl) paramètre. +Port TCP pour une communication sécurisée avec les clients. Utilisez le avec [OpenSSL](#server_configuration_parameters-openssl) paramètre. **Valeurs possibles** @@ -694,7 +697,7 @@ Entier positif. 9440 ``` -## mysql\_port {#server_settings-mysql_port} +## mysql\_port {#server_configuration_parameters-mysql_port} Port pour communiquer avec les clients via le protocole MySQL. @@ -723,7 +726,7 @@ Chemin d'accès aux données temporaires pour le traitement des requêtes volumi ## tmp\_policy {#server-settings-tmp-policy} -La politique de [`storage_configuration`](../table_engines/mergetree.md#table_engine-mergetree-multiple-volumes) pour stocker des fichiers temporaires. +La politique de [`storage_configuration`](../../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-multiple-volumes) pour stocker des fichiers temporaires. Si cela n'est pas [`tmp_path`](#server-settings-tmp_path) est utilisé, sinon elle est ignorée. !!! 
note "Note" @@ -734,7 +737,7 @@ Si cela n'est pas [`tmp_path`](#server-settings-tmp_path) est utilisé, sinon el ## uncompressed\_cache\_size {#server-settings-uncompressed_cache_size} -Taille du Cache (en octets) pour les données non compressées utilisées par les [MergeTree](../table_engines/mergetree.md). +Taille du Cache (en octets) pour les données non compressées utilisées par les [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md). Il y a un cache partagé pour le serveur. La mémoire est allouée à la demande. Le cache est utilisé si l'option [use\_uncompressed\_cache](../settings/settings.md#setting-use_uncompressed_cache) est activé. @@ -746,9 +749,9 @@ Le cache non compressé est avantageux pour les requêtes très courtes dans des 8589934592 ``` -## user\_files\_path {#server_settings-user_files_path} +## user\_files\_path {#server_configuration_parameters-user_files_path} -Le répertoire avec les fichiers utilisateur. Utilisé dans la fonction de table [fichier()](../../query_language/table_functions/file.md). +Le répertoire avec les fichiers utilisateur. Utilisé dans la fonction de table [fichier()](../../sql_reference/table_functions/file.md). **Exemple** @@ -821,7 +824,7 @@ Cette section contient les paramètres suivants: **Voir Aussi** -- [Réplication](../../operations/table_engines/replication.md) +- [Réplication](../../engines/table_engines/mergetree_family/replication.md) - [Guide du programmeur ZooKeeper](http://zookeeper.apache.org/doc/current/zookeeperProgrammers.html) ## use\_minimalistic\_part\_header\_in\_zookeeper {#server-settings-use_minimalistic_part_header_in_zookeeper} @@ -830,20 +833,20 @@ Méthode de stockage pour les en-têtes de partie de données dans ZooKeeper. Ce paramètre s'applique uniquement à l' `MergeTree` famille. Il peut être spécifié: -- À l'échelle mondiale dans le [merge\_tree](#server_settings-merge_tree) la section de la `config.xml` fichier. +- À l'échelle mondiale dans le [merge\_tree](#server_configuration_parameters-merge_tree) la section de la `config.xml` fichier. ClickHouse utilise le paramètre pour toutes les tables du serveur. Vous pouvez modifier le réglage à tout moment. Les tables existantes changent de comportement lorsque le paramètre change. - Pour chaque table. - Lors de la création d'un tableau, indiquer la [moteur de réglage](../table_engines/mergetree.md#table_engine-mergetree-creating-a-table). Le comportement d'une table existante avec ce paramètre ne change pas, même si le paramètre global des changements. + Lors de la création d'un tableau, indiquer la [moteur de réglage](../../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-creating-a-table). Le comportement d'une table existante avec ce paramètre ne change pas, même si le paramètre global des changements. **Valeurs possibles** - 0 — Functionality is turned off. - 1 — Functionality is turned on. -Si `use_minimalistic_part_header_in_zookeeper = 1`, puis [répliqué](../table_engines/replication.md) les tables stockent les en-têtes des parties de données de manière compacte à l'aide `znode`. Si la table contient plusieurs colonnes, cette méthode de stockage réduit considérablement le volume des données stockées dans Zookeeper. +Si `use_minimalistic_part_header_in_zookeeper = 1`, puis [répliqué](../../engines/table_engines/mergetree_family/replication.md) les tables stockent les en-têtes des parties de données de manière compacte à l'aide `znode`. 
Si la table contient plusieurs colonnes, cette méthode de stockage réduit considérablement le volume des données stockées dans Zookeeper. !!! attention "Attention" Après l'application de `use_minimalistic_part_header_in_zookeeper = 1`, vous ne pouvez pas rétrograder le serveur ClickHouse vers une version qui ne prend pas en charge ce paramètre. Soyez prudent lors de la mise à niveau de ClickHouse sur les serveurs d'un cluster. Ne mettez pas à niveau tous les serveurs à la fois. Il est plus sûr de tester de nouvelles versions de ClickHouse dans un environnement de test, ou sur quelques serveurs d'un cluster. @@ -866,4 +869,4 @@ La mise à jour est effectuée de manière asynchrone, dans un thread système s **Valeur par défaut**: 15. -[Article Original](https://clickhouse.tech/docs/en/operations/server_settings/settings/) +[Article Original](https://clickhouse.tech/docs/en/operations/server_configuration_parameters/settings/) diff --git a/docs/fr/operations/settings/constraints_on_settings.md b/docs/fr/operations/settings/constraints_on_settings.md index 03bc26bf9a9..18094f50294 100644 --- a/docs/fr/operations/settings/constraints_on_settings.md +++ b/docs/fr/operations/settings/constraints_on_settings.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 62 +toc_title: "Contraintes sur les param\xE8tres" --- # Contraintes sur les paramètres {#constraints-on-settings} diff --git a/docs/fr/operations/settings/index.md b/docs/fr/operations/settings/index.md index df747df3685..b29aa8c011b 100644 --- a/docs/fr/operations/settings/index.md +++ b/docs/fr/operations/settings/index.md @@ -1,5 +1,9 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_folder_title: Settings +toc_priority: 55 +toc_title: Introduction --- # Paramètre {#settings} diff --git a/docs/fr/operations/settings/permissions_for_queries.md b/docs/fr/operations/settings/permissions_for_queries.md index 02d735eb46d..e2b515e8d34 100644 --- a/docs/fr/operations/settings/permissions_for_queries.md +++ b/docs/fr/operations/settings/permissions_for_queries.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 58 +toc_title: "Autorisations pour les requ\xEAtes" --- # Autorisations pour les requêtes {#permissions_for_queries} diff --git a/docs/fr/operations/settings/query_complexity.md b/docs/fr/operations/settings/query_complexity.md index fc9ce0b522c..9c5e0e2d923 100644 --- a/docs/fr/operations/settings/query_complexity.md +++ b/docs/fr/operations/settings/query_complexity.md @@ -1,8 +1,11 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 59 +toc_title: "Restrictions sur la complexit\xE9 des requ\xEAtes" --- -# Restrictions sur la complexité des requêtes {#restrictions-on-query-complexity} +# Restrictions Sur La Complexité Des Requêtes {#restrictions-on-query-complexity} Les Restrictions sur la complexité des requêtes font partie des paramètres. Ils sont utilisés pour fournir une exécution plus sûre à partir de l'interface utilisateur. @@ -41,7 +44,7 @@ La consommation de mémoire est également limitée par les paramètres `max_mem Quantité maximale de RAM à utiliser pour exécuter les requêtes d'un utilisateur sur un seul serveur. -Les valeurs par défaut sont définies dans [Paramètre.h](https://github.com/ClickHouse/ClickHouse/blob/master/src/src/Core/Settings.h#L288). 
Par défaut, le montant n'est pas limité (`max_memory_usage_for_user = 0`). +Les valeurs par défaut sont définies dans [Paramètre.h](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/Core/Settings.h#L288). Par défaut, le montant n'est pas limité (`max_memory_usage_for_user = 0`). Voir aussi la description de [max\_memory\_usage](#settings_max_memory_usage). @@ -49,7 +52,7 @@ Voir aussi la description de [max\_memory\_usage](#settings_max_memory_usage). La quantité maximale de RAM à utiliser pour exécuter toutes les requêtes sur un seul serveur. -Les valeurs par défaut sont définies dans [Paramètre.h](https://github.com/ClickHouse/ClickHouse/blob/master/src/src/Core/Settings.h#L289). Par défaut, le montant n'est pas limité (`max_memory_usage_for_all_queries = 0`). +Les valeurs par défaut sont définies dans [Paramètre.h](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/Core/Settings.h#L289). Par défaut, le montant n'est pas limité (`max_memory_usage_for_all_queries = 0`). Voir aussi la description de [max\_memory\_usage](#settings_max_memory_usage). @@ -79,11 +82,11 @@ Que faire lorsque le nombre de clés uniques pour l'agrégation dépasse la limi ## max\_bytes\_before\_external\_group\_by {#settings-max_bytes_before_external_group_by} -Active ou désactive l'exécution de `GROUP BY` clauses dans la mémoire externe. Voir [Groupe par dans la mémoire externe](../../query_language/select.md#select-group-by-in-external-memory). +Active ou désactive l'exécution de `GROUP BY` clauses dans la mémoire externe. Voir [Groupe par dans la mémoire externe](../../sql_reference/statements/select.md#select-group-by-in-external-memory). Valeurs possibles: -- Volume maximal de RAM (en octets) pouvant être utilisé par le [GROUP BY](../../query_language/select.md#select-group-by-clause) opération. +- Volume maximal de RAM (en octets) pouvant être utilisé par le [GROUP BY](../../sql_reference/statements/select.md#select-group-by-clause) opération. - 0 — `GROUP BY` dans la mémoire externe désactivé. Valeur par défaut: 0. @@ -231,7 +234,7 @@ Que faire lorsque la quantité de données dépasse l'une des limites: ‘throw Limite le nombre de lignes dans la table de hachage utilisée lors de la jonction de tables. -Ce réglage s'applique à [SELECT … JOIN](../../query_language/select.md#select-join) les opérations et les [Rejoindre](../table_engines/join.md) tableau moteur. +Ce réglage s'applique à [SELECT … JOIN](../../sql_reference/statements/select.md#select-join) les opérations et les [Rejoindre](../../engines/table_engines/special/join.md) tableau moteur. Si une requête contient plusieurs jointures, ClickHouse vérifie ce paramètre pour chaque résultat intermédiaire. @@ -248,7 +251,7 @@ Valeur par défaut: 0. Limite la taille en octets de la table de hachage utilisée lors de l'assemblage de tables. -Ce réglage s'applique à [SELECT … JOIN](../../query_language/select.md#select-join) les opérations et les [Rejoindre le moteur de table](../table_engines/join.md). +Ce réglage s'applique à [SELECT … JOIN](../../sql_reference/statements/select.md#select-join) les opérations et les [Rejoindre le moteur de table](../../engines/table_engines/special/join.md). Si la requête contient des jointures, ClickHouse vérifie ce paramètre pour chaque résultat intermédiaire. @@ -277,8 +280,8 @@ Valeur par défaut: `THROW`. 
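Esquisse indicative de l'effet combiné de ces limites au niveau d'une session ; avec `join_overflow_mode = 'break'`, la jointure renvoie un résultat partiel au lieu d'échouer (les valeurs sont purement illustratives) :

``` sql
-- Esquisse : plafonner la table de hachage d'une jointure pour la session courante.
SET max_rows_in_join = 1000000;
SET join_overflow_mode = 'break'; -- tronquer au lieu de lever une exception

SELECT a.number, b.number
FROM numbers(10) AS a
INNER JOIN numbers(10) AS b ON a.number = b.number;
```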
**Voir Aussi** -- [Clause de JOINTURE](../../query_language/select.md#select-join) -- [Rejoindre le moteur de table](../table_engines/join.md) +- [Clause de JOINTURE](../../sql_reference/statements/select.md#select-join) +- [Rejoindre le moteur de table](../../engines/table_engines/special/join.md) ## max\_partitions\_per\_insert\_block {#max-partitions-per-insert-block} diff --git a/docs/fr/operations/settings/settings.md b/docs/fr/operations/settings/settings.md index 4fed20c46cd..8de3ea3e7d5 100644 --- a/docs/fr/operations/settings/settings.md +++ b/docs/fr/operations/settings/settings.md @@ -1,12 +1,15 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 60 +toc_title: "Param\xE8tre" --- # Paramètre {#settings} ## distributed\_product\_mode {#distributed-product-mode} -Modifie le comportement de [distribués sous-requêtes](../../query_language/select.md). +Modifie le comportement de [distribués sous-requêtes](../../sql_reference/statements/select.md). ClickHouse applies this setting when the query contains the product of distributed tables, i.e. when the query for a distributed table contains a non-GLOBAL subquery for the distributed table. @@ -15,7 +18,7 @@ Restriction: - Uniquement appliqué pour les sous-requêtes IN et JOIN. - Uniquement si la section FROM utilise une table distribuée contenant plus d'un fragment. - Si la sous-requête concerne un distribué tableau contenant plus d'un fragment. -- Pas utilisé pour une table [distant](../../query_language/table_functions/remote.md) fonction. +- Pas utilisé pour une table [distant](../../sql_reference/table_functions/remote.md) fonction. Valeurs possibles: @@ -50,7 +53,7 @@ Si `enable_optimize_predicate_expression = 0` puis le temps d'exécution de la d ## fallback\_to\_stale\_replicas\_for\_distributed\_queries {#settings-fallback_to_stale_replicas_for_distributed_queries} -Force une requête à un réplica obsolète si les données mises à jour ne sont pas disponibles. Voir [Réplication](../table_engines/replication.md). +Force une requête à un réplica obsolète si les données mises à jour ne sont pas disponibles. Voir [Réplication](../../engines/table_engines/mergetree_family/replication.md). ClickHouse sélectionne le plus pertinent parmi les répliques obsolètes de la table. @@ -64,7 +67,7 @@ Désactive l'exécution de la requête si l'index ne peut pas être utilisé par Fonctionne avec les tables de la famille MergeTree. -Si `force_index_by_date=1`, Clickhouse vérifie si la requête a une condition de clé de date qui peut être utilisée pour restreindre les plages de données. S'il n'y a pas de condition appropriée, il lève une exception. Cependant, il ne vérifie pas si la condition réduit la quantité de données à lire. Par exemple, la condition `Date != ' 2000-01-01 '` est acceptable même lorsqu'il correspond à toutes les données de la table (c'est-à-dire que l'exécution de la requête nécessite une analyse complète). Pour plus d'informations sur les plages de données dans les tables MergeTree, voir [MergeTree](../table_engines/mergetree.md). +Si `force_index_by_date=1`, Clickhouse vérifie si la requête a une condition de clé de date qui peut être utilisée pour restreindre les plages de données. S'il n'y a pas de condition appropriée, il lève une exception. Cependant, il ne vérifie pas si la condition réduit la quantité de données à lire. 
Par exemple, la condition `Date != ' 2000-01-01 '` est acceptable même lorsqu'elle correspond à toutes les données de la table (c'est-à-dire que l'exécution de la requête nécessite une analyse complète). Pour plus d'informations sur les plages de données dans les tables MergeTree, voir [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md). ## force\_primary\_key {#force-primary-key} @@ -72,7 +75,7 @@ Désactive l'exécution de la requête si l'indexation par la clé primaire n'es Fonctionne avec les tables de la famille MergeTree. -Si `force_primary_key=1`, Clickhouse vérifie si la requête a une condition de clé primaire qui peut être utilisée pour restreindre les plages de données. S'il n'y a pas de condition appropriée, il lève une exception. Cependant, il ne vérifie pas si la condition réduit la quantité de données à lire. Pour plus d'informations sur les plages de données dans les tables MergeTree, voir [MergeTree](../table_engines/mergetree.md). +Si `force_primary_key=1`, ClickHouse vérifie si la requête a une condition de clé primaire qui peut être utilisée pour restreindre les plages de données. S'il n'y a pas de condition appropriée, il lève une exception. Cependant, il ne vérifie pas si la condition réduit la quantité de données à lire. Pour plus d'informations sur les plages de données dans les tables MergeTree, voir [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md). ## format\_schema {#format-schema} @@ -133,7 +136,7 @@ Valeur par défaut: 0. ## max\_http\_get\_redirects {#setting-max_http_get_redirects} -Limite le nombre maximal de sauts de redirection HTTP GET pour [URL](../table_engines/url.md)-tables de moteur. Le paramètre s'applique aux deux types de tables: celles créées par [CREATE TABLE](../../query_language/create/#create-table-query) requête et par la [URL](../../query_language/table_functions/url.md) table de fonction. +Limite le nombre maximal de sauts de redirection HTTP GET pour les tables à moteur [URL](../../engines/table_engines/special/url.md). Le paramètre s'applique aux deux types de tables: celles créées par la requête [CREATE TABLE](../../query_language/create/#create-table-query) et celles créées par la fonction de table [URL](../../sql_reference/table_functions/url.md). Valeurs possibles: @@ -169,7 +172,7 @@ Si les deux `input_format_allow_errors_num` et `input_format_allow_errors_ratio` ## input\_format\_values\_interpret\_expressions {#settings-input_format_values_interpret_expressions} -Active ou désactive L'analyseur SQL complet si l'analyseur de flux rapide ne peut pas analyser les données. Ce paramètre est utilisé uniquement pour la [Valeur](../../interfaces/formats.md#data-format-values) format lors de l'insertion des données. Pour plus d'informations sur l'analyse syntaxique, consultez [Syntaxe](../../query_language/syntax.md) section. +Active ou désactive l'analyseur SQL complet si l'analyseur de flux rapide ne peut pas analyser les données. Ce paramètre est utilisé uniquement pour le format [Valeur](../../interfaces/formats.md#data-format-values) lors de l'insertion des données. Pour plus d'informations sur l'analyse syntaxique, consultez la section [Syntaxe](../../sql_reference/syntax.md). Valeurs possibles: @@ -185,7 +188,7 @@ Valeur par défaut: 1. Exemple D'utilisation -Insérez le [DateTime](../../data_types/datetime.md) tapez valeur avec les différents paramètres. +Insérez une valeur de type [DateTime](../../sql_reference/data_types/datetime.md) avec différents paramètres.
``` sql SET input_format_values_interpret_expressions = 0; @@ -330,7 +333,7 @@ Valeur par défaut: 1. Permet de choisir un analyseur de la représentation textuelle de la date et de l'heure. -Le réglage ne s'applique pas à [fonctions date et heure](../../query_language/functions/date_time_functions.md). +Le réglage ne s'applique pas à [fonctions date et heure](../../sql_reference/functions/date_time_functions.md). Valeurs possibles: @@ -346,12 +349,12 @@ Valeur par défaut: `'basic'`. Voir aussi: -- [Type de données DateTime.](../../data_types/datetime.md) -- [Fonctions pour travailler avec des dates et des heures.](../../query_language/functions/date_time_functions.md) +- [Type de données DateTime.](../../sql_reference/data_types/datetime.md) +- [Fonctions pour travailler avec des dates et des heures.](../../sql_reference/functions/date_time_functions.md) ## join\_default\_strictness {#settings-join_default_strictness} -Définit la rigueur par défaut pour [JOIN clauses](../../query_language/select.md#select-join). +Définit la rigueur par défaut pour [JOIN clauses](../../sql_reference/statements/select.md#select-join). Valeurs possibles: @@ -367,7 +370,7 @@ Valeur par défaut: `ALL`. Modifie le comportement des opérations de jointure avec `ANY` rigueur. !!! warning "Attention" - Ce paramètre s'applique uniquement pour `JOIN` opérations avec [Rejoindre](../table_engines/join.md) le moteur de tables. + Ce paramètre s'applique uniquement pour `JOIN` opérations avec [Rejoindre](../../engines/table_engines/special/join.md) le moteur de tables. Valeurs possibles: @@ -378,18 +381,18 @@ Valeur par défaut: 0. Voir aussi: -- [Clause de JOINTURE](../../query_language/select.md#select-join) -- [Rejoindre le moteur de table](../table_engines/join.md) +- [Clause de JOINTURE](../../sql_reference/statements/select.md#select-join) +- [Rejoindre le moteur de table](../../engines/table_engines/special/join.md) - [join\_default\_strictness](#settings-join_default_strictness) ## join\_use\_nulls {#join_use_nulls} -Définit le type de [JOIN](../../query_language/select.md) comportement. Lors de la fusion de tables, des cellules vides peuvent apparaître. ClickHouse les remplit différemment en fonction de ce paramètre. +Définit le type de [JOIN](../../sql_reference/statements/select.md) comportement. Lors de la fusion de tables, des cellules vides peuvent apparaître. ClickHouse les remplit différemment en fonction de ce paramètre. Valeurs possibles: - 0 — The empty cells are filled with the default value of the corresponding field type. -- 1 — `JOIN` se comporte de la même manière que dans SQL standard. Le type du champ correspondant est converti en [Nullable](../../data_types/nullable.md#data_type-nullable) et les cellules vides sont remplis avec [NULL](../../query_language/syntax.md). +- 1 — `JOIN` se comporte de la même manière que dans SQL standard. Le type du champ correspondant est converti en [Nullable](../../sql_reference/data_types/nullable.md#data_type-nullable) et les cellules vides sont remplis avec [NULL](../../sql_reference/syntax.md). Valeur par défaut: 0. @@ -409,7 +412,7 @@ Par défaut: 1 000 000. Cela ne fonctionne que lors de la lecture des moteurs Me ## merge\_tree\_min\_rows\_for\_concurrent\_read {#setting-merge-tree-min-rows-for-concurrent-read} -Si le nombre de lignes à lire à partir d'un fichier d'un [MergeTree](../table_engines/mergetree.md) table dépasse `merge_tree_min_rows_for_concurrent_read` ensuite, ClickHouse essaie d'effectuer une lecture simultanée de ce fichier sur plusieurs threads. 
+Si le nombre de lignes à lire à partir d'un fichier d'un [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md) table dépasse `merge_tree_min_rows_for_concurrent_read` ensuite, ClickHouse essaie d'effectuer une lecture simultanée de ce fichier sur plusieurs threads. Valeurs possibles: @@ -419,7 +422,7 @@ Valeur par défaut: 163840. ## merge\_tree\_min\_bytes\_for\_concurrent\_read {#setting-merge-tree-min-bytes-for-concurrent-read} -Si le nombre d'octets à lire à partir d'un fichier d'un [MergeTree](../table_engines/mergetree.md)- table de moteur dépasse `merge_tree_min_bytes_for_concurrent_read` puis ClickHouse essaie de lire simultanément à partir de ce fichier dans plusieurs threads. +Si le nombre d'octets à lire à partir d'un fichier d'un [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md)- table de moteur dépasse `merge_tree_min_bytes_for_concurrent_read` puis ClickHouse essaie de lire simultanément à partir de ce fichier dans plusieurs threads. Valeur Possible: @@ -461,7 +464,7 @@ Valeur par défaut: 8. Si ClickHouse devrait lire plus de `merge_tree_max_rows_to_use_cache` lignes dans une requête, il n'utilise pas le cache des blocs non compressés. -Le cache des blocs non compressés stocke les données extraites pour les requêtes. ClickHouse utilise ce cache pour accélérer les réponses aux petites requêtes répétées. Ce paramètre protège le cache contre le saccage par les requêtes qui lisent une grande quantité de données. Le [uncompressed\_cache\_size](../server_settings/settings.md#server-settings-uncompressed_cache_size) le paramètre serveur définit la taille du cache des blocs non compressés. +Le cache des blocs non compressés stocke les données extraites pour les requêtes. ClickHouse utilise ce cache pour accélérer les réponses aux petites requêtes répétées. Ce paramètre protège le cache contre le saccage par les requêtes qui lisent une grande quantité de données. Le [uncompressed\_cache\_size](../server_configuration_parameters/settings.md#server-settings-uncompressed_cache_size) le paramètre serveur définit la taille du cache des blocs non compressés. Valeurs possibles: @@ -473,7 +476,7 @@ Default value: 128 ✕ 8192. Si ClickHouse devrait lire plus de `merge_tree_max_bytes_to_use_cache` octets dans une requête, il n'utilise pas le cache de non compressé blocs. -Le cache des blocs non compressés stocke les données extraites pour les requêtes. ClickHouse utilise ce cache pour accélérer les réponses aux petites requêtes répétées. Ce paramètre protège le cache contre le saccage par les requêtes qui lisent une grande quantité de données. Le [uncompressed\_cache\_size](../server_settings/settings.md#server-settings-uncompressed_cache_size) le paramètre serveur définit la taille du cache des blocs non compressés. +Le cache des blocs non compressés stocke les données extraites pour les requêtes. ClickHouse utilise ce cache pour accélérer les réponses aux petites requêtes répétées. Ce paramètre protège le cache contre le saccage par les requêtes qui lisent une grande quantité de données. Le [uncompressed\_cache\_size](../server_configuration_parameters/settings.md#server-settings-uncompressed_cache_size) le paramètre serveur définit la taille du cache des blocs non compressés. Valeur Possible: @@ -498,7 +501,7 @@ Valeur par défaut: 0. Configuration de la journalisation des requêtes. 
-Les requêtes envoyées à ClickHouse avec cette configuration sont enregistrées selon les règles du [query\_log](../server_settings/settings.md#server_settings-query-log) paramètre de configuration du serveur. +Les requêtes envoyées à ClickHouse avec cette configuration sont enregistrées selon les règles du [query\_log](../server_configuration_parameters/settings.md#server_configuration_parameters-query-log) paramètre de configuration du serveur. Exemple: @@ -510,7 +513,7 @@ log_queries=1 Configuration de la journalisation des threads de requête. -Les threads de requêtes exécutés par ClickHouse avec cette configuration sont journalisés selon les règles du [query\_thread\_log](../server_settings/settings.md#server_settings-query-thread-log) paramètre de configuration du serveur. +Les threads de requêtes exécutés par ClickHouse avec cette configuration sont journalisés selon les règles du [query\_thread\_log](../server_configuration_parameters/settings.md#server_configuration_parameters-query-thread-log) paramètre de configuration du serveur. Exemple: @@ -532,7 +535,7 @@ La valeur par défaut est légèrement supérieure à `max_block_size`. La raiso ## max\_replica\_delay\_for\_distributed\_queries {#settings-max_replica_delay_for_distributed_queries} -Désactive les répliques en retard pour les requêtes distribuées. Voir [Réplication](../../operations/table_engines/replication.md). +Désactive les répliques en retard pour les requêtes distribuées. Voir [Réplication](../../engines/table_engines/mergetree_family/replication.md). Définit le temps en secondes. Si une réplique accuse plus de retard que la valeur définie, cette réplique n'est pas utilisée. @@ -577,7 +580,7 @@ Ne confondez pas les blocs pour la compression (un morceau de mémoire constitu ## min\_compress\_block\_size {#min-compress-block-size} -Pour [MergeTree](../table_engines/mergetree.md)" table. Afin de réduire la latence lors du traitement des requêtes, un bloc est compressé lors de l'écriture de la marque suivante si sa taille est au moins ‘min\_compress\_block\_size’. Par défaut, 65 536. +Pour [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md)" table. Afin de réduire la latence lors du traitement des requêtes, un bloc est compressé lors de l'écriture de la marque suivante si sa taille est au moins ‘min\_compress\_block\_size’. Par défaut, 65 536. La taille réelle du bloc, si les données non compressées sont inférieures à ‘max\_compress\_block\_size’ pas moins de cette valeur et pas moins que le volume de données pour une marque. @@ -655,7 +658,7 @@ Pour plus d'informations, consultez la section “Extreme values”. ## use\_uncompressed\_cache {#setting-use_uncompressed_cache} Indique s'il faut utiliser un cache de blocs non compressés. Accepte 0 ou 1. Par défaut, 0 (désactivé). -L'utilisation du cache non compressé (uniquement pour les tables de la famille MergeTree) peut réduire considérablement la latence et augmenter le débit lorsque vous travaillez avec un grand nombre de requêtes courtes. Activez ce paramètre pour les utilisateurs qui envoient des requêtes courtes fréquentes. Faites également attention à la [uncompressed\_cache\_size](../server_settings/settings.md#server-settings-uncompressed_cache_size) configuration parameter (only set in the config file) – the size of uncompressed cache blocks. By default, it is 8 GiB. The uncompressed cache is filled in as needed and the least-used data is automatically deleted. 
+L'utilisation du cache non compressé (uniquement pour les tables de la famille MergeTree) peut réduire considérablement la latence et augmenter le débit lorsque vous travaillez avec un grand nombre de requêtes courtes. Activez ce paramètre pour les utilisateurs qui envoient des requêtes courtes fréquentes. Faites également attention à la [uncompressed\_cache\_size](../server_configuration_parameters/settings.md#server-settings-uncompressed_cache_size) configuration parameter (only set in the config file) – the size of uncompressed cache blocks. By default, it is 8 GiB. The uncompressed cache is filled in as needed and the least-used data is automatically deleted. Pour les requêtes qui lisent au moins un volume de données assez important (un million de lignes ou plus), le cache non compressé est désactivé automatiquement pour économiser de l'espace pour les requêtes vraiment petites. Cela signifie que vous pouvez laisser le paramètre ‘use\_uncompressed\_cache’ toujours à la valeur 1. @@ -866,7 +869,7 @@ Valeurs possibles: Valeur par défaut: 1. -Par défaut, les blocs insérés dans les tables répliquées `INSERT` déclaration sont dédupliquées (voir \[Réplication de Données\] (../ table\_engines / replication. md). +Par défaut, les blocs insérés dans les tables répliquées par une instruction `INSERT` sont dédupliqués (voir [Réplication des données](../../engines/table_engines/mergetree_family/replication.md)). ## déduplicate\_blocks\_in\_dependent\_materialized\_views {#settings-deduplicate-blocks-in-dependent-materialized-views} @@ -932,15 +935,15 @@ Valeur par défaut: 0. ## count\_distinct\_implementation {#settings-count_distinct_implementation} -Spécifie de l' `uniq*` les fonctions doivent être utilisées pour [COUNT(DISTINCT …)](../../query_language/agg_functions/reference.md#agg_function-count) construction. +Spécifie laquelle des fonctions `uniq*` doit être utilisée pour la construction [COUNT(DISTINCT …)](../../sql_reference/aggregate_functions/reference.md#agg_function-count). Valeurs possibles: -- [uniq](../../query_language/agg_functions/reference.md#agg_function-uniq) -- [uniqcombiné](../../query_language/agg_functions/reference.md#agg_function-uniqcombined) -- [uniqCombined64](../../query_language/agg_functions/reference.md#agg_function-uniqcombined64) -- [uniqHLL12](../../query_language/agg_functions/reference.md#agg_function-uniqhll12) -- [uniqExact](../../query_language/agg_functions/reference.md#agg_function-uniqexact) +- [uniq](../../sql_reference/aggregate_functions/reference.md#agg_function-uniq) +- [uniqCombined](../../sql_reference/aggregate_functions/reference.md#agg_function-uniqcombined) +- [uniqCombined64](../../sql_reference/aggregate_functions/reference.md#agg_function-uniqcombined64) +- [uniqHLL12](../../sql_reference/aggregate_functions/reference.md#agg_function-uniqhll12) +- [uniqExact](../../sql_reference/aggregate_functions/reference.md#agg_function-uniqexact) Valeur par défaut: `uniqExact`. @@ -1005,7 +1008,7 @@ Valeur par défaut: 0. ## optimize\_throw\_if\_noop {#setting-optimize_throw_if_noop} -Active ou désactive le lancement d'une exception si [OPTIMIZE](../../query_language/misc.md#misc_operations-optimize) la requête n'a pas effectué de fusion. +Active ou désactive le lancement d'une exception si la requête [OPTIMIZE](../../sql_reference/statements/misc.md#misc_operations-optimize) n'a pas effectué de fusion. Par défaut, `OPTIMIZE` retourne avec succès même s'il n'a rien fait.
Ce paramètre vous permet de différencier ces situations et d'obtenir la raison dans un message d'exception. @@ -1025,7 +1028,7 @@ Contrôle la vitesse à laquelle les erreurs dans les tables distribuées sont m Voir aussi: -- [Tableau moteur Distribués](../../operations/table_engines/distributed.md) +- [Tableau moteur Distribués](../../engines/table_engines/special/distributed.md) - [distributed\_replica\_error\_cap](#settings-distributed_replica_error_cap) ## distributed\_replica\_error\_cap {#settings-distributed_replica_error_cap} @@ -1037,12 +1040,12 @@ Le nombre d'erreurs de chaque réplique est plafonné à cette valeur, empêchan Voir aussi: -- [Tableau moteur Distribués](../../operations/table_engines/distributed.md) +- [Tableau moteur Distribués](../../engines/table_engines/special/distributed.md) - [distributed\_replica\_error\_half\_life](#settings-distributed_replica_error_half_life) ## distributed\_directory\_monitor\_sleep\_time\_ms {#distributed_directory_monitor_sleep_time_ms} -Intervalle de Base pour le [Distribué](../table_engines/distributed.md) tableau moteur à envoyer des données. L'intervalle réel augmente de façon exponentielle en cas d'erreurs. +Intervalle de Base pour le [Distribué](../../engines/table_engines/special/distributed.md) tableau moteur à envoyer des données. L'intervalle réel augmente de façon exponentielle en cas d'erreurs. Valeurs possibles: @@ -1052,7 +1055,7 @@ Valeur par défaut: 100 millisecondes. ## distributed\_directory\_monitor\_max\_sleep\_time\_ms {#distributed_directory_monitor_max_sleep_time_ms} -Intervalle maximal pour le [Distribué](../table_engines/distributed.md) tableau moteur à envoyer des données. Limite la croissance exponentielle de l'intervalle défini dans [distributed\_directory\_monitor\_sleep\_time\_ms](#distributed_directory_monitor_sleep_time_ms) paramètre. +Intervalle maximal pour le [Distribué](../../engines/table_engines/special/distributed.md) tableau moteur à envoyer des données. Limite la croissance exponentielle de l'intervalle défini dans [distributed\_directory\_monitor\_sleep\_time\_ms](#distributed_directory_monitor_sleep_time_ms) paramètre. Valeurs possibles: @@ -1064,7 +1067,7 @@ Valeur par défaut: 30000 millisecondes (30 secondes). Active / désactive l'envoi des données insérées par lots. -Lorsque l'envoi par lots est activé, le [Distribué](../table_engines/distributed.md) tableau moteur essaie d'envoyer plusieurs fichiers de données insérées dans une seule opération au lieu de les envoyer séparément. L'envoi par lots améliore les performances du cluster en utilisant mieux les ressources du serveur et du réseau. +Lorsque l'envoi par lots est activé, le [Distribué](../../engines/table_engines/special/distributed.md) tableau moteur essaie d'envoyer plusieurs fichiers de données insérées dans une seule opération au lieu de les envoyer séparément. L'envoi par lots améliore les performances du cluster en utilisant mieux les ressources du serveur et du réseau. Valeurs possibles: @@ -1090,7 +1093,7 @@ Valeur par défaut: 0. ## query\_profiler\_real\_time\_period\_ns {#query_profiler_real_time_period_ns} -Définit la période pour une horloge réelle de la [requête profiler](../../operations/performance/sampling_query_profiler.md). La vraie minuterie d'horloge compte le temps d'horloge murale. +Définit la période pour une horloge réelle de la [requête profiler](../../operations/optimizing_performance/sampling_query_profiler.md). La vraie minuterie d'horloge compte le temps d'horloge murale. 
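Les valeurs possibles sont détaillées ci-dessous. Esquisse indicative d'activation au niveau de la session (la requête sur `numbers` ne sert qu'à générer de la charge à échantillonner) :

``` sql
-- Esquisse : échantillonner l'horloge murale toutes les 10 ms pour cette session.
SET query_profiler_real_time_period_ns = 10000000;
SELECT count() FROM numbers(100000000);
-- Les traces collectées sont ensuite consultables dans system.trace_log.
```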
Valeurs possibles: @@ -1103,17 +1106,17 @@ Valeurs possibles: - 0 pour éteindre la minuterie. -Type: [UInt64](../../data_types/int_uint.md). +Type: [UInt64](../../sql_reference/data_types/int_uint.md). Valeur par défaut: 1000000000 nanosecondes (une fois par seconde). Voir aussi: -- Système de table [trace\_log](../system_tables.md#system_tables-trace_log) +- Table système [trace\_log](../../operations/system_tables.md#system_tables-trace_log) ## query\_profiler\_cpu\_time\_period\_ns {#query_profiler_cpu_time_period_ns} -Définit la période pour une minuterie D'horloge CPU du [requête profiler](../../operations/performance/sampling_query_profiler.md). Cette minuterie ne compte que le temps CPU. +Définit la période pour une minuterie d'horloge CPU du [profileur de requête](../../operations/optimizing_performance/sampling_query_profiler.md). Cette minuterie ne compte que le temps CPU. Valeurs possibles: @@ -1126,17 +1129,17 @@ Valeurs possibles: - 0 pour éteindre la minuterie. -Type: [UInt64](../../data_types/int_uint.md). +Type: [UInt64](../../sql_reference/data_types/int_uint.md). Valeur par défaut: 1000000000 nanosecondes. Voir aussi: -- Système de table [trace\_log](../system_tables.md#system_tables-trace_log) +- Table système [trace\_log](../../operations/system_tables.md#system_tables-trace_log) ## allow\_introspection\_functions {#settings-allow_introspection_functions} -Active des désactive [obscures fonctions](../../query_language/functions/introspection.md) pour le profilage de requête. +Active ou désactive les [fonctions d'introspection](../../sql_reference/functions/introspection.md) pour le profilage de requête. Valeurs possibles: @@ -1147,8 +1150,8 @@ Valeur par défaut: 0. **Voir Aussi** -- [Échantillonnage Du Profileur De Requête](../performance/sampling_query_profiler.md) -- Système de table [trace\_log](../system_tables.md#system_tables-trace_log) +- [Échantillonnage Du Profileur De Requête](../optimizing_performance/sampling_query_profiler.md) +- Table système [trace\_log](../../operations/system_tables.md#system_tables-trace_log) ## input\_format\_parallel\_parsing {#input-format-parallel-parsing} diff --git a/docs/fr/operations/settings/settings_profiles.md b/docs/fr/operations/settings/settings_profiles.md index 23b36d698cb..8a25c440d31 100644 --- a/docs/fr/operations/settings/settings_profiles.md +++ b/docs/fr/operations/settings/settings_profiles.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 61 +toc_title: "Les Param\xE8tres Des Profils" --- # Les Paramètres Des Profils {#settings-profiles} diff --git a/docs/fr/operations/settings/settings_users.md b/docs/fr/operations/settings/settings_users.md index 13134f1a2fd..2565f820163 100644 --- a/docs/fr/operations/settings/settings_users.md +++ b/docs/fr/operations/settings/settings_users.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 63 +toc_title: "Les Param\xE8tres De L'Utilisateur" --- # Les Paramètres De L'Utilisateur {#user-settings} @@ -140,6 +143,6 @@ La configuration suivante force cet utilisateur `user1` ne peut voir les lignes
Le filtrage est incompatible avec `PREWHERE` opérations et désactive `WHERE→PREWHERE` optimisation. +Le `filter` peut être n'importe quelle expression résultant en un [UInt8](../../sql_reference/data_types/int_uint.md)-le type de la valeur. Il contient généralement des comparaisons et des opérateurs logiques. Les lignes de `database_name.table1` où filtrer les résultats à 0 ne sont pas retournés pour cet utilisateur. Le filtrage est incompatible avec `PREWHERE` opérations et désactive `WHERE→PREWHERE` optimisation. [Article Original](https://clickhouse.tech/docs/en/operations/settings/settings_users/) diff --git a/docs/fr/operations/system_tables.md b/docs/fr/operations/system_tables.md index 85fbf31b3d1..99fe7e2a00e 100644 --- a/docs/fr/operations/system_tables.md +++ b/docs/fr/operations/system_tables.md @@ -1,8 +1,11 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 52 +toc_title: "Les Tables Syst\xE8me" --- -# Les tables système {#system-tables} +# Les Tables Système {#system-tables} Les tables système sont utilisées pour implémenter une partie des fonctionnalités du système et pour fournir un accès à des informations sur le fonctionnement du système. Vous ne pouvez pas supprimer une table système (mais vous pouvez effectuer un détachement). @@ -16,8 +19,8 @@ Contient des mesures qui sont calculées périodiquement en arrière-plan. Par e Colonne: -- `metric` ([Chaîne](../data_types/string.md)) — Metric name. -- `value` ([Float64](../data_types/float.md)) — Metric value. +- `metric` ([Chaîne](../sql_reference/data_types/string.md)) — Metric name. +- `value` ([Float64](../sql_reference/data_types/float.md)) — Metric value. **Exemple** @@ -68,7 +71,7 @@ Veuillez noter que `errors_count` est mise à jour une fois par requête à la g **Voir aussi** -- [Tableau moteur Distribués](table_engines/distributed.md) +- [Tableau moteur Distribués](../engines/table_engines/special/distributed.md) - [paramètre distributed\_replica\_error\_cap](settings/settings.md#settings-distributed_replica_error_cap) - [paramètre distributed\_replica\_error\_half\_life](settings/settings.md#settings-distributed_replica_error_half_life) @@ -76,7 +79,7 @@ Veuillez noter que `errors_count` est mise à jour une fois par requête à la g Contient des informations sur les colonnes de toutes les tables. -Vous pouvez utiliser ce tableau pour obtenir des informations similaires à l' [DESCRIBE TABLE](../query_language/misc.md#misc-describe-table) requête, mais pour plusieurs tables à la fois. +Vous pouvez utiliser ce tableau pour obtenir des informations similaires à l' [DESCRIBE TABLE](../sql_reference/statements/misc.md#misc-describe-table) requête, mais pour plusieurs tables à la fois. Le `system.columns` le tableau contient les colonnes suivantes (la colonne type est indiqué entre parenthèses): @@ -144,7 +147,7 @@ Cette table système est utilisée pour implémenter `SHOW DATABASES` requête. ## système.detached\_parts {#system_tables-detached_parts} -Contient des informations sur les pièces détachées de [MergeTree](table_engines/mergetree.md) table. Le `reason` colonne spécifie pourquoi la pièce a été détachée. Pour les pièces détachées par l'utilisateur, la raison est vide. De telles pièces peuvent être attachées avec [ALTER TABLE ATTACH PARTITION\|PART](../query_language/query_language/alter/#alter_attach-partition) commande. Pour la description des autres colonnes, voir [système.partie](#system_tables-parts). 
Si le nom de pièce n'est pas valide, les valeurs de certaines colonnes peuvent être `NULL`. Ces pièces peuvent être supprimés avec [ALTER TABLE DROP DETACHED PART](../query_language/query_language/alter/#alter_drop-detached). +Contient des informations sur les pièces détachées de [MergeTree](../engines/table_engines/mergetree_family/mergetree.md) table. Le `reason` colonne spécifie pourquoi la pièce a été détachée. Pour les pièces détachées par l'utilisateur, la raison est vide. De telles pièces peuvent être attachées avec [ALTER TABLE ATTACH PARTITION\|PART](../query_language/query_language/alter/#alter_attach-partition) commande. Pour la description des autres colonnes, voir [système.partie](#system_tables-parts). Si le nom de pièce n'est pas valide, les valeurs de certaines colonnes peuvent être `NULL`. Ces pièces peuvent être supprimés avec [ALTER TABLE DROP DETACHED PART](../query_language/query_language/alter/#alter_drop-detached). ## système.dictionnaire {#system-dictionaries} @@ -174,9 +177,9 @@ Contient des informations sur le nombre d'événements survenus dans le système Colonne: -- `event` ([Chaîne](../data_types/string.md)) — Event name. -- `value` ([UInt64](../data_types/int_uint.md)) — Number of events occurred. -- `description` ([Chaîne](../data_types/string.md)) — Event description. +- `event` ([Chaîne](../sql_reference/data_types/string.md)) — Event name. +- `value` ([UInt64](../sql_reference/data_types/int_uint.md)) — Number of events occurred. +- `description` ([Chaîne](../sql_reference/data_types/string.md)) — Event description. **Exemple** @@ -212,7 +215,7 @@ Colonne: ## système.graphite\_retentions {#system-graphite-retentions} -Contient des informations sur les paramètres [graphite\_rollup](server_settings/settings.md#server_settings-graphite_rollup) qui sont utilisés dans les tableaux avec [\* GraphiteMergeTree](table_engines/graphitemergetree.md) moteur. +Contient des informations sur les paramètres [graphite\_rollup](server_configuration_parameters/settings.md#server_configuration_parameters-graphite_rollup) qui sont utilisés dans les tableaux avec [\* GraphiteMergeTree](../engines/table_engines/mergetree_family/graphitemergetree.md) moteur. Colonne: @@ -252,11 +255,11 @@ Contient des mesures qui peuvent être calculées instantanément, ou ont une va Colonne: -- `metric` ([Chaîne](../data_types/string.md)) — Metric name. -- `value` ([Int64](../data_types/int_uint.md)) — Metric value. -- `description` ([Chaîne](../data_types/string.md)) — Metric description. +- `metric` ([Chaîne](../sql_reference/data_types/string.md)) — Metric name. +- `value` ([Int64](../sql_reference/data_types/int_uint.md)) — Metric value. +- `description` ([Chaîne](../sql_reference/data_types/string.md)) — Metric description. -La liste des mesures que vous pouvez trouver dans le [SGBD / src / Common / CurrentMetrics.rpc](https://github.com/ClickHouse/ClickHouse/blob/master/src/src/Common/CurrentMetrics.cpp) fichier source de ClickHouse. +La liste des mesures que vous pouvez trouver dans le [SGBD / commun / CurrentMetrics.rpc](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/Common/CurrentMetrics.cpp) fichier source de ClickHouse. **Exemple** @@ -361,13 +364,13 @@ Ceci est similaire à la table double trouvée dans d'autres SGBD. ## système.partie {#system_tables-parts} -Contient des informations sur les parties de [MergeTree](table_engines/mergetree.md) table. +Contient des informations sur les parties de [MergeTree](../engines/table_engines/mergetree_family/mergetree.md) table. 
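Les colonnes de cette table sont détaillées ci-dessous. Esquisse d'interrogation typique, sous l'hypothèse (conforme aux colonnes décrites ici) que `active` et `bytes_on_disk` sont disponibles :

``` sql
-- Esquisse : volume sur disque des parties actives, par table.
SELECT database, table, count() AS parts, sum(bytes_on_disk) AS bytes
FROM system.parts
WHERE active
GROUP BY database, table
ORDER BY bytes DESC
LIMIT 10;
```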
Chaque ligne décrit une partie des données. Colonne: -- `partition` (String) – The partition name. To learn what a partition is, see the description of the [ALTER](../query_language/alter.md#query_language_queries_alter) requête. +- `partition` (String) – The partition name. To learn what a partition is, see the description of the [ALTER](../sql_reference/statements/alter.md#query_language_queries_alter) requête. Format: @@ -418,7 +421,7 @@ Colonne: - `primary_key_bytes_in_memory_allocated` (`UInt64`) – The amount of memory (in bytes) reserved for primary key values. -- `is_frozen` (`UInt8`) – Flag that shows that a partition data backup exists. 1, the backup exists. 0, the backup doesn't exist. For more details, see [FREEZE PARTITION](../query_language/alter.md#alter_freeze-partition) +- `is_frozen` (`UInt8`) – Flag that shows that a partition data backup exists. 1, the backup exists. 0, the backup doesn't exist. For more details, see [FREEZE PARTITION](../sql_reference/statements/alter.md#alter_freeze-partition) - `database` (`String`) – Name of the database. @@ -430,11 +433,11 @@ Colonne: - `disk` (`String`) – Name of a disk that stores the data part. -- `hash_of_all_files` (`String`) – [sipHash128](../query_language/functions/hash_functions.md#hash_functions-siphash128) de fichiers compressés. +- `hash_of_all_files` (`String`) – [sipHash128](../sql_reference/functions/hash_functions.md#hash_functions-siphash128) de fichiers compressés. -- `hash_of_uncompressed_files` (`String`) – [sipHash128](../query_language/functions/hash_functions.md#hash_functions-siphash128) de fichiers non compressés (fichiers avec des marques, fichier d'index, etc.). +- `hash_of_uncompressed_files` (`String`) – [sipHash128](../sql_reference/functions/hash_functions.md#hash_functions-siphash128) de fichiers non compressés (fichiers avec des marques, fichier d'index, etc.). -- `uncompressed_hash_of_compressed_files` (`String`) – [sipHash128](../query_language/functions/hash_functions.md#hash_functions-siphash128) des données dans les fichiers compressés comme s'ils étaient non compressé. +- `uncompressed_hash_of_compressed_files` (`String`) – [sipHash128](../sql_reference/functions/hash_functions.md#hash_functions-siphash128) des données dans les fichiers compressés comme s'ils étaient non compressé. - `bytes` (`UInt64`) – Alias for `bytes_on_disk`. @@ -442,9 +445,9 @@ Colonne: ## système.part\_log {#system_tables-part-log} -Le `system.part_log` la table est créée uniquement si [part\_log](server_settings/settings.md#server_settings-part-log) serveur paramètre est spécifié. +Le `system.part_log` la table est créée uniquement si [part\_log](server_configuration_parameters/settings.md#server_configuration_parameters-part-log) serveur paramètre est spécifié. -Ce tableau contient des informations sur les événements survenus avec [les parties de données](table_engines/custom_partitioning_key.md) dans le [MergeTree](table_engines/mergetree.md) table de famille, telles que l'ajout ou la fusion de données. +Ce tableau contient des informations sur les événements survenus avec [les parties de données](../engines/table_engines/mergetree_family/custom_partitioning_key.md) dans le [MergeTree](../engines/table_engines/mergetree_family/mergetree.md) table de famille, telles que l'ajout ou la fusion de données. Le `system.part_log` le tableau contient les colonnes suivantes: @@ -452,7 +455,7 @@ Le `system.part_log` le tableau contient les colonnes suivantes: - `NEW_PART` — Inserting of a new data part. 
- `MERGE_PARTS` — Merging of data parts. - `DOWNLOAD_PART` — Downloading a data part. - - `REMOVE_PART` — Removing or detaching a data part using [DETACH PARTITION](../query_language/alter.md#alter_detach-partition). + - `REMOVE_PART` — Removing or detaching a data part using [DETACH PARTITION](../sql_reference/statements/alter.md#alter_detach-partition). - `MUTATE_PART` — Mutating of a data part. - `MOVE_PART` — Moving the data part from the one disk to another one. - `event_date` (Date) — Event date. @@ -523,7 +526,7 @@ Contient des informations sur l'exécution de requêtes. Pour chaque requête, v !!! note "Note" Le tableau ne contient pas les données d'entrée pour `INSERT` requête. -Clickhouse crée cette table uniquement si [query\_log](server_settings/settings.md#server_settings-query-log) serveur paramètre est spécifié. Ce paramètre définit les règles de journalisation, tels que l'intervalle d'enregistrement ou le nom de la table, la requête sera connecté. +Clickhouse crée cette table uniquement si [query\_log](server_configuration_parameters/settings.md#server_configuration_parameters-query-log) serveur paramètre est spécifié. Ce paramètre définit les règles de journalisation, tels que l'intervalle d'enregistrement ou le nom de la table, la requête sera connecté. Pour activer la journalisation des requêtes, définissez [log\_queries](settings/settings.md#settings-log-queries) paramètre 1. Pour plus de détails, voir le [Paramètre](settings/settings.md) section. @@ -593,20 +596,20 @@ Chaque requête crée une ou deux lignes dans le `query_log` le tableau, en fonc 2. Si une erreur s'est produite pendant le traitement de la requête, deux événements avec les types 1 et 4 sont créés. 3. Si une erreur s'est produite avant le lancement de la requête, un seul événement de type 3 est créé. -Par défaut, les journaux sont ajoutés à la table à des intervalles de 7,5 secondes. Vous pouvez définir cet intervalle dans la [query\_log](server_settings/settings.md#server_settings-query-log) configuration du serveur (voir `flush_interval_milliseconds` paramètre). Pour vider les journaux de force du tampon mémoire dans la table, utilisez le `SYSTEM FLUSH LOGS` requête. +Par défaut, les journaux sont ajoutés à la table à des intervalles de 7,5 secondes. Vous pouvez définir cet intervalle dans la [query\_log](server_configuration_parameters/settings.md#server_configuration_parameters-query-log) configuration du serveur (voir `flush_interval_milliseconds` paramètre). Pour vider les journaux de force du tampon mémoire dans la table, utilisez le `SYSTEM FLUSH LOGS` requête. Lorsque la table est supprimée manuellement, il sera automatiquement créé à la volée. Notez que tous les précédents journaux seront supprimés. !!! note "Note" La période de stockage des journaux est illimitée. Les journaux ne sont pas automatiquement supprimés de la table. Vous devez organiser vous-même la suppression des journaux obsolètes. -Vous pouvez spécifier une clé de partitionnement arbitraire pour `system.query_log` la table dans le [query\_log](server_settings/settings.md#server_settings-query-log) configuration du serveur (voir `partition_by` paramètre). +Vous pouvez spécifier une clé de partitionnement arbitraire pour `system.query_log` la table dans le [query\_log](server_configuration_parameters/settings.md#server_configuration_parameters-query-log) configuration du serveur (voir `partition_by` paramètre). 
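Esquisse d'interrogation de cette table une fois la journalisation activée ; la valeur d'énumération `QueryFinish` correspond au type 2 évoqué plus haut (hypothèse fondée sur les versions courantes du serveur) :

``` sql
-- Esquisse : les cinq requêtes terminées les plus longues.
SELECT event_time, query_duration_ms, query
FROM system.query_log
WHERE type = 'QueryFinish'
ORDER BY query_duration_ms DESC
LIMIT 5;
```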
## système.query\_thread\_log {#system_tables-query-thread-log} La table contient des informations sur chaque thread d'exécution de requête. -Clickhouse crée cette table uniquement si [query\_thread\_log](server_settings/settings.md#server_settings-query-thread-log) serveur paramètre est spécifié. Ce paramètre définit les règles de journalisation, tels que l'intervalle d'enregistrement ou le nom de la table, la requête sera connecté. +Clickhouse crée cette table uniquement si [query\_thread\_log](server_configuration_parameters/settings.md#server_configuration_parameters-query-thread-log) serveur paramètre est spécifié. Ce paramètre définit les règles de journalisation, tels que l'intervalle d'enregistrement ou le nom de la table, la requête sera connecté. Pour activer la journalisation des requêtes, définissez [log\_query\_threads](settings/settings.md#settings-log-query-threads) paramètre 1. Pour plus de détails, voir le [Paramètre](settings/settings.md) section. @@ -658,43 +661,43 @@ Colonne: - `ProfileEvents.Names` (Array(String)) — Counters that measure different metrics for this thread. The description of them could be found in the table [système.événement](#system_tables-events) - `ProfileEvents.Values` (Array(UInt64)) — Values of metrics for this thread that are listed in the `ProfileEvents.Names` colonne. -Par défaut, les journaux sont ajoutés à la table à des intervalles de 7,5 secondes. Vous pouvez définir cet intervalle dans la [query\_thread\_log](server_settings/settings.md#server_settings-query-thread-log) configuration du serveur (voir `flush_interval_milliseconds` paramètre). Pour vider les journaux de force du tampon mémoire dans la table, utilisez le `SYSTEM FLUSH LOGS` requête. +Par défaut, les journaux sont ajoutés à la table à des intervalles de 7,5 secondes. Vous pouvez définir cet intervalle dans la [query\_thread\_log](server_configuration_parameters/settings.md#server_configuration_parameters-query-thread-log) configuration du serveur (voir `flush_interval_milliseconds` paramètre). Pour vider les journaux de force du tampon mémoire dans la table, utilisez le `SYSTEM FLUSH LOGS` requête. Lorsque la table est supprimée manuellement, il sera automatiquement créé à la volée. Notez que tous les précédents journaux seront supprimés. !!! note "Note" La période de stockage des journaux est illimitée. Les journaux ne sont pas automatiquement supprimés de la table. Vous devez organiser vous-même la suppression des journaux obsolètes. -Vous pouvez spécifier une clé de partitionnement arbitraire pour `system.query_thread_log` la table dans le [query\_thread\_log](server_settings/settings.md#server_settings-query-thread-log) configuration du serveur (voir `partition_by` paramètre). +Vous pouvez spécifier une clé de partitionnement arbitraire pour `system.query_thread_log` la table dans le [query\_thread\_log](server_configuration_parameters/settings.md#server_configuration_parameters-query-thread-log) configuration du serveur (voir `partition_by` paramètre). ## système.trace\_log {#system_tables-trace_log} Contient des traces de pile collectées par le profileur de requête d'échantillonnage. -Clickhouse crée cette table lorsque le [trace\_log](server_settings/settings.md#server_settings-trace_log) la section de configuration du serveur est définie. 
Aussi l' [query\_profiler\_real\_time\_period\_ns](settings/settings.md#query_profiler_real_time_period_ns) et [query\_profiler\_cpu\_time\_period\_ns](settings/settings.md#query_profiler_cpu_time_period_ns) paramètres doivent être définis. +Clickhouse crée cette table lorsque le [trace\_log](server_configuration_parameters/settings.md#server_configuration_parameters-trace_log) la section de configuration du serveur est définie. Aussi l' [query\_profiler\_real\_time\_period\_ns](settings/settings.md#query_profiler_real_time_period_ns) et [query\_profiler\_cpu\_time\_period\_ns](settings/settings.md#query_profiler_cpu_time_period_ns) paramètres doivent être définis. Pour analyser les journaux, utilisez `addressToLine`, `addressToSymbol` et `demangle` fonctions d'introspection. Colonne: -- `event_date`([Date](../data_types/date.md)) — Date of sampling moment. +- `event_date`([Date](../sql_reference/data_types/date.md)) — Date of sampling moment. -- `event_time`([DateTime](../data_types/datetime.md)) — Timestamp of sampling moment. +- `event_time`([DateTime](../sql_reference/data_types/datetime.md)) — Timestamp of sampling moment. -- `revision`([UInt32](../data_types/int_uint.md)) — ClickHouse server build revision. +- `revision`([UInt32](../sql_reference/data_types/int_uint.md)) — ClickHouse server build revision. Lors de la connexion au serveur par `clickhouse-client`, vous voyez la chaîne similaire à `Connected to ClickHouse server version 19.18.1 revision 54429.`. Ce champ contient le `revision` mais pas le `version` d'un serveur. -- `timer_type`([Enum8](../data_types/enum.md)) — Timer type: +- `timer_type`([Enum8](../sql_reference/data_types/enum.md)) — Timer type: - `Real` représente l'horloge murale. - `CPU` représente le temps CPU. -- `thread_number`([UInt32](../data_types/int_uint.md)) — Thread identifier. +- `thread_number`([UInt32](../sql_reference/data_types/int_uint.md)) — Thread identifier. -- `query_id`([Chaîne](../data_types/string.md)) — Query identifier that can be used to get details about a query that was running from the [query\_log](#system_tables-query_log) système de table. +- `query_id`([Chaîne](../sql_reference/data_types/string.md)) — Query identifier that can be used to get details about a query that was running from the [query\_log](#system_tables-query_log) système de table. -- `trace`([Tableau (UInt64)](../data_types/array.md)) — Stack trace at the moment of sampling. Each element is a virtual memory address inside ClickHouse server process. +- `trace`([Tableau (UInt64)](../sql_reference/data_types/array.md)) — Stack trace at the moment of sampling. Each element is a virtual memory address inside ClickHouse server process. **Exemple** @@ -845,25 +848,42 @@ Colonne: - `name` (String) — Setting name. - `value` (String) — Setting value. +- `description` (String) — Setting description. +- `type` (String) — Setting type (implementation specific string value). - `changed` (UInt8) — Whether the setting was explicitly defined in the config or explicitly changed. +- `min` (Nullable(String)) — Get minimum allowed value (if any is set via [contraintes](settings/constraints_on_settings.md#constraints-on-settings)). +- `max` (Nullable(String)) — Get maximum allowed value (if any is set via [contraintes](settings/constraints_on_settings.md#constraints-on-settings)). +- `readonly` (UInt8) — Can user change this setting (for more info, look into [contraintes](settings/constraints_on_settings.md#constraints-on-settings)). 
Exemple: ``` sql -SELECT * +SELECT name, value FROM system.settings WHERE changed ``` ``` text -┌─name───────────────────┬─value───────┬─changed─┐ -│ max_threads │ 8 │ 1 │ -│ use_uncompressed_cache │ 0 │ 1 │ -│ load_balancing │ random │ 1 │ -│ max_memory_usage │ 10000000000 │ 1 │ -└────────────────────────┴─────────────┴─────────┘ +┌─name───────────────────┬─value───────┐ +│ max_threads │ 8 │ +│ use_uncompressed_cache │ 0 │ +│ load_balancing │ random │ +│ max_memory_usage │ 10000000000 │ +└────────────────────────┴─────────────┘ ``` +## système.merge\_tree\_settings {#system-merge_tree_settings} + +Contient des informations sur les paramètres pour `MergeTree` table. + +Colonne: + +- `name` (String) — Setting name. +- `value` (String) — Setting value. +- `description` (String) — Setting description. +- `type` (String) — Setting type (implementation specific string value). +- `changed` (UInt8) — Whether the setting was explicitly defined in the config or explicitly changed. + ## système.tableau\_moteurs {#system-table-engines} Contient une description des moteurs de table pris en charge par le serveur et leurs informations de support de fonctionnalité. @@ -872,10 +892,10 @@ Ce tableau contient les colonnes suivantes (le type de colonne est indiqué entr - `name` (String) — The name of table engine. - `supports_settings` (UInt8) — Flag that indicates if table engine supports `SETTINGS` clause. -- `supports_skipping_indices` (UInt8) — Flag that indicates if table engine supports [sauter les indices](table_engines/mergetree/#table_engine-mergetree-data_skipping-indexes). -- `supports_ttl` (UInt8) — Flag that indicates if table engine supports [TTL](table_engines/mergetree/#table_engine-mergetree-ttl). +- `supports_skipping_indices` (UInt8) — Flag that indicates if table engine supports [sauter les indices](../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-data_skipping-indexes). +- `supports_ttl` (UInt8) — Flag that indicates if table engine supports [TTL](../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-ttl). - `supports_sort_order` (UInt8) — Flag that indicates if table engine supports clauses `PARTITION_BY`, `PRIMARY_KEY`, `ORDER_BY` et `SAMPLE_BY`. -- `supports_replication` (UInt8) — Flag that indicates if table engine supports [réplication des données](table_engines/replication/). +- `supports_replication` (UInt8) — Flag that indicates if table engine supports [réplication des données](../engines/table_engines/mergetree_family/replication.md). - `supports_duduplication` (UInt8) — Flag that indicates if table engine supports data deduplication. Exemple: @@ -896,9 +916,9 @@ WHERE name in ('Kafka', 'MergeTree', 'ReplicatedCollapsingMergeTree') **Voir aussi** -- Famille MergeTree [les clauses de requête](table_engines/mergetree.md#mergetree-query-clauses) -- Kafka [paramètre](table_engines/kafka.md#table_engine-kafka-creating-a-table) -- Rejoindre [paramètre](table_engines/join.md#join-limitations-and-settings) +- Famille MergeTree [les clauses de requête](../engines/table_engines/mergetree_family/mergetree.md#mergetree-query-clauses) +- Kafka [paramètre](../engines/table_engines/integrations/kafka.md#table_engine-kafka-creating-a-table) +- Rejoindre [paramètre](../engines/table_engines/special/join.md#join-limitations-and-settings) ## système.table {#system-tables} @@ -907,21 +927,47 @@ Contient les métadonnées de chaque table que le serveur connaît. 
Les tableaux détachés ne sont pas affichés dans `system.tables`.

Ce tableau contient les colonnes suivantes (le type de colonne est indiqué entre parenthèses):

- `database` (String) — The name of the database the table is in.
+
- `name` (String) — Table name.
+
- `engine` (String) — Table engine name (without parameters).
+
- `is_temporary` (UInt8) - indicateur indiquant si la table est temporaire.
+
- `data_path` (String) - chemin d'accès aux données de la table dans le système de fichiers.
+
- `metadata_path` (String) - chemin d'accès aux métadonnées de la table dans le système de fichiers.
+
- `metadata_modification_time` (DateTime) - Heure de la dernière modification des métadonnées de la table.
+
- `dependencies_database` (Array (String)) - dépendances de base de données.
-- `dependencies_table` (Array (String)) - dépendances de Table ([MaterializedView](table_engines/materializedview.md) tables basées sur le tableau actuel).
+
+- `dependencies_table` (Array (String)) - dépendances de Table ([MaterializedView](../engines/table_engines/special/materializedview.md) tables basées sur le tableau actuel).
+
- `create_table_query` (String) - la requête qui a été utilisée pour créer la table.
+
- `engine_full` (String) - paramètres du moteur de table.
+
- `partition_key` (String) - l'expression de clé de partition spécifiée dans le tableau.
+
- `sorting_key` (String) - l'expression de clé de tri spécifiée dans la table.
+
- `primary_key` (String) - l'expression de clé primaire spécifiée dans la table.
+
- `sampling_key` (String) - l'expression de clé d'échantillonnage spécifiée dans la table.
+- `storage_policy` (String) - La politique de stockage:
+
+    - [MergeTree](../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-multiple-volumes)
+    - [Distribué](../engines/table_engines/special/distributed.md#distributed)
+
+- `total_rows` (Nullable (UInt64)) - Nombre total de lignes, s'il est possible de déterminer rapidement le nombre exact de lignes dans la table, sinon `Null` (y compris la table `Buffer` sous-jacente).
+
+- `total_bytes` (Nullable (UInt64)) - Nombre total d'octets, s'il est possible de déterminer rapidement le nombre exact d'octets pour la table sur le stockage, sinon `Null` (**sans** inclure le stockage sous-jacent).
+
+    - If the table stores data on disk, returns used space on disk (i.e. compressed).
+    - Si la table stocke des données en mémoire, renvoie un nombre approximatif d'octets utilisés en mémoire.
+
La table `system.tables` est utilisée dans l'implémentation de la requête `SHOW TABLES`.

## système.zookeeper {#system-zookeeper}

@@ -997,7 +1043,7 @@ path: /clickhouse/tables/01-08/visits/replicas

## système.mutation {#system_tables-mutations}

-Le tableau contient des informations sur [mutation](../query_language/alter.md#alter-mutations) des tables MergeTree et leur progression. Chaque commande de mutation est représentée par une seule ligne. Le tableau comporte les colonnes suivantes:
+Le tableau contient des informations sur [mutation](../sql_reference/statements/alter.md#alter-mutations) des tables MergeTree et leur progression. Chaque commande de mutation est représentée par une seule ligne. Le tableau comporte les colonnes suivantes:

**base de données**, **table** - Le nom de la base de données et de la table à laquelle la mutation a été appliquée.
@@ -1023,28 +1069,28 @@ S'il y avait des problèmes avec la mutation de certaines parties, les colonnes ## système.disque {#system_tables-disks} -Contient des informations sur les disques définis dans [configuration du serveur](table_engines/mergetree.md#table_engine-mergetree-multiple-volumes_configure). +Contient des informations sur les disques définis dans [configuration du serveur](../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-multiple-volumes_configure). Colonne: -- `name` ([Chaîne](../data_types/string.md)) — Name of a disk in the server configuration. -- `path` ([Chaîne](../data_types/string.md)) — Path to the mount point in the file system. -- `free_space` ([UInt64](../data_types/int_uint.md)) — Free space on disk in bytes. -- `total_space` ([UInt64](../data_types/int_uint.md)) — Disk volume in bytes. -- `keep_free_space` ([UInt64](../data_types/int_uint.md)) — Amount of disk space that should stay free on disk in bytes. Defined in the `keep_free_space_bytes` paramètre de configuration du disque. +- `name` ([Chaîne](../sql_reference/data_types/string.md)) — Name of a disk in the server configuration. +- `path` ([Chaîne](../sql_reference/data_types/string.md)) — Path to the mount point in the file system. +- `free_space` ([UInt64](../sql_reference/data_types/int_uint.md)) — Free space on disk in bytes. +- `total_space` ([UInt64](../sql_reference/data_types/int_uint.md)) — Disk volume in bytes. +- `keep_free_space` ([UInt64](../sql_reference/data_types/int_uint.md)) — Amount of disk space that should stay free on disk in bytes. Defined in the `keep_free_space_bytes` paramètre de configuration du disque. ## système.storage\_policies {#system_tables-storage_policies} -Contient des informations sur les stratégies de stockage et les volumes définis [configuration du serveur](table_engines/mergetree.md#table_engine-mergetree-multiple-volumes_configure). +Contient des informations sur les stratégies de stockage et les volumes définis [configuration du serveur](../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-multiple-volumes_configure). Colonne: -- `policy_name` ([Chaîne](../data_types/string.md)) — Name of the storage policy. -- `volume_name` ([Chaîne](../data_types/string.md)) — Volume name defined in the storage policy. -- `volume_priority` ([UInt64](../data_types/int_uint.md)) — Volume order number in the configuration. -- `disks` ([Tableau(String)](../data_types/array.md)) — Disk names, defined in the storage policy. -- `max_data_part_size` ([UInt64](../data_types/int_uint.md)) — Maximum size of a data part that can be stored on volume disks (0 — no limit). -- `move_factor` ([Float64](../data_types/float.md)) — Ratio of free disk space. When the ratio exceeds the value of configuration parameter, ClickHouse start to move data to the next volume in order. +- `policy_name` ([Chaîne](../sql_reference/data_types/string.md)) — Name of the storage policy. +- `volume_name` ([Chaîne](../sql_reference/data_types/string.md)) — Volume name defined in the storage policy. +- `volume_priority` ([UInt64](../sql_reference/data_types/int_uint.md)) — Volume order number in the configuration. +- `disks` ([Tableau(String)](../sql_reference/data_types/array.md)) — Disk names, defined in the storage policy. +- `max_data_part_size` ([UInt64](../sql_reference/data_types/int_uint.md)) — Maximum size of a data part that can be stored on volume disks (0 — no limit). 
+- `move_factor` ([Float64](../sql_reference/data_types/float.md)) — Ratio of free disk space. When the ratio exceeds the value of configuration parameter, ClickHouse start to move data to the next volume in order. Si la stratégie de stockage contient plus d'un volume, les informations pour chaque volume sont stockées dans la ligne individuelle de la table. diff --git a/docs/fr/operations/table_engines/materializedview.md b/docs/fr/operations/table_engines/materializedview.md deleted file mode 100644 index 599b7274028..00000000000 --- a/docs/fr/operations/table_engines/materializedview.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -machine_translated: true ---- - -# MaterializedView {#materializedview} - -Utilisé pour implémenter des vues matérialisées (pour plus d'informations, voir [CREATE TABLE](../../query_language/create.md)). Pour stocker des données, il utilise un moteur différent qui a été spécifié lors de la création de la vue. Lors de la lecture d'une table, il utilise juste ce moteur. - -[Article Original](https://clickhouse.tech/docs/en/operations/table_engines/materializedview/) diff --git a/docs/fr/operations/tips.md b/docs/fr/operations/tips.md index 55dbf8a065b..5122f21c0c1 100644 --- a/docs/fr/operations/tips.md +++ b/docs/fr/operations/tips.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 58 +toc_title: Recommandations D'Utilisation --- # Recommandations D'Utilisation {#usage-recommendations} @@ -245,4 +248,4 @@ script end script ``` -[Article Original](https://clickhouse.tech/docs/en/operations/tips/) +{## [Article Original](https://clickhouse.tech/docs/en/operations/tips/) ##} diff --git a/docs/fr/operations/troubleshooting.md b/docs/fr/operations/troubleshooting.md index d8214cdcddc..e334786194f 100644 --- a/docs/fr/operations/troubleshooting.md +++ b/docs/fr/operations/troubleshooting.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 46 +toc_title: "D\xE9pannage" --- # Dépannage {#troubleshooting} @@ -11,12 +14,12 @@ machine_translated: true ## Installation {#troubleshooting-installation-errors} -### Vous ne pouvez pas obtenir de paquets deb à partir du référentiel ClickHouse avec apt-get {#you-cannot-get-deb-packages-from-clickhouse-repository-with-apt-get} +### Vous ne pouvez pas obtenir de paquets deb à partir du référentiel Clickhouse avec Apt-get {#you-cannot-get-deb-packages-from-clickhouse-repository-with-apt-get} - Vérifiez les paramètres du pare-feu. - Si vous ne pouvez pas accéder au référentiel pour quelque raison que ce soit, téléchargez les packages comme décrit dans [Prise en main](../getting_started/index.md) article et les installer manuellement en utilisant le `sudo dpkg -i ` commande. Vous aurez aussi besoin d' `tzdata` paquet. -## Connexion au Serveur {#troubleshooting-accepts-no-connections} +## Connexion au serveur {#troubleshooting-accepts-no-connections} Problèmes possibles: @@ -102,7 +105,7 @@ Vérifier: - Paramètres du point de terminaison. - Vérifier [listen\_host](server_settings/settings.md#server_settings-listen_host) et [tcp\_port](server_settings/settings.md#server_settings-tcp_port) paramètre. + Vérifier [listen\_host](server_configuration_parameters/settings.md#server_configuration_parameters-listen_host) et [tcp\_port](server_configuration_parameters/settings.md#server_configuration_parameters-tcp_port) paramètre. Clickhouse server accepte les connexions localhost uniquement par défaut. 
@@ -114,8 +117,8 @@ Vérifier: Vérifier: - - Le [tcp\_port\_secure](server_settings/settings.md#server_settings-tcp_port_secure) paramètre. - - Paramètres pour [SSL sertificates](server_settings/settings.md#server_settings-openssl). + - Le [tcp\_port\_secure](server_configuration_parameters/settings.md#server_configuration_parameters-tcp_port_secure) paramètre. + - Paramètres pour [SSL sertificates](server_configuration_parameters/settings.md#server_configuration_parameters-openssl). Utilisez les paramètres appropriés lors de la connexion. Par exemple, l'utilisation de la `port_secure` paramètre avec `clickhouse_client`. @@ -136,7 +139,7 @@ Si vous commencez à `clickhouse-client` avec l' `stack-trace` paramètre, Click Vous pouvez voir un message sur une connexion rompue. Dans ce cas, vous pouvez répéter la requête. Si la connexion se rompt chaque fois que vous effectuez la requête, vérifiez les journaux du serveur pour détecter les erreurs. -## Efficacité du traitement des requêtes {#troubleshooting-too-slow} +## Efficacité Du Traitement Des Requêtes {#troubleshooting-too-slow} Si vous voyez que ClickHouse fonctionne trop lentement, vous devez profiler la charge sur les ressources du serveur et le réseau pour vos requêtes. diff --git a/docs/fr/operations/update.md b/docs/fr/operations/update.md index e92147e2d65..67aecbc9c1a 100644 --- a/docs/fr/operations/update.md +++ b/docs/fr/operations/update.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 47 +toc_title: "Mise \xC0 Jour De ClickHouse" --- # Mise À Jour De ClickHouse {#clickhouse-update} diff --git a/docs/fr/operations/utils/clickhouse-benchmark.md b/docs/fr/operations/utilities/clickhouse-benchmark.md similarity index 98% rename from docs/fr/operations/utils/clickhouse-benchmark.md rename to docs/fr/operations/utilities/clickhouse-benchmark.md index 00c3f588039..ebf31a962e1 100644 --- a/docs/fr/operations/utils/clickhouse-benchmark.md +++ b/docs/fr/operations/utilities/clickhouse-benchmark.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 61 +toc_title: clickhouse-benchmark --- # clickhouse-benchmark {#clickhouse-benchmark} diff --git a/docs/fr/operations/utils/clickhouse-copier.md b/docs/fr/operations/utilities/clickhouse-copier.md similarity index 98% rename from docs/fr/operations/utils/clickhouse-copier.md rename to docs/fr/operations/utilities/clickhouse-copier.md index de7bb56f32b..31901b29baf 100644 --- a/docs/fr/operations/utils/clickhouse-copier.md +++ b/docs/fr/operations/utilities/clickhouse-copier.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 59 +toc_title: clickhouse-copieur --- # clickhouse-copieur {#clickhouse-copier} diff --git a/docs/fr/operations/utils/clickhouse-local.md b/docs/fr/operations/utilities/clickhouse-local.md similarity index 94% rename from docs/fr/operations/utils/clickhouse-local.md rename to docs/fr/operations/utilities/clickhouse-local.md index 60e3c885d57..6051825dda0 100644 --- a/docs/fr/operations/utils/clickhouse-local.md +++ b/docs/fr/operations/utilities/clickhouse-local.md @@ -1,12 +1,15 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 60 +toc_title: clickhouse-local --- # clickhouse-local {#clickhouse-local} Le `clickhouse-local` programme vous permet d'effectuer un traitement rapide sur les 
fichiers locaux, sans avoir à déployer et configurer le serveur ClickHouse. -Accepte les données qui représentent des tables et les interroge en utilisant [Clickhouse dialecte SQL](../../query_language/index.md). +Accepte les données qui représentent des tables et les interroge en utilisant [Clickhouse dialecte SQL](../../sql_reference/index.md). `clickhouse-local` utilise le même noyau que clickhouse server, de sorte qu'il prend en charge la plupart des fonctionnalités et le même ensemble de formats et de moteurs de table. diff --git a/docs/fr/operations/utils/index.md b/docs/fr/operations/utilities/index.md similarity index 80% rename from docs/fr/operations/utils/index.md rename to docs/fr/operations/utilities/index.md index 29f9b647301..7523d5dd216 100644 --- a/docs/fr/operations/utils/index.md +++ b/docs/fr/operations/utilities/index.md @@ -1,5 +1,9 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_folder_title: Utilities +toc_priority: 56 +toc_title: "Aper\xE7u" --- # Clickhouse Utilitaire {#clickhouse-utility} diff --git a/docs/fr/query_language/functions/machine_learning_functions.md b/docs/fr/query_language/functions/machine_learning_functions.md deleted file mode 100644 index 9a348958ece..00000000000 --- a/docs/fr/query_language/functions/machine_learning_functions.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -machine_translated: true ---- - -# Fonctions d'apprentissage automatique {#machine-learning-functions} - -## evalMLMethod (prédiction) {#machine_learning_methods-evalmlmethod} - -Prédiction utilisant des modèles de régression ajustés utilise `evalMLMethod` fonction. Voir le lien dans la `linearRegression`. - -### Régression Linéaire Stochastique {#stochastic-linear-regression} - -Le [stochasticLinearRegression](../agg_functions/reference.md#agg_functions-stochasticlinearregression) la fonction d'agrégat implémente une méthode de descente de gradient stochastique utilisant un modèle linéaire et une fonction de perte MSE. Utiliser `evalMLMethod` prédire sur de nouvelles données. - -### Régression Logistique Stochastique {#stochastic-logistic-regression} - -Le [stochasticLogisticRegression](../agg_functions/reference.md#agg_functions-stochasticlogisticregression) la fonction d'agrégation implémente la méthode de descente de gradient stochastique pour le problème de classification binaire. Utiliser `evalMLMethod` prédire sur de nouvelles données. diff --git a/docs/fr/query_language/index.md b/docs/fr/query_language/index.md deleted file mode 100644 index cf135585ea4..00000000000 --- a/docs/fr/query_language/index.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -machine_translated: true ---- - -# Référence SQL {#sql-reference} - -- [SELECT](select.md) -- [INSERT INTO](insert_into.md) -- [CREATE](create.md) -- [ALTER](alter.md#query_language_queries_alter) -- [Autres types de requêtes](misc.md) - -[Article Original](https://clickhouse.tech/docs/en/query_language/) diff --git a/docs/fr/query_language/select.md b/docs/fr/query_language/select.md deleted file mode 100644 index c5955b5f45d..00000000000 --- a/docs/fr/query_language/select.md +++ /dev/null @@ -1,1379 +0,0 @@ ---- -machine_translated: true ---- - -# Sélectionnez la syntaxe des requêtes {#select-queries-syntax} - -`SELECT` effectue la récupération des données. - -``` sql -[WITH expr_list|(subquery)] -SELECT [DISTINCT] expr_list -[FROM [db.]table | (subquery) | table_function] [FINAL] -[SAMPLE sample_coeff] -[ARRAY JOIN ...] 
-[GLOBAL] [ANY|ALL] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER] JOIN (subquery)|table USING columns_list -[PREWHERE expr] -[WHERE expr] -[GROUP BY expr_list] [WITH TOTALS] -[HAVING expr] -[ORDER BY expr_list] -[LIMIT [offset_value, ]n BY columns] -[LIMIT [n, ]m] -[UNION ALL ...] -[INTO OUTFILE filename] -[FORMAT format] -``` - -Toutes les clauses sont facultatives, à l'exception de la liste d'expressions requise immédiatement après SELECT. -Les clauses ci-dessous sont décrites dans presque le même ordre que dans l'exécution de la requête convoyeur. - -Si la requête omet le `DISTINCT`, `GROUP BY` et `ORDER BY` les clauses et les `IN` et `JOIN` sous-requêtes, la requête sera complètement traitée en flux, en utilisant O (1) quantité de RAM. -Sinon, la requête peut consommer beaucoup de RAM si les restrictions appropriées ne sont pas spécifiées: `max_memory_usage`, `max_rows_to_group_by`, `max_rows_to_sort`, `max_rows_in_distinct`, `max_bytes_in_distinct`, `max_rows_in_set`, `max_bytes_in_set`, `max_rows_in_join`, `max_bytes_in_join`, `max_bytes_before_external_sort`, `max_bytes_before_external_group_by`. Pour plus d'informations, consultez la section “Settings”. Il est possible d'utiliser le tri externe (sauvegarde des tables temporaires sur un disque) et l'agrégation externe. `The system does not have "merge join"`. - -### AVEC la Clause {#with-clause} - -Cette section prend en charge les Expressions de Table courantes ([CTE](https://en.wikipedia.org/wiki/Hierarchical_and_recursive_queries_in_SQL)), avec quelques limitations: -1. Les requêtes récursives ne sont pas prises en charge -2. Lorsque la sous-requête est utilisée à l'intérieur avec section, son résultat doit être scalaire avec exactement une ligne -3. Les résultats d'Expression ne sont pas disponibles dans les sous requêtes -Les résultats des expressions de clause WITH peuvent être utilisés dans la clause SELECT. - -Exemple 1: Utilisation d'une expression constante comme “variable” - -``` sql -WITH '2019-08-01 15:23:00' as ts_upper_bound -SELECT * -FROM hits -WHERE - EventDate = toDate(ts_upper_bound) AND - EventTime <= ts_upper_bound -``` - -Exemple 2: Expulsion de la somme(octets) résultat de l'expression de clause SELECT de la liste de colonnes - -``` sql -WITH sum(bytes) as s -SELECT - formatReadableSize(s), - table -FROM system.parts -GROUP BY table -ORDER BY s -``` - -Exemple 3: Utilisation des résultats de la sous-requête scalaire - -``` sql -/* this example would return TOP 10 of most huge tables */ -WITH - ( - SELECT sum(bytes) - FROM system.parts - WHERE active - ) AS total_disk_usage -SELECT - (sum(bytes) / total_disk_usage) * 100 AS table_disk_usage, - table -FROM system.parts -GROUP BY table -ORDER BY table_disk_usage DESC -LIMIT 10 -``` - -Exemple 4: réutilisation de l'expression dans la sous-requête -Comme solution de contournement pour la limitation actuelle de l'utilisation de l'expression dans les sous-requêtes, Vous pouvez la dupliquer. - -``` sql -WITH ['hello'] AS hello -SELECT - hello, - * -FROM -( - WITH ['hello'] AS hello - SELECT hello -) -``` - -``` text -┌─hello─────┬─hello─────┐ -│ ['hello'] │ ['hello'] │ -└───────────┴───────────┘ -``` - -### De la Clause {#select-from} - -Si la clause FROM est omise, les données seront lues à partir `system.one` table. -Le `system.one` table contient exactement une ligne (cette table remplit le même but que la table double trouvée dans d'autres SGBD). 
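-
-À titre d'illustration, les deux requêtes suivantes sont donc équivalentes (la colonne unique de `system.one` s'appelle `dummy` et vaut 0) :
-
-``` sql
-SELECT 1;                  -- la clause FROM est omise
-SELECT 1 FROM system.one;  -- forme explicite équivalente
-```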
- -Le `FROM` clause spécifie la source à partir de laquelle lire les données: - -- Table -- Sous-requête -- [Fonction de Table](table_functions/index.md) - -`ARRAY JOIN` et le régulier `JOIN` peuvent également être inclus (voir ci-dessous). - -Au lieu d'une table, l' `SELECT` sous-requête peut être spécifiée entre parenthèses. -Contrairement à SQL standard, un synonyme n'a pas besoin d'être spécifié après une sous-requête. - -Pour exécuter une requête, toutes les colonnes mentionnées dans la requête sont extraites de la table appropriée. Toutes les colonnes non nécessaires pour la requête externe sont rejetées des sous-requêtes. -Si une requête ne répertorie aucune colonne (par exemple, `SELECT count() FROM t`), une colonne est extraite de la table de toute façon (la plus petite est préférée), afin de calculer le nombre de lignes. - -#### Modificateur FINAL {#select-from-final} - -Applicable lors de la sélection de données à partir de tables [MergeTree](../operations/table_engines/mergetree.md)-famille de moteurs autres que `GraphiteMergeTree`. Lorsque `FINAL` est spécifié, ClickHouse fusionne complètement les données avant de renvoyer le résultat et effectue ainsi toutes les transformations de données qui se produisent lors des fusions pour le moteur de table donné. - -Également pris en charge pour: -- [Répliqué](../operations/table_engines/replication.md) les versions de `MergeTree` moteur. -- [Vue](../operations/table_engines/view.md), [Tampon](../operations/table_engines/buffer.md), [Distribué](../operations/table_engines/distributed.md), et [MaterializedView](../operations/table_engines/materializedview.md) moteurs qui fonctionnent sur d'autres moteurs, à condition qu'ils aient été créés sur `MergeTree`-tables de moteur. - -Requêtes qui utilisent `FINAL` sont exécutés pas aussi vite que les requêtes similaires qui ne le font pas, car: - -- La requête est exécutée dans un seul thread et les données sont fusionnées lors de l'exécution de la requête. -- Les requêtes avec `FINAL` lire les colonnes de clé primaire en plus des colonnes spécifiées dans la requête. - -Dans la plupart des cas, évitez d'utiliser `FINAL`. - -### Exemple de Clause {#select-sample-clause} - -Le `SAMPLE` la clause permet un traitement de requête approximatif. - -Lorsque l'échantillonnage de données est activé, la requête n'est pas effectuée sur toutes les données, mais uniquement sur une certaine fraction de données (échantillon). Par exemple, si vous avez besoin de calculer des statistiques pour toutes les visites, il suffit d'exécuter la requête sur le 1/10 de la fraction de toutes les visites, puis multiplier le résultat par 10. - -Le traitement approximatif des requêtes peut être utile dans les cas suivants: - -- Lorsque vous avez des exigences de synchronisation strictes (comme \<100ms), mais que vous ne pouvez pas justifier le coût des ressources matérielles supplémentaires pour y répondre. -- Lorsque vos données brutes ne sont pas précises, l'approximation ne dégrade pas sensiblement la qualité. -- Les exigences commerciales ciblent des résultats approximatifs (pour la rentabilité, ou afin de commercialiser des résultats exacts aux utilisateurs premium). - -!!! note "Note" - Vous ne pouvez utiliser l'échantillonnage qu'avec les tables [MergeTree](../operations/table_engines/mergetree.md) famille, et seulement si l'expression d'échantillonnage a été spécifiée lors de la création de la table (voir [Moteur MergeTree](../operations/table_engines/mergetree.md#table_engine-mergetree-creating-a-table)). 
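-
-Esquisse illustrative (le nom de table `hits_sample_demo` est hypothétique) : la clé d'échantillonnage est déclarée à la création de la table et doit figurer dans la clé primaire, ce qui rend ensuite la clause `SAMPLE` utilisable :
-
-``` sql
-CREATE TABLE hits_sample_demo
-(
-    EventDate Date,
-    UserID UInt64,
-    Title String
-) ENGINE = MergeTree()
-PARTITION BY toYYYYMM(EventDate)
-ORDER BY (EventDate, intHash32(UserID)) -- contient la clé d'échantillonnage
-SAMPLE BY intHash32(UserID);
-
-SELECT count() FROM hits_sample_demo SAMPLE 0.1; -- lit environ 10% des données
-```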
- -Les caractéristiques de l'échantillonnage des données sont énumérées ci-dessous: - -- L'échantillonnage de données est un mécanisme déterministe. Le résultat de la même `SELECT .. SAMPLE` la requête est toujours le même. -- L'échantillonnage fonctionne de manière cohérente pour différentes tables. Pour les tables avec une seule clé d'échantillonnage, un échantillon avec le même coefficient sélectionne toujours le même sous-ensemble de données possibles. Par exemple, un exemple d'ID utilisateur prend des lignes avec le même sous-ensemble de tous les ID utilisateur possibles de différentes tables. Cela signifie que vous pouvez utiliser l'exemple dans les sous-requêtes dans la [IN](#select-in-operators) clause. En outre, vous pouvez joindre des échantillons en utilisant le [JOIN](#select-join) clause. -- L'échantillonnage permet de lire moins de données à partir d'un disque. Notez que vous devez spécifier l'échantillonnage clé correctement. Pour plus d'informations, voir [Création d'une Table MergeTree](../operations/table_engines/mergetree.md#table_engine-mergetree-creating-a-table). - -Pour l' `SAMPLE` clause la syntaxe suivante est prise en charge: - -| SAMPLE Clause Syntax | Description | -|----------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `SAMPLE k` | Ici `k` est le nombre de 0 à 1.
    La requête est exécutée sur `k` fraction des données. Exemple, `SAMPLE 0.1` exécute la requête sur 10% des données. [Lire plus](#select-sample-k) | -| `SAMPLE n` | Ici `n` est un entier suffisamment grand.
    La requête est exécutée sur un échantillon d'au moins `n` lignes (mais pas significativement plus que cela). Exemple, `SAMPLE 10000000` exécute la requête sur un minimum de 10 000 000 lignes. [Lire plus](#select-sample-n) | -| `SAMPLE k OFFSET m` | Ici `k` et `m` sont les nombres de 0 à 1.
    La requête est exécutée sur un échantillon de `k` fraction des données. Les données utilisées pour l'échantillon est compensée par `m` fraction. [Lire plus](#select-sample-offset) | - -#### L'ÉCHANTILLON k {#select-sample-k} - -Ici `k` est le nombre de 0 à 1 (les notations fractionnaires et décimales sont prises en charge). Exemple, `SAMPLE 1/2` ou `SAMPLE 0.5`. - -Dans un `SAMPLE k` clause, l'échantillon est prélevé à partir de la `k` fraction des données. L'exemple est illustré ci-dessous: - -``` sql -SELECT - Title, - count() * 10 AS PageViews -FROM hits_distributed -SAMPLE 0.1 -WHERE - CounterID = 34 -GROUP BY Title -ORDER BY PageViews DESC LIMIT 1000 -``` - -Dans cet exemple, la requête est exécutée sur un échantillon de 0,1 (10%) de données. Les valeurs des fonctions d'agrégat ne sont pas corrigées automatiquement, donc pour obtenir un résultat approximatif, la valeur `count()` est multiplié manuellement par 10. - -#### Échantillon n {#select-sample-n} - -Ici `n` est un entier suffisamment grand. Exemple, `SAMPLE 10000000`. - -Dans ce cas, la requête est exécutée sur un échantillon d'au moins `n` lignes (mais pas significativement plus que cela). Exemple, `SAMPLE 10000000` exécute la requête sur un minimum de 10 000 000 lignes. - -Puisque l'unité minimale pour la lecture des données est un granule (sa taille est définie par le `index_granularity` de réglage), il est logique de définir un échantillon beaucoup plus grand que la taille du granule. - -Lors de l'utilisation de la `SAMPLE n` clause, vous ne savez pas quel pourcentage relatif de données a été traité. Donc, vous ne connaissez pas le coefficient par lequel les fonctions agrégées doivent être multipliées. L'utilisation de la `_sample_factor` colonne virtuelle pour obtenir le résultat approximatif. - -Le `_sample_factor` colonne contient des coefficients relatifs qui sont calculés dynamiquement. Cette colonne est créée automatiquement lorsque vous [créer](../operations/table_engines/mergetree.md#table_engine-mergetree-creating-a-table) une table avec la clé d'échantillonnage spécifiée. Les exemples d'utilisation de la `_sample_factor` colonne sont indiqués ci-dessous. - -Considérons la table `visits` qui contient des statistiques sur les visites de site. Le premier exemple montre comment calculer le nombre de pages vues: - -``` sql -SELECT sum(PageViews * _sample_factor) -FROM visits -SAMPLE 10000000 -``` - -L'exemple suivant montre comment calculer le nombre total de visites: - -``` sql -SELECT sum(_sample_factor) -FROM visits -SAMPLE 10000000 -``` - -L'exemple ci-dessous montre comment calculer la durée moyenne de la session. Notez que vous n'avez pas besoin d'utiliser le coefficient relatif pour calculer les valeurs moyennes. - -``` sql -SELECT avg(Duration) -FROM visits -SAMPLE 10000000 -``` - -#### Échantillon K décalage m {#select-sample-offset} - -Ici `k` et `m` sont des nombres de 0 à 1. Des exemples sont présentés ci-dessous. - -**Exemple 1** - -``` sql -SAMPLE 1/10 -``` - -Dans cet exemple, l'échantillon représente 1 / 10e de toutes les données: - -`[++------------------]` - -**Exemple 2** - -``` sql -SAMPLE 1/10 OFFSET 1/2 -``` - -Ici, un échantillon de 10% est prélevé à partir de la seconde moitié des données. - -`[----------++--------]` - -### Clause de jointure de tableau {#select-array-join-clause} - -Permet l'exécution de `JOIN` avec un tableau ou une structure de données imbriquée. 
L'intention est similaire à la [arrayJoin](functions/array_join.md#functions_arrayjoin) la fonction, mais sa fonctionnalité est plus large. - -``` sql -SELECT -FROM -[LEFT] ARRAY JOIN -[WHERE|PREWHERE ] -... -``` - -Vous pouvez spécifier qu'un seul `ARRAY JOIN` la clause dans une requête. - -L'ordre d'exécution de la requête est optimisé lors de l'exécution `ARRAY JOIN`. Bien `ARRAY JOIN` doit toujours être spécifié avant l' `WHERE/PREWHERE` clause, il peut être effectué soit avant `WHERE/PREWHERE` (si le résultat est nécessaire dans cette clause), ou après l'avoir terminé (pour réduire le volume de calculs). L'ordre de traitement est contrôlée par l'optimiseur de requête. - -Types pris en charge de `ARRAY JOIN` sont énumérés ci-dessous: - -- `ARRAY JOIN` - Dans ce cas, des tableaux vides ne sont pas inclus dans le résultat de `JOIN`. -- `LEFT ARRAY JOIN` - Le résultat de `JOIN` contient des lignes avec des tableaux vides. La valeur d'un tableau vide est définie sur la valeur par défaut pour le type d'élément de tableau (généralement 0, chaîne vide ou NULL). - -Les exemples ci-dessous illustrent l'utilisation de la `ARRAY JOIN` et `LEFT ARRAY JOIN` clause. Créons une table avec un [Tableau](../data_types/array.md) tapez colonne et insérez des valeurs dedans: - -``` sql -CREATE TABLE arrays_test -( - s String, - arr Array(UInt8) -) ENGINE = Memory; - -INSERT INTO arrays_test -VALUES ('Hello', [1,2]), ('World', [3,4,5]), ('Goodbye', []); -``` - -``` text -┌─s───────────┬─arr─────┐ -│ Hello │ [1,2] │ -│ World │ [3,4,5] │ -│ Goodbye │ [] │ -└─────────────┴─────────┘ -``` - -L'exemple ci-dessous utilise la `ARRAY JOIN` clause: - -``` sql -SELECT s, arr -FROM arrays_test -ARRAY JOIN arr; -``` - -``` text -┌─s─────┬─arr─┐ -│ Hello │ 1 │ -│ Hello │ 2 │ -│ World │ 3 │ -│ World │ 4 │ -│ World │ 5 │ -└───────┴─────┘ -``` - -L'exemple suivant utilise l' `LEFT ARRAY JOIN` clause: - -``` sql -SELECT s, arr -FROM arrays_test -LEFT ARRAY JOIN arr; -``` - -``` text -┌─s───────────┬─arr─┐ -│ Hello │ 1 │ -│ Hello │ 2 │ -│ World │ 3 │ -│ World │ 4 │ -│ World │ 5 │ -│ Goodbye │ 0 │ -└─────────────┴─────┘ -``` - -#### À L'Aide D'Alias {#using-aliases} - -Un alias peut être spécifié pour un tableau `ARRAY JOIN` clause. Dans ce cas, un élément de tableau peut être consulté par ce pseudonyme, mais le tableau lui-même est accessible par le nom d'origine. Exemple: - -``` sql -SELECT s, arr, a -FROM arrays_test -ARRAY JOIN arr AS a; -``` - -``` text -┌─s─────┬─arr─────┬─a─┐ -│ Hello │ [1,2] │ 1 │ -│ Hello │ [1,2] │ 2 │ -│ World │ [3,4,5] │ 3 │ -│ World │ [3,4,5] │ 4 │ -│ World │ [3,4,5] │ 5 │ -└───────┴─────────┴───┘ -``` - -En utilisant des alias, vous pouvez effectuer `ARRAY JOIN` avec un groupe externe. Exemple: - -``` sql -SELECT s, arr_external -FROM arrays_test -ARRAY JOIN [1, 2, 3] AS arr_external; -``` - -``` text -┌─s───────────┬─arr_external─┐ -│ Hello │ 1 │ -│ Hello │ 2 │ -│ Hello │ 3 │ -│ World │ 1 │ -│ World │ 2 │ -│ World │ 3 │ -│ Goodbye │ 1 │ -│ Goodbye │ 2 │ -│ Goodbye │ 3 │ -└─────────────┴──────────────┘ -``` - -Plusieurs tableaux peuvent être séparés par des virgules `ARRAY JOIN` clause. Dans ce cas, `JOIN` est effectuée avec eux simultanément (la somme directe, pas le produit cartésien). Notez que tous les tableaux doivent avoir la même taille. 
Exemple: - -``` sql -SELECT s, arr, a, num, mapped -FROM arrays_test -ARRAY JOIN arr AS a, arrayEnumerate(arr) AS num, arrayMap(x -> x + 1, arr) AS mapped; -``` - -``` text -┌─s─────┬─arr─────┬─a─┬─num─┬─mapped─┐ -│ Hello │ [1,2] │ 1 │ 1 │ 2 │ -│ Hello │ [1,2] │ 2 │ 2 │ 3 │ -│ World │ [3,4,5] │ 3 │ 1 │ 4 │ -│ World │ [3,4,5] │ 4 │ 2 │ 5 │ -│ World │ [3,4,5] │ 5 │ 3 │ 6 │ -└───────┴─────────┴───┴─────┴────────┘ -``` - -L'exemple ci-dessous utilise la [arrayEnumerate](functions/array_functions.md#array_functions-arrayenumerate) fonction: - -``` sql -SELECT s, arr, a, num, arrayEnumerate(arr) -FROM arrays_test -ARRAY JOIN arr AS a, arrayEnumerate(arr) AS num; -``` - -``` text -┌─s─────┬─arr─────┬─a─┬─num─┬─arrayEnumerate(arr)─┐ -│ Hello │ [1,2] │ 1 │ 1 │ [1,2] │ -│ Hello │ [1,2] │ 2 │ 2 │ [1,2] │ -│ World │ [3,4,5] │ 3 │ 1 │ [1,2,3] │ -│ World │ [3,4,5] │ 4 │ 2 │ [1,2,3] │ -│ World │ [3,4,5] │ 5 │ 3 │ [1,2,3] │ -└───────┴─────────┴───┴─────┴─────────────────────┘ -``` - -#### Jointure de tableau avec la Structure de données imbriquée {#array-join-with-nested-data-structure} - -`ARRAY`Rejoindre " fonctionne également avec [structures de données imbriquées](../data_types/nested_data_structures/nested.md). Exemple: - -``` sql -CREATE TABLE nested_test -( - s String, - nest Nested( - x UInt8, - y UInt32) -) ENGINE = Memory; - -INSERT INTO nested_test -VALUES ('Hello', [1,2], [10,20]), ('World', [3,4,5], [30,40,50]), ('Goodbye', [], []); -``` - -``` text -┌─s───────┬─nest.x──┬─nest.y─────┐ -│ Hello │ [1,2] │ [10,20] │ -│ World │ [3,4,5] │ [30,40,50] │ -│ Goodbye │ [] │ [] │ -└─────────┴─────────┴────────────┘ -``` - -``` sql -SELECT s, `nest.x`, `nest.y` -FROM nested_test -ARRAY JOIN nest; -``` - -``` text -┌─s─────┬─nest.x─┬─nest.y─┐ -│ Hello │ 1 │ 10 │ -│ Hello │ 2 │ 20 │ -│ World │ 3 │ 30 │ -│ World │ 4 │ 40 │ -│ World │ 5 │ 50 │ -└───────┴────────┴────────┘ -``` - -Lorsque vous spécifiez des noms de structures de données imbriquées dans `ARRAY JOIN` le sens est le même que `ARRAY JOIN` avec tous les éléments du tableau qui la compose. Des exemples sont énumérés ci-dessous: - -``` sql -SELECT s, `nest.x`, `nest.y` -FROM nested_test -ARRAY JOIN `nest.x`, `nest.y`; -``` - -``` text -┌─s─────┬─nest.x─┬─nest.y─┐ -│ Hello │ 1 │ 10 │ -│ Hello │ 2 │ 20 │ -│ World │ 3 │ 30 │ -│ World │ 4 │ 40 │ -│ World │ 5 │ 50 │ -└───────┴────────┴────────┘ -``` - -Cette variation a également du sens: - -``` sql -SELECT s, `nest.x`, `nest.y` -FROM nested_test -ARRAY JOIN `nest.x`; -``` - -``` text -┌─s─────┬─nest.x─┬─nest.y─────┐ -│ Hello │ 1 │ [10,20] │ -│ Hello │ 2 │ [10,20] │ -│ World │ 3 │ [30,40,50] │ -│ World │ 4 │ [30,40,50] │ -│ World │ 5 │ [30,40,50] │ -└───────┴────────┴────────────┘ -``` - -Un alias peut être utilisé pour une structure de données imbriquée, afin de sélectionner `JOIN` le résultat ou le tableau source. 
Exemple: - -``` sql -SELECT s, `n.x`, `n.y`, `nest.x`, `nest.y` -FROM nested_test -ARRAY JOIN nest AS n; -``` - -``` text -┌─s─────┬─n.x─┬─n.y─┬─nest.x──┬─nest.y─────┐ -│ Hello │ 1 │ 10 │ [1,2] │ [10,20] │ -│ Hello │ 2 │ 20 │ [1,2] │ [10,20] │ -│ World │ 3 │ 30 │ [3,4,5] │ [30,40,50] │ -│ World │ 4 │ 40 │ [3,4,5] │ [30,40,50] │ -│ World │ 5 │ 50 │ [3,4,5] │ [30,40,50] │ -└───────┴─────┴─────┴─────────┴────────────┘ -``` - -Exemple d'utilisation de l' [arrayEnumerate](functions/array_functions.md#array_functions-arrayenumerate) fonction: - -``` sql -SELECT s, `n.x`, `n.y`, `nest.x`, `nest.y`, num -FROM nested_test -ARRAY JOIN nest AS n, arrayEnumerate(`nest.x`) AS num; -``` - -``` text -┌─s─────┬─n.x─┬─n.y─┬─nest.x──┬─nest.y─────┬─num─┐ -│ Hello │ 1 │ 10 │ [1,2] │ [10,20] │ 1 │ -│ Hello │ 2 │ 20 │ [1,2] │ [10,20] │ 2 │ -│ World │ 3 │ 30 │ [3,4,5] │ [30,40,50] │ 1 │ -│ World │ 4 │ 40 │ [3,4,5] │ [30,40,50] │ 2 │ -│ World │ 5 │ 50 │ [3,4,5] │ [30,40,50] │ 3 │ -└───────┴─────┴─────┴─────────┴────────────┴─────┘ -``` - -### Clause de JOINTURE {#select-join} - -Rejoint les données dans la normale [SQL JOIN](https://en.wikipedia.org/wiki/Join_(SQL)) sens. - -!!! info "Note" - Pas liées à [ARRAY JOIN](#select-array-join-clause). - -``` sql -SELECT -FROM -[GLOBAL] [ANY|ALL] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER] JOIN -(ON )|(USING ) ... -``` - -Les noms de table peuvent être spécifiés au lieu de `` et ``. Ceci est équivalent à la `SELECT * FROM table` sous-requête, sauf dans un cas particulier lorsque la table a [Rejoindre](../operations/table_engines/join.md) engine – an array prepared for joining. - -#### Types pris en charge de `JOIN` {#select-join-types} - -- `INNER JOIN` (ou `JOIN`) -- `LEFT JOIN` (ou `LEFT OUTER JOIN`) -- `RIGHT JOIN` (ou `RIGHT OUTER JOIN`) -- `FULL JOIN` (ou `FULL OUTER JOIN`) -- `CROSS JOIN` (ou `,` ) - -Voir la norme [SQL JOIN](https://en.wikipedia.org/wiki/Join_(SQL)) Description. - -#### Plusieurs REJOINDRE {#multiple-join} - -En effectuant des requêtes, ClickHouse réécrit les jointures multi-tables dans la séquence des jointures à deux tables. Par exemple, S'il y a quatre tables pour join clickhouse rejoint la première et la seconde, puis rejoint le résultat avec la troisième table, et à la dernière étape, il rejoint la quatrième. - -Si une requête contient l' `WHERE` clickhouse essaie de pousser les filtres de cette clause à travers la jointure intermédiaire. S'il ne peut pas appliquer le filtre à chaque jointure intermédiaire, ClickHouse applique les filtres une fois toutes les jointures terminées. - -Nous recommandons l' `JOIN ON` ou `JOIN USING` syntaxe pour créer des requêtes. Exemple: - -``` sql -SELECT * FROM t1 JOIN t2 ON t1.a = t2.a JOIN t3 ON t1.a = t3.a -``` - -Vous pouvez utiliser des listes de tables séparées par des virgules `FROM` clause. Exemple: - -``` sql -SELECT * FROM t1, t2, t3 WHERE t1.a = t2.a AND t1.a = t3.a -``` - -Ne mélangez pas ces syntaxes. - -ClickHouse ne supporte pas directement la syntaxe avec des virgules, Nous ne recommandons donc pas de les utiliser. L'algorithme tente de réécrire la requête en termes de `CROSS JOIN` et `INNER JOIN` clauses et procède ensuite au traitement des requêtes. Lors de la réécriture de la requête, ClickHouse tente d'optimiser les performances et la consommation de mémoire. Par défaut, ClickHouse traite les virgules comme `INNER JOIN` clause et convertit `INNER JOIN` de `CROSS JOIN` lorsque l'algorithme ne peut pas garantir que `INNER JOIN` retourne les données requises. 
- -#### Rigueur {#select-join-strictness} - -- `ALL` — If the right table has several matching rows, ClickHouse creates a [Produit cartésien](https://en.wikipedia.org/wiki/Cartesian_product) à partir des lignes correspondantes. C'est la norme `JOIN` comportement en SQL. -- `ANY` — If the right table has several matching rows, only the first one found is joined. If the right table has only one matching row, the results of queries with `ANY` et `ALL` les mots clés sont les mêmes. -- `ASOF` — For joining sequences with a non-exact match. `ASOF JOIN` l'utilisation est décrite ci-dessous. - -**ASOF joindre L'utilisation** - -`ASOF JOIN` est utile lorsque vous devez joindre des enregistrements qui n'ont pas de correspondance exacte. - -Tables pour `ASOF JOIN` doit avoir une colonne de séquence ordonnée. Cette colonne ne peut pas être seule dans une table et doit être l'un des types de données: `UInt32`, `UInt64`, `Float32`, `Float64`, `Date`, et `DateTime`. - -Syntaxe `ASOF JOIN ... ON`: - -``` sql -SELECT expressions_list -FROM table_1 -ASOF LEFT JOIN table_2 -ON equi_cond AND closest_match_cond -``` - -Vous pouvez utiliser n'importe quel nombre de conditions d'égalité et exactement une condition de correspondance la plus proche. Exemple, `SELECT count() FROM table_1 ASOF LEFT JOIN table_2 ON table_1.a == table_2.b AND table_2.t <= table_1.t`. - -Conditions prises en charge pour la correspondance la plus proche: `>`, `>=`, `<`, `<=`. - -Syntaxe `ASOF JOIN ... USING`: - -``` sql -SELECT expressions_list -FROM table_1 -ASOF JOIN table_2 -USING (equi_column1, ... equi_columnN, asof_column) -``` - -`ASOF JOIN` utiliser `equi_columnX` pour rejoindre sur l'égalité et `asof_column` pour rejoindre le match le plus proche avec le `table_1.asof_column >= table_2.asof_column` condition. Le `asof_column` colonne toujours la dernière dans le `USING` clause. - -Par exemple, considérez les tableaux suivants: - -``` text - table_1 table_2 - - event | ev_time | user_id event | ev_time | user_id -----------|---------|---------- ----------|---------|---------- - ... ... -event_1_1 | 12:00 | 42 event_2_1 | 11:59 | 42 - ... event_2_2 | 12:30 | 42 -event_1_2 | 13:00 | 42 event_2_3 | 13:00 | 42 - ... ... -``` - -`ASOF JOIN` peut prendre la date d'un événement utilisateur de `table_1` et trouver un événement dans `table_2` où le timestamp est plus proche de l'horodatage de l'événement à partir de `table_1` correspondant à la condition de correspondance la plus proche. Les valeurs d'horodatage égales sont les plus proches si elles sont disponibles. Ici, l' `user_id` la colonne peut être utilisée pour joindre sur l'égalité et le `ev_time` la colonne peut être utilisée pour se joindre à la correspondance la plus proche. Dans notre exemple, `event_1_1` peut être jointe à `event_2_1` et `event_1_2` peut être jointe à `event_2_3`, mais `event_2_2` ne peut pas être rejoint. - -!!! note "Note" - `ASOF` jointure est **pas** pris en charge dans le [Rejoindre](../operations/table_engines/join.md) tableau moteur. - -Pour définir la valeur de rigueur par défaut, utilisez le paramètre de configuration de session [join\_default\_strictness](../operations/settings/settings.md#settings-join_default_strictness). - -#### GLOBAL JOIN {#global-join} - -Lors de l'utilisation normale `JOIN` la requête est envoyée aux serveurs distants. Les sous-requêtes sont exécutées sur chacune d'elles afin de créer la bonne table, et la jointure est effectuée avec cette table. 
En d'autres termes, la table de droite est formée sur chaque serveur séparément. - -Lors de l'utilisation de `GLOBAL ... JOIN`, d'abord le serveur demandeur exécute une sous-requête pour calculer la bonne table. Cette table temporaire est transmise à chaque serveur distant, et les requêtes sont exécutées sur eux en utilisant les données temporaires qui ont été transmises. - -Soyez prudent lorsque vous utilisez `GLOBAL`. Pour plus d'informations, consultez la section [Sous-requêtes distribuées](#select-distributed-subqueries). - -#### Recommandations D'Utilisation {#usage-recommendations} - -Lors de l'exécution d'un `JOIN`, il n'y a pas d'optimisation de la commande d'exécution par rapport aux autres stades de la requête. La jointure (une recherche dans la table de droite) est exécutée avant de filtrer `WHERE` et avant l'agrégation. Afin de définir explicitement l'ordre de traitement, nous vous recommandons d'exécuter une `JOIN` sous-requête avec une sous-requête. - -Exemple: - -``` sql -SELECT - CounterID, - hits, - visits -FROM -( - SELECT - CounterID, - count() AS hits - FROM test.hits - GROUP BY CounterID -) ANY LEFT JOIN -( - SELECT - CounterID, - sum(Sign) AS visits - FROM test.visits - GROUP BY CounterID -) USING CounterID -ORDER BY hits DESC -LIMIT 10 -``` - -``` text -┌─CounterID─┬───hits─┬─visits─┐ -│ 1143050 │ 523264 │ 13665 │ -│ 731962 │ 475698 │ 102716 │ -│ 722545 │ 337212 │ 108187 │ -│ 722889 │ 252197 │ 10547 │ -│ 2237260 │ 196036 │ 9522 │ -│ 23057320 │ 147211 │ 7689 │ -│ 722818 │ 90109 │ 17847 │ -│ 48221 │ 85379 │ 4652 │ -│ 19762435 │ 77807 │ 7026 │ -│ 722884 │ 77492 │ 11056 │ -└───────────┴────────┴────────┘ -``` - -Les sous-requêtes ne vous permettent pas de définir des noms ou de les utiliser pour référencer une colonne à partir d'une sous-requête spécifique. -Les colonnes spécifiées dans `USING` doit avoir les mêmes noms dans les deux sous-requêtes, et les autres colonnes doivent être nommées différemment. Vous pouvez utiliser des alias pour les noms des colonnes dans les sous-requêtes (l'exemple utilise l'alias `hits` et `visits`). - -Le `USING` clause spécifie une ou plusieurs colonnes de jointure, qui établit l'égalité de ces colonnes. La liste des colonnes est définie sans crochets. Les conditions de jointure plus complexes ne sont pas prises en charge. - -La table de droite (le résultat de la sous-requête) réside dans la RAM. S'il n'y a pas assez de mémoire, vous ne pouvez pas exécuter un `JOIN`. - -Chaque fois qu'une requête est exécutée avec la même `JOIN`, la sous-requête est exécutée à nouveau car le résultat n'est pas mis en cache. Pour éviter cela, utilisez la spéciale [Rejoindre](../operations/table_engines/join.md) table engine, qui est un tableau préparé pour l'assemblage qui est toujours en RAM. - -Dans certains cas, il est plus efficace d'utiliser `IN` plutôt `JOIN`. -Parmi les différents types de `JOIN`, le plus efficace est d' `ANY LEFT JOIN`, puis `ANY INNER JOIN`. Les moins efficaces sont `ALL LEFT JOIN` et `ALL INNER JOIN`. - -Si vous avez besoin d'un `JOIN` pour se joindre à des tables de dimension (ce sont des tables relativement petites qui contiennent des propriétés de dimension, telles que des noms pour des campagnes publicitaires), un `JOIN` peut-être pas très pratique en raison du fait que la bonne table est ré-accédée pour chaque requête. Pour de tels cas, il y a un “external dictionaries” la fonctionnalité que vous devez utiliser à la place de `JOIN`. 
Pour plus d'informations, consultez la section [Dictionnaires externes](dicts/external_dicts.md). - -**Limitations De Mémoire** - -ClickHouse utilise le [jointure de hachage](https://en.wikipedia.org/wiki/Hash_join) algorithme. ClickHouse prend le `` et crée une table de hachage pour cela dans la RAM. Si vous devez restreindre la consommation de mémoire de l'opération join utilisez les paramètres suivants: - -- [max\_rows\_in\_join](../operations/settings/query_complexity.md#settings-max_rows_in_join) — Limits number of rows in the hash table. -- [max\_bytes\_in\_join](../operations/settings/query_complexity.md#settings-max_bytes_in_join) — Limits size of the hash table. - -Lorsque l'une de ces limites est atteinte, ClickHouse agit comme [join\_overflow\_mode](../operations/settings/query_complexity.md#settings-join_overflow_mode) réglage des instructions. - -#### Traitement des cellules vides ou nulles {#processing-of-empty-or-null-cells} - -Lors de la jonction de tables, les cellules vides peuvent apparaître. Paramètre [join\_use\_nulls](../operations/settings/settings.md#join_use_nulls) définir comment clickhouse remplit ces cellules. - -Si l' `JOIN` les touches sont [Nullable](../data_types/nullable.md) champs, les lignes où au moins une des clés a la valeur [NULL](syntax.md#null-literal) ne sont pas jointes. - -#### Limitations De Syntaxe {#syntax-limitations} - -Pour plusieurs `JOIN` clauses dans un seul `SELECT` requête: - -- Prendre toutes les colonnes via `*` n'est disponible que si les tables sont jointes, pas les sous-requêtes. -- Le `PREWHERE` la clause n'est pas disponible. - -Pour `ON`, `WHERE`, et `GROUP BY` clause: - -- Les expressions arbitraires ne peuvent pas être utilisées dans `ON`, `WHERE`, et `GROUP BY` mais vous pouvez définir une expression dans un `SELECT` clause et ensuite l'utiliser dans ces clauses via un alias. - -### Clause where {#select-where} - -S'il existe une clause WHERE, elle doit contenir une expression de type UInt8. C'est généralement une expression avec comparaison et opérateurs logiques. -Cette expression est utilisée pour filtrer les données avant toutes les autres transformations. - -Si les index sont pris en charge par le moteur de table de base de données, l'expression est évaluée sur la possibilité d'utiliser des index. - -### Clause PREWHERE {#prewhere-clause} - -Cette clause a le même sens que la clause WHERE. La différence est dans laquelle les données sont lues à partir de la table. -Lors de L'utilisation de PREWHERE, d'abord, seules les colonnes nécessaires à L'exécution de PREWHERE sont lues. Ensuite, les autres colonnes sont lues qui sont nécessaires pour exécuter la requête, mais seulement les blocs où L'expression PREWHERE est vraie. - -Il est logique d'utiliser PREWHERE s'il existe des conditions de filtration qui sont utilisées par une minorité de colonnes dans la requête, mais qui fournissent une filtration de données forte. Cela réduit le volume de données à lire. - -Par exemple, il est utile d'écrire PREWHERE pour les requêtes qui extraient un grand nombre de colonnes, mais qui n'ont que la filtration pour quelques colonnes. - -PREWHERE est uniquement pris en charge par les tables `*MergeTree` famille. - -Une requête peut spécifier simultanément PREWHERE et WHERE. Dans ce cas, PREWHERE précède WHERE. - -Si l' ‘optimize\_move\_to\_prewhere’ le paramètre est défini sur 1 et PREWHERE est omis, le système utilise des heuristiques pour déplacer automatiquement des parties d'expressions D'où vers PREWHERE. 
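-
-Esquisse illustrative (table `hits` et colonnes hypothétiques) : la condition la plus sélective est placée dans PREWHERE afin de ne lire d'abord que la colonne concernée, les autres colonnes n'étant lues que pour les blocs retenus :
-
-``` sql
-SELECT URL, Title
-FROM hits
-PREWHERE CounterID = 34           -- filtre fort, évalué d'abord sur cette seule colonne
-WHERE EventDate >= '2020-03-01';  -- filtre restant, appliqué ensuite
-```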
### Clause GROUP BY {#select-group-by-clause}

C'est l'une des parties les plus importantes d'un SGBD orienté colonnes.

S'il existe une clause GROUP BY, elle doit contenir une liste d'expressions. Chaque expression sera désignée ici comme une “key”.
Toutes les expressions des clauses SELECT, HAVING et ORDER BY doivent être calculées à partir de clés ou de fonctions d'agrégation. En d'autres termes, chaque colonne sélectionnée dans la table doit être utilisée soit dans les clés, soit dans les fonctions d'agrégation.

Si une requête ne contient que des colonnes de table dans les fonctions d'agrégation, la clause GROUP BY peut être omise et l'agrégation par un ensemble de clés vide est supposée.

Exemple:

``` sql
SELECT
    count(),
    median(FetchTiming > 60 ? 60 : FetchTiming),
    count() - sum(Refresh)
FROM hits
```

Cependant, contrairement au SQL standard, si la table n'a pas de lignes (soit il n'y en a aucune, soit il n'y en a aucune après le filtrage avec WHERE), un résultat vide est renvoyé, et non le résultat d'une des lignes contenant les valeurs initiales des fonctions d'agrégation.

Contrairement à MySQL (et conformément au SQL standard), vous ne pouvez pas obtenir une valeur d'une colonne qui ne fait pas partie d'une clé ou d'une fonction d'agrégation (sauf les expressions constantes). Pour contourner ce problème, vous pouvez utiliser la fonction d'agrégation ‘any’ (qui récupère la première valeur rencontrée) ou ‘min’/‘max’.

Exemple:

``` sql
SELECT
    domainWithoutWWW(URL) AS domain,
    count(),
    any(Title) AS title -- getting the first occurred page header for each domain.
FROM hits
GROUP BY domain
```

Pour chaque valeur de clé différente rencontrée, GROUP BY calcule un ensemble de valeurs de fonctions d'agrégation.

GROUP BY n'est pas pris en charge pour les colonnes de type tableau.

Une constante ne peut pas être spécifiée comme argument des fonctions d'agrégation. Exemple: `sum(1)`. Au lieu de cela, vous pouvez vous débarrasser de la constante. Exemple: `count()`.

#### Le traitement NULL {#null-processing}

Pour le regroupement, ClickHouse interprète [NULL](syntax.md) comme une valeur, et `NULL=NULL`.

Voici un exemple pour montrer ce que cela signifie.

Supposons que vous ayez cette table:

``` text
┌─x─┬────y─┐
│ 1 │    2 │
│ 2 │ ᴺᵁᴸᴸ │
│ 3 │    2 │
│ 3 │    3 │
│ 3 │ ᴺᵁᴸᴸ │
└───┴──────┘
```

La requête `SELECT sum(x), y FROM t_null_big GROUP BY y` donne:

``` text
┌─sum(x)─┬────y─┐
│      4 │    2 │
│      3 │    3 │
│      5 │ ᴺᵁᴸᴸ │
└────────┴──────┘
```

Vous pouvez voir que `GROUP BY` pour `y = NULL` a additionné `x`, comme si `NULL` était cette valeur.

Si vous passez plusieurs clés à `GROUP BY`, le résultat vous donnera toutes les combinaisons de la sélection, comme si `NULL` était une valeur spécifique.

#### Modificateur WITH TOTALS {#with-totals-modifier}

Si le modificateur WITH TOTALS est spécifié, une autre ligne sera calculée. Cette ligne aura des colonnes clés contenant des valeurs par défaut (zéros ou chaînes vides), et des colonnes de fonctions d'agrégation avec les valeurs calculées sur toutes les lignes (la valeur “totale”).

Cette ligne supplémentaire est sortie dans les formats JSON\*, TabSeparated\* et Pretty\*, séparément des autres lignes. Dans les autres formats, cette ligne n'est pas sortie.

Dans les formats JSON\*, cette ligne est sortie dans un champ ‘totals’. Dans les formats TabSeparated\*, la ligne vient après le résultat principal, précédée d'une ligne vide (après les autres données).
Dans les formats Pretty\*, la ligne est sortie sous forme de table séparée après le résultat principal.

`WITH TOTALS` peut être exécuté de différentes manières lorsque HAVING est présent. Le comportement dépend du paramètre ‘totals\_mode’.
Par défaut, `totals_mode = 'before_having'`. Dans ce cas, ‘totals’ est calculé sur toutes les lignes, y compris celles qui ne passent pas HAVING ni ‘max\_rows\_to\_group\_by’.

Les autres alternatives n'incluent dans ‘totals’ que les lignes qui passent HAVING, et se comportent différemment avec le réglage `max_rows_to_group_by` et `group_by_overflow_mode = 'any'`.

`after_having_exclusive` – N'inclut pas les lignes qui ne sont pas passées par `max_rows_to_group_by`. En d'autres termes, ‘totals’ aura moins de lignes, ou le même nombre, que si `max_rows_to_group_by` avait été omis.

`after_having_inclusive` – Inclut dans ‘totals’ toutes les lignes qui ne sont pas passées par ‘max\_rows\_to\_group\_by’. En d'autres termes, ‘totals’ aura plus de lignes, ou le même nombre, que si `max_rows_to_group_by` avait été omis.

`after_having_auto` – Compte le nombre de lignes passées par HAVING. S'il dépasse une certaine proportion (par défaut, 50%), inclut dans ‘totals’ toutes les lignes qui ne sont pas passées par ‘max\_rows\_to\_group\_by’. Sinon, ne les inclut pas.

`totals_auto_threshold` – Par défaut, 0.5. Le coefficient pour `after_having_auto`.

Si `max_rows_to_group_by` et `group_by_overflow_mode = 'any'` ne sont pas utilisés, toutes les variations de `after_having` sont identiques, et vous pouvez utiliser n'importe laquelle (par exemple, `after_having_auto`).

Vous pouvez utiliser WITH TOTALS dans les sous-requêtes, y compris les sous-requêtes de la clause JOIN (dans ce cas, les valeurs totales respectives sont combinées).

#### GROUP BY dans la mémoire externe {#select-group-by-in-external-memory}

Vous pouvez activer le vidage des données temporaires sur le disque pour limiter l'utilisation de la mémoire pendant `GROUP BY`.
Le paramètre [max\_bytes\_before\_external\_group\_by](../operations/settings/settings.md#settings-max_bytes_before_external_group_by) détermine le seuil de consommation de RAM déclenchant le vidage des données temporaires de `GROUP BY` vers le système de fichiers. S'il est défini sur 0 (valeur par défaut), le vidage est désactivé.

Lors de l'utilisation de `max_bytes_before_external_group_by`, nous vous recommandons de définir `max_memory_usage` environ deux fois plus haut. Ceci est nécessaire car l'agrégation comporte deux étapes: la lecture des données et la formation des données intermédiaires (1), puis la fusion des données intermédiaires (2). Le vidage des données vers le système de fichiers ne peut se produire qu'au cours de l'étape 1. Si les données temporaires n'ont pas été vidées, l'étape 2 peut nécessiter jusqu'à la même quantité de mémoire qu'à l'étape 1.

Par exemple, si [max\_memory\_usage](../operations/settings/settings.md#settings_max_memory_usage) a été défini sur 10000000000 et que vous souhaitez utiliser l'agrégation externe, il est logique de définir `max_bytes_before_external_group_by` sur 10000000000, et max\_memory\_usage sur 20000000000. Lorsque l'agrégation externe est déclenchée (s'il y a eu au moins un vidage de données temporaires), la consommation maximale de RAM n'est que légèrement supérieure à `max_bytes_before_external_group_by`.

Avec le traitement des requêtes distribuées, l'agrégation externe est effectuée sur des serveurs distants.
Pour que le serveur demandeur n'utilise qu'une petite quantité de RAM, définissez `distributed_aggregation_memory_efficient` sur 1.

La fusion des données vidées sur le disque, ainsi que la fusion des résultats des serveurs distants lorsque le paramètre `distributed_aggregation_memory_efficient` est activé, consomme jusqu'à `1/256 * the_number_of_threads` de la quantité totale de RAM.

Lorsque l'agrégation externe est activée, s'il y a eu moins de `max_bytes_before_external_group_by` de données (c'est-à-dire que les données n'ont pas été vidées), la requête s'exécute aussi vite que sans agrégation externe. Si des données temporaires ont été vidées, le temps d'exécution sera plusieurs fois plus long (environ trois fois).

Si vous avez un `ORDER BY` avec un `LIMIT` après `GROUP BY`, la quantité de RAM utilisée dépend de la quantité de données dans `LIMIT`, et non de celle de la table entière. Mais si l'`ORDER BY` n'a pas de `LIMIT`, n'oubliez pas d'activer le tri externe (`max_bytes_before_external_sort`).

### Clause LIMIT BY {#limit-by-clause}

Une requête avec la clause `LIMIT n BY expressions` sélectionne les premières `n` lignes pour chaque valeur distincte de `expressions`. La clé de `LIMIT BY` peut contenir n'importe quel nombre d'[expressions](syntax.md#syntax-expressions).

ClickHouse prend en charge la syntaxe suivante:

- `LIMIT [offset_value, ]n BY expressions`
- `LIMIT n OFFSET offset_value BY expressions`

Pendant le traitement de la requête, ClickHouse sélectionne les données classées par clé de tri. La clé de tri est définie explicitement à l'aide de la clause [ORDER BY](#select-order-by) ou implicitement en tant que propriété du moteur de table. Puis ClickHouse applique `LIMIT n BY expressions` et renvoie les premières `n` lignes pour chaque combinaison distincte de `expressions`. Si `OFFSET` est spécifié, alors pour chaque bloc de données appartenant à une combinaison distincte de `expressions`, ClickHouse saute `offset_value` lignes depuis le début du bloc et renvoie au maximum `n` lignes en résultat. Si `offset_value` est plus grand que le nombre de lignes du bloc de données, ClickHouse renvoie zéro ligne du bloc.

`LIMIT BY` n'est pas lié à `LIMIT`. Ils peuvent tous deux être utilisés dans la même requête.

**Exemple**

Exemple de table:

``` sql
CREATE TABLE limit_by(id Int, val Int) ENGINE = Memory;
INSERT INTO limit_by values(1, 10), (1, 11), (1, 12), (2, 20), (2, 21);
```

Requête:

``` sql
SELECT * FROM limit_by ORDER BY id, val LIMIT 2 BY id
```

``` text
┌─id─┬─val─┐
│  1 │  10 │
│  1 │  11 │
│  2 │  20 │
│  2 │  21 │
└────┴─────┘
```

``` sql
SELECT * FROM limit_by ORDER BY id, val LIMIT 1, 2 BY id
```

``` text
┌─id─┬─val─┐
│  1 │  11 │
│  1 │  12 │
│  2 │  21 │
└────┴─────┘
```

La requête `SELECT * FROM limit_by ORDER BY id, val LIMIT 2 OFFSET 1 BY id` renvoie le même résultat.

La requête suivante renvoie les 5 principaux référents pour chaque paire `domain, device_type`, avec un maximum de 100 lignes au total (`LIMIT n BY + LIMIT`).

``` sql
SELECT
    domainWithoutWWW(URL) AS domain,
    domainWithoutWWW(REFERRER_URL) AS referrer,
    device_type,
    count() cnt
FROM hits
GROUP BY domain, referrer, device_type
ORDER BY cnt DESC
LIMIT 5 BY domain, device_type
LIMIT 100
```

### Clause HAVING {#having-clause}

Permet de filtrer le résultat reçu après GROUP BY, de façon similaire à la clause WHERE.
WHERE et HAVING diffèrent en ce que WHERE est exécuté avant l'agrégation (GROUP BY), tandis que HAVING est exécuté après.
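
Par exemple, une esquisse hypothétique (la table `hits` est supposée):

``` sql
-- WHERE filtre les lignes avant l'agrégation ;
-- HAVING filtre les groupes après l'agrégation.
SELECT
    domainWithoutWWW(URL) AS domain,
    count() AS c
FROM hits
WHERE EventDate = '2014-03-17'
GROUP BY domain
HAVING c > 100
```
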
Si l'agrégation n'est pas effectuée, HAVING ne peut pas être utilisé.

### Clause ORDER BY {#select-order-by}

La clause ORDER BY contient une liste d'expressions, chacune pouvant être suivie de DESC ou ASC (la direction de tri). Si la direction n'est pas spécifiée, ASC est supposé. ASC trie dans l'ordre croissant, et DESC dans l'ordre décroissant. La direction de tri s'applique à une seule expression, pas à la liste entière. Exemple: `ORDER BY Visits DESC, SearchPhrase`

Pour le tri par valeurs de chaîne, vous pouvez spécifier le classement (comparaison). Exemple: `ORDER BY SearchPhrase COLLATE 'tr'` - pour le tri par mot-clé dans l'ordre croissant, en utilisant l'alphabet turc, insensible à la casse, en supposant que les chaînes sont encodées en UTF-8. COLLATE peut être spécifié ou non pour chaque expression de l'ORDER BY, indépendamment. Si ASC ou DESC est spécifié, COLLATE est spécifié après. Lors de l'utilisation de COLLATE, le tri est toujours insensible à la casse.

Nous recommandons de n'utiliser COLLATE que pour le tri final d'un petit nombre de lignes, car le tri avec COLLATE est moins efficace que le tri normal par octets.

Les lignes qui ont des valeurs identiques pour la liste des expressions de tri sont sorties dans un ordre arbitraire, qui peut également être non déterministe (différent à chaque fois).
Si la clause ORDER BY est omise, l'ordre des lignes est également indéfini et peut également être non déterministe.

Ordre de tri de `NaN` et `NULL`:

- Avec le modificateur `NULLS FIRST` — d'abord `NULL`, puis `NaN`, puis les autres valeurs.
- Avec le modificateur `NULLS LAST` — d'abord les valeurs, puis `NaN`, puis `NULL`.
- Par défaut — identique au modificateur `NULLS LAST`.

Exemple:

Pour la table

``` text
┌─x─┬────y─┐
│ 1 │ ᴺᵁᴸᴸ │
│ 2 │    2 │
│ 1 │  nan │
│ 2 │    2 │
│ 3 │    4 │
│ 5 │    6 │
│ 6 │  nan │
│ 7 │ ᴺᵁᴸᴸ │
│ 6 │    7 │
│ 8 │    9 │
└───┴──────┘
```

Exécutez la requête `SELECT * FROM t_null_nan ORDER BY y NULLS FIRST` pour obtenir:

``` text
┌─x─┬────y─┐
│ 1 │ ᴺᵁᴸᴸ │
│ 7 │ ᴺᵁᴸᴸ │
│ 1 │  nan │
│ 6 │  nan │
│ 2 │    2 │
│ 2 │    2 │
│ 3 │    4 │
│ 5 │    6 │
│ 6 │    7 │
│ 8 │    9 │
└───┴──────┘
```

Lorsque des nombres à virgule flottante sont triés, les NaN sont séparés des autres valeurs. Quel que soit l'ordre de tri, les NaN arrivent à la fin. En d'autres termes, pour le tri ascendant, ils sont placés comme s'ils étaient plus grands que tous les autres nombres, tandis que pour le tri descendant, ils sont placés comme s'ils étaient plus petits que les autres.

Moins de RAM est utilisée si une limite suffisamment petite est spécifiée en plus de ORDER BY. Sinon, la quantité de mémoire dépensée est proportionnelle au volume de données à trier. Pour le traitement des requêtes distribuées, si GROUP BY est omis, le tri est partiellement effectué sur les serveurs distants et les résultats sont fusionnés sur le serveur demandeur. Cela signifie que pour le tri distribué, le volume de données à trier peut être supérieur à la quantité de mémoire d'un seul serveur.

S'il n'y a pas assez de RAM, il est possible d'effectuer le tri en mémoire externe (en créant des fichiers temporaires sur un disque). Utilisez le paramètre `max_bytes_before_external_sort` à cette fin. S'il est défini sur 0 (par défaut), le tri externe est désactivé. S'il est activé, lorsque le volume de données à trier atteint le nombre d'octets spécifié, les données collectées sont triées et déposées dans un fichier temporaire.
Une fois toutes les données lues, tous les fichiers triés sont fusionnés et les résultats sont produits. Les fichiers sont écrits dans le répertoire /var/lib/clickhouse/tmp/ (par défaut; utilisez le paramètre ‘tmp\_path’ de la configuration pour modifier cet emplacement).

L'exécution d'une requête peut utiliser plus de mémoire que ‘max\_bytes\_before\_external\_sort’. Pour cette raison, ce paramètre doit avoir une valeur significativement inférieure à ‘max\_memory\_usage’. Par exemple, si votre serveur dispose de 128 Go de RAM et que vous devez exécuter une seule requête, définissez ‘max\_memory\_usage’ à 100 Go, et ‘max\_bytes\_before\_external\_sort’ à 80 Go.

Le tri externe est beaucoup moins efficace que le tri dans la RAM.

### Clause SELECT {#select-select}

Les [expressions](syntax.md#syntax-expressions) spécifiées dans la clause `SELECT` sont calculées une fois terminées toutes les opérations des clauses décrites ci-dessus. Ces expressions fonctionnent comme si elles s'appliquaient à des lignes séparées du résultat. Si les expressions de la clause `SELECT` contiennent des fonctions d'agrégation, ClickHouse traite ces fonctions d'agrégation et les expressions utilisées comme leurs arguments lors de l'agrégation [GROUP BY](#select-group-by-clause).

Si vous souhaitez inclure toutes les colonnes dans le résultat, utilisez le symbole astérisque (`*`). Par exemple, `SELECT * FROM ...`.

Pour faire correspondre certaines colonnes du résultat à une expression régulière [re2](https://en.wikipedia.org/wiki/RE2_(software)), vous pouvez utiliser l'expression `COLUMNS`.

``` sql
COLUMNS('regexp')
```

Par exemple, considérez la table:

``` sql
CREATE TABLE default.col_names (aa Int8, ab Int8, bc Int8) ENGINE = TinyLog
```

La requête suivante sélectionne les données de toutes les colonnes contenant le symbole `a` dans leur nom.

``` sql
SELECT COLUMNS('a') FROM col_names
```

``` text
┌─aa─┬─ab─┐
│  1 │  1 │
└────┴────┘
```

Les colonnes sélectionnées ne sont pas renvoyées dans l'ordre alphabétique.

Vous pouvez utiliser plusieurs expressions `COLUMNS` dans une requête et leur appliquer des fonctions.

Exemple:

``` sql
SELECT COLUMNS('a'), COLUMNS('c'), toTypeName(COLUMNS('c')) FROM col_names
```

``` text
┌─aa─┬─ab─┬─bc─┬─toTypeName(bc)─┐
│  1 │  1 │  1 │ Int8           │
└────┴────┴────┴────────────────┘
```

Chaque colonne renvoyée par l'expression `COLUMNS` est passée à la fonction en tant qu'argument séparé. Vous pouvez également passer d'autres arguments à la fonction si elle les prend en charge. Soyez prudent lorsque vous utilisez des fonctions. Si une fonction ne prend pas en charge le nombre d'arguments que vous lui avez transmis, ClickHouse lève une exception.

Exemple:

``` sql
SELECT COLUMNS('a') + COLUMNS('c') FROM col_names
```

``` text
Received exception from server (version 19.14.1):
Code: 42. DB::Exception: Received from localhost:9000. DB::Exception: Number of arguments for function plus doesn't match: passed 3, should be 2.
```

Dans cet exemple, `COLUMNS('a')` renvoie deux colonnes: `aa` et `ab`. `COLUMNS('c')` renvoie la colonne `bc`. L'opérateur `+` ne peut pas s'appliquer à 3 arguments, donc ClickHouse lève une exception avec le message approprié.

Les colonnes qui correspondent à l'expression `COLUMNS` peuvent avoir différents types de données. Si `COLUMNS` ne correspond à aucune colonne et est la seule expression dans `SELECT`, ClickHouse lève une exception.
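
Esquisse supplémentaire: la correspondance re2 étant partielle, `^` permet d'ancrer le motif au début du nom (même table `col_names` supposée):

``` sql
-- Sélectionne les colonnes dont le nom commence par « a » : aa et ab.
SELECT COLUMNS('^a') FROM col_names
```
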
### Clause DISTINCT {#select-distinct}

Si DISTINCT est spécifié, une seule ligne sera conservée parmi tous les ensembles de lignes entièrement identiques du résultat.
Le résultat sera le même que si GROUP BY était spécifié sur tous les champs mentionnés dans SELECT, sans fonctions d'agrégation. Mais il y a plusieurs différences par rapport à GROUP BY:

- DISTINCT peut être appliqué avec GROUP BY.
- Lorsque ORDER BY est omis et que LIMIT est défini, la requête s'arrête immédiatement après la lecture du nombre requis de lignes différentes.
- Les blocs de données sont produits au fur et à mesure de leur traitement, sans attendre la fin de l'exécution de la requête entière.

DISTINCT n'est pas pris en charge si SELECT comporte au moins une colonne de type tableau.

`DISTINCT` fonctionne avec [NULL](syntax.md) comme si `NULL` était une valeur spécifique, et `NULL=NULL`. En d'autres termes, dans les résultats de `DISTINCT`, les différentes combinaisons avec `NULL` n'apparaissent qu'une seule fois.

ClickHouse prend en charge l'utilisation des clauses `DISTINCT` et `ORDER BY` pour différentes colonnes dans une même requête. La clause `DISTINCT` est exécutée avant la clause `ORDER BY`.

Exemple de table:

``` text
┌─a─┬─b─┐
│ 2 │ 1 │
│ 1 │ 2 │
│ 3 │ 3 │
│ 2 │ 4 │
└───┴───┘
```

Lors de la sélection de données avec la requête `SELECT DISTINCT a FROM t1 ORDER BY b ASC`, nous obtenons le résultat suivant:

``` text
┌─a─┐
│ 2 │
│ 1 │
│ 3 │
└───┘
```

Si nous changeons la direction de tri `SELECT DISTINCT a FROM t1 ORDER BY b DESC`, nous obtenons le résultat suivant:

``` text
┌─a─┐
│ 3 │
│ 1 │
│ 2 │
└───┘
```

La ligne `2, 4` a été écartée avant le tri.

Prenez en compte cette spécificité d'implémentation lors de la programmation des requêtes.

### Clause LIMIT {#limit-clause}

`LIMIT m` vous permet de sélectionner les premières `m` lignes du résultat.

`LIMIT n, m` vous permet de sélectionner les premières `m` lignes du résultat après avoir sauté les premières `n` lignes. La syntaxe `LIMIT m OFFSET n` est également prise en charge.

`n` et `m` doivent être des entiers non négatifs.

S'il n'y a pas de clause `ORDER BY` qui trie explicitement les résultats, le résultat peut être arbitraire et non déterministe.

### Clause UNION ALL {#union-all-clause}

Vous pouvez utiliser UNION ALL pour combiner n'importe quel nombre de requêtes. Exemple:

``` sql
SELECT CounterID, 1 AS table, toInt64(count()) AS c
    FROM test.hits
    GROUP BY CounterID

UNION ALL

SELECT CounterID, 2 AS table, sum(Sign) AS c
    FROM test.visits
    GROUP BY CounterID
    HAVING c > 0
```

Seul UNION ALL est pris en charge. L'UNION régulière (UNION DISTINCT) n'est pas prise en charge. Si vous avez besoin d'UNION DISTINCT, vous pouvez écrire SELECT DISTINCT à partir d'une sous-requête contenant UNION ALL (voir l'esquisse à la fin de cette section).

Les requêtes qui font partie de l'UNION peuvent être exécutées simultanément et leurs résultats peuvent être mélangés.

La structure des résultats (le nombre et le type de colonnes) doit correspondre entre les requêtes. Mais les noms des colonnes peuvent différer. Dans ce cas, les noms de colonnes du résultat final seront tirés de la première requête. La conversion de type est effectuée pour les unions. Par exemple, si deux requêtes combinées ont le même champ avec des types non-`Nullable` et `Nullable` compatibles, le résultat de `UNION ALL` a un champ de type `Nullable`.

Les requêtes qui font partie de UNION ALL ne peuvent pas être placées entre parenthèses.
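
Par exemple, pour émuler UNION DISTINCT comme indiqué plus haut, une esquisse hypothétique (les tables `t1` et `t2` sont supposées):

``` sql
-- SELECT DISTINCT appliqué à une sous-requête contenant UNION ALL.
SELECT DISTINCT CounterID
FROM
(
    SELECT CounterID FROM t1
    UNION ALL
    SELECT CounterID FROM t2
)
```
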
ORDER BY et LIMIT sont appliqués à des requêtes distinctes, pas au résultat final. Si vous devez appliquer une conversion au résultat final, vous pouvez placer toutes les requêtes avec UNION ALL dans une sous-requête de la clause FROM. - -### Dans OUTFILE Clause {#into-outfile-clause} - -Ajouter l' `INTO OUTFILE filename` clause (où filename est un littéral de chaîne) pour rediriger la sortie de la requête vers le fichier spécifié. -Contrairement à MySQL, le fichier est créé du côté client. La requête échouera si un fichier portant le même nom existe déjà. -Cette fonctionnalité est disponible dans le client de ligne de commande et clickhouse-local (une requête envoyée via L'interface HTTP échouera). - -Le format de sortie par défaut est TabSeparated (le même que dans le mode batch client de ligne de commande). - -### FORMAT de la Clause {#format-clause} - -Spécifier ‘FORMAT format’ pour obtenir des données dans n'importe quel format spécifié. -Vous pouvez l'utiliser pour plus de commodité, ou pour créer des vidages. -Pour plus d'informations, consultez la section “Formats”. -Si la clause FORMAT est omise, le format par défaut est utilisé, ce qui dépend à la fois des paramètres et de l'interface utilisée pour accéder à la base de données. Pour L'interface HTTP et le client de ligne de commande en mode batch, le format par défaut est TabSeparated. Pour le client de ligne de commande en mode interactif, le format par défaut est PrettyCompact (il a des tables attrayantes et compactes). - -Lors de l'utilisation du client de ligne de commande, les données sont transmises au client dans un format efficace interne. Le client interprète indépendamment la clause de FORMAT de la requête et formate les données elles-mêmes (soulageant ainsi le réseau et le serveur de la charge). - -### Dans les opérateurs {#select-in-operators} - -Le `IN`, `NOT IN`, `GLOBAL IN`, et `GLOBAL NOT IN` les opérateurs sont traitées séparément, car leur fonctionnalité est assez riche. - -Le côté gauche de l'opérateur, soit une seule colonne ou un tuple. - -Exemple: - -``` sql -SELECT UserID IN (123, 456) FROM ... -SELECT (CounterID, UserID) IN ((34, 123), (101500, 456)) FROM ... -``` - -Si le côté gauche est une colonne unique qui est dans l'index, et le côté droit est un ensemble de constantes, le système utilise l'index pour le traitement de la requête. - -Don't list too many values explicitly (i.e. millions). If a data set is large, put it in a temporary table (for example, see the section “External data for query processing”), puis utiliser une sous-requête. - -Le côté droit de l'opérateur peut être un ensemble d'expressions constantes, un ensemble de tuples avec des expressions constantes (illustrées dans les exemples ci-dessus), ou le nom d'une table de base de données ou une sous-requête SELECT entre parenthèses. - -Si le côté droit de l'opérateur est le nom d'une table (par exemple, `UserID IN users`), ceci est équivalent à la sous-requête `UserID IN (SELECT * FROM users)`. Utilisez ceci lorsque vous travaillez avec des données externes envoyées avec la requête. Par exemple, la requête peut être envoyée avec un ensemble d'ID utilisateur chargés dans le ‘users’ table temporaire, qui doit être filtrée. - -Si le côté droit de l'opérateur est un nom de table qui a le moteur Set (un ensemble de données préparé qui est toujours en RAM), l'ensemble de données ne sera pas créé à nouveau pour chaque requête. - -La sous-requête peut spécifier plusieurs colonnes pour filtrer les tuples. 
-Exemple: - -``` sql -SELECT (CounterID, UserID) IN (SELECT CounterID, UserID FROM ...) FROM ... -``` - -Les colonnes à gauche et à droite de l'opérateur doit avoir le même type. - -L'opérateur IN et la sous-requête peuvent se produire dans n'importe quelle partie de la requête, y compris dans les fonctions d'agrégation et les fonctions lambda. -Exemple: - -``` sql -SELECT - EventDate, - avg(UserID IN - ( - SELECT UserID - FROM test.hits - WHERE EventDate = toDate('2014-03-17') - )) AS ratio -FROM test.hits -GROUP BY EventDate -ORDER BY EventDate ASC -``` - -``` text -┌──EventDate─┬────ratio─┐ -│ 2014-03-17 │ 1 │ -│ 2014-03-18 │ 0.807696 │ -│ 2014-03-19 │ 0.755406 │ -│ 2014-03-20 │ 0.723218 │ -│ 2014-03-21 │ 0.697021 │ -│ 2014-03-22 │ 0.647851 │ -│ 2014-03-23 │ 0.648416 │ -└────────────┴──────────┘ -``` - -Pour chaque jour après le 17 mars, comptez le pourcentage de pages vues par les utilisateurs qui ont visité le site le 17 mars. -Une sous-requête dans la clause est toujours exécuter une seule fois sur un seul serveur. Il n'y a pas de sous-requêtes dépendantes. - -#### Le traitement NULL {#null-processing-1} - -Pendant le traitement de la demande, l'opérateur n'assume que le résultat d'une opération avec [NULL](syntax.md) est toujours égale à `0` indépendamment de savoir si `NULL` est sur le côté droit ou gauche de l'opérateur. `NULL` les valeurs ne sont incluses dans aucun jeu de données, ne correspondent pas entre elles et ne peuvent pas être comparées. - -Voici un exemple avec le `t_null` table: - -``` text -┌─x─┬────y─┐ -│ 1 │ ᴺᵁᴸᴸ │ -│ 2 │ 3 │ -└───┴──────┘ -``` - -L'exécution de la requête `SELECT x FROM t_null WHERE y IN (NULL,3)` vous donne le résultat suivant: - -``` text -┌─x─┐ -│ 2 │ -└───┘ -``` - -Vous pouvez voir que la ligne dans laquelle `y = NULL` est jeté hors de résultats de la requête. C'est parce que ClickHouse ne peut pas décider si `NULL` est inclus dans le `(NULL,3)` ensemble, les retours `0` comme le résultat de l'opération, et `SELECT` exclut cette ligne de la sortie finale. - -``` sql -SELECT y IN (NULL, 3) -FROM t_null -``` - -``` text -┌─in(y, tuple(NULL, 3))─┐ -│ 0 │ -│ 1 │ -└───────────────────────┘ -``` - -#### Sous-Requêtes Distribuées {#select-distributed-subqueries} - -Il y a deux options pour IN-S avec des sous-requêtes (similaires aux jointures): normal `IN` / `JOIN` et `GLOBAL IN` / `GLOBAL JOIN`. Ils diffèrent dans la façon dont ils sont exécutés pour le traitement des requêtes distribuées. - -!!! attention "Attention" - Rappelez-vous que les algorithmes décrits ci-dessous peuvent travailler différemment en fonction de la [paramètre](../operations/settings/settings.md) `distributed_product_mode` paramètre. - -Lors de l'utilisation de l'IN régulier, la requête est envoyée à des serveurs distants, et chacun d'eux exécute les sous-requêtes dans le `IN` ou `JOIN` clause. - -Lors de l'utilisation de `GLOBAL IN` / `GLOBAL JOINs`, d'abord toutes les sous-requêtes sont exécutées pour `GLOBAL IN` / `GLOBAL JOINs`, et les résultats sont recueillis dans des tableaux temporaires. Ensuite, les tables temporaires sont envoyés à chaque serveur distant, où les requêtes sont exécutées à l'aide temporaire de données. - -Pour une requête non distribuée, utilisez `IN` / `JOIN`. - -Soyez prudent lorsque vous utilisez des sous-requêtes dans le `IN` / `JOIN` clauses pour le traitement des requêtes distribuées. - -Regardons quelques exemples. Supposons que chaque serveur du cluster a un **local\_table**. 
Chaque serveur dispose également d'une **table distributed\_table** table avec le **Distribué** type, qui regarde tous les serveurs du cluster. - -Pour une requête à l' **table distributed\_table**, la requête sera envoyée à tous les serveurs distants et exécutée sur eux en utilisant le **local\_table**. - -Par exemple, la requête - -``` sql -SELECT uniq(UserID) FROM distributed_table -``` - -sera envoyé à tous les serveurs distants - -``` sql -SELECT uniq(UserID) FROM local_table -``` - -et l'exécuter sur chacun d'eux en parallèle, jusqu'à ce qu'il atteigne le stade où les résultats intermédiaires peuvent être combinés. Ensuite, les résultats intermédiaires seront retournés au demandeur de serveur et de fusion, et le résultat final sera envoyé au client. - -Examinons maintenant une requête avec IN: - -``` sql -SELECT uniq(UserID) FROM distributed_table WHERE CounterID = 101500 AND UserID IN (SELECT UserID FROM local_table WHERE CounterID = 34) -``` - -- Calcul de l'intersection des audiences de deux sites. - -Cette requête sera envoyée à tous les serveurs distants - -``` sql -SELECT uniq(UserID) FROM local_table WHERE CounterID = 101500 AND UserID IN (SELECT UserID FROM local_table WHERE CounterID = 34) -``` - -En d'autres termes, l'ensemble de données de la clause IN sera collecté sur chaque serveur indépendamment, uniquement à travers les données stockées localement sur chacun des serveurs. - -Cela fonctionnera correctement et de manière optimale si vous êtes prêt pour ce cas et que vous avez réparti les données entre les serveurs de cluster de telle sorte que les données d'un seul ID utilisateur résident entièrement sur un seul serveur. Dans ce cas, toutes les données nécessaires seront disponibles localement sur chaque serveur. Sinon, le résultat sera erroné. Nous nous référons à cette variation de la requête que “local IN”. - -Pour corriger le fonctionnement de la requête lorsque les données sont réparties aléatoirement sur les serveurs de cluster, vous pouvez spécifier **table distributed\_table** à l'intérieur d'une sous-requête. La requête ressemblerait à ceci: - -``` sql -SELECT uniq(UserID) FROM distributed_table WHERE CounterID = 101500 AND UserID IN (SELECT UserID FROM distributed_table WHERE CounterID = 34) -``` - -Cette requête sera envoyée à tous les serveurs distants - -``` sql -SELECT uniq(UserID) FROM local_table WHERE CounterID = 101500 AND UserID IN (SELECT UserID FROM distributed_table WHERE CounterID = 34) -``` - -La sous-requête commencera à s'exécuter sur chaque serveur distant. Étant donné que la sous-requête utilise une table distribuée, la sous-requête qui se trouve sur chaque serveur distant sera renvoyée à chaque serveur distant comme - -``` sql -SELECT UserID FROM local_table WHERE CounterID = 34 -``` - -Par exemple, si vous avez un cluster de 100 SERVEURS, l'exécution de la requête entière nécessitera 10 000 requêtes élémentaires, ce qui est généralement considéré comme inacceptable. - -Dans de tels cas, vous devez toujours utiliser GLOBAL IN au lieu de IN. Voyons comment cela fonctionne pour la requête - -``` sql -SELECT uniq(UserID) FROM distributed_table WHERE CounterID = 101500 AND UserID GLOBAL IN (SELECT UserID FROM distributed_table WHERE CounterID = 34) -``` - -Le serveur demandeur exécutera la sous requête - -``` sql -SELECT UserID FROM distributed_table WHERE CounterID = 34 -``` - -et le résultat sera mis dans une table temporaire en RAM. 
Ensuite, la demande sera envoyée à chaque serveur distant - -``` sql -SELECT uniq(UserID) FROM local_table WHERE CounterID = 101500 AND UserID GLOBAL IN _data1 -``` - -et la table temporaire `_data1` sera envoyé à chaque serveur distant avec la requête (le nom de la table temporaire est défini par l'implémentation). - -Ceci est plus optimal que d'utiliser la normale dans. Cependant, gardez les points suivants à l'esprit: - -1. Lors de la création d'une table temporaire, les données ne sont pas uniques. Pour réduire le volume de données transmises sur le réseau, spécifiez DISTINCT dans la sous-requête. (Vous n'avez pas besoin de le faire pour un IN normal.) -2. La table temporaire sera envoyé à tous les serveurs distants. La Transmission ne tient pas compte de la topologie du réseau. Par exemple, si 10 serveurs distants résident dans un centre de données très distant par rapport au serveur demandeur, les données seront envoyées 10 fois sur le canal au centre de données distant. Essayez d'éviter les grands ensembles de données lorsque vous utilisez GLOBAL IN. -3. Lors de la transmission de données à des serveurs distants, les restrictions sur la bande passante réseau ne sont pas configurables. Vous pourriez surcharger le réseau. -4. Essayez de distribuer les données entre les serveurs afin que vous n'ayez pas besoin D'utiliser GLOBAL IN sur une base régulière. -5. Si vous devez utiliser GLOBAL in souvent, planifiez l'emplacement du cluster ClickHouse de sorte qu'un seul groupe de répliques ne réside pas dans plus d'un centre de données avec un réseau rapide entre eux, de sorte qu'une requête puisse être traitée entièrement dans un seul centre de données. - -Il est également judicieux de spécifier une table locale dans le `GLOBAL IN` clause, dans le cas où cette table locale est uniquement disponible sur le serveur demandeur et que vous souhaitez utiliser les données de celui-ci sur des serveurs distants. - -### Les Valeurs Extrêmes {#extreme-values} - -En plus des résultats, vous pouvez également obtenir des valeurs minimales et maximales pour les colonnes de résultats. Pour ce faire, définissez la **extrême** réglage sur 1. Les Minimums et les maximums sont calculés pour les types numériques, les dates et les dates avec des heures. Pour les autres colonnes, les valeurs par défaut sont sorties. - -An extra two rows are calculated – the minimums and maximums, respectively. These extra two rows are output in `JSON*`, `TabSeparated*`, et `Pretty*` [format](../interfaces/formats.md), séparés des autres lignes. Ils ne sont pas Produits pour d'autres formats. - -Dans `JSON*` formats, les valeurs extrêmes sont sorties dans un ‘extremes’ champ. Dans `TabSeparated*` formats, la ligne vient après le résultat principal, et après ‘totals’ si elle est présente. Elle est précédée par une ligne vide (après les autres données). Dans `Pretty*` formats, la ligne est sortie comme une table séparée après le résultat principal, et après `totals` si elle est présente. - -Les valeurs extrêmes sont calculées pour les lignes avant `LIMIT` mais après `LIMIT BY`. Cependant, lors de l'utilisation de `LIMIT offset, size`, les lignes avant de les `offset` sont inclus dans `extremes`. Dans les requêtes de flux, le résultat peut également inclure un petit nombre de lignes qui ont traversé `LIMIT`. - -### Note {#notes} - -Le `GROUP BY` et `ORDER BY` les clauses ne supportent pas les arguments positionnels. Cela contredit MySQL, mais est conforme à SQL standard. 
-Exemple, `GROUP BY 1, 2` will be interpreted as grouping by constants (i.e. aggregation of all rows into one). - -Vous pouvez utiliser des synonymes (`AS` alias) dans n'importe quelle partie d'une requête. - -Vous pouvez mettre un astérisque dans quelque partie de la requête au lieu d'une expression. Lorsque la requête est analysée, l'astérisque est étendu à une liste de toutes les colonnes `MATERIALIZED` et `ALIAS` colonne). Il n'y a que quelques cas où l'utilisation d'un astérisque est justifiée: - -- Lors de la création d'un vidage de table. -- Pour les tables contenant seulement quelques colonnes, comme les tables système. -- Pour obtenir des informations sur ce que sont les colonnes dans une table. Dans ce cas, la valeur `LIMIT 1`. Mais il est préférable d'utiliser la `DESC TABLE` requête. -- Quand il y a une forte filtration sur un petit nombre de colonnes en utilisant `PREWHERE`. -- Dans les sous-requêtes (puisque les colonnes qui ne sont pas nécessaires pour la requête externe sont exclues des sous-requêtes). - -Dans tous les autres cas, nous ne recommandons pas d'utiliser l'astérisque, car il ne vous donne que les inconvénients d'un SGBD colonnaire au lieu des avantages. En d'autres termes, l'utilisation de l'astérisque n'est pas recommandée. - -[Article Original](https://clickhouse.tech/docs/en/query_language/select/) diff --git a/docs/fr/query_language/table_functions/index.md b/docs/fr/query_language/table_functions/index.md deleted file mode 100644 index fc227e2c18a..00000000000 --- a/docs/fr/query_language/table_functions/index.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -machine_translated: true ---- - -# Les Fonctions De Table {#table-functions} - -Les fonctions de Table sont des méthodes pour construire des tables. - -Vous pouvez utiliser les fonctions de table dans: - -- [FROM](../select.md#select-from) la clause de la `SELECT` requête. - - The method for creating a temporary table that is available only in the current query. The table is deleted when the query finishes. - -- [Créer une TABLE en tant que \< table\_function ()\>](../create.md#create-table-query) requête. - - It's one of the methods of creating a table. - -!!! warning "Avertissement" - Vous ne pouvez pas utiliser les fonctions de table si [allow\_ddl](../../operations/settings/permissions_for_queries.md#settings_allow_ddl) paramètre est désactivé. - -| Fonction | Description | -|-----------------------|--------------------------------------------------------------------------------------------------------------------------------| -| [fichier](file.md) | Crée un [Fichier](../../operations/table_engines/file.md)-moteur de table. | -| [fusionner](merge.md) | Crée un [Fusionner](../../operations/table_engines/merge.md)-moteur de table. | -| [nombre](numbers.md) | Crée une table avec une seule colonne remplie de nombres entiers. | -| [distant](remote.md) | Vous permet d'accéder à des serveurs distants sans [Distribué](../../operations/table_engines/distributed.md)-moteur de table. | -| [URL](url.md) | Crée un [URL](../../operations/table_engines/url.md)-moteur de table. | -| [mysql](mysql.md) | Crée un [MySQL](../../operations/table_engines/mysql.md)-moteur de table. | -| [jdbc](jdbc.md) | Crée un [JDBC](../../operations/table_engines/jdbc.md)-moteur de table. | -| [ODBC](odbc.md) | Crée un [ODBC](../../operations/table_engines/odbc.md)-moteur de table. | -| [hdfs](hdfs.md) | Crée un [HDFS](../../operations/table_engines/hdfs.md)-moteur de table. 
| - -[Article Original](https://clickhouse.tech/docs/en/query_language/table_functions/) diff --git a/docs/fr/query_language/agg_functions/combinators.md b/docs/fr/sql_reference/aggregate_functions/combinators.md similarity index 93% rename from docs/fr/query_language/agg_functions/combinators.md rename to docs/fr/sql_reference/aggregate_functions/combinators.md index bbf28b12c96..7dce4a46de3 100644 --- a/docs/fr/query_language/agg_functions/combinators.md +++ b/docs/fr/sql_reference/aggregate_functions/combinators.md @@ -1,8 +1,11 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 37 +toc_title: "Combinateurs de fonction d'agr\xE9gat" --- -# Combinateurs de fonction d'agrégat {#aggregate_functions_combinators} +# Combinateurs De Fonction D'Agrégat {#aggregate_functions_combinators} Le nom d'une fonction d'agrégat peut avoir un suffixe ajouté. Cela change la façon dont la fonction d'agrégation fonctionne. @@ -30,9 +33,9 @@ Si vous appliquez ce combinateur, la fonction d'agrégation ne renvoie pas la va Pour travailler avec ces états, utilisez: -- [AggregatingMergeTree](../../operations/table_engines/aggregatingmergetree.md) tableau moteur. -- [finalizeAggregation](../functions/other_functions.md#function-finalizeaggregation) fonction. -- [runningAccumulate](../functions/other_functions.md#function-runningaccumulate) fonction. +- [AggregatingMergeTree](../../engines/table_engines/mergetree_family/aggregatingmergetree.md) tableau moteur. +- [finalizeAggregation](../../sql_reference/functions/other_functions.md#function-finalizeaggregation) fonction. +- [runningAccumulate](../../sql_reference/functions/other_functions.md#function-runningaccumulate) fonction. - [-Fusionner](#aggregate_functions_combinators_merge) combinator. - [-MergeState](#aggregate_functions_combinators_mergestate) combinator. diff --git a/docs/fr/query_language/agg_functions/index.md b/docs/fr/sql_reference/aggregate_functions/index.md similarity index 92% rename from docs/fr/query_language/agg_functions/index.md rename to docs/fr/sql_reference/aggregate_functions/index.md index c8d24c2c241..4be1632a29e 100644 --- a/docs/fr/query_language/agg_functions/index.md +++ b/docs/fr/sql_reference/aggregate_functions/index.md @@ -1,5 +1,9 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_folder_title: Aggregate Functions +toc_priority: 33 +toc_title: Introduction --- # Les fonctions d'agrégation {#aggregate-functions} diff --git a/docs/fr/query_language/agg_functions/parametric_functions.md b/docs/fr/sql_reference/aggregate_functions/parametric_functions.md similarity index 94% rename from docs/fr/query_language/agg_functions/parametric_functions.md rename to docs/fr/sql_reference/aggregate_functions/parametric_functions.md index 51a2c755cdc..7611c04b0de 100644 --- a/docs/fr/query_language/agg_functions/parametric_functions.md +++ b/docs/fr/sql_reference/aggregate_functions/parametric_functions.md @@ -1,8 +1,11 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 38 +toc_title: "Fonctions d'agr\xE9gat param\xE9triques" --- -# Fonctions d'agrégat paramétriques {#aggregate_functions_parametric} +# Fonctions D'Agrégat Paramétriques {#aggregate_functions_parametric} Some aggregate functions can accept not only argument columns (used for compression), but a set of parameters – constants for initialization. The syntax is two pairs of brackets instead of one. 
The first is for parameters, and the second is for arguments. @@ -23,7 +26,7 @@ Les fonctions utilise [Un Algorithme D'Arbre De Décision Parallèle En Continu] **Valeurs renvoyées** -- [Tableau](../../data_types/array.md) de [Tuple](../../data_types/tuple.md) de le format suivant: +- [Tableau](../../sql_reference/data_types/array.md) de [Tuple](../../sql_reference/data_types/tuple.md) de le format suivant: ``` [(lower_1, upper_1, height_1), ... (lower_N, upper_N, height_N)] @@ -50,7 +53,7 @@ FROM ( └─────────────────────────────────────────────────────────────────────────┘ ``` -Vous pouvez visualiser un histogramme avec la [bar](../functions/other_functions.md#function-bar) fonction, par exemple: +Vous pouvez visualiser un histogramme avec la [bar](../../sql_reference/functions/other_functions.md#function-bar) fonction, par exemple: ``` sql WITH histogram(5)(rand() % 100) AS hist @@ -92,7 +95,7 @@ sequenceMatch(pattern)(timestamp, cond1, cond2, ...) - `pattern` — Pattern string. See [Syntaxe du motif](#sequence-function-pattern-syntax). -- `timestamp` — Column considered to contain time data. Typical data types are `Date` et `DateTime`. Vous pouvez également utiliser les prises en charge [UInt](../../data_types/int_uint.md) types de données. +- `timestamp` — Column considered to contain time data. Typical data types are `Date` et `DateTime`. Vous pouvez également utiliser les prises en charge [UInt](../../sql_reference/data_types/int_uint.md) types de données. - `cond1`, `cond2` — Conditions that describe the chain of events. Data type: `UInt8`. Vous pouvez passer jusqu'à 32 arguments de condition. La fonction ne prend en compte que les événements décrits dans ces conditions. Si la séquence contient des données qui ne sont pas décrites dans une condition, la fonction les ignore. @@ -179,7 +182,7 @@ sequenceCount(pattern)(timestamp, cond1, cond2, ...) - `pattern` — Pattern string. See [Syntaxe du motif](#sequence-function-pattern-syntax). -- `timestamp` — Column considered to contain time data. Typical data types are `Date` et `DateTime`. Vous pouvez également utiliser les prises en charge [UInt](../../data_types/int_uint.md) types de données. +- `timestamp` — Column considered to contain time data. Typical data types are `Date` et `DateTime`. Vous pouvez également utiliser les prises en charge [UInt](../../sql_reference/data_types/int_uint.md) types de données. - `cond1`, `cond2` — Conditions that describe the chain of events. Data type: `UInt8`. Vous pouvez passer jusqu'à 32 arguments de condition. La fonction ne prend en compte que les événements décrits dans ces conditions. Si la séquence contient des données qui ne sont pas décrites dans une condition, la fonction les ignore. @@ -243,8 +246,8 @@ windowFunnel(window, [mode])(timestamp, cond1, cond2, ..., condN) - `window` — Length of the sliding window in seconds. - `mode` - C'est un argument facultatif. - `'strict'` - Lorsque le `'strict'` est défini, le windowFunnel() applique des conditions uniquement pour les valeurs uniques. -- `timestamp` — Name of the column containing the timestamp. Data types supported: [Date](../../data_types/date.md), [DateTime](../../data_types/datetime.md#data_type-datetime) et d'autres types entiers non signés (notez que même si timestamp prend en charge le `UInt64` type, sa valeur ne peut pas dépasser le maximum Int64, qui est 2^63 - 1). -- `cond` — Conditions or data describing the chain of events. [UInt8](../../data_types/int_uint.md). +- `timestamp` — Name of the column containing the timestamp. 
Data types supported: [Date](../../sql_reference/data_types/date.md), [DateTime](../../sql_reference/data_types/datetime.md#data_type-datetime) et d'autres types entiers non signés (notez que même si timestamp prend en charge le `UInt64` type, sa valeur ne peut pas dépasser le maximum Int64, qui est 2^63 - 1). +- `cond` — Conditions or data describing the chain of events. [UInt8](../../sql_reference/data_types/int_uint.md). **Valeur renvoyée** @@ -313,7 +316,7 @@ Résultat: ## rétention {#retention} La fonction prend comme arguments un ensemble de conditions de 1 à 32 arguments de type `UInt8` qui indiquent si une certaine condition est remplie pour l'événement. -Toute condition peut être spécifiée comme argument (comme dans [WHERE](../../query_language/select.md#select-where)). +Toute condition peut être spécifiée comme argument (comme dans [WHERE](../../sql_reference/statements/select.md#select-where)). Les conditions, à l'exception de la première, s'appliquent par paires: le résultat de la seconde sera vrai si la première et la deuxième sont remplies, le troisième si la première et la fird sont vraies, etc. diff --git a/docs/fr/query_language/agg_functions/reference.md b/docs/fr/sql_reference/aggregate_functions/reference.md similarity index 92% rename from docs/fr/query_language/agg_functions/reference.md rename to docs/fr/sql_reference/aggregate_functions/reference.md index 347b68dd3f4..6a525bc37f6 100644 --- a/docs/fr/query_language/agg_functions/reference.md +++ b/docs/fr/sql_reference/aggregate_functions/reference.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 36 +toc_title: "R\xE9f\xE9rence" --- # La Fonction De Référence {#function-reference} @@ -22,9 +25,9 @@ La fonction peut prendre: **Valeur renvoyée** - Si la fonction est appelée sans paramètres, il compte le nombre de lignes. -- Si l' [expression](../syntax.md#syntax-expressions) est passé, alors la fonction compte combien de fois cette expression retournée not null. Si l'expression renvoie un [Nullable](../../data_types/nullable.md)- tapez la valeur, puis le résultat de `count` séjours pas `Nullable`. La fonction renvoie 0 si l'expression est retournée `NULL` pour toutes les lignes. +- Si l' [expression](../syntax.md#syntax-expressions) est passé, alors la fonction compte combien de fois cette expression retournée not null. Si l'expression renvoie un [Nullable](../../sql_reference/data_types/nullable.md)- tapez la valeur, puis le résultat de `count` séjours pas `Nullable`. La fonction renvoie 0 si l'expression est retournée `NULL` pour toutes les lignes. -Dans les deux cas le type de la valeur renvoyée est [UInt64](../../data_types/int_uint.md). +Dans les deux cas le type de la valeur renvoyée est [UInt64](../../sql_reference/data_types/int_uint.md). **Détail** @@ -243,7 +246,7 @@ binary decimal ## groupBitmap {#groupbitmap} -Calculs Bitmap ou agrégés à partir d'une colonne entière non signée, retour cardinalité de type UInt64, si Ajouter suffixe-State, puis retour [objet bitmap](../functions/bitmap_functions.md). +Calculs Bitmap ou agrégés à partir d'une colonne entière non signée, retour cardinalité de type UInt64, si Ajouter suffixe-State, puis retour [objet bitmap](../../sql_reference/functions/bitmap_functions.md). ``` sql groupBitmap(expr) @@ -379,7 +382,7 @@ skewPop(expr) **Valeur renvoyée** -The skewness of the given distribution. Type — [Float64](../../data_types/float.md) +The skewness of the given distribution. 
Type — [Float64](../../sql_reference/data_types/float.md) **Exemple** @@ -403,7 +406,7 @@ skewSamp(expr) **Valeur renvoyée** -The skewness of the given distribution. Type — [Float64](../../data_types/float.md). Si `n <= 1` (`n` est la taille de l'échantillon), alors la fonction renvoie `nan`. +The skewness of the given distribution. Type — [Float64](../../sql_reference/data_types/float.md). Si `n <= 1` (`n` est la taille de l'échantillon), alors la fonction renvoie `nan`. **Exemple** @@ -425,7 +428,7 @@ kurtPop(expr) **Valeur renvoyée** -The kurtosis of the given distribution. Type — [Float64](../../data_types/float.md) +The kurtosis of the given distribution. Type — [Float64](../../sql_reference/data_types/float.md) **Exemple** @@ -449,7 +452,7 @@ kurtSamp(expr) **Valeur renvoyée** -The kurtosis of the given distribution. Type — [Float64](../../data_types/float.md). Si `n <= 1` (`n` la taille de l'échantillon), alors la fonction renvoie `nan`. +The kurtosis of the given distribution. Type — [Float64](../../sql_reference/data_types/float.md). Si `n <= 1` (`n` la taille de l'échantillon), alors la fonction renvoie `nan`. **Exemple** @@ -540,7 +543,7 @@ La fonction prend un nombre variable de paramètres. Les paramètres peuvent êt **Valeur renvoyée** -- A [UInt64](../../data_types/int_uint.md)numéro de type. +- A [UInt64](../../sql_reference/data_types/int_uint.md)numéro de type. **Détails de mise en œuvre** @@ -581,7 +584,7 @@ La fonction prend un nombre variable de paramètres. Les paramètres peuvent êt **Valeur renvoyée** -- Nombre [UInt64](../../data_types/int_uint.md)numéro de type. +- Nombre [UInt64](../../sql_reference/data_types/int_uint.md)numéro de type. **Détails de mise en œuvre** @@ -629,7 +632,7 @@ La fonction prend un nombre variable de paramètres. Les paramètres peuvent êt **Valeur renvoyée** -- A [UInt64](../../data_types/int_uint.md)numéro de type. +- A [UInt64](../../sql_reference/data_types/int_uint.md)numéro de type. **Détails de mise en œuvre** @@ -871,7 +874,7 @@ Alias: `median`. **Paramètre** - `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` la valeur dans la plage de `[0.01, 0.99]`. Valeur par défaut: 0.5. À `level=0.5` la fonction calcule [médian](https://en.wikipedia.org/wiki/Median). -- `expr` — Expression over the column values resulting in numeric [types de données](../../data_types/index.md#data_types), [Date](../../data_types/date.md) ou [DateTime](../../data_types/datetime.md). +- `expr` — Expression over the column values resulting in numeric [types de données](../../sql_reference/data_types/index.md#data_types), [Date](../../sql_reference/data_types/date.md) ou [DateTime](../../sql_reference/data_types/datetime.md). **Valeur renvoyée** @@ -879,9 +882,9 @@ Alias: `median`. Type: -- [Float64](../../data_types/float.md) pour l'entrée de type de données numériques. -- [Date](../../data_types/date.md) si les valeurs d'entrée ont le `Date` type. -- [DateTime](../../data_types/datetime.md) si les valeurs d'entrée ont le `DateTime` type. +- [Float64](../../sql_reference/data_types/float.md) pour l'entrée de type de données numériques. +- [Date](../../sql_reference/data_types/date.md) si les valeurs d'entrée ont le `Date` type. +- [DateTime](../../sql_reference/data_types/datetime.md) si les valeurs d'entrée ont le `DateTime` type. **Exemple** @@ -934,7 +937,7 @@ Alias: `medianDeterministic`. **Paramètre** - `level` — Level of quantile. Optional parameter. 
Constant floating-point number from 0 to 1. We recommend using a `level` la valeur dans la plage de `[0.01, 0.99]`. Valeur par défaut: 0.5. À `level=0.5` la fonction calcule [médian](https://en.wikipedia.org/wiki/Median). -- `expr` — Expression over the column values resulting in numeric [types de données](../../data_types/index.md#data_types), [Date](../../data_types/date.md) ou [DateTime](../../data_types/datetime.md). +- `expr` — Expression over the column values resulting in numeric [types de données](../../sql_reference/data_types/index.md#data_types), [Date](../../sql_reference/data_types/date.md) ou [DateTime](../../sql_reference/data_types/datetime.md). - `determinator` — Number whose hash is used instead of a random number generator in the reservoir sampling algorithm to make the result of sampling deterministic. As a determinator you can use any deterministic positive number, for example, a user id or an event id. If the same determinator value occures too often, the function works incorrectly. **Valeur renvoyée** @@ -943,9 +946,9 @@ Alias: `medianDeterministic`. Type: -- [Float64](../../data_types/float.md) pour l'entrée de type de données numériques. -- [Date](../../data_types/date.md) si les valeurs d'entrée ont le `Date` type. -- [DateTime](../../data_types/datetime.md) si les valeurs d'entrée ont le `DateTime` type. +- [Float64](../../sql_reference/data_types/float.md) pour l'entrée de type de données numériques. +- [Date](../../sql_reference/data_types/date.md) si les valeurs d'entrée ont le `Date` type. +- [DateTime](../../sql_reference/data_types/datetime.md) si les valeurs d'entrée ont le `DateTime` type. **Exemple** @@ -998,7 +1001,7 @@ Alias: `medianExact`. **Paramètre** - `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` la valeur dans la plage de `[0.01, 0.99]`. Valeur par défaut: 0.5. À `level=0.5` la fonction calcule [médian](https://en.wikipedia.org/wiki/Median). -- `expr` — Expression over the column values resulting in numeric [types de données](../../data_types/index.md#data_types), [Date](../../data_types/date.md) ou [DateTime](../../data_types/datetime.md). +- `expr` — Expression over the column values resulting in numeric [types de données](../../sql_reference/data_types/index.md#data_types), [Date](../../sql_reference/data_types/date.md) ou [DateTime](../../sql_reference/data_types/datetime.md). **Valeur renvoyée** @@ -1006,9 +1009,9 @@ Alias: `medianExact`. Type: -- [Float64](../../data_types/float.md) pour l'entrée de type de données numériques. -- [Date](../../data_types/date.md) si les valeurs d'entrée ont le `Date` type. -- [DateTime](../../data_types/datetime.md) si les valeurs d'entrée ont le `DateTime` type. +- [Float64](../../sql_reference/data_types/float.md) pour l'entrée de type de données numériques. +- [Date](../../sql_reference/data_types/date.md) si les valeurs d'entrée ont le `Date` type. +- [DateTime](../../sql_reference/data_types/datetime.md) si les valeurs d'entrée ont le `DateTime` type. **Exemple** @@ -1050,7 +1053,7 @@ Alias: `medianExactWeighted`. **Paramètre** - `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` la valeur dans la plage de `[0.01, 0.99]`. Valeur par défaut: 0.5. À `level=0.5` la fonction calcule [médian](https://en.wikipedia.org/wiki/Median). 
-- `expr` — Expression over the column values resulting in numeric [types de données](../../data_types/index.md#data_types), [Date](../../data_types/date.md) ou [DateTime](../../data_types/datetime.md). +- `expr` — Expression over the column values resulting in numeric [types de données](../../sql_reference/data_types/index.md#data_types), [Date](../../sql_reference/data_types/date.md) ou [DateTime](../../sql_reference/data_types/datetime.md). - `weight` — Column with weights of sequence members. Weight is a number of value occurrences. **Valeur renvoyée** @@ -1059,9 +1062,9 @@ Alias: `medianExactWeighted`. Type: -- [Float64](../../data_types/float.md) pour l'entrée de type de données numériques. -- [Date](../../data_types/date.md) si les valeurs d'entrée ont le `Date` type. -- [DateTime](../../data_types/datetime.md) si les valeurs d'entrée ont le `DateTime` type. +- [Float64](../../sql_reference/data_types/float.md) pour l'entrée de type de données numériques. +- [Date](../../sql_reference/data_types/date.md) si les valeurs d'entrée ont le `Date` type. +- [DateTime](../../sql_reference/data_types/datetime.md) si les valeurs d'entrée ont le `DateTime` type. **Exemple** @@ -1115,7 +1118,7 @@ Alias: `medianTiming`. - `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` la valeur dans la plage de `[0.01, 0.99]`. Valeur par défaut: 0.5. À `level=0.5` la fonction calcule [médian](https://en.wikipedia.org/wiki/Median). -- `expr` — [Expression](../syntax.md#syntax-expressions) sur une colonne Valeurs renvoyant un [Flottant\*](../../data_types/float.md)numéro de type. +- `expr` — [Expression](../syntax.md#syntax-expressions) sur une colonne Valeurs renvoyant un [Flottant\*](../../sql_reference/data_types/float.md)numéro de type. - If negative values are passed to the function, the behavior is undefined. - If the value is greater than 30,000 (a page loading time of more than 30 seconds), it is assumed to be 30,000. @@ -1139,7 +1142,7 @@ Sinon, le résultat du calcul est arrondi au plus proche multiple de 16 ms. Type: `Float32`. !!! note "Note" - Si aucune valeur n'est transmise à la fonction (lors de l'utilisation de `quantileTimingIf`), [Nan](../../data_types/float.md#data_type-float-nan-inf) est retourné. Le but est de différencier ces cas de cas qui aboutissent à zéro. Voir [Clause ORDER BY](../select.md#select-order-by) pour des notes sur le tri `NaN` valeur. + Si aucune valeur n'est transmise à la fonction (lors de l'utilisation de `quantileTimingIf`), [Nan](../../sql_reference/data_types/float.md#data_type-float-nan-inf) est retourné. Le but est de différencier ces cas de cas qui aboutissent à zéro. Voir [Clause ORDER BY](../statements/select.md#select-order-by) pour des notes sur le tri `NaN` valeur. **Exemple** @@ -1198,7 +1201,7 @@ Alias: `medianTimingWeighted`. - `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` la valeur dans la plage de `[0.01, 0.99]`. Valeur par défaut: 0.5. À `level=0.5` la fonction calcule [médian](https://en.wikipedia.org/wiki/Median). -- `expr` — [Expression](../syntax.md#syntax-expressions) sur une colonne Valeurs renvoyant un [Flottant\*](../../data_types/float.md)numéro de type. +- `expr` — [Expression](../syntax.md#syntax-expressions) sur une colonne Valeurs renvoyant un [Flottant\*](../../sql_reference/data_types/float.md)numéro de type. - If negative values are passed to the function, the behavior is undefined. 
- If the value is greater than 30,000 (a page loading time of more than 30 seconds), it is assumed to be 30,000. @@ -1224,7 +1227,7 @@ Sinon, le résultat du calcul est arrondi au plus proche multiple de 16 ms. Type: `Float32`. !!! note "Note" - Si aucune valeur n'est transmise à la fonction (lors de l'utilisation de `quantileTimingIf`), [Nan](../../data_types/float.md#data_type-float-nan-inf) est retourné. Le but est de différencier ces cas de cas qui aboutissent à zéro. Voir [Clause ORDER BY](../select.md#select-order-by) pour des notes sur le tri `NaN` valeur. + Si aucune valeur n'est transmise à la fonction (lors de l'utilisation de `quantileTimingIf`), [Nan](../../sql_reference/data_types/float.md#data_type-float-nan-inf) est retourné. Le but est de différencier ces cas de cas qui aboutissent à zéro. Voir [Clause ORDER BY](../statements/select.md#select-order-by) pour des notes sur le tri `NaN` valeur. **Exemple** @@ -1281,7 +1284,7 @@ Alias: `medianTDigest`. **Paramètre** - `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` la valeur dans la plage de `[0.01, 0.99]`. Valeur par défaut: 0.5. À `level=0.5` la fonction calcule [médian](https://en.wikipedia.org/wiki/Median). -- `expr` — Expression over the column values resulting in numeric [types de données](../../data_types/index.md#data_types), [Date](../../data_types/date.md) ou [DateTime](../../data_types/datetime.md). +- `expr` — Expression over the column values resulting in numeric [types de données](../../sql_reference/data_types/index.md#data_types), [Date](../../sql_reference/data_types/date.md) ou [DateTime](../../sql_reference/data_types/datetime.md). **Valeur renvoyée** @@ -1289,9 +1292,9 @@ Alias: `medianTDigest`. Type: -- [Float64](../../data_types/float.md) pour l'entrée de type de données numériques. -- [Date](../../data_types/date.md) si les valeurs d'entrée ont le `Date` type. -- [DateTime](../../data_types/datetime.md) si les valeurs d'entrée ont le `DateTime` type. +- [Float64](../../sql_reference/data_types/float.md) pour l'entrée de type de données numériques. +- [Date](../../sql_reference/data_types/date.md) si les valeurs d'entrée ont le `Date` type. +- [DateTime](../../sql_reference/data_types/datetime.md) si les valeurs d'entrée ont le `DateTime` type. **Exemple** @@ -1335,7 +1338,7 @@ Alias: `medianTDigest`. **Paramètre** - `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` la valeur dans la plage de `[0.01, 0.99]`. Valeur par défaut: 0.5. À `level=0.5` la fonction calcule [médian](https://en.wikipedia.org/wiki/Median). -- `expr` — Expression over the column values resulting in numeric [types de données](../../data_types/index.md#data_types), [Date](../../data_types/date.md) ou [DateTime](../../data_types/datetime.md). +- `expr` — Expression over the column values resulting in numeric [types de données](../../sql_reference/data_types/index.md#data_types), [Date](../../sql_reference/data_types/date.md) ou [DateTime](../../sql_reference/data_types/datetime.md). - `weight` — Column with weights of sequence elements. Weight is a number of value occurrences. **Valeur renvoyée** @@ -1344,9 +1347,9 @@ Alias: `medianTDigest`. Type: -- [Float64](../../data_types/float.md) pour l'entrée de type de données numériques. -- [Date](../../data_types/date.md) si les valeurs d'entrée ont le `Date` type. 
-- [DateTime](../../data_types/datetime.md) si les valeurs d'entrée ont le `DateTime` type. +- [Float64](../../sql_reference/data_types/float.md) pour l'entrée de type de données numériques. +- [Date](../../sql_reference/data_types/date.md) si les valeurs d'entrée ont le `Date` type. +- [DateTime](../../sql_reference/data_types/datetime.md) si les valeurs d'entrée ont le `DateTime` type. **Exemple** @@ -1493,7 +1496,7 @@ topKWeighted(N)(x, weight) **Argument** - `x` – The value. -- `weight` — The weight. [UInt8](../../data_types/int_uint.md). +- `weight` — The weight. [UInt8](../../sql_reference/data_types/int_uint.md). **Valeur renvoyée** @@ -1704,7 +1707,7 @@ stochasticLogisticRegression(1.0, 1.0, 10, 'SGD') ## groupBitmapAnd {#groupbitmapand} -Calculs le et d'une colonne bitmap, retour cardinalité de type UInt64, si Ajouter suffixe-État, puis retour [objet bitmap](../functions/bitmap_functions.md). +Calculs le et d'une colonne bitmap, retour cardinalité de type UInt64, si Ajouter suffixe-État, puis retour [objet bitmap](../../sql_reference/functions/bitmap_functions.md). ``` sql groupBitmapAnd(expr) @@ -1747,7 +1750,7 @@ SELECT arraySort(bitmapToArray(groupBitmapAndState(z))) FROM bitmap_column_expr_ ## groupBitmapOr {#groupbitmapor} -Calculs le ou d'une colonne bitmap, retour cardinalité de type UInt64, si Ajouter suffixe-État, puis retour [objet bitmap](../functions/bitmap_functions.md). C'est l'équivalent de `groupBitmapMerge`. +Calculs le ou d'une colonne bitmap, retour cardinalité de type UInt64, si Ajouter suffixe-État, puis retour [objet bitmap](../../sql_reference/functions/bitmap_functions.md). C'est l'équivalent de `groupBitmapMerge`. ``` sql groupBitmapOr(expr) @@ -1790,7 +1793,7 @@ SELECT arraySort(bitmapToArray(groupBitmapOrState(z))) FROM bitmap_column_expr_t ## groupBitmapXor {#groupbitmapxor} -Calculs le XOR d'une colonne bitmap, retour cardinalité de type UInt64, si Ajouter suffixe-État, puis retour [objet bitmap](../functions/bitmap_functions.md). +Calculs le XOR d'une colonne bitmap, retour cardinalité de type UInt64, si Ajouter suffixe-État, puis retour [objet bitmap](../../sql_reference/functions/bitmap_functions.md). ``` sql groupBitmapOr(expr) diff --git a/docs/fr/data_types/nested_data_structures/aggregatefunction.md b/docs/fr/sql_reference/data_types/aggregatefunction.md similarity index 70% rename from docs/fr/data_types/nested_data_structures/aggregatefunction.md rename to docs/fr/sql_reference/data_types/aggregatefunction.md index ba4d1e9cd73..99decd0ccd3 100644 --- a/docs/fr/data_types/nested_data_structures/aggregatefunction.md +++ b/docs/fr/sql_reference/data_types/aggregatefunction.md @@ -1,10 +1,13 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 52 +toc_title: AggregateFunction (nom, types_of_arguments...) --- # AggregateFunction(name, types\_of\_arguments…) {#data-type-aggregatefunction} -Aggregate functions can have an implementation-defined intermediate state that can be serialized to an AggregateFunction(…) data type and stored in a table, usually, by means of [une vue matérialisée](../../query_language/select.md#create-view). La manière courante de produire un État de fonction d'agrégat est d'appeler la fonction d'agrégat avec le `-State` suffixe. Pour obtenir le résultat final de l'agrégation dans l'avenir, vous devez utiliser la même fonction d'agrégation avec la `-Merge`suffixe. 
+Aggregate functions can have an implementation-defined intermediate state that can be serialized to an AggregateFunction(…) data type and stored in a table, usually, by means of [une vue matérialisée](../../sql_reference/statements/select.md#create-view). La manière courante de produire un État de fonction d'agrégat est d'appeler la fonction d'agrégat avec le `-State` suffixe. Pour obtenir le résultat final de l'agrégation dans l'avenir, vous devez utiliser la même fonction d'agrégation avec la `-Merge`suffixe. `AggregateFunction` — parametric data type. @@ -27,7 +30,7 @@ CREATE TABLE t ) ENGINE = ... ``` -[uniq](../../query_language/agg_functions/reference.md#agg_function-uniq), anyIf ([tout](../../query_language/agg_functions/reference.md#agg_function-any)+[Si](../../query_language/agg_functions/combinators.md#agg-functions-combinator-if)) et [les quantiles](../../query_language/agg_functions/reference.md) les fonctions d'agrégation sont-elles prises en charge dans ClickHouse. +[uniq](../../sql_reference/aggregate_functions/reference.md#agg_function-uniq), anyIf ([tout](../../sql_reference/aggregate_functions/reference.md#agg_function-any)+[Si](../../sql_reference/aggregate_functions/combinators.md#agg-functions-combinator-if)) et [les quantiles](../../sql_reference/aggregate_functions/reference.md) les fonctions d'agrégation sont-elles prises en charge dans ClickHouse. ## Utilisation {#usage} @@ -62,6 +65,6 @@ SELECT uniqMerge(state) FROM (SELECT uniqState(UserID) AS state FROM table GROUP ## Exemple D'Utilisation {#usage-example} -Voir [AggregatingMergeTree](../../operations/table_engines/aggregatingmergetree.md) Description du moteur. +Voir [AggregatingMergeTree](../../engines/table_engines/mergetree_family/aggregatingmergetree.md) Description du moteur. [Article Original](https://clickhouse.tech/docs/en/data_types/nested_data_structures/aggregatefunction/) diff --git a/docs/fr/data_types/array.md b/docs/fr/sql_reference/data_types/array.md similarity index 85% rename from docs/fr/data_types/array.md rename to docs/fr/sql_reference/data_types/array.md index f20abaabc49..80c0b7c0bf9 100644 --- a/docs/fr/data_types/array.md +++ b/docs/fr/sql_reference/data_types/array.md @@ -1,12 +1,15 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 51 +toc_title: Array(T) --- -# Array(T) {#data-type-array} +# Array(t) {#data-type-array} Un tableau de `T`les éléments de type. `T` peut être n'importe quel type de données, y compris un tableau. -## La création d'un tableau {#creating-an-array} +## La création d'un Tableau {#creating-an-array} Vous pouvez utiliser une fonction pour créer un tableau: @@ -42,9 +45,9 @@ SELECT [1, 2] AS x, toTypeName(x) └───────┴────────────────────┘ ``` -## Utilisation de Types de données {#working-with-data-types} +## Utilisation De Types De Données {#working-with-data-types} -Lors de la création d'un tableau à la volée, ClickHouse définit automatiquement le type d'argument comme le type de données le plus étroit pouvant stocker tous les arguments listés. S'il y a des [Nullable](nullable.md#data_type-nullable) ou littéral [NULL](../query_language/syntax.md#null-literal) les valeurs, le type d'un élément de tableau devient également [Nullable](nullable.md). +Lors de la création d'un tableau à la volée, ClickHouse définit automatiquement le type d'argument comme le type de données le plus étroit pouvant stocker tous les arguments listés. 
S'il y a des [Nullable](nullable.md#data_type-nullable) ou littéral [NULL](../../sql_reference/syntax.md#null-literal) les valeurs, le type d'un élément de tableau devient également [Nullable](nullable.md). Si ClickHouse n'a pas pu déterminer le type de données, il génère une exception. Par exemple, cela se produit lorsque vous essayez de créer un tableau avec des chaînes et des nombres simultanément (`SELECT array(1, 'a')`). diff --git a/docs/fr/data_types/boolean.md b/docs/fr/sql_reference/data_types/boolean.md similarity index 72% rename from docs/fr/data_types/boolean.md rename to docs/fr/sql_reference/data_types/boolean.md index 693709cda9f..54c1311c375 100644 --- a/docs/fr/data_types/boolean.md +++ b/docs/fr/sql_reference/data_types/boolean.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 43 +toc_title: "Bool\xE9en" --- # Les Valeurs Booléennes {#boolean-values} diff --git a/docs/fr/data_types/date.md b/docs/fr/sql_reference/data_types/date.md similarity index 85% rename from docs/fr/data_types/date.md rename to docs/fr/sql_reference/data_types/date.md index 812b0682cd2..ece3bc85c99 100644 --- a/docs/fr/data_types/date.md +++ b/docs/fr/sql_reference/data_types/date.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 47 +toc_title: Date --- # Date {#date} diff --git a/docs/fr/data_types/datetime.md b/docs/fr/sql_reference/data_types/datetime.md similarity index 77% rename from docs/fr/data_types/datetime.md rename to docs/fr/sql_reference/data_types/datetime.md index 797364b3f51..d66a4276d66 100644 --- a/docs/fr/data_types/datetime.md +++ b/docs/fr/sql_reference/data_types/datetime.md @@ -1,8 +1,11 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 48 +toc_title: DateTime --- -# DateTime {#data_type-datetime} +# Datetime {#data_type-datetime} Permet de stocker un instant dans le temps, qui peut être exprimé comme une date de calendrier et une heure d'une journée. @@ -22,13 +25,13 @@ Le point dans le temps est enregistré en tant que [Le timestamp Unix](https://e Une liste des fuseaux horaires pris en charge peut être trouvée dans le [Base de données de fuseau horaire IANA](https://www.iana.org/time-zones). Le `tzdata` paquet, contenant [Base de données de fuseau horaire IANA](https://www.iana.org/time-zones), doit être installé dans le système. L'utilisation de la `timedatectl list-timezones` commande pour lister les fuseaux horaires connus par un système local. -Vous pouvez définir explicitement un fuseau horaire `DateTime`- tapez des colonnes lors de la création d'une table. Si le fuseau horaire n'est pas défini, ClickHouse utilise la valeur [fuseau](../operations/server_settings/settings.md#server_settings-timezone) paramètre dans les paramètres du serveur ou les paramètres du système d'exploitation au moment du démarrage du serveur ClickHouse. +Vous pouvez définir explicitement un fuseau horaire `DateTime`- tapez des colonnes lors de la création d'une table. Si le fuseau horaire n'est pas défini, ClickHouse utilise la valeur [fuseau](../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-timezone) paramètre dans les paramètres du serveur ou les paramètres du système d'exploitation au moment du démarrage du serveur ClickHouse. 
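For illustration, a minimal sketch of the column-level time zone described above (the table name `tz_demo` is hypothetical; `Europe/Moscow` stands in for any IANA zone known to the server):

``` sql
CREATE TABLE tz_demo
(
    `event_time` DateTime('Europe/Moscow')
)
ENGINE = TinyLog;

-- Unix timestamps are stored as-is; the column time zone only affects the text representation.
INSERT INTO tz_demo VALUES (1546300800);

SELECT event_time, toTimeZone(event_time, 'UTC') FROM tz_demo;
```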
-Le [clickhouse-client](../interfaces/cli.md) applique le fuseau horaire du serveur par défaut si un fuseau horaire n'est pas explicitement défini lors de l'initialisation du type de données. Pour utiliser le fuseau horaire du client, exécutez `clickhouse-client` avec l' `--use_client_time_zone` paramètre. +Le [clickhouse-client](../../interfaces/cli.md) applique le fuseau horaire du serveur par défaut si un fuseau horaire n'est pas explicitement défini lors de l'initialisation du type de données. Pour utiliser le fuseau horaire du client, exécutez `clickhouse-client` avec l' `--use_client_time_zone` paramètre. -Clickhouse affiche les valeurs dans `YYYY-MM-DD hh:mm:ss` format de texte par défaut. Vous pouvez modifier la sortie avec le [formatDateTime](../query_language/functions/date_time_functions.md#formatdatetime) fonction. +Clickhouse affiche les valeurs dans `YYYY-MM-DD hh:mm:ss` format de texte par défaut. Vous pouvez modifier la sortie avec le [formatDateTime](../../sql_reference/functions/date_time_functions.md#formatdatetime) fonction. -Lorsque vous insérez des données dans ClickHouse, vous pouvez utiliser différents formats de chaînes de date et d'heure, en fonction de la valeur du [date\_time\_input\_format](../operations/settings/settings.md#settings-date_time_input_format) paramètre. +Lorsque vous insérez des données dans ClickHouse, vous pouvez utiliser différents formats de chaînes de date et d'heure, en fonction de la valeur du [date\_time\_input\_format](../../operations/settings/settings.md#settings-date_time_input_format) paramètre. ## Exemple {#examples} @@ -115,12 +118,12 @@ FROM dt ## Voir Aussi {#see-also} -- [Fonctions de conversion de Type](../query_language/functions/type_conversion_functions.md) -- [Fonctions pour travailler avec des dates et des heures](../query_language/functions/date_time_functions.md) -- [Fonctions pour travailler avec des tableaux](../query_language/functions/array_functions.md) -- [Le `date_time_input_format` paramètre](../operations/settings/settings.md#settings-date_time_input_format) -- [Le `timezone` paramètre de configuration du serveur](../operations/server_settings/settings.md#server_settings-timezone) -- [Opérateurs pour travailler avec des dates et des heures](../query_language/operators.md#operators-datetime) +- [Fonctions de conversion de Type](../../sql_reference/functions/type_conversion_functions.md) +- [Fonctions pour travailler avec des dates et des heures](../../sql_reference/functions/date_time_functions.md) +- [Fonctions pour travailler avec des tableaux](../../sql_reference/functions/array_functions.md) +- [Le `date_time_input_format` paramètre](../../operations/settings/settings.md#settings-date_time_input_format) +- [Le `timezone` paramètre de configuration du serveur](../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-timezone) +- [Opérateurs pour travailler avec des dates et des heures](../../sql_reference/operators.md#operators-datetime) - [Le `Date` type de données](date.md) [Article Original](https://clickhouse.tech/docs/en/data_types/datetime/) diff --git a/docs/fr/data_types/datetime64.md b/docs/fr/sql_reference/data_types/datetime64.md similarity index 85% rename from docs/fr/data_types/datetime64.md rename to docs/fr/sql_reference/data_types/datetime64.md index 3b9acff1731..f7ef13d7014 100644 --- a/docs/fr/data_types/datetime64.md +++ b/docs/fr/sql_reference/data_types/datetime64.md @@ -1,8 +1,11 @@ --- machine_translated: true +machine_translated_rev: 
f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 49 +toc_title: DateTime64 --- -# DateTime64 {#data_type-datetime64} +# Datetime64 {#data_type-datetime64} Permet de stocker un instant dans le temps, qui peut être exprimé comme une date de calendrier et une heure d'un jour, avec une précision de sous-seconde définie @@ -91,11 +94,11 @@ FROM dt ## Voir Aussi {#see-also} -- [Fonctions de conversion de Type](../query_language/functions/type_conversion_functions.md) -- [Fonctions pour travailler avec des dates et des heures](../query_language/functions/date_time_functions.md) -- [Fonctions pour travailler avec des tableaux](../query_language/functions/array_functions.md) -- [Le `date_time_input_format` paramètre](../operations/settings/settings.md#settings-date_time_input_format) -- [Le `timezone` paramètre de configuration du serveur](../operations/server_settings/settings.md#server_settings-timezone) -- [Opérateurs pour travailler avec des dates et des heures](../query_language/operators.md#operators-datetime) +- [Fonctions de conversion de Type](../../sql_reference/functions/type_conversion_functions.md) +- [Fonctions pour travailler avec des dates et des heures](../../sql_reference/functions/date_time_functions.md) +- [Fonctions pour travailler avec des tableaux](../../sql_reference/functions/array_functions.md) +- [Le `date_time_input_format` paramètre](../../operations/settings/settings.md#settings-date_time_input_format) +- [Le `timezone` paramètre de configuration du serveur](../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-timezone) +- [Opérateurs pour travailler avec des dates et des heures](../../sql_reference/operators.md#operators-datetime) - [`Date` type de données](date.md) - [`DateTime` type de données](datetime.md) diff --git a/docs/fr/data_types/decimal.md b/docs/fr/sql_reference/data_types/decimal.md similarity index 97% rename from docs/fr/data_types/decimal.md rename to docs/fr/sql_reference/data_types/decimal.md index 902df0d9519..74a0ecfbe64 100644 --- a/docs/fr/data_types/decimal.md +++ b/docs/fr/sql_reference/data_types/decimal.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 42 +toc_title: "D\xE9cimal" --- # Décimal (P, S), Décimal32 (S), Décimal64 (S), Décimal128 (S) {#decimalp-s-decimal32s-decimal64s-decimal128s} diff --git a/docs/fr/sql_reference/data_types/domains/index.md b/docs/fr/sql_reference/data_types/domains/index.md new file mode 100644 index 00000000000..cc3206921b9 --- /dev/null +++ b/docs/fr/sql_reference/data_types/domains/index.md @@ -0,0 +1,8 @@ +--- +machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_folder_title: Domains +toc_priority: 56 +--- + + diff --git a/docs/fr/data_types/domains/ipv4.md b/docs/fr/sql_reference/data_types/domains/ipv4.md similarity index 97% rename from docs/fr/data_types/domains/ipv4.md rename to docs/fr/sql_reference/data_types/domains/ipv4.md index bba8fcaf1e1..391c06bf66f 100644 --- a/docs/fr/data_types/domains/ipv4.md +++ b/docs/fr/sql_reference/data_types/domains/ipv4.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 59 +toc_title: IPv4 --- ## IPv4 {#ipv4} diff --git a/docs/fr/data_types/domains/ipv6.md b/docs/fr/sql_reference/data_types/domains/ipv6.md similarity index 97% rename from docs/fr/data_types/domains/ipv6.md rename to 
docs/fr/sql_reference/data_types/domains/ipv6.md index 52cc1c92536..0946ac38ed3 100644 --- a/docs/fr/data_types/domains/ipv6.md +++ b/docs/fr/sql_reference/data_types/domains/ipv6.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 60 +toc_title: IPv6 --- ## IPv6 {#ipv6} diff --git a/docs/fr/data_types/domains/overview.md b/docs/fr/sql_reference/data_types/domains/overview.md similarity index 93% rename from docs/fr/data_types/domains/overview.md rename to docs/fr/sql_reference/data_types/domains/overview.md index c7341b2252f..0f977f79a4e 100644 --- a/docs/fr/data_types/domains/overview.md +++ b/docs/fr/sql_reference/data_types/domains/overview.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 58 +toc_title: "Aper\xE7u" --- # Domaine {#domains} diff --git a/docs/fr/data_types/enum.md b/docs/fr/sql_reference/data_types/enum.md similarity index 97% rename from docs/fr/data_types/enum.md rename to docs/fr/sql_reference/data_types/enum.md index bb1e43ea15f..dcfd6266749 100644 --- a/docs/fr/data_types/enum.md +++ b/docs/fr/sql_reference/data_types/enum.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 50 +toc_title: Enum --- # Enum {#enum} @@ -15,7 +18,7 @@ Supports ClickHouse: Clickhouse choisit automatiquement le type de `Enum` lorsque les données sont insérées. Vous pouvez également utiliser `Enum8` ou `Enum16` types pour être sûr de la taille de stockage. -## Exemples d'utilisation {#usage-examples} +## Exemples D'Utilisation {#usage-examples} Ici, nous créons une table avec une `Enum8('hello' = 1, 'world' = 2)` type de colonne: @@ -90,7 +93,7 @@ SELECT toTypeName(CAST('a', 'Enum(\'a\' = 1, \'b\' = 2)')) Chacune des valeurs se voit attribuer un nombre dans la plage `-128 ... 127` pour `Enum8` ou dans la gamme `-32768 ... 32767` pour `Enum16`. Toutes les chaînes et les nombres doivent être différents. Une chaîne vide est autorisé. Si ce type est spécifié (dans une définition de table), les nombres peuvent être dans un ordre arbitraire. Toutefois, l'ordre n'a pas d'importance. -Ni la chaîne ni la valeur numérique dans un `Enum` peut être [NULL](../query_language/syntax.md). +Ni la chaîne ni la valeur numérique dans un `Enum` peut être [NULL](../../sql_reference/syntax.md). Un `Enum` peut être contenue dans [Nullable](nullable.md) type. Donc, si vous créez une table en utilisant la requête diff --git a/docs/fr/data_types/fixedstring.md b/docs/fr/sql_reference/data_types/fixedstring.md similarity index 83% rename from docs/fr/data_types/fixedstring.md rename to docs/fr/sql_reference/data_types/fixedstring.md index 0bb91baf681..2b31f461099 100644 --- a/docs/fr/data_types/fixedstring.md +++ b/docs/fr/sql_reference/data_types/fixedstring.md @@ -1,8 +1,11 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 45 +toc_title: FixedString (N) --- -# FixedString {#fixedstring} +# Fixedstring {#fixedstring} Une chaîne de longueur fixe de `N` octets (ni caractères ni points de code). @@ -55,6 +58,6 @@ WHERE a = 'b\0' Ce comportement diffère de MySQL pour le `CHAR` type (où les chaînes sont remplies d'espaces et les espaces sont supprimés pour la sortie). -À noter que la longueur de la `FixedString(N)` la valeur est constante. 
Le [longueur](../query_language/functions/array_functions.md#array_functions-length) la fonction renvoie `N` même si l' `FixedString(N)` la valeur est remplie uniquement avec des octets [vide](../query_language/functions/string_functions.md#empty) la fonction renvoie `1` dans ce cas. +À noter que la longueur de la `FixedString(N)` la valeur est constante. Le [longueur](../../sql_reference/functions/array_functions.md#array_functions-length) la fonction renvoie `N` même si l' `FixedString(N)` la valeur est remplie uniquement avec des octets [vide](../../sql_reference/functions/string_functions.md#empty) la fonction renvoie `1` dans ce cas. [Article Original](https://clickhouse.tech/docs/en/data_types/fixedstring/) diff --git a/docs/fr/data_types/float.md b/docs/fr/sql_reference/data_types/float.md similarity index 93% rename from docs/fr/data_types/float.md rename to docs/fr/sql_reference/data_types/float.md index 8458f0e13da..c7aac2adb58 100644 --- a/docs/fr/data_types/float.md +++ b/docs/fr/sql_reference/data_types/float.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 41 +toc_title: Float32, Float64 --- # Float32, Float64 {#float32-float64} @@ -79,6 +82,6 @@ SELECT 0 / 0 └──────────────┘ ``` - See the rules for `NaN` sorting in the section [ORDER BY clause](../query_language/select.md). + See the rules for `NaN` sorting in the section [ORDER BY clause](../sql_reference/statements/select.md). [Article Original](https://clickhouse.tech/docs/en/data_types/float/) diff --git a/docs/fr/data_types/index.md b/docs/fr/sql_reference/data_types/index.md similarity index 73% rename from docs/fr/data_types/index.md rename to docs/fr/sql_reference/data_types/index.md index f5f9386c042..2b278d7606a 100644 --- a/docs/fr/data_types/index.md +++ b/docs/fr/sql_reference/data_types/index.md @@ -1,5 +1,9 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_folder_title: Data Types +toc_priority: 37 +toc_title: Introduction --- # Types De Données {#data_types} diff --git a/docs/fr/data_types/int_uint.md b/docs/fr/sql_reference/data_types/int_uint.md similarity index 80% rename from docs/fr/data_types/int_uint.md rename to docs/fr/sql_reference/data_types/int_uint.md index ec3018d42c8..0b9bb3c05d5 100644 --- a/docs/fr/data_types/int_uint.md +++ b/docs/fr/sql_reference/data_types/int_uint.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 40 +toc_title: UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64 --- # UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64 {#uint8-uint16-uint32-uint64-int8-int16-int32-int64} diff --git a/docs/fr/data_types/nested_data_structures/index.md b/docs/fr/sql_reference/data_types/nested_data_structures/index.md similarity index 54% rename from docs/fr/data_types/nested_data_structures/index.md rename to docs/fr/sql_reference/data_types/nested_data_structures/index.md index 9691b587181..2c25a6afac8 100644 --- a/docs/fr/data_types/nested_data_structures/index.md +++ b/docs/fr/sql_reference/data_types/nested_data_structures/index.md @@ -1,5 +1,10 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_folder_title: Nested Data Structures +toc_hidden: true +toc_priority: 54 +toc_title: "cach\xE9s" --- # Structures De Données Imbriquées {#nested-data-structures} diff --git 
a/docs/fr/data_types/nested_data_structures/nested.md b/docs/fr/sql_reference/data_types/nested_data_structures/nested.md similarity index 92% rename from docs/fr/data_types/nested_data_structures/nested.md rename to docs/fr/sql_reference/data_types/nested_data_structures/nested.md index f6dceb4b6c1..298e8d25038 100644 --- a/docs/fr/data_types/nested_data_structures/nested.md +++ b/docs/fr/sql_reference/data_types/nested_data_structures/nested.md @@ -1,10 +1,13 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 57 +toc_title: "Imbriqu\xE9e(Type1 Nom1, Nom2 Type2, ...)" --- -# Nested(Name1 Type1, Name2 Type2, …) {#nestedname1-type1-name2-type2} +# Nested(name1 Type1, Name2 Type2, …) {#nestedname1-type1-name2-type2} -A nested data structure is like a table inside a cell. The parameters of a nested data structure – the column names and types – are specified the same way as in a [CREATE TABLE](../../query_language/create.md) requête. Chaque ligne de table peut correspondre à n'importe quel nombre de lignes dans une structure de données imbriquée. +A nested data structure is like a table inside a cell. The parameters of a nested data structure – the column names and types – are specified the same way as in a [CREATE TABLE](../../../sql_reference/statements/create.md) requête. Chaque ligne de table peut correspondre à n'importe quel nombre de lignes dans une structure de données imbriquée. Exemple: diff --git a/docs/fr/data_types/nullable.md b/docs/fr/sql_reference/data_types/nullable.md similarity index 74% rename from docs/fr/data_types/nullable.md rename to docs/fr/sql_reference/data_types/nullable.md index 3a5c6bcff6c..09bfd0cb9bb 100644 --- a/docs/fr/data_types/nullable.md +++ b/docs/fr/sql_reference/data_types/nullable.md @@ -1,10 +1,13 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 54 +toc_title: Nullable --- -# Nullable(TypeName) {#data_type-nullable} +# Nullable(typename) {#data_type-nullable} -Permet de stocker marqueur spécial ([NULL](../query_language/syntax.md)) qui dénote “missing value” aux valeurs normales autorisées par `TypeName`. Par exemple, un `Nullable(Int8)` type colonne peut stocker `Int8` type de valeurs, et les lignes qui n'ont pas de valeur magasin `NULL`. +Permet de stocker marqueur spécial ([NULL](../../sql_reference/syntax.md)) qui dénote “missing value” aux valeurs normales autorisées par `TypeName`. Par exemple, un `Nullable(Int8)` type colonne peut stocker `Int8` type de valeurs, et les lignes qui n'ont pas de valeur magasin `NULL`. Pour un `TypeName` vous ne pouvez pas utiliser les types de données composites [Tableau](array.md) et [Tuple](tuple.md). Les types de données composites peuvent contenir `Nullable` valeurs de type, telles que `Array(Nullable(Int8))`. @@ -12,14 +15,14 @@ A `Nullable` le champ type ne peut pas être inclus dans les index de table. `NULL` est la valeur par défaut pour tout `Nullable` type, sauf indication contraire dans la configuration du serveur ClickHouse. -## Caractéristiques de stockage {#storage-features} +## Caractéristiques De Stockage {#storage-features} Stocker `Nullable` valeurs de type dans une colonne de table, ClickHouse utilise un fichier séparé avec `NULL` masques en plus du fichier normal avec des valeurs. Les entrées du fichier masks permettent à ClickHouse de faire la distinction entre `NULL` et une valeur par défaut du type de données correspondant pour chaque ligne de table. 
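As a quick sketch of what the mask file makes possible (the table name `null_demo` is hypothetical), a stored default value and a stored `NULL` remain distinguishable in the same column:

``` sql
CREATE TABLE null_demo (x Nullable(Int8)) ENGINE = TinyLog;

INSERT INTO null_demo VALUES (0), (NULL);

-- 0 (the Int8 default) and NULL are kept distinct by the separate NULL-mask file.
SELECT x, isNull(x) FROM null_demo;
```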
En raison d'un fichier supplémentaire, `Nullable` colonne consomme de l'espace de stockage supplémentaire par rapport à une normale similaire. !!! info "Note" Utiliser `Nullable` affecte presque toujours négativement les performances, gardez cela à l'esprit lors de la conception de vos bases de données. -## Exemple d'utilisation {#usage-example} +## Exemple D'Utilisation {#usage-example} ``` sql CREATE TABLE t_null(x Int8, y Nullable(Int8)) ENGINE TinyLog diff --git a/docs/fr/data_types/special_data_types/expression.md b/docs/fr/sql_reference/data_types/special_data_types/expression.md similarity index 71% rename from docs/fr/data_types/special_data_types/expression.md rename to docs/fr/sql_reference/data_types/special_data_types/expression.md index a368afa07ba..9435321af9f 100644 --- a/docs/fr/data_types/special_data_types/expression.md +++ b/docs/fr/sql_reference/data_types/special_data_types/expression.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 58 +toc_title: Expression --- # Expression {#expression} diff --git a/docs/fr/data_types/special_data_types/index.md b/docs/fr/sql_reference/data_types/special_data_types/index.md similarity index 73% rename from docs/fr/data_types/special_data_types/index.md rename to docs/fr/sql_reference/data_types/special_data_types/index.md index a55535d7541..60a700d0d22 100644 --- a/docs/fr/data_types/special_data_types/index.md +++ b/docs/fr/sql_reference/data_types/special_data_types/index.md @@ -1,5 +1,10 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_folder_title: Special Data Types +toc_hidden: true +toc_priority: 55 +toc_title: "cach\xE9s" --- # Types De Données Spéciaux {#special-data-types} diff --git a/docs/fr/data_types/special_data_types/interval.md b/docs/fr/sql_reference/data_types/special_data_types/interval.md similarity index 82% rename from docs/fr/data_types/special_data_types/interval.md rename to docs/fr/sql_reference/data_types/special_data_types/interval.md index b5ee83bf6bd..0ca20cdc513 100644 --- a/docs/fr/data_types/special_data_types/interval.md +++ b/docs/fr/sql_reference/data_types/special_data_types/interval.md @@ -1,10 +1,13 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 61 +toc_title: Intervalle --- # Intervalle {#data-type-interval} -Famille de types de données représentant des intervalles d'heure et de date. Les types de la [INTERVAL](../../query_language/operators.md#operator-interval) opérateur. +Famille de types de données représentant des intervalles d'heure et de date. Les types de la [INTERVAL](../../../sql_reference/operators.md#operator-interval) opérateur. !!! warning "Avertissement" `Interval` les valeurs de type de données ne peuvent pas être stockées dans les tables. @@ -39,7 +42,7 @@ SELECT toTypeName(INTERVAL 4 DAY) ## Utilisation Remarques {#data-type-interval-usage-remarks} -Vous pouvez utiliser `Interval`-tapez des valeurs dans des opérations arithmétiques avec [Date](../../data_types/date.md) et [DateTime](../../data_types/datetime.md)-type de valeurs. Par exemple, vous pouvez ajouter 4 jours à l'heure actuelle: +Vous pouvez utiliser `Interval`-tapez des valeurs dans des opérations arithmétiques avec [Date](../../../sql_reference/data_types/date.md) et [DateTime](../../../sql_reference/data_types/datetime.md)-type de valeurs. 
Par exemple, vous pouvez ajouter 4 jours à l'heure actuelle: ``` sql SELECT now() as current_date_time, current_date_time + INTERVAL 4 DAY @@ -78,5 +81,5 @@ Code: 43. DB::Exception: Received from localhost:9000. DB::Exception: Wrong argu ## Voir Aussi {#see-also} -- [INTERVAL](../../query_language/operators.md#operator-interval) opérateur -- [toInterval](../../query_language/functions/type_conversion_functions.md#function-tointerval) type fonctions de conversion +- [INTERVAL](../../../sql_reference/operators.md#operator-interval) opérateur +- [toInterval](../../../sql_reference/functions/type_conversion_functions.md#function-tointerval) type fonctions de conversion diff --git a/docs/fr/data_types/special_data_types/nothing.md b/docs/fr/sql_reference/data_types/special_data_types/nothing.md similarity index 67% rename from docs/fr/data_types/special_data_types/nothing.md rename to docs/fr/sql_reference/data_types/special_data_types/nothing.md index 1e93f58d9d9..4a83629db00 100644 --- a/docs/fr/data_types/special_data_types/nothing.md +++ b/docs/fr/sql_reference/data_types/special_data_types/nothing.md @@ -1,12 +1,15 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 60 +toc_title: Rien --- # Rien {#nothing} Le seul but de ce type de données est de représenter les cas où une valeur n'est pas prévu. Donc vous ne pouvez pas créer un `Nothing` type de valeur. -Par exemple, littéral [NULL](../../query_language/syntax.md#null-literal) a type de `Nullable(Nothing)`. Voir plus sur [Nullable](../../data_types/nullable.md). +Par exemple, littéral [NULL](../../../sql_reference/syntax.md#null-literal) a type de `Nullable(Nothing)`. Voir plus sur [Nullable](../../../sql_reference/data_types/nullable.md). Le `Nothing` type peut également être utilisé pour désigner des tableaux vides: diff --git a/docs/fr/sql_reference/data_types/special_data_types/set.md b/docs/fr/sql_reference/data_types/special_data_types/set.md new file mode 100644 index 00000000000..fde43421dab --- /dev/null +++ b/docs/fr/sql_reference/data_types/special_data_types/set.md @@ -0,0 +1,12 @@ +--- +machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 59 +toc_title: "D\xE9finir" +--- + +# Définir {#set} + +Utilisé pour la moitié droite d'un [IN](../../../sql_reference/statements/select.md#select-in-operators) expression. + +[Article Original](https://clickhouse.tech/docs/en/data_types/special_data_types/set/) diff --git a/docs/fr/data_types/string.md b/docs/fr/sql_reference/data_types/string.md similarity index 92% rename from docs/fr/data_types/string.md rename to docs/fr/sql_reference/data_types/string.md index a34495c0f79..375fcb44f38 100644 --- a/docs/fr/data_types/string.md +++ b/docs/fr/sql_reference/data_types/string.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 44 +toc_title: "Cha\xEEne" --- # Chaîne {#string} diff --git a/docs/fr/data_types/tuple.md b/docs/fr/sql_reference/data_types/tuple.md similarity index 75% rename from docs/fr/data_types/tuple.md rename to docs/fr/sql_reference/data_types/tuple.md index 2f180046753..85ea5e8e02b 100644 --- a/docs/fr/data_types/tuple.md +++ b/docs/fr/sql_reference/data_types/tuple.md @@ -1,16 +1,19 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 53 +toc_title: Tuple (T1, T2,...) 
--- -# Tuple(T1, T2, …) {#tuplet1-t2} +# Tuple(t1, T2, …) {#tuplet1-t2} Un n-uplet d'éléments, chacun ayant une personne [type](index.md#data_types). -Les Tuples sont utilisés pour le regroupement temporaire de colonnes. Les colonnes peuvent être regroupées lorsqu'une expression IN est utilisée dans une requête et pour spécifier certains paramètres formels des fonctions lambda. Pour plus d'informations, voir les sections [Dans les opérateurs](../query_language/select.md) et [Des fonctions d'ordre supérieur](../query_language/functions/higher_order_functions.md). +Les Tuples sont utilisés pour le regroupement temporaire de colonnes. Les colonnes peuvent être regroupées lorsqu'une expression IN est utilisée dans une requête et pour spécifier certains paramètres formels des fonctions lambda. Pour plus d'informations, voir les sections [Dans les opérateurs](../../sql_reference/statements/select.md) et [Des fonctions d'ordre supérieur](../../sql_reference/functions/higher_order_functions.md). Les Tuples peuvent être le résultat d'une requête. Dans ce cas, pour les formats de texte autres que JSON, les valeurs sont séparées par des virgules entre parenthèses. Dans les formats JSON, les tuples sont sortis sous forme de tableaux (entre crochets). -## La création d'un tuple {#creating-a-tuple} +## La création d'un Tuple {#creating-a-tuple} Vous pouvez utiliser une fonction pour créer un tuple: @@ -30,9 +33,9 @@ SELECT tuple(1,'a') AS x, toTypeName(x) └─────────┴───────────────────────────┘ ``` -## Utilisation de types de données {#working-with-data-types} +## Utilisation De Types De Données {#working-with-data-types} -Lors de la création d'un tuple à la volée, ClickHouse détecte automatiquement le type de chaque argument comme le minimum des types qui peuvent stocker la valeur de l'argument. Si l'argument est [NULL](../query_language/syntax.md#null-literal) le type de l'élément tuple est [Nullable](nullable.md). +Lors de la création d'un tuple à la volée, ClickHouse détecte automatiquement le type de chaque argument comme le minimum des types qui peuvent stocker la valeur de l'argument. Si l'argument est [NULL](../../sql_reference/syntax.md#null-literal) le type de l'élément tuple est [Nullable](nullable.md). Exemple de détection automatique de type de données: diff --git a/docs/fr/data_types/uuid.md b/docs/fr/sql_reference/data_types/uuid.md similarity index 74% rename from docs/fr/data_types/uuid.md rename to docs/fr/sql_reference/data_types/uuid.md index 86790c6265e..4ccf55ce349 100644 --- a/docs/fr/data_types/uuid.md +++ b/docs/fr/sql_reference/data_types/uuid.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 46 +toc_title: UUID --- # UUID {#uuid-data-type} @@ -18,11 +21,11 @@ Si vous ne spécifiez pas la valeur de la colonne UUID lors de l'insertion d'un 00000000-0000-0000-0000-000000000000 ``` -## Comment générer {#how-to-generate} +## Comment Générer {#how-to-generate} -Pour générer la valeur UUID, ClickHouse fournit [generateUUIDv4](../query_language/functions/uuid_functions.md) fonction. +Pour générer la valeur UUID, ClickHouse fournit [generateUUIDv4](../../sql_reference/functions/uuid_functions.md) fonction. 
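A minimal sketch:

``` sql
-- Each call produces a new version-4 (random-based) UUID.
SELECT generateUUIDv4() AS id, toTypeName(id) AS type;
```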
-## Exemple d'utilisation {#usage-example} +## Exemple D'Utilisation {#usage-example} **Exemple 1** @@ -67,8 +70,8 @@ SELECT * FROM t_uuid ## Restriction {#restrictions} -Le type de données UUID ne prend en charge que les fonctions qui [Chaîne](string.md) type de données prend également en charge (par exemple, [min](../query_language/agg_functions/reference.md#agg_function-min), [Max](../query_language/agg_functions/reference.md#agg_function-max), et [compter](../query_language/agg_functions/reference.md#agg_function-count)). +Le type de données UUID ne prend en charge que les fonctions qui [Chaîne](string.md) type de données prend également en charge (par exemple, [min](../../sql_reference/aggregate_functions/reference.md#agg_function-min), [Max](../../sql_reference/aggregate_functions/reference.md#agg_function-max), et [compter](../../sql_reference/aggregate_functions/reference.md#agg_function-count)). -Le type de données UUID n'est pas pris en charge par les opérations arithmétiques (par exemple, [ABS](../query_language/functions/arithmetic_functions.md#arithm_func-abs)) ou des fonctions d'agrégation, comme [somme](../query_language/agg_functions/reference.md#agg_function-sum) et [avg](../query_language/agg_functions/reference.md#agg_function-avg). +Le type de données UUID n'est pas pris en charge par les opérations arithmétiques (par exemple, [ABS](../../sql_reference/functions/arithmetic_functions.md#arithm_func-abs)) ou des fonctions d'agrégation, comme [somme](../../sql_reference/aggregate_functions/reference.md#agg_function-sum) et [avg](../../sql_reference/aggregate_functions/reference.md#agg_function-avg). [Article Original](https://clickhouse.tech/docs/en/data_types/uuid/) diff --git a/docs/fr/query_language/dicts/external_dicts.md b/docs/fr/sql_reference/dictionaries/external_dictionaries/external_dicts.md similarity index 68% rename from docs/fr/query_language/dicts/external_dicts.md rename to docs/fr/sql_reference/dictionaries/external_dictionaries/external_dicts.md index eb6829206b3..d008472b339 100644 --- a/docs/fr/query_language/dicts/external_dicts.md +++ b/docs/fr/sql_reference/dictionaries/external_dictionaries/external_dicts.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 39 +toc_title: "Description G\xE9n\xE9rale" --- # Dictionnaires Externes {#dicts-external-dicts} @@ -10,11 +13,11 @@ ClickHouse: - Stocke entièrement ou partiellement les dictionnaires en RAM. - Met à jour périodiquement les dictionnaires et charge dynamiquement les valeurs manquantes. En d'autres mots, les dictionnaires peuvent être chargés dynamiquement. -- Permet de créer des dictionnaires externes avec des fichiers xml ou [Les requêtes DDL](../create.md#create-dictionary-query). +- Permet de créer des dictionnaires externes avec des fichiers xml ou [Les requêtes DDL](../../statements/create.md#create-dictionary-query). -La configuration des dictionnaires externes peut être située dans un ou plusieurs fichiers xml. Le chemin d'accès à la configuration spécifiée dans le [dictionaries\_config](../../operations/server_settings/settings.md#server_settings-dictionaries_config) paramètre. +La configuration des dictionnaires externes peut être située dans un ou plusieurs fichiers xml. Le chemin d'accès à la configuration spécifiée dans le [dictionaries\_config](../../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-dictionaries_config) paramètre. 
-Les dictionnaires peuvent être chargés au démarrage du serveur ou à la première utilisation, en fonction [dictionaries\_lazy\_load](../../operations/server_settings/settings.md#server_settings-dictionaries_lazy_load) paramètre. +Les dictionnaires peuvent être chargés au démarrage du serveur ou à la première utilisation, en fonction [dictionaries\_lazy\_load](../../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-dictionaries_lazy_load) paramètre. Le fichier de configuration du dictionnaire a le format suivant: @@ -36,18 +39,18 @@ Le fichier de configuration du dictionnaire a le format suivant: Vous pouvez [configurer](external_dicts_dict.md) le nombre de dictionnaires dans le même fichier. -[Requêtes DDL pour les dictionnaires](../create.md#create-dictionary-query) ne nécessite aucun enregistrement supplémentaire dans la configuration du serveur. Ils permettent de travailler avec des dictionnaires en tant qu'entités de première classe, comme des tables ou des vues. +[Requêtes DDL pour les dictionnaires](../../statements/create.md#create-dictionary-query) ne nécessite aucun enregistrement supplémentaire dans la configuration du serveur. Ils permettent de travailler avec des dictionnaires en tant qu'entités de première classe, comme des tables ou des vues. !!! attention "Attention" - Vous pouvez convertir les valeurs pour un petit dictionnaire en le décrivant dans un `SELECT` requête (voir la [transformer](../functions/other_functions.md) fonction). Cette fonctionnalité n'est pas liée aux dictionnaires externes. + Vous pouvez convertir les valeurs pour un petit dictionnaire en le décrivant dans un `SELECT` requête (voir la [transformer](../../../sql_reference/functions/other_functions.md) fonction). Cette fonctionnalité n'est pas liée aux dictionnaires externes. 
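To illustrate the DDL route mentioned above, a sketch only — the dictionary name, source table, and credentials are hypothetical:

``` sql
CREATE DICTIONARY regions_dict
(
    id UInt64,
    name String DEFAULT ''
)
PRIMARY KEY id
SOURCE(CLICKHOUSE(host 'localhost' port 9000 user 'default' password '' db 'default' table 'regions'))
LAYOUT(FLAT())
LIFETIME(MIN 300 MAX 360);

-- The dictionary can then be queried like any other first-class entity.
SELECT dictGetString('regions_dict', 'name', toUInt64(1));
```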
-## Voir aussi {#ext-dicts-see-also} +## Voir Aussi {#ext-dicts-see-also} - [Configuration D'un dictionnaire externe](external_dicts_dict.md) - [Stockage des dictionnaires en mémoire](external_dicts_dict_layout.md) - [Mises À Jour Du Dictionnaire](external_dicts_dict_lifetime.md) - [Sources de dictionnaires externes](external_dicts_dict_sources.md) - [Clé et champs du dictionnaire](external_dicts_dict_structure.md) -- [Fonctions pour travailler avec des dictionnaires externes](../functions/ext_dict_functions.md) +- [Fonctions pour travailler avec des dictionnaires externes](../../../sql_reference/functions/ext_dict_functions.md) [Article Original](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts/) diff --git a/docs/fr/query_language/dicts/external_dicts_dict.md b/docs/fr/sql_reference/dictionaries/external_dictionaries/external_dicts_dict.md similarity index 85% rename from docs/fr/query_language/dicts/external_dicts_dict.md rename to docs/fr/sql_reference/dictionaries/external_dictionaries/external_dicts_dict.md index 8012fc15cef..01721ef1eb3 100644 --- a/docs/fr/query_language/dicts/external_dicts_dict.md +++ b/docs/fr/sql_reference/dictionaries/external_dictionaries/external_dicts_dict.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 40 +toc_title: Configuration D'un dictionnaire externe --- # Configuration D'un dictionnaire externe {#dicts-external-dicts-dict} @@ -28,7 +31,7 @@ Si dictionary est configuré à l'aide d'un fichier xml, than dictionary configu ``` -Correspondant [DDL-requête](../create.md#create-dictionary-query) a la structure suivante: +Correspondant [DDL-requête](../../statements/create.md#create-dictionary-query) a la structure suivante: ``` sql CREATE DICTIONARY dict_name diff --git a/docs/fr/query_language/dicts/external_dicts_dict_hierarchical.md b/docs/fr/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_hierarchical.md similarity index 87% rename from docs/fr/query_language/dicts/external_dicts_dict_hierarchical.md rename to docs/fr/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_hierarchical.md index 923e231e6b5..30bfb97c52f 100644 --- a/docs/fr/query_language/dicts/external_dicts_dict_hierarchical.md +++ b/docs/fr/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_hierarchical.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 45 +toc_title: "Dictionnaires hi\xE9rarchiques" --- # Dictionnaires Hiérarchiques {#hierarchical-dictionaries} @@ -36,7 +39,7 @@ Ce tableau contient une colonne `parent_region` qui contient la clé du parent l Clickhouse soutient le [hiérarchique](external_dicts_dict_structure.md#hierarchical-dict-attr) propriété pour [externe dictionnaire](index.md) attribut. Cette propriété vous permet de configurer le dictionnaire hiérarchique comme décrit ci-dessus. -Le [dictGetHierarchy](../functions/ext_dict_functions.md#dictgethierarchy) la fonction vous permet d'obtenir la chaîne parent d'un élément. +Le [dictGetHierarchy](../../../sql_reference/functions/ext_dict_functions.md#dictgethierarchy) la fonction vous permet d'obtenir la chaîne parent d'un élément. 
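For example — assuming a hierarchical dictionary named `regions_hierarchy` is configured, a hypothetical name used only for illustration:

``` sql
-- Returns the array of keys from the given element up to the root, e.g. [5, 4, 3, 1].
SELECT dictGetHierarchy('regions_hierarchy', toUInt64(5));
```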
Pour notre exemple, la structure du dictionnaire peut être la suivante: diff --git a/docs/fr/query_language/dicts/external_dicts_dict_layout.md b/docs/fr/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_layout.md similarity index 94% rename from docs/fr/query_language/dicts/external_dicts_dict_layout.md rename to docs/fr/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_layout.md index 1132af859af..62a62a75160 100644 --- a/docs/fr/query_language/dicts/external_dicts_dict_layout.md +++ b/docs/fr/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_layout.md @@ -1,8 +1,11 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 41 +toc_title: "Stockage des dictionnaires en m\xE9moire" --- -# Stockage des dictionnaires en mémoire {#dicts-external-dicts-dict-layout} +# Stockage Des Dictionnaires En Mémoire {#dicts-external-dicts-dict-layout} Il existe une variété de façons de stocker les dictionnaires en mémoire. @@ -38,7 +41,7 @@ La configuration ressemble à ceci: ``` -Correspondant [DDL-requête](../create.md#create-dictionary-query): +Correspondant [DDL-requête](../../statements/create.md#create-dictionary-query): ``` sql CREATE DICTIONARY (...) @@ -47,7 +50,7 @@ LAYOUT(LAYOUT_TYPE(param value)) -- layout settings ... ``` -## Façons de stocker des dictionnaires en mémoire {#ways-to-store-dictionaries-in-memory} +## Façons De Stocker Des Dictionnaires En Mémoire {#ways-to-store-dictionaries-in-memory} - [plat](#flat) - [haché](#dicts-external_dicts_dict_layout-hashed) @@ -143,15 +146,15 @@ Cette méthode de stockage fonctionne de la même manière que hachée et permet Exemple: Le tableau contient des réductions pour chaque annonceur dans le format: ``` text -+---------------|---------------------|-------------------|--------+ ++---------|-------------|-------------|------+ | advertiser id | discount start date | discount end date | amount | +===============+=====================+===================+========+ | 123 | 2015-01-01 | 2015-01-15 | 0.15 | -+---------------|---------------------|-------------------|--------+ ++---------|-------------|-------------|------+ | 123 | 2015-01-16 | 2015-01-31 | 0.25 | -+---------------|---------------------|-------------------|--------+ ++---------|-------------|-------------|------+ | 456 | 2015-01-01 | 2015-01-15 | 0.05 | -+---------------|---------------------|-------------------|--------+ ++---------|-------------|-------------|------+ ``` Pour utiliser un échantillon pour les plages de dates, définissez `range_min` et `range_max` éléments dans le [structure](external_dicts_dict_structure.md). Ces éléments doivent contenir des éléments `name` et`type` (si `type` n'est pas spécifié, le type par défaut sera utilisé-Date). `type` peut être n'importe quel type numérique (Date / DateTime / UInt64 / Int32 / autres). 
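A hedged sketch of how such a dictionary is queried (the dictionary name `discounts` and the `amount` attribute are assumptions based on the table above): the extra `Date` argument selects the row whose range contains it:

``` sql
SELECT dictGetFloat64('discounts', 'amount', toUInt64(123), toDate('2015-01-14')) AS discount;
```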
@@ -300,17 +303,17 @@ Ce type de stockage permet de mapper des préfixes de réseau (adresses IP) à d Exemple: la table contient les préfixes de réseau et leur correspondant en tant que numéro et Code de pays: ``` text - +-----------------|-------|--------+ + +-----------|-----|------+ | prefix | asn | cca2 | +=================+=======+========+ | 202.79.32.0/20 | 17501 | NP | - +-----------------|-------|--------+ + +-----------|-----|------+ | 2620:0:870::/48 | 3856 | US | - +-----------------|-------|--------+ + +-----------|-----|------+ | 2a02:6b8:1::/48 | 13238 | RU | - +-----------------|-------|--------+ + +-----------|-----|------+ | 2001:db8::/32 | 65536 | ZZ | - +-----------------|-------|--------+ + +-----------|-----|------+ ``` Lorsque vous utilisez ce type de mise en page, la structure doit avoir une clé composite. diff --git a/docs/fr/query_language/dicts/external_dicts_dict_lifetime.md b/docs/fr/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_lifetime.md similarity index 96% rename from docs/fr/query_language/dicts/external_dicts_dict_lifetime.md rename to docs/fr/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_lifetime.md index ee4d1717a26..2dcca4bfbbe 100644 --- a/docs/fr/query_language/dicts/external_dicts_dict_lifetime.md +++ b/docs/fr/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_lifetime.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 42 +toc_title: "Mises \xC0 Jour Du Dictionnaire" --- # Mises À Jour Du Dictionnaire {#dictionary-updates} diff --git a/docs/fr/query_language/dicts/external_dicts_dict_sources.md b/docs/fr/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md similarity index 92% rename from docs/fr/query_language/dicts/external_dicts_dict_sources.md rename to docs/fr/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md index 2c25aed19f2..55a982862ed 100644 --- a/docs/fr/query_language/dicts/external_dicts_dict_sources.md +++ b/docs/fr/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md @@ -1,8 +1,11 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 43 +toc_title: Sources de dictionnaires externes --- -# Sources de dictionnaires externes {#dicts-external-dicts-dict-sources} +# Sources De Dictionnaires Externes {#dicts-external-dicts-dict-sources} Externe dictionnaire peut être connecté à partir de nombreuses sources différentes. @@ -23,7 +26,7 @@ Si dictionary est configuré à l'aide de xml-file, la configuration ressemble ``` -En cas de [DDL-requête](../create.md#create-dictionary-query), configuration égale ressemblera à: +En cas de [DDL-requête](../../statements/create.md#create-dictionary-query), configuration égale ressemblera à: ``` sql CREATE DICTIONARY dict_name (...) @@ -68,7 +71,7 @@ SOURCE(FILE(path '/opt/dictionaries/os.tsv' format 'TabSeparated')) Définition des champs: - `path` – The absolute path to the file. -- `format` – The file format. All the formats described in “[Format](../../interfaces/formats.md#formats)” sont pris en charge. +- `format` – The file format. All the formats described in “[Format](../../../interfaces/formats.md#formats)” sont pris en charge. 
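Putting the two fields together in DDL form — a sketch only; the dictionary name and attribute are hypothetical, and the path mirrors the example above:

``` sql
CREATE DICTIONARY os_dict
(
    id UInt64,
    os_name String DEFAULT ''
)
PRIMARY KEY id
-- Note: for DDL-created dictionaries the server may require the file to reside in an allowed directory.
SOURCE(FILE(path '/opt/dictionaries/os.tsv' format 'TabSeparated'))
LAYOUT(FLAT())
LIFETIME(300);
```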
## Fichier Exécutable {#dicts-external_dicts_dict_sources-executable} @@ -94,9 +97,9 @@ SOURCE(EXECUTABLE(command 'cat /opt/dictionaries/os.tsv' format 'TabSeparated')) Définition des champs: - `command` – The absolute path to the executable file, or the file name (if the program directory is written to `PATH`). -- `format` – The file format. All the formats described in “[Format](../../interfaces/formats.md#formats)” sont pris en charge. +- `format` – The file format. All the formats described in “[Format](../../../interfaces/formats.md#formats)” sont pris en charge. -## HTTP(S) {#dicts-external_dicts_dict_sources-http} +## Http(s) {#dicts-external_dicts_dict_sources-http} Travailler avec un serveur HTTP (S) dépend de [comment le dictionnaire est stocké dans la mémoire](external_dicts_dict_layout.md). Si le dictionnaire est stocké en utilisant `cache` et `complex_key_cache`, Clickhouse demande les clés nécessaires en envoyant une demande via le `POST` méthode. @@ -132,12 +135,12 @@ SOURCE(HTTP( )) ``` -Pour que ClickHouse accède à une ressource HTTPS, vous devez [configurer openSSL](../../operations/server_settings/settings.md#server_settings-openssl) dans la configuration du serveur. +Pour que ClickHouse accède à une ressource HTTPS, vous devez [configurer openSSL](../../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-openssl) dans la configuration du serveur. Définition des champs: - `url` – The source URL. -- `format` – The file format. All the formats described in “[Format](../../interfaces/formats.md#formats)” sont pris en charge. +- `format` – The file format. All the formats described in “[Format](../../../interfaces/formats.md#formats)” sont pris en charge. - `credentials` – Basic HTTP authentication. Optional parameter. - `user` – Username required for the authentication. - `password` – Password required for the authentication. @@ -183,7 +186,7 @@ Définition des champs: ClickHouse reçoit des symboles de citation D'ODBC-driver et cite tous les paramètres des requêtes au pilote, il est donc nécessaire de définir le nom de la table en conséquence sur le cas du nom de la table dans la base de données. -Si vous avez des problèmes avec des encodages lors de l'utilisation d'Oracle, consultez le [FAQ](../../faq/general.md#oracle-odbc-encodings) article. +Si vous avez des problèmes avec des encodages lors de l'utilisation d'Oracle, consultez le [FAQ](../../../faq/general.md#oracle-odbc-encodings) article. ### Vulnérabilité connue de la fonctionnalité du dictionnaire ODBC {#known-vulnerability-of-the-odbc-dictionary-functionality} @@ -213,7 +216,7 @@ SELECT * FROM odbc('DSN=gregtest;Servername=some-server.com', 'test_db'); Le pilote ODBC enverra des valeurs de `USERNAME` et `PASSWORD` de `odbc.ini` de `some-server.com`. -### Exemple de connexion PostgreSQL {#example-of-connecting-postgresql} +### Exemple De Connexion Postgresql {#example-of-connecting-postgresql} Ubuntu OS. @@ -390,7 +393,7 @@ LIFETIME(MIN 300 MAX 360) ## DBMS {#dbms} -### MySQL {#dicts-external_dicts_dict_sources-mysql} +### Mysql {#dicts-external_dicts_dict_sources-mysql} Exemple de paramètres: @@ -487,7 +490,7 @@ SOURCE(MYSQL( )) ``` -### ClickHouse {#dicts-external_dicts_dict_sources-clickhouse} +### Clickhouse {#dicts-external_dicts_dict_sources-clickhouse} Exemple de paramètres: @@ -521,7 +524,7 @@ SOURCE(CLICKHOUSE( Définition des champs: -- `host` – The ClickHouse host. If it is a local host, the query is processed without any network activity. 
To improve fault tolerance, you can create a [Distribué](../../operations/table_engines/distributed.md) table et entrez-le dans les configurations suivantes. +- `host` – The ClickHouse host. If it is a local host, the query is processed without any network activity. To improve fault tolerance, you can create a [Distribué](../../../engines/table_engines/special/distributed.md) table et entrez-le dans les configurations suivantes. - `port` – The port on the ClickHouse server. - `user` – Name of the ClickHouse user. - `password` – Password of the ClickHouse user. @@ -530,7 +533,7 @@ Définition des champs: - `where` – The selection criteria. May be omitted. - `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [Mise à jour des dictionnaires](external_dicts_dict_lifetime.md). -### MongoDB {#dicts-external_dicts_dict_sources-mongodb} +### Mongodb {#dicts-external_dicts_dict_sources-mongodb} Exemple de paramètres: diff --git a/docs/fr/query_language/dicts/external_dicts_dict_structure.md b/docs/fr/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md similarity index 86% rename from docs/fr/query_language/dicts/external_dicts_dict_structure.md rename to docs/fr/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md index bca7b07f7d9..2aed425c6da 100644 --- a/docs/fr/query_language/dicts/external_dicts_dict_structure.md +++ b/docs/fr/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 44 +toc_title: "Cl\xE9 et champs du dictionnaire" --- # Clé et champs du dictionnaire {#dictionary-key-and-fields} @@ -155,18 +158,18 @@ CREATE DICTIONARY somename ( Champs de Configuration: -| Balise | Description | Requis | -|------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------| -| `name` | Nom de la colonne. | Oui | -| `type` | Type de données ClickHouse.
    ClickHouse tente de convertir la valeur du dictionnaire vers le type de données spécifié. Par exemple, pour MySQL, le champ peut être `TEXT`, `VARCHAR`, ou `BLOB` dans la table source MySQL, mais il peut être téléchargé comme `String` à ClickHouse.
    [Nullable](../../data_types/nullable.md) n'est pas pris en charge. | Oui | -| `null_value` | Valeur par défaut pour un élément inexistant.
    Dans l'exemple, c'est une chaîne vide. Vous ne pouvez pas utiliser `NULL` dans ce domaine. | Oui | -| `expression` | [Expression](../syntax.md#syntax-expressions) que ClickHouse s'exécute sur la valeur.
    L'expression peut être un nom de colonne dans la base de données SQL distante. Ainsi, vous pouvez l'utiliser pour créer un alias pour la colonne à distance.

    Valeur par défaut: aucune expression. | Aucun | -| `hierarchical` | Si `true`, l'attribut contient la valeur d'un parent clé de la clé actuelle. Voir [Dictionnaires Hiérarchiques](external_dicts_dict_hierarchical.md).

    Valeur par défaut: `false`. | Aucun | -| `injective` | Indicateur qui indique si le `id -> attribute` l'image est [injective](https://en.wikipedia.org/wiki/Injective_function).
    Si `true`, ClickHouse peut automatiquement placer après le `GROUP BY` clause les requêtes aux dictionnaires avec injection. Habituellement, il réduit considérablement le montant de ces demandes.

    Valeur par défaut: `false`. | Aucun | -| `is_object_id` | Indicateur qui indique si la requête est exécutée pour un document MongoDB par `ObjectID`.

    Valeur par défaut: `false`. | Aucun | +| Balise | Description | Requis | +|------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------| +| `name` | Nom de la colonne. | Oui | +| `type` | Type de données ClickHouse.
    ClickHouse tente de convertir la valeur du dictionnaire vers le type de données spécifié. Par exemple, pour MySQL, le champ peut être `TEXT`, `VARCHAR`, ou `BLOB` dans la table source MySQL, mais il peut être téléchargé comme `String` à ClickHouse.
    [Nullable](../../../sql_reference/data_types/nullable.md) n'est pas pris en charge. | Oui | +| `null_value` | Valeur par défaut pour un élément inexistant.
    Dans l'exemple, c'est une chaîne vide. Vous ne pouvez pas utiliser `NULL` dans ce domaine. | Oui | +| `expression` | [Expression](../../syntax.md#syntax-expressions) que ClickHouse s'exécute sur la valeur.
    L'expression peut être un nom de colonne dans la base de données SQL distante. Ainsi, vous pouvez l'utiliser pour créer un alias pour la colonne à distance.

    Valeur par défaut: aucune expression. | Aucun | +| `hierarchical` | Si `true`, l'attribut contient la valeur d'un parent clé de la clé actuelle. Voir [Dictionnaires Hiérarchiques](external_dicts_dict_hierarchical.md).

    Valeur par défaut: `false`. | Aucun | +| `injective` | Indicateur qui indique si le `id -> attribute` l'image est [injective](https://en.wikipedia.org/wiki/Injective_function).
    Si `true`, ClickHouse peut automatiquement placer après le `GROUP BY` clause les requêtes aux dictionnaires avec injection. Habituellement, il réduit considérablement le montant de ces demandes.

    Valeur par défaut: `false`. | Aucun | +| `is_object_id` | Indicateur qui indique si la requête est exécutée pour un document MongoDB par `ObjectID`.

    Valeur par défaut: `false`. | Aucun |
 
 ## Voir Aussi {#see-also}
 
-- [Fonctions pour travailler avec des dictionnaires externes](../functions/ext_dict_functions.md).
+- [Fonctions pour travailler avec des dictionnaires externes](../../../sql_reference/functions/ext_dict_functions.md).
 
 [Article Original](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_structure/)
 
diff --git a/docs/fr/sql_reference/dictionaries/external_dictionaries/index.md b/docs/fr/sql_reference/dictionaries/external_dictionaries/index.md
new file mode 100644
index 00000000000..ffe4019061f
--- /dev/null
+++ b/docs/fr/sql_reference/dictionaries/external_dictionaries/index.md
@@ -0,0 +1,8 @@
+---
+machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_folder_title: External Dictionaries
+toc_priority: 37
+---
+
+
diff --git a/docs/fr/query_language/dicts/index.md b/docs/fr/sql_reference/dictionaries/index.md
similarity index 65%
rename from docs/fr/query_language/dicts/index.md
rename to docs/fr/sql_reference/dictionaries/index.md
index dc48bf5d4e6..68b484d46c4 100644
--- a/docs/fr/query_language/dicts/index.md
+++ b/docs/fr/sql_reference/dictionaries/index.md
@@ -1,5 +1,9 @@
 ---
 machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_folder_title: Dictionaries
+toc_priority: 35
+toc_title: Introduction
 ---
 
 # Dictionnaire {#dictionaries}
 
@@ -12,7 +16,7 @@ ClickHouse prend en charge des fonctions spéciales pour travailler avec des dic
 
 Supports ClickHouse:
 
-- [Construit-dans les dictionnaires](internal_dicts.md#internal_dicts) avec un [ensemble de fonctions](../functions/ym_dict_functions.md).
-- [Plug-in (externe) dictionnaires](external_dicts.md) avec un [ensemble de fonctions](../functions/ext_dict_functions.md).
+- [Construit-dans les dictionnaires](internal_dicts.md#internal_dicts) avec un [ensemble de fonctions](../../sql_reference/functions/ym_dict_functions.md).
+- [Plug-in (externe) dictionnaires](external_dictionaries/external_dicts.md) avec un [ensemble de fonctions](../../sql_reference/functions/ext_dict_functions.md).
[Article Original](https://clickhouse.tech/docs/en/query_language/dicts/) diff --git a/docs/fr/query_language/dicts/internal_dicts.md b/docs/fr/sql_reference/dictionaries/internal_dicts.md similarity index 96% rename from docs/fr/query_language/dicts/internal_dicts.md rename to docs/fr/sql_reference/dictionaries/internal_dicts.md index a678254a39a..bee5904dbdc 100644 --- a/docs/fr/query_language/dicts/internal_dicts.md +++ b/docs/fr/sql_reference/dictionaries/internal_dicts.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 39 +toc_title: Dictionnaires Internes --- # Dictionnaires internes {#internal_dicts} diff --git a/docs/fr/query_language/functions/arithmetic_functions.md b/docs/fr/sql_reference/functions/arithmetic_functions.md similarity index 97% rename from docs/fr/query_language/functions/arithmetic_functions.md rename to docs/fr/sql_reference/functions/arithmetic_functions.md index 3cb55bc1103..2eeea83297d 100644 --- a/docs/fr/query_language/functions/arithmetic_functions.md +++ b/docs/fr/sql_reference/functions/arithmetic_functions.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 35 +toc_title: "Arithm\xE9tique" --- # Fonctions arithmétiques {#arithmetic-functions} diff --git a/docs/fr/query_language/functions/array_functions.md b/docs/fr/sql_reference/functions/array_functions.md similarity index 91% rename from docs/fr/query_language/functions/array_functions.md rename to docs/fr/sql_reference/functions/array_functions.md index 81951e5ee62..9aefe83fae0 100644 --- a/docs/fr/query_language/functions/array_functions.md +++ b/docs/fr/sql_reference/functions/array_functions.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 46 +toc_title: Travailler avec des tableaux --- # Fonctions pour travailler avec des tableaux {#functions-for-working-with-arrays} @@ -62,7 +65,7 @@ arrayConcat(arrays) **Paramètre** -- `arrays` – Arbitrary number of arguments of [Tableau](../../data_types/array.md) type. +- `arrays` – Arbitrary number of arguments of [Tableau](../../sql_reference/data_types/array.md) type. **Exemple** @@ -186,7 +189,6 @@ SELECT indexOf([1, 3, NULL, NULL], NULL) ``` ``` text - ┌─indexOf([1, 3, NULL, NULL], NULL)─┐ │ 3 │ └───────────────────────────────────┘ @@ -367,7 +369,7 @@ arrayPushBack(array, single_value) **Paramètre** - `array` – Array. -- `single_value` – A single value. Only numbers can be added to an array with numbers, and only strings can be added to an array of strings. When adding numbers, ClickHouse automatically sets the `single_value` type pour le type de données du tableau. Pour plus d'informations sur les types de données dans ClickHouse, voir “[Types de données](../../data_types/index.md#data_types)”. Peut être `NULL`. La fonction ajoute un `NULL` tableau, et le type d'éléments de tableau convertit en `Nullable`. +- `single_value` – A single value. Only numbers can be added to an array with numbers, and only strings can be added to an array of strings. When adding numbers, ClickHouse automatically sets the `single_value` type pour le type de données du tableau. Pour plus d'informations sur les types de données dans ClickHouse, voir “[Types de données](../../sql_reference/data_types/index.md#data_types)”. Peut être `NULL`. La fonction ajoute un `NULL` tableau, et le type d'éléments de tableau convertit en `Nullable`. 
 **Exemple**
 
@@ -392,7 +394,7 @@ arrayPushFront(array, single_value)
 
 **Paramètre**
 
 - `array` – Array.
-- `single_value` – A single value. Only numbers can be added to an array with numbers, and only strings can be added to an array of strings. When adding numbers, ClickHouse automatically sets the `single_value` type pour le type de données du tableau. Pour plus d'informations sur les types de données dans ClickHouse, voir “[Types de données](../../data_types/index.md#data_types)”. Peut être `NULL`. La fonction ajoute un `NULL` tableau, et le type d'éléments de tableau convertit en `Nullable`.
+- `single_value` – A single value. Only numbers can be added to an array with numbers, and only strings can be added to an array of strings. When adding numbers, ClickHouse automatically sets the `single_value` type pour le type de données du tableau. Pour plus d'informations sur les types de données dans ClickHouse, voir “[Types de données](../../sql_reference/data_types/index.md#data_types)”. Peut être `NULL`. La fonction ajoute un `NULL` tableau, et le type d'éléments de tableau convertit en `Nullable`.
 
 **Exemple**
 
@@ -808,11 +810,24 @@ SELECT
 └──────────────┴───────────┘
 ```
 
-## arrayReduce(agg\_func, arr1, …) {#array-functions-arrayreduce}
+## arrayReduce {#arrayreduce}
 
 Applique une fonction d'agrégation aux éléments du tableau et renvoie son résultat. Le nom de la fonction d'agrégation est passé sous forme de chaîne entre guillemets simples `'max'`, `'sum'`. Lorsque vous utilisez des fonctions d'agrégat paramétriques, le paramètre est indiqué après le nom de la fonction entre parenthèses `'uniqUpTo(6)'`.
 
-Exemple:
+**Syntaxe**
+
+``` sql
+arrayReduce(agg_func, arr1, arr2, ..., arrN)
+```
+
+**Paramètre**
+
+- `agg_func` — The name of an aggregate function which should be a constant [chaîne](../../sql_reference/data_types/string.md).
+- `arr` — Any number of [tableau](../../sql_reference/data_types/array.md) tapez les colonnes comme paramètres de la fonction d'agrégation.
+
+**Valeur renvoyée**
+
+**Exemple**
 
 ``` sql
 SELECT arrayReduce('max', [1, 2, 3])
@@ -826,8 +841,6 @@ SELECT arrayReduce('max', [1, 2, 3])
 
 Si une fonction d'agrégation prend plusieurs arguments, cette fonction doit être appliqué à plusieurs ensembles de même taille.
 
-Exemple:
-
 ``` sql
 SELECT arrayReduce('maxIf', [3, 5], [1, 0])
 ```
@@ -850,7 +863,41 @@ SELECT arrayReduce('uniqUpTo(3)', [1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
 └─────────────────────────────────────────────────────────────┘
 ```
 
-## arrayReverse(arr) {#array_functions-arrayreverse}
+## arrayReduceInRanges {#arrayreduceinranges}
+
+Applique une fonction d'agrégation d'éléments de tableau dans des plages et retourne un tableau contenant le résultat correspondant à chaque gamme. La fonction retourne le même résultat que plusieurs `arrayReduce(agg_func, arraySlice(arr1, index, length), ...)`.
+
+**Syntaxe**
+
+``` sql
+arrayReduceInRanges(agg_func, ranges, arr1, arr2, ..., arrN)
+```
+
+**Paramètre**
+
+- `agg_func` — The name of an aggregate function which should be a constant [chaîne](../../sql_reference/data_types/string.md).
+- `ranges` — The ranges to aggregate which should be an [tableau](../../sql_reference/data_types/array.md) de [tuple](../../sql_reference/data_types/tuple.md) qui contient l'indice et la longueur de chaque plage.
+- `arr` — Any number of [tableau](../../sql_reference/data_types/array.md) tapez les colonnes comme paramètres de la fonction d'agrégation.
+
+**Valeur renvoyée**
+
+**Exemple**
+
+``` sql
+SELECT arrayReduceInRanges(
+    'sum',
+    [(1, 5), (2, 3), (3, 4), (4, 4)],
+    [1000000, 200000, 30000, 4000, 500, 60, 7]
+) AS res
+```
+
+``` text
+┌─res─────────────────────────┐
+│ [1234500,234000,34560,4567] │
+└─────────────────────────────┘
+```
+
+## arrayReverse(arr) {#arrayreverse}
 
 Retourne un tableau de la même taille que l'original tableau contenant les éléments dans l'ordre inverse.
 
@@ -891,7 +938,7 @@ Alias: `flatten`.
 
 **Paramètre**
 
-- `array_of_arrays` — [Tableau](../../data_types/array.md) de tableaux. Exemple, `[[1,2,3], [4,5]]`.
+- `array_of_arrays` — [Tableau](../../sql_reference/data_types/array.md) de tableaux. Exemple, `[[1,2,3], [4,5]]`.
 
 **Exemple**
 
@@ -917,7 +964,7 @@ arrayCompact(arr)
 
 **Paramètre**
 
-`arr` — The [tableau](../../data_types/array.md) inspecter.
+`arr` — The [tableau](../../sql_reference/data_types/array.md) inspecter.
 
 **Valeur renvoyée**
 
@@ -953,7 +1000,7 @@ arrayZip(arr1, arr2, ..., arrN)
 
 **Paramètre**
 
-`arr` — Any number of [tableau](../../data_types/array.md) tapez les colonnes à combiner.
+`arr` — Any number of [tableau](../../sql_reference/data_types/array.md) tapez les colonnes à combiner.
 
 **Valeur renvoyée**
 
@@ -1004,7 +1051,7 @@ Résultat:
 
 ``` text
 ┌─arrayAUC([0.1, 0.4, 0.35, 0.8], [0, 0, 1, 1])─┐
 │ 0.75 │
-└────────────────────────────────────────-----──┘
+└───────────────────────────────────────────────┘
 ```
 
 [Article Original](https://clickhouse.tech/docs/en/query_language/functions/array_functions/)
diff --git a/docs/fr/query_language/functions/array_join.md b/docs/fr/sql_reference/functions/array_join.md
similarity index 93%
rename from docs/fr/query_language/functions/array_join.md
rename to docs/fr/sql_reference/functions/array_join.md
index 1985191b319..f835950003d 100644
--- a/docs/fr/query_language/functions/array_join.md
+++ b/docs/fr/sql_reference/functions/array_join.md
@@ -1,5 +1,8 @@
 ---
 machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_priority: 61
+toc_title: arrayJoin
 ---
 
 # fonction arrayJoin {#functions_arrayjoin}
diff --git a/docs/fr/query_language/functions/bit_functions.md b/docs/fr/sql_reference/functions/bit_functions.md
similarity index 93%
rename from docs/fr/query_language/functions/bit_functions.md
rename to docs/fr/sql_reference/functions/bit_functions.md
index c04c0a77cf7..149b0084e16 100644
--- a/docs/fr/query_language/functions/bit_functions.md
+++ b/docs/fr/sql_reference/functions/bit_functions.md
@@ -1,5 +1,8 @@
 ---
 machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_priority: 48
+toc_title: Bit
 ---
 
 # Peu de fonctions {#bit-functions}
@@ -221,7 +224,7 @@ bitCount(x)
 
 **Paramètre**
 
-- `x` — [Entier](../../data_types/int_uint.md) ou [virgule flottante](../../data_types/float.md) nombre. La fonction utilise la représentation de la valeur en mémoire. Il permet de financer les nombres à virgule flottante.
+- `x` — [Entier](../../sql_reference/data_types/int_uint.md) ou [virgule flottante](../../sql_reference/data_types/float.md) nombre. La fonction utilise la représentation de la valeur en mémoire. Il permet de financer les nombres à virgule flottante.
**Valeur renvoyée** diff --git a/docs/fr/query_language/functions/bitmap_functions.md b/docs/fr/sql_reference/functions/bitmap_functions.md similarity index 96% rename from docs/fr/query_language/functions/bitmap_functions.md rename to docs/fr/sql_reference/functions/bitmap_functions.md index 38d3c17211e..ab464e3f7fa 100644 --- a/docs/fr/query_language/functions/bitmap_functions.md +++ b/docs/fr/sql_reference/functions/bitmap_functions.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 49 +toc_title: Bitmap --- # Fonctions de Bitmap {#bitmap-functions} @@ -71,8 +74,8 @@ bitmapSubsetInRange(bitmap, range_start, range_end) **Paramètre** - `bitmap` – [Objet Bitmap](#bitmap_functions-bitmapbuild). -- `range_start` – range start point. Type: [UInt32](../../data_types/int_uint.md). -- `range_end` – range end point(excluded). Type: [UInt32](../../data_types/int_uint.md). +- `range_start` – range start point. Type: [UInt32](../../sql_reference/data_types/int_uint.md). +- `range_end` – range end point(excluded). Type: [UInt32](../../sql_reference/data_types/int_uint.md). **Exemple** @@ -99,8 +102,8 @@ bitmapSubsetLimit(bitmap, range_start, cardinality_limit) **Paramètre** - `bitmap` – [Objet Bitmap](#bitmap_functions-bitmapbuild). -- `range_start` – The subset starting point. Type: [UInt32](../../data_types/int_uint.md). -- `cardinality_limit` – The subset cardinality upper limit. Type: [UInt32](../../data_types/int_uint.md). +- `range_start` – The subset starting point. Type: [UInt32](../../sql_reference/data_types/int_uint.md). +- `cardinality_limit` – The subset cardinality upper limit. Type: [UInt32](../../sql_reference/data_types/int_uint.md). **Valeur renvoyée** @@ -135,7 +138,7 @@ bitmapContains(haystack, needle) **Paramètre** - `haystack` – [Objet Bitmap](#bitmap_functions-bitmapbuild) où la fonction recherche. -- `needle` – Value that the function searches. Type: [UInt32](../../data_types/int_uint.md). +- `needle` – Value that the function searches. Type: [UInt32](../../sql_reference/data_types/int_uint.md). **Valeurs renvoyées** diff --git a/docs/fr/query_language/functions/comparison_functions.md b/docs/fr/sql_reference/functions/comparison_functions.md similarity index 58% rename from docs/fr/query_language/functions/comparison_functions.md rename to docs/fr/sql_reference/functions/comparison_functions.md index 50477b89596..a532f69d9b3 100644 --- a/docs/fr/query_language/functions/comparison_functions.md +++ b/docs/fr/sql_reference/functions/comparison_functions.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 36 +toc_title: Comparaison --- # Fonctions de comparaison {#comparison-functions} @@ -19,18 +22,16 @@ Par exemple, vous ne pouvez pas comparer une date avec une chaîne. Vous devez u Les chaînes sont comparées par octets. Une courte chaîne est plus petite que toutes les chaînes qui commencent par elle et qui contiennent au moins un caractère de plus. -Note. Jusqu'à la version 1.1.54134, les numéros signés et non signés étaient comparés de la même manière qu'en C++. En d'autres termes, vous pourriez obtenir un résultat incorrect dans des cas comme SELECT 9223372036854775807 \> -1. Ce comportement a changé dans la version 1.1.54134 et est maintenant mathématiquement correct. - ## égal, A = B et a = = b opérateur {#function-equals} -## notEquals, a ! opérateur= b et a `<>` b {#function-notequals} +## notEquals, a ! 
opérateur= b et a \<\> b {#function-notequals} -## peu, `< operator` {#function-less} +## moins, opérateur \< {#function-less} -## grand, `> operator` {#function-greater} +## de plus, \> opérateur {#function-greater} -## lessOrEquals, `<= operator` {#function-lessorequals} +## lessOrEquals, \< = opérateur {#function-lessorequals} -## greaterOrEquals, `>= operator` {#function-greaterorequals} +## greaterOrEquals, \> = opérateur {#function-greaterorequals} [Article Original](https://clickhouse.tech/docs/en/query_language/functions/comparison_functions/) diff --git a/docs/fr/query_language/functions/conditional_functions.md b/docs/fr/sql_reference/functions/conditional_functions.md similarity index 98% rename from docs/fr/query_language/functions/conditional_functions.md rename to docs/fr/sql_reference/functions/conditional_functions.md index 18c2c5ef081..efe22799b49 100644 --- a/docs/fr/query_language/functions/conditional_functions.md +++ b/docs/fr/sql_reference/functions/conditional_functions.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 43 +toc_title: 'Conditionnel ' --- # Fonctions conditionnelles {#conditional-functions} diff --git a/docs/fr/query_language/functions/date_time_functions.md b/docs/fr/sql_reference/functions/date_time_functions.md similarity index 98% rename from docs/fr/query_language/functions/date_time_functions.md rename to docs/fr/sql_reference/functions/date_time_functions.md index be36b0477f7..58449316398 100644 --- a/docs/fr/query_language/functions/date_time_functions.md +++ b/docs/fr/sql_reference/functions/date_time_functions.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 39 +toc_title: Travailler avec les Dates et les heures --- # Fonctions pour travailler avec des dates et des heures {#functions-for-working-with-dates-and-times} @@ -368,7 +371,7 @@ dateDiff('unit', startdate, enddate, [timezone]) Supported values: | unit | - | ------ | + | ---- | |second | |minute | |hour | @@ -378,9 +381,9 @@ dateDiff('unit', startdate, enddate, [timezone]) |quarter | |year | -- `startdate` — The first time value to compare. [Date](../../data_types/date.md) ou [DateTime](../../data_types/datetime.md). +- `startdate` — The first time value to compare. [Date](../../sql_reference/data_types/date.md) ou [DateTime](../../sql_reference/data_types/datetime.md). -- `enddate` — The second time value to compare. [Date](../../data_types/date.md) ou [DateTime](../../data_types/datetime.md). +- `enddate` — The second time value to compare. [Date](../../sql_reference/data_types/date.md) ou [DateTime](../../sql_reference/data_types/datetime.md). - `timezone` — Optional parameter. If specified, it is applied to both `startdate` et `enddate`. Si non spécifié, fuseaux horaires de l' `startdate` et `enddate` sont utilisés. Si elles ne sont pas identiques, le résultat n'est pas spécifié. 
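To make the `dateDiff` hunk above concrete, here is a small hedged usage sketch; the timestamps and the expected results in the comments are illustrative only:

``` sql
SELECT
    dateDiff('hour', toDateTime('2020-03-12 00:00:00'), toDateTime('2020-03-12 20:19:40')) AS hours,  -- 20
    dateDiff('month', toDate('2019-12-31'), toDate('2020-03-01')) AS months                           -- 3
```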
diff --git a/docs/fr/query_language/functions/encoding_functions.md b/docs/fr/sql_reference/functions/encoding_functions.md similarity index 90% rename from docs/fr/query_language/functions/encoding_functions.md rename to docs/fr/sql_reference/functions/encoding_functions.md index 87a848cf1a8..f0738d2c684 100644 --- a/docs/fr/query_language/functions/encoding_functions.md +++ b/docs/fr/sql_reference/functions/encoding_functions.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 52 +toc_title: Encodage --- # L'encodage des fonctions {#encoding-functions} @@ -16,7 +19,7 @@ char(number_1, [number_2, ..., number_n]); **Paramètre** -- `number_1, number_2, ..., number_n` — Numerical arguments interpreted as integers. Types: [Int](../../data_types/int_uint.md), [Flottant](../../data_types/float.md). +- `number_1, number_2, ..., number_n` — Numerical arguments interpreted as integers. Types: [Int](../../sql_reference/data_types/int_uint.md), [Flottant](../../sql_reference/data_types/float.md). **Valeur renvoyée** @@ -108,7 +111,7 @@ Les valeurs des types virgule flottante et décimale sont codées comme leur rep **Paramètre** -- `arg` — A value to convert to hexadecimal. Types: [Chaîne](../../data_types/string.md), [UInt](../../data_types/int_uint.md), [Flottant](../../data_types/float.md), [Décimal](../../data_types/decimal.md), [Date](../../data_types/date.md) ou [DateTime](../../data_types/datetime.md). +- `arg` — A value to convert to hexadecimal. Types: [Chaîne](../../sql_reference/data_types/string.md), [UInt](../../sql_reference/data_types/int_uint.md), [Flottant](../../sql_reference/data_types/float.md), [Décimal](../../sql_reference/data_types/decimal.md), [Date](../../sql_reference/data_types/date.md) ou [DateTime](../../sql_reference/data_types/datetime.md). **Valeur renvoyée** diff --git a/docs/fr/query_language/functions/ext_dict_functions.md b/docs/fr/sql_reference/functions/ext_dict_functions.md similarity index 77% rename from docs/fr/query_language/functions/ext_dict_functions.md rename to docs/fr/sql_reference/functions/ext_dict_functions.md index 40d606b18bd..126d400d7b1 100644 --- a/docs/fr/query_language/functions/ext_dict_functions.md +++ b/docs/fr/sql_reference/functions/ext_dict_functions.md @@ -1,10 +1,13 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 58 +toc_title: Travailler avec des dictionnaires externes --- -# Fonctions pour travailler avec des dictionnaires externes {#ext_dict_functions} +# Fonctions Pour Travailler Avec Des Dictionnaires Externes {#ext_dict_functions} -Pour plus d'informations sur la connexion et la configuration de dictionnaires externes, voir [Dictionnaires externes](../dicts/external_dicts.md). +Pour plus d'informations sur la connexion et la configuration de dictionnaires externes, voir [Dictionnaires externes](../../sql_reference/dictionaries/external_dictionaries/external_dicts.md). ## dictGet {#dictget} @@ -19,12 +22,12 @@ dictGetOrDefault('dict_name', 'attr_name', id_expr, default_value_expr) - `dict_name` — Name of the dictionary. [Chaîne littérale](../syntax.md#syntax-string-literal). - `attr_name` — Name of the column of the dictionary. [Chaîne littérale](../syntax.md#syntax-string-literal). -- `id_expr` — Key value. 
[Expression](../syntax.md#syntax-expressions) de retour d'un [UInt64](../../data_types/int_uint.md) ou [Tuple](../../data_types/tuple.md)- tapez la valeur en fonction de la configuration du dictionnaire. +- `id_expr` — Key value. [Expression](../syntax.md#syntax-expressions) de retour d'un [UInt64](../../sql_reference/data_types/int_uint.md) ou [Tuple](../../sql_reference/data_types/tuple.md)- tapez la valeur en fonction de la configuration du dictionnaire. - `default_value_expr` — Value returned if the dictionary doesn't contain a row with the `id_expr` clé. [Expression](../syntax.md#syntax-expressions) renvoyer la valeur dans le type de données configuré pour `attr_name` attribut. **Valeur renvoyée** -- Si ClickHouse analyse l'attribut avec succès dans le [l'attribut type de données](../../query_language/dicts/external_dicts_dict_structure.md#ext_dict_structure-attributes), les fonctions renvoient la valeur du dictionnaire de l'attribut qui correspond à `id_expr`. +- Si ClickHouse analyse l'attribut avec succès dans le [l'attribut type de données](../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md#ext_dict_structure-attributes), les fonctions renvoient la valeur du dictionnaire de l'attribut qui correspond à `id_expr`. - Si il n'y a pas la clé, correspondant à `id_expr` dans le dictionnaire, puis: @@ -94,7 +97,7 @@ LIMIT 3 **Voir Aussi** -- [Dictionnaires Externes](../dicts/external_dicts.md) +- [Dictionnaires Externes](../../sql_reference/dictionaries/external_dictionaries/external_dicts.md) ## dictHas {#dicthas} @@ -107,7 +110,7 @@ dictHas('dict_name', id_expr) **Paramètre** - `dict_name` — Name of the dictionary. [Chaîne littérale](../syntax.md#syntax-string-literal). -- `id_expr` — Key value. [Expression](../syntax.md#syntax-expressions) de retour d'un [UInt64](../../data_types/int_uint.md)-le type de la valeur. +- `id_expr` — Key value. [Expression](../syntax.md#syntax-expressions) de retour d'un [UInt64](../../sql_reference/data_types/int_uint.md)-le type de la valeur. **Valeur renvoyée** @@ -118,7 +121,7 @@ Type: `UInt8`. ## dictGetHierarchy {#dictgethierarchy} -Crée un tableau contenant tous les parents d'une clé dans le [hiérarchique dictionnaire](../dicts/external_dicts_dict_hierarchical.md). +Crée un tableau contenant tous les parents d'une clé dans le [hiérarchique dictionnaire](../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_hierarchical.md). **Syntaxe** @@ -129,13 +132,13 @@ dictGetHierarchy('dict_name', key) **Paramètre** - `dict_name` — Name of the dictionary. [Chaîne littérale](../syntax.md#syntax-string-literal). -- `key` — Key value. [Expression](../syntax.md#syntax-expressions) de retour d'un [UInt64](../../data_types/int_uint.md)-le type de la valeur. +- `key` — Key value. [Expression](../syntax.md#syntax-expressions) de retour d'un [UInt64](../../sql_reference/data_types/int_uint.md)-le type de la valeur. **Valeur renvoyée** - Les Parents pour la clé. -Type: [Tableau (UInt64)](../../data_types/array.md). +Type: [Tableau (UInt64)](../../sql_reference/data_types/array.md). ## dictisine {#dictisin} @@ -148,8 +151,8 @@ dictIsIn('dict_name', child_id_expr, ancestor_id_expr) **Paramètre** - `dict_name` — Name of the dictionary. [Chaîne littérale](../syntax.md#syntax-string-literal). -- `child_id_expr` — Key to be checked. [Expression](../syntax.md#syntax-expressions) de retour d'un [UInt64](../../data_types/int_uint.md)-le type de la valeur. -- `ancestor_id_expr` — Alleged ancestor of the `child_id_expr` clé. 
[Expression](../syntax.md#syntax-expressions) de retour d'un [UInt64](../../data_types/int_uint.md)-le type de la valeur. +- `child_id_expr` — Key to be checked. [Expression](../syntax.md#syntax-expressions) de retour d'un [UInt64](../../sql_reference/data_types/int_uint.md)-le type de la valeur. +- `ancestor_id_expr` — Alleged ancestor of the `child_id_expr` clé. [Expression](../syntax.md#syntax-expressions) de retour d'un [UInt64](../../sql_reference/data_types/int_uint.md)-le type de la valeur. **Valeur renvoyée** @@ -158,7 +161,7 @@ dictIsIn('dict_name', child_id_expr, ancestor_id_expr) Type: `UInt8`. -## D'autres fonctions {#ext_dict_functions-other} +## D'Autres Fonctions {#ext_dict_functions-other} ClickHouse prend en charge des fonctions spécialisées qui convertissent les valeurs d'attribut de dictionnaire en un type de données spécifique, quelle que soit la configuration du dictionnaire. @@ -185,12 +188,12 @@ dictGet[Type]OrDefault('dict_name', 'attr_name', id_expr, default_value_expr) - `dict_name` — Name of the dictionary. [Chaîne littérale](../syntax.md#syntax-string-literal). - `attr_name` — Name of the column of the dictionary. [Chaîne littérale](../syntax.md#syntax-string-literal). -- `id_expr` — Key value. [Expression](../syntax.md#syntax-expressions) de retour d'un [UInt64](../../data_types/int_uint.md)-le type de la valeur. +- `id_expr` — Key value. [Expression](../syntax.md#syntax-expressions) de retour d'un [UInt64](../../sql_reference/data_types/int_uint.md)-le type de la valeur. - `default_value_expr` — Value which is returned if the dictionary doesn't contain a row with the `id_expr` clé. [Expression](../syntax.md#syntax-expressions) renvoyer une valeur dans le type de données configuré pour `attr_name` attribut. **Valeur renvoyée** -- Si ClickHouse analyse l'attribut avec succès dans le [l'attribut type de données](../../query_language/dicts/external_dicts_dict_structure.md#ext_dict_structure-attributes), les fonctions renvoient la valeur du dictionnaire de l'attribut qui correspond à `id_expr`. +- Si ClickHouse analyse l'attribut avec succès dans le [l'attribut type de données](../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md#ext_dict_structure-attributes), les fonctions renvoient la valeur du dictionnaire de l'attribut qui correspond à `id_expr`. - Si il n'est pas demandé `id_expr` dans le dictionnaire,: diff --git a/docs/fr/query_language/functions/functions_for_nulls.md b/docs/fr/sql_reference/functions/functions_for_nulls.md similarity index 95% rename from docs/fr/query_language/functions/functions_for_nulls.md rename to docs/fr/sql_reference/functions/functions_for_nulls.md index 361c758765c..78c85b22de2 100644 --- a/docs/fr/query_language/functions/functions_for_nulls.md +++ b/docs/fr/sql_reference/functions/functions_for_nulls.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 63 +toc_title: Travailler avec des arguments nullables --- # Fonctions pour travailler avec des agrégats nullables {#functions-for-working-with-nullable-aggregates} @@ -208,7 +211,7 @@ SELECT nullIf(1, 2) ## assumeNotNull {#assumenotnull} -Résultats dans une valeur de type [Nullable](../../data_types/nullable.md) pour un non- `Nullable` si la valeur n'est pas `NULL`. +Résultats dans une valeur de type [Nullable](../../sql_reference/data_types/nullable.md) pour un non- `Nullable` si la valeur n'est pas `NULL`. 
``` sql assumeNotNull(x) diff --git a/docs/fr/query_language/functions/geo.md b/docs/fr/sql_reference/functions/geo.md similarity index 85% rename from docs/fr/query_language/functions/geo.md rename to docs/fr/sql_reference/functions/geo.md index 1bff6d147d9..2a929604a5b 100644 --- a/docs/fr/query_language/functions/geo.md +++ b/docs/fr/sql_reference/functions/geo.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 62 +toc_title: "Travailler avec des coordonn\xE9es g\xE9ographiques" --- # Fonctions pour travailler avec des coordonnées géographiques {#functions-for-working-with-geographical-coordinates} @@ -82,8 +85,8 @@ pointInPolygon((x, y), [(a, b), (c, d) ...], ...) **Les valeurs d'entrée** -- `(x, y)` — Coordinates of a point on the plane. Data type — [Tuple](../../data_types/tuple.md) — A tuple of two numbers. -- `[(a, b), (c, d) ...]` — Polygon vertices. Data type — [Tableau](../../data_types/array.md). Chaque sommet est représenté par une paire de coordonnées `(a, b)`. Les sommets doivent être spécifiés dans le sens horaire ou antihoraire. Le nombre minimum de sommets est 3. Le polygone doit être constante. +- `(x, y)` — Coordinates of a point on the plane. Data type — [Tuple](../../sql_reference/data_types/tuple.md) — A tuple of two numbers. +- `[(a, b), (c, d) ...]` — Polygon vertices. Data type — [Tableau](../../sql_reference/data_types/array.md). Chaque sommet est représenté par une paire de coordonnées `(a, b)`. Les sommets doivent être spécifiés dans le sens horaire ou antihoraire. Le nombre minimum de sommets est 3. Le polygone doit être constante. - La fonction prend également en charge les polygones avec des trous (découper des sections). Dans ce cas, ajoutez des polygones qui définissent les sections découpées en utilisant des arguments supplémentaires de la fonction. La fonction ne prend pas en charge les polygones non simplement connectés. **Valeurs renvoyées** @@ -173,9 +176,9 @@ geoToH3(lon, lat, resolution) **Paramètre** -- `lon` — Longitude. Type: [Float64](../../data_types/float.md). -- `lat` — Latitude. Type: [Float64](../../data_types/float.md). -- `resolution` — Index resolution. Range: `[0, 15]`. Type: [UInt8](../../data_types/int_uint.md). +- `lon` — Longitude. Type: [Float64](../../sql_reference/data_types/float.md). +- `lat` — Latitude. Type: [Float64](../../sql_reference/data_types/float.md). +- `resolution` — Index resolution. Range: `[0, 15]`. Type: [UInt8](../../sql_reference/data_types/int_uint.md). **Valeurs renvoyées** @@ -245,11 +248,11 @@ h3GetBaseCell(index) **Paramètre** -- `index` — Hexagon index number. Type: [UInt64](../../data_types/int_uint.md). +- `index` — Hexagon index number. Type: [UInt64](../../sql_reference/data_types/int_uint.md). **Valeurs renvoyées** -- Numéro de cellule de base hexagonale. Type: [UInt8](../../data_types/int_uint.md). +- Numéro de cellule de base hexagonale. Type: [UInt8](../../sql_reference/data_types/int_uint.md). **Exemple** @@ -279,11 +282,11 @@ h3HexAreaM2(resolution) **Paramètre** -- `resolution` — Index resolution. Range: `[0, 15]`. Type: [UInt8](../../data_types/int_uint.md). +- `resolution` — Index resolution. Range: `[0, 15]`. Type: [UInt8](../../sql_reference/data_types/int_uint.md). **Valeurs renvoyées** -- Area in m². Type: [Float64](../../data_types/float.md). +- Area in m². Type: [Float64](../../sql_reference/data_types/float.md). 
**Exemple** @@ -313,12 +316,12 @@ h3IndexesAreNeighbors(index1, index2) **Paramètre** -- `index1` — Hexagon index number. Type: [UInt64](../../data_types/int_uint.md). -- `index2` — Hexagon index number. Type: [UInt64](../../data_types/int_uint.md). +- `index1` — Hexagon index number. Type: [UInt64](../../sql_reference/data_types/int_uint.md). +- `index2` — Hexagon index number. Type: [UInt64](../../sql_reference/data_types/int_uint.md). **Valeurs renvoyées** -- Retourner `1` si les index sont voisins, `0` autrement. Type: [UInt8](../../data_types/int_uint.md). +- Retourner `1` si les index sont voisins, `0` autrement. Type: [UInt8](../../sql_reference/data_types/int_uint.md). **Exemple** @@ -348,12 +351,12 @@ h3ToChildren(index, resolution) **Paramètre** -- `index` — Hexagon index number. Type: [UInt64](../../data_types/int_uint.md). -- `resolution` — Index resolution. Range: `[0, 15]`. Type: [UInt8](../../data_types/int_uint.md). +- `index` — Hexagon index number. Type: [UInt64](../../sql_reference/data_types/int_uint.md). +- `resolution` — Index resolution. Range: `[0, 15]`. Type: [UInt8](../../sql_reference/data_types/int_uint.md). **Valeurs renvoyées** -- Tableau avec les index H3 enfants. Tableau de type: [UInt64](../../data_types/int_uint.md). +- Tableau avec les index H3 enfants. Tableau de type: [UInt64](../../sql_reference/data_types/int_uint.md). **Exemple** @@ -383,12 +386,12 @@ h3ToParent(index, resolution) **Paramètre** -- `index` — Hexagon index number. Type: [UInt64](../../data_types/int_uint.md). -- `resolution` — Index resolution. Range: `[0, 15]`. Type: [UInt8](../../data_types/int_uint.md). +- `index` — Hexagon index number. Type: [UInt64](../../sql_reference/data_types/int_uint.md). +- `resolution` — Index resolution. Range: `[0, 15]`. Type: [UInt8](../../sql_reference/data_types/int_uint.md). **Valeurs renvoyées** -- Parent H3 index. Type: [UInt64](../../data_types/int_uint.md). +- Parent H3 index. Type: [UInt64](../../sql_reference/data_types/int_uint.md). **Exemple** @@ -416,11 +419,11 @@ h3ToString(index) **Paramètre** -- `index` — Hexagon index number. Type: [UInt64](../../data_types/int_uint.md). +- `index` — Hexagon index number. Type: [UInt64](../../sql_reference/data_types/int_uint.md). **Valeurs renvoyées** -- Représentation en chaîne de l'index H3. Type: [Chaîne](../../data_types/string.md). +- Représentation en chaîne de l'index H3. Type: [Chaîne](../../sql_reference/data_types/string.md). **Exemple** @@ -448,11 +451,11 @@ stringToH3(index_str) **Paramètre** -- `index_str` — String representation of the H3 index. Type: [Chaîne](../../data_types/string.md). +- `index_str` — String representation of the H3 index. Type: [Chaîne](../../sql_reference/data_types/string.md). **Valeurs renvoyées** -- Numéro d'indice hexagonal. Renvoie 0 en cas d'erreur. Type: [UInt64](../../data_types/int_uint.md). +- Numéro d'indice hexagonal. Renvoie 0 en cas d'erreur. Type: [UInt64](../../sql_reference/data_types/int_uint.md). **Exemple** @@ -482,11 +485,11 @@ h3GetResolution(index) **Paramètre** -- `index` — Hexagon index number. Type: [UInt64](../../data_types/int_uint.md). +- `index` — Hexagon index number. Type: [UInt64](../../sql_reference/data_types/int_uint.md). **Valeurs renvoyées** -- L'indice de la résolution. Gamme: `[0, 15]`. Type: [UInt8](../../data_types/int_uint.md). +- L'indice de la résolution. Gamme: `[0, 15]`. Type: [UInt8](../../sql_reference/data_types/int_uint.md). 
**Exemple** diff --git a/docs/fr/query_language/functions/hash_functions.md b/docs/fr/sql_reference/functions/hash_functions.md similarity index 85% rename from docs/fr/query_language/functions/hash_functions.md rename to docs/fr/sql_reference/functions/hash_functions.md index 82ea213fdcd..7996a3c1f13 100644 --- a/docs/fr/query_language/functions/hash_functions.md +++ b/docs/fr/sql_reference/functions/hash_functions.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 50 +toc_title: Hachage --- # Les fonctions de hachage {#hash-functions} @@ -8,7 +11,7 @@ Les fonctions de hachage peuvent être utilisées pour le brassage pseudo-aléat ## halfMD5 {#hash-functions-halfmd5} -[Interpréter](../../query_language/functions/type_conversion_functions.md#type_conversion_functions-reinterpretAsString) tous les paramètres d'entrée sous forme de chaînes et calcule le [MD5](https://en.wikipedia.org/wiki/MD5) la valeur de hachage pour chacun d'eux. Puis combine les hachages, prend les 8 premiers octets du hachage de la chaîne résultante, et les interprète comme `UInt64` dans l'ordre des octets big-endian. +[Interpréter](../../sql_reference/functions/type_conversion_functions.md#type_conversion_functions-reinterpretAsString) tous les paramètres d'entrée sous forme de chaînes et calcule le [MD5](https://en.wikipedia.org/wiki/MD5) la valeur de hachage pour chacun d'eux. Puis combine les hachages, prend les 8 premiers octets du hachage de la chaîne résultante, et les interprète comme `UInt64` dans l'ordre des octets big-endian. ``` sql halfMD5(par1, ...) @@ -19,11 +22,11 @@ Envisager l'utilisation de la [sipHash64](#hash_functions-siphash64) la fonction **Paramètre** -La fonction prend un nombre variable de paramètres d'entrée. Les paramètres peuvent être tout de la [types de données pris en charge](../../data_types/index.md). +La fonction prend un nombre variable de paramètres d'entrée. Les paramètres peuvent être tout de la [types de données pris en charge](../../sql_reference/data_types/index.md). **Valeur Renvoyée** -A [UInt64](../../data_types/int_uint.md) valeur de hachage du type de données. +A [UInt64](../../sql_reference/data_types/int_uint.md) valeur de hachage du type de données. **Exemple** @@ -53,7 +56,7 @@ sipHash64(par1,...) C'est une fonction de hachage cryptographique. Il fonctionne au moins trois fois plus vite que le [MD5](#hash_functions-md5) fonction. -Fonction [interpréter](../../query_language/functions/type_conversion_functions.md#type_conversion_functions-reinterpretAsString) tous les paramètres d'entrée sous forme de chaînes et calcule la valeur de hachage pour chacun d'eux. Puis combine les hachages par l'algorithme suivant: +Fonction [interpréter](../../sql_reference/functions/type_conversion_functions.md#type_conversion_functions-reinterpretAsString) tous les paramètres d'entrée sous forme de chaînes et calcule la valeur de hachage pour chacun d'eux. Puis combine les hachages par l'algorithme suivant: 1. Après avoir haché tous les paramètres d'entrée, la fonction obtient le tableau de hachages. 2. La fonction prend le premier et le second éléments et calcule un hachage pour le tableau d'entre eux. @@ -62,11 +65,11 @@ Fonction [interpréter](../../query_language/functions/type_conversion_functions **Paramètre** -La fonction prend un nombre variable de paramètres d'entrée. Les paramètres peuvent être tout de la [types de données pris en charge](../../data_types/index.md). 
+La fonction prend un nombre variable de paramètres d'entrée. Les paramètres peuvent être tout de la [types de données pris en charge](../../sql_reference/data_types/index.md). **Valeur Renvoyée** -A [UInt64](../../data_types/int_uint.md) valeur de hachage du type de données. +A [UInt64](../../sql_reference/data_types/int_uint.md) valeur de hachage du type de données. **Exemple** @@ -98,11 +101,11 @@ Ceci est une fonction de hachage non cryptographique rapide. Il utilise L'algori **Paramètre** -La fonction prend un nombre variable de paramètres d'entrée. Les paramètres peuvent être tout de la [types de données pris en charge](../../data_types/index.md). +La fonction prend un nombre variable de paramètres d'entrée. Les paramètres peuvent être tout de la [types de données pris en charge](../../sql_reference/data_types/index.md). **Valeur Renvoyée** -A [UInt64](../../data_types/int_uint.md) valeur de hachage du type de données. +A [UInt64](../../sql_reference/data_types/int_uint.md) valeur de hachage du type de données. **Exemple** @@ -164,11 +167,11 @@ La fonction utilise le `Hash64` la méthode de tous les [les méthodes disponibl **Paramètre** -La fonction prend un nombre variable de paramètres d'entrée. Les paramètres peuvent être tout de la [types de données pris en charge](../../data_types/index.md). +La fonction prend un nombre variable de paramètres d'entrée. Les paramètres peuvent être tout de la [types de données pris en charge](../../sql_reference/data_types/index.md). **Valeur Renvoyée** -A [UInt64](../../data_types/int_uint.md) valeur de hachage du type de données. +A [UInt64](../../sql_reference/data_types/int_uint.md) valeur de hachage du type de données. **Exemple** @@ -290,11 +293,11 @@ metroHash64(par1, ...) **Paramètre** -La fonction prend un nombre variable de paramètres d'entrée. Les paramètres peuvent être tout de la [types de données pris en charge](../../data_types/index.md). +La fonction prend un nombre variable de paramètres d'entrée. Les paramètres peuvent être tout de la [types de données pris en charge](../../sql_reference/data_types/index.md). **Valeur Renvoyée** -A [UInt64](../../data_types/int_uint.md) valeur de hachage du type de données. +A [UInt64](../../sql_reference/data_types/int_uint.md) valeur de hachage du type de données. **Exemple** @@ -325,12 +328,12 @@ murmurHash2_64(par1, ...) **Paramètre** -Les deux fonctions prennent un nombre variable de paramètres d'entrée. Les paramètres peuvent être tout de la [types de données pris en charge](../../data_types/index.md). +Les deux fonctions prennent un nombre variable de paramètres d'entrée. Les paramètres peuvent être tout de la [types de données pris en charge](../../sql_reference/data_types/index.md). **Valeur Renvoyée** -- Le `murmurHash2_32` fonction renvoie la valeur de hachage ayant le [UInt32](../../data_types/int_uint.md) type de données. -- Le `murmurHash2_64` fonction renvoie la valeur de hachage ayant le [UInt64](../../data_types/int_uint.md) type de données. +- Le `murmurHash2_32` fonction renvoie la valeur de hachage ayant le [UInt32](../../sql_reference/data_types/int_uint.md) type de données. +- Le `murmurHash2_64` fonction renvoie la valeur de hachage ayant le [UInt64](../../sql_reference/data_types/int_uint.md) type de données. **Exemple** @@ -355,12 +358,12 @@ murmurHash3_64(par1, ...) **Paramètre** -Les deux fonctions prennent un nombre variable de paramètres d'entrée. Les paramètres peuvent être tout de la [types de données pris en charge](../../data_types/index.md). 
+Les deux fonctions prennent un nombre variable de paramètres d'entrée. Les paramètres peuvent être tout de la [types de données pris en charge](../../sql_reference/data_types/index.md). **Valeur Renvoyée** -- Le `murmurHash3_32` la fonction retourne un [UInt32](../../data_types/int_uint.md) valeur de hachage du type de données. -- Le `murmurHash3_64` la fonction retourne un [UInt64](../../data_types/int_uint.md) valeur de hachage du type de données. +- Le `murmurHash3_32` la fonction retourne un [UInt32](../../sql_reference/data_types/int_uint.md) valeur de hachage du type de données. +- Le `murmurHash3_64` la fonction retourne un [UInt64](../../sql_reference/data_types/int_uint.md) valeur de hachage du type de données. **Exemple** @@ -384,11 +387,11 @@ murmurHash3_128( expr ) **Paramètre** -- `expr` — [Expression](../syntax.md#syntax-expressions) de retour d'un [Chaîne](../../data_types/string.md)-le type de la valeur. +- `expr` — [Expression](../syntax.md#syntax-expressions) de retour d'un [Chaîne](../../sql_reference/data_types/string.md)-le type de la valeur. **Valeur Renvoyée** -A [FixedString (16)](../../data_types/fixedstring.md) valeur de hachage du type de données. +A [FixedString (16)](../../sql_reference/data_types/fixedstring.md) valeur de hachage du type de données. **Exemple** diff --git a/docs/fr/query_language/functions/higher_order_functions.md b/docs/fr/sql_reference/functions/higher_order_functions.md similarity index 98% rename from docs/fr/query_language/functions/higher_order_functions.md rename to docs/fr/sql_reference/functions/higher_order_functions.md index 6abdb7d4280..4d6d31c42f7 100644 --- a/docs/fr/query_language/functions/higher_order_functions.md +++ b/docs/fr/sql_reference/functions/higher_order_functions.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 57 +toc_title: "D'Ordre Sup\xE9rieur" --- # Fonctions d'ordre supérieur {#higher-order-functions} diff --git a/docs/fr/query_language/functions/in_functions.md b/docs/fr/sql_reference/functions/in_functions.md similarity index 77% rename from docs/fr/query_language/functions/in_functions.md rename to docs/fr/sql_reference/functions/in_functions.md index aa9b9b04e8c..d9f083250cc 100644 --- a/docs/fr/query_language/functions/in_functions.md +++ b/docs/fr/sql_reference/functions/in_functions.md @@ -1,12 +1,15 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 60 +toc_title: "Mise en \u0153uvre de L'op\xE9rateur IN" --- -# Fonctions pour la mise en œuvre de l'opérateur {#functions-for-implementing-the-in-operator} +# Fonctions de mise en œuvre de L'opérateur IN {#functions-for-implementing-the-in-operator} ## in, notin, globalIn, globalNotIn {#in-functions} -Voir la section [Dans les opérateurs](../select.md#select-in-operators). +Voir la section [Dans les opérateurs](../statements/select.md#select-in-operators). 
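A brief hedged sketch of the operator forms these functions stand behind, with arbitrarily chosen constants:

``` sql
SELECT
    2 IN (1, 2, 3) AS scalar_in,                   -- 1
    (1, 'a') IN ((1, 'a'), (2, 'b')) AS tuple_in,  -- 1
    4 NOT IN (1, 2, 3) AS scalar_not_in            -- 1
```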
## tuple(x, y, …), operator (x, y, …) {#tuplex-y-operator-x-y} diff --git a/docs/fr/query_language/functions/index.md b/docs/fr/sql_reference/functions/index.md similarity index 97% rename from docs/fr/query_language/functions/index.md rename to docs/fr/sql_reference/functions/index.md index 717d0605656..f4e6bf7566a 100644 --- a/docs/fr/query_language/functions/index.md +++ b/docs/fr/sql_reference/functions/index.md @@ -1,5 +1,9 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_folder_title: Functions +toc_priority: 32 +toc_title: Introduction --- # Fonction {#functions} diff --git a/docs/fr/query_language/functions/introspection.md b/docs/fr/sql_reference/functions/introspection.md similarity index 91% rename from docs/fr/query_language/functions/introspection.md rename to docs/fr/sql_reference/functions/introspection.md index 722c76fb7e1..6205d096909 100644 --- a/docs/fr/query_language/functions/introspection.md +++ b/docs/fr/sql_reference/functions/introspection.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 65 +toc_title: Introspection --- # Fonctions D'Introspection {#introspection-functions} @@ -33,19 +36,19 @@ addressToLine(address_of_binary_instruction) **Paramètre** -- `address_of_binary_instruction` ([UInt64](../../data_types/int_uint.md)) — Address of instruction in a running process. +- `address_of_binary_instruction` ([UInt64](../../sql_reference/data_types/int_uint.md)) — Address of instruction in a running process. **Valeur renvoyée** - Nom de fichier du code Source et le numéro de ligne dans ce fichier délimité par deux-points. - For example, `/build/obj-x86_64-linux-gnu/../src/src/Common/ThreadPool.cpp:199`, where `199` is a line number. + For example, `/build/obj-x86_64-linux-gnu/../dbms/Common/ThreadPool.cpp:199`, where `199` is a line number. - Nom d'un binaire, si la fonction n'a pas pu trouver les informations de débogage. - Chaîne vide, si l'adresse n'est pas valide. -Type: [Chaîne](../../data_types/string.md). +Type: [Chaîne](../../sql_reference/data_types/string.md). **Exemple** @@ -84,7 +87,7 @@ SELECT addressToLine(94784076370703) \G ``` text Row 1: ────── -addressToLine(94784076370703): /build/obj-x86_64-linux-gnu/../src/src/Common/ThreadPool.cpp:199 +addressToLine(94784076370703): /build/obj-x86_64-linux-gnu/../dbms/Common/ThreadPool.cpp:199 ``` Application de la fonction à la trace de la pile entière: @@ -104,8 +107,8 @@ Row 1: ────── trace_source_code_lines: /lib/x86_64-linux-gnu/libpthread-2.27.so /usr/lib/debug/usr/bin/clickhouse -/build/obj-x86_64-linux-gnu/../src/src/Common/ThreadPool.cpp:199 -/build/obj-x86_64-linux-gnu/../src/src/Common/ThreadPool.h:155 +/build/obj-x86_64-linux-gnu/../dbms/Common/ThreadPool.cpp:199 +/build/obj-x86_64-linux-gnu/../dbms/Common/ThreadPool.h:155 /usr/include/c++/9/bits/atomic_base.h:551 /usr/lib/debug/usr/bin/clickhouse /lib/x86_64-linux-gnu/libpthread-2.27.so @@ -124,14 +127,14 @@ addressToSymbol(address_of_binary_instruction) **Paramètre** -- `address_of_binary_instruction` ([UInt64](../../data_types/int_uint.md)) — Address of instruction in a running process. +- `address_of_binary_instruction` ([UInt64](../../sql_reference/data_types/int_uint.md)) — Address of instruction in a running process. **Valeur renvoyée** - Symbole des fichiers D'objets ClickHouse. - Chaîne vide, si l'adresse n'est pas valide. -Type: [Chaîne](../../data_types/string.md). 
+Type: [Chaîne](../../sql_reference/data_types/string.md). **Exemple** @@ -221,14 +224,14 @@ demangle(symbol) **Paramètre** -- `symbol` ([Chaîne](../../data_types/string.md)) — Symbol from an object file. +- `symbol` ([Chaîne](../../sql_reference/data_types/string.md)) — Symbol from an object file. **Valeur renvoyée** - Nom de la fonction C++. - Chaîne vide si un symbole n'est pas valide. -Type: [Chaîne](../../data_types/string.md). +Type: [Chaîne](../../sql_reference/data_types/string.md). **Exemple** diff --git a/docs/fr/query_language/functions/ip_address_functions.md b/docs/fr/sql_reference/functions/ip_address_functions.md similarity index 95% rename from docs/fr/query_language/functions/ip_address_functions.md rename to docs/fr/sql_reference/functions/ip_address_functions.md index f9541e7d247..3c303e1a262 100644 --- a/docs/fr/query_language/functions/ip_address_functions.md +++ b/docs/fr/sql_reference/functions/ip_address_functions.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 55 +toc_title: Travailler avec des adresses IP --- # Fonctions pour travailler avec des adresses IP {#functions-for-working-with-ip-addresses} @@ -180,7 +183,7 @@ SELECT IPv6CIDRToRange(toIPv6('2001:0db8:0000:85a3:0000:0000:ac1f:8001'), 32); ## toipv4 (chaîne) {#toipv4string} -Un alias `IPv4StringToNum()` cela prend une forme de chaîne D'adresse IPv4 et renvoie la valeur de [IPv4](../../data_types/domains/ipv4.md) type, qui est binaire égal à la valeur renvoyée par `IPv4StringToNum()`. +Un alias `IPv4StringToNum()` cela prend une forme de chaîne D'adresse IPv4 et renvoie la valeur de [IPv4](../../sql_reference/data_types/domains/ipv4.md) type, qui est binaire égal à la valeur renvoyée par `IPv4StringToNum()`. ``` sql WITH @@ -212,7 +215,7 @@ SELECT ## toipv6 (chaîne) {#toipv6string} -Un alias `IPv6StringToNum()` cela prend une forme de chaîne D'adresse IPv6 et renvoie la valeur de [IPv6](../../data_types/domains/ipv6.md) type, qui est binaire égal à la valeur renvoyée par `IPv6StringToNum()`. +Un alias `IPv6StringToNum()` cela prend une forme de chaîne D'adresse IPv6 et renvoie la valeur de [IPv6](../../sql_reference/data_types/domains/ipv6.md) type, qui est binaire égal à la valeur renvoyée par `IPv6StringToNum()`. ``` sql WITH diff --git a/docs/fr/query_language/functions/json_functions.md b/docs/fr/sql_reference/functions/json_functions.md similarity index 98% rename from docs/fr/query_language/functions/json_functions.md rename to docs/fr/sql_reference/functions/json_functions.md index 137df62dfce..ece6df40168 100644 --- a/docs/fr/query_language/functions/json_functions.md +++ b/docs/fr/sql_reference/functions/json_functions.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 56 +toc_title: Travailler avec JSON. 
--- # Fonctions pour travailler avec JSON {#functions-for-working-with-json} diff --git a/docs/fr/query_language/functions/logical_functions.md b/docs/fr/sql_reference/functions/logical_functions.md similarity index 84% rename from docs/fr/query_language/functions/logical_functions.md rename to docs/fr/sql_reference/functions/logical_functions.md index bae864784d3..e13dd4143d1 100644 --- a/docs/fr/query_language/functions/logical_functions.md +++ b/docs/fr/sql_reference/functions/logical_functions.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 37 +toc_title: Logique --- # Les fonctions logiques {#logical-functions} diff --git a/docs/fr/sql_reference/functions/machine_learning_functions.md b/docs/fr/sql_reference/functions/machine_learning_functions.md new file mode 100644 index 00000000000..b3e8b7bd7c4 --- /dev/null +++ b/docs/fr/sql_reference/functions/machine_learning_functions.md @@ -0,0 +1,20 @@ +--- +machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 64 +toc_title: Fonctions D'Apprentissage Automatique +--- + +# Fonctions D'Apprentissage Automatique {#machine-learning-functions} + +## evalMLMethod (prédiction) {#machine_learning_methods-evalmlmethod} + +Prédiction utilisant des modèles de régression ajustés utilise `evalMLMethod` fonction. Voir le lien dans la `linearRegression`. + +### Régression Linéaire Stochastique {#stochastic-linear-regression} + +Le [stochasticLinearRegression](../../sql_reference/aggregate_functions/reference.md#agg_functions-stochasticlinearregression) la fonction d'agrégat implémente une méthode de descente de gradient stochastique utilisant un modèle linéaire et une fonction de perte MSE. Utiliser `evalMLMethod` prédire sur de nouvelles données. + +### Régression Logistique Stochastique {#stochastic-logistic-regression} + +Le [stochasticLogisticRegression](../../sql_reference/aggregate_functions/reference.md#agg_functions-stochasticlogisticregression) la fonction d'agrégation implémente la méthode de descente de gradient stochastique pour le problème de classification binaire. Utiliser `evalMLMethod` prédire sur de nouvelles données. 
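The machine_learning_functions.md page added above describes `evalMLMethod` only in prose. Below is a minimal sketch of the intended fit-then-predict workflow, assuming hypothetical `train_data` and `test_data` tables with columns `target`, `param1`, `param2`, and using the `stochasticLinearRegressionState` signature documented in the aggregate-function reference these pages link to:

``` sql
-- Fit: aggregate the training set into a model state.
-- stochasticLinearRegressionState arguments: learning rate,
-- L2 regularization coefficient, mini-batch size, update method.
CREATE TABLE your_model ENGINE = Memory AS
SELECT stochasticLinearRegressionState(0.1, 0.0, 5, 'SGD')(target, param1, param2) AS state
FROM train_data;

-- Predict: feed the stored state to evalMLMethod on new rows.
WITH (SELECT state FROM your_model) AS model
SELECT evalMLMethod(model, param1, param2) AS prediction
FROM test_data;
```

Materializing the state into an ordinary table lets a single training pass serve many later predictions.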
diff --git a/docs/fr/query_language/functions/math_functions.md b/docs/fr/sql_reference/functions/math_functions.md similarity index 96% rename from docs/fr/query_language/functions/math_functions.md rename to docs/fr/sql_reference/functions/math_functions.md index 4696eb1438a..75d35b24fde 100644 --- a/docs/fr/query_language/functions/math_functions.md +++ b/docs/fr/sql_reference/functions/math_functions.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 44 +toc_title: "Math\xE9matique" --- # Fonctions mathématiques {#mathematical-functions} diff --git a/docs/fr/query_language/functions/other_functions.md b/docs/fr/sql_reference/functions/other_functions.md similarity index 96% rename from docs/fr/query_language/functions/other_functions.md rename to docs/fr/sql_reference/functions/other_functions.md index 50a2d9f7e35..99258bfd8ed 100644 --- a/docs/fr/query_language/functions/other_functions.md +++ b/docs/fr/sql_reference/functions/other_functions.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 66 +toc_title: Autre --- # D'autres fonctions {#other-functions} @@ -52,7 +55,7 @@ basename( expr ) **Paramètre** -- `expr` — Expression resulting in a [Chaîne](../../data_types/string.md) type de valeur. Tous les antislashs doivent être échappés dans la valeur résultante. +- `expr` — Expression resulting in a [Chaîne](../../sql_reference/data_types/string.md) type de valeur. Tous les antislashs doivent être échappés dans la valeur résultante. **Valeur Renvoyée** @@ -198,8 +201,8 @@ Vérifie si la valeur à virgule flottante est finie. **Paramètre** -- `x` — Value to be checked for infinity. Type: [Flottant\*](../../data_types/float.md). -- `y` — Fallback value. Type: [Flottant\*](../../data_types/float.md). +- `x` — Value to be checked for infinity. Type: [Flottant\*](../../sql_reference/data_types/float.md). +- `y` — Fallback value. Type: [Flottant\*](../../sql_reference/data_types/float.md). **Valeur renvoyée** @@ -436,7 +439,7 @@ Si vous créez une sous-requête avec ORDER BY et appelez la fonction depuis l'e **Paramètre** - `column` — A column name or scalar expression. -- `offset` — The number of rows forwards or backwards from the current row of `column`. [Int64](../../data_types/int_uint.md). +- `offset` — The number of rows forwards or backwards from the current row of `column`. [Int64](../../sql_reference/data_types/int_uint.md). - `default_value` — Optional. The value to be returned if offset goes beyond the scope of the block. Type of data blocks affected. **Valeurs renvoyées** @@ -621,7 +624,7 @@ Accepte une adresse MAC au format AA:BB:CC: DD:EE: FF (Nombres séparés par deu ## getSizeOfEnumType {#getsizeofenumtype} -Retourne le nombre de champs dans [Enum](../../data_types/enum.md). +Retourne le nombre de champs dans [Enum](../../sql_reference/data_types/enum.md). ``` sql getSizeOfEnumType(value) @@ -762,7 +765,7 @@ defaultValueOfArgumentType(expression) - `0` pour les nombres. - Chaîne vide pour les chaînes. -- `ᴺᵁᴸᴸ` pour [Nullable](../../data_types/nullable.md). +- `ᴺᵁᴸᴸ` pour [Nullable](../../sql_reference/data_types/nullable.md). **Exemple** @@ -837,7 +840,7 @@ filesystemAvailable() - La quantité d'espace restant disponible en octets. -Type: [UInt64](../../data_types/int_uint.md). +Type: [UInt64](../../sql_reference/data_types/int_uint.md). **Exemple** @@ -869,7 +872,7 @@ filesystemFree() - Quantité d'espace libre en octets. 
-Type: [UInt64](../../data_types/int_uint.md). +Type: [UInt64](../../sql_reference/data_types/int_uint.md). **Exemple** @@ -889,7 +892,7 @@ Résultat: ## filesystemCapacity {#filesystemcapacity} -Renvoie la capacité du système de fichiers en octets. Pour l'évaluation, la [chemin](../../operations/server_settings/settings.md#server_settings-path) le répertoire de données doit être configuré. +Renvoie la capacité du système de fichiers en octets. Pour l'évaluation, la [chemin](../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-path) le répertoire de données doit être configuré. **Syntaxe** @@ -901,7 +904,7 @@ filesystemCapacity() - Informations de capacité du système de fichiers en octets. -Type: [UInt64](../../data_types/int_uint.md). +Type: [UInt64](../../sql_reference/data_types/int_uint.md). **Exemple** @@ -931,9 +934,9 @@ Ainsi, le résultat de la fonction dépend de la partition des données aux bloc ## joinGet {#joinget} -La fonction vous permet d'extraire les données de la table de la même manière qu'à partir d'un [dictionnaire](../../query_language/dicts/index.md). +La fonction vous permet d'extraire les données de la table de la même manière qu'à partir d'un [dictionnaire](../../sql_reference/dictionaries/index.md). -Obtient les données de [Rejoindre](../../operations/table_engines/join.md#creating-a-table) tables utilisant la clé de jointure spécifiée. +Obtient les données de [Rejoindre](../../engines/table_engines/special/join.md#creating-a-table) tables utilisant la clé de jointure spécifiée. Ne prend en charge que les tables créées avec `ENGINE = Join(ANY, LEFT, )` déclaration. @@ -955,7 +958,7 @@ Retourne la liste des valeurs correspond à la liste des clés. Si certain n'existe pas dans la table source alors `0` ou `null` seront renvoyés basé sur [join\_use\_nulls](../../operations/settings/settings.md#join_use_nulls) paramètre. -Plus d'infos sur `join_use_nulls` dans [Opération de jointure](../../operations/table_engines/join.md). +Plus d'infos sur `join_use_nulls` dans [Opération de jointure](../../engines/table_engines/special/join.md). **Exemple** @@ -1057,7 +1060,7 @@ randomPrintableASCII(length) - Chaîne avec un ensemble aléatoire de [ASCII](https://en.wikipedia.org/wiki/ASCII#Printable_characters) caractères imprimables. 
-Type: [Chaîne](../../data_types/string.md) +Type: [Chaîne](../../sql_reference/data_types/string.md) **Exemple** diff --git a/docs/fr/query_language/functions/random_functions.md b/docs/fr/sql_reference/functions/random_functions.md similarity index 89% rename from docs/fr/query_language/functions/random_functions.md rename to docs/fr/sql_reference/functions/random_functions.md index 386ad3953bc..92f15395bb1 100644 --- a/docs/fr/query_language/functions/random_functions.md +++ b/docs/fr/sql_reference/functions/random_functions.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 51 +toc_title: "La G\xE9n\xE9ration De Nombres Pseudo-Al\xE9atoires" --- # Fonctions pour générer des nombres pseudo-aléatoires {#functions-for-generating-pseudo-random-numbers} diff --git a/docs/fr/query_language/functions/rounding_functions.md b/docs/fr/sql_reference/functions/rounding_functions.md similarity index 96% rename from docs/fr/query_language/functions/rounding_functions.md rename to docs/fr/sql_reference/functions/rounding_functions.md index 7ad425c0ebb..f2d20b0c4bf 100644 --- a/docs/fr/query_language/functions/rounding_functions.md +++ b/docs/fr/sql_reference/functions/rounding_functions.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 45 +toc_title: Arrondi --- # Fonctions d'arrondi {#rounding-functions} @@ -36,7 +39,7 @@ round(expression [, decimal_places]) **Paramètre:** -- `expression` — A number to be rounded. Can be any [expression](../syntax.md#syntax-expressions) retour du numérique [type de données](../../data_types/index.md#data_types). +- `expression` — A number to be rounded. Can be any [expression](../syntax.md#syntax-expressions) retour du numérique [type de données](../../sql_reference/data_types/index.md#data_types). - `decimal-places` — An integer value. - Si `decimal-places > 0` alors la fonction arrondit la valeur à droite du point décimal. - Si `decimal-places < 0` alors la fonction arrondit la valeur à gauche de la virgule décimale. @@ -115,7 +118,7 @@ roundBankers(expression [, decimal_places]) **Paramètre** -- `expression` — A number to be rounded. Can be any [expression](../syntax.md#syntax-expressions) retour du numérique [type de données](../../data_types/index.md#data_types). +- `expression` — A number to be rounded. Can be any [expression](../syntax.md#syntax-expressions) retour du numérique [type de données](../../sql_reference/data_types/index.md#data_types). - `decimal-places` — Decimal places. An integer number. - `decimal-places > 0` — The function rounds the number to the given position right of the decimal point. Example: `roundBankers(3.55, 1) = 3.6`. - `decimal-places < 0` — The function rounds the number to the given position left of the decimal point. Example: `roundBankers(24.55, -1) = 20`. 
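To make the `decimal-places` rules for `roundBankers` above concrete, a short sketch built from values cited in the ClickHouse reference for this function (exact halves always go to the nearest even digit):

``` sql
SELECT
    roundBankers(4.5)     AS half_to_even, -- 4
    roundBankers(3.55, 1) AS up_to_even,   -- 3.6
    roundBankers(3.65, 1) AS down_to_even; -- 3.6 as well, since 6 is even
```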
diff --git a/docs/fr/query_language/functions/splitting_merging_functions.md b/docs/fr/sql_reference/functions/splitting_merging_functions.md similarity index 82% rename from docs/fr/query_language/functions/splitting_merging_functions.md rename to docs/fr/sql_reference/functions/splitting_merging_functions.md index 8f207f38259..87ca53debc9 100644 --- a/docs/fr/query_language/functions/splitting_merging_functions.md +++ b/docs/fr/sql_reference/functions/splitting_merging_functions.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 47 +toc_title: "Fractionnement et fusion de cha\xEEnes et de tableaux" --- # Fonctions pour diviser et fusionner des chaînes et des tableaux {#functions-for-splitting-and-merging-strings-and-arrays} @@ -17,8 +20,8 @@ splitByChar(, ) **Paramètre** -- `separator` — The separator which should contain exactly one character. [Chaîne](../../data_types/string.md). -- `s` — The string to split. [Chaîne](../../data_types/string.md). +- `separator` — The separator which should contain exactly one character. [Chaîne](../../sql_reference/data_types/string.md). +- `s` — The string to split. [Chaîne](../../sql_reference/data_types/string.md). **Valeur renvoyée(s)** @@ -28,7 +31,7 @@ Retourne un tableau de certaines chaînes. Des sous-chaînes vides peuvent être - Il existe plusieurs séparateurs consécutifs; - La chaîne d'origine `s` est vide. -Type: [Tableau](../../data_types/array.md) de [Chaîne](../../data_types/string.md). +Type: [Tableau](../../sql_reference/data_types/array.md) de [Chaîne](../../sql_reference/data_types/string.md). **Exemple** @@ -54,14 +57,14 @@ splitByString(, ) **Paramètre** -- `separator` — The separator. [Chaîne](../../data_types/string.md). -- `s` — The string to split. [Chaîne](../../data_types/string.md). +- `separator` — The separator. [Chaîne](../../sql_reference/data_types/string.md). +- `s` — The string to split. [Chaîne](../../sql_reference/data_types/string.md). **Valeur renvoyée(s)** Retourne un tableau de certaines chaînes. Des sous-chaînes vides peuvent être sélectionnées lorsque: -Type: [Tableau](../../data_types/array.md) de [Chaîne](../../data_types/string.md). +Type: [Tableau](../../sql_reference/data_types/array.md) de [Chaîne](../../sql_reference/data_types/string.md). - Un séparateur non vide se produit au début ou à la fin de la chaîne; - Il existe plusieurs séparateurs consécutifs non vides; diff --git a/docs/fr/query_language/functions/string_functions.md b/docs/fr/sql_reference/functions/string_functions.md similarity index 95% rename from docs/fr/query_language/functions/string_functions.md rename to docs/fr/sql_reference/functions/string_functions.md index fb6c5d608d1..0201046529a 100644 --- a/docs/fr/query_language/functions/string_functions.md +++ b/docs/fr/sql_reference/functions/string_functions.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 40 +toc_title: "Travailler avec des cha\xEEnes" --- # Fonctions pour travailler avec des chaînes {#functions-for-working-with-strings} @@ -74,7 +77,7 @@ toValidUTF8( input_string ) Paramètre: -- input\_string — Any set of bytes represented as the [Chaîne](../../data_types/string.md) type de données objet. +- input\_string — Any set of bytes represented as the [Chaîne](../../sql_reference/data_types/string.md) type de données objet. Valeur renvoyée: chaîne UTF-8 valide. 
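The `toValidUTF8` hunk above stops at the returned value; a one-line sketch of the documented replacement behavior (assuming the standard `\xHH` escapes in ClickHouse string literals):

``` sql
-- '\xF0\x80\x80\x80' is an invalid (overlong) UTF-8 sequence;
-- it is replaced by the U+FFFD replacement character.
SELECT toValidUTF8('\x61\xF0\x80\x80\x80b');
-- a�b
```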
@@ -102,8 +105,8 @@ repeat(s, n) **Paramètre** -- `s` — The string to repeat. [Chaîne](../../data_types/string.md). -- `n` — The number of times to repeat the string. [UInt](../../data_types/int_uint.md). +- `s` — The string to repeat. [Chaîne](../../sql_reference/data_types/string.md). +- `n` — The number of times to repeat the string. [UInt](../../sql_reference/data_types/int_uint.md). **Valeur renvoyée** @@ -326,8 +329,8 @@ trim([[LEADING|TRAILING|BOTH] trim_character FROM] input_string) **Paramètre** -- `trim_character` — specified characters for trim. [Chaîne](../../data_types/string.md). -- `input_string` — string for trim. [Chaîne](../../data_types/string.md). +- `trim_character` — specified characters for trim. [Chaîne](../../sql_reference/data_types/string.md). +- `input_string` — string for trim. [Chaîne](../../sql_reference/data_types/string.md). **Valeur renvoyée** @@ -365,7 +368,7 @@ Alias: `ltrim(input_string)`. **Paramètre** -- `input_string` — string to trim. [Chaîne](../../data_types/string.md). +- `input_string` — string to trim. [Chaîne](../../sql_reference/data_types/string.md). **Valeur renvoyée** @@ -403,7 +406,7 @@ Alias: `rtrim(input_string)`. **Paramètre** -- `input_string` — string to trim. [Chaîne](../../data_types/string.md). +- `input_string` — string to trim. [Chaîne](../../sql_reference/data_types/string.md). **Valeur renvoyée** @@ -441,7 +444,7 @@ Alias: `trim(input_string)`. **Paramètre** -- `input_string` — string to trim. [Chaîne](../../data_types/string.md). +- `input_string` — string to trim. [Chaîne](../../sql_reference/data_types/string.md). **Valeur renvoyée** diff --git a/docs/fr/query_language/functions/string_replace_functions.md b/docs/fr/sql_reference/functions/string_replace_functions.md similarity index 97% rename from docs/fr/query_language/functions/string_replace_functions.md rename to docs/fr/sql_reference/functions/string_replace_functions.md index 522451ef910..6b1a58db55b 100644 --- a/docs/fr/query_language/functions/string_replace_functions.md +++ b/docs/fr/sql_reference/functions/string_replace_functions.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 42 +toc_title: "Pour remplacer dans les cha\xEEnes" --- # Les fonctions de recherche et de remplacement dans les chaînes {#functions-for-searching-and-replacing-in-strings} diff --git a/docs/fr/query_language/functions/string_search_functions.md b/docs/fr/sql_reference/functions/string_search_functions.md similarity index 99% rename from docs/fr/query_language/functions/string_search_functions.md rename to docs/fr/sql_reference/functions/string_search_functions.md index d519005ef58..2dfc66b287a 100644 --- a/docs/fr/query_language/functions/string_search_functions.md +++ b/docs/fr/sql_reference/functions/string_search_functions.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 41 +toc_title: "Pour Rechercher Des Cha\xEEnes" --- # Fonctions de recherche de chaînes {#functions-for-searching-strings} diff --git a/docs/fr/query_language/functions/type_conversion_functions.md b/docs/fr/sql_reference/functions/type_conversion_functions.md similarity index 74% rename from docs/fr/query_language/functions/type_conversion_functions.md rename to docs/fr/sql_reference/functions/type_conversion_functions.md index 23c84e9fb40..7bb1d7c4a47 100644 --- a/docs/fr/query_language/functions/type_conversion_functions.md +++ 
b/docs/fr/sql_reference/functions/type_conversion_functions.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 38 +toc_title: La Conversion De Type --- # Fonctions De Conversion De Type {#type-conversion-functions} @@ -12,7 +15,7 @@ ClickHouse a le [même comportement que les programmes C++ ](https://en.cpprefer ## toInt (8/16/32/64) {#toint8163264} -Convertit une valeur d'entrée en [Int](../../data_types/int_uint.md) type de données. Cette fonction comprend: +Convertit une valeur d'entrée en [Int](../../sql_reference/data_types/int_uint.md) type de données. Cette fonction comprend: - `toInt8(expr)` — Results in the `Int8` type de données. - `toInt16(expr)` — Results in the `Int16` type de données. @@ -29,7 +32,7 @@ Valeur entière dans le `Int8`, `Int16`, `Int32`, ou `Int64` type de données. Fonctions d'utilisation [l'arrondi vers zéro](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), ce qui signifie qu'ils tronquent des chiffres fractionnaires de nombres. -Le comportement des fonctions pour le [NaN et Inf](../../data_types/float.md#data_type-float-nan-inf) arguments est indéfini. Rappelez-vous sur [problèmes de conversion numérique](#numeric-conversion-issues), lorsque vous utilisez les fonctions. +Le comportement des fonctions pour le [NaN et Inf](../../sql_reference/data_types/float.md#data_type-float-nan-inf) arguments est indéfini. Rappelez-vous sur [problèmes de conversion numérique](#numeric-conversion-issues), lorsque vous utilisez les fonctions. **Exemple** @@ -77,7 +80,7 @@ select toInt64OrNull('123123'), toInt8OrNull('123qwe123') ## toUInt (8/16/32/64) {#touint8163264} -Convertit une valeur d'entrée en [UInt](../../data_types/int_uint.md) type de données. Cette fonction comprend: +Convertit une valeur d'entrée en [UInt](../../sql_reference/data_types/int_uint.md) type de données. Cette fonction comprend: - `toUInt8(expr)` — Results in the `UInt8` type de données. - `toUInt16(expr)` — Results in the `UInt16` type de données. @@ -94,7 +97,7 @@ Valeur entière dans le `UInt8`, `UInt16`, `UInt32`, ou `UInt64` type de donnée Fonctions d'utilisation [l'arrondi vers zéro](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), ce qui signifie qu'ils tronquent des chiffres fractionnaires de nombres. -Le comportement des fonctions pour les agruments négatifs et pour le [NaN et Inf](../../data_types/float.md#data_type-float-nan-inf) arguments est indéfini. Si vous passez une chaîne avec un nombre négatif, par exemple `'-32'`, ClickHouse soulève une exception. Rappelez-vous sur [problèmes de conversion numérique](#numeric-conversion-issues), lorsque vous utilisez les fonctions. +Le comportement des fonctions pour les agruments négatifs et pour le [NaN et Inf](../../sql_reference/data_types/float.md#data_type-float-nan-inf) arguments est indéfini. Si vous passez une chaîne avec un nombre négatif, par exemple `'-32'`, ClickHouse soulève une exception. Rappelez-vous sur [problèmes de conversion numérique](#numeric-conversion-issues), lorsque vous utilisez les fonctions. **Exemple** @@ -132,7 +135,7 @@ SELECT toUInt64(nan), toUInt32(-32), toUInt16('16'), toUInt8(8.8) ## toDecimal (32/64/128) {#todecimal3264128} -Convertir `value` à l' [Décimal](../../data_types/decimal.md) type de données avec précision de `S`. Le `value` peut être un nombre ou une chaîne. Le `S` (l'échelle) paramètre spécifie le nombre de décimales. 
+Convertir `value` à l' [Décimal](../../sql_reference/data_types/decimal.md) type de données avec précision de `S`. Le `value` peut être un nombre ou une chaîne. Le `S` (l'échelle) paramètre spécifie le nombre de décimales. - `toDecimal32(value, S)` - `toDecimal64(value, S)` @@ -140,7 +143,7 @@ Convertir `value` à l' [Décimal](../../data_types/decimal.md) type de données ## toDecimal (32/64/128) OrNull {#todecimal3264128ornull} -Convertit une chaîne d'entrée en [Nullable (Décimal (P, S))](../../data_types/decimal.md) valeur de type de données. Cette famille de fonctions comprennent: +Convertit une chaîne d'entrée en [Nullable (Décimal (P, S))](../../sql_reference/data_types/decimal.md) valeur de type de données. Cette famille de fonctions comprennent: - `toDecimal32OrNull(expr, S)` — Results in `Nullable(Decimal32(S))` type de données. - `toDecimal64OrNull(expr, S)` — Results in `Nullable(Decimal64(S))` type de données. @@ -150,7 +153,7 @@ Ces fonctions devraient être utilisées à la place de `toDecimal*()` fonctions **Paramètre** -- `expr` — [Expression](../syntax.md#syntax-expressions), retourne une valeur dans l' [Chaîne](../../data_types/string.md) type de données. ClickHouse attend la représentation textuelle du nombre décimal. Exemple, `'1.111'`. +- `expr` — [Expression](../syntax.md#syntax-expressions), retourne une valeur dans l' [Chaîne](../../sql_reference/data_types/string.md) type de données. ClickHouse attend la représentation textuelle du nombre décimal. Exemple, `'1.111'`. - `S` — Scale, the number of decimal places in the resulting value. **Valeur renvoyée** @@ -184,7 +187,7 @@ SELECT toDecimal32OrNull(toString(-1.111), 2) AS val, toTypeName(val) ## toDecimal (32/64/128)OrZero {#todecimal3264128orzero} -Convertit une valeur d'entrée en [Decimal(P,S)](../../data_types/decimal.md) type de données. Cette famille de fonctions comprennent: +Convertit une valeur d'entrée en [Decimal(P,S)](../../sql_reference/data_types/decimal.md) type de données. Cette famille de fonctions comprennent: - `toDecimal32OrZero( expr, S)` — Results in `Decimal32(S)` type de données. - `toDecimal64OrZero( expr, S)` — Results in `Decimal64(S)` type de données. @@ -194,7 +197,7 @@ Ces fonctions devraient être utilisées à la place de `toDecimal*()` fonctions **Paramètre** -- `expr` — [Expression](../syntax.md#syntax-expressions), retourne une valeur dans l' [Chaîne](../../data_types/string.md) type de données. ClickHouse attend la représentation textuelle du nombre décimal. Exemple, `'1.111'`. +- `expr` — [Expression](../syntax.md#syntax-expressions), retourne une valeur dans l' [Chaîne](../../sql_reference/data_types/string.md) type de données. ClickHouse attend la représentation textuelle du nombre décimal. Exemple, `'1.111'`. - `S` — Scale, the number of decimal places in the resulting value. **Valeur renvoyée** @@ -339,7 +342,7 @@ SELECT La Conversion en FixedString (N) ne fonctionne que pour les arguments de type String ou FixedString (N). -Type conversion en [Nullable](../../data_types/nullable.md) et le dos est pris en charge. Exemple: +Type conversion en [Nullable](../../sql_reference/data_types/nullable.md) et le dos est pris en charge. Exemple: ``` sql SELECT toTypeName(x) FROM t_null @@ -365,7 +368,7 @@ SELECT toTypeName(CAST(x, 'Nullable(UInt16)')) FROM t_null ## toInterval (année / trimestre / Mois / Semaine / Jour / Heure / Minute / Seconde) {#function-tointerval} -Convertit un argument de type Number en [Intervalle](../../data_types/special_data_types/interval.md) type de données. 
+Convertit un argument de type Number en [Intervalle](../../sql_reference/data_types/special_data_types/interval.md) type de données. **Syntaxe** @@ -406,18 +409,126 @@ SELECT └───────────────────────────┴──────────────────────────────┘ ``` -## parseDateTimeBestEffort {#type_conversion_functions-parsedatetimebesteffort} +## parseDateTimeBestEffort {#parsedatetimebesteffort} -Analysez un argument de type number en un type Date ou DateTime. -différent de toDate et toDateTime, parseDateTimeBestEffort peut progresser format de date plus complexe. -Pour plus d'informations, voir le lien: [Format De Date Complexe](https://xkcd.com/1179/) +Convertit une date et une heure dans le [Chaîne](../../sql_reference/data_types/string.md) la représentation de [DateTime](../../sql_reference/data_types/datetime.md#data_type-datetime) type de données. + +La fonction d'analyse [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601), [RFC 1123 - 5.2.14 RFC-822 date et heure Spécification](https://tools.ietf.org/html/rfc1123#page-55), ClickHouse et d'autres formats de date et d'heure. + +**Syntaxe** + +``` sql +parseDateTimeBestEffort(time_string [, time_zone]); +``` + +**Paramètre** + +- `time_string` — String containing a date and time to convert. [Chaîne](../../sql_reference/data_types/string.md). +- `time_zone` — Time zone. The function parses `time_string` selon le fuseau horaire. [Chaîne](../../sql_reference/data_types/string.md). + +**Formats non standard pris en charge** + +- Une chaîne contenant 9..10 chiffres [le timestamp unix](https://en.wikipedia.org/wiki/Unix_time). +- Une chaîne avec une date et une heure composant: `YYYYMMDDhhmmss`, `DD/MM/YYYY hh:mm:ss`, `DD-MM-YY hh:mm`, `YYYY-MM-DD hh:mm:ss`, etc. +- Une chaîne avec une date, mais pas de composant de temps: `YYYY`, `YYYYMM`, `YYYY*MM`, `DD/MM/YYYY`, `DD-MM-YY` etc. +- Une chaîne avec un jour et une heure: `DD`, `DD hh`, `DD hh:mm`. Dans ce cas `YYYY-MM` sont substitués comme suit `2000-01`. +- Une chaîne qui inclut la date et l'heure ainsi que des informations de décalage de fuseau horaire: `YYYY-MM-DD hh:mm:ss ±h:mm`, etc. Exemple, `2020-12-12 17:36:00 -5:00`. + +Pour tous les formats avec séparateur, la fonction analyse les noms de mois exprimés par leur nom complet ou par les trois premières lettres d'un nom de mois. Exemple: `24/DEC/18`, `24-Dec-18`, `01-September-2018`. + +**Valeur renvoyée** + +- `time_string` converti à l' `DateTime` type de données. 
+ +**Exemple** + +Requête: + +``` sql +SELECT parseDateTimeBestEffort('12/12/2020 12:12:57') +AS parseDateTimeBestEffort; +``` + +Résultat: + +``` text +┌─parseDateTimeBestEffort─┐ +│ 2020-12-12 12:12:57 │ +└─────────────────────────┘ +``` + +Requête: + +``` sql +SELECT parseDateTimeBestEffort('Sat, 18 Aug 2018 07:22:16 GMT', 'Europe/Moscow') +AS parseDateTimeBestEffort +``` + +Résultat: + +``` text +┌─parseDateTimeBestEffort─┐ +│ 2018-08-18 10:22:16 │ +└─────────────────────────┘ +``` + +Requête: + +``` sql +SELECT parseDateTimeBestEffort('1284101485') +AS parseDateTimeBestEffort +``` + +Résultat: + +``` text +┌─parseDateTimeBestEffort─┐ +│ 2015-07-07 12:04:41 │ +└─────────────────────────┘ +``` + +Requête: + +``` sql +SELECT parseDateTimeBestEffort('2018-12-12 10:12:12') +AS parseDateTimeBestEffort +``` + +Résultat: + +``` text +┌─parseDateTimeBestEffort─┐ +│ 2018-12-12 10:12:12 │ +└─────────────────────────┘ +``` + +Requête: + +``` sql +SELECT parseDateTimeBestEffort('10 20:19') +``` + +Résultat: + +``` text +┌─parseDateTimeBestEffort('10 20:19')─┐ +│ 2000-01-10 20:19:00 │ +└─────────────────────────────────────┘ +``` + +**Voir Aussi** + +- [ISO 8601 announcement by @xkcd](https://xkcd.com/1179/) +- [RFC 1123](https://tools.ietf.org/html/rfc1123) +- [toDate](#todate) +- [toDateTime](#todatetime) ## parseDateTimeBestEffortOrNull {#parsedatetimebesteffortornull} -De même que pour [parseDateTimeBestEffort](#type_conversion_functions-parsedatetimebesteffort) sauf qu'il renvoie null lorsqu'il rencontre un format de date qui ne peut pas être traité. +De même que pour [parseDateTimeBestEffort](#parsedatetimebesteffort) sauf qu'il renvoie null lorsqu'il rencontre un format de date qui ne peut pas être traité. ## parseDateTimeBestEffortOrZero {#parsedatetimebesteffortorzero} -De même que pour [parseDateTimeBestEffort](#type_conversion_functions-parsedatetimebesteffort) sauf qu'il renvoie une date zéro ou une date zéro lorsqu'il rencontre un format de date qui ne peut pas être traité. +De même que pour [parseDateTimeBestEffort](#parsedatetimebesteffort) sauf qu'il renvoie une date zéro ou une date zéro lorsqu'il rencontre un format de date qui ne peut pas être traité. [Article Original](https://clickhouse.tech/docs/en/query_language/functions/type_conversion_functions/) diff --git a/docs/fr/query_language/functions/url_functions.md b/docs/fr/sql_reference/functions/url_functions.md similarity index 96% rename from docs/fr/query_language/functions/url_functions.md rename to docs/fr/sql_reference/functions/url_functions.md index 72e239ad38f..f8d7ef4ccd4 100644 --- a/docs/fr/query_language/functions/url_functions.md +++ b/docs/fr/sql_reference/functions/url_functions.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 54 +toc_title: Travailler avec des URL --- # Fonctions pour travailler avec des URL {#functions-for-working-with-urls} @@ -26,7 +29,7 @@ domain(url) **Paramètre** -- `url` — URL. Type: [Chaîne](../../data_types/string.md). +- `url` — URL. Type: [Chaîne](../../sql_reference/data_types/string.md). L'URL peut être spécifiée avec ou sans schéma. Exemple: @@ -77,7 +80,7 @@ topLevelDomain(url) **Paramètre** -- `url` — URL. Type: [Chaîne](../../data_types/string.md). +- `url` — URL. Type: [Chaîne](../../sql_reference/data_types/string.md). L'URL peut être spécifiée avec ou sans schéma.
Exemple: diff --git a/docs/fr/query_language/functions/uuid_functions.md b/docs/fr/sql_reference/functions/uuid_functions.md similarity index 86% rename from docs/fr/query_language/functions/uuid_functions.md rename to docs/fr/sql_reference/functions/uuid_functions.md index 1cd5645b660..43363235b9e 100644 --- a/docs/fr/query_language/functions/uuid_functions.md +++ b/docs/fr/sql_reference/functions/uuid_functions.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 53 +toc_title: Travailler avec UUID --- # Fonctions pour travailler avec UUID {#functions-for-working-with-uuid} @@ -8,7 +11,7 @@ Les fonctions pour travailler avec UUID sont listées ci-dessous. ## generateUUIDv4 {#uuid-function-generate} -Génère le [UUID](../../data_types/uuid.md) de [la version 4](https://tools.ietf.org/html/rfc4122#section-4.4). +Génère le [UUID](../../sql_reference/data_types/uuid.md) de [la version 4](https://tools.ietf.org/html/rfc4122#section-4.4). ``` sql generateUUIDv4() @@ -62,7 +65,7 @@ SELECT toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba0') AS uuid ## UUIDStringToNum {#uuidstringtonum} -Accepte une chaîne contenant 36 caractères dans le format `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`, et le renvoie comme un ensemble d'octets dans un [FixedString (16)](../../data_types/fixedstring.md). +Accepte une chaîne contenant 36 caractères dans le format `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`, et le renvoie comme un ensemble d'octets dans un [FixedString (16)](../../sql_reference/data_types/fixedstring.md). ``` sql UUIDStringToNum(String) @@ -81,7 +84,6 @@ SELECT ``` ``` text - ┌─uuid─────────────────────────────────┬─bytes────────────┐ │ 612f3c40-5d3b-217e-707b-6a546a3d7b29 │ a/<@];!~p{jTj={) │ └──────────────────────────────────────┴──────────────────┘ @@ -89,7 +91,7 @@ SELECT ## UUIDNumToString {#uuidnumtostring} -Accepte un [FixedString (16)](../../data_types/fixedstring.md) valeur, et renvoie une chaîne contenant 36 caractères au format texte. +Accepte un [FixedString (16)](../../sql_reference/data_types/fixedstring.md) valeur, et renvoie une chaîne contenant 36 caractères au format texte. ``` sql UUIDNumToString(FixedString(16)) diff --git a/docs/fr/query_language/functions/ym_dict_functions.md b/docs/fr/sql_reference/functions/ym_dict_functions.md similarity index 90% rename from docs/fr/query_language/functions/ym_dict_functions.md rename to docs/fr/sql_reference/functions/ym_dict_functions.md index df4a6fc5c10..378beb4f45b 100644 --- a/docs/fr/query_language/functions/ym_dict_functions.md +++ b/docs/fr/sql_reference/functions/ym_dict_functions.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 59 +toc_title: Travailler avec Yandex.Dictionnaires Metrica --- # Fonctions pour travailler avec Yandex.Dictionnaires Metrica {#functions-for-working-with-yandex-metrica-dictionaries} @@ -104,6 +107,28 @@ Exemple: `regionToCountry(toUInt32(213)) = 225` convertit Moscou (213) en Russie Convertit une région en continent. Dans tous les autres cas, cette fonction est la même que ‘regionToCity’. Exemple: `regionToContinent(toUInt32(213)) = 10001` convertit Moscou (213) en Eurasie (10001). +### regionToTopContinent (\#regiontotopcontinent) {#regiontotopcontinent-regiontotopcontinent} + +Trouve le continent le plus élevé dans la hiérarchie de la région. 
+ +**Syntaxe** + +``` sql +regionToTopContinent(id[, geobase]); +``` + +**Paramètre** + +- `id` — Region ID from the Yandex geobase. [UInt32](../../sql_reference/data_types/int_uint.md). +- `geobase` — Dictionary key. See [Plusieurs Geobases](#multiple-geobases). [Chaîne](../../sql_reference/data_types/string.md). Facultatif. + +**Valeur renvoyée** + +- Identifiant du continent de haut niveau (ce dernier lorsque vous grimpez dans la hiérarchie des régions). +- 0, si il n'y a aucun. + +Type: `UInt32`. + ### regionToPopulation (id \[, geobase\]) {#regiontopopulationid-geobase} Obtient la population d'une région. diff --git a/docs/fr/sql_reference/index.md b/docs/fr/sql_reference/index.md new file mode 100644 index 00000000000..bd29a0328e9 --- /dev/null +++ b/docs/fr/sql_reference/index.md @@ -0,0 +1,18 @@ +--- +machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_folder_title: SQL Reference +toc_hidden: true +toc_priority: 28 +toc_title: "cach\xE9s" +--- + +# Référence SQL {#sql-reference} + +- [SELECT](statements/select.md) +- [INSERT INTO](statements/insert_into.md) +- [CREATE](statements/create.md) +- [ALTER](statements/alter.md#query_language_queries_alter) +- [Autres types de requêtes](statements/misc.md) + +[Article Original](https://clickhouse.tech/docs/en/query_language/) diff --git a/docs/fr/query_language/operators.md b/docs/fr/sql_reference/operators.md similarity index 84% rename from docs/fr/query_language/operators.md rename to docs/fr/sql_reference/operators.md index 29dd4813e29..42537917ba5 100644 --- a/docs/fr/query_language/operators.md +++ b/docs/fr/sql_reference/operators.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 37 +toc_title: "Op\xE9rateur" --- # Opérateur {#operators} @@ -57,9 +60,9 @@ Les groupes d'opérateurs sont listés par ordre de priorité (plus il est élev `a NOT BETWEEN b AND c` – The same as `a < b OR a > c`. -## Opérateurs pour travailler avec des ensembles de données {#operators-for-working-with-data-sets} +## Opérateurs Pour Travailler Avec Des Ensembles De Données {#operators-for-working-with-data-sets} -*Voir [Dans les opérateurs](select.md#select-in-operators).* +*Voir [Dans les opérateurs](statements/select.md#select-in-operators).* `a IN ...` – The `in(a, b)` fonction. @@ -90,7 +93,7 @@ Le `part` paramètre spécifie la partie de la date à récupérer. Les valeurs Le `part` le paramètre est insensible à la casse. -Le `date` paramètre spécifie la date ou l'heure à traiter. Soit [Date](../data_types/date.md) ou [DateTime](../data_types/datetime.md) le type est pris en charge. +Le `date` paramètre spécifie la date ou l'heure à traiter. Soit [Date](../sql_reference/data_types/date.md) ou [DateTime](../sql_reference/data_types/datetime.md) le type est pris en charge. Exemple: @@ -133,11 +136,11 @@ FROM test.Orders; └───────────┴────────────┴──────────┴───────────┴─────────────┴─────────────┘ ``` -Vous pouvez voir plus d'exemples de [test](https://github.com/ClickHouse/ClickHouse/blob/master/src/tests/queries/0_stateless/00619_extract.sql). +Vous pouvez voir plus d'exemples de [test](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00619_extract.sql). 
### INTERVAL {#operator-interval} -Crée un [Intervalle](../data_types/special_data_types/interval.md)- valeur de type qui doit être utilisée dans les opérations arithmétiques avec [Date](../data_types/date.md) et [DateTime](../data_types/datetime.md)-type de valeurs. +Crée un [Intervalle](../sql_reference/data_types/special_data_types/interval.md)- valeur de type qui doit être utilisée dans les opérations arithmétiques avec [Date](../sql_reference/data_types/date.md) et [DateTime](../sql_reference/data_types/datetime.md)-type de valeurs. Types d'intervalles: - `SECOND` @@ -166,8 +169,8 @@ SELECT now() AS current_date_time, current_date_time + INTERVAL 4 DAY + INTERVAL **Voir Aussi** -- [Intervalle](../data_types/special_data_types/interval.md) type de données -- [toInterval](functions/type_conversion_functions.md#function-tointerval) type fonctions de conversion +- [Intervalle](../sql_reference/data_types/special_data_types/interval.md) type de données +- [toInterval](../sql_reference/functions/type_conversion_functions.md#function-tointerval) type fonctions de conversion ## Opérateur De Négation Logique {#logical-negation-operator} @@ -187,7 +190,7 @@ SELECT now() AS current_date_time, current_date_time + INTERVAL 4 DAY + INTERVAL Note: -L'opérateur conditionnel calcule les valeurs de b et c, puis vérifie si la condition a est remplie, puis renvoie la valeur correspondante. Si `b` ou `C` est un [arrayJoin()](functions/array_join.md#functions_arrayjoin) fonction, chaque ligne sera répliquée indépendamment de la “a” condition. +L'opérateur conditionnel calcule les valeurs de b et c, puis vérifie si la condition a est remplie, puis renvoie la valeur correspondante. Si `b` ou `C` est un [arrayJoin()](../sql_reference/functions/array_join.md#functions_arrayjoin) fonction, chaque ligne sera répliquée indépendamment de la “a” condition. ## Expression Conditionnelle {#operator_case} @@ -230,13 +233,13 @@ Parfois, cela ne fonctionne pas de la façon que vous attendez. Exemple, `SELECT Pour l'efficacité, le `and` et `or` les fonctions acceptent n'importe quel nombre d'arguments. Les chaînes de `AND` et `OR` les opérateurs sont transformés en un seul appel de ces fonctions. -## La vérification de `NULL` {#checking-for-null} +## La Vérification De `NULL` {#checking-for-null} Clickhouse soutient le `IS NULL` et `IS NOT NULL` opérateur. ### IS NULL {#operator-is-null} -- Pour [Nullable](../data_types/nullable.md) type de valeurs, l' `IS NULL` opérateur retourne: +- Pour [Nullable](../sql_reference/data_types/nullable.md) type de valeurs, l' `IS NULL` opérateur retourne: - `1` si la valeur est `NULL`. - `0` autrement. - Pour les autres valeurs, la `IS NULL` l'opérateur renvoie toujours `0`. @@ -255,7 +258,7 @@ SELECT x+100 FROM t_null WHERE y IS NULL ### IS NOT NULL {#is-not-null} -- Pour [Nullable](../data_types/nullable.md) type de valeurs, l' `IS NOT NULL` opérateur retourne: +- Pour [Nullable](../sql_reference/data_types/nullable.md) type de valeurs, l' `IS NOT NULL` opérateur retourne: - `0` si la valeur est `NULL`. - `1` autrement. - Pour les autres valeurs, la `IS NOT NULL` l'opérateur renvoie toujours `1`. 
diff --git a/docs/fr/query_language/alter.md b/docs/fr/sql_reference/statements/alter.md similarity index 90% rename from docs/fr/query_language/alter.md rename to docs/fr/sql_reference/statements/alter.md index 8e0a435207d..0a1c93e2863 100644 --- a/docs/fr/query_language/alter.md +++ b/docs/fr/sql_reference/statements/alter.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 36 +toc_title: ALTER --- ## ALTER {#query_language_queries_alter} @@ -37,7 +40,7 @@ Ajoute une nouvelle colonne à la table spécifiée `name`, `type`, [`codec`](cr Si l' `IF NOT EXISTS` la clause est incluse, la requête ne retournera pas d'erreur si la colonne existe déjà. Si vous spécifiez `AFTER name_after` (le nom d'une autre colonne), la colonne est ajoutée après celle spécifiée dans la liste des colonnes de la table. Sinon, la colonne est ajoutée à la fin de la table. Notez qu'il n'existe aucun moyen d'ajouter une colonne au début d'un tableau. Pour une chaîne d'actions, `name_after` peut être le nom d'une colonne est ajoutée dans l'une des actions précédentes. -L'ajout d'une colonne modifie simplement la structure de la table, sans effectuer d'actions avec des données. Les données n'apparaissent pas sur le disque après la `ALTER`. Si les données sont manquantes pour une colonne lors de la lecture de la table, elles sont remplies avec des valeurs par défaut (en exécutant l'expression par défaut s'il y en a une, ou en utilisant des zéros ou des chaînes vides). La colonne apparaît sur le disque après la fusion des parties de données (voir [MergeTree](../operations/table_engines/mergetree.md)). +L'ajout d'une colonne modifie simplement la structure de la table, sans effectuer d'actions avec des données. Les données n'apparaissent pas sur le disque après la `ALTER`. Si les données sont manquantes pour une colonne lors de la lecture de la table, elles sont remplies avec des valeurs par défaut (en exécutant l'expression par défaut s'il y en a une, ou en utilisant des zéros ou des chaînes vides). La colonne apparaît sur le disque après la fusion des parties de données (voir [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md)). Cette approche nous permet de compléter le `ALTER` requête instantanément, sans augmenter le volume de données anciennes. @@ -111,11 +114,11 @@ Cette requête modifie le `name` les propriétés de la colonne: - TTL - For examples of columns TTL modifying, see [Column TTL](../operations/table_engines/mergetree.md#mergetree-column-ttl). + For examples of columns TTL modifying, see [Column TTL](../engines/table_engines/mergetree_family/mergetree.md#mergetree-column-ttl). Si l' `IF EXISTS` la clause est spécifiée, la requête ne retournera pas d'erreur si la colonne n'existe pas. -Lors de la modification du type, les valeurs sont converties comme si [toType](functions/type_conversion_functions.md) les fonctions ont été appliquées. Si seule l'expression par défaut est modifiée, la requête ne fait rien de complexe et est terminée presque instantanément. +Lors de la modification du type, les valeurs sont converties comme si [toType](../../sql_reference/functions/type_conversion_functions.md) les fonctions ont été appliquées. Si seule l'expression par défaut est modifiée, la requête ne fait rien de complexe et est terminée presque instantanément. 
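The `IS NOT NULL` bullets that close the operators diff above have no companion query; a sketch reusing the same hypothetical `t_null` table (with a `Nullable` column `y`) as the `IS NULL` example already shown:

``` sql
SELECT * FROM t_null WHERE y IS NOT NULL;
-- ┌─x─┬─y─┐
-- │ 2 │ 3 │
-- └───┴───┘
```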
Exemple: @@ -143,7 +146,7 @@ Le `ALTER` query vous permet de créer et de supprimer des éléments distincts Il n'y a pas de support pour supprimer des colonnes dans la clé primaire ou la clé d'échantillonnage (colonnes qui sont utilisées dans le `ENGINE` expression). La modification du type des colonnes incluses dans la clé primaire n'est possible que si cette modification n'entraîne pas la modification des données (par exemple, vous êtes autorisé à ajouter des valeurs à une énumération ou à modifier un type de `DateTime` de `UInt32`). -Si l' `ALTER` la requête n'est pas suffisante pour apporter les modifications de table dont vous avez besoin, vous pouvez créer une nouvelle table, y copier les données en utilisant le [INSERT SELECT](insert_into.md#insert_query_insert-select) requête, puis changer les tables en utilisant le [RENAME](misc.md#misc_operations-rename) requête et supprimer l'ancienne table. Vous pouvez utiliser l' [clickhouse-copieur](../operations/utils/clickhouse-copier.md) comme une alternative à la `INSERT SELECT` requête. +Si l' `ALTER` la requête n'est pas suffisante pour apporter les modifications de table dont vous avez besoin, vous pouvez créer une nouvelle table, y copier les données en utilisant le [INSERT SELECT](insert_into.md#insert_query_insert-select) requête, puis changer les tables en utilisant le [RENAME](misc.md#misc_operations-rename) requête et supprimer l'ancienne table. Vous pouvez utiliser l' [clickhouse-copieur](../../operations/utilities/clickhouse-copier.md) comme une alternative à la `INSERT SELECT` requête. Le `ALTER` query bloque toutes les lectures et écritures pour la table. En d'autres termes, si une longue `SELECT` est en cours d'exécution au moment de la `ALTER` requête, la `ALTER` la requête va attendre qu'elle se termine. Dans le même temps, toutes les nouvelles requêtes à la même table attendre que ce `ALTER` est en cours d'exécution. @@ -157,9 +160,9 @@ La commande suivante est prise en charge: MODIFY ORDER BY new_expression ``` -Cela ne fonctionne que pour les tables du [`MergeTree`](../operations/table_engines/mergetree.md) de la famille (y compris les -[répliqué](../operations/table_engines/replication.md) table). La commande change l' -[clé de tri](../operations/table_engines/mergetree.md) de la table +Cela ne fonctionne que pour les tables du [`MergeTree`](../../engines/table_engines/mergetree_family/mergetree.md) de la famille (y compris les +[répliqué](../../engines/table_engines/mergetree_family/replication.md) table). La commande change l' +[clé de tri](../../engines/table_engines/mergetree_family/mergetree.md) de la table de `new_expression` (une expression ou un tuple d'expressions). Clé primaire reste le même. La commande est légère en ce sens qu'elle ne modifie que les métadonnées. Pour conserver la propriété cette partie de données @@ -168,8 +171,8 @@ les lignes sont ordonnées par l'expression de clé de tri vous ne pouvez pas aj ### Manipulations Avec Des Indices De Saut De Données {#manipulations-with-data-skipping-indices} -Cela ne fonctionne que pour les tables du [`*MergeTree`](../operations/table_engines/mergetree.md) de la famille (y compris les -[répliqué](../operations/table_engines/replication.md) table). Les opérations suivantes +Cela ne fonctionne que pour les tables du [`*MergeTree`](../../engines/table_engines/mergetree_family/mergetree.md) de la famille (y compris les +[répliqué](../../engines/table_engines/mergetree_family/replication.md) table). 
Les opérations suivantes sont disponibles: - `ALTER TABLE [db].name ADD INDEX name expression TYPE type GRANULARITY value AFTER name [AFTER name2]` - Ajoute la description de l'index aux métadonnées des tables. @@ -179,7 +182,7 @@ sont disponibles: Ces commandes sont légères dans le sens où elles ne modifient que les métadonnées ou suppriment des fichiers. En outre, ils sont répliqués (synchronisation des métadonnées des indices via ZooKeeper). -### Manipulations avec contraintes {#manipulations-with-constraints} +### Manipulations Avec Contraintes {#manipulations-with-constraints} En voir plus sur [contraintes](create.md#constraints) @@ -198,7 +201,7 @@ Toutes les modifications sur les tables répliquées sont diffusées sur ZooKeep ### Manipulations avec des Partitions et des pièces {#alter_manipulations-with-partitions} -Les opérations suivantes avec [partition](../operations/table_engines/custom_partitioning_key.md) sont disponibles: +Les opérations suivantes avec [partition](../../engines/table_engines/mergetree_family/custom_partitioning_key.md) sont disponibles: - [DETACH PARTITION](#alter_detach-partition) – Moves a partition to the `detached` répertoire et de l'oublier. - [DROP PARTITION](#alter_drop-partition) – Deletes a partition. @@ -233,7 +236,7 @@ Lisez à propos de la définition de l'expression de partition dans une section Une fois la requête exécutée, vous pouvez faire ce que vous voulez avec les données du `detached` directory — delete it from the file system, or just leave it. -This query is replicated – it moves the data to the `detached` répertoire sur toutes les répliques. Notez que vous ne pouvez exécuter cette requête que sur un réplica leader. Pour savoir si une réplique est un leader, effectuez le `SELECT` requête à l' [système.réplique](../operations/system_tables.md#system_tables-replicas) table. Alternativement, il est plus facile de faire une `DETACH` requête sur toutes les répliques - toutes les répliques lancent une exception, à l'exception de la réplique leader. +This query is replicated – it moves the data to the `detached` répertoire sur toutes les répliques. Notez que vous ne pouvez exécuter cette requête que sur un réplica leader. Pour savoir si une réplique est un leader, effectuez le `SELECT` requête à l' [système.réplique](../../operations/system_tables.md#system_tables-replicas) table. Alternativement, il est plus facile de faire une `DETACH` requête sur toutes les répliques - toutes les répliques lancent une exception, à l'exception de la réplique leader. #### DROP PARTITION {#alter_drop-partition} @@ -349,7 +352,7 @@ Au moment de l'exécution, pour un instantané de données, la requête crée de - `N` est le numéro incrémental de la sauvegarde. !!! note "Note" - Si vous utilisez [un ensemble de disques pour le stockage des données dans une table](../operations/table_engines/mergetree.md#table_engine-mergetree-multiple-volumes), le `shadow/N` le répertoire apparaît sur chaque disque, stockant les parties de données correspondant `PARTITION` expression. + Si vous utilisez [un ensemble de disques pour le stockage des données dans une table](../../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-multiple-volumes), le `shadow/N` le répertoire apparaît sur chaque disque, stockant les parties de données correspondant `PARTITION` expression. La même structure de répertoires est créée à l'intérieur de la sauvegarde qu'à l'intérieur `/var/lib/clickhouse/`. La requête effectue ‘chmod’ pour tous les fichiers, interdisant d'écrire en eux. 
@@ -367,7 +370,7 @@ Pour restaurer des données à partir d'une sauvegarde, procédez comme suit: La restauration à partir d'une sauvegarde ne nécessite pas l'arrêt du serveur. -Pour plus d'informations sur les sauvegardes et la restauration [La Sauvegarde Des Données](../operations/backup.md) section. +Pour plus d'informations sur les sauvegardes et la restauration [La Sauvegarde Des Données](../../operations/backup.md) section. #### CLEAR INDEX IN PARTITION {#alter_clear-index-partition} @@ -408,7 +411,7 @@ Bien que la requête soit appelée `ALTER TABLE`, il ne modifie pas la structure #### MOVE PARTITION\|PART {#alter_move-partition} -Déplace des partitions ou des parties de données vers un autre volume ou disque pour `MergeTree`-tables de moteur. Voir [Utilisation de plusieurs périphériques de bloc pour le stockage de données](../operations/table_engines/mergetree.md#table_engine-mergetree-multiple-volumes). +Déplace des partitions ou des parties de données vers un autre volume ou disque pour `MergeTree`-tables de moteur. Voir [Utilisation de plusieurs périphériques de bloc pour le stockage de données](../../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-multiple-volumes). ``` sql ALTER TABLE table_name MOVE PARTITION|PART partition_expr TO DISK|VOLUME 'disk_name' @@ -434,7 +437,7 @@ Vous pouvez spécifier l'expression de partition dans `ALTER ... PARTITION` requ - Comme une valeur de l' `partition` la colonne de la `system.parts` table. Exemple, `ALTER TABLE visits DETACH PARTITION 201901`. - Comme expression de la colonne de la table. Les constantes et les expressions constantes sont prises en charge. Exemple, `ALTER TABLE visits DETACH PARTITION toYYYYMM(toDate('2019-01-25'))`. - À l'aide de l'ID de partition. Partition ID est un identifiant de chaîne de la partition (lisible par l'homme, si possible) qui est utilisé comme noms de partitions dans le système de fichiers et dans ZooKeeper. L'ID de partition doit être spécifié dans `PARTITION ID` clause, entre guillemets simples. Exemple, `ALTER TABLE visits DETACH PARTITION ID '201901'`. -- Dans le [ALTER ATTACH PART](#alter_attach-partition) et [DROP DETACHED PART](#alter_drop-detached) requête, pour spécifier le nom d'une partie, utilisez le littéral de chaîne avec une valeur de `name` la colonne de la [système.detached\_parts](../operations/system_tables.md#system_tables-detached_parts) table. Exemple, `ALTER TABLE visits ATTACH PART '201901_1_1_0'`. +- Dans le [ALTER ATTACH PART](#alter_attach-partition) et [DROP DETACHED PART](#alter_drop-detached) requête, pour spécifier le nom d'une partie, utilisez le littéral de chaîne avec une valeur de `name` la colonne de la [système.detached\_parts](../../operations/system_tables.md#system_tables-detached_parts) table. Exemple, `ALTER TABLE visits ATTACH PART '201901_1_1_0'`. L'utilisation de guillemets lors de la spécification de la partition dépend du type d'expression de partition. Par exemple, pour la `String` type, vous devez spécifier son nom entre guillemets (`'`). Pour l' `Date` et `Int*` types aucune citation n'est nécessaire. @@ -446,11 +449,11 @@ Toutes les règles ci-dessus sont aussi valables pour la [OPTIMIZE](misc.md#misc OPTIMIZE TABLE table_not_partitioned PARTITION tuple() FINAL; ``` -Les exemples de `ALTER ... 
PARTITION` les requêtes sont démontrées dans les tests [`00502_custom_partitioning_local`](https://github.com/ClickHouse/ClickHouse/blob/master/src/tests/queries/0_stateless/00502_custom_partitioning_local.sql) et [`00502_custom_partitioning_replicated_zookeeper`](https://github.com/ClickHouse/ClickHouse/blob/master/src/tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper.sql). +Les exemples de `ALTER ... PARTITION` les requêtes sont démontrées dans les tests [`00502_custom_partitioning_local`](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00502_custom_partitioning_local.sql) et [`00502_custom_partitioning_replicated_zookeeper`](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper.sql). ### Manipulations avec Table TTL {#manipulations-with-table-ttl} -Vous pouvez modifier [tableau TTL](../operations/table_engines/mergetree.md#mergetree-table-ttl) avec une demande du formulaire suivant: +Vous pouvez modifier [tableau TTL](../../engines/table_engines/mergetree_family/mergetree.md#mergetree-table-ttl) avec une demande du formulaire suivant: ``` sql ALTER TABLE table-name MODIFY TTL ttl-expression @@ -495,7 +498,7 @@ Pour les tables \* MergeTree, les mutations s'exécutent en réécrivant des par Les Mutations sont totalement ordonnées par leur ordre de création et sont appliquées à chaque partie dans cet ordre. Les Mutations sont également partiellement ordonnées avec des insertions - les données insérées dans la table avant la soumission de la mutation seront mutées et les données insérées après ne seront pas mutées. Notez que les mutations ne bloquent en aucune façon les INSERTs. -Une requête de mutation retourne immédiatement après l'ajout de l'entrée de mutation (dans le cas de tables répliquées à ZooKeeper, pour les tables non compliquées - au système de fichiers). La mutation elle-même s'exécute de manière asynchrone en utilisant les paramètres du profil système. Pour suivre l'avancement des mutations vous pouvez utiliser la [`system.mutations`](../operations/system_tables.md#system_tables-mutations) table. Une mutation qui a été soumise avec succès continuera à s'exécuter même si les serveurs ClickHouse sont redémarrés. Il n'y a aucun moyen de faire reculer la mutation une fois qu'elle est soumise, mais si la mutation est bloquée pour une raison quelconque, elle peut être annulée avec le [`KILL MUTATION`](misc.md#kill-mutation) requête. +Une requête de mutation retourne immédiatement après l'ajout de l'entrée de mutation (dans le cas de tables répliquées à ZooKeeper, pour les tables non compliquées - au système de fichiers). La mutation elle-même s'exécute de manière asynchrone en utilisant les paramètres du profil système. Pour suivre l'avancement des mutations vous pouvez utiliser la [`system.mutations`](../../operations/system_tables.md#system_tables-mutations) table. Une mutation qui a été soumise avec succès continuera à s'exécuter même si les serveurs ClickHouse sont redémarrés. Il n'y a aucun moyen de faire reculer la mutation une fois qu'elle est soumise, mais si la mutation est bloquée pour une raison quelconque, elle peut être annulée avec le [`KILL MUTATION`](misc.md#kill-mutation) requête. Les entrées pour les mutations finies ne sont pas supprimées immédiatement (le nombre d'entrées conservées est déterminé par `finished_mutations_to_keep` le moteur de stockage de paramètre). Les anciennes entrées de mutation sont supprimées. 
diff --git a/docs/fr/query_language/create.md b/docs/fr/sql_reference/statements/create.md similarity index 88% rename from docs/fr/query_language/create.md rename to docs/fr/sql_reference/statements/create.md index ffe2125146b..3cfb7a8bfa4 100644 --- a/docs/fr/query_language/create.md +++ b/docs/fr/sql_reference/statements/create.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 35 +toc_title: CREATE --- # Créer des requêtes {#create-queries} @@ -27,11 +30,11 @@ CREATE DATABASE [IF NOT EXISTS] db_name [ON CLUSTER cluster] [ENGINE = engine(.. - `ENGINE` - - [MySQL](../database_engines/mysql.md) + - [MySQL](../engines/database_engines/mysql.md) Allows you to retrieve data from the remote MySQL server. - By default, ClickHouse uses its own [database engine](../database_engines/index.md). + By default, ClickHouse uses its own [database engine](../engines/database_engines/index.md). ## CREATE TABLE {#create-table-query} @@ -62,7 +65,7 @@ Crée une table avec la même structure qu'une autre table. Vous pouvez spécifi CREATE TABLE [IF NOT EXISTS] [db.]table_name AS table_function() ``` -Crée une table avec la structure et les données renvoyées par [fonction de table](table_functions/index.md). +Crée une table avec la structure et les données renvoyées par [fonction de table](../table_functions/index.md). ``` sql CREATE TABLE [IF NOT EXISTS] [db.]table_name ENGINE = engine AS SELECT ... @@ -72,7 +75,7 @@ Crée une table avec une structure comme le résultat de l' `SELECT` une requêt Dans tous les cas, si `IF NOT EXISTS` est spécifié, la requête ne renvoie pas une erreur si la table existe déjà. Dans ce cas, la requête ne font rien. -Il peut y avoir d'autres clauses après le `ENGINE` la clause dans la requête. Voir la documentation détaillée sur la façon de créer des tables dans les descriptions de [moteurs de table](../operations/table_engines/index.md#table_engines). +Il peut y avoir d'autres clauses après le `ENGINE` la clause dans la requête. Voir la documentation détaillée sur la façon de créer des tables dans les descriptions de [moteurs de table](../../engines/table_engines/index.md#table_engines). ### Les Valeurs Par Défaut {#create-default-values} @@ -129,11 +132,11 @@ L'ajout d'une grande quantité de contraintes peut affecter négativement les pe ### Expression TTL {#ttl-expression} -Définit la durée de stockage des valeurs. Peut être spécifié uniquement pour les tables mergetree-family. Pour la description détaillée, voir [TTL pour les colonnes et les tableaux](../operations/table_engines/mergetree.md#table_engine-mergetree-ttl). +Définit la durée de stockage des valeurs. Peut être spécifié uniquement pour les tables mergetree-family. Pour la description détaillée, voir [TTL pour les colonnes et les tableaux](../../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-ttl). ### Codecs De Compression De Colonne {#codecs} -Par défaut, ClickHouse applique le `lz4` méthode de compression. Pour `MergeTree`- famille de moteurs Vous pouvez modifier la méthode de compression par défaut dans le [compression](../operations/server_settings/settings.md#server-settings-compression) section d'une configuration de serveur. Vous pouvez également définir la méthode de compression pour chaque colonne `CREATE TABLE` requête. +Par défaut, ClickHouse applique le `lz4` méthode de compression. 
Pour `MergeTree`- famille de moteurs Vous pouvez modifier la méthode de compression par défaut dans le [compression](../../operations/server_configuration_parameters/settings.md#server-settings-compression) section d'une configuration de serveur. Vous pouvez également définir la méthode de compression pour chaque colonne `CREATE TABLE` requête. ``` sql CREATE TABLE codec_example @@ -155,10 +158,10 @@ Si un codec est spécifié, le codec par défaut ne s'applique pas. Les Codecs p La Compression est prise en charge pour les moteurs de tableau suivants: -- [MergeTree](../operations/table_engines/mergetree.md) famille. Prend en charge les codecs de compression de colonne et la sélection de la méthode de compression par défaut par [compression](../operations/server_settings/settings.md#server-settings-compression) paramètre. -- [Journal](../operations/table_engines/log_family.md) famille. Utilise la `lz4` méthode de compression par défaut et prend en charge les codecs de compression de colonne. -- [Définir](../operations/table_engines/set.md). Uniquement pris en charge la compression par défaut. -- [Rejoindre](../operations/table_engines/join.md). Uniquement pris en charge la compression par défaut. +- [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md) famille. Prend en charge les codecs de compression de colonne et la sélection de la méthode de compression par défaut par [compression](../../operations/server_configuration_parameters/settings.md#server-settings-compression) paramètre. +- [Journal](../../engines/table_engines/log_family/log_family.md) famille. Utilise la `lz4` méthode de compression par défaut et prend en charge les codecs de compression de colonne. +- [Définir](../../engines/table_engines/special/set.md). Uniquement pris en charge la compression par défaut. +- [Rejoindre](../../engines/table_engines/special/join.md). Uniquement pris en charge la compression par défaut. ClickHouse prend en charge les codecs à usage commun et les codecs spécialisés. @@ -184,7 +187,7 @@ CREATE TABLE codec_example ENGINE = MergeTree() ``` -#### Codecs à usage commun {#create-query-common-purpose-codecs} +#### Codecs À Usage Commun {#create-query-common-purpose-codecs} Codec: @@ -219,7 +222,7 @@ CREATE TEMPORARY TABLE [IF NOT EXISTS] table_name Dans la plupart des cas, les tables temporaires ne sont pas créées manuellement, mais lors de l'utilisation de données externes pour une requête ou pour `(GLOBAL) IN`. Pour plus d'informations, consultez les sections appropriées -Il est possible d'utiliser des tables avec [Moteur = mémoire](../operations/table_engines/memory.md) au lieu de tables temporaires. +Il est possible d'utiliser des tables avec [Moteur = mémoire](../../engines/table_engines/special/memory.md) au lieu de tables temporaires. ## Requêtes DDL distribuées (sur la clause CLUSTER) {#distributed-ddl-queries-on-cluster-clause} @@ -295,12 +298,12 @@ LAYOUT(LAYOUT_NAME([param_name param_value])) LIFETIME([MIN val1] MAX val2) ``` -Crée [externe dictionnaire](dicts/external_dicts.md) avec le [structure](dicts/external_dicts_dict_structure.md), [source](dicts/external_dicts_dict_sources.md), [disposition](dicts/external_dicts_dict_layout.md) et [vie](dicts/external_dicts_dict_lifetime.md). 
+Crée [externe dictionnaire](../../sql_reference/dictionaries/external_dictionaries/external_dicts.md) avec le [structure](../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md), [source](../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md), [disposition](../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_layout.md) et [vie](../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_lifetime.md). Structure de dictionnaire externe se compose d'attributs. Les attributs du dictionnaire sont spécifiés de la même manière que les colonnes du tableau. La seule propriété d'attribut requise est son type, toutes les autres propriétés peuvent avoir des valeurs par défaut. -Selon le dictionnaire [disposition](dicts/external_dicts_dict_layout.md) un ou plusieurs attributs peuvent être spécifiés comme les clés de dictionnaire. +Selon le dictionnaire [disposition](../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_layout.md) un ou plusieurs attributs peuvent être spécifiés comme les clés de dictionnaire. -Pour plus d'informations, voir [Dictionnaires Externes](dicts/external_dicts.md) section. +Pour plus d'informations, voir [Dictionnaires Externes](../../sql_reference/dictionaries/external_dictionaries/external_dicts.md) section. [Article Original](https://clickhouse.tech/docs/en/query_language/create/) diff --git a/docs/fr/sql_reference/statements/index.md b/docs/fr/sql_reference/statements/index.md new file mode 100644 index 00000000000..eef857c8cdb --- /dev/null +++ b/docs/fr/sql_reference/statements/index.md @@ -0,0 +1,8 @@ +--- +machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_folder_title: Statements +toc_priority: 31 +--- + + diff --git a/docs/fr/query_language/insert_into.md b/docs/fr/sql_reference/statements/insert_into.md similarity index 86% rename from docs/fr/query_language/insert_into.md rename to docs/fr/sql_reference/statements/insert_into.md index 9169c71240b..621d543d577 100644 --- a/docs/fr/query_language/insert_into.md +++ b/docs/fr/sql_reference/statements/insert_into.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 34 +toc_title: INSERT INTO --- ## INSERT {#insert} @@ -17,9 +20,9 @@ La requête peut spécifier une liste de colonnes à insérer `[(c1, c2, c3)]`. - Les valeurs calculées à partir `DEFAULT` expressions spécifiées dans la définition de la table. - Zéros et chaînes vides, si `DEFAULT` les expressions ne sont pas définies. -Si [strict\_insert\_defaults=1](../operations/settings/settings.md), les colonnes qui n'ont pas `DEFAULT` défini doit être répertorié dans la requête. +Si [strict\_insert\_defaults=1](../../operations/settings/settings.md), les colonnes qui n'ont pas `DEFAULT` défini doit être répertorié dans la requête. -Les données peuvent être transmises à L'INSERT dans n'importe quel [format](../interfaces/formats.md#formats) soutenu par ClickHouse. Le format doit être spécifié explicitement dans la requête: +Les données peuvent être transmises à L'INSERT dans n'importe quel [format](../../interfaces/formats.md#formats) soutenu par ClickHouse. 
Le format doit être spécifié explicitement dans la requête: ``` sql INSERT INTO [db.]table [(c1, c2, c3)] FORMAT format_name data_set @@ -41,13 +44,13 @@ INSERT INTO t FORMAT TabSeparated 22 Qwerty ``` -Vous pouvez insérer des données séparément de la requête à l'aide du client de ligne de commande ou de L'interface HTTP. Pour plus d'informations, consultez la section “[Interface](../interfaces/index.md#interfaces)”. +Vous pouvez insérer des données séparément de la requête à l'aide du client de ligne de commande ou de L'interface HTTP. Pour plus d'informations, consultez la section “[Interface](../../interfaces/index.md#interfaces)”. ### Contraintes {#constraints} Si la table a [contraintes](create.md#constraints), their expressions will be checked for each row of inserted data. If any of those constraints is not satisfied — server will raise an exception containing constraint name and expression, the query will be stopped. -### Insertion des résultats de `SELECT` {#insert_query_insert-select} +### Insertion Des Résultats De `SELECT` {#insert_query_insert-select} ``` sql INSERT INTO [db.]table [(c1, c2, c3)] SELECT ... @@ -60,7 +63,7 @@ Aucun des formats de données à l'exception des Valeurs permettent de définir Les autres requêtes de modification des parties de données ne sont pas prises en charge: `UPDATE`, `DELETE`, `REPLACE`, `MERGE`, `UPSERT`, `INSERT UPDATE`. Cependant, vous pouvez supprimer les anciennes données en utilisant `ALTER TABLE ... DROP PARTITION`. -`FORMAT` la clause doit être spécifié à la fin de la requête si `SELECT` la clause contient la fonction de table [entrée()](table_functions/input.md). +`FORMAT` la clause doit être spécifié à la fin de la requête si `SELECT` la clause contient la fonction de table [entrée()](../table_functions/input.md). ### Considérations De Performance {#performance-considerations} diff --git a/docs/fr/query_language/misc.md b/docs/fr/sql_reference/statements/misc.md similarity index 83% rename from docs/fr/query_language/misc.md rename to docs/fr/sql_reference/statements/misc.md index 3ca724f57d3..4dfc42372a1 100644 --- a/docs/fr/query_language/misc.md +++ b/docs/fr/sql_reference/statements/misc.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 39 +toc_title: Autre --- # Diverses Requêtes {#miscellaneous-queries} @@ -31,17 +34,17 @@ CHECK TABLE [db.]name Le `CHECK TABLE` requête compare réelle des tailles de fichier avec les valeurs attendues qui sont stockés sur le serveur. Si le fichier tailles ne correspondent pas aux valeurs stockées, cela signifie que les données sont endommagées. Cela peut être causé, par exemple, par un plantage du système lors de l'exécution de la requête. La réponse de la requête contient `result` colonne avec une seule ligne. La ligne a une valeur de -[Booléen](../data_types/boolean.md) type: +[Booléen](../../sql_reference/data_types/boolean.md) type: - 0 - les données de la table sont corrompues. - 1 - les données maintiennent l'intégrité. 
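As a brief sketch of the check itself (the table name is hypothetical):

``` sql
-- Compares the actual on-disk file sizes with the values stored on the server;
-- returns a single row where result = 1 (data intact) or 0 (data corrupted).
CHECK TABLE test_table;
```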
Le `CHECK TABLE` query prend en charge les moteurs de table suivants: -- [Journal](../operations/table_engines/log.md) -- [TinyLog](../operations/table_engines/tinylog.md) -- [StripeLog](../operations/table_engines/stripelog.md) -- [Famille MergeTree](../operations/table_engines/mergetree.md) +- [Journal](../../engines/table_engines/log_family/log.md) +- [TinyLog](../../engines/table_engines/log_family/tinylog.md) +- [StripeLog](../../engines/table_engines/log_family/stripelog.md) +- [Famille MergeTree](../../engines/table_engines/mergetree_family/mergetree.md) Effectué sur les tables avec un autre moteur de table provoque une exception. @@ -54,7 +57,7 @@ Pour `MergeTree` moteurs de la famille, le `CHECK TABLE` query affiche un État Si la table est corrompue, vous pouvez copier les données non corrompues dans une autre table. Pour ce faire: 1. Créez une nouvelle table avec la même structure que la table endommagée. Pour ce faire exécutez la requête `CREATE TABLE AS `. -2. Définir le [max\_threads](../operations/settings/settings.md#settings-max_threads) la valeur 1 pour traiter la requête suivante dans un seul thread. Pour ce faire, exécutez la requête `SET max_threads = 1`. +2. Définir le [max\_threads](../../operations/settings/settings.md#settings-max_threads) la valeur 1 pour traiter la requête suivante dans un seul thread. Pour ce faire, exécutez la requête `SET max_threads = 1`. 3. Exécuter la requête `INSERT INTO SELECT * FROM `. Cette demande copie les données non corrompues de la table endommagée vers une autre table. Seules les données avant la partie corrompue seront copiées. 4. Redémarrez l' `clickhouse-client` pour réinitialiser l' `max_threads` valeur. @@ -162,7 +165,7 @@ KILL MUTATION [ON CLUSTER cluster] [FORMAT format] ``` -Essaie d'annuler et supprimer [mutation](alter.md#alter-mutations) actuellement en cours d'exécution. Les Mutations à annuler sont sélectionnées parmi [`system.mutations`](../operations/system_tables.md#system_tables-mutations) tableau à l'aide du filtre spécifié par le `WHERE` la clause de la `KILL` requête. +Essaie d'annuler et supprimer [mutation](alter.md#alter-mutations) actuellement en cours d'exécution. Les Mutations à annuler sont sélectionnées parmi [`system.mutations`](../../operations/system_tables.md#system_tables-mutations) tableau à l'aide du filtre spécifié par le `WHERE` la clause de la `KILL` requête. Une requête de test (`TEST`) vérifie uniquement les droits de l'utilisateur et affiche une liste de requêtes à arrêter. @@ -186,13 +189,13 @@ Les modifications déjà apportées par la mutation ne sont pas annulées. OPTIMIZE TABLE [db.]name [ON CLUSTER cluster] [PARTITION partition | PARTITION ID 'partition_id'] [FINAL] [DEDUPLICATE] ``` -Cette requête tente d'initialiser une fusion non programmée de parties de données pour les tables avec un moteur de [MergeTree](../operations/table_engines/mergetree.md) famille. +Cette requête tente d'initialiser une fusion non programmée de parties de données pour les tables avec un moteur de [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md) famille. -Le `OPTMIZE` la requête est également prise en charge pour [MaterializedView](../operations/table_engines/materializedview.md) et la [Tampon](../operations/table_engines/buffer.md) moteur. Les autres moteurs de table ne sont pas pris en charge. 
+Le `OPTIMIZE` la requête est également prise en charge pour [MaterializedView](../../engines/table_engines/special/materializedview.md) et la [Tampon](../../engines/table_engines/special/buffer.md) moteur. Les autres moteurs de table ne sont pas pris en charge.

-Lorsque `OPTIMIZE` est utilisé avec le [ReplicatedMergeTree](../operations/table_engines/replication.md) famille de moteurs de table, ClickHouse crée une tâche pour la fusion et attend l'exécution sur tous les nœuds (si le `replication_alter_partitions_sync` paramètre est activé).
+Lorsque `OPTIMIZE` est utilisé avec le [ReplicatedMergeTree](../../engines/table_engines/mergetree_family/replication.md) famille de moteurs de table, ClickHouse crée une tâche pour la fusion et attend l'exécution sur tous les nœuds (si le `replication_alter_partitions_sync` paramètre est activé).

-- Si `OPTIMIZE` n'effectue pas de fusion pour une raison quelconque, il ne notifie pas le client. Pour activer les notifications, utilisez [optimize\_throw\_if\_noop](../operations/settings/settings.md#setting-optimize_throw_if_noop) paramètre.
+- Si `OPTIMIZE` n'effectue pas de fusion pour une raison quelconque, il ne notifie pas le client. Pour activer les notifications, utilisez [optimize\_throw\_if\_noop](../../operations/settings/settings.md#setting-optimize_throw_if_noop) paramètre.
- Si vous spécifiez un `PARTITION`, seule la partition spécifiée est optimisé. [Comment définir l'expression de la partition](alter.md#alter-how-to-specify-part-expr).
- Si vous spécifiez `FINAL`, l'optimisation est effectuée, même lorsque toutes les données sont déjà dans une partie.
- Si vous spécifiez `DEDUPLICATE`, alors des lignes complètement identiques seront dédupliquées (toutes les colonnes sont comparées), cela n'a de sens que pour le moteur MergeTree.

@@ -216,7 +219,7 @@ Toutes les tables sont renommées sous verrouillage global. Renommer des tables

``` sql
SET param = value
```

-Assigner `value` à l' `param` [paramètre](../operations/settings/index.md) pour la session en cours. Vous ne pouvez pas modifier [les paramètres du serveur](../operations/server_settings/index.md) de cette façon.
+Assigner `value` à l' `param` [paramètre](../../operations/settings/index.md) pour la session en cours. Vous ne pouvez pas modifier [les paramètres du serveur](../../operations/server_configuration_parameters/index.md) de cette façon.

Vous pouvez également définir toutes les valeurs de certains paramètres de profil dans une seule requête.

@@ -224,7 +227,7 @@ Vous pouvez également définir toutes les valeurs de certains paramètres de pr

``` sql
SET profile = 'profile-name-from-the-settings-file'
```

-Pour plus d'informations, voir [Paramètre](../operations/settings/settings.md).
+Pour plus d'informations, voir [Paramètre](../../operations/settings/settings.md).

## TRUNCATE {#truncate}

``` sql
TRUNCATE TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster]
```

Supprime toutes les données d'une table. Lorsque la clause `IF EXISTS` est omis, la requête renvoie une erreur si la table n'existe pas.

-Le `TRUNCATE` la requête n'est pas prise en charge pour [Vue](../operations/table_engines/view.md), [Fichier](../operations/table_engines/file.md), [URL](../operations/table_engines/url.md) et [NULL](../operations/table_engines/null.md) table des moteurs. 
+Le `TRUNCATE` la requête n'est pas prise en charge pour [Vue](../../engines/table_engines/special/view.md), [Fichier](../../engines/table_engines/special/file.md), [URL](../../engines/table_engines/special/url.md) et [NULL](../../engines/table_engines/special/null.md) table des moteurs. ## USE {#use} diff --git a/docs/fr/sql_reference/statements/select.md b/docs/fr/sql_reference/statements/select.md new file mode 100644 index 00000000000..227313885f3 --- /dev/null +++ b/docs/fr/sql_reference/statements/select.md @@ -0,0 +1,610 @@ +--- +machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 33 +toc_title: SELECT +--- + +# Sélectionnez la syntaxe des requêtes {#select-queries-syntax} + +`SELECT` effectue la récupération des données. + +``` sql +[WITH expr_list|(subquery)] +SELECT [DISTINCT] expr_list +[FROM [db.]table | (subquery) | table_function] [FINAL] +[SAMPLE sample_coeff] +[ARRAY JOIN ...] +[GLOBAL] [ANY|ALL] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER] JOIN (subquery)|table USING columns_list +[PREWHERE expr] +[WHERE expr] +[GROUP BY expr_list] [WITH TOTALS] +[HAVING expr] +[ORDER BY expr_list] +[LIMIT [offset_value, ]n BY columns] +[LIMIT [n, ]m] +[UNION ALL ...] +[INTO OUTFILE filename] +[FORMAT format] +``` + +Toutes les clauses sont facultatives, à l'exception de la liste d'expressions requise immédiatement après SELECT. +Les clauses ci-dessous sont décrites dans presque le même ordre que dans l'exécution de la requête convoyeur. + +Si la requête omet le `DISTINCT`, `GROUP BY` et `ORDER BY` les clauses et les `IN` et `JOIN` sous-requêtes, la requête sera complètement traitée en flux, en utilisant O (1) quantité de RAM. +Sinon, la requête peut consommer beaucoup de RAM si les restrictions appropriées ne sont pas spécifiées: `max_memory_usage`, `max_rows_to_group_by`, `max_rows_to_sort`, `max_rows_in_distinct`, `max_bytes_in_distinct`, `max_rows_in_set`, `max_bytes_in_set`, `max_rows_in_join`, `max_bytes_in_join`, `max_bytes_before_external_sort`, `max_bytes_before_external_group_by`. Pour plus d'informations, consultez la section “Settings”. Il est possible d'utiliser le tri externe (sauvegarde des tables temporaires sur un disque) et l'agrégation externe. `The system does not have "merge join"`. + +### AVEC la Clause {#with-clause} + +Cette section prend en charge les Expressions de Table courantes ([CTE](https://en.wikipedia.org/wiki/Hierarchical_and_recursive_queries_in_SQL)), avec quelques limitations: +1. Les requêtes récursives ne sont pas prises en charge +2. Lorsque la sous-requête est utilisée à l'intérieur avec section, son résultat doit être scalaire avec exactement une ligne +3. Les résultats d'Expression ne sont pas disponibles dans les sous requêtes +Les résultats des expressions de clause WITH peuvent être utilisés dans la clause SELECT. 
+ +Exemple 1: Utilisation d'une expression constante comme “variable” + +``` sql +WITH '2019-08-01 15:23:00' as ts_upper_bound +SELECT * +FROM hits +WHERE + EventDate = toDate(ts_upper_bound) AND + EventTime <= ts_upper_bound +``` + +Exemple 2: Expulsion de la somme(octets) résultat de l'expression de clause SELECT de la liste de colonnes + +``` sql +WITH sum(bytes) as s +SELECT + formatReadableSize(s), + table +FROM system.parts +GROUP BY table +ORDER BY s +``` + +Exemple 3: Utilisation des résultats de la sous-requête scalaire + +``` sql +/* this example would return TOP 10 of most huge tables */ +WITH + ( + SELECT sum(bytes) + FROM system.parts + WHERE active + ) AS total_disk_usage +SELECT + (sum(bytes) / total_disk_usage) * 100 AS table_disk_usage, + table +FROM system.parts +GROUP BY table +ORDER BY table_disk_usage DESC +LIMIT 10 +``` + +Exemple 4: réutilisation de l'expression dans la sous-requête +Comme solution de contournement pour la limitation actuelle de l'utilisation de l'expression dans les sous-requêtes, Vous pouvez la dupliquer. + +``` sql +WITH ['hello'] AS hello +SELECT + hello, + * +FROM +( + WITH ['hello'] AS hello + SELECT hello +) +``` + +``` text +┌─hello─────┬─hello─────┐ +│ ['hello'] │ ['hello'] │ +└───────────┴───────────┘ +``` + +### De la Clause {#select-from} + +Si la clause FROM est omise, les données seront lues à partir `system.one` table. +Le `system.one` table contient exactement une ligne (cette table remplit le même but que la table double trouvée dans d'autres SGBD). + +Le `FROM` clause spécifie la source à partir de laquelle lire les données: + +- Table +- Sous-requête +- [Fonction de Table](../table_functions/index.md) + +`ARRAY JOIN` et le régulier `JOIN` peuvent également être inclus (voir ci-dessous). + +Au lieu d'une table, l' `SELECT` sous-requête peut être spécifiée entre parenthèses. +Contrairement à SQL standard, un synonyme n'a pas besoin d'être spécifié après une sous-requête. + +Pour exécuter une requête, toutes les colonnes mentionnées dans la requête sont extraites de la table appropriée. Toutes les colonnes non nécessaires pour la requête externe sont rejetées des sous-requêtes. +Si une requête ne répertorie aucune colonne (par exemple, `SELECT count() FROM t`), une colonne est extraite de la table de toute façon (la plus petite est préférée), afin de calculer le nombre de lignes. + +#### Modificateur FINAL {#select-from-final} + +Applicable lors de la sélection de données à partir de tables [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md)-famille de moteurs autres que `GraphiteMergeTree`. Lorsque `FINAL` est spécifié, ClickHouse fusionne complètement les données avant de renvoyer le résultat et effectue ainsi toutes les transformations de données qui se produisent lors des fusions pour le moteur de table donné. + +Également pris en charge pour: +- [Répliqué](../../engines/table_engines/mergetree_family/replication.md) les versions de `MergeTree` moteur. +- [Vue](../../engines/table_engines/special/view.md), [Tampon](../../engines/table_engines/special/buffer.md), [Distribué](../../engines/table_engines/special/distributed.md), et [MaterializedView](../../engines/table_engines/special/materializedview.md) moteurs qui fonctionnent sur d'autres moteurs, à condition qu'ils aient été créés sur `MergeTree`-tables de moteur. 
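A minimal sketch of the modifier described above, assuming a hypothetical `visits_dedup` table built on `ReplacingMergeTree`:

``` sql
-- Without FINAL, rows whose parts have not yet been merged may still appear
-- duplicated; FINAL applies the engine's merge-time logic (here: deduplication)
-- at read time, before the result is returned.
SELECT CounterID, count()
FROM visits_dedup
FINAL
GROUP BY CounterID;
```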
+ +Requêtes qui utilisent `FINAL` sont exécutés pas aussi vite que les requêtes similaires qui ne le font pas, car: + +- La requête est exécutée dans un seul thread et les données sont fusionnées lors de l'exécution de la requête. +- Les requêtes avec `FINAL` lire les colonnes de clé primaire en plus des colonnes spécifiées dans la requête. + +Dans la plupart des cas, évitez d'utiliser `FINAL`. + +### Exemple de Clause {#select-sample-clause} + +Le `SAMPLE` la clause permet un traitement de requête approximatif. + +Lorsque l'échantillonnage de données est activé, la requête n'est pas effectuée sur toutes les données, mais uniquement sur une certaine fraction de données (échantillon). Par exemple, si vous avez besoin de calculer des statistiques pour toutes les visites, il suffit d'exécuter la requête sur le 1/10 de la fraction de toutes les visites, puis multiplier le résultat par 10. + +Le traitement approximatif des requêtes peut être utile dans les cas suivants: + +- Lorsque vous avez des exigences de synchronisation strictes (comme \<100ms), mais que vous ne pouvez pas justifier le coût des ressources matérielles supplémentaires pour y répondre. +- Lorsque vos données brutes ne sont pas précises, l'approximation ne dégrade pas sensiblement la qualité. +- Les exigences commerciales ciblent des résultats approximatifs (pour la rentabilité, ou afin de commercialiser des résultats exacts aux utilisateurs premium). + +!!! note "Note" + Vous ne pouvez utiliser l'échantillonnage qu'avec les tables [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md) famille, et seulement si l'expression d'échantillonnage a été spécifiée lors de la création de la table (voir [Moteur MergeTree](../../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-creating-a-table)). + +Les caractéristiques de l'échantillonnage des données sont énumérées ci-dessous: + +- L'échantillonnage de données est un mécanisme déterministe. Le résultat de la même `SELECT .. SAMPLE` la requête est toujours le même. +- L'échantillonnage fonctionne de manière cohérente pour différentes tables. Pour les tables avec une seule clé d'échantillonnage, un échantillon avec le même coefficient sélectionne toujours le même sous-ensemble de données possibles. Par exemple, un exemple d'ID utilisateur prend des lignes avec le même sous-ensemble de tous les ID utilisateur possibles de différentes tables. Cela signifie que vous pouvez utiliser l'exemple dans les sous-requêtes dans la [IN](#select-in-operators) clause. En outre, vous pouvez joindre des échantillons en utilisant le [JOIN](#select-join) clause. +- L'échantillonnage permet de lire moins de données à partir d'un disque. Notez que vous devez spécifier l'échantillonnage clé correctement. Pour plus d'informations, voir [Création d'une Table MergeTree](../../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-creating-a-table). + +Pour l' `SAMPLE` clause la syntaxe suivante est prise en charge: + +| SAMPLE Clause Syntax | Description | +|----------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `SAMPLE k` | Ici `k` est le nombre de 0 à 1.
    La requête est exécutée sur `k` fraction des données. Exemple, `SAMPLE 0.1` exécute la requête sur 10% des données. [Lire plus](#select-sample-k) | +| `SAMPLE n` | Ici `n` est un entier suffisamment grand.
    La requête est exécutée sur un échantillon d'au moins `n` lignes (mais pas significativement plus que cela). Exemple, `SAMPLE 10000000` exécute la requête sur un minimum de 10 000 000 lignes. [Lire plus](#select-sample-n) | +| `SAMPLE k OFFSET m` | Ici `k` et `m` sont les nombres de 0 à 1.
    La requête est exécutée sur un échantillon de `k` fraction des données. Les données utilisées pour l'échantillon est compensée par `m` fraction. [Lire plus](#select-sample-offset) | + +#### SAMPLE K {#select-sample-k} + +Ici `k` est le nombre de 0 à 1 (les notations fractionnaires et décimales sont prises en charge). Exemple, `SAMPLE 1/2` ou `SAMPLE 0.5`. + +Dans un `SAMPLE k` clause, l'échantillon est prélevé à partir de la `k` fraction des données. L'exemple est illustré ci-dessous: + +``` sql +SELECT + Title, + count() * 10 AS PageViews +FROM hits_distributed +SAMPLE 0.1 +WHERE + CounterID = 34 +GROUP BY Title +ORDER BY PageViews DESC LIMIT 1000 +``` + +Dans cet exemple, la requête est exécutée sur un échantillon de 0,1 (10%) de données. Les valeurs des fonctions d'agrégat ne sont pas corrigées automatiquement, donc pour obtenir un résultat approximatif, la valeur `count()` est multiplié manuellement par 10. + +#### SAMPLE N {#select-sample-n} + +Ici `n` est un entier suffisamment grand. Exemple, `SAMPLE 10000000`. + +Dans ce cas, la requête est exécutée sur un échantillon d'au moins `n` lignes (mais pas significativement plus que cela). Exemple, `SAMPLE 10000000` exécute la requête sur un minimum de 10 000 000 lignes. + +Puisque l'unité minimale pour la lecture des données est un granule (sa taille est définie par le `index_granularity` de réglage), il est logique de définir un échantillon beaucoup plus grand que la taille du granule. + +Lors de l'utilisation de la `SAMPLE n` clause, vous ne savez pas quel pourcentage relatif de données a été traité. Donc, vous ne connaissez pas le coefficient par lequel les fonctions agrégées doivent être multipliées. L'utilisation de la `_sample_factor` colonne virtuelle pour obtenir le résultat approximatif. + +Le `_sample_factor` colonne contient des coefficients relatifs qui sont calculés dynamiquement. Cette colonne est créée automatiquement lorsque vous [créer](../../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-creating-a-table) une table avec la clé d'échantillonnage spécifiée. Les exemples d'utilisation de la `_sample_factor` colonne sont indiqués ci-dessous. + +Considérons la table `visits` qui contient des statistiques sur les visites de site. Le premier exemple montre comment calculer le nombre de pages vues: + +``` sql +SELECT sum(PageViews * _sample_factor) +FROM visits +SAMPLE 10000000 +``` + +L'exemple suivant montre comment calculer le nombre total de visites: + +``` sql +SELECT sum(_sample_factor) +FROM visits +SAMPLE 10000000 +``` + +L'exemple ci-dessous montre comment calculer la durée moyenne de la session. Notez que vous n'avez pas besoin d'utiliser le coefficient relatif pour calculer les valeurs moyennes. + +``` sql +SELECT avg(Duration) +FROM visits +SAMPLE 10000000 +``` + +#### SAMPLE K OFFSET M {#select-sample-offset} + +Ici `k` et `m` sont des nombres de 0 à 1. Des exemples sont présentés ci-dessous. + +**Exemple 1** + +``` sql +SAMPLE 1/10 +``` + +Dans cet exemple, l'échantillon représente 1 / 10e de toutes les données: + +`[++------------]` + +**Exemple 2** + +``` sql +SAMPLE 1/10 OFFSET 1/2 +``` + +Ici, un échantillon de 10% est prélevé à partir de la seconde moitié des données. + +`[------++------]` + +### Clause de jointure de tableau {#select-array-join-clause} + +Permet l'exécution de `JOIN` avec un tableau ou une structure de données imbriquée. 
L'intention est similaire à la [arrayJoin](../../sql_reference/functions/array_join.md#functions_arrayjoin) la fonction, mais sa fonctionnalité est plus large. + +``` sql +SELECT +FROM +[LEFT] ARRAY JOIN +[WHERE|PREWHERE ] +... +``` + +Vous pouvez spécifier qu'un seul `ARRAY JOIN` la clause dans une requête. + +L'ordre d'exécution de la requête est optimisé lors de l'exécution `ARRAY JOIN`. Bien `ARRAY JOIN` doit toujours être spécifié avant l' `WHERE/PREWHERE` clause, il peut être effectué soit avant `WHERE/PREWHERE` (si le résultat est nécessaire dans cette clause), ou après l'avoir terminé (pour réduire le volume de calculs). L'ordre de traitement est contrôlée par l'optimiseur de requête. + +Types pris en charge de `ARRAY JOIN` sont énumérés ci-dessous: + +- `ARRAY JOIN` - Dans ce cas, des tableaux vides ne sont pas inclus dans le résultat de `JOIN`. +- `LEFT ARRAY JOIN` - Le résultat de `JOIN` contient des lignes avec des tableaux vides. La valeur d'un tableau vide est définie sur la valeur par défaut pour le type d'élément de tableau (généralement 0, chaîne vide ou NULL). + +Les exemples ci-dessous illustrent l'utilisation de la `ARRAY JOIN` et `LEFT ARRAY JOIN` clause. Créons une table avec un [Tableau](../../sql_reference/data_types/array.md) tapez colonne et insérez des valeurs dedans: + +``` sql +CREATE TABLE arrays_test +( + s String, + arr Array(UInt8) +) ENGINE = Memory; + +INSERT INTO arrays_test +VALUES ('Hello', [1,2]), ('World', [3,4,5]), ('Goodbye', []); +``` + +``` text +┌─s───────────┬─arr─────┐ +│ Hello │ [1,2] │ +│ World │ [3,4,5] │ +│ Goodbye │ [] │ +└─────────────┴─────────┘ +``` + +L'exemple ci-dessous utilise la `ARRAY JOIN` clause: + +``` sql +SELECT s, arr +FROM arrays_test +ARRAY JOIN arr; +``` + +``` text +┌─s─────┬─arr─┐ +│ Hello │ 1 │ +│ Hello │ 2 │ +│ World │ 3 │ +│ World │ 4 │ +│ World │ 5 │ +└───────┴─────┘ +``` + +L'exemple suivant utilise l' `LEFT ARRAY JOIN` clause: + +``` sql +SELECT s, arr +FROM arrays_test +LEFT ARRAY JOIN arr; +``` + +``` text +┌─s───────────┬─arr─┐ +│ Hello │ 1 │ +│ Hello │ 2 │ +│ World │ 3 │ +│ World │ 4 │ +│ World │ 5 │ +│ Goodbye │ 0 │ +└─────────────┴─────┘ +``` + +#### À L'Aide D'Alias {#using-aliases} + +Un alias peut être spécifié pour un tableau `ARRAY JOIN` clause. Dans ce cas, un élément de tableau peut être consulté par ce pseudonyme, mais le tableau lui-même est accessible par le nom d'origine. Exemple: + +``` sql +SELECT s, arr, a +FROM arrays_test +ARRAY JOIN arr AS a; +``` + +``` text +┌─s─────┬─arr─────┬─a─┐ +│ Hello │ [1,2] │ 1 │ +│ Hello │ [1,2] │ 2 │ +│ World │ [3,4,5] │ 3 │ +│ World │ [3,4,5] │ 4 │ +│ World │ [3,4,5] │ 5 │ +└───────┴─────────┴───┘ +``` + +En utilisant des alias, vous pouvez effectuer `ARRAY JOIN` avec un groupe externe. Exemple: + +``` sql +SELECT s, arr_external +FROM arrays_test +ARRAY JOIN [1, 2, 3] AS arr_external; +``` + +``` text +┌─s───────────┬─arr_external─┐ +│ Hello │ 1 │ +│ Hello │ 2 │ +│ Hello │ 3 │ +│ World │ 1 │ +│ World │ 2 │ +│ World │ 3 │ +│ Goodbye │ 1 │ +│ Goodbye │ 2 │ +│ Goodbye │ 3 │ +└─────────────┴──────────────┘ +``` + +Plusieurs tableaux peuvent être séparés par des virgules `ARRAY JOIN` clause. Dans ce cas, `JOIN` est effectuée avec eux simultanément (la somme directe, pas le produit cartésien). Notez que tous les tableaux doivent avoir la même taille. 
Exemple: + +``` sql +SELECT s, arr, a, num, mapped +FROM arrays_test +ARRAY JOIN arr AS a, arrayEnumerate(arr) AS num, arrayMap(x -> x + 1, arr) AS mapped; +``` + +``` text +┌─s─────┬─arr─────┬─a─┬─num─┬─mapped─┐ +│ Hello │ [1,2] │ 1 │ 1 │ 2 │ +│ Hello │ [1,2] │ 2 │ 2 │ 3 │ +│ World │ [3,4,5] │ 3 │ 1 │ 4 │ +│ World │ [3,4,5] │ 4 │ 2 │ 5 │ +│ World │ [3,4,5] │ 5 │ 3 │ 6 │ +└───────┴─────────┴───┴─────┴────────┘ +``` + +L'exemple ci-dessous utilise la [arrayEnumerate](../../sql_reference/functions/array_functions.md#array_functions-arrayenumerate) fonction: + +``` sql +SELECT s, arr, a, num, arrayEnumerate(arr) +FROM arrays_test +ARRAY JOIN arr AS a, arrayEnumerate(arr) AS num; +``` + +``` text +┌─s─────┬─arr─────┬─a─┬─num─┬─arrayEnumerate(arr)─┐ +│ Hello │ [1,2] │ 1 │ 1 │ [1,2] │ +│ Hello │ [1,2] │ 2 │ 2 │ [1,2] │ +│ World │ [3,4,5] │ 3 │ 1 │ [1,2,3] │ +│ World │ [3,4,5] │ 4 │ 2 │ [1,2,3] │ +│ World │ [3,4,5] │ 5 │ 3 │ [1,2,3] │ +└───────┴─────────┴───┴─────┴─────────────────────┘ +``` + +#### Jointure de tableau avec la Structure de données imbriquée {#array-join-with-nested-data-structure} + +`ARRAY`Rejoindre " fonctionne également avec [structures de données imbriquées](../../sql_reference/data_types/nested_data_structures/nested.md). Exemple: + +``` sql +CREATE TABLE nested_test +( + s String, + nest Nested( + x UInt8, + y UInt32) +) ENGINE = Memory; + +INSERT INTO nested_test +VALUES ('Hello', [1,2], [10,20]), ('World', [3,4,5], [30,40,50]), ('Goodbye', [], []); +``` + +``` text +┌─s───────┬─nest.x──┬─nest.y─────┐ +│ Hello │ [1,2] │ [10,20] │ +│ World │ [3,4,5] │ [30,40,50] │ +│ Goodbye │ [] │ [] │ +└─────────┴─────────┴────────────┘ +``` + +``` sql +SELECT s, `nest.x`, `nest.y` +FROM nested_test +ARRAY JOIN nest; +``` + +``` text +┌─s─────┬─nest.x─┬─nest.y─┐ +│ Hello │ 1 │ 10 │ +│ Hello │ 2 │ 20 │ +│ World │ 3 │ 30 │ +│ World │ 4 │ 40 │ +│ World │ 5 │ 50 │ +└───────┴────────┴────────┘ +``` + +Lorsque vous spécifiez des noms de structures de données imbriquées dans `ARRAY JOIN` le sens est le même que `ARRAY JOIN` avec tous les éléments du tableau qui la compose. Des exemples sont énumérés ci-dessous: + +``` sql +SELECT s, `nest.x`, `nest.y` +FROM nested_test +ARRAY JOIN `nest.x`, `nest.y`; +``` + +``` text +┌─s─────┬─nest.x─┬─nest.y─┐ +│ Hello │ 1 │ 10 │ +│ Hello │ 2 │ 20 │ +│ World │ 3 │ 30 │ +│ World │ 4 │ 40 │ +│ World │ 5 │ 50 │ +└───────┴────────┴────────┘ +``` + +Cette variation a également du sens: + +``` sql +SELECT s, `nest.x`, `nest.y` +FROM nested_test +ARRAY JOIN `nest.x`; +``` + +``` text +┌─s─────┬─nest.x─┬─nest.y─────┐ +│ Hello │ 1 │ [10,20] │ +│ Hello │ 2 │ [10,20] │ +│ World │ 3 │ [30,40,50] │ +│ World │ 4 │ [30,40,50] │ +│ World │ 5 │ [30,40,50] │ +└───────┴────────┴────────────┘ +``` + +Un alias peut être utilisé pour une structure de données imbriquée, afin de sélectionner `JOIN` le résultat ou le tableau source. 
Exemple: + +``` sql +SELECT s, `n.x`, `n.y`, `nest.x`, `nest.y` +FROM nested_test +ARRAY JOIN nest AS n; +``` + +``` text +┌─s─────┬─n.x─┬─n.y─┬─nest.x──┬─nest.y─────┐ +│ Hello │ 1 │ 10 │ [1,2] │ [10,20] │ +│ Hello │ 2 │ 20 │ [1,2] │ [10,20] │ +│ World │ 3 │ 30 │ [3,4,5] │ [30,40,50] │ +│ World │ 4 │ 40 │ [3,4,5] │ [30,40,50] │ +│ World │ 5 │ 50 │ [3,4,5] │ [30,40,50] │ +└───────┴─────┴─────┴─────────┴────────────┘ +``` + +Exemple d'utilisation de l' [arrayEnumerate](../../sql_reference/functions/array_functions.md#array_functions-arrayenumerate) fonction: + +``` sql +SELECT s, `n.x`, `n.y`, `nest.x`, `nest.y`, num +FROM nested_test +ARRAY JOIN nest AS n, arrayEnumerate(`nest.x`) AS num; +``` + +``` text +┌─s─────┬─n.x─┬─n.y─┬─nest.x──┬─nest.y─────┬─num─┐ +│ Hello │ 1 │ 10 │ [1,2] │ [10,20] │ 1 │ +│ Hello │ 2 │ 20 │ [1,2] │ [10,20] │ 2 │ +│ World │ 3 │ 30 │ [3,4,5] │ [30,40,50] │ 1 │ +│ World │ 4 │ 40 │ [3,4,5] │ [30,40,50] │ 2 │ +│ World │ 5 │ 50 │ [3,4,5] │ [30,40,50] │ 3 │ +└───────┴─────┴─────┴─────────┴────────────┴─────┘ +``` + +### Clause de JOINTURE {#select-join} + +Rejoint les données dans la normale [SQL JOIN](https://en.wikipedia.org/wiki/Join_(SQL)) sens. + +!!! info "Note" + Pas liées à [ARRAY JOIN](#select-array-join-clause). + +``` sql +SELECT +FROM +[GLOBAL] [ANY|ALL] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER] JOIN +(ON )|(USING ) ... +``` + +Les noms de table peuvent être spécifiés au lieu de `` et ``. Ceci est équivalent à la `SELECT * FROM table` sous-requête, sauf dans un cas particulier lorsque la table a [Rejoindre](../../engines/table_engines/special/join.md) engine – an array prepared for joining. + +#### Types Pris En Charge De `JOIN` {#select-join-types} + +- `INNER JOIN` (ou `JOIN`) +- `LEFT JOIN` (ou `LEFT OUTER JOIN`) +- `RIGHT JOIN` (ou `RIGHT OUTER JOIN`) +- `FULL JOIN` (ou `FULL OUTER JOIN`) +- `CROSS JOIN` (ou `,` ) + +Voir la norme [SQL JOIN](https://en.wikipedia.org/wiki/Join_(SQL)) Description. + +#### Plusieurs REJOINDRE {#multiple-join} + +En effectuant des requêtes, ClickHouse réécrit les jointures multi-tables dans la séquence des jointures à deux tables. Par exemple, S'il y a quatre tables pour join clickhouse rejoint la première et la seconde, puis rejoint le résultat avec la troisième table, et à la dernière étape, il rejoint la quatrième. + +Si une requête contient l' `WHERE` clickhouse essaie de pousser les filtres de cette clause à travers la jointure intermédiaire. S'il ne peut pas appliquer le filtre à chaque jointure intermédiaire, ClickHouse applique les filtres une fois toutes les jointures terminées. + +Nous recommandons l' `JOIN ON` ou `JOIN USING` syntaxe pour créer des requêtes. Exemple: + +``` sql +SELECT * FROM t1 JOIN t2 ON t1.a = t2.a JOIN t3 ON t1.a = t3.a +``` + +Vous pouvez utiliser des listes de tables séparées par des virgules `FROM` clause. Exemple: + +``` sql +SELECT * FROM t1, t2, t3 WHERE t1.a = t2.a AND t1.a = t3.a +``` + +Ne mélangez pas ces syntaxes. + +ClickHouse ne supporte pas directement la syntaxe avec des virgules, Nous ne recommandons donc pas de les utiliser. L'algorithme tente de réécrire la requête en termes de `CROSS JOIN` et `INNER JOIN` clauses et procède ensuite au traitement des requêtes. Lors de la réécriture de la requête, ClickHouse tente d'optimiser les performances et la consommation de mémoire. Par défaut, ClickHouse traite les virgules comme `INNER JOIN` clause et convertit `INNER JOIN` de `CROSS JOIN` lorsque l'algorithme ne peut pas garantir que `INNER JOIN` retourne les données requises. 
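A small sketch of the rewrite described above, using hypothetical tables `t1` and `t2`:

``` sql
-- With an equality condition in WHERE, the comma syntax can be rewritten:
SELECT * FROM t1, t2 WHERE t1.a = t2.a;
-- is processed as:
SELECT * FROM t1 INNER JOIN t2 ON t1.a = t2.a;

-- Without a usable equality condition, it degrades to CROSS JOIN plus a filter:
SELECT * FROM t1, t2 WHERE t1.a < t2.a;
```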
+ +#### Rigueur {#select-join-strictness} + +- `ALL` — If the right table has several matching rows, ClickHouse creates a [Produit cartésien](https://en.wikipedia.org/wiki/Cartesian_product) à partir des lignes correspondantes. C'est la norme `JOIN` comportement en SQL. +- `ANY` — If the right table has several matching rows, only the first one found is joined. If the right table has only one matching row, the results of queries with `ANY` et `ALL` les mots clés sont les mêmes. +- `ASOF` — For joining sequences with a non-exact match. `ASOF JOIN` l'utilisation est décrite ci-dessous. + +**ASOF joindre L'utilisation** + +`ASOF JOIN` est utile lorsque vous devez joindre des enregistrements qui n'ont pas de correspondance exacte. + +Tables pour `ASOF JOIN` doit avoir une colonne de séquence ordonnée. Cette colonne ne peut pas être seule dans une table et doit être l'un des types de données: `UInt32`, `UInt64`, `Float32`, `Float64`, `Date`, et `DateTime`. + +Syntaxe `ASOF JOIN ... ON`: + +``` sql +SELECT expressions_list +FROM table_1 +ASOF LEFT JOIN table_2 +ON equi_cond AND closest_match_cond +``` + +Vous pouvez utiliser n'importe quel nombre de conditions d'égalité et exactement une condition de correspondance la plus proche. Exemple, `SELECT count() FROM table_1 ASOF LEFT JOIN table_2 ON table_1.a == table_2.b AND table_2.t <= table_1.t`. + +Conditions prises en charge pour la correspondance la plus proche: `>`, `>=`, `<`, `<=`. + +Syntaxe `ASOF JOIN ... USING`: + +``` sql +SELECT expressions_list +FROM table_1 +ASOF JOIN table_2 +USING (equi_column1, ... equi_columnN, asof_column) +``` + +`ASOF JOIN` utiliser `equi_columnX` pour rejoindre sur l'égalité et `asof_column` pour rejoindre le match le plus proche avec le `table_1.asof_column >= table_2.asof_column` condition. Le `asof_column` colonne toujours la dernière dans le `USING` clause. + +Par exemple, considérez les tableaux suivants: + +\`\`\` texte +table\_1 table\_2 + +événement \| ev\_time \| user\_id événement \| ev\_time \| user\_id diff --git a/docs/fr/query_language/show.md b/docs/fr/sql_reference/statements/show.md similarity index 87% rename from docs/fr/query_language/show.md rename to docs/fr/sql_reference/statements/show.md index 1125dec2b53..d609c5ea4ef 100644 --- a/docs/fr/query_language/show.md +++ b/docs/fr/sql_reference/statements/show.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 38 +toc_title: SHOW --- # Afficher les requêtes {#show-queries} @@ -27,7 +30,7 @@ Cette requête est identique à `SELECT name FROM system.databases [INTO OUTFILE SHOW PROCESSLIST [INTO OUTFILE filename] [FORMAT format] ``` -Sorties le contenu de la [système.processus](../operations/system_tables.md#system_tables-processes) table, qui contient une liste de requêtes en cours de traitement en ce moment, à l'exception `SHOW PROCESSLIST` requête. +Sorties le contenu de la [système.processus](../../operations/system_tables.md#system_tables-processes) table, qui contient une liste de requêtes en cours de traitement en ce moment, à l'exception `SHOW PROCESSLIST` requête. Le `SELECT * FROM system.processes` requête renvoie des données sur toutes les requêtes en cours. @@ -70,7 +73,7 @@ SHOW TABLES FROM system LIKE '%co%' LIMIT 2 ## SHOW DICTIONARIES {#show-dictionaries} -Affiche une liste de [dictionnaires externes](dicts/external_dicts.md). +Affiche une liste de [dictionnaires externes](../../sql_reference/dictionaries/external_dictionaries/external_dicts.md). 
``` sql SHOW DICTIONARIES [FROM ] [LIKE ''] [LIMIT ] [INTO OUTFILE ] [FORMAT ] diff --git a/docs/fr/query_language/system.md b/docs/fr/sql_reference/statements/system.md similarity index 78% rename from docs/fr/query_language/system.md rename to docs/fr/sql_reference/statements/system.md index 621287fb13f..0494f24f62b 100644 --- a/docs/fr/query_language/system.md +++ b/docs/fr/sql_reference/statements/system.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 37 +toc_title: SYSTEM --- # SYSTÈME de Requêtes {#query-language-system} @@ -21,10 +24,10 @@ machine_translated: true ## RELOAD DICTIONARIES {#query_language-system-reload-dictionaries} Recharge tous les dictionnaires qui ont déjà été chargés avec succès. -Par défaut, les dictionnaires sont chargés paresseusement (voir [dictionaries\_lazy\_load](../operations/server_settings/settings.md#server_settings-dictionaries_lazy_load)), donc au lieu d'être chargés automatiquement au démarrage, ils sont initialisés lors du premier accès via la fonction dictGet ou sélectionnez dans les tables avec ENGINE = Dictionary . Le `SYSTEM RELOAD DICTIONARIES` query recharge ces dictionnaires (chargés). +Par défaut, les dictionnaires sont chargés paresseusement (voir [dictionaries\_lazy\_load](../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-dictionaries_lazy_load)), donc au lieu d'être chargés automatiquement au démarrage, ils sont initialisés lors du premier accès via la fonction dictGet ou sélectionnez dans les tables avec ENGINE = Dictionary . Le `SYSTEM RELOAD DICTIONARIES` query recharge ces dictionnaires (chargés). Retourne toujours `Ok.` quel que soit le résultat de la mise à jour du dictionnaire. -## Recharger le dictionnaire dictionary\_name {#query_language-system-reload-dictionary} +## Recharger le dictionnaire Dictionary\_name {#query_language-system-reload-dictionary} Recharge complètement un dictionnaire `dictionary_name`, quel que soit l'état du dictionnaire (LOADED / NOT\_LOADED / FAILED). Retourne toujours `Ok.` quel que soit le résultat de la mise à jour du dictionnaire. @@ -62,7 +65,7 @@ Annule le processus de ClickHouse (comme `kill -9 {$ pid_clickhouse-server}`) ## Gestion Des Tables Distribuées {#query-language-system-distributed} -ClickHouse peut gérer [distribué](../operations/table_engines/distributed.md) table. Lorsqu'un utilisateur insère des données dans ces tables, ClickHouse crée d'abord une file d'attente des données qui doivent être envoyées aux nœuds de cluster, puis l'envoie de manière asynchrone. Vous pouvez gérer le traitement des files d'attente avec [STOP DISTRIBUTED SENDS](#query_language-system-stop-distributed-sends), [FLUSH DISTRIBUTED](#query_language-system-flush-distributed), et [START DISTRIBUTED SENDS](#query_language-system-start-distributed-sends) requête. Vous pouvez également insérer de manière synchrone des données distribuées avec `insert_distributed_sync` paramètre. +ClickHouse peut gérer [distribué](../../engines/table_engines/special/distributed.md) table. Lorsqu'un utilisateur insère des données dans ces tables, ClickHouse crée d'abord une file d'attente des données qui doivent être envoyées aux nœuds de cluster, puis l'envoie de manière asynchrone. 
Vous pouvez gérer le traitement des files d'attente avec [STOP DISTRIBUTED SENDS](#query_language-system-stop-distributed-sends), [FLUSH DISTRIBUTED](#query_language-system-flush-distributed), et [START DISTRIBUTED SENDS](#query_language-system-start-distributed-sends) requête. Vous pouvez également insérer de manière synchrone des données distribuées avec `insert_distributed_sync` paramètre. ### STOP DISTRIBUTED SENDS {#query_language-system-stop-distributed-sends} diff --git a/docs/fr/query_language/syntax.md b/docs/fr/sql_reference/syntax.md similarity index 95% rename from docs/fr/query_language/syntax.md rename to docs/fr/sql_reference/syntax.md index 416775e6e29..ba27e16dc8f 100644 --- a/docs/fr/query_language/syntax.md +++ b/docs/fr/sql_reference/syntax.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 31 +toc_title: Syntaxe --- # Syntaxe {#syntax} @@ -73,13 +76,13 @@ Un littéral numérique tente d'être analysé: - Sinon, une erreur est renvoyée. La valeur correspondante aura le plus petit type dans lequel la valeur correspond. -Par exemple, 1 est analysé comme `UInt8`, mais 256 est analysé comme `UInt16`. Pour plus d'informations, voir [Types de données](../data_types/index.md). +Par exemple, 1 est analysé comme `UInt8`, mais 256 est analysé comme `UInt16`. Pour plus d'informations, voir [Types de données](../sql_reference/data_types/index.md). Exemple: `1`, `18446744073709551615`, `0xDEADBEEF`, `01`, `0.1`, `1e100`, `-1e-100`, `inf`, `nan`. ### Chaîne {#syntax-string-literal} -Seuls les littéraux de chaîne entre guillemets simples sont pris en charge. Le clos de caractères barre oblique inverse échappé. Les séquences d'échappement suivantes ont une valeur spéciale correspondante: `\b`, `\f`, `\r`, `\n`, `\t`, `\0`, `\a`, `\v`, `\xHH`. Dans tous les autres cas, des séquences d'échappement au format `\c`, où `c` est un caractère, sont convertis à `c`. Cela signifie que vous pouvez utiliser les séquences `\'`et`\\`. La valeur aurez l' [Chaîne](../data_types/string.md) type. +Seuls les littéraux de chaîne entre guillemets simples sont pris en charge. Le clos de caractères barre oblique inverse échappé. Les séquences d'échappement suivantes ont une valeur spéciale correspondante: `\b`, `\f`, `\r`, `\n`, `\t`, `\0`, `\a`, `\v`, `\xHH`. Dans tous les autres cas, des séquences d'échappement au format `\c`, où `c` est un caractère, sont convertis à `c`. Cela signifie que vous pouvez utiliser les séquences `\'`et`\\`. La valeur aurez l' [Chaîne](../sql_reference/data_types/string.md) type. L'ensemble minimum de caractères que vous devez échapper dans les littéraux de chaîne: `'` et `\`. Apostrophe peut être échappé avec l'apostrophe, les littéraux `'It\'s'` et `'It''s'` sont égaux. @@ -88,13 +91,13 @@ L'ensemble minimum de caractères que vous devez échapper dans les littéraux d Les Constructions sont prises en charge pour les tableaux: `[1, 2, 3]` et les tuples: `(1, 'Hello, world!', 2)`.. En fait, ce ne sont pas des littéraux, mais des expressions avec l'opérateur de création de tableau et l'opérateur de création de tuple, respectivement. Un tableau doit être composé d'au moins un élément, et un tuple doit avoir au moins deux éléments. -Les Tuples ont un but spécial pour l'usage dans le `IN` clause de a `SELECT` requête. 
Les Tuples peuvent être obtenus à la suite d'une requête, mais ils ne peuvent pas être enregistrées dans une base de données (à l'exception de [Mémoire](../operations/table_engines/memory.md) table). +Les Tuples ont un but spécial pour l'usage dans le `IN` clause de a `SELECT` requête. Les Tuples peuvent être obtenus à la suite d'une requête, mais ils ne peuvent pas être enregistrées dans une base de données (à l'exception de [Mémoire](../engines/table_engines/special/memory.md) table). ### NULL {#null-literal} Indique que la valeur est manquante. -Afin de stocker `NULL` dans un champ de table, il doit être de la [Nullable](../data_types/nullable.md) type. +Afin de stocker `NULL` dans un champ de table, il doit être de la [Nullable](../sql_reference/data_types/nullable.md) type. Selon le format de données (entrée ou sortie), `NULL` peut avoir une représentation différente. Pour plus d'informations, consultez la documentation de [formats de données](../interfaces/formats.md#formats). @@ -128,7 +131,7 @@ expr AS alias For example, `SELECT table_name_alias.column_name FROM table_name table_name_alias`. - In the [CAST](functions/type_conversion_functions.md#type_conversion_function-cast) function, the `AS` keyword has another meaning. See the description of the function. + In the [CAST](sql_reference/functions/type_conversion_functions.md#type_conversion_function-cast) function, the `AS` keyword has another meaning. See the description of the function. - `expr` — Any expression supported by ClickHouse. diff --git a/docs/fr/query_language/table_functions/file.md b/docs/fr/sql_reference/table_functions/file.md similarity index 88% rename from docs/fr/query_language/table_functions/file.md rename to docs/fr/sql_reference/table_functions/file.md index 1ea8fc7b531..3d330b1f20f 100644 --- a/docs/fr/query_language/table_functions/file.md +++ b/docs/fr/sql_reference/table_functions/file.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 37 +toc_title: fichier --- # fichier {#file} @@ -12,7 +15,7 @@ file(path, format, structure) **Les paramètres d'entrée** -- `path` — The relative path to the file from [user\_files\_path](../../operations/server_settings/settings.md#server_settings-user_files_path). Chemin d'accès à la prise en charge des fichiers suivant les globs en mode Lecture seule: `*`, `?`, `{abc,def}` et `{N..M}` où `N`, `M` — numbers, \``'abc', 'def'` — strings. +- `path` — The relative path to the file from [user\_files\_path](../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-user_files_path). Chemin d'accès à la prise en charge des fichiers suivant les globs en mode Lecture seule: `*`, `?`, `{abc,def}` et `{N..M}` où `N`, `M` — numbers, \``'abc', 'def'` — strings. - `format` — The [format](../../interfaces/formats.md#formats) de le fichier. - `structure` — Structure of the table. Format `'column1_name column1_type, column2_name column2_type, ...'`. @@ -63,7 +66,7 @@ Plusieurs composants de chemin peuvent avoir des globs. Pour être traité, le f - `{some_string,another_string,yet_another_one}` — Substitutes any of strings `'some_string', 'another_string', 'yet_another_one'`. - `{N..M}` — Substitutes any number in range from N to M including both borders. -Les Constructions avec `{}` sont similaires à l' [fonction de table à distance](../../query_language/table_functions/remote.md)). 
+Les Constructions avec `{}` sont similaires à l' [fonction de table à distance](../../sql_reference/table_functions/remote.md)). **Exemple** diff --git a/docs/fr/query_language/table_functions/generate.md b/docs/fr/sql_reference/table_functions/generate.md similarity index 95% rename from docs/fr/query_language/table_functions/generate.md rename to docs/fr/sql_reference/table_functions/generate.md index a02ee1b9385..a4ccc30ec88 100644 --- a/docs/fr/query_language/table_functions/generate.md +++ b/docs/fr/sql_reference/table_functions/generate.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 47 +toc_title: generateRandom --- # generateRandom {#generaterandom} diff --git a/docs/fr/query_language/table_functions/hdfs.md b/docs/fr/sql_reference/table_functions/hdfs.md similarity index 95% rename from docs/fr/query_language/table_functions/hdfs.md rename to docs/fr/sql_reference/table_functions/hdfs.md index d69206322d4..01607102276 100644 --- a/docs/fr/query_language/table_functions/hdfs.md +++ b/docs/fr/sql_reference/table_functions/hdfs.md @@ -1,5 +1,8 @@ --- machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_priority: 45 +toc_title: hdfs --- # hdfs {#hdfs} @@ -46,7 +49,7 @@ Plusieurs composants de chemin peuvent avoir des globs. Pour être traité, le f - `{some_string,another_string,yet_another_one}` — Substitutes any of strings `'some_string', 'another_string', 'yet_another_one'`. - `{N..M}` — Substitutes any number in range from N to M including both borders. -Les Constructions avec `{}` sont similaires à l' [fonction de table à distance](../../query_language/table_functions/remote.md)). +Les Constructions avec `{}` sont similaires à l' [fonction de table à distance](../../sql_reference/table_functions/remote.md)). **Exemple** diff --git a/docs/fr/sql_reference/table_functions/index.md b/docs/fr/sql_reference/table_functions/index.md new file mode 100644 index 00000000000..1a43dd298c1 --- /dev/null +++ b/docs/fr/sql_reference/table_functions/index.md @@ -0,0 +1,38 @@ +--- +machine_translated: true +machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 +toc_folder_title: Table Functions +toc_priority: 34 +toc_title: Introduction +--- + +# Les Fonctions De Table {#table-functions} + +Les fonctions de Table sont des méthodes pour construire des tables. + +Vous pouvez utiliser les fonctions de table dans: + +- [FROM](../statements/select.md#select-from) la clause de la `SELECT` requête. + + The method for creating a temporary table that is available only in the current query. The table is deleted when the query finishes. + +- [Créer une TABLE en tant que \< table\_function ()\>](../statements/create.md#create-table-query) requête. + + It's one of the methods of creating a table. + +!!! warning "Avertissement" + Vous ne pouvez pas utiliser les fonctions de table si [allow\_ddl](../../operations/settings/permissions_for_queries.md#settings_allow_ddl) paramètre est désactivé. + +| Fonction | Description | +|-----------------------|-------------------------------------------------------------------------------------------------------------------------------------| +| [fichier](file.md) | Crée un [Fichier](../../engines/table_engines/special/file.md)-moteur de table. | +| [fusionner](merge.md) | Crée un [Fusionner](../../engines/table_engines/special/merge.md)-moteur de table. 
+| [nombre](numbers.md)  | Crée une table avec une seule colonne remplie de nombres entiers.                                                                    |
+| [distant](remote.md)  | Vous permet d'accéder à des serveurs distants sans créer une table [Distribué](../../engines/table_engines/special/distributed.md).  |
+| [URL](url.md)         | Crée une table avec le moteur [URL](../../engines/table_engines/special/url.md).                                                     |
+| [mysql](mysql.md)     | Crée une table avec le moteur [MySQL](../../engines/table_engines/integrations/mysql.md).                                            |
+| [jdbc](jdbc.md)       | Crée une table avec le moteur [JDBC](../../engines/table_engines/integrations/jdbc.md).                                              |
+| [ODBC](odbc.md)       | Crée une table avec le moteur [ODBC](../../engines/table_engines/integrations/odbc.md).                                              |
+| [hdfs](hdfs.md)       | Crée une table avec le moteur [HDFS](../../engines/table_engines/integrations/hdfs.md).                                              |
+
+[Article Original](https://clickhouse.tech/docs/en/query_language/table_functions/)
diff --git a/docs/fr/query_language/table_functions/input.md b/docs/fr/sql_reference/table_functions/input.md
similarity index 95%
rename from docs/fr/query_language/table_functions/input.md
rename to docs/fr/sql_reference/table_functions/input.md
index 5f717431c6a..9e3ea01e35e 100644
--- a/docs/fr/query_language/table_functions/input.md
+++ b/docs/fr/sql_reference/table_functions/input.md
@@ -1,5 +1,8 @@
 ---
 machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_priority: 46
+toc_title: "entr\xE9e"
 ---
 
 # entrée {#input}
 
diff --git a/docs/fr/query_language/table_functions/jdbc.md b/docs/fr/sql_reference/table_functions/jdbc.md
similarity index 88%
rename from docs/fr/query_language/table_functions/jdbc.md
rename to docs/fr/sql_reference/table_functions/jdbc.md
index 20f70e53c7c..dcdd332b3fb 100644
--- a/docs/fr/query_language/table_functions/jdbc.md
+++ b/docs/fr/sql_reference/table_functions/jdbc.md
@@ -1,5 +1,8 @@
 ---
 machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_priority: 43
+toc_title: jdbc
 ---
 
 # jdbc {#table-function-jdbc}
 
diff --git a/docs/fr/query_language/table_functions/merge.md b/docs/fr/sql_reference/table_functions/merge.md
similarity index 79%
rename from docs/fr/query_language/table_functions/merge.md
rename to docs/fr/sql_reference/table_functions/merge.md
index 23759fae9f1..6e9bf8216d3 100644
--- a/docs/fr/query_language/table_functions/merge.md
+++ b/docs/fr/sql_reference/table_functions/merge.md
@@ -1,5 +1,8 @@
 ---
 machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_priority: 38
+toc_title: fusionner
 ---
 
 # fusionner {#merge}
 
diff --git a/docs/fr/query_language/table_functions/mysql.md b/docs/fr/sql_reference/table_functions/mysql.md
similarity index 86%
rename from docs/fr/query_language/table_functions/mysql.md
rename to docs/fr/sql_reference/table_functions/mysql.md
index 465fd1da1f6..d5bc698fc30 100644
--- a/docs/fr/query_language/table_functions/mysql.md
+++ b/docs/fr/sql_reference/table_functions/mysql.md
@@ -1,5 +1,8 @@
 ---
 machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_priority: 42
+toc_title: mysql
 ---
 
 # mysql {#mysql}
 
@@ -55,11 +58,11 @@ mysql> insert into test (`int_id`, `float`) VALUES (1,2);
 Query OK, 1 row affected (0,00 sec)
 
 mysql> select * from test;
-+--------+--------------+-------+----------------+
++------+----------+-----+----------+
 | int_id | int_nullable | float | float_nullable |
-+--------+--------------+-------+----------------+
++------+----------+-----+----------+
 | 1 | NULL | 2 | NULL |
-+--------+--------------+-------+----------------+
++------+----------+-----+----------+
 1 row in set (0,00 sec)
 ```
 
@@ -77,7 +80,7 @@ SELECT * FROM mysql('localhost:3306', 'test', 'test', 'bayonet', '123')
 ```
 
 ## Voir Aussi {#see-also}
 
-- [Le ‘MySQL’ tableau moteur](../../operations/table_engines/mysql.md)
-- [Utilisation de MySQL comme source de dictionnaire externe](../dicts/external_dicts_dict_sources.md#dicts-external_dicts_dict_sources-mysql)
+- [Le moteur de table ‘MySQL’](../../engines/table_engines/integrations/mysql.md)
+- [Utilisation de MySQL comme source de dictionnaire externe](../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md#dicts-external_dicts_dict_sources-mysql)
 
 [Article Original](https://clickhouse.tech/docs/en/query_language/table_functions/mysql/)
diff --git a/docs/fr/query_language/table_functions/numbers.md b/docs/fr/sql_reference/table_functions/numbers.md
similarity index 89%
rename from docs/fr/query_language/table_functions/numbers.md
rename to docs/fr/sql_reference/table_functions/numbers.md
index 663b481cb3d..6e44a04184a 100644
--- a/docs/fr/query_language/table_functions/numbers.md
+++ b/docs/fr/sql_reference/table_functions/numbers.md
@@ -1,5 +1,8 @@
 ---
 machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_priority: 39
+toc_title: nombre
 ---
 
 # nombre {#numbers}
 
diff --git a/docs/fr/query_language/table_functions/odbc.md b/docs/fr/sql_reference/table_functions/odbc.md
similarity index 87%
rename from docs/fr/query_language/table_functions/odbc.md
rename to docs/fr/sql_reference/table_functions/odbc.md
index 741ec92c951..3e30310476b 100644
--- a/docs/fr/query_language/table_functions/odbc.md
+++ b/docs/fr/sql_reference/table_functions/odbc.md
@@ -1,5 +1,8 @@
 ---
 machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_priority: 44
+toc_title: ODBC
 ---
 
 # ODBC {#table-functions-odbc}
 
@@ -56,7 +59,7 @@ Vous pouvez vérifier la connexion en utilisant le `isql` utilitaire de l'instal
 
 ``` bash
 $ isql -v mysqlconn
-+---------------------------------------+
++-------------------------+
 | Connected! |
 | |
 ...
 
@@ -77,11 +80,11 @@ mysql> insert into test (`int_id`, `float`) VALUES (1,2);
 Query OK, 1 row affected (0,00 sec)
 
 mysql> select * from test;
-+--------+--------------+-------+----------------+
++------+----------+-----+----------+
 | int_id | int_nullable | float | float_nullable |
-+--------+--------------+-------+----------------+
++------+----------+-----+----------+
 | 1 | NULL | 2 | NULL |
-+--------+--------------+-------+----------------+
++------+----------+-----+----------+
 1 row in set (0,00 sec)
 ```
 
@@ -99,7 +102,7 @@ SELECT * FROM odbc('DSN=mysqlconn', 'test', 'test')
 ```
 
 ## Voir Aussi {#see-also}
 
-- [Dictionnaires externes ODBC](../../query_language/dicts/external_dicts_dict_sources.md#dicts-external_dicts_dict_sources-odbc)
-- [Moteur de table ODBC](../../operations/table_engines/odbc.md).
+- [Dictionnaires externes ODBC](../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md#dicts-external_dicts_dict_sources-odbc)
+- [Moteur de table ODBC](../../engines/table_engines/integrations/odbc.md).
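+
+Esquisse indicative d'un usage combiné (hypothèses d'illustration : le DSN `mysqlconn` et la table `test` reprennent l'exemple ci-dessus ; la table locale `visits` et sa colonne `user_id` ne sont pas définies dans cette page) :
+
+``` sql
+-- Jointure d'une table ClickHouse locale hypothétique (`visits`)
+-- avec la table MySQL `test` exposée via le DSN ODBC `mysqlconn`
+SELECT v.user_id, u.float_nullable
+FROM visits AS v
+INNER JOIN odbc('DSN=mysqlconn', 'test', 'test') AS u
+    ON v.user_id = u.int_id
+```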
 [Article Original](https://clickhouse.tech/docs/en/query_language/table_functions/jdbc/)
diff --git a/docs/fr/query_language/table_functions/remote.md b/docs/fr/sql_reference/table_functions/remote.md
similarity index 94%
rename from docs/fr/query_language/table_functions/remote.md
rename to docs/fr/sql_reference/table_functions/remote.md
index c9dac905c16..c618215d795 100644
--- a/docs/fr/query_language/table_functions/remote.md
+++ b/docs/fr/sql_reference/table_functions/remote.md
@@ -1,5 +1,8 @@
 ---
 machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_priority: 40
+toc_title: distant
 ---
 
 # à distance, remoteSecure {#remote-remotesecure}
 
@@ -75,6 +78,6 @@ Le `remote` table de fonction peut être utile dans les cas suivants:
 
 Si l'utilisateur n'est pas spécifié, `default` est utilisée. Si le mot de passe n'est pas spécifié, un mot de passe vide est utilisé.
 
-`remoteSecure` - la même chose que `remote` but with secured connection. Default port — [tcp\_port\_secure](../../operations/server_settings/settings.md#server_settings-tcp_port_secure) de config ou 9440.
+`remoteSecure` - la même chose que `remote` but with secured connection. Default port — [tcp\_port\_secure](../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-tcp_port_secure) de config ou 9440.
 
 [Article Original](https://clickhouse.tech/docs/en/query_language/table_functions/remote/)
diff --git a/docs/fr/query_language/table_functions/url.md b/docs/fr/sql_reference/table_functions/url.md
similarity index 89%
rename from docs/fr/query_language/table_functions/url.md
rename to docs/fr/sql_reference/table_functions/url.md
index 7b7795eef79..a932c1fccce 100644
--- a/docs/fr/query_language/table_functions/url.md
+++ b/docs/fr/sql_reference/table_functions/url.md
@@ -1,5 +1,8 @@
 ---
 machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_priority: 41
+toc_title: URL
 ---
 
 # URL {#url}
 
diff --git a/docs/fr/changelog/2017.md b/docs/fr/whats_new/changelog/2017.md
similarity index 99%
rename from docs/fr/changelog/2017.md
rename to docs/fr/whats_new/changelog/2017.md
index 8cb6e3ef2e0..aaf9b52f749 100644
--- a/docs/fr/changelog/2017.md
+++ b/docs/fr/whats_new/changelog/2017.md
@@ -1,5 +1,8 @@
 ---
 machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_priority: 79
+toc_title: '2017'
 ---
 
 ### Clickhouse version 1.1.54327, 2017-12-21 {#clickhouse-release-1-1-54327-2017-12-21}
 
diff --git a/docs/fr/changelog/2018.md b/docs/fr/whats_new/changelog/2018.md
similarity index 99%
rename from docs/fr/changelog/2018.md
rename to docs/fr/whats_new/changelog/2018.md
index cae840dfc34..3062f4f1d18 100644
--- a/docs/fr/changelog/2018.md
+++ b/docs/fr/whats_new/changelog/2018.md
@@ -1,5 +1,8 @@
 ---
 machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_priority: 78
+toc_title: '2018'
 ---
 
 ## Clickhouse version 18.16 {#clickhouse-release-18-16}
 
diff --git a/docs/fr/whats_new/changelog/2019.md b/docs/fr/whats_new/changelog/2019.md
new file mode 100644
index 00000000000..5c52d19ebae
--- /dev/null
+++ b/docs/fr/whats_new/changelog/2019.md
@@ -0,0 +1,2074 @@
+---
+machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_priority: 77
+toc_title: '2019'
+---
+
+## Clickhouse version v19. 17 {#clickhouse-release-v19-17}
+
+### Clickhouse version v19. 17. 6. 36, 2019-12-27 {#clickhouse-release-v19-17-6-36-2019-12-27}
+
+#### Bug Fix {#bug-fix}
+
+- Correction d'un dépassement de tampon potentiel lors de la décompression. Un utilisateur malveillant pouvait transmettre des données compressées fabriquées qui pourraient provoquer une lecture après le tampon. Ce problème a été trouvé par Eldar Zaitov de l'équipe de sécurité de l'information Yandex. [\#8404](https://github.com/ClickHouse/ClickHouse/pull/8404) ([alexeï-milovidov](https://github.com/alexey-milovidov))
+- Correction d'un possible plantage du serveur (`std::terminate`) lorsque le serveur ne peut pas envoyer ou écrire des données au format JSON ou XML avec des valeurs de type string data (qui nécessitent une validation UTF-8) ou lors de la compression des données de résultat avec l'algorithme Brotli ou dans certains autres cas rares. [\#8384](https://github.com/ClickHouse/ClickHouse/pull/8384) ([alexeï-milovidov](https://github.com/alexey-milovidov))
+- Correction des dictionnaires ayant pour source une `VIEW` ClickHouse : la lecture de tels dictionnaires ne provoque plus l'erreur `There is no query`. [\#8351](https://github.com/ClickHouse/ClickHouse/pull/8351) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Correction de la vérification qu'un hôte client est autorisé par le host\_regexp spécifié dans users.xml. [\#8241](https://github.com/ClickHouse/ClickHouse/pull/8241), [\#8342](https://github.com/ClickHouse/ClickHouse/pull/8342) ([Vitaly Baranov](https://github.com/vitlibar))
+- `RENAME TABLE` pour une table distribuée renomme maintenant le dossier contenant les données insérées avant d'envoyer aux fragments. Cela résout un problème avec les renommages successifs `tableA->tableB`, `tableC->tableA`. [\#8306](https://github.com/ClickHouse/ClickHouse/pull/8306) ([tavplubix](https://github.com/tavplubix))
+- Les dictionnaires externes `range_hashed` créés par des requêtes DDL autorisent désormais des plages de types numériques arbitraires. [\#8275](https://github.com/ClickHouse/ClickHouse/pull/8275) ([alésapine](https://github.com/alesapin))
+- Correction de la fonction de table dans `INSERT INTO table SELECT ... FROM mysql(...)`. [\#8234](https://github.com/ClickHouse/ClickHouse/pull/8234) ([tavplubix](https://github.com/tavplubix))
+- Correction d'une erreur de segmentation dans `INSERT INTO TABLE FUNCTION file()` lors de l'insertion dans un fichier qui n'existe pas. Maintenant, dans ce cas, le fichier est créé et l'insertion est traitée. [\#8177](https://github.com/ClickHouse/ClickHouse/pull/8177) ([Olga Khvostikova](https://github.com/stavrolia))
+- Correction d'une erreur bitmapAnd lors de l'intersection d'un bitmap agrégé et d'un bitmap scalaire. [\#8082](https://github.com/ClickHouse/ClickHouse/pull/8082) ([Yue Huang](https://github.com/moon03432))
+- Correction d'un segfault quand la requête `EXISTS` était utilisée sans le qualificatif `TABLE` ou `DICTIONARY`, comme dans `EXISTS t`. [\#8213](https://github.com/ClickHouse/ClickHouse/pull/8213) ([alexeï-milovidov](https://github.com/alexey-milovidov))
+- Correction du type de retour des fonctions `rand` et `randConstant` en cas d'argument nullable. Elles renvoient maintenant toujours `UInt32` et jamais `Nullable(UInt32)`. [\#8204](https://github.com/ClickHouse/ClickHouse/pull/8204) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Correction de `DROP DICTIONARY IF EXISTS db.dict` : il ne lance plus d'exception si `db` n'existe pas.
[\#8185](https://github.com/ClickHouse/ClickHouse/pull/8185) ([Vitaly Baranov](https://github.com/vitlibar)) +- Si une table n'a pas été complètement abandonnée en raison d'un plantage du serveur, le serveur essaiera de la restaurer et de la charger [\#8176](https://github.com/ClickHouse/ClickHouse/pull/8176) ([tavplubix](https://github.com/tavplubix)) +- Correction d'une requête de comptage trivial pour une table distribuée s'il y a plus de deux tables locales de fragments. [\#8164](https://github.com/ClickHouse/ClickHouse/pull/8164) ([小路](https://github.com/nicelulu)) +- Correction d'un bug qui conduisait à une course de données dans DB:: BlockStreamProfileInfo:: calculateRowsBeforeLimit() [\#8143](https://github.com/ClickHouse/ClickHouse/pull/8143) ([Alexander Kazakov](https://github.com/Akazz)) +- Fixe `ALTER table MOVE part` exécuté immédiatement après la fusion de la partie spécifiée, ce qui pourrait provoquer le déplacement d'une partie la partie fusionné. Maintenant, il déplace correctement la partie spécifiée. [\#8104](https://github.com/ClickHouse/ClickHouse/pull/8104) ([Vladimir Chebotarev](https://github.com/excitoon)) +- Les Expressions pour les dictionnaires peuvent maintenant être spécifiées en tant que chaînes. Ceci est utile pour le calcul des attributs lors de l'extraction de données à partir de sources non-ClickHouse, car il permet d'utiliser une syntaxe non-ClickHouse pour ces expressions. [\#8098](https://github.com/ClickHouse/ClickHouse/pull/8098) ([alésapine](https://github.com/alesapin)) +- Correction d'une course très rare dans `clickhouse-copier` en raison d'un débordement dans ZXid. [\#8088](https://github.com/ClickHouse/ClickHouse/pull/8088) ([Ding Xiang Fei](https://github.com/dingxiangfei2009)) +- Correction du bug quand après la requête a échoué (en raison de “Too many simultaneous queries” par exemple) il ne lirait pas les informations des tables externes, et le + la requête suivante interpréterait cette information comme le début de la requête suivante provoquant une erreur comme `Unknown packet from client`. [\#8084](https://github.com/ClickHouse/ClickHouse/pull/8084) ([Azat Khuzhin](https://github.com/azat)) +- Éviter la déréférence nulle après “Unknown packet X from server” [\#8071](https://github.com/ClickHouse/ClickHouse/pull/8071) ([Azat Khuzhin](https://github.com/azat)) +- Restaurer la prise en charge de tous les paramètres régionaux ICU, ajouter la possibilité d'appliquer des collations pour les expressions constantes et ajouter le nom de la langue au système.tableau de collations. [\#8051](https://github.com/ClickHouse/ClickHouse/pull/8051) ([alésapine](https://github.com/alesapin)) +- Nombre de flux pour lire à partir `StorageFile` et `StorageHDFS` est maintenant limitée, pour éviter de dépasser la limite de mémoire. [\#7981](https://github.com/ClickHouse/ClickHouse/pull/7981) ([alésapine](https://github.com/alesapin)) +- Fixe `CHECK TABLE` requête pour `*MergeTree` les tables sans clé. [\#7979](https://github.com/ClickHouse/ClickHouse/pull/7979) ([alésapine](https://github.com/alesapin)) +- Suppression du numéro de mutation d'un nom de pièce au cas où il n'y aurait pas de mutations. Cette suppression a amélioré la compatibilité avec les anciennes versions. [\#8250](https://github.com/ClickHouse/ClickHouse/pull/8250) ([alésapine](https://github.com/alesapin)) +- Correction du bug que les mutations sont ignorées pour certaines parties attachées en raison de leur data\_version sont plus grandes que la version de mutation de table. 
[\#7812](https://github.com/ClickHouse/ClickHouse/pull/7812) ([Zhichang Yu](https://github.com/yuzhichang)) +- Autoriser le démarrage du serveur avec des copies redondantes des pièces après les avoir déplacées vers un autre périphérique. [\#7810](https://github.com/ClickHouse/ClickHouse/pull/7810) ([Vladimir Chebotarev](https://github.com/excitoon)) +- Correction de l'erreur “Sizes of columns doesn’t match” qui pourraient apparaître lors de l'utilisation de fonction d'agrégation des colonnes. [\#7790](https://github.com/ClickHouse/ClickHouse/pull/7790) ([Boris Granveaud](https://github.com/bgranvea)) +- Maintenant, une exception sera levée en cas d'utilisation avec des liens à côté de LIMIT BY. Et maintenant, il est possible d'utiliser TOP avec LIMIT BY. [\#7637](https://github.com/ClickHouse/ClickHouse/pull/7637) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) +- Correction du rechargement du dictionnaire s'il a `invalidate_query`, qui a arrêté les mises à jour et une exception sur les tentatives de mise à jour précédentes. [\#8029](https://github.com/ClickHouse/ClickHouse/pull/8029) ([alésapine](https://github.com/alesapin)) + +### Clickhouse version v19. 17. 4. 11, 2019-11-22 {#clickhouse-release-v19-17-4-11-2019-11-22} + +#### Modification Incompatible En Arrière {#backward-incompatible-change} + +- Utilisation de column au lieu de AST pour stocker les résultats de la sous-requête scalaire pour de meilleures performances. Paramètre `enable_scalar_subquery_optimization` a été ajouté dans 19.17 et il a été activé par défaut. Cela conduit à des erreurs comme [ce](https://github.com/ClickHouse/ClickHouse/issues/7851) lors de la mise à niveau vers 19.17.2 ou 19.17.3 à partir des versions précédentes. Ce paramètre a été désactivé par défaut dans 19.17.4, pour permettre la mise à niveau à partir de 19.16 et des versions plus anciennes sans erreurs. [\#7392](https://github.com/ClickHouse/ClickHouse/pull/7392) ([Amos Oiseau](https://github.com/amosbird)) + +#### Nouveauté {#new-feature} + +- Ajoutez la possibilité de créer des dictionnaires avec des requêtes DDL. [\#7360](https://github.com/ClickHouse/ClickHouse/pull/7360) ([alésapine](https://github.com/alesapin)) +- Faire `bloom_filter` type de support d'index `LowCardinality` et `Nullable` [\#7363](https://github.com/ClickHouse/ClickHouse/issues/7363) [\#7561](https://github.com/ClickHouse/ClickHouse/pull/7561) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +- Ajouter une fonction `isValidJSON` pour vérifier que la chaîne est un json valide. [\#5910](https://github.com/ClickHouse/ClickHouse/issues/5910) [\#7293](https://github.com/ClickHouse/ClickHouse/pull/7293) ([Vdimir](https://github.com/Vdimir)) +- Mettre `arrayCompact` fonction [\#7328](https://github.com/ClickHouse/ClickHouse/pull/7328) ([Mémo](https://github.com/Joeywzr)) +- Créé fonction `hex` pour les nombres Décimaux. Il fonctionne comme `hex(reinterpretAsString())`, mais ne supprime pas les derniers octets zéro. [\#7355](https://github.com/ClickHouse/ClickHouse/pull/7355) ([Mikhail Korotov](https://github.com/millb)) +- Ajouter `arrayFill` et `arrayReverseFill` fonctions, qui remplacent les éléments par d'autres éléments en avant / arrière d'eux dans le tableau. 
[\#7380](https://github.com/ClickHouse/ClickHouse/pull/7380) ([hcz](https://github.com/hczhcz)) +- Ajouter `CRC32IEEE()`/`CRC64()` soutien [\#7480](https://github.com/ClickHouse/ClickHouse/pull/7480) ([Azat Khuzhin](https://github.com/azat)) +- Mettre `char` fonction similaire à celle dans [mysql](https://dev.mysql.com/doc/refman/8.0/en/string-functions.html#function_char) [\#7486](https://github.com/ClickHouse/ClickHouse/pull/7486) ([sundyli](https://github.com/sundy-li)) +- Ajouter `bitmapTransform` fonction. Il transforme un tableau de valeurs d'une image bitmap dans un autre tableau de valeurs, le résultat est un nouveau bitmap [\#7598](https://github.com/ClickHouse/ClickHouse/pull/7598) ([Zhichang Yu](https://github.com/yuzhichang)) +- Mettre `javaHashUTF16LE()` fonction [\#7651](https://github.com/ClickHouse/ClickHouse/pull/7651) ([achimbab](https://github.com/achimbab)) +- Ajouter `_shard_num` colonne virtuelle pour le moteur distribué [\#7624](https://github.com/ClickHouse/ClickHouse/pull/7624) ([Azat Khuzhin](https://github.com/azat)) + +#### Caractéristique Expérimentale {#experimental-feature} + +- Prise en charge des processeurs (nouveau pipeline d'exécution de requêtes) dans `MergeTree`. [\#7181](https://github.com/ClickHouse/ClickHouse/pull/7181) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) + +#### Bug Fix {#bug-fix-1} + +- Correction d'une analyse float incorrecte Dans `Values` [\#7817](https://github.com/ClickHouse/ClickHouse/issues/7817) [\#7870](https://github.com/ClickHouse/ClickHouse/pull/7870) ([tavplubix](https://github.com/tavplubix)) +- Correction d'un blocage rare qui peut se produire lorsque trace\_log est activé. [\#7838](https://github.com/ClickHouse/ClickHouse/pull/7838) ([filimonov](https://github.com/filimonov)) +- Empêcher la duplication des messages lors de la production de la table Kafka a tout MVS en sélectionnant [\#7265](https://github.com/ClickHouse/ClickHouse/pull/7265) ([Ivan](https://github.com/abyss7)) +- Soutien pour `Array(LowCardinality(Nullable(String)))` dans `IN`. Résoudre [\#7364](https://github.com/ClickHouse/ClickHouse/issues/7364) [\#7366](https://github.com/ClickHouse/ClickHouse/pull/7366) ([achimbab](https://github.com/achimbab)) +- Ajouter le traitement de `SQL_TINYINT` et `SQL_BIGINT` et correction de la gestion des `SQL_FLOAT` types de sources de données dans ODBC Bridge. [\#7491](https://github.com/ClickHouse/ClickHouse/pull/7491) ([Denis Glazachev](https://github.com/traceon)) +- Correction de l'agrégation (`avg` et quantiles) sur des colonnes décimales vides [\#7431](https://github.com/ClickHouse/ClickHouse/pull/7431) ([Andrey Konyaev](https://github.com/akonyaev90)) +- Fixer `INSERT` en Distribué avec `MATERIALIZED` colonne [\#7377](https://github.com/ClickHouse/ClickHouse/pull/7377) ([Azat Khuzhin](https://github.com/azat)) +- Faire `MOVE PARTITION` fonctionne si certaines parties de la partition sont déjà sur le disque ou le volume de destination [\#7434](https://github.com/ClickHouse/ClickHouse/pull/7434) ([Vladimir Chebotarev](https://github.com/excitoon)) +- Correction d'un bug avec hardlinks ne pas être créé lors de mutations dans `ReplicatedMergeTree` dans des configurations multi-disques. 
[\#7558](https://github.com/ClickHouse/ClickHouse/pull/7558) ([Vladimir Chebotarev](https://github.com/excitoon)) +- Correction d'un bug avec une mutation sur un MergeTree lorsque la partie entière reste inchangée et le meilleur espace est trouvé sur un autre disque [\#7602](https://github.com/ClickHouse/ClickHouse/pull/7602) ([Vladimir Chebotarev](https://github.com/excitoon)) +- Correction d'un bug avec `keep_free_space_ratio` ne pas être lu à partir de la configuration des disques [\#7645](https://github.com/ClickHouse/ClickHouse/pull/7645) ([Vladimir Chebotarev](https://github.com/excitoon)) +- Correction d'un bug avec la table ne contient que `Tuple` colonnes ou colonnes avec des chemins complexes. Fixer [7541](https://github.com/ClickHouse/ClickHouse/issues/7541). [\#7545](https://github.com/ClickHouse/ClickHouse/pull/7545) ([alésapine](https://github.com/alesapin)) +- Ne pas tenir compte de la mémoire pour le moteur tampon dans la limite max\_memory\_usage [\#7552](https://github.com/ClickHouse/ClickHouse/pull/7552) ([Azat Khuzhin](https://github.com/azat)) +- Correction de l'utilisation finale de la marque dans `MergeTree` tableaux commandés par `tuple()`. Dans de rares cas cela pourrait conduire à `Can't adjust last granule` erreur lors de la sélection. [\#7639](https://github.com/ClickHouse/ClickHouse/pull/7639) ([Anton Popov](https://github.com/CurtizJ)) +- Correction d'un bug dans les mutations qui ont un prédicat avec des actions qui nécessitent un contexte (par exemple des fonctions pour json), ce qui peut entraîner des plantages ou des exceptions étranges. [\#7664](https://github.com/ClickHouse/ClickHouse/pull/7664) ([alésapine](https://github.com/alesapin)) +- Correction de l'inadéquation des noms de base de données et de table s'échappant dans `data/` et `shadow/` annuaire [\#7575](https://github.com/ClickHouse/ClickHouse/pull/7575) ([Alexander Burmak](https://github.com/Alex-Burmak)) +- Support duplicated keys in RIGHT\|FULL JOINs, e.g. `ON t.x = u.x AND t.x = u.y`. Correction d'un crash dans ce cas. [\#7586](https://github.com/ClickHouse/ClickHouse/pull/7586) ([Artem Zuikov](https://github.com/4ertus2)) +- Fixer `Not found column in block` lors de la jointure sur l'expression avec jointure droite ou complète. [\#7641](https://github.com/ClickHouse/ClickHouse/pull/7641) ([Artem Zuikov](https://github.com/4ertus2)) +- Une tentative de plus pour corriger la boucle infinie dans `PrettySpace` format [\#7591](https://github.com/ClickHouse/ClickHouse/pull/7591) ([Olga Khvostikova](https://github.com/stavrolia)) +- Correction d'un bug dans `concat` fonction lorsque tous les arguments étaient `FixedString` de la même taille. [\#7635](https://github.com/ClickHouse/ClickHouse/pull/7635) ([alésapine](https://github.com/alesapin)) +- Correction d'une exception en cas d'utilisation de 1 argument lors de la définition des stockages S3, URL et HDFS. 
[\#7618](https://github.com/ClickHouse/ClickHouse/pull/7618) ([Vladimir Chebotarev](https://github.com/excitoon)) +- Correction de la portée de InterpreterSelectQuery pour les vues Avec requête [\#7601](https://github.com/ClickHouse/ClickHouse/pull/7601) ([Azat Khuzhin](https://github.com/azat)) + +#### Amélioration {#improvement} + +- `Nullable` colonnes reconnues et valeurs NULL gérées correctement par ODBC-bridge [\#7402](https://github.com/ClickHouse/ClickHouse/pull/7402) ([Vasily Nemkov](https://github.com/Enmk)) +- Ecrire le lot actuel pour distribué envoyer atomiquement [\#7600](https://github.com/ClickHouse/ClickHouse/pull/7600) ([Azat Khuzhin](https://github.com/azat)) +- Lancez une exception si nous ne pouvons pas détecter la table pour le nom de la colonne dans la requête. [\#7358](https://github.com/ClickHouse/ClickHouse/pull/7358) ([Artem Zuikov](https://github.com/4ertus2)) +- Ajouter `merge_max_block_size` réglage de `MergeTreeSettings` [\#7412](https://github.com/ClickHouse/ClickHouse/pull/7412) ([Artem Zuikov](https://github.com/4ertus2)) +- Les requêtes avec `HAVING` et sans `GROUP BY` supposons groupe par constante. Si, `SELECT 1 HAVING 1` maintenant retourne un résultat. [\#7496](https://github.com/ClickHouse/ClickHouse/pull/7496) ([Amos Oiseau](https://github.com/amosbird)) +- Soutien à l'analyse `(X,)` comme tuple similaire à python. [\#7501](https://github.com/ClickHouse/ClickHouse/pull/7501), [\#7562](https://github.com/ClickHouse/ClickHouse/pull/7562) ([Amos Oiseau](https://github.com/amosbird)) +- Faire `range` les comportements de fonction ressemblent presque à ceux de pythonic. [\#7518](https://github.com/ClickHouse/ClickHouse/pull/7518) ([sundyli](https://github.com/sundy-li)) +- Ajouter `constraints` les colonnes de la table `system.settings` [\#7553](https://github.com/ClickHouse/ClickHouse/pull/7553) ([Vitaly Baranov](https://github.com/vitlibar)) +- Meilleur format Null pour le gestionnaire tcp, de sorte qu'il est possible d'utiliser `select ignore() from table format Null` pour perf mesure via clickhouse-client [\#7606](https://github.com/ClickHouse/ClickHouse/pull/7606) ([Amos Oiseau](https://github.com/amosbird)) +- Les requêtes comme `CREATE TABLE ... AS (SELECT (1, 2))` sont analysés correctement [\#7542](https://github.com/ClickHouse/ClickHouse/pull/7542) ([hcz](https://github.com/hczhcz)) + +#### Amélioration Des Performances {#performance-improvement} + +- Les performances de l'agrégation sur les clés de chaîne courte sont améliorées. [\#6243](https://github.com/ClickHouse/ClickHouse/pull/6243) ([Alexander Kuzmenkov](https://github.com/akuzm), [Amos Oiseau](https://github.com/amosbird)) +- Exécutez une autre passe d'analyse de syntaxe / expression pour obtenir des optimisations potentielles après que les prédicats constants sont pliés. [\#7497](https://github.com/ClickHouse/ClickHouse/pull/7497) ([Amos Oiseau](https://github.com/amosbird)) +- Utilisez les méta informations de stockage pour évaluer trivial `SELECT count() FROM table;` [\#7510](https://github.com/ClickHouse/ClickHouse/pull/7510) ([Amos Oiseau](https://github.com/amosbird), [alexeï-milovidov](https://github.com/alexey-milovidov)) +- Vectoriser le traitement `arrayReduce` semblable à Agrégateur `addBatch`. 
[\#7608](https://github.com/ClickHouse/ClickHouse/pull/7608) ([Amos Oiseau](https://github.com/amosbird)) +- Améliorations mineures des performances de `Kafka` consommation [\#7475](https://github.com/ClickHouse/ClickHouse/pull/7475) ([Ivan](https://github.com/abyss7)) + +#### Construction / Test / Amélioration De L'Emballage {#buildtestingpackaging-improvement} + +- Ajouter la prise en charge de la compilation croisée à L'architecture du processeur AARCH64. Refactoriser le code emballeur script. [\#7370](https://github.com/ClickHouse/ClickHouse/pull/7370) [\#7539](https://github.com/ClickHouse/ClickHouse/pull/7539) ([Ivan](https://github.com/abyss7)) +- Décompressez les chaînes d'outils darwin-x86\_64 et linux-aarch64 dans le volume Docker monté lors de la construction de paquets [\#7534](https://github.com/ClickHouse/ClickHouse/pull/7534) ([Ivan](https://github.com/abyss7)) +- Mise à jour de L'Image Docker pour le Packager binaire [\#7474](https://github.com/ClickHouse/ClickHouse/pull/7474) ([Ivan](https://github.com/abyss7)) +- Correction des erreurs de compilation sur macOS Catalina [\#7585](https://github.com/ClickHouse/ClickHouse/pull/7585) ([Ernest Poletaev](https://github.com/ernestp)) +- Certains refactoring dans la logique d'analyse de requête: diviser la classe complexe en plusieurs classes simples. [\#7454](https://github.com/ClickHouse/ClickHouse/pull/7454) ([Artem Zuikov](https://github.com/4ertus2)) +- Fix construire sans submodules [\#7295](https://github.com/ClickHouse/ClickHouse/pull/7295) ([proller](https://github.com/proller)) +- Mieux `add_globs` dans les fichiers CMake [\#7418](https://github.com/ClickHouse/ClickHouse/pull/7418) ([Amos Oiseau](https://github.com/amosbird)) +- Supprimer les chemins codés en dur dans `unwind` cible [\#7460](https://github.com/ClickHouse/ClickHouse/pull/7460) ([Konstantin Podshumok](https://github.com/podshumok)) +- Permettre d'utiliser le format mysql sans ssl [\#7524](https://github.com/ClickHouse/ClickHouse/pull/7524) ([proller](https://github.com/proller)) + +#### Autre {#other} + +- Ajout de la grammaire ANTLR4 pour le dialecte ClickHouse SQL [\#7595](https://github.com/ClickHouse/ClickHouse/issues/7595) [\#7596](https://github.com/ClickHouse/ClickHouse/pull/7596) ([alexeï-milovidov](https://github.com/alexey-milovidov)) + +## Clickhouse version v19. 16 {#clickhouse-release-v19-16} + +#### Clickhouse version v19. 16. 14. 65, 2020-03-25 {#clickhouse-release-v19-16-14-65-2020-03-25} + +- Correction d'un bug dans les calculs par lots des opérations logiques ternaires sur plusieurs arguments (plus de 10). [\#8718](https://github.com/ClickHouse/ClickHouse/pull/8718) ([Alexander Kazakov](https://github.com/Akazz)) Ce correctif a été rétroporté à la version 19.16 par une demande spéciale D'Altinity. + +#### Clickhouse version v19. 16. 14. 65, 2020-03-05 {#clickhouse-release-v19-16-14-65-2020-03-05} + +- Correction de l'incompatibilité des sous-requêtes distribuées avec les anciennes versions de CH. Fixer [\#7851](https://github.com/ClickHouse/ClickHouse/issues/7851) + [(tabplubix)](https://github.com/tavplubix) +- Lors de l'exécution de `CREATE` requête, plier les expressions constantes dans les arguments du moteur de stockage. Remplacez le nom de base de données vide par la base de données actuelle. Fixer [\#6508](https://github.com/ClickHouse/ClickHouse/issues/6508), [\#3492](https://github.com/ClickHouse/ClickHouse/issues/3492). Corrigez également la vérification de l'adresse locale dans `ClickHouseDictionarySource`. 
+ [\#9262](https://github.com/ClickHouse/ClickHouse/pull/9262) [(tabplubix)](https://github.com/tavplubix) +- Maintenant fond fusionne dans `*MergeTree` la famille des moteurs de table préserve l'ordre de volume de la Politique de stockage avec plus de précision. + [\#8549](https://github.com/ClickHouse/ClickHouse/pull/8549) ([Vladimir Chebotarev](https://github.com/excitoon)) +- Empêcher la perte de données dans `Kafka` dans de rares cas, lorsque l'exception se produit après la lecture du suffixe mais avant la validation. Fixer [\#9378](https://github.com/ClickHouse/ClickHouse/issues/9378). Concerner: [\#7175](https://github.com/ClickHouse/ClickHouse/issues/7175) + [\#9507](https://github.com/ClickHouse/ClickHouse/pull/9507) [(filimonov)](https://github.com/filimonov) +- Correction d'un bug menant à la résiliation du serveur lorsque vous essayez d'utiliser / drop `Kafka` tableau créé avec de mauvais paramètres. Fixer [\#9494](https://github.com/ClickHouse/ClickHouse/issues/9494). Incorporer [\#9507](https://github.com/ClickHouse/ClickHouse/issues/9507). + [\#9513](https://github.com/ClickHouse/ClickHouse/pull/9513) [(filimonov)](https://github.com/filimonov) +- Autoriser l'utilisation des `MaterializedView` avec les sous-requêtes ci-dessus `Kafka` table. + [\#8197](https://github.com/ClickHouse/ClickHouse/pull/8197) ([filimonov](https://github.com/filimonov)) + +#### Nouveauté {#new-feature-1} + +- Ajouter `deduplicate_blocks_in_dependent_materialized_views` option pour contrôler le comportement des insertions idempotent dans des tables avec des vues matérialisées. Cette nouvelle fonctionnalité a été ajoutée à la version de bugfix par une demande spéciale D'Altinity. + [\#9070](https://github.com/ClickHouse/ClickHouse/pull/9070) [(urykhy)](https://github.com/urykhy) + +### Clickhouse version v19. 16. 2. 2, 2019-10-30 {#clickhouse-release-v19-16-2-2-2019-10-30} + +#### Modification Incompatible En Arrière {#backward-incompatible-change-1} + +- Ajouter une validation d'arité manquante pour count / counIf. + [\#7095](https://github.com/ClickHouse/ClickHouse/issues/7095) + [\#7298](https://github.com/ClickHouse/ClickHouse/pull/7298) ([Vdimir](https://github.com/Vdimir)) +- Supprimer l'héritage `asterisk_left_columns_only` paramètre (il est désactivé par défaut). + [\#7335](https://github.com/ClickHouse/ClickHouse/pull/7335) ([Artem + Zuikov](https://github.com/4ertus2)) +- Les chaînes de Format pour le format de données de modèle sont maintenant spécifiées dans les fichiers. + [\#7118](https://github.com/ClickHouse/ClickHouse/pull/7118) + ([tavplubix](https://github.com/tavplubix)) + +#### Nouveauté {#new-feature-2} + +- Introduisez uniqCombined64() pour calculer la cardinalité supérieure à UINT\_MAX. + [\#7213](https://github.com/ClickHouse/ClickHouse/pull/7213), + [\#7222](https://github.com/ClickHouse/ClickHouse/pull/7222) ([Azat + Khuzhin](https://github.com/azat)) +- Soutenir les index de filtre Bloom sur les colonnes de tableau. + [\#6984](https://github.com/ClickHouse/ClickHouse/pull/6984) + ([achimbab](https://github.com/achimbab)) +- Ajouter une fonction `getMacro(name)` cela renvoie une chaîne avec la valeur de `` + à partir de la configuration du serveur. [\#7240](https://github.com/ClickHouse/ClickHouse/pull/7240) + ([alexeï-milovidov](https://github.com/alexey-milovidov)) +- Définissez deux options de configuration pour un dictionnaire basé sur une source HTTP: `credentials` et + `http-headers`. 
[\#7092](https://github.com/ClickHouse/ClickHouse/pull/7092) ([Guillaume + Tassery](https://github.com/YiuRULE)) +- Ajouter un nouveau ProfileEvent `Merge` cela compte le nombre de fusions d'arrière-plan lancées. + [\#7093](https://github.com/ClickHouse/ClickHouse/pull/7093) ([Mikhail + Korotov](https://github.com/millb)) +- Ajouter la fonction fullHostName qui renvoie un nom de domaine complet. + [\#7263](https://github.com/ClickHouse/ClickHouse/issues/7263) + [\#7291](https://github.com/ClickHouse/ClickHouse/pull/7291) ([sundyli](https://github.com/sundy-li)) +- Ajouter une fonction `arraySplit` et `arrayReverseSplit` qui divise un tableau par “cut off” + condition. Ils sont utiles dans la gestion de la séquence temporelle. + [\#7294](https://github.com/ClickHouse/ClickHouse/pull/7294) ([hcz](https://github.com/hczhcz)) +- Ajoutez de nouvelles fonctions qui renvoient le tableau de tous les indices appariés dans la famille de fonctions multiMatch. + [\#7299](https://github.com/ClickHouse/ClickHouse/pull/7299) ([Danila + Kutenin](https://github.com/danlark1)) +- Ajouter un nouveau moteur de base de données `Lazy` qui est optimisé pour stocker un grand nombre de petits journaux + table. [\#7171](https://github.com/ClickHouse/ClickHouse/pull/7171) ([Nikita + Vasiliev](https://github.com/nikvas0)) +- Ajouter des fonctions d'agrégation groupBitmapAnd, - ou, - Xor pour les colonnes bitmap. [\#7109](https://github.com/ClickHouse/ClickHouse/pull/7109) ([Zhichang + Yu](https://github.com/yuzhichang)) +- Ajouter des combinateurs de fonctions d'agrégat-OrNull et-OrDefault, qui renvoient null + ou des valeurs par défaut lorsqu'il n'y a rien à agréger. + [\#7331](https://github.com/ClickHouse/ClickHouse/pull/7331) + ([hcz](https://github.com/hczhcz)) +- Introduire le format de données CustomSeparated qui prend en charge l'échappement personnalisé et + séparateur de règles. [\#7118](https://github.com/ClickHouse/ClickHouse/pull/7118) + ([tavplubix](https://github.com/tavplubix)) +- Soutien Redis comme source de dictionnaire externe. [\#4361](https://github.com/ClickHouse/ClickHouse/pull/4361) [\#6962](https://github.com/ClickHouse/ClickHouse/pull/6962) ([comunodi](https://github.com/comunodi), [Anton + Popov](https://github.com/CurtizJ)) + +#### Bug Fix {#bug-fix-2} + +- Correction d'un résultat de requête incorrect s'il a `WHERE IN (SELECT ...)` la section et `optimize_read_in_order` être + utiliser. [\#7371](https://github.com/ClickHouse/ClickHouse/pull/7371) ([Anton + Popov](https://github.com/CurtizJ)) +- Plugin D'authentification MariaDB désactivé, qui dépend des fichiers en dehors du projet. + [\#7140](https://github.com/ClickHouse/ClickHouse/pull/7140) ([Iouri + Baranov](https://github.com/yurriy)) +- Correction d'une exception `Cannot convert column ... because it is constant but values of constants are different in source and result` ce qui pourrait rarement arriver lorsque les fonctions `now()`, `today()`, + `yesterday()`, `randConstant()` sont utilisés. + [\#7156](https://github.com/ClickHouse/ClickHouse/pull/7156) ([Nikolaï + Kochetov](https://github.com/KochetovNicolai)) +- Correction d'un problème d'utilisation de HTTP keep alive timeout au lieu de TCP keep alive timeout. + [\#7351](https://github.com/ClickHouse/ClickHouse/pull/7351) ([Vassili + Nemkov](https://github.com/Enmk)) +- Correction d'un défaut de segmentation dans groupBitmapOr (problème [\#7109](https://github.com/ClickHouse/ClickHouse/issues/7109)). 
+ [\#7289](https://github.com/ClickHouse/ClickHouse/pull/7289) ([Zhichang + Yu](https://github.com/yuzhichang)) +- Pour les vues matérialisées, le commit pour Kafka est appelé après l'écriture de toutes les données. + [\#7175](https://github.com/ClickHouse/ClickHouse/pull/7175) ([Ivan](https://github.com/abyss7)) +- Fixe de mal `duration_ms` valeur en `system.part_log` table. Il y a dix reprises. + [\#7172](https://github.com/ClickHouse/ClickHouse/pull/7172) ([Vladimir + Chebotarev](https://github.com/excitoon)) +- Une solution rapide pour résoudre le crash dans la table LIVE VIEW et réactiver tous les tests LIVE VIEW. + [\#7201](https://github.com/ClickHouse/ClickHouse/pull/7201) + ([vzakaznikov](https://github.com/vzakaznikov)) +- Sérialiser correctement les valeurs NULL dans les index min / max des parties MergeTree. + [\#7234](https://github.com/ClickHouse/ClickHouse/pull/7234) ([Alexander + Kuzmenkov](https://github.com/akuzm)) +- Ne mettez pas de colonnes virtuelles à .métadonnées sql lorsque la table est créée en tant que `CREATE TABLE AS`. + [\#7183](https://github.com/ClickHouse/ClickHouse/pull/7183) ([Ivan](https://github.com/abyss7)) +- Correction d'un défaut de segmentation dans `ATTACH PART` requête. + [\#7185](https://github.com/ClickHouse/ClickHouse/pull/7185) + ([alésapine](https://github.com/alesapin)) +- Correction d'un mauvais résultat pour certaines requêtes données par l'optimisation de empty IN subqueries et empty + INNER/RIGHT JOIN. [\#7284](https://github.com/ClickHouse/ClickHouse/pull/7284) ([Nikolaï + Kochetov](https://github.com/KochetovNicolai)) +- Correction D'une erreur AddressSanitizer dans la méthode LIVE VIEW getHeader (). + [\#7271](https://github.com/ClickHouse/ClickHouse/pull/7271) + ([vzakaznikov](https://github.com/vzakaznikov)) + +#### Amélioration {#improvement-1} + +- Ajouter un message en cas d'attente queue\_wait\_max\_ms. + [\#7390](https://github.com/ClickHouse/ClickHouse/pull/7390) ([Azat + Khuzhin](https://github.com/azat)) +- Faites le réglage de `s3_min_upload_part_size` au niveau de la table. + [\#7059](https://github.com/ClickHouse/ClickHouse/pull/7059) ([Vladimir + Chebotarev](https://github.com/excitoon)) +- Vérifiez TTL dans StorageFactory. [\#7304](https://github.com/ClickHouse/ClickHouse/pull/7304) + ([sundyli](https://github.com/sundy-li)) +- Squash blocs de gauche en fusion partielle join (optimisation). + [\#7122](https://github.com/ClickHouse/ClickHouse/pull/7122) ([Artem + Zuikov](https://github.com/4ertus2)) +- N'autorisez pas les fonctions non déterministes dans les mutations des moteurs de table répliqués, car + peut introduire des incohérences entre les répliques. + [\#7247](https://github.com/ClickHouse/ClickHouse/pull/7247) ([Alexander + Kazakov](https://github.com/Akazz)) +- Désactivez le suivi de la mémoire lors de la conversion de trace de pile d'exception en chaîne. Il peut empêcher la perte + des messages d'erreur de type `Memory limit exceeded` sur le serveur, qui a causé la `Attempt to read after eof` exception sur le client. [\#7264](https://github.com/ClickHouse/ClickHouse/pull/7264) + ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +- Améliorations diverses du format. 
Résoudre + [\#6033](https://github.com/ClickHouse/ClickHouse/issues/6033), + [\#2633](https://github.com/ClickHouse/ClickHouse/issues/2633), + [\#6611](https://github.com/ClickHouse/ClickHouse/issues/6611), + [\#6742](https://github.com/ClickHouse/ClickHouse/issues/6742) + [\#7215](https://github.com/ClickHouse/ClickHouse/pull/7215) + ([tavplubix](https://github.com/tavplubix)) +- ClickHouse ignore les valeurs du côté droit de L'opérateur IN qui ne sont pas convertibles vers la gauche + side type. Make it work properly for compound types – Array and Tuple. + [\#7283](https://github.com/ClickHouse/ClickHouse/pull/7283) ([Alexander + Kuzmenkov](https://github.com/akuzm)) +- Soutenir les inégalités manquantes pour ASOF JOIN. Il est possible de rejoindre une variante moins ou égale et stricte + plus grandes et moins de variantes pour la colonne ASOF dans la syntaxe ON. + [\#7282](https://github.com/ClickHouse/ClickHouse/pull/7282) ([Artem + Zuikov](https://github.com/4ertus2)) +- Optimiser la fusion partielle jointure. [\#7070](https://github.com/ClickHouse/ClickHouse/pull/7070) + ([Artem Zuikov](https://github.com/4ertus2)) +- N'utilisez pas plus de 98K de mémoire dans les fonctions uniqCombined. + [\#7236](https://github.com/ClickHouse/ClickHouse/pull/7236), + [\#7270](https://github.com/ClickHouse/ClickHouse/pull/7270) ([Azat + Khuzhin](https://github.com/azat)) +- Rincer les parties de la table de jonction de droite sur le disque dans PartialMergeJoin (s'il n'y en a pas assez + mémoire). Chargez les données en arrière en cas de besoin. [\#7186](https://github.com/ClickHouse/ClickHouse/pull/7186) + ([Artem Zuikov](https://github.com/4ertus2)) + +#### Amélioration Des Performances {#performance-improvement-1} + +- Accélérez joinGet avec des arguments const en évitant la duplication des données. + [\#7359](https://github.com/ClickHouse/ClickHouse/pull/7359) ([Amos + Oiseau](https://github.com/amosbird)) +- De retour plus tôt si la sous-requête est vide. + [\#7007](https://github.com/ClickHouse/ClickHouse/pull/7007) ([小路](https://github.com/nicelulu)) +- Optimiser l'analyse de l'expression SQL dans les valeurs. + [\#6781](https://github.com/ClickHouse/ClickHouse/pull/6781) + ([tavplubix](https://github.com/tavplubix)) + +#### Construction / Test / Amélioration De L'Emballage {#buildtestingpackaging-improvement-1} + +- Désactivez certaines contribs pour la compilation croisée sur Mac OS. + [\#7101](https://github.com/ClickHouse/ClickHouse/pull/7101) ([Ivan](https://github.com/abyss7)) +- Ajouter un lien manquant avec PocoXML pour clickhouse\_common\_io. + [\#7200](https://github.com/ClickHouse/ClickHouse/pull/7200) ([Azat + Khuzhin](https://github.com/azat)) +- Accepter plusieurs arguments de filtre de test dans clickhouse-test. + [\#7226](https://github.com/ClickHouse/ClickHouse/pull/7226) ([Alexander + Kuzmenkov](https://github.com/akuzm)) +- Activer musl et jemalloc pour ARM. [\#7300](https://github.com/ClickHouse/ClickHouse/pull/7300) + ([Amos Oiseau](https://github.com/amosbird)) +- Ajouter `--client-option` paramètre `clickhouse-test` pour passer des paramètres supplémentaires au client. + [\#7277](https://github.com/ClickHouse/ClickHouse/pull/7277) ([Nikolaï + Kochetov](https://github.com/KochetovNicolai)) +- Préserver les configurations existantes lors de la mise à niveau du package rpm. + [\#7103](https://github.com/ClickHouse/ClickHouse/pull/7103) + ([filimonov](https://github.com/filimonov)) +- Correction des erreurs détectées par PVS. 
[\#7153](https://github.com/ClickHouse/ClickHouse/pull/7153) ([Artem + Zuikov](https://github.com/4ertus2)) +- Correction de la construction pour Darwin. [\#7149](https://github.com/ClickHouse/ClickHouse/pull/7149) + ([Ivan](https://github.com/abyss7)) +- compatibilité glibc 2.29. [\#7142](https://github.com/ClickHouse/ClickHouse/pull/7142) ([Amos + Oiseau](https://github.com/amosbird)) +- Assurez-vous que dh\_clean ne touche pas les fichiers sources potentiels. + [\#7205](https://github.com/ClickHouse/ClickHouse/pull/7205) ([Amos + Oiseau](https://github.com/amosbird)) +- Essayez d'éviter les conflits lors de la mise à jour à partir d'altinity rpm-le fichier de configuration est emballé séparément + dans clickhouse-serveur commun. [\#7073](https://github.com/ClickHouse/ClickHouse/pull/7073) + ([filimonov](https://github.com/filimonov)) +- Optimisez certains fichiers d'en-tête pour des reconstructions plus rapides. + [\#7212](https://github.com/ClickHouse/ClickHouse/pull/7212), + [\#7231](https://github.com/ClickHouse/ClickHouse/pull/7231) ([Alexander + Kuzmenkov](https://github.com/akuzm)) +- Ajouter des tests de performance pour Date et DateTime. [\#7332](https://github.com/ClickHouse/ClickHouse/pull/7332) ([Vassili + Nemkov](https://github.com/Enmk)) +- Correction de certains tests contenant des mutations non déterministes. + [\#7132](https://github.com/ClickHouse/ClickHouse/pull/7132) ([Alexander + Kazakov](https://github.com/Akazz)) +- Ajouter build avec MemorySanitizer à CI. [\#7066](https://github.com/ClickHouse/ClickHouse/pull/7066) + ([Alexander Kuzmenkov](https://github.com/akuzm)) +- Évitez l'utilisation de valeurs non initialisées dans MetricsTransmitter. + [\#7158](https://github.com/ClickHouse/ClickHouse/pull/7158) ([Azat + Khuzhin](https://github.com/azat)) +- Correction de certains problèmes dans les champs trouvés par MemorySanitizer. + [\#7135](https://github.com/ClickHouse/ClickHouse/pull/7135), + [\#7179](https://github.com/ClickHouse/ClickHouse/pull/7179) ([Alexander + Kuzmenkov](https://github.com/akuzm)), [\#7376](https://github.com/ClickHouse/ClickHouse/pull/7376) + ([Amos Oiseau](https://github.com/amosbird)) +- Correction d'un comportement indéfini dans murmurhash32. [\#7388](https://github.com/ClickHouse/ClickHouse/pull/7388) ([Amos + Oiseau](https://github.com/amosbird)) +- Correction d'un comportement indéfini dans StoragesInfoStream. [\#7384](https://github.com/ClickHouse/ClickHouse/pull/7384) + ([tavplubix](https://github.com/tavplubix)) +- Correction du pliage d'expressions constantes pour les moteurs de base de données externes (MySQL, ODBC, JDBC). Dans les précédents + versions il ne fonctionnait pas pour plusieurs expressions constantes et ne fonctionnait pas du tout pour la Date, + DateTime et UUID. Cela corrige [\#7245](https://github.com/ClickHouse/ClickHouse/issues/7245) + [\#7252](https://github.com/ClickHouse/ClickHouse/pull/7252) + ([alexeï-milovidov](https://github.com/alexey-milovidov)) +- Correction D'une erreur de course de données ThreadSanitizer dans la vue en direct lors de l'accès à la variable no\_users\_thread. + [\#7353](https://github.com/ClickHouse/ClickHouse/pull/7353) + ([vzakaznikov](https://github.com/vzakaznikov)) +- Débarrassez-vous des symboles malloc dans libcommon + [\#7134](https://github.com/ClickHouse/ClickHouse/pull/7134), + [\#7065](https://github.com/ClickHouse/ClickHouse/pull/7065) ([Amos + Oiseau](https://github.com/amosbird)) +- Ajoutez l'indicateur global ENABLE\_LIBRARIES pour désactiver toutes les bibliothèques. 
+ [\#7063](https://github.com/ClickHouse/ClickHouse/pull/7063) + ([proller](https://github.com/proller)) + +#### Nettoyage de Code {#code-cleanup} + +- Généraliser le référentiel de configuration pour préparer DDL pour les dictionnaires. [\#7155](https://github.com/ClickHouse/ClickHouse/pull/7155) + ([alésapine](https://github.com/alesapin)) +- Parser pour les dictionnaires DDL sans aucune sémantique. + [\#7209](https://github.com/ClickHouse/ClickHouse/pull/7209) + ([alésapine](https://github.com/alesapin)) +- Divisez ParserCreateQuery en différents analyseurs plus petits. + [\#7253](https://github.com/ClickHouse/ClickHouse/pull/7253) + ([alésapine](https://github.com/alesapin)) +- Petit refactoring et renommage près de dictionnaires externes. + [\#7111](https://github.com/ClickHouse/ClickHouse/pull/7111) + ([alésapine](https://github.com/alesapin)) +- Refactorisez du code pour vous préparer au contrôle d'accès basé sur les rôles. [\#7235](https://github.com/ClickHouse/ClickHouse/pull/7235) ([Vitaly + Baranov](https://github.com/vitlibar)) +- Quelques améliorations dans Databasecode ordinaire. + [\#7086](https://github.com/ClickHouse/ClickHouse/pull/7086) ([Nikita + Vasiliev](https://github.com/nikvas0)) +- N'utilisez pas d'itérateurs dans les méthodes find() et emplace () des tables de hachage. + [\#7026](https://github.com/ClickHouse/ClickHouse/pull/7026) ([Alexander + Kuzmenkov](https://github.com/akuzm)) +- Fix getMultipleValuesFromConfig dans le cas où le paramètre root n'est pas vide. [\#7374](https://github.com/ClickHouse/ClickHouse/pull/7374) + ([Mikhail Korotov](https://github.com/millb)) +- Supprimer un copier-coller (TemporaryFile et TemporaryFileStream) + [\#7166](https://github.com/ClickHouse/ClickHouse/pull/7166) ([Artem + Zuikov](https://github.com/4ertus2)) +- Amélioration de la lisibilité du code un peu (`MergeTreeData::getActiveContainingPart`). + [\#7361](https://github.com/ClickHouse/ClickHouse/pull/7361) ([Vladimir + Chebotarev](https://github.com/excitoon)) +- Attendez tous les travaux planifiés, qui utilisent des objets locaux, si `ThreadPool::schedule(...)` jeter + exception. Renommer `ThreadPool::schedule(...)` de `ThreadPool::scheduleOrThrowOnError(...)` et + correction des commentaires pour rendre évident qu'il peut jeter. + [\#7350](https://github.com/ClickHouse/ClickHouse/pull/7350) + ([tavplubix](https://github.com/tavplubix)) + +## Version ClickHouse 19.15 {#clickhouse-release-19-15} + +### Clickhouse version 19.15.4.10, 2019-10-31 {#clickhouse-release-19-15-4-10-2019-10-31} + +#### Bug Fix {#bug-fix-3} + +- Ajout de la gestion de SQL\_TINYINT et SQL\_BIGINT, et correction de la gestion des types de sources de données SQL\_FLOAT dans ODBC Bridge. + [\#7491](https://github.com/ClickHouse/ClickHouse/pull/7491) ([Denis Glazachev](https://github.com/traceon)) +- Autorisé à avoir certaines parties sur le disque de destination ou le volume dans la PARTITION de déplacement. + [\#7434](https://github.com/ClickHouse/ClickHouse/pull/7434) ([Vladimir Chebotarev](https://github.com/excitoon)) +- Valeurs NULL fixes dans les colonnes nullables via ODBC-bridge. + [\#7402](https://github.com/ClickHouse/ClickHouse/pull/7402) ([Vasily Nemkov](https://github.com/Enmk)) +- Insertion fixe dans un nœud non Local distribué avec des colonnes matérialisées. + [\#7377](https://github.com/ClickHouse/ClickHouse/pull/7377) ([Azat Khuzhin](https://github.com/azat)) +- Fonction fixe getMultipleValuesFromConfig. 
+ [\#7374](https://github.com/ClickHouse/ClickHouse/pull/7374) ([Mikhail Korotov](https://github.com/millb)) +- Correction d'un problème d'utilisation de HTTP keep alive timeout au lieu de TCP keep alive timeout. + [\#7351](https://github.com/ClickHouse/ClickHouse/pull/7351) ([Vasily Nemkov](https://github.com/Enmk)) +- Attendez que tous les travaux se terminent à l'exception (corrige les segfaults rares). + [\#7350](https://github.com/ClickHouse/ClickHouse/pull/7350) ([tavplubix](https://github.com/tavplubix)) +- Ne poussez pas vers MVs lors de l'insertion dans la table Kafka. + [\#7265](https://github.com/ClickHouse/ClickHouse/pull/7265) ([Ivan](https://github.com/abyss7)) +- Désactiver le suivi de la mémoire pour la pile d'exception. + [\#7264](https://github.com/ClickHouse/ClickHouse/pull/7264) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +- Correction d'un mauvais code dans la transformation de la requête pour la base de données externe. + [\#7252](https://github.com/ClickHouse/ClickHouse/pull/7252) ([alexeï-milovidov](https://github.com/alexey-milovidov)) +- Évitez l'utilisation de valeurs non initialisées dans MetricsTransmitter. + [\#7158](https://github.com/ClickHouse/ClickHouse/pull/7158) ([Azat Khuzhin](https://github.com/azat)) +- Ajout d'un exemple de configuration avec des macros pour les tests ([alexeï-milovidov](https://github.com/alexey-milovidov)) + +### Clickhouse version 19.15.3.6, 2019-10-09 {#clickhouse-release-19-15-3-6-2019-10-09} + +#### Bug Fix {#bug-fix-4} + +- Correction de bad\_variant dans le dictionnaire haché. + ([alésapine](https://github.com/alesapin)) +- Correction d'un bug avec défaut de segmentation dans la requête de pièce jointe. + ([alésapine](https://github.com/alesapin)) +- Calcul du temps fixe en `MergeTreeData`. + ([Vladimir Chebotarev](https://github.com/excitoon)) +- Commit à Kafka explicitement après la finalisation de l'écriture. + [\#7175](https://github.com/ClickHouse/ClickHouse/pull/7175) ([Ivan](https://github.com/abyss7)) +- Sérialiser correctement les valeurs NULL dans les index min / max des parties MergeTree. + [\#7234](https://github.com/ClickHouse/ClickHouse/pull/7234) ([Alexander Kuzmenkov](https://github.com/akuzm)) + +### Clickhouse version 19.15.2.2, 2019-10-01 {#clickhouse-release-19-15-2-2-2019-10-01} + +#### Nouveauté {#new-feature-3} + +- Stockage à plusieurs niveaux: prise en charge de l'utilisation de plusieurs volumes de stockage pour les tables avec mergetree engine. Il est possible de stocker de nouvelles données sur SSD et de déplacer automatiquement les anciennes données sur le disque dur. ([exemple](https://clickhouse.github.io/clickhouse-presentations/meetup30/new_features/#12)). [\#4918](https://github.com/ClickHouse/ClickHouse/pull/4918) ([Igr](https://github.com/ObjatieGroba)) [\#6489](https://github.com/ClickHouse/ClickHouse/pull/6489) ([alésapine](https://github.com/alesapin)) +- Ajouter une fonction de table `input` pour lire les données entrantes dans `INSERT SELECT` requête. [\#5450](https://github.com/ClickHouse/ClickHouse/pull/5450) ([palasonique1](https://github.com/palasonic1)) [\#6832](https://github.com/ClickHouse/ClickHouse/pull/6832) ([Anton Popov](https://github.com/CurtizJ)) +- Ajouter un `sparse_hashed` mise en page du dictionnaire, qui est fonctionnellement équivalente à la `hashed` mise en page, mais est plus efficace en mémoire. Il utilise environ deux fois moins de mémoire au prix d'une récupération de valeur plus lente. 
+- Add a `sparse_hashed` dictionary layout that is functionally equivalent to the `hashed` layout but more memory efficient. It uses about half as much memory at the cost of slower value retrieval. [\#6894](https://github.com/ClickHouse/ClickHouse/pull/6894) ([Azat Khuzhin](https://github.com/azat))
+- Implement the ability to define a list of users for access to dictionaries. Only applies to the currently connected database. [\#6907](https://github.com/ClickHouse/ClickHouse/pull/6907) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Add the `LIMIT` option to the `SHOW` query. [\#6944](https://github.com/ClickHouse/ClickHouse/pull/6944) ([Philipp Malkovsky](https://github.com/malkfilipp))
+- Add the `bitmapSubsetLimit(bitmap, range_start, limit)` function, which returns a subset of the smallest `limit` values in the set that are not smaller than `range_start` (see the combined example after this list). [\#6957](https://github.com/ClickHouse/ClickHouse/pull/6957) ([Zhichang Yu](https://github.com/yuzhichang))
+- Add the `bitmapMin` and `bitmapMax` functions. [\#6970](https://github.com/ClickHouse/ClickHouse/pull/6970) ([Zhichang Yu](https://github.com/yuzhichang))
+- Add function `repeat`, related to [issue-6648](https://github.com/ClickHouse/ClickHouse/issues/6648). [\#6999](https://github.com/ClickHouse/ClickHouse/pull/6999) ([flynn](https://github.com/ucasFL))
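+
+A combined, hedged illustration of the new functions from the list above; `bitmapBuild` and `bitmapToArray` are the pre-existing helpers used here to construct and display bitmaps, and all values are hypothetical:
+
+``` sql
+SELECT
+    bitmapToArray(bitmapSubsetLimit(bitmapBuild([1, 5, 7, 9]), 5, 2)) AS subset, -- [5, 7]
+    bitmapMin(bitmapBuild([1, 5, 7, 9])) AS min_value,                           -- 1
+    bitmapMax(bitmapBuild([1, 5, 7, 9])) AS max_value,                           -- 9
+    repeat('ab', 3) AS repeated                                                  -- 'ababab'
+```
+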
+#### Experimental Feature {#experimental-feature-1}
+
+- Implement an (in-memory) merge join variant that does not change the current pipeline. The result is partially sorted by the merge key. Set `partial_merge_join = 1` to use this feature. The merge join is still in development. [\#6940](https://github.com/ClickHouse/ClickHouse/pull/6940) ([Artem Zuikov](https://github.com/4ertus2))
+- Add the `S3` engine and table function. It is still in development (no authentication support yet). [\#5596](https://github.com/ClickHouse/ClickHouse/pull/5596) ([Vladimir Chebotarev](https://github.com/excitoon))
+
+#### Improvement {#improvement-2}
+
+- Every message read from Kafka is inserted atomically. This resolves almost all known issues with the Kafka engine. [\#6950](https://github.com/ClickHouse/ClickHouse/pull/6950) ([Ivan](https://github.com/abyss7))
+- Improvements for failover of distributed queries. Recovery time is shortened; it is now configurable and can be seen in `system.clusters`. [\#6399](https://github.com/ClickHouse/ClickHouse/pull/6399) ([Vasily Nemkov](https://github.com/Enmk))
+- Support numeric values for Enums directly in the `IN` section. \#6766 [\#6941](https://github.com/ClickHouse/ClickHouse/pull/6941) ([dimarub2000](https://github.com/dimarub2000))
+- Support (optional, disabled by default) redirects on URL storage. [\#6914](https://github.com/ClickHouse/ClickHouse/pull/6914) ([maqroll](https://github.com/maqroll))
+- Add an information message when a client with an older version connects to the server. [\#6893](https://github.com/ClickHouse/ClickHouse/pull/6893) ([Philipp Malkovsky](https://github.com/malkfilipp))
+- Remove the maximum backoff sleep time limit for sending data in Distributed tables. [\#6895](https://github.com/ClickHouse/ClickHouse/pull/6895) ([Azat Khuzhin](https://github.com/azat))
+- Add the ability to send profile events (counters) with cumulative values to Graphite. It can be enabled under `<events_cumulative>` in the server `config.xml`. [\#6969](https://github.com/ClickHouse/ClickHouse/pull/6969) ([Azat Khuzhin](https://github.com/azat))
+- Automatically cast type `T` to `LowCardinality(T)` while inserting data into a column of type `LowCardinality(T)` in Native format via HTTP. [\#6891](https://github.com/ClickHouse/ClickHouse/pull/6891) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Add the ability to use the function `hex` without `reinterpretAsString` for `Float32`, `Float64`. [\#7024](https://github.com/ClickHouse/ClickHouse/pull/7024) ([Mikhail Korotov](https://github.com/millb))
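+
+A small sketch of the last item; the exact digits depend on the IEEE-754 binary representation of the value:
+
+``` sql
+-- Previously this required hex(reinterpretAsString(x)); now floats work directly.
+SELECT hex(toFloat32(1.5)) AS hex32, hex(toFloat64(1.5)) AS hex64
+```
+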
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-2}
+
+- Add gdb-index to the clickhouse binary with debug info. It will speed up the startup time of `gdb`. [\#6947](https://github.com/ClickHouse/ClickHouse/pull/6947) ([alesapin](https://github.com/alesapin))
+- Speed up deb packaging with a patched dpkg-deb which uses `pigz`. [\#6960](https://github.com/ClickHouse/ClickHouse/pull/6960) ([alesapin](https://github.com/alesapin))
+- Set `enable_fuzzing = 1` to enable libfuzzer instrumentation of all the project code. [\#7042](https://github.com/ClickHouse/ClickHouse/pull/7042) ([kyprizel](https://github.com/kyprizel))
+- Add a split build smoke test in CI. [\#7061](https://github.com/ClickHouse/ClickHouse/pull/7061) ([alesapin](https://github.com/alesapin))
+- Add a build with MemorySanitizer to CI. [\#7066](https://github.com/ClickHouse/ClickHouse/pull/7066) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Replace `libsparsehash` with `sparsehash-c11`. [\#6965](https://github.com/ClickHouse/ClickHouse/pull/6965) ([Azat Khuzhin](https://github.com/azat))
+
+#### Bug Fix {#bug-fix-5}
+
+- Fixed performance degradation of index analysis on complex keys on large tables. This fixes \#6924. [\#7075](https://github.com/ClickHouse/ClickHouse/pull/7075) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a logical error causing segfaults when selecting from a Kafka table with an empty topic. [\#6909](https://github.com/ClickHouse/ClickHouse/pull/6909) ([Ivan](https://github.com/abyss7))
+- Fixed a too early MySQL connection close in `MySQLBlockInputStream.cpp`. [\#6882](https://github.com/ClickHouse/ClickHouse/pull/6882) ([Clément Rodriguez](https://github.com/clemrodriguez))
+- Returned support for very old Linux kernels (fixes [\#6841](https://github.com/ClickHouse/ClickHouse/issues/6841)). [\#6853](https://github.com/ClickHouse/ClickHouse/pull/6853) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix possible data loss in an `insert select` query in case of an empty block in the input stream. \#6834 \#6862 [\#6911](https://github.com/ClickHouse/ClickHouse/pull/6911) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix for the function `arrayEnumerateUniqRanked` with empty arrays in params. [\#6928](https://github.com/ClickHouse/ClickHouse/pull/6928) ([proller](https://github.com/proller))
+- Fix complex queries with array joins and global subqueries. [\#6934](https://github.com/ClickHouse/ClickHouse/pull/6934) ([Ivan](https://github.com/abyss7))
+- Fix the `Unknown identifier` error in ORDER BY and GROUP BY with multiple JOINs. [\#7022](https://github.com/ClickHouse/ClickHouse/pull/7022) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed an `MSan` warning while executing functions with a `LowCardinality` argument. [\#7062](https://github.com/ClickHouse/ClickHouse/pull/7062) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+
+#### Backward Incompatible Change {#backward-incompatible-change-2}
+
+- Changed the serialization format of bitmap\* aggregate function states to improve performance. Serialized states of bitmap\* from previous versions cannot be read. [\#6908](https://github.com/ClickHouse/ClickHouse/pull/6908) ([Zhichang Yu](https://github.com/yuzhichang))
+
+## ClickHouse release 19.14 {#clickhouse-release-19-14}
+
+### ClickHouse release 19.14.7.15, 2019-10-02 {#clickhouse-release-19-14-7-15-2019-10-02}
+
+#### Bug Fix {#bug-fix-6}
+
+- This release also contains all bug fixes from 19.11.12.69.
+- Fixed compatibility for distributed queries between 19.14 and earlier versions. This fixes [\#7068](https://github.com/ClickHouse/ClickHouse/issues/7068). [\#7069](https://github.com/ClickHouse/ClickHouse/pull/7069) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse release 19.14.6.12, 2019-09-19 {#clickhouse-release-19-14-6-12-2019-09-19}
+
+#### Bug Fix {#bug-fix-7}
+
+- Fix for the function `arrayEnumerateUniqRanked` with empty arrays in params. [\#6928](https://github.com/ClickHouse/ClickHouse/pull/6928) ([proller](https://github.com/proller))
+- Fixed subquery names in queries with `ARRAY JOIN` and `GLOBAL IN subquery` with alias. Use the subquery alias for the external table name if it is specified. [\#6934](https://github.com/ClickHouse/ClickHouse/pull/6934) ([Ivan](https://github.com/abyss7))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-3}
+
+- Fix the [flapping](https://clickhouse-test-reports.s3.yandex.net/6944/aab95fd5175a513413c7395a73a82044bdafb906/functional_stateless_tests_(debug).html) test `00715_fetch_merged_or_mutated_part_zookeeper` by rewriting it as a shell script, because it needs to wait for mutations to apply. [\#6977](https://github.com/ClickHouse/ClickHouse/pull/6977) ([Alexander Kazakov](https://github.com/Akazz))
+- Fixed UBSan and MemSan failures in function `groupUniqArray` with an empty array argument. It was caused by placing an empty `PaddedPODArray` into the hash table zero cell, because the constructor for the zero cell value was not called. [\#6937](https://github.com/ClickHouse/ClickHouse/pull/6937) ([Amos Bird](https://github.com/amosbird))
+
+### ClickHouse release 19.14.3.3, 2019-09-10 {#clickhouse-release-19-14-3-3-2019-09-10}
+
+#### New Feature {#new-feature-4}
+
+- `WITH FILL` modifier for `ORDER BY` (continuation of [\#5069](https://github.com/ClickHouse/ClickHouse/issues/5069)). [\#6610](https://github.com/ClickHouse/ClickHouse/pull/6610) ([Anton Popov](https://github.com/CurtizJ))
+- `WITH TIES` modifier for `LIMIT` (continuation of [\#5069](https://github.com/ClickHouse/ClickHouse/issues/5069); see the sketch below). [\#6610](https://github.com/ClickHouse/ClickHouse/pull/6610) ([Anton Popov](https://github.com/CurtizJ))
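+
+A minimal sketch of both modifiers; `t` is a hypothetical table with a numeric column `n`:
+
+``` sql
+-- WITH FILL: generate the missing values between the selected rows.
+SELECT number FROM numbers(10) WHERE number % 3 = 1 ORDER BY number WITH FILL FROM 0 TO 10;
+
+-- WITH TIES: also return rows that are equal to the last row on the ORDER BY key.
+SELECT n FROM t ORDER BY n LIMIT 3 WITH TIES;
+```
+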
+- Parse unquoted `NULL` literal as NULL (if setting `format_csv_unquoted_null_literal_as_null=1`). Initialize null fields with default values if the data type of the field is not nullable (if setting `input_format_null_as_default=1`). [\#5990](https://github.com/ClickHouse/ClickHouse/issues/5990) [\#6055](https://github.com/ClickHouse/ClickHouse/pull/6055) ([tavplubix](https://github.com/tavplubix))
+- Support wildcards in paths of table functions `file` and `hdfs`. If the path contains wildcards, the table will be read-only. Example of usage: `select * from hdfs('hdfs://hdfs1:9000/some_dir/another_dir/*/file{0..9}{0..9}')` and `select * from file('some_dir/{some_file,another_file,yet_another}.tsv', 'TSV', 'value UInt32')`. [\#6092](https://github.com/ClickHouse/ClickHouse/pull/6092) ([Olga Khvostikova](https://github.com/stavrolia))
+- New `system.metric_log` table which stores values of `system.events` and `system.metrics` with a specified time interval. [\#6363](https://github.com/ClickHouse/ClickHouse/issues/6363) [\#6467](https://github.com/ClickHouse/ClickHouse/pull/6467) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) [\#6530](https://github.com/ClickHouse/ClickHouse/pull/6530) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Allow writing ClickHouse text logs to the `system.text_log` table. [\#6037](https://github.com/ClickHouse/ClickHouse/issues/6037) [\#6103](https://github.com/ClickHouse/ClickHouse/pull/6103) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) [\#6164](https://github.com/ClickHouse/ClickHouse/pull/6164) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Show private symbols in stack traces (this is done via parsing symbol tables of ELF files). Added information about file and line number in stack traces if debug info is present. Sped up symbol name lookup by indexing symbols present in the program. Added new SQL functions for introspection: `demangle` and `addressToLine`. Renamed function `symbolizeAddress` to `addressToSymbol` for consistency. Function `addressToSymbol` returns the mangled name for performance reasons and you have to apply `demangle`. Added setting `allow_introspection_functions`, which is turned off by default. [\#6201](https://github.com/ClickHouse/ClickHouse/pull/6201) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Table function `values` (the name is case-insensitive). It allows reading from a `VALUES` list as proposed in [\#5984](https://github.com/ClickHouse/ClickHouse/issues/5984). Example: `SELECT * FROM VALUES('a UInt64, s String', (1, 'one'), (2, 'two'), (3, 'three'))`. [\#6217](https://github.com/ClickHouse/ClickHouse/issues/6217). [\#6209](https://github.com/ClickHouse/ClickHouse/pull/6209) ([dimarub2000](https://github.com/dimarub2000))
+- Added the ability to alter storage settings. Syntax: `ALTER TABLE <table> MODIFY SETTING <setting_name> = <value>` (see the sketch below). [\#6366](https://github.com/ClickHouse/ClickHouse/pull/6366) [\#6669](https://github.com/ClickHouse/ClickHouse/pull/6669) [\#6685](https://github.com/ClickHouse/ClickHouse/pull/6685) ([alesapin](https://github.com/alesapin))
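+
+A sketch of the new syntax; `example_table` is hypothetical and `parts_to_throw_insert` is one of the MergeTree-level settings that can be changed this way:
+
+``` sql
+ALTER TABLE example_table MODIFY SETTING parts_to_throw_insert = 500
+```
+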
+- Support for removing detached parts. Syntax: `ALTER TABLE <table_name> DROP DETACHED PART '<part_id>'`. [\#6158](https://github.com/ClickHouse/ClickHouse/pull/6158) ([tavplubix](https://github.com/tavplubix))
+- Table constraints. Allow adding a constraint to a table definition which will be checked at insert. [\#5273](https://github.com/ClickHouse/ClickHouse/pull/5273) ([Gleb Novikov](https://github.com/NanoBjorn)) [\#6652](https://github.com/ClickHouse/ClickHouse/pull/6652) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Support for cascaded materialized views. [\#6324](https://github.com/ClickHouse/ClickHouse/pull/6324) ([Amos Bird](https://github.com/amosbird))
+- Turn on the query profiler by default to sample every query execution thread once a second. [\#6283](https://github.com/ClickHouse/ClickHouse/pull/6283) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Input format `ORC`. [\#6454](https://github.com/ClickHouse/ClickHouse/pull/6454) [\#6703](https://github.com/ClickHouse/ClickHouse/pull/6703) ([akonyaev90](https://github.com/akonyaev90))
+- Added two new functions: `sigmoid` and `tanh` (which are useful for machine learning applications). [\#6254](https://github.com/ClickHouse/ClickHouse/pull/6254) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Functions `hasToken(haystack, token)`, `hasTokenCaseInsensitive(haystack, token)` to check if a given token is in the haystack. A token is a maximal-length substring between two non-alphanumeric ASCII characters (or the boundaries of the haystack). The token must be a constant string. Supported by the tokenbf\_v1 index specialization. [\#6596](https://github.com/ClickHouse/ClickHouse/pull/6596), [\#6662](https://github.com/ClickHouse/ClickHouse/pull/6662) ([Vasily Nemkov](https://github.com/Enmk))
+- New function `neighbor(value, offset[, default_value])`. Allows reaching the previous/next value within a column in a block of data, as shown below. [\#5925](https://github.com/ClickHouse/ClickHouse/pull/5925) ([Alex Krash](https://github.com/alex-krash)) [6685365ab8c5b74f9650492c88a012596eb1b0c6](https://github.com/ClickHouse/ClickHouse/commit/6685365ab8c5b74f9650492c88a012596eb1b0c6) [341e2e4587a18065c2da1ca888c73389f48ce36c](https://github.com/ClickHouse/ClickHouse/commit/341e2e4587a18065c2da1ca888c73389f48ce36c) [Alexey Milovidov](https://github.com/alexey-milovidov)
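+
+A minimal illustration of `neighbor`; note that it works within a single block of data, so results on real multi-block tables may differ:
+
+``` sql
+SELECT
+    number,
+    neighbor(number, 1) AS next,              -- value from the following row, 0 at the edge
+    neighbor(number, -1, 999) AS prev_or_999  -- value from the preceding row, 999 as default
+FROM numbers(5)
+```
+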
+- Created function `currentUser()`, returning the login of the authorized user. Added alias `user()` for compatibility with MySQL. [\#6470](https://github.com/ClickHouse/ClickHouse/pull/6470) ([Alex Krash](https://github.com/alex-krash))
+- New aggregate functions `quantilesExactInclusive` and `quantilesExactExclusive`, which were proposed in [\#5885](https://github.com/ClickHouse/ClickHouse/issues/5885). [\#6477](https://github.com/ClickHouse/ClickHouse/pull/6477) ([dimarub2000](https://github.com/dimarub2000))
+- Function `bitmapRange(bitmap, range_begin, range_end)`, which returns a new set with the specified range (not including `range_end`). [\#6314](https://github.com/ClickHouse/ClickHouse/pull/6314) ([Zhichang Yu](https://github.com/yuzhichang))
+- Function `geohashesInBox(longitude_min, latitude_min, longitude_max, latitude_max, precision)`, which creates an array of precision-long strings of geohash boxes covering the provided area. [\#6127](https://github.com/ClickHouse/ClickHouse/pull/6127) ([Vasily Nemkov](https://github.com/Enmk))
+- Implement support for the INSERT query with `Kafka` tables (see the sketch after this list). [\#6012](https://github.com/ClickHouse/ClickHouse/pull/6012) ([Ivan](https://github.com/abyss7))
+- Added support for the `_partition` and `_timestamp` virtual columns in the Kafka engine. [\#6400](https://github.com/ClickHouse/ClickHouse/pull/6400) ([Ivan](https://github.com/abyss7))
+- Possibility to remove sensitive data from `query_log`, server logs, and the process list with regexp-based rules. [\#5710](https://github.com/ClickHouse/ClickHouse/pull/5710) ([filimonov](https://github.com/filimonov))
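+
+A hedged sketch for the two Kafka items above; `queue` stands for an already configured `Kafka` engine table with matching columns:
+
+``` sql
+-- Produce messages through the table engine (INSERT support):
+INSERT INTO queue VALUES (1, 'hello');
+
+-- Read the new virtual columns alongside the payload (reading consumes messages):
+SELECT _partition, _timestamp, * FROM queue LIMIT 10;
+```
+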
+#### Experimental Feature {#experimental-feature-2}
+
+- Input and output data format `Template`. It allows specifying a custom format string for input and output. [\#4354](https://github.com/ClickHouse/ClickHouse/issues/4354) [\#6727](https://github.com/ClickHouse/ClickHouse/pull/6727) ([tavplubix](https://github.com/tavplubix))
+- Implementation of `LIVE VIEW` tables that were originally proposed in [\#2898](https://github.com/ClickHouse/ClickHouse/pull/2898), prepared in [\#3925](https://github.com/ClickHouse/ClickHouse/issues/3925), and then updated in [\#5541](https://github.com/ClickHouse/ClickHouse/issues/5541). See [\#5541](https://github.com/ClickHouse/ClickHouse/issues/5541) for a detailed description. [\#5541](https://github.com/ClickHouse/ClickHouse/issues/5541) ([vzakaznikov](https://github.com/vzakaznikov)) [\#6425](https://github.com/ClickHouse/ClickHouse/pull/6425) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) [\#6656](https://github.com/ClickHouse/ClickHouse/pull/6656) ([vzakaznikov](https://github.com/vzakaznikov)) Note that the `LIVE VIEW` feature may be removed in future versions.
+
+#### Bug Fix {#bug-fix-8}
+
+- This release also contains all bug fixes from 19.13 and 19.11.
+- Fix segmentation fault when the table has skip indices and a vertical merge happens. [\#6723](https://github.com/ClickHouse/ClickHouse/pull/6723) ([alesapin](https://github.com/alesapin))
+- Fix per-column TTL with non-trivial column defaults. Previously, in case of a forced TTL merge with the `OPTIMIZE ... FINAL` query, expired values were replaced by type defaults instead of the user-specified column defaults. [\#6796](https://github.com/ClickHouse/ClickHouse/pull/6796) ([Anton Popov](https://github.com/CurtizJ))
+- Fix the Kafka message duplication problem on normal server restart. [\#6597](https://github.com/ClickHouse/ClickHouse/pull/6597) ([Ivan](https://github.com/abyss7))
+- Fixed an infinite loop when reading Kafka messages. Do not pause/resume the consumer on subscription at all; otherwise it may get paused indefinitely in some scenarios. [\#6354](https://github.com/ClickHouse/ClickHouse/pull/6354) ([Ivan](https://github.com/abyss7))
+- Fix the `Key expression contains comparison between inconvertible types` exception in the `bitmapContains` function. [\#6136](https://github.com/ClickHouse/ClickHouse/issues/6136) [\#6146](https://github.com/ClickHouse/ClickHouse/issues/6146) [\#6156](https://github.com/ClickHouse/ClickHouse/pull/6156) ([dimarub2000](https://github.com/dimarub2000))
+- Fix segfault with enabled `optimize_skip_unused_shards` and a missing sharding key. [\#6384](https://github.com/ClickHouse/ClickHouse/pull/6384) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed wrong code in mutations that may lead to memory corruption. Fixed a segfault with a read of address `0x14c0` that may happen due to concurrent `DROP TABLE` and `SELECT` from `system.parts` or `system.parts_columns`. Fixed a race condition in the preparation of mutation queries. Fixed a deadlock caused by `OPTIMIZE` of replicated tables and concurrent modification operations like ALTERs. [\#6514](https://github.com/ClickHouse/ClickHouse/pull/6514) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Removed extra logging in the MySQL interface. [\#6389](https://github.com/ClickHouse/ClickHouse/pull/6389) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Return the ability to parse boolean settings from ‘true’ and ‘false’ in the configuration file. [\#6278](https://github.com/ClickHouse/ClickHouse/pull/6278) ([alesapin](https://github.com/alesapin))
+- Fix a crash in the `quantile` and `median` functions over `Nullable(Decimal128)`. [\#6378](https://github.com/ClickHouse/ClickHouse/pull/6378) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed a possible incomplete result returned by a `SELECT` query with a `WHERE` condition on the primary key that contains a conversion to a Float type. It was caused by an incorrect check of monotonicity in the `toFloat` function. [\#6248](https://github.com/ClickHouse/ClickHouse/issues/6248) [\#6374](https://github.com/ClickHouse/ClickHouse/pull/6374) ([dimarub2000](https://github.com/dimarub2000))
+- Check the `max_expanded_ast_elements` setting for mutations. Clear mutations after `TRUNCATE TABLE`. [\#6205](https://github.com/ClickHouse/ClickHouse/pull/6205) ([Winter Zhang](https://github.com/zhang2014))
+- Fix JOIN results for key columns when used with `join_use_nulls`. Attach NULLs instead of column defaults. [\#6249](https://github.com/ClickHouse/ClickHouse/pull/6249) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix skip indices with vertical merge and alter. Fix for the `Bad size of marks file` exception. [\#6594](https://github.com/ClickHouse/ClickHouse/issues/6594) [\#6713](https://github.com/ClickHouse/ClickHouse/pull/6713) ([alesapin](https://github.com/alesapin))
+- Fix a rare crash in `ALTER MODIFY COLUMN` and vertical merge when one of the merged/altered parts is empty (0 rows). [\#6746](https://github.com/ClickHouse/ClickHouse/issues/6746) [\#6780](https://github.com/ClickHouse/ClickHouse/pull/6780) ([alesapin](https://github.com/alesapin))
+- Fixed a bug in the conversion of `LowCardinality` types in `AggregateFunctionFactory`. This fixes [\#6257](https://github.com/ClickHouse/ClickHouse/issues/6257). [\#6281](https://github.com/ClickHouse/ClickHouse/pull/6281) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix wrong behavior and possible segfaults in the `topK` and `topKWeighted` aggregate functions. [\#6404](https://github.com/ClickHouse/ClickHouse/pull/6404) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed unsafe code around the `getIdentifier` function. [\#6401](https://github.com/ClickHouse/ClickHouse/issues/6401) [\#6409](https://github.com/ClickHouse/ClickHouse/pull/6409) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a bug in the MySQL wire protocol (used while connecting to ClickHouse from a MySQL client). Caused by a heap buffer overflow in `PacketPayloadWriteBuffer`. [\#6212](https://github.com/ClickHouse/ClickHouse/pull/6212) ([Yuriy Baranov](https://github.com/yurriy))
+- Fixed a memory leak in the `bitmapSubsetInRange` function. [\#6819](https://github.com/ClickHouse/ClickHouse/pull/6819) ([Zhichang Yu](https://github.com/yuzhichang))
+- Fix a rare bug when a mutation is executed after a granularity change. [\#6816](https://github.com/ClickHouse/ClickHouse/pull/6816) ([alesapin](https://github.com/alesapin))
+- Allow protobuf messages with all fields set to default values. [\#6132](https://github.com/ClickHouse/ClickHouse/pull/6132) ([Vitaly Baranov](https://github.com/vitlibar))
+- Resolve a bug with the `nullIf` function when we pass `NULL` as the second argument. [\#6446](https://github.com/ClickHouse/ClickHouse/pull/6446) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Fix a rare bug with wrong memory allocation/deallocation in complex-key cache dictionaries with string fields, which leads to infinite memory consumption (looks like a memory leak). The bug reproduces when the string size is a power of two starting from eight (8, 16, 32, etc). [\#6447](https://github.com/ClickHouse/ClickHouse/pull/6447) ([alesapin](https://github.com/alesapin))
+- Fixed Gorilla encoding on small sequences, which caused the exception `Cannot write after end of buffer`. [\#6398](https://github.com/ClickHouse/ClickHouse/issues/6398) [\#6444](https://github.com/ClickHouse/ClickHouse/pull/6444) ([Vasily Nemkov](https://github.com/Enmk))
+- Allow using non-nullable types in JOINs with `join_use_nulls` enabled. [\#6705](https://github.com/ClickHouse/ClickHouse/pull/6705) ([Artem Zuikov](https://github.com/4ertus2))
+- Disable `Poco::AbstractConfiguration` substitutions in queries in `clickhouse-client`. [\#6706](https://github.com/ClickHouse/ClickHouse/pull/6706) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Avoid deadlock in `REPLACE PARTITION`. [\#6677](https://github.com/ClickHouse/ClickHouse/pull/6677) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Using `arrayReduce` for constant arguments may lead to a segfault. [\#6242](https://github.com/ClickHouse/ClickHouse/issues/6242) [\#6326](https://github.com/ClickHouse/ClickHouse/pull/6326) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix inconsistent parts which can appear if a replica was restored after `DROP PARTITION`. [\#6522](https://github.com/ClickHouse/ClickHouse/issues/6522) [\#6523](https://github.com/ClickHouse/ClickHouse/pull/6523) ([tavplubix](https://github.com/tavplubix))
+- Fixed a hang in the `JSONExtractRaw` function. [\#6195](https://github.com/ClickHouse/ClickHouse/issues/6195) [\#6198](https://github.com/ClickHouse/ClickHouse/pull/6198) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix a bug with incorrect skip index serialization and aggregation with adaptive granularity. [\#6594](https://github.com/ClickHouse/ClickHouse/issues/6594). [\#6748](https://github.com/ClickHouse/ClickHouse/pull/6748) ([alesapin](https://github.com/alesapin))
+- Fix the `WITH ROLLUP` and `WITH CUBE` modifiers of `GROUP BY` with two-level aggregation. [\#6225](https://github.com/ClickHouse/ClickHouse/pull/6225) ([Anton Popov](https://github.com/CurtizJ))
+- Fix a bug with writing secondary index marks with adaptive granularity. [\#6126](https://github.com/ClickHouse/ClickHouse/pull/6126) ([alesapin](https://github.com/alesapin))
+- Fix the initialization order during server startup. Since `StorageMergeTree::background_task_handle` is initialized in `startup()`, `MergeTreeBlockOutputStream::write()` may try to use it before initialization. Just check if it is initialized. [\#6080](https://github.com/ClickHouse/ClickHouse/pull/6080) ([Ivan](https://github.com/abyss7))
+- Clear the data buffer from the previous read operation if it was completed with an error. [\#6026](https://github.com/ClickHouse/ClickHouse/pull/6026) ([Nikolay](https://github.com/bopohaa))
+- Fix a bug with enabling adaptive granularity when creating a new replica for a Replicated\*MergeTree table. [\#6394](https://github.com/ClickHouse/ClickHouse/issues/6394) [\#6452](https://github.com/ClickHouse/ClickHouse/pull/6452) ([alesapin](https://github.com/alesapin))
+- Fixed a possible crash during server startup in case an exception happened in `libunwind` during an exception on access to the uninitialized `ThreadStatus` structure. [\#6456](https://github.com/ClickHouse/ClickHouse/pull/6456) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Fix a crash in the `yandexConsistentHash` function. Found by fuzz test. [\#6304](https://github.com/ClickHouse/ClickHouse/issues/6304) [\#6305](https://github.com/ClickHouse/ClickHouse/pull/6305) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed the possibility of hanging queries when the server is overloaded and the global thread pool becomes nearly full. This is more likely to happen on clusters with a large number of shards (hundreds), because distributed queries allocate a thread per connection to each shard. For example, this issue may reproduce if a cluster of 330 shards is processing 30 concurrent distributed queries. This issue affects all versions starting from 19.2. [\#6301](https://github.com/ClickHouse/ClickHouse/pull/6301) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed the logic of the `arrayEnumerateUniqRanked` function. [\#6423](https://github.com/ClickHouse/ClickHouse/pull/6423) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix segfault when decoding the symbol table. [\#6603](https://github.com/ClickHouse/ClickHouse/pull/6603) ([Amos Bird](https://github.com/amosbird))
+- Fixed an irrelevant exception in the cast of `LowCardinality(Nullable)` to a not-Nullable column in case it doesn't contain NULLs (e.g. in a query like `SELECT CAST(CAST('Hello' AS LowCardinality(Nullable(String))) AS String)`). [\#6094](https://github.com/ClickHouse/ClickHouse/issues/6094) [\#6119](https://github.com/ClickHouse/ClickHouse/pull/6119) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Removed extra quoting of descriptions in the `system.settings` table. [\#6696](https://github.com/ClickHouse/ClickHouse/issues/6696) [\#6699](https://github.com/ClickHouse/ClickHouse/pull/6699) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Avoid a possible deadlock in `TRUNCATE` of a replicated table. [\#6695](https://github.com/ClickHouse/ClickHouse/pull/6695) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix reading in order of the sorting key. [\#6189](https://github.com/ClickHouse/ClickHouse/pull/6189) ([Anton Popov](https://github.com/CurtizJ))
+- Fix the `ALTER TABLE ... UPDATE` query for tables with `enable_mixed_granularity_parts=1`. [\#6543](https://github.com/ClickHouse/ClickHouse/pull/6543) ([alesapin](https://github.com/alesapin))
+- Fix a bug opened by [\#4405](https://github.com/ClickHouse/ClickHouse/pull/4405) (since 19.4.0). Reproduces in queries to Distributed tables over MergeTree tables when we don't query any columns (`SELECT 1`). [\#6236](https://github.com/ClickHouse/ClickHouse/pull/6236) ([alesapin](https://github.com/alesapin))
+- Fixed overflow in integer division of a signed type by an unsigned type. The behaviour was exactly as in the C or C++ languages (integer promotion rules), which may be surprising. Please note that overflow is still possible when dividing a large signed number by a large unsigned number or vice versa (but that case is less usual). The issue existed in all server versions. [\#6214](https://github.com/ClickHouse/ClickHouse/issues/6214) [\#6233](https://github.com/ClickHouse/ClickHouse/pull/6233) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Limit the maximum sleep time for throttling when `max_execution_speed` or `max_execution_speed_bytes` is set. Fixed false errors like `Estimated query execution time (inf seconds) is too long`. [\#5547](https://github.com/ClickHouse/ClickHouse/issues/5547) [\#6232](https://github.com/ClickHouse/ClickHouse/pull/6232) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed issues with using `MATERIALIZED` columns and aliases in `MaterializedView`. [\#448](https://github.com/ClickHouse/ClickHouse/issues/448) [\#3484](https://github.com/ClickHouse/ClickHouse/issues/3484) [\#3450](https://github.com/ClickHouse/ClickHouse/issues/3450) [\#2878](https://github.com/ClickHouse/ClickHouse/issues/2878) [\#2285](https://github.com/ClickHouse/ClickHouse/issues/2285) [\#3796](https://github.com/ClickHouse/ClickHouse/pull/3796) ([Amos Bird](https://github.com/amosbird)) [\#6316](https://github.com/ClickHouse/ClickHouse/pull/6316) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix `FormatFactory` behaviour for input streams which are not implemented as a processor. [\#6495](https://github.com/ClickHouse/ClickHouse/pull/6495) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed a typo. [\#6631](https://github.com/ClickHouse/ClickHouse/pull/6631) ([Alex Ryndin](https://github.com/alexryndin))
+- Fixed a typo in an error message (is -> are). [\#6839](https://github.com/ClickHouse/ClickHouse/pull/6839) ([Denis Zhuravlev](https://github.com/den-crane))
+- Fixed an error while parsing a column list from a string if the type contained a comma (this issue was relevant for the `File`, `URL`, `HDFS` storages) [\#6217](https://github.com/ClickHouse/ClickHouse/issues/6217). [\#6209](https://github.com/ClickHouse/ClickHouse/pull/6209) ([dimarub2000](https://github.com/dimarub2000))
+
+#### Security Fix {#security-fix}
+
+- This release also contains all bug security fixes from 19.13 and 19.11.
+- Fixed the possibility of a fabricated query causing a server crash due to stack overflow in the SQL parser. Fixed the possibility of stack overflow in Merge and Distributed tables, materialized views, and conditions for row-level security that involve subqueries. [\#6433](https://github.com/ClickHouse/ClickHouse/pull/6433) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Improvement {#improvement-3}
+
+- Correct implementation of ternary logic for `AND/OR`. [\#6048](https://github.com/ClickHouse/ClickHouse/pull/6048) ([Alexander Kazakov](https://github.com/Akazz))
+- Now values and rows with expired TTL will be removed after an `OPTIMIZE ... FINAL` query from old parts without TTL info or with outdated TTL info, e.g. after an `ALTER ... MODIFY TTL` query. Added queries `SYSTEM STOP/START TTL MERGES` to disallow/allow assigning merges with TTL and filtering expired values in all merges. [\#6274](https://github.com/ClickHouse/ClickHouse/pull/6274) ([Anton Popov](https://github.com/CurtizJ))
+- Possibility to change the location of the ClickHouse history file for the client using the `CLICKHOUSE_HISTORY_FILE` env variable. [\#6840](https://github.com/ClickHouse/ClickHouse/pull/6840) ([filimonov](https://github.com/filimonov))
+- Remove the `dry_run` flag from `InterpreterSelectQuery`. … [\#6375](https://github.com/ClickHouse/ClickHouse/pull/6375) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Support `ASOF JOIN` with an `ON` section, as sketched below. [\#6211](https://github.com/ClickHouse/ClickHouse/pull/6211) ([Artem Zuikov](https://github.com/4ertus2))
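+
+A minimal sketch of `ASOF JOIN` with an `ON` section; `trades` and `quotes` are hypothetical tables with `symbol` and `ts` columns:
+
+``` sql
+-- For each trade, pick the latest quote at or before the trade time.
+SELECT t.symbol, t.ts, q.price
+FROM trades AS t
+ASOF JOIN quotes AS q ON t.symbol = q.symbol AND t.ts >= q.ts
+```
+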
+- Better support of skip indexes for mutations and replication. Support for the `MATERIALIZE/CLEAR INDEX ... IN PARTITION` query. `UPDATE x = x` recalculates all indices that use column `x`. [\#5053](https://github.com/ClickHouse/ClickHouse/pull/5053) ([Nikita Vasilev](https://github.com/nikvas0))
+- Allow `ATTACH`ing live views (for example, at server startup) regardless of the `allow_experimental_live_view` setting. [\#6754](https://github.com/ClickHouse/ClickHouse/pull/6754) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- For stack traces gathered by the query profiler, do not include stack frames generated by the query profiler itself. [\#6250](https://github.com/ClickHouse/ClickHouse/pull/6250) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Now the table functions `values`, `file`, `url`, `hdfs` have support for ALIAS columns. [\#6255](https://github.com/ClickHouse/ClickHouse/pull/6255) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Throw an exception if a `config.d` file doesn't have the corresponding root element as the config file. [\#6123](https://github.com/ClickHouse/ClickHouse/pull/6123) ([dimarub2000](https://github.com/dimarub2000))
+- Print extra info in the exception message for `no space left on device`. [\#6182](https://github.com/ClickHouse/ClickHouse/issues/6182), [\#6252](https://github.com/ClickHouse/ClickHouse/issues/6252) [\#6352](https://github.com/ClickHouse/ClickHouse/pull/6352) ([tavplubix](https://github.com/tavplubix))
+- When determining the shards of a `Distributed` table to be covered by a read query (for `optimize_skip_unused_shards` = 1), ClickHouse now checks conditions from both the `prewhere` and `where` clauses of the select statement. [\#6521](https://github.com/ClickHouse/ClickHouse/pull/6521) ([Alexander Kazakov](https://github.com/Akazz))
+- Enabled `SIMDJSON` for machines without AVX2 but with the SSE 4.2 and PCLMUL instruction sets. [\#6285](https://github.com/ClickHouse/ClickHouse/issues/6285) [\#6320](https://github.com/ClickHouse/ClickHouse/pull/6320) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- ClickHouse can work on filesystems without `O_DIRECT` support (such as ZFS and BtrFS) without additional tuning. [\#4449](https://github.com/ClickHouse/ClickHouse/issues/4449) [\#6730](https://github.com/ClickHouse/ClickHouse/pull/6730) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Support push-down predicate for the final subquery. [\#6120](https://github.com/ClickHouse/ClickHouse/pull/6120) ([TCeason](https://github.com/TCeason)) [\#6162](https://github.com/ClickHouse/ClickHouse/pull/6162) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Better `JOIN ON` key extraction. [\#6131](https://github.com/ClickHouse/ClickHouse/pull/6131) ([Artem Zuikov](https://github.com/4ertus2))
+- Updated `SIMDJSON`. [\#6285](https://github.com/ClickHouse/ClickHouse/issues/6285). [\#6306](https://github.com/ClickHouse/ClickHouse/pull/6306) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Optimize selecting the smallest column for `SELECT count()` queries. [\#6344](https://github.com/ClickHouse/ClickHouse/pull/6344) ([Amos Bird](https://github.com/amosbird))
+- Added the `strict` parameter in `windowFunnel()`. When `strict` is set, `windowFunnel()` applies conditions only for unique values (see the sketch below). [\#6548](https://github.com/ClickHouse/ClickHouse/pull/6548) ([achimbab](https://github.com/achimbab))
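+
+A hedged sketch of the `strict` mode; the table `events` with columns `user_id`, `ts`, `event` is hypothetical:
+
+``` sql
+-- How far each user gets through the funnel within one hour; with 'strict',
+-- repeated values of a matched condition do not advance the chain.
+SELECT
+    user_id,
+    windowFunnel(3600, 'strict')(ts, event = 'view', event = 'cart', event = 'buy') AS level
+FROM events
+GROUP BY user_id
+```
+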
+- Safer interface of `mysqlxx::Pool`. [\#6150](https://github.com/ClickHouse/ClickHouse/pull/6150) ([avasiliev](https://github.com/avasiliev))
+- The options line size when executing with the `--help` option now corresponds to the terminal size. [\#6590](https://github.com/ClickHouse/ClickHouse/pull/6590) ([dimarub2000](https://github.com/dimarub2000))
+- Disable “read in order” optimization for aggregation without keys. [\#6599](https://github.com/ClickHouse/ClickHouse/pull/6599) ([Anton Popov](https://github.com/CurtizJ))
+- HTTP status codes for the `INCORRECT_DATA` and `TYPE_MISMATCH` error codes were changed from the default `500 Internal Server Error` to `400 Bad Request`. [\#6271](https://github.com/ClickHouse/ClickHouse/pull/6271) ([Alexander Rodin](https://github.com/a-rodin))
+- Move the Join object from `ExpressionAction` into `AnalyzedJoin`. `ExpressionAnalyzer` and `ExpressionAction` do not know about the `Join` class anymore. Its logic is hidden behind the `AnalyzedJoin` interface. [\#6801](https://github.com/ClickHouse/ClickHouse/pull/6801) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed a possible deadlock of distributed queries when one of the shards is localhost but the query is sent via a network connection. [\#6759](https://github.com/ClickHouse/ClickHouse/pull/6759) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Changed the semantics of multiple-table `RENAME` to avoid possible deadlocks. [\#6757](https://github.com/ClickHouse/ClickHouse/issues/6757). [\#6756](https://github.com/ClickHouse/ClickHouse/pull/6756) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Rewrote the MySQL compatibility server to prevent loading the full packet payload into memory. Decreased memory consumption for each connection to approximately `2 * DBMS_DEFAULT_BUFFER_SIZE` (read/write buffers). [\#5811](https://github.com/ClickHouse/ClickHouse/pull/5811) ([Yuriy Baranov](https://github.com/yurriy))
+- Move the AST alias interpreting logic out of the parser, which doesn't have to know anything about query semantics. [\#6108](https://github.com/ClickHouse/ClickHouse/pull/6108) ([Artem Zuikov](https://github.com/4ertus2))
+- Slightly safer parsing of `NamesAndTypesList`. [\#6408](https://github.com/ClickHouse/ClickHouse/issues/6408). [\#6410](https://github.com/ClickHouse/ClickHouse/pull/6410) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `clickhouse-copier`: Allow using `where_condition` from the config with a `partition_key` alias in the query for checking partition existence (earlier it was used only in data-reading queries). [\#6577](https://github.com/ClickHouse/ClickHouse/pull/6577) ([proller](https://github.com/proller))
+- Added an optional message argument in `throwIf`, as shown below. ([\#5772](https://github.com/ClickHouse/ClickHouse/issues/5772)) [\#6329](https://github.com/ClickHouse/ClickHouse/pull/6329) ([Vdimir](https://github.com/Vdimir))
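+
+A small example of the optional message argument:
+
+``` sql
+-- Throws "Number 3 is not allowed" instead of the generic throwIf message.
+SELECT throwIf(number = 3, 'Number 3 is not allowed') FROM numbers(10)
+```
+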
+- A server exception that occurred while sending insertion data is now also processed in the client. [\#5891](https://github.com/ClickHouse/ClickHouse/issues/5891) [\#6711](https://github.com/ClickHouse/ClickHouse/pull/6711) ([dimarub2000](https://github.com/dimarub2000))
+- Added a metric `DistributedFilesToInsert` that shows the total number of files in the filesystem that are selected to be sent to remote servers by Distributed tables. The number is summed across all shards. [\#6600](https://github.com/ClickHouse/ClickHouse/pull/6600) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Move most of the JOINs prepare logic from `ExpressionAction/ExpressionAnalyzer` to `AnalyzedJoin`. [\#6785](https://github.com/ClickHouse/ClickHouse/pull/6785) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix the TSan [warning](https://clickhouse-test-reports.s3.yandex.net/6399/c1c1d1daa98e199e620766f1bd06a5921050a00d/functional_stateful_tests_(thread).html) ‘lock-order-inversion’. [\#6740](https://github.com/ClickHouse/ClickHouse/pull/6740) ([Vasily Nemkov](https://github.com/Enmk))
+- Better information messages about the lack of Linux capabilities. Log fatal errors with the “fatal” level, which will make them easier to find in `system.text_log`. [\#6441](https://github.com/ClickHouse/ClickHouse/pull/6441) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- When dumping temporary data to disk to restrict memory usage during `GROUP BY`, `ORDER BY`, the free disk space was not checked. The fix adds a new setting `min_free_disk_space`; when the free disk space is smaller than the threshold, the query stops and throws `ErrorCodes::NOT_ENOUGH_SPACE`. [\#6678](https://github.com/ClickHouse/ClickHouse/pull/6678) ([Weiqing Xu](https://github.com/weiqxu)) [\#6691](https://github.com/ClickHouse/ClickHouse/pull/6691) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Removed the recursive rwlock by thread. It makes no sense, because threads are reused between queries. A `SELECT` query may acquire a lock in one thread, hold a lock from another thread and exit from the first thread. At the same time, the first thread can be reused by a `DROP` query. This will lead to false “Attempt to acquire exclusive lock recursively” messages. [\#6771](https://github.com/ClickHouse/ClickHouse/pull/6771) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Split `ExpressionAnalyzer.appendJoin()`. Prepare a place in `ExpressionAnalyzer` for `MergeJoin`. [\#6524](https://github.com/ClickHouse/ClickHouse/pull/6524) ([Artem Zuikov](https://github.com/4ertus2))
+- Added the `mysql_native_password` authentication plugin to the MySQL compatibility server. [\#6194](https://github.com/ClickHouse/ClickHouse/pull/6194) ([Yuriy Baranov](https://github.com/yurriy))
+- Fewer `clock_gettime` calls; fixed ABI compatibility between debug/release in `Allocator` (an insignificant issue). [\#6197](https://github.com/ClickHouse/ClickHouse/pull/6197) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Move `collectUsedColumns` from `ExpressionAnalyzer` to `SyntaxAnalyzer`. `SyntaxAnalyzer` now builds `required_source_columns` itself. [\#6416](https://github.com/ClickHouse/ClickHouse/pull/6416) ([Artem Zuikov](https://github.com/4ertus2))
+- Added the setting `joined_subquery_requires_alias` to require aliases for subselects and table functions in `FROM` when more than one table is present (i.e. queries with JOINs); see the example below. [\#6733](https://github.com/ClickHouse/ClickHouse/pull/6733) ([Artem Zuikov](https://github.com/4ertus2))
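+
+A short illustration of the new setting:
+
+``` sql
+SET joined_subquery_requires_alias = 1;
+
+-- Rejected without the `AS a` / `AS b` aliases while the setting is enabled:
+SELECT * FROM (SELECT 1 AS x) AS a JOIN (SELECT 1 AS x) AS b USING (x);
+```
+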
+- Extracted the `GetAggregatesVisitor` class from `ExpressionAnalyzer`. [\#6458](https://github.com/ClickHouse/ClickHouse/pull/6458) ([Artem Zuikov](https://github.com/4ertus2))
+- `system.query_log`: change the data type of the `type` column to `Enum`. [\#6265](https://github.com/ClickHouse/ClickHouse/pull/6265) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Static linking of the `sha256_password` authentication plugin. [\#6512](https://github.com/ClickHouse/ClickHouse/pull/6512) ([Yuriy Baranov](https://github.com/yurriy))
+- Avoid an extra dependency for the setting `compile` to work. In previous versions, the user could get errors like `cannot open crti.o`, `unable to find library -lc` etc. [\#6309](https://github.com/ClickHouse/ClickHouse/pull/6309) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- More validation of input that may come from a malicious replica. [\#6303](https://github.com/ClickHouse/ClickHouse/pull/6303) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Now the `clickhouse-obfuscator` file is available in the `clickhouse-client` package. In previous versions it was available as `clickhouse obfuscator` (with whitespace). [\#5816](https://github.com/ClickHouse/ClickHouse/issues/5816) [\#6609](https://github.com/ClickHouse/ClickHouse/pull/6609) ([dimarub2000](https://github.com/dimarub2000))
+- Fixed a deadlock when we have at least two queries that read at least two tables in different order and another query that performs a DDL operation on one of the tables. Fixed another very rare deadlock. [\#6764](https://github.com/ClickHouse/ClickHouse/pull/6764) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added the `os_thread_ids` column to `system.processes` and `system.query_log` for better debugging possibilities. [\#6763](https://github.com/ClickHouse/ClickHouse/pull/6763) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- A workaround for PHP mysqlnd extension bugs which occur when `sha256_password` is used as the default authentication plugin (described in [\#6031](https://github.com/ClickHouse/ClickHouse/issues/6031)). [\#6113](https://github.com/ClickHouse/ClickHouse/pull/6113) ([Yuriy Baranov](https://github.com/yurriy))
+- Remove the unneeded place with changed nullability columns. [\#6693](https://github.com/ClickHouse/ClickHouse/pull/6693) ([Artem Zuikov](https://github.com/4ertus2))
+- Set the default value of `queue_max_wait_ms` to zero, because the current value (five seconds) makes no sense. There are rare circumstances when this setting has any use. Added the settings `replace_running_query_max_wait_ms`, `kafka_max_wait_ms` and `connection_pool_max_wait_ms` for disambiguation. [\#6692](https://github.com/ClickHouse/ClickHouse/pull/6692) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Extracted `SelectQueryExpressionAnalyzer` from `ExpressionAnalyzer`. Keep the last one for non-select queries. [\#6499](https://github.com/ClickHouse/ClickHouse/pull/6499) ([Artem Zuikov](https://github.com/4ertus2))
+- Removed duplicated input and output formats. [\#6239](https://github.com/ClickHouse/ClickHouse/pull/6239) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Allow the user to override the `poll_interval` and `idle_connection_timeout` settings on connection. [\#6230](https://github.com/ClickHouse/ClickHouse/pull/6230) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `MergeTree` now has an additional option `ttl_only_drop_parts` (disabled by default) to avoid partial pruning of parts, so that they are dropped completely when all rows in a part are expired (see the sketch below). [\#6191](https://github.com/ClickHouse/ClickHouse/pull/6191) ([Sergi Vladykin](https://github.com/svladykin))
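+
+A sketch of the new option; the table is hypothetical. With `ttl_only_drop_parts = 1`, whole parts are dropped once every row in them has expired, instead of rewriting parts to prune individual rows:
+
+``` sql
+CREATE TABLE events_ttl
+(
+    d Date,
+    x UInt64
+)
+ENGINE = MergeTree
+PARTITION BY toYYYYMM(d)
+ORDER BY x
+TTL d + INTERVAL 1 MONTH
+SETTINGS ttl_only_drop_parts = 1
+```
+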
+- Type checks for set index functions. Throw an exception if the function got a wrong type. This fixes the fuzz test with UBSan. [\#6511](https://github.com/ClickHouse/ClickHouse/pull/6511) ([Nikita Vasilev](https://github.com/nikvas0))
+
+#### Performance Improvement {#performance-improvement-2}
+
+- Optimize queries with an `ORDER BY expressions` clause, where `expressions` have a coinciding prefix with the sorting key in `MergeTree` tables. This optimization is controlled by the `optimize_read_in_order` setting (see the sketch after this list). [\#6054](https://github.com/ClickHouse/ClickHouse/pull/6054) [\#6629](https://github.com/ClickHouse/ClickHouse/pull/6629) ([Anton Popov](https://github.com/CurtizJ))
+- Allow using multiple threads during parts loading and removal. [\#6372](https://github.com/ClickHouse/ClickHouse/issues/6372) [\#6074](https://github.com/ClickHouse/ClickHouse/issues/6074) [\#6438](https://github.com/ClickHouse/ClickHouse/pull/6438) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Implemented a batch variant of updating aggregate function states. It may lead to performance benefits. [\#6435](https://github.com/ClickHouse/ClickHouse/pull/6435) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Use the `FastOps` library for the functions `exp`, `log`, `sigmoid`, `tanh`. FastOps is a fast vector math library from Michael Parakhin (Yandex CTO). Improved performance of the `exp` and `log` functions more than 6 times. The functions `exp` and `log` with a `Float32` argument will return `Float32` (in previous versions they always returned `Float64`). Now `exp(nan)` may return `inf`. The result of the `exp` and `log` functions may not be the machine-representable number nearest to the true answer. [\#6254](https://github.com/ClickHouse/ClickHouse/pull/6254) ([alexey-milovidov](https://github.com/alexey-milovidov)) Using the Danila Kutenin variant to make FastOps work. [\#6317](https://github.com/ClickHouse/ClickHouse/pull/6317) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Disable consecutive key optimization for `UInt8/16`. [\#6298](https://github.com/ClickHouse/ClickHouse/pull/6298) [\#6701](https://github.com/ClickHouse/ClickHouse/pull/6701) ([akuzm](https://github.com/akuzm))
+- Improved performance of the `simdjson` library by getting rid of dynamic allocation in `ParsedJson::Iterator`. [\#6479](https://github.com/ClickHouse/ClickHouse/pull/6479) ([Vitaly Baranov](https://github.com/vitlibar))
+- Pre-fault pages when allocating memory with `mmap()`. [\#6667](https://github.com/ClickHouse/ClickHouse/pull/6667) ([akuzm](https://github.com/akuzm))
+- Fix a performance bug in `Decimal` comparison. [\#6380](https://github.com/ClickHouse/ClickHouse/pull/6380) ([Artem Zuikov](https://github.com/4ertus2))
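+
+A sketch of the first item in the list above (see the forward reference there); `hits` is a hypothetical table with sorting key `(CounterID, EventDate)`:
+
+``` sql
+SET optimize_read_in_order = 1;
+
+-- The ORDER BY prefix matches the table sorting key, so rows are streamed in
+-- on-disk order instead of being fully sorted.
+SELECT * FROM hits ORDER BY CounterID, EventDate LIMIT 10;
+```
+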
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-4}
+
+- Remove the Compiler (runtime template instantiation) because we've won over its performance. [\#6646](https://github.com/ClickHouse/ClickHouse/pull/6646) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added a performance test to show the degradation of performance in gcc-9 in a more isolated way. [\#6302](https://github.com/ClickHouse/ClickHouse/pull/6302) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added the table function `numbers_mt`, which is a multithreaded version of `numbers`. Updated performance tests with hash functions. [\#6554](https://github.com/ClickHouse/ClickHouse/pull/6554) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Comparison mode in `clickhouse-benchmark`. [\#6220](https://github.com/ClickHouse/ClickHouse/issues/6220) [\#6343](https://github.com/ClickHouse/ClickHouse/pull/6343) ([dimarub2000](https://github.com/dimarub2000))
+- Best effort for printing stack traces. Also added `SIGPROF` as a debugging signal to print the stack trace of a running thread. [\#6529](https://github.com/ClickHouse/ClickHouse/pull/6529) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Every function in its own file, part 10. [\#6321](https://github.com/ClickHouse/ClickHouse/pull/6321) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Remove doubled const `TABLE_IS_READ_ONLY`. [\#6566](https://github.com/ClickHouse/ClickHouse/pull/6566) ([filimonov](https://github.com/filimonov))
+- Formatting changes for `StringHashMap` PR [\#5417](https://github.com/ClickHouse/ClickHouse/issues/5417). [\#6700](https://github.com/ClickHouse/ClickHouse/pull/6700) ([akuzm](https://github.com/akuzm))
+- Better subquery for join creation in `ExpressionAnalyzer`. [\#6824](https://github.com/ClickHouse/ClickHouse/pull/6824) ([Artem Zuikov](https://github.com/4ertus2))
+- Removed a redundant condition (found by PVS Studio). [\#6775](https://github.com/ClickHouse/ClickHouse/pull/6775) ([akuzm](https://github.com/akuzm))
+- Separated the hash table interface for `ReverseIndex`. [\#6672](https://github.com/ClickHouse/ClickHouse/pull/6672) ([akuzm](https://github.com/akuzm))
+- Refactoring of settings. [\#6689](https://github.com/ClickHouse/ClickHouse/pull/6689) ([alesapin](https://github.com/alesapin))
+- Added comments for `set` index functions. [\#6319](https://github.com/ClickHouse/ClickHouse/pull/6319) ([Nikita Vasilev](https://github.com/nikvas0))
+- Increase the OOM score in the debug version on Linux. [\#6152](https://github.com/ClickHouse/ClickHouse/pull/6152) ([akuzm](https://github.com/akuzm))
+- HDFS HA now works in debug builds. [\#6650](https://github.com/ClickHouse/ClickHouse/pull/6650) ([Weiqing Xu](https://github.com/weiqxu))
+- Added a test for `transform_query_for_external_database`. [\#6388](https://github.com/ClickHouse/ClickHouse/pull/6388) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added a test for multiple materialized views for a Kafka table. [\#6509](https://github.com/ClickHouse/ClickHouse/pull/6509) ([Ivan](https://github.com/abyss7))
+- Make a better build scheme. [\#6500](https://github.com/ClickHouse/ClickHouse/pull/6500) ([Ivan](https://github.com/abyss7))
+- Fixed the `test_external_dictionaries` integration test in case it was executed under a non-root user. [\#6507](https://github.com/ClickHouse/ClickHouse/pull/6507) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- The bug reproduces when the total size of written packets exceeds `DBMS_DEFAULT_BUFFER_SIZE`. [\#6204](https://github.com/ClickHouse/ClickHouse/pull/6204) ([Yuriy Baranov](https://github.com/yurriy))
- Added a test for `RENAME` table race condition [\#6752](https://github.com/ClickHouse/ClickHouse/pull/6752) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Avoid data race on Settings in `KILL QUERY`. [\#6753](https://github.com/ClickHouse/ClickHouse/pull/6753) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Add integration test for handling errors by a cache dictionary. [\#6755](https://github.com/ClickHouse/ClickHouse/pull/6755) ([Vitaly Baranov](https://github.com/vitlibar))
- Disable parsing of ELF object files on Mac OS, because it makes no sense. [\#6578](https://github.com/ClickHouse/ClickHouse/pull/6578) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Attempt to make the changelog generator better. [\#6327](https://github.com/ClickHouse/ClickHouse/pull/6327) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Adding `-Wshadow` switch for GCC. [\#6325](https://github.com/ClickHouse/ClickHouse/pull/6325) ([kreuzerkrieg](https://github.com/kreuzerkrieg))
- Removed obsolete code for `mimalloc` support. [\#6715](https://github.com/ClickHouse/ClickHouse/pull/6715) ([alexey-milovidov](https://github.com/alexey-milovidov))
- `zlib-ng` determines x86 capabilities and saves this info to global variables. This is done in the deflateInit call, which may be made by different threads simultaneously. To avoid multithreaded writes, do it on library startup. [\#6141](https://github.com/ClickHouse/ClickHouse/pull/6141) ([akuzm](https://github.com/akuzm))
- Regression test for a bug in join which was fixed in [\#5192](https://github.com/ClickHouse/ClickHouse/issues/5192). [\#6147](https://github.com/ClickHouse/ClickHouse/pull/6147) ([Bakhtiyor Ruziev](https://github.com/theruziev))
- Fixed MSan report. [\#6144](https://github.com/ClickHouse/ClickHouse/pull/6144) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix flapping TTL test. [\#6782](https://github.com/ClickHouse/ClickHouse/pull/6782) ([Anton Popov](https://github.com/CurtizJ))
- Fixed false data race in `MergeTreeDataPart::is_frozen` field. [\#6583](https://github.com/ClickHouse/ClickHouse/pull/6583) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed timeouts in fuzz test. In the previous version, it managed to find a false hangup in the query `SELECT * FROM numbers_mt(gccMurmurHash(''))`. [\#6582](https://github.com/ClickHouse/ClickHouse/pull/6582) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added debug checks to `static_cast` of columns. [\#6581](https://github.com/ClickHouse/ClickHouse/pull/6581) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Support for Oracle Linux in official RPM packages. [\#6356](https://github.com/ClickHouse/ClickHouse/issues/6356) [\#6585](https://github.com/ClickHouse/ClickHouse/pull/6585) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Changed JSON perftests from `once` to `loop` type. [\#6536](https://github.com/ClickHouse/ClickHouse/pull/6536) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- `odbc-bridge.cpp` defines `main()`, so it should not be included in `clickhouse-lib`. [\#6538](https://github.com/ClickHouse/ClickHouse/pull/6538) ([Orivej Desh](https://github.com/orivej))
- Test for crash in `FULL|RIGHT JOIN` with nulls in right table's keys. [\#6362](https://github.com/ClickHouse/ClickHouse/pull/6362) ([Artem Zuikov](https://github.com/4ertus2))
- Added a test for the limit on expansion of aliases, just in case. [\#6442](https://github.com/ClickHouse/ClickHouse/pull/6442) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Switched from `boost::filesystem` to `std::filesystem` where appropriate. [\#6253](https://github.com/ClickHouse/ClickHouse/pull/6253) [\#6385](https://github.com/ClickHouse/ClickHouse/pull/6385) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added RPM packages to the website. [\#6251](https://github.com/ClickHouse/ClickHouse/pull/6251) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Add a test for the fixed `Unknown identifier` exception in `IN` section. [\#6708](https://github.com/ClickHouse/ClickHouse/pull/6708) ([Artem Zuikov](https://github.com/4ertus2))
- Simplify `shared_ptr_helper` because people face difficulties understanding it. [\#6675](https://github.com/ClickHouse/ClickHouse/pull/6675) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added performance tests for the fixed Gorilla and DoubleDelta codecs. [\#6179](https://github.com/ClickHouse/ClickHouse/pull/6179) ([Vasily Nemkov](https://github.com/Enmk))
- Split the integration test `test_dictionaries` into 4 separate tests. [\#6776](https://github.com/ClickHouse/ClickHouse/pull/6776) ([Vitaly Baranov](https://github.com/vitlibar))
- Fix PVS-Studio warning in `PipelineExecutor`. [\#6777](https://github.com/ClickHouse/ClickHouse/pull/6777) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Allow using `library` dictionary source with ASan. [\#6482](https://github.com/ClickHouse/ClickHouse/pull/6482) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added option to generate a changelog from a list of PRs. [\#6350](https://github.com/ClickHouse/ClickHouse/pull/6350) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Lock the `TinyLog` storage when reading. [\#6226](https://github.com/ClickHouse/ClickHouse/pull/6226) ([akuzm](https://github.com/akuzm))
- Check for broken symlinks in CI. [\#6634](https://github.com/ClickHouse/ClickHouse/pull/6634) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Increase timeout for the “stack overflow” test because it may take a long time in debug build. [\#6637](https://github.com/ClickHouse/ClickHouse/pull/6637) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added a check for double whitespaces. [\#6643](https://github.com/ClickHouse/ClickHouse/pull/6643) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix `new/delete` memory tracking when building with sanitizers. Tracking is not clear. It only prevents memory limit exceptions in tests. [\#6450](https://github.com/ClickHouse/ClickHouse/pull/6450) ([Artem Zuikov](https://github.com/4ertus2))
- Enable the check of undefined symbols while linking. [\#6453](https://github.com/ClickHouse/ClickHouse/pull/6453) ([Ivan](https://github.com/abyss7))
- Avoid rebuilding `hyperscan` every day. [\#6307](https://github.com/ClickHouse/ClickHouse/pull/6307) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed UBSan report in `ProtobufWriter`. [\#6163](https://github.com/ClickHouse/ClickHouse/pull/6163) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Don't allow using query profiler with sanitizers because it is not compatible. [\#6769](https://github.com/ClickHouse/ClickHouse/pull/6769) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Add a test for reloading a dictionary after failure by timer. [\#6114](https://github.com/ClickHouse/ClickHouse/pull/6114) ([Vitaly Baranov](https://github.com/vitlibar))
- Fix inconsistency in `PipelineExecutor::prepareProcessor` argument type. [\#6494](https://github.com/ClickHouse/ClickHouse/pull/6494) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Added a test for bad URIs. [\#6493](https://github.com/ClickHouse/ClickHouse/pull/6493) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added more checks to `CAST` function. This should get more information about the segmentation fault in fuzzy test. [\#6346](https://github.com/ClickHouse/ClickHouse/pull/6346) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Added `gcc-9` support to the `docker/builder` container that builds the image locally. [\#6333](https://github.com/ClickHouse/ClickHouse/pull/6333) ([Gleb Novikov](https://github.com/NanoBjorn))
- Test for primary key with `LowCardinality(String)`. [\#5044](https://github.com/ClickHouse/ClickHouse/issues/5044) [\#6219](https://github.com/ClickHouse/ClickHouse/pull/6219) ([dimarub2000](https://github.com/dimarub2000))
- Fixed tests affected by slow stack traces printing. [\#6315](https://github.com/ClickHouse/ClickHouse/pull/6315) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Add a test case for crash in `groupUniqArray` fixed in [\#6029](https://github.com/ClickHouse/ClickHouse/pull/6029). [\#4402](https://github.com/ClickHouse/ClickHouse/issues/4402) [\#6129](https://github.com/ClickHouse/ClickHouse/pull/6129) ([akuzm](https://github.com/akuzm))
- Fixed tests of index mutations. [\#6645](https://github.com/ClickHouse/ClickHouse/pull/6645) ([Nikita Vasilev](https://github.com/nikvas0))
- In performance test, do not read the query log for queries we didn't run. [\#6427](https://github.com/ClickHouse/ClickHouse/pull/6427) ([akuzm](https://github.com/akuzm))
- Materialized view now can be created with any low cardinality types regardless of the setting about suspicious low cardinality types. [\#6428](https://github.com/ClickHouse/ClickHouse/pull/6428) ([Olga Khvostikova](https://github.com/stavrolia))
- Updated tests for the `send_logs_level` setting. [\#6207](https://github.com/ClickHouse/ClickHouse/pull/6207) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fix build under gcc-8.2. [\#6196](https://github.com/ClickHouse/ClickHouse/pull/6196) ([Max Akhmedov](https://github.com/zlobober))
- Fix build with internal libc++. [\#6724](https://github.com/ClickHouse/ClickHouse/pull/6724) ([Ivan](https://github.com/abyss7))
- Fix shared build with `rdkafka` library. [\#6101](https://github.com/ClickHouse/ClickHouse/pull/6101) ([Ivan](https://github.com/abyss7))
- Fixes for Mac OS build (incomplete). [\#6390](https://github.com/ClickHouse/ClickHouse/pull/6390) ([alexey-milovidov](https://github.com/alexey-milovidov)) [\#6429](https://github.com/ClickHouse/ClickHouse/pull/6429) ([alex-zaitsev](https://github.com/alex-zaitsev))
- Fix “splitted” build. [\#6618](https://github.com/ClickHouse/ClickHouse/pull/6618) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Other build fixes: [\#6186](https://github.com/ClickHouse/ClickHouse/pull/6186) ([Amos Bird](https://github.com/amosbird)) [\#6486](https://github.com/ClickHouse/ClickHouse/pull/6486) [\#6348](https://github.com/ClickHouse/ClickHouse/pull/6348) ([vxider](https://github.com/Vxider)) [\#6744](https://github.com/ClickHouse/ClickHouse/pull/6744) ([Ivan](https://github.com/abyss7)) [\#6016](https://github.com/ClickHouse/ClickHouse/pull/6016) [\#6421](https://github.com/ClickHouse/ClickHouse/pull/6421) [\#6491](https://github.com/ClickHouse/ClickHouse/pull/6491) ([proller](https://github.com/proller))
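
As a quick sketch of the `numbers_mt` table function mentioned in this list (assuming it takes the same arguments as `numbers`):

```sql
-- Single-threaded generator:
SELECT sum(number) FROM numbers(100000000);

-- Multithreaded variant, handy for performance tests of hash functions and the like:
SELECT sum(number) FROM numbers_mt(100000000);
```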

#### Backward Incompatible Change {#backward-incompatible-change-3}

- Removed the rarely used table function `catBoostPool` and storage `CatBoostPool`. If you have used this table function, please write an email to `clickhouse-feedback@yandex-team.com`. Note that CatBoost integration remains and will be supported. [\#6279](https://github.com/ClickHouse/ClickHouse/pull/6279) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Disable `ANY RIGHT JOIN` and `ANY FULL JOIN` by default. Set the `any_join_distinct_right_table_keys` setting to enable them. [\#5126](https://github.com/ClickHouse/ClickHouse/issues/5126) [\#6351](https://github.com/ClickHouse/ClickHouse/pull/6351) ([Artem Zuikov](https://github.com/4ertus2))

## ClickHouse release 19.13 {#clickhouse-release-19-13}

### ClickHouse release 19.13.6.51, 2019-10-02 {#clickhouse-release-19-13-6-51-2019-10-02}

#### Bug Fix {#bug-fix-9}

- This release also contains all bug fixes from 19.11.12.69.

### ClickHouse release 19.13.5.44, 2019-09-20 {#clickhouse-release-19-13-5-44-2019-09-20}

#### Bug Fix {#bug-fix-10}

- This release also contains all bug fixes from 19.14.6.12.
- Fixed possible inconsistent state of the table while executing a `DROP` query for a replicated table while ZooKeeper is not accessible. [\#6045](https://github.com/ClickHouse/ClickHouse/issues/6045) [\#6413](https://github.com/ClickHouse/ClickHouse/pull/6413) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
- Fix data race in StorageMerge [\#6717](https://github.com/ClickHouse/ClickHouse/pull/6717) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix bug introduced in query profiler which leads to endless recv from socket. [\#6386](https://github.com/ClickHouse/ClickHouse/pull/6386) ([alesapin](https://github.com/alesapin))
- Fix excessive CPU usage while executing the `JSONExtractRaw` function over a boolean value (illustrated after this list). [\#6208](https://github.com/ClickHouse/ClickHouse/pull/6208) ([Vitaly Baranov](https://github.com/vitlibar))
- Fixes the regression while pushing to materialized view. [\#6415](https://github.com/ClickHouse/ClickHouse/pull/6415) ([Ivan](https://github.com/abyss7))
- Table function `url` had a vulnerability which allowed the attacker to inject arbitrary HTTP headers in the request. This issue was found by [Nikita Tikhomirov](https://github.com/NSTikhomirov). [\#6466](https://github.com/ClickHouse/ClickHouse/pull/6466) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix useless `AST` check in Set index. [\#6510](https://github.com/ClickHouse/ClickHouse/issues/6510) [\#6651](https://github.com/ClickHouse/ClickHouse/pull/6651) ([Nikita Vasilev](https://github.com/nikvas0))
- Fixed parsing of `AggregateFunction` values embedded in the query. [\#6575](https://github.com/ClickHouse/ClickHouse/issues/6575) [\#6773](https://github.com/ClickHouse/ClickHouse/pull/6773) ([Zhichang Yu](https://github.com/yuzhichang))
- Fixed wrong behaviour of the `trim` functions family. [\#6647](https://github.com/ClickHouse/ClickHouse/pull/6647) ([alexey-milovidov](https://github.com/alexey-milovidov))
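
To illustrate the `JSONExtractRaw` entry above, here is the shape of the affected call; the JSON literal is made up for the example:

```sql
-- Extracts the raw JSON of a boolean field; this kind of call
-- previously caused excessive CPU usage.
SELECT JSONExtractRaw('{"passed": true}', 'passed');  -- returns 'true'
```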

### ClickHouse release 19.13.4.32, 2019-09-10 {#clickhouse-release-19-13-4-32-2019-09-10}

#### Bug Fix {#bug-fix-11}

- This release also contains all bug security fixes from 19.11.9.52 and 19.11.10.54.
- Fixed data race in the `system.parts` table and `ALTER` query. [\#6245](https://github.com/ClickHouse/ClickHouse/issues/6245) [\#6513](https://github.com/ClickHouse/ClickHouse/pull/6513) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed mismatched header in streams that happened in case of reading from an empty distributed table with sample and prewhere. [\#6167](https://github.com/ClickHouse/ClickHouse/issues/6167) ([Lixiang Qian](https://github.com/fancyqlx)) [\#6823](https://github.com/ClickHouse/ClickHouse/pull/6823) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fixed crash when using `IN` clause with a subquery with a tuple. [\#6125](https://github.com/ClickHouse/ClickHouse/issues/6125) [\#6550](https://github.com/ClickHouse/ClickHouse/pull/6550) ([tavplubix](https://github.com/tavplubix))
- Fix the case with same column names in `GLOBAL JOIN ON` section. [\#6181](https://github.com/ClickHouse/ClickHouse/pull/6181) ([Artem Zuikov](https://github.com/4ertus2))
- Fix crash when casting types to `Decimal` that do not support it. Throw an exception instead. [\#6297](https://github.com/ClickHouse/ClickHouse/pull/6297) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed crash in the `extractAll()` function. [\#6644](https://github.com/ClickHouse/ClickHouse/pull/6644) ([Artem Zuikov](https://github.com/4ertus2))
- Query transformation for `MySQL`, `ODBC`, `JDBC` table functions now works properly for `SELECT WHERE` queries with multiple `AND` expressions. [\#6381](https://github.com/ClickHouse/ClickHouse/issues/6381) [\#6676](https://github.com/ClickHouse/ClickHouse/pull/6676) ([dimarub2000](https://github.com/dimarub2000))
- Added previous declaration checks for MySQL 8 integration. [\#6569](https://github.com/ClickHouse/ClickHouse/pull/6569) ([Rafael David Tinoco](https://github.com/rafaeldtinoco))

#### Security Fix {#security-fix-1}

- Fix two vulnerabilities in codecs in the decompression phase (a malicious user can fabricate compressed data that will lead to buffer overflow in decompression). [\#6670](https://github.com/ClickHouse/ClickHouse/pull/6670) ([Artem Zuikov](https://github.com/4ertus2))

### ClickHouse release 19.13.3.26, 2019-08-22 {#clickhouse-release-19-13-3-26-2019-08-22}

#### Bug Fix {#bug-fix-12}

- Fix `ALTER TABLE ... UPDATE` query for tables with `enable_mixed_granularity_parts=1`. [\#6543](https://github.com/ClickHouse/ClickHouse/pull/6543) ([alesapin](https://github.com/alesapin))
- Fix NPE when using the IN clause with a subquery with a tuple. [\#6125](https://github.com/ClickHouse/ClickHouse/issues/6125) [\#6550](https://github.com/ClickHouse/ClickHouse/pull/6550) ([tavplubix](https://github.com/tavplubix))
- Fixed an issue that if a stale replica becomes alive, it may still have data parts that were removed by DROP PARTITION. [\#6522](https://github.com/ClickHouse/ClickHouse/issues/6522) [\#6523](https://github.com/ClickHouse/ClickHouse/pull/6523) ([tavplubix](https://github.com/tavplubix))
- Fixed issue with parsing CSV [\#6426](https://github.com/ClickHouse/ClickHouse/issues/6426) [\#6559](https://github.com/ClickHouse/ClickHouse/pull/6559) ([tavplubix](https://github.com/tavplubix))
- Fixed data race in the system.parts table and ALTER query. This fixes [\#6245](https://github.com/ClickHouse/ClickHouse/issues/6245). [\#6513](https://github.com/ClickHouse/ClickHouse/pull/6513) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed wrong code in mutations that may lead to memory corruption. Fixed segfault with read of address `0x14c0` that may happen due to concurrent `DROP TABLE` and `SELECT` from `system.parts` or `system.parts_columns`. Fixed race condition in preparation of mutation queries. Fixed deadlock caused by `OPTIMIZE` of Replicated tables and concurrent modification operations like ALTERs. [\#6514](https://github.com/ClickHouse/ClickHouse/pull/6514) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed possible data loss after an `ALTER DELETE` query on a table with skipping index. [\#6224](https://github.com/ClickHouse/ClickHouse/issues/6224) [\#6282](https://github.com/ClickHouse/ClickHouse/pull/6282) ([Nikita Vasilev](https://github.com/nikvas0))

#### Security Fix {#security-fix-2}

- If the attacker has write access to ZooKeeper and is able to run a custom server available from the network where ClickHouse runs, it can create a custom-built malicious server that will act as a ClickHouse replica and register it in ZooKeeper. When another replica fetches a data part from the malicious replica, it can force clickhouse-server to write to an arbitrary path on the filesystem. Found by Eldar Zaitov, information security team at Yandex. [\#6247](https://github.com/ClickHouse/ClickHouse/pull/6247) ([alexey-milovidov](https://github.com/alexey-milovidov))

### ClickHouse release 19.13.2.19, 2019-08-14 {#clickhouse-release-19-13-2-19-2019-08-14}

#### New Feature {#new-feature-5}

- Sampling profiler on query level. [Example](https://gist.github.com/alexey-milovidov/92758583dd41c24c360fdb8d6a4da194). [\#4247](https://github.com/ClickHouse/ClickHouse/issues/4247) ([laplab](https://github.com/laplab)) [\#6124](https://github.com/ClickHouse/ClickHouse/pull/6124) ([alexey-milovidov](https://github.com/alexey-milovidov)) [\#6250](https://github.com/ClickHouse/ClickHouse/pull/6250) [\#6283](https://github.com/ClickHouse/ClickHouse/pull/6283) [\#6386](https://github.com/ClickHouse/ClickHouse/pull/6386)
- Allow to specify a list of columns with the `COLUMNS('regexp')` expression that works like a more sophisticated variant of the `*` asterisk (sketched, together with the next item, after this list). [\#5951](https://github.com/ClickHouse/ClickHouse/pull/5951) ([mfridental](https://github.com/mfridental)), ([alexey-milovidov](https://github.com/alexey-milovidov))
- `CREATE TABLE AS table_function()` is now possible [\#6057](https://github.com/ClickHouse/ClickHouse/pull/6057) ([dimarub2000](https://github.com/dimarub2000))
- Adam optimizer for stochastic gradient descent is used by default in the `stochasticLinearRegression()` and `stochasticLogisticRegression()` aggregate functions, because it shows good quality without almost any tuning. [\#6000](https://github.com/ClickHouse/ClickHouse/pull/6000) ([Quid37](https://github.com/Quid37))
- Added functions for working with the custom week number [\#5212](https://github.com/ClickHouse/ClickHouse/pull/5212) ([Andy Yang](https://github.com/andyyzh))
- `RENAME` queries now work with all storages. [\#5953](https://github.com/ClickHouse/ClickHouse/pull/5953) ([Ivan](https://github.com/abyss7))
- Now the client receives logs from the server with any desired level by setting `send_logs_level`, regardless of the log level specified in server settings. [\#5964](https://github.com/ClickHouse/ClickHouse/pull/5964) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
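
A minimal sketch of the two features referenced above; the table and column names are hypothetical:

```sql
-- COLUMNS('regexp') expands to all columns whose names match the regular expression.
SELECT COLUMNS('^price') FROM products;

-- CREATE TABLE AS table_function() creates a table backed by the table function.
CREATE TABLE first_ten AS numbers(10);
```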

#### Backward Incompatible Change {#backward-incompatible-change-4}

- The setting `input_format_defaults_for_omitted_fields` is enabled by default. Inserts into Distributed tables need this setting to be the same across the cluster (you need to set it before rolling the update). It enables calculation of complex default expressions for omitted fields in `JSONEachRow` and `CSV*` formats. It should be the expected behaviour but may lead to a negligible performance difference. [\#6043](https://github.com/ClickHouse/ClickHouse/pull/6043) ([Artem Zuikov](https://github.com/4ertus2)), [\#5625](https://github.com/ClickHouse/ClickHouse/pull/5625) ([akuzm](https://github.com/akuzm))

#### Experimental Features {#experimental-features}

- New query processing pipeline. Use the `experimental_use_processors=1` option to enable it. Use at your own risk. [\#4914](https://github.com/ClickHouse/ClickHouse/pull/4914) ([Nikolai Kochetov](https://github.com/KochetovNicolai))

#### Bug Fix {#bug-fix-13}

- Kafka integration has been fixed in this version.
- Fixed `DoubleDelta` encoding of `Int64` for large `DoubleDelta` values, improved `DoubleDelta` encoding of random data for `Int32` (codec declaration sketched after this list). [\#5998](https://github.com/ClickHouse/ClickHouse/pull/5998) ([Vasily Nemkov](https://github.com/Enmk))
- Fixed overestimation of `max_rows_to_read` if the setting `merge_tree_uniform_read_distribution` is set to 0. [\#6019](https://github.com/ClickHouse/ClickHouse/pull/6019) ([alexey-milovidov](https://github.com/alexey-milovidov))
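
The `DoubleDelta` codec touched by the fix above is a column compression codec, declared per column in the same way as `Gorilla`; a minimal sketch with hypothetical names:

```sql
CREATE TABLE metrics
(
    ts DateTime CODEC(DoubleDelta),   -- suited to monotonically growing values
    value Float64 CODEC(Gorilla)      -- suited to slowly changing floats
)
ENGINE = MergeTree ORDER BY ts;
```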

#### Improvement {#improvement-4}

- Throws an exception if a `config.d` file does not have the corresponding root element as the config file [\#6123](https://github.com/ClickHouse/ClickHouse/pull/6123) ([dimarub2000](https://github.com/dimarub2000))

#### Performance Improvement {#performance-improvement-3}

- Optimize `count()`. Now it uses the smallest column (if possible). [\#6028](https://github.com/ClickHouse/ClickHouse/pull/6028) ([Amos Bird](https://github.com/amosbird))

#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-5}

- Report memory usage in performance tests. [\#5899](https://github.com/ClickHouse/ClickHouse/pull/5899) ([akuzm](https://github.com/akuzm))
- Fix build with external `libcxx` [\#6010](https://github.com/ClickHouse/ClickHouse/pull/6010) ([Ivan](https://github.com/abyss7))
- Fix shared build with `rdkafka` library [\#6101](https://github.com/ClickHouse/ClickHouse/pull/6101) ([Ivan](https://github.com/abyss7))

## ClickHouse release 19.11 {#clickhouse-release-19-11}

### ClickHouse release 19.11.13.74, 2019-11-01 {#clickhouse-release-19-11-13-74-2019-11-01}

#### Bug Fix {#bug-fix-14}

- Fixed rare crash in `ALTER MODIFY COLUMN` and vertical merge when one of the merged/altered parts is empty (0 rows). [\#6780](https://github.com/ClickHouse/ClickHouse/pull/6780) ([alesapin](https://github.com/alesapin))
- Manual update of `SIMDJSON`. This fixes possible flooding of stderr files with bogus json diagnostic messages. [\#7548](https://github.com/ClickHouse/ClickHouse/pull/7548) ([Alexander Kazakov](https://github.com/Akazz))
- Fixed bug with `mrk` file extension for mutations ([alesapin](https://github.com/alesapin))

### ClickHouse release 19.11.12.69, 2019-10-02 {#clickhouse-release-19-11-12-69-2019-10-02}

#### Bug Fix {#bug-fix-15}

- Fixed performance degradation of index analysis on complex keys on large tables. This fixes [\#6924](https://github.com/ClickHouse/ClickHouse/issues/6924). [\#7075](https://github.com/ClickHouse/ClickHouse/pull/7075) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Avoid rare SIGSEGV while sending data in tables with Distributed engine (`Failed to send batch: file with index XXXXX is absent`). [\#7032](https://github.com/ClickHouse/ClickHouse/pull/7032) ([Azat Khuzhin](https://github.com/azat))
- Fix `Unknown identifier` with multiple joins (query shape sketched below). This fixes [\#5254](https://github.com/ClickHouse/ClickHouse/issues/5254). [\#7022](https://github.com/ClickHouse/ClickHouse/pull/7022) ([Artem Zuikov](https://github.com/4ertus2))
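
The `Unknown identifier` fix above concerns queries that chain several joins; a sketch of the affected query shape with hypothetical tables:

```sql
-- Referencing columns across more than one join could previously
-- fail with an "Unknown identifier" error.
SELECT a.id, b.x, c.y
FROM a
JOIN b ON a.id = b.id
JOIN c ON b.id = c.id;
```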

### ClickHouse release 19.11.11.57, 2019-09-13 {#clickhouse-release-19-11-11-57-2019-09-13}

- Fix logical error causing segfaults when selecting from a Kafka empty topic. [\#6902](https://github.com/ClickHouse/ClickHouse/issues/6902) [\#6909](https://github.com/ClickHouse/ClickHouse/pull/6909) ([Ivan](https://github.com/abyss7))
- Fix for function `arrayEnumerateUniqRanked` with empty arrays in params. [\#6928](https://github.com/ClickHouse/ClickHouse/pull/6928) ([proller](https://github.com/proller))

### ClickHouse release 19.11.10.54, 2019-09-10 {#clickhouse-release-19-11-10-54-2019-09-10}

#### Bug Fix {#bug-fix-16}

- Store offsets for Kafka messages manually to be able to commit them all at once for all partitions. Fixes potential duplication in the “one consumer - many partitions” scenario. [\#6872](https://github.com/ClickHouse/ClickHouse/pull/6872) ([Ivan](https://github.com/abyss7))

### ClickHouse release 19.11.9.52, 2019-09-6 {#clickhouse-release-19-11-9-52-2019-09-6}

- Improve error handling in cache dictionaries. [\#6737](https://github.com/ClickHouse/ClickHouse/pull/6737) ([Vitaly Baranov](https://github.com/vitlibar))
- Fixed bug in function `arrayEnumerateUniqRanked`. [\#6779](https://github.com/ClickHouse/ClickHouse/pull/6779) ([proller](https://github.com/proller))
- Fix `JSONExtract` function while extracting a `Tuple` from JSON. [\#6718](https://github.com/ClickHouse/ClickHouse/pull/6718) ([Vitaly Baranov](https://github.com/vitlibar))
- Fixed possible data loss after an `ALTER DELETE` query on a table with skipping index. [\#6224](https://github.com/ClickHouse/ClickHouse/issues/6224) [\#6282](https://github.com/ClickHouse/ClickHouse/pull/6282) ([Nikita Vasilev](https://github.com/nikvas0))
- Fixed performance test. [\#6392](https://github.com/ClickHouse/ClickHouse/pull/6392) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Parquet: Fix reading boolean columns. [\#6579](https://github.com/ClickHouse/ClickHouse/pull/6579) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed wrong behaviour of the `nullIf` function for constant arguments (see the example after this list). [\#6518](https://github.com/ClickHouse/ClickHouse/pull/6518) ([Guillaume Tassery](https://github.com/YiuRULE)) [\#6580](https://github.com/ClickHouse/ClickHouse/pull/6580) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix Kafka messages duplication problem on normal server restart. [\#6597](https://github.com/ClickHouse/ClickHouse/pull/6597) ([Ivan](https://github.com/abyss7))
- Fixed an issue when a long `ALTER UPDATE` or `ALTER DELETE` may prevent regular merges from running. Prevent mutations from executing if there are not enough free threads available. [\#6502](https://github.com/ClickHouse/ClickHouse/issues/6502) [\#6617](https://github.com/ClickHouse/ClickHouse/pull/6617) ([tavplubix](https://github.com/tavplubix))
- Fixed an error with processing “timezone” in the server configuration file. [\#6709](https://github.com/ClickHouse/ClickHouse/pull/6709) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix kafka tests. [\#6805](https://github.com/ClickHouse/ClickHouse/pull/6805) ([Ivan](https://github.com/abyss7))
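
To illustrate the `nullIf` entry above: the function returns NULL when both arguments are equal and the first argument otherwise, including for constants:

```sql
SELECT
    nullIf('a', 'a'),  -- NULL: the arguments are equal
    nullIf('a', 'b');  -- 'a': the arguments differ
```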

#### Security Fix {#security-fix-3}

- If the attacker has write access to ZooKeeper and is able to run a custom server available from the network where ClickHouse runs, it can create a custom-built malicious server that will act as a ClickHouse replica and register it in ZooKeeper. When another replica fetches a data part from the malicious replica, it can force clickhouse-server to write to an arbitrary path on the filesystem. Found by Eldar Zaitov, information security team at Yandex. [\#6247](https://github.com/ClickHouse/ClickHouse/pull/6247) ([alexey-milovidov](https://github.com/alexey-milovidov))

### ClickHouse release 19.11.8.46, 2019-08-22 {#clickhouse-release-19-11-8-46-2019-08-22}

#### Bug Fix {#bug-fix-17}

- Fix `ALTER TABLE ... UPDATE` query for tables with `enable_mixed_granularity_parts=1`. [\#6543](https://github.com/ClickHouse/ClickHouse/pull/6543) ([alesapin](https://github.com/alesapin))
- Fix NPE when using the IN clause with a subquery with a tuple. [\#6125](https://github.com/ClickHouse/ClickHouse/issues/6125) [\#6550](https://github.com/ClickHouse/ClickHouse/pull/6550) ([tavplubix](https://github.com/tavplubix))
- Fixed an issue that if a stale replica becomes alive, it may still have data parts that were removed by DROP PARTITION. [\#6522](https://github.com/ClickHouse/ClickHouse/issues/6522) [\#6523](https://github.com/ClickHouse/ClickHouse/pull/6523) ([tavplubix](https://github.com/tavplubix))
- Fixed issue with parsing CSV [\#6426](https://github.com/ClickHouse/ClickHouse/issues/6426) [\#6559](https://github.com/ClickHouse/ClickHouse/pull/6559) ([tavplubix](https://github.com/tavplubix))
- Fixed data race in the system.parts table and ALTER query. This fixes [\#6245](https://github.com/ClickHouse/ClickHouse/issues/6245). [\#6513](https://github.com/ClickHouse/ClickHouse/pull/6513) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed wrong code in mutations that may lead to memory corruption. Fixed segfault with read of address `0x14c0` that may happen due to concurrent `DROP TABLE` and `SELECT` from `system.parts` or `system.parts_columns`. Fixed race condition in preparation of mutation queries. Fixed deadlock caused by `OPTIMIZE` of Replicated tables and concurrent modification operations like ALTERs. [\#6514](https://github.com/ClickHouse/ClickHouse/pull/6514) ([alexey-milovidov](https://github.com/alexey-milovidov))

### ClickHouse release 19.11.7.40, 2019-08-14 {#clickhouse-release-19-11-7-40-2019-08-14}

#### Bug fix {#bug-fix-18}

- Kafka integration has been fixed in this version.
- Fixed segfault when using `arrayReduce` for constant arguments. [\#6326](https://github.com/ClickHouse/ClickHouse/pull/6326) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed `toFloat()` monotonicity. [\#6374](https://github.com/ClickHouse/ClickHouse/pull/6374) ([dimarub2000](https://github.com/dimarub2000))
- Fixed segfault with enabled `optimize_skip_unused_shards` and missing sharding key. [\#6384](https://github.com/ClickHouse/ClickHouse/pull/6384) ([CurtizJ](https://github.com/CurtizJ))
- Fixed logic of the `arrayEnumerateUniqRanked` function. [\#6423](https://github.com/ClickHouse/ClickHouse/pull/6423) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Removed extra verbose logging from the MySQL handler. [\#6389](https://github.com/ClickHouse/ClickHouse/pull/6389) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix wrong behavior and possible segfaults in `topK` and `topKWeighted` aggregated functions. [\#6404](https://github.com/ClickHouse/ClickHouse/pull/6404) ([CurtizJ](https://github.com/CurtizJ))
- Do not expose virtual columns in the `system.columns` table. This is required for backward compatibility. [\#6406](https://github.com/ClickHouse/ClickHouse/pull/6406) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix bug with memory allocation for string fields in complex key cache dictionary. [\#6447](https://github.com/ClickHouse/ClickHouse/pull/6447) ([alesapin](https://github.com/alesapin))
- Fix bug with enabling adaptive granularity when creating a new replica for a `Replicated*MergeTree` table. [\#6452](https://github.com/ClickHouse/ClickHouse/pull/6452) ([alesapin](https://github.com/alesapin))
- Fix infinite loop when reading Kafka messages. [\#6354](https://github.com/ClickHouse/ClickHouse/pull/6354) ([abyss7](https://github.com/abyss7))
- Fixed the possibility of a fabricated query to cause server crash due to stack overflow in the SQL parser and the possibility of stack overflow in `Merge` and `Distributed` tables [\#6433](https://github.com/ClickHouse/ClickHouse/pull/6433) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed Gorilla encoding error on small sequences. [\#6444](https://github.com/ClickHouse/ClickHouse/pull/6444) ([Enmk](https://github.com/Enmk))

#### Improvement {#improvement-5}

- Allow the user to override `poll_interval` and `idle_connection_timeout` settings on connection. [\#6230](https://github.com/ClickHouse/ClickHouse/pull/6230) ([alexey-milovidov](https://github.com/alexey-milovidov))

### ClickHouse release 19.11.5.28, 2019-08-05 {#clickhouse-release-19-11-5-28-2019-08-05}

#### Bug fix {#bug-fix-19}

- Fixed the possibility of hanging queries when the server is overloaded. [\#6301](https://github.com/ClickHouse/ClickHouse/pull/6301) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix FPE in the yandexConsistentHash function. This fixes [\#6304](https://github.com/ClickHouse/ClickHouse/issues/6304). [\#6126](https://github.com/ClickHouse/ClickHouse/pull/6126) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed bug in conversion of `LowCardinality` types in `AggregateFunctionFactory`. This fixes [\#6257](https://github.com/ClickHouse/ClickHouse/issues/6257). [\#6281](https://github.com/ClickHouse/ClickHouse/pull/6281) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fix parsing of `bool` settings from `true` and `false` strings in configuration files. [\#6278](https://github.com/ClickHouse/ClickHouse/pull/6278) ([alesapin](https://github.com/alesapin))
- Fix rare bug with incompatible stream headers in queries to a `Distributed` table over a `MergeTree` table when part of `WHERE` moves to `PREWHERE`. [\#6236](https://github.com/ClickHouse/ClickHouse/pull/6236) ([alesapin](https://github.com/alesapin))
- Fixed overflow in integer division of signed type to unsigned type. This fixes [\#6214](https://github.com/ClickHouse/ClickHouse/issues/6214). [\#6233](https://github.com/ClickHouse/ClickHouse/pull/6233) ([alexey-milovidov](https://github.com/alexey-milovidov))

#### Backward Incompatible Change {#backward-incompatible-change-5}

- `Kafka` is still broken.

### ClickHouse release 19.11.4.24, 2019-08-01 {#clickhouse-release-19-11-4-24-2019-08-01}

#### Bug Fix {#bug-fix-20}

- Fix bug with writing secondary indices marks with adaptive granularity. [\#6126](https://github.com/ClickHouse/ClickHouse/pull/6126) ([alesapin](https://github.com/alesapin))
- Fix `WITH ROLLUP` and `WITH CUBE` modifiers of `GROUP BY` with two-level aggregation. [\#6225](https://github.com/ClickHouse/ClickHouse/pull/6225) ([Anton Popov](https://github.com/CurtizJ))
- Fixed hang in `JSONExtractRaw` function. Fixed [\#6195](https://github.com/ClickHouse/ClickHouse/issues/6195) [\#6198](https://github.com/ClickHouse/ClickHouse/pull/6198) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix segfault in ExternalLoader::reloadOutdated(). [\#6082](https://github.com/ClickHouse/ClickHouse/pull/6082) ([Vitaly Baranov](https://github.com/vitlibar))
- Fixed the case when the server may close listening sockets but not shutdown and continue serving remaining queries. You may end up with two running clickhouse-server processes. Sometimes the server may return an error `bad_function_call` for remaining queries. [\#6231](https://github.com/ClickHouse/ClickHouse/pull/6231) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed useless and incorrect condition on the update field for initial loading of external dictionaries via ODBC, MySQL, ClickHouse and HTTP. This fixes [\#6069](https://github.com/ClickHouse/ClickHouse/issues/6069) [\#6083](https://github.com/ClickHouse/ClickHouse/pull/6083) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed irrelevant exception in cast of `LowCardinality(Nullable)` to not-Nullable column in case it doesn't contain Nulls (e.g. in a query like `SELECT CAST(CAST('Hello' AS LowCardinality(Nullable(String))) AS String)`. [\#6094](https://github.com/ClickHouse/ClickHouse/issues/6094) [\#6119](https://github.com/ClickHouse/ClickHouse/pull/6119) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fix non-deterministic result of the “uniq” aggregate function in extremely rare cases. The bug was present in all ClickHouse versions. [\#6058](https://github.com/ClickHouse/ClickHouse/pull/6058) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Segfault when we set a little bit too high CIDR on the function `IPv6CIDRToRange` (see the sketch after this list). [\#6068](https://github.com/ClickHouse/ClickHouse/pull/6068) ([Guillaume Tassery](https://github.com/YiuRULE))
- Fixed small memory leak when the server throws many exceptions from many different contexts. [\#6144](https://github.com/ClickHouse/ClickHouse/pull/6144) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix the situation when the consumer got paused before subscription and not resumed afterwards. [\#6075](https://github.com/ClickHouse/ClickHouse/pull/6075) ([Ivan](https://github.com/abyss7)) Note that Kafka is broken in this version.
- Clearing the Kafka data buffer from the previous read operation that was completed with an error [\#6026](https://github.com/ClickHouse/ClickHouse/pull/6026) ([Nikolay](https://github.com/bopohaa)) Note that Kafka is broken in this version.
- Since `StorageMergeTree::background_task_handle` is initialized in `startup()`, `MergeTreeBlockOutputStream::write()` may try to use it before initialization. Just check if it is initialized. [\#6080](https://github.com/ClickHouse/ClickHouse/pull/6080) ([Ivan](https://github.com/abyss7))
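
The segfault entry above involves `IPv6CIDRToRange`, which returns the lower and upper bounds of a CIDR block; a minimal sketch (the address is made up):

```sql
-- Returns a tuple with the first and last addresses of the /32 network.
SELECT IPv6CIDRToRange(toIPv6('2001:db8::'), 32);
```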

#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-6}

- Added official `rpm` packages. [\#5740](https://github.com/ClickHouse/ClickHouse/pull/5740) ([proller](https://github.com/proller)) ([alesapin](https://github.com/alesapin))
- Add an ability to build `.rpm` and `.tgz` packages with the `packager` script. [\#5769](https://github.com/ClickHouse/ClickHouse/pull/5769) ([alesapin](https://github.com/alesapin))
- Fixes for the “Arcadia” build system. [\#6223](https://github.com/ClickHouse/ClickHouse/pull/6223) ([proller](https://github.com/proller))

#### Backward Incompatible Change {#backward-incompatible-change-6}

- `Kafka` is broken in this version.

### ClickHouse release 19.11.3.11, 2019-07-18 {#clickhouse-release-19-11-3-11-2019-07-18}

#### New Feature {#new-feature-6}

- Added support for prepared statements. [\#5331](https://github.com/ClickHouse/ClickHouse/pull/5331/) ([Alexander](https://github.com/sanych73)) [\#5630](https://github.com/ClickHouse/ClickHouse/pull/5630) ([alexey-milovidov](https://github.com/alexey-milovidov))
- `DoubleDelta` and `Gorilla` column codecs [\#5600](https://github.com/ClickHouse/ClickHouse/pull/5600) ([Vasily Nemkov](https://github.com/Enmk))
- Added the `os_thread_priority` setting that allows to control the “nice” value of query processing threads that is used by the OS to adjust dynamic scheduling priority. It requires `CAP_SYS_NICE` capabilities to work. This implements [\#5858](https://github.com/ClickHouse/ClickHouse/issues/5858) [\#5909](https://github.com/ClickHouse/ClickHouse/pull/5909) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Implement `_topic`, `_offset`, `_key` columns for the Kafka engine [\#5382](https://github.com/ClickHouse/ClickHouse/pull/5382) ([Ivan](https://github.com/abyss7)) Note that Kafka is broken in this version.
- Add aggregate function combinator `-Resample` [\#5590](https://github.com/ClickHouse/ClickHouse/pull/5590) ([hcz](https://github.com/hczhcz))
- Aggregate functions `groupArrayMovingSum(win_size)(x)` and `groupArrayMovingAvg(win_size)(x)`, which calculate moving sum/avg with or without window-size limitation (example after this list). [\#5595](https://github.com/ClickHouse/ClickHouse/pull/5595) ([inv2004](https://github.com/inv2004))
- Add synonym `arrayFlatten` \<-\> `flatten` [\#5764](https://github.com/ClickHouse/ClickHouse/pull/5764) ([hcz](https://github.com/hczhcz))
- Integrate H3 function `geoToH3` from Uber. [\#4724](https://github.com/ClickHouse/ClickHouse/pull/4724) ([Remen Ivan](https://github.com/BHYCHIK)) [\#5805](https://github.com/ClickHouse/ClickHouse/pull/5805) ([alexey-milovidov](https://github.com/alexey-milovidov))
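
A sketch of the new moving-window aggregate functions listed above; the result shown assumes the input arrives in ascending order:

```sql
-- Moving sum over a window of 3 values: [0, 1, 3, 6, 9] for inputs 0..4.
SELECT groupArrayMovingSum(3)(number)
FROM (SELECT number FROM numbers(5) ORDER BY number);
```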

#### Bug Fix {#bug-fix-21}

- Implement DNS cache with asynchronous update. A separate thread resolves all hosts and updates the DNS cache with a period (setting `dns_cache_update_period`). It should help when the IP of hosts changes frequently. [\#5857](https://github.com/ClickHouse/ClickHouse/pull/5857) ([Anton Popov](https://github.com/CurtizJ))
- Fix segfault in `Delta` codec which affects columns with values less than 32 bits in size. The bug led to random memory corruption. [\#5786](https://github.com/ClickHouse/ClickHouse/pull/5786) ([alesapin](https://github.com/alesapin))
- Fixed segfault in TTL merge with non-physical columns in block. [\#5819](https://github.com/ClickHouse/ClickHouse/pull/5819) ([Anton Popov](https://github.com/CurtizJ))
- Fixed rare bug in checking of a part with a `LowCardinality` column. Previously `checkDataPart` always failed for a part with a `LowCardinality` column. [\#5832](https://github.com/ClickHouse/ClickHouse/pull/5832) ([alesapin](https://github.com/alesapin))
- Avoid hanging connections when the server thread pool is full. It is important for connections from the `remote` table function or connections to a shard without replicas when there is a long connection timeout. This fixes [\#5878](https://github.com/ClickHouse/ClickHouse/issues/5878) [\#5881](https://github.com/ClickHouse/ClickHouse/pull/5881) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Support for constant arguments to the `evalMLModel` function. This fixes [\#5817](https://github.com/ClickHouse/ClickHouse/issues/5817) [\#5820](https://github.com/ClickHouse/ClickHouse/pull/5820) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed the issue when ClickHouse determines the default time zone as `UCT` instead of `UTC`. This fixes [\#5804](https://github.com/ClickHouse/ClickHouse/issues/5804). [\#5828](https://github.com/ClickHouse/ClickHouse/pull/5828) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed buffer underflow in `visitParamExtractRaw`. This fixes [\#5901](https://github.com/ClickHouse/ClickHouse/issues/5901) [\#5902](https://github.com/ClickHouse/ClickHouse/pull/5902) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Now distributed `DROP/ALTER/TRUNCATE/OPTIMIZE ON CLUSTER` queries will be executed directly on the leader replica. [\#5757](https://github.com/ClickHouse/ClickHouse/pull/5757) ([alesapin](https://github.com/alesapin))
- Fix `coalesce` for `ColumnConst` with `ColumnNullable` + related changes. [\#5755](https://github.com/ClickHouse/ClickHouse/pull/5755) ([Artem Zuikov](https://github.com/4ertus2))
- Fix the `ReadBufferFromKafkaConsumer` so that it keeps reading new messages after `commit()` even if it was stalled before [\#5852](https://github.com/ClickHouse/ClickHouse/pull/5852) ([Ivan](https://github.com/abyss7))
- Fix `FULL` and `RIGHT` JOIN results when joining on `Nullable` keys in the right table. [\#5859](https://github.com/ClickHouse/ClickHouse/pull/5859) ([Artem Zuikov](https://github.com/4ertus2))
- Possible fix of infinite sleeping of low-priority queries. [\#5842](https://github.com/ClickHouse/ClickHouse/pull/5842) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix race condition which causes that some queries may not appear in `query_log` after a `SYSTEM FLUSH LOGS` query. [\#5456](https://github.com/ClickHouse/ClickHouse/issues/5456) [\#5685](https://github.com/ClickHouse/ClickHouse/pull/5685) ([Anton Popov](https://github.com/CurtizJ))
- Fixed `heap-use-after-free` ASan warning in ClusterCopier caused by a watch which tries to use an already removed copier object. [\#5871](https://github.com/ClickHouse/ClickHouse/pull/5871) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fixed wrong `StringRef` pointer returned by some implementations of `IColumn::deserializeAndInsertFromArena`. This bug affected only unit tests. [\#5973](https://github.com/ClickHouse/ClickHouse/pull/5973) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Prevent source and intermediate array join columns from masking same-name columns. [\#5941](https://github.com/ClickHouse/ClickHouse/pull/5941) ([Artem Zuikov](https://github.com/4ertus2))
- Fix insert and select queries to the MySQL engine with MySQL-style identifier quoting. [\#5704](https://github.com/ClickHouse/ClickHouse/pull/5704) ([Winter Zhang](https://github.com/zhang2014))
- Now the `CHECK TABLE` query can work with the MergeTree engine family (see the sketch after this list). It returns check status and a message, if any, for each part (or file in the case of simpler engines). Also, fix a bug in fetching of a broken part. [\#5865](https://github.com/ClickHouse/ClickHouse/pull/5865) ([alesapin](https://github.com/alesapin))
- Fixed `SPLIT_SHARED_LIBRARIES` runtime. [\#5793](https://github.com/ClickHouse/ClickHouse/pull/5793) ([Danila Kutenin](https://github.com/danlark1))
- Fixed time zone initialization when `/etc/localtime` is a relative symlink like `../usr/share/zoneinfo/Europe/Moscow` [\#5922](https://github.com/ClickHouse/ClickHouse/pull/5922) ([alexey-milovidov](https://github.com/alexey-milovidov))
- clickhouse-copier: Fix use-after-free on shutdown [\#5752](https://github.com/ClickHouse/ClickHouse/pull/5752) ([proller](https://github.com/proller))
- Updated `simdjson`. Fixed the issue that some invalid JSONs with zero bytes successfully parse. [\#5938](https://github.com/ClickHouse/ClickHouse/pull/5938) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix shutdown of SystemLogs [\#5802](https://github.com/ClickHouse/ClickHouse/pull/5802) ([Anton Popov](https://github.com/CurtizJ))
- Fixed hanging when the condition in `invalidate_query` depends on a dictionary. [\#6011](https://github.com/ClickHouse/ClickHouse/pull/6011) ([Vitaly Baranov](https://github.com/vitlibar))
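
The `CHECK TABLE` entry above now covers the MergeTree family; a minimal sketch with a hypothetical table name:

```sql
-- Returns the check status (and a message, if any) for each data part.
CHECK TABLE events;
```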

#### Improvement {#improvement-6}

- Allow unresolvable addresses in cluster configuration. They will be considered unavailable and tried to resolve at every connection attempt. This is especially useful for Kubernetes. This fixes [\#5714](https://github.com/ClickHouse/ClickHouse/issues/5714) [\#5924](https://github.com/ClickHouse/ClickHouse/pull/5924) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Close idle TCP connections (with one hour timeout by default). This is especially important for large clusters with multiple distributed tables on every server, because every server can possibly keep a connection pool to every other server, and after peak query concurrency, connections will stall. This fixes [\#5879](https://github.com/ClickHouse/ClickHouse/issues/5879) [\#5880](https://github.com/ClickHouse/ClickHouse/pull/5880) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Better quality of the `topK` function. Changed the SavingSpace set behavior to remove the last element if the new element has a bigger weight. [\#5833](https://github.com/ClickHouse/ClickHouse/issues/5833) [\#5850](https://github.com/ClickHouse/ClickHouse/pull/5850) ([Guillaume Tassery](https://github.com/YiuRULE))
- URL functions that work with domains now can work for incomplete URLs without a scheme [\#5725](https://github.com/ClickHouse/ClickHouse/pull/5725) ([alesapin](https://github.com/alesapin))
- Checksums added to the `system.parts_columns` table. [\#5874](https://github.com/ClickHouse/ClickHouse/pull/5874) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
- Added `Enum` data type as a synonym for `Enum8` or `Enum16`. [\#5886](https://github.com/ClickHouse/ClickHouse/pull/5886) ([dimarub2000](https://github.com/dimarub2000))
- Full bit transpose variant for the `T64` codec. Could lead to better compression with `zstd`. [\#5742](https://github.com/ClickHouse/ClickHouse/pull/5742) ([Artem Zuikov](https://github.com/4ertus2))
- A condition on the `startsWith` function now can use the primary key (see the sketch after this list). This fixes [\#5310](https://github.com/ClickHouse/ClickHouse/issues/5310) and [\#5882](https://github.com/ClickHouse/ClickHouse/issues/5882) [\#5919](https://github.com/ClickHouse/ClickHouse/pull/5919) ([dimarub2000](https://github.com/dimarub2000))
- Allow to use `clickhouse-copier` with cross-replication cluster topology by permitting an empty database name. [\#5745](https://github.com/ClickHouse/ClickHouse/pull/5745) ([nvartolomei](https://github.com/nvartolomei))
- Use `UTC` as the default timezone on a system without `tzdata` (e.g. bare Docker container). Before this patch, the error message `Could not determine local time zone` was printed and the server or client refused to start. [\#5827](https://github.com/ClickHouse/ClickHouse/pull/5827) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Returned back support for floating point argument in the function `quantileTiming` for backward compatibility. [\#5911](https://github.com/ClickHouse/ClickHouse/pull/5911) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Show which table is missing a column in error messages. [\#5768](https://github.com/ClickHouse/ClickHouse/pull/5768) ([Ivan](https://github.com/abyss7))
- Disallow running a query with the same `query_id` by various users [\#5430](https://github.com/ClickHouse/ClickHouse/pull/5430) ([proller](https://github.com/proller))
- More robust code for sending metrics to Graphite. It will work even during long multiple `RENAME TABLE` operations. [\#5875](https://github.com/ClickHouse/ClickHouse/pull/5875) ([alexey-milovidov](https://github.com/alexey-milovidov))
- More informative error messages will be displayed when ThreadPool cannot schedule a task for execution. This fixes [\#5305](https://github.com/ClickHouse/ClickHouse/issues/5305) [\#5801](https://github.com/ClickHouse/ClickHouse/pull/5801) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Inverting ngramSearch to be more intuitive [\#5807](https://github.com/ClickHouse/ClickHouse/pull/5807) ([Danila Kutenin](https://github.com/danlark1))
- Add user parsing in HDFS engine builder [\#5946](https://github.com/ClickHouse/ClickHouse/pull/5946) ([akonyaev90](https://github.com/akonyaev90))
- Updated default value of the `max_ast_elements` parameter [\#5933](https://github.com/ClickHouse/ClickHouse/pull/5933) ([Artem Konovalov](https://github.com/izebit))
- Added a notion of obsolete settings. The obsolete setting `allow_experimental_low_cardinality_type` can be used with no effect. [0f15c01c6802f7ce1a1494c12c846be8c98944cd](https://github.com/ClickHouse/ClickHouse/commit/0f15c01c6802f7ce1a1494c12c846be8c98944cd) [Alexey Milovidov](https://github.com/alexey-milovidov)
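
The `startsWith` entry above means a predicate like the one below can now be evaluated with the primary key index; names are hypothetical:

```sql
CREATE TABLE urls (url String, hits UInt64)
ENGINE = MergeTree ORDER BY url;

-- The prefix condition can now use the primary key on `url`.
SELECT count() FROM urls WHERE startsWith(url, 'https://');
```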

#### Performance Improvement {#performance-improvement-4}

- Increase the number of streams to SELECT from a Merge table for more uniform distribution of threads. Added setting `max_streams_multiplier_for_merge_tables`. This fixes [\#5797](https://github.com/ClickHouse/ClickHouse/issues/5797) [\#5915](https://github.com/ClickHouse/ClickHouse/pull/5915) ([alexey-milovidov](https://github.com/alexey-milovidov))

#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-7}

- Add a backward compatibility test for client-server interaction with different versions of clickhouse. [\#5868](https://github.com/ClickHouse/ClickHouse/pull/5868) ([alesapin](https://github.com/alesapin))
- Test coverage information in every commit and pull request. [\#5896](https://github.com/ClickHouse/ClickHouse/pull/5896) ([alesapin](https://github.com/alesapin))
- Cooperate with address sanitizer to support our custom allocators (`Arena` and `ArenaWithFreeLists`) for better “use-after-free” debugging. [\#5728](https://github.com/ClickHouse/ClickHouse/pull/5728) ([akuzm](https://github.com/akuzm))
+- Switched to the [LLVM libunwind implementation](https://github.com/llvm-mirror/libunwind) for C++ exception handling and for stack trace printing [\#4828](https://github.com/ClickHouse/ClickHouse/pull/4828) ([Nikita Lapkov](https://github.com/laplab))
+- Added two more warnings from -Weverything [\#5923](https://github.com/ClickHouse/ClickHouse/pull/5923) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Allow building ClickHouse with Memory Sanitizer. [\#3949](https://github.com/ClickHouse/ClickHouse/pull/3949) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a ubsan report about the `bitTest` function in the fuzz test. [\#5943](https://github.com/ClickHouse/ClickHouse/pull/5943) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Docker: added the possibility to init a ClickHouse instance that requires authentication. [\#5727](https://github.com/ClickHouse/ClickHouse/pull/5727) ([Korviakov Andrey](https://github.com/shurshun))
+- Updated librdkafka to version 1.1.0 [\#5872](https://github.com/ClickHouse/ClickHouse/pull/5872) ([Ivan](https://github.com/abyss7))
+- Added a global timeout for integration tests and disabled some of them in the tests code. [\#5741](https://github.com/ClickHouse/ClickHouse/pull/5741) ([alesapin](https://github.com/alesapin))
+- Fixed some ThreadSanitizer failures. [\#5854](https://github.com/ClickHouse/ClickHouse/pull/5854) ([akuzm](https://github.com/akuzm))
+- The `--no-undefined` option forces the linker to check the existence of all external names while linking. It's very useful to track real dependencies between libraries in split build mode. [\#5855](https://github.com/ClickHouse/ClickHouse/pull/5855) ([Ivan](https://github.com/abyss7))
+- Added a performance test for [\#5797](https://github.com/ClickHouse/ClickHouse/issues/5797) [\#5914](https://github.com/ClickHouse/ClickHouse/pull/5914) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed compatibility with gcc-7. [\#5840](https://github.com/ClickHouse/ClickHouse/pull/5840) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added support for gcc-9. This fixes [\#5717](https://github.com/ClickHouse/ClickHouse/issues/5717) [\#5774](https://github.com/ClickHouse/ClickHouse/pull/5774) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed an issue where libunwind could be linked incorrectly. [\#5948](https://github.com/ClickHouse/ClickHouse/pull/5948) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a few warnings found by PVS-Studio. [\#5921](https://github.com/ClickHouse/ClickHouse/pull/5921) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added initial support for the `clang-tidy` static analyzer. [\#5806](https://github.com/ClickHouse/ClickHouse/pull/5806) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Convert BSD/Linux endian macros (‘be64toh’ and ‘htobe64’) to their Mac OS X equivalents [\#5785](https://github.com/ClickHouse/ClickHouse/pull/5785) ([Fu Chen](https://github.com/fredchenbj))
+- Improved the integration tests guide. [\#5796](https://github.com/ClickHouse/ClickHouse/pull/5796) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fixed the build on macosx + gcc9 [\#5822](https://github.com/ClickHouse/ClickHouse/pull/5822) ([filimonov](https://github.com/filimonov))
+- Fixed a hard-to-spot typo: aggreAGte -\> aggregate. [\#5753](https://github.com/ClickHouse/ClickHouse/pull/5753) ([akuzm](https://github.com/akuzm))
+- Fixed the freebsd build [\#5760](https://github.com/ClickHouse/ClickHouse/pull/5760) ([proller](https://github.com/proller))
+- Added a link to the experimental YouTube channel to the website [\#5845](https://github.com/ClickHouse/ClickHouse/pull/5845) ([Ivan Blinkov](https://github.com/blinkov))
+- CMake: added an option for coverage flags: WITH\_COVERAGE [\#5776](https://github.com/ClickHouse/ClickHouse/pull/5776) ([proller](https://github.com/proller))
+- Fixed the initial size of some inline PODArrays. [\#5787](https://github.com/ClickHouse/ClickHouse/pull/5787) ([akuzm](https://github.com/akuzm))
+- clickhouse-server.postinst: fixed OS detection for centos 6 [\#5788](https://github.com/ClickHouse/ClickHouse/pull/5788) ([proller](https://github.com/proller))
+- Added Arch Linux package generation. [\#5719](https://github.com/ClickHouse/ClickHouse/pull/5719) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Split common/config.h by libs (dbms) [\#5715](https://github.com/ClickHouse/ClickHouse/pull/5715) ([proller](https://github.com/proller))
+- Fixes for the “Arcadia” build platform [\#5795](https://github.com/ClickHouse/ClickHouse/pull/5795) ([proller](https://github.com/proller))
+- Fixes for an unconventional build (gcc9, no submodules) [\#5792](https://github.com/ClickHouse/ClickHouse/pull/5792) ([proller](https://github.com/proller))
+- Require an explicit type in unalignedStore because it was proven to be bug-prone [\#5791](https://github.com/ClickHouse/ClickHouse/pull/5791) ([akuzm](https://github.com/akuzm))
+- Fixed the MacOS build [\#5830](https://github.com/ClickHouse/ClickHouse/pull/5830) ([filimonov](https://github.com/filimonov))
+- Performance test concerning the new JIT feature with a bigger dataset, as requested here [\#5263](https://github.com/ClickHouse/ClickHouse/issues/5263) [\#5887](https://github.com/ClickHouse/ClickHouse/pull/5887) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Run stateful tests in the stress test [12693e568722f11e19859742f56428455501fd2a](https://github.com/ClickHouse/ClickHouse/commit/12693e568722f11e19859742f56428455501fd2a) ([alesapin](https://github.com/alesapin))
+
+#### Backward Incompatible Change {#backward-incompatible-change-7}
+
+- `Kafka` is broken in this version.
+- Enable `adaptive_index_granularity` = 10MB by default for new `MergeTree` tables. If you created new MergeTree tables on version 19.11+, a downgrade to versions prior to 19.6 will be impossible. [\#5628](https://github.com/ClickHouse/ClickHouse/pull/5628) ([alesapin](https://github.com/alesapin))
+- Removed obsolete undocumented embedded dictionaries that were used by Yandex.Metrica. The functions `OSIn`, `SEIn`, `OSToRoot`, `SEToRoot`, `OSHierarchy`, `SEHierarchy` are no longer available. If you are using these functions, write an email to clickhouse-feedback@yandex-team.com. Note: at the last moment we decided to keep these functions for a while. [\#5780](https://github.com/ClickHouse/ClickHouse/pull/5780) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+## ClickHouse release 19.10 {#clickhouse-release-19-10}
+
+### ClickHouse release 19.10.1.5, 2019-07-12 {#clickhouse-release-19-10-1-5-2019-07-12}
+
+#### New Feature {#new-feature-7}
+
+- Added a new column codec: `T64`. Made for (U)IntX/EnumX/Date(Time)/DecimalX columns. It should be good for columns with constant or small range values. The codec itself allows to enlarge or shrink the data type without re-compression (see the sketch after this list). [\#5557](https://github.com/ClickHouse/ClickHouse/pull/5557) ([Artem Zuikov](https://github.com/4ertus2))
+- Added the database engine `MySQL`, which allows viewing all the tables in a remote MySQL server [\#5599](https://github.com/ClickHouse/ClickHouse/pull/5599) ([Winter Zhang](https://github.com/zhang2014))
+- `bitmapContains` implementation. It's 2x faster than `bitmapHasAny` if the second bitmap contains one element. [\#5535](https://github.com/ClickHouse/ClickHouse/pull/5535) ([Zhichang Yu](https://github.com/yuzhichang))
+- Support for the `crc32` function (with behaviour exactly as in MySQL or PHP). Do not use it if you need a hash function. [\#5661](https://github.com/ClickHouse/ClickHouse/pull/5661) ([Remen Ivan](https://github.com/BHYCHIK))
+- Implemented `SYSTEM START/STOP DISTRIBUTED SENDS` queries to control asynchronous inserts into `Distributed` tables. [\#4935](https://github.com/ClickHouse/ClickHouse/pull/4935) ([Winter Zhang](https://github.com/zhang2014))
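+
+A minimal sketch of attaching the new codec to a column (the table is hypothetical). Chaining it with `ZSTD`, as suggested above, often improves the ratio:
+
+``` sql
+CREATE TABLE codec_demo
+(
+    -- T64 bit-transposes a block of values; a general-purpose codec
+    -- such as ZSTD can then compress the transposed block further.
+    n UInt64 CODEC(T64, ZSTD)
+)
+ENGINE = MergeTree
+ORDER BY tuple();
+```
+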
+#### Bug Fix {#bug-fix-22}
+
+- Ignore query execution limits and the max parts size for merge limits while executing mutations. [\#5659](https://github.com/ClickHouse/ClickHouse/pull/5659) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed a bug which may lead to deduplication of normal blocks (extremely rare) and insertion of duplicate blocks (more often). [\#5549](https://github.com/ClickHouse/ClickHouse/pull/5549) ([alesapin](https://github.com/alesapin))
+- Fixed the function `arrayEnumerateUniqRanked` for arguments with empty arrays [\#5559](https://github.com/ClickHouse/ClickHouse/pull/5559) ([proller](https://github.com/proller))
+- Don't subscribe to Kafka topics without the intent to poll any messages. [\#5698](https://github.com/ClickHouse/ClickHouse/pull/5698) ([Ivan](https://github.com/abyss7))
+- Make the setting `join_use_nulls` get no effect for types that cannot be inside Nullable [\#5700](https://github.com/ClickHouse/ClickHouse/pull/5700) ([Olga Khvostikova](https://github.com/stavrolia))
+- Fixed `Incorrect size of index granularity` errors [\#5720](https://github.com/ClickHouse/ClickHouse/pull/5720) ([coraxster](https://github.com/coraxster))
+- Fixed a Float-to-Decimal conversion overflow [\#5607](https://github.com/ClickHouse/ClickHouse/pull/5607) ([coraxster](https://github.com/coraxster))
+- Flush the buffer when the `WriteBufferFromHDFS` destructor is called. This fixes writing into `HDFS`. [\#5684](https://github.com/ClickHouse/ClickHouse/pull/5684) ([Xindong Peng](https://github.com/eejoin))
+
+#### Improvement {#improvement-7}
+
+- Treat empty cells in `CSV` as default values when the setting `input_format_defaults_for_omitted_fields` is enabled. [\#5625](https://github.com/ClickHouse/ClickHouse/pull/5625) ([akuzm](https://github.com/akuzm))
+- Non-blocking loading of external dictionaries. [\#5567](https://github.com/ClickHouse/ClickHouse/pull/5567) ([Vitaly Baranov](https://github.com/vitlibar))
+- Network timeouts can be dynamically changed for already established connections according to the settings. [\#4558](https://github.com/ClickHouse/ClickHouse/pull/4558) ([Konstantin Podshumok](https://github.com/podshumok))
+- Using “public\_suffix\_list” for the functions `firstSignificantSubdomain`, `cutToFirstSignificantSubdomain` (see the sketch after this list). It uses a perfect hash table generated by `gperf` with a list generated from the file: https://publicsuffix.org/list/public\_suffix\_list.dat (for example, we now recognize the domain `ac.uk` as non-significant). [\#5030](https://github.com/ClickHouse/ClickHouse/pull/5030) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Adopted the `IPv6` data type in system tables; unified client info columns in `system.processes` and `system.query_log` [\#5640](https://github.com/ClickHouse/ClickHouse/pull/5640) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Using sessions for connections with the MySQL compatibility protocol. \#5476 [\#5646](https://github.com/ClickHouse/ClickHouse/pull/5646) ([Yuriy Baranov](https://github.com/yurriy))
+- Support more `ALTER` queries `ON CLUSTER`. [\#5593](https://github.com/ClickHouse/ClickHouse/pull/5593) [\#5613](https://github.com/ClickHouse/ClickHouse/pull/5613) ([sundyli](https://github.com/sundy-li))
+- Support the `` section in the `clickhouse-local` config file. [\#5540](https://github.com/ClickHouse/ClickHouse/pull/5540) ([proller](https://github.com/proller))
+- Allow running queries with the `remote` table function in `clickhouse-local` [\#5627](https://github.com/ClickHouse/ClickHouse/pull/5627) ([proller](https://github.com/proller))
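+
+A quick sketch of the public-suffix-aware behaviour described above. The URL is made up; with the list in place, `com.tr` should be treated as a public suffix:
+
+``` sql
+SELECT
+    firstSignificantSubdomain('https://news.clickhouse.com.tr/') AS subdomain,   -- 'clickhouse'
+    cutToFirstSignificantSubdomain('https://news.clickhouse.com.tr/') AS domain; -- 'clickhouse.com.tr'
+```
+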
+#### Performance Improvement {#performance-improvement-5}
+
+- Added the possibility to write the final mark at the end of MergeTree columns. It allows avoiding useless reads for keys that are out of the table data range. It is enabled only if adaptive index granularity is in use. [\#5624](https://github.com/ClickHouse/ClickHouse/pull/5624) ([alesapin](https://github.com/alesapin))
+- Improved performance of MergeTree tables on very slow filesystems by reducing the number of `stat` syscalls. [\#5648](https://github.com/ClickHouse/ClickHouse/pull/5648) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a performance degradation in reading from MergeTree tables that was introduced in version 19.6. Fixes \#5631. [\#5633](https://github.com/ClickHouse/ClickHouse/pull/5633) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-8}
+
+- Implemented `TestKeeper` as an implementation of the ZooKeeper interface used for testing [\#5643](https://github.com/ClickHouse/ClickHouse/pull/5643) ([alexey-milovidov](https://github.com/alexey-milovidov)) ([levushkin aleksej](https://github.com/alexey-milovidov))
+- From now on, `.sql` tests can be run isolated by server, in parallel, with a random database. This allows running them faster, adding new tests with custom server configurations, and making sure that different tests don't affect each other. [\#5554](https://github.com/ClickHouse/ClickHouse/pull/5554) ([Ivan](https://github.com/abyss7))
+- Remove `` and `` from performance tests [\#5672](https://github.com/ClickHouse/ClickHouse/pull/5672) ([Olga Khvostikova](https://github.com/stavrolia))
+- Fixed the “select\_format” performance test for `Pretty` formats [\#5642](https://github.com/ClickHouse/ClickHouse/pull/5642) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+## ClickHouse release 19.9 {#clickhouse-release-19-9}
+
+### ClickHouse release 19.9.3.31, 2019-07-05 {#clickhouse-release-19-9-3-31-2019-07-05}
+
+#### Bug Fix {#bug-fix-23}
+
+- Fixed a segfault in the Delta codec which affects columns with values less than 32 bits in size. The bug led to random memory corruption. [\#5786](https://github.com/ClickHouse/ClickHouse/pull/5786) ([alesapin](https://github.com/alesapin))
+- Fixed a rare bug in the part check with LowCardinality columns. [\#5832](https://github.com/ClickHouse/ClickHouse/pull/5832) ([alesapin](https://github.com/alesapin))
+- Fixed a segfault in TTL merges with non-physical columns in the block. [\#5819](https://github.com/ClickHouse/ClickHouse/pull/5819) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed a potential infinite sleep of low-priority queries. [\#5842](https://github.com/ClickHouse/ClickHouse/pull/5842) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed how ClickHouse determines the default time zone as UCT instead of UTC. [\#5828](https://github.com/ClickHouse/ClickHouse/pull/5828) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a bug about executing distributed DROP/ALTER/TRUNCATE/OPTIMIZE ON CLUSTER queries on the follower replica before the leader replica. Now they will be executed directly on the leader replica. [\#5757](https://github.com/ClickHouse/ClickHouse/pull/5757) ([alesapin](https://github.com/alesapin))
+- Fixed a race condition which causes some queries to possibly not appear in query\_log instantly after a SYSTEM FLUSH LOGS query. [\#5685](https://github.com/ClickHouse/ClickHouse/pull/5685) ([Anton Popov](https://github.com/CurtizJ))
+- Added missing support for constant arguments to the `evalMLModel` function. [\#5820](https://github.com/ClickHouse/ClickHouse/pull/5820) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse release 19.9.2.4, 2019-06-24 {#clickhouse-release-19-9-2-4-2019-06-24}
+
+#### New Feature {#new-feature-8}
+
+- Print information about frozen parts in the `system.parts` table. [\#5471](https://github.com/ClickHouse/ClickHouse/pull/5471) ([proller](https://github.com/proller))
+- Ask for the client password on clickhouse-client start on a tty if it is not set in the arguments [\#5092](https://github.com/ClickHouse/ClickHouse/pull/5092) ([proller](https://github.com/proller))
+- Implemented the `dictGet` and `dictGetOrDefault` functions for Decimal types (see the sketch after this list). [\#5394](https://github.com/ClickHouse/ClickHouse/pull/5394) ([Artem Zuikov](https://github.com/4ertus2))
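+
+A minimal sketch of the Decimal support mentioned above; the dictionary `prices` and its `Decimal64(2)` attribute `price` are hypothetical:
+
+``` sql
+-- Returns the `price` attribute for key 42, or the Decimal default if the key is absent.
+SELECT dictGetOrDefault('prices', 'price', toUInt64(42), toDecimal64(0, 2)) AS price;
+```
+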
+#### Improvement {#improvement-8}
+
+- Debian init: added a service stop timeout [\#5522](https://github.com/ClickHouse/ClickHouse/pull/5522) ([proller](https://github.com/proller))
+- Added a setting, forbidden by default, to create a table with suspicious types for LowCardinality (see the sketch after this list). [\#5448](https://github.com/ClickHouse/ClickHouse/pull/5448) ([Olga Khvostikova](https://github.com/stavrolia))
+- Regression functions return the model weights when not used as state in the `evalMLMethod` function. [\#5411](https://github.com/ClickHouse/ClickHouse/pull/5411) ([Quid37](https://github.com/Quid37))
+- Renamed and improved the regression methods. [\#5492](https://github.com/ClickHouse/ClickHouse/pull/5492) ([Quid37](https://github.com/Quid37))
+- Clearer interfaces of string searchers. [\#5586](https://github.com/ClickHouse/ClickHouse/pull/5586) ([Danila Kutenin](https://github.com/danlark1))
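+
+A sketch of the guard described above. The entry does not name the setting; presumably it is `allow_suspicious_low_cardinality_types`, which exists in later versions:
+
+``` sql
+-- Expected to be rejected by default: LowCardinality over a small
+-- fixed-size type rarely pays off.
+CREATE TABLE lc_demo (x LowCardinality(UInt8)) ENGINE = MergeTree ORDER BY tuple();
+
+-- Opting in explicitly:
+SET allow_suspicious_low_cardinality_types = 1;
+CREATE TABLE lc_demo (x LowCardinality(UInt8)) ENGINE = MergeTree ORDER BY tuple();
+```
+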
+#### Bug Fix {#bug-fix-24}
+
+- Fixed potential data loss in Kafka [\#5445](https://github.com/ClickHouse/ClickHouse/pull/5445) ([Ivan](https://github.com/abyss7))
+- Fixed a potential infinite loop in the `PrettySpace` format when called with zero columns [\#5560](https://github.com/ClickHouse/ClickHouse/pull/5560) ([Olga Khvostikova](https://github.com/stavrolia))
+- Fixed a UInt32 overflow bug in linear models. Allow the eval ML model for a non-const model argument. [\#5516](https://github.com/ClickHouse/ClickHouse/pull/5516) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- `ALTER TABLE ... DROP INDEX IF EXISTS ...` should not raise an exception if the provided index does not exist [\#5524](https://github.com/ClickHouse/ClickHouse/pull/5524) ([Gleb Novikov](https://github.com/NanoBjorn))
+- Fixed a segfault with `bitmapHasAny` in scalar subqueries [\#5528](https://github.com/ClickHouse/ClickHouse/pull/5528) ([Zhichang Yu](https://github.com/yuzhichang))
+- Fixed an error when the replication connection pool doesn't retry to resolve the host, even when the DNS cache was dropped. [\#5534](https://github.com/ClickHouse/ClickHouse/pull/5534) ([alesapin](https://github.com/alesapin))
+- Fixed `ALTER ... MODIFY TTL` on ReplicatedMergeTree. [\#5539](https://github.com/ClickHouse/ClickHouse/pull/5539) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed INSERT into a Distributed table with a MATERIALIZED column [\#5429](https://github.com/ClickHouse/ClickHouse/pull/5429) ([Azat Khuzhin](https://github.com/azat))
+- Fixed a bad alloc when truncating Join storage [\#5437](https://github.com/ClickHouse/ClickHouse/pull/5437) ([TCeason](https://github.com/TCeason))
+- In recent versions of the package tzdata, some files are symlinks now. The current mechanism for detecting the default timezone gets broken and gives wrong names for some timezones. Now at least we force the timezone name to the contents of TZ if provided. [\#5443](https://github.com/ClickHouse/ClickHouse/pull/5443) ([Ivan](https://github.com/abyss7))
+- Fixed some extremely rare cases with the MultiVolnitsky searcher when the constant needles in sum are at least 16KB long. The algorithm missed or overwrote the previous results, which can lead to an incorrect result of `multiSearchAny`. [\#5588](https://github.com/ClickHouse/ClickHouse/pull/5588) ([Danila Kutenin](https://github.com/danlark1))
+- Fixed the problem when settings for ExternalData requests couldn't use ClickHouse settings. Also, for now, the settings `date_time_input_format` and `low_cardinality_allow_in_native_format` cannot be used because of the ambiguity of names (in external data they can be interpreted as a table format and in the query they can be a setting). [\#5455](https://github.com/ClickHouse/ClickHouse/pull/5455) ([Danila Kutenin](https://github.com/danlark1))
+- Fixed a bug when parts were removed only from FS without dropping them from Zookeeper. [\#5520](https://github.com/ClickHouse/ClickHouse/pull/5520) ([alesapin](https://github.com/alesapin))
+- Removed debug logging from the MySQL protocol [\#5478](https://github.com/ClickHouse/ClickHouse/pull/5478) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Skip ZNONODE during DDL query processing [\#5489](https://github.com/ClickHouse/ClickHouse/pull/5489) ([Azat Khuzhin](https://github.com/azat))
+- Fixed mixing `UNION ALL` result column types. There were cases with inconsistent data and column types of resulting columns. [\#5503](https://github.com/ClickHouse/ClickHouse/pull/5503) ([Artem Zuikov](https://github.com/4ertus2))
+- Throw an exception on wrong integers in `dictGetT` functions instead of a crash. [\#5446](https://github.com/ClickHouse/ClickHouse/pull/5446) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed wrong element\_count and load\_factor for hashed dictionaries in the `system.dictionaries` table. [\#5440](https://github.com/ClickHouse/ClickHouse/pull/5440) ([Azat Khuzhin](https://github.com/azat))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-9}
+
+- Fixed the build without `Brotli` HTTP compression support (`ENABLE_BROTLI=OFF` cmake variable). [\#5521](https://github.com/ClickHouse/ClickHouse/pull/5521) ([Anton Yuzhaninov](https://github.com/citrin))
+- Include roaring.h as roaring/roaring.h [\#5523](https://github.com/ClickHouse/ClickHouse/pull/5523) ([Orivej Desh](https://github.com/orivej))
+- Fixed gcc9 warnings in hyperscan (the \#line directive is evil!) [\#5546](https://github.com/ClickHouse/ClickHouse/pull/5546) ([Danila Kutenin](https://github.com/danlark1))
+- Fixed all warnings when compiling with gcc-9. Fixed some contrib issues. Fixed a gcc9 ICE and submitted it to bugzilla. [\#5498](https://github.com/ClickHouse/ClickHouse/pull/5498) ([Danila Kutenin](https://github.com/danlark1))
+- Fixed linking with lld [\#5477](https://github.com/ClickHouse/ClickHouse/pull/5477) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Removed specializations in dictionaries [\#5452](https://github.com/ClickHouse/ClickHouse/pull/5452) ([Artem Zuikov](https://github.com/4ertus2))
+- Improved performance tests for formatting and parsing tables for different types of files [\#5497](https://github.com/ClickHouse/ClickHouse/pull/5497) ([Olga Khvostikova](https://github.com/stavrolia))
+- Fixes for parallel test runs [\#5506](https://github.com/ClickHouse/ClickHouse/pull/5506) ([proller](https://github.com/proller))
+- Docker: use configs from clickhouse-test [\#5531](https://github.com/ClickHouse/ClickHouse/pull/5531) ([proller](https://github.com/proller))
+- Fixed compilation for FreeBSD [\#5447](https://github.com/ClickHouse/ClickHouse/pull/5447) ([proller](https://github.com/proller))
+- Upgraded boost to 1.70 [\#5570](https://github.com/ClickHouse/ClickHouse/pull/5570) ([proller](https://github.com/proller))
+- Fixed building ClickHouse as a submodule [\#5574](https://github.com/ClickHouse/ClickHouse/pull/5574) ([proller](https://github.com/proller))
+- Improved JSONExtract performance tests [\#5444](https://github.com/ClickHouse/ClickHouse/pull/5444) ([Vitaly Baranov](https://github.com/vitlibar))
+
+## ClickHouse release 19.8 {#clickhouse-release-19-8}
+
+### ClickHouse release 19.8.3.8, 2019-06-11 {#clickhouse-release-19-8-3-8-2019-06-11}
+
+#### New Features {#new-features}
+
+- Added functions to work with JSON [\#4686](https://github.com/ClickHouse/ClickHouse/pull/4686) ([hcz](https://github.com/hczhcz)) [\#5124](https://github.com/ClickHouse/ClickHouse/pull/5124). ([Vitaly Baranov](https://github.com/vitlibar))
+- Added the function basename, with behaviour similar to the basename function that exists in a lot of languages (`os.path.basename` in python, `basename` in PHP, etc…). It works with both UNIX-like and Windows paths. [\#5136](https://github.com/ClickHouse/ClickHouse/pull/5136) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Added `LIMIT n, m BY` or `LIMIT m OFFSET n BY` syntax to set an offset of n for the LIMIT BY clause. [\#5138](https://github.com/ClickHouse/ClickHouse/pull/5138) ([Anton Popov](https://github.com/CurtizJ))
+- Added the new data type `SimpleAggregateFunction`, which allows to have columns with light aggregation in an `AggregatingMergeTree`. It can only be used with simple functions like `any`, `anyLast`, `sum`, `min`, `max` (see the sketch after this list). [\#4629](https://github.com/ClickHouse/ClickHouse/pull/4629) ([Boris Granveaud](https://github.com/bgranvea))
+- Added support for non-constant arguments in the function `ngramDistance` [\#5198](https://github.com/ClickHouse/ClickHouse/pull/5198) ([Danila Kutenin](https://github.com/danlark1))
+- Added the functions `skewPop`, `skewSamp`, `kurtPop` and `kurtSamp` to compute sequence skewness, sample skewness, kurtosis and sample kurtosis respectively. [\#5200](https://github.com/ClickHouse/ClickHouse/pull/5200) ([hcz](https://github.com/hczhcz))
+- Support the rename operation for `MaterializeView` storage.
[\#5209](https://github.com/ClickHouse/ClickHouse/pull/5209) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Added a server that allows connecting to ClickHouse using a MySQL client. [\#4715](https://github.com/ClickHouse/ClickHouse/pull/4715) ([Yuriy Baranov](https://github.com/yurriy))
+- Added `toDecimal*OrZero` and `toDecimal*OrNull` functions. [\#5291](https://github.com/ClickHouse/ClickHouse/pull/5291) ([Artem Zuikov](https://github.com/4ertus2))
+- Support Decimal types in the functions: `quantile`, `quantiles`, `median`, `quantileExactWeighted`, `quantilesExactWeighted`, medianExactWeighted. [\#5304](https://github.com/ClickHouse/ClickHouse/pull/5304) ([Artem Zuikov](https://github.com/4ertus2))
+- Added the `toValidUTF8` function, which replaces all invalid UTF-8 characters by the replacement character � (U+FFFD). [\#5322](https://github.com/ClickHouse/ClickHouse/pull/5322) ([Danila Kutenin](https://github.com/danlark1))
+- Added the `format` function. Formatting a constant pattern (simplified Python format pattern) with the strings listed in the arguments. [\#5330](https://github.com/ClickHouse/ClickHouse/pull/5330) ([Danila Kutenin](https://github.com/danlark1))
+- Added the `system.detached_parts` table containing information about detached parts of `MergeTree` tables. [\#5353](https://github.com/ClickHouse/ClickHouse/pull/5353) ([akuzm](https://github.com/akuzm))
+- Added the `ngramSearch` function to calculate the non-symmetric difference between needle and haystack. [\#5418](https://github.com/ClickHouse/ClickHouse/pull/5418)[\#5422](https://github.com/ClickHouse/ClickHouse/pull/5422) ([Danila Kutenin](https://github.com/danlark1))
+- Implementation of basic machine learning methods (stochastic linear regression and logistic regression) using the aggregate functions interface. Has different strategies for updating model weights (simple gradient descent, momentum method, Nesterov method). Also supports mini-batches of custom size. [\#4943](https://github.com/ClickHouse/ClickHouse/pull/4943) ([Quid37](https://github.com/Quid37))
+- Implementation of the `geohashEncode` and `geohashDecode` functions. [\#5003](https://github.com/ClickHouse/ClickHouse/pull/5003) ([Vasily Nemkov](https://github.com/Enmk))
+- Added the aggregate function `timeSeriesGroupSum`, which can aggregate different time series whose sample timestamps are not aligned. It will use linear interpolation between two sample timestamps and then sum the time series together. Added the aggregate function `timeSeriesGroupRateSum`, which calculates the rate of time series and then sums the rates together. [\#4542](https://github.com/ClickHouse/ClickHouse/pull/4542) ([Yangkuan Liu](https://github.com/LiuYangkuan))
+- Added the functions `IPv4CIDRtoIPv4Range` and `IPv6CIDRtoIPv6Range` to calculate the lower and higher bounds for an IP in a subnet using a CIDR. [\#5095](https://github.com/ClickHouse/ClickHouse/pull/5095) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Added an X-ClickHouse-Summary header when we send a query using HTTP with the setting `send_progress_in_http_headers` enabled. It returns the usual information of X-ClickHouse-Progress, with additional information like how many rows and bytes were inserted in the query. [\#5116](https://github.com/ClickHouse/ClickHouse/pull/5116) ([Guillaume Tassery](https://github.com/YiuRULE))
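+
+A minimal sketch of the `SimpleAggregateFunction` type added above (the table is hypothetical):
+
+``` sql
+CREATE TABLE simple_agg
+(
+    k UInt64,
+    total SimpleAggregateFunction(sum, UInt64),
+    latest SimpleAggregateFunction(anyLast, String)
+)
+ENGINE = AggregatingMergeTree
+ORDER BY k;
+-- On background merges, rows with equal `k` are collapsed: `total` is summed and
+-- `latest` keeps the last value, without storing AggregateFunction states.
+```
+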
+#### Improvements {#improvements}
+
+- Added the `max_parts_in_total` setting for the MergeTree family of tables (default: 100 000) which prevents unsafe specification of the partition key \#5166. [\#5171](https://github.com/ClickHouse/ClickHouse/pull/5171) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `clickhouse-obfuscator`: derive the seed for individual columns by combining the initial seed with the column name, not the column position. This is intended to transform datasets with multiple related tables, so that tables will remain JOINable after the transformation. [\#5178](https://github.com/ClickHouse/ClickHouse/pull/5178) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added the functions `JSONExtractRaw`, `JSONExtractKeyAndValues`. Renamed the functions `jsonExtract` to `JSONExtract`. When something goes wrong these functions return the corresponding values, not `NULL`. Modified the function `JSONExtract`: now it gets the return type from its last parameter and doesn't inject nullables. Implemented a fallback to RapidJSON in case AVX2 instructions are not available. Updated the Simdjson library to a new version (see the sketch after this list). [\#5235](https://github.com/ClickHouse/ClickHouse/pull/5235) ([Vitaly Baranov](https://github.com/vitlibar))
+- Now the `if` and `multiIf` functions don't rely on the condition's `Nullable`, but rely on the branches for sql compatibility. [\#5238](https://github.com/ClickHouse/ClickHouse/pull/5238) ([Jian Wu](https://github.com/janplus))
+- The `In` predicate now generates a `Null` result from `Null` input, like the `Equal` function. [\#5152](https://github.com/ClickHouse/ClickHouse/pull/5152) ([Jian Wu](https://github.com/janplus))
+- Check the time limit every (flush\_interval / poll\_timeout) number of rows from Kafka. This allows to break the reading from a Kafka consumer more frequently and to check the time limits for the top-level streams [\#5249](https://github.com/ClickHouse/ClickHouse/pull/5249) ([Ivan](https://github.com/abyss7))
+- Link rdkafka with the bundled SASL. It should allow using SASL SCRAM authentication [\#5253](https://github.com/ClickHouse/ClickHouse/pull/5253) ([Ivan](https://github.com/abyss7))
+- Batched version of RowRefList for all joins. [\#5267](https://github.com/ClickHouse/ClickHouse/pull/5267) ([Artem Zuikov](https://github.com/4ertus2))
+- clickhouse-server: more informative listen error messages. [\#5268](https://github.com/ClickHouse/ClickHouse/pull/5268) ([proller](https://github.com/proller))
+- Support dictionaries in clickhouse-copier for functions in `` [\#5270](https://github.com/ClickHouse/ClickHouse/pull/5270) ([proller](https://github.com/proller))
+- Added the new setting `kafka_commit_every_batch` to regulate the Kafka committing policy. It allows to set the commit mode: after every batch of messages is handled, or after the whole block is written to the storage. It's a trade-off between losing some messages or reading them twice in some extreme situations. [\#5308](https://github.com/ClickHouse/ClickHouse/pull/5308) ([Ivan](https://github.com/abyss7))
+- Make `windowFunnel` support other unsigned integer types. [\#5320](https://github.com/ClickHouse/ClickHouse/pull/5320) ([sundyli](https://github.com/sundy-li))
+- Allow shadowing the virtual column `_table` in the Merge engine. [\#5325](https://github.com/ClickHouse/ClickHouse/pull/5325) ([Ivan](https://github.com/abyss7))
+- Make `sequenceMatch` aggregate functions support other unsigned integer types [\#5339](https://github.com/ClickHouse/ClickHouse/pull/5339) ([sundyli](https://github.com/sundy-li))
+- Better error messages if a checksum mismatch is most likely caused by hardware failures. [\#5355](https://github.com/ClickHouse/ClickHouse/pull/5355) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Check that underlying tables support sampling for `StorageMerge` [\#5366](https://github.com/ClickHouse/ClickHouse/pull/5366) ([Ivan](https://github.com/abyss7))
+- Close MySQL connections after their usage in external dictionaries. It is related to issue \#893. [\#5395](https://github.com/ClickHouse/ClickHouse/pull/5395) ([Clément Rodriguez](https://github.com/clemrodriguez))
+- Improvements of the MySQL wire protocol. Changed the name of the format to MySQLWire. Using RAII for calling RSA\_free. Disabling SSL if the context cannot be created. [\#5419](https://github.com/ClickHouse/ClickHouse/pull/5419) ([Yuriy Baranov](https://github.com/yurriy))
+- clickhouse-client: allow to run with an inaccessible history file (read-only, no disk space, file is a directory, …). [\#5431](https://github.com/ClickHouse/ClickHouse/pull/5431) ([proller](https://github.com/proller))
+- Respect query settings in asynchronous INSERTs into Distributed tables. [\#4936](https://github.com/ClickHouse/ClickHouse/pull/4936) ([TCeason](https://github.com/TCeason))
+- Renamed the functions `leastSqr` to `simpleLinearRegression`, `LinearRegression` to `linearRegression`, `LogisticRegression` to `logisticRegression`. [\#5391](https://github.com/ClickHouse/ClickHouse/pull/5391) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
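+
+A small sketch of the renamed JSON functions mentioned above:
+
+``` sql
+SELECT
+    JSONExtract('{"a": 1, "b": "hello"}', 'a', 'Int64') AS a, -- 1
+    JSONExtractRaw('{"a": {"b": 1}}', 'a') AS raw;            -- '{"b":1}'
+```
+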
+#### Performance Improvements {#performance-improvements}
+
+- Parallelize the processing of parts of non-replicated MergeTree tables in the ALTER MODIFY query. [\#4639](https://github.com/ClickHouse/ClickHouse/pull/4639) ([Ivan Kush](https://github.com/IvanKush))
+- Optimizations in regular expression extraction. [\#5193](https://github.com/ClickHouse/ClickHouse/pull/5193) [\#5191](https://github.com/ClickHouse/ClickHouse/pull/5191) ([Danila Kutenin](https://github.com/danlark1))
+- Do not add a right join key column to the join result if it's used only in the join-on section. [\#5260](https://github.com/ClickHouse/ClickHouse/pull/5260) ([Artem Zuikov](https://github.com/4ertus2))
+- Freeze the Kafka buffer after the first empty response. It avoids multiple invocations of `ReadBuffer::next()` for an empty result in some row-parsing streams. [\#5283](https://github.com/ClickHouse/ClickHouse/pull/5283) ([Ivan](https://github.com/abyss7))
+- `concat` function optimization for multiple arguments. [\#5357](https://github.com/ClickHouse/ClickHouse/pull/5357) ([Danila Kutenin](https://github.com/danlark1))
+- Query optimization: allow to push down an IN statement while rewriting a comma/cross join into an inner one (see the sketch after this list). [\#5396](https://github.com/ClickHouse/ClickHouse/pull/5396) ([Artem Zuikov](https://github.com/4ertus2))
+- Upgraded our LZ4 implementation with the reference one to have faster decompression. [\#5070](https://github.com/ClickHouse/ClickHouse/pull/5070) ([Danila Kutenin](https://github.com/danlark1))
+- Implemented MSD radix sort (based on kxsort), and partial sorting. [\#5129](https://github.com/ClickHouse/ClickHouse/pull/5129) ([Evgenii Pravda](https://github.com/kvinty))
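+
+A conceptual sketch of the comma/cross-join rewrite above, with hypothetical tables `t1` and `t2`:
+
+``` sql
+-- As written by the user (comma join):
+SELECT count()
+FROM t1, t2
+WHERE t1.id = t2.id AND t1.id IN (1, 2, 3);
+
+-- What the optimizer can now derive, conceptually: an INNER JOIN,
+-- with the IN condition pushed down to t1.
+SELECT count()
+FROM t1
+INNER JOIN t2 ON t1.id = t2.id
+WHERE t1.id IN (1, 2, 3);
+```
+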
+#### Bug Fixes {#bug-fixes}
+
+- Fixed pushing required columns with JOIN [\#5192](https://github.com/ClickHouse/ClickHouse/pull/5192) ([Winter Zhang](https://github.com/zhang2014))
+- Fixed a bug where, when ClickHouse is run by systemd, the command `sudo service clickhouse-server forcerestart` didn't work as expected. [\#5204](https://github.com/ClickHouse/ClickHouse/pull/5204) ([proller](https://github.com/proller))
+- Fixed HTTP error codes in DataPartsExchange (the interserver HTTP server on port 9009 always returned code 200, even on errors). [\#5216](https://github.com/ClickHouse/ClickHouse/pull/5216) ([proller](https://github.com/proller))
+- Fixed SimpleAggregateFunction for Strings longer than MAX\_SMALL\_STRING\_SIZE [\#5311](https://github.com/ClickHouse/ClickHouse/pull/5311) ([Azat Khuzhin](https://github.com/azat))
+- Fixed an error for `Decimal` to `Nullable(Decimal)` conversion in IN. Support other Decimal-to-Decimal conversions (including different scales). [\#5350](https://github.com/ClickHouse/ClickHouse/pull/5350) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed FPU clobbering in the simdjson library that led to wrong calculation of the `uniqHLL` and `uniqCombined` aggregate functions and math functions such as `log`. [\#5354](https://github.com/ClickHouse/ClickHouse/pull/5354) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed handling of mixed const/nonconst cases in JSON functions. [\#5435](https://github.com/ClickHouse/ClickHouse/pull/5435) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fixed the `retention` function. Now all conditions that are satisfied in a row of data are added to the data state. [\#5119](https://github.com/ClickHouse/ClickHouse/pull/5119) ([小路](https://github.com/nicelulu))
+- Fixed the result type for `quantileExact` with Decimals. [\#5304](https://github.com/ClickHouse/ClickHouse/pull/5304) ([Artem Zuikov](https://github.com/4ertus2))
+
+#### Documentation {#documentation}
+
+- Translated the documentation for `CollapsingMergeTree` to Chinese. [\#5168](https://github.com/ClickHouse/ClickHouse/pull/5168) ([张风啸](https://github.com/AlexZFX))
+- Translated some documentation about table engines to Chinese.
+ [\#5134](https://github.com/ClickHouse/ClickHouse/pull/5134)
+ [\#5328](https://github.com/ClickHouse/ClickHouse/pull/5328)
+ ([never lee](https://github.com/neverlee))
+
+#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements}
+
+- Fixed some sanitizer reports that show probable use-after-free. [\#5139](https://github.com/ClickHouse/ClickHouse/pull/5139) [\#5143](https://github.com/ClickHouse/ClickHouse/pull/5143) [\#5393](https://github.com/ClickHouse/ClickHouse/pull/5393) ([Ivan](https://github.com/abyss7))
+- Moved performance tests out of separate directories for convenience. [\#5158](https://github.com/ClickHouse/ClickHouse/pull/5158) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed incorrect performance tests. [\#5255](https://github.com/ClickHouse/ClickHouse/pull/5255) ([alesapin](https://github.com/alesapin))
+- Added a tool to calculate checksums caused by bit flips to debug hardware issues. [\#5334](https://github.com/ClickHouse/ClickHouse/pull/5334) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Made the runner script more usable. [\#5340](https://github.com/ClickHouse/ClickHouse/pull/5340)[\#5360](https://github.com/ClickHouse/ClickHouse/pull/5360) ([filimonov](https://github.com/filimonov))
+- Added a small instruction on how to write performance tests. [\#5408](https://github.com/ClickHouse/ClickHouse/pull/5408) ([alesapin](https://github.com/alesapin))
+- Added the ability to make substitutions in create, fill and drop queries in performance tests [\#5367](https://github.com/ClickHouse/ClickHouse/pull/5367) ([Olga Khvostikova](https://github.com/stavrolia))
+
+## ClickHouse release 19.7 {#clickhouse-release-19-7}
+
+### ClickHouse release 19.7.5.29, 2019-07-05 {#clickhouse-release-19-7-5-29-2019-07-05}
+
+#### Bug Fix {#bug-fix-25}
+
+- Fixed a performance regression in some queries with JOIN. [\#5192](https://github.com/ClickHouse/ClickHouse/pull/5192) ([Winter Zhang](https://github.com/zhang2014))
+
+### ClickHouse release 19.7.5.27, 2019-06-09 {#clickhouse-release-19-7-5-27-2019-06-09}
+
+#### New Features {#new-features-1}
+
+- Added bitmap-related functions `bitmapHasAny` and `bitmapHasAll`, analogous to the `hasAny` and `hasAll` functions for arrays (see the sketch after this list). [\#5279](https://github.com/ClickHouse/ClickHouse/pull/5279) ([Sergi Vladykin](https://github.com/svladykin))
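+
+A quick sketch of the two functions:
+
+``` sql
+SELECT
+    bitmapHasAny(bitmapBuild([1, 2, 3]), bitmapBuild([3, 4])) AS any_shared,  -- 1
+    bitmapHasAll(bitmapBuild([1, 2, 3]), bitmapBuild([3, 4])) AS all_present; -- 0
+```
+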
+#### Bug Fixes {#bug-fixes-1}
+
+- Fixed a segmentation fault on the `minmax` INDEX with a Null value. [\#5246](https://github.com/ClickHouse/ClickHouse/pull/5246) ([Nikita Vasilev](https://github.com/nikvas0))
+- Mark all input columns in LIMIT BY as required output. It fixes the ‘Not found column’ error in some distributed queries. [\#5407](https://github.com/ClickHouse/ClickHouse/pull/5407) ([Constantin S. Pan](https://github.com/kvap))
+- Fixed the “Column ‘0’ already exists” error in `SELECT .. PREWHERE` on a column with DEFAULT [\#5397](https://github.com/ClickHouse/ClickHouse/pull/5397) ([proller](https://github.com/proller))
+- Fixed the `ALTER MODIFY TTL` query on `ReplicatedMergeTree`. [\#5539](https://github.com/ClickHouse/ClickHouse/pull/5539/commits) ([Anton Popov](https://github.com/CurtizJ))
+- Don't crash the server when Kafka consumers have failed to start. [\#5285](https://github.com/ClickHouse/ClickHouse/pull/5285) ([Ivan](https://github.com/abyss7))
+- Fixed bitmap functions producing a wrong result. [\#5359](https://github.com/ClickHouse/ClickHouse/pull/5359) ([Andy Yang](https://github.com/andyyzh))
+- Fixed element\_count for hashed dictionaries (do not include duplicates) [\#5440](https://github.com/ClickHouse/ClickHouse/pull/5440) ([Azat Khuzhin](https://github.com/azat))
+- Use the contents of the TZ environment variable as the name for the timezone. It helps to correctly detect the default timezone in some cases. [\#5443](https://github.com/ClickHouse/ClickHouse/pull/5443) ([Ivan](https://github.com/abyss7))
+- Do not try to convert integers in `dictGetT` functions, because it doesn't work correctly. Throw an exception instead. [\#5446](https://github.com/ClickHouse/ClickHouse/pull/5446) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed settings in ExternalData HTTP requests. [\#5455](https://github.com/ClickHouse/ClickHouse/pull/5455) ([Danila Kutenin](https://github.com/danlark1))
+- Fixed a bug when parts were removed only from FS without dropping them from Zookeeper. [\#5520](https://github.com/ClickHouse/ClickHouse/pull/5520) ([alesapin](https://github.com/alesapin))
+- Fixed a segmentation fault in the `bitmapHasAny` function. [\#5528](https://github.com/ClickHouse/ClickHouse/pull/5528) ([Zhichang Yu](https://github.com/yuzhichang))
+- Fixed an error when the replication connection pool doesn't retry to resolve the host, even when the DNS cache was dropped. [\#5534](https://github.com/ClickHouse/ClickHouse/pull/5534) ([alesapin](https://github.com/alesapin))
+- Fixed the `DROP INDEX IF EXISTS` query. Now `ALTER TABLE ... DROP INDEX IF EXISTS ...` doesn't raise an exception if the provided index does not exist. [\#5524](https://github.com/ClickHouse/ClickHouse/pull/5524) ([Gleb Novikov](https://github.com/NanoBjorn))
+- Fixed the UNION ALL supertype column. There were cases with inconsistent data and column types of resulting columns. [\#5503](https://github.com/ClickHouse/ClickHouse/pull/5503) ([Artem Zuikov](https://github.com/4ertus2))
+- Skip ZNONODE during DDL query processing. Before, if another node removed the znode in the task queue, the one that did not process it but already got the list of children would terminate the DDLWorker thread. [\#5489](https://github.com/ClickHouse/ClickHouse/pull/5489) ([Azat Khuzhin](https://github.com/azat))
+- Fixed INSERT into a Distributed() table with a MATERIALIZED column. [\#5429](https://github.com/ClickHouse/ClickHouse/pull/5429) ([Azat Khuzhin](https://github.com/azat))
+
+### ClickHouse release 19.7.3.9, 2019-05-30 {#clickhouse-release-19-7-3-9-2019-05-30}
+
+#### New Features {#new-features-2}
+
+- Allow to limit the range of a setting that can be specified by the user. Those constraints can be set up in the user settings profile. [\#4931](https://github.com/ClickHouse/ClickHouse/pull/4931) ([Vitaly Baranov](https://github.com/vitlibar))
+- Added a second version of the function `groupUniqArray` with an optional `max_size` parameter that limits the size of the resulting array. This behaviour is similar to the `groupArray(max_size)(x)` function (see the sketch after this list).
+ [\#5026](https://github.com/ClickHouse/ClickHouse/pull/5026) ([Guillaume Tassery](https://github.com/YiuRULE))
+- For the TSVWithNames/CSVWithNames input file formats, the column order can now be determined from the file header. This is controlled by the `input_format_with_names_use_header` setting. [\#5081](https://github.com/ClickHouse/ClickHouse/pull/5081) ([Alexander](https://github.com/Akazz))
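+
+A small sketch of the bounded variant:
+
+``` sql
+SELECT groupUniqArray(3)(x) AS sample
+FROM (SELECT arrayJoin([1, 2, 2, 3, 4, 5]) AS x);
+-- At most 3 distinct values are kept; which ones is not guaranteed.
+```
+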
+#### Bug Fixes {#bug-fixes-2}
+
+- Crash with uncompressed\_cache + JOIN during merge (\#5197) [\#5133](https://github.com/ClickHouse/ClickHouse/pull/5133) ([Danila Kutenin](https://github.com/danlark1))
+- Segmentation fault on a clickhouse-client query to system tables. \#5066 [\#5127](https://github.com/ClickHouse/ClickHouse/pull/5127) ([Ivan](https://github.com/abyss7))
+- Data loss on heavy load via KafkaEngine (\#4736) [\#5080](https://github.com/ClickHouse/ClickHouse/pull/5080) ([Ivan](https://github.com/abyss7))
+- Fixed a very rare data race condition that could happen when executing a query with UNION involving at least two SELECTs from system.columns, system.tables, system.parts, system.parts\_tables or tables of the Merge family and performing ALTER of columns of the related tables concurrently. [\#5189](https://github.com/ClickHouse/ClickHouse/pull/5189) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Performance Improvements {#performance-improvements-1}
+
+- Use radix sort for sorting by a single numeric column in `ORDER BY` without `LIMIT`. [\#5106](https://github.com/ClickHouse/ClickHouse/pull/5106), [\#4439](https://github.com/ClickHouse/ClickHouse/pull/4439) ([Evgenii Pravda](https://github.com/kvinty), [alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Documentation {#documentation-1}
+
+- Translated the documentation for some table engines to Chinese. [\#5107](https://github.com/ClickHouse/ClickHouse/pull/5107), [\#5094](https://github.com/ClickHouse/ClickHouse/pull/5094), [\#5087](https://github.com/ClickHouse/ClickHouse/pull/5087) ([张风啸](https://github.com/AlexZFX)), [\#5068](https://github.com/ClickHouse/ClickHouse/pull/5068) ([never lee](https://github.com/neverlee))
+
+#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-1}
+
+- Print UTF-8 characters properly in `clickhouse-test`. [\#5084](https://github.com/ClickHouse/ClickHouse/pull/5084) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added a command line parameter for clickhouse-client to always load suggestion data. [\#5102](https://github.com/ClickHouse/ClickHouse/pull/5102) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Resolved some of the PVS-Studio warnings. [\#5082](https://github.com/ClickHouse/ClickHouse/pull/5082) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Updated LZ4 [\#5040](https://github.com/ClickHouse/ClickHouse/pull/5040) ([Danila Kutenin](https://github.com/danlark1))
+- Added gperf to the build requirements for the upcoming pull request \#5030. [\#5110](https://github.com/ClickHouse/ClickHouse/pull/5110) ([proller](https://github.com/proller))
+
+## ClickHouse release 19.6 {#clickhouse-release-19-6}
+
+### ClickHouse release 19.6.3.18, 2019-06-13 {#clickhouse-release-19-6-3-18-2019-06-13}
+
+#### Bug Fixes {#bug-fixes-3}
+
+- Fixed condition pushdown for queries from the table functions `mysql` and `odbc` and the corresponding table engines. This fixes \#3540 and \#2384. [\#5313](https://github.com/ClickHouse/ClickHouse/pull/5313) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a deadlock in Zookeeper. [\#5297](https://github.com/ClickHouse/ClickHouse/pull/5297) ([github1youlc](https://github.com/github1youlc))
+- Allow quoted decimals in CSV. [\#5284](https://github.com/ClickHouse/ClickHouse/pull/5284) ([Artem Zuikov](https://github.com/4ertus2))
+- Disallow converting float Inf/NaN into Decimals (throw an exception). [\#5282](https://github.com/ClickHouse/ClickHouse/pull/5282) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed a data race in the RENAME query. [\#5247](https://github.com/ClickHouse/ClickHouse/pull/5247) ([Winter Zhang](https://github.com/zhang2014))
+- Temporarily disable LFAlloc. Usage of LFAlloc might lead to a lot of MAP\_FAILED in allocating UncompressedCache and, as a result, to crashes of queries on highly loaded servers. [cfdba93](https://github.com/ClickHouse/ClickHouse/commit/cfdba938ce22f16efeec504f7f90206a515b1280) ([Danila Kutenin](https://github.com/danlark1))
+
+### ClickHouse release 19.6.2.11, 2019-05-13 {#clickhouse-release-19-6-2-11-2019-05-13}
+
+#### New Features {#new-features-3}
+
+- TTL expressions for columns and tables (see the sketch after this list). [\#4212](https://github.com/ClickHouse/ClickHouse/pull/4212) ([Anton Popov](https://github.com/CurtizJ))
+- Added support for `brotli` compression of HTTP responses (Accept-Encoding: br) [\#4388](https://github.com/ClickHouse/ClickHouse/pull/4388) ([Mikhail](https://github.com/fandyushin))
+- Added the new function `isValidUTF8` for checking whether a set of bytes is correctly utf-8 encoded. [\#4934](https://github.com/ClickHouse/ClickHouse/pull/4934) ([Danila Kutenin](https://github.com/danlark1))
+- Added the new load balancing policy `first_or_random` which sends queries to the first specified host and, if it's inaccessible, sends queries to random hosts of the shard. Useful for cross-replication topology setups. [\#5012](https://github.com/ClickHouse/ClickHouse/pull/5012) ([nvartolomei](https://github.com/nvartolomei))
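+
+A minimal sketch of the two TTL levels (the table is hypothetical):
+
+``` sql
+CREATE TABLE ttl_demo
+(
+    d Date,
+    payload String TTL d + INTERVAL 1 WEEK  -- column TTL: values are cleared after a week
+)
+ENGINE = MergeTree
+ORDER BY d
+TTL d + INTERVAL 1 MONTH;                   -- table TTL: expired rows are removed after a month
+```
+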
+#### Experimental Features {#experimental-features-1}
+
+- Added the setting `index_granularity_bytes` (adaptive index granularity) for the MergeTree\* family of tables. [\#4826](https://github.com/ClickHouse/ClickHouse/pull/4826) ([alesapin](https://github.com/alesapin))
+
+#### Improvements {#improvements-1}
+
+- Added support for non-constant and negative size and length arguments for the function `substringUTF8`. [\#4989](https://github.com/ClickHouse/ClickHouse/pull/4989) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Disable push-down to the right table in LEFT join, to the left table in RIGHT join, and to both tables in FULL join. This fixes wrong JOIN results in some cases. [\#4846](https://github.com/ClickHouse/ClickHouse/pull/4846) ([Ivan](https://github.com/abyss7))
+- `clickhouse-copier`: auto upload the task configuration from the `--task-file` option [\#4876](https://github.com/ClickHouse/ClickHouse/pull/4876) ([proller](https://github.com/proller))
+- Added a typos handler for the storage factory and the table functions factory. [\#4891](https://github.com/ClickHouse/ClickHouse/pull/4891) ([Danila Kutenin](https://github.com/danlark1))
+- Support asterisks and qualified asterisks for multiple joins without subqueries [\#4898](https://github.com/ClickHouse/ClickHouse/pull/4898) ([Artem Zuikov](https://github.com/4ertus2))
+- Make the missing-column error message more user friendly. [\#4915](https://github.com/ClickHouse/ClickHouse/pull/4915) ([Artem Zuikov](https://github.com/4ertus2))
+
+#### Performance Improvements {#performance-improvements-2}
+
+- Significant speedup of ASOF JOIN [\#4924](https://github.com/ClickHouse/ClickHouse/pull/4924) ([Martijn Bakker](https://github.com/Gladdy))
+
+#### Backward Incompatible Changes {#backward-incompatible-changes}
+
+- The HTTP header `Query-Id` was renamed to `X-ClickHouse-Query-Id` for consistency. [\#4972](https://github.com/ClickHouse/ClickHouse/pull/4972) ([Mikhail](https://github.com/fandyushin))
+
+#### Bug Fixes {#bug-fixes-4}
+
+- Fixed a potential null pointer dereference in `clickhouse-copier`. [\#4900](https://github.com/ClickHouse/ClickHouse/pull/4900) ([proller](https://github.com/proller))
+- Fixed an error in queries with JOIN + ARRAY JOIN [\#4938](https://github.com/ClickHouse/ClickHouse/pull/4938) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed hanging on server start when a dictionary depends on another dictionary via a database with ENGINE=Dictionary. [\#4962](https://github.com/ClickHouse/ClickHouse/pull/4962) ([Vitaly Baranov](https://github.com/vitlibar))
+- Partially fix distributed\_product\_mode = local. It's possible to allow columns of local tables in where/having/order by/… via table aliases. Throw an exception if the table does not have an alias. It's not possible to access the columns without table aliases yet. [\#4986](https://github.com/ClickHouse/ClickHouse/pull/4986) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed a potentially wrong result for `SELECT DISTINCT` with `JOIN` [\#5001](https://github.com/ClickHouse/ClickHouse/pull/5001) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed a very rare data race condition that could happen when executing a query with UNION involving at least two SELECTs from system.columns, system.tables, system.parts, system.parts\_tables or tables of the Merge family and performing ALTER of columns of the related tables concurrently. [\#5189](https://github.com/ClickHouse/ClickHouse/pull/5189) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-2}
+
+- Fixed test failures when running clickhouse-server on a different host [\#4713](https://github.com/ClickHouse/ClickHouse/pull/4713) ([Vasily Nemkov](https://github.com/Enmk))
+- clickhouse-test: disable color control sequences in a non-tty environment. [\#4937](https://github.com/ClickHouse/ClickHouse/pull/4937) ([alesapin](https://github.com/alesapin))
+- clickhouse-test: allow using any test database (remove the `test.` qualification where possible). [\#5008](https://github.com/ClickHouse/ClickHouse/pull/5008) ([proller](https://github.com/proller))
+- Fix ubsan errors. [\#5037](https://github.com/ClickHouse/ClickHouse/pull/5037) ([Vitaly Baranov](https://github.com/vitlibar))
+- Yandex LFAlloc was added to ClickHouse to allocate MarkCache and UncompressedCache data in different ways to catch segfaults more reliably. [\#4995](https://github.com/ClickHouse/ClickHouse/pull/4995) ([Danila Kutenin](https://github.com/danlark1))
+- Python util to help with backports and changelogs. [\#4949](https://github.com/ClickHouse/ClickHouse/pull/4949) ([Ivan](https://github.com/abyss7))
+
+## ClickHouse release 19.5 {#clickhouse-release-19-5}
+
+### ClickHouse release 19.5.4.22, 2019-05-13 {#clickhouse-release-19-5-4-22-2019-05-13}
+
+#### Bug Fixes {#bug-fixes-5}
+
+- Fixed a possible crash in bitmap\* functions. [\#5220](https://github.com/ClickHouse/ClickHouse/pull/5220) [\#5228](https://github.com/ClickHouse/ClickHouse/pull/5228) ([Andy Yang](https://github.com/andyyzh))
+- Fixed a very rare data race condition that could happen when executing a query with UNION involving at least two SELECTs from system.columns, system.tables, system.parts, system.parts\_tables or tables of the Merge family while performing ALTER of columns of the related tables concurrently. [\#5189](https://github.com/ClickHouse/ClickHouse/pull/5189) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed the error `Set for IN is not created yet in case of using single LowCardinality column in the left part of IN`. This error happened if a LowCardinality column was part of the primary key. \#5031 [\#5154](https://github.com/ClickHouse/ClickHouse/pull/5154) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Modification of the retention function: previously, if a row satisfied both the first and the Nth condition, only the first satisfied condition was added to the data state. Now all conditions satisfied in a row of data are added to the data state. [\#5119](https://github.com/ClickHouse/ClickHouse/pull/5119) ([小路](https://github.com/nicelulu))
+
+### ClickHouse release 19.5.3.8, 2019-04-18 {#clickhouse-release-19-5-3-8-2019-04-18}
+
+#### Bug Fixes {#bug-fixes-6}
+
+- Fixed the type of the setting `max_partitions_per_insert_block` from boolean to UInt64. [\#5028](https://github.com/ClickHouse/ClickHouse/pull/5028) ([Mohammad Hossein Sekhavat](https://github.com/mhsekhavat))
+
+### ClickHouse release 19.5.2.6, 2019-04-15 {#clickhouse-release-19-5-2-6-2019-04-15}
+
+#### New Features {#new-features-4}
+
+- [Hyperscan](https://github.com/intel/hyperscan) multiple regular expression matching was added (functions `multiMatchAny`, `multiMatchAnyIndex`, `multiFuzzyMatchAny`, `multiFuzzyMatchAnyIndex`). [\#4780](https://github.com/ClickHouse/ClickHouse/pull/4780), [\#4841](https://github.com/ClickHouse/ClickHouse/pull/4841) ([Danila Kutenin](https://github.com/danlark1))
+- The `multiSearchFirstPosition` function was added. [\#4780](https://github.com/ClickHouse/ClickHouse/pull/4780) ([Danila Kutenin](https://github.com/danlark1))
+- Implemented a predefined per-row expression filter for tables. [\#4792](https://github.com/ClickHouse/ClickHouse/pull/4792) ([Ivan](https://github.com/abyss7))
+- A new type of data skipping indices based on bloom filters (usable for the `equal`, `in` and `like` functions). [\#4499](https://github.com/ClickHouse/ClickHouse/pull/4499) ([Nikita Vasilev](https://github.com/nikvas0))
+- Added `ASOF JOIN` which allows running queries that join to the most recent value known; see the sketch after this list. [\#4774](https://github.com/ClickHouse/ClickHouse/pull/4774) [\#4867](https://github.com/ClickHouse/ClickHouse/pull/4867) [\#4863](https://github.com/ClickHouse/ClickHouse/pull/4863) [\#4875](https://github.com/ClickHouse/ClickHouse/pull/4875) ([Martijn Bakker](https://github.com/Gladdy), [Artem Zuikov](https://github.com/4ertus2))
+- Rewrite multiple `COMMA JOIN` to `CROSS JOIN`, then rewrite them to `INNER JOIN` if possible. [\#4661](https://github.com/ClickHouse/ClickHouse/pull/4661) ([Artem Zuikov](https://github.com/4ertus2))
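+
+A minimal sketch of `ASOF JOIN`, assuming hypothetical `trades` and `quotes` tables that both have `symbol` and `ts` columns; the last column in `USING` is the inequality column, so each trade is matched with the most recent quote whose `ts` does not exceed the trade's `ts` for the same `symbol`:
+
+```sql
+SELECT trades.symbol, trades.ts, trades.price, quotes.bid
+FROM trades
+ASOF JOIN quotes USING (symbol, ts);
+```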
+
+#### Improvement {#improvement-9}
+
+- `topK` and `topKWeighted` now support a custom `loadFactor` (fixes issue [\#4252](https://github.com/ClickHouse/ClickHouse/issues/4252)). [\#4634](https://github.com/ClickHouse/ClickHouse/pull/4634) ([Kirill Danshin](https://github.com/kirillDanshin))
+- Allow using `parallel_replicas_count > 1` even for tables without sampling (the setting is simply ignored for them). In previous versions it led to an exception. [\#4637](https://github.com/ClickHouse/ClickHouse/pull/4637) ([Alexey Elymanov](https://github.com/digitalist))
+- Support for `CREATE OR REPLACE VIEW`. Allows creating a view or setting a new definition in a single statement; see the sketch after this list. [\#4654](https://github.com/ClickHouse/ClickHouse/pull/4654) ([Boris Granveaud](https://github.com/bgranvea))
+- The `Buffer` table engine now supports `PREWHERE`. [\#4671](https://github.com/ClickHouse/ClickHouse/pull/4671) ([Yangkuan Liu](https://github.com/LiuYangkuan))
+- Added the ability to start a replicated table without metadata in ZooKeeper in `readonly` mode. [\#4691](https://github.com/ClickHouse/ClickHouse/pull/4691) ([alesapin](https://github.com/alesapin))
+- Fixed flickering of the progress bar in clickhouse-client. The issue was most noticeable when using `FORMAT Null` with streaming queries. [\#4811](https://github.com/ClickHouse/ClickHouse/pull/4811) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Allow disabling functions backed by the `hyperscan` library on a per-user basis to limit potentially excessive and uncontrolled resource usage. [\#4816](https://github.com/ClickHouse/ClickHouse/pull/4816) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add version number logging to all errors. [\#4824](https://github.com/ClickHouse/ClickHouse/pull/4824) ([proller](https://github.com/proller))
+- Added a restriction to the `multiMatch` functions which requires the string size to fit into `unsigned int`. Also added an arguments-count limit to the `multiSearch` functions. [\#4834](https://github.com/ClickHouse/ClickHouse/pull/4834) ([Danila Kutenin](https://github.com/danlark1))
+- Improved usage of scratch space and error handling in Hyperscan. [\#4866](https://github.com/ClickHouse/ClickHouse/pull/4866) ([Danila Kutenin](https://github.com/danlark1))
+- Fill `system.graphite_detentions` from the table config of `*GraphiteMergeTree` engine tables. [\#4584](https://github.com/ClickHouse/ClickHouse/pull/4584) ([Mikhail f. Shiryaev](https://github.com/Felixoid))
+- Renamed the `trigramDistance` function to `ngramDistance` and added more functions with `CaseInsensitive` and `UTF`. [\#4602](https://github.com/ClickHouse/ClickHouse/pull/4602) ([Danila Kutenin](https://github.com/danlark1))
+- Improved data skipping indices calculation. [\#4640](https://github.com/ClickHouse/ClickHouse/pull/4640) ([Nikita Vasilev](https://github.com/nikvas0))
+- Keep ordinary, `DEFAULT`, `MATERIALIZED` and `ALIAS` columns in a single list (fixes issue [\#2867](https://github.com/ClickHouse/ClickHouse/issues/2867)). [\#4707](https://github.com/ClickHouse/ClickHouse/pull/4707) ([Alex Zatelepin](https://github.com/ztlpn))
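+
+A minimal sketch of `CREATE OR REPLACE VIEW` (hypothetical table and view names); running the statement again with a different SELECT swaps the definition in a single step:
+
+```sql
+CREATE OR REPLACE VIEW top_urls AS
+    SELECT url, count() AS hits
+    FROM hits
+    GROUP BY url;
+```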
+
+#### Bug Fix {#bug-fix-26}
+
+- Avoid `std::terminate` in case of memory allocation failure. Now a `std::bad_alloc` exception is thrown as expected. [\#4665](https://github.com/ClickHouse/ClickHouse/pull/4665) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixes capnproto reading from buffer. Sometimes files were not loaded successfully over HTTP. [\#4674](https://github.com/ClickHouse/ClickHouse/pull/4674) ([Vladislav](https://github.com/smirnov-vs))
+- Fix the error `Unknown log entry type: 0` after an `OPTIMIZE TABLE FINAL` query. [\#4683](https://github.com/ClickHouse/ClickHouse/pull/4683) ([Amos Bird](https://github.com/amosbird))
+- Wrong arguments to the `hasAny` or `hasAll` functions could lead to a segfault. [\#4698](https://github.com/ClickHouse/ClickHouse/pull/4698) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- A deadlock could happen while executing a `DROP DATABASE dictionary` query. [\#4701](https://github.com/ClickHouse/ClickHouse/pull/4701) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix undefined behavior in the `median` and `quantile` functions. [\#4702](https://github.com/ClickHouse/ClickHouse/pull/4702) ([hcz](https://github.com/hczhcz))
+- Fix compression level detection when `network_compression_method` is in lowercase. Broken in v19.1. [\#4706](https://github.com/ClickHouse/ClickHouse/pull/4706) ([proller](https://github.com/proller))
+- Fixed ignorance of the `UTC` setting (fixes issue [\#4658](https://github.com/ClickHouse/ClickHouse/issues/4658)). [\#4718](https://github.com/ClickHouse/ClickHouse/pull/4718) ([proller](https://github.com/proller))
+- Fix `histogram` function behaviour with `Distributed` tables. [\#4741](https://github.com/ClickHouse/ClickHouse/pull/4741) ([olegkv](https://github.com/olegkv))
+- Fixed TSan report `destroy of a locked mutex`. [\#4742](https://github.com/ClickHouse/ClickHouse/pull/4742) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed TSan report on shutdown due to a race condition in system logs usage. Fixed potential use-after-free on shutdown when part\_log is enabled. [\#4758](https://github.com/ClickHouse/ClickHouse/pull/4758) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix rechecking parts in `ReplicatedMergeTreeAlterThread` in case of error. [\#4772](https://github.com/ClickHouse/ClickHouse/pull/4772) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Arithmetic operations on intermediate aggregate function states were not working for constant arguments (such as subquery results). [\#4776](https://github.com/ClickHouse/ClickHouse/pull/4776) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Always backquote column names in metadata. Otherwise it's impossible to create a table with a column named `index` (the server won't restart due to a malformed `ATTACH` query in metadata). [\#4782](https://github.com/ClickHouse/ClickHouse/pull/4782) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix crash in `ALTER ... MODIFY ORDER BY` on a `Distributed` table. [\#4790](https://github.com/ClickHouse/ClickHouse/pull/4790) ([TCeason](https://github.com/TCeason))
+- Fix segfault in `JOIN ON` with enabled `enable_optimize_predicate_expression`. [\#4794](https://github.com/ClickHouse/ClickHouse/pull/4794) ([Winter Zhang](https://github.com/zhang2014))
+- Fix a bug with adding an extraneous row after consuming a protobuf message from Kafka. [\#4808](https://github.com/ClickHouse/ClickHouse/pull/4808) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fix crash of `JOIN` on a not-nullable vs nullable column. Fix `NULLs` in right keys in `ANY JOIN` + `join_use_nulls`. [\#4815](https://github.com/ClickHouse/ClickHouse/pull/4815) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix segmentation fault in `clickhouse-copier`. [\#4835](https://github.com/ClickHouse/ClickHouse/pull/4835) ([proller](https://github.com/proller))
+- Fixed a race condition in `SELECT` from `system.tables` if the table is renamed or altered concurrently. [\#4836](https://github.com/ClickHouse/ClickHouse/pull/4836) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a data race when fetching a data part that is already obsolete. [\#4839](https://github.com/ClickHouse/ClickHouse/pull/4839) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a rare data race that can happen during `RENAME` of a MergeTree-family table. [\#4844](https://github.com/ClickHouse/ClickHouse/pull/4844) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed segmentation fault in the function `arrayIntersect`. A segmentation fault could happen if the function was called with mixed constant and ordinary arguments. [\#4847](https://github.com/ClickHouse/ClickHouse/pull/4847) ([Lixiang Qian](https://github.com/fancyqlx))
+- Fixed reading from an `Array(LowCardinality)` column in the rare case when the column contained a long sequence of empty arrays. [\#4850](https://github.com/ClickHouse/ClickHouse/pull/4850) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix crash in `FULL/RIGHT JOIN` when joining on nullable vs not nullable. [\#4855](https://github.com/ClickHouse/ClickHouse/pull/4855) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix `No message received` exception while fetching parts between replicas. [\#4856](https://github.com/ClickHouse/ClickHouse/pull/4856) ([alesapin](https://github.com/alesapin))
+- Fixed wrong result of the `arrayIntersect` function in case of several repeated values in a single array. [\#4871](https://github.com/ClickHouse/ClickHouse/pull/4871) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix a race condition during concurrent `ALTER COLUMN` queries that could lead to a server crash (fixes issue [\#3421](https://github.com/ClickHouse/ClickHouse/issues/3421)). [\#4592](https://github.com/ClickHouse/ClickHouse/pull/4592) ([Alex Zatelepin](https://github.com/ztlpn))
+- Fix incorrect result in `FULL/RIGHT JOIN` with a const column. [\#4723](https://github.com/ClickHouse/ClickHouse/pull/4723) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix duplicates in `GLOBAL JOIN` with asterisk. [\#4705](https://github.com/ClickHouse/ClickHouse/pull/4705) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix parameter deduction in `ALTER MODIFY` of a column `CODEC` when the column type is not specified. [\#4883](https://github.com/ClickHouse/ClickHouse/pull/4883) ([alesapin](https://github.com/alesapin))
+- The functions `cutQueryStringAndFragment()` and `queryStringAndFragment()` now work correctly when the `URL` contains a fragment and no query. [\#4894](https://github.com/ClickHouse/ClickHouse/pull/4894) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fixed a rare bug when the setting `min_bytes_to_use_direct_io` is greater than zero, which occurs when a thread has to seek backward in a column file. [\#4897](https://github.com/ClickHouse/ClickHouse/pull/4897) ([alesapin](https://github.com/alesapin))
+- Fix wrong argument types for aggregate functions with `LowCardinality` arguments (fixes issue [\#4919](https://github.com/ClickHouse/ClickHouse/issues/4919)). [\#4922](https://github.com/ClickHouse/ClickHouse/pull/4922) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix wrong name qualification in `GLOBAL JOIN`. [\#4969](https://github.com/ClickHouse/ClickHouse/pull/4969) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix the result of the `toISOWeek` function for the year 1970. [\#4988](https://github.com/ClickHouse/ClickHouse/pull/4988) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix duplication of `DROP`, `TRUNCATE` and `OPTIMIZE` queries when executed `ON CLUSTER` for the `ReplicatedMergeTree*` family of tables. [\#4991](https://github.com/ClickHouse/ClickHouse/pull/4991) ([alesapin](https://github.com/alesapin))
+
+#### Backward Incompatible Change {#backward-incompatible-change-8}
+
+- Renamed the setting `insert_sample_with_metadata` to `input_format_defaults_for_omitted_fields`. [\#4771](https://github.com/ClickHouse/ClickHouse/pull/4771) ([Artem Zuikov](https://github.com/4ertus2))
+- Added the setting `max_partitions_per_insert_block` (with value 100 by default). If an inserted block contains a larger number of partitions, an exception is thrown. Set it to 0 if you want to remove the limit (not recommended); see the example after this list. [\#4845](https://github.com/ClickHouse/ClickHouse/pull/4845) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- The multi-search functions were renamed (`multiPosition` to `multiSearchAllPositions`, `multiSearch` to `multiSearchAny`, `firstMatch` to `multiSearchFirstIndex`). [\#4780](https://github.com/ClickHouse/ClickHouse/pull/4780) ([Danila Kutenin](https://github.com/danlark1))
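+
+One way to adjust the partition limit above for a single query is a trailing SETTINGS clause (hypothetical table names; assuming, as with other query-level settings, that it applies to the whole INSERT):
+
+```sql
+-- allow this INSERT to span up to 365 partitions instead of the default 100
+INSERT INTO events SELECT * FROM staging_events
+SETTINGS max_partitions_per_insert_block = 365;
+```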
+
+#### Performance Improvement {#performance-improvement-6}
+
+- Optimized the Volnitsky searcher by inlining, giving about 5-10% search improvement for queries with many needles or many similar bigrams. [\#4862](https://github.com/ClickHouse/ClickHouse/pull/4862) ([Danila Kutenin](https://github.com/danlark1))
+- Fixed a performance issue when the setting `use_uncompressed_cache` is greater than zero, which appeared when all read data was contained in the cache. [\#4913](https://github.com/ClickHouse/ClickHouse/pull/4913) ([alesapin](https://github.com/alesapin))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-10}
+
+- Hardening of the debug build: more granular memory mappings and ASLR; added memory protection for the mark cache and the index. This allows finding more memory-stomping bugs in cases where ASan and MSan cannot. [\#4632](https://github.com/ClickHouse/ClickHouse/pull/4632) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added support for the cmake variables `ENABLE_PROTOBUF`, `ENABLE_PARQUET` and `ENABLE_BROTLI` which allow enabling/disabling the above features (as we can do for librdkafka, mysql, etc). [\#4669](https://github.com/ClickHouse/ClickHouse/pull/4669) ([Silviu Caragea](https://github.com/silviucpp))
+- Added the ability to print the process list and stacktraces of all threads if some queries are hung after a test run. [\#4675](https://github.com/ClickHouse/ClickHouse/pull/4675) ([alesapin](https://github.com/alesapin))
+- Added retries on `Connection loss` error in `clickhouse-test`. [\#4682](https://github.com/ClickHouse/ClickHouse/pull/4682) ([alesapin](https://github.com/alesapin))
+- Added a FreeBSD build with vagrant, and a build with thread sanitizer, to the packager script. [\#4712](https://github.com/ClickHouse/ClickHouse/pull/4712) [\#4748](https://github.com/ClickHouse/ClickHouse/pull/4748) ([alesapin](https://github.com/alesapin))
+- Now the user is asked for a password for the `'default'` user during installation. [\#4725](https://github.com/ClickHouse/ClickHouse/pull/4725) ([proller](https://github.com/proller))
+- Suppress a warning in the `rdkafka` library. [\#4740](https://github.com/ClickHouse/ClickHouse/pull/4740) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Allow building without ssl. [\#4750](https://github.com/ClickHouse/ClickHouse/pull/4750) ([proller](https://github.com/proller))
+- Added a way to launch the clickhouse-server image from a custom user. [\#4753](https://github.com/ClickHouse/ClickHouse/pull/4753) ([Mikhail f. Shiryaev](https://github.com/Felixoid))
+- Upgrade contrib boost to 1.69. [\#4793](https://github.com/ClickHouse/ClickHouse/pull/4793) ([proller](https://github.com/proller))
+- Disabled usage of `mremap` when compiled with Thread Sanitizer.
+ Surprisingly enough, TSan does not intercept `mremap` (though it does intercept `mmap`, `munmap`), which leads to false positives. Fixed TSan report in stateful tests. [\#4859](https://github.com/ClickHouse/ClickHouse/pull/4859) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added a test checking use of a format schema via the HTTP interface. [\#4864](https://github.com/ClickHouse/ClickHouse/pull/4864) ([Vitaly Baranov](https://github.com/vitlibar))
+
+## ClickHouse release 19.4 {#clickhouse-release-19-4}
+
+### ClickHouse release 19.4.4.33, 2019-04-17 {#clickhouse-release-19-4-4-33-2019-04-17}
+
+#### Bug Fixes {#bug-fixes-7}
+
+- Avoid `std::terminate` in case of memory allocation failure. Now a `std::bad_alloc` exception is thrown as expected. [\#4665](https://github.com/ClickHouse/ClickHouse/pull/4665) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixes capnproto reading from buffer. Sometimes files were not loaded successfully over HTTP. [\#4674](https://github.com/ClickHouse/ClickHouse/pull/4674) ([Vladislav](https://github.com/smirnov-vs))
+- Fix the error `Unknown log entry type: 0` after an `OPTIMIZE TABLE FINAL` query. [\#4683](https://github.com/ClickHouse/ClickHouse/pull/4683) ([Amos Bird](https://github.com/amosbird))
+- Wrong arguments to the `hasAny` or `hasAll` functions could lead to a segfault. [\#4698](https://github.com/ClickHouse/ClickHouse/pull/4698) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- A deadlock could happen while executing a `DROP DATABASE dictionary` query. [\#4701](https://github.com/ClickHouse/ClickHouse/pull/4701) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix undefined behavior in the `median` and `quantile` functions. [\#4702](https://github.com/ClickHouse/ClickHouse/pull/4702) ([hcz](https://github.com/hczhcz))
+- Fix compression level detection when `network_compression_method` is in lowercase. Broken in v19.1. [\#4706](https://github.com/ClickHouse/ClickHouse/pull/4706) ([proller](https://github.com/proller))
+- Fixed ignorance of the `UTC` setting (fixes issue [\#4658](https://github.com/ClickHouse/ClickHouse/issues/4658)). [\#4718](https://github.com/ClickHouse/ClickHouse/pull/4718) ([proller](https://github.com/proller))
+- Fix `histogram` function behaviour with `Distributed` tables. [\#4741](https://github.com/ClickHouse/ClickHouse/pull/4741) ([olegkv](https://github.com/olegkv))
+- Fixed TSan report `destroy of a locked mutex`. [\#4742](https://github.com/ClickHouse/ClickHouse/pull/4742) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed TSan report on shutdown due to a race condition in system logs usage. Fixed potential use-after-free on shutdown when part\_log is enabled. [\#4758](https://github.com/ClickHouse/ClickHouse/pull/4758) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix rechecking parts in `ReplicatedMergeTreeAlterThread` in case of error. [\#4772](https://github.com/ClickHouse/ClickHouse/pull/4772) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Arithmetic operations on intermediate aggregate function states were not working for constant arguments (such as subquery results). [\#4776](https://github.com/ClickHouse/ClickHouse/pull/4776) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Always backquote column names in metadata. Otherwise it's impossible to create a table with a column named `index` (the server won't restart due to a malformed `ATTACH` query in metadata). [\#4782](https://github.com/ClickHouse/ClickHouse/pull/4782) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix crash in `ALTER ... MODIFY ORDER BY` on a `Distributed` table. [\#4790](https://github.com/ClickHouse/ClickHouse/pull/4790) ([TCeason](https://github.com/TCeason))
+- Fix segfault in `JOIN ON` with enabled `enable_optimize_predicate_expression`. [\#4794](https://github.com/ClickHouse/ClickHouse/pull/4794) ([Winter Zhang](https://github.com/zhang2014))
+- Fix a bug with adding an extraneous row after consuming a protobuf message from Kafka. [\#4808](https://github.com/ClickHouse/ClickHouse/pull/4808) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fix segmentation fault in `clickhouse-copier`. [\#4835](https://github.com/ClickHouse/ClickHouse/pull/4835) ([proller](https://github.com/proller))
+- Fixed a race condition in `SELECT` from `system.tables` if the table is renamed or altered concurrently. [\#4836](https://github.com/ClickHouse/ClickHouse/pull/4836) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a data race when fetching a data part that is already obsolete. [\#4839](https://github.com/ClickHouse/ClickHouse/pull/4839) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a rare data race that can happen during `RENAME` of a MergeTree-family table. [\#4844](https://github.com/ClickHouse/ClickHouse/pull/4844) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed segmentation fault in the function `arrayIntersect`. A segmentation fault could happen if the function was called with mixed constant and ordinary arguments. [\#4847](https://github.com/ClickHouse/ClickHouse/pull/4847) ([Lixiang Qian](https://github.com/fancyqlx))
+- Fixed reading from an `Array(LowCardinality)` column in the rare case when the column contained a long sequence of empty arrays. [\#4850](https://github.com/ClickHouse/ClickHouse/pull/4850) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix `No message received` exception while fetching parts between replicas. [\#4856](https://github.com/ClickHouse/ClickHouse/pull/4856) ([alesapin](https://github.com/alesapin))
+- Fixed wrong result of the `arrayIntersect` function in case of several repeated values in a single array. [\#4871](https://github.com/ClickHouse/ClickHouse/pull/4871) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix a race condition during concurrent `ALTER COLUMN` queries that could lead to a server crash (fixes issue [\#3421](https://github.com/ClickHouse/ClickHouse/issues/3421)). [\#4592](https://github.com/ClickHouse/ClickHouse/pull/4592) ([Alex Zatelepin](https://github.com/ztlpn))
+- Fix parameter deduction in `ALTER MODIFY` of a column `CODEC` when the column type is not specified. [\#4883](https://github.com/ClickHouse/ClickHouse/pull/4883) ([alesapin](https://github.com/alesapin))
+- The functions `cutQueryStringAndFragment()` and `queryStringAndFragment()` now work correctly when the `URL` contains a fragment and no query. [\#4894](https://github.com/ClickHouse/ClickHouse/pull/4894) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fixed a rare bug when the setting `min_bytes_to_use_direct_io` is greater than zero, which occurs when a thread has to seek backward in a column file. [\#4897](https://github.com/ClickHouse/ClickHouse/pull/4897) ([alesapin](https://github.com/alesapin))
+- Fix wrong argument types for aggregate functions with `LowCardinality` arguments (fixes issue [\#4919](https://github.com/ClickHouse/ClickHouse/issues/4919)). [\#4922](https://github.com/ClickHouse/ClickHouse/pull/4922) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix the result of the `toISOWeek` function for the year 1970. [\#4988](https://github.com/ClickHouse/ClickHouse/pull/4988) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix duplication of `DROP`, `TRUNCATE` and `OPTIMIZE` queries when executed `ON CLUSTER` for the `ReplicatedMergeTree*` family of tables. [\#4991](https://github.com/ClickHouse/ClickHouse/pull/4991) ([alesapin](https://github.com/alesapin))
+
+#### Improvements {#improvements-2}
+
+- Keep ordinary, `DEFAULT`, `MATERIALIZED` and `ALIAS` columns in a single list (fixes issue [\#2867](https://github.com/ClickHouse/ClickHouse/issues/2867)). [\#4707](https://github.com/ClickHouse/ClickHouse/pull/4707) ([Alex Zatelepin](https://github.com/ztlpn))
+
+### ClickHouse release 19.4.3.11, 2019-04-02 {#clickhouse-release-19-4-3-11-2019-04-02}
+
+#### Bug Fixes {#bug-fixes-8}
+
+- Fix crash in `FULL/RIGHT JOIN` when joining on nullable vs not nullable. [\#4855](https://github.com/ClickHouse/ClickHouse/pull/4855) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix segmentation fault in `clickhouse-copier`. [\#4835](https://github.com/ClickHouse/ClickHouse/pull/4835) ([proller](https://github.com/proller))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-11}
+
+- Added a way to launch the clickhouse-server image from a custom user. [\#4753](https://github.com/ClickHouse/ClickHouse/pull/4753) ([Mikhail f. Shiryaev](https://github.com/Felixoid))
+
+### ClickHouse release 19.4.2.7, 2019-03-30 {#clickhouse-release-19-4-2-7-2019-03-30}
+
+#### Bug Fixes {#bug-fixes-9}
+
+- Fixed reading from an `Array(LowCardinality)` column in the rare case when the column contained a long sequence of empty arrays. [\#4850](https://github.com/ClickHouse/ClickHouse/pull/4850) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+
+### ClickHouse release 19.4.1.3, 2019-03-19 {#clickhouse-release-19-4-1-3-2019-03-19}
+
+#### Bug Fixes {#bug-fixes-10}
+
+- Fixed remote queries which contain both `LIMIT BY` and `LIMIT`. Previously, if `LIMIT BY` and `LIMIT` were used for a remote query, `LIMIT` could happen before `LIMIT BY`, which led to a too-filtered result (see the sketch below). [\#4708](https://github.com/ClickHouse/ClickHouse/pull/4708) ([Constantin S. Pan](https://github.com/kvap))
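+
+For context, a sketch of the intended semantics with a hypothetical table: `LIMIT BY` keeps a number of rows per group and must run before the final `LIMIT`, otherwise rows are discarded prematurely.
+
+```sql
+-- keep the 2 most recent events per user, then return at most 10 rows overall
+SELECT user_id, event_time
+FROM events
+ORDER BY user_id, event_time DESC
+LIMIT 2 BY user_id
+LIMIT 10;
+```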
+
+### ClickHouse release 19.4.0.49, 2019-03-09 {#clickhouse-release-19-4-0-49-2019-03-09}
+
+#### New Features {#new-features-5}
+
+- Added full support for the `Protobuf` format (input and output, nested data structures). [\#4174](https://github.com/ClickHouse/ClickHouse/pull/4174) [\#4493](https://github.com/ClickHouse/ClickHouse/pull/4493) ([Vitaly Baranov](https://github.com/vitlibar))
+- Added bitmap functions with Roaring Bitmaps. [\#4207](https://github.com/ClickHouse/ClickHouse/pull/4207) ([Andy Yang](https://github.com/andyyzh)) [\#4568](https://github.com/ClickHouse/ClickHouse/pull/4568) ([Vitaly Baranov](https://github.com/vitlibar))
+- Parquet format support. [\#4448](https://github.com/ClickHouse/ClickHouse/pull/4448) ([proller](https://github.com/proller))
+- N-gram distance was added for fuzzy string comparison. It is similar to q-gram metrics in the R language. [\#4466](https://github.com/ClickHouse/ClickHouse/pull/4466) ([Danila Kutenin](https://github.com/danlark1))
+- Combine rules for graphite rollup from dedicated aggregation and retention patterns. [\#4426](https://github.com/ClickHouse/ClickHouse/pull/4426) ([Mikhail f. Shiryaev](https://github.com/Felixoid))
+- Added `max_execution_speed` and `max_execution_speed_bytes` to limit resource usage. Added the `min_execution_speed_bytes` setting to complement `min_execution_speed`. [\#4430](https://github.com/ClickHouse/ClickHouse/pull/4430) ([Winter Zhang](https://github.com/zhang2014))
+- Implemented the function `flatten`; see the sketch after this list. [\#4555](https://github.com/ClickHouse/ClickHouse/pull/4555) [\#4409](https://github.com/ClickHouse/ClickHouse/pull/4409) ([alexey-milovidov](https://github.com/alexey-milovidov), [kzon](https://github.com/kzon))
+- Added the functions `arrayEnumerateDenseRanked` and `arrayEnumerateUniqRanked` (like `arrayEnumerateUniq` but they allow fine-tuning the array depth to look inside multidimensional arrays). [\#4475](https://github.com/ClickHouse/ClickHouse/pull/4475) ([proller](https://github.com/proller)) [\#4601](https://github.com/ClickHouse/ClickHouse/pull/4601) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Multiple JOINS with some restrictions: no asterisks, no complex aliases in ON/WHERE/GROUP BY/… [\#4462](https://github.com/ClickHouse/ClickHouse/pull/4462) ([Artem Zuikov](https://github.com/4ertus2))
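+
+A minimal sketch of the `flatten` function named in the list above; it collapses nested arrays of any depth into a single flat array:
+
+```sql
+SELECT flatten([[[1], [2, 3]], [[4, 5]]]) AS res;
+-- res = [1, 2, 3, 4, 5]
+```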
+
+#### Bug Fixes {#bug-fixes-11}
+
+- This release also contains all bug fixes from 19.3 and 19.1.
+- Fixed a bug in data skipping indices: the order of granules after INSERT was incorrect. [\#4407](https://github.com/ClickHouse/ClickHouse/pull/4407) ([Nikita Vasilev](https://github.com/nikvas0))
+- Fixed the `set` index for `Nullable` and `LowCardinality` columns. Before this, a `set` index with a `Nullable` or `LowCardinality` column led to the error `Data type must be deserialized with multiple streams` while selecting. [\#4594](https://github.com/ClickHouse/ClickHouse/pull/4594) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Correctly set update\_time on full `executable` dictionary update. [\#4551](https://github.com/ClickHouse/ClickHouse/pull/4551) ([Tema Novikov](https://github.com/temoon))
+- Fix the broken progress bar in 19.3. [\#4627](https://github.com/ClickHouse/ClickHouse/pull/4627) ([filimonov](https://github.com/filimonov))
+- Fixed inconsistent values of MemoryTracker when a memory region was shrunk, in certain cases. [\#4619](https://github.com/ClickHouse/ClickHouse/pull/4619) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed undefined behaviour in ThreadPool. [\#4612](https://github.com/ClickHouse/ClickHouse/pull/4612) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a very rare crash with the message `mutex lock failed: Invalid argument` that could happen when a MergeTree table was dropped concurrently with a SELECT. [\#4608](https://github.com/ClickHouse/ClickHouse/pull/4608) ([Alex Zatelepin](https://github.com/ztlpn))
+- ODBC driver compatibility with the `LowCardinality` data type. [\#4381](https://github.com/ClickHouse/ClickHouse/pull/4381) ([proller](https://github.com/proller))
+- FreeBSD: fix for the `AIOcontextPool: Found io_event with unknown id 0` error. [\#4438](https://github.com/ClickHouse/ClickHouse/pull/4438) ([urgordeadbeef](https://github.com/urgordeadbeef))
+- The `system.part_log` table was created regardless of configuration. [\#4483](https://github.com/ClickHouse/ClickHouse/pull/4483) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix undefined behaviour in the `dictIsIn` function for cache dictionaries. [\#4515](https://github.com/ClickHouse/ClickHouse/pull/4515) ([alesapin](https://github.com/alesapin))
+- Fixed a deadlock when a SELECT query locks the same table multiple times (e.g. from different threads or when executing multiple subqueries) and there is a concurrent DDL query. [\#4535](https://github.com/ClickHouse/ClickHouse/pull/4535) ([Alex Zatelepin](https://github.com/ztlpn))
+- Disable compile\_expressions by default until we get our own `llvm` contrib and can test it with `clang` and `asan`. [\#4579](https://github.com/ClickHouse/ClickHouse/pull/4579) ([alesapin](https://github.com/alesapin))
+- Prevent `std::terminate` when `invalidate_query` for a `clickhouse` external dictionary source has returned a wrong resultset (empty, or more than one row, or more than one column). Fixed an issue where `invalidate_query` was performed every five seconds regardless of the `lifetime`. [\#4583](https://github.com/ClickHouse/ClickHouse/pull/4583) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Avoid deadlock when the `invalidate_query` for a dictionary with `clickhouse` source involved the `system.dictionaries` table or the `Dictionaries` database (rare case). [\#4599](https://github.com/ClickHouse/ClickHouse/pull/4599) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixes for CROSS JOIN with empty WHERE. [\#4598](https://github.com/ClickHouse/ClickHouse/pull/4598) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed segfault in the function “replicate” when a constant argument is passed. [\#4603](https://github.com/ClickHouse/ClickHouse/pull/4603) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix lambda functions with the predicate optimizer. [\#4408](https://github.com/ClickHouse/ClickHouse/pull/4408) ([Winter Zhang](https://github.com/zhang2014))
+- Multiple fixes for multiple JOINs. [\#4595](https://github.com/ClickHouse/ClickHouse/pull/4595) ([Artem Zuikov](https://github.com/4ertus2))
+
+#### Improvements {#improvements-3}
+
+- Support aliases in the JOIN ON section for right table columns. [\#4412](https://github.com/ClickHouse/ClickHouse/pull/4412) ([Artem Zuikov](https://github.com/4ertus2))
+- The result of multiple JOINs needs correct result names to be used in subselects. Replace flat aliases with source names in the result. [\#4474](https://github.com/ClickHouse/ClickHouse/pull/4474) ([Artem Zuikov](https://github.com/4ertus2))
+- Improve push-down logic for joined statements. [\#4387](https://github.com/ClickHouse/ClickHouse/pull/4387) ([Ivan](https://github.com/abyss7))
+
+#### Performance Improvements {#performance-improvements-3}
+
+- Improved heuristics of the “move to PREWHERE” optimization. [\#4405](https://github.com/ClickHouse/ClickHouse/pull/4405) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Use proper lookup tables that use HashTable's API for 8-bit and 16-bit keys. [\#4536](https://github.com/ClickHouse/ClickHouse/pull/4536) ([Amos Bird](https://github.com/amosbird))
+- Improved performance of string comparison. [\#4564](https://github.com/ClickHouse/ClickHouse/pull/4564) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Cleanup of the distributed DDL queue in a separate thread so that it does not slow down the main loop that processes distributed DDL tasks. [\#4502](https://github.com/ClickHouse/ClickHouse/pull/4502) ([Alex Zatelepin](https://github.com/ztlpn))
+- When `min_bytes_to_use_direct_io` is set to 1, not every file was opened with the O\_DIRECT mode, because the data size to read was sometimes underestimated by the size of one compressed block. [\#4526](https://github.com/ClickHouse/ClickHouse/pull/4526) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-12}
+
+- Added support for clang-9. [\#4604](https://github.com/ClickHouse/ClickHouse/pull/4604) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix wrong `__asm__` instructions (again). [\#4621](https://github.com/ClickHouse/ClickHouse/pull/4621) ([Konstantin Podshumok](https://github.com/podshumok))
+- Added the ability to specify settings for `clickhouse-performance-test` from the command line. [\#4437](https://github.com/ClickHouse/ClickHouse/pull/4437) ([alesapin](https://github.com/alesapin))
+- Added dictionaries tests to the integration tests. [\#4477](https://github.com/ClickHouse/ClickHouse/pull/4477) ([alesapin](https://github.com/alesapin))
+- Added queries from the benchmark on the website to automated performance tests. [\#4496](https://github.com/ClickHouse/ClickHouse/pull/4496) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `xxhash.h` does not exist in external LZ4 because it is an implementation detail and its symbols are namespaced with the `XXH_NAMESPACE` macro. When lz4 is external, xxHash has to be external too, and the dependents have to link to it. [\#4495](https://github.com/ClickHouse/ClickHouse/pull/4495) ([Orivej Desh](https://github.com/orivej))
+- Fixed a case when the `quantileTiming` aggregate function could be called with a negative or floating-point argument (this fixes the fuzz test with undefined behaviour sanitizer). [\#4506](https://github.com/ClickHouse/ClickHouse/pull/4506) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Spelling error correction. [\#4531](https://github.com/ClickHouse/ClickHouse/pull/4531) ([sdk2](https://github.com/sdk2))
+- Fix compilation on Mac. [\#4371](https://github.com/ClickHouse/ClickHouse/pull/4371) ([Vitaly Baranov](https://github.com/vitlibar))
+- Build fixes for FreeBSD and various unusual build configurations. [\#4444](https://github.com/ClickHouse/ClickHouse/pull/4444) ([proller](https://github.com/proller))
+
+## ClickHouse release 19.3 {#clickhouse-release-19-3}
+
+### ClickHouse release 19.3.9.1, 2019-04-02 {#clickhouse-release-19-3-9-1-2019-04-02}
+
+#### Bug Fixes {#bug-fixes-12}
+
+- Fix crash in `FULL/RIGHT JOIN` when joining on nullable vs not nullable. [\#4855](https://github.com/ClickHouse/ClickHouse/pull/4855) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix segmentation fault in `clickhouse-copier`. [\#4835](https://github.com/ClickHouse/ClickHouse/pull/4835) ([proller](https://github.com/proller))
+- Fixed reading from an `Array(LowCardinality)` column in the rare case when the column contained a long sequence of empty arrays. [\#4850](https://github.com/ClickHouse/ClickHouse/pull/4850) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-13}
+
+- Added a way to launch the clickhouse-server image from a custom user. [\#4753](https://github.com/ClickHouse/ClickHouse/pull/4753) ([Mikhail f. Shiryaev](https://github.com/Felixoid))
+
+### ClickHouse release 19.3.7, 2019-03-12 {#clickhouse-release-19-3-7-2019-03-12}
+
+#### Bug Fixes {#bug-fixes-13}
+
+- Fixed an error in \#3920. This error manifests itself as random cache corruption (messages `Unknown codec family code`, `Cannot seek through file`) and segfaults. This bug first appeared in version 19.1 and is present in versions up to 19.1.10 and 19.3.6. [\#4623](https://github.com/ClickHouse/ClickHouse/pull/4623) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse release 19.3.6, 2019-03-02 {#clickhouse-release-19-3-6-2019-03-02}
+
+#### Bug Fixes {#bug-fixes-14}
+
+- When there are more than 1000 threads in a thread pool, `std::terminate` may happen on thread exit. [Azat Khuzhin](https://github.com/azat) [\#4485](https://github.com/ClickHouse/ClickHouse/pull/4485) [\#4505](https://github.com/ClickHouse/ClickHouse/pull/4505) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Now it's possible to create `ReplicatedMergeTree*` tables with comments on columns without defaults, and tables with column codecs without comments and defaults. Also fixed the comparison of codecs. [\#4523](https://github.com/ClickHouse/ClickHouse/pull/4523) ([alesapin](https://github.com/alesapin))
+- Fixed crash on JOIN with an array or tuple. [\#4552](https://github.com/ClickHouse/ClickHouse/pull/4552) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed crash in clickhouse-copier with the message `ThreadStatus not created`. [\#4540](https://github.com/ClickHouse/ClickHouse/pull/4540) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed hangup on server shutdown if distributed DDLs were used. [\#4472](https://github.com/ClickHouse/ClickHouse/pull/4472) ([Alex Zatelepin](https://github.com/ztlpn))
+- Incorrect column numbers were printed in the error message about text format parsing for columns with numbers greater than 10. [\#4484](https://github.com/ClickHouse/ClickHouse/pull/4484) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-3}
+
+- Fixed build with AVX enabled. [\#4527](https://github.com/ClickHouse/ClickHouse/pull/4527) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Enable extended accounting and I/O accounting based on a known-good version instead of the kernel under which it is compiled. [\#4541](https://github.com/ClickHouse/ClickHouse/pull/4541) ([nvartolomei](https://github.com/nvartolomei))
+- Allow skipping the setting of core\_dump.size\_limit, with a warning instead of a throw if setting the limit fails. [\#4473](https://github.com/ClickHouse/ClickHouse/pull/4473) ([proller](https://github.com/proller))
+- Removed the `inline` tags of `void readBinary(...)` in `Field.cpp`. Also merged redundant `namespace DB` blocks. [\#4530](https://github.com/ClickHouse/ClickHouse/pull/4530) ([hcz](https://github.com/hczhcz))
+
+### ClickHouse release 19.3.5, 2019-02-21 {#clickhouse-release-19-3-5-2019-02-21}
+
+#### Bug Fixes {#bug-fixes-15}
+
+- Fixed a bug with processing of large HTTP insert queries. [\#4454](https://github.com/ClickHouse/ClickHouse/pull/4454) ([alesapin](https://github.com/alesapin))
+- Fixed backward incompatibility with old versions due to a wrong implementation of the `send_logs_level` setting. [\#4445](https://github.com/ClickHouse/ClickHouse/pull/4445) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed backward incompatibility of the table function `remote` introduced with column comments. [\#4446](https://github.com/ClickHouse/ClickHouse/pull/4446) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse release 19.3.4, 2019-02-16 {#clickhouse-release-19-3-4-2019-02-16}
+
+#### Improvements {#improvements-4}
+
+- Table index size is not accounted against memory limits when doing an `ATTACH TABLE` query. Avoided the possibility that a table cannot be attached after being detached. [\#4396](https://github.com/ClickHouse/ClickHouse/pull/4396) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Slightly raised the limit on the max string and array size received from ZooKeeper. It allows continuing to work with an increased `CLIENT_JVMFLAGS=-Djute.maxbuffer=...` on ZooKeeper. [\#4398](https://github.com/ClickHouse/ClickHouse/pull/4398) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Allow repairing an abandoned replica even if it already has a huge number of nodes in its queue. [\#4399](https://github.com/ClickHouse/ClickHouse/pull/4399) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added one required argument to the `SET` index (the maximum number of stored rows). [\#4386](https://github.com/ClickHouse/ClickHouse/pull/4386) ([Nikita Vasilev](https://github.com/nikvas0))
+
+#### Bug Fixes {#bug-fixes-16}
+
+- Fixed the `WITH ROLLUP` result for GROUP BY on a single `LowCardinality` key. [\#4384](https://github.com/ClickHouse/ClickHouse/pull/4384) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed a bug in the set index (dropping a granule if it contains more than `max_rows` rows). [\#4386](https://github.com/ClickHouse/ClickHouse/pull/4386) ([Nikita Vasilev](https://github.com/nikvas0))
+- A lot of FreeBSD build fixes. [\#4397](https://github.com/ClickHouse/ClickHouse/pull/4397) ([proller](https://github.com/proller))
+- Fixed alias substitution in queries with a subquery containing the same alias (issue [\#4110](https://github.com/ClickHouse/ClickHouse/issues/4110)). [\#4351](https://github.com/ClickHouse/ClickHouse/pull/4351) ([Artem Zuikov](https://github.com/4ertus2))
+
+#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-4}
+
+- Added the ability to run `clickhouse-server` for stateless tests in a docker image. [\#4347](https://github.com/ClickHouse/ClickHouse/pull/4347) ([Vasily Nemkov](https://github.com/Enmk))
+
+### ClickHouse release 19.3.3, 2019-02-13 {#clickhouse-release-19-3-3-2019-02-13}
+
+#### New Features {#new-features-6}
+
+- Added the `KILL MUTATION` statement that allows removing mutations that are stuck for some reason. Added the `latest_failed_part`, `latest_fail_time`, `latest_fail_reason` fields to the `system.mutations` table for easier troubleshooting. [\#4287](https://github.com/ClickHouse/ClickHouse/pull/4287) ([Alex Zatelepin](https://github.com/ztlpn))
+- Added the aggregate function `entropy` which computes Shannon entropy. [\#4238](https://github.com/ClickHouse/ClickHouse/pull/4238) ([Quid37](https://github.com/Quid37))
+- Added the ability to send queries `INSERT INTO tbl VALUES (....` to the server without splitting them into `query` and `data` parts. [\#4301](https://github.com/ClickHouse/ClickHouse/pull/4301) ([alesapin](https://github.com/alesapin))
+- A generic implementation of the `arrayWithConstant` function was added. [\#4322](https://github.com/ClickHouse/ClickHouse/pull/4322) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Implemented the `NOT BETWEEN` comparison operator. [\#4228](https://github.com/ClickHouse/ClickHouse/pull/4228) ([Dmitry Naumov](https://github.com/nezed))
+- Implemented `sumMapFiltered` in order to be able to limit the number of keys for which values will be summed by `sumMap`. [\#4129](https://github.com/ClickHouse/ClickHouse/pull/4129) ([Léo Ercolanelli](https://github.com/ercolanelli-leo))
+- Added support for `Nullable` types in the `mysql` table function. [\#4198](https://github.com/ClickHouse/ClickHouse/pull/4198) ([Emmanuel Donin de Rosière](https://github.com/edonin))
+- Support for arbitrary constant expressions in the `LIMIT` clause. [\#4246](https://github.com/ClickHouse/ClickHouse/pull/4246) ([k3box](https://github.com/k3box))
+- Added the `topKWeighted` aggregate function that takes an additional argument with an (unsigned integer) weight. [\#4245](https://github.com/ClickHouse/ClickHouse/pull/4245) ([Andrew Golman](https://github.com/andrewgolman))
+- `StorageJoin` now supports the `join_any_take_last_row` setting that allows overwriting existing values of the same key. [\#3973](https://github.com/ClickHouse/ClickHouse/pull/3973) ([Amos Bird](https://github.com/amosbird))
+- Added the function `toStartOfInterval`. [\#4304](https://github.com/ClickHouse/ClickHouse/pull/4304) ([Vitaly Baranov](https://github.com/vitlibar))
+- Added the `RowBinaryWithNamesAndTypes` format. [\#4200](https://github.com/ClickHouse/ClickHouse/pull/4200) ([Oleg V. Kozlyuk](https://github.com/DarkWanderer))
+- Added the `IPv4` and `IPv6` data types. More effective implementations of `IPv*` functions. [\#3669](https://github.com/ClickHouse/ClickHouse/pull/3669) ([Vasily Nemkov](https://github.com/Enmk))
+- Added the function `toStartOfTenMinutes()`. [\#4298](https://github.com/ClickHouse/ClickHouse/pull/4298) ([Vitaly Baranov](https://github.com/vitlibar))
+- Added the `Protobuf` output format. [\#4005](https://github.com/ClickHouse/ClickHouse/pull/4005) [\#4158](https://github.com/ClickHouse/ClickHouse/pull/4158) ([Vitaly Baranov](https://github.com/vitlibar))
+- Added brotli support in the HTTP interface for data import (INSERTs). [\#4235](https://github.com/ClickHouse/ClickHouse/pull/4235) ([Mikhail](https://github.com/fandyushin))
+- Added hints for when a user makes a typo in a function name or a type in the command line client. [\#4239](https://github.com/ClickHouse/ClickHouse/pull/4239) ([Danila Kutenin](https://github.com/danlark1))
+- Added `Query-Id` to the Server's HTTP Response headers. [\#4231](https://github.com/ClickHouse/ClickHouse/pull/4231) ([Mikhail](https://github.com/fandyushin))
+
+#### Experimental Features {#experimental-features-2}
+
+- Added `minmax` and `set` data skipping indices for the MergeTree family of table engines; a sketch follows after this list. [\#4143](https://github.com/ClickHouse/ClickHouse/pull/4143) ([Nikita Vasilev](https://github.com/nikvas0))
+- Added conversion of `CROSS JOIN` to `INNER JOIN` if possible. [\#4221](https://github.com/ClickHouse/ClickHouse/pull/4221) [\#4266](https://github.com/ClickHouse/ClickHouse/pull/4266) ([Artem Zuikov](https://github.com/4ertus2))
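+
+A minimal sketch of the `minmax` and `set` skipping indices named above, with a hypothetical table (at the time, enabling them may also have required the experimental data-skipping-indices setting):
+
+```sql
+CREATE TABLE metrics
+(
+    `ts` DateTime,
+    `status` UInt16,
+    `value` Float64,
+    -- skip ranges of granules using the min/max of `value`
+    INDEX idx_value value TYPE minmax GRANULARITY 4,
+    -- skip ranges whose set of `status` values cannot match the filter
+    INDEX idx_status status TYPE set(100) GRANULARITY 4
+)
+ENGINE = MergeTree
+ORDER BY ts;
+```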
+
+#### Bug Fixes {#bug-fixes-17}
+
+- Fixed `Not found column` for duplicate columns in the `JOIN ON` section. [\#4279](https://github.com/ClickHouse/ClickHouse/pull/4279) ([Artem Zuikov](https://github.com/4ertus2))
+- Made the `START REPLICATED SENDS` command start replicated sends. [\#4229](https://github.com/ClickHouse/ClickHouse/pull/4229) ([nvartolomei](https://github.com/nvartolomei))
+- Fixed aggregate function execution with `Array(LowCardinality)` arguments. [\#4055](https://github.com/ClickHouse/ClickHouse/pull/4055) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Fixed wrong behaviour when doing an `INSERT ... SELECT ... FROM file(...)` query where the file has `CSVWithNames` or `TSVWithNames` format and the first data row is missing. [\#4297](https://github.com/ClickHouse/ClickHouse/pull/4297) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed crash on dictionary reload if the dictionary is not available. This bug appeared in 19.1.6. [\#4188](https://github.com/ClickHouse/ClickHouse/pull/4188) ([proller](https://github.com/proller))
+- Fixed `ALL JOIN` with duplicates in the right table. [\#4184](https://github.com/ClickHouse/ClickHouse/pull/4184) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed a segmentation fault with `use_uncompressed_cache=1` and an exception with the wrong uncompressed size. This bug appeared in 19.1.6. [\#4186](https://github.com/ClickHouse/ClickHouse/pull/4186) ([alesapin](https://github.com/alesapin))
+- Fixed a `compile_expressions` bug with the comparison of big (more than int16) dates. [\#4341](https://github.com/ClickHouse/ClickHouse/pull/4341) ([alesapin](https://github.com/alesapin))
+- Fixed an infinite loop when selecting from the table function `numbers(0)`. [\#4280](https://github.com/ClickHouse/ClickHouse/pull/4280) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Temporarily disable predicate optimization for `ORDER BY`. [\#3890](https://github.com/ClickHouse/ClickHouse/pull/3890) ([Winter Zhang](https://github.com/zhang2014))
+- Fixed the `Illegal instruction` error when using base64 functions on old CPUs. This error was reproduced only when ClickHouse was compiled with gcc-8. [\#4275](https://github.com/ClickHouse/ClickHouse/pull/4275) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed the `No message received` error when interacting with the PostgreSQL ODBC driver through a TLS connection. Also fixes a segfault when using the MySQL ODBC driver. [\#4170](https://github.com/ClickHouse/ClickHouse/pull/4170) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed an incorrect result when `Date` and `DateTime` arguments are used in branches of the conditional operator (function `if`). Added a generic case for the function `if`. [\#4243](https://github.com/ClickHouse/ClickHouse/pull/4243) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- ClickHouse dictionaries now load within the `clickhouse` process. [\#4166](https://github.com/ClickHouse/ClickHouse/pull/4166) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a deadlock when a `SELECT` from a table with `File` engine was retried after a `No such file or directory` error. [\#4161](https://github.com/ClickHouse/ClickHouse/pull/4161) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a race condition where selecting from `system.tables` could give a `table doesn't exist` error. [\#4313](https://github.com/ClickHouse/ClickHouse/pull/4313) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `clickhouse-client` could segfault at exit while loading data for command line suggestions if run in interactive mode. [\#4317](https://github.com/ClickHouse/ClickHouse/pull/4317) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a bug where executing mutations containing `IN` operators was producing incorrect results. [\#4099](https://github.com/ClickHouse/ClickHouse/pull/4099) ([Alex Zatelepin](https://github.com/ztlpn))
+- Fixed an error: if there is a database with the `Dictionary` engine, all dictionaries are forced to load at server startup, and if there is a dictionary with a ClickHouse source on localhost, that dictionary cannot load. [\#4255](https://github.com/ClickHouse/ClickHouse/pull/4255) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed an error when system logs were attempted to be created again at server shutdown. [\#4254](https://github.com/ClickHouse/ClickHouse/pull/4254) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Correctly return the right type and properly handle locks in the `joinGet` function. [\#4153](https://github.com/ClickHouse/ClickHouse/pull/4153) ([Amos Bird](https://github.com/amosbird))
+- Added the `sumMapWithOverflow` function. [\#4151](https://github.com/ClickHouse/ClickHouse/pull/4151) ([Léo Ercolanelli](https://github.com/ercolanelli-leo))
+- Fixed a segfault with `allow_experimental_multiple_joins_emulation`. [52de2c](https://github.com/ClickHouse/ClickHouse/commit/52de2cd927f7b5257dd67e175f0a5560a48840d0) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed a bug with incorrect `Date` and `DateTime` comparison. [\#4237](https://github.com/ClickHouse/ClickHouse/pull/4237) ([valexey](https://github.com/valexey))
+- Fixed a fuzz test under the undefined behavior sanitizer: added a parameter type check for the `quantile*Weighted` family of functions. [\#4145](https://github.com/ClickHouse/ClickHouse/pull/4145) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a rare race condition where removal of old data parts could fail with a `File not found` error. [\#4378](https://github.com/ClickHouse/ClickHouse/pull/4378) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed the install package when /etc/clickhouse-server/config.xml is missing. [\#4343](https://github.com/ClickHouse/ClickHouse/pull/4343) ([proller](https://github.com/proller))
+
+#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-5}
+
+- Debian package: correct the /etc/clickhouse-server/preprocessed link according to the config. [\#4205](https://github.com/ClickHouse/ClickHouse/pull/4205) ([proller](https://github.com/proller))
+- Various build fixes for FreeBSD. [\#4225](https://github.com/ClickHouse/ClickHouse/pull/4225) ([proller](https://github.com/proller))
+- Added the ability to create, fill and drop tables in perftest. [\#4220](https://github.com/ClickHouse/ClickHouse/pull/4220) ([alesapin](https://github.com/alesapin))
+- Added a script to check for duplicate includes. [\#4326](https://github.com/ClickHouse/ClickHouse/pull/4326) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added the ability to run queries by index in performance tests. [\#4264](https://github.com/ClickHouse/ClickHouse/pull/4264) ([alesapin](https://github.com/alesapin))
+- The package with debug symbols is now suggested to be installed. [\#4274](https://github.com/ClickHouse/ClickHouse/pull/4274) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Refactoring of performance-test: better logging and signal handling. [\#4171](https://github.com/ClickHouse/ClickHouse/pull/4171) ([alesapin](https://github.com/alesapin))
+- Added docs to the anonymized Yandex.Metrika datasets. [\#4164](https://github.com/ClickHouse/ClickHouse/pull/4164) ([alesapin](https://github.com/alesapin))
+- Added a tool for converting an old month-partitioned part to the custom-partitioned format. [\#4195](https://github.com/ClickHouse/ClickHouse/pull/4195) ([Alex Zatelepin](https://github.com/ztlpn))
+- Added docs about two datasets in s3. [\#4144](https://github.com/ClickHouse/ClickHouse/pull/4144) ([alesapin](https://github.com/alesapin))
+- Added a script that creates the changelog from pull request descriptions. [\#4169](https://github.com/ClickHouse/ClickHouse/pull/4169) [\#4173](https://github.com/ClickHouse/ClickHouse/pull/4173) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Added a puppet module for ClickHouse. [\#4182](https://github.com/ClickHouse/ClickHouse/pull/4182) ([Maxim Fedotov](https://github.com/MaxFedotov))
+- Added docs for a group of undocumented functions. [\#4168](https://github.com/ClickHouse/ClickHouse/pull/4168) ([Winter Zhang](https://github.com/zhang2014))
+- ARM build fixes. [\#4210](https://github.com/ClickHouse/ClickHouse/pull/4210) [\#4306](https://github.com/ClickHouse/ClickHouse/pull/4306) [\#4291](https://github.com/ClickHouse/ClickHouse/pull/4291) ([proller](https://github.com/proller))
+- Dictionary tests are now able to run from `ctest`. [\#4189](https://github.com/ClickHouse/ClickHouse/pull/4189) ([proller](https://github.com/proller))
+- Now `/etc/ssl` is used as the default directory for SSL certificates. [\#4167](https://github.com/ClickHouse/ClickHouse/pull/4167) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added a check for SSE and AVX instructions at startup. [\#4234](https://github.com/ClickHouse/ClickHouse/pull/4234) ([Igr](https://github.com/igron99))
+- The init script now waits for the server until startup. [\#4281](https://github.com/ClickHouse/ClickHouse/pull/4281) ([proller](https://github.com/proller))
+
+#### Backward Incompatible Changes {#backward-incompatible-changes-1}
+
+- Removed the `allow_experimental_low_cardinality_type` setting. `LowCardinality` data types are production ready. [\#4323](https://github.com/ClickHouse/ClickHouse/pull/4323) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Reduce the mark cache size and the uncompressed cache size according to the available memory amount. [\#4240](https://github.com/ClickHouse/ClickHouse/pull/4240) ([Lopatin Konstantin](https://github.com/k-lopatin))
+- Added the keyword `INDEX` to the `CREATE TABLE` query. A column with the name `index` has to be quoted with backticks or double quotes: `` `index` ``. [\#4143](https://github.com/ClickHouse/ClickHouse/pull/4143) ([Nikita Vasilev](https://github.com/nikvas0))
+- `sumMap` now promotes the result type instead of overflowing. The old `sumMap` behavior can be obtained with the `sumMapWithOverflow` function. [\#4151](https://github.com/ClickHouse/ClickHouse/pull/4151) ([Léo Ercolanelli](https://github.com/ercolanelli-leo))
+
+#### Performance Improvements {#performance-improvements-4}
+
+- `std::sort` replaced by `pdqsort` for queries without `LIMIT`. [\#4236](https://github.com/ClickHouse/ClickHouse/pull/4236) ([Evgenii Pravda](https://github.com/kvinty))
+- The server now reuses threads from the global thread pool. This affects performance in some corner cases. [\#4150](https://github.com/ClickHouse/ClickHouse/pull/4150) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Improvements {#improvements-5}
+
+- Implemented AIO support for FreeBSD. [\#4305](https://github.com/ClickHouse/ClickHouse/pull/4305) ([urgordeadbeef](https://github.com/urgordeadbeef))
+- `SELECT * FROM a JOIN b USING a, b` now returns the `a` and `b` columns only from the left table. [\#4141](https://github.com/ClickHouse/ClickHouse/pull/4141) ([Artem Zuikov](https://github.com/4ertus2))
+- Allow the `-C` option of the client to work like the `-c` option. [\#4232](https://github.com/ClickHouse/ClickHouse/pull/4232) ([syominsergey](https://github.com/syominsergey))
+- The `--password` option used without a value now requires the password from stdin. [\#4230](https://github.com/ClickHouse/ClickHouse/pull/4230) ([BSD\_Conqueror](https://github.com/bsd-conqueror))
+- Added highlighting of unescaped metacharacters in string literals that contain `LIKE` expressions or regexps. [\#4327](https://github.com/ClickHouse/ClickHouse/pull/4327) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added cancellation of HTTP read-only queries if the client socket goes away. [\#4213](https://github.com/ClickHouse/ClickHouse/pull/4213) ([nvartolomei](https://github.com/nvartolomei))
+- The server now reports progress to keep client connections alive. [\#4215](https://github.com/ClickHouse/ClickHouse/pull/4215) ([Ivan](https://github.com/abyss7))
+- Slightly better message with the reason for an OPTIMIZE query when the `optimize_throw_if_noop` setting is enabled. [\#4294](https://github.com/ClickHouse/ClickHouse/pull/4294) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added support for the `--version` option for the clickhouse server. [\#4251](https://github.com/ClickHouse/ClickHouse/pull/4251) ([Lopatin Konstantin](https://github.com/k-lopatin))
+- Added the `--help/-h` option to `clickhouse-server`. [\#4233](https://github.com/ClickHouse/ClickHouse/pull/4233) ([Yuriy Baranov](https://github.com/yurriy))
+- Added support for scalar subqueries with an aggregate function state result. [\#4348](https://github.com/ClickHouse/ClickHouse/pull/4348) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Improved server shutdown time and ALTERs waiting time. [\#4372](https://github.com/ClickHouse/ClickHouse/pull/4372) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added info about the replicated\_can\_become\_leader setting to system.replicas, and added logging if the replica will not try to become the leader. [\#4379](https://github.com/ClickHouse/ClickHouse/pull/4379) ([Alex Zatelepin](https://github.com/ztlpn))
+
+## ClickHouse Release 19.1 {#clickhouse-release-19-1}
+
+### ClickHouse Release 19.1.14, 2019-03-14 {#clickhouse-release-19-1-14-2019-03-14}
+
+- Fixed the error `Column ... queried more than once` that may happen if the setting `asterisk_left_columns_only` is set to 1 when using `GLOBAL JOIN` with `SELECT *` (rare case). The issue does not exist in 19.3 and newer. [6bac7d8d](https://github.com/ClickHouse/ClickHouse/pull/4692/commits/6bac7d8d11a9b0d6de0b32b53c47eb2f6f8e7062) ([Artem Zuikov](https://github.com/4ertus2))
+
+### ClickHouse Release 19.1.13, 2019-03-12 {#clickhouse-release-19-1-13-2019-03-12}
+
+This release contains exactly the same set of patches as 19.3.7.
+
+### ClickHouse Release 19.1.10, 2019-03-03 {#clickhouse-release-19-1-10-2019-03-03}
+
+This release contains exactly the same set of patches as 19.3.6.
+
+## ClickHouse Release 19.1 {#clickhouse-release-19-1-1}
+
+### ClickHouse Release 19.1.9, 2019-02-21 {#clickhouse-release-19-1-9-2019-02-21}
+
+#### Bug Fixes {#bug-fixes-18}
+
+- Fixed backward incompatibility with old versions due to a wrong implementation of the `send_logs_level` setting. [\#4445](https://github.com/ClickHouse/ClickHouse/pull/4445) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed backward incompatibility of the table function `remote` introduced with column comments. [\#4446](https://github.com/ClickHouse/ClickHouse/pull/4446) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse Release 19.1.8, 2019-02-16 {#clickhouse-release-19-1-8-2019-02-16}
+
+#### Bug Fixes {#bug-fixes-19}
+
+- Fixed the install package when /etc/clickhouse-server/config.xml is missing. [\#4343](https://github.com/ClickHouse/ClickHouse/pull/4343) ([proller](https://github.com/proller))
+
+## ClickHouse Release 19.1 {#clickhouse-release-19-1-2}
+
+### ClickHouse Release 19.1.7, 2019-02-15 {#clickhouse-release-19-1-7-2019-02-15}
+
+#### Bug Fixes {#bug-fixes-20}
+
+- Correctly return the right type and properly handle locks in the `joinGet` function. [\#4153](https://github.com/ClickHouse/ClickHouse/pull/4153) ([Amos Bird](https://github.com/amosbird))
+- Fixed an error when system logs were attempted to be created again at server shutdown. [\#4254](https://github.com/ClickHouse/ClickHouse/pull/4254) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed an error: if there is a database with the `Dictionary` engine, all dictionaries are forced to load at server startup, and a dictionary with a ClickHouse source on localhost cannot load. [\#4255](https://github.com/ClickHouse/ClickHouse/pull/4255) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a bug where mutations containing `IN` operators produced incorrect results. [\#4099](https://github.com/ClickHouse/ClickHouse/pull/4099) ([Alex Zatelepin](https://github.com/ztlpn))
+- `clickhouse-client` could segfault on exit while loading data for command-line suggestions if it was run in interactive mode. [\#4317](https://github.com/ClickHouse/ClickHouse/pull/4317) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a race condition when selecting from `system.tables` could give a `table doesn't exist` error. [\#4313](https://github.com/ClickHouse/ClickHouse/pull/4313) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a deadlock when a `SELECT` from a table with the `File` engine was retried after a `No such file or directory` error. [\#4161](https://github.com/ClickHouse/ClickHouse/pull/4161) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed an issue: local ClickHouse dictionaries were loaded via TCP, but they should load within the process. [\#4166](https://github.com/ClickHouse/ClickHouse/pull/4166) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a `No message received` error when interacting with the PostgreSQL ODBC driver over a TLS connection. Also fixes a segfault when using the MySQL ODBC driver. [\#4170](https://github.com/ClickHouse/ClickHouse/pull/4170) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Temporarily disabled predicate optimization for `ORDER BY`. [\#3890](https://github.com/ClickHouse/ClickHouse/pull/3890) ([Winter Zhang](https://github.com/zhang2014))
+- Fixed an infinite loop when selecting from the table function `numbers(0)`. [\#4280](https://github.com/ClickHouse/ClickHouse/pull/4280) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a `compile_expressions` bug with comparison of big (larger than int16) dates. [\#4341](https://github.com/ClickHouse/ClickHouse/pull/4341) ([alesapin](https://github.com/alesapin))
+- Fixed a segmentation fault with `uncompressed_cache=1` and an exception with a wrong uncompressed size. [\#4186](https://github.com/ClickHouse/ClickHouse/pull/4186) ([alesapin](https://github.com/alesapin))
+- Fixed `ALL JOIN` with duplicates in the right table. [\#4184](https://github.com/ClickHouse/ClickHouse/pull/4184) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed wrong behavior of `INSERT ... SELECT ... FROM file(...)` queries when the file has the `CSVWithNames` or `TSVWithNames` format and the first data row is missing. [\#4297](https://github.com/ClickHouse/ClickHouse/pull/4297) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed execution of aggregate functions with `Array(LowCardinality)` arguments. [\#4055](https://github.com/ClickHouse/ClickHouse/pull/4055) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Debian package: correct the /etc/clickhouse-server/preprocessed link according to the config. [\#4205](https://github.com/ClickHouse/ClickHouse/pull/4205) ([proller](https://github.com/proller))
+- Fixed a fuzz test under the undefined behavior sanitizer: added a parameter type check for the `quantile*Weighted` family of functions. [\#4145](https://github.com/ClickHouse/ClickHouse/pull/4145) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Made the `START REPLICATED SENDS` command actually start replicated sends. [\#4229](https://github.com/ClickHouse/ClickHouse/pull/4229) ([nvartolomei](https://github.com/nvartolomei))
+- Fixed `Not found column` for duplicate columns in the JOIN ON section. [\#4279](https://github.com/ClickHouse/ClickHouse/pull/4279) ([Artem Zuikov](https://github.com/4ertus2))
+- Now `/etc/ssl` is used as the default directory for SSL certificates. [\#4167](https://github.com/ClickHouse/ClickHouse/pull/4167) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a crash on dictionary reload when the dictionary is not available. [\#4188](https://github.com/ClickHouse/ClickHouse/pull/4188) ([proller](https://github.com/proller))
+- Fixed a bug with incorrect `Date` and `DateTime` comparison. [\#4237](https://github.com/ClickHouse/ClickHouse/pull/4237) ([valexey](https://github.com/valexey))
+- Fixed an incorrect result when `Date` and `DateTime` arguments are used in branches of the conditional operator (function `if`). Added a generic case for the function `if`. [\#4243](https://github.com/ClickHouse/ClickHouse/pull/4243) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse Release 19.1.6, 2019-01-24 {#clickhouse-release-19-1-6-2019-01-24}
+
+#### New Features {#new-features-7}
+
+- Custom per-column compression codecs for tables. [\#3899](https://github.com/ClickHouse/ClickHouse/pull/3899) [\#4111](https://github.com/ClickHouse/ClickHouse/pull/4111) ([alesapin](https://github.com/alesapin), [Winter Zhang](https://github.com/zhang2014), [Anatoli](https://github.com/Sindbag))
+- Added the compression codec `Delta`. [\#4052](https://github.com/ClickHouse/ClickHouse/pull/4052) ([alesapin](https://github.com/alesapin))
+- Allow `ALTER`ing compression codecs. [\#4054](https://github.com/ClickHouse/ClickHouse/pull/4054) ([alesapin](https://github.com/alesapin))
+- Added the functions `left`, `right`, `trim`, `ltrim`, `rtrim`, `timestampadd`, `timestampsub` for SQL standard compatibility. [\#3826](https://github.com/ClickHouse/ClickHouse/pull/3826) ([Ivan Blinkov](https://github.com/blinkov))
+- Support for writing to `HDFS` tables and the `hdfs` table function. [\#4084](https://github.com/ClickHouse/ClickHouse/pull/4084) ([alesapin](https://github.com/alesapin))
+- Added functions to search for multiple constant strings in a big haystack: `multiPosition`, `multiSearch`, `firstMatch`, also with `-UTF8`, `-CaseInsensitive`, and `-CaseInsensitiveUTF8` variants. [\#4053](https://github.com/ClickHouse/ClickHouse/pull/4053) ([Danila Kutenin](https://github.com/danlark1))
+- Pruning of unused shards if a `SELECT` query filters by the sharding key (setting `optimize_skip_unused_shards`). [\#3851](https://github.com/ClickHouse/ClickHouse/pull/3851) ([Gleb Kanterov](https://github.com/kanterov), [Ivan](https://github.com/abyss7))
+- Allow the `Kafka` engine to ignore some number of parsing errors per block. [\#4094](https://github.com/ClickHouse/ClickHouse/pull/4094) ([Ivan](https://github.com/abyss7))
+- Added support for evaluation of `CatBoost` multiclass models. The function `modelEvaluate` returns a tuple with raw per-class predictions for multiclass models. `libcatboostmodel.so` should be built with [\#607](https://github.com/catboost/catboost/pull/607). [\#3959](https://github.com/ClickHouse/ClickHouse/pull/3959) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Added the functions `filesystemAvailable`, `filesystemFree`, `filesystemCapacity`. [\#4097](https://github.com/ClickHouse/ClickHouse/pull/4097) ([Boris Granveaud](https://github.com/bgranvea))
+- Added the hashing functions `xxHash64` and `xxHash32`. [\#3905](https://github.com/ClickHouse/ClickHouse/pull/3905) ([filimonov](https://github.com/filimonov))
+- Added the `gccMurmurHash` hashing function (GCC-flavoured Murmur hash) which uses the same hash seed as [gcc](https://github.com/gcc-mirror/gcc/blob/41d6b10e96a1de98e90a7c0378437c3255814b16/libstdc%2B%2B-v3/include/bits/functional_hash.h#L191) [\#4000](https://github.com/ClickHouse/ClickHouse/pull/4000) ([sundyli](https://github.com/sundy-li))
+- Added the hashing functions `javaHash`, `hiveHash`. [\#3811](https://github.com/ClickHouse/ClickHouse/pull/3811) ([shangshujie365](https://github.com/shangshujie365))
+- Added the table function `remoteSecure`. The function works like `remote`, but uses a secure connection. [\#4088](https://github.com/ClickHouse/ClickHouse/pull/4088) ([proller](https://github.com/proller))
+
+#### Experimental Features {#experimental-features-3}
+
+- Added multiple JOINs emulation (the `allow_experimental_multiple_joins_emulation` setting). [\#3946](https://github.com/ClickHouse/ClickHouse/pull/3946) ([Artem Zuikov](https://github.com/4ertus2))
+
+#### Bug Fixes {#bug-fixes-21}
+
+- Made the `compiled_expression_cache_size` setting limited by default to lower memory consumption. [\#4041](https://github.com/ClickHouse/ClickHouse/pull/4041) ([alesapin](https://github.com/alesapin))
+- Fixed a bug that led to hangups in threads that perform ALTERs of replicated tables and in the thread that updates the configuration from ZooKeeper. [\#2947](https://github.com/ClickHouse/ClickHouse/issues/2947) [\#3891](https://github.com/ClickHouse/ClickHouse/issues/3891) [\#3934](https://github.com/ClickHouse/ClickHouse/pull/3934) ([Alex Zatelepin](https://github.com/ztlpn))
+- Fixed a race condition when executing a distributed ALTER task. The race condition led to more than one replica trying to execute the task, with all replicas except one failing with a ZooKeeper error. [\#3904](https://github.com/ClickHouse/ClickHouse/pull/3904) ([Alex Zatelepin](https://github.com/ztlpn))
+- Fixed a bug where `from_zk` config elements were not refreshed after a request to ZooKeeper timed out. [\#2947](https://github.com/ClickHouse/ClickHouse/issues/2947) [\#3947](https://github.com/ClickHouse/ClickHouse/pull/3947) ([Alex Zatelepin](https://github.com/ztlpn))
+- Fixed a bug with a wrong prefix for IPv4 subnet masks. [\#3945](https://github.com/ClickHouse/ClickHouse/pull/3945) ([alesapin](https://github.com/alesapin))
+- Fixed a crash (`std::terminate`) in rare cases when a new thread cannot be created due to exhausted resources. [\#3956](https://github.com/ClickHouse/ClickHouse/pull/3956) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a bug in `remote` table function execution when wrong restrictions were used in `getStructureOfRemoteTable`. [\#4009](https://github.com/ClickHouse/ClickHouse/pull/4009) ([alesapin](https://github.com/alesapin))
+- Fixed a leak of netlink sockets. They were placed in a pool where they were never deleted, and new sockets were created at the start of a new thread when all current sockets were in use. [\#4017](https://github.com/ClickHouse/ClickHouse/pull/4017) ([Alex Zatelepin](https://github.com/ztlpn))
+- Fixed a bug with closing the `/proc/self/fd` directory before all fds were read from `/proc` after forking the `odbc-bridge` subprocess. [\#4120](https://github.com/ClickHouse/ClickHouse/pull/4120) ([alesapin](https://github.com/alesapin))
+- Fixed String-to-UInt monotonic conversion when a String is used in the primary key. [\#3870](https://github.com/ClickHouse/ClickHouse/pull/3870) ([Winter Zhang](https://github.com/zhang2014))
+- Fixed an error in the calculation of integer conversion function monotonicity. [\#3921](https://github.com/ClickHouse/ClickHouse/pull/3921) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a segfault in the `arrayEnumerateUniq`, `arrayEnumerateDense` functions for some invalid arguments. [\#3909](https://github.com/ClickHouse/ClickHouse/pull/3909) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed UB in StorageMerge. [\#3910](https://github.com/ClickHouse/ClickHouse/pull/3910) ([Amos Bird](https://github.com/amosbird))
+- Fixed a segfault in the functions `addDays`, `subtractDays`. [\#3913](https://github.com/ClickHouse/ClickHouse/pull/3913) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed an error: the functions `round`, `floor`, `trunc`, `ceil` could return a bogus result when executed on an integer argument with a large negative scale. [\#3914](https://github.com/ClickHouse/ClickHouse/pull/3914) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a bug induced by ‘kill query sync’ which led to a core dump. [\#3916](https://github.com/ClickHouse/ClickHouse/pull/3916) ([muVulDeePecker](https://github.com/fancyqlx))
+- Fixed a bug with a long delay after an empty replication queue. [\#3928](https://github.com/ClickHouse/ClickHouse/pull/3928) [\#3932](https://github.com/ClickHouse/ClickHouse/pull/3932) ([alesapin](https://github.com/alesapin))
+- Fixed excessive memory usage when inserting into a table with a `LowCardinality` primary key. [\#3955](https://github.com/ClickHouse/ClickHouse/pull/3955) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Fixed `LowCardinality` serialization for the `Native` format in the case of empty arrays. [\#3907](https://github.com/ClickHouse/ClickHouse/issues/3907) [\#4011](https://github.com/ClickHouse/ClickHouse/pull/4011) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Fixed an incorrect result while using distinct by a single LowCardinality numeric column. [\#3895](https://github.com/ClickHouse/ClickHouse/issues/3895) [\#4012](https://github.com/ClickHouse/ClickHouse/pull/4012) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Fixed specialized aggregation with a LowCardinality key (when the `compile` setting is enabled). [\#3886](https://github.com/ClickHouse/ClickHouse/pull/3886) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Fixed user and password forwarding for replicated tables queries. [\#3957](https://github.com/ClickHouse/ClickHouse/pull/3957) ([alesapin](https://github.com/alesapin)) ([小路](https://github.com/nicelulu))
+- Fixed a very rare race condition that could happen when listing tables in the Dictionary database while reloading dictionaries. [\#3970](https://github.com/ClickHouse/ClickHouse/pull/3970) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed an incorrect result when HAVING was used with ROLLUP or CUBE. [\#3756](https://github.com/ClickHouse/ClickHouse/issues/3756) [\#3837](https://github.com/ClickHouse/ClickHouse/pull/3837) ([Sam Chou](https://github.com/reflection))
+- Fixed column aliases for queries with `JOIN ON` syntax and distributed tables. [\#3980](https://github.com/ClickHouse/ClickHouse/pull/3980) ([Winter Zhang](https://github.com/zhang2014))
+- Fixed an error in the internal implementation of `quantileTDigest` (found by Artem Vakhrushev). This error never happens in ClickHouse and was relevant only for those who use the ClickHouse codebase as a library directly. [\#3935](https://github.com/ClickHouse/ClickHouse/pull/3935) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Improvements {#improvements-6}
+
+- Support for `IF NOT EXISTS` in `ALTER TABLE ADD COLUMN` statements along with `IF EXISTS` in `DROP/MODIFY/CLEAR/COMMENT COLUMN`. [\#3900](https://github.com/ClickHouse/ClickHouse/pull/3900) ([Boris Granveaud](https://github.com/bgranvea))
+- Function `parseDateTimeBestEffort`: support for the formats `DD.MM.YYYY`, `DD.MM.YY`, `DD-MM-YYYY`, `DD-Mon-YYYY`, `DD/Month/YYYY` and similar. [\#3922](https://github.com/ClickHouse/ClickHouse/pull/3922) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `CapnProtoInputStream` now supports jagged structures. [\#4063](https://github.com/ClickHouse/ClickHouse/pull/4063) ([Odin Hultgren Van Der Horst](https://github.com/Miniwoffer))
+- Usability improvement: added a check that the server process is started by the data directory owner. Do not allow starting the server from root if the data belongs to a non-root user. [\#3785](https://github.com/ClickHouse/ClickHouse/pull/3785) ([sergey-v-galtsev](https://github.com/sergey-v-galtsev))
+- Better logic for checking required columns during analysis of queries with JOINs. [\#3930](https://github.com/ClickHouse/ClickHouse/pull/3930) ([Artem Zuikov](https://github.com/4ertus2))
+- Decreased the number of connections in the case of a large number of Distributed tables on a single server. [\#3726](https://github.com/ClickHouse/ClickHouse/pull/3726) ([Winter Zhang](https://github.com/zhang2014))
+- Supported totals row for `WITH TOTALS` queries for the ODBC driver. [\#3836](https://github.com/ClickHouse/ClickHouse/pull/3836) ([Maksim Koritckiy](https://github.com/nightweb))
+- Allowed using `Enum`s as integers inside the if function. [\#3875](https://github.com/ClickHouse/ClickHouse/pull/3875) ([Ivan](https://github.com/abyss7))
+- Added the `low_cardinality_allow_in_native_format` setting. If disabled, do not use the `LowCardinality` type in the `Native` format. [\#3879](https://github.com/ClickHouse/ClickHouse/pull/3879) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Removed some redundant objects from the compiled expressions cache to lower memory usage. [\#4042](https://github.com/ClickHouse/ClickHouse/pull/4042) ([alesapin](https://github.com/alesapin))
+- Added a check that the `SET send_logs_level = 'value'` query accepts an appropriate value. [\#3873](https://github.com/ClickHouse/ClickHouse/pull/3873) ([Sabyanin Maxim](https://github.com/s-mx))
+- Fixed the data type check in type conversion functions. [\#3896](https://github.com/ClickHouse/ClickHouse/pull/3896) ([Winter Zhang](https://github.com/zhang2014))
+
+#### Performance Improvements {#performance-improvements-5}
+
+- Added the MergeTree setting `use_minimalistic_part_header_in_zookeeper`. If enabled, replicated tables store compact part metadata in a single part znode. This can dramatically reduce ZooKeeper snapshot size (especially if the tables have a lot of columns). Note that after enabling this setting you will not be able to downgrade to a version that does not support it. [\#3960](https://github.com/ClickHouse/ClickHouse/pull/3960) ([Alex Zatelepin](https://github.com/ztlpn))
+- Added a DFA-based implementation for the functions `sequenceMatch` and `sequenceCount` when the pattern does not contain time. [\#4004](https://github.com/ClickHouse/ClickHouse/pull/4004) ([Léo Ercolanelli](https://github.com/ercolanelli-leo))
+- Performance improvement for integer number serialization. [\#3968](https://github.com/ClickHouse/ClickHouse/pull/3968) ([Amos Bird](https://github.com/amosbird))
+- Zero left padding of PODArray so that the -1 element is always valid and zeroed. It is used for branchless calculation of offsets (see the sketch after this list). [\#3920](https://github.com/ClickHouse/ClickHouse/pull/3920) ([Amos Bird](https://github.com/amosbird))
+- Reverted the `jemalloc` version which led to performance degradation. [\#4018](https://github.com/ClickHouse/ClickHouse/pull/4018) ([alexey-milovidov](https://github.com/alexey-milovidov))
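+
+A hedged sketch of the branchless-offsets idea behind the PODArray change above — a toy array for illustration, not the real PODArray class:
+
+``` cpp
+#include <cstdint>
+#include <vector>
+
+// Offsets mark the end of each array in a flattened column. With one zeroed
+// element padded on the left, element -1 is valid, so the start of row i is
+// always offsets[i - 1] and no branch is needed for the i == 0 case.
+struct ToyOffsets
+{
+    std::vector<uint64_t> storage; // storage[0] is the zero pad
+
+    explicit ToyOffsets(std::vector<uint64_t> offsets) : storage(std::move(offsets))
+    {
+        storage.insert(storage.begin(), 0); // emulate the left padding
+    }
+
+    uint64_t startOfRow(size_t i) const { return storage[i]; }   // logical offsets[i - 1]
+    uint64_t endOfRow(size_t i) const { return storage[i + 1]; } // logical offsets[i]
+};
+```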
+
+#### Backward Incompatible Changes {#backward-incompatible-changes-2}
+
+- Removed the undocumented feature `ALTER MODIFY PRIMARY KEY` because it was superseded by the `ALTER MODIFY ORDER BY` command. [\#3887](https://github.com/ClickHouse/ClickHouse/pull/3887) ([Alex Zatelepin](https://github.com/ztlpn))
+- Removed the function `shardByHash`. [\#3833](https://github.com/ClickHouse/ClickHouse/pull/3833) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Forbid using scalar subqueries with results of type `AggregateFunction`. [\#3865](https://github.com/ClickHouse/ClickHouse/pull/3865) ([Ivan](https://github.com/abyss7))
+
+#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-6}
+
+- Added support for PowerPC (`ppc64le`) builds. [\#4132](https://github.com/ClickHouse/ClickHouse/pull/4132) ([Danila Kutenin](https://github.com/danlark1))
+- Stateful functional tests are run on a publicly available dataset. [\#3969](https://github.com/ClickHouse/ClickHouse/pull/3969) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed an error when the server cannot start with the `bash: /usr/bin/clickhouse-extract-from-config: Operation not permitted` message within Docker or systemd-nspawn. [\#4136](https://github.com/ClickHouse/ClickHouse/pull/4136) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Updated the `rdkafka` library to v1.0.0-RC5. Used cppkafka instead of the raw C interface. [\#4025](https://github.com/ClickHouse/ClickHouse/pull/4025) ([Ivan](https://github.com/abyss7))
+- Updated the `mariadb-client` library. Fixed one of the issues found by UBSan. [\#3924](https://github.com/ClickHouse/ClickHouse/pull/3924) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Some fixes for UBSan builds. [\#3926](https://github.com/ClickHouse/ClickHouse/pull/3926) [\#3021](https://github.com/ClickHouse/ClickHouse/pull/3021) [\#3948](https://github.com/ClickHouse/ClickHouse/pull/3948) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added per-commit runs of tests with the UBSan build.
+- Added per-commit runs of the PVS-Studio static analyzer.
+- Fixed bugs found by PVS-Studio. [\#4013](https://github.com/ClickHouse/ClickHouse/pull/4013) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed glibc compatibility issues. [\#4100](https://github.com/ClickHouse/ClickHouse/pull/4100) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Moved Docker images to 18.10 and added a compatibility file for glibc \>= 2.28. [\#3965](https://github.com/ClickHouse/ClickHouse/pull/3965) ([alesapin](https://github.com/alesapin))
+- Added an env variable for users who do not want to chown directories in the server Docker image. [\#3967](https://github.com/ClickHouse/ClickHouse/pull/3967) ([alesapin](https://github.com/alesapin))
+- Enabled most of the warnings from `-Weverything` in clang. Enabled `-Wpedantic`. [\#3986](https://github.com/ClickHouse/ClickHouse/pull/3986) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added a few more warnings that are available only in clang 8. [\#3993](https://github.com/ClickHouse/ClickHouse/pull/3993) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Link to `libLLVM` rather than to individual LLVM libs when using shared linking. [\#3989](https://github.com/ClickHouse/ClickHouse/pull/3989) ([Orivej Desh](https://github.com/orivej))
+- Added sanitizer variables for test images. [\#4072](https://github.com/ClickHouse/ClickHouse/pull/4072) ([alesapin](https://github.com/alesapin))
+- The `clickhouse-server` debian package will recommend the `libcap2-bin` package so that the `setcap` tool can be used for setting capabilities. This is optional. [\#4093](https://github.com/ClickHouse/ClickHouse/pull/4093) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Improved compilation time, fixed includes. [\#3898](https://github.com/ClickHouse/ClickHouse/pull/3898) ([proller](https://github.com/proller))
+- Added performance tests for hash functions. [\#3918](https://github.com/ClickHouse/ClickHouse/pull/3918) ([filimonov](https://github.com/filimonov))
+- Fixed cyclic library dependencies. [\#3958](https://github.com/ClickHouse/ClickHouse/pull/3958) ([proller](https://github.com/proller))
+- Improved compilation with low available memory. [\#4030](https://github.com/ClickHouse/ClickHouse/pull/4030) ([proller](https://github.com/proller))
+- Added a test script to reproduce performance degradation in `jemalloc`. [\#4036](https://github.com/ClickHouse/ClickHouse/pull/4036) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed misspellings in comments and string literals under `dbms`. [\#4122](https://github.com/ClickHouse/ClickHouse/pull/4122) ([maiha](https://github.com/maiha))
+- Fixed typos in comments. [\#4089](https://github.com/ClickHouse/ClickHouse/pull/4089) ([Evgenii Pravda](https://github.com/kvinty))
+
+## [Changelog for 2018](https://github.com/ClickHouse/ClickHouse/blob/master/docs/en/changelog/2018.md) {#changelog-for-2018}
diff --git a/docs/fr/changelog/index.md b/docs/fr/whats_new/changelog/index.md
similarity index 99%
rename from docs/fr/changelog/index.md
rename to docs/fr/whats_new/changelog/index.md
index f6bcc4593ff..d45e36b1d8f 100644
--- a/docs/fr/changelog/index.md
+++ b/docs/fr/whats_new/changelog/index.md
@@ -1,5 +1,9 @@
 ---
 machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_folder_title: Changelog
+toc_priority: 74
+toc_title: '2020'
 ---
 
 ## ClickHouse release v20.3 {#clickhouse-release-v20-3}
diff --git a/docs/fr/whats_new/index.md b/docs/fr/whats_new/index.md
new file mode 100644
index 00000000000..787b260a862
--- /dev/null
+++ b/docs/fr/whats_new/index.md
@@ -0,0 +1,8 @@
+---
+machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_folder_title: What's New
+toc_priority: 72
+---
+
+
diff --git a/docs/fr/roadmap.md b/docs/fr/whats_new/roadmap.md
similarity index 77%
rename from docs/fr/roadmap.md
rename to docs/fr/whats_new/roadmap.md
index c0b2e685679..af1b904ca36 100644
--- a/docs/fr/roadmap.md
+++ b/docs/fr/whats_new/roadmap.md
@@ -1,5 +1,8 @@
 ---
 machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_priority: 74
+toc_title: Roadmap
 ---
 
 # Roadmap {#roadmap}
diff --git a/docs/fr/security_changelog.md b/docs/fr/whats_new/security_changelog.md
similarity index 96%
rename from docs/fr/security_changelog.md
rename to docs/fr/whats_new/security_changelog.md
index 0b37478e9aa..c5c7a5ec4f8 100644
--- a/docs/fr/security_changelog.md
+++ b/docs/fr/whats_new/security_changelog.md
@@ -1,5 +1,8 @@
 ---
 machine_translated: true
+machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5
+toc_priority: 76
+toc_title: "Security Changelog"
 ---
 
 ## Fixed in ClickHouse Release 19.14.3.3, 2019-09-10 {#fixed-in-clickhouse-release-19-14-3-3-2019-09-10}
diff --git a/docs/ja/commercial/cloud.md b/docs/ja/commercial/cloud.md
deleted file mode 120000
index eb58e4a90be..00000000000
--- a/docs/ja/commercial/cloud.md
+++ /dev/null
@@ -1 +0,0 @@
-../../en/commercial/cloud.md
\ No newline at end of file
diff --git a/docs/ja/commercial/cloud.md b/docs/ja/commercial/cloud.md
new file mode 100644
index 00000000000..6510c9d3aac
--- /dev/null
+++ b/docs/ja/commercial/cloud.md
@@ -0,0 +1,21 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+---
+
+# ClickHouse Cloud Service Providers {#clickhouse-cloud-service-providers}
+
+!!! info "Info"
info "情報" + の場合において公共クラウド管理clickhouseサービス、お気軽に [プルリクエストを開く](https://github.com/ClickHouse/ClickHouse/edit/master/docs/en/commercial/cloud.md) それを次のリストに追加します。 + +## Yandexクラウド {#yandex-cloud} + +[Yandex管理サービスClickHouse](https://cloud.yandex.com/services/managed-clickhouse?utm_source=referrals&utm_medium=clickhouseofficialsite&utm_campaign=link3) 次の主な機能を提供します: + +- 完全に管理されたzookeeperサービス [ClickHouse複製](../engines/table_engines/mergetree_family/replication.md) +- 複数ストレージタイプの選択 +- 異なる可用性ゾーンのレプリカ +- 暗号化と分離 +- 自動保守 + +{## [元の記事](https://clickhouse.tech/docs/en/commercial/cloud/) ##} diff --git a/docs/ja/commercial/index.md b/docs/ja/commercial/index.md deleted file mode 120000 index a0a49c8b9eb..00000000000 --- a/docs/ja/commercial/index.md +++ /dev/null @@ -1 +0,0 @@ -../../en/commercial/index.md \ No newline at end of file diff --git a/docs/ja/commercial/index.md b/docs/ja/commercial/index.md new file mode 100644 index 00000000000..71bc1afac05 --- /dev/null +++ b/docs/ja/commercial/index.md @@ -0,0 +1,8 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_folder_title: Commercial +toc_priority: 70 +--- + + diff --git a/docs/ja/development/architecture.md b/docs/ja/development/architecture.md deleted file mode 120000 index abda4dd48a8..00000000000 --- a/docs/ja/development/architecture.md +++ /dev/null @@ -1 +0,0 @@ -../../en/development/architecture.md \ No newline at end of file diff --git a/docs/ja/development/architecture.md b/docs/ja/development/architecture.md new file mode 100644 index 00000000000..9d446212905 --- /dev/null +++ b/docs/ja/development/architecture.md @@ -0,0 +1,203 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 62 +toc_title: "\u30AF\u30EA\u30C3\u30AF\u30CF\u30A6\u30B9\u306E\u6982\u8981" +--- + +# クリックハウスの概要 {#overview-of-clickhouse-architecture} + +ClickHouseは真の列指向DBMSです。 データは、列によって、および配列(ベクトルまたは列のチャンク)の実行中に格納されます。 可能な限り、操作は個々の値ではなく、配列にディスパッチされます。 それは呼ばれます “vectorized query execution,” そしてそれは実際のデータ処理の費用を下げるのを助けます。 + +> この考え方は、新しいものではない。 それはに遡ります `APL` プログラミング言語とその子孫: `A +`, `J`, `K`、と `Q`. 配列プログラミングは科学的データ処理に使用されます。 このアイデアは、リレーショナルデータベースで新しいものでもありません。 `Vectorwise` システム。 + +クエリ処理の高速化には、vectorizedクエリの実行とランタイムコードの生成という二つのアプローチがあります。 後者は、すべての間接指定と動的ディスパッチを削除します。 ずれのアプローチは厳重によります。 ランタイムコードを生成するときにヒューズが多く、業務として活用cpuの実行単位のパイプライン vectorizedクエリを実行できる実用的では一時的ベクトルを明記のことは、キャッシュを読みます。 一時的なデータがl2キャッシュに収まらない場合、これが問題になります。 しかし、ベクトル化されたクエリの実行は、cpuのsimd機能をより簡単に利用します。 a [研究論文](http://15721.courses.cs.cmu.edu/spring2016/papers/p5-sompolski.pdf) 書面による友人達しないことであり、両アプローチ。 ClickHouse用vectorizedクエリを実行して初期支援のためにランタイムコード。 + +## 列 {#columns} + +`IColumn` interfaceは、メモリ内の列(実際には列のチャンク)を表すために使用されます。 このインタフェースのヘルパーの方法の実施のための様々な関係です。 ほとんどすべての操作は不変です:元の列は変更されませんが、新しい変更された列を作成します。 たとえば、 `IColumn :: filter` 法を受け入れフィルタのバイトマスクです。 それはのために使用されます `WHERE` と `HAVING` 関係演算子。 その他の例: `IColumn :: permute` サポートする方法 `ORDER BY`、を `IColumn :: cut` サポートする方法 `LIMIT`. + +様々な `IColumn` 実装 (`ColumnUInt8`, `ColumnString`、というように)列のメモリレイアウトを担当しています。 メモリレイアウトは、通常、連続した配列です。 列の整数型の場合、次のような連続した配列にすぎません `std :: vector`. のために `String` と `Array` すべての配列要素のベクトル、連続して配置されたベクトル、および各配列の先頭にオフセットするためのベクトルです。 また、 `ColumnConst` これはメモリに一つの値だけを格納しますが、列のように見えます。 + +## フィールド {#field} + +それにもかかわらず、個々の価値を扱うことも可能です。 個々の値を表すには、 `Field` 使用される。 `Field` のちょうど差別された組合である `UInt64`, `Int64`, `Float64`, `String` と `Array`. 
+
+## Field {#field}
+
+Nevertheless, it is possible to work with individual values as well. To represent an individual value, `Field` is used. `Field` is just a discriminated union of `UInt64`, `Int64`, `Float64`, `String`, and `Array`. `IColumn` has the `operator[]` method to get the n-th value as a `Field`, and the `insert` method to append a `Field` to the end of a column. These methods are not very efficient, because they require dealing with temporary `Field` objects representing an individual value. There are more efficient methods, such as `insertFrom`, `insertRangeFrom`, and so on.
+
+`Field` does not have enough information about a specific data type for a table. For example, `UInt8`, `UInt16`, `UInt32`, and `UInt64` are all represented as `UInt64` in a `Field`.
+
+## Leaky Abstractions {#leaky-abstractions}
+
+`IColumn` has methods for common relational transformations of data, but it does not meet all needs. For example, `ColumnUInt64` does not have a method to calculate the sum of two columns, and `ColumnString` does not have a method to run a substring search. These countless routines are implemented outside of `IColumn`.
+
+Various functions on columns can be implemented in a generic, non-efficient way using `IColumn` methods to extract `Field` values, or in a specialized way using knowledge of the inner memory layout of data in a specific `IColumn` implementation. This is done by casting functions to a specific `IColumn` type and dealing with the internal representation directly. For example, `ColumnUInt64` has the `getData` method that returns a reference to an internal array, and a separate routine then reads or fills that array directly. We have “leaky abstractions” to allow efficient specializations of various routines.
+
+## Data Types {#data_types}
+
+`IDataType` is responsible for serialization and deserialization: for reading and writing chunks of columns or individual values in binary or text form. `IDataType` directly corresponds to data types in tables. For example, there are `DataTypeUInt32`, `DataTypeDateTime`, `DataTypeString`, and so on.
+
+`IDataType` and `IColumn` are only loosely related to each other. Different data types can be represented in memory by the same `IColumn` implementations. For example, `DataTypeUInt32` and `DataTypeDateTime` are both represented by `ColumnUInt32` or `ColumnConstUInt32`. In addition, the same data type can be represented by different `IColumn` implementations. For example, `DataTypeUInt8` can be represented by `ColumnUInt8` or `ColumnConstUInt8`.
+
+`IDataType` only stores metadata. For instance, `DataTypeUInt8` does not store anything at all (except vptr), and `DataTypeFixedString` stores just `N` (the size of fixed-size strings).
+
+`IDataType` has helper methods for various data formats. Examples are methods to serialize a value with possible quoting, to serialize a value for JSON, and to serialize a value as part of an XML format. There is no direct correspondence to data formats. For example, the different data formats `Pretty` and `TabSeparated` can use the same `serializeTextEscaped` helper method from the `IDataType` interface.
+
+## Block {#block}
+
+A `Block` is a container that represents a subset (chunk) of a table in memory. It is just a set of triples: `(IColumn, IDataType, column name)`. During query execution, data is processed by `Block`s. If we have a `Block`, we have data (in the `IColumn` object), we have information about its type (in `IDataType`) that tells us how to deal with that column, and we have the column name. It could be either the original column name from the table or some artificial name assigned for getting temporary results of calculations.
+
+When we calculate some function over columns in a block, we add another column with its result to the block, and we do not touch the columns for the arguments of the function, because operations are immutable. Later, unneeded columns can be removed from the block, but not modified. This is convenient for the elimination of common subexpressions.
+
+Blocks are created for every processed chunk of data. Note that for the same type of calculation, the column names and types remain the same for different blocks, and only the column data changes. It is better to split block data from the block header, because with small block sizes there is a high overhead of temporary strings for copying shared\_ptrs and column names.
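+
+As a rough illustration — simplified stand-in types, not the real headers — a block can be thought of as a list of named, typed columns:
+
+``` cpp
+#include <memory>
+#include <string>
+#include <vector>
+
+struct ToyColumn;    // stand-in for IColumn
+struct ToyDataType;  // stand-in for IDataType
+
+// A block is just a set of (column, type, name) triples.
+struct ToyBlock
+{
+    struct Entry
+    {
+        std::shared_ptr<ToyColumn> column;   // the data
+        std::shared_ptr<ToyDataType> type;   // how to interpret and serialize it
+        std::string name;                    // original or temporary column name
+    };
+    std::vector<Entry> entries;
+};
+```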
+
+## Block Streams {#block-streams}
+
+Block streams are for processing data. We use streams of blocks to read data from somewhere, perform data transformations, or write data to somewhere. `IBlockInputStream` has the `read` method to fetch the next block while available. `IBlockOutputStream` has the `write` method to push a block somewhere.
+
+Streams are responsible for:
+
+1. Reading or writing to a table. The table just returns a stream for reading or writing blocks.
+2. Implementing data formats. For example, if you want to output data to a terminal in `Pretty` format, you create a block output stream where you push blocks, and it formats them.
+3. Performing data transformations. Let's say you have `IBlockInputStream` and want to create a filtered stream. You create `FilterBlockInputStream` and initialize it with your stream. Then, when you pull a block from `FilterBlockInputStream`, it pulls a block from your stream, filters it, and returns the filtered block to you. Query execution pipelines are represented this way.
+
+There are more sophisticated transformations. For example, when you pull from `AggregatingBlockInputStream`, it reads all data from its source, aggregates it, and then returns a stream of aggregated data for you. Another example: `UnionBlockInputStream` accepts many input sources in the constructor, along with a number of threads. It launches multiple threads and reads from multiple sources in parallel.
+
+> Block streams use the “pull” approach to control flow: when you pull a block from the first stream, it consequently pulls the required blocks from the nested streams, and the entire execution pipeline works. Neither “pull” nor “push” is the best solution, because control flow is implicit, and that limits the implementation of various features like simultaneous execution of multiple queries (merging many pipelines together). This limitation could be overcome with coroutines or just by running extra threads that wait for each other. We may have more possibilities if we make control flow explicit: if we locate the logic for passing data from one calculation unit to another outside of those calculation units. Read this [article](http://journal.stuffwithstuff.com/2013/01/13/iteration-inside-and-out/) for more thoughts.
+
+We should note that the query execution pipeline creates temporary data at each step. We try to keep the block size small enough so that temporary data fits in the CPU cache. With that assumption, writing and reading temporary data is almost free in comparison with other calculations. We could consider an alternative, which is to fuse many operations in the pipeline together. It could make the pipeline as short as possible and remove much of the temporary data, which could be an advantage, but it also has drawbacks. For example, a split pipeline makes it easy to implement caching intermediate data, stealing intermediate data from similar queries running at the same time, and merging pipelines for similar queries.
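+
+A minimal sketch of the pull model described above, with hypothetical simplified interfaces rather than the real stream classes:
+
+``` cpp
+#include <memory>
+#include <vector>
+
+// Stand-ins for Block and IBlockInputStream; the real interfaces are richer.
+using ToyBlock = std::vector<int>;
+
+struct IToyBlockInputStream
+{
+    virtual ~IToyBlockInputStream() = default;
+    virtual std::shared_ptr<ToyBlock> read() = 0; // nullptr means end of stream
+};
+
+// A filtering stream wraps another stream: each read() pulls a block from the
+// child, filters it, and returns the filtered block -- the "pull" model.
+struct ToyFilterStream : IToyBlockInputStream
+{
+    std::shared_ptr<IToyBlockInputStream> child;
+
+    explicit ToyFilterStream(std::shared_ptr<IToyBlockInputStream> child_) : child(std::move(child_)) {}
+
+    std::shared_ptr<ToyBlock> read() override
+    {
+        auto block = child->read();
+        if (!block)
+            return nullptr;
+        auto result = std::make_shared<ToyBlock>();
+        for (int value : *block)
+            if (value % 2 == 0) // toy predicate standing in for a WHERE condition
+                result->push_back(value);
+        return result;
+    }
+};
+```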
+
+## Formats {#formats}
+
+Data formats are implemented with block streams. There are “presentational” formats only suitable for output of data to the client, such as the `Pretty` format, which provides only `IBlockOutputStream`. And there are input/output formats, such as `TabSeparated` or `JSONEachRow`.
+
+There are also row streams: `IRowInputStream` and `IRowOutputStream`. They allow you to pull/push data by individual rows, not by blocks. They are only needed to simplify the implementation of row-oriented formats. The wrappers `BlockInputStreamFromRowInputStream` and `BlockOutputStreamFromRowOutputStream` allow you to convert row-oriented streams to regular block-oriented streams.
+
+## I/O {#io}
+
+For byte-oriented input/output, there are the `ReadBuffer` and `WriteBuffer` abstract classes. They are used instead of C++ `iostream`s; every mature C++ project uses something other than `iostream`s, for good reasons.
+
+`ReadBuffer` and `WriteBuffer` are just a contiguous buffer and a cursor pointing to a position in that buffer. Implementations may or may not own the memory for the buffer. There is a virtual method to fill the buffer with the following data (for `ReadBuffer`) or to flush the buffer somewhere (for `WriteBuffer`). The virtual methods are rarely called.
+
+Implementations of `ReadBuffer`/`WriteBuffer` are used for working with files, file descriptors, and network sockets, for implementing compression (`CompressedWriteBuffer` is initialized with another WriteBuffer and performs compression before writing data to it), and for other purposes – the names `ConcatReadBuffer`, `LimitReadBuffer`, and `HashingWriteBuffer` speak for themselves.
+
+Read/WriteBuffers only deal with bytes. There are functions from the `ReadHelpers` and `WriteHelpers` header files to help with formatting input/output. For example, there are helpers to write a number in decimal format.
+
+Let's look at what happens when you want to write a result set in `JSON` format to stdout. You have a result set ready to be fetched from `IBlockInputStream`. You create `WriteBufferFromFileDescriptor(STDOUT_FILENO)` to write bytes to stdout. You create `JSONRowOutputStream`, initialized with that `WriteBuffer`, to write rows in `JSON` to stdout. You create `BlockOutputStreamFromRowOutputStream` on top of it, to represent it as `IBlockOutputStream`. Then you call `copyData` to transfer data from `IBlockInputStream` to `IBlockOutputStream`, and everything works. Internally, `JSONRowOutputStream` writes various JSON delimiters and calls the `IDataType::serializeTextJSON` method with a reference to an `IColumn` and the row number as arguments. Consequently, `IDataType::serializeTextJSON` calls a method from `WriteHelpers.h`: for example, `writeText` for numeric types and `writeJSONString` for `DataTypeString`.
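+
+To make the buffer-plus-cursor idea tangible, here is a hedged, self-contained sketch of a write buffer over a file descriptor — a toy class, not the real `WriteBufferFromFileDescriptor`:
+
+``` cpp
+#include <algorithm>
+#include <cstddef>
+#include <cstring>
+#include <unistd.h>
+
+// Toy WriteBuffer: a contiguous buffer plus a cursor; next() flushes it.
+// The real class hierarchy is richer, but the shape is the same.
+class ToyWriteBufferFromFD
+{
+public:
+    explicit ToyWriteBufferFromFD(int fd_) : fd(fd_), pos(0) {}
+    ~ToyWriteBufferFromFD() { next(); }
+
+    void write(const char * data, size_t size)
+    {
+        while (size > 0)
+        {
+            if (pos == sizeof(buf))
+                next(); // buffer full: flush (the rarely-called virtual in the real code)
+            size_t chunk = std::min(size, sizeof(buf) - pos);
+            std::memcpy(buf + pos, data, chunk);
+            pos += chunk;
+            data += chunk;
+            size -= chunk;
+        }
+    }
+
+    void next()
+    {
+        if (pos)
+            ::write(fd, buf, pos); // flush to the descriptor (errors ignored in this sketch)
+        pos = 0;
+    }
+
+private:
+    int fd;
+    char buf[4096];
+    size_t pos;
+};
+
+int main()
+{
+    ToyWriteBufferFromFD out(STDOUT_FILENO); // analogous to WriteBufferFromFileDescriptor(STDOUT_FILENO)
+    out.write("{\"hello\":1}\n", 12);
+}
+```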
+
+## Tables {#tables}
+
+The `IStorage` interface represents tables. Different implementations of that interface are different table engines. Examples are `StorageMergeTree`, `StorageMemory`, and so on. Instances of these classes are just tables.
+
+The key `IStorage` methods are `read` and `write`. There are also `alter`, `rename`, `drop`, and so on. The `read` method accepts the following arguments: the set of columns to read from a table, the `AST` query to consider, and the desired number of streams to return. It returns one or multiple `IBlockInputStream` objects, along with information about the stage of data processing that was completed inside the table engine during query execution.
+
+In most cases, the read method is only responsible for reading the specified columns from a table, not for any further data processing. All further data processing is done by the query interpreter and is outside the responsibility of `IStorage`.
+
+But there are notable exceptions:
+
+- The AST query is passed to the `read` method, and the table engine can use it to derive index usage and to read less data from the table.
+- Sometimes the table engine can process data itself to a specific stage. For example, `StorageDistributed` can send a query to remote servers, ask them to process data to a stage where data from different remote servers can be merged, and return that preprocessed data. The query interpreter then finishes processing the data.
+
+The table's `read` method can return multiple `IBlockInputStream` objects to allow parallel data processing. These multiple block input streams can read from a table in parallel. Then you can wrap these streams with various transformations (such as expression evaluation or filtering) that can be calculated independently, and create a `UnionBlockInputStream` on top of them, to read from multiple streams in parallel.
+
+There are also `TableFunction`s. These are functions that return a temporary `IStorage` object to use in the `FROM` clause of a query.
+
+To get a quick idea of how to implement your own table engine, look at something simple, like `StorageMemory` or `StorageTinyLog`.
+
+> As the result of the `read` method, `IStorage` returns `QueryProcessingStage` – information about what parts of the query were already calculated inside storage.
+
+## Parsers {#parsers}
+
+A hand-written recursive descent parser parses a query. For example, `ParserSelectQuery` just recursively calls the underlying parsers for various parts of the query. Parsers create an `AST`. The `AST` is represented by nodes, which are instances of `IAST`.
+
+> Parser generators are not used, for historical reasons.
+
+## Interpreters {#interpreters}
+
+Interpreters are responsible for creating the query execution pipeline from an `AST`. There are simple interpreters, such as `InterpreterExistsQuery` and `InterpreterDropQuery`, and the more sophisticated `InterpreterSelectQuery`. The query execution pipeline is a combination of block input or output streams. For example, the result of interpreting a `SELECT` query is an `IBlockInputStream` to read the result set from; the result of an INSERT query is an `IBlockOutputStream` to write the data for insertion to; and the result of interpreting an `INSERT SELECT` query is an `IBlockInputStream` that returns an empty result set on the first read, but copies data from `SELECT` to `INSERT` at the same time.
+
+`InterpreterSelectQuery` uses the `ExpressionAnalyzer` and `ExpressionActions` machinery for query analysis and transformations. This is where most rule-based query optimizations are performed. Various query transformations and optimizations should be extracted into separate classes to allow modular transformations of a query.
+
+## Functions {#functions}
+
+There are ordinary functions and aggregate functions. For aggregate functions, see the next section.
+
+Ordinary functions don't change the number of rows – they work as if they are processing each row independently. In fact, functions are not called for individual rows, but for `Block`s of data to implement vectorized query execution.
+
+There are some miscellaneous functions, like [blockSize](../sql_reference/functions/other_functions.md#function-blocksize), [rowNumberInBlock](../sql_reference/functions/other_functions.md#function-rownumberinblock), and [runningAccumulate](../sql_reference/functions/other_functions.md#function-runningaccumulate), that exploit block processing and violate the independence of rows.
+
+ClickHouse has strong typing, so there is no implicit type conversion. If a function does not support a specific combination of types, it throws an exception. But functions can work (be overloaded) for many different combinations of types. For example, the `plus` function (to implement the `+` operator) works for any combination of numeric types: `UInt8` + `Float32`, `UInt16` + `Int8`, and so on. Also, some variadic functions can accept any number of arguments, such as the `concat` function.
+
+Implementing a function may be slightly inconvenient because a function explicitly dispatches on the supported data types and supported `IColumns`. For example, the `plus` function has code generated by instantiation of a C++ template for each combination of numeric types, and for constant or non-constant left and right arguments.
+
+It is an excellent place to implement runtime code generation to avoid template code bloat. Also, it makes it possible to add fused functions, like fused multiply-add, or to make multiple comparisons in one loop iteration.
+
+Due to vectorized query execution, functions are not short-circuited. For example, if you write `WHERE f(x) AND g(y)`, both sides are calculated, even for rows where `f(x)` is zero (except when `f(x)` is a zero constant expression). But if the selectivity of the `f(x)` condition is high, and the calculation of `f(x)` is much cheaper than `g(y)`, it is better to implement multi-pass calculation: first calculate `f(x)`, then filter the columns by the result, and then calculate `g(y)` only for the smaller, filtered chunks of data.
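+
+A hedged sketch of what “working on blocks, not rows” means for an ordinary function such as `plus` — a toy column representation, not the real template machinery:
+
+``` cpp
+#include <cstdint>
+#include <vector>
+
+// Vectorized addition: one call processes a whole column chunk, so the hot
+// loop is simple enough for the compiler to auto-vectorize with SIMD.
+std::vector<uint64_t> plusColumns(const std::vector<uint64_t> & a, const std::vector<uint64_t> & b)
+{
+    std::vector<uint64_t> result(a.size());
+    for (size_t i = 0; i < a.size(); ++i)
+        result[i] = a[i] + b[i]; // no per-row virtual dispatch
+    return result;
+}
+```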
+
+## Aggregate Functions {#aggregate-functions}
+
+Aggregate functions are stateful functions. They accumulate passed values into some state and allow you to get results from that state. They are managed with the `IAggregateFunction` interface. States can be rather simple (the state for `AggregateFunctionCount` is just a single `UInt64` value) or quite complex (the state of `AggregateFunctionUniqCombined` is a combination of a linear array, a hash table, and a `HyperLogLog` probabilistic data structure).
+
+States are allocated in an `Arena` (a memory pool) to deal with multiple states while executing a high-cardinality `GROUP BY` query. For example, a complicated aggregate state can allocate additional memory itself. This requires some attention to creating and destroying states and properly passing their ownership and destruction order.
+
+Aggregation states can be serialized and deserialized to pass over the network during distributed query execution, or to be written to disk where there is not enough RAM. They can even be stored in a table with `DataTypeAggregateFunction` to allow incremental aggregation of data.
+
+> The serialized data format for aggregate function states is not versioned right now. This is ok if aggregate states are only stored temporarily. But we have the `AggregatingMergeTree` table engine for incremental aggregation, and people are already using it in production. This is the reason why backward compatibility will be required when changing the serialized format for any aggregate function in the future.
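+
+For illustration, a minimal count-style aggregate in the spirit of `IAggregateFunction` — hypothetical simplified names; the real interface also covers arenas and column-level batching:
+
+``` cpp
+#include <cstdint>
+#include <istream>
+#include <ostream>
+
+// Toy aggregate: accumulate into a state, then extract the result.
+struct ToyCountState { uint64_t count = 0; };
+
+struct ToyCountFunction
+{
+    void add(ToyCountState & state) const { ++state.count; }
+
+    // Merging lets partial states from different threads or servers combine.
+    void merge(ToyCountState & lhs, const ToyCountState & rhs) const { lhs.count += rhs.count; }
+
+    // Serialization lets states travel over the network or spill to disk
+    // during distributed or external aggregation.
+    void serialize(const ToyCountState & state, std::ostream & out) const
+    {
+        out.write(reinterpret_cast<const char *>(&state.count), sizeof(state.count));
+    }
+
+    void deserialize(ToyCountState & state, std::istream & in) const
+    {
+        in.read(reinterpret_cast<char *>(&state.count), sizeof(state.count));
+    }
+
+    uint64_t getResult(const ToyCountState & state) const { return state.count; }
+};
+```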
を実行すべきである"と述べていテーブルエンジンで広がる、クラスターを動的に再現れる可能性がある地域分割のバランスとクラスターの動します。 + +{## [元の記事](https://clickhouse.tech/docs/en/development/architecture/) ##} diff --git a/docs/ja/development/browse_code.md b/docs/ja/development/browse_code.md deleted file mode 120000 index 8c08c622129..00000000000 --- a/docs/ja/development/browse_code.md +++ /dev/null @@ -1 +0,0 @@ -../../en/development/browse_code.md \ No newline at end of file diff --git a/docs/ja/development/browse_code.md b/docs/ja/development/browse_code.md new file mode 100644 index 00000000000..f8357fcca27 --- /dev/null +++ b/docs/ja/development/browse_code.md @@ -0,0 +1,14 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 63 +toc_title: "ClickHouse\u306E\u30BD\u30FC\u30B9\u30B3\u30FC\u30C9\u3092\u53C2\u7167" +--- + +# ClickHouseのソースコードを参照 {#browse-clickhouse-source-code} + +を使用することができ **Woboq** オンラインのコードブラウザをご利用 [ここに](https://clickhouse-test-reports.s3.yandex.net/codebrowser/html_report///ClickHouse/dbms/index.html). このコードナビゲーションや意味のハイライト表示、検索インデックス. コードのスナップショットは随時更新中です。 + +また、ソースを閲覧することもできます [GitHub](https://github.com/ClickHouse/ClickHouse) いつものように + +使用するideに興味がある場合は、clion、qt creator、vs code、およびkdevelop(注意点あり)をお勧めします。 お気に入りのideを使用できます。 vimとemacsもカウントされます。 diff --git a/docs/ja/development/build.md b/docs/ja/development/build.md deleted file mode 120000 index 480dbc2e9f5..00000000000 --- a/docs/ja/development/build.md +++ /dev/null @@ -1 +0,0 @@ -../../en/development/build.md \ No newline at end of file diff --git a/docs/ja/development/build.md b/docs/ja/development/build.md new file mode 100644 index 00000000000..036323a4ac0 --- /dev/null +++ b/docs/ja/development/build.md @@ -0,0 +1,141 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 64 +toc_title: "Linux\u3067ClickHouse\u3092\u69CB\u7BC9\u3059\u308B\u65B9\u6CD5" +--- + +# 開発のためのclickhouseを構築する方法 {#how-to-build-clickhouse-for-development} + +次のチュートリアルはubuntu linuxシステムに基づいています。 +適切な変更により、他のlinuxディストリビューションでも動作するはずです。 +サポートされるプラットフォーム:x86\_64およびaarch64。 power9のサポートは実験的です。 + +## Git、CMake、Pythonと忍者をインストールします。 {#install-git-cmake-python-and-ninja} + +``` bash +$ sudo apt-get install git cmake python ninja-build +``` + +または古いシステムのcmakeの代わりにcmake3。 + +## GCC9のインストール {#install-gcc-9} + +これを行うにはいくつかの方法があります。 + +### PPAパッケージからインストール {#install-from-a-ppa-package} + +``` bash +$ sudo apt-get install software-properties-common +$ sudo apt-add-repository ppa:ubuntu-toolchain-r/test +$ sudo apt-get update +$ sudo apt-get install gcc-9 g++-9 +``` + +### ソースからのイ {#install-from-sources} + +見て [utils/ci/build-gcc-from-sources.sh](https://github.com/ClickHouse/ClickHouse/blob/master/utils/ci/build-gcc-from-sources.sh) + +## ビルドにはgcc9を使う {#use-gcc-9-for-builds} + +``` bash +$ export CC=gcc-9 +$ export CXX=g++-9 +``` + +## レclickhouse源 {#checkout-clickhouse-sources} + +``` bash +$ git clone --recursive git@github.com:ClickHouse/ClickHouse.git +``` + +または + +``` bash +$ git clone --recursive https://github.com/ClickHouse/ClickHouse.git +``` + +## クリックハウスを構築 {#build-clickhouse} + +``` bash +$ cd ClickHouse +$ mkdir build +$ cd build +$ cmake .. +$ ninja +$ cd .. +``` + +実行可能ファイルを作成するには `ninja clickhouse`. 
+これにより `programs/clickhouse` 実行可能ファイルが作成され、`client` または `server` 引数を付けて使用できます。
+
+# 任意のlinux上でclickhouseを構築する方法 {#how-to-build-clickhouse-on-any-linux}
+
+ビルドには以下のコンポーネントが必要です:
+
+- Git(ソースのチェックアウトにのみ使用され、ビルドには必要ありません)
+- CMake3.10以降
+- Ninja(推奨)またはMake
+- C++コンパイラ:gcc9またはclang8以降
+- リンカ:lldまたはgold(古典的なgnu ldは動作しません)
+- Python(LLVMビルド内でのみ使用され、オプションです)
+
+すべてのコンポーネントがインストールされている場合は、上記の手順と同じ方法で構築できます。
+
+Ubuntu Eoanの例:
+
+    sudo apt update
+    sudo apt install git cmake ninja-build g++ python
+    git clone --recursive https://github.com/ClickHouse/ClickHouse.git
+    mkdir build && cd build
+    cmake ../ClickHouse
+    ninja
+
+OpenSUSE Tumbleweedの例:
+
+    sudo zypper install git cmake ninja gcc-c++ python lld
+    git clone --recursive https://github.com/ClickHouse/ClickHouse.git
+    mkdir build && cd build
+    cmake ../ClickHouse
+    ninja
+
+Fedora Rawhideの例:
+
+    sudo yum update
+    yum --nogpg install git cmake make gcc-c++ python2
+    git clone --recursive https://github.com/ClickHouse/ClickHouse.git
+    mkdir build && cd build
+    cmake ../ClickHouse
+    make -j $(nproc)
+
+# クリックハウスを構築する必要はありません {#you-dont-have-to-build-clickhouse}
+
+ClickHouseは、事前に構築されたバイナリとパッケージで利用可能です。 バイナリは移植性があり、任意のLinuxフレーバーで実行できます。
+
+これらは、stable・prestable・testingの各リリースのほか、masterへのすべてのコミットとすべてのプルリクエストに対してビルドされています。
+
+`master` からの新しいビルドを見つけるには、[コミットページ](https://github.com/ClickHouse/ClickHouse/commits/master) に行き、コミットの近くにある最初の緑色のチェックマークまたは赤い十字をクリックし、“ClickHouse Build Check” のすぐ後ろにある “Details” リンクをクリックします。
+
+# ClickHouse Debianパッケージをビルドする方法 {#how-to-build-clickhouse-debian-package}
+
+## Gitとpbuilderをインストール {#install-git-and-pbuilder}
+
+``` bash
+$ sudo apt-get update
+$ sudo apt-get install git python pbuilder debhelper lsb-release fakeroot sudo debian-archive-keyring debian-keyring
+```
+
+## ClickHouseソースをチェックアウト {#checkout-clickhouse-sources-1}
+
+``` bash
+$ git clone --recursive --branch master https://github.com/ClickHouse/ClickHouse.git
+$ cd ClickHouse
+```
+
+## Releaseスクリプトを実行 {#run-release-script}
+
+``` bash
+$ ./release
+```
+
+[元の記事](https://clickhouse.tech/docs/en/development/build/)
diff --git a/docs/ja/development/build_cross_arm.md b/docs/ja/development/build_cross_arm.md
deleted file mode 120000
index 983a9872dc1..00000000000
--- a/docs/ja/development/build_cross_arm.md
+++ /dev/null
@@ -1 +0,0 @@
-../../en/development/build_cross_arm.md
\ No newline at end of file
diff --git a/docs/ja/development/build_cross_arm.md b/docs/ja/development/build_cross_arm.md
new file mode 100644
index 00000000000..dda1aab587b
--- /dev/null
+++ b/docs/ja/development/build_cross_arm.md
@@ -0,0 +1,44 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 67
+toc_title: "AARCH64\u7528Linux\u3067ClickHouse\u3092\u30D3\u30EB\u30C9\u3059\u308B\
+  \u65B9\u6CD5\uFF08ARM64)"
+---
+
+# Aarch64(ARM64)アーキテクチャ用のLinuxでClickHouseをビルドする方法 {#how-to-build-clickhouse-on-linux-for-aarch64-arm64-architecture}
+
+これは、Linuxマシンを持っていて、それを使って、AARCH64 CPUアーキテクチャの別のLinuxマシンで実行する `clickhouse` バイナリをビルドしたい場合のためのものです。 これは、Linuxサーバー上で実行される継続的インテグレーションチェックを目的としています。
+
+AARCH64のクロスビルドは [ビルド手順](build.md) に基づいています。まずそちらに従ってください。
+
+# インストールclang-8 {#install-clang-8}
+
+https://apt.llvm.org/ の指示に従って、お使いのUbuntuまたはDebian向けにセットアップしてください。
+たとえば、Ubuntu Bionicでは、次のコマンドを使用できます:
+
+``` bash
+echo "deb [trusted=yes] http://apt.llvm.org/bionic/ llvm-toolchain-bionic-8 main" | sudo tee /etc/apt/sources.list.d/llvm.list
+sudo apt-get update
+sudo apt-get install clang-8
+```
+
+# クロスコンパイルツールセット {#install-cross-compilation-toolset}
+
+``` bash
+cd ClickHouse
+mkdir -p build-aarch64/cmake/toolchain/linux-aarch64
+wget 'https://developer.arm.com/-/media/Files/downloads/gnu-a/8.3-2019.03/binrel/gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz?revision=2e88a73f-d233-4f96-b1f4-d8b36e9bb0b9&la=en' -O gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz
+tar xJf gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz -C build-aarch64/cmake/toolchain/linux-aarch64 --strip-components=1
+```
+
+# クリックハウスを構築 {#build-clickhouse}
+
+``` bash
+cd ClickHouse
+mkdir build-arm64
+CC=clang-8 CXX=clang++-8 cmake . -Bbuild-arm64 -DCMAKE_TOOLCHAIN_FILE=cmake/linux/toolchain-aarch64.cmake
+ninja -C build-arm64
+```
+
+結果のバイナリは、AARCH64 CPUアーキテクチャを持つLinux上でのみ実行されます。
diff --git a/docs/ja/development/build_cross_osx.md b/docs/ja/development/build_cross_osx.md
deleted file mode 120000
index 72e64e8631f..00000000000
--- a/docs/ja/development/build_cross_osx.md
+++ /dev/null
@@ -1 +0,0 @@
-../../en/development/build_cross_osx.md
\ No newline at end of file
diff --git a/docs/ja/development/build_cross_osx.md b/docs/ja/development/build_cross_osx.md
new file mode 100644
index 00000000000..cdbee0e7a61
--- /dev/null
+++ b/docs/ja/development/build_cross_osx.md
@@ -0,0 +1,65 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 66
+toc_title: "Mac OS X\u7528\u306ELinux\u3067ClickHouse\u3092\u69CB\u7BC9\u3059\u308B\
+  \u65B9\u6CD5"
+---
+
+# Mac OS X用のLinuxでClickHouseを構築する方法 {#how-to-build-clickhouse-on-linux-for-mac-os-x}
+
+これは、Linuxマシンを持っていて、それを使って、OS X上で実行される `clickhouse` バイナリをビルドしたい場合のためのものです。 これは、Linuxサーバー上で実行される継続的インテグレーションチェックを目的としています。 Mac OS XでClickHouseを直接ビルドする場合は、[別の手順](build_osx.md) に進んでください。
+
+Mac OS X用のクロスビルドは [ビルド手順](build.md) に基づいています。まずそちらに従ってください。
+
+# インストールclang-8 {#install-clang-8}
+
+https://apt.llvm.org/ の指示に従って、お使いのUbuntuまたはDebian向けにセットアップしてください。
+たとえば、Ubuntu Bionicでは、次のコマンドを使用できます:
+
+``` bash
+echo "deb [trusted=yes] http://apt.llvm.org/bionic/ llvm-toolchain-bionic-8 main" | sudo tee -a /etc/apt/sources.list
+sudo apt-get update
+sudo apt-get install clang-8
+```
+
+# クロスコンパイルツールセット {#install-cross-compilation-toolset}
+
+`cctools` をインストールするパスを ${CCTOOLS} として覚えておきましょう。
+
+``` bash
+mkdir ${CCTOOLS}
+
+git clone https://github.com/tpoechtrager/apple-libtapi.git
+cd apple-libtapi
+INSTALLPREFIX=${CCTOOLS} ./build.sh
+./install.sh
+cd ..
+
+git clone https://github.com/tpoechtrager/cctools-port.git
+cd cctools-port/cctools
+./configure --prefix=${CCTOOLS} --with-libtapi=${CCTOOLS} --target=x86_64-apple-darwin
+make install
+```
+
+また、MacOS X SDKを作業ツリーにダウンロードする必要があります。
+
+``` bash
+cd ClickHouse
+wget 'https://github.com/phracker/MacOSX-SDKs/releases/download/10.14-beta4/MacOSX10.14.sdk.tar.xz'
+mkdir -p build-darwin/cmake/toolchain/darwin-x86_64
+tar xJf MacOSX10.14.sdk.tar.xz -C build-darwin/cmake/toolchain/darwin-x86_64 --strip-components=1
+```
+
+# クリックハウスを構築 {#build-clickhouse}
+
+``` bash
+cd ClickHouse
+mkdir build-osx
+CC=clang-8 CXX=clang++-8 cmake .
-Bbuild-osx -DCMAKE_TOOLCHAIN_FILE=cmake/darwin/toolchain-x86_64.cmake \ + -DCMAKE_AR:FILEPATH=${CCTOOLS}/bin/x86_64-apple-darwin-ar \ + -DCMAKE_RANLIB:FILEPATH=${CCTOOLS}/bin/x86_64-apple-darwin-ranlib \ + -DLINKER_NAME=${CCTOOLS}/bin/x86_64-apple-darwin-ld +ninja -C build-osx +``` + +結果のバイナリはmach-o実行可能フォーマットを持ち、linuxでは実行できません。 diff --git a/docs/ja/development/build_osx.md b/docs/ja/development/build_osx.md deleted file mode 120000 index f9adaf24584..00000000000 --- a/docs/ja/development/build_osx.md +++ /dev/null @@ -1 +0,0 @@ -../../en/development/build_osx.md \ No newline at end of file diff --git a/docs/ja/development/build_osx.md b/docs/ja/development/build_osx.md new file mode 100644 index 00000000000..b0314495cd0 --- /dev/null +++ b/docs/ja/development/build_osx.md @@ -0,0 +1,93 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 65 +toc_title: "Mac OS X\u3067ClickHouse\u3092\u69CB\u7BC9\u3059\u308B\u65B9\u6CD5" +--- + +# Mac OS XでClickHouseを構築する方法 {#how-to-build-clickhouse-on-mac-os-x} + +ビルドはmac os x10.15(catalina)で動作するはずです) + +## ト自作 {#install-homebrew} + +``` bash +$ /usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)" +``` + +## 必要なコンパイラ、ツール、ライブラ {#install-required-compilers-tools-and-libraries} + +``` bash +$ brew install cmake ninja libtool gettext +``` + +## レclickhouse源 {#checkout-clickhouse-sources} + +``` bash +$ git clone --recursive git@github.com:ClickHouse/ClickHouse.git +``` + +または + +``` bash +$ git clone --recursive https://github.com/ClickHouse/ClickHouse.git + +$ cd ClickHouse +``` + +## クリックハウスを構築 {#build-clickhouse} + +``` bash +$ mkdir build +$ cd build +$ cmake .. -DCMAKE_CXX_COMPILER=`which clang++` -DCMAKE_C_COMPILER=`which clang` +$ ninja +$ cd .. +``` + +## 警告 {#caveats} + +Clickhouse-serverを実行する場合は、システムのmaxfiles変数を増やしてください。 + +!!! 
info "メモ" + Sudoを使用する必要があります。 + +これを行うには、次のファイルを作成します: + +/ライブラリ/LaunchDaemons/制限.マックスファイルplist: + +``` xml + + + + + Label + limit.maxfiles + ProgramArguments + + launchctl + limit + maxfiles + 524288 + 524288 + + RunAtLoad + + ServiceIPC + + + +``` + +次のコマンドを実行します: + +``` bash +$ sudo chown root:wheel /Library/LaunchDaemons/limit.maxfiles.plist +``` + +再起動しろ + +それが動作しているかどうかを確認するには、 `ulimit -n` 司令部 + +[元の記事](https://clickhouse.tech/docs/en/development/build_osx/) diff --git a/docs/ja/development/contrib.md b/docs/ja/development/contrib.md deleted file mode 120000 index 4749f95f9ef..00000000000 --- a/docs/ja/development/contrib.md +++ /dev/null @@ -1 +0,0 @@ -../../en/development/contrib.md \ No newline at end of file diff --git a/docs/ja/development/contrib.md b/docs/ja/development/contrib.md new file mode 100644 index 00000000000..5fb0ae8a77e --- /dev/null +++ b/docs/ja/development/contrib.md @@ -0,0 +1,43 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 70 +toc_title: "\u30B5\u30FC\u30C9\u30D1\u30FC\u30C6\u30A3\u88FD\u30E9\u30A4\u30D6\u30E9\ + \u30EA" +--- + +# サードパーティ製ライブラリ {#third-party-libraries-used} + +| ライブラリ | ライセンス | +|------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------| +| base64 | [BSD2-条項ライセンス](https://github.com/aklomp/base64/blob/a27c565d1b6c676beaf297fe503c4518185666f7/LICENSE) | +| ブースト | [Boost Software License1.0](https://github.com/ClickHouse-Extras/boost-extra/blob/6883b40449f378019aec792f9983ce3afc7ff16e/LICENSE_1_0.txt) | +| brotli | [MIT](https://github.com/google/brotli/blob/master/LICENSE) | +| capnproto | [MIT](https://github.com/capnproto/capnproto/blob/master/LICENSE) | +| cctz | [Apacheライセンス2.0](https://github.com/google/cctz/blob/4f9776a310f4952454636363def82c2bf6641d5f/LICENSE.txt) | +| ダブル変換 | [BSD3-条項ライセンス](https://github.com/google/double-conversion/blob/cf2f0f3d547dc73b4612028a155b80536902ba02/LICENSE) | +| FastMemcpy | [MIT](https://github.com/ClickHouse/ClickHouse/blob/master/libs/libmemcpy/impl/LICENSE) | +| googletest | [BSD3-条項ライセンス](https://github.com/google/googletest/blob/master/LICENSE) | +| h3unit description in lists | [Apacheライセンス2.0](https://github.com/uber/h3/blob/master/LICENSE) | +| hyperscan | [BSD3-条項ライセンス](https://github.com/intel/hyperscan/blob/master/LICENSE) | +| libbtrie | [BSD2-条項ライセンス](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/libbtrie/LICENSE) | +| libcxxabi | [BSD + MIT](https://github.com/ClickHouse/ClickHouse/blob/master/libs/libglibc-compatibility/libcxxabi/LICENSE.TXT) | +| libdividename | [Zlibライセンス](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/libdivide/LICENSE.txt) | +| libgsasl | [LGPL v2.1](https://github.com/ClickHouse-Extras/libgsasl/blob/3b8948a4042e34fb00b4fb987535dc9e02e39040/LICENSE) | +| libhdfs3 | [Apacheライセンス2.0](https://github.com/ClickHouse-Extras/libhdfs3/blob/bd6505cbb0c130b0db695305b9a38546fa880e5a/LICENSE.txt) | +| libmetrohash | [Apacheライセンス2.0](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/libmetrohash/LICENSE) | +| libpcg-ランダム | [Apacheライセンス2.0](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/libpcg-random/LICENSE-APACHE.txt) | +| libressl | [OpenSSLライセンス](https://github.com/ClickHouse-Extras/ssl/blob/master/COPYING) | +| librdkafka | [BSD2-条項ライセンス](https://github.com/edenhill/librdkafka/blob/363dcad5a23dc29381cc626620e68ae418b3af19/LICENSE) | +| 
libwidechar\_widthname | [CC0 1.0ユニバーサル](https://github.com/ClickHouse/ClickHouse/blob/master/libs/libwidechar_width/LICENSE) | +| llvm | [BSD3-条項ライセンス](https://github.com/ClickHouse-Extras/llvm/blob/163def217817c90fb982a6daf384744d8472b92b/llvm/LICENSE.TXT) | +| lz4comment | [BSD2-条項ライセンス](https://github.com/lz4/lz4/blob/c10863b98e1503af90616ae99725ecd120265dfb/LICENSE) | +| mariadb-connector-c | [LGPL v2.1](https://github.com/ClickHouse-Extras/mariadb-connector-c/blob/3.1/COPYING.LIB) | +| murmurhash | [パブリック](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/murmurhash/LICENSE) | +| pdqsort | [Zlibライセンス](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/pdqsort/license.txt) | +| ポコ | [Boostソフトウェアライセンス-Version1.0](https://github.com/ClickHouse-Extras/poco/blob/fe5505e56c27b6ecb0dcbc40c49dc2caf4e9637f/LICENSE) | +| protobuf | [BSD3-条項ライセンス](https://github.com/ClickHouse-Extras/protobuf/blob/12735370922a35f03999afff478e1c6d7aa917a4/LICENSE) | +| re2unit description in lists | [BSD3-条項ライセンス](https://github.com/google/re2/blob/7cf8b88e8f70f97fd4926b56aa87e7f53b2717e0/LICENSE) | +| UnixODBC | [LGPL v2.1](https://github.com/ClickHouse-Extras/UnixODBC/tree/b0ad30f7f6289c12b76f04bfb9d466374bb32168) | +| zlib-ng | [Zlibライセンス](https://github.com/ClickHouse-Extras/zlib-ng/blob/develop/LICENSE.md) | +| zstd | [BSD3-条項ライセンス](https://github.com/facebook/zstd/blob/dev/LICENSE) | diff --git a/docs/ja/development/developer_instruction.md b/docs/ja/development/developer_instruction.md deleted file mode 120000 index bdfa9047aa2..00000000000 --- a/docs/ja/development/developer_instruction.md +++ /dev/null @@ -1 +0,0 @@ -../../en/development/developer_instruction.md \ No newline at end of file diff --git a/docs/ja/development/developer_instruction.md b/docs/ja/development/developer_instruction.md new file mode 100644 index 00000000000..32850a643b5 --- /dev/null +++ b/docs/ja/development/developer_instruction.md @@ -0,0 +1,286 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 61 +toc_title: "\u521D\u5FC3\u8005\u306E\u65B9ClickHouse\u958B\u767A\u8005\u306E\u6307\ + \u793A" +--- + +ClickHouseの建物は、Linux、FreeBSDおよびMac OS Xでサポートされています。 + +# Windowsを使用する場合 {#if-you-use-windows} + +Windowsを使用する場合は、Ubuntuで仮想マシンを作成する必要があります。 するのは、仮想マシンをインストールしてくださいVirtualBox. ダウンロードできますUbuntuのウェブサイト:https://www.ubuntu.com/\#download. を作成してください仮想マシンからダウンロードした画像を保少なくとも4GB RAMめます。 Ubuntuでコマンドライン端末を実行するには、その単語を含むプログラムを探してください “terminal” その名前(gnome端末、konsoleなど)で)またはCtrl+Alt+Tを押すだけです。 + +# 32ビットシステムを使用する場合 {#if-you-use-a-32-bit-system} + +ClickHouseは32ビットシステム上で動作または構築することはできません。 きの獲得へのアクセスでは、64ビットのシステムを継続できる。 + +# GitHubでのリポジトリの作成 {#creating-a-repository-on-github} + +ClickHouseリポジトリでの作業を開始するには、GitHubアカウントが必要です。 + +きょうてつもない場合は、ご登録が必要でhttps://github.com. についてsshキーを押すと、すべき操作を行うというテーマをアップロードしてgithub. それはあなたのパッチを送信するために必要です。 他のsshサーバーと同じsshキーを使用することも可能です。 + +ClickHouseリポジトリのフォークを作成します。 それを行うにはクリックしてください “fork” 右上のボタンhttps://github.com/ClickHouse/ClickHouse.それはフォークあなた自身のコピーのClickHouse/ClickHouseにあなたのアカウント。 + +開発プロセスは、最初に意図した変更をclickhouseのフォークにコミットし、次に “pull request” これらの変更がメインリポジトリ(ClickHouse/ClickHouse)に受け入れられるために。 + +Gitリポジトリで作業するには `git`. 
+
+Ubuntuでこれを行うには、コマンドラインターミナルで実行します:
+
+    sudo apt update
+    sudo apt install git
+
+Gitの使用に関する簡単なマニュアルは、ここにあります: https://services.github.com/on-demand/downloads/github-git-cheat-sheet.pdf 。
+Gitの詳細なマニュアルについては、https://git-scm.com/book/en/v2 を参照してください。
+
+# 開発マシンへのリポジトリのクローン作成 {#cloning-a-repository-to-your-development-machine}
+
+次に、ソースファイルを作業マシンにダウンロードする必要があります。 作業マシン上にリポジトリのローカルコピーを作成するため、これは “to clone a repository” と呼ばれます。
+
+コマンドライン端末で:
+
+    git clone --recursive git@github.com:your_github_username/ClickHouse.git
+    cd ClickHouse
+
+注: *your\_github\_username* は適切なものに置き換えてください!
+
+このコマンドは、プロジェクトの作業コピーを含む `ClickHouse` ディレクトリを作成します。
+
+作業ディレクトリへのパスに空白が含まれていると、ビルドシステムの実行に問題が生じる可能性があるため、空白を含めないことが重要です。
+
+ClickHouseリポジトリは `submodules` を使用しています。 これは、プロジェクトが依存する外部ライブラリなど、追加リポジトリへの参照の呼び名です。 つまり、リポジトリをクローンするときは、上記の例のように `--recursive` フラグを指定する必要があります。 サブモジュールなしでリポジトリをクローンした場合は、次を実行してダウンロードする必要があります:
+
+    git submodule init
+    git submodule update
+
+ステータスは次のコマンドで確認できます: `git submodule status`.
+
+次のエラーメッセージが表示された場合:
+
+    Permission denied (publickey).
+    fatal: Could not read from remote repository.
+
+    Please make sure you have the correct access rights
+    and the repository exists.
+
+一般的には、GitHubに接続するためのSSHキーがないことを意味します。 これらのキーは通常 `~/.ssh` にあります。 SSHキーを受け入れてもらうには、GitHub UIの設定セクションにそれらをアップロードする必要があります。
+
+HTTPSプロトコル経由でリポジトリをクローンすることもできます:
+
+    git clone https://github.com/ClickHouse/ClickHouse.git
+
+ただし、これでは変更をサーバーに送信することはできません。 一時的に使用して、後でSSHキーを追加し、`git remote` コマンドでリポジトリのリモートアドレスを置き換えることはできます。
+
+元のClickHouseレポのアドレスをローカルリポジトリに追加して、そこから更新を取得することもできます:
+
+    git remote add upstream git@github.com:ClickHouse/ClickHouse.git
+
+このコマンドを正常に実行すると、`git pull upstream master` でメインのClickHouseレポから更新をプルできるようになります。
+
+## サブモジュールの操作 {#working-with-submodules}
+
+Gitでサブモジュールを操作するのは苦痛です。 次のコマンドは管理に役立ちます:
+
+    # ! each command accepts --recursive
+    # Update remote URLs for submodules. Barely rare case
+    git submodule sync
+    # Add new submodules
+    git submodule init
+    # Update existing submodules to the current state
+    git submodule update
+    # Two last commands could be merged together
+    git submodule update --init
+
+次のコマンドは、すべてのサブモジュールを初期状態にリセットするのに役立ちます(!警告! -内部の変更は削除されます):
+
+    # Synchronizes submodules' remote URL with .gitmodules
+    git submodule sync --recursive
+    # Update the registered submodules with initialize not yet initialized
+    git submodule update --init --recursive
+    # Reset all changes done after HEAD
+    git submodule foreach git reset --hard
+    # Clean files from .gitignore
+    git submodule foreach git clean -xfd
+    # Repeat last 4 commands for all submodule
+    git submodule foreach git submodule sync --recursive
+    git submodule foreach git submodule update --init --recursive
+    git submodule foreach git submodule foreach git reset --hard
+    git submodule foreach git submodule foreach git clean -xfd
+
+# ビルドシステム {#build-system}
+
+ClickHouseのビルドにはCMakeとNinjaを使用します。
+
+CMake - Ninjaファイル(ビルドタスク)を生成できるメタビルドシステム。
+Ninja - これらのCMake生成タスクを実行するために使用される、速度に焦点を当てた小さなビルドシステム。
+
+Ubuntu、DebianまたはMintでは `sudo apt install cmake ninja-build` を実行してインストールします。
+
+CentOSやRedHatでは `sudo yum install cmake ninja-build` を実行します。
+
+ArchまたはGentooを使用している場合は、CMakeのインストール方法はご存知でしょう。
+
+Mac OS XにCMakeとNinjaをインストールするには、まずHomebrewをインストールし、その後brewで両者をインストールします:
+
+    /usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
+    brew install cmake ninja
+
+次に、CMakeのバージョンを確認します: `cmake --version`. 3.3未満の場合は、ウェブサイト https://cmake.org/download/ から新しいバージョンをインストールする必要があります。
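+
+インストールを確認する簡単な方法は、バージョンを表示してみることです(出力されるバージョン番号は環境によって異なります):
+
+``` bash
+cmake --version
+ninja --version
+```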
+ +# 省略可能な外部ライブラリ {#optional-external-libraries} + +ClickHouse複数の外部ライブラリのためのビルです。 それらのすべては、サブモジュールにあるソースからClickHouseと一緒に構築されるので、別々にインストールする必要はありません。 リストをチェックすることができ `contrib`. + +# C++コンパイラ {#c-compiler} + +バージョン9とclangバージョン8以上から始まるコンパイラgccは、clickhouseの構築に対応しています。 + +公式のyandexビルドは、現在、gccを使用しています。 そしてclangは開発のために通常より便利です。 が、当社の継続的インテグレーション(ci)プラットフォームを運チェックのための十数の組み合わせとなります。 + +UbuntuにGCCをインストールするには: `sudo apt install gcc g++` + +Gccのバージョンを確認する: `gcc --version`. それが9以下の場合は、ここで指示に従ってください:https://clickhouse。テック/ドキュメント/en/開発/ビルド/\#インストール-gcc-9. + +Mac OS XのビルドはClangでのみサポートされています。 ちょうど実行 `brew install llvm` + +Clangを使用する場合は、インストールすることもできます `libc++` と `lld` くださるのです。 を使用して `ccache` また、推奨されます。 + +# 建築プロセス {#the-building-process} + +ClickHouseをビルドする準備ができたら、別のディレクトリを作成することをお勧めします `build` 中 `ClickHouse` それはすべてのビルドの成果物が含まれています: + + mkdir build + cd build + +いくつかの異なるディレクトリ(build\_release、build\_debugなど)を持つことができます。)ビルドの異なるタイプのために。 + +内部にいる間 `build` ディレクトリ、CMakeを実行してビルドを設定します。 最初の実行の前に、コンパイラ(この例ではバージョン9gccコンパイラ)を指定する環境変数を定義する必要があります。 + +Linux: + + export CC=gcc-9 CXX=g++-9 + cmake .. + +Mac OS X: + + export CC=clang CXX=clang++ + cmake .. + +その `CC` variableはCのコンパイラ(Cコンパイラの略)を指定する。 `CXX` 変数は、どのC++コンパイラをビルドに使用するかを指示します。 + +より速いビルドのために、あなたは `debug` ビルドタイプ-最適化のないビルド。 その供給のために次のパラメータ `-D CMAKE_BUILD_TYPE=Debug`: + + cmake -D CMAKE_BUILD_TYPE=Debug .. + +このコマンドを実行することで、ビルドのタイプを変更できます。 `build` ディレクトリ。 + +ビルドするために忍者を実行: + + ninja clickhouse-server clickhouse-client + +この例では、必要なバイナリのみがビルドされます。 + +すべてのバイナリ(ユーティリティとテス: + + ninja + +フルビルドでは、メインバイナリを構築するために約30gbの空きディスク容量または15gbが必要です。 + +ビルドマシンで大量のramが利用可能な場合は、並列に実行されるビルドタスクの数を制限する必要があります `-j` パラメータcomment: + + ninja -j 1 clickhouse-server clickhouse-client + +RAMが4GBのマシンでは、1をRAM8GBに指定することをお勧めします `-j 2` は推奨。 + +メッセージが表示された場合: `ninja: error: loading 'build.ninja': No such file or directory` これは、ビルド構成の生成に失敗し、上記のメッセージを調べる必要があることを意味します。 + +ビルドプロセスが正常に開始されると、ビルドの進行状況、つまり処理されたタスクの数とタスクの総数が表示されます。 + +ながらメッセージについてprotobufファイルlibhdfs2図書館のような `libprotobuf WARNING` 現れるかもしれない 彼らは何にも影響せず、無視されても安全です。 + +成功を構築するの実行ファイル `ClickHouse//programs/clickhouse`: + + ls -l programs/clickhouse + +# ClickHouseの構築された実行可能ファイルを実行する {#running-the-built-executable-of-clickhouse} + +のサーバーの現在のユーザーに必要なナビゲート `ClickHouse/programs/server/` (の外にあります `build`)と実行: + + ../../../build/programs/clickhouse server + +この場合、clickhouseは現在のディレクトリにある設定ファイルを使用します。 実行することができ `clickhouse server` からのディレクトリのパスを指定し、設定ファイルとしてコマンドラインパラメータ `--config-file`. + +別のターミナルのclickhouse-clientでclickhouseに接続するには、次のように移動します `ClickHouse/build/programs/` と実行 `clickhouse client`. + +あなたが得れば `Connection refused` Mac OS XまたはFreeBSDで、ホストアドレス127.0.0.1を指定してみてください: + + clickhouse client --host 127.0.0.1 + +に置き換えることができ生産版clickhouseバイナリインストールされるシステムのカスタム構築clickhouseバイナリー. 
これを行うには、公式サイトの指示に従ってマシンにclickhouseをインストールします。 次に、以下を実行します: + + sudo service clickhouse-server stop + sudo cp ClickHouse/build/programs/clickhouse /usr/bin/ + sudo service clickhouse-server start + +それに注意 `clickhouse-client`, `clickhouse-server` どsymlinksの共通 `clickhouse` バイナリ + +を運営することも可能ですカスタム構築clickhouseバイナリのコンフィグファイルからのclickhouseパッケージをインストールシステム: + + sudo service clickhouse-server stop + sudo -u clickhouse ClickHouse/build/programs/clickhouse server --config-file /etc/clickhouse-server/config.xml + +# IDE(統合開発環境) {#ide-integrated-development-environment} + +使用するideがわからない場合は、clionを使用することをお勧めします。 clionは商用ソフトウェアですが、30日間の無料試用期間を提供しています。 また、学生のための無料です。 clionは、linuxとmac os xの両方で使用できます。 + +KDevelopとQTCreatorは、ClickHouseを開発するためのIDEの他の優れた選択肢です。 KDevelopは非常に便利なIDEとして提供されますが、不安定です。 プロジェクトを開いてしばらくしてからKDevelopがクラッシュした場合は、 “Stop All” プロジェクトのファイルのリストを開くとすぐにボタンを押します。 その後、KDevelopはうまく動作するはずです。 + +シンプルなコードエディタとして、sublime textまたはvisual studio code、またはkate(すべてlinuxで利用可能)を使用できます。 + +その場合には、clionが `build` パスはそれ自身で、それはまたそれ自身で選択します `debug` ビルドタイプの場合は、設定のために、あなたがインストールしたものではなく、CLionで定義されたバージョンのCMakeを使用し、最後にCLionが使用します `make` ビルドタスクを実行するには `ninja`. これは通常の動作ですが、混乱を避けるためにそれを念頭に置いてください。 + +# コードの記述 {#writing-code} + +ClickHouseの建築の記述はここに見つけることができる:https://clickhouse。技術/ドキュメント/en/開発/アーキテクチャ/ + +コードスタイルガイド:https://clickhouse。テック/ドキュメント/en/開発/スタイル/ + +書き込みテスト:https://clickhouse。技術/ドキュメント/en/開発/テスト/ + +タスクのリスト:https://github.com/clickhouse/clickhouse/blob/master/testsructions/easy\_tasks\_sorted\_en.md + +# テストデータ {#test-data} + +開発clickhouseが必要となり載荷実ックスです。 パフォーマンステストでは特に重要です。 yandexからの匿名化されたデータの特別に準備されたセットがあります。メトリカ さらに、3gbの空きディスク容量が必要です。 このデータがないの達成に必要なものの開発事ができます。 + + sudo apt install wget xz-utils + + wget https://clickhouse-datasets.s3.yandex.net/hits/tsv/hits_v1.tsv.xz + wget https://clickhouse-datasets.s3.yandex.net/visits/tsv/visits_v1.tsv.xz + + xz -v -d hits_v1.tsv.xz + xz -v -d visits_v1.tsv.xz + + clickhouse-client + + CREATE TABLE test.hits ( WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16, EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RegionID UInt32, UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, URLDomain String, RefererDomain String, Refresh UInt8, IsRobot UInt8, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16, UserAgentMinor FixedString(2), CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8, MobilePhoneModel String, Params String, IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, WindowClientWidth UInt16, WindowClientHeight UInt16, ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, SilverlightVersion2 UInt8, SilverlightVersion3 UInt32, SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8, FUniqID UInt64, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, IsParameter UInt8, DontCountHits UInt8, WithHash UInt8, HitColor FixedString(1), UTCEventTime DateTime, Age UInt8, Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), RemoteIP UInt32, RemoteIP6 FixedString(16), WindowName Int32, OpenerName Int32, HistoryLength Int16, 
BrowserLanguage FixedString(2), BrowserCountry FixedString(2), SocialNetwork String, SocialAction String, HTTPError UInt16, SendTiming Int32, DNSTiming Int32, ConnectTiming Int32, ResponseStartTiming Int32, ResponseEndTiming Int32, FetchTiming Int32, RedirectTiming Int32, DOMInteractiveTiming Int32, DOMContentLoadedTiming Int32, DOMCompleteTiming Int32, LoadEventStartTiming Int32, LoadEventEndTiming Int32, NSToDOMContentLoadedTiming Int32, FirstPaintTiming Int32, RedirectCount Int8, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String, ParamCurrency FixedString(3), ParamCurrencyID UInt16, GoalsReached Array(UInt32), OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64, URLHash UInt64, CLID UInt32, YCLID UInt64, ShareService String, ShareURL String, ShareTitle String, `ParsedParams.Key1` Array(String), `ParsedParams.Key2` Array(String), `ParsedParams.Key3` Array(String), `ParsedParams.Key4` Array(String), `ParsedParams.Key5` Array(String), `ParsedParams.ValueDouble` Array(Float64), IslandID FixedString(16), RequestNum UInt32, RequestTry UInt8) ENGINE = MergeTree PARTITION BY toYYYYMM(EventDate) SAMPLE BY intHash32(UserID) ORDER BY (CounterID, EventDate, intHash32(UserID), EventTime); + + CREATE TABLE test.visits ( CounterID UInt32, StartDate Date, Sign Int8, IsNew UInt8, VisitID UInt64, UserID UInt64, StartTime DateTime, Duration UInt32, UTCStartTime DateTime, PageViews Int32, Hits Int32, IsBounce UInt8, Referer String, StartURL String, RefererDomain String, StartURLDomain String, EndURL String, LinkURL String, IsDownload UInt8, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, PlaceID Int32, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), IsYandex UInt8, GoalReachesDepth Int32, GoalReachesURL Int32, GoalReachesAny Int32, SocialSourceNetworkID UInt8, SocialSourcePage String, MobilePhoneModel String, ClientEventTime DateTime, RegionID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RemoteIP UInt32, RemoteIP6 FixedString(16), IPNetworkID UInt32, SilverlightVersion3 UInt32, CodeVersion UInt32, ResolutionWidth UInt16, ResolutionHeight UInt16, UserAgentMajor UInt16, UserAgentMinor UInt16, WindowClientWidth UInt16, WindowClientHeight UInt16, SilverlightVersion2 UInt8, SilverlightVersion4 UInt16, FlashVersion3 UInt16, FlashVersion4 UInt16, ClientTimeZone Int16, OS UInt8, UserAgent UInt8, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, NetMajor UInt8, NetMinor UInt8, MobilePhone UInt8, SilverlightVersion1 UInt8, Age UInt8, Sex UInt8, Income UInt8, JavaEnable UInt8, CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, BrowserLanguage UInt16, BrowserCountry UInt16, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), Params Array(String), `Goals.ID` Array(UInt32), `Goals.Serial` Array(UInt32), `Goals.EventTime` Array(DateTime), `Goals.Price` Array(Int64), `Goals.OrderID` Array(String), `Goals.CurrencyID` Array(UInt32), WatchIDs Array(UInt64), ParamSumPrice Int64, ParamCurrency FixedString(3), ParamCurrencyID UInt16, ClickLogID UInt64, ClickEventID Int32, ClickGoodEvent Int32, ClickEventTime DateTime, ClickPriorityID Int32, ClickPhraseID Int32, ClickPageID Int32, ClickPlaceID Int32, ClickTypeID Int32, ClickResourceID Int32, ClickCost 
UInt32, ClickClientIP UInt32, ClickDomainID UInt32, ClickURL String, ClickAttempt UInt8, ClickOrderID UInt32, ClickBannerID UInt32, ClickMarketCategoryID UInt32, ClickMarketPP UInt32, ClickMarketCategoryName String, ClickMarketPPName String, ClickAWAPSCampaignName String, ClickPageName String, ClickTargetType UInt16, ClickTargetPhraseID UInt64, ClickContextType UInt8, ClickSelectType Int8, ClickOptions String, ClickGroupBannerID Int32, OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, FirstVisit DateTime, PredLastVisit Date, LastVisit Date, TotalVisits UInt32, `TraficSource.ID` Array(Int8), `TraficSource.SearchEngineID` Array(UInt16), `TraficSource.AdvEngineID` Array(UInt8), `TraficSource.PlaceID` Array(UInt16), `TraficSource.SocialSourceNetworkID` Array(UInt8), `TraficSource.Domain` Array(String), `TraficSource.SearchPhrase` Array(String), `TraficSource.SocialSourcePage` Array(String), Attendance FixedString(16), CLID UInt32, YCLID UInt64, NormalizedRefererHash UInt64, SearchPhraseHash UInt64, RefererDomainHash UInt64, NormalizedStartURLHash UInt64, StartURLDomainHash UInt64, NormalizedEndURLHash UInt64, TopLevelDomain UInt64, URLScheme UInt64, OpenstatServiceNameHash UInt64, OpenstatCampaignIDHash UInt64, OpenstatAdIDHash UInt64, OpenstatSourceIDHash UInt64, UTMSourceHash UInt64, UTMMediumHash UInt64, UTMCampaignHash UInt64, UTMContentHash UInt64, UTMTermHash UInt64, FromHash UInt64, WebVisorEnabled UInt8, WebVisorActivity UInt32, `ParsedParams.Key1` Array(String), `ParsedParams.Key2` Array(String), `ParsedParams.Key3` Array(String), `ParsedParams.Key4` Array(String), `ParsedParams.Key5` Array(String), `ParsedParams.ValueDouble` Array(Float64), `Market.Type` Array(UInt8), `Market.GoalID` Array(UInt32), `Market.OrderID` Array(String), `Market.OrderPrice` Array(Int64), `Market.PP` Array(UInt32), `Market.DirectPlaceID` Array(UInt32), `Market.DirectOrderID` Array(UInt32), `Market.DirectBannerID` Array(UInt32), `Market.GoodID` Array(String), `Market.GoodName` Array(String), `Market.GoodQuantity` Array(Int32), `Market.GoodPrice` Array(Int64), IslandID FixedString(16)) ENGINE = CollapsingMergeTree(Sign) PARTITION BY toYYYYMM(StartDate) SAMPLE BY intHash32(UserID) ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID); + + clickhouse-client --max_insert_block_size 100000 --query "INSERT INTO test.hits FORMAT TSV" < hits_v1.tsv + clickhouse-client --max_insert_block_size 100000 --query "INSERT INTO test.visits FORMAT TSV" < visits_v1.tsv + +# プル要求の作成 {#creating-pull-request} + +GitHubのUIであなたのフォークリポジトリに移動します。 ブランチで開発している場合は、そのブランチを選択する必要があります。 があるでしょう “Pull request” 画面にあるボタン。 本質的に、これは “create a request for accepting my changes into the main repository”. + +作業がまだ完了していない場合でも、プル要求を作成できます。 この場合、単語を入れてください “WIP” (進行中の作業)タイトルの冒頭で、後で変更することができます。 これは、利用可能なすべてのテストを実行するだけでなく、変更の協調的なレビューや議論にも役立ちます。 変更内容の簡単な説明を入力することが重要ですが、後でリリースチェンジログの生成に使用されます。 + +テストは、yandexの従業員がタグであなたのprにラベルを付けるとすぐに開始します “can be tested”. The results of some first checks (e.g. code style) will come in within several minutes. Build check results will arrive within half an hour. And the main set of tests will report itself within an hour. + +システムは、プル要求のclickhouseバイナリビルドを個別に準備します。 これらのビルドを取得するには “Details” 次へのリンク “ClickHouse build check” チェックのリストのエントリ。 そこには、ビルドへの直接リンクがあります。あなたも、あなたの生産サーバー上に展開することができClickHouseのdebパッケージ(あなたは恐れていない場合). 
+ +おそらくいくつかのビルドは最初は失敗します。 これは、gccとclangの両方のビルドをチェックし、既存の警告のほとんどすべてをチェックするためです(常に `-Werror` フラグ)clangで有効になっています。 その同じページでは、すべてのビルドログを見つけることができるので、可能な限りすべての方法でClickHouseをビルドする必要はありません。 diff --git a/docs/ja/development/index.md b/docs/ja/development/index.md deleted file mode 120000 index 1e2ad97dcc5..00000000000 --- a/docs/ja/development/index.md +++ /dev/null @@ -1 +0,0 @@ -../../en/development/index.md \ No newline at end of file diff --git a/docs/ja/development/index.md b/docs/ja/development/index.md new file mode 100644 index 00000000000..10a029893fb --- /dev/null +++ b/docs/ja/development/index.md @@ -0,0 +1,12 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_folder_title: Development +toc_hidden: true +toc_priority: 58 +toc_title: "\u96A0\u3055\u308C\u305F" +--- + +# ClickHouse開発 {#clickhouse-development} + +[元の記事](https://clickhouse.tech/docs/en/development/) diff --git a/docs/ja/development/style.md b/docs/ja/development/style.md deleted file mode 120000 index c1bbf11f421..00000000000 --- a/docs/ja/development/style.md +++ /dev/null @@ -1 +0,0 @@ -../../en/development/style.md \ No newline at end of file diff --git a/docs/ja/development/style.md b/docs/ja/development/style.md new file mode 100644 index 00000000000..93e4d8972aa --- /dev/null +++ b/docs/ja/development/style.md @@ -0,0 +1,841 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 68 +toc_title: "C++\u30B3\u30FC\u30C9\u306E\u66F8\u304D\u65B9" +--- + +# C++コードの書き方 {#how-to-write-c-code} + +## 一般的な推奨事項 {#general-recommendations} + +**1.** 以下は要件ではなく推奨事項です。 + +**2.** コードを編集している場合は、既存のコードの書式設定に従うことが理にかなっています。 + +**3.** 一貫性のためにコードスタイルが必要です。 一貫性により、コードを読みやすくなり、コードの検索も容易になります。 + +**4.** ルールの多くは論理的な理由を持っていない;彼らは確立された慣行によって決定されます。 + +## 書式設定 {#formatting} + +**1.** 書式設定のほとんどは自動的に行われます `clang-format`. + +**2.** インデントは4スペースです。 タブが四つのスペースを追加するように開発環境を構成します。 + +**3.** 中括弧を開くと閉じるには、別の行にする必要があります。 + +``` cpp +inline void readBoolText(bool & x, ReadBuffer & buf) +{ + char tmp = '0'; + readChar(tmp, buf); + x = tmp != '0'; +} +``` + +**4.** 関数本体全体が単一の場合 `statement`、それは単一ラインに置くことができます。 中括弧の周りにスペースを配置します(行末のスペース以外)。 + +``` cpp +inline size_t mask() const { return buf_size() - 1; } +inline size_t place(HashValue x) const { return x & mask(); } +``` + +**5.** 機能のため。 をかけないスットに固定して使用します。 + +``` cpp +void reinsert(const Value & x) +``` + +``` cpp +memcpy(&buf[place_value], &x, sizeof(x)); +``` + +**6.** で `if`, `for`, `while` その他の式では、関数呼び出しではなく、開き括弧の前にスペースが挿入されます。 + +``` cpp +for (size_t i = 0; i < rows; i += storage.index_granularity) +``` + +**7.** 二項演算子の前後にスペースを追加 (`+`, `-`, `*`, `/`, `%`, …) and the ternary operator `?:`. + +``` cpp +UInt16 year = (s[0] - '0') * 1000 + (s[1] - '0') * 100 + (s[2] - '0') * 10 + (s[3] - '0'); +UInt8 month = (s[5] - '0') * 10 + (s[6] - '0'); +UInt8 day = (s[8] - '0') * 10 + (s[9] - '0'); +``` + +**8.** ラインフィードが入力された場合は、オペレータを新しい行に置き、その前にインデントを増やします。 + +``` cpp +if (elapsed_ns) + message << " (" + << rows_read_on_server * 1000000000 / elapsed_ns << " rows/s., " + << bytes_read_on_server * 1000.0 / elapsed_ns << " MB/s.) "; +``` + +**9.** 必要に応じて、行内の整列にスペースを使用できます。 + +``` cpp +dst.ClickLogID = click.LogID; +dst.ClickEventID = click.EventID; +dst.ClickGoodEvent = click.GoodEvent; +``` + +**10.** 演算子の周囲にスペースを使用しない `.`, `->`. + +必要に応じて、オペレータは次の行にラップすることができます。 この場合、その前のオフセットが増加する。 + +**11.** 単項演算子を区切るためにスペースを使用しない (`--`, `++`, `*`, `&`, …) from the argument. 
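+
+この規則を示す仮の例:
+
+``` cpp
+//correct
+++count;
+if (!ok)
+    *pos = '\0';
+//incorrect
+++ count;
+if (! ok)
+    * pos = '\0';
+```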
+ +**12.** 入れの後に空白、コンマといえるものです。 同じルールはaの中のセミコロンのために行く `for` 式。 + +**13.** を区切るためにスペースを使用しない。 `[]` オペレーター + +**14.** で `template <...>` 式の間にスペースを使用します `template` と `<`;後にスペースなし `<` または前に `>`. + +``` cpp +template +struct AggregatedStatElement +{} +``` + +**15.** クラスと構造では、 `public`, `private`、と `protected` 同じレベルで `class/struct` コードの残りの部分をインデントします。 + +``` cpp +template +class MultiVersion +{ +public: + /// Version of object for usage. shared_ptr manage lifetime of version. + using Version = std::shared_ptr; + ... +} +``` + +**16.** 同じ場合 `namespace` ファイル全体に使用され、他に重要なものはありません。 `namespace`. + +**17.** のためのブロック `if`, `for`, `while`、または他の式は、単一の `statement` 中括弧は省略可能です。 場所は `statement` 別の行に、代わりに。 この規則は、ネストされた場合にも有効です `if`, `for`, `while`, … + +しかし、内側の場合 `statement` 中括弧または `else`、外部ブロックは中括弧で記述する必要があります。 + +``` cpp +/// Finish write. +for (auto & stream : streams) + stream.second->finalize(); +``` + +**18.** ラインの端にスペースがあってはなりません。 + +**19.** ソースファイルはUTF-8エンコードです。 + +**20.** ASCII以外の文字は、文字列リテラルで使用できます。 + +``` cpp +<< ", " << (timer.elapsed() / chunks_stats.hits) << " μsec/hit."; +``` + +**21.** 単一行に複数の式を書き込まないでください。 + +**22.** 関数内のコードのセクションをグループ化し、複数の空行で区切ります。 + +**23.** 関数、クラスなどを一つまたは二つの空行で区切ります。 + +**24.** `A const` (値に関連する)型名の前に記述する必要があります。 + +``` cpp +//correct +const char * pos +const std::string & s +//incorrect +char const * pos +``` + +**25.** ポインタまたは参照を宣言するとき、 `*` と `&` 記号は両側のスペースで区切る必要があります。 + +``` cpp +//correct +const char * pos +//incorrect +const char* pos +const char *pos +``` + +**26.** テンプレート-タイプを使用する場合は、それらを `using` キーワード(最も単純な場合を除く)。 + +つまり、テンプレートのパラメータは指定しみ `using` そして、コードで繰り返されていません。 + +`using` 関数の内部など、ローカルで宣言できます。 + +``` cpp +//correct +using FileStreams = std::map>; +FileStreams streams; +//incorrect +std::map> streams; +``` + +**27.** なを宣言するのに複数の変数の異なる種類の一つです。 + +``` cpp +//incorrect +int x, *y; +``` + +**28.** Cスタイルのキャストは使用しないでください。 + +``` cpp +//incorrect +std::cerr << (int)c <<; std::endl; +//correct +std::cerr << static_cast(c) << std::endl; +``` + +**29.** 授業や構造体、グループのメンバーは、機能別に各部の可視性です。 + +**30.** 小さなクラスや構造体の場合、メソッド宣言を実装から分離する必要はありません。 + +同じことが、クラスや構造体の小さなメソッドにも当てはまります。 + +テンプレート化されたクラスと構造体の場合、メソッド宣言を実装から分離しないでください(そうでない場合は、同じ翻訳単位で定義する必要があ + +**31.** 行を140文字で折り返すことができます(80文字ではなく)。 + +**32.** Postfixが不要な場合は、必ずprefix increment/decrement演算子を使用してください。 + +``` cpp +for (Names::const_iterator it = column_names.begin(); it != column_names.end(); ++it) +``` + +## コメント {#comments} + +**1.** コードのすべての非自明な部分にコメントを追加してください。 + +これは非常に重要です。 書面でのコメントだけを更新したいのですが--このコードに必要な、又は間違っています。 + +``` cpp +/** Part of piece of memory, that can be used. + * For example, if internal_buffer is 1MB, and there was only 10 bytes loaded to buffer from file for reading, + * then working_buffer will have size of only 10 bytes + * (working_buffer.end() will point to position right after those 10 bytes available for read). + */ +``` + +**2.** コメントは、必要に応じて詳細に記述できます。 + +**3.** コメントを記述するコードの前に配置します。 まれに、コメントは同じ行のコードの後に来ることがあります。 + +``` cpp +/** Parses and executes the query. +*/ +void executeQuery( + ReadBuffer & istr, /// Where to read the query from (and data for INSERT, if applicable) + WriteBuffer & ostr, /// Where to write the result + Context & context, /// DB, tables, data types, engines, functions, aggregate functions... 
+ BlockInputStreamPtr & query_plan, /// Here could be written the description on how query was executed + QueryProcessingStage::Enum stage = QueryProcessingStage::Complete /// Up to which stage process the SELECT query + ) +``` + +**4.** コメントは英語のみで記述する必要があります。 + +**5.** を書いていて図書館を含む詳細なコメントで説明を主なヘッダファイルです。 + +**6.** 追加情報を提供しないコメントを追加しないでください。 特に放置しないでください空のコメントこのような: + +``` cpp +/* +* Procedure Name: +* Original procedure name: +* Author: +* Date of creation: +* Dates of modification: +* Modification authors: +* Original file name: +* Purpose: +* Intent: +* Designation: +* Classes used: +* Constants: +* Local variables: +* Parameters: +* Date of creation: +* Purpose: +*/ +``` + +この例はリソースから借用されていますhttp://home.tamk.fi/~jaalto/course/coding-style/doc/unmaintainable-code/。 + +**7.** ガベージコメント(作成者、作成日)を書いてはいけません。.)各ファイルの先頭に。 + +**8.** シングルラインのコメントずつスラッシュ: `///` 複数行のコメントは次の形式で始まります `/**`. これらのコメントは、 “documentation”. + +注:doxygenを使用して、これらのコメントからドキュメントを生成できます。 しかし、doxygenはideのコードをナビゲートする方が便利なので、一般的には使用されません。 + +**9.** 複数行のコメントは、先頭と末尾に空の行を含めることはできません(複数行のコメントを閉じる行を除く)。 + +**10.** コメント行コードは、基本的なコメントは、 “documenting” コメント. + +**11.** コミットする前に、コードのコメント部分を削除します。 + +**12.** コメントやコードで冒涜を使用しないでください。 + +**13.** 大文字は使用しないでください。 過度の句読点を使用しないでください。 + +``` cpp +/// WHAT THE FAIL??? +``` + +**14.** 区切り文字の作成にはコメントを使用しません。 + +``` cpp +///****************************************************** +``` + +**15.** コメントで議論を開始しないでください。 + +``` cpp +/// Why did you do this stuff? +``` + +**16.** それが何であるかを記述するブロックの最後にコメントを書く必要はありません。 + +``` cpp +/// for +``` + +## 名前 {#names} + +**1.** 変数とクラスメンバーの名前には、アンダースコア付きの小文字を使用します。 + +``` cpp +size_t max_block_size; +``` + +**2.** 関数(メソッド)の名前には、小文字で始まるcamelCaseを使用します。 + +``` cpp +std::string getName() const override { return "Memory"; } +``` + +**3.** クラス(構造体)の名前には、大文字で始まるCamelCaseを使用します。 I以外の接頭辞はインターフェイスには使用されません。 + +``` cpp +class StorageMemory : public IStorage +``` + +**4.** `using` クラスと同じように名前が付けられます。 `_t` 最後に。 + +**5.** テンプレート型引数の名前:単純なケースでは、 `T`; `T`, `U`; `T1`, `T2`. + +より複雑なケースでは、クラス名の規則に従うか、プレフィックスを追加します `T`. + +``` cpp +template +struct AggregatedStatElement +``` + +**6.** テンプレート定数引数の名前:変数名の規則に従うか、または `N` 単純なケースでは。 + +``` cpp +template +struct ExtractDomain +``` + +**7.** 抽象クラス(インターフェイス)の場合は、 `I` 接頭辞。 + +``` cpp +class IBlockInputStream +``` + +**8.** ローカルで変数を使用する場合は、短い名前を使用できます。 + +それ以外の場合は、意味を説明する名前を使用します。 + +``` cpp +bool info_successfully_loaded = false; +``` + +**9.** の名前 `define`sおよびグローバル定数は、ALL\_CAPSとアンダースコアを使用します。 + +``` cpp +#define MAX_SRC_TABLE_NAMES_TO_STORE 1000 +``` + +**10.** ファイル名は内容と同じスタイルを使用する必要があります。 + +ファイルに単一のクラスが含まれている場合は、クラス(camelcase)と同じようにファイルに名前を付けます。 + +ファイルに単一の関数が含まれている場合は、そのファイルに関数(camelcase)と同じ名前を付けます。 + +**11.** 名前に略語が含まれている場合は、: + +- 変数名の場合、省略形は小文字を使用する必要があります `mysql_connection` (ない `mySQL_connection`). +- クラスと関数の名前については、大文字を省略形にしておきます`MySQLConnection` (ない `MySqlConnection`). + +**12.** クラスメンバを初期化するためだけに使用されるコンストラクタ引数は、クラスメンバと同じように名前を付ける必要がありますが、最後にアン + +``` cpp +FileQueueProcessor( + const std::string & path_, + const std::string & prefix_, + std::shared_ptr handler_) + : path(path_), + prefix(prefix_), + handler(handler_), + log(&Logger::get("FileQueueProcessor")) +{ +} +``` + +のアンダースコアの接尾辞ければ省略することができ、引数を使用していないのコンストラクタ。 + +**13.** ローカル変数とクラスメンバの名前に違いはありません(接頭辞は必要ありません)。 + +``` cpp +timer (not m_timer) +``` + +**14.** の定数のために `enum`、大文字でキャメルケースを使用します。 ALL\_CAPSも許容されます。 この `enum` ローカルではなく、 `enum class`. 
+ +``` cpp +enum class CompressionMethod +{ + QuickLZ = 0, + LZ4 = 1, +}; +``` + +**15.** すべての名前は英語である必要があります。 ロシア語の音訳は許可されていません。 + + not Stroka + +**16.** 略語は、よく知られている場合(ウィキペディアや検索エンジンで略語の意味を簡単に見つけることができる場合)には許容されます。 + + `AST`, `SQL`. + + Not `NVDH` (some random letters) + +短くされた版が共通の使用なら不完全な単語は受諾可能である。 + +コメントの横にフルネームが含まれている場合は、省略形を使用することもできます。 + +**17.** C++ソースコードを持つファイル名には、 `.cpp` 拡張子。 ヘッダーファイルには、 `.h` 拡張子。 + +## コードの書き方 {#how-to-write-code} + +**1.** メモリ管理。 + +手動メモリ割り当て解除 (`delete`)ライブラリコードでのみ使用できます。 + +ライブラリコードでは、 `delete` 演算子はデストラクターでのみ使用できます。 + +アプリケーショ + +例: + +- 最も簡単な方法は、スタックにオブジェクトを配置するか、別のクラスのメンバーにすることです。 +- 多数の小さなオブジェクトの場合は、コンテナを使用します。 +- ヒープに存在する少数のオブジェクトの自動割り当て解除の場合は、以下を使用します `shared_ptr/unique_ptr`. + +**2.** リソース管理。 + +使用 `RAII` と上記参照。 + +**3.** エラー処理。 + +例外を使用します。 ほとんどの場合、例外をスローするだけで、例外をキャッチする必要はありません `RAII`). + +オフラインのデータ処理アプリケーションでは、しばしば可能な漁例外をスローしました。 + +ユーザー要求を処理するサーバーでは、通常、接続ハンドラの最上位レベルで例外をキャッチするだけで十分です。 + +スレッド機能、とくすべての例外rethrowのメインスレッド後 `join`. + +``` cpp +/// If there weren't any calculations yet, calculate the first block synchronously +if (!started) +{ + calculate(); + started = true; +} +else /// If calculations are already in progress, wait for the result + pool.wait(); + +if (exception) + exception->rethrow(); +``` + +ない非表示の例外なります。 盲目的にすべての例外をログに記録することはありません。 + +``` cpp +//Not correct +catch (...) {} +``` + +いくつかの例外を無視する必要がある場合は、特定の例外に対してのみ行い、残りを再スローします。 + +``` cpp +catch (const DB::Exception & e) +{ + if (e.code() == ErrorCodes::UNKNOWN_AGGREGATE_FUNCTION) + return nullptr; + else + throw; +} +``` + +応答コード付きの関数を使用する場合、または `errno`、常に結果をチェックし、エラーの場合は例外をスローします。 + +``` cpp +if (0 != close(fd)) + throwFromErrno("Cannot close file " + file_name, ErrorCodes::CANNOT_CLOSE_FILE); +``` + +`Do not use assert`. + +**4.** 例外タイプ。 + +アプリケーションコードで複雑な例外階層を使用する必要はありません。 例外テキストは、システム管理者が理解できるはずです。 + +**5.** 投げから例外がスローされる場合destructors. + +これは推奨されませんが、許可されています。 + +次のオプションを使用: + +- 関数の作成 (`done()` または `finalize()`)それは例外につながる可能性のあるすべての作業を事前に行います。 その関数が呼び出された場合、後でデストラクタに例外はないはずです。 +- 複雑すぎるタスク(ネットワーク経由でメッセージを送信するなど)は、クラスユーザーが破棄する前に呼び出す必要がある別のメソッドに入れることがで +- デストラクタに例外がある場合は、それを隠すよりもログに記録する方が良いでしょう(ロガーが利用可能な場合)。 +- 簡単な適用では、頼ることは受諾可能です `std::terminate` (以下の場合 `noexcept` デフォルトではC++11)例外を処理する。 + +**6.** 匿名のコードブロック。 + +特定の変数をローカルにするために、単一の関数内に別のコードブロックを作成して、ブロックを終了するときにデストラクタが呼び出されるように + +``` cpp +Block block = data.in->read(); + +{ + std::lock_guard lock(mutex); + data.ready = true; + data.block = block; +} + +ready_any.set(); +``` + +**7.** マルチスレッド。 + +オフラインのデータ処理プログラム: + +- 単一のcpuコアで最高のパフォーマンスを得るようにしてください。 必要に応じてコードを並列化できます。 + +でサーバアプリケーション: + +- スレッドプールを使用して要求を処理します。 この時点で、まだたタスクを必要とした管理コスイッチング時の値です。※ + +Forkは並列化には使用されません。 + +**8.** スレッドの同期。 + +多くの場合、異なるスレッドに異なるメモリセルを使用させることができます(さらに良い:異なるキャッシュライン)。 `joinAll`). + +同期が必要な場合は、ほとんどの場合、mutexを使用すれば十分です。 `lock_guard`. + +他の例では、システム同期プリミティブを使用します。 使用中の待ち時間を使用しないで下さい。 + +原子操作は、最も単純な場合にのみ使用する必要があります。 + +主な専門分野でない限り、ロックフリーのデータ構造を実装しようとしないでください。 + +**9.** ポインタ対参照。 + +ほとんどの場合、参照を好む。 + +**10.** const + +定数参照、定数へのポインタを使用する, `const_iterator`、およびconstメソッド。 + +考える `const` デフォルトにしてnonを使用するには-`const` 必要なときだけ。 + +値によって変数を渡すとき、 `const` 通常は意味をなさない。 + +**11.** 署名なし + +使用 `unsigned` 必要であれば。 + +**12.** 数値型。 + +タイプの使用 `UInt8`, `UInt16`, `UInt32`, `UInt64`, `Int8`, `Int16`, `Int32`、と `Int64`、同様に `size_t`, `ssize_t`、と `ptrdiff_t`. + +これらの型を数値に使用しないでください: `signed/unsigned long`, `long long`, `short`, `signed/unsigned char`, `char`. + +**13.** 引数を渡す。 + +参照によって複雑な値を渡す(以下を含む `std::string`). + +関数がヒープで作成されたオブジェクトの所有権を取得する場合は、引数の型を作成します `shared_ptr` または `unique_ptr`. 
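+
+仮のシグネチャを使った例(関数名は説明のためのもので、実際のAPIではありません):
+
+``` cpp
+/// 複雑な値(std::stringを含む)は参照で渡す
+void executeQuery(const std::string & query);
+
+/// 関数がヒープ上のオブジェクトの所有権を取得する場合は、引数をunique_ptrまたはshared_ptrにする
+void attachStorage(std::unique_ptr<IStorage> storage);
+```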
+ +**14.** 戻り値。 + +ほとんどの場合、単に `return`. 書き込まない `[return std::move(res)]{.strike}`. + +関数がヒープ上にオブジェクトを割り当て、それを返す場合は、 `shared_ptr` または `unique_ptr`. + +まれに、引数を使用して値を返す必要がある場合があります。 この場合、引数は参照でなければなりません。 + +``` cpp +using AggregateFunctionPtr = std::shared_ptr; + +/** Allows creating an aggregate function by its name. + */ +class AggregateFunctionFactory +{ +public: + AggregateFunctionFactory(); + AggregateFunctionPtr get(const String & name, const DataTypes & argument_types) const; +``` + +**15.** 名前空間。 + +別のものを使用する必要はありません `namespace` 適用コードのため。 + +小さな図書館にもこれは必要ありません。 + +中規模から大規模のライブラリの場合は、すべてを `namespace`. + +ライブラリの中で `.h` ファイル、使用できます `namespace detail` アプリケーションコードに必要のない実装の詳細を非表示にする。 + +で `.cpp` ファイル、あなたが使用できる `static` または匿名の名前空間は、記号を非表示にします。 + +また、 `namespace` に使用することができ `enum` 対応する名前が外部に落ちないようにするには `namespace` (しかし、それを使用する方が良いです `enum class`). + +**16.** 遅延初期化。 + +初期化に引数が必要な場合は、通常はデフォルトのコンストラクタを記述すべきではありません。 + +後で初期化を遅らせる必要がある場合は、無効なオブジェクトを作成する既定のコンストラクターを追加できます。 または、少数のオブジェクトの場合は、次のものを使用できます `shared_ptr/unique_ptr`. + +``` cpp +Loader(DB::Connection * connection_, const std::string & query, size_t max_block_size_); + +/// For deferred initialization +Loader() {} +``` + +**17.** 仮想関数。 + +クラスが多態的な使用を意図していない場合は、関数を仮想にする必要はありません。 これはデストラクタにも当てはまります。 + +**18.** エンコーディング。 + +どこでもutf-8を使用します。 使用 `std::string`と`char *`. 使用しない `std::wstring`と`wchar_t`. + +**19.** ログ記録。 + +コードのどこにでも例を見てください。 + +コミットする前に、無意味なデバッグログとその他のデバッグ出力をすべて削除します。 + +トレースレベルでも、サイクルでのログ記録は避けるべきです。 + +ログには必読でログインです。 + +ログインできるアプリケーションコードにすることができます。 + +ログメッセージは英語で書く必要があります。 + +ログは、システム管理者が理解できることが望ましいです。 + +ログに冒涜を使用しないでください。 + +ログにutf-8エンコーディングを使用します。 まれに、ログに非ascii文字を使用できます。 + +**20.** 入出力。 + +使用しない `iostreams` アプリケーショ `stringstream`). + +を使用 `DB/IO` 代わりに図書館。 + +**21.** 日付と時刻。 + +を見る `DateLUT` ライブラリ。 + +**22.** 含める。 + +常に使用 `#pragma once` 代わりに、警備員を含みます。 + +**23.** 使用。 + +`using namespace` は使用されません。 を使用することができ `using` 特定の何かと。 しかし、クラスや関数の中でローカルにします。 + +**24.** 使用しない `trailing return type` 必要な場合を除き、機能のため。 + +``` cpp +[auto f() -> void;]{.strike} +``` + +**25.** 変数の宣言と初期化。 + +``` cpp +//right way +std::string s = "Hello"; +std::string s{"Hello"}; + +//wrong way +auto s = std::string{"Hello"}; +``` + +**26.** 仮想関数の場合は、以下を記述します `virtual` 基本クラスでは、次のように記述します `override` 代わりに `virtual` 子孫クラスで。 + +## C++の未使用機能 {#unused-features-of-c} + +**1.** 仮想継承は使用されません。 + +**2.** C++03の例外指定子は使用されません。 + +## Platform {#platform} + +**1.** を書いていますコードの特定の。 + +それが同じ場合には、クロス-プラットフォームまたは携帯コードが好ましい。 + +**2.** 言語:C++17。 + +**3.** コンパイラ: `gcc`. この時点で(December2017)、をコードはコンパイル使用してバージョン7.2。 (コンパイルすることもできます `clang 4`.) + +標準ライブラリが使用されます (`libstdc++` または `libc++`). + +**4.**OS:LinuxのUbuntuは、正確なよりも古いではありません。 + +**5.**コードはx86\_64cpuアーキテクチャ用に書かれている。 + +CPU命令セットは、当社のサーバー間でサポートされる最小セットです。 現在、SSE4.2です。 + +**6.** 使用 `-Wall -Wextra -Werror` コンパイルフラグ。 + +**7.** 静的に接続するのが難しいライブラリを除くすべてのライブラリとの静的リンクを使用します。 `ldd` コマンド)。 + +**8.** コードは開発され、リリース設定でデバッグされます。 + +## ツール {#tools} + +**1.** KDevelopは良いIDEです。 + +**2.** デバッグのために、 `gdb`, `valgrind` (`memcheck`), `strace`, `-fsanitize=...`、または `tcmalloc_minimal_debug`. + +**3.** プロファイ `Linux Perf`, `valgrind` (`callgrind`)、または `strace -cf`. + +**4.** ソースはGitにあります。 + +**5.** アセンブリ使用 `CMake`. + +**6.** プログラムは `deb` パッケージ。 + +**7.** ることを約束し、マスターが破ってはいけないの。 + +選択したリビジョンのみが実行可能と見なされます。 + +**8.** コードが部分的にしか準備されていなくても、できるだけ頻繁にコミットを行います。 + +用の支店です。 + +あなたのコードが `master` ブランチはまだビルド可能ではない。 `push`. 
あなたはそれを終了するか、数日以内にそれを削除する必要があります。 + +**9.** 些細な変更の場合は、ブランチを使用してサーバーに公開します。 + +**10.** 未使用のコードはリポジトリから削除されます。 + +## ライブラリ {#libraries} + +**1.** C++14標準ライブラリが使用されています(実験的な拡張が許可されています)。 `boost` と `Poco` フレームワーク + +**2.** 必要に応じて、OSパッケージで利用可能な既知のライブラリを使用することができます。 + +すでに利用可能な良い解決策がある場合は、別のライブラリをインストールする必要があることを意味していても使用してください。 + +(が準備をしておいてくださ去の悪い図書館からのコードです。) + +**3.** パッケージに必要なものがないか、古いバージョンや間違ったタイプのコンパイルがない場合は、パッケージに含まれていないライブラリをインストール + +**4.** ライブラリが小さく、独自の複雑なビルドシステムを持たない場合は、ソースファイルを `contrib` フォルダ。 + +**5.** すでに使用されているライブラリが優先されます。 + +## 一般的な推奨事項 {#general-recommendations-1} + +**1.** できるだけ少ないコードを書く。 + +**2.** う最も単純な解決策です。 + +**3.** それがどのように機能し、内部ループがどのように機能するかを知るまで、コードを書かないでください。 + +**4.** 最も単純な場合は、次のようにします `using` クラスや構造体の代わりに。 + +**5.** 可能であれば、コピーコンストラクター、代入演算子、デストラクター(仮想関数を除く、クラスに少なくとも一つの仮想関数が含まれている場合)、コンストラ つまり、コンパイラ生成機能しないでください。 を使用することができ `default`. + +**6.** コードの単純化が推奨されます。 可能な場合は、コードのサイズを小さくします。 + +## その他の推奨事項 {#additional-recommendations} + +**1.** 明示的に指定する `std::` からのタイプの場合 `stddef.h` + +は推奨されません。 つまり、我々は書くことをお勧めします `size_t` 代わりに `std::size_t`、それは短いですので。 + +それは可能追加する `std::`. + +**2.** 明示的に指定する `std::` 標準Cライブラリの関数の場合 + +は推奨されません。 言い換えれば、 `memcpy` 代わりに `std::memcpy`. + +その理由は、次のような非標準的な機能があるからです `memmem`. 私達は機会にこれらの機能を使用します。 これらの関数は `namespace std`. + +あなたが書く場合 `std::memcpy` 代わりに `memcpy` どこでも、その後 `memmem` なし `std::` 奇妙に見えます。 + +それでも、あなたはまだ `std::` あなたがそれを好むなら。 + +**3.** 同じものが標準C++ライブラリで利用可能な場合、Cの関数を使用する。 + +これは、より効率的であれば許容されます。 + +たとえば、以下を使用します `memcpy` 代わりに `std::copy` メモリの大きな塊をコピーするため。 + +**4.** 複数行の関数の引数。 + +の他の包装のスタイルを許可: + +``` cpp +function( + T1 x1, + T2 x2) +``` + +``` cpp +function( + size_t left, size_t right, + const & RangesInDataParts ranges, + size_t limit) +``` + +``` cpp +function(size_t left, size_t right, + const & RangesInDataParts ranges, + size_t limit) +``` + +``` cpp +function(size_t left, size_t right, + const & RangesInDataParts ranges, + size_t limit) +``` + +``` cpp +function( + size_t left, + size_t right, + const & RangesInDataParts ranges, + size_t limit) +``` + +[元の記事](https://clickhouse.tech/docs/en/development/style/) diff --git a/docs/ja/development/tests.md b/docs/ja/development/tests.md deleted file mode 120000 index c03d36c3916..00000000000 --- a/docs/ja/development/tests.md +++ /dev/null @@ -1 +0,0 @@ -../../en/development/tests.md \ No newline at end of file diff --git a/docs/ja/development/tests.md b/docs/ja/development/tests.md new file mode 100644 index 00000000000..80901a859e7 --- /dev/null +++ b/docs/ja/development/tests.md @@ -0,0 +1,252 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 69 +toc_title: "ClickHouse\u30C6\u30B9\u30C8\u3092\u5B9F\u884C\u3059\u308B\u65B9\u6CD5" +--- + +# ClickHouse試験 {#clickhouse-testing} + +## 機能テスト {#functional-tests} + +機能テストは最も簡単で使いやすいものです。 clickhouseの機能のほとんどは、機能テストでテストすることができ、彼らはそのようにテストすることができclickhouseコード内のすべての変更のために使用する + +各機能テストは、実行中のclickhouseサーバーに一つまたは複数のクエリを送信し、参照と結果を比較します。 + +テストは `testsies` ディレクトリ。 つのサブディレクトリがあります: `stateless` と `stateful`. ステートレステストでは、プリロードされたテストデータを使用せずにクエリを実行します。 ステートフルテストでは、Yandexのテストデータが必要です。メトリカと一般市民には利用できません。 我々は唯一の使用する傾向があります `stateless` テストと新しい追加を避ける `stateful` テスト + +それぞれの試験できるの種類: `.sql` と `.sh`. `.sql` testは、パイプ処理される単純なSQLスクリプトです `clickhouse-client --multiquery --testmode`. `.sh` テストは、単独で実行されるスクリプトです。 + +すべてのテストを実行するには、 `testskhouse-test` ツール。 見て! `--help` 可能なオプションのリストについて。 できるだけ実行すべての試験または実行のサブセットの試験フィルター部分文字列の試験名: `./clickhouse-test substring`. 
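+
+最小のステートレステストがどのようなものかを示す仮の例(ファイル名と内容は説明用です):
+
+``` sql
+-- testsies/0_stateless/00000_example.sql
+SELECT 1 + 1;
+SELECT count() FROM (SELECT number FROM system.numbers LIMIT 10);
+```
+
+対応する `.reference` ファイルには、期待される出力をそのまま記録します:
+
+    2
+    10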
+
+The simplest way to invoke functional tests is to copy `clickhouse-client` to `/usr/bin/`, run `clickhouse-server`, and then run `./clickhouse-test` from its own directory.
+
+To add a new test, create a `.sql` or `.sh` file in the `tests/queries/0_stateless` directory, check it manually, and then generate the `.reference` file in the following way: `clickhouse-client -n --testmode < 00000_test.sql > 00000_test.reference` or `./00000_test.sh > ./00000_test.reference`.
+
+Tests should use (create, drop, etc.) only tables in the `test` database. Tests can also use temporary tables.
+
+If you want to use distributed queries in functional tests, you can leverage the `remote` table function with `127.0.0.{1..2}` addresses, or use predefined test clusters from the server configuration file, such as `test_shard_localhost`.
+
+Some tests are marked with `zookeeper`, `shard` or `long` in their names.
+`zookeeper` is for tests that use ZooKeeper. `shard` is for tests that
+require the server to listen on `127.0.0.*`; `distributed` or `global` have the same
+meaning. `long` is for tests that run slightly longer. You can
+disable these groups of tests using the `--no-zookeeper`, `--no-shard` and
+`--no-long` options, respectively.
+
+## Known Bugs {#known-bugs}
+
+If we know some bugs that can be easily reproduced by functional tests, we place prepared functional tests in the `tests/queries/bugs` directory. These tests will be moved to `tests/queries/0_stateless` when the bugs are fixed.
+
+## Integration Tests {#integration-tests}
+
+Integration tests allow testing ClickHouse in a clustered configuration, as well as ClickHouse interaction with other servers such as MySQL, Postgres and MongoDB. They are useful for emulating network splits, packet drops and so on. These tests are run under Docker and create multiple containers with various software.
+
+See `tests/integration/README.md` on how to run these tests.
+
+Note that integration of ClickHouse with third-party drivers is not tested. Also, we currently don't have integration tests with our JDBC and ODBC drivers.
+
+## Unit Tests {#unit-tests}
+
+Unit tests are useful when you want to test not ClickHouse as a whole, but a single isolated library or class. You can enable or disable the build of tests with the `ENABLE_TESTS` CMake option. Unit tests (and other test programs) are located in `tests` subdirectories across the code. To run unit tests, use `ninja test`. Some tests use `gtest`, but some are just programs that return a non-zero exit code on test failure.
+
+It's not necessary to have unit tests if the code is already covered by functional tests (and functional tests are usually much simpler to use).
+
+## Performance Tests {#performance-tests}
+
+Performance tests allow measuring and comparing the performance of some isolated part of ClickHouse on synthetic queries. Tests are located in the `tests/performance` directory. Each test is represented by an `.xml` file with a description of the test case. Tests are run with the `clickhouse performance-test` tool (that is embedded in the `clickhouse` binary). See `--help` for invocation.
+
+Each test runs one or multiple queries (possibly with combinations of parameters) in a loop with some conditions for stop (like "maximum execution speed is not changing in three seconds") and measures some metrics about query performance (like "maximum execution speed"). Some tests can contain preconditions on preloaded test data.
+
+If you want to improve the performance of ClickHouse in some scenario, and if improvements can be observed on simple queries, it is highly recommended to write a performance test. It always makes sense to use `perf top` or other perf tools during your tests.
+
+## Test Tools and Scripts {#test-tools-and-scripts}
+
+Some programs in the `tests` directory are not prepared tests, but test tools. For example, for `Lexer` there is a tool `dbms/Parsers/tests/lexer` that just does tokenization of stdin and writes the colorized result to stdout. You can use these kinds of tools as code examples and for exploration and manual testing.
+
+You can also place a pair of files `.sh` and `.reference` along with the tool to run it on some predefined input; then the script result can be compared to the `.reference` file. These kinds of tests are not automated.
+
+## Miscellaneous Tests {#miscellanous-tests}
+
+There are tests for external dictionaries located at `tests/external_dictionaries` and for machine-learned models in `tests/external_models`. These tests are not updated and must be transferred to integration tests.
+
+There is a separate test for quorum inserts. This test runs a ClickHouse cluster on separate servers and emulates various failure cases: network split, packet drop (between ClickHouse nodes, between ClickHouse and ZooKeeper, between the ClickHouse server and client, etc.), `kill -9`, `kill -STOP` and `kill -CONT`, like [Jepsen](https://aphyr.com/tags/Jepsen). Then the test checks that all acknowledged inserts were written and all rejected inserts were not.
+
+The quorum test was written by a separate team before ClickHouse was open-sourced. This team no longer works with ClickHouse. The test was accidentally written in Java. For these reasons, the quorum test must be rewritten and moved to integration tests.
+
+## Manual Testing {#manual-testing}
+
+When you develop a new feature, it is reasonable to also test it manually. You can do it with the following steps:
+
+Build ClickHouse. Run ClickHouse from the terminal: change directory to `programs/clickhouse-server` and run it with `./clickhouse-server`. It will use configuration (`config.xml`, `users.xml` and files within the `config.d` and `users.d` directories) from the current directory by default. To connect to the ClickHouse server, run `programs/clickhouse-client/clickhouse-client`.
+
+Note that all clickhouse tools (server, client, and so on) are just symlinks to a single binary named `clickhouse`. This binary can be found at `programs/clickhouse`. All tools can also be invoked as `clickhouse tool` instead of `clickhouse-tool`.
+
+Alternatively, you can install a stable ClickHouse release from the Yandex repository, or build a package for yourself with `./release` in the ClickHouse sources root. Then start the server with `sudo service clickhouse-server start` (or stop to stop the server). Look for logs at `/etc/clickhouse-server/clickhouse-server.log`.
+
+When ClickHouse is already installed on your system, you can build a new `clickhouse` binary and replace the existing binary:
+
+``` bash
+$ sudo service clickhouse-server stop
+$ sudo cp ./clickhouse /usr/bin/
+$ sudo service clickhouse-server start
+```
+
+Also you can stop the system clickhouse-server and run your own with the same configuration but with logging to the terminal:
+
+``` bash
+$ sudo service clickhouse-server stop
+$ sudo -u clickhouse /usr/bin/clickhouse server --config-file /etc/clickhouse-server/config.xml
+```
+
+Example with gdb:
+
+``` bash
+$ sudo -u clickhouse gdb --args /usr/bin/clickhouse server --config-file /etc/clickhouse-server/config.xml
+```
+
+If the system clickhouse-server is already running and you don't want to stop it, you can change the port numbers in your `config.xml` (or override them in a file in the `config.d` directory), provide an appropriate data path, and run it.
+
+The `clickhouse` binary has almost no dependencies and works across a wide range of Linux distributions. To quickly and dirtily test your changes on a server, you can simply `scp` your freshly built `clickhouse` binary to your server and then run it as in the examples above.
+
+## Testing Environment {#testing-environment}
+
+Before publishing a release as stable, we deploy it on a testing environment. The testing environment is a cluster that processes a 1/39 part of [Yandex.Metrica](https://metrica.yandex.com/) data. We share our testing environment with the Yandex.Metrica team. ClickHouse is upgraded without downtime on top of existing data. We look at first that the data is processed successfully without lagging from realtime, that replication continues to work, and that there are no issues visible to the Yandex.Metrica team. The first check can be done in the following way:
+
+``` sql
+SELECT hostName() AS h, any(version()), any(uptime()), max(UTCEventTime), count() FROM remote('example01-01-{1..3}t', merge, hits) WHERE EventDate >= today() - 2 GROUP BY h ORDER BY h;
+```
+
+In some cases we also deploy to the testing environment of our friend teams in Yandex: Market, Cloud, etc. Also we have some hardware servers that are used for development purposes.
+
+## Load Testing {#load-testing}
+
+After deploying to the testing environment, we run load testing with queries from the production cluster. This is done manually.
+
+Make sure you have enabled `query_log` on your production cluster.
+
+Collect the query log for a day or more:
+
+``` bash
+$ clickhouse-client --query="SELECT DISTINCT query FROM system.query_log WHERE event_date = today() AND query LIKE '%ym:%' AND query NOT LIKE '%system.query_log%' AND type = 2 AND is_initial_query" > queries.tsv
+```
+
+This is a somewhat complicated example. `type = 2` filters queries that were executed successfully. `query LIKE '%ym:%'` selects relevant queries from Yandex.Metrica. `is_initial_query` selects only queries that were initiated by the client, not by ClickHouse itself (as part of distributed query processing).
+
+`scp` this log to your testing cluster and run it as follows:
+
+``` bash
+$ clickhouse benchmark --concurrency 16 < queries.tsv
+```
+
+(probably you also want to specify `--user`)
+
+Then leave it for a night or a weekend and go take a rest.
+
+Check that `clickhouse-server` doesn't crash, that the memory footprint is bounded, and that performance does not degrade over time.
+
+Precise query execution timings are not recorded and not compared, due to the high variability of queries and the environment.
+
+## Build Tests {#build-tests}
+
+Build tests allow checking that the build is not broken on various alternative configurations and on some foreign systems. Tests are located in the `ci` directory. They run the build from source inside Docker or Vagrant, and sometimes with `qemu-user-static` inside Docker. These tests are under development, and the test runs are not automated.
+
+Motivation:
+
+Normally we release and run all tests on a single variant of the ClickHouse build. But there are alternative build variants that are not thoroughly tested. Examples:
+
+- build on FreeBSD;
+- build with libraries from system packages;
+- build with shared linking of libraries;
+- build on the AArch64 platform;
+- build on the PowerPC platform.
+
+For example, the build with libraries from system packages is bad practice, because we cannot guarantee what exact version of packages the system will have. But this is really needed by Debian maintainers. For this reason, we at least have to support this variant of the build. Another example: shared linking is a common source of trouble, but it is needed for some enthusiasts.
+
+Though we cannot run all tests on all variants of builds, we want to check at least that the various build variants are not broken. For this purpose, we use build tests.
+
+## Testing for Protocol Compatibility {#testing-for-protocol-compatibility}
+
+When we extend the ClickHouse network protocol, we test manually that the old clickhouse-client works with the new clickhouse-server, and that the new clickhouse-client works with the old clickhouse-server.
+
+## Help from the Compiler {#help-from-the-compiler}
+
+The main ClickHouse code (that is located in the `dbms` directory) is built with `-Wall -Wextra -Werror` and with some additional enabled warnings. Although these options are not enabled for third-party libraries.
+
+Clang has even more useful warnings: you can look for them with `-Weverything` and pick something for the default build.
+
+For production builds, gcc is used (it still generates slightly more efficient code than clang). For development, clang is usually more convenient to use. You can build on your own machine in debug mode (to save the battery of your laptop), but please note that the compiler is able to generate more warnings with `-O3`, due to better control flow and inter-procedure analysis. When building with clang, `libc++` is used instead of `libstdc++`, and when building in debug mode, the debug version of `libc++` is used, which allows catching more errors at runtime.
+
+## Sanitizers {#sanitizers}
+
+**Address sanitizer**.
+We run functional and integration tests under ASan on a per-commit basis.
+
+**Valgrind (Memcheck)**.
+We run functional tests under Valgrind overnight. It takes multiple hours. Currently there is a known false positive in the `re2` library, see [this article](https://research.swtch.com/sparse).
+
+**Undefined behaviour sanitizer.**
+We run functional and integration tests under ASan on a per-commit basis.
+
+**Thread sanitizer**.
+We run functional tests under TSan on a per-commit basis. We don't run integration tests under TSan on a per-commit basis.
+
+**Memory sanitizer**.
+Currently we still don't use MSan.
+
+**Debug allocator.**
+A debug version of `jemalloc` is used for debug builds.
+
+## Fuzzing {#fuzzing}
+
+We use a simple fuzz test to generate random SQL queries and to check that the server doesn't die. Fuzz testing is performed with the Address sanitizer. You can find it in `00746_sql_fuzzy.pl`. This test should be run continuously (overnight and longer).
+
+As of December 2018, we still don't use isolated fuzz testing of library code.
+
+## Security Audit {#security-audit}
+
+People from the Yandex Cloud department do some basic overview of ClickHouse capabilities from the security standpoint.
+
+## Static Analyzers {#static-analyzers}
+
+We run `PVS-Studio` on a per-commit basis. We have evaluated `clang-tidy`, `Coverity`, `cppcheck`, `PVS-Studio`, `tscancode`. You will find instructions for usage in the `tests/instructions/` directory. Also you can read [the article in Russian](https://habr.com/company/yandex/blog/342018/).
+
+If you use `CLion` as an IDE, you can leverage some `clang-tidy` checks out of the box.
+
+## Hardening {#hardening}
+
+`FORTIFY_SOURCE` is used by default. It is almost useless, but it still makes sense in rare cases, so we don't disable it.
+
+## Code Style {#code-style}
+
+Code style rules are described [here](https://clickhouse.tech/docs/en/development/style/).
+
+To check for some common style violations, you can use the `utils/check-style` script.
+
+To force the proper style of your code, you can use `clang-format`. The file `.clang-format` is located at the sources root. It mostly corresponds with our actual code style. But it's not recommended to apply `clang-format` to existing files, because it makes the formatting worse. You can use the `clang-format-diff` tool that you can find in the clang source repository.
+
+Alternatively, you can try the `uncrustify` tool to reformat your code. The configuration is in `uncrustify.cfg` in the sources root. It is less tested than `clang-format`.
+
+`CLion` has its own code formatter that has to be tuned for our code style.
+
+## Metrica B2B Tests {#metrica-b2b-tests}
+
+Each ClickHouse release is tested with the Yandex Metrica and AppMetrica engines. Testing and stable versions of ClickHouse are deployed on VMs and run with a small copy of the Metrica engine that processes a fixed sample of input data. Then the results of the two instances of the Metrica engine are compared together.
+
+These tests are automated by a separate team. Due to the high number of moving parts, the tests fail most of the time for completely unrelated reasons. Most likely these tests have negative value for us. Nevertheless, these tests have proved to be useful in about one or two cases out of hundreds.
+
+## Test Coverage {#test-coverage}
+
+As of July 2018, we don't track test coverage.
+
+## Test Automation {#test-automation}
+
+We run tests with the Yandex internal CI and job automation system named "Sandbox".
+
+Build jobs and tests are run in Sandbox on a per-commit basis. The resulting packages and test results are published on GitHub and can be downloaded by direct links. Artifacts are stored eternally. When you send a pull request on GitHub, we tag it as "can be tested" and our CI system will build ClickHouse packages (release, debug, with address sanitizer, etc.) for you.
+
+We don't use Travis CI due to the limit on time and computational power.
+We don't use Jenkins. It was used before and now we are happy we are not using Jenkins.
+
+[Original article](https://clickhouse.tech/docs/en/development/tests/)
diff --git a/docs/ja/engines/database_engines/index.md b/docs/ja/engines/database_engines/index.md
deleted file mode 120000
index 212787448da..00000000000
--- a/docs/ja/engines/database_engines/index.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/engines/database_engines/index.md
\ No newline at end of file
diff --git a/docs/ja/engines/database_engines/index.md b/docs/ja/engines/database_engines/index.md
new file mode 100644
index 00000000000..93a92e08fc4
--- /dev/null
+++ b/docs/ja/engines/database_engines/index.md
@@ -0,0 +1,21 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_folder_title: Database Engines
+toc_priority: 27
+toc_title: "\u5C0E\u5165"
+---
+
+# Database Engines {#database-engines}
+
+Database engines allow you to work with tables.
+
+By default, ClickHouse uses its native database engine, which provides configurable [table engines](../../engines/table_engines/index.md) and an [SQL dialect](../../sql_reference/syntax.md).
+
+You can also use the following database engines:
+
+- [MySQL](mysql.md)
+
+- [Lazy](lazy.md)
+
+[Original article](https://clickhouse.tech/docs/en/database_engines/)
diff --git a/docs/ja/engines/database_engines/lazy.md b/docs/ja/engines/database_engines/lazy.md
deleted file mode 120000
index 034862e683f..00000000000
--- a/docs/ja/engines/database_engines/lazy.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/engines/database_engines/lazy.md
\ No newline at end of file
diff --git a/docs/ja/engines/database_engines/lazy.md b/docs/ja/engines/database_engines/lazy.md
new file mode 100644
index 00000000000..7eb7fe5cc93
--- /dev/null
+++ b/docs/ja/engines/database_engines/lazy.md
@@ -0,0 +1,18 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 31
+toc_title: "\u6020\u3051\u8005"
+---
+
+# Lazy {#lazy}
+
+Keeps tables in RAM only for `expiration_time_in_seconds` seconds after the last access. Can be used only with \*Log tables.
+
+It's optimized for storing many small \*Log tables, for which there is a long time interval between accesses.
+
+## Creating a Database {#creating-a-database}
+
+    CREATE DATABASE testlazy ENGINE = Lazy(expiration_time_in_seconds);
+
+[Original article](https://clickhouse.tech/docs/en/database_engines/lazy/)
diff --git a/docs/ja/engines/database_engines/mysql.md b/docs/ja/engines/database_engines/mysql.md
deleted file mode 120000
index 4370eccbb8a..00000000000
--- a/docs/ja/engines/database_engines/mysql.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/engines/database_engines/mysql.md
\ No newline at end of file
diff --git a/docs/ja/engines/database_engines/mysql.md b/docs/ja/engines/database_engines/mysql.md
new file mode 100644
index 00000000000..4fdb785698f
--- /dev/null
+++ b/docs/ja/engines/database_engines/mysql.md
@@ -0,0 +1,135 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 30
+toc_title: MySQL
+---
+
+# MySQL {#mysql}
+
+Allows connecting to databases on a remote MySQL server and performing `INSERT` and `SELECT` queries to exchange data between ClickHouse and MySQL.
+
+The `MySQL` database engine translates queries to the MySQL server, so you can perform operations such as `SHOW TABLES` or `SHOW CREATE TABLE`.
+
+You cannot perform the following queries:
+
+- `RENAME`
+- `CREATE TABLE`
+- `ALTER`
+
+## Creating a Database {#creating-a-database}
+
+``` sql
+CREATE DATABASE [IF NOT EXISTS] db_name [ON CLUSTER cluster]
+ENGINE = MySQL('host:port', 'database', 'user', 'password')
+```
+
+**Engine Parameters**
+
+- `host:port` — MySQL server address.
+- `database` — Remote database name.
+- `user` — MySQL user.
+- `password` — User password.
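+
+As a minimal sketch of how the parameters line up (the host and credentials below are hypothetical placeholders; a full round-trip example follows in the usage section):
+
+``` sql
+-- Hypothetical connection values; substitute your own server address,
+-- remote database name, MySQL user and password.
+CREATE DATABASE IF NOT EXISTS mysql_remote
+ENGINE = MySQL('mysql-host.example:3306', 'shop', 'reader', 'secret')
+```
+
+Once attached, the remote tables can be queried as `mysql_remote.<table>` without any further setup.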
+
+## Data Types Support {#data_types-support}
+
+| MySQL                            | ClickHouse                                                    |
+|----------------------------------|---------------------------------------------------------------|
+| UNSIGNED TINYINT                 | [UInt8](../../sql_reference/data_types/int_uint.md)           |
+| TINYINT                          | [Int8](../../sql_reference/data_types/int_uint.md)            |
+| UNSIGNED SMALLINT                | [UInt16](../../sql_reference/data_types/int_uint.md)          |
+| SMALLINT                         | [Int16](../../sql_reference/data_types/int_uint.md)           |
+| UNSIGNED INT, UNSIGNED MEDIUMINT | [UInt32](../../sql_reference/data_types/int_uint.md)          |
+| INT, MEDIUMINT                   | [Int32](../../sql_reference/data_types/int_uint.md)           |
+| UNSIGNED BIGINT                  | [UInt64](../../sql_reference/data_types/int_uint.md)          |
+| BIGINT                           | [Int64](../../sql_reference/data_types/int_uint.md)           |
+| FLOAT                            | [Float32](../../sql_reference/data_types/float.md)            |
+| DOUBLE                           | [Float64](../../sql_reference/data_types/float.md)            |
+| DATE                             | [Date](../../sql_reference/data_types/date.md)                |
+| DATETIME, TIMESTAMP              | [DateTime](../../sql_reference/data_types/datetime.md)        |
+| BINARY                           | [FixedString](../../sql_reference/data_types/fixedstring.md)  |
+
+All other MySQL data types are converted into [String](../../sql_reference/data_types/string.md).
+
+[Nullable](../../sql_reference/data_types/nullable.md) is supported.
+
+## Examples of Use {#examples-of-use}
+
+Table in MySQL:
+
+``` text
+mysql> USE test;
+Database changed
+
+mysql> CREATE TABLE `mysql_table` (
+    ->   `int_id` INT NOT NULL AUTO_INCREMENT,
+    ->   `float` FLOAT NOT NULL,
+    ->   PRIMARY KEY (`int_id`));
+Query OK, 0 rows affected (0,09 sec)
+
+mysql> insert into mysql_table (`int_id`, `float`) VALUES (1,2);
+Query OK, 1 row affected (0,00 sec)
+
+mysql> select * from mysql_table;
++------+-----+
+| int_id | value |
++------+-----+
+|      1 |     2 |
++------+-----+
+1 row in set (0,00 sec)
+```
+
+Database in ClickHouse, exchanging data with the MySQL server:
+
+``` sql
+CREATE DATABASE mysql_db ENGINE = MySQL('localhost:3306', 'test', 'my_user', 'user_password')
+```
+
+``` sql
+SHOW DATABASES
+```
+
+``` text
+┌─name─────┐
+│ default  │
+│ mysql_db │
+│ system   │
+└──────────┘
+```
+
+``` sql
+SHOW TABLES FROM mysql_db
+```
+
+``` text
+┌─name─────────┐
+│  mysql_table │
+└──────────────┘
+```
+
+``` sql
+SELECT * FROM mysql_db.mysql_table
+```
+
+``` text
+┌─int_id─┬─value─┐
+│      1 │     2 │
+└────────┴───────┘
+```
+
+``` sql
+INSERT INTO mysql_db.mysql_table VALUES (3,4)
+```
+
+``` sql
+SELECT * FROM mysql_db.mysql_table
+```
+
+``` text
+┌─int_id─┬─value─┐
+│      1 │     2 │
+│      3 │     4 │
+└────────┴───────┘
+```
+
+[Original article](https://clickhouse.tech/docs/en/database_engines/mysql/)
diff --git a/docs/ja/engines/index.md b/docs/ja/engines/index.md
deleted file mode 120000
index 542bc661997..00000000000
--- a/docs/ja/engines/index.md
+++ /dev/null
@@ -1 +0,0 @@
-../../en/engines/index.md
\ No newline at end of file
diff --git a/docs/ja/engines/index.md b/docs/ja/engines/index.md
new file mode 100644
index 00000000000..dcebf255df6
--- /dev/null
+++ b/docs/ja/engines/index.md
@@ -0,0 +1,8 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_folder_title: Engines
+toc_priority: 25
+---
+
+
diff --git a/docs/ja/engines/table_engines/index.md b/docs/ja/engines/table_engines/index.md
deleted file mode 120000
index 6766b40a7bb..00000000000
--- a/docs/ja/engines/table_engines/index.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/engines/table_engines/index.md
\ No newline at end of file
diff --git a/docs/ja/engines/table_engines/index.md b/docs/ja/engines/table_engines/index.md
new file mode 100644
index 00000000000..39ccd66eaa7
--- /dev/null
+++ 
b/docs/ja/engines/table_engines/index.md
@@ -0,0 +1,85 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_folder_title: Table Engines
+toc_priority: 26
+toc_title: "\u5C0E\u5165"
+---
+
+# Table Engines {#table_engines}
+
+The table engine (type of table) determines:
+
+- How and where data is stored: where to write it to, and where to read it from.
+- Which queries are supported, and how.
+- Concurrent data access.
+- Use of indexes, if present.
+- Whether multithreaded request execution is possible.
+- Data replication parameters.
+
+## Engine Families {#engine-families}
+
+### MergeTree {#mergetree}
+
+The most universal and functional table engines for high-load tasks. The property shared by these engines is quick data insertion with subsequent background data processing. The `MergeTree` family engines support data replication (with the [Replicated\*](mergetree_family/replication.md) versions of engines), partitioning, and other features not supported in other engines.
+
+Engines in the family:
+
+- [MergeTree](mergetree_family/mergetree.md)
+- [ReplacingMergeTree](mergetree_family/replacingmergetree.md)
+- [SummingMergeTree](mergetree_family/summingmergetree.md)
+- [AggregatingMergeTree](mergetree_family/aggregatingmergetree.md)
+- [CollapsingMergeTree](mergetree_family/collapsingmergetree.md)
+- [VersionedCollapsingMergeTree](mergetree_family/versionedcollapsingmergetree.md)
+- [GraphiteMergeTree](mergetree_family/graphitemergetree.md)
+
+### Log {#log}
+
+Lightweight [engines](log_family/index.md) with minimum functionality. They are the most effective when you need to quickly write many small tables (up to approximately 1 million rows) and read them later as a whole.
+
+Engines in the family:
+
+- [TinyLog](log_family/tinylog.md)
+- [StripeLog](log_family/stripelog.md)
+- [Log](log_family/log.md)
+
+### Integration Engines {#integration-engines}
+
+Engines for communicating with other data storage and processing systems.
+
+Engines in the family:
+
+- [Kafka](integrations/kafka.md)
+- [MySQL](integrations/mysql.md)
+- [ODBC](integrations/odbc.md)
+- [JDBC](integrations/jdbc.md)
+- [HDFS](integrations/hdfs.md)
+
+### Special Engines {#special-engines}
+
+Engines in the family:
+
+- [Distributed](special/distributed.md)
+- [MaterializedView](special/materializedview.md)
+- [Dictionary](special/dictionary.md)
+- [Merge](special/merge.md)
+- [File](special/file.md)
+- [Null](special/null.md)
+- [Set](special/set.md)
+- [Join](special/join.md)
+- [URL](special/url.md)
+- [View](special/view.md)
+- [Memory](special/memory.md)
+- [Buffer](special/buffer.md)
+
+## Virtual Columns {#table_engines-virtual-columns}
+
+A virtual column is an integral table engine attribute that is defined in the engine source code.
+
+You shouldn't specify virtual columns in the `CREATE TABLE` query, and you can't see them in `SHOW CREATE TABLE` and `DESCRIBE TABLE` query results. Virtual columns are also read-only, so you can't insert data into virtual columns.
+
+To select data from a virtual column, you must specify its name in the `SELECT` query. `SELECT *` doesn't return values from virtual columns.
+
+If you create a table with a column that has the same name as one of the table virtual columns, the virtual column becomes inaccessible. We don't recommend doing this. To help avoid conflicts, virtual column names are usually prefixed with an underscore.
+
+[Original article](https://clickhouse.tech/docs/en/operations/table_engines/)
diff --git a/docs/ja/engines/table_engines/integrations/hdfs.md b/docs/ja/engines/table_engines/integrations/hdfs.md
deleted file mode 120000
index b9a45139c00..00000000000
--- a/docs/ja/engines/table_engines/integrations/hdfs.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../../en/engines/table_engines/integrations/hdfs.md
\ No newline at end of file
diff --git a/docs/ja/engines/table_engines/integrations/hdfs.md b/docs/ja/engines/table_engines/integrations/hdfs.md
new file mode 100644
index 00000000000..501f9210748
--- /dev/null
+++ b/docs/ja/engines/table_engines/integrations/hdfs.md
@@ -0,0 +1,123 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 36
+toc_title: HDFS
+---
+
+# HDFS {#table_engines-hdfs}
+
+This engine provides integration with the [Apache Hadoop](https://en.wikipedia.org/wiki/Apache_Hadoop) ecosystem by allowing data on [HDFS](https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html) to be managed via ClickHouse. This engine is similar
+to the [File](../special/file.md) and [URL](../special/url.md) engines, but provides Hadoop-specific features.
+
+## Usage {#usage}
+
+``` sql
+ENGINE = HDFS(URI, format)
+```
+
+The `URI` parameter is the whole file URI in HDFS.
+The `format` parameter specifies one of the available file formats. To perform
+`SELECT` queries, the format must be supported for input, and to perform
+`INSERT` queries – for output. The available formats are listed in the
+[Formats](../../../interfaces/formats.md#formats) section.
+The path part of `URI` may contain globs. In this case the table would be read-only.
+
+**Example:**
+
+**1.** Set up the `hdfs_engine_table` table:
+
+``` sql
+CREATE TABLE hdfs_engine_table (name String, value UInt32) ENGINE=HDFS('hdfs://hdfs1:9000/other_storage', 'TSV')
+```
+
+**2.** Fill the file:
+
+``` sql
+INSERT INTO hdfs_engine_table VALUES ('one', 1), ('two', 2), ('three', 3)
+```
+
+**3.** Query the data:
+
+``` sql
+SELECT * FROM hdfs_engine_table LIMIT 2
+```
+
+``` text
+┌─name─┬─value─┐
+│ one  │     1 │
+│ two  │     2 │
+└──────┴───────┘
+```
+
+## Implementation Details {#implementation-details}
+
+- Reads and writes can be parallel.
+- Not supported:
+    - `ALTER` and `SELECT...SAMPLE` operations.
+    - Indexes.
+    - Replication.
+
+**Globs in path**
+
+Multiple path components can have globs. For being processed, a file should exist and match the whole path pattern. Listing of files is determined during `SELECT` (not at `CREATE` moment).
+
+- `*` — Substitutes any number of any characters except `/`, including the empty string.
+- `?` — Substitutes any single character.
+- `{some_string,another_string,yet_another_one}` — Substitutes any of strings `'some_string', 'another_string', 'yet_another_one'`.
+- `{N..M}` — Substitutes any number in range from N to M including both borders.
+
+Constructions with `{}` are similar to the [remote](../../../sql_reference/table_functions/remote.md) table function.
+
+**Example**
+
+1. Suppose we have several files in TSV format with the following URIs on HDFS:
+
+- ‘hdfs://hdfs1:9000/some\_dir/some\_file\_1’
+- ‘hdfs://hdfs1:9000/some\_dir/some\_file\_2’
+- ‘hdfs://hdfs1:9000/some\_dir/some\_file\_3’
+- ‘hdfs://hdfs1:9000/another\_dir/some\_file\_1’
+- ‘hdfs://hdfs1:9000/another\_dir/some\_file\_2’
+- ‘hdfs://hdfs1:9000/another\_dir/some\_file\_3’
+
+1. There are several ways to make a table consisting of all six files:
+
+
+
+``` sql
+CREATE TABLE table_with_range (name String, value UInt32) ENGINE = HDFS('hdfs://hdfs1:9000/{some,another}_dir/some_file_{1..3}', 'TSV')
+```
+
+Another way:
+
+``` sql
+CREATE TABLE table_with_question_mark (name String, value UInt32) ENGINE = HDFS('hdfs://hdfs1:9000/{some,another}_dir/some_file_?', 'TSV')
+```
+
+The table consists of all the files in both directories (all files should satisfy the format and schema described in the query):
+
+``` sql
+CREATE TABLE table_with_asterisk (name String, value UInt32) ENGINE = HDFS('hdfs://hdfs1:9000/{some,another}_dir/*', 'TSV')
+```
+
+!!! warning "Warning"
+    If the listing of files contains number ranges with leading zeros, use the construction with braces for each digit separately, or use `?`.
+
+**Example**
+
+Create a table with files named `file000`, `file001`, … , `file999`:
+
+``` sql
+CREATE TABLE big_table (name String, value UInt32) ENGINE = HDFS('hdfs://hdfs1:9000/big_dir/file{0..9}{0..9}{0..9}', 'CSV')
+```
+
+## Virtual Columns {#virtual-columns}
+
+- `_path` — Path to the file.
+- `_file` — Name of the file.
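+
+For instance, assuming the `hdfs_engine_table` from the example above, a sketch of reading these virtual columns (they have to be named explicitly, since `SELECT *` does not return them):
+
+``` sql
+-- _path and _file are read-only virtual columns provided by the engine;
+-- name them explicitly because SELECT * skips virtual columns.
+SELECT name, value, _path, _file
+FROM hdfs_engine_table
+LIMIT 2
+```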
+
+**See Also**
+
+- [Virtual columns](../index.md#table_engines-virtual_columns)
+
+[Original article](https://clickhouse.tech/docs/en/operations/table_engines/hdfs/)
diff --git a/docs/ja/engines/table_engines/integrations/index.md b/docs/ja/engines/table_engines/integrations/index.md
deleted file mode 120000
index ee44dada50a..00000000000
--- a/docs/ja/engines/table_engines/integrations/index.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../../en/engines/table_engines/integrations/index.md
\ No newline at end of file
diff --git a/docs/ja/engines/table_engines/integrations/index.md b/docs/ja/engines/table_engines/integrations/index.md
new file mode 100644
index 00000000000..8d7196c323a
--- /dev/null
+++ b/docs/ja/engines/table_engines/integrations/index.md
@@ -0,0 +1,8 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_folder_title: Integrations
+toc_priority: 30
+---
+
+
diff --git a/docs/ja/engines/table_engines/integrations/jdbc.md b/docs/ja/engines/table_engines/integrations/jdbc.md
deleted file mode 120000
index 45bfff72fad..00000000000
--- a/docs/ja/engines/table_engines/integrations/jdbc.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../../en/engines/table_engines/integrations/jdbc.md
\ No newline at end of file
diff --git a/docs/ja/engines/table_engines/integrations/jdbc.md b/docs/ja/engines/table_engines/integrations/jdbc.md
new file mode 100644
index 00000000000..994ef54e330
--- /dev/null
+++ b/docs/ja/engines/table_engines/integrations/jdbc.md
@@ -0,0 +1,90 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 34
+toc_title: JDBC
+---
+
+# JDBC {#table-engine-jdbc}
+
+Allows ClickHouse to connect to external databases via [JDBC](https://en.wikipedia.org/wiki/Java_Database_Connectivity).
+
+To implement the JDBC connection, ClickHouse uses the separate program [clickhouse-jdbc-bridge](https://github.com/alex-krash/clickhouse-jdbc-bridge) that should run as a daemon.
+
+This engine supports the [Nullable](../../../sql_reference/data_types/nullable.md) data type.
+
+## Creating a Table {#creating-a-table}
+
+``` sql
+CREATE TABLE [IF NOT EXISTS] [db.]table_name
+(
+    columns list...
+)
+ENGINE = JDBC(dbms_uri, external_database, external_table)
+```
+
+**Engine Parameters**
+
+- `dbms_uri` — URI of an external DBMS.
+
+    Format: `jdbc:<driver_name>://<host_name>:<port>/?user=<username>&password=<password>`.
+    Example for MySQL: `jdbc:mysql://localhost:3306/?user=root&password=root`.
+
+- `external_database` — Database in an external DBMS.
+
+- `external_table` — Name of the table in `external_database`.
+
+## Usage Example {#usage-example}
+
+Creating a table in MySQL server by connecting directly with its console client:
+
+``` text
+mysql> CREATE TABLE `test`.`test` (
+    ->   `int_id` INT NOT NULL AUTO_INCREMENT,
+    ->   `int_nullable` INT NULL DEFAULT NULL,
+    ->   `float` FLOAT NOT NULL,
+    ->   `float_nullable` FLOAT NULL DEFAULT NULL,
+    ->   PRIMARY KEY (`int_id`));
+Query OK, 0 rows affected (0,09 sec)
+
+mysql> insert into test (`int_id`, `float`) VALUES (1,2);
+Query OK, 1 row affected (0,00 sec)
+
+mysql> select * from test;
++------+----------+-----+----------+
+| int_id | int_nullable | float | float_nullable |
++------+----------+-----+----------+
+|      1 |         NULL |     2 |           NULL |
++------+----------+-----+----------+
+1 row in set (0,00 sec)
+```
+
+Creating a table in ClickHouse server and selecting data from it:
+
+``` sql
+CREATE TABLE jdbc_table
+(
+    `int_id` Int32,
+    `int_nullable` Nullable(Int32),
+    `float` Float32,
+    `float_nullable` Nullable(Float32)
+)
+ENGINE JDBC('jdbc:mysql://localhost:3306/?user=root&password=root', 'test', 'test')
+```
+
+``` sql
+SELECT *
+FROM jdbc_table
+```
+
+``` text
+┌─int_id─┬─int_nullable─┬─float─┬─float_nullable─┐
+│      1 │         ᴺᵁᴸᴸ │     2 │           ᴺᵁᴸᴸ │
+└────────┴──────────────┴───────┴────────────────┘
+```
+
+## See Also {#see-also}
+
+- [JDBC table function](../../../sql_reference/table_functions/jdbc.md).
+
+[Original article](https://clickhouse.tech/docs/en/operations/table_engines/jdbc/)
diff --git a/docs/ja/engines/table_engines/integrations/kafka.md b/docs/ja/engines/table_engines/integrations/kafka.md
deleted file mode 120000
index 49fa5cb5bb3..00000000000
--- a/docs/ja/engines/table_engines/integrations/kafka.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../../en/engines/table_engines/integrations/kafka.md
\ No newline at end of file
diff --git a/docs/ja/engines/table_engines/integrations/kafka.md b/docs/ja/engines/table_engines/integrations/kafka.md
new file mode 100644
index 00000000000..15bd543e73e
--- /dev/null
+++ b/docs/ja/engines/table_engines/integrations/kafka.md
@@ -0,0 +1,176 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 32
+toc_title: Kafka
+---
+
+# Kafka {#kafka}
+
+This engine works with [Apache Kafka](http://kafka.apache.org/).
+
+Kafka lets you:
+
+- Publish or subscribe to data flows.
+- Organize fault-tolerant storage.
+- Process streams as they become available.
+
+## Creating a Table {#table_engine-kafka-creating-a-table}
+
+``` sql
+CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
+(
+    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
+    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
+    ...
+) ENGINE = Kafka()
+SETTINGS
+    kafka_broker_list = 'host:port',
+    kafka_topic_list = 'topic1,topic2,...',
+    kafka_group_name = 'group_name',
+    kafka_format = 'data_format'[,]
+    [kafka_row_delimiter = 'delimiter_symbol',]
+    [kafka_schema = '',]
+    [kafka_num_consumers = N,]
+    [kafka_skip_broken_messages = N]
+```
+
+Required parameters:
+
+- `kafka_broker_list` – A comma-separated list of brokers (for example, `localhost:9092`).
+- `kafka_topic_list` – A list of Kafka topics.
+- `kafka_group_name` – A group of Kafka consumers. Reading margins are tracked for each group separately. If you don't want messages to be duplicated in the cluster, use the same group name everywhere.
+- `kafka_format` – Message format. Uses the same notation as the SQL `FORMAT` function, such as `JSONEachRow`. For more information, see the [Formats](../../../interfaces/formats.md) section.
+
+Optional parameters:
+
+- `kafka_row_delimiter` – Delimiter character, which ends the message.
+- `kafka_schema` – Parameter that must be used if the format requires a schema definition. For example, [Cap'n Proto](https://capnproto.org/) requires the path to the schema file and the name of the root `schema.capnp:Message` object.
+- `kafka_num_consumers` – The number of consumers per table. Default: `1`. Specify more consumers if the throughput of one consumer is insufficient. The total number of consumers must not exceed the number of partitions in the topic, since only one consumer can be assigned per partition.
+- `kafka_skip_broken_messages` – Kafka message parser tolerance to schema-incompatible messages per block. Default: `0`. If `kafka_skip_broken_messages = N`, then the engine skips *N* Kafka messages that cannot be parsed (a message equals a row of data).
+
+Examples:
+
+``` sql
+  CREATE TABLE queue (
+    timestamp UInt64,
+    level String,
+    message String
+  ) ENGINE = Kafka('localhost:9092', 'topic', 'group1', 'JSONEachRow');
+
+  SELECT * FROM queue LIMIT 5;
+
+  CREATE TABLE queue2 (
+    timestamp UInt64,
+    level String,
+    message String
+  ) ENGINE = Kafka SETTINGS kafka_broker_list = 'localhost:9092',
+                            kafka_topic_list = 'topic',
+                            kafka_group_name = 'group1',
+                            kafka_format = 'JSONEachRow',
+                            kafka_num_consumers = 4;
+
+  CREATE TABLE queue2 (
+    timestamp UInt64,
+    level String,
+    message String
+  ) ENGINE = Kafka('localhost:9092', 'topic', 'group1')
+              SETTINGS kafka_format = 'JSONEachRow',
+                       kafka_num_consumers = 4;
+```
+<details markdown="1">
+
+<summary>Deprecated Method for Creating a Table</summary>
+
+!!! attention "Attention"
+    Do not use this method in new projects. If possible, switch old projects to the method described above.
+
+``` sql
+Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format
+      [, kafka_row_delimiter, kafka_schema, kafka_num_consumers, kafka_skip_broken_messages])
+```
+
+</details>
+
+## Description {#description}
+
+The delivered messages are tracked automatically, so each message in a group is only counted once. If you want to get the data twice, create a copy of the table with another group name.
+
+Groups are flexible and synced on the cluster. For instance, if you have 10 topics and 5 copies of a table in a cluster, then each copy gets 2 topics. If the number of copies changes, the topics are redistributed across the copies automatically. Read more about this at http://kafka.apache.org/intro.
+
+`SELECT` is not particularly useful for reading messages (except for debugging), because each message can be read only once. It is more practical to create real-time threads using materialized views. To do this:
+
+1. Use the engine to create a Kafka consumer and consider it a data stream.
+2. Create a table with the desired structure.
+3. Create a materialized view that converts data from the engine and puts it into the previously created table.
+
+When the `MATERIALIZED VIEW` joins the engine, it starts collecting data in the background. This allows you to continually receive messages from Kafka and convert them to the required format using `SELECT`.
+This way you can write to several tables with different detail levels (with grouping and aggregation, and without).
+
+Example:
+
+``` sql
+  CREATE TABLE queue (
+    timestamp UInt64,
+    level String,
+    message String
+  ) ENGINE = Kafka('localhost:9092', 'topic', 'group1', 'JSONEachRow');
+
+  CREATE TABLE daily (
+    day Date,
+    level String,
+    total UInt64
+  ) ENGINE = SummingMergeTree(day, (day, level), 8192);
+
+  CREATE MATERIALIZED VIEW consumer TO daily
+    AS SELECT toDate(toDateTime(timestamp)) AS day, level, count() as total
+    FROM queue GROUP BY day, level;
+
+  SELECT level, sum(total) FROM daily GROUP BY level;
+```
+
+To improve performance, received messages are grouped into blocks the size of [max\_insert\_block\_size](../../../operations/server_configuration_parameters/settings.md#settings-max_insert_block_size). If the block wasn't formed within [stream\_flush\_interval\_ms](../../../operations/server_configuration_parameters/settings.md) milliseconds, the data will be flushed to the table regardless of the completeness of the block.
+
+To stop receiving topic data or to change the conversion logic, detach the materialized view:
+
+``` sql
+  DETACH TABLE consumer;
+  ATTACH MATERIALIZED VIEW consumer;
+```
+
+If you want to change the target table by using `ALTER`, we recommend disabling the materialized view to avoid discrepancies between the target table and the data from the view.
+
+## Configuration {#configuration}
+
+Similar to GraphiteMergeTree, the Kafka engine supports extended configuration using the ClickHouse config file. There are two configuration keys that you can use: global (`kafka`) and topic-level (`kafka_*`). The global configuration is applied first, and then the topic-level configuration is applied (if it exists).
+
+``` xml
+  <!-- Global configuration options for all tables of Kafka engine type -->
+  <kafka>
+    <debug>cgrp</debug>
+    <auto_offset_reset>smallest</auto_offset_reset>
+  </kafka>
+
+  <!-- Configuration specific for topic "logs" -->
+  <kafka_logs>
+    <retry_backoff_ms>250</retry_backoff_ms>
+    <fetch_min_bytes>100000</fetch_min_bytes>
+  </kafka_logs>
+```
+
+For a list of possible configuration options, see the [librdkafka configuration reference](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md). Use the underscore (`_`) instead of a dot in the ClickHouse configuration. For example, `check.crcs=true` becomes `<check_crcs>true</check_crcs>`.
+
+## Virtual Columns {#virtual-columns}
+
+- `_topic` — Kafka topic.
+- `_key` — Key of the message.
+- `_offset` — Offset of the message.
+- `_timestamp` — Timestamp of the message.
+- `_partition` — Partition of the Kafka topic.
+
+**See Also**
+
+- [Virtual columns](../index.md#table_engines-virtual_columns)
+
+[Original article](https://clickhouse.tech/docs/en/operations/table_engines/kafka/)
diff --git a/docs/ja/engines/table_engines/integrations/mysql.md b/docs/ja/engines/table_engines/integrations/mysql.md
deleted file mode 120000
index 2d42ce3c198..00000000000
--- a/docs/ja/engines/table_engines/integrations/mysql.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../../en/engines/table_engines/integrations/mysql.md
\ No newline at end of file
diff --git a/docs/ja/engines/table_engines/integrations/mysql.md b/docs/ja/engines/table_engines/integrations/mysql.md
new file mode 100644
index 00000000000..0f4582645eb
--- /dev/null
+++ b/docs/ja/engines/table_engines/integrations/mysql.md
@@ -0,0 +1,105 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 33
+toc_title: MySQL
+---
+
+# MySQL {#mysql}
+
+The MySQL engine allows you to perform `SELECT` queries on data that is stored on a remote MySQL server.
+
+## Creating a Table {#creating-a-table}
+
+``` sql
+CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
+(
+    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [TTL expr1],
+    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2] [TTL expr2],
+    ...
+) ENGINE = MySQL('host:port', 'database', 'table', 'user', 'password'[, replace_query, 'on_duplicate_clause']);
+```
+
+See a detailed description of the [CREATE TABLE](../../../sql_reference/statements/create.md#create-table-query) query.
+
+The table structure can differ from the original MySQL table structure:
+
+- Column names should be the same as in the original MySQL table, but you can use just some of these columns, and in any order.
+- Column types may differ from those in the original MySQL table. ClickHouse tries to [cast](../../../sql_reference/functions/type_conversion_functions.md#type_conversion_function-cast) values to the ClickHouse data types.
+
+**Engine Parameters**
+
+- `host:port` — MySQL server address.
+
+- `database` — Remote database name.
+
+- `table` — Remote table name.
+
+- `user` — MySQL user.
+
+- `password` — User password.
+
+- `replace_query` — Flag that converts `INSERT INTO` queries to `REPLACE INTO`. If `replace_query=1`, the query is substituted.
+
+- `on_duplicate_clause` — The `ON DUPLICATE KEY on_duplicate_clause` expression that is added to the `INSERT` query.
+
+    Example: `INSERT INTO t (c1,c2) VALUES ('a', 2) ON DUPLICATE KEY UPDATE c2 = c2 + 1`, where `on_duplicate_clause` is `UPDATE c2 = c2 + 1`. See the [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/insert-on-duplicate.html) to find which `on_duplicate_clause` you can use with the `ON DUPLICATE KEY` clause.
+
+    To specify `on_duplicate_clause`, you need to pass `0` to the `replace_query` parameter. If you simultaneously pass `replace_query = 1` and `on_duplicate_clause`, ClickHouse generates an exception.
+
+Simple `WHERE` clauses such as `=, !=, >, >=, <, <=` are executed on the MySQL server.
+
+The rest of the conditions and the `LIMIT` sampling constraint are executed in ClickHouse only after the query to MySQL finishes.
+
+## Usage Example {#usage-example}
+
+Table in MySQL:
+
+``` text
+mysql> CREATE TABLE `test`.`test` (
+    ->   `int_id` INT NOT NULL AUTO_INCREMENT,
+    ->   `int_nullable` INT NULL DEFAULT NULL,
+    ->   `float` FLOAT NOT NULL,
+    ->   `float_nullable` FLOAT NULL DEFAULT NULL,
+    ->   PRIMARY KEY (`int_id`));
+Query OK, 0 rows affected (0,09 sec)
+
+mysql> insert into test (`int_id`, `float`) VALUES (1,2);
+Query OK, 1 row affected (0,00 sec)
+
+mysql> select * from test;
++------+----------+-----+----------+
+| int_id | int_nullable | float | float_nullable |
++------+----------+-----+----------+
+|      1 |         NULL |     2 |           NULL |
++------+----------+-----+----------+
+1 row in set (0,00 sec)
+```
+
+Table in ClickHouse, retrieving data from the MySQL table created above:
+
+``` sql
+CREATE TABLE mysql_table
+(
+    `float_nullable` Nullable(Float32),
+    `int_id` Int32
+)
+ENGINE = MySQL('localhost:3306', 'test', 'test', 'bayonet', '123')
+```
+
+``` sql
+SELECT * FROM mysql_table
+```
+
+``` text
+┌─float_nullable─┬─int_id─┐
+│           ᴺᵁᴸᴸ │      1 │
+└────────────────┴────────┘
+```
+
+## See Also {#see-also}
+
+- [The ‘mysql’ table function](../../../sql_reference/table_functions/mysql.md)
+- [Using MySQL as a source of external dictionary](../../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md#dicts-external_dicts_dict_sources-mysql)
+
+[Original article](https://clickhouse.tech/docs/en/operations/table_engines/mysql/)
diff --git a/docs/ja/engines/table_engines/integrations/odbc.md b/docs/ja/engines/table_engines/integrations/odbc.md
deleted file mode 120000
index 0dc491657a2..00000000000
--- a/docs/ja/engines/table_engines/integrations/odbc.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../../en/engines/table_engines/integrations/odbc.md
\ No newline at end of file
diff --git a/docs/ja/engines/table_engines/integrations/odbc.md b/docs/ja/engines/table_engines/integrations/odbc.md
new file mode 100644
index 00000000000..600e502999f
--- /dev/null
+++ b/docs/ja/engines/table_engines/integrations/odbc.md
@@ -0,0 +1,132 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 35
+toc_title: ODBC
+---
+
+# ODBC {#table-engine-odbc}
+
+Allows ClickHouse to connect to external databases via [ODBC](https://en.wikipedia.org/wiki/Open_Database_Connectivity).
+
+To safely implement ODBC connections, ClickHouse uses a separate program `clickhouse-odbc-bridge`. If the ODBC driver is loaded directly from `clickhouse-server`, driver problems can crash the ClickHouse server. ClickHouse automatically starts `clickhouse-odbc-bridge` when it is required. The ODBC bridge program is installed from the same package as `clickhouse-server`.
+
+This engine supports the [Nullable](../../../sql_reference/data_types/nullable.md) data type.
+
+## Creating a Table {#creating-a-table}
+
+``` sql
+CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
+(
+    name1 [type1],
+    name2 [type2],
+    ...
+)
+ENGINE = ODBC(connection_settings, external_database, external_table)
+```
+
+See a detailed description of the [CREATE TABLE](../../../sql_reference/statements/create.md#create-table-query) query.
+
+The table structure can differ from the source table structure:
+
+- Column names should be the same as in the source table, but you can use just some of these columns, and in any order.
+- Column types may differ from those in the source table. ClickHouse tries to [cast](../../../sql_reference/functions/type_conversion_functions.md#type_conversion_function-cast) values to the ClickHouse data types.
+
+**Engine Parameters**
+
+- `connection_settings` — Name of the section with connection settings in the `odbc.ini` file.
+- `external_database` — Name of a database in an external DBMS.
+- `external_table` — Name of a table in the `external_database`.
+
+## Usage Example {#usage-example}
+
+**Retrieving data from the local MySQL installation via ODBC**
+
+This example is checked for Ubuntu Linux 18.04 and MySQL server 5.7.
+
+Ensure that unixODBC and MySQL Connector are installed.
+
+By default (if installed from packages), ClickHouse starts as user `clickhouse`. Thus, you need to create and configure this user in the MySQL server.
+
+``` bash
+$ sudo mysql
+```
+
+``` sql
+mysql> CREATE USER 'clickhouse'@'localhost' IDENTIFIED BY 'clickhouse';
+mysql> GRANT ALL PRIVILEGES ON *.* TO 'clickhouse'@'clickhouse' WITH GRANT OPTION;
+```
+
+Then configure the connection in `/etc/odbc.ini`.
+
+``` bash
+$ cat /etc/odbc.ini
+[mysqlconn]
+DRIVER = /usr/local/lib/libmyodbc5w.so
+SERVER = 127.0.0.1
+PORT = 3306
+DATABASE = test
+USERNAME = clickhouse
+PASSWORD = clickhouse
+```
+
+You can check the connection using the `isql` utility from the unixODBC installation.
+
+``` bash
+$ isql -v mysqlconn
++-------------------------+
+| Connected!              |
+|                         |
+...
+```
+
+Table in MySQL:
+
+``` text
+mysql> CREATE TABLE `test`.`test` (
+    ->   `int_id` INT NOT NULL AUTO_INCREMENT,
+    ->   `int_nullable` INT NULL DEFAULT NULL,
+    ->   `float` FLOAT NOT NULL,
+    ->   `float_nullable` FLOAT NULL DEFAULT NULL,
+    ->   PRIMARY KEY (`int_id`));
+Query OK, 0 rows affected (0,09 sec)
+
+mysql> insert into test (`int_id`, `float`) VALUES (1,2);
+Query OK, 1 row affected (0,00 sec)
+
+mysql> select * from test;
++------+----------+-----+----------+
+| int_id | int_nullable | float | float_nullable |
++------+----------+-----+----------+
+|      1 |         NULL |     2 |           NULL |
++------+----------+-----+----------+
+1 row in set (0,00 sec)
+```
+
+Table in ClickHouse, retrieving data from the MySQL table:
+
+``` sql
+CREATE TABLE odbc_t
+(
+    `int_id` Int32,
+    `float_nullable` Nullable(Float32)
+)
+ENGINE = ODBC('DSN=mysqlconn', 'test', 'test')
+```
+
+``` sql
+SELECT * FROM odbc_t
+```
+
+``` text
+┌─int_id─┬─float_nullable─┐
+│      1 │           ᴺᵁᴸᴸ │
+└────────┴────────────────┘
+```
+
+## See Also {#see-also}
+
+- [ODBC external dictionaries](../../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md#dicts-external_dicts_dict_sources-odbc)
+- [ODBC table function](../../../sql_reference/table_functions/odbc.md)
+
+[Original article](https://clickhouse.tech/docs/en/operations/table_engines/odbc/)
diff --git a/docs/ja/engines/table_engines/log_family/index.md b/docs/ja/engines/table_engines/log_family/index.md
deleted file mode 120000
index c98b72aa933..00000000000
--- a/docs/ja/engines/table_engines/log_family/index.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../../en/engines/table_engines/log_family/index.md
\ No newline at end of file
diff --git a/docs/ja/engines/table_engines/log_family/index.md b/docs/ja/engines/table_engines/log_family/index.md
new file mode 100644
index 00000000000..330dac5b3af
--- /dev/null
+++ b/docs/ja/engines/table_engines/log_family/index.md
@@ -0,0 +1,8 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_folder_title: Log Family
+toc_priority: 29
+---
+
+
diff --git a/docs/ja/engines/table_engines/log_family/log.md b/docs/ja/engines/table_engines/log_family/log.md
deleted file mode 120000
index 4668805c1ab..00000000000
--- a/docs/ja/engines/table_engines/log_family/log.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../../en/engines/table_engines/log_family/log.md
\ No newline at end of file
diff --git a/docs/ja/engines/table_engines/log_family/log.md b/docs/ja/engines/table_engines/log_family/log.md
new file mode 100644
index 00000000000..2d3dce244f8
--- /dev/null
+++ b/docs/ja/engines/table_engines/log_family/log.md
@@ -0,0 +1,16 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 33
+toc_title: "\u30ED\u30B0"
+---
+
+# Log {#log}
+
+The engine belongs to the family of log engines. See the common properties of log engines and their differences in the [Log Engine Family](log_family.md) article.
+
+Log differs from [TinyLog](tinylog.md) in that a small file of "marks" resides with the column files. These marks are written on every data block and contain offsets that indicate where to start reading the file in order to skip the specified number of rows. This makes it possible to read table data in multiple threads.
+For concurrent data access, the read operations can be performed simultaneously, while write operations block reads and each other.
+The Log engine does not support indexes. Similarly, if writing to a table failed, the table is broken, and reading from it returns an error. The Log engine is appropriate for temporary data, write-once tables, and for testing or demonstration purposes.
+
+[Original article](https://clickhouse.tech/docs/en/operations/table_engines/log/)
diff --git a/docs/ja/engines/table_engines/log_family/log_family.md b/docs/ja/engines/table_engines/log_family/log_family.md
deleted file mode 120000
index 42a6360728c..00000000000
--- a/docs/ja/engines/table_engines/log_family/log_family.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../../en/engines/table_engines/log_family/log_family.md
\ No newline at end of file
diff --git a/docs/ja/engines/table_engines/log_family/log_family.md b/docs/ja/engines/table_engines/log_family/log_family.md
new file mode 100644
index 00000000000..9fbdeb11064
--- /dev/null
+++ b/docs/ja/engines/table_engines/log_family/log_family.md
@@ -0,0 +1,46 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 31
+toc_title: "\u5C0E\u5165"
+---
+
+# Log Engine Family {#log-engine-family}
+
+These engines were developed for scenarios when you need to quickly write many small tables (up to about 1 million rows) and read them later as a whole.
+
+Engines of the family:
+
+- [StripeLog](stripelog.md)
+- [Log](log.md)
+- [TinyLog](tinylog.md)
+
+## Common Properties {#common-properties}
+
+Engines:
+
+- Store data on a disk.
+
+- Append data to the end of the file when writing.
+
+- Support locks for concurrent data access.
+
+    During `INSERT` queries, the table is locked, and other queries for reading and writing data both wait for the table to unlock. If there are no data writing queries, any number of data reading queries can be performed concurrently.
+
+- Do not support [mutation](../../../sql_reference/statements/alter.md#alter-mutations) operations.
+
+- Do not support indexes.
+
+    This means that `SELECT` queries for ranges of data are not efficient.
+
+- Do not write data atomically.
+
+    You can get a table with corrupted data if something breaks the write operation, for example, abnormal server shutdown.
+
+## Differences {#differences}
+
+The `TinyLog` engine is the simplest in the family and provides the poorest functionality and lowest efficiency. The `TinyLog` engine doesn't support parallel data reading by several threads. It reads data slower than the other engines in the family that do support parallel reading, and it uses almost as many file descriptors as the `Log` engine because it stores each column in a separate file. Use it in simple low-load scenarios.
+
+The `Log` and `StripeLog` engines support parallel data reading. When reading data, ClickHouse uses multiple threads. Each thread processes a separate data block. The `Log` engine uses a separate file for each column of the table. `StripeLog` stores all the data in one file. As a result, the `StripeLog` engine uses fewer file descriptors in the operating system, but the `Log` engine provides higher efficiency when reading data.
+
+[Original article](https://clickhouse.tech/docs/en/operations/table_engines/log_family/)
diff --git a/docs/ja/engines/table_engines/log_family/stripelog.md b/docs/ja/engines/table_engines/log_family/stripelog.md
deleted file mode 120000
index 74b53f28a21..00000000000
--- a/docs/ja/engines/table_engines/log_family/stripelog.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../../en/engines/table_engines/log_family/stripelog.md
\ No newline at end of file
diff --git a/docs/ja/engines/table_engines/log_family/stripelog.md b/docs/ja/engines/table_engines/log_family/stripelog.md
new file mode 100644
index 00000000000..98d2c099ef7
--- /dev/null
+++ b/docs/ja/engines/table_engines/log_family/stripelog.md
@@ -0,0 +1,95 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 32
+toc_title: "\u30B9\u30C8\u30EA\u30C3\u30D7\u30ED\u30B0"
+---
+
+# StripeLog {#stripelog}
+
+This engine belongs to the family of log engines. See the common properties of log engines and their differences in the [Log Engine Family](log_family.md) article.
+
+Use this engine in scenarios when you need to write many tables with a small amount of data (less than 1 million rows).
+
+## Creating a Table {#table_engines-stripelog-creating-a-table}
+
+``` sql
+CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
+(
+    column1_name [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
+    column2_name [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
+    ...
+) ENGINE = StripeLog
+```
+
+See the detailed description of the [CREATE TABLE](../../../sql_reference/statements/create.md#create-table-query) query.
+
+## Writing the Data {#table_engines-stripelog-writing-the-data}
+
+The `StripeLog` engine stores all the columns in one file. For each `INSERT` query, ClickHouse appends the data block to the end of a table file, writing columns one by one.
+
+For each table, ClickHouse writes the files:
+
+- `data.bin` — Data file.
+- `index.mrk` — File with marks. Marks contain offsets for each column of each data block inserted.
+
+The `StripeLog` engine does not support the `ALTER UPDATE` and `ALTER DELETE` operations.
+
+## Reading the Data {#table_engines-stripelog-reading-the-data}
+
+The file with marks allows ClickHouse to parallelize the reading of data. This means that a `SELECT` query may return rows in an unpredictable order. Use the `ORDER BY` clause to sort rows.
+
+## Example of Use {#table_engines-stripelog-example-of-use}
+
+Creating a table:
+
+``` sql
+CREATE TABLE stripe_log_table
+(
+    timestamp DateTime,
+    message_type String,
+    message String
+)
+ENGINE = StripeLog
+```
+
+Inserting data:
+
+``` sql
+INSERT INTO stripe_log_table VALUES (now(),'REGULAR','The first regular message')
+INSERT INTO stripe_log_table VALUES (now(),'REGULAR','The second regular message'),(now(),'WARNING','The first warning message')
+```
+
+We used two `INSERT` queries to create two data blocks inside the `data.bin` file.
+
+ClickHouse uses multiple threads when selecting data. Each thread reads a separate data block and returns the resulting rows independently as it finishes. As a result, the order of blocks of rows in the output does not match the order of the same blocks in the input in most cases. For example:
+
+``` sql
+SELECT * FROM stripe_log_table
+```
+
+``` text
+┌───────────timestamp─┬─message_type─┬─message────────────────────┐
+│ 2019-01-18 14:27:32 │ REGULAR      │ The second regular message │
+│ 2019-01-18 14:34:53 │ WARNING      │ The first warning message  │
+└─────────────────────┴──────────────┴────────────────────────────┘
+┌───────────timestamp─┬─message_type─┬─message───────────────────┐
+│ 2019-01-18 14:23:43 │ REGULAR      │ The first regular message │
+└─────────────────────┴──────────────┴───────────────────────────┘
+```
+
+Sorting the results (ascending order by default):
+
+``` sql
+SELECT * FROM stripe_log_table ORDER BY timestamp
+```
+
+``` text
+┌───────────timestamp─┬─message_type─┬─message────────────────────┐
+│ 2019-01-18 14:23:43 │ REGULAR      │ The first regular message  │
+│ 2019-01-18 14:27:32 │ REGULAR      │ The second regular message │
+│ 2019-01-18 14:34:53 │ WARNING      │ The first warning message  │
+└─────────────────────┴──────────────┴────────────────────────────┘
```
+
+[Original article](https://clickhouse.tech/docs/en/operations/table_engines/stripelog/)
diff --git a/docs/ja/engines/table_engines/log_family/tinylog.md b/docs/ja/engines/table_engines/log_family/tinylog.md
deleted file mode 120000
index 51054c2f9c1..00000000000
--- a/docs/ja/engines/table_engines/log_family/tinylog.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../../en/engines/table_engines/log_family/tinylog.md
\ No newline at end of file
diff --git a/docs/ja/engines/table_engines/log_family/tinylog.md b/docs/ja/engines/table_engines/log_family/tinylog.md
new file mode 100644
index 00000000000..e3237915cf2
--- /dev/null
+++ b/docs/ja/engines/table_engines/log_family/tinylog.md
@@ -0,0 +1,16 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 34
+toc_title: TinyLog
+---
+
+# TinyLog {#tinylog}
+
+The engine belongs to the log engine family. See [Log Engine Family](log_family.md) for the common properties of log engines and their differences.
+
+This table engine is typically used with the write-once method: write data one time, then read it as many times as necessary. For example, you can use `TinyLog`-type tables for intermediary data that is processed in small batches. Note that storing data in a large number of small tables is inefficient.
+
+Queries are executed in a single stream. In other words, this engine is intended for relatively small tables (up to about 1,000,000 rows). It makes sense to use this table engine if you have many small tables, since it's simpler than the [Log](log.md) engine (fewer files need to be opened).
+
+[Original article](https://clickhouse.tech/docs/en/operations/table_engines/tinylog/)
diff --git a/docs/ja/engines/table_engines/mergetree_family/aggregatingmergetree.md b/docs/ja/engines/table_engines/mergetree_family/aggregatingmergetree.md
deleted file mode 120000
index 326463fabb6..00000000000
--- a/docs/ja/engines/table_engines/mergetree_family/aggregatingmergetree.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../../en/engines/table_engines/mergetree_family/aggregatingmergetree.md
\ No newline at end of file
diff --git
a/docs/ja/engines/table_engines/mergetree_family/aggregatingmergetree.md b/docs/ja/engines/table_engines/mergetree_family/aggregatingmergetree.md
new file mode 100644
index 00000000000..c7ac73d902b
--- /dev/null
+++ b/docs/ja/engines/table_engines/mergetree_family/aggregatingmergetree.md
@@ -0,0 +1,103 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 35
+toc_title: AggregatingMergeTree
+---
+
+# AggregatingMergeTree {#aggregatingmergetree}
+
+The engine inherits from [MergeTree](mergetree.md#table_engines-mergetree), altering the logic for data parts merging. ClickHouse replaces all rows with the same primary key (or, more accurately, with the same [sorting key](mergetree.md)) with a single row (within one data part) that stores a combination of states of aggregate functions.
+
+You can use `AggregatingMergeTree` tables for incremental data aggregation, including for aggregated materialized views.
+
+The engine processes all columns of the [AggregateFunction](../../../sql_reference/data_types/aggregatefunction.md) type.
+
+It is appropriate to use `AggregatingMergeTree` if it reduces the number of rows by orders of magnitude.
+
+## Creating a Table {#creating-a-table}
+
+``` sql
+CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
+(
+    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
+    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
+    ...
+) ENGINE = AggregatingMergeTree()
+[PARTITION BY expr]
+[ORDER BY expr]
+[SAMPLE BY expr]
+[TTL expr]
+[SETTINGS name=value, ...]
+```
+
+For a description of request parameters, see the [request description](../../../sql_reference/statements/create.md).
+
+**Query clauses**
+
+When creating an `AggregatingMergeTree` table, the same [clauses](mergetree.md) are required as when creating a `MergeTree` table.
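+
+As a minimal sketch (the table and column names here are hypothetical, mirroring the materialized-view example further below), a standalone table of this engine simply declares `AggregateFunction` columns alongside the usual `MergeTree` clauses:
+
+``` sql
+-- Hypothetical schema: aggregate states are kept in AggregateFunction columns,
+-- and PARTITION BY / ORDER BY are the same required MergeTree clauses.
+CREATE TABLE agg_visits
+(
+    CounterID UInt32,
+    StartDate Date,
+    Visits AggregateFunction(sum, Int8),
+    Users AggregateFunction(uniq, UInt64)
+)
+ENGINE = AggregatingMergeTree()
+PARTITION BY toYYYYMM(StartDate)
+ORDER BY (CounterID, StartDate)
+```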
+<details markdown="1">
+
+<summary>Deprecated Method for Creating a Table</summary>
+
+!!! attention "Attention"
+    Do not use this method in new projects and, if possible, switch old projects to the method described above.
+
+``` sql
+CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
+(
+    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
+    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
+    ...
+) ENGINE [=] AggregatingMergeTree(date-column [, sampling_expression], (primary, key), index_granularity)
+```
+
+All of the parameters have the same meaning as in `MergeTree`.
+
+</details>
+
+## SELECT and INSERT {#select-and-insert}
+
+To insert data, use an [INSERT SELECT](../../../sql_reference/statements/insert_into.md) query with aggregate -State- functions.
+When selecting data from an `AggregatingMergeTree` table, use a `GROUP BY` clause and the same aggregate functions as when inserting data, but with the `-Merge` suffix.
+
+In the results of a `SELECT` query, the values of `AggregateFunction` type have an implementation-specific binary representation for all of the ClickHouse output formats. If you dump data into, for example, the `TabSeparated` format with a `SELECT` query, then this dump can be loaded back using an `INSERT` query.
+
+## Example of an Aggregated Materialized View {#example-of-an-aggregated-materialized-view}
+
+An `AggregatingMergeTree` materialized view that watches the `test.visits` table:
+
+``` sql
+CREATE MATERIALIZED VIEW test.basic
+ENGINE = AggregatingMergeTree() PARTITION BY toYYYYMM(StartDate) ORDER BY (CounterID, StartDate)
+AS SELECT
+    CounterID,
+    StartDate,
+    sumState(Sign)    AS Visits,
+    uniqState(UserID) AS Users
+FROM test.visits
+GROUP BY CounterID, StartDate;
+```
+
+Inserting data into the `test.visits` table.
+
+``` sql
+INSERT INTO test.visits ...
+```
+
+The data is inserted both into the table and into the view `test.basic`, which performs the aggregation.
+
+To get the aggregated data, we need to execute a query such as `SELECT ... GROUP BY ...` from the view `test.basic`:
+
+``` sql
+SELECT
+    StartDate,
+    sumMerge(Visits) AS Visits,
+    uniqMerge(Users) AS Users
+FROM test.basic
+GROUP BY StartDate
+ORDER BY StartDate;
+```
+
+[Original article](https://clickhouse.tech/docs/en/operations/table_engines/aggregatingmergetree/)
diff --git a/docs/ja/engines/table_engines/mergetree_family/collapsingmergetree.md b/docs/ja/engines/table_engines/mergetree_family/collapsingmergetree.md
deleted file mode 120000
index 3d859f9ca08..00000000000
--- a/docs/ja/engines/table_engines/mergetree_family/collapsingmergetree.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../../en/engines/table_engines/mergetree_family/collapsingmergetree.md
\ No newline at end of file
diff --git a/docs/ja/engines/table_engines/mergetree_family/collapsingmergetree.md b/docs/ja/engines/table_engines/mergetree_family/collapsingmergetree.md
new file mode 100644
index 00000000000..65cc225eac7
--- /dev/null
+++ b/docs/ja/engines/table_engines/mergetree_family/collapsingmergetree.md
@@ -0,0 +1,309 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 36
+toc_title: CollapsingMergeTree
+---
+
+# CollapsingMergeTree {#table_engine-collapsingmergetree}
+
+The engine inherits from [MergeTree](mergetree.md) and adds the logic of rows collapsing to the data parts merge algorithm.
+
+`CollapsingMergeTree` asynchronously deletes (collapses) pairs of rows if all of the fields in the sorting key (`ORDER BY`) are equivalent except for the particular field `Sign`, which can have `1` and `-1` values. Rows without a pair are kept. For more details, see the [Collapsing](#table_engine-collapsingmergetree-collapsing) section of the document.
+
+The engine may significantly reduce the volume of storage and, as a consequence, increase the efficiency of `SELECT` queries.
+
+## Creating a Table {#creating-a-table}
+
+``` sql
+CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
+(
+    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
+    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
+    ...
+) ENGINE = CollapsingMergeTree(sign)
+[PARTITION BY expr]
+[ORDER BY expr]
+[SAMPLE BY expr]
+[SETTINGS name=value, ...]
+```
+
+For a description of query parameters, see the [query description](../../../sql_reference/statements/create.md).
+
+**CollapsingMergeTree Parameters**
+
+- `sign` — Name of the column with the type of row: `1` is a "state" row, `-1` is a "cancel" row.
+
+    Column data type — `Int8`.
+
+**Query clauses**
+
+When creating a `CollapsingMergeTree` table, the same [query clauses](mergetree.md#table_engine-mergetree-creating-a-table) are required as when creating a `MergeTree` table.
    + +テーブルを作成する非推奨の方法 + +!!! attention "注意" + 可能であれば、古いプロジェクトを上記の方法に切り替えてください。 + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] +( + name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], + name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], + ... +) ENGINE [=] CollapsingMergeTree(date-column [, sampling_expression], (primary, key), index_granularity, sign) +``` + +すべてのパラメーターを除く `sign` と同じ意味を持つ `MergeTree`. + +- `sign` — Name of the column with the type of row: `1` — “state” 行, `-1` — “cancel” 行 + + Column Data Type — `Int8`. + +
    + +## 折りたたみ {#table_engine-collapsingmergetree-collapsing} + +### データ {#data} + +あるオブジェクトのデータを継続的に変更する必要がある状況を考えてみましょう。 これは、オブジェクトのための一つの行を持っており、任意の変更でそれを更新する論理的に聞こえるが、更新操作は、ストレージ内のデータの書き換え が必要な場合にデータを書き込むには、迅速に更新できませんが、きの変化をオブジェクトの順にしております。 + +特定の列を使用する `Sign`. もし `Sign = 1` これは、行がオブジェクトの状態であることを意味します。 “state” 行 もし `Sign = -1` これは、同じ属性を持つオブジェクトの状態の取り消しを意味し、それを呼び出しましょう “cancel” 行 + +例えば、しい計算のページのユーザーかサイトは、長くご愛用いただけると思いがあります。 ある時点で、ユーザーアクティビティの状態で次の行を書きます: + +``` text +┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐ +│ 4324182021466249494 │ 5 │ 146 │ 1 │ +└─────────────────────┴───────────┴──────────┴──────┘ +``` + +しばらくして、ユーザーアクティビティの変更を登録し、次の二つの行を書き込みます。 + +``` text +┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐ +│ 4324182021466249494 │ 5 │ 146 │ -1 │ +│ 4324182021466249494 │ 6 │ 185 │ 1 │ +└─────────────────────┴───────────┴──────────┴──────┘ +``` + +最初の行は、オブジェクト(user)の前の状態を取り消します。 取り消された状態の並べ替えキーフィールドをコピーします `Sign`. + +次の行には、現在の状態が含まれています。 + +ユーザーアクティビティの最後の状態だけが必要なので、行 + +``` text +┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐ +│ 4324182021466249494 │ 5 │ 146 │ 1 │ +│ 4324182021466249494 │ 5 │ 146 │ -1 │ +└─────────────────────┴───────────┴──────────┴──────┘ +``` + +オブジェクトの無効な(古い)状態を崩壊させることを削除することができます。 `CollapsingMergeTree` これは、データ部分のマージ中に行います。 + +なぜ私たちは必要2行の各変更のために読む [Algorithm](#table_engine-collapsingmergetree-collapsing-algorithm) 段落。 + +**そのようなアプローチ** + +1. プログラムを書き込み、データ意のオブジェクトをキャンセルはできます。 “Cancel” 文字列はコピーのソートキーの分野において “state” 文字列とその逆 `Sign`. この増加の初期サイズでの保存が可能なデータを書き込む。 +2. 列の長い配列は、書き込みの負荷によるエンジンの効率を低下させます。 より簡単なデータ、より高い効率。 +3. その `SELECT` 結果は、オブジェクトの変更履歴の整合性に強く依存します。 挿入するデータを準備するときは正確です。 たとえば、セッションの深さなどの負でない指標の負の値などです。 + +### Algorithm {#table_engine-collapsingmergetree-collapsing-algorithm} + +ClickHouseがデータパーツをマージすると、同じソートキーを持つ連続した行の各グループ (`ORDER BY` これは、以下の二つの行に縮小されています。 `Sign = 1` (“state” 行)と別の `Sign = -1` (“cancel” 行)。 言い換えれば、エントリは崩壊する。 + +それぞれの結果のデータ部分clickhouse保存: + +1. 最初の “cancel” そして最後の “state” 行の数が “state” と “cancel” 行は一致し、最後の行はaです “state” 行 + +2. 最後の “state” 行、より多くのがある場合 “state” 行よりも “cancel” 行。 + +3. 最初の “cancel” 行、より多くのがある場合 “cancel” 行よりも “state” 行。 + +4. 他のすべてのケースでは、行はありません。 + +また、少なくとも2以上がある場合 “state” 行よりも “cancel” 行、または少なくとも2以上 “cancel” その後の行 “state” ただし、ClickHouseはこの状況を論理エラーとして扱い、サーバーログに記録します。 このエラーは、同じデータが複数回挿入された場合に発生します。 + +したがって、崩壊は統計の計算結果を変更すべきではありません。 +変更は徐々に崩壊し、最終的にはほぼすべてのオブジェクトの最後の状態だけが残った。 + +その `Sign` マージアルゴリズムは、同じソートキーを持つすべての行が同じ結果のデータ部分にあり、同じ物理サーバー上にあることを保証するものではないため、必須で ClickHouse過程 `SELECT` 複数のスレッドを持つクエリは、結果の行の順序を予測することはできません。 完全に取得する必要がある場合は、集計が必要です “collapsed” からのデータ `CollapsingMergeTree` テーブル。 + +折りたたみを完了するには、次のクエリを記述します `GROUP BY` この符号を考慮する句および集計関数。 たとえば、数量を計算するには、以下を使用します `sum(Sign)` 代わりに `count()`. 何かの合計を計算するには、次のようにします `sum(Sign * x)` 代わりに `sum(x)`、など、とも追加 `HAVING sum(Sign) > 0`. 
+ +凝集体 `count`, `sum` と `avg` この方法で計算できます。 合計 `uniq` 算出できる場合にはオブジェクトは、少なくとも一つの状態はまだ崩れていない。 凝集体 `min` と `max` 以下の理由で計算できませんでした `CollapsingMergeTree` 折りたたまれた状態の値の履歴は保存されません。 + +集計せずにデータを抽出する必要がある場合(たとえば、最新の値が特定の条件に一致する行が存在するかどうかをチェックする場合)は、次のように `FINAL` のための修飾語 `FROM` 句。 このアプローチは大幅に少ない効率的です。 + +## 使用例 {#example-of-use} + +データ例: + +``` text +┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐ +│ 4324182021466249494 │ 5 │ 146 │ 1 │ +│ 4324182021466249494 │ 5 │ 146 │ -1 │ +│ 4324182021466249494 │ 6 │ 185 │ 1 │ +└─────────────────────┴───────────┴──────────┴──────┘ +``` + +テーブルの作成: + +``` sql +CREATE TABLE UAct +( + UserID UInt64, + PageViews UInt8, + Duration UInt8, + Sign Int8 +) +ENGINE = CollapsingMergeTree(Sign) +ORDER BY UserID +``` + +データの挿入: + +``` sql +INSERT INTO UAct VALUES (4324182021466249494, 5, 146, 1) +``` + +``` sql +INSERT INTO UAct VALUES (4324182021466249494, 5, 146, -1),(4324182021466249494, 6, 185, 1) +``` + +私たちは二つを使う `INSERT` 二つの異なるデータ部分を作成するクエリ。 また、データの挿入につクエリClickHouseを一つのデータ部分を行いませんが合併します。 + +データの取得: + +``` sql +SELECT * FROM UAct +``` + +``` text +┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐ +│ 4324182021466249494 │ 5 │ 146 │ -1 │ +│ 4324182021466249494 │ 6 │ 185 │ 1 │ +└─────────────────────┴───────────┴──────────┴──────┘ +┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐ +│ 4324182021466249494 │ 5 │ 146 │ 1 │ +└─────────────────────┴───────────┴──────────┴──────┘ +``` + +私たちは何を見て、どこで崩壊していますか? + +二つの `INSERT` クエリーを作成し、2つのデータ部品です。 その `SELECT` クエリは2つのスレッドで実行され、ランダムな行の順序が得られました。 データ部分のマージがまだなかったため、折りたたみは発生しません。 ClickHouseは予測できない未知の瞬間にデータ部分をマージします。 + +このようにして集計: + +``` sql +SELECT + UserID, + sum(PageViews * Sign) AS PageViews, + sum(Duration * Sign) AS Duration +FROM UAct +GROUP BY UserID +HAVING sum(Sign) > 0 +``` + +``` text +┌──────────────UserID─┬─PageViews─┬─Duration─┐ +│ 4324182021466249494 │ 6 │ 185 │ +└─────────────────────┴───────────┴──────────┘ +``` + +集約を必要とせず、強制的に崩壊させたい場合は、以下を使用できます `FINAL` の修飾子 `FROM` 句。 + +``` sql +SELECT * FROM UAct FINAL +``` + +``` text +┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐ +│ 4324182021466249494 │ 6 │ 185 │ 1 │ +└─────────────────────┴───────────┴──────────┴──────┘ +``` + +データを選択するこの方法は非常に非効率的です。 大きなテーブルには使用しないでください。 + +## 別のアプローチの例 {#example-of-another-approach} + +データ例: + +``` text +┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐ +│ 4324182021466249494 │ 5 │ 146 │ 1 │ +│ 4324182021466249494 │ -5 │ -146 │ -1 │ +│ 4324182021466249494 │ 6 │ 185 │ 1 │ +└─────────────────────┴───────────┴──────────┴──────┘ +``` + +アイデアは、マージが唯一のキーフィールドを考慮することです。 そして、 “Cancel” 行符号列を使用せずに合計すると、行の以前のバージョンを等しくする負の値を指定できます。 この方法では、データ型を変更する必要があります `PageViews`,`Duration` UInt8-\>Int16の負の値を格納します。 + +``` sql +CREATE TABLE UAct +( + UserID UInt64, + PageViews Int16, + Duration Int16, + Sign Int8 +) +ENGINE = CollapsingMergeTree(Sign) +ORDER BY UserID +``` + +アプローチをテストしよう: + +``` sql +insert into UAct values(4324182021466249494, 5, 146, 1); +insert into UAct values(4324182021466249494, -5, -146, -1); +insert into UAct values(4324182021466249494, 6, 185, 1); + +select * from UAct final; // avoid using final in production (just for a test or small tables) +``` + +``` text +┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐ +│ 4324182021466249494 │ 6 │ 185 │ 1 │ +└─────────────────────┴───────────┴──────────┴──────┘ +``` + +``` sql +SELECT + UserID, + sum(PageViews) AS PageViews, + sum(Duration) AS Duration +FROM UAct +GROUP BY UserID +```text +┌──────────────UserID─┬─PageViews─┬─Duration─┐ +│ 4324182021466249494 │ 6 │ 185 │ 
+└─────────────────────┴───────────┴──────────┘
+```
+
+``` sql
+select count() FROM UAct
+```
+
+``` text
+┌─count()─┐
+│       3 │
+└─────────┘
+```
+
+``` sql
+optimize table UAct final;
+
+select * FROM UAct
+```
+
+``` text
+┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐
+│ 4324182021466249494 │         6 │      185 │    1 │
+└─────────────────────┴───────────┴──────────┴──────┘
+```
+
+[元の記事](https://clickhouse.tech/docs/en/operations/table_engines/collapsingmergetree/) 
diff --git a/docs/ja/engines/table_engines/mergetree_family/custom_partitioning_key.md b/docs/ja/engines/table_engines/mergetree_family/custom_partitioning_key.md
deleted file mode 120000
index 0a75de92ec2..00000000000
--- a/docs/ja/engines/table_engines/mergetree_family/custom_partitioning_key.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../../en/engines/table_engines/mergetree_family/custom_partitioning_key.md
\ No newline at end of file
diff --git a/docs/ja/engines/table_engines/mergetree_family/custom_partitioning_key.md b/docs/ja/engines/table_engines/mergetree_family/custom_partitioning_key.md
new file mode 100644
index 00000000000..ced7707e770
--- /dev/null
+++ b/docs/ja/engines/table_engines/mergetree_family/custom_partitioning_key.md
@@ -0,0 +1,127 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 32
+toc_title: "\u30AB\u30B9\u30BF\u30E0\u5206\u5272\u30AD\u30FC"
+---
+
+# カスタム分割キー {#custom-partitioning-key}
+
+パーティション分割は、[MergeTree](mergetree.md) ファミリーのテーブル([複製された](replication.md)テーブルを含む)で利用できます。 MergeTreeテーブルに基づく[マテリアライズ表示](../special/materializedview.md)もパーティション分割をサポートします。
+
+パーティションとは、指定された基準によるテーブル内レコードの論理的な組み合わせです。 パーティションは、月別、日別、イベントタイプ別など、任意の基準によって設定できます。 各パーティションは個別に保存されるため、そのデータの操作が簡単になります。 ClickHouseはデータへアクセスする際、可能な限り最小のパーティションのサブセットだけを使用します。
+
+パーティションは、[テーブルの作成](mergetree.md#table_engine-mergetree-creating-a-table)時に `PARTITION BY expr` 句で指定します。 パーティションキーには、テーブルの列を使った任意の式を指定できます。 たとえば、月別にパーティション分割するには、`toYYYYMM(date_column)` という式を使用します:
+
+``` sql
+CREATE TABLE visits
+(
+    VisitDate Date,
+    Hour UInt8,
+    ClientID UUID
+)
+ENGINE = MergeTree()
+PARTITION BY toYYYYMM(VisitDate)
+ORDER BY Hour;
+```
+
+パーティションキーには、式のタプルも指定できます([主キー](mergetree.md#primary-keys-and-indexes-in-queries)と同様です)。 例えば:
+
+``` sql
+ENGINE = ReplicatedCollapsingMergeTree('/clickhouse/tables/name', 'replica1', Sign)
+PARTITION BY (toMonday(StartDate), EventType)
+ORDER BY (CounterID, StartDate, intHash32(UserID));
+```
+
+この例では、その週に発生したイベントタイプによるパーティション分割を設定しています。
+
+テーブルに新しいデータを挿入すると、このデータは主キーでソートされた個別のパーツ(チャンク)として格納されます。 挿入後10〜15分で、同じパーティションのパーツがひとつのパーツにマージされます。
+
+!!! info "情報"
+    マージは、パーティショニング式が同じ値を持つデータパーツに対してのみ機能します。 つまり、**過度に細かいパーティション分割を行うべきではありません**(おおよそ千パーティションを超えるもの)。 そうしないと、ファイルシステム内のファイル数とオープンファイル記述子が不当に多くなり、`SELECT` クエリの性能が低下します。
+
+テーブルのパーツとパーティションを表示するには、[system.parts](../../../operations/system_tables.md#system_tables-parts) テーブルを使用します。 たとえば、月別にパーティション分割された `visits` テーブルがあるとします。 `system.parts` テーブルに対して `SELECT` クエリを実行してみましょう:
+
+``` sql
+SELECT
+    partition,
+    name,
+    active
+FROM system.parts
+WHERE table = 'visits'
+```
+
+``` text
+┌─partition─┬─name───────────┬─active─┐
+│ 201901    │ 201901_1_3_1   │      0 │
+│ 201901    │ 201901_1_9_2   │      1 │
+│ 201901    │ 201901_8_8_0   │      0 │
+│ 201901    │ 201901_9_9_0   │      0 │
+│ 201902    │ 201902_4_6_1   │      1 │
+│ 201902    │ 201902_10_10_0 │      1 │
+│ 201902    │ 201902_11_11_0 │      1 │
+└───────────┴────────────────┴────────┘
+```
+
+`partition` 列にはパーティションの名前が含まれます。 この例には二つのパーティションがあります: `201901` と `201902`.
この列の値を使用して、パーティション名を指定できます。 [ALTER … PARTITION](#alter_manipulations-with-partitions) クエリ。 + +その `name` カラムの名前を格納して、パーティションのデータ部品です。 この列を使用して、パートの名前を指定することができます。 [ALTER ATTACH PART](#alter_attach-partition) クエリ。 + +最初の部分の名前を分解してみましょう: `201901_1_3_1`: + +- `201901` パーティション名です。 +- `1` データブロックの最小数です。 +- `3` データブロックの最大数です。 +- `1` チャンクレベル(マージツリーの深さ)です。 + +!!! info "情報" + 古いタイプのテーブルの部分には名前があります: `20190117_20190123_2_2_0` (最小日付-最大日付-最小ブロック番号-最大ブロック番号-レベル)。 + +その `active` コラムは部品の状態を示します。 `1` アクティブです; `0` 非アクティブです。 に不活性部品、例えば、ソース部品の残りの後の合併によります。 破損したデータ部分も非アクティブとして示されます。 + +この例でわかるように、同じパーティションのいくつかの分離された部分があります(たとえば, `201901_1_3_1` と `201901_1_9_2`). つまり、これらの部分はまだマージされていません。 ClickHouseは、挿入してから約15分後に、データの挿入された部分を定期的にマージします。 さらに、スケジュールされていないマージを実行するには [OPTIMIZE](../../../sql_reference/statements/misc.md#misc_operations-optimize) クエリ。 例えば: + +``` sql +OPTIMIZE TABLE visits PARTITION 201902; +``` + +``` text +┌─partition─┬─name───────────┬─active─┐ +│ 201901 │ 201901_1_3_1 │ 0 │ +│ 201901 │ 201901_1_9_2 │ 1 │ +│ 201901 │ 201901_8_8_0 │ 0 │ +│ 201901 │ 201901_9_9_0 │ 0 │ +│ 201902 │ 201902_4_6_1 │ 0 │ +│ 201902 │ 201902_4_11_2 │ 1 │ +│ 201902 │ 201902_10_10_0 │ 0 │ +│ 201902 │ 201902_11_11_0 │ 0 │ +└───────────┴────────────────┴────────┘ +``` + +不活性パーツを削除する約10分後の統合. + +部品やパーティションのセットを表示する別の方法は、テーブルのディレクトリに移動することです: `/var/lib/clickhouse/data//
    /`. 例えば: + +``` bash +/var/lib/clickhouse/data/default/visits$ ls -l +total 40 +drwxr-xr-x 2 clickhouse clickhouse 4096 Feb 1 16:48 201901_1_3_1 +drwxr-xr-x 2 clickhouse clickhouse 4096 Feb 5 16:17 201901_1_9_2 +drwxr-xr-x 2 clickhouse clickhouse 4096 Feb 5 15:52 201901_8_8_0 +drwxr-xr-x 2 clickhouse clickhouse 4096 Feb 5 15:52 201901_9_9_0 +drwxr-xr-x 2 clickhouse clickhouse 4096 Feb 5 16:17 201902_10_10_0 +drwxr-xr-x 2 clickhouse clickhouse 4096 Feb 5 16:17 201902_11_11_0 +drwxr-xr-x 2 clickhouse clickhouse 4096 Feb 5 16:19 201902_4_11_2 +drwxr-xr-x 2 clickhouse clickhouse 4096 Feb 5 12:09 201902_4_6_1 +drwxr-xr-x 2 clickhouse clickhouse 4096 Feb 1 16:48 detached +``` + +フォルダ ‘201901\_1\_1\_0’, ‘201901\_1\_7\_1’ というように部品のディレクトリです。 各部に関する対応する分割データが含まれまで一定の月のテーブルこの例では、分割による。 + +その `detached` ディレクト [DETACH](#alter_detach-partition) クエリ。 破損した部分も削除されるのではなく、このディレクトリに移動されます。 サーバーはサーバーからの部品を使用しません `detached` directory. You can add, delete, or modify the data in this directory at any time – the server will not know about this until you run the [ATTACH](../../../sql_reference/statements/alter.md#alter_attach-partition) クエリ。 + +オペレーティングサーバーでは、ファイルシステム上の部品またはそのデータのセットを手動で変更することはできません。 非複製のテーブル、これを実行する事ができます。サーバが停止中でないお勧めします。 レプリケートされたテーブルの場合、パートのセットは変更できません。 + +ClickHouseを使用すると、パーティションを削除したり、テーブル間でコピーしたり、バックアップを作成したりできます。 セクションのすべての操作の一覧を参照してください [パーティションとパーツの操作](../../../sql_reference/statements/alter.md#alter_manipulations-with-partitions). + +[元の記事](https://clickhouse.tech/docs/en/operations/table_engines/custom_partitioning_key/) diff --git a/docs/ja/engines/table_engines/mergetree_family/graphitemergetree.md b/docs/ja/engines/table_engines/mergetree_family/graphitemergetree.md deleted file mode 120000 index 030b95dc58a..00000000000 --- a/docs/ja/engines/table_engines/mergetree_family/graphitemergetree.md +++ /dev/null @@ -1 +0,0 @@ -../../../../en/engines/table_engines/mergetree_family/graphitemergetree.md \ No newline at end of file diff --git a/docs/ja/engines/table_engines/mergetree_family/graphitemergetree.md b/docs/ja/engines/table_engines/mergetree_family/graphitemergetree.md new file mode 100644 index 00000000000..af5842b5b9e --- /dev/null +++ b/docs/ja/engines/table_engines/mergetree_family/graphitemergetree.md @@ -0,0 +1,175 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 38 +toc_title: "\u30B0\u30E9\u30D5\u30A3\u30C3\u30C8\u30E1\u30FC\u30EB\u30B0\u30C4\u30EA\ + \u30FC" +--- + +# グラフィットメールグツリー {#graphitemergetree} + +このエン) [黒鉛](http://graphite.readthedocs.io/en/latest/index.html) データ。 GraphiteのデータストアとしてClickHouseを使用したい開発者にとっては役に立つかもしれません。 + +ロールアップが必要ない場合は、任意のclickhouseテーブルエンジンを使用してグラファイトデータを保存できますが、ロールアップが必要な場合は使用します `GraphiteMergeTree`. エンジンはストレージの量を減らし、Graphiteからのクエリの効率を高めます。 + +エンジンを継承性から [MergeTree](mergetree.md). + +## テーブルの作成 {#creating-table} + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] +( + Path String, + Time DateTime, + Value , + Version + ... +) ENGINE = GraphiteMergeTree(config_section) +[PARTITION BY expr] +[ORDER BY expr] +[SAMPLE BY expr] +[SETTINGS name=value, ...] +``` + +の詳細な説明を参照してください [CREATE TABLE](../../../sql_reference/statements/create.md#create-table-query) クエリ。 + +グラファイトデータのテーブルには、次のデータの列が必要です: + +- メトリック名(黒鉛センサ)。 データ型: `String`. + +- メトリックを測定する時間。 データ型: `DateTime`. 
+
+-   メトリックの値。 データ型: 任意の数値型。
+
+-   メトリックのバージョン。 データ型: 任意の数値型。
+
+    ClickHouseは、バージョンが最も高い行、またはバージョンが同じであれば最後に書き込まれた行を保存します。 その他の行は、データパーツのマージ中に削除されます。
+
+これらの列の名前は、ロールアップ構成で設定する必要があります。
+
+**GraphiteMergeTreeパラメータ**
+
+- `config_section` — Name of the section in the configuration file, where are the rules of rollup set.
+
+**クエリ句**
+
+`GraphiteMergeTree` テーブルを作成するときは、`MergeTree` テーブルを作成するときと同じ[句](mergetree.md#table_engine-mergetree-creating-a-table)が必須です(下の作成例のスケッチも参照)。
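+
+たとえば、設定ファイル内のロールアップ設定セクション名が `graphite_rollup` だと仮定した場合の、最小の作成例のスケッチです(テーブル名と PARTITION BY / ORDER BY は説明用の仮のものです):
+
+``` sql
+CREATE TABLE graphite_data
+(
+    Path String,      -- メトリック名
+    Time DateTime,    -- 測定時刻
+    Value Float64,    -- メトリックの値
+    Version UInt32    -- メトリックのバージョン
+)
+ENGINE = GraphiteMergeTree('graphite_rollup')
+PARTITION BY toYYYYMM(Time)
+ORDER BY (Path, Time)
+```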
    + +テーブルを作成する非推奨の方法 + +!!! attention "注意" + 可能であれば、古いプロジェクトを上記の方法に切り替えてください。 + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] +( + EventDate Date, + Path String, + Time DateTime, + Value , + Version + ... +) ENGINE [=] GraphiteMergeTree(date-column [, sampling_expression], (primary, key), index_granularity, config_section) +``` + +すべてのパラメーターを除く `config_section` と同じ意味を持つ `MergeTree`. + +- `config_section` — Name of the section in the configuration file, where are the rules of rollup set. + +
    + +## ロールアップ構成 {#rollup-configuration} + +ロールアップの設定は、次のように定義されます。 [graphite\_rollup](../../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-graphite_rollup) サーバー構成のパラメーター。 パラメータの名前は任意です。 複数の構成を作成し、それらを異なるテーブルに使用できます。 + +ロールアップ構成構造: + + required-columns + patterns + +### 必要な列 {#required-columns} + +- `path_column_name` — The name of the column storing the metric name (Graphite sensor). Default value: `Path`. +- `time_column_name` — The name of the column storing the time of measuring the metric. Default value: `Time`. +- `value_column_name` — The name of the column storing the value of the metric at the time set in `time_column_name`. デフォルト値: `Value`. +- `version_column_name` — The name of the column storing the version of the metric. Default value: `Timestamp`. + +### パターン {#patterns} + +の構造 `patterns` セクション: + +``` text +pattern + regexp + function +pattern + regexp + age + precision + ... +pattern + regexp + function + age + precision + ... +pattern + ... +default + function + age + precision + ... +``` + +!!! warning "注意" + パタ: + + 1. Patterns without `function` or `retention`. + 1. Patterns with both `function` and `retention`. + 1. Pattern `default`. + +行を処理するときに、clickhouseは次のルールをチェックします。 `pattern` セクション。 それぞれの `pattern` (を含む `default`)セクションには `function` 集計のパラメータ, `retention` 変数または両方。 このメトリック名が `regexp`、からのルール `pattern` セクション(またはセクション)が適用されます。 `default` セクションを使用します。 + +フィールドの `pattern` と `default` セクション: + +- `regexp`– A pattern for the metric name. +- `age` – The minimum age of the data in seconds. +- `precision`– How precisely to define the age of the data in seconds. Should be a divisor for 86400 (seconds in a day). +- `function` – The name of the aggregating function to apply to data whose age falls within the range `[age, age + precision]`. 
+ +### 構成例 {#configuration-example} + +``` xml + + Version + + click_cost + any + + 0 + 5 + + + 86400 + 60 + + + + max + + 0 + 60 + + + 3600 + 300 + + + 86400 + 3600 + + + +``` + +[元の記事](https://clickhouse.tech/docs/en/operations/table_engines/graphitemergetree/) diff --git a/docs/ja/engines/table_engines/mergetree_family/index.md b/docs/ja/engines/table_engines/mergetree_family/index.md deleted file mode 120000 index c64256eb04b..00000000000 --- a/docs/ja/engines/table_engines/mergetree_family/index.md +++ /dev/null @@ -1 +0,0 @@ -../../../../en/engines/table_engines/mergetree_family/index.md \ No newline at end of file diff --git a/docs/ja/engines/table_engines/mergetree_family/index.md b/docs/ja/engines/table_engines/mergetree_family/index.md new file mode 100644 index 00000000000..b807da4f929 --- /dev/null +++ b/docs/ja/engines/table_engines/mergetree_family/index.md @@ -0,0 +1,8 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_folder_title: MergeTree Family +toc_priority: 28 +--- + + diff --git a/docs/ja/engines/table_engines/mergetree_family/mergetree.md b/docs/ja/engines/table_engines/mergetree_family/mergetree.md deleted file mode 120000 index 1e801b2625f..00000000000 --- a/docs/ja/engines/table_engines/mergetree_family/mergetree.md +++ /dev/null @@ -1 +0,0 @@ -../../../../en/engines/table_engines/mergetree_family/mergetree.md \ No newline at end of file diff --git a/docs/ja/engines/table_engines/mergetree_family/mergetree.md b/docs/ja/engines/table_engines/mergetree_family/mergetree.md new file mode 100644 index 00000000000..19ad4537492 --- /dev/null +++ b/docs/ja/engines/table_engines/mergetree_family/mergetree.md @@ -0,0 +1,654 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 30 +toc_title: MergeTree +--- + +# Mergetree {#table_engines-mergetree} + +その `MergeTree` この家族 (`*MergeTree`)最も堅牢なクリックハウステーブルエンジンです。 + +のエンジン `MergeTree` ファミリは、非常に大量のデータをテーブルに挿入するために設計されています。 のデータが書き込まれ、テーブル部、そのルール適用のための統合のパーツです。 この方法は、挿入時にストレージ内のデータを継続的に書き換えるよりはるかに効率的です。 + +主な特長: + +- 店舗データを整理によりその有効なタイプを利用します。 + + これにより、データの検索を高速化する小さなスパース索引を作成できます。 + +- パーティションは、 [分割キー](custom_partitioning_key.md) が指定される。 + + ClickHouseは、同じ結果を持つ同じデータに対する一般的な操作よりも効果的なパーティションを持つ特定の操作をサポートします。 ClickHouseも自動的に遮断すると、パーティションデータのパーティショニングキーで指定されたクエリ。 この改善するためのクエリ。 + +- データ複製サポート。 + + の家族 `ReplicatedMergeTree` 表はデータ複製を提供します。 詳細については、 [データ複製](replication.md). + +- データ抜取りサポート。 + + 必要に応じて、テーブル内のデータサンプリング方法を設定できます。 + +!!! info "情報" + その [マージ](../special/merge.md) エンジンはに属しません `*MergeTree` 家族 + +## テーブルの作成 {#table_engine-mergetree-creating-a-table} + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] +( + name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [TTL expr1], + name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2] [TTL expr2], + ... + INDEX index_name1 expr1 TYPE type1(...) GRANULARITY value1, + INDEX index_name2 expr2 TYPE type2(...) GRANULARITY value2 +) ENGINE = MergeTree() +[PARTITION BY expr] +[ORDER BY expr] +[PRIMARY KEY expr] +[SAMPLE BY expr] +[TTL expr [DELETE|TO DISK 'xxx'|TO VOLUME 'xxx'], ...] +[SETTINGS name=value, ...] +``` + +パラメータの詳細については、 [クエリの説明の作成](../../../sql_reference/statements/create.md). + +!!! note "メモ" + `INDEX` は、実験的な機能です [データスキップ索引](#table_engine-mergetree-data_skipping-indexes). + +### クエリ句 {#mergetree-query-clauses} + +- `ENGINE` — Name and parameters of the engine. `ENGINE = MergeTree()`. 
その `MergeTree` engineにはパラメータがありません。 + +- `PARTITION BY` — The [分割キー](custom_partitioning_key.md). + + 月単位でパーティション分割するには、 `toYYYYMM(date_column)` 式、どこ `date_column` 型の日付を持つ列を指定します [日付](../../../sql_reference/data_types/date.md). ここでのパーティション名には、 `"YYYYMM"` フォーマット。 + +- `ORDER BY` — The sorting key. + + 列または任意の式のタプル。 例えば: `ORDER BY (CounterID, EventDate)`. + +- `PRIMARY KEY` — The primary key if it [ソートキーとは異なります](mergetree.md). + + デフォルトでは、プライマリキーはソートキー(プライマリキーで指定)と同じです。 `ORDER BY` 句)。 したがって、ほとんどの場合、別の `PRIMARY KEY` 句。 + +- `SAMPLE BY` — An expression for sampling. + + サンプリング式を使用する場合は、主キーに含める必要があります。 例えば: `SAMPLE BY intHash32(UserID) ORDER BY (CounterID, EventDate, intHash32(UserID))`. + +- `TTL` — A list of rules specifying storage duration of rows and defining logic of automatic parts movement [ディスクとボリューム間](#table_engine-mergetree-multiple-volumes). + + 式には次のものが必要です `Date` または `DateTime` 結果としての列。 例えば: + `TTL date + INTERVAL 1 DAY` + + ルールのタイプ `DELETE|TO DISK 'xxx'|TO VOLUME 'xxx'` 式が満たされている場合(現在の時間に達した場合)、その部分を使用して実行するアクションを指定します。 (`TO DISK 'xxx'`)またはボリュームに (`TO VOLUME 'xxx'`). ルールのデフォルトの種類は削除です (`DELETE`). 複数のルールのリストは指定できますが、複数のルールが存在しないはずです `DELETE` ルール。 + + 詳細については、 [列とテーブルのttl](#table_engine-mergetree-ttl) + +- `SETTINGS` — Additional parameters that control the behavior of the `MergeTree`: + + - `index_granularity` — Maximum number of data rows between the marks of an index. Default value: 8192. See [データ記憶](#mergetree-data-storage). + - `index_granularity_bytes` — Maximum size of data granules in bytes. Default value: 10Mb. To restrict the granule size only by number of rows, set to 0 (not recommended). See [データ記憶](#mergetree-data-storage). + - `enable_mixed_granularity_parts` — Enables or disables transitioning to control the granule size with the `index_granularity_bytes` 設定。 バージョン19.11以前は、 `index_granularity` 制限の微粒のサイズのための設定。 その `index_granularity_bytes` 設定の改善ClickHouse性能の選定からデータをテーブルの大きな行(数十、数百人のメガバイト). 大きな行を持つテーブルがある場合は、この設定を有効にしてテーブルの効率を向上させることができます `SELECT` クエリ。 + - `use_minimalistic_part_header_in_zookeeper` — Storage method of the data parts headers in ZooKeeper. If `use_minimalistic_part_header_in_zookeeper=1`、その後ZooKeeperは以下のデータを格納します。 詳細については、 [設定の説明](../../../operations/server_configuration_parameters/settings.md#server-settings-use_minimalistic_part_header_in_zookeeper) で “Server configuration parameters”. + - `min_merge_bytes_to_use_direct_io` — The minimum data volume for merge operation that is required for using direct I/O access to the storage disk. When merging data parts, ClickHouse calculates the total storage volume of all the data to be merged. If the volume exceeds `min_merge_bytes_to_use_direct_io` バイトClickHouseを読み込みおよび書き込み、データの保存ディスクの直接のI/Oインターフェース (`O_DIRECT` オプション)。 もし `min_merge_bytes_to_use_direct_io = 0` その後、直接I/Oが無効になります。 デフォルト値: `10 * 1024 * 1024 * 1024` バイト。 + + - `merge_with_ttl_timeout` — Minimum delay in seconds before repeating a merge with TTL. Default value: 86400 (1 day). + - `write_final_mark` — Enables or disables writing the final index mark at the end of data part (after the last byte). Default value: 1. Don't turn it off. + - `merge_max_block_size` — Maximum number of rows in block for merge operations. Default value: 8192. + - `storage_policy` — Storage policy. See [複数ブロックデバイスを使用したデータ保存](#table_engine-mergetree-multiple-volumes). 
+
+**セクション設定例**
+
+``` sql
+ENGINE MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity=8192
+```
+
+この例では、月単位でのパーティション分割を設定しています。
+
+また、ユーザーIDのハッシュをサンプリング用の式として設定しています。 これにより、各 `CounterID` と `EventDate` についてテーブル内のデータを擬似乱数的に分散させることができます。 データを選択するときに [SAMPLE](../../../sql_reference/statements/select.md#select-sample-clause) 句を定義すると、ClickHouseはユーザーのサブセットに対して均等な擬似乱数データサンプルを返します(下のスケッチを参照)。
+
+`index_granularity` は8192がデフォルト値であるため、この設定は省略できます。
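+
+たとえば、上記のようにサンプリングを設定したテーブル(ここでは仮に `hits` という名前とします)では、ユーザーの約1割に相当するサブセットで集計を近似できます:
+
+``` sql
+-- ユーザーの約10%のサンプルで件数を概算する(10倍して全体を推定)
+SELECT count() * 10 AS approx_total
+FROM hits
+SAMPLE 1 / 10
+WHERE CounterID = 34
+```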
    + +テーブルを作成する非推奨の方法 + +!!! attention "注意" + 用途では使用しないでください方法で新規プロジェクト. 可能であれば、古いプロジェクトを上記の方法に切り替えます。 + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] +( + name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], + name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], + ... +) ENGINE [=] MergeTree(date-column [, sampling_expression], (primary, key), index_granularity) +``` + +**MergeTree()パラメータ** + +- `date-column` — The name of a column of the [日付](../../../sql_reference/data_types/date.md) タイプ。 ClickHouseを自動でパーティション月に基づきます。 パーティション名は `"YYYYMM"` フォーマット。 +- `sampling_expression` — An expression for sampling. +- `(primary, key)` — Primary key. Type: [タプル()](../../../sql_reference/data_types/tuple.md) +- `index_granularity` — The granularity of an index. The number of data rows between the “marks” インデックスの。 値8192は、ほとんどのタスクに適しています。 + +**例えば** + +``` sql +MergeTree(EventDate, intHash32(UserID), (CounterID, EventDate, intHash32(UserID)), 8192) +``` + +その `MergeTree` エンジンは、メインエンジン構成方法について上記の例と同様に構成される。 +
    + +## データ記憶 {#mergetree-data-storage} + +テーブルのデータ部品の分別によりその有効なタイプを利用します。 + +データがテーブルに挿入されると、別々のデータパーツが作成され、それぞれが主キーで辞書式に並べ替えられます。 たとえば、プライマリキーが次の場合 `(CounterID, Date)` パーツ内のデータは `CounterID`、およびそれぞれの中で `CounterID`、それは順序付けられます `Date`. + +データに属する別のパーティションが分離の異なる部品です。 その背景にclickhouse合併しデータのパーツを効率的に保管します。 パーツに属する別のパーティションがないます。 マージメカニズムは、同じ主キーを持つすべての行が同じデータ部分にあることを保証しません。 + +各データ部分は、論理的に顆粒に分割されます。 顆粒は、データを選択するときにclickhouseが読み取る最小の不可分のデータセットです。 clickhouseは行または値を分割しないため、各granule粒には常に整数の行が含まれます。 顆粒の最初の行は、行の主キーの値でマークされます。 各データ、clickhouseを作成しインデックスファイルを格納するのです。 各列について、主キーにあるかどうかにかかわらず、clickhouseには同じマークも格納されます。 これらのマークまたはデータを見つの直列のファイルです。 + +微粒のサイズはによって制限されます `index_granularity` と `index_granularity_bytes` テーブルエンジンの設定。 微粒の列の数はで置きます `[1, index_granularity]` 行のサイズに応じた範囲。 顆粒のサイズは超えることができます `index_granularity_bytes` 単一行のサイズが設定の値より大きい場合。 この場合、顆粒のサイズは行のサイズに等しくなります。 + +## クエリの主キーとインデックス {#primary-keys-and-indexes-in-queries} + +を取る `(CounterID, Date)` 例として主キー。 この場合、並べ替えとインデックスは次のように示されます: + + Whole data: [---------------------------------------------] + CounterID: [aaaaaaaaaaaaaaaaaabbbbcdeeeeeeeeeeeeefgggggggghhhhhhhhhiiiiiiiiikllllllll] + Date: [1111111222222233331233211111222222333211111112122222223111112223311122333] + Marks: | | | | | | | | | | | + a,1 a,2 a,3 b,3 e,2 e,3 g,1 h,2 i,1 i,3 l,3 + Marks numbers: 0 1 2 3 4 5 6 7 8 9 10 + +デー: + +- `CounterID in ('a', 'h')`、サーバーはマークの範囲のデータを読み取ります `[0, 3)` と `[6, 8)`. +- `CounterID IN ('a', 'h') AND Date = 3`、サーバーはマークの範囲のデータを読み取ります `[1, 3)` と `[7, 8)`. +- `Date = 3`、サーバーは、マークの範囲内のデータを読み取ります `[1, 10]`. + +上記の例としては常に使用するのがより効果的指標により、フルスキャン! + +に乏指数で追加するデータを読み込みます。 主キーの単一の範囲を読み取るとき `index_granularity * 2` 余分な列の各データブロック読み取ることができます。 + +疎指標できる作業は非常に多くのテーブル行において、多くの場合、指数はコンピュータのアプリです。 + +ClickHouseは一意の主キーを必要としません。 同じ主キーで複数の行を挿入できます。 + +### 主キーの選択 {#selecting-the-primary-key} + +主キーの列数は明示的に制限されていません。 データ構造によっては、主キーに多かれ少なかれ列を含めることができます。 この: + +- インデックスのパフォーマン + + プライマリキーが `(a, b)` 次に、別の列を追加します `c` 次の条件が満たされるとパフォーマンスが向上します: + + - 列に条件があるクエリがあります `c`. + - 長いデータ範囲(数倍長い `index_granularity`)の値が同じである場合 `(a, b)` 一般的です。 言い換えれば、別の列を追加すると、非常に長いデータ範囲をスキップできます。 + +- データ圧縮を改善する。 + + ClickHouseは主キーでデータをソートするので、一貫性が高いほど圧縮率が高くなります。 + +- 追加的なロジックが統合データ部分の [CollapsingMergeTree](collapsingmergetree.md#table_engine-collapsingmergetree) と [SummingMergeTree](summingmergetree.md) エンジン + + この場合、それは指定することは理にかなって *ソートキー* これは主キーとは異なります。 + +長いprimary keyに悪影響を及ぼす可能性は、挿入性能やメモリ消費が別の列に主キーに影響を与えないclickhouse性能 `SELECT` クエリ。 + +### ソートキーとは異なる主キーの選択 {#choosing-a-primary-key-that-differs-from-the-sorting-key} + +ソートキー(データ部分の行をソートする式)とは異なる主キー(各マークのインデックスファイルに書き込まれる値を持つ式)を指定することができます。 この場合、主キー式タプルは、並べ替えキー式タプルのプレフィックスである必要があります。 + +この機能は、 [SummingMergeTree](summingmergetree.md) と +[ツつィツ姪"ツつ"ツ債ツづュツつケ](aggregatingmergetree.md) テーブルエンジン。 これらのエンジンを使用する一般的なケースでは、テーブルには二種類の列があります: *寸法* と *対策*. 
典型的なクエリは、任意のメジャー列の値を集計します `GROUP BY` そして次元によるろ過。 SummingMergeTreeとAggregatingMergeTreeは、並べ替えキーの同じ値を持つ行を集計するので、すべての次元を追加するのが自然です。 その結果、キー式は長い列のリストで構成され、このリストは新しく追加されたディメンションで頻繁に更新される必要があります。 + +この場合、主キーにいくつかの列だけを残して、効率的な範囲スキャンを提供し、残りのディメンション列を並べ替えキータプルに追加することが理に + +[ALTER](../../../sql_reference/statements/alter.md) 新しい列がテーブルとソートキーに同時に追加されると、既存のデータパーツを変更する必要がないため、ソートキーの操作は軽量です。 古いソートキーは新しいソートキーの接頭辞であり、新しく追加された列にデータがないため、データはテーブル変更の時点で古いソートキーと新しいソートキーの両方 + +### クエリでの索引とパーティションの使用 {#use-of-indexes-and-partitions-in-queries} + +のために `SELECT` ClickHouseは、インデックスを使用できるかどうかを分析します。 インデックスが使用できるのは、 `WHERE/PREWHERE` 句には、等式または不等式の比較演算を表す式(連結要素のいずれかとして、または完全に)があります。 `IN` または `LIKE` 主キーまたはパーティショニングキーに含まれる列または式、またはこれらの列の特定の部分反復関数、またはこれらの式の論理関係に固定プレフィッ + +したがって、主キーの一つまたは複数の範囲でクエリをすばやく実行することができます。 この例では、特定のトラッキングタグ、特定のタグおよび日付範囲、特定のタグおよび日付、日付範囲を持つ複数のタグなどに対して実行すると、クエ + +次のように構成されたエンジンを見てみましょう: + + ENGINE MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate) SETTINGS index_granularity=8192 + +この場合、クエリで: + +``` sql +SELECT count() FROM table WHERE EventDate = toDate(now()) AND CounterID = 34 +SELECT count() FROM table WHERE EventDate = toDate(now()) AND (CounterID = 34 OR CounterID = 42) +SELECT count() FROM table WHERE ((EventDate >= toDate('2014-01-01') AND EventDate <= toDate('2014-01-31')) OR EventDate = toDate('2014-05-01')) AND CounterID IN (101500, 731962, 160656) AND (CounterID = 101500 OR EventDate != toDate('2014-05-01')) +``` + +ClickHouseの主キー指標のトリムで不正なデータを毎月パーティショニングキーパンフレット、ホームページの間仕切りする不適切な日。 + +上記のクエリのインデックスが使用されるときにも複雑な表現です。 テーブルからの読み取りがいを使用した指標できないっぱいたします。 + +以下の例では、インデックスは使用できません。 + +``` sql +SELECT count() FROM table WHERE CounterID = 34 OR URL LIKE '%upyachka%' +``` + +確認clickhouseできるとの利用時の走行クエリに対して、使用の設定 [force\_index\_by\_date](../../../operations/settings/settings.md#settings-force_index_by_date) と [force\_primary\_key](../../../operations/settings/settings.md). + +の分割による月で読み込みのみこれらのデータブロックを含むからスピーチへのマークの範囲内で適切に取扱います。 この場合、データブロックには多くの日付(月全体まで)のデータが含まれることがあります。 ブロック内では、データは主キーによってソートされます。 このため、主キープレフィックスを指定しない日付条件のみを持つクエリを使用すると、単一の日付よりも多くのデータが読み取られます。 + +### 部分的に単調な主キーに対するインデックスの使用 {#use-of-index-for-partially-monotonic-primary-keys} + +たとえば、月の日数を考えてみましょう。 彼らは形成する [単調系列](https://en.wikipedia.org/wiki/Monotonic_function) 一ヶ月のために、しかし、より長期間単調ではありません。 これは部分的に単調なシーケンスです。 ユーザーが部分的に単調な主キーを持つテーブルを作成する場合、ClickHouseは通常どおりスパースインデックスを作成します。 ユーザーがこの種類のテーブルからデータを選択すると、ClickHouseはクエリ条件を分析します。 ユーザーは、インデックスの二つのマークの間のデータを取得したいと、これらのマークの両方が一ヶ月以内に落ちる場合、それはクエリとインデックスマーク + +クエリパラメーターの範囲内の主キーの値が単調順序を表さない場合、clickhouseはインデックスを使用できません。 この場合、clickhouseはフルスキャン方式を使用します。 + +ClickHouseは、月シーケンスの日数だけでなく、部分的に単調なシーケンスを表すプライマリキーについても、このロジックを使用します。 + +### データスキップインデックス(実験) {#table_engine-mergetree-data_skipping-indexes} + +インデックス宣言は、次の列セクションにあります `CREATE` クエリ。 + +``` sql +INDEX index_name expr TYPE type(...) GRANULARITY granularity_value +``` + +からのテーブルの場合 `*MergeTree` 家族データの飛び指標を指定できます。 + +これらのインデックスは、ブロックの指定された式に関する情報を集約します。 `granularity_value` 微粒(微粒のサイズはを使用して指定されます `index_granularity` テーブルエンジンの設定)。 次に、これらの集約は `SELECT` ディスクから読み取るデータの量を減らすためのクエリ `where` クエリは満たされません。 + +**例えば** + +``` sql +CREATE TABLE table_name +( + u64 UInt64, + i32 Int32, + s String, + ... + INDEX a (u64 * i32, s) TYPE minmax GRANULARITY 3, + INDEX b (u64 * length(s)) TYPE set(1000) GRANULARITY 4 +) ENGINE = MergeTree() +... 
+``` + +この例のインデックスをclickhouseで使用すると、次のクエリでディスクから読み取るデータの量を減らすことができます: + +``` sql +SELECT count() FROM table WHERE s < 'z' +SELECT count() FROM table WHERE u64 * i32 == 10 AND u64 * length(s) >= 1234 +``` + +#### 使用可能なインデックスの種類 {#available-types-of-indices} + +- `minmax` + + 指定された式の極値を格納します(式が指定されている場合 `tuple` そして、それは各要素のための極端をの貯えます `tuple`)を使用して保存情報の飛びブロックのようなデータは、その有効なタイプを利用します。 + +- `set(max_rows)` + + 指定された式の一意の値を格納します。 `max_rows` 行, `max_rows=0` 意味 “no limits”). この値を使用して、 `WHERE` 式はデータブロックでは充足可能ではありません。 + +- `ngrambf_v1(n, size_of_bloom_filter_in_bytes, number_of_hash_functions, random_seed)` + + 店a [Bloom filter](https://en.wikipedia.org/wiki/Bloom_filter) これには、データブロックのすべてのngramsが含まれます。 文字列でのみ動作します。 の最適化に使用することができます `equals`, `like` と `in` 式。 + + - `n` — ngram size, + - `size_of_bloom_filter_in_bytes` — Bloom filter size in bytes (you can use large values here, for example, 256 or 512, because it can be compressed well). + - `number_of_hash_functions` — The number of hash functions used in the Bloom filter. + - `random_seed` — The seed for Bloom filter hash functions. + +- `tokenbf_v1(size_of_bloom_filter_in_bytes, number_of_hash_functions, random_seed)` + + 同じように `ngrambf_v1` しかし、ngramsの代わりにトークンを格納します。 トークンは、英数字以外の文字で区切られた順序です。 + +- `bloom_filter([false_positive])` — Stores a [Bloom filter](https://en.wikipedia.org/wiki/Bloom_filter) 指定された列の場合。 + + 任意 `false_positive` パラメーターは、フィルターから偽陽性の応答を受信する確率です。 可能な値:(0,1)。 デフォルト値:0.025. + + 対応データ型: `Int*`, `UInt*`, `Float*`, `Enum`, `Date`, `DateTime`, `String`, `FixedString`, `Array`, `LowCardinality`, `Nullable`. + + 次の関数はそれを使用できます: [等しい](../../../sql_reference/functions/comparison_functions.md), [notEquals](../../../sql_reference/functions/comparison_functions.md), [で](../../../sql_reference/functions/in_functions.md), [notIn](../../../sql_reference/functions/in_functions.md), [持っている](../../../sql_reference/functions/array_functions.md). 
+ + + +``` sql +INDEX sample_index (u64 * length(s)) TYPE minmax GRANULARITY 4 +INDEX sample_index2 (u64 * length(str), i32 + f64 * 100, date, str) TYPE set(100) GRANULARITY 4 +INDEX sample_index3 (lower(str), str) TYPE ngrambf_v1(3, 256, 2, 0) GRANULARITY 4 +``` + +#### 機能サポート {#functions-support} + +の条件 `WHERE` clauseには、列で操作する関数の呼び出しが含まれます。 列がインデックスの一部である場合、ClickHouseは関数の実行時にこのインデックスを使用しようとします。 ClickHouse支援の異なるサブセットの機能を使用。 + +その `set` indexは、すべての関数で使用できます。 他のインデックスの関数サブセットを以下の表に示します。 + +| 関数(演算子)/インデックス | 主キー | minmax | ngrambf\_v1 | tokenbf\_v1 | bloom\_filter | +|-------------------------------------------------------------------------------------------------------------|--------|--------|-------------|-------------|---------------| +| [equals(=,==))](../../../sql_reference/functions/comparison_functions.md#function-equals) | ✔ | ✔ | ✔ | ✔ | ✔ | +| [notEquals(!=, \<\>)](../../../sql_reference/functions/comparison_functions.md#function-notequals) | ✔ | ✔ | ✔ | ✔ | ✔ | +| [のように](../../../sql_reference/functions/string_search_functions.md#function-like) | ✔ | ✔ | ✔ | ✗ | ✗ | +| [notLike](../../../sql_reference/functions/string_search_functions.md#function-notlike) | ✔ | ✔ | ✔ | ✗ | ✗ | +| [startsWith](../../../sql_reference/functions/string_functions.md#startswith) | ✔ | ✔ | ✔ | ✔ | ✗ | +| [エンドスウィス](../../../sql_reference/functions/string_functions.md#endswith) | ✗ | ✗ | ✔ | ✔ | ✗ | +| [マルチセアチャンネル](../../../sql_reference/functions/string_search_functions.md#function-multisearchany) | ✗ | ✗ | ✔ | ✗ | ✗ | +| [で](../../../sql_reference/functions/in_functions.md#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | +| [notIn](../../../sql_reference/functions/in_functions.md#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | +| [less(\<)](../../../sql_reference/functions/comparison_functions.md#function-less) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [グレーター(\>)](../../../sql_reference/functions/comparison_functions.md#function-greater) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [lessOrEquals(\<=)](../../../sql_reference/functions/comparison_functions.md#function-lessorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [greaterOrEquals(\>=)](../../../sql_reference/functions/comparison_functions.md#function-greaterorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [空](../../../sql_reference/functions/array_functions.md#function-empty) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [notEmpty](../../../sql_reference/functions/array_functions.md#function-notempty) | ✔ | ✔ | ✗ | ✗ | ✗ | +| ハストケンcity in germany | ✗ | ✗ | ✗ | ✔ | ✗ | + +Ngramサイズより小さい定数引数を持つ関数は、 `ngrambf_v1` クエリの最適化のため。 + +ブルでは偽陽性一致すので、 `ngrambf_v1`, `tokenbf_v1`、と `bloom_filter` インデックスは、関数の結果がfalseであると予想されるクエリの最適化には使用できません。: + +- 最適化することができる: + - `s LIKE '%test%'` + - `NOT s NOT LIKE '%test%'` + - `s = 1` + - `NOT s != 1` + - `startsWith(s, 'test')` +- 最適化できません: + - `NOT s LIKE '%test%'` + - `s NOT LIKE '%test%'` + - `NOT s = 1` + - `s != 1` + - `NOT startsWith(s, 'test')` + +## 同時データアクセス {#concurrent-data-access} + +同時テーブルアクセスでは、マルチバージョンを使用します。 つまり、テーブルが同時に読み取られて更新されると、クエリ時に現在のパーツのセットからデータが読み取られます。 長いロックはありません。 挿入は読み取り操作の方法では得られません。 + +テーブルからの読み取りは自動的に並列化されます。 + +## 列とテーブルのttl {#table_engine-mergetree-ttl} + +値の存続期間を決定します。 + +その `TTL` 句は、テーブル全体と個々の列ごとに設定することができます。 テーブルレベルのTTLで指定した論理の自動移動のデータディスクの間とします。 + +式は評価する必要があります [日付](../../../sql_reference/data_types/date.md) または [DateTime](../../../sql_reference/data_types/datetime.md) データ型。 + +例えば: + +``` sql +TTL time_column +TTL time_column + interval +``` + +定義する `interval`、使用 [時間間隔](../../../sql_reference/operators.md#operators-datetime) 演算子。 + +``` sql +TTL date_time + INTERVAL 1 MONTH +TTL date_time + 
INTERVAL 15 HOUR +``` + +### 列ttl {#mergetree-column-ttl} + +列の値が期限切れになると、clickhouseは列のデータ型の既定値に置き換えます。 すべてのカラム値のデータ部分を切clickhouse削除するこのカラムからのデータにファイルシステム. + +その `TTL` キー列には句を使用できません。 + +例: + +TTLを使用したテーブルの作成 + +``` sql +CREATE TABLE example_table +( + d DateTime, + a Int TTL d + INTERVAL 1 MONTH, + b Int TTL d + INTERVAL 1 MONTH, + c String +) +ENGINE = MergeTree +PARTITION BY toYYYYMM(d) +ORDER BY d; +``` + +既存のテーブルの列にttlを追加する + +``` sql +ALTER TABLE example_table + MODIFY COLUMN + c String TTL d + INTERVAL 1 DAY; +``` + +列のttlの変更 + +``` sql +ALTER TABLE example_table + MODIFY COLUMN + c String TTL d + INTERVAL 1 MONTH; +``` + +### テーブルttl {#mergetree-table-ttl} + +テーブルでの表現の除去に終了しました列、複数の表現を自動で部品の移動と [ディスク](#table_engine-mergetree-multiple-volumes). 時テーブルの行の有効期間ClickHouseをすべて削除して対応さい。 部品移動フィーチャの場合、部品のすべての行が移動式の基準を満たしている必要があります。 + +``` sql +TTL expr [DELETE|TO DISK 'aaa'|TO VOLUME 'bbb'], ... +``` + +TTLルールのタイプは、各TTL式に従います。 これは、式が満たされると実行されるアクションに影響します(現在の時間に達します): + +- `DELETE` 削除行を終了しました(デフォルトアクション); +- `TO DISK 'aaa'` -ディスクに部品を移動 `aaa`; +- `TO VOLUME 'bbb'` -ディスクに部品を移動 `bbb`. + +例: + +TTLを使用したテーブルの作成 + +``` sql +CREATE TABLE example_table +( + d DateTime, + a Int +) +ENGINE = MergeTree +PARTITION BY toYYYYMM(d) +ORDER BY d +TTL d + INTERVAL 1 MONTH [DELETE], + d + INTERVAL 1 WEEK TO VOLUME 'aaa', + d + INTERVAL 2 WEEK TO DISK 'bbb'; +``` + +テーブルのttlの変更 + +``` sql +ALTER TABLE example_table + MODIFY TTL d + INTERVAL 1 DAY; +``` + +**データの削除** + +データ切れのttlを取り除きclickhouse合併しデータの部品です。 + +時clickhouseるデータの期間は終了しましたので、行offスケジュール内スケジュールする必要がありません。 このようなマージの頻度を制御するには、次のように設定します [merge\_with\_ttl\_timeout](#mergetree_setting-merge_with_ttl_timeout). 値が低すぎる場合は、多くのリソースを消費する可能性のある多くのオフスケジュールマージを実行します。 + +あなたが実行する場合 `SELECT` 期限切れのデータを取得できます。 それを避けるために、を使用 [OPTIMIZE](../../../sql_reference/statements/misc.md#misc_operations-optimize) 前にクエリ `SELECT`. + +## 複数ブロックデバイスを使用したデータ保存 {#table_engine-mergetree-multiple-volumes} + +### 導入 {#introduction} + +`MergeTree` 家族のテーブルエンジンでデータを複数のブロックデバイス たとえば、特定のテーブルのデータが暗黙的に分割されている場合に便利です “hot” と “cold”. 最新のデータは定期的に要求されますが、必要な領域はわずかです。 それどころか、fat-tailed履歴データはまれに要求される。 複数のディスクが使用可能な場合は、 “hot” データは高速ディスク(たとえば、NVMe Ssdまたはメモリ内)にあります。 “cold” データ-比較的遅いもの(例えば、HDD)。 + +データ部分は最低の移動可能な単位のためのです `MergeTree`-エンジンテーブル。 ある部分に属するデータは、あるディスクに格納されます。 データ部分は背景のディスクの間で(ユーザーの設定に従って)、またによって動かすことができます [ALTER](../../../sql_reference/statements/alter.md#alter_move-partition) クエリ。 + +### 条件 {#terms} + +- Disk — Block device mounted to the filesystem. +- Default disk — Disk that stores the path specified in the [パス](../../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-path) サーバー設定。 +- Volume — Ordered set of equal disks (similar to [JBOD](https://en.wikipedia.org/wiki/Non-RAID_drive_architectures)). +- Storage policy — Set of volumes and the rules for moving data between them. + +の名称を記載することから、システムテーブル, [システム。ストレージ\_policies](../../../operations/system_tables.md#system_tables-storage_policies) と [システム。ディスク](../../../operations/system_tables.md#system_tables-disks). テーブ `storage_policy` の設定 `MergeTree`-エンジン家族のテーブル。 + +### 設定 {#table_engine-mergetree-multiple-volumes-configure} + +ディスク、ボリューム、およびストレージポリシーは、 `` メインファイルのいずれかのタグ `config.xml` または、 `config.d` ディレクトリ。 + +構成の構造: + +``` xml + + + + /mnt/fast_ssd/clickhouse + + + /mnt/hdd1/clickhouse + 10485760 + + + /mnt/hdd2/clickhouse + 10485760 + + + ... + + + ... + +``` + +タグ: + +- `` — Disk name. Names must be different for all disks. 
+- `path` — path under which a server will store data (`data` と `shadow` フォルダ)で終了する必要があります。 ‘/’. +- `keep_free_space_bytes` — the amount of free disk space to be reserved. + +ディスク定義の順序は重要ではありません。 + +ストレージポリシ: + +``` xml + + ... + + + + + disk_name_from_disks_configuration + 1073741824 + + + + + + + 0.2 + + + + + + + + ... + +``` + +タグ: + +- `policy_name_N` — Policy name. Policy names must be unique. +- `volume_name_N` — Volume name. Volume names must be unique. +- `disk` — a disk within a volume. +- `max_data_part_size_bytes` — the maximum size of a part that can be stored on any of the volume's disks. +- `move_factor` — when the amount of available space gets lower than this factor, data automatically start to move on the next volume if any (by default, 0.1). + +Cofigurationの例: + +``` xml + + ... + + + + + disk1 + disk2 + + + + + + + + fast_ssd + 1073741824 + + + disk1 + + + 0.2 + + + ... + +``` + +与えられた例では、 `hdd_in_order` ポリシーの実装 [ラウンドロビン](https://en.wikipedia.org/wiki/Round-robin_scheduling) アプローチ。 したがって、このポリシ (`single`)データパーツは、すべてのディスクに循環順序で格納されます。 こうした政策れぞれの知見について学ぶとともに有が複数ある場合は同様のディスク搭載のシステムがRAIDな設定を行います。 個々のディスクドライブはそれぞれ信頼できないため、複製係数が3以上になるように補正する必要があることに注意してください。 + +システムで使用可能なディスクの種類が異なる場合, `moving_from_ssd_to_hdd` ポリシーは代わりに使用できます。 ボリューム `hot` SSDディスクで構成されています (`fast_ssd`このボリュームに格納できるパーツの最大サイズは1GBです。 サイズが1GBより大きいすべての部品はで直接貯えられます `cold` HDDディスクを含むボリューム `disk1`. +また、一度ディスク `fast_ssd` 80%以上によって満たされて得ます、データはに移ります `disk1` 背景プロセ + +ストレージポリシー内のボリューム列挙の順序は重要です。 ボリュームがオーバーフィルされると、データは次のものに移動されます。 ディスク列挙の順序は、データが順番に格納されるため、重要です。 + +作成時にテーブルは、適用の設定を保存方針で: + +``` sql +CREATE TABLE table_with_non_default_policy ( + EventDate Date, + OrderID UInt64, + BannerID UInt64, + SearchPhrase String +) ENGINE = MergeTree +ORDER BY (OrderID, BannerID) +PARTITION BY toYYYYMM(EventDate) +SETTINGS storage_policy = 'moving_from_ssd_to_hdd' +``` + +その `default` ストレージポリシーは、ボリュームを一つだけ使用することを意味します。 ``. テーブルを作成すると、そのストレージポリシーは変更できません。 + +### 詳細 {#details} + +の場合 `MergeTree` テーブル、データがあるディスクには、異なる方法: + +- 挿入の結果として (`INSERT` クエリ)。 +- バックグラウンドマージ時 [突然変異](../../../sql_reference/statements/alter.md#alter-mutations). +- 別のレプリカか +- 仕切りの凍結の結果として [ALTER TABLE … FREEZE PARTITION](../../../sql_reference/statements/alter.md#alter_freeze-partition). + +すべてのこれらの場合を除き、突然変異とパーティションの凍結は、一部が保存され、大量のディスクに保存政策: + +1. パートを格納するのに十分なディスク領域を持つ最初のボリューム(定義の順序で) (`unreserved_space > current_part_size`)特定のサイズの部品を格納することができます (`max_data_part_size_bytes > current_part_size`)が選択されます。 +2. このボリューム内では、前のデータチャンクを格納するために使用されたディスクに続くディスクが選択され、パーツサイズよりも空き領域が多くなり (`unreserved_space - keep_free_space_bytes > current_part_size`). + +フードの下で、突然変異および仕切りの凍結は利用します [ハードリンク](https://en.wikipedia.org/wiki/Hard_link). ハードリンクとディスクには対応していないため、この場合、パーツの保管と同じディスクの初期ます。 + +バックグラウンドでは、空き領域の量に基づいて部品がボリューム間で移動されます (`move_factor` パラメータ)順序に従って、設定ファイルでボリュームが宣言されます。 +データは最後のものから最初のものに転送されません。 システムテーブルを使用できる [システム。part\_log](../../../operations/system_tables.md#system_tables-part-log) (フィールド `type = MOVE_PART`) [システム。パーツ](../../../operations/system_tables.md#system_tables-parts) (フィールド `path` と `disk`)背景の動きを監視する。 また、詳細な情報はサーバーログに記載されています。 + +ユーザーの力で移動中の一部またはパーティションから量別のクエリ [ALTER TABLE … MOVE PART\|PARTITION … TO VOLUME\|DISK …](../../../sql_reference/statements/alter.md#alter_move-partition)、バックグラウンド操作のすべての制限が考慮されます。 クエリは単独で移動を開始し、バックグラウンド操作が完了するのを待機しません。 十分な空き領域がない場合、または必要な条件のいずれかが満たされない場合、ユーザーはエラーメッセージが表示されます。 + +データの移動はデータの複製を妨げません。 そのため、異なる保管方針を指定することができ、同じテーブルの異なるレプリカ. + +バックグラウンドマージと突然変異の完了後、古い部分は一定時間後にのみ削除されます (`old_parts_lifetime`). 
+この間、パーツが他のボリュームやディスクに移動されることはありません。 したがって、パーツが最終的に削除されるまでは、占有ディスクスペースの評価の対象として考慮されます。
+
+[元の記事](https://clickhouse.tech/docs/ru/operations/table_engines/mergetree/) 
diff --git a/docs/ja/engines/table_engines/mergetree_family/replacingmergetree.md b/docs/ja/engines/table_engines/mergetree_family/replacingmergetree.md
deleted file mode 120000
index c7da629c3a9..00000000000
--- a/docs/ja/engines/table_engines/mergetree_family/replacingmergetree.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../../en/engines/table_engines/mergetree_family/replacingmergetree.md
\ No newline at end of file
diff --git a/docs/ja/engines/table_engines/mergetree_family/replacingmergetree.md b/docs/ja/engines/table_engines/mergetree_family/replacingmergetree.md
new file mode 100644
index 00000000000..11c157aed9b
--- /dev/null
+++ b/docs/ja/engines/table_engines/mergetree_family/replacingmergetree.md
@@ -0,0 +1,70 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 33
+toc_title: ReplacingMergeTree
+---
+
+# ReplacingMergeTree {#replacingmergetree}
+
+このエンジンは [MergeTree](mergetree.md#table_engines-mergetree) と異なり、同じ主キー値(より正確には、同じ[ソートキー](mergetree.md)値)を持つ重複エントリを削除します。
+
+データの重複除去は、マージ中にのみ行われます。 マージは未知の時点でバックグラウンドで行われるため、それを当てにした計画はできません。 一部のデータは未処理のまま残ることがあります。 `OPTIMIZE` クエリでスケジュール外のマージを実行できますが、`OPTIMIZE` クエリは大量のデータを読み書きするため、これに頼ることは想定されていません。
+
+したがって、`ReplacingMergeTree` は重複データをバックグラウンドで削除して保存領域を節約するのに適していますが、重複が存在しないことを保証するものではありません。
+
+## テーブルの作成 {#creating-a-table}
+
+``` sql
+CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
+(
+    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
+    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
+    ...
+) ENGINE = ReplacingMergeTree([ver])
+[PARTITION BY expr]
+[ORDER BY expr]
+[PRIMARY KEY expr]
+[SAMPLE BY expr]
+[SETTINGS name=value, ...]
+```
+
+リクエストパラメータの説明は、[要求の説明](../../../sql_reference/statements/create.md)を参照してください。
+
+**ReplacingMergeTreeパラメータ**
+
+- `ver` — column with version. Type `UInt*`, `Date` または `DateTime`. 省略可能なパラメータ。
+
+    マージ時に、`ReplacingMergeTree` は同じ主キーを持つすべての行から一つだけを残します:
+
+    - `ver` が設定されていない場合は、選択範囲の最後の行。
+    - `ver` が指定されている場合は、バージョンが最大の行。
+
+**クエリ句**
+
+`ReplacingMergeTree` テーブルを作成するときは、`MergeTree` テーブルを作成するときと同じ[句](mergetree.md)が必須です(下の作成例のスケッチも参照)。
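+
+たとえば、`ver` にイベント時刻の列を使う最小のスケッチです(テーブル名と列は説明用の仮のものです):
+
+``` sql
+CREATE TABLE page_state
+(
+    UserID UInt64,
+    URL String,
+    PageViews UInt32,
+    EventTime DateTime  -- バージョン列として使用
+)
+ENGINE = ReplacingMergeTree(EventTime)
+ORDER BY (UserID, URL)
+```
+
+同じ `(UserID, URL)` を持つ行は、マージ時に `EventTime` が最大のものだけが残ります。 マージ前には重複が見える可能性がある点に注意してください。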
    + +テーブルを作成する非推奨の方法 + +!!! attention "注意" + 可能であれば、古いプロジェクトを上記の方法に切り替えてください。 + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] +( + name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], + name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], + ... +) ENGINE [=] ReplacingMergeTree(date-column [, sampling_expression], (primary, key), index_granularity, [ver]) +``` + +すべてのパラメーターを除く `ver` と同じ意味を持つ `MergeTree`. + +- `ver` -バージョンの列。 省略可能なパラメータ。 説明は上記のテキストを参照してください。 + +
+
+[元の記事](https://clickhouse.tech/docs/en/operations/table_engines/replacingmergetree/) 
diff --git a/docs/ja/engines/table_engines/mergetree_family/replication.md b/docs/ja/engines/table_engines/mergetree_family/replication.md
deleted file mode 120000
index 1f7f5396086..00000000000
--- a/docs/ja/engines/table_engines/mergetree_family/replication.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../../en/engines/table_engines/mergetree_family/replication.md
\ No newline at end of file
diff --git a/docs/ja/engines/table_engines/mergetree_family/replication.md b/docs/ja/engines/table_engines/mergetree_family/replication.md
new file mode 100644
index 00000000000..f36af4208de
--- /dev/null
+++ b/docs/ja/engines/table_engines/mergetree_family/replication.md
@@ -0,0 +1,218 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 31
+toc_title: "\u30C7\u30FC\u30BF\u8907\u88FD"
+---
+
+# データ複製 {#table_engines-replication}
+
+複製は、MergeTreeファミリーの以下のテーブルでのみサポートされます:
+
+- ReplicatedMergeTree
+- ReplicatedSummingMergeTree
+- ReplicatedReplacingMergeTree
+- ReplicatedAggregatingMergeTree
+- ReplicatedCollapsingMergeTree
+- ReplicatedVersionedCollapsingMergeTree
+- ReplicatedGraphiteMergeTree
+
+複製は、サーバー全体ではなく、個々のテーブルのレベルで機能します。 サーバーは、複製テーブルと非複製テーブルを同時に保持できます。
+
+複製はシャーディングに依存しません。 各シャードには独自の独立した複製があります。
+
+`INSERT` と `ALTER` クエリでは、圧縮データが複製されます(詳細については [ALTER](../../../sql_reference/statements/alter.md#query_language_queries_alter) のドキュメントを参照してください)。
+
+`CREATE`, `DROP`, `ATTACH`, `DETACH`, `RENAME` クエリは単一サーバーで実行され、レプリケートされません:
+
+- `CREATE TABLE` クエリは、クエリが実行されたサーバー上に新しい複製可能テーブルを作成します。 このテーブルが他のサーバーに既に存在する場合は、新たなレプリカを追加します。
+- `DROP TABLE` クエリは、クエリが実行されたサーバー上のレプリカを削除します。
+- `RENAME` クエリは、いずれかのレプリカ上でテーブルの名前を変更します。 つまり、複製テーブルはレプリカごとに異なる名前を持つことができます。
+
+ClickHouseは、レプリカのメタ情報を格納するために [Apache ZooKeeper](https://zookeeper.apache.org) を使用します。 ZooKeeperバージョン3.4.5以降を使用してください。
+
+レプリケーションを使用するには、サーバー構成の [zookeeper](../../../operations/server_configuration_parameters/settings.md#server-settings_zookeeper) セクションでパラメータを設定します。
+
+!!! attention "注意"
+    セキュリティ設定を軽視しないでください。 ClickHouseは、ZooKeeperのセキュリティサブシステムの `digest` [ACLスキーム](https://zookeeper.apache.org/doc/current/zookeeperProgrammers.html#sc_ZooKeeperAccessControl) をサポートしています。
+
+ZooKeeperクラスタのアドレス設定例(タグ名は元の英語版ドキュメントの構成に基づく再構成です):
+
+``` xml
+<zookeeper>
+    <node index="1">
+        <host>example1</host>
+        <port>2181</port>
+    </node>
+    <node index="2">
+        <host>example2</host>
+        <port>2181</port>
+    </node>
+    <node index="3">
+        <host>example3</host>
+        <port>2181</port>
+    </node>
+</zookeeper>
+```
+
+既存のZooKeeperクラスターを指定すると、システムはそのクラスター上のディレクトリを自身のデータ用に使用します(ディレクトリは複製可能テーブルを作成するときに指定します)。
+
+ZooKeeperが設定ファイルに設定されていない場合は、複製テーブルを作成することはできず、既存の複製テーブルは読み取り専用になります。
+
+ZooKeeperは `SELECT` クエリでは使用されません。 レプリケーションは `SELECT` の性能に影響せず、クエリは非複製テーブルと同じ速さで実行されます。 分散レプリケートテーブルを照会する場合、ClickHouseの動作は設定 [max\_replica\_delay\_for\_distributed\_queries](../../../operations/settings/settings.md#settings-max_replica_delay_for_distributed_queries) と [fallback\_to\_stale\_replicas\_for\_distributed\_queries](../../../operations/settings/settings.md#settings-fallback_to_stale_replicas_for_distributed_queries) によって制御されます。
+
+それぞれの `INSERT` クエリについて、およそ十件のエントリがいくつかのトランザクションを通じてZooKeeperに追加されます。 (より正確には、挿入されるデータの各ブロックに対してです。一つの `INSERT` クエリは一つのブロック、または `max_insert_block_size = 1048576` 行ごとに一つのブロックを含みます。) このため、`INSERT` のレイテンシは非複製テーブルよりわずかに長くなります。 しかし、推奨事項に従って最大でも毎秒一回のバッチで `INSERT` すれば、問題にはなりません。 一つのZooKeeperクラスターの調整下にあるClickHouseクラスター全体では、合計で毎秒数百の `INSERTs` が実行されます。 データ挿入のスループット(秒あたりの行数)は、非複製データと同じくらい高くなります。
+
+非常に大きなクラスターでは、シャードごとに異なるZooKeeperクラスターを使用できます。
しかし、これはyandexで必要なことは証明されていません。metricaクラスター(約300台のサーバー)。 + +複製は非同期、マルチます。 `INSERT` クエリ(と同様 `ALTER`)利用可能な任意のサーバーに送信することができます。 クエリが実行されているサーバーにデータが挿入され、そのデータが他のサーバーにコピーされます。 非同期であるため、最近挿入されたデータが他のレプリカに何らかの遅延で表示されます。 レプリカの一部が使用できない場合、データは使用できるようになった時点で書き込まれます。 レプリカが使用可能な場合、待機時間は、圧縮されたデータのブロックをネットワーク経由で転送するのにかかる時間です。 + +既定では、挿入クエリは、単一のレプリカからのデータの書き込みの確認を待機します。 データが正常に単一のレプリカに書き込まれ、このレプリカを持つサーバーが存在しなくなると、格納されたデータは失われます。 複数のレプリカからデー `insert_quorum` オプション。 + +データの各ブロックは原子的に書き込まれます。 挿入クエリは、以下のブロックに分割されます `max_insert_block_size = 1048576` 行。 言い換えれば、 `INSERT` クエリは、それが原子的に作られ、1048576未満の行を持っています。 + +データブロックは重複除外されます。 同じデータブロック(同じ順序で同じ行を含む同じサイズのデータブロック)の複数書き込みの場合、ブロックは一度だけ書き込まれます。 この理由は、クライアントアプリケーションがデータがdbに書き込まれたかどうかを知らない場合のネットワーク障害の場合です。 `INSERT` クエリーするだけで簡単に繰り返します。 どのレプリカ挿入が同じデータで送信されたかは関係ありません。 `INSERTs` べき等である。 重複排除圧縮パラメータの制御 [merge\_tree](../../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-merge_tree) サーバー設定。 + +レプリケーショ さらなるデータ変換(マージ)は、すべてのレプリカで同じ方法で調整され、実行されます。 これにより、ネットワークの使用を最小限に抑えることができます。 (複製の主な目的は、異なるデータセンター内のデータを複製することです。) + +同じデータの任意の数のレプリカを持つことができます。 yandexの。metricaは、本番環境で二重の複製を使用します。 各サーバーはraid-5またはraid-6を使用し、場合によってはraid-10を使用します。 これは比較的信頼性が高く便利な解決策です。 + +システムは、レプリカ上のデータ同期性を監視し、障害発生後に回復することができます。 フェールオーバーは、自動(データのわずかな差異の場合)または半自動(データが大きく異なる場合、構成エラーを示す可能性があります)です。 + +## 複製テーブルの作成 {#creating-replicated-tables} + +その `Replicated` テーブルエンジン名に接頭辞が追加されます。 例えば:`ReplicatedMergeTree`. + +**複製\*マージツリーパラメータ** + +- `zoo_path` — The path to the table in ZooKeeper. +- `replica_name` — The replica name in ZooKeeper. + +例えば: + +``` sql +CREATE TABLE table_name +( + EventDate DateTime, + CounterID UInt32, + UserID UInt32 +) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{layer}-{shard}/table_name', '{replica}') +PARTITION BY toYYYYMM(EventDate) +ORDER BY (CounterID, EventDate, intHash32(UserID)) +SAMPLE BY intHash32(UserID) +``` + +
    + +非推奨構文の例 + +``` sql +CREATE TABLE table_name +( + EventDate DateTime, + CounterID UInt32, + UserID UInt32 +) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{layer}-{shard}/table_name', '{replica}', EventDate, intHash32(UserID), (CounterID, EventDate, intHash32(UserID), EventTime), 8192) +``` + +
    + +その例としては、これらのパラメータを含むことができ換巻きていただけるボディーです。 置換された価値はから取られます ‘macros’ 設定ファイルのセクション。 例えば: + +``` xml + + 05 + 02 + example05-02-1.yandex.ru + +``` + +の表の飼育係るべきで機能していませんが将来的には再現します。 テーブルの異なる資料は異なる。 +この場合、パスは次の部分で構成されます: + +`/clickhouse/tables/` は共通の接頭辞です。 の使用をお勧めしまうことです。 + +`{layer}-{shard}` シャード識別子です。 この例では、Yandexので、二つの部分で構成されています。Metricaクラスターの使用インターネット上のファイル転送sharding. ほとんどのタスクでは、{shard}置換だけを残すことができます。 + +`table_name` ZooKeeperのテーブルのノードの名前です。 テーブル名と同じにすることをお勧めします。 テーブル名とは対照的に、名前の変更クエリの後に変更されないため、明示的に定義されています。 +*HINT*:データベース名を追加することができます `table_name` 同様に。 例えば `db_name.table_name` + +レプリカ名は同じテーブルの別のレプリカを識別します。 この例のように、このサーバー名を使用できます。 名前は各シャード内で一意である必要があります。 + +置換を使用する代わりに、パラメーターを明示的に定義できます。 これは、テストや小さなクラスターの構成に便利です。 ただし、分散ddlクエリは使用できません (`ON CLUSTER`)この場合。 + +組み合わせによる方がクラスターの使用をお勧めいたしま換その可能性を低減するにはエラーになります。 + +実行する `CREATE TABLE` 各レプリカに対するクエリ。 このクエ + +テーブルに他のレプリカのデータがすでに含まれている後に新しいレプリカを追加すると、データはクエリの実行後に他のレプリカから新しいレプリ つまり、新しいレプリカは他のレプリカと同期します。 + +レプリカを削除するには `DROP TABLE`. However, only one replica is deleted – the one that resides on the server where you run the query. + +## 障害後の復旧 {#recovery-after-failures} + +場合飼育係が不可の場合、サーバは、複製のテーブルスイッチ読み取り専用モードになります。 システムは定期的にzookeeperに接続しようとします。 + +ZooKeeperが使用中に利用できない場合 `INSERT`、またはZooKeeperとやり取りするとエラーが発生し、例外がスローされます。 + +ZooKeeperに接続した後、システムはローカルファイルシステムのデータセットが期待されるデータセットと一致するかどうかをチェックします(ZooKeeperはこの情報 小さな不整合がある場合、システムはデータをレプリカと同期することで解決します。 + +システムが壊れたデータ部分(ファイルのサイズが間違っている)または認識されない部分(ファイルシステムに書き込まれたがzookeeperに記録されていな `detached` サブディレクトリ(削除されません)。 他の部分がコピーからのレプリカ. + +ClickHouseは大量のデータを自動的に削除するなどの破壊的な操作を実行しません。 + +サーバーが起動(またはzookeeperとの新しいセッションを確立)すると、すべてのファイルの量とサイズのみをチェックします。 ファイルサイズが一致しているが、バイトが途中で変更されている場合、これはすぐには検出されません。 `SELECT` クエリ。 クエリは、一致しないチェックサムまたは圧縮ブロックのサイズに関する例外をスローします。 この場合、データパーツは検証キューに追加され、必要に応じてレプリカからコピーされます。 + +データのローカルセットが予想されるセットと大きく異なる場合は、安全機構がトリガーされます。 サーバーはこれをログに入力し、起動を拒否します。 この理由は、シャード上のレプリカが別のシャード上のレプリカのように誤って構成された場合など、このケースが構成エラーを示している可能性がある しかし、しきい値をこの機構の設定かなり低く、こうした状況が起こる中で、失敗を回復しました。 この場合、データは半自動的に復元されます。 “pushing a button”. + +回復を開始するには、ノードを作成します `/path_to_table/replica_name/flags/force_restore_data` で飼育係とコンテンツ、またはコマンドを実行し復元すべての複製のテーブル: + +``` bash +sudo -u clickhouse touch /var/lib/clickhouse/flags/force_restore_data +``` + +次に、サーバーを再起動します。 開始時に、サーバーはこれらのフラグを削除し、回復を開始します。 + +## 完全なデータの損失後の回復 {#recovery-after-complete-data-loss} + +すべてのデータやメタデータ消えたらサーバには、次の手順に従ってください復興: + +1. サーバーにclickhouseをインストール. シャード識別子とレプリカを含むコンフィグファイルで置換を正しく定義します。 +2. サーバー上で手動で複製する必要のある複雑でないテーブルがある場合は、ディレクトリ内のレプリカからデータをコピーします `/var/lib/clickhouse/data/db_name/table_name/`). +3. にあるテーブル定義のコピー `/var/lib/clickhouse/metadata/` レプリカから。 テーブル定義でシャードまたはレプリカ識別子が明示的に定義されている場合は、このレプリカに対応するように修正します。 (あるいは、サーバーを起動してすべての `ATTACH TABLE` にあったはずのクエリ。のsqlファイル `/var/lib/clickhouse/metadata/`.) +4. 回復を開始するには、zookeeperノードを作成します `/path_to_table/replica_name/flags/force_restore_data` 他のコンテンツ、またはコマンドを実行し復元すべての複製のテーブル: `sudo -u clickhouse touch /var/lib/clickhouse/flags/force_restore_data` + +その後、サーバーを起動します(既に実行されている場合は再起動します)。 デー + +代替の回復オプションは削除に関する情報は失われたレプリカから飼育係 (`/path_to_table/replica_name`)、レプリカを再度作成します。 “[複製テーブルの作成](#creating-replicated-tables)”. + +リカバリ中のネットワーク帯域幅に制限はありません。 一度に多くのレプリカを復元する場合は、この点に留意してください。 + +## MergetreeからReplicatedmergetreeへの変換 {#converting-from-mergetree-to-replicatedmergetree} + +我々はこの用語を使用する: `MergeTree` のすべてのテーブルエンジンを参照するには `MergeTree family`、の場合と同じ `ReplicatedMergeTree`. 
+ +あなたが持っていた場合 `MergeTree` したテーブルを手動で再現でき換で再現します。 すでに大量のデータを収集している場合は、これを行う必要があります `MergeTree` これで、レプリケーションを有効にします。 + +さまざまなレプリカでデータが異なる場合は、最初に同期するか、レプリカ以外のすべてのデータを削除します。 + +既存のmergetreeテーブルの名前を変更し、 `ReplicatedMergeTree` 古い名前のテーブル。 +古いテーブルからデータを移動する `detached` サブディレクトリ内のディレクトリを新しいテーブルデータ (`/var/lib/clickhouse/data/db_name/table_name/`). +その後、実行 `ALTER TABLE ATTACH PARTITION` 作業セットにこれらのデータ部分を追加するレプリカのいずれか。 + +## ReplicatedmergetreeからMergetreeへの変換 {#converting-from-replicatedmergetree-to-mergetree} + +別の名前のmergetreeテーブルを作成します。 ディレクトリからすべてのデータを `ReplicatedMergeTree` テーブルデータを新しいテーブルのデータディレクトリです。 次に、 `ReplicatedMergeTree` テーブルとサーバーを再起動します。 + +あなたが取り除きたいなら `ReplicatedMergeTree` サーバーを起動せずにテーブル: + +- 対応するものを削除する `.sql` メタデータディレク (`/var/lib/clickhouse/metadata/`). +- ZooKeeperの対応するパスを削除します (`/path_to_table/replica_name`). + +この後、サーバーを起動し、 `MergeTree` テーブル、そのディレクトリにデータを移動し、サーバーを再起動します。 + +## Zookeeperクラスター内のメタデータが失われたり破損した場合の回復 {#recovery-when-metadata-in-the-zookeeper-cluster-is-lost-or-damaged} + +ZooKeeper内のデータが失われたり破損したりした場合は、上記のように単純なテーブルに移動してデータを保存することができます。 + +[元の記事](https://clickhouse.tech/docs/en/operations/table_engines/replication/) diff --git a/docs/ja/engines/table_engines/mergetree_family/summingmergetree.md b/docs/ja/engines/table_engines/mergetree_family/summingmergetree.md deleted file mode 120000 index 5be4624f5f9..00000000000 --- a/docs/ja/engines/table_engines/mergetree_family/summingmergetree.md +++ /dev/null @@ -1 +0,0 @@ -../../../../en/engines/table_engines/mergetree_family/summingmergetree.md \ No newline at end of file diff --git a/docs/ja/engines/table_engines/mergetree_family/summingmergetree.md b/docs/ja/engines/table_engines/mergetree_family/summingmergetree.md new file mode 100644 index 00000000000..1cf967148d2 --- /dev/null +++ b/docs/ja/engines/table_engines/mergetree_family/summingmergetree.md @@ -0,0 +1,141 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 34 +toc_title: SummingMergeTree +--- + +# Summingmergetree {#summingmergetree} + +エンジンは [MergeTree](mergetree.md#table_engines-mergetree). 違いは、データ部分をマージするとき `SummingMergeTree` テーブルClickHouseは、すべての行を同じ主キー(またはより正確には同じ)で置き換えます [ソートキー](mergetree.md))数値データ型を持つ列の集計値を含む行。 並べ替えキーが単一のキー値が多数の行に対応するように構成されている場合、これによりストレージボリュームが大幅に削減され、データ選択がスピードア + +私たちは使用するエンジンと一緒に `MergeTree`. 完全なデータを格納する `MergeTree` テーブル、および使用 `SummingMergeTree` レポートを準備するときなど、集計データを保存する場合。 このようなアプローチは、誤って構成された主キーのために貴重なデー + +## テーブルの作成 {#creating-a-table} + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] +( + name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], + name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], + ... +) ENGINE = SummingMergeTree([columns]) +[PARTITION BY expr] +[ORDER BY expr] +[SAMPLE BY expr] +[SETTINGS name=value, ...] +``` + +説明リクエストパラメータの参照 [要求の説明](../../../sql_reference/statements/create.md). + +**SummingMergeTreeのパラメータ** + +- `columns` -値が要約される列の名前を持つタプル。 省略可能なパラメータ。 + 列は数値型である必要があり、主キーに含めることはできません。 + + もし `columns` 指定されていない場合、ClickHouseは、プライマリキーに含まれていない数値データ型を持つすべての列の値を集計します。 + +**クエリ句** + +作成するとき `SummingMergeTree` テーブル同じ [句](mergetree.md) 作成するときと同じように、必須です。 `MergeTree` テーブル。 + +
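+
+たとえば、合計する列を `columns` タプルで明示的に指定する最小のスケッチです(テーブル名と列名は説明用の仮のものです)。ここでは `value1` と `value2` だけが合計され、`note` はマージ時に既存の値のいずれかが残ります:
+
+``` sql
+CREATE TABLE summing_example
+(
+    key UInt32,
+    value1 UInt32,
+    value2 UInt32,
+    note String  -- 合計対象外の列
+)
+ENGINE = SummingMergeTree((value1, value2))
+ORDER BY key
+```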
    + +テーブルを作成する非推奨の方法 + +!!! attention "注意" + 可能であれば、古いプロジェクトを上記の方法に切り替えてください。 + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] +( + name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], + name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], + ... +) ENGINE [=] SummingMergeTree(date-column [, sampling_expression], (primary, key), index_granularity, [columns]) +``` + +すべてのパラメーターを除く `columns` と同じ意味を持つ `MergeTree`. + +- `columns` — tuple with names of columns values of which will be summarized. Optional parameter. For a description, see the text above. + +
+
+## 使用例 {#usage-example}
+
+次の表を考えてみます:
+
+``` sql
+CREATE TABLE summtt
+(
+    key UInt32,
+    value UInt32
+)
+ENGINE = SummingMergeTree()
+ORDER BY key
+```
+
+それにデータを挿入します:
+
+``` sql
+INSERT INTO summtt Values(1,1),(1,2),(2,1)
+```
+
+ClickHouseはすべての行を完全には合計しないことがあります([以下を参照](#data-processing))。そのため、クエリでは集計関数 `sum` と `GROUP BY` 句を使用します。
+
+``` sql
+SELECT key, sum(value) FROM summtt GROUP BY key
+```
+
+``` text
+┌─key─┬─sum(value)─┐
+│   2 │          1 │
+│   1 │          3 │
+└─────┴────────────┘
+```
+
+## データ処理 {#data-processing}
+
+データがテーブルに挿入されると、まずはそのまま保存されます。 ClickHouseは定期的に挿入されたデータパーツをマージし、このときに同じ主キーを持つ行が合計され、結果のデータパーツごとに一つの行に置き換えられます。
+
+ClickHouseのマージでは、異なる結果データパーツに同じ主キーを持つ行が含まれることがあります。つまり、合計が不完全なままになる可能性があります。 したがって、上記の例で説明したように、`SELECT` では集計関数 [sum()](../../../sql_reference/aggregate_functions/reference.md#agg_function-sum) と `GROUP BY` 句をクエリで使用する必要があります。
+
+### 合計の共通ルール {#common-rules-for-summation}
+
+数値データ型の列の値が集計されます。 列のセットは、パラメータ `columns` によって定義されます。
+
+合計対象のすべての列の値が0だった場合、その行は削除されます。
+
+列が主キーに含まれておらず、合計対象でもない場合は、既存の値から任意の値が選択されます。
+
+主キーに含まれる列の値は集計されません。
+
+### AggregateFunction列の合計 {#the-summation-in-the-aggregatefunction-columns}
+
+[AggregateFunctionタイプ](../../../sql_reference/data_types/aggregatefunction.md) の列の場合、ClickHouseは [AggregatingMergeTree](aggregatingmergetree.md) エンジンと同様に、その関数に従って集約します。
+
+### 入れ子構造 {#nested-structures}
+
+テーブルには、特別な方法で処理される入れ子データ構造を含めることができます。
+
+入れ子テーブルの名前が `Map` で終わり、以下の条件を満たす列を少なくとも二つ含む場合:
+
+- 最初の列が数値型 `(*Int*, Date, DateTime)` または文字列型 `(String, FixedString)` である。これを `key` と呼びます。
+- その他の列が算術型 `(*Int*, Float32/64)` である。これを `(values...)` と呼びます。
+
+この入れ子テーブルは `key => (values...)` のマッピングとして解釈され、行をマージするとき、二つのデータセットの要素は `key` ごとにマージされ、対応する `(values...)` が合計されます。
+
+例:
+
+``` text
+[(1, 100)] + [(2, 150)] -> [(1, 100), (2, 150)]
+[(1, 100)] + [(1, 150)] -> [(1, 250)]
+[(1, 100)] + [(1, 150), (2, 150)] -> [(1, 250), (2, 150)]
+[(1, 100), (2, 150)] + [(1, -100)] -> [(2, 150)]
+```
+
+データを要求するときは、`Map` の集約のために [sumMap(key, value)](../../../sql_reference/aggregate_functions/reference.md) 関数を使用します(下のスケッチを参照)。
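+
+たとえば、`key` 列と、`StatsMap.key` / `StatsMap.value` という列を持つ入れ子構造 `StatsMap` を仮定した最小のスケッチです(テーブル名と列名は説明用の仮のものです):
+
+``` sql
+SELECT
+    key,
+    sumMap(StatsMap.key, StatsMap.value) AS totals  -- key ごとに value を合計
+FROM summing_nested_table
+GROUP BY key
+```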
+ +入れ子になったデータ構造の場合、合計の列のタプルに列を指定する必要はありません。 + +[元の記事](https://clickhouse.tech/docs/en/operations/table_engines/summingmergetree/) diff --git a/docs/ja/engines/table_engines/mergetree_family/versionedcollapsingmergetree.md b/docs/ja/engines/table_engines/mergetree_family/versionedcollapsingmergetree.md deleted file mode 120000 index 9acd120f965..00000000000 --- a/docs/ja/engines/table_engines/mergetree_family/versionedcollapsingmergetree.md +++ /dev/null @@ -1 +0,0 @@ -../../../../en/engines/table_engines/mergetree_family/versionedcollapsingmergetree.md \ No newline at end of file diff --git a/docs/ja/engines/table_engines/mergetree_family/versionedcollapsingmergetree.md b/docs/ja/engines/table_engines/mergetree_family/versionedcollapsingmergetree.md new file mode 100644 index 00000000000..a6d337f1eef --- /dev/null +++ b/docs/ja/engines/table_engines/mergetree_family/versionedcollapsingmergetree.md @@ -0,0 +1,238 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 37 +toc_title: VersionedCollapsingMergeTree +--- + +# Versionedcollapsingmergetree {#versionedcollapsingmergetree} + +このエンジン: + +- では迅速書き込みオブジェクトとは常に変化しています。 +- バックグラウン これを大幅に削減量に保管します。 + +セクションを見る [折りたたみ](#table_engines_versionedcollapsingmergetree) 詳細については。 + +エンジンは [MergeTree](mergetree.md#table_engines-mergetree) 追加した論理崩壊行のアルゴリズムのための統合データ部品です。 `VersionedCollapsingMergeTree` と同じ目的を果たす [CollapsingMergeTree](collapsingmergetree.md) が異なる崩壊のアルゴリズムを挿入し、データを任意の順番で複数のスレッド)。 特に、 `Version` 列は、間違った順序で挿入されていても、行を適切に折りたたむのに役立ちます。 対照的に, `CollapsingMergeTree` 厳密に連続した挿入のみを許可します。 + +## テーブルの作成 {#creating-a-table} + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] +( + name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], + name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], + ... +) ENGINE = VersionedCollapsingMergeTree(sign, version) +[PARTITION BY expr] +[ORDER BY expr] +[SAMPLE BY expr] +[SETTINGS name=value, ...] +``` + +説明のクエリパラメータの [クエリの説明](../../../sql_reference/statements/create.md). + +**エンジン変数** + +``` sql +VersionedCollapsingMergeTree(sign, version) +``` + +- `sign` — Name of the column with the type of row: `1` は “state” 行, `-1` は “cancel” 行 + + 列データ型は次のようになります `Int8`. + +- `version` — Name of the column with the version of the object state. + + 列データ型は次のようになります `UInt*`. + +**クエリ句** + +作成するとき `VersionedCollapsingMergeTree` テーブル、同じ [句](mergetree.md) を作成するときに必要です。 `MergeTree` テーブル。 + +
+
+<summary>テーブルを作成する非推奨の方法</summary>
+
+!!! attention "注意"
+    新しいプロジェクトではこの方法を使用しないでください。可能であれば、古いプロジェクトを上記の方法に切り替えてください。
+
+``` sql
+CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
+(
+    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
+    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
+    ...
+) ENGINE [=] VersionedCollapsingMergeTree(date-column [, sampling_expression], (primary, key), index_granularity, sign, version)
+```
+
+`sign` と `version` を除くすべてのパラメータは、`MergeTree` におけるものと同じ意味を持ちます。
+
+- `sign` — Name of the column with the type of row: `1` は “state” 行, `-1` は “cancel” 行
+
+    Column Data Type — `Int8`.
+
+- `version` — Name of the column with the version of the object state.
+
+    列のデータ型は `UInt*` でなければなりません。
+
+</details>
+
+## 折りたたみ {#table_engines-versionedcollapsingmergetree}
+
+### データ {#data}
+
+あるオブジェクトのデータを継続的に変更する必要がある状況を考えてみましょう。オブジェクトごとに一つの行を持ち、変更があるたびにその行を更新するのが妥当に思えます。ただし、DBMSにとって更新操作はストレージ内のデータの書き換えを必要とするため、高価で低速です。データをすばやく書き込む必要がある場合、更新は受け入れられませんが、次のようにオブジェクトへの変更を順番に書き込むことはできます。
+
+行を書き込むときは `Sign` 列を使用します。`Sign = 1` であれば、その行はオブジェクトの状態を表します(“state” 行と呼びます)。`Sign = -1` であれば、同じ属性を持つオブジェクトの状態の取り消しを表します(“cancel” 行と呼びます)。また、オブジェクトの各状態を個別の番号で識別するための `Version` 列も使用します。
+
+たとえば、ユーザーがあるサイトで訪問したページの数と、そこに滞在した時間を計算したいとします。ある時点で、ユーザーアクティビティの状態として次の行を書き込みます:
+
+``` text
+┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐
+│ 4324182021466249494 │         5 │      146 │    1 │       1 |
+└─────────────────────┴───────────┴──────────┴──────┴─────────┘
+```
+
+その後のある時点で、ユーザーアクティビティの変更を登録するために、次の二つの行を書き込みます。
+
+``` text
+┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐
+│ 4324182021466249494 │         5 │      146 │   -1 │       1 |
+│ 4324182021466249494 │         6 │      185 │    1 │       2 |
+└─────────────────────┴───────────┴──────────┴──────┴─────────┘
+```
+
+最初の行は、オブジェクト(ユーザー)の以前の状態を取り消します。この行は、取り消される状態のすべてのフィールドを `Sign` を除いてコピーしたものです。
+
+二番目の行には、現在の状態が含まれています。
+
+必要なのはユーザーアクティビティの最後の状態だけなので、次の行
+
+``` text
+┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐
+│ 4324182021466249494 │         5 │      146 │    1 │       1 |
+│ 4324182021466249494 │         5 │      146 │   -1 │       1 |
+└─────────────────────┴───────────┴──────────┴──────┴─────────┘
+```
+
+は削除でき、オブジェクトの無効な(古い)状態を折りたたむことができます。`VersionedCollapsingMergeTree` は、データパーツのマージ中にこれを行います。
+
+変更ごとに二つの行が必要な理由については、[Algorithm](#table_engines-versionedcollapsingmergetree-algorithm) を参照してください。
+
+**使用上の注意**
+
+1. データを書き込むプログラムは、後で取り消せるように、オブジェクトの状態を覚えておく必要があります。“cancel” 行には、“state” 行の主キーのフィールドのコピーと、反対の `Sign` を含めます。これによりストレージの初期サイズは増加しますが、データをすばやく書き込めるようになります。
2. 列に長い配列があると、書き込みの負荷によりエンジンの効率が低下します。データが単純であるほど、効率は向上します。
3. `SELECT` の結果は、オブジェクトの変更履歴の一貫性に強く依存します。挿入するデータを準備するときは正確を期してください。たとえばセッションの深さのような非負であるべき指標に負の値が入っているなど、一貫性のないデータからは予測不可能な結果が得られます。
+
+### Algorithm {#table_engines-versionedcollapsingmergetree-algorithm}
+
+ClickHouseは、データパーツをマージするときに、同じ主キーと同じバージョンを持ち、`Sign` が異なる行の各ペアを削除します。行の順序は関係ありません。
+
+ClickHouseはデータを挿入するとき、行を主キーで並べ替えます。`Version` 列が主キーに含まれていない場合、ClickHouseはそれを暗黙的に主キーの最後のフィールドとして追加し、順序付けに使用します。
+
+## データの選択 {#selecting-data}
+
+ClickHouseは、同じ主キーを持つすべての行が同じ結果のデータパーツに含まれることや、同じ物理サーバー上に存在することを保証しません。これは、データの書き込みと、それに続くデータパーツのマージの両方に当てはまります。さらに、ClickHouseは `SELECT` クエリを複数のスレッドで処理するため、結果の行の順序は予測できません。つまり、`VersionedCollapsingMergeTree` テーブルから完全に「折りたたまれた」データを取得する必要がある場合には、集約が必要です。
+
+折りたたみを完了させるには、`GROUP BY` 句と、符号を考慮する集計関数を使ってクエリを記述します。たとえば、数量を計算するには `count()` の代わりに `sum(Sign)` を使用します。何かの合計を計算するには `sum(x)` の代わりに `sum(Sign * x)` を使用し、さらに `HAVING sum(Sign) > 0` を追加します。
+
+集計 `count`、`sum`、`avg` は、この方法で計算できます。オブジェクトに折りたたまれていない状態が少なくとも一つあれば、集計 `uniq` も計算できます。集計 `min` と `max` は計算できません。`VersionedCollapsingMergeTree` は、折りたたまれた状態の値の履歴を保存しないためです。
+
+集約なしでデータを抽出したい場合(たとえば、最新の値が特定の条件に一致する行が存在するかを確認する場合)は、`FROM` 句に `FINAL` 修飾子を使用できます。このアプローチは非効率であり、大きなテーブルでは使用すべきではありません。
+
+## 使用例 {#example-of-use}
+
+データ例:
+
+``` text
+┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐
+│ 4324182021466249494 │         5 │      146 │    1 │       1 |
+│ 4324182021466249494 │         5 │      146 │   -1 │       1 |
+│ 4324182021466249494 │         6 │      185 │    1 │       2 |
+└─────────────────────┴───────────┴──────────┴──────┴─────────┘
+```
+
+テーブルの作成:
+
+``` sql
+CREATE TABLE UAct
+(
+    UserID UInt64,
+    PageViews UInt8,
+    Duration UInt8,
+    Sign Int8,
+    Version UInt8
+)
+ENGINE = VersionedCollapsingMergeTree(Sign, Version)
+ORDER BY UserID
+```
+
+データの挿入:
+
+``` sql
+INSERT INTO UAct VALUES (4324182021466249494, 5, 146, 1, 1)
+```
+
+``` sql
+INSERT INTO UAct VALUES (4324182021466249494, 5, 146, -1, 1),(4324182021466249494, 6, 185, 1, 2)
+```
+
+二つの異なるデータパーツを作成するために、二つの `INSERT` クエリを使用しています。単一のクエリでデータを挿入すると、ClickHouseは一つのデータパーツしか作成せず、マージは決して実行されません。
+
+データの取得:
+
+``` sql
+SELECT * FROM UAct
+```
+
+``` text
+┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐
+│ 4324182021466249494 │         5 │      146 │    1 │       1 │
+└─────────────────────┴───────────┴──────────┴──────┴─────────┘
+┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐
+│ 4324182021466249494 │         5 │      146 │   -1 │       1 │
+│ 4324182021466249494 │         6 │      185 │    1 │       2 │
+└─────────────────────┴───────────┴──────────┴──────┴─────────┘
+```
+
+ここで何が見えているのでしょうか。折りたたまれた部分はどこにあるのでしょうか?
+二つの `INSERT` クエリで、二つのデータパーツを作成しました。`SELECT` クエリは二つのスレッドで実行されたため、結果の行の順序はランダムです。
+データパーツはまだマージされていないため、折りたたみは行われていません。ClickHouseは、予測できない時点でデータパーツをマージします。
+
+これが集約が必要な理由です:
+
+``` sql
+SELECT
+    UserID,
+    sum(PageViews * Sign) AS PageViews,
+    sum(Duration * Sign) AS Duration,
+    Version
+FROM UAct
+GROUP BY UserID, Version
+HAVING sum(Sign) > 0
+```
+
+``` text
+┌──────────────UserID─┬─PageViews─┬─Duration─┬─Version─┐
+│ 4324182021466249494 │         6 │      185 │       2 │
+└─────────────────────┴───────────┴──────────┴─────────┘
+```
+
+集約を必要とせず、強制的に折りたたみを行いたい場合は、`FROM` 句に `FINAL` 修飾子を使用できます。
+
+``` sql
+SELECT * FROM UAct FINAL
+```
+
+``` text
+┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐
+│ 4324182021466249494 │         6 │      185 │    1 │       2 │
+└─────────────────────┴───────────┴──────────┴──────┴─────────┘
+```
+
+これはデータを選択する非常に非効率的な方法です。大きなテーブルには使用しないでください。
+
+[元の記事](https://clickhouse.tech/docs/en/operations/table_engines/versionedcollapsingmergetree/)
diff --git a/docs/ja/engines/table_engines/special/buffer.md b/docs/ja/engines/table_engines/special/buffer.md
deleted file mode 120000
index 5dacf8757c2..00000000000
--- a/docs/ja/engines/table_engines/special/buffer.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../../en/engines/table_engines/special/buffer.md
\ No newline at end of file
diff --git a/docs/ja/engines/table_engines/special/buffer.md b/docs/ja/engines/table_engines/special/buffer.md
new file mode 100644
index 00000000000..bc97d9ab3d7
--- /dev/null
+++ b/docs/ja/engines/table_engines/special/buffer.md
@@ -0,0 +1,71 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 45
+toc_title: "\u30D0\u30C3\u30D5\u30A1"
+---
+
+# バッファ {#buffer}
+
+データをRAMにバッファリングし、定期的に別のテーブルへフラッシュします。読み取り操作中には、バッファと他のテーブルの両方からデータが同時に読み取られます。
+
+``` sql
+Buffer(database, table, num_layers, min_time, max_time, min_rows, max_rows, min_bytes, max_bytes)
+```
+
+エンジンのパラメータ:
+
+- `database` – Database name.
Instead of the database name, you can use a constant expression that returns a string. +- `table` – Table to flush data to. +- `num_layers` – Parallelism layer. Physically, the table will be represented as `num_layers` 独立した緩衝の。 推奨値:16。 +- `min_time`, `max_time`, `min_rows`, `max_rows`, `min_bytes`、と `max_bytes` – Conditions for flushing data from the buffer. + +データはバッファーからフラッシュされ、すべてのファイルが `min*` 条件または少なくとも一つ `max*` 条件が満たされる。 + +- `min_time`, `max_time` – Condition for the time in seconds from the moment of the first write to the buffer. +- `min_rows`, `max_rows` – Condition for the number of rows in the buffer. +- `min_bytes`, `max_bytes` – Condition for the number of bytes in the buffer. + +書き込み操作中に、データはaに挿入されます。 `num_layers` ランダムバッファの数。 または、挿入するデータ部分が十分大きい場合( `max_rows` または `max_bytes`)、バッファを省略して、宛先テーブルに直接書き込まれます。 + +データを洗い流すための条件はのそれぞれのために別に計算されます `num_layers` バッファ。 たとえば、次の場合 `num_layers = 16` と `max_bytes = 100000000`、最大RAM消費は1.6GBです。 + +例えば: + +``` sql +CREATE TABLE merge.hits_buffer AS merge.hits ENGINE = Buffer(merge, hits, 16, 10, 100, 10000, 1000000, 10000000, 100000000) +``` + +作成する ‘merge.hits\_buffer’ 同じ構造のテーブル ‘merge.hits’ そして緩衝エンジンを使用して。 このテーブルに書き込むとき、データはRAMにバッファリングされ、後で ‘merge.hits’ テーブル。 16バッファが作成されます。 100秒が経過した場合、または百万行が書き込まれた場合、または100MBのデータが書き込まれた場合、または同時に10秒が経過し、10,000行と10MBのデータが たとえば、一つの行だけが書き込まれた場合、100秒後には何があってもフラッシュされます。 しかし、多くの行が書き込まれた場合、データはすぐにフラッシュされます。 + +サーバが停止し、ドテーブルやテーブル取り外し、バッファのデータも出力先にフラッシュされる。 + +データベース名およびテーブル名には、一重引quotationで空の文字列を設定できます。 これは、宛先テーブルがないことを示します。 この場合、データのフラッシュ条件に達すると、バッファは単純にクリアされます。 これは、データのウィンドウをメモリに保持するのに便利です。 + +バッファテーブルから読み取るとき、データはバッファと宛先テーブルの両方から処理されます(存在する場合)。 +このバッファーとなる指数です。 つまり、データのバッファが読み取れる大きなバッファ. (下位テーブルのデータの場合、サポートするインデックスが使用されます。) + +バッファテーブルの列のセットが下位テーブルの列のセットと一致しない場合、両方のテーブルに存在する列のサブセットが挿入されます。 + +バッファテーブルと下位テーブルのいずれかの列に型が一致しない場合、サーバーログにエラーメッセージが入力され、バッファがクリアされます。 +バッファがフラッシュされたときに下位テーブルが存在しない場合も同じことが起こります。 + +下位テーブルとバッファテーブルに対してalterを実行する必要がある場合は、まずバッファテーブルを削除し、その下位テーブルに対してalterを実行してから + +サーバーが異常に再起動されると、バッファー内のデータは失われます。 + +バッファテーブルでは、finalとsampleが正しく動作しません。 これらの条件は宛先テーブルに渡されますが、バッファー内のデータの処理には使用されません。 これらの特徴が必要のみを使用することをお勧めしまうバッファのテーブルを書き込み、読みの行先表に示す。 + +を追加する場合にデータをバッファのバッファがロックされています。 これにより、読み取り操作がテーブルから同時に実行されると遅延が発生します。 + +バッファテーブルに挿入されるデータは、異なる順序で異なるブロックで従属テーブルに格納される可能性があります。 このため、バッファテーブルは、collapsingmergetreeへの書き込みには正しく使用することが困難です。 問題を回避するには、次のように設定します ‘num\_layers’ 1になります。 + +の場合は先テーブルがそのままに再現され、期待の特徴を再現でテーブルが失われた書き込みバッファへの表に示す。 データパーツの行とサイズの順序をランダムに変更すると、データ重複排除が機能しなくなります。 ‘exactly once’ 書く再現します。 + +これらの欠点のために、まれにバッファテーブルの使用を推奨することができます。 + +バッファテーブルは、ある時間単位で多数のサーバーから大量の挿入が受信され、挿入前にデータをバッファリングできない場合に使用されます。 + +バッファテーブルの場合でも、一度に一行ずつデータを挿入するのは意味がないことに注意してください。 これにより、毎秒数千行の速度しか生成されませんが、より大きなデータブロックを挿入すると、毎秒百万行を生成することができます(セクションを “Performance”). 
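+
+以下は、上で説明した流れを手元で確認するための最小のスケッチです(テーブル名としきい値はこの説明のための仮定です):
+
+``` sql
+-- 宛先テーブルと、その前段に置くバッファテーブル(仮定の例)
+CREATE TABLE dst (x UInt64) ENGINE = MergeTree ORDER BY x;
+CREATE TABLE dst_buffer AS dst
+ENGINE = Buffer(currentDatabase(), dst, 1, 10, 100, 10000, 1000000, 10000000, 100000000);
+
+-- 書き込みはまず RAM 上のバッファに保持される
+INSERT INTO dst_buffer VALUES (1), (2), (3);
+
+-- 読み取りはバッファと宛先テーブルの両方を対象とするため、
+-- フラッシュ前でも挿入した行がすべて見える
+SELECT count() FROM dst_buffer;
+```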
+
+[元の記事](https://clickhouse.tech/docs/en/operations/table_engines/buffer/)
diff --git a/docs/ja/engines/table_engines/special/dictionary.md b/docs/ja/engines/table_engines/special/dictionary.md
deleted file mode 120000
index 0b468772662..00000000000
--- a/docs/ja/engines/table_engines/special/dictionary.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../../en/engines/table_engines/special/dictionary.md
\ No newline at end of file
diff --git a/docs/ja/engines/table_engines/special/dictionary.md b/docs/ja/engines/table_engines/special/dictionary.md
new file mode 100644
index 00000000000..f2571ca09e8
--- /dev/null
+++ b/docs/ja/engines/table_engines/special/dictionary.md
@@ -0,0 +1,97 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 35
+toc_title: "\u8F9E\u66F8"
+---
+
+# 辞書 {#dictionary}
+
+`Dictionary` エンジンは、[辞書](../../../sql_reference/dictionaries/external_dictionaries/external_dicts.md) のデータをClickHouseのテーブルとして表示します。
+
+例として、次の構成を持つ辞書 `products` を考えてみましょう:
+
+``` xml
+<dictionaries>
+<dictionary>
+        <name>products</name>
+        <source>
+            <odbc>
+                <table>products</table>
+                <connection_string>DSN=some-db-server</connection_string>
+            </odbc>
+        </source>
+        <lifetime>
+            <min>300</min>
+            <max>360</max>
+        </lifetime>
+        <layout>
+            <flat/>
+        </layout>
+        <structure>
+            <id>
+                <name>product_id</name>
+            </id>
+            <attribute>
+                <name>title</name>
+                <type>String</type>
+                <null_value></null_value>
+            </attribute>
+        </structure>
+</dictionary>
+</dictionaries>
+```
+
+辞書データのクエリ:
+
+``` sql
+SELECT
+    name,
+    type,
+    key,
+    attribute.names,
+    attribute.types,
+    bytes_allocated,
+    element_count,
+    source
+FROM system.dictionaries
+WHERE name = 'products'
+```
+
+``` text
+┌─name─────┬─type─┬─key────┬─attribute.names─┬─attribute.types─┬─bytes_allocated─┬─element_count─┬─source──────────┐
+│ products │ Flat │ UInt64 │ ['title']       │ ['String']      │        23065376 │        175032 │ ODBC: .products │
+└──────────┴──────┴────────┴─────────────────┴─────────────────┴─────────────────┴───────────────┴─────────────────┘
+```
+
+この形式の辞書データを取得するには、[dictGet\*](../../../sql_reference/functions/ext_dict_functions.md#ext_dict_functions) 関数を使用できます。
+
+このビューは、生データを取得したい場合や `JOIN` 操作を実行したい場合には役立ちません。そのような場合には、辞書データをテーブル形式で表示する `Dictionary` エンジンを使用できます。
+
+構文:
+
+``` sql
+CREATE TABLE %table_name% (%fields%) engine = Dictionary(%dictionary_name%)
+```
+
+使用例:
+
+``` sql
+create table products (product_id UInt64, title String) Engine = Dictionary(products);
+```
+
+    Ok
+
+テーブルに何があるかを見てみましょう。
+
+``` sql
+select * from products limit 1;
+```
+
+``` text
+┌────product_id─┬─title───────────┐
+│        152689 │ Some item       │
+└───────────────┴─────────────────┘
+```
+
+[元の記事](https://clickhouse.tech/docs/en/operations/table_engines/dictionary/)
diff --git a/docs/ja/engines/table_engines/special/distributed.md b/docs/ja/engines/table_engines/special/distributed.md
deleted file mode 120000
index 1cc7f62502d..00000000000
--- a/docs/ja/engines/table_engines/special/distributed.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../../en/engines/table_engines/special/distributed.md
\ No newline at end of file
diff --git a/docs/ja/engines/table_engines/special/distributed.md b/docs/ja/engines/table_engines/special/distributed.md
new file mode 100644
index 00000000000..0e20cf08eb3
--- /dev/null
+++ b/docs/ja/engines/table_engines/special/distributed.md
@@ -0,0 +1,152 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 33
+toc_title: "\u5206\u6563"
+---
+
+# 分散 {#distributed}
+
+**分散エンジンを持つテーブルは、それ自体はデータを保存しません**が、複数のサーバーでの分散クエリ処理を可能にします。
+読み取りは自動的に並列化されます。読み取り中には、リモートサーバー上のテーブルのインデックスがあれば使用されます。
+
+分散エンジンは次のパラメータを受け付けます:
+
+- サーバーの設定ファイル内のクラスター名
+
+- リモートデータベースの名前
+
+- リモートテーブルの名前
+
+- (オプション)シャーディングキー
+
+- (オプション)非同期送信用の一時ファイルの格納に使用されるポリシー名
+
+    また見なさい:
+
+    - `insert_distributed_sync` 設定
+    - [MergeTree](../mergetree_family/mergetree.md#table_engine-mergetree-multiple-volumes) 例のため
+
+例えば:
+
+``` sql
+Distributed(logs, default, hits[, sharding_key[, policy_name]])
+```
+
+この場合、クラスター ‘logs’ 内のすべてのサーバーにある ‘default.hits’ テーブルからデータが読み取られます。
+データは読み取られるだけでなく、可能な範囲でリモートサーバー上で部分的に処理されます。
+たとえば、GROUP BY を使用するクエリの場合、データはリモートサーバー上で集約され、集計関数の中間状態がリクエスト元のサーバーに送信されます。その後、データはさらに集約されます。
+
+データベース名の代わりに、文字列を返す定数式を使用できます。たとえば: `currentDatabase()`。
+
+logs – The cluster name in the server's config file.
+
+クラスターは次のように設定されます:
+
+``` xml
+<remote_servers>
+    <logs>
+        <shard>
+            <weight>1</weight>
+            <internal_replication>false</internal_replication>
+            <replica>
+                <host>example01-01-1</host>
+                <port>9000</port>
+            </replica>
+            <replica>
+                <host>example01-01-2</host>
+                <port>9000</port>
+            </replica>
+        </shard>
+        <shard>
+            <weight>2</weight>
+            <internal_replication>false</internal_replication>
+            <replica>
+                <host>example01-02-1</host>
+                <port>9000</port>
+            </replica>
+            <replica>
+                <host>example01-02-2</host>
+                <secure>1</secure>
+                <port>9440</port>
+            </replica>
+        </shard>
+    </logs>
+</remote_servers>
+```
+
+ここでは、‘logs’ という名前のクラスターが定義されており、それぞれ二つのレプリカを含む二つのシャードで構成されています。
+シャードは、データの異なる部分を保持するサーバーを指します(すべてのデータを読み取るには、すべてのシャードにアクセスする必要があります)。
+レプリカは、同じデータを複製するサーバーです(すべてのデータを読み取るには、いずれか一つのレプリカのデータにアクセスすれば十分です)。
+
+クラスター名にドットを含めることはできません。
+
+サーバーごとにパラメータ `host`、`port`、およびオプションで `user`、`password`、`secure`、`compression` を指定します:
+- `host` – The address of the remote server. You can use either the domain or the IPv4 or IPv6 address.
If you specify the domain, the server makes a DNS request when it starts, and the result is stored as long as the server is running. If the DNS request fails, the server doesn't start. If you change the DNS record, restart the server. +- `port` – The TCP port for messenger activity (‘tcp\_port’ 設定では、通常9000)に設定します。 それをhttp\_portと混同しないでください。 +- `user` – Name of the user for connecting to a remote server. Default value: default. This user must have access to connect to the specified server. Access is configured in the users.xml file. For more information, see the section [アクセス権](../../../operations/access_rights.md). +- `password` – The password for connecting to a remote server (not masked). Default value: empty string. +- `secure` -接続にsslを使用します。 `port` = 9440. サーバーがリッスンする 9440 と正しい証明書。 +- `compression` -データ圧縮を使用します。 デフォルト値:true。 + +When specifying replicas, one of the available replicas will be selected for each of the shards when reading. You can configure the algorithm for load balancing (the preference for which replica to access) – see the [load\_balancing](../../../operations/settings/settings.md#settings-load_balancing) 設定。 +サーバーとの接続が確立されていない場合は、短いタイムアウトで接続しようとします。 接続に失敗すると、すべてのレプリカに対して次のレプリカが選択されます。 すべてのレプリカに対して接続の試行が失敗した場合、その試行は同じ方法で何度も繰り返されます。 +リモートサーバーは接続を受け入れる可能性がありますが、動作しない可能性があります。 + +シャードのいずれかを指定できます(この場合、クエリ処理は分散ではなくリモートと呼ばれる必要があります)、または任意の数のシャードまで指定でき 各シャードでは、レプリカのいずれかから任意の数に指定することができます。 シャードごとに異なる数のレプリカを指定できます。 + +構成では、任意の数のクラスターを指定できます。 + +クラスタを表示するには、以下を使用します ‘system.clusters’ テーブル。 + +の分散型エンジン能にすることで、社会とクラスターのように現地サーバーです。 ただし、クラスターの構成はサーバー設定ファイルに書き込む必要があります(クラスターのすべてのサーバーではさらに優れています)。 + +The Distributed engine requires writing clusters to the config file. Clusters from the config file are updated on the fly, without restarting the server. If you need to send a query to an unknown set of shards and replicas each time, you don't need to create a Distributed table – use the ‘remote’ 代わりにテーブル関数。 セクションを見る [テーブル関数](../../../sql_reference/table_functions/index.md). + +クラスターにデータを書き込む方法は二つあります: + +まず、どのサーバーにどのデータを書き込むかを定義し、各シャードで直接書き込みを実行できます。 つまり、分散テーブルのテーブルにinsertを実行します “looks at”. 
これは、主題領域の要件のために自明ではないシャーディングスキームを使用できるため、最も柔軟なソリューションです。 これも最適なソリューションからデータを書き込むことができるの異なる資料が完全に独立。 + +次に、分散テーブルでinsertを実行できます。 この場合、テーブルは挿入されたデータをサーバー自体に分散します。 分散テーブルに書き込むには、シャーディングキーセット(最後のパラメータ)が必要です。 さらに、単一のシャードしかない場合、書き込み操作はシャーディングキーを指定せずに動作します。 + +各シャードは設定ファイルで定義された重みを持つことができます。 デフォルトでは、重みは一つに等しいです。 データは、シャードウェイトに比例した量でシャード全体に分散されます。 たとえば、二つのシャードがあり、最初のものが9の重みを持ち、第二のものが10の重みを持つ場合、最初のシャードは9/19の行に送られ、第二のものは10/19 + +各破片は持つことができます ‘internal\_replication’ 設定ファイルで定義されたパラメータ。 + +このパラメータが設定されている場合 ‘true’ 書き込み操作は、最初の正常なレプリカを選択し、それにデータを書き込みます。 分散テーブルの場合は、この代替を使用します “looks at” 複製されたテーブル。 言い換えれば、データが書き込まれるテーブルがそれ自体を複製する場合です。 + +に設定されている場合 ‘false’ データはすべてのレプリカに書き込まれます。 本質的に、これは、分散テーブルがデータ自体を複製することを意味します。 レプリカの整合性はチェックされず、時間の経過とともにわずかに異なるデータが含まれるためです。 + +データの行が送信されるシャードを選択するには、シャーディング式が分析され、残りの部分がシャードの合計ウェイトで除算されます。 行は、残りの半分の間隔に対応するシャードに送られます ‘prev\_weight’ に ‘prev\_weights + weight’、どこ ‘prev\_weights’ 最小の数を持つシャードの合計重量です。 ‘weight’ このシャードの重さです。 たとえば、二つのシャードがあり、最初のシャードの重みが9で、二番目のシャードの重みが10である場合、行は\[0,9)の範囲から残りのシャードの最初のシャー + +シャーディング式には、整数を返す定数とテーブル列からの任意の式を指定できます。 たとえば、次の式を使用できます ‘rand()’ データのランダムな分布の場合、または ‘UserID’ ユーザーのIDを分割する残りの部分で配布する場合(単一のユーザーのデータは単一のシャードに存在し、ユーザーの実行と参加が簡単になります)。 列のいずれかが十分に均等に分散されていない場合は、ハッシュ関数でラップすることができます:intHash64(UserID)。 + +簡単なリマインダからの限定シshardingんを常に適しています。 中規模および大量のデータ(数十のサーバー)では機能しますが、非常に大量のデータ(数百のサーバー以上)では機能しません。 後者の場合はshardingスキームに必要なのではなく、エントリに配布します。 + +SELECT queries are sent to all the shards and work regardless of how data is distributed across the shards (they can be distributed completely randomly). When you add a new shard, you don't have to transfer the old data to it. You can write new data with a heavier weight – the data will be distributed slightly unevenly, but queries will work correctly and efficiently. + +次の場合、シャーディングスキームについて心配する必要があります: + +- 特定のキーによるデータの結合(inまたはjoin)が必要なクエリが使用されます。 このキーによってデータがシャードされている場合は、グローバルinまたはグローバル結合の代わりにローカルinまたはjoinを使用できます。 +- 多数のサーバー(数百またはそれ以上)が使用され、多数の小さなクエリ(個々のクライアントのクエリ-ウェブサイト、広告主、またはパートナー)が使用されます。 小さなクエリがクラスタ全体に影響を与えないようにするには、単一のクライアントのデータを単一のシャードに配置することが理にかなっていま また、我々はyandexの中でやったように。metricaでは、biレベルのシャーディングを設定できます。 “layers” レイヤーが複数のシャードで構成されている場合。 単一のクライアントのデータは単一のレイヤーに配置されますが、必要に応じてシャードをレイヤーに追加することができ、データはその中にランダムに配 分散テーブルはレイヤごとに作成され、グローバルクエリ用に単一の共有分散テーブルが作成されます。 + +データは非同期に書き込まれます。 テーブルに挿入すると、データブロックはローカルファイルシステムに書き込まれます。 データはできるだけ早くバックグラウンドでリモートサーバーに送信されます。 データを送信する期間は、以下によって管理されます。 [distributed\_directory\_monitor\_sleep\_time\_ms](../../../operations/settings/settings.md#distributed_directory_monitor_sleep_time_ms) と [distributed\_directory\_monitor\_max\_sleep\_time\_ms](../../../operations/settings/settings.md#distributed_directory_monitor_max_sleep_time_ms) 設定。 その `Distributed` エンジンを送信し、各ファイルを挿入したデータが別々にまでを一括送信ファイルの [distributed\_directory\_monitor\_batch\_inserts](../../../operations/settings/settings.md#distributed_directory_monitor_batch_inserts) 設定。 この設定の改善にクラスターの性能をより一層の活用地域のサーバやネットワーク資源です。 を確認しておきましょうか否かのデータが正常に送信されるチェックリストファイル(データまたは間に-をはさんだ)はテーブルディレクトリ: `/var/lib/clickhouse/data/database/table/`. + +分散テーブルへの挿入後にサーバーが存在しなくなった場合、または大まかな再起動(デバイス障害など)が発生した場合は、挿入されたデータが失われる可 破損したデータ部分がテーブルディレクトリで検出された場合、そのデータ部分は、 ‘broken’ サブディレクトリと、もはや使用。 + +Max\_parallel\_replicasオプションを有効にすると、単一のシャード内のすべてのレプリカでクエリ処理が並列化されます。 詳細については、以下を参照してください [max\_parallel\_replicas](../../../operations/settings/settings.md#settings-max_parallel_replicas). + +## 仮想列 {#virtual-columns} + +- `_shard_num` — Contains the `shard_num` (から `system.clusters`). タイプ: [UInt32](../../../sql_reference/data_types/int_uint.md). + +!!! 
note "メモ" + それ以来 [`remote`](../../../sql_reference/table_functions/remote.md)/`cluster` テーブル機能の内部を一時のインスタンスと同じ分散型エンジン, `_shard_num` あまりにもそこに利用可能です。 + +**また見なさい** + +- [仮想列](index.md#table_engines-virtual_columns) + +[元の記事](https://clickhouse.tech/docs/en/operations/table_engines/distributed/) diff --git a/docs/ja/engines/table_engines/special/external_data.md b/docs/ja/engines/table_engines/special/external_data.md deleted file mode 120000 index fde0b675e01..00000000000 --- a/docs/ja/engines/table_engines/special/external_data.md +++ /dev/null @@ -1 +0,0 @@ -../../../../en/engines/table_engines/special/external_data.md \ No newline at end of file diff --git a/docs/ja/engines/table_engines/special/external_data.md b/docs/ja/engines/table_engines/special/external_data.md new file mode 100644 index 00000000000..18188960259 --- /dev/null +++ b/docs/ja/engines/table_engines/special/external_data.md @@ -0,0 +1,68 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 34 +toc_title: "\u5916\u90E8\u30C7\u30FC\u30BF" +--- + +# クエリ処理のための外部データ {#external-data-for-query-processing} + +ClickHouseでは、クエリの処理に必要なデータとSELECTクエリをサーバーに送信できます。 このデータは一時テーブルに格納されます(セクションを参照 “Temporary tables” また、クエリで使用することもできます(たとえば、IN演算子など)。 + +たとえば、重要なユーザー識別子を持つテキストファイルがある場合は、このリストのフィルタリングを使用するクエリと一緒にサーバーにアップロードで + +大量の外部データを含む複数のクエリを実行する必要がある場合は、この機能を使用しないでください。 事前にデータをdbにアップロードする方が良いです。 + +外部データは、コマンドラインクライアント(非対話モード)またはhttpインターフェイスを使用してアップロードできます。 + +コマンドラインクライアントでは、次の形式でparametersセクションを指定できます + +``` bash +--external --file=... [--name=...] [--format=...] [--types=...|--structure=...] +``` + +送信されるテーブルの数については、このような複数のセクションがあります。 + +**–external** – Marks the beginning of a clause. +**–file** – Path to the file with the table dump, or -, which refers to stdin. +Stdinから取得できるのは、単一のテーブルだけです。 + +次のパラメーターは省略可能です: **–name**– Name of the table. If omitted, \_data is used. +**–format** – Data format in the file. If omitted, TabSeparated is used. + +次のパラメーターのいずれかが必要です:**–types** – A list of comma-separated column types. For example: `UInt64,String`. The columns will be named \_1, \_2, … +**–structure**– The table structure in the format`UserID UInt64`, `URL String`. 列の名前と型を定義します。 + +で指定されたファイル ‘file’ で指定された形式によって解析されます。 ‘format’ で指定されたデータ型を使用する。 ‘types’ または ‘structure’. テーブルはサーバーにアップロードされ、そこにアクセスできるようになります。 ‘name’. 
+ +例: + +``` bash +$ echo -ne "1\n2\n3\n" | clickhouse-client --query="SELECT count() FROM test.visits WHERE TraficSourceID IN _data" --external --file=- --types=Int8 +849897 +$ cat /etc/passwd | sed 's/:/\t/g' | clickhouse-client --query="SELECT shell, count() AS c FROM passwd GROUP BY shell ORDER BY c DESC" --external --file=- --name=passwd --structure='login String, unused String, uid UInt16, gid UInt16, comment String, home String, shell String' +/bin/sh 20 +/bin/false 5 +/bin/bash 4 +/usr/sbin/nologin 1 +/bin/sync 1 +``` + +HTTPインターフェイスを使用する場合、外部データはmultipart/form-data形式で渡されます。 各テーブルは別のファイルとして送信されます。 テーブル名はファイル名から取得されます。 その ‘query\_string’ パラメータが渡されます ‘name\_format’, ‘name\_types’、と ‘name\_structure’、どこ ‘name’ これらのパラメーターが対応するテーブルの名前です。 パラメータの意味は、コマンドラインクライアントを使用する場合と同じです。 + +例えば: + +``` bash +$ cat /etc/passwd | sed 's/:/\t/g' > passwd.tsv + +$ curl -F 'passwd=@passwd.tsv;' 'http://localhost:8123/?query=SELECT+shell,+count()+AS+c+FROM+passwd+GROUP+BY+shell+ORDER+BY+c+DESC&passwd_structure=login+String,+unused+String,+uid+UInt16,+gid+UInt16,+comment+String,+home+String,+shell+String' +/bin/sh 20 +/bin/false 5 +/bin/bash 4 +/usr/sbin/nologin 1 +/bin/sync 1 +``` + +分散クエリ処理の場合、一時テーブルはすべてのリモートサーバーに送信されます。 + +[元の記事](https://clickhouse.tech/docs/en/operations/table_engines/external_data/) diff --git a/docs/ja/engines/table_engines/special/file.md b/docs/ja/engines/table_engines/special/file.md deleted file mode 120000 index 47afe351c54..00000000000 --- a/docs/ja/engines/table_engines/special/file.md +++ /dev/null @@ -1 +0,0 @@ -../../../../en/engines/table_engines/special/file.md \ No newline at end of file diff --git a/docs/ja/engines/table_engines/special/file.md b/docs/ja/engines/table_engines/special/file.md new file mode 100644 index 00000000000..5eec64c74ea --- /dev/null +++ b/docs/ja/engines/table_engines/special/file.md @@ -0,0 +1,90 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 37 +toc_title: "\u30D5\u30A1\u30A4\u30EB" +--- + +# ファイル {#table_engines-file} + +ファイルにテーブルエンジンのデータをファイルを使ったり、 [ファイル +形式](../../../interfaces/formats.md#formats) (TabSeparated、Nativeなど)。). + +使用例: + +- データからの輸出clickhouseるファイルです。 +- ある形式から別の形式にデータを変換します。 +- ディスク上のファイルを編集して、clickhouseのデータを更新する。 + +## Clickhouseサーバーでの使用状況 {#usage-in-clickhouse-server} + +``` sql +File(Format) +``` + +その `Format` パラメータを指定するか、ファイルのファイルフォーマット 実行するには +`SELECT` クエリは、形式は、入力のためにサポートされ、実行する必要があります +`INSERT` queries – for output. The available formats are listed in the +[形式](../../../interfaces/formats.md#formats) セクション。 + +クリックハウ`File`. で定義されたフォルダを使用します [パス](../../../operations/server_configuration_parameters/settings.md) サーバー構成での設定。 + +テーブルを作成するとき `File(Format)` で空のサブディレクトリとフォルダにまとめた。 データがそのテーブルに書き込まれると、 `data.Format` サブディレクト + +このサブフォルダとファイルをserver filesystemに手動で作成してから [ATTACH](../../../sql_reference/statements/misc.md) でテーブルの情報をマッチングの名前でデータベースバックエンドからファイルです。 + +!!! warning "警告" + ClickHouseはそのようなファイルの外部変更を追跡しないため、この機能には注意してください。 ClickHouseを介して同時に書き込みを行い、ClickHouseの外部に書き込みを行った結果は未定義です。 + +**例えば:** + +**1.** セットアップ `file_engine_table` テーブル: + +``` sql +CREATE TABLE file_engine_table (name String, value UInt32) ENGINE=File(TabSeparated) +``` + +デフォルトでclickhouseフォルダを作成します `/var/lib/clickhouse/data/default/file_engine_table`. 
+ +**2.** 手動で作成する `/var/lib/clickhouse/data/default/file_engine_table/data.TabSeparated` を含む: + +``` bash +$ cat data.TabSeparated +one 1 +two 2 +``` + +**3.** データのクエリ: + +``` sql +SELECT * FROM file_engine_table +``` + +``` text +┌─name─┬─value─┐ +│ one │ 1 │ +│ two │ 2 │ +└──────┴───────┘ +``` + +## Clickhouseでの使用-ローカル {#usage-in-clickhouse-local} + +で [ツつ"ツづ按つオツ!](../../../operations/utilities/clickhouse-local.md) ファイルエンジ `Format`. デフォルトの入力/出力ストリームは、数値または人間が読める名前を使用して指定できます `0` または `stdin`, `1` または `stdout`. +**例えば:** + +``` bash +$ echo -e "1,2\n3,4" | clickhouse-local -q "CREATE TABLE table (a Int64, b Int64) ENGINE = File(CSV, stdin); SELECT a, b FROM table; DROP TABLE table" +``` + +## 実装の詳細 {#details-of-implementation} + +- 複数 `SELECT` クエリは同時に実行できますが、 `INSERT` クエリはお互いを待ちます。 +- 新しいファイルの作成に対応 `INSERT` クエリ。 +- ファイルが存在する場合, `INSERT` それに新しい値を追加します。 +- サポートなし: + - `ALTER` + - `SELECT ... SAMPLE` + - 指数 + - 複製 + +[元の記事](https://clickhouse.tech/docs/en/operations/table_engines/file/) diff --git a/docs/ja/engines/table_engines/special/generate.md b/docs/ja/engines/table_engines/special/generate.md deleted file mode 120000 index 566dc4e5382..00000000000 --- a/docs/ja/engines/table_engines/special/generate.md +++ /dev/null @@ -1 +0,0 @@ -../../../../en/engines/table_engines/special/generate.md \ No newline at end of file diff --git a/docs/ja/engines/table_engines/special/generate.md b/docs/ja/engines/table_engines/special/generate.md new file mode 100644 index 00000000000..340808c4278 --- /dev/null +++ b/docs/ja/engines/table_engines/special/generate.md @@ -0,0 +1,61 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 46 +toc_title: GenerateRandom +--- + +# Generaterandom {#table_engines-generate} + +のgeneraterandomテーブルエンジンの生産ランダムなデータが与えられたテーブルのスキーマ. + +使用例: + +- 再現可能な大きいテーブルに住むテストの使用。 +- ファジングテストのランダム入力を生成します。 + +## Clickhouseサーバーでの使用状況 {#usage-in-clickhouse-server} + +``` sql +ENGINE = GenerateRandom(random_seed, max_string_length, max_array_length) +``` + +その `max_array_length` と `max_string_length` すべ +生成されたデータに対応する配列の列と文字列。 + +テーブル生成エンジンは `SELECT` クエリ。 + +対応して [データタイプ](../../../sql_reference/data_types/index.md) これは、以下を除いてテーブルに格納できます `LowCardinality` と `AggregateFunction`. + +**例えば:** + +**1.** セットアップ `generate_engine_table` テーブル: + +``` sql +CREATE TABLE generate_engine_table (name String, value UInt32) ENGINE = GenerateRandom(1, 5, 3) +``` + +**2.** データのクエリ: + +``` sql +SELECT * FROM generate_engine_table LIMIT 3 +``` + +``` text +┌─name─┬──────value─┐ +│ c4xJ │ 1412771199 │ +│ r │ 1791099446 │ +│ 7#$ │ 124312908 │ +└──────┴────────────┘ +``` + +## 実装の詳細 {#details-of-implementation} + +- サポートなし: + - `ALTER` + - `SELECT ... 
SAMPLE` + - `INSERT` + - 指数 + - 複製 + +[元の記事](https://clickhouse.tech/docs/en/operations/table_engines/generate/) diff --git a/docs/ja/engines/table_engines/special/index.md b/docs/ja/engines/table_engines/special/index.md deleted file mode 120000 index 60084579290..00000000000 --- a/docs/ja/engines/table_engines/special/index.md +++ /dev/null @@ -1 +0,0 @@ -../../../../en/engines/table_engines/special/index.md \ No newline at end of file diff --git a/docs/ja/engines/table_engines/special/index.md b/docs/ja/engines/table_engines/special/index.md new file mode 100644 index 00000000000..301ca7f0005 --- /dev/null +++ b/docs/ja/engines/table_engines/special/index.md @@ -0,0 +1,8 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_folder_title: Special +toc_priority: 31 +--- + + diff --git a/docs/ja/engines/table_engines/special/join.md b/docs/ja/engines/table_engines/special/join.md deleted file mode 120000 index 896d0942e40..00000000000 --- a/docs/ja/engines/table_engines/special/join.md +++ /dev/null @@ -1 +0,0 @@ -../../../../en/engines/table_engines/special/join.md \ No newline at end of file diff --git a/docs/ja/engines/table_engines/special/join.md b/docs/ja/engines/table_engines/special/join.md new file mode 100644 index 00000000000..2120ca61495 --- /dev/null +++ b/docs/ja/engines/table_engines/special/join.md @@ -0,0 +1,111 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 40 +toc_title: "\u53C2\u52A0" +--- + +# 参加 {#join} + +Inを使用するための準備済みデータ構造 [JOIN](../../../sql_reference/statements/select.md#select-join) オペレーション + +## テーブルの作成 {#creating-a-table} + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] +( + name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [TTL expr1], + name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2] [TTL expr2], +) ENGINE = Join(join_strictness, join_type, k1[, k2, ...]) +``` + +の詳細な説明を参照してください [CREATE TABLE](../../../sql_reference/statements/create.md#create-table-query) クエリ。 + +**エンジン変数** + +- `join_strictness` – [厳密に結合する](../../../sql_reference/statements/select.md#select-join-strictness). +- `join_type` – [結合タイプ](../../../sql_reference/statements/select.md#select-join-types). +- `k1[, k2, ...]` – Key columns from the `USING` その句 `JOIN` 操作はで行われる。 + +入力 `join_strictness` と `join_type` 引用符なしのパラメーター。, `Join(ANY, LEFT, col1)`. 
彼らは `JOIN` テーブルが使用される操作。 パラメータが一致しない場合、ClickHouseは例外をスローせず、誤ったデータを返すことがあります。 + +## テーブルの使用法 {#table-usage} + +### 例えば {#example} + +左側のテーブルの作成: + +``` sql +CREATE TABLE id_val(`id` UInt32, `val` UInt32) ENGINE = TinyLog +``` + +``` sql +INSERT INTO id_val VALUES (1,11)(2,12)(3,13) +``` + +右側の作成 `Join` テーブル: + +``` sql +CREATE TABLE id_val_join(`id` UInt32, `val` UInt8) ENGINE = Join(ANY, LEFT, id) +``` + +``` sql +INSERT INTO id_val_join VALUES (1,21)(1,22)(3,23) +``` + +テーブルの結合: + +``` sql +SELECT * FROM id_val ANY LEFT JOIN id_val_join USING (id) SETTINGS join_use_nulls = 1 +``` + +``` text +┌─id─┬─val─┬─id_val_join.val─┐ +│ 1 │ 11 │ 21 │ +│ 2 │ 12 │ ᴺᵁᴸᴸ │ +│ 3 │ 13 │ 23 │ +└────┴─────┴─────────────────┘ +``` + +代わりとして、データを取り出すことができます `Join` 結合キー値を指定するテーブル: + +``` sql +SELECT joinGet('id_val_join', 'val', toUInt32(1)) +``` + +``` text +┌─joinGet('id_val_join', 'val', toUInt32(1))─┐ +│ 21 │ +└────────────────────────────────────────────┘ +``` + +### データの選択と挿入 {#selecting-and-inserting-data} + +を使用することができ `INSERT` データを追加するクエリ `Join`-エンジンテーブル。 テーブルが作成された場合 `ANY` 厳密さ、重複キーのデータは無視されます。 と `ALL` 厳密さは、すべての行が追加されます。 + +実行することはできません `SELECT` テーブルから直接クエリします。 代わりに、次のいずれかの方法を使用します: + +- テーブルをaの右側に置きます `JOIN` 句。 +- コールを [joinGet](../../../sql_reference/functions/other_functions.md#joinget) この関数を使用すると、テーブルからデータをディクショナリと同じ方法で抽出できます。 + +### 制限事項と設定 {#join-limitations-and-settings} + +テーブルを作成するときは、次の設定が適用されます: + +- [join\_use\_nulls](../../../operations/settings/settings.md#join_use_nulls) +- [max\_rows\_in\_join](../../../operations/settings/query_complexity.md#settings-max_rows_in_join) +- [max\_bytes\_in\_join](../../../operations/settings/query_complexity.md#settings-max_bytes_in_join) +- [join\_overflow\_mode](../../../operations/settings/query_complexity.md#settings-join_overflow_mode) +- [join\_any\_take\_last\_row](../../../operations/settings/settings.md#settings-join_any_take_last_row) + +その `Join`-エンジンテーブルは使用できません `GLOBAL JOIN` オペレーション + +その `Join`-エンジンは、使用 [join\_use\_nulls](../../../operations/settings/settings.md#join_use_nulls) の設定 `CREATE TABLE` 声明。 と [SELECT](../../../sql_reference/statements/select.md) クエリは、使用を可能に `join_use_nulls` あまりにも。 あなたが持って異なる `join_use_nulls` 設定は、テーブルを結合エラーを得ることができます。 それは結合の種類に依存します。 使用するとき [joinGet](../../../sql_reference/functions/other_functions.md#joinget) 機能、同じを使用しなければなりません `join_use_nulls` の設定 `CRATE TABLE` と `SELECT` 文。 + +## データ記憶 {#data-storage} + +`Join` テーブルデータは常にRAMにあります。 を挿入する際、列表ClickHouseに書き込みデータブロックのディレクトリのディスクできるように復元され、サーバが再起動してしまいます。 + +場合はサーバが再起動誤り、データブロックのディスクがいます。 この場合、破損したデータを含むファイルを手動で削除する必要があります。 + +[元の記事](https://clickhouse.tech/docs/en/operations/table_engines/join/) diff --git a/docs/ja/engines/table_engines/special/materializedview.md b/docs/ja/engines/table_engines/special/materializedview.md deleted file mode 120000 index 89b6e631b20..00000000000 --- a/docs/ja/engines/table_engines/special/materializedview.md +++ /dev/null @@ -1 +0,0 @@ -../../../../en/engines/table_engines/special/materializedview.md \ No newline at end of file diff --git a/docs/ja/engines/table_engines/special/materializedview.md b/docs/ja/engines/table_engines/special/materializedview.md new file mode 100644 index 00000000000..213066fc557 --- /dev/null +++ b/docs/ja/engines/table_engines/special/materializedview.md @@ -0,0 +1,12 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 43 +toc_title: MaterializedView +--- + +# Materializedview {#materializedview} + +マテリアライズドビューの実装に使用されます(詳細については、 [CREATE 
TABLE](../../../sql_reference/statements/create.md)). データを格納するために、ビューの作成時に指定された別のエンジンを使用します。 テーブルから読み取るときは、このエンジンを使用します。 + +[元の記事](https://clickhouse.tech/docs/en/operations/table_engines/materializedview/) diff --git a/docs/ja/engines/table_engines/special/memory.md b/docs/ja/engines/table_engines/special/memory.md deleted file mode 120000 index 761ce38505f..00000000000 --- a/docs/ja/engines/table_engines/special/memory.md +++ /dev/null @@ -1 +0,0 @@ -../../../../en/engines/table_engines/special/memory.md \ No newline at end of file diff --git a/docs/ja/engines/table_engines/special/memory.md b/docs/ja/engines/table_engines/special/memory.md new file mode 100644 index 00000000000..06ae102558a --- /dev/null +++ b/docs/ja/engines/table_engines/special/memory.md @@ -0,0 +1,19 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 44 +toc_title: "\u30E1\u30E2\u30EA" +--- + +# メモリ {#memory} + +メモリエンジンは、データを非圧縮形式でramに格納します。 データは、読み取り時に受信されるのとまったく同じ形式で格納されます。 言い換えれば、この表からの読み取りは完全に無料です。 +同時データアクセスは同期されます。 ロックは短く、読み取り操作と書き込み操作は互いにブロックしません。 +索引はサポートされません。 読み取りは並列化されます。 +ディスクからの読み取り、デンプレッシング、データの逆シリアル化がないため、単純なクエリで最大の生産性(10gb/秒以上)に達します。 (多くの場合、mergetreeエンジンの生産性はほぼ同じ高さにあることに注意してください。) +サーバーを再起動すると、テーブルからデータが消え、テーブルが空になります。 +通常、このテーブルエンジンの使用は正当化されません。 ただし、テストや、比較的少数の行で最大速度が必要なタスク(およそ100,000,000まで)に使用できます。 + +メモリーのエンジンを使用するシステムの一時テーブルの外部クエリデータの項をご参照ください “External data for processing a query”グローバルでの実装のために(セクションを参照 “IN operators”). + +[元の記事](https://clickhouse.tech/docs/en/operations/table_engines/memory/) diff --git a/docs/ja/engines/table_engines/special/merge.md b/docs/ja/engines/table_engines/special/merge.md deleted file mode 120000 index 0c5aa64880e..00000000000 --- a/docs/ja/engines/table_engines/special/merge.md +++ /dev/null @@ -1 +0,0 @@ -../../../../en/engines/table_engines/special/merge.md \ No newline at end of file diff --git a/docs/ja/engines/table_engines/special/merge.md b/docs/ja/engines/table_engines/special/merge.md new file mode 100644 index 00000000000..be0ee5afbe6 --- /dev/null +++ b/docs/ja/engines/table_engines/special/merge.md @@ -0,0 +1,70 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 36 +toc_title: "\u30DE\u30FC\u30B8" +--- + +# マージ {#merge} + +その `Merge` エンジン(と混同しないように `MergeTree`)データ自体を格納しませんが、同時に他のテーブルの任意の数からの読み取りを可能にします。 +読書は自動的に平行である。 表への書き込みはサポートされません。 読み取り時には、実際に読み取られているテーブルのインデックスが存在する場合に使用されます。 +その `Merge` テーブルのデータベース名と正規表現です。 + +例えば: + +``` sql +Merge(hits, '^WatchLog') +``` + +データはテーブルから読み込まれます。 `hits` 正規表現に一致する名前を持つデータベース ‘`^WatchLog`’. + +データベース名の代わりに、文字列を返す定数式を使用できます。 例えば, `currentDatabase()`. 
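+
+たとえば、次の定義は現在のデータベース内で名前が ‘WatchLog’ で始まるすべてのテーブルを対象にします(列定義は下の例2のスキーマを流用した仮定です):
+
+``` sql
+CREATE TABLE WatchLog_all (date Date, UserId Int64, EventType String, Cnt UInt64)
+ENGINE = Merge(currentDatabase(), '^WatchLog')
+```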
+ +Regular expressions — [re2unit description in lists](https://github.com/google/re2) (PCREのサブセットをサポート)、大文字と小文字を区別します。 +正規表現のエスケープシンボルに関する注意事項を参照してください。 “match” セクション。 + +読み込むテーブルを選択するとき、 `Merge` 正規表現と一致していても、テーブル自体は選択されません。 これはループを避けるためです。 +それは可能に作成二つ `Merge` お互いのデータを無限に読み取ろうとするテーブルですが、これは良い考えではありません。 + +使用する典型的な方法 `Merge` エンジンは多数を使用のためです `TinyLog` 単一のテーブルと同様にテーブル。 + +例2: + +古いテーブル(watchlog\_old)があり、データを新しいテーブル(watchlog\_new)に移動せずにパーティション分割を変更することにしたとしましょう。 + +``` sql +CREATE TABLE WatchLog_old(date Date, UserId Int64, EventType String, Cnt UInt64) +ENGINE=MergeTree(date, (UserId, EventType), 8192); +INSERT INTO WatchLog_old VALUES ('2018-01-01', 1, 'hit', 3); + +CREATE TABLE WatchLog_new(date Date, UserId Int64, EventType String, Cnt UInt64) +ENGINE=MergeTree PARTITION BY date ORDER BY (UserId, EventType) SETTINGS index_granularity=8192; +INSERT INTO WatchLog_new VALUES ('2018-01-02', 2, 'hit', 3); + +CREATE TABLE WatchLog as WatchLog_old ENGINE=Merge(currentDatabase(), '^WatchLog'); + +SELECT * +FROM WatchLog +``` + +``` text +┌───────date─┬─UserId─┬─EventType─┬─Cnt─┐ +│ 2018-01-01 │ 1 │ hit │ 3 │ +└────────────┴────────┴───────────┴─────┘ +┌───────date─┬─UserId─┬─EventType─┬─Cnt─┐ +│ 2018-01-02 │ 2 │ hit │ 3 │ +└────────────┴────────┴───────────┴─────┘ +``` + +## 仮想列 {#virtual-columns} + +- `_table` — Contains the name of the table from which data was read. Type: [文字列](../../../sql_reference/data_types/string.md). + + 定数条件を設定することができます `_table` で `WHERE/PREWHERE` 句(例えば, `WHERE _table='xyz'`). この場合、読み取り操作はそのテーブルに対してのみ実行されます。 `_table` は満足しているので、 `_table` 列は索引として機能します。 + +**また見なさい** + +- [仮想列](index.md#table_engines-virtual_columns) + +[元の記事](https://clickhouse.tech/docs/en/operations/table_engines/merge/) diff --git a/docs/ja/engines/table_engines/special/null.md b/docs/ja/engines/table_engines/special/null.md deleted file mode 120000 index d1b167b3b19..00000000000 --- a/docs/ja/engines/table_engines/special/null.md +++ /dev/null @@ -1 +0,0 @@ -../../../../en/engines/table_engines/special/null.md \ No newline at end of file diff --git a/docs/ja/engines/table_engines/special/null.md b/docs/ja/engines/table_engines/special/null.md new file mode 100644 index 00000000000..24af505190d --- /dev/null +++ b/docs/ja/engines/table_engines/special/null.md @@ -0,0 +1,14 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 38 +toc_title: "\u30CC\u30EB" +--- + +# ヌル {#null} + +Nullテーブルに書き込む場合、データは無視されます。 Nullテーブルから読み取る場合、応答は空です。 + +ただし、nullテーブルにマテリアライズドビューを作成できます。 したがって、テーブルに書き込まれたデータはビューに表示されます。 + +[元の記事](https://clickhouse.tech/docs/en/operations/table_engines/null/) diff --git a/docs/ja/engines/table_engines/special/set.md b/docs/ja/engines/table_engines/special/set.md deleted file mode 120000 index 82cd9cf9b40..00000000000 --- a/docs/ja/engines/table_engines/special/set.md +++ /dev/null @@ -1 +0,0 @@ -../../../../en/engines/table_engines/special/set.md \ No newline at end of file diff --git a/docs/ja/engines/table_engines/special/set.md b/docs/ja/engines/table_engines/special/set.md new file mode 100644 index 00000000000..6099fca639e --- /dev/null +++ b/docs/ja/engines/table_engines/special/set.md @@ -0,0 +1,19 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 39 +toc_title: "\u30BB\u30C3\u30C8" +--- + +# セット {#set} + +常にramにあるデータセット。 これは、in演算子の右側で使用するためのものです(セクションを参照 “IN operators”). 
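+
+先に、このエンジンの典型的な使い方を最小のスケッチで示します(`events` テーブルとその列名はこの説明のための仮定です):
+
+``` sql
+-- IN 演算子の右側で参照するためのセット(仮定の例)
+CREATE TABLE allowed_users (UserID UInt64) ENGINE = Set;
+
+INSERT INTO allowed_users VALUES (1), (2), (3);
+
+-- セットは IN の右側でのみ使用できる(テーブルから直接 SELECT はできない)
+SELECT count() FROM events WHERE UserID IN allowed_users;
+```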
+ +INSERTを使用すると、テーブルにデータを挿入できます。 新しい要素がデータセットに追加され、重複が無視されます。 +しかし、表からselectを実行することはできません。 データを取得する唯一の方法は、in演算子の右半分でデータを使用することです。 + +データは常にramにあります。 insertの場合、挿入されたデータのブロックもディスク上のテーブルのディレクトリに書き込まれます。 サーバーを起動すると、このデータはramにロードされます。 つまり、再起動後もデータはそのまま残ります。 + +ラサーバを再起動し、ブロックのデータのディスクが失われることも想定されます。 後者の場合、破損したデータを含むファイルを手動で削除する必要があります。 + +[元の記事](https://clickhouse.tech/docs/en/operations/table_engines/set/) diff --git a/docs/ja/engines/table_engines/special/url.md b/docs/ja/engines/table_engines/special/url.md deleted file mode 120000 index 563585e8dfc..00000000000 --- a/docs/ja/engines/table_engines/special/url.md +++ /dev/null @@ -1 +0,0 @@ -../../../../en/engines/table_engines/special/url.md \ No newline at end of file diff --git a/docs/ja/engines/table_engines/special/url.md b/docs/ja/engines/table_engines/special/url.md new file mode 100644 index 00000000000..3c5d1ea8448 --- /dev/null +++ b/docs/ja/engines/table_engines/special/url.md @@ -0,0 +1,82 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 41 +toc_title: URL +--- + +# URL(URL,フォーマット) {#table_engines-url} + +リモートhttp/httpsサーバー上のデータを管理します。 このエンジンは同様です +に [ファイル](file.md) エンジン。 + +## Clickhouseサーバーでのエンジンの使用 {#using-the-engine-in-the-clickhouse-server} + +その `format` ClickHouseが使用できるものでなければなりません +`SELECT` クエリと、必要に応じて、 `INSERTs`. サポートさ +[形式](../../../interfaces/formats.md#formats). + +その `URL` 統一リソースロケータの構造に準拠する必要があります。 指定されたURLはサーバーを指す必要があります +HTTPまたはHTTPSを使用します。 これには何も必要ありません +サーバーからの応答を取得するための追加のヘッダー。 + +`INSERT` と `SELECT` 質問への `POST` と `GET` リクエスト, +それぞれ。 処理のため `POST` ご要望遠隔のサーバをサポートする必要があ +[チャンク転送エンコード](https://en.wikipedia.org/wiki/Chunked_transfer_encoding). + +HTTP GETリダイレクトホップの最大数を制限するには、次のコマンドを使用します [max\_http\_get\_redirects](../../../operations/settings/settings.md#setting-max_http_get_redirects) 設定。 + +**例えば:** + +**1.** 作成する `url_engine_table` サーバー上の表 : + +``` sql +CREATE TABLE url_engine_table (word String, value UInt64) +ENGINE=URL('http://127.0.0.1:12345/', CSV) +``` + +**2.** 標準のPython3ツールを使用して基本的なHTTPサーバーを作成し、 +それを開始: + +``` python3 +from http.server import BaseHTTPRequestHandler, HTTPServer + +class CSVHTTPServer(BaseHTTPRequestHandler): + def do_GET(self): + self.send_response(200) + self.send_header('Content-type', 'text/csv') + self.end_headers() + + self.wfile.write(bytes('Hello,1\nWorld,2\n', "utf-8")) + +if __name__ == "__main__": + server_address = ('127.0.0.1', 12345) + HTTPServer(server_address, CSVHTTPServer).serve_forever() +``` + +``` bash +$ python3 server.py +``` + +**3.** 要求データ: + +``` sql +SELECT * FROM url_engine_table +``` + +``` text +┌─word──┬─value─┐ +│ Hello │ 1 │ +│ World │ 2 │ +└───────┴───────┘ +``` + +## 実装の詳細 {#details-of-implementation} + +- 読み書きできる並列 +- サポートなし: + - `ALTER` と `SELECT...SAMPLE` オペレーション + - インデックス。 + - 複製だ + +[元の記事](https://clickhouse.tech/docs/en/operations/table_engines/url/) diff --git a/docs/ja/engines/table_engines/special/view.md b/docs/ja/engines/table_engines/special/view.md deleted file mode 120000 index 806e81a3f7e..00000000000 --- a/docs/ja/engines/table_engines/special/view.md +++ /dev/null @@ -1 +0,0 @@ -../../../../en/engines/table_engines/special/view.md \ No newline at end of file diff --git a/docs/ja/engines/table_engines/special/view.md b/docs/ja/engines/table_engines/special/view.md new file mode 100644 index 00000000000..cee57bd86e1 --- /dev/null +++ b/docs/ja/engines/table_engines/special/view.md @@ -0,0 +1,12 @@ +--- +machine_translated: true +machine_translated_rev: 
d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 42 +toc_title: "\u30D3\u30E5\u30FC" +--- + +# ビュー {#table_engines-view} + +ビューを実装するために使用されます(詳細については、 `CREATE VIEW query`). で格納していないデータだけで店舗を指定された `SELECT` クエリ。 テーブルから読み取るときは、このクエリを実行します(クエリから不要な列をすべて削除します)。 + +[元の記事](https://clickhouse.tech/docs/en/operations/table_engines/view/) diff --git a/docs/ja/faq/general.md b/docs/ja/faq/general.md deleted file mode 120000 index bc267395b1b..00000000000 --- a/docs/ja/faq/general.md +++ /dev/null @@ -1 +0,0 @@ -../../en/faq/general.md \ No newline at end of file diff --git a/docs/ja/faq/general.md b/docs/ja/faq/general.md new file mode 100644 index 00000000000..e50a7aa5081 --- /dev/null +++ b/docs/ja/faq/general.md @@ -0,0 +1,60 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 78 +toc_title: "\u4E00\u822C\u7684\u306A\u8CEA\u554F" +--- + +# 一般的な質問 {#general-questions} + +## なぜmapreduceのようなものを使わないのですか? {#why-not-use-something-like-mapreduce} + +MapReduceのようなシステムは、reduce操作が分散ソートに基づいている分散計算システムとして参照することができます。 このクラ [Apache Hadoop](http://hadoop.apache.org). Yandexのは、その社内ソリューション、YTを使用しています。 + +これらのシス つまり、webインターフェイスのバックエンドとして使用することはできません。 これらのシステムなに役立つリアルタイムデータの更新をした。 分散ソートは、操作の結果とすべての中間結果(存在する場合)が単一のサーバーのramにある場合、reduce操作を実行する最善の方法ではありません。 このような場合、ハッシュテーブルはreduce操作を実行するのに最適な方法です。 map-reduceタスクを最適化する一般的なアプローチは、ramのハッシュテーブルを使用した事前集約(部分削減)です。 ユーザーはこの最適化を手動で実行します。 分散ソートは、単純なmap-reduceタスクを実行するときのパフォーマンス低下の主な原因の一つです。 + +ほとんどのmapreduce実装では、クラスター上で任意のコードを実行できます。 しかし、宣言的なクエリ言語は、実験を迅速に実行するためにolapに適しています。 たとえば、hadoopにはhiveとpigがあります。 また、sparkのためのcloudera impalaまたはshark(旧式)、spark sql、presto、およびapache drillも検討してください。 このようなタスクを実行するときのパフォーマンスは、特殊なシステムに比べて非常に低いですが、比較的待ち時間が長いため、これらのシステムを + +## ORACLEをODBC経由で使用するときにエンコードに問題がある場合はどうなりますか? {#oracle-odbc-encodings} + +外部辞書のソースとしてodbcドライバーを使用してoracleを使用する場合は、正しい値を設定する必要があります。 `NLS_LANG` の環境変数 `/etc/default/clickhouse`. 詳細については、 [Oracle NLS\_LANG FAQ](https://www.oracle.com/technetwork/products/globalization/nls-lang-099431.html). + +**例えば** + +``` sql +NLS_LANG=RUSSIAN_RUSSIA.UTF8 +``` + +## ClickHouseからファイルにデータをエクスポートするには? {#how-to-export-to-file} + +### INTO OUTFILE句の使用 {#using-into-outfile-clause} + +追加 [INTO OUTFILE](../query_language/select/#into-outfile-clause) クエリへの句。 + +例えば: + +``` sql +SELECT * FROM table INTO OUTFILE 'file' +``` + +デフォルトでは、clickhouseは [タブ区切り](../interfaces/formats.md#tabseparated) 出力データの形式。 を選択する [データ形式](../interfaces/formats.md)、を使用 [フォーマット句](../query_language/select/#format-clause). + +例えば: + +``` sql +SELECT * FROM table INTO OUTFILE 'file' FORMAT CSV +``` + +### ファイルエンジンテーブルの使用 {#using-a-file-engine-table} + +見る [ファイル](../engines/table_engines/special/file.md). + +### コマンドラインのリダイ {#using-command-line-redirection} + +``` sql +$ clickhouse-client --query "SELECT * from table" --format FormatName > result.txt +``` + +見る [クリックハウス-顧客](../interfaces/cli.md). + +{## [元の記事](https://clickhouse.tech/docs/en/faq/general/) ##} diff --git a/docs/ja/faq/index.md b/docs/ja/faq/index.md deleted file mode 120000 index 4bb6f727f8a..00000000000 --- a/docs/ja/faq/index.md +++ /dev/null @@ -1 +0,0 @@ -../../en/faq/index.md \ No newline at end of file diff --git a/docs/ja/faq/index.md b/docs/ja/faq/index.md new file mode 100644 index 00000000000..d0338c572e4 --- /dev/null +++ b/docs/ja/faq/index.md @@ -0,0 +1,8 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_folder_title: F.A.Q. 
+toc_priority: 76 +--- + + diff --git a/docs/ja/getting_started/example_datasets/amplab_benchmark.md b/docs/ja/getting_started/example_datasets/amplab_benchmark.md deleted file mode 120000 index 78c93906bb0..00000000000 --- a/docs/ja/getting_started/example_datasets/amplab_benchmark.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/getting_started/example_datasets/amplab_benchmark.md \ No newline at end of file diff --git a/docs/ja/getting_started/example_datasets/amplab_benchmark.md b/docs/ja/getting_started/example_datasets/amplab_benchmark.md new file mode 100644 index 00000000000..2bf079238ba --- /dev/null +++ b/docs/ja/getting_started/example_datasets/amplab_benchmark.md @@ -0,0 +1,129 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 17 +toc_title: "AMPLab Big Data\u30D9\u30F3\u30C1\u30DE\u30FC\u30AF" +--- + +# AMPLab Big Dataベンチマーク {#amplab-big-data-benchmark} + +見るhttps://amplab.cs.berkeley.edu/benchmark/ + +で無料アカウントにサインアップhttps://aws.amazon.com.それはクレジットカード、電子メール、および電話番号が必要です。 新しいアクセスキーを取得するhttps://console.aws.amazon.com/iam/home?nc2=h\_m\_sc\#security\_credential + +コンソールで以下を実行します: + +``` bash +$ sudo apt-get install s3cmd +$ mkdir tiny; cd tiny; +$ s3cmd sync s3://big-data-benchmark/pavlo/text-deflate/tiny/ . +$ cd .. +$ mkdir 1node; cd 1node; +$ s3cmd sync s3://big-data-benchmark/pavlo/text-deflate/1node/ . +$ cd .. +$ mkdir 5nodes; cd 5nodes; +$ s3cmd sync s3://big-data-benchmark/pavlo/text-deflate/5nodes/ . +$ cd .. +``` + +次のclickhouseクエリを実行します: + +``` sql +CREATE TABLE rankings_tiny +( + pageURL String, + pageRank UInt32, + avgDuration UInt32 +) ENGINE = Log; + +CREATE TABLE uservisits_tiny +( + sourceIP String, + destinationURL String, + visitDate Date, + adRevenue Float32, + UserAgent String, + cCode FixedString(3), + lCode FixedString(6), + searchWord String, + duration UInt32 +) ENGINE = MergeTree(visitDate, visitDate, 8192); + +CREATE TABLE rankings_1node +( + pageURL String, + pageRank UInt32, + avgDuration UInt32 +) ENGINE = Log; + +CREATE TABLE uservisits_1node +( + sourceIP String, + destinationURL String, + visitDate Date, + adRevenue Float32, + UserAgent String, + cCode FixedString(3), + lCode FixedString(6), + searchWord String, + duration UInt32 +) ENGINE = MergeTree(visitDate, visitDate, 8192); + +CREATE TABLE rankings_5nodes_on_single +( + pageURL String, + pageRank UInt32, + avgDuration UInt32 +) ENGINE = Log; + +CREATE TABLE uservisits_5nodes_on_single +( + sourceIP String, + destinationURL String, + visitDate Date, + adRevenue Float32, + UserAgent String, + cCode FixedString(3), + lCode FixedString(6), + searchWord String, + duration UInt32 +) ENGINE = MergeTree(visitDate, visitDate, 8192); +``` + +コンソールに戻る: + +``` bash +$ for i in tiny/rankings/*.deflate; do echo $i; zlib-flate -uncompress < $i | clickhouse-client --host=example-perftest01j --query="INSERT INTO rankings_tiny FORMAT CSV"; done +$ for i in tiny/uservisits/*.deflate; do echo $i; zlib-flate -uncompress < $i | clickhouse-client --host=example-perftest01j --query="INSERT INTO uservisits_tiny FORMAT CSV"; done +$ for i in 1node/rankings/*.deflate; do echo $i; zlib-flate -uncompress < $i | clickhouse-client --host=example-perftest01j --query="INSERT INTO rankings_1node FORMAT CSV"; done +$ for i in 1node/uservisits/*.deflate; do echo $i; zlib-flate -uncompress < $i | clickhouse-client --host=example-perftest01j --query="INSERT INTO uservisits_1node FORMAT CSV"; done +$ for i in 5nodes/rankings/*.deflate; do echo $i; zlib-flate -uncompress 
< $i | clickhouse-client --host=example-perftest01j --query="INSERT INTO rankings_5nodes_on_single FORMAT CSV"; done +$ for i in 5nodes/uservisits/*.deflate; do echo $i; zlib-flate -uncompress < $i | clickhouse-client --host=example-perftest01j --query="INSERT INTO uservisits_5nodes_on_single FORMAT CSV"; done +``` + +デー: + +``` sql +SELECT pageURL, pageRank FROM rankings_1node WHERE pageRank > 1000 + +SELECT substring(sourceIP, 1, 8), sum(adRevenue) FROM uservisits_1node GROUP BY substring(sourceIP, 1, 8) + +SELECT + sourceIP, + sum(adRevenue) AS totalRevenue, + avg(pageRank) AS pageRank +FROM rankings_1node ALL INNER JOIN +( + SELECT + sourceIP, + destinationURL AS pageURL, + adRevenue + FROM uservisits_1node + WHERE (visitDate > '1980-01-01') AND (visitDate < '1980-04-01') +) USING pageURL +GROUP BY sourceIP +ORDER BY totalRevenue DESC +LIMIT 1 +``` + +[元の記事](https://clickhouse.tech/docs/en/getting_started/example_datasets/amplab_benchmark/) diff --git a/docs/ja/getting_started/example_datasets/criteo.md b/docs/ja/getting_started/example_datasets/criteo.md deleted file mode 120000 index 507dc68cd62..00000000000 --- a/docs/ja/getting_started/example_datasets/criteo.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/getting_started/example_datasets/criteo.md \ No newline at end of file diff --git a/docs/ja/getting_started/example_datasets/criteo.md b/docs/ja/getting_started/example_datasets/criteo.md new file mode 100644 index 00000000000..63aee0bf6d9 --- /dev/null +++ b/docs/ja/getting_started/example_datasets/criteo.md @@ -0,0 +1,81 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 19 +toc_title: "Criteo\u304B\u3089\u306ETerabyte\u30AF\u30EA\u30C3\u30AF\u30ED\u30B0" +--- + +# Criteoからのクリックログのテラバイト {#terabyte-of-click-logs-from-criteo} + +データのダウンロードhttp://labs.criteo.com/downloads/download-terabyte-click-logs/ + +ログをインポートするテーブルを作成する: + +``` sql +CREATE TABLE criteo_log (date Date, clicked UInt8, int1 Int32, int2 Int32, int3 Int32, int4 Int32, int5 Int32, int6 Int32, int7 Int32, int8 Int32, int9 Int32, int10 Int32, int11 Int32, int12 Int32, int13 Int32, cat1 String, cat2 String, cat3 String, cat4 String, cat5 String, cat6 String, cat7 String, cat8 String, cat9 String, cat10 String, cat11 String, cat12 String, cat13 String, cat14 String, cat15 String, cat16 String, cat17 String, cat18 String, cat19 String, cat20 String, cat21 String, cat22 String, cat23 String, cat24 String, cat25 String, cat26 String) ENGINE = Log +``` + +ダウンロードデータ: + +``` bash +$ for i in {00..23}; do echo $i; zcat datasets/criteo/day_${i#0}.gz | sed -r 's/^/2000-01-'${i/00/24}'\t/' | clickhouse-client --host=example-perftest01j --query="INSERT INTO criteo_log FORMAT TabSeparated"; done +``` + +変換されたデータのテーブルを作成します: + +``` sql +CREATE TABLE criteo +( + date Date, + clicked UInt8, + int1 Int32, + int2 Int32, + int3 Int32, + int4 Int32, + int5 Int32, + int6 Int32, + int7 Int32, + int8 Int32, + int9 Int32, + int10 Int32, + int11 Int32, + int12 Int32, + int13 Int32, + icat1 UInt32, + icat2 UInt32, + icat3 UInt32, + icat4 UInt32, + icat5 UInt32, + icat6 UInt32, + icat7 UInt32, + icat8 UInt32, + icat9 UInt32, + icat10 UInt32, + icat11 UInt32, + icat12 UInt32, + icat13 UInt32, + icat14 UInt32, + icat15 UInt32, + icat16 UInt32, + icat17 UInt32, + icat18 UInt32, + icat19 UInt32, + icat20 UInt32, + icat21 UInt32, + icat22 UInt32, + icat23 UInt32, + icat24 UInt32, + icat25 UInt32, + icat26 UInt32 +) ENGINE = MergeTree(date, intHash32(icat1), (date, intHash32(icat1)), 8192) 
+``` + +生のログからデータを変換し、第二のテーブルにそれを置きます: + +``` sql +INSERT INTO criteo SELECT date, clicked, int1, int2, int3, int4, int5, int6, int7, int8, int9, int10, int11, int12, int13, reinterpretAsUInt32(unhex(cat1)) AS icat1, reinterpretAsUInt32(unhex(cat2)) AS icat2, reinterpretAsUInt32(unhex(cat3)) AS icat3, reinterpretAsUInt32(unhex(cat4)) AS icat4, reinterpretAsUInt32(unhex(cat5)) AS icat5, reinterpretAsUInt32(unhex(cat6)) AS icat6, reinterpretAsUInt32(unhex(cat7)) AS icat7, reinterpretAsUInt32(unhex(cat8)) AS icat8, reinterpretAsUInt32(unhex(cat9)) AS icat9, reinterpretAsUInt32(unhex(cat10)) AS icat10, reinterpretAsUInt32(unhex(cat11)) AS icat11, reinterpretAsUInt32(unhex(cat12)) AS icat12, reinterpretAsUInt32(unhex(cat13)) AS icat13, reinterpretAsUInt32(unhex(cat14)) AS icat14, reinterpretAsUInt32(unhex(cat15)) AS icat15, reinterpretAsUInt32(unhex(cat16)) AS icat16, reinterpretAsUInt32(unhex(cat17)) AS icat17, reinterpretAsUInt32(unhex(cat18)) AS icat18, reinterpretAsUInt32(unhex(cat19)) AS icat19, reinterpretAsUInt32(unhex(cat20)) AS icat20, reinterpretAsUInt32(unhex(cat21)) AS icat21, reinterpretAsUInt32(unhex(cat22)) AS icat22, reinterpretAsUInt32(unhex(cat23)) AS icat23, reinterpretAsUInt32(unhex(cat24)) AS icat24, reinterpretAsUInt32(unhex(cat25)) AS icat25, reinterpretAsUInt32(unhex(cat26)) AS icat26 FROM criteo_log; + +DROP TABLE criteo_log; +``` + +[元の記事](https://clickhouse.tech/docs/en/getting_started/example_datasets/criteo/) diff --git a/docs/ja/getting_started/example_datasets/index.md b/docs/ja/getting_started/example_datasets/index.md deleted file mode 120000 index c891314f915..00000000000 --- a/docs/ja/getting_started/example_datasets/index.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/getting_started/example_datasets/index.md \ No newline at end of file diff --git a/docs/ja/getting_started/example_datasets/index.md b/docs/ja/getting_started/example_datasets/index.md new file mode 100644 index 00000000000..d2b439b3683 --- /dev/null +++ b/docs/ja/getting_started/example_datasets/index.md @@ -0,0 +1,22 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_folder_title: Example Datasets +toc_priority: 12 +toc_title: "\u5C0E\u5165" +--- + +# データセット例 {#example-datasets} + +この方法について説明し得る例データセットおよび輸入しclickhouse. 
+For some datasets, example queries are also available.
+
+- [Anonymized Yandex.Metrica Dataset](metrica.md)
+- [Star Schema Benchmark](star_schema.md)
+- [WikiStat](wikistat.md)
+- [Terabyte of Click Logs from Criteo](criteo.md)
+- [AMPLab Big Data Benchmark](amplab_benchmark.md)
+- [New York Taxi Data](nyc_taxi.md)
+- [OnTime](ontime.md)
+
+[Original article](https://clickhouse.tech/docs/en/getting_started/example_datasets)
diff --git a/docs/ja/getting_started/example_datasets/metrica.md b/docs/ja/getting_started/example_datasets/metrica.md
deleted file mode 120000
index 984023973eb..00000000000
--- a/docs/ja/getting_started/example_datasets/metrica.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/getting_started/example_datasets/metrica.md
\ No newline at end of file
diff --git a/docs/ja/getting_started/example_datasets/metrica.md b/docs/ja/getting_started/example_datasets/metrica.md
new file mode 100644
index 00000000000..5fda04b7a03
--- /dev/null
+++ b/docs/ja/getting_started/example_datasets/metrica.md
@@ -0,0 +1,70 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 21
+toc_title: "Yandex\u306E\u3002Metrica\u30C7\u30FC\u30BF"
+---
+
+# Anonymized Yandex.Metrica Data {#anonymized-yandex-metrica-data}
+
+The dataset consists of two tables containing anonymized data about hits (`hits_v1`) and visits (`visits_v1`) of Yandex.Metrica. You can read more about Yandex.Metrica in the [ClickHouse history](../../introduction/history.md) section.
+
+Both tables can be downloaded as compressed `tsv.xz` files or as prepared partitions. In addition, an extended version of the `hits` table containing 100 million rows is available as TSV at https://clickhouse-datasets.s3.yandex.net/hits/tsv/hits\_100m\_obfuscated\_v1.tsv.xz and as prepared partitions at https://clickhouse-datasets.s3.yandex.net/hits/partitions/hits\_100m\_obfuscated\_v1.tar.xz.
+
+## Obtaining Tables from Prepared Partitions {#obtaining-tables-from-prepared-partitions}
+
+Download and import the hits table:
+
+``` bash
+curl -O https://clickhouse-datasets.s3.yandex.net/hits/partitions/hits_v1.tar
+tar xvf hits_v1.tar -C /var/lib/clickhouse # path to ClickHouse data directory
+# check permissions on unpacked data, fix if required
+sudo service clickhouse-server restart
+clickhouse-client --query "SELECT COUNT(*) FROM datasets.hits_v1"
+```
+
+Download and import visits:
+
+``` bash
+curl -O https://clickhouse-datasets.s3.yandex.net/visits/partitions/visits_v1.tar
+tar xvf visits_v1.tar -C /var/lib/clickhouse # path to ClickHouse data directory
+# check permissions on unpacked data, fix if required
+sudo service clickhouse-server restart
+clickhouse-client --query "SELECT COUNT(*) FROM datasets.visits_v1"
+```
+
+## Obtaining Tables from a Compressed TSV File {#obtaining-tables-from-compressed-tsv-file}
+
+Download and import hits from a compressed TSV file:
+
+``` bash
+curl https://clickhouse-datasets.s3.yandex.net/hits/tsv/hits_v1.tsv.xz | unxz --threads=`nproc` > hits_v1.tsv
+# now create table
+clickhouse-client --query "CREATE DATABASE IF NOT EXISTS datasets"
+clickhouse-client --query "CREATE TABLE datasets.hits_v1 ( WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16, EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RegionID UInt32, UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, URLDomain String, RefererDomain String, Refresh UInt8, IsRobot UInt8, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16, UserAgentMinor FixedString(2), CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8, MobilePhoneModel String,
Params String, IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, WindowClientWidth UInt16, WindowClientHeight UInt16, ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, SilverlightVersion2 UInt8, SilverlightVersion3 UInt32, SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8, FUniqID UInt64, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, IsParameter UInt8, DontCountHits UInt8, WithHash UInt8, HitColor FixedString(1), UTCEventTime DateTime, Age UInt8, Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), RemoteIP UInt32, RemoteIP6 FixedString(16), WindowName Int32, OpenerName Int32, HistoryLength Int16, BrowserLanguage FixedString(2), BrowserCountry FixedString(2), SocialNetwork String, SocialAction String, HTTPError UInt16, SendTiming Int32, DNSTiming Int32, ConnectTiming Int32, ResponseStartTiming Int32, ResponseEndTiming Int32, FetchTiming Int32, RedirectTiming Int32, DOMInteractiveTiming Int32, DOMContentLoadedTiming Int32, DOMCompleteTiming Int32, LoadEventStartTiming Int32, LoadEventEndTiming Int32, NSToDOMContentLoadedTiming Int32, FirstPaintTiming Int32, RedirectCount Int8, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String, ParamCurrency FixedString(3), ParamCurrencyID UInt16, GoalsReached Array(UInt32), OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64, URLHash UInt64, CLID UInt32, YCLID UInt64, ShareService String, ShareURL String, ShareTitle String, ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), IslandID FixedString(16), RequestNum UInt32, RequestTry UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192" +# import data +cat hits_v1.tsv | clickhouse-client --query "INSERT INTO datasets.hits_v1 FORMAT TSV" --max_insert_block_size=100000 +# optionally you can optimize table +clickhouse-client --query "OPTIMIZE TABLE datasets.hits_v1 FINAL" +clickhouse-client --query "SELECT COUNT(*) FROM datasets.hits_v1" +``` + +圧縮tsv-fileからの訪問のダウンロードとインポート: + +``` bash +curl https://clickhouse-datasets.s3.yandex.net/visits/tsv/visits_v1.tsv.xz | unxz --threads=`nproc` > visits_v1.tsv +# now create table +clickhouse-client --query "CREATE DATABASE IF NOT EXISTS datasets" +clickhouse-client --query "CREATE TABLE datasets.visits_v1 ( CounterID UInt32, StartDate Date, Sign Int8, IsNew UInt8, VisitID UInt64, UserID UInt64, StartTime DateTime, Duration UInt32, UTCStartTime DateTime, PageViews Int32, Hits Int32, IsBounce UInt8, Referer String, StartURL String, RefererDomain String, StartURLDomain String, EndURL String, LinkURL String, IsDownload UInt8, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, PlaceID Int32, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), IsYandex UInt8, GoalReachesDepth Int32, GoalReachesURL Int32, GoalReachesAny Int32, SocialSourceNetworkID UInt8, SocialSourcePage String, MobilePhoneModel String, ClientEventTime DateTime, RegionID UInt32, ClientIP UInt32, ClientIP6 
FixedString(16), RemoteIP UInt32, RemoteIP6 FixedString(16), IPNetworkID UInt32, SilverlightVersion3 UInt32, CodeVersion UInt32, ResolutionWidth UInt16, ResolutionHeight UInt16, UserAgentMajor UInt16, UserAgentMinor UInt16, WindowClientWidth UInt16, WindowClientHeight UInt16, SilverlightVersion2 UInt8, SilverlightVersion4 UInt16, FlashVersion3 UInt16, FlashVersion4 UInt16, ClientTimeZone Int16, OS UInt8, UserAgent UInt8, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, NetMajor UInt8, NetMinor UInt8, MobilePhone UInt8, SilverlightVersion1 UInt8, Age UInt8, Sex UInt8, Income UInt8, JavaEnable UInt8, CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, BrowserLanguage UInt16, BrowserCountry UInt16, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), Params Array(String), Goals Nested(ID UInt32, Serial UInt32, EventTime DateTime, Price Int64, OrderID String, CurrencyID UInt32), WatchIDs Array(UInt64), ParamSumPrice Int64, ParamCurrency FixedString(3), ParamCurrencyID UInt16, ClickLogID UInt64, ClickEventID Int32, ClickGoodEvent Int32, ClickEventTime DateTime, ClickPriorityID Int32, ClickPhraseID Int32, ClickPageID Int32, ClickPlaceID Int32, ClickTypeID Int32, ClickResourceID Int32, ClickCost UInt32, ClickClientIP UInt32, ClickDomainID UInt32, ClickURL String, ClickAttempt UInt8, ClickOrderID UInt32, ClickBannerID UInt32, ClickMarketCategoryID UInt32, ClickMarketPP UInt32, ClickMarketCategoryName String, ClickMarketPPName String, ClickAWAPSCampaignName String, ClickPageName String, ClickTargetType UInt16, ClickTargetPhraseID UInt64, ClickContextType UInt8, ClickSelectType Int8, ClickOptions String, ClickGroupBannerID Int32, OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, FirstVisit DateTime, PredLastVisit Date, LastVisit Date, TotalVisits UInt32, TraficSource Nested(ID Int8, SearchEngineID UInt16, AdvEngineID UInt8, PlaceID UInt16, SocialSourceNetworkID UInt8, Domain String, SearchPhrase String, SocialSourcePage String), Attendance FixedString(16), CLID UInt32, YCLID UInt64, NormalizedRefererHash UInt64, SearchPhraseHash UInt64, RefererDomainHash UInt64, NormalizedStartURLHash UInt64, StartURLDomainHash UInt64, NormalizedEndURLHash UInt64, TopLevelDomain UInt64, URLScheme UInt64, OpenstatServiceNameHash UInt64, OpenstatCampaignIDHash UInt64, OpenstatAdIDHash UInt64, OpenstatSourceIDHash UInt64, UTMSourceHash UInt64, UTMMediumHash UInt64, UTMCampaignHash UInt64, UTMContentHash UInt64, UTMTermHash UInt64, FromHash UInt64, WebVisorEnabled UInt8, WebVisorActivity UInt32, ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), Market Nested(Type UInt8, GoalID UInt32, OrderID String, OrderPrice Int64, PP UInt32, DirectPlaceID UInt32, DirectOrderID UInt32, DirectBannerID UInt32, GoodID String, GoodName String, GoodQuantity Int32, GoodPrice Int64), IslandID FixedString(16)) ENGINE = CollapsingMergeTree(Sign) PARTITION BY toYYYYMM(StartDate) ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192" +# import data +cat visits_v1.tsv | clickhouse-client --query "INSERT INTO datasets.visits_v1 FORMAT TSV" --max_insert_block_size=100000 +# optionally you can optimize table +clickhouse-client --query "OPTIMIZE TABLE datasets.visits_v1 FINAL" +clickhouse-client --query "SELECT COUNT(*) FROM 
datasets.visits_v1" +``` + +## クエリ例 {#example-queries} + +[ClickHouseチュートリアル](../../getting_started/tutorial.md) は、Yandexのに基づいています。Metricaデータの推奨使うことができるようにこのデータやトランザクションデータだけを通してチュートリアルです。 + +これらのテーブルへのクエリの追加の例は、 [ステートフルテスト](https://github.com/ClickHouse/ClickHouse/tree/master/tests/queries/1_stateful) ClickHouseの(彼らは名前が付けられています `test.hists` と `test.visits` そこ)。 diff --git a/docs/ja/getting_started/example_datasets/nyc_taxi.md b/docs/ja/getting_started/example_datasets/nyc_taxi.md deleted file mode 120000 index c47fc83a293..00000000000 --- a/docs/ja/getting_started/example_datasets/nyc_taxi.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/getting_started/example_datasets/nyc_taxi.md \ No newline at end of file diff --git a/docs/ja/getting_started/example_datasets/nyc_taxi.md b/docs/ja/getting_started/example_datasets/nyc_taxi.md new file mode 100644 index 00000000000..340af79c9b3 --- /dev/null +++ b/docs/ja/getting_started/example_datasets/nyc_taxi.md @@ -0,0 +1,390 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 16 +toc_title: "\u30CB\u30E5\u30FC\u30E8\u30FC\u30AF" +--- + +# ニューヨーク {#new-york-taxi-data} + +このデータセットは二つの方法で取得できます: + +- 生データからインポート +- ダウンロード調の間仕切り + +## 生データをインポートする方法 {#how-to-import-the-raw-data} + +見るhttps://github.com/toddwschneider/nyc-taxi-data とhttp://tech.marksblogg.com/billion-nyc-taxi-rides-redshift.html データセットの説明とダウンロードの手順について。 + +ダウンロードは、csvファイル内の非圧縮データの約227ギガバイトになります。 ダウンロードは約時間以上、1gbit接続方法(並列からのダウンロードs3.amazonaws.com 復少なくとも半分の1ギガビチャンネル) +一部のファイルがダウンロードす。 ファイルサイズを確認し、疑わしいものを再ダウンロードします。 + +一部のファイルが含無効にされています。 次のように修正できます: + +``` bash +sed -E '/(.*,){18,}/d' data/yellow_tripdata_2010-02.csv > data/yellow_tripdata_2010-02.csv_ +sed -E '/(.*,){18,}/d' data/yellow_tripdata_2010-03.csv > data/yellow_tripdata_2010-03.csv_ +mv data/yellow_tripdata_2010-02.csv_ data/yellow_tripdata_2010-02.csv +mv data/yellow_tripdata_2010-03.csv_ data/yellow_tripdata_2010-03.csv +``` + +その後、postgresqlでデータを前処理する必要があります。 これにより、ポリゴン内のポイントの選択が作成され(マップ上のポイントとニューヨーク市の自治区とのマッチング)、結合を使用してすべてのデータが これを行うには、postgisサポートでpostgresqlをインストールする必要があります。 + +実行中に注意してください `initialize_database.sh` すべてのテーブルが正しく作成されたことを手動で再確認します。 + +毎月のpostgresqlデータの処理には約20-30分かかり、合計で約48時間かかります。 + +ダウンロードした行数は、次のように確認できます: + +``` bash +$ time psql nyc-taxi-data -c "SELECT count(*) FROM trips;" +## Count + 1298979494 +(1 row) + +real 7m9.164s +``` + +(これは、一連のブログ記事でMark Litwintschikによって報告された11億行をわずかに超えています。) + +PostgreSQLのデータは370GBのスペースを使用します。 + +PostgreSQLからデータをエクスポート: + +``` sql +COPY +( + SELECT trips.id, + trips.vendor_id, + trips.pickup_datetime, + trips.dropoff_datetime, + trips.store_and_fwd_flag, + trips.rate_code_id, + trips.pickup_longitude, + trips.pickup_latitude, + trips.dropoff_longitude, + trips.dropoff_latitude, + trips.passenger_count, + trips.trip_distance, + trips.fare_amount, + trips.extra, + trips.mta_tax, + trips.tip_amount, + trips.tolls_amount, + trips.ehail_fee, + trips.improvement_surcharge, + trips.total_amount, + trips.payment_type, + trips.trip_type, + trips.pickup, + trips.dropoff, + + cab_types.type cab_type, + + weather.precipitation_tenths_of_mm rain, + weather.snow_depth_mm, + weather.snowfall_mm, + weather.max_temperature_tenths_degrees_celsius max_temp, + weather.min_temperature_tenths_degrees_celsius min_temp, + weather.average_wind_speed_tenths_of_meters_per_second wind, + + pick_up.gid pickup_nyct2010_gid, + pick_up.ctlabel pickup_ctlabel, + pick_up.borocode pickup_borocode, + pick_up.boroname pickup_boroname, + pick_up.ct2010 
+           pick_up.boroct2010 pickup_boroct2010,
+           pick_up.cdeligibil pickup_cdeligibil,
+           pick_up.ntacode pickup_ntacode,
+           pick_up.ntaname pickup_ntaname,
+           pick_up.puma pickup_puma,
+
+           drop_off.gid dropoff_nyct2010_gid,
+           drop_off.ctlabel dropoff_ctlabel,
+           drop_off.borocode dropoff_borocode,
+           drop_off.boroname dropoff_boroname,
+           drop_off.ct2010 dropoff_ct2010,
+           drop_off.boroct2010 dropoff_boroct2010,
+           drop_off.cdeligibil dropoff_cdeligibil,
+           drop_off.ntacode dropoff_ntacode,
+           drop_off.ntaname dropoff_ntaname,
+           drop_off.puma dropoff_puma
+    FROM trips
+    LEFT JOIN cab_types
+           ON trips.cab_type_id = cab_types.id
+    LEFT JOIN central_park_weather_observations_raw weather
+           ON weather.date = trips.pickup_datetime::date
+    LEFT JOIN nyct2010 pick_up
+           ON pick_up.gid = trips.pickup_nyct2010_gid
+    LEFT JOIN nyct2010 drop_off
+           ON drop_off.gid = trips.dropoff_nyct2010_gid
+) TO '/opt/milovidov/nyc-taxi-data/trips.tsv';
+```
+
+The data snapshot is created at a speed of about 50 MB per second. While creating the snapshot, PostgreSQL reads from the disk at a speed of about 28 MB per second.
+This takes about 5 hours. The resulting TSV file is 590612904969 bytes.
+
+Create a temporary table in ClickHouse:
+
+``` sql
+CREATE TABLE trips
+(
+trip_id UInt32,
+vendor_id String,
+pickup_datetime DateTime,
+dropoff_datetime Nullable(DateTime),
+store_and_fwd_flag Nullable(FixedString(1)),
+rate_code_id Nullable(UInt8),
+pickup_longitude Nullable(Float64),
+pickup_latitude Nullable(Float64),
+dropoff_longitude Nullable(Float64),
+dropoff_latitude Nullable(Float64),
+passenger_count Nullable(UInt8),
+trip_distance Nullable(Float64),
+fare_amount Nullable(Float32),
+extra Nullable(Float32),
+mta_tax Nullable(Float32),
+tip_amount Nullable(Float32),
+tolls_amount Nullable(Float32),
+ehail_fee Nullable(Float32),
+improvement_surcharge Nullable(Float32),
+total_amount Nullable(Float32),
+payment_type Nullable(String),
+trip_type Nullable(UInt8),
+pickup Nullable(String),
+dropoff Nullable(String),
+cab_type Nullable(String),
+precipitation Nullable(UInt8),
+snow_depth Nullable(UInt8),
+snowfall Nullable(UInt8),
+max_temperature Nullable(UInt8),
+min_temperature Nullable(UInt8),
+average_wind_speed Nullable(UInt8),
+pickup_nyct2010_gid Nullable(UInt8),
+pickup_ctlabel Nullable(String),
+pickup_borocode Nullable(UInt8),
+pickup_boroname Nullable(String),
+pickup_ct2010 Nullable(String),
+pickup_boroct2010 Nullable(String),
+pickup_cdeligibil Nullable(FixedString(1)),
+pickup_ntacode Nullable(String),
+pickup_ntaname Nullable(String),
+pickup_puma Nullable(String),
+dropoff_nyct2010_gid Nullable(UInt8),
+dropoff_ctlabel Nullable(String),
+dropoff_borocode Nullable(UInt8),
+dropoff_boroname Nullable(String),
+dropoff_ct2010 Nullable(String),
+dropoff_boroct2010 Nullable(String),
+dropoff_cdeligibil Nullable(String),
+dropoff_ntacode Nullable(String),
+dropoff_ntaname Nullable(String),
+dropoff_puma Nullable(String)
+) ENGINE = Log;
+```
+
+It is needed for converting fields to more correct data types and, if possible, eliminating NULLs.
+
+``` bash
+$ time clickhouse-client --query="INSERT INTO trips FORMAT TabSeparated" < trips.tsv
+
+real 75m56.214s
+```
+
+Data is read at a speed of 112-140 MB/second.
+Loading data into a Log type table in one stream took 76 minutes.
+The data in this table uses 142 GB.
+
+(Importing data directly from Postgres is also possible, using `COPY ... TO PROGRAM`.)
+
+Unfortunately, all the fields associated with the weather (precipitation…average\_wind\_speed) were filled with NULL. Because of this, we will remove them from the final data set.
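+
+As a sketch of the `COPY ... TO PROGRAM` route mentioned above (an illustration under assumptions, not what was actually run: it presupposes that clickhouse-client is installed on the PostgreSQL host and that the SELECT list matches the column order of the ClickHouse table; the column list is abbreviated here):
+
+``` sql
+-- Streams the export straight into ClickHouse, skipping the intermediate TSV file on disk.
+-- PostgreSQL's default text format is tab-separated with \N for NULL, which the
+-- TabSeparated input format understands.
+COPY (SELECT trips.id, trips.vendor_id /* ..., remaining columns as in the query above */ FROM trips)
+TO PROGRAM 'clickhouse-client --query="INSERT INTO trips FORMAT TabSeparated"';
+```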
+ +まず、単一のサーバー上にテーブルを作成します。 その後、テーブルを配布します。 + +サマリーテーブルの作成と設定: + +``` sql +CREATE TABLE trips_mergetree +ENGINE = MergeTree(pickup_date, pickup_datetime, 8192) +AS SELECT + +trip_id, +CAST(vendor_id AS Enum8('1' = 1, '2' = 2, 'CMT' = 3, 'VTS' = 4, 'DDS' = 5, 'B02512' = 10, 'B02598' = 11, 'B02617' = 12, 'B02682' = 13, 'B02764' = 14)) AS vendor_id, +toDate(pickup_datetime) AS pickup_date, +ifNull(pickup_datetime, toDateTime(0)) AS pickup_datetime, +toDate(dropoff_datetime) AS dropoff_date, +ifNull(dropoff_datetime, toDateTime(0)) AS dropoff_datetime, +assumeNotNull(store_and_fwd_flag) IN ('Y', '1', '2') AS store_and_fwd_flag, +assumeNotNull(rate_code_id) AS rate_code_id, +assumeNotNull(pickup_longitude) AS pickup_longitude, +assumeNotNull(pickup_latitude) AS pickup_latitude, +assumeNotNull(dropoff_longitude) AS dropoff_longitude, +assumeNotNull(dropoff_latitude) AS dropoff_latitude, +assumeNotNull(passenger_count) AS passenger_count, +assumeNotNull(trip_distance) AS trip_distance, +assumeNotNull(fare_amount) AS fare_amount, +assumeNotNull(extra) AS extra, +assumeNotNull(mta_tax) AS mta_tax, +assumeNotNull(tip_amount) AS tip_amount, +assumeNotNull(tolls_amount) AS tolls_amount, +assumeNotNull(ehail_fee) AS ehail_fee, +assumeNotNull(improvement_surcharge) AS improvement_surcharge, +assumeNotNull(total_amount) AS total_amount, +CAST((assumeNotNull(payment_type) AS pt) IN ('CSH', 'CASH', 'Cash', 'CAS', 'Cas', '1') ? 'CSH' : (pt IN ('CRD', 'Credit', 'Cre', 'CRE', 'CREDIT', '2') ? 'CRE' : (pt IN ('NOC', 'No Charge', 'No', '3') ? 'NOC' : (pt IN ('DIS', 'Dispute', 'Dis', '4') ? 'DIS' : 'UNK'))) AS Enum8('CSH' = 1, 'CRE' = 2, 'UNK' = 0, 'NOC' = 3, 'DIS' = 4)) AS payment_type_, +assumeNotNull(trip_type) AS trip_type, +ifNull(toFixedString(unhex(pickup), 25), toFixedString('', 25)) AS pickup, +ifNull(toFixedString(unhex(dropoff), 25), toFixedString('', 25)) AS dropoff, +CAST(assumeNotNull(cab_type) AS Enum8('yellow' = 1, 'green' = 2, 'uber' = 3)) AS cab_type, + +assumeNotNull(pickup_nyct2010_gid) AS pickup_nyct2010_gid, +toFloat32(ifNull(pickup_ctlabel, '0')) AS pickup_ctlabel, +assumeNotNull(pickup_borocode) AS pickup_borocode, +CAST(assumeNotNull(pickup_boroname) AS Enum8('Manhattan' = 1, 'Queens' = 4, 'Brooklyn' = 3, '' = 0, 'Bronx' = 2, 'Staten Island' = 5)) AS pickup_boroname, +toFixedString(ifNull(pickup_ct2010, '000000'), 6) AS pickup_ct2010, +toFixedString(ifNull(pickup_boroct2010, '0000000'), 7) AS pickup_boroct2010, +CAST(assumeNotNull(ifNull(pickup_cdeligibil, ' ')) AS Enum8(' ' = 0, 'E' = 1, 'I' = 2)) AS pickup_cdeligibil, +toFixedString(ifNull(pickup_ntacode, '0000'), 4) AS pickup_ntacode, + +CAST(assumeNotNull(pickup_ntaname) AS Enum16('' = 0, 'Airport' = 1, 'Allerton-Pelham Gardens' = 2, 'Annadale-Huguenot-Prince\'s Bay-Eltingville' = 3, 'Arden Heights' = 4, 'Astoria' = 5, 'Auburndale' = 6, 'Baisley Park' = 7, 'Bath Beach' = 8, 'Battery Park City-Lower Manhattan' = 9, 'Bay Ridge' = 10, 'Bayside-Bayside Hills' = 11, 'Bedford' = 12, 'Bedford Park-Fordham North' = 13, 'Bellerose' = 14, 'Belmont' = 15, 'Bensonhurst East' = 16, 'Bensonhurst West' = 17, 'Borough Park' = 18, 'Breezy Point-Belle Harbor-Rockaway Park-Broad Channel' = 19, 'Briarwood-Jamaica Hills' = 20, 'Brighton Beach' = 21, 'Bronxdale' = 22, 'Brooklyn Heights-Cobble Hill' = 23, 'Brownsville' = 24, 'Bushwick North' = 25, 'Bushwick South' = 26, 'Cambria Heights' = 27, 'Canarsie' = 28, 'Carroll Gardens-Columbia Street-Red Hook' = 29, 'Central Harlem North-Polo Grounds' = 30, 'Central Harlem South' = 31, 'Charleston-Richmond 
Valley-Tottenville' = 32, 'Chinatown' = 33, 'Claremont-Bathgate' = 34, 'Clinton' = 35, 'Clinton Hill' = 36, 'Co-op City' = 37, 'College Point' = 38, 'Corona' = 39, 'Crotona Park East' = 40, 'Crown Heights North' = 41, 'Crown Heights South' = 42, 'Cypress Hills-City Line' = 43, 'DUMBO-Vinegar Hill-Downtown Brooklyn-Boerum Hill' = 44, 'Douglas Manor-Douglaston-Little Neck' = 45, 'Dyker Heights' = 46, 'East Concourse-Concourse Village' = 47, 'East Elmhurst' = 48, 'East Flatbush-Farragut' = 49, 'East Flushing' = 50, 'East Harlem North' = 51, 'East Harlem South' = 52, 'East New York' = 53, 'East New York (Pennsylvania Ave)' = 54, 'East Tremont' = 55, 'East Village' = 56, 'East Williamsburg' = 57, 'Eastchester-Edenwald-Baychester' = 58, 'Elmhurst' = 59, 'Elmhurst-Maspeth' = 60, 'Erasmus' = 61, 'Far Rockaway-Bayswater' = 62, 'Flatbush' = 63, 'Flatlands' = 64, 'Flushing' = 65, 'Fordham South' = 66, 'Forest Hills' = 67, 'Fort Greene' = 68, 'Fresh Meadows-Utopia' = 69, 'Ft. Totten-Bay Terrace-Clearview' = 70, 'Georgetown-Marine Park-Bergen Beach-Mill Basin' = 71, 'Glen Oaks-Floral Park-New Hyde Park' = 72, 'Glendale' = 73, 'Gramercy' = 74, 'Grasmere-Arrochar-Ft. Wadsworth' = 75, 'Gravesend' = 76, 'Great Kills' = 77, 'Greenpoint' = 78, 'Grymes Hill-Clifton-Fox Hills' = 79, 'Hamilton Heights' = 80, 'Hammels-Arverne-Edgemere' = 81, 'Highbridge' = 82, 'Hollis' = 83, 'Homecrest' = 84, 'Hudson Yards-Chelsea-Flatiron-Union Square' = 85, 'Hunters Point-Sunnyside-West Maspeth' = 86, 'Hunts Point' = 87, 'Jackson Heights' = 88, 'Jamaica' = 89, 'Jamaica Estates-Holliswood' = 90, 'Kensington-Ocean Parkway' = 91, 'Kew Gardens' = 92, 'Kew Gardens Hills' = 93, 'Kingsbridge Heights' = 94, 'Laurelton' = 95, 'Lenox Hill-Roosevelt Island' = 96, 'Lincoln Square' = 97, 'Lindenwood-Howard Beach' = 98, 'Longwood' = 99, 'Lower East Side' = 100, 'Madison' = 101, 'Manhattanville' = 102, 'Marble Hill-Inwood' = 103, 'Mariner\'s Harbor-Arlington-Port Ivory-Graniteville' = 104, 'Maspeth' = 105, 'Melrose South-Mott Haven North' = 106, 'Middle Village' = 107, 'Midtown-Midtown South' = 108, 'Midwood' = 109, 'Morningside Heights' = 110, 'Morrisania-Melrose' = 111, 'Mott Haven-Port Morris' = 112, 'Mount Hope' = 113, 'Murray Hill' = 114, 'Murray Hill-Kips Bay' = 115, 'New Brighton-Silver Lake' = 116, 'New Dorp-Midland Beach' = 117, 'New Springville-Bloomfield-Travis' = 118, 'North Corona' = 119, 'North Riverdale-Fieldston-Riverdale' = 120, 'North Side-South Side' = 121, 'Norwood' = 122, 'Oakland Gardens' = 123, 'Oakwood-Oakwood Beach' = 124, 'Ocean Hill' = 125, 'Ocean Parkway South' = 126, 'Old Astoria' = 127, 'Old Town-Dongan Hills-South Beach' = 128, 'Ozone Park' = 129, 'Park Slope-Gowanus' = 130, 'Parkchester' = 131, 'Pelham Bay-Country Club-City Island' = 132, 'Pelham Parkway' = 133, 'Pomonok-Flushing Heights-Hillcrest' = 134, 'Port Richmond' = 135, 'Prospect Heights' = 136, 'Prospect Lefferts Gardens-Wingate' = 137, 'Queens Village' = 138, 'Queensboro Hill' = 139, 'Queensbridge-Ravenswood-Long Island City' = 140, 'Rego Park' = 141, 'Richmond Hill' = 142, 'Ridgewood' = 143, 'Rikers Island' = 144, 'Rosedale' = 145, 'Rossville-Woodrow' = 146, 'Rugby-Remsen Village' = 147, 'Schuylerville-Throgs Neck-Edgewater Park' = 148, 'Seagate-Coney Island' = 149, 'Sheepshead Bay-Gerritsen Beach-Manhattan Beach' = 150, 'SoHo-TriBeCa-Civic Center-Little Italy' = 151, 'Soundview-Bruckner' = 152, 'Soundview-Castle Hill-Clason Point-Harding Park' = 153, 'South Jamaica' = 154, 'South Ozone Park' = 155, 'Springfield Gardens North' = 156, 'Springfield 
Gardens South-Brookville' = 157, 'Spuyten Duyvil-Kingsbridge' = 158, 'St. Albans' = 159, 'Stapleton-Rosebank' = 160, 'Starrett City' = 161, 'Steinway' = 162, 'Stuyvesant Heights' = 163, 'Stuyvesant Town-Cooper Village' = 164, 'Sunset Park East' = 165, 'Sunset Park West' = 166, 'Todt Hill-Emerson Hill-Heartland Village-Lighthouse Hill' = 167, 'Turtle Bay-East Midtown' = 168, 'University Heights-Morris Heights' = 169, 'Upper East Side-Carnegie Hill' = 170, 'Upper West Side' = 171, 'Van Cortlandt Village' = 172, 'Van Nest-Morris Park-Westchester Square' = 173, 'Washington Heights North' = 174, 'Washington Heights South' = 175, 'West Brighton' = 176, 'West Concourse' = 177, 'West Farms-Bronx River' = 178, 'West New Brighton-New Brighton-St. George' = 179, 'West Village' = 180, 'Westchester-Unionport' = 181, 'Westerleigh' = 182, 'Whitestone' = 183, 'Williamsbridge-Olinville' = 184, 'Williamsburg' = 185, 'Windsor Terrace' = 186, 'Woodhaven' = 187, 'Woodlawn-Wakefield' = 188, 'Woodside' = 189, 'Yorkville' = 190, 'park-cemetery-etc-Bronx' = 191, 'park-cemetery-etc-Brooklyn' = 192, 'park-cemetery-etc-Manhattan' = 193, 'park-cemetery-etc-Queens' = 194, 'park-cemetery-etc-Staten Island' = 195)) AS pickup_ntaname, + +toUInt16(ifNull(pickup_puma, '0')) AS pickup_puma, + +assumeNotNull(dropoff_nyct2010_gid) AS dropoff_nyct2010_gid, +toFloat32(ifNull(dropoff_ctlabel, '0')) AS dropoff_ctlabel, +assumeNotNull(dropoff_borocode) AS dropoff_borocode, +CAST(assumeNotNull(dropoff_boroname) AS Enum8('Manhattan' = 1, 'Queens' = 4, 'Brooklyn' = 3, '' = 0, 'Bronx' = 2, 'Staten Island' = 5)) AS dropoff_boroname, +toFixedString(ifNull(dropoff_ct2010, '000000'), 6) AS dropoff_ct2010, +toFixedString(ifNull(dropoff_boroct2010, '0000000'), 7) AS dropoff_boroct2010, +CAST(assumeNotNull(ifNull(dropoff_cdeligibil, ' ')) AS Enum8(' ' = 0, 'E' = 1, 'I' = 2)) AS dropoff_cdeligibil, +toFixedString(ifNull(dropoff_ntacode, '0000'), 4) AS dropoff_ntacode, + +CAST(assumeNotNull(dropoff_ntaname) AS Enum16('' = 0, 'Airport' = 1, 'Allerton-Pelham Gardens' = 2, 'Annadale-Huguenot-Prince\'s Bay-Eltingville' = 3, 'Arden Heights' = 4, 'Astoria' = 5, 'Auburndale' = 6, 'Baisley Park' = 7, 'Bath Beach' = 8, 'Battery Park City-Lower Manhattan' = 9, 'Bay Ridge' = 10, 'Bayside-Bayside Hills' = 11, 'Bedford' = 12, 'Bedford Park-Fordham North' = 13, 'Bellerose' = 14, 'Belmont' = 15, 'Bensonhurst East' = 16, 'Bensonhurst West' = 17, 'Borough Park' = 18, 'Breezy Point-Belle Harbor-Rockaway Park-Broad Channel' = 19, 'Briarwood-Jamaica Hills' = 20, 'Brighton Beach' = 21, 'Bronxdale' = 22, 'Brooklyn Heights-Cobble Hill' = 23, 'Brownsville' = 24, 'Bushwick North' = 25, 'Bushwick South' = 26, 'Cambria Heights' = 27, 'Canarsie' = 28, 'Carroll Gardens-Columbia Street-Red Hook' = 29, 'Central Harlem North-Polo Grounds' = 30, 'Central Harlem South' = 31, 'Charleston-Richmond Valley-Tottenville' = 32, 'Chinatown' = 33, 'Claremont-Bathgate' = 34, 'Clinton' = 35, 'Clinton Hill' = 36, 'Co-op City' = 37, 'College Point' = 38, 'Corona' = 39, 'Crotona Park East' = 40, 'Crown Heights North' = 41, 'Crown Heights South' = 42, 'Cypress Hills-City Line' = 43, 'DUMBO-Vinegar Hill-Downtown Brooklyn-Boerum Hill' = 44, 'Douglas Manor-Douglaston-Little Neck' = 45, 'Dyker Heights' = 46, 'East Concourse-Concourse Village' = 47, 'East Elmhurst' = 48, 'East Flatbush-Farragut' = 49, 'East Flushing' = 50, 'East Harlem North' = 51, 'East Harlem South' = 52, 'East New York' = 53, 'East New York (Pennsylvania Ave)' = 54, 'East Tremont' = 55, 'East Village' = 56, 'East Williamsburg' 
= 57, 'Eastchester-Edenwald-Baychester' = 58, 'Elmhurst' = 59, 'Elmhurst-Maspeth' = 60, 'Erasmus' = 61, 'Far Rockaway-Bayswater' = 62, 'Flatbush' = 63, 'Flatlands' = 64, 'Flushing' = 65, 'Fordham South' = 66, 'Forest Hills' = 67, 'Fort Greene' = 68, 'Fresh Meadows-Utopia' = 69, 'Ft. Totten-Bay Terrace-Clearview' = 70, 'Georgetown-Marine Park-Bergen Beach-Mill Basin' = 71, 'Glen Oaks-Floral Park-New Hyde Park' = 72, 'Glendale' = 73, 'Gramercy' = 74, 'Grasmere-Arrochar-Ft. Wadsworth' = 75, 'Gravesend' = 76, 'Great Kills' = 77, 'Greenpoint' = 78, 'Grymes Hill-Clifton-Fox Hills' = 79, 'Hamilton Heights' = 80, 'Hammels-Arverne-Edgemere' = 81, 'Highbridge' = 82, 'Hollis' = 83, 'Homecrest' = 84, 'Hudson Yards-Chelsea-Flatiron-Union Square' = 85, 'Hunters Point-Sunnyside-West Maspeth' = 86, 'Hunts Point' = 87, 'Jackson Heights' = 88, 'Jamaica' = 89, 'Jamaica Estates-Holliswood' = 90, 'Kensington-Ocean Parkway' = 91, 'Kew Gardens' = 92, 'Kew Gardens Hills' = 93, 'Kingsbridge Heights' = 94, 'Laurelton' = 95, 'Lenox Hill-Roosevelt Island' = 96, 'Lincoln Square' = 97, 'Lindenwood-Howard Beach' = 98, 'Longwood' = 99, 'Lower East Side' = 100, 'Madison' = 101, 'Manhattanville' = 102, 'Marble Hill-Inwood' = 103, 'Mariner\'s Harbor-Arlington-Port Ivory-Graniteville' = 104, 'Maspeth' = 105, 'Melrose South-Mott Haven North' = 106, 'Middle Village' = 107, 'Midtown-Midtown South' = 108, 'Midwood' = 109, 'Morningside Heights' = 110, 'Morrisania-Melrose' = 111, 'Mott Haven-Port Morris' = 112, 'Mount Hope' = 113, 'Murray Hill' = 114, 'Murray Hill-Kips Bay' = 115, 'New Brighton-Silver Lake' = 116, 'New Dorp-Midland Beach' = 117, 'New Springville-Bloomfield-Travis' = 118, 'North Corona' = 119, 'North Riverdale-Fieldston-Riverdale' = 120, 'North Side-South Side' = 121, 'Norwood' = 122, 'Oakland Gardens' = 123, 'Oakwood-Oakwood Beach' = 124, 'Ocean Hill' = 125, 'Ocean Parkway South' = 126, 'Old Astoria' = 127, 'Old Town-Dongan Hills-South Beach' = 128, 'Ozone Park' = 129, 'Park Slope-Gowanus' = 130, 'Parkchester' = 131, 'Pelham Bay-Country Club-City Island' = 132, 'Pelham Parkway' = 133, 'Pomonok-Flushing Heights-Hillcrest' = 134, 'Port Richmond' = 135, 'Prospect Heights' = 136, 'Prospect Lefferts Gardens-Wingate' = 137, 'Queens Village' = 138, 'Queensboro Hill' = 139, 'Queensbridge-Ravenswood-Long Island City' = 140, 'Rego Park' = 141, 'Richmond Hill' = 142, 'Ridgewood' = 143, 'Rikers Island' = 144, 'Rosedale' = 145, 'Rossville-Woodrow' = 146, 'Rugby-Remsen Village' = 147, 'Schuylerville-Throgs Neck-Edgewater Park' = 148, 'Seagate-Coney Island' = 149, 'Sheepshead Bay-Gerritsen Beach-Manhattan Beach' = 150, 'SoHo-TriBeCa-Civic Center-Little Italy' = 151, 'Soundview-Bruckner' = 152, 'Soundview-Castle Hill-Clason Point-Harding Park' = 153, 'South Jamaica' = 154, 'South Ozone Park' = 155, 'Springfield Gardens North' = 156, 'Springfield Gardens South-Brookville' = 157, 'Spuyten Duyvil-Kingsbridge' = 158, 'St. 
Albans' = 159, 'Stapleton-Rosebank' = 160, 'Starrett City' = 161, 'Steinway' = 162, 'Stuyvesant Heights' = 163, 'Stuyvesant Town-Cooper Village' = 164, 'Sunset Park East' = 165, 'Sunset Park West' = 166, 'Todt Hill-Emerson Hill-Heartland Village-Lighthouse Hill' = 167, 'Turtle Bay-East Midtown' = 168, 'University Heights-Morris Heights' = 169, 'Upper East Side-Carnegie Hill' = 170, 'Upper West Side' = 171, 'Van Cortlandt Village' = 172, 'Van Nest-Morris Park-Westchester Square' = 173, 'Washington Heights North' = 174, 'Washington Heights South' = 175, 'West Brighton' = 176, 'West Concourse' = 177, 'West Farms-Bronx River' = 178, 'West New Brighton-New Brighton-St. George' = 179, 'West Village' = 180, 'Westchester-Unionport' = 181, 'Westerleigh' = 182, 'Whitestone' = 183, 'Williamsbridge-Olinville' = 184, 'Williamsburg' = 185, 'Windsor Terrace' = 186, 'Woodhaven' = 187, 'Woodlawn-Wakefield' = 188, 'Woodside' = 189, 'Yorkville' = 190, 'park-cemetery-etc-Bronx' = 191, 'park-cemetery-etc-Brooklyn' = 192, 'park-cemetery-etc-Manhattan' = 193, 'park-cemetery-etc-Queens' = 194, 'park-cemetery-etc-Staten Island' = 195)) AS dropoff_ntaname,
+
+toUInt16(ifNull(dropoff_puma, '0')) AS dropoff_puma
+
+FROM trips
+```
+
+This takes 3030 seconds at a speed of about 428,000 rows per second.
+To load it faster, you can create the table with the `Log` engine instead of `MergeTree`. In this case, the load completes faster than 200 seconds.
+
+The table uses 126 GB of disk space.
+
+``` sql
+SELECT formatReadableSize(sum(bytes)) FROM system.parts WHERE table = 'trips_mergetree' AND active
+```
+
+``` text
+┌─formatReadableSize(sum(bytes))─┐
+│ 126.18 GiB                     │
+└────────────────────────────────┘
+```
+
+Among other things, you can run the OPTIMIZE query on MergeTree. But it's not required since everything will be fine without it.
+
+## Download of Prepared Partitions {#download-of-prepared-partitions}
+
+``` bash
+$ curl -O https://clickhouse-datasets.s3.yandex.net/trips_mergetree/partitions/trips_mergetree.tar
+$ tar xvf trips_mergetree.tar -C /var/lib/clickhouse # path to ClickHouse data directory
+$ # check permissions of unpacked data, fix if required
+$ sudo service clickhouse-server restart
+$ clickhouse-client --query "select count(*) from datasets.trips_mergetree"
+```
+
+!!! info "Info"
+    If you will run the queries described below, you have to use the full table name, `datasets.trips_mergetree`.
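+
+For reference, the optional OPTIMIZE mentioned above is a single statement (a sketch; `FINAL` forces merging of all data parts and can itself take a long time on a table of this size):
+
+``` sql
+-- Optional: merge data parts ahead of time; the benchmark results below do not depend on it.
+OPTIMIZE TABLE datasets.trips_mergetree FINAL
+```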
+ +## 単一サーバー上の結果 {#results-on-single-server} + +Q1: + +``` sql +SELECT cab_type, count(*) FROM trips_mergetree GROUP BY cab_type +``` + +0.490秒。 + +Q2: + +``` sql +SELECT passenger_count, avg(total_amount) FROM trips_mergetree GROUP BY passenger_count +``` + +1.224秒 + +Q3: + +``` sql +SELECT passenger_count, toYear(pickup_date) AS year, count(*) FROM trips_mergetree GROUP BY passenger_count, year +``` + +2.104秒。 + +Q4: + +``` sql +SELECT passenger_count, toYear(pickup_date) AS year, round(trip_distance) AS distance, count(*) +FROM trips_mergetree +GROUP BY passenger_count, year, distance +ORDER BY year, count(*) DESC +``` + +3.593秒 + +次のサーバーが使用されました: + +二つのインテル(r)xeon(r)cpu e5-2650v2@2.60ghzの、16物理カーネル合計、128gib ram、8×6tb hd上のハードウェアraid-5 + +実行時間は、三つの実行の最高です。 だから、クエリからデータを読み込むためのファイルシステムます。 データは読み出され、各実行で処理されます。 + +三つのサーバー上のテーブルの作成: + +各サーバーで: + +``` sql +CREATE TABLE default.trips_mergetree_third ( trip_id UInt32, vendor_id Enum8('1' = 1, '2' = 2, 'CMT' = 3, 'VTS' = 4, 'DDS' = 5, 'B02512' = 10, 'B02598' = 11, 'B02617' = 12, 'B02682' = 13, 'B02764' = 14), pickup_date Date, pickup_datetime DateTime, dropoff_date Date, dropoff_datetime DateTime, store_and_fwd_flag UInt8, rate_code_id UInt8, pickup_longitude Float64, pickup_latitude Float64, dropoff_longitude Float64, dropoff_latitude Float64, passenger_count UInt8, trip_distance Float64, fare_amount Float32, extra Float32, mta_tax Float32, tip_amount Float32, tolls_amount Float32, ehail_fee Float32, improvement_surcharge Float32, total_amount Float32, payment_type_ Enum8('UNK' = 0, 'CSH' = 1, 'CRE' = 2, 'NOC' = 3, 'DIS' = 4), trip_type UInt8, pickup FixedString(25), dropoff FixedString(25), cab_type Enum8('yellow' = 1, 'green' = 2, 'uber' = 3), pickup_nyct2010_gid UInt8, pickup_ctlabel Float32, pickup_borocode UInt8, pickup_boroname Enum8('' = 0, 'Manhattan' = 1, 'Bronx' = 2, 'Brooklyn' = 3, 'Queens' = 4, 'Staten Island' = 5), pickup_ct2010 FixedString(6), pickup_boroct2010 FixedString(7), pickup_cdeligibil Enum8(' ' = 0, 'E' = 1, 'I' = 2), pickup_ntacode FixedString(4), pickup_ntaname Enum16('' = 0, 'Airport' = 1, 'Allerton-Pelham Gardens' = 2, 'Annadale-Huguenot-Prince\'s Bay-Eltingville' = 3, 'Arden Heights' = 4, 'Astoria' = 5, 'Auburndale' = 6, 'Baisley Park' = 7, 'Bath Beach' = 8, 'Battery Park City-Lower Manhattan' = 9, 'Bay Ridge' = 10, 'Bayside-Bayside Hills' = 11, 'Bedford' = 12, 'Bedford Park-Fordham North' = 13, 'Bellerose' = 14, 'Belmont' = 15, 'Bensonhurst East' = 16, 'Bensonhurst West' = 17, 'Borough Park' = 18, 'Breezy Point-Belle Harbor-Rockaway Park-Broad Channel' = 19, 'Briarwood-Jamaica Hills' = 20, 'Brighton Beach' = 21, 'Bronxdale' = 22, 'Brooklyn Heights-Cobble Hill' = 23, 'Brownsville' = 24, 'Bushwick North' = 25, 'Bushwick South' = 26, 'Cambria Heights' = 27, 'Canarsie' = 28, 'Carroll Gardens-Columbia Street-Red Hook' = 29, 'Central Harlem North-Polo Grounds' = 30, 'Central Harlem South' = 31, 'Charleston-Richmond Valley-Tottenville' = 32, 'Chinatown' = 33, 'Claremont-Bathgate' = 34, 'Clinton' = 35, 'Clinton Hill' = 36, 'Co-op City' = 37, 'College Point' = 38, 'Corona' = 39, 'Crotona Park East' = 40, 'Crown Heights North' = 41, 'Crown Heights South' = 42, 'Cypress Hills-City Line' = 43, 'DUMBO-Vinegar Hill-Downtown Brooklyn-Boerum Hill' = 44, 'Douglas Manor-Douglaston-Little Neck' = 45, 'Dyker Heights' = 46, 'East Concourse-Concourse Village' = 47, 'East Elmhurst' = 48, 'East Flatbush-Farragut' = 49, 'East Flushing' = 50, 'East Harlem North' = 51, 'East Harlem South' = 52, 'East New York' = 53, 'East New York 
(Pennsylvania Ave)' = 54, 'East Tremont' = 55, 'East Village' = 56, 'East Williamsburg' = 57, 'Eastchester-Edenwald-Baychester' = 58, 'Elmhurst' = 59, 'Elmhurst-Maspeth' = 60, 'Erasmus' = 61, 'Far Rockaway-Bayswater' = 62, 'Flatbush' = 63, 'Flatlands' = 64, 'Flushing' = 65, 'Fordham South' = 66, 'Forest Hills' = 67, 'Fort Greene' = 68, 'Fresh Meadows-Utopia' = 69, 'Ft. Totten-Bay Terrace-Clearview' = 70, 'Georgetown-Marine Park-Bergen Beach-Mill Basin' = 71, 'Glen Oaks-Floral Park-New Hyde Park' = 72, 'Glendale' = 73, 'Gramercy' = 74, 'Grasmere-Arrochar-Ft. Wadsworth' = 75, 'Gravesend' = 76, 'Great Kills' = 77, 'Greenpoint' = 78, 'Grymes Hill-Clifton-Fox Hills' = 79, 'Hamilton Heights' = 80, 'Hammels-Arverne-Edgemere' = 81, 'Highbridge' = 82, 'Hollis' = 83, 'Homecrest' = 84, 'Hudson Yards-Chelsea-Flatiron-Union Square' = 85, 'Hunters Point-Sunnyside-West Maspeth' = 86, 'Hunts Point' = 87, 'Jackson Heights' = 88, 'Jamaica' = 89, 'Jamaica Estates-Holliswood' = 90, 'Kensington-Ocean Parkway' = 91, 'Kew Gardens' = 92, 'Kew Gardens Hills' = 93, 'Kingsbridge Heights' = 94, 'Laurelton' = 95, 'Lenox Hill-Roosevelt Island' = 96, 'Lincoln Square' = 97, 'Lindenwood-Howard Beach' = 98, 'Longwood' = 99, 'Lower East Side' = 100, 'Madison' = 101, 'Manhattanville' = 102, 'Marble Hill-Inwood' = 103, 'Mariner\'s Harbor-Arlington-Port Ivory-Graniteville' = 104, 'Maspeth' = 105, 'Melrose South-Mott Haven North' = 106, 'Middle Village' = 107, 'Midtown-Midtown South' = 108, 'Midwood' = 109, 'Morningside Heights' = 110, 'Morrisania-Melrose' = 111, 'Mott Haven-Port Morris' = 112, 'Mount Hope' = 113, 'Murray Hill' = 114, 'Murray Hill-Kips Bay' = 115, 'New Brighton-Silver Lake' = 116, 'New Dorp-Midland Beach' = 117, 'New Springville-Bloomfield-Travis' = 118, 'North Corona' = 119, 'North Riverdale-Fieldston-Riverdale' = 120, 'North Side-South Side' = 121, 'Norwood' = 122, 'Oakland Gardens' = 123, 'Oakwood-Oakwood Beach' = 124, 'Ocean Hill' = 125, 'Ocean Parkway South' = 126, 'Old Astoria' = 127, 'Old Town-Dongan Hills-South Beach' = 128, 'Ozone Park' = 129, 'Park Slope-Gowanus' = 130, 'Parkchester' = 131, 'Pelham Bay-Country Club-City Island' = 132, 'Pelham Parkway' = 133, 'Pomonok-Flushing Heights-Hillcrest' = 134, 'Port Richmond' = 135, 'Prospect Heights' = 136, 'Prospect Lefferts Gardens-Wingate' = 137, 'Queens Village' = 138, 'Queensboro Hill' = 139, 'Queensbridge-Ravenswood-Long Island City' = 140, 'Rego Park' = 141, 'Richmond Hill' = 142, 'Ridgewood' = 143, 'Rikers Island' = 144, 'Rosedale' = 145, 'Rossville-Woodrow' = 146, 'Rugby-Remsen Village' = 147, 'Schuylerville-Throgs Neck-Edgewater Park' = 148, 'Seagate-Coney Island' = 149, 'Sheepshead Bay-Gerritsen Beach-Manhattan Beach' = 150, 'SoHo-TriBeCa-Civic Center-Little Italy' = 151, 'Soundview-Bruckner' = 152, 'Soundview-Castle Hill-Clason Point-Harding Park' = 153, 'South Jamaica' = 154, 'South Ozone Park' = 155, 'Springfield Gardens North' = 156, 'Springfield Gardens South-Brookville' = 157, 'Spuyten Duyvil-Kingsbridge' = 158, 'St. 
Albans' = 159, 'Stapleton-Rosebank' = 160, 'Starrett City' = 161, 'Steinway' = 162, 'Stuyvesant Heights' = 163, 'Stuyvesant Town-Cooper Village' = 164, 'Sunset Park East' = 165, 'Sunset Park West' = 166, 'Todt Hill-Emerson Hill-Heartland Village-Lighthouse Hill' = 167, 'Turtle Bay-East Midtown' = 168, 'University Heights-Morris Heights' = 169, 'Upper East Side-Carnegie Hill' = 170, 'Upper West Side' = 171, 'Van Cortlandt Village' = 172, 'Van Nest-Morris Park-Westchester Square' = 173, 'Washington Heights North' = 174, 'Washington Heights South' = 175, 'West Brighton' = 176, 'West Concourse' = 177, 'West Farms-Bronx River' = 178, 'West New Brighton-New Brighton-St. George' = 179, 'West Village' = 180, 'Westchester-Unionport' = 181, 'Westerleigh' = 182, 'Whitestone' = 183, 'Williamsbridge-Olinville' = 184, 'Williamsburg' = 185, 'Windsor Terrace' = 186, 'Woodhaven' = 187, 'Woodlawn-Wakefield' = 188, 'Woodside' = 189, 'Yorkville' = 190, 'park-cemetery-etc-Bronx' = 191, 'park-cemetery-etc-Brooklyn' = 192, 'park-cemetery-etc-Manhattan' = 193, 'park-cemetery-etc-Queens' = 194, 'park-cemetery-etc-Staten Island' = 195), pickup_puma UInt16, dropoff_nyct2010_gid UInt8, dropoff_ctlabel Float32, dropoff_borocode UInt8, dropoff_boroname Enum8('' = 0, 'Manhattan' = 1, 'Bronx' = 2, 'Brooklyn' = 3, 'Queens' = 4, 'Staten Island' = 5), dropoff_ct2010 FixedString(6), dropoff_boroct2010 FixedString(7), dropoff_cdeligibil Enum8(' ' = 0, 'E' = 1, 'I' = 2), dropoff_ntacode FixedString(4), dropoff_ntaname Enum16('' = 0, 'Airport' = 1, 'Allerton-Pelham Gardens' = 2, 'Annadale-Huguenot-Prince\'s Bay-Eltingville' = 3, 'Arden Heights' = 4, 'Astoria' = 5, 'Auburndale' = 6, 'Baisley Park' = 7, 'Bath Beach' = 8, 'Battery Park City-Lower Manhattan' = 9, 'Bay Ridge' = 10, 'Bayside-Bayside Hills' = 11, 'Bedford' = 12, 'Bedford Park-Fordham North' = 13, 'Bellerose' = 14, 'Belmont' = 15, 'Bensonhurst East' = 16, 'Bensonhurst West' = 17, 'Borough Park' = 18, 'Breezy Point-Belle Harbor-Rockaway Park-Broad Channel' = 19, 'Briarwood-Jamaica Hills' = 20, 'Brighton Beach' = 21, 'Bronxdale' = 22, 'Brooklyn Heights-Cobble Hill' = 23, 'Brownsville' = 24, 'Bushwick North' = 25, 'Bushwick South' = 26, 'Cambria Heights' = 27, 'Canarsie' = 28, 'Carroll Gardens-Columbia Street-Red Hook' = 29, 'Central Harlem North-Polo Grounds' = 30, 'Central Harlem South' = 31, 'Charleston-Richmond Valley-Tottenville' = 32, 'Chinatown' = 33, 'Claremont-Bathgate' = 34, 'Clinton' = 35, 'Clinton Hill' = 36, 'Co-op City' = 37, 'College Point' = 38, 'Corona' = 39, 'Crotona Park East' = 40, 'Crown Heights North' = 41, 'Crown Heights South' = 42, 'Cypress Hills-City Line' = 43, 'DUMBO-Vinegar Hill-Downtown Brooklyn-Boerum Hill' = 44, 'Douglas Manor-Douglaston-Little Neck' = 45, 'Dyker Heights' = 46, 'East Concourse-Concourse Village' = 47, 'East Elmhurst' = 48, 'East Flatbush-Farragut' = 49, 'East Flushing' = 50, 'East Harlem North' = 51, 'East Harlem South' = 52, 'East New York' = 53, 'East New York (Pennsylvania Ave)' = 54, 'East Tremont' = 55, 'East Village' = 56, 'East Williamsburg' = 57, 'Eastchester-Edenwald-Baychester' = 58, 'Elmhurst' = 59, 'Elmhurst-Maspeth' = 60, 'Erasmus' = 61, 'Far Rockaway-Bayswater' = 62, 'Flatbush' = 63, 'Flatlands' = 64, 'Flushing' = 65, 'Fordham South' = 66, 'Forest Hills' = 67, 'Fort Greene' = 68, 'Fresh Meadows-Utopia' = 69, 'Ft. Totten-Bay Terrace-Clearview' = 70, 'Georgetown-Marine Park-Bergen Beach-Mill Basin' = 71, 'Glen Oaks-Floral Park-New Hyde Park' = 72, 'Glendale' = 73, 'Gramercy' = 74, 'Grasmere-Arrochar-Ft. 
Wadsworth' = 75, 'Gravesend' = 76, 'Great Kills' = 77, 'Greenpoint' = 78, 'Grymes Hill-Clifton-Fox Hills' = 79, 'Hamilton Heights' = 80, 'Hammels-Arverne-Edgemere' = 81, 'Highbridge' = 82, 'Hollis' = 83, 'Homecrest' = 84, 'Hudson Yards-Chelsea-Flatiron-Union Square' = 85, 'Hunters Point-Sunnyside-West Maspeth' = 86, 'Hunts Point' = 87, 'Jackson Heights' = 88, 'Jamaica' = 89, 'Jamaica Estates-Holliswood' = 90, 'Kensington-Ocean Parkway' = 91, 'Kew Gardens' = 92, 'Kew Gardens Hills' = 93, 'Kingsbridge Heights' = 94, 'Laurelton' = 95, 'Lenox Hill-Roosevelt Island' = 96, 'Lincoln Square' = 97, 'Lindenwood-Howard Beach' = 98, 'Longwood' = 99, 'Lower East Side' = 100, 'Madison' = 101, 'Manhattanville' = 102, 'Marble Hill-Inwood' = 103, 'Mariner\'s Harbor-Arlington-Port Ivory-Graniteville' = 104, 'Maspeth' = 105, 'Melrose South-Mott Haven North' = 106, 'Middle Village' = 107, 'Midtown-Midtown South' = 108, 'Midwood' = 109, 'Morningside Heights' = 110, 'Morrisania-Melrose' = 111, 'Mott Haven-Port Morris' = 112, 'Mount Hope' = 113, 'Murray Hill' = 114, 'Murray Hill-Kips Bay' = 115, 'New Brighton-Silver Lake' = 116, 'New Dorp-Midland Beach' = 117, 'New Springville-Bloomfield-Travis' = 118, 'North Corona' = 119, 'North Riverdale-Fieldston-Riverdale' = 120, 'North Side-South Side' = 121, 'Norwood' = 122, 'Oakland Gardens' = 123, 'Oakwood-Oakwood Beach' = 124, 'Ocean Hill' = 125, 'Ocean Parkway South' = 126, 'Old Astoria' = 127, 'Old Town-Dongan Hills-South Beach' = 128, 'Ozone Park' = 129, 'Park Slope-Gowanus' = 130, 'Parkchester' = 131, 'Pelham Bay-Country Club-City Island' = 132, 'Pelham Parkway' = 133, 'Pomonok-Flushing Heights-Hillcrest' = 134, 'Port Richmond' = 135, 'Prospect Heights' = 136, 'Prospect Lefferts Gardens-Wingate' = 137, 'Queens Village' = 138, 'Queensboro Hill' = 139, 'Queensbridge-Ravenswood-Long Island City' = 140, 'Rego Park' = 141, 'Richmond Hill' = 142, 'Ridgewood' = 143, 'Rikers Island' = 144, 'Rosedale' = 145, 'Rossville-Woodrow' = 146, 'Rugby-Remsen Village' = 147, 'Schuylerville-Throgs Neck-Edgewater Park' = 148, 'Seagate-Coney Island' = 149, 'Sheepshead Bay-Gerritsen Beach-Manhattan Beach' = 150, 'SoHo-TriBeCa-Civic Center-Little Italy' = 151, 'Soundview-Bruckner' = 152, 'Soundview-Castle Hill-Clason Point-Harding Park' = 153, 'South Jamaica' = 154, 'South Ozone Park' = 155, 'Springfield Gardens North' = 156, 'Springfield Gardens South-Brookville' = 157, 'Spuyten Duyvil-Kingsbridge' = 158, 'St. Albans' = 159, 'Stapleton-Rosebank' = 160, 'Starrett City' = 161, 'Steinway' = 162, 'Stuyvesant Heights' = 163, 'Stuyvesant Town-Cooper Village' = 164, 'Sunset Park East' = 165, 'Sunset Park West' = 166, 'Todt Hill-Emerson Hill-Heartland Village-Lighthouse Hill' = 167, 'Turtle Bay-East Midtown' = 168, 'University Heights-Morris Heights' = 169, 'Upper East Side-Carnegie Hill' = 170, 'Upper West Side' = 171, 'Van Cortlandt Village' = 172, 'Van Nest-Morris Park-Westchester Square' = 173, 'Washington Heights North' = 174, 'Washington Heights South' = 175, 'West Brighton' = 176, 'West Concourse' = 177, 'West Farms-Bronx River' = 178, 'West New Brighton-New Brighton-St. 
George' = 179, 'West Village' = 180, 'Westchester-Unionport' = 181, 'Westerleigh' = 182, 'Whitestone' = 183, 'Williamsbridge-Olinville' = 184, 'Williamsburg' = 185, 'Windsor Terrace' = 186, 'Woodhaven' = 187, 'Woodlawn-Wakefield' = 188, 'Woodside' = 189, 'Yorkville' = 190, 'park-cemetery-etc-Bronx' = 191, 'park-cemetery-etc-Brooklyn' = 192, 'park-cemetery-etc-Manhattan' = 193, 'park-cemetery-etc-Queens' = 194, 'park-cemetery-etc-Staten Island' = 195), dropoff_puma UInt16) ENGINE = MergeTree(pickup_date, pickup_datetime, 8192)
+```
+
+On the source server:
+
+``` sql
+CREATE TABLE trips_mergetree_x3 AS trips_mergetree_third ENGINE = Distributed(perftest, default, trips_mergetree_third, rand())
+```
+
+The following query redistributes the data:
+
+``` sql
+INSERT INTO trips_mergetree_x3 SELECT * FROM trips_mergetree
+```
+
+This takes 2454 seconds.
+
+On three servers:
+
+Q1: 0.212 seconds.
+Q2: 0.438 seconds.
+Q3: 0.733 seconds.
+Q4: 1.241 seconds.
+
+No surprises here, since the queries scale linearly.
+
+We also have results from a cluster of 140 servers:
+
+Q1: 0.028 seconds.
+Q2: 0.043 seconds.
+Q3: 0.051 seconds.
+Q4: 0.072 seconds.
+
+In this case, the query processing time is determined above all by network latency.
+We ran queries using a client located in a Yandex datacenter in Finland on a cluster in Russia, which added about 20 ms of latency.
+
+## Summary {#summary}
+
+| servers | Q1    | Q2    | Q3    | Q4    |
+|---------|-------|-------|-------|-------|
+| 1       | 0.490 | 1.224 | 2.104 | 3.593 |
+| 3       | 0.212 | 0.438 | 0.733 | 1.241 |
+| 140     | 0.028 | 0.043 | 0.051 | 0.072 |
+
+[Original article](https://clickhouse.tech/docs/en/getting_started/example_datasets/nyc_taxi/)
diff --git a/docs/ja/getting_started/example_datasets/ontime.md b/docs/ja/getting_started/example_datasets/ontime.md
deleted file mode 120000
index 87cfbb8be91..00000000000
--- a/docs/ja/getting_started/example_datasets/ontime.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/getting_started/example_datasets/ontime.md
\ No newline at end of file
diff --git a/docs/ja/getting_started/example_datasets/ontime.md b/docs/ja/getting_started/example_datasets/ontime.md
new file mode 100644
index 00000000000..0f7d18d9458
--- /dev/null
+++ b/docs/ja/getting_started/example_datasets/ontime.md
@@ -0,0 +1,412 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 15
+toc_title: "\u30AA\u30F3\u30BF\u30A4\u30E0"
+---
+
+# OnTime {#ontime}
+
+This dataset can be obtained in two ways:
+
+- import from raw data
+- download of prepared partitions
+
+## Import from Raw Data {#import-from-raw-data}
+
+Downloading data:
+
+``` bash
+for s in `seq 1987 2018`
+do
+for m in `seq 1 12`
+do
+wget https://transtats.bts.gov/PREZIP/On_Time_Reporting_Carrier_On_Time_Performance_1987_present_${s}_${m}.zip
+done
+done
+```
+
+(from https://github.com/Percona-Lab/ontime-airline-performance/blob/master/download.sh )
+
+Creating a table:
+
+``` sql
+CREATE TABLE `ontime` (
+  `Year` UInt16,
+  `Quarter` UInt8,
+  `Month` UInt8,
+  `DayofMonth` UInt8,
+  `DayOfWeek` UInt8,
+  `FlightDate` Date,
+  `UniqueCarrier` FixedString(7),
+  `AirlineID` Int32,
+  `Carrier` FixedString(2),
+  `TailNum` String,
+  `FlightNum` String,
+  `OriginAirportID` Int32,
+  `OriginAirportSeqID` Int32,
+  `OriginCityMarketID` Int32,
+  `Origin` FixedString(5),
+  `OriginCityName` String,
+  `OriginState` FixedString(2),
+  `OriginStateFips` String,
+  `OriginStateName` String,
+  `OriginWac` Int32,
+  `DestAirportID` Int32,
+  `DestAirportSeqID` Int32,
+  `DestCityMarketID` Int32,
+  `Dest` FixedString(5),
+  `DestCityName` String,
+  `DestState` FixedString(2),
+  `DestStateFips` String,
+  `DestStateName` String,
+  `DestWac` Int32,
+  `CRSDepTime` Int32,
+  `DepTime` Int32,
+  `DepDelay` Int32,
+  `DepDelayMinutes` Int32,
+  `DepDel15` Int32,
+  `DepartureDelayGroups` String,
+  `DepTimeBlk` String,
+  `TaxiOut` Int32,
+  `WheelsOff` Int32,
+  `WheelsOn` Int32,
+  `TaxiIn` Int32,
+  `CRSArrTime` Int32,
+  `ArrTime` Int32,
+  `ArrDelay` Int32,
+  `ArrDelayMinutes` Int32,
+  `ArrDel15` Int32,
+  `ArrivalDelayGroups` Int32,
+  `ArrTimeBlk` String,
+  `Cancelled` UInt8,
+  `CancellationCode` FixedString(1),
+  `Diverted` UInt8,
+  `CRSElapsedTime` Int32,
+  `ActualElapsedTime` Int32,
+  `AirTime` Int32,
+  `Flights` Int32,
+  `Distance` Int32,
+  `DistanceGroup` UInt8,
+  `CarrierDelay` Int32,
+  `WeatherDelay` Int32,
+  `NASDelay` Int32,
+  `SecurityDelay` Int32,
+  `LateAircraftDelay` Int32,
+  `FirstDepTime` String,
+  `TotalAddGTime` String,
+  `LongestAddGTime` String,
+  `DivAirportLandings` String,
+  `DivReachedDest` String,
+  `DivActualElapsedTime` String,
+  `DivArrDelay` String,
+  `DivDistance` String,
+  `Div1Airport` String,
+  `Div1AirportID` Int32,
+  `Div1AirportSeqID` Int32,
+  `Div1WheelsOn` String,
+  `Div1TotalGTime` String,
+  `Div1LongestGTime` String,
+  `Div1WheelsOff` String,
+  `Div1TailNum` String,
+  `Div2Airport` String,
+  `Div2AirportID` Int32,
+  `Div2AirportSeqID` Int32,
+  `Div2WheelsOn` String,
+  `Div2TotalGTime` String,
+  `Div2LongestGTime` String,
+  `Div2WheelsOff` String,
+  `Div2TailNum` String,
+  `Div3Airport` String,
+  `Div3AirportID` Int32,
+  `Div3AirportSeqID` Int32,
+  `Div3WheelsOn` String,
+  `Div3TotalGTime` String,
+  `Div3LongestGTime` String,
+  `Div3WheelsOff` String,
+  `Div3TailNum` String,
+  `Div4Airport` String,
+  `Div4AirportID` Int32,
+  `Div4AirportSeqID` Int32,
+  `Div4WheelsOn` String,
+  `Div4TotalGTime` String,
+  `Div4LongestGTime` String,
+  `Div4WheelsOff` String,
+  `Div4TailNum` String,
+  `Div5Airport` String,
+  `Div5AirportID` Int32,
+  `Div5AirportSeqID` Int32,
+  `Div5WheelsOn` String,
+  `Div5TotalGTime` String,
+  `Div5LongestGTime` String,
+  `Div5WheelsOff` String,
+  `Div5TailNum` String
+) ENGINE = MergeTree
+PARTITION BY Year
+ORDER BY (Carrier, FlightDate)
+SETTINGS index_granularity = 8192;
+```
+
+Loading data:
+
+``` bash
+$ for i in *.zip; do echo $i; unzip -cq $i '*.csv' | sed 's/\.00//g' | clickhouse-client --host=example-perftest01j --query="INSERT INTO ontime FORMAT CSVWithNames"; done
+```
+
+## Download of Prepared Partitions {#download-of-prepared-partitions}
+
+``` bash
+$ curl -O https://clickhouse-datasets.s3.yandex.net/ontime/partitions/ontime.tar
+$ tar xvf ontime.tar -C /var/lib/clickhouse # path to ClickHouse data directory
+$ # check permissions of unpacked data, fix if required
+$ sudo service clickhouse-server restart
+$ clickhouse-client --query "select count(*) from datasets.ontime"
+```
+
+!!! info "Info"
+    If you will run the queries described below, you have to use the full table name, `datasets.ontime`.
+
+## Queries {#queries}
+
+Q0.
+
+``` sql
+SELECT avg(c1)
+FROM
+(
+    SELECT Year, Month, count(*) AS c1
+    FROM ontime
+    GROUP BY Year, Month
+);
+```
+
+Q1. The number of flights per day from the year 2000 to 2008
+
+``` sql
+SELECT DayOfWeek, count(*) AS c
+FROM ontime
+WHERE Year>=2000 AND Year<=2008
+GROUP BY DayOfWeek
+ORDER BY c DESC;
+```
+
+Q2. The number of flights delayed by more than 10 minutes, grouped by the day of the week, for 2000-2008
+
+``` sql
+SELECT DayOfWeek, count(*) AS c
+FROM ontime
+WHERE DepDelay>10 AND Year>=2000 AND Year<=2008
+GROUP BY DayOfWeek
+ORDER BY c DESC;
+```
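+
+In the same spirit as the "better version" rewrites shown for Q5-Q7 below, Q2 can also be expressed as a share of all flights rather than a raw count. This is a sketch, not part of the original benchmark; `percent_delayed` is a name chosen here for illustration:
+
+``` sql
+-- Share of flights delayed by more than 10 minutes per day of the week, 2000-2008.
+-- DepDelay>10 evaluates to 0 or 1, so avg() of it gives the delayed fraction.
+SELECT DayOfWeek, avg(DepDelay>10)*100 AS percent_delayed
+FROM ontime
+WHERE Year>=2000 AND Year<=2008
+GROUP BY DayOfWeek
+ORDER BY percent_delayed DESC;
+```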
+
+Q3. The number of delays by airport for 2000-2008
+
+``` sql
+SELECT Origin, count(*) AS c
+FROM ontime
+WHERE DepDelay>10 AND Year>=2000 AND Year<=2008
+GROUP BY Origin
+ORDER BY c DESC
+LIMIT 10;
+```
+
+Q4. The number of delays by carrier for 2007
+
+``` sql
+SELECT Carrier, count(*)
+FROM ontime
+WHERE DepDelay>10 AND Year=2007
+GROUP BY Carrier
+ORDER BY count(*) DESC;
+```
+
+Q5. The percentage of delays by carrier for 2007
+
+``` sql
+SELECT Carrier, c, c2, c*100/c2 as c3
+FROM
+(
+    SELECT
+        Carrier,
+        count(*) AS c
+    FROM ontime
+    WHERE DepDelay>10
+        AND Year=2007
+    GROUP BY Carrier
+)
+JOIN
+(
+    SELECT
+        Carrier,
+        count(*) AS c2
+    FROM ontime
+    WHERE Year=2007
+    GROUP BY Carrier
+) USING Carrier
+ORDER BY c3 DESC;
+```
+
+Better version of the same query:
+
+``` sql
+SELECT Carrier, avg(DepDelay>10)*100 AS c3
+FROM ontime
+WHERE Year=2007
+GROUP BY Carrier
+ORDER BY c3 DESC
+```
+
+Q6. The previous request for a broader range of years, 2000-2008
+
+``` sql
+SELECT Carrier, c, c2, c*100/c2 as c3
+FROM
+(
+    SELECT
+        Carrier,
+        count(*) AS c
+    FROM ontime
+    WHERE DepDelay>10
+        AND Year>=2000 AND Year<=2008
+    GROUP BY Carrier
+)
+JOIN
+(
+    SELECT
+        Carrier,
+        count(*) AS c2
+    FROM ontime
+    WHERE Year>=2000 AND Year<=2008
+    GROUP BY Carrier
+) USING Carrier
+ORDER BY c3 DESC;
+```
+
+Better version of the same query:
+
+``` sql
+SELECT Carrier, avg(DepDelay>10)*100 AS c3
+FROM ontime
+WHERE Year>=2000 AND Year<=2008
+GROUP BY Carrier
+ORDER BY c3 DESC;
+```
+
+Q7. The percentage of flights delayed for more than 10 minutes, by year
+
+``` sql
+SELECT Year, c1/c2
+FROM
+(
+    select
+        Year,
+        count(*)*100 as c1
+    from ontime
+    WHERE DepDelay>10
+    GROUP BY Year
+)
+JOIN
+(
+    select
+        Year,
+        count(*) as c2
+    from ontime
+    GROUP BY Year
+) USING (Year)
+ORDER BY Year;
+```
+
+Better version of the same query:
+
+``` sql
+SELECT Year, avg(DepDelay>10)*100
+FROM ontime
+GROUP BY Year
+ORDER BY Year;
+```
+
+Q8. The most popular destinations by the number of directly connected cities for various year ranges
+
+``` sql
+SELECT DestCityName, uniqExact(OriginCityName) AS u
+FROM ontime
+WHERE Year >= 2000 and Year <= 2010
+GROUP BY DestCityName
+ORDER BY u DESC LIMIT 10;
+```
+
+Q9.
+
+``` sql
+SELECT Year, count(*) AS c1
+FROM ontime
+GROUP BY Year;
+```
+
+Q10.
+
+``` sql
+SELECT
+    min(Year), max(Year), Carrier, count(*) AS cnt,
+    sum(ArrDelayMinutes>30) AS flights_delayed,
+    round(sum(ArrDelayMinutes>30)/count(*),2) AS rate
+FROM ontime
+WHERE
+    DayOfWeek NOT IN (6,7) AND OriginState NOT IN ('AK', 'HI', 'PR', 'VI')
+    AND DestState NOT IN ('AK', 'HI', 'PR', 'VI')
+    AND FlightDate < '2010-01-01'
+GROUP by Carrier
+HAVING cnt>100000 and max(Year)>1990
+ORDER by rate DESC
+LIMIT 1000;
+```
+
+Bonus:
+
+``` sql
+SELECT avg(cnt)
+FROM
+(
+    SELECT Year,Month,count(*) AS cnt
+    FROM ontime
+    WHERE DepDel15=1
+    GROUP BY Year,Month
+);
+
+SELECT avg(c1) FROM
+(
+    SELECT Year,Month,count(*) AS c1
+    FROM ontime
+    GROUP BY Year,Month
+);
+
+SELECT DestCityName, uniqExact(OriginCityName) AS u
+FROM ontime
+GROUP BY DestCityName
+ORDER BY u DESC
+LIMIT 10;
+
+SELECT OriginCityName, DestCityName, count() AS c
+FROM ontime
+GROUP BY OriginCityName, DestCityName
+ORDER BY c DESC
+LIMIT 10;
+
+SELECT OriginCityName, count() AS c
+FROM ontime
+GROUP BY OriginCityName
+ORDER BY c DESC
+LIMIT 10;
+```
+
+This performance test was created by Vadim Tkachenko. See:
+
+- https://www.percona.com/blog/2009/10/02/analyzing-air-traffic-performance-with-infobright-and-monetdb/
+- https://www.percona.com/blog/2009/10/26/air-traffic-queries-in-luciddb/
+- https://www.percona.com/blog/2009/11/02/air-traffic-queries-in-infinidb-early-alpha/
+- https://www.percona.com/blog/2014/04/21/using-apache-hadoop-and-impala-together-with-mysql-for-data-analysis/
+- https://www.percona.com/blog/2016/01/07/apache-spark-with-air-ontime-performance-data/
+- http://nickmakos.blogspot.ru/2012/08/analyzing-air-traffic-performance-with.html
+
+[Original article](https://clickhouse.tech/docs/en/getting_started/example_datasets/ontime/)
diff --git a/docs/ja/getting_started/example_datasets/star_schema.md b/docs/ja/getting_started/example_datasets/star_schema.md
deleted file mode 120000
index 1c26392dd23..00000000000
--- a/docs/ja/getting_started/example_datasets/star_schema.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/getting_started/example_datasets/star_schema.md
\ No newline at end of file
diff --git a/docs/ja/getting_started/example_datasets/star_schema.md b/docs/ja/getting_started/example_datasets/star_schema.md
new file mode 100644
index 00000000000..0cf23d94871
--- /dev/null
+++ b/docs/ja/getting_started/example_datasets/star_schema.md
@@ -0,0 +1,371 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 20
+toc_title: "\u30B9\u30BF\u30FC\u30B9\u30AD\u30FC\u30DE\u306E\u30D9\u30F3\u30C1\u30DE\
+  \u30FC\u30AF"
+---
+
+# Star Schema Benchmark {#star-schema-benchmark}
+
+Compiling dbgen:
+
+``` bash
+$ git clone git@github.com:vadimtk/ssb-dbgen.git
+$ cd ssb-dbgen
+$ make
+```
+
+Generating data:
+
+!!!
warning "注意"
    `-s 100` では、dbgenは6億行(67GB)を生成しますが、`-s 1000` では60億行を生成します(これには多くの時間がかかります)

``` bash
$ ./dbgen -s 1000 -T c
$ ./dbgen -s 1000 -T l
$ ./dbgen -s 1000 -T p
$ ./dbgen -s 1000 -T s
$ ./dbgen -s 1000 -T d
```

ClickHouseでのテーブルの作成:

``` sql
CREATE TABLE customer
(
    C_CUSTKEY UInt32,
    C_NAME String,
    C_ADDRESS String,
    C_CITY LowCardinality(String),
    C_NATION LowCardinality(String),
    C_REGION LowCardinality(String),
    C_PHONE String,
    C_MKTSEGMENT LowCardinality(String)
)
ENGINE = MergeTree ORDER BY (C_CUSTKEY);

CREATE TABLE lineorder
(
    LO_ORDERKEY UInt32,
    LO_LINENUMBER UInt8,
    LO_CUSTKEY UInt32,
    LO_PARTKEY UInt32,
    LO_SUPPKEY UInt32,
    LO_ORDERDATE Date,
    LO_ORDERPRIORITY LowCardinality(String),
    LO_SHIPPRIORITY UInt8,
    LO_QUANTITY UInt8,
    LO_EXTENDEDPRICE UInt32,
    LO_ORDTOTALPRICE UInt32,
    LO_DISCOUNT UInt8,
    LO_REVENUE UInt32,
    LO_SUPPLYCOST UInt32,
    LO_TAX UInt8,
    LO_COMMITDATE Date,
    LO_SHIPMODE LowCardinality(String)
)
ENGINE = MergeTree PARTITION BY toYear(LO_ORDERDATE) ORDER BY (LO_ORDERDATE, LO_ORDERKEY);

CREATE TABLE part
(
    P_PARTKEY UInt32,
    P_NAME String,
    P_MFGR LowCardinality(String),
    P_CATEGORY LowCardinality(String),
    P_BRAND LowCardinality(String),
    P_COLOR LowCardinality(String),
    P_TYPE LowCardinality(String),
    P_SIZE UInt8,
    P_CONTAINER LowCardinality(String)
)
ENGINE = MergeTree ORDER BY P_PARTKEY;

CREATE TABLE supplier
(
    S_SUPPKEY UInt32,
    S_NAME String,
    S_ADDRESS String,
    S_CITY LowCardinality(String),
    S_NATION LowCardinality(String),
    S_REGION LowCardinality(String),
    S_PHONE String
)
ENGINE = MergeTree ORDER BY S_SUPPKEY;
```

データの挿入:

``` bash
$ clickhouse-client --query "INSERT INTO customer FORMAT CSV" < customer.tbl
$ clickhouse-client --query "INSERT INTO part FORMAT CSV" < part.tbl
$ clickhouse-client --query "INSERT INTO supplier FORMAT CSV" < supplier.tbl
$ clickhouse-client --query "INSERT INTO lineorder FORMAT CSV" < lineorder.tbl
```

“star schema” を非正規化された “flat schema” に変換するには:

``` sql
SET max_memory_usage = 20000000000;

CREATE TABLE lineorder_flat
ENGINE = MergeTree
PARTITION BY toYear(LO_ORDERDATE)
ORDER BY (LO_ORDERDATE, LO_ORDERKEY) AS
SELECT
    l.LO_ORDERKEY AS LO_ORDERKEY,
    l.LO_LINENUMBER AS LO_LINENUMBER,
    l.LO_CUSTKEY AS LO_CUSTKEY,
    l.LO_PARTKEY AS LO_PARTKEY,
    l.LO_SUPPKEY AS LO_SUPPKEY,
    l.LO_ORDERDATE AS LO_ORDERDATE,
    l.LO_ORDERPRIORITY AS LO_ORDERPRIORITY,
    l.LO_SHIPPRIORITY AS LO_SHIPPRIORITY,
    l.LO_QUANTITY AS LO_QUANTITY,
    l.LO_EXTENDEDPRICE AS LO_EXTENDEDPRICE,
    l.LO_ORDTOTALPRICE AS LO_ORDTOTALPRICE,
    l.LO_DISCOUNT AS LO_DISCOUNT,
    l.LO_REVENUE AS LO_REVENUE,
    l.LO_SUPPLYCOST AS LO_SUPPLYCOST,
    l.LO_TAX AS LO_TAX,
    l.LO_COMMITDATE AS LO_COMMITDATE,
    l.LO_SHIPMODE AS LO_SHIPMODE,
    c.C_NAME AS C_NAME,
    c.C_ADDRESS AS C_ADDRESS,
    c.C_CITY AS C_CITY,
    c.C_NATION AS C_NATION,
    c.C_REGION AS C_REGION,
    c.C_PHONE AS C_PHONE,
    c.C_MKTSEGMENT AS C_MKTSEGMENT,
    s.S_NAME AS S_NAME,
    s.S_ADDRESS AS S_ADDRESS,
    s.S_CITY AS S_CITY,
    s.S_NATION AS S_NATION,
    s.S_REGION AS S_REGION,
    s.S_PHONE AS S_PHONE,
    p.P_NAME AS P_NAME,
    p.P_MFGR AS P_MFGR,
    p.P_CATEGORY AS P_CATEGORY,
    p.P_BRAND AS P_BRAND,
    p.P_COLOR AS P_COLOR,
    p.P_TYPE AS P_TYPE,
    p.P_SIZE AS P_SIZE,
    p.P_CONTAINER AS P_CONTAINER
FROM lineorder AS l
INNER JOIN customer AS c ON c.C_CUSTKEY = l.LO_CUSTKEY
INNER JOIN supplier AS s ON s.S_SUPPKEY = l.LO_SUPPKEY
INNER JOIN part AS p ON p.P_PARTKEY = l.LO_PARTKEY;
```

クエリの実行:

Q1.1

``` 
sql +SELECT sum(LO_EXTENDEDPRICE * LO_DISCOUNT) AS revenue +FROM lineorder_flat +WHERE toYear(LO_ORDERDATE) = 1993 AND LO_DISCOUNT BETWEEN 1 AND 3 AND LO_QUANTITY < 25; +``` + +Q1.2 + +``` sql +SELECT sum(LO_EXTENDEDPRICE * LO_DISCOUNT) AS revenue +FROM lineorder_flat +WHERE toYYYYMM(LO_ORDERDATE) = 199401 AND LO_DISCOUNT BETWEEN 4 AND 6 AND LO_QUANTITY BETWEEN 26 AND 35; +``` + +Q1.3 + +``` sql +SELECT sum(LO_EXTENDEDPRICE * LO_DISCOUNT) AS revenue +FROM lineorder_flat +WHERE toISOWeek(LO_ORDERDATE) = 6 AND toYear(LO_ORDERDATE) = 1994 + AND LO_DISCOUNT BETWEEN 5 AND 7 AND LO_QUANTITY BETWEEN 26 AND 35; +``` + +Q2.1 + +``` sql +SELECT + sum(LO_REVENUE), + toYear(LO_ORDERDATE) AS year, + P_BRAND +FROM lineorder_flat +WHERE P_CATEGORY = 'MFGR#12' AND S_REGION = 'AMERICA' +GROUP BY + year, + P_BRAND +ORDER BY + year, + P_BRAND; +``` + +Q2.2 + +``` sql +SELECT + sum(LO_REVENUE), + toYear(LO_ORDERDATE) AS year, + P_BRAND +FROM lineorder_flat +WHERE P_BRAND >= 'MFGR#2221' AND P_BRAND <= 'MFGR#2228' AND S_REGION = 'ASIA' +GROUP BY + year, + P_BRAND +ORDER BY + year, + P_BRAND; +``` + +Q2.3 + +``` sql +SELECT + sum(LO_REVENUE), + toYear(LO_ORDERDATE) AS year, + P_BRAND +FROM lineorder_flat +WHERE P_BRAND = 'MFGR#2239' AND S_REGION = 'EUROPE' +GROUP BY + year, + P_BRAND +ORDER BY + year, + P_BRAND; +``` + +Q3.1 + +``` sql +SELECT + C_NATION, + S_NATION, + toYear(LO_ORDERDATE) AS year, + sum(LO_REVENUE) AS revenue +FROM lineorder_flat +WHERE C_REGION = 'ASIA' AND S_REGION = 'ASIA' AND year >= 1992 AND year <= 1997 +GROUP BY + C_NATION, + S_NATION, + year +ORDER BY + year ASC, + revenue DESC; +``` + +Q3.2 + +``` sql +SELECT + C_CITY, + S_CITY, + toYear(LO_ORDERDATE) AS year, + sum(LO_REVENUE) AS revenue +FROM lineorder_flat +WHERE C_NATION = 'UNITED STATES' AND S_NATION = 'UNITED STATES' AND year >= 1992 AND year <= 1997 +GROUP BY + C_CITY, + S_CITY, + year +ORDER BY + year ASC, + revenue DESC; +``` + +Q3.3 + +``` sql +SELECT + C_CITY, + S_CITY, + toYear(LO_ORDERDATE) AS year, + sum(LO_REVENUE) AS revenue +FROM lineorder_flat +WHERE (C_CITY = 'UNITED KI1' OR C_CITY = 'UNITED KI5') AND (S_CITY = 'UNITED KI1' OR S_CITY = 'UNITED KI5') AND year >= 1992 AND year <= 1997 +GROUP BY + C_CITY, + S_CITY, + year +ORDER BY + year ASC, + revenue DESC; +``` + +Q3.4 + +``` sql +SELECT + C_CITY, + S_CITY, + toYear(LO_ORDERDATE) AS year, + sum(LO_REVENUE) AS revenue +FROM lineorder_flat +WHERE (C_CITY = 'UNITED KI1' OR C_CITY = 'UNITED KI5') AND (S_CITY = 'UNITED KI1' OR S_CITY = 'UNITED KI5') AND toYYYYMM(LO_ORDERDATE) = 199712 +GROUP BY + C_CITY, + S_CITY, + year +ORDER BY + year ASC, + revenue DESC; +``` + +Q4.1 + +``` sql +SELECT + toYear(LO_ORDERDATE) AS year, + C_NATION, + sum(LO_REVENUE - LO_SUPPLYCOST) AS profit +FROM lineorder_flat +WHERE C_REGION = 'AMERICA' AND S_REGION = 'AMERICA' AND (P_MFGR = 'MFGR#1' OR P_MFGR = 'MFGR#2') +GROUP BY + year, + C_NATION +ORDER BY + year ASC, + C_NATION ASC; +``` + +Q4.2 + +``` sql +SELECT + toYear(LO_ORDERDATE) AS year, + S_NATION, + P_CATEGORY, + sum(LO_REVENUE - LO_SUPPLYCOST) AS profit +FROM lineorder_flat +WHERE C_REGION = 'AMERICA' AND S_REGION = 'AMERICA' AND (year = 1997 OR year = 1998) AND (P_MFGR = 'MFGR#1' OR P_MFGR = 'MFGR#2') +GROUP BY + year, + S_NATION, + P_CATEGORY +ORDER BY + year ASC, + S_NATION ASC, + P_CATEGORY ASC; +``` + +Q4.3 + +``` sql +SELECT + toYear(LO_ORDERDATE) AS year, + S_CITY, + P_BRAND, + sum(LO_REVENUE - LO_SUPPLYCOST) AS profit +FROM lineorder_flat +WHERE S_NATION = 'UNITED STATES' AND (year = 1997 OR year = 1998) AND P_CATEGORY = 
'MFGR#14' +GROUP BY + year, + S_CITY, + P_BRAND +ORDER BY + year ASC, + S_CITY ASC, + P_BRAND ASC; +``` + +[元の記事](https://clickhouse.tech/docs/en/getting_started/example_datasets/star_schema/) diff --git a/docs/ja/getting_started/example_datasets/wikistat.md b/docs/ja/getting_started/example_datasets/wikistat.md deleted file mode 120000 index bf6e780fb27..00000000000 --- a/docs/ja/getting_started/example_datasets/wikistat.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/getting_started/example_datasets/wikistat.md \ No newline at end of file diff --git a/docs/ja/getting_started/example_datasets/wikistat.md b/docs/ja/getting_started/example_datasets/wikistat.md new file mode 100644 index 00000000000..8b92c8cff1b --- /dev/null +++ b/docs/ja/getting_started/example_datasets/wikistat.md @@ -0,0 +1,35 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 18 +toc_title: WikiStat +--- + +# WikiStat {#wikistat} + +参照:http://dumps.wikimedia.org/other/pagecounts-raw/ + +テーブルの作成: + +``` sql +CREATE TABLE wikistat +( + date Date, + time DateTime, + project String, + subproject String, + path String, + hits UInt64, + size UInt64 +) ENGINE = MergeTree(date, (path, time), 8192); +``` + +データのロード: + +``` bash +$ for i in {2007..2016}; do for j in {01..12}; do echo $i-$j >&2; curl -sSL "http://dumps.wikimedia.org/other/pagecounts-raw/$i/$i-$j/" | grep -oE 'pagecounts-[0-9]+-[0-9]+\.gz'; done; done | sort | uniq | tee links.txt +$ cat links.txt | while read link; do wget http://dumps.wikimedia.org/other/pagecounts-raw/$(echo $link | sed -r 's/pagecounts-([0-9]{4})([0-9]{2})[0-9]{2}-[0-9]+\.gz/\1/')/$(echo $link | sed -r 's/pagecounts-([0-9]{4})([0-9]{2})[0-9]{2}-[0-9]+\.gz/\1-\2/')/$link; done +$ ls -1 /opt/wikistat/ | grep gz | while read i; do echo $i; gzip -cd /opt/wikistat/$i | ./wikistat-loader --time="$(echo -n $i | sed -r 's/pagecounts-([0-9]{4})([0-9]{2})([0-9]{2})-([0-9]{2})([0-9]{2})([0-9]{2})\.gz/\1-\2-\3 \4-00-00/')" | clickhouse-client --query="INSERT INTO wikistat FORMAT TabSeparated"; done +``` + +[元の記事](https://clickhouse.tech/docs/en/getting_started/example_datasets/wikistat/) diff --git a/docs/ja/getting_started/index.md b/docs/ja/getting_started/index.md deleted file mode 120000 index 1acedb0f03e..00000000000 --- a/docs/ja/getting_started/index.md +++ /dev/null @@ -1 +0,0 @@ -../../en/getting_started/index.md \ No newline at end of file diff --git a/docs/ja/getting_started/index.md b/docs/ja/getting_started/index.md new file mode 100644 index 00000000000..2801b8d08e5 --- /dev/null +++ b/docs/ja/getting_started/index.md @@ -0,0 +1,17 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_folder_title: Getting Started +toc_hidden: true +toc_priority: 8 +toc_title: "\u96A0\u3055\u308C\u305F" +--- + +# はじめに {#getting-started} + +あなたがclickhouseに慣れていないし、そのパフォーマンスのハンズオン感を取得したい場合は、まず、あなたが通過する必要があります [インストール処理](install.md). 
その後、次のことができます:

- [詳細なチュートリアルを通過する](tutorial.md)
- [データセット例で実験する](example_datasets/ontime.md)

[元の記事](https://clickhouse.tech/docs/en/getting_started/)
diff --git a/docs/ja/getting_started/install.md b/docs/ja/getting_started/install.md
deleted file mode 120000
index 60aa3fb93a4..00000000000
--- a/docs/ja/getting_started/install.md
+++ /dev/null
@@ -1 +0,0 @@
-../../en/getting_started/install.md
\ No newline at end of file
diff --git a/docs/ja/getting_started/install.md b/docs/ja/getting_started/install.md
new file mode 100644
index 00000000000..c048268feca
--- /dev/null
+++ b/docs/ja/getting_started/install.md
@@ -0,0 +1,191 @@
---
machine_translated: true
machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
toc_priority: 11
toc_title: "\u8A2D\u7F6E"
---

# 設置 {#installation}

## システム要件 {#system-requirements}

ClickHouseは、x86\_64、AArch64、またはPowerPC64LE CPUアーキテクチャを使用して、Linux、FreeBSD、またはMac OS X上で実行できます。

公式の事前構築されたバイナリは、通常x86\_64用にコンパイルされ、SSE 4.2命令セットを利用するため、特に明記されていない限り、それをサポートするCPUが必要です。現在のCPUがSSE 4.2をサポートしているかどうかを確認するコマンドは次のとおりです:

``` bash
$ grep -q sse4_2 /proc/cpuinfo && echo "SSE 4.2 supported" || echo "SSE 4.2 not supported"
```

SSE 4.2をサポートしないプロセッサ、またはAArch64やPowerPC64LEアーキテクチャでClickHouseを実行するには、適切な構成調整を行って [ソースからClickHouseをビルド](#from-sources) する必要があります。

## 使用可能な設置方法 {#available-installation-options}

### DEBパッケージから {#install-from-deb-packages}

DebianやUbuntuでは、公式の事前コンパイル済み `deb` パッケージを使用することをお勧めします。

公式パッケージを使用するには、Yandexのリポジトリを `/etc/apt/sources.list` または別の `/etc/apt/sources.list.d/clickhouse.list` ファイルに追加します:

    deb http://repo.clickhouse.tech/deb/stable/ main/

最新のバージョンを使用する場合は、`stable` を `testing` に置き換えます(これはテスト環境に推奨されます)。

次に、以下のコマンドを実行してパッケージをインストールします:

``` bash
sudo apt-get install dirmngr    # optional
sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv E0C56BD4    # optional
sudo apt-get update
sudo apt-get install clickhouse-client clickhouse-server
```

パッケージを手動でダウンロードしてインストールすることもできます: https://repo.yandex.ru/clickhouse/deb/stable/main/.

#### パッケージ {#packages}

- `clickhouse-common-static` — Installs ClickHouse compiled binary files.
- `clickhouse-server` — `clickhouse-server` のシンボリックリンクを作成し、デフォルトのサーバー設定をインストールします。
- `clickhouse-client` — `clickhouse-client` およびその他のクライアント関連ツールのシンボリックリンクを作成し、クライアント設定ファイルをインストールします。
- `clickhouse-common-static-dbg` — Installs ClickHouse compiled binary files with debug info.

### RPMパッケージから {#from-rpm-packages}

CentOS、RedHat、その他のrpmベースのLinuxディストリビューションでは、公式の事前コンパイル済み `rpm` パッケージを使用することをお勧めします。

まず、公式リポジトリを追加する必要があります:

``` bash
sudo yum install yum-utils
sudo rpm --import https://repo.clickhouse.tech/CLICKHOUSE-KEY.GPG
sudo yum-config-manager --add-repo https://repo.clickhouse.tech/rpm/stable/x86_64
```

最新のバージョンを使用する場合は、`stable` を `testing` に置き換えます(これはテスト環境に推奨されます)。 `prestable` タグも時々利用可能です。

次に、以下のコマンドを実行してパッケージをインストールします:

``` bash
sudo yum install clickhouse-server clickhouse-client
```

パッケージを手動でダウンロードしてインストールすることもできます: https://repo.clickhouse.tech/rpm/stable/x86\_64. 
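 
インストールが完了したかどうかは、次のように確認できます(原文にはない補足例です。`yum list` はrpm系での一般的な確認方法で、deb系では `apt list --installed 'clickhouse*'` などに読み替えてください):

``` bash
# インストール済みのClickHouseパッケージとバージョンを一覧表示する
sudo yum list installed 'clickhouse*'

# クライアントのバイナリが利用できることを確認する
clickhouse-client --version
```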
+ +### Tgzアーカイブから {#from-tgz-archives} + +公式の事前コンパイルを使用することをお勧めします `tgz` すべてのLinuxディストリビュ `deb` または `rpm` パッケージは不可能です。 + +必要なバージョンは `curl` または `wget` リポジトリからhttps://repo.yandex.ru/clickhouse/tgz/。 +その後、アーカイブをダウンロードは開梱と設置と設置のためのイントロダクションです。 最新バージョンの例: + +``` bash +export LATEST_VERSION=`curl https://api.github.com/repos/ClickHouse/ClickHouse/tags 2>/dev/null | grep -Eo '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+' | head -n 1` +curl -O https://repo.clickhouse.tech/tgz/clickhouse-common-static-$LATEST_VERSION.tgz +curl -O https://repo.clickhouse.tech/tgz/clickhouse-common-static-dbg-$LATEST_VERSION.tgz +curl -O https://repo.clickhouse.tech/tgz/clickhouse-server-$LATEST_VERSION.tgz +curl -O https://repo.clickhouse.tech/tgz/clickhouse-client-$LATEST_VERSION.tgz + +tar -xzvf clickhouse-common-static-$LATEST_VERSION.tgz +sudo clickhouse-common-static-$LATEST_VERSION/install/doinst.sh + +tar -xzvf clickhouse-common-static-dbg-$LATEST_VERSION.tgz +sudo clickhouse-common-static-dbg-$LATEST_VERSION/install/doinst.sh + +tar -xzvf clickhouse-server-$LATEST_VERSION.tgz +sudo clickhouse-server-$LATEST_VERSION/install/doinst.sh +sudo /etc/init.d/clickhouse-server start + +tar -xzvf clickhouse-client-$LATEST_VERSION.tgz +sudo clickhouse-client-$LATEST_VERSION/install/doinst.sh +``` + +運用環境では、最新のものを使用することをお勧めします `stable`-バージョン。 その番号はGitHubページにありますhttps://github.com/ClickHouse/ClickHouse/tags 後置を使って `-stable`. + +### ドッカーの画像から {#from-docker-image} + +Dockerの中でClickHouseを実行するには、次のガイドに従います [Docker拠点](https://hub.docker.com/r/yandex/clickhouse-server/). これらの画像は、公式を使用 `deb` 中のパッケージ。 + +### ソースから {#from-sources} + +ClickHouseを手動でコンパイルするには、以下の手順に従います [Linux](../development/build.md) または [Mac OS X](../development/build_osx.md). + +できるコンパイルパッケージはインストールしていたプログラムを使用もインストールせずにパッケージ。 またビルを手動で無数の組み合わせで自分だけのsse4.2に必要構築のためのaarch64定する必要はありません。 + + Client: programs/clickhouse-client + Server: programs/clickhouse-server + +データとメタデータフォルダを作成する必要があります `chown` 目的のユーザーのためのそれら。 それらのパスは、server config(src/programs/server/config)で変更できます。xml)、デフォルトでは次のとおりです: + + /opt/clickhouse/data/default/ + /opt/clickhouse/metadata/default/ + +Gentooでは、次のものを使用できます `emerge clickhouse` ソースからClickHouseをインストールする。 + +## 起動 {#launch} + +サーバを起動デーモンとして、: + +``` bash +$ sudo service clickhouse-server start +``` + +あなたが持っていない場合 `service` コマンド、実行 + +``` bash +$ sudo /etc/init.d/clickhouse-server start +``` + +のログを参照してください `/var/log/clickhouse-server/` ディレクトリ。 + +サーバーが起動しない場合は、ファイル内の設定を確認してください `/etc/clickhouse-server/config.xml`. + +または手動で開始のサーバーからのコンソール: + +``` bash +$ clickhouse-server --config-file=/etc/clickhouse-server/config.xml +``` + +この場合、ログはコンソールに印刷され、開発中に便利です。 +設定ファイルがカレントディレクトリにある場合は、以下を指定する必要はありません。 `--config-file` パラメータ。 デフォルトでは、 `./config.xml`. + +ClickHouse対応アクセス制限を設定します。 彼らはに位置しています `users.xml` ファイル(次へ `config.xml`). +デフォルトでは、アクセスはどこからでも可能です。 `default` ユーザー、パスワードなし。 見る `user/default/networks`. +詳細については、以下を参照してください [“Configuration Files”](../operations/configuration_files.md). + +サーバーの起動後、コマンドラインクライアントを使用してサーバーに接続できます: + +``` bash +$ clickhouse-client +``` + +デフォルトでは、 `localhost:9000` ユーザーに代わって `default` パスワードなし。 また、以下を使用してリモートサーバーに接続することもできます `--host` 引数。 + +端末はutf-8エンコードを使用する必要があります。 +詳細については、以下を参照してください [“Command-line client”](../interfaces/cli.md). + +例えば: + +``` bash +$ ./clickhouse-client +ClickHouse client version 0.0.18749. +Connecting to localhost:9000. +Connected to ClickHouse server version 0.0.18749. + +:) SELECT 1 + +SELECT 1 + +┌─1─┐ +│ 1 │ +└───┘ + +1 rows in set. Elapsed: 0.003 sec. 
+ +:) +``` + +**おめでとう、システムの作品!** + +継続実験をダウンロードでき、試験データセットやじ [tutorial](https://clickhouse.tech/tutorial.html). + +[元の記事](https://clickhouse.tech/docs/en/getting_started/install/) diff --git a/docs/ja/getting_started/playground.md b/docs/ja/getting_started/playground.md deleted file mode 120000 index de5b41f453e..00000000000 --- a/docs/ja/getting_started/playground.md +++ /dev/null @@ -1 +0,0 @@ -../../en/getting_started/playground.md \ No newline at end of file diff --git a/docs/ja/getting_started/playground.md b/docs/ja/getting_started/playground.md new file mode 100644 index 00000000000..54bf1f1fbbb --- /dev/null +++ b/docs/ja/getting_started/playground.md @@ -0,0 +1,48 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 14 +toc_title: "\u904A\u3073\u5834" +--- + +# ClickHouseの遊び場 {#clickhouse-playground} + +[ClickHouseの遊び場](https://play.clickhouse.tech?file=welcome) サーバーやクラスターを設定せずに、クエリを即座に実行することで、ClickHouseを試すことができます。 +複数の例ではデータセットの遊び場などのサンプルのクエリを表すclickhouse特徴です。 + +クエリは読み取り専用ユーザーとして実行されます。 では一部制限: + +- DDLクエリは許可されません +- 挿入クエリは許可されません + +次の設定も適用されます: +- [`max_result_bytes=10485760`](../operations/settings/query_complexity/#max-result-bytes) +- [`max_result_rows=2000`](../operations/settings/query_complexity/#setting-max_result_rows) +- [`result_overflow_mode=break`](../operations/settings/query_complexity/#result-overflow-mode) +- [`max_execution_time=60000`](../operations/settings/query_complexity/#max-execution-time) + +ClickHouseの運動場はm2の経験を与える。小さい +[管理サービスclickhouse](https://cloud.yandex.com/services/managed-clickhouse) +インスタンス [Yandexの。クラウド](https://cloud.yandex.com/). +詳細については、 [クラウドプロバイダー](../commercial/cloud.md). + +ツつィツ姪"ツつ"ツ債ツつケ [HTTP API](../interfaces/http.md). +コミュニケーションが円滑にバックエンドがありclickhouseクラスターになサーバーサイド願います。 +ClickHouse HTTPS評価項目としても利用可能ですの一部が遊べない日々が続いていました。 + +できるクエリーの遊び場をhttpお客様は、例えば [カール](https://curl.haxx.se) または [wget](https://www.gnu.org/software/wget/)、または以下を使用して接続を設定する [JDBC](../interfaces/jdbc.md) または [ODBC](../interfaces/odbc.md) ドライバー +情報ソフトウェア製品を支えるclickhouse可能 [ここに](../interfaces/index.md). + +| パラメータ | 値 | +|:-----------|:----------------------------------------------| +| エンドポイ | https://play-api。クリックハウス。テック:8443 | +| ユーザ | `playground` | +| パスワード | `clickhouse` | + +このエンドポイントには安全な接続が必要です。 + +例えば: + +``` bash +curl "https://play-api.clickhouse.tech:8443/?query=SELECT+'Play+ClickHouse!';&user=playground&password=clickhouse&database=datasets" +``` diff --git a/docs/ja/getting_started/tutorial.md b/docs/ja/getting_started/tutorial.md deleted file mode 120000 index 8bc40816ab2..00000000000 --- a/docs/ja/getting_started/tutorial.md +++ /dev/null @@ -1 +0,0 @@ -../../en/getting_started/tutorial.md \ No newline at end of file diff --git a/docs/ja/getting_started/tutorial.md b/docs/ja/getting_started/tutorial.md new file mode 100644 index 00000000000..6c3ec545a46 --- /dev/null +++ b/docs/ja/getting_started/tutorial.md @@ -0,0 +1,671 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 12 +toc_title: Tutorial +--- + +# ClickHouseチュートリアル {#clickhouse-tutorial} + +## このチュートリアルから何を期待する? 
{#what-to-expect-from-this-tutorial} + +このチュートリアルでは、クリックハウスクラスタを設定する方法について説明します。 それは小さい、しかし耐障害性および拡張可能である。 次に、例のデータセットのいずれかを使用してデータを入力し、いくつかのデモクエリを実行します。 + +## 単一ノード設定 {#single-node-setup} + +分散環境の複雑さを延期するには、まず、単一のサーバーまたは仮想マシンにclickhouseを展開します。 clickhouseは通常からインストール [deb](index.md#install-from-deb-packages) または [rpm](index.md#from-rpm-packages) パッケージがあります [代替案](index.md#from-docker-image) の営業システムな支援します。 + +たとえば、次の項目を選択しました `deb` パッケージと実行: + +``` bash +sudo apt-get install dirmngr +sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv E0C56BD4 + +echo "deb http://repo.clickhouse.tech/deb/stable/ main/" | sudo tee /etc/apt/sources.list.d/clickhouse.list +sudo apt-get update + +sudo apt-get install -y clickhouse-server clickhouse-client +``` + +インストールされたパッケージには何がありますか: + +- `clickhouse-client` パッケージ内容: [クリックハウス-顧客](../interfaces/cli.md) ケイClickHouseコンソールです。 +- `clickhouse-common` パッケージが含まれてClickHouse実行可能ファイルです。 +- `clickhouse-server` パッケージを含む設定ファイルを実行ClickHouseしています。 + +サーバ設定ファイルを置 `/etc/clickhouse-server/`. さらに進む前に、 `` 要素の `config.xml`. パスはデータストレージの場所を決定するので、ディスク容量の大きいボリュームに配置する必要があります。 `/var/lib/clickhouse/`. 設定を調整したい場合は、直接編集するのは便利ではありません `config.xml` ファイルは、将来のパッケージ更新で書き直される可能性があります。 のオーバーライドは、config要素の作成 [config内のファイル。dディレクトリ](../operations/configuration_files.md) として役立つ “patches” 設定する。xmlだ + +あなたが気づいたように, `clickhouse-server` パッケージのイ 更新後も自動的に再起動されることはありません。 サーバーを起動する方法は、initシステムによって異なります。: + +``` bash +sudo service clickhouse-server start +``` + +または + +``` bash +sudo /etc/init.d/clickhouse-server start +``` + +サーバーログのデフォルトの場所は、 `/var/log/clickhouse-server/`. サーバーはクライアント接続を処理する準備ができています。 `Ready for connections` メッセージ + +一度 `clickhouse-server` 稼働している、我々は使用することができます `clickhouse-client` サーバーに接続し、次のようなテストクエリを実行するには `SELECT "Hello, world!";`. + +
 
clickhouse-client のクイックヒント

対話モード:

``` bash
clickhouse-client
clickhouse-client --host=... --port=... --user=... --password=...
```

複数行のクエリを有効にする:

``` bash
clickhouse-client -m
clickhouse-client --multiline
```

バッチモードでのクエリの実行:

``` bash
clickhouse-client --query='SELECT 1'
echo 'SELECT 1' | clickhouse-client
clickhouse-client <<< 'SELECT 1'
```

指定した形式のファイルからデータを挿入する:

``` bash
clickhouse-client --query='INSERT INTO table VALUES' < data.txt
clickhouse-client --query='INSERT INTO table FORMAT TabSeparated' < data.tsv
```
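 
ファイルに保存した複数のクエリをまとめて実行する(原文にはない補足例です。`script.sql` というファイル名は仮のものです):

``` bash
# セミコロン区切りのクエリを標準入力から順に実行する
clickhouse-client --multiquery < script.sql
```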
    + +## インポートサンプル {#import-sample-dataset} + +今回は入社clickhouseサーバーサンプルデータです。 このチュートリアルでは、yandexの匿名化されたデータを使用します。metricaは、オープンソースになる前にclickhouseを運用方法で実行する最初のサービスです(詳細は [履歴セクション](../introduction/history.md)). あります [Yandexをインポートする複数の方法。Metricaデータセット](example_datasets/metrica.md)、そしてチュートリアルのために、我々は最も現実的なもので行くよ。 + +### 表データのダウンロードと抽出 {#download-and-extract-table-data} + +``` bash +curl https://clickhouse-datasets.s3.yandex.net/hits/tsv/hits_v1.tsv.xz | unxz --threads=`nproc` > hits_v1.tsv +curl https://clickhouse-datasets.s3.yandex.net/visits/tsv/visits_v1.tsv.xz | unxz --threads=`nproc` > visits_v1.tsv +``` + +抽出されたファイルのサイズは約10gbです。 + +### テーブルの作成 {#create-tables} + +ほとんどのデータベース管理システムclickhouse論理的にグテーブル “databases”. そこには `default` データベースが、我々は名前の新しいものを作成します `tutorial`: + +``` bash +clickhouse-client --query "CREATE DATABASE IF NOT EXISTS tutorial" +``` + +テーブルを作成するための構文は、データベースに比べて複雑です [参照](../sql_reference/statements/create.md). 一般的に `CREATE TABLE` 声明を設定するつもの: + +1. 作成するテーブルの名前。 +2. Table schema, i.e. list of columns and their [データ型](../sql_reference/data_types/index.md). +3. [表エンジン](../engines/table_engines/index.md) これは、このテーブルへのクエリが物理的にどのように実行されるかに関するすべての詳細を決定します。 + +Yandexの。Metricaはweb分析サービスであり、サンプルデータセットはその完全な機能をカバーしていません。: + +- `hits` とができるテーブルの各行動によるすべてのユーザーはすべてのwebサイトのサービスです。 +- `visits` はテーブルを含む組み立て済みセッションの代わりに個別に行動します。 + +これらのテーブルの実際のcreate tableクエリを見て、実行しましょう: + +``` sql +CREATE TABLE tutorial.hits_v1 +( + `WatchID` UInt64, + `JavaEnable` UInt8, + `Title` String, + `GoodEvent` Int16, + `EventTime` DateTime, + `EventDate` Date, + `CounterID` UInt32, + `ClientIP` UInt32, + `ClientIP6` FixedString(16), + `RegionID` UInt32, + `UserID` UInt64, + `CounterClass` Int8, + `OS` UInt8, + `UserAgent` UInt8, + `URL` String, + `Referer` String, + `URLDomain` String, + `RefererDomain` String, + `Refresh` UInt8, + `IsRobot` UInt8, + `RefererCategories` Array(UInt16), + `URLCategories` Array(UInt16), + `URLRegions` Array(UInt32), + `RefererRegions` Array(UInt32), + `ResolutionWidth` UInt16, + `ResolutionHeight` UInt16, + `ResolutionDepth` UInt8, + `FlashMajor` UInt8, + `FlashMinor` UInt8, + `FlashMinor2` String, + `NetMajor` UInt8, + `NetMinor` UInt8, + `UserAgentMajor` UInt16, + `UserAgentMinor` FixedString(2), + `CookieEnable` UInt8, + `JavascriptEnable` UInt8, + `IsMobile` UInt8, + `MobilePhone` UInt8, + `MobilePhoneModel` String, + `Params` String, + `IPNetworkID` UInt32, + `TraficSourceID` Int8, + `SearchEngineID` UInt16, + `SearchPhrase` String, + `AdvEngineID` UInt8, + `IsArtifical` UInt8, + `WindowClientWidth` UInt16, + `WindowClientHeight` UInt16, + `ClientTimeZone` Int16, + `ClientEventTime` DateTime, + `SilverlightVersion1` UInt8, + `SilverlightVersion2` UInt8, + `SilverlightVersion3` UInt32, + `SilverlightVersion4` UInt16, + `PageCharset` String, + `CodeVersion` UInt32, + `IsLink` UInt8, + `IsDownload` UInt8, + `IsNotBounce` UInt8, + `FUniqID` UInt64, + `HID` UInt32, + `IsOldCounter` UInt8, + `IsEvent` UInt8, + `IsParameter` UInt8, + `DontCountHits` UInt8, + `WithHash` UInt8, + `HitColor` FixedString(1), + `UTCEventTime` DateTime, + `Age` UInt8, + `Sex` UInt8, + `Income` UInt8, + `Interests` UInt16, + `Robotness` UInt8, + `GeneralInterests` Array(UInt16), + `RemoteIP` UInt32, + `RemoteIP6` FixedString(16), + `WindowName` Int32, + `OpenerName` Int32, + `HistoryLength` Int16, + `BrowserLanguage` FixedString(2), + `BrowserCountry` FixedString(2), + `SocialNetwork` String, + `SocialAction` String, + `HTTPError` UInt16, + `SendTiming` Int32, + `DNSTiming` Int32, + `ConnectTiming` Int32, + 
`ResponseStartTiming` Int32, + `ResponseEndTiming` Int32, + `FetchTiming` Int32, + `RedirectTiming` Int32, + `DOMInteractiveTiming` Int32, + `DOMContentLoadedTiming` Int32, + `DOMCompleteTiming` Int32, + `LoadEventStartTiming` Int32, + `LoadEventEndTiming` Int32, + `NSToDOMContentLoadedTiming` Int32, + `FirstPaintTiming` Int32, + `RedirectCount` Int8, + `SocialSourceNetworkID` UInt8, + `SocialSourcePage` String, + `ParamPrice` Int64, + `ParamOrderID` String, + `ParamCurrency` FixedString(3), + `ParamCurrencyID` UInt16, + `GoalsReached` Array(UInt32), + `OpenstatServiceName` String, + `OpenstatCampaignID` String, + `OpenstatAdID` String, + `OpenstatSourceID` String, + `UTMSource` String, + `UTMMedium` String, + `UTMCampaign` String, + `UTMContent` String, + `UTMTerm` String, + `FromTag` String, + `HasGCLID` UInt8, + `RefererHash` UInt64, + `URLHash` UInt64, + `CLID` UInt32, + `YCLID` UInt64, + `ShareService` String, + `ShareURL` String, + `ShareTitle` String, + `ParsedParams` Nested( + Key1 String, + Key2 String, + Key3 String, + Key4 String, + Key5 String, + ValueDouble Float64), + `IslandID` FixedString(16), + `RequestNum` UInt32, + `RequestTry` UInt8 +) +ENGINE = MergeTree() +PARTITION BY toYYYYMM(EventDate) +ORDER BY (CounterID, EventDate, intHash32(UserID)) +SAMPLE BY intHash32(UserID) +SETTINGS index_granularity = 8192 +``` + +``` sql +CREATE TABLE tutorial.visits_v1 +( + `CounterID` UInt32, + `StartDate` Date, + `Sign` Int8, + `IsNew` UInt8, + `VisitID` UInt64, + `UserID` UInt64, + `StartTime` DateTime, + `Duration` UInt32, + `UTCStartTime` DateTime, + `PageViews` Int32, + `Hits` Int32, + `IsBounce` UInt8, + `Referer` String, + `StartURL` String, + `RefererDomain` String, + `StartURLDomain` String, + `EndURL` String, + `LinkURL` String, + `IsDownload` UInt8, + `TraficSourceID` Int8, + `SearchEngineID` UInt16, + `SearchPhrase` String, + `AdvEngineID` UInt8, + `PlaceID` Int32, + `RefererCategories` Array(UInt16), + `URLCategories` Array(UInt16), + `URLRegions` Array(UInt32), + `RefererRegions` Array(UInt32), + `IsYandex` UInt8, + `GoalReachesDepth` Int32, + `GoalReachesURL` Int32, + `GoalReachesAny` Int32, + `SocialSourceNetworkID` UInt8, + `SocialSourcePage` String, + `MobilePhoneModel` String, + `ClientEventTime` DateTime, + `RegionID` UInt32, + `ClientIP` UInt32, + `ClientIP6` FixedString(16), + `RemoteIP` UInt32, + `RemoteIP6` FixedString(16), + `IPNetworkID` UInt32, + `SilverlightVersion3` UInt32, + `CodeVersion` UInt32, + `ResolutionWidth` UInt16, + `ResolutionHeight` UInt16, + `UserAgentMajor` UInt16, + `UserAgentMinor` UInt16, + `WindowClientWidth` UInt16, + `WindowClientHeight` UInt16, + `SilverlightVersion2` UInt8, + `SilverlightVersion4` UInt16, + `FlashVersion3` UInt16, + `FlashVersion4` UInt16, + `ClientTimeZone` Int16, + `OS` UInt8, + `UserAgent` UInt8, + `ResolutionDepth` UInt8, + `FlashMajor` UInt8, + `FlashMinor` UInt8, + `NetMajor` UInt8, + `NetMinor` UInt8, + `MobilePhone` UInt8, + `SilverlightVersion1` UInt8, + `Age` UInt8, + `Sex` UInt8, + `Income` UInt8, + `JavaEnable` UInt8, + `CookieEnable` UInt8, + `JavascriptEnable` UInt8, + `IsMobile` UInt8, + `BrowserLanguage` UInt16, + `BrowserCountry` UInt16, + `Interests` UInt16, + `Robotness` UInt8, + `GeneralInterests` Array(UInt16), + `Params` Array(String), + `Goals` Nested( + ID UInt32, + Serial UInt32, + EventTime DateTime, + Price Int64, + OrderID String, + CurrencyID UInt32), + `WatchIDs` Array(UInt64), + `ParamSumPrice` Int64, + `ParamCurrency` FixedString(3), + `ParamCurrencyID` UInt16, + `ClickLogID` UInt64, + 
`ClickEventID` Int32, + `ClickGoodEvent` Int32, + `ClickEventTime` DateTime, + `ClickPriorityID` Int32, + `ClickPhraseID` Int32, + `ClickPageID` Int32, + `ClickPlaceID` Int32, + `ClickTypeID` Int32, + `ClickResourceID` Int32, + `ClickCost` UInt32, + `ClickClientIP` UInt32, + `ClickDomainID` UInt32, + `ClickURL` String, + `ClickAttempt` UInt8, + `ClickOrderID` UInt32, + `ClickBannerID` UInt32, + `ClickMarketCategoryID` UInt32, + `ClickMarketPP` UInt32, + `ClickMarketCategoryName` String, + `ClickMarketPPName` String, + `ClickAWAPSCampaignName` String, + `ClickPageName` String, + `ClickTargetType` UInt16, + `ClickTargetPhraseID` UInt64, + `ClickContextType` UInt8, + `ClickSelectType` Int8, + `ClickOptions` String, + `ClickGroupBannerID` Int32, + `OpenstatServiceName` String, + `OpenstatCampaignID` String, + `OpenstatAdID` String, + `OpenstatSourceID` String, + `UTMSource` String, + `UTMMedium` String, + `UTMCampaign` String, + `UTMContent` String, + `UTMTerm` String, + `FromTag` String, + `HasGCLID` UInt8, + `FirstVisit` DateTime, + `PredLastVisit` Date, + `LastVisit` Date, + `TotalVisits` UInt32, + `TraficSource` Nested( + ID Int8, + SearchEngineID UInt16, + AdvEngineID UInt8, + PlaceID UInt16, + SocialSourceNetworkID UInt8, + Domain String, + SearchPhrase String, + SocialSourcePage String), + `Attendance` FixedString(16), + `CLID` UInt32, + `YCLID` UInt64, + `NormalizedRefererHash` UInt64, + `SearchPhraseHash` UInt64, + `RefererDomainHash` UInt64, + `NormalizedStartURLHash` UInt64, + `StartURLDomainHash` UInt64, + `NormalizedEndURLHash` UInt64, + `TopLevelDomain` UInt64, + `URLScheme` UInt64, + `OpenstatServiceNameHash` UInt64, + `OpenstatCampaignIDHash` UInt64, + `OpenstatAdIDHash` UInt64, + `OpenstatSourceIDHash` UInt64, + `UTMSourceHash` UInt64, + `UTMMediumHash` UInt64, + `UTMCampaignHash` UInt64, + `UTMContentHash` UInt64, + `UTMTermHash` UInt64, + `FromHash` UInt64, + `WebVisorEnabled` UInt8, + `WebVisorActivity` UInt32, + `ParsedParams` Nested( + Key1 String, + Key2 String, + Key3 String, + Key4 String, + Key5 String, + ValueDouble Float64), + `Market` Nested( + Type UInt8, + GoalID UInt32, + OrderID String, + OrderPrice Int64, + PP UInt32, + DirectPlaceID UInt32, + DirectOrderID UInt32, + DirectBannerID UInt32, + GoodID String, + GoodName String, + GoodQuantity Int32, + GoodPrice Int64), + `IslandID` FixedString(16) +) +ENGINE = CollapsingMergeTree(Sign) +PARTITION BY toYYYYMM(StartDate) +ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID) +SAMPLE BY intHash32(UserID) +SETTINGS index_granularity = 8192 +``` + +ドできるようになりました。方のクエリのインタラクティブモードの `clickhouse-client` (事前にクエリを指定せずに端末で起動するだけです)またはいくつか試してみてください [代わりとなるインターフェース](../interfaces/index.md) あなたが望むなら。 + +ご覧の通り, `hits_v1` 使用します [基本的なMergeTreeエンジン](../engines/table_engines/mergetree_family/mergetree.md)、ながら `visits_v1` 使用します [折りたたみ](../engines/table_engines/mergetree_family/collapsingmergetree.md) バリアント。 + +### デー {#import-data} + +ClickHouseへのデータのインポートは、 [INSERT INTO](../sql_reference/statements/insert_into.md) 他の多くのSQLデータベースのような照会。 ただし、データは通常、次のいずれかで提供されます。 [対応するシリアル化形式](../interfaces/formats.md) 代わりに `VALUES` 句(これもサポートされています)。 + +以前にダウンロードしたファ: + +``` bash +clickhouse-client --query "INSERT INTO tutorial.hits_v1 FORMAT TSV" --max_insert_block_size=100000 < hits_v1.tsv +clickhouse-client --query "INSERT INTO tutorial.visits_v1 FORMAT TSV" --max_insert_block_size=100000 < visits_v1.tsv +``` + +ClickHouseには多くのものがあります [調整する設定](../operations/settings/index.md) そして、コンソールクライアントでそれらを指定する一つの方法は、引数を使用することです 
`--max_insert_block_size`. どのような設定が利用可能であるか、それらが何を意味するのか、そしてデフォルトが何であるかを理解する最も簡単な方法は、`system.settings` テーブルを照会することです:

``` sql
SELECT name, value, changed, description
FROM system.settings
WHERE name LIKE '%max_insert_b%'
FORMAT TSV

max_insert_block_size    1048576    0    "The maximum block size for insertion, if we control the creation of blocks for insertion."
```

必要に応じて、インポート後にテーブルを [OPTIMIZE](../query_language/misc/#misc_operations-optimize) できます。 MergeTreeファミリーのエンジンで構成されたテーブルは、データストレージを最適化するために、常にバックグラウンドでデータパーツのマージを行います(または少なくともそれが理にかなっているかを確認します)。 次のクエリは、後でではなく今すぐストレージの最適化を行うようにテーブルエンジンに強制します:

``` bash
clickhouse-client --query "OPTIMIZE TABLE tutorial.hits_v1 FINAL"
clickhouse-client --query "OPTIMIZE TABLE tutorial.visits_v1 FINAL"
```

したがって、テーブルが一貫して新しいデータを受け取る場合は、そのままにして、マージをバックグラウンドで実行する方がよいでしょう。

テーブルインポートが成功したかどうかを確認できます:

``` bash
clickhouse-client --query "SELECT COUNT(*) FROM tutorial.hits_v1"
clickhouse-client --query "SELECT COUNT(*) FROM tutorial.visits_v1"
```

## クエリ例 {#example-queries}

``` sql
SELECT
    StartURL AS URL,
    AVG(Duration) AS AvgDuration
FROM tutorial.visits_v1
WHERE StartDate BETWEEN '2014-03-23' AND '2014-03-30'
GROUP BY URL
ORDER BY AvgDuration DESC
LIMIT 10
```

``` sql
SELECT
    sum(Sign) AS visits,
    sumIf(Sign, has(Goals.ID, 1105530)) AS goal_visits,
    (100. * goal_visits) / visits AS goal_percent
FROM tutorial.visits_v1
WHERE (CounterID = 912887) AND (toYYYYMM(StartDate) = 201403) AND (domain(StartURL) = 'yandex.ru')
```

## クラスターの展開 {#cluster-deployment}

ClickHouseのクラスターは同種のクラスターです。 セットアップの手順:

1. クラスターのすべてのマシンにClickHouseサーバーをインストールする
2. 構成ファイルでクラスタ構成をセットアップする
3. 各インスタンスにローカルテーブルを作成する
4. [分散テーブル](../engines/table_engines/special/distributed.md) を作成する

[分散テーブル](../engines/table_engines/special/distributed.md) は、実際にはClickHouseクラスターのローカルテーブルへの一種の “view” です。 分散テーブルに対するSELECTクエリは、クラスターのすべてのシャードのリソースを使用して実行されます。 複数のクラスターの構成を指定し、異なるクラスターへのビューを提供する複数の分散テーブルを作成できます。

各シャードに一つのレプリカを持つ、三つのシャードのクラスターの設定例:

``` xml
<remote_servers>
    <perftest_3shards_1replicas>
        <shard>
            <replica>
                <host>example-perftest01j.yandex.ru</host>
                <port>9000</port>
            </replica>
        </shard>
        <shard>
            <replica>
                <host>example-perftest02j.yandex.ru</host>
                <port>9000</port>
            </replica>
        </shard>
        <shard>
            <replica>
                <host>example-perftest03j.yandex.ru</host>
                <port>9000</port>
            </replica>
        </shard>
    </perftest_3shards_1replicas>
</remote_servers>
```

さらに、`hits_v1` に使用したものと同じ `CREATE TABLE` クエリで(ただし別のテーブル名で)新しいローカルテーブルを作成しましょう:

``` sql
CREATE TABLE tutorial.hits_local (...) ENGINE = MergeTree() ...
```

クラスターのローカルテーブルへのビューを提供する分散テーブルの作成:

``` sql
CREATE TABLE tutorial.hits_all AS tutorial.hits_local
ENGINE = Distributed(perftest_3shards_1replicas, tutorial, hits_local, rand());
```

一般的な方法は、クラスターのすべてのマシンで同様の分散テーブルを作成することです。 これにより、クラスターの任意のマシンで分散クエリを実行できます。 また、特定のSELECTクエリのために [remote](../sql_reference/table_functions/remote.md) テーブル関数を使用して一時的な分散テーブルを作成する代替オプションもあります。

分散テーブルへ [INSERT SELECT](../sql_reference/statements/insert_into.md) を実行して、テーブルを複数のサーバーに分散させましょう。

``` sql
INSERT INTO tutorial.hits_all SELECT * FROM tutorial.hits_v1;
```

!!! warning "気づく"
    このアプローチは、大きなテーブルのシャーディングには適していません。 別のツール [clickhouse-copier](../operations/utilities/clickhouse-copier.md) があり、任意の大きなテーブルを再シャードできます。

予想されるとおり、計算量の多いクエリは、一台ではなく三台のサーバーを利用するとN倍高速に実行されます。

この場合、三つのシャードを持つクラスターを使用し、それぞれに単一のレプリカが含まれています。

運用環境で復元性を提供するには、各シャードに、複数のアベイラビリティーゾーンまたはデータセンター(または少なくともラック)の間に分散された2~3個のレプリカを含めることをお勧めします。 ClickHouseでは、レプリカの数に制限はありません。

レプリカを含むシャードのクラスタの設定例:

``` xml
<remote_servers>
    ...
    <perftest_1shards_3replicas>
        <shard>
            <replica>
                <host>example-perftest01j.yandex.ru</host>
                <port>9000</port>
            </replica>
            <replica>
                <host>example-perftest02j.yandex.ru</host>
                <port>9000</port>
            </replica>
            <replica>
                <host>example-perftest03j.yandex.ru</host>
                <port>9000</port>
            </replica>
        </shard>
    </perftest_1shards_3replicas>
</remote_servers>
```

ネイティブ複製を有効にするには [ZooKeeper](http://zookeeper.apache.org/) が必須です。 ClickHouseは、すべてのレプリカでデータの整合性を管理し、障害後に自動的に復元手順を実行します。 ZooKeeperクラスターは、別々のサーバー(ClickHouseを含む他のプロセスが実行されていないサーバー)に展開することをお勧めします。

!!! 
note "メモ"
    いくつかの簡単なケースでは、アプリケーションコードからすべてのレプリカにデータを書き込むことでデータを複製できます。 このアプローチは推奨 **されません**。この場合、ClickHouseはすべてのレプリカでデータの整合性を保証できないためです。 整合性の確保はアプリケーションの責任になります。

ZooKeeperの場所は設定ファイルで指定します:

``` xml
<zookeeper>
    <node>
        <host>zoo01.yandex.ru</host>
        <port>2181</port>
    </node>
    <node>
        <host>zoo02.yandex.ru</host>
        <port>2181</port>
    </node>
    <node>
        <host>zoo03.yandex.ru</host>
        <port>2181</port>
    </node>
</zookeeper>
```

また、テーブル作成時に使用される各シャードとレプリカを識別するマクロを設定する必要があります:

``` xml
<macros>
    <shard>01</shard>
    <replica>01</replica>
</macros>
```

複製テーブルの作成時点でレプリカが存在しない場合、新しい最初のレプリカがインスタンス化されます。 既にライブレプリカがある場合、新しいレプリカは既存のレプリカからデータをクローンします。 最初にすべての複製テーブルを作成してからデータを挿入するオプションがあります。 別のオプションとして、いくつかのレプリカを作成し、データ挿入後または挿入中に他のレプリカを追加することもできます。

``` sql
CREATE TABLE tutorial.hits_replica (...)
ENGINE = ReplicatedMergeTree(
    '/clickhouse_perftest/tables/{shard}/hits',
    '{replica}'
)
...
```

ここでは [ReplicatedMergeTree](../engines/table_engines/mergetree_family/replication.md) テーブルエンジンを使用します。 パラメータには、シャードとレプリカの識別子を含むZooKeeperパスを指定します。

``` sql
INSERT INTO tutorial.hits_replica SELECT * FROM tutorial.hits_local;
```

複製はマルチマスターモードで動作します。 データは任意のレプリカにロードでき、システムが自動的に他のインスタンスと同期します。 複製は非同期であるため、ある時点では、すべてのレプリカに最近挿入されたデータが含まれているとは限りません。 データの取り込みには、少なくとも一つのレプリカが稼働している必要があります。 他のレプリカは、再びアクティブになるとデータを同期し、整合性を修復します。 このアプローチでは、最近挿入されたデータが失われる可能性が低いことに注意してください。

[元の記事](https://clickhouse.tech/docs/en/getting_started/tutorial/)
diff --git a/docs/ja/guides/apply_catboost_model.md b/docs/ja/guides/apply_catboost_model.md
deleted file mode 120000
index dd36e885974..00000000000
--- a/docs/ja/guides/apply_catboost_model.md
+++ /dev/null
@@ -1 +0,0 @@
-../../en/guides/apply_catboost_model.md
\ No newline at end of file
diff --git a/docs/ja/guides/apply_catboost_model.md b/docs/ja/guides/apply_catboost_model.md
new file mode 100644
index 00000000000..5f79e206ca9
--- /dev/null
+++ b/docs/ja/guides/apply_catboost_model.md
@@ -0,0 +1,239 @@
---
machine_translated: true
machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
toc_priority: 41
toc_title: "CatBoost\u30E2\u30C7\u30EB\u306E\u9069\u7528"
---

# ClickHouseでのCatBoostモデルの適用 {#applying-catboost-model-in-clickhouse}

[CatBoost](https://catboost.ai) は、[Yandex](https://yandex.com/company/) が機械学習のために開発したオープンソースの勾配ブースティングライブラリです。

この手順では、SQLからモデル推論を実行して、事前に学習したモデルをClickHouseに適用する方法を学習します。

ClickHouseでCatBoostモデルを適用するには:

1. [テーブルの作成](#create-table).
2. [データをテーブルに挿入する](#insert-data-to-table).
3. [ClickHouseにCatBoostを統合する](#integrate-catboost-into-clickhouse) (任意ステップ)。
4. [SQLからモデル推論を実行する](#run-model-inference).

CatBoostモデルのトレーニングの詳細については、[訓練用モデル](https://catboost.ai/docs/features/training.html#training) を参照してください。

## 前提条件 {#prerequisites}

まだ [Docker](https://docs.docker.com/install/) をお持ちでない場合は、インストールしてください。

!!! note "メモ"
    [Docker](https://www.docker.com) は、CatBoostとClickHouseのインストールをシステムの他の部分から隔離するコンテナを作成できるソフトウェアプラットフォームです。

CatBoostモデルを適用する前に:

**1.** レジストリから [Dockerイメージ](https://hub.docker.com/r/yandex/tutorial-catboost-clickhouse) をプルします:

``` bash
$ docker pull yandex/tutorial-catboost-clickhouse
```

このDockerイメージには、CatBoostとClickHouseの実行に必要なすべて(コード、ランタイム、ライブラリ、環境変数、設定ファイル)が含まれています。

**2.** Dockerイメージが正常にプルされたことを確認します:

``` bash
$ docker image ls
REPOSITORY                            TAG                 IMAGE ID            CREATED             SIZE
yandex/tutorial-catboost-clickhouse   latest              622e4d17945b        22 hours ago        1.37GB
```

**3.** このイメージに基づいてDockerコンテナを起動します:

``` bash
$ docker run -it -p 8888:8888 yandex/tutorial-catboost-clickhouse
```

## 1. テーブルの作成 {#create-table}

トレーニングサンプル用のClickHouseテーブルを作成するには:

**1.** ClickHouseコンソールクライアントを対話モードで起動します:

``` bash
$ clickhouse client
```

!!! 
note "メモ"
    ClickHouseサーバーはすでにDockerコンテナ内で実行されています。

**2.** 次のコマンドを使用してテーブルを作成します:

``` sql
:) CREATE TABLE amazon_train
(
    date Date MATERIALIZED today(),
    ACTION UInt8,
    RESOURCE UInt32,
    MGR_ID UInt32,
    ROLE_ROLLUP_1 UInt32,
    ROLE_ROLLUP_2 UInt32,
    ROLE_DEPTNAME UInt32,
    ROLE_TITLE UInt32,
    ROLE_FAMILY_DESC UInt32,
    ROLE_FAMILY UInt32,
    ROLE_CODE UInt32
)
ENGINE = MergeTree ORDER BY date
```

**3.** ClickHouseコンソールクライアントを終了します:

``` sql
:) exit
```

## 2. データをテーブルに挿入する {#insert-data-to-table}

データを挿入するには:

**1.** 次のコマンドを実行します:

``` bash
$ clickhouse client --host 127.0.0.1 --query 'INSERT INTO amazon_train FORMAT CSVWithNames' < ~/amazon/train.csv
```

**2.** ClickHouseコンソールクライアントを対話モードで起動します:

``` bash
$ clickhouse client
```

**3.** データがアップロードされたことを確認します:

``` sql
:) SELECT count() FROM amazon_train

SELECT count()
FROM amazon_train

+-count()-+
|   65538 |
+-------+
```

## 3. ClickHouseにCatBoostを統合する {#integrate-catboost-into-clickhouse}

!!! note "メモ"
    **省略可能なステップ。** Dockerイメージには、CatBoostとClickHouseの実行に必要なすべてが含まれています。

CatBoostをClickHouseに統合するには:

**1.** 評価ライブラリをビルドします。

CatBoostモデルを評価する最速の方法は、`libcatboostmodel.so` ライブラリをコンパイルすることです。 ライブラリのビルド方法の詳細については、[CatBoost文書](https://catboost.ai/docs/concepts/c-plus-plus-api_dynamic-c-pluplus-wrapper.html) を参照してください。

**2.** 任意の場所に任意の名前で新しいディレクトリ(例: `data`)を作成し、作成したライブラリをその中に入れます。 Dockerイメージには、すでにライブラリ `data/libcatboostmodel.so` が含まれています。

**3.** 任意の場所に任意の名前で、モデル設定用の新しいディレクトリ(例: `models`)を作成します。

**4.** 任意の名前のモデル構成ファイル(例: `models/amazon_model.xml`)を作成します。

**5.** モデル構成を記述します:

``` xml
<models>
    <model>
        <!-- Model type. Now catboost only. -->
        <type>catboost</type>
        <!-- Model name. -->
        <name>amazon</name>
        <!-- Path to trained model. -->
        <path>/home/catboost/tutorial/catboost_model.bin</path>
        <!-- Update interval. -->
        <reload_interval>0</reload_interval>
    </model>
</models>
```

**6.** CatBoostへのパスとモデル設定をClickHouseの設定に追加します:

``` xml
<catboost_dynamic_library_path>/home/catboost/data/libcatboostmodel.so</catboost_dynamic_library_path>
<models_config>/home/catboost/models/*_model.xml</models_config>
```

## 4. SQLからモデル推論を実行する {#run-model-inference}

モデルをテストするには、ClickHouseクライアント `$ clickhouse client` を起動します。

モデルが動作していることを確認しましょう:

``` sql
:) SELECT
    modelEvaluate('amazon',
                RESOURCE,
                MGR_ID,
                ROLE_ROLLUP_1,
                ROLE_ROLLUP_2,
                ROLE_DEPTNAME,
                ROLE_TITLE,
                ROLE_FAMILY_DESC,
                ROLE_FAMILY,
                ROLE_CODE) > 0 AS prediction,
    ACTION AS target
FROM amazon_train
LIMIT 10
```

!!! note "メモ"
    関数 [modelEvaluate](../sql_reference/functions/other_functions.md#function-modelevaluate) は、マルチクラスモデルの場合、クラスごとの生の予測を持つタプルを返します。

確率を予測してみましょう:

``` sql
:) SELECT
    modelEvaluate('amazon',
                RESOURCE,
                MGR_ID,
                ROLE_ROLLUP_1,
                ROLE_ROLLUP_2,
                ROLE_DEPTNAME,
                ROLE_TITLE,
                ROLE_FAMILY_DESC,
                ROLE_FAMILY,
                ROLE_CODE) AS prediction,
    1. / (1 + exp(-prediction)) AS probability,
    ACTION AS target
FROM amazon_train
LIMIT 10
```

!!! note "メモ"
    [exp()](../sql_reference/functions/math_functions.md) 関数の詳細を参照してください。

サンプルのLogLossを計算してみましょう:

``` sql
:) SELECT -avg(tg * log(prob) + (1 - tg) * log(1 - prob)) AS logloss
FROM
(
    SELECT
        modelEvaluate('amazon',
                    RESOURCE,
                    MGR_ID,
                    ROLE_ROLLUP_1,
                    ROLE_ROLLUP_2,
                    ROLE_DEPTNAME,
                    ROLE_TITLE,
                    ROLE_FAMILY_DESC,
                    ROLE_FAMILY,
                    ROLE_CODE) AS prediction,
        1. / (1. + exp(-prediction)) AS prob,
        ACTION AS tg
    FROM amazon_train
)
```

!!! 
note "メモ" + 詳細について [平均()](../sql_reference/aggregate_functions/reference.md#agg_function-avg) と [ログ()](../sql_reference/functions/math_functions.md) 機能。 + +[元の記事](https://clickhouse.tech/docs/en/guides/apply_catboost_model/) diff --git a/docs/ja/guides/index.md b/docs/ja/guides/index.md deleted file mode 120000 index 162dcbc3b8f..00000000000 --- a/docs/ja/guides/index.md +++ /dev/null @@ -1 +0,0 @@ -../../en/guides/index.md \ No newline at end of file diff --git a/docs/ja/guides/index.md b/docs/ja/guides/index.md new file mode 100644 index 00000000000..fcfc7ecd9d0 --- /dev/null +++ b/docs/ja/guides/index.md @@ -0,0 +1,16 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_folder_title: Guides +toc_priority: 38 +toc_title: "\u6982\u8981" +--- + +# クリックハウス {#clickhouse-guides} + +ClickHouseを使用してさまざまなタスクを解決するのに役立つ詳細な手順のリスト: + +- [ストリートビューを簡単にクラスタの設定](../getting_started/tutorial.md) +- [ClickHouseでのCatBoostモデルの適用](apply_catboost_model.md) + +[元の記事](https://clickhouse.tech/docs/en/guides/) diff --git a/docs/ja/interfaces/cli.md b/docs/ja/interfaces/cli.md deleted file mode 120000 index 04588066828..00000000000 --- a/docs/ja/interfaces/cli.md +++ /dev/null @@ -1 +0,0 @@ -../../en/interfaces/cli.md \ No newline at end of file diff --git a/docs/ja/interfaces/cli.md b/docs/ja/interfaces/cli.md new file mode 100644 index 00000000000..b8dfef00634 --- /dev/null +++ b/docs/ja/interfaces/cli.md @@ -0,0 +1,149 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 17 +toc_title: "\u30B3\u30DE\u30F3\u30C9\u30E9\u30A4\u30F3" +--- + +# コマンドライン {#command-line-client} + +ClickHouseスネイティブコマンドラインのクライアント: `clickhouse-client`. クライアン 詳細については、 [設定](#interfaces_cli_configuration). + +[設置](../getting_started/index.md) それから `clickhouse-client` パッケージとコマンドでそれを実行 `clickhouse-client`. + +``` bash +$ clickhouse-client +ClickHouse client version 19.17.1.1579 (official build). +Connecting to localhost:9000 as user default. +Connected to ClickHouse server version 19.17.1 revision 54428. + +:) +``` + +異なるクライアントとサーババージョンと互換性が、一部機能が利用できない古いクライアント. サーバーアプリと同じバージョンのクライアン 古いバージョンのクライアントを使用しようとすると、サーバー, `clickhouse-client` メッセージを表示する: + + ClickHouse client version is older than ClickHouse server. It may lack support for new features. + +## 使い方 {#cli_usage} + +このクラ バッチモードを使用するには、 ‘query’ パラメータ、または送信データに ‘stdin’ (それはそれを確認します ‘stdin’ は端末ではありません)、またはその両方。 HTTPインターフェイスと同様に、 ‘query’ パラメータとデータの送信先 ‘stdin’ リクエストはリクエストの連結である。 ‘query’ パラメータ、ラインフィード、および ‘stdin’. 
これは、大規模な挿入クエリに便利です。 + +クライアントを使用してデータを挿入する例: + +``` bash +$ echo -ne "1, 'some text', '2016-08-14 00:00:00'\n2, 'some more text', '2016-08-14 00:00:01'" | clickhouse-client --database=test --query="INSERT INTO test FORMAT CSV"; + +$ cat <<_EOF | clickhouse-client --database=test --query="INSERT INTO test FORMAT CSV"; +3, 'some text', '2016-08-14 00:00:00' +4, 'some more text', '2016-08-14 00:00:01' +_EOF + +$ cat file.csv | clickhouse-client --database=test --query="INSERT INTO test FORMAT CSV"; +``` + +バッチモードでは、デフォルトのデータ形式はtabseparatedです。 形式は、クエリのformat句で設定できます。 + +既定では、単一のクエリのみをバッチモードで処理できます。 aから複数のクエリを作成するには “script,” を使用 `--multiquery` パラメータ。 これはINSERT以外のすべてのクエリで機能します。 クエリ結果は、追加の区切り文字なしで連続して出力されます。 同様に、多数のクエリを処理するには、以下を実行します ‘clickhouse-client’ 各クエリについて。 それは起動に数十ミリ秒かかることに注意してください ‘clickhouse-client’ プログラム。 + +対話モードでは、クエリを入力できるコマンドラインが表示されます。 + +もし ‘multiline’ クエリを実行するには、Enterキーを押します。 クエリの最後にセミコロンは必要ありません。 複数行のクエリを入力するには、円記号を入力します `\` ラインフィードの前に。 Enterキーを押すと、クエリの次の行を入力するように求められます。 + +複数行が指定されている場合:クエリを実行するには、クエリをセミコロンで終了し、enterキーを押します。 入力された行の最後にセミコロンが省略された場合は、クエリの次の行を入力するように求められます。 + +単一のクエリのみが実行されるので、セミコロンの後のすべてが無視されます。 + +指定できます `\G` 代わりに、またはセミコロンの後に。 これは縦書式を示します。 この形式では、各値は別々の行に印刷されます。 この珍しい機能は、MySQL CLIとの互換性のために追加されました。 + +コマンドラインは以下に基づきます ‘replxx’ (に似て ‘readline’). つまり、使い慣れたキーボードショートカットを使用し、履歴を保持します。 歴史はに書かれています `~/.clickhouse-client-history`. + +デフォルトでは、使用される形式はprettycompactです。 クエリのformat句で形式を変更するか、次のように指定できます `\G` クエリの最後に、次のコマンドを使用します `--format` または `--vertical` コマンドラインの引数、またはクライアント構成ファイルの使用。 + +クライアントを終了するには、ctrl+d(またはctrl+c)を押すか、クエリの代わりに次のいずれかを入力します: “exit”, “quit”, “logout”, “exit;”, “quit;”, “logout;”, “q”, “Q”, “:q” + +が処理クエリー、クライアントを示し: + +1. (デフォルトでは)毎秒せいぜい10回updatedされている進捗、。 クイッククエリの場合、進行状況が表示される時間がないことがあります。 +2. デバッグのための、解析後の書式付きクエリ。 +3. 指定された形式の結果。 +4. 結果の行数、経過時間、およびクエリ処理の平均速度。 + +ただし、サーバーが要求を中止するのを少し待つ必要があります。 特定の段階でクエリをキャンセルすることはできません。 待たずにctrl+cをもう一度押すと、クライアントは終了します。 + +コマン 詳細については、以下を参照してください “External data for query processing”. + +### クエリパラメータ {#cli-queries-with-parameters} + +を作成でき、クエリパラメータおよびパスの値からのお知らクライアントアプリケーション. これを避けるフォーマットのクエリが特定の動的価値観にクライアント側で行われます。 例えば: + +``` bash +$ clickhouse-client --param_parName="[1, 2]" -q "SELECT * FROM table WHERE a = {parName:Array(UInt16)}" +``` + +#### クエリの構文 {#cli-queries-with-parameters-syntax} + +通常どおりにクエリを書式設定し、次の形式でアプリパラメーターからクエリに渡す値を中かっこで囲みます: + +``` sql +{:} +``` + +- `name` — Placeholder identifier. In the console client it should be used in app parameters as `--param_ = value`. +- `data type` — [データ型](../sql_reference/data_types/index.md) アプリのパラメータ値の。 たとえば、次のようなデータ構造 `(integer, ('string', integer))` を持つことができ `Tuple(UInt8, Tuple(String, UInt8))` デー [整数](../sql_reference/data_types/int_uint.md) タイプ)。 + +#### 例えば {#example} + +``` bash +$ clickhouse-client --param_tuple_in_tuple="(10, ('dt', 10))" -q "SELECT * FROM table WHERE val = {tuple_in_tuple:Tuple(UInt8, Tuple(String, UInt8))}" +``` + +## 設定 {#interfaces_cli_configuration} + +パラメータを渡すには `clickhouse-client` (すべてのパラメータの既定値がある): + +- コマンドラインから + + コマンドラインオプションは、構成ファイルの既定値と設定を上書きします。 + +- 構成ファイル。 + + 構成ファイルの設定は、デフォルト値を上書きします。 + +### コマンドラインオプ {#command-line-options} + +- `--host, -h` -– The server name, ‘localhost’ デフォルトでは。 名前またはIPv4アドレスまたはIPv6アドレスを使用できます。 +- `--port` – The port to connect to. Default value: 9000. Note that the HTTP interface and the native interface use different ports. +- `--user, -u` – The username. Default value: default. +- `--password` – The password. Default value: empty string. 
+- `--query, -q` – The query to process when using non-interactive mode. +- `--database, -d` – Select the current default database. Default value: the current database from the server settings (‘default’ デフォルトでは)。 +- `--multiline, -m` – If specified, allow multiline queries (do not send the query on Enter). +- `--multiquery, -n` – If specified, allow processing multiple queries separated by semicolons. +- `--format, -f` – Use the specified default format to output the result. +- `--vertical, -E` – If specified, use the Vertical format by default to output the result. This is the same as ‘–format=Vertical’. この形式では、各値は別の行に印刷されます。 +- `--time, -t` – If specified, print the query execution time to ‘stderr’ 非対話モードでは。 +- `--stacktrace` – If specified, also print the stack trace if an exception occurs. +- `--config-file` – The name of the configuration file. +- `--secure` – If specified, will connect to server over secure connection. +- `--param_` — Value for a [クエリパラメータ](#cli-queries-with-parameters). + +### 設定ファイル {#configuration_files} + +`clickhouse-client` 次の最初の既存のファイルを使用します: + +- で定義される `--config-file` パラメータ。 +- `./clickhouse-client.xml` +- `~/.clickhouse-client/config.xml` +- `/etc/clickhouse-client/config.xml` + +設定ファイルの例: + +``` xml + + username + password + False + +``` + +[元の記事](https://clickhouse.tech/docs/en/interfaces/cli/) diff --git a/docs/ja/interfaces/cpp.md b/docs/ja/interfaces/cpp.md deleted file mode 120000 index 581e50e774d..00000000000 --- a/docs/ja/interfaces/cpp.md +++ /dev/null @@ -1 +0,0 @@ -../../en/interfaces/cpp.md \ No newline at end of file diff --git a/docs/ja/interfaces/cpp.md b/docs/ja/interfaces/cpp.md new file mode 100644 index 00000000000..fec0fb72690 --- /dev/null +++ b/docs/ja/interfaces/cpp.md @@ -0,0 +1,12 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 24 +toc_title: "C++\u30AF\u30E9\u30A4\u30A2\u30F3\u30C8" +--- + +# C++クライアント {#c-client-library} + +ツつィツ姪"ツ債ツつケ [クリックハウス-cpp](https://github.com/ClickHouse/clickhouse-cpp) リポジトリ + +[元の記事](https://clickhouse.tech/docs/en/interfaces/cpp/) diff --git a/docs/ja/interfaces/formats.md b/docs/ja/interfaces/formats.md deleted file mode 120000 index 41a65ebe579..00000000000 --- a/docs/ja/interfaces/formats.md +++ /dev/null @@ -1 +0,0 @@ -../../en/interfaces/formats.md \ No newline at end of file diff --git a/docs/ja/interfaces/formats.md b/docs/ja/interfaces/formats.md new file mode 100644 index 00000000000..c2784058a69 --- /dev/null +++ b/docs/ja/interfaces/formats.md @@ -0,0 +1,1212 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 21 +toc_title: "\u5165\u529B\u304A\u3088\u3073\u51FA\u529B\u5F62\u5F0F" +--- + +# 入力データと出力データの形式 {#formats} + +ClickHouse受け入れと返信データなフレームワークです。 入力でサポートされている形式を使用して、指定されたデータを解析できます。 `INSERT`s、実行する `SELECT`ファイル、URL、またはHDFSなどのファイルバックアップテーブルから、または外部辞書を読み取ります。 出力用にサポートされている形式を使用して、 +の結果 `SELECT`、および実行する `INSERT`ファイルによって支持される表へのs。 + +のサポートされるフォーマットは: + +| 書式 | 入力 | 出力 | +|---------------------------------------------------------------------|------|------| +| [タブ区切り](#tabseparated) | ✔ | ✔ | +| [TabSeparatedRaw](#tabseparatedraw) | ✗ | ✔ | +| [Tabseparatedwithnamesname](#tabseparatedwithnames) | ✔ | ✔ | +| [Tabseparatedwithnamesandtypesname](#tabseparatedwithnamesandtypes) | ✔ | ✔ | +| [テンプレ](#format-template) | ✔ | ✔ | +| [TemplateIgnoreSpaces](#templateignorespaces) | ✔ | ✗ | +| [CSV](#csv) | ✔ | ✔ | +| [Csvwithnamesname](#csvwithnames) | ✔ | ✔ | +| 
[CustomSeparated](#format-customseparated) | ✔ | ✔ | +| [値](#data-format-values) | ✔ | ✔ | +| [垂直](#vertical) | ✗ | ✔ | +| [VerticalRaw](#verticalraw) | ✗ | ✔ | +| [JSON](#json) | ✗ | ✔ | +| [JSONCompact](#jsoncompact) | ✗ | ✔ | +| [JSONEachRow](#jsoneachrow) | ✔ | ✔ | +| [TSKV](#tskv) | ✔ | ✔ | +| [可愛い](#pretty) | ✗ | ✔ | +| [PrettyCompact](#prettycompact) | ✗ | ✔ | +| [PrettyCompactMonoBlock](#prettycompactmonoblock) | ✗ | ✔ | +| [PrettyNoEscapes](#prettynoescapes) | ✗ | ✔ | +| [PrettySpace](#prettyspace) | ✗ | ✔ | +| [Protobuf](#protobuf) | ✔ | ✔ | +| [アブロ](#data-format-avro) | ✔ | ✔ | +| [AvroConfluent](#data-format-avro-confluent) | ✔ | ✗ | +| [Parquet張り](#data-format-parquet) | ✔ | ✔ | +| [ORC](#data-format-orc) | ✔ | ✗ | +| [RowBinary](#rowbinary) | ✔ | ✔ | +| [RowBinaryWithNamesAndTypes](#rowbinarywithnamesandtypes) | ✔ | ✔ | +| [ネイティブ](#native) | ✔ | ✔ | +| [ヌル](#null) | ✗ | ✔ | +| [XML](#xml) | ✗ | ✔ | +| [CapnProto](#capnproto) | ✔ | ✗ | + +ClickHouseの設定で、一部のフォーマット処理パラメータを制御できます。 詳細については、 [設定](../operations/settings/settings.md) セクション。 + +## タブ区切り {#tabseparated} + +TabSeparated形式では、データは行によって書き込まれます。 各行にはタブで区切られた値が含まれます。 各値の後には、行の最後の値を除くタブが続き、その後に改行が続きます。 厳密にUnixの改行はどこでも想定されます。 最後の行には、最後に改行が含まれている必要があります。 値は、引用符を囲まずにテキスト形式で書き込まれ、特殊文字はエスケープされます。 + +この形式は、名前の下でも利用できます `TSV`. + +その `TabSeparated` 形式は便利な加工データをカスタムプログラムやイントロダクションです。 デフォルトでは、HTTPインターフェイスとコマンドラインクライアントのバッチモードで使用されます。 この形式は、異なるDbms間でデータを転送することもできます。 たとえば、MySQLからダンプを取得してClickHouseにアップロードすることも、その逆にすることもできます。 + +その `TabSeparated` formatでは、合計値(合計と共に使用する場合)と極端な値(次の場合)の出力をサポートします ‘extremes’ 1)に設定します。 このような場合、メインデータの後に合計値と極値が出力されます。 主な結果、合計値、および極値は、空の行で区切られます。 例えば: + +``` sql +SELECT EventDate, count() AS c FROM test.hits GROUP BY EventDate WITH TOTALS ORDER BY EventDate FORMAT TabSeparated`` +``` + +``` text +2014-03-17 1406958 +2014-03-18 1383658 +2014-03-19 1405797 +2014-03-20 1353623 +2014-03-21 1245779 +2014-03-22 1031592 +2014-03-23 1046491 + +0000-00-00 8873898 + +2014-03-17 1031592 +2014-03-23 1406958 +``` + +### データの書式設定 {#data-formatting} + +整数は小数で書かれています。 数字には、 “+” 最初の文字(解析時は無視され、書式設定時には記録されません)。 負でない数値には、負の符号を含めることはできません。 読み込むときには、空の文字列をゼロとして解析するか、(符号付きの型の場合)マイナス記号だけをゼロとして含む文字列を解析することができま 対応するデータ型に収まらない数値は、エラーメッセージなしで別の数値として解析できます。 + +浮動小数点数は小数で書かれています。 ドットは小数点として使用されます。 指数作に対応してい ‘inf’, ‘+inf’, ‘-inf’、と ‘nan’. 浮動小数点数のエントリは、小数点で開始または終了することができます。 +書式設定時に、浮動小数点数の精度が失われることがあります。 +解析中に、最も近いマシン表現可能な番号を読み取ることは厳密には必要ありません。 + +日付はyyyy-mm-dd形式で書かれ、同じ形式で解析されますが、任意の文字を区切り文字として使用します。 +時刻を含む日付は、次の形式で書き込まれます `YYYY-MM-DD hh:mm:ss` 同じ形式で解析されますが、区切り文字として任意の文字が使用されます。 +これはすべて、クライアントまたはサーバーの起動時にシステムタイムゾーンで発生します(データの形式に応じて異なります)。 時刻を含む日付の場合、夏時間は指定されません。 したがって、ダンプが夏時間の間に時間がある場合、ダンプはデータと明確に一致しません。 +読み取り操作中に、時間を含む不適切な日付と日付は、エラーメッセージなしで自然なオーバーフローまたはnull日付と時刻として解析できます。 + +例外として、時刻を含む日付の解析は、unixタイムスタンプ形式でもサポートされています(正確に10桁の数字で構成されている場合)。 結果はタイムゾーンに依存しません。 フォーマットyyyy-mm-dd hh:mm:ssとnnnnnnnnnは自動的に区別されます。 + +文字列はバックスラッシュでエスケープされた特殊文字で出力されます。 以下のエスケープシーケンスを使用出力: `\b`, `\f`, `\r`, `\n`, `\t`, `\0`, `\'`, `\\`. 解析にも対応し配列 `\a`, `\v`、と `\xHH` (hexエスケープシーケンス)および `\c` シーケンス、ここで `c` は任意の文字です(これらのシーケンスは `c`). 
このように、データを読み込む形式に対応し、改行して書き込み可能で `\n` または `\`、または改行として。 たとえば、文字列 `Hello world` スペースではなく単語間の改行を使用すると、次のいずれかのバリエーションで解析できます: + +``` text +Hello\nworld + +Hello\ +world +``` + +これは、mysqlがタブで区切られたダンプを書き込むときに使用するためです。 + +TabSeparated形式でデータを渡すときにエスケープする必要がある文字の最小セット:tab、改行(LF)、およびバックスラッシュ。 + +小さなシンボルのみがエスケープされます。 あなたの端末が出力で台無しにする文字列値に簡単につまずくことができます。 + +配列は、角かっこで囲まれたコンマ区切りの値のリストとして記述されます。 配列内の数値項目は通常どおりに書式設定されます。 `Date` と `DateTime` 型は一重引quotesで書き込まれます。 文字列は、上記と同じエスケープ規則で一重引quotesで書き込まれます。 + +[NULL](../sql_reference/syntax.md) フォーマットとして `\N`. + +の各要素 [ネスト](../sql_reference/data_types/nested_data_structures/nested.md) 構造体は配列として表されます。 + +例えば: + +``` sql +CREATE TABLE nestedt +( + `id` UInt8, + `aux` Nested( + a UInt8, + b String + ) +) +ENGINE = TinyLog +``` + +``` sql +INSERT INTO nestedt Values ( 1, [1], ['a']) +``` + +``` sql +SELECT * FROM nestedt FORMAT TSV +``` + +``` text +1 [1] ['a'] +``` + +## TabSeparatedRaw {#tabseparatedraw} + +とは異なります `TabSeparated` エスケープせずに行が書き込まれるという形式です。 +この形式は、クエリ結果を出力する場合にのみ適切ですが、解析(テーブルに挿入するデータの取得)には適していません。 + +この形式は、名前の下でも利用できます `TSVRaw`. + +## Tabseparatedwithnamesname {#tabseparatedwithnames} + +とは異なり `TabSeparated` 列名が最初の行に書き込まれる形式。 +解析中、最初の行は完全に無視されます。 列名を使用して、列の位置を特定したり、列の正確性を確認したりすることはできません。 +(ヘッダー行の解析のサポートは、将来追加される可能性があります。) + +この形式は、名前の下でも利用できます `TSVWithNames`. + +## Tabseparatedwithnamesandtypesname {#tabseparatedwithnamesandtypes} + +とは異なり `TabSeparated` 列名が最初の行に書き込まれ、列タイプが次の行に書き込まれるという形式です。 +解析時には、最初と二番目の行は完全に無視されます。 + +この形式は、名前の下でも利用できます `TSVWithNamesAndTypes`. + +## テンプレ {#format-template} + +このフォーマットで指定するカスタムフォーマット文字列とプレースホルダーのための値を指定して逃げます。 + +それは設定を使用します `format_template_resultset`, `format_template_row`, `format_template_rows_between_delimiter` and some settings of other formats (e.g. `output_format_json_quote_64bit_integers` 使用する場合 `JSON` エスケープ,さらに見る) + +設定 `format_template_row` 次の構文の行の書式文字列を含むファイルへのパスを指定します: + +`delimiter_1${column_1:serializeAs_1}delimiter_2${column_2:serializeAs_2} ... delimiter_N`, + +どこに `delimiter_i` 値間の区切り文字です (`$` シンボルは `$$`), +`column_i` 値が選択または挿入される列の名前またはインデックスを指定します(空の場合、列はスキップされます), +`serializeAs_i` 列の値のエスケープ規則です。 以下の脱出ルールに対応: + +- `CSV`, `JSON`, `XML` (同じ名前の形式と同様に) +- `Escaped` (同様に `TSV`) +- `Quoted` (同様に `Values`) +- `Raw` (エスケープせずに、同様に `TSVRaw`) +- `None` (エスケープルールはありません。) + +エスケープルールが省略された場合は、 `None` 使用されます。 `XML` と `Raw` 出力にのみ適しています。 + +したがって、次の書式文字列については: + + `Search phrase: ${SearchPhrase:Quoted}, count: ${c:Escaped}, ad price: $$${price:JSON};` + +の値 `SearchPhrase`, `c` と `price` としてエスケープされる列 `Quoted`, `Escaped` と `JSON` (選択のために)印刷されるか、または(挿入のために)その間期待されます `Search phrase:`, `, count:`, `, ad price: $` と `;` それぞれ区切り文字。 例えば: + +`Search phrase: 'bathroom interior design', count: 2166, ad price: $3;` + +その `format_template_rows_between_delimiter` 最後の行を除くすべての行の後に印刷される(または期待される)行間の区切り文字を指定します (`\n` デフォルトでは) + +設定 `format_template_resultset` resultsetの書式文字列を含むファイルへのパスを指定します。 Resultsetの書式文字列は、行の書式文字列と同じ構文を持ち、接頭辞、接尾辞、およびいくつかの追加情報を出力する方法を指定できます。 で以下のプレースホルダの代わりにカラム名: + +- `data` データのある行ですか `format_template_row` フォーマット `format_template_rows_between_delimiter`. 
このプレースホルダーは、書式文字列の最初のプレースホルダーでなければなりません
- `totals` は、合計値を含む行です(WITH TOTALSを使用する場合)。 `format_template_row` 形式が使用されます
- `min` は、最小値を持つ行です(extremesが1に設定されている場合)。 `format_template_row` 形式が使用されます
- `max` は、最大値を持つ行です(extremesが1に設定されている場合)。 `format_template_row` 形式が使用されます
- `rows` は、出力行の合計数です
- `rows_before_limit` は、LIMITがなかった場合に存在したであろう行数の下限です。 クエリにLIMITが含まれる場合にのみ出力されます。 クエリにGROUP BYが含まれている場合、rows\_before\_limit\_at\_least は、LIMITがなければ存在した正確な行数です
- `time` は、リクエストの実行時間(秒)です
- `rows_read` は、読み取られた行数です
- `bytes_read` は、読み取られたバイト数(非圧縮)です

プレースホルダー `data`, `totals`, `min`, `max` には、エスケープルールを指定してはいけません(または `None` を明示的に指定する必要があります)。残りのプレースホルダーには、任意のエスケープルールを指定できます。
`format_template_resultset` 設定が空の文字列の場合、 `${data}` がデフォルト値として使用されます。
INSERTクエリでは、この形式を使って一部の列や一部のフィールドをスキップできます(下の挿入例を参照)。

選択例:

``` sql
SELECT SearchPhrase, count() AS c FROM test.hits GROUP BY SearchPhrase ORDER BY c DESC LIMIT 5 FORMAT Template SETTINGS
format_template_resultset = '/some/path/resultset.format', format_template_row = '/some/path/row.format', format_template_rows_between_delimiter = '\n    '
```

`/some/path/resultset.format`:

``` text
<!DOCTYPE HTML>
<html> <head> <title>Search phrases</title> </head>
 <body>
  <table border="1"> <caption>Search phrases</caption>
    <tr> <th>Search phrase</th> <th>Count</th> </tr>
    ${data}
  </table>
  <table border="1"> <caption>Max</caption>
    ${max}
  </table>
  <b>Processed ${rows_read:XML} rows in ${time:XML} sec</b>
 </body>
</html>
```

`/some/path/row.format`:

``` text
<tr> <td>${0:XML}</td> <td>${1:XML}</td> </tr>
```

結果:

``` html
<!DOCTYPE HTML>
<html> <head> <title>Search phrases</title> </head>
 <body>
  <table border="1"> <caption>Search phrases</caption>
    <tr> <th>Search phrase</th> <th>Count</th> </tr>
    <tr> <td></td> <td>8267016</td> </tr>
    <tr> <td>bathroom interior design</td> <td>2166</td> </tr>
    <tr> <td>yandex</td> <td>1655</td> </tr>
    <tr> <td>spring 2014 fashion</td> <td>1549</td> </tr>
    <tr> <td>freeform photos</td> <td>1480</td> </tr>
  </table>
  <table border="1"> <caption>Max</caption>
    <tr> <td></td> <td>8873898</td> </tr>
    + Processed 3095973 rows in 0.1569913 sec + + +``` + +挿入例: + +``` text +Some header +Page views: 5, User id: 4324182021466249494, Useless field: hello, Duration: 146, Sign: -1 +Page views: 6, User id: 4324182021466249494, Useless field: world, Duration: 185, Sign: 1 +Total rows: 2 +``` + +``` sql +INSERT INTO UserActivity FORMAT Template SETTINGS +format_template_resultset = '/some/path/resultset.format', format_template_row = '/some/path/row.format' +``` + +`/some/path/resultset.format`: + +``` text +Some header\n${data}\nTotal rows: ${:CSV}\n +``` + +`/some/path/row.format`: + +``` text +Page views: ${PageViews:CSV}, User id: ${UserID:CSV}, Useless field: ${:CSV}, Duration: ${Duration:CSV}, Sign: ${Sign:CSV} +``` + +`PageViews`, `UserID`, `Duration` と `Sign` 内部のプレースホルダーは、テーブル内の列の名前です。 その後の値 `Useless field` 行とその後 `\nTotal rows:` サフィックスでは無視されます。 +すべての区切り文字の入力データを厳密に等しい区切り文字で指定されたフォーマット文字列です。 + +## TemplateIgnoreSpaces {#templateignorespaces} + +この形式は入力にのみ適しています。 +に似て `Template` ただし、入力ストリームの区切り文字と値の間の空白文字はスキップします。 ただし、書式指定文字列に空白文字が含まれている場合は、これらの文字が入力ストリームに必要になります。 空のプレースホルダも指定できます (`${}` または `${:None}`)いくつかの区切り文字を別々の部分に分割して、それらの間の空白を無視する。 などのプレースホルダを使用させていただきますの飛び空白文字です。 +それは読むことが可能です `JSON` 列の値がすべての行で同じ順序を持つ場合、この形式を使用します。 たとえば、次のリクエストは、formatの出力例からデータを挿入するために使用できます [JSON](#json): + +``` sql +INSERT INTO table_name FORMAT TemplateIgnoreSpaces SETTINGS +format_template_resultset = '/some/path/resultset.format', format_template_row = '/some/path/row.format', format_template_rows_between_delimiter = ',' +``` + +`/some/path/resultset.format`: + +``` text +{${}"meta"${}:${:JSON},${}"data"${}:${}[${data}]${},${}"totals"${}:${:JSON},${}"extremes"${}:${:JSON},${}"rows"${}:${:JSON},${}"rows_before_limit_at_least"${}:${:JSON}${}} +``` + +`/some/path/row.format`: + +``` text +{${}"SearchPhrase"${}:${}${phrase:JSON}${},${}"c"${}:${}${cnt:JSON}${}} +``` + +## TSKV {#tskv} + +TabSeparatedに似ていますが、name=value形式で値を出力します。 名前はTabSeparated形式と同じようにエスケープされ、=記号もエスケープされます。 + +``` text +SearchPhrase= count()=8267016 +SearchPhrase=bathroom interior design count()=2166 +SearchPhrase=yandex count()=1655 +SearchPhrase=2014 spring fashion count()=1549 +SearchPhrase=freeform photos count()=1480 +SearchPhrase=angelina jolie count()=1245 +SearchPhrase=omsk count()=1112 +SearchPhrase=photos of dog breeds count()=1091 +SearchPhrase=curtain designs count()=1064 +SearchPhrase=baku count()=1000 +``` + +[NULL](../sql_reference/syntax.md) フォーマットとして `\N`. + +``` sql +SELECT * FROM t_null FORMAT TSKV +``` + +``` text +x=1 y=\N +``` + +多数の小さな列がある場合、この形式は無効であり、一般的にそれを使用する理由はありません。 それにもかかわらず、それは効率の面でjsoneachrowよりも悪くありません。 + +Both data output and parsing are supported in this format. For parsing, any order is supported for the values of different columns. It is acceptable for some values to be omitted – they are treated as equal to their default values. In this case, zeros and blank rows are used as default values. Complex values that could be specified in the table are not supported as defaults. + +解析により、追加フィールドの存在が許可されます `tskv` 等号または値なし。 この項目は無視されます。 + +## CSV {#csv} + +コンマ区切りの値の形式 ([RFC](https://tools.ietf.org/html/rfc4180)). + +書式設定の場合、行は二重引用符で囲まれます。 文字列内の二重引用符は、行内の二つの二重引用符として出力されます。 文字をエスケープする他の規則はありません。 日付と日時は二重引用符で囲みます。 数字は引用符なしで出力されます。 値は区切り文字で区切られます。 `,` デフォルトでは。 区切り文字は設定で定義されます [format\_csv\_delimiter](../operations/settings/settings.md#settings-format_csv_delimiter). 
行は、Unixの改行(LF)を使用して区切られます。 まず、配列をTabSeparated形式のように文字列にシリアル化し、結果の文字列を二重引用符でCSVに出力します。 CSV形式の組は、別々の列としてシリアル化されます(つまり、組内の入れ子は失われます)。 + +``` bash +$ clickhouse-client --format_csv_delimiter="|" --query="INSERT INTO test.csv FORMAT CSV" < data.csv +``` + +\*デフォルトでは、区切り文字は `,`. を見る [format\_csv\_delimiter](../operations/settings/settings.md#settings-format_csv_delimiter) より多くの情報のための設定。 + +解析時には、すべての値を引用符で囲んで解析することができます。 二重引用符と一重引quotesの両方がサポートされます。 行は、引用符なしで配置することもできます。 この場合、それらは区切り文字または改行(crまたはlf)まで解析されます。 rfcに違反して、引用符なしで行を解析するとき、先頭と末尾のスペースとタブは無視されます。 改行には、unix(lf)、windows(cr lf)、およびmac os classic(cr lf)タイプがすべてサポートされています。 + +空の引用符で囲まれていない入力値は、それぞれの列のデフォルト値に置き換えられます。 +[input\_format\_defaults\_for\_omitted\_fields](../operations/settings/settings.md#session_settings-input_format_defaults_for_omitted_fields) +は有効です。 + +`NULL` フォーマットとして `\N` または `NULL` または、引用符で囲まれていない空の文字列("設定"を参照 [input\_format\_csv\_unquoted\_null\_literal\_as\_null](../operations/settings/settings.md#settings-input_format_csv_unquoted_null_literal_as_null) と [input\_format\_defaults\_for\_omitted\_fields](../operations/settings/settings.md#session_settings-input_format_defaults_for_omitted_fields)). + +CSV形式は、totalsとextremesの出力を次のようにサポートします `TabSeparated`. + +## Csvwithnamesname {#csvwithnames} + +また、次のようなヘッダー行も出力します `TabSeparatedWithNames`. + +## CustomSeparated {#format-customseparated} + +に似て [テンプレ](#format-template) ですが、版画を読み込みまたは全てのカラムを使用脱出ルールからの設定 `format_custom_escaping_rule` 設定からの区切り文字 `format_custom_field_delimiter`, `format_custom_row_before_delimiter`, `format_custom_row_after_delimiter`, `format_custom_row_between_delimiter`, `format_custom_result_before_delimiter` と `format_custom_result_after_delimiter`、書式文字列からではありません。 +また、 `CustomSeparatedIgnoreSpaces` フォーマット `TemplateIgnoreSpaces`. + +## JSON {#json} + +JSON形式でデータを出力します。 データテーブルのほかに、列名と型、およびいくつかの追加情報(出力行の合計数、および制限がない場合に出力される可能性のある行の数)も出力します。 例えば: + +``` sql +SELECT SearchPhrase, count() AS c FROM test.hits GROUP BY SearchPhrase WITH TOTALS ORDER BY c DESC LIMIT 5 FORMAT JSON +``` + +``` json +{ + "meta": + [ + { + "name": "SearchPhrase", + "type": "String" + }, + { + "name": "c", + "type": "UInt64" + } + ], + + "data": + [ + { + "SearchPhrase": "", + "c": "8267016" + }, + { + "SearchPhrase": "bathroom interior design", + "c": "2166" + }, + { + "SearchPhrase": "yandex", + "c": "1655" + }, + { + "SearchPhrase": "spring 2014 fashion", + "c": "1549" + }, + { + "SearchPhrase": "freeform photos", + "c": "1480" + } + ], + + "totals": + { + "SearchPhrase": "", + "c": "8873898" + }, + + "extremes": + { + "min": + { + "SearchPhrase": "", + "c": "1480" + }, + "max": + { + "SearchPhrase": "", + "c": "8267016" + } + }, + + "rows": 5, + + "rows_before_limit_at_least": 141137 +} +``` + +JSONはJavaScriptと互換性があります。 これを確実にするために、一部の文字は追加でエスケープされます。 `/` としてエスケープ `\/`;代替改行 `U+2028` と `U+2029` いくつかのブラウザを破る、としてエスケープ `\uXXXX`. バックスペース、フォームフィード、ラインフィード、キャリッジリターン、および水平タブがエスケープされます `\b`, `\f`, `\n`, `\r`, `\t` 00-1F範囲の残りのバイトと同様に、 `\uXXXX` sequences. Invalid UTF-8 sequences are changed to the replacement character � so the output text will consist of valid UTF-8 sequences. For compatibility with JavaScript, Int64 and UInt64 integers are enclosed in double-quotes by default. To remove the quotes, you can set the configuration parameter [output\_format\_json\_quote\_64bit\_integers](../operations/settings/settings.md#session_settings-output_format_json_quote_64bit_integers) に0. + +`rows` – The total number of output rows. 
+ +`rows_before_limit_at_least` そこにある行の最小数は制限なしであったでしょう。 出力の場合のみを含むクエリを制限します。 +クエリにgroup byが含まれている場合、rows\_before\_limit\_at\_leastは、制限なしで存在していた正確な行数です。 + +`totals` – Total values (when using WITH TOTALS). + +`extremes` – Extreme values (when extremes are set to 1). + +この形式は、クエリ結果を出力する場合にのみ適切ですが、解析(テーブルに挿入するデータの取得)には適していません。 + +ClickHouse支援 [NULL](../sql_reference/syntax.md) として表示されます `null` JSON出力で。 + +また、 [JSONEachRow](#jsoneachrow) フォーマット。 + +## JSONCompact {#jsoncompact} + +JSONとは異なり、データ行はオブジェクトではなく配列内に出力されます。 + +例えば: + +``` json +{ + "meta": + [ + { + "name": "SearchPhrase", + "type": "String" + }, + { + "name": "c", + "type": "UInt64" + } + ], + + "data": + [ + ["", "8267016"], + ["bathroom interior design", "2166"], + ["yandex", "1655"], + ["fashion trends spring 2014", "1549"], + ["freeform photo", "1480"] + ], + + "totals": ["","8873898"], + + "extremes": + { + "min": ["","1480"], + "max": ["","8267016"] + }, + + "rows": 5, + + "rows_before_limit_at_least": 141137 +} +``` + +この形式は、クエリ結果を出力する場合にのみ適切ですが、解析(テーブルに挿入するデータの取得)には適していません。 +また、 `JSONEachRow` フォーマット。 + +## JSONEachRow {#jsoneachrow} + +この形式を使用する場合、clickhouseは行を区切られた改行で区切られたjsonオブジェクトとして出力しますが、データ全体が有効なjsonではありません。 + +``` json +{"SearchPhrase":"curtain designs","count()":"1064"} +{"SearchPhrase":"baku","count()":"1000"} +{"SearchPhrase":"","count()":"8267016"} +``` + +データを挿入するときは、各行に別々のjsonオブジェクトを指定する必要があります。 + +### データの挿入 {#inserting-data} + +``` sql +INSERT INTO UserActivity FORMAT JSONEachRow {"PageViews":5, "UserID":"4324182021466249494", "Duration":146,"Sign":-1} {"UserID":"4324182021466249494","PageViews":6,"Duration":185,"Sign":1} +``` + +クリックハウスは: + +- オブジェクト内のキーと値のペアの順序。 +- いくつかの値を省略する。 + +ClickHouseを無視した空間要素には、カンマの後にオブジェクト。 すべてのオブジェクトを一行で渡すことができます。 改行で区切る必要はありません。 + +**省略された値の処理** + +ClickHouseは、省略された値を対応するデフォルト値に置き換えます [データ型](../sql_reference/data_types/index.md). + +もし `DEFAULT expr` は、ClickHouseはに応じて異なる置換規則を使用して、指定されています [input\_format\_defaults\_for\_omitted\_fields](../operations/settings/settings.md#session_settings-input_format_defaults_for_omitted_fields) 設定。 + +次の表を考えてみます: + +``` sql +CREATE TABLE IF NOT EXISTS example_table +( + x UInt32, + a DEFAULT x * 2 +) ENGINE = Memory; +``` + +- もし `input_format_defaults_for_omitted_fields = 0` のデフォルト値を返します。 `x` と `a` 等しい `0` のデフォルト値として `UInt32` データ型)。 +- もし `input_format_defaults_for_omitted_fields = 1` のデフォルト値を返します。 `x` 等しい `0` しかし、デフォルト値は `a` 等しい `x * 2`. + +!!! note "警告" + データを挿入するとき `insert_sample_with_metadata = 1`、ClickHouseは、より多くの計算リソースを消費します。 `insert_sample_with_metadata = 0`. + +### データの選択 {#selecting-data} + +考慮する `UserActivity` 例として表: + +``` text +┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐ +│ 4324182021466249494 │ 5 │ 146 │ -1 │ +│ 4324182021466249494 │ 6 │ 185 │ 1 │ +└─────────────────────┴───────────┴──────────┴──────┘ +``` + +クエリ `SELECT * FROM UserActivity FORMAT JSONEachRow` を返します: + +``` text +{"UserID":"4324182021466249494","PageViews":5,"Duration":146,"Sign":-1} +{"UserID":"4324182021466249494","PageViews":6,"Duration":185,"Sign":1} +``` + +とは異なり [JSON](#json) 形式は、無効なUTF-8シーケンスの置換はありません。 値は、forと同じ方法でエスケープされます `JSON`. + +!!! 
note "メモ" + 任意のバイトセットを文字列に出力することができます。 を使用 `JSONEachRow` テーブル内のデータをJSON形式にすることができると確信している場合は、情報を失うことなく書式設定します。 + +### 入れ子構造の使用法 {#jsoneachrow-nested} + +あなたがテーブルを持っている場合 [ネスト](../sql_reference/data_types/nested_data_structures/nested.md) データ型の列には、同じ構造でJSONデータを挿入することができます。 この機能を有効にするには [input\_format\_import\_nested\_json](../operations/settings/settings.md#settings-input_format_import_nested_json) 設定。 + +たとえば、次の表を考えてみます: + +``` sql +CREATE TABLE json_each_row_nested (n Nested (s String, i Int32) ) ENGINE = Memory +``` + +あなたが見ることができるように `Nested` データ型の説明、ClickHouseは、入れ子構造の各コンポーネントを個別の列として扱います (`n.s` と `n.i` 私達のテーブルのため)。 次の方法でデータを挿入できます: + +``` sql +INSERT INTO json_each_row_nested FORMAT JSONEachRow {"n.s": ["abc", "def"], "n.i": [1, 23]} +``` + +データを階層jsonオブジェクトとして挿入するには、 [input\_format\_import\_nested\_json=1](../operations/settings/settings.md#settings-input_format_import_nested_json). + +``` json +{ + "n": { + "s": ["abc", "def"], + "i": [1, 23] + } +} +``` + +この設定がない場合、clickhouseは例外をスローします。 + +``` sql +SELECT name, value FROM system.settings WHERE name = 'input_format_import_nested_json' +``` + +``` text +┌─name────────────────────────────┬─value─┐ +│ input_format_import_nested_json │ 0 │ +└─────────────────────────────────┴───────┘ +``` + +``` sql +INSERT INTO json_each_row_nested FORMAT JSONEachRow {"n": {"s": ["abc", "def"], "i": [1, 23]}} +``` + +``` text +Code: 117. DB::Exception: Unknown field found while parsing JSONEachRow format: n: (at row 1) +``` + +``` sql +SET input_format_import_nested_json=1 +INSERT INTO json_each_row_nested FORMAT JSONEachRow {"n": {"s": ["abc", "def"], "i": [1, 23]}} +SELECT * FROM json_each_row_nested +``` + +``` text +┌─n.s───────────┬─n.i────┐ +│ ['abc','def'] │ [1,23] │ +└───────────────┴────────┘ +``` + +## ネイティブ {#native} + +最も効率的な形式。 データ書き込みおよび読み込みをブロックのバイナリ形式です。 各ブロックについて、行数、列数、列名と型、およびこのブロック内の列の一部が次々に記録されます。 つまり、この形式は次のとおりです “columnar” – it doesn't convert columns to rows. This is the format used in the native interface for interaction between servers, for using the command-line client, and for C++ clients. + +この形式を使用すると、clickhouse dbmsでのみ読み取ることができるダンプをすばやく生成できます。 この形式を自分で操作するのは意味がありません。 + +## ヌル {#null} + +何も出力されません。 ただし、クエリが処理され、コマンドラインクライアントを使用すると、データがクライアントに送信されます。 パフォーマンステストを含むテストに使用されます。 +明らかに、この形式は出力にのみ適しており、解析には適していません。 + +## 可愛い {#pretty} + +出力データとしてのunicodeトテーブルも用ansi-エスケープシーケンス設定色の端子です。 +テーブルの完全なグリッドが描画され、各行は端末内の二行を占めています。 +各結果ブロックは、別のテーブルとして出力されます。 これは、結果をバッファリングせずにブロックを出力できるようにするために必要です(すべての値の可視幅を事前に計算するためにバッファリ + +[NULL](../sql_reference/syntax.md) として出力されます `ᴺᵁᴸᴸ`. 
+ +例(以下に示す [PrettyCompact](#prettycompact) 書式): + +``` sql +SELECT * FROM t_null +``` + +``` text +┌─x─┬────y─┐ +│ 1 │ ᴺᵁᴸᴸ │ +└───┴──────┘ +``` + +行はpretty\*形式でエスケープされません。 例はのために示されています [PrettyCompact](#prettycompact) 書式: + +``` sql +SELECT 'String with \'quotes\' and \t character' AS Escaping_test +``` + +``` text +┌─Escaping_test────────────────────────┐ +│ String with 'quotes' and character │ +└──────────────────────────────────────┘ +``` + +ターミナルへのデータのダンプを避けるために、最初の10,000行だけが出力されます。 行数が10,000以上の場合、メッセージは次のようになります “Showed first 10 000” 印刷されます。 +この形式は、クエリ結果を出力する場合にのみ適切ですが、解析(テーブルに挿入するデータの取得)には適していません。 + +かの形式に対応出力の合計値(利用の場合との合計)は、極端な場合 ‘extremes’ 1)に設定します。 このような場合、合計値と極値がメインデータの後に別のテーブルで出力されます。 例(以下に示す [PrettyCompact](#prettycompact) 書式): + +``` sql +SELECT EventDate, count() AS c FROM test.hits GROUP BY EventDate WITH TOTALS ORDER BY EventDate FORMAT PrettyCompact +``` + +``` text +┌──EventDate─┬───────c─┐ +│ 2014-03-17 │ 1406958 │ +│ 2014-03-18 │ 1383658 │ +│ 2014-03-19 │ 1405797 │ +│ 2014-03-20 │ 1353623 │ +│ 2014-03-21 │ 1245779 │ +│ 2014-03-22 │ 1031592 │ +│ 2014-03-23 │ 1046491 │ +└────────────┴─────────┘ + +Totals: +┌──EventDate─┬───────c─┐ +│ 0000-00-00 │ 8873898 │ +└────────────┴─────────┘ + +Extremes: +┌──EventDate─┬───────c─┐ +│ 2014-03-17 │ 1031592 │ +│ 2014-03-23 │ 1406958 │ +└────────────┴─────────┘ +``` + +## PrettyCompact {#prettycompact} + +とは異なります [可愛い](#pretty) グリッドが行間に描画され、結果がよりコンパクトになるという点です。 +この形式は、対話モードのコマンドラインクライアントでは既定で使用されます。 + +## PrettyCompactMonoBlock {#prettycompactmonoblock} + +とは異なります [PrettyCompact](#prettycompact) その中で最大10,000行がバッファリングされ、ブロックではなく単一のテーブルとして出力されます。 + +## PrettyNoEscapes {#prettynoescapes} + +Ansi-escapeシーケンスが使用されていない点がPrettyと異なります。 これは、ブラウザでこの形式を表示するためだけでなく、 ‘watch’ コマンドラインユーティ + +例えば: + +``` bash +$ watch -n1 "clickhouse-client --query='SELECT event, value FROM system.events FORMAT PrettyCompactNoEscapes'" +``` + +ブラウザに表示するためにhttpインターフェイスを使用できます。 + +### Prettompactnoescapes {#prettycompactnoescapes} + +前の設定と同じです。 + +### PrettySpaceNoEscapes {#prettyspacenoescapes} + +前の設定と同じです。 + +## PrettySpace {#prettyspace} + +とは異なります [PrettyCompact](#prettycompact) その空白(空白文字)では、グリッドの代わりに使用されます。 + +## RowBinary {#rowbinary} + +バイナリ形式の行ごとにデータを書式設定および解析します。 行と値は、区切り文字なしで連続して一覧表示されます。 +この形式は、行ベースであるため、ネイティブ形式よりも効率的ではありません。 + +整数は固定長のリトルエンディアン表現を使用します。 たとえば、uint64は8バイトを使用します。 +DateTimeは、Unixタイムスタンプを値として含むUInt32として表されます。 +日付はuint16オブジェクトとして表され、このオブジェクトには1970-01-01からの日数が値として含まれます。 +Stringは、varintの長さ(符号なし)として表されます [LEB128](https://en.wikipedia.org/wiki/LEB128)その後に文字列のバイトが続きます。 +FixedStringは、単純にバイトのシーケンスとして表されます。 + +配列は、varintの長さ(符号なし)として表されます [LEB128](https://en.wikipedia.org/wiki/LEB128))、配列の連続した要素が続きます。 + +のために [NULL](../sql_reference/syntax.md#null-literal) 支援、追加のバイトを含む1または0が追加される前に各 [Nullable](../sql_reference/data_types/nullable.md) 値。 1の場合、値は次のようになります `NULL` このバイトは別の値として解釈されます。 0の場合、バイトの後の値はそうではありません `NULL`. + +## RowBinaryWithNamesAndTypes {#rowbinarywithnamesandtypes} + +に似て [RowBinary](#rowbinary)、しかし、追加されたヘッダと: + +- [LEB128](https://en.wikipedia.org/wiki/LEB128)-エンコードされた列数(N) +- N `String`s列名の指定 +- N `String`s列タイプの指定 + +## 値 {#data-format-values} + +版画毎に行ットに固定して使用します。 行はコンマで区切られます。 最後の行の後にコンマはありません。 角かっこ内の値もコンマで区切られます。 数字は引用符なしの小数点形式で出力されます。 配列は角かっこで囲まれて出力されます。 文字列、日付、および時刻を含む日付が引用符で囲まれて出力されます。 ルールのエスケープと解析は、 [タブ区切り](#tabseparated) フォーマット。 書式設定時には、余分なスペースは挿入されませんが、解析時には、それらは許可され、スキップされます(配列値内のスペースは許可されません)。 [NULL](../sql_reference/syntax.md) として表されます `NULL`. 
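たとえば、次の最小限の例では、Values形式は一行を次のように出力します(数値は引用符なし、文字列は一重引用符付き、配列は角かっこ付き):

``` bash
$ clickhouse-client --query="SELECT 1 AS x, 'hello' AS s, [1,2] AS a FORMAT Values"
(1,'hello',[1,2])
```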
+ +The minimum set of characters that you need to escape when passing data in Values ​​format: single quotes and backslashes. + +これは、以下で使用される形式です `INSERT INTO t VALUES ...` ただし、クエリ結果の書式設定にも使用できます。 + +また見なさい: [input\_format\_values\_interpret\_expressions](../operations/settings/settings.md#settings-input_format_values_interpret_expressions) と [input\_format\_values\_deduce\_templates\_of\_expressions](../operations/settings/settings.md#settings-input_format_values_deduce_templates_of_expressions) 設定。 + +## 垂直 {#vertical} + +各値を、指定された列名とは別の行に出力します。 このフォーマットは、各行が多数の列で構成されている場合に、単一または少数の行だけを印刷する場合に便利です。 + +[NULL](../sql_reference/syntax.md) として出力されます `ᴺᵁᴸᴸ`. + +例えば: + +``` sql +SELECT * FROM t_null FORMAT Vertical +``` + +``` text +Row 1: +────── +x: 1 +y: ᴺᵁᴸᴸ +``` + +行は縦書式でエスケープされません: + +``` sql +SELECT 'string with \'quotes\' and \t with some special \n characters' AS test FORMAT Vertical +``` + +``` text +Row 1: +────── +test: string with 'quotes' and with some special + characters +``` + +この形式は、クエリ結果を出力する場合にのみ適切ですが、解析(テーブルに挿入するデータの取得)には適していません。 + +## VerticalRaw {#verticalraw} + +に似て [垂直](#vertical) しかし、無効にエスケープすると。 この形式は、クエリ結果の出力にのみ適しており、解析(データの受信とテーブルへの挿入)には適していません。 + +## XML {#xml} + +XML形式は出力にのみ適しており、解析には適していません。 例えば: + +``` xml + + + + + + SearchPhrase + String + + + count() + UInt64 + + + + + + + 8267016 + + + bathroom interior design + 2166 + + + yandex + 1655 + + + 2014 spring fashion + 1549 + + + freeform photos + 1480 + + + angelina jolie + 1245 + + + omsk + 1112 + + + photos of dog breeds + 1091 + + + curtain designs + 1064 + + + baku + 1000 + + + 10 + 141137 + +``` + +列名に許容可能な形式がない場合は、 ‘field’ 要素名として使用されます。 一般に、XML構造はJSON構造に従います。 +Just as for JSON, invalid UTF-8 sequences are changed to the replacement character � so the output text will consist of valid UTF-8 sequences. + +文字列値では、文字 `<` と `&` としてエスケープ `<` と `&`. + +配列は出力されます `HelloWorld...`、およびタプルとして `HelloWorld...`. + +## CapnProto {#capnproto} + +Cap'n Protoは、プロトコルバッファやThriftに似たバイナリメッセージ形式ですが、JSONやMessagePackには似ていません。 + +Cap'n Protoメッセージは厳密に型付けされており、自己記述型ではありません。 スキーマはその場で適用され、クエリごとにキャッシュされます。 + +``` bash +$ cat capnproto_messages.bin | clickhouse-client --query "INSERT INTO test.hits FORMAT CapnProto SETTINGS format_schema='schema:Message'" +``` + +どこに `schema.capnp` このように見える: + +``` capnp +struct Message { + SearchPhrase @0 :Text; + c @1 :Uint64; +} +``` + +逆シリアル化は効果的であり、通常はシステムの負荷を増加させません。 + +また見なさい [書式スキーマ](#formatschema). + +## Protobuf {#protobuf} + +Protobufは-です [プロトコル](https://developers.google.com/protocol-buffers/) フォーマット。 + +この形式には、外部形式スキーマが必要です。 このスキーマをキャッシュ間のクエリ. 
+クリックハウスは、 `proto2` と `proto3` 構文。 繰り返し/省略可能/必須項目がサポートされます。 + +使用例: + +``` sql +SELECT * FROM test.table FORMAT Protobuf SETTINGS format_schema = 'schemafile:MessageType' +``` + +``` bash +cat protobuf_messages.bin | clickhouse-client --query "INSERT INTO test.table FORMAT Protobuf SETTINGS format_schema='schemafile:MessageType'" +``` + +ここで、ファイル `schemafile.proto` このように見える: + +``` capnp +syntax = "proto3"; + +message MessageType { + string name = 1; + string surname = 2; + uint32 birthDate = 3; + repeated string phoneNumbers = 4; +}; +``` + +の対応関係はテーブル列-分野のプロトコルバッファのメッセージタイプclickhouseを比較しつけられた名前が使われている。 +この比較では、大文字と小文字は区別されません `_` (アンダースコア)と `.` (ドット)は等しいとみなされます。 +列の型とプロトコルバッファのメッセージのフィールドが異なる場合、必要な変換が適用されます。 + +ネストしたメッセージに対応します。 たとえば、フィールドの場合 `z` 次のメッセージタイプ + +``` capnp +message MessageType { + message XType { + message YType { + int32 z; + }; + repeated YType y; + }; + XType x; +}; +``` + +ClickHouseは、名前の付いた列を検索しようとします `x.y.z` (または `x_y_z` または `X.y_Z` など)。 +ネストしたメッセージを入力と出力 [入れ子のデータ構造](../sql_reference/data_types/nested_data_structures/nested.md). + +このようなprotobufスキーマで定義されたデフォルト値 + +``` capnp +syntax = "proto2"; + +message MessageType { + optional int32 result_per_page = 3 [default = 10]; +} +``` + +適用されない。 [表のデフォルト](../sql_reference/statements/create.md#create-default-values) それらの代わりに使用されます。 + +クリックハウスの入力および出力のprotobufメッセージ `length-delimited` フォーマット。 +これは、すべてのメッセージがその長さを [varint](https://developers.google.com/protocol-buffers/docs/encoding#varints). +また見なさい [一般的な言語で長さ区切りのprotobufメッセージを読み書きする方法](https://cwiki.apache.org/confluence/display/GEODE/Delimiting+Protobuf+Messages). + +## アブロ {#data-format-avro} + +[Apache Avro](http://avro.apache.org/) は、列指向データを直列化の枠組みに発展してApache Hadoopのプロジェクト. + +ClickHouseアブロ形式の読み書きの支援 [Avroデータファイル](http://avro.apache.org/docs/current/spec.html#Object+Container+Files). 
+ +### 一致するデータ型 {#data_types-matching} + +下の表に、サポートされているデータの種類とどのように試合clickhouse [データ型](../sql_reference/data_types/index.md) で `INSERT` と `SELECT` クエリ。 + +| Avroデータ型 `INSERT` | ClickHouseデータタイプ | Avroデータ型 `SELECT` | +|---------------------------------------------|-------------------------------------------------------------------------------------------------------------------|------------------------------| +| `boolean`, `int`, `long`, `float`, `double` | [Int(8/16/32)](../sql_reference/data_types/int_uint.md), [UInt(8/16/32)](../sql_reference/data_types/int_uint.md) | `int` | +| `boolean`, `int`, `long`, `float`, `double` | [Int64](../sql_reference/data_types/int_uint.md), [UInt64](../sql_reference/data_types/int_uint.md) | `long` | +| `boolean`, `int`, `long`, `float`, `double` | [Float32](../sql_reference/data_types/float.md) | `float` | +| `boolean`, `int`, `long`, `float`, `double` | [Float64](../sql_reference/data_types/float.md) | `double` | +| `bytes`, `string`, `fixed`, `enum` | [文字列](../sql_reference/data_types/string.md) | `bytes` | +| `bytes`, `string`, `fixed` | [FixedString(N)](../sql_reference/data_types/fixedstring.md) | `fixed(N)` | +| `enum` | [Enum(8/16)](../sql_reference/data_types/enum.md) | `enum` | +| `array(T)` | [配列(t)](../sql_reference/data_types/array.md) | `array(T)` | +| `union(null, T)`, `union(T, null)` | [Nullable(T)](../sql_reference/data_types/date.md) | `union(null, T)` | +| `null` | [Nullable(何もなし)](../sql_reference/data_types/special_data_types/nothing.md) | `null` | +| `int (date)` \* | [日付](../sql_reference/data_types/date.md) | `int (date)` \* | +| `long (timestamp-millis)` \* | [DateTime64(3)](../sql_reference/data_types/datetime.md) | `long (timestamp-millis)` \* | +| `long (timestamp-micros)` \* | [DateTime64(6)](../sql_reference/data_types/datetime.md) | `long (timestamp-micros)` \* | + +\* [Avro論理型](http://avro.apache.org/docs/current/spec.html#Logical+Types) + +未サポートのavroデータ型: `record` (非ルート), `map` + +サポートされないavro論理データ型: `uuid`, `time-millis`, `time-micros`, `duration` + +### データの挿入 {#inserting-data-1} + +AvroファイルのデータをClickHouseテーブルに挿入するには: + +``` bash +$ cat file.avro | clickhouse-client --query="INSERT INTO {some_table} FORMAT Avro" +``` + +入力avroファ `record` タイプ。 + +AvroスキーマClickHouseのテーブル列とフィールド間の対応を検索するには、その名前を比較します。 この比較では、大文字と小文字が区別されます。 +未使用の項目はスキップされます。 + +データの種類clickhouseテーブルの列ができ、対応する分野においてアブロのデータを挿入します。 データを挿入するとき、clickhouseは上記の表に従ってデータ型を解釈します [キャスト](../query_language/functions/type_conversion_functions/#type_conversion_function-cast) 対応する列タイプのデータ。 + +### データの選択 {#selecting-data-1} + +ClickHouseテーブルからAvroファイルにデータを選択するには: + +``` bash +$ clickhouse-client --query="SELECT * FROM {some_table} FORMAT Avro" > file.avro +``` + +列名は必須です: + +- 始める `[A-Za-z_]` +- その後のみ `[A-Za-z0-9_]` + +出力avroファイル圧縮およびsync間隔はと形成することができます [output\_format\_avro\_codec](../operations/settings/settings.md#settings-output_format_avro_codec) と [output\_format\_avro\_sync\_interval](../operations/settings/settings.md#settings-output_format_avro_sync_interval) それぞれ。 + +## AvroConfluent {#data-format-avro-confluent} + +AvroConfluent支援復号単一のオブジェクトアブロのメッセージを使用する [カフカname](https://kafka.apache.org/) と [Confluentスキーマレジストリ](https://docs.confluent.io/current/schema-registry/index.html). 
各Avroメッセージにはスキーマ ID が埋め込まれており、スキーマレジストリを使って実際のスキーマに解決できます。

一度解決されたスキーマはキャッシュされます。

スキーマレジストリのURLは [format\_avro\_schema\_registry\_url](../operations/settings/settings.md#settings-format_avro_schema_registry_url) で設定します。

### 一致するデータ型 {#data_types-matching-1}

[Avro](#data-format-avro) と同じです。

### 使い方 {#usage}

スキーマ解決をすばやく検証するには、 [kafkacat](https://github.com/edenhill/kafkacat) と [clickhouse-local](../operations/utilities/clickhouse-local.md) を使用できます:

``` bash
$ kafkacat -b kafka-broker -C -t topic1 -o beginning -f '%s' -c 3 | clickhouse-local --input-format AvroConfluent --format_avro_schema_registry_url 'http://schema-registry' -S "field1 Int64, field2 String" -q 'select * from table'
1 a
2 b
3 c
```

`AvroConfluent` を [Kafka](../engines/table_engines/integrations/kafka.md) と組み合わせて使用するには:

``` sql
CREATE TABLE topic1_stream
(
    field1 String,
    field2 String
)
ENGINE = Kafka()
SETTINGS
kafka_broker_list = 'kafka-broker',
kafka_topic_list = 'topic1',
kafka_group_name = 'group1',
kafka_format = 'AvroConfluent';

SET format_avro_schema_registry_url = 'http://schema-registry';

SELECT * FROM topic1_stream;
```

!!! note "警告"
    設定 `format_avro_schema_registry_url` は、再起動後も値を保持するために `users.xml` で構成する必要があります。

## Parquet {#data-format-parquet}

[Apache Parquet](http://parquet.apache.org/) は、Hadoopエコシステムで広く使われている列指向ストレージ形式です。ClickHouseは、この形式の読み取りと書き込みの両方をサポートします。

### 一致するデータ型 {#data_types-matching-2}

次の表に、サポートされているデータ型と、それらが `INSERT` および `SELECT` クエリでClickHouseの [データ型](../sql_reference/data_types/index.md) とどのように対応するかを示します。

| Parquetデータ型 (`INSERT`) | ClickHouseデータ型 | Parquetデータ型 (`SELECT`) |
|----------------------------|--------------------|----------------------------|
| `UINT8`, `BOOL` | [UInt8](../sql_reference/data_types/int_uint.md) | `UINT8` |
| `INT8` | [Int8](../sql_reference/data_types/int_uint.md) | `INT8` |
| `UINT16` | [UInt16](../sql_reference/data_types/int_uint.md) | `UINT16` |
| `INT16` | [Int16](../sql_reference/data_types/int_uint.md) | `INT16` |
| `UINT32` | [UInt32](../sql_reference/data_types/int_uint.md) | `UINT32` |
| `INT32` | [Int32](../sql_reference/data_types/int_uint.md) | `INT32` |
| `UINT64` | [UInt64](../sql_reference/data_types/int_uint.md) | `UINT64` |
| `INT64` | [Int64](../sql_reference/data_types/int_uint.md) | `INT64` |
| `FLOAT`, `HALF_FLOAT` | [Float32](../sql_reference/data_types/float.md) | `FLOAT` |
| `DOUBLE` | [Float64](../sql_reference/data_types/float.md) | `DOUBLE` |
| `DATE32` | [Date](../sql_reference/data_types/date.md) | `UINT16` |
| `DATE64`, `TIMESTAMP` | [DateTime](../sql_reference/data_types/datetime.md) | `UINT32` |
| `STRING`, `BINARY` | [String](../sql_reference/data_types/string.md) | `STRING` |
| — | [FixedString](../sql_reference/data_types/fixedstring.md) | `STRING` |
| `DECIMAL` | [Decimal](../sql_reference/data_types/decimal.md) | `DECIMAL` |

ClickHouseは、精度を設定できる `Decimal` 型をサポートします。 `INSERT` クエリは、Parquetの `DECIMAL` 型をClickHouseの `Decimal128` 型として扱います。

サポートされていないParquetデータ型: `DATE32`, `TIME32`, `FIXED_SIZE_BINARY`, `JSON`, `UUID`, `ENUM`.
+ +データの種類clickhouseテーブルの列ができ、対応する分野の寄木細工のデータを挿入します。 データを挿入するとき、clickhouseは上記の表に従ってデータ型を解釈します [キャスト](../query_language/functions/type_conversion_functions/#type_conversion_function-cast) ClickHouseテーブル列に設定されているデータ型へのデータ。 + +### データの挿入と選択 {#inserting-and-selecting-data} + +次のコマンドを使用して、ファイルのparquetデータをclickhouseテーブルに挿入できます: + +``` bash +$ cat {filename} | clickhouse-client --query="INSERT INTO {some_table} FORMAT Parquet" +``` + +次のコマンドを実行すると、clickhouseテーブルからデータを選択し、parquet形式のファイルに保存することができます: + +``` bash +$ clickhouse-client --query="SELECT * FROM {some_table} FORMAT Parquet" > {some_file.pq} +``` + +Hadoopとデータを交換するには、次のようにします [HDFSテーブルエンジン](../engines/table_engines/integrations/hdfs.md). + +## ORC {#data-format-orc} + +[Apache ORC](https://orc.apache.org/) Hadoopエコシステムでは柱状のストレージ形式が普及しています。 この形式のデータのみをClickHouseに挿入できます。 + +### 一致するデータ型 {#data_types-matching-3} + +下の表に、サポートされているデータの種類とどのように試合clickhouse [データ型](../sql_reference/data_types/index.md) で `INSERT` クエリ。 + +| ORCデータ型 (`INSERT`) | ClickHouseデータタイプ | +|------------------------|-----------------------------------------------------| +| `UINT8`, `BOOL` | [UInt8](../sql_reference/data_types/int_uint.md) | +| `INT8` | [Int8](../sql_reference/data_types/int_uint.md) | +| `UINT16` | [UInt16](../sql_reference/data_types/int_uint.md) | +| `INT16` | [Int16](../sql_reference/data_types/int_uint.md) | +| `UINT32` | [UInt32](../sql_reference/data_types/int_uint.md) | +| `INT32` | [Int32](../sql_reference/data_types/int_uint.md) | +| `UINT64` | [UInt64](../sql_reference/data_types/int_uint.md) | +| `INT64` | [Int64](../sql_reference/data_types/int_uint.md) | +| `FLOAT`, `HALF_FLOAT` | [Float32](../sql_reference/data_types/float.md) | +| `DOUBLE` | [Float64](../sql_reference/data_types/float.md) | +| `DATE32` | [日付](../sql_reference/data_types/date.md) | +| `DATE64`, `TIMESTAMP` | [DateTime](../sql_reference/data_types/datetime.md) | +| `STRING`, `BINARY` | [文字列](../sql_reference/data_types/string.md) | +| `DECIMAL` | [小数](../sql_reference/data_types/decimal.md) | + +ClickHouseはの構成可能の精密を支えます `Decimal` タイプ。 その `INSERT` クエリはORCを処理します `DECIMAL` クリックハウスとして入力 `Decimal128` タイプ。 + +サポートされていな: `DATE32`, `TIME32`, `FIXED_SIZE_BINARY`, `JSON`, `UUID`, `ENUM`. + +ClickHouseテーブル列のデータ型は、対応するORCデータフィールドと一致する必要はありません。 データを挿入するとき、ClickHouseは上記の表に従ってデータ型を解釈します [キャスト](../query_language/functions/type_conversion_functions/#type_conversion_function-cast) ClickHouseテーブル列のデータ型セットに対するデータ。 + +### データの挿入 {#inserting-data-2} + +次のコマンドを実行すると、ファイルのorcデータをclickhouseテーブルに挿入できます: + +``` bash +$ cat filename.orc | clickhouse-client --query="INSERT INTO some_table FORMAT ORC" +``` + +Hadoopとデータを交換するには、次のようにします [HDFSテーブルエンジン](../engines/table_engines/integrations/hdfs.md). + +## 書式スキーマ {#formatschema} + +ファイルのファイル名を含む形式スキーマを設定し、設定 `format_schema`. +いずれかの形式を使用する場合は、この設定を行う必要があります `Cap'n Proto` と `Protobuf`. +フォーマット-スキーマは、このファイル内のファイル名とメッセージ-タイプ名の組み合わせで、コロンで区切られます, +e.g. `schemafile.proto:MessageType`. +ファイルにフォーマットの標準拡張子がある場合(例えば, `.proto` のために `Protobuf`), +これは省略することができ、この場合、形式スキーマは次のようになります `schemafile:MessageType`. 
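たとえば、スキーマファイルの名前が `schemafile.proto` であれば、次の二つの指定は同等です(テーブル名 `test.table` と入力ファイル `data.bin` は説明用の仮定です):

``` bash
$ clickhouse-client --query="INSERT INTO test.table FORMAT Protobuf SETTINGS format_schema = 'schemafile.proto:MessageType'" < data.bin
$ clickhouse-client --query="INSERT INTO test.table FORMAT Protobuf SETTINGS format_schema = 'schemafile:MessageType'" < data.bin
```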
+ +データを入力するか、または出力すれば [お客様](../interfaces/cli.md) で [対話モード](../interfaces/cli.md#cli_usage)、フォーマットスキーマで指定されたファイル名 +クライアン +でクライアントを使用する場合 [バッチモード](../interfaces/cli.md#cli_usage) は、パスのスキーマ"相対的"に指定する必要があります。 + +データを入力するか、または出力すれば [HTTPインター](../interfaces/http.md) フォーマットスキーマで指定したファイル名 +に指定されたディレクトリにあるはずです。 [format\_schema\_path](../operations/server_configuration_parameters/settings.md#server_configuration_parameters-format_schema_path) +サーバー構成で。 + +## エラーのスキップ {#skippingerrors} + +以下のようないくつかの形式 `CSV`, `TabSeparated`, `TSKV`, `JSONEachRow`, `Template`, `CustomSeparated` と `Protobuf` キ壊れた列が構文解析エラーが発生したときの解析から初めます。 見る [input\_format\_allow\_errors\_num](../operations/settings/settings.md#settings-input_format_allow_errors_num) と +[input\_format\_allow\_errors\_ratio](../operations/settings/settings.md#settings-input_format_allow_errors_ratio) 設定。 +制限: +-解析エラーの場合 `JSONEachRow` 新しい行(またはEOF)まですべてのデータをスキップするので、行は次のように区切られます `\n` エラーを正確にカウントする。 +- `Template` と `CustomSeparated` 最後の列の後にdelimiterを使用し、次の行の先頭を見つけるために行間の区切り文字を使用するので、エラーをスキップすると、少なくとも一方が空でない + +[元の記事](https://clickhouse.tech/docs/en/interfaces/formats/) diff --git a/docs/ja/interfaces/http.md b/docs/ja/interfaces/http.md deleted file mode 120000 index fb293841d8b..00000000000 --- a/docs/ja/interfaces/http.md +++ /dev/null @@ -1 +0,0 @@ -../../en/interfaces/http.md \ No newline at end of file diff --git a/docs/ja/interfaces/http.md b/docs/ja/interfaces/http.md new file mode 100644 index 00000000000..a794b3de269 --- /dev/null +++ b/docs/ja/interfaces/http.md @@ -0,0 +1,511 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 19 +toc_title: "HTTP\u30A4\u30F3\u30BF\u30FC" +--- + +# HTTPインター {#http-interface} + +ツづツつソツづォツづアツ鳴ウツ猟ソツづツづツつォツづ慊つキツ。 私たちは、javaやperl、シェルスクリプトから作業するためにそれを使用します。 他の部門では、perl、python、goからhttpインターフェイスが使用されています。 httpのインタフェースが限られにより、ネイティブインタフェースでより良い対応しています。 + +デフォルトでは、clickhouse-serverはポート8123でhttpをリッスンします(これは設定で変更できます)。 + +パラメータを指定せずにget/requestを実行すると、200個の応答コードと、以下で定義されている文字列が返されます [http\_server\_default\_response](../operations/server_configuration_parameters/settings.md#server_configuration_parameters-http_server_default_response) デフォルト値 “Ok.” (最後にラインフィード付き) + +``` bash +$ curl 'http://localhost:8123/' +Ok. +``` + +ヘルスチェックスクリプトでget/ping要求を使用します。 このハンドラは常に “Ok.” (最後に改行を入れて)。 バージョン18.12.13から利用可能。 + +``` bash +$ curl 'http://localhost:8123/ping' +Ok. 
+``` + +リクエストをurlとして送信する ‘query’ パラメータ、または投稿として。 または、クエリの先頭を ‘query’ パラメータ、およびポストの残りの部分(これが必要な理由を後で説明します)。 URLのサイズは16KBに制限されているため、大きなクエリを送信するときはこの点に注意してください。 + +成功すると、200応答コードとその結果が応答本文に表示されます。 +エラーが発生すると、応答本文に500応答コードとエラーの説明テキストが表示されます。 + +GETメソッドを使用する場合, ‘readonly’ 設定されています。 つまり、データを変更するクエリでは、POSTメソッドのみを使用できます。 クエリ自体は、POST本体またはURLパラメータのいずれかで送信できます。 + +例: + +``` bash +$ curl 'http://localhost:8123/?query=SELECT%201' +1 + +$ wget -O- -q 'http://localhost:8123/?query=SELECT 1' +1 + +$ echo -ne 'GET /?query=SELECT%201 HTTP/1.0\r\n\r\n' | nc localhost 8123 +HTTP/1.0 200 OK +Date: Wed, 27 Nov 2019 10:30:18 GMT +Connection: Close +Content-Type: text/tab-separated-values; charset=UTF-8 +X-ClickHouse-Server-Display-Name: clickhouse.ru-central1.internal +X-ClickHouse-Query-Id: 5abe861c-239c-467f-b955-8a201abb8b7f +X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0"} + +1 +``` + +あなたが見ることができるように、curlはurlエスケープする必要があります。 +Wgetはそれ自体をすべてエスケープしますが、keep-aliveとTransfer-Encoding:chunkedを使用するとHTTP1.1よりもうまく動作しないため、使用することはお勧めしません。 + +``` bash +$ echo 'SELECT 1' | curl 'http://localhost:8123/' --data-binary @- +1 + +$ echo 'SELECT 1' | curl 'http://localhost:8123/?query=' --data-binary @- +1 + +$ echo '1' | curl 'http://localhost:8123/?query=SELECT' --data-binary @- +1 +``` + +クエリの一部がパラメータで送信され、投稿の一部が送信されると、これらの二つのデータ部分の間に改行が挿入されます。 +例(これは動作しません): + +``` bash +$ echo 'ECT 1' | curl 'http://localhost:8123/?query=SEL' --data-binary @- +Code: 59, e.displayText() = DB::Exception: Syntax error: failed at position 0: SEL +ECT 1 +, expected One of: SHOW TABLES, SHOW DATABASES, SELECT, INSERT, CREATE, ATTACH, RENAME, DROP, DETACH, USE, SET, OPTIMIZE., e.what() = DB::Exception +``` + +デフォルトでは、データはtabseparated形式で返されます(詳細については、 “Formats” セクション)。 +他の形式を要求するには、クエリのformat句を使用します。 + +``` bash +$ echo 'SELECT 1 FORMAT Pretty' | curl 'http://localhost:8123/?' 
--data-binary @- +┏━━━┓ +┃ 1 ┃ +┡━━━┩ +│ 1 │ +└───┘ +``` + +データを送信するpostメソッドは、insertクエリに必要です。 この場合、urlパラメータにクエリの先頭を書き込み、postを使用して挿入するデータを渡すことができます。 挿入するデータは、たとえば、mysqlからタブで区切られたダンプです。 このようにして、insertクエリはmysqlからのload data local infileを置き換えます。 + +例:テーブルの作成: + +``` bash +$ echo 'CREATE TABLE t (a UInt8) ENGINE = Memory' | curl 'http://localhost:8123/' --data-binary @- +``` + +データ挿入用の使い慣れたinsertクエリの使用: + +``` bash +$ echo 'INSERT INTO t VALUES (1),(2),(3)' | curl 'http://localhost:8123/' --data-binary @- +``` + +データはクエリとは別に送信できます: + +``` bash +$ echo '(4),(5),(6)' | curl 'http://localhost:8123/?query=INSERT%20INTO%20t%20VALUES' --data-binary @- +``` + +任意のデータ形式を指定できます。 その ‘Values’ formatは、INSERTをt値に書き込むときに使用されるものと同じです: + +``` bash +$ echo '(7),(8),(9)' | curl 'http://localhost:8123/?query=INSERT%20INTO%20t%20FORMAT%20Values' --data-binary @- +``` + +タブ区切りのダンプからデータを挿入するには、対応する形式を指定します: + +``` bash +$ echo -ne '10\n11\n12\n' | curl 'http://localhost:8123/?query=INSERT%20INTO%20t%20FORMAT%20TabSeparated' --data-binary @- +``` + +テーブルの内容を読む。 データは、並列クエリ処理によりランダムな順序で出力されます: + +``` bash +$ curl 'http://localhost:8123/?query=SELECT%20a%20FROM%20t' +7 +8 +9 +10 +11 +12 +1 +2 +3 +4 +5 +6 +``` + +テーブルの削除。 + +``` bash +$ echo 'DROP TABLE t' | curl 'http://localhost:8123/' --data-binary @- +``` + +データテーブルを返さない要求が成功すると、空のレスポンスボディが返されます。 + +データを送信するときには、内部のclickhouse圧縮形式を使用できます。 圧縮されたデータには標準以外の形式があり、特別な形式を使用する必要があります `clickhouse-compressor` それを使用するプログラム(それは `clickhouse-client` パッケージ)。 データ挿入の効率を高めるために、サーバー側のチェックサム検証を無効にするには [http\_native\_compression\_disable\_checksumming\_on\_decompress](../operations/settings/settings.md#settings-http_native_compression_disable_checksumming_on_decompress) 設定。 + +指定した場合 `compress=1` URLでは、サーバーは送信するデータを圧縮します。 +指定した場合 `decompress=1` URLでは、サーバーは渡したデータと同じデータを解凍します。 `POST` 方法。 + +また、使用することを選択 [HTTP圧縮](https://en.wikipedia.org/wiki/HTTP_compression). 圧縮を送信するには `POST` 要求、要求ヘッダーを追加します `Content-Encoding: compression_method`. ClickHouseが応答を圧縮するには、次のように追加する必要があります `Accept-Encoding: compression_method`. ClickHouse支援 `gzip`, `br`、と `deflate` [圧縮方法](https://en.wikipedia.org/wiki/HTTP_compression#Content-Encoding_tokens). HTTP圧縮を有効にするには、ClickHouseを使用する必要があります [enable\_http\_compression](../operations/settings/settings.md#settings-enable_http_compression) 設定。 データ圧縮レベルを設定することができます [http\_zlib\_compression\_level](#settings-http_zlib_compression_level) すべての圧縮方法の設定。 + +利用することができ削減ネットワーク通信の送受信には大量のデータをダンプすると直ちに圧縮されます。 + +圧縮によるデータ送信の例: + +``` bash +#Sending data to the server: +$ curl -vsS "http://localhost:8123/?enable_http_compression=1" -d 'SELECT number FROM system.numbers LIMIT 10' -H 'Accept-Encoding: gzip' + +#Sending data to the client: +$ echo "SELECT 1" | gzip -c | curl -sS --data-binary @- -H 'Content-Encoding: gzip' 'http://localhost:8123/' +``` + +!!! note "メモ" + 一部のhttpクライアントは、デフォルトでサーバーからデータを解凍します `gzip` と `deflate` 圧縮設定を正しく使用していても、圧縮解除されたデータが得られることがあります。 + +を使用することができ ‘database’ URLパラメータの指定はデフォルトのデータベースです。 + +``` bash +$ echo 'SELECT number FROM numbers LIMIT 10' | curl 'http://localhost:8123/?database=system' --data-binary @- +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +``` + +既定では、サーバー設定に登録されているデータベースが既定のデータベースとして使用されます。 既定では、これはデータベースと呼ばれます ‘default’. あるいは、必ず指定のデータベースをドットの前にテーブルの名前です。 + +ユーザー名とパスワードは、次のいずれかの方法で指定できます: + +1. HTTP基本認証を使用する。 例えば: + + + +``` bash +$ echo 'SELECT 1' | curl 'http://user:password@localhost:8123/' -d @- +``` + +1. で ‘user’ と ‘password’ URLパラメーター。 例えば: + + + +``` bash +$ echo 'SELECT 1' | curl 'http://localhost:8123/?user=user&password=password' -d @- +``` + +1. 
を使用して ‘X-ClickHouse-User’ と ‘X-ClickHouse-Key’ ヘッダー。 例えば: + + + +``` bash +$ echo 'SELECT 1' | curl -H 'X-ClickHouse-User: user' -H 'X-ClickHouse-Key: password' 'http://localhost:8123/' -d @- +``` + +ユーザー名が指定されていない場合は、 `default` 名前が使用されます。 パスワードを指定しない場合は、空のパスワードが使用されます。 +にお使いいただけますurlパラメータで指定した設定処理の単一クエリーまたは全体をプロファイルを設定します。 例:http://localhost:8123/?プロファイル=ウェブ&max\_rows\_to\_read=1000000000&クエリ=選択+1 + +詳細については、 [設定](../operations/settings/index.md) セクション。 + +``` bash +$ echo 'SELECT number FROM system.numbers LIMIT 10' | curl 'http://localhost:8123/?' --data-binary @- +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +``` + +その他のパラ “SET”. + +同様に、httpプロトコルでclickhouseセッションを使用できます。 これを行うには、以下を追加する必要があります `session_id` 要求のパラメーターを取得します。 セッションIDとして任意の文字列を使用できます。 デフォルトでは、セッションは60秒の非アクティブの後に終了します。 このタイムアウトを変更するには、 `default_session_timeout` サーバー構成での設定、または `session_timeout` 要求のパラメーターを取得します。 セッショ `session_check=1` パラメータ。 単一のセッション内で実行できるのは、一度にひとつのクエリのみです。 + +クエリの進行状況に関する情報を受け取ることができます `X-ClickHouse-Progress` 応答ヘッダー。 これを行うには、 [send\_progress\_in\_http\_headers](../operations/settings/settings.md#settings-send_progress_in_http_headers). ヘッダーシーケンスの例: + +``` text +X-ClickHouse-Progress: {"read_rows":"2752512","read_bytes":"240570816","total_rows_to_read":"8880128"} +X-ClickHouse-Progress: {"read_rows":"5439488","read_bytes":"482285394","total_rows_to_read":"8880128"} +X-ClickHouse-Progress: {"read_rows":"8783786","read_bytes":"819092887","total_rows_to_read":"8880128"} +``` + +可能なヘッダフィールド: + +- `read_rows` — Number of rows read. +- `read_bytes` — Volume of data read in bytes. +- `total_rows_to_read` — Total number of rows to be read. +- `written_rows` — Number of rows written. +- `written_bytes` — Volume of data written in bytes. + +HTTP接続が失われた場合、実行中の要求は自動的に停止しません。 解析とデータフォーマットはサーバー側で実行され、ネットワークを使用することは効果がありません。 +任意 ‘query\_id’ パラメータは、クエリID(任意の文字列)として渡すことができます。 詳細については、以下を参照してください “Settings, replace\_running\_query”. + +任意 ‘quota\_key’ パラメータとして渡すことができ、クォーターキー(切文字列). 詳細については、以下を参照してください “Quotas”. + +HTTPのインターフェースにより通外部データ(外部テンポラリテーブル)照会. 詳細については、以下を参照してください “External data for query processing”. + +## 応答バッファリング {#response-buffering} + +サーバー側で応答バッファリングを有効にできます。 その `buffer_size` と `wait_end_of_query` URLパラメータを提供しています。 + +`buffer_size` サーバーメモリ内のバッファーに結果内のバイト数を決定します。 結果本体がこのしきい値より大きい場合、バッファはHTTPチャネルに書き込まれ、残りのデータはHTTPチャネルに直接送信されます。 + +応答全体を確実にバッファリングするには、以下を設定します `wait_end_of_query=1`. この場合、メモリに格納されていないデータは、一時サーバーファイルにバッファーされます。 + +例えば: + +``` bash +$ curl -sS 'http://localhost:8123/?max_result_bytes=4000000&buffer_size=3000000&wait_end_of_query=1' -d 'SELECT toUInt8(number) FROM system.numbers LIMIT 9000000 FORMAT RowBinary' +``` + +使用バッファリングなどでクエリの処理エラーが発生した後は、応答コード、httpヘッダが送信されます。 この状況では、エラーメッセージが応答本文の最後に書き込まれ、クライアント側では、解析の段階でのみエラーを検出できます。 + +### クエリパラメータ {#cli-queries-with-parameters} + +を作成でき、クエリパラメータおよびパスの値から、対応するhttpリクエストパラメータ. 詳細については、 [CLIのパラメータを持つクエリ](cli.md#cli-queries-with-parameters). + +### 例えば {#example} + +``` bash +$ curl -sS "
    ?param_id=2¶m_phrase=test" -d "SELECT * FROM table WHERE int_column = {id:UInt8} and string_column = {phrase:String}" +``` + +## 所定のhttpス {#predefined_http_interface} + +ClickHouse支特定のクエリはHTTPインターフェース。 たとえば、次のように表にデータを書き込むことができます: + +``` bash +$ echo '(4),(5),(6)' | curl 'http://localhost:8123/?query=INSERT%20INTO%20t%20VALUES' --data-binary @- +``` + +ClickHouseは定義済みのHTTPインターフェイスもサポートしています。 [プロメテウス輸出](https://github.com/percona-lab/clickhouse_exporter). + +例えば: + +- まず、このセクションをサーバー構成ファイルに追加します: + + + +``` xml + + + /metrics + GET + + SELECT * FROM system.metrics LIMIT 5 FORMAT Template SETTINGS format_template_resultset = 'prometheus_template_output_format_resultset', format_template_row = 'prometheus_template_output_format_row', format_template_rows_between_delimiter = '\n' + + + +``` + +- Prometheus形式のデータのurlを直接要求できるようになりました: + + + +``` bash +curl -vvv 'http://localhost:8123/metrics' +* Trying ::1... +* Connected to localhost (::1) port 8123 (#0) +> GET /metrics HTTP/1.1 +> Host: localhost:8123 +> User-Agent: curl/7.47.0 +> Accept: */* +> +< HTTP/1.1 200 OK +< Date: Wed, 27 Nov 2019 08:54:25 GMT +< Connection: Keep-Alive +< Content-Type: text/plain; charset=UTF-8 +< X-ClickHouse-Server-Display-Name: i-tl62qd0o +< Transfer-Encoding: chunked +< X-ClickHouse-Query-Id: f39235f6-6ed7-488c-ae07-c7ceafb960f6 +< Keep-Alive: timeout=3 +< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0"} +< +# HELP "Query" "Number of executing queries" +# TYPE "Query" counter +"Query" 1 + +# HELP "Merge" "Number of executing background merges" +# TYPE "Merge" counter +"Merge" 0 + +# HELP "PartMutation" "Number of mutations (ALTER DELETE/UPDATE)" +# TYPE "PartMutation" counter +"PartMutation" 0 + +# HELP "ReplicatedFetch" "Number of data parts being fetched from replica" +# TYPE "ReplicatedFetch" counter +"ReplicatedFetch" 0 + +# HELP "ReplicatedSend" "Number of data parts being sent to replicas" +# TYPE "ReplicatedSend" counter +"ReplicatedSend" 0 + +* Connection #0 to host localhost left intact +``` + +この例からわかるように、 `` 設定で設定されています。xmlファイル、ClickHouseは、定義済みのタイプで受信したHTTP要求と一致します `` 一致が成功した場合、ClickHouseは対応する事前定義されたクエリを実行します。 + +さて `` 設定できます ``, ``, ``, `` と `` . + +## root\_handler {#root_handler} + +`` ルートパス要求の指定された内容を返します。 特定の戻りコンテンツは、 `http_server_default_response` 設定で。xmlだ 指定しない場合は、戻り値 **わかった** + +`http_server_default_response` 定義されておらず、HttpリクエストがClickHouseに送信されます。 結果は次のとおりです: + +``` xml + + + +``` + + $ curl 'http://localhost:8123' + Ok. + +`http_server_default_response` 定義され、HttpリクエストがClickHouseに送信されます。 結果は次のとおりです: + +``` xml +
<http_server_default_response>
  <![CDATA[<html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>]]>
</http_server_default_response>
```

    $ curl 'http://localhost:8123'
    <html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head>
    % + +## ping\_handler {#ping_handler} + +`` 現在のClickHouseサーバーの健康を調査するのに使用することができます。 がClickHouse HTTPサーバが正常にアクセスClickHouseを通じて `` 戻ります **わかった**. + +例えば: + +``` xml + + /ping + +``` + +``` bash +$ curl 'http://localhost:8123/ping' +Ok. +``` + +## replicas\_status\_handler {#replicas_status_handler} + +`` レプリカノードの状態を検出して返すために使用されます **わかった** レプリカノードに遅延がない場合。 遅延がある場合、リターン特定の遅延。 の値 `` カスタマイズ対応。 指定しない場合 ``、ClickHouseデフォルト設定 `` は **/replicas\_status**. + +例えば: + +``` xml + + /replicas_status + +``` + +遅延なしケース: + +``` bash +$ curl 'http://localhost:8123/replicas_status' +Ok. +``` + +遅延ケース: + +``` bash +$ curl 'http://localhost:8123/replicas_status' +db.stats: Absolute delay: 22. Relative delay: 22. +``` + +## predefined\_query\_handler {#predefined_query_handler} + +設定できます ``, ``, `` と `` で ``. + +`` は、HTTPリクエストのメソッド部分のマッチングを担当します。 `` 定義にの十分に合致します [方法](https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods) HTTPプロトコルで。 これはオプションの設定です。 構成ファイルで定義されていない場合は、HTTP要求のメソッド部分と一致しません + +`` HTTPリクエストのurl部分を照合する責任があります。 それはと互換性があります [RE2](https://github.com/google/re2)'の正規表現。 これはオプションの設定です。 構成ファイルで定義されていない場合は、HTTP要求のurl部分と一致しません + +`` HTTPリクエストのヘッダ部分に一致させる責任があります。 これはRE2の正規表現と互換性があります。 これはオプションの設定です。 構成ファイルで定義されていない場合は、HTTP要求のヘッダー部分と一致しません + +`` 値は、以下の定義済みのクエリです `` これは、HTTP要求が一致し、クエリの結果が返されたときにClickHouseによって実行されます。 これは必須の設定です。 + +`` 設定とquery\_params値の設定をサポートしています。 + +次の例では、次の値を定義します `max_threads` と `max_alter_threads` 設定、そしてクエリのテーブルから設定設定します。 + +例えば: + +``` xml + + + GET + + TEST_HEADER_VALUE + [^/]+)(/(?P[^/]+))?]]> + + [^/]+)(/(?P[^/]+))?]]> + + SELECT value FROM system.settings WHERE name = {name_1:String} + SELECT name, value FROM system.settings WHERE name = {name_2:String} + + + +``` + +``` bash +$ curl -H 'XXX:TEST_HEADER_VALUE' -H 'PARAMS_XXX:max_threads' 'http://localhost:8123/query_param_with_url/1/max_threads/max_alter_threads?max_threads=1&max_alter_threads=2' +1 +max_alter_threads 2 +``` + +!!! note "メモ" + 一つに ``、ワン `` のみをサポート `` 挿入タイプの。 + +## dynamic\_query\_handler {#dynamic_query_handler} + +`` より `` 増加した `` . + +ClickHouse抽出し実行値に対応する `` HTTPリクエストのurlの値。 +ClickHouseのデフォルト設定 `` は `/query` . 
これはオプションの設定です。 設定ファイルに定義がない場合、paramは渡されません。 + +この機能を試すために、この例では、max\_threadsとmax\_alter\_threadsの値を定義し、設定が正常に設定されたかどうかを照会します。 +違いは、 ``、クエリは、設定ファイルに書き込まれます。 しかし、 ``、クエリは、HTTP要求のparamの形で書かれています。 + +例えば: + +``` xml + + + + TEST_HEADER_VALUE_DYNAMIC + [^/]+)(/(?P[^/]+))?]]> + + query_param + + +``` + +``` bash +$ curl -H 'XXX:TEST_HEADER_VALUE_DYNAMIC' -H 'PARAMS_XXX:max_threads' 'http://localhost:8123/?query_param=SELECT%20value%20FROM%20system.settings%20where%20name%20=%20%7Bname_1:String%7D%20OR%20name%20=%20%7Bname_2:String%7D&max_threads=1&max_alter_threads=2¶m_name_2=max_alter_threads' +1 +2 +``` + +[元の記事](https://clickhouse.tech/docs/en/interfaces/http_interface/) diff --git a/docs/ja/interfaces/index.md b/docs/ja/interfaces/index.md deleted file mode 120000 index 61537763cac..00000000000 --- a/docs/ja/interfaces/index.md +++ /dev/null @@ -1 +0,0 @@ -../../en/interfaces/index.md \ No newline at end of file diff --git a/docs/ja/interfaces/index.md b/docs/ja/interfaces/index.md new file mode 100644 index 00000000000..544b39b8e57 --- /dev/null +++ b/docs/ja/interfaces/index.md @@ -0,0 +1,29 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_folder_title: Interfaces +toc_priority: 14 +toc_title: "\u5C0E\u5165" +--- + +# 界面 {#interfaces} + +ツつィツ姪"ツつ"ツ債ツづュツつケツづ債つアツつソツづァツつゥツづァ): + +- [HTTP](http.md)、文書化され、直接使いやすいです。 +- [ネイティブtcp](tcp.md) オーバーヘッドが少ない。 + +るケースがほとんどでの使用をお勧めの適切なツールや図書館での交流と直結。 公式にサポートしよyandexは次のとお: + +- [コマンドライン](cli.md) +- [JDBCドライバ](jdbc.md) +- [ODBCドライバ](odbc.md) +- [C++クライアント](cpp.md) + +また、clickhouseで作業するための幅広いサードパーティ製ライブラリもあります: + +- [クライアント](third-party/client_libraries.md) +- [統合](third-party/integrations.md) +- [ビジュアル](third-party/gui.md) + +[元の記事](https://clickhouse.tech/docs/en/interfaces/) diff --git a/docs/ja/interfaces/jdbc.md b/docs/ja/interfaces/jdbc.md deleted file mode 120000 index 27dfe0cfa5a..00000000000 --- a/docs/ja/interfaces/jdbc.md +++ /dev/null @@ -1 +0,0 @@ -../../en/interfaces/jdbc.md \ No newline at end of file diff --git a/docs/ja/interfaces/jdbc.md b/docs/ja/interfaces/jdbc.md new file mode 100644 index 00000000000..c86882e7023 --- /dev/null +++ b/docs/ja/interfaces/jdbc.md @@ -0,0 +1,15 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 22 +toc_title: "JDBC\u30C9\u30E9\u30A4\u30D0" +--- + +# JDBCドライバ {#jdbc-driver} + +- **[公式ドライバー](https://github.com/ClickHouse/clickhouse-jdbc)** +- サードパーティドライバ: + - [ツつィツ姪"ツつ"ツ債ツつケ](https://github.com/housepower/ClickHouse-Native-JDBC) + - [clickhouse4j](https://github.com/blynkkk/clickhouse4j) + +[元の記事](https://clickhouse.tech/docs/en/interfaces/jdbc/) diff --git a/docs/ja/interfaces/mysql.md b/docs/ja/interfaces/mysql.md deleted file mode 120000 index df728b35f80..00000000000 --- a/docs/ja/interfaces/mysql.md +++ /dev/null @@ -1 +0,0 @@ -../../en/interfaces/mysql.md \ No newline at end of file diff --git a/docs/ja/interfaces/mysql.md b/docs/ja/interfaces/mysql.md new file mode 100644 index 00000000000..9582fb0880b --- /dev/null +++ b/docs/ja/interfaces/mysql.md @@ -0,0 +1,49 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 20 +toc_title: "MySQL\u30A4\u30F3" +--- + +# MySQLイン {#mysql-interface} + +ツつィツ姪"ツつ"ツ債ツづュツつケ これは次の方法で有効にできます [mysql\_portgenericname](../operations/server_configuration_parameters/settings.md#server_configuration_parameters-mysql_port) 設定ファイルでの設定: + +``` xml +9004 +``` + +コマンドラインツールを使用した接続例 `mysql`: + +``` bash +$ 
mysql --protocol tcp -u default -P 9004 +``` + +接続が成功した場合の出力: + +``` text +Welcome to the MySQL monitor. Commands end with ; or \g. +Your MySQL connection id is 4 +Server version: 20.2.1.1-ClickHouse + +Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved. + +Oracle is a registered trademark of Oracle Corporation and/or its +affiliates. Other names may be trademarks of their respective +owners. + +Type 'help;' or '\h' for help. Type '\c' to clear the current input statement. + +mysql> +``` + +すべてのmysqlクライアントとの互換性のために、ユーザーパスワードを [ダブルSHA1](../operations/settings/settings_users.md#password_double_sha1_hex) 構成ファイルで。 +ユー [SHA256](../operations/settings/settings_users.md#password_sha256_hex) いくつかのクライアントは認証できません(mysqljsと古いバージョンのコマンドラインツールmysql)。 + +制限: + +- 作成問合せには対応していない + +- 一部のデータ型は文字列として送信されます + +[元の記事](https://clickhouse.tech/docs/en/interfaces/mysql/) diff --git a/docs/ja/interfaces/odbc.md b/docs/ja/interfaces/odbc.md deleted file mode 120000 index 5ff7610e061..00000000000 --- a/docs/ja/interfaces/odbc.md +++ /dev/null @@ -1 +0,0 @@ -../../en/interfaces/odbc.md \ No newline at end of file diff --git a/docs/ja/interfaces/odbc.md b/docs/ja/interfaces/odbc.md new file mode 100644 index 00000000000..d316f760536 --- /dev/null +++ b/docs/ja/interfaces/odbc.md @@ -0,0 +1,12 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 23 +toc_title: "ODBC\u30C9\u30E9\u30A4\u30D0" +--- + +# ODBCドライバ {#odbc-driver} + +- [公式ドライバー](https://github.com/ClickHouse/clickhouse-odbc). + +[元の記事](https://clickhouse.tech/docs/en/interfaces/odbc/) diff --git a/docs/ja/interfaces/tcp.md b/docs/ja/interfaces/tcp.md deleted file mode 120000 index a0529a856e4..00000000000 --- a/docs/ja/interfaces/tcp.md +++ /dev/null @@ -1 +0,0 @@ -../../en/interfaces/tcp.md \ No newline at end of file diff --git a/docs/ja/interfaces/tcp.md b/docs/ja/interfaces/tcp.md new file mode 100644 index 00000000000..d006d0da27a --- /dev/null +++ b/docs/ja/interfaces/tcp.md @@ -0,0 +1,13 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 18 +toc_title: "\u30CD\u30A4\u30C6\u30A3\u30D6\u30A4\u30F3\u30BF\u30D5\u30A7\u30FC\u30B9\ + (TCP)" +--- + +# ネイティブインタフェース(tcp) {#native-interface-tcp} + +ネイティブプロトコルは [コマンドライン](cli.md)、分散クエリ処理中のサーバー間通信のために、また、他のC++プログラムで。 残念ながら、ネイティブのClickHouseプロトコルには正式な仕様はまだありませんが、ClickHouseのソースコードから逆操作することができます(開始 [この辺りは](https://github.com/ClickHouse/ClickHouse/tree/master/dbms/Client) TCPトラフィックを傍受および分析することによって。 + +[元の記事](https://clickhouse.tech/docs/en/interfaces/tcp/) diff --git a/docs/ja/interfaces/third-party/client_libraries.md b/docs/ja/interfaces/third-party/client_libraries.md deleted file mode 120000 index 5320bbe1e16..00000000000 --- a/docs/ja/interfaces/third-party/client_libraries.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/interfaces/third-party/client_libraries.md \ No newline at end of file diff --git a/docs/ja/interfaces/third-party/client_libraries.md b/docs/ja/interfaces/third-party/client_libraries.md new file mode 100644 index 00000000000..bcd0ff895d5 --- /dev/null +++ b/docs/ja/interfaces/third-party/client_libraries.md @@ -0,0 +1,58 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 26 +toc_title: "\u30AF\u30E9\u30A4\u30A2\u30F3\u30C8" +--- + +# お客様の図書館からのサードパーティー {#client-libraries-from-third-party-developers} + +!!! 
warning "免責事項" + Yandexは **ない** を維持する図書館が以下の各号に記載されていない為にその質を評価する。 + +- Python + - [infi.clickhouse\_orm](https://github.com/Infinidat/infi.clickhouse_orm) + - [クリックハウスドライバ](https://github.com/mymarilyn/clickhouse-driver) + - [クリックハウス-顧客](https://github.com/yurial/clickhouse-client) + - [aiochclient](https://github.com/maximdanilchenko/aiochclient) +- PHP + - [smi2/phpclickhouse](https://packagist.org/packages/smi2/phpClickHouse) + - [8bitov/clickhouse-php-クライアント](https://packagist.org/packages/8bitov/clickhouse-php-client) + - [bozerkins/clickhouse-クライアント](https://packagist.org/packages/bozerkins/clickhouse-client) + - [simpod/clickhouse-クライアント](https://packagist.org/packages/simpod/clickhouse-client) + - [seva-コード/php-クリック-ハウス-クライアント](https://packagist.org/packages/seva-code/php-click-house-client) + - [SeasClick C++クライアント](https://github.com/SeasX/SeasClick) +- 行け + - [クリックハウス](https://github.com/kshvakov/clickhouse/) + - [ゴークリックハウス](https://github.com/roistat/go-clickhouse) + - [mailrugo-clickhouse](https://github.com/mailru/go-clickhouse) + - [golang-clickhouse](https://github.com/leprosus/golang-clickhouse) +- NodeJs + - [clickhouse(NodeJs)](https://github.com/TimonKK/clickhouse) + - [ノードクリックハウス](https://github.com/apla/node-clickhouse) +- Perl + - [perl-DBD-ClickHouse](https://github.com/elcamlost/perl-DBD-ClickHouse) + - [HTTP-ClickHouse](https://metacpan.org/release/HTTP-ClickHouse) + - [AnyEvent-ClickHouse](https://metacpan.org/release/AnyEvent-ClickHouse) +- Ruby + - [クリックハウス(ruby)](https://github.com/shlima/click_house) +- R + - [クリックハウス-r](https://github.com/hannesmuehleisen/clickhouse-r) + - [Rクリックハウス](https://github.com/IMSMWU/RClickhouse) +- Java + - [clickhouse-クライアント-java](https://github.com/VirtusAI/clickhouse-client-java) + - [クリックハウス-顧客](https://github.com/Ecwid/clickhouse-client) +- Scala + - [clickhouse-scala-クライアント](https://github.com/crobox/clickhouse-scala-client) +- Kotlin + - [AORM](https://github.com/TanVD/AORM) +- C\# + - [クリックハウス。ado](https://github.com/killwort/ClickHouse-Net) + - [クリックハウス。お客様](https://github.com/DarkWanderer/ClickHouse.Client) + - [ClickHouse.Net](https://github.com/ilyabreev/ClickHouse.Net) +- エリクサー + - [clickhousex](https://github.com/appodeal/clickhousex/) +- Nim + - [nim-clickhouse](https://github.com/leonardoce/nim-clickhouse) + +[元の記事](https://clickhouse.tech/docs/en/interfaces/third-party/client_libraries/) diff --git a/docs/ja/interfaces/third-party/gui.md b/docs/ja/interfaces/third-party/gui.md deleted file mode 120000 index ef7bc904197..00000000000 --- a/docs/ja/interfaces/third-party/gui.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/interfaces/third-party/gui.md \ No newline at end of file diff --git a/docs/ja/interfaces/third-party/gui.md b/docs/ja/interfaces/third-party/gui.md new file mode 100644 index 00000000000..46deefb7919 --- /dev/null +++ b/docs/ja/interfaces/third-party/gui.md @@ -0,0 +1,152 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 28 +toc_title: "\u30D3\u30B8\u30E5\u30A2\u30EB" +--- + +# サードパー {#visual-interfaces-from-third-party-developers} + +## オープンソース {#open-source} + +### Tabix {#tabix} + +のclickhouseのための網インターフェイス [Tabix](https://github.com/tabixio/tabix) プロジェクト。 + +特徴: + +- 追加のソフトウェアをインストールする必要なく、ブラウザから直接clickhouseで動作します。 +- 構文強調表示のクエリエディター。 +- コマンドの自動補完。 +- クエリ実行のグラフ分析のためのツール。 +- 配色オプション。 + +[Tabixドキュメント](https://tabix.io/doc/). 
+ +### HouseOps {#houseops} + +[HouseOps](https://github.com/HouseOps/HouseOps) OSX、Linux、Windows用のUI/IDEです。 + +特徴: + +- 構文の強調表示を使用したクエリビルダー。 テーブルまたはjsonビューで応答を表示します。 +- CSVまたはJSONとしてエクスポートクエリ結果。 +- 説明付きのプロセスのリスト。 書き込みモード。 停止する能力 (`KILL`)プロセス。 +- データベースグラフ すべてのテーブルとその列に追加情報を表示します。 +- 列サイズのクイックビュー。 +- サーバー構成。 + +次の機能は、開発のために計画されています: + +- データベース管理。 +- ユーザー管理。 +- リアルタイムデータ分析。 +- クラスタ監視。 +- クラスター管理。 +- 監視レプリケートおよびkafkaテーブル。 + +### 灯台 {#lighthouse} + +[灯台](https://github.com/VKCOM/lighthouse) ClickHouseのための軽量のwebインターフェイスです。 + +特徴: + +- テーブルのリストのフィルタリング、メタデータを指すものとします。 +- テーブルのプレビューとフィルタです。 +- 読み取り専用クエリの実行。 + +### Redash {#redash} + +[Redash](https://github.com/getredash/redash) めるためのプラットフォームのデータを可視化する。 + +サポート、多数のデータソースを含むclickhouse,redash参加できる結果のクエリからデータソースへの最終データセットである。 + +特徴: + +- クエリの強力なエディタ。 +- データベ +- 視覚化ツールを使用すると、さまざまな形式のデータを表現できます。 + +### デービーバーname {#dbeaver} + +[デービーバーname](https://dbeaver.io/) -ユニバーサルデスクトップのデータベースのクライアントClickHouseます。 + +特徴: + +- 構文ハイライトと自動補完によるクエリ開発。 +- テーブルリフィルとメタデータを検索する +- 表データプレビュー。 +- フルテキスト検索。 + +### クリックハウス-cli {#clickhouse-cli} + +[クリックハウス-cli](https://github.com/hatarist/clickhouse-cli) Python3で書かれたClickHouseの代替コマンドラインクライアントです。 + +特徴: + +- 自動補完。 +- クエリとデータ出力の構文強調表示。 +- データ出力のためのポケベルサポート。 +- カスタムpostgresqlのようなコマンド。 + +### clickhouse-flamegraph {#clickhouse-flamegraph} + +[clickhouse-flamegraph](https://github.com/Slach/clickhouse-flamegraph) 視覚化する専門にされた用具はある `system.trace_log` として [flamegraph](http://www.brendangregg.com/flamegraphs.html). + +## 商業 {#commercial} + +### データグリップ {#datagrip} + +[データグリップ](https://www.jetbrains.com/datagrip/) JetbrainsのデータベースIDEで、ClickHouse専用サポートがあります。 PyCharm、IntelliJ IDEA、GoLand、PhpStormなどの他のIntelliJベースのツールにも組み込まれています。 + +特徴: + +- 非常に高速なコード補完。 +- ClickHouse構文の強調表示。 +- ClickHouse固有の機能のサポート。 +- データエディタ。 +- リファクタリング。 +- 検索とナビゲーション。 + +### YandexのDataLens {#yandex-datalens} + +[YandexのDataLens](https://cloud.yandex.ru/services/datalens) データの可視化と分析のサービスです。 + +特徴: + +- シンプルな棒グラフから複雑なダッシュボードまで、幅広い視覚化が可能です。 +- ダッシュボードは一般公開されます。 +- ClickHouseを含む複数のデータソースのサポート。 +- ClickHouseに基づく具体化されたデータのための貯蔵。 + +データレンスは [無料で利用可能](https://cloud.yandex.com/docs/datalens/pricing) 商業使用のための低負荷プロジェクトのため。 + +- [DataLens書](https://cloud.yandex.com/docs/datalens/). +- [Tutorial](https://cloud.yandex.com/docs/solutions/datalens/data-from-ch-visualization) ClickHouseデータベースからデータを視覚化する。 + +### Holisticsソフトウェア {#holistics-software} + +[ホリスティクス](https://www.holistics.io/) フルスタックのデータプラットフォームは、ビジネスインツールです。 + +特徴: + +- 自動メール、slackやグーグルシートのスケジュール。 +- SQLエディタと可視化、バージョン管理の自動完了し、再利用可能なクエリー部品、ダイナミックフィルター. +- Iframe経由のレポートとダッシュボードの埋め込み分析。 +- データ準備およびetl機能。 +- SQLデータモデリング支援のためのリレーショナルマッピングのデータです。 + +### 見物人 {#looker} + +[見物人](https://looker.com) ClickHouseを含む50以上のデータベース方言をサポートするdata platform and business intelligenceツールです。 LookerはSaaSプラットフォームとして利用でき、セルフホスト型です。 ユーザーが利用できLookerる場合は、vpnクライアントの直接探索、データの構築の可視化とダッシュボード、スケジュール、識農場管理について学んでいます。 Lookerのツールを埋め込むためのチャプターでは、これらの機能の他のアプリケーション、およびAPI +統合データを、他のアプリケーション + +特徴: + +- 簡単-アジャイル開発をlookml、言語に対応したキュレーション + [データモデル](https://looker.com/platform/data-modeling) レポート作成者とエンドユーザーをサポートする。 +- Lookerのを経由して強力なワークフローの統合 [データ操作](https://looker.com/platform/actions). 
+ +[LookerでClickHouseを設定する方法。](https://docs.looker.com/setup-and-management/database-config/clickhouse) + +[元の記事](https://clickhouse.tech/docs/en/interfaces/third-party/gui/) diff --git a/docs/ja/interfaces/third-party/index.md b/docs/ja/interfaces/third-party/index.md deleted file mode 120000 index 7bce45929ce..00000000000 --- a/docs/ja/interfaces/third-party/index.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/interfaces/third-party/index.md \ No newline at end of file diff --git a/docs/ja/interfaces/third-party/index.md b/docs/ja/interfaces/third-party/index.md new file mode 100644 index 00000000000..5b94c79ad35 --- /dev/null +++ b/docs/ja/interfaces/third-party/index.md @@ -0,0 +1,8 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_folder_title: Third-Party +toc_priority: 24 +--- + + diff --git a/docs/ja/interfaces/third-party/integrations.md b/docs/ja/interfaces/third-party/integrations.md deleted file mode 120000 index 9cd0a21e676..00000000000 --- a/docs/ja/interfaces/third-party/integrations.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/interfaces/third-party/integrations.md \ No newline at end of file diff --git a/docs/ja/interfaces/third-party/integrations.md b/docs/ja/interfaces/third-party/integrations.md new file mode 100644 index 00000000000..7f5bfb93325 --- /dev/null +++ b/docs/ja/interfaces/third-party/integrations.md @@ -0,0 +1,96 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 27 +toc_title: "\u7D71\u5408" +--- + +# サードパーティ開発者からの統合ライブラリ {#integration-libraries-from-third-party-developers} + +!!! warning "免責事項" + Yandexは **ない** 以下に示すツールとライブラリを維持し、その品質を保証するための広範なテストを行っていません。 + +## インフラ製品 {#infrastructure-products} + +- リレーショナルデータベース管理システム + - [MySQL](https://www.mysql.com) + - [Proxysqlcomment](https://github.com/sysown/proxysql/wiki/ClickHouse-Support) + - [clickhouse-mysql-data-reader](https://github.com/Altinity/clickhouse-mysql-data-reader) + - [horgh-replicator](https://github.com/larsnovikov/horgh-replicator) + - [PostgreSQL](https://www.postgresql.org) + - [clickhousedb\_fdw](https://github.com/Percona-Lab/clickhousedb_fdw) + - [infi.clickhouse\_fdw](https://github.com/Infinidat/infi.clickhouse_fdw) (用途 [infi.clickhouse\_orm](https://github.com/Infinidat/infi.clickhouse_orm)) + - [pg2ch](https://github.com/mkabilov/pg2ch) + - [clickhouse\_fdw](https://github.com/adjust/clickhouse_fdw) + - [MSSQL](https://en.wikipedia.org/wiki/Microsoft_SQL_Server) + - [ClickHouseMigrator](https://github.com/zlzforever/ClickHouseMigrator) +- メッセージキュ + - [カフカname](https://kafka.apache.org) + - [clickhouse\_sinker](https://github.com/housepower/clickhouse_sinker) (用途 [クライアントへ](https://github.com/kshvakov/clickhouse/)) +- オブジェクト保存 + - [S3](https://en.wikipedia.org/wiki/Amazon_S3) + - [clickhouse-バックアップ](https://github.com/AlexAkulov/clickhouse-backup) +- 容器の協奏 + - [Kubernetes](https://kubernetes.io) + - [クリックハウス-演算子](https://github.com/Altinity/clickhouse-operator) +- 構成管理 + - [人形](https://puppet.com) + - [innogames/clickhouse](https://forge.puppet.com/innogames/clickhouse) + - [mfedotov/clickhouse](https://forge.puppet.com/mfedotov/clickhouse) +- 監視 + - [黒鉛](https://graphiteapp.org) + - [グラファウス](https://github.com/yandex/graphouse) + - [カーボンクリックハウス](https://github.com/lomik/carbon-clickhouse) + + - [黒鉛-clickhouse](https://github.com/lomik/graphite-clickhouse) + - [graphite-ch-optimizer](https://github.com/innogames/graphite-ch-optimizer) -staledパーティションを最適化します。 
[\*GraphiteMergeTree](../../engines/table_engines/mergetree_family/graphitemergetree.md#graphitemergetree) が対象で、[ロールアップ構成](../../engines/table_engines/mergetree_family/graphitemergetree.md#rollup-configuration) のルールが適用できる場合に機能します
+    - [Grafana](https://grafana.com/)
+        - [clickhouse-grafana](https://github.com/Vertamedia/clickhouse-grafana)
+    - [Prometheus](https://prometheus.io/)
+        - [clickhouse\_exporter](https://github.com/f1yegor/clickhouse_exporter)
+        - [PromHouse](https://github.com/Percona-Lab/PromHouse)
+        - [clickhouse\_exporter](https://github.com/hot-wifi/clickhouse_exporter) ([Goクライアント](https://github.com/kshvakov/clickhouse/) を使用)
+    - [Nagios](https://www.nagios.org/)
+        - [check\_clickhouse](https://github.com/exogroup/check_clickhouse/)
+        - [check\_clickhouse.py](https://github.com/innogames/igmonplugins/blob/master/src/check_clickhouse.py)
+    - [Zabbix](https://www.zabbix.com)
+        - [clickhouse-zabbix-template](https://github.com/Altinity/clickhouse-zabbix-template)
+    - [Sematext](https://sematext.com/)
+        - [clickhouse integration](https://github.com/sematext/sematext-agent-integrations/tree/master/clickhouse)
+- ログ記録
+    - [rsyslog](https://www.rsyslog.com/)
+        - [omclickhouse](https://www.rsyslog.com/doc/master/configuration/modules/omclickhouse.html)
+    - [fluentd](https://www.fluentd.org)
+        - [loghouse](https://github.com/flant/loghouse) ([Kubernetes](https://kubernetes.io) 向け)
+    - [logagent](https://www.sematext.com/logagent)
+        - [logagent output-plugin-clickhouse](https://sematext.com/docs/logagent/output-plugin-clickhouse/)
+- ジオ
+    - [MaxMind](https://dev.maxmind.com/geoip/)
+        - [clickhouse-maxmind-geoip](https://github.com/AlexeyKupershtokh/clickhouse-maxmind-geoip)
+
+## プログラミング言語のエコシステム {#programming-language-ecosystems}
+
+- Python
+    - [SQLAlchemy](https://www.sqlalchemy.org)
+        - [sqlalchemy-clickhouse](https://github.com/cloudflare/sqlalchemy-clickhouse) ([infi.clickhouse\_orm](https://github.com/Infinidat/infi.clickhouse_orm) を使用)
+    - [pandas](https://pandas.pydata.org)
+        - [pandahouse](https://github.com/kszucs/pandahouse)
+- R
+    - [dplyr](https://db.rstudio.com/dplyr/)
+        - [RClickhouse](https://github.com/IMSMWU/RClickhouse) ([clickhouse-cpp](https://github.com/artpaul/clickhouse-cpp) を使用)
+- Java
+    - [Hadoop](http://hadoop.apache.org)
+        - [clickhouse-hdfs-loader](https://github.com/jaykelin/clickhouse-hdfs-loader) ([JDBC](../../sql_reference/table_functions/jdbc.md) を使用)
+- Scala
+    - [Akka](https://akka.io)
+        - [clickhouse-scala-client](https://github.com/crobox/clickhouse-scala-client)
+- C\#
+    - [ADO.NET](https://docs.microsoft.com/en-us/dotnet/framework/data/adonet/ado-net-overview)
+        - [ClickHouse.Ado](https://github.com/killwort/ClickHouse-Net)
+        - [ClickHouse.Client](https://github.com/DarkWanderer/ClickHouse.Client)
+        - [ClickHouse.Net](https://github.com/ilyabreev/ClickHouse.Net)
+        - [ClickHouse.Net.Migrations](https://github.com/ilyabreev/ClickHouse.Net.Migrations)
+- Elixir
+    - [Ecto](https://github.com/elixir-ecto/ecto)
+        - [clickhouse\_ecto](https://github.com/appodeal/clickhouse_ecto)
+
+[元の記事](https://clickhouse.tech/docs/en/interfaces/third-party/integrations/)
diff --git a/docs/ja/interfaces/third-party/proxy.md b/docs/ja/interfaces/third-party/proxy.md
deleted file mode 120000
index 877f1b51dab..00000000000
--- a/docs/ja/interfaces/third-party/proxy.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/interfaces/third-party/proxy.md
\ No newline at end of file
diff --git a/docs/ja/interfaces/third-party/proxy.md b/docs/ja/interfaces/third-party/proxy.md
new file mode 100644
index 00000000000..7305a8e7f22
--- /dev/null
+++ 
b/docs/ja/interfaces/third-party/proxy.md @@ -0,0 +1,46 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 29 +toc_title: "\u30D7\u30ED\u30AD\u30B7" +--- + +# サードパーティ開発者のプロキシサーバー {#proxy-servers-from-third-party-developers} + +## chproxy {#chproxy} + +[chproxy](https://github.com/Vertamedia/chproxy)、ClickHouseデータベース用のHTTPプロキシとロードバランサです。 + +特徴: + +- ユーザー単位のルーティ +- 柔軟な制限。 +- 自動ssl証明書の更新。 + +Goで実装されています。 + +## KittenHouse {#kittenhouse} + +[KittenHouse](https://github.com/VKCOM/kittenhouse) することが重要である現地代理人とClickHouse、アプリケーションサーバの場合でもバッファに挿入データとお客様側です。 + +特徴: + +- メモリ内およびディスク上のデータバッファリング。 +- テーブル単位のルーティング。 +- 負荷分散とヘルスチェック。 + +Goで実装されています。 + +## クリックハウス-バルク {#clickhouse-bulk} + +[クリックハウス-バルク](https://github.com/nikepan/clickhouse-bulk) 簡単なClickHouseの挿入物のコレクターはある。 + +特徴: + +- グループの要求送信によるしきい値または間隔で出ています。 +- 複数のリモートサーバー。 +- 基本認証。 + +Goで実装されています。 + +[元の記事](https://clickhouse.tech/docs/en/interfaces/third-party/proxy/) diff --git a/docs/ja/introduction/adopters.md b/docs/ja/introduction/adopters.md deleted file mode 120000 index 659153d5f6c..00000000000 --- a/docs/ja/introduction/adopters.md +++ /dev/null @@ -1 +0,0 @@ -../../en/introduction/adopters.md \ No newline at end of file diff --git a/docs/ja/introduction/adopters.md b/docs/ja/introduction/adopters.md new file mode 100644 index 00000000000..ffc938c9eb9 --- /dev/null +++ b/docs/ja/introduction/adopters.md @@ -0,0 +1,82 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 8 +toc_title: "\u30A2\u30FC\u30EA\u30FC\u30A2\u30C0\u30D7\u30BF\u30FC" +--- + +# ClickHouseアーリーアダプター {#clickhouse-adopters} + +!!! warning "免責事項" + 以下のリストを使用している企業のclickhouseとその成功を組み立てから共に、これとは異なる場合がござ流を実現しました。 あなたの会社でclickhouseを採用するという話を共有していただければ幸いです [リストに追加します](https://github.com/ClickHouse/ClickHouse/edit/master/docs/en/introduction/adopters.md) しかし、そうすることによってNDAの問題がないことを確認してください。 他の会社からの出版物との更新を提供することはまた有用である。 + +| 会社 | 産業 | Usecase | 集りのサイズ | (Un)圧縮データサイズ\* | 参照 | +|-----------------------------------------------------------------------|------------------------------|----------------------|-----------------------------------------------|--------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| [2gis](https://2gis.ru) | マップ | 監視 | — | — | [トークでロシア、月2019](https://youtu.be/58sPkXfq6nw) | +| [Aloha Browser](https://alohabrowser.com/) | モバイルアプリ | ブラウザバックエンド | — | — | [ロシア語のスライド、2019](https://github.com/yandex/clickhouse-presentations/blob/master/meetup22/aloha.pdf) | +| [アマデウス](https://amadeus.com/) | 旅行 | Analytics | — | — | [プレスリリース、april2018](https://www.altinity.com/blog/2018/4/5/amadeus-technologies-launches-investment-and-insights-tool-based-on-machine-learning-and-strategy-algorithms) | +| [Appsflyer](https://www.appsflyer.com) | モバイル分析 | 主な製品 | — | — | [トークでロシア、月2019](https://www.youtube.com/watch?v=M3wbRlcpBbY) | +| [ArenaData](https://arenadata.tech/) | データ基盤 | 主な製品 | — | — | [スライドでロシア、december2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup38/indexes.pdf) | +| [Badoo](https://badoo.com) | デート | Timeseries | — | — | [スライドでロシア、december2019](https://presentations.clickhouse.tech/meetup38/forecast.pdf) | +| [ベノク](https://www.benocs.com/) | ネットワークテレメトリと分析 | 主な製品 | — | — | 
[英語でのスライド,十月2017](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup9/lpm.pdf) | +| [ブルームバーグ](https://www.bloomberg.com/) | 金融、メディア | 監視 | 102サーバ | — | [スライド,もっと2018](https://www.slideshare.net/Altinity/http-analytics-for-6m-requests-per-second-using-clickhouse-by-alexander-bocharov) | +| [Bloxy](https://bloxy.info) | ブロック鎖 | Analytics | — | — | [ロシア語のスライド、august2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/4_bloxy.pptx) | +| `Dataliance/UltraPower` | テレコム | Analytics | — | — | [スライドで中国、月2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup12/telecom.pdf) | +| [CARTO](https://carto.com/) | ビジネス情報 | ジオ分析 | — | — | [クリックハウスによる地理空間処理](https://carto.com/blog/geospatial-processing-with-clickhouse/) | +| [CERN](http://public.web.cern.ch/public/) | 研究 | 実験 | — | — | [プレスリリース、april2012](https://www.yandex.com/company/press_center/press_releases/2012/2012-04-10/) | +| [Cisco](http://cisco.com/) | ネットワーク | トラフィック分析 | — | — | [雷トーク、月2019](https://youtu.be/-hI1vDR2oPY?t=5057) | +| [シタデル証券](https://www.citadelsecurities.com/) | 金融 | — | — | — | [貢献、月2019](https://github.com/ClickHouse/ClickHouse/pull/4774) | +| [シティモービルcity in germany](https://city-mobil.ru) | タクシー | Analytics | — | — | [ロシアのブログ記事、march2020](https://habr.com/en/company/citymobil/blog/490660/) | +| [コンテンツスクエア](https://contentsquare.com) | ウェブ分析 | 主な製品 | — | — | [フランス語でのブログ記事,十一月2018](http://souslecapot.net/2018/11/21/patrick-chatain-vp-engineering-chez-contentsquare-penser-davantage-amelioration-continue-que-revolution-constante/) | +| [Cloudflare](https://cloudflare.com) | CDN | トラフィック分析 | 36サーバ | — | [ブログ記事,もっと2017](https://blog.cloudflare.com/how-cloudflare-analyzes-1m-dns-queries-per-second/), [ブログ記事,行進2018](https://blog.cloudflare.com/http-analytics-for-6m-requests-per-second-using-clickhouse/) | +| [コルネ](https://coru.net/) | Analytics | 主な製品 | — | — | [英語のスライド、april2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup21/predictive_models.pdf) | +| [CraiditX 氪信](https://creditx.com) | ファイナンスai | 分析 | — | — | [スライドで英語、月2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/udf.pptx) | +| [認定マーク/storetail](https://www.criteo.com/) | 小売り | 主な製品 | — | — | [英語でのスライド,十月2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup18/3_storetail.pptx) | +| [ドイツ銀行](https://db.com) | 金融 | BI分析 | — | — | [スライドで英語、月2019](https://bigdatadays.ru/wp-content/uploads/2019/10/D2-H3-3_Yakunin-Goihburg.pdf) | +| [ディーバ-e](https://www.diva-e.com) | デジタルコンサル | 主な製品 | — | — | [英語でのスライド、2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup29/ClickHouse-MeetUp-Unusual-Applications-sd-2019-09-17.pdf) | +| [Exness](https://www.exness.com) | 取引 | 指標、ロギング | — | — | [ロシア語で話す、2019](https://youtu.be/_rpU-TvSfZ8?t=3215) | +| [ジュニエ](https://geniee.co.jp) | 広告ネットワーク | 主な製品 | — | — | [日本語でのブログ記事,七月2017](https://tech.geniee.co.jp/entry/2017/07/20/160100) | +| [HUYA](https://www.huya.com/) | ビデオ流出 | Analytics | — | — | [スライドで中国、月2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/7.%20ClickHouse万亿数据分析实践%20李本旺(sundy-li)%20虎牙.pdf) | +| [Idealista](https://www.idealista.com) | 不動産 | Analytics | — | — | [英語でのブログ記事,四月2019](https://clickhouse.yandex/blog/en/clickhouse-meetup-in-madrid-on-april-2-2019) | +| [インフォビスタ](https://www.infovista.com/) | ネットワーク | Analytics | — | — | 
[スライドで英語、月2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup30/infovista.pdf) | +| [InnoGames](https://www.innogames.com) | ゲーム | 指標、ロギング | — | — | [スライドでロシア、2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/graphite_and_clickHouse.pdf) | +| [インテグロスcolor](https://integros.com) | Platformビデオサービス | Analytics | — | — | [ロシア語のスライド、2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup22/strategies.pdf) | +| [Kodiakデータ](https://www.kodiakdata.com/) | 雲 | 主な製品 | — | — | [Engishでスライド,四月2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup13/kodiak_data.pdf) | +| [Konturgenericname](https://kontur.ru) | ソフトウェア開発 | 指標 | — | — | [トークでロシア、月2018](https://www.youtube.com/watch?v=U4u4Bd0FtrY) | +| [LifeStreet](https://lifestreet.com/) | 広告ネットワーク | 主な製品 | 75サーバー(レプリカ3) | 5.27PiB | [ブログ記事でロシア、月2017](https://habr.com/en/post/322620/) | +| [Mail.ru クラウドソリューション](https://mcs.mail.ru/) | クラウドサービス | 主な製品 | — | — | [ロシア語でのclickhouseインスタンスの実行](https://mcs.mail.ru/help/db-create/clickhouse#) | +| [MessageBird](https://www.messagebird.com) | 電気通信 | 統計 | — | — | [スライドで英語、月2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup20/messagebird.pdf) | +| [MGID](https://www.mgid.com/) | 広告ネットワーク | ウェブ分析 | — | — | [ロシア語での分析dbms clickhouseの実装経験](http://gs-studio.com/news-about-it/32777----clickhouse---c) | +| [OneAPM](https://www.oneapm.com/) | モニタリングとデータ解析 | 主な製品 | — | — | [スライドで中国、月2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/8.%20clickhouse在OneAPM的应用%20杜龙.pdf) | +| [Pragma革新](http://www.pragma-innovation.fr/) | テレメトリとビッグデータ分析 | 主な製品 | — | — | [英語でのスライド,十月2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup18/4_pragma_innovation.pdf) | +| [QINGCLOUD](https://www.qingcloud.com/) | クラウドサービス | 主な製品 | — | — | [スライドで中国、月2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/4.%20Cloud%20%2B%20TSDB%20for%20ClickHouse%20张健%20QingCloud.pdf) | +| [Qratorgenericname](https://qrator.net) | DDoS保護 | 主な製品 | — | — | [ブログ記事,行進2019](https://blog.qrator.net/en/clickhouse-ddos-mitigation_37/) | +| [北京パーセント情報技術有限公司、株式会社](https://www.percent.cn/) | Analytics | 主な製品 | — | — | [スライドで中国、月2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup24/4.%20ClickHouse万亿数据双中心的设计与实践%20.pdf) | +| [ランブラー](https://rambler.ru) | インターネットサービス | Analytics | — | — | [ロシア語で話す、april2018](https://medium.com/@ramblertop/разработка-api-clickhouse-для-рамблер-топ-100-f4c7e56f3141) | +| [Tencent](https://www.tencent.com) | メール | ログ記録 | — | — | [トークで中国、月2019](https://youtu.be/T-iVQRuw-QY?t=5050) | +| [交通星](https://trafficstars.com/) | 広告ネットワーク | — | — | — | [ロシア語のスライド,may2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup15/lightning/ninja.pdf) | +| [S7エアラインズ](https://www.s7.ru) | 航空会社 | 指標、ロギング | — | — | [ロシア語で話す、2019](https://www.youtube.com/watch?v=nwG68klRpPg&t=15s) | +| [セムルシ](https://www.semrush.com/) | 販売 | 主な製品 | — | — | [ロシア語のスライド、august2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/5_semrush.pdf) | +| [サイリウム社](https://www.scireum.de/) | eコマース | 主な製品 | — | — | [ドイツ語で話す、february2020](https://www.youtube.com/watch?v=7QWAn5RbyR4) | +| [セントリー](https://sentry.io/) | ソフトウェア開発者 | 製品のバックエンド | — | — | [英語でのブログ記事,もっと2019](https://blog.sentry.io/2019/05/16/introducing-snuba-sentrys-new-search-infrastructure) | +| [SGK](http://www.sgk.gov.tr/wps/portal/sgk/tr) | 
政府の社会保障 | Analytics | — | — | [スライドで英語、月2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup35/ClickHouse%20Meetup-Ramazan%20POLAT.pdf) | +| [seo.do](https://seo.do/) | Analytics | 主な製品 | — | — | [スライドで英語、月2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup35/CH%20Presentation-%20Metehan%20Çetinkaya.pdf) | +| [シーナ](http://english.sina.com/index.html) | ニュース | — | — | — | [スライドで中国、月2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/6.%20ClickHouse最佳实践%20高鹏_新浪.pdf) | +| [SMI2](https://smi2.ru/) | ニュース | Analytics | — | — | [ブログ記事でロシア、月2017](https://habr.com/ru/company/smi2/blog/314558/) | +| [Splunk](https://www.splunk.com/) | ビジネス分析 | 主な製品 | — | — | [スライドで英語、月2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup12/splunk.pdf) | +| [Spotify](https://www.spotify.com) | 音楽 | 実験 | — | — | [スライド,七月2018](https://www.slideshare.net/glebus/using-clickhouse-for-experimentation-104247173) | +| [Tencent](https://www.tencent.com) | ビッグデータ | データ処理 | — | — | [スライドで中国、月2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/5.%20ClickHouse大数据集群应用_李俊飞腾讯网媒事业部.pdf) | +| [Uber](https://www.uber.com) | タクシー | ログ記録 | — | — | [スライド,二月2020](https://presentations.clickhouse.tech/meetup40/uber.pdf) | +| [VKontakte](https://vk.com) | 社会的ネットワーク | 統計、ロギング | — | — | [ロシア語のスライド、august2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/3_vk.pdf) | +| [Wisebits](https://wisebits.com/) | ITソリューション | Analytics | — | — | [ロシア語のスライド、2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup22/strategies.pdf) | +| [Xiaoxinの技術。](https://www.xiaoheiban.cn/) | 教育 | 共通の目的 | — | — | [スライドで英語、月2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/sync-clickhouse-with-mysql-mongodb.pptx) | +| [Ximalaya](https://www.ximalaya.com/) | オーディオ共有 | OLAP | — | — | [スライドで英語、月2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/ximalaya.pdf) | +| [Yandexクラウド](https://cloud.yandex.ru/services/managed-clickhouse) | パブリック | 主な製品 | — | — | [トークでロシア、december2019](https://www.youtube.com/watch?v=pgnak9e_E0o) | +| [YandexのDataLens](https://cloud.yandex.ru/services/datalens) | ビジネス情報 | 主な製品 | — | — | [スライドでロシア、december2019](https://presentations.clickhouse.tech/meetup38/datalens.pdf) | +| [Yandexの市場](https://market.yandex.ru/) | eコマース | 指標、ロギング | — | — | [ロシア語で話す、january2019](https://youtu.be/_l1qP0DyBcA?t=478) | +| [Yandex Metrica](https://metrica.yandex.com) | ウェブ分析 | 主な製品 | 360サーバを一つのクラスター1862サーバーの一部 | 66.41PiB/5.68PiB | [スライド,二月2020](https://presentations.clickhouse.tech/meetup40/introduction/#13) | +| [ЦВТ](https://htc-cs.ru/) | ソフトウェア開発 | 指標、ロギング | — | — | [ブログ記事,月2019,ロシア語で](https://vc.ru/dev/62715-kak-my-stroili-monitoring-na-prometheus-clickhouse-i-elk) | +| [МКБ](https://mkb.ru/) | 銀行 | Webシステムの監視 | — | — | [スライドでロシア、2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/mkb.pdf) | +| [金数据](https://jinshuju.net) | BI分析 | 主な製品 | — | — | [スライドで中国、月2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup24/3.%20金数据数据架构调整方案Public.pdf) | + +[元の記事](https://clickhouse.tech/docs/en/introduction/adopters/) diff --git a/docs/ja/introduction/index.md b/docs/ja/introduction/index.md deleted file mode 120000 index e38b0acddbb..00000000000 --- a/docs/ja/introduction/index.md +++ /dev/null @@ -1 +0,0 @@ -../../en/introduction/index.md \ No newline at end of file 
diff --git a/docs/ja/introduction/index.md b/docs/ja/introduction/index.md
new file mode 100644
index 00000000000..317489d277b
--- /dev/null
+++ b/docs/ja/introduction/index.md
@@ -0,0 +1,8 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_folder_title: Introduction
+toc_priority: 1
+---
+
+
diff --git a/docs/ja/operations/access_rights.md b/docs/ja/operations/access_rights.md
deleted file mode 120000
index 73463029569..00000000000
--- a/docs/ja/operations/access_rights.md
+++ /dev/null
@@ -1 +0,0 @@
-../../en/operations/access_rights.md
\ No newline at end of file
diff --git a/docs/ja/operations/access_rights.md b/docs/ja/operations/access_rights.md
new file mode 100644
index 00000000000..03948996744
--- /dev/null
+++ b/docs/ja/operations/access_rights.md
@@ -0,0 +1,113 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 48
+toc_title: "\u30A2\u30AF\u30BB\u30B9\u6A29"
+---
+
+# アクセス権 {#access-rights}
+
+ユーザーとアクセス権はユーザー設定で構成します。これは通常 `users.xml` です。
+
+ユーザーは `users` セクションに記述します。以下は `users.xml` ファイルの断片です:
+
+``` xml
+<!-- ユーザーとACL -->
+<users>
+    <!-- ユーザー名が渡されない場合は 'default' ユーザーが使用されます。 -->
+    <default>
+        <password></password>
+
+        <networks incl="networks" replace="replace">
+        </networks>
+
+        <profile>default</profile>
+
+        <quota>default</quota>
+    </default>
+
+    <web>
+        <password></password>
+        <networks incl="networks" replace="replace">
+        </networks>
+        <profile>web</profile>
+        <quota>default</quota>
+
+        <allow_databases>
+            <database>test</database>
+        </allow_databases>
+        <allow_dictionaries>
+            <dictionary>test</dictionary>
+        </allow_dictionaries>
+    </web>
+</users>
+```
+
+ここでは `default` と `web` の二人のユーザーが宣言されています。`web` ユーザーは私たちが別途追加したものです。
+
+`default` ユーザーは、ユーザー名が渡されない場合に選択されます。また、サーバーまたはクラスターの構成で `user` と `password` が指定されていない場合([分散](../engines/table_engines/special/distributed.md) エンジンのセクションを参照)、`default` ユーザーは分散クエリ処理にも使用されます。
+
+クラスター内のサーバー間で情報を交換するために使用されるユーザーには、実質的な制限やクォータを設定してはいけません。さもないと分散クエリが失敗します。
+
+パスワードは、平文(非推奨)またはSHA-256で指定します。ハッシュにソルトは付与されません。この点で、これらのパスワードが潜在的な悪意のある攻撃に対する安全性を提供すると考えるべきではありません。むしろ、従業員からの保護のために必要なものです。
+
+アクセスを許可するネットワークのリストを指定します。この例では、両方のユーザーのネットワーク一覧が、`networks` 置換を含む別のファイル(`/etc/metrika.xml`)から読み込まれます。以下はその断片です:
+
+``` xml
+<yandex>
+    ...
+    <networks>
+        <ip>::/64</ip>
+        <ip>203.0.113.0/24</ip>
+        <ip>2001:DB8::/32</ip>
+        ...
+    </networks>
+</yandex>
+```
+
+このネットワークのリストは、`users.xml` 内、または `users.d` ディレクトリ内のファイルで直接定義することもできます(詳細については「[設定ファイル](configuration_files.md#configuration_files)」のセクションを参照)。
+
+設定ファイルには、どこからでもアクセスを許可する方法を説明するコメントが含まれています。
+
+本番環境では、`ip` 要素(IPアドレスとそのマスク)のみを指定してください。`host` や `host_regexp` を使用すると余分な待ち時間が発生する可能性があるためです。
+
+次に、ユーザー設定プロファイルを指定します(「[設定プロファイル](settings/settings_profiles.md)」のセクションを参照)。デフォルトのプロファイル `default` を指定できます。プロファイルの名前は任意で、異なるユーザーに同じプロファイルを指定できます。設定プロファイルに書ける最も重要なことは `readonly=1` で、これは読み取り専用アクセスを保証します。次に、使用するクォータを指定します(「[クォータ](quotas.md#quotas)」のセクションを参照)。デフォルトのクォータ `default` を指定できます。これはデフォルトの設定では、リソース使用量を制限せず集計するだけです。クォータの名前は任意で、異なるユーザーに同じクォータを指定できます。その場合、リソース使用量はユーザーごとに個別に計算されます。
+
+オプションの `<allow_databases>` セクションで、ユーザーがアクセスできるデータベースのリストを指定できます。デフォルトでは、すべてのデータベースがユーザーに開放されています。`default` データベースを指定すると、ユーザーはデフォルトでそのデータベースにアクセスできます。
+
+オプションの `<allow_dictionaries>` セクションで、ユーザーがアクセスできる辞書のリストを指定することもできます。デフォルトでは、すべての辞書をユーザーが使用できます。
+
+`system` データベースへのアクセスは常に許可されています(このデータベースはクエリの処理に使用されるためです)。
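+
+たとえば、`users.d` ディレクトリに次のような断片を置くと、特定のデータベースだけにアクセスできるユーザーを宣言できます(ユーザー名 `analyst` とデータベース名 `reports` は説明用の仮の値です):
+
+``` bash
+# 仮のユーザー 'analyst' に 'reports' データベースのみへのアクセスを許可する設定断片を作成する
+cat > /etc/clickhouse-server/users.d/analyst.xml <<'EOF'
+<yandex>
+    <users>
+        <analyst>
+            <password></password>
+            <networks>
+                <ip>::1</ip>
+            </networks>
+            <profile>default</profile>
+            <quota>default</quota>
+            <allow_databases>
+                <database>reports</database>
+            </allow_databases>
+        </analyst>
+    </users>
+</yandex>
+EOF
+```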
+
+個々のデータベースへのアクセスが許可されていない場合でも、ユーザーは `SHOW` クエリやシステムテーブルを使用して、存在するすべてのデータベースとテーブルの一覧を取得できます。
+
+データベースアクセスは [読み取り専用](settings/permissions_for_queries.md#settings_readonly) 設定とは関係ありません。あるデータベースにはフルアクセスを、別のデータベースには読み取り専用アクセスを付与する、ということはできません。
+
+[元の記事](https://clickhouse.tech/docs/en/operations/access_rights/)
diff --git a/docs/ja/operations/backup.md b/docs/ja/operations/backup.md
deleted file mode 120000
index 1003fb30e61..00000000000
--- a/docs/ja/operations/backup.md
+++ /dev/null
@@ -1 +0,0 @@
-../../en/operations/backup.md
\ No newline at end of file
diff --git a/docs/ja/operations/backup.md b/docs/ja/operations/backup.md
new file mode 100644
index 00000000000..c8166bbf3e6
--- /dev/null
+++ b/docs/ja/operations/backup.md
@@ -0,0 +1,41 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 49
+toc_title: "\u30C7\u30FC\u30BF\u30D0\u30C3\u30AF\u30A2\u30C3\u30D7"
+---
+
+# データバックアップ {#data-backup}
+
+[複製](../engines/table_engines/mergetree_family/replication.md) はハードウェア障害からの保護を提供しますが、人為的ミスからは保護しません。データの誤削除、間違ったテーブルや間違ったクラスター上のテーブルの削除、誤ったデータ処理やデータ破損を引き起こすソフトウェアのバグなどです。多くの場合、このようなミスはすべてのレプリカに影響します。ClickHouseには一部のミスを防ぐための組み込みの保護機構があります。たとえば、デフォルトでは [50Gbを超えるデータを含むMergeTreeのようなエンジンでテーブルを削除することはできません](https://github.com/ClickHouse/ClickHouse/blob/v18.14.18-stable/programs/server/config.xml#L322-L330)。しかし、これらの保護機構はすべてのケースをカバーするわけではなく、回避されることもあります。
+
+人為的なミスを効果的に軽減するには、データのバックアップと復元に関する戦略を **事前に** 慎重に準備しておく必要があります。
+
+ClickHouseのバックアップとリストアに万能の解決策はありません。1ギガバイトのデータで機能するものが、数十ペタバイトではおそらく機能しません。それぞれに長所と短所のあるさまざまなアプローチがあり、以下で説明します。さまざまな欠点を補うため、一つだけでなく複数のアプローチを併用することをお勧めします。
+
+!!! note "メモ"
+    何かをバックアップしても、その復元を試したことがなければ、実際に必要になったときに正常に復元できない可能性が高いことに注意してください。したがって、どのバックアップ手法を選ぶにせよ、復元プロセスも必ず自動化し、予備のClickHouseクラスターで定期的に練習してください。
+
+## ソースデータの他の場所への複製 {#duplicating-source-data-somewhere-else}
+
+多くの場合、ClickHouseに取り込まれるデータは [Apache Kafka](https://kafka.apache.org) のような何らかの永続キューを介して配信されます。この場合、ClickHouseに書き込まれるのと同じデータストリームを読み込み、どこかのコールドストレージに保存する追加のサブスクライバ群を構成できます。コールドストレージは、オブジェクトストアや [HDFS](https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html) のような分散ファイルシステムでもかまいません。
+
+## ファイルシステムのスナップショット {#filesystem-snapshots}
+
+一部のローカルファイルシステムはスナップショット機能を提供します(例えば [ZFS](https://en.wikipedia.org/wiki/ZFS))が、ライブクエリの処理には最適ではないかもしれません。可能な解決策の一つは、この種のファイルシステムを使用して追加のレプリカを作成し、`SELECT` クエリに使用される [分散](../engines/table_engines/special/distributed.md) テーブルから除外することです。そのようなレプリカ上のスナップショットは、データを変更するクエリの手の届かないところにあります。ボーナスとして、これらのレプリカは、より多くのディスクをサーバーに接続した特別なハードウェア構成にでき、コスト効率が良いでしょう。
+
+## clickhouse-copier {#clickhouse-copier}
+
+[clickhouse-copier](utilities/clickhouse-copier.md) は、もともとペタバイト規模のテーブルを再シャードするために作られた汎用性の高いツールです。ClickHouseのテーブルとクラスターの間でデータを確実にコピーできるため、バックアップとリストアの目的にも使用できます。
+
+少量のデータであれば、リモートテーブルへの単純な `INSERT INTO ... SELECT ...` でも機能します。
+
+## パーツによる操作 {#manipulations-with-parts}
+
+ClickHouseでは、`ALTER TABLE ... FREEZE PARTITION ...` クエリを使用してテーブルパーティションのローカルコピーを作成できます。これは `/var/lib/clickhouse/shadow/` フォルダへのハードリンクで実装されているため、通常、古いデータのために余分なディスク領域を消費しません。作成されたファイルのコピーはClickHouseサーバーによって処理されないため、そのまま残しておけます。これにより、追加の外部システムを必要としない単純なバックアップが得られますが、ハードウェアの問題には依然として脆弱です。このため、リモートの別の場所にコピーしてからローカルコピーを削除する方がよいでしょう。分散ファイルシステムとオブジェクトストアは依然として良い選択肢ですが、十分な容量を持つ通常のファイルサーバーでも機能するかもしれません(この場合、転送はネットワークファイルシステム経由、あるいは [rsync](https://en.wikipedia.org/wiki/Rsync) で行うことになるでしょう)。
+
+パーティション操作に関連するクエリの詳細については、[ALTERのドキュメント](../sql_reference/statements/alter.md#alter_manipulations-with-partitions) を参照してください。
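+
+このアプローチの最小の例を示します(テーブル名 `db.table`、パーティション式 `201903`、転送先 `backup-server` は説明用の仮の値です):
+
+``` bash
+# 指定したパーティションのローカルコピー(ハードリンク)を shadow/ 以下に作成する
+clickhouse-client --query "ALTER TABLE db.table FREEZE PARTITION 201903"
+
+# 作成されたコピーをリモートへ転送し、その後ローカルコピーを削除する
+rsync -a /var/lib/clickhouse/shadow/ backup-server:/backups/db.table/
+rm -rf /var/lib/clickhouse/shadow/*
+```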
+
+このアプローチを自動化するサードパーティツールもあります: [clickhouse-backup](https://github.com/AlexAkulov/clickhouse-backup)。
+
+[元の記事](https://clickhouse.tech/docs/en/operations/backup/)
diff --git a/docs/ja/operations/configuration_files.md b/docs/ja/operations/configuration_files.md
deleted file mode 120000
index a2d73dbaa25..00000000000
--- a/docs/ja/operations/configuration_files.md
+++ /dev/null
@@ -1 +0,0 @@
-../../en/operations/configuration_files.md
\ No newline at end of file
diff --git a/docs/ja/operations/configuration_files.md b/docs/ja/operations/configuration_files.md
new file mode 100644
index 00000000000..6f84bd36d8f
--- /dev/null
+++ b/docs/ja/operations/configuration_files.md
@@ -0,0 +1,57 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 50
+toc_title: "\u8A2D\u5B9A\u30D5\u30A1\u30A4\u30EB"
+---
+
+# 設定ファイル {#configuration_files}
+
+ClickHouseは複数ファイルによる構成管理をサポートしています。メインのサーバー設定ファイルは `/etc/clickhouse-server/config.xml` です。その他のファイルは `/etc/clickhouse-server/config.d` ディレクトリに置く必要があります。
+
+!!! note "メモ"
+    すべての設定ファイルはXML形式である必要があります。また、通常は同じルート要素 `<yandex>` を持つ必要があります。
+
+メイン設定ファイルで指定された一部の設定は、他の設定ファイルで上書きできます。そのために、これらの設定ファイルの要素には `replace` または `remove` 属性を指定できます。
+
+どちらも指定されていない場合は、要素の内容を再帰的に結合し、重複する子の値を置き換えます。
+
+`replace` が指定されている場合、要素全体を指定されたもので置き換えます。
+
+`remove` が指定されている場合、その要素を削除します。
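+
+たとえば、`config.d` に次のような断片を置くと、メイン設定の要素を上書きできます(上書き対象の `max_connections` は説明用の一例です):
+
+``` bash
+# config.d 内のファイルはメイン設定とマージされる。replace 属性は要素全体を置き換える
+cat > /etc/clickhouse-server/config.d/max_connections.xml <<'EOF'
+<yandex>
+    <max_connections replace="replace">2048</max_connections>
+</yandex>
+EOF
+```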
+
+設定では “substitutions”(置換)も定義できます。要素に `incl` 属性がある場合、ファイルからの対応する置換が値として使用されます。デフォルトでは、置換を含むファイルへのパスは `/etc/metrika.xml` です。これはサーバー設定の [include\_from](server_configuration_parameters/settings.md#server_configuration_parameters-include_from) 要素で変更できます。置換値は、このファイル内の `/yandex/substitution_name` 要素で指定します。`incl` で指定された置換が存在しない場合は、ログに記録されます。置換が欠けていてもClickHouseがログに記録しないようにするには、`optional="true"` 属性を指定します(たとえば [マクロ](server_configuration_parameters/settings.md) の設定)。
+
+置換はZooKeeperからも行えます。そのためには、属性 `from_zk = "/path/to/node"` を指定します。要素の値は、ZooKeeperの `/path/to/node` にあるノードの内容に置き換えられます。また、XMLサブツリー全体をZooKeeperノードに置くこともでき、その場合はソース要素に完全に挿入されます。
+
+`config.xml` ファイルでは、ユーザー設定、プロファイル、クォータを持つ別の設定ファイルを指定できます。この設定への相対パスは `users_config` 要素で指定します。デフォルトは `users.xml` です。`users_config` を省略した場合、ユーザー設定、プロファイル、クォータは `config.xml` で直接指定します。
+
+ユーザー構成は、`config.xml` と `config.d/` のように別々のファイルに分割できます。
+ディレクトリ名は、`users_config` 設定から `.xml` 接尾辞を除き、`.d` を連結したものです。
+`users_config` のデフォルトが `users.xml` なので、デフォルトではディレクトリ `users.d` が使用されます。
+たとえば、次のようにユーザーごとに別々の設定ファイルを持てます:
+
+``` bash
+$ cat /etc/clickhouse-server/users.d/alice.xml
+```
+
+``` xml
+<yandex>
+    <users>
+      <alice>
+          <profile>analytics</profile>
+            <networks>
+                  <ip>::/0</ip>
+            </networks>
+          <password_sha256_hex>...</password_sha256_hex>
+          <quota>analytics</quota>
+      </alice>
+    </users>
+</yandex>
+```
+
+各設定ファイルについて、サーバーは起動時に `file-preprocessed.xml` ファイルも生成します。これらのファイルには、実行されたすべての置換と上書きが含まれており、情報提供を目的としています。設定ファイルでZooKeeperの置換が使用されているのに、サーバー起動時にZooKeeperが利用できない場合、サーバーは前処理済みファイルから設定を読み込みます。
+
+サーバーは、設定ファイルの変更に加えて、置換と上書きの実行時に使用されたファイルとZooKeeperノードを追跡し、ユーザーとクラスターの設定をその場で再読み込みします。つまり、サーバーを再起動することなく、クラスター、ユーザー、およびそれらの設定を変更できます。
+
+[元の記事](https://clickhouse.tech/docs/en/operations/configuration_files/)
diff --git a/docs/ja/operations/index.md b/docs/ja/operations/index.md
deleted file mode 120000
index ce854687b86..00000000000
--- a/docs/ja/operations/index.md
+++ /dev/null
@@ -1 +0,0 @@
-../../en/operations/index.md
\ No newline at end of file
diff --git a/docs/ja/operations/index.md b/docs/ja/operations/index.md
new file mode 100644
index 00000000000..a25660e79a3
--- /dev/null
+++ b/docs/ja/operations/index.md
@@ -0,0 +1,28 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_folder_title: Operations
+toc_priority: 41
+toc_title: "\u5C0E\u5165"
+---
+
+# 操作 {#operations}
+
+ClickHouse操作マニュアルは、以下の主要な節で構成されています:
+
+- [要件](requirements.md)
+- [監視](monitoring.md)
+- [トラブルシューティング](troubleshooting.md)
+- [使用の推奨事項](tips.md)
+- [更新手順](update.md)
+- [アクセス権](access_rights.md)
+- [データバックアップ](backup.md)
+- [設定ファイル](configuration_files.md)
+- [クォータ](quotas.md)
+- [システムテーブル](system_tables.md)
+- [サーバ設定パラメータ](server_configuration_parameters/index.md)
+- [ClickHouseでハードウェアをテストする方法](performance_test.md)
+- [設定](settings/index.md)
+- [ユーティリティ](utilities/index.md)
+
+[元の記事](https://clickhouse.tech/docs/en/operations/)
diff --git a/docs/ja/operations/monitoring.md b/docs/ja/operations/monitoring.md
deleted file mode 120000
index 515ae8b4fff..00000000000
--- a/docs/ja/operations/monitoring.md
+++ /dev/null
@@ -1 +0,0 @@
-../../en/operations/monitoring.md
\ No newline at end of file
diff --git a/docs/ja/operations/monitoring.md b/docs/ja/operations/monitoring.md
new file mode 100644
index 00000000000..9173bffd48f
--- /dev/null
+++ b/docs/ja/operations/monitoring.md
@@ -0,0 +1,44 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 45
+toc_title: "\u76E3\u8996"
+---
+
+# 監視 {#monitoring}
+
+次のものを監視できます:
+
+- ハードウェアリソースの使用率。
+- ClickHouseサーバーの指標。
+
+## リソース使用率 {#resource-utilization}
+
+ClickHouseは、ハードウェアリソースの状態を単独では監視しません。
+
+次の項目の監視を設定することを強くお勧めします:
+
+- プロセッサの負荷と温度。
+
+    [dmesg](https://en.wikipedia.org/wiki/Dmesg) や [turbostat](https://www.linux.org/docs/man8/turbostat.html)、その他のツールを使用できます。
+
+- ストレージシステム、RAM、ネットワークの使用率。
+
+## ClickHouseサーバーの指標 {#clickhouse-server-metrics}
+
+ClickHouseサーバーには、自己状態監視のための計測機能が組み込まれています。
+
+サーバーのイベントを追跡するには、サーバーログを使用します。設定ファイルの [ロガー](server_configuration_parameters/settings.md#server_configuration_parameters-logger) セクションを参照してください。
+
+ClickHouseは次のものを収集します:
+
+- サーバーが計算リソースをどのように使用しているかに関するさまざまな指標。
+- クエリ処理に関する一般的な統計。
+
+指標は [system.metrics](../operations/system_tables.md#system_tables-metrics)、[system.events](../operations/system_tables.md#system_tables-events)、[system.asynchronous\_metrics](../operations/system_tables.md#system_tables-asynchronous_metrics) の各テーブルにあります。
+
+ClickHouseが指標を [Graphite](https://github.com/graphite-project) にエクスポートするように設定できます。ClickHouseサーバー設定ファイルの [Graphiteセクション](server_configuration_parameters/settings.md#server_configuration_parameters-graphite) を参照してください。指標のエクスポートを設定する前に、公式の [ガイド](https://graphite.readthedocs.io/en/latest/install.html) に従ってGraphiteをセットアップしておく必要があります。
+
+さらに、HTTP APIを使用してサーバーの可用性を監視できます。`/ping` に `HTTP GET` リクエストを送信します。サーバーが利用可能な場合、`200 OK` で応答します。
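+
+最小の確認例を示します(ホスト名 `localhost` とポート `8123` は、デフォルト構成を仮定した値です):
+
+``` bash
+# HTTPインターフェイス経由でサーバーの死活を確認する。利用可能なら本文「Ok.」と 200 OK が返る
+curl -i 'http://localhost:8123/ping'
+```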
+
+クラスター構成内のサーバーを監視するには、[max\_replica\_delay\_for\_distributed\_queries](settings/settings.md#settings-max_replica_delay_for_distributed_queries) パラメーターを設定し、HTTPリソース `/replicas_status` を使用します。`/replicas_status` へのリクエストは、レプリカが利用可能で他のレプリカより遅延していなければ `200 OK` を返します。レプリカが遅延している場合は、遅延幅に関する情報とともに `503 HTTP_SERVICE_UNAVAILABLE` を返します。
diff --git a/docs/ja/operations/optimizing_performance/index.md b/docs/ja/operations/optimizing_performance/index.md
deleted file mode 120000
index 87beee8bc3e..00000000000
--- a/docs/ja/operations/optimizing_performance/index.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/operations/optimizing_performance/index.md
\ No newline at end of file
diff --git a/docs/ja/operations/optimizing_performance/index.md b/docs/ja/operations/optimizing_performance/index.md
new file mode 100644
index 00000000000..13dc51db028
--- /dev/null
+++ b/docs/ja/operations/optimizing_performance/index.md
@@ -0,0 +1,8 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_folder_title: Optimizing Performance
+toc_priority: 52
+---
+
+
diff --git a/docs/ja/operations/optimizing_performance/sampling_query_profiler.md b/docs/ja/operations/optimizing_performance/sampling_query_profiler.md
deleted file mode 120000
index 9f3b57cd086..00000000000
--- a/docs/ja/operations/optimizing_performance/sampling_query_profiler.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/operations/optimizing_performance/sampling_query_profiler.md
\ No newline at end of file
diff --git a/docs/ja/operations/optimizing_performance/sampling_query_profiler.md b/docs/ja/operations/optimizing_performance/sampling_query_profiler.md
new file mode 100644
index 00000000000..c1595d10de7
--- /dev/null
+++ b/docs/ja/operations/optimizing_performance/sampling_query_profiler.md
@@ -0,0 +1,64 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 54
+toc_title: "Query\u30D7\u30ED\u30D5\u30A1\u30A4\u30EA\u30F3\u30B0"
+---
+
+# サンプリングクエリプロファイラ {#sampling-query-profiler}
+
+ClickHouseは、クエリ実行の分析を可能にするサンプリングプロファイラを実行します。プロファイラを使用すると、クエリ実行中に最も頻繁に使用されたソースコードのルーチンを見つけられます。CPU時間と、アイドル時間を含む実時間(wall-clock time)をトレースできます。
+
+プロファイラを使用するには:
+
+- サーバー設定の [trace\_log](../server_configuration_parameters/settings.md#server_configuration_parameters-trace_log) セクションを設定します。
+
+    このセクションは、プロファイラの動作結果を含む [trace\_log](../../operations/system_tables.md#system_tables-trace_log) システムテーブルを構成します。デフォルトで設定されています。このテーブルのデータは、稼働中のサーバーに対してのみ有効であることに注意してください。サーバーの再起動後、ClickHouseはテーブルをクリーンアップしないため、保存されている仮想メモリアドレスは無効になる可能性があります。
+
+- [query\_profiler\_cpu\_time\_period\_ns](../settings/settings.md#query_profiler_cpu_time_period_ns) または [query\_profiler\_real\_time\_period\_ns](../settings/settings.md#query_profiler_real_time_period_ns) を設定します。両方の設定を同時に使用できます。
+
+    これらの設定でプロファイラのタイマーを構成できます。これらはセッション設定であるため、サーバー全体、個々のユーザーまたはユーザープロファイル、対話型セッション、さらには個々のクエリごとに異なるサンプリング周波数を設定できます。
+
+デフォルトのサンプリング周波数は毎秒1サンプルで、CPUタイマーとリアルタイマーの両方が有効になっています。この周波数で、ClickHouseクラスターに関する十分な情報を収集できます。同時に、この頻度で動作していても、プロファイラはClickHouseサーバーのパフォーマンスに影響しません。個々のクエリをプロファイルする必要がある場合は、より高いサンプリング周波数を使用してみてください。
+
+`trace_log` システムテーブルを分析するには:
+
+- `clickhouse-common-static-dbg` パッケージをインストールします。「[DEBパッケージからのインストール](../../getting_started/install.md#install-from-deb-packages)」を参照してください。
+
+- [allow\_introspection\_functions](../settings/settings.md#settings-allow_introspection_functions) 設定で、イントロスペクション関数を許可します。
+
+    セキュリティ上の理由から、イントロスペクション関数はデフォルトで無効になっています。
+
+- `addressToLine`、`addressToSymbol`、`demangle` の各 [イントロスペクション関数](../../sql_reference/functions/introspection.md) を使用して、ClickHouseコード内の関数名とその位置を取得します。あるクエリのプロファイルを取得するには、`trace_log` テーブルからデータを集計します。個々の関数ごとにも、スタックトレース全体ごとにも集計できます。
+
+`trace_log` の情報を視覚化する必要がある場合は、[flamegraph](../../interfaces/third-party/gui/#clickhouse-flamegraph) や [speedscope](https://github.com/laplab/clickhouse-speedscope) を試してください。
+
+## 例 {#example}
+
+この例では:
+
+- クエリ識別子と現在の日付で `trace_log` のデータをフィルタリングします。
+
+- スタックトレースで集計します。
+
+- イントロスペクション関数を使用して、次のレポートを取得します:
+
+    - シンボルの名前と、対応するソースコード関数の名前。
+    - これらの関数のソースコード上の位置。
+
+
+
+``` sql
+SELECT
+    count(),
+    arrayStringConcat(arrayMap(x -> concat(demangle(addressToSymbol(x)), '\n ', addressToLine(x)), trace), '\n') AS sym
+FROM system.trace_log
+WHERE (query_id = 'ebca3574-ad0a-400a-9cbc-dca382f5998c') AND (event_date = today())
+GROUP BY trace
+ORDER BY count() DESC
+LIMIT 10
+```
+
+``` text
+{% include "operations/performance/sampling_query_profiler_example_result.txt" %}
+```
diff --git a/docs/ja/operations/performance/sampling_query_profiler_example_result.txt b/docs/ja/operations/performance/sampling_query_profiler_example_result.txt
deleted file mode 120000
index 58c5abe7122..00000000000
--- a/docs/ja/operations/performance/sampling_query_profiler_example_result.txt
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/operations/performance/sampling_query_profiler_example_result.txt
\ No newline at end of file
diff --git a/docs/ja/operations/performance/sampling_query_profiler_example_result.txt b/docs/ja/operations/performance/sampling_query_profiler_example_result.txt
new file mode 100644
index 00000000000..27da5b32e0a
--- /dev/null
+++ b/docs/ja/operations/performance/sampling_query_profiler_example_result.txt
@@ -0,0 +1,542 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+---
+
+行1:
+──────
+カウント():6344
+sym:StackTrace::StackTrace(ucontext\_t const&)
+/ホーム/milovidov/ClickHouse/build\_gcc9/../dbms/Common/StackTrace。cpp:208
+DB::(匿名ネームスペース)::writeTraceInfo(DB::TimerType,int,siginfo\_t*,無効*\[クローン。イスラ0\]
+/ホーム/milovidov/ClickHouse/build\_gcc9/../dbms/IO/BufferBase。h:99
+
+読む
+
+DB::ReadBufferFromFileDescriptor::nextImpl()
+/ホーム/milovidov/ClickHouse/build\_gcc9/../dbms/IO/ReadBufferFromFileDescriptor.cpp:56
+DB::CompressedReadBufferBase::readCompressedData(unsigned long&,unsigned long&)
+/ホーム/milovidov/ClickHouse/build\_gcc9/../dbms/IO/ReadBuffer.h:54
+DB::CompressedReadBufferFromFile::nextImpl()
+/ホーム/milovidov/ClickHouse/build\_gcc9/../dbms/Compression/CompressedReadBufferFromFile。cpp:22
+DB::CompressedReadBufferFromFile::seek(unsigned long,unsigned long)
+/ホーム/milovidov/ClickHouse/build\_gcc9/../dbms/Compression/CompressedReadBufferFromFile。cpp:63
+DB::Mergetreaderstream::seekToMark(unsigned long)
+/ホーム/milovidov/ClickHouse/build\_gcc9/../dbms/ストレージ/MergeTree/Mergetreaderstream。cpp:200
+データベースサブストリングを使用すると、データベースサブストリングを使用してデータベースサブストリングを行うことができます。 \>const&),DB::MergeTreeReader::readData(std::\_\_cxx11::basic\_string\,std::allocator \>const&,DB::IDataType const&,DB::IColumn&,unsigned long,bool,unsigned long,bool)::{lambda(bool)\#1}::operator(bool)const::{lambda(std::vector\ \>const&)\#1}\>::\_M\_invoke(std::\_Any\_data const&,std::vector\ \>const&)
+/ホーム/milovidov/ClickHouse/build\_gcc9/../dbms/ストレージ/MergeTree/MergeTreeReader。cpp:212
+DB::IDataType::deserializeBinaryBulkWithMultiplestreams(DB::IColumn&,unsigned long,DB::IDataType::DeserializeBinaryBulkSettings&,std::shared\_ptr&)const +/usr/local/include/c++/9.1.0/bits/std\_function。h:690 +DB::MergeTreeReader::readData(std::\_\_cxx11::basic\_string\,std::allocator \>const&,DB::IDataType const&,DB::IColumn&,unsigned long,bool,unsigned long,bool) +/ホーム/milovidov/ClickHouse/build\_gcc9/../dbms/ストレージ/MergeTree/MergeTreeReader。cpp:232 +DB::MergeTreeReader::readRows(unsigned long,bool,unsigned long,DB::Block&) +/ホーム/milovidov/ClickHouse/build\_gcc9/../dbms/ストレージ/MergeTree/MergeTreeReader。cpp:111 +DB::MergeTreeRangeReader::DelayedStream::finalize(DB::Block&) +/ホーム/milovidov/ClickHouse/build\_gcc9/../dbms/ストレージ/MergeTree/MergeTreeRangeReader。cpp:35 +DB::MergeTreeRangeReader::continueReadingChain(DB::MergeTreeRangeReader::ReadResult&) +/ホーム/milovidov/ClickHouse/build\_gcc9/../dbms/ストレージ/MergeTree/MergeTreeRangeReader。cpp:219 +DB::MergeTreeRangeReader::read(unsigned long,std::vector\ \>&) +/ホーム/milovidov/ClickHouse/build\_gcc9/../dbms/ストレージ/MergeTree/MergeTreeRangeReader。cpp:487 +DB::MergeTreeBaseSelectBlockInputStream::readFromPartImpl() +/ホーム/milovidov/ClickHouse/build\_gcc9/../dbms/ストレージ/MergeTree/MergeTreeBaseSelectBlockInputStream。cpp:158 +DB::MergeTreeBaseSelectBlockInputStream::readImpl() +/usr/local/include/c++/9.1.0/bits/stl\_vector。h:108 +DB::IBlockInputStream::read() +/usr/local/include/c++/9.1.0/bits/stl\_vector。h:108 +DB::ExpressionBlockInputStream::readImpl() +/ホーム/milovidov/ClickHouse/build\_gcc9/../dbms/データストリーム/ExpressionBlockInputStream.cpp:34 +DB::IBlockInputStream::read() +/usr/local/include/c++/9.1.0/bits/stl\_vector。h:108 +DB::PartialSortingBlockInputStream::readImpl() +/ホーム/milovidov/ClickHouse/build\_gcc9/../dbms/データストリーム/PartialSortingBlockInputStream.cpp:13 +DB::IBlockInputStream::read() +/usr/local/include/c++/9.1.0/bits/stl\_vector。h:108 +DB::ParallelInputsProcessor::ループ(unsigned long) +/usr/local/include/c++/9.1.0/bits/atomic\_base。h:419 +DB::ParallelInputsProcessor::スレッド(std::shared\_ptr,符号なし長) +/ホーム/milovidov/ClickHouse/build\_gcc9/../dbms/データストリーム/ParallelInputsProcessor。h:215 +次の例では、threadfromglobalpoolについて説明します::*ツつィツ姪"ツつ"ツ債ツづュツつケツ-faq、unsigned long),DB::Parallelinputプロセッサ* ツつィツ姪"ツつ"ツ債ツづュツつケ コンストラクターです。::*&&)(std::shared\_ptr、unsigned long),DB::Parallelinputプロセッサ*&&,std::shared\_ptr&&,unsigned long&)::{lambda()\#1}::operator()()const +/usr/local/include/c++/9.1.0/bits/shared\_ptr\_base。h:729 +ThreadPoolImpl::ワーカー(std::\_List\_iterator) +/usr/local/include/c++/9.1.0/bits/unique\_lock。h:69 +execute\_native\_thread\_routine +/home/milovidov/ClickHouse/ci/workspace/gcc/gcc-build/x86\_64-pc-linux-gnu/libstdc++-v3/include/bits/unique\_ptr.h:81 +start\_thread + +\_\_clone + +行2: +────── +カウント():3295 +sym:StackTrace::StackTrace(ucontext\_t const&) +/ホーム/milovidov/ClickHouse/build\_gcc9/../dbms/Common/StackTrace。cpp:208 +DB::(匿名ネームスペース)::writeTraceInfo(DB::TimerType,int,siginfo\_t*,無効*\[クローン。イスラ0\] +/ホーム/milovidov/ClickHouse/build\_gcc9/../dbms/IO/BufferBase。h:99 + +\_\_pthread\_cond\_wait + +クライアントコンピューターのパフォーマンスを監視します。&) +/home/milovidov/ClickHouse/ci/workspace/gcc/gcc-build/x86\_64-pc-linux-gnu/libstdc++-v3/src/c++11/../../../../../gcc-9.1.0/libstdc++-v3/src/c++11/condition\_variable.cc:55 +Poco::Semaphore::wait() +/ホーム/milovidov/ClickHouse/build\_gcc9/../contrib/ポコ/財団/src/セマフォ.cpp:61 +DB::UnionBlockInputStream::readImpl() +/usr/local/include/c++/9.1.0/x86\_64-pc-linux-gnu/bits/gthr-default.h:748 +DB::IBlockInputStream::read() 
+/usr/local/include/c++/9.1.0/bits/stl\_vector。h:108 +DB::MergeSortingBlockInputStream::readImpl() +/ホーム/milovidov/ClickHouse/build\_gcc9/../dbms/コア/ブロック.h:90 +DB::IBlockInputStream::read() +/usr/local/include/c++/9.1.0/bits/stl\_vector。h:108 +DB::ExpressionBlockInputStream::readImpl() +/ホーム/milovidov/ClickHouse/build\_gcc9/../dbms/データストリーム/ExpressionBlockInputStream.cpp:34 +DB::IBlockInputStream::read() +/usr/local/include/c++/9.1.0/bits/stl\_vector。h:108 +DB::LimitBlockInputStream::readImpl() +/usr/local/include/c++/9.1.0/bits/stl\_vector。h:108 +DB::IBlockInputStream::read() +/usr/local/include/c++/9.1.0/bits/stl\_vector。h:108 +DB::AsynchronousBlockInputStream::calculate() +/usr/local/include/c++/9.1.0/bits/stl\_vector。h:108 +std::\_Function\_handler\::\_M\_invoke(std:::\_Any\_data const&) +/usr/local/include/c++/9.1.0/bits/atomic\_base。h:551 +ThreadPoolImpl::ワーカー(std::\_List\_iterator) +/usr/local/include/c++/9.1.0/x86\_64-pc-linux-gnu/bits/gthr-default.h:748 +ツ環板篠ョツ嘉ッツ偲青ツエツδツ-ツアツイツ-ツエツス::scheduleImpl(std::function\,int,std::省略可能)::{lambda()\#3}\>(ThreadPoolImpl::scheduleImpl(std::function\,int,std::省略可能)::{ラムダ()\#3}&&)::{ラムダ()\#1}::演算子()()const +/ホーム/milovidov/ClickHouse/build\_gcc9/../dbms/共通/スレッドプール。h:146 +ThreadPoolImpl::ワーカー(std::\_List\_iterator) +/usr/local/include/c++/9.1.0/bits/unique\_lock。h:69 +execute\_native\_thread\_routine +/home/milovidov/ClickHouse/ci/workspace/gcc/gcc-build/x86\_64-pc-linux-gnu/libstdc++-v3/include/bits/unique\_ptr.h:81 +start\_thread + +\_\_clone + +3行目: +────── +カウント():1978 +sym:StackTrace::StackTrace(ucontext\_t const&) +/ホーム/milovidov/ClickHouse/build\_gcc9/../dbms/Common/StackTrace。cpp:208 +DB::(匿名ネームスペース)::writeTraceInfo(DB::TimerType,int,siginfo\_t*,無効*\[クローン。イスラ0\] +/ホーム/milovidov/ClickHouse/build\_gcc9/../dbms/IO/BufferBase。h:99 + +DB::VolnitskyBase\\>::search(unsigned char const\*,unsigned long)const +/opt/milovidov/ClickHouse/build\_gcc9/プログラム/clickhouse +DB::MatchImpl\::vector\_constant(DB::PODArray\,15ul,16ul\>const&,DB::PODArray\,\_\_\_\_\_\_\_\_\_\_\_\_\_,cxx11::Basic\_string\,std::allocator \>const&,DB::PODArray\,15ul,16ul\>&) +/opt/milovidov/ClickHouse/build\_gcc9/プログラム/clickhouse +DB::FunctionsStringSearch\,DB::NameLike\>::executeImpl(DB::Block&,std::vector\ \>const&、unsigned long、unsigned long) +/opt/milovidov/ClickHouse/build\_gcc9/プログラム/clickhouse +DB::PreparedFunctionImpl::execute(DB::Block&,std::vector\ \>const&,unsigned long,unsigned long,bool) +/ホーム/milovidov/ClickHouse/build\_gcc9/../dbms/Functions/IFunction.cpp:464 +DB::ExpressionAction::execute(DB::Block&,bool)const +/usr/local/include/c++/9.1.0/bits/stl\_vector。h:677 +DB::ExpressionActions::execute(DB::Block&,bool)const +/ホーム/milovidov/ClickHouse/build\_gcc9/../dbms/インタプリタ/ExpressionActions。cpp:739 +DB::MergeTreeRangeReader::executePrewhereActionsAndFilterColumns(DB::MergeTreeRangeReader::ReadResult&) +/ホーム/milovidov/ClickHouse/build\_gcc9/../dbms/ストレージ/MergeTree/MergeTreeRangeReader。cpp:660 +DB::MergeTreeRangeReader::read(unsigned long,std::vector\ \>&) +/ホーム/milovidov/ClickHouse/build\_gcc9/../dbms/ストレージ/MergeTree/MergeTreeRangeReader。cpp:546 +DB::MergeTreeRangeReader::read(unsigned long,std::vector\ \>&) +/usr/local/include/c++/9.1.0/bits/stl\_vector。h:108 +DB::MergeTreeBaseSelectBlockInputStream::readFromPartImpl() +/ホーム/milovidov/ClickHouse/build\_gcc9/../dbms/ストレージ/MergeTree/MergeTreeBaseSelectBlockInputStream。cpp:158 +DB::MergeTreeBaseSelectBlockInputStream::readImpl() +/usr/local/include/c++/9.1.0/bits/stl\_vector。h:108 +DB::IBlockInputStream::read() 
+/usr/local/include/c++/9.1.0/bits/stl\_vector.h:108
+DB::ExpressionBlockInputStream::readImpl()
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34
+DB::IBlockInputStream::read()
+/usr/local/include/c++/9.1.0/bits/stl\_vector.h:108
+DB::PartialSortingBlockInputStream::readImpl()
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13
+DB::IBlockInputStream::read()
+/usr/local/include/c++/9.1.0/bits/stl\_vector.h:108
+DB::ParallelInputsProcessor\::loop(unsigned long)
+/usr/local/include/c++/9.1.0/bits/atomic\_base.h:419
+DB::ParallelInputsProcessor\::thread(std::shared\_ptr,unsigned long)
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215
+ThreadFromGlobalPool::ThreadFromGlobalPool\(void (DB::ParallelInputsProcessor::\*&&)(std::shared\_ptr,unsigned long),DB::ParallelInputsProcessor\*&&,std::shared\_ptr&&,unsigned long&)::{lambda()\#1}::operator()()const
+/usr/local/include/c++/9.1.0/bits/shared\_ptr\_base.h:729
+ThreadPoolImpl::worker(std::\_List\_iterator)
+/usr/local/include/c++/9.1.0/bits/unique\_lock.h:69
+execute\_native\_thread\_routine
+/home/milovidov/ClickHouse/ci/workspace/gcc/gcc-build/x86\_64-pc-linux-gnu/libstdc++-v3/include/bits/unique\_ptr.h:81
+start\_thread
+
+\_\_clone
+
+Row 4:
+──────
+count(): 1913
+sym: StackTrace::StackTrace(ucontext\_t const&)
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/Common/StackTrace.cpp:208
+DB::(anonymous namespace)::writeTraceInfo(DB::TimerType,int,siginfo\_t*,void*) \[clone .isra.0\]
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/IO/BufferBase.h:99
+
+DB::VolnitskyBase\\>::search(unsigned char const\*,unsigned long)const
+/opt/milovidov/ClickHouse/build\_gcc9/programs/clickhouse
+DB::MatchImpl\::vector\_constant(DB::PODArray\,15ul,16ul\>const&,DB::PODArray\,15ul,16ul\>const&,std::\_\_cxx11::basic\_string\,std::allocator \>const&,DB::PODArray\,15ul,16ul\>&)
+/opt/milovidov/ClickHouse/build\_gcc9/programs/clickhouse
+DB::FunctionsStringSearch\,DB::NameLike\>::executeImpl(DB::Block&,std::vector\ \>const&,unsigned long,unsigned long)
+/opt/milovidov/ClickHouse/build\_gcc9/programs/clickhouse
+DB::PreparedFunctionImpl::execute(DB::Block&,std::vector\ \>const&,unsigned long,unsigned long,bool)
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/Functions/IFunction.cpp:464
+DB::ExpressionAction::execute(DB::Block&,bool)const
+/usr/local/include/c++/9.1.0/bits/stl\_vector.h:677
+DB::ExpressionActions::execute(DB::Block&,bool)const
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/Interpreters/ExpressionActions.cpp:739
+DB::MergeTreeRangeReader::executePrewhereActionsAndFilterColumns(DB::MergeTreeRangeReader::ReadResult&)
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:660
+DB::MergeTreeRangeReader::read(unsigned long,std::vector\ \>&)
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:546
+DB::MergeTreeRangeReader::read(unsigned long,std::vector\ \>&)
+/usr/local/include/c++/9.1.0/bits/stl\_vector.h:108
+DB::MergeTreeBaseSelectBlockInputStream::readFromPartImpl()
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158
+DB::MergeTreeBaseSelectBlockInputStream::readImpl()
+/usr/local/include/c++/9.1.0/bits/stl\_vector.h:108
+DB::IBlockInputStream::read()
+/usr/local/include/c++/9.1.0/bits/stl\_vector.h:108
+DB::ExpressionBlockInputStream::readImpl()
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34
+DB::IBlockInputStream::read()
+/usr/local/include/c++/9.1.0/bits/stl\_vector.h:108
+DB::PartialSortingBlockInputStream::readImpl()
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13
+DB::IBlockInputStream::read()
+/usr/local/include/c++/9.1.0/bits/stl\_vector.h:108
+DB::ParallelInputsProcessor\::loop(unsigned long)
+/usr/local/include/c++/9.1.0/bits/atomic\_base.h:419
+DB::ParallelInputsProcessor\::thread(std::shared\_ptr,unsigned long)
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215
+ThreadFromGlobalPool::ThreadFromGlobalPool\(void (DB::ParallelInputsProcessor::\*&&)(std::shared\_ptr,unsigned long),DB::ParallelInputsProcessor\*&&,std::shared\_ptr&&,unsigned long&)::{lambda()\#1}::operator()()const
+/usr/local/include/c++/9.1.0/bits/shared\_ptr\_base.h:729
+ThreadPoolImpl::worker(std::\_List\_iterator)
+/usr/local/include/c++/9.1.0/bits/unique\_lock.h:69
+execute\_native\_thread\_routine
+/home/milovidov/ClickHouse/ci/workspace/gcc/gcc-build/x86\_64-pc-linux-gnu/libstdc++-v3/include/bits/unique\_ptr.h:81
+start\_thread
+
+\_\_clone
+
+Row 5:
+──────
+count(): 1672
+sym: StackTrace::StackTrace(ucontext\_t const&)
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/Common/StackTrace.cpp:208
+DB::(anonymous namespace)::writeTraceInfo(DB::TimerType,int,siginfo\_t*,void*) \[clone .isra.0\]
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/IO/BufferBase.h:99
+
+DB::VolnitskyBase\\>::search(unsigned char const\*,unsigned long)const
+/opt/milovidov/ClickHouse/build\_gcc9/programs/clickhouse
+DB::MatchImpl\::vector\_constant(DB::PODArray\,15ul,16ul\>const&,DB::PODArray\,15ul,16ul\>const&,std::\_\_cxx11::basic\_string\,std::allocator \>const&,DB::PODArray\,15ul,16ul\>&)
+/opt/milovidov/ClickHouse/build\_gcc9/programs/clickhouse
+DB::FunctionsStringSearch\,DB::NameLike\>::executeImpl(DB::Block&,std::vector\ \>const&,unsigned long,unsigned long)
+/opt/milovidov/ClickHouse/build\_gcc9/programs/clickhouse
+DB::PreparedFunctionImpl::execute(DB::Block&,std::vector\ \>const&,unsigned long,unsigned long,bool)
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/Functions/IFunction.cpp:464
+DB::ExpressionAction::execute(DB::Block&,bool)const
+/usr/local/include/c++/9.1.0/bits/stl\_vector.h:677
+DB::ExpressionActions::execute(DB::Block&,bool)const
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/Interpreters/ExpressionActions.cpp:739
+DB::MergeTreeRangeReader::executePrewhereActionsAndFilterColumns(DB::MergeTreeRangeReader::ReadResult&)
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:660
+DB::MergeTreeRangeReader::read(unsigned long,std::vector\ \>&)
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:546
+DB::MergeTreeRangeReader::read(unsigned long,std::vector\ \>&)
+/usr/local/include/c++/9.1.0/bits/stl\_vector.h:108
+DB::MergeTreeBaseSelectBlockInputStream::readFromPartImpl()
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158
+DB::MergeTreeBaseSelectBlockInputStream::readImpl()
+/usr/local/include/c++/9.1.0/bits/stl\_vector.h:108
+DB::IBlockInputStream::read()
+/usr/local/include/c++/9.1.0/bits/stl\_vector.h:108
+DB::ExpressionBlockInputStream::readImpl()
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34
+DB::IBlockInputStream::read()
+/usr/local/include/c++/9.1.0/bits/stl\_vector.h:108
+DB::PartialSortingBlockInputStream::readImpl()
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13
+DB::IBlockInputStream::read()
+/usr/local/include/c++/9.1.0/bits/stl\_vector.h:108
+DB::ParallelInputsProcessor\::loop(unsigned long)
+/usr/local/include/c++/9.1.0/bits/atomic\_base.h:419
+DB::ParallelInputsProcessor\::thread(std::shared\_ptr,unsigned long)
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215
+ThreadFromGlobalPool::ThreadFromGlobalPool\(void (DB::ParallelInputsProcessor::\*&&)(std::shared\_ptr,unsigned long),DB::ParallelInputsProcessor\*&&,std::shared\_ptr&&,unsigned long&)::{lambda()\#1}::operator()()const
+/usr/local/include/c++/9.1.0/bits/shared\_ptr\_base.h:729
+ThreadPoolImpl::worker(std::\_List\_iterator)
+/usr/local/include/c++/9.1.0/bits/unique\_lock.h:69
+execute\_native\_thread\_routine
+/home/milovidov/ClickHouse/ci/workspace/gcc/gcc-build/x86\_64-pc-linux-gnu/libstdc++-v3/include/bits/unique\_ptr.h:81
+start\_thread
+
+\_\_clone
+
+Row 6:
+──────
+count(): 1531
+sym: StackTrace::StackTrace(ucontext\_t const&)
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/Common/StackTrace.cpp:208
+DB::(anonymous namespace)::writeTraceInfo(DB::TimerType,int,siginfo\_t*,void*) \[clone .isra.0\]
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/IO/BufferBase.h:99
+
+read
+
+DB::ReadBufferFromFileDescriptor::nextImpl()
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/IO/ReadBufferFromFileDescriptor.cpp:56
+DB::CompressedReadBufferBase::readCompressedData(unsigned long&,unsigned long&)
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/IO/ReadBuffer.h:54
+DB::CompressedReadBufferFromFile::nextImpl()
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/Compression/CompressedReadBufferFromFile.cpp:22
+void DB::deserializeBinarySSE2\<4\>(DB::PODArray\,15ul,16ul\>&,DB::PODArray\,15ul,16ul\>&,DB::ReadBuffer&,unsigned long)
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/IO/ReadBuffer.h:53
+DB::DataTypeString::deserializeBinaryBulk(DB::IColumn&,DB::ReadBuffer&,unsigned long,double)const
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/DataTypes/DataTypeString.cpp:202
+DB::MergeTreeReader::readData(std::\_\_cxx11::basic\_string\,std::allocator \>const&,DB::IDataType const&,DB::IColumn&,unsigned long,bool,unsigned long,bool)
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:232
+DB::MergeTreeReader::readRows(unsigned long,bool,unsigned long,DB::Block&)
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:111
+DB::MergeTreeRangeReader::DelayedStream::finalize(DB::Block&)
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:35
+DB::MergeTreeRangeReader::startReadingChain(unsigned long,std::vector\ \>&)
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:219
+DB::MergeTreeRangeReader::read(unsigned long,std::vector\ \>&)
+/usr/local/include/c++/9.1.0/bits/stl\_vector.h:108
+DB::MergeTreeRangeReader::read(unsigned long,std::vector\ \>&)
+/usr/local/include/c++/9.1.0/bits/stl\_vector.h:108
+DB::MergeTreeBaseSelectBlockInputStream::readFromPartImpl()
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158
+DB::MergeTreeBaseSelectBlockInputStream::readImpl()
+/usr/local/include/c++/9.1.0/bits/stl\_vector.h:108
+DB::IBlockInputStream::read()
+/usr/local/include/c++/9.1.0/bits/stl\_vector.h:108
+DB::ExpressionBlockInputStream::readImpl()
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34
+DB::IBlockInputStream::read()
+/usr/local/include/c++/9.1.0/bits/stl\_vector.h:108
+DB::PartialSortingBlockInputStream::readImpl()
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13
+DB::IBlockInputStream::read()
+/usr/local/include/c++/9.1.0/bits/stl\_vector.h:108
+DB::ParallelInputsProcessor\::loop(unsigned long)
+/usr/local/include/c++/9.1.0/bits/atomic\_base.h:419
+DB::ParallelInputsProcessor\::thread(std::shared\_ptr,unsigned long)
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215
+ThreadFromGlobalPool::ThreadFromGlobalPool\(void (DB::ParallelInputsProcessor::\*&&)(std::shared\_ptr,unsigned long),DB::ParallelInputsProcessor\*&&,std::shared\_ptr&&,unsigned long&)::{lambda()\#1}::operator()()const
+/usr/local/include/c++/9.1.0/bits/shared\_ptr\_base.h:729
+ThreadPoolImpl::worker(std::\_List\_iterator)
+/usr/local/include/c++/9.1.0/bits/unique\_lock.h:69
+execute\_native\_thread\_routine
+/home/milovidov/ClickHouse/ci/workspace/gcc/gcc-build/x86\_64-pc-linux-gnu/libstdc++-v3/include/bits/unique\_ptr.h:81
+start\_thread
+
+\_\_clone
+
+Row 7:
+──────
+count(): 1034
+sym: StackTrace::StackTrace(ucontext\_t const&)
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/Common/StackTrace.cpp:208
+DB::(anonymous namespace)::writeTraceInfo(DB::TimerType,int,siginfo\_t*,void*) \[clone .isra.0\]
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/IO/BufferBase.h:99
+
+DB::VolnitskyBase\\>::search(unsigned char const\*,unsigned long)const
+/opt/milovidov/ClickHouse/build\_gcc9/programs/clickhouse
+DB::MatchImpl\::vector\_constant(DB::PODArray\,15ul,16ul\>const&,DB::PODArray\,15ul,16ul\>const&,std::\_\_cxx11::basic\_string\,std::allocator \>const&,DB::PODArray\,15ul,16ul\>&)
+/opt/milovidov/ClickHouse/build\_gcc9/programs/clickhouse
+DB::FunctionsStringSearch\,DB::NameLike\>::executeImpl(DB::Block&,std::vector\ \>const&,unsigned long,unsigned long)
+/opt/milovidov/ClickHouse/build\_gcc9/programs/clickhouse
+DB::PreparedFunctionImpl::execute(DB::Block&,std::vector\ \>const&,unsigned long,unsigned long,bool)
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/Functions/IFunction.cpp:464
+DB::ExpressionAction::execute(DB::Block&,bool)const
+/usr/local/include/c++/9.1.0/bits/stl\_vector.h:677
+DB::ExpressionActions::execute(DB::Block&,bool)const
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/Interpreters/ExpressionActions.cpp:739
+DB::MergeTreeRangeReader::executePrewhereActionsAndFilterColumns(DB::MergeTreeRangeReader::ReadResult&)
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:660
+DB::MergeTreeRangeReader::read(unsigned long,std::vector\ \>&)
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:546
+DB::MergeTreeRangeReader::read(unsigned long,std::vector\ \>&)
+/usr/local/include/c++/9.1.0/bits/stl\_vector.h:108
+DB::MergeTreeBaseSelectBlockInputStream::readFromPartImpl()
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158
+DB::MergeTreeBaseSelectBlockInputStream::readImpl()
+/usr/local/include/c++/9.1.0/bits/stl\_vector.h:108
+DB::IBlockInputStream::read()
+/usr/local/include/c++/9.1.0/bits/stl\_vector.h:108
+DB::ExpressionBlockInputStream::readImpl()
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34
+DB::IBlockInputStream::read()
+/usr/local/include/c++/9.1.0/bits/stl\_vector.h:108
+DB::PartialSortingBlockInputStream::readImpl()
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13
+DB::IBlockInputStream::read()
+/usr/local/include/c++/9.1.0/bits/stl\_vector.h:108
+DB::ParallelInputsProcessor\::loop(unsigned long)
+/usr/local/include/c++/9.1.0/bits/atomic\_base.h:419
+DB::ParallelInputsProcessor\::thread(std::shared\_ptr,unsigned long)
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215
+ThreadFromGlobalPool::ThreadFromGlobalPool\(void (DB::ParallelInputsProcessor::\*&&)(std::shared\_ptr,unsigned long),DB::ParallelInputsProcessor\*&&,std::shared\_ptr&&,unsigned long&)::{lambda()\#1}::operator()()const
+/usr/local/include/c++/9.1.0/bits/shared\_ptr\_base.h:729
+ThreadPoolImpl::worker(std::\_List\_iterator)
+/usr/local/include/c++/9.1.0/bits/unique\_lock.h:69
+execute\_native\_thread\_routine
+/home/milovidov/ClickHouse/ci/workspace/gcc/gcc-build/x86\_64-pc-linux-gnu/libstdc++-v3/include/bits/unique\_ptr.h:81
+start\_thread
+
+\_\_clone
+
+Row 8:
+──────
+count(): 989
+sym: StackTrace::StackTrace(ucontext\_t const&)
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/Common/StackTrace.cpp:208
+DB::(anonymous namespace)::writeTraceInfo(DB::TimerType,int,siginfo\_t*,void*) \[clone .isra.0\]
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/IO/BufferBase.h:99
+
+\_\_lll\_lock\_wait
+
+pthread\_mutex\_lock
+
+DB::MergeTreeReaderStream::loadMarks()
+/usr/local/include/c++/9.1.0/bits/std\_mutex.h:103
+DB::MergeTreeReaderStream::MergeTreeReaderStream(std::\_\_cxx11::basic\_string\,std::allocator \>const&,std::\_\_cxx11::basic\_string\,std::allocator \>const&,unsigned long,std::vector\ \>const&,DB::MarkCache*,bool,DB::UncompressedCache*,unsigned long,unsigned long,unsigned long,unsigned long,DB::MergeTreeIndexGranularityInfo const\*,std::function\const&,int)
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/Storages/MergeTree/MergeTreeReaderStream.cpp:107
+std::\_Function\_handler\ \>const&),DB::MergeTreeReader::addStreams(std::\_\_cxx11::basic\_string\,std::allocator \>const&,DB::IDataType const&,std::function\const&,int)::{lambda(std::vector\ \>const&)\#1}\>::\_M\_invoke(std::\_Any\_data const&,std::vector\ \>const&)
+/usr/local/include/c++/9.1.0/bits/unique\_ptr.h:147
+DB::MergeTreeReader::addStreams(std::\_\_cxx11::basic\_string\,std::allocator \>const&,DB::IDataType const&,std::function\const&,int)
+/usr/local/include/c++/9.1.0/bits/stl\_vector.h:677
+DB::MergeTreeReader::MergeTreeReader(std::\_\_cxx11::basic\_string\,std::allocator \>const&,std::shared\_ptr const&,DB::NamesAndTypesList const&,DB::UncompressedCache*,DB::MarkCache*,bool,DB::MergeTreeData const&,std::vector\ \>const&,unsigned long,unsigned long,std::map\,std::allocator \>,double,std::less\,std::allocator \>\>,std::allocator\,std::allocator \>const,double\>\>\>const&,std::function\const&,int)
+/usr/local/include/c++/9.1.0/bits/stl\_list.h:303
+DB::MergeTreeThreadSelectBlockInputStream::getNewTask()
+/usr/local/include/c++/9.1.0/bits/std\_function.h:259
+DB::MergeTreeBaseSelectBlockInputStream::readImpl()
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:54
+DB::IBlockInputStream::read()
+/usr/local/include/c++/9.1.0/bits/stl\_vector.h:108
+DB::ExpressionBlockInputStream::readImpl()
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34
+DB::IBlockInputStream::read()
+/usr/local/include/c++/9.1.0/bits/stl\_vector.h:108
+DB::PartialSortingBlockInputStream::readImpl()
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13
+DB::IBlockInputStream::read()
+/usr/local/include/c++/9.1.0/bits/stl\_vector.h:108
+DB::ParallelInputsProcessor\::loop(unsigned long)
+/usr/local/include/c++/9.1.0/bits/atomic\_base.h:419
+DB::ParallelInputsProcessor\::thread(std::shared\_ptr,unsigned long)
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215
+ThreadFromGlobalPool::ThreadFromGlobalPool\(void (DB::ParallelInputsProcessor::\*&&)(std::shared\_ptr,unsigned long),DB::ParallelInputsProcessor\*&&,std::shared\_ptr&&,unsigned long&)::{lambda()\#1}::operator()()const
+/usr/local/include/c++/9.1.0/bits/shared\_ptr\_base.h:729
+ThreadPoolImpl::worker(std::\_List\_iterator)
+/usr/local/include/c++/9.1.0/bits/unique\_lock.h:69
+execute\_native\_thread\_routine
+/home/milovidov/ClickHouse/ci/workspace/gcc/gcc-build/x86\_64-pc-linux-gnu/libstdc++-v3/include/bits/unique\_ptr.h:81
+start\_thread
+
+\_\_clone
+
+Row 9:
+───────
+count(): 779
+sym: StackTrace::StackTrace(ucontext\_t const&)
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/Common/StackTrace.cpp:208
+DB::(anonymous namespace)::writeTraceInfo(DB::TimerType,int,siginfo\_t*,void*) \[clone .isra.0\]
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/IO/BufferBase.h:99
+
+void DB::deserializeBinarySSE2\<4\>(DB::PODArray\,15ul,16ul\>&,DB::PODArray\,15ul,16ul\>&,DB::ReadBuffer&,unsigned long)
+/usr/local/lib/gcc/x86\_64-pc-linux-gnu/9.1.0/include/emmintrin.h:727
+DB::DataTypeString::deserializeBinaryBulk(DB::IColumn&,DB::ReadBuffer&,unsigned long,double)const
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/DataTypes/DataTypeString.cpp:202
+DB::MergeTreeReader::readData(std::\_\_cxx11::basic\_string\,std::allocator \>const&,DB::IDataType const&,DB::IColumn&,unsigned long,bool,unsigned long,bool)
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:232
+DB::MergeTreeReader::readRows(unsigned long,bool,unsigned long,DB::Block&)
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:111
+DB::MergeTreeRangeReader::DelayedStream::finalize(DB::Block&)
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:35
+DB::MergeTreeRangeReader::startReadingChain(unsigned long,std::vector\ \>&)
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:219
+DB::MergeTreeRangeReader::read(unsigned long,std::vector\ \>&)
+/usr/local/include/c++/9.1.0/bits/stl\_vector.h:108
+DB::MergeTreeRangeReader::read(unsigned long,std::vector\ \>&)
+/usr/local/include/c++/9.1.0/bits/stl\_vector.h:108
+DB::MergeTreeBaseSelectBlockInputStream::readFromPartImpl()
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158
+DB::MergeTreeBaseSelectBlockInputStream::readImpl()
+/usr/local/include/c++/9.1.0/bits/stl\_vector.h:108
+DB::IBlockInputStream::read()
+/usr/local/include/c++/9.1.0/bits/stl\_vector.h:108
+DB::ExpressionBlockInputStream::readImpl()
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34
+DB::IBlockInputStream::read()
+/usr/local/include/c++/9.1.0/bits/stl\_vector.h:108
+DB::PartialSortingBlockInputStream::readImpl()
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13
+DB::IBlockInputStream::read()
+/usr/local/include/c++/9.1.0/bits/stl\_vector.h:108
+DB::ParallelInputsProcessor\::loop(unsigned long)
+/usr/local/include/c++/9.1.0/bits/atomic\_base.h:419
+DB::ParallelInputsProcessor\::thread(std::shared\_ptr,unsigned long)
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215
+ThreadFromGlobalPool::ThreadFromGlobalPool\(void (DB::ParallelInputsProcessor::\*&&)(std::shared\_ptr,unsigned long),DB::ParallelInputsProcessor\*&&,std::shared\_ptr&&,unsigned long&)::{lambda()\#1}::operator()()const
+/usr/local/include/c++/9.1.0/bits/shared\_ptr\_base.h:729
+ThreadPoolImpl::worker(std::\_List\_iterator)
+/usr/local/include/c++/9.1.0/bits/unique\_lock.h:69
+execute\_native\_thread\_routine
+/home/milovidov/ClickHouse/ci/workspace/gcc/gcc-build/x86\_64-pc-linux-gnu/libstdc++-v3/include/bits/unique\_ptr.h:81
+start\_thread
+
+\_\_clone
+
+Row 10:
+───────
+count(): 666
+sym: StackTrace::StackTrace(ucontext\_t const&)
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/Common/StackTrace.cpp:208
+DB::(anonymous namespace)::writeTraceInfo(DB::TimerType,int,siginfo\_t*,void*) \[clone .isra.0\]
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/IO/BufferBase.h:99
+
+void DB::deserializeBinarySSE2\<4\>(DB::PODArray\,15ul,16ul\>&,DB::PODArray\,15ul,16ul\>&,DB::ReadBuffer&,unsigned long)
+/usr/local/lib/gcc/x86\_64-pc-linux-gnu/9.1.0/include/emmintrin.h:727
+DB::DataTypeString::deserializeBinaryBulk(DB::IColumn&,DB::ReadBuffer&,unsigned long,double)const
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/DataTypes/DataTypeString.cpp:202
+DB::MergeTreeReader::readData(std::\_\_cxx11::basic\_string\,std::allocator \>const&,DB::IDataType const&,DB::IColumn&,unsigned long,bool,unsigned long,bool)
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:232
+DB::MergeTreeReader::readRows(unsigned long,bool,unsigned long,DB::Block&)
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/Storages/MergeTree/MergeTreeReader.cpp:111
+DB::MergeTreeRangeReader::DelayedStream::finalize(DB::Block&)
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:35
+DB::MergeTreeRangeReader::startReadingChain(unsigned long,std::vector\ \>&)
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/Storages/MergeTree/MergeTreeRangeReader.cpp:219
+DB::MergeTreeRangeReader::read(unsigned long,std::vector\ \>&)
+/usr/local/include/c++/9.1.0/bits/stl\_vector.h:108
+DB::MergeTreeRangeReader::read(unsigned long,std::vector\ \>&)
+/usr/local/include/c++/9.1.0/bits/stl\_vector.h:108
+DB::MergeTreeBaseSelectBlockInputStream::readFromPartImpl()
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp:158
+DB::MergeTreeBaseSelectBlockInputStream::readImpl()
+/usr/local/include/c++/9.1.0/bits/stl\_vector.h:108
+DB::IBlockInputStream::read()
+/usr/local/include/c++/9.1.0/bits/stl\_vector.h:108
+DB::ExpressionBlockInputStream::readImpl()
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/DataStreams/ExpressionBlockInputStream.cpp:34
+DB::IBlockInputStream::read()
+/usr/local/include/c++/9.1.0/bits/stl\_vector.h:108
+DB::PartialSortingBlockInputStream::readImpl()
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/DataStreams/PartialSortingBlockInputStream.cpp:13
+DB::IBlockInputStream::read()
+/usr/local/include/c++/9.1.0/bits/stl\_vector.h:108
+DB::ParallelInputsProcessor\::loop(unsigned long)
+/usr/local/include/c++/9.1.0/bits/atomic\_base.h:419
+DB::ParallelInputsProcessor\::thread(std::shared\_ptr,unsigned long)
+/home/milovidov/ClickHouse/build\_gcc9/../dbms/DataStreams/ParallelInputsProcessor.h:215
+ThreadFromGlobalPool::ThreadFromGlobalPool\(void (DB::ParallelInputsProcessor::\*&&)(std::shared\_ptr,unsigned long),DB::ParallelInputsProcessor\*&&,std::shared\_ptr&&,unsigned long&)::{lambda()\#1}::operator()()const
+/usr/local/include/c++/9.1.0/bits/shared\_ptr\_base.h:729
+ThreadPoolImpl::worker(std::\_List\_iterator)
+/usr/local/include/c++/9.1.0/bits/unique\_lock.h:69
+execute\_native\_thread\_routine
+/home/milovidov/ClickHouse/ci/workspace/gcc/gcc-build/x86\_64-pc-linux-gnu/libstdc++-v3/include/bits/unique\_ptr.h:81
+start\_thread
+
+\_\_clone
diff --git a/docs/ja/operations/performance_test.md b/docs/ja/operations/performance_test.md
deleted file mode 120000
index a74c126c63f..00000000000
--- a/docs/ja/operations/performance_test.md
+++ /dev/null
@@ -1 +0,0 @@
-../../en/operations/performance_test.md
\ No newline at end of file
diff --git a/docs/ja/operations/performance_test.md b/docs/ja/operations/performance_test.md
new file mode 100644
index 00000000000..e58aec13b4f
--- /dev/null
+++ b/docs/ja/operations/performance_test.md
@@ -0,0 +1,82 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 54
+toc_title: "\u30CF\u30FC\u30C9\u30A6\u30A7\u30A2\u8A66\u9A13"
+---
+
+# ClickHouseでハードウェアをテストする方法 {#how-to-test-your-hardware-with-clickhouse}
+
+この手順に従うと、ClickHouseパッケージをインストールすることなく、任意のサーバーで基本的なClickHouse性能テストを実行できます。
+
+1.  “commits” ページ(https://github.com/ClickHouse/ClickHouse/commits/master)を開きます。
+
+2.  “ClickHouse Build Check” の最初の緑色のチェックマークまたは赤色の十字をクリックし、“ClickHouse Build Check” の近くにある “Details” リンクをクリックします。
+
+3.  amd64またはaarch64用の “clickhouse” バイナリへのリンクをコピーします。
+
+4.  サーバーにsshで接続し、wgetでダウンロードします:
+
+
+
+      # For amd64:
+      wget https://clickhouse-builds.s3.yandex.net/0/00ba767f5d2a929394ea3be193b1f79074a1c4bc/1578163263_binary/clickhouse
+      # For aarch64:
+      wget https://clickhouse-builds.s3.yandex.net/0/00ba767f5d2a929394ea3be193b1f79074a1c4bc/1578161264_binary/clickhouse
+      # Then do:
+      chmod a+x clickhouse
+
+1.  設定ファイルをダウンロードします:
+
+
+
+      wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.xml
+      wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/users.xml
+      mkdir config.d
+      wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.d/path.xml -O config.d/path.xml
+      wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.d/log_to_console.xml -O config.d/log_to_console.xml
+
+1.  ベンチマークファイルをダウンロードします:
+
+
+
+      wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/clickhouse/benchmark-new.sh
+      chmod a+x benchmark-new.sh
+      wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/clickhouse/queries.sql
+
+1.  [Yandex.Metricaデータセット](../getting_started/example_datasets/metrica.md) の手順に従ってテストデータをダウンロードします(1億行を含む “hits” テーブル)。
+
+
+
+      wget https://clickhouse-datasets.s3.yandex.net/hits/partitions/hits_100m_obfuscated_v1.tar.xz
+      tar xvf hits_100m_obfuscated_v1.tar.xz -C .
+      mv hits_100m_obfuscated_v1/* .
+
+1.  サーバーを実行します:
+
+
+
+      ./clickhouse server
+
+1.  データを確認します(別のターミナルでサーバーにssh接続します):
+
+
+
+      ./clickhouse client --query "SELECT count() FROM hits_100m_obfuscated"
+      100000000
+
+1.  benchmark-new.shを編集し、“clickhouse-client” を “./clickhouse client” に変更して、“--max\_memory\_usage 100000000000” パラメータを追加します。
+
+
+
+      mcedit benchmark-new.sh
+
+1.  ベンチマークを実行します:
+
+
+
+      ./benchmark-new.sh hits_100m_obfuscated
+
+1.  
結果の数値とハードウェア構成に関する情報を clickhouse-feedback@yandex-team.com に送信します。
+
+すべての結果はここに掲載されています: https://clickhouse.tech/benchmark\_hardware.html
diff --git a/docs/ja/operations/quotas.md b/docs/ja/operations/quotas.md
deleted file mode 120000
index 1c52cdf1e91..00000000000
--- a/docs/ja/operations/quotas.md
+++ /dev/null
@@ -1 +0,0 @@
-../../en/operations/quotas.md
\ No newline at end of file
diff --git a/docs/ja/operations/quotas.md b/docs/ja/operations/quotas.md
new file mode 100644
index 00000000000..bec5f1ebda0
--- /dev/null
+++ b/docs/ja/operations/quotas.md
@@ -0,0 +1,112 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 51
+toc_title: "\u30AF\u30A9\u30FC\u30BF"
+---
+
+# クォータ {#quotas}
+
+クォータを使用すると、一定期間にわたるリソースの使用を制限したり、単にリソースの使用を追跡したりできます。
+クォータはユーザー設定で定義されます。通常は ‘users.xml’ です。
+
+システムには、単一のクエリの複雑さを制限する機能もあります。“Restrictions on query complexity” セクションを参照してください。
+
+クエリの複雑さの制限とは対照的に、クォータは:
+
+- 単一のクエリを制限するのではなく、一定期間にわたって実行できるクエリの集合に制限を設定します。
+- 分散クエリ処理では、すべてのリモートサーバーで消費されたリソースを合算して計上します。
+
+クォータを定義する ‘users.xml’ ファイルのセクションを見てみましょう。
+
+``` xml
+<!-- クォータ。 -->
+<quotas>
+    <!-- クォータ名。 -->
+    <default>
+        <!-- 期間ごとの制限。異なる制限を持つ複数の間隔を設定できます。 -->
+        <interval>
+            <!-- 間隔の長さ。 -->
+            <duration>3600</duration>
+
+            <!-- 無制限。指定した期間のデータを収集するだけです。 -->
+            <queries>0</queries>
+            <errors>0</errors>
+            <result_rows>0</result_rows>
+            <read_rows>0</read_rows>
+            <execution_time>0</execution_time>
+        </interval>
+    </default>
+</quotas>
+```
+
+デフォルトでは、クォータは使用を制限することなく、各時間ごとのリソース消費を記録するだけです。
+各間隔で計算されたリソース消費は、各リクエストの後にサーバーログに出力されます。
+
+``` xml
+<statbox>
+    <!-- 期間ごとの制限。異なる制限を持つ複数の間隔を設定できます。 -->
+    <interval>
+        <!-- 間隔の長さ。 -->
+        <duration>3600</duration>
+
+        <queries>1000</queries>
+        <errors>100</errors>
+        <result_rows>1000000000</result_rows>
+        <read_rows>100000000000</read_rows>
+        <execution_time>900</execution_time>
+    </interval>
+
+    <interval>
+        <duration>86400</duration>
+
+        <queries>10000</queries>
+        <errors>1000</errors>
+        <result_rows>5000000000</result_rows>
+        <read_rows>500000000000</read_rows>
+        <execution_time>7200</execution_time>
+    </interval>
+</statbox>
+```
+
+‘statbox’ クォータでは、制限は毎時および24時間(86,400秒)ごとに設定されます。時間間隔は、実装で定義された固定の時点から起算されます。言い換えれば、24時間の間隔は必ずしも真夜中に始まるとは限りません。
+
+間隔が終了すると、収集された値はすべてクリアされます。次の時間では、クォータの計算が最初からやり直されます。
+
+制限できる量は次のとおりです:
+
+`queries` – The total number of requests.
+
+`errors` – The number of queries that threw an exception.
+
+`result_rows` – The total number of rows given as the result.
+
+`read_rows` – The total number of source rows read from tables for running the query, on all remote servers.
+
+`execution_time` – The total query execution time, in seconds (wall time).
+
+いずれかの間隔で制限を少なくとも一度超えると、どの制限がどの間隔で超過したか、および新しい間隔がいつ始まるか(クエリを再び送信できるようになる時刻)を示すテキストとともに例外がスローされます。
+
+クォータでは、複数のキーのリソースを個別に計上する “quota key” 機能を使用できます。以下はその例です:
+
+``` xml
+<!-- グローバルレポートデザイナー用。 -->
+<web_global>
+    <!-- keyed – quota_key "key" はクエリパラメータで渡され、
+            クォータはキーの値ごとに個別に追跡されます。 -->
+    <keyed />
+</web_global>
+```
+
+クォータは、設定の ‘users’ セクションでユーザーに割り当てられます。“Access rights” セクションを参照してください。
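+
+クォータをユーザーに割り当てる最小限の仮の例を以下に示します(ユーザー名 `web_user` と省略した要素は説明用の仮定です):
+
+``` xml
+<!-- 仮の例: users.xml の抜粋。上で定義したクォータ "statbox" をユーザーに割り当てます。 -->
+<users>
+    <web_user>
+        <!-- password、profile、networks などの通常のユーザー設定もここに記述します。 -->
+        <quota>statbox</quota>
+    </web_user>
+</users>
+```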
+
+分散クエリ処理では、累積されたリソース量は要求元のサーバーに保存されます。そのため、ユーザーが別のサーバーに移動すると、そこでのクォータは “最初から” やり直しになります。
+
+サーバーを再起動すると、クォータがリセットされます。
+
+[元の記事](https://clickhouse.tech/docs/en/operations/quotas/)
diff --git a/docs/ja/operations/requirements.md b/docs/ja/operations/requirements.md
deleted file mode 120000
index a71283af25c..00000000000
--- a/docs/ja/operations/requirements.md
+++ /dev/null
@@ -1 +0,0 @@
-../../en/operations/requirements.md
\ No newline at end of file
diff --git a/docs/ja/operations/requirements.md b/docs/ja/operations/requirements.md
new file mode 100644
index 00000000000..28a2308ab5f
--- /dev/null
+++ b/docs/ja/operations/requirements.md
@@ -0,0 +1,61 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 44
+toc_title: "\u8981\u4EF6"
+---
+
+# 要件 {#requirements}
+
+## CPU {#cpu}
+
+ビルド済みのdebパッケージからインストールする場合は、SSE4.2命令をサポートするx86\_64アーキテクチャのCPUを使用してください。SSE4.2をサポートしていないプロセッサ、またはAArch64やPowerPC64LEアーキテクチャのプロセッサでClickHouseを実行するには、ソースからClickHouseをビルドする必要があります。
+
+ClickHouseは並列処理を実装しており、利用可能なすべてのハードウェアリソースを使用します。プロセッサを選択する際は、コア数が少なくクロックレートが高い構成よりも、コア数が多くクロックレートがやや低い構成を優先してください。例えば、2600MHzの16コアは、3600MHzの8コアよりも好ましいです。
+
+**ターボブースト** と **ハイパースレッディング** 技術の使用を推奨します。典型的な負荷でパフォーマンスが大幅に向上します。
+
+## RAM {#ram}
+
+些細でないクエリを実行するには、最低4GBのRAMを使用することをお勧めします。ClickHouseサーバーははるかに少ないRAMでも動作しますが、クエリを処理するためのメモリは必要です。
+
+必要なRAMの容量は、次の要素に依存します:
+
+- クエリの複雑さ。
+- クエリで処理されるデータの量。
+
+必要なRAM量を計算するには、[GROUP BY](../sql_reference/statements/select.md#select-group-by-clause)、[DISTINCT](../sql_reference/statements/select.md#select-distinct)、[JOIN](../sql_reference/statements/select.md#select-join) および使用するその他の操作のための一時データのサイズを見積もります。
+
+ClickHouseは、一時データに外部メモリを使用できます。詳細については [外部メモリによるグループ化](../sql_reference/statements/select.md#select-group-by-in-external-memory) を参照してください。
+
+## Swapファイル {#swap-file}
+
+本番環境ではスワップファイルを無効にしてください。
+
+## 格納サブシステム {#storage-subsystem}
+
+ClickHouseをインストールするには2GBの空きディスク容量が必要です。
+
+データに必要なストレージ容量は、個別に計算する必要があります。見積もりの際には:
+
+- データ量の推定。
+
+    データのサンプルを取得し、そこから行の平均サイズを取得できます。次に、その値に保存する予定の行数を掛けます。
+
+- データ圧縮係数。
+
+    データ圧縮係数を推定するには、データのサンプルをClickHouseにロードし、データの実際のサイズと格納されたテーブルのサイズを比較します。たとえば、クリックストリームデータは通常6〜10倍に圧縮されます。
+
+保存するデータの最終的なボリュームを計算するには、推定データ量に圧縮係数を適用します。複数のレプリカにデータを格納する場合は、推定ボリュームにレプリカの数を掛けます。例えば、1行あたり平均600バイトで10億行を保存し、圧縮率が6倍、レプリカが2つの場合、必要なストレージはおよそ 600 B × 10^9 ÷ 6 × 2 ≈ 200 GB になります(数値は説明用の仮定です)。
+
+## ネットワーク {#network}
+
+可能であれば、10G以上のネットワークを使用してください。
+
+ネットワーク帯域幅は、大量の中間データを伴う分散クエリを処理する場合に重要です。また、レプリケーション処理もネットワーク速度に影響されます。
+
+## ソフト {#software}
+
+ClickHouseは主にLinux系のOS向けに開発されています。推奨されるLinuxディストリビューションはUbuntuです。`tzdata` パッケージがインストールされている必要があります。
+
+ClickHouseは他のOS系統でも動作する可能性があります。詳細は、ドキュメントの [はじめに](../getting_started/index.md) セクションを参照してください。
diff --git a/docs/ja/operations/server_configuration_parameters/index.md b/docs/ja/operations/server_configuration_parameters/index.md
deleted file mode 120000
index f7ef875f9de..00000000000
--- a/docs/ja/operations/server_configuration_parameters/index.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/operations/server_configuration_parameters/index.md
\ No newline at end of file
diff --git a/docs/ja/operations/server_configuration_parameters/index.md b/docs/ja/operations/server_configuration_parameters/index.md
new file mode 100644
index 00000000000..db832cb78db
--- /dev/null
+++ b/docs/ja/operations/server_configuration_parameters/index.md
@@ -0,0 +1,19 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_folder_title: Server Configuration Parameters
+toc_priority: 54
+toc_title: "\u5C0E\u5165"
+---
+
+# サーバ設定パラメータ {#server-settings}
+
+このセクションでは、セッションレベルやクエリレベルでは変更できないサーバー設定について説明します。
+
+これらの設定は、ClickHouseサーバー上の `config.xml` ファイルに保存されます。
+
+その他の設定については、
“[設定](../settings/index.md#settings)” セクションを参照してください。
+
+設定を調べる前に、[設定ファイル](../configuration_files.md#configuration_files) セクションを読み、置換(`incl` および `optional` 属性)の使用方法に注意してください。
+
+[元の記事](https://clickhouse.tech/docs/en/operations/server_configuration_parameters/)
diff --git a/docs/ja/operations/server_configuration_parameters/settings.md b/docs/ja/operations/server_configuration_parameters/settings.md
deleted file mode 120000
index 8e8eda9e48b..00000000000
--- a/docs/ja/operations/server_configuration_parameters/settings.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/operations/server_configuration_parameters/settings.md
\ No newline at end of file
diff --git a/docs/ja/operations/server_configuration_parameters/settings.md b/docs/ja/operations/server_configuration_parameters/settings.md
new file mode 100644
index 00000000000..82e1967eb4c
--- /dev/null
+++ b/docs/ja/operations/server_configuration_parameters/settings.md
@@ -0,0 +1,872 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 57
+toc_title: "\u30B5\u30FC\u30D0\u30FC\u8A2D\u5B9A"
+---
+
+# サーバー設定 {#server-settings}
+
+## builtin\_dictionaries\_reload\_interval {#builtin-dictionaries-reload-interval}
+
+組み込み辞書を再読み込みするまでの間隔(秒)。
+
+ClickHouseはx秒ごとに組み込み辞書を再読み込みします。これにより、サーバーを再起動せずに辞書を “その場で” 編集できます。
+
+デフォルト値:3600.
+
+**例えば**
+
+``` xml
+<builtin_dictionaries_reload_interval>3600</builtin_dictionaries_reload_interval>
+```
+
+## 圧縮 {#server-settings-compression}
+
+[MergeTree](../../engines/table_engines/mergetree_family/mergetree.md) エンジンのテーブルのデータ圧縮設定。
+
+!!! warning "警告"
+    ClickHouseを使用し始めたばかりの場合は使用しないでください。
+
+構成テンプレート:
+
+``` xml
+<compression>
+    <case>
+      <min_part_size>...</min_part_size>
+      <min_part_size_ratio>...</min_part_size_ratio>
+      <method>...</method>
+    </case>
+    ...
+</compression>
+```
+
+`<case>` フィールド:
+
+- `min_part_size` – The minimum size of a data part.
+- `min_part_size_ratio` – The ratio of the data part size to the table size.
+- `method` – Compression method. Acceptable values: `lz4` または `zstd`.
+
+複数の `<case>` セクションを設定できます。
+
+条件が満たされたときの動作:
+
+- データパートが条件セットに一致する場合、ClickHouseは指定された圧縮方法を使用します。
+- データパートが複数の条件セットに一致する場合、ClickHouseは最初に一致した条件セットを使用します。
+
+データパートがどの条件セットにも一致しない場合、ClickHouseは `lz4` 圧縮を使用します。
+
+**例えば**
+
+``` xml
+<compression incl="clickhouse_compression">
+    <case>
+        <min_part_size>10000000000</min_part_size>
+        <min_part_size_ratio>0.01</min_part_size_ratio>
+        <method>zstd</method>
+    </case>
+</compression>
+```
+
+## default\_database {#default-database}
+
+既定のデータベース。
+
+データベースのリストを取得するには、[SHOW DATABASES](../../sql_reference/statements/show.md#show-databases) クエリを使用します。
+
+**例えば**
+
+``` xml
+<default_database>default</default_database>
+```
+
+## default\_profile {#default-profile}
+
+既定の設定プロファイル。
+
+設定プロファイルは、パラメータ `user_config` で指定されたファイルにあります。
+
+**例えば**
+
+``` xml
+<default_profile>default</default_profile>
+```
+
+## dictionaries\_config {#server_configuration_parameters-dictionaries_config}
+
+外部ディクショナリの設定ファイルへのパス。
+
+パス:
+
+- 絶対パス、またはサーバー設定ファイルからの相対パスを指定します。
+- パスにはワイルドカード \* と ? を含めることができます。
+
+“[外部辞書](../../sql_reference/dictionaries/external_dictionaries/external_dicts.md)” も参照してください。
+
+**例えば**
+
+``` xml
+<dictionaries_config>*_dictionary.xml</dictionaries_config>
+```
+
+## dictionaries\_lazy\_load {#server_configuration_parameters-dictionaries_lazy_load}
+
+辞書の遅延ロード。
+
+`true` の場合、各辞書は最初の使用時に作成されます。辞書の作成に失敗した場合、その辞書を使用していた関数は例外をスローします。
+
+`false` の場合、すべての辞書はサーバーの起動時に作成され、エラーが発生した場合はサーバーがシャットダウンします。
+
+デフォルトは `true` です。
+
+**例えば**
+
+``` xml
+<dictionaries_lazy_load>true</dictionaries_lazy_load>
+```
+
+## format\_schema\_path {#server_configuration_parameters-format_schema_path}
+
+[CapnProto](../../interfaces/formats.md#capnproto) フォーマットなど、入力データのスキーマを格納するディレクトリへのパス。
+
+**例えば**
+
+``` xml
+  <!-- 各種入力フォーマットのスキーマファイルを含むディレクトリ。 -->
+  <format_schema_path>format_schemas/</format_schema_path>
+```
+
+## Graphite {#server_configuration_parameters-graphite}
+
+[Graphite](https://github.com/graphite-project) へのデータ送信の設定。
+
+設定:
+
+- host – The Graphite server.
+- port – The port on the Graphite server.
+- interval – The interval for sending, in seconds.
+- timeout – The timeout for sending data, in seconds.
+- root\_path – Prefix for keys.
+- metrics – Sending data from the [システム。指標](../../operations/system_tables.md#system_tables-metrics) テーブル。 +- events – Sending deltas data accumulated for the time period from the [システム。イベント](../../operations/system_tables.md#system_tables-events) テーブル。 +- events\_cumulative – Sending cumulative data from the [システム。イベント](../../operations/system_tables.md#system_tables-events) テーブル。 +- asynchronous\_metrics – Sending data from the [システム。asynchronous\_metrics](../../operations/system_tables.md#system_tables-asynchronous_metrics) テーブル。 + +複数を設定できます `` 句。 たとえば、異なる間隔で異なるデータを送信するためにこれを使用できます。 + +**例えば** + +``` xml + + localhost + 42000 + 0.1 + 60 + one_min + true + true + false + true + +``` + +## graphite\_rollup {#server_configuration_parameters-graphite-rollup} + +グラファイトの間引きデータの設定。 + +詳細については、 [グラフィットメールグツリー](../../engines/table_engines/mergetree_family/graphitemergetree.md). + +**例えば** + +``` xml + + + max + + 0 + 60 + + + 3600 + 300 + + + 86400 + 3600 + + + +``` + +## http\_port/https\_port {#http-porthttps-port} + +HTTP経由でサーバーに接続するためのポート。 + +もし `https_port` が指定される。, [openSSL](#server_configuration_parameters-openssl) 構成する必要があります。 + +もし `http_port` が指定されている場合、OpenSSL設定が設定されていても、その設定は無視される。 + +**例えば** + +``` xml +0000 +``` + +## http\_server\_default\_response {#server_configuration_parameters-http_server_default_response} + +ClickHouse HTTP(s)サーバーにアクセスするときにデフォルトで表示されるページ。 +デフォルト値は “Ok.” (最後にラインフィード付き) + +**例えば** + +開く `https://tabix.io/` アクセス時 `http://localhost: http_port`. + +``` xml + +
    ]]> +
    +``` + +## include\_from {#server_configuration_parameters-include_from} + +置換を伴うファイルへのパス。 + +詳細については、以下を参照してください “[設定ファイル](../configuration_files.md#configuration_files)”. + +**例えば** + +``` xml +/etc/metrica.xml +``` + +## interserver\_http\_port {#interserver-http-port} + +ClickHouseサーバ間でデータを交換するためのポート。 + +**例えば** + +``` xml +9009 +``` + +## interserver\_http\_host {#interserver-http-host} + +このサーバーへのアクセスに他のサーバーが使用できるホスト名。 + +省略された場合、それは同じ方法で定義されます `hostname-f` 司令部 + +特定のネットワー + +**例えば** + +``` xml +example.yandex.ru +``` + +## interserver\_http\_credentials {#server-settings-interserver-http-credentials} + +認証に使用するユーザー名とパスワード [複製](../../engines/table_engines/mergetree_family/replication.md) レプリケートされた\*エンジン。 これらの資格情報は、レプリカ間の通信にのみ使用され、ClickHouseクライアントの資格情報とは無関係です。 サーバーにあるチェックにこれらの資格の接続にはレプリカと同じ資格を接続する場合はその他のレプリカ. なので、これらの資格を設定する同じすべてのレプリカ、クラスター +デフォルトでは、認証は使用されません。 + +このセクショ: + +- `user` — username. +- `password` — password. + +**例えば** + +``` xml + + admin + 222 + +``` + +## keep\_alive\_timeout {#keep-alive-timeout} + +接続を閉じる前に、clickhouseが着信要求を待機する秒数。 デフォルトは3秒です。 + +**例えば** + +``` xml +3 +``` + +## listen\_host {#server_configuration_parameters-listen_host} + +要求元のホストの制限。 したい場合はサーバーの回答をしているが、それらを指定し `::`. + +例: + +``` xml +::1 +127.0.0.1 +``` + +## ロガー {#server_configuration_parameters-logger} + +ログの設定。 + +キー: + +- level – Logging level. Acceptable values: `trace`, `debug`, `information`, `warning`, `error`. +- log – The log file. Contains all the entries according to `level`. +- errorlog – Error log file. +- size – Size of the file. Applies to `log`と`errorlog`. ファイルが届くと `size`、ClickHouseのアーカイブと名前を変更し、その場所に新しいログファイルを作成します。 +- count – The number of archived log files that ClickHouse stores. + +**例えば** + +``` xml + + trace + /var/log/clickhouse-server/clickhouse-server.log + /var/log/clickhouse-server/clickhouse-server.err.log + 1000M + 10 + +``` + +Syslogへの書き込みもサポートされています。 設定例: + +``` xml + + 1 + +
    syslog.remote:10514
    + myhost.local + LOG_LOCAL6 + syslog +
    +
    +``` + +キー: + +- use\_syslog — Required setting if you want to write to the syslog. +- address — The host\[:port\] of syslogd. If omitted, the local daemon is used. +- hostname — Optional. The name of the host that logs are sent from. +- facility — [Syslog機能キーワード](https://en.wikipedia.org/wiki/Syslog#Facility) 大文字で “LOG\_” 接頭辞: (`LOG_USER`, `LOG_DAEMON`, `LOG_LOCAL3`、というように)。 + デフォルト値: `LOG_USER` もし `address` が指定される。, `LOG_DAEMON otherwise.` +- format – Message format. Possible values: `bsd` と `syslog.` + +## マクロ {#macros} + +パラメータの置換のために再現します。 + +ければ省略することができ複製のテーブルは使用しておりません。 + +詳細については、以下を参照してください “[複製テーブルの作成](../../engines/table_engines/mergetree_family/replication.md)”. + +**例えば** + +``` xml + +``` + +## mark\_cache\_size {#server-mark-cache-size} + +約サイズ(バイトのキャッシュのマークの使用によりテーブルエンジンの [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md) 家族 + +キャッシュの共有のサーバーメモリが割り当てられます。 キャッシュサイズは5368709120以上である必要があります。 + +**例えば** + +``` xml +5368709120 +``` + +## max\_concurrent\_queries {#max-concurrent-queries} + +同時に処理された要求の最大数。 + +**例えば** + +``` xml +100 +``` + +## max\_connections {#max-connections} + +受信接続の最大数。 + +**例えば** + +``` xml +4096 +``` + +## max\_open\_files {#max-open-files} + +開いているファイルの最大数。 + +デフォルトでは: `maximum`. + +Mac OS Xでこのオプションを使用することをお勧めします。 `getrlimit()` 関数は不正な値を返します。 + +**例えば** + +``` xml +262144 +``` + +## max\_table\_size\_to\_drop {#max-table-size-to-drop} + +テーブルの削除に関する制限。 + +のサイズ [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md) テーブルを超え `max_table_size_to_drop` (バイト単位)、ドロップクエリを使用して削除することはできません。 + +それでもclickhouseサーバーを再起動せずにテーブルを削除する必要がある場合は、 `/flags/force_drop_table` ドロップクエリを実行します。 + +デフォルト値:50gb. + +値0は、制限なしにすべてのテーブルを削除できることを意味します。 + +**例えば** + +``` xml +0 +``` + +## merge\_tree {#server_configuration_parameters-merge_tree} + +のテーブルのための微調整 [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md). + +詳細については、"mergetreesettings"を参照してください。hヘッダファイル。 + +**例えば** + +``` xml + + 5 + +``` + +## openSSL {#server_configuration_parameters-openssl} + +SSLクライアント/サーバー構成。 + +SSLのサポートは、 `libpoco` ライブラリ。 ユーザーインターフェイスはファイルに記述 [SSLManager.h](https://github.com/ClickHouse-Extras/poco/blob/master/NetSSL_OpenSSL/include/Poco/Net/SSLManager.h) + +サーバー/クライアント設定のキー: + +- privateKeyFile – The path to the file with the secret key of the PEM certificate. The file may contain a key and certificate at the same time. +- certificateFile – The path to the client/server certificate file in PEM format. You can omit it if `privateKeyFile` 証明書が含まれています。 +- caConfig – The path to the file or directory that contains trusted root certificates. +- verificationMode – The method for checking the node's certificates. Details are in the description of the [文脈](https://github.com/ClickHouse-Extras/poco/blob/master/NetSSL_OpenSSL/include/Poco/Net/Context.h) クラス。 可能な値: `none`, `relaxed`, `strict`, `once`. +- verificationDepth – The maximum length of the verification chain. Verification will fail if the certificate chain length exceeds the set value. +- loadDefaultCAFile – Indicates that built-in CA certificates for OpenSSL will be used. Acceptable values: `true`, `false`. \| +- cipherList – Supported OpenSSL encryptions. For example: `ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH`. +- cacheSessions – Enables or disables caching sessions. Must be used in combination with `sessionIdContext`. 許容値: `true`, `false`. +- sessionIdContext – A unique set of random characters that the server appends to each generated identifier. 
The length of the string must not exceed `SSL_MAX_SSL_SESSION_ID_LENGTH`. このパラメータは常にお勧めです問題を避けることになるだろう両方の場合はサーバのキャッシュのセッションがクライアントの要望はキャッシュ. デフォルト値: `${application.name}`. +- sessionCacheSize – The maximum number of sessions that the server caches. Default value: 1024\*20. 0 – Unlimited sessions. +- sessionTimeout – Time for caching the session on the server. +- extendedVerification – Automatically extended verification of certificates after the session ends. Acceptable values: `true`, `false`. +- requireTLSv1 – Require a TLSv1 connection. Acceptable values: `true`, `false`. +- requireTLSv1\_1 – Require a TLSv1.1 connection. Acceptable values: `true`, `false`. +- requireTLSv1 – Require a TLSv1.2 connection. Acceptable values: `true`, `false`. +- fips – Activates OpenSSL FIPS mode. Supported if the library's OpenSSL version supports FIPS. +- privateKeyPassphraseHandler – Class (PrivateKeyPassphraseHandler subclass) that requests the passphrase for accessing the private key. For example: ``, `KeyFileHandler`, `test`, ``. +- invalidCertificateHandler – Class (a subclass of CertificateHandler) for verifying invalid certificates. For example: ` ConsoleCertificateHandler ` . +- disableProtocols – Protocols that are not allowed to use. +- preferServerCiphers – Preferred server ciphers on the client. + +**設定例:** + +``` xml + + + + /etc/clickhouse-server/server.crt + /etc/clickhouse-server/server.key + + /etc/clickhouse-server/dhparam.pem + none + true + true + sslv2,sslv3 + true + + + true + true + sslv2,sslv3 + true + + + + RejectCertificateHandler + + + +``` + +## part\_log {#server_configuration_parameters-part-log} + +関連付けられているログイベント [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md). たとえば、データの追加やマージなどです。 利用できるログを統合アルゴリズムと比較しています。 マージプロセスを視覚化できます。 + +クエリはログに記録されます [システム。part\_log](../../operations/system_tables.md#system_tables-part-log) テーブル、別のファイルではありません。 このテーブルの名前を設定することができます `table` パラメータ(下記参照)。 + +以下のパラメータの設定ロギング: + +- `database` – Name of the database. +- `table` – Name of the system table. +- `partition_by` – Sets a [カスタム分割キー](../../engines/table_engines/mergetree_family/custom_partitioning_key.md). +- `flush_interval_milliseconds` – Interval for flushing data from the buffer in memory to the table. + +**例えば** + +``` xml + + system + part_log
    + toMonday(event_date) + 7500 +
    +``` + +## パス {#server_configuration_parameters-path} + +データを含むディレクトリへのパス。 + +!!! note "メモ" + 末尾のスラッシュは必須です。 + +**例えば** + +``` xml +/var/lib/clickhouse/ +``` + +## クエリーログ {#server_configuration_parameters-query-log} + +で受信したロギングクエリの設定 [log\_queries=1](../settings/settings.md) 設定。 + +クエリはログに記録されます [システム。クエリーログ](../../operations/system_tables.md#system_tables-query_log) テーブル、別のファイルではありません。 テーブルの名前を変更することができます `table` パラメータ(下記参照)。 + +以下のパラメータの設定ロギング: + +- `database` – Name of the database. +- `table` – Name of the system table the queries will be logged in. +- `partition_by` – Sets a [カスタム分割キー](../../engines/table_engines/mergetree_family/custom_partitioning_key.md) テーブルのため。 +- `flush_interval_milliseconds` – Interval for flushing data from the buffer in memory to the table. + +テーブルが存在しない場合、clickhouseはそれを作成します。 clickhouseサーバーが更新されたときにクエリログの構造が変更された場合、古い構造を持つテーブルの名前が変更され、新しいテーブルが自動的に作成されます。 + +**例えば** + +``` xml + + system + query_log
    + toMonday(event_date) + 7500 +
    +``` + +## query\_thread\_log {#server_configuration_parameters-query-thread-log} + +受信したクエリのスレッドをログに記録する設定 [log\_query\_threads=1](../settings/settings.md#settings-log-query-threads) 設定。 + +クエリはログに記録されます [システム。query\_thread\_log](../../operations/system_tables.md#system_tables-query-thread-log) テーブル、別のファイルではありません。 テーブルの名前を変更することができます `table` パラメータ(下記参照)。 + +以下のパラメータの設定ロギング: + +- `database` – Name of the database. +- `table` – Name of the system table the queries will be logged in. +- `partition_by` – Sets a [カスタム分割キー](../../engines/table_engines/mergetree_family/custom_partitioning_key.md) システムテーブルの場合。 +- `flush_interval_milliseconds` – Interval for flushing data from the buffer in memory to the table. + +テーブルが存在しない場合、clickhouseはそれを作成します。 clickhouseサーバーの更新時にクエリスレッドログの構造が変更された場合、古い構造を持つテーブルの名前が変更され、新しいテーブルが自動的に作成されます。 + +**例えば** + +``` xml + + system + query_thread_log
    + toMonday(event_date) + 7500 +
    +``` + +## trace\_log {#server_configuration_parameters-trace_log} + +のための設定 [trace\_log](../../operations/system_tables.md#system_tables-trace_log) システムテーブル操作。 + +パラメータ: + +- `database` — Database for storing a table. +- `table` — Table name. +- `partition_by` — [カスタム分割キー](../../engines/table_engines/mergetree_family/custom_partitioning_key.md) システムテーブルの場合。 +- `flush_interval_milliseconds` — Interval for flushing data from the buffer in memory to the table. + +既定のサーバー設定ファイル `config.xml` 次の設定セクションを含みます: + +``` xml + + system + trace_log
    + toYYYYMM(event_date) + 7500 +
    +``` + +## query\_masking\_rules {#query-masking-rules} + +サーバーログに保存する前に、クエリとすべてのログメッセージに適用される、regexpベースのルール, +`system.query_log`, `system.text_log`, `system.processes` テーブル、およびクライアントに送信されたログ。 これにより +SQLクエリからの機密データ漏えい(名前、電子メール、個人など) +ログへの識別子またはクレジットカード番号)。 + +**例えば** + +``` xml + + + hide SSN + (^|\D)\d{3}-\d{2}-\d{4}($|\D) + 000-00-0000 + + +``` + +設定フィールド: +- `name` -ルールの名前(オプション) +- `regexp` -RE2互換の正規表現(必須) +- `replace` -機密データのための置換文字列(デフォルトではオプション-sixアスタリスク) + +マスキングルールは、クエリ全体に適用されます(不正な形式の非解析可能なクエリからの機密データの漏洩を防ぐため)。 + +`system.events` テーブルに反対がある `QueryMaskingRulesMatch` クエリマスキングルールの総数が一致する。 + +分散クエリの場合、各サーバーを個別に構成する必要があります。 +ノードはマスクせずに保存されます。 + +## リモートサーバー {#server-settings-remote-servers} + +によって使用されるクラスターの構成 [分散](../../engines/table_engines/special/distributed.md) テーブルエンジンと `cluster` テーブル機能。 + +**例えば** + +``` xml + +``` + +の値について `incl` 属性、セクションを参照 “[設定ファイル](../configuration_files.md#configuration_files)”. + +**また見なさい** + +- [skip\_unavailable\_shards](../settings/settings.md#settings-skip_unavailable_shards) + +## タイムゾーン {#server_configuration_parameters-timezone} + +サーバーのタイムゾーン。 + +UTCタイムゾーンまたは地理的位置(たとえば、Africa/Abidjan)のIANA識別子として指定します。 + +タイムゾーンは、datetimeフィールドがテキスト形式(画面またはファイルに印刷される)に出力される場合、および文字列からdatetimeを取得する場合に、文字列とdatetime さらに、タイムゾーンは、入力パラメータでタイムゾーンを受信しなかった場合、時刻と日付を扱う関数で使用されます。 + +**例えば** + +``` xml +Europe/Moscow +``` + +## tcp\_portgenericname {#server_configuration_parameters-tcp_port} + +TCPプロトコル経由でクライアントと通信するポート。 + +**例えば** + +``` xml +9000 +``` + +## tcp\_port\_secure {#server_configuration_parameters-tcp_port-secure} + +クライアン それを使用する [OpenSSL](#server_configuration_parameters-openssl) 設定。 + +**可能な値** + +正の整数。 + +**デフォルト値** + +``` xml +9440 +``` + +## mysql\_portgenericname {#server_configuration_parameters-mysql_port} + +ポートと通信すmysqlプロトコルです。 + +**可能な値** + +正の整数。 + +例えば + +``` xml +9004 +``` + +## tmp\_path {#server-settings-tmp_path} + +大規模なクエリを処理するための一時データへのパス。 + +!!! note "メモ" + 末尾のスラッシュは必須です。 + +**例えば** + +``` xml +/var/lib/clickhouse/tmp/ +``` + +## tmp\_policy {#server-settings-tmp-policy} + +からのポリシー [`storage_configuration`](../../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-multiple-volumes) 一時ファイルを保存する。 +設定されていない場合 [`tmp_path`](#server-settings-tmp_path) それ以外の場合は無視されます。 + +!!! note "メモ" + - `move_factor` は無視されます +- `keep_free_space_bytes` は無視されます +- `max_data_part_size_bytes` は無視されます +-そのポリシーには正確に一つのボリュームが必要です + +## uncompressed\_cache\_size {#server-settings-uncompressed_cache_size} + +テーブルエンジンによって使用される非圧縮データのキャッシュサイズ(バイト単位)。 [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md). + +サーバーの共有キャッシュがあります。 メモ このオプ [use\_uncompressed\_cache](../settings/settings.md#setting-use_uncompressed_cache) は有効です。 + +圧縮されていないキャッシュは、個々のケースで非常に短いクエリに有利です。 + +**例えば** + +``` xml +8589934592 +``` + +## user\_files\_path {#server_configuration_parameters-user_files_path} + +ユー テーブル関数で使用されます [ファイル()](../../sql_reference/table_functions/file.md). + +**例えば** + +``` xml +/var/lib/clickhouse/user_files/ +``` + +## users\_config {#users-config} + +以下を含むファイルへのパス: + +- ユーザー構成。 +- アクセス権。 +- 設定プロファイル。 +- クォータの設定。 + +**例えば** + +``` xml +users.xml +``` + +## zookeeper {#server-settings_zookeeper} + +ClickHouseとの対話を許可する設定が含まれています [ZooKeeper](http://zookeeper.apache.org/) クラスター + +ClickHouse用飼育係の保存メタデータのレプリカの使用時に再現します。 場合は複製のテーブルを使用していないので、このパラメータを省略することができます。 + +このセクショ: + +- `node` — ZooKeeper endpoint. You can set multiple endpoints. 
+
+    例えば:
+
+``` xml
+<node index="1">
+    <host>example_host</host>
+    <port>2181</port>
+</node>
+```
+
+    The `index` attribute specifies the node order when trying to connect to the ZooKeeper cluster.
+
+- `session_timeout` — Maximum timeout for the client session in milliseconds.
+- `root` — The [znode](http://zookeeper.apache.org/doc/r3.5.5/zookeeperOver.html#Nodes+and+ephemeral+nodes) これは、ClickHouseサーバーが使用するznodeのルートとして使われます。任意です。
+- `identity` — User and password, that can be required by ZooKeeper to give access to requested znodes. Optional.
+
+**設定例**
+
+``` xml
+<zookeeper>
+    <node>
+        <host>example1</host>
+        <port>2181</port>
+    </node>
+    <node>
+        <host>example2</host>
+        <port>2181</port>
+    </node>
+    <session_timeout_ms>30000</session_timeout_ms>
+    <operation_timeout_ms>10000</operation_timeout_ms>
+    <!-- オプション。ClickHouseが使用するznodeのルート。 -->
+    <root>/path/to/zookeeper/node</root>
+    <!-- オプション。ZooKeeperのACL資格情報。 -->
+    <identity>user:password</identity>
+</zookeeper>
+```
+
+**また見なさい**
+
+- [複製](../../engines/table_engines/mergetree_family/replication.md)
+- [ZooKeeperプログラマーズガイド](http://zookeeper.apache.org/doc/current/zookeeperProgrammers.html)
+
+## use\_minimalistic\_part\_header\_in\_zookeeper {#server-settings-use_minimalistic_part_header_in_zookeeper}
+
+ZooKeeperにおけるデータパートヘッダーの保存方法。
+
+この設定は `MergeTree` ファミリーにのみ適用されます。次のいずれかで指定できます:
+
+- `config.xml` ファイルの [merge\_tree](#server_configuration_parameters-merge_tree) セクションでグローバルに。
+
+    ClickHouseは、サーバー上のすべてのテーブルにこの設定を使用します。設定はいつでも変更できます。既存のテーブルは、設定が変更されると動作を変更します。
+
+- 各テーブルごとに。
+
+    テーブルを作成するときは、対応する [エンジンの設定](../../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-creating-a-table) を指定します。この設定を持つ既存のテーブルの動作は、グローバル設定が変更されても変わりません。
+
+**可能な値**
+
+- 0 — Functionality is turned off.
+- 1 — Functionality is turned on.
+
+`use_minimalistic_part_header_in_zookeeper = 1` の場合、[レプリケートされた](../../engines/table_engines/mergetree_family/replication.md) テーブルはデータパートのヘッダーを単一の `znode` でコンパクトに保存します。多数の列を含むテーブルでは、この保存方法によりZooKeeperに保存されるデータ量が大幅に削減されます。
+
+!!! attention "注意"
+    `use_minimalistic_part_header_in_zookeeper = 1` を適用した後は、この設定をサポートしないバージョンへClickHouseサーバーをダウングレードすることはできません。クラスター内のClickHouseサーバーをアップグレードする際は注意し、すべてのサーバーを一度にアップグレードしないでください。新しいバージョンのClickHouseは、テスト環境またはクラスターの一部のサーバーでテストする方が安全です。
+
+      Data part headers already stored with this setting can't be restored to their previous (non-compact) representation.
+
+**デフォルト値:** 0.
+
+## disable\_internal\_dns\_cache {#server-settings-disable-internal-dns-cache}
+
+内部DNSキャッシュを無効にします。Kubernetesのように、インフラが頻繁に変化するシステムでClickHouseを運用する場合に推奨されます。
+
+**デフォルト値:** 0.
+
+## dns\_cache\_update\_period {#server-settings-dns-cache-update-period}
+
+ClickHouse内部DNSキャッシュに保存されているIPアドレスの更新間隔(秒)。
+更新は、別のシステムスレッドで非同期に実行されます。
+
+**デフォルト値**: 15.
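+
+この二つのDNS関連設定を `config.xml` で指定する仮の例を示します(値は説明用の仮定です):
+
+``` xml
+<!-- 仮の例: インフラが頻繁に変わる環境では内部DNSキャッシュを無効にします。 -->
+<disable_internal_dns_cache>1</disable_internal_dns_cache>
+<!-- キャッシュを使う場合の更新間隔(秒)。disable_internal_dns_cache が 1 の場合は効果がありません。 -->
+<dns_cache_update_period>30</dns_cache_update_period>
+```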
+
+[元の記事](https://clickhouse.tech/docs/en/operations/server_configuration_parameters/settings/)
diff --git a/docs/ja/operations/settings/constraints_on_settings.md b/docs/ja/operations/settings/constraints_on_settings.md
deleted file mode 120000
index 4dacf908662..00000000000
--- a/docs/ja/operations/settings/constraints_on_settings.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/operations/settings/constraints_on_settings.md
\ No newline at end of file
diff --git a/docs/ja/operations/settings/constraints_on_settings.md b/docs/ja/operations/settings/constraints_on_settings.md
new file mode 100644
index 00000000000..db917fe96e8
--- /dev/null
+++ b/docs/ja/operations/settings/constraints_on_settings.md
@@ -0,0 +1,75 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 62
+toc_title: "\u8A2D\u5B9A\u306E\u5236\u7D04"
+---
+
+# 設定の制約 {#constraints-on-settings}
+
+設定に対する制約は、`user.xml` 設定ファイルの `profiles` セクションで定義でき、ユーザーが `SET` クエリで一部の設定を変更することを禁止します。
+制約は次のように定義されます:
+
+``` xml
+<profiles>
+  <user_name>
+    <constraints>
+      <setting_name_1>
+        <min>lower_boundary</min>
+      </setting_name_1>
+      <setting_name_2>
+        <max>upper_boundary</max>
+      </setting_name_2>
+      <setting_name_3>
+        <min>lower_boundary</min>
+        <max>upper_boundary</max>
+      </setting_name_3>
+      <setting_name_4>
+        <readonly/>
+      </setting_name_4>
+    </constraints>
+  </user_name>
+</profiles>
+```
+
+ユーザーが制約に違反しようとすると、例外がスローされ、設定は変更されません。
+サポートされている制約は次の三種類です: `min`, `max`, `readonly`. `min` と `max` の制約は数値設定の下限と上限を指定し、組み合わせて使用できます。`readonly` 制約を指定すると、ユーザーは対応する設定を一切変更できなくなります。
+
+**例:** `users.xml` に次の行が含まれているとします:
+
+``` xml
+<profiles>
+  <default>
+    <max_memory_usage>10000000000</max_memory_usage>
+    <force_index_by_date>0</force_index_by_date>
+    ...
+    <constraints>
+      <max_memory_usage>
+        <min>5000000000</min>
+        <max>20000000000</max>
+      </max_memory_usage>
+      <force_index_by_date>
+        <readonly/>
+      </force_index_by_date>
+    </constraints>
+  </default>
+</profiles>
+```
+
+以下のクエリはすべて例外をスローします:
+
+``` sql
+SET max_memory_usage=20000000001;
+SET max_memory_usage=4999999999;
+SET force_index_by_date=1;
+```
+
+``` text
+Code: 452, e.displayText() = DB::Exception: Setting max_memory_usage should not be greater than 20000000000.
+Code: 452, e.displayText() = DB::Exception: Setting max_memory_usage should not be less than 5000000000.
+Code: 452, e.displayText() = DB::Exception: Setting force_index_by_date should not be changed.
+```
+
+**注:** `default` プロファイルは特別に扱われます。`default` プロファイルに定義されたすべての制約はデフォルトの制約となり、ユーザーごとに明示的に上書きされるまで、すべてのユーザーを制限します。
+
+[元の記事](https://clickhouse.tech/docs/en/operations/settings/constraints_on_settings/)
diff --git a/docs/ja/operations/settings/index.md b/docs/ja/operations/settings/index.md
deleted file mode 120000
index fc3968d1f1e..00000000000
--- a/docs/ja/operations/settings/index.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/operations/settings/index.md
\ No newline at end of file
diff --git a/docs/ja/operations/settings/index.md b/docs/ja/operations/settings/index.md
new file mode 100644
index 00000000000..9eae8f24799
--- /dev/null
+++ b/docs/ja/operations/settings/index.md
@@ -0,0 +1,32 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_folder_title: Settings
+toc_priority: 55
+toc_title: "\u5C0E\u5165"
+---
+
+# 設定 {#settings}
+
+以下で説明するすべての設定には、複数の設定方法があります。
+設定はレイヤーで構成され、後続の各レイヤーが以前の設定を再定義します。
+
+優先順位の低い順に、設定を構成する方法:
+
+- サーバー構成ファイル `users.xml` での設定。
+
+    `<profiles>` 要素内に設定します(最初の方法の仮の例をこのリストの後に示します)。
+
+- セッション設定。
+
+    対話モードのClickHouseコンソールクライアントから `SET setting=value` を送信します。
+    同様に、HTTPプロトコルのClickHouseセッションでも使用できます。これを行うには、`session_id` HTTPパラメータを指定する必要があります。
+
+- クエリ設定。
+
+    - ClickHouseコンソールクライアントを非対話モードで起動する場合は、起動パラメータ `--setting=value` を設定します。
+    - HTTP APIを使用する場合は、CGIパラメータを渡します (`URL?setting_1=value&setting_2=value...`).
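+
+最初の方法(`users.xml` のプロファイル)で設定を指定する最小限の仮の例です(設定名 `max_threads` と値は説明用の仮定です):
+
+``` xml
+<!-- 仮の例: users.xml の profiles 要素内で設定を指定します。 -->
+<profiles>
+    <default>
+        <max_threads>8</max_threads>
+    </default>
+</profiles>
+```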
+
+このセクションでは、サーバー設定ファイルでのみ行える設定については説明しません。
+
+[元の記事](https://clickhouse.tech/docs/en/operations/settings/)
diff --git a/docs/ja/operations/settings/permissions_for_queries.md b/docs/ja/operations/settings/permissions_for_queries.md
deleted file mode 120000
index ce8473bf01c..00000000000
--- a/docs/ja/operations/settings/permissions_for_queries.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/operations/settings/permissions_for_queries.md
\ No newline at end of file
diff --git a/docs/ja/operations/settings/permissions_for_queries.md b/docs/ja/operations/settings/permissions_for_queries.md
new file mode 100644
index 00000000000..c69eccae696
--- /dev/null
+++ b/docs/ja/operations/settings/permissions_for_queries.md
@@ -0,0 +1,61 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 58
+toc_title: "\u30AF\u30A8\u30EA\u306E\u6A29\u9650"
+---
+
+# クエリの権限 {#permissions_for_queries}
+
+ClickHouseのクエリは、いくつかの種類に分けられます:
+
+1.  データ読み取りクエリ: `SELECT`, `SHOW`, `DESCRIBE`, `EXISTS`.
+2.  データ書き込みクエリ: `INSERT`, `OPTIMIZE`.
+3.  設定変更クエリ: `SET`, `USE`.
+4.  [DDL](https://en.wikipedia.org/wiki/Data_definition_language) クエリ: `CREATE`, `ALTER`, `RENAME`, `ATTACH`, `DETACH`, `DROP` `TRUNCATE`.
+5.  `KILL QUERY`.
+
+次の設定で、クエリの種類に応じてユーザー権限を調整します:
+
+- [読み取り専用](#settings_readonly) — Restricts permissions for all types of queries except DDL queries.
+- [allow\_ddl](#settings_allow_ddl) — Restricts permissions for DDL queries.
+
+`KILL QUERY` は、どの設定でも実行できます。
+
+## 読み取り専用 {#settings_readonly}
+
+データの読み取り、データの書き込み、設定の変更の権限を制限します。
+
+クエリが種類にどのように分けられるかは、[上記](#permissions_for_queries)を参照してください。
+
+可能な値:
+
+- 0 — All queries are allowed.
+- 1 — Only read data queries are allowed.
+- 2 — Read data and change settings queries are allowed.
+
+`readonly = 1` を設定すると、ユーザーは現在のセッションで `readonly` および `allow_ddl` 設定を変更できなくなります。
+
+[HTTPインターフェイス](../../interfaces/http.md)で `GET` メソッドを使用すると、`readonly = 1` が自動的に設定されます。データを変更するには `POST` メソッドを使用してください。
+
+`readonly = 1` を設定すると、ユーザーはすべての設定を変更できなくなります。特定の設定のみ変更を禁止する方法については、[設定の制約](constraints_on_settings.md)を参照してください。
+
+デフォルト値:0
+
+## allow\_ddl {#settings_allow_ddl}
+
+[DDL](https://en.wikipedia.org/wiki/Data_definition_language) クエリを許可または拒否します。
+
+クエリが種類にどのように分けられるかは、[上記](#permissions_for_queries)を参照してください。
+
+可能な値:
+
+- 0 — DDL queries are not allowed.
+- 1 — DDL queries are allowed.
+
+現在のセッションで `allow_ddl = 0` の場合、`SET allow_ddl = 1` を実行することはできません。
+
+デフォルト値:1
+
+[元の記事](https://clickhouse.tech/docs/en/operations/settings/permissions_for_queries/)
diff --git a/docs/ja/operations/settings/query_complexity.md b/docs/ja/operations/settings/query_complexity.md
deleted file mode 120000
index 9a9c6d975a9..00000000000
--- a/docs/ja/operations/settings/query_complexity.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/operations/settings/query_complexity.md
\ No newline at end of file
diff --git a/docs/ja/operations/settings/query_complexity.md b/docs/ja/operations/settings/query_complexity.md
new file mode 100644
index 00000000000..af498be5863
--- /dev/null
+++ b/docs/ja/operations/settings/query_complexity.md
@@ -0,0 +1,301 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 59
+toc_title: "\u30AF\u30A8\u30EA\u306E\u8907\u96D1\u3055\u306E\u5236\u9650"
+---
+
+# クエリの複雑さの制限 {#restrictions-on-query-complexity}
+
+クエリの複雑さに対する制限は設定の一部であり、ユーザーインターフェイスからの実行をより安全にするために使用されます。
+ほぼすべての制限は `SELECT` にのみ適用されます。
分散クエリ処理では、各サーバーに個別に制限が適用されます。
+
+ClickHouseは、各行ではなくデータパートごとに制限をチェックします。つまり、データパートのサイズ分だけ制限値を超える場合があります。
+
+“maximum amount of something” の制限は、値0を取ることができ、これは “unrestricted”(無制限)を意味します。
+ほとんどの制限には ‘overflow\_mode’ 設定もあり、制限を超えたときの動作を指定します。次の二つの値のいずれかを取ります: `throw` または `break`. 集計の制限(group\_by\_overflow\_mode)には、さらに `any` という値もあります。
+
+`throw` – Throw an exception (default).
+
+`break` – Stop executing the query and return the partial result, as if the source data ran out.
+
+`any (only for group_by_overflow_mode)` – Continuing aggregation for the keys that got into the set, but don't add new keys to the set.
+
+## max\_memory\_usage {#settings_max_memory_usage}
+
+単一のサーバーでクエリを実行するために使用するRAMの最大量。
+
+デフォルトの設定ファイルでは、最大値は10GBです。
+
+この設定では、使用可能なメモリの量やマシン上のメモリの総量は考慮されません。
+この制限は、単一のサーバー内の単一のクエリに適用されます。
+`SHOW PROCESSLIST` を使用すると、各クエリの現在のメモリ消費量を表示できます。
+さらに、各クエリに対してピークのメモリ消費が追跡され、ログに書き込まれます。
+
+特定の集計関数の状態に対するメモリ使用量は監視されません。
+
+`String` および `Array` 引数を取る集計関数 `min`, `max`, `any`, `anyLast`, `argMin`, `argMax` の状態に対するメモリ使用量は、完全には追跡されません。
+
+メモリ消費は、パラメータ `max_memory_usage_for_user` および `max_memory_usage_for_all_queries` によっても制限されます。
+
+## max\_memory\_usage\_for\_user {#max-memory-usage-for-user}
+
+単一のサーバー上でユーザーのクエリを実行するために使用するRAMの最大量。
+
+デフォルト値は [Settings.h](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/Core/Settings.h#L288) で定義されています。デフォルトでは、量は制限されません (`max_memory_usage_for_user = 0`)。
+
+[max\_memory\_usage](#settings_max_memory_usage) の説明も参照してください。
+
+## max\_memory\_usage\_for\_all\_queries {#max-memory-usage-for-all-queries}
+
+単一のサーバー上ですべてのクエリを実行するために使用するRAMの最大量。
+
+デフォルト値は [Settings.h](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/Core/Settings.h#L289) で定義されています。デフォルトでは、量は制限されません (`max_memory_usage_for_all_queries = 0`)。
+
+[max\_memory\_usage](#settings_max_memory_usage) の説明も参照してください。
+
+## max\_rows\_to\_read {#max-rows-to-read}
+
+次の制限は、各行ではなく各ブロックに対してチェックされます。つまり、制限は多少超過する可能性があります。
+複数のスレッドでクエリを実行する場合、次の制限は各スレッドに個別に適用されます。
+
+クエリの実行時にテーブルから読み取ることができる最大行数。
+
+## max\_bytes\_to\_read {#max-bytes-to-read}
+
+クエリの実行時にテーブルから読み取ることができる最大バイト数(圧縮されていないデータ)。
+
+## read\_overflow\_mode {#read-overflow-mode}
+
+読み取ったデータ量がいずれかの制限を超えた場合の動作: ‘throw’ または ‘break’. デフォルトは throw です。
+
+## max\_rows\_to\_group\_by {#settings-max-rows-to-group-by}
+
+集約から受け取る一意のキーの最大数。この設定により、集計時のメモリ消費量を制限できます。
+
+## group\_by\_overflow\_mode {#group-by-overflow-mode}
+
+集計の一意のキー数が制限を超えた場合の動作: ‘throw’, ‘break’、または ‘any’. デフォルトは throw です。
+‘any’ 値を使用すると、GROUP BYの近似を実行できます。この近似の品質は、データの統計的性質に依存します。
+
+## max\_bytes\_before\_external\_group\_by {#settings-max_bytes_before_external_group_by}
+
+外部メモリでの `GROUP BY` 句の実行を有効または無効にします。[外部メモリによるグループ化](../../sql_reference/statements/select.md#select-group-by-in-external-memory) を参照してください。
+
+可能な値:
+
+- 単一の [GROUP BY](../../sql_reference/statements/select.md#select-group-by-clause) 操作で使用できるRAMの最大量(バイト単位)。
+- 0 — 外部メモリでの `GROUP BY` は無効。
+
+デフォルト値:0.
+
+## max\_rows\_to\_sort {#max-rows-to-sort}
+
+ソート前の最大行数。これにより、ソート時のメモリ消費量を制限できます。
+
+## max\_bytes\_to\_sort {#max-bytes-to-sort}
+
+ソート前の最大バイト数。
+
+## sort\_overflow\_mode {#sort-overflow-mode}
+
+ソート前に受け取った行数がいずれかの制限を超えた場合の動作: ‘throw’ または ‘break’. デフォルトは throw です。
+
+## max\_result\_rows {#setting-max_result_rows}
+
+結果の行数を制限します。サブクエリでもチェックされ、分散クエリの一部を実行するリモートサーバーでもチェックされます。
+
+## max\_result\_bytes {#max-result-bytes}
+
+結果のバイト数を制限します。前の設定と同様です。
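+
+ここまでに挙げた読み取り・結果サイズの制限は、`users.xml` のプロファイルでまとめて設定できます。以下は仮の例です(設定値は説明用の仮定です):
+
+``` xml
+<!-- 仮の例: users.xml のプロファイルでクエリ複雑さの制限を設定します。 -->
+<profiles>
+    <default>
+        <max_rows_to_read>1000000000</max_rows_to_read>
+        <read_overflow_mode>break</read_overflow_mode>
+        <max_result_rows>100000</max_result_rows>
+    </default>
+</profiles>
+```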
デフォルトでは、投げる。 + +を使用して ‘break’ LIMITを使用するのと似ています。 `Break` ブロックレベルでのみ実行を中断します。 これは、返される行の量が [max\_result\_rows](#setting-max_result_rows)、の倍数 [max\_block\_size](settings.md#setting-max_block_size) そして依存します [max\_threads](settings.md#settings-max_threads). + +例えば: + +``` sql +SET max_threads = 3, max_block_size = 3333; +SET max_result_rows = 3334, result_overflow_mode = 'break'; + +SELECT * +FROM numbers_mt(100000) +FORMAT Null; +``` + +結果: + +``` text +6666 rows in set. ... +``` + +## max\_execution\_time {#max-execution-time} + +クエリの最大実行時間(秒)。 +このとき、ソート段階のいずれか、または集計関数のマージおよびファイナライズ時にはチェックされません。 + +## timeout\_overflow\_mode {#timeout-overflow-mode} + +クエリがより長く実行される場合の対処方法 ‘max\_execution\_time’: ‘throw’ または ‘break’. デフォルトでは、投げる。 + +## min\_execution\_speed {#min-execution-speed} + +毎秒行の最小の実行速度。 チェックすべてのデータブロックの場合 ‘timeout\_before\_checking\_execution\_speed’ 期限が切れる 実行速度が遅い場合は、例外がスローされます。 + +## min\_execution\_speed\_bytes {#min-execution-speed-bytes} + +実行バイト/秒の最小数。 チェックすべてのデータブロックの場合 ‘timeout\_before\_checking\_execution\_speed’ 期限が切れる 実行速度が遅い場合は、例外がスローされます。 + +## max\_execution\_speed {#max-execution-speed} + +秒あたりの実行行の最大数。 チェックすべてのデータブロックの場合 ‘timeout\_before\_checking\_execution\_speed’ 期限が切れる 実行速度が速い場合、実行速度が低下します。 + +## max\_execution\_speed\_bytes {#max-execution-speed-bytes} + +実行バイト/秒の最大数。 チェックすべてのデータブロックの場合 ‘timeout\_before\_checking\_execution\_speed’ 期限が切れる 実行速度が速い場合、実行速度が低下します。 + +## timeout\_before\_checking\_execution\_speed {#timeout-before-checking-execution-speed} + +実行速度が遅すぎないことをチェックする ‘min\_execution\_speed’指定された時間が経過した後、秒で)。 + +## max\_columns\_to\_read {#max-columns-to-read} + +単一のクエリでテーブルから読み取ることができる列の最大数。 クエリでより多くの列を読み取る必要がある場合は、例外がスローされます。 + +## max\_temporary\_columns {#max-temporary-columns} + +定数の列を含む、クエリを実行するときにramに同時に保持する必要がある一時的な列の最大数。 これよりも一時的な列が多い場合は、例外がスローされます。 + +## max\_temporary\_non\_const\_columns {#max-temporary-non-const-columns} + +同じものとして ‘max\_temporary\_columns’ しかし、一定の列を数えずに。 +定数の列は、クエリを実行するときにかなり頻繁に形成されますが、計算リソースはほぼゼロになります。 + +## max\_subquery\_depth {#max-subquery-depth} + +サブクエリの最大ネスト深度。 サブクエリが深い場合は、例外がスローされます。 デフォルトでは、100。 + +## max\_pipeline\_depth {#max-pipeline-depth} + +パイプラインの最大深さ。 クエリ処理中に各データブロックが通過する変換の数に対応します。 単一のサーバーの制限内で数えられます。 パイプラインの深さが大きい場合は、例外がスローされます。 デフォルトでは、1000。 + +## max\_ast\_depth {#max-ast-depth} + +クエリ構文ツリーの最大ネスト深さ。 超過すると、例外がスローされます。 +現時点では、解析中にチェックされず、クエリを解析した後でのみチェックされます。 つまり、構文解析中に深すぎる構文木を作成することはできますが、クエリは失敗します。 デフォルトでは、1000。 + +## max\_ast\_elements {#max-ast-elements} + +クエリ構文ツリー内の要素の最大数。 超過すると、例外がスローされます。 +以前の設定と同じように、クエリを解析した後にのみチェックされます。 デフォルトでは、50,000。 + +## max\_rows\_in\_set {#max-rows-in-set} + +サブクエリから作成されたin句のデータ-セットの最大行数。 + +## max\_bytes\_inset {#max-bytes-in-set} + +サブクエリから作成されたin句のセットによって使用される最大バイト数(圧縮されていないデータ)。 + +## set\_overflow\_mode {#set-overflow-mode} + +データの量がいずれかの制限を超えた場合の対処方法: ‘throw’ または ‘break’. デフォルトでは、投げる。 + +## max\_rows\_in\_distinct {#max-rows-in-distinct} + +DISTINCTを使用する場合の異なる行の最大数。 + +## max\_bytes\_in\_distinct {#max-bytes-in-distinct} + +DISTINCTを使用するときにハッシュテーブルで使用される最大バイト数。 + +## distinct\_overflow\_mode {#distinct-overflow-mode} + +データの量がいずれかの制限を超えた場合の対処方法: ‘throw’ または ‘break’. デフォルトでは、投げる。 + +## max\_rows\_tokenトランスファー {#max-rows-to-transfer} + +リモートサーバーに渡すか、global inを使用するときに一時テーブルに保存できる行の最大数。 + +## max\_bytes\_to\_transfer {#max-bytes-to-transfer} + +リモートサーバーに渡すか、global inを使用するときに一時テーブルに保存できる最大バイト数(圧縮されていないデータ)。 + +## transfer\_overflow\_mode {#transfer-overflow-mode} + +データの量がいずれかの制限を超えた場合の対処方法: ‘throw’ または ‘break’. 
デフォルトでは、投げる。 + +## max\_rows\_in\_join {#settings-max_rows_in_join} + +テーブルを結合するときに使用されるハッシュテーブルの行数を制限します。 + +この設定は以下に適用されます [SELECT … JOIN](../../sql_reference/statements/select.md#select-join) 業務の [参加](../../engines/table_engines/special/join.md) テーブルエンジン。 + +クエリに複数の結合が含まれている場合、clickhouseは中間結果ごとにこの設定をチェックします。 + +ClickHouseは、制限に達したときにさまざまなアクションを実行できます。 を使用 [join\_overflow\_mode](#settings-join_overflow_mode) アクションを選択する設定。 + +可能な値: + +- 正の整数。 +- 0 — Unlimited number of rows. + +デフォルト値:0. + +## max\_bytes\_in\_join {#settings-max_bytes_in_join} + +制限サイズをバイトのハッシュテーブルが参加す。 + +この設定は以下に適用されます [SELECT … JOIN](../../sql_reference/statements/select.md#select-join) 操作と [結合テーブルエンジン](../../engines/table_engines/special/join.md). + +クエリに結合が含まれている場合、clickhouseは中間結果ごとにこの設定をチェックします。 + +ClickHouseは、制限に達したときにさまざまなアクションを実行できます。 使用 [join\_overflow\_mode](#settings-join_overflow_mode) アクションを選択するための設定。 + +可能な値: + +- 正の整数。 +- 0 — Memory control is disabled. + +デフォルト値:0. + +## join\_overflow\_mode {#settings-join_overflow_mode} + +次の結合制限のいずれかに達したときにclickhouseが実行するアクションを定義します: + +- [max\_bytes\_in\_join](#settings-max_bytes_in_join) +- [max\_rows\_in\_join](#settings-max_rows_in_join) + +可能な値: + +- `THROW` — ClickHouse throws an exception and breaks operation. +- `BREAK` — ClickHouse breaks operation and doesn't throw an exception. + +デフォルト値: `THROW`. + +**また見なさい** + +- [JOIN句](../../sql_reference/statements/select.md#select-join) +- [結合テーブルエンジン](../../engines/table_engines/special/join.md) + +## max\_partitions\_per\_insert\_block {#max-partitions-per-insert-block} + +単一の挿入ブロック内のパーティションの最大数を制限します。 + +- 正の整数。 +- 0 — Unlimited number of partitions. + +デフォルト値:100。 + +**詳細** + +を挿入する際、データclickhouse計算パーティションの数に挿入されます。 パーティションの数が `max_partitions_per_insert_block`、ClickHouseは、次のテキストで例外をスローします: + +> “Too many partitions for single INSERT block (more than” +toString(max\_parts)+ “). The limit is controlled by ‘max\_partitions\_per\_insert\_block’ setting. A large number of partitions is a common misconception. It will lead to severe negative performance impact, including slow server startup, slow INSERT queries and slow SELECT queries. Recommended total number of partitions for a table is under 1000..10000. Please note, that partitioning is not intended to speed up SELECT queries (ORDER BY key is sufficient to make range queries fast). Partitions are intended for data manipulation (DROP PARTITION, etc).” + +[元の記事](https://clickhouse.tech/docs/en/operations/settings/query_complexity/) diff --git a/docs/ja/operations/settings/settings.md b/docs/ja/operations/settings/settings.md deleted file mode 120000 index 0c8df3cfc90..00000000000 --- a/docs/ja/operations/settings/settings.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/operations/settings/settings.md \ No newline at end of file diff --git a/docs/ja/operations/settings/settings.md b/docs/ja/operations/settings/settings.md new file mode 100644 index 00000000000..916677b1a4d --- /dev/null +++ b/docs/ja/operations/settings/settings.md @@ -0,0 +1,1202 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 60 +toc_title: "\u8A2D\u5B9A" +--- + +# 設定 {#settings} + +## distributed\_product\_mode {#distributed-product-mode} + +の動作を変更します。 [分散サブクエリ](../../sql_reference/statements/select.md). + +ClickHouse applies this setting when the query contains the product of distributed tables, i.e. when the query for a distributed table contains a non-GLOBAL subquery for the distributed table. 
+ +制限: + +- INおよびJOINサブクエリにのみ適用されます。 +- FROMセクションが複数のシャードを含む分散テーブルを使用する場合のみ。 +- サブクエリが複数のシャードを含む分散テーブルに関係する場合。 +- テーブル値には使用されません [リモート](../../sql_reference/table_functions/remote.md) 機能。 + +可能な値: + +- `deny` — Default value. Prohibits using these types of subqueries (returns the “Double-distributed in/JOIN subqueries is denied” 例外)。 +- `local` — Replaces the database and table in the subquery with local ones for the destination server (shard), leaving the normal `IN`/`JOIN.` +- `global` — Replaces the `IN`/`JOIN` クエリと `GLOBAL IN`/`GLOBAL JOIN.` +- `allow` — Allows the use of these types of subqueries. + +## enable\_optimize\_predicate\_expression {#enable-optimize-predicate-expression} + +述語プッシュダウンをオンにする `SELECT` クエリ。 + +プレディケートプッシュダウ場合を大幅に削減ネットワーク通信のために配布します。 + +可能な値: + +- 0 — Disabled. +- 1 — Enabled. + +デフォルト値:1。 + +使い方 + +次のクエリを検討します: + +1. `SELECT count() FROM test_table WHERE date = '2018-10-10'` +2. `SELECT count() FROM (SELECT * FROM test_table) WHERE date = '2018-10-10'` + +もし `enable_optimize_predicate_expression = 1` ClickHouseが適用されるため、これらのクエリの実行時間は等しくなります `WHERE` それを処理するときにサブクエリに。 + +もし `enable_optimize_predicate_expression = 0` その後、第二のクエリの実行時間がはるかに長いです。 `WHERE` サブクエリが終了した後、すべてのデータに句が適用されます。 + +## fallback\_to\_stale\_replicas\_for\_distributed\_queries {#settings-fallback_to_stale_replicas_for_distributed_queries} + +更新されたデータが利用できない場合、クエリを古いレプリカに強制的に適用します。 見る [複製](../../engines/table_engines/mergetree_family/replication.md). + +ClickHouseは、テーブルの古いレプリカから最も関連性の高いものを選択します。 + +実行するときに使用 `SELECT` レプリケートされたテーブルを指す分散テーブルから + +デフォルトでは、1(有効)。 + +## force\_index\_by\_date {#settings-force_index_by_date} + +インデックスを日付で使用できない場合は、クエリの実行を無効にします。 + +MergeTreeファミリーのテーブルで動作します。 + +もし `force_index_by_date=1` ClickHouseは、データ範囲の制限に使用できる日付キー条件がクエリにあるかどうかをチェックします。 適切な条件がない場合は、例外がスローされます。 ただし、読み取るデータの量が条件によって減少するかどうかはチェックされません。 たとえば、条件 `Date != ' 2000-01-01 '` テーブル内のすべてのデータに一致する場合でも許容されます(つまり、クエリを実行するにはフルスキャンが必要です)。 MergeTreeテーブルのデータ範囲の詳細については、次を参照してください [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md). + +## force\_primary\_key {#force-primary-key} + +オクエリの実行が指数付けにより、主キーはできません。 + +MergeTreeファミリーのテーブルで動作します。 + +もし `force_primary_key=1` ClickHouseは、データ範囲の制限に使用できる主キー条件がクエリにあるかどうかを確認します。 適切な条件がない場合は、例外がスローされます。 ただし、読み取るデータの量が条件によって減少するかどうかはチェックされません。 MergeTreeテーブルのデータ範囲の詳細については、 [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md). + +## format\_schema {#format-schema} + +このパラメーターは、次のようなスキーマ定義を必要とする形式を使用する場合に便利です [Cap'n Proto](https://capnproto.org/) または [Protobuf](https://developers.google.com/protocol-buffers/). 値は形式によって異なります。 + +## fsync\_metadata {#fsync-metadata} + +有効または無効 [fsync](http://pubs.opengroup.org/onlinepubs/9699919799/functions/fsync.html) 書くとき `.sql` ファイル 既定で有効になっています。 + +ことは、あってはならないことで無効にすることもできれば、サーバは、数百万の小さなテーブルが続々と生まれてくると破壊されました。 + +## enable\_http\_compression {#settings-enable_http_compression} + +HTTP要求に対する応答のデータ圧縮を有効または無効にします。 + +詳細については、 [HTTPインタフェースの説明](../../interfaces/http.md). + +可能な値: + +- 0 — Disabled. +- 1 — Enabled. + +デフォルト値:0. + +## http\_zlib\_compression\_level {#settings-http_zlib_compression_level} + +HTTP要求に対する応答のデータ圧縮レベルを次の場合に設定します [enable\_http\_compression=1](#settings-enable_http_compression). + +可能な値:1から9までの数字。 + +デフォルト値:3. + +## http\_native\_compression\_disable\_checksumming\_on\_decompress {#settings-http_native_compression_disable_checksumming_on_decompress} + +クライアントからのhttp postデータの解凍時にチェックサム検証を有効または無効にします。 にのみ使用clickhouseネイティブの圧縮フォーマット(使用されません `gzip` または `deflate`). 
+ +詳細については、 [HTTPインタフェースの説明](../../interfaces/http.md). + +可能な値: + +- 0 — Disabled. +- 1 — Enabled. + +デフォルト値:0. + +## send\_progress\_in\_http\_headers {#settings-send_progress_in_http_headers} + +有効または無効 `X-ClickHouse-Progress` HTTP応答ヘッダー `clickhouse-server` 応答。 + +詳細については、 [HTTPインタフェースの説明](../../interfaces/http.md). + +可能な値: + +- 0 — Disabled. +- 1 — Enabled. + +デフォルト値:0. + +## max\_http\_get\_redirects {#setting-max_http_get_redirects} + +HTTP GETリダイレクトホップの最大数を制限する [URL](../../engines/table_engines/special/url.md)-エンジンテーブル。 この設定は、両方のタイプのテーブルに適用されます。 [CREATE TABLE](../../query_language/create/#create-table-query) クエリとによって [url](../../sql_reference/table_functions/url.md) テーブル機能。 + +可能な値: + +- 任意の正の整数のホップ数。 +- 0 — No hops allowed. + +デフォルト値:0. + +## input\_format\_allow\_errors\_num {#settings-input_format_allow_errors_num} + +テキスト形式(csv、tsvなど)から読み取るときに許容されるエラーの最大数を設定します。). + +デフォルト値は0です。 + +常にペアそれと `input_format_allow_errors_ratio`. + +行の読み取り中にエラーが発生したが、エラーカウンタがまだ小さい場合 `input_format_allow_errors_num`、ClickHouseは、行を無視して、次のいずれかに移動します。 + +両方の場合 `input_format_allow_errors_num` と `input_format_allow_errors_ratio` 超過すると、ClickHouseは例外をスローします。 + +## input\_format\_allow\_errors\_ratio {#settings-input_format_allow_errors_ratio} + +テキスト形式(csv、tsvなど)から読み取るときに許可されるエラーの最大パーセントを設定します。). +エラーの割合は、0~1の間の浮動小数点数として設定されます。 + +デフォルト値は0です。 + +常にペアそれと `input_format_allow_errors_num`. + +行の読み取り中にエラーが発生したが、エラーカウンタがまだ小さい場合 `input_format_allow_errors_ratio`、ClickHouseは、行を無視して、次のいずれかに移動します。 + +両方の場合 `input_format_allow_errors_num` と `input_format_allow_errors_ratio` 超過すると、ClickHouseは例外をスローします。 + +## input\_format\_values\_interpret\_expressions {#settings-input_format_values_interpret_expressions} + +を有効または無効にしのsqlのパーサの場合の高速ストリームのパーサで構文解析のデータです。 この設定は、 [値](../../interfaces/formats.md#data-format-values) データ挿入時のフォーマット。 構文の解析の詳細については、以下を参照してください [構文](../../sql_reference/syntax.md) セクション。 + +可能な値: + +- 0 — Disabled. + + この場合、提供しなければなりません形式のデータです。 を見る [形式](../../interfaces/formats.md) セクション。 + +- 1 — Enabled. + + この場合、sql式を値として使用できますが、データの挿入はこの方法ではるかに遅くなります。 書式設定されたデータのみを挿入する場合、clickhouseは設定値が0であるかのように動作します。 + +デフォルト値:1。 + +使用例 + +を挿入 [DateTime](../../sql_reference/data_types/datetime.md) 異なる設定で値を入力します。 + +``` sql +SET input_format_values_interpret_expressions = 0; +INSERT INTO datetime_t VALUES (now()) +``` + +``` text +Exception on client: +Code: 27. DB::Exception: Cannot parse input: expected ) before: now()): (at row 1) +``` + +``` sql +SET input_format_values_interpret_expressions = 1; +INSERT INTO datetime_t VALUES (now()) +``` + +``` text +Ok. +``` + +最後のクエリは次のクエリと同じです: + +``` sql +SET input_format_values_interpret_expressions = 0; +INSERT INTO datetime_t SELECT now() +``` + +``` text +Ok. +``` + +## input\_format\_values\_deduce\_templates\_of\_expressions {#settings-input_format_values_deduce_templates_of_expressions} + +Sql式のテンプレート控除を有効または無効にします。 [値](../../interfaces/formats.md#data-format-values) フォーマット。 これにより、式の解析と解釈が可能になります。 `Values` 連続する行の式が同じ構造を持つ場合、はるかに高速です。 ClickHouseは、式のテンプレートを推測し、このテンプレートを使用して次の行を解析し、正常に解析された行のバッチで式を評価しようとします。 次のクエリの場合: + +``` sql +INSERT INTO test VALUES (lower('Hello')), (lower('world')), (lower('INSERT')), (upper('Values')), ... 
+``` + +- もし `input_format_values_interpret_expressions=1` と `format_values_deduce_templates_of_expressions=0` 式は行ごとに別々に解釈されます(これは、多数の行では非常に遅いです) +- もし `input_format_values_interpret_expressions=0` と `format_values_deduce_templates_of_expressions=1` 最初、二番目、および三番目の行の式は、テンプレートを使用して解析されます `lower(String)` 一緒に解釈されると、式はforth行が別のテンプレートで解析されます (`upper(String)`) +- もし `input_format_values_interpret_expressions=1` と `format_values_deduce_templates_of_expressions=1` -前の場合と同じですが、テンプレートを推論することができない場合は、式を別々に解釈することもできます。 + +既定で有効になっています。 + +## input\_format\_values\_accurate\_types\_of\_literals {#settings-input-format-values-accurate-types-of-literals} + +この設定は次の場合にのみ使用されます `input_format_values_deduce_templates_of_expressions = 1`. いくつかの列の式は同じ構造を持ちますが、異なる型の数値リテラルを含んでいます + +``` sql +(..., abs(0), ...), -- UInt64 literal +(..., abs(3.141592654), ...), -- Float64 literal +(..., abs(-1), ...), -- Int64 literal +``` + +この設定を有効にすると、clickhouseは実際のリテラルの型をチェックし、対応する型の式テンプレートを使用します。 場合によっては、式の評価が大幅に遅くなることがあります。 `Values`. +When disabled, ClickHouse may use more general type for some literals (e.g. `Float64` または `Int64` 代わりに `UInt64` のために `42`が、その原因となりオーバーフローおよび精度の問題です。 +既定で有効になっています。 + +## input\_format\_defaults\_for\_omitted\_fields {#session_settings-input_format_defaults_for_omitted_fields} + +実行するとき `INSERT` クエリでは、省略された入力列の値をそれぞれの列の既定値に置き換えます。 このオプションは [JSONEachRow](../../interfaces/formats.md#jsoneachrow), [CSV](../../interfaces/formats.md#csv) と [タブ区切り](../../interfaces/formats.md#tabseparated) フォーマット。 + +!!! note "メモ" + このオプショ それはサーバーの付加的な計算資源を消費し、性能を減らすことができる。 + +可能な値: + +- 0 — Disabled. +- 1 — Enabled. + +デフォルト値:1。 + +## input\_format\_tsv\_empty\_as\_default {#settings-input-format-tsv-empty-as-default} + +有効にすると、tsvの空の入力フィールドを既定値に置き換えます。 複雑な既定の式の場合 `input_format_defaults_for_omitted_fields` 有効にする必要があります。 + +デフォルトでは無効です。 + +## input\_format\_null\_as\_default {#settings-input-format-null-as-default} + +入力デー `NULL` しかし、対応する列のデータ型は `Nullable(T)` (テキスト入力形式の場合)。 + +## input\_format\_skip\_unknown\_fields {#settings-input-format-skip-unknown-fields} + +追加データの挿入のスキップを有効または無効にします。 + +書き込みデータclickhouseが例外をスローした場合入力データを含むカラム特別な権限は必要ありません使用します。 スキップが有効になっている場合、clickhouseは余分なデータを挿入せず、例外もスローしません。 + +対応フォーマット: + +- [JSONEachRow](../../interfaces/formats.md#jsoneachrow) +- [Csvwithnamesname](../../interfaces/formats.md#csvwithnames) +- [Tabseparatedwithnamesname](../../interfaces/formats.md#tabseparatedwithnames) +- [TSKV](../../interfaces/formats.md#tskv) + +可能な値: + +- 0 — Disabled. +- 1 — Enabled. + +デフォルト値:0. + +## input\_format\_import\_nested\_json {#settings-input_format_import_nested_json} + +を有効または無効にし、挿入のjsonデータをネストしたオブジェクト。 + +対応フォーマット: + +- [JSONEachRow](../../interfaces/formats.md#jsoneachrow) + +可能な値: + +- 0 — Disabled. +- 1 — Enabled. + +デフォルト値:0. + +また見なさい: + +- [入れ子構造の使用法](../../interfaces/formats.md#jsoneachrow-nested) と `JSONEachRow` フォーマット。 + +## input\_format\_with\_names\_use\_header {#settings-input-format-with-names-use-header} + +データ挿入時に列の順序の確認を有効または無効にします。 + +挿入パフォーマンスを向上させるには、入力データの列の順序がターゲットテーブルと同じであることが確実な場合は、このチェックを無効にすることを + +対応フォーマット: + +- [Csvwithnamesname](../../interfaces/formats.md#csvwithnames) +- [Tabseparatedwithnamesname](../../interfaces/formats.md#tabseparatedwithnames) + +可能な値: + +- 0 — Disabled. +- 1 — Enabled. + +デフォルト値:1。 + +## date\_time\_input\_format {#settings-date_time_input_format} + +日付と時刻のテキスト表現のパーサーを選択できます。 + +この設定は、以下には適用されません [日付と時刻の関数](../../sql_reference/functions/date_time_functions.md). + +可能な値: + +- `'best_effort'` — Enables extended parsing. 
+ + ClickHouseは基本を解析することができます `YYYY-MM-DD HH:MM:SS` 形式とすべて [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) 日付と時刻の形式。 例えば, `'2018-06-08T01:02:03.000Z'`. + +- `'basic'` — Use basic parser. + + ClickHouseは基本のみを解析できます `YYYY-MM-DD HH:MM:SS` フォーマット。 例えば, `'2019-08-20 10:18:56'`. + +デフォルト値: `'basic'`. + +また見なさい: + +- [DateTimeデータ型。](../../sql_reference/data_types/datetime.md) +- [日付と時刻を操作するための関数。](../../sql_reference/functions/date_time_functions.md) + +## join\_default\_strictness {#settings-join_default_strictness} + +デフォルトの厳密さを [結合句](../../sql_reference/statements/select.md#select-join). + +可能な値: + +- `ALL` — If the right table has several matching rows, ClickHouse creates a [デカルト積](https://en.wikipedia.org/wiki/Cartesian_product) 一致する行から。 これは正常です `JOIN` 標準SQLからの動作。 +- `ANY` — If the right table has several matching rows, only the first one found is joined. If the right table has only one matching row, the results of `ANY` と `ALL` 同じです。 +- `ASOF` — For joining sequences with an uncertain match. +- `Empty string` — If `ALL` または `ANY` クエリで指定されていない場合、ClickHouseは例外をスローします。 + +デフォルト値: `ALL`. + +## join\_any\_take\_last\_row {#settings-join_any_take_last_row} + +Join操作の動作を次のもので変更する `ANY` 厳密さ + +!!! warning "注意" + この設定は、 `JOIN` との操作 [参加](../../engines/table_engines/special/join.md) エンジンテーブル。 + +可能な値: + +- 0 — If the right table has more than one matching row, only the first one found is joined. +- 1 — If the right table has more than one matching row, only the last one found is joined. + +デフォルト値:0. + +また見なさい: + +- [JOIN句](../../sql_reference/statements/select.md#select-join) +- [結合テーブルエンジン](../../engines/table_engines/special/join.md) +- [join\_default\_strictness](#settings-join_default_strictness) + +## join\_use\_nulls {#join_use_nulls} + +のタイプを設定します。 [JOIN](../../sql_reference/statements/select.md) 行動。 際融合のテーブル、空細胞が表示される場合があります。 ClickHouseは、この設定に基づいて異なる塗りつぶします。 + +可能な値: + +- 0 — The empty cells are filled with the default value of the corresponding field type. +- 1 — `JOIN` 標準SQLと同じように動作します。 対応するフィールドの型は次のように変換されます [Nullable](../../sql_reference/data_types/nullable.md#data_type-nullable) 空のセルは [NULL](../../sql_reference/syntax.md). + +デフォルト値:0. + +## max\_block\_size {#setting-max_block_size} + +ClickHouseでは、データはブロック(列部分のセット)によって処理されます。 単一のブロックの内部処理サイクルは十分に効率的ですが、各ブロックに顕著な支出があります。 その `max_block_size` 設定は、テーブルからロードするブロックのサイズ(行数)の推奨値です。 ブロックサイズが小さすぎないようにして、各ブロックの支出はまだ目立つが、最初のブロックが迅速に処理された後に完了する制限付きのクエ 目標は、複数のスレッドで多数の列を抽出するときに大量のメモリを消費しないようにし、少なくともいくつかのキャッシュの局所性を維持する + +デフォルト値:65,536. + +ブロックのサイズ `max_block_size` ていないから読み込まれます。 少ないデータを取得する必要があることが明らかであれば、小さいブロックが処理されます。 + +## preferred\_block\_size\_bytes {#preferred-block-size-bytes} + +同じ目的のために使用される `max_block_size` しかし、ブロック内の行数に適応させることによって、推奨されるブロックサイズをバイト単位で設定します。 +ただし、ブロックサイズは `max_block_size` 行。 +デフォルト:1,000,000。 mergetreeエンジンから読み取る場合にのみ機能します。 + +## merge\_tree\_min\_rows\_for\_concurrent\_read {#setting-merge-tree-min-rows-for-concurrent-read} + +Aのファイルから読み込まれる行の数 [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md) テーブルを超え `merge_tree_min_rows_for_concurrent_read` その後ClickHouseしようとして行な兼職の状況からの読み出しこのファイルに複数のスレッド)。 + +可能な値: + +- 任意の正の整数。 + +デフォルト値:163840. + +## merge\_tree\_min\_bytes\_for\_concurrent\_read {#setting-merge-tree-min-bytes-for-concurrent-read} + +ファイルから読み込むバイト数 [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md)-エンジンテーブル超え `merge_tree_min_bytes_for_concurrent_read` そのClickHouseを同時に読みこのファイルから複数のスレッド)。 + +可能な値: + +- 任意の正の整数。 + +デフォルト値:251658240. 
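+
+以下は、この設定をセッション単位で変更し、現在の値を確認する簡単なスケッチです。指定している値はあくまで説明用の仮のものです: +
+``` sql
+-- 並行読み取りのしきい値をセッション単位で変更する(値は説明用の例)
+SET merge_tree_min_bytes_for_concurrent_read = 134217728;
+
+-- 現在の設定値を確認する
+SELECT name, value
+FROM system.settings
+WHERE name = 'merge_tree_min_bytes_for_concurrent_read';
+```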
+ +## merge\_tree\_min\_rows\_for\_seek {#setting-merge-tree-min-rows-for-seek} + +ファイル内で読み込まれる二つのデータブロック間の距離がより小さい場合 `merge_tree_min_rows_for_seek` その後、ClickHouseはファイルをシークしませんが、データを順次読み取ります。 + +可能な値: + +- 任意の正の整数。 + +デフォルト値:0. + +## merge\_tree\_min\_bytes\_for\_seek {#setting-merge-tree-min-bytes-for-seek} + +ファイル内で読み込まれる二つのデータブロック間の距離がより小さい場合 `merge_tree_min_bytes_for_seek` その後、ClickHouseは両方のブロックを含むファイルの範囲を順次読み取り、余分なシークを避けます。 + +可能な値: + +- 任意の正の整数。 + +デフォルト値:0. + +## merge\_tree\_coarse\_index\_granularitycomment {#setting-merge-tree-coarse-index-granularity} + +する場合のデータclickhouseチェックのデータにファイルです。 まclickhouseが必要なキーの一部の範囲、とりわけこの範囲を `merge_tree_coarse_index_granularity` 必要なキーを再帰的に検索します。 + +可能な値: + +- 任意の正の偶数の整数。 + +デフォルト値:8. + +## merge\_tree\_max\_rows\_to\_use\_cache {#setting-merge-tree-max-rows-to-use-cache} + +ClickHouseはより多くを読むべきであれば `merge_tree_max_rows_to_use_cache` あるクエリの行では、圧縮されていないブロックのキャッシュは使用されません。 + +のキャッシュされた、圧縮解除されたブロックの店舗データを抽出したためます。 clickhouseこのキャッシュの高速化対応小の繰り返します。 この設定は、大量のデータを読み取るクエリによってキャッシュが破棄されるのを防ぎます。 その [uncompressed\_cache\_size](../server_configuration_parameters/settings.md#server-settings-uncompressed_cache_size) サーバー設定は、非圧縮ブロックのキャッシュのサイズを定義します。 + +可能な値: + +- 任意の正の整数。 + +Default value: 128 ✕ 8192. + +## merge\_tree\_max\_bytes\_to\_use\_cache {#setting-merge-tree-max-bytes-to-use-cache} + +ClickHouseはより多くを読むべきであれば `merge_tree_max_bytes_to_use_cache` バイトあるクエリでは、圧縮されていないブロックのキャッシュは使用されません。 + +のキャッシュされた、圧縮解除されたブロックの店舗データを抽出したためます。 clickhouseこのキャッシュの高速化対応小の繰り返します。 この設定は、大量のデータを読み取るクエリによってキャッシュが破棄されるのを防ぎます。 その [uncompressed\_cache\_size](../server_configuration_parameters/settings.md#server-settings-uncompressed_cache_size) サーバー設定は、非圧縮ブロックのキャッシュのサイズを定義します。 + +可能な値: + +- 任意の正の整数。 + +デフォルト値:2013265920. + +## min\_bytes\_to\_use\_direct\_io {#settings-min-bytes-to-use-direct-io} + +記憶域ディスクへの直接i/oアクセスを使用するために必要な最小データ量。 + +ClickHouseこの設定からデータを読み込むときます。 読み取られるすべてのデータの合計ストレージボリュームが `min_bytes_to_use_direct_io` ディスクからデータを読み取ります。 `O_DIRECT` オプション。 + +可能な値: + +- 0 — Direct I/O is disabled. +- 正の整数。 + +デフォルト値:0. + +## log\_queries {#settings-log-queries} + +クエリログの設定。 + +この設定でclickhouseに送信されたクエリは、次のルールに従ってログに記録されます。 [クエリーログ](../server_configuration_parameters/settings.md#server_configuration_parameters-query-log) サーバー構成パラメータ。 + +例えば: + +``` text +log_queries=1 +``` + +## log\_query\_threads {#settings-log-query-threads} + +クエリスレッドログの設定。 + +この設定でclickhouseによって実行されたクエリのスレッドは、以下のルールに従ってログに記録されます [query\_thread\_log](../server_configuration_parameters/settings.md#server_configuration_parameters-query-thread-log) サーバー構成パラメータ。 + +例えば: + +``` text +log_query_threads=1 +``` + +## max\_insert\_block\_size {#settings-max_insert_block_size} + +テーブルに挿入するために形成するブロックのサイズ。 +この設定は、サーバーがブロックを形成する場合にのみ適用されます。 +たとえば、httpインターフェイスを介した挿入の場合、サーバーはデータ形式を解析し、指定されたサイズのブロックを形成します。 +しかし、clickhouse-clientを使用すると、クライアントはデータ自体を解析し、 ‘max\_insert\_block\_size’ サーバー上の設定は、挿入されたブロックのサイズには影響しません。 +この設定は、select後に形成されるのと同じブロックを使用してデータが挿入されるため、insert selectを使用する場合にも目的がありません。 + +デフォルト値:1,048,576. + +デフォルトは、 `max_block_size`. この理由は、特定のテーブルエンジン (`*MergeTree`)挿入された各ブロックのディスク上にデータ部分を形成する。 同様に, `*MergeTree` テーブルデータを並べ替え時の挿入やるのに十分な大きさのブロックサイズを選別データにアプリです。 + +## max\_replica\_delay\_for\_distributed\_queries {#settings-max_replica_delay_for_distributed_queries} + +分散クエリの遅延レプリカを無効にします。 見る [複製](../../engines/table_engines/mergetree_family/replication.md). + +時間を秒単位で設定します。 レプリカが設定値よりも遅れている場合、このレプリカは使用されません。 + +デフォルト値:300. 
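+
+以下は、この設定をセッション単位で変更する簡単なスケッチです。値はあくまで説明用の仮のものです: +
+``` sql
+-- 120秒以上遅延しているレプリカをクエリ対象から除外する(値は説明用の例)
+SET max_replica_delay_for_distributed_queries = 120;
+```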
+ +実行するときに使用 `SELECT` レプリケートされたテーブルを指す分散テーブルから + +## max\_threads {#settings-max_threads} + +最大の問合せ処理のスレッドを除き、スレッドの取得のためのデータからリモートサーバーの ‘max\_distributed\_connections’ パラメータ)。 + +このパラメータに適用されるスレッドは、それらのスレッドが同じ段階での問合せ処理パイプライン。 +たとえば、テーブルから読み取るときに、関数を使用して式を評価できる場合は、whereとfilterを使用し、少なくともusingを使用してgroup byを並列に事前集計しま ‘max\_threads’ その後、スレッドの数 ‘max\_threads’ 使用されます。 + +デフォルト値:物理cpuコアの数。 + +通常、一度にサーバーで実行されるselectクエリが少ない場合は、このパラメーターを実際のプロセッサコア数より少し小さい値に設定します。 + +制限があるためすぐに完了するクエリの場合は、以下を設定できます ‘max\_threads’. たとえば、必要な数のエントリがすべてのブロックにあり、max\_threads=8の場合、8つのブロックが取得されますが、読み込むだけで十分です。 + +小さい `max_threads` 値は、消費されるメモリ量が少ない。 + +## max\_insert\_threads {#settings-max-insert-threads} + +実行するスレッドの最大数 `INSERT SELECT` クエリ。 + +可能な値: + +- 0 (or 1) — `INSERT SELECT` 並列実行なし。 +- 正の整数。 1より大きい。 + +デフォルト値:0. + +並列 `INSERT SELECT` のみ有効です。 `SELECT` パートは並列に実行されます。 [max\_threads](#settings-max_threads) 設定。 +値を大きくすると、メモリ使用量が増加します。 + +## max\_compress\_block\_size {#max-compress-block-size} + +テーブルへの書き込み用に圧縮する前の非圧縮データのブロックの最大サイズ。 デフォルトでは、1,048,576(1mib)。 サイズが縮小されると、圧縮率が大幅に低下し、キャッシュの局所性のために圧縮および圧縮解凍速度がわずかに増加し、メモリ消費が減少する。 通常、この設定を変更する理由はありません。 + +圧縮のためのブロック(バイトからなるメモリの塊)とクエリ処理のためのブロック(テーブルからの行のセット)を混同しないでください。 + +## min\_compress\_block\_size {#min-compress-block-size} + +のために [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md)"テーブル。 削減のため、遅延が処理クエリーのブロックの圧縮を書くとき、次のマークがそのサイズは少なくとも ‘min\_compress\_block\_size’. デフォルトでは、65,536。 + +圧縮されていないデータが以下の場合、ブロックの実際のサイズ ‘max\_compress\_block\_size’、この値よりも小さく、一つのマークのためのデータの量よりも小さくありません。 + +例を見よう。 それを仮定する ‘index\_granularity’ テーブルの作成中に8192に設定されました。 + +私たちはuint32型の列(値あたり4バイト)を書いています。 8192行を書き込む場合、合計は32kbのデータになります。 min\_compress\_block\_size=65,536以降、圧縮ブロックは二つのマークごとに形成されます。 + +私たちは、文字列型(値あたり60バイトの平均サイズ)でurl列を書いています。 8192行を書き込む場合、平均はデータの500kbよりわずかに小さくなります。 これは65,536以上であるため、各マークに圧縮ブロックが形成されます。 この場合、ディスクからシングルマークの範囲でデータを読み取るとき、余分なデータは解凍されません。 + +通常、この設定を変更する理由はありません。 + +## max\_query\_size {#settings-max_query_size} + +SQLパーサーで解析するためにRAMに取り込むことができるクエリの最大部分。 +INSERTクエリには、この制限に含まれていない別のストリームパーサー(O(1)RAMを消費する)によって処理されるINSERTのデータも含まれています。 + +デフォルト値:256kib. + +## interactive\_delay {#interactive-delay} + +区間マイクロ秒単位で確認を行うための要求実行中止となり、送信を行います。 + +デフォルト値:100,000(キャンセルのチェックを行い、進捗を毎秒十回送信します)。 + +## connect\_timeout,receive\_timeout,send\_timeout {#connect-timeout-receive-timeout-send-timeout} + +クライアントとの通信に使用されるソケットの秒単位のタイムアウト。 + +デフォルト値:10、300、300。 + +## cancel\_http\_readonly\_queries\_on\_client\_close {#cancel-http-readonly-queries-on-client-close} + +Cancels HTTP read-only queries (e.g. SELECT) when a client closes the connection without waiting for the response. + +デフォルト値:0 + +## poll\_interval {#poll-interval} + +指定された秒数の待機ループでロックします。 + +デフォルト値:10. + +## max\_distributed\_connections {#max-distributed-connections} + +単一の分散テーブルへの単一クエリの分散処理のためのリモートサーバーとの同時接続の最大数。 クラスター内のサーバー数以上の値を設定することをお勧めします。 + +デフォルト値:1024。 + +次のパラメータは、分散テーブルを作成するとき(およびサーバーを起動するとき)にのみ使用されるため、実行時に変更する必要はありません。 + +## distributed\_connections\_pool\_size {#distributed-connections-pool-size} + +単一の分散テーブルへのすべてのクエリの分散処理のためのリモートサーバーとの同時接続の最大数。 クラスター内のサーバー数以上の値を設定することをお勧めします。 + +デフォルト値:1024。 + +## connect\_timeout\_with\_failover\_ms {#connect-timeout-with-failover-ms} + +分散テーブルエンジンのリモートサーバーに接続するためのタイムアウト(ミリ秒)。 ‘shard’ と ‘replica’ セクションはクラスター定義で使用されます。 +失敗した場合は、さまざまなレプリカへの接続を何度か試行します。 + +デフォルト値:50。 + +## connections\_with\_failover\_max\_tries {#connections-with-failover-max-tries} + +分散テーブルエンジンの各レプリカでの接続試行の最大数。 + +デフォルト値:3. 
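+
+以下は、フェイルオーバー関連の接続設定をセッション単位で調整する簡単なスケッチです。値はあくまで説明用の仮のものです: +
+``` sql
+-- レプリカへの接続タイムアウトと接続試行回数を調整する(値は説明用の例)
+SET connect_timeout_with_failover_ms = 100;
+SET connections_with_failover_max_tries = 5;
+
+-- 現在の値を確認する
+SELECT name, value
+FROM system.settings
+WHERE name LIKE '%failover%';
+```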
+ +## 極端な {#extremes} + +極値(クエリ結果の列の最小値と最大値)をカウントするかどうか。 0または1を受け入れます。 デフォルトでは、0(無効)。 +詳細については、以下を参照してください “Extreme values”. + +## use\_uncompressed\_cache {#setting-use_uncompressed_cache} + +非圧縮ブロックのキャッシュを使用するかどうか。 0または1を受け入れます。 デフォルトでは、0(無効)。 +圧縮されていないキャッシュ(mergetreeファミリーのテーブルのみ)を使用すると、多数の短いクエリを処理するときに待ち時間が大幅に短縮され、スループット この設定を有効にユーザーに送信頻繁に短います。 また、に注意を払う [uncompressed\_cache\_size](../server_configuration_parameters/settings.md#server-settings-uncompressed_cache_size) configuration parameter (only set in the config file) – the size of uncompressed cache blocks. By default, it is 8 GiB. The uncompressed cache is filled in as needed and the least-used data is automatically deleted. + +少なくとも大量のデータ(百万行以上)を読み取るクエリの場合、圧縮されていないキャッシュは自動的に無効になり、本当に小さなクエリの容量を節 これは保つことができることを意味する ‘use\_uncompressed\_cache’ 常に1に設定します。 + +## replace\_running\_query {#replace-running-query} + +HTTPインターフェイスを使用する場合、 ‘query\_id’ 変数は渡すことができます。 これは、クエリ識別子として機能する任意の文字列です。 +同じユーザーからのクエリが同じ場合 ‘query\_id’ この時点で既に存在している場合、その動作は ‘replace\_running\_query’ パラメータ。 + +`0` (default) – Throw an exception (don't allow the query to run if a query with the same ‘query\_id’ すでに実行されている)。 + +`1` – Cancel the old query and start running the new one. + +Yandexの。Metricaこのパラメータセットが1の実施のための提案のための分割ます。 次の文字を入力した後、古いクエリがまだ完了していない場合は、キャンセルする必要があります。 + +## stream\_flush\_interval\_ms {#stream-flush-interval-ms} + +作品のテーブルストリーミングの場合はタイムアウトした場合、またはスレッドを生成す [max\_insert\_block\_size](#settings-max_insert_block_size) 行。 + +デフォルト値は7500です。 + +値が小さいほど、データはテーブルにフラッシュされる頻度が高くなります。 値が低すぎると、パフォーマンスが低下します。 + +## load\_balancing {#settings-load_balancing} + +分散クエリ処理に使用するレプリカ選択のアルゴリズムを指定します。 + +ClickHouse対応し、以下のようなアルゴリズムの選択のレプリカ: + +- [ランダム](#load_balancing-random) (デフォルトでは) +- [最寄りのホスト名](#load_balancing-nearest_hostname) +- [順番に](#load_balancing-in_order) +- [最初またはランダム](#load_balancing-first_or_random) + +### ランダム(デフォルト) {#load_balancing-random} + +``` sql +load_balancing = random +``` + +エラーの数は、各レプリカに対して数えられます。 クエリは、エラーが最も少なくレプリカに送信され、エラーがいくつかある場合は、そのうちの誰にも送信されます。 +レプリカに異なるデータがある場合は、異なるデータも取得します。 + +### 最寄りのホスト名 {#load_balancing-nearest_hostname} + +``` sql +load_balancing = nearest_hostname +``` + +The number of errors is counted for each replica. Every 5 minutes, the number of errors is integrally divided by 2. Thus, the number of errors is calculated for a recent time with exponential smoothing. If there is one replica with a minimal number of errors (i.e. errors occurred recently on the other replicas), the query is sent to it. If there are multiple replicas with the same minimal number of errors, the query is sent to the replica with a hostname that is most similar to the server's hostname in the config file (for the number of different characters in identical positions, up to the minimum length of both hostnames). 
+ +たとえば、example01-01-1とexample01-01-2.yandex.ru example01-01-1とexample01-02-2は二つの場所で異なりますが、一つの位置では異なります。 +この方法はプリミティブに思えるかもしれませんが、ネットワークトポロジに関する外部データを必要とせず、ipv6アドレスでは複雑なipアドレスを + +したがって、同等の複製がある場合は、名前によって最も近い複製が優先されます。 +また、同じサーバーにクエリを送信するときに、障害がなければ、分散クエリも同じサーバーに移動すると想定することもできます。 したがって、レプリカに異なるデータが配置されていても、クエリはほとんど同じ結果を返します。 + +### 順番に {#load_balancing-in_order} + +``` sql +load_balancing = in_order +``` + +エラーの数が同じレプリカは、構成で指定されているのと同じ順序でアクセスされます。 +この方法は適切なが分かっている場合は、正確にレプリカが好ましい。 + +### 最初またはランダム {#load_balancing-first_or_random} + +``` sql +load_balancing = first_or_random +``` + +このアルゴリズムは、セット内の最初のレプリカを選択します。 この効果のクロス-複製をトポロジーのセットアップ、ものなどが可能です。- + +その `first_or_random` アルゴリズムはの問題を解決します `in_order` アルゴリズムだ と `in_order` あるレプリカがダウンした場合、次のレプリカは二重の負荷を受け、残りのレプリカは通常のトラフィック量を処理します。 を使用する場合 `first_or_random` アルゴリズムは、負荷が均等にまだ利用可能なレプリカ間で分散されています。 + +## prefer\_localhost\_replica {#settings-prefer-localhost-replica} + +を有効/無効にしが好ましいのlocalhostレプリカ処理時に分布します。 + +可能な値: + +- 1 — ClickHouse always sends a query to the localhost replica if it exists. +- 0 — ClickHouse uses the balancing strategy specified by the [load\_balancing](#settings-load_balancing) 設定。 + +デフォルト値:1。 + +!!! warning "警告" + 使用する場合は、この設定を無効にします [max\_parallel\_replicas](#settings-max_parallel_replicas). + +## totals\_mode {#totals-mode} + +HAVINGが存在する場合の合計を計算する方法と、max\_rows\_to\_group\_byとgroup\_by\_overflow\_mode= ‘any’ 存在する。 +セクションを見る “WITH TOTALS modifier”. + +## totals\_auto\_threshold {#totals-auto-threshold} + +のしきい値 `totals_mode = 'auto'`. +セクションを見る “WITH TOTALS modifier”. + +## max\_parallel\_replicas {#settings-max_parallel_replicas} + +クエリを実行するときの各シャードのレプリカの最大数。 +一貫性(同じデータ分割の異なる部分を取得する)の場合、このオプションはサンプリングキーが設定されている場合にのみ機能します。 +レプリカの遅延は制御されません。 + +## コンパイル {#compile} + +を編集ます。 デフォルトでは、0(無効)。 + +コンパイルは、クエリ処理パイプラインの一部にのみ使用されます。 +この部分のパイプラインのためのクエリを実行するアによる展開の短サイクルinlining集計機能。 複数の単純な集計関数を使用するクエリでは、パフォーマンスの最大向上(まれに、最大で四倍高速)が見られます。 通常、パフォーマンスの向上はわずかです。 まれに、クエリの実行が遅くなることがあります。 + +## min\_count\_to\_compile {#min-count-to-compile} + +コンパイルを実行する前にコンパイルされたコードのチャンクを使用する回数。 デフォルトでは、3。 +For testing, the value can be set to 0: compilation runs synchronously and the query waits for the end of the compilation process before continuing execution. For all other cases, use values ​​starting with 1. Compilation normally takes about 5-10 seconds. +値が1以上の場合、コンパイルは別のスレッドで非同期に行われます。 結果は、現在実行中のクエリを含め、準備が整ったらすぐに使用されます。 + +コンパイルされたコードは、クエリで使用される集計関数とgroup by句のキーのタイプの組み合わせごとに必要です。 +The results of the compilation are saved in the build directory in the form of .so files. There is no restriction on the number of compilation results since they don't use very much space. Old results will be used after server restarts, except in the case of a server upgrade – in this case, the old results are deleted. + +## output\_format\_json\_quote\_64bit\_integers {#session_settings-output_format_json_quote_64bit_integers} + +値がtrueの場合、json\*int64およびuint64形式を使用するときに整数が引用符で囲まれます(ほとんどのjavascript実装との互換性のため)。 + +## format\_csv\_delimiter {#settings-format_csv_delimiter} + +CSVデータの区切り文字として解釈される文字。 デフォルトでは、区切り文字は `,`. + +## input\_format\_csv\_unquoted\_null\_literal\_as\_null {#settings-input_format_csv_unquoted_null_literal_as_null} + +FOR CSV入力形式では、引用符なしの解析を有効または無効にします `NULL` リテラルとして(シノニム `\N`). 
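+
+以下は、この設定の動作を示す簡単なスケッチです。テーブル名は説明用の仮のものです: +
+``` sql
+CREATE TABLE csv_null_demo (id UInt32, value Nullable(String)) ENGINE = Memory;
+
+SET input_format_csv_unquoted_null_literal_as_null = 1;
+
+-- 引用符なしの NULL は \N と同様に NULL として解釈される
+INSERT INTO csv_null_demo FORMAT CSV 1,NULL
+
+SELECT * FROM csv_null_demo;
+```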
+ +## output\_format\_csv\_crlf\_end\_of\_line {#settings-output-format-csv-crlf-end-of-line} + +UNIXスタイル(LF)の代わりにCSVでDOS/Windowsスタイルの行区切り(CRLF)を使用します。 + +## output\_format\_tsv\_crlf\_end\_of\_line {#settings-output-format-tsv-crlf-end-of-line} + +UNIXスタイル(LF)の代わりにTSVでDOC/Windowsスタイルの行区切り(CRLF)を使用します。 + +## insert\_quorum {#settings-insert_quorum} + +を決議の定足数を書き込みます. + +- もし `insert_quorum < 2` クォーラム書き込みは無効です。 +- もし `insert_quorum >= 2` クォーラム書き込みが有効になります。 + +デフォルト値:0. + +定足数書き込み + +`INSERT` 承ClickHouse管理を正しく書き込みデータの `insert_quorum` の間のレプリカの `insert_quorum_timeout`. 何らかの理由で書き込みが成功したレプリカの数に達しない場合 `insert_quorum` を書くのは失敗したとClickHouseを削除するに挿入したブロックからすべてのレプリカがデータがすでに記されています。 + +クォーラム内のすべてのレプリカは一貫性があります。 `INSERT` クエリ。 その `INSERT` シーケンスは線形化されます。 + +から書き込まれたデータを読み取る場合 `insert_quorum`、を使用することができ [select\_sequential\_consistency](#settings-select_sequential_consistency) オプション。 + +ClickHouseは例外を生成します + +- クエリの時点で利用可能なレプリカの数がクエリの時点でのレプリカの数より少ない場合 `insert_quorum`. +- 前のブロックがまだ挿入されていないときにデータを書き込もうとします。 `insert_quorum` レプリカの。 こうした状況が発生した場合、ユーザーしようとしを行う `INSERT` 前のものの前に `insert_quorum` 完了です。 + +また見なさい: + +- [insert\_quorum\_timeout](#settings-insert_quorum_timeout) +- [select\_sequential\_consistency](#settings-select_sequential_consistency) + +## insert\_quorum\_timeout {#settings-insert_quorum-timeout} + +書き込み数が定員タイムアウトを秒で指定します。 タイムアウトが経過し、まだ書き込みが行われていない場合、clickhouseは例外を生成し、クライアントは同じブロックまたは他のレプリカに同じブロック + +デフォルト値:60秒。 + +また見なさい: + +- [insert\_quorum](#settings-insert_quorum) +- [select\_sequential\_consistency](#settings-select_sequential_consistency) + +## select\_sequential\_consistency {#settings-select_sequential_consistency} + +シーケンシャル-コンシステ `SELECT` クエリ: + +可能な値: + +- 0 — Disabled. +- 1 — Enabled. + +デフォルト値:0. + +使い方 + +シーケンシャル一貫性が有効になっている場合、clickhouseはクライアントが `SELECT` 以前のすべてのデータを含むレプリカに対してのみ照会する `INSERT` 以下で実行されるクエリ `insert_quorum`. クライアントが部分レプリカを参照している場合、ClickHouseは例外を生成します。 SELECTクエリには、レプリカのクォーラムにまだ書き込まれていないデータは含まれません。 + +また見なさい: + +- [insert\_quorum](#settings-insert_quorum) +- [insert\_quorum\_timeout](#settings-insert_quorum_timeout) + +## insert\_deduplicate {#settings-insert-deduplicate} + +ブロックの重複排除を有効または無効にします。 `INSERT` (複製された\*テーブルの場合)。 + +可能な値: + +- 0 — Disabled. +- 1 — Enabled. + +デフォルト値:1。 + +デフォルトでは、レプリケートされたテーブルに `INSERT` ステートメントは重複排除されます(\[データレプリケーション\](../engines/table\_engines/mergetree\_family/replication.md)を参照)。 + +## 重複除外\_blocks\_in\_dependent\_materialized\_views {#settings-deduplicate-blocks-in-dependent-materialized-views} + +を有効または無効にし、重複排除圧縮をチェックを実現し意見を受け取るデータから複製\*ます。 + +可能な値: + + 0 — Disabled. + 1 — Enabled. + +デフォルト値:0. + +使い方 + +デフォルトで、重複排除圧縮を行わないための顕在化が行われは上流のソース。 +ソーステーブルの重複排除により挿入されたブロックがスキップされた場合、マテリアライズドビューには挿入されません。 この動作は、マテリアライズドビューに高度に集計されたデータを挿入できるようにするために存在します。 +同時に、この動作 “breaks” `INSERT` 冪等性 もし `INSERT` メインテーブルに成功したと `INSERT` into a materialized view failed (e.g. because of communication failure with Zookeeper) a client will get an error and can retry the operation. However, the materialized view won't receive the second insert because it will be discarded by deduplication in the main (source) table. The setting `deduplicate_blocks_in_dependent_materialized_views` この動作を変更できます。 再試行の際、マテリアライズドビューは繰り返しインサートを受け取り、重複排除チェックを単独で実行します, +ソーステーブルのチェック結果を無視すると、最初の失敗のために失われた行が挿入されます。 + +## max\_network\_bytes {#settings-max-network-bytes} + +クエリの実行時にネットワークを介して受信または送信されるデータ量をバイト単位で制限します。 この設定は、個々のクエリごとに適用されます。 + +可能な値: + +- 正の整数。 +- 0 — Data volume control is disabled. + +デフォルト値:0. 
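+
+以下は、単一クエリのネットワーク転送量を制限する簡単なスケッチです。値とホスト名は説明用の仮のものです: +
+``` sql
+-- 転送量を約 1 GiB に制限する(制限を超えると例外がスローされる)
+SET max_network_bytes = 1073741824;
+
+SELECT * FROM remote('example-host:9000', system, one);
+```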
+ +## max\_network\_bandwidth {#settings-max-network-bandwidth} + +ネットワーク上でのデータ交換の速度をバイト/秒で制限します。 この設定は、各クエリに適用されます。 + +可能な値: + +- 正の整数。 +- 0 — Bandwidth control is disabled. + +デフォルト値:0. + +## max\_network\_bandwidth\_for\_user {#settings-max-network-bandwidth-for-user} + +ネットワーク上でのデータ交換の速度をバイト/秒で制限します。 この設定は、単一ユーザーが実行するすべての同時実行クエリに適用されます。 + +可能な値: + +- 正の整数。 +- 0 — Control of the data speed is disabled. + +デフォルト値:0. + +## max\_network\_bandwidth\_for\_all\_users {#settings-max-network-bandwidth-for-all-users} + +ネットワーク上でデータが交換される速度をバイト/秒で制限します。 この設定が適用されるのはすべての同時走行に関するお問い合わせます。 + +可能な値: + +- 正の整数。 +- 0 — Control of the data speed is disabled. + +デフォルト値:0. + +## count\_distinct\_implementation {#settings-count_distinct_implementation} + +どちらを指定するか `uniq*` 機能は実行するのに使用されるべきです [COUNT(DISTINCT …)](../../sql_reference/aggregate_functions/reference.md#agg_function-count) 建設。 + +可能な値: + +- [uniq](../../sql_reference/aggregate_functions/reference.md#agg_function-uniq) +- [uniqCombined](../../sql_reference/aggregate_functions/reference.md#agg_function-uniqcombined) +- [uniqCombined64](../../sql_reference/aggregate_functions/reference.md#agg_function-uniqcombined64) +- [unihll12](../../sql_reference/aggregate_functions/reference.md#agg_function-uniqhll12) +- [ユニキャック](../../sql_reference/aggregate_functions/reference.md#agg_function-uniqexact) + +デフォルト値: `uniqExact`. + +## skip\_unavailable\_shards {#settings-skip_unavailable_shards} + +を有効または無効にし静キの不可欠片. + +ザ-シャーがある場合にはご利用できないそのすべてのレプリカのためご利用いただけません。 次の場合、レプリカは使用できません: + +- ClickHouseは何らかの理由でレプリカに接続できません。 + + レプリカに接続するとき、clickhouseはいくつかの試行を実行します。 すべてこれらの試みが失敗し、レプリカとはできます。 + +- レプリカはdnsで解決できません。 + + レプリカのホスト名をdnsで解決できない場合は、次のような状況を示すことができます: + + - レプリカのホストにdnsレコードがない。 たとえば、動的dnsを使用するシステムで発生する可能性があります, [Kubernetes](https://kubernetes.io) ダウンタイム中にノードが解けなくなる可能性があり、これはエラーではありません。 + + - 構成エラー。 clickhouse設定ファイルが含まれて間違ったホスト名. + +可能な値: + +- 1 — skipping enabled. + + シャードが使用できない場合、clickhouseは部分的なデータに基づいて結果を返し、ノードの可用性の問題は報告しません。 + +- 0 — skipping disabled. + + シャードが使用できない場合、clickhouseは例外をスローします。 + +デフォルト値:0. + +## optimize\_skip\_unused\_shards {#settings-optimize_skip_unused_shards} + +PREWHERE/WHEREでシャーディングキー条件を持つSELECTクエリの未使用シャードのスキップを有効または無効にします(データがシャーディングキーによって分散されてい + +デフォルト値:0 + +## force\_optimize\_skip\_unused\_shards {#settings-force_optimize_skip_unused_shards} + +を有効または無効にしクエリの実行の場合 [`optimize_skip_unused_shards`](#settings-optimize_skip_unused_shards) 未使用のシャードを有効にしてスキップすることはできません。 スキップが不可能で、設定が有効になっている場合は例外がスローされます。 + +可能な値: + +- 0-無効(スローしない) +- 1-テーブ +- 2-を無効にクエリの実行に関わらずshardingキー定義のテーブル + +デフォルト値:0 + +## force\_optimize\_skip\_unused\_shards\_no\_nested {#settings-force_optimize_skip_unused_shards_no_nested} + +リセット [`optimize_skip_unused_shards`](#settings-force_optimize_skip_unused_shards) 入れ子のため `Distributed` テーブル + +可能な値: + +- 1 — Enabled. +- 0 — Disabled. + +デフォルト値:0. + +## optimize\_throw\_if\_noop {#setting-optimize_throw_if_noop} + +例外のスローを有効または無効にします。 [OPTIMIZE](../../sql_reference/statements/misc.md#misc_operations-optimize) クエリはマージを実行しませんでした。 + +デフォルトでは, `OPTIMIZE` 何もしなかった場合でも正常に戻ります。 この設定では、これらの状況を区別し、例外メッセージの理由を取得できます。 + +可能な値: + +- 1 — Throwing an exception is enabled. +- 0 — Throwing an exception is disabled. + +デフォルト値:0. 
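+
+以下は、この設定の使い方を示す簡単なスケッチです。テーブル名は説明用の仮のものです: +
+``` sql
+SET optimize_throw_if_noop = 1;
+
+-- マージが実行されなかった場合、理由を含む例外がスローされる
+OPTIMIZE TABLE merge_demo FINAL;
+```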
+ +## distributed\_replica\_error\_half\_life {#settings-distributed_replica_error_half_life} + +- タイプ:秒 +- デフォルト値:60秒 + +分散テーブルのエラーをゼロにする速度を制御します。 レプリカがしばらく使用できず、5つのエラーが蓄積され、distributed\_replica\_error\_half\_lifeが1秒に設定されている場合、レプリカは最後のエラーから3秒後に通常の状態 + +また見なさい: + +- [分散テーブルエンジン](../../engines/table_engines/special/distributed.md) +- [distributed\_replica\_error\_cap](#settings-distributed_replica_error_cap) + +## distributed\_replica\_error\_cap {#settings-distributed_replica_error_cap} + +- タイプ:unsigned int +- デフォルト値:1000 + +各レプリカのエラー数がこの値に制限されるため、単一のレプリカによるエラーの蓄積が妨げられます。 + +また見なさい: + +- [分散テーブルエンジン](../../engines/table_engines/special/distributed.md) +- [distributed\_replica\_error\_half\_life](#settings-distributed_replica_error_half_life) + +## distributed\_directory\_monitor\_sleep\_time\_ms {#distributed_directory_monitor_sleep_time_ms} + +のための基礎間隔 [分散](../../engines/table_engines/special/distributed.md) データを送信する表エンジン。 実際の間隔は、エラーが発生した場合に指数関数的に増加します。 + +可能な値: + +- ミリ秒の正の整数の数。 + +デフォルト値:100ミリ秒。 + +## distributed\_directory\_monitor\_max\_sleep\_time\_ms {#distributed_directory_monitor_max_sleep_time_ms} + +のための最大間隔 [分散](../../engines/table_engines/special/distributed.md) データを送信する表エンジン。 インターバルセットの指数関数的な成長を制限する [distributed\_directory\_monitor\_sleep\_time\_ms](#distributed_directory_monitor_sleep_time_ms) 設定。 + +可能な値: + +- ミリ秒の正の整数の数。 + +デフォルト値:30000ミリ秒(30秒)。 + +## distributed\_directory\_monitor\_batch\_inserts {#distributed_directory_monitor_batch_inserts} + +挿入されたデータのバッチでの送信を有効/無効にします。 + +バッチ送信が有効になっている場合は、 [分散](../../engines/table_engines/special/distributed.md) テーブルエンジンをお送り複数のファイルの挿入データを移動するようになっていますの代わりに送信します。 一括送信の改善にクラスターの性能をより活用してサーバやネットワーク資源です。 + +可能な値: + +- 1 — Enabled. +- 0 — Disabled. + +デフォルト値:0. + +## os\_thread\_priority {#setting-os-thread-priority} + +優先度を設定します ([ニース](https://en.wikipedia.org/wiki/Nice_(Unix)))クエリを実行するスレッドの場合。 OSスケジューラは、使用可能な各CPUコアで実行する次のスレッドを選択するときにこの優先度を考慮します。 + +!!! warning "警告" + この設定を使用するには、以下を設定する必要があります。 `CAP_SYS_NICE` 機能。 その `clickhouse-server` パッケージ設定します。 一部の仮想環境では、以下の設定を行うことができません。 `CAP_SYS_NICE` 機能。 この場合, `clickhouse-server` 開始時にそれについてのメッセージを表示します。 + +可能な値: + +- 範囲内で値を設定できます `[-20, 19]`. + +低値が高い優先されます。 低いの糸 `nice` 優先度の値は、高い値を持つスレッドよりも頻繁に実行されます。 長時間実行される非インタラクティブクエリでは、短いインタラクティブクエリが到着したときにリソースをすばやく放棄することができるため、 + +デフォルト値:0. + +## query\_profiler\_real\_time\_period\_ns {#query_profiler_real_time_period_ns} + +の実際のクロックタイマーの期間を設定します。 [クエリプロファイラ](../../operations/optimizing_performance/sampling_query_profiler.md). リアルクロックタイマーカウント壁時計の時間。 + +可能な値: + +- ナノ秒単位の正の整数。 + + 推奨値: + + - 10000000 (100 times a second) nanoseconds and less for single queries. + - 1000000000 (once a second) for cluster-wide profiling. + +- タイマーを消すための0。 + +タイプ: [UInt64](../../sql_reference/data_types/int_uint.md). + +デフォルト値:1000000000ナノ秒(秒)。 + +また見なさい: + +- システム表 [trace\_log](../../operations/system_tables.md#system_tables-trace_log) + +## query\_profiler\_cpu\_time\_period\_ns {#query_profiler_cpu_time_period_ns} + +のcpuクロックタイマーの期間を設定します。 [クエリプロファイラ](../../operations/optimizing_performance/sampling_query_profiler.md). このタ + +可能な値: + +- ナノ秒の正の整数。 + + 推奨値: + + - 10000000 (100 times a second) nanoseconds and more for single queries. + - 1000000000 (once a second) for cluster-wide profiling. + +- タイマーを消すための0。 + +タイプ: [UInt64](../../sql_reference/data_types/int_uint.md). 
+ +デフォルト値:1000000000ナノ秒。 + +また見なさい: + +- システム表 [trace\_log](../../operations/system_tables.md#system_tables-trace_log) + +## allow\_introspection\_functions {#settings-allow_introspection_functions} + +ディスエーブルの有効 [introspections関数](../../sql_reference/functions/introspection.md) のためのクエリープロファイリング. + +可能な値: + +- 1 — Introspection functions enabled. +- 0 — Introspection functions disabled. + +デフォルト値:0. + +**また見なさい** + +- [クエリプ](../optimizing_performance/sampling_query_profiler.md) +- システム表 [trace\_log](../../operations/system_tables.md#system_tables-trace_log) + +## input\_format\_parallel\_parallel\_paralsing {#input-format-parallel-parsing} + +- タイプ:bool +- デフォルト値:true + +データ形式の並行解析を有効にします。 tsv、tksv、csvおよびjsoneachrow形式でのみサポートされています。 + +## min\_chunk\_bytes\_for\_parallel\_parall\_paral {#min-chunk-bytes-for-parallel-parsing} + +- タイプ:unsigned int +- デフォルト値:1mib + +各スレッドが並列に解析する最小チャンクサイズ(バイト単位)。 + +## output\_format\_avro\_codec {#settings-output_format_avro_codec} + +出力avroファイルに使用する圧縮コーデックを設定します。 + +タイプ:文字列 + +可能な値: + +- `null` — No compression +- `deflate` — Compress with Deflate (zlib) +- `snappy` — Compress with [てきぱき](https://google.github.io/snappy/) + +デフォルト値: `snappy` (利用可能な場合)または `deflate`. + +## output\_format\_avro\_sync\_interval {#settings-output_format_avro_sync_interval} + +出力avroファイルの同期マーカー間の最小データサイズ(バイト単位)を設定します。 + +タイプ:unsigned int + +使用可能な値:32(32バイト)-1073741824(1gib) + +デフォルト値:32768(32kib) + +## format\_avro\_schema\_registry\_url {#settings-format_avro_schema_registry_url} + +使用するスキーマレジストリurlを設定します [AvroConfluent](../../interfaces/formats.md#data-format-avro-confluent) 書式 + +タイプ:url + +デフォルト値:空 + +[元の記事](https://clickhouse.tech/docs/en/operations/settings/settings/) diff --git a/docs/ja/operations/settings/settings_profiles.md b/docs/ja/operations/settings/settings_profiles.md deleted file mode 120000 index 35d9747ad56..00000000000 --- a/docs/ja/operations/settings/settings_profiles.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/operations/settings/settings_profiles.md \ No newline at end of file diff --git a/docs/ja/operations/settings/settings_profiles.md b/docs/ja/operations/settings/settings_profiles.md new file mode 100644 index 00000000000..d9d2d1ff114 --- /dev/null +++ b/docs/ja/operations/settings/settings_profiles.md @@ -0,0 +1,71 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 61 +toc_title: "\u8A2D\u5B9A\u30D7\u30ED\u30D5\u30A1\u30A4\u30EB" +--- + +# 設定プロファイル {#settings-profiles} + +設定プロファイルは、同じ名前でグループ化された設定の集合です。 各clickhouseユーザプロファイル. +プロファイル内のすべての設定を適用するには、 `profile` 設定。 + +例えば: + +インストール `web` プロフィール + +``` sql +SET profile = 'web' +``` + +設定プロファイルは、user configファイルで宣言されます。 これは通常 `users.xml`. + +例えば: + +``` xml + + + + + + 8 + + + + + 1000000000 + 100000000000 + + 1000000 + any + + 1000000 + 1000000000 + + 100000 + 100000000 + break + + 600 + 1000000 + 15 + + 25 + 100 + 50 + + 2 + 25 + 50 + 100 + + 1 + + +``` + +この例では、: `default` と `web`. 
その `default` プロファイルには特別な目的があります。 つまり、 `default` オプションの設定デフォルトを設定します。 その `web` プロファイルは通常のプロファイルです。 `SET` HTTPクエリでURLパラメーターを照会または使用する。 + +設定プロファイルは相互に継承できます。 継承を使用するには、一つまたは複数を指定します `profile` プロファイルにリストされている他の設定の前に設定します。 ある設定が異なるプロファイルで定義されている場合は、最新の設定が使用されます。 + +[元の記事](https://clickhouse.tech/docs/en/operations/settings/settings_profiles/) diff --git a/docs/ja/operations/settings/settings_users.md b/docs/ja/operations/settings/settings_users.md deleted file mode 120000 index 3a6a7cf6948..00000000000 --- a/docs/ja/operations/settings/settings_users.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/operations/settings/settings_users.md \ No newline at end of file diff --git a/docs/ja/operations/settings/settings_users.md b/docs/ja/operations/settings/settings_users.md new file mode 100644 index 00000000000..5ab057efa54 --- /dev/null +++ b/docs/ja/operations/settings/settings_users.md @@ -0,0 +1,148 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 63 +toc_title: "\u30E6\u30FC\u30B6\u30FC\u8A2D\u5B9A" +--- + +# ユーザー設定 {#user-settings} + +その `users` のセクション `user.xml` 設定ファイルにユーザを設定します。 + +の構造 `users` セクション: + +``` xml + + + + + + + + + + + profile_name + + default + + + + + expression + + + + + + +``` + +### user\_name/パスワード {#user-namepassword} + +パスワードは、平文またはsha256(hex形式)で指定できます。 + +- 平文でパスワードを割り当てるには (**推奨しない**)、それを置く `password` 要素。 + + 例えば, `qwerty`. パスワードは空白のままにできます。 + + + +- SHA256ハッシュを使用してパスワードを割り当てるには、 `password_sha256_hex` 要素。 + + 例えば, `65e84be33532fb784c48129675f9eff3a682b27168c0ea744b2cf58ee02337c5`. + + シェルからパスワードを生成する方法の例: + + PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha256sum | tr -d '-' + + 結果の最初の行はパスワードです。 第二の行は、対応するsha256ハッシュです。 + + + +- MySQLクライアントとの互換性のために、passwordはダブルSHA1ハッシュで指定できます。 それを置く `password_double_sha1_hex` 要素。 + + 例えば, `08b4a0f1de6ad37da17359e592c8d74788a83eb0`. + + シェルからパスワードを生成する方法の例: + + PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha1sum | tr -d '-' | xxd -r -p | sha1sum | tr -d '-' + + 結果の最初の行はパスワードです。 第二の行は、対応するダブルsha1ハッシュです。 + +### user\_name/networks {#user-namenetworks} + +ユーザーがclickhouseサーバーに接続できるネットワークのリスト。 + +リストの各要素には、次のいずれかの形式があります: + +- `` — IP address or network mask. + + 例: `213.180.204.3`, `10.0.0.1/8`, `10.0.0.1/255.255.255.0`, `2a02:6b8::3`, `2a02:6b8::3/64`, `2a02:6b8::3/ffff:ffff:ffff:ffff::`. + +- `` — Hostname. + + 例えば: `example01.host.ru`. + + アクセスを確認するには、dnsクエリが実行され、返されたすべてのipアドレスがピアアドレスと比較されます。 + +- `` — Regular expression for hostnames. + + 例えば, `^example\d\d-\d\d-\d\.host\.ru$` + + アクセスを確認するには、 [DNS PTRクエリ](https://en.wikipedia.org/wiki/Reverse_DNS_lookup) ピアアドレスに対して実行され、指定された正規表現が適用されます。 次に、PTRクエリの結果に対して別のDNSクエリが実行され、すべての受信アドレスがピアアドレスと比較されます。 Regexpは$で終わることを強くお勧めします。 + +すべての結果のdnsの要求をキャッシュまでのサーバが再起動してしまいます。 + +**例** + +オープンアクセスのためのユーザーからネットワークのいずれかを指定し: + +``` xml +::/0 +``` + +!!! warning "警告" + この不安にオープンアクセスからネットワークを持っていない場合、ファイアウォールを適切に設定されたサーバーに直接接続されます。 + +オープンアクセスのみからlocalhostを指定し: + +``` xml +::1 +127.0.0.1 +``` + +### user\_name/プロファイル {#user-nameprofile} + +を割り当てることができる設定プロファイルをユーザーです。 設定プロファイルはの別のセクションで設定されます `users.xml` ファイル。 詳細については、 [設定のプロファイル](settings_profiles.md). + +### ユーザー名/クォータ {#user-namequota} + +クォータを使用すると、一定期間にわたってリソース使用量を追跡または制限できます。 クォータは、 `quotas` +のセクション `users.xml` 構成ファイル。 + +ユーザにクォータセットを割り当てることができます。 クォータ設定の詳細については、以下を参照してください [クォータ](../quotas.md#quotas). 
+ +### user\_name/データベース {#user-namedatabases} + +このセクションでは、clickhouseによって返される行を制限することができます `SELECT` 現在のユーザーが行うクエリは、基本的な行レベルのセキュリティを実装します。 + +**例えば** + +以下の構成力がユーザー `user1` の行だけを見ることができます `table1` の結果として `SELECT` クエリ、ここでの値 `id` フィールドは1000です。 + +``` xml + + + + + id = 1000 + + + + +``` + +その `filter` 結果として得られる任意の式を指定できます [UInt8](../../sql_reference/data_types/int_uint.md)-タイプ値。 通常、比較演算子と論理演算子が含まれます。 からの行 `database_name.table1` このユーザーに対して0のフィルター結果は返されません。 フィルタリングは `PREWHERE` 操作および無効化 `WHERE→PREWHERE` 最適化。 + +[元の記事](https://clickhouse.tech/docs/en/operations/settings/settings_users/) diff --git a/docs/ja/operations/system_tables.md b/docs/ja/operations/system_tables.md deleted file mode 120000 index c5701190dca..00000000000 --- a/docs/ja/operations/system_tables.md +++ /dev/null @@ -1 +0,0 @@ -../../en/operations/system_tables.md \ No newline at end of file diff --git a/docs/ja/operations/system_tables.md b/docs/ja/operations/system_tables.md new file mode 100644 index 00000000000..6df12d9c907 --- /dev/null +++ b/docs/ja/operations/system_tables.md @@ -0,0 +1,1097 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 52 +toc_title: "\u30B7\u30B9\u30C6\u30E0\u8868" +--- + +# システム表 {#system-tables} + +システムテーブルは、システムの機能の一部を実装し、システムの動作に関する情報へのアクセスを提供するために使用されます。 +システムテーブルは削除できません(ただし、デタッチを実行できます)。 +システムテーブ サーバーは起動時にすべてのシステムテーブルを作成します。 +システムテーブルは読み取り専用です。 +彼らはに位置しています ‘system’ データベース + +## システム。asynchronous\_metrics {#system_tables-asynchronous_metrics} + +バックグラウンドで定期的に計算される指標が含まれます。 例えば、使用中のramの量。 + +列: + +- `metric` ([文字列](../sql_reference/data_types/string.md)) — Metric name. +- `value` ([Float64](../sql_reference/data_types/float.md)) — Metric value. + +**例えば** + +``` sql +SELECT * FROM system.asynchronous_metrics LIMIT 10 +``` + +``` text +┌─metric──────────────────────────────────┬──────value─┐ +│ jemalloc.background_thread.run_interval │ 0 │ +│ jemalloc.background_thread.num_runs │ 0 │ +│ jemalloc.background_thread.num_threads │ 0 │ +│ jemalloc.retained │ 422551552 │ +│ jemalloc.mapped │ 1682989056 │ +│ jemalloc.resident │ 1656446976 │ +│ jemalloc.metadata_thp │ 0 │ +│ jemalloc.metadata │ 10226856 │ +│ UncompressedCacheCells │ 0 │ +│ MarkCacheFiles │ 0 │ +└─────────────────────────────────────────┴────────────┘ +``` + +**また見なさい** + +- [監視](monitoring.md) — Base concepts of ClickHouse monitoring. +- [システム。指標](#system_tables-metrics) — Contains instantly calculated metrics. +- [システム。イベント](#system_tables-events) — Contains a number of events that have occurred. +- [システム。metric\_log](#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` и `system.events`. + +## システム。クラスター {#system-clusters} + +についての情報が含まれてクラスターのコンフィグファイルをサーバーです。 + +列: + +- `cluster` (String) — The cluster name. +- `shard_num` (UInt32) — The shard number in the cluster, starting from 1. +- `shard_weight` (UInt32) — The relative weight of the shard when writing data. +- `replica_num` (UInt32) — The replica number in the shard, starting from 1. +- `host_name` (String) — The host name, as specified in the config. +- `host_address` (String) — The host IP address obtained from DNS. +- `port` (UInt16) — The port to use for connecting to the server. +- `user` (String) — The name of the user for connecting to the server. 
+- `errors_count` (UInt32)-このホストがレプリカに到達できなかった回数。 +- `estimated_recovery_time` (UInt32)-レプリカエラーカウントがゼロになるまで残された秒数で、正常に戻ったと見なされます。 + +ご注意ください `errors_count` クエリごとにクラスターに一度updatedされますが、 `estimated_recovery_time` オンデマンドで再計算されます。 したがって、ゼロ以外の場合があります `errors_count` とゼロ `estimated_recovery_time`、その次のクエリはゼロ `errors_count` また、エラーがないかのようにreplicaを使用してみてください。 + +**また見なさい** + +- [分散テーブルエンジン](../engines/table_engines/special/distributed.md) +- [distributed\_replica\_error\_cap設定](settings/settings.md#settings-distributed_replica_error_cap) +- [distributed\_replica\_error\_half\_life設定](settings/settings.md#settings-distributed_replica_error_half_life) + +## システム。列 {#system-columns} + +すべてのテーブルの列に関する情報を含みます。 + +このテーブルを使用して、次のような情報を取得できます。 [DESCRIBE TABLE](../sql_reference/statements/misc.md#misc-describe-table) 一度に複数のテーブルのクエリ。 + +その `system.columns` テーブルを含む以下のカラムのカラムタイプはブラケット): + +- `database` (String) — Database name. +- `table` (String) — Table name. +- `name` (String) — Column name. +- `type` (String) — Column type. +- `default_kind` (String) — Expression type (`DEFAULT`, `MATERIALIZED`, `ALIAS`)デフォルト値の場合、または空の文字列が定義されていない場合。 +- `default_expression` (String) — Expression for the default value, or an empty string if it is not defined. +- `data_compressed_bytes` (UInt64) — The size of compressed data, in bytes. +- `data_uncompressed_bytes` (UInt64) — The size of decompressed data, in bytes. +- `marks_bytes` (UInt64) — The size of marks, in bytes. +- `comment` (String) — Comment on the column, or an empty string if it is not defined. +- `is_in_partition_key` (UInt8) — Flag that indicates whether the column is in the partition expression. +- `is_in_sorting_key` (UInt8) — Flag that indicates whether the column is in the sorting key expression. +- `is_in_primary_key` (UInt8) — Flag that indicates whether the column is in the primary key expression. +- `is_in_sampling_key` (UInt8) — Flag that indicates whether the column is in the sampling key expression. + +## システム。貢献者 {#system-contributors} + +を含むに関する情報提供者が保持しています。 ランダムな順序ですべてのconstributors。 順序は、クエリの実行時にランダムです。 + +列: + +- `name` (String) — Contributor (author) name from git log. + +**例えば** + +``` sql +SELECT * FROM system.contributors LIMIT 10 +``` + +``` text +┌─name─────────────┐ +│ Olga Khvostikova │ +│ Max Vetrov │ +│ LiuYangkuan │ +│ svladykin │ +│ zamulla │ +│ Šimon Podlipský │ +│ BayoNet │ +│ Ilya Khomutov │ +│ Amy Krishnevsky │ +│ Loud_Scream │ +└──────────────────┘ +``` + +テーブルで自分自身を見つけるには、クエリを使用します: + +``` sql +SELECT * FROM system.contributors WHERE name='Olga Khvostikova' +``` + +``` text +┌─name─────────────┐ +│ Olga Khvostikova │ +└──────────────────┘ +``` + +## システム。データ {#system-databases} + +このテーブルを含む単一の文字列カラムと呼ばれ ‘name’ – the name of a database. +各データベースのサーバーについて知っていて対応するエントリの表に示す。 +このシステム表は実行のために使用されます `SHOW DATABASES` クエリ。 + +## システム。detached\_parts {#system_tables-detached_parts} + +についての情報が含まれて外部 [MergeTree](../engines/table_engines/mergetree_family/mergetree.md) テーブル。 その `reason` カラムを指定理由の一部でした。 ユーザーがデタッチしたパーツの場合、理由は空です。 このような部品は [ALTER TABLE ATTACH PARTITION\|PART](../query_language/query_language/alter/#alter_attach-partition) 司令部 の内容その他のカラムを参照 [システム。パーツ](#system_tables-parts). パート名が無効な場合、一部の列の値は次のようになります `NULL`. このような部分は、 [ALTER TABLE DROP DETACHED PART](../query_language/query_language/alter/#alter_drop-detached). + +## システム。辞書 {#system-dictionaries} + +外部辞書に関する情報が含まれます。 + +列: + +- `name` (String) — Dictionary name. +- `type` (String) — Dictionary type: Flat, Hashed, Cache. 
+- `origin` (String) — Path to the configuration file that describes the dictionary. +- `attribute.names` (Array(String)) — Array of attribute names provided by the dictionary. +- `attribute.types` (Array(String)) — Corresponding array of attribute types that are provided by the dictionary. +- `has_hierarchy` (UInt8) — Whether the dictionary is hierarchical. +- `bytes_allocated` (UInt64) — The amount of RAM the dictionary uses. +- `hit_rate` (Float64) — For cache dictionaries, the percentage of uses for which the value was in the cache. +- `element_count` (UInt64) — The number of items stored in the dictionary. +- `load_factor` (Float64) — The percentage filled in the dictionary (for a hashed dictionary, the percentage filled in the hash table). +- `creation_time` (DateTime) — The time when the dictionary was created or last successfully reloaded. +- `last_exception` (String) — Text of the error that occurs when creating or reloading the dictionary if the dictionary couldn't be created. +- `source` (String) — Text describing the data source for the dictionary. + +辞書によって使用されるメモリの量は、それに格納されているアイテムの数に比例しないことに注意してください。 くフラットおよびキャッシュされた辞書のすべてのメモリー細胞により、予告なしにご指定済みを問わずどのように辞書を実現する + +## システム。イベント {#system_tables-events} + +システムで発生したイベントの数に関する情報が含まれています。 たとえば、テーブルでは、次のように多くの `SELECT` ClickHouseサーバーの起動後にクエリが処理されました。 + +列: + +- `event` ([文字列](../sql_reference/data_types/string.md)) — Event name. +- `value` ([UInt64](../sql_reference/data_types/int_uint.md)) — Number of events occurred. +- `description` ([文字列](../sql_reference/data_types/string.md)) — Event description. + +**例えば** + +``` sql +SELECT * FROM system.events LIMIT 5 +``` + +``` text +┌─event─────────────────────────────────┬─value─┬─description────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐ +│ Query │ 12 │ Number of queries to be interpreted and potentially executed. Does not include queries that failed to parse or were rejected due to AST size limits, quota limits or limits on the number of simultaneously running queries. May include internal queries initiated by ClickHouse itself. Does not count subqueries. │ +│ SelectQuery │ 8 │ Same as Query, but only for SELECT queries. │ +│ FileOpen │ 73 │ Number of files opened. │ +│ ReadBufferFromFileDescriptorRead │ 155 │ Number of reads (read/pread) from a file descriptor. Does not include sockets. │ +│ ReadBufferFromFileDescriptorReadBytes │ 9931 │ Number of bytes read from file descriptors. If the file is compressed, this will show the compressed data size. │ +└───────────────────────────────────────┴───────┴────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ +``` + +**また見なさい** + +- [システム。asynchronous\_metrics](#system_tables-asynchronous_metrics) — Contains periodically calculated metrics. +- [システム。指標](#system_tables-metrics) — Contains instantly calculated metrics. +- [システム。metric\_log](#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` и `system.events`. +- [監視](monitoring.md) — Base concepts of ClickHouse monitoring. + +## システム。機能 {#system-functions} + +通常の関数と集計関数に関する情報が含まれます。 + +列: + +- `name`(`String`) – The name of the function. 
+- `is_aggregate`(`UInt8`) — Whether the function is aggregate. + +## システム。graphite\_retentions {#system-graphite-retentions} + +についての情報が含まれてパラメータ [graphite\_rollup](server_configuration_parameters/settings.md#server_configuration_parameters-graphite_rollup) これは、次の表で使用されます [\*グラフィットマージツリー](../engines/table_engines/mergetree_family/graphitemergetree.md) エンジン + +列: + +- `config_name` (文字列) - `graphite_rollup` パラメータ名。 +- `regexp` (String)-メトリック名のパターン。 +- `function` (String)-集計関数の名前。 +- `age` (UInt64)-データの最小経過時間(秒)。 +- `precision` (UInt64)-どのように正確に秒単位でデータの年齢を定義します。 +- `priority` (UInt16)-パターンの優先順位。 +- `is_default` (UInt8)-パターンがデフォルトであるかどうか。 +- `Tables.database` (Array(String))-使用するデータベーステーブルの名前の配列 `config_name` パラメータ。 +- `Tables.table` (Array(String))-使用するテーブル名の配列 `config_name` パラメータ。 + +## システム。マージ {#system-merges} + +MergeTreeファミリーのテーブルのマージおよび現在処理中のパーツの変更に関する情報を格納します。 + +列: + +- `database` (String) — The name of the database the table is in. +- `table` (String) — Table name. +- `elapsed` (Float64) — The time elapsed (in seconds) since the merge started. +- `progress` (Float64) — The percentage of completed work from 0 to 1. +- `num_parts` (UInt64) — The number of pieces to be merged. +- `result_part_name` (String) — The name of the part that will be formed as the result of merging. +- `is_mutation` (UInt8)-1このプロセスが部分突然変異の場合。 +- `total_size_bytes_compressed` (UInt64) — The total size of the compressed data in the merged chunks. +- `total_size_marks` (UInt64) — The total number of marks in the merged parts. +- `bytes_read_uncompressed` (UInt64) — Number of bytes read, uncompressed. +- `rows_read` (UInt64) — Number of rows read. +- `bytes_written_uncompressed` (UInt64) — Number of bytes written, uncompressed. +- `rows_written` (UInt64) — Number of rows written. + +## システム。指標 {#system_tables-metrics} + +瞬時に計算されるか、現在の値を持つことができる指標が含まれています。 たとえば、同時に処理されたクエリの数や現在のレプリカの遅延などです。 このテーブルは常に最新です。 + +列: + +- `metric` ([文字列](../sql_reference/data_types/string.md)) — Metric name. +- `value` ([Int64](../sql_reference/data_types/int_uint.md)) — Metric value. +- `description` ([文字列](../sql_reference/data_types/string.md)) — Metric description. + +サポートされている指標のリストを以下に示します [dbms/Common/CurrentMetrics。cpp](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/Common/CurrentMetrics.cpp) ClickHouseのソースファイル。 + +**例えば** + +``` sql +SELECT * FROM system.metrics LIMIT 10 +``` + +``` text +┌─metric─────────────────────┬─value─┬─description──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐ +│ Query │ 1 │ Number of executing queries │ +│ Merge │ 0 │ Number of executing background merges │ +│ PartMutation │ 0 │ Number of mutations (ALTER DELETE/UPDATE) │ +│ ReplicatedFetch │ 0 │ Number of data parts being fetched from replicas │ +│ ReplicatedSend │ 0 │ Number of data parts being sent to replicas │ +│ ReplicatedChecks │ 0 │ Number of data parts checking for consistency │ +│ BackgroundPoolTask │ 0 │ Number of active tasks in BackgroundProcessingPool (merges, mutations, fetches, or replication queue bookkeeping) │ +│ BackgroundSchedulePoolTask │ 0 │ Number of active tasks in BackgroundSchedulePool. This pool is used for periodic ReplicatedMergeTree tasks, like cleaning old data parts, altering data parts, replica re-initialization, etc. │ +│ DiskSpaceReservedForMerge │ 0 │ Disk space reserved for currently running background merges. 
It is slightly more than the total size of currently merging parts. │ +│ DistributedSend │ 0 │ Number of connections to remote servers sending data that was INSERTed into Distributed tables. Both synchronous and asynchronous mode. │ +└────────────────────────────┴───────┴──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ +``` + +**また見なさい** + +- [システム。asynchronous\_metrics](#system_tables-asynchronous_metrics) — Contains periodically calculated metrics. +- [システム。イベント](#system_tables-events) — Contains a number of events that occurred. +- [システム。metric\_log](#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` и `system.events`. +- [監視](monitoring.md) — Base concepts of ClickHouse monitoring. + +## システム。metric\_log {#system_tables-metric_log} + +表からのメトリック値の履歴を含む `system.metrics` と `system.events`、定期的にディスクにフラッシュ。 +メトリック履歴の収集をオンにするには `system.metric_log`,作成 `/etc/clickhouse-server/config.d/metric_log.xml` 以下の内容で: + +``` xml + + + system + metric_log
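+        <!-- フラッシュ間隔(7500ミリ秒)と収集間隔(1000ミリ秒)の設定 -->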
+        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+        <collect_interval_milliseconds>1000</collect_interval_milliseconds>
+    </metric_log>
+</yandex>
    +``` + +**例えば** + +``` sql +SELECT * FROM system.metric_log LIMIT 1 FORMAT Vertical; +``` + +``` text +Row 1: +────── +event_date: 2020-02-18 +event_time: 2020-02-18 07:15:33 +milliseconds: 554 +ProfileEvent_Query: 0 +ProfileEvent_SelectQuery: 0 +ProfileEvent_InsertQuery: 0 +ProfileEvent_FileOpen: 0 +ProfileEvent_Seek: 0 +ProfileEvent_ReadBufferFromFileDescriptorRead: 1 +ProfileEvent_ReadBufferFromFileDescriptorReadFailed: 0 +ProfileEvent_ReadBufferFromFileDescriptorReadBytes: 0 +ProfileEvent_WriteBufferFromFileDescriptorWrite: 1 +ProfileEvent_WriteBufferFromFileDescriptorWriteFailed: 0 +ProfileEvent_WriteBufferFromFileDescriptorWriteBytes: 56 +... +CurrentMetric_Query: 0 +CurrentMetric_Merge: 0 +CurrentMetric_PartMutation: 0 +CurrentMetric_ReplicatedFetch: 0 +CurrentMetric_ReplicatedSend: 0 +CurrentMetric_ReplicatedChecks: 0 +... +``` + +**また見なさい** + +- [システム。asynchronous\_metrics](#system_tables-asynchronous_metrics) — Contains periodically calculated metrics. +- [システム。イベント](#system_tables-events) — Contains a number of events that occurred. +- [システム。指標](#system_tables-metrics) — Contains instantly calculated metrics. +- [監視](monitoring.md) — Base concepts of ClickHouse monitoring. + +## システム。数字 {#system-numbers} + +このテーブルを一uint64カラム名 ‘number’ ゼロから始まるほぼすべての自然数が含まれています。 +このテーブルをテストに使用するか、ブルートフォース検索を実行する必要がある場合に使用できます。 +この表からの読み取りは並列化されません。 + +## システム。numbers\_mt {#system-numbers-mt} + +同じように ‘system.numbers’ しかし、読み込みは平行です。 数字は任意の順序で返すことができます。 +使用試験までを実施。 + +## システム。ワン {#system-one} + +このテーブルには、単一行と単一行が含まれます ‘dummy’ 値を含むUInt8列0. +このテーブルは、selectクエリがfrom句を指定しない場合に使用されます。 +これは、他のdbmsで見つかったデュアルテーブルに似ています。 + +## システム。パーツ {#system_tables-parts} + +の部分に関する情報が含まれます [MergeTree](../engines/table_engines/mergetree_family/mergetree.md) テーブル。 + +各行は、一つのデータ部分を記述します。 + +列: + +- `partition` (String) – The partition name. To learn what a partition is, see the description of the [ALTER](../sql_reference/statements/alter.md#query_language_queries_alter) クエリ。 + + 形式: + + - `YYYYMM` 月別の自動パーティション分割の場合。 + - `any_string` 手動で分割する場合。 + +- `name` (`String`) – Name of the data part. + +- `active` (`UInt8`) – Flag that indicates whether the data part is active. If a data part is active, it's used in a table. Otherwise, it's deleted. Inactive data parts remain after merging. + +- `marks` (`UInt64`) – The number of marks. To get the approximate number of rows in a data part, multiply `marks` インデックスの粒度(通常は8192)(このヒントは適応的な粒度では機能しません)。 + +- `rows` (`UInt64`) – The number of rows. + +- `bytes_on_disk` (`UInt64`) – Total size of all the data part files in bytes. + +- `data_compressed_bytes` (`UInt64`) – Total size of compressed data in the data part. All the auxiliary files (for example, files with marks) are not included. + +- `data_uncompressed_bytes` (`UInt64`) – Total size of uncompressed data in the data part. All the auxiliary files (for example, files with marks) are not included. + +- `marks_bytes` (`UInt64`) – The size of the file with marks. + +- `modification_time` (`DateTime`) – The time the directory with the data part was modified. This usually corresponds to the time of data part creation.\| + +- `remove_time` (`DateTime`) – The time when the data part became inactive. + +- `refcount` (`UInt32`) – The number of places where the data part is used. A value greater than 2 indicates that the data part is used in queries or merges. + +- `min_date` (`Date`) – The minimum value of the date key in the data part. + +- `max_date` (`Date`) – The maximum value of the date key in the data part. 
+ +- `min_time` (`DateTime`) – The minimum value of the date and time key in the data part. + +- `max_time`(`DateTime`) – The maximum value of the date and time key in the data part. + +- `partition_id` (`String`) – ID of the partition. + +- `min_block_number` (`UInt64`) – The minimum number of data parts that make up the current part after merging. + +- `max_block_number` (`UInt64`) – The maximum number of data parts that make up the current part after merging. + +- `level` (`UInt32`) – Depth of the merge tree. Zero means that the current part was created by insert rather than by merging other parts. + +- `data_version` (`UInt64`) – Number that is used to determine which mutations should be applied to the data part (mutations with a version higher than `data_version`). + +- `primary_key_bytes_in_memory` (`UInt64`) – The amount of memory (in bytes) used by primary key values. + +- `primary_key_bytes_in_memory_allocated` (`UInt64`) – The amount of memory (in bytes) reserved for primary key values. + +- `is_frozen` (`UInt8`) – Flag that shows that a partition data backup exists. 1, the backup exists. 0, the backup doesn't exist. For more details, see [FREEZE PARTITION](../sql_reference/statements/alter.md#alter_freeze-partition) + +- `database` (`String`) – Name of the database. + +- `table` (`String`) – Name of the table. + +- `engine` (`String`) – Name of the table engine without parameters. + +- `path` (`String`) – Absolute path to the folder with data part files. + +- `disk` (`String`) – Name of a disk that stores the data part. + +- `hash_of_all_files` (`String`) – [サイファシー128](../sql_reference/functions/hash_functions.md#hash_functions-siphash128) 圧縮されたファイルの。 + +- `hash_of_uncompressed_files` (`String`) – [サイファシー128](../sql_reference/functions/hash_functions.md#hash_functions-siphash128) 非圧縮ファイル(マーク付きファイル、インデックスファイルなど)). + +- `uncompressed_hash_of_compressed_files` (`String`) – [サイファシー128](../sql_reference/functions/hash_functions.md#hash_functions-siphash128) 圧縮されていないかのように圧縮されたファイル内のデータ。 + +- `bytes` (`UInt64`) – Alias for `bytes_on_disk`. + +- `marks_size` (`UInt64`) – Alias for `marks_bytes`. + +## システム。part\_log {#system_tables-part-log} + +その `system.part_log` テーブルが作成されるのは、 [part\_log](server_configuration_parameters/settings.md#server_configuration_parameters-part-log) サーバー設定を指定します。 + +このテーブルには、 [データ部品](../engines/table_engines/mergetree_family/custom_partitioning_key.md) で [MergeTree](../engines/table_engines/mergetree_family/mergetree.md) データの追加やマージなどのファミリテーブル。 + +その `system.part_log` テーブルを含む以下のカラム: + +- `event_type` (Enum) — Type of the event that occurred with the data part. Can have one of the following values: + - `NEW_PART` — Inserting of a new data part. + - `MERGE_PARTS` — Merging of data parts. + - `DOWNLOAD_PART` — Downloading a data part. + - `REMOVE_PART` — Removing or detaching a data part using [DETACH PARTITION](../sql_reference/statements/alter.md#alter_detach-partition). + - `MUTATE_PART` — Mutating of a data part. + - `MOVE_PART` — Moving the data part from the one disk to another one. +- `event_date` (Date) — Event date. +- `event_time` (DateTime) — Event time. +- `duration_ms` (UInt64) — Duration. +- `database` (String) — Name of the database the data part is in. +- `table` (String) — Name of the table the data part is in. +- `part_name` (String) — Name of the data part. +- `partition_id` (String) — ID of the partition that the data part was inserted to. The column takes the ‘all’ パーティション分割が `tuple()`. +- `rows` (UInt64) — The number of rows in the data part. 
+- `size_in_bytes` (UInt64) — Size of the data part in bytes. +- `merged_from` (Array(String)) — An array of names of the parts which the current part was made up from (after the merge). +- `bytes_uncompressed` (UInt64) — Size of uncompressed bytes. +- `read_rows` (UInt64) — The number of rows was read during the merge. +- `read_bytes` (UInt64) — The number of bytes was read during the merge. +- `error` (UInt16) — The code number of the occurred error. +- `exception` (String) — Text message of the occurred error. + +その `system.part_log` テーブルは、最初にデータを挿入した後に作成されます。 `MergeTree` テーブル。 + +## システム。プロセス {#system_tables-processes} + +このシステム表は実行のために使用されます `SHOW PROCESSLIST` クエリ。 + +列: + +- `user` (String) – The user who made the query. Keep in mind that for distributed processing, queries are sent to remote servers under the `default` ユーザー。 このフィールドには、特定のクエリのユーザー名が含まれています。 +- `address` (String) – The IP address the request was made from. The same for distributed processing. To track where a distributed query was originally made from, look at `system.processes` クエリリクエスターサーバーです。 +- `elapsed` (Float64) – The time in seconds since request execution started. +- `rows_read` (UInt64) – The number of rows read from the table. For distributed processing, on the requestor server, this is the total for all remote servers. +- `bytes_read` (UInt64) – The number of uncompressed bytes read from the table. For distributed processing, on the requestor server, this is the total for all remote servers. +- `total_rows_approx` (UInt64) – The approximation of the total number of rows that should be read. For distributed processing, on the requestor server, this is the total for all remote servers. It can be updated during request processing, when new sources to process become known. +- `memory_usage` (UInt64) – Amount of RAM the request uses. It might not include some types of dedicated memory. See the [max\_memory\_usage](../operations/settings/query_complexity.md#settings_max_memory_usage) 設定。 +- `query` (String) – The query text. For `INSERT`、それは挿入するデータが含まれていません。 +- `query_id` (String) – Query ID, if defined. + +## システム。text\_log {#system-tables-text-log} + +ログエントリを含む。 この表に行くロギングのレベルはで限られます `text_log.level` サーバー設定。 + +列: + +- `event_date` (`Date`)-エントリの日付。 +- `event_time` (`DateTime`)-エントリの時間。 +- `microseconds` (`UInt32`)-エントリのマイクロ秒。 +- `thread_name` (String) — Name of the thread from which the logging was done. +- `thread_id` (UInt64) — OS thread ID. +- `level` (`Enum8`)-エントリーレベル。 + - `'Fatal' = 1` + - `'Critical' = 2` + - `'Error' = 3` + - `'Warning' = 4` + - `'Notice' = 5` + - `'Information' = 6` + - `'Debug' = 7` + - `'Trace' = 8` +- `query_id` (`String`)-クエリのID。 +- `logger_name` (`LowCardinality(String)`) - Name of the logger (i.e. `DDLWorker`) +- `message` (`String`)-メッセージそのもの。 +- `revision` (`UInt32`)-ClickHouseリビジョン. +- `source_file` (`LowCardinality(String)`)-ロギングが行われたソースファイル。 +- `source_line` (`UInt64`)-ロギングが行われたソースライン。 + +## システム。クエリーログ {#system_tables-query_log} + +クエリの実行に関する情報が含まれます。 各クエリについて、処理の開始時間、処理時間、エラーメッセージおよびその他の情報を確認できます。 + +!!! note "メモ" + テーブルには以下の入力データは含まれません `INSERT` クエリ。 + +ClickHouseはこのテーブルを作成します。 [クエリーログ](server_configuration_parameters/settings.md#server_configuration_parameters-query-log) サーバパラメータを指定します。 このパラメーターは、ロギング間隔やクエリがログに記録されるテーブルの名前などのロギングルールを設定します。 + +するクエリのロギングの設定を [log\_queries](settings/settings.md#settings-log-queries) 1へのパラメータ。 詳細については、 [設定](settings/settings.md) セクション。 + +その `system.query_log` テーブルレジスタ二種類のクエリ: + +1. クライアントによって直接実行された初期クエリ。 +2. 
他のクエリによって開始された子クエリ(分散クエリ実行用)。 これらのタイプのクエリについては、親クエリに関する情報が表示されます。 `initial_*` 列。 + +列: + +- `type` (`Enum8`) — Type of event that occurred when executing the query. Values: + - `'QueryStart' = 1` — Successful start of query execution. + - `'QueryFinish' = 2` — Successful end of query execution. + - `'ExceptionBeforeStart' = 3` — Exception before the start of query execution. + - `'ExceptionWhileProcessing' = 4` — Exception during the query execution. +- `event_date` (Date) — Query starting date. +- `event_time` (DateTime) — Query starting time. +- `query_start_time` (DateTime) — Start time of query execution. +- `query_duration_ms` (UInt64) — Duration of query execution. +- `read_rows` (UInt64) — Number of read rows. +- `read_bytes` (UInt64) — Number of read bytes. +- `written_rows` (UInt64) — For `INSERT` クエリ、書き込まれた行の数。 他のクエリの場合、列の値は0です。 +- `written_bytes` (UInt64) — For `INSERT` クエリ、書き込まれたバイト数。 他のクエリの場合、列の値は0です。 +- `result_rows` (UInt64) — Number of rows in the result. +- `result_bytes` (UInt64) — Number of bytes in the result. +- `memory_usage` (UInt64) — Memory consumption by the query. +- `query` (String) — Query string. +- `exception` (String) — Exception message. +- `stack_trace` (String) — Stack trace (a list of methods called before the error occurred). An empty string, if the query is completed successfully. +- `is_initial_query` (UInt8) — Query type. Possible values: + - 1 — Query was initiated by the client. + - 0 — Query was initiated by another query for distributed query execution. +- `user` (String) — Name of the user who initiated the current query. +- `query_id` (String) — ID of the query. +- `address` (IPv6) — IP address that was used to make the query. +- `port` (UInt16) — The client port that was used to make the query. +- `initial_user` (String) — Name of the user who ran the initial query (for distributed query execution). +- `initial_query_id` (String) — ID of the initial query (for distributed query execution). +- `initial_address` (IPv6) — IP address that the parent query was launched from. +- `initial_port` (UInt16) — The client port that was used to make the parent query. +- `interface` (UInt8) — Interface that the query was initiated from. Possible values: + - 1 — TCP. + - 2 — HTTP. +- `os_user` (String) — OS's username who runs [クリックハウス-顧客](../interfaces/cli.md). +- `client_hostname` (String) — Hostname of the client machine where the [クリックハウス-顧客](../interfaces/cli.md) または他のTCPクライアントが実行されます。 +- `client_name` (String) — The [クリックハウス-顧客](../interfaces/cli.md) または別のTCPクライアント名。 +- `client_revision` (UInt32) — Revision of the [クリックハウス-顧客](../interfaces/cli.md) または別のTCPクライアント。 +- `client_version_major` (UInt32) — Major version of the [クリックハウス-顧客](../interfaces/cli.md) または別のTCPクライアント。 +- `client_version_minor` (UInt32) — Minor version of the [クリックハウス-顧客](../interfaces/cli.md) または別のTCPクライアント。 +- `client_version_patch` (UInt32) — Patch component of the [クリックハウス-顧客](../interfaces/cli.md) または別のTCPクライアン +- `http_method` (UInt8) — HTTP method that initiated the query. Possible values: + - 0 — The query was launched from the TCP interface. + - 1 — `GET` 方法を用いた。 + - 2 — `POST` 方法を用いた。 +- `http_user_agent` (String) — The `UserAgent` HTTP要求で渡されるヘッダー。 +- `quota_key` (String) — The “quota key” で指定される [クォータ](quotas.md) 設定(参照 `keyed`). +- `revision` (UInt32) — ClickHouse revision. +- `thread_numbers` (Array(UInt32)) — Number of threads that are participating in query execution. +- `ProfileEvents.Names` (Array(String)) — Counters that measure different metrics. 
The description of them could be found in the table [システム。イベント](#system_tables-events) +- `ProfileEvents.Values` (Array(UInt64)) — Values of metrics that are listed in the `ProfileEvents.Names` コラム +- `Settings.Names` (Array(String)) — Names of settings that were changed when the client ran the query. To enable logging changes to settings, set the `log_query_settings` 1へのパラメータ。 +- `Settings.Values` (Array(String)) — Values of settings that are listed in the `Settings.Names` コラム + +それぞれのクエリでは、次の行が作成されます。 `query_log` クエリのステータスに応じたテーブル: + +1. クエリの実行が成功すると、タイプ1とタイプ2のイベントが作成されます。 `type` 列)。 +2. クエリ処理中にエラーが発生した場合は、タイプ1とタイプ4のイベントが作成されます。 +3. クエリを起動する前にエラーが発生した場合は、タイプ3の単一のイベントが作成されます。 + +デフォルトでは、7.5秒間隔でログがテーブルに追加されます。 この間隔を設定することができます [クエリーログ](server_configuration_parameters/settings.md#server_configuration_parameters-query-log) サーバー設定(参照してください `flush_interval_milliseconds` パラメータ)。 フラッシュを強制的にログからのメモリバッファ、テーブルを使用 `SYSTEM FLUSH LOGS` クエリ。 + +テーブルが手動で削除されると、その場で自動的に作成されます。 以前のログはすべて削除されることに注意してください。 + +!!! note "メモ" + ログの保存期間は無制限です。 ログはテーブルから自動的に削除されません。 古いログの削除を自分で整理する必要があります。 + +任意のパーティション分割キーを指定できます。 `system.query_log` のテーブル [クエリーログ](server_configuration_parameters/settings.md#server_configuration_parameters-query-log) サーバー設定(参照してください `partition_by` パラメータ)。 + +## システム。query\_thread\_log {#system_tables-query-thread-log} + +のテーブルについての情報が含まれてそれぞれの検索キーワード実行スレッド. + +ClickHouseはこのテーブルを作成します。 [query\_thread\_log](server_configuration_parameters/settings.md#server_configuration_parameters-query-thread-log) サーバパラメータを指定します。 このパラメーターは、ロギング間隔やクエリがログに記録されるテーブルの名前などのロギングルールを設定します。 + +するクエリのロギングの設定を [log\_query\_threads](settings/settings.md#settings-log-query-threads) 1へのパラメータ。 詳細については、 [設定](settings/settings.md) セクション。 + +列: + +- `event_date` (Date) — the date when the thread has finished execution of the query. +- `event_time` (DateTime) — the date and time when the thread has finished execution of the query. +- `query_start_time` (DateTime) — Start time of query execution. +- `query_duration_ms` (UInt64) — Duration of query execution. +- `read_rows` (UInt64) — Number of read rows. +- `read_bytes` (UInt64) — Number of read bytes. +- `written_rows` (UInt64) — For `INSERT` クエリ、書き込まれた行の数。 他のクエリの場合、列の値は0です。 +- `written_bytes` (UInt64) — For `INSERT` クエリ、書き込まれたバイト数。 他のクエリの場合、列の値は0です。 +- `memory_usage` (Int64) — The difference between the amount of allocated and freed memory in context of this thread. +- `peak_memory_usage` (Int64) — The maximum difference between the amount of allocated and freed memory in context of this thread. +- `thread_name` (String) — Name of the thread. +- `thread_number` (UInt32) — Internal thread ID. +- `os_thread_id` (Int32) — OS thread ID. +- `master_thread_id` (UInt64) — OS initial ID of initial thread. +- `query` (String) — Query string. +- `is_initial_query` (UInt8) — Query type. Possible values: + - 1 — Query was initiated by the client. + - 0 — Query was initiated by another query for distributed query execution. +- `user` (String) — Name of the user who initiated the current query. +- `query_id` (String) — ID of the query. +- `address` (IPv6) — IP address that was used to make the query. +- `port` (UInt16) — The client port that was used to make the query. +- `initial_user` (String) — Name of the user who ran the initial query (for distributed query execution). +- `initial_query_id` (String) — ID of the initial query (for distributed query execution). +- `initial_address` (IPv6) — IP address that the parent query was launched from. 
+- `initial_port` (UInt16) — The client port that was used to make the parent query. +- `interface` (UInt8) — Interface that the query was initiated from. Possible values: + - 1 — TCP. + - 2 — HTTP. +- `os_user` (String) — OS's username who runs [クリックハウス-顧客](../interfaces/cli.md). +- `client_hostname` (String) — Hostname of the client machine where the [クリックハウス-顧客](../interfaces/cli.md) または他のTCPクライアントが実行されます。 +- `client_name` (String) — The [クリックハウス-顧客](../interfaces/cli.md) または別のTCPクライアント名。 +- `client_revision` (UInt32) — Revision of the [クリックハウス-顧客](../interfaces/cli.md) または別のTCPクライアント。 +- `client_version_major` (UInt32) — Major version of the [クリックハウス-顧客](../interfaces/cli.md) または別のTCPクライアント。 +- `client_version_minor` (UInt32) — Minor version of the [クリックハウス-顧客](../interfaces/cli.md) または別のTCPクライアント。 +- `client_version_patch` (UInt32) — Patch component of the [クリックハウス-顧客](../interfaces/cli.md) または別のTCPクライアン +- `http_method` (UInt8) — HTTP method that initiated the query. Possible values: + - 0 — The query was launched from the TCP interface. + - 1 — `GET` 方法を用いた。 + - 2 — `POST` 方法を用いた。 +- `http_user_agent` (String) — The `UserAgent` HTTP要求で渡されるヘッダー。 +- `quota_key` (String) — The “quota key” で指定される [クォータ](quotas.md) 設定(参照 `keyed`). +- `revision` (UInt32) — ClickHouse revision. +- `ProfileEvents.Names` (Array(String)) — Counters that measure different metrics for this thread. The description of them could be found in the table [システム。イベント](#system_tables-events) +- `ProfileEvents.Values` (Array(UInt64)) — Values of metrics for this thread that are listed in the `ProfileEvents.Names` コラム + +デフォルトでは、7.5秒間隔でログがテーブルに追加されます。 この間隔を設定することができます [query\_thread\_log](server_configuration_parameters/settings.md#server_configuration_parameters-query-thread-log) サーバー設定(参照してください `flush_interval_milliseconds` パラメータ)。 フラッシュを強制的にログからのメモリバッファ、テーブルを使用 `SYSTEM FLUSH LOGS` クエリ。 + +テーブルが手動で削除されると、その場で自動的に作成されます。 以前のログはすべて削除されることに注意してください。 + +!!! note "メモ" + ログの保存期間は無制限です。 ログはテーブルから自動的に削除されません。 古いログの削除を自分で整理する必要があります。 + +任意のパーティション分割キーを指定できます。 `system.query_thread_log` のテーブル [query\_thread\_log](server_configuration_parameters/settings.md#server_configuration_parameters-query-thread-log) サーバー設定(参照してください `partition_by` パラメータ)。 + +## システム。trace\_log {#system_tables-trace_log} + +サンプリングクエリプ + +ClickHouseはこのテーブルを作成します [trace\_log](server_configuration_parameters/settings.md#server_configuration_parameters-trace_log) サーバの設定が設定されます。 また、 [query\_profiler\_real\_time\_period\_ns](settings/settings.md#query_profiler_real_time_period_ns) と [query\_profiler\_cpu\_time\_period\_ns](settings/settings.md#query_profiler_cpu_time_period_ns) 設定は設定する必要があります。 + +ログを分析するには、以下を使用します `addressToLine`, `addressToSymbol` と `demangle` イントロスペクション関数。 + +列: + +- `event_date`([日付](../sql_reference/data_types/date.md)) — Date of sampling moment. + +- `event_time`([DateTime](../sql_reference/data_types/datetime.md)) — Timestamp of sampling moment. + +- `revision`([UInt32](../sql_reference/data_types/int_uint.md)) — ClickHouse server build revision. + + サーバーに接続する場合 `clickhouse-client`、あなたは次のような文字列が表示されます `Connected to ClickHouse server version 19.18.1 revision 54429.`. このフィールドには、 `revision`、しかしない `version` サーバーの。 + +- `timer_type`([Enum8](../sql_reference/data_types/enum.md)) — Timer type: + + - `Real` 壁時計の時刻を表します。 + - `CPU` CPU時間を表します。 + +- `thread_number`([UInt32](../sql_reference/data_types/int_uint.md)) — Thread identifier. 
+ +- `query_id`([文字列](../sql_reference/data_types/string.md)) — Query identifier that can be used to get details about a query that was running from the [クエリーログ](#system_tables-query_log) システムテーブル。 + +- `trace`([配列(uint64)](../sql_reference/data_types/array.md)) — Stack trace at the moment of sampling. Each element is a virtual memory address inside ClickHouse server process. + +**例えば** + +``` sql +SELECT * FROM system.trace_log LIMIT 1 \G +``` + +``` text +Row 1: +────── +event_date: 2019-11-15 +event_time: 2019-11-15 15:09:38 +revision: 54428 +timer_type: Real +thread_number: 48 +query_id: acc4d61f-5bd1-4a3e-bc91-2180be37c915 +trace: [94222141367858,94222152240175,94222152325351,94222152329944,94222152330796,94222151449980,94222144088167,94222151682763,94222144088167,94222151682763,94222144088167,94222144058283,94222144059248,94222091840750,94222091842302,94222091831228,94222189631488,140509950166747,140509942945935] +``` + +## システム。レプリカ {#system_tables-replicas} + +情報および状況を再現しテーブル在住の地元のサーバーです。 +このテーブルは、監視に使用できます。 のテーブルが含まれて行毎に再現\*ます。 + +例えば: + +``` sql +SELECT * +FROM system.replicas +WHERE table = 'visits' +FORMAT Vertical +``` + +``` text +Row 1: +────── +database: merge +table: visits +engine: ReplicatedCollapsingMergeTree +is_leader: 1 +can_become_leader: 1 +is_readonly: 0 +is_session_expired: 0 +future_parts: 1 +parts_to_check: 0 +zookeeper_path: /clickhouse/tables/01-06/visits +replica_name: example01-06-1.yandex.ru +replica_path: /clickhouse/tables/01-06/visits/replicas/example01-06-1.yandex.ru +columns_version: 9 +queue_size: 1 +inserts_in_queue: 0 +merges_in_queue: 1 +part_mutations_in_queue: 0 +queue_oldest_time: 2020-02-20 08:34:30 +inserts_oldest_time: 0000-00-00 00:00:00 +merges_oldest_time: 2020-02-20 08:34:30 +part_mutations_oldest_time: 0000-00-00 00:00:00 +oldest_part_to_get: +oldest_part_to_merge_to: 20200220_20284_20840_7 +oldest_part_to_mutate_to: +log_max_index: 596273 +log_pointer: 596274 +last_queue_update: 2020-02-20 08:34:32 +absolute_delay: 0 +total_replicas: 2 +active_replicas: 2 +``` + +列: + +- `database` (`String`)-データベース名 +- `table` (`String`)-テーブル名 +- `engine` (`String`)-テーブルエンジン名 +- `is_leader` (`UInt8`)-レプリカがリーダーであるかどうか。 + リーダーは一度にひとつのレプリカのみです。 リーダーは実行するバックグラウンドマージの選択を担当します。 + 書き込みは、リーダーであるかどうかにかかわらず、利用可能であり、zkにセッションを持つ任意のレプリカに対して実行できます。 +- `can_become_leader` (`UInt8`)-レプリカをリーダーとして選出できるかどうか。 +- `is_readonly` (`UInt8`)-レプリカが読み取り専用モードであるかどうか。 + このモードは、zookeeperでセッションを再初期化するときに不明なエラーが発生した場合、およびzookeeperでのセッション再初期化中に、zookeeperとのセクションが設定さ +- `is_session_expired` (`UInt8`)-ZooKeeperとのセッションが終了しました。 基本的には `is_readonly`. +- `future_parts` (`UInt32`)-まだ行われていない挿入またはマージの結果として表示されるデータパーツの数。 +- `parts_to_check` (`UInt32`)-検証のためのキュー内のデータパーツの数。 破損している可能性があるという疑いがある場合、部品は検証キューに入れられます。 +- `zookeeper_path` (`String`)-ZooKeeperのテーブルデータへのパス。 +- `replica_name` (`String`)-飼育係のレプリカ名。 異なるレプリカと同じテーブルの異名をとります。 +- `replica_path` (`String`)-飼育係のレプリカデータへのパス。 連結と同じです ‘zookeeper\_path/replicas/replica\_path’. +- `columns_version` (`Int32`)-テーブル構造のバージョン番号。 ALTERが実行された回数を示します。 場合にレプリカは異なるバージョンで一部のレプリカさんのすべての変更はまだない。 +- `queue_size` (`UInt32`)-実行待ち操作のキューのサイズ。 業務などのブロックを挿入し、データ統合し、行動します。 それは通常 `future_parts`. 
+- `inserts_in_queue` (`UInt32`)-作成する必要があるデータブロックの挿入数。 挿入は、通常、かなり迅速に複製されます。 この数が大きい場合は、何かが間違っていることを意味します。 +- `merges_in_queue` (`UInt32`)-作成されるのを待機しているマージの数。 マージが時間がかかることがあるので、この値は長い間ゼロより大きくなることがあります。 +- `part_mutations_in_queue` (`UInt32`)-作られるのを待っている突然変異の数。 +- `queue_oldest_time` (`DateTime`)-If `queue_size` 0より大きい場合、最も古い操作がキューに追加された日時が表示されます。 +- `inserts_oldest_time` (`DateTime`)-見る `queue_oldest_time` +- `merges_oldest_time` (`DateTime`)-見る `queue_oldest_time` +- `part_mutations_oldest_time` (`DateTime`)-見る `queue_oldest_time` + +次の4列は、zkとのアクティブなセッションがある場合にのみ、ゼロ以外の値を持ちます。 + +- `log_max_index` (`UInt64`)-一般的な活動のログの最大エントリ番号。 +- `log_pointer` (`UInt64`)-レプリカが実行キューにコピーした一般的なアクティビティのログの最大エントリ番号。 もし `log_pointer` はるかに小さいよりも `log_max_index`、何かが間違っている。 +- `last_queue_update` (`DateTime`)-キューが前回updatedされたとき。 +- `absolute_delay` (`UInt64`)-どのように大きな遅れ秒で現在のレプリカがあります。 +- `total_replicas` (`UInt8`)-このテーブルの既知のレプリカの総数。 +- `active_replicas` (`UInt8`)-ZooKeeperでセッションを持つこのテーブルのレプリカの数(つまり、機能するレプリカの数)。 + +希望される場合は、すべての列は、テーブルが少しゆっくりと、くつかの読み込みから飼育係したがって行います。 +最後の4列(log\_max\_index、log\_pointer、total\_replicas、active\_replicas)を要求しないと、テーブルはすぐに動作します。 + +たとえば、次のようにすべてが正常に動作していることを確認できます: + +``` sql +SELECT + database, + table, + is_leader, + is_readonly, + is_session_expired, + future_parts, + parts_to_check, + columns_version, + queue_size, + inserts_in_queue, + merges_in_queue, + log_max_index, + log_pointer, + total_replicas, + active_replicas +FROM system.replicas +WHERE + is_readonly + OR is_session_expired + OR future_parts > 20 + OR parts_to_check > 10 + OR queue_size > 20 + OR inserts_in_queue > 10 + OR log_max_index - log_pointer > 10 + OR total_replicas < 2 + OR active_replicas < total_replicas +``` + +このクエリが何も返さない場合は、すべてが正常であることを意味します。 + +## システム。設定 {#system-settings} + +現在使用中の設定に関する情報が含まれます。 +つまり、システムからの読み取りに使用しているクエリを実行するために使用されます。設定テーブル. + +列: + +- `name` (String) — Setting name. +- `value` (String) — Setting value. +- `description` (String) — Setting description. +- `type` (String) — Setting type (implementation specific string value). +- `changed` (UInt8) — Whether the setting was explicitly defined in the config or explicitly changed. +- `min` (Nullable(String)) — Get minimum allowed value (if any is set via [制約](settings/constraints_on_settings.md#constraints-on-settings)). +- `max` (Nullable(String)) — Get maximum allowed value (if any is set via [制約](settings/constraints_on_settings.md#constraints-on-settings)). +- `readonly` (UInt8) — Can user change this setting (for more info, look into [制約](settings/constraints_on_settings.md#constraints-on-settings)). + +例えば: + +``` sql +SELECT name, value +FROM system.settings +WHERE changed +``` + +``` text +┌─name───────────────────┬─value───────┐ +│ max_threads │ 8 │ +│ use_uncompressed_cache │ 0 │ +│ load_balancing │ random │ +│ max_memory_usage │ 10000000000 │ +└────────────────────────┴─────────────┘ +``` + +## システム。merge\_tree\_settings {#system-merge_tree_settings} + +についての情報が含まれて設定 `MergeTree` テーブル。 + +列: + +- `name` (String) — Setting name. +- `value` (String) — Setting value. +- `description` (String) — Setting description. +- `type` (String) — Setting type (implementation specific string value). +- `changed` (UInt8) — Whether the setting was explicitly defined in the config or explicitly changed. + +## システム。table\_engines {#system-table-engines} + +を含むの記述のテーブルエンジンをサポートサーバーとその特徴を支援す。 + +このテーブル以下のカラムのカラムタイプはブラケット): + +- `name` (String) — The name of table engine. 
+- `supports_settings` (UInt8) — Flag that indicates if table engine supports `SETTINGS` 句。 +- `supports_skipping_indices` (UInt8) — Flag that indicates if table engine supports [索引のスキップ](../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-data_skipping-indexes). +- `supports_ttl` (UInt8) — Flag that indicates if table engine supports [TTL](../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-ttl). +- `supports_sort_order` (UInt8) — Flag that indicates if table engine supports clauses `PARTITION_BY`, `PRIMARY_KEY`, `ORDER_BY` と `SAMPLE_BY`. +- `supports_replication` (UInt8) — Flag that indicates if table engine supports [データ複製](../engines/table_engines/mergetree_family/replication.md). +- `supports_duduplication` (UInt8) — Flag that indicates if table engine supports data deduplication. + +例えば: + +``` sql +SELECT * +FROM system.table_engines +WHERE name in ('Kafka', 'MergeTree', 'ReplicatedCollapsingMergeTree') +``` + +``` text +┌─name──────────────────────────┬─supports_settings─┬─supports_skipping_indices─┬─supports_sort_order─┬─supports_ttl─┬─supports_replication─┬─supports_deduplication─┐ +│ Kafka │ 1 │ 0 │ 0 │ 0 │ 0 │ 0 │ +│ MergeTree │ 1 │ 1 │ 1 │ 1 │ 0 │ 0 │ +│ ReplicatedCollapsingMergeTree │ 1 │ 1 │ 1 │ 1 │ 1 │ 1 │ +└───────────────────────────────┴───────────────────┴───────────────────────────┴─────────────────────┴──────────────┴──────────────────────┴────────────────────────┘ +``` + +**また見なさい** + +- マージツリーファミリー [クエリ句](../engines/table_engines/mergetree_family/mergetree.md#mergetree-query-clauses) +- カフカname [設定](../engines/table_engines/integrations/kafka.md#table_engine-kafka-creating-a-table) +- 参加 [設定](../engines/table_engines/special/join.md#join-limitations-and-settings) + +## システム。テーブル {#system-tables} + +を含むメタデータは各テーブルサーバーに知っています。 デタッチされたテーブルは `system.tables`. + +このテーブル以下のカラムのカラムタイプはブラケット): + +- `database` (String) — The name of the database the table is in. + +- `name` (String) — Table name. + +- `engine` (String) — Table engine name (without parameters). + +- `is_temporary` (UInt8)-テーブルが一時的かどうかを示すフラグ。 + +- `data_path` (文字列)-ファイルシステム内のテーブルデータへのパス。 + +- `metadata_path` (String)-ファイルシステム内のテーブルメタデータへのパス。 + +- `metadata_modification_time` (DateTime)-テーブルメタデータの最新の変更の時刻。 + +- `dependencies_database` (Array(String))-データベースの依存関係。 + +- `dependencies_table` (Array(String))-テーブルの依存関係 ([MaterializedView](../engines/table_engines/special/materializedview.md) 現在のテーブルに基づくテーブル)。 + +- `create_table_query` (String)-テーブルの作成に使用されたクエリ。 + +- `engine_full` (String)-テーブルエンジンのパラメータ。 + +- `partition_key` (String)-テーブルで指定されたパーティションキー式。 + +- `sorting_key` (String)-テーブルで指定された並べ替えキー式。 + +- `primary_key` (String)-テーブルで指定された主キー式。 + +- `sampling_key` (String)-テーブルで指定されたサンプリングキー式。 + +- `storage_policy` (文字列)-ストレージポリシー: + + - [MergeTree](../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-multiple-volumes) + - [分散](../engines/table_engines/special/distributed.md#distributed) + +- `total_rows` (Nullable(UInt64))-テーブル内の正確な行数をすばやく決定できる場合は、行の総数。 `Null` (含むunderying `Buffer` テーブル)。 + +- `total_bytes` (Nullable(UInt64))-ストレージ上のテーブルの正確なバイト数を迅速に決定できる場合は、合計バイト数。 `Null` (**しない** を含みます)。 + + - If the table stores data on disk, returns used space on disk (i.e. compressed). + - テーブルがメモリにデータを格納する場合,メモリ内の使用バイトの近似数を返します. + +その `system.tables` テーブルは `SHOW TABLES` クエリの実装。 + +## システム。zookeeper {#system-zookeeper} + +ZooKeeperが設定されていない場合、テーブルは存在しません。 できるデータを読み込んで飼育係クラスタで定義され、config. 
+クエリには次のものが必要です ‘path’ WHERE句の等価条件です。 これは、データを取得したい子供のためのZooKeeperのパスです。 + +クエリ `SELECT * FROM system.zookeeper WHERE path = '/clickhouse'` すべての子のデータを出力します。 `/clickhouse` ノード +すべてのルートノードのデータを出力するには、path= ‘/’. +で指定されたパスの場合 ‘path’ 存在しない場合、例外がスローされます。 + +列: + +- `name` (String) — The name of the node. +- `path` (String) — The path to the node. +- `value` (String) — Node value. +- `dataLength` (Int32) — Size of the value. +- `numChildren` (Int32) — Number of descendants. +- `czxid` (Int64) — ID of the transaction that created the node. +- `mzxid` (Int64) — ID of the transaction that last changed the node. +- `pzxid` (Int64) — ID of the transaction that last deleted or added descendants. +- `ctime` (DateTime) — Time of node creation. +- `mtime` (DateTime) — Time of the last modification of the node. +- `version` (Int32) — Node version: the number of times the node was changed. +- `cversion` (Int32) — Number of added or removed descendants. +- `aversion` (Int32) — Number of changes to the ACL. +- `ephemeralOwner` (Int64) — For ephemeral nodes, the ID of the session that owns this node. + +例えば: + +``` sql +SELECT * +FROM system.zookeeper +WHERE path = '/clickhouse/tables/01-08/visits/replicas' +FORMAT Vertical +``` + +``` text +Row 1: +────── +name: example01-08-1.yandex.ru +value: +czxid: 932998691229 +mzxid: 932998691229 +ctime: 2015-03-27 16:49:51 +mtime: 2015-03-27 16:49:51 +version: 0 +cversion: 47 +aversion: 0 +ephemeralOwner: 0 +dataLength: 0 +numChildren: 7 +pzxid: 987021031383 +path: /clickhouse/tables/01-08/visits/replicas + +Row 2: +────── +name: example01-08-2.yandex.ru +value: +czxid: 933002738135 +mzxid: 933002738135 +ctime: 2015-03-27 16:57:01 +mtime: 2015-03-27 16:57:01 +version: 0 +cversion: 37 +aversion: 0 +ephemeralOwner: 0 +dataLength: 0 +numChildren: 7 +pzxid: 987021252247 +path: /clickhouse/tables/01-08/visits/replicas +``` + +## システム。突然変異 {#system_tables-mutations} + +のテーブルについての情報が含まれて [突然変異](../sql_reference/statements/alter.md#alter-mutations) マージツリーテーブルとその進捗状況の。 各突然変異コマンドは、単一の行で表されます。 テーブルには次の列があります: + +**データ**, **テーブル** -突然変異が適用されたデータベースとテーブルの名前。 + +**mutation\_id** -変異のID。 のための複製のテーブルこのIdに対応すznode名の `/mutations/` ZooKeeperのディレクトリ。 複雑でないテーブルの場合、Idはテーブルのデータディレクトリ内のファイル名に対応します。 + +**コマンド** -突然変異コマンド文字列(後のクエリの一部 `ALTER TABLE [db.]table`). + +**create\_time** -この突然変異コマンドが実行のために提出されたとき。 + +**ブロック番号。partition\_id**, **ブロック番号。番号** -入れ子になった列。 つまり、パーティションIDと、そのパーティションの変更によって取得されたブロック番号より小さい数のブロックを含むパーティションのみが変更さ 非複製のテーブル、ブロック番号の全ての仕切りがひとつのシーケンスです。 こないということを意味している変異体再現し、テーブルの列として展開しているのが記録するとともにシングルブロック番号の取得による突然変異が原因です。 + +**parts\_to\_do** -突然変異が終了するために突然変異する必要があるデータ部分の数。 + +**is\_done** -変異は終わったのか? たとえそうであっても `parts_to_do = 0` レプリケートされたテーブルの変更は、変更する必要のある新しいデータ部分を作成する実行時間の長いINSERTのためにまだ行われていない可能性があり + +一部の部分の変更に問題があった場合、次の列には追加情報が含まれています: + +**latest\_failed\_part** -突然変異できなかった最新の部分の名前。 + +**latest\_fail\_time** -最も最近の部分変異失敗の時間。 + +**latest\_fail\_reason** -最も最近の部分の突然変異の失敗を引き起こした例外メッセージ。 + +## システム。ディスク {#system_tables-disks} + +についての情報が含まれてディスクの定義に [サーバー構成](../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-multiple-volumes_configure). + +列: + +- `name` ([文字列](../sql_reference/data_types/string.md)) — Name of a disk in the server configuration. +- `path` ([文字列](../sql_reference/data_types/string.md)) — Path to the mount point in the file system. +- `free_space` ([UInt64](../sql_reference/data_types/int_uint.md)) — Free space on disk in bytes. +- `total_space` ([UInt64](../sql_reference/data_types/int_uint.md)) — Disk volume in bytes. 
+- `keep_free_space` ([UInt64](../sql_reference/data_types/int_uint.md)) — Amount of disk space that should stay free on disk in bytes. Defined in the `keep_free_space_bytes` ディスク構成のパラメータ。 + +## システム。ストレージ\_policies {#system_tables-storage_policies} + +についての情報が含まれて保管方針の量を定義する [サーバー構成](../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-multiple-volumes_configure). + +列: + +- `policy_name` ([文字列](../sql_reference/data_types/string.md)) — Name of the storage policy. +- `volume_name` ([文字列](../sql_reference/data_types/string.md)) — Volume name defined in the storage policy. +- `volume_priority` ([UInt64](../sql_reference/data_types/int_uint.md)) — Volume order number in the configuration. +- `disks` ([配列(文字列)](../sql_reference/data_types/array.md)) — Disk names, defined in the storage policy. +- `max_data_part_size` ([UInt64](../sql_reference/data_types/int_uint.md)) — Maximum size of a data part that can be stored on volume disks (0 — no limit). +- `move_factor` ([Float64](../sql_reference/data_types/float.md)) — Ratio of free disk space. When the ratio exceeds the value of configuration parameter, ClickHouse start to move data to the next volume in order. + +ストレージポリシーに複数のボリュームが含まれている場合、各ボリュームの情報はテーブルの個々の行に格納されます。 + +[元の記事](https://clickhouse.tech/docs/en/operations/system_tables/) diff --git a/docs/ja/operations/tips.md b/docs/ja/operations/tips.md deleted file mode 120000 index 9b3413bdbc3..00000000000 --- a/docs/ja/operations/tips.md +++ /dev/null @@ -1 +0,0 @@ -../../en/operations/tips.md \ No newline at end of file diff --git a/docs/ja/operations/tips.md b/docs/ja/operations/tips.md new file mode 100644 index 00000000000..92a13d27f2c --- /dev/null +++ b/docs/ja/operations/tips.md @@ -0,0 +1,251 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 58 +toc_title: "\u4F7F\u7528\u306E\u63A8\u5968\u4E8B\u9805" +--- + +# 使用の推奨事項 {#usage-recommendations} + +## CPUスケールガバナー {#cpu-scaling-governor} + +常に使用する `performance` スケーリング知事。 その `on-demand` スケーリング知事は、常に高い需要とはるかに悪い作品。 + +``` bash +$ echo 'performance' | sudo tee /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor +``` + +## CPUの制限 {#cpu-limitations} + +プロセッサでの過熱を防止します。 使用 `dmesg` 過熱によりCPUのクロックレートが制限されているかどうかを確認します。 +の制限を設定することもできます外部のデータセンターです。 を使用することができ `turbostat` 負荷の下でそれを監視する。 + +## RAM {#ram} + +少量のデータ(最大-200gb圧縮)の場合は、データ量と同じくらいのメモリを使用するのが最善です。 +大量のデータと対話型(オンライン)クエリを処理する場合は、ホットデータサブセットがページのキャッシュに収まるように、妥当な量のram(128gb以上)を使 +でもデータ量の50tbサーバ用のもの128gb ramを大幅に向上するクエリの性能に比べて64gbにサンプルがあります。 + +Overcommitを無効にしないでください。 を値 `cat /proc/sys/vm/overcommit_memory` 0または1である必要があります。 走れ。 + +``` bash +$ echo 0 | sudo tee /proc/sys/vm/overcommit_memory +``` + +## ヒュージページ {#huge-pages} + +常に透明な巨大ページを無効にします。 これはメモリアロケータに干渉し、パフォーマンスが大幅に低下します。 + +``` bash +$ echo 'never' | sudo tee /sys/kernel/mm/transparent_hugepage/enabled +``` + +使用 `perf top` メモリ管理のためにカーネルに費やされた時間を監視する。 +永続的なヒュージページも割り当てる必要はありません。 + +## 格納サブシステム {#storage-subsystem} + +予算でssdを使用できる場合は、ssdを使用してください。 +そうでない場合は、hddを使用します。 sata hdd7200rpmが実行されます。 + +優先のサーバー地域のハードディスク上に小さな複数のサーバーが付属ディスクが増す。 +ものの保存アーカイブでクエリー、棚します。 + +## RAID {#raid} + +HDDを使用する場合は、RAID-10、RAID-5、RAID-6またはRAID-50を組み合わせることができます。 +Linuxでは、ソフトウェアRAIDが優れている(と `mdadm`). 
LVMの使用はお勧めしません。 +RAID-10を作成するときは、以下を選択します。 `far` レイアウト。 +予算が許せば、raid-10を選択します。 + +4台以上のディスクがある場合は、raid-6(優先)またはraid-50を使用します(raid-5の代わりに使用します)。 +RAID-5、RAID-6またはRAID-50を使用する場合、デフォルト値は通常最良の選択ではないので、常にstripe\_cache\_sizeを増加させます。 + +``` bash +$ echo 4096 | sudo tee /sys/block/md2/md/stripe_cache_size +``` + +次の式を使用して、デバイスの数とブロックサイズから正確な数を計算します: `2 * num_devices * chunk_size_in_bytes / 4096`. + +1024kbのブロックサイズは、すべてのraid構成で十分です。 +ないセットのブロックサイズでは多すぎます。 + +SSDにRAID-0を使用できます。 +に関わらずraidの利用、使用複製のためのデータです。 + +長いキューでncqを有効にします。 hddの場合はcfqスケジューラを選択し、ssdの場合はnoopを選択します。 減らしてはいけない ‘readahead’ 設定。 +のためのハードディスク(hdd)を、書き込みます。 + +## ファイル {#file-system} + +Ext4は最も信頼性の高いオプションです。 マウントオプションの設定 `noatime, nobarrier`. +XFSも適していますが、ClickHouseで徹底的にテストされていません。 +他のほとんどのファイルシステム仕様。 ファイルシステムの遅配ます。 + +## Linuxカーネル {#linux-kernel} + +古いlinuxカーネルを使用しないでください。 + +## ネットワーク {#network} + +IPv6を使用している場合は、ルートキャッシュのサイズを大きくします。 +3.2より前のlinuxカーネルでは、ipv6の実装に多くの問題がありました。 + +可能な場合は、少なくとも10gbのネットワークを使用します。 1gbも動作しますが、数十テラバイトのデータを含むレプリカにパッチを適用したり、大量の中間データを含む分散クエリを処理する場合は、さらに悪 + +## ZooKeeper {#zookeeper} + +おそらく既にzookeeperを他の目的で使用しているでしょう。 それがまだ過負荷になっていない場合は、zookeeperと同じインストールを使用できます。 + +It's best to use a fresh version of ZooKeeper – 3.4.9 or later. The version in stable Linux distributions may be outdated. + +異なるzookeeperクラスタ間でデータを転送するために手動で記述されたスクリプトを使用することはありません。 決して使用 “zkcopy” 同じ理由でユーティリティ:https://github.com/ksprojects/zkcopy/issues/15 + +既存のzookeeperクラスターを二つに分割したい場合、正しい方法はレプリカの数を増やし、それを二つの独立したクラスターとして再構成することです。 + +ClickHouseと同じサーバーでZooKeeperを実行しないでください。 で飼育係が非常に敏感なために時間遅れとClickHouseを利用することも可能で利用可能なすべてシステム資源です。 + +デフォルトの設定では、zookeeperは時限爆弾です: + +> ZooKeeperサーバーは、デフォルト設定(autopurgeを参照)を使用するときに古いスナップショットやログからファイルを削除することはありません。 + +この爆弾は取り除かれなければならない + +以下のzookeeper(3.5.1)設定はyandexで使用されています。月のメトリカの生産環境20,2017: + +動物園cfg: + +``` bash +# http://hadoop.apache.org/zookeeper/docs/current/zookeeperAdmin.html + +# The number of milliseconds of each tick +tickTime=2000 +# The number of ticks that the initial +# synchronization phase can take +initLimit=30000 +# The number of ticks that can pass between +# sending a request and getting an acknowledgement +syncLimit=10 + +maxClientCnxns=2000 + +maxSessionTimeout=60000000 +# the directory where the snapshot is stored. +dataDir=/opt/zookeeper/{{ '{{' }} cluster['name'] {{ '}}' }}/data +# Place the dataLogDir to a separate physical disc for better performance +dataLogDir=/opt/zookeeper/{{ '{{' }} cluster['name'] {{ '}}' }}/logs + +autopurge.snapRetainCount=10 +autopurge.purgeInterval=1 + + +# To avoid seeks ZooKeeper allocates space in the transaction log file in +# blocks of preAllocSize kilobytes. The default block size is 64M. One reason +# for changing the size of the blocks is to reduce the block size if snapshots +# are taken more often. (Also, see snapCount). +preAllocSize=131072 + +# Clients can submit requests faster than ZooKeeper can process them, +# especially if there are a lot of clients. To prevent ZooKeeper from running +# out of memory due to queued requests, ZooKeeper will throttle clients so that +# there is no more than globalOutstandingLimit outstanding requests in the +# system. The default limit is 1,000.ZooKeeper logs transactions to a +# transaction log. After snapCount transactions are written to a log file a +# snapshot is started and a new transaction log file is started. The default +# snapCount is 10,000. +snapCount=3000000 + +# If this option is defined, requests will be will logged to a trace file named +# traceFile.year.month.day. 
+#traceFile= + +# Leader accepts client connections. Default value is "yes". The leader machine +# coordinates updates. For higher update throughput at thes slight expense of +# read throughput the leader can be configured to not accept clients and focus +# on coordination. +leaderServes=yes + +standaloneEnabled=false +dynamicConfigFile=/etc/zookeeper-{{ '{{' }} cluster['name'] {{ '}}' }}/conf/zoo.cfg.dynamic +``` + +Javaバージョン: + +``` text +Java(TM) SE Runtime Environment (build 1.8.0_25-b17) +Java HotSpot(TM) 64-Bit Server VM (build 25.25-b02, mixed mode) +``` + +JVMパラメータ: + +``` bash +NAME=zookeeper-{{ '{{' }} cluster['name'] {{ '}}' }} +ZOOCFGDIR=/etc/$NAME/conf + +# TODO this is really ugly +# How to find out, which jars are needed? +# seems, that log4j requires the log4j.properties file to be in the classpath +CLASSPATH="$ZOOCFGDIR:/usr/build/classes:/usr/build/lib/*.jar:/usr/share/zookeeper/zookeeper-3.5.1-metrika.jar:/usr/share/zookeeper/slf4j-log4j12-1.7.5.jar:/usr/share/zookeeper/slf4j-api-1.7.5.jar:/usr/share/zookeeper/servlet-api-2.5-20081211.jar:/usr/share/zookeeper/netty-3.7.0.Final.jar:/usr/share/zookeeper/log4j-1.2.16.jar:/usr/share/zookeeper/jline-2.11.jar:/usr/share/zookeeper/jetty-util-6.1.26.jar:/usr/share/zookeeper/jetty-6.1.26.jar:/usr/share/zookeeper/javacc.jar:/usr/share/zookeeper/jackson-mapper-asl-1.9.11.jar:/usr/share/zookeeper/jackson-core-asl-1.9.11.jar:/usr/share/zookeeper/commons-cli-1.2.jar:/usr/src/java/lib/*.jar:/usr/etc/zookeeper" + +ZOOCFG="$ZOOCFGDIR/zoo.cfg" +ZOO_LOG_DIR=/var/log/$NAME +USER=zookeeper +GROUP=zookeeper +PIDDIR=/var/run/$NAME +PIDFILE=$PIDDIR/$NAME.pid +SCRIPTNAME=/etc/init.d/$NAME +JAVA=/usr/bin/java +ZOOMAIN="org.apache.zookeeper.server.quorum.QuorumPeerMain" +ZOO_LOG4J_PROP="INFO,ROLLINGFILE" +JMXLOCALONLY=false +JAVA_OPTS="-Xms{{ '{{' }} cluster.get('xms','128M') {{ '}}' }} \ + -Xmx{{ '{{' }} cluster.get('xmx','1G') {{ '}}' }} \ + -Xloggc:/var/log/$NAME/zookeeper-gc.log \ + -XX:+UseGCLogFileRotation \ + -XX:NumberOfGCLogFiles=16 \ + -XX:GCLogFileSize=16M \ + -verbose:gc \ + -XX:+PrintGCTimeStamps \ + -XX:+PrintGCDateStamps \ + -XX:+PrintGCDetails + -XX:+PrintTenuringDistribution \ + -XX:+PrintGCApplicationStoppedTime \ + -XX:+PrintGCApplicationConcurrentTime \ + -XX:+PrintSafepointStatistics \ + -XX:+UseParNewGC \ + -XX:+UseConcMarkSweepGC \ +-XX:+CMSParallelRemarkEnabled" +``` + +塩init: + +``` text +description "zookeeper-{{ '{{' }} cluster['name'] {{ '}}' }} centralized coordination service" + +start on runlevel [2345] +stop on runlevel [!2345] + +respawn + +limit nofile 8192 8192 + +pre-start script + [ -r "/etc/zookeeper-{{ '{{' }} cluster['name'] {{ '}}' }}/conf/environment" ] || exit 0 + . /etc/zookeeper-{{ '{{' }} cluster['name'] {{ '}}' }}/conf/environment + [ -d $ZOO_LOG_DIR ] || mkdir -p $ZOO_LOG_DIR + chown $USER:$GROUP $ZOO_LOG_DIR +end script + +script + . /etc/zookeeper-{{ '{{' }} cluster['name'] {{ '}}' }}/conf/environment + [ -r /etc/default/zookeeper ] && . 
/etc/default/zookeeper + if [ -z "$JMXDISABLE" ]; then + JAVA_OPTS="$JAVA_OPTS -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.local.only=$JMXLOCALONLY" + fi + exec start-stop-daemon --start -c $USER --exec $JAVA --name zookeeper-{{ '{{' }} cluster['name'] {{ '}}' }} \ + -- -cp $CLASSPATH $JAVA_OPTS -Dzookeeper.log.dir=${ZOO_LOG_DIR} \ + -Dzookeeper.root.logger=${ZOO_LOG4J_PROP} $ZOOMAIN $ZOOCFG +end script +``` + +{## [元の記事](https://clickhouse.tech/docs/en/operations/tips/) ##} diff --git a/docs/ja/operations/troubleshooting.md b/docs/ja/operations/troubleshooting.md deleted file mode 120000 index 84f0ff34f41..00000000000 --- a/docs/ja/operations/troubleshooting.md +++ /dev/null @@ -1 +0,0 @@ -../../en/operations/troubleshooting.md \ No newline at end of file diff --git a/docs/ja/operations/troubleshooting.md b/docs/ja/operations/troubleshooting.md new file mode 100644 index 00000000000..91545454b9e --- /dev/null +++ b/docs/ja/operations/troubleshooting.md @@ -0,0 +1,146 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 46 +toc_title: "\u30C8\u30E9\u30D6\u30EB" +--- + +# トラブル {#troubleshooting} + +- [設置](#troubleshooting-installation-errors) +- [サーバーへの接続](#troubleshooting-accepts-no-connections) +- [クエリ処理](#troubleshooting-does-not-process-queries) +- [クエリ処理の効率](#troubleshooting-too-slow) + +## 設置 {#troubleshooting-installation-errors} + +### Apt-getでClickhouseリポジトリからDebパッケージを取得できません {#you-cannot-get-deb-packages-from-clickhouse-repository-with-apt-get} + +- ファイア +- できない場合はアクセスリポジトリのために、何らかの理由でダウンロードパッケージに記載のとおり [はじめに](../getting_started/index.md) を使用して手動でインストールします。 `sudo dpkg -i ` 司令部 また、必要になります `tzdata` パッケージ。 + +## サーバーへの接続 {#troubleshooting-accepts-no-connections} + +考えられる問題: + +- サーバーが実行されていません。 +- 想定外または誤った設定パラメータ。 + +### サーバーが実行中でない {#server-is-not-running} + +**サーバーがrunnnigかどうかチェック** + +コマンド: + +``` bash +$ sudo service clickhouse-server status +``` + +サーバーが実行されていない場合は、次のコマンドで起動します: + +``` bash +$ sudo service clickhouse-server start +``` + +**ログの確認** + +のメインログ `clickhouse-server` にある `/var/log/clickhouse-server/clickhouse-server.log` デフォルトでは。 + +サーバーが正常に起動した場合は、文字列が表示されます: + +- ` Application: starting up.` — Server started. +- ` Application: Ready for connections.` — Server is running and ready for connections. + +もし `clickhouse-server` 設定エラーで起動に失敗しました。 `` エラーの説明を含む文字列。 例えば: + +``` text +2019.01.11 15:23:25.549505 [ 45 ] {} ExternalDictionaries: Failed reloading 'event2id' external dictionary: Poco::Exception. Code: 1000, e.code() = 111, e.displayText() = Connection refused, e.what() = Connection refused +``` + +ファイルの最後にエラーが表示されない場合は、文字列から始まるファイル全体を調べます: + +``` text + Application: starting up. +``` + +次のインスタンスを起動しようとすると、 `clickhouse-server` サーバーには、次のログが表示されます: + +``` text +2019.01.11 15:25:11.151730 [ 1 ] {} : Starting ClickHouse 19.1.0 with revision 54413 +2019.01.11 15:25:11.154578 [ 1 ] {} Application: starting up +2019.01.11 15:25:11.156361 [ 1 ] {} StatusFile: Status file ./status already exists - unclean restart. Contents: +PID: 8510 +Started at: 2019-01-11 15:24:23 +Revision: 54413 + +2019.01.11 15:25:11.156673 [ 1 ] {} Application: DB::Exception: Cannot lock file ./status. Another server instance in same directory is already running. 
+2019.01.11 15:25:11.156682 [ 1 ] {} Application: shutting down +2019.01.11 15:25:11.156686 [ 1 ] {} Application: Uninitializing subsystem: Logging Subsystem +2019.01.11 15:25:11.156716 [ 2 ] {} BaseDaemon: Stop SignalListener thread +``` + +**システムを参照。dログ** + +有用な情報が見つからない場合は `clickhouse-server` ログがないか、ログがない場合は、次のように表示できます `system.d` コマンドを使用したログ: + +``` bash +$ sudo journalctl -u clickhouse-server +``` + +**インタラクティブモードでclickhouse-serverを起動** + +``` bash +$ sudo -u clickhouse /usr/bin/clickhouse-server --config-file /etc/clickhouse-server/config.xml +``` + +このコマ このモードでは `clickhouse-server` 版画のすべてのイベントメッセージです。 + +### 構成変数 {#configuration-parameters} + +チェック: + +- Dockerの設定。 + + IPv6ネットワークのDockerでClickHouseを実行する場合は、次のことを確認してください `network=host` 設定されています。 + +- エンドポイント設定。 + + チェック [listen\_host](server_configuration_parameters/settings.md#server_configuration_parameters-listen_host) と [tcp\_portgenericname](server_configuration_parameters/settings.md#server_configuration_parameters-tcp_port) 設定。 + + ClickHouseサーバーを受け入れlocalhostの接続のみによるデフォルトです。 + +- HTTPプロトコル設定。 + + HTTP APIのプロトコル設定を確認します。 + +- 安全な接続設定。 + + チェック: + + - その [tcp\_port\_secure](server_configuration_parameters/settings.md#server_configuration_parameters-tcp_port_secure) 設定。 + - の設定 [SSL sertificates](server_configuration_parameters/settings.md#server_configuration_parameters-openssl). + + 適切なパラメータを接続 たとえば、以下を使用します `port_secure` 変数との `clickhouse_client`. + +- ユーザー設定。 + + きの違うユーザー名やパスワードになります。 + +## クエリ処理 {#troubleshooting-does-not-process-queries} + +ClickHouseがクエリを処理できない場合は、クライアントにエラーの説明を送信します。 で `clickhouse-client` コンソールにエラーの説明が表示されます。 HTTPインターフェイスを使用している場合、ClickHouseはレスポンス本文にエラーの説明を送信します。 例えば: + +``` bash +$ curl 'http://localhost:8123/' --data-binary "SELECT a" +Code: 47, e.displayText() = DB::Exception: Unknown identifier: a. Note that there are no tables (FROM clause) in your query, context: required_names: 'a' source_tables: table_aliases: private_aliases: column_aliases: public_columns: 'a' masked_columns: array_join_columns: source_columns: , e.what() = DB::Exception +``` + +あなたが始めるなら `clickhouse-client` と `stack-trace` パラメータ、ClickHouseは、エラーの説明とサーバースタックトレースを返します。 + +あるいは、メッセージが壊れて接続します。 この場合、クエリを繰り返すことができます。 クエリを実行するたびに接続が切断された場合は、サーバーログにエラーがないか確認します。 + +## クエリ処理の効率 {#troubleshooting-too-slow} + +だがclickhouseでもゆっくりが必要にプロファイルをサーバーに負荷をかける資源とネットワークのためのご質問. 
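+
+たとえば、クエリのロギングが有効になっている場合([log\_queries](settings/settings.md#settings-log-queries) 設定を参照)、まず `system.query_log` テーブルから実行時間の長いクエリを特定できます。以下は一例です:
+
+``` sql
+SELECT
+    query,
+    query_duration_ms,
+    read_rows,
+    memory_usage
+FROM system.query_log
+WHERE type = 'QueryFinish'
+ORDER BY query_duration_ms DESC
+LIMIT 10
+```
+
+`query_duration_ms` が大きいクエリは、サーバー資源とネットワークの負荷を調べる出発点になります。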
+ +で利用できますclickhouse-ベンチマークユーティリティプます。 これは、毎秒処理されるクエリの数、毎秒処理される行の数、およびクエリの処理時間の百分位数を示します。 diff --git a/docs/ja/operations/update.md b/docs/ja/operations/update.md deleted file mode 120000 index 88a092c0dff..00000000000 --- a/docs/ja/operations/update.md +++ /dev/null @@ -1 +0,0 @@ -../../en/operations/update.md \ No newline at end of file diff --git a/docs/ja/operations/update.md b/docs/ja/operations/update.md new file mode 100644 index 00000000000..e68ed4b4500 --- /dev/null +++ b/docs/ja/operations/update.md @@ -0,0 +1,20 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 47 +toc_title: "\u30AF\u30EA\u30C3\u30AF\u30CF\u30A6\u30B9\u66F4\u65B0" +--- + +# クリックハウス更新 {#clickhouse-update} + +まclickhouse設置されたからdebパッケージ、以下のコマンドを実行し、サーバー: + +``` bash +$ sudo apt-get update +$ sudo apt-get install clickhouse-client clickhouse-server +$ sudo service clickhouse-server restart +``` + +推奨されるdebパッケージ以外のものを使用してclickhouseをインストールした場合は、適切な更新方法を使用します。 + +ClickHouseは分散updateをサポートしていません。 この操作は、個別のサーバーごとに連続して実行する必要があります。 クラスター上のすべてのサーバーを同時に更新しないでください。 diff --git a/docs/ja/operations/utilities/clickhouse-benchmark.md b/docs/ja/operations/utilities/clickhouse-benchmark.md deleted file mode 120000 index 3695c9fbdd3..00000000000 --- a/docs/ja/operations/utilities/clickhouse-benchmark.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/operations/utilities/clickhouse-benchmark.md \ No newline at end of file diff --git a/docs/ja/operations/utilities/clickhouse-benchmark.md b/docs/ja/operations/utilities/clickhouse-benchmark.md new file mode 100644 index 00000000000..f51d5f5001e --- /dev/null +++ b/docs/ja/operations/utilities/clickhouse-benchmark.md @@ -0,0 +1,156 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 61 +toc_title: "clickhouse-\u30D9\u30F3\u30C1\u30DE\u30FC\u30AF" +--- + +# clickhouse-ベンチマーク {#clickhouse-benchmark} + +ClickHouseサーバーに接続し、指定されたクエリを繰り返し送信します。 + +構文: + +``` bash +$ echo "single query" | clickhouse-benchmark [keys] +``` + +または + +``` bash +$ clickhouse-benchmark [keys] <<< "single query" +``` + +一連のクエリを送信する場合は、テキストファイルを作成し、各クエリをこのファイル内の個々の文字列に配置します。 例えば: + +``` sql +SELECT * FROM system.numbers LIMIT 10000000 +SELECT 1 +``` + +次に、このファイルを次の標準入力に渡します `clickhouse-benchmark`. + +``` bash +clickhouse-benchmark [keys] < queries_file +``` + +## キー {#clickhouse-benchmark-keys} + +- `-c N`, `--concurrency=N` — Number of queries that `clickhouse-benchmark` 同時に送信します。 デフォルト値:1。 +- `-d N`, `--delay=N` — Interval in seconds between intermediate reports (set 0 to disable reports). Default value: 1. +- `-h WORD`, `--host=WORD` — Server host. Default value: `localhost`. のための [比較モード](#clickhouse-benchmark-comparison-mode) 複数を使用できます `-h` 鍵を +- `-p N`, `--port=N` — Server port. Default value: 9000. For the [比較モード](#clickhouse-benchmark-comparison-mode) 複数を使用できます `-p` 鍵を +- `-i N`, `--iterations=N` — Total number of queries. Default value: 0. +- `-r`, `--randomize` — Random order of queries execution if there is more then one input query. +- `-s`, `--secure` — Using TLS connection. +- `-t N`, `--timelimit=N` — Time limit in seconds. `clickhouse-benchmark` 指定された制限時間に達すると、クエリの送信を停止します。 デフォルト値:0(制限時間は無効)。 +- `--confidence=N` — Level of confidence for T-test. Possible values: 0 (80%), 1 (90%), 2 (95%), 3 (98%), 4 (99%), 5 (99.5%). Default value: 5. 
In the [比較モード](#clickhouse-benchmark-comparison-mode) `clickhouse-benchmark` を実行します。 [独立した二sample学生のt-テスト](https://en.wikipedia.org/wiki/Student%27s_t-test#Independent_two-sample_t-test) 二つの分布が選択した信頼度と異ならないかどうかをテストします。 +- `--cumulative` — Printing cumulative data instead of data per interval. +- `--database=DATABASE_NAME` — ClickHouse database name. Default value: `default`. +- `--json=FILEPATH` — JSON output. When the key is set, `clickhouse-benchmark` 指定したJSONファイルにレポートを出力します。 +- `--user=USERNAME` — ClickHouse user name. Default value: `default`. +- `--password=PSWD` — ClickHouse user password. Default value: empty string. +- `--stacktrace` — Stack traces output. When the key is set, `clickhouse-bencmark` 例外スタックトレースを出力します。 +- `--stage=WORD` — Query processing stage at server. ClickHouse stops query processing and returns answer to `clickhouse-benchmark` 指定された段階で。 可能な値: `complete`, `fetch_columns`, `with_mergeable_state`. デフォルト値: `complete`. +- `--help` — Shows the help message. + +あなたがいくつかを適用したい場合 [設定](../../operations/settings/index.md) クエリの場合は、キーとして渡します `--= SETTING_VALUE`. 例えば, `--max_memory_usage=1048576`. + +## 出力 {#clickhouse-benchmark-output} + +デフォルトでは, `clickhouse-benchmark` それぞれのレポート `--delay` 間隔。 + +レポートの例: + +``` text +Queries executed: 10. + +localhost:9000, queries 10, QPS: 6.772, RPS: 67904487.440, MiB/s: 518.070, result RPS: 67721584.984, result MiB/s: 516.675. + +0.000% 0.145 sec. +10.000% 0.146 sec. +20.000% 0.146 sec. +30.000% 0.146 sec. +40.000% 0.147 sec. +50.000% 0.148 sec. +60.000% 0.148 sec. +70.000% 0.148 sec. +80.000% 0.149 sec. +90.000% 0.150 sec. +95.000% 0.150 sec. +99.000% 0.150 sec. +99.900% 0.150 sec. +99.990% 0.150 sec. +``` + +このレポートでは、: + +- その中のクエリの数 `Queries executed:` フィールド。 + +- ステータスストリングを含む(順): + + - ClickHouseサーバーのエンドポイント。 + - 処理されたクエリの数。 + - QPS:QPS:クエリサーバーは、指定された期間に毎秒何回実行されたか `--delay` 引数。 + - RPS:指定された期間にサーバが毎秒読み込んだ行数 `--delay` 引数。 + - MiB/s:指定された期間中に毎秒読み取られるmebibytesサーバーの数 `--delay` 引数。 + - result RPS:サーバーによって指定された期間における秒あたりのクエリの結果に配置された行数 `--delay` 引数。 + - どのように多くのmebibytesは秒あたりのクエリの結果にサーバーによって配置されます。 `--delay` 引数。 + +- クエリの実行時間の百分位数。 + +## 比較モード {#clickhouse-benchmark-comparison-mode} + +`clickhouse-benchmark` ツつィツ姪"ツつ"ツ債ツづュツつケ + +利用の比較モードを指定し端のサーバーによるペア `--host`, `--port` 鍵を キーは、最初の引数リスト内の位置によって一致します `--host` は最初のものと一致します `--port` というように。 `clickhouse-benchmark` 両方のサーバーへの接続を確立し、クエリを送信します。 各クエリは、ランダムに選択されたサーバー宛。 結果は、サーバーごとに個別に表示されます。 + +## 例えば {#clickhouse-benchmark-example} + +``` bash +$ echo "SELECT * FROM system.numbers LIMIT 10000000 OFFSET 10000000" | clickhouse-benchmark -i 10 +``` + +``` text +Loaded 1 queries. + +Queries executed: 6. + +localhost:9000, queries 6, QPS: 6.153, RPS: 123398340.957, MiB/s: 941.455, result RPS: 61532982.200, result MiB/s: 469.459. + +0.000% 0.159 sec. +10.000% 0.159 sec. +20.000% 0.159 sec. +30.000% 0.160 sec. +40.000% 0.160 sec. +50.000% 0.162 sec. +60.000% 0.164 sec. +70.000% 0.165 sec. +80.000% 0.166 sec. +90.000% 0.166 sec. +95.000% 0.167 sec. +99.000% 0.167 sec. +99.900% 0.167 sec. +99.990% 0.167 sec. + + + +Queries executed: 10. + +localhost:9000, queries 10, QPS: 6.082, RPS: 121959604.568, MiB/s: 930.478, result RPS: 60815551.642, result MiB/s: 463.986. + +0.000% 0.159 sec. +10.000% 0.159 sec. +20.000% 0.160 sec. +30.000% 0.163 sec. +40.000% 0.164 sec. +50.000% 0.165 sec. +60.000% 0.166 sec. +70.000% 0.166 sec. +80.000% 0.167 sec. +90.000% 0.167 sec. +95.000% 0.170 sec. +99.000% 0.172 sec. +99.900% 0.172 sec. +99.990% 0.172 sec. 
+``` diff --git a/docs/ja/operations/utilities/clickhouse-copier.md b/docs/ja/operations/utilities/clickhouse-copier.md deleted file mode 120000 index 734772841b6..00000000000 --- a/docs/ja/operations/utilities/clickhouse-copier.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/operations/utilities/clickhouse-copier.md \ No newline at end of file diff --git a/docs/ja/operations/utilities/clickhouse-copier.md b/docs/ja/operations/utilities/clickhouse-copier.md new file mode 100644 index 00000000000..bb230a03ca8 --- /dev/null +++ b/docs/ja/operations/utilities/clickhouse-copier.md @@ -0,0 +1,176 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 59 +toc_title: "\u30AF\u30EA\u30C3\u30AF\u30CF\u30A6\u30B9\u30B3\u30D4\u30FC\u6A5F" +--- + +# クリックハウスコピー機 {#clickhouse-copier} + +コピーデータからのテーブルを一つクラスターテーブルの他の同クラスター + +複数実行できます `clickhouse-copier` インスタンスの異なるサーバーを行う仕事です。 ZooKeeperはプロセスの同期に使用されます。 + +始まることの後, `clickhouse-copier`: + +- ZooKeeperに接続して受信する: + + - ジョブのコピー。 + - コピージョブの状態。 + +- これは、ジョブを実行します。 + + 各実行中のプロセスは、 “closest” ザ-シャーのソースクラスタのデータ転送先のクラスター resharding場合はそのデータが必要です。 + +`clickhouse-copier` ZooKeeperの変更を追跡し、その場でそれらを適用します。 + +ネットワークトラフィッ `clickhouse-copier` ソースデータがある同じサーバー上。 + +## ランニングclickhouse-コピー機 {#running-clickhouse-copier} + +このユーティ: + +``` bash +$ clickhouse-copier copier --daemon --config zookeeper.xml --task-path /task/path --base-dir /path/to/dir +``` + +パラメータ: + +- `daemon` — Starts `clickhouse-copier` デーモンモードで。 +- `config` — The path to the `zookeeper.xml` ZooKeeperへの接続のためのパラメータを持つファイル。 +- `task-path` — The path to the ZooKeeper node. This node is used for syncing `clickhouse-copier` プロセスと格納タスク。 タスクは `$task-path/description`. +- `task-file` — Optional path to file with task configuration for initial upload to ZooKeeper. +- `task-upload-force` — Force upload `task-file` ノードが既に存在する場合でも。 +- `base-dir` — The path to logs and auxiliary files. When it starts, `clickhouse-copier` 作成 `clickhouse-copier_YYYYMMHHSS_` サブディレクトリ `$base-dir`. このパラメーターを省略すると、ディレクトリーは次の場所に作成されます `clickhouse-copier` 発売されました。 + +## 飼育係のフォーマット。xml {#format-of-zookeeper-xml} + +``` xml + + + trace + 100M + 3 + + + + + 127.0.0.1 + 2181 + + + +``` + +## コピータスクの設定 {#configuration-of-copying-tasks} + +``` xml + + + + + + false + + 127.0.0.1 + 9000 + + + ... + + + + ... + + + + + 2 + + + + 1 + + + + + 0 + + + + + 3 + + 1 + + + + + + + + source_cluster + test + hits + + + destination_cluster + test + hits2 + + + + ENGINE=ReplicatedMergeTree('/clickhouse/tables/{cluster}/{shard}/hits2', '{replica}') + PARTITION BY toMonday(date) + ORDER BY (CounterID, EventDate) + + + + jumpConsistentHash(intHash64(UserID), 2) + + + CounterID != 0 + + + + '2018-02-26' + '2018-03-05' + ... + + + + + + ... + + ... 
+ + +``` + +`clickhouse-copier` の変更を追跡します。 `/task/path/description` そして、その場でそれらを適用します。 たとえば、次の値を変更すると `max_workers`、タスクを実行しているプロセスの数も変更されます。 + +[元の記事](https://clickhouse.tech/docs/en/operations/utils/clickhouse-copier/) diff --git a/docs/ja/operations/utilities/clickhouse-local.md b/docs/ja/operations/utilities/clickhouse-local.md deleted file mode 120000 index b565359c891..00000000000 --- a/docs/ja/operations/utilities/clickhouse-local.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/operations/utilities/clickhouse-local.md \ No newline at end of file diff --git a/docs/ja/operations/utilities/clickhouse-local.md b/docs/ja/operations/utilities/clickhouse-local.md new file mode 100644 index 00000000000..07956919861 --- /dev/null +++ b/docs/ja/operations/utilities/clickhouse-local.md @@ -0,0 +1,81 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 60 +toc_title: "\uFF82\u3064\"\uFF82\u3065\u6309\u3064\uFF75\uFF82\uFF01" +--- + +# ツつ"ツづ按つオツ! {#clickhouse-local} + +その `clickhouse-local` プログラムは、展開し、ClickHouseサーバーを構成することなく、ローカルファイルに高速処理を実行できます。 + +データを受け入れを表すテーブル、クエリを利用して [クリックハウスsql方言](../../sql_reference/index.md). + +`clickhouse-local` ClickHouse serverと同じコアを使用するため、ほとんどの機能と同じフォーマットとテーブルエンジンをサポートします。 + +デフォルトでは `clickhouse-local` 同じホスト上のデータにアクセスすることはできませんが、 `--config-file` 引数。 + +!!! warning "警告" + るおそれがあります。負荷生産サーバの設定に `clickhouse-local` 人為的なミスの場合、データが破損する可能性があるためです。 + +## 使い方 {#usage} + +基本的な使用法: + +``` bash +$ clickhouse-local --structure "table_structure" --input-format "format_of_incoming_data" -q "query" +``` + +引数: + +- `-S`, `--structure` — table structure for input data. +- `-if`, `--input-format` — input format, `TSV` デフォルトでは。 +- `-f`, `--file` — path to data, `stdin` デフォルトでは。 +- `-q` `--query` — queries to execute with `;` デリメートルとして +- `-N`, `--table` — table name where to put output data, `table` デフォルトでは。 +- `-of`, `--format`, `--output-format` — output format, `TSV` デフォルトでは。 +- `--stacktrace` — whether to dump debug output in case of exception. +- `--verbose` — more details on query execution. +- `-s` — disables `stderr` ログ記録。 +- `--config-file` — path to configuration file in same format as for ClickHouse server, by default the configuration empty. +- `--help` — arguments references for `clickhouse-local`. + +また、より一般的に使用される各clickhouse構成変数の引数があります `--config-file`. + +## 例 {#examples} + +``` bash +$ echo -e "1,2\n3,4" | clickhouse-local -S "a Int64, b Int64" -if "CSV" -q "SELECT * FROM table" +Read 2 rows, 32.00 B in 0.000 sec., 5182 rows/sec., 80.97 KiB/sec. +1 2 +3 4 +``` + +前の例は次のようになります: + +``` bash +$ echo -e "1,2\n3,4" | clickhouse-local -q "CREATE TABLE table (a Int64, b Int64) ENGINE = File(CSV, stdin); SELECT a, b FROM table; DROP TABLE table" +Read 2 rows, 32.00 B in 0.000 sec., 4987 rows/sec., 77.93 KiB/sec. +1 2 +3 4 +``` + +次に、各unixユーザーのメモリユーザーを出力します: + +``` bash +$ ps aux | tail -n +2 | awk '{ printf("%s\t%s\n", $1, $4) }' | clickhouse-local -S "user String, mem Float64" -q "SELECT user, round(sum(mem), 2) as memTotal FROM table GROUP BY user ORDER BY memTotal DESC FORMAT Pretty" +``` + +``` text +Read 186 rows, 4.15 KiB in 0.035 sec., 5302 rows/sec., 118.34 KiB/sec. +┏━━━━━━━━━━┳━━━━━━━━━━┓ +┃ user ┃ memTotal ┃ +┡━━━━━━━━━━╇━━━━━━━━━━┩ +│ bayonet │ 113.5 │ +├──────────┼──────────┤ +│ root │ 8.8 │ +├──────────┼──────────┤ +... 
+```
+
+[元の記事](https://clickhouse.tech/docs/en/operations/utils/clickhouse-local/) 
diff --git a/docs/ja/operations/utilities/index.md b/docs/ja/operations/utilities/index.md
deleted file mode 120000
index 695b0da9a95..00000000000
--- a/docs/ja/operations/utilities/index.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/operations/utilities/index.md
\ No newline at end of file
diff --git a/docs/ja/operations/utilities/index.md b/docs/ja/operations/utilities/index.md
new file mode 100644
index 00000000000..89b111da578
--- /dev/null
+++ b/docs/ja/operations/utilities/index.md
@@ -0,0 +1,15 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_folder_title: Utilities
+toc_priority: 56
+toc_title: "\u6982\u8981"
+---
+
+# ClickHouseユーティリティ {#clickhouse-utility}
+
+- [clickhouse-local](clickhouse-local.md) — Allows running SQL queries on data without stopping the ClickHouse server, similar to how `awk` does this.
+- [クリックハウスコピー機](clickhouse-copier.md) — Copies (and reshards) data from one cluster to another cluster.
+- [clickhouse-ベンチマーク](clickhouse-benchmark.md) — Loads server with the custom queries and settings.
+
+[元の記事](https://clickhouse.tech/docs/en/operations/utils/)
diff --git a/docs/ja/sql_reference/aggregate_functions/combinators.md b/docs/ja/sql_reference/aggregate_functions/combinators.md
deleted file mode 120000
index c16c11068f6..00000000000
--- a/docs/ja/sql_reference/aggregate_functions/combinators.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/sql_reference/aggregate_functions/combinators.md
\ No newline at end of file
diff --git a/docs/ja/sql_reference/aggregate_functions/combinators.md b/docs/ja/sql_reference/aggregate_functions/combinators.md
new file mode 100644
index 00000000000..0aba4497269
--- /dev/null
+++ b/docs/ja/sql_reference/aggregate_functions/combinators.md
@@ -0,0 +1,166 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 37
+toc_title: "\u96C6\u8A08\u95A2\u6570\u306E\u30B3\u30F3\u30D3\u30CD\u30FC\u30BF"
+---
+
+# 集計関数のコンビネータ {#aggregate_functions_combinators}
+
+集計関数の名前には接尾辞を付けることができます。 これにより、集計関数の動作が変更されます。
+
+## -If {#agg-functions-combinator-if}
+
+The suffix -If can be appended to the name of any aggregate function. In this case, the aggregate function accepts an extra argument – a condition (Uint8 type). The aggregate function processes only the rows that trigger the condition. If the condition was not triggered even once, it returns a default value (usually zeros or empty strings).
+
+例: `sumIf(column, cond)`, `countIf(cond)`, `avgIf(x, cond)`, `quantilesTimingIf(level1, level2)(x, cond)`, `argMinIf(arg, val, cond)` など。
+
+条件付き集計関数を使用すると、サブクエリや `JOIN` を使わずに、複数の条件に対する集計を一度に計算できます。 たとえばYandex.Metricaでは、条件付き集計関数を使ってセグメント比較機能を実装しています。
+
+## -Array {#agg-functions-combinator-array}
+
+-Arrayサフィックスは、任意の集計関数に追加できます。 この場合、集計関数は ‘T’ 型の引数の代わりに ‘Array(T)’ 型(配列)の引数を取ります。 集計関数が複数の引数を受け入れる場合は、同じ長さの配列でなければなりません。 配列を処理する場合、集計関数はすべての配列要素に対して元の集計関数と同様に機能します。
+
+例1: `sumArray(arr)` - すべての ‘arr’ 配列のすべての要素を合計します。 この例は、より簡単に `sum(arraySum(arr))` と書くこともできます。
+
+例2: `uniqArray(arr)` – すべての ‘arr’ 配列内の一意な要素の数を数えます。 これは `uniq(arrayJoin(arr))` というより簡単な方法でも可能ですが、クエリに ‘arrayJoin’ を常に追加できるとは限りません。
+
+-Ifと-Arrayは組み合わせることができます。 ただし、‘Array’ が先で、その後に ‘If’ が続きます。 例: `uniqArrayIf(arr, cond)`, `quantilesTimingArrayIf(level1, level2)(arr, cond)`.
この順序のために、 ‘cond’ 引数は配列ではありません。 + +## -状態 {#agg-functions-combinator-state} + +このコンビネーターを適用すると、集計関数は結果の値を返しません(たとえば、このコンビネーターの一意の値の数など)。 [uniq](reference.md#agg_function-uniq) の中間状態である。 `uniq`、これは一意の値の数を計算するためのハッシュテーブルです)。 これは `AggregateFunction(...)` これをさらなる処理に使用したり、テーブルに格納して後で集計を完了することができます。 + +これらの国は、利用: + +- [ツつィツ姪"ツつ"ツ債ツづュツつケ](../../engines/table_engines/mergetree_family/aggregatingmergetree.md) テーブルエンジン。 +- [finalizeAggregation](../../sql_reference/functions/other_functions.md#function-finalizeaggregation) 機能。 +- [runningAccumulate](../../sql_reference/functions/other_functions.md#function-runningaccumulate) 機能。 +- [-マージ](#aggregate_functions_combinators_merge) コンビネータ +- [-MergeState](#aggregate_functions_combinators_mergestate) コンビネータ + +## -マージ {#aggregate_functions_combinators-merge} + +このコンビネーターを適用すると、aggregate関数は中間の集約状態を引数として受け取り、状態を結合して集計を終了し、結果の値を返します。 + +## -MergeState {#aggregate_functions_combinators-mergestate} + +-mergeコンビネータと同じ方法で中間の集約状態をマージします。 しかし、結果の値を返すのではなく、-stateコンビネータに似た中間の集約状態を返します。 + +## -ForEach {#agg-functions-combinator-foreach} + +テーブルの集計関数を、対応する配列項目を集約して結果の配列を返す配列の集計関数に変換します。 例えば, `sumForEach` 配列の場合 `[1, 2]`, `[3, 4, 5]`と`[6, 7]`結果を返します `[10, 13, 5]` 対応する配列項目を一緒に追加した後。 + +## -オルデフォルト {#agg-functions-combinator-ordefault} + +集約する値が何もない場合は、集計関数の戻り値のデフォルト値を設定します。 + +``` sql +SELECT avg(number), avgOrDefault(number) FROM numbers(0) +``` + +``` text +┌─avg(number)─┬─avgOrDefault(number)─┐ +│ nan │ 0 │ +└─────────────┴──────────────────────┘ +``` + +## -オルヌル {#agg-functions-combinator-ornull} + +塗りつぶし `null` 集計するものがない場合。 戻り列はnull可能になります。 + +``` sql +SELECT avg(number), avgOrNull(number) FROM numbers(0) +``` + +``` text +┌─avg(number)─┬─avgOrNull(number)─┐ +│ nan │ ᴺᵁᴸᴸ │ +└─────────────┴───────────────────┘ +``` + +-OrDefaultと-OrNullは他のコンビネータと組み合わせることができます。 これは、集計関数が空の入力を受け入れない場合に便利です。 + +``` sql +SELECT avgOrNullIf(x, x > 10) +FROM +( + SELECT toDecimal32(1.23, 2) AS x +) +``` + +``` text +┌─avgOrNullIf(x, greater(x, 10))─┐ +│ ᴺᵁᴸᴸ │ +└────────────────────────────────┘ +``` + +## -リサンプル {#agg-functions-combinator-resample} + +データをグループに分割し、それらのグループのデータを個別に集計できます。 グループは、ある列の値を間隔に分割することによって作成されます。 + +``` sql +Resample(start, end, step)(, resampling_key) +``` + +**パラメータ** + +- `start` — Starting value of the whole required interval for `resampling_key` 値。 +- `stop` — Ending value of the whole required interval for `resampling_key` 値。 全体の間隔は含まれていません `stop` 値 `[start, stop)`. +- `step` — Step for separating the whole interval into subintervals. The `aggFunction` 実行されるそれぞれのsubintervals。 +- `resampling_key` — Column whose values are used for separating data into intervals. +- `aggFunction_params` — `aggFunction` パラメータ。 + +**戻り値** + +- の配列 `aggFunction` 各サブインターバルの結果。 + +**例えば** + +考慮する `people` テーブルのデータ: + +``` text +┌─name───┬─age─┬─wage─┐ +│ John │ 16 │ 10 │ +│ Alice │ 30 │ 15 │ +│ Mary │ 35 │ 8 │ +│ Evelyn │ 48 │ 11.5 │ +│ David │ 62 │ 9.9 │ +│ Brian │ 60 │ 16 │ +└────────┴─────┴──────┘ +``` + +のは、その年齢の間隔にある人の名前を取得してみましょう `[30,60)` と `[60,75)`. 
私たちは年齢の整数表現を使用しているので、私たちはで年齢を取得します `[30, 59]` と `[60,74]` 間隔。 + +配列内の名前を集約するには、次のものを使用します [グルーパー](reference.md#agg_function-grouparray) 集計関数。 それは一つの議論を取る。 私たちの場合、それは `name` コラム その `groupArrayResample` 関数は `age` 年齢別に名前を集計する列。 必要な間隔を定義するために、 `30, 75, 30` への引数 `groupArrayResample` 機能。 + +``` sql +SELECT groupArrayResample(30, 75, 30)(name, age) FROM people +``` + +``` text +┌─groupArrayResample(30, 75, 30)(name, age)─────┐ +│ [['Alice','Mary','Evelyn'],['David','Brian']] │ +└───────────────────────────────────────────────┘ +``` + +結果を考慮する。 + +`Jonh` 彼は若すぎるので、サンプルの外です。 他の人は、指定された年齢区間に従って配布されます。 + +プラグインのインス数の合計人数とその平均賃金には、指定された年齢の間隔とします。 + +``` sql +SELECT + countResample(30, 75, 30)(name, age) AS amount, + avgResample(30, 75, 30)(wage, age) AS avg_wage +FROM people +``` + +``` text +┌─amount─┬─avg_wage──────────────────┐ +│ [3,2] │ [11.5,12.949999809265137] │ +└────────┴───────────────────────────┘ +``` + +[元の記事](https://clickhouse.tech/docs/en/query_language/agg_functions/combinators/) diff --git a/docs/ja/sql_reference/aggregate_functions/index.md b/docs/ja/sql_reference/aggregate_functions/index.md deleted file mode 120000 index e8bc7c1342f..00000000000 --- a/docs/ja/sql_reference/aggregate_functions/index.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/aggregate_functions/index.md \ No newline at end of file diff --git a/docs/ja/sql_reference/aggregate_functions/index.md b/docs/ja/sql_reference/aggregate_functions/index.md new file mode 100644 index 00000000000..d36ade9a637 --- /dev/null +++ b/docs/ja/sql_reference/aggregate_functions/index.md @@ -0,0 +1,62 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_folder_title: Aggregate Functions +toc_priority: 33 +toc_title: "\u5C0E\u5165" +--- + +# 集計関数 {#aggregate-functions} + +集計関数は、 [通常の](http://www.sql-tutorial.com/sql-aggregate-functions-sql-tutorial) 方法として期待されデータベースの専門家です。 + +ClickHouseはまた支えます: + +- [パラメトリックに集計機能](parametric_functions.md#aggregate_functions_parametric) 列に加えて他のパラメータを受け入れる。 +- [Combinators](combinators.md#aggregate_functions_combinators)、集計関数の動作を変更します。 + +## NULLの場合の処理 {#null-processing} + +集計中、すべて `NULL`sはスキップされます。 + +**例:** + +この表を考慮する: + +``` text +┌─x─┬────y─┐ +│ 1 │ 2 │ +│ 2 │ ᴺᵁᴸᴸ │ +│ 3 │ 2 │ +│ 3 │ 3 │ +│ 3 │ ᴺᵁᴸᴸ │ +└───┴──────┘ +``` + +の値を合計する必要があるとしましょう `y` 列: + +``` sql +SELECT sum(y) FROM t_null_big +``` + + ┌─sum(y)─┐ + │ 7 │ + └────────┘ + +その `sum` 関数の解釈 `NULL` として `0`. 特に、これは、関数がすべての値がある選択の入力を受け取った場合 `NULL` その後、結果は次のようになります `0`、ない `NULL`. 
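+
+To check the NULL-skipping behaviour described above without creating a table, a self-contained sketch (hypothetical inline values) can be used:
+
+``` sql
+SELECT sum(y)
+FROM
+(
+    SELECT arrayJoin([1, NULL, 2]) AS y
+)
+```
+
+The NULL element is skipped during aggregation, so the query returns 3.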
+ +今すぐ使用できます `groupArray` から配列を作成する関数 `y` 列: + +``` sql +SELECT groupArray(y) FROM t_null_big +``` + +``` text +┌─groupArray(y)─┐ +│ [2,2,3] │ +└───────────────┘ +``` + +`groupArray` 含まれていません `NULL` 結果の配列です。 + +[元の記事](https://clickhouse.tech/docs/en/query_language/agg_functions/) diff --git a/docs/ja/sql_reference/aggregate_functions/parametric_functions.md b/docs/ja/sql_reference/aggregate_functions/parametric_functions.md deleted file mode 120000 index 4b9a90f6e4c..00000000000 --- a/docs/ja/sql_reference/aggregate_functions/parametric_functions.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/aggregate_functions/parametric_functions.md \ No newline at end of file diff --git a/docs/ja/sql_reference/aggregate_functions/parametric_functions.md b/docs/ja/sql_reference/aggregate_functions/parametric_functions.md new file mode 100644 index 00000000000..6d61ee95c46 --- /dev/null +++ b/docs/ja/sql_reference/aggregate_functions/parametric_functions.md @@ -0,0 +1,499 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 38 +toc_title: "\u30D1\u30E9\u30E1\u30C8\u30EA\u30C3\u30AF\u96C6\u8A08\u95A2\u6570" +--- + +# パラメトリック集計関数 {#aggregate_functions_parametric} + +Some aggregate functions can accept not only argument columns (used for compression), but a set of parameters – constants for initialization. The syntax is two pairs of brackets instead of one. The first is for parameters, and the second is for arguments. + +## ヒストグラム {#histogram} + +適応ヒストグラムを計算します。 正確な結果を保証するものではありません。 + +``` sql +histogram(number_of_bins)(values) +``` + +関数は以下を使用します [ストリーミングの並列決定木アルゴリズム](http://jmlr.org/papers/volume11/ben-haim10a/ben-haim10a.pdf). ヒストグラムビンの境界は、新しいデータが関数に入ると調整されます。 一般的なケースでは、ビンの幅は等しくありません。 + +**パラメータ** + +`number_of_bins` — Upper limit for the number of bins in the histogram. The function automatically calculates the number of bins. It tries to reach the specified number of bins, but if it fails, it uses fewer bins. +`values` — [式](../syntax.md#syntax-expressions) その結果、入力値が得られます。 + +**戻り値** + +- [配列](../../sql_reference/data_types/array.md) の [タプル](../../sql_reference/data_types/tuple.md) 次の形式の: + + ``` + [(lower_1, upper_1, height_1), ... (lower_N, upper_N, height_N)] + ``` + + - `lower` — Lower bound of the bin. + - `upper` — Upper bound of the bin. + - `height` — Calculated height of the bin. + +**例えば** + +``` sql +SELECT histogram(5)(number + 1) +FROM ( + SELECT * + FROM system.numbers + LIMIT 20 +) +``` + +``` text +┌─histogram(5)(plus(number, 1))───────────────────────────────────────────┐ +│ [(1,4.5,4),(4.5,8.5,4),(8.5,12.75,4.125),(12.75,17,4.625),(17,20,3.25)] │ +└─────────────────────────────────────────────────────────────────────────┘ +``` + +ヒストグラムを視覚化することができます [バー](../../sql_reference/functions/other_functions.md#function-bar) たとえば、関数: + +``` sql +WITH histogram(5)(rand() % 100) AS hist +SELECT + arrayJoin(hist).3 AS height, + bar(height, 0, 6, 5) AS bar +FROM +( + SELECT * + FROM system.numbers + LIMIT 20 +) +``` + +``` text +┌─height─┬─bar───┐ +│ 2.125 │ █▋ │ +│ 3.25 │ ██▌ │ +│ 5.625 │ ████▏ │ +│ 5.625 │ ████▏ │ +│ 3.375 │ ██▌ │ +└────────┴───────┘ +``` + +この場合、ヒストグラムビンの境界線がわからないことを覚えておく必要があります。 + +## sequenceMatch(pattern)(timestamp, cond1, cond2, …) {#function-sequencematch} + +かどうかをチェックします配列を含むイベントのチェーンに一致するパターンです。 + +``` sql +sequenceMatch(pattern)(timestamp, cond1, cond2, ...) +``` + +!!! warning "警告" + 同じ秒で発生するイベントは、結果に影響を与える未定義の順序でシーケンス内に置くことができます。 + +**パラメータ** + +- `pattern` — Pattern string. 
See [パターン構文](#sequence-function-pattern-syntax). + +- `timestamp` — Column considered to contain time data. Typical data types are `Date` と `DateTime`. も利用できますの対応 [UInt](../../sql_reference/data_types/int_uint.md) データ型。 + +- `cond1`, `cond2` — Conditions that describe the chain of events. Data type: `UInt8`. 最大32個の条件引数を渡すことができます。 この関数は、これらの条件で説明されているイベントのみを考慮します。 シーケンスに条件に記述されていないデータが含まれている場合、関数はそれらをスキップします。 + +**戻り値** + +- パターンが一致すれば、1。 +- 0、パターンが一致しない場合。 + +タイプ: `UInt8`. + + +**パターン構文** + +- `(?N)` — Matches the condition argument at position `N`. 条件には、 `[1, 32]` 範囲。 例えば, `(?1)` に渡された引数にマッチします。 `cond1` パラメータ。 + +- `.*` — Matches any number of events. You don't need conditional arguments to match this element of the pattern. + +- `(?t operator value)` — Sets the time in seconds that should separate two events. For example, pattern `(?1)(?t>1800)(?2)` 互いに1800秒を超えて発生するイベントに一致します。 任意の数は、当社が定めるインターネットを築くことです。 を使用することができ `>=`, `>`, `<`, `<=` 演算子。 + +**例** + +のデータを考慮して下さい `t` テーブル: + +``` text +┌─time─┬─number─┐ +│ 1 │ 1 │ +│ 2 │ 3 │ +│ 3 │ 2 │ +└──────┴────────┘ +``` + +クエリの実行: + +``` sql +SELECT sequenceMatch('(?1)(?2)')(time, number = 1, number = 2) FROM t +``` + +``` text +┌─sequenceMatch('(?1)(?2)')(time, equals(number, 1), equals(number, 2))─┐ +│ 1 │ +└───────────────────────────────────────────────────────────────────────┘ +``` + +この関数は、番号2が番号1に続くイベントチェーンを見つけました。 数字はイベントとして記述されていないため、3番をスキップしました。 この例で与えられたイベントチェーンを検索するときにこの番号を考慮に入れたい場合は、その条件を作成する必要があります。 + +``` sql +SELECT sequenceMatch('(?1)(?2)')(time, number = 1, number = 2, number = 3) FROM t +``` + +``` text +┌─sequenceMatch('(?1)(?2)')(time, equals(number, 1), equals(number, 2), equals(number, 3))─┐ +│ 0 │ +└──────────────────────────────────────────────────────────────────────────────────────────┘ +``` + +この場合、関数は、3番のイベントが1と2の間で発生したため、パターンに一致するイベントチェーンを見つけることができませんでした。 同じケースで4の条件をチェックした場合、シーケンスはパターンに一致します。 + +``` sql +SELECT sequenceMatch('(?1)(?2)')(time, number = 1, number = 2, number = 4) FROM t +``` + +``` text +┌─sequenceMatch('(?1)(?2)')(time, equals(number, 1), equals(number, 2), equals(number, 4))─┐ +│ 1 │ +└──────────────────────────────────────────────────────────────────────────────────────────┘ +``` + +**また見なさい** + +- [sequenceCount](#function-sequencecount) + +## sequenceCount(pattern)(time, cond1, cond2, …) {#function-sequencecount} + +パターンに一致するイベントチェーンの数を数えます。 この関数は、重複しないイベントチェーンを検索します。 現在のチェーンが一致した後、次のチェーンの検索を開始します。 + +!!! warning "警告" + 同じ秒で発生するイベントは、結果に影響を与える未定義の順序でシーケンス内に置くことができます。 + +``` sql +sequenceCount(pattern)(timestamp, cond1, cond2, ...) +``` + +**パラメータ** + +- `pattern` — Pattern string. See [パターン構文](#sequence-function-pattern-syntax). + +- `timestamp` — Column considered to contain time data. Typical data types are `Date` と `DateTime`. も利用できますの対応 [UInt](../../sql_reference/data_types/int_uint.md) データ型。 + +- `cond1`, `cond2` — Conditions that describe the chain of events. Data type: `UInt8`. 最大32個の条件引数を渡すことができます。 この関数は、これらの条件で説明されているイベントのみを考慮します。 シーケンスに条件に記述されていないデータが含まれている場合、関数はそれらをスキップします。 + +**戻り値** + +- 一致する重複しないイベントチェーンの数。 + +タイプ: `UInt64`. 
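+
+The time-window conditions from the pattern syntax also work with `sequenceCount`. Before the table-based examples below, here is a hypothetical self-contained sketch; the inline timestamps and events are invented for illustration:
+
+``` sql
+SELECT sequenceCount('(?1)(?t<=5)(?2)')(ts, event = 1, event = 2)
+FROM
+(
+    SELECT 1 AS ts, 1 AS event
+    UNION ALL SELECT 3, 2
+    UNION ALL SELECT 100, 1
+    UNION ALL SELECT 200, 2
+)
+```
+
+Only the first pair of events is separated by at most 5 seconds, so the function should return 1.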
+ +**例えば** + +のデータを考慮して下さい `t` テーブル: + +``` text +┌─time─┬─number─┐ +│ 1 │ 1 │ +│ 2 │ 3 │ +│ 3 │ 2 │ +│ 4 │ 1 │ +│ 5 │ 3 │ +│ 6 │ 2 │ +└──────┴────────┘ +``` + +数2は、それらの間の他の数字の任意の量と数1の後に発生した回数をカウント: + +``` sql +SELECT sequenceCount('(?1).*(?2)')(time, number = 1, number = 2) FROM t +``` + +``` text +┌─sequenceCount('(?1).*(?2)')(time, equals(number, 1), equals(number, 2))─┐ +│ 2 │ +└─────────────────────────────────────────────────────────────────────────┘ +``` + +**また見なさい** + +- [sequenceMatch](#function-sequencematch) + +## windowfunnelcomment {#windowfunnel} + +スライドタイムウィンドウでイベントチェーンを検索し、チェーンから発生したイベントの最大数を計算します。 + +関数はアルゴリズムに従って動作します: + +- この関数は、チェーン内の最初の条件をトリガーするデータを検索し、イベントカウンターを1に設定します。 これは、スライドウィンドウが始まる瞬間です。 + +- だから、チェーンが順次内のウインドウのカウンタを増加されます。 イベントのシーケンスが中断された場合、カウンターは増分されません。 + +- データにさまざまな完了点で複数のイベントチェーンがある場合、関数は最長チェーンのサイズのみを出力します。 + +**構文** + +``` sql +windowFunnel(window, [mode])(timestamp, cond1, cond2, ..., condN) +``` + +**パラメータ** + +- `window` — Length of the sliding window in seconds. +- `mode` -省略可能な引数です。 + - `'strict'` -とき `'strict'` windowFunnel()は、一意の値に対してのみ条件を適用します。 +- `timestamp` — Name of the column containing the timestamp. Data types supported: [日付](../../sql_reference/data_types/date.md), [DateTime](../../sql_reference/data_types/datetime.md#data_type-datetime) その他の符号なし整数型(timestampがサポートしているにもかかわらず `UInt64` 値はInt64最大値を超えることはできません.2^63-1)。 +- `cond` — Conditions or data describing the chain of events. [UInt8](../../sql_reference/data_types/int_uint.md). + +**戻り値** + +スライディングタイムウィンドウ内のチェーンからの連続トリガー条件の最大数。 +選択内のすべてのチェーンが分析されます。 + +タイプ: `Integer`. + +**例えば** + +ユーザーが電話を選択してオンラインストアで二度購入するのに十分な期間が設定されているかどうかを判断します。 + +次の一連のイベントを設定します: + +1. ユーザーがストアのアカウントにログインした場合 (`eventID = 1003`). +2. ユーザーは電話を検索します (`eventID = 1007, product = 'phone'`). +3. ユーザーが注文した (`eventID = 1009`). +4. ユーザーが再び注文した (`eventID = 1010`). + +入力テーブル: + +``` text +┌─event_date─┬─user_id─┬───────────timestamp─┬─eventID─┬─product─┐ +│ 2019-01-28 │ 1 │ 2019-01-29 10:00:00 │ 1003 │ phone │ +└────────────┴─────────┴─────────────────────┴─────────┴─────────┘ +┌─event_date─┬─user_id─┬───────────timestamp─┬─eventID─┬─product─┐ +│ 2019-01-31 │ 1 │ 2019-01-31 09:00:00 │ 1007 │ phone │ +└────────────┴─────────┴─────────────────────┴─────────┴─────────┘ +┌─event_date─┬─user_id─┬───────────timestamp─┬─eventID─┬─product─┐ +│ 2019-01-30 │ 1 │ 2019-01-30 08:00:00 │ 1009 │ phone │ +└────────────┴─────────┴─────────────────────┴─────────┴─────────┘ +┌─event_date─┬─user_id─┬───────────timestamp─┬─eventID─┬─product─┐ +│ 2019-02-01 │ 1 │ 2019-02-01 08:00:00 │ 1010 │ phone │ +└────────────┴─────────┴─────────────────────┴─────────┴─────────┘ +``` + +ユーザーの距離を調べる `user_id` を介して得ることができるチェーンで期間で月-月の2019。 + +クエリ: + +``` sql +SELECT + level, + count() AS c +FROM +( + SELECT + user_id, + windowFunnel(6048000000000000)(timestamp, eventID = 1003, eventID = 1009, eventID = 1007, eventID = 1010) AS level + FROM trend + WHERE (event_date >= '2019-01-01') AND (event_date <= '2019-02-02') + GROUP BY user_id +) +GROUP BY level +ORDER BY level ASC +``` + +結果: + +``` text +┌─level─┬─c─┐ +│ 4 │ 1 │ +└───────┴───┘ +``` + +## 保持 {#retention} + +関数は引数として1から32までの条件のセットを受け取ります。 `UInt8` るかどうかを示す一定の条件を満ためのイベントです。 +任意の条件を引数として指定することができます。 [WHERE](../../sql_reference/statements/select.md#select-where)). 
+ +第一と第二が真であれば第二の結果は真であり、第一と第二が真であれば第三の結果は真である。 + +**構文** + +``` sql +retention(cond1, cond2, ..., cond32); +``` + +**パラメータ** + +- `cond` — an expression that returns a `UInt8` 結果(1または0)。 + +**戻り値** + +1または0の配列。 + +- 1 — condition was met for the event. +- 0 — condition wasn't met for the event. + +タイプ: `UInt8`. + +**例えば** + +の計算の例を考えてみましょう `retention` サイトトラフィックを決定する機能。 + +**1.** Сreate a table to illustrate an example. + +``` sql +CREATE TABLE retention_test(date Date, uid Int32) ENGINE = Memory; + +INSERT INTO retention_test SELECT '2020-01-01', number FROM numbers(5); +INSERT INTO retention_test SELECT '2020-01-02', number FROM numbers(10); +INSERT INTO retention_test SELECT '2020-01-03', number FROM numbers(15); +``` + +入力テーブル: + +クエリ: + +``` sql +SELECT * FROM retention_test +``` + +結果: + +``` text +┌───────date─┬─uid─┐ +│ 2020-01-01 │ 0 │ +│ 2020-01-01 │ 1 │ +│ 2020-01-01 │ 2 │ +│ 2020-01-01 │ 3 │ +│ 2020-01-01 │ 4 │ +└────────────┴─────┘ +┌───────date─┬─uid─┐ +│ 2020-01-02 │ 0 │ +│ 2020-01-02 │ 1 │ +│ 2020-01-02 │ 2 │ +│ 2020-01-02 │ 3 │ +│ 2020-01-02 │ 4 │ +│ 2020-01-02 │ 5 │ +│ 2020-01-02 │ 6 │ +│ 2020-01-02 │ 7 │ +│ 2020-01-02 │ 8 │ +│ 2020-01-02 │ 9 │ +└────────────┴─────┘ +┌───────date─┬─uid─┐ +│ 2020-01-03 │ 0 │ +│ 2020-01-03 │ 1 │ +│ 2020-01-03 │ 2 │ +│ 2020-01-03 │ 3 │ +│ 2020-01-03 │ 4 │ +│ 2020-01-03 │ 5 │ +│ 2020-01-03 │ 6 │ +│ 2020-01-03 │ 7 │ +│ 2020-01-03 │ 8 │ +│ 2020-01-03 │ 9 │ +│ 2020-01-03 │ 10 │ +│ 2020-01-03 │ 11 │ +│ 2020-01-03 │ 12 │ +│ 2020-01-03 │ 13 │ +│ 2020-01-03 │ 14 │ +└────────────┴─────┘ +``` + +**2.** グループのユーザーによるユニークID `uid` を使用して `retention` 機能。 + +クエリ: + +``` sql +SELECT + uid, + retention(date = '2020-01-01', date = '2020-01-02', date = '2020-01-03') AS r +FROM retention_test +WHERE date IN ('2020-01-01', '2020-01-02', '2020-01-03') +GROUP BY uid +ORDER BY uid ASC +``` + +結果: + +``` text +┌─uid─┬─r───────┐ +│ 0 │ [1,1,1] │ +│ 1 │ [1,1,1] │ +│ 2 │ [1,1,1] │ +│ 3 │ [1,1,1] │ +│ 4 │ [1,1,1] │ +│ 5 │ [0,0,0] │ +│ 6 │ [0,0,0] │ +│ 7 │ [0,0,0] │ +│ 8 │ [0,0,0] │ +│ 9 │ [0,0,0] │ +│ 10 │ [0,0,0] │ +│ 11 │ [0,0,0] │ +│ 12 │ [0,0,0] │ +│ 13 │ [0,0,0] │ +│ 14 │ [0,0,0] │ +└─────┴─────────┘ +``` + +**3.** 一日あたりのサイト訪問の合計数を計算します。 + +クエリ: + +``` sql +SELECT + sum(r[1]) AS r1, + sum(r[2]) AS r2, + sum(r[3]) AS r3 +FROM +( + SELECT + uid, + retention(date = '2020-01-01', date = '2020-01-02', date = '2020-01-03') AS r + FROM retention_test + WHERE date IN ('2020-01-01', '2020-01-02', '2020-01-03') + GROUP BY uid +) +``` + +結果: + +``` text +┌─r1─┬─r2─┬─r3─┐ +│ 5 │ 5 │ 5 │ +└────┴────┴────┘ +``` + +どこに: + +- `r1`-2020-01-01の間にサイトを訪問したユニーク訪問者の数 `cond1` 条件)。 +- `r2`-2020-01-01から2020-01-02までの特定の期間にサイトを訪問したユニーク訪問者の数 (`cond1` と `cond2` 条件)。 +- `r3`-2020-01-01から2020-01-03までの特定の期間にサイトを訪問したユニーク訪問者の数 (`cond1` と `cond3` 条件)。 + +## uniqUpTo(N)(x) {#uniquptonx} + +Calculates the number of different argument values ​​if it is less than or equal to N. If the number of different argument values is greater than N, it returns N + 1. + +小さいnsの使用のために推薦される、10まで。 nの最大値は100です。 + +集計関数の状態については、1+n\*に等しいメモリの量をバイトの一つの値のサイズを使用しています。 +文字列の場合、8バイトの非暗号化ハッシュを格納します。 つまり、計算は文字列に対して近似されます。 + +この関数は、いくつかの引数でも機能します。 + +大きなn値が使用され、一意の値の数がnよりわずかに少ない場合を除いて、できるだけ速く動作します。 + +使用例: + +``` text +Problem: Generate a report that shows only keywords that produced at least 5 unique users. 
+Solution: Write in the GROUP BY query SearchPhrase HAVING uniqUpTo(4)(UserID) >= 5 +``` + +[元の記事](https://clickhouse.tech/docs/en/query_language/agg_functions/parametric_functions/) + +## sumMapFiltered(keys\_to\_keep)(キー、値) {#summapfilteredkeys-to-keepkeys-values} + +同じ動作として [sumMap](reference.md#agg_functions-summap) キーの配列がパラメータとして渡されることを除いて。 これは、キーの高い基数を扱うときに特に便利です。 diff --git a/docs/ja/sql_reference/aggregate_functions/reference.md b/docs/ja/sql_reference/aggregate_functions/reference.md deleted file mode 120000 index fde121df440..00000000000 --- a/docs/ja/sql_reference/aggregate_functions/reference.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/aggregate_functions/reference.md \ No newline at end of file diff --git a/docs/ja/sql_reference/aggregate_functions/reference.md b/docs/ja/sql_reference/aggregate_functions/reference.md new file mode 100644 index 00000000000..fb0f2310100 --- /dev/null +++ b/docs/ja/sql_reference/aggregate_functions/reference.md @@ -0,0 +1,1837 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 36 +toc_title: "\u53C2\u7167" +--- + +# 関数リファレンス {#function-reference} + +## カウント {#agg_function-count} + +行数またはnull以外の値をカウントします。 + +ClickHouseは以下の構文をサポートしています `count`: +- `count(expr)` または `COUNT(DISTINCT expr)`. +- `count()` または `COUNT(*)`. その `count()` 構文はClickHouse固有です。 + +**パラメータ** + +機能は取ることができます: + +- ゼロ変数。 +- ワン [式](../syntax.md#syntax-expressions). + +**戻り値** + +- 関数がパラメータなしで呼び出されると、行数がカウントされます。 +- この [式](../syntax.md#syntax-expressions) が渡されると、この関数は、この式がnullではなく返された回数をカウントします。 式がaを返す場合 [Nullable](../../sql_reference/data_types/nullable.md)-タイプ値、そして結果の `count` 滞在しない `Nullable`. 式が返された場合、関数は0を返します `NULL` すべての行について。 + +どちらの場合も、戻り値の型は次のようになります [UInt64](../../sql_reference/data_types/int_uint.md). + +**詳細** + +クリックハウスは `COUNT(DISTINCT ...)` 構文。 この構成の動作は、 [count\_distinct\_implementation](../../operations/settings/settings.md#settings-count_distinct_implementation) 設定。 それはどれをの定義します [uniq\*](#agg_function-uniq) 関数は、操作を実行するために使用されます。 デフォルトは [ユニキャック](#agg_function-uniqexact) 機能。 + +その `SELECT count() FROM table` テーブル内のエントリの数が別々に格納されていないため、クエリは最適化されません。 テーブルから小さな列を選択し、その中の値の数を数えます。 + +**例** + +例1: + +``` sql +SELECT count() FROM t +``` + +``` text +┌─count()─┐ +│ 5 │ +└─────────┘ +``` + +例2: + +``` sql +SELECT name, value FROM system.settings WHERE name = 'count_distinct_implementation' +``` + +``` text +┌─name──────────────────────────┬─value─────┐ +│ count_distinct_implementation │ uniqExact │ +└───────────────────────────────┴───────────┘ +``` + +``` sql +SELECT count(DISTINCT num) FROM t +``` + +``` text +┌─uniqExact(num)─┐ +│ 3 │ +└────────────────┘ +``` + +この例では、 `count(DISTINCT num)` によって実行される。 `uniqExact` に従う機能 `count_distinct_implementation` 設定値。 + +## 任意(x) {#agg_function-any} + +最初に検出された値を選択します。 +クエリは、毎回異なる順序で実行することができるため、この関数の結果は不確定です。 +確定的な結果を得るには、 ‘min’ または ‘max’ 関数の代わりに ‘any’. + +場合によっては、実行順序に頼ることができます。 これは、order byを使用するサブクエリからのselectの場合に適用されます。 + +とき `SELECT` クエリには `GROUP BY` 句または少なくとも一つの集計関数、ClickHouse(MySQLとは対照的に)内のすべての式ということが必要です `SELECT`, `HAVING`、と `ORDER BY` 句は、キーまたは集計関数から計算されます。 つまり、テーブルから選択された各列は、キーまたは集計関数内で使用する必要があります。 MySQLのような動作を得るには、他の列を `any` 集計関数。 + +## anyHeavy(x) {#anyheavyx} + +頻繁に発生する値を選択します。 [ヘビーヒッターズ](http://www.cs.umd.edu/~samir/498/karp.pdf) アルゴリズムだ 各クエリの実行スレッドのケースの半分を超える値がある場合は、この値が返されます。 通常、結果は非決定的です。 + +``` sql +anyHeavy(column) +``` + +**引数** + +- `column` – The column name. 
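+
+The ontime example below requires downloading a sample dataset. As a quicker self-contained check, here is a synthetic sketch (the expression is arbitrary) in which the value 0 occupies more than half of the rows:
+
+``` sql
+SELECT anyHeavy(intDiv(number, 75)) AS res
+FROM numbers(100)
+```
+
+Here 0 occurs in 75 of the 100 rows, so it is the expected result.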
+ +**例えば** + +を取る [オンタイム](../../getting_started/example_datasets/ontime.md) データセットと選択頻繁に発生する値で `AirlineID` コラム + +``` sql +SELECT anyHeavy(AirlineID) AS res +FROM ontime +``` + +``` text +┌───res─┐ +│ 19690 │ +└───────┘ +``` + +## anyllast(x) {#anylastx} + +最後に検出された値を選択します。 +結果は、次の場合と同様に不確定です `any` 機能。 + +## groupBitAnd {#groupbitand} + +ビットごとの適用 `AND` 一連の数字のために。 + +``` sql +groupBitAnd(expr) +``` + +**パラメータ** + +`expr` – An expression that results in `UInt*` タイプ。 + +**戻り値** + +の値 `UInt*` タイプ。 + +**例えば** + +テストデータ: + +``` text +binary decimal +00101100 = 44 +00011100 = 28 +00001101 = 13 +01010101 = 85 +``` + +クエリ: + +``` sql +SELECT groupBitAnd(num) FROM t +``` + +どこに `num` テストデータの列です。 + +結果: + +``` text +binary decimal +00000100 = 4 +``` + +## groupBitOr {#groupbitor} + +ビットごとの適用 `OR` 一連の数字のために。 + +``` sql +groupBitOr(expr) +``` + +**パラメータ** + +`expr` – An expression that results in `UInt*` タイプ。 + +**戻り値** + +の値 `UInt*` タイプ。 + +**例えば** + +テストデータ: + +``` text +binary decimal +00101100 = 44 +00011100 = 28 +00001101 = 13 +01010101 = 85 +``` + +クエリ: + +``` sql +SELECT groupBitOr(num) FROM t +``` + +どこに `num` テストデータの列です。 + +結果: + +``` text +binary decimal +01111101 = 125 +``` + +## groupBitXor {#groupbitxor} + +ビットごとの適用 `XOR` 一連の数字のために。 + +``` sql +groupBitXor(expr) +``` + +**パラメータ** + +`expr` – An expression that results in `UInt*` タイプ。 + +**戻り値** + +の値 `UInt*` タイプ。 + +**例えば** + +テストデータ: + +``` text +binary decimal +00101100 = 44 +00011100 = 28 +00001101 = 13 +01010101 = 85 +``` + +クエリ: + +``` sql +SELECT groupBitXor(num) FROM t +``` + +どこに `num` テストデータの列です。 + +結果: + +``` text +binary decimal +01101000 = 104 +``` + +## groupBitmap {#groupbitmap} + +符号なし整数列からのビットマップ計算または集計計算を行い、uint64型のカーディナリティを返します。 [ビットマップ](../../sql_reference/functions/bitmap_functions.md). + +``` sql +groupBitmap(expr) +``` + +**パラメータ** + +`expr` – An expression that results in `UInt*` タイプ。 + +**戻り値** + +の値 `UInt64` タイプ。 + +**例えば** + +テストデータ: + +``` text +UserID +1 +1 +2 +3 +``` + +クエリ: + +``` sql +SELECT groupBitmap(UserID) as num FROM t +``` + +結果: + +``` text +num +3 +``` + +## 最小(x) {#agg_function-min} + +最小値を計算します。 + +## 最大(x) {#agg_function-max} + +最大値を計算します。 + +## argMin(arg,val) {#agg-function-argmin} + +を計算し ‘arg’ 最小値の値 ‘val’ 値。 いくつかの異なる値がある場合 ‘arg’ のための最小値 ‘val’ これらの値のうち、最初に検出された値が出力されます。 + +**例えば:** + +``` text +┌─user─────┬─salary─┐ +│ director │ 5000 │ +│ manager │ 3000 │ +│ worker │ 1000 │ +└──────────┴────────┘ +``` + +``` sql +SELECT argMin(user, salary) FROM salary +``` + +``` text +┌─argMin(user, salary)─┐ +│ worker │ +└──────────────────────┘ +``` + +## argMax(arg,val) {#agg-function-argmax} + +を計算し ‘arg’ 最大値の値 ‘val’ 値。 いくつかの異なる値がある場合 ‘arg’ の最大値 ‘val’ これらの値のうち、最初に検出された値が出力されます。 + +## sum(x) {#agg_function-sum} + +合計を計算します。 +数字のためにのみ動作します。 + +## sumWithOverflow(x) {#sumwithoverflowx} + +入力パラメーターの結果と同じデータ型を使用して、数値の合計を計算します。 合計がこのデータ型の最大値を超えると、関数はエラーを返します。 + +数字のためにのみ動作します。 + +## sumMap(キー,値) {#agg_functions-summap} + +合計 ‘value’ 配列に指定されたキーに応じて ‘key’ 配列だ +の要素の数 ‘key’ と ‘value’ 合計される行ごとに同じでなければなりません。 +Returns a tuple of two arrays: keys in sorted order, and values ​​summed for the corresponding keys. 
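+
+Before the fuller example below, a minimal self-contained sketch (hypothetical inline arrays) shows the shape of the result:
+
+``` sql
+SELECT sumMap(k, v)
+FROM
+(
+    SELECT [1, 2] AS k, [10, 10] AS v
+    UNION ALL
+    SELECT [2, 3] AS k, [10, 10] AS v
+)
+```
+
+The keys are returned sorted, and the values for the shared key 2 are summed: `([1,2,3],[10,20,10])`.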
+ +例えば: + +``` sql +CREATE TABLE sum_map( + date Date, + timeslot DateTime, + statusMap Nested( + status UInt16, + requests UInt64 + ) +) ENGINE = Log; +INSERT INTO sum_map VALUES + ('2000-01-01', '2000-01-01 00:00:00', [1, 2, 3], [10, 10, 10]), + ('2000-01-01', '2000-01-01 00:00:00', [3, 4, 5], [10, 10, 10]), + ('2000-01-01', '2000-01-01 00:01:00', [4, 5, 6], [10, 10, 10]), + ('2000-01-01', '2000-01-01 00:01:00', [6, 7, 8], [10, 10, 10]); +SELECT + timeslot, + sumMap(statusMap.status, statusMap.requests) +FROM sum_map +GROUP BY timeslot +``` + +``` text +┌────────────timeslot─┬─sumMap(statusMap.status, statusMap.requests)─┐ +│ 2000-01-01 00:00:00 │ ([1,2,3,4,5],[10,10,20,10,10]) │ +│ 2000-01-01 00:01:00 │ ([4,5,6,7,8],[10,10,20,10,10]) │ +└─────────────────────┴──────────────────────────────────────────────┘ +``` + +## skewPop {#skewpop} + +を計算します [歪み](https://en.wikipedia.org/wiki/Skewness) シーケンスの。 + +``` sql +skewPop(expr) +``` + +**パラメータ** + +`expr` — [式](../syntax.md#syntax-expressions) 番号を返す。 + +**戻り値** + +The skewness of the given distribution. Type — [Float64](../../sql_reference/data_types/float.md) + +**例えば** + +``` sql +SELECT skewPop(value) FROM series_with_value_column +``` + +## 串焼き {#skewsamp} + +を計算します [サンプルの歪度](https://en.wikipedia.org/wiki/Skewness) シーケンスの。 + +これは、渡された値がそのサンプルを形成する場合、確率変数の歪度の不偏推定値を表します。 + +``` sql +skewSamp(expr) +``` + +**パラメータ** + +`expr` — [式](../syntax.md#syntax-expressions) 番号を返す。 + +**戻り値** + +The skewness of the given distribution. Type — [Float64](../../sql_reference/data_types/float.md). もし `n <= 1` (`n` はサンプルのサイズです)、関数は次の値を返します `nan`. + +**例えば** + +``` sql +SELECT skewSamp(value) FROM series_with_value_column +``` + +## kurtPop {#kurtpop} + +を計算します [尖度](https://en.wikipedia.org/wiki/Kurtosis) シーケンスの。 + +``` sql +kurtPop(expr) +``` + +**パラメータ** + +`expr` — [式](../syntax.md#syntax-expressions) 番号を返す。 + +**戻り値** + +The kurtosis of the given distribution. Type — [Float64](../../sql_reference/data_types/float.md) + +**例えば** + +``` sql +SELECT kurtPop(value) FROM series_with_value_column +``` + +## kurtSamp {#kurtsamp} + +を計算します [サンプル尖度](https://en.wikipedia.org/wiki/Kurtosis) のシーケンスです。 + +これは、渡された値がサンプルを形成する場合、確率変数の尖度の不偏推定値を表します。 + +``` sql +kurtSamp(expr) +``` + +**パラメータ** + +`expr` — [式](../syntax.md#syntax-expressions) 番号を返す。 + +**戻り値** + +The kurtosis of the given distribution. Type — [Float64](../../sql_reference/data_types/float.md). もし `n <= 1` (`n` はサンプルのサイズです)、関数は次の値を返します `nan`. + +**例えば** + +``` sql +SELECT kurtSamp(value) FROM series_with_value_column +``` + +## timeSeriesGroupSum(uid,タイムスタンプ,値) {#agg-function-timeseriesgroupsum} + +`timeSeriesGroupSum` 総異なる時系列のサンプルのタイムスタンプなアライメントを実施します。 +これは、二つのサンプルタイムスタンプ間の線形補間を使用して、一緒に時系列を合計します。 + +- `uid` タイムシリーズの一意のidです, `UInt64`. 
+- `timestamp` ミリ秒またはマイクロ秒をサポートするためにInt64型です。 +- `value` メトリックです。 + +この関数は、以下のタプルの配列を返します `(timestamp, aggregated_value)` のペアになっています。 + +この機能を使用する前に確認 `timestamp` は昇順です。 + +例えば: + +``` text +┌─uid─┬─timestamp─┬─value─┐ +│ 1 │ 2 │ 0.2 │ +│ 1 │ 7 │ 0.7 │ +│ 1 │ 12 │ 1.2 │ +│ 1 │ 17 │ 1.7 │ +│ 1 │ 25 │ 2.5 │ +│ 2 │ 3 │ 0.6 │ +│ 2 │ 8 │ 1.6 │ +│ 2 │ 12 │ 2.4 │ +│ 2 │ 18 │ 3.6 │ +│ 2 │ 24 │ 4.8 │ +└─────┴───────────┴───────┘ +``` + +``` sql +CREATE TABLE time_series( + uid UInt64, + timestamp Int64, + value Float64 +) ENGINE = Memory; +INSERT INTO time_series VALUES + (1,2,0.2),(1,7,0.7),(1,12,1.2),(1,17,1.7),(1,25,2.5), + (2,3,0.6),(2,8,1.6),(2,12,2.4),(2,18,3.6),(2,24,4.8); + +SELECT timeSeriesGroupSum(uid, timestamp, value) +FROM ( + SELECT * FROM time_series order by timestamp ASC +); +``` + +結果は次のようになります: + +``` text +[(2,0.2),(3,0.9),(7,2.1),(8,2.4),(12,3.6),(17,5.1),(18,5.4),(24,7.2),(25,2.5)] +``` + +## timeSeriesGroupRateSum(uid,ts,val) {#agg-function-timeseriesgroupratesum} + +同様にtimeseriesgroupratesum、timeseriesgroupratesumは、時系列のレートを計算し、その後、一緒にレートを合計します。 +また、この関数を使用する前にタイムスタンプが昇順になるはずです。 + +この関数を使用すると、上記の結果は次のようになります: + +``` text +[(2,0),(3,0.1),(7,0.3),(8,0.3),(12,0.3),(17,0.3),(18,0.3),(24,0.3),(25,0.1)] +``` + +## 平均(x) {#agg_function-avg} + +平均を計算します。 +数字のためにのみ動作します。 +結果は常にfloat64です。 + +## uniq {#agg_function-uniq} + +引数の異なる値のおおよその数を計算します。 + +``` sql +uniq(x[, ...]) +``` + +**パラメータ** + +この関数は、可変個のパラメータを受け取ります。 変数は `Tuple`, `Array`, `Date`, `DateTime`, `String`、または数値型。 + +**戻り値** + +- A [UInt64](../../sql_reference/data_types/int_uint.md)-タイプ番号。 + +**実装の詳細** + +機能: + +- 集計内のすべてのパラメータのハッシュを計算し、それを計算に使用します。 + +- を使用して適応サンプリングアルゴリズムです。 計算状態の場合、関数は65536までの要素ハッシュ値のサンプルを使用します。 + + This algorithm is very accurate and very efficient on the CPU. When the query contains several of these functions, using `uniq` is almost as fast as using other aggregate functions. + +- 結果を確定的に提供します(クエリ処理の順序に依存しません)。 + +使用をお勧めしますこの機能はほとんど全てのシナリオ. + +**また見なさい** + +- [uniqCombined](#agg_function-uniqcombined) +- [uniqCombined64](#agg_function-uniqcombined64) +- [unihll12](#agg_function-uniqhll12) +- [ユニキャック](#agg_function-uniqexact) + +## uniqCombined {#agg_function-uniqcombined} + +異なる引数値のおおよその数を計算します。 + +``` sql +uniqCombined(HLL_precision)(x[, ...]) +``` + +その `uniqCombined` 関数は、異なる値の数を計算するのに適しています。 + +**パラメータ** + +この関数は、可変個のパラメータを受け取ります。 変数は `Tuple`, `Array`, `Date`, `DateTime`, `String`、または数値型。 + +`HLL_precision` は、2のセル数の底の対数です [ハイパーログ](https://en.wikipedia.org/wiki/HyperLogLog). オプションで、次のように関数を使用できます `uniqCombined(x[, ...])`. のデフォルト値 `HLL_precision` は17で、これは効果的に96KiBのスペース(2^17セル、6ビットそれぞれ)です。 + +**戻り値** + +- を番号 [UInt64](../../sql_reference/data_types/int_uint.md)-タイプ番号。 + +**実装の詳細** + +機能: + +- ハッシュを計算します(64ビットのハッシュ `String` それ以外の場合は32ビット)は、集計内のすべてのパラメータに対して、それを計算に使用します。 + +- 配列、ハッシュテーブル、およびhyperloglogとエラー修正テーブルの組み合わせを使用します。 + + For a small number of distinct elements, an array is used. When the set size is larger, a hash table is used. For a larger number of elements, HyperLogLog is used, which will occupy a fixed amount of memory. + +- 結果を確定的に提供します(クエリ処理の順序に依存しません)。 + +!!! 
note "メモ" + それは32ビットハッシュを使用しているので-`String` タイプすると、結果はカーディナリティのエラーが非常に大きくなります `UINT_MAX` (エラーは数十億の異なる値の後にすぐに発生します)、この場合は次のようにしてください [uniqCombined64](#agg_function-uniqcombined64) + +に比べて [uniq](#agg_function-uniq) 機能、を `uniqCombined`: + +- 数回少ないメモリを消費します。 +- 数倍高い精度で計算します。 +- 通常は若干低い性能を持っています。 一部のシナリオでは, `uniqCombined` より良い実行できる `uniq` たとえば、ネットワークを介して多数の集約状態を送信する分散クエリを使用します。 + +**また見なさい** + +- [uniq](#agg_function-uniq) +- [uniqCombined64](#agg_function-uniqcombined64) +- [unihll12](#agg_function-uniqhll12) +- [ユニキャック](#agg_function-uniqexact) + +## uniqCombined64 {#agg_function-uniqcombined64} + +と同じ [uniqCombined](#agg_function-uniqcombined) ただし、すべてのデータ型に64ビットハッシュを使用します。 + +## unihll12 {#agg_function-uniqhll12} + +を使用して、異なる引数値のおおよその数を計算します [ハイパーログ](https://en.wikipedia.org/wiki/HyperLogLog) アルゴリズムだ + +``` sql +uniqHLL12(x[, ...]) +``` + +**パラメータ** + +この関数は、可変個のパラメータを受け取ります。 変数は `Tuple`, `Array`, `Date`, `DateTime`, `String`、または数値型。 + +**戻り値** + +- A [UInt64](../../sql_reference/data_types/int_uint.md)-タイプ番号。 + +**実装の詳細** + +機能: + +- 集計内のすべてのパラメータのハッシュを計算し、それを計算に使用します。 + +- HyperLogLogアルゴリズムを使用して、異なる引数値の数を近似します。 + + 212 5-bit cells are used. The size of the state is slightly more than 2.5 KB. The result is not very accurate (up to ~10% error) for small data sets (<10K elements). However, the result is fairly accurate for high-cardinality data sets (10K-100M), with a maximum error of ~1.6%. Starting from 100M, the estimation error increases, and the function will return very inaccurate results for data sets with extremely high cardinality (1B+ elements). + +- 確定的な結果を提供します(クエリ処理の順序に依存しません)。 + +この機能を使用することはお勧めしません。 ほとんどの場合、 [uniq](#agg_function-uniq) または [uniqCombined](#agg_function-uniqcombined) 機能。 + +**また見なさい** + +- [uniq](#agg_function-uniq) +- [uniqCombined](#agg_function-uniqcombined) +- [ユニキャック](#agg_function-uniqexact) + +## ユニキャック {#agg_function-uniqexact} + +異なる引数値の正確な数を計算します。 + +``` sql +uniqExact(x[, ...]) +``` + +を使用 `uniqExact` 機能あなたは絶対に正確な結果が必要な場合。 それ以外の場合は、 [uniq](#agg_function-uniq) 機能。 + +その `uniqExact` 機能の使用ます。 `uniq`、状態のサイズは、異なる値の数が増加するにつれて無制限の成長を有するからである。 + +**パラメータ** + +この関数は、可変個のパラメータを受け取ります。 変数は `Tuple`, `Array`, `Date`, `DateTime`, `String`、または数値型。 + +**また見なさい** + +- [uniq](#agg_function-uniq) +- [uniqCombined](#agg_function-uniqcombined) +- [unihll12](#agg_function-uniqhll12) + +## groupArray(x),groupArray(max\_size)(x) {#agg_function-grouparray} + +引数の値の配列を作成します。 +値は、任意の(不確定な)順序で配列に追加できます。 + +第二のバージョン( `max_size` パラメータ)結果の配列のサイズを次のように制限します `max_size` 要素。 +例えば, `groupArray (1) (x)` に相当します `[any (x)]`. + +場合によっては、実行の順序に依拠することもできます。 これは、次の場合に適用されます `SELECT` 使用するサブクエリーから取得されます `ORDER BY`. + +## groupArrayInsertAt(値、位置) {#grouparrayinsertatvalue-position} + +指定した位置の配列に値を挿入します。 + +!!! note "メモ" + この関数はゼロベースの位置を使用します。 + +Accepts the value and position as input. If several values ​​are inserted into the same position, any of them might end up in the resulting array (the first one will be used in the case of single-threaded execution). If no value is inserted into a position, the position is assigned the default value. 
+ +任意変数: + +- 空の位置に置き換えるためのデフォルト値。 +- 結果の配列の長さ。 これにより、すべての集約キーで同じサイズの配列を受け取ることができます。 このパラメーターを使用する場合は、既定値を指定する必要があります。 + +## グルーパーレイモビングサムcity in new mexico usa {#agg_function-grouparraymovingsum} + +入力値の移動和を計算します。 + +``` sql +groupArrayMovingSum(numbers_for_summing) +groupArrayMovingSum(window_size)(numbers_for_summing) +``` + +この機能できるウィンドウサイズとしてのパラメータとします。 指定しない場合、この関数は、列の行数と同じウィンドウサイズをとります。 + +**パラメータ** + +- `numbers_for_summing` — [式](../syntax.md#syntax-expressions) その結果、数値データ型の値が返されます。 +- `window_size` — Size of the calculation window. + +**戻り値** + +- 入力データと同じサイズおよびタイプの配列。 + +**例えば** + +サンプルテーブル: + +``` sql +CREATE TABLE t +( + `int` UInt8, + `float` Float32, + `dec` Decimal32(2) +) +ENGINE = TinyLog +``` + +``` text +┌─int─┬─float─┬──dec─┐ +│ 1 │ 1.1 │ 1.10 │ +│ 2 │ 2.2 │ 2.20 │ +│ 4 │ 4.4 │ 4.40 │ +│ 7 │ 7.77 │ 7.77 │ +└─────┴───────┴──────┘ +``` + +クエリ: + +``` sql +SELECT + groupArrayMovingSum(int) AS I, + groupArrayMovingSum(float) AS F, + groupArrayMovingSum(dec) AS D +FROM t +``` + +``` text +┌─I──────────┬─F───────────────────────────────┬─D──────────────────────┐ +│ [1,3,7,14] │ [1.1,3.3000002,7.7000003,15.47] │ [1.10,3.30,7.70,15.47] │ +└────────────┴─────────────────────────────────┴────────────────────────┘ +``` + +``` sql +SELECT + groupArrayMovingSum(2)(int) AS I, + groupArrayMovingSum(2)(float) AS F, + groupArrayMovingSum(2)(dec) AS D +FROM t +``` + +``` text +┌─I──────────┬─F───────────────────────────────┬─D──────────────────────┐ +│ [1,3,6,11] │ [1.1,3.3000002,6.6000004,12.17] │ [1.10,3.30,6.60,12.17] │ +└────────────┴─────────────────────────────────┴────────────────────────┘ +``` + +## groupparraymovingavg {#agg_function-grouparraymovingavg} + +入力値の移動平均を計算します。 + +``` sql +groupArrayMovingAvg(numbers_for_summing) +groupArrayMovingAvg(window_size)(numbers_for_summing) +``` + +この機能できるウィンドウサイズとしてのパラメータとします。 指定しない場合、この関数は、列の行数と同じウィンドウサイズをとります。 + +**パラメータ** + +- `numbers_for_summing` — [式](../syntax.md#syntax-expressions) その結果、数値データ型の値が返されます。 +- `window_size` — Size of the calculation window. + +**戻り値** + +- 入力データと同じサイズおよびタイプの配列。 + +この関数は [ゼロに向かって丸め](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero). 結果のデータ型の小数点以下の桁を切り捨てます。 + +**例えば** + +サンプルテーブル `b`: + +``` sql +CREATE TABLE t +( + `int` UInt8, + `float` Float32, + `dec` Decimal32(2) +) +ENGINE = TinyLog +``` + +``` text +┌─int─┬─float─┬──dec─┐ +│ 1 │ 1.1 │ 1.10 │ +│ 2 │ 2.2 │ 2.20 │ +│ 4 │ 4.4 │ 4.40 │ +│ 7 │ 7.77 │ 7.77 │ +└─────┴───────┴──────┘ +``` + +クエリ: + +``` sql +SELECT + groupArrayMovingAvg(int) AS I, + groupArrayMovingAvg(float) AS F, + groupArrayMovingAvg(dec) AS D +FROM t +``` + +``` text +┌─I─────────┬─F───────────────────────────────────┬─D─────────────────────┐ +│ [0,0,1,3] │ [0.275,0.82500005,1.9250001,3.8675] │ [0.27,0.82,1.92,3.86] │ +└───────────┴─────────────────────────────────────┴───────────────────────┘ +``` + +``` sql +SELECT + groupArrayMovingAvg(2)(int) AS I, + groupArrayMovingAvg(2)(float) AS F, + groupArrayMovingAvg(2)(dec) AS D +FROM t +``` + +``` text +┌─I─────────┬─F────────────────────────────────┬─D─────────────────────┐ +│ [0,1,3,5] │ [0.55,1.6500001,3.3000002,6.085] │ [0.55,1.65,3.30,6.08] │ +└───────────┴──────────────────────────────────┴───────────────────────┘ +``` + +## groupUniqArray(x),groupUniqArray(max\_size)(x) {#groupuniqarrayx-groupuniqarraymax-sizex} + +異なる引数値から配列を作成します。 メモリ消費量は、 `uniqExact` 機能。 + +第二のバージョン( `max_size` パラメータ)結果の配列のサイズを次のように制限します `max_size` 要素。 +例えば, `groupUniqArray(1)(x)` に相当します `[any(x)]`. 
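+
+A small self-contained illustration (hypothetical input; the element order in the result is not guaranteed):
+
+``` sql
+SELECT groupUniqArray(number % 3)
+FROM numbers(10)
+```
+
+The result contains each distinct value exactly once, e.g. `[0,1,2]` in some order.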
+ +## 分位値 {#quantile} + +近似値を計算します [分位値](https://en.wikipedia.org/wiki/Quantile) 数値データシーケンス。 + +この関数が適用されます [貯蔵所の見本抽出](https://en.wikipedia.org/wiki/Reservoir_sampling) 8192までの貯蔵所のサイズおよび見本抽出のための乱数発電機を使って。 結果は非決定的です。 正確な分位値を取得するには、以下を使用します [quantileExact](#quantileexact) 機能。 + +複数を使用する場合 `quantile*` クエリの異なるレベルを持つ関数は、内部状態が結合されていません(つまり、クエリはそれほど効率的ではありません)。 この場合は、 [分位数](#quantiles) 機能。 + +**構文** + +``` sql +quantile(level)(expr) +``` + +エイリアス: `median`. + +**パラメータ** + +- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` の範囲内の値 `[0.01, 0.99]`. デフォルト値:0.5. で `level=0.5` 機能は計算する [中央値](https://en.wikipedia.org/wiki/Median). +- `expr` — Expression over the column values resulting in numeric [データ型](../../sql_reference/data_types/index.md#data_types), [日付](../../sql_reference/data_types/date.md) または [DateTime](../../sql_reference/data_types/datetime.md). + +**戻り値** + +- 指定したレベルの概算値。 + +タイプ: + +- [Float64](../../sql_reference/data_types/float.md) 数値データ型の入力。 +- [日付](../../sql_reference/data_types/date.md) 入力値が `Date` タイプ。 +- [DateTime](../../sql_reference/data_types/datetime.md) 入力値が `DateTime` タイプ。 + +**例えば** + +入力テーブル: + +``` text +┌─val─┐ +│ 1 │ +│ 1 │ +│ 2 │ +│ 3 │ +└─────┘ +``` + +クエリ: + +``` sql +SELECT quantile(val) FROM t +``` + +結果: + +``` text +┌─quantile(val)─┐ +│ 1.5 │ +└───────────────┘ +``` + +**また見なさい** + +- [中央値](#median) +- [分位数](#quantiles) + +## quantiedeterministic {#quantiledeterministic} + +近似値を計算します [分位値](https://en.wikipedia.org/wiki/Quantile) 数値データシーケンス。 + +この関数が適用されます [貯蔵所の見本抽出](https://en.wikipedia.org/wiki/Reservoir_sampling) 8192までの貯蔵所のサイズおよび見本抽出の決定論のアルゴリズムを使って。 結果は決定的です。 正確な分位値を取得するには、以下を使用します [quantileExact](#quantileexact) 機能。 + +複数を使用する場合 `quantile*` クエリの異なるレベルを持つ関数は、内部状態が結合されていません(つまり、クエリはそれほど効率的ではありません)。 この場合は、 [分位数](#quantiles) 機能。 + +**構文** + +``` sql +quantileDeterministic(level)(expr, determinator) +``` + +エイリアス: `medianDeterministic`. + +**パラメータ** + +- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` の範囲内の値 `[0.01, 0.99]`. デフォルト値:0.5. で `level=0.5` 機能は計算する [中央値](https://en.wikipedia.org/wiki/Median). +- `expr` — Expression over the column values resulting in numeric [データ型](../../sql_reference/data_types/index.md#data_types), [日付](../../sql_reference/data_types/date.md) または [DateTime](../../sql_reference/data_types/datetime.md). +- `determinator` — Number whose hash is used instead of a random number generator in the reservoir sampling algorithm to make the result of sampling deterministic. As a determinator you can use any deterministic positive number, for example, a user id or an event id. If the same determinator value occures too often, the function works incorrectly. 
+ +**戻り値** + +- 指定したレベルの概算値。 + +タイプ: + +- [Float64](../../sql_reference/data_types/float.md) 数値データ型の入力。 +- [日付](../../sql_reference/data_types/date.md) 入力値が `Date` タイプ。 +- [DateTime](../../sql_reference/data_types/datetime.md) 入力値が `DateTime` タイプ。 + +**例えば** + +入力テーブル: + +``` text +┌─val─┐ +│ 1 │ +│ 1 │ +│ 2 │ +│ 3 │ +└─────┘ +``` + +クエリ: + +``` sql +SELECT quantileDeterministic(val, 1) FROM t +``` + +結果: + +``` text +┌─quantileDeterministic(val, 1)─┐ +│ 1.5 │ +└───────────────────────────────┘ +``` + +**また見なさい** + +- [中央値](#median) +- [分位数](#quantiles) + +## quantileExact {#quantileexact} + +正確に計算する [分位値](https://en.wikipedia.org/wiki/Quantile) 数値データシーケンス。 + +To get exact value, all the passed values ​​are combined into an array, which is then partially sorted. Therefore, the function consumes `O(n)` メモリ、どこ `n` 渡された値の数です。 しかし、少数の値の場合、関数は非常に効果的です。 + +複数を使用する場合 `quantile*` クエリの異なるレベルを持つ関数は、内部状態が結合されていません(つまり、クエリはそれほど効率的ではありません)。 この場合は、 [分位数](#quantiles) 機能。 + +**構文** + +``` sql +quantileExact(level)(expr) +``` + +エイリアス: `medianExact`. + +**パラメータ** + +- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` の範囲内の値 `[0.01, 0.99]`. デフォルト値:0.5. で `level=0.5` 機能は計算する [中央値](https://en.wikipedia.org/wiki/Median). +- `expr` — Expression over the column values resulting in numeric [データ型](../../sql_reference/data_types/index.md#data_types), [日付](../../sql_reference/data_types/date.md) または [DateTime](../../sql_reference/data_types/datetime.md). + +**戻り値** + +- 指定されたレベルの分位値。 + +タイプ: + +- [Float64](../../sql_reference/data_types/float.md) 数値データ型の入力。 +- [日付](../../sql_reference/data_types/date.md) 入力値が `Date` タイプ。 +- [DateTime](../../sql_reference/data_types/datetime.md) 入力値が `DateTime` タイプ。 + +**例えば** + +クエリ: + +``` sql +SELECT quantileExact(number) FROM numbers(10) +``` + +結果: + +``` text +┌─quantileExact(number)─┐ +│ 5 │ +└───────────────────────┘ +``` + +**また見なさい** + +- [中央値](#median) +- [分位数](#quantiles) + +## quantileExactWeighted {#quantileexactweighted} + +正確に計算する [分位値](https://en.wikipedia.org/wiki/Quantile) 各要素の重みを考慮した数値データシーケンス。 + +To get exact value, all the passed values ​​are combined into an array, which is then partially sorted. Each value is counted with its weight, as if it is present `weight` times. A hash table is used in the algorithm. Because of this, if the passed values ​​are frequently repeated, the function consumes less RAM than [quantileExact](#quantileexact). この関数は、次の代わりに使用できます `quantileExact` そして、重み1を指定します。 + +複数を使用する場合 `quantile*` クエリの異なるレベルを持つ関数は、内部状態が結合されていません(つまり、クエリはそれほど効率的ではありません)。 この場合は、 [分位数](#quantiles) 機能。 + +**構文** + +``` sql +quantileExactWeighted(level)(expr, weight) +``` + +エイリアス: `medianExactWeighted`. + +**パラメータ** + +- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` の範囲内の値 `[0.01, 0.99]`. デフォルト値:0.5. で `level=0.5` 機能は計算する [中央値](https://en.wikipedia.org/wiki/Median). +- `expr` — Expression over the column values resulting in numeric [データ型](../../sql_reference/data_types/index.md#data_types), [日付](../../sql_reference/data_types/date.md) または [DateTime](../../sql_reference/data_types/datetime.md). +- `weight` — Column with weights of sequence members. Weight is a number of value occurrences. 
+
+**戻り値**
+
+- 指定されたレベルの分位値。
+
+タイプ:
+
+- [Float64](../../sql_reference/data_types/float.md) 数値データ型の入力。
+- [日付](../../sql_reference/data_types/date.md) 入力値が `Date` タイプ。
+- [DateTime](../../sql_reference/data_types/datetime.md) 入力値が `DateTime` タイプ。
+
+**例えば**
+
+入力テーブル:
+
+``` text
+┌─n─┬─val─┐
+│ 0 │   3 │
+│ 1 │   2 │
+│ 2 │   1 │
+│ 5 │   4 │
+└───┴─────┘
+```
+
+クエリ:
+
+``` sql
+SELECT quantileExactWeighted(n, val) FROM t
+```
+
+結果:
+
+``` text
+┌─quantileExactWeighted(n, val)─┐
+│                             1 │
+└───────────────────────────────┘
+```
+
+**また見なさい**
+
+- [中央値](#median)
+- [分位数](#quantiles)
+
+## quantileTiming {#quantiletiming}
+
+決定された精度で、数値データシーケンスの [分位値](https://en.wikipedia.org/wiki/Quantile) を計算します。
+
+結果は決定的です(クエリ処理の順序に依存しません)。 この関数は、Webページの読み込み時間やバックエンドの応答時間のような分布を記述するシーケンスの処理に最適化されています。
+
+複数を使用する場合 `quantile*` クエリの異なるレベルを持つ関数は、内部状態が結合されていません(つまり、クエリはそれほど効率的ではありません)。 この場合は、 [分位数](#quantiles) 機能。
+
+**構文**
+
+``` sql
+quantileTiming(level)(expr)
+```
+
+エイリアス: `medianTiming`.
+
+**パラメータ**
+
+- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` の範囲内の値 `[0.01, 0.99]`. デフォルト値:0.5. で `level=0.5` 機能は計算する [中央値](https://en.wikipedia.org/wiki/Median).
+
+- `expr` — 列の値に対する [式](../syntax.md#syntax-expressions) で、[フロート\*](../../sql_reference/data_types/float.md) タイプの数値を返します。
+
+    - If negative values are passed to the function, the behavior is undefined.
+    - If the value is greater than 30,000 (a page loading time of more than 30 seconds), it is assumed to be 30,000.
+
+**精度**
+
+計算は次の場合に正確です:
+
+- 値の総数は5670を超えません。
+- 値の総数は5670を超えていますが、ページの読み込み時間は1024ms未満です。
+
+それ以外の場合、計算の結果は16msの最も近い倍数に丸められます。
+
+!!! note "メモ"
+    ページの読み込み時間の分位数を計算する場合、この関数は [分位値](#quantile) より効果的で正確です。
+
+**戻り値**
+
+- 指定されたレベルの分位値。
+
+タイプ: `Float32`.
+
+!!! note "メモ"
+    関数に値が渡されない場合(`quantileTimingIf` を使用する場合)、[ナン](../../sql_reference/data_types/float.md#data_type-float-nan-inf) が返されます。 この目的は、これらのケースを結果がゼロになるケースと区別することです。 `NaN` 値のソートに関する注意事項は [ORDER BY句](../statements/select.md#select-order-by) を参照してください。
+
+**例えば**
+
+入力テーブル:
+
+``` text
+┌─response_time─┐
+│            72 │
+│           112 │
+│           126 │
+│           145 │
+│           104 │
+│           242 │
+│           313 │
+│           168 │
+│           108 │
+└───────────────┘
+```
+
+クエリ:
+
+``` sql
+SELECT quantileTiming(response_time) FROM t
+```
+
+結果:
+
+``` text
+┌─quantileTiming(response_time)─┐
+│                           126 │
+└───────────────────────────────┘
+```
+
+**また見なさい**
+
+- [中央値](#median)
+- [分位数](#quantiles)
+
+## quantileTimingWeighted {#quantiletimingweighted}
+
+決定された精度で、各シーケンスメンバの重みに応じて、数値データシーケンスの [分位値](https://en.wikipedia.org/wiki/Quantile) を計算します。
+
+結果は決定的です(クエリ処理の順序に依存しません)。 この関数は、Webページの読み込み時間やバックエンドの応答時間のような分布を記述するシーケンスの処理に最適化されています。
+
+複数を使用する場合 `quantile*` クエリの異なるレベルを持つ関数は、内部状態が結合されていません(つまり、クエリはそれほど効率的ではありません)。 この場合は、 [分位数](#quantiles) 機能。
+
+**構文**
+
+``` sql
+quantileTimingWeighted(level)(expr, weight)
+```
+
+エイリアス: `medianTimingWeighted`.
+
+**パラメータ**
+
+- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` の範囲内の値 `[0.01, 0.99]`. デフォルト値:0.5. で `level=0.5` 機能は計算する [中央値](https://en.wikipedia.org/wiki/Median).
+
+- `expr` — 列の値に対する [式](../syntax.md#syntax-expressions) で、[フロート\*](../../sql_reference/data_types/float.md) タイプの数値を返します。
+
+    - If negative values are passed to the function, the behavior is undefined.
+    - If the value is greater than 30,000 (a page loading time of more than 30 seconds), it is assumed to be 30,000.
+
+- `weight` — Column with weights of sequence elements. Weight is a number of value occurrences.
+
+**精度**
+
+計算は次の場合に正確です:
+
+- 値の総数は5670を超えません。
+- 値の総数は5670を超えていますが、ページの読み込み時間は1024ms未満です。
+
+それ以外の場合、計算の結果は16msの最も近い倍数に丸められます。
+
+!!! note "メモ"
+    ページの読み込み時間の分位数を計算する場合、この関数は [分位値](#quantile) より効果的で正確です。
+
+**戻り値**
+
+- 指定されたレベルの分位値。
+
+タイプ: `Float32`.
+
+!!! note "メモ"
+    関数に値が渡されない場合(`quantileTimingIf` を使用する場合)、[ナン](../../sql_reference/data_types/float.md#data_type-float-nan-inf) が返されます。 この目的は、これらのケースを結果がゼロになるケースと区別することです。 `NaN` 値のソートに関する注意事項は [ORDER BY句](../statements/select.md#select-order-by) を参照してください。
+
+**例えば**
+
+入力テーブル:
+
+``` text
+┌─response_time─┬─weight─┐
+│            68 │      1 │
+│           104 │      2 │
+│           112 │      3 │
+│           126 │      2 │
+│           138 │      1 │
+│           162 │      1 │
+└───────────────┴────────┘
+```
+
+クエリ:
+
+``` sql
+SELECT quantileTimingWeighted(response_time, weight) FROM t
+```
+
+結果:
+
+``` text
+┌─quantileTimingWeighted(response_time, weight)─┐
+│                                           112 │
+└───────────────────────────────────────────────┘
+```
+
+**また見なさい**
+
+- [中央値](#median)
+- [分位数](#quantiles)
+
+## quantileTDigest {#quantiletdigest}
+
+[t-ダイジェスト](https://github.com/tdunning/t-digest/blob/master/docs/t-digest-paper/histo.pdf) アルゴリズムを使用して、数値データシーケンスの [分位値](https://en.wikipedia.org/wiki/Quantile) の近似値を計算します。
+
+最大誤差は1%です。 メモリ消費量は `log(n)` です(`n` は値の数)。 結果は、クエリの実行順序によって異なり、非決定的です。
+
+この関数の性能は [分位値](#quantile) や [quantileTiming](#quantiletiming) の性能より低くなります。 状態サイズと精度の比に関しては、この関数は `quantile` よりはるかに優れています。
+
+複数を使用する場合 `quantile*` クエリの異なるレベルを持つ関数は、内部状態が結合されていません(つまり、クエリはそれほど効率的ではありません)。 この場合は、 [分位数](#quantiles) 機能。
+
+**構文**
+
+``` sql
+quantileTDigest(level)(expr)
+```
+
+エイリアス: `medianTDigest`.
+
+**パラメータ**
+
+- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` の範囲内の値 `[0.01, 0.99]`. デフォルト値:0.5. で `level=0.5` 機能は計算する [中央値](https://en.wikipedia.org/wiki/Median).
+- `expr` — Expression over the column values resulting in numeric [データ型](../../sql_reference/data_types/index.md#data_types), [日付](../../sql_reference/data_types/date.md) または [DateTime](../../sql_reference/data_types/datetime.md).
+
+**戻り値**
+
+- 指定したレベルの概算値。
+
+タイプ:
+
+- [Float64](../../sql_reference/data_types/float.md) 数値データ型の入力。
+- [日付](../../sql_reference/data_types/date.md) 入力値が `Date` タイプ。
+- [DateTime](../../sql_reference/data_types/datetime.md) 入力値が `DateTime` タイプ。
+
+**例えば**
+
+クエリ:
+
+``` sql
+SELECT quantileTDigest(number) FROM numbers(10)
+```
+
+結果:
+
+``` text
+┌─quantileTDigest(number)─┐
+│                     4.5 │
+└─────────────────────────┘
+```
+
+**また見なさい**
+
+- [中央値](#median)
+- [分位数](#quantiles)
+
+## quantileTDigestWeighted {#quantiletdigestweighted}
+
+[t-ダイジェスト](https://github.com/tdunning/t-digest/blob/master/docs/t-digest-paper/histo.pdf) アルゴリズムを使用して、数値データシーケンスの [分位値](https://en.wikipedia.org/wiki/Quantile) の近似値を計算します。 この関数は、各シーケンスメンバーの重みを考慮に入れます。 最大誤差は1%です。 メモリ消費量は `log(n)` です(`n` は値の数)。
+
+この関数の性能は [分位値](#quantile) や [quantileTiming](#quantiletiming) の性能より低くなります。 状態サイズと精度の比に関しては、この関数は `quantile` よりはるかに優れています。
+
+結果は、クエリの実行順序によって異なり、非決定的です。
+
+複数を使用する場合 `quantile*` クエリの異なるレベルを持つ関数は、内部状態が結合されていません(つまり、クエリはそれほど効率的ではありません)。 この場合は、 [分位数](#quantiles) 機能。
+
+**構文**
+
+``` sql
+quantileTDigestWeighted(level)(expr, weight)
+```
+
+エイリアス: `medianTDigestWeighted`.
+
+**パラメータ**
+
+- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` の範囲内の値 `[0.01, 0.99]`. デフォルト値:0.5. で `level=0.5` 機能は計算する [中央値](https://en.wikipedia.org/wiki/Median).
+- `expr` — Expression over the column values resulting in numeric [データ型](../../sql_reference/data_types/index.md#data_types), [日付](../../sql_reference/data_types/date.md) または [DateTime](../../sql_reference/data_types/datetime.md).
+- `weight` — Column with weights of sequence elements. Weight is a number of value occurrences.
+
+**戻り値**
+
+- 指定したレベルの概算値。
+
+タイプ:
+
+- [Float64](../../sql_reference/data_types/float.md) 数値データ型の入力。
+- [日付](../../sql_reference/data_types/date.md) 入力値が `Date` タイプ。
+- [DateTime](../../sql_reference/data_types/datetime.md) 入力値が `DateTime` タイプ。
+
+**例えば**
+
+クエリ:
+
+``` sql
+SELECT quantileTDigestWeighted(number, 1) FROM numbers(10)
+```
+
+結果:
+
+``` text
+┌─quantileTDigestWeighted(number, 1)─┐
+│                                4.5 │
+└────────────────────────────────────┘
+```
+
+**また見なさい**
+
+- [中央値](#median)
+- [分位数](#quantiles)
+
+## 中央値 {#median}
+
+`median*` 関数は、対応する `quantile*` 関数のエイリアスです。 数値データサンプルの中央値を計算します。
+
+機能:
+
+- `median` — Alias for [分位値](#quantile).
+- `medianDeterministic` — Alias for [quantileDeterministic](#quantiledeterministic).
+- `medianExact` — Alias for [quantileExact](#quantileexact).
+- `medianExactWeighted` — Alias for [quantileExactWeighted](#quantileexactweighted).
+- `medianTiming` — Alias for [quantileTiming](#quantiletiming).
+- `medianTimingWeighted` — Alias for [quantileTimingWeighted](#quantiletimingweighted).
+- `medianTDigest` — Alias for [quantileTDigest](#quantiletdigest).
+- `medianTDigestWeighted` — Alias for [quantileTDigestWeighted](#quantiletdigestweighted).
+
+**例えば**
+
+入力テーブル:
+
+``` text
+┌─val─┐
+│   1 │
+│   1 │
+│   2 │
+│   3 │
+└─────┘
+```
+
+クエリ:
+
+``` sql
+SELECT medianDeterministic(val, 1) FROM t
+```
+
+結果:
+
+``` text
+┌─medianDeterministic(val, 1)─┐
+│                         1.5 │
+└─────────────────────────────┘
+```
+
+## quantiles(level1, level2, …)(x) {#quantiles}
+
+すべての分位値関数には、対応する quantiles 関数もあります: `quantiles`, `quantilesDeterministic`, `quantilesTiming`, `quantilesTimingWeighted`, `quantilesExact`, `quantilesExactWeighted`, `quantilesTDigest`. これらの関数は、リストされたレベルのすべての分位数を一回のパスで計算し、結果の値の配列を返します。
+
+## varSamp(x) {#varsampx}
+
+`Σ((x - x̅)^2) / (n - 1)` の量を計算します。ここで `n` はサンプルサイズ、`x̅` は `x` の平均値です。
+
+これは、渡された値がそのサンプルを形成する場合、確率変数の分散の不偏推定値を表します。
+
+`Float64` を返します。 `n <= 1` のときは `+∞` を返します。
+
+## varPop(x) {#varpopx}
+
+`Σ((x - x̅)^2) / n` の量を計算します。ここで `n` はサンプルサイズ、`x̅` は `x` の平均値です。
+
+つまり、値のセットの分散です。 `Float64` を返します。
+
+## stddevSamp(x) {#stddevsampx}
+
+結果は `varSamp(x)` の平方根に等しくなります。
+
+## stddevPop(x) {#stddevpopx}
+
+結果は `varPop(x)` の平方根に等しくなります。
+
+## topK(N)(x) {#topknx}
+
+指定された列の、おおよそ最も頻繁な値の配列を返します。 結果の配列は、(値そのものではなく)値のおおよその頻度の降順でソートされます。
+
+TopKを分析するための [ろ過されたスペース節約](http://www.l2f.inesc-id.pt/~fmmb/wiki/uploads/Work/misnis.ref0a.pdf) アルゴリズムを、[パラレル省スペース](https://arxiv.org/pdf/1401.0702.pdf) のreduce-and-combineアルゴリズムに基づいて実装しています。
+
+``` sql
+topK(N)(column)
+```
+
+この関数は保証された結果を提供しません。 特定の状況では、エラーが発生し、最も頻度の高い値ではない頻繁な値が返されることがあります。
+
+`N < 10` の値の使用を推薦します。`N` の値が大きいと性能が低下します。 `N` の最大値は `N = 65536` です。
+
+**パラメータ**
+
+- ‘N’ 返す要素の数です。
+
+パラメーターを省略すると、既定値10が使用されます。
+
+**引数**
+
+- ' x ' – The value to calculate frequency.
+
+**例えば**
+
+[オンタイム](../../getting_started/example_datasets/ontime.md) データセットを取り、`AirlineID` 列で最も頻繁に発生する三つの値を選択します。
+
+``` sql
+SELECT topK(3)(AirlineID) AS res
+FROM ontime
+```
+
+``` text
+┌─res─────────────────┐
+│ [19393,19790,19805] │
+└─────────────────────┘
+```
+
+## topKWeighted {#topkweighted}
+
+`topK` に似ていますが、整数型の追加の引数 `weight` を取ります。 各値は、頻度の計算において `weight` 回出現したものとして数えられます。
+
+**構文**
+
+``` sql
+topKWeighted(N)(x, weight)
+```
+
+**パラメータ**
+
+- `N` — The number of elements to return.
+
+**引数**
+
+- `x` – The value.
+- `weight` — The weight. [UInt8](../../sql_reference/data_types/int_uint.md).
+
+**戻り値**
+
+重みのおおよその合計が最大である値の配列を返します。
+
+**例えば**
+
+クエリ:
+
+``` sql
+SELECT topKWeighted(10)(number, number) FROM numbers(1000)
+```
+
+結果:
+
+``` text
+┌─topKWeighted(10)(number, number)──────────┐
+│ [999,998,997,996,995,994,993,992,991,990] │
+└───────────────────────────────────────────┘
+```
+
+## covarSamp(x,y) {#covarsampx-y}
+
+`Σ((x - x̅)(y - y̅)) / (n - 1)` の値を計算します。
+
+Float64を返します。 `n <= 1` のときは +∞ を返します。
+
+## covarPop(x,y) {#covarpopx-y}
+
+`Σ((x - x̅)(y - y̅)) / n` の値を計算します。
+
+## corr(x,y) {#corrx-y}
+
+ピアソン相関係数を計算します: `Σ((x - x̅)(y - y̅)) / sqrt(Σ((x - x̅)^2) * Σ((y - y̅)^2))`.
+
+## categoricalInformationValue {#categoricalinformationvalue}
+
+各カテゴリについて `(P(tag = 1) - P(tag = 0))(log(P(tag = 1)) - log(P(tag = 0)))` の値を計算します。
+
+``` sql
+categoricalInformationValue(category1, category2, ..., tag)
+```
+
+結果は、離散(カテゴリカル)特徴量 `[category1, category2, ...]` が、`tag` の値を予測する学習モデルにどのように貢献するかを示します。
+
+## simpleLinearRegression {#simplelinearregression}
+
+単純な(一次元的な)線形回帰を実行します。
+
+``` sql
+simpleLinearRegression(x, y)
+```
+
+パラメータ:
+
+- `x` — Column with explanatory variable values.
+- `y` — Column with dependent variable values.
+
+戻り値:
+
+結果の直線 `y = a*x + b` の定数 `(a, b)`。
+
+**例**
+
+``` sql
+SELECT arrayReduce('simpleLinearRegression', [0, 1, 2, 3], [0, 1, 2, 3])
+```
+
+``` text
+┌─arrayReduce('simpleLinearRegression', [0, 1, 2, 3], [0, 1, 2, 3])─┐
+│ (1,0)                                                             │
+└───────────────────────────────────────────────────────────────────┘
+```
+
+``` sql
+SELECT arrayReduce('simpleLinearRegression', [0, 1, 2, 3], [3, 4, 5, 6])
+```
+
+``` text
+┌─arrayReduce('simpleLinearRegression', [0, 1, 2, 3], [3, 4, 5, 6])─┐
+│ (1,3)                                                             │
+└───────────────────────────────────────────────────────────────────┘
+```
+
+## stochasticLinearRegression {#agg_functions-stochasticlinearregression}
+
+この関数は、確率的線形回帰を実装します。 学習率、L2正則化係数、ミニバッチサイズをカスタムパラメータとしてサポートし、重みを更新するいくつかの方法を持ちます ([アダム](https://en.wikipedia.org/wiki/Stochastic_gradient_descent#Adam) (デフォルトで使用), [シンプルSGD](https://en.wikipedia.org/wiki/Stochastic_gradient_descent), [勢い](https://en.wikipedia.org/wiki/Stochastic_gradient_descent#Momentum), [ネステロフ](https://mipt.ru/upload/medialibrary/d7e/41-91.pdf))。
+
+### パラメータ {#agg_functions-stochasticlinearregression-parameters}
+
+カスタマイズ可能なパラメータが4つあります。 それらは関数に順番に渡されます。四つすべてを渡す必要はありませんが、良いモデルにはいくつかのパラメータ調整が必要です。
+
+``` text
+stochasticLinearRegression(1.0, 1.0, 10, 'SGD')
+```
+
+1. `learning rate` は、勾配降下ステップが実行されるときのステップ長の係数です。 学習率が大きすぎると、モデルの重みが無限大になる可能性があります。 デフォルトは `0.00001` です。
+2. `l2 regularization coefficient`。これは過学習の防止に役立つ可能性があります。 デフォルトは `0.1` です。
+3. `mini-batch size` は、勾配降下のステップを実行するために、勾配を計算して合計する要素の数を設定します。 純粋な確率的降下は一つの要素を使用しますが、小さなバッチ(約10要素)を持つことで勾配ステップがより安定します。 デフォルトは `15` です。
+4. `method for updating weights` には次のものがあります: `Adam` (デフォルト), `SGD`, `Momentum`, `Nesterov`。 `Momentum` と `Nesterov` は、もう少し多くの計算とメモリを必要としますが、収束の速度と確率勾配法の安定性の点で有用です。
+
+### 使い方 {#agg_functions-stochasticlinearregression-usage}
+
+`stochasticLinearRegression` は、モデルのフィッティングと新しいデータに対する予測という二つのステップで使用します。 モデルを適合させ、後で使用するためにその状態を保存するには、`-State` combinatorを使用します。これは基本的に状態(モデルの重みなど)を保存します。
+予測するには、[evalMLMethod](../functions/machine_learning_functions.md#machine_learning_methods-evalmlmethod) 関数を使用します。これは、状態に加えて、予測に使う特徴量を引数として取ります。
+
+
+
+**1.** フィッティング
+
+このようなクエリを使用できます。
+
+``` sql
+CREATE TABLE IF NOT EXISTS train_data
+(
+    param1 Float64,
+    param2 Float64,
+    target Float64
+) ENGINE = Memory;
+
+CREATE TABLE your_model ENGINE = Memory AS SELECT
+stochasticLinearRegressionState(0.1, 0.0, 5, 'SGD')(target, param1, param2)
+AS state FROM train_data;
+```
+
+ここでは、`train_data` テーブルにデータを挿入する必要もあります。 パラメータの数は固定されておらず、`linearRegressionState` に渡された引数の数にのみ依存します。それらはすべて数値である必要があります。
+ターゲット値(予測することを学びたい値)を持つ列が最初の引数として挿入されることに注意してください。
+
+**2.** 予測
+
+状態をテーブルに保存した後、予測に複数回使用したり、他の状態とマージして新しいモデルを作成したりすることもできます。
+
+``` sql
+WITH (SELECT state FROM your_model) AS model SELECT
+evalMLMethod(model, param1, param2) FROM test_data
+```
+
+クエリは予測値の列を返します。 `evalMLMethod` の最初の引数は `AggregateFunctionState` オブジェクトで、続く引数は特徴量の列であることに注意してください。
+
+`test_data` は `train_data` と同様のテーブルですが、ターゲット値を含まなくてもかまいません。
+
+### 備考 {#agg_functions-stochasticlinearregression-notes}
+
+1. 二つのモデルをマージするために、ユーザーは次のようなクエリを作成できます:
+   `sql SELECT state1 + state2 FROM your_models`
+   ここで `your_models` テーブルは両方のモデルを含みます。 このクエリは新しい `AggregateFunctionState` オブジェクトを返します。
+
+2. `-State` combinatorを使用しない場合、ユーザーはモデルを保存せずに、作成したモデルの重みを独自の目的で取得できます。
+   `sql SELECT stochasticLinearRegression(0.01)(target, param1, param2) FROM train_data`
+   そのようなクエリはモデルを適合させ、その重みを返します。最初がモデルのパラメータに対応する重みで、最後がバイアスです。 したがって、上記の例では、クエリは3つの値を持つ列を返します。
+
+**また見なさい**
+
+- [stochasticLogisticRegression](#agg_functions-stochasticlogisticregression)
+- [線形およびロジスティック回帰の違い](https://stackoverflow.com/questions/12146914/what-is-the-difference-between-linear-regression-and-logistic-regression)
+
+## stochasticLogisticRegression {#agg_functions-stochasticlogisticregression}
+
+この関数は、確率論的ロジスティック回帰を実装します。 これは、バイナリ分類問題に使用することができ、stochasticLinearRegressionと同じカスタムパラメータをサポートし、同じ方法で動作します。
+
+### パラメータ {#agg_functions-stochasticlogisticregression-parameters}
+
+パラメーターは、stochasticLinearRegressionとまったく同じです:
+`learning rate`, `l2 regularization coefficient`, `mini-batch size`, `method for updating weights`.
+詳細については、 [パラメータ](#agg_functions-stochasticlinearregression-parameters) を参照してください。
+
+``` text
+stochasticLogisticRegression(1.0, 1.0, 10, 'SGD')
+```
+
+1. フィッティング
+
+
+
+    See the `Fitting` section in the [stochasticLinearRegression](#stochasticlinearregression-usage-fitting) description.
+
+    Predicted labels have to be in \[-1, 1\].
+
+1. 予測
+
+
+
+    Using saved state we can predict probability of object having label `1`.
+
+    ``` sql
+    WITH (SELECT state FROM your_model) AS model SELECT
+    evalMLMethod(model, param1, param2) FROM test_data
+    ```
+
+    The query will return a column of probabilities. Note that first argument of `evalMLMethod` is `AggregateFunctionState` object, next are columns of features.
+
+    We can also set a bound of probability, which assigns elements to different labels.
+
+    ``` sql
+    SELECT ans < 1.1 AND ans > 0.5 FROM
+    (WITH (SELECT state FROM your_model) AS model SELECT
+    evalMLMethod(model, param1, param2) AS ans FROM test_data)
+    ```
+
+    Then the result will be labels.
+
+    `test_data` is a table like `train_data` but may not contain target value.
+
+**また見なさい**
+
+- [stochasticLinearRegression](#agg_functions-stochasticlinearregression)
+- [線形およびロジスティック回帰の違い。](https://stackoverflow.com/questions/12146914/what-is-the-difference-between-linear-regression-and-logistic-regression)
+
+## groupBitmapAnd {#groupbitmapand}
+
+ビットマップ列のANDを計算し、UInt64型のカーディナリティを返します。サフィックス `-State` を追加した場合は、[ビットマップオブジェクト](../../sql_reference/functions/bitmap_functions.md) を返します。
+
+``` sql
+groupBitmapAnd(expr)
+```
+
+**パラメータ**
+
+`expr` – An expression that results in `AggregateFunction(groupBitmap, UInt*)` タイプ。
+
+**戻り値**
+
+`UInt64` タイプの値。
+
+**例えば**
+
+``` sql
+DROP TABLE IF EXISTS bitmap_column_expr_test2;
+CREATE TABLE bitmap_column_expr_test2
+(
+    tag_id String,
+    z AggregateFunction(groupBitmap, UInt32)
+)
+ENGINE = MergeTree
+ORDER BY tag_id;
+
+INSERT INTO bitmap_column_expr_test2 VALUES ('tag1', bitmapBuild(cast([1,2,3,4,5,6,7,8,9,10] as Array(UInt32))));
+INSERT INTO bitmap_column_expr_test2 VALUES ('tag2', bitmapBuild(cast([6,7,8,9,10,11,12,13,14,15] as Array(UInt32))));
+INSERT INTO bitmap_column_expr_test2 VALUES ('tag3', bitmapBuild(cast([2,4,6,8,10,12] as Array(UInt32))));
+
+SELECT groupBitmapAnd(z) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%');
+┌─groupBitmapAnd(z)─┐
+│                 3 │
+└───────────────────┘
+
+SELECT arraySort(bitmapToArray(groupBitmapAndState(z))) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%');
+┌─arraySort(bitmapToArray(groupBitmapAndState(z)))─┐
+│ [6,8,10]                                         │
+└──────────────────────────────────────────────────┘
+```
+
+## groupBitmapOr {#groupbitmapor}
+
+ビットマップ列のORを計算し、UInt64型のカーディナリティを返します。サフィックス `-State` を追加した場合は、[ビットマップオブジェクト](../../sql_reference/functions/bitmap_functions.md) を返します。 これは `groupBitmapMerge` と同等です。
+
+``` sql
+groupBitmapOr(expr)
+```
+
+**パラメータ**
+
+`expr` – An expression that results in `AggregateFunction(groupBitmap, UInt*)` タイプ。
+
+**戻り値**
+
+`UInt64` タイプの値。
+
+**例えば**
+
+``` sql
+DROP TABLE IF EXISTS bitmap_column_expr_test2;
+CREATE TABLE bitmap_column_expr_test2
+(
+    tag_id String,
+    z AggregateFunction(groupBitmap, UInt32)
+)
+ENGINE = MergeTree
+ORDER BY tag_id;
+
+INSERT INTO bitmap_column_expr_test2 VALUES ('tag1', bitmapBuild(cast([1,2,3,4,5,6,7,8,9,10] as Array(UInt32))));
+INSERT INTO bitmap_column_expr_test2 VALUES ('tag2', bitmapBuild(cast([6,7,8,9,10,11,12,13,14,15] as Array(UInt32))));
+INSERT INTO bitmap_column_expr_test2 VALUES ('tag3', bitmapBuild(cast([2,4,6,8,10,12] as Array(UInt32))));
+
+SELECT groupBitmapOr(z) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%');
+┌─groupBitmapOr(z)─┐
+│               15 │
+└──────────────────┘
+
+SELECT arraySort(bitmapToArray(groupBitmapOrState(z))) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%');
+┌─arraySort(bitmapToArray(groupBitmapOrState(z)))─┐
+│ [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]           │
+└─────────────────────────────────────────────────┘
+```
+
+## groupBitmapXor {#groupbitmapxor}
+
+ビットマップ列のXORを計算し、UInt64型のカーディナリティを返します。サフィックス `-State` を追加した場合は、[ビットマップオブジェクト](../../sql_reference/functions/bitmap_functions.md) を返します。
+
+``` sql
+groupBitmapXor(expr)
+```
+
+**パラメータ**
+
+`expr` – An expression that results in `AggregateFunction(groupBitmap, UInt*)` タイプ。
+
+**戻り値**
+
+`UInt64` タイプの値。
+
+**例えば**
+
+``` sql
+DROP TABLE IF EXISTS bitmap_column_expr_test2;
+CREATE TABLE bitmap_column_expr_test2
+(
+    tag_id String,
+    z AggregateFunction(groupBitmap, UInt32)
+)
+ENGINE = MergeTree
+ORDER BY tag_id;
+
+INSERT INTO bitmap_column_expr_test2 VALUES ('tag1', bitmapBuild(cast([1,2,3,4,5,6,7,8,9,10] as Array(UInt32))));
+INSERT INTO bitmap_column_expr_test2 VALUES ('tag2', bitmapBuild(cast([6,7,8,9,10,11,12,13,14,15] as Array(UInt32))));
+INSERT INTO bitmap_column_expr_test2 VALUES ('tag3', bitmapBuild(cast([2,4,6,8,10,12] as Array(UInt32))));
+
+SELECT groupBitmapXor(z) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%');
+┌─groupBitmapXor(z)─┐
+│                10 │
+└───────────────────┘
+
+SELECT arraySort(bitmapToArray(groupBitmapXorState(z))) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%');
+┌─arraySort(bitmapToArray(groupBitmapXorState(z)))─┐
+│ [1,3,5,6,8,10,11,13,14,15]                       │
+└──────────────────────────────────────────────────┘
+```
+
+[元の記事](https://clickhouse.tech/docs/en/query_language/agg_functions/reference/) 
diff --git a/docs/ja/sql_reference/data_types/aggregatefunction.md b/docs/ja/sql_reference/data_types/aggregatefunction.md
deleted file mode 120000
index 0b65b7173d6..00000000000
--- a/docs/ja/sql_reference/data_types/aggregatefunction.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/sql_reference/data_types/aggregatefunction.md
\ No newline at end of file
diff --git a/docs/ja/sql_reference/data_types/aggregatefunction.md b/docs/ja/sql_reference/data_types/aggregatefunction.md
new file mode 100644
index 00000000000..8436f02198f
--- /dev/null
+++ b/docs/ja/sql_reference/data_types/aggregatefunction.md
@@ -0,0 +1,70 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 52
+toc_title: AggregateFunction(name,types_of_arguments)...)
+---
+
+# AggregateFunction(name, types\_of\_arguments…) {#data-type-aggregatefunction}
+
+Aggregate functions can have an implementation-defined intermediate state that can be serialized to an AggregateFunction(…) data type and stored in a table, usually, by means of [マテリアライズドビュー](../../sql_reference/statements/select.md#create-view). 集計関数の状態を生成する一般的な方法は、`-State` 接尾辞を付けて集計関数を呼び出すことです。 将来、集約の最終結果を得るには、`-Merge` 接尾辞を付けた同じ集計関数を使用する必要があります。
+
+`AggregateFunction` — parametric data type.
+
+**パラメータ**
+
+- 集計関数の名前。
+
+    If the function is parametric, specify its parameters too.
+
+- 集計関数の引数の型です。
+
+**例えば**
+
+``` sql
+CREATE TABLE t
+(
+    column1 AggregateFunction(uniq, UInt64),
+    column2 AggregateFunction(anyIf, String, UInt8),
+    column3 AggregateFunction(quantiles(0.5, 0.9), UInt64)
+) ENGINE = ...
+```
+
+[uniq](../../sql_reference/aggregate_functions/reference.md#agg_function-uniq)、anyIf ([任意の](../../sql_reference/aggregate_functions/reference.md#agg_function-any)+[もし](../../sql_reference/aggregate_functions/combinators.md#agg-functions-combinator-if))、および [分位数](../../sql_reference/aggregate_functions/reference.md) は、ClickHouseでサポートされている集計関数です。
+
+## 使い方 {#usage}
+
+### データ挿入 {#data-insertion}
+
+データを挿入するには、集計 `-State`-関数を使用した `INSERT SELECT` を実行します。
+
+**関数の例**
+
+``` sql
+uniqState(UserID)
+quantilesState(0.5, 0.9)(SendTiming)
+```
+
+対応する関数 `uniq` や `quantiles` とは対照的に、`-State`-関数は、最終的な値の代わりに状態を返します。 言い換えれば、`AggregateFunction` タイプの値を返します。
+
+`SELECT` クエリの結果では、`AggregateFunction` タイプの値は、すべてのClickHouse出力形式に対して実装固有のバイナリ表現を持ちます。 たとえば、`SELECT` クエリで `TabSeparated` フォーマットにデータをダンプした場合、このダンプは `INSERT` クエリを使用してロードし直すことができます。
+
+### データ選択 {#data-selection}
+
+`AggregatingMergeTree` テーブルからデータを選択するときは、`GROUP BY` 句と、データを挿入したときと同じ集計関数を `-Merge` 接尾辞付きで使用します。
+
+`-Merge` 接尾辞付きの集計関数は、状態のセットを取得し、それらを結合して、完全なデータ集約の結果を返します。
+
+たとえば、次の二つのクエリは同じ結果を返します:
+
+``` sql
+SELECT uniq(UserID) FROM table
+
+SELECT uniqMerge(state) FROM (SELECT uniqState(UserID) AS state FROM table GROUP BY RegionID)
+```
+
+## 使用例 {#usage-example}
+
+[AggregatingMergeTree](../../engines/table_engines/mergetree_family/aggregatingmergetree.md) エンジンの説明を参照してください。
+
+[元の記事](https://clickhouse.tech/docs/en/data_types/nested_data_structures/aggregatefunction/) 
diff --git a/docs/ja/sql_reference/data_types/array.md b/docs/ja/sql_reference/data_types/array.md
deleted file mode 120000
index c004198c797..00000000000
--- a/docs/ja/sql_reference/data_types/array.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/sql_reference/data_types/array.md
\ No newline at end of file
diff --git a/docs/ja/sql_reference/data_types/array.md b/docs/ja/sql_reference/data_types/array.md
new file mode 100644
index 00000000000..c9bd9910bff
--- /dev/null
+++ b/docs/ja/sql_reference/data_types/array.md
@@ -0,0 +1,77 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 51
+toc_title: "\u914D\u5217(T)"
+---
+
+# 配列(t) {#data-type-array}
+
+`T` タイプのアイテムの配列です。 `T` には、配列を含む任意のデータ型を指定できます。
+
+## 配列の作成 {#creating-an-array}
+
+関数を使用して配列を作成できます:
+
+``` sql
+array(T)
+```
+
+角括弧を使用することもできます。
+
+``` sql
+[]
+```
+
+配列の作成例:
+
+``` sql
+SELECT array(1, 2) AS x, toTypeName(x)
+```
+
+``` text
+┌─x─────┬─toTypeName(array(1, 2))─┐
+│ [1,2] │ Array(UInt8)            │
+└───────┴─────────────────────────┘
+```
+
+``` sql
+SELECT [1, 2] AS x, toTypeName(x)
+```
+
+``` text
+┌─x─────┬─toTypeName([1, 2])─┐
+│ [1,2] │ Array(UInt8)       │
+└───────┴────────────────────┘
+```
+
+## データ型の操作 {#working-with-data-types}
+
+その場で配列を作成するとき、ClickHouseは自動的に、リストされたすべての引数を格納できる最も狭いデータ型として引数の型を定義します。 [Nullable](nullable.md#data_type-nullable) またはリテラル [NULL](../../sql_reference/syntax.md#null-literal) 値があれば、配列要素の型も [Nullable](nullable.md) になります。
+
+ClickHouseでデータ型を特定できなかった場合は、例外が生成されます。 たとえば、文字列と数値を同時に持つ配列を作成しようとすると、これが発生します (`SELECT array(1, 'a')`)。
+
+自動データ型検出の例:
+
+``` sql
+SELECT array(1, 2, NULL) AS x, toTypeName(x)
+```
+
+``` text
+┌─x──────────┬─toTypeName(array(1, 2, NULL))─┐
+│ [1,2,NULL] │ Array(Nullable(UInt8))        │
+└────────────┴───────────────────────────────┘
+```
+
+互換性のないデータ型の配列を作成しようとすると、ClickHouseは例外をスローします:
+
+``` sql
+SELECT array(1, 'a')
+```
+
+``` text
+Received exception from server (version 1.1.54388):
+Code: 386. DB::Exception: Received from localhost:9000, 127.0.0.1. DB::Exception: There is no supertype for types UInt8, String because some of them are String/FixedString and some of them are not.
+```
+
+[元の記事](https://clickhouse.tech/docs/en/data_types/array/) 
diff --git a/docs/ja/sql_reference/data_types/boolean.md b/docs/ja/sql_reference/data_types/boolean.md
deleted file mode 120000
index 5aeca990276..00000000000
--- a/docs/ja/sql_reference/data_types/boolean.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/sql_reference/data_types/boolean.md
\ No newline at end of file
diff --git a/docs/ja/sql_reference/data_types/boolean.md b/docs/ja/sql_reference/data_types/boolean.md
new file mode 100644
index 00000000000..60e10ece0b2
--- /dev/null
+++ b/docs/ja/sql_reference/data_types/boolean.md
@@ -0,0 +1,12 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 43
+toc_title: "\u30D6\u30FC\u30EB\u5024"
+---
+
+# ブール値 {#boolean-values}
+
+ブール値のための独立した型はありません。 UInt8型を使用し、値を0または1に制限してください。
+
+[元の記事](https://clickhouse.tech/docs/en/data_types/boolean/) 
diff --git a/docs/ja/sql_reference/data_types/date.md b/docs/ja/sql_reference/data_types/date.md
deleted file mode 120000
index f683731e764..00000000000
--- a/docs/ja/sql_reference/data_types/date.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/sql_reference/data_types/date.md
\ No newline at end of file
diff --git a/docs/ja/sql_reference/data_types/date.md b/docs/ja/sql_reference/data_types/date.md
new file mode 100644
index 00000000000..701b15f8fca
--- /dev/null
+++ b/docs/ja/sql_reference/data_types/date.md
@@ -0,0 +1,15 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 47
+toc_title: "\u65E5\u4ED8"
+---
+
+# 日付 {#date}
+
+日付です。 1970-01-01からの日数(符号なし)として二バイトで格納されます。 Unixエポックの開始直後から、コンパイル段階で定数によって定義される上限しきい値までの値を格納できます(現在は2106年までですが、完全にサポートされる最後の年は2105年です)。
+最小値は0000-00-00として出力されます。
+
+日付の値は、タイムゾーンなしで格納されます。
+
+[元の記事](https://clickhouse.tech/docs/en/data_types/date/) 
diff --git a/docs/ja/sql_reference/data_types/datetime.md b/docs/ja/sql_reference/data_types/datetime.md
deleted file mode 120000
index d84c5b09089..00000000000
--- a/docs/ja/sql_reference/data_types/datetime.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/sql_reference/data_types/datetime.md
\ No newline at end of file
diff --git a/docs/ja/sql_reference/data_types/datetime.md b/docs/ja/sql_reference/data_types/datetime.md
new file mode 100644
index 00000000000..5cb696f0564
--- /dev/null
+++ b/docs/ja/sql_reference/data_types/datetime.md
@@ -0,0 +1,129 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 48
+toc_title: DateTime
+---
+
+# Datetime {#data_type-datetime}
+
+時刻の瞬間を格納でき、カレンダーの日付と時刻として表現できます。
+
+構文:
+
+``` sql
+DateTime([timezone])
+```
+
+サポートされる値の範囲: \[1970-01-01 00:00:00, 2105-12-31 23:59:59\].
+
+解像度:1秒。
+
+## 使用上の注意 {#usage-remarks}
+
+時刻は、タイムゾーンや夏時間に関係なく、[Unix timestamp](https://en.wikipedia.org/wiki/Unix_time) として保存されます。 さらに、`DateTime` タイプは、列全体で共通のタイムゾーンを格納できます。これは、`DateTime` 型の値がテキスト形式でどのように表示されるか、および文字列として指定された値 (‘2020-01-01 05:00:01’) がどのように解析されるかに影響します。 タイムゾーンはテーブルの行(またはresultset)には格納されず、列のメタデータに格納されます。
+対応するタイムゾーンの一覧は [IANA時間帯のデータベース](https://www.iana.org/time-zones) にあります。
+その `tzdata` パッケージ、含む [IANA時間帯のデータベース](https://www.iana.org/time-zones)、システムに取付けられているべきです。 を使用 `timedatectl list-timezones` ローカルシステ + +タイムゾーンを明示的に設定することができます `DateTime`-テーブルを作成するときに列を入力します。 タイムゾーンが設定されていない場合、ClickHouseはタイムゾーンの値を使用します。 [タイムゾーン](../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-timezone) クリックハウスサーバーの起動時にサーバー設定またはオペレーティングシステム設定のパラメータ。 + +その [クリックハウス-顧客](../../interfaces/cli.md) データ型の初期化時にタイムゾーンが明示的に設定されていない場合は、既定でサーバーのタイムゾーンを適用します。 クライアントのタイムゾーンを使用するには `clickhouse-client` と `--use_client_time_zone` パラメータ。 + +ClickHouse出力値で `YYYY-MM-DD hh:mm:ss` デフォルトではテキスト形式。 出力を変更するには、次のようにします [formatDateTime](../../sql_reference/functions/date_time_functions.md#formatdatetime) 機能。 + +ClickHouseにデータを挿入するときは、日付と時刻の文字列の異なる形式を使用することができます。 [date\_time\_input\_format](../../operations/settings/settings.md#settings-date_time_input_format) 設定。 + +## 例 {#examples} + +**1.** テーブルを作成する `DateTime`-列を入力してデータを挿入する: + +``` sql +CREATE TABLE dt +( + `timestamp` DateTime('Europe/Moscow'), + `event_id` UInt8 +) +ENGINE = TinyLog; +``` + +``` sql +INSERT INTO dt Values (1546300800, 1), ('2019-01-01 00:00:00', 2); +``` + +``` sql +SELECT * FROM dt; +``` + +``` text +┌───────────timestamp─┬─event_id─┐ +│ 2019-01-01 03:00:00 │ 1 │ +│ 2019-01-01 00:00:00 │ 2 │ +└─────────────────────┴──────────┘ +``` + +- Datetimeを整数として挿入すると、Unixタイムスタンプ(UTC)として扱われます。 `1546300800` を表します `'2019-01-01 00:00:00'` UTC。 しかし、として `timestamp` 列は `Europe/Moscow` (UTC+3)タイムゾーン指定、文字列として出力すると、値は次のように表示されます `'2019-01-01 03:00:00'` +- 文字列値をdatetimeとして挿入すると、列timezoneにあるものとして扱われます。 `'2019-01-01 00:00:00'` するものとして扱われる `Europe/Moscow` タイムゾーンとして保存 `1546290000`. + +**2.** フィルタリング `DateTime` 値 + +``` sql +SELECT * FROM dt WHERE timestamp = toDateTime('2019-01-01 00:00:00', 'Europe/Moscow') +``` + +``` text +┌───────────timestamp─┬─event_id─┐ +│ 2019-01-01 00:00:00 │ 2 │ +└─────────────────────┴──────────┘ +``` + +`DateTime` 列の値は、文字列値を使用してフィルター処理できます。 `WHERE` 述語。 それはに変換されます `DateTime` 自動的に: + +``` sql +SELECT * FROM dt WHERE timestamp = '2019-01-01 00:00:00' +``` + +``` text +┌───────────timestamp─┬─event_id─┐ +│ 2019-01-01 03:00:00 │ 1 │ +└─────────────────────┴──────────┘ +``` + +**3.** Aのタイムゾーンを取得する `DateTime`-タイプ列: + +``` sql +SELECT toDateTime(now(), 'Europe/Moscow') AS column, toTypeName(column) AS x +``` + +``` text +┌──────────────column─┬─x─────────────────────────┐ +│ 2019-10-16 04:12:04 │ DateTime('Europe/Moscow') │ +└─────────────────────┴───────────────────────────┘ +``` + +**4.** タイムゾーン変換 + +``` sql +SELECT +toDateTime(timestamp, 'Europe/London') as lon_time, +toDateTime(timestamp, 'Europe/Moscow') as mos_time +FROM dt +``` + +``` text +┌───────────lon_time──┬────────────mos_time─┐ +│ 2019-01-01 00:00:00 │ 2019-01-01 03:00:00 │ +│ 2018-12-31 21:00:00 │ 2019-01-01 00:00:00 │ +└─────────────────────┴─────────────────────┘ +``` + +## また見なさい {#see-also} + +- [型変換機能](../../sql_reference/functions/type_conversion_functions.md) +- [日付と時刻を操作するための関数](../../sql_reference/functions/date_time_functions.md) +- [配列を操作するための関数](../../sql_reference/functions/array_functions.md) +- [その `date_time_input_format` 設定](../../operations/settings/settings.md#settings-date_time_input_format) +- [その `timezone` サーバ設定パラメータ](../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-timezone) +- [日付と時刻を操作する演算子](../../sql_reference/operators.md#operators-datetime) +- [その `Date` データ型](date.md) + +[元の記事](https://clickhouse.tech/docs/en/data_types/datetime/) diff --git 
a/docs/ja/sql_reference/data_types/datetime64.md b/docs/ja/sql_reference/data_types/datetime64.md deleted file mode 120000 index 68c4e482323..00000000000 --- a/docs/ja/sql_reference/data_types/datetime64.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/data_types/datetime64.md \ No newline at end of file diff --git a/docs/ja/sql_reference/data_types/datetime64.md b/docs/ja/sql_reference/data_types/datetime64.md new file mode 100644 index 00000000000..d189ecf2b3b --- /dev/null +++ b/docs/ja/sql_reference/data_types/datetime64.md @@ -0,0 +1,104 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 49 +toc_title: DateTime64 +--- + +# Datetime64 {#data_type-datetime64} + +定義されたサブ秒の精度で、カレンダーの日付と時刻として表現することができる時刻にインスタントを格納することができます + +目盛りサイズ(精度):10-精密 秒 + +構文: + +``` sql +DateTime64(precision, [timezone]) +``` + +内部的には、データを次の数として格納します ‘ticks’ エポックスタート(1970-01-01 00:00:00UTC)はInt64です。 目盛りの分解能は、precisionパラメーターによって決まります。 さらに、 `DateTime64` タイプは、列全体で同じタイムゾーンを格納することができます。 `DateTime64` 型の値は、テキスト形式で表示され、文字列として指定された値がどのように解析されますか (‘2020-01-01 05:00:01.000’). タイムゾーンはテーブルの行(またはresultset)に格納されず、列のメタデータに格納されます。 詳細はこちら [DateTime](datetime.md). + +## 例 {#examples} + +**1.** テーブルの作成 `DateTime64`-列を入力してデータを挿入する: + +``` sql +CREATE TABLE dt +( + `timestamp` DateTime64(3, 'Europe/Moscow'), + `event_id` UInt8 +) +ENGINE = TinyLog +``` + +``` sql +INSERT INTO dt Values (1546300800000, 1), ('2019-01-01 00:00:00', 2) +``` + +``` sql +SELECT * FROM dt +``` + +``` text +┌───────────────timestamp─┬─event_id─┐ +│ 2019-01-01 03:00:00.000 │ 1 │ +│ 2019-01-01 00:00:00.000 │ 2 │ +└─────────────────────────┴──────────┘ +``` + +- Datetimeを整数として挿入すると、適切にスケーリングされたUnixタイムスタンプ(UTC)として扱われます。 `1546300800000` (精度3)を表します `'2019-01-01 00:00:00'` UTC。 しかし、として `timestamp` 列は `Europe/Moscow` (UTC+3)タイムゾーン指定、文字列として出力すると、値は次のように表示されます `'2019-01-01 03:00:00'` +- 文字列値をdatetimeとして挿入すると、列timezoneにあるものとして扱われます。 `'2019-01-01 00:00:00'` れる。 `Europe/Moscow` タイムゾーンとして保存 `1546290000000`. 
+ +**2.** フィルタリング `DateTime64` 値 + +``` sql +SELECT * FROM dt WHERE timestamp = toDateTime64('2019-01-01 00:00:00', 3, 'Europe/Moscow') +``` + +``` text +┌───────────────timestamp─┬─event_id─┐ +│ 2019-01-01 00:00:00.000 │ 2 │ +└─────────────────────────┴──────────┘ +``` + +とは異なり `DateTime`, `DateTime64` 値は変換されません `String` 自動的に + +**3.** Aのタイムゾーンを取得する `DateTime64`-タイプ値: + +``` sql +SELECT toDateTime64(now(), 3, 'Europe/Moscow') AS column, toTypeName(column) AS x +``` + +``` text +┌──────────────────column─┬─x──────────────────────────────┐ +│ 2019-10-16 04:12:04.000 │ DateTime64(3, 'Europe/Moscow') │ +└─────────────────────────┴────────────────────────────────┘ +``` + +**4.** タイムゾーン変換 + +``` sql +SELECT +toDateTime64(timestamp, 3, 'Europe/London') as lon_time, +toDateTime64(timestamp, 3, 'Europe/Moscow') as mos_time +FROM dt +``` + +``` text +┌───────────────lon_time──┬────────────────mos_time─┐ +│ 2019-01-01 00:00:00.000 │ 2019-01-01 03:00:00.000 │ +│ 2018-12-31 21:00:00.000 │ 2019-01-01 00:00:00.000 │ +└─────────────────────────┴─────────────────────────┘ +``` + +## また見なさい {#see-also} + +- [タイプ変換関数](../../sql_reference/functions/type_conversion_functions.md) +- [日付と時刻を操作するための関数](../../sql_reference/functions/date_time_functions.md) +- [配列を操作するための関数](../../sql_reference/functions/array_functions.md) +- [その `date_time_input_format` 設定](../../operations/settings/settings.md#settings-date_time_input_format) +- [その `timezone` サーバ設定パラメータ](../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-timezone) +- [日付と時刻を操作する演算子](../../sql_reference/operators.md#operators-datetime) +- [`Date` データ型](date.md) +- [`DateTime` データ型](datetime.md) diff --git a/docs/ja/sql_reference/data_types/decimal.md b/docs/ja/sql_reference/data_types/decimal.md deleted file mode 120000 index f1616db007b..00000000000 --- a/docs/ja/sql_reference/data_types/decimal.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/data_types/decimal.md \ No newline at end of file diff --git a/docs/ja/sql_reference/data_types/decimal.md b/docs/ja/sql_reference/data_types/decimal.md new file mode 100644 index 00000000000..32bd83fbd85 --- /dev/null +++ b/docs/ja/sql_reference/data_types/decimal.md @@ -0,0 +1,109 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 42 +toc_title: "\u5C0F\u6570" +--- + +# 小数点(p,s)、小数点32(s)、小数点64(s)、小数点128(s) {#decimalp-s-decimal32s-decimal64s-decimal128s} + +加算、減算、および乗算の演算時に精度を維持する符号付き固定小数点数。 除算の場合、最下位の数字は破棄されます(丸められていません)。 + +## パラメータ {#parameters} + +- P-精度。 有効な範囲:\[1:38\]。 小数点以下の桁数(分数を含む)を指定します。 +- S-スケール。 有効な範囲:\[0:P\]。 小数部の桁数を指定します。 + +Pパラメータ値に応じてDecimal(P,S)は以下のシノニムです: +-Pから\[1:9\]-用Decimal32(S) +-Pから\[10:18\]-用Decimal64(S) +-Pから\[19:38\]-用Decimal128(S) + +## 小数値の範囲 {#decimal-value-ranges} + +- デシマル32(s) - ( -1 \* 10^(9 - s)、1\*10^(9-s) ) +- Decimal64(S) - ( -1 \* 10^(18 - S)、1\*10^(18-S) ) +- Decimal128(S) - ( -1 \* 10^(38 - S)、1\*10^(38-S) ) + +たとえば、decimal32(4)には、-99999.9999から99999.9999までの0.0001ステップの数値を含めることができます。 + +## 内部表現 {#internal-representation} + +社内データとして表される通常の署名の整数をそれぞれのビット幅になります。 メモリに格納できる実際の値の範囲は、上記で指定した値より少し大きくなり、文字列からの変換でのみチェックされます。 + +現代のcpuはネイティブに128ビットの整数をサポートしていないため、decimal128の演算はエミュレートされます。 このため、decimal128はdecimal32/decimal64よりも大幅に遅く動作します。 + +## 操作と結果の種類 {#operations-and-result-type} + +Decimalのバイナリ演算では、結果の型が広くなります(引数の順序は任意です)。 + +- Decimal64(S1) Decimal32(S2)-\>Decimal64(S) +- デシマル128(s1) Decimal32(S2)-\>Decimal128S) +- デシマル128(s1) Decimal64(S2)-\>Decimal128(S) + +スケールのルール: + +- 
加算、減算:S=最大(S1、S2)。 +- multuply:S=S1+S2. +- 分割:S=S1。 + +Decimalと整数の間の同様の演算の場合、結果は引数と同じサイズのDecimalになります。 + +DecimalとFloat32/Float64の間の演算は定義されていません。 それらが必要な場合は、toDecimal32、toDecimal64、toDecimal128またはtoFloat32、tofat64組み込み関数を使用して明示的に引数をキャストできます。 結果は精度を失い、型変換は計算コストのかかる演算であることに注意してください。 + +Decimalの一部の関数は、Float64として結果を返します(たとえば、varまたはstddev)。 これは、Float64とDecimal入力の間で同じ値を持つ異なる結果につながる可能性があります。 + +## オーバーフロ {#overflow-checks} + +中計算は小数,整数であふれかが起こる。 小数部の余分な桁は破棄されます(丸められていません)。 整数部分の数字が多すぎると、例外が発生します。 + +``` sql +SELECT toDecimal32(2, 4) AS x, x / 3 +``` + +``` text +┌──────x─┬─divide(toDecimal32(2, 4), 3)─┐ +│ 2.0000 │ 0.6666 │ +└────────┴──────────────────────────────┘ +``` + +``` sql +SELECT toDecimal32(4.2, 8) AS x, x * x +``` + +``` text +DB::Exception: Scale is out of bounds. +``` + +``` sql +SELECT toDecimal32(4.2, 8) AS x, 6 * x +``` + +``` text +DB::Exception: Decimal math overflow. +``` + +オーバーフローチェックが業務に減速した。 オーバーフローが不可能であることがわかっている場合は、 `decimal_check_overflow` 設定。 時チェックを無効とオーバーフローが起こり、結果は正しくあり: + +``` sql +SET decimal_check_overflow = 0; +SELECT toDecimal32(4.2, 8) AS x, 6 * x +``` + +``` text +┌──────────x─┬─multiply(6, toDecimal32(4.2, 8))─┐ +│ 4.20000000 │ -17.74967296 │ +└────────────┴──────────────────────────────────┘ +``` + +オーバーフローチェックは算術演算だけでなく、値の比較にも発生します: + +``` sql +SELECT toDecimal32(1, 8) < 100 +``` + +``` text +DB::Exception: Can't compare. +``` + +[元の記事](https://clickhouse.tech/docs/en/data_types/decimal/) diff --git a/docs/ja/sql_reference/data_types/domains/index.md b/docs/ja/sql_reference/data_types/domains/index.md deleted file mode 120000 index 144d08678a3..00000000000 --- a/docs/ja/sql_reference/data_types/domains/index.md +++ /dev/null @@ -1 +0,0 @@ -../../../../en/sql_reference/data_types/domains/index.md \ No newline at end of file diff --git a/docs/ja/sql_reference/data_types/domains/index.md b/docs/ja/sql_reference/data_types/domains/index.md new file mode 100644 index 00000000000..3a743f84290 --- /dev/null +++ b/docs/ja/sql_reference/data_types/domains/index.md @@ -0,0 +1,8 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_folder_title: Domains +toc_priority: 56 +--- + + diff --git a/docs/ja/sql_reference/data_types/domains/ipv4.md b/docs/ja/sql_reference/data_types/domains/ipv4.md deleted file mode 120000 index 6cd9b70cb90..00000000000 --- a/docs/ja/sql_reference/data_types/domains/ipv4.md +++ /dev/null @@ -1 +0,0 @@ -../../../../en/sql_reference/data_types/domains/ipv4.md \ No newline at end of file diff --git a/docs/ja/sql_reference/data_types/domains/ipv4.md b/docs/ja/sql_reference/data_types/domains/ipv4.md new file mode 100644 index 00000000000..983ca07d184 --- /dev/null +++ b/docs/ja/sql_reference/data_types/domains/ipv4.md @@ -0,0 +1,84 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 59 +toc_title: IPv4 +--- + +## IPv4 {#ipv4} + +`IPv4` ドメインは以下に基づきます `UInt32` 入力し、IPv4値を格納するための型指定された置換として機能します。 それは点検で人間に適する入出力形式およびコラムのタイプ情報を密集した貯蔵に与える。 + +### 基本的な使用法 {#basic-usage} + +``` sql +CREATE TABLE hits (url String, from IPv4) ENGINE = MergeTree() ORDER BY url; + +DESCRIBE TABLE hits; +``` + +``` text +┌─name─┬─type───┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┐ +│ url │ String │ │ │ │ │ +│ from │ IPv4 │ │ │ │ │ +└──────┴────────┴──────────────┴────────────────────┴─────────┴──────────────────┘ +``` + +または、ipv4ドメインをキーとして使用できます: + +``` sql +CREATE TABLE hits (url String, from IPv4) ENGINE = MergeTree() ORDER BY from; +``` + 
+`IPv4` ドメインはIPv4文字列としてカスタム入力形式をサポート: + +``` sql +INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '116.253.40.133')('https://clickhouse.tech', '183.247.232.58')('https://clickhouse.yandex/docs/en/', '116.106.34.242'); + +SELECT * FROM hits; +``` + +``` text +┌─url────────────────────────────────┬───────────from─┐ +│ https://clickhouse.tech/docs/en/ │ 116.106.34.242 │ +│ https://wikipedia.org │ 116.253.40.133 │ +│ https://clickhouse.tech │ 183.247.232.58 │ +└────────────────────────────────────┴────────────────┘ +``` + +値が格納されコンパクトにバイナリ形式: + +``` sql +SELECT toTypeName(from), hex(from) FROM hits LIMIT 1; +``` + +``` text +┌─toTypeName(from)─┬─hex(from)─┐ +│ IPv4 │ B7F7E83A │ +└──────────────────┴───────────┘ +``` + +ドメイン値は、以下の型以外の型に暗黙的に変換できません `UInt32`. +変換したい場合 `IPv4` 値を文字列に変換するには、それを明示的に行う必要があります `IPv4NumToString()` 機能: + +``` sql +SELECT toTypeName(s), IPv4NumToString(from) as s FROM hits LIMIT 1; +``` + + ┌─toTypeName(IPv4NumToString(from))─┬─s──────────────┐ + │ String │ 183.247.232.58 │ + └───────────────────────────────────┴────────────────┘ + +または `UInt32` 値: + +``` sql +SELECT toTypeName(i), CAST(from as UInt32) as i FROM hits LIMIT 1; +``` + +``` text +┌─toTypeName(CAST(from, 'UInt32'))─┬──────────i─┐ +│ UInt32 │ 3086477370 │ +└──────────────────────────────────┴────────────┘ +``` + +[元の記事](https://clickhouse.tech/docs/en/data_types/domains/ipv4) diff --git a/docs/ja/sql_reference/data_types/domains/ipv6.md b/docs/ja/sql_reference/data_types/domains/ipv6.md deleted file mode 120000 index 5d853ae2a59..00000000000 --- a/docs/ja/sql_reference/data_types/domains/ipv6.md +++ /dev/null @@ -1 +0,0 @@ -../../../../en/sql_reference/data_types/domains/ipv6.md \ No newline at end of file diff --git a/docs/ja/sql_reference/data_types/domains/ipv6.md b/docs/ja/sql_reference/data_types/domains/ipv6.md new file mode 100644 index 00000000000..268bde0ff06 --- /dev/null +++ b/docs/ja/sql_reference/data_types/domains/ipv6.md @@ -0,0 +1,86 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 60 +toc_title: IPv6 +--- + +## IPv6 {#ipv6} + +`IPv6` ドメインは以下に基づきます `FixedString(16)` タイプと入力の代替品の保管IPv6数値です。 それは点検で人間に適する入出力形式およびコラムのタイプ情報を密集した貯蔵に与える。 + +### 基本的な使用法 {#basic-usage} + +``` sql +CREATE TABLE hits (url String, from IPv6) ENGINE = MergeTree() ORDER BY url; + +DESCRIBE TABLE hits; +``` + +``` text +┌─name─┬─type───┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┐ +│ url │ String │ │ │ │ │ +│ from │ IPv6 │ │ │ │ │ +└──────┴────────┴──────────────┴────────────────────┴─────────┴──────────────────┘ +``` + +またはあなたが使用 `IPv6` 鍵としてのドメイン: + +``` sql +CREATE TABLE hits (url String, from IPv6) ENGINE = MergeTree() ORDER BY from; +``` + +`IPv6` ドメイン対応のカスタム入力としてIPv6-文字列: + +``` sql +INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '2a02:aa08:e000:3100::2')('https://clickhouse.tech', '2001:44c8:129:2632:33:0:252:2')('https://clickhouse.yandex/docs/en/', '2a02:e980:1e::1'); + +SELECT * FROM hits; +``` + +``` text +┌─url────────────────────────────────┬─from──────────────────────────┐ +│ https://clickhouse.tech │ 2001:44c8:129:2632:33:0:252:2 │ +│ https://clickhouse.tech/docs/en/ │ 2a02:e980:1e::1 │ +│ https://wikipedia.org │ 2a02:aa08:e000:3100::2 │ +└────────────────────────────────────┴───────────────────────────────┘ +``` + +値が格納されコンパクトにバイナリ形式: + +``` sql +SELECT toTypeName(from), hex(from) FROM hits LIMIT 1; +``` + +``` text +┌─toTypeName(from)─┬─hex(from)────────────────────────┐ +│ IPv6 │ 
200144C8012926320033000002520002 │ +└──────────────────┴──────────────────────────────────┘ +``` + +ドメイン値は、以下の型以外の型に暗黙的に変換できません `FixedString(16)`. +変換したい場合 `IPv6` 値を文字列に変換するには、それを明示的に行う必要があります `IPv6NumToString()` 機能: + +``` sql +SELECT toTypeName(s), IPv6NumToString(from) as s FROM hits LIMIT 1; +``` + +``` text +┌─toTypeName(IPv6NumToString(from))─┬─s─────────────────────────────┐ +│ String │ 2001:44c8:129:2632:33:0:252:2 │ +└───────────────────────────────────┴───────────────────────────────┘ +``` + +または `FixedString(16)` 値: + +``` sql +SELECT toTypeName(i), CAST(from as FixedString(16)) as i FROM hits LIMIT 1; +``` + +``` text +┌─toTypeName(CAST(from, 'FixedString(16)'))─┬─i───────┐ +│ FixedString(16) │ ��� │ +└───────────────────────────────────────────┴─────────┘ +``` + +[元の記事](https://clickhouse.tech/docs/en/data_types/domains/ipv6) diff --git a/docs/ja/sql_reference/data_types/domains/overview.md b/docs/ja/sql_reference/data_types/domains/overview.md deleted file mode 120000 index e297a304c7f..00000000000 --- a/docs/ja/sql_reference/data_types/domains/overview.md +++ /dev/null @@ -1 +0,0 @@ -../../../../en/sql_reference/data_types/domains/overview.md \ No newline at end of file diff --git a/docs/ja/sql_reference/data_types/domains/overview.md b/docs/ja/sql_reference/data_types/domains/overview.md new file mode 100644 index 00000000000..24a14055491 --- /dev/null +++ b/docs/ja/sql_reference/data_types/domains/overview.md @@ -0,0 +1,32 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 58 +toc_title: "\u6982\u8981" +--- + +# 藩 {#domains} + +ドメインは、既存の基本タイプの上にいくつかの追加機能を追加する特別な目的のタイプですが、基になるデータタイプのオンワイヤとオンディス 現時点では、clickhouseはユーザー定義ドメインをサポートしていません。 + +ドメインは、対応する基本タイプを使用できる任意の場所で使用できます。: + +- ドメイン型の列を作成する +- ドメイン列からの値の読み取り/書き込み +- 基本型をインデックスとして使用できる場合は、インデックスとして使用します +- ドメイン列の値を持つ関数の呼び出し + +### ドメインの追加機能 {#extra-features-of-domains} + +- 明示的な列タイプ名in `SHOW CREATE TABLE` または `DESCRIBE TABLE` +- 人間に優しいフォーマットからの入力 `INSERT INTO domain_table(domain_column) VALUES(...)` +- 人間に優しいフォーマットへの出力 `SELECT domain_column FROM domain_table` +- 人間に優しい形式で外部ソースからデータをロードする: `INSERT INTO domain_table FORMAT CSV ...` + +### 制限 {#limitations} + +- ベース型のインデックス列をドメイン型に変換できません `ALTER TABLE`. +- できない暗黙的に変換し文字列値がコマンドライン値を挿入する際、データから別のテーブルや列. +- ドメインは、格納された値に制約を追加しません。 + +[元の記事](https://clickhouse.tech/docs/en/data_types/domains/overview) diff --git a/docs/ja/sql_reference/data_types/enum.md b/docs/ja/sql_reference/data_types/enum.md deleted file mode 120000 index 31941c2bf9b..00000000000 --- a/docs/ja/sql_reference/data_types/enum.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/data_types/enum.md \ No newline at end of file diff --git a/docs/ja/sql_reference/data_types/enum.md b/docs/ja/sql_reference/data_types/enum.md new file mode 100644 index 00000000000..45705f0b608 --- /dev/null +++ b/docs/ja/sql_reference/data_types/enum.md @@ -0,0 +1,132 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 50 +toc_title: "\u5217\u6319\u578B" +--- + +# 列挙型 {#enum} + +名前付きの値で構成される列挙型。 + +名前の値として宣言された `'string' = integer` ペア。 ClickHouseは数字のみを格納しますが、名前による値の操作をサポートします。 + +ClickHouse支援: + +- 8ビット `Enum`. それはで列挙可能な256までの値を含むことができます `[-128, 127]` 範囲。 +- 16ビット `Enum`. 
それはで列挙可能な65536まで値を含むことができます `[-32768, 32767]` 範囲。 + +ClickHouseは自動的に次のタイプを選択します `Enum` データが挿入されるとき。 また、 `Enum8` または `Enum16` ストレージのサイズを確認するタイプ。 + +## 使用例 {#usage-examples} + +ここでは、 `Enum8('hello' = 1, 'world' = 2)` タイプ列: + +``` sql +CREATE TABLE t_enum +( + x Enum('hello' = 1, 'world' = 2) +) +ENGINE = TinyLog +``` + +列 `x` 型定義にリストされている値のみを格納できます: `'hello'` または `'world'`. 他の値を保存しようとすると、ClickHouseは例外を発生させます。 このため8ビットサイズ `Enum` 自動的に選択されます。 + +``` sql +INSERT INTO t_enum VALUES ('hello'), ('world'), ('hello') +``` + +``` text +Ok. +``` + +``` sql +INSERT INTO t_enum values('a') +``` + +``` text +Exception on client: +Code: 49. DB::Exception: Unknown element 'a' for type Enum('hello' = 1, 'world' = 2) +``` + +テーブルからデータをクエリすると、clickhouseから文字列の値が出力されます `Enum`. + +``` sql +SELECT * FROM t_enum +``` + +``` text +┌─x─────┐ +│ hello │ +│ world │ +│ hello │ +└───────┘ +``` + +行に相当する数値を表示する必要がある場合は、次の行をキャストする必要があります `Enum` 整数型への値。 + +``` sql +SELECT CAST(x, 'Int8') FROM t_enum +``` + +``` text +┌─CAST(x, 'Int8')─┐ +│ 1 │ +│ 2 │ +│ 1 │ +└─────────────────┘ +``` + +クエリで列挙値を作成するには、次のものも使用する必要があります `CAST`. + +``` sql +SELECT toTypeName(CAST('a', 'Enum(\'a\' = 1, \'b\' = 2)')) +``` + +``` text +┌─toTypeName(CAST('a', 'Enum(\'a\' = 1, \'b\' = 2)'))─┐ +│ Enum8('a' = 1, 'b' = 2) │ +└─────────────────────────────────────────────────────┘ +``` + +## 一般的なルールと使用法 {#general-rules-and-usage} + +各値には、範囲内の数値が割り当てられます `-128 ... 127` のために `Enum8` または範囲で `-32768 ... 32767` のために `Enum16`. すべての文字列と数字は異なる必要があります。 空の文字列が許可されます。 この型が(テーブル定義で)指定されている場合、数値は任意の順序で指定できます。 しかし、順序は重要ではありません。 + +文字列と数値のどちらも `Enum` できる。 [NULL](../../sql_reference/syntax.md). + +アン `Enum` に含まれることができる [Nullable](nullable.md) タイプ。 そのため、クエリを使用してテーブルを作成する場合 + +``` sql +CREATE TABLE t_enum_nullable +( + x Nullable( Enum8('hello' = 1, 'world' = 2) ) +) +ENGINE = TinyLog +``` + +それだけでなく、 `'hello'` と `'world'`、しかし `NULL` 同様に、。 + +``` sql +INSERT INTO t_enum_nullable Values('hello'),('world'),(NULL) +``` + +ラムでは、 `Enum` 列は次のように保存されます `Int8` または `Int16` 対応する数値の。 + +テキストフォームで読み取る場合、clickhouseは値を文字列として解析し、一連のenum値から対応する文字列を検索します。 見つからない場合は、例外がスローされます。 テキスト形式で読み込むと、文字列が読み込まれ、対応する数値が検索されます。 見つからない場合は、例外がスローされます。 +テキスト形式で書くときは、値を対応する文字列として書き込みます。 列データにガベージ(有効なセットに含まれていない数値)が含まれている場合は、例外がスローされます。 バイナリ形式で読み書きするときは、int8とint16のデータ型と同じように動作します。 +暗黙的なデフォルト値は、数値が最も小さい値です。 + +の間 `ORDER BY`, `GROUP BY`, `IN`, `DISTINCT` そして、Enumは対応する数字と同じように動作します。 たとえば、ORDER BYは数値的に並べ替えます。 等価演算子と比較演算子は、基になる数値と同じようにEnumでも機能します。 + +Enum値を数値と比較することはできません。 列挙型は、定数文字列と比較できます。 比較された文字列が列挙型の有効な値でない場合は、例外がスローされます。 IN演算子は、左側の列挙型と右側の文字列のセットでサポートされています。 文字列は、対応する列挙型の値です。 + +Most numeric and string operations are not defined for Enum values, e.g. adding a number to an Enum or concatenating a string to an Enum. 
+しかし、列挙型は自然を持っています `toString` 文字列値を返す関数。 + +また、enumの値は、以下を使用して数値型に変換できます。 `toT` ここで、Tは数値型です。 Tが列挙型の基になる数値型に対応する場合、この変換はゼロコストになります。 +値のセットのみが変更された場合、列挙型は、alterを使用してコストをかけずに変更できます。 alterを使用して列挙型のメンバーを追加および削除することができます(削除された値がテーブルで一度も使用されていない場合にのみ、削除は安全で セーフガードとして、以前に定義されたenumメンバの数値を変更すると例外がスローされます。 + +ALTERを使用すると、Int8をInt16に変更するのと同じように、Enum8をEnum16に変更することも、その逆も可能です。 + +[元の記事](https://clickhouse.tech/docs/en/data_types/enum/) diff --git a/docs/ja/sql_reference/data_types/fixedstring.md b/docs/ja/sql_reference/data_types/fixedstring.md deleted file mode 120000 index 1df50262eb7..00000000000 --- a/docs/ja/sql_reference/data_types/fixedstring.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/data_types/fixedstring.md \ No newline at end of file diff --git a/docs/ja/sql_reference/data_types/fixedstring.md b/docs/ja/sql_reference/data_types/fixedstring.md new file mode 100644 index 00000000000..ae9a23581da --- /dev/null +++ b/docs/ja/sql_reference/data_types/fixedstring.md @@ -0,0 +1,63 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 45 +toc_title: FixedString(N) +--- + +# Fixedstring {#fixedstring} + +の固定長文字列 `N` バイト(文字もコードポイントもない)。 + +の列を宣言するには `FixedString` タイプ、次の構文を使用します: + +``` sql + FixedString(N) +``` + +どこに `N` 自然数です。 + +その `FixedString` 型は、データの長さが正確である場合に効率的です `N` バイト。 他のすべてのケースでは、効率を低下させる可能性があります。 + +効率的に格納できる値の例 `FixedString`-型指定された列: + +- IPアドレスのバイナリ表現 (`FixedString(16)` IPv6用)。 +- Language codes (ru\_RU, en\_US … ). +- Currency codes (USD, RUB … ). +- ハッシュのバイナリ表現 (`FixedString(16)` MD5の場合, `FixedString(32)` 用SHA256)。 + +UUID値を格納するには、以下を使用します [UUID](uuid.md) データ型。 + +データを挿入するときは、clickhouse: + +- 文字列が含まれていない場合、nullバイトの文字列を補完します `N` バイト。 +- スロー `Too large value for FixedString(N)` る場合の例外の文字列です。 `N` バイトまでとなります。 + +データを選択するとき、clickhouseは文字列の末尾にあるnullバイトを削除しません。 を使用する場合 `WHERE` この節では、nullバイトを手動で追加して、 `FixedString` 値。 次の例では、次の例を使用する方法を示します `WHERE` との節 `FixedString`. + +のは、単一の次の表を考えてみましょう `FixedString(2)` 列: + +``` text +┌─name──┐ +│ b │ +└───────┘ +``` + +クエリ `SELECT * FROM FixedStringTable WHERE a = 'b'` 結果としてデータを返さない。 このフィルターパターンはnullバイトまでとなります。 + +``` sql +SELECT * FROM FixedStringTable +WHERE a = 'b\0' +``` + +``` text +┌─a─┐ +│ b │ +└───┘ +``` + +この動作は、mysqlとは異なります `CHAR` タイプ(文字列は空白で埋められ、空白は出力用に削除されます)。 + +の長さに注意してください。 `FixedString(N)` 値が一定になります。 その [長さ](../../sql_reference/functions/array_functions.md#array_functions-length) 関数の戻り値 `N` 場合においても `FixedString(N)` 値はnullバイトでのみ入力されるが、 [空](../../sql_reference/functions/string_functions.md#empty) 関数の戻り値 `1` この場合。 + +[元の記事](https://clickhouse.tech/docs/en/data_types/fixedstring/) diff --git a/docs/ja/sql_reference/data_types/float.md b/docs/ja/sql_reference/data_types/float.md deleted file mode 120000 index 187b665cc78..00000000000 --- a/docs/ja/sql_reference/data_types/float.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/data_types/float.md \ No newline at end of file diff --git a/docs/ja/sql_reference/data_types/float.md b/docs/ja/sql_reference/data_types/float.md new file mode 100644 index 00000000000..d6843e492ba --- /dev/null +++ b/docs/ja/sql_reference/data_types/float.md @@ -0,0 +1,87 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 41 +toc_title: Float32,Float64 +--- + +# Float32,Float64 {#float32-float64} + +[浮動小数点数](https://en.wikipedia.org/wiki/IEEE_754). 
+
+型はCの型と同じです:
+
+- `Float32` - `float`
+- `Float64` - `double`
+
+可能な限り、データは整数形式で格納することをお勧めします。 たとえば、固定精度の数値は、金額やページの読み込み時間(ミリ秒単位)などの整数値に変換して格納します。
+
+## 浮動小数点数の使用 {#using-floating-point-numbers}
+
+- 浮動小数点数を使用した計算では、丸め誤差が発生することがあります。
+
+<!-- -->
+
+``` sql
+SELECT 1 - 0.9
+```
+
+``` text
+┌───────minus(1, 0.9)─┐
+│ 0.09999999999999998 │
+└─────────────────────┘
+```
+
+- 計算の結果は、計算方法(プロセッサの種類とコンピュータシステムのアーキテクチャ)に依存します。
+- 浮動小数点の計算の結果は、無限大 (`Inf`) や “not-a-number” (`NaN`) などの数値になる可能性があります。 計算の結果を処理する際には、これを考慮する必要があります。
+- テキストから浮動小数点数を解析する場合、結果は最も近いマシン表現可能な数値ではない可能性があります。
+
+## NaNおよびInf {#data_type-float-nan-inf}
+
+標準SQLとは対照的に、ClickHouseは浮動小数点数の次のカテゴリをサポートしています:
+
+- `Inf` – Infinity.
+
+<!-- -->
+
+``` sql
+SELECT 0.5 / 0
+```
+
+``` text
+┌─divide(0.5, 0)─┐
+│            inf │
+└────────────────┘
+```
+
+- `-Inf` – Negative infinity.
+
+<!-- -->
+
+``` sql
+SELECT -0.5 / 0
+```
+
+``` text
+┌─divide(-0.5, 0)─┐
+│            -inf │
+└─────────────────┘
+```
+
+- `NaN` – Not a number.
+
+<!-- -->
+
+``` sql
+SELECT 0 / 0
+```
+
+``` text
+┌─divide(0, 0)─┐
+│           nan │
+└──────────────┘
+```
+
+`NaN` のソート規則については、 [ORDER BY clause](../sql_reference/statements/select.md) のセクションを参照してください。
+
+[元の記事](https://clickhouse.tech/docs/en/data_types/float/)
diff --git a/docs/ja/sql_reference/data_types/index.md b/docs/ja/sql_reference/data_types/index.md
deleted file mode 120000
index 66fe846170f..00000000000
--- a/docs/ja/sql_reference/data_types/index.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/sql_reference/data_types/index.md
\ No newline at end of file
diff --git a/docs/ja/sql_reference/data_types/index.md b/docs/ja/sql_reference/data_types/index.md
new file mode 100644
index 00000000000..126f3b95f04
--- /dev/null
+++ b/docs/ja/sql_reference/data_types/index.md
@@ -0,0 +1,15 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_folder_title: Data Types
+toc_priority: 37
+toc_title: "\u5C0E\u5165"
+---
+
+# データ型 {#data_types}
+
+ClickHouseは、表のセルにさまざまな種類のデータを格納できます。
+
+この章では、サポートされているデータ型と、それらを使用および実装する際の特別な考慮事項について説明します。
+
+[元の記事](https://clickhouse.tech/docs/en/data_types/)
diff --git a/docs/ja/sql_reference/data_types/int_uint.md b/docs/ja/sql_reference/data_types/int_uint.md
deleted file mode 120000
index 4fad252b81a..00000000000
--- a/docs/ja/sql_reference/data_types/int_uint.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/sql_reference/data_types/int_uint.md
\ No newline at end of file
diff --git a/docs/ja/sql_reference/data_types/int_uint.md b/docs/ja/sql_reference/data_types/int_uint.md
new file mode 100644
index 00000000000..17854f5d484
--- /dev/null
+++ b/docs/ja/sql_reference/data_types/int_uint.md
@@ -0,0 +1,27 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 40
+toc_title: "UInt8\u3001UInt16\u3001UInt32\u3001UInt64\u3001Int8\u3001Int16\u3001Int32\u3001\
+  Int64"
+---
+
+# UInt8、UInt16、UInt32、UInt64、Int8、Int16、Int32、Int64 {#uint8-uint16-uint32-uint64-int8-int16-int32-int64}
+
+符号付き、または符号なしの固定長整数です。
+
+## Intの範囲 {#int-ranges}
+
+- Int8-\[-128:127\]
+- Int16-\[-32768:32767\]
+- Int32-\[-2147483648:2147483647\]
+- Int64-\[-9223372036854775808:9223372036854775807\]
+
+## UIntの範囲 {#uint-ranges}
+
+- UInt8-\[0:255\]
+- UInt16-\[0:65535\]
+- UInt32-\[0:4294967295\]
+- UInt64-\[0:18446744073709551615\]
+
+[元の記事](https://clickhouse.tech/docs/en/data_types/int_uint/)
diff --git a/docs/ja/sql_reference/data_types/nested_data_structures/index.md b/docs/ja/sql_reference/data_types/nested_data_structures/index.md
deleted file mode 120000
index
05ee08381d1..00000000000 --- a/docs/ja/sql_reference/data_types/nested_data_structures/index.md +++ /dev/null @@ -1 +0,0 @@ -../../../../en/sql_reference/data_types/nested_data_structures/index.md \ No newline at end of file diff --git a/docs/ja/sql_reference/data_types/nested_data_structures/index.md b/docs/ja/sql_reference/data_types/nested_data_structures/index.md new file mode 100644 index 00000000000..c1177b2d9c0 --- /dev/null +++ b/docs/ja/sql_reference/data_types/nested_data_structures/index.md @@ -0,0 +1,12 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_folder_title: Nested Data Structures +toc_hidden: true +toc_priority: 54 +toc_title: "\u96A0\u3055\u308C\u305F" +--- + +# 入れ子のデータ構造 {#nested-data-structures} + +[元の記事](https://clickhouse.tech/docs/en/data_types/nested_data_structures/) diff --git a/docs/ja/sql_reference/data_types/nested_data_structures/nested.md b/docs/ja/sql_reference/data_types/nested_data_structures/nested.md deleted file mode 120000 index f20fc672085..00000000000 --- a/docs/ja/sql_reference/data_types/nested_data_structures/nested.md +++ /dev/null @@ -1 +0,0 @@ -../../../../en/sql_reference/data_types/nested_data_structures/nested.md \ No newline at end of file diff --git a/docs/ja/sql_reference/data_types/nested_data_structures/nested.md b/docs/ja/sql_reference/data_types/nested_data_structures/nested.md new file mode 100644 index 00000000000..2bbf4ed73a7 --- /dev/null +++ b/docs/ja/sql_reference/data_types/nested_data_structures/nested.md @@ -0,0 +1,106 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 57 +toc_title: "\u30CD\u30B9\u30C8(Name1\u30BF\u30A4\u30D71,Name2\u30BF\u30A4\u30D72,...)" +--- + +# Nested(name1 Type1, Name2 Type2, …) {#nestedname1-type1-name2-type2} + +A nested data structure is like a table inside a cell. The parameters of a nested data structure – the column names and types – are specified the same way as in a [CREATE TABLE](../../../sql_reference/statements/create.md) クエリ。 各テーブル行は、入れ子になったデータ構造内の任意の数の行に対応できます。 + +例えば: + +``` sql +CREATE TABLE test.visits +( + CounterID UInt32, + StartDate Date, + Sign Int8, + IsNew UInt8, + VisitID UInt64, + UserID UInt64, + ... + Goals Nested + ( + ID UInt32, + Serial UInt32, + EventTime DateTime, + Price Int64, + OrderID String, + CurrencyID UInt32 + ), + ... 
+) ENGINE = CollapsingMergeTree(StartDate, intHash32(UserID), (CounterID, StartDate, intHash32(UserID), VisitID), 8192, Sign)
+```
+
+この例では、コンバージョン(達成された目標)に関するデータを含む、入れ子になったデータ構造 `Goals` を宣言しています。 ‘visits’ テーブルの各行は、ゼロ個または任意の数のコンバージョンに対応できます。
+
+単一の入れ子レベルのみがサポートされます。 配列を含む入れ子構造の列は、多次元配列と同等であるため、サポートが制限されています(MergeTreeエンジンを使用してこれらの列をテーブルに格納することはサポートされていません)。
+
+ほとんどの場合、入れ子になったデータ構造で作業するときは、その列をドットで区切られた列名で指定します。 これらの列は、一致する型の配列を構成します。 単一の入れ子になったデータ構造のすべての列配列は、同じ長さを持ちます。
+
+例えば:
+
+``` sql
+SELECT
+    Goals.ID,
+    Goals.EventTime
+FROM test.visits
+WHERE CounterID = 101500 AND length(Goals.ID) < 5
+LIMIT 10
+```
+
+``` text
+┌─Goals.ID───────────────────────┬─Goals.EventTime───────────────────────────────────────────────────────────────────────────┐
+│ [1073752,591325,591325]        │ ['2014-03-17 16:38:10','2014-03-17 16:38:48','2014-03-17 16:42:27']                       │
+│ [1073752]                      │ ['2014-03-17 00:28:25']                                                                   │
+│ [1073752]                      │ ['2014-03-17 10:46:20']                                                                   │
+│ [1073752,591325,591325,591325] │ ['2014-03-17 13:59:20','2014-03-17 22:17:55','2014-03-17 22:18:07','2014-03-17 22:18:51'] │
+│ []                             │ []                                                                                        │
+│ [1073752,591325,591325]        │ ['2014-03-17 11:37:06','2014-03-17 14:07:47','2014-03-17 14:36:21']                       │
+│ []                             │ []                                                                                        │
+│ []                             │ []                                                                                        │
+│ [591325,1073752]               │ ['2014-03-17 00:46:05','2014-03-17 00:46:05']                                             │
+│ [1073752,591325,591325,591325] │ ['2014-03-17 13:28:33','2014-03-17 13:30:26','2014-03-17 18:51:21','2014-03-17 18:51:45'] │
+└────────────────────────────────┴───────────────────────────────────────────────────────────────────────────────────────────┘
+```
+
+入れ子になったデータ構造は、同じ長さの複数の列配列のセットと考えるのが最も簡単です。
+
+SELECTクエリで、個々の列ではなく入れ子になったデータ構造全体の名前を指定できる唯一の場所は、ARRAY JOIN句です。 詳細については、“ARRAY JOIN clause” を参照してください。 例えば:
+
+``` sql
+SELECT
+    Goal.ID,
+    Goal.EventTime
+FROM test.visits
+ARRAY JOIN Goals AS Goal
+WHERE CounterID = 101500 AND length(Goals.ID) < 5
+LIMIT 10
+```
+
+``` text
+┌─Goal.ID─┬──────Goal.EventTime─┐
+│ 1073752 │ 2014-03-17 16:38:10 │
+│  591325 │ 2014-03-17 16:38:48 │
+│  591325 │ 2014-03-17 16:42:27 │
+│ 1073752 │ 2014-03-17 00:28:25 │
+│ 1073752 │ 2014-03-17 10:46:20 │
+│ 1073752 │ 2014-03-17 13:59:20 │
+│  591325 │ 2014-03-17 22:17:55 │
+│  591325 │ 2014-03-17 22:18:07 │
+│  591325 │ 2014-03-17 22:18:51 │
+│ 1073752 │ 2014-03-17 11:37:06 │
+└─────────┴─────────────────────┘
+```
+
+入れ子になったデータ構造全体に対してSELECTを実行することはできません。 明示的にリストできるのは、その一部である個々の列のみです。
+
+INSERTクエリでは、入れ子になったデータ構造のすべてのコンポーネント列配列を(個々の列配列と同様に)個別に渡す必要があります。 挿入時に、それらが同じ長さを持つことをシステムがチェックします。
+
+DESCRIBEクエリの場合、入れ子になったデータ構造内の列は、同じ方法で別々にリストされます。
+
+入れ子になったデータ構造内の要素に対するALTERクエリには制限があります。
+
+[元の記事](https://clickhouse.tech/docs/en/data_types/nested_data_structures/nested/)
diff --git a/docs/ja/sql_reference/data_types/nullable.md b/docs/ja/sql_reference/data_types/nullable.md
deleted file mode 120000
index bf5d978c517..00000000000
--- a/docs/ja/sql_reference/data_types/nullable.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/sql_reference/data_types/nullable.md
\ No newline at end of file
diff --git a/docs/ja/sql_reference/data_types/nullable.md b/docs/ja/sql_reference/data_types/nullable.md
new file mode 100644
index 00000000000..79b06a832f4
--- /dev/null
+++ b/docs/ja/sql_reference/data_types/nullable.md
@@ -0,0 +1,46 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 54
+toc_title: Nullable
+---
+
+# Nullable(typename) {#data_type-nullable}
+
+通常の値と並んで、“missing value”(値の欠落)を意味する特別なマーカー([NULL](../../sql_reference/syntax.md))を保存できる型です。 たとえば、 `Nullable(Int8)` 型の列は `Int8` 型の値を保存でき、値を持たない行には `NULL` が格納されます。
+
+`TypeName` として、 [配列](array.md) や [タプル](tuple.md) のような複合データ型は使用できません。
複合データ型は `Nullable` 型の値を含むことができます。例: `Array(Nullable(Int8))`.
+
+`Nullable` 型のフィールドは、テーブルのインデックスに含めることができません。
+
+ClickHouseサーバー構成で別途指定されている場合を除き、 `NULL` はあらゆる `Nullable` 型のデフォルト値です。
+
+## ストレージ機能 {#storage-features}
+
+テーブルの列に `Nullable` 型の値を保存するために、ClickHouseは値を持つ通常のファイルに加えて、 `NULL` マスクを持つ別のファイルを使用します。 マスクファイル内のエントリにより、ClickHouseは各テーブル行について、 `NULL` と対応するデータ型のデフォルト値とを区別できます。 追加のファイルがあるため、 `Nullable` 列は同様の通常の列と比較して追加の記憶領域を消費します。
+
+!!! info "メモ"
+    `Nullable` の使用は、ほとんどの場合パフォーマンスに悪影響を及ぼします。データベースを設計する際には、この点に留意してください。
+
+## 使用例 {#usage-example}
+
+``` sql
+CREATE TABLE t_null(x Int8, y Nullable(Int8)) ENGINE TinyLog
+```
+
+``` sql
+INSERT INTO t_null VALUES (1, NULL), (2, 3)
+```
+
+``` sql
+SELECT x + y FROM t_null
+```
+
+``` text
+┌─plus(x, y)─┐
+│       ᴺᵁᴸᴸ │
+│          5 │
+└────────────┘
+```
+
+[元の記事](https://clickhouse.tech/docs/en/data_types/nullable/)
diff --git a/docs/ja/sql_reference/data_types/special_data_types/expression.md b/docs/ja/sql_reference/data_types/special_data_types/expression.md
deleted file mode 120000
index 5d177037f8e..00000000000
--- a/docs/ja/sql_reference/data_types/special_data_types/expression.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../../en/sql_reference/data_types/special_data_types/expression.md
\ No newline at end of file
diff --git a/docs/ja/sql_reference/data_types/special_data_types/expression.md b/docs/ja/sql_reference/data_types/special_data_types/expression.md
new file mode 100644
index 00000000000..9786e8349c8
--- /dev/null
+++ b/docs/ja/sql_reference/data_types/special_data_types/expression.md
@@ -0,0 +1,12 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 58
+toc_title: "\u5F0F"
+---
+
+# 式 {#expression}
+
+式は、高階関数でラムダを表すために使用されます。
+
+[元の記事](https://clickhouse.tech/docs/en/data_types/special_data_types/expression/)
diff --git a/docs/ja/sql_reference/data_types/special_data_types/index.md b/docs/ja/sql_reference/data_types/special_data_types/index.md
deleted file mode 120000
index 553777ba110..00000000000
--- a/docs/ja/sql_reference/data_types/special_data_types/index.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../../en/sql_reference/data_types/special_data_types/index.md
\ No newline at end of file
diff --git a/docs/ja/sql_reference/data_types/special_data_types/index.md b/docs/ja/sql_reference/data_types/special_data_types/index.md
new file mode 100644
index 00000000000..9e670337040
--- /dev/null
+++ b/docs/ja/sql_reference/data_types/special_data_types/index.md
@@ -0,0 +1,14 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_folder_title: Special Data Types
+toc_hidden: true
+toc_priority: 55
+toc_title: "\u96A0\u3055\u308C\u305F"
+---
+
+# 特殊なデータ型 {#special-data-types}
+
+特殊なデータ型の値は、テーブルに保存するためにシリアル化することも、クエリ結果に出力することもできませんが、クエリの実行中の中間結果として使用できます。
+
+[元の記事](https://clickhouse.tech/docs/en/data_types/special_data_types/)
diff --git a/docs/ja/sql_reference/data_types/special_data_types/interval.md b/docs/ja/sql_reference/data_types/special_data_types/interval.md
deleted file mode 120000
index 2092aa219e3..00000000000
--- a/docs/ja/sql_reference/data_types/special_data_types/interval.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../../en/sql_reference/data_types/special_data_types/interval.md
\ No newline at end of file
diff --git a/docs/ja/sql_reference/data_types/special_data_types/interval.md b/docs/ja/sql_reference/data_types/special_data_types/interval.md
new file mode 100644
index 00000000000..affd9034376
--- /dev/null
+++ b/docs/ja/sql_reference/data_types/special_data_types/interval.md
@@ -0,0
+1,85 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 61 +toc_title: "\u9593\u9694" +--- + +# 間隔 {#data-type-interval} + +時刻と日付の間隔を表すデータ型のファミリ。 結果のタイプ [INTERVAL](../../../sql_reference/operators.md#operator-interval) オペレーター + +!!! warning "警告" + `Interval` データ型の値はテーブルに格納できません。 + +構造: + +- 符号なし整数値としての時間間隔。 +- 間隔のタイプ。 + +サポートさ: + +- `SECOND` +- `MINUTE` +- `HOUR` +- `DAY` +- `WEEK` +- `MONTH` +- `QUARTER` +- `YEAR` + +各区間タイプには、個別のデータタイプがあります。 たとえば、 `DAY` 間隔はに対応します `IntervalDay` データ型: + +``` sql +SELECT toTypeName(INTERVAL 4 DAY) +``` + +``` text +┌─toTypeName(toIntervalDay(4))─┐ +│ IntervalDay │ +└──────────────────────────────┘ +``` + +## 使用上の注意 {#data-type-interval-usage-remarks} + +を使用することができ `Interval`-との算術操作のタイプ値 [日付](../../../sql_reference/data_types/date.md) と [DateTime](../../../sql_reference/data_types/datetime.md)-タイプの値。 たとえば、現在の時刻に4日を追加できます: + +``` sql +SELECT now() as current_date_time, current_date_time + INTERVAL 4 DAY +``` + +``` text +┌───current_date_time─┬─plus(now(), toIntervalDay(4))─┐ +│ 2019-10-23 10:58:45 │ 2019-10-27 10:58:45 │ +└─────────────────────┴───────────────────────────────┘ +``` + +間隔の異なる種類できない。 次のような間隔は使用できません `4 DAY 1 HOUR`. 間隔は、間隔の最小単位(間隔など)より小さいか等しい単位で指定します `1 day and an hour` 間隔は次のように表現できます `25 HOUR` または `90000 SECOND`. + +あなたは算術演算を実行することはできません `Interval`-値を入力しますが、異なるタイプの間隔を追加することができます。 `Date` または `DateTime` データ型。 例えば: + +``` sql +SELECT now() AS current_date_time, current_date_time + INTERVAL 4 DAY + INTERVAL 3 HOUR +``` + +``` text +┌───current_date_time─┬─plus(plus(now(), toIntervalDay(4)), toIntervalHour(3))─┐ +│ 2019-10-23 11:16:28 │ 2019-10-27 14:16:28 │ +└─────────────────────┴────────────────────────────────────────────────────────┘ +``` + +次のクエリでは、例外が発生します: + +``` sql +select now() AS current_date_time, current_date_time + (INTERVAL 4 DAY + INTERVAL 3 HOUR) +``` + +``` text +Received exception from server (version 19.14.1): +Code: 43. DB::Exception: Received from localhost:9000. DB::Exception: Wrong argument types for function plus: if one argument is Interval, then another must be Date or DateTime.. +``` + +## また見なさい {#see-also} + +- [INTERVAL](../../../sql_reference/operators.md#operator-interval) 演算子 +- [toInterval](../../../sql_reference/functions/type_conversion_functions.md#function-tointerval) 型変換関数 diff --git a/docs/ja/sql_reference/data_types/special_data_types/nothing.md b/docs/ja/sql_reference/data_types/special_data_types/nothing.md deleted file mode 120000 index 48281bdede6..00000000000 --- a/docs/ja/sql_reference/data_types/special_data_types/nothing.md +++ /dev/null @@ -1 +0,0 @@ -../../../../en/sql_reference/data_types/special_data_types/nothing.md \ No newline at end of file diff --git a/docs/ja/sql_reference/data_types/special_data_types/nothing.md b/docs/ja/sql_reference/data_types/special_data_types/nothing.md new file mode 100644 index 00000000000..8bd4b852d9f --- /dev/null +++ b/docs/ja/sql_reference/data_types/special_data_types/nothing.md @@ -0,0 +1,26 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 60 +toc_title: "\u4F55\u3082\u306A\u3044" +--- + +# 何もない {#nothing} + +このデータ型の唯一の目的は、値が予期されないケースを表すことです。 だから、作成することはできません `Nothing` タイプ値。 + +たとえば、リテラル [NULL](../../../sql_reference/syntax.md#null-literal) はタイプの `Nullable(Nothing)`. 詳細はこちら [Nullable](../../../sql_reference/data_types/nullable.md). 
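+
+これは、次のクエリで確認できます:
+
+``` sql
+SELECT toTypeName(NULL)
+```
+
+``` text
+┌─toTypeName(NULL)──┐
+│ Nullable(Nothing) │
+└───────────────────┘
+```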
+ +その `Nothing` 型は、空の配列を示すためにも使用できます: + +``` sql +SELECT toTypeName(array()) +``` + +``` text +┌─toTypeName(array())─┐ +│ Array(Nothing) │ +└─────────────────────┘ +``` + +[元の記事](https://clickhouse.tech/docs/en/data_types/special_data_types/nothing/) diff --git a/docs/ja/sql_reference/data_types/special_data_types/set.md b/docs/ja/sql_reference/data_types/special_data_types/set.md deleted file mode 120000 index 9650840adde..00000000000 --- a/docs/ja/sql_reference/data_types/special_data_types/set.md +++ /dev/null @@ -1 +0,0 @@ -../../../../en/sql_reference/data_types/special_data_types/set.md \ No newline at end of file diff --git a/docs/ja/sql_reference/data_types/special_data_types/set.md b/docs/ja/sql_reference/data_types/special_data_types/set.md new file mode 100644 index 00000000000..a9fb57e7249 --- /dev/null +++ b/docs/ja/sql_reference/data_types/special_data_types/set.md @@ -0,0 +1,12 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 59 +toc_title: "\u30BB\u30C3\u30C8" +--- + +# セット {#set} + +の右半分のために使用される [IN](../../../sql_reference/statements/select.md#select-in-operators) 式。 + +[元の記事](https://clickhouse.tech/docs/en/data_types/special_data_types/set/) diff --git a/docs/ja/sql_reference/data_types/string.md b/docs/ja/sql_reference/data_types/string.md deleted file mode 120000 index 9366c56e949..00000000000 --- a/docs/ja/sql_reference/data_types/string.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/data_types/string.md \ No newline at end of file diff --git a/docs/ja/sql_reference/data_types/string.md b/docs/ja/sql_reference/data_types/string.md new file mode 100644 index 00000000000..893e79e14c2 --- /dev/null +++ b/docs/ja/sql_reference/data_types/string.md @@ -0,0 +1,20 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 44 +toc_title: "\u6587\u5B57\u5217" +--- + +# 文字列 {#string} + +任意の長さの文字列。 長さは限定されない。 値には、nullバイトを含む任意のバイトセットを含めることができます。 +文字列型は、他のdbmsのvarchar、blob、clobなどの型を置き換えます。 + +## エンコード {#encodings} + +ClickHouseにはエンコーディングの概念はありません。 文字列には、任意のバイトセットを含めることができます。 +が必要な場合は店舗テキストの使用をお勧めしまutf-8エンコーディングです。 少なくとも、端末がutf-8を使用している場合(推奨)、変換を行わずに値を読み書きできます。 +同様に、文字列を操作するための特定の関数には、文字列にutf-8でエンコードされたテキストを表すバイトのセットが含まれているという前提の下 +たとえば、 ‘length’ 関数は文字列の長さをバイト単位で計算します。 ‘lengthUTF8’ 関数を計算し、文字列の長さはUnicodeコードポイント価値をとした場合は、UTF-8エンコードされます。 + +[元の記事](https://clickhouse.tech/docs/en/data_types/string/) diff --git a/docs/ja/sql_reference/data_types/tuple.md b/docs/ja/sql_reference/data_types/tuple.md deleted file mode 120000 index a2515dd791c..00000000000 --- a/docs/ja/sql_reference/data_types/tuple.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/data_types/tuple.md \ No newline at end of file diff --git a/docs/ja/sql_reference/data_types/tuple.md b/docs/ja/sql_reference/data_types/tuple.md new file mode 100644 index 00000000000..61492fe28f4 --- /dev/null +++ b/docs/ja/sql_reference/data_types/tuple.md @@ -0,0 +1,52 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 53 +toc_title: "\u30BF\u30D7\u30EB(T1,T2,...)" +--- + +# Tuple(t1, T2, …) {#tuplet1-t2} + +要素のタプル。 [タイプ](index.md#data_types). + +タプルは、一時列のグループ化に使用されます。 列は、in式がクエリで使用されている場合、およびラムダ関数の特定の仮パラメータを指定するためにグループ化できます。 詳細については、以下を参照してください [演算子の場合](../../sql_reference/statements/select.md) と [高階関数](../../sql_reference/functions/higher_order_functions.md). 
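+
+たとえば、 `IN` 式では複数の列をタプルとしてまとめて比較できます(以下は、 `a` 列と `b` 列を持つ仮のテーブル `t` を想定したスケッチです):
+
+``` sql
+-- (a, b) の組が、右側に列挙したタプルのいずれかに一致する行を選択します
+SELECT * FROM t WHERE (a, b) IN ((1, 2), (3, 4));
+```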
+ +タプルは、クエリの結果になります。 この場合、json以外のテキスト形式の場合、値は角かっこでカンマ区切りになります。 json形式では、タプルは配列として出力されます(角括弧内)。 + +## タプルの作成 {#creating-a-tuple} + +関数を使用してタプルを作成することができます: + +``` sql +tuple(T1, T2, ...) +``` + +タプルの作成例: + +``` sql +SELECT tuple(1,'a') AS x, toTypeName(x) +``` + +``` text +┌─x───────┬─toTypeName(tuple(1, 'a'))─┐ +│ (1,'a') │ Tuple(UInt8, String) │ +└─────────┴───────────────────────────┘ +``` + +## データ型の操作 {#working-with-data-types} + +タプルをオンザフライで作成するとき、clickhouseは引数の値を格納できる型の最小値として各引数の型を自動的に検出します。 引数が [NULL](../../sql_reference/syntax.md#null-literal) タプル要素の型は次のとおりです [Nullable](nullable.md). + +自動データ型検出の例: + +``` sql +SELECT tuple(1, NULL) AS x, toTypeName(x) +``` + +``` text +┌─x────────┬─toTypeName(tuple(1, NULL))──────┐ +│ (1,NULL) │ Tuple(UInt8, Nullable(Nothing)) │ +└──────────┴─────────────────────────────────┘ +``` + +[元の記事](https://clickhouse.tech/docs/en/data_types/tuple/) diff --git a/docs/ja/sql_reference/data_types/uuid.md b/docs/ja/sql_reference/data_types/uuid.md deleted file mode 120000 index f7879287ada..00000000000 --- a/docs/ja/sql_reference/data_types/uuid.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/data_types/uuid.md \ No newline at end of file diff --git a/docs/ja/sql_reference/data_types/uuid.md b/docs/ja/sql_reference/data_types/uuid.md new file mode 100644 index 00000000000..788e7ade8a1 --- /dev/null +++ b/docs/ja/sql_reference/data_types/uuid.md @@ -0,0 +1,77 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 46 +toc_title: UUID +--- + +# UUID {#uuid-data-type} + +Universally unique identifier(UUID)は、レコードを識別するために使用される16バイトの数値です。 UUIDの詳細については、以下を参照してください [Wikipedia](https://en.wikipedia.org/wiki/Universally_unique_identifier). + +UUID型の値の例を以下に示します: + +``` text +61f0c404-5cb3-11e7-907b-a6006ad3dba0 +``` + +新しいレコードを挿入するときにuuid列の値を指定しない場合、uuidの値はゼロで埋められます: + +``` text +00000000-0000-0000-0000-000000000000 +``` + +## 生成する方法 {#how-to-generate} + +UUID値を生成するには、ClickHouseに次の値を指定します [generateUUIDv4](../../sql_reference/functions/uuid_functions.md) 機能。 + +## 使用例 {#usage-example} + +**例1** + +この例では、uuid型の列を使用してテーブルを作成し、テーブルに値を挿入する方法を示します。 + +``` sql +CREATE TABLE t_uuid (x UUID, y String) ENGINE=TinyLog +``` + +``` sql +INSERT INTO t_uuid SELECT generateUUIDv4(), 'Example 1' +``` + +``` sql +SELECT * FROM t_uuid +``` + +``` text +┌────────────────────────────────────x─┬─y─────────┐ +│ 417ddc5d-e556-4d27-95dd-a34d84e46a50 │ Example 1 │ +└──────────────────────────────────────┴───────────┘ +``` + +**例2** + +この例では、新しいレコードを挿入するときにuuid列の値が指定されていません。 + +``` sql +INSERT INTO t_uuid (y) VALUES ('Example 2') +``` + +``` sql +SELECT * FROM t_uuid +``` + +``` text +┌────────────────────────────────────x─┬─y─────────┐ +│ 417ddc5d-e556-4d27-95dd-a34d84e46a50 │ Example 1 │ +│ 00000000-0000-0000-0000-000000000000 │ Example 2 │ +└──────────────────────────────────────┴───────────┘ +``` + +## 制限 {#restrictions} + +UUIDデータ型は、以下の関数のみをサポートします [文字列](string.md) データ型もサポートします(たとえば, [分](../../sql_reference/aggregate_functions/reference.md#agg_function-min), [最大](../../sql_reference/aggregate_functions/reference.md#agg_function-max)、と [カウント](../../sql_reference/aggregate_functions/reference.md#agg_function-count)). + +UUIDデータ型は、算術演算ではサポートされません(たとえば, [abs](../../sql_reference/functions/arithmetic_functions.md#arithm_func-abs) 以下のような)または集約関数 [合計](../../sql_reference/aggregate_functions/reference.md#agg_function-sum) と [平均](../../sql_reference/aggregate_functions/reference.md#agg_function-avg). 
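+
+たとえば、本文の例で作成した `t_uuid` テーブルでは、次の集約はサポートされていますが、 `sum(x)` のような算術系の関数は例外になります(本文の説明に基づくスケッチです):
+
+``` sql
+-- min / max / count は UUID 列でサポートされています
+SELECT count(x), min(x), max(x) FROM t_uuid;
+```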
+
+[元の記事](https://clickhouse.tech/docs/en/data_types/uuid/)
diff --git a/docs/ja/sql_reference/dictionaries/external_dictionaries/external_dicts.md b/docs/ja/sql_reference/dictionaries/external_dictionaries/external_dicts.md
deleted file mode 120000
index 54b494ae612..00000000000
--- a/docs/ja/sql_reference/dictionaries/external_dictionaries/external_dicts.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../../en/sql_reference/dictionaries/external_dictionaries/external_dicts.md
\ No newline at end of file
diff --git a/docs/ja/sql_reference/dictionaries/external_dictionaries/external_dicts.md b/docs/ja/sql_reference/dictionaries/external_dictionaries/external_dicts.md
new file mode 100644
index 00000000000..0bee2e95916
--- /dev/null
+++ b/docs/ja/sql_reference/dictionaries/external_dictionaries/external_dicts.md
@@ -0,0 +1,56 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 39
+toc_title: "\u4E00\u822C\u7684\u306A\u8AAC\u660E"
+---
+
+# 外部辞書 {#dicts-external-dicts}
+
+さまざまなデータソースから独自の辞書を追加できます。 辞書のデータソースには、ローカルテキストファイルまたは実行可能ファイル、HTTP(s)リソース、または別のDBMSを指定できます。 詳細については、“[外部辞書のソース](external_dicts_dict_sources.md)” を参照してください。
+
+ClickHouseは次のことを行います:
+
+- 完全または部分的にRAMに辞書を格納します。
+- 辞書を定期的に更新し、欠損値を動的に読み込みます。 つまり、辞書を動的に読み込むことができます。
+- XMLファイルまたは [DDLクエリ](../../statements/create.md#create-dictionary-query) を使用して外部辞書を作成できます。
+
+外部辞書の設定は、一つ以上のXMLファイルに配置することができます。 設定へのパスは [dictionaries\_config](../../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-dictionaries_config) パラメータで指定します。
+
+辞書は、 [dictionaries\_lazy\_load](../../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-dictionaries_lazy_load) 設定に応じて、サーバーの起動時または最初の使用時にロードされます。
+
+辞書構成ファイルの形式は次のとおりです:
+
+``` xml
+<yandex>
+    <comment>An optional element with any content. Ignored by the ClickHouse server.</comment>
+
+    <!-- Optional element. File name with substitutions -->
+    <include_from>/etc/metrika.xml</include_from>
+
+    <dictionary>
+        <!-- Dictionary configuration. -->
+        <!-- There can be any number of dictionary sections in a configuration file. -->
+    </dictionary>
+</yandex>
+```
+
+同じファイル内に、任意の数の辞書を [設定](external_dicts_dict.md) できます。
+
+[辞書のDDLクエリ](../../statements/create.md#create-dictionary-query) では、サーバー構成に追加のレコードは必要ありません。 これにより、辞書をテーブルやビューのような第一級のエンティティとして扱うことができます。
+
+!!! attention "注意"
attention "注意" + 小さな辞書の値をaに記述することによって変換できます `SELECT` クエリ(参照 [変換](../../../sql_reference/functions/other_functions.md) 機能)。 この機能は外部辞書とは関係ありません。 + +## また見なさい {#ext-dicts-see-also} + +- [外部ディクショナリの設定](external_dicts_dict.md) +- [辞書をメモリに保存する](external_dicts_dict_layout.md) +- [辞書の更新](external_dicts_dict_lifetime.md) +- [外部辞書のソース](external_dicts_dict_sources.md) +- [辞書のキーとフィールド](external_dicts_dict_structure.md) +- [外部辞書を操作するための関数](../../../sql_reference/functions/ext_dict_functions.md) + +[元の記事](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts/) diff --git a/docs/ja/sql_reference/dictionaries/external_dictionaries/external_dicts_dict.md b/docs/ja/sql_reference/dictionaries/external_dictionaries/external_dicts_dict.md deleted file mode 120000 index 43857a5d998..00000000000 --- a/docs/ja/sql_reference/dictionaries/external_dictionaries/external_dicts_dict.md +++ /dev/null @@ -1 +0,0 @@ -../../../../en/sql_reference/dictionaries/external_dictionaries/external_dicts_dict.md \ No newline at end of file diff --git a/docs/ja/sql_reference/dictionaries/external_dictionaries/external_dicts_dict.md b/docs/ja/sql_reference/dictionaries/external_dictionaries/external_dicts_dict.md new file mode 100644 index 00000000000..2d10818a085 --- /dev/null +++ b/docs/ja/sql_reference/dictionaries/external_dictionaries/external_dicts_dict.md @@ -0,0 +1,53 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 40 +toc_title: "\u5916\u90E8\u30C7\u30A3\u30AF\u30B7\u30E7\u30CA\u30EA\u306E\u8A2D\u5B9A" +--- + +# 外部ディクショナリの設定 {#dicts-external-dicts-dict} + +Dictionaryがxmlファイルを使用して構成されている場合、than dictionary構成は次の構造を持ちます: + +``` xml + + dict_name + + + + + + + + + + + + + + + + + +``` + +対応 [DDL-クエリ](../../statements/create.md#create-dictionary-query) 次の構造を持っています: + +``` sql +CREATE DICTIONARY dict_name +( + ... -- attributes +) +PRIMARY KEY ... -- complex or single key configuration +SOURCE(...) -- Source configuration +LAYOUT(...) -- Memory layout configuration +LIFETIME(...) -- Lifetime of dictionary in memory +``` + +- `name` – The identifier that can be used to access the dictionary. Use the characters `[a-zA-Z0-9_\-]`. +- [ソース](external_dicts_dict_sources.md) — Source of the dictionary. +- [レイアウト](external_dicts_dict_layout.md) — Dictionary layout in memory. +- [構造](external_dicts_dict_structure.md) — Structure of the dictionary . A key and attributes that can be retrieved by this key. +- [寿命](external_dicts_dict_lifetime.md) — Frequency of dictionary updates. 
+ +[元の記事](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict/) diff --git a/docs/ja/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_hierarchical.md b/docs/ja/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_hierarchical.md deleted file mode 120000 index 5a9e801e0ae..00000000000 --- a/docs/ja/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_hierarchical.md +++ /dev/null @@ -1 +0,0 @@ -../../../../en/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_hierarchical.md \ No newline at end of file diff --git a/docs/ja/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_hierarchical.md b/docs/ja/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_hierarchical.md new file mode 100644 index 00000000000..9ca1ef49b30 --- /dev/null +++ b/docs/ja/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_hierarchical.md @@ -0,0 +1,70 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 45 +toc_title: "\u968E\u5C64\u8F9E\u66F8" +--- + +# 階層辞書 {#hierarchical-dictionaries} + +クリックハウスは、 [数値キー](external_dicts_dict_structure.md#ext_dict-numeric-key). + +次の階層構造を見てください: + +``` text +0 (Common parent) +│ +├── 1 (Russia) +│ │ +│ └── 2 (Moscow) +│ │ +│ └── 3 (Center) +│ +└── 4 (Great Britain) + │ + └── 5 (London) +``` + +この階層として表現することができ、以下の辞書。 + +| region\_id | parent\_region | region\_name | +|------------|----------------|--------------| +| 1 | 0 | ロシア | +| 2 | 1 | モスクワ | +| 3 | 2 | 中央 | +| 4 | 0 | イギリス | +| 5 | 4 | ロンドン | + +この表には列が含まれます `parent_region` これには、要素の最も近い親のキーが含まれます。 + +クリックハウスは [階層](external_dicts_dict_structure.md#hierarchical-dict-attr) のための特性 [外部辞書](index.md) 属性。 このプロパティを使用すると、上記のような階層辞書を構成できます。 + +その [独裁主義体制](../../../sql_reference/functions/ext_dict_functions.md#dictgethierarchy) 関数を使用すると、要素の親チェーンを取得できます。 + +この例では、dictionaryの構造は次のようになります: + +``` xml + + + + region_id + + + + parent_region + UInt64 + 0 + true + + + + region_name + String + + + + + +``` + +[元の記事](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_hierarchical/) diff --git a/docs/ja/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_layout.md b/docs/ja/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_layout.md deleted file mode 120000 index adeeed331ad..00000000000 --- a/docs/ja/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_layout.md +++ /dev/null @@ -1 +0,0 @@ -../../../../en/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_layout.md \ No newline at end of file diff --git a/docs/ja/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_layout.md b/docs/ja/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_layout.md new file mode 100644 index 00000000000..cbfc661263e --- /dev/null +++ b/docs/ja/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_layout.md @@ -0,0 +1,373 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 41 +toc_title: "\u8F9E\u66F8\u3092\u30E1\u30E2\u30EA\u306B\u4FDD\u5B58\u3059\u308B" +--- + +# 辞書をメモリに保存する {#dicts-external-dicts-dict-layout} + +辞書をメモリに保存するには、さまざまな方法があります。 + +私たちはお勧め [フラット](#flat), [ハッシュ](#dicts-external_dicts_dict_layout-hashed) と [complex\_key\_hashed](#complex-key-hashed). 最適の処理速度を提供するかどれが。 + +キャッシュ推奨されていないものになる可能性のある性能や困難の選定に最適なパラメータ。 セクションの続きを読む “[キャッシュ](#cache)”. 
+ +辞書のパフォーマ: + +- 後に辞書を操作する関数を呼び出します `GROUP BY`. +- 射影として抽出する属性をマークします。 異なる属性値が異なるキーに対応する場合、属性はinjectiveと呼ばれます。 だからとき `GROUP BY` キーによって属性値をフェッチする関数を使用します。 `GROUP BY`. + +ClickHouseは、辞書のエラーの例外を生成します。 エラーの例: + +- アクセス中の辞書を読み込めませんでした。 +- エラーの照会 `cached` 辞書だ + +外部辞書のリストとそのステータスを表示することができます `system.dictionaries` テーブル。 + +構成は次のようになります: + +``` xml + + + ... + + + + + + ... + + +``` + +対応 [DDL-クエリ](../../statements/create.md#create-dictionary-query): + +``` sql +CREATE DICTIONARY (...) +... +LAYOUT(LAYOUT_TYPE(param value)) -- layout settings +... +``` + +## 辞書をメモリに格納する方法 {#ways-to-store-dictionaries-in-memory} + +- [フラット](#flat) +- [ハッシュ](#dicts-external_dicts_dict_layout-hashed) +- [sparse\_hashed](#dicts-external_dicts_dict_layout-sparse_hashed) +- [キャッシュ](#cache) +- [range\_hashed](#range-hashed) +- [complex\_key\_hashed](#complex-key-hashed) +- [complex\_key\_cache](#complex-key-cache) +- [ip\_trie](#ip-trie) + +### フラット {#flat} + +辞書は完全にフラット配列の形でメモリに格納されています。 辞書はどのくらいのメモリを使用しますか? この量は、最大のキーのサイズに比例します(使用されるスペース)。 + +辞書キーには、 `UInt64` 値は500,000に制限されます。 辞書を作成するときに大きなキーが検出された場合、ClickHouseは例外をスローし、辞書を作成しません。 + +すべてのタイプの源は支えられる。 更新時には、ファイルまたはテーブルからのデータが完全に読み取られます。 + +この方法は最高性能の中で利用可能なすべての方法を格納する辞書です。 + +構成例: + +``` xml + + + +``` + +または + +``` sql +LAYOUT(FLAT()) +``` + +### ハッシュ {#dicts-external_dicts_dict_layout-hashed} + +辞書は、ハッシュテーブルの形式でメモリに完全に格納されます。 辞書には、実際には任意の識別子を持つ任意の数の要素を含めることができ、キーの数は数千万のアイテムに達することができます。 + +すべてのタイプの源は支えられる。 更新時には、ファイルまたはテーブルからのデータが完全に読み取られます。 + +構成例: + +``` xml + + + +``` + +または + +``` sql +LAYOUT(HASHED()) +``` + +### sparse\_hashed {#dicts-external_dicts_dict_layout-sparse_hashed} + +に似て `hashed` が、使用メモリ賛以上のCPUます。 + +構成例: + +``` xml + + + +``` + +``` sql +LAYOUT(SPARSE_HASHED()) +``` + +### complex\_key\_hashed {#complex-key-hashed} + +このタイプの貯蔵は合成の使用のためです [キー](external_dicts_dict_structure.md). に似て `hashed`. + +構成例: + +``` xml + + + +``` + +``` sql +LAYOUT(COMPLEX_KEY_HASHED()) +``` + +### range\_hashed {#range-hashed} + +辞書は、範囲とそれに対応する値の順序付けられた配列を持つハッシュテーブルの形式でメモリに格納されます。 + +このストレージメソッドは、hashedと同じように機能し、キーに加えて日付/時刻(任意の数値型)の範囲を使用できます。 + +例:この表には、各広告主の割引の形式が含まれています: + +``` text ++---------|-------------|-------------|------+ +| advertiser id | discount start date | discount end date | amount | ++===============+=====================+===================+========+ +| 123 | 2015-01-01 | 2015-01-15 | 0.15 | ++---------|-------------|-------------|------+ +| 123 | 2015-01-16 | 2015-01-31 | 0.25 | ++---------|-------------|-------------|------+ +| 456 | 2015-01-01 | 2015-01-15 | 0.05 | ++---------|-------------|-------------|------+ +``` + +日付範囲のサンプルを使用するには、以下を定義します `range_min` と `range_max` の要素 [構造](external_dicts_dict_structure.md). これらの要素の要素が含まれている必要があ `name` と`type` (もし `type` デフォルトのタイプが使用されます-Date)。 `type` 任意の数値型(Date/DateTime/UInt64/Int32/others)を指定できます。 + +例えば: + +``` xml + + + Id + + + first + Date + + + last + Date + + ... +``` + +または + +``` sql +CREATE DICTIONARY somedict ( + id UInt64, + first Date, + last Date +) +PRIMARY KEY id +LAYOUT(RANGE_HASHED()) +RANGE(MIN first MAX last) +``` + +これらの辞書を操作するには、追加の引数を渡す必要があります `dictGetT` 範囲が選択される関数: + +``` sql +dictGetT('dict_name', 'attr_name', id, date) +``` + +この関数は、指定された値を返します `id`sと、渡された日付を含む日付範囲。 + +アルゴリズムの詳細: + +- この `id` が見つからないか、または範囲が見つからない。 `id`、辞書のデフォルト値を返します。 +- 範囲が重複している場合は、anyを使用できます。 +- 範囲区切り文字が次の場合 `NULL` または無効な日付(1900-01-01や2039-01-01など)では、範囲は開いたままになります。 範囲は両側で開きます。 + +構成例: + +``` xml + + + + ... 
+ + + + + + + + Abcdef + + + StartTimeStamp + UInt64 + + + EndTimeStamp + UInt64 + + + XXXType + String + + + + + + +``` + +または + +``` sql +CREATE DICTIONARY somedict( + Abcdef UInt64, + StartTimeStamp UInt64, + EndTimeStamp UInt64, + XXXType String DEFAULT '' +) +PRIMARY KEY Abcdef +RANGE(MIN StartTimeStamp MAX EndTimeStamp) +``` + +### キャッシュ {#cache} + +辞書は、固定数のセルを持つキャッシュに格納されます。 これらの細胞を含む使用頻度の高います。 + +辞書を検索するときは、まずキャッシュが検索されます。 各ブロックのデータは、すべてのキーが見つからないのキャッシュ古いから要求されてソースを使用 `SELECT attrs... FROM db.table WHERE id IN (k1, k2, ...)`. 受信したデータは、キャッシュに書き込まれます。 + +キャッシュ辞書の場合、有効期限 [寿命](external_dicts_dict_lifetime.md) キャッシュ内のデータの設定が可能です。 より多くの時間より `lifetime` セルにデータをロードしてから、セルの値は使用されず、次に使用する必要があるときに再要求されます。 +これは、辞書を格納するすべての方法の中で最も効果的ではありません。 キャッシュの速度は、正しい設定と使用シナリオに強く依存します。 キャッシュタイプの辞書は、ヒット率が十分に高い場合にのみうまく機能します(推奨99%以上)。 あなたは平均ヒット率を表示することができます `system.dictionaries` テーブル。 + +キャッシュパフォーマン `LIMIT`、および外部ディクショナリで関数を呼び出します。 + +サポート [ソース](external_dicts_dict_sources.md):MySQL、ClickHouse、実行可能ファイル、HTTP。 + +設定例: + +``` xml + + + + 1000000000 + + +``` + +または + +``` sql +LAYOUT(CACHE(SIZE_IN_CELLS 1000000000)) +``` + +十分なキャッシュサイズを設定します。 あなたは、セルの数を選択する実験する必要があります: + +1. 値を設定します。 +2. キャッシュが一杯になるまでクエリを実行します。 +3. を使用してメモリ消費量を評価する `system.dictionaries` テーブル。 +4. 必要なメモリ消費量に達するまで、セル数を増減します。 + +!!! warning "警告" + ランダムな読み取りでクエリを処理するのが遅いため、ソースとしてclickhouseを使用しないでください。 + +### complex\_key\_cache {#complex-key-cache} + +このタイプの貯蔵は合成の使用のためです [キー](external_dicts_dict_structure.md). に似て `cache`. + +### ip\_trie {#ip-trie} + +このタイプの貯蔵するマッピングするネットワーク接頭辞(ipアドレスへのメタデータなどのasn. + +例:テーブルを含むネットワークの接頭辞およびその対応としての数および国コード: + +``` text + +-----------|-----|------+ + | prefix | asn | cca2 | + +=================+=======+========+ + | 202.79.32.0/20 | 17501 | NP | + +-----------|-----|------+ + | 2620:0:870::/48 | 3856 | US | + +-----------|-----|------+ + | 2a02:6b8:1::/48 | 13238 | RU | + +-----------|-----|------+ + | 2001:db8::/32 | 65536 | ZZ | + +-----------|-----|------+ +``` + +このタイプのレイアウトを使用する場合、構造に複合キーが必要です。 + +例えば: + +``` xml + + + + prefix + String + + + + asn + UInt32 + + + + cca2 + String + ?? + + ... +``` + +または + +``` sql +CREATE DICTIONARY somedict ( + prefix String, + asn UInt32, + cca2 String DEFAULT '??' +) +PRIMARY KEY prefix +``` + +キーには、許可されたipプレフィックスを含む文字列型属性のみが必要です。 その他のタイプはサポートされていませんか。 + +クエリの場合は、同じ関数を使用する必要があります (`dictGetT` 複合キーを持つ辞書については、タプルを使用する: + +``` sql +dictGetT('dict_name', 'attr_name', tuple(ip)) +``` + +のいずれか `UInt32` IPv4の場合、または `FixedString(16)` IPv6の場合: + +``` sql +dictGetString('prefix', 'asn', tuple(IPv6StringToNum('2001:db8::1'))) +``` + +その他のタイプはサポートされていませんか。 この関数は、このipアドレスに対応するプレフィックスの属性を返します。 がある場合に重なる接頭辞であり、具体的には返却されます。 + +データは `trie`. 
それは完全にRAMに収まる必要があります。 + +[元の記事](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_layout/) diff --git a/docs/ja/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_lifetime.md b/docs/ja/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_lifetime.md deleted file mode 120000 index dacf01c959a..00000000000 --- a/docs/ja/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_lifetime.md +++ /dev/null @@ -1 +0,0 @@ -../../../../en/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_lifetime.md \ No newline at end of file diff --git a/docs/ja/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_lifetime.md b/docs/ja/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_lifetime.md new file mode 100644 index 00000000000..fbbc6b8fa97 --- /dev/null +++ b/docs/ja/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_lifetime.md @@ -0,0 +1,86 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 42 +toc_title: "\u8F9E\u66F8\u306E\u66F4\u65B0" +--- + +# 辞書の更新 {#dictionary-updates} + +ClickHouseは定期的に辞書を更新します。 完全にダウンロードされたディクショナリの更新間隔とキャッシュされたディクショナ `` 秒の札。 + +辞書の更新(最初の使用のための読み込み以外)は、クエリをブロックしません。 更新時には、古いバージョンの辞書が使用されます。 更新中にエラーが発生すると、エラーがサーバーログに書き込まれ、古いバージョンの辞書が引き続き使用されます。 + +設定例: + +``` xml + + ... + 300 + ... + +``` + +``` sql +CREATE DICTIONARY (...) +... +LIFETIME(300) +... +``` + +設定 `0` (`LIFETIME(0)`)辞書が更新されないようにします。 + +アップグレードの時間間隔を設定することができ、clickhouseはこの範囲内で一様にランダムな時間を選択します。 これは、多数のサーバーでアップグレードするときに、ディクショナリソースに負荷を分散するために必要です。 + +設定例: + +``` xml + + ... + + 300 + 360 + + ... + +``` + +または + +``` sql +LIFETIME(MIN 300 MAX 360) +``` + +ップする場合には辞書にclickhouseサーバーに適用の異なるロジックの種類によって [ソース](external_dicts_dict_sources.md): + +- テキストファイルの場合は、変更の時間をチェックします。 時間が以前に記録された時間と異なる場合、辞書は更新されます。 +- MyISAMテーブルの場合、変更時刻は次のようにしてチェックされます `SHOW TABLE STATUS` クエリ。 +- 他のソースからの辞書は、デフォルトで毎回updatedされます。 + +MySQL(InnoDB)、ODBC、ClickHouseのソースでは、辞書が実際に変更された場合にのみ、毎回ではなく、辞書を更新するクエリを設定できます。 これを行うには、次の手順に従います: + +- 辞書に表れてい分野に常に変化するソースデータを更新しました。 +- ソースの設定では、変更フィールドを取得するクエリを指定する必要があります。 クリックハウスサーバーは、クエリ結果を行として解釈し、この行が以前の状態に対して変更されている場合は、辞書が更新されます。 クエリを指定します。 `` の設定のフィールド [ソース](external_dicts_dict_sources.md). + +設定例: + +``` xml + + ... + + ... + SELECT update_time FROM dictionary_source where id = 1 + + ... + +``` + +または + +``` sql +... +SOURCE(ODBC(... invalidate_query 'SELECT update_time FROM dictionary_source where id = 1')) +... 
+``` + +[元の記事](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_lifetime/) diff --git a/docs/ja/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md b/docs/ja/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md deleted file mode 120000 index 1b289a34c68..00000000000 --- a/docs/ja/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md +++ /dev/null @@ -1 +0,0 @@ -../../../../en/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md \ No newline at end of file diff --git a/docs/ja/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md b/docs/ja/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md new file mode 100644 index 00000000000..7876b1d00b0 --- /dev/null +++ b/docs/ja/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md @@ -0,0 +1,608 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 43 +toc_title: "\u5916\u90E8\u8F9E\u66F8\u306E\u30BD\u30FC\u30B9" +--- + +# 外部辞書のソース {#dicts-external-dicts-dict-sources} + +外部辞書は、さまざまなソースから接続できます。 + +辞書がxmlファイルを使用して設定されている場合、設定は次のようになります: + +``` xml + + + ... + + + + + + ... + + ... + +``` + +の場合 [DDL-クエリ](../../statements/create.md#create-dictionary-query)、等しい構成は次のようになります: + +``` sql +CREATE DICTIONARY dict_name (...) +... +SOURCE(SOURCE_TYPE(param1 val1 ... paramN valN)) -- Source configuration +... +``` + +ソースは、 `source` セクション。 + +ソースの種類 (`source_type`): + +- [Localファイル](#dicts-external_dicts_dict_sources-local_file) +- [実行可能ファイル](#dicts-external_dicts_dict_sources-executable) +- [HTTP(s)](#dicts-external_dicts_dict_sources-http) +- DBMS + - [ODBC](#dicts-external_dicts_dict_sources-odbc) + - [MySQL](#dicts-external_dicts_dict_sources-mysql) + - [クリックハウス](#dicts-external_dicts_dict_sources-clickhouse) + - [MongoDB](#dicts-external_dicts_dict_sources-mongodb) + - [レディス](#dicts-external_dicts_dict_sources-redis) + +## Localファイル {#dicts-external_dicts_dict_sources-local_file} + +設定例: + +``` xml + + + /opt/dictionaries/os.tsv + TabSeparated + + +``` + +または + +``` sql +SOURCE(FILE(path '/opt/dictionaries/os.tsv' format 'TabSeparated')) +``` + +フィールドの設定: + +- `path` – The absolute path to the file. +- `format` – The file format. All the formats described in “[形式](../../../interfaces/formats.md#formats)” サポートされます。 + +## 実行可能ファイル {#dicts-external_dicts_dict_sources-executable} + +実行可能ファイルの操作は [辞書がメモリにどのように格納されるか](external_dicts_dict_layout.md). 辞書が以下を使用して格納されている場合 `cache` と `complex_key_cache` ClickHouseは、実行可能ファイルのSTDINに要求を送信することによって、必要なキーを要求します。 その他、ClickHouse始まり実行可能ファイルを扱い、その出力としての辞書のデータです。 + +設定例: + +``` xml + + + cat /opt/dictionaries/os.tsv + TabSeparated + + +``` + +または + +``` sql +SOURCE(EXECUTABLE(command 'cat /opt/dictionaries/os.tsv' format 'TabSeparated')) +``` + +フィールドの設定: + +- `command` – The absolute path to the executable file, or the file name (if the program directory is written to `PATH`). +- `format` – The file format. All the formats described in “[形式](../../../interfaces/formats.md#formats)” サポートされます。 + +## Http(s) {#dicts-external_dicts_dict_sources-http} + +HTTP(s)サーバーの操作は次の条件によって異なります [辞書がメモリにどのように格納されるか](external_dicts_dict_layout.md). 辞書が以下を使用して格納されている場合 `cache` と `complex_key_cache`、ClickHouse要求を送信することによって必要なキーを経由して `POST` 方法。 + +設定例: + +``` xml + + + http://[::1]/os.tsv + TabSeparated + + user + password + + +
    + API-KEY + key +
    +
    +
    + +``` + +または + +``` sql +SOURCE(HTTP( + url 'http://[::1]/os.tsv' + format 'TabSeparated' + credentials(user 'user' password 'password') + headers(header(name 'API-KEY' value 'key')) +)) +``` + +ClickHouseがHTTPSリソースにアクセスするには、次のことが必要です [openSSLを設定](../../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-openssl) サーバー構成で。 + +フィールドの設定: + +- `url` – The source URL. +- `format` – The file format. All the formats described in “[形式](../../../interfaces/formats.md#formats)” サポートされます。 +- `credentials` – Basic HTTP authentication. Optional parameter. + - `user` – Username required for the authentication. + - `password` – Password required for the authentication. +- `headers` – All custom HTTP headers entries used for the HTTP request. Optional parameter. + - `header` – Single HTTP header entry. + - `name` – Identifiant name used for the header send on the request. + - `value` – Value set for a specific identifiant name. + +## ODBC {#dicts-external_dicts_dict_sources-odbc} + +このメソッドを使用して、odbcドライバーを持つデータベースを接続できます。 + +設定例: + +``` xml + + + DatabaseName + ShemaName.TableName
    + DSN=some_parameters + SQL_QUERY +
    + +``` + +または + +``` sql +SOURCE(ODBC( + db 'DatabaseName' + table 'SchemaName.TableName' + connection_string 'DSN=some_parameters' + invalidate_query 'SQL_QUERY' +)) +``` + +フィールドの設定: + +- `db` – Name of the database. Omit it if the database name is set in the `` パラメータ。 +- `table` – Name of the table and schema if exists. +- `connection_string` – Connection string. +- `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [辞書の更新](external_dicts_dict_lifetime.md). + +ClickHouseはODBCドライバからクォート記号を受け取り、クエリのすべての設定をドライバに引用するので、データベースのテーブル名に応じてテーブル名を設定する + +Oracleを使用しているときにエンコードに問題がある場合は、対応する [FAQ](../../../faq/general.md#oracle-odbc-encodings) 記事。 + +### ODBCディクショナリ機能の既知の脆弱性 {#known-vulnerability-of-the-odbc-dictionary-functionality} + +!!! attention "注意" + ODBC driver connectionパラメーターを使用してデータベースに接続する場合 `Servername` 置換することができる。 この場合、 `USERNAME` と `PASSWORD` から `odbc.ini` リモートサーバーに送信され、侵害される可能性があります。 + +**安全でない使用例** + +PostgreSQL用にunixODBCを設定してみましょう。 の内容 `/etc/odbc.ini`: + +``` text +[gregtest] +Driver = /usr/lib/psqlodbca.so +Servername = localhost +PORT = 5432 +DATABASE = test_db +#OPTION = 3 +USERNAME = test +PASSWORD = test +``` + +次に、次のようなクエリを作成します + +``` sql +SELECT * FROM odbc('DSN=gregtest;Servername=some-server.com', 'test_db'); +``` + +ODBCドライバーの値を送信します `USERNAME` と `PASSWORD` から `odbc.ini` に `some-server.com`. + +### Postgresqlの接続例 {#example-of-connecting-postgresql} + +UbuntuのOS。 + +UnixODBCとPostgreSQL用のODBCドライバのインストール: + +``` bash +$ sudo apt-get install -y unixodbc odbcinst odbc-postgresql +``` + +設定 `/etc/odbc.ini` (または `~/.odbc.ini`): + +``` text + [DEFAULT] + Driver = myconnection + + [myconnection] + Description = PostgreSQL connection to my_db + Driver = PostgreSQL Unicode + Database = my_db + Servername = 127.0.0.1 + UserName = username + Password = password + Port = 5432 + Protocol = 9.3 + ReadOnly = No + RowVersioning = No + ShowSystemTables = No + ConnSettings = +``` + +クリックハウスの辞書構成: + +``` xml + + + table_name + + + + + DSN=myconnection + postgresql_table
    +
    + + + 300 + 360 + + + + + + + id + + + some_column + UInt64 + 0 + + +
    +
    +``` + +または + +``` sql +CREATE DICTIONARY table_name ( + id UInt64, + some_column UInt64 DEFAULT 0 +) +PRIMARY KEY id +SOURCE(ODBC(connection_string 'DSN=myconnection' table 'postgresql_table')) +LAYOUT(HASHED()) +LIFETIME(MIN 300 MAX 360) +``` + +編集が必要な場合があります `odbc.ini` ドライバを使用してライブラリへのフルパスを指定するには `DRIVER=/usr/local/lib/psqlodbcw.so`. + +### MS SQL Serverの接続例 {#example-of-connecting-ms-sql-server} + +UbuntuのOS。 + +設置のドライバー: : + +``` bash +$ sudo apt-get install tdsodbc freetds-bin sqsh +``` + +ドライバの設定: + +``` bash + $ cat /etc/freetds/freetds.conf + ... + + [MSSQL] + host = 192.168.56.101 + port = 1433 + tds version = 7.0 + client charset = UTF-8 + + $ cat /etc/odbcinst.ini + ... + + [FreeTDS] + Description = FreeTDS + Driver = /usr/lib/x86_64-linux-gnu/odbc/libtdsodbc.so + Setup = /usr/lib/x86_64-linux-gnu/odbc/libtdsS.so + FileUsage = 1 + UsageCount = 5 + + $ cat ~/.odbc.ini + ... + + [MSSQL] + Description = FreeTDS + Driver = FreeTDS + Servername = MSSQL + Database = test + UID = test + PWD = test + Port = 1433 +``` + +ClickHouseでの辞書の設定: + +``` xml + + + test + + + dict
    + DSN=MSSQL;UID=test;PWD=test +
    + + + + 300 + 360 + + + + + + + + + k + + + s + String + + + +
    +
    +``` + +または + +``` sql +CREATE DICTIONARY test ( + k UInt64, + s String DEFAULT '' +) +PRIMARY KEY k +SOURCE(ODBC(table 'dict' connection_string 'DSN=MSSQL;UID=test;PWD=test')) +LAYOUT(FLAT()) +LIFETIME(MIN 300 MAX 360) +``` + +## DBMS {#dbms} + +### Mysql {#dicts-external_dicts_dict_sources-mysql} + +設定例: + +``` xml + + + 3306 + clickhouse + qwerty + + example01-1 + 1 + + + example01-2 + 1 + + db_name + table_name
    + id=10 + SQL_QUERY +
    + +``` + +または + +``` sql +SOURCE(MYSQL( + port 3306 + user 'clickhouse' + password 'qwerty' + replica(host 'example01-1' priority 1) + replica(host 'example01-2' priority 1) + db 'db_name' + table 'table_name' + where 'id=10' + invalidate_query 'SQL_QUERY' +)) +``` + +フィールドの設定: + +- `port` – The port on the MySQL server. You can specify it for all replicas, or for each one individually (inside ``). + +- `user` – Name of the MySQL user. You can specify it for all replicas, or for each one individually (inside ``). + +- `password` – Password of the MySQL user. You can specify it for all replicas, or for each one individually (inside ``). + +- `replica` – Section of replica configurations. There can be multiple sections. + + - `replica/host` – The MySQL host. + - `replica/priority` – The replica priority. When attempting to connect, ClickHouse traverses the replicas in order of priority. The lower the number, the higher the priority. + +- `db` – Name of the database. + +- `table` – Name of the table. + +- `where` – The selection criteria. The syntax for conditions is the same as for `WHERE` たとえば、MySQLの句, `id > 10 AND id < 20`. 省略可能なパラメータ。 + +- `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [辞書の更新](external_dicts_dict_lifetime.md). + +MySQLは、ローカルホスト上でソケット経由で接続できます。 これを行うには、 `host` と `socket`. + +設定例: + +``` xml + + + localhost + /path/to/socket/file.sock + clickhouse + qwerty + db_name + table_name
    + id=10 + SQL_QUERY +
    + +``` + +または + +``` sql +SOURCE(MYSQL( + host 'localhost' + socket '/path/to/socket/file.sock' + user 'clickhouse' + password 'qwerty' + db 'db_name' + table 'table_name' + where 'id=10' + invalidate_query 'SQL_QUERY' +)) +``` + +### クリックハウス {#dicts-external_dicts_dict_sources-clickhouse} + +設定例: + +``` xml + + + example01-01-1 + 9000 + default + + default + ids
    + id=10 +
+
+```
+
+または
+
+``` sql
+SOURCE(CLICKHOUSE(
+    host 'example01-01-1'
+    port 9000
+    user 'default'
+    password ''
+    db 'default'
+    table 'ids'
+    where 'id=10'
+))
+```
+
+フィールドの設定:
+
+- `host` – ClickHouseのホスト。 ローカルホストの場合、クエリはネットワーク通信なしで処理されます。 耐障害性を高めるには、[分散](../../../engines/table_engines/special/distributed.md)テーブルを作成し、それを後続の設定で指定することができます。
+- `port` – ClickHouseサーバーのポート。
+- `user` – ClickHouseユーザーの名前。
+- `password` – ClickHouseユーザーのパスワード。
+- `db` – データベースの名前。
+- `table` – テーブルの名前。
+- `where` – 選択条件。 省略できます。
+- `invalidate_query` – 辞書の状態を確認するためのクエリ。 省略可能なパラメータです。 詳細は[辞書の更新](external_dicts_dict_lifetime.md)のセクションを参照してください。
+
+### Mongodb {#dicts-external_dicts_dict_sources-mongodb}
+
+設定例:
+
+``` xml
+<source>
+    <mongodb>
+        <host>localhost</host>
+        <port>27017</port>
+        <user></user>
+        <password></password>
+        <db>test</db>
+        <collection>dictionary_source</collection>
+    </mongodb>
+</source>
+```
+
+または
+
+``` sql
+SOURCE(MONGO(
+    host 'localhost'
+    port 27017
+    user ''
+    password ''
+    db 'test'
+    collection 'dictionary_source'
+))
+```
+
+フィールドの設定:
+
+- `host` – MongoDBのホスト。
+- `port` – MongoDBサーバーのポート。
+- `user` – MongoDBユーザーの名前。
+- `password` – MongoDBユーザーのパスワード。
+- `db` – データベースの名前。
+- `collection` – コレクションの名前。
+
+### Redis {#dicts-external_dicts_dict_sources-redis}
+
+設定例:
+
+``` xml
+<source>
+    <redis>
+        <host>localhost</host>
+        <port>6379</port>
+        <storage_type>simple</storage_type>
+        <db_index>0</db_index>
+    </redis>
+</source>
+```
+
+または
+
+``` sql
+SOURCE(REDIS(
+    host 'localhost'
+    port 6379
+    storage_type 'simple'
+    db_index 0
+))
+```
+
+フィールドの設定:
+
+- `host` – Redisのホスト。
+- `port` – Redisサーバーのポート。
+- `storage_type` – キーの扱いに使われるRedis内部ストレージの構造。 `simple` は単純なソースおよびハッシュ化された単一キーのソース用、`hash_map` は二つのキーを持つハッシュ化されたソース用です。 範囲付きソースおよびキャッシュソースの複合キーはサポートされていません。 省略可能で、デフォルト値は `simple` です。
+- `db_index` – Redisの論理データベースの数値インデックス。 省略可能で、デフォルト値は0です。
+
+[元の記事](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_sources/)
diff --git a/docs/ja/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md b/docs/ja/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md
deleted file mode 120000
index a6a92b9ab7b..00000000000
--- a/docs/ja/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../../en/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md
\ No newline at end of file
diff --git a/docs/ja/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md b/docs/ja/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md
new file mode 100644
index 00000000000..07d639043a6
--- /dev/null
+++ b/docs/ja/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md
@@ -0,0 +1,175 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 44
+toc_title: "\u8F9E\u66F8\u306E\u30AD\u30FC\u3068\u30D5\u30A3\u30FC\u30EB\u30C9"
+---
+
+# 辞書のキーとフィールド {#dictionary-key-and-fields}
+
+`<structure>` 句は、クエリで利用できる辞書のキーとフィールドを記述します。
+
+XMLの説明:
+
+``` xml
+<dictionary>
+    <structure>
+        <id>
+            <name>Id</name>
+        </id>
+
+        <attribute>
+            <!-- 属性のパラメータ -->
+        </attribute>
+
+        ...
+
+    </structure>
+</dictionary>
+```
+
+属性は以下の要素で記述します:
+
+- `<id>` — [キー列](external_dicts_dict_structure.md#ext_dict_structure-key).
+- `<attribute>` — [データ列](external_dicts_dict_structure.md#ext_dict_structure-attributes). 複数の属性を指定できます。
+
+DDLクエリ:
+
+``` sql
+CREATE DICTIONARY dict_name (
+    Id UInt64,
+    -- attributes
+)
+PRIMARY KEY Id
+...
+```
+
+属性はクエリ本文に記述されます:
+
+- `PRIMARY KEY` — [キー列](external_dicts_dict_structure.md#ext_dict_structure-key)
+- `AttrName AttrType` — [データ列](external_dicts_dict_structure.md#ext_dict_structure-attributes). 複数の属性を指定できます。
+
+## キー {#ext_dict_structure-key}
+
+ClickHouseは次の種類のキーをサポートしています:
+
+- 数値キー。 `UInt64`. `<id>` タグで定義するか、`PRIMARY KEY` キーワードを使用します。
+- 複合キー。 異なる型の値のセット。 `<key>` タグで定義するか、`PRIMARY KEY` キーワードを使用します。
+
+XML構造には `<id>` または `<key>` のいずれかを含めることができます。 DDLクエリには単一の `PRIMARY KEY` を含める必要があります。
+
+!!! warning "警告"
+    キーを属性として記述することはできません。
+
+### 数値キー {#ext_dict-numeric-key}
+
+タイプ: `UInt64`.
+
+構成例:
+
+``` xml
+<id>
+    <name>Id</name>
+</id>
+```
+
+設定フィールド:
+
+- `name` – キーを持つ列の名前。
+
+DDLクエリの場合:
+
+``` sql
+CREATE DICTIONARY (
+    Id UInt64,
+    ...
+)
+PRIMARY KEY Id
+...
+```
+
+- `PRIMARY KEY` – キーを持つ列の名前。
+
+### 複合キー {#composite-key}
+
+キーは、任意の型のフィールドからなる `tuple` にすることができます。 この場合の[レイアウト](external_dicts_dict_layout.md)は `complex_key_hashed` または `complex_key_cache` でなければなりません。
+
+!!! tip "ヒント"
+    複合キーは単一の要素で構成することもできます。 これにより、たとえば文字列をキーとして使用できます。
+
+キー構造は `<key>` 要素で設定します。 キーフィールドは、辞書の[属性](external_dicts_dict_structure.md)と同じ形式で指定します。 例えば:
+
+``` xml
+<structure>
+    <key>
+        <attribute>
+            <name>field1</name>
+            <type>String</type>
+        </attribute>
+        <attribute>
+            <name>field2</name>
+            <type>UInt32</type>
+        </attribute>
+        ...
+    </key>
+...
+```
+
+または
+
+``` sql
+CREATE DICTIONARY (
+    field1 String,
+    field2 String
+    ...
+)
+PRIMARY KEY field1, field2
+...
+```
+
+クエリでは、`dictGet*` 関数にタプルをキーとして渡します。 例えば: `dictGetString('dict_name', 'attr_name', tuple('string for field1', num_for_field2))`.
+
+## 属性 {#ext_dict_structure-attributes}
+
+構成例:
+
+``` xml
+<structure>
+    ...
+    <attribute>
+        <name>Name</name>
+        <type>ClickHouseDataType</type>
+        <null_value></null_value>
+        <expression>rand64()</expression>
+        <hierarchical>true</hierarchical>
+        <injective>true</injective>
+        <is_object_id>true</is_object_id>
+    </attribute>
+</structure>
+```
+
+または
+
+``` sql
+CREATE DICTIONARY somename (
+    Name ClickHouseDataType DEFAULT '' EXPRESSION rand64() HIERARCHICAL INJECTIVE IS_OBJECT_ID
+)
+```
+
+設定フィールド:
+
+| タグ | 説明 | 必須 |
+|------|------|------|
+| `name` | 列名。 | はい。 |
+| `type` | ClickHouseのデータ型。
ClickHouseは、辞書の値を指定されたデータ型にキャストしようとします。 たとえば、MySQLソーステーブルのフィールドが `TEXT`、`VARCHAR`、`BLOB` であっても、ClickHouseには `String` としてアップロードできます。
[Nullable](../../../sql_reference/data_types/nullable.md) はサポートされていません。 | はい。 |
| `null_value` | 存在しない要素のデフォルト値。
この例では空の文字列です。 このフィールドで `NULL` を使用することはできません。 | はい。 |
| `expression` | ClickHouseがその値に対して実行する[式](../../syntax.md#syntax-expressions)。
式にはリモートSQLデータベース内の列名を指定できます。 これにより、リモートの列に対するエイリアスを作成できます。

デフォルト値: 式なし。 | いいえ。 |
| `hierarchical` | `true` の場合、属性には現在のキーに対する親キーの値が含まれます。 [階層辞書](external_dicts_dict_hierarchical.md)を参照してください。

デフォルト値: `false`. | いいえ。 |
| `injective` | `id -> attribute` の対応が[単射](https://en.wikipedia.org/wiki/Injective_function)かどうかを示すフラグ。
`true` の場合、ClickHouseは単射な辞書への要求を自動的に `GROUP BY` 句の後に配置できます。 通常、これによりそのような要求の量が大幅に削減されます。

デフォルト値: `false`. | いいえ。 |
| `is_object_id` | クエリが `ObjectID` によってMongoDBドキュメントに対して実行されるかどうかを示すフラグ。

デフォルト値: `false`. | いいえ。 |
+
+## 関連項目 {#see-also}
+
+- [外部辞書を操作するための関数](../../../sql_reference/functions/ext_dict_functions.md).
+
+[元の記事](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_structure/)
diff --git a/docs/ja/sql_reference/dictionaries/external_dictionaries/index.md b/docs/ja/sql_reference/dictionaries/external_dictionaries/index.md
deleted file mode 120000
index 9f4831d3d39..00000000000
--- a/docs/ja/sql_reference/dictionaries/external_dictionaries/index.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../../en/sql_reference/dictionaries/external_dictionaries/index.md
\ No newline at end of file
diff --git a/docs/ja/sql_reference/dictionaries/external_dictionaries/index.md b/docs/ja/sql_reference/dictionaries/external_dictionaries/index.md
new file mode 100644
index 00000000000..b5c506f5d93
--- /dev/null
+++ b/docs/ja/sql_reference/dictionaries/external_dictionaries/index.md
@@ -0,0 +1,8 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_folder_title: External Dictionaries
+toc_priority: 37
+---
+
+
diff --git a/docs/ja/sql_reference/dictionaries/index.md b/docs/ja/sql_reference/dictionaries/index.md
deleted file mode 120000
index a697f128478..00000000000
--- a/docs/ja/sql_reference/dictionaries/index.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/sql_reference/dictionaries/index.md
\ No newline at end of file
diff --git a/docs/ja/sql_reference/dictionaries/index.md b/docs/ja/sql_reference/dictionaries/index.md
new file mode 100644
index 00000000000..bd08b421555
--- /dev/null
+++ b/docs/ja/sql_reference/dictionaries/index.md
@@ -0,0 +1,22 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_folder_title: Dictionaries
+toc_priority: 35
+toc_title: "\u5C0E\u5165"
+---
+
+# 辞書 {#dictionaries}
+
+辞書はマッピング (`key -> attributes`) であり、さまざまな種類の参照リストに便利です。
+
+ClickHouseは、クエリで使用できる辞書を操作するための特別な関数をサポートしています。 参照テーブルとの `JOIN` よりも、関数で辞書を使用する方が簡単で効率的です。
+
+[NULL](../syntax.md#null) 値を辞書に格納することはできません。
+
+ClickHouseは以下をサポートしています:
+
+- [組み込みの辞書](internal_dicts.md#internal_dicts) と、それを扱う特定の[関数のセット](../../sql_reference/functions/ym_dict_functions.md)。
+- [プラグイン(外部)辞書](external_dictionaries/external_dicts.md) と、それを扱う[関数のセット](../../sql_reference/functions/ext_dict_functions.md)。
+
+[元の記事](https://clickhouse.tech/docs/en/query_language/dicts/)
diff --git a/docs/ja/sql_reference/dictionaries/internal_dicts.md b/docs/ja/sql_reference/dictionaries/internal_dicts.md
deleted file mode 120000
index 741d4dc1f46..00000000000
--- a/docs/ja/sql_reference/dictionaries/internal_dicts.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/sql_reference/dictionaries/internal_dicts.md
\ No newline at end of file
diff --git a/docs/ja/sql_reference/dictionaries/internal_dicts.md b/docs/ja/sql_reference/dictionaries/internal_dicts.md
new file mode 100644
index 00000000000..93e1b990819
--- /dev/null
+++ b/docs/ja/sql_reference/dictionaries/internal_dicts.md
@@ -0,0 +1,55 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 39
+toc_title: "\u5185\u90E8\u8F9E\u66F8"
+---
+
+# 内部辞書 {#internal_dicts}
+
+ClickHouseには、ジオベースを操作するための組み込み機能が含まれています。
+
+これにより、次のことができます:
+
+- 地域のIDを使用して、目的の言語でその名前を取得します。
+- 地域のIDを使用して、都市、地域、連邦区、国、または大陸のIDを取得します。
+- ある地域が別の地域の一部であるかどうかを確認します。
+- 親地域のチェーンを取得します。
+
+すべての関数は "translocality"、つまり地域の所有権について異なる視点を同時に使用する機能をサポートします。 詳細については、"Functions for working with Yandex.Metrica dictionaries" のセクションを参照してください。
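+
+以下は、上記の操作のイメージを示す簡単なスケッチです。 ジオベースが設定済みであり、地域ID 213 が存在することを仮定しています:
+
+``` sql
+SELECT
+    regionToName(toUInt32(213), 'en') AS name,                              -- 地域IDから名前を取得
+    regionToCountry(toUInt32(213)) AS country,                              -- 地域IDから国のIDを取得
+    regionIn(toUInt32(213), regionToCountry(toUInt32(213))) AS in_country   -- 包含関係の確認
+```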
+
+内部辞書は、既定のパッケージでは無効になっています。
+有効にするには、サーバー設定ファイルでパラメータ `path_to_regions_hierarchy_file` と `path_to_regions_names_files` を設定します。
+
+ジオベースはテキストファイルからロードされます。
+
+`regions_hierarchy*.txt` ファイルを `path_to_regions_hierarchy_file` ディレクトリに配置します。 この構成パラメータには `regions_hierarchy.txt` ファイル(デフォルトの地域階層)へのパスを指定する必要があり、その他のファイル (`regions_hierarchy_ua.txt`) は同じディレクトリに配置する必要があります。
+
+`regions_names_*.txt` ファイルを `path_to_regions_names_files` ディレクトリに配置します。
+
+これらのファイルは自分で作成することもできます。 ファイルフォーマットは以下のとおりです:
+
+`regions_hierarchy*.txt`: TabSeparated(ヘッダーなし)、列:
+
+- 地域ID (`UInt32`)
+- 親地域ID (`UInt32`)
+- 地域タイプ (`UInt8`): 1-大陸、3-国、4-連邦区、5-地域、6-都市。その他のタイプには値がありません
+- 人口 (`UInt32`) — 省略可能な列
+
+`regions_names_*.txt`: TabSeparated(ヘッダーなし)、列:
+
+- 地域ID (`UInt32`)
+- 地域名 (`String`) — タブや改行を含めることはできません(エスケープされたものも不可)。
+
+RAMへの格納にはフラット配列が使用されます。 このため、IDは百万を超えてはいけません。
+
+辞書はサーバーを再起動せずに更新できます。 ただし、使用可能な辞書のセットは更新されません。
+更新時には、ファイルの修正時刻がチェックされます。 ファイルが変更されていれば、辞書が更新されます。
+変更をチェックする間隔は、`builtin_dictionaries_reload_interval` パラメータで設定します。
+辞書の更新(初回使用時の読み込みを除く)は、クエリをブロックしません。 更新中、クエリは古いバージョンの辞書を使用します。 更新中にエラーが発生した場合、エラーはサーバーログに書き込まれ、クエリは古いバージョンの辞書を使い続けます。
+
+ジオベースの辞書は定期的に更新することをお勧めします。 更新中は、新しいファイルを生成して別の場所に書き込み、すべての準備ができてから、サーバーが使用するファイル名に変更してください。
+
+OS識別子やYandex.Metricaの検索エンジンを操作するための関数もありますが、これらは使用すべきではありません。
+
+[元の記事](https://clickhouse.tech/docs/en/query_language/dicts/internal_dicts/)
diff --git a/docs/ja/sql_reference/functions/arithmetic_functions.md b/docs/ja/sql_reference/functions/arithmetic_functions.md
deleted file mode 120000
index 0b84812b58f..00000000000
--- a/docs/ja/sql_reference/functions/arithmetic_functions.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/sql_reference/functions/arithmetic_functions.md
\ No newline at end of file
diff --git a/docs/ja/sql_reference/functions/arithmetic_functions.md b/docs/ja/sql_reference/functions/arithmetic_functions.md
new file mode 100644
index 00000000000..cbcc2d8dca7
--- /dev/null
+++ b/docs/ja/sql_reference/functions/arithmetic_functions.md
@@ -0,0 +1,87 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 35
+toc_title: "\u7B97\u6570"
+---
+
+# 算術関数 {#arithmetic-functions}
+
+すべての算術関数では、結果の型は、結果が収まる最小の数値型として計算されます(そのような型がある場合)。 最小性は、ビット数、符号の有無、浮動小数点かどうかに基づいて同時に判定されます。 十分なビットがない場合は、最上位のビット型が採用されます。
+
+例えば:
+
+``` sql
+SELECT toTypeName(0), toTypeName(0 + 0), toTypeName(0 + 0 + 0), toTypeName(0 + 0 + 0 + 0)
+```
+
+``` text
+┌─toTypeName(0)─┬─toTypeName(plus(0, 0))─┬─toTypeName(plus(plus(0, 0), 0))─┬─toTypeName(plus(plus(plus(0, 0), 0), 0))─┐
+│ UInt8         │ UInt16                 │ UInt32                          │ UInt64                                   │
+└───────────────┴────────────────────────┴─────────────────────────────────┴──────────────────────────────────────────┘
+```
+
+算術関数は、UInt8、UInt16、UInt32、UInt64、Int8、Int16、Int32、Int64、Float32、Float64の任意のペアの型に対して機能します。
+
+オーバーフローはC++と同じ方法で発生します。
+
+## plus(a, b)、a + b 演算子 {#plusa-b-a-b-operator}
+
+数値の合計を計算します。
+日付または日付時刻に整数を加算することもできます。 日付の場合、整数の加算は対応する日数の加算を意味します。 日付時刻の場合は、対応する秒数の加算を意味します。
+
+## minus(a, b)、a - b 演算子 {#minusa-b-a-b-operator}
+
+差を計算します。 結果は常に符号付きです。
+
+日付または日付時刻から整数を減算することもできます。 考え方は ‘plus’ と同じです(上記を参照)。
+
+## multiply(a, b)、a \* b 演算子 {#multiplya-b-a-b-operator}
+
+数値の積を計算します。
+
+## divide(a, b)、a / b 演算子 {#dividea-b-a-b-operator}
+
+数値の商を計算します。 結果の型は常に浮動小数点型です。
+これは整数除算ではありません。 整数除算には ‘intDiv’ 関数を使用してください。
+ゼロで割ると ‘inf’, ‘-inf’、または ‘nan’ になります。
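+
+整数どうしでも結果が浮動小数点になることを確認する簡単な例です:
+
+``` sql
+SELECT 7 / 2 AS q, toTypeName(7 / 2) AS t
+-- q = 3.5、t = Float64
+```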
+
+## intDiv(a,b) {#intdiva-b}
+
+数値の商を計算します。 整数に(絶対値で)切り捨てて除算します。
+ゼロで除算するとき、または最小の負の数を-1で除算するときは例外がスローされます。
+
+## intDivOrZero(a,b) {#intdivorzeroa-b}
+
+‘intDiv’ とは異なり、ゼロで除算するとき、または最小の負の数を-1で除算するときにゼロを返します。
+
+## modulo(a, b)、a % b 演算子 {#moduloa-b-a-b-operator}
+
+除算の剰余を計算します。
+引数が浮動小数点数の場合、小数部分を削除して整数に事前変換されます。
+剰余はC++と同じ意味で取られます。 負の数には切り捨て除算が使用されます。
+ゼロで除算するとき、または最小の負の数を-1で除算するときは例外がスローされます。
+
+## moduloOrZero(a, b) {#moduloorzeroa-b}
+
+‘modulo’ とは異なり、除数がゼロのときにゼロを返します。
+
+## negate(a)、-a 演算子 {#negatea-a-operator}
+
+符号を反転した数値を計算します。 結果は常に符号付きです。
+
+## abs(a) {#arithm_func-abs}
+
+数値 a の絶対値を計算します。 つまり、a < 0 の場合は -a を返します。 符号なし整数型に対しては何もしません。 符号付き整数型に対しては、符号なしの数値を返します。
+
+## gcd(a,b) {#gcda-b}
+
+数値の最大公約数を返します。
+ゼロで除算するとき、または最小の負の数を-1で除算するときは例外がスローされます。
+
+## lcm(a,b) {#lcma-b}
+
+数値の最小公倍数を返します。
+ゼロで除算するとき、または最小の負の数を-1で除算するときは例外がスローされます。
+
+[元の記事](https://clickhouse.tech/docs/en/query_language/functions/arithmetic_functions/)
diff --git a/docs/ja/sql_reference/functions/array_functions.md b/docs/ja/sql_reference/functions/array_functions.md
deleted file mode 120000
index ab7508b6cce..00000000000
--- a/docs/ja/sql_reference/functions/array_functions.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/sql_reference/functions/array_functions.md
\ No newline at end of file
diff --git a/docs/ja/sql_reference/functions/array_functions.md b/docs/ja/sql_reference/functions/array_functions.md
new file mode 100644
index 00000000000..bf3e70eac33
--- /dev/null
+++ b/docs/ja/sql_reference/functions/array_functions.md
@@ -0,0 +1,1057 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 46
+toc_title: "\u914D\u5217\u306E\u64CD\u4F5C"
+---
+
+# 配列を操作するための関数 {#functions-for-working-with-arrays}
+
+## empty {#function-empty}
+
+空の配列の場合は1、空でない配列の場合は0を返します。
+結果の型はUInt8です。
+この関数は文字列に対しても機能します。
+
+## notEmpty {#function-notempty}
+
+空の配列の場合は0、空でない配列の場合は1を返します。
+結果の型はUInt8です。
+この関数は文字列に対しても機能します。
+
+## length {#array_functions-length}
+
+配列内の項目の数を返します。
+結果の型はUInt64です。
+この関数は文字列に対しても機能します。
+
+## emptyArrayUInt8,emptyArrayUInt16,emptyArrayUInt32,emptyArrayUInt64 {#emptyarrayuint8-emptyarrayuint16-emptyarrayuint32-emptyarrayuint64}
+
+## emptyArrayInt8,emptyArrayInt16,emptyArrayInt32,emptyArrayInt64 {#emptyarrayint8-emptyarrayint16-emptyarrayint32-emptyarrayint64}
+
+## emptyArrayFloat32,emptyArrayFloat64 {#emptyarrayfloat32-emptyarrayfloat64}
+
+## emptyArrayDate,emptyArrayDateTime {#emptyarraydate-emptyarraydatetime}
+
+## emptyArrayString {#emptyarraystring}
+
+ゼロ個の引数を受け取り、適切な型の空の配列を返します。
+
+## emptyArrayToSingle {#emptyarraytosingle}
+
+空の配列を受け取り、デフォルト値と等しい単一要素の配列を返します。
+
+## range(end)、range(start, end\[, step\]) {#rangeend-rangestart-end-step}
+
+startからend - 1までの数値を、step刻みで並べた配列を返します。
+`start` のデフォルトは0です。
+`step` のデフォルトは1です。
+Pythonの `range` とほぼ同様に動作します。
しかし、違いは、すべての引数の型が `UInt` ナンバーズ +場合によっては、データブロック内に100,000,000要素を超える長さの配列が作成された場合、例外がスローされます。 + +## array(x1, …), operator \[x1, …\] {#arrayx1-operator-x1} + +関数の引数から配列を作成します。 +引数は定数でなければならず、最小の共通型を持つ型を持つ必要があります。 なぜなら、それ以外の場合は、どのタイプの配列を作成するかは明確ではないからです。 つまり、この関数を使用して空の配列を作成することはできません(これを行うには、 ‘emptyArray\*’ 上記の関数)。 +を返します。 ‘Array(T)’ タイプの結果、ここで ‘T’ 渡された引数のうち最小の共通型です。 + +## arrayConcat {#arrayconcat} + +引数として渡される配列を結合します。 + +``` sql +arrayConcat(arrays) +``` + +**パラメータ** + +- `arrays` – Arbitrary number of arguments of [配列](../../sql_reference/data_types/array.md) タイプ。 + **例えば** + + + +``` sql +SELECT arrayConcat([1, 2], [3, 4], [5, 6]) AS res +``` + +``` text +┌─res───────────┐ +│ [1,2,3,4,5,6] │ +└───────────────┘ +``` + +## arrayElement(arr,n),演算子arr\[n\] {#arrayelementarr-n-operator-arrn} + +インデックスを持つ要素を取得する `n` 配列から `arr`. `n` 任意の整数型でなければなりません。 +インデックス配列の開始からです。 +負の索引がサポートされます。 この場合、最後から番号が付けられた対応する要素を選択します。 例えば, `arr[-1]` 配列の最後の項目です。 + +インデックスが配列の境界の外にある場合、いくつかのデフォルト値(数値の場合は0、文字列の場合は空の文字列など)を返します。)、非定数配列と定数インデックス0の場合を除いて(この場合はエラーが発生します `Array indices are 1-based`). + +## has(arr,elem) {#hasarr-elem} + +この ‘arr’ 配列には ‘elem’ 要素。 +要素が配列にない場合は0、ない場合は1を返します。 + +`NULL` 値として処理されます。 + +``` sql +SELECT has([1, 2, NULL], NULL) +``` + +``` text +┌─has([1, 2, NULL], NULL)─┐ +│ 1 │ +└─────────────────────────┘ +``` + +## hasAll {#hasall} + +ある配列が別の配列のサブセットかどうかを調べます。 + +``` sql +hasAll(set, subset) +``` + +**パラメータ** + +- `set` – Array of any type with a set of elements. +- `subset` – Array of any type with elements that should be tested to be a subset of `set`. + +**戻り値** + +- `1`,もし `set` からのすべての要素を含みます `subset`. +- `0` そうでなければ + +**特有の性質** + +- 空の配列は、任意の配列のサブセットです。 +- `Null` 値として処理されます。 +- 両方の配列の値の順序は関係ありません。 + +**例** + +`SELECT hasAll([], [])` 戻り値1. + +`SELECT hasAll([1, Null], [Null])` 戻り値1. + +`SELECT hasAll([1.0, 2, 3, 4], [1, 3])` 戻り値1. + +`SELECT hasAll(['a', 'b'], ['a'])` 戻り値1. + +`SELECT hasAll([1], ['a'])` 0を返します。 + +`SELECT hasAll([[1, 2], [3, 4]], [[1, 2], [3, 5]])` 0を返します。 + +## hasAny {#hasany} + +るかどうかを判二つの配列が互いの交差点にある。 + +``` sql +hasAny(array1, array2) +``` + +**パラメータ** + +- `array1` – Array of any type with a set of elements. +- `array2` – Array of any type with a set of elements. + +**戻り値** + +- `1`,もし `array1` と `array2` 少なくとも同様の要素を持っている。 +- `0` そうでなければ + +**特有の性質** + +- `Null` 値として処理されます。 +- 両方の配列の値の順序は関係ありません。 + +**例** + +`SELECT hasAny([1], [])` を返します `0`. + +`SELECT hasAny([Null], [Null, 1])` を返します `1`. + +`SELECT hasAny([-128, 1., 512], [1])` を返します `1`. + +`SELECT hasAny([[1, 2], [3, 4]], ['a', 'c'])` を返します `0`. + +`SELECT hasAll([[1, 2], [3, 4]], [[1, 2], [1, 2]])` を返します `1`. 
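+
+実際のクエリでの使われ方のイメージとして、高階関数と組み合わせたスケッチを示します:
+
+``` sql
+SELECT arrayFilter(x -> hasAny(x, [1, 2]), [[1, 3], [4, 5], [2, 6]]) AS res
+-- res = [[1,3],[2,6]]:要素 1 または 2 を含む配列だけが残ります
+```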
+ +## インデクサー(arr,x) {#indexofarr-x} + +最初のインデックスを返します ‘x’ 要素(配列内にある場合は1から開始)、そうでない場合は0。 + +例えば: + +``` sql +SELECT indexOf([1, 3, NULL, NULL], NULL) +``` + +``` text +┌─indexOf([1, 3, NULL, NULL], NULL)─┐ +│ 3 │ +└───────────────────────────────────┘ +``` + +に設定された要素 `NULL` 通常の値として扱われます。 + +## countEqual(arr,x) {#countequalarr-x} + +配列内のxと等しい要素の数を返します。arraycount(elem-\>elem=x,arr)と等価です。 + +`NULL` 要素は個別の値として処理されます。 + +例えば: + +``` sql +SELECT countEqual([1, 2, NULL, NULL], NULL) +``` + +``` text +┌─countEqual([1, 2, NULL, NULL], NULL)─┐ +│ 2 │ +└──────────────────────────────────────┘ +``` + +## arrayEnumerate(arr) {#array_functions-arrayenumerate} + +Returns the array \[1, 2, 3, …, length (arr) \] + +この関数は通常、array joinと共に使用されます。 この計数かけま配列に適用後の配列。 例えば: + +``` sql +SELECT + count() AS Reaches, + countIf(num = 1) AS Hits +FROM test.hits +ARRAY JOIN + GoalsReached, + arrayEnumerate(GoalsReached) AS num +WHERE CounterID = 160656 +LIMIT 10 +``` + +``` text +┌─Reaches─┬──Hits─┐ +│ 95606 │ 31406 │ +└─────────┴───────┘ +``` + +この例では、reachesは変換の数(配列結合を適用した後に受け取った文字列)であり、hitsはページビューの数(配列結合の前の文字列)です。 この特定のケースでは、同じ結果をより簡単な方法で得ることができます: + +``` sql +SELECT + sum(length(GoalsReached)) AS Reaches, + count() AS Hits +FROM test.hits +WHERE (CounterID = 160656) AND notEmpty(GoalsReached) +``` + +``` text +┌─Reaches─┬──Hits─┐ +│ 95606 │ 31406 │ +└─────────┴───────┘ +``` + +この関数は、高階関数でも使用できます。 たとえば、これを使用して、条件に一致する要素の配列インデックスを取得できます。 + +## arrayEnumerateUniq(arr, …) {#arrayenumerateuniqarr} + +ソース配列と同じサイズの配列を返し、各要素に対して同じ値を持つ要素間の位置を示します。 +例えば:arrayenumerateuniq(\[10, 20, 10, 30\]) = \[1, 1, 2, 1\]. + +この関数は、配列要素の配列の結合と集約を使用する場合に便利です。 +例えば: + +``` sql +SELECT + Goals.ID AS GoalID, + sum(Sign) AS Reaches, + sumIf(Sign, num = 1) AS Visits +FROM test.visits +ARRAY JOIN + Goals, + arrayEnumerateUniq(Goals.ID) AS num +WHERE CounterID = 160656 +GROUP BY GoalID +ORDER BY Reaches DESC +LIMIT 10 +``` + +``` text +┌──GoalID─┬─Reaches─┬─Visits─┐ +│ 53225 │ 3214 │ 1097 │ +│ 2825062 │ 3188 │ 1097 │ +│ 56600 │ 2803 │ 488 │ +│ 1989037 │ 2401 │ 365 │ +│ 2830064 │ 2396 │ 910 │ +│ 1113562 │ 2372 │ 373 │ +│ 3270895 │ 2262 │ 812 │ +│ 1084657 │ 2262 │ 345 │ +│ 56599 │ 2260 │ 799 │ +│ 3271094 │ 2256 │ 812 │ +└─────────┴─────────┴────────┘ +``` + +この例では、各ゴールidには、コンバージョン数(ゴールネストされたデータ構造の各要素は、達成されたゴールであり、コンバージョンと呼ばれます)とセッション 配列の結合がなければ、セッション数をsum(sign)としてカウントします。 しかし、この特定のケースでは、行はネストされたgoals構造体で乗算されたので、この後に各セッションをカウントするために、arrayenumerateuniq()の値に条件を適用しまgoals.id)関数。 + +ArrayEnumerateUniq関数は、引数と同じサイズの複数の配列を取ることができます。 この場合、すべての配列の同じ位置にある要素のタプルに対して一意性が考慮されます。 + +``` sql +SELECT arrayEnumerateUniq([1, 1, 1, 2, 2, 2], [1, 1, 2, 1, 1, 2]) AS res +``` + +``` text +┌─res───────────┐ +│ [1,2,1,1,2,1] │ +└───────────────┘ +``` + +これは、ネストされたデータ構造で配列結合を使用し、この構造内の複数の要素間でさらに集約する場合に必要です。 + +## arrayPopBack {#arraypopback} + +配列から最後の項目を削除します。 + +``` sql +arrayPopBack(array) +``` + +**パラメータ** + +- `array` – Array. + +**例えば** + +``` sql +SELECT arrayPopBack([1, 2, 3]) AS res +``` + +``` text +┌─res───┐ +│ [1,2] │ +└───────┘ +``` + +## arrayPopFront {#arraypopfront} + +配列から最初の項目を削除します。 + +``` sql +arrayPopFront(array) +``` + +**パラメータ** + +- `array` – Array. + +**例えば** + +``` sql +SELECT arrayPopFront([1, 2, 3]) AS res +``` + +``` text +┌─res───┐ +│ [2,3] │ +└───────┘ +``` + +## arrayPushBack {#arraypushback} + +配列の末尾に一つの項目を追加します。 + +``` sql +arrayPushBack(array, single_value) +``` + +**パラメータ** + +- `array` – Array. +- `single_value` – A single value. Only numbers can be added to an array with numbers, and only strings can be added to an array of strings. 
When adding numbers, ClickHouse automatically sets the `single_value` 配列のデータ型の型。 ClickHouseのデータ型の詳細については、以下を参照してください “[データ型](../../sql_reference/data_types/index.md#data_types)”. できる。 `NULL`. この関数は、 `NULL` 配列への要素、および配列要素の型に変換します `Nullable`. + +**例えば** + +``` sql +SELECT arrayPushBack(['a'], 'b') AS res +``` + +``` text +┌─res───────┐ +│ ['a','b'] │ +└───────────┘ +``` + +## arrayPushFront {#arraypushfront} + +配列の先頭に一つの要素を追加します。 + +``` sql +arrayPushFront(array, single_value) +``` + +**パラメータ** + +- `array` – Array. +- `single_value` – A single value. Only numbers can be added to an array with numbers, and only strings can be added to an array of strings. When adding numbers, ClickHouse automatically sets the `single_value` 配列のデータ型の型。 ClickHouseのデータ型の詳細については、以下を参照してください “[データ型](../../sql_reference/data_types/index.md#data_types)”. できる。 `NULL`. この関数は、 `NULL` 配列への要素、および配列要素の型に変換します `Nullable`. + +**例えば** + +``` sql +SELECT arrayPushFront(['b'], 'a') AS res +``` + +``` text +┌─res───────┐ +│ ['a','b'] │ +└───────────┘ +``` + +## arrayResize {#arrayresize} + +配列の長さを変更します。 + +``` sql +arrayResize(array, size[, extender]) +``` + +**パラメータ:** + +- `array` — Array. +- `size` — Required length of the array. + - もし `size` 配列の元のサイズより小さい場合、配列は右から切り捨てられます。 +- もし `size` は配列の初期サイズより大きく、配列は次のように右に拡張されます `extender` 配列項目のデータ型の値または既定値。 +- `extender` — Value for extending an array. Can be `NULL`. + +**戻り値:** + +長さの配列 `size`. + +**通話の例** + +``` sql +SELECT arrayResize([1], 3) +``` + +``` text +┌─arrayResize([1], 3)─┐ +│ [1,0,0] │ +└─────────────────────┘ +``` + +``` sql +SELECT arrayResize([1], 3, NULL) +``` + +``` text +┌─arrayResize([1], 3, NULL)─┐ +│ [1,NULL,NULL] │ +└───────────────────────────┘ +``` + +## arraySlice {#arrayslice} + +配列のスライスを返します。 + +``` sql +arraySlice(array, offset[, length]) +``` + +**パラメータ** + +- `array` – Array of data. +- `offset` – Indent from the edge of the array. A positive value indicates an offset on the left, and a negative value is an indent on the right. Numbering of the array items begins with 1. +- `length` -必要なスライスの長さ。 負の値を指定すると、関数は開いているスライスを返します `[offset, array_length - length)`. 値を省略すると、関数はスライスを返します `[offset, the_end_of_array]`. + +**例えば** + +``` sql +SELECT arraySlice([1, 2, NULL, 4, 5], 2, 3) AS res +``` + +``` text +┌─res────────┐ +│ [2,NULL,4] │ +└────────────┘ +``` + +に設定された配列要素 `NULL` 通常の値として扱われます。 + +## arraySort(\[func,\] arr, …) {#array_functions-sort} + +の要素をソートします `arr` 昇順の配列。 この `func` の結果によって決定される。 `func` 関数は、配列の要素に適用されます。 もし `func` 複数の引数を受け取る。 `arraySort` 関数はいくつかの配列を渡されます。 `func` に対応します。 詳しい例は終わりにの示されています `arraySort` 説明。 + +整数値のソート例: + +``` sql +SELECT arraySort([1, 3, 3, 0]); +``` + +``` text +┌─arraySort([1, 3, 3, 0])─┐ +│ [0,1,3,3] │ +└─────────────────────────┘ +``` + +文字列値のソートの例: + +``` sql +SELECT arraySort(['hello', 'world', '!']); +``` + +``` text +┌─arraySort(['hello', 'world', '!'])─┐ +│ ['!','hello','world'] │ +└────────────────────────────────────┘ +``` + +次の並べ替え順序を考えてみましょう。 `NULL`, `NaN` と `Inf` 値: + +``` sql +SELECT arraySort([1, nan, 2, NULL, 3, nan, -4, NULL, inf, -inf]); +``` + +``` text +┌─arraySort([1, nan, 2, NULL, 3, nan, -4, NULL, inf, -inf])─┐ +│ [-inf,-4,1,2,3,inf,nan,nan,NULL,NULL] │ +└───────────────────────────────────────────────────────────┘ +``` + +- `-Inf` 値は配列の最初のものです。 +- `NULL` 値は配列の最後です。 +- `NaN` 値は直前です `NULL`. +- `Inf` 値は直前です `NaN`. + +それに注意 `arraySort` は [高階関数](higher_order_functions.md). 
最初の引数としてラムダ関数を渡すことができます。 この場合、並べ替え順序は、配列の要素に適用されるlambda関数の結果によって決まります。 + +次の例を考えてみましょう: + +``` sql +SELECT arraySort((x) -> -x, [1, 2, 3]) as res; +``` + +``` text +┌─res─────┐ +│ [3,2,1] │ +└─────────┘ +``` + +For each element of the source array, the lambda function returns the sorting key, that is, \[1 –\> -1, 2 –\> -2, 3 –\> -3\]. Since the `arraySort` 関数はキーを昇順にソートし、結果は\[3,2,1\]になります。 このように、 `(x) –> -x` ラムダ関数は、 [降順](#array_functions-reverse-sort) ソートで。 + +Lambda関数は複数の引数を受け取ることができます。 この場合、次のものを渡す必要があります `arraySort` 関数lambda関数の引数が対応する同じ長さのいくつかの配列。 結果の配列は最初の入力配列の要素で構成され、次の入力配列の要素はソートキーを指定します。 例えば: + +``` sql +SELECT arraySort((x, y) -> y, ['hello', 'world'], [2, 1]) as res; +``` + +``` text +┌─res────────────────┐ +│ ['world', 'hello'] │ +└────────────────────┘ +``` + +ここでは、第二の配列(\[2、1\])に渡される要素は、ソース配列から対応する要素のソートキーを定義します (\[‘hello’, ‘world’\])、それは, \[‘hello’ –\> 2, ‘world’ –\> 1\]. Since the lambda function doesn't use `x` ソース配列の実際の値は、結果の順序には影響しません。 だから, ‘hello’ 結果の二番目の要素になります。 ‘world’ 最初になります。 + +その他の例を以下に示す。 + +``` sql +SELECT arraySort((x, y) -> y, [0, 1, 2], ['c', 'b', 'a']) as res; +``` + +``` text +┌─res─────┐ +│ [2,1,0] │ +└─────────┘ +``` + +``` sql +SELECT arraySort((x, y) -> -y, [0, 1, 2], [1, 2, 3]) as res; +``` + +``` text +┌─res─────┐ +│ [2,1,0] │ +└─────────┘ +``` + +!!! note "メモ" + 効率を分類することを改善するため [シュワルツ語変換](https://en.wikipedia.org/wiki/Schwartzian_transform) 使用される。 + +## arrayReverseSort(\[func,\] arr, …) {#array_functions-reverse-sort} + +の要素をソートします `arr` 降順での配列。 この `func` 機能は指定されます, `arr` の結果に従ってソートされます。 `func` 関数は、配列の要素に適用され、その後、ソートされた配列が反転されます。 もし `func` 複数の引数を受け取る。 `arrayReverseSort` 関数はいくつかの配列を渡されます。 `func` に対応します。 詳しい例は終わりにの示されています `arrayReverseSort` 説明。 + +整数値のソート例: + +``` sql +SELECT arrayReverseSort([1, 3, 3, 0]); +``` + +``` text +┌─arrayReverseSort([1, 3, 3, 0])─┐ +│ [3,3,1,0] │ +└────────────────────────────────┘ +``` + +文字列値のソートの例: + +``` sql +SELECT arrayReverseSort(['hello', 'world', '!']); +``` + +``` text +┌─arrayReverseSort(['hello', 'world', '!'])─┐ +│ ['world','hello','!'] │ +└───────────────────────────────────────────┘ +``` + +次の並べ替え順序を考えてみましょう。 `NULL`, `NaN` と `Inf` 値: + +``` sql +SELECT arrayReverseSort([1, nan, 2, NULL, 3, nan, -4, NULL, inf, -inf]) as res; +``` + +``` text +┌─res───────────────────────────────────┐ +│ [inf,3,2,1,-4,-inf,nan,nan,NULL,NULL] │ +└───────────────────────────────────────┘ +``` + +- `Inf` 値は配列の最初のものです。 +- `NULL` 値は配列の最後です。 +- `NaN` 値は直前です `NULL`. +- `-Inf` 値は直前です `NaN`. + +それに注意しなさい `arrayReverseSort` は [高階関数](higher_order_functions.md). 最初の引数としてラムダ関数を渡すことができます。 例を以下に示す。 + +``` sql +SELECT arrayReverseSort((x) -> -x, [1, 2, 3]) as res; +``` + +``` text +┌─res─────┐ +│ [1,2,3] │ +└─────────┘ +``` + +配列は次の方法でソートされます: + +1. 最初に、ソース配列(\[1、2、3\])は、配列の要素に適用されたラムダ関数の結果に従ってソートされます。 結果は配列\[3,2,1\]です。 +2. 前のステップで取得された配列は、逆になります。 したがって、最終的な結果は\[1,2,3\]です。 + +Lambda関数は複数の引数を受け取ることができます。 この場合、次のものを渡す必要があります `arrayReverseSort` 関数lambda関数の引数が対応する同じ長さのいくつかの配列。 結果の配列は最初の入力配列の要素で構成され、次の入力配列の要素はソートキーを指定します。 例えば: + +``` sql +SELECT arrayReverseSort((x, y) -> y, ['hello', 'world'], [2, 1]) as res; +``` + +``` text +┌─res───────────────┐ +│ ['hello','world'] │ +└───────────────────┘ +``` + +この例では、配列は次のようにソートされています: + +1. 最初に、ソース配列 (\[‘hello’, ‘world’\])は、配列の要素に適用されたラムダ関数の結果に従ってソートされます。 第二の配列(\[2、1\])に渡される要素は、ソース配列から対応する要素のソートキーを定義します。 結果は配列です \[‘world’, ‘hello’\]. +2. 前のステップでソートされた配列は、逆になります。 したがって、最終的な結果は \[‘hello’, ‘world’\]. 
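+
+この二段階の動作は、`arraySort` の結果を `reverse` で反転したものと一致します。 確認用のスケッチ:
+
+``` sql
+SELECT
+    arrayReverseSort((x, y) -> y, ['hello', 'world'], [2, 1]) AS a,
+    reverse(arraySort((x, y) -> y, ['hello', 'world'], [2, 1])) AS b
+-- a = b = ['hello','world']
+```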
+ +その他の例を以下に示す。 + +``` sql +SELECT arrayReverseSort((x, y) -> y, [4, 3, 5], ['a', 'b', 'c']) AS res; +``` + +``` text +┌─res─────┐ +│ [5,3,4] │ +└─────────┘ +``` + +``` sql +SELECT arrayReverseSort((x, y) -> -y, [4, 3, 5], [1, 2, 3]) AS res; +``` + +``` text +┌─res─────┐ +│ [4,3,5] │ +└─────────┘ +``` + +## arrayUniq(arr, …) {#arrayuniqarr} + +一つの引数が渡された場合、それは、配列内の異なる要素の数をカウントします。 +複数の引数が渡されると、複数の配列内の対応する位置にある要素の異なるタプルの数がカウントされます。 + +配列内の一意の項目のリストを取得する場合は、arrayreduceを使用できます(‘groupUniqArray’、arr)。 + +## arrayJoin(arr) {#array-functions-join} + +特殊関数。 セクションを見る [“ArrayJoin function”](array_join.md#functions_arrayjoin). + +## arrayDifference {#arraydifference} + +隣接する配列要素間の差を計算します。 最初の要素が0になる配列を返します。 `a[1] - a[0]`, etc. The type of elements in the resulting array is determined by the type inference rules for subtraction (e.g. `UInt8` - `UInt8` = `Int16`). + +**構文** + +``` sql +arrayDifference(array) +``` + +**パラメータ** + +- `array` – [配列](https://clickhouse.yandex/docs/en/data_types/array/). + +**戻り値** + +隣接する要素間の差異の配列を返します。 + +タイプ: [UInt\*](https://clickhouse.yandex/docs/en/data_types/int_uint/#uint-ranges), [Int\*](https://clickhouse.yandex/docs/en/data_types/int_uint/#int-ranges), [フロート\*](https://clickhouse.yandex/docs/en/data_types/float/). + +**例えば** + +クエリ: + +``` sql +SELECT arrayDifference([1, 2, 3, 4]) +``` + +結果: + +``` text +┌─arrayDifference([1, 2, 3, 4])─┐ +│ [0,1,1,1] │ +└───────────────────────────────┘ +``` + +結果の型int64によるオーバーフローの例: + +クエリ: + +``` sql +SELECT arrayDifference([0, 10000000000000000000]) +``` + +結果: + +``` text +┌─arrayDifference([0, 10000000000000000000])─┐ +│ [0,-8446744073709551616] │ +└────────────────────────────────────────────┘ +``` + +## arrayDistinct {#arraydistinct} + +配列をとり、distinct要素のみを含む配列を返します。 + +**構文** + +``` sql +arrayDistinct(array) +``` + +**パラメータ** + +- `array` – [配列](https://clickhouse.yandex/docs/en/data_types/array/). + +**戻り値** + +Distinct要素を含む配列を返します。 + +**例えば** + +クエリ: + +``` sql +SELECT arrayDistinct([1, 2, 2, 3, 1]) +``` + +結果: + +``` text +┌─arrayDistinct([1, 2, 2, 3, 1])─┐ +│ [1,2,3] │ +└────────────────────────────────┘ +``` + +## arrayEnumerateDense(arr) {#array_functions-arrayenumeratedense} + +ソース配列と同じサイズの配列を返し、各要素がソース配列のどこに最初に現れるかを示します。 + +例えば: + +``` sql +SELECT arrayEnumerateDense([10, 20, 10, 30]) +``` + +``` text +┌─arrayEnumerateDense([10, 20, 10, 30])─┐ +│ [1,2,1,3] │ +└───────────────────────────────────────┘ +``` + +## arrayIntersect(arr) {#array-functions-arrayintersect} + +複数の配列を取り、すべてのソース配列に存在する要素を持つ配列を返します。 結果の配列内の要素の順序は、最初の配列と同じです。 + +例えば: + +``` sql +SELECT + arrayIntersect([1, 2], [1, 3], [2, 3]) AS no_intersect, + arrayIntersect([1, 2], [1, 3], [1, 4]) AS intersect +``` + +``` text +┌─no_intersect─┬─intersect─┐ +│ [] │ [1] │ +└──────────────┴───────────┘ +``` + +## arrayReduce {#arrayreduce} + +集計関数を配列要素に適用し、その結果を返します。 集約関数の名前は、一重引quotesで文字列として渡されます `'max'`, `'sum'`. パラメトリック集約関数を使用する場合、パラメータは関数名の後に括弧で囲んで示されます `'uniqUpTo(6)'`. + +**構文** + +``` sql +arrayReduce(agg_func, arr1, arr2, ..., arrN) +``` + +**パラメータ** + +- `agg_func` — The name of an aggregate function which should be a constant [文字列](../../sql_reference/data_types/string.md). 
+- `arr` — Any number of [配列](../../sql_reference/data_types/array.md) 集計関数のパラメーターとして列を入力します。 + +**戻り値** + +**例えば** + +``` sql +SELECT arrayReduce('max', [1, 2, 3]) +``` + +``` text +┌─arrayReduce('max', [1, 2, 3])─┐ +│ 3 │ +└───────────────────────────────┘ +``` + +集計関数が複数の引数を取る場合、この関数は同じサイズの複数の配列に適用する必要があります。 + +``` sql +SELECT arrayReduce('maxIf', [3, 5], [1, 0]) +``` + +``` text +┌─arrayReduce('maxIf', [3, 5], [1, 0])─┐ +│ 3 │ +└──────────────────────────────────────┘ +``` + +パラメトリック集計関数の使用例: + +``` sql +SELECT arrayReduce('uniqUpTo(3)', [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) +``` + +``` text +┌─arrayReduce('uniqUpTo(3)', [1, 2, 3, 4, 5, 6, 7, 8, 9, 10])─┐ +│ 4 │ +└─────────────────────────────────────────────────────────────┘ +``` + +## arrayreduceinrangesname {#arrayreduceinranges} + +指定された範囲の配列要素に集計関数を適用し、各範囲に対応する結果を含む配列を返します。 この関数は、同じ結果を複数として返します `arrayReduce(agg_func, arraySlice(arr1, index, length), ...)`. + +**構文** + +``` sql +arrayReduceInRanges(agg_func, ranges, arr1, arr2, ..., arrN) +``` + +**パラメータ** + +- `agg_func` — The name of an aggregate function which should be a constant [文字列](../../sql_reference/data_types/string.md). +- `ranges` — The ranges to aggretate which should be an [配列](../../sql_reference/data_types/array.md) の [タプル](../../sql_reference/data_types/tuple.md) 各範囲のインデックスと長さを含む。 +- `arr` — Any number of [配列](../../sql_reference/data_types/array.md) 集計関数のパラメーターとして列を入力します。 + +**戻り値** + +**例えば** + +``` sql +SELECT arrayReduceInRanges( + 'sum', + [(1, 5), (2, 3), (3, 4), (4, 4)], + [1000000, 200000, 30000, 4000, 500, 60, 7] +) AS res +``` + +``` text +┌─res─────────────────────────┐ +│ [1234500,234000,34560,4567] │ +└─────────────────────────────┘ +``` + +## arrayReverse(arr) {#arrayreverse} + +要素を含む元の配列と同じサイズの配列を逆の順序で返します。 + +例えば: + +``` sql +SELECT arrayReverse([1, 2, 3]) +``` + +``` text +┌─arrayReverse([1, 2, 3])─┐ +│ [3,2,1] │ +└─────────────────────────┘ +``` + +## リバース(arr) {#array-functions-reverse} + +の同義語 [“arrayReverse”](#array_functions-arrayreverse) + +## arrayFlatten {#arrayflatten} + +配列の配列をフラット配列に変換します。 + +機能: + +- ネストされた配列の任意の深さに適用されます。 +- 既にフラットな配列は変更されません。 + +の平坦化された配列を含むすべての要素をすべてソース配列. + +**構文** + +``` sql +flatten(array_of_arrays) +``` + +エイリアス: `flatten`. + +**パラメータ** + +- `array_of_arrays` — [配列](../../sql_reference/data_types/array.md) 配列の。 例えば, `[[1,2,3], [4,5]]`. + +**例** + +``` sql +SELECT flatten([[[1]], [[2], [3]]]) +``` + +``` text +┌─flatten(array(array([1]), array([2], [3])))─┐ +│ [1,2,3] │ +└─────────────────────────────────────────────┘ +``` + +## arrayCompact {#arraycompact} + +配列から連続した重複する要素を削除します。 結果値の順序は、ソース配列の順序によって決まります。 + +**構文** + +``` sql +arrayCompact(arr) +``` + +**パラメータ** + +`arr` — The [配列](../../sql_reference/data_types/array.md) 検査する。 + +**戻り値** + +重複のない配列。 + +タイプ: `Array`. 
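+
+なお、除去されるのは連続した重複のみです。 すべての重複を除去する `arrayDistinct` との違いを示すスケッチ:
+
+``` sql
+SELECT
+    arrayCompact([1, 2, 1, 1]) AS compact,  -- [1,2,1]:連続した重複だけを除去
+    arrayDistinct([1, 2, 1, 1]) AS dist     -- [1,2]:すべての重複を除去
+```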
+ +**例えば** + +クエリ: + +``` sql +SELECT arrayCompact([1, 1, nan, nan, 2, 3, 3, 3]) +``` + +結果: + +``` text +┌─arrayCompact([1, 1, nan, nan, 2, 3, 3, 3])─┐ +│ [1,nan,nan,2,3] │ +└────────────────────────────────────────────┘ +``` + +## arrayZip {#arrayzip} + +Combine multiple Array type columns into one Array\[Tuple(…)\] column + +**構文** + +``` sql +arrayZip(arr1, arr2, ..., arrN) +``` + +**パラメータ** + +`arr` — Any number of [配列](../../sql_reference/data_types/array.md) 結合する列を入力します。 + +**戻り値** + +The result of Array\[Tuple(…)\] type after the combination of these arrays + +**例えば** + +クエリ: + +``` sql +SELECT arrayZip(['a', 'b', 'c'], ['d', 'e', 'f']); +``` + +結果: + +``` text +┌─arrayZip(['a', 'b', 'c'], ['d', 'e', 'f'])─┐ +│ [('a','d'),('b','e'),('c','f')] │ +└────────────────────────────────────────────┘ +``` + +## arrayAUC {#arrayauc} + +計算auc(機械学習の概念である曲線の下の面積は、詳細を参照してください:https://en.wikipedia.org/wiki/receiver\_operating\_characteristic\#area\_under\_the\_curve). + +**構文** + +``` sql +arrayAUC(arr_scores, arr_labels) +``` + +**パラメータ** +- `arr_scores` — scores prediction model gives. +- `arr_labels` — labels of samples, usually 1 for positive sample and 0 for negtive sample. + +**戻り値** +Float64型のAUC値を返します。 + +**例えば** +クエリ: + +``` sql +select arrayAUC([0.1, 0.4, 0.35, 0.8], [0, 0, 1, 1]) +``` + +結果: + +``` text +┌─arrayAUC([0.1, 0.4, 0.35, 0.8], [0, 0, 1, 1])─┐ +│ 0.75 │ +└────────────────────────────────────────---──┘ +``` + +[元の記事](https://clickhouse.tech/docs/en/query_language/functions/array_functions/) diff --git a/docs/ja/sql_reference/functions/array_join.md b/docs/ja/sql_reference/functions/array_join.md deleted file mode 120000 index 9fb5dad2d17..00000000000 --- a/docs/ja/sql_reference/functions/array_join.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/functions/array_join.md \ No newline at end of file diff --git a/docs/ja/sql_reference/functions/array_join.md b/docs/ja/sql_reference/functions/array_join.md new file mode 100644 index 00000000000..4cbc0690b8b --- /dev/null +++ b/docs/ja/sql_reference/functions/array_join.md @@ -0,0 +1,37 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 61 +toc_title: arrayJoin +--- + +# arrayJoin関数 {#functions_arrayjoin} + +これは非常に珍しい機能です。 + +通常の関数は行のセットを変更するのではなく、各行(map)の値を変更するだけです。 +集計関数は、行のセットを圧縮します(折り畳みまたは縮小)。 +その ‘arrayJoin’ 関数は、各行を受け取り、行のセット(展開)を生成します。 + +この関数は、引数として配列を受け取り、配列内の要素の数のために複数の行にソース行を伝播します。 +列内のすべての値は、この関数が適用される列内の値を除いて単純にコピーされます。 + +クエリは複数を使用できます `arrayJoin` 機能。 この場合、変換は複数回実行されます。 + +メモselectクエリの配列結合の構文は、より広範な可能性を提供します。 + +例えば: + +``` sql +SELECT arrayJoin([1, 2, 3] AS src) AS dst, 'Hello', src +``` + +``` text +┌─dst─┬─\'Hello\'─┬─src─────┐ +│ 1 │ Hello │ [1,2,3] │ +│ 2 │ Hello │ [1,2,3] │ +│ 3 │ Hello │ [1,2,3] │ +└─────┴───────────┴─────────┘ +``` + +[元の記事](https://clickhouse.tech/docs/en/query_language/functions/array_join/) diff --git a/docs/ja/sql_reference/functions/bit_functions.md b/docs/ja/sql_reference/functions/bit_functions.md deleted file mode 120000 index 53d37658c03..00000000000 --- a/docs/ja/sql_reference/functions/bit_functions.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/functions/bit_functions.md \ No newline at end of file diff --git a/docs/ja/sql_reference/functions/bit_functions.md b/docs/ja/sql_reference/functions/bit_functions.md new file mode 100644 index 00000000000..d5d8c0ca769 --- /dev/null +++ b/docs/ja/sql_reference/functions/bit_functions.md @@ -0,0 +1,255 @@ +--- +machine_translated: true +machine_translated_rev: 
d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 48 +toc_title: "\u30D3\u30C3\u30C8" +--- + +# ビット機能 {#bit-functions} + +ビット関数は、uint8、uint16、uint32、uint64、int8、int16、int32、int64、float32、またはfloat64のいずれかの種類のペアで機能します。 + +結果の型は、その引数の最大ビットに等しいビットを持つ整数です。 引数のうち少なくとも一方が署名されている場合、結果は署名された番号になります。 引数が浮動小数点数の場合、int64にキャストされます。 + +## bitAnd(a,b) {#bitanda-b} + +## bitOr(a,b) {#bitora-b} + +## bitXor(a,b) {#bitxora-b} + +## bitNot(a) {#bitnota} + +## ビットシフトレフト(a,b) {#bitshiftlefta-b} + +## ビットシフトライト(a,b) {#bitshiftrighta-b} + +## bitRotateLeft(a,b) {#bitrotatelefta-b} + +## bitRotateRight(a,b) {#bitrotaterighta-b} + +## 適者生存 {#bittest} + +任意の整数を受け取り、それを [バイナリ形式](https://en.wikipedia.org/wiki/Binary_number)、指定された位置にあるビットの値を返します。 カウントダウンは右から左に0から始まります。 + +**構文** + +``` sql +SELECT bitTest(number, index) +``` + +**パラメータ** + +- `number` – integer number. +- `index` – position of bit. + +**戻り値** + +指定された位置にあるbitの値を返します。 + +タイプ: `UInt8`. + +**例えば** + +たとえば、ベース43(バイナリ)数値システムの数値は101011です。 + +クエリ: + +``` sql +SELECT bitTest(43, 1) +``` + +結果: + +``` text +┌─bitTest(43, 1)─┐ +│ 1 │ +└────────────────┘ +``` + +別の例: + +クエリ: + +``` sql +SELECT bitTest(43, 2) +``` + +結果: + +``` text +┌─bitTest(43, 2)─┐ +│ 0 │ +└────────────────┘ +``` + +## bitTestAll {#bittestall} + +の結果を返します [論理結合](https://en.wikipedia.org/wiki/Logical_conjunction) 与えられた位置にあるすべてのビットの(and演算子)。 カウントダウンは右から左に0から始まります。 + +ビット演算のためのconjuction: + +0 AND 0 = 0 + +0 AND 1 = 0 + +1 AND 0 = 0 + +1 AND 1 = 1 + +**構文** + +``` sql +SELECT bitTestAll(number, index1, index2, index3, index4, ...) +``` + +**パラメータ** + +- `number` – integer number. +- `index1`, `index2`, `index3`, `index4` – positions of bit. For example, for set of positions (`index1`, `index2`, `index3`, `index4` すべてのポジションがtrueの場合にのみtrue (`index1` ⋀ `index2`, ⋀ `index3` ⋀ `index4`). + +**戻り値** + +論理conjuctionの結果を返します。 + +タイプ: `UInt8`. + +**例えば** + +たとえば、ベース43(バイナリ)数値システムの数値は101011です。 + +クエリ: + +``` sql +SELECT bitTestAll(43, 0, 1, 3, 5) +``` + +結果: + +``` text +┌─bitTestAll(43, 0, 1, 3, 5)─┐ +│ 1 │ +└────────────────────────────┘ +``` + +別の例: + +クエリ: + +``` sql +SELECT bitTestAll(43, 0, 1, 3, 5, 2) +``` + +結果: + +``` text +┌─bitTestAll(43, 0, 1, 3, 5, 2)─┐ +│ 0 │ +└───────────────────────────────┘ +``` + +## bitTestAny {#bittestany} + +の結果を返します [論理和](https://en.wikipedia.org/wiki/Logical_disjunction) 与えられた位置にあるすべてのビットの(または演算子)。 カウントダウンは右から左に0から始まります。 + +ビットごとの操作のための分離: + +0 OR 0 = 0 + +0 OR 1 = 1 + +1 OR 0 = 1 + +1 OR 1 = 1 + +**構文** + +``` sql +SELECT bitTestAny(number, index1, index2, index3, index4, ...) +``` + +**パラメータ** + +- `number` – integer number. +- `index1`, `index2`, `index3`, `index4` – positions of bit. + +**戻り値** + +論理disjuctionの結果を返します。 + +タイプ: `UInt8`. + +**例えば** + +たとえば、ベース43(バイナリ)数値システムの数値は101011です。 + +クエリ: + +``` sql +SELECT bitTestAny(43, 0, 2) +``` + +結果: + +``` text +┌─bitTestAny(43, 0, 2)─┐ +│ 1 │ +└──────────────────────┘ +``` + +別の例: + +クエリ: + +``` sql +SELECT bitTestAny(43, 4, 2) +``` + +結果: + +``` text +┌─bitTestAny(43, 4, 2)─┐ +│ 0 │ +└──────────────────────┘ +``` + +## ビット数 {#bitcount} + +数値のバイナリ表現で設定されたビット数を計算します。 + +**構文** + +``` sql +bitCount(x) +``` + +**パラメータ** + +- `x` — [整数](../../sql_reference/data_types/int_uint.md) または [浮動小数点](../../sql_reference/data_types/float.md) 番号 この関数は、メモリ内の値表現を使用します。 浮動小数点数をサポートすることができます。 + +**戻り値** + +- 入力番号内のビット数を一つに設定します。 + +この関数は入力値を大きな型に変換しません ([記号の拡張子](https://en.wikipedia.org/wiki/Sign_extension)). 例えば, `bitCount(toUInt8(-1)) = 8`. + +タイプ: `UInt8`. 
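+
+直前の注意点(符号拡張が行われないこと)を確認する例です:
+
+``` sql
+SELECT
+    bitCount(toUInt8(-1)) AS c8,   -- 8:UInt8 の 8 ビットすべてが 1
+    bitCount(toUInt16(-1)) AS c16  -- 16:UInt16 の 16 ビットすべてが 1
+```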
+ +**例えば** + +例えば、数333を取ります。 そのバイナリ表現:00000000101001101。 + +クエリ: + +``` sql +SELECT bitCount(333) +``` + +結果: + +``` text +┌─bitCount(333)─┐ +│ 5 │ +└───────────────┘ +``` + +[元の記事](https://clickhouse.tech/docs/en/query_language/functions/bit_functions/) diff --git a/docs/ja/sql_reference/functions/bitmap_functions.md b/docs/ja/sql_reference/functions/bitmap_functions.md deleted file mode 120000 index 432951da5ed..00000000000 --- a/docs/ja/sql_reference/functions/bitmap_functions.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/functions/bitmap_functions.md \ No newline at end of file diff --git a/docs/ja/sql_reference/functions/bitmap_functions.md b/docs/ja/sql_reference/functions/bitmap_functions.md new file mode 100644 index 00000000000..ddcef7b2093 --- /dev/null +++ b/docs/ja/sql_reference/functions/bitmap_functions.md @@ -0,0 +1,496 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 49 +toc_title: Bitmap +--- + +# ビットマップ関数 {#bitmap-functions} + +ビットマップ関数は、二つのビットマップオブジェクトの値の計算のために働く、そのような、および、または、xor、およびない、などの式の計算を使用し + +ビットマップオブジェクトの構築方法には2種類あります。 一つは-stateを持つ集約関数groupbitmapによって構築されることであり、もう一つは配列オブジェクトによって構築されることである。 また、bitmapオブジェクトをarrayオブジェクトに変換します。 + +RoaringBitmapは、ビットマップオブジェクトの実際の格納中にデータ構造にラップされます。 基数が32以下の場合、Set objetが使用されます。 カーディナリティが32より大きい場合、Rooaringbitmapオブジェクトが使用されます。 そのため、低カーディナリティセットの保存が高速になります。 + +RoaringBitmapの詳細については、以下を参照してください: [鳴き声](https://github.com/RoaringBitmap/CRoaring). + +## bitmapBuild {#bitmap_functions-bitmapbuild} + +符号なし整数配列からビットマップを作成します。 + +``` sql +bitmapBuild(array) +``` + +**パラメータ** + +- `array` – unsigned integer array. + +**例えば** + +``` sql +SELECT bitmapBuild([1, 2, 3, 4, 5]) AS res, toTypeName(res) +``` + +``` text +┌─res─┬─toTypeName(bitmapBuild([1, 2, 3, 4, 5]))─────┐ +│  │ AggregateFunction(groupBitmap, UInt8) │ +└─────┴──────────────────────────────────────────────┘ +``` + +## bitmapToArray {#bitmaptoarray} + +ビットマップを整数配列に変換します。 + +``` sql +bitmapToArray(bitmap) +``` + +**パラメータ** + +- `bitmap` – bitmap object. + +**例えば** + +``` sql +SELECT bitmapToArray(bitmapBuild([1, 2, 3, 4, 5])) AS res +``` + +``` text +┌─res─────────┐ +│ [1,2,3,4,5] │ +└─────────────┘ +``` + +## bitmapSubsetInRange {#bitmap-functions-bitmapsubsetinrange} + +指定された範囲のサブセットを返します(range\_endは含みません)。 + +``` sql +bitmapSubsetInRange(bitmap, range_start, range_end) +``` + +**パラメータ** + +- `bitmap` – [ビットマップ](#bitmap_functions-bitmapbuild). +- `range_start` – range start point. Type: [UInt32](../../sql_reference/data_types/int_uint.md). +- `range_end` – range end point(excluded). Type: [UInt32](../../sql_reference/data_types/int_uint.md). + +**例えば** + +``` sql +SELECT bitmapToArray(bitmapSubsetInRange(bitmapBuild([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,100,200,500]), toUInt32(30), toUInt32(200))) AS res +``` + +``` text +┌─res───────────────┐ +│ [30,31,32,33,100] │ +└───────────────────┘ +``` + +## bitmapSubsetLimit {#bitmapsubsetlimit} + +ビットマップのサブセットを作成します。 `range_start` と `cardinality_limit`. + +**構文** + +``` sql +bitmapSubsetLimit(bitmap, range_start, cardinality_limit) +``` + +**パラメータ** + +- `bitmap` – [ビットマップ](#bitmap_functions-bitmapbuild). +- `range_start` – The subset starting point. Type: [UInt32](../../sql_reference/data_types/int_uint.md). +- `cardinality_limit` – The subset cardinality upper limit. Type: [UInt32](../../sql_reference/data_types/int_uint.md). + +**戻り値** + +サブセット。 + +タイプ: `Bitmap object`. 
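+
+`bitmapSubsetInRange` が範囲の終点で区切るのに対し、この関数は要素数で区切ります。 違いを示す小さなスケッチ:
+
+``` sql
+SELECT
+    bitmapToArray(bitmapSubsetInRange(bitmapBuild([1,2,3,4,5]), toUInt32(2), toUInt32(4))) AS by_range, -- [2,3]:終点 4 は含まない
+    bitmapToArray(bitmapSubsetLimit(bitmapBuild([1,2,3,4,5]), toUInt32(2), toUInt32(4))) AS by_limit    -- [2,3,4,5]:2 以上の要素を最大 4 個
+```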
+ +**例えば** + +クエリ: + +``` sql +SELECT bitmapToArray(bitmapSubsetLimit(bitmapBuild([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,100,200,500]), toUInt32(30), toUInt32(200))) AS res +``` + +結果: + +``` text +┌─res───────────────────────┐ +│ [30,31,32,33,100,200,500] │ +└───────────────────────────┘ +``` + +## bitmapContains {#bitmap_functions-bitmapcontains} + +かどうかをチェックしますビットマップを含む要素になります。 + +``` sql +bitmapContains(haystack, needle) +``` + +**パラメータ** + +- `haystack` – [ビットマップ](#bitmap_functions-bitmapbuild)、関数が検索する場所。 +- `needle` – Value that the function searches. Type: [UInt32](../../sql_reference/data_types/int_uint.md). + +**戻り値** + +- 0 — If `haystack` 含まない `needle`. +- 1 — If `haystack` 含む `needle`. + +タイプ: `UInt8`. + +**例えば** + +``` sql +SELECT bitmapContains(bitmapBuild([1,5,7,9]), toUInt32(9)) AS res +``` + +``` text +┌─res─┐ +│ 1 │ +└─────┘ +``` + +## bitmapHasAny {#bitmaphasany} + +るかどうかを判二つのビットマップしていることで交差点にある。 + +``` sql +bitmapHasAny(bitmap1, bitmap2) +``` + +あなたが確信している場合 `bitmap2` 一つの要素が含まれています。 [bitmapContains](#bitmap_functions-bitmapcontains) 機能。 これは、より効率的に動作します。 + +**パラメータ** + +- `bitmap*` – bitmap object. + +**戻り値** + +- `1`,もし `bitmap1` と `bitmap2` 少なくとも同様の要素を持っている。 +- `0` そうでなければ + +**例えば** + +``` sql +SELECT bitmapHasAny(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res +``` + +``` text +┌─res─┐ +│ 1 │ +└─────┘ +``` + +## bitmapHasAll {#bitmaphasall} + +に類似した `hasAll(array, array)` 最初のビットマップに1番目のビットマップのすべての要素が含まれる場合は0を返します。 +二番目の引数が空のビットマップの場合、1を返します。 + +``` sql +bitmapHasAll(bitmap,bitmap) +``` + +**パラメータ** + +- `bitmap` – bitmap object. + +**例えば** + +``` sql +SELECT bitmapHasAll(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res +``` + +``` text +┌─res─┐ +│ 0 │ +└─────┘ +``` + +## bitmapCardinality {#bitmapcardinality} + +UInt64型のビットマップのカーディナリティを再度実行可能。 + +``` sql +bitmapCardinality(bitmap) +``` + +**パラメータ** + +- `bitmap` – bitmap object. + +**例えば** + +``` sql +SELECT bitmapCardinality(bitmapBuild([1, 2, 3, 4, 5])) AS res +``` + +``` text +┌─res─┐ +│ 5 │ +└─────┘ +``` + +## bitmapmincomment {#bitmapmin} + +セット内のタイプuint64の最小値を再度取り消し、セットが空の場合はuint32\_max。 + + bitmapMin(bitmap) + +**パラメータ** + +- `bitmap` – bitmap object. + +**例えば** + +``` sql +SELECT bitmapMin(bitmapBuild([1, 2, 3, 4, 5])) AS res +``` + + ┌─res─┐ + │ 1 │ + └─────┘ + +## bitmapMax {#bitmapmax} + +セット内のuint64型の最大値を取り消し、セットが空の場合は0になります。 + + bitmapMax(bitmap) + +**パラメータ** + +- `bitmap` – bitmap object. + +**例えば** + +``` sql +SELECT bitmapMax(bitmapBuild([1, 2, 3, 4, 5])) AS res +``` + + ┌─res─┐ + │ 5 │ + └─────┘ + +## bitmapTransform {#bitmaptransform} + +ビットマップ内の値の配列を別の値の配列に変換すると、結果は新しいビットマップになります。 + + bitmapTransform(bitmap, from_array, to_array) + +**パラメータ** + +- `bitmap` – bitmap object. +- `from_array` – UInt32 array. For idx in range \[0, from\_array.size()), if bitmap contains from\_array\[idx\], then replace it with to\_array\[idx\]. Note that the result depends on array ordering if there are common elements between from\_array and to\_array. +- `to_array` – UInt32 array, its size shall be the same to from\_array. + +**例えば** + +``` sql +SELECT bitmapToArray(bitmapTransform(bitmapBuild([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), cast([5,999,2] as Array(UInt32)), cast([2,888,20] as Array(UInt32)))) AS res +``` + + ┌─res───────────────────┐ + │ [1,3,4,6,7,8,9,10,20] │ + └───────────────────────┘ + +## bitmapAnd {#bitmapand} + +二つのビットマップと計算、結果は新しいビットマップです。 + +``` sql +bitmapAnd(bitmap,bitmap) +``` + +**パラメータ** + +- `bitmap` – bitmap object. 
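+
+ビットマップオブジェクトは `bitmapBuild` のほかに、冒頭で述べたとおり集約関数 `groupBitmapState` からも得られます。 UInt32 の列 `a`、`b` を持つ仮のテーブル `t` を想定したスケッチです:
+
+``` sql
+SELECT bitmapCardinality(bitmapAnd(groupBitmapState(a), groupBitmapState(b))) AS common
+FROM t
+-- 列 a と列 b の両方に現れる値の個数
+```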
+ +**例えば** + +``` sql +SELECT bitmapToArray(bitmapAnd(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS res +``` + +``` text +┌─res─┐ +│ [3] │ +└─────┘ +``` + +## bitmapOr {#bitmapor} + +二つのビットマップや計算、結果は新しいビットマップです。 + +``` sql +bitmapOr(bitmap,bitmap) +``` + +**パラメータ** + +- `bitmap` – bitmap object. + +**例えば** + +``` sql +SELECT bitmapToArray(bitmapOr(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS res +``` + +``` text +┌─res─────────┐ +│ [1,2,3,4,5] │ +└─────────────┘ +``` + +## bitmapXor {#bitmapxor} + +二つのビットマップxor計算、結果は新しいビットマップです。 + +``` sql +bitmapXor(bitmap,bitmap) +``` + +**パラメータ** + +- `bitmap` – bitmap object. + +**例えば** + +``` sql +SELECT bitmapToArray(bitmapXor(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS res +``` + +``` text +┌─res───────┐ +│ [1,2,4,5] │ +└───────────┘ +``` + +## bitmapAndnot {#bitmapandnot} + +二つのビットマップと計算ではなく、結果は新しいビットマップです。 + +``` sql +bitmapAndnot(bitmap,bitmap) +``` + +**パラメータ** + +- `bitmap` – bitmap object. + +**例えば** + +``` sql +SELECT bitmapToArray(bitmapAndnot(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS res +``` + +``` text +┌─res───┐ +│ [1,2] │ +└───────┘ +``` + +## bitmapAndCardinality {#bitmapandcardinality} + +二つのビットマップと計算、型uint64の戻り値のカーディナリティ。 + +``` sql +bitmapAndCardinality(bitmap,bitmap) +``` + +**パラメータ** + +- `bitmap` – bitmap object. + +**例えば** + +``` sql +SELECT bitmapAndCardinality(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res; +``` + +``` text +┌─res─┐ +│ 1 │ +└─────┘ +``` + +## ビットmapcardinality {#bitmaporcardinality} + +二つのビットマップまたは計算、型uint64の戻り値のカーディナリティ。 + +``` sql +bitmapOrCardinality(bitmap,bitmap) +``` + +**パラメータ** + +- `bitmap` – bitmap object. + +**例えば** + +``` sql +SELECT bitmapOrCardinality(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res; +``` + +``` text +┌─res─┐ +│ 5 │ +└─────┘ +``` + +## bitmapXorCardinality {#bitmapxorcardinality} + +二つのビットマップxor計算、型uint64の戻り値のカーディナリティ。 + +``` sql +bitmapXorCardinality(bitmap,bitmap) +``` + +**パラメータ** + +- `bitmap` – bitmap object. + +**例えば** + +``` sql +SELECT bitmapXorCardinality(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res; +``` + +``` text +┌─res─┐ +│ 4 │ +└─────┘ +``` + +## bitmapAndnotCardinality {#bitmapandnotcardinality} + +二つのビットマップと計算ではなく、型uint64のカーディナリティを返します。 + +``` sql +bitmapAndnotCardinality(bitmap,bitmap) +``` + +**パラメータ** + +- `bitmap` – bitmap object. 
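+
+この関数は `bitmapCardinality(bitmapAndnot(...))` と同じ結果を返します。 等価性の確認用スケッチ:
+
+``` sql
+SELECT
+    bitmapAndnotCardinality(bitmapBuild([1,2,3]), bitmapBuild([3,4,5])) AS direct,
+    bitmapCardinality(bitmapAndnot(bitmapBuild([1,2,3]), bitmapBuild([3,4,5]))) AS composed
+-- どちらも 2([1,2] の要素数)
+```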
+ +**例えば** + +``` sql +SELECT bitmapAndnotCardinality(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res; +``` + +``` text +┌─res─┐ +│ 2 │ +└─────┘ +``` + +[元の記事](https://clickhouse.tech/docs/en/query_language/functions/bitmap_functions/) diff --git a/docs/ja/sql_reference/functions/comparison_functions.md b/docs/ja/sql_reference/functions/comparison_functions.md deleted file mode 120000 index d51247ae39c..00000000000 --- a/docs/ja/sql_reference/functions/comparison_functions.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/functions/comparison_functions.md \ No newline at end of file diff --git a/docs/ja/sql_reference/functions/comparison_functions.md b/docs/ja/sql_reference/functions/comparison_functions.md new file mode 100644 index 00000000000..e1b7364bda6 --- /dev/null +++ b/docs/ja/sql_reference/functions/comparison_functions.md @@ -0,0 +1,37 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 36 +toc_title: "\u6BD4\u8F03" +--- + +# 比較関数 {#comparison-functions} + +比較関数は常に0または1(uint8)を返します。 + +次のタイプを比較できます: + +- 数字 +- 文字列と固定文字列 +- 日付 +- 時間のある日付 + +各グループ内ではなく、異なるグループ間。 + +たとえば、日付と文字列を比較することはできません。 文字列を日付に変換するには関数を使用する必要があります。 + +文字列はバイトで比較されます。 短い文字列は、それで始まり、少なくとも一つ以上の文字を含むすべての文字列よりも小さくなります。 + +## 等号、a=bおよびa===b演算子 {#function-equals} + +## notEquals,a! 演算子=bとa\<\>b {#function-notequals} + +## 演算子 {#function-less} + +## より大きい、\>演算子 {#function-greater} + +## リース、\<=演算子 {#function-lessorequals} + +## greaterOrEquals,\>=演算子 {#function-greaterorequals} + +[元の記事](https://clickhouse.tech/docs/en/query_language/functions/comparison_functions/) diff --git a/docs/ja/sql_reference/functions/conditional_functions.md b/docs/ja/sql_reference/functions/conditional_functions.md deleted file mode 120000 index 18090abf24d..00000000000 --- a/docs/ja/sql_reference/functions/conditional_functions.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/functions/conditional_functions.md \ No newline at end of file diff --git a/docs/ja/sql_reference/functions/conditional_functions.md b/docs/ja/sql_reference/functions/conditional_functions.md new file mode 100644 index 00000000000..4bb83bd44f7 --- /dev/null +++ b/docs/ja/sql_reference/functions/conditional_functions.md @@ -0,0 +1,207 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 43 +toc_title: "\u6761\u4EF6\u4ED8\u304D " +--- + +# 条件関数 {#conditional-functions} + +## もし {#if} + +条件分岐を制御します。 と異なりほとんどのシステムclickhouse常に評価さの両方表現 `then` と `else`. + +**構文** + +``` sql +SELECT if(cond, then, else) +``` + +条件の場合 `cond` ゼロ以外の値として評価し、式の結果を返します `then`、および式の結果 `else`、存在する場合は、スキップされます。 この `cond` ゼロまたは `NULL` その後の結果 `then` 式はスキップされる。 `else` 式が存在する場合は、その式が返されます。 + +**パラメータ** + +- `cond` – The condition for evaluation that can be zero or not. The type is UInt8, Nullable(UInt8) or NULL. 
+- `then` -条件が満たされた場合に返される式。 +- `else` -条件が満たされていない場合に返される式。 + +**戻り値** + +関数が実行されます `then` と `else` 式とその結果を返します。 `cond` ゼロかどうかに終わった。 + +**例えば** + +クエリ: + +``` sql +SELECT if(1, plus(2, 2), plus(2, 6)) +``` + +結果: + +``` text +┌─plus(2, 2)─┐ +│ 4 │ +└────────────┘ +``` + +クエリ: + +``` sql +SELECT if(0, plus(2, 2), plus(2, 6)) +``` + +結果: + +``` text +┌─plus(2, 6)─┐ +│ 8 │ +└────────────┘ +``` + +- `then` と `else` 共通タイプが最も小さい。 + +**例えば:** + +これを取る `LEFT_RIGHT` テーブル: + +``` sql +SELECT * +FROM LEFT_RIGHT + +┌─left─┬─right─┐ +│ ᴺᵁᴸᴸ │ 4 │ +│ 1 │ 3 │ +│ 2 │ 2 │ +│ 3 │ 1 │ +│ 4 │ ᴺᵁᴸᴸ │ +└──────┴───────┘ +``` + +次のクエリは比較します `left` と `right` 値: + +``` sql +SELECT + left, + right, + if(left < right, 'left is smaller than right', 'right is greater or equal than left') AS is_smaller +FROM LEFT_RIGHT +WHERE isNotNull(left) AND isNotNull(right) + +┌─left─┬─right─┬─is_smaller──────────────────────────┐ +│ 1 │ 3 │ left is smaller than right │ +│ 2 │ 2 │ right is greater or equal than left │ +│ 3 │ 1 │ right is greater or equal than left │ +└──────┴───────┴─────────────────────────────────────┘ +``` + +メモ: `NULL` この例では値は使用されません。 [条件のnull値](#null-values-in-conditionals) セクション。 + +## 三項演算子 {#ternary-operator} + +この作品と同じ `if` 機能。 + +構文: `cond ? then : else` + +を返します `then` この `cond` true(ゼロより大きい)と評価され、それ以外の場合は `else`. + +- `cond` のタイプである必要があります `UInt8`、と `then` と `else` 共通タイプが最も小さい。 + +- `then` と `else` できる。 `NULL` + +**また見なさい** + +- [ifNotFinite](other_functions.md#ifnotfinite). + +## multif {#multiif} + +あなたが書くことができます [CASE](../operators.md#operator_case) クエリでよりコンパクトに演算子。 + +構文: `multiIf(cond_1, then_1, cond_2, then_2, ..., else)` + +**パラメータ:** + +- `cond_N` — The condition for the function to return `then_N`. +- `then_N` — The result of the function when executed. +- `else` — The result of the function if none of the conditions is met. + +この関数は、 `2N+1` パラメータ。 + +**戻り値** + +この関数は、いずれかの値を返します `then_N` または `else`、条件に応じて `cond_N`. + +**例えば** + +再度を使用して `LEFT_RIGHT` テーブル。 + +``` sql +SELECT + left, + right, + multiIf(left < right, 'left is smaller', left > right, 'left is greater', left = right, 'Both equal', 'Null value') AS result +FROM LEFT_RIGHT + +┌─left─┬─right─┬─result──────────┐ +│ ᴺᵁᴸᴸ │ 4 │ Null value │ +│ 1 │ 3 │ left is smaller │ +│ 2 │ 2 │ Both equal │ +│ 3 │ 1 │ left is greater │ +│ 4 │ ᴺᵁᴸᴸ │ Null value │ +└──────┴───────┴─────────────────┘ +``` + +## 条件付き結果を直接使用する {#using-conditional-results-directly} + +条件は常に次のようになります `0`, `1` または `NULL`. できますので使用条件と結果が直接このような: + +``` sql +SELECT left < right AS is_small +FROM LEFT_RIGHT + +┌─is_small─┐ +│ ᴺᵁᴸᴸ │ +│ 1 │ +│ 0 │ +│ 0 │ +│ ᴺᵁᴸᴸ │ +└──────────┘ +``` + +## 条件のnull値 {#null-values-in-conditionals} + +とき `NULL` 値は条件文に含まれ、結果は次のようになります `NULL`. + +``` sql +SELECT + NULL < 1, + 2 < NULL, + NULL < NULL, + NULL = NULL + +┌─less(NULL, 1)─┬─less(2, NULL)─┬─less(NULL, NULL)─┬─equals(NULL, NULL)─┐ +│ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ +└───────────────┴───────────────┴──────────────────┴────────────────────┘ +``` + +したがって、型が `Nullable`. + +次の例は、equals条件を追加できないことを示しています `multiIf`. 
+ +``` sql +SELECT + left, + right, + multiIf(left < right, 'left is smaller', left > right, 'right is smaller', 'Both equal') AS faulty_result +FROM LEFT_RIGHT + +┌─left─┬─right─┬─faulty_result────┐ +│ ᴺᵁᴸᴸ │ 4 │ Both equal │ +│ 1 │ 3 │ left is smaller │ +│ 2 │ 2 │ Both equal │ +│ 3 │ 1 │ right is smaller │ +│ 4 │ ᴺᵁᴸᴸ │ Both equal │ +└──────┴───────┴──────────────────┘ +``` + +[元の記事](https://clickhouse.tech/docs/en/query_language/functions/conditional_functions/) diff --git a/docs/ja/sql_reference/functions/date_time_functions.md b/docs/ja/sql_reference/functions/date_time_functions.md deleted file mode 120000 index eb99cee34a4..00000000000 --- a/docs/ja/sql_reference/functions/date_time_functions.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/functions/date_time_functions.md \ No newline at end of file diff --git a/docs/ja/sql_reference/functions/date_time_functions.md b/docs/ja/sql_reference/functions/date_time_functions.md new file mode 100644 index 00000000000..6a076711e4a --- /dev/null +++ b/docs/ja/sql_reference/functions/date_time_functions.md @@ -0,0 +1,450 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 39 +toc_title: "\u65E5\u4ED8\u3068\u6642\u523B\u306E\u64CD\u4F5C" +--- + +# 日付と時刻を操作するための関数 {#functions-for-working-with-dates-and-times} + +タイムゾーンのサポート + +タイムゾーンの論理的使用を持つ日付と時刻を操作するためのすべての関数は、二番目の省略可能なタイムゾーン引数を受け入れることができます。 例:アジア/エカテリンブルク。 この場合、ローカル(デフォルト)の代わりに指定されたタイムゾーンを使用します。 + +``` sql +SELECT + toDateTime('2016-06-15 23:00:00') AS time, + toDate(time) AS date_local, + toDate(time, 'Asia/Yekaterinburg') AS date_yekat, + toString(time, 'US/Samoa') AS time_samoa +``` + +``` text +┌────────────────time─┬─date_local─┬─date_yekat─┬─time_samoa──────────┐ +│ 2016-06-15 23:00:00 │ 2016-06-15 │ 2016-06-16 │ 2016-06-15 09:00:00 │ +└─────────────────────┴────────────┴────────────┴─────────────────────┘ +``` + +UTCと時間数が異なるタイムゾーンのみがサポートされます。 + +## トティメゾン {#totimezone} + +時刻または日付と時刻を指定したタイムゾーンに変換します。 + +## toYear {#toyear} + +時刻を含む日付または日付を年番号(ad)を含むuint16番号に変換します。 + +## toQuarter {#toquarter} + +時刻を含む日付または日付を、四半期番号を含むuint8番号に変換します。 + +## トモント県france.kgm {#tomonth} + +時刻を含む日付または日付を、月番号(1~12)を含むuint8番号に変換します。 + +## 今日の年 {#todayofyear} + +時刻を含む日付または日付を、その年の日付の番号(1-366)を含むuint16番号に変換します。 + +## toDayOfMonth {#todayofmonth} + +時刻を含む日付または日付を、その月の日の番号(1-31)を含むuint8番号に変換します。 + +## toDayOfWeek {#todayofweek} + +時刻を含む日付または日付を、曜日の番号を含むuint8番号に変換します(月曜日は1、日曜日は7)。 + +## tohourgenericname {#tohour} + +時刻を含む日付を、uint8の24時間(0-23)の時刻を含む数値に変換します。 +This function assumes that if clocks are moved ahead, it is by one hour and occurs at 2 a.m., and if clocks are moved back, it is by one hour and occurs at 3 a.m. (which is not always true – even in Moscow the clocks were twice changed at a different time). + +## toMinute {#tominute} + +時刻を含む日付を、時刻の分(0~59)の数を含むuint8数値に変換します。 + +## ト秒 {#tosecond} + +Timeを含む日付をUInt8の数値に変換します(0~59)。 +うるう秒は説明されていません。 + +## toUnixTimestamp {#to-unix-timestamp} + +For DateTime argument:値を内部の数値表現(Unixタイムスタンプ)に変換します。 +文字列引数の場合:タイムゾーンに従って文字列からのdatetimeを解析します(オプションの第二引数、サーバーのタイムゾーンはデフォルトで使用されます)。 +日付の引数の場合:この動作は指定されていません。 + +**構文** + +``` sql +toUnixTimestamp(datetime) +toUnixTimestamp(str, [timezone]) +``` + +**戻り値** + +- Unixタイムスタンプを返す。 + +タイプ: `UInt32`. 
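+
+DateTime引数の形式について、`toDateTime` と互いに逆変換の関係にあることを示すスケッチです:
+
+``` sql
+SELECT
+    toUnixTimestamp(toDateTime('2017-11-05 08:07:47', 'Asia/Tokyo')) AS ts,  -- 1509836867
+    toDateTime(1509836867, 'Asia/Tokyo') AS dt                               -- 2017-11-05 08:07:47
+```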
+ +**例えば** + +クエリ: + +``` sql +SELECT toUnixTimestamp('2017-11-05 08:07:47', 'Asia/Tokyo') AS unix_timestamp +``` + +結果: + +``` text +┌─unix_timestamp─┐ +│ 1509836867 │ +└────────────────┘ +``` + +## toStartOfYear {#tostartofyear} + +日付または日付のある時刻を年の最初の日に切り捨てます。 +日付を返します。 + +## tostartofisoyearcomment {#tostartofisoyear} + +日付または日付と時刻をiso暦年の最初の日に切り捨てます。 +日付を返します。 + +## toStartOfQuarter {#tostartofquarter} + +日付または日付のある時刻を四半期の最初の日に切り捨てます。 +四半期の最初の日はどちらかです1月,1四月,1七月,若しくは1十月. +日付を返します。 + +## toStartOfMonth {#tostartofmonth} + +日付または日付と時刻を月の最初の日に切り捨てます。 +日付を返します。 + +!!! attention "注意" + 間違った日付を解析する動作は実装固有です。 clickhouseはゼロの日付を返したり、例外をスローしたりします “natural” オーバーフロー + +## toMonday {#tomonday} + +日付または日付と時刻を最も近い月曜日に切り捨てます。 +日付を返します。 + +## toStartOfWeek(t\[,mode\]) {#tostartofweektmode} + +日付または日付と時刻を、モード別に最も近い日曜日または月曜日に切り捨てます。 +日付を返します。 +Mode引数は、toWeek()のmode引数とまったく同じように動作します。 単一引数の構文では、モード値0が使用されます。 + +## toStartOfDay {#tostartofday} + +時刻を含む日付をその日の始まりに切り捨てます。 + +## toStartOfHour {#tostartofhour} + +時刻を含む日付を時間の開始位置に切り捨てます。 + +## toStartOfMinute {#tostartofminute} + +日付と時刻が分の先頭に切り捨てられます。 + +## toStartOfFiveMinute {#tostartoffiveminute} + +日付と時刻を切り捨てます。 + +## トスタートオフテンミニュート {#tostartoftenminutes} + +日付と時刻を切り捨てます。 + +## トスタートオフィフテンミニュート {#tostartoffifteenminutes} + +日付と時刻を切り捨てます。 + +## toStartOfInterval(time\_or\_data,間隔xユニット\[,time\_zone\]) {#tostartofintervaltime-or-data-interval-x-unit-time-zone} + +これは、名前の付いた他の関数の一般化です `toStartOf*`. 例えば, +`toStartOfInterval(t, INTERVAL 1 year)` と同じを返します `toStartOfYear(t)`, +`toStartOfInterval(t, INTERVAL 1 month)` と同じを返します `toStartOfMonth(t)`, +`toStartOfInterval(t, INTERVAL 1 day)` と同じを返します `toStartOfDay(t)`, +`toStartOfInterval(t, INTERVAL 15 minute)` と同じを返します `toStartOfFifteenMinutes(t)` など。 + +## トタイム {#totime} + +時刻を保持しながら、時刻を含む日付を特定の固定日付に変換します。 + +## torelativeyearnumcomment {#torelativeyearnum} + +時刻または日付の日付を、過去の特定の固定小数点から始まる年の数に変換します。 + +## torelativequarternumcomment {#torelativequarternum} + +時刻または日付の日付を、過去の特定の固定小数点から開始して、四半期の数に変換します。 + +## torelativemonthnumcomment {#torelativemonthnum} + +時刻または日付を含む日付を、過去の特定の固定小数点から始まる月の数に変換します。 + +## torelativeweeknumcomment {#torelativeweeknum} + +時刻または日付を含む日付を、過去の特定の固定小数点から始まる週の数に変換します。 + +## torrelativedaynumcomment {#torelativedaynum} + +時刻または日付を含む日付を、過去の特定の固定小数点から始まる日の数に変換します。 + +## torrelativehournumgenericname {#torelativehournum} + +時刻または日付の日付を、過去の特定の固定小数点から始まる時間の数値に変換します。 + +## toRelativeMinuteNum {#torelativeminutenum} + +時刻または日付の日付を、過去の特定の固定小数点から始まる分の数値に変換します。 + +## torrelativesecondnumcomdnamescription {#torelativesecondnum} + +時刻または日付の日付を、過去の特定の固定小数点から開始して秒の数値に変換します。 + +## toISOYear {#toisoyear} + +時刻を含む日付または日付を、iso年番号を含むuint16番号に変換します。 + +## toISOWeek {#toisoweek} + +時刻を含む日付または日付を、iso週番号を含むuint8番号に変換します。 + +## toWeek(日付\[,モード\]) {#toweekdatemode} + +この関数は、dateまたはdatetimeの週番号を返します。 また、戻り値の範囲が0から53または1から53のどちらであるかを指定することができます。 引数modeを省略すると、デフォルトのモードは0になります。 +`toISOWeek()`は、以下と同等の互換性関数です `toWeek(date,3)`. +次の表では、mode引数の動作について説明します。 + +| モード | 週の最初の日 | 範囲 | Week 1 is the first week … | +|--------|--------------|------|----------------------------| +| 0 | 日曜日 | 0-53 | 今年の日曜日に | +| 1 | 月曜日 | 0-53 | 今年は4日以上 | +| 2 | 日曜日 | 1-53 | 今年の日曜日に | +| 3 | 月曜日 | 1-53 | 今年は4日以上 | +| 4 | 日曜日 | 0-53 | 今年は4日以上 | +| 5 | 月曜日 | 0-53 | 今年の月曜日と | +| 6 | 日曜日 | 1-53 | 今年は4日以上 | +| 7 | 月曜日 | 1-53 | 今年の月曜日と | +| 8 | 日曜日 | 1-53 | 含まれ月1 | +| 9 | 月曜日 | 1-53 | 含まれ月1 | + +意味のあるモード値の場合 “with 4 or more days this year,” 週はISO8601:1988に従って番号が付けられます: + +- 1月を含む週がある場合4新年の日,それは週です1. 
+ +- それ以外の場合は、前年の最後の週であり、次の週は1週です。 + +意味のあるモード値の場合 “contains January 1” の週の月には1週間に1. たとえそれが一日だけ含まれていても、その週に含まれている新年の日数は関係ありません。 + +``` sql +toWeek(date, [, mode][, Timezone]) +``` + +**パラメータ** + +- `date` – Date or DateTime. +- `mode` – Optional parameter, Range of values is \[0,9\], default is 0. +- `Timezone` – Optional parameter, it behaves like any other conversion function. + +**例えば** + +``` sql +SELECT toDate('2016-12-27') AS date, toWeek(date) AS week0, toWeek(date,1) AS week1, toWeek(date,9) AS week9; +``` + +``` text +┌───────date─┬─week0─┬─week1─┬─week9─┐ +│ 2016-12-27 │ 52 │ 52 │ 1 │ +└────────────┴───────┴───────┴───────┘ +``` + +## toeearweek(日付\[,モード\]) {#toyearweekdatemode} + +日付の年と週を返します。 結果の年は、その年の最初と最後の週の日付の引数の年とは異なる場合があります。 + +Mode引数は、toWeek()のmode引数とまったく同じように動作します。 単一引数の構文では、モード値0が使用されます。 + +`toISOYear()`は、以下と同等の互換性関数です `intDiv(toYearWeek(date,3),100)`. + +**例えば** + +``` sql +SELECT toDate('2016-12-27') AS date, toYearWeek(date) AS yearWeek0, toYearWeek(date,1) AS yearWeek1, toYearWeek(date,9) AS yearWeek9; +``` + +``` text +┌───────date─┬─yearWeek0─┬─yearWeek1─┬─yearWeek9─┐ +│ 2016-12-27 │ 201652 │ 201652 │ 201701 │ +└────────────┴───────────┴───────────┴───────────┘ +``` + +## さて {#now} + +ゼロ引数を受け取り、リクエスト実行のいずれかの時点で現在の時刻を返します。 +この関数は、要求が完了するまでに長い時間がかかった場合でも、定数を返します。 + +## 今日 {#today} + +ゼロ引数を受け取り、リクエスト実行のいずれかの時点で現在の日付を返します。 +同じように ‘toDate(now())’. + +## 昨日 {#yesterday} + +ゼロの引数を受け取り、リクエストの実行のいずれかの時点で、昨日の日付を返します。 +同じように ‘today() - 1’. + +## タイムスロット {#timeslot} + +時間を半分時間に丸めます。 +この機能はyandexに固有です。トラッキングタグがこの量よりも厳密に時間が異なる単一のユーザーの連続したページビューを表示する場合、セッションを二つのセッションに分割す つまり、タプル(タグid、ユーザー id、およびタイムスロット)を使用して、対応するセッションに含まれるページビューを検索できます。 + +## toYYYYMM {#toyyyymm} + +時刻を含む日付または日付を、年と月の数値(yyyy\*100+mm)を含むuint32番号に変換します。 + +## toyyymmdd {#toyyyymmdd} + +時刻を含む日付または日付を、年と月の数値(yyyy\*10000+mm\*100+dd)を含むuint32番号に変換します。 + +## toYYYYMMDDhhmmss {#toyyyymmddhhmmss} + +時刻付きの日付または日付を、年と月の数値を含むuint64番号に変換します(yyyy\*10000000000+mm\*100000000+dd\*1000000+hh\*10000+mm\*100+ss)。 + +## addYears,addMonths,addweks,addDays,addHours,addMinutes,addSeconds,addQuarters {#addyears-addmonths-addweeks-adddays-addhours-addminutes-addseconds-addquarters} + +関数は、日付/日付時刻の間隔を日付/日付時刻に追加してから、日付/日付時刻を返します。 例えば: + +``` sql +WITH + toDate('2018-01-01') AS date, + toDateTime('2018-01-01 00:00:00') AS date_time +SELECT + addYears(date, 1) AS add_years_with_date, + addYears(date_time, 1) AS add_years_with_date_time +``` + +``` text +┌─add_years_with_date─┬─add_years_with_date_time─┐ +│ 2019-01-01 │ 2019-01-01 00:00:00 │ +└─────────────────────┴──────────────────────────┘ +``` + +## subtractYears,subtractMonths,subtractWeeks,subtractDays,subtractHours,subtractMinutes,subtractSeconds,subtractQuarters {#subtractyears-subtractmonths-subtractweeks-subtractdays-subtracthours-subtractminutes-subtractseconds-subtractquarters} + +関数date/datetime間隔をdate/datetimeに減算し、date/datetimeを返します。 例えば: + +``` sql +WITH + toDate('2019-01-01') AS date, + toDateTime('2019-01-01 00:00:00') AS date_time +SELECT + subtractYears(date, 1) AS subtract_years_with_date, + subtractYears(date_time, 1) AS subtract_years_with_date_time +``` + +``` text +┌─subtract_years_with_date─┬─subtract_years_with_date_time─┐ +│ 2018-01-01 │ 2018-01-01 00:00:00 │ +└──────────────────────────┴───────────────────────────────┘ +``` + +## dateDiff {#datediff} + +日付または日付時刻値の差を返します。 + +**構文** + +``` sql +dateDiff('unit', startdate, enddate, [timezone]) +``` + +**パラメータ** + +- `unit` — Time unit, in which the returned value is expressed. 
[文字列](../syntax.md#syntax-string-literal). + + Supported values: + + | unit | + | ---- | + |second | + |minute | + |hour | + |day | + |week | + |month | + |quarter | + |year | + +- `startdate` — The first time value to compare. [日付](../../sql_reference/data_types/date.md) または [DateTime](../../sql_reference/data_types/datetime.md). + +- `enddate` — The second time value to compare. [日付](../../sql_reference/data_types/date.md) または [DateTime](../../sql_reference/data_types/datetime.md). + +- `timezone` — Optional parameter. If specified, it is applied to both `startdate` と `enddate`. 指定されていない場合は、 `startdate` と `enddate` 使用されます。 それらが同じでない場合、結果は不特定です。 + +**戻り値** + +の違い `startdate` と `enddate` で表現 `unit`. + +タイプ: `int`. + +**例えば** + +クエリ: + +``` sql +SELECT dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00')); +``` + +結果: + +``` text +┌─dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'))─┐ +│ 25 │ +└────────────────────────────────────────────────────────────────────────────────────────┘ +``` + +## timeSlots(StartTime,Duration,\[,Size\]) {#timeslotsstarttime-duration-size} + +で始まる時間間隔のために ‘StartTime’ そしてのために継続 ‘Duration’ 秒、この間隔からのポイントからなる時間内のモーメントの配列を返します。 ‘Size’ 数秒で。 ‘Size’ オプションのパラメーターです:既定では定数UInt32を1800に設定します。 +例えば, `timeSlots(toDateTime('2012-01-01 12:20:00'), 600) = [toDateTime('2012-01-01 12:00:00'), toDateTime('2012-01-01 12:30:00')]`. +これは、対応するセッションでページビューを検索するために必要です。 + +## formatDateTime(時間,フォーマット\[,タイムゾーン\]) {#formatdatetime} + +Function formats a Time according given Format string. N.B.: Format is a constant expression, e.g. you can not have multiple formats for single result column. + +形式のサポートされている修飾子: +(“Example” このページを正しく表示するフォーマット結果のための時間 `2018-01-02 22:33:44`) + +| 修飾子 | 説明 | 例えば | +|--------|----------------------------------------------------|------------| +| %C | 年を100で除算し、整数(00から99)に切り捨てられます) | 20 | +| %d | 月の日、ゼロ-パディング(01-31) | 02 | +| %D | %m/%d/%yに相当する短いmm/dd/yy日付 | 01/02/18 | +| %e | 月の日、スペース埋め(1-31) | 2 | +| %F | %Y-%m-%dに相当する短いYYYY-MM-DD日付 | 2018-01-02 | +| %H | 24時間形式(00-23)の時間) | 22 | +| %I | 時間で12h形式(01-12) | 10 | +| %j | 年の日(001-366) | 002 | +| %m | 月を小数(01-12)として指定します) | 01 | +| %M | 分(00-59) | 33 | +| %n | 改行文字(") | | +| %p | AMまたはPMの指定 | PM | +| %R | 24時間HH:MM時間、%Hに相当する:%M | 22:33 | +| %S | 二番目に(00-59) | 44 | +| %t | 水平タブ文字(') | | +| %T | ISO8601時刻フォーマット(HH:MM:SS)、%H:%M:%Sに相当 | 22:33:44 | +| %u | ISO8601月曜日が1(1-7)の数値としての平日) | 2 | +| %V | ISO8601週番号(01-53) | 01 | +| %w | weekday as a decimal number with Sunday as0(0-6) | 2 | +| %y | 年,最後の二つの数字(00-99) | 18 | +| %Y | 年 | 2018 | +| %% | %記号 | % | + +[元の記事](https://clickhouse.tech/docs/en/query_language/functions/date_time_functions/) diff --git a/docs/ja/sql_reference/functions/encoding_functions.md b/docs/ja/sql_reference/functions/encoding_functions.md deleted file mode 120000 index 197dd14ddbe..00000000000 --- a/docs/ja/sql_reference/functions/encoding_functions.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/functions/encoding_functions.md \ No newline at end of file diff --git a/docs/ja/sql_reference/functions/encoding_functions.md b/docs/ja/sql_reference/functions/encoding_functions.md new file mode 100644 index 00000000000..4f0ce676fb8 --- /dev/null +++ b/docs/ja/sql_reference/functions/encoding_functions.md @@ -0,0 +1,175 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 52 +toc_title: "\u30A8\u30F3\u30B3\u30FC\u30C9" +--- + +# エンコード関数 {#encoding-functions} + +## 文字 
{#char} + +渡された引数の数として長さを持つ文字列を返し、各バイトは対応する引数の値を持ちます。 数値型の複数の引数を受け取ります。 引数の値がuint8データ型の範囲外である場合は、丸めとオーバーフローが可能な状態でuint8に変換されます。 + +**構文** + +``` sql +char(number_1, [number_2, ..., number_n]); +``` + +**パラメータ** + +- `number_1, number_2, ..., number_n` — Numerical arguments interpreted as integers. Types: [Int](../../sql_reference/data_types/int_uint.md), [フロート](../../sql_reference/data_types/float.md). + +**戻り値** + +- 指定されたバイトの文字列。 + +タイプ: `String`. + +**例えば** + +クエリ: + +``` sql +SELECT char(104.1, 101, 108.9, 108.9, 111) AS hello +``` + +結果: + +``` text +┌─hello─┐ +│ hello │ +└───────┘ +``` + +を構築できます文字列の任意のエンコードに対応するバイトまでとなります。 utf-8の例を次に示します: + +クエリ: + +``` sql +SELECT char(0xD0, 0xBF, 0xD1, 0x80, 0xD0, 0xB8, 0xD0, 0xB2, 0xD0, 0xB5, 0xD1, 0x82) AS hello; +``` + +結果: + +``` text +┌─hello──┐ +│ привет │ +└────────┘ +``` + +クエリ: + +``` sql +SELECT char(0xE4, 0xBD, 0xA0, 0xE5, 0xA5, 0xBD) AS hello; +``` + +結果: + +``` text +┌─hello─┐ +│ 你好 │ +└───────┘ +``` + +## 六角 {#hex} + +引数の十六進表現を含む文字列を返します。 + +**構文** + +``` sql +hex(arg) +``` + +この関数は大文字を使用しています `A-F` 接頭辞(など)を使用しないでください `0x` または接尾辞(のような `h`). + +整数引数の場合は、六角数字を出力します (“nibbles”)最も重要なものから最も重要なもの(ビッグエンディアンまたは “human readable” 順序)。 最も重要な非ゼロバイト(先行ゼロバイトは省略されています)で始まりますが、先行桁がゼロであっても常に各バイトの両方の桁を出力します。 + +例えば: + +**例えば** + +クエリ: + +``` sql +SELECT hex(1); +``` + +結果: + +``` text +01 +``` + +タイプの値 `Date` と `DateTime` 対応する整数としてフォーマットされます(日付のエポックからの日数と、DateTimeのUnixタイムスタンプの値)。 + +のために `String` と `FixedString` すべてのバイトは、単に二進数として符号化される。 ゼロバイトは省略されません。 + +浮動小数点型と小数型の値は、メモリ内での表現としてエンコードされます。 支援においても少しエンディアン、建築、その符号化されたのでちょっとエンディアンです。※ ゼロ先行/末尾のバイトは省略されません。 + +**パラメータ** + +- `arg` — A value to convert to hexadecimal. Types: [文字列](../../sql_reference/data_types/string.md), [UInt](../../sql_reference/data_types/int_uint.md), [フロート](../../sql_reference/data_types/float.md), [小数](../../sql_reference/data_types/decimal.md), [日付](../../sql_reference/data_types/date.md) または [DateTime](../../sql_reference/data_types/datetime.md). + +**戻り値** + +- 引数の十六進表現を持つ文字列。 + +タイプ: `String`. 
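The byte-order rules above are easiest to see on small values; a short sketch covering the integer, string, and Date cases (the float cases are shown in the example that follows):

``` sql
SELECT
    hex(255) AS int_hex,                   -- 'FF': big-endian nibbles, leading zero bytes omitted
    hex('abc') AS str_hex,                 -- '616263': every byte encoded, zero bytes kept
    hex(toDate('2018-01-02')) AS date_hex  -- the Date is encoded as days since the epoch
```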
+ +**例えば** + +クエリ: + +``` sql +SELECT hex(toFloat32(number)) as hex_presentation FROM numbers(15, 2); +``` + +結果: + +``` text +┌─hex_presentation─┐ +│ 00007041 │ +│ 00008041 │ +└──────────────────┘ +``` + +クエリ: + +``` sql +SELECT hex(toFloat64(number)) as hex_presentation FROM numbers(15, 2); +``` + +結果: + +``` text +┌─hex_presentation─┐ +│ 0000000000002E40 │ +│ 0000000000003040 │ +└──────────────────┘ +``` + +## unhex(str)) {#unhexstr} + +任意の数の進数を含む文字列を受け取り、対応するバイトを含む文字列を返します。 十六進数の数字は偶数である必要はありません。 奇数の場合、最後の桁は、00-0fバイトの最下位半分として解釈されます。 引数stringに十六進数以外の桁数が含まれている場合、実装定義の結果が返されます(例外はスローされません)。 +結果を数値に変換したい場合は、 ‘reverse’ と ‘reinterpretAsType’ 機能。 + +## UUIDStringToNum(str)) {#uuidstringtonumstr} + +次の形式の36文字を含む文字列を受け取ります `123e4567-e89b-12d3-a456-426655440000`、およびFixedString(16)のバイトのセットとして返します。 + +## UUIDNumToString(str)) {#uuidnumtostringstr} + +FixedString(16)値を受け取ります。 テキスト形式で36文字を含む文字列を返します。 + +## ビットマスクトリスト(num) {#bitmasktolistnum} + +整数を受け入れます。 合計されたときにソース番号を合計する二つの累乗のリストを含む文字列を返します。 これらは、昇順で、テキスト形式のスペースなしでコンマ区切りです。 + +## ビットマスクアレール(num) {#bitmasktoarraynum} + +整数を受け入れます。 合計されたときにソース番号を合計する二つの累乗のリストを含むuint64数の配列を返します。 配列内の数字は昇順です。 + +[元の記事](https://clickhouse.tech/docs/en/query_language/functions/encoding_functions/) diff --git a/docs/ja/sql_reference/functions/ext_dict_functions.md b/docs/ja/sql_reference/functions/ext_dict_functions.md deleted file mode 120000 index bd0efa8d747..00000000000 --- a/docs/ja/sql_reference/functions/ext_dict_functions.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/functions/ext_dict_functions.md \ No newline at end of file diff --git a/docs/ja/sql_reference/functions/ext_dict_functions.md b/docs/ja/sql_reference/functions/ext_dict_functions.md new file mode 100644 index 00000000000..8b9299cb524 --- /dev/null +++ b/docs/ja/sql_reference/functions/ext_dict_functions.md @@ -0,0 +1,205 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 58 +toc_title: "\u5916\u90E8\u8F9E\u66F8\u306E\u64CD\u4F5C" +--- + +# 外部辞書を操作するための関数 {#ext_dict_functions} + +情報の接続や設定の外部辞書参照 [外部辞書](../../sql_reference/dictionaries/external_dictionaries/external_dicts.md). + +## dictGet {#dictget} + +外部ディクショナリから値を取得します。 + +``` sql +dictGet('dict_name', 'attr_name', id_expr) +dictGetOrDefault('dict_name', 'attr_name', id_expr, default_value_expr) +``` + +**パラメータ** + +- `dict_name` — Name of the dictionary. [文字列リテラル](../syntax.md#syntax-string-literal). +- `attr_name` — Name of the column of the dictionary. [文字列リテラル](../syntax.md#syntax-string-literal). +- `id_expr` — Key value. [式](../syntax.md#syntax-expressions) を返す [UInt64](../../sql_reference/data_types/int_uint.md) または [タプル](../../sql_reference/data_types/tuple.md)-辞書構成に応じて値を入力します。 +- `default_value_expr` — Value returned if the dictionary doesn't contain a row with the `id_expr` キー。 [式](../syntax.md#syntax-expressions) に設定されたデータ型の値を返します。 `attr_name` 属性。 + +**戻り値** + +- クリックハウスで属性が正常に解析された場合 [属性のデータ型](../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md#ext_dict_structure-attributes)、関数は、に対応する辞書属性の値を返します `id_expr`. + +- キーがない場合、対応する `id_expr`、辞書では、その後: + + - `dictGet` returns the content of the `` element specified for the attribute in the dictionary configuration. + - `dictGetOrDefault` returns the value passed as the `default_value_expr` parameter. + +ClickHouseは、属性の値を解析できない場合、または値が属性データ型と一致しない場合に例外をスローします。 + +**例えば** + +テキストファイルの作成 `ext-dict-text.csv` 以下を含む: + +``` text +1,1 +2,2 +``` + +最初の列は次のとおりです `id`、第二の列は `c1`. 
+ +外部ディクショナリの設定: + +``` xml + + + ext-dict-test + + + /path-to/ext-dict-test.csv + CSV + + + + + + + + id + + + c1 + UInt32 + + + + 0 + + +``` + +クエリの実行: + +``` sql +SELECT + dictGetOrDefault('ext-dict-test', 'c1', number + 1, toUInt32(number * 10)) AS val, + toTypeName(val) AS type +FROM system.numbers +LIMIT 3 +``` + +``` text +┌─val─┬─type───┐ +│ 1 │ UInt32 │ +│ 2 │ UInt32 │ +│ 20 │ UInt32 │ +└─────┴────────┘ +``` + +**また見なさい** + +- [外部辞書](../../sql_reference/dictionaries/external_dictionaries/external_dicts.md) + +## dictHas {#dicthas} + +キーが辞書に存在するかどうかを確認します。 + +``` sql +dictHas('dict_name', id_expr) +``` + +**パラメータ** + +- `dict_name` — Name of the dictionary. [文字列リテラル](../syntax.md#syntax-string-literal). +- `id_expr` — Key value. [式](../syntax.md#syntax-expressions) を返す [UInt64](../../sql_reference/data_types/int_uint.md)-タイプ値。 + +**戻り値** + +- 0、キーがない場合。 +- 1、キーがある場合。 + +タイプ: `UInt8`. + +## 独裁主義体制 {#dictgethierarchy} + +キーのすべての親を含む配列を作成します。 [階層辞書](../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_hierarchical.md). + +**構文** + +``` sql +dictGetHierarchy('dict_name', key) +``` + +**パラメータ** + +- `dict_name` — Name of the dictionary. [文字列リテラル](../syntax.md#syntax-string-literal). +- `key` — Key value. [式](../syntax.md#syntax-expressions) を返す [UInt64](../../sql_reference/data_types/int_uint.md)-タイプ値。 + +**戻り値** + +- キーの親。 + +タイプ: [配列(uint64)](../../sql_reference/data_types/array.md). + +## ディクティシン {#dictisin} + +辞書内の階層チェーン全体を通じてキーの祖先をチェックします。 + +``` sql +dictIsIn('dict_name', child_id_expr, ancestor_id_expr) +``` + +**パラメータ** + +- `dict_name` — Name of the dictionary. [文字列リテラル](../syntax.md#syntax-string-literal). +- `child_id_expr` — Key to be checked. [式](../syntax.md#syntax-expressions) を返す [UInt64](../../sql_reference/data_types/int_uint.md)-タイプ値。 +- `ancestor_id_expr` — Alleged ancestor of the `child_id_expr` キー。 [式](../syntax.md#syntax-expressions) を返す [UInt64](../../sql_reference/data_types/int_uint.md)-タイプ値。 + +**戻り値** + +- 0,if `child_id_expr` の子ではありません `ancestor_id_expr`. +- 1、場合 `child_id_expr` の子です `ancestor_id_expr` または `child_id_expr` は `ancestor_id_expr`. + +タイプ: `UInt8`. + +## その他の機能 {#ext_dict_functions-other} + +ClickHouseは、辞書構成に関係なく、辞書属性値を特定のデータ型に変換する特殊な関数をサポートしています。 + +機能: + +- `dictGetInt8`, `dictGetInt16`, `dictGetInt32`, `dictGetInt64` +- `dictGetUInt8`, `dictGetUInt16`, `dictGetUInt32`, `dictGetUInt64` +- `dictGetFloat32`, `dictGetFloat64` +- `dictGetDate` +- `dictGetDateTime` +- `dictGetUUID` +- `dictGetString` + +これらの機能はすべて、 `OrDefault` 変更。 例えば, `dictGetDateOrDefault`. + +構文: + +``` sql +dictGet[Type]('dict_name', 'attr_name', id_expr) +dictGet[Type]OrDefault('dict_name', 'attr_name', id_expr, default_value_expr) +``` + +**パラメータ** + +- `dict_name` — Name of the dictionary. [文字列リテラル](../syntax.md#syntax-string-literal). +- `attr_name` — Name of the column of the dictionary. [文字列リテラル](../syntax.md#syntax-string-literal). +- `id_expr` — Key value. [式](../syntax.md#syntax-expressions) を返す [UInt64](../../sql_reference/data_types/int_uint.md)-タイプ値。 +- `default_value_expr` — Value which is returned if the dictionary doesn't contain a row with the `id_expr` キー。 [式](../syntax.md#syntax-expressions) に設定されたデータ型の値を返します。 `attr_name` 属性。 + +**戻り値** + +- クリックハウスで属性が正常に解析された場合 [属性のデータ型](../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md#ext_dict_structure-attributes)、関数は、に対応する辞書属性の値を返します `id_expr`. 
+ +- 要求がない場合 `id_expr` 辞書では、: + + - `dictGet[Type]` returns the content of the `` element specified for the attribute in the dictionary configuration. + - `dictGet[Type]OrDefault` returns the value passed as the `default_value_expr` parameter. + +ClickHouseは、属性の値を解析できない場合、または値が属性データ型と一致しない場合に例外をスローします。 + +[元の記事](https://clickhouse.tech/docs/en/query_language/functions/ext_dict_functions/) diff --git a/docs/ja/sql_reference/functions/functions_for_nulls.md b/docs/ja/sql_reference/functions/functions_for_nulls.md deleted file mode 120000 index 9b1597d5841..00000000000 --- a/docs/ja/sql_reference/functions/functions_for_nulls.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/functions/functions_for_nulls.md \ No newline at end of file diff --git a/docs/ja/sql_reference/functions/functions_for_nulls.md b/docs/ja/sql_reference/functions/functions_for_nulls.md new file mode 100644 index 00000000000..655eecd3df2 --- /dev/null +++ b/docs/ja/sql_reference/functions/functions_for_nulls.md @@ -0,0 +1,312 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 63 +toc_title: "\u30CC\u30EB\u53EF\u80FD\u306A\u5F15\u6570\u306E\u64CD\u4F5C" +--- + +# Null可能な集計を操作するための関数 {#functions-for-working-with-nullable-aggregates} + +## isNull {#isnull} + +引数が [NULL](../syntax.md#null). + +``` sql +isNull(x) +``` + +**パラメータ** + +- `x` — A value with a non-compound data type. + +**戻り値** + +- `1` もし `x` は `NULL`. +- `0` もし `x` はない `NULL`. + +**例えば** + +入力テーブル + +``` text +┌─x─┬────y─┐ +│ 1 │ ᴺᵁᴸᴸ │ +│ 2 │ 3 │ +└───┴──────┘ +``` + +クエリ + +``` sql +SELECT x FROM t_null WHERE isNull(y) +``` + +``` text +┌─x─┐ +│ 1 │ +└───┘ +``` + +## isNotNull {#isnotnull} + +引数が [NULL](../syntax.md#null). + +``` sql +isNotNull(x) +``` + +**パラメータ:** + +- `x` — A value with a non-compound data type. + +**戻り値** + +- `0` もし `x` は `NULL`. +- `1` もし `x` はない `NULL`. + +**例えば** + +入力テーブル + +``` text +┌─x─┬────y─┐ +│ 1 │ ᴺᵁᴸᴸ │ +│ 2 │ 3 │ +└───┴──────┘ +``` + +クエリ + +``` sql +SELECT x FROM t_null WHERE isNotNull(y) +``` + +``` text +┌─x─┐ +│ 2 │ +└───┘ +``` + +## 合体 {#coalesce} + +左から右にチェックするかどうか `NULL` 引数が渡され、最初の非を返します-`NULL` 引数。 + +``` sql +coalesce(x,...) +``` + +**パラメータ:** + +- 非化合物タイプの任意の数のパラメーター。 すべてのパラメータに対応していることが必要となるデータ型になります。 + +**戻り値** + +- 最初の非-`NULL` 引数。 +- `NULL` すべての引数が `NULL`. + +**例えば** + +顧客に連絡する複数の方法を指定する可能性のある連絡先のリストを考えてみましょう。 + +``` text +┌─name─────┬─mail─┬─phone─────┬──icq─┐ +│ client 1 │ ᴺᵁᴸᴸ │ 123-45-67 │ 123 │ +│ client 2 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ +└──────────┴──────┴───────────┴──────┘ +``` + +その `mail` と `phone` フィールドの型はStringですが、 `icq` フィールドは `UInt32`、それはに変換する必要があります `String`. + +コンタクトリストから顧客の最初の利用可能なコンタクトメソッドを取得する: + +``` sql +SELECT coalesce(mail, phone, CAST(icq,'Nullable(String)')) FROM aBook +``` + +``` text +┌─name─────┬─coalesce(mail, phone, CAST(icq, 'Nullable(String)'))─┐ +│ client 1 │ 123-45-67 │ +│ client 2 │ ᴺᵁᴸᴸ │ +└──────────┴──────────────────────────────────────────────────────┘ +``` + +## ifNull {#ifnull} + +メイン引数がある場合は、代替値を返します `NULL`. + +``` sql +ifNull(x,alt) +``` + +**パラメータ:** + +- `x` — The value to check for `NULL`. +- `alt` — The value that the function returns if `x` は `NULL`. + +**戻り値** + +- を値 `x`,もし `x` はない `NULL`. +- を値 `alt`,もし `x` は `NULL`. 
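For two arguments, ifNull behaves like coalesce (described above); a minimal sketch of the equivalence:

``` sql
SELECT ifNull(NULL, 'b') AS via_ifnull, coalesce(NULL, 'b') AS via_coalesce -- both return 'b'
```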
+ +**例えば** + +``` sql +SELECT ifNull('a', 'b') +``` + +``` text +┌─ifNull('a', 'b')─┐ +│ a │ +└──────────────────┘ +``` + +``` sql +SELECT ifNull(NULL, 'b') +``` + +``` text +┌─ifNull(NULL, 'b')─┐ +│ b │ +└───────────────────┘ +``` + +## nullifname {#nullif} + +を返します `NULL` 引数が等しい場合。 + +``` sql +nullIf(x, y) +``` + +**パラメータ:** + +`x`, `y` — Values for comparison. They must be compatible types, or ClickHouse will generate an exception. + +**戻り値** + +- `NULL` 引数が等しい場合。 +- その `x` 引数が等しくない場合の値。 + +**例えば** + +``` sql +SELECT nullIf(1, 1) +``` + +``` text +┌─nullIf(1, 1)─┐ +│ ᴺᵁᴸᴸ │ +└──────────────┘ +``` + +``` sql +SELECT nullIf(1, 2) +``` + +``` text +┌─nullIf(1, 2)─┐ +│ 1 │ +└──────────────┘ +``` + +## assumeNotNull {#assumenotnull} + +結果はtypeの値になります [Nullable](../../sql_reference/data_types/nullable.md) 非のために- `Nullable` 値がない場合、 `NULL`. + +``` sql +assumeNotNull(x) +``` + +**パラメータ:** + +- `x` — The original value. + +**戻り値** + +- 元の値から非-`Nullable` そうでない場合はタイプします `NULL`. +- のデフォルト値。-`Nullable` 元の値が `NULL`. + +**例えば** + +考慮する `t_null` テーブル。 + +``` sql +SHOW CREATE TABLE t_null +``` + +``` text +┌─statement─────────────────────────────────────────────────────────────────┐ +│ CREATE TABLE default.t_null ( x Int8, y Nullable(Int8)) ENGINE = TinyLog │ +└───────────────────────────────────────────────────────────────────────────┘ +``` + +``` text +┌─x─┬────y─┐ +│ 1 │ ᴺᵁᴸᴸ │ +│ 2 │ 3 │ +└───┴──────┘ +``` + +を適用 `assumeNotNull` に機能 `y` コラム + +``` sql +SELECT assumeNotNull(y) FROM t_null +``` + +``` text +┌─assumeNotNull(y)─┐ +│ 0 │ +│ 3 │ +└──────────────────┘ +``` + +``` sql +SELECT toTypeName(assumeNotNull(y)) FROM t_null +``` + +``` text +┌─toTypeName(assumeNotNull(y))─┐ +│ Int8 │ +│ Int8 │ +└──────────────────────────────┘ +``` + +## toNullable {#tonullable} + +引数の型を次のように変換します `Nullable`. + +``` sql +toNullable(x) +``` + +**パラメータ:** + +- `x` — The value of any non-compound type. + +**戻り値** + +- Aの入力値 `Nullable` タイプ。 + +**例えば** + +``` sql +SELECT toTypeName(10) +``` + +``` text +┌─toTypeName(10)─┐ +│ UInt8 │ +└────────────────┘ +``` + +``` sql +SELECT toTypeName(toNullable(10)) +``` + +``` text +┌─toTypeName(toNullable(10))─┐ +│ Nullable(UInt8) │ +└────────────────────────────┘ +``` + +[元の記事](https://clickhouse.tech/docs/en/query_language/functions/functions_for_nulls/) diff --git a/docs/ja/sql_reference/functions/geo.md b/docs/ja/sql_reference/functions/geo.md deleted file mode 120000 index cfb8b7068d5..00000000000 --- a/docs/ja/sql_reference/functions/geo.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/functions/geo.md \ No newline at end of file diff --git a/docs/ja/sql_reference/functions/geo.md b/docs/ja/sql_reference/functions/geo.md new file mode 100644 index 00000000000..c61027f8bcd --- /dev/null +++ b/docs/ja/sql_reference/functions/geo.md @@ -0,0 +1,510 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 62 +toc_title: "\u5730\u7406\u5EA7\u6A19\u306E\u64CD\u4F5C" +--- + +# 地理座標を操作するための関数 {#functions-for-working-with-geographical-coordinates} + +## グレートサークル距離 {#greatcircledistance} + +を使用して、地球の表面上の二つの点の間の距離を計算します [大円式](https://en.wikipedia.org/wiki/Great-circle_distance). + +``` sql +greatCircleDistance(lon1Deg, lat1Deg, lon2Deg, lat2Deg) +``` + +**入力パラメータ** + +- `lon1Deg` — Longitude of the first point in degrees. Range: `[-180°, 180°]`. +- `lat1Deg` — Latitude of the first point in degrees. Range: `[-90°, 90°]`. +- `lon2Deg` — Longitude of the second point in degrees. Range: `[-180°, 180°]`. 
+- `lat2Deg` — Latitude of the second point in degrees. Range: `[-90°, 90°]`. + +正の値は北緯と東経に対応し、負の値は南緯と西経に対応します。 + +**戻り値** + +メートルで、地球の表面上の二つの点の間の距離。 + +入力パラメーター値が範囲外になったときに例外を生成します。 + +**例えば** + +``` sql +SELECT greatCircleDistance(55.755831, 37.617673, -55.755831, -37.617673) +``` + +``` text +┌─greatCircleDistance(55.755831, 37.617673, -55.755831, -37.617673)─┐ +│ 14132374.194975413 │ +└───────────────────────────────────────────────────────────────────┘ +``` + +## pointInEllipses {#pointinellipses} + +ポイントが楕円の少なくとも一つに属するかどうかをチェックします。 +座標は、デカルト座標系ではジオメトリです。 + +``` sql +pointInEllipses(x, y, x₀, y₀, a₀, b₀,...,xₙ, yₙ, aₙ, bₙ) +``` + +**入力パラメータ** + +- `x, y` — Coordinates of a point on the plane. +- `xᵢ, yᵢ` — Coordinates of the center of the `i`-番目の省略記号。 +- `aᵢ, bᵢ` — Axes of the `i`-x、y座標の単位で番目の省略記号。 + +入力パラ `2+4⋅n`、どこ `n` 楕円の数です。 + +**戻り値** + +`1` ポイントは、楕円の少なくとも一つの内側にある場合; `0`そうでない場合。 + +**例えば** + +``` sql +SELECT pointInEllipses(10., 10., 10., 9.1, 1., 0.9999) +``` + +``` text +┌─pointInEllipses(10., 10., 10., 9.1, 1., 0.9999)─┐ +│ 1 │ +└─────────────────────────────────────────────────┘ +``` + +## pointInPolygon {#pointinpolygon} + +るかどうかを判の点に属する多角形の面。 + +``` sql +pointInPolygon((x, y), [(a, b), (c, d) ...], ...) +``` + +**入力値** + +- `(x, y)` — Coordinates of a point on the plane. Data type — [タプル](../../sql_reference/data_types/tuple.md) — A tuple of two numbers. +- `[(a, b), (c, d) ...]` — Polygon vertices. Data type — [配列](../../sql_reference/data_types/array.md). 各頂点は、座標のペアで表されます `(a, b)`. 頂点は、時計回りまたは反時計回りの順序で指定する必要があります。 頂点の最小数は3です。 多角形は一定でなければなりません。 +- この機能は、穴(切り取られた部分)を持つ多角形もサポートします。 この場合、関数の追加の引数を使用してカットアウトセクションを定義するポリゴンを追加します。 この関数は、単純接続以外のポリゴンをサポートしません。 + +**戻り値** + +`1` ポイントがポリゴンの内側にある場合, `0` そうでない場合。 +ポイントがポリゴン境界上にある場合、関数は0または1を返します。 + +**例えば** + +``` sql +SELECT pointInPolygon((3., 3.), [(6, 0), (8, 4), (5, 8), (0, 2)]) AS res +``` + +``` text +┌─res─┐ +│ 1 │ +└─────┘ +``` + +## geohashEncode {#geohashencode} + +緯度と経度をgeohash-stringとしてエンコードします。http://geohash.org/,https://en.wikipedia.org/wiki/geohash). + +``` sql +geohashEncode(longitude, latitude, [precision]) +``` + +**入力値** + +- 経度-エンコードしたい座標の経度の部分。 範囲の浮遊`[-180°, 180°]` +- latitude-エンコードする座標の緯度部分。 範囲の浮遊 `[-90°, 90°]` +- precision-オプション、結果としてエンコードされる文字列の長さ。 `12`. 範囲の整数 `[1, 12]`. より小さい任意の値 `1` またはより大きい `12` に変換される。 `12`. + +**戻り値** + +- 英数字 `String` エンコードされた座標(base32エンコードアルファベットの修正版が使用されます)。 + +**例えば** + +``` sql +SELECT geohashEncode(-5.60302734375, 42.593994140625, 0) AS res +``` + +``` text +┌─res──────────┐ +│ ezs42d000000 │ +└──────────────┘ +``` + +## geohashDecode {#geohashdecode} + +Geohashでエンコードされた文字列を経度と緯度にデコードします。 + +**入力値** + +- エンコードされた文字列-geohashエンコードされた文字列。 + +**戻り値** + +- (経度、緯度)-2-のタプル `Float64` 経度と緯度の値。 + +**例えば** + +``` sql +SELECT geohashDecode('ezs42') AS res +``` + +``` text +┌─res─────────────────────────────┐ +│ (-5.60302734375,42.60498046875) │ +└─────────────────────────────────┘ +``` + +## geoToH3 {#geotoh3} + +を返します [H3](https://uber.github.io/h3/#/documentation/overview/introduction) 点指数 `(lon, lat)` 指定決断を使って。 + +[H3](https://uber.github.io/h3/#/documentation/overview/introduction) 地理的指標システムであり、地球の表面は六角形のタイルに分割されています。 トップレベルの各六角形は、より小さいものに分割することができます。 + +このインデックスは、主にバケットの場所やその他の地理空間の操作に使用されます。 + +**構文** + +``` sql +geoToH3(lon, lat, resolution) +``` + +**パラメータ** + +- `lon` — Longitude. Type: [Float64](../../sql_reference/data_types/float.md). +- `lat` — Latitude. Type: [Float64](../../sql_reference/data_types/float.md). +- `resolution` — Index resolution. Range: `[0, 15]`. 
タイプ: [UInt8](../../sql_reference/data_types/int_uint.md). + +**戻り値** + +- 六角形のインデックス番号。 +- エラーの場合は0。 + +タイプ: `UInt64`. + +**例えば** + +クエリ: + +``` sql +SELECT geoToH3(37.79506683, 55.71290588, 15) as h3Index +``` + +結果: + +``` text +┌────────────h3Index─┐ +│ 644325524701193974 │ +└────────────────────┘ +``` + +## geohashesInBox {#geohashesinbox} + +与えられたボックスの内側にあり、境界と交差する、与えられた精度のgeohashエンコードされた文字列の配列を返します。 + +**入力値** + +- longitude\_min-最小経度、範囲内の浮動小数点数 `[-180°, 180°]` +- latitude\_min-最小緯度、範囲内の浮動小数点数 `[-90°, 90°]` +- longitude\_max-最高の経度、範囲の浮遊価値 `[-180°, 180°]` +- latitude\_max-最大緯度、範囲内の浮動小数点数 `[-90°, 90°]` +- 精密-geohashの精密, `UInt8` 範囲内 `[1, 12]` + +すべての座標パラメータは同じタイプである必要があります。 `Float32` または `Float64`. + +**戻り値** + +- 提供された領域をカバーするgeohash-ボックスの精度の長い文字列の配列、あなたはアイテムの順序に頼るべきではありません。 +- \[\]-空の配列の場合 *分* の値 *緯度* と *経度* 対応するよりも小さくない *最大* 値。 + +結果の配列が10'000'000項目を超える場合、関数は例外をスローすることに注意してください。 + +**例えば** + +``` sql +SELECT geohashesInBox(24.48, 40.56, 24.785, 40.81, 4) AS thasos +``` + +``` text +┌─thasos──────────────────────────────────────┐ +│ ['sx1q','sx1r','sx32','sx1w','sx1x','sx38'] │ +└─────────────────────────────────────────────┘ +``` + +## h3GetBaseCell {#h3getbasecell} + +インデックスの基本セル番号を返します。 + +**構文** + +``` sql +h3GetBaseCell(index) +``` + +**パラメータ** + +- `index` — Hexagon index number. Type: [UInt64](../../sql_reference/data_types/int_uint.md). + +**戻り値** + +- 六角形ベースセル番号。 タイプ: [UInt8](../../sql_reference/data_types/int_uint.md). + +**例えば** + +クエリ: + +``` sql +SELECT h3GetBaseCell(612916788725809151) as basecell +``` + +結果: + +``` text +┌─basecell─┐ +│ 12 │ +└──────────┘ +``` + +## h3HexAreaM2 {#h3hexaream2} + +与えられた解像度で平方メートルの平均六角形の面積。 + +**構文** + +``` sql +h3HexAreaM2(resolution) +``` + +**パラメータ** + +- `resolution` — Index resolution. Range: `[0, 15]`. タイプ: [UInt8](../../sql_reference/data_types/int_uint.md). + +**戻り値** + +- Area in m². Type: [Float64](../../sql_reference/data_types/float.md). + +**例えば** + +クエリ: + +``` sql +SELECT h3HexAreaM2(13) as area +``` + +結果: + +``` text +┌─area─┐ +│ 43.9 │ +└──────┘ +``` + +## h3IndexesAreNeighbors {#h3indexesareneighbors} + +指定されたh3indexesが近傍であるかどうかを返します。 + +**構文** + +``` sql +h3IndexesAreNeighbors(index1, index2) +``` + +**パラメータ** + +- `index1` — Hexagon index number. Type: [UInt64](../../sql_reference/data_types/int_uint.md). +- `index2` — Hexagon index number. Type: [UInt64](../../sql_reference/data_types/int_uint.md). + +**戻り値** + +- を返します `1` インデックスが隣接している場合, `0` そうでなければ タイプ: [UInt8](../../sql_reference/data_types/int_uint.md). + +**例えば** + +クエリ: + +``` sql +SELECT h3IndexesAreNeighbors(617420388351344639, 617420388352655359) AS n +``` + +結果: + +``` text +┌─n─┐ +│ 1 │ +└───┘ +``` + +## h3ToChildren {#h3tochildren} + +指定したインデックスの子インデックスを持つ配列を返します。 + +**構文** + +``` sql +h3ToChildren(index, resolution) +``` + +**パラメータ** + +- `index` — Hexagon index number. Type: [UInt64](../../sql_reference/data_types/int_uint.md). +- `resolution` — Index resolution. Range: `[0, 15]`. タイプ: [UInt8](../../sql_reference/data_types/int_uint.md). + +**戻り値** + +- 子h3インデックスを持つ配列。 タイプの配列: [UInt64](../../sql_reference/data_types/int_uint.md). 
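Each step down in resolution subdivides a hexagon cell into exactly seven children (six for the twelve pentagon cells), so the length of the returned array is a quick sanity check; a sketch one step below the parent's resolution:

``` sql
SELECT length(h3ToChildren(599405990164561919, 6)) AS child_count -- 7 for a hexagon cell
```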
+ +**例えば** + +クエリ: + +``` sql +SELECT h3ToChildren(599405990164561919, 6) AS children +``` + +結果: + +``` text +┌─children───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐ +│ [603909588852408319,603909588986626047,603909589120843775,603909589255061503,603909589389279231,603909589523496959,603909589657714687] │ +└────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ +``` + +## h3親 {#h3toparent} + +を返します(粗)インデックスを含むとして指定されたインデックス. + +**構文** + +``` sql +h3ToParent(index, resolution) +``` + +**パラメータ** + +- `index` — Hexagon index number. Type: [UInt64](../../sql_reference/data_types/int_uint.md). +- `resolution` — Index resolution. Range: `[0, 15]`. タイプ: [UInt8](../../sql_reference/data_types/int_uint.md). + +**戻り値** + +- 親のh3インデックス。 タイプ: [UInt64](../../sql_reference/data_types/int_uint.md). + +**例えば** + +クエリ: + +``` sql +SELECT h3ToParent(599405990164561919, 3) as parent +``` + +結果: + +``` text +┌─────────────parent─┐ +│ 590398848891879423 │ +└────────────────────┘ +``` + +## h3ToString {#h3tostring} + +インデックスのh3index表現を文字列表現に変換します。 + +``` sql +h3ToString(index) +``` + +**パラメータ** + +- `index` — Hexagon index number. Type: [UInt64](../../sql_reference/data_types/int_uint.md). + +**戻り値** + +- H3インデックスの文字列表現。 タイプ: [文字列](../../sql_reference/data_types/string.md). + +**例えば** + +クエリ: + +``` sql +SELECT h3ToString(617420388352917503) as h3_string +``` + +結果: + +``` text +┌─h3_string───────┐ +│ 89184926cdbffff │ +└─────────────────┘ +``` + +## stringToH3 {#stringtoh3} + +文字列表現をh3index(uint64)表現に変換します。 + +``` sql +stringToH3(index_str) +``` + +**パラメータ** + +- `index_str` — String representation of the H3 index. Type: [文字列](../../sql_reference/data_types/string.md). + +**戻り値** + +- 六角形のインデックス番号。 エラー時に0を返します。 タイプ: [UInt64](../../sql_reference/data_types/int_uint.md). + +**例えば** + +クエリ: + +``` sql +SELECT stringToH3('89184926cc3ffff') as index +``` + +結果: + +``` text +┌──────────────index─┐ +│ 617420388351344639 │ +└────────────────────┘ +``` + +## h3GetResolution {#h3getresolution} + +インデックスの解像度を返します。 + +**構文** + +``` sql +h3GetResolution(index) +``` + +**パラメータ** + +- `index` — Hexagon index number. Type: [UInt64](../../sql_reference/data_types/int_uint.md). + +**戻り値** + +- インデックスの解決。 範囲: `[0, 15]`. タイプ: [UInt8](../../sql_reference/data_types/int_uint.md). 
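A round trip through geoToH3 (described earlier in this section) makes a convenient sanity check, since the resolution returned should match the one requested:

``` sql
SELECT h3GetResolution(geoToH3(37.79506683, 55.71290588, 9)) AS res -- expected: 9
```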
+ +**例えば** + +クエリ: + +``` sql +SELECT h3GetResolution(617420388352917503) as res +``` + +結果: + +``` text +┌─res─┐ +│ 9 │ +└─────┘ +``` + +[元の記事](https://clickhouse.tech/docs/en/query_language/functions/geo/) diff --git a/docs/ja/sql_reference/functions/hash_functions.md b/docs/ja/sql_reference/functions/hash_functions.md deleted file mode 120000 index 35d7ebc68c5..00000000000 --- a/docs/ja/sql_reference/functions/hash_functions.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/functions/hash_functions.md \ No newline at end of file diff --git a/docs/ja/sql_reference/functions/hash_functions.md b/docs/ja/sql_reference/functions/hash_functions.md new file mode 100644 index 00000000000..7b2eb3fc239 --- /dev/null +++ b/docs/ja/sql_reference/functions/hash_functions.md @@ -0,0 +1,446 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 50 +toc_title: "\u30CF\u30C3\u30B7\u30E5" +--- + +# ハッシュ関数 {#hash-functions} + +ハッシュ関数は、要素の決定論的な擬似ランダムシャッフルに使用できます。 + +## ハーフmd5 {#hash-functions-halfmd5} + +[解釈する](../../sql_reference/functions/type_conversion_functions.md#type_conversion_functions-reinterpretAsString) すべての入力パラメーターを文字列として計算します [MD5](https://en.wikipedia.org/wiki/MD5) それぞれのハッシュ値。 次に、ハッシュを結合し、結果の文字列のハッシュの最初の8バイトを取り、それらを次のように解釈します `UInt64` ビッグエンディアンのバイト順。 + +``` sql +halfMD5(par1, ...) +``` + +この関数は比較的遅い(プロセッサコアあたり5万個の短い文字列)。 +を使用することを検討 [サイファッシュ64](#hash_functions-siphash64) 代わりに機能。 + +**パラメータ** + +この関数は、可変個の入力パラメータを受け取ります。 パラメー [対応データ型](../../sql_reference/data_types/index.md). + +**戻り値** + +A [UInt64](../../sql_reference/data_types/int_uint.md) データ型ハッシュ値。 + +**例えば** + +``` sql +SELECT halfMD5(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS halfMD5hash, toTypeName(halfMD5hash) AS type +``` + +``` text +┌────────halfMD5hash─┬─type───┐ +│ 186182704141653334 │ UInt64 │ +└────────────────────┴────────┘ +``` + +## MD5 {#hash_functions-md5} + +文字列からmd5を計算し、結果のバイトセットをfixedstring(16)として返します。 +特にmd5を必要としないが、適切な暗号化128ビットハッシュが必要な場合は、 ‘sipHash128’ 代わりに機能。 +Md5sumユーティリティによる出力と同じ結果を得たい場合は、lower(hex(MD5(s)))を使用します。 + +## サイファッシュ64 {#hash_functions-siphash64} + +64ビットを生成する [サイファッシュ](https://131002.net/siphash/) ハッシュ値。 + +``` sql +sipHash64(par1,...) +``` + +これは暗号化ハッシュ関数です。 それはより速い少なくとも三回働きます [MD5](#hash_functions-md5) 機能。 + +機能 [解釈する](../../sql_reference/functions/type_conversion_functions.md#type_conversion_functions-reinterpretAsString) すべての入力パラメータを文字列として計算し、それぞれのハッシュ値を計算します。 その融合ハッシュにより、次のアルゴリズム: + +1. すべての入力パラメータをハッシュした後、関数はハッシュの配列を取得します。 +2. 関数は、第一及び第二の要素を取り、それらの配列のためのハッシュを計算します。 +3. 次に、関数は、前のステップで計算されたハッシュ値と最初のハッシュ配列の第三の要素を取り、それらの配列のハッシュを計算します。 +4. 最初のハッシュ配列の残りのすべての要素について、前の手順が繰り返されます。 + +**パラメータ** + +この関数は、可変個の入力パラメータを受け取ります。 パラメー [対応データ型](../../sql_reference/data_types/index.md). + +**戻り値** + +A [UInt64](../../sql_reference/data_types/int_uint.md) データ型ハッシュ値。 + +**例えば** + +``` sql +SELECT sipHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS SipHash, toTypeName(SipHash) AS type +``` + +``` text +┌──────────────SipHash─┬─type───┐ +│ 13726873534472839665 │ UInt64 │ +└──────────────────────┴────────┘ +``` + +## サイファシー128 {#hash_functions-siphash128} + +文字列からサイファッシュを計算します。 +文字列型引数を受け取ります。 fixedstring(16)を返します。 +Siphash64とは異なり、最終的なxor折りたたみ状態は128ビットまでしか行われません。 + +## cityHash64 {#cityhash64} + +64ビットを生成する [CityHash](https://github.com/google/cityhash) ハッシュ値。 + +``` sql +cityHash64(par1,...) 
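-- variadic: any number of arguments of the supported data types may be passed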
+``` + +これは高速な非暗号化ハッシュ関数です。 文字列パラメーターにはcityhashアルゴリズムを使用し、他のデータ型のパラメーターには実装固有の高速非暗号化ハッシュ関数を使用します。 この関数は、最終的な結果を得るためにcityhashコンビネータを使用します。 + +**パラメータ** + +この関数は、可変個の入力パラメータを受け取ります。 パラメー [対応データ型](../../sql_reference/data_types/index.md). + +**戻り値** + +A [UInt64](../../sql_reference/data_types/int_uint.md) データ型ハッシュ値。 + +**例** + +呼び出しの例: + +``` sql +SELECT cityHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS CityHash, toTypeName(CityHash) AS type +``` + +``` text +┌─────────────CityHash─┬─type───┐ +│ 12072650598913549138 │ UInt64 │ +└──────────────────────┴────────┘ +``` + +次の例は、行順序までの精度でテーブル全体のチェックサムを計算する方法を示しています: + +``` sql +SELECT groupBitXor(cityHash64(*)) FROM table +``` + +## intHash32 {#inthash32} + +任意のタイプの整数から32ビットのハッシュコードを計算します。 +これは、数値の平均品質の比較的高速な非暗号化ハッシュ関数です。 + +## intHash64 {#inthash64} + +任意のタイプの整数から64ビットのハッシュコードを計算します。 +それはinthash32より速く働きます。 平均品質。 + +## SHA1 {#sha1} + +## SHA224 {#sha224} + +## SHA256 {#sha256} + +文字列からsha-1、sha-224、またはsha-256を計算し、結果のバイトセットをfixedstring(20)、fixedstring(28)、またはfixedstring(32)として返します。 +この関数はかなりゆっくりと動作します(sha-1はプロセッサコアあたり約5百万個の短い文字列を処理し、sha-224とsha-256は約2.2百万プロセス)。 +特定のハッシュ関数が必要で選択できない場合にのみ、この関数を使用することをお勧めします。 +このような場合でも、テーブルに値を挿入するときは、selectsに値を適用するのではなく、関数をオフラインで適用して事前計算することをお勧めします。 + +## URLHash(url\[,N\]) {#urlhashurl-n} + +いくつかのタイプの正規化を使用してurlから取得した文字列の高速でまともな品質の非暗号化ハッシュ関数。 +`URLHash(s)` – Calculates a hash from a string without one of the trailing symbols `/`,`?` または `#` 最後に、存在する場合。 +`URLHash(s, N)` – Calculates a hash from a string up to the N level in the URL hierarchy, without one of the trailing symbols `/`,`?` または `#` 最後に、存在する場合。 +レベルはurlhierarchyと同じです。 この機能はyandexに固有です。メトリカ + +## farmHash64 {#farmhash64} + +64ビットを生成する [FarmHash](https://github.com/google/farmhash) ハッシュ値。 + +``` sql +farmHash64(par1, ...) +``` + +この関数は、 `Hash64` すべてからの方法 [利用可能な方法](https://github.com/google/farmhash/blob/master/src/farmhash.h). + +**パラメータ** + +この関数は、可変個の入力パラメータを受け取ります。 パラメー [対応データ型](../../sql_reference/data_types/index.md). + +**戻り値** + +A [UInt64](../../sql_reference/data_types/int_uint.md) データ型ハッシュ値。 + +**例えば** + +``` sql +SELECT farmHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS FarmHash, toTypeName(FarmHash) AS type +``` + +``` text +┌─────────────FarmHash─┬─type───┐ +│ 17790458267262532859 │ UInt64 │ +└──────────────────────┴────────┘ +``` + +## javaHash {#hash_functions-javahash} + +計算 [JavaHash](http://hg.openjdk.java.net/jdk8u/jdk8u/jdk/file/478a4add975b/src/share/classes/java/lang/String.java#l1452) 文字列から。 このハッシュ関数は高速でも良い品質でもありません。 理由はただそれだけに使用する場合もこのアルゴリズムは実用化が始まっており他のシステムとしての計算は、全く同じ働きをします。 + +**構文** + +``` sql +SELECT javaHash(''); +``` + +**戻り値** + +A `Int32` データ型ハッシュ値。 + +**例えば** + +クエリ: + +``` sql +SELECT javaHash('Hello, world!'); +``` + +結果: + +``` text +┌─javaHash('Hello, world!')─┐ +│ -1880044555 │ +└───────────────────────────┘ +``` + +## javaHashUTF16LE {#javahashutf16le} + +計算 [JavaHash](http://hg.openjdk.java.net/jdk8u/jdk8u/jdk/file/478a4add975b/src/share/classes/java/lang/String.java#l1452) 文字列から、UTF-16LEエンコーディングで文字列を表すバイトが含まれていると仮定します。 + +**構文** + +``` sql +javaHashUTF16LE(stringUtf16le) +``` + +**パラメータ** + +- `stringUtf16le` — a string in UTF-16LE encoding. 
+ +**戻り値** + +A `Int32` データ型ハッシュ値。 + +**例えば** + +UTF-16LEでエンコードされた文字列で正しいクエリー。 + +クエリ: + +``` sql +SELECT javaHashUTF16LE(convertCharset('test', 'utf-8', 'utf-16le')) +``` + +結果: + +``` text +┌─javaHashUTF16LE(convertCharset('test', 'utf-8', 'utf-16le'))─┐ +│ 3556498 │ +└──────────────────────────────────────────────────────────────┘ +``` + +## hiveHash {#hash-functions-hivehash} + +計算 `HiveHash` 文字列から。 + +``` sql +SELECT hiveHash(''); +``` + +これはちょうど [JavaHash](#hash_functions-javahash) サインビットをゼロにします。 この関数は、 [Apacheハイブ](https://en.wikipedia.org/wiki/Apache_Hive) 3.0より前のバージョンの場合。 このハッシュ関数は高速でも良い品質でもありません。 理由はただそれだけに使用する場合もこのアルゴリズムは実用化が始まっており他のシステムとしての計算は、全く同じ働きをします。 + +**戻り値** + +A `Int32` データ型ハッシュ値。 + +タイプ: `hiveHash`. + +**例えば** + +クエリ: + +``` sql +SELECT hiveHash('Hello, world!'); +``` + +結果: + +``` text +┌─hiveHash('Hello, world!')─┐ +│ 267439093 │ +└───────────────────────────┘ +``` + +## metroHash64 {#metrohash64} + +64ビットを生成する [MetroHash](http://www.jandrewrogers.com/2015/05/27/metrohash/) ハッシュ値。 + +``` sql +metroHash64(par1, ...) +``` + +**パラメータ** + +この関数は、可変個の入力パラメータを受け取ります。 パラメー [対応データ型](../../sql_reference/data_types/index.md). + +**戻り値** + +A [UInt64](../../sql_reference/data_types/int_uint.md) データ型ハッシュ値。 + +**例えば** + +``` sql +SELECT metroHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS MetroHash, toTypeName(MetroHash) AS type +``` + +``` text +┌────────────MetroHash─┬─type───┐ +│ 14235658766382344533 │ UInt64 │ +└──────────────────────┴────────┘ +``` + +## jumpConsistentHash {#jumpconsistenthash} + +JumpConsistentHashフォームUInt64を計算します。 +UInt64型のキーとバケットの数です。 Int32を返します。 +詳細については、リンク: [JumpConsistentHash](https://arxiv.org/pdf/1406.2294.pdf) + +## murmurHash2\_32,murmurHash2\_64 {#murmurhash2-32-murmurhash2-64} + +を生成する [MurmurHash2](https://github.com/aappleby/smhasher) ハッシュ値。 + +``` sql +murmurHash2_32(par1, ...) +murmurHash2_64(par1, ...) +``` + +**パラメータ** + +どちらの関数も、入力パラメータの可変数を取ります。 パラメー [対応データ型](../../sql_reference/data_types/index.md). + +**戻り値** + +- その `murmurHash2_32` 関数は、ハッシュ値を返します [UInt32](../../sql_reference/data_types/int_uint.md) データ型。 +- その `murmurHash2_64` 関数は、ハッシュ値を返します [UInt64](../../sql_reference/data_types/int_uint.md) データ型。 + +**例えば** + +``` sql +SELECT murmurHash2_64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS MurmurHash2, toTypeName(MurmurHash2) AS type +``` + +``` text +┌──────────MurmurHash2─┬─type───┐ +│ 11832096901709403633 │ UInt64 │ +└──────────────────────┴────────┘ +``` + +## murmurHash3\_32,murmurHash3\_64 {#murmurhash3-32-murmurhash3-64} + +を生成する [MurmurHash3](https://github.com/aappleby/smhasher) ハッシュ値。 + +``` sql +murmurHash3_32(par1, ...) +murmurHash3_64(par1, ...) +``` + +**パラメータ** + +どちらの関数も、入力パラメータの可変数を取ります。 パラメー [対応データ型](../../sql_reference/data_types/index.md). 
+ +**戻り値** + +- その `murmurHash3_32` 関数は、 [UInt32](../../sql_reference/data_types/int_uint.md) データ型ハッシュ値。 +- その `murmurHash3_64` 関数は、 [UInt64](../../sql_reference/data_types/int_uint.md) データ型ハッシュ値。 + +**例えば** + +``` sql +SELECT murmurHash3_32(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS MurmurHash3, toTypeName(MurmurHash3) AS type +``` + +``` text +┌─MurmurHash3─┬─type───┐ +│ 2152717 │ UInt32 │ +└─────────────┴────────┘ +``` + +## murmurHash3\_128 {#murmurhash3-128} + +128ビットを生成する [MurmurHash3](https://github.com/aappleby/smhasher) ハッシュ値。 + +``` sql +murmurHash3_128( expr ) +``` + +**パラメータ** + +- `expr` — [式](../syntax.md#syntax-expressions) を返す [文字列](../../sql_reference/data_types/string.md)-タイプ値。 + +**戻り値** + +A [FixedString(16)](../../sql_reference/data_types/fixedstring.md) データ型ハッシュ値。 + +**例えば** + +``` sql +SELECT murmurHash3_128('example_string') AS MurmurHash3, toTypeName(MurmurHash3) AS type +``` + +``` text +┌─MurmurHash3──────┬─type────────────┐ +│ 6�1�4"S5KT�~~q │ FixedString(16) │ +└──────────────────┴─────────────────┘ +``` + +## xxHash32,xxHash64 {#hash-functions-xxhash32} + +計算 `xxHash` 文字列から。 これは、二つの味、32および64ビットで提案されています。 + +``` sql +SELECT xxHash32(''); + +OR + +SELECT xxHash64(''); +``` + +**戻り値** + +A `Uint32` または `Uint64` データ型ハッシュ値。 + +タイプ: `xxHash`. + +**例えば** + +クエリ: + +``` sql +SELECT xxHash32('Hello, world!'); +``` + +結果: + +``` text +┌─xxHash32('Hello, world!')─┐ +│ 834093149 │ +└───────────────────────────┘ +``` + +**また見なさい** + +- [xxHash](http://cyan4973.github.io/xxHash/). + +[元の記事](https://clickhouse.tech/docs/en/query_language/functions/hash_functions/) diff --git a/docs/ja/sql_reference/functions/higher_order_functions.md b/docs/ja/sql_reference/functions/higher_order_functions.md deleted file mode 120000 index 53a472efd3a..00000000000 --- a/docs/ja/sql_reference/functions/higher_order_functions.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/functions/higher_order_functions.md \ No newline at end of file diff --git a/docs/ja/sql_reference/functions/higher_order_functions.md b/docs/ja/sql_reference/functions/higher_order_functions.md new file mode 100644 index 00000000000..3dac2bd98aa --- /dev/null +++ b/docs/ja/sql_reference/functions/higher_order_functions.md @@ -0,0 +1,264 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 57 +toc_title: "\u3088\u308A\u9AD8\u3044\u6B21\u6570" +--- + +# 高階関数 {#higher-order-functions} + +## `->` 演算子、ラムダ(params,expr)関数 {#operator-lambdaparams-expr-function} + +Allows describing a lambda function for passing to a higher-order function. The left side of the arrow has a formal parameter, which is any ID, or multiple formal parameters – any IDs in a tuple. The right side of the arrow has an expression that can use these formal parameters, as well as any table columns. 
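A lambda is not a value by itself; it exists only as an argument to a higher-order function. A minimal sketch of the multi-parameter form, where each formal parameter is paired with one array of the same length:

``` sql
SELECT arrayMap((x, y) -> x + y, [1, 2, 3], [10, 20, 30]) AS sums -- [11,22,33]
```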
+ +例: `x -> 2 * x, str -> str != Referer.` + +高階関数は、関数の引数としてラムダ関数のみを受け入れることができます。 + +複数の引数を受け入れるラムダ関数は、高階関数に渡すことができます。 この場合、高次関数は、これらの引数が対応する同一の長さのいくつかの配列を渡されます。 + +いくつかの機能については、 [arrayCount](#higher_order_functions-array-count) または [arraySum](#higher_order_functions-array-count)、最初の引数(ラムダ関数)を省略することができます。 この場合、同一のマッピングが想定されます。 + +以下の関数ではラムダ関数を省略することはできません: + +- [arrayMap](#higher_order_functions-array-map) +- [arrayFilter](#higher_order_functions-array-filter) +- [arrayfillanguage](#higher_order_functions-array-fill) +- [arrayReverseFill](#higher_order_functions-array-reverse-fill) +- [arraySplit](#higher_order_functions-array-split) +- [arrayReverseSplit](#higher_order_functions-array-reverse-split) +- [arrayFirst](#higher_order_functions-array-first) +- [arrayFirstIndex](#higher_order_functions-array-first-index) + +### arrayMap(func, arr1, …) {#higher_order_functions-array-map} + +の元のアプリケーションから取得した配列を返します。 `func` の各要素への機能 `arr` 配列だ + +例: + +``` sql +SELECT arrayMap(x -> (x + 2), [1, 2, 3]) as res; +``` + +``` text +┌─res─────┐ +│ [3,4,5] │ +└─────────┘ +``` + +次の例は、異なる配列から要素のタプルを作成する方法を示しています: + +``` sql +SELECT arrayMap((x, y) -> (x, y), [1, 2, 3], [4, 5, 6]) AS res +``` + +``` text +┌─res─────────────────┐ +│ [(1,4),(2,5),(3,6)] │ +└─────────────────────┘ +``` + +最初の引数(ラムダ関数)を省略することはできないことに注意してください。 `arrayMap` 機能。 + +### arrayFilter(func, arr1, …) {#higher_order_functions-array-filter} + +要素のみを含む配列を返します `arr1` そのために `func` 0以外の値を返します。 + +例: + +``` sql +SELECT arrayFilter(x -> x LIKE '%World%', ['Hello', 'abc World']) AS res +``` + +``` text +┌─res───────────┐ +│ ['abc World'] │ +└───────────────┘ +``` + +``` sql +SELECT + arrayFilter( + (i, x) -> x LIKE '%World%', + arrayEnumerate(arr), + ['Hello', 'abc World'] AS arr) + AS res +``` + +``` text +┌─res─┐ +│ [2] │ +└─────┘ +``` + +最初の引数(ラムダ関数)を省略することはできないことに注意してください。 `arrayFilter` 機能。 + +### arrayFill(func, arr1, …) {#higher_order_functions-array-fill} + +スキャンスルー `arr1` 最初の要素から最後の要素まで `arr1[i]` によって `arr1[i - 1]` もし `func` 0を返します。 の最初の要素 `arr1` 交換されません。 + +例: + +``` sql +SELECT arrayFill(x -> not isNull(x), [1, null, 3, 11, 12, null, null, 5, 6, 14, null, null]) AS res +``` + +``` text +┌─res──────────────────────────────┐ +│ [1,1,3,11,12,12,12,5,6,14,14,14] │ +└──────────────────────────────────┘ +``` + +最初の引数(ラムダ関数)を省略することはできないことに注意してください。 `arrayFill` 機能。 + +### arrayReverseFill(func, arr1, …) {#higher_order_functions-array-reverse-fill} + +スキャンスルー `arr1` 最後の要素から最初の要素へと置き換えます `arr1[i]` によって `arr1[i + 1]` もし `func` 0を返します。 の最後の要素 `arr1` 交換されません。 + +例: + +``` sql +SELECT arrayReverseFill(x -> not isNull(x), [1, null, 3, 11, 12, null, null, 5, 6, 14, null, null]) AS res +``` + +``` text +┌─res────────────────────────────────┐ +│ [1,3,3,11,12,5,5,5,6,14,NULL,NULL] │ +└────────────────────────────────────┘ +``` + +最初の引数(ラムダ関数)を省略することはできないことに注意してください。 `arrayReverseFill` 機能。 + +### arraySplit(func, arr1, …) {#higher_order_functions-array-split} + +分割 `arr1` 複数の配列に変換します とき `func` 0以外のものを返すと、配列は要素の左側で分割されます。 配列は最初の要素の前に分割されません。 + +例: + +``` sql +SELECT arraySplit((x, y) -> y, [1, 2, 3, 4, 5], [1, 0, 0, 1, 0]) AS res +``` + +``` text +┌─res─────────────┐ +│ [[1,2,3],[4,5]] │ +└─────────────────┘ +``` + +最初の引数(ラムダ関数)を省略することはできないことに注意してください。 `arraySplit` 機能。 + +### arrayReverseSplit(func, arr1, …) {#higher_order_functions-array-reverse-split} + +分割 `arr1` 複数の配列に変換します とき `func` 0以外のものを返すと、配列は要素の右側で分割されます。 配列は最後の要素の後に分割されません。 + +例: + +``` sql +SELECT arrayReverseSplit((x, y) -> y, [1, 2, 3, 4, 5], [1, 0, 0, 1, 0]) AS res +``` + +``` text 
+┌─res───────────────┐ +│ [[1],[2,3,4],[5]] │ +└───────────────────┘ +``` + +最初の引数(ラムダ関数)を省略することはできないことに注意してください。 `arraySplit` 機能。 + +### arrayCount(\[func,\] arr1, …) {#higher_order_functions-array-count} + +Funcが0以外を返すarr配列内の要素の数を返します。 もし ‘func’ 指定されていない場合は、配列内の非ゼロ要素の数を返します。 + +### arrayExists(\[func,\] arr1, …) {#arrayexistsfunc-arr1} + +少なくとも1つの要素がある場合は1を返します ‘arr’ そのために ‘func’ 0以外の値を返します。 それ以外の場合は、0を返します。 + +### arrayAll(\[func,\] arr1, …) {#arrayallfunc-arr1} + +場合は1を返します ‘func’ すべての要素に対して0以外のものを返します ‘arr’. それ以外の場合は、0を返します。 + +### arraySum(\[func,\] arr1, …) {#higher-order-functions-array-sum} + +の合計を返します ‘func’ 値。 関数が省略された場合、それだけで配列要素の合計を返します。 + +### arrayFirst(func, arr1, …) {#higher_order_functions-array-first} + +の最初の要素を返します ‘arr1’ そのための配列 ‘func’ 0以外の値を返します。 + +最初の引数(ラムダ関数)を省略することはできないことに注意してください。 `arrayFirst` 機能。 + +### arrayFirstIndex(func, arr1, …) {#higher_order_functions-array-first-index} + +最初の要素のインデックスを返します ‘arr1’ そのための配列 ‘func’ 0以外の値を返します。 + +最初の引数(ラムダ関数)を省略することはできないことに注意してください。 `arrayFirstIndex` 機能。 + +### arrayCumSum(\[func,\] arr1, …) {#arraycumsumfunc-arr1} + +ソース配列内の要素の部分和の配列(実行中の合計)を返します。 この `func` 関数が指定されると、配列要素の値が合計される前にこの関数によって変換されます。 + +例えば: + +``` sql +SELECT arrayCumSum([1, 1, 1, 1]) AS res +``` + +``` text +┌─res──────────┐ +│ [1, 2, 3, 4] │ +└──────────────┘ +``` + +### ツ環板篠ョツ嘉ッツ偲青ツエツδツ-ツアツイツ-ツエツス) {#arraycumsumnonnegativearr} + +と同じ `arrayCumSum`,ソース配列の要素の部分和の配列を返します(実行中の合計). 異なる `arrayCumSum`、その後、戻り値がゼロ未満の値が含まれている場合、値がゼロで置き換えられ、その後の計算は、ゼロのパラメータで実行されます。 例えば: + +``` sql +SELECT arrayCumSumNonNegative([1, 1, -4, 1]) AS res +``` + +``` text +┌─res───────┐ +│ [1,2,0,1] │ +└───────────┘ +``` + +### arraySort(\[func,\] arr1, …) {#arraysortfunc-arr1} + +の要素をソートした結果として配列を返します `arr1` 昇順で。 この `func` 関数が指定され、ソート順序は関数の結果によって決定されます `func` 配列(配列)の要素に適用されます) + +その [シュワルツ語変換](https://en.wikipedia.org/wiki/Schwartzian_transform) 分類の効率を改善するのに使用されています。 + +例えば: + +``` sql +SELECT arraySort((x, y) -> y, ['hello', 'world'], [2, 1]); +``` + +``` text +┌─res────────────────┐ +│ ['world', 'hello'] │ +└────────────────────┘ +``` + +の詳細については、 `arraySort` 方法は、を参照してください [配列を操作するための関数](array_functions.md#array_functions-sort) セクション。 + +### arrayReverseSort(\[func,\] arr1, …) {#arrayreversesortfunc-arr1} + +の要素をソートした結果として配列を返します `arr1` 降順で。 この `func` 関数が指定され、ソート順序は関数の結果によって決定されます `func` 配列(配列)の要素に適用されます。 + +例えば: + +``` sql +SELECT arrayReverseSort((x, y) -> y, ['hello', 'world'], [2, 1]) as res; +``` + +``` text +┌─res───────────────┐ +│ ['hello','world'] │ +└───────────────────┘ +``` + +の詳細については、 `arrayReverseSort` 方法は、を参照してください [配列を操作するための関数](array_functions.md#array_functions-reverse-sort) セクション。 + +[元の記事](https://clickhouse.tech/docs/en/query_language/functions/higher_order_functions/) diff --git a/docs/ja/sql_reference/functions/in_functions.md b/docs/ja/sql_reference/functions/in_functions.md deleted file mode 120000 index 69f878a680d..00000000000 --- a/docs/ja/sql_reference/functions/in_functions.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/functions/in_functions.md \ No newline at end of file diff --git a/docs/ja/sql_reference/functions/in_functions.md b/docs/ja/sql_reference/functions/in_functions.md new file mode 100644 index 00000000000..5f6d09af1f1 --- /dev/null +++ b/docs/ja/sql_reference/functions/in_functions.md @@ -0,0 +1,26 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 60 +toc_title: "IN\u6F14\u7B97\u5B50\u306E\u5B9F\u88C5" +--- + +# IN演算子を実装するための関数 
{#functions-for-implementing-the-in-operator} + +## で,ノッチン,グロバリン,グロバルノチン {#in-functions} + +セクションを見る [演算子の場合](../statements/select.md#select-in-operators). + +## tuple(x, y, …), operator (x, y, …) {#tuplex-y-operator-x-y} + +複数の列をグループ化できる関数。 +For columns with the types T1, T2, …, it returns a Tuple(T1, T2, …) type tuple containing these columns. There is no cost to execute the function. +タプルは、通常、in演算子の引数の中間値として、またはラムダ関数の仮パラメータのリストを作成するために使用されます。 タプルはテーブルに書き込むことはできません。 + +## tupleElement(tuple,n),演算子x.N {#tupleelementtuple-n-operator-x-n} + +タプルから列を取得できる関数。 +‘N’ 1から始まる列インデックスです。 Nは定数でなければなりません。 ‘N’ 定数でなければなりません。 ‘N’ タプルのサイズを超えない厳密なポジティブな整数でなければなりません。 +関数を実行するコストはかかりません。 + +[元の記事](https://clickhouse.tech/docs/en/query_language/functions/in_functions/) diff --git a/docs/ja/sql_reference/functions/index.md b/docs/ja/sql_reference/functions/index.md deleted file mode 120000 index 6b19d8b6d8f..00000000000 --- a/docs/ja/sql_reference/functions/index.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/functions/index.md \ No newline at end of file diff --git a/docs/ja/sql_reference/functions/index.md b/docs/ja/sql_reference/functions/index.md new file mode 100644 index 00000000000..1094c1857cf --- /dev/null +++ b/docs/ja/sql_reference/functions/index.md @@ -0,0 +1,74 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_folder_title: Functions +toc_priority: 32 +toc_title: "\u5C0E\u5165" +--- + +# 機能 {#functions} + +少なくとも\*つのタイプの関数があります-通常の関数(これらは単に呼び出されます “functions”) and aggregate functions. These are completely different concepts. Regular functions work as if they are applied to each row separately (for each row, the result of the function doesn't depend on the other rows). Aggregate functions accumulate a set of values from various rows (i.e. they depend on the entire set of rows). + +このセクションでは、定期的な機能を説明します。 のための集計関数の項をご参照ください “Aggregate functions”. + +\*-その機能の第三のタイプがあります ‘arrayJoin’ テーブルの機能も別々に言及することができます。\* + +## 強力なタイピング {#strong-typing} + +標準sqlとは対照的に、clickhouseは強力な型付けをしています。 言い換えれば、型間の暗黙の変換は行われません。 各関数は、特定のタイプのセットに対して機能します。 これは、時には型変換関数を使用する必要があることを意味します。 + +## 共通部分式の削除 {#common-subexpression-elimination} + +同じast(構文解析の同じレコードまたは同じ結果)を持つクエリ内のすべての式は、同じ値を持つとみなされます。 このような式は連結され、一度実行されます。 この方法では、同一のサブクエリも削除されます。 + +## 結果のタイプ {#types-of-results} + +すべての関数は、結果として単一の戻り値を返します(複数の値ではなく、ゼロの値ではありません)。 結果の型は、通常、値ではなく引数の型によってのみ定義されます。 例外は、tupleelement関数(a.n演算子)とtofixedstring関数です。 + +## 定数 {#constants} + +簡単にするために、特定の関数はいくつかの引数の定数のみで動作します。 たとえば、like演算子のright引数は定数でなければなりません。 +ほとんどすべての関数は定数引数の定数を返します。 例外は、乱数を生成する関数です。 +その ‘now’ 関数は、異なる時間に実行されたクエリに対して異なる値を返しますが、定数は単一のクエリ内でのみ重要であるため、結果は定数と見なされます。 +定数式も定数と見なされます(たとえば、like演算子の右半分は複数の定数から構築できます)。 + +関数は、定数と非定数引数(異なるコードが実行される)のために異なる方法で実装することができます。 しかし、定数と同じ値のみを含む真の列の結果は、互いに一致する必要があります。 + +## ヌル処理 {#null-processing} + +関数の動作は次のとおりです: + +- 関数の引数のうち少なくとも一つが `NULL`、機能結果はまたあります `NULL`. +- 各機能の説明で個別に指定される特別な動作。 のclickhouseソースコードは、これらの機能の `UseDefaultImplementationForNulls=false`. + +## 不変性 {#constancy} + +Functions can't change the values of their arguments – any changes are returned as the result. Thus, the result of calculating separate functions does not depend on the order in which the functions are written in the query. + +## エラー処理 {#error-handling} + +データが無効な場合、一部の関数は例外をスローする可能性があります。 この場合、クエリは取り消され、エラーテキストがクライアントに返されます。 分散処理の場合、いずれかのサーバーで例外が発生すると、他のサーバーもクエリを中止しようとします。 + +## 引数式の評価 {#evaluation-of-argument-expressions} + +ほぼすべてのプログラミング言語の一つの引数が評価される。 これは通常、演算子です `&&`, `||`、と `?:`. 
+しかし、clickhouseでは、関数(演算子)の引数は常に評価されます。 これは、各行を別々に計算するのではなく、列の全部分が一度に評価されるためです。 + +## 分散クエリ処理のための関数の実行 {#performing-functions-for-distributed-query-processing} + +分散クエリ処理では、できるだけ多くのクエリ処理がリモートサーバーで実行され、その他のステージ(中間結果とそれ以降のすべてのステージ)はリクエスター + +つまり、異なるサーバーで機能を実行できます。 +たとえば、クエリでは `SELECT f(sum(g(x))) FROM distributed_table GROUP BY h(y),` + +- もし `distributed_table` は、少なくとも二つのシャード、機能 ‘g’ と ‘h’ リモートサーバー上で実行される。 ‘f’ 要求元サーバーで実行されます。 +- もし `distributed_table` 一つだけシャード、すべてを持っています ‘f’, ‘g’、と ‘h’ 機能は、このシャードのサーバー上で実行されます。 + +関数の結果は、通常、実行されるサーバーに依存しません。 しかし、時にはこれが重要です。 +たとえば、辞書を操作する関数は、実行しているサーバー上に存在する辞書を使用します。 +別の例は、 `hostName` この関数は、実行されているサーバーの名前を返します。 `GROUP BY` aのサーバーによって `SELECT` クエリ。 + +クエリ内の関数がリクエストサーバー上で実行されているが、リモートサーバー上で実行する必要がある場合は、次のようにラップします。 ‘any’ 関数を集めるか、それをキーに追加します `GROUP BY`. + +[元の記事](https://clickhouse.tech/docs/en/query_language/functions/) diff --git a/docs/ja/sql_reference/functions/introspection.md b/docs/ja/sql_reference/functions/introspection.md deleted file mode 120000 index 4236070391b..00000000000 --- a/docs/ja/sql_reference/functions/introspection.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/functions/introspection.md \ No newline at end of file diff --git a/docs/ja/sql_reference/functions/introspection.md b/docs/ja/sql_reference/functions/introspection.md new file mode 100644 index 00000000000..78a1c6ae557 --- /dev/null +++ b/docs/ja/sql_reference/functions/introspection.md @@ -0,0 +1,310 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 65 +toc_title: "\u30A4\u30F3\u30C8\u30ED\u30B9\u30DA\u30AF\u30B7\u30E7\u30F3" +--- + +# イントロスペクション関数 {#introspection-functions} + +利用できる表示可能なプラグインで説明してこの章にintrospect [ELF](https://en.wikipedia.org/wiki/Executable_and_Linkable_Format) と [DWARF](https://en.wikipedia.org/wiki/DWARF) のためのクエリープロファイリング. + +!!! warning "警告" + これらの機能は、が必要となる場合があり安全に配慮し + +イントロスペクション機能を適切に動作させるため: + +- インストール `clickhouse-common-static-dbg` パッケージ。 + +- セットを [allow\_introspection\_functions](../../operations/settings/settings.md#settings-allow_introspection_functions) 1に設定します。 + + For security reasons introspection functions are disabled by default. + +ClickHouseはプロファイラーレポートを保存します [trace\_log](../../operations/system_tables.md#system_tables-trace_log) システムテーブル。 のテーブルプロファイラで設定されます。 + +## アドレスストリンcolor {#addresstoline} + +ClickHouseサーバープロセス内の仮想メモリアドレスを、ClickHouseソースコード内のファイル名と行番号に変換します。 + +公式のclickhouseのパッケージを使用すれば、取付ける必要があります `clickhouse-common-static-dbg` パッケージ。 + +**構文** + +``` sql +addressToLine(address_of_binary_instruction) +``` + +**パラメータ** + +- `address_of_binary_instruction` ([UInt64](../../sql_reference/data_types/int_uint.md)) — Address of instruction in a running process. + +**戻り値** + +- コロンで区切られたこのファイル内のソースコードのファイル名と行番号。 + + For example, `/build/obj-x86_64-linux-gnu/../dbms/Common/ThreadPool.cpp:199`, where `199` is a line number. + +- 関数がデバッグ情報を見つけることができなかった場合、バイナリの名前。 + +- アドレスが有効でない場合は、空の文字列。 + +タイプ: [文字列](../../sql_reference/data_types/string.md). 
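The empty-string case for an invalid address doubles as a smoke test that introspection is wired up (the zero address is assumed to be invalid here, and allow_introspection_functions must already be set, as shown below):

``` sql
SELECT addressToLine(toUInt64(0)) AS result -- invalid address: returns ''
```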
+ +**例えば** + +イントロスペクション機能の有効化: + +``` sql +SET allow_introspection_functions=1 +``` + +から最初の文字列を選択する `trace_log` システム表: + +``` sql +SELECT * FROM system.trace_log LIMIT 1 \G +``` + +``` text +Row 1: +────── +event_date: 2019-11-19 +event_time: 2019-11-19 18:57:23 +revision: 54429 +timer_type: Real +thread_number: 48 +query_id: 421b6855-1858-45a5-8f37-f383409d6d72 +trace: [140658411141617,94784174532828,94784076370703,94784076372094,94784076361020,94784175007680,140658411116251,140658403895439] +``` + +その `trace` 分野のスタックトレースを瞬時にサンプリングします。 + +単一のアドレスのソースコードファイル名と行番号を取得する: + +``` sql +SELECT addressToLine(94784076370703) \G +``` + +``` text +Row 1: +────── +addressToLine(94784076370703): /build/obj-x86_64-linux-gnu/../dbms/Common/ThreadPool.cpp:199 +``` + +スタックトレース全体に関数を適用する: + +``` sql +SELECT + arrayStringConcat(arrayMap(x -> addressToLine(x), trace), '\n') AS trace_source_code_lines +FROM system.trace_log +LIMIT 1 +\G +``` + +その [arrayMap](higher_order_functions.md#higher_order_functions-array-map) 機能はの各々の個々の要素を処理することを割り当てます `trace` による配列 `addressToLine` 機能。 この処理の結果は、次のように表示されます。 `trace_source_code_lines` 出力の列。 + +``` text +Row 1: +────── +trace_source_code_lines: /lib/x86_64-linux-gnu/libpthread-2.27.so +/usr/lib/debug/usr/bin/clickhouse +/build/obj-x86_64-linux-gnu/../dbms/Common/ThreadPool.cpp:199 +/build/obj-x86_64-linux-gnu/../dbms/Common/ThreadPool.h:155 +/usr/include/c++/9/bits/atomic_base.h:551 +/usr/lib/debug/usr/bin/clickhouse +/lib/x86_64-linux-gnu/libpthread-2.27.so +/build/glibc-OTsEL5/glibc-2.27/misc/../sysdeps/unix/sysv/linux/x86_64/clone.S:97 +``` + +## addressToSymbol {#addresstosymbol} + +に変換する仮想メモリアドレス内clickhouseサーバプロセスのシンボルからclickhouseオブジェクトファイルです。 + +**構文** + +``` sql +addressToSymbol(address_of_binary_instruction) +``` + +**パラメータ** + +- `address_of_binary_instruction` ([UInt64](../../sql_reference/data_types/int_uint.md)) — Address of instruction in a running process. + +**戻り値** + +- ClickHouseオブジェクトファイルからの記号。 +- アドレスが有効でない場合は、空の文字列。 + +タイプ: [文字列](../../sql_reference/data_types/string.md). 
+ +**例えば** + +イントロスペクション機能の有効化: + +``` sql +SET allow_introspection_functions=1 +``` + +から最初の文字列を選択する `trace_log` システム表: + +``` sql +SELECT * FROM system.trace_log LIMIT 1 \G +``` + +``` text +Row 1: +────── +event_date: 2019-11-20 +event_time: 2019-11-20 16:57:59 +revision: 54429 +timer_type: Real +thread_number: 48 +query_id: 724028bf-f550-45aa-910d-2af6212b94ac +trace: [94138803686098,94138815010911,94138815096522,94138815101224,94138815102091,94138814222988,94138806823642,94138814457211,94138806823642,94138814457211,94138806823642,94138806795179,94138806796144,94138753770094,94138753771646,94138753760572,94138852407232,140399185266395,140399178045583] +``` + +その `trace` 分野のスタックトレースを瞬時にサンプリングします。 + +単一のアドレスのシンボルを取得する: + +``` sql +SELECT addressToSymbol(94138803686098) \G +``` + +``` text +Row 1: +────── +addressToSymbol(94138803686098): _ZNK2DB24IAggregateFunctionHelperINS_20AggregateFunctionSumImmNS_24AggregateFunctionSumDataImEEEEE19addBatchSinglePlaceEmPcPPKNS_7IColumnEPNS_5ArenaE +``` + +スタックトレース全体に関数を適用する: + +``` sql +SELECT + arrayStringConcat(arrayMap(x -> addressToSymbol(x), trace), '\n') AS trace_symbols +FROM system.trace_log +LIMIT 1 +\G +``` + +その [arrayMap](higher_order_functions.md#higher_order_functions-array-map) 機能はの各々の個々の要素を処理することを割り当てます `trace` による配列 `addressToSymbols` 機能。 この処理の結果は、次のように表示されます。 `trace_symbols` 出力の列。 + +``` text +Row 1: +────── +trace_symbols: _ZNK2DB24IAggregateFunctionHelperINS_20AggregateFunctionSumImmNS_24AggregateFunctionSumDataImEEEEE19addBatchSinglePlaceEmPcPPKNS_7IColumnEPNS_5ArenaE +_ZNK2DB10Aggregator21executeWithoutKeyImplERPcmPNS0_28AggregateFunctionInstructionEPNS_5ArenaE +_ZN2DB10Aggregator14executeOnBlockESt6vectorIN3COWINS_7IColumnEE13immutable_ptrIS3_EESaIS6_EEmRNS_22AggregatedDataVariantsERS1_IPKS3_SaISC_EERS1_ISE_SaISE_EERb +_ZN2DB10Aggregator14executeOnBlockERKNS_5BlockERNS_22AggregatedDataVariantsERSt6vectorIPKNS_7IColumnESaIS9_EERS6_ISB_SaISB_EERb +_ZN2DB10Aggregator7executeERKSt10shared_ptrINS_17IBlockInputStreamEERNS_22AggregatedDataVariantsE +_ZN2DB27AggregatingBlockInputStream8readImplEv +_ZN2DB17IBlockInputStream4readEv +_ZN2DB26ExpressionBlockInputStream8readImplEv +_ZN2DB17IBlockInputStream4readEv +_ZN2DB26ExpressionBlockInputStream8readImplEv +_ZN2DB17IBlockInputStream4readEv +_ZN2DB28AsynchronousBlockInputStream9calculateEv +_ZNSt17_Function_handlerIFvvEZN2DB28AsynchronousBlockInputStream4nextEvEUlvE_E9_M_invokeERKSt9_Any_data +_ZN14ThreadPoolImplI20ThreadFromGlobalPoolE6workerESt14_List_iteratorIS0_E +_ZZN20ThreadFromGlobalPoolC4IZN14ThreadPoolImplIS_E12scheduleImplIvEET_St8functionIFvvEEiSt8optionalImEEUlvE1_JEEEOS4_DpOT0_ENKUlvE_clEv +_ZN14ThreadPoolImplISt6threadE6workerESt14_List_iteratorIS0_E +execute_native_thread_routine +start_thread +clone +``` + +## デマングル {#demangle} + +を使用して取得できるシンボルを変換します。 [addressToSymbol](#addresstosymbol) C++の関数名に関数。 + +**構文** + +``` sql +demangle(symbol) +``` + +**パラメータ** + +- `symbol` ([文字列](../../sql_reference/data_types/string.md)) — Symbol from an object file. + +**戻り値** + +- C++関数の名前。 +- シンボルが有効でない場合は、空の文字列。 + +タイプ: [文字列](../../sql_reference/data_types/string.md). 
+ +**例えば** + +イントロスペクション機能の有効化: + +``` sql +SET allow_introspection_functions=1 +``` + +から最初の文字列を選択する `trace_log` システム表: + +``` sql +SELECT * FROM system.trace_log LIMIT 1 \G +``` + +``` text +Row 1: +────── +event_date: 2019-11-20 +event_time: 2019-11-20 16:57:59 +revision: 54429 +timer_type: Real +thread_number: 48 +query_id: 724028bf-f550-45aa-910d-2af6212b94ac +trace: [94138803686098,94138815010911,94138815096522,94138815101224,94138815102091,94138814222988,94138806823642,94138814457211,94138806823642,94138814457211,94138806823642,94138806795179,94138806796144,94138753770094,94138753771646,94138753760572,94138852407232,140399185266395,140399178045583] +``` + +その `trace` 分野のスタックトレースを瞬時にサンプリングします。 + +単一アドレスの関数名の取得: + +``` sql +SELECT demangle(addressToSymbol(94138803686098)) \G +``` + +``` text +Row 1: +────── +demangle(addressToSymbol(94138803686098)): DB::IAggregateFunctionHelper > >::addBatchSinglePlace(unsigned long, char*, DB::IColumn const**, DB::Arena*) const +``` + +スタックトレース全体に関数を適用する: + +``` sql +SELECT + arrayStringConcat(arrayMap(x -> demangle(addressToSymbol(x)), trace), '\n') AS trace_functions +FROM system.trace_log +LIMIT 1 +\G +``` + +その [arrayMap](higher_order_functions.md#higher_order_functions-array-map) 機能はの各々の個々の要素を処理することを割り当てます `trace` による配列 `demangle` 機能。 この処理の結果は、次のように表示されます。 `trace_functions` 出力の列。 + +``` text +Row 1: +────── +trace_functions: DB::IAggregateFunctionHelper > >::addBatchSinglePlace(unsigned long, char*, DB::IColumn const**, DB::Arena*) const +DB::Aggregator::executeWithoutKeyImpl(char*&, unsigned long, DB::Aggregator::AggregateFunctionInstruction*, DB::Arena*) const +DB::Aggregator::executeOnBlock(std::vector::immutable_ptr, std::allocator::immutable_ptr > >, unsigned long, DB::AggregatedDataVariants&, std::vector >&, std::vector >, std::allocator > > >&, bool&) +DB::Aggregator::executeOnBlock(DB::Block const&, DB::AggregatedDataVariants&, std::vector >&, std::vector >, std::allocator > > >&, bool&) +DB::Aggregator::execute(std::shared_ptr const&, DB::AggregatedDataVariants&) +DB::AggregatingBlockInputStream::readImpl() +DB::IBlockInputStream::read() +DB::ExpressionBlockInputStream::readImpl() +DB::IBlockInputStream::read() +DB::ExpressionBlockInputStream::readImpl() +DB::IBlockInputStream::read() +DB::AsynchronousBlockInputStream::calculate() +std::_Function_handler::_M_invoke(std::_Any_data const&) +ThreadPoolImpl::worker(std::_List_iterator) +ThreadFromGlobalPool::ThreadFromGlobalPool::scheduleImpl(std::function, int, std::optional)::{lambda()#3}>(ThreadPoolImpl::scheduleImpl(std::function, int, std::optional)::{lambda()#3}&&)::{lambda()#1}::operator()() const +ThreadPoolImpl::worker(std::_List_iterator) +execute_native_thread_routine +start_thread +clone +``` diff --git a/docs/ja/sql_reference/functions/ip_address_functions.md b/docs/ja/sql_reference/functions/ip_address_functions.md deleted file mode 120000 index d67d13e8912..00000000000 --- a/docs/ja/sql_reference/functions/ip_address_functions.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/functions/ip_address_functions.md \ No newline at end of file diff --git a/docs/ja/sql_reference/functions/ip_address_functions.md b/docs/ja/sql_reference/functions/ip_address_functions.md new file mode 100644 index 00000000000..1eca3b316fc --- /dev/null +++ b/docs/ja/sql_reference/functions/ip_address_functions.md @@ -0,0 +1,248 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 55 +toc_title: 
"IP\u30A2\u30C9\u30EC\u30B9\u306E\u64CD\u4F5C" +--- + +# IPアドレスを操作するための機能 {#functions-for-working-with-ip-addresses} + +## IPv4NumToString(num) {#ipv4numtostringnum} + +UInt32番号を受け取ります。 ビッグエンディアンのIPv4アドレスとして解釈します。 対応するIPv4アドレスをA.B.C.dという形式で含む文字列を返します。 + +## IPv4StringToNum(s) {#ipv4stringtonums} + +IPv4NumToStringの逆関数. IPv4アドレスの形式が無効な場合は、0を返します。 + +## IPv4NumToStringClassC(num) {#ipv4numtostringclasscnum} + +IPv4NumToStringに似ていますが、最後のオクテットの代わりにxxxを使用します。 + +例えば: + +``` sql +SELECT + IPv4NumToStringClassC(ClientIP) AS k, + count() AS c +FROM test.hits +GROUP BY k +ORDER BY c DESC +LIMIT 10 +``` + +``` text +┌─k──────────────┬─────c─┐ +│ 83.149.9.xxx │ 26238 │ +│ 217.118.81.xxx │ 26074 │ +│ 213.87.129.xxx │ 25481 │ +│ 83.149.8.xxx │ 24984 │ +│ 217.118.83.xxx │ 22797 │ +│ 78.25.120.xxx │ 22354 │ +│ 213.87.131.xxx │ 21285 │ +│ 78.25.121.xxx │ 20887 │ +│ 188.162.65.xxx │ 19694 │ +│ 83.149.48.xxx │ 17406 │ +└────────────────┴───────┘ +``` + +使用して以来 ‘xxx’ 非常に珍しいです、これは将来変更されるかもしれません。 このフラグメントの正確なフォーマットに依存しないことをお勧めします。 + +### IPv6NumToString(x) {#ipv6numtostringx} + +バイナリ形式のipv6アドレスを含むfixedstring(16)値を受け入れます。 このアドレスを含む文字列をテキスト形式で返します。 +IPv6にマップされたIPv4アドレスは、::ffff:111.222.33.44の形式で出力されます。 例: + +``` sql +SELECT IPv6NumToString(toFixedString(unhex('2A0206B8000000000000000000000011'), 16)) AS addr +``` + +``` text +┌─addr─────────┐ +│ 2a02:6b8::11 │ +└──────────────┘ +``` + +``` sql +SELECT + IPv6NumToString(ClientIP6 AS k), + count() AS c +FROM hits_all +WHERE EventDate = today() AND substring(ClientIP6, 1, 12) != unhex('00000000000000000000FFFF') +GROUP BY k +ORDER BY c DESC +LIMIT 10 +``` + +``` text +┌─IPv6NumToString(ClientIP6)──────────────┬─────c─┐ +│ 2a02:2168:aaa:bbbb::2 │ 24695 │ +│ 2a02:2698:abcd:abcd:abcd:abcd:8888:5555 │ 22408 │ +│ 2a02:6b8:0:fff::ff │ 16389 │ +│ 2a01:4f8:111:6666::2 │ 16016 │ +│ 2a02:2168:888:222::1 │ 15896 │ +│ 2a01:7e00::ffff:ffff:ffff:222 │ 14774 │ +│ 2a02:8109:eee:ee:eeee:eeee:eeee:eeee │ 14443 │ +│ 2a02:810b:8888:888:8888:8888:8888:8888 │ 14345 │ +│ 2a02:6b8:0:444:4444:4444:4444:4444 │ 14279 │ +│ 2a01:7e00::ffff:ffff:ffff:ffff │ 13880 │ +└─────────────────────────────────────────┴───────┘ +``` + +``` sql +SELECT + IPv6NumToString(ClientIP6 AS k), + count() AS c +FROM hits_all +WHERE EventDate = today() +GROUP BY k +ORDER BY c DESC +LIMIT 10 +``` + +``` text +┌─IPv6NumToString(ClientIP6)─┬──────c─┐ +│ ::ffff:94.26.111.111 │ 747440 │ +│ ::ffff:37.143.222.4 │ 529483 │ +│ ::ffff:5.166.111.99 │ 317707 │ +│ ::ffff:46.38.11.77 │ 263086 │ +│ ::ffff:79.105.111.111 │ 186611 │ +│ ::ffff:93.92.111.88 │ 176773 │ +│ ::ffff:84.53.111.33 │ 158709 │ +│ ::ffff:217.118.11.22 │ 154004 │ +│ ::ffff:217.118.11.33 │ 148449 │ +│ ::ffff:217.118.11.44 │ 148243 │ +└────────────────────────────┴────────┘ +``` + +## IPv6StringToNum(s) {#ipv6stringtonums} + +IPv6NumToStringの逆関数. IPv6アドレスの形式が無効な場合は、nullバイトの文字列を返します。 +HEXは大文字または小文字にできます。 + +## IPv4ToIPv6(x) {#ipv4toipv6x} + +を取る `UInt32` 番号 IPv4アドレスとして解釈します。 [ビッグエンディアン](https://en.wikipedia.org/wiki/Endianness). 
Aを返します `FixedString(16)` バイナリ形式のIPv6アドレスを含む値。 例: + +``` sql +SELECT IPv6NumToString(IPv4ToIPv6(IPv4StringToNum('192.168.0.1'))) AS addr +``` + +``` text +┌─addr───────────────┐ +│ ::ffff:192.168.0.1 │ +└────────────────────┘ +``` + +## cutIPv6(x,bitsToCutForIPv6,bitsToCutForIPv4) {#cutipv6x-bitstocutforipv6-bitstocutforipv4} + +バイナリ形式のipv6アドレスを含むfixedstring(16)値を受け入れます。 テキスト形式で削除された指定されたビット数のアドレスを含む文字列を返します。 例えば: + +``` sql +WITH + IPv6StringToNum('2001:0DB8:AC10:FE01:FEED:BABE:CAFE:F00D') AS ipv6, + IPv4ToIPv6(IPv4StringToNum('192.168.0.1')) AS ipv4 +SELECT + cutIPv6(ipv6, 2, 0), + cutIPv6(ipv4, 0, 2) +``` + +``` text +┌─cutIPv6(ipv6, 2, 0)─────────────────┬─cutIPv6(ipv4, 0, 2)─┐ +│ 2001:db8:ac10:fe01:feed:babe:cafe:0 │ ::ffff:192.168.0.0 │ +└─────────────────────────────────────┴─────────────────────┘ +``` + +## IPv4CIDRToRange(ipv4,cidr), {#ipv4cidrtorangeipv4-cidr} + +IPv4とUInt8の値を受け取ります。 [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing). 下位のサブネットの範囲と上位の範囲を含むIPv4を持つタプルを返します。 + +``` sql +SELECT IPv4CIDRToRange(toIPv4('192.168.5.2'), 16) +``` + +``` text +┌─IPv4CIDRToRange(toIPv4('192.168.5.2'), 16)─┐ +│ ('192.168.0.0','192.168.255.255') │ +└────────────────────────────────────────────┘ +``` + +## IPv6CIDRToRange(ipv6,cidr), {#ipv6cidrtorangeipv6-cidr} + +Cidrを含むIPv6およびUInt8値を受け入れます。 IPv6の下位範囲と上位のサブネットを含むタプルを返します。 + +``` sql +SELECT IPv6CIDRToRange(toIPv6('2001:0db8:0000:85a3:0000:0000:ac1f:8001'), 32); +``` + +``` text +┌─IPv6CIDRToRange(toIPv6('2001:0db8:0000:85a3:0000:0000:ac1f:8001'), 32)─┐ +│ ('2001:db8::','2001:db8:ffff:ffff:ffff:ffff:ffff:ffff') │ +└────────────────────────────────────────────────────────────────────────┘ +``` + +## toIPv4(文字列) {#toipv4string} + +別名を指定する `IPv4StringToNum()` これはIPv4アドレスの文字列形式をとり、その値を返します。 [IPv4](../../sql_reference/data_types/domains/ipv4.md) 返される値に等しいバイナリです `IPv4StringToNum()`. + +``` sql +WITH + '171.225.130.45' as IPv4_string +SELECT + toTypeName(IPv4StringToNum(IPv4_string)), + toTypeName(toIPv4(IPv4_string)) +``` + +``` text +┌─toTypeName(IPv4StringToNum(IPv4_string))─┬─toTypeName(toIPv4(IPv4_string))─┐ +│ UInt32 │ IPv4 │ +└──────────────────────────────────────────┴─────────────────────────────────┘ +``` + +``` sql +WITH + '171.225.130.45' as IPv4_string +SELECT + hex(IPv4StringToNum(IPv4_string)), + hex(toIPv4(IPv4_string)) +``` + +``` text +┌─hex(IPv4StringToNum(IPv4_string))─┬─hex(toIPv4(IPv4_string))─┐ +│ ABE1822D │ ABE1822D │ +└───────────────────────────────────┴──────────────────────────┘ +``` + +## toIPv6(文字列) {#toipv6string} + +別名を指定する `IPv6StringToNum()` これはIPv6アドレスの文字列形式を取り、値をの戻します [IPv6](../../sql_reference/data_types/domains/ipv6.md) 返される値に等しいバイナリです `IPv6StringToNum()`. 
+ +``` sql +WITH + '2001:438:ffff::407d:1bc1' as IPv6_string +SELECT + toTypeName(IPv6StringToNum(IPv6_string)), + toTypeName(toIPv6(IPv6_string)) +``` + +``` text +┌─toTypeName(IPv6StringToNum(IPv6_string))─┬─toTypeName(toIPv6(IPv6_string))─┐ +│ FixedString(16) │ IPv6 │ +└──────────────────────────────────────────┴─────────────────────────────────┘ +``` + +``` sql +WITH + '2001:438:ffff::407d:1bc1' as IPv6_string +SELECT + hex(IPv6StringToNum(IPv6_string)), + hex(toIPv6(IPv6_string)) +``` + +``` text +┌─hex(IPv6StringToNum(IPv6_string))─┬─hex(toIPv6(IPv6_string))─────────┐ +│ 20010438FFFF000000000000407D1BC1 │ 20010438FFFF000000000000407D1BC1 │ +└───────────────────────────────────┴──────────────────────────────────┘ +``` + +[元の記事](https://clickhouse.tech/docs/en/query_language/functions/ip_address_functions/) diff --git a/docs/ja/sql_reference/functions/json_functions.md b/docs/ja/sql_reference/functions/json_functions.md deleted file mode 120000 index 35134dff7d1..00000000000 --- a/docs/ja/sql_reference/functions/json_functions.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/functions/json_functions.md \ No newline at end of file diff --git a/docs/ja/sql_reference/functions/json_functions.md b/docs/ja/sql_reference/functions/json_functions.md new file mode 100644 index 00000000000..f3695a3bab9 --- /dev/null +++ b/docs/ja/sql_reference/functions/json_functions.md @@ -0,0 +1,231 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 56 +toc_title: "JSON\u3067\u306E\u4F5C\u696D." +--- + +# JSONを操作するための関数 {#functions-for-working-with-json} + +Yandexの中。Metrica、JSONで送信したユーザーとしてセッションパラメータ。 このJSONを操作するための特別な関数がいくつかあります。 (ほとんどの場合、JSONsはさらに前処理され、結果の値は処理された形式で別々の列に格納されます。)これらの関数はすべて、JSONができることについての強い前提に基づいていますが、仕事を終わらせるためにできるだけ少なくしようとします。 + +以下の仮定が行われます: + +1. フィールド名(関数の引数)は定数でなければなりません。 +2. フィールド名は何とかcanonicallyで符号化されたjson. 例えば: `visitParamHas('{"abc":"def"}', 'abc') = 1`、しかし `visitParamHas('{"\\u0061\\u0062\\u0063":"def"}', 'abc') = 0` +3. フィールドは、任意の入れ子レベルで無差別に検索されます。 一致するフィールドが複数ある場合は、最初のオカレンスが使用されます。 +4. JSONには、文字列リテラルの外側にスペース文字はありません。 + +## visitParamHas(パラメータ,名前) {#visitparamhasparams-name} + +フィールドがあるかどうかをチェック ‘name’ 名前だ + +## visitParamExtractUInt(パラメータ,名前) {#visitparamextractuintparams-name} + +指定されたフィールドの値からuint64を解析します ‘name’. 
これが文字列フィールドの場合、文字列の先頭から数値を解析しようとします。 フィールドが存在しないか、存在するが数値が含まれていない場合は、0を返します。 + +## visitParamExtractInt(パラメータ,名前) {#visitparamextractintparams-name} + +Int64の場合と同じです。 + +## visitParamExtractFloat(パラメーター,名前) {#visitparamextractfloatparams-name} + +Float64の場合と同じです。 + +## visitParamExtractBool(パラメーター,名前) {#visitparamextractboolparams-name} + +True/false値を解析します。 結果はUInt8です。 + +## visitParamExtractRaw(パラメータ,名前) {#visitparamextractrawparams-name} + +セパレータを含むフィールドの値を返します。 + +例: + +``` sql +visitParamExtractRaw('{"abc":"\\n\\u0000"}', 'abc') = '"\\n\\u0000"' +visitParamExtractRaw('{"abc":{"def":[1,2,3]}}', 'abc') = '{"def":[1,2,3]}' +``` + +## visitParamExtractString(パラメーター,名前) {#visitparamextractstringparams-name} + +文字列を二重引用符で解析します。 値はエスケープされません。 エスケープ解除に失敗した場合は、空の文字列を返します。 + +例: + +``` sql +visitParamExtractString('{"abc":"\\n\\u0000"}', 'abc') = '\n\0' +visitParamExtractString('{"abc":"\\u263a"}', 'abc') = '☺' +visitParamExtractString('{"abc":"\\u263"}', 'abc') = '' +visitParamExtractString('{"abc":"hello}', 'abc') = '' +``` + +現在、この形式のコードポイントはサポートされていません `\uXXXX\uYYYY` これは、(彼らはCESU-8の代わりにUTF-8に変換されます)基本的な多言語面からではありません。 + +次の関数は、以下に基づいています [simdjson](https://github.com/lemire/simdjson) より複雑なJSON解析要件のために設計。 上記の前提2は依然として適用されます。 + +## isValidJSON(json) {#isvalidjsonjson} + +渡された文字列が有効なjsonであることを確認します。 + +例: + +``` sql +SELECT isValidJSON('{"a": "hello", "b": [-100, 200.0, 300]}') = 1 +SELECT isValidJSON('not a json') = 0 +``` + +## JSONHas(json\[, indices\_or\_keys\]…) {#jsonhasjson-indices-or-keys} + +JSONドキュメントに値が存在する場合, `1` は返却されます。 + +値が存在しない場合, `0` は返却されます。 + +例: + +``` sql +SELECT JSONHas('{"a": "hello", "b": [-100, 200.0, 300]}', 'b') = 1 +SELECT JSONHas('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 4) = 0 +``` + +`indices_or_keys` それぞれの引数は、文字列または整数のいずれかになります。 + +- 文字列=アクセスオブジェクトにより、会員に対す。 +- 正の整数=最初からn番目のメンバー/キーにアクセスします。 +- 負の整数=最後からn番目のメンバー/キーにアクセスします。 + +要素の最小インデックスは1です。 したがって、要素0は存在しません。 + +整数を使用して、json配列とjsonオブジェクトの両方にアクセスできます。 + +例えば: + +``` sql +SELECT JSONExtractKey('{"a": "hello", "b": [-100, 200.0, 300]}', 1) = 'a' +SELECT JSONExtractKey('{"a": "hello", "b": [-100, 200.0, 300]}', 2) = 'b' +SELECT JSONExtractKey('{"a": "hello", "b": [-100, 200.0, 300]}', -1) = 'b' +SELECT JSONExtractKey('{"a": "hello", "b": [-100, 200.0, 300]}', -2) = 'a' +SELECT JSONExtractString('{"a": "hello", "b": [-100, 200.0, 300]}', 1) = 'hello' +``` + +## JSONLength(json\[, indices\_or\_keys\]…) {#jsonlengthjson-indices-or-keys} + +JSON配列またはJSONオブジェクトの長さを返します。 + +値が存在しないか、間違った型を持っている場合, `0` は返却されます。 + +例: + +``` sql +SELECT JSONLength('{"a": "hello", "b": [-100, 200.0, 300]}', 'b') = 3 +SELECT JSONLength('{"a": "hello", "b": [-100, 200.0, 300]}') = 2 +``` + +## JSONType(json\[, indices\_or\_keys\]…) {#jsontypejson-indices-or-keys} + +JSON値の型を返します。 + +値が存在しない場合, `Null` は返却されます。 + +例: + +``` sql +SELECT JSONType('{"a": "hello", "b": [-100, 200.0, 300]}') = 'Object' +SELECT JSONType('{"a": "hello", "b": [-100, 200.0, 300]}', 'a') = 'String' +SELECT JSONType('{"a": "hello", "b": [-100, 200.0, 300]}', 'b') = 'Array' +``` + +## JSONExtractUInt(json\[, indices\_or\_keys\]…) {#jsonextractuintjson-indices-or-keys} + +## JSONExtractInt(json\[, indices\_or\_keys\]…) {#jsonextractintjson-indices-or-keys} + +## JSONExtractFloat(json\[, indices\_or\_keys\]…) {#jsonextractfloatjson-indices-or-keys} + +## JSONExtractBool(json\[, indices\_or\_keys\]…) {#jsonextractbooljson-indices-or-keys} + +JSONを解析し、値を抽出します。 これらの機能と類似 `visitParam` 機能。 + +値が存在しないか、間違った型を持っている場合, `0` は返却されます。 + +例: + +``` sql +SELECT 
JSONExtractInt('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 1) = -100 +SELECT JSONExtractFloat('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 2) = 200.0 +SELECT JSONExtractUInt('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', -1) = 300 +``` + +## JSONExtractString(json\[, indices\_or\_keys\]…) {#jsonextractstringjson-indices-or-keys} + +JSONを解析し、文字列を抽出します。 この関数は次のようになります `visitParamExtractString` 機能。 + +値が存在しないか、間違った型を持っている場合は、空の文字列が返されます。 + +値はエスケープされません。 エスケープ解除に失敗した場合は、空の文字列を返します。 + +例: + +``` sql +SELECT JSONExtractString('{"a": "hello", "b": [-100, 200.0, 300]}', 'a') = 'hello' +SELECT JSONExtractString('{"abc":"\\n\\u0000"}', 'abc') = '\n\0' +SELECT JSONExtractString('{"abc":"\\u263a"}', 'abc') = '☺' +SELECT JSONExtractString('{"abc":"\\u263"}', 'abc') = '' +SELECT JSONExtractString('{"abc":"hello}', 'abc') = '' +``` + +## JSONExtract(json\[, indices\_or\_keys…\], return\_type) {#jsonextractjson-indices-or-keys-return-type} + +JSONを解析し、指定されたClickHouseデータ型の値を抽出します。 + +これは以前の一般化です `JSONExtract` 機能。 +これは +`JSONExtract(..., 'String')` とまったく同じを返します `JSONExtractString()`, +`JSONExtract(..., 'Float64')` とまったく同じを返します `JSONExtractFloat()`. + +例: + +``` sql +SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'Tuple(String, Array(Float64))') = ('hello',[-100,200,300]) +SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'Tuple(b Array(Float64), a String)') = ([-100,200,300],'hello') +SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 'Array(Nullable(Int8))') = [-100, NULL, NULL] +SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 4, 'Nullable(Int64)') = NULL +SELECT JSONExtract('{"passed": true}', 'passed', 'UInt8') = 1 +SELECT JSONExtract('{"day": "Thursday"}', 'day', 'Enum8(\'Sunday\' = 0, \'Monday\' = 1, \'Tuesday\' = 2, \'Wednesday\' = 3, \'Thursday\' = 4, \'Friday\' = 5, \'Saturday\' = 6)') = 'Thursday' +SELECT JSONExtract('{"day": 5}', 'day', 'Enum8(\'Sunday\' = 0, \'Monday\' = 1, \'Tuesday\' = 2, \'Wednesday\' = 3, \'Thursday\' = 4, \'Friday\' = 5, \'Saturday\' = 6)') = 'Friday' +``` + +## JSONExtractKeysAndValues(json\[, indices\_or\_keys…\], value\_type) {#jsonextractkeysandvaluesjson-indices-or-keys-value-type} + +値が指定されたclickhouseデータ型のjsonからキーと値のペアを解析します。 + +例えば: + +``` sql +SELECT JSONExtractKeysAndValues('{"x": {"a": 5, "b": 7, "c": 11}}', 'x', 'Int8') = [('a',5),('b',7),('c',11)]; +``` + +## JSONExtractRaw(json\[, indices\_or\_keys\]…) {#jsonextractrawjson-indices-or-keys} + +JSONの一部を返します。 + +パートが存在しないか、間違った型を持っている場合は、空の文字列が返されます。 + +例えば: + +``` sql +SELECT JSONExtractRaw('{"a": "hello", "b": [-100, 200.0, 300]}', 'b') = '[-100, 200.0, 300]' +``` + +## JSONExtractArrayRaw(json\[, indices\_or\_keys\]…) {#jsonextractarrayrawjson-indices-or-keys} + +それぞれが未解析の文字列として表されるjson配列の要素を持つ配列を返します。 + +その部分が存在しない場合、または配列でない場合は、空の配列が返されます。 + +例えば: + +``` sql +SELECT JSONExtractArrayRaw('{"a": "hello", "b": [-100, 200.0, "hello"]}', 'b') = ['-100', '200.0', '"hello"']' +``` + +[元の記事](https://clickhouse.tech/docs/en/query_language/functions/json_functions/) diff --git a/docs/ja/sql_reference/functions/logical_functions.md b/docs/ja/sql_reference/functions/logical_functions.md deleted file mode 120000 index 5fcc82de7fe..00000000000 --- a/docs/ja/sql_reference/functions/logical_functions.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/functions/logical_functions.md \ No newline at end of file diff --git a/docs/ja/sql_reference/functions/logical_functions.md b/docs/ja/sql_reference/functions/logical_functions.md new file mode 100644 
index 00000000000..7ccc14b8b95 --- /dev/null +++ b/docs/ja/sql_reference/functions/logical_functions.md @@ -0,0 +1,22 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 37 +toc_title: "\u8AD6\u7406" +--- + +# 論理関数 {#logical-functions} + +論理関数は任意の数値型を受け入れますが、uint8の数値を0または1に返します。 + +引数としてのゼロが考慮されます “false,” ゼロ以外の値は考慮されますが “true”. + +## and、および演算子 {#and-and-operator} + +## or、OR演算子 {#or-or-operator} + +## not,not演算子 {#not-not-operator} + +## xor {#xor} + +[元の記事](https://clickhouse.tech/docs/en/query_language/functions/logical_functions/) diff --git a/docs/ja/sql_reference/functions/machine_learning_functions.md b/docs/ja/sql_reference/functions/machine_learning_functions.md deleted file mode 120000 index fa3a0b4e5ab..00000000000 --- a/docs/ja/sql_reference/functions/machine_learning_functions.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/functions/machine_learning_functions.md \ No newline at end of file diff --git a/docs/ja/sql_reference/functions/machine_learning_functions.md b/docs/ja/sql_reference/functions/machine_learning_functions.md new file mode 100644 index 00000000000..9c7bc3df243 --- /dev/null +++ b/docs/ja/sql_reference/functions/machine_learning_functions.md @@ -0,0 +1,20 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 64 +toc_title: "\u6A5F\u68B0\u5B66\u7FD2\u306E\u6A5F\u80FD" +--- + +# 機械学習の機能 {#machine-learning-functions} + +## evalMLMethod(予測) {#machine_learning_methods-evalmlmethod} + +適合回帰モデルを使用した予測 `evalMLMethod` 機能。 リンクを見る `linearRegression`. + +### 確率的線形回帰 {#stochastic-linear-regression} + +その [stochasticLinearRegression](../../sql_reference/aggregate_functions/reference.md#agg_functions-stochasticlinearregression) 集合関数は,線形モデルとMSE損失関数を用いた確率的勾配降下法を実装する。 使用 `evalMLMethod` 新しいデータを予測する。 + +### 確率論的ロジスティック回帰 {#stochastic-logistic-regression} + +その [stochasticLogisticRegression](../../sql_reference/aggregate_functions/reference.md#agg_functions-stochasticlogisticregression) 集合関数は,二値分類問題に対して確率的勾配降下法を実装する。 使用 `evalMLMethod` 新しいデータを予測する。 diff --git a/docs/ja/sql_reference/functions/math_functions.md b/docs/ja/sql_reference/functions/math_functions.md deleted file mode 120000 index 2d7d7aa9cc9..00000000000 --- a/docs/ja/sql_reference/functions/math_functions.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/functions/math_functions.md \ No newline at end of file diff --git a/docs/ja/sql_reference/functions/math_functions.md b/docs/ja/sql_reference/functions/math_functions.md new file mode 100644 index 00000000000..1376d518913 --- /dev/null +++ b/docs/ja/sql_reference/functions/math_functions.md @@ -0,0 +1,116 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 44 +toc_title: "\u6570\u5B66" +--- + +# 数学関数 {#mathematical-functions} + +すべての関数はfloat64番号を返します。 結果の精度は可能な最大精度に近いですが、結果は対応する実数に最も近いマシン表現可能な数値と一致しない場合があります。 + +## e() {#e} + +数値eに近いfloat64数値を返します。 + +## pi() {#pi} + +Returns a Float64 number that is close to the number π. 
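+
+たとえば、両方の定数関数を一度に呼び出して確認できます。 以下は説明用の小さな例で、表示される値は e と π に最も近い Float64 表現です(実際の出力形式は多少異なる場合があります):
+
+``` sql
+SELECT e(), pi()
+```
+
+``` text
+┌───────────────e()─┬──────────────pi()─┐
+│ 2.718281828459045 │ 3.141592653589793 │
+└───────────────────┴───────────────────┘
+```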
+ +## exp(x) {#expx} + +数値引数を受け取り、引数の指数に近いfloat64数値を返します。 + +## ログ(x),ln(x) {#logx-lnx} + +数値引数を受け取り、引数の自然対数に近いfloat64数値を返します。 + +## exp2(x) {#exp2x} + +数値引数を受け取り、float64の数値を2に近づけてxの累乗を返します。 + +## log2(x) {#log2x} + +数値引数を受け取り、引数のバイナリ対数に近いfloat64数値を返します。 + +## exp10(x) {#exp10x} + +数値引数を受け取り、float64の数値をxの累乗に近い10に返します。 + +## log10(x) {#log10x} + +数値引数を受け取り、引数の小数点以下の対数に近いfloat64数値を返します。 + +## sqrt(x) {#sqrtx} + +数値引数を受け取り、引数の平方根に近いfloat64数値を返します。 + +## cbrt(x) {#cbrtx} + +数値引数を受け取り、引数の三次根に近いfloat64数値を返します。 + +## erf(x) {#erfx} + +もし ‘x’ is non-negative, then erf(x / σ√2) 標準偏差を持つ正規分布を持つ確率変数です ‘σ’ 期待値から分離されている値を次の値よりも大きくします ‘x’. + +例(三つのシグマルール): + +``` sql +SELECT erf(3 / sqrt(2)) +``` + +``` text +┌─erf(divide(3, sqrt(2)))─┐ +│ 0.9973002039367398 │ +└─────────────────────────┘ +``` + +## erfc(x) {#erfcx} + +数値引数を受け取り、float64数値を1-erf(x)に近い値に返しますが、大きな値の精度を失うことはありません ‘x’ 値。 + +## lgamma(x) {#lgammax} + +ガンマ関数の対数。 + +## tgamma(x) {#tgammax} + +ガンマ関数。 + +## sin(x) {#sinx} + +サイン。 + +## cos(x) {#cosx} + +コサイン + +## tan(x) {#tanx} + +タンジェント。 + +## asin(x) {#asinx} + +アークの正弦。 + +## acos(x) {#acosx} + +アークコサイン。 + +## atan(x) {#atanx} + +アークタンジェント。 + +## pow(x,y),パワー(x,y) {#powx-y-powerx-y} + +Xに近いFloat64数をyの累乗に返します。 + +## intExp2 {#intexp2} + +数値引数を受け取り、xの累乗に対してuint64の2に近い数値を返します。 + +## intExp10 {#intexp10} + +数値引数を受け取り、xの累乗に近いuint64の数値を10に返します。 + +[元の記事](https://clickhouse.tech/docs/en/query_language/functions/math_functions/) diff --git a/docs/ja/sql_reference/functions/other_functions.md b/docs/ja/sql_reference/functions/other_functions.md deleted file mode 120000 index 062256854bd..00000000000 --- a/docs/ja/sql_reference/functions/other_functions.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/functions/other_functions.md \ No newline at end of file diff --git a/docs/ja/sql_reference/functions/other_functions.md b/docs/ja/sql_reference/functions/other_functions.md new file mode 100644 index 00000000000..ae9bb898405 --- /dev/null +++ b/docs/ja/sql_reference/functions/other_functions.md @@ -0,0 +1,1079 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 66 +toc_title: "\u305D\u306E\u4ED6" +--- + +# その他の機能 {#other-functions} + +## ホスト名() {#hostname} + +この関数が実行されたホストの名前を持つ文字列を返します。 分散処理の場合、機能がリモートサーバー上で実行される場合、これはリモートサーバーホストの名前です。 + +## FQDN {#fqdn} + +完全修飾ドメイン名を返します。 + +**構文** + +``` sql +fqdn(); +``` + +この関数は、大文字と小文字を区別しません。 + +**戻り値** + +- 完全修飾ドメイン名の文字列。 + +タイプ: `String`. + +**例えば** + +クエリ: + +``` sql +SELECT FQDN(); +``` + +結果: + +``` text +┌─FQDN()──────────────────────────┐ +│ clickhouse.ru-central1.internal │ +└─────────────────────────────────┘ +``` + +## ベース名 {#basename} + +最後のスラッシュまたはバックスラッシュの後の文字列の末尾の部分を抽出します。 この関数は、パスからファイル名を抽出するためによく使用されます。 + +``` sql +basename( expr ) +``` + +**パラメータ** + +- `expr` — Expression resulting in a [文字列](../../sql_reference/data_types/string.md) タイプ値。 すべての円記号は、結果の値でエスケープする必要があります。 + +**戻り値** + +以下を含む文字列: + +- 最後のスラッシュまたはバックスラッシュの後の文字列の末尾の部分。 + + If the input string contains a path ending with slash or backslash, for example, `/` or `c:\`, the function returns an empty string. 
+ +- スラッシュまたはバックスラッシュがない場合は、元の文字列。 + +**例えば** + +``` sql +SELECT 'some/long/path/to/file' AS a, basename(a) +``` + +``` text +┌─a──────────────────────┬─basename('some\\long\\path\\to\\file')─┐ +│ some\long\path\to\file │ file │ +└────────────────────────┴────────────────────────────────────────┘ +``` + +``` sql +SELECT 'some\\long\\path\\to\\file' AS a, basename(a) +``` + +``` text +┌─a──────────────────────┬─basename('some\\long\\path\\to\\file')─┐ +│ some\long\path\to\file │ file │ +└────────────────────────┴────────────────────────────────────────┘ +``` + +``` sql +SELECT 'some-file-name' AS a, basename(a) +``` + +``` text +┌─a──────────────┬─basename('some-file-name')─┐ +│ some-file-name │ some-file-name │ +└────────────────┴────────────────────────────┘ +``` + +## visibleWidth(x) {#visiblewidthx} + +テキスト形式(タブ区切り)でコンソールに値を出力するときのおおよその幅を計算します。 +この関数は、システムがpretty形式を実装するために使用します。 + +`NULL` に対応する文字列として表される。 `NULL` で `Pretty` フォーマット。 + +``` sql +SELECT visibleWidth(NULL) +``` + +``` text +┌─visibleWidth(NULL)─┐ +│ 4 │ +└────────────────────┘ +``` + +## toTypeName(x) {#totypenamex} + +渡された引数の型名を含む文字列を返します。 + +もし `NULL` 入力として関数に渡され、その後、それが返されます `Nullable(Nothing)` 内部に対応するタイプ `NULL` ClickHouseでの表現。 + +## ブロックサイズ() {#function-blocksize} + +ブロックのサイズを取得します。 +ClickHouseでは、クエリは常にブロック(列部分のセット)で実行されます。 この関数は、それを呼び出したブロックのサイズを取得することができます。 + +## マテリアライズ(x) {#materializex} + +一つの値だけを含む完全な列に定数を変換します。 +ClickHouseでは、完全な列と定数はメモリ内で異なる方法で表されます。 関数は定数引数と通常の引数(異なるコードが実行される)では異なる動作をしますが、結果はほとんど常に同じです。 この関数は、この動作のデバッグ用です。 + +## ignore(…) {#ignore} + +以下を含む任意の引数を受け取る `NULL`. 常に0を返します。 +ただし、引数はまだ評価されます。 これはベンチマークに使用できます。 + +## スリープ(秒) {#sleepseconds} + +眠る ‘seconds’ 各データブロックの秒。 整数または浮動小数点数を指定できます。 + +## sleepEachRow(秒) {#sleepeachrowseconds} + +眠る ‘seconds’ 各行の秒。 整数または浮動小数点数を指定できます。 + +## currentDatabase() {#currentdatabase} + +現在のデータベースの名前を返します。 +この関数は、データベースを指定する必要があるcreate tableクエリのテーブルエンジンパラメーターで使用できます。 + +## currentUser() {#other-function-currentuser} + +現在のユーザーのログインを返します。 ユーザのログインは、そのクエリを開始し、ケースdistibutedクエリで返されます。 + +``` sql +SELECT currentUser(); +``` + +エイリアス: `user()`, `USER()`. + +**戻り値** + +- 現在のユーザーのログイン。 +- クエリを開始したユーザーのログイン。 + +タイプ: `String`. + +**例えば** + +クエリ: + +``` sql +SELECT currentUser(); +``` + +結果: + +``` text +┌─currentUser()─┐ +│ default │ +└───────────────┘ +``` + +## isFinite(x) {#isfinitex} + +引数が無限でなくnanでない場合はfloat32とfloat64を受け取り、uint8を1に返します(それ以外の場合は0)。 + +## イシンフィナイト(x) {#isinfinitex} + +引数が無限の場合はfloat32とfloat64を受け取り、uint8を1に戻し、それ以外の場合は0を返します。 nanの場合は0が返されることに注意してください。 + +## ifNotFinite {#ifnotfinite} + +浮動小数点値が有限かどうかをチェックします。 + +**構文** + + ifNotFinite(x,y) + +**パラメータ** + +- `x` — Value to be checked for infinity. Type: [フロート\*](../../sql_reference/data_types/float.md). +- `y` — Fallback value. Type: [フロート\*](../../sql_reference/data_types/float.md). + +**戻り値** + +- `x` もし `x` 有限です。 +- `y` もし `x` 有限ではない。 + +**例えば** + +クエリ: + + SELECT 1/0 as infimum, ifNotFinite(infimum,42) + +結果: + + ┌─infimum─┬─ifNotFinite(divide(1, 0), 42)─┐ + │ inf │ 42 │ + └─────────┴───────────────────────────────┘ + +同様の結果を得るには、次のようにします [三項演算子](conditional_functions.md#ternary-operator): `isFinite(x) ? x : y`. 
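+
+両者が同じ結果を返すことを示す小さなスケッチです(出力は説明用):
+
+``` sql
+SELECT 1/0 AS infimum, ifNotFinite(infimum, 42) AS func, isFinite(infimum) ? infimum : 42 AS ternary
+```
+
+``` text
+┌─infimum─┬─func─┬─ternary─┐
+│     inf │   42 │      42 │
+└─────────┴──────┴─────────┘
+```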
+ +## isNaN(x) {#isnanx} + +引数がnanの場合はfloat32とfloat64を受け取り、uint8を1に返します。 + +## hasColumnInTable(\[‘hostname’\[, ‘username’\[, ‘password’\]\],\] ‘database’, ‘table’, ‘column’) {#hascolumnintablehostname-username-password-database-table-column} + +データベース名、テーブル名、列名などの定数文字列を受け入れます。 列がある場合はuint8定数式を1に、それ以外の場合は0を返します。 hostnameパラメーターを設定すると、テストはリモートサーバーで実行されます。 +テーブルが存在しない場合、関数は例外をスローします。 +入れ子になったデータ構造内の要素の場合、この関数は列の存在をチェックします。 入れ子になったデータ構造自体の場合、関数は0を返します。 + +## バー {#function-bar} + +ユニコードアート図を作成できます。 + +`bar(x, min, max, width)` に比例する幅を持つバンドを描画します `(x - min)` とに等しい `width` 文字の場合 `x = max`. + +パラメータ: + +- `x` — Size to display. +- `min, max` — Integer constants. The value must fit in `Int64`. +- `width` — Constant, positive integer, can be fractional. + +バンドは、シンボルの第八に精度で描かれています。 + +例えば: + +``` sql +SELECT + toHour(EventTime) AS h, + count() AS c, + bar(c, 0, 600000, 20) AS bar +FROM test.hits +GROUP BY h +ORDER BY h ASC +``` + +``` text +┌──h─┬──────c─┬─bar────────────────┐ +│ 0 │ 292907 │ █████████▋ │ +│ 1 │ 180563 │ ██████ │ +│ 2 │ 114861 │ ███▋ │ +│ 3 │ 85069 │ ██▋ │ +│ 4 │ 68543 │ ██▎ │ +│ 5 │ 78116 │ ██▌ │ +│ 6 │ 113474 │ ███▋ │ +│ 7 │ 170678 │ █████▋ │ +│ 8 │ 278380 │ █████████▎ │ +│ 9 │ 391053 │ █████████████ │ +│ 10 │ 457681 │ ███████████████▎ │ +│ 11 │ 493667 │ ████████████████▍ │ +│ 12 │ 509641 │ ████████████████▊ │ +│ 13 │ 522947 │ █████████████████▍ │ +│ 14 │ 539954 │ █████████████████▊ │ +│ 15 │ 528460 │ █████████████████▌ │ +│ 16 │ 539201 │ █████████████████▊ │ +│ 17 │ 523539 │ █████████████████▍ │ +│ 18 │ 506467 │ ████████████████▊ │ +│ 19 │ 520915 │ █████████████████▎ │ +│ 20 │ 521665 │ █████████████████▍ │ +│ 21 │ 542078 │ ██████████████████ │ +│ 22 │ 493642 │ ████████████████▍ │ +│ 23 │ 400397 │ █████████████▎ │ +└────┴────────┴────────────────────┘ +``` + +## 変換 {#transform} + +いくつかの要素の明示的に定義されたマッピングに従って値を他の要素に変換します。 +この関数には二つの違いがあります: + +### transform(x,array\_from,array\_to,デフォルト) {#transformx-array-from-array-to-default} + +`x` – What to transform. + +`array_from` – Constant array of values for converting. + +`array_to` – Constant array of values to convert the values in ‘from’ に。 + +`default` – Which value to use if ‘x’ 値のいずれにも等しくありません。 ‘from’. + +`array_from` と `array_to` – Arrays of the same size. + +タイプ: + +`transform(T, Array(T), Array(U), U) -> U` + +`T` と `U` 数値、文字列、または日付または日時の型を指定できます。 +同じ文字(tまたはu)が示されている場合、数値型の場合、これらは一致する型ではなく、共通の型を持つ型である可能性があります。 +たとえば、最初の引数はint64型を持つことができ、二番目の引数はarray(uint16)型を持つことができます。 + +この ‘x’ 値は、次のいずれかの要素に等しくなります。 ‘array\_from’ 配列の場合は、既存の要素(同じ番号が付けられています)を返します。 ‘array\_to’ 配列だ それ以外の場合は、 ‘default’. 一致する要素が複数ある場合 ‘array\_from’、それはマッチのいずれかを返します。 + +例えば: + +``` sql +SELECT + transform(SearchEngineID, [2, 3], ['Yandex', 'Google'], 'Other') AS title, + count() AS c +FROM test.hits +WHERE SearchEngineID != 0 +GROUP BY title +ORDER BY c DESC +``` + +``` text +┌─title─────┬──────c─┐ +│ Yandex │ 498635 │ +│ Google │ 229872 │ +│ Other │ 104472 │ +└───────────┴────────┘ +``` + +### トランスフォーム(x,array\_from,array\_to) {#transformx-array-from-array-to} + +最初のバリエーションとは異なります ‘default’ 引数は省略する。 +この ‘x’ 値は、次のいずれかの要素に等しくなります。 ‘array\_from’ 配列の場合は、マッチする要素(同じ番号を付けられた要素)を返します。 ‘array\_to’ 配列だ それ以外の場合は、 ‘x’. 
+ +タイプ: + +`transform(T, Array(T), Array(T)) -> T` + +例えば: + +``` sql +SELECT + transform(domain(Referer), ['yandex.ru', 'google.ru', 'vk.com'], ['www.yandex', 'example.com']) AS s, + count() AS c +FROM test.hits +GROUP BY domain(Referer) +ORDER BY count() DESC +LIMIT 10 +``` + +``` text +┌─s──────────────┬───────c─┐ +│ │ 2906259 │ +│ www.yandex │ 867767 │ +│ ███████.ru │ 313599 │ +│ mail.yandex.ru │ 107147 │ +│ ██████.ru │ 100355 │ +│ █████████.ru │ 65040 │ +│ news.yandex.ru │ 64515 │ +│ ██████.net │ 59141 │ +│ example.com │ 57316 │ +└────────────────┴─────────┘ +``` + +## formatReadableSize(x) {#formatreadablesizex} + +サイズ(バイト数)を受け入れます。 サフィックス(kib、mibなど)を含む丸められたサイズを返します。)文字列として。 + +例えば: + +``` sql +SELECT + arrayJoin([1, 1024, 1024*1024, 192851925]) AS filesize_bytes, + formatReadableSize(filesize_bytes) AS filesize +``` + +``` text +┌─filesize_bytes─┬─filesize───┐ +│ 1 │ 1.00 B │ +│ 1024 │ 1.00 KiB │ +│ 1048576 │ 1.00 MiB │ +│ 192851925 │ 183.92 MiB │ +└────────────────┴────────────┘ +``` + +## 少なくとも(a,b) {#leasta-b} + +Aとbの最小値を返します。 + +## 最大(a,b) {#greatesta-b} + +Aとbの最大値を返します。 + +## アップタイム() {#uptime} + +サーバーの稼働時間を秒単位で返します。 + +## バージョン() {#version} + +サーバーのバージョンを文字列として返します。 + +## タイムゾーン() {#timezone} + +サーバーのタイムゾーンを返します。 + +## bloknumber {#blocknumber} + +行があるデータブロックのシーケンス番号を返します。 + +## rowNumberInBlock {#function-rownumberinblock} + +データブロック内の行の序数を返します。 異なるデータブロックは常に再計算されます。 + +## rowNumberInAllBlocks() {#rownumberinallblocks} + +データブロック内の行の序数を返します。 この機能のみを考慮した影響のデータブロックとなります。 + +## 隣人 {#neighbor} + +指定された列の現在の行の前または後に来る指定されたオフセットで行へのアクセスを提供するウィンドウ関数。 + +**構文** + +``` sql +neighbor(column, offset[, default_value]) +``` + +関数の結果は、影響を受けるデータブロックと、ブロック内のデータの順序によって異なります。 +ORDER BYを使用してサブクエリを作成し、サブクエリの外部から関数を呼び出すと、期待される結果を得ることができます。 + +**パラメータ** + +- `column` — A column name or scalar expression. +- `offset` — The number of rows forwards or backwards from the current row of `column`. [Int64](../../sql_reference/data_types/int_uint.md). +- `default_value` — Optional. The value to be returned if offset goes beyond the scope of the block. Type of data blocks affected. 
+ +**戻り値** + +- の値 `column` で `offset` 現在の行からの距離 `offset` 値はブロック境界の外側ではありません。 +- のデフォルト値 `column` もし `offset` 値はブロック境界の外側です。 もし `default_value` 与えられ、それが使用されます。 + +型:影響を受けるデータブロックの種類または既定値の種類。 + +**例えば** + +クエリ: + +``` sql +SELECT number, neighbor(number, 2) FROM system.numbers LIMIT 10; +``` + +結果: + +``` text +┌─number─┬─neighbor(number, 2)─┐ +│ 0 │ 2 │ +│ 1 │ 3 │ +│ 2 │ 4 │ +│ 3 │ 5 │ +│ 4 │ 6 │ +│ 5 │ 7 │ +│ 6 │ 8 │ +│ 7 │ 9 │ +│ 8 │ 0 │ +│ 9 │ 0 │ +└────────┴─────────────────────┘ +``` + +クエリ: + +``` sql +SELECT number, neighbor(number, 2, 999) FROM system.numbers LIMIT 10; +``` + +結果: + +``` text +┌─number─┬─neighbor(number, 2, 999)─┐ +│ 0 │ 2 │ +│ 1 │ 3 │ +│ 2 │ 4 │ +│ 3 │ 5 │ +│ 4 │ 6 │ +│ 5 │ 7 │ +│ 6 │ 8 │ +│ 7 │ 9 │ +│ 8 │ 999 │ +│ 9 │ 999 │ +└────────┴──────────────────────────┘ +``` + +この関数は、年間指標の値を計算するために使用できます: + +クエリ: + +``` sql +WITH toDate('2018-01-01') AS start_date +SELECT + toStartOfMonth(start_date + (number * 32)) AS month, + toInt32(month) % 100 AS money, + neighbor(money, -12) AS prev_year, + round(prev_year / money, 2) AS year_over_year +FROM numbers(16) +``` + +結果: + +``` text +┌──────month─┬─money─┬─prev_year─┬─year_over_year─┐ +│ 2018-01-01 │ 32 │ 0 │ 0 │ +│ 2018-02-01 │ 63 │ 0 │ 0 │ +│ 2018-03-01 │ 91 │ 0 │ 0 │ +│ 2018-04-01 │ 22 │ 0 │ 0 │ +│ 2018-05-01 │ 52 │ 0 │ 0 │ +│ 2018-06-01 │ 83 │ 0 │ 0 │ +│ 2018-07-01 │ 13 │ 0 │ 0 │ +│ 2018-08-01 │ 44 │ 0 │ 0 │ +│ 2018-09-01 │ 75 │ 0 │ 0 │ +│ 2018-10-01 │ 5 │ 0 │ 0 │ +│ 2018-11-01 │ 36 │ 0 │ 0 │ +│ 2018-12-01 │ 66 │ 0 │ 0 │ +│ 2019-01-01 │ 97 │ 32 │ 0.33 │ +│ 2019-02-01 │ 28 │ 63 │ 2.25 │ +│ 2019-03-01 │ 56 │ 91 │ 1.62 │ +│ 2019-04-01 │ 87 │ 22 │ 0.25 │ +└────────────┴───────┴───────────┴────────────────┘ +``` + +## ランニングダイファレンス(x) {#other_functions-runningdifference} + +Calculates the difference between successive row values ​​in the data block. +最初の行に対して0を返し、後続の各行に対して前の行との差を返します。 + +関数の結果は、影響を受けるデータブロックと、ブロック内のデータの順序によって異なります。 +ORDER BYを使用してサブクエリを作成し、サブクエリの外部から関数を呼び出すと、期待される結果を得ることができます。 + +例えば: + +``` sql +SELECT + EventID, + EventTime, + runningDifference(EventTime) AS delta +FROM +( + SELECT + EventID, + EventTime + FROM events + WHERE EventDate = '2016-11-24' + ORDER BY EventTime ASC + LIMIT 5 +) +``` + +``` text +┌─EventID─┬───────────EventTime─┬─delta─┐ +│ 1106 │ 2016-11-24 00:00:04 │ 0 │ +│ 1107 │ 2016-11-24 00:00:05 │ 1 │ +│ 1108 │ 2016-11-24 00:00:05 │ 0 │ +│ 1109 │ 2016-11-24 00:00:09 │ 4 │ +│ 1110 │ 2016-11-24 00:00:10 │ 1 │ +└─────────┴─────────────────────┴───────┘ +``` + +ご注意-ブロックサイズは結果に影響します。 それぞれの新しいブロックでは、 `runningDifference` 状態がリセットされます。 + +``` sql +SELECT + number, + runningDifference(number + 1) AS diff +FROM numbers(100000) +WHERE diff != 1 +``` + +``` text +┌─number─┬─diff─┐ +│ 0 │ 0 │ +└────────┴──────┘ +┌─number─┬─diff─┐ +│ 65536 │ 0 │ +└────────┴──────┘ +``` + +``` sql +set max_block_size=100000 -- default value is 65536! 
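+-- ブロック全体が一度に処理されるため、runningDifference の状態は途中でリセットされません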
+ +SELECT + number, + runningDifference(number + 1) AS diff +FROM numbers(100000) +WHERE diff != 1 +``` + +``` text +┌─number─┬─diff─┐ +│ 0 │ 0 │ +└────────┴──────┘ +``` + +## runningDifferenceStartingWithFirstvalue {#runningdifferencestartingwithfirstvalue} + +と同じ [runningDifference](./other_functions.md#other_functions-runningdifference)、差は、最初の行の値であり、最初の行の値を返し、後続の各行は、前の行からの差を返します。 + +## マクナムトストリング(num) {#macnumtostringnum} + +UInt64番号を受け取ります。 ビッグエンディアンのMACアドレスとして解釈します。 対応するMACアドレスをAA:BB:CC:DD:EE:FF形式で含む文字列を返します。 + +## MACStringToNum(s) {#macstringtonums} + +MACNumToStringの逆関数。 MACアドレスに無効な形式がある場合は、0を返します。 + +## MACStringToOUI(s) {#macstringtoouis} + +AA:BB:CC:DD:EE:FF形式のMACアドレスを受け付けます。 最初の三つのオクテットをUInt64の数値として返します。 MACアドレスに無効な形式がある場合は、0を返します。 + +## getSizeOfEnumType {#getsizeofenumtype} + +フィールドの数を返します [列挙型](../../sql_reference/data_types/enum.md). + +``` sql +getSizeOfEnumType(value) +``` + +**パラメータ:** + +- `value` — Value of type `Enum`. + +**戻り値** + +- を持つフィールドの数 `Enum` 入力値。 +- 型が型でない場合は、例外がスローされます `Enum`. + +**例えば** + +``` sql +SELECT getSizeOfEnumType( CAST('a' AS Enum8('a' = 1, 'b' = 2) ) ) AS x +``` + +``` text +┌─x─┐ +│ 2 │ +└───┘ +``` + +## blockSerializedSize {#blockserializedsize} + +(圧縮を考慮せずに)ディスク上のサイズを返します。 + +``` sql +blockSerializedSize(value[, value[, ...]]) +``` + +**パラメータ:** + +- `value` — Any value. + +**戻り値** + +- 値のブロックのためにディスクに書き込まれるバイト数(圧縮なし)。 + +**例えば** + +``` sql +SELECT blockSerializedSize(maxState(1)) as x +``` + +``` text +┌─x─┐ +│ 2 │ +└───┘ +``` + +## toColumnTypeName {#tocolumntypename} + +RAM内の列のデータ型を表すクラスの名前を返します。 + +``` sql +toColumnTypeName(value) +``` + +**パラメータ:** + +- `value` — Any type of value. + +**戻り値** + +- を表すために使用されるクラスの名前を持つ文字列 `value` RAMのデータ型。 + +**違いの例`toTypeName ' and ' toColumnTypeName`** + +``` sql +SELECT toTypeName(CAST('2018-01-01 01:02:03' AS DateTime)) +``` + +``` text +┌─toTypeName(CAST('2018-01-01 01:02:03', 'DateTime'))─┐ +│ DateTime │ +└─────────────────────────────────────────────────────┘ +``` + +``` sql +SELECT toColumnTypeName(CAST('2018-01-01 01:02:03' AS DateTime)) +``` + +``` text +┌─toColumnTypeName(CAST('2018-01-01 01:02:03', 'DateTime'))─┐ +│ Const(UInt32) │ +└───────────────────────────────────────────────────────────┘ +``` + +この例では、 `DateTime` データタイプはメモリに記憶として `Const(UInt32)`. + +## dumpColumnStructure {#dumpcolumnstructure} + +RAM内のデータ構造の詳細な説明を出力します + +``` sql +dumpColumnStructure(value) +``` + +**パラメータ:** + +- `value` — Any type of value. + +**戻り値** + +- を表すために使用される構造体を記述する文字列。 `value` RAMのデータ型。 + +**例えば** + +``` sql +SELECT dumpColumnStructure(CAST('2018-01-01 01:02:03', 'DateTime')) +``` + +``` text +┌─dumpColumnStructure(CAST('2018-01-01 01:02:03', 'DateTime'))─┐ +│ DateTime, Const(size = 1, UInt32(size = 1)) │ +└──────────────────────────────────────────────────────────────┘ +``` + +## defaultValueOfArgumentType {#defaultvalueofargumenttype} + +データ型の既定値を出力します。 + +ユーザーが設定したカスタム列の既定値は含まれません。 + +``` sql +defaultValueOfArgumentType(expression) +``` + +**パラメータ:** + +- `expression` — Arbitrary type of value or an expression that results in a value of an arbitrary type. + +**戻り値** + +- `0` 数字のために. +- 文字列の空の文字列。 +- `ᴺᵁᴸᴸ` のために [Nullable](../../sql_reference/data_types/nullable.md). 
+ +**例えば** + +``` sql +SELECT defaultValueOfArgumentType( CAST(1 AS Int8) ) +``` + +``` text +┌─defaultValueOfArgumentType(CAST(1, 'Int8'))─┐ +│ 0 │ +└─────────────────────────────────────────────┘ +``` + +``` sql +SELECT defaultValueOfArgumentType( CAST(1 AS Nullable(Int8) ) ) +``` + +``` text +┌─defaultValueOfArgumentType(CAST(1, 'Nullable(Int8)'))─┐ +│ ᴺᵁᴸᴸ │ +└───────────────────────────────────────────────────────┘ +``` + +## 複製 {#other-functions-replicate} + +単一の値を持つ配列を作成します。 + +内部実装のために使用される [arrayJoin](array_join.md#functions_arrayjoin). + +``` sql +SELECT replicate(x, arr); +``` + +**パラメータ:** + +- `arr` — Original array. ClickHouse creates a new array of the same length as the original and fills it with the value `x`. +- `x` — The value that the resulting array will be filled with. + +**戻り値** + +値で満たされた配列 `x`. + +タイプ: `Array`. + +**例えば** + +クエリ: + +``` sql +SELECT replicate(1, ['a', 'b', 'c']) +``` + +結果: + +``` text +┌─replicate(1, ['a', 'b', 'c'])─┐ +│ [1,1,1] │ +└───────────────────────────────┘ +``` + +## filesystemAvailable {#filesystemavailable} + +返金額の残存スペースのファイルシステムのファイルのデータベースはあります。 それは常に合計空き領域よりも小さいです ([filesystemFree](#filesystemfree) でもスペースはOS. + +**構文** + +``` sql +filesystemAvailable() +``` + +**戻り値** + +- バイト単位で使用可能な残りのスペースの量。 + +タイプ: [UInt64](../../sql_reference/data_types/int_uint.md). + +**例えば** + +クエリ: + +``` sql +SELECT formatReadableSize(filesystemAvailable()) AS "Available space", toTypeName(filesystemAvailable()) AS "Type"; +``` + +結果: + +``` text +┌─Available space─┬─Type───┐ +│ 30.75 GiB │ UInt64 │ +└─────────────────┴────────┘ +``` + +## filesystemFree {#filesystemfree} + +データベースのファイルがあるファイルシステム上の空き領域の合計を返します。 また見なさい `filesystemAvailable` + +**構文** + +``` sql +filesystemFree() +``` + +**戻り値** + +- バイト単位の空き領域の量。 + +タイプ: [UInt64](../../sql_reference/data_types/int_uint.md). + +**例えば** + +クエリ: + +``` sql +SELECT formatReadableSize(filesystemFree()) AS "Free space", toTypeName(filesystemFree()) AS "Type"; +``` + +結果: + +``` text +┌─Free space─┬─Type───┐ +│ 32.39 GiB │ UInt64 │ +└────────────┴────────┘ +``` + +## filesystemCapacity {#filesystemcapacity} + +ファイルシステムの容量をバイト単位で返します。 評価のために、 [パス](../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-path) データディレク + +**構文** + +``` sql +filesystemCapacity() +``` + +**戻り値** + +- ファイルシステムの容量情報(バイト単位)。 + +タイプ: [UInt64](../../sql_reference/data_types/int_uint.md). + +**例えば** + +クエリ: + +``` sql +SELECT formatReadableSize(filesystemCapacity()) AS "Capacity", toTypeName(filesystemCapacity()) AS "Type" +``` + +結果: + +``` text +┌─Capacity──┬─Type───┐ +│ 39.32 GiB │ UInt64 │ +└───────────┴────────┘ +``` + +## finalizeAggregation {#function-finalizeaggregation} + +集約関数の状態を取ります。 集計結果を返します(ファイナライズされた状態)。 + +## runningAccumulate {#function-runningaccumulate} + +集約関数の状態を取り、値を持つ列を返します,ブロックラインのセットのためにこれらの状態の蓄積の結果であります,最初から現在の行へ.これ +たとえば、集計関数の状態(例:runningaccumulate(uniqstate(userid)))を取り、ブロックの各行について、前のすべての行と現在の行の状態をマージしたときの集計関数の結果を返しま +したがって、関数の結果は、ブロックへのデータの分割とブロック内のデータの順序に依存します。 + +## joinGet {#joinget} + +この関数を使用すると、テーブルからのデータと同じ方法でデータを抽出できます [辞書](../../sql_reference/dictionaries/index.md). 
+ +データの取得 [参加](../../engines/table_engines/special/join.md#creating-a-table) 指定された結合キーを使用するテーブル。 + +サポートされているのは、 `ENGINE = Join(ANY, LEFT, )` 声明。 + +**構文** + +``` sql +joinGet(join_storage_table_name, `value_column`, join_keys) +``` + +**パラメータ** + +- `join_storage_table_name` — an [識別子](../syntax.md#syntax-identifiers) 検索が実行される場所を示します。 識別子は既定のデータベースで検索されます(パラメータを参照 `default_database` の設定ファイル)。 デフォル `USE db_name` またはを指定しデータベースのテーブルのセパレータ `db_name.db_table`、例を参照してください。 +- `value_column` — name of the column of the table that contains required data. +- `join_keys` — list of keys. + +**戻り値** + +キーのリストに対応する値のリストを返します。 + +ソーステーブルに特定のものが存在しない場合 `0` または `null` に基づいて返されます [join\_use\_nulls](../../operations/settings/settings.md#join_use_nulls) 設定。 + +詳細について `join_use_nulls` で [結合操作](../../engines/table_engines/special/join.md). + +**例えば** + +入力テーブル: + +``` sql +CREATE DATABASE db_test +CREATE TABLE db_test.id_val(`id` UInt32, `val` UInt32) ENGINE = Join(ANY, LEFT, id) SETTINGS join_use_nulls = 1 +INSERT INTO db_test.id_val VALUES (1,11)(2,12)(4,13) +``` + +``` text +┌─id─┬─val─┐ +│ 4 │ 13 │ +│ 2 │ 12 │ +│ 1 │ 11 │ +└────┴─────┘ +``` + +クエリ: + +``` sql +SELECT joinGet(db_test.id_val,'val',toUInt32(number)) from numbers(4) SETTINGS join_use_nulls = 1 +``` + +結果: + +``` text +┌─joinGet(db_test.id_val, 'val', toUInt32(number))─┐ +│ 0 │ +│ 11 │ +│ 12 │ +│ 0 │ +└──────────────────────────────────────────────────┘ +``` + +## modelEvaluate(model\_name, …) {#function-modelevaluate} + +外部モデルを評価します。 +モデル名とモデル引数を受け取ります。 float64を返します。 + +## throwIf(x\[,custom\_message\]) {#throwifx-custom-message} + +引数がゼロ以外の場合は例外をスローします。 +custom\_message-オプションのパラメータです。 + +``` sql +SELECT throwIf(number = 3, 'Too many') FROM numbers(10); +``` + +``` text +↙ Progress: 0.00 rows, 0.00 B (0.00 rows/s., 0.00 B/s.) Received exception from server (version 19.14.1): +Code: 395. DB::Exception: Received from localhost:9000. DB::Exception: Too many. +``` + +## id {#identity} + +引数として使用されたのと同じ値を返します。 デバッグに使用され、試験が可能でャックのクエリーの性能を満たします。 がクエリーの分析のために利用できる可能性指標分析装置が外部サンプリング方式な見てみよう `identity` 機能。 + +**構文** + +``` sql +identity(x) +``` + +**例えば** + +クエリ: + +``` sql +SELECT identity(42) +``` + +結果: + +``` text +┌─identity(42)─┐ +│ 42 │ +└──────────────┘ +``` + +## randomprintableasii {#randomascii} + +のランダムなセットを持つ文字列を生成します [ASCII](https://en.wikipedia.org/wiki/ASCII#Printable_characters) 印刷可能な文字。 + +**構文** + +``` sql +randomPrintableASCII(length) +``` + +**パラメータ** + +- `length` — Resulting string length. Positive integer. + + If you pass `length < 0`, behavior of the function is undefined. 
+ +**戻り値** + +- のランダムなセットを持つ文字列 [ASCII](https://en.wikipedia.org/wiki/ASCII#Printable_characters) 印刷可能な文字。 + +タイプ: [文字列](../../sql_reference/data_types/string.md) + +**例えば** + +``` sql +SELECT number, randomPrintableASCII(30) as str, length(str) FROM system.numbers LIMIT 3 +``` + +``` text +┌─number─┬─str────────────────────────────┬─length(randomPrintableASCII(30))─┐ +│ 0 │ SuiCOSTvC0csfABSw=UcSzp2.`rv8x │ 30 │ +│ 1 │ 1Ag NlJ &RCN:*>HVPG;PE-nO"SUFD │ 30 │ +│ 2 │ /"+<"wUTh:=LjJ Vm!c&hI*m#XTfzz │ 30 │ +└────────┴────────────────────────────────┴──────────────────────────────────┘ +``` + +[元の記事](https://clickhouse.tech/docs/en/query_language/functions/other_functions/) diff --git a/docs/ja/sql_reference/functions/random_functions.md b/docs/ja/sql_reference/functions/random_functions.md deleted file mode 120000 index f308eea0730..00000000000 --- a/docs/ja/sql_reference/functions/random_functions.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/functions/random_functions.md \ No newline at end of file diff --git a/docs/ja/sql_reference/functions/random_functions.md b/docs/ja/sql_reference/functions/random_functions.md new file mode 100644 index 00000000000..d356558481d --- /dev/null +++ b/docs/ja/sql_reference/functions/random_functions.md @@ -0,0 +1,30 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 51 +toc_title: "\u64EC\u4F3C\u4E71\u6570\u306E\u751F\u6210" +--- + +# 擬似乱数を生成するための関数 {#functions-for-generating-pseudo-random-numbers} + +擬似乱数の非暗号生成器が使用される。 + +全ての機能を受け入れゼロの引数または一つの引数。 +引数が渡された場合は、任意の型にすることができ、その値は何にも使用されません。 +この引数の唯一の目的は、同じ関数の二つの異なるインスタンスが異なる乱数を持つ異なる列を返すように、共通の部分式の除去を防ぐことです。 + +## ランド {#rand} + +すべてのuint32型の数値に均等に分布する、擬似ランダムなuint32数値を返します。 +線形合同発生器を使用します。 + +## rand64 {#rand64} + +すべてのuint64型の数値に均等に分布する、擬似ランダムなuint64数値を返します。 +線形合同発生器を使用します。 + +## randconstantname {#randconstant} + +値は、異なるブロックのためのものである、擬似ランダムuint32番号を返します。 + +[元の記事](https://clickhouse.tech/docs/en/query_language/functions/random_functions/) diff --git a/docs/ja/sql_reference/functions/rounding_functions.md b/docs/ja/sql_reference/functions/rounding_functions.md deleted file mode 120000 index 459b31ef8b9..00000000000 --- a/docs/ja/sql_reference/functions/rounding_functions.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/functions/rounding_functions.md \ No newline at end of file diff --git a/docs/ja/sql_reference/functions/rounding_functions.md b/docs/ja/sql_reference/functions/rounding_functions.md new file mode 100644 index 00000000000..ab0bc719d5a --- /dev/null +++ b/docs/ja/sql_reference/functions/rounding_functions.md @@ -0,0 +1,190 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 45 +toc_title: "\u4E38\u3081" +--- + +# 丸め関数 {#rounding-functions} + +## 床(x\[,N\]) {#floorx-n} + +次の値以下の最大ラウンド数を返します `x`. ラウンド数は1/10Nの倍数、または1/10Nが正確でない場合は適切なデータ型の最も近い数です。 +‘N’ 整数定数、オプションのパラメータです。 デフォルトではゼロで、整数に丸めることを意味します。 +‘N’ 負の可能性があります。 + +例: `floor(123.45, 1) = 123.4, floor(123.45, -1) = 120.` + +`x` 任意の数値型です。 結果は同じタイプの数です。 +整数引数の場合、負の値で丸めるのが理にかなっています `N` 値(非負の場合 `N`、関数は何もしません)。 +丸めによってオーバーフロー(たとえば、floor(-128,-1))が発生した場合は、実装固有の結果が返されます。 + +## ceil(x\[,N\]),ceiling(x\[,N\]) {#ceilx-n-ceilingx-n} + +次の値以上の最小の丸め数を返します `x`. 他のすべての方法では、それはと同じです `floor` 関数(上記参照)。 + +## trunc(x\[,N\]),truncate(x\[,N\]) {#truncx-n-truncatex-n} + +絶対値が以下の最大絶対値を持つラウンド数を返します `x`‘s. 
In every other way, it is the same as the ’floor’ 関数(上記参照)。 + +## ラウンド(x\[,N\]) {#rounding_functions-round} + +指定した小数点以下の桁数に値を丸めます。 + +この関数は、指定された順序の最も近い番号を返します。 与えられた数が周囲の数と等しい距離を持つ場合、関数は浮動小数点数型に対してバンカーの丸めを使用し、他の数値型に対してゼロから丸めます。 + +``` sql +round(expression [, decimal_places]) +``` + +**パラメータ:** + +- `expression` — A number to be rounded. Can be any [式](../syntax.md#syntax-expressions) 数値を返す [データ型](../../sql_reference/data_types/index.md#data_types). +- `decimal-places` — An integer value. + - もし `decimal-places > 0` 次に、この関数は値を小数点の右側に丸めます。 + - もし `decimal-places < 0` 次に、この関数は値を小数点の左側に丸めます。 + - もし `decimal-places = 0` 次に、この関数は値を整数に丸めます。 この場合、引数は省略できます。 + +**戻り値:** + +入力番号と同じタイプの丸められた数。 + +### 例 {#examples} + +**使用例** + +``` sql +SELECT number / 2 AS x, round(x) FROM system.numbers LIMIT 3 +``` + +``` text +┌───x─┬─round(divide(number, 2))─┐ +│ 0 │ 0 │ +│ 0.5 │ 0 │ +│ 1 │ 1 │ +└─────┴──────────────────────────┘ +``` + +**丸めの例** + +最も近い数値に丸めます。 + +``` text +round(3.2, 0) = 3 +round(4.1267, 2) = 4.13 +round(22,-1) = 20 +round(467,-2) = 500 +round(-467,-2) = -500 +``` + +銀行の丸め。 + +``` text +round(3.5) = 4 +round(4.5) = 4 +round(3.55, 1) = 3.6 +round(3.65, 1) = 3.6 +``` + +**また見なさい** + +- [ラウンドバンカー](#roundbankers) + +## ラウンドバンカー {#roundbankers} + +数値を指定した小数点以下の桁数に丸めます。 + +- 丸め番号が二つの数字の中間にある場合、関数はバンカーの丸めを使用します。 + + Banker's rounding is a method of rounding fractional numbers. When the rounding number is halfway between two numbers, it's rounded to the nearest even digit at the specified decimal position. For example: 3.5 rounds up to 4, 2.5 rounds down to 2. + + It's the default rounding method for floating point numbers defined in [IEEE 754](https://en.wikipedia.org/wiki/IEEE_754#Roundings_to_nearest). The [round](#rounding_functions-round) function performs the same rounding for floating point numbers. The `roundBankers` function also rounds integers the same way, for example, `roundBankers(45, -1) = 40`. + +- それ以外の場合、関数は数値を最も近い整数に丸めます。 + +バンカーの丸めを使用すると、丸め数値がこれらの数値の加算または減算の結果に与える影響を減らすことができます。 + +たとえば、異なる丸めの合計1.5、2.5、3.5、4.5: + +- 丸めなし: 1.5 + 2.5 + 3.5 + 4.5 = 12. +- 銀行の丸め: 2 + 2 + 4 + 4 = 12. +- 最も近い整数への丸め: 2 + 3 + 4 + 5 = 14. + +**構文** + +``` sql +roundBankers(expression [, decimal_places]) +``` + +**パラメータ** + +- `expression` — A number to be rounded. Can be any [式](../syntax.md#syntax-expressions) 数値を返す [データ型](../../sql_reference/data_types/index.md#data_types). +- `decimal-places` — Decimal places. An integer number. + - `decimal-places > 0` — The function rounds the number to the given position right of the decimal point. Example: `roundBankers(3.55, 1) = 3.6`. + - `decimal-places < 0` — The function rounds the number to the given position left of the decimal point. Example: `roundBankers(24.55, -1) = 20`. + - `decimal-places = 0` — The function rounds the number to an integer. In this case the argument can be omitted. Example: `roundBankers(2.5) = 2`. 
+ +**戻り値** + +バンカーの丸めメソッドによって丸められた値。 + +### 例 {#examples-1} + +**使用例** + +クエリ: + +``` sql + SELECT number / 2 AS x, roundBankers(x, 0) AS b fROM system.numbers limit 10 +``` + +結果: + +``` text +┌───x─┬─b─┐ +│ 0 │ 0 │ +│ 0.5 │ 0 │ +│ 1 │ 1 │ +│ 1.5 │ 2 │ +│ 2 │ 2 │ +│ 2.5 │ 2 │ +│ 3 │ 3 │ +│ 3.5 │ 4 │ +│ 4 │ 4 │ +│ 4.5 │ 4 │ +└─────┴───┘ +``` + +**銀行の丸めの例** + +``` text +roundBankers(0.4) = 0 +roundBankers(-3.5) = -4 +roundBankers(4.5) = 4 +roundBankers(3.55, 1) = 3.6 +roundBankers(3.65, 1) = 3.6 +roundBankers(10.35, 1) = 10.4 +roundBankers(10.755, 2) = 11,76 +``` + +**また見なさい** + +- [ラウンド](#rounding_functions-round) + +## roundToExp2(num) {#roundtoexp2num} + +数値を受け取ります。 数値が小さい場合は0を返します。 それ以外の場合は、数値を最も近い(非負の全体)程度に丸めます。 + +## ラウンドデュレーション(num) {#rounddurationnum} + +数値を受け取ります。 数値が小さい場合は0を返します。 それ以外の場合は、数値をセットから数値に切り下げます: 1, 10, 30, 60, 120, 180, 240, 300, 600, 1200, 1800, 3600, 7200, 18000, 36000. この機能はyandexに固有です。metricaとセッションの長さに関するレポートを実装するために使用。 + +## roundAge(num) {#roundagenum} + +数値を受け取ります。 数値が18未満の場合、0を返します。 それ以外の場合は、数値をセットから数値に切り下げます: 18, 25, 35, 45, 55. この機能はyandexに固有です。metricaとユーザーの年齢に関するレポートを実装するために使用。 + +## ラウンドダウン(num,arr) {#rounddownnum-arr} + +数値を受け取り、指定した配列内の要素に切り捨てます。 値が下限より小さい場合は、下限が返されます。 + +[元の記事](https://clickhouse.tech/docs/en/query_language/functions/rounding_functions/) diff --git a/docs/ja/sql_reference/functions/splitting_merging_functions.md b/docs/ja/sql_reference/functions/splitting_merging_functions.md deleted file mode 120000 index cf304ac3dbe..00000000000 --- a/docs/ja/sql_reference/functions/splitting_merging_functions.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/functions/splitting_merging_functions.md \ No newline at end of file diff --git a/docs/ja/sql_reference/functions/splitting_merging_functions.md b/docs/ja/sql_reference/functions/splitting_merging_functions.md new file mode 100644 index 00000000000..7e21f5a97f3 --- /dev/null +++ b/docs/ja/sql_reference/functions/splitting_merging_functions.md @@ -0,0 +1,117 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 47 +toc_title: "\u6587\u5B57\u5217\u3068\u914D\u5217\u306E\u5206\u5272\u3068\u30DE\u30FC\ + \u30B8" +--- + +# 文字列と配列の分割とマージのための関数 {#functions-for-splitting-and-merging-strings-and-arrays} + +## splitByChar(セパレータ,s) {#splitbycharseparator-s} + +文字列を、指定した文字で区切った部分文字列に分割します。 定数文字列を使用します `separator` その正確に一つの文字からなる。 +選択した部分文字列の配列を返します。 空の部分文字列は、文字列の先頭または末尾にセパレータがある場合、または複数の連続するセパレータがある場合に選択できます。 + +**構文** + +``` sql +splitByChar(, ) +``` + +**パラメータ** + +- `separator` — The separator which should contain exactly one character. [文字列](../../sql_reference/data_types/string.md). +- `s` — The string to split. [文字列](../../sql_reference/data_types/string.md). + +**戻り値(s)** + +選択した部分文字列の配列を返します。 空の部分文字列は、次の場合に選択できます: + +- 区切り記号は、文字列の先頭または末尾に表示されます; +- 複数の連続した区切り文字があります; +- 元の文字列 `s` 空です。 + +タイプ: [配列](../../sql_reference/data_types/array.md) の [文字列](../../sql_reference/data_types/string.md). + +**例えば** + +``` sql +SELECT splitByChar(',', '1,2,3,abcde') +``` + +``` text +┌─splitByChar(',', '1,2,3,abcde')─┐ +│ ['1','2','3','abcde'] │ +└─────────────────────────────────┘ +``` + +## splitByString(separator,s) {#splitbystringseparator-s} + +文字列を文字列で区切られた部分文字列に分割します。 定数文字列を使用します `separator` 区切り文字として複数の文字が使用されます。 文字列の場合 `separator` 空である場合は、文字列を分割します `s` 単一の文字の配列に変換します。 + +**構文** + +``` sql +splitByString(, ) +``` + +**パラメータ** + +- `separator` — The separator. [文字列](../../sql_reference/data_types/string.md). +- `s` — The string to split. 
[文字列](../../sql_reference/data_types/string.md). + +**戻り値(s)** + +選択した部分文字列の配列を返します。 空の部分文字列は、次の場合に選択できます: + +タイプ: [配列](../../sql_reference/data_types/array.md) の [文字列](../../sql_reference/data_types/string.md). + +- 空でない区切り文字は、文字列の先頭または末尾に作成されます; +- 複数の連続する空でない区切り記号があります; +- 元の文字列 `s` 区切り記号が空でない間は空です。 + +**例えば** + +``` sql +SELECT splitByString(', ', '1, 2 3, 4,5, abcde') +``` + +``` text +┌─splitByString(', ', '1, 2 3, 4,5, abcde')─┐ +│ ['1','2 3','4,5','abcde'] │ +└───────────────────────────────────────────┘ +``` + +``` sql +SELECT splitByString('', 'abcde') +``` + +``` text +┌─splitByString('', 'abcde')─┐ +│ ['a','b','c','d','e'] │ +└────────────────────────────┘ +``` + +## arrayStringConcat(arr\[,separator\]) {#arraystringconcatarr-separator} + +配列にリストされている文字列を区切り文字と連結します。デフォルトでは空の文字列に設定されています。 +文字列を返します。 + +## alphaTokens(s) {#alphatokenss} + +範囲a-zおよびa-zから連続するバイトの部分文字列を選択します。 + +**例えば** + +``` sql +SELECT alphaTokens('abca1abc') +``` + +``` text +┌─alphaTokens('abca1abc')─┐ +│ ['abca','abc'] │ +└─────────────────────────┘ +``` + +[元の記事](https://clickhouse.tech/docs/en/query_language/functions/splitting_merging_functions/) diff --git a/docs/ja/sql_reference/functions/string_functions.md b/docs/ja/sql_reference/functions/string_functions.md deleted file mode 120000 index 55e2445de32..00000000000 --- a/docs/ja/sql_reference/functions/string_functions.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/functions/string_functions.md \ No newline at end of file diff --git a/docs/ja/sql_reference/functions/string_functions.md b/docs/ja/sql_reference/functions/string_functions.md new file mode 100644 index 00000000000..1e7948edf89 --- /dev/null +++ b/docs/ja/sql_reference/functions/string_functions.md @@ -0,0 +1,489 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 40 +toc_title: "\u6587\u5B57\u5217\u306E\u64CD\u4F5C" +--- + +# 文字列を扱うための関数 {#functions-for-working-with-strings} + +## 空 {#empty} + +空の文字列の場合は1、空でない文字列の場合は0を返します。 +結果の型はuint8です。 +文字列が空白またはnullバイトであっても、少なくとも一つのバイトが含まれている場合、文字列は空ではないと見なされます。 +この関数は配列に対しても機能します。 + +## notEmpty {#notempty} + +空の文字列の場合は0、空でない文字列の場合は1を返します。 +結果の型はuint8です。 +この関数は配列に対しても機能します。 + +## 長さ {#length} + +文字列の長さをバイトで返します(コード-ポイントではなく、文字ではありません)。 +結果の型はuint64です。 +この関数は配列に対しても機能します。 + +## lengthUTF8 {#lengthutf8} + +文字列にutf-8でエンコードされたテキストを構成するバイトのセットが含まれていると仮定して、unicodeコードポイント(文字ではない)の文字列の長さを この仮定が満たされない場合、いくつかの結果が返されます(例外はスローされません)。 +結果の型はuint64です。 + +## char\_length,CHAR\_LENGTH {#char-length} + +文字列にutf-8でエンコードされたテキストを構成するバイトのセットが含まれていると仮定して、unicodeコードポイント(文字ではない)の文字列の長さを この仮定が満たされない場合、いくつかの結果が返されます(例外はスローされません)。 +結果の型はuint64です。 + +## character\_length,CHARACTER\_LENGTH {#character-length} + +文字列にutf-8でエンコードされたテキストを構成するバイトのセットが含まれていると仮定して、unicodeコードポイント(文字ではない)の文字列の長さを この仮定が満たされない場合、いくつかの結果が返されます(例外はスローされません)。 +結果の型はuint64です。 + +## lower,lcase {#lower} + +文字列内のasciiラテン文字記号を小文字に変換します。 + +## アッパー,ucase {#upper} + +文字列内のasciiラテン文字記号を大文字に変換します。 + +## lowerUTF8 {#lowerutf8} + +文字列にutf-8でエンコードされたテキストを構成するバイトのセットが含まれていると仮定して、文字列を小文字に変換します。 +それは言語を検出しません。 そのためにトルコに結果が正確に正しい。 +コード-ポイントの大文字と小文字でutf-8バイト-シーケンスの長さが異なる場合、このコード-ポイントでは結果が正しくない可能性があります。 +文字列にutf-8でないバイトのセットが含まれている場合、その動作は未定義です。 + +## upperUTF8 {#upperutf8} + +文字列にutf-8でエンコードされたテキストを構成するバイトのセットが含まれている場合、文字列を大文字に変換します。 +それは言語を検出しません。 そのためにトルコに結果が正確に正しい。 +コード-ポイントの大文字と小文字でutf-8バイト-シーケンスの長さが異なる場合、このコード-ポイントでは結果が正しくない可能性があります。 +文字列にutf-8でないバイトのセットが含まれている場合、その動作は未定義です。 + +## isValidUTF8 {#isvalidutf8} + 
+バイトのセットが有効なutf-8エンコードの場合は1を返し、それ以外の場合は0を返します。 + +## toValidUTF8 {#tovalidutf8} + +無効なutf-8文字を `�` (U+FFFD)文字。 すべての行で実行されている無効な文字は、置換文字に折りたたまれています。 + +``` sql +toValidUTF8( input_string ) +``` + +パラメータ: + +- input\_string — Any set of bytes represented as the [文字列](../../sql_reference/data_types/string.md) データ型オブジェクト。 + +戻り値:有効なutf-8文字列。 + +**例えば** + +``` sql +SELECT toValidUTF8('\x61\xF0\x80\x80\x80b') +``` + +``` text +┌─toValidUTF8('a����b')─┐ +│ a�b │ +└───────────────────────┘ +``` + +## 繰り返す {#repeat} + +指定した回数だけ文字列を繰り返し、レプリケートされた値を単一の文字列として連結します。 + +**構文** + +``` sql +repeat(s, n) +``` + +**パラメータ** + +- `s` — The string to repeat. [文字列](../../sql_reference/data_types/string.md). +- `n` — The number of times to repeat the string. [UInt](../../sql_reference/data_types/int_uint.md). + +**戻り値** + +文字列を含む単一の文字列 `s` 繰り返す `n` 回。 もし `n` \<1、関数は、空の文字列を返します。 + +タイプ: `String`. + +**例えば** + +クエリ: + +``` sql +SELECT repeat('abc', 10) +``` + +結果: + +``` text +┌─repeat('abc', 10)──────────────┐ +│ abcabcabcabcabcabcabcabcabcabc │ +└────────────────────────────────┘ +``` + +## 反転 {#reverse} + +文字列を逆にします(バイトのシーケンスとして)。 + +## reverseUTF8 {#reverseutf8} + +文字列にutf-8テキストを表すバイトのセットが含まれていると仮定して、unicodeコードポイントのシーケンスを逆にします。 それ以外の場合は、何か他のことをします(例外はスローされません)。 + +## format(pattern, s0, s1, …) {#format} + +引数にリストされている文字列で定数パターンを書式設定する。 `pattern` 単純なPython形式のパターンです。 書式文字列に含まれる “replacement fields” 中括弧で囲まれています `{}`. 中括弧に含まれていないものは、リテラルテキストと見なされ、出力には変更されません。 リテラルテキストに中かっこ文字を含める必要がある場合は、倍にすることでエスケープできます: `{{ '{{' }}` と `{{ '}}' }}`. フィールド名は数字(ゼロから始まる)または空(それらは結果番号として扱われます)。 + +``` sql +SELECT format('{1} {0} {1}', 'World', 'Hello') +``` + +``` text +┌─format('{1} {0} {1}', 'World', 'Hello')─┐ +│ Hello World Hello │ +└─────────────────────────────────────────┘ +``` + +``` sql +SELECT format('{} {}', 'Hello', 'World') +``` + +``` text +┌─format('{} {}', 'Hello', 'World')─┐ +│ Hello World │ +└───────────────────────────────────┘ +``` + +## concat {#concat} + +引数にリストされている文字列を区切り文字なしで連結します。 + +**構文** + +``` sql +concat(s1, s2, ...) +``` + +**パラメータ** + +String型またはFixedString型の値。 + +**戻り値** + +引数を連結した結果の文字列を返します。 + +引数の値のいずれかがある場合 `NULL`, `concat` を返します `NULL`. + +**例えば** + +クエリ: + +``` sql +SELECT concat('Hello, ', 'World!') +``` + +結果: + +``` text +┌─concat('Hello, ', 'World!')─┐ +│ Hello, World! │ +└─────────────────────────────┘ +``` + +## ツづツつソツづォツづアツ、ツ債。 {#concatassumeinjective} + +と同じ [concat](#concat) の差であることを確認する必要があり `concat(s1, s2, ...) → sn` injectiveは、GROUP BYの最適化に使用されます。 + +関数の名前は次のとおりです “injective” 引数の異なる値に対して常に異なる結果を返す場合。 言い換えれば異なる引数のない利回り同一の結果です。 + +**構文** + +``` sql +concatAssumeInjective(s1, s2, ...) +``` + +**パラメータ** + +String型またはFixedString型の値。 + +**戻り値** + +引数を連結した結果の文字列を返します。 + +引数の値のいずれかがある場合 `NULL`, `concatAssumeInjective` を返します `NULL`. + +**例えば** + +入力テーブル: + +``` sql +CREATE TABLE key_val(`key1` String, `key2` String, `value` UInt32) ENGINE = TinyLog; +INSERT INTO key_val VALUES ('Hello, ','World',1), ('Hello, ','World',2), ('Hello, ','World!',3), ('Hello',', World!',2); +SELECT * from key_val; +``` + +``` text +┌─key1────┬─key2─────┬─value─┐ +│ Hello, │ World │ 1 │ +│ Hello, │ World │ 2 │ +│ Hello, │ World! │ 3 │ +│ Hello │ , World! │ 2 │ +└─────────┴──────────┴───────┘ +``` + +クエリ: + +``` sql +SELECT concat(key1, key2), sum(value) FROM key_val GROUP BY concatAssumeInjective(key1, key2) +``` + +結果: + +``` text +┌─concat(key1, key2)─┬─sum(value)─┐ +│ Hello, World! │ 3 │ +│ Hello, World! 
│ 2 │ +│ Hello, World │ 3 │ +└────────────────────┴────────────┘ +``` + +## サブストリング(s,オフセット,長さ)、中(s,オフセット,長さ)、サブストリング(s,オフセット,長さ) {#substring} + +からのバイトで始まる部分文字列を返します。 ‘offset’ あるインデックス ‘length’ バイト長。 文字の索引付けは、(標準SQLのように)文字から始まります。 その ‘offset’ と ‘length’ 引数は定数である必要があります。 + +## substringUTF8(s,オフセット,長さ) {#substringutf8} + +同じように ‘substring’ しかし、Unicodeコードポイントの場合。 作品は、この文字列が含まれるセットを表すバイトのUTF-8で符号化されます。 この仮定が満たされない場合、いくつかの結果が返されます(例外はスローされません)。 + +## appendTrailingCharIfAbsent(s,c) {#appendtrailingcharifabsent} + +この ‘s’ 文字列は空ではなく、空の文字列を含みません。 ‘c’ 最後の文字は、 ‘c’ 最後に文字。 + +## convertCharset(s,from,to) {#convertcharset} + +文字列を返します ‘s’ それはエンコーディングから変換された ‘from’ でのエンコーディングに ‘to’. + +## base64Encode(s) {#base64encode} + +エンコード ‘s’ base64への文字列 + +## base64Decode(s) {#base64decode} + +Base64エンコードされた文字列のデコード ‘s’ 元の文字列に。 失敗した場合には例外を発生させます。 + +## tryBase64Decode(s) {#trybase64decode} + +Base64Decodeに似ていますが、エラーの場合は空の文字列が返されます。 + +## endsWith(s,suffix) {#endswith} + +指定された接尾辞で終了するかどうかを返します。 文字列が指定された接尾辞で終わる場合は1を返し、それ以外の場合は0を返します。 + +## startsWith(str,プレフィックス) {#startswith} + +1を返しますか否かの文字列の開始を、指定された接頭辞、そうでない場合は0を返します。 + +``` sql +SELECT startsWith('Spider-Man', 'Spi'); +``` + +**戻り値** + +- 1、文字列が指定された接頭辞で始まる場合。 +- 文字列が指定された接頭辞で始まらない場合は0。 + +**例えば** + +クエリ: + +``` sql +SELECT startsWith('Hello, world!', 'He'); +``` + +結果: + +``` text +┌─startsWith('Hello, world!', 'He')─┐ +│ 1 │ +└───────────────────────────────────┘ +``` + +## トリム {#trim} + +文字列の先頭または末尾から指定されたすべての文字を削除します。 +デフォルトでは、文字列の両端から共通の空白(ascii文字32)が連続して出現するすべてを削除します。 + +**構文** + +``` sql +trim([[LEADING|TRAILING|BOTH] trim_character FROM] input_string) +``` + +**パラメータ** + +- `trim_character` — specified characters for trim. [文字列](../../sql_reference/data_types/string.md). +- `input_string` — string for trim. [文字列](../../sql_reference/data_types/string.md). + +**戻り値** + +先頭および(または)末尾に指定された文字を含まない文字列。 + +タイプ: `String`. + +**例えば** + +クエリ: + +``` sql +SELECT trim(BOTH ' ()' FROM '( Hello, world! )') +``` + +結果: + +``` text +┌─trim(BOTH ' ()' FROM '( Hello, world! )')─┐ +│ Hello, world! │ +└───────────────────────────────────────────────┘ +``` + +## trimLeft {#trimleft} + +文字列の先頭から、共通の空白文字(ascii文字32)のすべての連続した出現を削除します。 他の種類の空白文字(タブ、改行なしなど)は削除されません。). + +**構文** + +``` sql +trimLeft(input_string) +``` + +エイリアス: `ltrim(input_string)`. + +**パラメータ** + +- `input_string` — string to trim. [文字列](../../sql_reference/data_types/string.md). + +**戻り値** + +共通の空白をリードしない文字列。 + +タイプ: `String`. + +**例えば** + +クエリ: + +``` sql +SELECT trimLeft(' Hello, world! ') +``` + +結果: + +``` text +┌─trimLeft(' Hello, world! ')─┐ +│ Hello, world! │ +└─────────────────────────────────────┘ +``` + +## trimRight {#trimright} + +文字列の末尾から共通の空白文字(ascii文字32)のすべての連続した出現を削除します。 他の種類の空白文字(タブ、改行なしなど)は削除されません。). + +**構文** + +``` sql +trimRight(input_string) +``` + +エイリアス: `rtrim(input_string)`. + +**パラメータ** + +- `input_string` — string to trim. [文字列](../../sql_reference/data_types/string.md). + +**戻り値** + +共通の空白を末尾に付けない文字列。 + +タイプ: `String`. + +**例えば** + +クエリ: + +``` sql +SELECT trimRight(' Hello, world! ') +``` + +結果: + +``` text +┌─trimRight(' Hello, world! ')─┐ +│ Hello, world! │ +└──────────────────────────────────────┘ +``` + +## トリンボスcity in california usa {#trimboth} + +文字列の両端から共通の空白文字(ascii文字32)が連続して出現するすべてを削除します。 他の種類の空白文字(タブ、改行なしなど)は削除されません。). + +**構文** + +``` sql +trimBoth(input_string) +``` + +エイリアス: `trim(input_string)`. + +**パラメータ** + +- `input_string` — string to trim. [文字列](../../sql_reference/data_types/string.md). + +**戻り値** + +先頭と末尾の共通の空白を含まない文字列。 + +タイプ: `String`. 
+ +**例えば** + +クエリ: + +``` sql +SELECT trimBoth(' Hello, world! ') +``` + +結果: + +``` text +┌─trimBoth(' Hello, world! ')─┐ +│ Hello, world! │ +└─────────────────────────────────────┘ +``` + +## CRC32(s) {#crc32} + +CRC-32-IEEE802.3多項式と初期値を使用して、文字列のCRC32チェックサムを返します `0xffffffff` (zlibの実装)。 + +結果の型はuint32です。 + +## CRC32IEEE(s) {#crc32ieee} + +CRC-32-IEEE802.3多項式を使用して、文字列のCRC32チェックサムを返します。 + +結果の型はuint32です。 + +## CRC64(s) {#crc64} + +CRC-64-ECMA多項式を使用して、文字列のCRC64チェックサムを返します。 + +結果の型はuint64です。 + +[元の記事](https://clickhouse.tech/docs/en/query_language/functions/string_functions/) diff --git a/docs/ja/sql_reference/functions/string_replace_functions.md b/docs/ja/sql_reference/functions/string_replace_functions.md deleted file mode 120000 index 836a8922e0c..00000000000 --- a/docs/ja/sql_reference/functions/string_replace_functions.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/functions/string_replace_functions.md \ No newline at end of file diff --git a/docs/ja/sql_reference/functions/string_replace_functions.md b/docs/ja/sql_reference/functions/string_replace_functions.md new file mode 100644 index 00000000000..27fe01a9f47 --- /dev/null +++ b/docs/ja/sql_reference/functions/string_replace_functions.md @@ -0,0 +1,94 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 42 +toc_title: "\u6587\u5B57\u5217\u3067\u7F6E\u63DB\u3059\u308B\u5834\u5408" +--- + +# 文字列の検索と置換のための関数 {#functions-for-searching-and-replacing-in-strings} + +## replaceOne(干し草の山,パターン,交換) {#replaceonehaystack-pattern-replacement} + +が存在する場合は、その最初のオカレンスを置き換えます。 ‘pattern’ の部分文字列 ‘haystack’ と ‘replacement’ サブストリング。 +以後, ‘pattern’ と ‘replacement’ 定数である必要があります。 + +## replaceAll(干し草の山、パターン、交換)、交換(干し草の山、パターン、交換) {#replaceallhaystack-pattern-replacement-replacehaystack-pattern-replacement} + +すべての出現を置き換えます。 ‘pattern’ の部分文字列 ‘haystack’ と ‘replacement’ サブストリング。 + +## replaceRegexpOne(haystack,pattern,replacement) {#replaceregexponehaystack-pattern-replacement} + +を使用して交換 ‘pattern’ 正規表現。 Re2正規表現。 +最初のオカレンスのみを置き換えます(存在する場合)。 +パターンは次のように指定できます ‘replacement’. このパタ `\0-\9`. +置換 `\0` 正規表現全体を含みます。 置換 `\1-\9` サブパターンに対応しますnumbers.To を使用 `\` テンプレート内の文字をエスケープします `\`. +また、文字列リテラルには余分なエスケープが必要です。 + +例1. 日付をアメリカ形式に変換する: + +``` sql +SELECT DISTINCT + EventDate, + replaceRegexpOne(toString(EventDate), '(\\d{4})-(\\d{2})-(\\d{2})', '\\2/\\3/\\1') AS res +FROM test.hits +LIMIT 7 +FORMAT TabSeparated +``` + +``` text +2014-03-17 03/17/2014 +2014-03-18 03/18/2014 +2014-03-19 03/19/2014 +2014-03-20 03/20/2014 +2014-03-21 03/21/2014 +2014-03-22 03/22/2014 +2014-03-23 03/23/2014 +``` + +例2. 文字列を十回コピーする: + +``` sql +SELECT replaceRegexpOne('Hello, World!', '.*', '\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0') AS res +``` + +``` text +┌─res────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐ +│ Hello, World!Hello, World!Hello, World!Hello, World!Hello, World!Hello, World!Hello, World!Hello, World!Hello, World!Hello, World! │ +└────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ +``` + +## replaceRegexpAll(干し草の山,パターン,置換) {#replaceregexpallhaystack-pattern-replacement} + +これは同じことをしますが、すべての出現を置き換えます。 例えば: + +``` sql +SELECT replaceRegexpAll('Hello, World!', '.', '\\0\\0') AS res +``` + +``` text +┌─res────────────────────────┐ +│ HHeelllloo,, WWoorrlldd!! 
│ +└────────────────────────────┘ +``` + +例外として、正規表現が空の部分文字列で処理された場合、置換は複数回行われません。 +例えば: + +``` sql +SELECT replaceRegexpAll('Hello, World!', '^', 'here: ') AS res +``` + +``` text +┌─res─────────────────┐ +│ here: Hello, World! │ +└─────────────────────┘ +``` + +## regexpQuoteMeta(s) {#regexpquotemetas} + +この関数は、文字列内のいくつかの定義済み文字の前に円記号を追加します。 +定義済み文字: ‘0’, ‘\\’, ‘\|’, ‘(’, ‘)’, ‘^’, ‘$’, ‘.’, ‘\[’, '\]', ‘?’, '\*‘,’+‘,’{‘,’:‘,’-'. +この実装はre2::re2::quotemetaとは若干異なります。 ゼロバイトを\\0の代わりに\\00としてエスケープし、必要な文字だけをエスケープします。 +詳細については、リンク: [RE2](https://github.com/google/re2/blob/master/re2/re2.cc#L473) + +[元の記事](https://clickhouse.tech/docs/en/query_language/functions/string_replace_functions/) diff --git a/docs/ja/sql_reference/functions/string_search_functions.md b/docs/ja/sql_reference/functions/string_search_functions.md deleted file mode 120000 index 9f7a410f36c..00000000000 --- a/docs/ja/sql_reference/functions/string_search_functions.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/functions/string_search_functions.md \ No newline at end of file diff --git a/docs/ja/sql_reference/functions/string_search_functions.md b/docs/ja/sql_reference/functions/string_search_functions.md new file mode 100644 index 00000000000..3487aefdeb5 --- /dev/null +++ b/docs/ja/sql_reference/functions/string_search_functions.md @@ -0,0 +1,379 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 41 +toc_title: "\u6587\u5B57\u5217\u3092\u691C\u7D22\u3059\u308B\u5834\u5408" +--- + +# 文字列を検索するための関数 {#functions-for-searching-strings} + +これらのすべての機能では、既定で検索では大文字と小文字が区別されます。 あるvariantのための大文字と小文字を区別しません。 + +## 位置(干し草の山、針)、位置(干し草の山、針) {#position} + +1から始まる、文字列内の見つかった部分文字列の位置(バイト)を返します。 + +作品は、この文字列が含まれるセットを表すバイトの単一のバイトの符号化されます。 この仮定が満たされておらず、単一のバイトを使用して文字を表現できない場合、関数は例外をスローせず、予期しない結果を返します。 文字が二つのバイトを使用して表現できる場合は、二つのバイトなどを使用します。 + +大文字と小文字を区別しない検索では、次の関数を使用します [positionCaseInsensitive](#positioncaseinsensitive). + +**構文** + +``` sql +position(haystack, needle) +``` + +エイリアス: `locate(haystack, needle)`. + +**パラメータ** + +- `haystack` — string, in which substring will to be searched. [文字列](../syntax.md#syntax-string-literal). +- `needle` — substring to be searched. [文字列](../syntax.md#syntax-string-literal). + +**戻り値** + +- 部分文字列が見つかった場合、バイト単位で開始位置(1から数えます)。 +- 0、部分文字列が見つからなかった場合。 + +タイプ: `Integer`. + +**例** + +フレーズ “Hello, world!” を含むの設定を表すバイトの単一のバイトの符号化されます。 この関数は、期待される結果を返します: + +クエリ: + +``` sql +SELECT position('Hello, world!', '!') +``` + +結果: + +``` text +┌─position('Hello, world!', '!')─┐ +│ 13 │ +└────────────────────────────────┘ +``` + +ロシア語の同じ句には、単一のバイトを使用して表現できない文字が含まれています。 この関数は、予期しない結果を返します [positionUTF8](#positionutf8) マルチバイトエンコードテキストの機能): + +クエリ: + +``` sql +SELECT position('Привет, мир!', '!') +``` + +結果: + +``` text +┌─position('Привет, мир!', '!')─┐ +│ 21 │ +└───────────────────────────────┘ +``` + +## positionCaseInsensitive {#positioncaseinsensitive} + +同じように [位置](#position) 1から始まる、文字列内の見つかった部分文字列の位置(バイト)を返します。 大文字小文字を区別しない検索には、この関数を使用します。 + +作品は、この文字列が含まれるセットを表すバイトの単一のバイトの符号化されます。 この仮定が満たされておらず、単一のバイトを使用して文字を表現できない場合、関数は例外をスローせず、予期しない結果を返します。 文字が二つのバイトを使用して表現できる場合は、二つのバイトなどを使用します。 + +**構文** + +``` sql +positionCaseInsensitive(haystack, needle) +``` + +**パラメータ** + +- `haystack` — string, in which substring will to be searched. [文字列](../syntax.md#syntax-string-literal). +- `needle` — substring to be searched. [文字列](../syntax.md#syntax-string-literal). + +**戻り値** + +- 部分文字列が見つかった場合、バイト単位で開始位置(1から数えます)。 +- 0、部分文字列が見つからなかった場合。 + +タイプ: `Integer`. 
+ +**例えば** + +クエリ: + +``` sql +SELECT positionCaseInsensitive('Hello, world!', 'hello') +``` + +結果: + +``` text +┌─positionCaseInsensitive('Hello, world!', 'hello')─┐ +│ 1 │ +└───────────────────────────────────────────────────┘ +``` + +## positionUTF8 {#positionutf8} + +文字列内の見つかった部分文字列の位置(unicodeポイント単位)を、1から開始して返します。 + +作品は、この文字列が含まれるセットを表すバイトのutf-8で符号化されます。 この仮定が満たされない場合、関数は例外をスローせず、予期しない結果を返します。 文字が二つのunicodeポイントを使って表現できる場合は、二つのポイントを使います。 + +大文字と小文字を区別しない検索では、次の関数を使用します [位置caseinsensitiveutf8](#positioncaseinsensitiveutf8). + +**構文** + +``` sql +positionUTF8(haystack, needle) +``` + +**パラメータ** + +- `haystack` — string, in which substring will to be searched. [文字列](../syntax.md#syntax-string-literal). +- `needle` — substring to be searched. [文字列](../syntax.md#syntax-string-literal). + +**戻り値** + +- 部分文字列が見つかった場合、unicodeポイントの開始位置(1から数えます)。 +- 0、部分文字列が見つからなかった場合。 + +タイプ: `Integer`. + +**例** + +フレーズ “Hello, world!” ロシア語のUnicodeのポイントを表すシングルポイントで符号化されます。 この関数は、期待される結果を返します: + +クエリ: + +``` sql +SELECT positionUTF8('Привет, мир!', '!') +``` + +結果: + +``` text +┌─positionUTF8('Привет, мир!', '!')─┐ +│ 12 │ +└───────────────────────────────────┘ +``` + +フレーズ “Salut, étudiante!”、どこの文字 `é` 一つの点を使用して表すことができます (`U+00E9`)または二つのポイント (`U+0065U+0301` 関数は、いくつかの予想外の結果を返すことができます: + +手紙のためのクエリ `é`、一つのUnicodeポイントを表している `U+00E9`: + +``` sql +SELECT positionUTF8('Salut, étudiante!', '!') +``` + +結果: + +``` text +┌─positionUTF8('Salut, étudiante!', '!')─┐ +│ 17 │ +└────────────────────────────────────────┘ +``` + +手紙のためのクエリ `é` これは二つのユニコード点を表します `U+0065U+0301`: + +``` sql +SELECT positionUTF8('Salut, étudiante!', '!') +``` + +結果: + +``` text +┌─positionUTF8('Salut, étudiante!', '!')─┐ +│ 18 │ +└────────────────────────────────────────┘ +``` + +## 位置caseinsensitiveutf8 {#positioncaseinsensitiveutf8} + +同じように [positionUTF8](#positionutf8) ただし、大文字と小文字は区別されません。 文字列内の見つかった部分文字列の位置(Unicodeポイント単位)を、1から開始して返します。 + +作品は、この文字列が含まれるセットを表すバイトのutf-8で符号化されます。 この仮定が満たされない場合、関数は例外をスローせず、予期しない結果を返します。 文字が二つのunicodeポイントを使って表現できる場合は、二つのポイントを使います。 + +**構文** + +``` sql +positionCaseInsensitiveUTF8(haystack, needle) +``` + +**パラメータ** + +- `haystack` — string, in which substring will to be searched. [文字列](../syntax.md#syntax-string-literal). +- `needle` — substring to be searched. [文字列](../syntax.md#syntax-string-literal). + +**戻り値** + +- 部分文字列が見つかった場合、unicodeポイントの開始位置(1から数えます)。 +- 0、部分文字列が見つからなかった場合。 + +タイプ: `Integer`. + +**例えば** + +クエリ: + +``` sql +SELECT positionCaseInsensitiveUTF8('Привет, мир!', 'Мир') +``` + +結果: + +``` text +┌─positionCaseInsensitiveUTF8('Привет, мир!', 'Мир')─┐ +│ 9 │ +└────────────────────────────────────────────────────┘ +``` + +## マルチアーチャルポジション {#multisearchallpositions} + +同じように [位置](string_search_functions.md#position) しかし、返す `Array` 文字列内で見つかった対応する部分文字列の位置(バイト単位)。 位置は1から始まる索引付けされます。 + +検索は、文字列のエンコードおよび照合順序に関係なく、バイトのシーケンスで実行されます。 + +- 大文字と小文字を区別しないascii検索では、次の関数を使用します `multiSearchAllPositionsCaseInsensitive`. +- UTF-8で検索する場合は、次の関数を使用します [multiSearchAllPositionsUTF8](#multiSearchAllPositionsUTF8). +- 大文字と小文字を区別しないutf-8検索の場合は、関数multitsearchallpositionscaseinsensitiveutf8を使用します。 + +**構文** + +``` sql +multiSearchAllPositions(haystack, [needle1, needle2, ..., needlen]) +``` + +**パラメータ** + +- `haystack` — string, in which substring will to be searched. [文字列](../syntax.md#syntax-string-literal). +- `needle` — substring to be searched. [文字列](../syntax.md#syntax-string-literal). 
+ +**戻り値** + +- 対応する部分文字列が見つかった場合は1から数え、見つからなかった場合は0のバイト単位の開始位置の配列。 + +**例えば** + +クエリ: + +``` sql +SELECT multiSearchAllPositions('Hello, World!', ['hello', '!', 'world']) +``` + +結果: + +``` text +┌─multiSearchAllPositions('Hello, World!', ['hello', '!', 'world'])─┐ +│ [0,13,0] │ +└───────────────────────────────────────────────────────────────────┘ +``` + +## multiSearchAllPositionsUTF8 {#multiSearchAllPositionsUTF8} + +見る `multiSearchAllPositions`. + +## マルチアーチファーストポジション(干し草の山,\[ニードル1、針2, …, needlen\]) {#multisearchfirstposition} + +同じように `position` しかし、文字列の左端のオフセットを返します `haystack` それは針のいくつかに一致します。 + +大文字と小文字を区別しない検索やutf-8形式の場合は、関数を使用します `multiSearchFirstPositionCaseInsensitive, multiSearchFirstPositionUTF8, multiSearchFirstPositionCaseInsensitiveUTF8`. + +## マルチアーチファーストインデックス(haystack,\[needle1、針2, …, needlen\]) {#multisearchfirstindexhaystack-needle1-needle2-needlen} + +インデックスを返す `i` (1から始まる)見つかった最も左の針の私は 文字列の中で `haystack` それ以外の場合は0。 + +大文字と小文字を区別しない検索やutf-8形式の場合は、関数を使用します `multiSearchFirstIndexCaseInsensitive, multiSearchFirstIndexUTF8, multiSearchFirstIndexCaseInsensitiveUTF8`. + +## ツつィツ姪"ツつ"ツ債ツづュツつケツ-faq1、針2, …, needlen\]) {#function-multisearchany} + +少なくとも一つの文字列の針場合、1を返します私は 文字列に一致します `haystack` それ以外の場合は0。 + +大文字と小文字を区別しない検索やutf-8形式の場合は、関数を使用します `multiSearchAnyCaseInsensitive, multiSearchAnyUTF8, multiSearchAnyCaseInsensitiveUTF8`. + +!!! note "メモ" + すべて `multiSearch*` 機能は針の数2よりより少しべきです8 実装仕様のため。 + +## マッチ(干し草の山、パターン) {#matchhaystack-pattern} + +その文字列が `pattern` 正規表現。 A `re2` 正規表現。 その [構文](https://github.com/google/re2/wiki/Syntax) の `re2` 正規表現は、Perl正規表現の構文よりも制限されています。 + +一致しない場合は0、一致する場合は1を返します。 + +バックスラッシュ記号に注意してください (`\`)は、正規表現でエスケープするために使用されます。 同じ記号が文字列リテラルでエスケープするために使用されます。 したがって、正規表現でシンボルをエスケープするには、文字列リテラルに二つの円記号(\\)を記述する必要があります。 + +正規表現は、文字列がバイトのセットであるかのように動作します。 正規表現にnullバイトを含めることはできません。 +パターンが文字列内の部分文字列を検索するには、likeまたは ‘position’、彼らははるかに高速に動作するので。 + +## マルチャチャー(干し草の山、\[パターン1、パターン2, …, patternn\]) {#multimatchanyhaystack-pattern1-pattern2-patternn} + +同じように `match` ただし、正規表現のどれも一致しない場合は0を返し、パターンのいずれかが一致する場合は1を返します。 それは使用します [hyperscan](https://github.com/intel/hyperscan) ライブラリ。 文字列の部分文字列を検索するパターンの場合は、次のように使用する方がよいでしょう `multiSearchAny` それははるかに速く動作するので。 + +!!! note "メモ" + の長さ `haystack` 文字列は2未満でなければなりません32 それ以外の場合は、例外がスローされます。 この制限は、hyperscan APIのために行われます。 + +## インデックスを作成します。1、パターン2, …, patternn\]) {#multimatchanyindexhaystack-pattern1-pattern2-patternn} + +同じように `multiMatchAny`、しかし、haystackに一致する任意のインデックスを返します。 + +## ツつィツ姪"ツつ"ツ債ツづュツつケツ-ツ篠堕猟ソツ青ソツ仰1、パターン2, …, patternn\]) {#multimatchallindiceshaystack-pattern1-pattern2-patternn} + +同じように `multiMatchAny` しかし、haystackに一致するすべての指標の配列を任意の順序で返します。 + +## マルチフザイマチャニ(干し草の山、距離、\[パターン1、パターン2, …, patternn\]) {#multifuzzymatchanyhaystack-distance-pattern1-pattern2-patternn} + +同じように `multiMatchAny` ただし、定数内のhaystackに一致するパターンがある場合は1を返します [距離を編集](https://en.wikipedia.org/wiki/Edit_distance). この機能は実験モードでもあり、非常に遅くなる可能性があります。 詳細については、 [hyperscanマニュアル](https://intel.github.io/hyperscan/dev-reference/compilation.html#approximate-matching). + +## インデックスを作成します。1、パターン2, …, patternn\]) {#multifuzzymatchanyindexhaystack-distance-pattern1-pattern2-patternn} + +同じように `multiFuzzyMatchAny` しかし、一定の編集距離内のhaystackに一致する任意のインデックスを返します。 + +## multiFuzzyMatchAllIndices(haystack、距離、\[パターン1、パターン2, …, patternn\]) {#multifuzzymatchallindiceshaystack-distance-pattern1-pattern2-patternn} + +同じように `multiFuzzyMatchAny` しかし、一定の編集距離内のhaystackに一致する任意の順序ですべてのインデックスの配列を返します。 + +!!! 
note "メモ"
+    `multiFuzzyMatch*` 関数はUTF-8の正規表現をサポートしておらず、hyperscanの制限により、このような式はバイトとして扱われます。
+
+!!! note "メモ"
+    Hyperscanを使用するすべての関数をオフにするには、設定 `SET allow_hyperscan = 0;` を使用します。
+
+## extract(haystack, pattern) {#extracthaystack-pattern}
+
+正規表現を使用して文字列の断片を抽出します。 ‘haystack’ が ‘pattern’ の正規表現に一致しない場合は、空の文字列が返されます。 正規表現にサブパターンが含まれていない場合は、正規表現全体に一致するフラグメントを取ります。 それ以外の場合は、最初のサブパターンに一致するフラグメントを取得します。
+
+## extractAll(haystack, pattern) {#extractallhaystack-pattern}
+
+正規表現を使用して、文字列のすべてのフラグメントを抽出します。 ‘haystack’ が ‘pattern’ の正規表現に一致しない場合は、空の文字列が返されます。 正規表現に対するすべての一致で構成される文字列の配列を返します。 その他の点では、動作は ‘extract’ 関数と同じです(最初のサブパターン、サブパターンがない場合は式全体を取ります)。
+
+## like(haystack, pattern), haystack LIKE pattern 演算子 {#function-like}
+
+文字列が単純な正規表現に一致するかどうかを調べます。
+正規表現には、メタシンボル `%` と `_` を含めることができます。
+
+`%` 任意のバイト数(ゼロ文字を含む)を示します。
+
+`_` 任意のバイトを示します。
+
+メタシンボルをエスケープするには、バックスラッシュ (`\`) を使います。 エスケープの注意事項については、 ‘match’ 関数の説明を参照してください。
+
+`%needle%` のような正規表現の場合、コードはより最適化されており、 `position` 関数と同じ速さで動作します。
+その他の正規表現の場合、コードは ‘match’ 関数と同じです。
+
+## notLike(haystack, pattern), haystack NOT LIKE pattern 演算子 {#function-notlike}
+
+‘like’ と同じですが、否定形です。
+
+## ngramDistance(haystack, needle) {#ngramdistancehaystack-needle}
+
+`haystack` と `needle` の間の4グラム距離を計算します: counts the symmetric difference between two multisets of 4-grams and normalizes it by the sum of their cardinalities. Returns float number from 0 to 1 – the closer to zero, the more strings are similar to each other. 定数の `needle` または `haystack` が32Kbを超える場合は、例外をスローします。 非定数の `haystack` または `needle` 文字列が32Kbを超える場合、距離は常に1になります。
+
+大文字と小文字を区別しない検索やUTF-8形式の場合は、関数 `ngramDistanceCaseInsensitive, ngramDistanceUTF8, ngramDistanceCaseInsensitiveUTF8` を使用します。
+
+## ngramSearch(haystack, needle) {#ngramsearchhaystack-needle}
+
+`ngramDistance` と同じですが、 `needle` と `haystack` の間の非対称差を計算します – the number of n-grams from needle minus the common number of n-grams normalized by the number of `needle` n-grams. 1に近いほど、 `needle` が `haystack` に含まれている可能性が高くなります。 あいまい文字列検索に便利です。
+
+大文字と小文字を区別しない検索やUTF-8形式の場合は、関数 `ngramSearchCaseInsensitive, ngramSearchUTF8, ngramSearchCaseInsensitiveUTF8` を使用します。
+
+!!! note "メモ"
+    For UTF-8 case we use 3-gram distance. All these are not perfectly fair n-gram distances. We use 2-byte hashes to hash n-grams and then calculate the (non-)symmetric difference between these hash tables – collisions may occur. With UTF-8 case-insensitive format we do not use fair `tolower` function – we zero the 5-th bit (starting from zero) of each codepoint byte and first bit of zeroth byte if bytes more than one – this works for Latin and mostly for all Cyrillic letters. 
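+
+以下は、上記の `ngramDistance` と `ngramSearch` の使い方を示す最小限のスケッチです。 戻り値は0から1の浮動小数点数ですが、具体的な値はバージョンや実装の詳細に依存する可能性があるため、ここでは示しません。
+
+``` sql
+SELECT
+    ngramDistance('ClickHouse', 'ClickHose') AS dist, -- 0に近いほど二つの文字列は似ている
+    ngramSearch('Hello, world!', 'world') AS score    -- 1に近いほど needle が haystack に含まれている可能性が高い
+```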
+ +[元の記事](https://clickhouse.tech/docs/en/query_language/functions/string_search_functions/) diff --git a/docs/ja/sql_reference/functions/type_conversion_functions.md b/docs/ja/sql_reference/functions/type_conversion_functions.md deleted file mode 120000 index 6d0948216c9..00000000000 --- a/docs/ja/sql_reference/functions/type_conversion_functions.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/functions/type_conversion_functions.md \ No newline at end of file diff --git a/docs/ja/sql_reference/functions/type_conversion_functions.md b/docs/ja/sql_reference/functions/type_conversion_functions.md new file mode 100644 index 00000000000..b9576df7784 --- /dev/null +++ b/docs/ja/sql_reference/functions/type_conversion_functions.md @@ -0,0 +1,534 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 38 +toc_title: "\u30BF\u30A4\u30D7\u5909\u63DB" +--- + +# タイプ変換関数 {#type-conversion-functions} + +## 数値変換の一般的な問題 {#numeric-conversion-issues} + +値をあるデータ型から別のデータ型に変換するときは、一般的なケースでは、データの損失につながる危険な操作であることを覚えておく必要があります。 大きいデータ型の値を小さいデータ型にフィットさせる場合、または異なるデータ型の間で値を変換する場合、データ損失が発生する可能性があります。 + +クリックハウスには [C++プログラムと同じ動作](https://en.cppreference.com/w/cpp/language/implicit_conversion). + +## toInt(8/16/32/64) {#toint8163264} + +入力値を次の値に変換します。 [Int](../../sql_reference/data_types/int_uint.md) データ型。 この関数ファミ: + +- `toInt8(expr)` — Results in the `Int8` データ型。 +- `toInt16(expr)` — Results in the `Int16` データ型。 +- `toInt32(expr)` — Results in the `Int32` データ型。 +- `toInt64(expr)` — Results in the `Int64` データ型。 + +**パラメータ** + +- `expr` — [式](../syntax.md#syntax-expressions) 数値または数値の小数表現を含む文字列を返します。 数値のBinary、octal、およびhexadecimal表現はサポートされていません。 先頭のゼロは除去されます。 + +**戻り値** + +の整数値 `Int8`, `Int16`, `Int32`、または `Int64` データ型。 + +関数の使用 [ゼロに向かって丸め](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero) つまり、数字の小数桁を切り捨てます。 + +のための機能の動作 [NaNおよびInf](../../sql_reference/data_types/float.md#data_type-float-nan-inf) 引数は未定義です。 覚えておいて [数値変換の問題](#numeric-conversion-issues)、機能を使用する場合。 + +**例えば** + +``` sql +SELECT toInt64(nan), toInt32(32), toInt16('16'), toInt8(8.8) +``` + +``` text +┌─────────toInt64(nan)─┬─toInt32(32)─┬─toInt16('16')─┬─toInt8(8.8)─┐ +│ -9223372036854775808 │ 32 │ 16 │ 8 │ +└──────────────────────┴─────────────┴───────────────┴─────────────┘ +``` + +## toInt(8/16/32/64)OrZero {#toint8163264orzero} + +これは、string型の引数をとり、int型にそれを解析しようとします(8 \| 16 \| 32 \| 64). 失敗した場合は0を返します。 + +**例えば** + +``` sql +select toInt64OrZero('123123'), toInt8OrZero('123qwe123') +``` + +``` text +┌─toInt64OrZero('123123')─┬─toInt8OrZero('123qwe123')─┐ +│ 123123 │ 0 │ +└─────────────────────────┴───────────────────────────┘ +``` + +## toInt(8/16/32/64)OrNull {#toint8163264ornull} + +これは、string型の引数をとり、int型にそれを解析しようとします(8 \| 16 \| 32 \| 64). 
失敗した場合はnullを返します。 + +**例えば** + +``` sql +select toInt64OrNull('123123'), toInt8OrNull('123qwe123') +``` + +``` text +┌─toInt64OrNull('123123')─┬─toInt8OrNull('123qwe123')─┐ +│ 123123 │ ᴺᵁᴸᴸ │ +└─────────────────────────┴───────────────────────────┘ +``` + +## toUInt(8/16/32/64) {#touint8163264} + +入力値を次の値に変換します。 [UInt](../../sql_reference/data_types/int_uint.md) データ型。 この関数ファミ: + +- `toUInt8(expr)` — Results in the `UInt8` データ型。 +- `toUInt16(expr)` — Results in the `UInt16` データ型。 +- `toUInt32(expr)` — Results in the `UInt32` データ型。 +- `toUInt64(expr)` — Results in the `UInt64` データ型。 + +**パラメータ** + +- `expr` — [式](../syntax.md#syntax-expressions) 数値または数値の小数表現を含む文字列を返します。 数値のBinary、octal、およびhexadecimal表現はサポートされていません。 先頭のゼロは除去されます。 + +**戻り値** + +の整数値 `UInt8`, `UInt16`, `UInt32`、または `UInt64` データ型。 + +関数の使用 [ゼロに向かって丸め](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero) つまり、数字の小数桁を切り捨てます。 + +負のagrumentsのための関数の動作と [NaNおよびInf](../../sql_reference/data_types/float.md#data_type-float-nan-inf) 引数は未定義です。 負の数の文字列を渡すと、次のようになります `'-32'`、ClickHouseは例外を発生させます。 覚えておいて [数値変換の問題](#numeric-conversion-issues)、機能を使用する場合。 + +**例えば** + +``` sql +SELECT toUInt64(nan), toUInt32(-32), toUInt16('16'), toUInt8(8.8) +``` + +``` text +┌───────toUInt64(nan)─┬─toUInt32(-32)─┬─toUInt16('16')─┬─toUInt8(8.8)─┐ +│ 9223372036854775808 │ 4294967264 │ 16 │ 8 │ +└─────────────────────┴───────────────┴────────────────┴──────────────┘ +``` + +## toUInt(8/16/32/64)OrZero {#touint8163264orzero} + +## toUInt(8/16/32/64)OrNull {#touint8163264ornull} + +## toFloat(32/64) {#tofloat3264} + +## toFloat(32/64)OrZero {#tofloat3264orzero} + +## toFloat(32/64)OrNull {#tofloat3264ornull} + +## toDate {#todate} + +## toDateOrZero {#todateorzero} + +## toDateOrNull {#todateornull} + +## toDateTime {#todatetime} + +## toDateTimeOrZero {#todatetimeorzero} + +## toDateTimeOrNull {#todatetimeornull} + +## toDecimal(32/64/128) {#todecimal3264128} + +変換 `value` に [小数](../../sql_reference/data_types/decimal.md) 精度の高いデータ型 `S`. その `value` 数値または文字列を指定できます。 その `S` (スケール)パラメータ小数点以下の桁数を指定します。 + +- `toDecimal32(value, S)` +- `toDecimal64(value, S)` +- `toDecimal128(value, S)` + +## toDecimal(32/64/128)OrNull {#todecimal3264128ornull} + +入力文字列をaに変換します [Nullable(小数点(P,S)))](../../sql_reference/data_types/decimal.md) データ型の値。 このファミリの機能など: + +- `toDecimal32OrNull(expr, S)` — Results in `Nullable(Decimal32(S))` データ型。 +- `toDecimal64OrNull(expr, S)` — Results in `Nullable(Decimal64(S))` データ型。 +- `toDecimal128OrNull(expr, S)` — Results in `Nullable(Decimal128(S))` データ型。 + +これらの関数は、次の代わりに使用します `toDecimal*()` を取得したい場合は、 `NULL` 入力値の解析エラーが発生した場合の例外の代わりに値を指定します。 + +**パラメータ** + +- `expr` — [式](../syntax.md#syntax-expressions)、値を返します [文字列](../../sql_reference/data_types/string.md) データ型。 ClickHouseは、小数のテキスト表現を想定しています。 例えば, `'1.111'`. +- `S` — Scale, the number of decimal places in the resulting value. 
+ +**戻り値** + +の値 `Nullable(Decimal(P,S))` データ型。 値は次のとおりです: + +- 数との `S` ClickHouseが入力文字列を数値として解釈する場合、小数点以下の桁数。 +- `NULL` ClickHouseが入力文字列を数値として解釈できない場合、または入力番号に `S` 小数点以下の桁数。 + +**例** + +``` sql +SELECT toDecimal32OrNull(toString(-1.111), 5) AS val, toTypeName(val) +``` + +``` text +┌──────val─┬─toTypeName(toDecimal32OrNull(toString(-1.111), 5))─┐ +│ -1.11100 │ Nullable(Decimal(9, 5)) │ +└──────────┴────────────────────────────────────────────────────┘ +``` + +``` sql +SELECT toDecimal32OrNull(toString(-1.111), 2) AS val, toTypeName(val) +``` + +``` text +┌──val─┬─toTypeName(toDecimal32OrNull(toString(-1.111), 2))─┐ +│ ᴺᵁᴸᴸ │ Nullable(Decimal(9, 2)) │ +└──────┴────────────────────────────────────────────────────┘ +``` + +## toDecimal(32/64/128)OrZero {#todecimal3264128orzero} + +入力値を次の値に変換します。 [小数点(p,s))](../../sql_reference/data_types/decimal.md) データ型。 このファミリの機能など: + +- `toDecimal32OrZero( expr, S)` — Results in `Decimal32(S)` データ型。 +- `toDecimal64OrZero( expr, S)` — Results in `Decimal64(S)` データ型。 +- `toDecimal128OrZero( expr, S)` — Results in `Decimal128(S)` データ型。 + +これらの関数は、次の代わりに使用します `toDecimal*()` を取得したい場合は、 `0` 入力値の解析エラーが発生した場合の例外の代わりに値を指定します。 + +**パラメータ** + +- `expr` — [式](../syntax.md#syntax-expressions)、値を返します [文字列](../../sql_reference/data_types/string.md) データ型。 ClickHouseは、小数のテキスト表現を想定しています。 例えば, `'1.111'`. +- `S` — Scale, the number of decimal places in the resulting value. + +**戻り値** + +の値 `Nullable(Decimal(P,S))` データ型。 値は次のとおりです: + +- 数との `S` ClickHouseが入力文字列を数値として解釈する場合、小数点以下の桁数。 +- 0とともに `S` ClickHouseが入力文字列を数値として解釈できない場合、または入力番号に `S` 小数点以下の桁数。 + +**例えば** + +``` sql +SELECT toDecimal32OrZero(toString(-1.111), 5) AS val, toTypeName(val) +``` + +``` text +┌──────val─┬─toTypeName(toDecimal32OrZero(toString(-1.111), 5))─┐ +│ -1.11100 │ Decimal(9, 5) │ +└──────────┴────────────────────────────────────────────────────┘ +``` + +``` sql +SELECT toDecimal32OrZero(toString(-1.111), 2) AS val, toTypeName(val) +``` + +``` text +┌──val─┬─toTypeName(toDecimal32OrZero(toString(-1.111), 2))─┐ +│ 0.00 │ Decimal(9, 2) │ +└──────┴────────────────────────────────────────────────────┘ +``` + +## toString {#tostring} + +数値、文字列(固定文字列ではない)、日付、および日付を時刻で変換するための関数。 +これら全ての機能を受け入れを一つの引数。 + +文字列に変換するとき、または文字列から変換するとき、値はtabseparated形式(および他のほとんどすべてのテキスト形式)と同じ規則を使用して書式設定ま 文字列を解析できない場合は、例外がスローされ、要求はキャンセルされます。 + +日付を数値またはその逆に変換する場合、日付はunixエポックの開始からの日数に対応します。 +時刻を含む日付を数値またはその逆に変換する場合、時刻を含む日付は、unixエポックの開始からの秒数に対応します。 + +ToDate/toDateTime関数の日時形式は、次のように定義されています: + +``` text +YYYY-MM-DD +YYYY-MM-DD hh:mm:ss +``` + +例外として、uint32、int32、uint64、またはint64の数値型からdateに変換し、その数値が65536以上の場合、その数値はunixタイムスタンプとして(日数ではなく)解釈さ これにより、一般的な執筆のサポートが可能になります ‘toDate(unix\_timestamp)’ それ以外の場合はエラーになり、より面倒な書き込みが必要になります ‘toDate(toDateTime(unix\_timestamp))’. 
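+
+この動作を示す最小限のスケッチです。 `toDate` の結果はサーバーのタイムゾーンに依存する可能性がある点に注意してください:
+
+``` sql
+SELECT
+    toDate(17896) AS from_days,      -- 65536未満の数値はエポックからの日数として解釈される
+    toDate(1546300800) AS from_unix  -- 65536以上の数値はUnixタイムスタンプとして解釈される(1546300800は2019-01-01 00:00:00 UTC)
+```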
+ +時間を伴う日付と日付の間の変換は、ヌル時間を追加するか、時間を落とすことによって自然な方法で行われます。 + +数値型間の変換は、c++で異なる数値型間の代入と同じ規則を使用します。 + +さらに、datetime引数のtostring関数は、タイムゾーンの名前を含む第二の文字列引数を取ることができます。 例えば: `Asia/Yekaterinburg` この場合、時刻は指定されたタイムゾーンに従ってフォーマットされます。 + +``` sql +SELECT + now() AS now_local, + toString(now(), 'Asia/Yekaterinburg') AS now_yekat +``` + +``` text +┌───────────now_local─┬─now_yekat───────────┐ +│ 2016-06-15 00:11:21 │ 2016-06-15 02:11:21 │ +└─────────────────────┴─────────────────────┘ +``` + +また、 `toUnixTimestamp` 機能。 + +## toFixedString(s,N) {#tofixedstrings-n} + +文字列型引数をfixedstring(n)型(固定長nの文字列)に変換します。 nは定数でなければなりません。 +文字列のバイト数がnより少ない場合は、右側にnullバイトが渡されます。 文字列のバイト数がnより多い場合は、例外がスローされます。 + +## tostringクットゼロ(s) {#tostringcuttozeros} + +文字列またはfixedstring引数を受け取ります。 最初のゼロ-バイトで切り捨てられたコンテンツを持つ文字列を返します。 + +例えば: + +``` sql +SELECT toFixedString('foo', 8) AS s, toStringCutToZero(s) AS s_cut +``` + +``` text +┌─s─────────────┬─s_cut─┐ +│ foo\0\0\0\0\0 │ foo │ +└───────────────┴───────┘ +``` + +``` sql +SELECT toFixedString('foo\0bar', 8) AS s, toStringCutToZero(s) AS s_cut +``` + +``` text +┌─s──────────┬─s_cut─┐ +│ foo\0bar\0 │ foo │ +└────────────┴───────┘ +``` + +## reinterpretAsUInt(8/16/32/64) {#reinterpretasuint8163264} + +## 再解釈(8/16/32/64) {#reinterpretasint8163264} + +## 再解釈(32/64) {#reinterpretasfloat3264} + +## 再解釈アスデート {#reinterpretasdate} + +## タスデータタイムの再解釈 {#reinterpretasdatetime} + +これらの関数は文字列を受け取り、文字列の先頭に置かれたバイトをホスト順(リトルエンディアン)の数値として解釈します。 文字列が十分な長さでない場合、関数は、文字列が必要な数のヌルバイトで埋められているかのように機能します。 文字列が必要以上に長い場合、余分なバイトは無視されます。 日付はunixエポックの開始からの日数として解釈され、時刻付きの日付はunixエポックの開始からの秒数として解釈されます。 + +## 文字列の再解釈 {#type_conversion_functions-reinterpretAsString} + +この関数は、時刻を含む数値または日付または日付を受け取り、対応する値をホスト順(リトルエンディアン)で表すバイトを含む文字列を返します。 nullバイトは、末尾から削除されます。 たとえば、uint32型の値255は、バイト長の文字列です。 + +## 再解釈された文字列 {#reinterpretasfixedstring} + +この関数は、時刻を含む数値または日付または日付を受け取り、対応する値をホスト順(リトルエンディアン)で表すバイトを含むfixedstringを返します。 nullバイトは、末尾から削除されます。 たとえば、uint32型の値255は、バイト長のfixedstringです。 + +## キャスト(x,t) {#type_conversion_function-cast} + +変換 ‘x’ に ‘t’ データ型。 構文CAST(x AS t)もサポートされています。 + +例えば: + +``` sql +SELECT + '2016-06-15 23:00:00' AS timestamp, + CAST(timestamp AS DateTime) AS datetime, + CAST(timestamp AS Date) AS date, + CAST(timestamp, 'String') AS string, + CAST(timestamp, 'FixedString(22)') AS fixed_string +``` + +``` text +┌─timestamp───────────┬────────────datetime─┬───────date─┬─string──────────────┬─fixed_string──────────────┐ +│ 2016-06-15 23:00:00 │ 2016-06-15 23:00:00 │ 2016-06-15 │ 2016-06-15 23:00:00 │ 2016-06-15 23:00:00\0\0\0 │ +└─────────────────────┴─────────────────────┴────────────┴─────────────────────┴───────────────────────────┘ +``` + +FixedString(N)への変換は、String型またはFixedString(N)型の引数に対してのみ機能します。 + +タイプへの変換 [Nullable](../../sql_reference/data_types/nullable.md) そして背部は支えられます。 例えば: + +``` sql +SELECT toTypeName(x) FROM t_null +``` + +``` text +┌─toTypeName(x)─┐ +│ Int8 │ +│ Int8 │ +└───────────────┘ +``` + +``` sql +SELECT toTypeName(CAST(x, 'Nullable(UInt16)')) FROM t_null +``` + +``` text +┌─toTypeName(CAST(x, 'Nullable(UInt16)'))─┐ +│ Nullable(UInt16) │ +│ Nullable(UInt16) │ +└─────────────────────────────────────────┘ +``` + +## toInterval(年/四半期\|月/週\|日/時/分/秒) {#function-tointerval} + +数値型の引数を [間隔](../../sql_reference/data_types/special_data_types/interval.md) データ型。 + +**構文** + +``` sql +toIntervalSecond(number) +toIntervalMinute(number) +toIntervalHour(number) +toIntervalDay(number) +toIntervalWeek(number) +toIntervalMonth(number) +toIntervalQuarter(number) +toIntervalYear(number) +``` + +**パラメータ** + +- `number` — 
Duration of interval. Positive integer number. + +**戻り値** + +- の値 `Interval` データ型。 + +**例えば** + +``` sql +WITH + toDate('2019-01-01') AS date, + INTERVAL 1 WEEK AS interval_week, + toIntervalWeek(1) AS interval_to_week +SELECT + date + interval_week, + date + interval_to_week +``` + +``` text +┌─plus(date, interval_week)─┬─plus(date, interval_to_week)─┐ +│ 2019-01-08 │ 2019-01-08 │ +└───────────────────────────┴──────────────────────────────┘ +``` + +## parseDateTimeBestEffort {#parsedatetimebesteffort} + +の日付と時刻を変換します。 [文字列](../../sql_reference/data_types/string.md) 表現する [DateTime](../../sql_reference/data_types/datetime.md#data_type-datetime) データ型。 + +関数は解析します [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601), [RFC1123-5.2.14RFC-822日付と時刻の指定](https://tools.ietf.org/html/rfc1123#page-55)、ClickHouseのと他のいくつかの日付と時刻の形式。 + +**構文** + +``` sql +parseDateTimeBestEffort(time_string [, time_zone]); +``` + +**パラメータ** + +- `time_string` — String containing a date and time to convert. [文字列](../../sql_reference/data_types/string.md). +- `time_zone` — Time zone. The function parses `time_string` タイムゾーンによると。 [文字列](../../sql_reference/data_types/string.md). + +**サポートされている非標準形式** + +- 9を含む文字列。.10桁 [unix timestamp](https://en.wikipedia.org/wiki/Unix_time). +- 日付と時刻コンポーネントを含む文字列: `YYYYMMDDhhmmss`, `DD/MM/YYYY hh:mm:ss`, `DD-MM-YY hh:mm`, `YYYY-MM-DD hh:mm:ss`、等。 +- 日付を含む文字列で、時間の要素は含まれません: `YYYY`, `YYYYMM`, `YYYY*MM`, `DD/MM/YYYY`, `DD-MM-YY` など。 +- 日と時間のある文字列: `DD`, `DD hh`, `DD hh:mm`. この場合 `YYYY-MM` として代入される。 `2000-01`. +- タイムゾーンオフセット情報と共に日付と時刻を含む文字列: `YYYY-MM-DD hh:mm:ss ±h:mm`、等。 例えば, `2020-12-12 17:36:00 -5:00`. + +Separatorを持つすべての形式について、この関数は、フルネームまたは月名の最初の三文字で表される月の名前を解析します。 例: `24/DEC/18`, `24-Dec-18`, `01-September-2018`. + +**戻り値** + +- `time_string` に変換される。 `DateTime` データ型。 + +**例** + +クエリ: + +``` sql +SELECT parseDateTimeBestEffort('12/12/2020 12:12:57') +AS parseDateTimeBestEffort; +``` + +結果: + +``` text +┌─parseDateTimeBestEffort─┐ +│ 2020-12-12 12:12:57 │ +└─────────────────────────┘ +``` + +クエリ: + +``` sql +SELECT parseDateTimeBestEffort('Sat, 18 Aug 2018 07:22:16 GMT', 'Europe/Moscow') +AS parseDateTimeBestEffort +``` + +結果: + +``` text +┌─parseDateTimeBestEffort─┐ +│ 2018-08-18 10:22:16 │ +└─────────────────────────┘ +``` + +クエリ: + +``` sql +SELECT parseDateTimeBestEffort('1284101485') +AS parseDateTimeBestEffort +``` + +結果: + +``` text +┌─parseDateTimeBestEffort─┐ +│ 2015-07-07 12:04:41 │ +└─────────────────────────┘ +``` + +クエリ: + +``` sql +SELECT parseDateTimeBestEffort('2018-12-12 10:12:12') +AS parseDateTimeBestEffort +``` + +結果: + +``` text +┌─parseDateTimeBestEffort─┐ +│ 2018-12-12 10:12:12 │ +└─────────────────────────┘ +``` + +クエリ: + +``` sql +SELECT parseDateTimeBestEffort('10 20:19') +``` + +結果: + +``` text +┌─parseDateTimeBestEffort('10 20:19')─┐ +│ 2000-01-10 20:19:00 │ +└─────────────────────────────────────┘ +``` + +**また見なさい** + +- \[ISO 8601 announcement by @xkcd\](https://xkcd.com/1179/) +- [RFC 1123](https://tools.ietf.org/html/rfc1123) +- [toDate](#todate) +- [toDateTime](#todatetime) + +## parseDateTimeBestEffortOrNull {#parsedatetimebesteffortornull} + +と同じ [parseDateTimeBestEffort](#parsedatetimebesteffort) ただし、処理できない日付形式が検出された場合はnullを返します。 + +## parseDateTimeBestEffortOrZero {#parsedatetimebesteffortorzero} + +と同じ [parseDateTimeBestEffort](#parsedatetimebesteffort) ただし、処理できない日付形式に遭遇した場合は、日付またはゼロの日時が返されます。 + +[元の記事](https://clickhouse.tech/docs/en/query_language/functions/type_conversion_functions/) diff --git a/docs/ja/sql_reference/functions/url_functions.md 
b/docs/ja/sql_reference/functions/url_functions.md deleted file mode 120000 index 669709bd995..00000000000 --- a/docs/ja/sql_reference/functions/url_functions.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/functions/url_functions.md \ No newline at end of file diff --git a/docs/ja/sql_reference/functions/url_functions.md b/docs/ja/sql_reference/functions/url_functions.md new file mode 100644 index 00000000000..a594708354f --- /dev/null +++ b/docs/ja/sql_reference/functions/url_functions.md @@ -0,0 +1,209 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 54 +toc_title: "Url\u306E\u64CD\u4F5C" +--- + +# Urlを操作するための関数 {#functions-for-working-with-urls} + +これらの関数はすべてrfcに従いません。 それらは改善された性能のために最大限に簡単である。 + +## URLの一部を抽出する関数 {#functions-that-extract-parts-of-a-url} + +関連する部分がurlに存在しない場合は、空の文字列が返されます。 + +### プロトコル {#protocol} + +URLからプロトコルを抽出します。 + +Examples of typical returned values: http, https, ftp, mailto, tel, magnet… + +### ドメイン {#domain} + +URLからホスト名を抽出します。 + +``` sql +domain(url) +``` + +**パラメータ** + +- `url` — URL. Type: [文字列](../../sql_reference/data_types/string.md). + +URLは、スキームの有無にかかわらず指定できます。 例: + +``` text +svn+ssh://some.svn-hosting.com:80/repo/trunk +some.svn-hosting.com:80/repo/trunk +https://yandex.com/time/ +``` + +これらの例では、 `domain` 関数は、次の結果を返します: + +``` text +some.svn-hosting.com +some.svn-hosting.com +yandex.com +``` + +**戻り値** + +- ホスト名。 clickhouseが入力文字列をurlとして解析できる場合。 +- 空の文字列。 clickhouseが入力文字列をurlとして解析できない場合。 + +タイプ: `String`. + +**例えば** + +``` sql +SELECT domain('svn+ssh://some.svn-hosting.com:80/repo/trunk') +``` + +``` text +┌─domain('svn+ssh://some.svn-hosting.com:80/repo/trunk')─┐ +│ some.svn-hosting.com │ +└────────────────────────────────────────────────────────┘ +``` + +### ドメインウィズなしwww {#domainwithoutwww} + +ドメインを返し、複数のドメインを削除します ‘www.’ それの初めから、存在する場合。 + +### topleveldomaincomment {#topleveldomain} + +URLからトップレベルドメインを抽出します。 + +``` sql +topLevelDomain(url) +``` + +**パラメータ** + +- `url` — URL. Type: [文字列](../../sql_reference/data_types/string.md). + +URLは、スキームの有無にかかわらず指定できます。 例: + +``` text +svn+ssh://some.svn-hosting.com:80/repo/trunk +some.svn-hosting.com:80/repo/trunk +https://yandex.com/time/ +``` + +**戻り値** + +- ドメイン名。 clickhouseが入力文字列をurlとして解析できる場合。 +- 空の文字列。 clickhouseが入力文字列をurlとして解析できない場合。 + +タイプ: `String`. + +**例えば** + +``` sql +SELECT topLevelDomain('svn+ssh://www.some.svn-hosting.com:80/repo/trunk') +``` + +``` text +┌─topLevelDomain('svn+ssh://www.some.svn-hosting.com:80/repo/trunk')─┐ +│ com │ +└────────────────────────────────────────────────────────────────────┘ +``` + +### 最初のサブドメイン {#firstsignificantsubdomain} + +を返します “first significant subdomain”. これはYandex固有の非標準的な概念です。メトリカ 最初の重要なサブドメインは、セカンドレベルドメインです。 ‘com’, ‘net’, ‘org’、または ‘co’. それ以外の場合は、サードレベルのドメインです。 例えば, `firstSignificantSubdomain (‘https://news.yandex.ru/’) = ‘yandex’, firstSignificantSubdomain (‘https://news.yandex.com.tr/’) = ‘yandex’`. のリスト “insignificant” 二次レベルドメインおよびその他の実施内容に変化する可能性があります。 + +### cutToFirstSignificantSubdomain {#cuttofirstsignificantsubdomain} + +トップレベルのサブドメインを含むドメインの部分を返します。 “first significant subdomain” (上記の説明を参照)。 + +例えば, `cutToFirstSignificantSubdomain('https://news.yandex.com.tr/') = 'yandex.com.tr'`. 
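+
+上記の二つの関数を一つのクエリで確認する簡単な例です(結果の値は本文に記載された等式に基づきます):
+
+``` sql
+SELECT
+    firstSignificantSubdomain('https://news.yandex.com.tr/') AS first_sub,
+    cutToFirstSignificantSubdomain('https://news.yandex.com.tr/') AS cut_to
+```
+
+``` text
+┌─first_sub─┬─cut_to────────┐
+│ yandex    │ yandex.com.tr │
+└───────────┴───────────────┘
+```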
+ +### パス {#path} + +パスを返します。 例えば: `/top/news.html` パスにはクエリ文字列は含まれません。 + +### pathFull {#pathfull} + +上記と同じですが、クエリ文字列とフラグメントを含みます。 例:/トップ/ニュース。html?ページ=2\#コメント + +### クエリ文字列 {#querystring} + +クエリ文字列を返します。 例:ページ=1&lr=213。 query-stringには、最初の疑問符と\#と\#後のすべてが含まれていません。 + +### 断片 {#fragment} + +フラグメント識別子を返します。 fragmentには、最初のハッシュ記号は含まれません。 + +### queryStringAndFragment {#querystringandfragment} + +クエリ文字列とフラグメント識別子を返します。 例:ページ=1\#29390. + +### extractURLParameter(URL,名前) {#extracturlparameterurl-name} + +の値を返します ‘name’ URL内にパラメータが存在する場合。 それ以外の場合は、空の文字列。 この名前のパラメータが多数ある場合は、最初のオカレンスが返されます。 この関数は、パラメータ名が渡された引数とまったく同じ方法でURLにエンコードされるという前提の下で機能します。 + +### extractURLParameters(URL) {#extracturlparametersurl} + +URLパラメータに対応するname=value文字列の配列を返します。 値は決してデコードされません。 + +### extractURLParameterNames(URL) {#extracturlparameternamesurl} + +URLパラメータの名前に対応する名前文字列の配列を返します。 値は決してデコードされません。 + +### URLHierarchy(URL) {#urlhierarchyurl} + +最後に/,?記号で切り捨てられたurlを含む配列を返します。 パスとクエリ文字列で。 連続セパレータ文字として数えます。 カットは、すべての連続した区切り文字の後の位置に作られています。 + +### URLPathHierarchy(URL) {#urlpathhierarchyurl} + +上記と同じですが、結果のプロトコルとホストはありません。 要素(ルート)は含まれません。 例:この関数は、ツリーを実装するために使用されるyandexのurlを報告します。 メトリック。 + +``` text +URLPathHierarchy('https://example.com/browse/CONV-6788') = +[ + '/browse/', + '/browse/CONV-6788' +] +``` + +### decodeURLComponent(URL) {#decodeurlcomponenturl} + +復号化されたurlを返します。 +例えば: + +``` sql +SELECT decodeURLComponent('http://127.0.0.1:8123/?query=SELECT%201%3B') AS DecodedURL; +``` + +``` text +┌─DecodedURL─────────────────────────────┐ +│ http://127.0.0.1:8123/?query=SELECT 1; │ +└────────────────────────────────────────┘ +``` + +## URLの一部を削除する関数。 {#functions-that-remove-part-of-a-url} + +URLに類似したものがない場合、URLは変更されません。 + +### cutWWW {#cutwww} + +一つ以下を削除します ‘www.’ URLのドメインの先頭から、存在する場合。 + +### cutQueryString {#cutquerystring} + +クエリ文字列を削除します。 疑問符も削除されます。 + +### カットフラグメント {#cutfragment} + +フラグメント識別子を削除します。 番号記号も削除されます。 + +### cutQueryStringAndFragment {#cutquerystringandfragment} + +クエリ文字列とフラグメント識別子を削除します。 疑問符と番号記号も削除されます。 + +### cutURLParameter(URL,名前) {#cuturlparameterurl-name} + +削除する ‘name’ URLパラメーターがある場合。 この関数は、パラメータ名が渡された引数とまったく同じ方法でURLにエンコードされるという前提の下で機能します。 + +[元の記事](https://clickhouse.tech/docs/en/query_language/functions/url_functions/) diff --git a/docs/ja/sql_reference/functions/uuid_functions.md b/docs/ja/sql_reference/functions/uuid_functions.md deleted file mode 120000 index 1cff756166d..00000000000 --- a/docs/ja/sql_reference/functions/uuid_functions.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/functions/uuid_functions.md \ No newline at end of file diff --git a/docs/ja/sql_reference/functions/uuid_functions.md b/docs/ja/sql_reference/functions/uuid_functions.md new file mode 100644 index 00000000000..65956e93ddd --- /dev/null +++ b/docs/ja/sql_reference/functions/uuid_functions.md @@ -0,0 +1,122 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 53 +toc_title: "UUID\u306E\u64CD\u4F5C" +--- + +# UUIDを操作するための関数 {#functions-for-working-with-uuid} + +UUIDを操作するための関数を以下に示します。 + +## generateUUIDv4 {#uuid-function-generate} + +を生成する。 [UUID](../../sql_reference/data_types/uuid.md) の [バージョン4](https://tools.ietf.org/html/rfc4122#section-4.4). 
+ +``` sql +generateUUIDv4() +``` + +**戻り値** + +UUID型の値。 + +**使用例** + +この例では、uuid型の列を使用してテーブルを作成し、テーブルに値を挿入する方法を示します。 + +``` sql +CREATE TABLE t_uuid (x UUID) ENGINE=TinyLog + +INSERT INTO t_uuid SELECT generateUUIDv4() + +SELECT * FROM t_uuid +``` + +``` text +┌────────────────────────────────────x─┐ +│ f4bf890f-f9dc-4332-ad5c-0c18e73f28e9 │ +└──────────────────────────────────────┘ +``` + +## toUUID(x) {#touuid-x} + +文字列型の値をuuid型に変換します。 + +``` sql +toUUID(String) +``` + +**戻り値** + +UUID型の値。 + +**使用例** + +``` sql +SELECT toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba0') AS uuid +``` + +``` text +┌─────────────────────────────────uuid─┐ +│ 61f0c404-5cb3-11e7-907b-a6006ad3dba0 │ +└──────────────────────────────────────┘ +``` + +## UUIDStringToNum {#uuidstringtonum} + +次の形式の36文字を含む文字列を受け取ります `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx` そして、それをaのバイトのセットとして返します [FixedString(16)](../../sql_reference/data_types/fixedstring.md). + +``` sql +UUIDStringToNum(String) +``` + +**戻り値** + +FixedString(16) + +**使用例** + +``` sql +SELECT + '612f3c40-5d3b-217e-707b-6a546a3d7b29' AS uuid, + UUIDStringToNum(uuid) AS bytes +``` + +``` text +┌─uuid─────────────────────────────────┬─bytes────────────┐ +│ 612f3c40-5d3b-217e-707b-6a546a3d7b29 │ a/<@];!~p{jTj={) │ +└──────────────────────────────────────┴──────────────────┘ +``` + +## UUIDNumToString {#uuidnumtostring} + +を受け入れる [FixedString(16)](../../sql_reference/data_types/fixedstring.md) 値、およびテキスト形式で36文字を含む文字列を返します。 + +``` sql +UUIDNumToString(FixedString(16)) +``` + +**戻り値** + +文字列。 + +**使用例** + +``` sql +SELECT + 'a/<@];!~p{jTj={)' AS bytes, + UUIDNumToString(toFixedString(bytes, 16)) AS uuid +``` + +``` text +┌─bytes────────────┬─uuid─────────────────────────────────┐ +│ a/<@];!~p{jTj={) │ 612f3c40-5d3b-217e-707b-6a546a3d7b29 │ +└──────────────────┴──────────────────────────────────────┘ +``` + +## また見なさい {#see-also} + +- [dictGetUUID](ext_dict_functions.md#ext_dict_functions-other) + +[元の記事](https://clickhouse.tech/docs/en/query_language/functions/uuid_function/) diff --git a/docs/ja/sql_reference/functions/ym_dict_functions.md b/docs/ja/sql_reference/functions/ym_dict_functions.md deleted file mode 120000 index 2f954de7d42..00000000000 --- a/docs/ja/sql_reference/functions/ym_dict_functions.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/functions/ym_dict_functions.md \ No newline at end of file diff --git a/docs/ja/sql_reference/functions/ym_dict_functions.md b/docs/ja/sql_reference/functions/ym_dict_functions.md new file mode 100644 index 00000000000..858ff9aa7a9 --- /dev/null +++ b/docs/ja/sql_reference/functions/ym_dict_functions.md @@ -0,0 +1,156 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 59 +toc_title: "Yandex\u306E\u3067\u306E\u4F5C\u696D\u3002\u30E1\u30C8\u30EA\u30AB\u8F9E\ + \u66F8" +--- + +# Yandexで作業するための機能。メトリカ辞書 {#functions-for-working-with-yandex-metrica-dictionaries} + +以下の機能が機能するためには、サーバー設定はすべてのyandexを取得するためのパスとアドレスを指定する必要があります。メトリカ辞書。 辞書は、これらの関数の最初の呼び出し時にロードされます。 参照リストをロードできない場合は、例外がスローされます。 + +のための情報を参照リストの項をご参照ください “Dictionaries”. 
## 複数のジオベース {#multiple-geobases}
+
+ClickHouseは、複数の代替ジオベース(地域階層)を同時に使用して、特定の地域が属する国についてのさまざまな視点をサポートします。
+
+‘clickhouse-server’ の設定で、地域階層を持つファイルを指定します: `/opt/geo/regions_hierarchy.txt`
+
+このファイルのほかに、名前に(ファイル拡張子の前に)\_記号と任意の接尾辞が付いた近くのファイルも検索します。
+たとえば、存在すれば `/opt/geo/regions_hierarchy_ua.txt` というファイルも検索します。
+
+`ua` は辞書キーと呼ばれます。 接尾辞のない辞書の場合、キーは空の文字列です。
+
+すべての辞書は実行時に再ロードされます(builtin\_dictionaries\_reload\_interval設定パラメータで定義された秒数ごと、デフォルトでは一時間に一度)。 ただし、使用可能な辞書のリストは、サーバーの起動時に一度だけ定義されます。
+
+All functions for working with regions have an optional argument at the end – the dictionary key. It is referred to as the geobase.
+例えば:
+
+``` sql
+regionToCountry(RegionID) – Uses the default dictionary: /opt/geo/regions_hierarchy.txt
+regionToCountry(RegionID, '') – Uses the default dictionary: /opt/geo/regions_hierarchy.txt
+regionToCountry(RegionID, 'ua') – Uses the dictionary for the 'ua' key: /opt/geo/regions_hierarchy_ua.txt
+```
+
+### regionToCity(id\[, geobase\]) {#regiontocityid-geobase}
+
+Accepts a UInt32 number – the region ID from the Yandex geobase. If this region is a city or part of a city, it returns the region ID for the appropriate city. Otherwise, returns 0.
+
+### regionToArea(id\[, geobase\]) {#regiontoareaid-geobase}
+
+地域をエリア(ジオベースのタイプ5)に変換します。 その他の点では、この関数は ‘regionToCity’ と同じです。
+
+``` sql
+SELECT DISTINCT regionToName(regionToArea(toUInt32(number), 'ua'))
+FROM system.numbers
+LIMIT 15
+```
+
+``` text
+┌─regionToName(regionToArea(toUInt32(number), \'ua\'))─┐
+│                                                      │
+│ Moscow and Moscow region                             │
+│ St. Petersburg and Leningrad region                  │
+│ Belgorod region                                      │
+│ Ivanovsk region                                      │
+│ Kaluga region                                        │
+│ Kostroma region                                      │
+│ Kursk region                                         │
+│ Lipetsk region                                       │
+│ Orlov region                                         │
+│ Ryazan region                                        │
+│ Smolensk region                                      │
+│ Tambov region                                        │
+│ Tver region                                          │
+│ Tula region                                          │
+└──────────────────────────────────────────────────────┘
+```
+
+### regionToDistrict(id\[, geobase\]) {#regiontodistrictid-geobase}
+
+地域を連邦区(ジオベースのタイプ4)に変換します。 その他の点では、この関数は ‘regionToCity’ と同じです。
+
+``` sql
+SELECT DISTINCT regionToName(regionToDistrict(toUInt32(number), 'ua'))
+FROM system.numbers
+LIMIT 15
+```
+
+``` text
+┌─regionToName(regionToDistrict(toUInt32(number), \'ua\'))─┐
+│                                                          │
+│ Central federal district                                 │
+│ Northwest federal district                               │
+│ South federal district                                   │
+│ North Caucases federal district                          │
+│ Privolga federal district                                │
+│ Ural federal district                                    │
+│ Siberian federal district                                │
+│ Far East federal district                                │
+│ Scotland                                                 │
+│ Faroe Islands                                            │
+│ Flemish region                                           │
+│ Brussels capital region                                  │
+│ Wallonia                                                 │
+│ Federation of Bosnia and Herzegovina                     │
+└──────────────────────────────────────────────────────────┘
+```
+
+### regionToCountry(id\[, geobase\]) {#regiontocountryid-geobase}
+
+地域を国に変換します。 その他の点では、この関数は ‘regionToCity’ と同じです。
+例えば: `regionToCountry(toUInt32(213)) = 225` はモスクワ(213)をロシア(225)に変換します。
+
+### regionToContinent(id\[, geobase\]) {#regiontocontinentid-geobase}
+
+地域を大陸に変換します。 その他の点では、この関数は ‘regionToCity’ と同じです。
+例えば: `regionToContinent(toUInt32(213)) = 10001` はモスクワ(213)をユーラシア(10001)に変換します。
+
+### regionToTopContinent(id\[, geobase\]) {#regiontotopcontinent-regiontotopcontinent}
+
+リージョンの階層で最上位の大陸を検索します。
+
+**構文**
+
+``` sql
+regionToTopContinent(id[, geobase]);
+```
+
+**パラメータ**
+
+- `id` — Region ID from the Yandex geobase. [UInt32](../../sql_reference/data_types/int_uint.md).
+- `geobase` — Dictionary key. See [複数のジオベース](#multiple-geobases). [文字列](../../sql_reference/data_types/string.md). 
任意です。
+
+**戻り値**
+
+- 地域の階層を登ったときの最上位の大陸の識別子。
+- 存在しない場合は 0。
+
+タイプ: `UInt32`.
+
+### regionToPopulation(id\[, geobase\]) {#regiontopopulationid-geobase}
+
+地域の人口を取得します。
+人口はgeobaseのファイルに記録することができます。 “External dictionaries” のセクションを参照してください。
+リージョンに対して人口が記録されていない場合は、0を返します。
+Yandex geobaseでは、人口は子地域に対して記録されますが、親地域に対しては記録されません。
+
+### regionIn(lhs, rhs\[, geobase\]) {#regioninlhs-rhs-geobase}
+
+‘lhs’ リージョンが ‘rhs’ リージョンに属しているかどうかをチェックします。 属している場合は1、属していない場合は0のUInt8を返します。
+The relationship is reflexive – any region also belongs to itself.
+
+### regionHierarchy(id\[, geobase\]) {#regionhierarchyid-geobase}
+
+Accepts a UInt32 number – the region ID from the Yandex geobase. Returns an array of region IDs consisting of the passed region and all parents along the chain.
+例えば: `regionHierarchy(toUInt32(213)) = [213,1,3,225,10001,10000]`.
+
+### regionToName(id\[, lang\]) {#regiontonameid-lang}
+
+Accepts a UInt32 number – the region ID from the Yandex geobase. A string with the name of the language can be passed as a second argument. Supported languages are: ru, en, ua, uk, by, kz, tr. If the second argument is omitted, the language ‘ru’ is used. If the language is not supported, an exception is thrown. Returns a string – the name of the region in the corresponding language. If the region with the specified ID doesn't exist, an empty string is returned.
+
+`ua` と `uk` はどちらもウクライナ語を意味します。
+
+[元の記事](https://clickhouse.tech/docs/en/query_language/functions/ym_dict_functions/) 
diff --git a/docs/ja/sql_reference/index.md b/docs/ja/sql_reference/index.md
deleted file mode 120000
index 438c3fc7069..00000000000
--- a/docs/ja/sql_reference/index.md
+++ /dev/null
@@ -1 +0,0 @@
-../../en/sql_reference/index.md
\ No newline at end of file
diff --git a/docs/ja/sql_reference/index.md b/docs/ja/sql_reference/index.md
new file mode 100644
index 00000000000..dfa46ab7510
--- /dev/null
+++ b/docs/ja/sql_reference/index.md
@@ -0,0 +1,18 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_folder_title: SQL Reference
+toc_hidden: true
+toc_priority: 28
+toc_title: "\u96A0\u3055\u308C\u305F"
+---
+
+# SQL参照 {#sql-reference}
+
+- [SELECT](statements/select.md)
+- [INSERT INTO](statements/insert_into.md)
+- [CREATE](statements/create.md)
+- [ALTER](statements/alter.md#query_language_queries_alter)
+- [その他の種類のクエリ](statements/misc.md)
+
+[元の記事](https://clickhouse.tech/docs/en/query_language/) 
diff --git a/docs/ja/sql_reference/operators.md b/docs/ja/sql_reference/operators.md
deleted file mode 120000
index 55ed4e96920..00000000000
--- a/docs/ja/sql_reference/operators.md
+++ /dev/null
@@ -1 +0,0 @@
-../../en/sql_reference/operators.md
\ No newline at end of file
diff --git a/docs/ja/sql_reference/operators.md b/docs/ja/sql_reference/operators.md
new file mode 100644
index 00000000000..918c243ceba
--- /dev/null
+++ b/docs/ja/sql_reference/operators.md
@@ -0,0 +1,278 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 37
+toc_title: "\u6F14\u7B97\u5B50"
+---
+
+# 演算子 {#operators}
+
+すべての演算子は、優先順位と結合性に従って、クエリの解析段階で対応する関数に変換されます。
+演算子のグループは優先順位の順にリストされます(リスト内で上にあるほど、演算子はその引数と強く結び付きます)。
+
+## アクセス演算子 {#access-operators}
+
+`a[N]` – Access to an element of an array. The `arrayElement(a, N)` 機能。
+
+`a.N` – Access to a tuple element. 
The `tupleElement(a, N)` 機能。
+
+## 数値の否定の演算子 {#numeric-negation-operator}
+
+`-a` – The `negate (a)` 機能。
+
+## 乗算演算子と除算演算子 {#multiplication-and-division-operators}
+
+`a * b` – The `multiply (a, b)` 機能。
+
+`a / b` – The `divide(a, b)` 機能。
+
+`a % b` – The `modulo(a, b)` 機能。
+
+## 加算および減算演算子 {#addition-and-subtraction-operators}
+
+`a + b` – The `plus(a, b)` 機能。
+
+`a - b` – The `minus(a, b)` 機能。
+
+## 比較演算子 {#comparison-operators}
+
+`a = b` – The `equals(a, b)` 機能。
+
+`a == b` – The `equals(a, b)` 機能。
+
+`a != b` – The `notEquals(a, b)` 機能。
+
+`a <> b` – The `notEquals(a, b)` 機能。
+
+`a <= b` – The `lessOrEquals(a, b)` 機能。
+
+`a >= b` – The `greaterOrEquals(a, b)` 機能。
+
+`a < b` – The `less(a, b)` 機能。
+
+`a > b` – The `greater(a, b)` 機能。
+
+`a LIKE s` – The `like(a, b)` 機能。
+
+`a NOT LIKE s` – The `notLike(a, b)` 機能。
+
+`a BETWEEN b AND c` – The same as `a >= b AND a <= c`.
+
+`a NOT BETWEEN b AND c` – The same as `a < b OR a > c`.
+
+## データセットを操作する演算子 {#operators-for-working-with-data-sets}
+
+*[IN演算子](statements/select.md#select-in-operators) を参照してください。*
+
+`a IN ...` – The `in(a, b)` 機能。
+
+`a NOT IN ...` – The `notIn(a, b)` 機能。
+
+`a GLOBAL IN ...` – The `globalIn(a, b)` 機能。
+
+`a GLOBAL NOT IN ...` – The `globalNotIn(a, b)` 機能。
+
+## 日付と時刻を操作する演算子 {#operators-datetime}
+
+### EXTRACT {#operator-extract}
+
+``` sql
+EXTRACT(part FROM date);
+```
+
+指定した日付からパートを抽出します。 たとえば、指定した日付から月、または時刻から秒を取得できます。
+
+その `part` パラメーターは、取得する日付の部分を指定します。 次の値を使用できます:
+
+- `DAY` — The day of the month. Possible values: 1–31.
+- `MONTH` — The number of a month. Possible values: 1–12.
+- `YEAR` — The year.
+- `SECOND` — The second. Possible values: 0–59.
+- `MINUTE` — The minute. Possible values: 0–59.
+- `HOUR` — The hour. Possible values: 0–23.
+
+その `part` パラメーターは大文字と小文字を区別しません。
+
+その `date` パラメーターは、処理する日付または時刻を指定します。 [日付](../sql_reference/data_types/date.md) 型と [DateTime](../sql_reference/data_types/datetime.md) 型のどちらにも対応しています。
+
+例:
+
+``` sql
+SELECT EXTRACT(DAY FROM toDate('2017-06-15'));
+SELECT EXTRACT(MONTH FROM toDate('2017-06-15'));
+SELECT EXTRACT(YEAR FROM toDate('2017-06-15'));
+```
+
+次の例では、テーブルを作成し、その中に `DateTime` タイプの値を挿入します。
+
+``` sql
+CREATE TABLE test.Orders
+(
+    OrderId UInt64,
+    OrderName String,
+    OrderDate DateTime
+)
+ENGINE = Log;
+```
+
+``` sql
+INSERT INTO test.Orders VALUES (1, 'Jarlsberg Cheese', toDateTime('2008-10-11 13:23:44'));
+```
+
+``` sql
+SELECT
+    toYear(OrderDate) AS OrderYear,
+    toMonth(OrderDate) AS OrderMonth,
+    toDayOfMonth(OrderDate) AS OrderDay,
+    toHour(OrderDate) AS OrderHour,
+    toMinute(OrderDate) AS OrderMinute,
+    toSecond(OrderDate) AS OrderSecond
+FROM test.Orders;
+```
+
+``` text
+┌─OrderYear─┬─OrderMonth─┬─OrderDay─┬─OrderHour─┬─OrderMinute─┬─OrderSecond─┐
+│      2008 │         10 │       11 │        13 │          23 │          44 │
+└───────────┴────────────┴──────────┴───────────┴─────────────┴─────────────┘
+```
+
+より多くの例は [テスト](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00619_extract.sql) にあります。
+
+### INTERVAL {#operator-interval}
+
+[日付](../sql_reference/data_types/date.md) 型および [DateTime](../sql_reference/data_types/datetime.md) 型の値との算術演算で使用する [間隔](../sql_reference/data_types/special_data_types/interval.md) 型の値を作成します。
+
+間隔のタイプ:
+- `SECOND`
+- `MINUTE`
+- `HOUR`
+- `DAY`
+- `WEEK`
+- `MONTH`
+- `QUARTER`
+- `YEAR`
+
+!!! warning "警告"
+    種類の異なる間隔は組み合わせられません。 `INTERVAL 4 DAY 1 HOUR` のような式は使用できません。 間隔は、その最小単位以下の単位で表します。たとえば `INTERVAL 25 HOUR` のようにします。
以下の例のように、結果としての操作を使用できます。 + +例えば: + +``` sql +SELECT now() AS current_date_time, current_date_time + INTERVAL 4 DAY + INTERVAL 3 HOUR +``` + +``` text +┌───current_date_time─┬─plus(plus(now(), toIntervalDay(4)), toIntervalHour(3))─┐ +│ 2019-10-23 11:16:28 │ 2019-10-27 14:16:28 │ +└─────────────────────┴────────────────────────────────────────────────────────┘ +``` + +**また見なさい** + +- [間隔](../sql_reference/data_types/special_data_types/interval.md) データ型 +- [toInterval](../sql_reference/functions/type_conversion_functions.md#function-tointerval) 型変換関数 + +## 論理否定演算子 {#logical-negation-operator} + +`NOT a` – The `not(a)` 機能。 + +## 論理and演算子 {#logical-and-operator} + +`a AND b` – The`and(a, b)` 機能。 + +## 論理or演算子 {#logical-or-operator} + +`a OR b` – The `or(a, b)` 機能。 + +## 条件演算子 {#conditional-operator} + +`a ? b : c` – The `if(a, b, c)` 機能。 + +メモ: + +条件演算子は、bとcの値を計算し、条件aが満たされているかどうかをチェックし、対応する値を返します。 もし `b` または `C` は [arrayJoin()](../sql_reference/functions/array_join.md#functions_arrayjoin) この関数は、各行は関係なくレプリケートされます。 “a” 条件。 + +## 条件式 {#operator_case} + +``` sql +CASE [x] + WHEN a THEN b + [WHEN ... THEN ...] + [ELSE c] +END +``` + +もし `x` が指定されたら、 `transform(x, [a, ...], [b, ...], c)` function is used. Otherwise – `multiIf(a, b, ..., c)`. + +がない場合 `ELSE c` 式中の句は、デフォルト値は次のとおりです `NULL`. + +その `transform` 機能は動作しません `NULL`. + +## 連結演算子 {#concatenation-operator} + +`s1 || s2` – The `concat(s1, s2) function.` + +## ラムダ作成演算子 {#lambda-creation-operator} + +`x -> expr` – The `lambda(x, expr) function.` + +次の演算子は、角かっこであるため、優先順位がありません: + +## 配列作成演算子 {#array-creation-operator} + +`[x1, ...]` – The `array(x1, ...) function.` + +## タプル作成演算子 {#tuple-creation-operator} + +`(x1, x2, ...)` – The `tuple(x2, x2, ...) function.` + +## 結合性 {#associativity} + +すべての二項演算子は結合性を残しています。 例えば, `1 + 2 + 3` に変換される。 `plus(plus(1, 2), 3)`. +時にはこれはあなたが期待するように動作しません。 例えば, `SELECT 4 > 2 > 3` 結果は0になります。 + +効率のため、 `and` と `or` 関数は任意の数の引数を受け取ります。 対応する鎖の `AND` と `OR` 演算子は、これらの関数の単一の呼び出しに変換されます。 + +## のチェック `NULL` {#checking-for-null} + +クリックハウスは `IS NULL` と `IS NOT NULL` 演算子。 + +### IS NULL {#operator-is-null} + +- のために [Nullable](../sql_reference/data_types/nullable.md) タイプ値は、 `IS NULL` 演算子の戻り値: + - `1` 値が `NULL`. + - `0` そうでなければ +- その他の値については、 `IS NULL` 演算子は常に戻ります `0`. + + + +``` sql +SELECT x+100 FROM t_null WHERE y IS NULL +``` + +``` text +┌─plus(x, 100)─┐ +│ 101 │ +└──────────────┘ +``` + +### IS NOT NULL {#is-not-null} + +- のために [Nullable](../sql_reference/data_types/nullable.md) タイプ値は、 `IS NOT NULL` 演算子の戻り値: + - `0` 値が `NULL`. + - `1` そうでなければ +- その他の値については、 `IS NOT NULL` 演算子は常に戻ります `1`. 
+ + + +``` sql +SELECT * FROM t_null WHERE y IS NOT NULL +``` + +``` text +┌─x─┬─y─┐ +│ 2 │ 3 │ +└───┴───┘ +``` + +[元の記事](https://clickhouse.tech/docs/en/query_language/operators/) diff --git a/docs/ja/sql_reference/statements/alter.md b/docs/ja/sql_reference/statements/alter.md deleted file mode 120000 index 595bdd51d5f..00000000000 --- a/docs/ja/sql_reference/statements/alter.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/statements/alter.md \ No newline at end of file diff --git a/docs/ja/sql_reference/statements/alter.md b/docs/ja/sql_reference/statements/alter.md new file mode 100644 index 00000000000..cbcc66aaf5c --- /dev/null +++ b/docs/ja/sql_reference/statements/alter.md @@ -0,0 +1,505 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 36 +toc_title: ALTER +--- + +## ALTER {#query_language_queries_alter} + +その `ALTER` クエリーのみ対応して `*MergeTree` テーブルだけでなく、 `Merge`と`Distributed`. クエリに複数のバリエーションがあります。 + +### 列の操作 {#column-manipulations} + +テーブル構造の変更。 + +``` sql +ALTER TABLE [db].name [ON CLUSTER cluster] ADD|DROP|CLEAR|COMMENT|MODIFY COLUMN ... +``` + +クエリで、コンマ区切りのアクションのリストを指定します。 +各アクションは、列に対する操作です。 + +次の操作がサポートされます: + +- [ADD COLUMN](#alter_add-column) — Adds a new column to the table. +- [DROP COLUMN](#alter_drop-column) — Deletes the column. +- [CLEAR COLUMN](#alter_clear-column) — Resets column values. +- [COMMENT COLUMN](#alter_comment-column) — Adds a text comment to the column. +- [MODIFY COLUMN](#alter_modify-column) — Changes column's type, default expression and TTL. + +これらの動作については、以下で詳述する。 + +#### ADD COLUMN {#alter_add-column} + +``` sql +ADD COLUMN [IF NOT EXISTS] name [type] [default_expr] [codec] [AFTER name_after] +``` + +指定したテーブルに新しい列を追加します `name`, `type`, [`codec`](create.md#codecs) と `default_expr` (セクションを参照 [既定の式](create.md#create-default-values)). + +この `IF NOT EXISTS` 句が含まれている場合、列がすでに存在する場合、クエリはエラーを返しません。 指定した場合 `AFTER name_after` (名前のカラムのカラムを追加したものを指定されたもののリストテーブル列あります。 そうしないと、カラムが追加されるのです。 場合がありますので注意してない方の追加カラムの最初に表示します。 アクションの連鎖のために, `name_after` 前のアクションのいずれかで追加される列の名前を指定できます。 + +列を追加すると、データでアクションを実行せずにテーブル構造が変更されます。 データは後にディスクに表示されません `ALTER`. テーブルから読み取るときに列のデータが欠落している場合は、デフォルト値(デフォルトの式がある場合はデフォルトの式を実行するか、ゼロまたは データパーツをマージした後、ディスク上に列が表示されます [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md)). + +このアプローチにより、 `ALTER` 古いデータの量を増やすことなく、即座に照会します。 + +例えば: + +``` sql +ALTER TABLE visits ADD COLUMN browser String AFTER user_id +``` + +#### DROP COLUMN {#alter_drop-column} + +``` sql +DROP COLUMN [IF EXISTS] name +``` + +名前の列を削除します `name`. この `IF EXISTS` 句が指定されている場合、列が存在しない場合、クエリはエラーを返しません。 + +ファイルシステ これはファイル全体を削除するので、クエリはほぼ即座に完了します。 + +例えば: + +``` sql +ALTER TABLE visits DROP COLUMN browser +``` + +#### CLEAR COLUMN {#alter_clear-column} + +``` sql +CLEAR COLUMN [IF EXISTS] name IN PARTITION partition_name +``` + +すべてリセットデータ列の指定されたパーティション 詳細設定、パーティションの名前の部 [パーティション式の指定方法](#alter-how-to-specify-part-expr). 
+ +この `IF EXISTS` 句が指定されている場合、列が存在しない場合、クエリはエラーを返しません。 + +例えば: + +``` sql +ALTER TABLE visits CLEAR COLUMN browser IN PARTITION tuple() +``` + +#### COMMENT COLUMN {#alter_comment-column} + +``` sql +COMMENT COLUMN [IF EXISTS] name 'comment' +``` + +列にコメントを追加します。 この `IF EXISTS` 句が指定されている場合、列が存在しない場合、クエリはエラーを返しません。 + +それぞれの列ができています。 列にコメントが既に存在する場合、新しいコメントは前のコメントを上書きします。 + +コメントは `comment_expression` によって返される列 [DESCRIBE TABLE](misc.md#misc-describe-table) クエリ。 + +例えば: + +``` sql +ALTER TABLE visits COMMENT COLUMN browser 'The table shows the browser used for accessing the site.' +``` + +#### MODIFY COLUMN {#alter_modify-column} + +``` sql +MODIFY COLUMN [IF EXISTS] name [type] [default_expr] [TTL] +``` + +このクエリは、 `name` 列のプロパティ: + +- タイプ + +- 既定の式 + +- TTL + + For examples of columns TTL modifying, see [Column TTL](../engines/table_engines/mergetree_family/mergetree.md#mergetree-column-ttl). + +この `IF EXISTS` 句が指定されている場合、列が存在しない場合、クエリはエラーを返しません。 + +タイプを変更すると、値は次のように変換されます。 [toType](../../sql_reference/functions/type_conversion_functions.md) 関数がそれらに適用された。 デフォルトの式だけが変更された場合、クエリは何も複雑ではなく、ほぼ即座に完了します。 + +例えば: + +``` sql +ALTER TABLE visits MODIFY COLUMN browser Array(String) +``` + +Changing the column type is the only complex action – it changes the contents of files with data. For large tables, this may take a long time. + +いくつかの処理段階があります: + +- 変更されたデータを含む一時(新しい)ファイルの準備。 +- 古いファイルの名前を変更する。 +- 一時(新しい)ファイルの名前を古い名前に変更します。 +- 古いファイルを削除する。 + +最初の段階だけに時間がかかります。 この段階で障害が発生した場合、データは変更されません。 +連続したステージのいずれかで障害が発生した場合は、データを手動で復元できます。 古いファイルがファイルシステムから削除されたが、新しいファイルのデータは、ディスクに書き込まれませんでしたし、失われた場合は例外です。 + +その `ALTER` クエリの変更カラムがそのままに再現されています。 指示はZooKeeperに保存され、各レプリカはそれらを適用します。 すべて `ALTER` クエリは同じ順序で実行されます。 クエリは、他のレプリカで適切なアクションが完了するのを待機します。 ただし、レプリケートされたテーブルの列を変更するクエリは中断され、すべてのアクションは非同期に実行されます。 + +#### ALTER Queryの制限 {#alter-query-limitations} + +その `ALTER` クエリを作成および削除個別要素(カラム)をネストしたデータ構造が全体に入れ子データ構造です。 ネストされたデータ構造を追加するには、次のような名前の列を追加します `name.nested_name` そしてタイプ `Array(T)`. ネストされたデータ構造は、ドットの前に同じ接頭辞を持つ名前を持つ複数の配列列列と同等です。 + +主キーまたはサンプリングキーの列の削除はサポートされていません。 `ENGINE` 式)。 主キーに含まれる列の型を変更することは、この変更によってデータが変更されない場合にのみ可能です(たとえば、値を列挙型に追加したり、型を変更 `DateTime` に `UInt32`). 
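+
+たとえば、主キーに含まれる `Enum` 列への新しい値の追加は、既存のデータを変更しないため許可されます。以下は仮の `events` テーブルを使ったスケッチです(テーブル名と列名は説明用の仮のものです)。
+
+``` sql
+-- 仮の例: ORDER BY に含まれる Enum 列に新しい値 'canceled' を追加する。
+-- 既存の値('new' = 1, 'done' = 2)は変更されないため、この変更は許可されます。
+ALTER TABLE events MODIFY COLUMN status Enum8('new' = 1, 'done' = 2, 'canceled' = 3)
+```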
+ +この `ALTER` クエリは必要なテーブルの変更を行うのに十分ではありません。 [INSERT SELECT](insert_into.md#insert_query_insert-select) クエリを使用してテーブルを切り替えます。 [RENAME](misc.md#misc_operations-rename) 古いテーブルを照会して削除します。 を使用することができ [クリックハウスコピー機](../../operations/utilities/clickhouse-copier.md) に代わるものとして `INSERT SELECT` クエリ。 + +その `ALTER` クエリーのブロックすべてを読み込みと書き込んでいます。 言い換えれば、長い場合 `SELECT` の時に動いています `ALTER` クエリ、 `ALTER` クエリはそれが完了するのを待ちます。 同時に、同じテーブルに対するすべての新しいクエリは、 `ALTER` 走ってる + +データ自体を格納しないテーブルの場合 `Merge` と `Distributed`), `ALTER` テーブル構造を変更するだけで、下位テーブルの構造は変更されません。 たとえば、ALTERを実行している場合 `Distributed` テーブル、また、実行する必要があります `ALTER` テーブルのすべてすることができます。 + +### キー式による操作 {#manipulations-with-key-expressions} + +以下のコマン: + +``` sql +MODIFY ORDER BY new_expression +``` + +それはの表のためにだけ働きます [`MergeTree`](../../engines/table_engines/mergetree_family/mergetree.md) ファミリー(含む +[複製された](../../engines/table_engines/mergetree_family/replication.md) テーブル)。 このコマンドは、 +[ソートキー](../../engines/table_engines/mergetree_family/mergetree.md) テーブルの +に `new_expression` (式または式のタプル)。 主キーは同じままです。 + +このコマンドは、メタデータのみを変更するという意味で軽量です。 データ部分のプロパティを保持するには +既存の列を含む式を追加することはできません。 +ソートキーに(列のみが追加されました。 `ADD COLUMN` 同じでコマンド `ALTER` クエリ)。 + +### データスキップインデックスの操作 {#manipulations-with-data-skipping-indices} + +それはの表のためにだけ働きます [`*MergeTree`](../../engines/table_engines/mergetree_family/mergetree.md) ファミリー(含む +[複製された](../../engines/table_engines/mergetree_family/replication.md) テーブル)。 次の操作 +利用できます: + +- `ALTER TABLE [db].name ADD INDEX name expression TYPE type GRANULARITY value AFTER name [AFTER name2]` -付加価指数の説明をテーブルメタデータを指すものとします。 + +- `ALTER TABLE [db].name DROP INDEX name` -除去す指標の説明からテーブルメタデータを削除を行指数のファイルからディスク。 + +これらのコマ +また、その複製(同期指標のメタデータを通して飼育係). + +### 制約による操作 {#manipulations-with-constraints} + +るの詳細を参照してください [制約](create.md#constraints) + +次の構文を使用して制約を追加または削除できます: + +``` sql +ALTER TABLE [db].name ADD CONSTRAINT constraint_name CHECK expression; +ALTER TABLE [db].name DROP CONSTRAINT constraint_name; +``` + +クエリに追加または削除約メタデータの制約からテーブルで、速やかに処理します。 + +制約チェック *実行されません* 既存のデータが追加された場合。 + +変更後の内容の複製のテーブル放送への飼育係で適用されますその他のレプリカ. + +### パーティションとパーツの操作 {#alter_manipulations-with-partitions} + +以下の操作 [パーティシ](../../engines/table_engines/mergetree_family/custom_partitioning_key.md) 利用できます: + +- [DETACH PARTITION](#alter_detach-partition) – Moves a partition to the `detached` ディレク +- [DROP PARTITION](#alter_drop-partition) – Deletes a partition. +- [ATTACH PART\|PARTITION](#alter_attach-partition) – Adds a part or partition from the `detached` テーブルへのディレクトリ。 +- [REPLACE PARTITION](#alter_replace-partition) -データパーティションをテーブル間でコピーします。 +- [ATTACH PARTITION FROM](#alter_attach-partition-from) – Copies the data partition from one table to another and adds. +- [REPLACE PARTITION](#alter_replace-partition) -コピーするデータを仕切りからテーブルにも置き換え. +- [MOVE PARTITION TO TABLE](#alter_move_to_table-partition) (\#alter\_move\_to\_table-partition)-あるテーブルから別のテーブルにデータパーティションを移動します。 +- [CLEAR COLUMN IN PARTITION](#alter_clear-column-partition) -パーティション内の指定された列の値をリセットします。 +- [CLEAR INDEX IN PARTITION](#alter_clear-index-partition) -リセットの指定された二次インデックス、パーティション +- [FREEZE PARTITION](#alter_freeze-partition) – Creates a backup of a partition. +- [FETCH PARTITION](#alter_fetch-partition) – Downloads a partition from another server. +- [MOVE PARTITION\|PART](#alter_move-partition) – Move partition/data part to another disk or volume. 
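+
+以下の各操作では、対象のパーティションを指定する必要があります。既存のパーティションの名前は、たとえば次のように `system.parts` テーブルで確認できます(`visits` はこのドキュメントの例で使われているテーブル名です。詳細は後述の [パーティション式の指定方法](#alter-how-to-specify-part-expr) を参照してください)。
+
+``` sql
+-- 指定したテーブルのアクティブなパーツが属するパーティションの一覧を取得する
+SELECT DISTINCT partition
+FROM system.parts
+WHERE table = 'visits' AND active
+```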
+ + + +#### デタッチパーティション{\#alter\_detach-partition} {#detach-partition-alter-detach-partition} + +``` sql +ALTER TABLE table_name DETACH PARTITION partition_expr +``` + +指定されたパーティションのすべてのデータを `detached` ディレクトリ。 サーバーのを忘れているのは、一戸建てのデータを分配していない場合は存在します。 サーバーはこのデータについて知りません。 [ATTACH](#alter_attach-partition) クエリ。 + +例えば: + +``` sql +ALTER TABLE visits DETACH PARTITION 201901 +``` + +セクションのpartition expressionの設定についての記事を読む [パーティション式の指定方法](#alter-how-to-specify-part-expr). + +クエリが実行された後、データを使用して必要な操作を行うことができます `detached` directory — delete it from the file system, or just leave it. + +This query is replicated – it moves the data to the `detached` すべての複製のディレクトリ。 このクエリはリーダーレプリカでのみ実行できます。 レプリカがリーダーかどうかを調べるには、以下を実行します `SELECT` にクエリ [システム。レプリカ](../../operations/system_tables.md#system_tables-replicas) テーブル。 また、作ることは容易です `DETACH` クエリはすべてのレプリカ-すべてのレプリカ、例外をスロー以外のリーダーレプリカ. + +#### DROP PARTITION {#alter_drop-partition} + +``` sql +ALTER TABLE table_name DROP PARTITION partition_expr +``` + +削除指定された分割テーブルから. このクエリのタグの仕切りとして休止または消去いたしますデータを完全に約10分です。 + +セクションのpartition expressionの設定についての記事を読む [パーティション式の指定方法](#alter-how-to-specify-part-expr). + +The query is replicated – it deletes data on all replicas. + +#### DROP DETACHED PARTITION\|PART {#alter_drop-detached} + +``` sql +ALTER TABLE table_name DROP DETACHED PARTITION|PART partition_expr +``` + +指定したパーティショ `detached`. +セクションのパーティション式の設定の詳細 [パーティション式の指定方法](#alter-how-to-specify-part-expr). + +#### ATTACH PARTITION\|PART {#alter_attach-partition} + +``` sql +ALTER TABLE table_name ATTACH PARTITION|PART partition_expr +``` + +テーブルにデータを追加します。 `detached` ディレクトリ。 パーティション全体または別のパートにデータを追加することができます。 例: + +``` sql +ALTER TABLE visits ATTACH PARTITION 201901; +ALTER TABLE visits ATTACH PART 201901_2_2_0; +``` + +セクションのパーティション式の設定の詳細 [パーティション式の指定方法](#alter-how-to-specify-part-expr). + +このクエリは複製されます。 のレプリカ-イニシエータチェックがあるか否かのデータを `detached` ディレクトリ。 データが存在する場合、クエリは整合性をチェックします。 すべてが正しい場合、クエリはデータをテーブルに追加します。 他のすべてのレプリカをダウンロードからデータのレプリカ-イニシエータです。 + +したがって、データを `detached` ディレクトリを使用します。 `ALTER ... ATTACH` すべてのレプリカのテーブルにクエリを追加します。 + +#### ATTACH PARTITION FROM {#alter_attach-partition-from} + +``` sql +ALTER TABLE table2 ATTACH PARTITION partition_expr FROM table1 +``` + +このクエリは、データパーティションを `table1` に `table2` のexsistingにデータを加えます `table2`. データは削除されないことに注意してください `table1`. + +クエリを正常に実行するには、次の条件を満たす必要があります: + +- 両方のテーブルに同じ構造が必要です。 +- 両方の表に同じパーティション-キーが必要です。 + +#### REPLACE PARTITION {#alter_replace-partition} + +``` sql +ALTER TABLE table2 REPLACE PARTITION partition_expr FROM table1 +``` + +このクエリは、データパーティションを `table1` に `table2` そしての既存の仕切りを取り替えます `table2`. データは削除されないことに注意してください `table1`. + +クエリを正常に実行するには、次の条件を満たす必要があります: + +- 両方のテーブルに同じ構造が必要です。 +- 両方の表に同じパーティション-キーが必要です。 + +#### MOVE PARTITION TO TABLE {#alter_move_to_table-partition} + +``` sql +ALTER TABLE table_source MOVE PARTITION partition_expr TO TABLE table_dest +``` + +このクエリは、データパーティションを `table_source` に `table_dest` からデータを削除すると `table_source`. 
+ +クエリを正常に実行するには、次の条件を満たす必要があります: + +- 両方のテーブルに同じ構造が必要です。 +- 両方の表に同じパーティション-キーが必要です。 +- 両方のテーブルと同じでなければならエンジンです。 (複製または非レプリケート) +- 両方の表に同じストレージポリシーが必要です。 + +#### CLEAR COLUMN IN PARTITION {#alter_clear-column-partition} + +``` sql +ALTER TABLE table_name CLEAR COLUMN column_name IN PARTITION partition_expr +``` + +パーティショ この `DEFAULT` このクエリは、指定されたデフォルト値に列の値を設定し、テーブルを作成するときに句が決定された。 + +例えば: + +``` sql +ALTER TABLE visits CLEAR COLUMN hour in PARTITION 201902 +``` + +#### FREEZE PARTITION {#alter_freeze-partition} + +``` sql +ALTER TABLE table_name FREEZE [PARTITION partition_expr] +``` + +このクエ この `PARTITION` 条項を省略して、クエリーを作成し、バックアップの仕切ります。 + +!!! note "メモ" + バックアップ処理全体は、サーバーを停止せずに実行されます。 + +古いスタイルのテーブルでは、パーティション名のプレフィックスを指定できます(例, ‘2019’)のクエリーを作成し、バックアップのためのすべてに対応する隔壁 セクションのpartition expressionの設定についての記事を読む [パーティション式の指定方法](#alter-how-to-specify-part-expr). + +実行時に、データスナップショットの場合、クエリはテーブルデータへのハードリンクを作成します。 ディレクト `/var/lib/clickhouse/shadow/N/...`、どこ: + +- `/var/lib/clickhouse/` 設定で指定されたClickHouseの作業ディレクトリです。 +- `N` バックアップの増分数です。 + +!!! note "メモ" + 使用する場合 [テーブル内のデータストレージのディスクのセット](../../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-multiple-volumes)、を `shadow/N` ディレクトリが表示される毎にディスクデータを格納する部品と合わせによる `PARTITION` 式。 + +同じディレクトリ構造がバックアップ内に作成されます。 `/var/lib/clickhouse/`. クエリが実行されます ‘chmod’ すべてのファイルについて、それらへの書き込みを禁止。 + +バックアップを作成したら、次のデータをコピーできます `/var/lib/clickhouse/shadow/` リモートサーバーに移動し、ローカルサーバーから削除します。 それに注意しなさい `ALTER t FREEZE PARTITION` クエリは複製されません。 するための地元のバックアップ、現地サーバーです。 + +クエリをバックアップトで最初のでお待ちしておりますので、現在のクエリーに対応するテーブルに仕上げた。 + +`ALTER TABLE t FREEZE PARTITION` コピーのみのデータのないテーブルメタデータを指すものとします。 をバックアップテーブルメタデータ、コピー、ファイル `/var/lib/clickhouse/metadata/database/table.sql` + +バックアップからデータを復元するには: + +1. テーブルが存在しない場合はテーブルを作成します。 クエリを表示するには、を使用します。sqlファイル(置換 `ATTACH` それで `CREATE`). +2. からデータをコピーします `data/database/table/` バックアップの中のディレクトリ `/var/lib/clickhouse/data/database/table/detached/` ディレクトリ。 +3. 走れ。 `ALTER TABLE t ATTACH PARTITION` データをテーブルに追加するクエリ。 + +バックア + +バックアップおよびデータの復元の詳細については、次を参照 [データバック](../../operations/backup.md) セクション。 + +#### CLEAR INDEX IN PARTITION {#alter_clear-index-partition} + +``` sql +ALTER TABLE table_name CLEAR INDEX index_name IN PARTITION partition_expr +``` + +クエリは次のように動作します `CLEAR COLUMN` しかし、列データの代わりに索引をリセットします。 + +#### FETCH PARTITION {#alter_fetch-partition} + +``` sql +ALTER TABLE table_name FETCH PARTITION partition_expr FROM 'path-in-zookeeper' +``` + +ダウンロードパーティションから別のサーバーです。 このクエリーだけを再現します。 + +クエリは次の処理を行います: + +1. 指定したシャードからパーティションをダウ で ‘path-in-zookeeper’ ZooKeeperでシャードへのパスを指定する必要があります。 +2. 次に、クエリはダウンロードされたデータを `detached` のディレクトリ `table_name` テーブル。 を使用 [ATTACH PARTITION\|PART](#alter_attach-partition) データをテーブルに追加するためのクエリ。 + +例えば: + +``` sql +ALTER TABLE users FETCH PARTITION 201902 FROM '/clickhouse/tables/01-01/visits'; +ALTER TABLE users ATTACH PARTITION 201902; +``` + +それに注意: + +- その `ALTER ... FETCH PARTITION` クエリは複製されません。 それはに仕切りを置きます `detached` ディレクト +- その `ALTER TABLE ... ATTACH` クエリが複製されます。 すべてのレプリカにデータを追加します。 データは、次のいずれかのレプリカに追加されます。 `detached` ディレクトリ、および他の人に-近隣のレプリカから。 + +ダウンロードする前に、システムかどうかをチェックすると、パーティションが存在するとテーブル構造。 最も適切なレプリカは、正常なレプリカから自動的に選択されます。 + +クエリは呼び出されますが `ALTER TABLE` テーブル構造は変更されず、テーブルで使用できるデータもすぐには変更されません。 + +#### MOVE PARTITION\|PART {#alter_move-partition} + +別のボリュームまたはディ `MergeTree`-エンジンテーブル。 見る [複数ブロックデバイスを使用したデータ保存](../../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-multiple-volumes). 
+ +``` sql +ALTER TABLE table_name MOVE PARTITION|PART partition_expr TO DISK|VOLUME 'disk_name' +``` + +その `ALTER TABLE t MOVE` クエリ: + +- な再現が異なるレプリカで保管。 +- 指定されたディスクまたはボリ また、ストレージポリシーで指定されたデータ移動の条件を適用できない場合は、エラーが返されます。 +- 移動するデータがバックグラウンドプロセスによって既に移動されている場合にエラーを返すことができます。 `ALTER TABLE t MOVE` クエリとして結果データの統合. この場合、ユーザーは追加の操作を行うべきではありません。 + +例えば: + +``` sql +ALTER TABLE hits MOVE PART '20190301_14343_16206_438' TO VOLUME 'slow' +ALTER TABLE hits MOVE PARTITION '2019-09-01' TO DISK 'fast_ssd' +``` + +#### パーティション式の設定方法 {#alter-how-to-specify-part-expr} + +パーティション式を指定するには `ALTER ... PARTITION` 異なる方法でクエリ: + +- からの値として `partition` の列 `system.parts` テーブル。 例えば, `ALTER TABLE visits DETACH PARTITION 201901`. +- テーブル列からの式として。 定数と定数式がサポートされています。 例えば, `ALTER TABLE visits DETACH PARTITION toYYYYMM(toDate('2019-01-25'))`. +- パーティションidの使用。 パーティションidは、ファイルシステムおよびzookeeper内のパーティションの名前として使用されるパーティションの文字列識別子です(可能であれば、人間が パーティションidを指定する必要があります。 `PARTITION ID` 一重引quotesでの句。 例えば, `ALTER TABLE visits DETACH PARTITION ID '201901'`. +- で [ALTER ATTACH PART](#alter_attach-partition) と [DROP DETACHED PART](#alter_drop-detached) クエリ、パートの名前を指定するには、文字列リテラルを使用します。 `name` の列 [システム。detached\_parts](../../operations/system_tables.md#system_tables-detached_parts) テーブル。 例えば, `ALTER TABLE visits ATTACH PART '201901_1_1_0'`. + +ご利用の引用符を指定する場合、パーティションのエントランスは目を引く壁面緑化を表現。 たとえば、 `String` その名前を引用符で指定する必要があります (`'`). のための `Date` と `Int*` タイプ引用符は必要ありません。 + +古いスタイルのテーブルの場合は、パーティションを数値として指定できます `201901` または文字列 `'201901'`. 新しいスタイルのテーブルの構文は、型が厳しくなります(VALUES入力フォーマットのパーサーと同様)。 + +上記のすべてのルールは、 [OPTIMIZE](misc.md#misc_operations-optimize) クエリ。 を指定する場合にのみ分配時の最適化、非仕切られたテーブルセットの表現 `PARTITION tuple()`. 例えば: + +``` sql +OPTIMIZE TABLE table_not_partitioned PARTITION tuple() FINAL; +``` + +の例 `ALTER ... PARTITION` クエリはテストで実証されています [`00502_custom_partitioning_local`](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00502_custom_partitioning_local.sql) と [`00502_custom_partitioning_replicated_zookeeper`](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper.sql). + +### テーブルttlによる操作 {#manipulations-with-table-ttl} + +変更することができ [テーブルTTL](../../engines/table_engines/mergetree_family/mergetree.md#mergetree-table-ttl) 次のフォームの要求で: + +``` sql +ALTER TABLE table-name MODIFY TTL ttl-expression +``` + +### ALTERクエリのシンクロニシティ {#synchronicity-of-alter-queries} + +非複製可能なテーブルの場合は、すべて `ALTER` クエリは同期的に実行されます。 のためのreplicatableテーブル、クエリーだけで追加指示のための適切な行動を `ZooKeeper`、そしてアクション自体はできるだけ早く実行されます。 しかし、クエリーが待機するためにこれらの行動は完了するすべてのレプリカ. + +のために `ALTER ... ATTACH|DETACH|DROP` クエリを使用することができます `replication_alter_partitions_sync` 待ちを設定する設定。 +可能な値: `0` – do not wait; `1` – only wait for own execution (default); `2` – wait for all. + +### 突然変異 {#alter-mutations} + +突然変異は、テーブル内の行を変更または削除できるようにするalter query variantです。 標準とは対照的に `UPDATE` と `DELETE` ポイントデータの変更を目的としたクエリ、突然変異は、テーブル内の多くの行を変更する重い操作を目的としています。 のために支えられる `MergeTree` 家族のテーブルエンジンなどのエンジンの複製です。 + +既存のテーブルはそのまま変異可能です(変換は必要ありません)が、最初の変更がテーブルに適用されると、そのメタデータ形式は以前のサーバーバージョンと + +現在使用可能なコマンド: + +``` sql +ALTER TABLE [db.]table DELETE WHERE filter_expr +``` + +その `filter_expr` 型でなければならな `UInt8`. クエリは、この式がゼロ以外の値をとるテーブルの行を削除します。 + +``` sql +ALTER TABLE [db.]table UPDATE column1 = expr1 [, ...] WHERE filter_expr +``` + +その `filter_expr` 型でなければならな `UInt8`. 
このクエリは、指定された列の値を、対応する式の値に更新します。 `filter_expr` ゼロ以外の値をとります。 値は列タイプにキャストされます。 `CAST` オペレーター プライマリキーまたはパーティションキーの計算で使用される列の更新はサポートされません。 + +``` sql +ALTER TABLE [db.]table MATERIALIZE INDEX name IN PARTITION partition_name +``` + +クエリを再建の二次指数 `name` パーティション内 `partition_name`. + +一つのクエリを含むことができ複数のコマンドをカンマで区切られています。 + +用\*mergetreeテーブル突然変異の実行による書き換え全体のデータ部品です。 atomicity-部品は準備ができているおよびaとすぐ変異する部品の代わりになりますありません `SELECT` 変異中に実行を開始したクエリには、まだ変更されていない部分のデータと共に既に変更されている部分のデータが表示されます。 + +突然変異は、作成順序によって完全に順序付けられ、その順序で各パートに適用されます。 突然変異が提出される前にテーブルに挿入されたデータは突然変異され、その後に挿入されたデータは突然変異されません。 この変異のないブロックを挿入します。 + +変更クエリは、変更エントリが追加された直後に返されます(レプリケートされたテーブルがzookeeperにある場合、非レプリケートされたテーブルがファイルシス の突然変異体の執行を非同利用システムの概要を設定します。 突然変異の進行状況を追跡するには、 [`system.mutations`](../../operations/system_tables.md#system_tables-mutations) テーブル。 正常に送信された変更は、ClickHouseサーバーが再起動されても引き続き実行されます。 それが提出されると突然変異をロールバックする方法はありませんが、何らかの理由で突然変異が起こった場合、それをキャンセルすることができ [`KILL MUTATION`](misc.md#kill-mutation) クエリ。 + +終了した突然変異のためのエントリはすぐに削除されません(保存されたエントリの数は `finished_mutations_to_keep` ストレージエンジン変数)。 古い変異エントリが削除されます。 + +[元の記事](https://clickhouse.tech/docs/en/query_language/alter/) diff --git a/docs/ja/sql_reference/statements/create.md b/docs/ja/sql_reference/statements/create.md deleted file mode 120000 index d8bc16ca60f..00000000000 --- a/docs/ja/sql_reference/statements/create.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/statements/create.md \ No newline at end of file diff --git a/docs/ja/sql_reference/statements/create.md b/docs/ja/sql_reference/statements/create.md new file mode 100644 index 00000000000..95fac55bfc0 --- /dev/null +++ b/docs/ja/sql_reference/statements/create.md @@ -0,0 +1,309 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 35 +toc_title: CREATE +--- + +# クエリの作成 {#create-queries} + +## CREATE DATABASE {#query-language-create-database} + +データベースの作成。 + +``` sql +CREATE DATABASE [IF NOT EXISTS] db_name [ON CLUSTER cluster] [ENGINE = engine(...)] +``` + +### 句 {#clauses} + +- `IF NOT EXISTS` + + If the `db_name` database already exists, then ClickHouse doesn't create a new database and: + + - Doesn't throw an exception if clause is specified. + - Throws an exception if clause isn't specified. + +- `ON CLUSTER` + + ClickHouse creates the `db_name` database on all the servers of a specified cluster. + +- `ENGINE` + + - [MySQL](../engines/database_engines/mysql.md) + + Allows you to retrieve data from the remote MySQL server. + + By default, ClickHouse uses its own [database engine](../engines/database_engines/index.md). + +## CREATE TABLE {#create-table-query} + +その `CREATE TABLE` クエリには複数の形式を使用できます。 + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] +( + name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [compression_codec] [TTL expr1], + name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2] [compression_codec] [TTL expr2], + ... +) ENGINE = engine +``` + +名前の付いた表を作成します ‘name’ で ‘db’ データベースまたは ‘db’ は設定されていない。 ‘engine’ エンジン。 +テーブルの構造は、列の説明のリストです。 た場合の指数については、エンジンとして表示していパラメータテーブルのエンジンです。 + +列の説明は次のとおりです `name type` 最も単純なケースでは。 例えば: `RegionID UInt32`. +デフォルト値に対して式を定義することもできます(下記参照)。 + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name AS [db2.]name2 [ENGINE = engine] +``` + +別のテーブルと同じ構造のテーブルを作成します。 テーブルに別のエンジンを指定できます。 エンジンが指定されていない場合は、同じエンジンが `db2.name2` テーブル。 + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name AS table_function() +``` + +テーブルを作成しますの構造やデータによって返される [テーブル機能](../table_functions/index.md). 
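+
+たとえば、`file` テーブル関数を使うと次のように書けます(ファイル名と構造は説明用の仮のものであり、ファイルはサーバーの user\_files ディレクトリにあると仮定しています)。
+
+``` sql
+-- 仮の例: data.tsv と同じ構造とデータソースを持つテーブルを作成する
+CREATE TABLE fs_data AS file('data.tsv', 'TabSeparated', 'a Int32, b String')
+```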
+ +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name ENGINE = engine AS SELECT ... +``` + +の結果のような構造を持つテーブルを作成します。 `SELECT` クエリ、 ‘engine’ エンジンは、SELECTからのデータでそれを埋めます。 + +すべての場合において、 `IF NOT EXISTS` テーブルが既に存在する場合、クエリはエラーを返しません。 この場合、クエリは何もしません。 + +後に他の節がある場合もあります `ENGINE` クエリ内の句。 テーブルの作成方法に関する詳細なドキュメントを参照してください [表エンジン](../../engines/table_engines/index.md#table_engines). + +### デフォルト値 {#create-default-values} + +列の説明では、次のいずれかの方法で、既定値の式を指定できます:`DEFAULT expr`, `MATERIALIZED expr`, `ALIAS expr`. +例えば: `URLDomain String DEFAULT domain(URL)`. + +デフォルト値の式が定義されていない場合、デフォルト値は数値の場合はゼロに、文字列の場合は空の文字列に、配列の場合は空の配列に設定され `0000-00-00` 日付または `0000-00-00 00:00:00` 時間の日付のため。 Nullはサポートされていません。 + +既定の式が定義されている場合、列の型は省略可能です。 明示的に定義された型がない場合は、既定の式の型が使用されます。 例えば: `EventDate DEFAULT toDate(EventTime)` – the ‘Date’ タイプは ‘EventDate’ コラム + +データ型と既定の式が明示的に定義されている場合、この式は型キャスト関数を使用して指定された型にキャストされます。 例えば: `Hits UInt32 DEFAULT 0` と同じことを意味します `Hits UInt32 DEFAULT toUInt32(0)`. + +Default expressions may be defined as an arbitrary expression from table constants and columns. When creating and changing the table structure, it checks that expressions don't contain loops. For INSERT, it checks that expressions are resolvable – that all columns they can be calculated from have been passed. + +`DEFAULT expr` + +通常のデフォルト値。 insertクエリで対応する列が指定されていない場合は、対応する式を計算して入力します。 + +`MATERIALIZED expr` + +マテリアライズド式。 このような列は、常に計算されるため、insertに指定することはできません。 +列のリストのないinsertの場合、これらの列は考慮されません。 +また、selectクエリでアスタリスクを使用する場合、この列は置換されません。 これは、ダンプが以下を使用して取得した不変量を保持するためです `SELECT *` 列のリストを指定せずにINSERTを使用してテーブルに戻すことができます。 + +`ALIAS expr` + +同義語。 このような列は、テーブルにはまったく格納されません。 +その値はテーブルに挿入することはできず、selectクエリでアスタリスクを使用するときは置換されません。 +クエリの解析中にエイリアスが展開されている場合は、selectで使用できます。 + +ALTER queryを使用して新しい列を追加する場合、これらの列の古いデータは書き込まれません。 代わりに、新しい列の値を持たない古いデータを読み取る場合、式は既定でオンザフライで計算されます。 ただし、式を実行するために、クエリで指定されていない異なる列が必要な場合、これらの列は追加で読み取られますが、必要なデータブロックに対し + +新しい列をテーブルに追加し、後でそのデフォルトの式を変更すると、古いデータに使用される値が変更されます(ディスクに値が格納されていないデー バックグラウンドマージを実行すると、マージパーツのいずれかにない列のデータがマージされたパーツに書き込まれます。 + +入れ子になったデータ構造の要素の既定値を設定することはできません。 + +### 制約 {#constraints} + +列と共に、説明の制約を定義することができます: + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] +( + name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [compression_codec] [TTL expr1], + ... + CONSTRAINT constraint_name_1 CHECK boolean_expr_1, + ... +) ENGINE = engine +``` + +`boolean_expr_1` 任意のブール式でできます。 場合に制約の定義のテーブルのそれぞれチェック毎に行 `INSERT` query. If any constraint is not satisfied — server will raise an exception with constraint name and checking expression. + +追加大量の制約になる可能性の性能を大 `INSERT` クエリ。 + +### TTL式 {#ttl-expression} + +値の保存時間を定義します。 mergetree-familyテーブルにのみ指定できます。 詳細な説明については、 [列とテーブルのttl](../../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-ttl). + +### 列圧縮コーデック {#codecs} + +デフォルトでは、clickhouseは `lz4` 圧縮方法。 のために `MergeTree`-エンジンファミリでは、デフォルトの圧縮方法を変更できます [圧縮](../../operations/server_configuration_parameters/settings.md#server-settings-compression) サーバー構成のセクション。 また、各列の圧縮方法を定義することもできます。 `CREATE TABLE` クエリ。 + +``` sql +CREATE TABLE codec_example +( + dt Date CODEC(ZSTD), + ts DateTime CODEC(LZ4HC), + float_value Float32 CODEC(NONE), + double_value Float64 CODEC(LZ4HC(9)) + value Float32 CODEC(Delta, ZSTD) +) +ENGINE = +... +``` + +コーデックが指定されている場合、既定のコーデックは適用されません。 コーデックの組合せでのパイプライン、例えば, `CODEC(Delta, ZSTD)`. の選定と大型ブリッジダイオードコーデックの組み合わせますプロジェクト、ベンチマークと同様に記載のAltinity [ClickHouseの効率を改善する新しいエンコーディング](https://www.altinity.com/blog/2019/7/new-encodings-to-improve-clickhouse) 記事。 + +!!! 
warning "警告" + できない解凍clickhouseデータベースファイルを外部の事のように `lz4`. 代わりに、特別な [clickhouse-コンプレッサー](https://github.com/ClickHouse/ClickHouse/tree/master/programs/compressor) 効用だ + +圧縮できるようになりました以下のテーブルエンジン: + +- [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md) 家族 列圧縮コーデックをサポートし、既定の圧縮方法を選択する [圧縮](../../operations/server_configuration_parameters/settings.md#server-settings-compression) 設定。 +- [ログ](../../engines/table_engines/log_family/log_family.md) 家族 使用します `lz4` 圧縮メソッドはデフォルト対応カラムの圧縮コーデック. +- [セット](../../engines/table_engines/special/set.md). 唯一のデフォルトの圧縮をサポート。 +- [参加](../../engines/table_engines/special/join.md). 唯一のデフォルトの圧縮をサポート。 + +ClickHouse支援共通の目的コーデックや専門のコーデック. + +#### 特殊コーデック {#create-query-specialized-codecs} + +これらのコーデックしていただくための圧縮により効果的な利用の特徴データです。 これらのコーデックの一部は、データ自身を圧縮しない。 その代わりに、それらのデータを共通の目的コーデックは、圧縮です。 + +特殊コーデック: + +- `Delta(delta_bytes)` — Compression approach in which raw values are replaced by the difference of two neighboring values, except for the first value that stays unchanged. Up to `delta_bytes` デルタ値を格納するために使用されます。 `delta_bytes` raw値の最大サイズです。 可能 `delta_bytes` 値:1,2,4,8. のデフォルト値 `delta_bytes` は `sizeof(type)` 1、2、4、または8に等しい場合。 それ以外の場合は1です。 +- `DoubleDelta` — Calculates delta of deltas and writes it in compact binary form. Optimal compression rates are achieved for monotonic sequences with a constant stride, such as time series data. Can be used with any fixed-width type. Implements the algorithm used in Gorilla TSDB, extending it to support 64-bit types. Uses 1 extra bit for 32-byte deltas: 5-bit prefixes instead of 4-bit prefixes. For additional information, see Compressing Time Stamps in [Gorilla:高速でスケーラブルなメモリ内の時系列データベース](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf). +- `Gorilla` — Calculates XOR between current and previous value and writes it in compact binary form. Efficient when storing a series of floating point values that change slowly, because the best compression rate is achieved when neighboring values are binary equal. Implements the algorithm used in Gorilla TSDB, extending it to support 64-bit types. For additional information, see Compressing Values in [Gorilla:高速でスケーラブルなメモリ内の時系列データベース](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf). +- `T64` — Compression approach that crops unused high bits of values in integer data types (including `Enum`, `Date` と `DateTime`). アルゴリズムの各ステップで、codecは64値のブロックを取り、64x64ビット行列にそれらを入れ、それを転置し、未使用の値をトリミングし、残りをシーケ 未使用のビットは、圧縮が使用されるデータ部分全体の最大値と最小値の間で異ならないビットです。 + +`DoubleDelta` と `Gorilla` コーデックは、その圧縮アルゴリズムの構成要素としてゴリラTSDBで使用されています。 Gorillaのアプローチは、タイムスタンプで徐々に変化する値のシーケンスがある場合のシナリオで有効です。 タイムスタンプは、 `DoubleDelta` コーデックおよび値はによって効果的に圧縮されます `Gorilla` コーデック。 たとえば、効果的に格納されたテーブルを取得するには、次の構成でテーブルを作成します: + +``` sql +CREATE TABLE codec_example +( + timestamp DateTime CODEC(DoubleDelta), + slow_values Float32 CODEC(Gorilla) +) +ENGINE = MergeTree() +``` + +#### 一般的な目的のコーデック {#create-query-common-purpose-codecs} + +コーデック: + +- `NONE` — No compression. +- `LZ4` — Lossless [データ圧縮](https://github.com/lz4/lz4) 既定で使用されます。 LZ4高速圧縮を適用します。 +- `LZ4HC[(level)]` — LZ4 HC (high compression) algorithm with configurable level. Default level: 9. Setting `level <= 0` 既定のレベルを適用します。 可能なレベル:\[1、12\]。 推奨レベル範囲:\[4、9\]。 +- `ZSTD[(level)]` — [ZSTD圧縮アルゴリズム](https://en.wikipedia.org/wiki/Zstandard) 構成可能を使って `level`. 
可能なレベル:\[1、22\]。 デフォルト値:1。 + +圧縮レベルが高い場合は、圧縮回数、繰り返しの解凍などの非対称シナリオに役立ちます。 高いレベルは、より良い圧縮と高いcpu使用率を意味します。 + +## 一時テーブル {#temporary-tables} + +ClickHouseは次の特徴がある一時テーブルを支える: + +- 一時テーブルは、接続が失われた場合など、セッションが終了すると消えます。 +- 一時テーブルはメモリエンジンのみを使用します。 +- 一時テーブルにdbを指定することはできません。 データベースの外部で作成されます。 +- すべてのクラスタサーバー上に分散ddlクエリを使用して一時テーブルを作成することは不可能です `ON CLUSTER`):このテーブルは現在のセッションにのみ存在します。 +- テンポラリテーブルの名前が別のテーブルと同じ場合、クエリでdbを指定せずにテーブル名を指定すると、テンポラリテーブルが使用されます。 +- 分散クエリ処理では、クエリで使用される一時テーブルがリモートサーバーに渡されます。 + +一時テーブルを作成するには、次の構文を使用します: + +``` sql +CREATE TEMPORARY TABLE [IF NOT EXISTS] table_name +( + name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], + name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], + ... +) +``` + +ほとんどの場合、一時テーブルを手動で作成され、外部データを利用するためのクエリに対して、または配布 `(GLOBAL) IN`. 詳細は、該当するセクションを参照してください + +テーブルを使用することは可能です [エンジン=メモリ](../../engines/table_engines/special/memory.md) 一時テーブルの代わりに。 + +## 分散ddlクエリ(on cluster clause) {#distributed-ddl-queries-on-cluster-clause} + +その `CREATE`, `DROP`, `ALTER`、と `RENAME` クエリの支援の分散実行クラスター +たとえば、次のクエリを作成します `all_hits` `Distributed` 各ホストのテーブル `cluster`: + +``` sql +CREATE TABLE IF NOT EXISTS all_hits ON CLUSTER cluster (p Date, i Int32) ENGINE = Distributed(cluster, default, hits) +``` + +これらのクエリを正しく実行するには、各ホストが同じクラスタ定義を持っている必要があります(設定の同期を簡単にするために、zookeeperからの置換 彼らはまた、zookeeperサーバに接続する必要があります。 +クエリのローカルバージョンは、一部のホストが現在利用できない場合でも、最終的にクラスター内の各ホストに実装されます。 単一のホスト内でクエリを実行する順序は保証されます。 + +## CREATE VIEW {#create-view} + +``` sql +CREATE [MATERIALIZED] VIEW [IF NOT EXISTS] [db.]table_name [TO[db.]name] [ENGINE = engine] [POPULATE] AS SELECT ... +``` + +ビューを作成します。 通常とマテリアライズド:ビューの二つのタイプがあります。 + +通常のビューにはデータは保存されませんが、別のテーブルから読み取るだけです。 言い換えれば、通常のビューは、保存されたクエリに過ぎません。 ビューから読み取る場合、この保存されたクエリはfrom句のサブクエリとして使用されます。 + +たとえば、ビューを作成したとします: + +``` sql +CREATE VIEW view AS SELECT ... +``` + +とクエリを書かれた: + +``` sql +SELECT a, b, c FROM view +``` + +このクエリは、サブクエリの使用と完全に同じです: + +``` sql +SELECT a, b, c FROM (SELECT ...) +``` + +実現の景色でデータ変換に対応する選択を返します。 + +マテリアライズドビューを作成するとき `TO [db].[table]`, you must specify ENGINE – the table engine for storing data. + +マテリアライズドビューを作成するとき `TO [db].[table]`、使用してはならない `POPULATE`. + +SELECTで指定されたテーブルにデータを挿入すると、挿入されたデータの一部がこのSELECTクエリによって変換され、結果がビューに挿入されます。 + +POPULATEを指定すると、作成時に既存のテーブルデータがビューに挿入されます。 `CREATE TABLE ... AS SELECT ...` . そうしないと、クエリーを含み、データを挿入し、表の作成後、作成した。 ビューの作成時にテーブルに挿入されたデータは挿入されないため、POPULATEを使用することはお勧めしません。 + +A `SELECT` クエ `DISTINCT`, `GROUP BY`, `ORDER BY`, `LIMIT`… Note that the corresponding conversions are performed independently on each block of inserted data. For example, if `GROUP BY` が設定され、データは挿入中に集約されるが、挿入されたデータの単一パケット内にのみ存在する。 データはそれ以上集計されません。 例外は、次のようなデータの集計を個別に実行するエンジンを使用する場合です `SummingMergeTree`. + +の実行 `ALTER` クエリを実現眺めなが十分に整備されていないので、いかに不便です。 マテリアライズドビュ `TO [db.]name`、できます `DETACH` ビュー、実行 `ALTER` ターゲットテーブルの場合 `ATTACH` 以前に切り離さ (`DETACH`)ビュー。 + +ビューの外観は、通常のテーブルと同じです。 例えば、それらはの結果にリストされています `SHOW TABLES` クエリ。 + +ビューを削除するための別のクエリはありません。 ビューを削除するには `DROP TABLE`. + +## CREATE DICTIONARY {#create-dictionary-query} + +``` sql +CREATE DICTIONARY [IF NOT EXISTS] [db.]dictionary_name [ON CLUSTER cluster] +( + key1 type1 [DEFAULT|EXPRESSION expr1] [HIERARCHICAL|INJECTIVE|IS_OBJECT_ID], + key2 type2 [DEFAULT|EXPRESSION expr2] [HIERARCHICAL|INJECTIVE|IS_OBJECT_ID], + attr1 type2 [DEFAULT|EXPRESSION expr3], + attr2 type2 [DEFAULT|EXPRESSION expr4] +) +PRIMARY KEY key1, key2 +SOURCE(SOURCE_NAME([param1 value1 ... 
paramN valueN])) +LAYOUT(LAYOUT_NAME([param_name param_value])) +LIFETIME([MIN val1] MAX val2) +``` + +作成 [外部辞書](../../sql_reference/dictionaries/external_dictionaries/external_dicts.md) 与えられると [構造](../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md), [ソース](../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md), [レイアウト](../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_layout.md) と [寿命](../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_lifetime.md). + +外部辞書構造の属性です。 ディクショナリ属性は、表の列と同様に指定します。 唯一の必須の属性は、そのタイプ、その他すべてのプロパティがデフォルト値がある。 + +辞書に応じて [レイアウト](../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_layout.md) 一つ以上の属性は、辞書キーとして指定することができます。 + +詳細については、 [外部辞書](../../sql_reference/dictionaries/external_dictionaries/external_dicts.md) セクション。 + +[元の記事](https://clickhouse.tech/docs/en/query_language/create/) diff --git a/docs/ja/sql_reference/statements/index.md b/docs/ja/sql_reference/statements/index.md deleted file mode 120000 index b5804beb2de..00000000000 --- a/docs/ja/sql_reference/statements/index.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/statements/index.md \ No newline at end of file diff --git a/docs/ja/sql_reference/statements/index.md b/docs/ja/sql_reference/statements/index.md new file mode 100644 index 00000000000..a2246679149 --- /dev/null +++ b/docs/ja/sql_reference/statements/index.md @@ -0,0 +1,8 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_folder_title: Statements +toc_priority: 31 +--- + + diff --git a/docs/ja/sql_reference/statements/insert_into.md b/docs/ja/sql_reference/statements/insert_into.md deleted file mode 120000 index e2f3f6af496..00000000000 --- a/docs/ja/sql_reference/statements/insert_into.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/statements/insert_into.md \ No newline at end of file diff --git a/docs/ja/sql_reference/statements/insert_into.md b/docs/ja/sql_reference/statements/insert_into.md new file mode 100644 index 00000000000..f9e7922cd77 --- /dev/null +++ b/docs/ja/sql_reference/statements/insert_into.md @@ -0,0 +1,80 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 34 +toc_title: INSERT INTO +--- + +## INSERT {#insert} + +データの追加。 + +基本的なクエリ形式: + +``` sql +INSERT INTO [db.]table [(c1, c2, c3)] VALUES (v11, v12, v13), (v21, v22, v23), ... +``` + +クエリでは、挿入する列のリストを指定できます `[(c1, c2, c3)]`. この場合、残りの列は次のように入力されます: + +- から計算された値 `DEFAULT` テーブル定義で指定された式。 +- ゼロと空の文字列の場合 `DEFAULT` 式は定義されていません。 + +もし [strict\_insert\_defaults=1](../../operations/settings/settings.md)、持っていない列 `DEFAULT` definedは、クエリに一覧表示する必要があります。 + +データは、任意の挿入物に渡すことができます [書式](../../interfaces/formats.md#formats) ClickHouseでサポートされている。 形式は、クエリで明示的に指定する必要があります: + +``` sql +INSERT INTO [db.]table [(c1, c2, c3)] FORMAT format_name data_set +``` + +For example, the following query format is identical to the basic version of INSERT … VALUES: + +``` sql +INSERT INTO [db.]table [(c1, c2, c3)] FORMAT Values (v11, v12, v13), (v21, v22, v23), ... +``` + +ClickHouseは、データの前にすべてのスペースと改行(存在する場合)を削除します。 クエリを作成するときは、クエリ演算子の後に新しい行にデータを置くことをお勧めします(データがスペースで始まる場合は重要です)。 + +例えば: + +``` sql +INSERT INTO t FORMAT TabSeparated +11 Hello, world! +22 Qwerty +``` + +挿入することができます。データから別のクエリのコマンドラインクライアント、httpインターフェース。 詳細については、以下を参照してください “[界面](../../interfaces/index.md#interfaces)”. 
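+
+たとえば、上記の `t` テーブルへの挿入は、[JSONEachRow](../../interfaces/formats.md) 形式を使うと次のように書けます(列名 `a`、`b` は説明用の仮のものです)。
+
+``` sql
+INSERT INTO t FORMAT JSONEachRow
+{"a": 11, "b": "Hello, world!"}
+{"a": 22, "b": "Qwerty"}
+```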
+ +### 制約 {#constraints} + +テーブルが [制約](create.md#constraints), their expressions will be checked for each row of inserted data. If any of those constraints is not satisfied — server will raise an exception containing constraint name and expression, the query will be stopped. + +### の結果を挿入する `SELECT` {#insert_query_insert-select} + +``` sql +INSERT INTO [db.]table [(c1, c2, c3)] SELECT ... +``` + +列は、select句内の位置に応じてマップされます。 ただし、select式とinsertのテーブルの名前は異なる場合があります。 必要に応じて、タイプ鋳造が行われる。 + +値以外のデータ形式では、次のような式に値を設定できません `now()`, `1 + 2`、というように。 この場合、非効率的なコードが実行に使用されるためです。 + +その他のクエリをデータ部品に対応していない: `UPDATE`, `DELETE`, `REPLACE`, `MERGE`, `UPSERT`, `INSERT UPDATE`. +ただし、次の方法で古いデータを削除できます `ALTER TABLE ... DROP PARTITION`. + +`FORMAT` 次の場合、クエリの最後に句を指定する必要があります `SELECT` テーブル関数を含む句 [入力()](../table_functions/input.md). + +### パフォーマン {#performance-considerations} + +`INSERT` 入力データを主キーでソートし、パーティションキーでパーティションに分割します。 た場合のデータを挿入し複数の仕切りは一度で大幅に低減できることの `INSERT` クエリ。 これを避けるには: + +- 一度に100,000行など、かなり大きなバッチでデータを追加します。 +- グループによるデータのパーティション鍵のアップロード前にでclickhouse. + +性能が減少することはありませんが: + +- データはリアルタイムで追加されます。 +- アップロードしたデータとは、通常はソートされました。 + +[元の記事](https://clickhouse.tech/docs/en/query_language/insert_into/) diff --git a/docs/ja/sql_reference/statements/misc.md b/docs/ja/sql_reference/statements/misc.md deleted file mode 120000 index b83a93f54ad..00000000000 --- a/docs/ja/sql_reference/statements/misc.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/statements/misc.md \ No newline at end of file diff --git a/docs/ja/sql_reference/statements/misc.md b/docs/ja/sql_reference/statements/misc.md new file mode 100644 index 00000000000..217f76d6621 --- /dev/null +++ b/docs/ja/sql_reference/statements/misc.md @@ -0,0 +1,252 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 39 +toc_title: "\u305D\u306E\u4ED6" +--- + +# その他のクエリ {#miscellaneous-queries} + +## ATTACH {#attach} + +このクエリは `CREATE`、しかし + +- 単語の代わりに `CREATE` それは単語を使用します `ATTACH`. +- クエリはディスク上にデータを作成しませんが、データがすでに適切な場所にあるとみなし、テーブルに関する情報をサーバーに追加するだけです。 + 添付クエリを実行すると、サーバーはテーブルの存在を知ることになります。 + +テーブルが以前に分離されていた場合 (`DETACH`)、その構造が知られていることを意味し、構造を定義することなく省略形を使用することができます。 + +``` sql +ATTACH TABLE [IF NOT EXISTS] [db.]name [ON CLUSTER cluster] +``` + +このクエリは、サーバーの起動時に使用されます。 サーバーに店舗のテーブルメタデータとしてファイル `ATTACH` クエリは、起動時に実行されるだけです(サーバー上に明示的に作成されたシステムテーブルは例外です)。 + +## CHECK TABLE {#check-table} + +Trueの場合、ただちにパージを行うデータをテーブルが壊れる可能性があります。 + +``` sql +CHECK TABLE [db.]name +``` + +その `CHECK TABLE` queryは、実際のファイルサイズをサーバーに格納されている期待値と比較します。 ファイルサイズが格納された値と一致しない場合は、データが破損していることを意味します。 このが発生する可能性があります、例えば、システムがクラッシュ時のクエリを実行します。 + +のクエリーの応答を含む `result` 単一行の列。 行に値がのあります +[ブール値](../../sql_reference/data_types/boolean.md) タイプ: + +- 0-テーブル内のデータが破損しています。 +- 1-データは整合性を維持します。 + +その `CHECK TABLE` クエリは以下のテーブルエンジン: + +- [ログ](../../engines/table_engines/log_family/log.md) +- [TinyLog](../../engines/table_engines/log_family/tinylog.md) +- [ストリップログ](../../engines/table_engines/log_family/stripelog.md) +- [マージツリーファミリー](../../engines/table_engines/mergetree_family/mergetree.md) + +これは、テーブルが別のテーブルエンジンの原因となる例外です。 + +からのエンジン `*Log` 家族は失敗の自動データ回復を提供しない。 を使用 `CHECK TABLE` タイムリーにデータ損失を追跡するためのクエリ。 + +のために `MergeTree` 家族のエンジンは、 `CHECK TABLE` クエリを示すステータス確認のための個人データのテーブルに現地サーバーです。 + +**データが破損している場合** + +テーブルが破損している場合は、破損していないデータを別のテーブルにコピーできます。 これを行うには: + +1. 破損したテーブルと同じ構造の新しいテーブルを作成します。 このためクセスしてください `CREATE TABLE AS `. +2. 
セットを [max\_threads](../../operations/settings/settings.md#settings-max_threads) 単一のスレッドで次のクエリを処理するには、1の値を指定します。 これを行うにはクエリを実行します `SET max_threads = 1`. +3. クエリの実行 `INSERT INTO SELECT * FROM `. この要求にコピーする非破損データからの表-別表に示す。 破損した部分の前のデータのみがコピーされます。 +4. 再起動する `clickhouse-client` リセットするには `max_threads` 値。 + +## DESCRIBE TABLE {#misc-describe-table} + +``` sql +DESC|DESCRIBE TABLE [db.]table [INTO OUTFILE filename] [FORMAT format] +``` + +以下を返します `String` タイプ列: + +- `name` — Column name. +- `type`— Column type. +- `default_type` — Clause that is used in [既定の式](create.md#create-default-values) (`DEFAULT`, `MATERIALIZED` または `ALIAS`). 既定の式が指定されていない場合、Columnには空の文字列が含まれます。 +- `default_expression` — Value specified in the `DEFAULT` 句。 +- `comment_expression` — Comment text. + +入れ子にされたデータ構造は “expanded” フォーマット。 各列は別々に表示され、名前はドットの後に続きます。 + +## DETACH {#detach} + +に関する情報を削除します。 ‘name’ サーバーからのテーブル。 サーバーはテーブルの存在を知ることをやめます。 + +``` sql +DETACH TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster] +``` + +テーブルのデータやメタデータは削除されません。 次のサーバーの起動時に、サーバーはメタデータを読み取り、再度テーブルについて調べます。 +同様に、 “detached” 表はを使用して再付けることができます `ATTACH` クエリ(メタデータが格納されていないシステムテーブルを除く)。 + +ありません `DETACH DATABASE` クエリ。 + +## DROP {#drop} + +このクエ: `DROP DATABASE` と `DROP TABLE`. + +``` sql +DROP DATABASE [IF EXISTS] db [ON CLUSTER cluster] +``` + +内部のすべてのテーブルを削除 ‘db’ データベース、その後削除 ‘db’ データベース自体。 +もし `IF EXISTS` データベースが存在しない場合、エラーは返されません。 + +``` sql +DROP [TEMPORARY] TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster] +``` + +テーブルを削除します。 +もし `IF EXISTS` テーブルが存在しない場合、またはデータベースが存在しない場合は、エラーを返しません。 + + DROP DICTIONARY [IF EXISTS] [db.]name + +辞書をdeletsします。 +もし `IF EXISTS` テーブルが存在しない場合、またはデータベースが存在しない場合は、エラーを返しません。 + +## EXISTS {#exists} + +``` sql +EXISTS [TEMPORARY] [TABLE|DICTIONARY] [db.]name [INTO OUTFILE filename] [FORMAT format] +``` + +シングルを返します `UInt8`-単一の値を含むタイプの列 `0` テーブルまたはデータベースが存在しない場合、または `1` 指定されたデータベースにテーブルが存在する場合。 + +## KILL QUERY {#kill-query} + +``` sql +KILL QUERY [ON CLUSTER cluster] + WHERE + [SYNC|ASYNC|TEST] + [FORMAT format] +``` + +現在実行中のクエリを強制的に終了しようとします。 +終了するクエリは、システムから選択されます。プロセステーブルで定義された基準を使用して、 `WHERE` の句 `KILL` クエリ。 + +例: + +``` sql +-- Forcibly terminates all queries with the specified query_id: +KILL QUERY WHERE query_id='2-857d-4a57-9ee0-327da5d60a90' + +-- Synchronously terminates all queries run by 'username': +KILL QUERY WHERE user='username' SYNC +``` + +読み取り専用ユーザーは、自分のクエリのみを停止できます。 + +既定では、非同期バージョンのクエリが使用されます (`ASYNC`)、クエリが停止したことの確認を待つことはありません。 + +同期バージョン (`SYNC`)待機のためのすべての問い合わせに対応停止に関する情報を表示し各工程で停止します。 +応答には、 `kill_status` 次の値を取ることができる列: + +1. ‘finished’ – The query was terminated successfully. +2. ‘waiting’ – Waiting for the query to end after sending it a signal to terminate. +3. The other values ​​explain why the query can't be stopped. + +テストクエリ (`TEST`)ユーザーの権限のみをチェックし、停止するクエリのリストを表示します。 + +## KILL MUTATION {#kill-mutation} + +``` sql +KILL MUTATION [ON CLUSTER cluster] + WHERE + [TEST] + [FORMAT format] +``` + +キャンセルと削除を試みます [突然変異](alter.md#alter-mutations) これは現在実行中です。 取り消すべき突然変異はから選ばれます [`system.mutations`](../../operations/system_tables.md#system_tables-mutations) テーブルのフィルタで指定された `WHERE` の句 `KILL` クエリ。 + +テストクエリ (`TEST`)ユーザーの権限のみをチェックし、停止するクエリのリストを表示します。 + +例: + +``` sql +-- Cancel and remove all mutations of the single table: +KILL MUTATION WHERE database = 'default' AND table = 'table' + +-- Cancel the specific mutation: +KILL MUTATION WHERE database = 'default' AND table = 'table' AND mutation_id = 'mutation_3.txt' +``` + +The query is useful when a mutation is stuck and cannot finish (e.g. 
if some function in the mutation query throws an exception when applied to the data contained in the table). + +突然変異によって既に行われた変更はロールバックされません。 + +## OPTIMIZE {#misc_operations-optimize} + +``` sql +OPTIMIZE TABLE [db.]name [ON CLUSTER cluster] [PARTITION partition | PARTITION ID 'partition_id'] [FINAL] [DEDUPLICATE] +``` + +このクエリを初期化予定外の統合データのパーツを使ったテーブルのテーブルエンジンからの [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md) 家族 + +その `OPTMIZE` クエリもサポートされています [MaterializedView](../../engines/table_engines/special/materializedview.md) そして [バッファ](../../engines/table_engines/special/buffer.md) エンジン その他のテーブルエンジンなサポート。 + +とき `OPTIMIZE` とともに使用されます [レプリケートされたmergetree](../../engines/table_engines/mergetree_family/replication.md) テーブルエンジンのファミリ、ClickHouseはマージのためのタスクを作成し、すべてのノードでの実行を待ちます。 `replication_alter_partitions_sync` 設定が有効になっています)。 + +- もし `OPTIMIZE` 何らかの理由でマージを実行せず、クライアントに通知しません。 通知を有効にするには、以下を使用します [optimize\_throw\_if\_noop](../../operations/settings/settings.md#setting-optimize_throw_if_noop) 設定。 +- を指定した場合 `PARTITION` 指定したパーティションのみが最適化されます。 [パーティション式の設定方法](alter.md#alter-how-to-specify-part-expr). +- 指定した場合 `FINAL`、最適化は、すべてのデータがすでに一つの部分にある場合でも行われる。 +- 指定した場合 `DEDUPLICATE`、その後、完全に同一の行が重複排除されます(すべての列が比較されます)、それだけでMergeTreeエンジンのために理にかなっています。 + +!!! warning "警告" + `OPTIMIZE` 修正できません “Too many parts” エラー。 + +## RENAME {#misc_operations-rename} + +テーブルの名前を変更します。 + +``` sql +RENAME TABLE [db11.]name11 TO [db12.]name12, [db21.]name21 TO [db22.]name22, ... [ON CLUSTER cluster] +``` + +すべてのテーブル名変更"グローバルチェンジにおけるロックしなければなりません。 テーブルの名前を変更するのは簡単な操作です。 場合は正しく表示す他のデータベースのテーブルに表示されるようになります。本データベースです。 しかし、そのディレクトリのデータベースに格納してある必要がある同一ファイルシステム(それ以外の場合、エラーを返す。 + +## SET {#query-set} + +``` sql +SET param = value +``` + +割り当て `value` に `param` [設定](../../operations/settings/index.md) 現在のセッションの場合。 変更はできません [サーバー設定](../../operations/server_configuration_parameters/index.md) こっちだ + +また、指定した設定プロファイルのすべての値を単一のクエリで設定することもできます。 + +``` sql +SET profile = 'profile-name-from-the-settings-file' +``` + +詳細については、 [設定](../../operations/settings/settings.md). 
+ +## TRUNCATE {#truncate} + +``` sql +TRUNCATE TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster] +``` + +表からすべてのデータを削除します。 ときに句 `IF EXISTS` テーブルが存在しない場合、クエリはエラーを返します。 + +その `TRUNCATE` queryはサポートされていません [ビュー](../../engines/table_engines/special/view.md), [ファイル](../../engines/table_engines/special/file.md), [URL](../../engines/table_engines/special/url.md) と [ヌル](../../engines/table_engines/special/null.md) テーブルエンジン。 + +## USE {#use} + +``` sql +USE db +``` + +セッションの現在のデータベースを設定できます。 +現在のデータベース検索用テーブルの場合はデータベースが明示的に定義されたクエリードの前にテーブルの名前です。 +このクエリできません利用の場合は、httpプロトコルが存在しない概念です。 + +[元の記事](https://clickhouse.tech/docs/en/query_language/misc/) diff --git a/docs/ja/sql_reference/statements/select.md b/docs/ja/sql_reference/statements/select.md deleted file mode 120000 index 6333e2118bd..00000000000 --- a/docs/ja/sql_reference/statements/select.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/statements/select.md \ No newline at end of file diff --git a/docs/ja/sql_reference/statements/select.md b/docs/ja/sql_reference/statements/select.md new file mode 100644 index 00000000000..7775b2f7cdc --- /dev/null +++ b/docs/ja/sql_reference/statements/select.md @@ -0,0 +1,610 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 33 +toc_title: SELECT +--- + +# クエリ構文の選択 {#select-queries-syntax} + +`SELECT` データ検索を実行します。 + +``` sql +[WITH expr_list|(subquery)] +SELECT [DISTINCT] expr_list +[FROM [db.]table | (subquery) | table_function] [FINAL] +[SAMPLE sample_coeff] +[ARRAY JOIN ...] +[GLOBAL] [ANY|ALL] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER] JOIN (subquery)|table USING columns_list +[PREWHERE expr] +[WHERE expr] +[GROUP BY expr_list] [WITH TOTALS] +[HAVING expr] +[ORDER BY expr_list] +[LIMIT [offset_value, ]n BY columns] +[LIMIT [n, ]m] +[UNION ALL ...] +[INTO OUTFILE filename] +[FORMAT format] +``` + +SELECT直後の必須の式リストを除き、すべての句はオプションです。 +以下の句は、クエリ実行コンベアとほぼ同じ順序で記述されています。 + +クエリが省略された場合、 `DISTINCT`, `GROUP BY` と `ORDER BY` 句および `IN` と `JOIN` サブクエリでは、クエリはO(1)量のRAMを使用して完全にストリーム処理されます。 +それ以外の場合、適切な制限が指定されていない場合、クエリは大量のramを消費する可能性があります: `max_memory_usage`, `max_rows_to_group_by`, `max_rows_to_sort`, `max_rows_in_distinct`, `max_bytes_in_distinct`, `max_rows_in_set`, `max_bytes_in_set`, `max_rows_in_join`, `max_bytes_in_join`, `max_bytes_before_external_sort`, `max_bytes_before_external_group_by`. 詳細については、以下を参照してください “Settings”. 外部ソート(一時テーブルをディスクに保存する)と外部集約を使用することが可能です。 `The system does not have "merge join"`. + +### With句 {#with-clause} + +このセクション支援のための共通表現 ([CTE](https://en.wikipedia.org/wiki/Hierarchical_and_recursive_queries_in_SQL))、いくつかの制限があります: +1. 再帰的な問合せには対応していない +2. サブクエリがsection内で使用される場合、その結果は正確に一つの行を持つスカラーになります +3. 
式の結果はサブクエリでは使用できません +WITH句の式の結果は、SELECT句の中で使用できます。 + +例1:定数式をasとして使用する “variable” + +``` sql +WITH '2019-08-01 15:23:00' as ts_upper_bound +SELECT * +FROM hits +WHERE + EventDate = toDate(ts_upper_bound) AND + EventTime <= ts_upper_bound +``` + +例2:select句の列リストからsum(bytes)式の結果を削除する + +``` sql +WITH sum(bytes) as s +SELECT + formatReadableSize(s), + table +FROM system.parts +GROUP BY table +ORDER BY s +``` + +例3:結果のスカラサブクエリ + +``` sql +/* this example would return TOP 10 of most huge tables */ +WITH + ( + SELECT sum(bytes) + FROM system.parts + WHERE active + ) AS total_disk_usage +SELECT + (sum(bytes) / total_disk_usage) * 100 AS table_disk_usage, + table +FROM system.parts +GROUP BY table +ORDER BY table_disk_usage DESC +LIMIT 10 +``` + +例4:サブクエリでの式の再利用 +サブクエリでの式の現在の使用制限の回避策として、複製することができます。 + +``` sql +WITH ['hello'] AS hello +SELECT + hello, + * +FROM +( + WITH ['hello'] AS hello + SELECT hello +) +``` + +``` text +┌─hello─────┬─hello─────┐ +│ ['hello'] │ ['hello'] │ +└───────────┴───────────┘ +``` + +### FROM句 {#select-from} + +FROM句が省略された場合、データは `system.one` テーブル。 +その `system.one` このテーブルは、他のDbmsで見つかったデュアルテーブルと同じ目的を果たします。 + +その `FROM` 句は、データを読み取るソースを指定します: + +- テーブル +- サブクエリ +- [テーブル機能](../table_functions/index.md) + +`ARRAY JOIN` そして、定期的に `JOIN` また、(下記参照)が含まれていてもよいです。 + +テーブルの代わりに、 `SELECT` サブクエリは、かっこで指定できます。 +標準sqlとは対照的に、サブクエリの後にシノニムを指定する必要はありません。 + +実行をクエリに対して、すべての列をクエリを取り出しに適します。 任意の列は不要のため、外部クエリはスローされ、サブクエリ. +クエリで列がリストされない場合(たとえば, `SELECT count() FROM t`)行の数を計算するために、いくつかの列がテーブルから抽出されます(最小の列が優先されます)。 + +#### 最終修飾子 {#select-from-final} + +が異なる場合は、それぞれの選定からデータをテーブルからの [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md)-エンジンファミリー以外 `GraphiteMergeTree`. とき `FINAL` 指定されている場合、ClickHouseは結果を返す前にデータを完全にマージするため、指定されたテーブルエンジンのマージ中に発生するすべてのデータ変換を実行し + +また、: +- [複製された](../../engines/table_engines/mergetree_family/replication.md) のバージョン `MergeTree` エンジン +- [ビュー](../../engines/table_engines/special/view.md), [バッファ](../../engines/table_engines/special/buffer.md), [分散](../../engines/table_engines/special/distributed.md)、と [MaterializedView](../../engines/table_engines/special/materializedview.md) 他のエンジンを操作するエンジンは、それらが上に作成された提供 `MergeTree`-エンジンテーブル。 + +使用するクエリ `FINAL` そうでない類似のクエリと同じくらい速く実行されません。: + +- クエリは単一のスレッドで実行され、クエリの実行中にデータがマージされます。 +- とのクエリ `FINAL` クエリで指定された列に加えて、主キー列を読み取ります。 + +ほとんどの場合、使用を避けます `FINAL`. + +### サンプル句 {#select-sample-clause} + +その `SAMPLE` 句は、近似クエリ処理を可能にします。 + +データサンプリングを有効にすると、すべてのデータに対してクエリは実行されず、特定のデータ(サンプル)に対してのみ実行されます。 たとえば、すべての訪問の統計を計算する必要がある場合は、すべての訪問の1/10分のクエリを実行し、その結果に10を掛けるだけで十分です。 + +近似クエリ処理は、次の場合に役立ちます: + +- 厳密なタイミング要件(\<100ms)がありますが、それらを満たすために追加のハードウェアリソースのコストを正当化できない場合。 +- 生データが正確でない場合、近似は品質を著しく低下させません。 +- ビジネス要件は、おおよその結果(費用対効果のため、または正確な結果をプレミアムユーザーに販売するため)を対象とします。 + +!!! note "メモ" + サンプリングを使用できるのは、次の表のみです [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md) テーブル作成時にサンプリング式が指定された場合にのみ [MergeTreeエンジン](../../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-creating-a-table)). + +データサンプリングの機能を以下に示します: + +- データサンプリングは確定的なメカニズムです。 同じの結果 `SELECT .. SAMPLE` クエリは常に同じです。 +- サンプリン テーブルに単一のサンプリングキーは、サンプルと同じ係数を常に選択と同じサブセットのデータです。 たとえば、ユーザー idのサンプルでは、異なるテーブルのすべてのユーザー idのサブセットが同じ行になります。 これは、サブクエリでサンプルを使用できることを意味します [IN](#select-in-operators) 句。 また、以下を使用してサンプルを結合できます [JOIN](#select-join) 句。 +- サンプリングで読み下からのデータディスク。 サンプリングキーを正しく指定する必要があります。 詳細については、 [MergeTreeテーブルの作成](../../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-creating-a-table). 
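+
+サンプリングキーは、次のスケッチのようにテーブル作成時に指定します(テーブル定義はこのドキュメントの `visits` の例に合わせた説明用の仮のものです)。
+
+``` sql
+-- 仮の例: intHash32(UserID) をサンプリングキーとして指定する。
+-- サンプリング式は主キー(ORDER BY)に含まれている必要があります。
+CREATE TABLE visits
+(
+    CounterID UInt32,
+    UserID UInt64,
+    Duration UInt32
+)
+ENGINE = MergeTree()
+ORDER BY (CounterID, intHash32(UserID))
+SAMPLE BY intHash32(UserID)
+```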
+
+`SAMPLE` 句では、次の構文がサポートされています:
+
+| SAMPLE Clause Syntax | 説明                                                                                                                                                                                      |
+|----------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `SAMPLE k`           | ここで `k` は0から1までの数値です。<br/>クエリはデータの `k` の割合に対して実行されます。 例えば、`SAMPLE 0.1` はデータの10%に対してクエリを実行します。 [もっと読む](#select-sample-k) |
+| `SAMPLE n`           | ここで `n` は十分に大きい整数です。<br/>クエリは少なくとも `n` 行のサンプルに対して実行されます(ただし、それを大きく上回ることはありません)。 例えば、`SAMPLE 10000000` は最小10,000,000行に対してクエリを実行します。 [もっと読む](#select-sample-n) |
+| `SAMPLE k OFFSET m`  | ここで `k` と `m` は0から1までの数字です。
    クエリは次のサンプルで実行されます `k` データの割合。 に使用されるデータのサンプルと相殺することにより `m` 分数。 [もっと読む](#select-sample-offset) | + +#### SAMPLE K {#select-sample-k} + +ここに `k` は0から1までの数値です(小数表記と小数表記の両方がサポートされています)。 例えば, `SAMPLE 1/2` または `SAMPLE 0.5`. + +で `SAMPLE k` 句は、サンプルから取られます `k` データの割合。 例を以下に示します: + +``` sql +SELECT + Title, + count() * 10 AS PageViews +FROM hits_distributed +SAMPLE 0.1 +WHERE + CounterID = 34 +GROUP BY Title +ORDER BY PageViews DESC LIMIT 1000 +``` + +この例では、0.1(10%)のデータのサンプルでクエリが実行されます。 集計関数の値は自動的には修正されないので、おおよその結果を得るには値 `count()` 手動で10倍します。 + +#### SAMPLE N {#select-sample-n} + +ここに `n` は十分に大きい整数です。 例えば, `SAMPLE 10000000`. + +この場合、クエリは少なくともサンプルで実行されます `n` 行(しかし、これ以上のものではない)。 例えば, `SAMPLE 10000000` 最小10,000,000行に対してクエリを実行します。 + +データ読み取りのための最小単位は一つの顆粒であるため(そのサイズは `index_granularity` それは、顆粒のサイズよりもはるかに大きいサンプルを設定することは理にかなっています。 + +を使用する場合 `SAMPLE n` 句、データの相対パーセントが処理されたかわからない。 したがって、集計関数に掛ける係数はわかりません。 を使用 `_sample_factor` おおよその結果を得るための仮想列。 + +その `_sample_factor` 列には、動的に計算される相対係数が含まれます。 この列は、次の場合に自動的に作成されます [作成](../../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-creating-a-table) 指定したサンプリングキーを持つテーブル。 の使用例 `_sample_factor` 列は以下の通りです。 + +テーブルを考えてみましょう `visits` これには、サイト訪問に関する統計情報が含まれます。 最初の例は、ページビューの数を計算する方法を示しています: + +``` sql +SELECT sum(PageViews * _sample_factor) +FROM visits +SAMPLE 10000000 +``` + +次の例では、訪問回数の合計を計算する方法を示します: + +``` sql +SELECT sum(_sample_factor) +FROM visits +SAMPLE 10000000 +``` + +以下の例は、平均セッション期間を計算する方法を示しています。 相対係数を使用して平均値を計算する必要はありません。 + +``` sql +SELECT avg(Duration) +FROM visits +SAMPLE 10000000 +``` + +#### SAMPLE K OFFSET M {#select-sample-offset} + +ここに `k` と `m` は0から1までの数字です。 例を以下に示す。 + +**例1** + +``` sql +SAMPLE 1/10 +``` + +この例では、サンプルはすべてのデータの1/10thです: + +`[++------------]` + +**例2** + +``` sql +SAMPLE 1/10 OFFSET 1/2 +``` + +ここでは、データの後半から10%のサンプルを採取します。 + +`[------++------]` + +### 配列結合句 {#select-array-join-clause} + +実行を許可する `JOIN` 配列または入れ子になったデータ構造。 その意図は、 [arrayJoin](../../sql_reference/functions/array_join.md#functions_arrayjoin) 機能が、その機能はより広いです。 + +``` sql +SELECT +FROM +[LEFT] ARRAY JOIN +[WHERE|PREWHERE ] +... +``` + +単一のみを指定できます `ARRAY JOIN` クエリ内の句。 + +実行時にクエリの実行順序が最適化されます `ARRAY JOIN`. が `ARRAY JOIN` の前に必ず指定する必要があります。 `WHERE/PREWHERE` のいずれかを実行することができます。 `WHERE/PREWHERE` (結果がこの節で必要な場合)、またはそれを完了した後(計算量を減らすため)。 処理順序はクエリオプティマイザによって制御されます。 + +サポートされる種類の `ARRAY JOIN` は以下の通りです: + +- `ARRAY JOIN` -この場合、空の配列は結果に含まれません `JOIN`. 
+- `LEFT ARRAY JOIN` -結果の `JOIN` 空の配列を含む行を含みます。 空の配列の値は、配列要素タイプのデフォルト値に設定されます(通常は0、空の文字列またはNULL)。 + +以下の例は、以下の使用例を示しています。 `ARRAY JOIN` と `LEFT ARRAY JOIN` 句。 テーブルを作成してみましょう [配列](../../sql_reference/data_types/array.md) 列を入力して値を挿入します: + +``` sql +CREATE TABLE arrays_test +( + s String, + arr Array(UInt8) +) ENGINE = Memory; + +INSERT INTO arrays_test +VALUES ('Hello', [1,2]), ('World', [3,4,5]), ('Goodbye', []); +``` + +``` text +┌─s───────────┬─arr─────┐ +│ Hello │ [1,2] │ +│ World │ [3,4,5] │ +│ Goodbye │ [] │ +└─────────────┴─────────┘ +``` + +以下の例では、 `ARRAY JOIN` 句: + +``` sql +SELECT s, arr +FROM arrays_test +ARRAY JOIN arr; +``` + +``` text +┌─s─────┬─arr─┐ +│ Hello │ 1 │ +│ Hello │ 2 │ +│ World │ 3 │ +│ World │ 4 │ +│ World │ 5 │ +└───────┴─────┘ +``` + +次の例では、 `LEFT ARRAY JOIN` 句: + +``` sql +SELECT s, arr +FROM arrays_test +LEFT ARRAY JOIN arr; +``` + +``` text +┌─s───────────┬─arr─┐ +│ Hello │ 1 │ +│ Hello │ 2 │ +│ World │ 3 │ +│ World │ 4 │ +│ World │ 5 │ +│ Goodbye │ 0 │ +└─────────────┴─────┘ +``` + +#### エイリアスの使用 {#using-aliases} + +配列のエイリアスを指定することができます。 `ARRAY JOIN` 句。 この場合、配列項目はこのエイリアスでアクセスできますが、配列自体は元の名前でアクセスされます。 例えば: + +``` sql +SELECT s, arr, a +FROM arrays_test +ARRAY JOIN arr AS a; +``` + +``` text +┌─s─────┬─arr─────┬─a─┐ +│ Hello │ [1,2] │ 1 │ +│ Hello │ [1,2] │ 2 │ +│ World │ [3,4,5] │ 3 │ +│ World │ [3,4,5] │ 4 │ +│ World │ [3,4,5] │ 5 │ +└───────┴─────────┴───┘ +``` + +別名を使用すると、次の操作を実行できます `ARRAY JOIN` 外部配列を使用する。 例えば: + +``` sql +SELECT s, arr_external +FROM arrays_test +ARRAY JOIN [1, 2, 3] AS arr_external; +``` + +``` text +┌─s───────────┬─arr_external─┐ +│ Hello │ 1 │ +│ Hello │ 2 │ +│ Hello │ 3 │ +│ World │ 1 │ +│ World │ 2 │ +│ World │ 3 │ +│ Goodbye │ 1 │ +│ Goodbye │ 2 │ +│ Goodbye │ 3 │ +└─────────────┴──────────────┘ +``` + +複数の配列をコンマで区切ることができます。 `ARRAY JOIN` 句。 この場合, `JOIN` それらと同時に実行されます(直積ではなく、直積)。 すべての配列は同じサイズでなければなりません。 例えば: + +``` sql +SELECT s, arr, a, num, mapped +FROM arrays_test +ARRAY JOIN arr AS a, arrayEnumerate(arr) AS num, arrayMap(x -> x + 1, arr) AS mapped; +``` + +``` text +┌─s─────┬─arr─────┬─a─┬─num─┬─mapped─┐ +│ Hello │ [1,2] │ 1 │ 1 │ 2 │ +│ Hello │ [1,2] │ 2 │ 2 │ 3 │ +│ World │ [3,4,5] │ 3 │ 1 │ 4 │ +│ World │ [3,4,5] │ 4 │ 2 │ 5 │ +│ World │ [3,4,5] │ 5 │ 3 │ 6 │ +└───────┴─────────┴───┴─────┴────────┘ +``` + +以下の例では、 [arrayEnumerate](../../sql_reference/functions/array_functions.md#array_functions-arrayenumerate) 機能: + +``` sql +SELECT s, arr, a, num, arrayEnumerate(arr) +FROM arrays_test +ARRAY JOIN arr AS a, arrayEnumerate(arr) AS num; +``` + +``` text +┌─s─────┬─arr─────┬─a─┬─num─┬─arrayEnumerate(arr)─┐ +│ Hello │ [1,2] │ 1 │ 1 │ [1,2] │ +│ Hello │ [1,2] │ 2 │ 2 │ [1,2] │ +│ World │ [3,4,5] │ 3 │ 1 │ [1,2,3] │ +│ World │ [3,4,5] │ 4 │ 2 │ [1,2,3] │ +│ World │ [3,4,5] │ 5 │ 3 │ [1,2,3] │ +└───────┴─────────┴───┴─────┴─────────────────────┘ +``` + +#### 配列の参加入れ子データ構造 {#array-join-with-nested-data-structure} + +`ARRAY`また、"参加"で動作します [入れ子のデータ構造](../../sql_reference/data_types/nested_data_structures/nested.md). 
例えば: + +``` sql +CREATE TABLE nested_test +( + s String, + nest Nested( + x UInt8, + y UInt32) +) ENGINE = Memory; + +INSERT INTO nested_test +VALUES ('Hello', [1,2], [10,20]), ('World', [3,4,5], [30,40,50]), ('Goodbye', [], []); +``` + +``` text +┌─s───────┬─nest.x──┬─nest.y─────┐ +│ Hello │ [1,2] │ [10,20] │ +│ World │ [3,4,5] │ [30,40,50] │ +│ Goodbye │ [] │ [] │ +└─────────┴─────────┴────────────┘ +``` + +``` sql +SELECT s, `nest.x`, `nest.y` +FROM nested_test +ARRAY JOIN nest; +``` + +``` text +┌─s─────┬─nest.x─┬─nest.y─┐ +│ Hello │ 1 │ 10 │ +│ Hello │ 2 │ 20 │ +│ World │ 3 │ 30 │ +│ World │ 4 │ 40 │ +│ World │ 5 │ 50 │ +└───────┴────────┴────────┘ +``` + +ネストされたデータ構造の名前を指定する場合 `ARRAY JOIN`、意味は同じです `ARRAY JOIN` それが構成されているすべての配列要素。 例を以下に示します: + +``` sql +SELECT s, `nest.x`, `nest.y` +FROM nested_test +ARRAY JOIN `nest.x`, `nest.y`; +``` + +``` text +┌─s─────┬─nest.x─┬─nest.y─┐ +│ Hello │ 1 │ 10 │ +│ Hello │ 2 │ 20 │ +│ World │ 3 │ 30 │ +│ World │ 4 │ 40 │ +│ World │ 5 │ 50 │ +└───────┴────────┴────────┘ +``` + +この変化はまた意味を成している: + +``` sql +SELECT s, `nest.x`, `nest.y` +FROM nested_test +ARRAY JOIN `nest.x`; +``` + +``` text +┌─s─────┬─nest.x─┬─nest.y─────┐ +│ Hello │ 1 │ [10,20] │ +│ Hello │ 2 │ [10,20] │ +│ World │ 3 │ [30,40,50] │ +│ World │ 4 │ [30,40,50] │ +│ World │ 5 │ [30,40,50] │ +└───────┴────────┴────────────┘ +``` + +エイリアスは、ネストされたデータ構造のために使用することができます。 `JOIN` 結果またはソース配列。 例えば: + +``` sql +SELECT s, `n.x`, `n.y`, `nest.x`, `nest.y` +FROM nested_test +ARRAY JOIN nest AS n; +``` + +``` text +┌─s─────┬─n.x─┬─n.y─┬─nest.x──┬─nest.y─────┐ +│ Hello │ 1 │ 10 │ [1,2] │ [10,20] │ +│ Hello │ 2 │ 20 │ [1,2] │ [10,20] │ +│ World │ 3 │ 30 │ [3,4,5] │ [30,40,50] │ +│ World │ 4 │ 40 │ [3,4,5] │ [30,40,50] │ +│ World │ 5 │ 50 │ [3,4,5] │ [30,40,50] │ +└───────┴─────┴─────┴─────────┴────────────┘ +``` + +使用例 [arrayEnumerate](../../sql_reference/functions/array_functions.md#array_functions-arrayenumerate) 機能: + +``` sql +SELECT s, `n.x`, `n.y`, `nest.x`, `nest.y`, num +FROM nested_test +ARRAY JOIN nest AS n, arrayEnumerate(`nest.x`) AS num; +``` + +``` text +┌─s─────┬─n.x─┬─n.y─┬─nest.x──┬─nest.y─────┬─num─┐ +│ Hello │ 1 │ 10 │ [1,2] │ [10,20] │ 1 │ +│ Hello │ 2 │ 20 │ [1,2] │ [10,20] │ 2 │ +│ World │ 3 │ 30 │ [3,4,5] │ [30,40,50] │ 1 │ +│ World │ 4 │ 40 │ [3,4,5] │ [30,40,50] │ 2 │ +│ World │ 5 │ 50 │ [3,4,5] │ [30,40,50] │ 3 │ +└───────┴─────┴─────┴─────────┴────────────┴─────┘ +``` + +### JOIN句 {#select-join} + +通常のデータを結合します [SQL JOIN](https://en.wikipedia.org/wiki/Join_(SQL)) センス + +!!! info "メモ" + 関連しない [ARRAY JOIN](#select-array-join-clause). + +``` sql +SELECT +FROM +[GLOBAL] [ANY|ALL] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER] JOIN +(ON )|(USING ) ... +``` + +テーブル名は次の代わりに指定できます `` と ``. これは、 `SELECT * FROM table` サブクエリは、テーブルが次のものを持つ特殊な場合を除きます。 [参加](../../engines/table_engines/special/join.md) engine – an array prepared for joining. 
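+
+上記の一般的な構文の最小限の例を示します(テーブル `t1`、`t2` と共通の列 `a`、および `t2` の列 `b` は説明のための仮定です):
+
+``` sql
+SELECT a, b
+FROM t1
+ANY LEFT JOIN t2 USING (a)
+```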
+ +#### サポートされる種類の `JOIN` {#select-join-types} + +- `INNER JOIN` (または `JOIN`) +- `LEFT JOIN` (または `LEFT OUTER JOIN`) +- `RIGHT JOIN` (または `RIGHT OUTER JOIN`) +- `FULL JOIN` (または `FULL OUTER JOIN`) +- `CROSS JOIN` (または `,` ) + +標準を参照してください [SQL JOIN](https://en.wikipedia.org/wiki/Join_(SQL)) 説明。 + +#### 複数の結合 {#multiple-join} + +クエリを実行すると、clickhouseはマルチテーブル結合を二つのテーブル結合のシーケンスに書き換えます。 たとえば、clickhouseに参加するための四つのテーブルがある場合は、最初と二番目のテーブルを結合し、その結果を三番目のテーブルに結合し、最後のステップで + +クエリが含まれている場合、 `WHERE` 句、ClickHouseはこの句から中間結合を介してプッシュダウンフィルターを試行します。 各中間結合にフィルタを適用できない場合、ClickHouseはすべての結合が完了した後にフィルタを適用します。 + +私たちはお勧め `JOIN ON` または `JOIN USING` クエリを作成するための構文。 例えば: + +``` sql +SELECT * FROM t1 JOIN t2 ON t1.a = t2.a JOIN t3 ON t1.a = t3.a +``` + +テーブルのコンマ区切りリストを使用することができます `FROM` 句。 例えば: + +``` sql +SELECT * FROM t1, t2, t3 WHERE t1.a = t2.a AND t1.a = t3.a +``` + +これらの構文を混在させないでください。 + +ClickHouseはカンマで構文を直接サポートしていないので、使用することはお勧めしません。 このアルゴ `CROSS JOIN` と `INNER JOIN` クエリ処理に進みます。 クエリを書き換えるとき、ClickHouseはパフォーマンスとメモリ消費の最適化を試みます。 デフォルトでは、ClickHouseはコンマを `INNER JOIN` 句と変換 `INNER JOIN` に `CROSS JOIN` アルゴリズムが保証できない場合 `INNER JOIN` 必要なデータを返します。 + +#### 厳密さ {#select-join-strictness} + +- `ALL` — If the right table has several matching rows, ClickHouse creates a [デカルト積](https://en.wikipedia.org/wiki/Cartesian_product) 一致する行から。 これが標準です `JOIN` SQLでの動作。 +- `ANY` — If the right table has several matching rows, only the first one found is joined. If the right table has only one matching row, the results of queries with `ANY` と `ALL` キーワードは同じです。 +- `ASOF` — For joining sequences with a non-exact match. `ASOF JOIN` 以下に使用方法を説明します。 + +**ASOF結合の使用** + +`ASOF JOIN` 完全一致のないレコードを結合する必要がある場合に便利です。 + +テーブルのため `ASOF JOIN` 順序列の列を持つ必要があります。 この列はテーブル内で単独で使用することはできません。: `UInt32`, `UInt64`, `Float32`, `Float64`, `Date`、と `DateTime`. + +構文 `ASOF JOIN ... ON`: + +``` sql +SELECT expressions_list +FROM table_1 +ASOF LEFT JOIN table_2 +ON equi_cond AND closest_match_cond +``` + +任意の数の等価条件と正確に最も近い一致条件を使用できます。 例えば, `SELECT count() FROM table_1 ASOF LEFT JOIN table_2 ON table_1.a == table_2.b AND table_2.t <= table_1.t`. + +最も近い一致でサポートされる条件: `>`, `>=`, `<`, `<=`. + +構文 `ASOF JOIN ... USING`: + +``` sql +SELECT expressions_list +FROM table_1 +ASOF JOIN table_2 +USING (equi_column1, ... 
equi_columnN, asof_column) +``` + +`ASOF JOIN` 使用 `equi_columnX` 平等に参加するための `asof_column` との最も近い試合に参加するための `table_1.asof_column >= table_2.asof_column` 条件。 その `asof_column` 列は常に最後の列です `USING` 句。 + +たとえば、次の表を考えてみます: + +'テキスト +テーブル1テーブル2 + +イベント/ev\_time\|user\_idイベント/ev\_time\|user\_id diff --git a/docs/ja/sql_reference/statements/show.md b/docs/ja/sql_reference/statements/show.md deleted file mode 120000 index 8329b9839b2..00000000000 --- a/docs/ja/sql_reference/statements/show.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/statements/show.md \ No newline at end of file diff --git a/docs/ja/sql_reference/statements/show.md b/docs/ja/sql_reference/statements/show.md new file mode 100644 index 00000000000..a3c564b3522 --- /dev/null +++ b/docs/ja/sql_reference/statements/show.md @@ -0,0 +1,105 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 38 +toc_title: SHOW +--- + +# クエリを表示 {#show-queries} + +## SHOW CREATE TABLE {#show-create-table} + +``` sql +SHOW CREATE [TEMPORARY] [TABLE|DICTIONARY] [db.]table [INTO OUTFILE filename] [FORMAT format] +``` + +シングルを返します `String`-タイプ ‘statement’ column, which contains a single value – the `CREATE` 指定したオブジェク + +## SHOW DATABASES {#show-databases} + +``` sql +SHOW DATABASES [INTO OUTFILE filename] [FORMAT format] +``` + +一覧の全てのデータベースです。 +このクエリは次と同じです `SELECT name FROM system.databases [INTO OUTFILE filename] [FORMAT format]`. + +## SHOW PROCESSLIST {#show-processlist} + +``` sql +SHOW PROCESSLIST [INTO OUTFILE filename] [FORMAT format] +``` + +の内容を出力します [システム。プロセス](../../operations/system_tables.md#system_tables-processes) 現在処理中のクエリのリストを含むテーブル。 `SHOW PROCESSLIST` クエリ。 + +その `SELECT * FROM system.processes` クエリを返しますデータに現在のすべてのクエリ. + +ヒント(コンソールで実行): + +``` bash +$ watch -n1 "clickhouse-client --query='SHOW PROCESSLIST'" +``` + +## SHOW TABLES {#show-tables} + +テーブルのリストを表示します。 + +``` sql +SHOW [TEMPORARY] TABLES [{FROM | IN} ] [LIKE '' | WHERE expr] [LIMIT ] [INTO OUTFILE ] [FORMAT ] +``` + +この `FROM` 句が指定されていない場合、クエリは現在のデータベースからテーブルの一覧を返します。 + +あなたは同じ結果を得ることができます `SHOW TABLES` 次の方法でクエリ: + +``` sql +SELECT name FROM system.tables WHERE database = [AND name LIKE ] [LIMIT ] [INTO OUTFILE ] [FORMAT ] +``` + +**例えば** + +次のクエリは、テーブルのリストから最初の二つの行を選択します。 `system` 名前に含まれるデータベース `co`. + +``` sql +SHOW TABLES FROM system LIKE '%co%' LIMIT 2 +``` + +``` text +┌─name───────────────────────────┐ +│ aggregate_function_combinators │ +│ collations │ +└────────────────────────────────┘ +``` + +## SHOW DICTIONARIES {#show-dictionaries} + +リストをの表示します [外部辞書](../../sql_reference/dictionaries/external_dictionaries/external_dicts.md). + +``` sql +SHOW DICTIONARIES [FROM ] [LIKE ''] [LIMIT ] [INTO OUTFILE ] [FORMAT ] +``` + +この `FROM` 句が指定されていない場合、クエリは現在のデータベースから辞書のリストを返します。 + +あなたは同じ結果を得ることができます `SHOW DICTIONARIES` 次の方法でクエリ: + +``` sql +SELECT name FROM system.dictionaries WHERE database = [AND name LIKE ] [LIMIT ] [INTO OUTFILE ] [FORMAT ] +``` + +**例えば** + +次のクエリは、テーブルのリストから最初の二つの行を選択します。 `system` 名前に含まれるデータベース `reg`. 
+ +``` sql +SHOW DICTIONARIES FROM db LIKE '%reg%' LIMIT 2 +``` + +``` text +┌─name─────────┐ +│ regions │ +│ region_names │ +└──────────────┘ +``` + +[元の記事](https://clickhouse.tech/docs/en/query_language/show/) diff --git a/docs/ja/sql_reference/statements/system.md b/docs/ja/sql_reference/statements/system.md deleted file mode 120000 index 4c2dfe71047..00000000000 --- a/docs/ja/sql_reference/statements/system.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/statements/system.md \ No newline at end of file diff --git a/docs/ja/sql_reference/statements/system.md b/docs/ja/sql_reference/statements/system.md new file mode 100644 index 00000000000..a54514da2a3 --- /dev/null +++ b/docs/ja/sql_reference/statements/system.md @@ -0,0 +1,113 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 37 +toc_title: SYSTEM +--- + +# システムクエリ {#query-language-system} + +- [RELOAD DICTIONARIES](#query_language-system-reload-dictionaries) +- [RELOAD DICTIONARY](#query_language-system-reload-dictionary) +- [DROP DNS CACHE](#query_language-system-drop-dns-cache) +- [DROP MARK CACHE](#query_language-system-drop-mark-cache) +- [FLUSH LOGS](#query_language-system-flush_logs) +- [RELOAD CONFIG](#query_language-system-reload-config) +- [SHUTDOWN](#query_language-system-shutdown) +- [KILL](#query_language-system-kill) +- [STOP DISTRIBUTED SENDS](#query_language-system-stop-distributed-sends) +- [FLUSH DISTRIBUTED](#query_language-system-flush-distributed) +- [START DISTRIBUTED SENDS](#query_language-system-start-distributed-sends) +- [STOP MERGES](#query_language-system-stop-merges) +- [START MERGES](#query_language-system-start-merges) + +## RELOAD DICTIONARIES {#query_language-system-reload-dictionaries} + +前に正常に読み込まれたすべての辞書を再読み込みします。 +デフォルトでは、辞書を取り込みの遅延を参照 [dictionaries\_lazy\_load](../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-dictionaries_lazy_load))、起動時に自動的にロードされるのではなく、dictGet関数を使用して最初のアクセス時に初期化されるか、ENGINE=Dictionaryテーブルから選択されます。 その `SYSTEM RELOAD DICTIONARIES` クエバなどの辞書(ロード). +常に戻る `Ok.` 辞書の更新の結果にかかわらず。 + +## 辞書dictionary\_nameを再読み込み {#query_language-system-reload-dictionary} + +辞書を完全に再読み込みする `dictionary_name` 辞書の状態にかかわらず(LOADED/NOT\_LOADED/FAILED)。 +常に戻る `Ok.` 辞書を更新した結果にかかわらず。 +ディクショナリのステータスは以下のクエリで確認できます。 `system.dictionaries` テーブル。 + +``` sql +SELECT name, status FROM system.dictionaries; +``` + +## DROP DNS CACHE {#query_language-system-drop-dns-cache} + +ClickHouseの内部DNSキャッシュをリセットします。 場合によっては(古いClickHouseバージョンの場合)、インフラストラクチャを変更するとき(別のClickHouseサーバーのIPアドレスまたは辞書で使用されるサーバーを変更する + +より便利な(自動)キャッシュ管理については、"disable\_internal\_dns\_cache,dns\_cache\_update\_periodパラメータ"を参照してください。 + +## DROP MARK CACHE {#query_language-system-drop-mark-cache} + +リセットをマークします。 clickhouseおよび性能試験の開発で使用される。 + +## FLUSH LOGS {#query_language-system-flush_logs} + +Flushes buffers of log messages to system tables (e.g. system.query\_log). Allows you to not wait 7.5 seconds when debugging. 
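+
+最小限の使用例です(クエリログが有効になっていて `system.query_log` テーブルが存在することを仮定しています):
+
+``` sql
+SYSTEM FLUSH LOGS;
+SELECT query, query_duration_ms FROM system.query_log ORDER BY event_time DESC LIMIT 5;
+```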
+ +## RELOAD CONFIG {#query_language-system-reload-config} + +ClickHouse構成を再読み込みします。 設定がZooKeeeperに格納されている場合に使用されます。 + +## SHUTDOWN {#query_language-system-shutdown} + +通常シャットダウンclickhouse(のような `service clickhouse-server stop` / `kill {$pid_clickhouse-server}`) + +## KILL {#query_language-system-kill} + +異常終了しclickhouse工程など `kill -9 {$ pid_clickhouse-server}`) + +## 分散テーブルの管理 {#query-language-system-distributed} + +ClickHouse管理 [分散](../../engines/table_engines/special/distributed.md) テーブル。 ユーザーがこれらのテーブルにデータを挿入すると、ClickHouseはまずクラスターノードに送信するデータのキューを作成し、次に非同期に送信します。 キューの処理を管理することができます [STOP DISTRIBUTED SENDS](#query_language-system-stop-distributed-sends), [FLUSH DISTRIBUTED](#query_language-system-flush-distributed)、と [START DISTRIBUTED SENDS](#query_language-system-start-distributed-sends) クエリ。 また、分散データを同期的に挿入することもできます。 `insert_distributed_sync` 設定。 + +### STOP DISTRIBUTED SENDS {#query_language-system-stop-distributed-sends} + +を無効にした背景データの分布を挿入する際、データを配布します。 + +``` sql +SYSTEM STOP DISTRIBUTED SENDS [db.] +``` + +### FLUSH DISTRIBUTED {#query_language-system-flush-distributed} + +クラスタノードにデータを同期送信するようにclickhouseを強制します。 ノードが使用できない場合、clickhouseは例外をスローし、クエリの実行を停止します。 これは、すべてのノードがオンラインに戻ったときに発生します。 + +``` sql +SYSTEM FLUSH DISTRIBUTED [db.] +``` + +### START DISTRIBUTED SENDS {#query_language-system-start-distributed-sends} + +を背景データの分布を挿入する際、データを配布します。 + +``` sql +SYSTEM START DISTRIBUTED SENDS [db.] +``` + +### STOP MERGES {#query_language-system-stop-merges} + +提供可能停止を背景に合併したテーブルのmergetree家族: + +``` sql +SYSTEM STOP MERGES [[db.]merge_tree_family_table_name] +``` + +!!! note "メモ" + `DETACH / ATTACH` テーブルは、以前にすべてのMergeTreeテーブルのマージが停止された場合でも、テーブルのバックグラウンドマージを開始します。 + +### START MERGES {#query_language-system-start-merges} + +の提供が開始背景に合併したテーブルのmergetree家族: + +``` sql +SYSTEM START MERGES [[db.]merge_tree_family_table_name] +``` + +[元の記事](https://clickhouse.tech/docs/en/query_language/system/) diff --git a/docs/ja/sql_reference/syntax.md b/docs/ja/sql_reference/syntax.md deleted file mode 120000 index 46b967d47ae..00000000000 --- a/docs/ja/sql_reference/syntax.md +++ /dev/null @@ -1 +0,0 @@ -../../en/sql_reference/syntax.md \ No newline at end of file diff --git a/docs/ja/sql_reference/syntax.md b/docs/ja/sql_reference/syntax.md new file mode 100644 index 00000000000..a3706c2f855 --- /dev/null +++ b/docs/ja/sql_reference/syntax.md @@ -0,0 +1,187 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 31 +toc_title: "\u69CB\u6587" +--- + +# 構文 {#syntax} + +システムには、完全なsqlパーサー(再帰的降下パーサー)とデータフォーマットパーサー(高速ストリームパーサー)の二種類のパーサーがあります。 +を除くすべての場合において `INSERT` クエリでは、完全なSQLパーサーのみが使用されます。 +その `INSERT` クエリの両方を使用のパーサ: + +``` sql +INSERT INTO t VALUES (1, 'Hello, world'), (2, 'abc'), (3, 'def') +``` + +その `INSERT INTO t VALUES` フラグメントは完全なパーサーとデータによって解析されます `(1, 'Hello, world'), (2, 'abc'), (3, 'def')` 高速ストリームパーサーによって解析されます。 データの完全なパーサーをオンにするには、次のコマンドを使用します [input\_format\_values\_interpret\_expressions](../operations/settings/settings.md#settings-input_format_values_interpret_expressions) 設定。 とき `input_format_values_interpret_expressions = 1`、ClickHouseは最初に高速ストリームパーサーで値を解析しようとします。 失敗した場合、ClickHouseはデータの完全なパーサーを使用し、SQLのように扱います [式](#syntax-expressions). 
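+
+例えば、この設定を有効にすると、高速ストリームパーサー単体では失敗するような式を含む挿入が可能になります(テーブル `t` は上記の例のものを仮定した、説明のためのスケッチです):
+
+``` sql
+SET input_format_values_interpret_expressions = 1;
+INSERT INTO t VALUES (4, concat('Hello', ', world'))
+```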
+ +データには任意の形式を使用できます。 クエリが受信されると、サーバーは以下を計算します [max\_query\_size](../operations/settings/settings.md#settings-max_query_size) RAM内の要求のバイト(デフォルトでは1MB)、残りはストリーム解析されます。 +これはシステムに大きいの問題がないことを意味します `INSERT` MySQLのようなクエリ。 + +を使用する場合 `Values` フォーマット `INSERT` クエリは、データがaの式と同じように解析されるように見えるかもしれません `SELECT` クエリが、これは真実ではありません。 その `Values` 形式は、はるかに限られています。 + +次に、完全なパーサーをカバーします。 情報の形式のパーサは、 [形式](../interfaces/formats.md) セクション。 + +## スペース {#spaces} + +構文構成(クエリの開始と終了を含む)の間には、任意の数のスペースシンボルが存在する可能性があります。 スペースシンボルには、スペース、タブ、改行、cr、フォームフィードがあります。 + +## コメント {#comments} + +SQL形式およびC形式のコメントがサポートされています。 +SQLスタイルのコメント:from `--` ラインの終わりまで。 後のスペース `--` 省略可能です。 +Cスタイルのコメント:from `/*` に `*/`. これらのコメントは複数行にできます。 ここでもスペースは必要ありません。 + +## キーワード {#syntax-keywords} + +キーワードが対応する場合、大文字と小文字は区別されません: + +- SQL標準。 例えば, `SELECT`, `select` と `SeLeCt` すべて有効です。 +- いくつかの一般的なdbms(mysqlまたはpostgres)での実装。 例えば, `DateTime` は同じとして `datetime`. + +データ型名が大文字小文字を区別するかどうかをチェックできます。 `system.data_type_families` テーブル。 + +標準sqlとは対照的に、他のすべてのキーワード(関数名を含む)は **大文字と小文字を区別する**. + +キーワードはこの数は予約されていません(そうとして構文解析キーワードに対応するコンテキスト. 使用する場合 [識別子](#syntax-identifiers) キーワードと同じで、引用符で囲みます。 たとえば、クエリ `SELECT "FROM" FROM table_name` テーブルの場合は有効です。 `table_name` 名前の列があります `"FROM"`. + +## 識別子 {#syntax-identifiers} + +識別子は: + +- クラスターデータベース、テーブル、パーティションおよびカラム名になってしまいます +- 機能。 +- データ型。 +- [式の別名](#syntax-expression_aliases). + +識別子は、引用符または非引用することができます。 非引用符付き識別子を使用することをお勧めします。 + +非引用識別子に一致しなければならなregex `^[a-zA-Z_][0-9a-zA-Z_]*$` とに等しくすることはできません [キーワード](#syntax-keywords). 例: `x, _1, X_y__Z123_.` + +キーワードと同じ識別子を使用する場合、または識別子に他の記号を使用する場合は、二重引用符またはバッククォートを使用して引用符を引用します。, `"id"`, `` `id` ``. + +## リテラル {#literals} + +以下があります:数値、文字列、複合および `NULL` リテラル + +### 数値 {#numeric} + +数値リテラルは解析を試みます: + +- 最初に64ビットの符号付き数値として、 [strtoull](https://en.cppreference.com/w/cpp/string/byte/strtoul) 機能。 +- 失敗した場合、64ビット符号なしの数値として、 [strtoll](https://en.cppreference.com/w/cpp/string/byte/strtol) 機能。 +- 失敗した場合は、浮動小数点数として [strtod](https://en.cppreference.com/w/cpp/string/byte/strtof) 機能。 +- それ以外の場合は、エラーが返されます。 + +対応する値は、値が収まる最小の型を持ちます。 +たとえば、1は次のように解析されます `UInt8` しかし、256は次のように解析されます `UInt16`. 詳細については、 [データ型](../sql_reference/data_types/index.md). + +例: `1`, `18446744073709551615`, `0xDEADBEEF`, `01`, `0.1`, `1e100`, `-1e-100`, `inf`, `nan`. + +### 文字列 {#syntax-string-literal} + +単一引quotesの文字列リテラルのみがサポートされます。 囲まれた文字はバックスラッシュでエスケープできます。 以下のエスケープシーケンスに対応する特殊な値: `\b`, `\f`, `\r`, `\n`, `\t`, `\0`, `\a`, `\v`, `\xHH`. 他のすべての場合において、エスケープシーケンスの形式 `\c`、どこ `c` は任意の文字です。 `c`. つまり、次のシーケンスを使用できます `\'`と`\\`. この値は、 [文字列](../sql_reference/data_types/string.md) タイプ。 + +文字列リテラルでエスケープする必要がある文字の最小セット: `'` と `\`. 単一引quoteは、単一引quoteでエスケープすることができます。 `'It\'s'` と `'It''s'` 等しい。 + +### 化合物 {#compound} + +配列では構文がサポートされます: `[1, 2, 3]` とタプル: `(1, 'Hello, world!', 2)`.. +実際には、これらはリテラルではなく、配列作成演算子とタプル作成演算子を持つ式です。 +配列によって構成された少なくとも一つの項目には、タプル以上あることが必要です。石二鳥の優れものだ。 +タプルに使用のための特別な目的があります `IN` aの句 `SELECT` クエリ。 タプルはクエリの結果として取得できますが、データベースに保存することはできません(ただし、 [メモリ](../engines/table_engines/special/memory.md) テーブル)。 + +### NULL {#null-literal} + +値が欠落していることを示します。 + +を格納するために `NULL` テーブルフィールドでは、テーブルフィールド [Nullable](../sql_reference/data_types/nullable.md) タイプ。 + +データ形式(入力または出力)に応じて), `NULL` 異なる表現を持つことがあります。 詳細については、以下の文書を参照してください [データ形式](../interfaces/formats.md#formats). + +処理には多くの微妙な違いがあります `NULL`. たとえば、比較操作の引数のうちの少なくとも一つが `NULL` この操作の結果も次のようになります `NULL`. 
乗算、加算、およびその他の演算についても同様です。 詳細については、各操作のドキュメントを参照してください。 + +クエリでは、以下を確認できます `NULL` を使用して [IS NULL](operators.md#operator-is-null) と [IS NOT NULL](operators.md) 演算子と関連する関数 `isNull` と `isNotNull`. + +## 機能 {#functions} + +関数は、括弧内の引数のリスト(おそらく空)を持つ識別子のように書かれています。 標準sqlとは対照的に、空の引数リストであっても括弧が必要です。 例えば: `now()`. +通常の関数と集計関数があります(セクションを参照 “Aggregate functions”). 一部の集計関数を含むことができ二つのリストの引数ットに固定して使用します。 例えば: `quantile (0.9) (x)`. これらの集計関数が呼び出される “parametric” 関数と最初のリストの引数が呼び出されます “parameters”. パラメータを指定しない集計関数の構文は、通常の関数と同じです。 + +## 演算子 {#operators} + +演算子は、優先度と結合性を考慮して、クエリの解析中に対応する関数に変換されます。 +たとえば、次の式 `1 + 2 * 3 + 4` に変換される。 `plus(plus(1, multiply(2, 3)), 4)`. + +## データ型とデータベ {#data_types-and-database-table-engines} + +のデータ型とテーブルエンジン `CREATE` クエリは、識別子または関数と同じ方法で記述されます。 言い換えれば、それらは括弧内に引数リストを含んでいてもいなくてもよい。 詳細については、以下を参照してください “Data types,” “Table engines,” と “CREATE”. + +## 式の別名 {#syntax-expression_aliases} + +別名は、クエリ内の式のユーザー定義名です。 + +``` sql +expr AS alias +``` + +- `AS` — The keyword for defining aliases. You can define the alias for a table name or a column name in a `SELECT` 使用しない句 `AS` キーワード。 + + For example, `SELECT table_name_alias.column_name FROM table_name table_name_alias`. + + In the [CAST](sql_reference/functions/type_conversion_functions.md#type_conversion_function-cast) function, the `AS` keyword has another meaning. See the description of the function. + +- `expr` — Any expression supported by ClickHouse. + + For example, `SELECT column_name * 2 AS double FROM some_table`. + +- `alias` — Name for `expr`. エイリアスはに従うべきです [識別子](#syntax-identifiers) 構文。 + + For example, `SELECT "table t".column_name FROM table_name AS "table t"`. + +### 使用上の注意 {#notes-on-usage} + +エイリアスは、クエリまたはサブクエリのグローバルであり、任意の式のクエリの任意の部分にエイリアスを定義できます。 例えば, `SELECT (1 AS n) + 2, n`. + +エイリアスは、サブクエリやサブクエリ間では表示されません。 たとえば、クエリの実行中などです `SELECT (SELECT sum(b.a) + num FROM b) - a.a AS num FROM a` ClickHouseは例外を生成します `Unknown identifier: num`. + +結果列に別名が定義されている場合 `SELECT` サブクエリの句は、これらの列は、外側のクエリで表示されます。 例えば, `SELECT n + m FROM (SELECT 1 AS n, 2 AS m)`. + +列名またはテーブル名と同じ別名には注意してください。 次の例を考えてみましょう: + +``` sql +CREATE TABLE t +( + a Int, + b Int +) +ENGINE = TinyLog() +``` + +``` sql +SELECT + argMax(a, b), + sum(b) AS b +FROM t +``` + +``` text +Received exception from server (version 18.14.17): +Code: 184. DB::Exception: Received from localhost:9000, 127.0.0.1. DB::Exception: Aggregate function sum(b) is found inside another aggregate function in query. +``` + +この例では、テーブルを宣言しました `t` コラムを使って `b`. 次に、データを選択するときに、 `sum(b) AS b` エイリアス としてエイリアスは、グローバルClickHouse置換されているリテラル `b` 式の中で `argMax(a, b)` 式を使って `sum(b)`. この置換によって例外が発生しました。 + +## アスタリスク {#asterisk} + +で `SELECT` クエリー、アスタリスクで置き換え異なるアイコンで表示されます。 詳細については、以下を参照してください “SELECT”. 
+ +## 式 {#syntax-expressions} + +式は、関数、識別子、リテラル、演算子の適用、角かっこ内の式、サブクエリ、またはアスタリスクです。 別名を含めることもできます。 +式のリストは、コンマで区切られた式です。 +関数と演算子は、次に、引数として式を持つことができます。 + +[元の記事](https://clickhouse.tech/docs/en/query_language/syntax/) diff --git a/docs/ja/sql_reference/table_functions/file.md b/docs/ja/sql_reference/table_functions/file.md deleted file mode 120000 index 64efa65e7c2..00000000000 --- a/docs/ja/sql_reference/table_functions/file.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/table_functions/file.md \ No newline at end of file diff --git a/docs/ja/sql_reference/table_functions/file.md b/docs/ja/sql_reference/table_functions/file.md new file mode 100644 index 00000000000..69b3f7199e7 --- /dev/null +++ b/docs/ja/sql_reference/table_functions/file.md @@ -0,0 +1,121 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 37 +toc_title: "\u30D5\u30A1\u30A4\u30EB" +--- + +# ファイル {#file} + +ファイルからテーブルを作成します。 この表関数は次のようになります [url](url.md) と [hdfs](hdfs.md) もの。 + +``` sql +file(path, format, structure) +``` + +**入力パラメータ** + +- `path` — The relative path to the file from [user\_files\_path](../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-user_files_path). 読み取り専用モードのglobsに続くファイルサポートのパス: `*`, `?`, `{abc,def}` と `{N..M}` どこに `N`, `M` — numbers, \``'abc', 'def'` — strings. +- `format` — The [書式](../../interfaces/formats.md#formats) ファイルの +- `structure` — Structure of the table. Format `'column1_name column1_type, column2_name column2_type, ...'`. + +**戻り値** + +指定したファイルにデータを読み書きするための、指定した構造体を持つテーブル。 + +**例えば** + +設定 `user_files_path` そして、ファイルの内容 `test.csv`: + +``` bash +$ grep user_files_path /etc/clickhouse-server/config.xml + /var/lib/clickhouse/user_files/ + +$ cat /var/lib/clickhouse/user_files/test.csv + 1,2,3 + 3,2,1 + 78,43,45 +``` + +テーブルから`test.csv` そしてそれからの最初の二つの行の選択: + +``` sql +SELECT * +FROM file('test.csv', 'CSV', 'column1 UInt32, column2 UInt32, column3 UInt32') +LIMIT 2 +``` + +``` text +┌─column1─┬─column2─┬─column3─┐ +│ 1 │ 2 │ 3 │ +│ 3 │ 2 │ 1 │ +└─────────┴─────────┴─────────┘ +``` + +``` sql +-- getting the first 10 lines of a table that contains 3 columns of UInt32 type from a CSV file +SELECT * FROM file('test.csv', 'CSV', 'column1 UInt32, column2 UInt32, column3 UInt32') LIMIT 10 +``` + +**パス内のグロブ** + +複数のパスコンポーネン 処理されるためには、ファイルが存在し、パスパターン全体(接尾辞や接頭辞だけでなく)に一致する必要があります。 + +- `*` — Substitutes any number of any characters except `/` 空の文字列を含む。 +- `?` — Substitutes any single character. +- `{some_string,another_string,yet_another_one}` — Substitutes any of strings `'some_string', 'another_string', 'yet_another_one'`. +- `{N..M}` — Substitutes any number in range from N to M including both borders. + +構造との `{}` に類似していて下さい [遠隔テーブル機能](../../sql_reference/table_functions/remote.md)). + +**例えば** + +1. 次の相対パスを持つ複数のファイルがあるとします: + +- ‘some\_dir/some\_file\_1’ +- ‘some\_dir/some\_file\_2’ +- ‘some\_dir/some\_file\_3’ +- ‘another\_dir/some\_file\_1’ +- ‘another\_dir/some\_file\_2’ +- ‘another\_dir/some\_file\_3’ + +1. これらのファイルの行数を照会します: + + + +``` sql +SELECT count(*) +FROM file('{some,another}_dir/some_file_{1..3}', 'TSV', 'name String, value UInt32') +``` + +1. クエリの量の行のすべてのファイルのディレクトリ: + + + +``` sql +SELECT count(*) +FROM file('{some,another}_dir/*', 'TSV', 'name String, value UInt32') +``` + +!!! warning "警告" + ファイルのリストに先行するゼロを持つ数値範囲が含まれている場合は、各桁のために中かっこで囲みます。 `?`. 
+ +**例えば** + +クエリからのデータファイル名 `file000`, `file001`, … , `file999`: + +``` sql +SELECT count(*) +FROM file('big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name String, value UInt32') +``` + +## 仮想列 {#virtual-columns} + +- `_path` — Path to the file. +- `_file` — Name of the file. + +**また見なさい** + +- [仮想列](https://clickhouse.tech/docs/en/operations/table_engines/#table_engines-virtual_columns) + +[元の記事](https://clickhouse.tech/docs/en/query_language/table_functions/file/) diff --git a/docs/ja/sql_reference/table_functions/generate.md b/docs/ja/sql_reference/table_functions/generate.md deleted file mode 120000 index fee5c28f0b9..00000000000 --- a/docs/ja/sql_reference/table_functions/generate.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/table_functions/generate.md \ No newline at end of file diff --git a/docs/ja/sql_reference/table_functions/generate.md b/docs/ja/sql_reference/table_functions/generate.md new file mode 100644 index 00000000000..684301061d3 --- /dev/null +++ b/docs/ja/sql_reference/table_functions/generate.md @@ -0,0 +1,45 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 47 +toc_title: generateRandom +--- + +# generateRandom {#generaterandom} + +をランダムなデータを指定されたschema. +テストテーブルにデータを設定できます。 +テーブルに格納できるすべてのデータ型をサポートします `LowCardinality` と `AggregateFunction`. + +``` sql +generateRandom('name TypeName[, name TypeName]...', [, 'random_seed'[, 'max_string_length'[, 'max_array_length']]]); +``` + +**パラメータ** + +- `name` — Name of corresponding column. +- `TypeName` — Type of corresponding column. +- `limit` — Number of rows to generate. +- `max_array_length` — Maximum array length for all generated arrays. Defaults to `10`. +- `max_string_length` — Maximum string length for all generated strings. Defaults to `10`. +- `random_seed` — Specify random seed manually to produce stable results. If NULL — seed is randomly generated. + +**戻り値** + +テーブルオブジェクトご希望のスキーマ. 
+ +## 使用例 {#usage-example} + +``` sql +SELECT * FROM generateRandom('a Array(Int8), d Decimal32(4), c Tuple(DateTime64(3), UUID)', 1, 10, 2); +``` + +``` text +┌─a────────┬────────────d─┬─c──────────────────────────────────────────────────────────────────┐ +│ [77] │ -124167.6723 │ ('2061-04-17 21:59:44.573','3f72f405-ec3e-13c8-44ca-66ef335f7835') │ +│ [32,110] │ -141397.7312 │ ('1979-02-09 03:43:48.526','982486d1-5a5d-a308-e525-7bd8b80ffa73') │ +│ [68] │ -67417.0770 │ ('2080-03-12 14:17:31.269','110425e5-413f-10a6-05ba-fa6b3e929f15') │ +└──────────┴──────────────┴────────────────────────────────────────────────────────────────────┘ +``` + +[元の記事](https://clickhouse.tech/docs/en/query_language/table_functions/generate/) diff --git a/docs/ja/sql_reference/table_functions/hdfs.md b/docs/ja/sql_reference/table_functions/hdfs.md deleted file mode 120000 index 02b734d517c..00000000000 --- a/docs/ja/sql_reference/table_functions/hdfs.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/table_functions/hdfs.md \ No newline at end of file diff --git a/docs/ja/sql_reference/table_functions/hdfs.md b/docs/ja/sql_reference/table_functions/hdfs.md new file mode 100644 index 00000000000..17726208584 --- /dev/null +++ b/docs/ja/sql_reference/table_functions/hdfs.md @@ -0,0 +1,104 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 45 +toc_title: hdfs +--- + +# hdfs {#hdfs} + +HDFSのファイルからテーブルを作成します。 この表関数は次のようになります [url](url.md) と [ファイル](file.md) もの。 + +``` sql +hdfs(URI, format, structure) +``` + +**入力パラメータ** + +- `URI` — The relative URI to the file in HDFS. Path to file support following globs in readonly mode: `*`, `?`, `{abc,def}` と `{N..M}` どこに `N`, `M` — numbers, \``'abc', 'def'` — strings. +- `format` — The [書式](../../interfaces/formats.md#formats) ファイルの +- `structure` — Structure of the table. Format `'column1_name column1_type, column2_name column2_type, ...'`. + +**戻り値** + +指定したファイルにデータを読み書きするための、指定した構造体を持つテーブル。 + +**例えば** + +テーブルから `hdfs://hdfs1:9000/test` そしてそれからの最初の二つの行の選択: + +``` sql +SELECT * +FROM hdfs('hdfs://hdfs1:9000/test', 'TSV', 'column1 UInt32, column2 UInt32, column3 UInt32') +LIMIT 2 +``` + +``` text +┌─column1─┬─column2─┬─column3─┐ +│ 1 │ 2 │ 3 │ +│ 3 │ 2 │ 1 │ +└─────────┴─────────┴─────────┘ +``` + +**パス内のグロブ** + +複数のパスコンポーネン 処理されるためには、ファイルが存在し、パスパターン全体(接尾辞や接頭辞だけでなく)に一致する必要があります。 + +- `*` — Substitutes any number of any characters except `/` 空の文字列を含む。 +- `?` — Substitutes any single character. +- `{some_string,another_string,yet_another_one}` — Substitutes any of strings `'some_string', 'another_string', 'yet_another_one'`. +- `{N..M}` — Substitutes any number in range from N to M including both borders. + +構造との `{}` に類似していて下さい [遠隔テーブル機能](../../sql_reference/table_functions/remote.md)). + +**例えば** + +1. HDFS上に次のUriを持ついくつかのファイルがあるとします: + +- ‘hdfs://hdfs1:9000/some\_dir/some\_file\_1’ +- ‘hdfs://hdfs1:9000/some\_dir/some\_file\_2’ +- ‘hdfs://hdfs1:9000/some\_dir/some\_file\_3’ +- ‘hdfs://hdfs1:9000/another\_dir/some\_file\_1’ +- ‘hdfs://hdfs1:9000/another\_dir/some\_file\_2’ +- ‘hdfs://hdfs1:9000/another\_dir/some\_file\_3’ + +1. これらのファイルの行数を照会します: + + + +``` sql +SELECT count(*) +FROM hdfs('hdfs://hdfs1:9000/{some,another}_dir/some_file_{1..3}', 'TSV', 'name String, value UInt32') +``` + +1. これら二つのディレ: + + + +``` sql +SELECT count(*) +FROM hdfs('hdfs://hdfs1:9000/{some,another}_dir/*', 'TSV', 'name String, value UInt32') +``` + +!!! warning "警告" + ファイルのリストに先行するゼロを持つ数値範囲が含まれている場合は、各桁のために中かっこで囲みます。 `?`. 
+ +**例えば** + +クエリからのデータファイル名 `file000`, `file001`, … , `file999`: + +``` sql +SELECT count(*) +FROM hdfs('hdfs://hdfs1:9000/big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name String, value UInt32') +``` + +## 仮想列 {#virtual-columns} + +- `_path` — Path to the file. +- `_file` — Name of the file. + +**また見なさい** + +- [仮想列](https://clickhouse.tech/docs/en/operations/table_engines/#table_engines-virtual_columns) + +[元の記事](https://clickhouse.tech/docs/en/query_language/table_functions/hdfs/) diff --git a/docs/ja/sql_reference/table_functions/index.md b/docs/ja/sql_reference/table_functions/index.md deleted file mode 120000 index ebd79cdfd24..00000000000 --- a/docs/ja/sql_reference/table_functions/index.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/table_functions/index.md \ No newline at end of file diff --git a/docs/ja/sql_reference/table_functions/index.md b/docs/ja/sql_reference/table_functions/index.md new file mode 100644 index 00000000000..508317ffa90 --- /dev/null +++ b/docs/ja/sql_reference/table_functions/index.md @@ -0,0 +1,38 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_folder_title: Table Functions +toc_priority: 34 +toc_title: "\u5C0E\u5165" +--- + +# テーブル関数 {#table-functions} + +テーブル機能の方法を構築します。 + +テーブル関数は次の場所で使用できます: + +- [FROM](../statements/select.md#select-from) の句 `SELECT` クエリ。 + + The method for creating a temporary table that is available only in the current query. The table is deleted when the query finishes. + +- [テーブルを\として作成](../statements/create.md#create-table-query) クエリ。 + + It's one of the methods of creating a table. + +!!! warning "警告" + テーブル関数を使用することはできません。 [allow\_ddl](../../operations/settings/permissions_for_queries.md#settings_allow_ddl) 設定は無効です。 + +| 機能 | 説明 | +|-----------------------|------------------------------------------------------------------------------------------------------------------------------------------| +| [ファイル](file.md) | を作成します。 [ファイル](../../engines/table_engines/special/file.md)-エンジンのテーブル。 | +| [マージ](merge.md) | を作成します。 [マージ](../../engines/table_engines/special/merge.md)-エンジンのテーブル。 | +| [数字](numbers.md) | 単一の列が整数で埋められたテーブルを作成します。 | +| [リモート](remote.md) | へ自由にアクセスできるリモートサーバーを作成することなく [分散](../../engines/table_engines/special/distributed.md)-エンジンのテーブル。 | +| [url](url.md) | を作成します。 [Url](../../engines/table_engines/special/url.md)-エンジンのテーブル。 | +| [mysql](mysql.md) | を作成します。 [MySQL](../../engines/table_engines/integrations/mysql.md)-エンジンのテーブル。 | +| [jdbc](jdbc.md) | を作成します。 [JDBC](../../engines/table_engines/integrations/jdbc.md)-エンジンのテーブル。 | +| [odbc](odbc.md) | を作成します。 [ODBC](../../engines/table_engines/integrations/odbc.md)-エンジンのテーブル。 | +| [hdfs](hdfs.md) | を作成します。 [HDFS](../../engines/table_engines/integrations/hdfs.md)-エンジンのテーブル。 | + +[元の記事](https://clickhouse.tech/docs/en/query_language/table_functions/) diff --git a/docs/ja/sql_reference/table_functions/input.md b/docs/ja/sql_reference/table_functions/input.md deleted file mode 120000 index c528e3325af..00000000000 --- a/docs/ja/sql_reference/table_functions/input.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/table_functions/input.md \ No newline at end of file diff --git a/docs/ja/sql_reference/table_functions/input.md b/docs/ja/sql_reference/table_functions/input.md new file mode 100644 index 00000000000..1fdd93c10a6 --- /dev/null +++ b/docs/ja/sql_reference/table_functions/input.md @@ -0,0 +1,47 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 
+toc_priority: 46 +toc_title: "\u5165\u529B" +--- + +# 入力 {#input} + +`input(structure)` -テーブル関数に送信されたデータを効果的に変換して挿入できるようにする +別の構造を持つテーブルに与えられた構造を持つサーバー。 + +`structure` -次の形式でサーバーに送信されるデータの構造 `'column1_name column1_type, column2_name column2_type, ...'`. +例えば, `'id UInt32, name String'`. + +この関数は、以下でのみ使用できます `INSERT SELECT` query and only once but otherwiseは通常のテーブル関数のように動作します +(たとえば、サブクエリなどで使用することができます。). + +データを送ることができ、そのような通常の `INSERT` 利用できる照会および渡される [書式](../../interfaces/formats.md#formats) +これは、クエリの最後に指定する必要があります(通常とは異なり `INSERT SELECT`). + +この機能はサーバからデータを受け取clientで同時に変換して +式のリストによると、 `SELECT` ターゲットテーブルに句と挿入します。 一時テーブル +すべての転送されたデータは作成されません。 + +**例** + +- させる `test` テーブルには以下の構造 `(a String, b String)` + とデータで `data.csv` 異なる構造を持っています `(col1 String, col2 Date, col3 Int32)`. 挿入のクエリ + からのデータ `data.csv` に `test` 同時変換のテーブルは次のようになります: + + + +``` bash +$ cat data.csv | clickhouse-client --query="INSERT INTO test SELECT lower(col1), col3 * col3 FROM input('col1 String, col2 Date, col3 Int32') FORMAT CSV"; +``` + +- もし `data.csv` 同じ構造のデータを含む `test_structure` としてのテーブル `test` そしてこれら二つのクエリが等しい: + + + +``` bash +$ cat data.csv | clickhouse-client --query="INSERT INTO test FORMAT CSV" +$ cat data.csv | clickhouse-client --query="INSERT INTO test SELECT * FROM input('test_structure') FORMAT CSV" +``` + +[元の記事](https://clickhouse.tech/docs/en/query_language/table_functions/input/) diff --git a/docs/ja/sql_reference/table_functions/jdbc.md b/docs/ja/sql_reference/table_functions/jdbc.md deleted file mode 120000 index 10c687ebcdd..00000000000 --- a/docs/ja/sql_reference/table_functions/jdbc.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/table_functions/jdbc.md \ No newline at end of file diff --git a/docs/ja/sql_reference/table_functions/jdbc.md b/docs/ja/sql_reference/table_functions/jdbc.md new file mode 100644 index 00000000000..dd32bef2989 --- /dev/null +++ b/docs/ja/sql_reference/table_functions/jdbc.md @@ -0,0 +1,29 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 43 +toc_title: jdbc +--- + +# jdbc {#table-function-jdbc} + +`jdbc(jdbc_connection_uri, schema, table)` -JDBCドライバ経由で接続されたテーブルを返します。 + +このテーブル関数は、個別の `clickhouse-jdbc-bridge` 実行するプログラム。 +でnullable種類に基づくddlのリモートテーブルが照会される). + +**例** + +``` sql +SELECT * FROM jdbc('jdbc:mysql://localhost:3306/?user=root&password=root', 'schema', 'table') +``` + +``` sql +SELECT * FROM jdbc('mysql://localhost:3306/?user=root&password=root', 'schema', 'table') +``` + +``` sql +SELECT * FROM jdbc('datasource://mysql-local', 'schema', 'table') +``` + +[元の記事](https://clickhouse.tech/docs/en/query_language/table_functions/jdbc/) diff --git a/docs/ja/sql_reference/table_functions/merge.md b/docs/ja/sql_reference/table_functions/merge.md deleted file mode 120000 index 4101f14ac54..00000000000 --- a/docs/ja/sql_reference/table_functions/merge.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/table_functions/merge.md \ No newline at end of file diff --git a/docs/ja/sql_reference/table_functions/merge.md b/docs/ja/sql_reference/table_functions/merge.md new file mode 100644 index 00000000000..7e20c9526d7 --- /dev/null +++ b/docs/ja/sql_reference/table_functions/merge.md @@ -0,0 +1,14 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 38 +toc_title: "\u30DE\u30FC\u30B8" +--- + +# マージ {#merge} + +`merge(db_name, 'tables_regexp')` – Creates a temporary Merge table. For more information, see the section “Table engines, Merge”. 
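+
+使用例を一つ示します(`currentDatabase()` の利用と正規表現 `'^hits'` は説明のための仮定です):
+
+``` sql
+-- count rows across all tables in the current database whose names start with 'hits'
+SELECT count() FROM merge(currentDatabase(), '^hits')
+```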
+ +テーブル構造は、正規表現に一致する最初のテーブルから取得されます。 + +[元の記事](https://clickhouse.tech/docs/en/query_language/table_functions/merge/) diff --git a/docs/ja/sql_reference/table_functions/mysql.md b/docs/ja/sql_reference/table_functions/mysql.md deleted file mode 120000 index 6208480e126..00000000000 --- a/docs/ja/sql_reference/table_functions/mysql.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/table_functions/mysql.md \ No newline at end of file diff --git a/docs/ja/sql_reference/table_functions/mysql.md b/docs/ja/sql_reference/table_functions/mysql.md new file mode 100644 index 00000000000..2b64b9a1a17 --- /dev/null +++ b/docs/ja/sql_reference/table_functions/mysql.md @@ -0,0 +1,86 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 42 +toc_title: mysql +--- + +# mysql {#mysql} + +許可 `SELECT` リモートMySQLサーバーに格納されているデータに対して実行されるクエリ。 + +``` sql +mysql('host:port', 'database', 'table', 'user', 'password'[, replace_query, 'on_duplicate_clause']); +``` + +**パラメータ** + +- `host:port` — MySQL server address. + +- `database` — Remote database name. + +- `table` — Remote table name. + +- `user` — MySQL user. + +- `password` — User password. + +- `replace_query` — Flag that converts `INSERT INTO` へのクエリ `REPLACE INTO`. もし `replace_query=1`、クエリが置き換えられます。 + +- `on_duplicate_clause` — The `ON DUPLICATE KEY on_duplicate_clause` に追加される式 `INSERT` クエリ。 + + Example: `INSERT INTO t (c1,c2) VALUES ('a', 2) ON DUPLICATE KEY UPDATE c2 = c2 + 1`, where `on_duplicate_clause` is `UPDATE c2 = c2 + 1`. See the MySQL documentation to find which `on_duplicate_clause` you can use with the `ON DUPLICATE KEY` clause. + + To specify `on_duplicate_clause` you need to pass `0` to the `replace_query` parameter. If you simultaneously pass `replace_query = 1` and `on_duplicate_clause`, ClickHouse generates an exception. 
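+
+例えば、`replace_query` を有効にした挿入は次のようになります(後述の使用例のテーブルを前提とした、説明のためのスケッチです):
+
+``` sql
+INSERT INTO FUNCTION mysql('localhost:3306', 'test', 'test', 'bayonet', '123', 1)
+VALUES (2, NULL, 3, NULL)
+```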
+ +シンプル `WHERE` 次のような句 `=, !=, >, >=, <, <=` 現在、MySQLサーバで実行されています。 + +残りの条件と `LIMIT` サンプリング制約は、MySQLへのクエリが終了した後にのみClickHouseで実行されます。 + +**戻り値** + +元のmysqlテーブルと同じカラムを持つテーブルオブジェクト。 + +## 使用例 {#usage-example} + +MySQLのテーブル: + +``` text +mysql> CREATE TABLE `test`.`test` ( + -> `int_id` INT NOT NULL AUTO_INCREMENT, + -> `int_nullable` INT NULL DEFAULT NULL, + -> `float` FLOAT NOT NULL, + -> `float_nullable` FLOAT NULL DEFAULT NULL, + -> PRIMARY KEY (`int_id`)); +Query OK, 0 rows affected (0,09 sec) + +mysql> insert into test (`int_id`, `float`) VALUES (1,2); +Query OK, 1 row affected (0,00 sec) + +mysql> select * from test; ++------+----------+-----+----------+ +| int_id | int_nullable | float | float_nullable | ++------+----------+-----+----------+ +| 1 | NULL | 2 | NULL | ++------+----------+-----+----------+ +1 row in set (0,00 sec) +``` + +ClickHouseからのデータの選択: + +``` sql +SELECT * FROM mysql('localhost:3306', 'test', 'test', 'bayonet', '123') +``` + +``` text +┌─int_id─┬─int_nullable─┬─float─┬─float_nullable─┐ +│ 1 │ ᴺᵁᴸᴸ │ 2 │ ᴺᵁᴸᴸ │ +└────────┴──────────────┴───────┴────────────────┘ +``` + +## また見なさい {#see-also} + +- [その ‘MySQL’ 表エンジン](../../engines/table_engines/integrations/mysql.md) +- [MySQLを外部辞書のソースとして使用する](../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md#dicts-external_dicts_dict_sources-mysql) + +[元の記事](https://clickhouse.tech/docs/en/query_language/table_functions/mysql/) diff --git a/docs/ja/sql_reference/table_functions/numbers.md b/docs/ja/sql_reference/table_functions/numbers.md deleted file mode 120000 index 7ccae937e91..00000000000 --- a/docs/ja/sql_reference/table_functions/numbers.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/table_functions/numbers.md \ No newline at end of file diff --git a/docs/ja/sql_reference/table_functions/numbers.md b/docs/ja/sql_reference/table_functions/numbers.md new file mode 100644 index 00000000000..668e3eee1c8 --- /dev/null +++ b/docs/ja/sql_reference/table_functions/numbers.md @@ -0,0 +1,30 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 39 +toc_title: "\u6570\u5B57" +--- + +# 数字 {#numbers} + +`numbers(N)` – Returns a table with the single ‘number’ 0からN-1までの整数を含む列(UInt64)。 +`numbers(N, M)` -単一のテーブルを返す ‘number’ nから(N+M-1)までの整数を含む列(UInt64)。 + +に似て `system.numbers` テーブルに使用でき試験および発生連続値, `numbers(N, M)` より有効より `system.numbers`. 
+ +次のクエリは同等です: + +``` sql +SELECT * FROM numbers(10); +SELECT * FROM numbers(0, 10); +SELECT * FROM system.numbers LIMIT 10; +``` + +例: + +``` sql +-- Generate a sequence of dates from 2010-01-01 to 2010-12-31 +select toDate('2010-01-01') + number as d FROM numbers(365); +``` + +[元の記事](https://clickhouse.tech/docs/en/query_language/table_functions/numbers/) diff --git a/docs/ja/sql_reference/table_functions/odbc.md b/docs/ja/sql_reference/table_functions/odbc.md deleted file mode 120000 index af2edcdc7a3..00000000000 --- a/docs/ja/sql_reference/table_functions/odbc.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/table_functions/odbc.md \ No newline at end of file diff --git a/docs/ja/sql_reference/table_functions/odbc.md b/docs/ja/sql_reference/table_functions/odbc.md new file mode 100644 index 00000000000..be43586454c --- /dev/null +++ b/docs/ja/sql_reference/table_functions/odbc.md @@ -0,0 +1,108 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 44 +toc_title: odbc +--- + +# odbc {#table-functions-odbc} + +接続されたテーブルを返します。 [ODBC](https://en.wikipedia.org/wiki/Open_Database_Connectivity). + +``` sql +odbc(connection_settings, external_database, external_table) +``` + +パラメータ: + +- `connection_settings` — Name of the section with connection settings in the `odbc.ini` ファイル。 +- `external_database` — Name of a database in an external DBMS. +- `external_table` — Name of a table in the `external_database`. + +ODBC接続を安全に実装するには、ClickHouseは別のプログラムを使用します `clickhouse-odbc-bridge`. ODBCドライバーが直接読み込まれている場合 `clickhouse-server` ドライバの問題でクラッシュのClickHouseサーバーです。 クリックハウスが自動的に起動 `clickhouse-odbc-bridge` それが必要なとき。 ODBCブリッジプログラムは、次のパッケージと同じパッケー `clickhouse-server`. + +のフィールド `NULL` 外部テーブルの値は、基本データ型のデフォルト値に変換されます。 例えば、リモートMySQLテーブル分野の `INT NULL` タイプは0に変換されます(ClickHouseのデフォルト値 `Int32` データ型)。 + +## 使用例 {#usage-example} + +**PpsはインタラクティブのMySQLのインストール目盛** + +この例は、ubuntu linux18.04およびmysql server5.7で確認されています。 + +UnixODBCとMySQL Connectorがインストールされていることを確認します。 + +デフォルトでインストールされた場合、パッケージから),clickhouse開始してユーザー `clickhouse`. したがって、MySQLサーバでこのユーザを作成して設定する必要があります。 + +``` bash +$ sudo mysql +``` + +``` sql +mysql> CREATE USER 'clickhouse'@'localhost' IDENTIFIED BY 'clickhouse'; +mysql> GRANT ALL PRIVILEGES ON *.* TO 'clickhouse'@'clickhouse' WITH GRANT OPTION; +``` + +次に、接続を設定します `/etc/odbc.ini`. + +``` bash +$ cat /etc/odbc.ini +[mysqlconn] +DRIVER = /usr/local/lib/libmyodbc5w.so +SERVER = 127.0.0.1 +PORT = 3306 +DATABASE = test +USERNAME = clickhouse +PASSWORD = clickhouse +``` + +チェックでき、接続を使用 `isql` unixODBCインストールからのユーティリティ。 + +``` bash +$ isql -v mysqlconn ++-------------------------+ +| Connected! | +| | +... 
+``` + +MySQLのテーブル: + +``` text +mysql> CREATE TABLE `test`.`test` ( + -> `int_id` INT NOT NULL AUTO_INCREMENT, + -> `int_nullable` INT NULL DEFAULT NULL, + -> `float` FLOAT NOT NULL, + -> `float_nullable` FLOAT NULL DEFAULT NULL, + -> PRIMARY KEY (`int_id`)); +Query OK, 0 rows affected (0,09 sec) + +mysql> insert into test (`int_id`, `float`) VALUES (1,2); +Query OK, 1 row affected (0,00 sec) + +mysql> select * from test; ++------+----------+-----+----------+ +| int_id | int_nullable | float | float_nullable | ++------+----------+-----+----------+ +| 1 | NULL | 2 | NULL | ++------+----------+-----+----------+ +1 row in set (0,00 sec) +``` + +ClickHouseのMySQLテーブルからデータを取得する: + +``` sql +SELECT * FROM odbc('DSN=mysqlconn', 'test', 'test') +``` + +``` text +┌─int_id─┬─int_nullable─┬─float─┬─float_nullable─┐ +│ 1 │ 0 │ 2 │ 0 │ +└────────┴──────────────┴───────┴────────────────┘ +``` + +## また見なさい {#see-also} + +- [ODBC外部辞書](../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md#dicts-external_dicts_dict_sources-odbc) +- [ODBCテーブルエンジン](../../engines/table_engines/integrations/odbc.md). + +[元の記事](https://clickhouse.tech/docs/en/query_language/table_functions/jdbc/) diff --git a/docs/ja/sql_reference/table_functions/remote.md b/docs/ja/sql_reference/table_functions/remote.md deleted file mode 120000 index 2846ac2c309..00000000000 --- a/docs/ja/sql_reference/table_functions/remote.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/table_functions/remote.md \ No newline at end of file diff --git a/docs/ja/sql_reference/table_functions/remote.md b/docs/ja/sql_reference/table_functions/remote.md new file mode 100644 index 00000000000..82ccc31d80a --- /dev/null +++ b/docs/ja/sql_reference/table_functions/remote.md @@ -0,0 +1,83 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 40 +toc_title: "\u30EA\u30E2\u30FC\u30C8" +--- + +# リモート,remoteSecure {#remote-remotesecure} + +へ自由にアクセスできるリモートサーバーを作成することなく `Distributed` テーブル。 + +署名: + +``` sql +remote('addresses_expr', db, table[, 'user'[, 'password']]) +remote('addresses_expr', db.table[, 'user'[, 'password']]) +``` + +`addresses_expr` – An expression that generates addresses of remote servers. This may be just one server address. The server address is `host:port`、またはちょうど `host`. ホストは、サーバー名またはIPv4またはIPv6アドレスとして指定できます。 IPv6アドレスは角かっこで指定します。 ポートは、リモートサーバー上のTCPポートです。 ポートが省略された場合は、以下を使用します `tcp_port` サーバの設定ファイルから(デフォルトでは9000) + +!!! important "重要" + このポートはipv6アドレスに必要です。 + +例: + +``` text +example01-01-1 +example01-01-1:9000 +localhost +127.0.0.1 +[::]:9000 +[2a02:6b8:0:1111::11]:9000 +``` + +複数のアドレスをコンマ区切りにできます。 この場合、clickhouseは分散処理を使用するため、指定されたすべてのアドレス(異なるデータを持つシャードなど)にクエリを送信します。 + +例えば: + +``` text +example01-01-1,example01-02-1 +``` + +式の一部は、中括弧で指定できます。 前の例は次のように書くことができます: + +``` text +example01-0{1,2}-1 +``` + +中括弧は、二つのドット(負でない整数)で区切られた数の範囲を含めることができます。 この場合、範囲はシャードアドレスを生成する値のセットに拡張されます。 最初の数値がゼロから始まる場合、値は同じゼロ整列で形成されます。 前の例は次のように書くことができます: + +``` text +example01-{01..02}-1 +``` + +中括弧の複数のペアがある場合は、対応するセットの直接積が生成されます。 + +中括弧の中のアドレスとアドレスの一部は、パイプ記号(\|)で区切ることができます。 この場合、対応するアドレスのセットはレプリカとして解釈され、クエリは最初の正常なレプリカに送信されます。 ただし、レプリカは、現在設定されている順序で反復されます。 [load\_balancing](../../operations/settings/settings.md) 設定。 + +例えば: + +``` text +example01-{01..02}-{1|2} +``` + +この例では、指定さつする資料をそれぞれ二つのレプリカ. 
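+
+なお、完全な呼び出しは例えば次のようになります(アドレスは説明のための仮定です):
+
+``` sql
+SELECT * FROM remote('127.0.0.1', system.one)
+```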
+ +生成されるアドレスの数は定数によって制限されます。 今これは1000アドレスです。 + +を使用して `remote` テーブル関数が作成するよりも最適ではない `Distributed` この場合、サーバー接続はすべての要求に対して再確立されるためです。 さらに、ホスト名が設定されている場合、名前は解決され、さまざまなレプリカを操作するときにエラーはカウントされません。 多数のクエリを処理する場合は、常に `Distributed` テーブルは時間に先んじて、使用しないし `remote` テーブル機能。 + +その `remote` テーブル関数は、次の場合に便利です: + +- アクセスの特定のサーバーのためのデータとの比較、デバッグ、テスト実施をしておりました。 +- 研究目的のための様々なclickhouseクラスタ間のクエリ。 +- 手動で行われる頻度の低い分散要求。 +- サーバーのセットが毎回再定義される分散要求。 + +ユーザーが指定されていない場合, `default` 使用される。 +パスワードを指定しない場合は、空のパスワードが使用されます。 + +`remoteSecure` -と同じ `remote` but with secured connection. Default port — [tcp\_port\_secure](../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-tcp_port_secure) 設定または9440から。 + +[元の記事](https://clickhouse.tech/docs/en/query_language/table_functions/remote/) diff --git a/docs/ja/sql_reference/table_functions/url.md b/docs/ja/sql_reference/table_functions/url.md deleted file mode 120000 index e336a603800..00000000000 --- a/docs/ja/sql_reference/table_functions/url.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/sql_reference/table_functions/url.md \ No newline at end of file diff --git a/docs/ja/sql_reference/table_functions/url.md b/docs/ja/sql_reference/table_functions/url.md new file mode 100644 index 00000000000..d46c567d402 --- /dev/null +++ b/docs/ja/sql_reference/table_functions/url.md @@ -0,0 +1,26 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 41 +toc_title: url +--- + +# url {#url} + +`url(URL, format, structure)` -から作成されたテーブルを返します `URL` 与えられると +`format` と `structure`. + +URL-受け入れることができるHTTPまたはHTTPSサーバアドレス `GET` および/または `POST` 要求。 + +書式 - [書式](../../interfaces/formats.md#formats) データの。 + +構造-テーブルの構造 `'UserID UInt64, Name String'` フォーマット。 列の名前と型を決定します。 + +**例えば** + +``` sql +-- getting the first 3 lines of a table that contains columns of String and UInt32 type from HTTP-server which answers in CSV format. +SELECT * FROM url('http://127.0.0.1:12345/', CSV, 'column1 String, column2 UInt32') LIMIT 3 +``` + +[元の記事](https://clickhouse.tech/docs/en/query_language/table_functions/url/) diff --git a/docs/ja/whats_new/changelog/2017.md b/docs/ja/whats_new/changelog/2017.md deleted file mode 120000 index a098eddf1d8..00000000000 --- a/docs/ja/whats_new/changelog/2017.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/whats_new/changelog/2017.md \ No newline at end of file diff --git a/docs/ja/whats_new/changelog/2017.md b/docs/ja/whats_new/changelog/2017.md new file mode 100644 index 00000000000..ba53fbcf9d3 --- /dev/null +++ b/docs/ja/whats_new/changelog/2017.md @@ -0,0 +1,268 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 79 +toc_title: '2017' +--- + +### ClickHouseリリース1.1.54327、2017-12-21 {#clickhouse-release-1-1-54327-2017-12-21} + +このリリ: + +- 固定バグでレースの条件で接続文字列の構文は以下のようにこのデータが失われます。 この問題はバージョン1.1.54310および1.1.54318に影響します。 ご利用の場合これらのバージョンとの複製のテーブルを更新することが強く推奨されます。 この問題にログ警告メッセージのように `Part ... 
from own log doesn't exist.` この問題は、これらのメッセージがログに表示されない場合でも関連します。 + +### ClickHouseリリース1.1.54318、2017-11-30 {#clickhouse-release-1-1-54318-2017-11-30} + +このリリ: + +- SummingMergeTreeエンジンのマージ中に誤った行の削除が行われていた問題を修正しました +- 複雑でないmergetreeエンジンのメモリリークを修正しました +- MergeTreeエンジンで頻繁に挿入されるパフォーマンスの低下を修正 +- レプリケーションキューの実行を停止する問題を修正しました +- サーバーログの固定回転とアーカイブ + +### ClickHouseリリース1.1.54310、2017-11-01 {#clickhouse-release-1-1-54310-2017-11-01} + +#### 新しい機能: {#new-features} + +- カスタムパーティショニングキーのmergetree家族のテーブルエンジンです。 +- [カフカname](https://clickhouse.yandex/docs/en/operations/table_engines/kafka/) テーブルエンジン。 +- 読み込みのサポートを追加 [CatBoost](https://catboost.yandex/) モデルとそれらをClickHouseに格納されたデータに適用する。 +- UTCから非整数オフセットのタイムゾーンのサポートが追加されました。 +- 時間間隔による算術演算のサポートが追加されました。 +- Date型とDateTime型の値の範囲は、2105年に拡張されます。 +- を追加しました `CREATE MATERIALIZED VIEW x TO y` query(マテリアライズドビューのデータを格納するための既存のテーブルを指定します)。 +- を追加しました `ATTACH TABLE` 引数なしのクエリ。 +- SummingMergeTreeテーブルのマップで終わる名前を持つ入れ子になった列の処理ロジックが、sumMap集計関数に抽出されました。 これで、このような列を明示的に指定できます。 +- IP trie辞書の最大サイズは128Mのエントリに増加します。 +- GetSizeOfEnumType関数を追加しました。 +- SumWithOverflow集計関数を追加しました。 +- Cap'n Proto入力形式のサポートが追加されました。 +- では、今までのカスタマイズの圧縮レベル使用時のzstdアルゴリズムです。 + +#### 下位互換性のない変更: {#backward-incompatible-changes} + +- メモリ以外のエンジンを使用して一時テーブルを作成することはできません。 +- ビューまたはmaterializedviewエンジンでテーブルを明示的に作成することはできません。 +- 中表を作成し、新しいチェックを確認し、サンプリングキー表現が含まれ、その有効なタイプを利用します。 + +#### バグ修正: {#bug-fixes} + +- 分散テーブルに同期的に挿入するときのハングアップを修正しました。 +- レプリケートされたテーブル内の部品の追加と削除を修正しました。 +- マテリアライズドビューに挿入されたデータは、不要な重複排除を受けません。 +- ローカルレプリカが遅延し、リモートレプリカが使用できない分散テーブルへのクエリを実行しても、エラーは発生しません。 +- ユーザーにはアクセス許可は必要ありません。 `default` 一時テーブルを作成するデータベース。 +- 固定チを指定する場合は、配列型な論争することができます。 +- 固定hangups時のディスク容量を含むサーバのログします。 +- Unixエポックの最初の週のtoRelativeWeekNum関数のオーバーフローを修正しました。 + +#### ビルドの改善: {#build-improvements} + +- いくつかのサードパーティのライブラリ(特にpoco)が更新され、git submodulesに変換されました。 + +### ClickHouseリリース1.1.54304,2017-10-19 {#clickhouse-release-1-1-54304-2017-10-19} + +#### 新しい機能: {#new-features-1} + +- ネイティブプロトコルでのtlsサポート(有効にするには、 `tcp_ssl_port` で `config.xml` ). + +#### バグ修正: {#bug-fixes-1} + +- `ALTER` のための複製のテーブル現しようと走り出しています。 +- 固定波データを読み込むときに設定 `preferred_block_size_bytes=0.` +- の固定クラッシュ `clickhouse-client` 押すとき `Page Down` +- 特定の複雑なクエリの正しい解釈 `GLOBAL IN` と `UNION ALL` +- `FREEZE PARTITION` 常に今原子的に動作します。 +- 空のpost要求は、コード411で応答を返すようになりました。 +- のような式のための固定解釈エラー `CAST(1 AS Nullable(UInt8)).` +- 読み込み時のエラーを修正 `Array(Nullable(String))` からの列 `MergeTree` テーブル。 +- 固定表の構文解析時にクエリのように `SELECT dummy AS dummy, dummy AS b` +- ユーザーは無効で正しく更新されます `users.xml` +- 実行可能ディクショナリがゼロ以外の応答コードを返す場合の正しい処理。 + +### ClickHouseリリース1.1.54292、2017-09-20 {#clickhouse-release-1-1-54292-2017-09-20} + +#### 新しい機能: {#new-features-2} + +- を追加しました `pointInPolygon` 座標平面上の座標を操作するための関数。 +- を追加しました `sumMap` 次のような配列の合計を計算するための集約関数 `SummingMergeTree`. +- を追加しました `trunc` 機能。 丸め関数のパフォーマンスの向上 (`round`, `floor`, `ceil`, `roundToExp2` を正の論理をどのように。 のロジックを変更 `roundToExp2` 分数と負の数の関数。 +- ClickHouseの実行可能ファイルはlibcのバージョンにあまり依存しません。 同じClickHouseの実行可能ファイルは、Linuxシステムの多種多様で実行することができます。 コンパイルされたクエリを使用するときにはまだ依存関係があります(設定 `compile = 1` デフォルトでは使用されません)。 +- クエリの動的コンパイルに要する時間を短縮しました。 + +#### バグ修正: {#bug-fixes-2} + +- 時々生成エラーを修正しました `part ... 
intersects previous part` というメッセージが生成され、レプリカの一貫性が低下する問題。
+- シャットダウン時にZooKeeperが利用できない場合に、サーバーがロックする原因となったエラーを修正しました。
+- レプリカの復元時の過剰なログ出力を削除しました。
+- UNION ALLの実装のエラーを修正しました。
+- ブロック内の最初の列が配列型の場合に発生したconcat関数のエラーを修正しました。
+- system.mergesテーブルで進捗が正しく表示されるようになりました。
+
+### ClickHouseリリース1.1.54289、2017-09-13 {#clickhouse-release-1-1-54289-2017-09-13}
+
+#### 新しい機能: {#new-features-3}
+
+- サーバー管理のための `SYSTEM` クエリ: `SYSTEM RELOAD DICTIONARY`, `SYSTEM RELOAD DICTIONARIES`, `SYSTEM DROP DNS CACHE`, `SYSTEM SHUTDOWN`, `SYSTEM KILL`.
+- 配列を操作するための関数を追加しました: `concat`, `arraySlice`, `arrayPushBack`, `arrayPushFront`, `arrayPopBack`, `arrayPopFront`. (簡単な使用例は下記1.1.54282の節の後に示します。)
+- ZooKeeper設定に `root` と `identity` パラメーターを追加しました。 これにより、同じZooKeeperクラスター上の個々のユーザーを分離できます。
+- 集計関数 `groupBitAnd`, `groupBitOr`、`groupBitXor` を追加しました(互換性のため、`BIT_AND`, `BIT_OR`、`BIT_XOR` という名前でも利用できます)。
+- 外部辞書をMySQLからロードする際に、ファイルシステム上のソケットを指定できるようになりました。
+- 外部辞書をMySQLからSSL経由でロードできるようになりました (`ssl_cert`, `ssl_key`, `ssl_ca` パラメータ)。
+- ユーザーごとのクエリの全体的な帯域幅の使用を制限する `max_network_bandwidth_for_user` 設定を追加しました。
+- 一時テーブルに対する `DROP TABLE` のサポート。
+- `CSV` と `JSONEachRow` フォーマットで、Unixタイムスタンプ形式の `DateTime` 値の読み取りをサポートしました。
+- 分散クエリで遅延レプリカが既定で除外されるようになりました(既定のしきい値は5分)。
+- 継続的に実行されるクエリがある場合でも、ALTERクエリが無期限にブロックされることはなくなりました。
+- 設定ファイルで `umask` を設定するオプション。
+- `DISTINCT` を使用するクエリのパフォーマンスを向上しました。
+
+#### バグ修正: {#bug-fixes-3}
+
+- ZooKeeperの古いノードを削除するプロセスを改善しました。 以前は、非常に頻繁な挿入があった場合に古いノードが削除されないことがあり、サーバーのシャットダウンが遅くなっていました。
+- ZooKeeperへの接続時のホスト選択のランダム化を修正しました。
+- レプリカがlocalhostの場合に、分散クエリで遅延レプリカが除外されない問題を修正しました。
+- `Nested` 構造の要素に対する `ALTER MODIFY` の後に、`ReplicatedMergeTree` テーブルのデータパーツが壊れるエラーを修正しました。
+- SELECTクエリが「ハング」する原因となるエラーを修正しました。
+- 分散DDLクエリの改善。
+- `CREATE TABLE ... AS <materialized view>` クエリを修正しました。
+- `Buffer` テーブルに対する `ALTER ... CLEAR COLUMN IN PARTITION` クエリのデッドロックを解決しました。
+- `JSONEachRow` と `TSKV` フォーマットを使用する場合の `Enum` の無効なデフォルト値(最小値の代わりに0)を修正しました。
+- `executable` ソースの辞書を使用する際にゾンビプロセスが発生する問題を解決しました。
+- HEADクエリでのsegfaultを修正しました。
+
+#### ClickHouseの開発とビルドのワークフローの改善: {#improved-workflow-for-developing-and-assembling-clickhouse}
+
+- ClickHouseのビルドに `pbuilder` を使用できるようになりました。
+- Linux上のビルドで `libstdc++` の代わりに `libc++` を使用できるようになりました。
+- 静的コード分析ツールを使用するための手順を追加しました: `Coverage`, `clang-tidy`, `cppcheck`.
+
+#### ご注意ください: {#please-note-when-upgrading}
+
+- MergeTree設定 `max_bytes_to_merge_at_max_space_in_pool`(マージするデータパーツの最大合計サイズ、バイト単位)のデフォルト値が100GiBから150GiBに増加しました。 この結果、サーバーのアップグレード後に大きなマージが実行され、ディスクサブシステムの負荷が増加する可能性があります。 サーバー上で利用可能な空き領域が、実行中のマージの合計量の2倍未満である場合、小さなデータパーツのマージを含め、他のすべてのマージが停止します。 その結果、INSERTクエリは “Merges are processing significantly slower than inserts.” というメッセージで失敗します。 状況の監視には `SELECT * FROM system.merges` クエリを使用してください。 また、`system.metrics` テーブルまたはGraphiteで `DiskSpaceReservedForMerge` メトリックを確認することもできます。 大きなマージが完了すれば問題は解決するため、特に対処する必要はありません。 これが受け入れられない場合は、`max_bytes_to_merge_at_max_space_in_pool` 設定を以前の値に戻すことができます。 これを行うには、config.xmlの `<merge_tree>` セクションで ``` <max_bytes_to_merge_at_max_space_in_pool>107374182400</max_bytes_to_merge_at_max_space_in_pool> ``` を設定し、サーバーを再起動します。
+
+### ClickHouseリリース1.1.54284,2017-08-29 {#clickhouse-release-1-1-54284-2017-08-29}
+
+- これは以前の1.1.54282リリースのバグ修正リリースです。 ZooKeeperのパーツディレクトリのリークを修正します。
+
+### ClickHouseリリース1.1.54282、2017-08-23 {#clickhouse-release-1-1-54282-2017-08-23}
+
+このリリースには、以前のリリース1.1.54276のバグ修正が含まれています:
+
+- 分散テーブルへの挿入時の `DB::Exception: Assertion violation: !_path.empty()` を修正しました。
+- 入力データが ';' で始まる場合のRowBinary形式での挿入時の解析を修正しました。
+- 特定の集計関数(例: `groupArray()`)の実行時コンパイル中のエラーを修正しました。
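+
+参考として、上の1.1.54289で追加されたと記載した配列関数のうちいくつかの最小限の使用例を示します(コメント内の結果は説明用の想定値であり、あくまで例示です)。
+
+``` sql
+-- 1.1.54289で追加された配列関数の使用例
+SELECT
+    arraySlice([1, 2, 3, 4, 5], 2, 3) AS sliced, -- [2, 3, 4]
+    arrayPushBack([1, 2], 3) AS pushed,          -- [1, 2, 3]
+    arrayPopFront([1, 2, 3]) AS popped           -- [2, 3]
+```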
+ +### Clickhouseリリース1.1.54276,2017-08-16 {#clickhouse-release-1-1-54276-2017-08-16} + +#### 新しい機能: {#new-features-4} + +- 選択クエリのセクションでオプションを追加しました。 クエリ例: `WITH 1+1 AS a SELECT a, a*a` +- すべてのデータがすべてのシャードに保存された後にのみ、okが返されます。 これは、insert\_distributed\_sync=1の設定によって有効になります。 +- 16バイトの識別子を扱うためのuuidデータ型を追加しました。 +- タブローとの互換性のためにchar、floatおよび他のタイプのエイリアスを追加しました。 +- 時間を数値に変換する関数toyyyyymm、toyyyymmdd、およびtoyyyymmddhhmmssを追加しました。 +- IPアドレス(ホスト名と共に)を使用して、クラスタ化されたDDLクエリのサーバーを識別できます。 +- 関数内の非定数引数と負のオフセットのサポートが追加されました `substring(str, pos, len).` +- のmax\_sizeパラメーターを追加しました。 `groupArray(max_size)(column)` 集約関数、およびその性能を最適化しました。 + +#### 主な変更点: {#main-changes} + +- セキュリティの改善:すべてのサーバーファイルは0640権限で作成されます(変更できます 設定パラメータ)。 +- 無効な構文のクエリの改善されたエラーメッセージ。 +- 低メモリ消費をとした場合の性能改善の統合大分mergetreeデータです。 +- ReplacingMergeTreeエンジンのデータマージのパフォーマンスが大幅に向上しました。 +- 性能向上のための非同期に挿入しますから分散型のテーブルを組み合わせで複数のソースしました。 この機能を有効にするには、distributed\_directory\_monitor\_batch\_inserts=1という設定を使用します。 + +#### 下位互換性のない変更: {#backward-incompatible-changes-1} + +- の集約状態のバイナリ形式を変更しました `groupArray(array_column)` 配列の関数。 + +#### 変更の完全なリスト: {#complete-list-of-changes} + +- を追加しました `output_format_json_quote_denormals` これにより、nanとinfの値をJSON形式で出力することができます。 +- 最適化したストリーム配置の場合を読み込んで配布します。 +- 値が変更されない場合は、readonlyモードで設定を行うことができます。 +- Preferred\_block\_size\_bytes設定で指定されたブロックサイズの制限を満たすために、MergeTreeエンジンの非整数granules粒を取得する機能を追加しました。 のための消費量を削減RAMを増やキャッシュの地域が処理クエリーテーブルの大きい。 +- 次のような式を含むインデックスの効率的な使用 `toStartOfHour(x)` のような条件の場合 `toStartOfHour(x) op сonstexpr.` +- MergeTreeエンジンの新しい設定(configのmerge\_treeセクション)を追加しました。xml): + - replicated\_deduplication\_window\_secondsは、レプリケートされたテーブルの挿入の重複除外に使用できる秒数を設定します。 + - cleanup\_delay\_periodは、古いデータを削除するためにクリーンアップを開始する頻度を設定します。 + - replicated\_can\_become\_leaderでは、レプリカがリーダーにならないようにすることができます(マージの割り当て)。 +- 加速の清掃除時代遅れからのデータの飼育係. 
+- クラスタ化されたddlクエリの複数の改善と修正。 特に重要なのは、クラスタ内のサーバーからの応答を待つ時間を制限する新しい設定distributed\_ddl\_task\_timeoutです。 ddl要求がすべてのホストで実行されていない場合、応答にタイムアウトエラーが含まれ、要求は非同期モードで実行されます。 +- サーバーログにおけるスタックトレースの表示の改善。 +- を追加しました “none” 圧縮方法の値。 +- Configで複数のdictionaries\_configセクションを使用できます。xmlだ +- ファイルシステムのソケットを介してmysqlに接続することは可能です。 +- システム。部品表は、新しい列に関する情報はサイズメッセージが表示され、バイトです。 + +#### バグ修正: {#bug-fixes-4} + +- マージテーブルを使用する分散テーブルは、現在の条件と選択クエリのために正しく動作します `_table` フィールド。 +- ReplicatedMergeTreeでデータパーツをチェックする際の競合状態を修正しました。 +- 上の固定可能な凍結 “leader election” サーバーを起動するとき。 +- データソースのローカルレプリカを使用する場合、max\_replica\_delay\_for\_distributed\_queries設定は無視されました。 これは修正されました。 +- の誤った動作を修正しました `ALTER TABLE CLEAR COLUMN IN PARTITION` 既存の列以外の列をクリーンアップしようとした場合。 +- 空の配列または文字列を使用する場合、multif関数の例外を修正しました。 +- 固定の過剰なメモリ割当ての場合deserializingネイティブ形式です。 +- トライ辞書の不正な自動更新を修正しました。 +- 固定の例外実行時にクエリを処理するクラウドの場合、group by節からmergeテーブル使用時のサンプルです。 +- Distributed\_aggregation\_memory\_efficient=1を使用したときにGROUP BYがクラッシュする問題を修正しました。 +- これで、データベースを指定できます。inとjoinの右側にあるテーブル。 +- 並列集約に使用されるスレッドが多すぎます。 これは修正されました。 +- どのように固定 “if” 関数は、FixedString引数で動作します。 +- 重量が0のシャードの分散テーブルから誤って処理されたものを選択します。 これは修正されました。 +- 実行中 `CREATE VIEW IF EXISTS no longer causes crashes.` +- Input\_format\_skip\_unknown\_fields=1が設定され、負の数がある場合の動作が正しくない問題を修正しました。 +- の無限ループを修正しました `dictGetHierarchy()` 辞書に無効なデータがある場合は機能します。 +- 固定 `Syntax error: unexpected (...)` INまたはJOIN句およびMergeテーブル内のサブクエリを使用して分散クエリを実行するときのエラー。 +- 辞書テーブルからの選択クエリの誤った解釈を修正しました。 +- 修正された “Cannot mremap” 2億以上の要素を持つinおよびJOIN句で配列を使用するときにエラーが発生します。 +- ソースとしてmysqlと辞書のフェイルオーバーを修正しました。 + +#### 改善ワークフローの開発と組み立てclickhouse: {#improved-workflow-for-developing-and-assembling-clickhouse-1} + +- ビルドはarcadiaで組み立てることができます。 +- Gcc7を使用してClickHouseをコンパイルできます。 +- Ccache+distccを使用した並列ビルドの方が高速になりました。 + +### ClickHouseリリース1.1.54245,2017-07-04 {#clickhouse-release-1-1-54245-2017-07-04} + +#### 新しい機能: {#new-features-5} + +- 分散ddl(例, `CREATE TABLE ON CLUSTER`) +- 複製されたクエリ `ALTER TABLE CLEAR COLUMN IN PARTITION.` +- エンジンのための辞書のテーブル(アクセスの辞書データの形式で表)。 +- 辞書データベースエンジン(タイプのデータベースに自動的に辞書で使用可能なテーブルを接続外部辞書). 
+- ソースに要求を送信することによって、辞書の更新を確認できます。 +- 修飾された列名 +- 二重引用符を使用して識別子を引用する。 +- セッションを行うhttpのインタフェース。 +- レプリケートされたテーブルの最適化クエリは、リーダーだけでなく実行できます。 + +#### 下位互換性のない変更: {#backward-incompatible-changes-2} + +- 削除セットグローバル。 + +#### 軽微な変更: {#minor-changes} + +- アラートがトリガーされると、ログは完全なスタックトレースを出力します。 +- 起動時に破損/余分なデータパーツの数の検証を緩和しました(誤検出が多すぎます)。 + +#### バグ修正: {#bug-fixes-5} + +- 接続不良を修正しました “sticking” 分散テーブルに挿入するとき。 +- GLOBAL INは、分散テーブルを参照するMergeテーブルからのクエリに対して機能します。 +- Google Compute Engine仮想マシンでコアの数が正しくないことが検出されました。 これは修正されました。 +- キャッシュされた外部ディクショナリの実行可能ソースの動作の変更 +- ヌル文字を含む文字列の比較を修正しました。 +- 定数を持つfloat32主キーフィールドの比較を修正しました。 +- 従来、不正確な見積りのサイズの分野が過度に大きな分配すべき。 +- ALTERを使用してテーブルに追加されたNull許容列を照会するとクラッシュする問題を修正しました。 +- 行の数が制限よりも小さい場合、null可能な列でソートするとクラッシュする問題を修正しました。 +- 定数値のみで構成されるorder byサブクエリを修正しました。 +- については、従来、複製テーブルが残る無効な状態にした後、失敗した下表に示す。 +- 空の結果を持つスカラーサブクエリのエイリアスは失われなくなりました。 +- .soファイルが破損した場合、コンパイルを使用したクエリはエラーで失敗しません。 diff --git a/docs/ja/whats_new/changelog/2018.md b/docs/ja/whats_new/changelog/2018.md deleted file mode 120000 index 124fb19e175..00000000000 --- a/docs/ja/whats_new/changelog/2018.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/whats_new/changelog/2018.md \ No newline at end of file diff --git a/docs/ja/whats_new/changelog/2018.md b/docs/ja/whats_new/changelog/2018.md new file mode 100644 index 00000000000..be61253c022 --- /dev/null +++ b/docs/ja/whats_new/changelog/2018.md @@ -0,0 +1,1063 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_priority: 78 +toc_title: '2018' +--- + +## ClickHouseリリース18.16 {#clickhouse-release-18-16} + +### ClickHouseリリース18.16.1、2018-12-21 {#clickhouse-release-18-16-1-2018-12-21} + +#### バグ修正: {#bug-fixes} + +- ODBCソースで辞書を更新する際の問題を引き起こしたエラーを修正しました。 [\#3825](https://github.com/ClickHouse/ClickHouse/issues/3825), [\#3829](https://github.com/ClickHouse/ClickHouse/issues/3829) +- 集計関数のjitコンパイルは、lowcardinality列で動作するようになりました。 [\#3838](https://github.com/ClickHouse/ClickHouse/issues/3838) + +#### 改善: {#improvements} + +- を追加しました `low_cardinality_allow_in_native_format` 設定(デフォルトで有効)。 無効にすると、LowCardinality列はSELECTクエリでは通常の列に変換され、INSERTクエリでは通常の列が必要になります。 [\#3879](https://github.com/ClickHouse/ClickHouse/pull/3879) + +#### ビルドの改善: {#build-improvements} + +- MacOSとARM上のビルドの修正。 + +### ClickHouseリリース18.16.0、2018-12-14 {#clickhouse-release-18-16-0-2018-12-14} + +#### 新しい機能: {#new-features} + +- `DEFAULT` 式の評価のために欠けるのはデータの読み込みに半構造の入力形式 (`JSONEachRow`, `TSKV`). この機能は、 `insert_sample_with_metadata` 設定。 [\#3555](https://github.com/ClickHouse/ClickHouse/pull/3555) +- その `ALTER TABLE` クエリは現在、 `MODIFY ORDER BY` テーブル列を追加または削除するときの並べ替えキーの変更のアクション。 これは、 `MergeTree` 家族を追加タスクの場合はこのソートキー、など `SummingMergeTree`, `AggregatingMergeTree`、というように。 [\#3581](https://github.com/ClickHouse/ClickHouse/pull/3581) [\#3755](https://github.com/ClickHouse/ClickHouse/pull/3755) +- のテーブルのため `MergeTree` これで、別の並べ替えキーを指定できます (`ORDER BY`)および索引 (`PRIMARY KEY`). ソートキーはインデックスよりも長くできます。 [\#3581](https://github.com/ClickHouse/ClickHouse/pull/3581) +- を追加しました `hdfs` テーブル機能および `HDFS` テーブルエンジンの輸出入データHDFS. [chenxing-xc](https://github.com/ClickHouse/ClickHouse/pull/3617) +- Base64での作業のための機能を追加しました: `base64Encode`, `base64Decode`, `tryBase64Decode`. 
[Alexander Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/3350) +- これで、パラメータを使用しての精度を設定できます `uniqCombined` 集約関数(HyperLogLogセルの数を選択する)。 [\#3406](https://github.com/ClickHouse/ClickHouse/pull/3406) +- を追加しました `system.contributors` ClickHouseでコミットを行ったすべてのユーザーの名前を含むテーブル。 [\#3452](https://github.com/ClickHouse/ClickHouse/pull/3452) +- のパーティションを省略する機能を追加しました `ALTER TABLE ... FREEZE` クエリをバックアップするには、全ての仕切ります。 [\#3514](https://github.com/ClickHouse/ClickHouse/pull/3514) +- 追加 `dictGet` と `dictGetOrDefault` 戻り値の型を指定する必要のない関数。 タイプは、辞書記述から自動的に決定されます。 [アモスの鳥](https://github.com/ClickHouse/ClickHouse/pull/3564) +- これで、テーブルの説明で列のコメントを指定し、次の方法で変更できます `ALTER`. [\#3377](https://github.com/ClickHouse/ClickHouse/pull/3377) +- 読書はのために支えられる `Join` 単純なキーで表を入力します。 [アモスの鳥](https://github.com/ClickHouse/ClickHouse/pull/3728) +- これで、オプションを指定できます `join_use_nulls`, `max_rows_in_join`, `max_bytes_in_join`、と `join_overflow_mode` 作成するとき `Join` タイプテーブル。 [アモスの鳥](https://github.com/ClickHouse/ClickHouse/pull/3728) +- を追加しました `joinGet` を使用できるようにする関数 `Join` 辞書のようにテーブルを入力します。 [アモスの鳥](https://github.com/ClickHouse/ClickHouse/pull/3728) +- を追加しました `partition_key`, `sorting_key`, `primary_key`、と `sampling_key` に列 `system.tables` テーブルのための情報を提供するテーブル鍵となります。 [\#3609](https://github.com/ClickHouse/ClickHouse/pull/3609) +- を追加しました `is_in_partition_key`, `is_in_sorting_key`, `is_in_primary_key`、と `is_in_sampling_key` に列 `system.columns` テーブル。 [\#3609](https://github.com/ClickHouse/ClickHouse/pull/3609) +- を追加しました `min_time` と `max_time` に列 `system.parts` テーブル。 これらのカラムは人口がパーティショニングキーは表現で構成され `DateTime` 列。 [Emmanuel Donin de Rosière](https://github.com/ClickHouse/ClickHouse/pull/3800) + +#### バグ修正: {#bug-fixes-1} + +- の修正とパフォーマンスの改善 `LowCardinality` データ型。 `GROUP BY` を使用して `LowCardinality(Nullable(...))`. の値を取得する `extremes`. 高階関数の処理。 `LEFT ARRAY JOIN`. 分散 `GROUP BY`. 返される関数 `Array`. の実行 `ORDER BY`. への書き込み `Distributed` テーブル(nicelulu)。 下位互換性のための `INSERT` 実装する古いクライアントからのクエリ `Native` プロトコル のサポート `LowCardinality` のために `JOIN`. した場合の性能改善作業を単一のストリームです。 [\#3823](https://github.com/ClickHouse/ClickHouse/pull/3823) [\#3803](https://github.com/ClickHouse/ClickHouse/pull/3803) [\#3799](https://github.com/ClickHouse/ClickHouse/pull/3799) [\#3769](https://github.com/ClickHouse/ClickHouse/pull/3769) [\#3744](https://github.com/ClickHouse/ClickHouse/pull/3744) [\#3681](https://github.com/ClickHouse/ClickHouse/pull/3681) [\#3651](https://github.com/ClickHouse/ClickHouse/pull/3651) [\#3649](https://github.com/ClickHouse/ClickHouse/pull/3649) [\#3641](https://github.com/ClickHouse/ClickHouse/pull/3641) [\#3632](https://github.com/ClickHouse/ClickHouse/pull/3632) [\#3568](https://github.com/ClickHouse/ClickHouse/pull/3568) [\#3523](https://github.com/ClickHouse/ClickHouse/pull/3523) [\#3518](https://github.com/ClickHouse/ClickHouse/pull/3518) +- どのように固定 `select_sequential_consistency` オプションの作品。 従来、この設定が可能になり、不完全な結果が返され始めてから書き込み新しいパーティション [\#2863](https://github.com/ClickHouse/ClickHouse/pull/2863) +- DDL実行時にデータベースが正しく指定されている `ON CLUSTER` クエリと `ALTER UPDATE/DELETE`. [\#3772](https://github.com/ClickHouse/ClickHouse/pull/3772) [\#3460](https://github.com/ClickHouse/ClickHouse/pull/3460) +- データベースが正しく指定されたサブクエリの中でも色々と用意が必要なもの。 [\#3521](https://github.com/ClickHouse/ClickHouse/pull/3521) +- でバグを修正しました `PREWHERE` と `FINAL` のために `VersionedCollapsingMergeTree`. 
[7167bfd7](https://github.com/ClickHouse/ClickHouse/commit/7167bfd7b365538f7a91c4307ad77e552ab4e8c1) +- 今すぐ使用できます `KILL QUERY` テーブルがロックされるのを待っているため、まだ開始されていないクエリを取り消す。 [\#3517](https://github.com/ClickHouse/ClickHouse/pull/3517) +- クロックが真夜中に戻った場合の日付と時刻の計算を修正しました(これはイランで起こり、1981年から1983年にモスクワで起こりました)。 以前は、これは一日早く必要以上にリセットされる時間につながった、また、テキスト形式での日付と時刻の誤った書式設定を引き起こしました。 [\#3819](https://github.com/ClickHouse/ClickHouse/pull/3819) +- いくつかのケースで修正されたバグ `VIEW` データベースを省略するサブクエリ。 [冬張](https://github.com/ClickHouse/ClickHouse/pull/3521) +- Aから同時に読み取るときの競合状態を修正しました `MATERIALIZED VIEW` を削除する。 `MATERIALIZED VIEW` 内部を締めないことが原因で `MATERIALIZED VIEW`. [\#3404](https://github.com/ClickHouse/ClickHouse/pull/3404) [\#3694](https://github.com/ClickHouse/ClickHouse/pull/3694) +- エラーを修正しました `Lock handler cannot be nullptr.` [\#3689](https://github.com/ClickHouse/ClickHouse/pull/3689) +- 固定クエリ処理 `compile_expressions` オプションを有効にすると(このデフォルトで有効です). のような非決定的な定数式 `now` 機能は展開されなくなりました。 [\#3457](https://github.com/ClickHouse/ClickHouse/pull/3457) +- 非定数スケールの引数を指定するとクラッシュする問題を修正しました `toDecimal32/64/128` 機能。 +- と配列を挿入しようとするとエラーを修正しました `NULL` の要素 `Values` 型の列にフォーマットする `Array` なし `Nullable` (もし `input_format_values_interpret_expressions` = 1). [\#3487](https://github.com/ClickHouse/ClickHouse/pull/3487) [\#3503](https://github.com/ClickHouse/ClickHouse/pull/3503) +- 固定連続エラーログイン `DDLWorker` ZooKeeperが利用できない場合。 [8f50c620](https://github.com/ClickHouse/ClickHouse/commit/8f50c620334988b28018213ec0092fe6423847e2) +- の戻り値の型を修正しました `quantile*` からの機能 `Date` と `DateTime` 引数のタイプ。 [\#3580](https://github.com/ClickHouse/ClickHouse/pull/3580) +- 修正された `WITH` 式のない単純なエイリアスを指定する場合の句。 [\#3570](https://github.com/ClickHouse/ClickHouse/pull/3570) +- 名前付きサブクエリと修飾された列名を持つクエリの処理を修正しました `enable_optimize_predicate_expression` は有効です。 [冬張](https://github.com/ClickHouse/ClickHouse/pull/3588) +- エラーを修正しました `Attempt to attach to nullptr thread group` マテリアライズドビューで作業する場合。 [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3623) +- 特定の不正な引数を渡すときにクラッシュを修正しました。 `arrayReverse` 機能。 [73e3a7b6](https://github.com/ClickHouse/ClickHouse/commit/73e3a7b662161d6005e7727d8a711b930386b871) +- のバッファオーバーフローを修正 `extractURLParameter` 機能。 パフォーマンスの向上 ゼロバイトを含む文字列の正しい処理を追加しました。 [141e9799](https://github.com/ClickHouse/ClickHouse/commit/141e9799e49201d84ea8e951d1bed4fb6d3dacb5) +- バッファオーバーフローを修正 `lowerUTF8` と `upperUTF8` 機能。 これらの関数を実行する機能を削除しました `FixedString` 型引数。 [\#3662](https://github.com/ClickHouse/ClickHouse/pull/3662) +- 削除時のレアな競合状態を修正しました `MergeTree` テーブル。 [\#3680](https://github.com/ClickHouse/ClickHouse/pull/3680) +- から読むときに競合状態を修正しました `Buffer` テーブルと同時に実行 `ALTER` または `DROP` ターゲットテーブルです。 [\#3719](https://github.com/ClickHouse/ClickHouse/pull/3719) +- 場合は、固定segfault `max_temporary_non_const_columns` 制限を超えました。 [\#3788](https://github.com/ClickHouse/ClickHouse/pull/3788) + +#### 改善: {#improvements-1} + +- サーバーは、処理された構成ファイルを `/etc/clickhouse-server/` ディレクトリ。 代わりに、それらを保存します `preprocessed_configs` ディレクトリ内 `path`. 
これはことを意味します `/etc/clickhouse-server/` ディレク `clickhouse` ユーザーが改善されます。 [\#2443](https://github.com/ClickHouse/ClickHouse/pull/2443) +- その `min_merge_bytes_to_use_direct_io` optionはデフォルトで10GiBに設定されています。 MergeTreeファミリーからテーブルの大部分を形成するマージは、 `O_DIRECT` 過度のページキャッシュの削除を防止するモード。 [\#3504](https://github.com/ClickHouse/ClickHouse/pull/3504) +- テーブルの非常に多数がある場合に加速サーバーの開始。 [\#3398](https://github.com/ClickHouse/ClickHouse/pull/3398) +- 接続プールとhttpを追加しました `Keep-Alive` レプリカ間の接続のため。 [\#3594](https://github.com/ClickHouse/ClickHouse/pull/3594) +- クエリの構文が無効な場合は、 `400 Bad Request` コードは `HTTP` インターフェイス(500返還し上げます。 [31bc680a](https://github.com/ClickHouse/ClickHouse/commit/31bc680ac5f4bb1d0360a8ba4696fa84bb47d6ab) +- その `join_default_strictness` オプションは `ALL` 互換性のためにデフォルトで。 [120e2cbe](https://github.com/ClickHouse/ClickHouse/commit/120e2cbe2ff4fbad626c28042d9b28781c805afe) +- ログの削除先 `stderr` から `re2` 無効または複雑な正規表現のライブラリ。 [\#3723](https://github.com/ClickHouse/ClickHouse/pull/3723) +- のために追加 `Kafka` テーブルエンジン:Kafkaからの読み取りを開始する前にサブスクリプションを確認します。 [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3396) +- その `cityHash64`, `farmHash64`, `metroHash64`, `sipHash64`, `halfMD5`, `murmurHash2_32`, `murmurHash2_64`, `murmurHash3_32`、と `murmurHash3_64` 関数は現在、任意の数の引数とタプルの形の引数に対して機能します。 [\#3451](https://github.com/ClickHouse/ClickHouse/pull/3451) [\#3519](https://github.com/ClickHouse/ClickHouse/pull/3519) +- その `arrayReverse` 関数は現在、配列の任意のタイプで動作します。 [73e3a7b6](https://github.com/ClickHouse/ClickHouse/commit/73e3a7b662161d6005e7727d8a711b930386b871) +- オプションのパラメーターを追加しました。 `timeSlots` 機能。 [Kirill Shvakov](https://github.com/ClickHouse/ClickHouse/pull/3724) +- のために `FULL` と `RIGHT JOIN`、を `max_block_size` 設定は、右のテーブルからの非結合データのストリームに使用されます。 [アモスの鳥](https://github.com/ClickHouse/ClickHouse/pull/3699) +- を追加しました `--secure` コマンドライン引数in `clickhouse-benchmark` と `clickhouse-performance-test` TLSを有効にする。 [\#3688](https://github.com/ClickHouse/ClickHouse/pull/3688) [\#3690](https://github.com/ClickHouse/ClickHouse/pull/3690) +- Aの構造のときの型変換 `Buffer` type tableは、コピー先のテーブルの構造と一致しません。 [Vitaly Baranov](https://github.com/ClickHouse/ClickHouse/pull/3603) +- を追加しました `tcp_keep_alive_timeout` オプションをキープアライブパケットの後、運動不足のために指定された時間間隔で出ています。 [\#3441](https://github.com/ClickHouse/ClickHouse/pull/3441) +- パーティションキーの値の不要なクォートを削除しました。 `system.parts` テーブルが単一の列で構成されている場合。 [\#3652](https://github.com/ClickHouse/ClickHouse/pull/3652) +- モジュロ機能はのために働きます `Date` と `DateTime` データ型。 [\#3385](https://github.com/ClickHouse/ClickHouse/pull/3385) +- の同義語を追加しました `POWER`, `LN`, `LCASE`, `UCASE`, `REPLACE`, `LOCATE`, `SUBSTR`、と `MID` 機能。 [\#3774](https://github.com/ClickHouse/ClickHouse/pull/3774) [\#3763](https://github.com/ClickHouse/ClickHouse/pull/3763) いくつかの関数名は、SQL標準との互換性のために大文字と小文字を区別しません。 構文砂糖を追加しました `SUBSTRING(expr FROM start FOR length)` SQLとの互換性のために。 [\#3804](https://github.com/ClickHouse/ClickHouse/pull/3804) +- に能力を追加しました `mlock` 対応するメモリページ `clickhouse-server` メモリから強制的に解放されないようにするための実行可能コード。 この機能はデフォルトでは無効です。 [\#3553](https://github.com/ClickHouse/ClickHouse/pull/3553) +- した場合の性能改善からの読み出し `O_DIRECT` (とともに `min_bytes_to_use_direct_io` オプション有効)。 [\#3405](https://github.com/ClickHouse/ClickHouse/pull/3405) +- の改善された性能 `dictGet...OrDefault` 定数キー引数と非定数既定の引数の関数。 [アモスの鳥](https://github.com/ClickHouse/ClickHouse/pull/3563) +- その `firstSignificantSubdomain` 関数は現在、ドメインを処理します `gov`, `mil`、と `edu`. 
[Igor Hatarist](https://github.com/ClickHouse/ClickHouse/pull/3601) パフォーマンスの向上 [\#3628](https://github.com/ClickHouse/ClickHouse/pull/3628) +- 起動するためのカスタム環境変数を指定する機能 `clickhouse-server` を使用して `SYS-V init.d` 定義によるスクリプト `CLICKHOUSE_PROGRAM_ENV` で `/etc/default/clickhouse`. + [Pavlo Bashynskyi](https://github.com/ClickHouse/ClickHouse/pull/3612) +- Clickhouse-server initスクリプトの戻りコードを修正します。 [\#3516](https://github.com/ClickHouse/ClickHouse/pull/3516) +- その `system.metrics` テーブルは今持っている `VersionInteger` メトリック、および `system.build_options` 追加された行があります `VERSION_INTEGER` これには、ClickHouseバージョンの数値形式が含まれます。 `18016000`. [\#3644](https://github.com/ClickHouse/ClickHouse/pull/3644) +- 比較する能力を削除しました `Date` 型番号のようなエラーの可能性を回避 `date = 2018-12-17` 日付の前後の引用符が誤って省略されている場所。 [\#3687](https://github.com/ClickHouse/ClickHouse/pull/3687) +- のようなステートフルな関数の動作を修正 `rowNumberInAllBlocks`. 彼らは以前に起因するクエリ分析中に開始に一つの数より大きかった結果を出力します。 [アモスの鳥](https://github.com/ClickHouse/ClickHouse/pull/3729) +- この `force_restore_data` ファイルは削除できません。 [アモスの鳥](https://github.com/ClickHouse/ClickHouse/pull/3794) + +#### ビルドの改善: {#build-improvements-1} + +- 更新された `jemalloc` 潜在的なメモリリークを修正するライブラリ。 [アモスの鳥](https://github.com/ClickHouse/ClickHouse/pull/3557) +- プロファイル `jemalloc` ビルドをデバッグするには、既定で有効になります。 [2cc82f5c](https://github.com/ClickHouse/ClickHouse/commit/2cc82f5cbe266421cd4c1165286c2c47e5ffcb15) +- ときにのみ統合テストを実行する機能を追加しました `Docker` は、システムにインス [\#3650](https://github.com/ClickHouse/ClickHouse/pull/3650) +- 選択クエリでファズ式テストを追加しました。 [\#3442](https://github.com/ClickHouse/ClickHouse/pull/3442) +- コミットのストレステストを追加しました。 [\#3438](https://github.com/ClickHouse/ClickHouse/pull/3438) +- Dockerイメージでclickhouse-serverを起動する方法を改善しました。 [Elghazal Ahmed](https://github.com/ClickHouse/ClickHouse/pull/3663) +- のためのdocker画像の追加支援のための初期化データベース用のファイルを `/docker-entrypoint-initdb.d` ディレクトリ。 [コンスタンチン-レベデフ](https://github.com/ClickHouse/ClickHouse/pull/3695) +- ARM上のビルドの修正。 [\#3709](https://github.com/ClickHouse/ClickHouse/pull/3709) + +#### 下位互換性のない変更: {#backward-incompatible-changes} + +- 比較する能力を削除しました `Date` 数字で入力します。 代わりに `toDate('2018-12-18') = 17883` 明示的な型変換を使用する必要があります `= toDate(17883)` [\#3687](https://github.com/ClickHouse/ClickHouse/pull/3687) + +## ClickHouseリリース18.14 {#clickhouse-release-18-14} + +### ClickHouseリリース18.14.19、2018-12-19 {#clickhouse-release-18-14-19-2018-12-19} + +#### バグ修正: {#bug-fixes-2} + +- ODBCソースで辞書を更新する際の問題を引き起こしたエラーを修正しました。 [\#3825](https://github.com/ClickHouse/ClickHouse/issues/3825), [\#3829](https://github.com/ClickHouse/ClickHouse/issues/3829) +- DDL実行時にデータベースが正しく指定されている `ON CLUSTER` クエリ。 [\#3460](https://github.com/ClickHouse/ClickHouse/pull/3460) +- 場合は、固定segfault `max_temporary_non_const_columns` 制限を超えました。 [\#3788](https://github.com/ClickHouse/ClickHouse/pull/3788) + +#### ビルドの改善: {#build-improvements-2} + +- ARM上のビルドの修正。 + +### ClickHouseリリース18.14.18、2018-12-04 {#clickhouse-release-18-14-18-2018-12-04} + +#### バグ修正: {#bug-fixes-3} + +- 固定エラーで `dictGet...` タイプの辞書の関数 `range`、引数の一方が定数であり、他方が定数でない場合。 [\#3751](https://github.com/ClickHouse/ClickHouse/pull/3751) +- メッセー `netlink: '...': attribute type 1 has an invalid length` Linuxカーネルログに印刷するには、linuxカーネルの新鮮なバージョンでのみ起こっていました。 [\#3749](https://github.com/ClickHouse/ClickHouse/pull/3749) +- 機能の固定segfault `empty` の議論のために `FixedString` タイプ。 [ダニエルダオ-クアン-ミン](https://github.com/ClickHouse/ClickHouse/pull/3703) +- の大きな値を使用して過度のメモリ割り当てを修正 `max_query_size` 設定(メモリチャンクの `max_query_size` バイトは一度に事前に割り当てられました)。 [\#3720](https://github.com/ClickHouse/ClickHouse/pull/3720) + +#### ビルドの変更: {#build-changes} + +- 
固定の構築をllvm/clangライブラリのバージョン7のosのパッケージ(これらのライブラリを利用しています実行時のクエリを集める [\#3582](https://github.com/ClickHouse/ClickHouse/pull/3582) + +### ClickHouseリリース18.14.17、2018-11-30 {#clickhouse-release-18-14-17-2018-11-30} + +#### バグ修正: {#bug-fixes-4} + +- 固定の場合には、odbc橋工程終了しなかったのサーバーです。 [\#3642](https://github.com/ClickHouse/ClickHouse/pull/3642) +- に固定同期挿入 `Distributed` リモートテーブルの列リストとは異なる列リストを持つテーブル。 [\#3673](https://github.com/ClickHouse/ClickHouse/pull/3673) +- MergeTreeテーブルをドロップしたときにクラッシュする稀な競合状態を修正しました。 [\#3643](https://github.com/ClickHouse/ClickHouse/pull/3643) +- クエリスレッドの作成が失敗した場合のクエリのデッドロックを修正しました `Resource temporarily unavailable` エラー。 [\#3643](https://github.com/ClickHouse/ClickHouse/pull/3643) +- の固定解析 `ENGINE` ときの句 `CREATE AS table` 構文が使用され、 `ENGINE` の前に句が指定されました。 `AS table` (エラーの結果、指定されたエンジンが無視されました)。 [\#3692](https://github.com/ClickHouse/ClickHouse/pull/3692) + +### ClickHouseリリース18.14.15、2018-11-21 {#clickhouse-release-18-14-15-2018-11-21} + +#### バグ修正: {#bug-fixes-5} + +- メモリチャンクのサイズは、型の列を逆シリアル化しながら過大評価されました `Array(String)` それはにつながる “Memory limit exceeded” エラー。 この問題はバージョン18.12.13に登場しました。 [\#3589](https://github.com/ClickHouse/ClickHouse/issues/3589) + +### ClickHouseリリース18.14.14、2018-11-20 {#clickhouse-release-18-14-14-2018-11-20} + +#### バグ修正: {#bug-fixes-6} + +- 固定 `ON CLUSTER` クエリがクラスタ設定の確保(フラグ ``). [\#3599](https://github.com/ClickHouse/ClickHouse/pull/3599) + +#### ビルドの変更: {#build-changes-1} + +- 問題を修正しました(システムからのllvm-7、macos) [\#3582](https://github.com/ClickHouse/ClickHouse/pull/3582) + +### ClickHouseリリース18.14.13、2018-11-08 {#clickhouse-release-18-14-13-2018-11-08} + +#### バグ修正: {#bug-fixes-7} + +- 修正された `Block structure mismatch in MergingSorted stream` エラー。 [\#3162](https://github.com/ClickHouse/ClickHouse/issues/3162) +- 固定 `ON CLUSTER` クラスタ設定でセキュリティで保護された接続がオンになっている場合のクエリ `` フラグ)。 [\#3465](https://github.com/ClickHouse/ClickHouse/pull/3465) +- 使用されるクエリのエラーを修正しました `SAMPLE`, `PREWHERE` とエイリアス列。 [\#3543](https://github.com/ClickHouse/ClickHouse/pull/3543) +- レアを修正しました `unknown compression method` エラー時 `min_bytes_to_use_direct_io` 設定は有効でした。 [3544](https://github.com/ClickHouse/ClickHouse/pull/3544) + +#### 性能の改善: {#performance-improvements} + +- とクエリの固定パフォーマンス回帰 `GROUP BY` AMD EPYCプロセッサで実行するときのUInt16またはDate型の列の数。 [Igor Lapko](https://github.com/ClickHouse/ClickHouse/pull/3512) +- 長い文字列を処理するクエリのパフォーマン [\#3530](https://github.com/ClickHouse/ClickHouse/pull/3530) + +#### ビルドの改善: {#build-improvements-3} + +- Arcadiaビルドを簡素化するための改善。 [\#3475](https://github.com/ClickHouse/ClickHouse/pull/3475), [\#3535](https://github.com/ClickHouse/ClickHouse/pull/3535) + +### ClickHouseリリース18.14.12、2018-11-02 {#clickhouse-release-18-14-12-2018-11-02} + +#### バグ修正: {#bug-fixes-8} + +- 固定クラッシュに当社では豊富な種類の名前のないサブクエリ. 
[\#3505](https://github.com/ClickHouse/ClickHouse/pull/3505) +- 不正なクエリを生成する修正(空の `WHERE` 句)外部データベースを照会するとき。 [ホティッド](https://github.com/ClickHouse/ClickHouse/pull/3477) +- ODBC辞書で間違ったタイムアウト値を使用して修正。 [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3511) + +### ClickHouseリリース18.14.11、2018-10-29 {#clickhouse-release-18-14-11-2018-10-29} + +#### バグ修正: {#bug-fixes-9} + +- エラーを修正しました `Block structure mismatch in UNION stream: different number of columns` 制限クエリで。 [\#2156](https://github.com/ClickHouse/ClickHouse/issues/2156) +- 固定誤差が統合データテーブルを含む配列内の入れ子構造です。 [\#3397](https://github.com/ClickHouse/ClickHouse/pull/3397) +- 修正された不正なクエリの結果 `merge_tree_uniform_read_distribution` 設定は無効になっています(既定で有効になっています)。 [\#3429](https://github.com/ClickHouse/ClickHouse/pull/3429) +- ネイティブ形式の分散テーブルへの挿入エラーを修正しました。 [\#3411](https://github.com/ClickHouse/ClickHouse/issues/3411) + +### ClickHouseリリース18.14.10、2018-10-23 {#clickhouse-release-18-14-10-2018-10-23} + +- その `compile_expressions` 既定では、設定(式のJITコンパイル)は無効になっています。 [\#3410](https://github.com/ClickHouse/ClickHouse/pull/3410) +- その `enable_optimize_predicate_expression` 設定はデフォルトでは無効です。 + +### ClickHouseリリース18.14.9、2018-10-16 {#clickhouse-release-18-14-9-2018-10-16} + +#### 新しい機能: {#new-features-1} + +- その `WITH CUBE` の修飾子 `GROUP BY` (代替構文 `GROUP BY CUBE(...)` また利用できます)。 [\#3172](https://github.com/ClickHouse/ClickHouse/pull/3172) +- を追加しました `formatDateTime` 機能。 [Alexandr Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/2770) +- を追加しました `JDBC` テーブルエンジンと `jdbc` 表関数(clickhouse-jdbc-bridgeのインストールが必要) [Alexandr Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/3210) +- ISO週番号を操作するための機能を追加しました: `toISOWeek`, `toISOYear`, `toStartOfISOYear`、と `toDayOfYear`. [\#3146](https://github.com/ClickHouse/ClickHouse/pull/3146) +- 今すぐ使用できます `Nullable` の列 `MySQL` と `ODBC` テーブル。 [\#3362](https://github.com/ClickHouse/ClickHouse/pull/3362) +- ネストされたデータ構造は、ネストされたオブジェ `JSONEachRow` フォーマット。 を追加しました `input_format_import_nested_json` 設定。 [ベロマン雲館](https://github.com/ClickHouse/ClickHouse/pull/3144) +- 並列処理は、多くの利用可能です `MATERIALIZED VIEW`sデータを挿入するとき。 を見る `parallel_view_processing` 設定。 [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3208) +- を追加しました `SYSTEM FLUSH LOGS` クエリ(次のようなシステムテーブルへの強制ログフラッシュ `query_log`) [\#3321](https://github.com/ClickHouse/ClickHouse/pull/3321) +- これで、事前定義を使用できます `database` と `table` 宣言するときのマクロ `Replicated` テーブル。 [\#3251](https://github.com/ClickHouse/ClickHouse/pull/3251) +- 読む能力を追加しました `Decimal` 工学表記で値を入力します(十の累乗を示します)。 [\#3153](https://github.com/ClickHouse/ClickHouse/pull/3153) + +#### 実験の特徴: {#experimental-features} + +- のgroup by句の最適化 `LowCardinality data types.` [\#3138](https://github.com/ClickHouse/ClickHouse/pull/3138) +- のための式の最適化された計算 `LowCardinality data types.` [\#3200](https://github.com/ClickHouse/ClickHouse/pull/3200) + +#### 改善: {#improvements-2} + +- クエリのメモリ消費量を大幅に削減 `ORDER BY` と `LIMIT`. を見る `max_bytes_before_remerge_sort` 設定。 [\#3205](https://github.com/ClickHouse/ClickHouse/pull/3205) +- の不在で `JOIN` (`LEFT`, `INNER`, …), `INNER JOIN` が想定される。 [\#3147](https://github.com/ClickHouse/ClickHouse/pull/3147) +- 修飾されたアスタリスクは `JOIN`. [冬張](https://github.com/ClickHouse/ClickHouse/pull/3202) +- その `ODBC` テーブルエンジンが正しく選択の方法のために引用識別子のSQL方言のリモートデータベースです。 [Alexandr Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/3210) +- その `compile_expressions` 既定では、設定(式のJITコンパイル)が有効になっています。 +- 同時drop database/table if existsとcreate database/table if not existsの動作を修正しました。 以前は、 `CREATE DATABASE ... 
IF NOT EXISTS` クエリが返すエラーメッセージ “File … already exists”、と `CREATE TABLE ... IF NOT EXISTS` と `DROP TABLE IF EXISTS` クエリが返されます `Table ... is creating or attaching right now`. [\#3101](https://github.com/ClickHouse/ClickHouse/pull/3101) +- MYSQLまたはODBCテーブルからクエリを実行するときに、定数の右半分を持つLIKE式がリモートサーバーに渡されます。 [\#3182](https://github.com/ClickHouse/ClickHouse/pull/3182) +- MYSQLおよびODBCテーブルからクエリを実行すると、WHERE句の定数式との比較がリモートサーバーに渡されます。 以前は、定数との比較のみが渡されました。 [\#3182](https://github.com/ClickHouse/ClickHouse/pull/3182) +- 端末の行幅の正しい計算 `Pretty` 象形文字の文字列を含む形式。 [アモスの鳥](https://github.com/ClickHouse/ClickHouse/pull/3257). +- `ON CLUSTER` 指定することができるため `ALTER UPDATE` クエリ。 +- 性能向上のためのデータを読み込むには `JSONEachRow` フォーマット。 [\#3332](https://github.com/ClickHouse/ClickHouse/pull/3332) +- の同義語を追加しました `LENGTH` と `CHARACTER_LENGTH` 互換性のための機能。 その `CONCAT` 関数では、大文字と小文字は区別されなくなりました。 [\#3306](https://github.com/ClickHouse/ClickHouse/pull/3306) +- を追加しました `TIMESTAMP` の同義語 `DateTime` タイプ。 [\#3390](https://github.com/ClickHouse/ClickHouse/pull/3390) +- ログ行がクエリに関連していない場合でも、サーバーログには常にquery\_id用に予約された領域があります。 それによる解析サーバーテキストログとして第三者ツールです。 +- クエリによるメモリ消費は、次のレベルの整数のギガバイトを超えるとログに記録されます。 [\#3205](https://github.com/ClickHouse/ClickHouse/pull/3205) +- 追加の互換性モードの場合はライブラリを使用するネイティブプロトコルを送の少ないカラムによる間違い、サーバーの期待に挿入モードです。 このシナリオは、clickhouse-cppライブラリを使用する場合に可能でした。 従来、このシナリオのサーバーンダリングする能力があります。 [\#3171](https://github.com/ClickHouse/ClickHouse/pull/3171) +- ユーザー定義のwhere式 `clickhouse-copier`、今使用することができ `partition_key` エイリアス(追加フィルタリングによるソーステーブルの分割). これがなければならないときに便利でのパーティショニングスキーマの変化の中で複製、変更するわけです。 [\#3166](https://github.com/ClickHouse/ClickHouse/pull/3166) +- のワークフロー `Kafka` エンジンとして、バックグラウンドスレッドプールのために自動的に減速のデータを読高ます。 [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3215). +- 読書のためのサポート `Tuple` と `Nested` 以下のような構造体の値 `struct` で `Cap'n'Proto format`. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3216) +- トップレベルドメインのリスト `firstSignificantSubdomain` 機能の現在のドメイン `biz`. [デカシール](https://github.com/ClickHouse/ClickHouse/pull/3219) +- 外部辞書の設定で, `null_value` デフォルトのデータ型の値として解釈されます。 [\#3330](https://github.com/ClickHouse/ClickHouse/pull/3330) +- のサポート `intDiv` と `intDivOrZero` 機能のための `Decimal`. [b48402e8](https://github.com/ClickHouse/ClickHouse/commit/b48402e8712e2b9b151e0eef8193811d433a1264) +- のサポート `Date`, `DateTime`, `UUID`、と `Decimal` のキーとしてタイプします `sumMap` 集計関数。 [\#3281](https://github.com/ClickHouse/ClickHouse/pull/3281) +- のサポート `Decimal` 外部ディクショナリのデータ型。 [\#3324](https://github.com/ClickHouse/ClickHouse/pull/3324) +- のサポート `Decimal` データタイプ `SummingMergeTree` テーブル。 [\#3348](https://github.com/ClickHouse/ClickHouse/pull/3348) +- のための追加された専門分野 `UUID` で `if`. [\#3366](https://github.com/ClickHouse/ClickHouse/pull/3366) +- 数を減らしましたの `open` と `close` システムコールからの読み取り `MergeTree table`. [\#3283](https://github.com/ClickHouse/ClickHouse/pull/3283) +- A `TRUNCATE TABLE` クエリは任意のレプリカで実行できます(クエリはリーダーレプリカに渡されます)。 [Kirill Shvakov](https://github.com/ClickHouse/ClickHouse/pull/3375) + +#### バグ修正: {#bug-fixes-10} + +- との問題を修正しました `Dictionary` テーブルのため `range_hashed` 辞書だ このエラーはバージョン18.12.17で発生しました。 [\#1702](https://github.com/ClickHouse/ClickHouse/pull/1702) +- ロード時にエラーを修正 `range_hashed` 辞書(メッセージ `Unsupported type Nullable (...)`). 
このエラーはバージョン18.12.17で発生しました。 [\#3362](https://github.com/ClickHouse/ClickHouse/pull/3362) +- で修正されたエラー `pointInPolygon` 多数の頂点が互いに近接して配置されているポリゴンの不正確な計算の蓄積による関数。 [\#3331](https://github.com/ClickHouse/ClickHouse/pull/3331) [\#3341](https://github.com/ClickHouse/ClickHouse/pull/3341) +- データパーツをマージした後、結果のパーツのチェックサムが別のレプリカで同じマージの結果と異なる場合、マージの結果が削除され、データパーツが別のレプリ しかし、データパーツをダウンロードした後、そのパーツがすでに存在するというエラーのため(マージ後にデータパーツが何らかの遅延で削除されたため)、ワーキング これにより、同じデータを周期的にダウンロードしようとした。 [\#3194](https://github.com/ClickHouse/ClickHouse/pull/3194) +- クエリによる総メモリ消費の誤った計算を修正しました(誤った計算のために、 `max_memory_usage_for_all_queries` 設定が間違って働いたと `MemoryTracking` メトリックの値が正しくない)。 このエラーはバージョン18.12.13で発生しました。 [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3344) +- の機能を修正しました `CREATE TABLE ... ON CLUSTER ... AS SELECT ...` このエラーはバージョン18.12.13で発生しました。 [\#3247](https://github.com/ClickHouse/ClickHouse/pull/3247) +- のためのデータ構造の固定不要な準備 `JOIN`サーバー上では、クエリを開始するサーバー上の `JOIN` リモートサーバーでのみ実行されます。 [\#3340](https://github.com/ClickHouse/ClickHouse/pull/3340) +- で修正されたバグ `Kafka` エンジン:データの読み取りを開始するときの例外後のデッドロック、完了時のロック [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3215). +- のために `Kafka` テーブル、オプション `schema` のスキー `Cap'n'Proto` フォーマット)。 [Vojtech Splichal](https://github.com/ClickHouse/ClickHouse/pull/3150) +- ZooKeeperサーバーのアンサンブルに、接続を受け入れるがハンドシェイクに応答する代わりにすぐに閉じるサーバーがある場合、ClickHouseは別のサーバーを接続することを 以前は、これはエラーを生成しました `Cannot read all data. Bytes read: 0. Bytes expected: 4.` サーバーは起動できませんでした [8218cf3a](https://github.com/ClickHouse/ClickHouse/commit/8218cf3a5f39a43401953769d6d12a0bb8d29da9) +- ZooKeeperサーバーの集合にDNSクエリがエラーを返すサーバーが含まれている場合、これらのサーバーは無視されます。 [17b8e209](https://github.com/ClickHouse/ClickHouse/commit/17b8e209221061325ad7ba0539f03c6e65f87f29) +- 固定型変換の間 `Date` と `DateTime` データを挿入するとき `VALUES` フォーマット(if `input_format_values_interpret_expressions = 1`). 以前は、Unixエポック時間の日数の数値とUnixタイムスタンプの間で変換が行われ、予期しない結果が生じました。 [\#3229](https://github.com/ClickHouse/ClickHouse/pull/3229) +- 間の補正型変換 `Decimal` そして整数数。 [\#3211](https://github.com/ClickHouse/ClickHouse/pull/3211) +- で修正されたエラー `enable_optimize_predicate_expression` 設定。 [冬張](https://github.com/ClickHouse/ClickHouse/pull/3231) +- デフォルト以外のcsv区切り記号が使用されている場合、浮動小数点数を使用したcsv形式の解析エラーが修正されました `;` [\#3155](https://github.com/ClickHouse/ClickHouse/pull/3155) +- 修正された `arrayCumSumNonNegative` 関数(アキュムレータがゼロより小さい場合、負の値は累積されません)。 [Aleksey Studnev](https://github.com/ClickHouse/ClickHouse/pull/3163) +- 固定方法 `Merge` テーブルの上に動作 `Distributed` 使用する場合のテーブル `PREWHERE`. [\#3165](https://github.com/ClickHouse/ClickHouse/pull/3165) +- のバグ修正 `ALTER UPDATE` クエリ。 +- で修正されたバグ `odbc` バージョン18.12に登場したテーブル関数。 [\#3197](https://github.com/ClickHouse/ClickHouse/pull/3197) +- との集計関数の動作を修正しました `StateArray` コンビネーター [\#3188](https://github.com/ClickHouse/ClickHouse/pull/3188) +- 分割するときにクラッシュを修正 `Decimal` ゼロによる値。 [69dd6609](https://github.com/ClickHouse/ClickHouse/commit/69dd6609193beb4e7acd3e6ad216eca0ccfb8179) +- 以下を使用した操作のタイプの固定出力 `Decimal` そして整数引数。 [\#3224](https://github.com/ClickHouse/ClickHouse/pull/3224) +- 中にsegfaultを修正しました `GROUP BY` に `Decimal128`. [3359ba06](https://github.com/ClickHouse/ClickHouse/commit/3359ba06c39fcd05bfdb87d6c64154819621e13a) +- その `log_query_threads` 設定(クエリ実行の各スレッドに関する情報のロギング)は、次の場合にのみ有効になります `log_queries` オプション(照会に関する情報のロギング)は1に設定されます。 ので、 `log_query_threads` このオプションは既定で有効になっています。 [\#3241](https://github.com/ClickHouse/ClickHouse/pull/3241) +- 分位数集計関数の分散操作におけるエラーを修正しました(エラーメッセージ `Not found column quantile...`). 
[292a8855](https://github.com/ClickHouse/ClickHouse/commit/292a885533b8e3b41ce8993867069d14cbd5a664) +- 同時にバージョン18.12.17サーバーと古いサーバーのクラスタで作業するときの互換性の問題を修正しました。 固定長および固定長以外のキーを持つ分散クエリの場合、集計するデータが大量にあった場合、返されるデータは常に完全に集計されるとは限りません [\#3254](https://github.com/ClickHouse/ClickHouse/pull/3254) +- の置換の処理を修正しました。 `clickhouse-performance-test` クエリにテストで宣言された置換の一部のみが含まれている場合。 [\#3263](https://github.com/ClickHouse/ClickHouse/pull/3263) +- 使用時にエラーが修正されました `FINAL` と `PREWHERE`. [\#3298](https://github.com/ClickHouse/ClickHouse/pull/3298) +- 使用時にエラーが修正されました `PREWHERE` 中に追加された列の上に `ALTER`. [\#3298](https://github.com/ClickHouse/ClickHouse/pull/3298) +- の不在のためのチェックを追加しました `arrayJoin` のために `DEFAULT` と `MATERIALIZED` 式。 以前は, `arrayJoin` データ挿入時にエラーが発生しました。 [\#3337](https://github.com/ClickHouse/ClickHouse/pull/3337) +- の不在のためのチェックを追加しました `arrayJoin` で `PREWHERE` 句。 以前は、このようなメッセージに `Size ... doesn't match` または `Unknown compression method` クエリを実行するとき。 [\#3357](https://github.com/ClickHouse/ClickHouse/pull/3357) +- 修正されたsegfaultは、置換された最適化の後にまれに発生する可能性があり、式の中で対応すると等価性評価からチェーン。 [liuyimin-bytedance](https://github.com/ClickHouse/ClickHouse/pull/3339) +- マイナーな訂正への `clickhouse-benchmark` 実行されたクエリの数は、シャットダウン時および反復回数を制限するために、より正確に計算されるようになりました。 [\#3351](https://github.com/ClickHouse/ClickHouse/pull/3351) [\#3352](https://github.com/ClickHouse/ClickHouse/pull/3352) + +#### 下位互換性のない変更: {#backward-incompatible-changes-1} + +- 削除された `allow_experimental_decimal_type` オプション。 その `Decimal` データ型は、デフォルトで使用できます。 [\#3329](https://github.com/ClickHouse/ClickHouse/pull/3329) + +## ClickHouseリリース18.12 {#clickhouse-release-18-12} + +### ClickHouseリリース18.12.17,2018-09-16 {#clickhouse-release-18-12-17-2018-09-16} + +#### 新しい機能: {#new-features-2} + +- `invalidate_query` (外部ディクショナリを更新する必要があるかどうかを確認するクエリを指定する機能)。 `clickhouse` ソース。 [\#3126](https://github.com/ClickHouse/ClickHouse/pull/3126) +- 使用する機能を追加しました `UInt*`, `Int*`、と `DateTime` データ型(データ型と共に `Date` タイプ)として `range_hashed` 範囲の境界を定義する外部ディクショナリキー。 さて `NULL` 開いている範囲を指定するために使用できます。 [Vasily Nemkov](https://github.com/ClickHouse/ClickHouse/pull/3123) +- その `Decimal` タイプ今サポート `var*` と `stddev*` 集計関数。 [\#3129](https://github.com/ClickHouse/ClickHouse/pull/3129) +- その `Decimal` タイプに対応しま数理機能 (`exp`, `sin` というように。) [\#3129](https://github.com/ClickHouse/ClickHouse/pull/3129) +- その `system.part_log` テーブルは今持っている `partition_id` コラム [\#3089](https://github.com/ClickHouse/ClickHouse/pull/3089) + +#### バグ修正: {#bug-fixes-11} + +- `Merge` 今正しくオンに動作します `Distributed` テーブル。 [冬張](https://github.com/ClickHouse/ClickHouse/pull/3159) +- 固定された非互換性(上の不要な依存関係 `glibc` クリックハウスを実行することが不可能になったバージョン) `Ubuntu Precise` と古いバージョン。 この非互換性はバージョン18.12.13で発生しました。 [\#3130](https://github.com/ClickHouse/ClickHouse/pull/3130) +- で修正されたエラー `enable_optimize_predicate_expression` 設定。 [冬張](https://github.com/ClickHouse/ClickHouse/pull/3107) +- 18.12.13より前のバージョンのレプリカのクラスターを操作し、新しいバージョンのテーブルの新しいレプリカをサーバーに作成する際に、下位互換性の問題を `Can not clone replica, because the ... updated to new ClickHouse version` これは論理的ですが、起こるべきではありません)。 [\#3122](https://github.com/ClickHouse/ClickHouse/pull/3122) + +#### 下位互換性のない変更: {#backward-incompatible-changes-2} + +- その `enable_optimize_predicate_expression` optionはデフォルトで有効になっています(これは楽観的です)。 列名の検索に関連するクエリ分析エラーが発生した場合は、次のように設定します `enable_optimize_predicate_expression` に0. 
[冬張](https://github.com/ClickHouse/ClickHouse/pull/3107) + +### ClickHouseリリース18.12.14,2018-09-13 {#clickhouse-release-18-12-14-2018-09-13} + +#### 新しい機能: {#new-features-3} + +- のサポートを追加 `ALTER UPDATE` クエリ。 [\#3035](https://github.com/ClickHouse/ClickHouse/pull/3035) +- を追加しました `allow_ddl` DDLクエリへのユーザーのアクセスを制限するオプション。 [\#3104](https://github.com/ClickHouse/ClickHouse/pull/3104) +- を追加しました `min_merge_bytes_to_use_direct_io` のための選択 `MergeTree` マージの合計サイズのしきい値を設定することができます(しきい値を超えると、データパーツファイルはO\_DIRECTを使用して処理されます)。 [\#3117](https://github.com/ClickHouse/ClickHouse/pull/3117) +- その `system.merges` システムテーブルは現在 `partition_id` コラム [\#3099](https://github.com/ClickHouse/ClickHouse/pull/3099) + +#### 改善 {#improvements-3} + +- 場合のデータ部分を据え置き期間中、突然変異なダウンロードによるレプリカ. [\#3103](https://github.com/ClickHouse/ClickHouse/pull/3103) +- オートコンプリートは、操作時に設定の名前に使用できます `clickhouse-client`. [\#3106](https://github.com/ClickHouse/ClickHouse/pull/3106) + +#### バグ修正: {#bug-fixes-12} + +- の要素である配列のサイズのチェックを追加しました `Nested` 挿入時にfieldsを入力します。 [\#3118](https://github.com/ClickHouse/ClickHouse/pull/3118) +- と外部辞書を更新するエラーを修正しました `ODBC` ソースと `hashed` ストレージ。 このエラーはバージョン18.12.13で発生しました。 +- クエリから一時テーブルを作成するときにクラッシュする問題を修正 `IN` 条件。 [冬張](https://github.com/ClickHouse/ClickHouse/pull/3098) +- 持つことができる配列の集計関数のエラーを修正しました `NULL` 要素。 [冬張](https://github.com/ClickHouse/ClickHouse/pull/3097) + +### ClickHouseリリース18.12.13,2018-09-10 {#clickhouse-release-18-12-13-2018-09-10} + +#### 新しい機能: {#new-features-4} + +- を追加しました `DECIMAL(digits, scale)` データ型 (`Decimal32(scale)`, `Decimal64(scale)`, `Decimal128(scale)`). 有効にするには、次の設定を使用します `allow_experimental_decimal_type`. [\#2846](https://github.com/ClickHouse/ClickHouse/pull/2846) [\#2970](https://github.com/ClickHouse/ClickHouse/pull/2970) [\#3008](https://github.com/ClickHouse/ClickHouse/pull/3008) [\#3047](https://github.com/ClickHouse/ClickHouse/pull/3047) +- 新しい `WITH ROLLUP` の修飾子 `GROUP BY` (代替構文: `GROUP BY ROLLUP(...)`). [\#2948](https://github.com/ClickHouse/ClickHouse/pull/2948) +- JOINを使用したクエリでは、スター文字はSQL標準に準拠してすべてのテーブルの列のリストに展開されます。 古い動作を復元するには、以下を設定します `asterisk_left_columns_only` ユーザー構成レベルで1に設定します。 [冬張](https://github.com/ClickHouse/ClickHouse/pull/2787) +- テーブ [冬張](https://github.com/ClickHouse/ClickHouse/pull/2907) +- Clickhouseクライアントのタブを押してオートコンプリート。 [Sergey Shcherbin](https://github.com/ClickHouse/ClickHouse/pull/2447) +- Clickhouse-client内のCtrl+Cは、入力されたクエリをクリアします。 [\#2877](https://github.com/ClickHouse/ClickHouse/pull/2877) +- を追加しました `join_default_strictness` 設定(値: `"`, `'any'`, `'all'`). これにより、指定しないでください `ANY` または `ALL` のために `JOIN`. 
[\#2982](https://github.com/ClickHouse/ClickHouse/pull/2982) +- クエリ処理に関連するサーバーログの各行には、クエリidが表示されます。 [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482) +- これでclickhouse-clientでクエリ実行ログを取得できるようになりました。 `send_logs_level` 設定)。 分散クエリ処理では、ログはすべてのサーバからカスケード接続されます。 [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482) +- その `system.query_log` と `system.processes` (`SHOW PROCESSLIST`)クエリを実行するときに変更されたすべての設定に関する情報が表に表示されるようになりました。 `Settings` データ)。 を追加しました `log_query_settings` 設定。 [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482) +- その `system.query_log` と `system.processes` テーブルには、クエリの実行に参加しているスレッドの数に関する情報が表示されます。 `thread_numbers` 列)。 [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482) +- 追加 `ProfileEvents` ネットワーク上の読み取りと書き込み、およびディスクへの読み取りと書き込みに費やされる時間、ネットワークエラーの数、およびネットワーク帯域幅が [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482) +- 追加 `ProfileEvents`rusageのシステムメトリックスを含むカウンター(ユーザー空間のCPU使用率、カーネル、ページフォールト、およびコンテキストスイッチに関する情報を取得できます)、 [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482) +- その `ProfileEvents` カウンタは、クエリごとにグローバルに適用され、クエリごとにリソース消費を詳細にプロファイルできるように、クエリ実行スレッドごとに適用され [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482) +- を追加しました `system.query_thread_log` 各クエリ実行スレッドに関する情報を含むテーブル。 を追加しました `log_query_threads` 設定。 [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482) +- その `system.metrics` と `system.events` 表現を内蔵しております。 [\#3016](https://github.com/ClickHouse/ClickHouse/pull/3016) +- を追加しました `arrayEnumerateDense` 機能。 [アモスの鳥](https://github.com/ClickHouse/ClickHouse/pull/2975) +- を追加しました `arrayCumSumNonNegative` と `arrayDifference` 機能。 [Aleksey Studnev](https://github.com/ClickHouse/ClickHouse/pull/2942) +- を追加しました `retention` 集計関数。 [Sundy Li](https://github.com/ClickHouse/ClickHouse/pull/2887) +- これで、プラス演算子を使用して集計関数の状態を追加(マージ)し、集計関数の状態に非負の定数を掛けることができます。 [\#3062](https://github.com/ClickHouse/ClickHouse/pull/3062) [\#3034](https://github.com/ClickHouse/ClickHouse/pull/3034) +- MergeTreeファミリーのテーブルに仮想列が追加されました `_partition_id`. [\#3089](https://github.com/ClickHouse/ClickHouse/pull/3089) + +#### 実験の特徴: {#experimental-features-1} + +- を追加しました `LowCardinality(T)` データ型。 このデー [\#2830](https://github.com/ClickHouse/ClickHouse/pull/2830) +- JITコンパイルされた関数のキャッシュと、コンパイル前の使用回数のカウンターを追加しました。 式をJITコンパイルするには、 `compile_expressions` 設定。 [\#2990](https://github.com/ClickHouse/ClickHouse/pull/2990) [\#3077](https://github.com/ClickHouse/ClickHouse/pull/3077) + +#### 改善: {#improvements-4} + +- 放棄されたレプリカがある場合、複製ログの無制限の蓄積の問題を修正しました。 長いラグを持つレプリカの効果的な復旧モードを追加しました。 +- 改善された性能の `GROUP BY` 複数の集約フィールドがある場合、一方がstringで、他方が固定長の場合。 +- 使用時のパフォーマンスの向上 `PREWHERE` そして、式の暗黙的な転送で `PREWHERE`. +- テキスト形式の解析性能の向上 (`CSV`, `TSV`). 
[アモスの鳥](https://github.com/ClickHouse/ClickHouse/pull/2977) [\#2980](https://github.com/ClickHouse/ClickHouse/pull/2980) +- バイナリ形式での文字列と配列の読み取りパフォーマンスの向上。 [アモスの鳥](https://github.com/ClickHouse/ClickHouse/pull/2955) +- クエリのパフォーマンスの向上とメモリ消費の削減 `system.tables` と `system.columns` 単一のサーバー上のテーブルの非常に大きな数がある場合。 [\#2953](https://github.com/ClickHouse/ClickHouse/pull/2953) +- エラーが発生するクエリの大きなストリームの場合のパフォーマンスの問題を修正しました `_dl_addr` 機能は目に見えます `perf top` しかし、サーバーは多くのCPUを使用していません)。 [\#2938](https://github.com/ClickHouse/ClickHouse/pull/2938) +- 条件はビューにキャストされます(以下の場合 `enable_optimize_predicate_expression` が有効になっている)。 [冬張](https://github.com/ClickHouse/ClickHouse/pull/2907) +- の機能の改善 `UUID` データ型。 [\#3074](https://github.com/ClickHouse/ClickHouse/pull/3074) [\#2985](https://github.com/ClickHouse/ClickHouse/pull/2985) +- その `UUID` データ型は、-Alchemist辞書でサポートされています。 [\#2822](https://github.com/ClickHouse/ClickHouse/pull/2822) +- その `visitParamExtractRaw` 関数は入れ子構造で正しく動作します。 [冬張](https://github.com/ClickHouse/ClickHouse/pull/2974) +- とき `input_format_skip_unknown_fields` 設定が有効になっています。 `JSONEachRow` フォーマットはスキップされます。 [BlahGeek](https://github.com/ClickHouse/ClickHouse/pull/2958) +- のための `CASE` 条件付きの式を省略できるようになりました `ELSE` に相当します `ELSE NULL`. [\#2920](https://github.com/ClickHouse/ClickHouse/pull/2920) +- ZooKeeperで作業するときに操作タイムアウトを設定できます。 [urykhy](https://github.com/ClickHouse/ClickHouse/pull/2971) +- のオフセットを指定できます `LIMIT n, m` として `LIMIT n OFFSET m`. [\#2840](https://github.com/ClickHouse/ClickHouse/pull/2840) +- を使用することができ `SELECT TOP n` 代替としての構文 `LIMIT`. [\#2840](https://github.com/ClickHouse/ClickHouse/pull/2840) +- システムテーブルに書き込むキューのサイズが大きくなるため、 `SystemLog parameter queue is full` エラーは頻繁に発生しません。 +- その `windowFunnel` 集計機能に対応しまイベントが複数の条件です。 [アモスの鳥](https://github.com/ClickHouse/ClickHouse/pull/2801) +- 重複する列は、 `USING` のための節 `JOIN`. [\#3006](https://github.com/ClickHouse/ClickHouse/pull/3006) +- `Pretty` フォーマットは現在、幅によって列の配置に制限があります。 を使用 `output_format_pretty_max_column_pad_width` 設定。 値が広い場合、それはまだその全体が表示されますが、テーブル内の他のセルが広すぎることはありません。 [\#3003](https://github.com/ClickHouse/ClickHouse/pull/3003) +- その `odbc` テーブル機能することはできなくなるようです指定のデータベースのスキーマの名前です。 [アモスの鳥](https://github.com/ClickHouse/ClickHouse/pull/2885) +- で指定されたユーザ名を使用する機能を追加しました `clickhouse-client` 設定ファイル。 [Vladimir Kozbin](https://github.com/ClickHouse/ClickHouse/pull/2909) +- その `ZooKeeperExceptions` カウンター分割された三つのカウンター: `ZooKeeperUserExceptions`, `ZooKeeperHardwareExceptions`、と `ZooKeeperOtherExceptions`. +- `ALTER DELETE` クエリはマテリアライズドビュ +- のために定期的に `ReplicatedMergeTree` テーブルは非常に多数があるとき周期的な負荷スパイクを避けるために `ReplicatedMergeTree` テーブル。 +- のサポート `ATTACH TABLE ... 
ON CLUSTER` クエリ。 [\#3025](https://github.com/ClickHouse/ClickHouse/pull/3025) + +#### バグ修正: {#bug-fixes-13} + +- との問題を修正しました `Dictionary` テーブル `Size of offsets doesn't match size of column` または `Unknown compression method` 例外)。 このバグはバージョ [\#2913](https://github.com/ClickHouse/ClickHouse/issues/2913) +- マージ時のバグを修正 `CollapsingMergeTree` データパーツのいずれかが空の場合(これらのパーツはマージ中に形成されます。 `ALTER DELETE` すべてのデータが削除された場合)、 `vertical` マージにはアルゴリズムを使用した。 [\#3049](https://github.com/ClickHouse/ClickHouse/pull/3049) +- 中に競合状態を修正しました `DROP` または `TRUNCATE` のために `Memory` 同時のテーブル `SELECT` サーバーがクラッシュする可能性があります。 このバグはバージョン1.1.54388に登場しました。 [\#3038](https://github.com/ClickHouse/ClickHouse/pull/3038) +- 挿入するときにデータが失われる可能性を修正しました `Replicated` テーブルの場合 `Session is expired` エラーが返されるデータ損失を検出することができますの `ReplicatedDataLoss` メトリック)。 このエラーはバージョン1.1.54378で発生しました。 [\#2939](https://github.com/ClickHouse/ClickHouse/pull/2939) [\#2949](https://github.com/ClickHouse/ClickHouse/pull/2949) [\#2964](https://github.com/ClickHouse/ClickHouse/pull/2964) +- 中にセグメントフォールトを修正 `JOIN ... ON`. [\#3000](https://github.com/ClickHouse/ClickHouse/pull/3000) +- カラム名を検索する際のエラーを修正しました `WHERE` 式は完全に修飾された列名で構成されます。 `WHERE table.column`. [\#2994](https://github.com/ClickHouse/ClickHouse/pull/2994) +- 修正された “Not found column” サブクエリを持つIN式で構成される単一の列がリモートサーバーから要求された場合に、分散クエリを実行するときに発生したエラーです。 [\#3087](https://github.com/ClickHouse/ClickHouse/pull/3087) +- 修正された `Block structure mismatch in UNION stream: different number of columns` シャードの一方がローカルで、他方がローカルでない場合に分散クエリに対して発生したエラー、および移動の最適化 `PREWHERE` トリガーされます。 [\#2226](https://github.com/ClickHouse/ClickHouse/pull/2226) [\#3037](https://github.com/ClickHouse/ClickHouse/pull/3037) [\#3055](https://github.com/ClickHouse/ClickHouse/pull/3055) [\#3065](https://github.com/ClickHouse/ClickHouse/pull/3065) [\#3073](https://github.com/ClickHouse/ClickHouse/pull/3073) [\#3090](https://github.com/ClickHouse/ClickHouse/pull/3090) [\#3093](https://github.com/ClickHouse/ClickHouse/pull/3093) +- 修正された `pointInPolygon` 非凸多角形の特定のケースの関数です。 [\#2910](https://github.com/ClickHouse/ClickHouse/pull/2910) +- 比較時の誤った結果を修正しました `nan` 整数で。 [\#3024](https://github.com/ClickHouse/ClickHouse/pull/3024) +- のエラーを修正しました `zlib-ng` まれにsegfaultにつながる可能性のあるライブラリ。 [\#2854](https://github.com/ClickHouse/ClickHouse/pull/2854) +- テーブルに挿入するときにメモリリークを修正しました `AggregateFunction` 列、集計関数の状態が単純でない場合(メモリを別々に割り当てる)、単一の挿入要求が複数の小さなブロックになる場合。 [\#3084](https://github.com/ClickHouse/ClickHouse/pull/3084) +- 同じを作成し、削除する際の競合状態を修正しました `Buffer` または `MergeTree` 同時のテーブル。 +- タプルなどの特定の非自明な型で構成されたタプルを比較するときに、segfaultの可能性を修正しました。 [\#2989](https://github.com/ClickHouse/ClickHouse/pull/2989) +- 特定の実行時にsegfaultの可能性を修正しました `ON CLUSTER` クエリ。 [冬張](https://github.com/ClickHouse/ClickHouse/pull/2960) +- のエラーを修正しました `arrayDistinct` 機能のための `Nullable` 配列要素。 [\#2845](https://github.com/ClickHouse/ClickHouse/pull/2845) [\#2937](https://github.com/ClickHouse/ClickHouse/pull/2937) +- その `enable_optimize_predicate_expression` オプションは現在、正しく `SELECT *`. [冬張](https://github.com/ClickHouse/ClickHouse/pull/2929) +- ZooKeeperセッションを再初期化するときにsegfaultを修正しました。 [\#2917](https://github.com/ClickHouse/ClickHouse/pull/2917) +- 固定性ブロックが飼育係. +- 修正コードをネストしたデータ構造 `SummingMergeTree`. 
+- 集計関数の状態にメモリを割り当てるとき、整列が正しく考慮され、集計関数の状態を実装するときに整列を必要とする操作を使用することがで [chenxing-xc](https://github.com/ClickHouse/ClickHouse/pull/2808) + +#### セキュリティ修正: {#security-fix} + +- ODBCデータソースの安全な使用。 ODBCドライバとの対話では、 `clickhouse-odbc-bridge` プロセス。 誤りや第三者ODBCドライバーにな問題となるとサーバの安定性や脆弱性があります。 [\#2828](https://github.com/ClickHouse/ClickHouse/pull/2828) [\#2879](https://github.com/ClickHouse/ClickHouse/pull/2879) [\#2886](https://github.com/ClickHouse/ClickHouse/pull/2886) [\#2893](https://github.com/ClickHouse/ClickHouse/pull/2893) [\#2921](https://github.com/ClickHouse/ClickHouse/pull/2921) +- のファイルパスの修正された誤った検証 `catBoostPool` テーブル機能。 [\#2894](https://github.com/ClickHouse/ClickHouse/pull/2894) +- システムテーブルの内容 (`tables`, `databases`, `parts`, `columns`, `parts_columns`, `merges`, `mutations`, `replicas`、と `replication_queue`)フィルタによると、ユーザーの設定データベースへのアクセス (`allow_databases`). [冬張](https://github.com/ClickHouse/ClickHouse/pull/2856) + +#### 下位互換性のない変更: {#backward-incompatible-changes-3} + +- JOINを使用したクエリでは、スター文字はSQL標準に準拠してすべてのテーブルの列のリストに展開されます。 古い動作を復元するには、以下を設定します `asterisk_left_columns_only` ユーザー構成レベルで1に設定します。 + +#### ビルドの変更: {#build-changes-2} + +- ほとんどの統合テス +- コードのスタイルチェックによって解決することができます +- その `memcpy` 実装は、CentOS7/Fedoraをビルドするときに正しく選択されます。 [エティエンシャンペティエ](https://github.com/ClickHouse/ClickHouse/pull/2912) +- Clangをビルドに使用するときは、次の警告が表示されます `-Weverything` 通常のものに加えて、追加されています `-Wall-Wextra -Werror`. [\#2957](https://github.com/ClickHouse/ClickHouse/pull/2957) +- ビルドのデバッグには、 `jemalloc` debugオプション。 +- のインタフェースを図書館との交流の飼育係で宣言されていく。 [\#2950](https://github.com/ClickHouse/ClickHouse/pull/2950) + +## ClickHouseリリース18.10 {#clickhouse-release-18-10} + +### ClickHouseリリース18.10.3、2018-08-13 {#clickhouse-release-18-10-3-2018-08-13} + +#### 新しい機能: {#new-features-5} + +- Httpsは複製に使用できます。 [\#2760](https://github.com/ClickHouse/ClickHouse/pull/2760) +- 機能を追加しました `murmurHash2_64`, `murmurHash3_32`, `murmurHash3_64`、と `murmurHash3_128` 既存のものに加えて `murmurHash2_32`. [\#2791](https://github.com/ClickHouse/ClickHouse/pull/2791) +- ClickHouse ODBCドライバでのNull許容型のサポート (`ODBCDriver2` 出力フォーマット)。 [\#2834](https://github.com/ClickHouse/ClickHouse/pull/2834) +- のサポート `UUID` キー列に。 + +#### 改善: {#improvements-5} + +- クラスターは、設定ファイルから削除されたときに、サーバーを再起動せずに削除できます。 [\#2777](https://github.com/ClickHouse/ClickHouse/pull/2777) +- 外部辞書は、設定ファイルから削除されたときにサーバーを再起動せずに削除できます。 [\#2779](https://github.com/ClickHouse/ClickHouse/pull/2779) +- 追加 `SETTINGS` のサポート `Kafka` テーブルエンジン。 [Alexander Marshalov](https://github.com/ClickHouse/ClickHouse/pull/2781) +- のための改善 `UUID` データ型(まだ完了していない)。 [\#2618](https://github.com/ClickHouse/ClickHouse/pull/2618) +- の併合の後の空の部品のためのサポート `SummingMergeTree`, `CollapsingMergeTree` と `VersionedCollapsingMergeTree` エンジン [\#2815](https://github.com/ClickHouse/ClickHouse/pull/2815) +- 完了した変異の古いレコードが削除されます (`ALTER DELETE`). [\#2784](https://github.com/ClickHouse/ClickHouse/pull/2784) +- を追加しました `system.merge_tree_settings` テーブル。 [Kirill Shvakov](https://github.com/ClickHouse/ClickHouse/pull/2841) +- その `system.tables` 表現の依存関係列: `dependencies_database` と `dependencies_table`. 
[冬張](https://github.com/ClickHouse/ClickHouse/pull/2851) +- を追加しました `max_partition_size_to_drop` 設定オプション。 [\#2782](https://github.com/ClickHouse/ClickHouse/pull/2782) +- を追加しました `output_format_json_escape_forward_slashes` オプション。 [Alexander Bocharov](https://github.com/ClickHouse/ClickHouse/pull/2812) +- を追加しました `max_fetch_partition_retries_count` 設定。 [\#2831](https://github.com/ClickHouse/ClickHouse/pull/2831) +- を追加しました `prefer_localhost_replica` ローカルレプリカの設定を無効にし、プロセス間の対話を行わずにローカルレプリカに移動するための設定。 [\#2832](https://github.com/ClickHouse/ClickHouse/pull/2832) +- その `quantileExact` 集計関数の戻り値 `nan` 空の上の集約の場合 `Float32` または `Float64` セット。 [Sundy Li](https://github.com/ClickHouse/ClickHouse/pull/2855) + +#### バグ修正: {#bug-fixes-14} + +- それは不可能な接続を確立するために作られたodbcの接続文字列パラメータの不要なエスケープを削除しました。 このエラーはバージョン18.6.0で発生しました。 +- 処理のロジックを修正しました `REPLACE PARTITION` 複製キュー内のコマンド。 がある場合は二つ `REPLACE` コマンドと同じパーティション、間違ったロジックが原因の一つの複製のキューは行われなくなります。 [\#2814](https://github.com/ClickHouse/ClickHouse/pull/2814) +- すべてのデータ部分が空であったときにマージのバグを修正しました(マージまたはから形成された部分 `ALTER DELETE` すべてのデータが削除された場合)。 このバグに登場したバージョン18.1.0. [\#2930](https://github.com/ClickHouse/ClickHouse/pull/2930) +- 同時実行のエラーを修正しました `Set` または `Join`. [アモスの鳥](https://github.com/ClickHouse/ClickHouse/pull/2823) +- 修正された `Block structure mismatch in UNION stream: different number of columns` のために発生したエラー `UNION ALL` サブクエリ内のクエリのいずれかの場合 `SELECT` クエリを含む複製のカラム名をとります。 [冬張](https://github.com/ClickHouse/ClickHouse/pull/2094) +- MySQLサーバに接続するときに例外が発生した場合、メモリリークを修正しました。 +- クエリエラーの場合に修正された不正なclickhouse-クライアント応答コード。 +- 固定誤動作の実現の景色を含む異なるものとみなされます。 [\#2795](https://github.com/ClickHouse/ClickHouse/issues/2795) + +#### 下位互換性のない変更 {#backward-incompatible-changes-4} + +- 除去支援のためのチェックテーブルのクエリのために配布します。 + +#### ビルドの変更: {#build-changes-3} + +- アロケータが置き換えられました: `jemalloc` の代りに今使用されます `tcmalloc`. いくつかのシナリオでは、この増加は20%まで高速化します。 しかし、20%まで減速したクエリがあります。 いくつかのシナリオでは、メモリ消費量が約10%削減され、安定性が向上しました。 非常に競争の激しい負荷では、ユーザスペースとシステムでのCPU使用率はわずかに増加します。 [\#2773](https://github.com/ClickHouse/ClickHouse/pull/2773) +- サブモジュールからのlibresslの使用。 [\#1983](https://github.com/ClickHouse/ClickHouse/pull/1983) [\#2807](https://github.com/ClickHouse/ClickHouse/pull/2807) +- サブモジュールからのunixodbcの使用。 [\#2789](https://github.com/ClickHouse/ClickHouse/pull/2789) +- サブモジュールからのmariadb-connector-cの使用。 [\#2785](https://github.com/ClickHouse/ClickHouse/pull/2785) +- テストデータの可用性に依存する機能テストファイルをリポジトリに追加しました(当面はテストデータ自体なし)。 + +## ClickHouseリリース18.6 {#clickhouse-release-18-6} + +### ClickHouseリリース18.6.0、2018-08-02 {#clickhouse-release-18-6-0-2018-08-02} + +#### 新しい機能: {#new-features-6} + +- JOIN ON構文のON式のサポートが追加されました: + `JOIN ON Expr([table.]column ...) = Expr([table.]column, ...) [AND Expr([table.]column, ...) = Expr([table.]column, ...) ...]` + 式は、and演算子によって結合された等量の連鎖でなければなりません。 等式の各辺は、いずれかのテーブルの列に対する任意の式にすることができます。 完全修飾列名の使用がサポートされています (`table.name`, `database.table.name`, `table_alias.name`, `subquery_alias.name`)右のテーブルのために。 [\#2742](https://github.com/ClickHouse/ClickHouse/pull/2742) +- HTTPSで有効にする必要があります。 [\#2760](https://github.com/ClickHouse/ClickHouse/pull/2760) + +#### 改善: {#improvements-6} + +- サーバにパッチ部品のバージョンです。 データのパッチ版コンポーネントが `system.processes` と `query_log`. [\#2646](https://github.com/ClickHouse/ClickHouse/pull/2646) + +## ClickHouseリリース18.5 {#clickhouse-release-18-5} + +### ClickHouseリリース18.5.1、2018-07-31 {#clickhouse-release-18-5-1-2018-07-31} + +#### 新しい機能: {#new-features-7} + +- ハッシュ関数を追加しました `murmurHash2_32` [\#2756](https://github.com/ClickHouse/ClickHouse/pull/2756). 
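+
+参考として、新しく追加されたハッシュ関数の最小限の使用例を示します(戻り値は32ビットの符号なし整数のハッシュ値です。具体的な値はここでは省略します)。
+
+``` sql
+-- 文字列のMurmurHash2(32ビット)を計算する
+SELECT murmurHash2_32('ClickHouse') AS h
+```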
+
+#### Improvements: {#improvements-7}
+
+- You can now use the `from_env` attribute to set values in config files from environment variables [\#2741](https://github.com/ClickHouse/ClickHouse/pull/2741).
+- Added case-insensitive versions of the `coalesce`, `ifNull`, and `nullIf` functions [\#2752](https://github.com/ClickHouse/ClickHouse/pull/2752).
+
+#### Bug fixes: {#bug-fixes-15}
+
+- Fixed a bug when starting a replica [\#2759](https://github.com/ClickHouse/ClickHouse/pull/2759).
+
+## ClickHouse release 18.4 {#clickhouse-release-18-4}
+
+### ClickHouse release 18.4.0, 2018-07-28 {#clickhouse-release-18-4-0-2018-07-28}
+
+#### New features: {#new-features-8}
+
+- Added system tables: `formats`, `data_type_families`, `aggregate_function_combinators`, `table_functions`, `table_engines`, `collations` [\#2721](https://github.com/ClickHouse/ClickHouse/pull/2721).
+- Added the ability to use a table function instead of a table as an argument of a `remote` or `cluster table function` [\#2708](https://github.com/ClickHouse/ClickHouse/pull/2708).
+- Support for `HTTP Basic` authentication in the replication protocol [\#2727](https://github.com/ClickHouse/ClickHouse/pull/2727).
+- The `has` function now allows searching for a numeric value in an array of `Enum` values [Maxim Khrisanfov](https://github.com/ClickHouse/ClickHouse/pull/2699).
+- Added support for arbitrary message separators when reading from `Kafka` [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2701).
+
+#### Improvements: {#improvements-8}
+
+- The `ALTER TABLE t DELETE WHERE` query does not rewrite data parts that were not affected by the WHERE condition [\#2694](https://github.com/ClickHouse/ClickHouse/pull/2694).
+- The `use_minimalistic_checksums_in_zookeeper` option for `ReplicatedMergeTree` tables is enabled by default. This setting was added in version 1.1.54378, 2018-04-16. Versions that are older than 1.1.54378 can no longer be installed.
+- Support for running `KILL` and `OPTIMIZE` queries that specify `ON CLUSTER` [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2689).
+
+#### Bug fixes: {#bug-fixes-16}
+
+- Fixed the error `Column ... is not under an aggregate function and not in GROUP BY` for aggregation with an IN expression. This bug appeared in version 18.1.0. ([bbdd780b](https://github.com/ClickHouse/ClickHouse/commit/bbdd780be0be06a0f336775941cdd536878dd2c2))
+- Fixed a bug in the `windowFunnel aggregate function` [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2735).
+- Fixed a bug in the `anyHeavy` aggregate function ([a2101df2](https://github.com/ClickHouse/ClickHouse/commit/a2101df25a6a0fba99aa71f8793d762af2b801ee))
+- Fixed a server crash when using the `countArray()` aggregate function.
+
+#### Backward incompatible changes: {#backward-incompatible-changes-5}
+
+- Parameters for the `Kafka` engine were changed from `Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format[, kafka_schema, kafka_num_consumers])` to `Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format[, kafka_row_delimiter, kafka_schema, kafka_num_consumers])`. If your tables use the `kafka_schema` or `kafka_num_consumers` parameters, you have to manually edit the metadata file `path/metadata/database/table.sql` and add the `kafka_row_delimiter` parameter with a `''` value.
+
+## ClickHouse release 18.1 {#clickhouse-release-18-1}
+
+### ClickHouse release 18.1.0, 2018-07-23 {#clickhouse-release-18-1-0-2018-07-23}
+
+#### New features: {#new-features-9}
+
+- Support for the `ALTER TABLE t DELETE WHERE` query for non-replicated MergeTree tables ([\#2634](https://github.com/ClickHouse/ClickHouse/pull/2634)).
+- Support for arbitrary types for the `uniq*` family of aggregate functions ([\#2010](https://github.com/ClickHouse/ClickHouse/issues/2010)).
+- Support for arbitrary types in comparison operators ([\#2026](https://github.com/ClickHouse/ClickHouse/issues/2026)).
+- The `users.xml` file allows setting a subnet mask in the format `10.0.0.1/255.255.255.0`. This is necessary for using masks for IPv6 networks with zeros in the middle ([\#2637](https://github.com/ClickHouse/ClickHouse/pull/2637)).
+- Added the `arrayDistinct` function ([\#2670](https://github.com/ClickHouse/ClickHouse/pull/2670)).
+- The SummingMergeTree engine can now work with AggregateFunction type columns (see the sketch after this list) ([Constantin S. Pan](https://github.com/ClickHouse/ClickHouse/pull/2566)).
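+
+A minimal sketch of the last feature; the table and column names (including the source table `events`) are hypothetical. States are written with the `-State` combinator and read back with `-Merge`:
+
+``` sql
+CREATE TABLE daily_uniques
+(
+    day Date,
+    uniq_state AggregateFunction(uniq, UInt64)
+)
+ENGINE = SummingMergeTree
+ORDER BY day;
+
+INSERT INTO daily_uniques
+SELECT toDate(event_time) AS day, uniqState(user_id)
+FROM events GROUP BY day;
+
+SELECT day, uniqMerge(uniq_state) AS uniques
+FROM daily_uniques GROUP BY day;
+```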
+
+#### Improvements: {#improvements-9}
+
+- Changed the numbering scheme for release versions. Now the first part contains the year of release (A.D., Moscow time zone, minus 2000), and the second part contains the number for major changes (increases for most releases). Releases are still backward compatible, unless otherwise stated in the changelog.
+- Accelerated conversion of floating-point numbers to a string ([Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2664)).
+- If some rows were skipped during an insert due to parsing errors (this is possible with the `input_allow_errors_num` and `input_allow_errors_ratio` settings enabled), the number of skipped rows is now written to the server log ([Leonardo Cecchi](https://github.com/ClickHouse/ClickHouse/pull/2669)).
+
+#### Bug fixes: {#bug-fixes-17}
+
+- Fixed the TRUNCATE command for temporary tables ([Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2624)).
+- Fixed a rare deadlock in the ZooKeeper client library that occurred when there was a network error while reading the response ([c315200](https://github.com/ClickHouse/ClickHouse/commit/c315200e64b87e44bdf740707fc857d1fdf7e947)).
+- Fixed an error during a CAST to Nullable types ([\#1322](https://github.com/ClickHouse/ClickHouse/issues/1322)).
+- Fixed the incorrect result of the `maxIntersection()` function when the boundaries of intervals coincided ([Michael Furmur](https://github.com/ClickHouse/ClickHouse/pull/2657)).
+- Fixed incorrect transformation of an OR expression chain in a function argument ([chenxing-xc](https://github.com/ClickHouse/ClickHouse/pull/2663)).
+- Fixed performance degradation for queries containing `IN (subquery)` expressions inside another subquery ([\#2571](https://github.com/ClickHouse/ClickHouse/issues/2571)).
+- Fixed incompatibility between servers with different versions in distributed queries that use a `CAST` function that isn't in uppercase letters ([fe8c4d6](https://github.com/ClickHouse/ClickHouse/commit/fe8c4d64e434cacd4ceef34faa9005129f2190a5)).
+- Added missing quoting of identifiers for queries to an external DBMS ([\#2635](https://github.com/ClickHouse/ClickHouse/issues/2635)).
+
+#### Backward incompatible changes: {#backward-incompatible-changes-6}
+
+- Converting a string containing the number zero to DateTime does not work. Example: `SELECT toDateTime('0')`. This is also the reason that `DateTime DEFAULT '0'` does not work in tables, as well as `0` in dictionaries. Solution: replace `0` with `0000-00-00 00:00:00`.
+
+## ClickHouse release 1.1 {#clickhouse-release-1-1}
+
+### ClickHouse release 1.1.54394, 2018-07-12 {#clickhouse-release-1-1-54394-2018-07-12}
+
+#### New features: {#new-features-10}
+
+- Added the `histogram` aggregate function ([Mikhail Surin](https://github.com/ClickHouse/ClickHouse/pull/2521)).
+- Now `OPTIMIZE TABLE ... FINAL` can be used without specifying partitions for `ReplicatedMergeTree` ([Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2600)).
+
+#### Bug fixes: {#bug-fixes-18}
+
+- Fixed a problem that made it impossible to download larger parts when there is a load on the network or disk (it resulted in cyclical attempts to download parts). This error occurred in version 1.1.54388.
+- Fixed issues when using chroot in ZooKeeper if you inserted duplicate data blocks in the table.
+- The `has` function now works correctly for an array with Nullable elements ([\#2115](https://github.com/ClickHouse/ClickHouse/issues/2115)).
+- The `system.tables` table now works correctly when used in distributed queries. The `metadata_modification_time` and `engine_full` columns are now non-virtual. Fixed an error that occurred if only these columns were queried from the table.
+- Fixed how an empty `TinyLog` table works after inserting an empty data block ([\#2563](https://github.com/ClickHouse/ClickHouse/issues/2563)).
+- The `system.zookeeper` table works if the value of the node in ZooKeeper is NULL.
+
+### ClickHouse release 1.1.54390, 2018-07-06 {#clickhouse-release-1-1-54390-2018-07-06}
+
+#### New features: {#new-features-11}
+
+- Queries can be sent in `multipart/form-data` format (in the `query` field), which is useful if external data is also sent for query processing ([Olga Hvostikova](https://github.com/ClickHouse/ClickHouse/pull/2490)).
+- Added the ability to enable or disable processing single or double quotation marks when reading data in CSV format. You can configure this in the `format_csv_allow_single_quotes` and `format_csv_allow_double_quotes` settings (see the sketch after this list) ([Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2574)).
+- Now `OPTIMIZE TABLE ... FINAL` can be used without specifying the partition for non-replicated variants of `MergeTree` ([Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2599)).
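+
+A hedged sketch of toggling the new CSV quoting settings for a session:
+
+``` sql
+SET format_csv_allow_single_quotes = 0;  -- treat ' as a regular character
+SET format_csv_allow_double_quotes = 1;  -- keep "..." quoting enabled
+-- Subsequent INSERT ... FORMAT CSV parsing follows these rules.
+```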
+
+#### Improvements: {#improvements-10}
+
+- Improved performance, reduced memory consumption, and correct memory consumption tracking with use of the IN operator when a table index could be used ([\#2584](https://github.com/ClickHouse/ClickHouse/pull/2584)).
+- Removed redundant checking of checksums when adding a data part. This is important when there are a large number of replicas.
+- Added support for `Array(Tuple(...))` arguments for the `arrayEnumerateUniq` function ([\#2573](https://github.com/ClickHouse/ClickHouse/pull/2573)).
+- Added `Nullable` support for the `runningDifference` function ([\#2594](https://github.com/ClickHouse/ClickHouse/pull/2594)).
+- Improved query analysis performance when there is a very large number of expressions ([\#2572](https://github.com/ClickHouse/ClickHouse/pull/2572)).
+- Faster selection of data parts for merging in `ReplicatedMergeTree` tables. Faster recovery of the ZooKeeper session ([\#2597](https://github.com/ClickHouse/ClickHouse/pull/2597)).
+- The `format_version.txt` file for `MergeTree` tables is re-created if it is missing, which makes sense if ClickHouse is launched after copying the directory structure without files ([Ciprian Hacman](https://github.com/ClickHouse/ClickHouse/pull/2593)).
+
+#### Bug fixes: {#bug-fixes-19}
+
+- Fixed a bug when working with ZooKeeper that could make it impossible to recover the session and the readonly states of tables before restarting the server.
+- Fixed a bug when working with ZooKeeper that could result in old nodes not being deleted if the session is interrupted.
+- Fixed an error in the `quantileTDigest` function for Float arguments (this bug was introduced in version 1.1.54388) ([Mikhail Surin](https://github.com/ClickHouse/ClickHouse/pull/2553)).
+- Fixed a bug in the index for MergeTree tables if the primary key column is located inside a function for converting types between signed and unsigned integers of the same size ([\#2603](https://github.com/ClickHouse/ClickHouse/pull/2603)).
+- Fixed a segfault if `macros` are used but they aren't in the config file ([\#2570](https://github.com/ClickHouse/ClickHouse/pull/2570)).
+- Fixed switching to the default database when reconnecting the client ([\#2583](https://github.com/ClickHouse/ClickHouse/pull/2583)).
+- Fixed a bug that occurred when the `use_index_for_in_with_subqueries` setting was disabled.
+
+#### Security fix: {#security-fix-1}
+
+- Sending files is no longer possible when connected to MySQL (`LOAD DATA LOCAL INFILE`).
+
+### ClickHouse release 1.1.54388, 2018-06-28 {#clickhouse-release-1-1-54388-2018-06-28}
+
+#### New features: {#new-features-12}
+
+- Support for the `ALTER TABLE t DELETE WHERE` query for replicated tables. Added the `system.mutations` table to track progress of this type of queries.
+- Support for the `ALTER TABLE t [REPLACE|ATTACH] PARTITION` query for \*MergeTree tables.
+- Support for the `TRUNCATE TABLE` query ([Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2260))
+- Several new `SYSTEM` queries for replicated tables (`RESTART REPLICAS`, `SYNC REPLICA`, `[STOP|START] [MERGES|FETCHES|SENDS REPLICATED|REPLICATION QUEUES]`).
+- Added the ability to write to a table with the MySQL engine and the corresponding table function ([sundy-li](https://github.com/ClickHouse/ClickHouse/pull/2294)).
+- Added the `url()` table function and the `URL` table engine ([Alexander Sapin](https://github.com/ClickHouse/ClickHouse/pull/2501)).
+- Added the `windowFunnel` aggregate function (see the sketch after this list) ([sundy-li](https://github.com/ClickHouse/ClickHouse/pull/2352)).
+- New `startsWith` and `endsWith` functions for strings ([Vadim Plakhtinsky](https://github.com/ClickHouse/ClickHouse/pull/2429)).
+- The `numbers()` table function now allows you to specify the offset ([Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2535)).
+- The password to `clickhouse-client` can be entered interactively.
+- Server logs can now be sent to syslog ([Alexander Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/2459)).
+- Support for logging in dictionaries with a shared library source ([Alexander Sapin](https://github.com/ClickHouse/ClickHouse/pull/2472)).
+- Support for custom CSV delimiters ([Ivan Zhukov](https://github.com/ClickHouse/ClickHouse/pull/2263))
+- Added the `date_time_input_format` setting. If you switch this setting to `'best_effort'`, DateTime values will be read in a wide range of formats.
+- Added the `clickhouse-obfuscator` utility for data obfuscation. Usage example: publishing data used in performance tests.
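+
+A hedged sketch of `windowFunnel`; the `events` table and its columns are hypothetical:
+
+``` sql
+SELECT
+    user_id,
+    windowFunnel(3600)(ts, event = 'view', event = 'cart', event = 'buy') AS level
+FROM events
+GROUP BY user_id
+-- level is the length of the longest prefix of the condition chain
+-- that occurred within a 3600-second window.
+```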
+
+#### Experimental features: {#experimental-features-2}
+
+- Added the ability to calculate `and` arguments only where they are needed ([Anastasia Tsarkova](https://github.com/ClickHouse/ClickHouse/pull/2272))
+- JIT compilation to native code is now available for some expressions ([pyos](https://github.com/ClickHouse/ClickHouse/pull/2277)).
+
+#### Bug fixes: {#bug-fixes-20}
+
+- Duplicates no longer appear for a query with `DISTINCT` and `ORDER BY`.
+- Queries with `ARRAY JOIN` and `arrayFilter` no longer return an incorrect result.
+- Fixed an error when reading an array column from a Nested structure ([\#2066](https://github.com/ClickHouse/ClickHouse/issues/2066)).
+- Fixed an error when analyzing queries with a HAVING clause like `HAVING tuple IN (...)`.
+- Fixed an error when analyzing queries with recursive aliases.
+- Fixed an error when reading from ReplacingMergeTree with a condition in PREWHERE that filters all rows ([\#2525](https://github.com/ClickHouse/ClickHouse/issues/2525)).
+- User profile settings were not applied when using sessions in the HTTP interface.
+- Fixed how settings are applied from the command line parameters in clickhouse-local.
+- Fixed the ZooKeeper client library so that it uses the session timeout received from the server.
+- Fixed a bug in the ZooKeeper client library when the client waited for the server response longer than the timeout.
+- Fixed pruning of parts for queries with conditions on partition key columns ([\#2342](https://github.com/ClickHouse/ClickHouse/issues/2342)).
+- Merges are now possible after `CLEAR COLUMN IN PARTITION` ([\#2315](https://github.com/ClickHouse/ClickHouse/issues/2315)).
+- Type mapping in the ODBC table function has been fixed ([sundy-li](https://github.com/ClickHouse/ClickHouse/pull/2268)).
+- Type comparisons have been fixed for `DateTime` with and without a time zone ([Alexander Bocharov](https://github.com/ClickHouse/ClickHouse/pull/2400)).
+- Fixed syntactic parsing and formatting of the `CAST` operator.
+- Fixed insertion into a materialized view for the Distributed table engine ([Babacar Diassé](https://github.com/ClickHouse/ClickHouse/pull/2411)).
+- Fixed a race condition when writing data from the `Kafka` engine to materialized views ([Yangkuan Liu](https://github.com/ClickHouse/ClickHouse/pull/2448)).
+- Fixed SSRF in the remote() table function.
+- Fixed exit behavior of `clickhouse-client` in multiline mode ([\#2510](https://github.com/ClickHouse/ClickHouse/issues/2510)).
+
+#### Improvements: {#improvements-11}
+
+- Background tasks in replicated tables are now performed in a thread pool instead of in separate threads ([Silviu Caragea](https://github.com/ClickHouse/ClickHouse/pull/1722)).
+- Improved LZ4 compression performance.
+- Faster analysis for queries with a large number of JOINs and subqueries.
+- The DNS cache is now updated automatically when there are too many network errors.
+- Table inserts no longer occur if the insert into one of the materialized views is not possible because it has too many parts.
+- Corrected the discrepancy in the event counters `Query`, `SelectQuery`, and `InsertQuery`.
+- Expressions like `tuple IN (SELECT tuple)` are allowed if the tuple types match.
+- A server with replicated tables can start even if you haven't configured ZooKeeper.
+- When calculating the number of available CPU cores, limits on cgroups are now taken into account ([Atri Sharma](https://github.com/ClickHouse/ClickHouse/pull/2325)).
+- Added chown for config directories in the systemd config file ([Mikhail Shiryaev](https://github.com/ClickHouse/ClickHouse/pull/2421)).
+
+#### Build changes: {#build-changes-4}
+
+- The gcc8 compiler can be used for builds.
+- Added the ability to build llvm from a submodule.
+- The version of the librdkafka library has been updated to v0.11.4.
+- Added the ability to use the system libcpuid library. The library version has been updated to 0.4.0.
+- Fixed the build using the vectorclass library ([Babacar Diassé](https://github.com/ClickHouse/ClickHouse/pull/2274)).
+- Cmake now generates files for ninja by default (like when using `-G Ninja`).
+- Added the ability to use the libtinfo library instead of libtermcap ([Georgy Kondratiev](https://github.com/ClickHouse/ClickHouse/pull/2519)).
+- Fixed a header file conflict in Fedora Rawhide ([\#2520](https://github.com/ClickHouse/ClickHouse/issues/2520)).
+
+#### Backward incompatible changes: {#backward-incompatible-changes-7}
+
+- Removed escaping in `Vertical` and `Pretty*` formats and deleted the `VerticalRaw` format.
+- If servers with version 1.1.54388 (or newer) and servers with an older version are used simultaneously in a distributed query and the query has the `cast(x, 'Type')` expression without the `AS` keyword and doesn't have the word `cast` in uppercase, an exception will be thrown with a message like `Not found column cast(0, 'UInt8') in block`. Solution: update the server on the entire cluster.
+
+### ClickHouse release 1.1.54385, 2018-06-01 {#clickhouse-release-1-1-54385-2018-06-01}
+
+#### Bug fixes: {#bug-fixes-21}
+
+- Fixed an error that in some cases caused ZooKeeper operations to block.
+
+### ClickHouse release 1.1.54383, 2018-05-22 {#clickhouse-release-1-1-54383-2018-05-22}
+
+#### Bug fixes: {#bug-fixes-22}
+
+- Fixed a slowdown of the replication queue if a table has many replicas.
+
+### ClickHouse release 1.1.54381, 2018-05-14 {#clickhouse-release-1-1-54381-2018-05-14}
+
+#### Bug fixes: {#bug-fixes-23}
+
+- Fixed a bug that caused node leakage in ZooKeeper when ClickHouse loses its connection to the ZooKeeper server.
+
+### ClickHouse release 1.1.54380, 2018-04-21 {#clickhouse-release-1-1-54380-2018-04-21}
+
+#### New features: {#new-features-13}
+
+- Added the table function `file(path, format, structure)`. An example reading bytes from `/dev/urandom`: ``` ln -s /dev/urandom /var/lib/clickhouse/user_files/random``clickhouse-client -q "SELECT * FROM file('random', 'RowBinary', 'd UInt8') LIMIT 10" ```.
+
+#### Improvements: {#improvements-12}
+
+- Subqueries can be wrapped in `()` brackets to enhance query readability. For example: `(SELECT 1) UNION ALL (SELECT 1)`.
+- Simple `SELECT` queries from the `system.processes` table are not included in the `max_concurrent_queries` limit.
+
+#### Bug fixes: {#bug-fixes-24}
+
+- Fixed incorrect behavior of the `IN` operator when selecting from a `MATERIALIZED VIEW`.
+- Fixed incorrect filtering by partition index in expressions like `partition_key_column IN (...)`.
+- Fixed the inability to execute an `OPTIMIZE` query on a non-leader replica if `REANAME` was performed on the table.
+- Fixed the authorization error when executing `OPTIMIZE` or `ALTER` queries on a non-leader replica.
+- Fixed freezing of `KILL QUERY`.
+- Fixed an error in the ZooKeeper client library which led to loss of watches, freezing of the distributed DDL queue, and slowdowns in the replication queue if a non-empty `chroot` prefix is used in the ZooKeeper configuration.
+
+#### Backward incompatible changes: {#backward-incompatible-changes-8}
+
+- Removed support for expressions like `(a, b) IN (SELECT (a, b))` (you can use the equivalent expression `(a, b) IN (SELECT a, b)`). In previous releases, these expressions led to undetermined `WHERE` filtering.
+
+### ClickHouse release 1.1.54378, 2018-04-16 {#clickhouse-release-1-1-54378-2018-04-16}
+
+#### New features: {#new-features-14}
+
+- The logging level can be changed without restarting the server.
+- Added the `SHOW CREATE DATABASE` query.
+- The `query_id` can be passed to `clickhouse-client` (elBroom).
+- New setting: `max_network_bandwidth_for_all_users`.
+- Added support for `ALTER TABLE ... PARTITION ...` for `MATERIALIZED VIEW`.
+- Added information about the size of data parts in uncompressed form in system tables.
+- Server-to-server encryption support for distributed tables (`<secure>1</secure>` in the replica config in `<remote_servers>`).
+- Configuration of the table level for the `ReplicatedMergeTree` family in order to minimize the amount of data stored in ZooKeeper: `use_minimalistic_checksums_in_zookeeper = 1`
+- Configuration of the `clickhouse-client` prompt. By default, server names are now output to the prompt. The server's display name can be changed; it's also sent in the `X-ClickHouse-Display-Name` HTTP header (Kirill Shvakov).
+- Multiple comma-separated `topics` can be specified for the `Kafka` engine (Tobias Adamson)
+- When a query is stopped by `KILL QUERY` or `replace_running_query`, the client receives the `Query was canceled` exception instead of an incomplete result (see the sketch after this list).
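+
+A hedged sketch of stopping a query; the `query_id` value is hypothetical and the modern `KILL QUERY WHERE` syntax is assumed:
+
+``` sql
+KILL QUERY WHERE query_id = 'query-0001' ASYNC
+-- The client running 'query-0001' receives a "Query was canceled"
+-- exception instead of an incomplete result.
+```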
+
+#### Improvements: {#improvements-13}
+
+- `ALTER TABLE ... DROP/DETACH PARTITION` queries are run at the front of the replication queue.
+- `SELECT ... FINAL` and `OPTIMIZE ... FINAL` can be used even when the table has a single data part.
+- A `query_log` table is recreated on the fly if it was deleted manually (Kirill Shvakov).
+- The `lengthUTF8` function runs faster (zhang2014).
+- Improved performance of synchronous inserts in `Distributed` tables (`insert_distributed_sync = 1`) when there is a very large number of shards (see the sketch at the end of the 1.1.54370 notes below).
+- The server accepts the `send_timeout` and `receive_timeout` settings from the client and applies them when connecting to the client (they are applied in reverse order: the server socket's `send_timeout` is set to the `receive_timeout` value received from the client, and vice versa).
+- More robust crash recovery for asynchronous insertion into `Distributed` tables.
+- The return type of the `countEqual` function changed from `UInt32` to `UInt64` (谢磊).
+
+#### Bug fixes: {#bug-fixes-25}
+
+- Fixed an error with `IN` when the left side of the expression is `Nullable`.
+- Correct results are now returned when using tuples with `IN` when some of the tuple components are in the table index.
+- The `max_execution_time` limit now works correctly with distributed queries.
+- Fixed errors when calculating the size of composite columns in the `system.columns` table.
+- Fixed an error when creating a temporary table `CREATE TEMPORARY TABLE IF NOT EXISTS.`
+- Fixed errors in `StorageKafka` (\#\#2075)
+- Fixed server crashes from invalid arguments of certain aggregate functions.
+- Fixed the error that prevented the `DETACH DATABASE` query from stopping background tasks for `ReplicatedMergeTree` tables.
+- The `Too many parts` state is less likely to happen when inserting into aggregated materialized views (\#\#2084).
+- Corrected recursive handling of substitutions in the config if a substitution must be followed by another substitution on the same level.
+- Corrected the syntax in the metadata file when creating a `VIEW` that uses a query with `UNION ALL`.
+- `SummingMergeTree` now works correctly for summation of nested data structures with a composite key.
+- Fixed the possibility of a race condition when choosing the leader for `ReplicatedMergeTree` tables.
+
+#### Build changes: {#build-changes-5}
+
+- The build supports `ninja` instead of `make` and uses `ninja` by default for building releases.
+- Renamed packages: `clickhouse-server-base` is now `clickhouse-common-static`; `clickhouse-server-common` is now `clickhouse-server`; `clickhouse-common-dbg` is now `clickhouse-common-static-dbg`. To install, use `clickhouse-server clickhouse-client`. Packages with the old names will still load in the repositories for backward compatibility.
+
+#### Backward incompatible changes: {#backward-incompatible-changes-9}
+
+- Removed the special interpretation of an IN expression if an array is specified on the left side. Previously, the expression `arr IN (set)` was interpreted as “at least one `arr` element belongs to the `set`”. To get the same behavior in the new version, write `arrayExists(x -> x IN (set), arr)`.
+- Disabled the incorrect use of the socket option `SO_REUSEPORT`, which was incorrectly enabled by default in the Poco library. Note that on Linux there is no longer any reason to simultaneously specify the addresses `::` and `0.0.0.0` for listen – use just `::`, which allows listening to the connection both over IPv4 and IPv6 (with the default kernel config settings). You can also revert to the behavior from previous versions by specifying `<listen_reuse_port>1</listen_reuse_port>` in the config.
+
+### ClickHouse release 1.1.54370, 2018-03-16 {#clickhouse-release-1-1-54370-2018-03-16}
+
+#### New features: {#new-features-15}
+
+- Added the `system.macros` table and auto updating of macros when the config file is changed.
+- Added the `SYSTEM RELOAD CONFIG` query.
+- Added the `maxIntersections(left_col, right_col)` aggregate function, which returns the maximum number of simultaneously intersecting intervals `[left; right]`. The `maxIntersectionsPosition(left, right)` function returns the beginning of the “maximum” interval (see the sketch after this list). ([Michael Furmur](https://github.com/ClickHouse/ClickHouse/pull/2012)).
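+
+A hedged sketch, assuming a hypothetical `bookings` table with numeric interval columns:
+
+``` sql
+SELECT
+    maxIntersections(start_time, end_time)         AS peak_concurrency,
+    maxIntersectionsPosition(start_time, end_time) AS peak_interval_start
+FROM bookings
+```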
+
+#### Improvements: {#improvements-14}
+
+- When inserting data in a `Replicated` table, fewer requests are made to `ZooKeeper` (and most of the user-level errors have disappeared from the `ZooKeeper` log).
+- Added the ability to create aliases for data sets. Example: `WITH (1, 2, 3) AS set SELECT number IN set FROM system.numbers LIMIT 10`.
+
+#### Bug fixes: {#bug-fixes-26}
+
+- Fixed the `Illegal PREWHERE` error when reading from Merge tables over `Distributed` tables.
+- Added fixes that allow you to start clickhouse-server in IPv4-only Docker containers.
+- Fixed a race condition when reading from the system `system.parts_columns tables.`
+- Removed double buffering during a synchronous insert to a `Distributed` table, which could have caused the connection to time out.
+- Fixed a bug that caused excessively long waits for an unavailable replica before beginning a `SELECT` query.
+- Fixed incorrect dates in the `system.parts` table.
+- Fixed a bug that made it impossible to insert data in a `Replicated` table if `chroot` was non-empty in the configuration of the `ZooKeeper` cluster.
+- Fixed the vertical merging algorithm for an empty `ORDER BY` table.
+- Restored the ability to use dictionaries in queries to remote tables. This feature was lost in release 1.1.54362.
+- Restored the behavior for queries like `SELECT * FROM remote('server2', default.table) WHERE col IN (SELECT col2 FROM default.table)` when the right side of the `IN` should use a remote `default.table` instead of a local one. This behavior was broken in version 1.1.54358.
+- Removed extraneous error-level logging of `Not found column ... in block`.
+
+### ClickHouse release 1.1.54362, 2018-03-11 {#clickhouse-release-1-1-54362-2018-03-11}
+
+#### New features: {#new-features-16}
+
+- Aggregation without `GROUP BY` for an empty set (such as `SELECT count(*) FROM table WHERE 0`) now returns a result with one row with null values for aggregate functions, in compliance with the SQL standard. To restore the old behavior (return an empty result), set `empty_result_for_aggregation_by_empty_set` to 1.
+- Added type conversion for `UNION ALL`. Different alias names are allowed in `SELECT` positions in `UNION ALL`, in compliance with the SQL standard.
+- Arbitrary expressions are supported in `LIMIT BY` clauses. Previously, it was only possible to use columns resulting from `SELECT`.
+- An index of `MergeTree` tables is used when `IN` is applied to a tuple of expressions from the columns of the primary key. Example: `WHERE (UserID, EventDate) IN ((123, '2000-01-01'), ...)` (Anastasiya Tsarkova).
+- Added the `clickhouse-copier` tool for copying between clusters and resharding data (beta).
+- Added consistent hashing functions: `yandexConsistentHash`, `jumpConsistentHash`, `sumburConsistentHash`. They can be used as a sharding key in order to reduce the amount of network traffic during subsequent reshardings.
+- Added functions: `arrayAny`, `arrayAll`, `hasAny`, `hasAll`, `arrayIntersect`, `arrayResize`.
+- Added the `arrayCumSum` function (Javi Santana).
+- Added the `parseDateTimeBestEffort`, `parseDateTimeBestEffortOrZero`, and `parseDateTimeBestEffortOrNull` functions to read the DateTime from a string containing text in a wide variety of possible formats (see the sketch after this list).
+- Data can be partially reloaded from external dictionaries during updating (load only the records in which the value of the specified field is greater than in the previous download).
+- Added the `cluster` table function. Example: `cluster(cluster_name, db, table)`. The `remote` table function can accept the cluster name as the first argument, if it is specified as an identifier.
+- The `remote` and `cluster` table functions can be used in `INSERT` queries.
+- Added the `create_table_query` and `engine_full` virtual columns to the `system.tables` table. The `metadata_modification_time` column is virtual.
+- Added the `data_path` and `metadata_path` columns to the `system.tables` and `system.databases` tables, and added the `path` column to the `system.parts` and `system.parts_columns` tables.
+- Added additional information about merges in the `system.part_log` table.
+- An arbitrary partitioning key can be used for the `system.query_log` table (Kirill Shvakov).
+- The `SHOW TABLES` query now also shows temporary tables. Added temporary tables and the `is_temporary` column to `system.tables` (zhang2014).
+- Added `DROP TEMPORARY TABLE` and `EXISTS TEMPORARY TABLE` queries (zhang2014).
+- Support for `SHOW CREATE TABLE` for temporary tables (zhang2014).
+- Added the `system_profile` configuration parameter for the settings used by internal processes.
+- Support for loading `object_id` as an attribute in `MongoDB` dictionaries (Pavel Litvinenko).
+- Reading `null` as the default value when loading data for an external dictionary with the `MongoDB` source (Pavel Litvinenko).
+- Reading `DateTime` values in the `Values` format from a Unix timestamp without single quotes.
+- Failover is supported in the `remote` table function for cases when some of the replicas are missing the requested table.
+- Configuration settings can be overridden in the command line when you run `clickhouse-server`. Example: `clickhouse-server -- --logger.level=information`.
+- Implemented the `empty` function from a `FixedString` argument: the function returns 1 if the string consists entirely of null bytes (zhang2014).
+- Added the `listen_try` configuration parameter for listening to at least one of the listen addresses without quitting, if some of the addresses can't be listened to (useful for systems with disabled support for IPv4 or IPv6).
+- Added the `VersionedCollapsingMergeTree` table engine.
+- Support for rows and arbitrary numeric types for the `library` dictionary source.
+- `MergeTree` tables can be used without a primary key (you need to specify `ORDER BY tuple()`).
+- A `Nullable` type can be `CAST` to a non-`Nullable` type if the argument is not `NULL`.
+- `RENAME TABLE` can be performed for `VIEW`.
+- Added the `throwIf` function.
+- Added the `odbc_default_field_size` option, which allows you to extend the maximum size of the value loaded from an ODBC source (by default, it is 1024).
+- The `system.processes` table and `SHOW PROCESSLIST` now have the `is_cancelled` and `peak_memory_usage` columns.
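+
+A hedged sketch of the best-effort parsers added in this release:
+
+``` sql
+SELECT
+    parseDateTimeBestEffort('23/10/2018 12:12:57') AS parsed,
+    parseDateTimeBestEffortOrNull('garbage')       AS parsed_null,  -- NULL
+    parseDateTimeBestEffortOrZero('garbage')       AS parsed_zero   -- zero date
+```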
+
+#### Improvements: {#improvements-15}
+
+- Limits and quotas on the result are no longer applied to intermediate data for `INSERT SELECT` queries or for `SELECT` subqueries.
+- Fewer false triggers of `force_restore_data` when checking the status of `Replicated` tables when the server starts.
+- Added the `allow_distributed_ddl` option.
+- Nondeterministic functions are not allowed in expressions for `MergeTree` table keys.
+- Files with substitutions from `config.d` directories are loaded in alphabetical order.
+- Improved performance of the `arrayElement` function in the case of a constant multidimensional array with an empty array as one of the elements. Example: `[[1], []][x]`.
+- The server starts faster now when using configuration files with very large substitutions (for instance, very large lists of IP networks).
+- When running a query, table valued functions run once. Previously, the `remote` and `mysql` table valued functions performed the same query twice to retrieve the table structure from a remote server.
+- The `MkDocs` documentation generator is used.
+- When you try to delete a table column that `DEFAULT`/`MATERIALIZED` expressions of other columns depend on, an exception is thrown (zhang2014).
+- Added the ability to parse an empty line in text formats as the number 0 for `Float` data types. This feature was previously available but was lost in release 1.1.54342.
+- `Enum` values can be used in `min`, `max`, `sum` and some other functions. In these cases, the corresponding numeric values are used. This feature was previously available but was lost in release 1.1.54337.
+- Added `max_expanded_ast_elements` to restrict the size of the AST after recursively expanding aliases.
+
+#### Bug fixes: {#bug-fixes-27}
+
+- Fixed cases when unnecessary columns were removed from subqueries in error, or not removed from subqueries containing `UNION ALL`.
+- Fixed a bug in merges for `ReplacingMergeTree` tables.
+- Fixed synchronous insertions in `Distributed` tables (`insert_distributed_sync = 1`).
+- Fixed a segfault for certain uses of `FULL` and `RIGHT JOIN` with duplicate columns in subqueries.
+- Fixed a segfault for certain uses of `replace_running_query` and `KILL QUERY`.
+- Fixed the order of the `source` and `last_exception` columns in the `system.dictionaries` table.
+- Fixed a bug when the `DROP DATABASE` query did not delete the file with metadata.
+- Fixed the `DROP DATABASE` query for `Dictionary` databases.
+- Fixed the low precision of the `uniqHLL12` and `uniqCombined` functions for cardinalities greater than 100 million items (Alex Bocharov).
+- Fixed the calculation of implicit default values when necessary to simultaneously calculate default explicit expressions in `INSERT` queries (zhang2014).
+- Fixed a rare case when a query to a `MergeTree` table couldn't finish (chenxing-xc).
+- Fixed a crash that occurred when running a `CHECK` query for `Distributed` tables if all shards are local (chenxing.xc).
+- Fixed a slight performance regression with functions that use regular expressions.
+- Fixed a performance regression when creating multidimensional arrays from complex expressions.
+- Fixed a bug that could cause an extra `FORMAT` section to appear in an `.sql` file with metadata.
+- Fixed a bug that caused the `max_table_size_to_drop` limit to apply when trying to delete a `MATERIALIZED VIEW` looking at an explicitly specified table.
+- Fixed incompatibility with old clients (old clients were sometimes sent data with the `DateTime('timezone')` type, which they do not understand).
+- Fixed a bug when reading `Nested` column elements of structures that were added using `ALTER` but that are empty for the old partitions, when the conditions for these columns moved to `PREWHERE`.
+- Fixed a bug when filtering tables by the virtual `_table` column in queries to `Merge` tables.
+- Fixed a bug when using `ALIAS` columns in `Distributed` tables.
+- Fixed a bug that made dynamic compilation impossible for queries with aggregate functions from the `quantile` family.
+- Fixed a race condition in the query execution pipeline that occurred in very rare cases when using `Merge` tables with a large number of tables, and when using `GLOBAL` subqueries.
+- Fixed a crash when passing arrays of different sizes to an `arrayReduce` function when using aggregate functions from multiple arguments.
+- Prohibited the use of queries with `UNION ALL` in a `MATERIALIZED VIEW`.
+- Fixed an error during initialization of the `part_log` system table when the server starts (by default, `part_log` is disabled).
+
+#### Backward incompatible changes: {#backward-incompatible-changes-10}
+
+- Removed the `distributed_ddl_allow_replicated_alter` option. This behavior is enabled by default.
+- Removed the `strict_insert_defaults` setting. If you were using this functionality, write to `clickhouse-feedback@yandex-team.com`.
+- Removed the `UnsortedMergeTree` engine.
+
+### ClickHouse release 1.1.54343, 2018-02-05 {#clickhouse-release-1-1-54343-2018-02-05}
+
+- Added macros support for defining cluster names in distributed DDL queries and constructors of Distributed tables: `CREATE TABLE distr ON CLUSTER '{cluster}' (...) ENGINE = Distributed('{cluster}', 'db', 'table')`.
+- Now queries like `SELECT ... FROM table WHERE expr IN (subquery)` are processed using the `table` index.
+- Improved processing of duplicates when inserting to Replicated tables, so they no longer slow down execution of the replication queue.
+
+### ClickHouse release 1.1.54342, 2018-01-22 {#clickhouse-release-1-1-54342-2018-01-22}
+
+This release contains bug fixes for the previous release 1.1.54337:
+
+- Fixed a regression in 1.1.54337: if the default user has readonly access, then the server refuses to start up with the message `Cannot create database in readonly mode`.
+- Fixed a regression in 1.1.54337: on systems with systemd, logs were always written to syslog regardless of the configuration.
+- Fixed a regression in 1.1.54337: wrong default configuration in the Docker image.
+- Fixed nondeterministic behavior of GraphiteMergeTree (you can see it in log messages `Data after merge is not byte-identical to the data on another replicas`).
+- Fixed a bug that may lead to inconsistent merges after an OPTIMIZE query to Replicated tables (you may see it in log messages `Part ... intersects the previous part`).
+- Buffer tables now work correctly when MATERIALIZED columns are present in the destination table (zhang2014).
+- Fixed a bug in the implementation of NULL.
+
+### ClickHouse release 1.1.54337, 2018-01-18 {#clickhouse-release-1-1-54337-2018-01-18}
+
+#### New features: {#new-features-17}
+
+- Added support for storage of multi-dimensional arrays and tuples (the `Tuple` data type) in tables.
+- Support for table functions for `DESCRIBE` and `INSERT` queries. Added support for subqueries in `DESCRIBE`. Examples: `DESC TABLE remote('host', default.hits)`; `DESC TABLE (SELECT 1)`; `INSERT INTO TABLE FUNCTION remote('host', default.hits)`. Support for `INSERT INTO TABLE` in addition to `INSERT INTO`.
+- Improved support for time zones. The `DateTime` data type can be annotated with the time zone that is used for parsing and formatting in text formats. Example: `DateTime('Europe/Moscow')`. When time zones are specified in functions for `DateTime` arguments, the return type will track the time zone, and the value will be displayed as expected (see the sketch after this list).
+- Added the functions `toTimeZone`, `timeDiff`, `toQuarter`, `toRelativeQuarterNum`. The `toRelativeHour`/`Minute`/`Second` functions can take a value of type `Date` as an argument. The `now` function name is case-sensitive.
+- Added the `toStartOfFifteenMinutes` function (Kirill Shvakov).
+- Added the `clickhouse format` tool for formatting queries.
+- Added the `format_schema_path` configuration parameter (Marek Vavruša). It is used for specifying a schema in `Cap'n Proto` format. Schema files can be located only in the specified directory.
+- Added support for config substitutions (`incl` and `conf.d`) for configuration of external dictionaries and models (Pavel Yakunin).
+- Added a column with documentation for the `system.settings` table (Kirill Shvakov).
+- Added the `system.parts_columns` table with information about column sizes in each data part of `MergeTree` tables.
+- Added the `system.models` table with information about loaded `CatBoost` machine learning models.
+- Added the `mysql` and `odbc` table functions and corresponding `MySQL` and `ODBC` table engines. This functionality is in the beta stage.
+- Added the possibility to pass an argument of type `AggregateFunction` for the `groupArray` aggregate function (so you can create an array of states of some aggregate function).
+- Removed restrictions on various combinations of aggregate function combinators. For example, you can use `avgForEachIf` as well as `avgIfForEach` aggregate functions, which have different behaviors.
+- The `-ForEach` aggregate function combinator is extended for the case of aggregate functions of multiple arguments.
+- Added support for aggregate functions of `Nullable` arguments even for cases when the function returns a non-`Nullable` result (added with the contribution of Silviu Caragea). Example: `groupArray`, `groupUniqArray`, `topK`.
+- Added `max_client_network_bandwidth` for `clickhouse-client` (Kirill Shvakov).
+- Users with the `readonly = 2` setting are allowed to work with TEMPORARY tables (CREATE, DROP, INSERT…) (Kirill Shvakov).
+- Added support for using multiple consumers with the `Kafka` engine. Extended configuration options for `Kafka` (Marek Vavruša).
+- Added the `intExp3` and `intExp4` functions.
+- Added the `sumKahan` aggregate function.
+- Added the to \*Number\*OrNull functions (such as `toInt32OrNull`), where \*Number\* is a numeric type.
+- Added support for the `WITH` clause for an `INSERT SELECT` query (author: zhang2014).
+- Added settings: `http_connection_timeout`, `http_send_timeout`, `http_receive_timeout`. In particular, these settings are used for downloading data parts for replication. Changing these settings allows for faster failover if the network is overloaded.
+- Added support for `ALTER` for tables of type `Null` (Anastasiya Tsarkova).
+- The `reinterpretAsString` function is extended for all data types that are stored contiguously in memory.
+- Added the `--silent` option for the `clickhouse-local` tool. It suppresses printing query execution info in stderr.
+- Added support for reading values of type `Date` from text in a format where the month and/or day of the month is specified using a single digit instead of two digits (Amos Bird).
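+
+A hedged sketch of the time-zone support described in this list; the table name is hypothetical:
+
+``` sql
+CREATE TABLE events_msk
+(
+    t DateTime('Europe/Moscow')  -- parsed/formatted in this zone in text formats
+)
+ENGINE = MergeTree
+ORDER BY t;
+
+SELECT toTimeZone(now(), 'Europe/Moscow') AS moscow_now
+```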
+
+#### Performance optimizations: {#performance-optimizations}
+
+- Improved performance of the aggregate functions `min`, `max`, `any`, `anyLast`, `anyHeavy`, `argMin`, `argMax` from string arguments.
+- Improved performance of the functions `isInfinite`, `isFinite`, `isNaN`, `roundToExp2`.
+- Improved performance of parsing and formatting `Date` and `DateTime` type values in text format.
+- Improved performance and precision of parsing floating point numbers.
+- Lowered memory usage for `JOIN` in the case when the left and right parts have columns with identical names that are not contained in `USING`.
+- Improved performance of the aggregate functions `varSamp`, `varPop`, `stddevSamp`, `stddevPop`, `covarSamp`, `covarPop`, `corr` by reducing computational stability. The old functions are available under the names `varSampStable`, `varPopStable`, `stddevSampStable`, `stddevPopStable`, `covarSampStable`, `covarPopStable`, `corrStable`.
+
+#### Bug fixes: {#bug-fixes-28}
+
+- Fixed data deduplication after running a `DROP` or `DETACH PARTITION` query. In the previous version, dropping a partition and inserting the same data again was not working because inserted blocks were considered duplicates.
+- Fixed a bug that could lead to incorrect interpretation of the `WHERE` clause for `CREATE MATERIALIZED VIEW` queries with `POPULATE`.
+- Fixed a bug in using the `root_path` parameter in the `zookeeper_servers` configuration.
+- Fixed unexpected results of passing a `Date` argument to `toStartOfDay`.
+- Fixed the `addMonths` and `subtractMonths` functions and the arithmetic for `INTERVAL n MONTH` in cases when the result has the previous year.
+- Added missing support for the `UUID` data type for `DISTINCT`, `JOIN`, the `uniq` aggregate functions, and external dictionaries (Evgeniy Ivanov). Support for `UUID` is still incomplete.
+- Fixed `SummingMergeTree` behavior in cases when the rows summed to zero.
+- Various fixes for the `Kafka` engine (Marek Vavruša).
+- Fixed incorrect behavior of the `Join` table engine (Amos Bird).
+- Fixed incorrect allocator behavior under FreeBSD and OS X.
+- The `extractAll` function now supports empty matches.
+- Fixed an error that blocked usage of `libressl` instead of `openssl`.
+- Fixed the `CREATE TABLE AS SELECT` query from temporary tables.
+- Fixed non-atomicity of updating the replication queue. This could lead to replicas being out of sync until the server restarts.
+- Fixed possible overflow in `gcd`, `lcm` and `modulo` (the `%` operator) (Maks Skorokhod).
+- `-preprocessed` files are now created after changing `umask` (`umask` can be changed in the config).
+- Fixed a bug in the background check of parts (`MergeTreePartChecker`) when using a custom partition key.
+- Fixed parsing of tuples (values of the `Tuple` data type) in text formats.
+- Improved error messages about incompatible types passed to `multiIf`, `array` and some other functions.
+- Redesigned support for `Nullable` types. Fixed bugs that could lead to a server crash. Fixed almost all other bugs related to `NULL` support: incorrect type conversions in INSERT SELECT, insufficient support for Nullable in HAVING and PREWHERE, the `join_use_nulls` mode, Nullable types as arguments of the `OR` operator, etc.
+- Fixed various bugs related to internal semantics of data types. Examples: unnecessary summing of `Enum` type fields in `SummingMergeTree`; alignment of `Enum` types in `Pretty` formats, etc.
+- Stricter checks for allowed combinations of composite columns.
+- Fixed the overflow when specifying a very large parameter for the `FixedString` data type.
+- Fixed a bug in the `topK` aggregate function in a generic case.
+- Added the missing check for equality of array sizes in arguments of n-ary variants of aggregate functions with an `-Array` combinator.
+- Fixed a bug in `--pager` for `clickhouse-client` (author: ks1322).
+- Fixed the precision of the `exp10` function.
+- Fixed the behavior of the `visitParamExtract` function for better compliance with documentation.
+- Fixed a crash when incorrect data types are specified.
+- Fixed the behavior of `DISTINCT` in the case when all columns are constants.
+- Fixed query formatting in the case of using the `tupleElement` function with a complex constant expression as the tuple element index.
+- Fixed a bug in `Dictionary` tables for `range_hashed` dictionaries.
+- Fixed a bug that led to excessive rows in the result of `FULL` and `RIGHT JOIN` (Amos Bird).
+- Fixed a server crash when creating and removing temporary files in `config.d` directories during config reload.
+- Fixed the `SYSTEM DROP DNS CACHE` query: the cache was flushed but addresses of cluster nodes were not updated.
+- Fixed the behavior of `MATERIALIZED VIEW` after executing `DETACH TABLE` for the table under the view (Marek Vavruša).
+
+#### Build improvements: {#build-improvements-4}
+
+- The `pbuilder` tool is used for builds. The build process is almost completely independent of the build host environment.
+- A single build is used for different OS versions. Packages and binaries have been made compatible with a wide range of Linux systems.
+- Added the `clickhouse-test` package. It can be used to run functional tests.
+- The source tarball can now be published to the repository. It can be used to reproduce the build without using GitHub.
+- Added limited integration with Travis CI. Due to limits on build time in Travis, only the debug build is tested and a limited subset of tests are run.
+- Added support for `Cap'n'Proto` in the default build.
+- Changed the format of documentation sources from `Restricted Text` to `Markdown`.
+- Added support for `systemd` (Vladimir Smirnov). It is disabled by default due to incompatibility with some OS images and can be enabled manually.
+- For dynamic code generation, `clang` and `lld` are embedded into the `clickhouse` binary. They can also be invoked as `clickhouse clang` and `clickhouse lld`.
+- Removed usage of GNU extensions from the code and enabled the `-Wextra` option. When building with `clang` the default is `libc++` instead of `libstdc++`.
+- Extracted the `clickhouse_parsers` and `clickhouse_common_io` libraries to speed up builds of various tools.
+
+#### Backward incompatible changes: {#backward-incompatible-changes-11}
+
+- The format of marks in `Log` type tables that contain `Nullable` columns was changed in a backward incompatible way. If you have these tables, you should convert them to the `TinyLog` type before starting up the new server version. To do this, replace `ENGINE = Log` with `ENGINE = TinyLog` in the corresponding `.sql` file in the `metadata` directory. If your table doesn't have `Nullable` columns or if the type of your table is not `Log`, then you don't need to do anything.
+- Removed the `experimental_allow_extended_storage_definition_syntax` setting. Now this feature is enabled by default.
+- The `runningIncome` function was renamed to `runningDifferenceStartingWithFirstvalue` to avoid confusion.
+- Removed the `FROM ARRAY JOIN arr` syntax when ARRAY JOIN is specified directly after FROM with no table (Amos Bird).
+- Removed the `BlockTabSeparated` format that was used solely for demonstration purposes.
+- Changed the state format for the aggregate functions `varSamp`, `varPop`, `stddevSamp`, `stddevPop`, `covarSamp`, `covarPop`, `corr`. If you have stored states of these aggregate functions in tables (using the `AggregateFunction` data type or materialized views with the corresponding states), please write to clickhouse-feedback@yandex-team.com.
+- In previous server versions there was an undocumented feature: if an aggregate function depends on parameters, you could still specify it without parameters in the AggregateFunction data type. Example: `AggregateFunction(quantiles, UInt64)` instead of `AggregateFunction(quantiles(0.5, 0.9), UInt64)`. This feature was lost. Although it was undocumented, we plan to support it again in future releases.
+- Enum data types cannot be used in min/max aggregate functions. This ability will be returned in the next release.
+
+#### Please note when upgrading: {#please-note-when-upgrading}
+
+- When doing a rolling update on a cluster, at the point when some of the replicas are running the old version of ClickHouse and some are running the new version, replication is temporarily stopped and the message `unknown parameter 'shard'` appears in the log. Replication will continue after all replicas of the cluster are updated.
+- If different versions of ClickHouse are running on the cluster servers, it is possible that distributed queries using the following functions will have incorrect results: `varSamp`, `varPop`, `stddevSamp`, `stddevPop`, `covarSamp`, `covarPop`, `corr`. You should update all cluster nodes.
+
+## [Changelog for 2017](https://github.com/ClickHouse/ClickHouse/blob/master/docs/en/changelog/2017.md) {#changelog-for-2017}
diff --git a/docs/ja/whats_new/changelog/2019.md b/docs/ja/whats_new/changelog/2019.md
deleted file mode 120000
index 740d1edd238..00000000000
--- a/docs/ja/whats_new/changelog/2019.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../en/whats_new/changelog/2019.md
\ No newline at end of file
diff --git a/docs/ja/whats_new/changelog/2019.md b/docs/ja/whats_new/changelog/2019.md
new file mode 100644
index 00000000000..47351882033
--- /dev/null
+++ b/docs/ja/whats_new/changelog/2019.md
@@ -0,0 +1,2074 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 77
+toc_title: '2019'
+---
+
+## ClickHouse release v19.17 {#clickhouse-release-v19-17}
+
+### ClickHouse release v19.17.6.36, 2019-12-27 {#clickhouse-release-v19-17-6-36-2019-12-27}
+
+#### Bug fix {#bug-fix}
+
+- Fixed potential buffer overflow in decompress. A malicious user could pass fabricated compressed data that causes a read after the buffer. This issue was found by Eldar Zaitov from the Yandex information security team. [\#8404](https://github.com/ClickHouse/ClickHouse/pull/8404) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed possible server crash (`std::terminate`) when the server cannot send or write data in JSON or XML format with values of the String data type (which require UTF-8 validation), or when compressing result data with the Brotli algorithm, or in some other rare cases. [\#8384](https://github.com/ClickHouse/ClickHouse/pull/8384) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed dictionaries with a source from a ClickHouse `VIEW`: now reading such dictionaries doesn't cause the error `There is no query`. [\#8351](https://github.com/ClickHouse/ClickHouse/pull/8351) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed checking whether a client host is allowed by the host_regexp specified in users.xml. [\#8241](https://github.com/ClickHouse/ClickHouse/pull/8241), [\#8342](https://github.com/ClickHouse/ClickHouse/pull/8342) ([Vitaly Baranov](https://github.com/vitlibar))
+- `RENAME TABLE` for a distributed table now renames the folder containing inserted data before sending it to the shards. This fixes an issue with successive renames `tableA->tableB`, `tableC->tableA`. [\#8306](https://github.com/ClickHouse/ClickHouse/pull/8306) ([tavplubix](https://github.com/tavplubix))
+- `range_hashed` external dictionaries created by DDL queries now allow ranges of arbitrary numeric types. [\#8275](https://github.com/ClickHouse/ClickHouse/pull/8275) ([alesapin](https://github.com/alesapin))
+- Fixed the `INSERT INTO table SELECT ... FROM mysql(...)` table function. [\#8234](https://github.com/ClickHouse/ClickHouse/pull/8234) ([tavplubix](https://github.com/tavplubix))
+- Fixed a segfault in `INSERT INTO TABLE FUNCTION file()` while inserting into a file that doesn't exist. Now in this case the file is created and the insert is processed. [\#8177](https://github.com/ClickHouse/ClickHouse/pull/8177) ([Olga Khvostikova](https://github.com/stavrolia))
+- Fixed a bitmapAnd error when intersecting an aggregated bitmap and a scalar bitmap. [\#8082](https://github.com/ClickHouse/ClickHouse/pull/8082) ([Yue Huang](https://github.com/moon03432))
+- Fixed a segfault when an `EXISTS` query was used without a `TABLE` or `DICTIONARY` qualifier, just like `EXISTS t`. [\#8213](https://github.com/ClickHouse/ClickHouse/pull/8213) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed the return type for the functions `rand` and `randConstant` in the case of a Nullable argument. Now the functions always return `UInt32` and never `Nullable(UInt32)`. [\#8204](https://github.com/ClickHouse/ClickHouse/pull/8204) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed `DROP DICTIONARY IF EXISTS db.dict`: now it doesn't throw an exception if `db` doesn't exist. [\#8185](https://github.com/ClickHouse/ClickHouse/pull/8185) ([Vitaly Baranov](https://github.com/vitlibar))
+- If a table wasn't completely dropped because of a server crash, the server will try to restore and load it. [\#8176](https://github.com/ClickHouse/ClickHouse/pull/8176) ([tavplubix](https://github.com/tavplubix))
+- Fixed a trivial count query for a distributed table if there are more than two shard local tables. [\#8164](https://github.com/ClickHouse/ClickHouse/pull/8164) ([小路](https://github.com/nicelulu))
+- Fixed a bug that led to a data race in DB::BlockStreamProfileInfo::calculateRowsBeforeLimit(). [\#8143](https://github.com/ClickHouse/ClickHouse/pull/8143) ([Alexander Kazakov](https://github.com/Akazz))
+- Fixed `ALTER table MOVE part` executed immediately after the specified part was merged, which could cause moving the part that the specified part was merged into. Now it correctly moves the specified part. [\#8104](https://github.com/ClickHouse/ClickHouse/pull/8104) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Expressions for dictionaries can now be specified as strings. This is useful for calculating attributes while extracting data from non-ClickHouse sources, because it allows using non-ClickHouse syntax for those expressions. [\#8098](https://github.com/ClickHouse/ClickHouse/pull/8098) ([alesapin](https://github.com/alesapin))
+- Fixed a very rare race in `clickhouse-copier` caused by an overflow in ZXid. [\#8088](https://github.com/ClickHouse/ClickHouse/pull/8088) ([Ding Xiang Fei](https://github.com/dingxiangfei2009))
+- Fixed a bug where, after a query failed (due to “Too many simultaneous queries”, for example), it would not read the external tables info, and the next request would interpret this info as the beginning of the next query, causing an error like `Unknown packet from client`. [\#8084](https://github.com/ClickHouse/ClickHouse/pull/8084) ([Azat Khuzhin](https://github.com/azat))
+- Avoid null dereference after “Unknown packet X from server”. [\#8071](https://github.com/ClickHouse/ClickHouse/pull/8071) ([Azat Khuzhin](https://github.com/azat))
+- Restored support of all ICU locales, added the ability to apply collations for constant expressions, and added the language name to the system.collations table. [\#8051](https://github.com/ClickHouse/ClickHouse/pull/8051) ([alesapin](https://github.com/alesapin))
+- The number of streams for reading from `StorageFile` and `StorageHDFS` is now limited, to avoid exceeding the memory limit. [\#7981](https://github.com/ClickHouse/ClickHouse/pull/7981) ([alesapin](https://github.com/alesapin))
+- Fixed the `CHECK TABLE` query for `*MergeTree` tables without a key. [\#7979](https://github.com/ClickHouse/ClickHouse/pull/7979) ([alesapin](https://github.com/alesapin))
+- Removed the mutation number from a part name in case there were no mutations. This removal improved compatibility with older versions. [\#8250](https://github.com/ClickHouse/ClickHouse/pull/8250) ([alesapin](https://github.com/alesapin))
+- Fixed a bug where mutations were skipped for some attached parts due to their data_version being larger than the table mutation version. [\#7812](https://github.com/ClickHouse/ClickHouse/pull/7812) ([Zhichang Yu](https://github.com/yuzhichang))
+- Allow starting the server with redundant copies of parts after moving them to another device. [\#7810](https://github.com/ClickHouse/ClickHouse/pull/7810) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fixed the error “Sizes of columns doesn’t match” that might appear when using aggregate function columns. [\#7790](https://github.com/ClickHouse/ClickHouse/pull/7790) ([Boris Granveaud](https://github.com/bgranvea))
+- Now an exception is thrown in case of using WITH TIES alongside LIMIT BY. And now it's possible to use TOP with LIMIT BY. [\#7637](https://github.com/ClickHouse/ClickHouse/pull/7637) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Fixed dictionary reload if the dictionary has an `invalidate_query`, which stopped updates after some exception on previous update tries. [\#8029](https://github.com/ClickHouse/ClickHouse/pull/8029) ([alesapin](https://github.com/alesapin))
+
+### ClickHouse release v19.17.4.11, 2019-11-22 {#clickhouse-release-v19-17-4-11-2019-11-22}
+
+#### Backward incompatible change {#backward-incompatible-change}
+
+- Using a column instead of AST to store scalar subquery results for better performance. The setting `enable_scalar_subquery_optimization` was added in 19.17 and it was enabled by default. It leads to errors like [this](https://github.com/ClickHouse/ClickHouse/issues/7851) during an upgrade to 19.17.2 or 19.17.3 from previous versions. This setting was disabled by default in 19.17.4, to make upgrading from 19.16 and older versions possible without errors. [\#7392](https://github.com/ClickHouse/ClickHouse/pull/7392) ([Amos Bird](https://github.com/amosbird))
+
+#### New feature {#new-feature}
+
+- Added the ability to create dictionaries with DDL queries. [\#7360](https://github.com/ClickHouse/ClickHouse/pull/7360) ([alesapin](https://github.com/alesapin))
+- Make the `bloom_filter` type of index support `LowCardinality` and `Nullable` [\#7363](https://github.com/ClickHouse/ClickHouse/issues/7363) [\#7561](https://github.com/ClickHouse/ClickHouse/pull/7561) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Added the function `isValidJSON` to check that the passed string is valid JSON. [\#5910](https://github.com/ClickHouse/ClickHouse/issues/5910) [\#7293](https://github.com/ClickHouse/ClickHouse/pull/7293) ([Vdimir](https://github.com/Vdimir))
+- Implemented the `arrayCompact` function (see the sketch after this list) [\#7328](https://github.com/ClickHouse/ClickHouse/pull/7328) ([Memo](https://github.com/Joeywzr))
+- Created the function `hex` for Decimal numbers. It works like `hex(reinterpretAsString())`, but doesn't delete the last zero bytes. [\#7355](https://github.com/ClickHouse/ClickHouse/pull/7355) ([Mikhail Korotov](https://github.com/millb))
+- Added the `arrayFill` and `arrayReverseFill` functions, which replace elements by other elements in front of/behind them in the array (see the sketch after this list). [\#7380](https://github.com/ClickHouse/ClickHouse/pull/7380) ([hcz](https://github.com/hczhcz))
+- Added `CRC32IEEE()`/`CRC64()` support [\#7480](https://github.com/ClickHouse/ClickHouse/pull/7480) ([Azat Khuzhin](https://github.com/azat))
+- Implemented the `char` function similar to the one in [mysql](https://dev.mysql.com/doc/refman/8.0/en/string-functions.html#function_char) [\#7486](https://github.com/ClickHouse/ClickHouse/pull/7486) ([sundyli](https://github.com/sundy-li))
+- Added the `bitmapTransform` function. It transforms an array of values in a bitmap to another array of values, the result being a new bitmap [\#7598](https://github.com/ClickHouse/ClickHouse/pull/7598) ([Zhichang Yu](https://github.com/yuzhichang))
+- Implemented the `javaHashUTF16LE()` function [\#7651](https://github.com/ClickHouse/ClickHouse/pull/7651) ([achimbab](https://github.com/achimbab))
+- Added the `_shard_num` virtual column for the Distributed engine [\#7624](https://github.com/ClickHouse/ClickHouse/pull/7624) ([Azat Khuzhin](https://github.com/azat))
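+
+A hedged sketch of two of the new array functions:
+
+``` sql
+SELECT
+    arrayCompact([1, 1, 2, 2, 2, 3])           AS deduped,  -- [1, 2, 3]
+    arrayFill(x -> x != 0, [0, 1, 0, 0, 2, 0]) AS filled    -- [0, 1, 1, 1, 2, 2]
+```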
+
+#### Experimental feature {#experimental-feature}
+
+- Added new query execution pipeline support in `MergeTree`. [\#7181](https://github.com/ClickHouse/ClickHouse/pull/7181) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+
+#### Bug fix {#bug-fix-1}
+
+- Fixed incorrect float parsing in `Values` [\#7817](https://github.com/ClickHouse/ClickHouse/issues/7817) [\#7870](https://github.com/ClickHouse/ClickHouse/pull/7870) ([tavplubix](https://github.com/tavplubix))
+- Fixed a rare deadlock which can happen when trace_log is enabled. [\#7838](https://github.com/ClickHouse/ClickHouse/pull/7838) ([filimonov](https://github.com/filimonov))
+- Prevent message duplication when producing to a Kafka table that has any MVs selecting from it [\#7265](https://github.com/ClickHouse/ClickHouse/pull/7265) ([Ivan](https://github.com/abyss7))
+- Support for `Array(LowCardinality(Nullable(String)))` in `IN`. Resolves [\#7364](https://github.com/ClickHouse/ClickHouse/issues/7364) [\#7366](https://github.com/ClickHouse/ClickHouse/pull/7366) ([achimbab](https://github.com/achimbab))
+- Added handling of `SQL_TINYINT` and `SQL_BIGINT`, and fixed handling of the `SQL_FLOAT` data source types in the ODBC Bridge. [\#7491](https://github.com/ClickHouse/ClickHouse/pull/7491) ([Denis Glazachev](https://github.com/traceon))
+- Fixed aggregation (`avg` and quantiles) over empty decimal columns [\#7431](https://github.com/ClickHouse/ClickHouse/pull/7431) ([Andrey Konyaev](https://github.com/akonyaev90))
+- Fixed `INSERT` into Distributed with `MATERIALIZED` columns [\#7377](https://github.com/ClickHouse/ClickHouse/pull/7377) ([Azat Khuzhin](https://github.com/azat))
+- Made `MOVE PARTITION` work if some parts of the partition are already on the destination disk or volume [\#7434](https://github.com/ClickHouse/ClickHouse/pull/7434) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fixed a bug with hardlinks failing to be created during mutations in `ReplicatedMergeTree` in multi-disk configurations. [\#7558](https://github.com/ClickHouse/ClickHouse/pull/7558) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fixed a bug with mutations on a MergeTree when the whole part remains unchanged and the best space is found on another disk [\#7602](https://github.com/ClickHouse/ClickHouse/pull/7602) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fixed a bug with `keep_free_space_ratio` not being read from the disks configuration [\#7645](https://github.com/ClickHouse/ClickHouse/pull/7645) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fixed a bug with tables containing only `Tuple` columns or columns with complex paths. Fixes [7541](https://github.com/ClickHouse/ClickHouse/issues/7541). [\#7545](https://github.com/ClickHouse/ClickHouse/pull/7545) ([alesapin](https://github.com/alesapin))
+- Do not account memory for the Buffer engine in the max_memory_usage limit [\#7552](https://github.com/ClickHouse/ClickHouse/pull/7552) ([Azat Khuzhin](https://github.com/azat))
+- Fixed final mark usage in `MergeTree` tables ordered by `tuple()`. In rare cases it could lead to a `Can't adjust last granule` error while selecting. [\#7639](https://github.com/ClickHouse/ClickHouse/pull/7639) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed a bug in mutations that have predicates with actions that require context (for example, functions for JSON), which could lead to crashes or strange exceptions. [\#7664](https://github.com/ClickHouse/ClickHouse/pull/7664) ([alesapin](https://github.com/alesapin))
+- Fixed a mismatch of database and table names escaping in `data/` and `shadow/` directories [\#7575](https://github.com/ClickHouse/ClickHouse/pull/7575) ([Alexander Burmak](https://github.com/Alex-Burmak))
+- Support duplicated keys in RIGHT\|FULL JOINs, e.g. `ON t.x = u.x AND t.x = u.y`. Fixed a crash in this case. [\#7586](https://github.com/ClickHouse/ClickHouse/pull/7586) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed `Not found column in block` when joining on an expression with RIGHT or FULL JOIN. [\#7641](https://github.com/ClickHouse/ClickHouse/pull/7641) ([Artem Zuikov](https://github.com/4ertus2))
+- One more attempt to fix an infinite loop in the `PrettySpace` format [\#7591](https://github.com/ClickHouse/ClickHouse/pull/7591) ([Olga Khvostikova](https://github.com/stavrolia))
+- Fixed a bug in the `concat` function when all arguments were `FixedString` of the same size. [\#7635](https://github.com/ClickHouse/ClickHouse/pull/7635) ([alesapin](https://github.com/alesapin))
+- Fixed an exception in case of using 1 argument while defining S3, URL and HDFS storages. [\#7618](https://github.com/ClickHouse/ClickHouse/pull/7618) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fixed the scope of the InterpreterSelectQuery for views with a query [\#7601](https://github.com/ClickHouse/ClickHouse/pull/7601) ([Azat Khuzhin](https://github.com/azat))
+
+#### Improvement {#improvement}
+
+- `Nullable` columns are recognized and NULL values are handled correctly by the ODBC bridge [\#7402](https://github.com/ClickHouse/ClickHouse/pull/7402) ([Vasily Nemkov](https://github.com/Enmk))
+- Write the current batch for distributed sends atomically [\#7600](https://github.com/ClickHouse/ClickHouse/pull/7600) ([Azat Khuzhin](https://github.com/azat))
+- Throw an exception if we cannot detect the table for the column name in a query. [\#7358](https://github.com/ClickHouse/ClickHouse/pull/7358) ([Artem Zuikov](https://github.com/4ertus2))
+- Added the `merge_max_block_size` setting to `MergeTreeSettings` [\#7412](https://github.com/ClickHouse/ClickHouse/pull/7412) ([Artem Zuikov](https://github.com/4ertus2))
+- Queries with `HAVING` and without `GROUP BY` assume group by constant. So, `SELECT 1 HAVING 1` now returns a result. [\#7496](https://github.com/ClickHouse/ClickHouse/pull/7496) ([Amos Bird](https://github.com/amosbird))
+- Support parsing `(X,)` as a tuple, similar to python. [\#7501](https://github.com/ClickHouse/ClickHouse/pull/7501), [\#7562](https://github.com/ClickHouse/ClickHouse/pull/7562) ([Amos Bird](https://github.com/amosbird))
+- Make the `range` function behave more like a pythonic one. [\#7518](https://github.com/ClickHouse/ClickHouse/pull/7518) ([sundyli](https://github.com/sundy-li))
+- Added `constraints` columns to the table `system.settings` [\#7553](https://github.com/ClickHouse/ClickHouse/pull/7553) ([Vitaly Baranov](https://github.com/vitlibar))
+- Better Null format for the TCP handler, so that it's possible to use `select ignore() from table format Null` for perf measurement via clickhouse-client [\#7606](https://github.com/ClickHouse/ClickHouse/pull/7606) ([Amos Bird](https://github.com/amosbird))
+- Queries like `CREATE TABLE ... AS (SELECT (1, 2))` are parsed correctly [\#7542](https://github.com/ClickHouse/ClickHouse/pull/7542) ([hcz](https://github.com/hczhcz))
+
+#### Performance improvement {#performance-improvement}
+
+- The performance of aggregation over short string keys is improved. [\#6243](https://github.com/ClickHouse/ClickHouse/pull/6243) ([Alexander Kuzmenkov](https://github.com/akuzm), [Amos Bird](https://github.com/amosbird))
+- Run another pass of syntax/expression analysis to get potential optimizations after constant predicates are folded. [\#7497](https://github.com/ClickHouse/ClickHouse/pull/7497) ([Amos Bird](https://github.com/amosbird))
+- Use storage meta info to evaluate trivial `SELECT count() FROM table;` [\#7510](https://github.com/ClickHouse/ClickHouse/pull/7510) ([Amos Bird](https://github.com/amosbird), [alexey-milovidov](https://github.com/alexey-milovidov))
+- Vectorized processing of `arrayReduce`, similar to the Aggregator's `addBatch`. [\#7608](https://github.com/ClickHouse/ClickHouse/pull/7608) ([Amos Bird](https://github.com/amosbird))
+- Minor improvements in the performance of `Kafka` consumption [\#7475](https://github.com/ClickHouse/ClickHouse/pull/7475) ([Ivan](https://github.com/abyss7))
+
+#### Build/Testing/Packaging improvement {#buildtestingpackaging-improvement}
+
+- Added support for cross-compiling to the CPU architecture AARCH64. Refactored the packager script. [\#7370](https://github.com/ClickHouse/ClickHouse/pull/7370) [\#7539](https://github.com/ClickHouse/ClickHouse/pull/7539) ([Ivan](https://github.com/abyss7))
+- Unpack the darwin-x86_64 and linux-aarch64 toolchains into a mounted Docker volume when building packages [\#7534](https://github.com/ClickHouse/ClickHouse/pull/7534) ([Ivan](https://github.com/abyss7))
+- Updated the Docker image for the binary packager [\#7474](https://github.com/ClickHouse/ClickHouse/pull/7474) ([Ivan](https://github.com/abyss7))
+- Fixed compile errors on macOS Catalina [\#7585](https://github.com/ClickHouse/ClickHouse/pull/7585) ([Ernest Poletaev](https://github.com/ernestp))
+- Some refactoring in query analysis logic: split a complex class into several simple ones. [\#7454](https://github.com/ClickHouse/ClickHouse/pull/7454) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed the build without submodules [\#7295](https://github.com/ClickHouse/ClickHouse/pull/7295) ([proller](https://github.com/proller))
+- Better `add_globs` in CMake files [\#7418](https://github.com/ClickHouse/ClickHouse/pull/7418) ([Amos Bird](https://github.com/amosbird))
+- Removed hardcoded paths in the `unwind` target [\#7460](https://github.com/ClickHouse/ClickHouse/pull/7460) ([Konstantin Podshumok](https://github.com/podshumok))
+- Allow using the mysql format without SSL [\#7524](https://github.com/ClickHouse/ClickHouse/pull/7524) ([proller](https://github.com/proller))
+
+#### Other {#other}
+
+- Added an ANTLR4 grammar for the ClickHouse SQL dialect [\#7595](https://github.com/ClickHouse/ClickHouse/issues/7595) [\#7596](https://github.com/ClickHouse/ClickHouse/pull/7596) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+## ClickHouse release v19.16 {#clickhouse-release-v19-16}
+
+#### ClickHouse release v19.16.14.65, 2020-03-25 {#clickhouse-release-v19-16-14-65-2020-03-25}
+
+- Fixed a bug in batched calculations of ternary logical OPs on multiple arguments (more than 10). [\#8718](https://github.com/ClickHouse/ClickHouse/pull/8718) ([Alexander Kazakov](https://github.com/Akazz)) This bugfix was backported to version 19.16 by a special request from Altinity.
+
+#### ClickHouse release v19.16.14.65, 2020-03-05 {#clickhouse-release-v19-16-14-65-2020-03-05}
+
+- Fixed distributed subqueries incompatibility with older CH versions. Fixes [\#7851](https://github.com/ClickHouse/ClickHouse/issues/7851)
+  [(tabplubix)](https://github.com/tavplubix)
+- When executing a `CREATE` query, fold constant expressions in storage engine arguments. Replace an empty database name with the current database. Fixes [\#6508](https://github.com/ClickHouse/ClickHouse/issues/6508), [\#3492](https://github.com/ClickHouse/ClickHouse/issues/3492). Also fixed the check for a local address in `ClickHouseDictionarySource`.
+  [\#9262](https://github.com/ClickHouse/ClickHouse/pull/9262) [(tabplubix)](https://github.com/tavplubix)
+- Now background merges in the `*MergeTree` table engines family preserve the storage policy volume order more accurately.
+  [\#8549](https://github.com/ClickHouse/ClickHouse/pull/8549) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Prevent losing data in `Kafka` in rare cases when an exception happens after reading the suffix but before committing. Fixes [\#9378](https://github.com/ClickHouse/ClickHouse/issues/9378). Related: [\#7175](https://github.com/ClickHouse/ClickHouse/issues/7175)
+  [\#9507](https://github.com/ClickHouse/ClickHouse/pull/9507) [(filimonov)](https://github.com/filimonov)
+- Fixed a bug leading to server termination when trying to use / drop a `Kafka` table created with wrong parameters. Fixes [\#9494](https://github.com/ClickHouse/ClickHouse/issues/9494). Incorporates [\#9507](https://github.com/ClickHouse/ClickHouse/issues/9507).
+  [\#9513](https://github.com/ClickHouse/ClickHouse/pull/9513) [(filimonov)](https://github.com/filimonov)
+- Allow using `MaterializedView` with subqueries above `Kafka` tables.
+  [\#8197](https://github.com/ClickHouse/ClickHouse/pull/8197) ([filimonov](https://github.com/filimonov))
+
+#### New feature {#new-feature-1}
+
+- Added the `deduplicate_blocks_in_dependent_materialized_views` option to control the behaviour of idempotent inserts into tables with materialized views. This new feature was added to the bugfix release by a special request from Altinity.
+  [\#9070](https://github.com/ClickHouse/ClickHouse/pull/9070) [(urykhy)](https://github.com/urykhy)
+
+### ClickHouse release v19.16.2.2, 2019-10-30 {#clickhouse-release-v19-16-2-2-2019-10-30}
+
+#### Backward incompatible change {#backward-incompatible-change-1}
+
+- Added missing arity validation for count/counIf.
+  [\#7095](https://github.com/ClickHouse/ClickHouse/issues/7095)
+  [\#7298](https://github.com/ClickHouse/ClickHouse/pull/7298) ([Vdimir](https://github.com/Vdimir))
+- Removed the legacy `asterisk_left_columns_only` setting (it was disabled by default).
+  [\#7335](https://github.com/ClickHouse/ClickHouse/pull/7335) ([Artem Zuikov](https://github.com/4ertus2))
+- Format strings for the Template data format are now specified in files.
+  [\#7118](https://github.com/ClickHouse/ClickHouse/pull/7118)
+  ([tavplubix](https://github.com/tavplubix))
+
+#### New feature {#new-feature-2}
+
+- Introduced uniqCombined64() to calculate cardinality greater than UINT_MAX.
+  [\#7213](https://github.com/ClickHouse/ClickHouse/pull/7213),
+  [\#7222](https://github.com/ClickHouse/ClickHouse/pull/7222) ([Azat Khuzhin](https://github.com/azat))
+- Support Bloom filter indexes on Array columns.
+  [\#6984](https://github.com/ClickHouse/ClickHouse/pull/6984)
+  ([achimbab](https://github.com/achimbab))
+- Added a function `getMacro(name)` that returns a String with the value of the corresponding `<macros>`
+  from the server configuration. [\#7240](https://github.com/ClickHouse/ClickHouse/pull/7240)
+  ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Set two configuration options for a dictionary based on an HTTP source: `credentials` and
+  `http-headers`. [\#7092](https://github.com/ClickHouse/ClickHouse/pull/7092) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Added a new ProfileEvent `Merge` that counts the number of launched background merges.
+  [\#7093](https://github.com/ClickHouse/ClickHouse/pull/7093) ([Mikhail Korotov](https://github.com/millb))
+- Added the fullHostName function that returns a fully qualified domain name.
+  [\#7263](https://github.com/ClickHouse/ClickHouse/issues/7263)
+  [\#7291](https://github.com/ClickHouse/ClickHouse/pull/7291) ([sundyli](https://github.com/sundy-li))
+- Added the functions `arraySplit` and `arrayReverseSplit`, which split an array by a “cut off”
+  condition. They are useful in time sequence handling (see the sketch after this list).
+  [\#7294](https://github.com/ClickHouse/ClickHouse/pull/7294) ([hcz](https://github.com/hczhcz))
+- Added new functions that return the Array of all matched indices in the multiMatch family of functions.
+  [\#7299](https://github.com/ClickHouse/ClickHouse/pull/7299) ([Danila Kutenin](https://github.com/danlark1))
+- Added a new database engine `Lazy` that is optimized for storing a large number of small log
+  tables. [\#7171](https://github.com/ClickHouse/ClickHouse/pull/7171) ([Nikita Vasilev](https://github.com/nikvas0))
+- Added the aggregate functions groupBitmapAnd, -Or, -Xor for bitmap columns. [\#7109](https://github.com/ClickHouse/ClickHouse/pull/7109) ([Zhichang Yu](https://github.com/yuzhichang))
+- Added the aggregate function combinators -OrNull and -OrDefault, which return null
+  or default values when there is nothing to aggregate.
+  [\#7331](https://github.com/ClickHouse/ClickHouse/pull/7331)
+  ([hcz](https://github.com/hczhcz))
+- Introduced the CustomSeparated data format that supports custom escaping and
+  delimiter rules. [\#7118](https://github.com/ClickHouse/ClickHouse/pull/7118)
+  ([tavplubix](https://github.com/tavplubix))
+- Support Redis as a source of external dictionaries. [\#4361](https://github.com/ClickHouse/ClickHouse/pull/4361) [\#6962](https://github.com/ClickHouse/ClickHouse/pull/6962) ([comunodi](https://github.com/comunodi), [Anton Popov](https://github.com/CurtizJ))
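+
+A hedged sketch of `arraySplit`: the array is cut before each element for which the lambda returns a non-zero value:
+
+``` sql
+SELECT arraySplit(x -> x = 1, [1, 2, 3, 1, 4, 5]) AS groups  -- [[1, 2, 3], [1, 4, 5]]
+```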
+
+#### Improvement {#improvement-1}
+
+- Add a log message in case a queue\_wait\_max\_ms wait takes place. [\#7390](https://github.com/ClickHouse/ClickHouse/pull/7390) ([Azat Khuzhin](https://github.com/azat))
+- Made the setting `s3_min_upload_part_size` table-level. [\#7059](https://github.com/ClickHouse/ClickHouse/pull/7059) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Check TTL in StorageFactory. [\#7304](https://github.com/ClickHouse/ClickHouse/pull/7304) ([sundyli](https://github.com/sundy-li))
+- Squash left-hand blocks in partial merge join (optimization). [\#7122](https://github.com/ClickHouse/ClickHouse/pull/7122) ([Artem Zuikov](https://github.com/4ertus2))
+- Do not allow non-deterministic functions in mutations of replicated table engines, because they can introduce inconsistencies between replicas. [\#7247](https://github.com/ClickHouse/ClickHouse/pull/7247) ([Alexander Kazakov](https://github.com/Akazz))
+- Disable the memory tracker while converting an exception stack trace to a string. It can prevent the loss of error messages of type `Memory limit exceeded` on the server, which caused the `Attempt to read after eof` exception on the client. [\#7264](https://github.com/ClickHouse/ClickHouse/pull/7264) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Miscellaneous format improvements. Resolves [\#6033](https://github.com/ClickHouse/ClickHouse/issues/6033), [\#2633](https://github.com/ClickHouse/ClickHouse/issues/2633), [\#6611](https://github.com/ClickHouse/ClickHouse/issues/6611), [\#6742](https://github.com/ClickHouse/ClickHouse/issues/6742) [\#7215](https://github.com/ClickHouse/ClickHouse/pull/7215) ([tavplubix](https://github.com/tavplubix))
+- ClickHouse ignores values on the right side of the IN operator that are not convertible to the left side type. Make it work properly for compound types – Array and Tuple. [\#7283](https://github.com/ClickHouse/ClickHouse/pull/7283) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Support missing inequalities for ASOF JOIN. It is possible to join the less-or-equal variant and the strict greater and less variants for the ASOF column in ON syntax (see the sketch after this list). [\#7282](https://github.com/ClickHouse/ClickHouse/pull/7282) ([Artem Zuikov](https://github.com/4ertus2))
+- Optimize partial merge join. [\#7070](https://github.com/ClickHouse/ClickHouse/pull/7070) ([Artem Zuikov](https://github.com/4ertus2))
+- Do not use more than 98K of memory in uniqCombined functions. [\#7236](https://github.com/ClickHouse/ClickHouse/pull/7236), [\#7270](https://github.com/ClickHouse/ClickHouse/pull/7270) ([Azat Khuzhin](https://github.com/azat))
+- Flush parts of the right-hand joining table to disk in PartialMergeJoin (if there is not enough memory). Load the data back when needed. [\#7186](https://github.com/ClickHouse/ClickHouse/pull/7186) ([Artem Zuikov](https://github.com/4ertus2))
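+
+A minimal sketch of ASOF JOIN with the ON syntax mentioned above (table names are hypothetical):
+
+``` sql
+-- For each event, pick the latest quote with the same key at or before the event
+-- time; per the entry above, strict (>, <) and <= variants are now also accepted.
+SELECT e.k, e.t AS event_time, q.t AS quote_time
+FROM events AS e
+ASOF JOIN quotes AS q ON e.k = q.k AND e.t >= q.t;
+```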
+
+#### Performance Improvement {#performance-improvement-1}
+
+- Speed up joinGet with const arguments by avoiding data duplication (see the sketch after this list). [\#7359](https://github.com/ClickHouse/ClickHouse/pull/7359) ([Amos Bird](https://github.com/amosbird))
+- Return early if the subquery is empty. [\#7007](https://github.com/ClickHouse/ClickHouse/pull/7007) ([小路](https://github.com/nicelulu))
+- Optimize parsing of the SQL expression in Values. [\#6781](https://github.com/ClickHouse/ClickHouse/pull/6781) ([tavplubix](https://github.com/tavplubix))
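+
+A sketch of the joinGet usage whose constant-argument case was sped up above (the table is hypothetical):
+
+``` sql
+CREATE TABLE id_val (id UInt32, val UInt8) ENGINE = Join(ANY, LEFT, id);
+INSERT INTO id_val VALUES (1, 11), (2, 12);
+-- joinGet with a constant key: the case optimized by the entry above.
+SELECT joinGet('id_val', 'val', toUInt32(1));
+```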
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-1}
+
+- Disable some contribs for cross-compilation to Mac OS. [\#7101](https://github.com/ClickHouse/ClickHouse/pull/7101) ([Ivan](https://github.com/abyss7))
+- Add the missing linking with PocoXML for clickhouse\_common\_io. [\#7200](https://github.com/ClickHouse/ClickHouse/pull/7200) ([Azat Khuzhin](https://github.com/azat))
+- Accept multiple test filter arguments in clickhouse-test. [\#7226](https://github.com/ClickHouse/ClickHouse/pull/7226) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Enable musl and jemalloc for ARM. [\#7300](https://github.com/ClickHouse/ClickHouse/pull/7300) ([Amos Bird](https://github.com/amosbird))
+- Added the `--client-option` parameter to `clickhouse-test` to pass additional parameters to the client. [\#7277](https://github.com/ClickHouse/ClickHouse/pull/7277) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Preserve existing configs on rpm package upgrade. [\#7103](https://github.com/ClickHouse/ClickHouse/pull/7103) ([filimonov](https://github.com/filimonov))
+- Fix errors detected by PVS. [\#7153](https://github.com/ClickHouse/ClickHouse/pull/7153) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix the build for Darwin. [\#7149](https://github.com/ClickHouse/ClickHouse/pull/7149) ([Ivan](https://github.com/abyss7))
+- glibc 2.29 compatibility. [\#7142](https://github.com/ClickHouse/ClickHouse/pull/7142) ([Amos Bird](https://github.com/amosbird))
+- Make sure dh\_clean does not touch source files. [\#7205](https://github.com/ClickHouse/ClickHouse/pull/7205) ([Amos Bird](https://github.com/amosbird))
+- Attempt to avoid conflicts when updating from the Altinity rpm, which has the config file packaged separately in clickhouse-server-common. [\#7073](https://github.com/ClickHouse/ClickHouse/pull/7073) ([filimonov](https://github.com/filimonov))
+- Optimize some header files for faster rebuilds. [\#7212](https://github.com/ClickHouse/ClickHouse/pull/7212), [\#7231](https://github.com/ClickHouse/ClickHouse/pull/7231) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Add performance tests for Date and DateTime. [\#7332](https://github.com/ClickHouse/ClickHouse/pull/7332) ([Vasily Nemkov](https://github.com/Enmk))
+- Fix some tests that contained non-deterministic mutations. [\#7132](https://github.com/ClickHouse/ClickHouse/pull/7132) ([Alexander Kazakov](https://github.com/Akazz))
+- Add a build with MemorySanitizer to CI. [\#7066](https://github.com/ClickHouse/ClickHouse/pull/7066) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Avoid the use of uninitialized values in MetricsTransmitter. [\#7158](https://github.com/ClickHouse/ClickHouse/pull/7158) ([Azat Khuzhin](https://github.com/azat))
+- Fix some issues in Fields found by MemorySanitizer. [\#7135](https://github.com/ClickHouse/ClickHouse/pull/7135), [\#7179](https://github.com/ClickHouse/ClickHouse/pull/7179) ([Alexander Kuzmenkov](https://github.com/akuzm)), [\#7376](https://github.com/ClickHouse/ClickHouse/pull/7376) ([Amos Bird](https://github.com/amosbird))
+- Fix undefined behavior in murmurhash32. [\#7388](https://github.com/ClickHouse/ClickHouse/pull/7388) ([Amos Bird](https://github.com/amosbird))
+- Fix undefined behavior in StoragesInfoStream. [\#7384](https://github.com/ClickHouse/ClickHouse/pull/7384) ([tavplubix](https://github.com/tavplubix))
+- Fixed constant expressions folding for external database engines (MySQL, ODBC, JDBC). In previous versions it wasn't working for multiple constant expressions, and was not working at all for Date, DateTime and UUID. This fixes [\#7245](https://github.com/ClickHouse/ClickHouse/issues/7245) [\#7252](https://github.com/ClickHouse/ClickHouse/pull/7252) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix a ThreadSanitizer data race error in LIVE VIEW when accessing the no\_users\_thread variable. [\#7353](https://github.com/ClickHouse/ClickHouse/pull/7353) ([vzakaznikov](https://github.com/vzakaznikov))
+- Get rid of malloc symbols in libcommon. [\#7134](https://github.com/ClickHouse/ClickHouse/pull/7134), [\#7065](https://github.com/ClickHouse/ClickHouse/pull/7065) ([Amos Bird](https://github.com/amosbird))
+- Add a global flag ENABLE\_LIBRARIES for disabling all libraries. [\#7063](https://github.com/ClickHouse/ClickHouse/pull/7063) ([proller](https://github.com/proller))
+
+#### Code Cleanup {#code-cleanup}
+
+- Generalize the configuration repository to prepare for DDL for Dictionaries. [\#7155](https://github.com/ClickHouse/ClickHouse/pull/7155) ([alesapin](https://github.com/alesapin))
+- Parser for dictionaries DDL without any semantics. [\#7209](https://github.com/ClickHouse/ClickHouse/pull/7209) ([alesapin](https://github.com/alesapin))
+- Split ParserCreateQuery into different smaller parsers. [\#7253](https://github.com/ClickHouse/ClickHouse/pull/7253) ([alesapin](https://github.com/alesapin))
+- Small refactoring and renaming near external dictionaries. [\#7111](https://github.com/ClickHouse/ClickHouse/pull/7111) ([alesapin](https://github.com/alesapin))
+- Refactor some code to prepare for role-based access control. [\#7235](https://github.com/ClickHouse/ClickHouse/pull/7235) ([Vitaly Baranov](https://github.com/vitlibar))
+- Some improvements in the DatabaseOrdinary code. [\#7086](https://github.com/ClickHouse/ClickHouse/pull/7086) ([Nikita Vasilev](https://github.com/nikvas0))
+- Do not use iterators in the find() and emplace() methods of hash tables. [\#7026](https://github.com/ClickHouse/ClickHouse/pull/7026) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Fix getMultipleValuesFromConfig in case the parameter root is not empty. [\#7374](https://github.com/ClickHouse/ClickHouse/pull/7374) ([Mikhail Korotov](https://github.com/millb))
+- Remove some copy-paste (TemporaryFile and TemporaryFileStream). [\#7166](https://github.com/ClickHouse/ClickHouse/pull/7166) ([Artem Zuikov](https://github.com/4ertus2))
+- Improved code readability a little bit (`MergeTreeData::getActiveContainingPart`). [\#7361](https://github.com/ClickHouse/ClickHouse/pull/7361) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Wait for all scheduled jobs that are using local objects if `ThreadPool::schedule(...)` throws an exception. Renamed `ThreadPool::schedule(...)` to `ThreadPool::scheduleOrThrowOnError(...)` and fixed the comments to make it obvious that it may throw. [\#7350](https://github.com/ClickHouse/ClickHouse/pull/7350) ([tavplubix](https://github.com/tavplubix))
+
+## ClickHouse release 19.15 {#clickhouse-release-19-15}
+
+### ClickHouse release 19.15.4.10, 2019-10-31 {#clickhouse-release-19-15-4-10-2019-10-31}
+
+#### Bug Fix {#bug-fix-3}
+
+- Added handling of SQL\_TINYINT and SQL\_BIGINT, and fixed the handling of SQL\_FLOAT data source types in the ODBC Bridge. [\#7491](https://github.com/ClickHouse/ClickHouse/pull/7491) ([Denis Glazachev](https://github.com/traceon))
+- Allowed to have some parts on the destination disk or volume in MOVE PARTITION. [\#7434](https://github.com/ClickHouse/ClickHouse/pull/7434) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fixed NULL values in nullable columns through the ODBC bridge. [\#7402](https://github.com/ClickHouse/ClickHouse/pull/7402) ([Vasily Nemkov](https://github.com/Enmk))
+- Fixed INSERT into a Distributed non-local node with MATERIALIZED columns. [\#7377](https://github.com/ClickHouse/ClickHouse/pull/7377) ([Azat Khuzhin](https://github.com/azat))
+- Fixed the function getMultipleValuesFromConfig. [\#7374](https://github.com/ClickHouse/ClickHouse/pull/7374) ([Mikhail Korotov](https://github.com/millb))
+- Fixed the issue of using the HTTP keep-alive timeout instead of the TCP keep-alive timeout. [\#7351](https://github.com/ClickHouse/ClickHouse/pull/7351) ([Vasily Nemkov](https://github.com/Enmk))
+- Wait for all jobs to finish on exception (fixes rare segfaults). [\#7350](https://github.com/ClickHouse/ClickHouse/pull/7350) ([tavplubix](https://github.com/tavplubix))
+- Don't push to MVs when inserting into a Kafka table. [\#7265](https://github.com/ClickHouse/ClickHouse/pull/7265) ([Ivan](https://github.com/abyss7))
+- Disable the memory tracker for exception stack traces. [\#7264](https://github.com/ClickHouse/ClickHouse/pull/7264) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed constant expressions folding for external database engines. [\#7252](https://github.com/ClickHouse/ClickHouse/pull/7252) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Avoid the use of uninitialized values in MetricsTransmitter. [\#7158](https://github.com/ClickHouse/ClickHouse/pull/7158) ([Azat Khuzhin](https://github.com/azat))
+- Added an example config with macros for tests ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse release 19.15.3.6, 2019-10-09 {#clickhouse-release-19-15-3-6-2019-10-09}
+
+#### Bug Fix {#bug-fix-4}
+
+- Fixed bad\_variant in hashed dictionaries. ([alesapin](https://github.com/alesapin))
+- Fixed a bug with a segmentation fault in the ATTACH PART query. ([alesapin](https://github.com/alesapin))
+- Fixed time calculation in `MergeTreeData`. ([Vladimir Chebotarev](https://github.com/excitoon))
+- Commit to Kafka explicitly after the writing is finalized. [\#7175](https://github.com/ClickHouse/ClickHouse/pull/7175) ([Ivan](https://github.com/abyss7))
+- Serialize NULL values correctly in min/max indexes of MergeTree parts. [\#7234](https://github.com/ClickHouse/ClickHouse/pull/7234) ([Alexander Kuzmenkov](https://github.com/akuzm))
+
+### ClickHouse release 19.15.2.2, 2019-10-01 {#clickhouse-release-19-15-2-2-2019-10-01}
+
+#### New Feature {#new-feature-3}
+
+- Tiered storage: support using multiple storage volumes for tables with the MergeTree engine. It is possible to store fresh data on SSD and automatically move old data to HDD. ([example](https://clickhouse.github.io/clickhouse-presentations/meetup30/new_features/#12)). [\#4918](https://github.com/ClickHouse/ClickHouse/pull/4918) ([Igr](https://github.com/ObjatieGroba)) [\#6489](https://github.com/ClickHouse/ClickHouse/pull/6489) ([alesapin](https://github.com/alesapin))
+- Add the table function `input` for reading incoming data in an `INSERT SELECT` query. [\#5450](https://github.com/ClickHouse/ClickHouse/pull/5450) ([palasonic1](https://github.com/palasonic1)) [\#6832](https://github.com/ClickHouse/ClickHouse/pull/6832) ([Anton Popov](https://github.com/CurtizJ))
+- Add a `sparse_hashed` dictionary layout that is functionally equivalent to the `hashed` layout but more memory efficient. It uses about half as much memory at the cost of slower value retrieval. [\#6894](https://github.com/ClickHouse/ClickHouse/pull/6894) ([Azat Khuzhin](https://github.com/azat))
+- Implement the ability to define a list of users with access to dictionaries. Only usable with the currently connected database. [\#6907](https://github.com/ClickHouse/ClickHouse/pull/6907) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Add the `LIMIT` option to the `SHOW` query. [\#6944](https://github.com/ClickHouse/ClickHouse/pull/6944) ([Philipp Malkovsky](https://github.com/malkfilipp))
+- Add the `bitmapSubsetLimit(bitmap, range_start, limit)` function that returns a subset of the smallest `limit` values in the set that are no smaller than `range_start`. [\#6957](https://github.com/ClickHouse/ClickHouse/pull/6957) ([Zhichang Yu](https://github.com/yuzhichang))
+- Add the `bitmapMin` and `bitmapMax` functions. [\#6970](https://github.com/ClickHouse/ClickHouse/pull/6970) ([Zhichang Yu](https://github.com/yuzhichang))
+- Add the function `repeat`, related to [issue-6648](https://github.com/ClickHouse/ClickHouse/issues/6648). [\#6999](https://github.com/ClickHouse/ClickHouse/pull/6999) ([flynn](https://github.com/ucasFL))
+
+#### Experimental Feature {#experimental-feature-1}
+
+- Implement an (in-memory) merge join variant that does not change the current pipeline. The result is partially sorted by the merge key. Set `partial_merge_join = 1` to use this feature. The merge join is still in development. [\#6940](https://github.com/ClickHouse/ClickHouse/pull/6940) ([Artem Zuikov](https://github.com/4ertus2))
+- Add the `S3` engine and table function. It is still in development (no authentication support yet). [\#5596](https://github.com/ClickHouse/ClickHouse/pull/5596) ([Vladimir Chebotarev](https://github.com/excitoon))
+
+#### Improvement {#improvement-2}
+
+- Every message read from Kafka is inserted atomically. This resolves almost all known issues with the Kafka engine. [\#6950](https://github.com/ClickHouse/ClickHouse/pull/6950) ([Ivan](https://github.com/abyss7))
+- Improvements for failover of Distributed queries. Recovery time is shorter, and it is now configurable and can be seen in `system.clusters`. [\#6399](https://github.com/ClickHouse/ClickHouse/pull/6399) ([Vasily Nemkov](https://github.com/Enmk))
+- Support numeric values for Enums directly in the `IN` section. \#6766 [\#6941](https://github.com/ClickHouse/ClickHouse/pull/6941) ([dimarub2000](https://github.com/dimarub2000))
+- Support (optional, disabled by default) redirects on URL storage. [\#6914](https://github.com/ClickHouse/ClickHouse/pull/6914) ([maqroll](https://github.com/maqroll))
+- Add an information message when a client with an older version connects to the server. [\#6893](https://github.com/ClickHouse/ClickHouse/pull/6893) ([Philipp Malkovsky](https://github.com/malkfilipp))
+- Remove the maximum backoff sleep time limit for sending data in Distributed tables. [\#6895](https://github.com/ClickHouse/ClickHouse/pull/6895) ([Azat Khuzhin](https://github.com/azat))
+- Add the ability to send profile events (counters) with cumulative values to Graphite. It can be enabled under `<events_cumulative>` in the server `config.xml`. [\#6969](https://github.com/ClickHouse/ClickHouse/pull/6969) ([Azat Khuzhin](https://github.com/azat))
+- Automatically cast type `T` to `LowCardinality(T)` while inserting data into a column of type `LowCardinality(T)` in Native format via HTTP. [\#6891](https://github.com/ClickHouse/ClickHouse/pull/6891) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Add the ability to use the function `hex` without `reinterpretAsString` for `Float32`, `Float64` (see the sketch after this list). [\#7024](https://github.com/ClickHouse/ClickHouse/pull/7024) ([Mikhail Korotov](https://github.com/millb))
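+
+A quick sketch of the `hex`-for-floats improvement mentioned in the last item above:
+
+``` sql
+-- Previously this required reinterpretAsString; now hex() accepts floats
+-- directly, returning the hex representation of the value's raw bytes.
+SELECT hex(toFloat32(1.5)), hex(toFloat64(1.5));
+```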
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-2}
+
+- Add a gdb-index to the clickhouse binary with debug info. It will speed up the startup time of `gdb`. [\#6947](https://github.com/ClickHouse/ClickHouse/pull/6947) ([alesapin](https://github.com/alesapin))
+- Speed up deb packaging with a patched dpkg-deb which uses `pigz`. [\#6960](https://github.com/ClickHouse/ClickHouse/pull/6960) ([alesapin](https://github.com/alesapin))
+- Set `enable_fuzzing = 1` to enable libfuzzer instrumentation of all the project code. [\#7042](https://github.com/ClickHouse/ClickHouse/pull/7042) ([kyprizel](https://github.com/kyprizel))
+- Add a split build smoke test in CI. [\#7061](https://github.com/ClickHouse/ClickHouse/pull/7061) ([alesapin](https://github.com/alesapin))
+- Add a build with MemorySanitizer to CI. [\#7066](https://github.com/ClickHouse/ClickHouse/pull/7066) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Replace `libsparsehash` with `sparsehash-c11`. [\#6965](https://github.com/ClickHouse/ClickHouse/pull/6965) ([Azat Khuzhin](https://github.com/azat))
+
+#### Bug Fix {#bug-fix-5}
+
+- Fixed performance degradation of index analysis on complex keys on large tables. This fixes \#6924. [\#7075](https://github.com/ClickHouse/ClickHouse/pull/7075) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix a logical error causing segfaults when selecting from a Kafka empty topic. [\#6909](https://github.com/ClickHouse/ClickHouse/pull/6909) ([Ivan](https://github.com/abyss7))
+- Fix too early MySQL connection close in `MySQLBlockInputStream.cpp`. [\#6882](https://github.com/ClickHouse/ClickHouse/pull/6882) ([Clément Rodriguez](https://github.com/clemrodriguez))
+- Returned support for very old Linux kernels (fix [\#6841](https://github.com/ClickHouse/ClickHouse/issues/6841)). [\#6853](https://github.com/ClickHouse/ClickHouse/pull/6853) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix possible data loss in an `insert select` query in case of an empty block in the input stream. \#6834 \#6862 [\#6911](https://github.com/ClickHouse/ClickHouse/pull/6911) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix the function `АrrayEnumerateUniqRanked` with empty arrays in params. [\#6928](https://github.com/ClickHouse/ClickHouse/pull/6928) ([proller](https://github.com/proller))
+- Fix complex queries with array joins and global subqueries. [\#6934](https://github.com/ClickHouse/ClickHouse/pull/6934) ([Ivan](https://github.com/abyss7))
+- Fix the `Unknown identifier` error in ORDER BY and GROUP BY with multiple JOINs. [\#7022](https://github.com/ClickHouse/ClickHouse/pull/7022) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed an `MSan` warning while executing functions with a `LowCardinality` argument. [\#7062](https://github.com/ClickHouse/ClickHouse/pull/7062) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+
+#### Backward Incompatible Change {#backward-incompatible-change-2}
+
+- Changed the serialization format of bitmap\* aggregate function states to improve performance. Serialized states of bitmap\* from previous versions cannot be read. [\#6908](https://github.com/ClickHouse/ClickHouse/pull/6908) ([Zhichang Yu](https://github.com/yuzhichang))
+
+## ClickHouse release 19.14 {#clickhouse-release-19-14}
+
+### ClickHouse release 19.14.7.15, 2019-10-02 {#clickhouse-release-19-14-7-15-2019-10-02}
+
+#### Bug Fix {#bug-fix-6}
+
+- This release also contains all bug fixes from 19.11.12.69.
+- Fixed compatibility for distributed queries between 19.14 and earlier versions. This fixes [\#7068](https://github.com/ClickHouse/ClickHouse/issues/7068). [\#7069](https://github.com/ClickHouse/ClickHouse/pull/7069) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse release 19.14.6.12, 2019-09-19 {#clickhouse-release-19-14-6-12-2019-09-19}
+
+#### Bug Fix {#bug-fix-7}
+
+- Fix the function `АrrayEnumerateUniqRanked` with empty arrays in params. [\#6928](https://github.com/ClickHouse/ClickHouse/pull/6928) ([proller](https://github.com/proller))
+- Fixed the subquery name in queries with `ARRAY JOIN` and `GLOBAL IN subquery` with an alias. Use the subquery alias for the external table name if it is specified. [\#6934](https://github.com/ClickHouse/ClickHouse/pull/6934) ([Ivan](https://github.com/abyss7))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-3}
+
+- Fix the [flapping](https://clickhouse-test-reports.s3.yandex.net/6944/aab95fd5175a513413c7395a73a82044bdafb906/functional_stateless_tests_(debug).html) test `00715_fetch_merged_or_mutated_part_zookeeper`, because it needs to wait for mutations to be applied. [\#6977](https://github.com/ClickHouse/ClickHouse/pull/6977) ([Alexander Kazakov](https://github.com/Akazz))
+- Fixed UBSan and MemSan failures in the function `groupUniqArray` with an empty array argument. It was caused by placing an empty `PaddedPODArray` into a hash table zero cell, because the constructor for the zero cell value was not called. [\#6937](https://github.com/ClickHouse/ClickHouse/pull/6937) ([Amos Bird](https://github.com/amosbird))
+
+### ClickHouse release 19.14.3.3, 2019-09-10 {#clickhouse-release-19-14-3-3-2019-09-10}
+
+#### New Feature {#new-feature-4}
+
+- `WITH FILL` modifier for `ORDER BY` (continuation of [\#5069](https://github.com/ClickHouse/ClickHouse/issues/5069); see the sketch after this list). [\#6610](https://github.com/ClickHouse/ClickHouse/pull/6610) ([Anton Popov](https://github.com/CurtizJ))
+- `WITH TIES` modifier for `LIMIT` (continuation of [\#5069](https://github.com/ClickHouse/ClickHouse/issues/5069)). [\#6610](https://github.com/ClickHouse/ClickHouse/pull/6610) ([Anton Popov](https://github.com/CurtizJ))
+- Parse an unquoted `NULL` literal as NULL (if the setting `format_csv_unquoted_null_literal_as_null=1`). Initialize null fields with default values if the data type of the field is not nullable (if the setting `input_format_null_as_default=1`). [\#5990](https://github.com/ClickHouse/ClickHouse/issues/5990) [\#6055](https://github.com/ClickHouse/ClickHouse/pull/6055) ([tavplubix](https://github.com/tavplubix))
+- Support wildcards in paths of the table functions `file` and `hdfs`. If the path contains wildcards, the table will be read-only. Example of usage: `select * from hdfs('hdfs://hdfs1:9000/some_dir/another_dir/*/file{0..9}{0..9}')` and `select * from file('some_dir/{some_file,another_file,yet_another}.tsv', 'TSV', 'value UInt32')`. [\#6092](https://github.com/ClickHouse/ClickHouse/pull/6092) ([Olga Khvostikova](https://github.com/stavrolia))
+- New `system.metric_log` table which stores values of `system.events` and `system.metrics` with a specified time interval. [\#6363](https://github.com/ClickHouse/ClickHouse/issues/6363) [\#6467](https://github.com/ClickHouse/ClickHouse/pull/6467) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) [\#6530](https://github.com/ClickHouse/ClickHouse/pull/6530) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Allow writing ClickHouse text logs to the `system.text_log` table. [\#6037](https://github.com/ClickHouse/ClickHouse/issues/6037) [\#6103](https://github.com/ClickHouse/ClickHouse/pull/6103) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) [\#6164](https://github.com/ClickHouse/ClickHouse/pull/6164) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Show private symbols in stack traces (done via parsing symbol tables of ELF files). Added information about file and line numbers in stack traces if debug info is present. Sped up symbol name lookup by indexing symbols present in the program. Added new SQL functions for introspection: `demangle` and `addressToLine`. Renamed the function `symbolizeAddress` to `addressToSymbol` for consistency. The function `addressToSymbol` returns the mangled name for performance reasons, and you have to apply `demangle`. Added the setting `allow_introspection_functions`, which is turned off by default. [\#6201](https://github.com/ClickHouse/ClickHouse/pull/6201) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Table function `values` (the name is case-insensitive). It allows reading from a `VALUES` list as proposed in [\#5984](https://github.com/ClickHouse/ClickHouse/issues/5984). Example: `SELECT * FROM VALUES('a UInt64, s String', (1, 'one'), (2, 'two'), (3, 'three'))`. [\#6217](https://github.com/ClickHouse/ClickHouse/issues/6217). [\#6209](https://github.com/ClickHouse/ClickHouse/pull/6209) ([dimarub2000](https://github.com/dimarub2000))
+- Added the ability to alter storage settings. Syntax: `ALTER TABLE <table> MODIFY SETTING <setting> = <value>`. [\#6366](https://github.com/ClickHouse/ClickHouse/pull/6366) [\#6669](https://github.com/ClickHouse/ClickHouse/pull/6669) [\#6685](https://github.com/ClickHouse/ClickHouse/pull/6685) ([alesapin](https://github.com/alesapin))
+- Support for removing detached parts. Syntax: `ALTER TABLE <table_name> DROP DETACHED PART '<part_id>'`. [\#6158](https://github.com/ClickHouse/ClickHouse/pull/6158) ([tavplubix](https://github.com/tavplubix))
+- Table constraints. Allows adding constraints to a table definition which will be checked at insert. [\#5273](https://github.com/ClickHouse/ClickHouse/pull/5273) ([Gleb Novikov](https://github.com/NanoBjorn)) [\#6652](https://github.com/ClickHouse/ClickHouse/pull/6652) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Support for cascaded materialized views. [\#6324](https://github.com/ClickHouse/ClickHouse/pull/6324) ([Amos Bird](https://github.com/amosbird))
+- Turn on the query profiler by default to sample every query execution thread once per second. [\#6283](https://github.com/ClickHouse/ClickHouse/pull/6283) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Input format `ORC`. [\#6454](https://github.com/ClickHouse/ClickHouse/pull/6454) [\#6703](https://github.com/ClickHouse/ClickHouse/pull/6703) ([akonyaev90](https://github.com/akonyaev90))
+- Added two new functions: `sigmoid` and `tanh` (useful for machine learning applications). [\#6254](https://github.com/ClickHouse/ClickHouse/pull/6254) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Functions `hasToken(haystack, token)`, `hasTokenCaseInsensitive(haystack, token)` to check if the given token is in the haystack. A token is a maximal-length substring between two non-alphanumeric ASCII characters (or the boundaries of the haystack). The token must be a constant string. Supported by the tokenbf\_v1 index specialization. [\#6596](https://github.com/ClickHouse/ClickHouse/pull/6596), [\#6662](https://github.com/ClickHouse/ClickHouse/pull/6662) ([Vasily Nemkov](https://github.com/Enmk))
+- New function `neighbor(value, offset[, default_value])`. Allows reaching the previous/next value within a column in a block of data. [\#5925](https://github.com/ClickHouse/ClickHouse/pull/5925) ([Alex Krash](https://github.com/alex-krash)) [6685365ab8c5b74f9650492c88a012596eb1b0c6](https://github.com/ClickHouse/ClickHouse/commit/6685365ab8c5b74f9650492c88a012596eb1b0c6) [341e2e4587a18065c2da1ca888c73389f48ce36c](https://github.com/ClickHouse/ClickHouse/commit/341e2e4587a18065c2da1ca888c73389f48ce36c) [Alexey Milovidov](https://github.com/alexey-milovidov)
+- Created a function `currentUser()` returning the login of the authorized user. Added the alias `user()` for compatibility with MySQL. [\#6470](https://github.com/ClickHouse/ClickHouse/pull/6470) ([Alex Krash](https://github.com/alex-krash))
+- New aggregate functions `quantilesExactInclusive` and `quantilesExactExclusive`, as proposed in [\#5885](https://github.com/ClickHouse/ClickHouse/issues/5885). [\#6477](https://github.com/ClickHouse/ClickHouse/pull/6477) ([dimarub2000](https://github.com/dimarub2000))
+- Function `bitmapRange(bitmap, range_begin, range_end)`, which returns a new set with the specified range (not including `range_end`). [\#6314](https://github.com/ClickHouse/ClickHouse/pull/6314) ([Zhichang Yu](https://github.com/yuzhichang))
+- Function `geohashesInBox(longitude_min, latitude_min, longitude_max, latitude_max, precision)`, which creates an array of precision-long strings of geohash boxes covering the provided area. [\#6127](https://github.com/ClickHouse/ClickHouse/pull/6127) ([Vasily Nemkov](https://github.com/Enmk))
+- Implement support for INSERT queries with `Kafka` tables. [\#6012](https://github.com/ClickHouse/ClickHouse/pull/6012) ([Ivan](https://github.com/abyss7))
+- Added support for the `_partition` and `_timestamp` virtual columns to the Kafka engine. [\#6400](https://github.com/ClickHouse/ClickHouse/pull/6400) ([Ivan](https://github.com/abyss7))
+- Possibility to remove sensitive data from `query_log`, server logs and the process list with regexp-based rules. [\#5710](https://github.com/ClickHouse/ClickHouse/pull/5710) ([filimonov](https://github.com/filimonov))
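+
+A minimal sketch of the `WITH FILL` modifier announced at the top of this list (the query is illustrative):
+
+``` sql
+-- Rows for the missing values of n between 0 and 10 are inserted into the
+-- sorted result; columns not mentioned in WITH FILL get default values.
+SELECT number AS n, 'original' AS source
+FROM numbers(10)
+WHERE n % 3 = 1
+ORDER BY n WITH FILL FROM 0 TO 10;
+```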
+
+#### Experimental Feature {#experimental-feature-2}
+
+- Input and output data format `Template`. It allows specifying a custom format string for input and output. [\#4354](https://github.com/ClickHouse/ClickHouse/issues/4354) [\#6727](https://github.com/ClickHouse/ClickHouse/pull/6727) ([tavplubix](https://github.com/tavplubix))
+- Implementation of `LIVE VIEW` tables, originally proposed in [\#2898](https://github.com/ClickHouse/ClickHouse/pull/2898), prepared in [\#3925](https://github.com/ClickHouse/ClickHouse/issues/3925), and then updated in [\#5541](https://github.com/ClickHouse/ClickHouse/issues/5541). See [\#5541](https://github.com/ClickHouse/ClickHouse/issues/5541) for a detailed description. [\#5541](https://github.com/ClickHouse/ClickHouse/issues/5541) ([vzakaznikov](https://github.com/vzakaznikov)) [\#6425](https://github.com/ClickHouse/ClickHouse/pull/6425) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) [\#6656](https://github.com/ClickHouse/ClickHouse/pull/6656) ([vzakaznikov](https://github.com/vzakaznikov)) Note that the `LIVE VIEW` feature may be removed in future versions. A minimal sketch of the flow is shown below.
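+
+A minimal sketch of the experimental LIVE VIEW flow, assuming the feature is enabled (table names are hypothetical):
+
+``` sql
+SET allow_experimental_live_view = 1;
+CREATE TABLE src (x UInt32) ENGINE = MergeTree ORDER BY x;
+CREATE LIVE VIEW lv AS SELECT sum(x) FROM src;
+-- WATCH streams a fresh result each time the stored query result changes.
+WATCH lv;
+```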
+
+#### Bug Fix {#bug-fix-8}
+
+- This release also contains all bug fixes from 19.13 and 19.11.
+- Fix a segmentation fault when the table has skip indices and vertical merge happens. [\#6723](https://github.com/ClickHouse/ClickHouse/pull/6723) ([alesapin](https://github.com/alesapin))
+- Fix per-column TTL with non-trivial column defaults. Previously, in case of a forced TTL merge with the `OPTIMIZE ... FINAL` query, expired values were replaced by type defaults instead of user-specified column defaults. [\#6796](https://github.com/ClickHouse/ClickHouse/pull/6796) ([Anton Popov](https://github.com/CurtizJ))
+- Fix the Kafka messages duplication problem on normal server restart. [\#6597](https://github.com/ClickHouse/ClickHouse/pull/6597) ([Ivan](https://github.com/abyss7))
+- Fixed an infinite loop when reading Kafka messages; without the fix, the consumer could get paused indefinitely in some scenarios. [\#6354](https://github.com/ClickHouse/ClickHouse/pull/6354) ([Ivan](https://github.com/abyss7))
+- Fix the `Key expression contains comparison between inconvertible types` exception in the `bitmapContains` function. [\#6136](https://github.com/ClickHouse/ClickHouse/issues/6136) [\#6146](https://github.com/ClickHouse/ClickHouse/issues/6146) [\#6156](https://github.com/ClickHouse/ClickHouse/pull/6156) ([dimarub2000](https://github.com/dimarub2000))
+- Fix a segfault with enabled `optimize_skip_unused_shards` and a missing sharding key. [\#6384](https://github.com/ClickHouse/ClickHouse/pull/6384) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed wrong code in mutations that may lead to memory corruption. Fixed a segfault with read of address `0x14c0` that may have happened due to concurrent `DROP TABLE` and `SELECT` from `system.parts` or `system.parts_columns`. Fixed a race condition in the preparation of mutation queries. Fixed a deadlock caused by `OPTIMIZE` of replicated tables and concurrent modification operations like ALTERs. [\#6514](https://github.com/ClickHouse/ClickHouse/pull/6514) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Removed extra verbose logging in the MySQL interface. [\#6389](https://github.com/ClickHouse/ClickHouse/pull/6389) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Return the ability to parse boolean settings from ‘true’ and ‘false’ in the configuration file. [\#6278](https://github.com/ClickHouse/ClickHouse/pull/6278) ([alesapin](https://github.com/alesapin))
+- Fix a crash in the `quantile` and `median` functions over `Nullable(Decimal128)`. [\#6378](https://github.com/ClickHouse/ClickHouse/pull/6378) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed a possibly incomplete result returned by a `SELECT` query with a `WHERE` condition on the primary key that contained a conversion to a Float type. It was caused by incorrect checking of monotonicity of the `toFloat` function. [\#6248](https://github.com/ClickHouse/ClickHouse/issues/6248) [\#6374](https://github.com/ClickHouse/ClickHouse/pull/6374) ([dimarub2000](https://github.com/dimarub2000))
+- Check the `max_expanded_ast_elements` setting for mutations. Clear mutations after `TRUNCATE TABLE`. [\#6205](https://github.com/ClickHouse/ClickHouse/pull/6205) ([Winter Zhang](https://github.com/zhang2014))
+- Fix JOIN results for key columns when used with `join_use_nulls`. Attach Nulls instead of column defaults. [\#6249](https://github.com/ClickHouse/ClickHouse/pull/6249) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix skip indices with vertical merge and alter. Fix for the `Bad size of marks file` exception. [\#6594](https://github.com/ClickHouse/ClickHouse/issues/6594) [\#6713](https://github.com/ClickHouse/ClickHouse/pull/6713) ([alesapin](https://github.com/alesapin))
+- Fix a rare crash in `ALTER MODIFY COLUMN` and vertical merge when one of the merged/altered parts is empty (0 rows). [\#6746](https://github.com/ClickHouse/ClickHouse/issues/6746) [\#6780](https://github.com/ClickHouse/ClickHouse/pull/6780) ([alesapin](https://github.com/alesapin))
+- Fixed a bug in conversion of `LowCardinality` types in `AggregateFunctionFactory`. This fixes [\#6257](https://github.com/ClickHouse/ClickHouse/issues/6257). [\#6281](https://github.com/ClickHouse/ClickHouse/pull/6281) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix wrong behavior and possible segfaults in the `topK` and `topKWeighted` aggregate functions. [\#6404](https://github.com/ClickHouse/ClickHouse/pull/6404) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed unsafe code around the `getIdentifier` function. [\#6401](https://github.com/ClickHouse/ClickHouse/issues/6401) [\#6409](https://github.com/ClickHouse/ClickHouse/pull/6409) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a bug in the MySQL wire protocol (used while connecting to ClickHouse from the MySQL client), caused by a heap buffer overflow in `PacketPayloadWriteBuffer`. [\#6212](https://github.com/ClickHouse/ClickHouse/pull/6212) ([Yuriy Baranov](https://github.com/yurriy))
+- Fixed a memory leak in the `bitmapSubsetInRange` function. [\#6819](https://github.com/ClickHouse/ClickHouse/pull/6819) ([Zhichang Yu](https://github.com/yuzhichang))
+- Fix a rare bug when a mutation is executed after a granularity change. [\#6816](https://github.com/ClickHouse/ClickHouse/pull/6816) ([alesapin](https://github.com/alesapin))
+- Allow Protobuf messages with all fields set to default values. [\#6132](https://github.com/ClickHouse/ClickHouse/pull/6132) ([Vitaly Baranov](https://github.com/vitlibar))
+- Resolve a bug with the `nullIf` function when we send a `NULL` argument as the second argument. [\#6446](https://github.com/ClickHouse/ClickHouse/pull/6446) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Fixed a rare bug with wrong memory allocation/deallocation in complex-key cache dictionaries with string fields, which led to unbounded memory consumption (looks like a memory leak). The bug reproduces when the string size is a power of two starting from eight (8, 16, 32, etc.). [\#6447](https://github.com/ClickHouse/ClickHouse/pull/6447) ([alesapin](https://github.com/alesapin))
+- Fixed Gorilla encoding on small sequences, which caused the exception `Cannot write after end of buffer`. [\#6398](https://github.com/ClickHouse/ClickHouse/issues/6398) [\#6444](https://github.com/ClickHouse/ClickHouse/pull/6444) ([Vasily Nemkov](https://github.com/Enmk))
[\#6398](https://github.com/ClickHouse/ClickHouse/issues/6398) [\#6444](https://github.com/ClickHouse/ClickHouse/pull/6444) ([Vasily Nemkov](https://github.com/Enmk)) +- Nullableではない型をJOINsで使用できるようにする `join_use_nulls` 有効。 [\#6705](https://github.com/ClickHouse/ClickHouse/pull/6705) ([Artem Zuikov](https://github.com/4ertus2)) +- 無効にする `Poco::AbstractConfiguration` クエリ内での置換 `clickhouse-client`. [\#6706](https://github.com/ClickHouse/ClickHouse/pull/6706) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- デッドロックを回避 `REPLACE PARTITION`. [\#6677](https://github.com/ClickHouse/ClickHouse/pull/6677) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- を使用して `arrayReduce` 定数引数の場合、segfaultにつながる可能性があります。 [\#6242](https://github.com/ClickHouse/ClickHouse/issues/6242) [\#6326](https://github.com/ClickHouse/ClickHouse/pull/6326) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- レプリカが後に復元された場合に表示される不整合な部分を修正 `DROP PARTITION`. [\#6522](https://github.com/ClickHouse/ClickHouse/issues/6522) [\#6523](https://github.com/ClickHouse/ClickHouse/pull/6523) ([tavplubix](https://github.com/tavplubix)) +- 固定こつ `JSONExtractRaw` 機能。 [\#6195](https://github.com/ClickHouse/ClickHouse/issues/6195) [\#6198](https://github.com/ClickHouse/ClickHouse/pull/6198) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- 修正のバグと誤ったスキップ指数の直列化を行い、その凝集と適応粒度. [\#6594](https://github.com/ClickHouse/ClickHouse/issues/6594). [\#6748](https://github.com/ClickHouse/ClickHouse/pull/6748) ([alesapin](https://github.com/alesapin)) +- 修正 `WITH ROLLUP` と `WITH CUBE` の修飾子 `GROUP BY` 二レベルの集計。 [\#6225](https://github.com/ClickHouse/ClickHouse/pull/6225) ([アントン-ポポフ](https://github.com/CurtizJ)) +- のを修正した。筆二次指標マーク適応型粒度. [\#6126](https://github.com/ClickHouse/ClickHouse/pull/6126) ([alesapin](https://github.com/alesapin)) +- サーバーの起動中に初期化の順序を修正します。 それ以来 `StorageMergeTree::background_task_handle` で初期化される。 `startup()` その `MergeTreeBlockOutputStream::write()` 初期化の前に使用しようとするかもしれません。 すぐチェックインの場合は初期化されます。 [\#6080](https://github.com/ClickHouse/ClickHouse/pull/6080) ([イワン](https://github.com/abyss7)) +- エラーで完了した前の読み取り操作からデータバッファーをクリアします。 [\#6026](https://github.com/ClickHouse/ClickHouse/pull/6026) ([ニコライ](https://github.com/bopohaa)) +- 固定バを適応的粒度を新規作成時のレプリカのための複製\*mergetreeます。 [\#6394](https://github.com/ClickHouse/ClickHouse/issues/6394) [\#6452](https://github.com/ClickHouse/ClickHouse/pull/6452) ([alesapin](https://github.com/alesapin)) +- 例外が発生した場合のサーバーの起動時に可能なクラッシュを修正しました `libunwind` 初期化されていないアクセス時の例外時 `ThreadStatus` 構造。 [\#6456](https://github.com/ClickHouse/ClickHouse/pull/6456) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) +- でクラッシュを修正 `yandexConsistentHash` 機能。 ファズテストによって発見。 [\#6304](https://github.com/ClickHouse/ClickHouse/issues/6304) [\#6305](https://github.com/ClickHouse/ClickHouse/pull/6305) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- 固定の可能性を掛けのクエリがサーバが過負荷状態で運転され、グローバルスレッドプールが近い。 これは、分散クエリが各シャードに接続ごとにスレッドを割り当てるため、多数のシャード(数百)を持つクラスターで発生する可能性が高くなります。 たとえば、330個のシャードのクラスターが30個の同時分散クエリを処理している場合、この問題は再現されます。 この問題に影響するすべてのバージョンから19.2. 
+- Fixed the logic of the `arrayEnumerateUniqRanked` function. [\#6423](https://github.com/ClickHouse/ClickHouse/pull/6423) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix a segfault when decoding the symbol table. [\#6603](https://github.com/ClickHouse/ClickHouse/pull/6603) ([Amos Bird](https://github.com/amosbird))
+- Fixed an irrelevant exception in the cast of `LowCardinality(Nullable)` to not-Nullable column in case it doesn't contain Nulls (e.g. in query like `SELECT CAST(CAST('Hello' AS LowCardinality(Nullable(String))) AS String)`. [\#6094](https://github.com/ClickHouse/ClickHouse/issues/6094) [\#6119](https://github.com/ClickHouse/ClickHouse/pull/6119) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Removed extra quoting of descriptions in the `system.settings` table. [\#6696](https://github.com/ClickHouse/ClickHouse/issues/6696) [\#6699](https://github.com/ClickHouse/ClickHouse/pull/6699) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Avoid a possible deadlock in `TRUNCATE` of replicated tables. [\#6695](https://github.com/ClickHouse/ClickHouse/pull/6695) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix reading in order of the sorting key. [\#6189](https://github.com/ClickHouse/ClickHouse/pull/6189) ([Anton Popov](https://github.com/CurtizJ))
+- Fix the `ALTER TABLE ... UPDATE` query for tables with `enable_mixed_granularity_parts=1`. [\#6543](https://github.com/ClickHouse/ClickHouse/pull/6543) ([alesapin](https://github.com/alesapin))
+- Fix a bug opened by [\#4405](https://github.com/ClickHouse/ClickHouse/pull/4405) (since 19.4.0). Reproduces in queries to Distributed tables over MergeTree tables when we don't query any columns (`SELECT 1`). [\#6236](https://github.com/ClickHouse/ClickHouse/pull/6236) ([alesapin](https://github.com/alesapin))
+- Fixed overflow in integer division of a signed type by an unsigned type. The behaviour was exactly as in the C or C++ language (integer promotion rules), which may be surprising. Please note that the overflow is still possible when dividing a large signed number by a large unsigned number or vice-versa (but that case is less usual). The issue existed in all server versions. [\#6214](https://github.com/ClickHouse/ClickHouse/issues/6214) [\#6233](https://github.com/ClickHouse/ClickHouse/pull/6233) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Limit the maximum sleep time for throttling when `max_execution_speed` or `max_execution_speed_bytes` is set. Fixed false errors like `Estimated query execution time (inf seconds) is too long`. [\#5547](https://github.com/ClickHouse/ClickHouse/issues/5547) [\#6232](https://github.com/ClickHouse/ClickHouse/pull/6232) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed issues with using `MATERIALIZED` columns and aliases in `MaterializedView`. [\#448](https://github.com/ClickHouse/ClickHouse/issues/448) [\#3484](https://github.com/ClickHouse/ClickHouse/issues/3484) [\#3450](https://github.com/ClickHouse/ClickHouse/issues/3450) [\#2878](https://github.com/ClickHouse/ClickHouse/issues/2878) [\#2285](https://github.com/ClickHouse/ClickHouse/issues/2285) [\#3796](https://github.com/ClickHouse/ClickHouse/pull/3796) ([Amos Bird](https://github.com/amosbird)) [\#6316](https://github.com/ClickHouse/ClickHouse/pull/6316) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix `FormatFactory` behaviour for input streams which are not implemented as processors. [\#6495](https://github.com/ClickHouse/ClickHouse/pull/6495) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed a typo. [\#6631](https://github.com/ClickHouse/ClickHouse/pull/6631) ([Alex Ryndin](https://github.com/alexryndin))
+- Typo in an error message (is -\> are). [\#6839](https://github.com/ClickHouse/ClickHouse/pull/6839) ([Denis Zhuravlev](https://github.com/den-crane))
+- Fixed an error while parsing a column list from a string if the type contained a comma (this issue was relevant for the `File`, `URL`, `HDFS` storages). [\#6217](https://github.com/ClickHouse/ClickHouse/issues/6217). [\#6209](https://github.com/ClickHouse/ClickHouse/pull/6209) ([dimarub2000](https://github.com/dimarub2000))
+
+#### Security Fix {#security-fix}
+
+- This release also contains all security bug fixes from 19.13 and 19.11.
+- Fixed the possibility of a fabricated query causing a server crash due to stack overflow in the SQL parser. Fixed the possibility of stack overflow in Merge and Distributed tables, materialized views and conditions for row-level security that involve subqueries. [\#6433](https://github.com/ClickHouse/ClickHouse/pull/6433) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Improvement {#improvement-3}
+
+- Correct implementation of ternary logic for `AND/OR`. [\#6048](https://github.com/ClickHouse/ClickHouse/pull/6048) ([Alexander Kazakov](https://github.com/Akazz))
+- Now values and rows with expired TTL will be removed after an `OPTIMIZE ... FINAL` query from old parts without TTL infos or with outdated TTL infos, e.g. after an `ALTER ... MODIFY TTL` query. Added queries `SYSTEM STOP/START TTL MERGES` to disallow/allow assigning merges with TTL and filtering expired values in all merges. [\#6274](https://github.com/ClickHouse/ClickHouse/pull/6274) ([Anton Popov](https://github.com/CurtizJ))
+- Possibility to change the location of the ClickHouse history file for the client using the `CLICKHOUSE_HISTORY_FILE` env variable. [\#6840](https://github.com/ClickHouse/ClickHouse/pull/6840) ([filimonov](https://github.com/filimonov))
+- Remove the `dry_run` flag from `InterpreterSelectQuery`. … [\#6375](https://github.com/ClickHouse/ClickHouse/pull/6375) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Support `ASOF JOIN` with an `ON` section. [\#6211](https://github.com/ClickHouse/ClickHouse/pull/6211) ([Artem Zuikov](https://github.com/4ertus2))
+- Better support of skip indexes for mutations and replication. Support for the `MATERIALIZE/CLEAR INDEX ... IN PARTITION` query. `UPDATE x = x` recalculates all indexes that use column `x`. [\#5053](https://github.com/ClickHouse/ClickHouse/pull/5053) ([Nikita Vasilev](https://github.com/nikvas0))
+- Allow `ATTACH`ing live views (for example, at server startup) regardless of the `allow_experimental_live_view` setting. [\#6754](https://github.com/ClickHouse/ClickHouse/pull/6754) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- For stack traces gathered by the query profiler, do not include stack frames generated by the query profiler itself. [\#6250](https://github.com/ClickHouse/ClickHouse/pull/6250) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Now the table functions `values`, `file`, `url`, `hdfs` support ALIAS columns. [\#6255](https://github.com/ClickHouse/ClickHouse/pull/6255) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Throw an exception if a `config.d` file doesn't have the corresponding root element as the config file. [\#6123](https://github.com/ClickHouse/ClickHouse/pull/6123) ([dimarub2000](https://github.com/dimarub2000))
+- Print extra info in the exception message for `no space left on device`. [\#6182](https://github.com/ClickHouse/ClickHouse/issues/6182), [\#6252](https://github.com/ClickHouse/ClickHouse/issues/6252) [\#6352](https://github.com/ClickHouse/ClickHouse/pull/6352) ([tavplubix](https://github.com/tavplubix))
+- When determining shards of a `Distributed` table to be covered by a read query (for `optimize_skip_unused_shards` = 1), ClickHouse now checks conditions from both the `prewhere` and `where` clauses of the select statement. [\#6521](https://github.com/ClickHouse/ClickHouse/pull/6521) ([Alexander Kazakov](https://github.com/Akazz))
+- Enabled `SIMDJSON` for machines without AVX2 but with the SSE 4.2 and PCLMUL instruction sets. [\#6285](https://github.com/ClickHouse/ClickHouse/issues/6285) [\#6320](https://github.com/ClickHouse/ClickHouse/pull/6320) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- ClickHouse can work on filesystems without `O_DIRECT` support (such as ZFS and BtrFS) without additional tuning. [\#4449](https://github.com/ClickHouse/ClickHouse/issues/4449) [\#6730](https://github.com/ClickHouse/ClickHouse/pull/6730) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Support push-down predicate for the final subquery. [\#6120](https://github.com/ClickHouse/ClickHouse/pull/6120) ([TCeason](https://github.com/TCeason)) [\#6162](https://github.com/ClickHouse/ClickHouse/pull/6162) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Better `JOIN ON` keys extraction. [\#6131](https://github.com/ClickHouse/ClickHouse/pull/6131) ([Artem Zuikov](https://github.com/4ertus2))
+- Updated `SIMDJSON`. [\#6285](https://github.com/ClickHouse/ClickHouse/issues/6285). [\#6306](https://github.com/ClickHouse/ClickHouse/pull/6306) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Optimize selecting of the smallest column for `SELECT count()` queries. [\#6344](https://github.com/ClickHouse/ClickHouse/pull/6344) ([Amos Bird](https://github.com/amosbird))
+- Added a `strict` parameter in `windowFunnel()`. When `strict` is set, `windowFunnel()` applies conditions only to unique values. [\#6548](https://github.com/ClickHouse/ClickHouse/pull/6548) ([achimbab](https://github.com/achimbab))
+- Safer interface of `mysqlxx::Pool`. [\#6150](https://github.com/ClickHouse/ClickHouse/pull/6150) ([avasiliev](https://github.com/avasiliev))
+- The line length of the `--help` option output now corresponds to the terminal size. [\#6590](https://github.com/ClickHouse/ClickHouse/pull/6590) ([dimarub2000](https://github.com/dimarub2000))
+- Disable the “read in order” optimization for aggregation without keys. [\#6599](https://github.com/ClickHouse/ClickHouse/pull/6599) ([Anton Popov](https://github.com/CurtizJ))
+- The HTTP status code for the `INCORRECT_DATA` and `TYPE_MISMATCH` error codes was changed from the default `500 Internal Server Error` to `400 Bad Request`. [\#6271](https://github.com/ClickHouse/ClickHouse/pull/6271) ([Alexander Rodin](https://github.com/a-rodin))
+- Move the Join object from `ExpressionAction` into `AnalyzedJoin`. `ExpressionAnalyzer` and `ExpressionAction` do not know about the `Join` class anymore. Its logic is hidden behind the `AnalyzedJoin` interface. [\#6801](https://github.com/ClickHouse/ClickHouse/pull/6801) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed a possible deadlock of distributed queries when one of the shards is localhost but the query is sent via a network connection. [\#6759](https://github.com/ClickHouse/ClickHouse/pull/6759) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Changed the semantics of multiple-table `RENAME` to avoid possible deadlocks. [\#6757](https://github.com/ClickHouse/ClickHouse/issues/6757). [\#6756](https://github.com/ClickHouse/ClickHouse/pull/6756) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Rewrote the MySQL compatibility server to prevent loading the full packet payload into memory. Decreased memory consumption for each connection to approximately `2 * DBMS_DEFAULT_BUFFER_SIZE` (read/write buffers). [\#5811](https://github.com/ClickHouse/ClickHouse/pull/5811) ([Yuriy Baranov](https://github.com/yurriy))
+- Move the AST alias interpreting logic out of the parser, which doesn't have to know anything about query semantics. [\#6108](https://github.com/ClickHouse/ClickHouse/pull/6108) ([Artem Zuikov](https://github.com/4ertus2))
+- Slightly safer parsing of `NamesAndTypesList`. [\#6408](https://github.com/ClickHouse/ClickHouse/issues/6408). [\#6410](https://github.com/ClickHouse/ClickHouse/pull/6410) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `clickhouse-copier`: Allow using `where_condition` from the config with the `partition_key` alias in queries for checking partition existence (earlier it was used only in data-reading queries). [\#6577](https://github.com/ClickHouse/ClickHouse/pull/6577) ([proller](https://github.com/proller))
+- Added an optional message argument in `throwIf`. ([\#5772](https://github.com/ClickHouse/ClickHouse/issues/5772)) [\#6329](https://github.com/ClickHouse/ClickHouse/pull/6329) ([Vdimir](https://github.com/Vdimir))
+- A server exception received while sending insertion data is now processed in the client as well. [\#5891](https://github.com/ClickHouse/ClickHouse/issues/5891) [\#6711](https://github.com/ClickHouse/ClickHouse/pull/6711) ([dimarub2000](https://github.com/dimarub2000))
+- Added a metric `DistributedFilesToInsert` that shows the total number of files in the filesystem that are selected to be sent to remote servers by Distributed tables. The number is summed across all shards. [\#6600](https://github.com/ClickHouse/ClickHouse/pull/6600) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Move most of the JOINs prepare logic from `ExpressionAction/ExpressionAnalyzer` to `AnalyzedJoin`. [\#6785](https://github.com/ClickHouse/ClickHouse/pull/6785) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix a TSan [warning](https://clickhouse-test-reports.s3.yandex.net/6399/c1c1d1daa98e199e620766f1bd06a5921050a00d/functional_stateful_tests_(thread).html) ‘lock-order-inversion’. [\#6740](https://github.com/ClickHouse/ClickHouse/pull/6740) ([Vasily Nemkov](https://github.com/Enmk))
+- Better information messages about the lack of Linux capabilities. Logging fatal errors with the “fatal” level will make them easier to find in `system.text_log`. [\#6441](https://github.com/ClickHouse/ClickHouse/pull/6441) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- When dumping temporary data to disk to restrict memory usage during `GROUP BY`, `ORDER BY` is enabled, the free disk space was not checked. The fix adds a new setting `min_free_disk_space`: when the free disk space is smaller than this threshold, the query will stop and throw `ErrorCodes::NOT_ENOUGH_SPACE`. [\#6678](https://github.com/ClickHouse/ClickHouse/pull/6678) ([Weiqing Xu](https://github.com/weiqxu)) [\#6691](https://github.com/ClickHouse/ClickHouse/pull/6691) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Removed the recursive rwlock by thread. It made no sense, because threads are reused between queries. A `SELECT` query may acquire a lock in one thread, hold a lock from another thread and exit from the first thread. At the same time, the first thread can be reused by a `DROP` query. This led to false “Attempt to acquire exclusive lock recursively” messages. [\#6771](https://github.com/ClickHouse/ClickHouse/pull/6771) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Split `ExpressionAnalyzer.appendJoin()`. Prepare a place in `ExpressionAnalyzer` for `MergeJoin`. [\#6524](https://github.com/ClickHouse/ClickHouse/pull/6524) ([Artem Zuikov](https://github.com/4ertus2))
+- Added the `mysql_native_password` authentication plugin to the MySQL compatibility server. [\#6194](https://github.com/ClickHouse/ClickHouse/pull/6194) ([Yuriy Baranov](https://github.com/yurriy))
+- Fewer `clock_gettime` calls; fixed ABI compatibility between debug/release builds in `Allocator` (an insignificant issue). [\#6197](https://github.com/ClickHouse/ClickHouse/pull/6197) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Move `collectUsedColumns` from `ExpressionAnalyzer` to `SyntaxAnalyzer`. `SyntaxAnalyzer` now makes `required_source_columns` itself. [\#6416](https://github.com/ClickHouse/ClickHouse/pull/6416) ([Artem Zuikov](https://github.com/4ertus2))
+- Add the setting `joined_subquery_requires_alias` to require aliases for subselects and table functions in `FROM` when more than one table is present (i.e. queries with JOINs). [\#6733](https://github.com/ClickHouse/ClickHouse/pull/6733) ([Artem Zuikov](https://github.com/4ertus2))
+- Extract the `GetAggregatesVisitor` class from `ExpressionAnalyzer`. [\#6458](https://github.com/ClickHouse/ClickHouse/pull/6458) ([Artem Zuikov](https://github.com/4ertus2))
+- `system.query_log`: change the data type of the `type` column to `Enum`. [\#6265](https://github.com/ClickHouse/ClickHouse/pull/6265) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Static linking of the `sha256_password` authentication plugin. [\#6512](https://github.com/ClickHouse/ClickHouse/pull/6512) ([Yuriy Baranov](https://github.com/yurriy))
+- Avoid extra dependencies for the `compile` setting to work. In previous versions, the user may get errors like `cannot open crti.o`, `unable to find library -lc`, etc. [\#6309](https://github.com/ClickHouse/ClickHouse/pull/6309) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- More validation of input that may come from a malicious replica. [\#6303](https://github.com/ClickHouse/ClickHouse/pull/6303) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Now the `clickhouse-obfuscator` file is available in the `clickhouse-client` package. In previous versions it was available as `clickhouse obfuscator` (with whitespace). [\#5816](https://github.com/ClickHouse/ClickHouse/issues/5816) [\#6609](https://github.com/ClickHouse/ClickHouse/pull/6609) ([dimarub2000](https://github.com/dimarub2000))
+- Fixed a deadlock when we have at least two queries that read at least two tables in different order and another query that performs a DDL operation on one of the tables. Fixed another very rare deadlock. [\#6764](https://github.com/ClickHouse/ClickHouse/pull/6764) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added the `os_thread_ids` column to `system.processes` and `system.query_log` for better debugging possibilities. [\#6763](https://github.com/ClickHouse/ClickHouse/pull/6763) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- A workaround for PHP mysqlnd extension bugs which occur when `sha256_password` is used as the default authentication plugin (described in [\#6031](https://github.com/ClickHouse/ClickHouse/issues/6031)). [\#6113](https://github.com/ClickHouse/ClickHouse/pull/6113) ([Yuriy Baranov](https://github.com/yurriy))
+- Remove an unneeded place with changed nullability columns. [\#6693](https://github.com/ClickHouse/ClickHouse/pull/6693) ([Artem Zuikov](https://github.com/4ertus2))
+- Set the default value of `queue_max_wait_ms` to zero, because the current value (five seconds) made no sense. There are rare circumstances when this setting has any use. Added the settings `replace_running_query_max_wait_ms`, `kafka_max_wait_ms` and `connection_pool_max_wait_ms` for disambiguation. [\#6692](https://github.com/ClickHouse/ClickHouse/pull/6692) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Extract `SelectQueryExpressionAnalyzer` from `ExpressionAnalyzer`. Keep the last one for non-select queries. [\#6499](https://github.com/ClickHouse/ClickHouse/pull/6499) ([Artem Zuikov](https://github.com/4ertus2))
+- Removed duplicating input and output formats. [\#6239](https://github.com/ClickHouse/ClickHouse/pull/6239) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Allow the user to override the `poll_interval` and `idle_connection_timeout` settings on connection. [\#6230](https://github.com/ClickHouse/ClickHouse/pull/6230) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `MergeTree` now has an additional option `ttl_only_drop_parts` (disabled by default) to avoid partial pruning of parts, so that they are dropped completely when all the rows in a part have expired. [\#6191](https://github.com/ClickHouse/ClickHouse/pull/6191) ([Sergi Vladykin](https://github.com/svladykin))
+- Type checks for set index functions. Throw an exception if the function got a wrong type. This fixes a fuzz test with UBSan. [\#6511](https://github.com/ClickHouse/ClickHouse/pull/6511) ([Nikita Vasilev](https://github.com/nikvas0))
+
+#### Performance Improvement {#performance-improvement-2}
+
+- Optimize queries with an `ORDER BY expressions` clause, where `expressions` have a coinciding prefix with the sorting key in `MergeTree` tables. This optimization is controlled by the `optimize_read_in_order` setting (see the sketch after this list). [\#6054](https://github.com/ClickHouse/ClickHouse/pull/6054) [\#6629](https://github.com/ClickHouse/ClickHouse/pull/6629) ([Anton Popov](https://github.com/CurtizJ))
+- Allow using multiple threads during parts loading and removal. [\#6372](https://github.com/ClickHouse/ClickHouse/issues/6372) [\#6074](https://github.com/ClickHouse/ClickHouse/issues/6074) [\#6438](https://github.com/ClickHouse/ClickHouse/pull/6438) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Implemented a batch variant of updating aggregate function states. It may lead to performance benefits. [\#6435](https://github.com/ClickHouse/ClickHouse/pull/6435) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Use the `FastOps` library for the functions `exp`, `log`, `sigmoid`, `tanh`. FastOps is a fast vector math library from Michael Parakhin (Yandex CTO). Improved performance of the `exp` and `log` functions more than 6 times. The functions `exp` and `log` from a `Float32` argument return `Float32` (in previous versions they always returned `Float64`). Now `exp(nan)` may return `inf`. The result of the `exp` and `log` functions may not be the nearest machine-representable number to the true answer. [\#6254](https://github.com/ClickHouse/ClickHouse/pull/6254) ([alexey-milovidov](https://github.com/alexey-milovidov)) Using Danila Kutenin's variant to make fastops work [\#6317](https://github.com/ClickHouse/ClickHouse/pull/6317) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Disable consecutive key optimization for `UInt8/16`. [\#6298](https://github.com/ClickHouse/ClickHouse/pull/6298) [\#6701](https://github.com/ClickHouse/ClickHouse/pull/6701) ([akuzm](https://github.com/akuzm))
+- Improved performance of the `simdjson` library by getting rid of dynamic allocation in `ParsedJson::Iterator`. [\#6479](https://github.com/ClickHouse/ClickHouse/pull/6479) ([Vitaly Baranov](https://github.com/vitlibar))
+- Pre-fault pages when allocating memory with `mmap()`. [\#6667](https://github.com/ClickHouse/ClickHouse/pull/6667) ([akuzm](https://github.com/akuzm))
+- Fix a performance bug in `Decimal` comparison. [\#6380](https://github.com/ClickHouse/ClickHouse/pull/6380) ([Artem Zuikov](https://github.com/4ertus2))
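+
+A minimal sketch of the read-in-order optimization described in the first item of this list (table and data are illustrative):
+
+``` sql
+CREATE TABLE hits (d Date, user UInt64, url String)
+ENGINE = MergeTree ORDER BY (d, user);
+
+-- ORDER BY over a prefix of the sorting key can read parts in order
+-- instead of doing a full sort, when optimize_read_in_order = 1.
+SELECT * FROM hits ORDER BY d, user LIMIT 10;
+```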
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-4}
+
+- Remove the Compiler (runtime template instantiation). [\#6646](https://github.com/ClickHouse/ClickHouse/pull/6646) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added a performance test to show the degradation of performance in gcc-9 in a more isolated way. [\#6302](https://github.com/ClickHouse/ClickHouse/pull/6302) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added the table function `numbers_mt`, which is a multithreaded version of `numbers`. Updated performance tests with hash functions. [\#6554](https://github.com/ClickHouse/ClickHouse/pull/6554) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Comparison mode in `clickhouse-benchmark`. [\#6220](https://github.com/ClickHouse/ClickHouse/issues/6220) [\#6343](https://github.com/ClickHouse/ClickHouse/pull/6343) ([dimarub2000](https://github.com/dimarub2000))
+- Best effort for printing stack traces. Also added `SIGPROF` as a debugging signal to print the stack trace of a running thread. [\#6529](https://github.com/ClickHouse/ClickHouse/pull/6529) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Every function in its own file, part 10. [\#6321](https://github.com/ClickHouse/ClickHouse/pull/6321) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Remove the duplicated constant `TABLE_IS_READ_ONLY`. [\#6566](https://github.com/ClickHouse/ClickHouse/pull/6566) ([filimonov](https://github.com/filimonov))
+- Formatting changes for the `StringHashMap` PR [\#5417](https://github.com/ClickHouse/ClickHouse/issues/5417). [\#6700](https://github.com/ClickHouse/ClickHouse/pull/6700) ([akuzm](https://github.com/akuzm))
+- Better subquery for join creation in `ExpressionAnalyzer`. [\#6824](https://github.com/ClickHouse/ClickHouse/pull/6824) ([Artem Zuikov](https://github.com/4ertus2))
+- Remove a redundant condition (found by PVS Studio). [\#6775](https://github.com/ClickHouse/ClickHouse/pull/6775) ([akuzm](https://github.com/akuzm))
+- Separate the hash table interface for `ReverseIndex`. [\#6672](https://github.com/ClickHouse/ClickHouse/pull/6672) ([akuzm](https://github.com/akuzm))
+- Refactoring of settings. [\#6689](https://github.com/ClickHouse/ClickHouse/pull/6689) ([alesapin](https://github.com/alesapin))
+- Add comments for `set` index functions. [\#6319](https://github.com/ClickHouse/ClickHouse/pull/6319) ([Nikita Vasilev](https://github.com/nikvas0))
+- Increase the OOM score in the debug version on Linux. [\#6152](https://github.com/ClickHouse/ClickHouse/pull/6152) ([akuzm](https://github.com/akuzm))
+- HDFS HA now works in debug builds. [\#6650](https://github.com/ClickHouse/ClickHouse/pull/6650) ([Weiqing Xu](https://github.com/weiqxu))
+- Added a test for `transform_query_for_external_database`. [\#6388](https://github.com/ClickHouse/ClickHouse/pull/6388) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add a test for multiple materialized views for a Kafka table. [\#6509](https://github.com/ClickHouse/ClickHouse/pull/6509) ([Ivan](https://github.com/abyss7))
+- Make a better build scheme. [\#6500](https://github.com/ClickHouse/ClickHouse/pull/6500) ([Ivan](https://github.com/abyss7))
+- Fixed the `test_external_dictionaries` integration test in case it is executed under a non-root user. [\#6507](https://github.com/ClickHouse/ClickHouse/pull/6507) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Added a test for the bug that reproduces when the total size of written packets exceeds `DBMS_DEFAULT_BUFFER_SIZE`. [\#6204](https://github.com/ClickHouse/ClickHouse/pull/6204) ([Yuriy Baranov](https://github.com/yurriy))
+- Added a test for the `RENAME` table race condition. [\#6752](https://github.com/ClickHouse/ClickHouse/pull/6752) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Avoid a data race on Settings in `KILL QUERY`. [\#6753](https://github.com/ClickHouse/ClickHouse/pull/6753) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add an integration test for handling errors by a cache dictionary. [\#6755](https://github.com/ClickHouse/ClickHouse/pull/6755) ([Vitaly Baranov](https://github.com/vitlibar))
+- Disable parsing of ELF object files on Mac OS. [\#6578](https://github.com/ClickHouse/ClickHouse/pull/6578) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Attempt to make the changelog generator better. [\#6327](https://github.com/ClickHouse/ClickHouse/pull/6327) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Adding the `-Wshadow` switch to GCC. [\#6325](https://github.com/ClickHouse/ClickHouse/pull/6325) ([kreuzerkrieg](https://github.com/kreuzerkrieg))
+- Removed obsolete code for `mimalloc` support. [\#6715](https://github.com/ClickHouse/ClickHouse/pull/6715) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `zlib-ng` determines x86 capabilities and saves this info to global variables. This is done in the `deflateInit` call, which may be made by different threads simultaneously. To avoid multithreaded writes, do it on library startup. [\#6141](https://github.com/ClickHouse/ClickHouse/pull/6141) ([akuzm](https://github.com/akuzm))
+- Regression test for a bug in JOIN which was fixed in [\#5192](https://github.com/ClickHouse/ClickHouse/issues/5192). [\#6147](https://github.com/ClickHouse/ClickHouse/pull/6147) ([Bakhtiyor Ruziev](https://github.com/theruziev))
+- Fixed an MSan report. [\#6144](https://github.com/ClickHouse/ClickHouse/pull/6144) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix the flapping TTL test. [\#6782](https://github.com/ClickHouse/ClickHouse/pull/6782) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed a false data race in the `MergeTreeDataPart::is_frozen` field. [\#6583](https://github.com/ClickHouse/ClickHouse/pull/6583) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed timeouts in the fuzz test. In the previous version, it managed to find a false hangup in the query `SELECT * FROM numbers_mt(gccMurmurHash(''))`. [\#6582](https://github.com/ClickHouse/ClickHouse/pull/6582) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added debug checks to `static_cast` of columns. [\#6581](https://github.com/ClickHouse/ClickHouse/pull/6581) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Support for Oracle Linux in the official RPM packages. [\#6356](https://github.com/ClickHouse/ClickHouse/issues/6356) [\#6585](https://github.com/ClickHouse/ClickHouse/pull/6585) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Changed json perftests from the `once` to the `loop` type. [\#6536](https://github.com/ClickHouse/ClickHouse/pull/6536) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- `odbc-bridge.cpp` defines `main()`, so it should not be included in `clickhouse-lib`. [\#6538](https://github.com/ClickHouse/ClickHouse/pull/6538) ([Orivej Desh](https://github.com/orivej))
+- Test for crash in `FULL|RIGHT JOIN` with nulls in the right table's keys. [\#6362](https://github.com/ClickHouse/ClickHouse/pull/6362) ([Artem Zuikov](https://github.com/4ertus2))
+- Added a test for the limit on expansion of aliases, just in case. [\#6442](https://github.com/ClickHouse/ClickHouse/pull/6442) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Switched from `boost::filesystem` to `std::filesystem` where appropriate. [\#6253](https://github.com/ClickHouse/ClickHouse/pull/6253) [\#6385](https://github.com/ClickHouse/ClickHouse/pull/6385) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added RPM packages. [\#6251](https://github.com/ClickHouse/ClickHouse/pull/6251) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added a test for the fixed `Unknown identifier` exception in the `IN` section. [\#6708](https://github.com/ClickHouse/ClickHouse/pull/6708) ([Artem Zuikov](https://github.com/4ertus2))
+- Simplified `shared_ptr_helper` because people were facing difficulties understanding it. [\#6675](https://github.com/ClickHouse/ClickHouse/pull/6675) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added performance tests for the fixed Gorilla and DoubleDelta codecs. [\#6179](https://github.com/ClickHouse/ClickHouse/pull/6179) ([Vasily Nemkov](https://github.com/Enmk))
+- Split the integration test `test_dictionaries` into four separate tests. [\#6776](https://github.com/ClickHouse/ClickHouse/pull/6776) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fixed a PVS-Studio warning in `PipelineExecutor`. [\#6777](https://github.com/ClickHouse/ClickHouse/pull/6777) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Allow using the `library` dictionary source with ASan. [\#6482](https://github.com/ClickHouse/ClickHouse/pull/6482) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added an option to generate a changelog from a list of PRs. [\#6350](https://github.com/ClickHouse/ClickHouse/pull/6350) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Lock the `TinyLog` storage when reading. [\#6226](https://github.com/ClickHouse/ClickHouse/pull/6226) ([akuzm](https://github.com/akuzm))
+- Check for broken symlinks in CI. [\#6634](https://github.com/ClickHouse/ClickHouse/pull/6634) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Increased the timeout for the “stack overflow” test because it may take a long time in debug build. [\#6637](https://github.com/ClickHouse/ClickHouse/pull/6637) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added a check for double whitespaces. [\#6643](https://github.com/ClickHouse/ClickHouse/pull/6643) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed `new/delete` memory tracking when built with sanitizers. The tracking is not clear; it only prevents memory-limit exceptions in tests. [\#6450](https://github.com/ClickHouse/ClickHouse/pull/6450) ([Artem Zuikov](https://github.com/4ertus2))
+- Enabled back the check for undefined symbols while linking. [\#6453](https://github.com/ClickHouse/ClickHouse/pull/6453) ([Ivan](https://github.com/abyss7))
+- Avoid rebuilding `hyperscan` every day. [\#6307](https://github.com/ClickHouse/ClickHouse/pull/6307) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a UBSan report in `ProtobufWriter`. [\#6163](https://github.com/ClickHouse/ClickHouse/pull/6163) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Don't allow using the query profiler with sanitizers because it is not compatible. [\#6769](https://github.com/ClickHouse/ClickHouse/pull/6769) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added a test for reloading a dictionary after a failure by timer. [\#6114](https://github.com/ClickHouse/ClickHouse/pull/6114) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fixed an inconsistency in the `PipelineExecutor::prepareProcessor` argument type. [\#6494](https://github.com/ClickHouse/ClickHouse/pull/6494) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Added a test for bad URIs. [\#6493](https://github.com/ClickHouse/ClickHouse/pull/6493) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added more checks to the `CAST` function. This should get more information about the segmentation fault in fuzzy tests. [\#6346](https://github.com/ClickHouse/ClickHouse/pull/6346) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Added `gcc-9` support to the `docker/builder` container that builds images locally. [\#6333](https://github.com/ClickHouse/ClickHouse/pull/6333) ([Gleb Novikov](https://github.com/NanoBjorn))
+- Test for primary key with `LowCardinality(String)`. [\#5044](https://github.com/ClickHouse/ClickHouse/issues/5044) [\#6219](https://github.com/ClickHouse/ClickHouse/pull/6219) ([dimarub2000](https://github.com/dimarub2000))
+- Fixed tests affected by slow stack trace printing. [\#6315](https://github.com/ClickHouse/ClickHouse/pull/6315) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added a test case for the crash in `groupUniqArray` fixed in [\#6029](https://github.com/ClickHouse/ClickHouse/pull/6029). [\#4402](https://github.com/ClickHouse/ClickHouse/issues/4402) [\#6129](https://github.com/ClickHouse/ClickHouse/pull/6129) ([akuzm](https://github.com/akuzm))
+- Fixed index mutation tests. [\#6645](https://github.com/ClickHouse/ClickHouse/pull/6645) ([Nikita Vasilev](https://github.com/nikvas0))
+- Performance test… [\#6427](https://github.com/ClickHouse/ClickHouse/pull/6427) ([akuzm](https://github.com/akuzm))
+- Materialized views can now be created with Low Cardinality types regardless of the setting about suspicious Low Cardinality types. [\#6428](https://github.com/ClickHouse/ClickHouse/pull/6428) ([Olga Khvostikova](https://github.com/stavrolia))
+- Updated tests for the `send_logs_level` setting. [\#6207](https://github.com/ClickHouse/ClickHouse/pull/6207) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed build under gcc-8.2. [\#6196](https://github.com/ClickHouse/ClickHouse/pull/6196) ([Max Akhmedov](https://github.com/zlobober))
+- Fixed build with the internal libc++. [\#6724](https://github.com/ClickHouse/ClickHouse/pull/6724) ([Ivan](https://github.com/abyss7))
+- Fixed shared build with the `rdkafka` library. [\#6101](https://github.com/ClickHouse/ClickHouse/pull/6101) ([Ivan](https://github.com/abyss7))
+- Fixes for the Mac OS build (incomplete). [\#6390](https://github.com/ClickHouse/ClickHouse/pull/6390) ([alexey-milovidov](https://github.com/alexey-milovidov)) [\#6429](https://github.com/ClickHouse/ClickHouse/pull/6429) ([alex-zaitsev](https://github.com/alex-zaitsev))
+- Fix “splitted” build. [\#6618](https://github.com/ClickHouse/ClickHouse/pull/6618) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Other build fixes: [\#6186](https://github.com/ClickHouse/ClickHouse/pull/6186) ([Amos Bird](https://github.com/amosbird)) [\#6486](https://github.com/ClickHouse/ClickHouse/pull/6486) [\#6348](https://github.com/ClickHouse/ClickHouse/pull/6348) ([vxider](https://github.com/Vxider)) [\#6744](https://github.com/ClickHouse/ClickHouse/pull/6744) ([Ivan](https://github.com/abyss7)) [\#6016](https://github.com/ClickHouse/ClickHouse/pull/6016) [\#6421](https://github.com/ClickHouse/ClickHouse/pull/6421) [\#6491](https://github.com/ClickHouse/ClickHouse/pull/6491) ([proller](https://github.com/proller))
+
+#### Backward Incompatible Change {#backward-incompatible-change-3}
+
+- Removed the rarely used table function `catBoostPool` and storage `CatBoostPool`. If you have used this table function, please write an email to `clickhouse-feedback@yandex-team.com`. Note that CatBoost integration remains and will be supported. [\#6279](https://github.com/ClickHouse/ClickHouse/pull/6279) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Disable `ANY RIGHT JOIN` and `ANY FULL JOIN` by default. Set the `any_join_distinct_right_table_keys` setting to enable them. [\#5126](https://github.com/ClickHouse/ClickHouse/issues/5126) [\#6351](https://github.com/ClickHouse/ClickHouse/pull/6351) ([Artem Zuikov](https://github.com/4ertus2))
+
+## ClickHouse Release 19.13 {#clickhouse-release-19-13}
+
+### ClickHouse Release 19.13.6.51, 2019-10-02 {#clickhouse-release-19-13-6-51-2019-10-02}
+
+#### Bug Fix {#bug-fix-9}
+
+- This release also contains all bug fixes from 19.11.12.69.
+
+### ClickHouse Release 19.13.5.44, 2019-09-20 {#clickhouse-release-19-13-5-44-2019-09-20}
+
+#### Bug Fix {#bug-fix-10}
+
+- This release also contains all bug fixes from 19.14.6.12.
+- Fixed a possibly inconsistent state of a table while executing a `DROP` query for a replicated table while ZooKeeper is not accessible. [\#6045](https://github.com/ClickHouse/ClickHouse/issues/6045) [\#6413](https://github.com/ClickHouse/ClickHouse/pull/6413) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Fix for a data race in StorageMerge. [\#6717](https://github.com/ClickHouse/ClickHouse/pull/6717) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a bug introduced in the query profiler which led to endless recv from a socket. [\#6386](https://github.com/ClickHouse/ClickHouse/pull/6386) ([alesapin](https://github.com/alesapin))
+- Fixed excessive CPU usage while executing the `JSONExtractRaw` function over a boolean value. [\#6208](https://github.com/ClickHouse/ClickHouse/pull/6208) ([Vitaly Baranov](https://github.com/vitlibar))
+- Materialized view… [\#6415](https://github.com/ClickHouse/ClickHouse/pull/6415) ([Ivan](https://github.com/abyss7))
+- The table function `url` had a vulnerability which allowed an attacker to inject arbitrary HTTP headers into the request. This issue was found by [Nikita Tikhomirov](https://github.com/NSTikhomirov). [\#6466](https://github.com/ClickHouse/ClickHouse/pull/6466) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix a useless `AST` check in the Set index. [\#6510](https://github.com/ClickHouse/ClickHouse/issues/6510) [\#6651](https://github.com/ClickHouse/ClickHouse/pull/6651) ([Nikita Vasilev](https://github.com/nikvas0))
+- Fixed parsing of `AggregateFunction` values embedded in a query. [\#6575](https://github.com/ClickHouse/ClickHouse/issues/6575) [\#6773](https://github.com/ClickHouse/ClickHouse/pull/6773) ([Zhichang Yu](https://github.com/yuzhichang))
+- Fixed wrong behaviour of the `trim` function family. [\#6647](https://github.com/ClickHouse/ClickHouse/pull/6647) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse Release 19.13.4.32, 2019-09-10 {#clickhouse-release-19-13-4-32-2019-09-10}
+
+#### Bug Fix {#bug-fix-11}
+
+- This release also contains all bug and security fixes from 19.11.9.52 and 19.11.10.54.
+- Fixed a data race in the `system.parts` table and `ALTER` queries. [\#6245](https://github.com/ClickHouse/ClickHouse/issues/6245) [\#6513](https://github.com/ClickHouse/ClickHouse/pull/6513) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed mismatched headers in streams that happened when reading from an empty distributed table with sample and prewhere. [\#6167](https://github.com/ClickHouse/ClickHouse/issues/6167) ([Lixiang Qian](https://github.com/fancyqlx)) [\#6823](https://github.com/ClickHouse/ClickHouse/pull/6823) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed a crash when using an `IN` clause with a subquery containing a tuple. [\#6125](https://github.com/ClickHouse/ClickHouse/issues/6125) [\#6550](https://github.com/ClickHouse/ClickHouse/pull/6550) ([tavplubix](https://github.com/tavplubix))
+- Fixed the case of identical column names in the `GLOBAL JOIN ON` section. [\#6181](https://github.com/ClickHouse/ClickHouse/pull/6181) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed a crash when casting types to `Decimal` that do not support it. An exception is thrown instead. [\#6297](https://github.com/ClickHouse/ClickHouse/pull/6297) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed a crash in the `extractAll()` function. [\#6644](https://github.com/ClickHouse/ClickHouse/pull/6644) ([Artem Zuikov](https://github.com/4ertus2))
+- Query transformation for the `MySQL`, `ODBC`, `JDBC` table functions now works properly for `SELECT WHERE` queries with multiple `AND` expressions. [\#6381](https://github.com/ClickHouse/ClickHouse/issues/6381) [\#6676](https://github.com/ClickHouse/ClickHouse/pull/6676) ([dimarub2000](https://github.com/dimarub2000))
+- Added previous declaration checks for MySQL 8 integration. [\#6569](https://github.com/ClickHouse/ClickHouse/pull/6569) ([Rafael David Tinoco](https://github.com/rafaeldtinoco))
+
+#### Security Fix {#security-fix-1}
+
+- Fixed two vulnerabilities in codecs in the decompression phase (a malicious user could fabricate compressed data that would lead to a buffer overflow in decompression). [\#6670](https://github.com/ClickHouse/ClickHouse/pull/6670) ([Artem Zuikov](https://github.com/4ertus2))
+
+### ClickHouse Release 19.13.3.26, 2019-08-22 {#clickhouse-release-19-13-3-26-2019-08-22}
+
+#### Bug Fix {#bug-fix-12}
+
+- Fix `ALTER TABLE ... UPDATE` queries for tables with `enable_mixed_granularity_parts=1`. [\#6543](https://github.com/ClickHouse/ClickHouse/pull/6543) ([alesapin](https://github.com/alesapin))
+- Fixed an NPE when using an IN clause with a subquery containing a tuple. [\#6125](https://github.com/ClickHouse/ClickHouse/issues/6125) [\#6550](https://github.com/ClickHouse/ClickHouse/pull/6550) ([tavplubix](https://github.com/tavplubix))
+- Fixed an issue where, if a stale replica became alive, it might still have data parts that had already been removed. [\#6522](https://github.com/ClickHouse/ClickHouse/issues/6522) [\#6523](https://github.com/ClickHouse/ClickHouse/pull/6523) ([tavplubix](https://github.com/tavplubix))
+- Fixed an issue with parsing CSV. [\#6426](https://github.com/ClickHouse/ClickHouse/issues/6426) [\#6559](https://github.com/ClickHouse/ClickHouse/pull/6559) ([tavplubix](https://github.com/tavplubix))
+- Fixed a data race in the system.parts table and `ALTER` queries. This fixes [\#6245](https://github.com/ClickHouse/ClickHouse/issues/6245). [\#6513](https://github.com/ClickHouse/ClickHouse/pull/6513) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed wrong code in mutations that could lead to memory corruption. Fixed a segfault with reads at address `0x14c0` that could happen due to concurrent `DROP TABLE` and `SELECT` from `system.parts` or `system.parts_columns`. Fixed a race condition in the preparation of mutation queries. Fixed a deadlock caused by `OPTIMIZE` of replicated tables and concurrent modification operations like ALTERs. [\#6514](https://github.com/ClickHouse/ClickHouse/pull/6514) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed possible data loss after an `ALTER DELETE` query on a table with a skipping index. [\#6224](https://github.com/ClickHouse/ClickHouse/issues/6224) [\#6282](https://github.com/ClickHouse/ClickHouse/pull/6282) ([Nikita Vasilev](https://github.com/nikvas0))
+
+#### Security Fix {#security-fix-2}
+
+- If an attacker has write access to ZooKeeper and is able to run a custom server available from the network where ClickHouse runs, they can create a custom-built malicious server that will act as a ClickHouse replica and register it in ZooKeeper. When another replica fetches a data part from the malicious replica, it can force clickhouse-server to write to an arbitrary path on the filesystem. Found by Eldar Zaitov, information security team at Yandex. [\#6247](https://github.com/ClickHouse/ClickHouse/pull/6247) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse Release 19.13.2.19, 2019-08-14 {#clickhouse-release-19-13-2-19-2019-08-14}
+
+#### New Feature {#new-feature-5}
+
+- Sampling profiler on query level. [Example](https://gist.github.com/alexey-milovidov/92758583dd41c24c360fdb8d6a4da194). [\#4247](https://github.com/ClickHouse/ClickHouse/issues/4247) ([laplab](https://github.com/laplab)) [\#6124](https://github.com/ClickHouse/ClickHouse/pull/6124) ([alexey-milovidov](https://github.com/alexey-milovidov)) [\#6250](https://github.com/ClickHouse/ClickHouse/pull/6250) [\#6283](https://github.com/ClickHouse/ClickHouse/pull/6283) [\#6386](https://github.com/ClickHouse/ClickHouse/pull/6386)
+- Allow specifying a list of columns with the `COLUMNS('regexp')` expression, which works like a more sophisticated variant of the `*` asterisk (see the sketch after this list). [\#5951](https://github.com/ClickHouse/ClickHouse/pull/5951) ([mfridental](https://github.com/mfridental)), ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `CREATE TABLE AS table_function()` is now possible (also shown below). [\#6057](https://github.com/ClickHouse/ClickHouse/pull/6057) ([dimarub2000](https://github.com/dimarub2000))
+- The Adam optimizer for stochastic gradient descent is used by default in the `stochasticLinearRegression()` and `stochasticLogisticRegression()` aggregate functions, because it shows good quality with almost no tuning. [\#6000](https://github.com/ClickHouse/ClickHouse/pull/6000) ([Quid37](https://github.com/Quid37))
+- Added functions for working with the custom week number. [\#5212](https://github.com/ClickHouse/ClickHouse/pull/5212) ([Andy Yang](https://github.com/andyyzh))
+- `RENAME` queries now work with all storages. [\#5953](https://github.com/ClickHouse/ClickHouse/pull/5953) ([Ivan](https://github.com/abyss7))
+- Now the client receives logs from the server with any requested level by setting `send_logs_level`, regardless of the log level specified in the server settings. [\#5964](https://github.com/ClickHouse/ClickHouse/pull/5964) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
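+
+A minimal sketch of the two query-syntax additions above; the `metrics` table and its columns are hypothetical and used only for illustration:
+
+```sql
+-- Hypothetical table with several CPU counters.
+CREATE TABLE metrics (ts DateTime, cpu_user Float64, cpu_system Float64, mem_rss UInt64) ENGINE = Log;
+
+-- COLUMNS('regexp') expands to every column whose name matches the regular expression.
+SELECT COLUMNS('^cpu_') FROM metrics;
+
+-- CREATE TABLE AS table_function(): the new table takes its structure from the table function.
+CREATE TABLE first_ten AS numbers(10);
+SELECT * FROM first_ten;
+```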
+
+#### Backward Incompatible Change {#backward-incompatible-change-4}
+
+- The setting `input_format_defaults_for_omitted_fields` is enabled by default. Inserts into Distributed tables need this setting to be the same across the cluster (you need to set it before rolling an update). It enables calculation of complex default expressions for omitted fields in `JSONEachRow` and `CSV*` formats. It should be the expected behaviour but may lead to a negligible performance difference. [\#6043](https://github.com/ClickHouse/ClickHouse/pull/6043) ([Artem Zuikov](https://github.com/4ertus2)), [\#5625](https://github.com/ClickHouse/ClickHouse/pull/5625) ([akuzm](https://github.com/akuzm))
+
+#### Experimental Features {#experimental-features}
+
+- New query processing pipeline. Use the `experimental_use_processors=1` option to enable it. Use at your own risk. [\#4914](https://github.com/ClickHouse/ClickHouse/pull/4914) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+
+#### Bug Fix {#bug-fix-13}
+
+- Kafka integration has been fixed in this version.
+- Fixed `DoubleDelta` encoding of `Int64` for large `DoubleDelta` values, improved `DoubleDelta` encoding for random data for `Int32`. [\#5998](https://github.com/ClickHouse/ClickHouse/pull/5998) ([Vasily Nemkov](https://github.com/Enmk))
+- Fixed overestimation of `max_rows_to_read` when the setting `merge_tree_uniform_read_distribution` is set to 0. [\#6019](https://github.com/ClickHouse/ClickHouse/pull/6019) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Improvement {#improvement-4}
+
+- Throws an exception if a `config.d` file doesn't have the corresponding root element as the config file. [\#6123](https://github.com/ClickHouse/ClickHouse/pull/6123) ([dimarub2000](https://github.com/dimarub2000))
+
+#### Performance Improvement {#performance-improvement-3}
+
+- Optimize `count()`. Now it uses the smallest column (if possible). [\#6028](https://github.com/ClickHouse/ClickHouse/pull/6028) ([Amos Bird](https://github.com/amosbird))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-5}
+
+- Performance test… [\#5899](https://github.com/ClickHouse/ClickHouse/pull/5899) ([akuzm](https://github.com/akuzm))
+- Fixed build with external `libcxx`. [\#6010](https://github.com/ClickHouse/ClickHouse/pull/6010) ([Ivan](https://github.com/abyss7))
+- Fixed shared build with the `rdkafka` library. [\#6101](https://github.com/ClickHouse/ClickHouse/pull/6101) ([Ivan](https://github.com/abyss7))
+
+## ClickHouse Release 19.11 {#clickhouse-release-19-11}
+
+### ClickHouse Release 19.11.13.74, 2019-11-01 {#clickhouse-release-19-11-13-74-2019-11-01}
+
+#### Bug Fix {#bug-fix-14}
+
+- Fixed a rare crash in `ALTER MODIFY COLUMN` and vertical merge when one of the merged/altered parts is empty (0 rows). [\#6780](https://github.com/ClickHouse/ClickHouse/pull/6780) ([alesapin](https://github.com/alesapin))
+- Manual update of `SIMDJSON`. This fixes possible flooding of stderr files with bogus json diagnostic messages. [\#7548](https://github.com/ClickHouse/ClickHouse/pull/7548) ([Alexander Kazakov](https://github.com/Akazz))
+- Fixed a bug with the `mrk` file extension in mutations. ([alesapin](https://github.com/alesapin))
+
+### ClickHouse Release 19.11.12.69, 2019-10-02 {#clickhouse-release-19-11-12-69-2019-10-02}
+
+#### Bug Fix {#bug-fix-15}
+
+- Fixed performance degradation of index analysis on complex keys on large tables. This fixes [\#6924](https://github.com/ClickHouse/ClickHouse/issues/6924). [\#7075](https://github.com/ClickHouse/ClickHouse/pull/7075) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Avoid rare SIGSEGV while sending data in tables with the Distributed engine (`Failed to send batch: file with index XXXXX is absent`). [\#7032](https://github.com/ClickHouse/ClickHouse/pull/7032) ([Azat Khuzhin](https://github.com/azat))
+- Fix `Unknown identifier` with multiple joins. This fixes [\#5254](https://github.com/ClickHouse/ClickHouse/issues/5254). [\#7022](https://github.com/ClickHouse/ClickHouse/pull/7022) ([Artem Zuikov](https://github.com/4ertus2))
+
+### ClickHouse Release 19.11.11.57, 2019-09-13 {#clickhouse-release-19-11-11-57-2019-09-13}
+
+- Fixed a logical error causing segfaults when selecting from an empty Kafka topic. [\#6902](https://github.com/ClickHouse/ClickHouse/issues/6902) [\#6909](https://github.com/ClickHouse/ClickHouse/pull/6909) ([Ivan](https://github.com/abyss7))
+- Fixed the function `АrrayEnumerateUniqRanked` with empty arrays in params. [\#6928](https://github.com/ClickHouse/ClickHouse/pull/6928) ([proller](https://github.com/proller))
+
+### ClickHouse Release 19.11.10.54, 2019-09-10 {#clickhouse-release-19-11-10-54-2019-09-10}
+
+#### Bug Fix {#bug-fix-16}
+
+- Kafka messages' offsets are now stored manually, so it becomes possible to commit them all at once for all partitions. Fixes potential duplication in the “one consumer - many partitions” scenario. [\#6872](https://github.com/ClickHouse/ClickHouse/pull/6872) ([Ivan](https://github.com/abyss7))
+
+### ClickHouse Release 19.11.9.52, 2019-09-6 {#clickhouse-release-19-11-9-52-2019-09-6}
+
+- Cache… [\#6737](https://github.com/ClickHouse/ClickHouse/pull/6737) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fixed a bug in the function `arrayEnumerateUniqRanked`. [\#6779](https://github.com/ClickHouse/ClickHouse/pull/6779) ([proller](https://github.com/proller))
+- Fixed the `JSONExtract` function while extracting a `Tuple` from JSON. [\#6718](https://github.com/ClickHouse/ClickHouse/pull/6718) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fixed possible data loss after an `ALTER DELETE` query on a table with a skipping index. [\#6224](https://github.com/ClickHouse/ClickHouse/issues/6224) [\#6282](https://github.com/ClickHouse/ClickHouse/pull/6282) ([Nikita Vasilev](https://github.com/nikvas0))
+- Fixed performance tests. [\#6392](https://github.com/ClickHouse/ClickHouse/pull/6392) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Parquet: fixed reading boolean columns. [\#6579](https://github.com/ClickHouse/ClickHouse/pull/6579) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed wrong behaviour of the `nullIf` function for constant arguments. [\#6518](https://github.com/ClickHouse/ClickHouse/pull/6518) ([Guillaume Tassery](https://github.com/YiuRULE)) [\#6580](https://github.com/ClickHouse/ClickHouse/pull/6580) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a Kafka message-duplication problem on normal server restart. [\#6597](https://github.com/ClickHouse/ClickHouse/pull/6597) ([Ivan](https://github.com/abyss7))
+- Fixed an issue where a long `ALTER UPDATE` or `ALTER DELETE` could prevent regular merges from running. Prevents mutations from executing if there are not enough free threads available. [\#6502](https://github.com/ClickHouse/ClickHouse/issues/6502) [\#6617](https://github.com/ClickHouse/ClickHouse/pull/6617) ([tavplubix](https://github.com/tavplubix))
+- Fixed an error in processing “timezone” in the server configuration file. [\#6709](https://github.com/ClickHouse/ClickHouse/pull/6709) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed Kafka tests. [\#6805](https://github.com/ClickHouse/ClickHouse/pull/6805) ([Ivan](https://github.com/abyss7))
+
+#### Security Fix {#security-fix-3}
+
+- If an attacker has write access to ZooKeeper and is able to run a custom server available from the network where ClickHouse runs, they can create a custom-built malicious server that will act as a ClickHouse replica and register it in ZooKeeper. When another replica fetches a data part from the malicious replica, it can force clickhouse-server to write to an arbitrary path on the filesystem. Found by Eldar Zaitov, information security team at Yandex. [\#6247](https://github.com/ClickHouse/ClickHouse/pull/6247) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse Release 19.11.8.46, 2019-08-22 {#clickhouse-release-19-11-8-46-2019-08-22}
+
+#### Bug Fix {#bug-fix-17}
+
+- Fix `ALTER TABLE ... UPDATE` queries for tables with `enable_mixed_granularity_parts=1`. [\#6543](https://github.com/ClickHouse/ClickHouse/pull/6543) ([alesapin](https://github.com/alesapin))
+- Fixed an NPE when using an IN clause with a subquery containing a tuple. [\#6125](https://github.com/ClickHouse/ClickHouse/issues/6125) [\#6550](https://github.com/ClickHouse/ClickHouse/pull/6550) ([tavplubix](https://github.com/tavplubix))
+- Fixed an issue where, if a stale replica became alive, it might still have data parts that had already been removed. [\#6522](https://github.com/ClickHouse/ClickHouse/issues/6522) [\#6523](https://github.com/ClickHouse/ClickHouse/pull/6523) ([tavplubix](https://github.com/tavplubix))
+- Fixed an issue with parsing CSV. [\#6426](https://github.com/ClickHouse/ClickHouse/issues/6426) [\#6559](https://github.com/ClickHouse/ClickHouse/pull/6559) ([tavplubix](https://github.com/tavplubix))
+- Fixed a data race in the system.parts table and `ALTER` queries. This fixes [\#6245](https://github.com/ClickHouse/ClickHouse/issues/6245). [\#6513](https://github.com/ClickHouse/ClickHouse/pull/6513) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed wrong code in mutations that could lead to memory corruption. Fixed a segfault with reads at address `0x14c0` that could happen due to concurrent `DROP TABLE` and `SELECT` from `system.parts` or `system.parts_columns`. Fixed a race condition in the preparation of mutation queries. Fixed a deadlock caused by `OPTIMIZE` of replicated tables and concurrent modification operations like ALTERs. [\#6514](https://github.com/ClickHouse/ClickHouse/pull/6514) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse Release 19.11.7.40, 2019-08-14 {#clickhouse-release-19-11-7-40-2019-08-14}
+
+#### Bug Fix {#bug-fix-18}
+
+- Kafka integration has been fixed in this version.
+- Fixed a segfault when using `arrayReduce` for constant arguments. [\#6326](https://github.com/ClickHouse/ClickHouse/pull/6326) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed `toFloat()` monotonicity. [\#6374](https://github.com/ClickHouse/ClickHouse/pull/6374) ([dimarub2000](https://github.com/dimarub2000))
+- Fixed a segfault with enabled `optimize_skip_unused_shards` and a missing sharding key. [\#6384](https://github.com/ClickHouse/ClickHouse/pull/6384) ([CurtizJ](https://github.com/CurtizJ))
+- Fixed the logic of the `arrayEnumerateUniqRanked` function. [\#6423](https://github.com/ClickHouse/ClickHouse/pull/6423) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Removed extra verbose logging from the MySQL handler. [\#6389](https://github.com/ClickHouse/ClickHouse/pull/6389) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed wrong behaviour and possible segfaults in the `topK` and `topKWeighted` aggregate functions. [\#6404](https://github.com/ClickHouse/ClickHouse/pull/6404) ([CurtizJ](https://github.com/CurtizJ))
+- Do not expose virtual columns in the `system.columns` table. This is required for backward compatibility. [\#6406](https://github.com/ClickHouse/ClickHouse/pull/6406) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a bug with memory allocation for string fields in a complex-key cache dictionary. [\#6447](https://github.com/ClickHouse/ClickHouse/pull/6447) ([alesapin](https://github.com/alesapin))
+- Fixed a bug with enabling adaptive granularity when creating a new replica for `Replicated*MergeTree` tables. [\#6452](https://github.com/ClickHouse/ClickHouse/pull/6452) ([alesapin](https://github.com/alesapin))
+- Fixed an infinite loop when reading Kafka messages. [\#6354](https://github.com/ClickHouse/ClickHouse/pull/6354) ([abyss7](https://github.com/abyss7))
+- Fixed the possibility of a fabricated query causing a server crash due to stack overflow in the SQL parser, and the possibility of stack overflow in `Merge` and `Distributed` tables. [\#6433](https://github.com/ClickHouse/ClickHouse/pull/6433) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a Gorilla encoding error on small sequences. [\#6444](https://github.com/ClickHouse/ClickHouse/pull/6444) ([Enmk](https://github.com/Enmk))
+
+#### Improvement {#improvement-5}
+
+- Allow users to override the `poll_interval` and `idle_connection_timeout` settings on connection. [\#6230](https://github.com/ClickHouse/ClickHouse/pull/6230) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse Release 19.11.5.28, 2019-08-05 {#clickhouse-release-19-11-5-28-2019-08-05}
+
+#### Bug Fix {#bug-fix-19}
+
+- Fixed the possibility of hanging queries when the server is overloaded. [\#6301](https://github.com/ClickHouse/ClickHouse/pull/6301) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed FPE in the yandexConsistentHash function. This fixes [\#6304](https://github.com/ClickHouse/ClickHouse/issues/6304). [\#6126](https://github.com/ClickHouse/ClickHouse/pull/6126) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a bug in the conversion of `LowCardinality` types in `AggregateFunctionFactory`. This fixes [\#6257](https://github.com/ClickHouse/ClickHouse/issues/6257). [\#6281](https://github.com/ClickHouse/ClickHouse/pull/6281) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed parsing of `bool` settings from `true` and `false` strings in configuration files. [\#6278](https://github.com/ClickHouse/ClickHouse/pull/6278) ([alesapin](https://github.com/alesapin))
+- Fixed a rare bug with incompatible stream headers in queries to a `Distributed` table over a `MergeTree` table when part of the `WHERE` moves to `PREWHERE`. [\#6236](https://github.com/ClickHouse/ClickHouse/pull/6236) ([alesapin](https://github.com/alesapin))
+- Fixed overflow in integer division of a signed type to an unsigned type. This fixes [\#6214](https://github.com/ClickHouse/ClickHouse/issues/6214). [\#6233](https://github.com/ClickHouse/ClickHouse/pull/6233) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Backward Incompatible Change {#backward-incompatible-change-5}
+
+- `Kafka` is still broken.
+
+### ClickHouse Release 19.11.4.24, 2019-08-01 {#clickhouse-release-19-11-4-24-2019-08-01}
+
+#### Bug Fix {#bug-fix-20}
+
+- Fixed a bug with writing secondary index marks with adaptive granularity. [\#6126](https://github.com/ClickHouse/ClickHouse/pull/6126) ([alesapin](https://github.com/alesapin))
+- Fixed the `WITH ROLLUP` and `WITH CUBE` modifiers of `GROUP BY` with two-level aggregation. [\#6225](https://github.com/ClickHouse/ClickHouse/pull/6225) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed a hang in the `JSONExtractRaw` function. Fixed [\#6195](https://github.com/ClickHouse/ClickHouse/issues/6195). [\#6198](https://github.com/ClickHouse/ClickHouse/pull/6198) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a segfault in ExternalLoader::reloadOutdated(). [\#6082](https://github.com/ClickHouse/ClickHouse/pull/6082) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fixed the case when the server may close listening sockets but not shut down, continuing to serve remaining queries. You may end up with two running clickhouse-server processes. Sometimes the server may return a `bad_function_call` error for remaining queries. [\#6231](https://github.com/ClickHouse/ClickHouse/pull/6231) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed useless and incorrect conditions on the update field for initial loading of external dictionaries via ODBC, MySQL, ClickHouse and HTTP. This fixes [\#6069](https://github.com/ClickHouse/ClickHouse/issues/6069). [\#6083](https://github.com/ClickHouse/ClickHouse/pull/6083) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed an irrelevant exception in the cast of `LowCardinality(Nullable)` to a not-Nullable column in case it doesn't contain Nulls (e.g. in a query like `SELECT CAST(CAST('Hello' AS LowCardinality(Nullable(String))) AS String)`). [\#6094](https://github.com/ClickHouse/ClickHouse/issues/6094) [\#6119](https://github.com/ClickHouse/ClickHouse/pull/6119) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed a non-deterministic result of the “uniq” aggregate function in extremely rare cases. The bug was present in all ClickHouse versions. [\#6058](https://github.com/ClickHouse/ClickHouse/pull/6058) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a segfault when we set a slightly too high CIDR in the function `IPv6CIDRToRange`. [\#6068](https://github.com/ClickHouse/ClickHouse/pull/6068) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Fixed a small memory leak when the server throws many exceptions from many different contexts. [\#6144](https://github.com/ClickHouse/ClickHouse/pull/6144) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed the situation when the consumer got paused before subscription and did not resume afterwards. [\#6075](https://github.com/ClickHouse/ClickHouse/pull/6075) ([Ivan](https://github.com/abyss7)) Note that Kafka is broken in this version.
+- Clear the Kafka data buffer from the previous read operation that completed with an error. [\#6026](https://github.com/ClickHouse/ClickHouse/pull/6026) ([Nikolay](https://github.com/bopohaa)) Note that Kafka is broken in this version.
+- Since `StorageMergeTree::background_task_handle` is initialized in `startup()`, `MergeTreeBlockOutputStream::write()` may try to use it before initialization. Just check if it is initialized. [\#6080](https://github.com/ClickHouse/ClickHouse/pull/6080) ([Ivan](https://github.com/abyss7))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-6}
+
+- Added official `rpm` packages. [\#5740](https://github.com/ClickHouse/ClickHouse/pull/5740) ([proller](https://github.com/proller)) ([alesapin](https://github.com/alesapin))
+- Added an ability to build `.rpm` and `.tgz` packages with the `packager` script. [\#5769](https://github.com/ClickHouse/ClickHouse/pull/5769) ([alesapin](https://github.com/alesapin))
+- Fixes for the “Arcadia” build system. [\#6223](https://github.com/ClickHouse/ClickHouse/pull/6223) ([proller](https://github.com/proller))
+
+#### Backward Incompatible Change {#backward-incompatible-change-6}
+
+- `Kafka` is broken in this version.
+
+### ClickHouse Release 19.11.3.11, 2019-07-18 {#clickhouse-release-19-11-3-11-2019-07-18}
+
+#### New Feature {#new-feature-6}
+
+- Added support for prepared statements. [\#5331](https://github.com/ClickHouse/ClickHouse/pull/5331/) ([Alexander](https://github.com/sanych73)) [\#5630](https://github.com/ClickHouse/ClickHouse/pull/5630) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `DoubleDelta` and `Gorilla` column codecs. [\#5600](https://github.com/ClickHouse/ClickHouse/pull/5600) ([Vasily Nemkov](https://github.com/Enmk))
+- Added the `os_thread_priority` setting that allows controlling the “nice” value of query processing threads, used by the OS to adjust dynamic scheduling priority. It requires the `CAP_SYS_NICE` capability to work. This implements [\#5858](https://github.com/ClickHouse/ClickHouse/issues/5858). [\#5909](https://github.com/ClickHouse/ClickHouse/pull/5909) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Implemented `_topic`, `_offset`, `_key` columns for the Kafka engine. [\#5382](https://github.com/ClickHouse/ClickHouse/pull/5382) ([Ivan](https://github.com/abyss7)) Note that Kafka is broken in this version.
+- Added the aggregate function combinator `-Resample`. [\#5590](https://github.com/ClickHouse/ClickHouse/pull/5590) ([hcz](https://github.com/hczhcz))
+- Aggregate functions `groupArrayMovingSum(win_size)(x)` and `groupArrayMovingAvg(win_size)(x)`, which calculate a moving sum/average with or without a window-size limitation (a sketch follows this list). [\#5595](https://github.com/ClickHouse/ClickHouse/pull/5595) ([inv2004](https://github.com/inv2004))
+- Added the synonym `arrayFlatten` \<-\> `flatten`. [\#5764](https://github.com/ClickHouse/ClickHouse/pull/5764) ([hcz](https://github.com/hczhcz))
+- Integrated the H3 function `geoToH3` from Uber. [\#4724](https://github.com/ClickHouse/ClickHouse/pull/4724) ([Remen Ivan](https://github.com/BHYCHIK)) [\#5805](https://github.com/ClickHouse/ClickHouse/pull/5805) ([alexey-milovidov](https://github.com/alexey-milovidov))
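+
+A minimal sketch of the moving-window aggregates above; the inline array stands in for real data:
+
+```sql
+-- Moving sum limited to a window of the last 3 values.
+SELECT groupArrayMovingSum(3)(x)
+FROM (SELECT arrayJoin([1, 2, 3, 4, 5]) AS x);
+-- Expected result: [1, 3, 6, 9, 12]
+```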
+
+#### Bug Fix {#bug-fix-21}
+
+- Implemented async update of the DNS cache. A separate thread resolves all hosts and updates the DNS cache with a period (setting `dns_cache_update_period`). It helps when the IPs of hosts change frequently. [\#5857](https://github.com/ClickHouse/ClickHouse/pull/5857) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed a segfault in the `Delta` codec which affected columns with values less than 32 bits in size. The bug led to random memory corruption. [\#5786](https://github.com/ClickHouse/ClickHouse/pull/5786) ([alesapin](https://github.com/alesapin))
+- Fixed a segfault in TTL merge with non-physical columns in the block. [\#5819](https://github.com/ClickHouse/ClickHouse/pull/5819) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed a rare bug in the check of parts with `LowCardinality` columns. Previously `checkDataPart` always failed for parts with `LowCardinality` columns. [\#5832](https://github.com/ClickHouse/ClickHouse/pull/5832) ([alesapin](https://github.com/alesapin))
+- Avoid hanging connections when the server thread pool is full. It is important for connections from the `remote` table function or connections to a shard without replicas when there is a long connection timeout. This fixes [\#5878](https://github.com/ClickHouse/ClickHouse/issues/5878). [\#5881](https://github.com/ClickHouse/ClickHouse/pull/5881) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Support for constant arguments in the `evalMLModel` function. This fixes [\#5817](https://github.com/ClickHouse/ClickHouse/issues/5817). [\#5820](https://github.com/ClickHouse/ClickHouse/pull/5820) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed the issue when ClickHouse determined the default time zone as `UCT` instead of `UTC`. This fixes [\#5804](https://github.com/ClickHouse/ClickHouse/issues/5804). [\#5828](https://github.com/ClickHouse/ClickHouse/pull/5828) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed buffer underflow in `visitParamExtractRaw`. This fixes [\#5901](https://github.com/ClickHouse/ClickHouse/issues/5901). [\#5902](https://github.com/ClickHouse/ClickHouse/pull/5902) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Now distributed `DROP/ALTER/TRUNCATE/OPTIMIZE ON CLUSTER` queries will be executed directly on the leader replica. [\#5757](https://github.com/ClickHouse/ClickHouse/pull/5757) ([alesapin](https://github.com/alesapin))
+- Fixed `coalesce` for `ColumnConst` with `ColumnNullable`, plus related changes. [\#5755](https://github.com/ClickHouse/ClickHouse/pull/5755) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed `ReadBufferFromKafkaConsumer` so that it keeps reading new messages after `commit()` even if it was stalled before. [\#5852](https://github.com/ClickHouse/ClickHouse/pull/5852) ([Ivan](https://github.com/abyss7))
+- Fixed `FULL` and `RIGHT` JOIN results when joining on `Nullable` keys in the right table. [\#5859](https://github.com/ClickHouse/ClickHouse/pull/5859) ([Artem Zuikov](https://github.com/4ertus2))
+- Possible fix for infinite sleeping of low-priority queries. [\#5842](https://github.com/ClickHouse/ClickHouse/pull/5842) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a race condition which caused some queries to possibly not appear in query\_log right after a `SYSTEM FLUSH LOGS` query. [\#5456](https://github.com/ClickHouse/ClickHouse/issues/5456) [\#5685](https://github.com/ClickHouse/ClickHouse/pull/5685) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed a `heap-use-after-free` ASan warning in ClusterCopier caused by a watch trying to use an already removed copier object. [\#5871](https://github.com/ClickHouse/ClickHouse/pull/5871) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed wrong `StringRef` pointers returned by some implementations of `IColumn::deserializeAndInsertFromArena`. This bug affected only unit tests. [\#5973](https://github.com/ClickHouse/ClickHouse/pull/5973) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Prevent source and intermediate array join columns from masking columns of the same name. [\#5941](https://github.com/ClickHouse/ClickHouse/pull/5941) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed insert-and-select queries to the MySQL engine with MySQL-style identifier quoting. [\#5704](https://github.com/ClickHouse/ClickHouse/pull/5704) ([Winter Zhang](https://github.com/zhang2014))
+- Now the `CHECK TABLE` query works with the MergeTree engine family. It returns a check status and message, if any, for each part (or file, in the case of simpler engines). Also fixed a bug in the fetch of a broken part. [\#5865](https://github.com/ClickHouse/ClickHouse/pull/5865) ([alesapin](https://github.com/alesapin))
+- Fixed the SPLIT\_SHARED\_LIBRARIES runtime. [\#5793](https://github.com/ClickHouse/ClickHouse/pull/5793) ([Danila Kutenin](https://github.com/danlark1))
+- Fixed time-zone initialization when `/etc/localtime` is a relative symlink like `../usr/share/zoneinfo/Europe/Moscow`. [\#5922](https://github.com/ClickHouse/ClickHouse/pull/5922) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- clickhouse-copier: fixed use-after-free on shutdown. [\#5752](https://github.com/ClickHouse/ClickHouse/pull/5752) ([proller](https://github.com/proller))
+- Updated `simdjson`. Fixed the issue where some invalid JSONs with zero bytes were parsed successfully. [\#5938](https://github.com/ClickHouse/ClickHouse/pull/5938) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed shutdown of SystemLogs. [\#5802](https://github.com/ClickHouse/ClickHouse/pull/5802) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed hanging when a condition in `invalidate_query` depends on a dictionary. [\#6011](https://github.com/ClickHouse/ClickHouse/pull/6011) ([Vitaly Baranov](https://github.com/vitlibar))
+
+#### Improvement {#improvement-6}
+
+- Allow unresolvable addresses in cluster configuration. They are considered unavailable, and resolution is attempted at every connection attempt. This is especially useful for Kubernetes. This fixes [\#5714](https://github.com/ClickHouse/ClickHouse/issues/5714). [\#5924](https://github.com/ClickHouse/ClickHouse/pull/5924) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Close idle TCP connections (with a one-hour timeout by default). This is needed because every server can keep a connection pool to every other server, and after peak query concurrency connections would stall. This fixes [\#5879](https://github.com/ClickHouse/ClickHouse/issues/5879). [\#5880](https://github.com/ClickHouse/ClickHouse/pull/5880) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Better quality of the `topK` function. Changed the SavingSpace set behaviour to remove the last element if the new element has a bigger weight. [\#5833](https://github.com/ClickHouse/ClickHouse/issues/5833) [\#5850](https://github.com/ClickHouse/ClickHouse/pull/5850) ([Guillaume Tassery](https://github.com/YiuRULE))
+- URL functions for working with domains now work for incomplete URLs without a scheme. [\#5725](https://github.com/ClickHouse/ClickHouse/pull/5725) ([alesapin](https://github.com/alesapin))
+- Checksums added to the `system.parts_columns` table. [\#5874](https://github.com/ClickHouse/ClickHouse/pull/5874) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Added `Enum` data type as a synonym for `Enum8` or `Enum16` (see the sketch after this list). [\#5886](https://github.com/ClickHouse/ClickHouse/pull/5886) ([dimarub2000](https://github.com/dimarub2000))
+- Full-bit-transpose variant for the `T64` codec. It could lead to better compression with `zstd`. [\#5742](https://github.com/ClickHouse/ClickHouse/pull/5742) ([Artem Zuikov](https://github.com/4ertus2))
+- Conditions on `startsWith` can now use the primary key (see the sketch after this list). This fixes [\#5310](https://github.com/ClickHouse/ClickHouse/issues/5310) and [\#5882](https://github.com/ClickHouse/ClickHouse/issues/5882). [\#5919](https://github.com/ClickHouse/ClickHouse/pull/5919) ([dimarub2000](https://github.com/dimarub2000))
+- Allow using `clickhouse-copier` with a cross-replication cluster topology by permitting an empty database name. [\#5745](https://github.com/ClickHouse/ClickHouse/pull/5745) ([nvartolomei](https://github.com/nvartolomei))
+- Use `UTC` as the default time zone on a system without `tzdata` (e.g. bare Docker container). Before this patch, the error message `Could not determine local time zone` was printed and the server or client refused to start. [\#5827](https://github.com/ClickHouse/ClickHouse/pull/5827) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Returned support for floating-point arguments in the function `quantileTiming` for backward compatibility. [\#5911](https://github.com/ClickHouse/ClickHouse/pull/5911) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Show which table is missing a column in error messages. [\#5768](https://github.com/ClickHouse/ClickHouse/pull/5768) ([Ivan](https://github.com/abyss7))
+- Disallow running queries with the same query\_id for different users. [\#5430](https://github.com/ClickHouse/ClickHouse/pull/5430) ([proller](https://github.com/proller))
+- More robust code for sending metrics to Graphite. It works even during long multiple `RENAME TABLE` operations. [\#5875](https://github.com/ClickHouse/ClickHouse/pull/5875) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- More informative error messages when ThreadPool cannot schedule a task for execution. This fixes [\#5305](https://github.com/ClickHouse/ClickHouse/issues/5305). [\#5801](https://github.com/ClickHouse/ClickHouse/pull/5801) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Inverted ngramSearch to be more intuitive. [\#5807](https://github.com/ClickHouse/ClickHouse/pull/5807) ([Danila Kutenin](https://github.com/danlark1))
+- Added user parsing in the HDFS engine builder. [\#5946](https://github.com/ClickHouse/ClickHouse/pull/5946) ([akonyaev90](https://github.com/akonyaev90))
+- Updated the default value of `max_ast_elements`. [\#5933](https://github.com/ClickHouse/ClickHouse/pull/5933) ([Artem Konovalov](https://github.com/izebit))
+- Added the notion of obsolete settings. The obsolete setting `allow_experimental_low_cardinality_type` can be used with no effect. [0f15c01c6802f7ce1a1494c12c846be8c98944cd](https://github.com/ClickHouse/ClickHouse/commit/0f15c01c6802f7ce1a1494c12c846be8c98944cd) [Alexey Milovidov](https://github.com/alexey-milovidov)
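+
+A minimal sketch of two of the improvements above; the table and column names are hypothetical:
+
+```sql
+-- Hypothetical table whose primary key starts with a URL column:
+-- a startsWith condition on it can now be served by the primary key.
+CREATE TABLE visits (url String, hits UInt64) ENGINE = MergeTree ORDER BY url;
+SELECT count() FROM visits WHERE startsWith(url, 'https://');
+
+-- `Enum` without an explicit width is now a synonym for Enum8 or Enum16.
+CREATE TABLE enum_demo (status Enum('ok' = 1, 'error' = 2)) ENGINE = Memory;
+```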
+
+#### Performance Improvement {#performance-improvement-4}
+
+- Increased the number of streams to SELECT from a Merge table for a more uniform distribution of threads. Added the setting `max_streams_multiplier_for_merge_tables`. This fixes [\#5797](https://github.com/ClickHouse/ClickHouse/issues/5797). [\#5915](https://github.com/ClickHouse/ClickHouse/pull/5915) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-7}
+
+- Added a backward-compatibility test for client-server interaction with different versions of ClickHouse. [\#5868](https://github.com/ClickHouse/ClickHouse/pull/5868) ([alesapin](https://github.com/alesapin))
+- Test coverage information in every commit and pull request. [\#5896](https://github.com/ClickHouse/ClickHouse/pull/5896) ([alesapin](https://github.com/alesapin))
+- Cooperate with Address Sanitizer to support our custom allocators (`Arena` and `ArenaWithFreeLists`) for better debugging of “use-after-free” errors. [\#5728](https://github.com/ClickHouse/ClickHouse/pull/5728) ([akuzm](https://github.com/akuzm))
+- Switch to the [LLVM libunwind implementation](https://github.com/llvm-mirror/libunwind) for C++ exception handling and stack trace printing. [\#4828](https://github.com/ClickHouse/ClickHouse/pull/4828) ([Nikita Lapkov](https://github.com/laplab))
+- Added two more warnings from -Weverything. [\#5923](https://github.com/ClickHouse/ClickHouse/pull/5923) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Allow building ClickHouse with Memory Sanitizer. [\#3949](https://github.com/ClickHouse/ClickHouse/pull/3949) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a UBSan report about the `bitTest` function in fuzz test. [\#5943](https://github.com/ClickHouse/ClickHouse/pull/5943) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Docker: added the possibility to init a ClickHouse instance that requires authentication. [\#5727](https://github.com/ClickHouse/ClickHouse/pull/5727) ([Korviakov Andrey](https://github.com/shurshun))
+- Updated librdkafka to version 1.1.0. [\#5872](https://github.com/ClickHouse/ClickHouse/pull/5872) ([Ivan](https://github.com/abyss7))
+- Added a global timeout for integration tests and disabled some of them in the tests code. [\#5741](https://github.com/ClickHouse/ClickHouse/pull/5741) ([alesapin](https://github.com/alesapin))
+- Fixed some ThreadSanitizer failures. [\#5854](https://github.com/ClickHouse/ClickHouse/pull/5854) ([akuzm](https://github.com/akuzm))
+- The `--no-undefined` option forces the linker to check all external names for existence while linking. It's very useful to track real dependencies between libraries in split build mode. [\#5855](https://github.com/ClickHouse/ClickHouse/pull/5855) ([Ivan](https://github.com/abyss7))
+- Added a performance test for [\#5797](https://github.com/ClickHouse/ClickHouse/issues/5797). [\#5914](https://github.com/ClickHouse/ClickHouse/pull/5914) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed compatibility with gcc-7. [\#5840](https://github.com/ClickHouse/ClickHouse/pull/5840) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added support for gcc-9. This fixes [\#5717](https://github.com/ClickHouse/ClickHouse/issues/5717). [\#5774](https://github.com/ClickHouse/ClickHouse/pull/5774) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed an error when libunwind could be linked incorrectly. [\#5948](https://github.com/ClickHouse/ClickHouse/pull/5948) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a few warnings found by PVS-Studio. [\#5921](https://github.com/ClickHouse/ClickHouse/pull/5921) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added initial support for the `clang-tidy` static analyzer. [\#5806](https://github.com/ClickHouse/ClickHouse/pull/5806) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Convert BSD/Linux endian macros (‘be64toh’ and ‘htobe64’) to the Mac OS X equivalents. [\#5785](https://github.com/ClickHouse/ClickHouse/pull/5785) ([Fu Chen](https://github.com/fredchenbj))
+- Improved integration tests guide. [\#5796](https://github.com/ClickHouse/ClickHouse/pull/5796) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fixed the build at macosx + gcc9. [\#5822](https://github.com/ClickHouse/ClickHouse/pull/5822) ([filimonov](https://github.com/filimonov))
+- Fixed a hard-to-spot typo: aggreagte -\> aggregate. [\#5753](https://github.com/ClickHouse/ClickHouse/pull/5753) ([akuzm](https://github.com/akuzm))
+- Fixed the FreeBSD build. [\#5760](https://github.com/ClickHouse/ClickHouse/pull/5760) ([proller](https://github.com/proller))
+- Added a link to the experimental YouTube channel to the website. [\#5845](https://github.com/ClickHouse/ClickHouse/pull/5845) ([Ivan Blinkov](https://github.com/blinkov))
+- CMake: added an option for coverage flags: WITH\_COVERAGE. [\#5776](https://github.com/ClickHouse/ClickHouse/pull/5776) ([proller](https://github.com/proller))
+- Fixed the initial size of some inline PODArrays. [\#5787](https://github.com/ClickHouse/ClickHouse/pull/5787) ([akuzm](https://github.com/akuzm))
+- clickhouse-server.postinst: fixed OS detection for CentOS 6. [\#5788](https://github.com/ClickHouse/ClickHouse/pull/5788) ([proller](https://github.com/proller))
+- Added Arch Linux package generation. [\#5719](https://github.com/ClickHouse/ClickHouse/pull/5719) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Split Common/config.h by libs (dbms). [\#5715](https://github.com/ClickHouse/ClickHouse/pull/5715) ([proller](https://github.com/proller))
+- Fixes for the “Arcadia” build platform. [\#5795](https://github.com/ClickHouse/ClickHouse/pull/5795) ([proller](https://github.com/proller))
+- Fixes for unconventional builds (gcc9, no submodules). [\#5792](https://github.com/ClickHouse/ClickHouse/pull/5792) ([proller](https://github.com/proller))
+- Require an explicit type in unalignedStore because it was proven to be bug-prone. [\#5791](https://github.com/ClickHouse/ClickHouse/pull/5791) ([akuzm](https://github.com/akuzm))
+- Fixed the macOS build. [\#5830](https://github.com/ClickHouse/ClickHouse/pull/5830) ([filimonov](https://github.com/filimonov))
+- Performance tests concerning the new JIT feature with a bigger dataset, as requested in [\#5263](https://github.com/ClickHouse/ClickHouse/issues/5263). [\#5887](https://github.com/ClickHouse/ClickHouse/pull/5887) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Run stateful tests in the stress test. [12693e568722f11e19859742f56428455501fd2a](https://github.com/ClickHouse/ClickHouse/commit/12693e568722f11e19859742f56428455501fd2a) ([alesapin](https://github.com/alesapin))
+
+#### Backward Incompatible Change {#backward-incompatible-change-7}
+
+- `Kafka` is broken in this version.
+- Enabled `adaptive_index_granularity` = 10MB by default for new `MergeTree` tables. If you created new MergeTree tables on version 19.11+, downgrade to versions prior to 19.6 will be impossible. [\#5628](https://github.com/ClickHouse/ClickHouse/pull/5628) ([alesapin](https://github.com/alesapin))
+- Removed obsolete undocumented embedded dictionaries that were used by Yandex.Metrica. The functions `OSIn`, `SEIn`, `OSToRoot`, `SEToRoot`, `OSHierarchy`, `SEHierarchy` are no longer available. If you are using these functions, write an email to clickhouse-feedback@yandex-team.com. Note: at the last moment we decided to keep these functions for a while. [\#5780](https://github.com/ClickHouse/ClickHouse/pull/5780) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+## ClickHouse Release 19.10 {#clickhouse-release-19-10}
+
+### ClickHouse Release 19.10.1.5, 2019-07-12 {#clickhouse-release-19-10-1-5-2019-07-12}
+
+#### New Feature {#new-feature-7}
+
+- Added the new column codec `T64`, made for (U)IntX/EnumX/Date(Time)/DecimalX columns. It should be good for columns with constant or small-range values. The codec itself allows enlarging or shrinking the data type without recompression (a sketch follows this list). [\#5557](https://github.com/ClickHouse/ClickHouse/pull/5557) ([Artem Zuikov](https://github.com/4ertus2))
+- Added the database engine `MySQL` that allows viewing all the tables on a remote MySQL server (also shown below). [\#5599](https://github.com/ClickHouse/ClickHouse/pull/5599) ([Winter Zhang](https://github.com/zhang2014))
+- `bitmapContains` implementation. It's 2x faster than `bitmapHasAny` if the second bitmap contains one element. [\#5535](https://github.com/ClickHouse/ClickHouse/pull/5535) ([Zhichang Yu](https://github.com/yuzhichang))
+- Support for the `crc32` function (with behaviour exactly as in MySQL or PHP). Do not use it if you need a hash function. [\#5661](https://github.com/ClickHouse/ClickHouse/pull/5661) ([Remen Ivan](https://github.com/BHYCHIK))
+- Implemented the `SYSTEM START/STOP DISTRIBUTED SENDS` queries to control asynchronous inserts into `Distributed` tables. [\#4935](https://github.com/ClickHouse/ClickHouse/pull/4935) ([Winter Zhang](https://github.com/zhang2014))
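+
+A minimal sketch of three of the features above; the host, credentials and table names are placeholders:
+
+```sql
+-- T64 suits integer columns whose values stay in a small range.
+CREATE TABLE t64_demo (dt DateTime, status UInt16 CODEC(T64, LZ4))
+ENGINE = MergeTree ORDER BY dt;
+
+-- A MySQL database engine exposes all tables of a remote MySQL database.
+CREATE DATABASE mysql_db ENGINE = MySQL('mysql-host:3306', 'shop', 'user', 'password');
+
+-- bitmapContains returns 1 if the bitmap contains the needle.
+SELECT bitmapContains(bitmapBuild([1, 5, 7, 9]), toUInt32(9));
+```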
+
+#### Bug Fix {#bug-fix-22}
+
+- Ignore query execution limits and max parts size for merge limits while executing mutations. [\#5659](https://github.com/ClickHouse/ClickHouse/pull/5659) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed a bug which could lead to deduplication of normal blocks (extremely rare) and insertion of duplicate blocks (more often). [\#5549](https://github.com/ClickHouse/ClickHouse/pull/5549) ([alesapin](https://github.com/alesapin))
+- Fixed the function `arrayEnumerateUniqRanked` for arguments with empty arrays. [\#5559](https://github.com/ClickHouse/ClickHouse/pull/5559) ([proller](https://github.com/proller))
+- Don't subscribe to Kafka topics without the intent to poll any messages. [\#5698](https://github.com/ClickHouse/ClickHouse/pull/5698) ([Ivan](https://github.com/abyss7))
+- Make the setting `join_use_nulls` have no effect on types that cannot be inside Nullable. [\#5700](https://github.com/ClickHouse/ClickHouse/pull/5700) ([Olga Khvostikova](https://github.com/stavrolia))
+- Fixed `Incorrect size of index granularity` errors. [\#5720](https://github.com/ClickHouse/ClickHouse/pull/5720) ([coraxster](https://github.com/coraxster))
+- Fixed a Decimal-to-Float conversion overflow. [\#5607](https://github.com/ClickHouse/ClickHouse/pull/5607) ([coraxster](https://github.com/coraxster))
+- Flush the buffer when the `WriteBufferFromHDFS` destructor is called. This fixes writing into `HDFS`. [\#5684](https://github.com/ClickHouse/ClickHouse/pull/5684) ([Xindong Peng](https://github.com/eejoin))
+
+#### Improvement {#improvement-7}
+
+- Treat empty cells in `CSV` as default values when the setting `input_format_defaults_for_omitted_fields` is enabled. [\#5625](https://github.com/ClickHouse/ClickHouse/pull/5625) ([akuzm](https://github.com/akuzm))
+- Non-blocking loading of external dictionaries. [\#5567](https://github.com/ClickHouse/ClickHouse/pull/5567) ([Vitaly Baranov](https://github.com/vitlibar))
+- Network timeouts can be dynamically changed for already established connections according to the settings. [\#4558](https://github.com/ClickHouse/ClickHouse/pull/4558) ([Konstantin Podshumok](https://github.com/podshumok))
+- Using “public\_suffix\_list” for the functions `firstSignificantSubdomain`, `cutToFirstSignificantSubdomain`. It uses a perfect hash table generated by `gperf` from a list generated from the file https://publicsuffix.org/list/public\_suffix\_list.dat (for example, now we recognize the domain `ac.uk` as non-significant). [\#5030](https://github.com/ClickHouse/ClickHouse/pull/5030) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Adopted the `IPv6` data type in system tables: `system.processes` and `system.query_log`. [\#5640](https://github.com/ClickHouse/ClickHouse/pull/5640) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Using sessions for connections with the MySQL compatibility protocol. \#5476 [\#5646](https://github.com/ClickHouse/ClickHouse/pull/5646) ([Yuriy Baranov](https://github.com/yurriy))
+- Support for more `ALTER` queries `ON CLUSTER`. [\#5593](https://github.com/ClickHouse/ClickHouse/pull/5593) [\#5613](https://github.com/ClickHouse/ClickHouse/pull/5613) ([sundyli](https://github.com/sundy-li))
+- Support for the `` section in the `clickhouse-local` config file. [\#5540](https://github.com/ClickHouse/ClickHouse/pull/5540) ([proller](https://github.com/proller))
+- Allow running queries with the `remote` table function in `clickhouse-local`. [\#5627](https://github.com/ClickHouse/ClickHouse/pull/5627) ([proller](https://github.com/proller))
+
+#### Performance Improvement {#performance-improvement-5}
+
+- Added the possibility to write the final mark at the end of MergeTree columns. It allows avoiding useless reads for keys that are out of the table data range. It is enabled only if adaptive index granularity is in use. [\#5624](https://github.com/ClickHouse/ClickHouse/pull/5624) ([alesapin](https://github.com/alesapin))
+- Improved performance of MergeTree tables on very slow filesystems by reducing the number of `stat` syscalls. [\#5648](https://github.com/ClickHouse/ClickHouse/pull/5648) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed performance degradation in reading from MergeTree tables that was introduced in version 19.6. Fixes \#5631. [\#5633](https://github.com/ClickHouse/ClickHouse/pull/5633) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-8}
+
+- Implemented `TestKeeper` as an implementation of the ZooKeeper interface used for testing. [\#5643](https://github.com/ClickHouse/ClickHouse/pull/5643) ([alexey-milovidov](https://github.com/alexey-milovidov)) ([levushkin aleksej](https://github.com/alexey-milovidov))
+- From now on, `.sql` tests can be run isolated by server, in parallel, with a random database. This allows running them faster, adding new tests with custom server configurations, and making sure that different tests don't affect each other. [\#5554](https://github.com/ClickHouse/ClickHouse/pull/5554) ([Ivan](https://github.com/abyss7))
+- Removed `` and `` from performance tests. [\#5672](https://github.com/ClickHouse/ClickHouse/pull/5672) ([Olga Khvostikova](https://github.com/stavrolia))
+- Fixed the “select\_format” performance test for `Pretty` formats. [\#5642](https://github.com/ClickHouse/ClickHouse/pull/5642) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+## ClickHouse Release 19.9 {#clickhouse-release-19-9}
+
+### ClickHouse Release 19.9.3.31, 2019-07-05 {#clickhouse-release-19-9-3-31-2019-07-05}
+
+#### Bug Fix {#bug-fix-23}
+
+- Fixed a segfault in the Delta codec which affected columns with values less than 32 bits in size. The bug led to random memory corruption. [\#5786](https://github.com/ClickHouse/ClickHouse/pull/5786) ([alesapin](https://github.com/alesapin))
+- Fixed a rare bug in the check of parts with LowCardinality columns. [\#5832](https://github.com/ClickHouse/ClickHouse/pull/5832) ([alesapin](https://github.com/alesapin))
+- Fixed a segfault in TTL merge with non-physical columns in the block. [\#5819](https://github.com/ClickHouse/ClickHouse/pull/5819) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed potential infinite sleeping of low-priority queries. [\#5842](https://github.com/ClickHouse/ClickHouse/pull/5842) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed how ClickHouse determined the default time zone as UCT instead of UTC. [\#5828](https://github.com/ClickHouse/ClickHouse/pull/5828) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a bug with the execution of distributed DROP/ALTER/TRUNCATE/OPTIMIZE ON CLUSTER queries on a follower replica before the leader replica. Now they will be executed directly on the leader replica. [\#5757](https://github.com/ClickHouse/ClickHouse/pull/5757) ([alesapin](https://github.com/alesapin))
+- Fixed a race condition which caused some queries to possibly not appear in query\_log instantly after a SYSTEM FLUSH LOGS query. [\#5685](https://github.com/ClickHouse/ClickHouse/pull/5685) ([Anton Popov](https://github.com/CurtizJ))
+- Added missing support for constant arguments to the `evalMLModel` function. [\#5820](https://github.com/ClickHouse/ClickHouse/pull/5820) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse Release 19.9.2.4, 2019-06-24 {#clickhouse-release-19-9-2-4-2019-06-24}
+
+#### New Feature {#new-feature-8}
+
+- Print information about frozen parts in the `system.parts` table (see the sketch after this list). [\#5471](https://github.com/ClickHouse/ClickHouse/pull/5471) ([proller](https://github.com/proller))
+- Ask for the client password on clickhouse-client start on a TTY if it is not set in the arguments. [\#5092](https://github.com/ClickHouse/ClickHouse/pull/5092) ([proller](https://github.com/proller))
+- Implemented the `dictGet` and `dictGetOrDefault` functions for Decimal types. [\#5394](https://github.com/ClickHouse/ClickHouse/pull/5394) ([Artem Zuikov](https://github.com/4ertus2))
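+
+A minimal sketch of the first feature above, assuming a table named `my_table`; `is_frozen` marks parts captured by `ALTER TABLE ... FREEZE`:
+
+```sql
+-- Inspect which active parts of a table are frozen (backed up).
+SELECT name, partition, is_frozen
+FROM system.parts
+WHERE table = 'my_table' AND active;
+```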
+
+#### Improvement {#improvement-8}
+
+- Debian init: added a service stop timeout. [\#5522](https://github.com/ClickHouse/ClickHouse/pull/5522) ([proller](https://github.com/proller))
+- Added a setting, forbidden by default, to create tables with suspicious types. [\#5448](https://github.com/ClickHouse/ClickHouse/pull/5448) ([Olga Khvostikova](https://github.com/stavrolia))
+- Regression functions return model weights when not used as State in the function `evalMLMethod`. [\#5411](https://github.com/ClickHouse/ClickHouse/pull/5411) ([Quid37](https://github.com/Quid37))
+- Renamed and improved regression methods. [\#5492](https://github.com/ClickHouse/ClickHouse/pull/5492) ([Quid37](https://github.com/Quid37))
+- Clearer interfaces of string searchers. [\#5586](https://github.com/ClickHouse/ClickHouse/pull/5586) ([Danila Kutenin](https://github.com/danlark1))
+
+#### Bug Fix {#bug-fix-24}
+
+- Fixed potential data loss in Kafka. [\#5445](https://github.com/ClickHouse/ClickHouse/pull/5445) ([Ivan](https://github.com/abyss7))
+- Fixed a potential infinite loop in the `PrettySpace` format when called with zero columns. [\#5560](https://github.com/ClickHouse/ClickHouse/pull/5560) ([Olga Khvostikova](https://github.com/stavrolia))
+- Fixed a UInt32 overflow bug in linear models. Allow the eval ML model for non-const model arguments. [\#5516](https://github.com/ClickHouse/ClickHouse/pull/5516) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- `ALTER TABLE ... DROP INDEX IF EXISTS ...` should not raise an exception if the provided index does not exist. [\#5524](https://github.com/ClickHouse/ClickHouse/pull/5524) ([Gleb Novikov](https://github.com/NanoBjorn))
+- Fixed a segfault in `bitmapHasAny` with a scalar subquery. [\#5528](https://github.com/ClickHouse/ClickHouse/pull/5528) ([Zhichang Yu](https://github.com/yuzhichang))
+- Fixed an error when the replication connection pool doesn't retry resolving the host, even when the DNS cache was dropped. [\#5534](https://github.com/ClickHouse/ClickHouse/pull/5534) ([alesapin](https://github.com/alesapin))
+- Fixed `ALTER ... MODIFY TTL` on ReplicatedMergeTree. [\#5539](https://github.com/ClickHouse/ClickHouse/pull/5539) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed INSERT into a Distributed table with a MATERIALIZED column. [\#5429](https://github.com/ClickHouse/ClickHouse/pull/5429) ([Azat Khuzhin](https://github.com/azat))
+- Fixed a bad allocation when truncating Join storage. [\#5437](https://github.com/ClickHouse/ClickHouse/pull/5437) ([TCeason](https://github.com/TCeason))
+- In recent versions of the package tzdata, some files are symlinks now. The current mechanism for detecting the default time zone got broken and gave wrong names for some time zones. Now at least we force the time-zone name to the contents of TZ if provided. [\#5443](https://github.com/ClickHouse/ClickHouse/pull/5443) ([Ivan](https://github.com/abyss7))
+- Fixed some extremely rare cases with the MultiVolnitsky searcher when the constant needles are in sum at least 16KB long. The algorithm missed or overwrote previous results, which could lead to an incorrect result of `multiSearchAny`. [\#5588](https://github.com/ClickHouse/ClickHouse/pull/5588) ([Danila Kutenin](https://github.com/danlark1))
+- Fixed the problem when settings for ExternalData requests couldn't use ClickHouse settings. Also, for now, the settings `date_time_input_format` and `low_cardinality_allow_in_native_format` cannot be used because of the ambiguity of names (in external data they can be interpreted as a table format, and in the query they can be a setting). [\#5455](https://github.com/ClickHouse/ClickHouse/pull/5455) ([Danila Kutenin](https://github.com/danlark1))
+- Fixed a bug when parts were removed only from the filesystem without dropping them from ZooKeeper. [\#5520](https://github.com/ClickHouse/ClickHouse/pull/5520) ([alesapin](https://github.com/alesapin))
+- Removed debug logging from the MySQL protocol. [\#5478](https://github.com/ClickHouse/ClickHouse/pull/5478) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Skip ZNONODE during DDL query processing. [\#5489](https://github.com/ClickHouse/ClickHouse/pull/5489) ([Azat Khuzhin](https://github.com/azat))
+- Fixed mixing of `UNION ALL` result column types. There were cases with inconsistent data and column types of the result columns. [\#5503](https://github.com/ClickHouse/ClickHouse/pull/5503) ([Artem Zuikov](https://github.com/4ertus2))
+- Throw an exception on wrong integers in `dictGetT` functions instead of crashing. [\#5446](https://github.com/ClickHouse/ClickHouse/pull/5446) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed wrong element\_count and load\_factor for hashed dictionaries in the `system.dictionaries` table. [\#5440](https://github.com/ClickHouse/ClickHouse/pull/5440) ([Azat Khuzhin](https://github.com/azat))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-9}
+
+- Fixed the build without `Brotli` HTTP compression support (`ENABLE_BROTLI=OFF` cmake variable). [\#5521](https://github.com/ClickHouse/ClickHouse/pull/5521) ([Anton Yuzhaninov](https://github.com/citrin))
+- Include roaring.h as roaring/roaring.h. [\#5523](https://github.com/ClickHouse/ClickHouse/pull/5523) ([Orivej Desh](https://github.com/orivej))
+- Fixed gcc9 warnings in hyperscan (the \#line directive is evil!). [\#5546](https://github.com/ClickHouse/ClickHouse/pull/5546) ([Danila Kutenin](https://github.com/danlark1))
+- Fixed all warnings when compiling with gcc-9. Fixed some contrib issues. Fixed gcc9 ICE and submitted it to bugzilla. [\#5498](https://github.com/ClickHouse/ClickHouse/pull/5498) ([Danila Kutenin](https://github.com/danlark1))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-9}
+
+- Fixed build without `Brotli` HTTP compression support (`ENABLE_BROTLI=OFF` cmake variable). [\#5521](https://github.com/ClickHouse/ClickHouse/pull/5521) ([Anton Yuzhaninov](https://github.com/citrin))
+- Include roaring.h as roaring/roaring.h. [\#5523](https://github.com/ClickHouse/ClickHouse/pull/5523) ([Orivej Desh](https://github.com/orivej))
+- Fix gcc9 warnings in hyperscan (the \#line directive is evil!). [\#5546](https://github.com/ClickHouse/ClickHouse/pull/5546) ([Danila Kutenin](https://github.com/danlark1))
+- Fix all warnings when compiling with gcc-9. Fix some contrib issues. Fix a gcc9 ICE and submit it to bugzilla. [\#5498](https://github.com/ClickHouse/ClickHouse/pull/5498) ([Danila Kutenin](https://github.com/danlark1))
+- Fixed linking with lld. [\#5477](https://github.com/ClickHouse/ClickHouse/pull/5477) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Remove unused specializations in dictionaries. [\#5452](https://github.com/ClickHouse/ClickHouse/pull/5452) ([Artem Zuikov](https://github.com/4ertus2))
+- Improved performance tests for formatting and parsing tables of different types of files. [\#5497](https://github.com/ClickHouse/ClickHouse/pull/5497) ([Olga Khvostikova](https://github.com/stavrolia))
+- Fixes for parallel test runs. [\#5506](https://github.com/ClickHouse/ClickHouse/pull/5506) ([proller](https://github.com/proller))
+- Docker: use configs from clickhouse-test. [\#5531](https://github.com/ClickHouse/ClickHouse/pull/5531) ([proller](https://github.com/proller))
+- Fix compilation for FreeBSD. [\#5447](https://github.com/ClickHouse/ClickHouse/pull/5447) ([proller](https://github.com/proller))
+- Upgrade boost to 1.70. [\#5570](https://github.com/ClickHouse/ClickHouse/pull/5570) ([proller](https://github.com/proller))
+- Fix building ClickHouse as a submodule. [\#5574](https://github.com/ClickHouse/ClickHouse/pull/5574) ([proller](https://github.com/proller))
+- Improve JSONExtract performance tests. [\#5444](https://github.com/ClickHouse/ClickHouse/pull/5444) ([Vitaly Baranov](https://github.com/vitlibar))
+
+## ClickHouse Release 19.8 {#clickhouse-release-19-8}
+
+### ClickHouse Release 19.8.3.8, 2019-06-11 {#clickhouse-release-19-8-3-8-2019-06-11}
+
+#### New Features {#new-features}
+
+- Added functions to work with JSON. [\#4686](https://github.com/ClickHouse/ClickHouse/pull/4686) ([hcz](https://github.com/hczhcz)) [\#5124](https://github.com/ClickHouse/ClickHouse/pull/5124). ([Vitaly Baranov](https://github.com/vitlibar))
+- Add a function `basename`, with behaviour similar to the basename function that exists in a lot of languages (`os.path.basename` in Python, `basename` in PHP, etc…). Works with both a UNIX-like path and a Windows path. [\#5136](https://github.com/ClickHouse/ClickHouse/pull/5136) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Added `LIMIT n, m BY` or `LIMIT m OFFSET n BY` syntax to set an offset of n for the LIMIT BY clause (see the example after this list). [\#5138](https://github.com/ClickHouse/ClickHouse/pull/5138) ([Anton Popov](https://github.com/CurtizJ))
+- Added a new data type `SimpleAggregateFunction`, which allows to have columns with light aggregation in an `AggregatingMergeTree`. It can only be used with simple functions like `any`, `anyLast`, `sum`, `min`, `max`. [\#4629](https://github.com/ClickHouse/ClickHouse/pull/4629) ([Boris Granveaud](https://github.com/bgranvea))
+- Added support for non-constant arguments in the function `ngramDistance`. [\#5198](https://github.com/ClickHouse/ClickHouse/pull/5198) ([Danila Kutenin](https://github.com/danlark1))
+- Added functions `skewPop`, `skewSamp`, `kurtPop` and `kurtSamp` to compute sequence skewness, sample skewness, kurtosis and sample kurtosis respectively. [\#5200](https://github.com/ClickHouse/ClickHouse/pull/5200) ([hcz](https://github.com/hczhcz))
+- Support the rename operation for `MaterializeView` storage. [\#5209](https://github.com/ClickHouse/ClickHouse/pull/5209) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Added a server which allows connecting to ClickHouse using a MySQL client. [\#4715](https://github.com/ClickHouse/ClickHouse/pull/4715) ([Yuriy Baranov](https://github.com/yurriy))
+- Add `toDecimal*OrZero` and `toDecimal*OrNull` functions. [\#5291](https://github.com/ClickHouse/ClickHouse/pull/5291) ([Artem Zuikov](https://github.com/4ertus2))
+- Support Decimal types in functions: `quantile`, `quantiles`, `median`, `quantileExactWeighted`, `quantilesExactWeighted`, medianExactWeighted. [\#5304](https://github.com/ClickHouse/ClickHouse/pull/5304) ([Artem Zuikov](https://github.com/4ertus2))
+- Added a `toValidUTF8` function, which replaces all invalid UTF-8 characters by the replacement character � (U+FFFD). [\#5322](https://github.com/ClickHouse/ClickHouse/pull/5322) ([Danila Kutenin](https://github.com/danlark1))
+- Added the `format` function. Formatting a constant pattern (simplified Python format pattern) with the strings listed in the arguments. [\#5330](https://github.com/ClickHouse/ClickHouse/pull/5330) ([Danila Kutenin](https://github.com/danlark1))
+- Added the `system.detached_parts` table containing information about detached parts of `MergeTree` tables. [\#5353](https://github.com/ClickHouse/ClickHouse/pull/5353) ([akuzm](https://github.com/akuzm))
+- Added the `ngramSearch` function to calculate the non-symmetric difference between needle and haystack. [\#5418](https://github.com/ClickHouse/ClickHouse/pull/5418)[\#5422](https://github.com/ClickHouse/ClickHouse/pull/5422) ([Danila Kutenin](https://github.com/danlark1))
+- Implementation of basic machine learning methods (stochastic linear regression and logistic regression) using the aggregate functions interface. Has different strategies for updating model weights (simple gradient descent, momentum method, Nesterov method). Also supports mini-batches of custom size. [\#4943](https://github.com/ClickHouse/ClickHouse/pull/4943) ([Quid37](https://github.com/Quid37))
+- Implementation of the `geohashEncode` and `geohashDecode` functions. [\#5003](https://github.com/ClickHouse/ClickHouse/pull/5003) ([Vasily Nemkov](https://github.com/Enmk))
+- Added the aggregate function `timeSeriesGroupSum`, which can aggregate different time series whose sample timestamps are not aligned. It uses linear interpolation between two sample timestamps and then sums the time series together. Added the aggregate function `timeSeriesGroupRateSum`, which calculates the rate of time series and then sums the rates together. [\#4542](https://github.com/ClickHouse/ClickHouse/pull/4542) ([Yangkuan Liu](https://github.com/LiuYangkuan))
+- Added functions `IPv4CIDRtoIPv4Range` and `IPv6CIDRtoIPv6Range` to calculate the lower and higher bounds for an IP in a subnet using a CIDR. [\#5095](https://github.com/ClickHouse/ClickHouse/pull/5095) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Add an X-ClickHouse-Summary header when we send a query over HTTP with the setting `send_progress_in_http_headers` enabled. Returns the usual information of X-ClickHouse-Progress, with additional information like how many rows and bytes were inserted in the query. [\#5116](https://github.com/ClickHouse/ClickHouse/pull/5116) ([Guillaume Tassery](https://github.com/YiuRULE))
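+
+A quick sketch of the new LIMIT BY offset syntax; the table `hits` and its columns are hypothetical:
+
+```sql
+-- For every domain, skip the top row and return the next two (hypothetical table `hits`).
+SELECT domain, path
+FROM hits
+ORDER BY domain, views DESC
+LIMIT 1, 2 BY domain;
+-- Equivalent spelling: LIMIT 2 OFFSET 1 BY domain
+```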
+
+#### Improvements {#improvements}
+
+- Added the `max_parts_in_total` setting for the MergeTree family of tables (default: 100 000) that prevents unsafe specification of the partition key \#5166. [\#5171](https://github.com/ClickHouse/ClickHouse/pull/5171) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `clickhouse-obfuscator`: derive the seed for individual columns by combining the initial seed with the column name, not the column position. This is intended to transform datasets with multiple related tables, so that tables will remain JOINable after the transformation. [\#5178](https://github.com/ClickHouse/ClickHouse/pull/5178) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added functions `JSONExtractRaw`, `JSONExtractKeyAndValues`. Renamed the function `jsonExtract` to `JSONExtract`. When something goes wrong these functions return the correspondent values, not `NULL`. Modified the function `JSONExtract`: it now gets the return type from its last parameter and doesn't inject nullables. Implemented a fallback to RapidJSON in case AVX2 instructions are not available. The simdjson library was updated to a new version. (See the sketch after this list.) [\#5235](https://github.com/ClickHouse/ClickHouse/pull/5235) ([Vitaly Baranov](https://github.com/vitlibar))
+- Now the `if` and `multiIf` functions don't rely on the condition's `Nullable`, but rely on the branches, for SQL compatibility. [\#5238](https://github.com/ClickHouse/ClickHouse/pull/5238) ([Jian Wu](https://github.com/janplus))
+- The `In` predicate now generates a `Null` result from `Null` input, like the `Equal` function. [\#5152](https://github.com/ClickHouse/ClickHouse/pull/5152) ([Jian Wu](https://github.com/janplus))
+- Check the time limit every (flush\_interval / poll\_timeout) number of rows from Kafka. This allows breaking the reading from the Kafka consumer more frequently and checking the time limits for the top-level streams. [\#5249](https://github.com/ClickHouse/ClickHouse/pull/5249) ([Ivan](https://github.com/abyss7))
+- Link rdkafka with bundled SASL. It should allow using SASL SCRAM authentication. [\#5253](https://github.com/ClickHouse/ClickHouse/pull/5253) ([Ivan](https://github.com/abyss7))
+- Batched version of RowRefList for ALL JOINS. [\#5267](https://github.com/ClickHouse/ClickHouse/pull/5267) ([Artem Zuikov](https://github.com/4ertus2))
+- clickhouse-server: more informative listen error messages. [\#5268](https://github.com/ClickHouse/ClickHouse/pull/5268) ([proller](https://github.com/proller))
+- Support dictionaries in clickhouse-copier for functions in `` [\#5270](https://github.com/ClickHouse/ClickHouse/pull/5270) ([proller](https://github.com/proller))
+- Add a new setting `kafka_commit_every_batch` to regulate the Kafka committing policy. It allows setting the commit mode: after every batch of messages is handled, or after the whole block is written to the storage. It's a trade-off between losing some messages or reading them twice in some extreme situations. [\#5308](https://github.com/ClickHouse/ClickHouse/pull/5308) ([Ivan](https://github.com/abyss7))
+- Make `windowFunnel` support other unsigned integer types. [\#5320](https://github.com/ClickHouse/ClickHouse/pull/5320) ([sundyli](https://github.com/sundy-li))
+- Allow shadowing the virtual column `_table` in the Merge engine. [\#5325](https://github.com/ClickHouse/ClickHouse/pull/5325) ([Ivan](https://github.com/abyss7))
+- Make the `sequenceMatch` aggregate function support other unsigned integer types. [\#5339](https://github.com/ClickHouse/ClickHouse/pull/5339) ([sundyli](https://github.com/sundy-li))
+- Better error messages if a checksum mismatch is most likely caused by hardware failures. [\#5355](https://github.com/ClickHouse/ClickHouse/pull/5355) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Check that underlying tables support sampling for `StorageMerge`. [\#5366](https://github.com/ClickHouse/ClickHouse/pull/5366) ([Ivan](https://github.com/abyss7))
+- Close MySQL connections after their usage in external dictionaries. It is related to issue \#893. [\#5395](https://github.com/ClickHouse/ClickHouse/pull/5395) ([Clément Rodriguez](https://github.com/clemrodriguez))
+- Improvements of the MySQL wire protocol. Changed the name of the format to MySQLWire. Using RAII for calling RSA\_free. Disabling SSL if the context cannot be created. [\#5419](https://github.com/ClickHouse/ClickHouse/pull/5419) ([Yuriy Baranov](https://github.com/yurriy))
+- clickhouse-client: allow running with an inaccessible history file (read-only, no disk space, file is a directory, …). [\#5431](https://github.com/ClickHouse/ClickHouse/pull/5431) ([proller](https://github.com/proller))
+- Respect query settings in asynchronous INSERTs into Distributed tables. [\#4936](https://github.com/ClickHouse/ClickHouse/pull/4936) ([TCeason](https://github.com/TCeason))
+- Renamed functions `leastSqr` to `simpleLinearRegression`, `LinearRegression` to `linearRegression`, `LogisticRegression` to `logisticRegression`. [\#5391](https://github.com/ClickHouse/ClickHouse/pull/5391) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
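+
+A minimal illustration of the reworked JSON functions described above; the JSON literals are taken from the function documentation, everything else is made up:
+
+```sql
+-- JSONExtract takes the result type as its last argument.
+SELECT
+    JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 'Array(Float64)') AS b,
+    -- JSONExtractRaw returns the unparsed JSON fragment, here '[-100, 200.0, 300]'.
+    JSONExtractRaw('{"a": "hello", "b": [-100, 200.0, 300]}', 'b') AS raw;
+```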
+
+#### Performance Improvements {#performance-improvements}
+
+- Parallelize processing of parts in non-replicated MergeTree tables in ALTER MODIFY queries. [\#4639](https://github.com/ClickHouse/ClickHouse/pull/4639) ([Ivan Kush](https://github.com/IvanKush))
+- Optimizations in regular expression extraction. [\#5193](https://github.com/ClickHouse/ClickHouse/pull/5193) [\#5191](https://github.com/ClickHouse/ClickHouse/pull/5191) ([Danila Kutenin](https://github.com/danlark1))
+- Do not add right join key columns to the join result. [\#5260](https://github.com/ClickHouse/ClickHouse/pull/5260) ([Artem Zuikov](https://github.com/4ertus2))
+- Freeze the Kafka buffer after the first empty response. It avoids multiple invocations of `ReadBuffer::next()` for an empty result in some row-parsing streams. [\#5283](https://github.com/ClickHouse/ClickHouse/pull/5283) ([Ivan](https://github.com/abyss7))
+- `concat` function optimization for multiple arguments. [\#5357](https://github.com/ClickHouse/ClickHouse/pull/5357) ([Danila Kutenin](https://github.com/danlark1))
+- Query optimisation. Allow pushing down IN statements while rewriting comma/cross joins into inner ones. [\#5396](https://github.com/ClickHouse/ClickHouse/pull/5396) ([Artem Zuikov](https://github.com/4ertus2))
+- Upgrade our LZ4 implementation with the reference one to have faster decompression. [\#5070](https://github.com/ClickHouse/ClickHouse/pull/5070) ([Danila Kutenin](https://github.com/danlark1))
+- Implemented MSD radix sort (based on kxsort), and partial sorting. [\#5129](https://github.com/ClickHouse/ClickHouse/pull/5129) ([Evgenii Pravda](https://github.com/kvinty))
+
+#### Bug Fixes {#bug-fixes}
+
+- Fix pushing required columns with JOIN. [\#5192](https://github.com/ClickHouse/ClickHouse/pull/5192) ([Winter Zhang](https://github.com/zhang2014))
+- Fixed a bug where, when ClickHouse is run by systemd, the command `sudo service clickhouse-server forcerestart` was not working as expected. [\#5204](https://github.com/ClickHouse/ClickHouse/pull/5204) ([proller](https://github.com/proller))
+- Fix HTTP error codes in DataPartsExchange (the interserver HTTP server on port 9009 always returned code 200, even on errors). [\#5216](https://github.com/ClickHouse/ClickHouse/pull/5216) ([proller](https://github.com/proller))
+- Fix SimpleAggregateFunction for strings longer than MAX\_SMALL\_STRING\_SIZE. [\#5311](https://github.com/ClickHouse/ClickHouse/pull/5311) ([Azat Khuzhin](https://github.com/azat))
+- Fix an error for `Decimal` to `Nullable(Decimal)` conversion. Support other Decimal-to-Decimal conversions (including those with different scales). [\#5350](https://github.com/ClickHouse/ClickHouse/pull/5350) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed FPU clobbering in the simdjson library that led to wrong calculations of the `uniqHLL` and `uniqCombined` aggregate functions and math functions such as `log`. [\#5354](https://github.com/ClickHouse/ClickHouse/pull/5354) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed handling of mixed const/nonconst cases in JSON functions. [\#5435](https://github.com/ClickHouse/ClickHouse/pull/5435) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fix the `retention` function. Now all conditions that are satisfied in a row of data are added to the data state (see the usage sketch after this list). [\#5119](https://github.com/ClickHouse/ClickHouse/pull/5119) ([小路](https://github.com/nicelulu))
+- Fix the result type for `quantileExact` with Decimals. [\#5304](https://github.com/ClickHouse/ClickHouse/pull/5304) ([Artem Zuikov](https://github.com/4ertus2))
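+
+For reference, a hedged sketch of how `retention` is typically called; the table `events` and its columns are hypothetical:
+
+```sql
+-- Per user: did they show up on day 1, and again on the following days?
+SELECT
+    uid,
+    retention(date = '2019-06-01', date = '2019-06-02', date = '2019-06-03') AS r
+FROM events
+GROUP BY uid;
+-- r is an array like [1, 0, 1]: element N is 1 if the N-th condition held
+-- (conditions after the first only count when the first one also held).
+```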
+
+#### Documentation {#documentation}
+
+- Translated the documentation for `CollapsingMergeTree` to Chinese. [\#5168](https://github.com/ClickHouse/ClickHouse/pull/5168) ([张风啸](https://github.com/AlexZFX))
+- Translated some documentation about table engines to Chinese.
+  [\#5134](https://github.com/ClickHouse/ClickHouse/pull/5134)
+  [\#5328](https://github.com/ClickHouse/ClickHouse/pull/5328)
+  ([never lee](https://github.com/neverlee))
+
+#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements}
+
+- Fixed Kafka tests. [\#5139](https://github.com/ClickHouse/ClickHouse/pull/5139) [\#5143](https://github.com/ClickHouse/ClickHouse/pull/5143) [\#5393](https://github.com/ClickHouse/ClickHouse/pull/5393) ([Ivan](https://github.com/abyss7))
+- Moved performance tests to a separate directory for convenience. [\#5158](https://github.com/ClickHouse/ClickHouse/pull/5158) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix performance tests. [\#5255](https://github.com/ClickHouse/ClickHouse/pull/5255) ([alesapin](https://github.com/alesapin))
+- Added a tool to calculate checksums caused by bit flips, to debug hardware problems. [\#5334](https://github.com/ClickHouse/ClickHouse/pull/5334) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Make the runner script more usable. [\#5340](https://github.com/ClickHouse/ClickHouse/pull/5340)[\#5360](https://github.com/ClickHouse/ClickHouse/pull/5360) ([filimonov](https://github.com/filimonov))
+- Add a small instruction on how to write performance tests. [\#5408](https://github.com/ClickHouse/ClickHouse/pull/5408) ([alesapin](https://github.com/alesapin))
+- Add the ability to make substitutions in create, fill and drop queries in performance tests. [\#5367](https://github.com/ClickHouse/ClickHouse/pull/5367) ([Olga Khvostikova](https://github.com/stavrolia))
+
+## ClickHouse Release 19.7 {#clickhouse-release-19-7}
+
+### ClickHouse Release 19.7.5.29, 2019-07-05 {#clickhouse-release-19-7-5-29-2019-07-05}
+
+#### Bug Fix {#bug-fix-25}
+
+- Fix a performance regression in some queries with JOIN. [\#5192](https://github.com/ClickHouse/ClickHouse/pull/5192) ([Winter Zhang](https://github.com/zhang2014))
+
+### ClickHouse Release 19.7.5.27, 2019-06-09 {#clickhouse-release-19-7-5-27-2019-06-09}
+
+#### New Features {#new-features-1}
+
+- Added bitmap related functions `bitmapHasAny` and `bitmapHasAll`, analogous to the `hasAny` and `hasAll` functions for arrays (see the example after this item). [\#5279](https://github.com/ClickHouse/ClickHouse/pull/5279) ([Sergi Vladykin](https://github.com/svladykin))
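+
+A quick illustration of the two new bitmap predicates (the values are made up):
+
+```sql
+SELECT
+    bitmapHasAny(bitmapBuild([1, 2, 3]), bitmapBuild([3, 4, 5])) AS has_any,  -- 1: they share 3
+    bitmapHasAll(bitmapBuild([1, 2, 3]), bitmapBuild([3, 4, 5])) AS has_all;  -- 0: 4 and 5 are missing
+```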
+
+#### Bug Fixes {#bug-fixes-1}
+
+- Fix segfault on the `minmax` index with Null values. [\#5246](https://github.com/ClickHouse/ClickHouse/pull/5246) ([Nikita Vasilev](https://github.com/nikvas0))
+- Mark all input columns in LIMIT as required output. It fixes the ‘Not found column’ error in some distributed queries. [\#5407](https://github.com/ClickHouse/ClickHouse/pull/5407) ([Constantin S. Pan](https://github.com/kvap))
+- Fix the “Column ‘0’ already exists” error in `SELECT .. PREWHERE` on a column with DEFAULT. [\#5397](https://github.com/ClickHouse/ClickHouse/pull/5397) ([proller](https://github.com/proller))
+- Fix the `ALTER MODIFY TTL` query on `ReplicatedMergeTree`. [\#5539](https://github.com/ClickHouse/ClickHouse/pull/5539/commits) ([Anton Popov](https://github.com/CurtizJ))
+- Don't crash the server when Kafka consumers have failed to start. [\#5285](https://github.com/ClickHouse/ClickHouse/pull/5285) ([Ivan](https://github.com/abyss7))
+- Fixed bitmap functions producing wrong results. [\#5359](https://github.com/ClickHouse/ClickHouse/pull/5359) ([Andy Yang](https://github.com/andyyzh))
+- Fix element\_count for hashed dictionaries (do not include duplicates). [\#5440](https://github.com/ClickHouse/ClickHouse/pull/5440) ([Azat Khuzhin](https://github.com/azat))
+- Use the contents of the environment variable TZ as the name for the timezone. It helps to correctly detect the default timezone in some cases. [\#5443](https://github.com/ClickHouse/ClickHouse/pull/5443) ([Ivan](https://github.com/abyss7))
+- Do not try to convert integers in `dictGetT` functions, because it doesn't work correctly. Throw an exception instead. [\#5446](https://github.com/ClickHouse/ClickHouse/pull/5446) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix settings in ExternalData HTTP requests. [\#5455](https://github.com/ClickHouse/ClickHouse/pull/5455) ([Danila Kutenin](https://github.com/danlark1))
+- Fix a bug when parts were removed only from the filesystem without dropping them from ZooKeeper. [\#5520](https://github.com/ClickHouse/ClickHouse/pull/5520) ([alesapin](https://github.com/alesapin))
+- Fix segmentation fault in the `bitmapHasAny` function. [\#5528](https://github.com/ClickHouse/ClickHouse/pull/5528) ([Zhichang Yu](https://github.com/yuzhichang))
+- Fixed error when the replication connection pool doesn't retry to resolve the host, even when the DNS cache was dropped. [\#5534](https://github.com/ClickHouse/ClickHouse/pull/5534) ([alesapin](https://github.com/alesapin))
+- Fixed the `DROP INDEX IF EXISTS` query. Now the `ALTER TABLE ... DROP INDEX IF EXISTS ...` query doesn't raise an exception if the provided index does not exist. [\#5524](https://github.com/ClickHouse/ClickHouse/pull/5524) ([Gleb Novikov](https://github.com/NanoBjorn))
+- Fix UNION ALL supertype columns. There were cases with inconsistent data and column types of result columns. [\#5503](https://github.com/ClickHouse/ClickHouse/pull/5503) ([Artem Zuikov](https://github.com/4ertus2))
+- Skip ZNONODE during DDL query processing. Previously, if another node removed the znode in the task queue, the one that did not process it, but already got the list of children, would terminate the DDLWorker thread. [\#5489](https://github.com/ClickHouse/ClickHouse/pull/5489) ([Azat Khuzhin](https://github.com/azat))
+- Fix INSERT into Distributed() tables with MATERIALIZED columns. [\#5429](https://github.com/ClickHouse/ClickHouse/pull/5429) ([Azat Khuzhin](https://github.com/azat))
+
+### ClickHouse Release 19.7.3.9, 2019-05-30 {#clickhouse-release-19-7-3-9-2019-05-30}
+
+#### New Features {#new-features-2}
+
+- Allow limiting the range of settings that can be specified by the user. These constraints can be set up in the user's settings profile. [\#4931](https://github.com/ClickHouse/ClickHouse/pull/4931) ([Vitaly Baranov](https://github.com/vitlibar))
+- Add a second version of the function `groupUniqArray` with an optional `max_size` parameter that limits the size of the resulting array. This behaviour is similar to the `groupArray(max_size)(x)` function (see the example after this list). [\#5026](https://github.com/ClickHouse/ClickHouse/pull/5026) ([Guillaume Tassery](https://github.com/YiuRULE))
+- For TSVWithNames/CSVWithNames input file formats, the column order can now be determined from the file header. This is controlled by the `input_format_with_names_use_header` parameter. [\#5081](https://github.com/ClickHouse/ClickHouse/pull/5081) ([Alexander](https://github.com/Akazz))
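+
+A small sketch of the capped `groupUniqArray`; the table `t` and its columns are hypothetical:
+
+```sql
+-- Collect at most 3 distinct values of x per group (which 3 is not guaranteed).
+SELECT key, groupUniqArray(3)(x) FROM t GROUP BY key;
+```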
+
+#### Bug Fixes {#bug-fixes-2}
+
+- Fix crash with uncompressed\_cache + JOIN during merge (\#5197). [\#5133](https://github.com/ClickHouse/ClickHouse/pull/5133) ([Danila Kutenin](https://github.com/danlark1))
+- Fix segmentation fault in a clickhouse-client query to system tables. \#5066 [\#5127](https://github.com/ClickHouse/ClickHouse/pull/5127) ([Ivan](https://github.com/abyss7))
+- Fix data loss on heavy load via KafkaEngine (\#4736). [\#5080](https://github.com/ClickHouse/ClickHouse/pull/5080) ([Ivan](https://github.com/abyss7))
+- Fixed a very rare data race condition that could happen when executing a query with UNION ALL involving at least two SELECTs from system.columns, system.tables, system.parts, system.parts\_tables or tables of the Merge family, while performing ALTER of columns of the related tables concurrently. [\#5189](https://github.com/ClickHouse/ClickHouse/pull/5189) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Performance Improvements {#performance-improvements-1}
+
+- Use radix sort for sorting by a single numeric column in `ORDER BY` without `LIMIT`. [\#5106](https://github.com/ClickHouse/ClickHouse/pull/5106), [\#4439](https://github.com/ClickHouse/ClickHouse/pull/4439) ([Evgenii Pravda](https://github.com/kvinty), [alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Documentation {#documentation-1}
+
+- Translated documentation for some table engines to Chinese.
+  [\#5107](https://github.com/ClickHouse/ClickHouse/pull/5107),
+  [\#5094](https://github.com/ClickHouse/ClickHouse/pull/5094),
+  [\#5087](https://github.com/ClickHouse/ClickHouse/pull/5087)
+  ([张风啸](https://github.com/AlexZFX)),
+  [\#5068](https://github.com/ClickHouse/ClickHouse/pull/5068) ([never lee](https://github.com/neverlee))
+
+#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-1}
+
+- Print UTF-8 characters properly in `clickhouse-test`. [\#5084](https://github.com/ClickHouse/ClickHouse/pull/5084) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add a command line parameter for clickhouse-client to always load suggestion data. [\#5102](https://github.com/ClickHouse/ClickHouse/pull/5102) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Resolve some of the PVS-Studio warnings. [\#5082](https://github.com/ClickHouse/ClickHouse/pull/5082) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Update LZ4. [\#5040](https://github.com/ClickHouse/ClickHouse/pull/5040) ([Danila Kutenin](https://github.com/danlark1))
+- Add gperf to the build requirements for the upcoming pull request \#5030. [\#5110](https://github.com/ClickHouse/ClickHouse/pull/5110) ([proller](https://github.com/proller))
+
+## ClickHouse Release 19.6 {#clickhouse-release-19-6}
+
+### ClickHouse Release 19.6.3.18, 2019-06-13 {#clickhouse-release-19-6-3-18-2019-06-13}
+
+#### Bug Fixes {#bug-fixes-3}
+
+- Fix condition pushdown for queries from the table functions `mysql` and `odbc` and the corresponding table engines. This fixes \#3540 and \#2384. [\#5313](https://github.com/ClickHouse/ClickHouse/pull/5313) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix a deadlock in ZooKeeper. [\#5297](https://github.com/ClickHouse/ClickHouse/pull/5297) ([github1youlc](https://github.com/github1youlc))
+- Allow quoted decimals in CSV. [\#5284](https://github.com/ClickHouse/ClickHouse/pull/5284) ([Artem Zuikov](https://github.com/4ertus2))
+- Disallow conversion from float Inf/NaN into Decimals (throw an exception). [\#5282](https://github.com/ClickHouse/ClickHouse/pull/5282) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix a data race in the RENAME query. [\#5247](https://github.com/ClickHouse/ClickHouse/pull/5247) ([Winter Zhang](https://github.com/zhang2014))
+- Temporarily disable LFAlloc. Usage of LFAlloc might lead to a lot of MAP\_FAILED when allocating UncompressedCache and, as a result, to crashes of queries on high-loaded servers. [cfdba93](https://github.com/ClickHouse/ClickHouse/commit/cfdba938ce22f16efeec504f7f90206a515b1280) ([Danila Kutenin](https://github.com/danlark1))
+
+### ClickHouse Release 19.6.2.11, 2019-05-13 {#clickhouse-release-19-6-2-11-2019-05-13}
+
+#### New Features {#new-features-3}
+
+- TTL expressions for columns and tables (see the sketch after this list). [\#4212](https://github.com/ClickHouse/ClickHouse/pull/4212) ([Anton Popov](https://github.com/CurtizJ))
+- Added support for `brotli` compression of HTTP responses (Accept-Encoding: br). [\#4388](https://github.com/ClickHouse/ClickHouse/pull/4388) ([Mikhail](https://github.com/fandyushin))
+- Added a new function `isValidUTF8` for checking whether a set of bytes is correctly UTF-8 encoded. [\#4934](https://github.com/ClickHouse/ClickHouse/pull/4934) ([Danila Kutenin](https://github.com/danlark1))
+- Add a new load balancing policy `first_or_random` which sends queries to the first specified host and, if it's inaccessible, sends queries to random hosts of the shard. Useful for cross-replication topology setups. [\#5012](https://github.com/ClickHouse/ClickHouse/pull/5012) ([nvartolomei](https://github.com/nvartolomei))
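+
+A minimal sketch of the new TTL syntax; the table and column names are hypothetical:
+
+```sql
+CREATE TABLE events
+(
+    d Date,
+    -- column TTL: values of `b` are reset to defaults a month after `d`
+    b Int32 TTL d + INTERVAL 1 MONTH
+)
+ENGINE = MergeTree
+ORDER BY d
+TTL d + INTERVAL 3 MONTH;  -- table TTL: whole rows expire after three months
+```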
+
+#### Experimental Features {#experimental-features-1}
+
+- Added the setting `index_granularity_bytes` (adaptive index granularity) for the MergeTree\* family of tables. [\#4826](https://github.com/ClickHouse/ClickHouse/pull/4826) ([alesapin](https://github.com/alesapin))
+
+#### Improvements {#improvements-1}
+
+- Added support for non-constant and negative size and length arguments for the function `substringUTF8`. [\#4989](https://github.com/ClickHouse/ClickHouse/pull/4989) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Disable push-down to the right table in a left join, to the left table in a right join, and to both tables in a full join. This fixes wrong JOIN results in some cases. [\#4846](https://github.com/ClickHouse/ClickHouse/pull/4846) ([Ivan](https://github.com/abyss7))
+- `clickhouse-copier`: auto upload the task configuration from the `--task-file` option. [\#4876](https://github.com/ClickHouse/ClickHouse/pull/4876) ([proller](https://github.com/proller))
+- Added a typo handler for the storage factory and the table functions factory. [\#4891](https://github.com/ClickHouse/ClickHouse/pull/4891) ([Danila Kutenin](https://github.com/danlark1))
+- Support asterisks and qualified asterisks for multiple joins without subqueries. [\#4898](https://github.com/ClickHouse/ClickHouse/pull/4898) ([Artem Zuikov](https://github.com/4ertus2))
+- Make the missing-column error message more user friendly. [\#4915](https://github.com/ClickHouse/ClickHouse/pull/4915) ([Artem Zuikov](https://github.com/4ertus2))
+
+#### Performance Improvements {#performance-improvements-2}
+
+- Significant speedup of ASOF JOIN. [\#4924](https://github.com/ClickHouse/ClickHouse/pull/4924) ([Martijn Bakker](https://github.com/Gladdy))
+
+#### Backward Incompatible Changes {#backward-incompatible-changes}
+
+- The HTTP header `Query-Id` was renamed to `X-ClickHouse-Query-Id` for consistency. [\#4972](https://github.com/ClickHouse/ClickHouse/pull/4972) ([Mikhail](https://github.com/fandyushin))
+
+#### Bug Fixes {#bug-fixes-4}
+
+- Fixed a potential null pointer dereference in `clickhouse-copier`. [\#4900](https://github.com/ClickHouse/ClickHouse/pull/4900) ([proller](https://github.com/proller))
+- Fixed an error in queries with JOIN + ARRAY JOIN. [\#4938](https://github.com/ClickHouse/ClickHouse/pull/4938) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed hanging on start of the server when a dictionary depends on another dictionary via a database with ENGINE=Dictionary. [\#4962](https://github.com/ClickHouse/ClickHouse/pull/4962) ([Vitaly Baranov](https://github.com/vitlibar))
+- Partially fix distributed\_product\_mode = local. It's possible to allow columns of local tables in where/having/order by/… via table aliases. Throw an exception if the table does not have an alias. It's not possible to access the columns without table aliases yet. [\#4986](https://github.com/ClickHouse/ClickHouse/pull/4986) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix potentially wrong results for `SELECT DISTINCT` with `JOIN`. [\#5001](https://github.com/ClickHouse/ClickHouse/pull/5001) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed a very rare data race condition that could happen when executing a query with UNION ALL involving at least two SELECTs from system.columns, system.tables, system.parts, system.parts\_tables or tables of the Merge family, while performing ALTER of columns of the related tables concurrently. [\#5189](https://github.com/ClickHouse/ClickHouse/pull/5189) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-2}
+
+- Fixed test failures when running clickhouse-server on a different host. [\#4713](https://github.com/ClickHouse/ClickHouse/pull/4713) ([Vasily Nemkov](https://github.com/Enmk))
+- clickhouse-test: disable color control sequences in non-tty environments. [\#4937](https://github.com/ClickHouse/ClickHouse/pull/4937) ([alesapin](https://github.com/alesapin))
+- clickhouse-test: allow using any test database (remove the `test.` qualification where possible). [\#5008](https://github.com/ClickHouse/ClickHouse/pull/5008) ([proller](https://github.com/proller))
+- Fix UBSan errors. [\#5037](https://github.com/ClickHouse/ClickHouse/pull/5037) ([Vitaly Baranov](https://github.com/vitlibar))
+- Yandex LFAlloc was added to ClickHouse to allocate MarkCache and UncompressedCache data in different ways to catch segfaults more reliably. [\#4995](https://github.com/ClickHouse/ClickHouse/pull/4995) ([Danila Kutenin](https://github.com/danlark1))
+- A python util to help with backports and changelogs. [\#4949](https://github.com/ClickHouse/ClickHouse/pull/4949) ([Ivan](https://github.com/abyss7))
+
+## ClickHouse Release 19.5 {#clickhouse-release-19-5}
+
+### ClickHouse Release 19.5.4.22, 2019-05-13 {#clickhouse-release-19-5-4-22-2019-05-13}
+
+#### Bug Fixes {#bug-fixes-5}
+
+- Fixed a possible crash in bitmap\* functions. [\#5220](https://github.com/ClickHouse/ClickHouse/pull/5220) [\#5228](https://github.com/ClickHouse/ClickHouse/pull/5228) ([Andy Yang](https://github.com/andyyzh))
+- Fixed a very rare data race condition that could happen when executing a query with UNION ALL involving at least two SELECTs from system.columns, system.tables, system.parts, system.parts\_tables or tables of the Merge family, while performing ALTER of columns of the related tables concurrently. [\#5189](https://github.com/ClickHouse/ClickHouse/pull/5189) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed the error `Set for IN is not created yet in case of using single LowCardinality column in the left part of IN`. This error happened if a LowCardinality column was part of the primary key. \#5031 [\#5154](https://github.com/ClickHouse/ClickHouse/pull/5154) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Modification of the retention function: if a row satisfies both the first and the N-th condition, only the first satisfied condition was added to the data state. Now all conditions that are satisfied in a row of data are added to the data state. [\#5119](https://github.com/ClickHouse/ClickHouse/pull/5119) ([小路](https://github.com/nicelulu))
+
+### ClickHouse Release 19.5.3.8, 2019-04-18 {#clickhouse-release-19-5-3-8-2019-04-18}
+
+#### Bug Fixes {#bug-fixes-6}
+
+- Fixed the type of the setting `max_partitions_per_insert_block` from boolean to UInt64. [\#5028](https://github.com/ClickHouse/ClickHouse/pull/5028) ([Mohammad Hossein Sekhavat](https://github.com/mhsekhavat))
+
+### ClickHouse Release 19.5.2.6, 2019-04-15 {#clickhouse-release-19-5-2-6-2019-04-15}
+
+#### New Features {#new-features-4}
+
+- [Hyperscan](https://github.com/intel/hyperscan) multiple regular expression matching was added (functions `multiMatchAny`, `multiMatchAnyIndex`, `multiFuzzyMatchAny`, `multiFuzzyMatchAnyIndex`). [\#4780](https://github.com/ClickHouse/ClickHouse/pull/4780), [\#4841](https://github.com/ClickHouse/ClickHouse/pull/4841) ([Danila Kutenin](https://github.com/danlark1))
+- The `multiSearchFirstPosition` function was added. [\#4780](https://github.com/ClickHouse/ClickHouse/pull/4780) ([Danila Kutenin](https://github.com/danlark1))
+- Implement a predefined expression filter per row for tables. [\#4792](https://github.com/ClickHouse/ClickHouse/pull/4792) ([Ivan](https://github.com/abyss7))
+- A new type of data skipping indices based on bloom filters (can be used for the `equal`, `in` and `like` functions). [\#4499](https://github.com/ClickHouse/ClickHouse/pull/4499) ([Nikita Vasilev](https://github.com/nikvas0))
+- Added `ASOF JOIN` which allows running queries that join to the most recent value known (see the sketch after this list). [\#4774](https://github.com/ClickHouse/ClickHouse/pull/4774) [\#4867](https://github.com/ClickHouse/ClickHouse/pull/4867) [\#4863](https://github.com/ClickHouse/ClickHouse/pull/4863) [\#4875](https://github.com/ClickHouse/ClickHouse/pull/4875) ([Martijn Bakker](https://github.com/Gladdy), [Artem Zuikov](https://github.com/4ertus2))
+- Rewrite multiple `COMMA JOIN` to `CROSS JOIN`. Then rewrite them to `INNER JOIN` if possible. [\#4661](https://github.com/ClickHouse/ClickHouse/pull/4661) ([Artem Zuikov](https://github.com/4ertus2))
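+
+A rough sketch of `ASOF JOIN` in its original `USING` form; the tables `events` and `quotes` are hypothetical, and the exact clause shapes have evolved across versions:
+
+```sql
+-- For each event, take the latest quote whose time is <= the event time.
+-- The last column in USING is the inequality (asof) column.
+SELECT e.id, e.time, q.price
+FROM events AS e
+ASOF JOIN quotes AS q USING (id, time);
+```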
+
+#### Improvement {#improvement-9}
+
+- `topK` and `topKWeighted` now support a custom `loadFactor` (fixes issue [\#4252](https://github.com/ClickHouse/ClickHouse/issues/4252)). [\#4634](https://github.com/ClickHouse/ClickHouse/pull/4634) ([Kirill Danshin](https://github.com/kirillDanshin))
+- Allow using `parallel_replicas_count > 1` even for tables without sampling (the setting is simply ignored for them). In previous versions it led to an exception. [\#4637](https://github.com/ClickHouse/ClickHouse/pull/4637) ([Alexey Elymanov](https://github.com/digitalist))
+- Support `CREATE OR REPLACE VIEW`. Allows creating a view or setting a new definition in a single statement (see the example after this list). [\#4654](https://github.com/ClickHouse/ClickHouse/pull/4654) ([Boris Granveaud](https://github.com/bgranvea))
+- The `Buffer` table engine now supports `PREWHERE`. [\#4671](https://github.com/ClickHouse/ClickHouse/pull/4671) ([Yangkuan Liu](https://github.com/LiuYangkuan))
+- Add the ability to start a replicated table without metadata in ZooKeeper in `readonly` mode. [\#4691](https://github.com/ClickHouse/ClickHouse/pull/4691) ([alesapin](https://github.com/alesapin))
+- Fixed flicker of the progress bar in clickhouse-client. The issue was most noticeable when using `FORMAT Null` with streaming queries. [\#4811](https://github.com/ClickHouse/ClickHouse/pull/4811) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Allow disabling functions with the `hyperscan` library on a per-user basis to limit potentially excessive and uncontrolled resource usage. [\#4816](https://github.com/ClickHouse/ClickHouse/pull/4816) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add version number logging in all errors. [\#4824](https://github.com/ClickHouse/ClickHouse/pull/4824) ([proller](https://github.com/proller))
+- Added a restriction to the `multiMatch` functions which requires the string size to fit into `unsigned int`. Also added a number-of-arguments limit to the `multiSearch` functions. [\#4834](https://github.com/ClickHouse/ClickHouse/pull/4834) ([Danila Kutenin](https://github.com/danlark1))
+- Improved usage of scratch space and error handling in Hyperscan. [\#4866](https://github.com/ClickHouse/ClickHouse/pull/4866) ([Danila Kutenin](https://github.com/danlark1))
+- Fill `system.graphite_detentions` from the table config of `*GraphiteMergeTree` engine tables. [\#4584](https://github.com/ClickHouse/ClickHouse/pull/4584) ([Mikhail f. Shiryaev](https://github.com/Felixoid))
+- Rename the `trigramDistance` function to `ngramDistance` and add more functions with `CaseInsensitive` and `UTF`. [\#4602](https://github.com/ClickHouse/ClickHouse/pull/4602) ([Danila Kutenin](https://github.com/danlark1))
+- Improved data skipping indices calculation. [\#4640](https://github.com/ClickHouse/ClickHouse/pull/4640) ([Nikita Vasilev](https://github.com/nikvas0))
+- Keep ordinary, `DEFAULT`, `MATERIALIZED` and `ALIAS` columns in a single list (fixes issue [\#2867](https://github.com/ClickHouse/ClickHouse/issues/2867)). [\#4707](https://github.com/ClickHouse/ClickHouse/pull/4707) ([Alex Zatelepin](https://github.com/ztlpn))
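+
+The `CREATE OR REPLACE VIEW` shorthand in one line; the view and table names are hypothetical:
+
+```sql
+-- Creates `v` if it doesn't exist, otherwise replaces its definition.
+CREATE OR REPLACE VIEW v AS SELECT id, count() AS c FROM t GROUP BY id;
+```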
+
+#### Bug Fix {#bug-fix-26}
+
+- Avoid `std::terminate` in case of memory allocation failure. Now a `std::bad_alloc` exception is thrown as expected. [\#4665](https://github.com/ClickHouse/ClickHouse/pull/4665) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed reading of CapnProto from a buffer. Sometimes files weren't loaded successfully over HTTP. [\#4674](https://github.com/ClickHouse/ClickHouse/pull/4674) ([Vladislav](https://github.com/smirnov-vs))
+- Fix the error `Unknown log entry type: 0` after an `OPTIMIZE TABLE FINAL` query. [\#4683](https://github.com/ClickHouse/ClickHouse/pull/4683) ([Amos Bird](https://github.com/amosbird))
+- Wrong arguments to the `hasAny` or `hasAll` functions could lead to a segfault. [\#4698](https://github.com/ClickHouse/ClickHouse/pull/4698) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- A deadlock could happen while executing a `DROP DATABASE dictionary` query. [\#4701](https://github.com/ClickHouse/ClickHouse/pull/4701) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix undefined behavior in the `median` and `quantile` functions. [\#4702](https://github.com/ClickHouse/ClickHouse/pull/4702) ([hcz](https://github.com/hczhcz))
+- Fix compression level detection when `network_compression_method` is in lowercase. Broken in v19.1. [\#4706](https://github.com/ClickHouse/ClickHouse/pull/4706) ([proller](https://github.com/proller))
+- Fixed ignorance of the `UTC` setting (fixes issue [\#4658](https://github.com/ClickHouse/ClickHouse/issues/4658)). [\#4718](https://github.com/ClickHouse/ClickHouse/pull/4718) ([proller](https://github.com/proller))
+- Fix `histogram` function behaviour with `Distributed` tables. [\#4741](https://github.com/ClickHouse/ClickHouse/pull/4741) ([olegkv](https://github.com/olegkv))
+- Fixed a TSan report about `destroy of a locked mutex`. [\#4742](https://github.com/ClickHouse/ClickHouse/pull/4742) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a TSan report on shutdown due to a race condition in system logs usage. Fixed potential use-after-free on shutdown when part\_log is enabled. [\#4758](https://github.com/ClickHouse/ClickHouse/pull/4758) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix recheck of parts in `ReplicatedMergeTreeAlterThread` in case of errors. [\#4772](https://github.com/ClickHouse/ClickHouse/pull/4772) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Arithmetic operations on intermediate aggregate function states were not working for constant arguments (such as subquery results). [\#4776](https://github.com/ClickHouse/ClickHouse/pull/4776) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Always quote column names in metadata. Otherwise it was impossible to create a table with a column named `index` (the server wouldn't restart due to a malformed `ATTACH` query in metadata). [\#4782](https://github.com/ClickHouse/ClickHouse/pull/4782) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix crash in `ALTER ... MODIFY ORDER BY` on a `Distributed` table. [\#4790](https://github.com/ClickHouse/ClickHouse/pull/4790) ([TCeason](https://github.com/TCeason))
+- Fix segfault in `JOIN ON` with `enable_optimize_predicate_expression` enabled. [\#4794](https://github.com/ClickHouse/ClickHouse/pull/4794) ([Winter Zhang](https://github.com/zhang2014))
+- Fix a bug with adding an extraneous row after consuming a protobuf message from Kafka. [\#4808](https://github.com/ClickHouse/ClickHouse/pull/4808) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fix crash in `JOIN` on nullable vs not-nullable columns. Fix `NULLs` in right keys in `ANY JOIN` + `join_use_nulls`. [\#4815](https://github.com/ClickHouse/ClickHouse/pull/4815) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix segmentation fault in `clickhouse-copier`. [\#4835](https://github.com/ClickHouse/ClickHouse/pull/4835) ([proller](https://github.com/proller))
+- Fixed a race condition in `SELECT` from `system.tables` if the table is renamed or altered concurrently. [\#4836](https://github.com/ClickHouse/ClickHouse/pull/4836) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a data race when fetching a data part that is already obsolete. [\#4839](https://github.com/ClickHouse/ClickHouse/pull/4839) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a rare data race that can happen during `RENAME` of tables of the MergeTree family. [\#4844](https://github.com/ClickHouse/ClickHouse/pull/4844) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed segmentation fault in the `arrayIntersect` function. A segmentation fault could happen if the function was called with mixed constant and ordinary arguments. [\#4847](https://github.com/ClickHouse/ClickHouse/pull/4847) ([Lixiang Qian](https://github.com/fancyqlx))
+- Fixed reading from `Array(LowCardinality)` columns in the rare case when the column contained a long sequence of empty arrays. [\#4850](https://github.com/ClickHouse/ClickHouse/pull/4850) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix crash in `FULL/RIGHT JOIN` when joining on nullable vs not nullable. [\#4855](https://github.com/ClickHouse/ClickHouse/pull/4855) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix the `No message received` exception while fetching parts between replicas. [\#4856](https://github.com/ClickHouse/ClickHouse/pull/4856) ([alesapin](https://github.com/alesapin))
+- Fixed the `arrayIntersect` function giving wrong results in the case of several repeated values in a single array. [\#4871](https://github.com/ClickHouse/ClickHouse/pull/4871) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix a race condition during concurrent `ALTER COLUMN` queries that could lead to a server crash (fixes issue [\#3421](https://github.com/ClickHouse/ClickHouse/issues/3421)). [\#4592](https://github.com/ClickHouse/ClickHouse/pull/4592) ([Alex Zatelepin](https://github.com/ztlpn))
+- Fix incorrect results in `FULL/RIGHT JOIN` with a const column. [\#4723](https://github.com/ClickHouse/ClickHouse/pull/4723) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix duplicates in `GLOBAL JOIN` with an asterisk. [\#4705](https://github.com/ClickHouse/ClickHouse/pull/4705) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix parameter deduction in `ALTER MODIFY` of a column `CODEC` when the column type is not specified. [\#4883](https://github.com/ClickHouse/ClickHouse/pull/4883) ([alesapin](https://github.com/alesapin))
+- The functions `cutQueryStringAndFragment()` and `queryStringAndFragment()` now work correctly when the `URL` contains a fragment and no query. [\#4894](https://github.com/ClickHouse/ClickHouse/pull/4894) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fix a rare bug when setting `min_bytes_to_use_direct_io` is greater than zero, which occurs when a thread has to seek backward in a column file. [\#4897](https://github.com/ClickHouse/ClickHouse/pull/4897) ([alesapin](https://github.com/alesapin))
+- Fix wrong argument types for aggregate functions with `LowCardinality` arguments (fixes issue [\#4919](https://github.com/ClickHouse/ClickHouse/issues/4919)). [\#4922](https://github.com/ClickHouse/ClickHouse/pull/4922) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix wrong name qualification in `GLOBAL JOIN`. [\#4969](https://github.com/ClickHouse/ClickHouse/pull/4969) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix the `toISOWeek` function result for year 1970. [\#4988](https://github.com/ClickHouse/ClickHouse/pull/4988) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix `DROP`, `TRUNCATE` and `OPTIMIZE` query duplication when executed `ON CLUSTER` for the `ReplicatedMergeTree*` table family. [\#4991](https://github.com/ClickHouse/ClickHouse/pull/4991) ([alesapin](https://github.com/alesapin))
+
+#### Backward Incompatible Change {#backward-incompatible-change-8}
+
+- Renamed the setting `insert_sample_with_metadata` to `input_format_defaults_for_omitted_fields`. [\#4771](https://github.com/ClickHouse/ClickHouse/pull/4771) ([Artem Zuikov](https://github.com/4ertus2))
+- Added the setting `max_partitions_per_insert_block` (with value 100 by default). If an inserted block contains a larger number of partitions, an exception is thrown. Set it to 0 if you want to remove the limit (not recommended; see the sketch after this list). [\#4845](https://github.com/ClickHouse/ClickHouse/pull/4845) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Multi-search functions were renamed (`multiPosition` to `multiSearchAllPositions`, `multiSearch` to `multiSearchAny`, `firstMatch` to `multiSearchFirstIndex`). [\#4780](https://github.com/ClickHouse/ClickHouse/pull/4780) ([Danila Kutenin](https://github.com/danlark1))
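+
+How the new partition limit behaves in practice; the table names and the chosen value are illustrative:
+
+```sql
+-- Raise the limit for the session before a wide INSERT (names are hypothetical):
+SET max_partitions_per_insert_block = 1000;
+INSERT INTO t SELECT * FROM source;  -- would throw if one block covered >1000 partitions
+```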
+
+#### Performance Improvement {#performance-improvement-6}
+
+- Optimize the Volnitsky searcher by inlining, giving about a 5-10% search improvement for queries with many needles or many similar bigrams. [\#4862](https://github.com/ClickHouse/ClickHouse/pull/4862) ([Danila Kutenin](https://github.com/danlark1))
+- Fix a performance issue when setting `use_uncompressed_cache` is greater than zero, which appeared when all read data was contained in the cache. [\#4913](https://github.com/ClickHouse/ClickHouse/pull/4913) ([alesapin](https://github.com/alesapin))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-10}
+
+- Add memory protection for the mark cache and index. This allows finding memory-stomping bugs in cases where ASan and MSan cannot. [\#4632](https://github.com/ClickHouse/ClickHouse/pull/4632) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add support for the cmake variables `ENABLE_PROTOBUF`, `ENABLE_PARQUET` and `ENABLE_BROTLI`, which allow enabling/disabling the above features (the same as we can do for librdkafka, mysql, etc). [\#4669](https://github.com/ClickHouse/ClickHouse/pull/4669) ([Silviu Caragea](https://github.com/silviucpp))
+- Add the ability to print the process list and stacktraces of all threads if some queries are hung after a test run. [\#4675](https://github.com/ClickHouse/ClickHouse/pull/4675) ([alesapin](https://github.com/alesapin))
+- Add retries on `Connection loss` errors in `clickhouse-test`. [\#4682](https://github.com/ClickHouse/ClickHouse/pull/4682) ([alesapin](https://github.com/alesapin))
+- Add a FreeBSD build and a build with thread sanitizer to the packager script. [\#4712](https://github.com/ClickHouse/ClickHouse/pull/4712) [\#4748](https://github.com/ClickHouse/ClickHouse/pull/4748) ([alesapin](https://github.com/alesapin))
+- Now the user is asked for a password for the user `'default'` during installation. [\#4725](https://github.com/ClickHouse/ClickHouse/pull/4725) ([proller](https://github.com/proller))
+- Suppress warnings from the `rdkafka` library. [\#4740](https://github.com/ClickHouse/ClickHouse/pull/4740) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Allow building without SSL. [\#4750](https://github.com/ClickHouse/ClickHouse/pull/4750) ([proller](https://github.com/proller))
+- Add a way to launch the clickhouse-server image from a custom user. [\#4753](https://github.com/ClickHouse/ClickHouse/pull/4753) ([Mikhail f. Shiryaev](https://github.com/Felixoid))
+- Upgrade contrib boost to 1.69. [\#4793](https://github.com/ClickHouse/ClickHouse/pull/4793) ([proller](https://github.com/proller))
+- Disable usage of `mremap` when compiled with Thread Sanitizer. Surprisingly enough, TSan does not intercept `mremap` (though it does intercept `mmap` and `munmap`), which leads to false positives. Fixed TSan reports in stateful tests. [\#4859](https://github.com/ClickHouse/ClickHouse/pull/4859) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add test checking for using format schemas via the HTTP interface. [\#4864](https://github.com/ClickHouse/ClickHouse/pull/4864) ([Vitaly Baranov](https://github.com/vitlibar))
+
+## ClickHouse Release 19.4 {#clickhouse-release-19-4}
+
+### ClickHouse Release 19.4.4.33, 2019-04-17 {#clickhouse-release-19-4-4-33-2019-04-17}
+
+#### Bug Fixes {#bug-fixes-7}
+
+- Avoid `std::terminate` in case of memory allocation failure. Now a `std::bad_alloc` exception is thrown as expected. [\#4665](https://github.com/ClickHouse/ClickHouse/pull/4665) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed reading of CapnProto from a buffer. Sometimes files weren't loaded successfully over HTTP. [\#4674](https://github.com/ClickHouse/ClickHouse/pull/4674) ([Vladislav](https://github.com/smirnov-vs))
+- Fix the error `Unknown log entry type: 0` after an `OPTIMIZE TABLE FINAL` query. [\#4683](https://github.com/ClickHouse/ClickHouse/pull/4683) ([Amos Bird](https://github.com/amosbird))
+- Wrong arguments to the `hasAny` or `hasAll` functions could lead to a segfault. [\#4698](https://github.com/ClickHouse/ClickHouse/pull/4698) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- A deadlock could happen while executing a `DROP DATABASE dictionary` query. [\#4701](https://github.com/ClickHouse/ClickHouse/pull/4701) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix undefined behavior in the `median` and `quantile` functions. [\#4702](https://github.com/ClickHouse/ClickHouse/pull/4702) ([hcz](https://github.com/hczhcz))
+- Fix compression level detection when `network_compression_method` is in lowercase. Broken in v19.1. [\#4706](https://github.com/ClickHouse/ClickHouse/pull/4706) ([proller](https://github.com/proller))
+- Fixed ignorance of the `UTC` setting (fixes issue [\#4658](https://github.com/ClickHouse/ClickHouse/issues/4658)). [\#4718](https://github.com/ClickHouse/ClickHouse/pull/4718) ([proller](https://github.com/proller))
+- Fix `histogram` function behaviour with `Distributed` tables. [\#4741](https://github.com/ClickHouse/ClickHouse/pull/4741) ([olegkv](https://github.com/olegkv))
+- Fixed a TSan report about `destroy of a locked mutex`. [\#4742](https://github.com/ClickHouse/ClickHouse/pull/4742) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a TSan report on shutdown due to a race condition in system logs usage. Fixed potential use-after-free on shutdown when part\_log is enabled. [\#4758](https://github.com/ClickHouse/ClickHouse/pull/4758) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix recheck of parts in `ReplicatedMergeTreeAlterThread` in case of errors. [\#4772](https://github.com/ClickHouse/ClickHouse/pull/4772) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Arithmetic operations on intermediate aggregate function states were not working for constant arguments (such as subquery results). [\#4776](https://github.com/ClickHouse/ClickHouse/pull/4776) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Always quote column names in metadata. Otherwise it was impossible to create a table with a column named `index` (the server wouldn't restart due to a malformed `ATTACH` query in metadata). [\#4782](https://github.com/ClickHouse/ClickHouse/pull/4782) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix crash in `ALTER ... MODIFY ORDER BY` on a `Distributed` table. [\#4790](https://github.com/ClickHouse/ClickHouse/pull/4790) ([TCeason](https://github.com/TCeason))
+- Fix segfault in `JOIN ON` with `enable_optimize_predicate_expression` enabled. [\#4794](https://github.com/ClickHouse/ClickHouse/pull/4794) ([Winter Zhang](https://github.com/zhang2014))
+- Fix a bug with adding an extraneous row after consuming a protobuf message from Kafka. [\#4808](https://github.com/ClickHouse/ClickHouse/pull/4808) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fix segmentation fault in `clickhouse-copier`. [\#4835](https://github.com/ClickHouse/ClickHouse/pull/4835) ([proller](https://github.com/proller))
+- Fixed a race condition in `SELECT` from `system.tables` if the table is renamed or altered concurrently. [\#4836](https://github.com/ClickHouse/ClickHouse/pull/4836) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a data race when fetching a data part that is already obsolete. [\#4839](https://github.com/ClickHouse/ClickHouse/pull/4839) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a rare data race that can happen during `RENAME` of tables of the MergeTree family. [\#4844](https://github.com/ClickHouse/ClickHouse/pull/4844) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed segmentation fault in the `arrayIntersect` function. A segmentation fault could happen if the function was called with mixed constant and ordinary arguments. [\#4847](https://github.com/ClickHouse/ClickHouse/pull/4847) ([Lixiang Qian](https://github.com/fancyqlx))
+- Fixed reading from `Array(LowCardinality)` columns in the rare case when the column contained a long sequence of empty arrays. [\#4850](https://github.com/ClickHouse/ClickHouse/pull/4850) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix the `No message received` exception while fetching parts between replicas. [\#4856](https://github.com/ClickHouse/ClickHouse/pull/4856) ([alesapin](https://github.com/alesapin))
+- Fixed the `arrayIntersect` function giving wrong results in the case of several repeated values in a single array. [\#4871](https://github.com/ClickHouse/ClickHouse/pull/4871) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix a race condition during concurrent `ALTER COLUMN` queries that could lead to a server crash (fixes issue [\#3421](https://github.com/ClickHouse/ClickHouse/issues/3421)). [\#4592](https://github.com/ClickHouse/ClickHouse/pull/4592) ([Alex Zatelepin](https://github.com/ztlpn))
+- Fix parameter deduction in `ALTER MODIFY` of a column `CODEC` when the column type is not specified. [\#4883](https://github.com/ClickHouse/ClickHouse/pull/4883) ([alesapin](https://github.com/alesapin))
+- The functions `cutQueryStringAndFragment()` and `queryStringAndFragment()` now work correctly when the `URL` contains a fragment and no query. [\#4894](https://github.com/ClickHouse/ClickHouse/pull/4894) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fix a rare bug when setting `min_bytes_to_use_direct_io` is greater than zero, which occurs when a thread has to seek backward in a column file. [\#4897](https://github.com/ClickHouse/ClickHouse/pull/4897) ([alesapin](https://github.com/alesapin))
+- Fix wrong argument types for aggregate functions with `LowCardinality` arguments (fixes issue [\#4919](https://github.com/ClickHouse/ClickHouse/issues/4919)). [\#4922](https://github.com/ClickHouse/ClickHouse/pull/4922) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix the `toISOWeek` function result for year 1970. [\#4988](https://github.com/ClickHouse/ClickHouse/pull/4988) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix `DROP`, `TRUNCATE` and `OPTIMIZE` query duplication when executed `ON CLUSTER` for the `ReplicatedMergeTree*` table family. [\#4991](https://github.com/ClickHouse/ClickHouse/pull/4991) ([alesapin](https://github.com/alesapin))
+
+#### Improvements {#improvements-2}
+
+- Keep ordinary, `DEFAULT`, `MATERIALIZED` and `ALIAS` columns in a single list (fixes issue [\#2867](https://github.com/ClickHouse/ClickHouse/issues/2867)). [\#4707](https://github.com/ClickHouse/ClickHouse/pull/4707) ([Alex Zatelepin](https://github.com/ztlpn))
+
+### ClickHouse Release 19.4.3.11, 2019-04-02 {#clickhouse-release-19-4-3-11-2019-04-02}
+
+#### Bug Fixes {#bug-fixes-8}
+
+- Fix crash in `FULL/RIGHT JOIN` when joining on nullable vs not nullable. [\#4855](https://github.com/ClickHouse/ClickHouse/pull/4855) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix segmentation fault in `clickhouse-copier`. [\#4835](https://github.com/ClickHouse/ClickHouse/pull/4835) ([proller](https://github.com/proller))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-11}
+
+- Add a way to launch the clickhouse-server image from a custom user. [\#4753](https://github.com/ClickHouse/ClickHouse/pull/4753) ([Mikhail f. Shiryaev](https://github.com/Felixoid))
+
+### ClickHouse Release 19.4.2.7, 2019-03-30 {#clickhouse-release-19-4-2-7-2019-03-30}
+
+#### Bug Fixes {#bug-fixes-9}
+
+- Fixed reading from `Array(LowCardinality)` columns in the rare case when the column contained a long sequence of empty arrays. [\#4850](https://github.com/ClickHouse/ClickHouse/pull/4850) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+
+### ClickHouse Release 19.4.1.3, 2019-03-19 {#clickhouse-release-19-4-1-3-2019-03-19}
+
+#### Bug Fixes {#bug-fixes-10}
+
+- Fixed remote queries which contain both `LIMIT BY` and `LIMIT`. Previously, if `LIMIT BY` and `LIMIT` were used for a remote query, `LIMIT` could happen before `LIMIT BY`, which led to a too-filtered result. [\#4708](https://github.com/ClickHouse/ClickHouse/pull/4708) ([Constantin S. Pan](https://github.com/kvap))
+
+### ClickHouse Release 19.4.0.49, 2019-03-09 {#clickhouse-release-19-4-0-49-2019-03-09}
+
+#### New Features {#new-features-5}
+
+- Added full support for the `Protobuf` format (input and output, nested data structures). [\#4174](https://github.com/ClickHouse/ClickHouse/pull/4174) [\#4493](https://github.com/ClickHouse/ClickHouse/pull/4493) ([Vitaly Baranov](https://github.com/vitlibar))
+- Added bitmap functions. [\#4207](https://github.com/ClickHouse/ClickHouse/pull/4207) ([Andy Yang](https://github.com/andyyzh)) [\#4568](https://github.com/ClickHouse/ClickHouse/pull/4568) ([Vitaly Baranov](https://github.com/vitlibar))
+- Parquet format support. [\#4448](https://github.com/ClickHouse/ClickHouse/pull/4448) ([proller](https://github.com/proller))
+- N-gram distance was added for fuzzy string comparison. It is similar to q-gram metrics in the R language. [\#4466](https://github.com/ClickHouse/ClickHouse/pull/4466) ([Danila Kutenin](https://github.com/danlark1))
+- Combine rules for graphite rollup from dedicated aggregation and retention patterns. [\#4426](https://github.com/ClickHouse/ClickHouse/pull/4426) ([Mikhail f. Shiryaev](https://github.com/Felixoid))
+- Added `max_execution_speed` and `max_execution_speed_bytes` to limit resource usage. Added the `min_execution_speed_bytes` setting to complement `min_execution_speed`. [\#4430](https://github.com/ClickHouse/ClickHouse/pull/4430) ([Winter Zhang](https://github.com/zhang2014))
+- Implemented the function `flatten` (see the sketch after this list). [\#4555](https://github.com/ClickHouse/ClickHouse/pull/4555) [\#4409](https://github.com/ClickHouse/ClickHouse/pull/4409) ([alexey-milovidov](https://github.com/alexey-milovidov), [kzon](https://github.com/kzon))
+- Added functions `arrayEnumerateDenseRanked` and `arrayEnumerateUniqRanked` (it's like `arrayEnumerateUniq` but allows fine-tuning the array depth to look inside multidimensional arrays). [\#4475](https://github.com/ClickHouse/ClickHouse/pull/4475) ([proller](https://github.com/proller)) [\#4601](https://github.com/ClickHouse/ClickHouse/pull/4601) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Multiple JOINS with some restrictions: no asterisks, no complex aliases in ON/WHERE/GROUP BY/… [\#4462](https://github.com/ClickHouse/ClickHouse/pull/4462) ([Artem Zuikov](https://github.com/4ertus2))
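+
+A one-liner showing `flatten` on a nested array literal (the example values are made up):
+
+```sql
+SELECT flatten([[[1]], [[2], [3, 4]]]) AS a;  -- returns [1, 2, 3, 4]
+```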
+
+#### Bug Fixes {#bug-fixes-11}
+
+- This release also contains all bug fixes from 19.3 and 19.1.
+- Fixed a bug in data skipping indices: the order of granules after INSERT was incorrect. [\#4407](https://github.com/ClickHouse/ClickHouse/pull/4407) ([Nikita Vasilev](https://github.com/nikvas0))
+- Fixed the `set` index for `Nullable` and `LowCardinality` columns. Before it, `set` indices with `Nullable` or `LowCardinality` columns led to the error `Data type must be deserialized with multiple streams` while selecting. [\#4594](https://github.com/ClickHouse/ClickHouse/pull/4594) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Correctly set update\_time on full `executable` dictionary updates. [\#4551](https://github.com/ClickHouse/ClickHouse/pull/4551) ([Tema Novikov](https://github.com/temoon))
+- Fix the broken progress bar in 19.3. [\#4627](https://github.com/ClickHouse/ClickHouse/pull/4627) ([filimonov](https://github.com/filimonov))
+- Fixed inconsistent values of MemoryTracker when a memory region was shrunk, in certain cases. [\#4619](https://github.com/ClickHouse/ClickHouse/pull/4619) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed undefined behaviour in ThreadPool. [\#4612](https://github.com/ClickHouse/ClickHouse/pull/4612) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a very rare crash with the message `mutex lock failed: Invalid argument` that could happen when a MergeTree table was dropped concurrently with a SELECT. [\#4608](https://github.com/ClickHouse/ClickHouse/pull/4608) ([Alex Zatelepin](https://github.com/ztlpn))
+- ODBC driver compatibility with the `LowCardinality` data type. [\#4381](https://github.com/ClickHouse/ClickHouse/pull/4381) ([proller](https://github.com/proller))
+- FreeBSD: fixup for the `AIOcontextPool: Found io_event with unknown id 0` error. [\#4438](https://github.com/ClickHouse/ClickHouse/pull/4438) ([urgordeadbeef](https://github.com/urgordeadbeef))
+- The `system.part_log` table was created regardless of configuration. [\#4483](https://github.com/ClickHouse/ClickHouse/pull/4483) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix undefined behaviour in the `dictIsIn` function for cache dictionaries. [\#4515](https://github.com/ClickHouse/ClickHouse/pull/4515) ([alesapin](https://github.com/alesapin))
+- Fixed a deadlock when a SELECT query locks the same table multiple times (e.g. from different threads or when executing multiple subqueries) and there is a concurrent DDL query. [\#4535](https://github.com/ClickHouse/ClickHouse/pull/4535) ([Alex Zatelepin](https://github.com/ztlpn))
+- Disable compile\_expressions by default until we get our own `llvm` contrib and can test it with `clang` and `asan`. [\#4579](https://github.com/ClickHouse/ClickHouse/pull/4579) ([alesapin](https://github.com/alesapin))
+- Prevent `std::terminate` when `invalidate_query` for a `clickhouse` external dictionary source has returned a wrong resultset (empty, or more than one row, or more than one column). Fixed the issue where `invalidate_query` was performed every five seconds regardless of the `lifetime`. [\#4583](https://github.com/ClickHouse/ClickHouse/pull/4583) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Avoid a deadlock when the `invalidate_query` for a dictionary with a `clickhouse` source involved the `system.dictionaries` table or the `Dictionaries` database (a rare case). [\#4599](https://github.com/ClickHouse/ClickHouse/pull/4599) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixes for CROSS JOIN with an empty WHERE. [\#4598](https://github.com/ClickHouse/ClickHouse/pull/4598) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed segfault in the “replicate” function when a constant argument is passed. [\#4603](https://github.com/ClickHouse/ClickHouse/pull/4603) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix the lambda function with the predicate optimizer. [\#4408](https://github.com/ClickHouse/ClickHouse/pull/4408) ([Winter Zhang](https://github.com/zhang2014))
+- Multiple JOINs: multiple fixes. [\#4595](https://github.com/ClickHouse/ClickHouse/pull/4595) ([Artem Zuikov](https://github.com/4ertus2))
+
+#### Improvements {#improvements-3}
+
+- Support aliases in the JOIN ON section for right table columns. [\#4412](https://github.com/ClickHouse/ClickHouse/pull/4412) ([Artem Zuikov](https://github.com/4ertus2))
+- The result of multiple JOINs now has correct result names to be used in subselects. Flat aliases are replaced with source names in the result. [\#4474](https://github.com/ClickHouse/ClickHouse/pull/4474) ([Artem Zuikov](https://github.com/4ertus2))
+- Improve push-down logic for joined statements. [\#4387](https://github.com/ClickHouse/ClickHouse/pull/4387) ([Ivan](https://github.com/abyss7))
+
+#### Performance Improvements {#performance-improvements-3}
+
+- Improved heuristics of the “move to PREWHERE” optimization. [\#4405](https://github.com/ClickHouse/ClickHouse/pull/4405) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Use proper lookup tables that use HashTable's API for 8-bit and 16-bit keys. [\#4536](https://github.com/ClickHouse/ClickHouse/pull/4536) ([Amos Bird](https://github.com/amosbird))
+- Improved performance of string comparison. [\#4564](https://github.com/ClickHouse/ClickHouse/pull/4564) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Cleanup the distributed DDL queue in a separate thread so that it doesn't slow down the main loop that processes distributed DDL tasks. [\#4502](https://github.com/ClickHouse/ClickHouse/pull/4502) ([Alex Zatelepin](https://github.com/ztlpn))
+- When `min_bytes_to_use_direct_io` is set, not every file was opened in O\_DIRECT mode because the data size to read was sometimes underestimated by the size of one compressed block. [\#4526](https://github.com/ClickHouse/ClickHouse/pull/4526) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-12}
+
+- Added support for clang-9. [\#4604](https://github.com/ClickHouse/ClickHouse/pull/4604) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix wrong `__asm__` instructions (again). [\#4621](https://github.com/ClickHouse/ClickHouse/pull/4621) ([Konstantin Podshumok](https://github.com/podshumok))
+- Add the ability to specify settings for `clickhouse-performance-test` from the command line. [\#4437](https://github.com/ClickHouse/ClickHouse/pull/4437) ([alesapin](https://github.com/alesapin))
+- Add dictionary tests to integration tests. [\#4477](https://github.com/ClickHouse/ClickHouse/pull/4477) ([alesapin](https://github.com/alesapin))
+- Added queries from the benchmark on the website to automated performance tests. [\#4496](https://github.com/ClickHouse/ClickHouse/pull/4496) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `xxhash.h` does not exist in external lz4 because it is an implementation detail and its symbols are namespaced with the `XXH_NAMESPACE` macro. When lz4 is external, xxHash has to be external too, and the dependents have to link to it. [\#4495](https://github.com/ClickHouse/ClickHouse/pull/4495) ([Orivej Desh](https://github.com/orivej))
+- Fixed a case when the `quantileTiming` aggregate function can be called with a negative or floating-point argument (this fixes a fuzz test with the undefined behaviour sanitizer). [\#4506](https://github.com/ClickHouse/ClickHouse/pull/4506) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Spelling error correction. [\#4531](https://github.com/ClickHouse/ClickHouse/pull/4531) ([sdk2](https://github.com/sdk2))
+- Fix compilation on Mac. [\#4371](https://github.com/ClickHouse/ClickHouse/pull/4371) ([Vitaly Baranov](https://github.com/vitlibar))
+- Build fixes for FreeBSD and various unusual build configurations. [\#4444](https://github.com/ClickHouse/ClickHouse/pull/4444) ([proller](https://github.com/proller))
+
+## ClickHouse Release 19.3 {#clickhouse-release-19-3}
+
+### ClickHouse Release 19.3.9.1, 2019-04-02 {#clickhouse-release-19-3-9-1-2019-04-02}
+
+#### Bug Fixes {#bug-fixes-12}
+
+- Fix crash in `FULL/RIGHT JOIN` when joining on nullable vs not nullable. [\#4855](https://github.com/ClickHouse/ClickHouse/pull/4855) ([Artem Zuikov](https://github.com/4ertus2))
+
+#### ビルド/テスト/パッケージの改善 {#buildtestingpackaging-improvement-12}
+
+- clang-9 のサポートを追加しました。 [\#4604](https://github.com/ClickHouse/ClickHouse/pull/4604) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- 誤った `__asm__` 命令を(再び)修正しました。 [\#4621](https://github.com/ClickHouse/ClickHouse/pull/4621) ([Konstantin Podshumok](https://github.com/podshumok))
+- `clickhouse-performance-test` の設定をコマンドラインから指定できるようにしました。 [\#4437](https://github.com/ClickHouse/ClickHouse/pull/4437) ([alesapin](https://github.com/alesapin))
+- 統合テストに辞書のテストを追加しました。 [\#4477](https://github.com/ClickHouse/ClickHouse/pull/4477) ([alesapin](https://github.com/alesapin))
+- ウェブサイトのベンチマークのクエリを、自動化された性能テストに追加しました。 [\#4496](https://github.com/ClickHouse/ClickHouse/pull/4496) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `xxhash.h` は実装の詳細であり、そのシンボルは `XXH_NAMESPACE` マクロで名前空間化されているため、外部の lz4 には存在しません。 lz4 が外部の場合は xxHash も外部でなければならず、依存側はそれにリンクする必要があります。 [\#4495](https://github.com/ClickHouse/ClickHouse/pull/4495) ([Orivej Desh](https://github.com/orivej))
+- `quantileTiming` 集約関数が負の引数や浮動小数点の引数で呼び出せてしまうケースを修正しました(未定義動作サニタイザによるファズテストを修正します)。 [\#4506](https://github.com/ClickHouse/ClickHouse/pull/4506) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- スペルエラーの訂正。 [\#4531](https://github.com/ClickHouse/ClickHouse/pull/4531) ([sdk2](https://github.com/sdk2))
+- Mac でのコンパイルの修正。 [\#4371](https://github.com/ClickHouse/ClickHouse/pull/4371) ([Vitaly Baranov](https://github.com/vitlibar))
+- FreeBSD およびさまざまな特殊なビルド構成のためのビルド修正。 [\#4444](https://github.com/ClickHouse/ClickHouse/pull/4444) ([proller](https://github.com/proller))
+
+## ClickHouseリリース19.3 {#clickhouse-release-19-3}
+
+### ClickHouseリリース19.3.9.1,2019-04-02 {#clickhouse-release-19-3-9-1-2019-04-02}
+
+#### バグ修正 {#bug-fixes-12}
+
+- nullable な型とそうでない型を結合したときの `FULL/RIGHT JOIN` のクラッシュを修正しました。 [\#4855](https://github.com/ClickHouse/ClickHouse/pull/4855) ([Artem Zuikov](https://github.com/4ertus2))
+- `clickhouse-copier` のセグメンテーション違反を修正しました。 [\#4835](https://github.com/ClickHouse/ClickHouse/pull/4835) ([proller](https://github.com/proller))
+- 空の配列の長い連続を含む `Array(LowCardinality)` 列からの読み取りを修正しました(まれなケース)。 [\#4850](https://github.com/ClickHouse/ClickHouse/pull/4850) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+
+#### ビルド/テスト/パッケージの改善 {#buildtestingpackaging-improvement-13}
+
+- カスタムユーザーで clickhouse-server イメージを起動する方法を追加しました。 [\#4753](https://github.com/ClickHouse/ClickHouse/pull/4753) ([Mikhail f. Shiryaev](https://github.com/Felixoid))
+
+### ClickHouseリリース19.3.7,2019-03-12 {#clickhouse-release-19-3-7-2019-03-12}
+
+#### バグ修正 {#bug-fixes-13}
+
+- \#3920 のエラーを修正しました。 このエラーはランダムなキャッシュ破損(`Unknown codec family code` や `Cannot seek through file` というメッセージ)や segfault として現れます。 このバグはバージョン19.1で最初に現れ、19.1.10 および 19.3.6 までのバージョンに存在します。 [\#4623](https://github.com/ClickHouse/ClickHouse/pull/4623) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouseリリース19.3.6,2019-03-02 {#clickhouse-release-19-3-6-2019-03-02}
+
+#### バグ修正 {#bug-fixes-14}
+
+- スレッドプールに1000を超えるスレッドがある場合、スレッド終了時に `std::terminate` が発生することがあるバグを修正しました。 [Azat Khuzhin](https://github.com/azat) [\#4485](https://github.com/ClickHouse/ClickHouse/pull/4485) [\#4505](https://github.com/ClickHouse/ClickHouse/pull/4505) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- デフォルト値なしの列コメントを持つ `ReplicatedMergeTree*` テーブルや、コメントとデフォルトなしで列コーデックを持つテーブルを作成できるようになりました。 また、コーデックの比較も修正しました。 [\#4523](https://github.com/ClickHouse/ClickHouse/pull/4523) ([alesapin](https://github.com/alesapin))
+- 配列またはタプルとの JOIN でのクラッシュを修正しました。 [\#4552](https://github.com/ClickHouse/ClickHouse/pull/4552) ([Artem Zuikov](https://github.com/4ertus2))
+- clickhouse-copier の `ThreadStatus not created` というメッセージを伴うクラッシュを修正しました。 [\#4540](https://github.com/ClickHouse/ClickHouse/pull/4540) ([Artem Zuikov](https://github.com/4ertus2))
+- 分散 DDL を使用していた場合の、サーバー停止時のハングアップを修正しました。 [\#4472](https://github.com/ClickHouse/ClickHouse/pull/4472) ([Alex Zatelepin](https://github.com/ztlpn))
+- 列番号が10より大きい列のテキスト形式の解析に関するエラーメッセージで、誤った列番号が出力されていました。 [\#4484](https://github.com/ClickHouse/ClickHouse/pull/4484) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### ビルド/テスト/パッケージの改善 {#buildtestingpackaging-improvements-3}
+
+- AVX を有効にしたビルドを修正しました。 [\#4527](https://github.com/ClickHouse/ClickHouse/pull/4527) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- コンパイル対象のカーネルではなく、既知の正しいバージョンに基づいて拡張アカウンティングと IO アカウンティングを有効にするようにしました。 [\#4541](https://github.com/ClickHouse/ClickHouse/pull/4541) ([nvartolomei](https://github.com/nvartolomei))
+- core\_dump.size\_limit の設定をスキップできるようにし、制限の設定に失敗した場合はスローせずに警告するようにしました。 [\#4473](https://github.com/ClickHouse/ClickHouse/pull/4473) ([proller](https://github.com/proller))
+- `Field.cpp` の `void readBinary(...)` から `inline` タグを削除しました。
また、冗長な `namespace DB` ブロックをマージしました。 [\#4530](https://github.com/ClickHouse/ClickHouse/pull/4530) ([hcz](https://github.com/hczhcz))
+
+### ClickHouseリリース19.3.5,2019-02-21 {#clickhouse-release-19-3-5-2019-02-21}
+
+#### バグ修正 {#bug-fixes-15}
+
+- 大きな HTTP 挿入クエリの処理のバグを修正しました。 [\#4454](https://github.com/ClickHouse/ClickHouse/pull/4454) ([alesapin](https://github.com/alesapin))
+- `send_logs_level` 設定の誤った実装による、古いバージョンとの後方非互換性を修正しました。 [\#4445](https://github.com/ClickHouse/ClickHouse/pull/4445) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- 列コメントと共に導入されたテーブル関数 `remote` の後方非互換性を修正しました。 [\#4446](https://github.com/ClickHouse/ClickHouse/pull/4446) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouseリリース19.3.4,2019-02-16 {#clickhouse-release-19-3-4-2019-02-16}
+
+#### 改善 {#improvements-4}
+
+- `ATTACH TABLE` クエリの実行時に、テーブルインデックスのサイズをメモリ制限に計上しないようにしました。 デタッチされた後にテーブルを添付できなくなる可能性を回避しました。 [\#4396](https://github.com/ClickHouse/ClickHouse/pull/4396) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- ZooKeeper から受け取る文字列と配列の最大サイズの上限をわずかに引き上げました。 これにより、ZooKeeper 側で `CLIENT_JVMFLAGS=-Djute.maxbuffer=...` のサイズを増やした構成でも引き続き動作します。 [\#4398](https://github.com/ClickHouse/ClickHouse/pull/4398) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- すでにキューに多数のノードがある場合でも、放棄されたレプリカを修復できるようにしました。 [\#4399](https://github.com/ClickHouse/ClickHouse/pull/4399) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `set` インデックスに必須の引数(最大保存行数)を一つ追加しました(この節の末尾の例を参照)。 [\#4386](https://github.com/ClickHouse/ClickHouse/pull/4386) ([Nikita Vasilev](https://github.com/nikvas0))
+
+#### バグ修正 {#bug-fixes-16}
+
+- 単一の `LowCardinality` キーでグループ化した場合の `WITH ROLLUP` の結果を修正しました。 [\#4384](https://github.com/ClickHouse/ClickHouse/pull/4384) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- set インデックスのバグ(グラニュールが `max_rows` を超える行を含む場合にグラニュールを落としてしまう)を修正しました。 [\#4386](https://github.com/ClickHouse/ClickHouse/pull/4386) ([Nikita Vasilev](https://github.com/nikvas0))
+- 多数の FreeBSD ビルドの修正。 [\#4397](https://github.com/ClickHouse/ClickHouse/pull/4397) ([proller](https://github.com/proller))
+- 同じエイリアスを含むサブクエリを持つクエリでのエイリアス置換を修正しました(issue [\#4110](https://github.com/ClickHouse/ClickHouse/issues/4110))。 [\#4351](https://github.com/ClickHouse/ClickHouse/pull/4351) ([Artem Zuikov](https://github.com/4ertus2))
+
+#### ビルド/テスト/パッケージの改善 {#buildtestingpackaging-improvements-4}
+
+- Docker イメージ内でステートレステスト用に `clickhouse-server` を実行する機能を追加しました。 [\#4347](https://github.com/ClickHouse/ClickHouse/pull/4347) ([Vasily Nemkov](https://github.com/Enmk))
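+
+新しい `set` インデックスの必須引数を示す最小のスケッチです(テーブル名・列名・値は説明用の仮のもので、当時のバージョンではデータスキップインデックスは実験的機能であり、有効化の設定が必要になる場合があります):
+
+``` sql
+CREATE TABLE example_table
+(
+    `key` UInt64,
+    `value` String,
+    -- set(1000) の引数が、グラニュールごとに保存する一意な値の最大数です
+    INDEX value_idx value TYPE set(1000) GRANULARITY 4
+)
+ENGINE = MergeTree()
+ORDER BY key;
+```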
+
+### ClickHouseリリース19.3.3,2019-02-13 {#clickhouse-release-19-3-3-2019-02-13}
+
+#### 新しい機能 {#new-features-6}
+
+- 何らかの理由で詰まってしまったミューテーションを削除できる `KILL MUTATION` 文を追加しました(下の例を参照)。 また、トラブルシューティングのために `latest_failed_part`、`latest_fail_time`、`latest_fail_reason` フィールドを `system.mutations` テーブルに追加しました。 [\#4287](https://github.com/ClickHouse/ClickHouse/pull/4287) ([Alex Zatelepin](https://github.com/ztlpn))
+- シャノンのエントロピーを計算する集計関数 `entropy` を追加しました。 [\#4238](https://github.com/ClickHouse/ClickHouse/pull/4238) ([Quid37](https://github.com/Quid37))
+- `INSERT INTO tbl VALUES (....` 形式のクエリを、`query` 部と `data` 部に分割せずにサーバーへ送信できるようにしました。 [\#4301](https://github.com/ClickHouse/ClickHouse/pull/4301) ([alesapin](https://github.com/alesapin))
+- `arrayWithConstant` 関数の汎用的な実装を追加しました。 [\#4322](https://github.com/ClickHouse/ClickHouse/pull/4322) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `NOT BETWEEN` 比較演算子を実装しました。 [\#4228](https://github.com/ClickHouse/ClickHouse/pull/4228) ([Dmitry Naumov](https://github.com/nezed))
+- `sumMap` で値が合計されるキーを制限できる `sumMapFiltered` を実装しました。 [\#4129](https://github.com/ClickHouse/ClickHouse/pull/4129) ([Léo Ercolanelli](https://github.com/ercolanelli-leo))
+- テーブル関数 `mysql` で `Nullable` 型のサポートを追加しました。 [\#4198](https://github.com/ClickHouse/ClickHouse/pull/4198) ([Emmanuel Donin de Rosière](https://github.com/edonin))
+- `LIMIT` 句で任意の定数式をサポートしました。 [\#4246](https://github.com/ClickHouse/ClickHouse/pull/4246) ([k3box](https://github.com/k3box))
+- 重み(符号なし整数)の追加引数を取る集約関数 `topKWeighted` を追加しました。 [\#4245](https://github.com/ClickHouse/ClickHouse/pull/4245) ([andrewgolman](https://github.com/andrewgolman))
+- `StorageJoin` が `join_any_take_last_row` 設定をサポートし、同じキーの既存の値を上書きできるようになりました。 [\#3973](https://github.com/ClickHouse/ClickHouse/pull/3973) ([Amos Bird](https://github.com/amosbird))
+- `toStartOfInterval` 関数を追加しました(下の例を参照)。 [\#4304](https://github.com/ClickHouse/ClickHouse/pull/4304) ([Vitaly Baranov](https://github.com/vitlibar))
+- `RowBinaryWithNamesAndTypes` フォーマットを追加しました。 [\#4200](https://github.com/ClickHouse/ClickHouse/pull/4200) ([Oleg V. Kozlyuk](https://github.com/DarkWanderer))
+- `IPv4` と `IPv6` データ型を追加しました。 `IPv*` 関数のより効率的な実装です。 [\#3669](https://github.com/ClickHouse/ClickHouse/pull/3669) ([Vasily Nemkov](https://github.com/Enmk))
+- `toStartOfTenMinutes()` 関数を追加しました。 [\#4298](https://github.com/ClickHouse/ClickHouse/pull/4298) ([Vitaly Baranov](https://github.com/vitlibar))
+- `Protobuf` 出力フォーマットを追加しました。 [\#4005](https://github.com/ClickHouse/ClickHouse/pull/4005) [\#4158](https://github.com/ClickHouse/ClickHouse/pull/4158) ([Vitaly Baranov](https://github.com/vitlibar))
+- HTTP インターフェイスでのデータインポート(INSERT)に brotli サポートを追加しました。 [\#4235](https://github.com/ClickHouse/ClickHouse/pull/4235) ([Mikhail](https://github.com/fandyushin))
+- ユーザーが関数名をタイプミスしたときや、コマンドラインクライアントでの入力時に、候補を提示するようにしました。 [\#4239](https://github.com/ClickHouse/ClickHouse/pull/4239) ([Danila Kutenin](https://github.com/danlark1))
+- サーバーの HTTP 応答ヘッダーに `Query-Id` を追加しました。 [\#4231](https://github.com/ClickHouse/ClickHouse/pull/4231) ([Mikhail](https://github.com/fandyushin))
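+
+上記の `KILL MUTATION` と `toStartOfInterval` の簡単な使用例です(データベース名・テーブル名・ミューテーション ID は説明用の仮のものです):
+
+``` sql
+-- 詰まったミューテーションを system.mutations で確認してから削除する
+SELECT mutation_id, latest_fail_reason
+FROM system.mutations
+WHERE database = 'default' AND table = 'example_table' AND is_done = 0;
+
+KILL MUTATION WHERE database = 'default' AND table = 'example_table' AND mutation_id = 'mutation_3.txt';
+
+-- 時刻を任意の間隔の開始点へ丸める
+SELECT toStartOfInterval(toDateTime('2019-02-13 12:34:56'), INTERVAL 15 MINUTE);
+-- 結果: 2019-02-13 12:30:00
+```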
+
+#### 実験の特徴 {#experimental-features-2}
+
+- MergeTree テーブルエンジンファミリーに `minmax` と `set` のデータスキップインデックスを追加しました。 [\#4143](https://github.com/ClickHouse/ClickHouse/pull/4143) ([Nikita Vasilev](https://github.com/nikvas0))
+- 可能な場合に `CROSS JOIN` を `INNER JOIN` に変換するようにしました。 [\#4221](https://github.com/ClickHouse/ClickHouse/pull/4221) [\#4266](https://github.com/ClickHouse/ClickHouse/pull/4266) ([Artem Zuikov](https://github.com/4ertus2))
+
+#### バグ修正 {#bug-fixes-17}
+
+- `JOIN ON` セクションの重複する列に対する `Not found column` を修正しました。 [\#4279](https://github.com/ClickHouse/ClickHouse/pull/4279) ([Artem Zuikov](https://github.com/4ertus2))
+- `START REPLICATED SENDS` コマンドがレプリケート送信を開始するようにしました。 [\#4229](https://github.com/ClickHouse/ClickHouse/pull/4229) ([nvartolomei](https://github.com/nvartolomei))
+- `Array(LowCardinality)` 引数での集計関数の実行を修正しました。 [\#4055](https://github.com/ClickHouse/ClickHouse/pull/4055) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- `INSERT ... SELECT ... FROM file(...)` クエリで、ファイルが `CSVWithNames` または `TSVWithNames` フォーマットかつ最初のデータ行が欠けている場合の誤った動作を修正しました。 [\#4297](https://github.com/ClickHouse/ClickHouse/pull/4297) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- 辞書が利用できない場合の辞書リロード時のクラッシュを修正しました。 このバグは19.1.6で現れました。 [\#4188](https://github.com/ClickHouse/ClickHouse/pull/4188) ([proller](https://github.com/proller))
+- 右テーブルに重複がある場合の `ALL JOIN` を修正しました。 [\#4184](https://github.com/ClickHouse/ClickHouse/pull/4184) ([Artem Zuikov](https://github.com/4ertus2))
+- `use_uncompressed_cache=1` でのセグメンテーション違反と、誤った非圧縮サイズに関する例外を修正しました。 このバグは19.1.6で現れました。 [\#4186](https://github.com/ClickHouse/ClickHouse/pull/4186) ([alesapin](https://github.com/alesapin))
+- 大きな(int16 を超える)日付の比較を伴う `compile_expressions` のバグを修正しました。 [\#4341](https://github.com/ClickHouse/ClickHouse/pull/4341) ([alesapin](https://github.com/alesapin))
+- テーブル関数 `numbers(0)` からの SELECT の無限ループを修正しました。 [\#4280](https://github.com/ClickHouse/ClickHouse/pull/4280) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `ORDER BY` に対する述語の最適化を一時的に無効にしました。 [\#3890](https://github.com/ClickHouse/ClickHouse/pull/3890) ([Winter Zhang](https://github.com/zhang2014))
+- 古い CPU で base64 関数を使用したときの `Illegal instruction` エラーを修正しました。 このエラーは ClickHouse が gcc-8 でコンパイルされた場合にのみ再現されています。 [\#4275](https://github.com/ClickHouse/ClickHouse/pull/4275) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- TLS 接続経由で PostgreSQL ODBC ドライバーとやり取りするときの `No message received` エラーを修正しました。 MySQL の ODBC ドライバを使用する場合の segfault も修正します。 [\#4170](https://github.com/ClickHouse/ClickHouse/pull/4170) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `Date` と `DateTime` の引数が条件演算子(関数 `if`)の分岐で使用されたときの誤った結果を修正しました。 関数 `if` に汎用のケースを追加しました。 [\#4243](https://github.com/ClickHouse/ClickHouse/pull/4243) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- ClickHouse 辞書が `clickhouse` プロセス内でロードされるようになりました。 [\#4166](https://github.com/ClickHouse/ClickHouse/pull/4166) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `No such file or directory` エラーの後にリトライされた、`File` エンジンのテーブルからの `SELECT` のデッドロックを修正しました。 [\#4161](https://github.com/ClickHouse/ClickHouse/pull/4161) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `system.tables` からの SELECT が `table doesn't exist` エラーを出す競合状態を修正しました。 [\#4313](https://github.com/ClickHouse/ClickHouse/pull/4313) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `clickhouse-client` が、インタラクティブモードでコマンドライン候補のデータを読み込んでいる最中に終了すると segfault することがありました。 [\#4317](https://github.com/ClickHouse/ClickHouse/pull/4317) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `IN` 演算子を含むミューテーションの実行が誤った結果を生成するバグを修正しました。 [\#4099](https://github.com/ClickHouse/ClickHouse/pull/4099) ([Alex Zatelepin](https://github.com/ztlpn))
+- 次のエラーを修正しました:`Dictionary` エンジンのデータベースがあり、サーバーの起動時に強制的にロードされる辞書と、localhost の ClickHouse ソースを持つ辞書がある場合、その辞書がロードできませんでした。 [\#4255](https://github.com/ClickHouse/ClickHouse/pull/4255) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- サーバーのシャットダウン時にシステムログを再作成しようとしたときのエラーを修正しました。 [\#4254](https://github.com/ClickHouse/ClickHouse/pull/4254) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `joinGet` 関数が正しい型を返し、ロックを適切に扱うようにしました。 [\#4153](https://github.com/ClickHouse/ClickHouse/pull/4153) ([Amos Bird](https://github.com/amosbird))
+- `sumMapWithOverflow` 関数を追加しました。 [\#4151](https://github.com/ClickHouse/ClickHouse/pull/4151) ([Léo Ercolanelli](https://github.com/ercolanelli-leo))
+- `allow_experimental_multiple_joins_emulation` での segfault を修正しました。
[52de2c](https://github.com/ClickHouse/ClickHouse/commit/52de2cd927f7b5257dd67e175f0a5560a48840d0) ([Artem Zuikov](https://github.com/4ertus2)) +- 間違ったとのバグを修正 `Date` と `DateTime` 比較。 [\#4237](https://github.com/ClickHouse/ClickHouse/pull/4237) ([valexey](https://github.com/valexey)) +- 未定義の動作の下で固定ファズテストサニタイザ:追加されたパラメータの型チェック `quantile*Weighted` 機能の系列。 [\#4145](https://github.com/ClickHouse/ClickHouse/pull/4145) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- 古いデータパーツの削除が失敗することがある稀な競合状態を修正しました `File not found` エラー。 [\#4378](https://github.com/ClickHouse/ClickHouse/pull/4378) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- /etc/clickhouse-server/configが見つからないインストールパッケージを修正xmlだ [\#4343](https://github.com/ClickHouse/ClickHouse/pull/4343) ([proller](https://github.com/proller)) + +#### ビルド/テスト/パッケージの改善 {#buildtestingpackaging-improvements-5} + +- Debianパッケージ:設定に従って/etc/clickhouse-server/preprocessedリンクを修正します。 [\#4205](https://github.com/ClickHouse/ClickHouse/pull/4205) ([proller](https://github.com/proller)) +- FreeBSDのための様々なビルドの修正. [\#4225](https://github.com/ClickHouse/ClickHouse/pull/4225) ([proller](https://github.com/proller)) +- Perftestでテーブルを作成、入力、削除する機能を追加しました。 [\#4220](https://github.com/ClickHouse/ClickHouse/pull/4220) ([alesapin](https://github.com/alesapin)) +- 重複をチェックするスクリプトを追加しました。 [\#4326](https://github.com/ClickHouse/ClickHouse/pull/4326) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- パフォーマンステス [\#4264](https://github.com/ClickHouse/ClickHouse/pull/4264) ([alesapin](https://github.com/alesapin)) +- パッケージはデバッグシンボルとを示唆を設置することができます。 [\#4274](https://github.com/ClickHouse/ClickHouse/pull/4274) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- パフォーマンステストのリファクタリング。 より良いロギングと信号の処理。 [\#4171](https://github.com/ClickHouse/ClickHouse/pull/4171) ([alesapin](https://github.com/alesapin)) +- 匿名のyandexにドキュメントを追加しました。メトリカのデータセット。 [\#4164](https://github.com/ClickHouse/ClickHouse/pull/4164) ([alesapin](https://github.com/alesapin)) +- Аdded tool for converting an old month-partitioned part to the custom-partitioned format. [\#4195](https://github.com/ClickHouse/ClickHouse/pull/4195) ([Alex Zatelepin](https://github.com/ztlpn)) +- 追加docsつのデータセットにs3. [\#4144](https://github.com/ClickHouse/ClickHouse/pull/4144) ([alesapin](https://github.com/alesapin)) +- プル要求の説明からchangelogを作成するスクリプトを追加しました。 [\#4169](https://github.com/ClickHouse/ClickHouse/pull/4169) [\#4173](https://github.com/ClickHouse/ClickHouse/pull/4173) ([KochetovNicolai](https://github.com/KochetovNicolai)) ([KochetovNicolai](https://github.com/KochetovNicolai)) +- Clickhouseの人形モジュールを追加しました。 [\#4182](https://github.com/ClickHouse/ClickHouse/pull/4182) ([Maxim Fedotov](https://github.com/MaxFedotov)) +- 文書化されていない関数のグループのドキュメントを追加しました。 [\#4168](https://github.com/ClickHouse/ClickHouse/pull/4168) ([冬張](https://github.com/zhang2014)) +- ARMビルドの修正。 [\#4210](https://github.com/ClickHouse/ClickHouse/pull/4210)[\#4306](https://github.com/ClickHouse/ClickHouse/pull/4306) [\#4291](https://github.com/ClickHouse/ClickHouse/pull/4291) ([proller](https://github.com/proller)) ([proller](https://github.com/proller)) +- 辞書テストを実行できるようになりました `ctest`. 
[\#4189](https://github.com/ClickHouse/ClickHouse/pull/4189) ([proller](https://github.com/proller)) +- さて `/etc/ssl` はデフォルトとして使用されると、ディレクトリにSSL証明書 [\#4167](https://github.com/ClickHouse/ClickHouse/pull/4167) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- 開始時にsseおよびavx命令の確認を追加しました。 [\#4234](https://github.com/ClickHouse/ClickHouse/pull/4234) ([Igr](https://github.com/igron99)) +- Initスクリプトは開始までサーバを待機します。 [\#4281](https://github.com/ClickHouse/ClickHouse/pull/4281) ([proller](https://github.com/proller)) + +#### 下位互換性のない変更 {#backward-incompatible-changes-1} + +- 削除 `allow_experimental_low_cardinality_type` 設定。 `LowCardinality` デー [\#4323](https://github.com/ClickHouse/ClickHouse/pull/4323) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- 削減マークのキャッシュされた、圧縮解除されたキャッシュサイズに従ってメインメニューを開きます。 [\#4240](https://github.com/ClickHouse/ClickHouse/pull/4240) ([ロパチンコンスタンチン](https://github.com/k-lopatin) +- キーワードを追加 `INDEX` で `CREATE TABLE` クエリ。 名前のある列 `index` バッククォートまたは二重引用符で囲む必要があります: `` `index` ``. [\#4143](https://github.com/ClickHouse/ClickHouse/pull/4143) ([Nikita Vasilev](https://github.com/nikvas0)) +- `sumMap` を推進する結果の型の代わりにオーバーフロー. 古いの `sumMap` 動作は、以下を使用して取得できます `sumMapWithOverflow` 機能。 [\#4151](https://github.com/ClickHouse/ClickHouse/pull/4151) ([Léo Ercolanelli](https://github.com/ercolanelli-leo)) + +#### 性能の改善 {#performance-improvements-4} + +- `std::sort` に置き換え `pdqsort` なしのクエリの場合 `LIMIT`. [\#4236](https://github.com/ClickHouse/ClickHouse/pull/4236) ([Evgenii Pravda](https://github.com/kvinty)) +- 現在サーバーの再利用にスレッドからグローバルスレッドプールがあります。 この影響性能の一部のコーナー。 [\#4150](https://github.com/ClickHouse/ClickHouse/pull/4150) ([alexey-milovidov](https://github.com/alexey-milovidov)) + +#### 改善 {#improvements-5} + +- FreeBSDのAIOサポートを実装しました。 [\#4305](https://github.com/ClickHouse/ClickHouse/pull/4305) ([urgordeadbeef](https://github.com/urgordeadbeef)) +- `SELECT * FROM a JOIN b USING a, b` 今すぐ戻る `a` と `b` 左側のテーブルからの列のみ。 [\#4141](https://github.com/ClickHouse/ClickHouse/pull/4141) ([Artem Zuikov](https://github.com/4ertus2)) +- 許可 `-C` オプションクライアントとして `-c` オプション。 [\#4232](https://github.com/ClickHouse/ClickHouse/pull/4232) ([syominsergey](https://github.com/syominsergey)) +- Nowオプション `--password` 使用せずに値を必要とパスワードからstdin. [\#4230](https://github.com/ClickHouse/ClickHouse/pull/4230) ([BSD\_Conqueror](https://github.com/bsd-conqueror)) +- エスケープされていないメタ文字を含む文字列リテラルで強調表示するようにした `LIKE` 式または正規表現。 [\#4327](https://github.com/ClickHouse/ClickHouse/pull/4327) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- クライアントソケッ [\#4213](https://github.com/ClickHouse/ClickHouse/pull/4213) ([nvartolomei](https://github.com/nvartolomei)) +- 現在サーバーの進捗報告書くクライアント接続の待機を開始。 [\#4215](https://github.com/ClickHouse/ClickHouse/pull/4215) ([イワン](https://github.com/abyss7)) +- 最適化クエリの理由がわずかに良いメッセージ `optimize_throw_if_noop` 設定は有効です。 [\#4294](https://github.com/ClickHouse/ClickHouse/pull/4294) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- のサポートを追加 `--version` clickhouseサーバーのための選択。 [\#4251](https://github.com/ClickHouse/ClickHouse/pull/4251) ([ロパチンコンスタンチン](https://github.com/k-lopatin)) +- 追加 `--help/-h` オプションへ `clickhouse-server`. 
[\#4233](https://github.com/ClickHouse/ClickHouse/pull/4233) ([Yuriy Baranov](https://github.com/yurriy))
+- 集計関数の状態を持つスカラサブクエリのサポートを追加しました。 [\#4348](https://github.com/ClickHouse/ClickHouse/pull/4348) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- サーバーの停止時間と、ALTER の待機時間を改善しました。 [\#4372](https://github.com/ClickHouse/ClickHouse/pull/4372) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- system.replicas に replicated\_can\_become\_leader 設定に関する情報を追加し、レプリカがリーダーになろうとしない場合はログを出力するようにしました。 [\#4379](https://github.com/ClickHouse/ClickHouse/pull/4379) ([Alex Zatelepin](https://github.com/ztlpn))
+
+## ClickHouseリリース19.1 {#clickhouse-release-19-1}
+
+### ClickHouseリリース19.1.14、2019-03-14 {#clickhouse-release-19-1-14-2019-03-14}
+
+- `asterisk_left_columns_only` を 1 に設定して `GLOBAL JOIN` と `SELECT *` を併用したときに発生し得る `Column ... queried more than once` エラーを修正しました(まれなケース)。 この問題は19.3以降には存在しません。 [6bac7d8d](https://github.com/ClickHouse/ClickHouse/pull/4692/commits/6bac7d8d11a9b0d6de0b32b53c47eb2f6f8e7062) ([Artem Zuikov](https://github.com/4ertus2))
+
+### ClickHouseリリース19.1.13,2019-03-12 {#clickhouse-release-19-1-13-2019-03-12}
+
+このリリースには、19.3.7とまったく同じパッチが含まれています。
+
+### ClickHouseリリース19.1.10、2019-03-03 {#clickhouse-release-19-1-10-2019-03-03}
+
+このリリースには、19.3.6とまったく同じパッチが含まれています。
+
+## ClickHouseリリース19.1 {#clickhouse-release-19-1-1}
+
+### ClickHouseリリース19.1.9,2019-02-21 {#clickhouse-release-19-1-9-2019-02-21}
+
+#### バグ修正 {#bug-fixes-18}
+
+- `send_logs_level` 設定の誤った実装による、古いバージョンとの後方非互換性を修正しました。 [\#4445](https://github.com/ClickHouse/ClickHouse/pull/4445) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- 列コメントと共に導入されたテーブル関数 `remote` の後方非互換性を修正しました。 [\#4446](https://github.com/ClickHouse/ClickHouse/pull/4446) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouseリリース19.1.8,2019-02-16 {#clickhouse-release-19-1-8-2019-02-16}
+
+#### バグ修正 {#bug-fixes-19}
+
+- /etc/clickhouse-server/config.xml が見つからない場合のインストールパッケージを修正しました。 [\#4343](https://github.com/ClickHouse/ClickHouse/pull/4343) ([proller](https://github.com/proller))
+
+## ClickHouseリリース19.1 {#clickhouse-release-19-1-2}
+
+### ClickHouseリリース19.1.7,2019-02-15 {#clickhouse-release-19-1-7-2019-02-15}
+
+#### バグ修正 {#bug-fixes-20}
+
+- `joinGet` 関数が正しい型を返し、ロックを適切に扱うようにしました。 [\#4153](https://github.com/ClickHouse/ClickHouse/pull/4153) ([Amos Bird](https://github.com/amosbird))
+- サーバーのシャットダウン時にシステムログを再作成しようとしたときのエラーを修正しました。 [\#4254](https://github.com/ClickHouse/ClickHouse/pull/4254) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- 次のエラーを修正しました:`Dictionary` エンジンのデータベースがあり、サーバーの起動時に強制的にロードされる辞書と、localhost の ClickHouse ソースを持つ辞書がある場合、その辞書がロードできませんでした。 [\#4255](https://github.com/ClickHouse/ClickHouse/pull/4255) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `IN` 演算子を含むミューテーションの実行が誤った結果を生成するバグを修正しました。 [\#4099](https://github.com/ClickHouse/ClickHouse/pull/4099) ([Alex Zatelepin](https://github.com/ztlpn))
+- `clickhouse-client` が、インタラクティブモードでコマンドライン候補のデータを読み込んでいる最中に終了すると segfault することがありました。 [\#4317](https://github.com/ClickHouse/ClickHouse/pull/4317) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `system.tables` からの SELECT が `table doesn't exist` エラーを出す競合状態を修正しました。 [\#4313](https://github.com/ClickHouse/ClickHouse/pull/4313) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `No such file or directory` エラーの後にリトライされた、`File` エンジンのテーブルからの `SELECT` のデッドロックを修正しました。 [\#4161](https://github.com/ClickHouse/ClickHouse/pull/4161) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- 次の問題を修正しました:ローカルの ClickHouse 辞書が TCP 経由でロードされていましたが、プロセス内でロードされるべきでした。
[\#4166](https://github.com/ClickHouse/ClickHouse/pull/4166) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- TLS 接続経由で PostgreSQL ODBC ドライバーとやり取りするときの `No message received` エラーを修正しました。 MySQL の ODBC ドライバを使用する場合の segfault も修正します。 [\#4170](https://github.com/ClickHouse/ClickHouse/pull/4170) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `ORDER BY` に対する述語の最適化を一時的に無効にしました。 [\#3890](https://github.com/ClickHouse/ClickHouse/pull/3890) ([Winter Zhang](https://github.com/zhang2014))
+- テーブル関数 `numbers(0)` からの SELECT の無限ループを修正しました。 [\#4280](https://github.com/ClickHouse/ClickHouse/pull/4280) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- 大きな(int16 を超える)日付の比較を伴う `compile_expressions` のバグを修正しました。 [\#4341](https://github.com/ClickHouse/ClickHouse/pull/4341) ([alesapin](https://github.com/alesapin))
+- `uncompressed_cache=1` でのセグメンテーション違反と、誤った非圧縮サイズに関する例外を修正しました。 [\#4186](https://github.com/ClickHouse/ClickHouse/pull/4186) ([alesapin](https://github.com/alesapin))
+- 右テーブルに重複がある場合の `ALL JOIN` を修正しました。 [\#4184](https://github.com/ClickHouse/ClickHouse/pull/4184) ([Artem Zuikov](https://github.com/4ertus2))
+- `INSERT ... SELECT ... FROM file(...)` クエリで、ファイルが `CSVWithNames` または `TSVWithNames` フォーマットかつ最初のデータ行が欠けている場合の誤った動作を修正しました。 [\#4297](https://github.com/ClickHouse/ClickHouse/pull/4297) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `Array(LowCardinality)` 引数での集計関数の実行を修正しました。 [\#4055](https://github.com/ClickHouse/ClickHouse/pull/4055) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Debian パッケージ:設定に従って /etc/clickhouse-server/preprocessed リンクを修正します。 [\#4205](https://github.com/ClickHouse/ClickHouse/pull/4205) ([proller](https://github.com/proller))
+- 未定義動作サニタイザでのファズテストを修正:`quantile*Weighted` 系の関数にパラメータの型チェックを追加しました。 [\#4145](https://github.com/ClickHouse/ClickHouse/pull/4145) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `START REPLICATED SENDS` コマンドがレプリケート送信を開始するようにしました。 [\#4229](https://github.com/ClickHouse/ClickHouse/pull/4229) ([nvartolomei](https://github.com/nvartolomei))
+- JOIN セクションの重複する列に対する `Not found column` を修正しました。 [\#4279](https://github.com/ClickHouse/ClickHouse/pull/4279) ([Artem Zuikov](https://github.com/4ertus2))
+- `/etc/ssl` が SSL 証明書のデフォルトディレクトリとして使用されるようになりました。 [\#4167](https://github.com/ClickHouse/ClickHouse/pull/4167) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- 辞書が利用できない場合の辞書リロード時のクラッシュを修正しました。 [\#4188](https://github.com/ClickHouse/ClickHouse/pull/4188) ([proller](https://github.com/proller))
+- 誤った `Date` と `DateTime` の比較のバグを修正しました。 [\#4237](https://github.com/ClickHouse/ClickHouse/pull/4237) ([valexey](https://github.com/valexey))
+- `Date` と `DateTime` の引数が条件演算子(関数 `if`)の分岐で使用されたときの誤った結果を修正しました。 関数 `if` に汎用のケースを追加しました。 [\#4243](https://github.com/ClickHouse/ClickHouse/pull/4243) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouseリリース19.1.6,2019-01-24 {#clickhouse-release-19-1-6-2019-01-24}
+
+#### 新しい機能 {#new-features-7}
+
+- テーブルの列ごとのカスタム圧縮コーデック(この節の末尾の例を参照)。 [\#3899](https://github.com/ClickHouse/ClickHouse/pull/3899) [\#4111](https://github.com/ClickHouse/ClickHouse/pull/4111) ([alesapin](https://github.com/alesapin), [Winter Zhang](https://github.com/zhang2014), [Anatoly](https://github.com/Sindbag))
+- 圧縮コーデック `Delta` を追加しました。 [\#4052](https://github.com/ClickHouse/ClickHouse/pull/4052) ([alesapin](https://github.com/alesapin))
+- `ALTER` で圧縮コーデックを変更できるようにしました。 [\#4054](https://github.com/ClickHouse/ClickHouse/pull/4054) ([alesapin](https://github.com/alesapin))
+- SQL 標準互換のために、関数 `left`、`right`、`trim`、`ltrim`、`rtrim`、`timestampadd`、`timestampsub` を追加しました(この節の末尾の例を参照)。 [\#3826](https://github.com/ClickHouse/ClickHouse/pull/3826) ([Ivan Blinkov](https://github.com/blinkov))
+- `HDFS` テーブルおよびテーブル関数 `hdfs` への書き込みをサポートしました。 [\#4084](https://github.com/ClickHouse/ClickHouse/pull/4084) ([alesapin](https://github.com/alesapin))
+- 大きな haystack から複数の定数文字列を検索する関数を追加しました: `multiPosition`、`multiSearch`、`firstMatch`。`-UTF8`、`-CaseInsensitive`、`-CaseInsensitiveUTF8` のバリアントもあります。 [\#4053](https://github.com/ClickHouse/ClickHouse/pull/4053) ([Danila Kutenin](https://github.com/danlark1))
+- `SELECT` クエリがシャーディングキーでフィルタしている場合に、未使用のシャードを刈り取るようにしました(設定 `optimize_skip_unused_shards`)。 [\#3851](https://github.com/ClickHouse/ClickHouse/pull/3851) ([Gleb Kanterov](https://github.com/kanterov), [Ivan](https://github.com/abyss7))
+- `Kafka` エンジンが、ブロックごとに一定数の解析エラーを無視できるようにしました。 [\#4094](https://github.com/ClickHouse/ClickHouse/pull/4094) ([Ivan](https://github.com/abyss7))
+- `CatBoost` マルチクラスモデルの評価をサポートしました。 関数 `modelEvaluate` は、マルチクラスモデルのクラスごとの生の予測を持つタプルを返します。 `libcatboostmodel.so` は [\#607](https://github.com/catboost/catboost/pull/607) でビルドする必要があります。 [\#3959](https://github.com/ClickHouse/ClickHouse/pull/3959) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- 関数 `filesystemAvailable`、`filesystemFree`、`filesystemCapacity` を追加しました。 [\#4097](https://github.com/ClickHouse/ClickHouse/pull/4097) ([Boris Granveaud](https://github.com/bgranvea))
+- ハッシュ関数 `xxHash64` と `xxHash32` を追加しました。 [\#3905](https://github.com/ClickHouse/ClickHouse/pull/3905) ([filimonov](https://github.com/filimonov))
+- [gcc](https://github.com/gcc-mirror/gcc/blob/41d6b10e96a1de98e90a7c0378437c3255814b16/libstdc%2B%2B-v3/include/bits/functional_hash.h#L191) と同じハッシュシードを使用する `gccMurmurHash` ハッシュ関数(GCC 風味の Murmur ハッシュ)を追加しました。 [\#4000](https://github.com/ClickHouse/ClickHouse/pull/4000) ([sundyli](https://github.com/sundy-li))
+- ハッシュ関数 `javaHash`、`hiveHash` を追加しました。 [\#3811](https://github.com/ClickHouse/ClickHouse/pull/3811) ([shangshujie365](https://github.com/shangshujie365))
+- テーブル関数 `remoteSecure` を追加しました。 関数 `remote` と同様に動作しますが、セキュアな接続を使用します。 [\#4088](https://github.com/ClickHouse/ClickHouse/pull/4088) ([proller](https://github.com/proller))
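+
+上記の列ごとの圧縮コーデックと新しい関数の使い方を示す最小のスケッチです(テーブル名・列名・値・コーデックの組み合わせは説明用の仮のものです):
+
+``` sql
+CREATE TABLE codec_example
+(
+    `dt` Date CODEC(ZSTD),
+    `ts` DateTime CODEC(Delta, LZ4),  -- 新しい Delta コーデックと LZ4 の組み合わせ
+    `value` Float64 CODEC(NONE)
+)
+ENGINE = MergeTree()
+ORDER BY dt;
+
+SELECT
+    trim(BOTH ' ' FROM '  ClickHouse  '),  -- 'ClickHouse'
+    left('ClickHouse', 5),                 -- 'Click'
+    xxHash64('ClickHouse');
+```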
+
+#### 実験の特徴 {#experimental-features-3}
+
+- 複数 JOIN のエミュレーションを追加しました(`allow_experimental_multiple_joins_emulation` 設定)。 [\#3946](https://github.com/ClickHouse/ClickHouse/pull/3946) ([Artem Zuikov](https://github.com/4ertus2))
+
+#### バグ修正 {#bug-fixes-21}
+
+- デフォルトでメモリ消費を抑えるため、`compiled_expression_cache_size` 設定を制限しました。 [\#4041](https://github.com/ClickHouse/ClickHouse/pull/4041) ([alesapin](https://github.com/alesapin))
+- レプリケートされたテーブルの ALTER を実行するスレッドと、ZooKeeper から設定を更新するスレッドのバグを修正しました。 [\#2947](https://github.com/ClickHouse/ClickHouse/issues/2947) [\#3891](https://github.com/ClickHouse/ClickHouse/issues/3891) [\#3934](https://github.com/ClickHouse/ClickHouse/pull/3934) ([Alex Zatelepin](https://github.com/ztlpn))
+- 分散 ALTER タスクを実行する際の競合状態を修正しました。 この競合状態により、複数のレプリカがタスクを実行しようとして、一つを除くすべてのレプリカが ZooKeeper エラーで失敗していました。 [\#3904](https://github.com/ClickHouse/ClickHouse/pull/3904) ([Alex Zatelepin](https://github.com/ztlpn))
+- ZooKeeper のウォッチ後に `from_zk` の config 要素がリフレッシュされないバグを修正しました。 [\#2947](https://github.com/ClickHouse/ClickHouse/issues/2947) [\#3947](https://github.com/ClickHouse/ClickHouse/pull/3947) ([Alex Zatelepin](https://github.com/ztlpn))
+- IPv4 サブネットマスクのプレフィックスが誤っていたバグを修正しました。 [\#3945](https://github.com/ClickHouse/ClickHouse/pull/3945) ([alesapin](https://github.com/alesapin))
+- リソース枯渇により新しいスレッドを作成できない、まれな場合のクラッシュ(`std::terminate`)を修正しました。 [\#3956](https://github.com/ClickHouse/ClickHouse/pull/3956) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `getStructureOfRemoteTable` で誤った制限が使用されたときの、テーブル関数 `remote` の実行のバグを修正しました。 [\#4009](https://github.com/ClickHouse/ClickHouse/pull/4009) ([alesapin](https://github.com/alesapin))
+- Netlink ソケットのリークを修正しました。 ソケットはプールに置かれたまま削除されることがなく、現在のすべてのソケットが使用中のときは、スレッドの開始時に新しいソケットが作成されていました。 [\#4017](https://github.com/ClickHouse/ClickHouse/pull/4017) ([Alex Zatelepin](https://github.com/ztlpn))
+- `odbc-bridge` サブプロセスのフォーク後に、`/proc` からすべての fd が読み込まれる前に `/proc/self/fd` ディレクトリを閉じてしまうバグを修正しました。 [\#4120](https://github.com/ClickHouse/ClickHouse/pull/4120) ([alesapin](https://github.com/alesapin))
+- 主キーで文字列を使用した場合の、文字列から UInt への単調変換を修正しました。 [\#3870](https://github.com/ClickHouse/ClickHouse/pull/3870) ([Winter Zhang](https://github.com/zhang2014))
+- 整数変換関数の単調性の計算のエラーを修正しました。 [\#3921](https://github.com/ClickHouse/ClickHouse/pull/3921) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- いくつかの無効な引数の場合の `arrayEnumerateUniq`、`arrayEnumerateDense` 関数の segfault を修正しました。 [\#3909](https://github.com/ClickHouse/ClickHouse/pull/3909) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- StorageMerge の UB を修正しました。 [\#3910](https://github.com/ClickHouse/ClickHouse/pull/3910) ([Amos Bird](https://github.com/amosbird))
+- `addDays`、`subtractDays` 関数の segfault を修正しました。
[\#3913](https://github.com/ClickHouse/ClickHouse/pull/3913) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- 固定エラー:機能 `round`, `floor`, `trunc`, `ceil` を返すことが偽の結果が実行される整数の引数と大きな負のです。 [\#3914](https://github.com/ClickHouse/ClickHouse/pull/3914) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- によって誘導されるバグを修正 ‘kill query sync’ これはコアダンプにつながります。 [\#3916](https://github.com/ClickHouse/ClickHouse/pull/3916) ([muVulDeePecker](https://github.com/fancyqlx)) +- のを修正した。長の遅延の後に空の複製します。 [\#3928](https://github.com/ClickHouse/ClickHouse/pull/3928) [\#3932](https://github.com/ClickHouse/ClickHouse/pull/3932) ([alesapin](https://github.com/alesapin)) +- テーブルに挿入する場合の過度のメモリ使用量を修正しました `LowCardinality` 主キー。 [\#3955](https://github.com/ClickHouse/ClickHouse/pull/3955) ([KochetovNicolai](https://github.com/KochetovNicolai)) +- 固定 `LowCardinality` のためのシリアル化 `Native` 空の配列の場合の形式。 [\#3907](https://github.com/ClickHouse/ClickHouse/issues/3907) [\#4011](https://github.com/ClickHouse/ClickHouse/pull/4011) ([KochetovNicolai](https://github.com/KochetovNicolai)) +- 単一のlowcardinality数値列によってdistinctを使用している間、不正な結果を修正しました。 [\#3895](https://github.com/ClickHouse/ClickHouse/issues/3895) [\#4012](https://github.com/ClickHouse/ClickHouse/pull/4012) ([KochetovNicolai](https://github.com/KochetovNicolai)) +- 低カーディナリティキーを使用した特殊な集計を修正しました(以下の場合 `compile` 設定が有効になっています)。 [\#3886](https://github.com/ClickHouse/ClickHouse/pull/3886) ([KochetovNicolai](https://github.com/KochetovNicolai)) +- 固定ユーザとパスワードを転送のための複製のテーブルのクエリ. [\#3957](https://github.com/ClickHouse/ClickHouse/pull/3957) ([alesapin](https://github.com/alesapin)) ([小路](https://github.com/nicelulu)) +- 固定非常に珍しい競合状態とされるようにすることが一覧表の辞書データベースをリロードを生成する事ができます。 [\#3970](https://github.com/ClickHouse/ClickHouse/pull/3970) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- ロールアップまたはcubeで使用されたときの不正な結果を修正しました。 [\#3756](https://github.com/ClickHouse/ClickHouse/issues/3756) [\#3837](https://github.com/ClickHouse/ClickHouse/pull/3837) ([サム-チョウ](https://github.com/reflection)) +- クエリの固定列エイリアス `JOIN ON` 構文と分散テーブル。 [\#3980](https://github.com/ClickHouse/ClickHouse/pull/3980) ([冬張](https://github.com/zhang2014)) +- の内部実装における固定エラー `quantileTDigest` (アルテムVakhrushevによって発見)。 このエラーはClickHouseでは決して起こらず、ClickHouseコードベースをライブラリとして直接使用する人にのみ関連していました。 [\#3935](https://github.com/ClickHouse/ClickHouse/pull/3935) ([alexey-milovidov](https://github.com/alexey-milovidov)) + +#### 改善 {#improvements-6} + +- のサポート `IF NOT EXISTS` で `ALTER TABLE ADD COLUMN` と一緒に文 `IF EXISTS` で `DROP/MODIFY/CLEAR/COMMENT COLUMN`. 
[\#3900](https://github.com/ClickHouse/ClickHouse/pull/3900) ([Boris Granveaud](https://github.com/bgranvea)) +- 機能 `parseDateTimeBestEffort`:形式のサポート `DD.MM.YYYY`, `DD.MM.YY`, `DD-MM-YYYY`, `DD-Mon-YYYY`, `DD/Month/YYYY` と似ています。 [\#3922](https://github.com/ClickHouse/ClickHouse/pull/3922) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- `CapnProtoInputStream` 今サポートギザギザの構造。 [\#4063](https://github.com/ClickHouse/ClickHouse/pull/4063) ([Odin Hultgren Van Der Horst](https://github.com/Miniwoffer)) +- ユーザビリティ向上に追加チェックがサーバプロセスからのデータディレクトリはオーナーを想定しています。 できない開始のサーバーからのルートデータが得られない場合には所属非rootユーザーです。 [\#3785](https://github.com/ClickHouse/ClickHouse/pull/3785) ([sergey-v-galtsev](https://github.com/sergey-v-galtsev)) +- 結合によるクエリの分析中に必要な列をチェックするロジックの改善。 [\#3930](https://github.com/ClickHouse/ClickHouse/pull/3930) ([Artem Zuikov](https://github.com/4ertus2)) +- 単一のサーバーに多数の分散テーブルがある場合の接続数を減らしました。 [\#3726](https://github.com/ClickHouse/ClickHouse/pull/3726) ([冬張](https://github.com/zhang2014)) +- サポートされている合計行 `WITH TOTALS` ODBCドライバのクエリ。 [\#3836](https://github.com/ClickHouse/ClickHouse/pull/3836) ([Maksim Koritckiy](https://github.com/nightweb)) +- 使用を許可する `Enum`関数の中の整数としてのs。 [\#3875](https://github.com/ClickHouse/ClickHouse/pull/3875) ([イワン](https://github.com/abyss7)) +- 追加 `low_cardinality_allow_in_native_format` 設定。 無効の場合は、使用しないでください `LowCadrinality` タイプイン `Native` フォーマット。 [\#3879](https://github.com/ClickHouse/ClickHouse/pull/3879) ([KochetovNicolai](https://github.com/KochetovNicolai)) +- 削除の冗長化物からの集計表現のキャッシュの低メモリ使用量 [\#4042](https://github.com/ClickHouse/ClickHouse/pull/4042) ([alesapin](https://github.com/alesapin)) +- チェックを追加する `SET send_logs_level = 'value'` クエリーを受け適切な値です。 [\#3873](https://github.com/ClickHouse/ClickHouse/pull/3873) ([Sabyanin Maxim](https://github.com/s-mx)) +- タイプ変換関数で固定されたデータ型のチェック。 [\#3896](https://github.com/ClickHouse/ClickHouse/pull/3896) ([冬張](https://github.com/zhang2014)) + +#### 性能の改善 {#performance-improvements-5} + +- マージツリー設定の追加 `use_minimalistic_part_header_in_zookeeper`. 有効になっている場合、複製のテーブル店舗のコンパクト部分のメタデータの一部znode. これは著しく低下するので、飼育係スナップショットサイズ(場合には、あらゆるテーブルのカラム). この設定を有効にすると、それをサポートしていないバージョンにダウングレードすることはできません。 [\#3960](https://github.com/ClickHouse/ClickHouse/pull/3960) ([Alex Zatelepin](https://github.com/ztlpn)) +- Dfaベースの関数の実装を追加します。 `sequenceMatch` と `sequenceCount` patternに時間が含まれていない場合。 [\#4004](https://github.com/ClickHouse/ClickHouse/pull/4004) ([Léo Ercolanelli](https://github.com/ercolanelli-leo)) +- 整数のシリアル化のパフォーマンスの向上。 [\#3968](https://github.com/ClickHouse/ClickHouse/pull/3968) ([アモスの鳥](https://github.com/amosbird)) +- Zero left padding PODArrayので、-1要素は常に有効でゼロになります。 これは、オフセットの分岐のない計算に使用されます。 [\#3920](https://github.com/ClickHouse/ClickHouse/pull/3920) ([アモスの鳥](https://github.com/amosbird)) +- 元に戻す `jemalloc` パフォーマン [\#4018](https://github.com/ClickHouse/ClickHouse/pull/4018) ([alexey-milovidov](https://github.com/alexey-milovidov)) + +#### 下位互換性のない変更 {#backward-incompatible-changes-2} + +- 文書化されていない機能を削除 `ALTER MODIFY PRIMARY KEY` それがによって取って代わられたので `ALTER MODIFY ORDER BY` 司令部 [\#3887](https://github.com/ClickHouse/ClickHouse/pull/3887) ([Alex Zatelepin](https://github.com/ztlpn)) +- 削除機能 `shardByHash`. [\#3833](https://github.com/ClickHouse/ClickHouse/pull/3833) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- タイプの結果を持つスカラーサブクエリの使用を禁止する `AggregateFunction`. 
[\#3865](https://github.com/ClickHouse/ClickHouse/pull/3865) ([イワン](https://github.com/abyss7)) + +#### ビルド/テスト/パッケージの改善 {#buildtestingpackaging-improvements-6} + +- PowerPCのサポートを追加 (`ppc64le`)ビルド. [\#4132](https://github.com/ClickHouse/ClickHouse/pull/4132) ([Danila Kutenin](https://github.com/danlark1)) +- ステートフル機能試験を実般に利用可能データセットである。 [\#3969](https://github.com/ClickHouse/ClickHouse/pull/3969) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- サーバーが起動できない場合のエラーを修正しました `bash: /usr/bin/clickhouse-extract-from-config: Operation not permitted` Dockerまたはsystemd-nspawn内のメッセージ。 [\#4136](https://github.com/ClickHouse/ClickHouse/pull/4136) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- 更新 `rdkafka` v1.0.0-RC5へのライブラリ。 生のCインターフェイスの代わりにcppkafkaを使用します。 [\#4025](https://github.com/ClickHouse/ClickHouse/pull/4025) ([イワン](https://github.com/abyss7)) +- 更新 `mariadb-client` ライブラリ。 UBSanで見つかった問題のいずれかを修正しました。 [\#3924](https://github.com/ClickHouse/ClickHouse/pull/3924) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- UBSanビルドのいくつかの修正。 [\#3926](https://github.com/ClickHouse/ClickHouse/pull/3926) [\#3021](https://github.com/ClickHouse/ClickHouse/pull/3021) [\#3948](https://github.com/ClickHouse/ClickHouse/pull/3948) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- UBSanビルドによるテストのコミットごとの実行を追加しました。 +- PVS-Studio static analyzerのコミットごとの実行を追加しました。 +- PVS-Studioによって発見されたバグを修正しました。 [\#4013](https://github.com/ClickHouse/ClickHouse/pull/4013) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- 固定glibc互換性の問題。 [\#4100](https://github.com/ClickHouse/ClickHouse/pull/4100) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Dockerイメージを18.10に移動し、glibc\>=2.28の互換性ファイルを追加します [\#3965](https://github.com/ClickHouse/ClickHouse/pull/3965) ([alesapin](https://github.com/alesapin)) +- 追加環境変数の場合はユーザーを行わないchownディレクトリをサーバー dockerイメージです。 [\#3967](https://github.com/ClickHouse/ClickHouse/pull/3967) ([alesapin](https://github.com/alesapin)) +- からの警告のほとんどを有効に `-Weverything` クラングで。 有効 `-Wpedantic`. [\#3986](https://github.com/ClickHouse/ClickHouse/pull/3986) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- 唯一のクラングで利用可能ないくつかのより多くの警告を追加しました8. [\#3993](https://github.com/ClickHouse/ClickHouse/pull/3993) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- リンク先 `libLLVM` 共有リンクを使用する場合は、個々のLLVMライブラリではなく。 [\#3989](https://github.com/ClickHouse/ClickHouse/pull/3989) ([Orivej Desh](https://github.com/orivej)) +- テスト画像のための追加された消毒剤の変数。 [\#4072](https://github.com/ClickHouse/ClickHouse/pull/4072) ([alesapin](https://github.com/alesapin)) +- `clickhouse-server` debianパッケージは `libcap2-bin` 使用するパッケージ `setcap` 機能を設定するためのツール。 これは任意です。 [\#4093](https://github.com/ClickHouse/ClickHouse/pull/4093) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- 改善されたコンパイル時間、固定includesむ。 [\#3898](https://github.com/ClickHouse/ClickHouse/pull/3898) ([proller](https://github.com/proller)) +- ハッシュ関数のパフォーマンステス [\#3918](https://github.com/ClickHouse/ClickHouse/pull/3918) ([フィリモノフ](https://github.com/filimonov)) +- 固定巡回ライブラリ依存。 [\#3958](https://github.com/ClickHouse/ClickHouse/pull/3958) ([proller](https://github.com/proller)) +- 低利用可能なメモリとコンパイルの改善。 [\#4030](https://github.com/ClickHouse/ClickHouse/pull/4030) ([proller](https://github.com/proller)) +- 追加試験スクリプトの再現性能の劣化 `jemalloc`. [\#4036](https://github.com/ClickHouse/ClickHouse/pull/4036) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- 下のコメントや文字列リテラルのスペルミスを修正 `dbms`. 
[\#4122](https://github.com/ClickHouse/ClickHouse/pull/4122) ([マイハー](https://github.com/maiha)) +- コメントの誤字を修正しました。 [\#4089](https://github.com/ClickHouse/ClickHouse/pull/4089) ([Evgenii Pravda](https://github.com/kvinty)) + +## [2018年の変更履歴](https://github.com/ClickHouse/ClickHouse/blob/master/docs/en/changelog/2018.md) {#changelog-for-2018} diff --git a/docs/ja/whats_new/changelog/index.md b/docs/ja/whats_new/changelog/index.md deleted file mode 120000 index bbe79caca19..00000000000 --- a/docs/ja/whats_new/changelog/index.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/whats_new/changelog/index.md \ No newline at end of file diff --git a/docs/ja/whats_new/changelog/index.md b/docs/ja/whats_new/changelog/index.md new file mode 100644 index 00000000000..dd382ebf5ce --- /dev/null +++ b/docs/ja/whats_new/changelog/index.md @@ -0,0 +1,668 @@ +--- +machine_translated: true +machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 +toc_folder_title: Changelog +toc_priority: 74 +toc_title: '2020' +--- + +## ClickHouseリリースv20.3 {#clickhouse-release-v20-3} + +### ClickHouseリリリースv20.3.4.10,2020-03-20 {#clickhouse-release-v20-3-4-10-2020-03-20} + +#### バグ修正 {#bug-fix} + +- このリリースも含む全てのバグ修正から20.1.8.41 +- 不足している修正 `rows_before_limit_at_least` プロセッサパイプラインを使用したhttpクエリの場合。 この修正 [\#9730](https://github.com/ClickHouse/ClickHouse/issues/9730). [\#9757](https://github.com/ClickHouse/ClickHouse/pull/9757) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) + +### ClickHouseリリリースv20.3.3.6,2020-03-17 {#clickhouse-release-v20-3-3-6-2020-03-17} + +#### バグ修正 {#bug-fix-1} + +- このリリースも含む全てのバグ修正から20.1.7.38 +- ユーザーが以前のバージョンで突然変異を実行した場合、複製が機能しないレプリケーションのバグを修正しました。 この修正 [\#9645](https://github.com/ClickHouse/ClickHouse/issues/9645). [\#9652](https://github.com/ClickHouse/ClickHouse/pull/9652) ([alesapin](https://github.com/alesapin)). こ版20.3後方互換。 +- 設定を追加 `use_compact_format_in_distributed_parts_names` これにより、 `INSERT` へのクエリ `Distributed` よりコンパクトな形式のテーブル。 この修正 [\#9647](https://github.com/ClickHouse/ClickHouse/issues/9647). [\#9653](https://github.com/ClickHouse/ClickHouse/pull/9653) ([alesapin](https://github.com/alesapin)). こ版20.3後方互換。 + +### ClickHouseリリリースv20.3.2.1,2020-03-12 {#clickhouse-release-v20-3-2-1-2020-03-12} + +#### 下位互換性のない変更 {#backward-incompatible-change} + +- 問題を修正しました `file name too long` データを送信するとき `Distributed` 多数のレプリカのテーブル。 レプリカの資格情報がサーバーログに表示される問題を修正しました。 ディスク上のディレクトリ名の形式が `[shard{shard_index}[_replica{replica_index}]]`. [\#8911](https://github.com/ClickHouse/ClickHouse/pull/8911) ([Mikhail Korotov](https://github.com/millb))新しいバージョンにアップグレードした後、古いサーバーのバージョンが新しいディレクトリ形式を認識しないため、手動の介入なしにダウングレードするこ ダウングレードする場合は、対応するディレクトリの名前を手動で古い形式に変更する必要があります。 この変更は、非同期を使用した場合にのみ関連します `INSERT`にs `Distributed` テーブル。 バージョン20.3.3では、新しいフォーマットを徐々に有効にするための設定を紹介します。 +- 変更コマンドのレプリケーションログエントリの形式を変更。 新しいバージョンをイ +- Stacktracesをダンプするシンプルなメモリプロファイラを実装する `system.trace_log` 毎N文字以上のソフト配分を制限 [\#8765](https://github.com/ClickHouse/ClickHouse/pull/8765) ([イワン](https://github.com/abyss7)) [\#9472](https://github.com/ClickHouse/ClickHouse/pull/9472) ([alexey-milovidov](https://github.com/alexey-milovidov))の列 `system.trace_log` から改名されました `timer_type` に `trace_type`. 
この変更が必要な第三者機関の性能解析およびflamegraph処理ツールです。 +- 内部スレッド番号の代わりにosスレッドidを使用します。 この修正 [\#7477](https://github.com/ClickHouse/ClickHouse/issues/7477) 古い `clickhouse-client` サーバーから送信されるログを受信できない `send_logs_level` これは、構造化ログメッセージの名前と種類が変更されたためです。 一方、異なるサーバーバージョンでは、異なるタイプのログを相互に送信できます。 あなたが使用しないとき `send_logs_level` 設定、あなたは気にしないでください。 [\#8954](https://github.com/ClickHouse/ClickHouse/pull/8954) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- 削除 `indexHint` 機能 [\#9542](https://github.com/ClickHouse/ClickHouse/pull/9542) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- 削除 `findClusterIndex`, `findClusterValue` 機能。 この修正 [\#8641](https://github.com/ClickHouse/ClickHouse/issues/8641). これらの機能を使用していた場合は、メールを送信します `clickhouse-feedback@yandex-team.com` [\#9543](https://github.com/ClickHouse/ClickHouse/pull/9543) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- これで、列を作成したり、列を追加したりすることはできません `SELECT` 既定の式としてサブクエリ。 [\#9481](https://github.com/ClickHouse/ClickHouse/pull/9481) ([alesapin](https://github.com/alesapin)) +- JOIN内のサブクエリのエイリアスが必要です。 [\#9274](https://github.com/ClickHouse/ClickHouse/pull/9274) ([Artem Zuikov](https://github.com/4ertus2)) +- 改善された `ALTER MODIFY/ADD` クエリロジック。 今はできません `ADD` タイプのない列, `MODIFY` デフォルトの式では、列の型は変更されません。 `MODIFY` 型は既定の式の値を緩めません。 修正 [\#8669](https://github.com/ClickHouse/ClickHouse/issues/8669). [\#9227](https://github.com/ClickHouse/ClickHouse/pull/9227) ([alesapin](https://github.com/alesapin)) +- ログ設定の変更を適用するには、サーバーを再起動する必要があります。 これは、サーバーが削除されたログファイルにログを記録するバグを回避するための一時的な回避策です。 [\#8696](https://github.com/ClickHouse/ClickHouse/issues/8696)). [\#8707](https://github.com/ClickHouse/ClickHouse/pull/8707) ([Alexander Kuzmenkov](https://github.com/akuzm)) +- を設定 `experimental_use_processors` デフォルトでは有効です。 この設定をご利用の新しいクエリのパイプライン これは内部リファクタリングであり、目に見える変更は期待していません。 問題が表示される場合は、ゼロをバックアップするように設定します。 [\#8768](https://github.com/ClickHouse/ClickHouse/pull/8768) ([alexey-milovidov](https://github.com/alexey-milovidov)) + +#### 新しい機能 {#new-feature} + +- 追加 `Avro` と `AvroConfluent` 入力/出力形式 [\#8571](https://github.com/ClickHouse/ClickHouse/pull/8571) ([Andrew Onyshchuk](https://github.com/oandrew)) [\#8957](https://github.com/ClickHouse/ClickHouse/pull/8957) ([Andrew Onyshchuk](https://github.com/oandrew)) [\#8717](https://github.com/ClickHouse/ClickHouse/pull/8717) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- 期限切れのキーのマルチスレッドおよび非ブロック更新 `cache` 辞書(古いものを読むための任意の許可を持つ)。 [\#8303](https://github.com/ClickHouse/ClickHouse/pull/8303) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) +- クエリの追加 `ALTER ... MATERIALIZE TTL`. 
TTLによって期限切れのデータを強制的に削除し、すべての部分でTTLに関するメタ情報を再計算する突然変異を実行します。 [\#8775](https://github.com/ClickHouse/ClickHouse/pull/8775) ([アントン-ポポフ](https://github.com/CurtizJ)) +- 必要に応じて、hashjoinからmergejoin(ディスク上)に切り替えます [\#9082](https://github.com/ClickHouse/ClickHouse/pull/9082) ([Artem Zuikov](https://github.com/4ertus2)) +- 追加 `MOVE PARTITION` コマンド `ALTER TABLE` [\#4729](https://github.com/ClickHouse/ClickHouse/issues/4729) [\#6168](https://github.com/ClickHouse/ClickHouse/pull/6168) ([ギヨームタッセリー](https://github.com/YiuRULE)) +- 設定ファイルからストレージ設定をリロードする。 [\#8594](https://github.com/ClickHouse/ClickHouse/pull/8594) ([Vladimir Chebotarev](https://github.com/excitoon)) +- 変更できる `storage_policy` あまり豊かではないものに。 [\#8107](https://github.com/ClickHouse/ClickHouse/pull/8107) ([Vladimir Chebotarev](https://github.com/excitoon)) +- S3ストレージとテーブル機能のglobs/wildcardsのサポートを追加しました。 [\#8851](https://github.com/ClickHouse/ClickHouse/pull/8851) ([Vladimir Chebotarev](https://github.com/excitoon)) +- 実装 `bitAnd`, `bitOr`, `bitXor`, `bitNot` のために `FixedString(N)` データ型。 [\#9091](https://github.com/ClickHouse/ClickHouse/pull/9091) ([ギヨームタッセリー](https://github.com/YiuRULE)) +- 機能追加 `bitCount`. この修正 [\#8702](https://github.com/ClickHouse/ClickHouse/issues/8702). [\#8708](https://github.com/ClickHouse/ClickHouse/pull/8708) ([alexey-milovidov](https://github.com/alexey-milovidov)) [\#8749](https://github.com/ClickHouse/ClickHouse/pull/8749) ([ikopylov](https://github.com/ikopylov)) +- 追加 `generateRandom` テーブル機能をランダム行に指定されたschema. 任意のテストテーブルにデータを設定できます。 [\#8994](https://github.com/ClickHouse/ClickHouse/pull/8994) ([イリヤ-ヤツィシン](https://github.com/qoega)) +- `JSONEachRowFormat` 支援特別の場合オブジェ囲まれたトップレベルの配列になります。 [\#8860](https://github.com/ClickHouse/ClickHouse/pull/8860) ([Kruglov Pavel](https://github.com/Avogar)) +- これで、列を作成することができます `DEFAULT` デフォルトの列に依存する式 `ALIAS` 式。 [\#9489](https://github.com/ClickHouse/ClickHouse/pull/9489) ([alesapin](https://github.com/alesapin)) +- 指定できるようにする `--limit` ソースデータサイズよりも `clickhouse-obfuscator`. データは異なるランダムシードで繰り返されます。 [\#9155](https://github.com/ClickHouse/ClickHouse/pull/9155) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- 追加 `groupArraySample` 機能(に類似した `groupArray` とreserviorサンプリングアルゴリズムです。 [\#8286](https://github.com/ClickHouse/ClickHouse/pull/8286) ([アモスの鳥](https://github.com/amosbird)) +- これで、更新キューのサイズを監視することができます `cache`/`complex_key_cache` システム指標による辞書。 [\#9413](https://github.com/ClickHouse/ClickHouse/pull/9413) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) +- CSV出力形式の行区切りとしてCRLFを使用できるようにする `output_format_csv_crlf_end_of_line` は1に設定されます [\#8934](https://github.com/ClickHouse/ClickHouse/pull/8934) [\#8935](https://github.com/ClickHouse/ClickHouse/pull/8935) [\#8963](https://github.com/ClickHouse/ClickHouse/pull/8963) ([Mikhail Korotov](https://github.com/millb)) +- より多くの機能を実装する [H3](https://github.com/uber/h3) API: `h3GetBaseCell`, `h3HexAreaM2`, `h3IndexesAreNeighbors`, `h3ToChildren`, `h3ToString` と `stringToH3` [\#8938](https://github.com/ClickHouse/ClickHouse/pull/8938) ([ニコ-マンデリー](https://github.com/nmandery)) +- 新しい設定を導入: `max_parser_depth` 最大スタックサイズを制御し、大規模な複雑なクエリを許可する。 この修正 [\#6681](https://github.com/ClickHouse/ClickHouse/issues/6681) と [\#7668](https://github.com/ClickHouse/ClickHouse/issues/7668). 
[\#8647](https://github.com/ClickHouse/ClickHouse/pull/8647) ([Maxim Smirnov](https://github.com/qMBQx8GH)) +- 設定を追加する `force_optimize_skip_unused_shards` 未使用のシャードをスキップできない場合にスローする設定 [\#8805](https://github.com/ClickHouse/ClickHouse/pull/8805) ([Azat Khuzhin](https://github.com/azat)) +- この設定は複数のディスク/量のデータを格納するための送付 `Distributed` エンジン [\#8756](https://github.com/ClickHouse/ClickHouse/pull/8756) ([Azat Khuzhin](https://github.com/azat)) +- 支援の保管方針 (``)一時的なデータを貯えるため。 [\#8750](https://github.com/ClickHouse/ClickHouse/pull/8750) ([Azat Khuzhin](https://github.com/azat)) +- 追加 `X-ClickHouse-Exception-Code` データを送信する前に例外がスローされた場合に設定されるHTTPヘッダー。 これは [\#4971](https://github.com/ClickHouse/ClickHouse/issues/4971). [\#8786](https://github.com/ClickHouse/ClickHouse/pull/8786) ([Mikhail Korotov](https://github.com/millb)) +- 機能追加 `ifNotFinite`. それは単なる統語的な砂糖です: `ifNotFinite(x, y) = isFinite(x) ? x : y`. [\#8710](https://github.com/ClickHouse/ClickHouse/pull/8710) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- 追加 `last_successful_update_time` コラムの `system.dictionaries` テーブル [\#9394](https://github.com/ClickHouse/ClickHouse/pull/9394) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) +- 追加 `blockSerializedSize` 機能(圧縮なしのディスク上のサイズ) [\#8952](https://github.com/ClickHouse/ClickHouse/pull/8952) ([Azat Khuzhin](https://github.com/azat)) +- 機能を追加 `moduloOrZero` [\#9358](https://github.com/ClickHouse/ClickHouse/pull/9358) ([hcz](https://github.com/hczhcz)) +- 追加されたシステム表 `system.zeros` と `system.zeros_mt` だけでなく、物語機能 `zeros()` と `zeros_mt()`. テーブル(テーブル機能を含む単一カラム名 `zero` とタイプ `UInt8`. この列にはゼロがあります。 これは、多くの行を生成する最速の方法としてテスト目的に必要です。 この修正 [\#6604](https://github.com/ClickHouse/ClickHouse/issues/6604) [\#9593](https://github.com/ClickHouse/ClickHouse/pull/9593) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) + +#### 実験的特徴 {#experimental-feature} + +- 部品の新しいコンパクトな形式を追加 `MergeTree`-すべての列が一つのファイルに格納されている家族のテーブル。 それは小さく、頻繁な挿入物の性能を高めるのを助ける。 古いフォーマット(列ごとに一つのファイル)がwideと呼ばれます。 データ格納形式は設定によって制御 `min_bytes_for_wide_part` と `min_rows_for_wide_part`. 
[\#8290](https://github.com/ClickHouse/ClickHouse/pull/8290) ([アントン-ポポフ](https://github.com/CurtizJ)) +- S3ストレージのサポート `Log`, `TinyLog` と `StripeLog` テーブル。 [\#8862](https://github.com/ClickHouse/ClickHouse/pull/8862) ([Pavel Kovalenko](https://github.com/Jokser)) + +#### バグ修正 {#bug-fix-2} + +- ログメッセージの不整合な空白を修正しました。 [\#9322](https://github.com/ClickHouse/ClickHouse/pull/9322) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- テーブル作成時に、名前のないタプルの配列がネストされた構造としてフラット化されたバグを修正。 [\#8866](https://github.com/ClickHouse/ClickHouse/pull/8866) ([achulkov2comment](https://github.com/achulkov2)) +- ときに問題を修正しました “Too many open files” エラーが発生する恐れがあると多数の場合はファイルのマッチングglobパターン `File` テーブルまたは `file` テーブル機能。 今すぐファイルが遅延開かれます。 この修正 [\#8857](https://github.com/ClickHouse/ClickHouse/issues/8857) [\#8861](https://github.com/ClickHouse/ClickHouse/pull/8861) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- DROP TEMPORARY TABLEは現在、一時テーブルのみを削除します。 [\#8907](https://github.com/ClickHouse/ClickHouse/pull/8907) ([Vitaly Baranov](https://github.com/vitlibar)) +- 削除した旧式の仕切りした時停止のサーバーは取り外し、添付を表示します。 [\#8602](https://github.com/ClickHouse/ClickHouse/pull/8602) ([ギヨームタッセリー](https://github.com/YiuRULE)) +- のためにどのようにデフォルトのディスクを算定し、自由空間から `data` サブディレクトリ。 空き容量が正しく計算されない場合の問題を修正しました。 `data` ディレクト この修正 [\#7441](https://github.com/ClickHouse/ClickHouse/issues/7441) [\#9257](https://github.com/ClickHouse/ClickHouse/pull/9257) ([Mikhail Korotov](https://github.com/millb)) +- カンマ(クロス)は、内部の()に参加することができます。 [\#9251](https://github.com/ClickHouse/ClickHouse/pull/9251) ([Artem Zuikov](https://github.com/4ertus2)) +- WHERE節に演算子のようなものがある場合は、INNER JOINにクロスを書き換えることができます。 [\#9229](https://github.com/ClickHouse/ClickHouse/pull/9229) ([Artem Zuikov](https://github.com/4ertus2)) +- 後に可能な誤った結果を修正 `GROUP BY` 有効に設定 `distributed_aggregation_memory_efficient`. 修正 [\#9134](https://github.com/ClickHouse/ClickHouse/issues/9134). [\#9289](https://github.com/ClickHouse/ClickHouse/pull/9289) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +- 見つかりキーのカウントとして見るメトリクスのキャッシュを生成する事ができます。 [\#9411](https://github.com/ClickHouse/ClickHouse/pull/9411) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) +- レプリケーションプロト [\#8598](https://github.com/ClickHouse/ClickHouse/issues/8598). [\#9412](https://github.com/ClickHouse/ClickHouse/pull/9412) ([alesapin](https://github.com/alesapin)) +- 上の固定レース条件 `queue_task_handle` の起動時に `ReplicatedMergeTree` テーブル。 [\#9552](https://github.com/ClickHouse/ClickHouse/pull/9552) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- その他の通貨 `NOT` で動作しませんでした `SHOW TABLES NOT LIKE` クエリ [\#8727](https://github.com/ClickHouse/ClickHouse/issues/8727) [\#8940](https://github.com/ClickHouse/ClickHouse/pull/8940) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- 機能に範囲チェックを追加しました `h3EdgeLengthM`. 
このチェッ [\#8945](https://github.com/ClickHouse/ClickHouse/pull/8945) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- 複数の引数(10以上)の三元論理演算のバッチ計算のバグを修正しました。 [\#8718](https://github.com/ClickHouse/ClickHouse/pull/8718) ([Alexander Kazakov](https://github.com/Akazz)) +- Prewhere最適化のエラーを修正しました。 `Inconsistent number of columns got from MergeTreeRangeReader` 例外だ [\#9024](https://github.com/ClickHouse/ClickHouse/pull/9024) ([アントン-ポポフ](https://github.com/CurtizJ)) +- 予期しない修正 `Timeout exceeded while reading from socket` 例外は、ランダムに起きにセキュア接続前にタイムアウト実を超えた場queryプロファイラが有効になります。 また、追加 `connect_timeout_with_failover_secure_ms` 設定(デフォルトは100ミリ秒)です。 `connect_timeout_with_failover_ms` ただし、セキュアな接続に使用されます(SSLハンドシェイクが通常のTCP接続よりも遅いため) [\#9026](https://github.com/ClickHouse/ClickHouse/pull/9026) ([tavplubix](https://github.com/tavplubix)) +- バグを修正しました。 `parts_to_do=0` と `is_done=0`. [\#9022](https://github.com/ClickHouse/ClickHouse/pull/9022) ([alesapin](https://github.com/alesapin)) +- 新しい任意の結合ロジックを使用する `partial_merge_join` 設定。 それは作ることが可能です `ANY|ALL|SEMI LEFT` と `ALL INNER` との結合 `partial_merge_join=1` 今だ [\#8932](https://github.com/ClickHouse/ClickHouse/pull/8932) ([Artem Zuikov](https://github.com/4ertus2)) +- シャードは、例外をスローするのではなく、イニシエータから取得した設定をシャードのconstaintsにクランプします。 この修正では、別の制約を持つシャードにクエリを送信できます。 [\#9447](https://github.com/ClickHouse/ClickHouse/pull/9447) ([Vitaly Baranov](https://github.com/vitlibar)) +- 固定メモリ管理の問題 `MergeTreeReadPool`. [\#8791](https://github.com/ClickHouse/ClickHouse/pull/8791) ([Vladimir Chebotarev](https://github.com/excitoon)) +- 修正 `toDecimal*OrNull()` 文字列で呼び出されたときの関数群 `e`. 修正 [\#8312](https://github.com/ClickHouse/ClickHouse/issues/8312) [\#8764](https://github.com/ClickHouse/ClickHouse/pull/8764) ([Artem Zuikov](https://github.com/4ertus2)) +- う `FORMAT Null` クライアントにデータを送信しません。 [\#8767](https://github.com/ClickHouse/ClickHouse/pull/8767) ([Alexander Kuzmenkov](https://github.com/akuzm)) +- そのタイムスタンプを修正 `LiveViewBlockInputStream` 更新されません。 `LIVE VIEW` 実験的特徴です [\#8644](https://github.com/ClickHouse/ClickHouse/pull/8644) ([vxider](https://github.com/Vxider)) [\#8625](https://github.com/ClickHouse/ClickHouse/pull/8625) ([vxider](https://github.com/Vxider)) +- 固定 `ALTER MODIFY TTL` 古いTTL式を削除することを許さなかった誤った動作。 [\#8422](https://github.com/ClickHouse/ClickHouse/pull/8422) ([Vladimir Chebotarev](https://github.com/excitoon)) +- MergeTreeIndexSet内のUBSanレポートを修正しました。 この修正 [\#9250](https://github.com/ClickHouse/ClickHouse/issues/9250) [\#9365](https://github.com/ClickHouse/ClickHouse/pull/9365) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- の動作を修正しました `match` と `extract` haystackにゼロバイトがある場合の関数。 Haystackが一定の場合、その動作は間違っていました。 この修正 [\#9160](https://github.com/ClickHouse/ClickHouse/issues/9160) [\#9163](https://github.com/ClickHouse/ClickHouse/pull/9163) ([alexey-milovidov](https://github.com/alexey-milovidov)) [\#9345](https://github.com/ClickHouse/ClickHouse/pull/9345) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Apache Avro3rd-partyライブラリのデストラクターから投げを避けます。 [\#9066](https://github.com/ClickHouse/ClickHouse/pull/9066) ([Andrew Onyshchuk](https://github.com/oandrew)) +- ポーリングされたバッチをコミットしない `Kafka` 部分的には、データの穴につながる可能性があります。 [\#8876](https://github.com/ClickHouse/ClickHouse/pull/8876) ([フィリモノフ](https://github.com/filimonov)) +- 修正 `joinGet` null可能な戻り値の型を指定します。 https://github.com/ClickHouse/ClickHouse/issues/8919 [\#9014](https://github.com/ClickHouse/ClickHouse/pull/9014) ([アモスの鳥](https://github.com/amosbird)) +- 圧縮時にデータの非互換性を修正する `T64` コーデック。 
+- Add the setting `enable_early_constant_folding` and disable it in some cases where it led to errors. [\#9010](https://github.com/ClickHouse/ClickHouse/pull/9010) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix the pushdown predicate optimizer with VIEW and enable the test [\#9011](https://github.com/ClickHouse/ClickHouse/pull/9011) ([Winter Zhang](https://github.com/zhang2014))
+- Fix a segfault in `Merge` tables, that can happen when reading from `File` storages [\#9387](https://github.com/ClickHouse/ClickHouse/pull/9387) ([tavplubix](https://github.com/tavplubix))
+- Added a check for the storage policy in `ATTACH PARTITION FROM`, `REPLACE PARTITION`, `MOVE TO TABLE`. Otherwise data of a part could become inaccessible after restart and prevent ClickHouse from starting. [\#9383](https://github.com/ClickHouse/ClickHouse/pull/9383) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fix alters if there is a TTL set for the table. [\#8800](https://github.com/ClickHouse/ClickHouse/pull/8800) ([Anton Popov](https://github.com/CurtizJ))
+- Fix a race condition that can happen when `SYSTEM RELOAD ALL DICTIONARIES` is executed while some dictionaries are being modified/added/removed. [\#8801](https://github.com/ClickHouse/ClickHouse/pull/8801) ([Vitaly Baranov](https://github.com/vitlibar))
+- In previous versions the `Memory` database engine used an empty data path, so tables were created in the `path` directory (e.g. `/var/lib/clickhouse/`), not in the data directory of the database (e.g. `/var/lib/clickhouse/db_name`). [\#8753](https://github.com/ClickHouse/ClickHouse/pull/8753) ([tavplubix](https://github.com/tavplubix))
+- Fixed wrong log messages about a missing default disk or policy. [\#9530](https://github.com/ClickHouse/ClickHouse/pull/9530) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fix not(has()) for the bloom\_filter index of array types. [\#9407](https://github.com/ClickHouse/ClickHouse/pull/9407) ([achimbab](https://github.com/achimbab))
+- Allow the first column(s) in a table with the `Log` engine to be an alias [\#9231](https://github.com/ClickHouse/ClickHouse/pull/9231) ([Ivan](https://github.com/abyss7))
+- Fix the order of ranges while reading from a `MergeTree` table in one thread. It could lead to exceptions from `MergeTreeRangeReader` or wrong query results. [\#9050](https://github.com/ClickHouse/ClickHouse/pull/9050) ([Anton Popov](https://github.com/CurtizJ))
+- Make `reinterpretAsFixedString` return `FixedString` instead of `String`. [\#9052](https://github.com/ClickHouse/ClickHouse/pull/9052) ([Andrew Onyshchuk](https://github.com/oandrew))
+- Avoid extremely rare cases when the user could get a wrong error message (`Success` instead of a detailed error description). [\#9457](https://github.com/ClickHouse/ClickHouse/pull/9457) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Do not crash when using the `Template` format with an empty row template. [\#8785](https://github.com/ClickHouse/ClickHouse/pull/8785) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Metadata files for system tables could be created in a wrong place [\#8653](https://github.com/ClickHouse/ClickHouse/pull/8653) ([tavplubix](https://github.com/tavplubix)) Fixes [\#8581](https://github.com/ClickHouse/ClickHouse/issues/8581).
+- Fix a data race on exception\_ptr in cache dictionaries [\#8303](https://github.com/ClickHouse/ClickHouse/issues/8303). [\#9379](https://github.com/ClickHouse/ClickHouse/pull/9379) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Do not throw an exception for the query `ATTACH TABLE IF NOT EXISTS`. Previously it was thrown if the table already exists, despite the `IF NOT EXISTS` clause. [\#8967](https://github.com/ClickHouse/ClickHouse/pull/8967) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed a missing closing paren in an exception message. [\#8811](https://github.com/ClickHouse/ClickHouse/pull/8811) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Avoid the message `Possible deadlock avoided` at the startup of clickhouse-client in interactive mode. [\#9455](https://github.com/ClickHouse/ClickHouse/pull/9455) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed the issue when padding at the end of a base64-encoded value can be malformed. Update the base64 library. This fixes [\#9491](https://github.com/ClickHouse/ClickHouse/issues/9491), closes [\#9492](https://github.com/ClickHouse/ClickHouse/issues/9492) [\#9500](https://github.com/ClickHouse/ClickHouse/pull/9500) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Prevent losing data in `Kafka` in rare cases when an exception happens after reading the suffix but before commit. Fixes [\#9378](https://github.com/ClickHouse/ClickHouse/issues/9378) [\#9507](https://github.com/ClickHouse/ClickHouse/pull/9507) ([filimonov](https://github.com/filimonov))
+- Fixed an exception in `DROP TABLE IF EXISTS` [\#8663](https://github.com/ClickHouse/ClickHouse/pull/8663) ([Nikita Vasilev](https://github.com/nikvas0))
+- Fix a crash when a user tries to `ALTER MODIFY SETTING` for the old-formated `MergeTree` table engines family. [\#9435](https://github.com/ClickHouse/ClickHouse/pull/9435) ([alesapin](https://github.com/alesapin))
+- Support for UInt64 numbers that don't fit in Int64 in JSON-related functions. Update SIMDJSON to master. This fixes [\#9209](https://github.com/ClickHouse/ClickHouse/issues/9209) [\#9344](https://github.com/ClickHouse/ClickHouse/pull/9344) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed execution of inversed predicates when a non-strictly monotonic functional index is used. [\#9223](https://github.com/ClickHouse/ClickHouse/pull/9223) ([Alexander Kazakov](https://github.com/Akazz))
+- Don't try to fold `IN` constant in `GROUP BY` [\#8868](https://github.com/ClickHouse/ClickHouse/pull/8868) ([Amos Bird](https://github.com/amosbird))
+- Fix a bug in `ALTER DELETE` mutations which led to index corruption. This fixes [\#9019](https://github.com/ClickHouse/ClickHouse/issues/9019) and [\#8982](https://github.com/ClickHouse/ClickHouse/issues/8982). Additionally fix extremely rare race conditions in `ReplicatedMergeTree` `ALTER` queries. [\#9048](https://github.com/ClickHouse/ClickHouse/pull/9048) ([alesapin](https://github.com/alesapin))
+- When the setting `compile_expressions` is enabled, you could get `unexpected column` in `LLVMExecutableFunction` when we use `Nullable` types [\#8910](https://github.com/ClickHouse/ClickHouse/pull/8910) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Multiple fixes for the `Kafka` engine: 1) fix duplicates that appeared during consumer group rebalance. 2) Fix rare ‘holes’ that appeared when data was polled from several partitions with one poll and committed partially (now we always process/commit the whole polled block of messages). 3) Fix flushes by block size (before that, only flushing by timeout worked properly). 4) Better subscription procedure (with assignment feedback). 5) Make tests work faster (with default intervals and timeouts). Because data was not flushed by block size before (as it should according to the documentation), this PR may lead to some performance degradation with default settings (due to more frequent and smaller flushes, which are less optimal). If you encounter a performance issue after this change, please increase `kafka_max_block_size` in the table to a bigger value (for example `CREATE TABLE ...Engine=Kafka ... SETTINGS ... kafka_max_block_size=524288`). Fixes [\#7259](https://github.com/ClickHouse/ClickHouse/issues/7259) [\#8917](https://github.com/ClickHouse/ClickHouse/pull/8917) ([filimonov](https://github.com/filimonov))
+- Fix the `Parameter out of bound` exception in some queries after PREWHERE optimizations. [\#8914](https://github.com/ClickHouse/ClickHouse/pull/8914) ([Baudouin Giard](https://github.com/bgiard))
+- Fixed the case of mixed constness of arguments of the function `arrayZip`. [\#8705](https://github.com/ClickHouse/ClickHouse/pull/8705) ([alexey-milovidov](https://github.com/alexey-milovidov))
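+
+    An illustrative query that exercises the fixed case — one constant argument and one non-constant argument (`materialize` forces a full column):
+
+    ``` sql
+    SELECT arrayZip(['a', 'b'], materialize([1, 2]));
+    ```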
+- When executing a `CREATE` query, fold constant expressions in storage engine arguments. Replace an empty database name with the current database. Fixes [\#6508](https://github.com/ClickHouse/ClickHouse/issues/6508), [\#3492](https://github.com/ClickHouse/ClickHouse/issues/3492) [\#9262](https://github.com/ClickHouse/ClickHouse/pull/9262) ([tavplubix](https://github.com/tavplubix))
+- Now it's not possible to create or add columns with simple cyclic aliases like `a DEFAULT b, b DEFAULT a`. [\#9603](https://github.com/ClickHouse/ClickHouse/pull/9603) ([alesapin](https://github.com/alesapin))
+- Fixed a bug with a double move which may corrupt the original part. This is relevant if you use `ALTER TABLE MOVE` [\#8680](https://github.com/ClickHouse/ClickHouse/pull/8680) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Allow the `interval` identifier to parse correctly without backticks. Fixed the issue when a query could not be executed even if the `interval` identifier is enclosed in backticks or double quotes. This fixes [\#9124](https://github.com/ClickHouse/ClickHouse/issues/9124). [\#9142](https://github.com/ClickHouse/ClickHouse/pull/9142) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed fuzz test and incorrect behaviour of the `bitTestAll`/`bitTestAny` functions. [\#9143](https://github.com/ClickHouse/ClickHouse/pull/9143) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix possible crash/wrong number of rows in `LIMIT n WITH TIES` when there are a lot of rows equal to the n-th row. [\#9464](https://github.com/ClickHouse/ClickHouse/pull/9464) ([tavplubix](https://github.com/tavplubix))
+- Fix mutations with parts written with `insert_quorum` enabled. [\#9463](https://github.com/ClickHouse/ClickHouse/pull/9463) ([alesapin](https://github.com/alesapin))
+- Fix a data race at the destruction of `Poco::HTTPServer`. It could happen when the server is started and immediately shut down. [\#9468](https://github.com/ClickHouse/ClickHouse/pull/9468) ([Anton Popov](https://github.com/CurtizJ))
+- Fix a bug in which a misleading error message was shown when running `SHOW CREATE TABLE a_table_that_does_not_exist`. [\#8899](https://github.com/ClickHouse/ClickHouse/pull/8899) ([achulkov2](https://github.com/achulkov2))
+- Fixed the `Parameters are out of bound` exception in some rare cases when we have a constant in the `SELECT` clause together with `ORDER BY` and `LIMIT` clauses. [\#8892](https://github.com/ClickHouse/ClickHouse/pull/8892) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Fix mutations finalization, when an already done mutation could have the status `is_done=0`. [\#9217](https://github.com/ClickHouse/ClickHouse/pull/9217) ([alesapin](https://github.com/alesapin))
+- Prevent executing `ALTER ADD INDEX` for MergeTree tables with the old syntax, because it doesn't work. [\#8822](https://github.com/ClickHouse/ClickHouse/pull/8822) ([Mikhail Korotov](https://github.com/millb))
+- During server startup, do not access the table which a `LIVE VIEW` depends on, so the server is able to start. Also remove `LIVE VIEW` dependencies when detaching a `LIVE VIEW`. `LIVE VIEW` is an experimental feature. [\#8824](https://github.com/ClickHouse/ClickHouse/pull/8824) ([tavplubix](https://github.com/tavplubix))
+- Fix a possible segfault in `MergeTreeRangeReader` while executing `PREWHERE`. [\#9106](https://github.com/ClickHouse/ClickHouse/pull/9106) ([Anton Popov](https://github.com/CurtizJ))
+- Fix possible mismatched checksums with column TTLs. [\#9451](https://github.com/ClickHouse/ClickHouse/pull/9451) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed a bug when parts were not moved in background by TTL rules in case when there is only one volume. [\#8672](https://github.com/ClickHouse/ClickHouse/pull/8672) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fixed the issue `Method createColumn() is not implemented for data type Set`. This fixes [\#7799](https://github.com/ClickHouse/ClickHouse/issues/7799). [\#8674](https://github.com/ClickHouse/ClickHouse/pull/8674) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Now we try to finalize mutations more frequently. [\#9427](https://github.com/ClickHouse/ClickHouse/pull/9427) ([alesapin](https://github.com/alesapin))
+- Fix `intDiv` by a minus-one constant [\#9351](https://github.com/ClickHouse/ClickHouse/pull/9351) ([hcz](https://github.com/hczhcz))
+- Fix a possible race condition in `BlockIO`. [\#9356](https://github.com/ClickHouse/ClickHouse/pull/9356) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix a bug leading to server termination when trying to use / drop a `Kafka` table created with wrong parameters. [\#9513](https://github.com/ClickHouse/ClickHouse/pull/9513) ([filimonov](https://github.com/filimonov))
+- Added a workaround if the OS returns a wrong result for the `timer_create` function. [\#8837](https://github.com/ClickHouse/ClickHouse/pull/8837) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed an error in the usage of the `min_marks_for_seek` parameter. Fixed the error message when there is no sharding key in a Distributed table and we try to skip unused shards. [\#8908](https://github.com/ClickHouse/ClickHouse/pull/8908) ([Azat Khuzhin](https://github.com/azat))
+
+#### Improvement {#improvement}
+
+- Implement `ALTER MODIFY/DROP` queries on top of mutations for the `ReplicatedMergeTree*` engines family. Now `ALTERS` block only at the metadata update stage, and don't block after that. [\#8701](https://github.com/ClickHouse/ClickHouse/pull/8701) ([alesapin](https://github.com/alesapin))
+- Add the ability to rewrite CROSS to INNER JOINs with a `WHERE` section containing unqualified names. [\#9512](https://github.com/ClickHouse/ClickHouse/pull/9512) ([Artem Zuikov](https://github.com/4ertus2))
+- Make `SHOW TABLES` and `SHOW DATABASES` queries support `WHERE` expressions and `FROM`/`IN` [\#9076](https://github.com/ClickHouse/ClickHouse/pull/9076) ([sundyli](https://github.com/sundy-li))
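+
+    For example (filtering the `system` database here is just an illustration):
+
+    ``` sql
+    SHOW TABLES FROM system WHERE name LIKE '%log%';
+    ```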
+- Added a setting `deduplicate_blocks_in_dependent_materialized_views`. [\#9070](https://github.com/ClickHouse/ClickHouse/pull/9070) ([urykhy](https://github.com/urykhy))
+- After recent changes the MySQL client started to print binary strings in hex, thereby making them unreadable ([\#9032](https://github.com/ClickHouse/ClickHouse/issues/9032)). The workaround in ClickHouse is to mark string columns as UTF-8, which is not always, but usually the case. [\#9079](https://github.com/ClickHouse/ClickHouse/pull/9079) ([Yuriy Baranov](https://github.com/yurriy))
+- Add support of String and FixedString keys for `sumMap` [\#8903](https://github.com/ClickHouse/ClickHouse/pull/8903) ([Baudouin Giard](https://github.com/bgiard))
+- Support string keys in SummingMergeTree maps [\#8933](https://github.com/ClickHouse/ClickHouse/pull/8933) ([Baudouin Giard](https://github.com/bgiard))
+- Signal termination of a thread to the thread pool even if the thread has thrown an exception [\#8736](https://github.com/ClickHouse/ClickHouse/pull/8736) ([Ding Xiang Fei](https://github.com/dingxiangfei2009))
+- Allow to set `query_id` in `clickhouse-benchmark` [\#9416](https://github.com/ClickHouse/ClickHouse/pull/9416) ([Anton Popov](https://github.com/CurtizJ))
+- Don't allow strange expressions in an `ALTER TABLE ... PARTITION partition` query. This addresses [\#7192](https://github.com/ClickHouse/ClickHouse/issues/7192) [\#8835](https://github.com/ClickHouse/ClickHouse/pull/8835) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- The table `system.table_engines` now provides information about feature support (like `supports_ttl` or `supports_sort_order`). [\#8830](https://github.com/ClickHouse/ClickHouse/pull/8830) ([Max Akhmedov](https://github.com/zlobober))
+- Enable `system.metric_log` by default. It will contain rows with values of ProfileEvents collected with the “collect\_interval\_milliseconds” interval (one second by default). The table is very small (usually in the order of megabytes) and collecting this data by default is reasonable. [\#9225](https://github.com/ClickHouse/ClickHouse/pull/9225) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Initialize query profiler for all threads in a group, e.g. it allows to fully profile insert-queries. Fixes [\#6964](https://github.com/ClickHouse/ClickHouse/issues/6964) [\#8874](https://github.com/ClickHouse/ClickHouse/pull/8874) ([Ivan](https://github.com/abyss7))
+- Now a temporary `LIVE VIEW` is created by `CREATE LIVE VIEW name WITH TIMEOUT [42] ...` instead of `CREATE TEMPORARY LIVE VIEW ...`, because the previous syntax was not consistent with `CREATE TEMPORARY TABLE ...` [\#9131](https://github.com/ClickHouse/ClickHouse/pull/9131) ([tavplubix](https://github.com/tavplubix))
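+
+    A sketch of the new syntax (names are hypothetical; `LIVE VIEW` is experimental and must be enabled first):
+
+    ``` sql
+    SET allow_experimental_live_view = 1;
+    CREATE LIVE VIEW lv WITH TIMEOUT 42 AS SELECT count() FROM some_table;
+    ```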
+- Add the text\_log.level configuration parameter to limit entries that go to the `system.text_log` table [\#8809](https://github.com/ClickHouse/ClickHouse/pull/8809) ([Azat Khuzhin](https://github.com/azat))
+- Allow to put downloaded parts to a disk/volume according to TTL rules [\#8598](https://github.com/ClickHouse/ClickHouse/pull/8598) ([Vladimir Chebotarev](https://github.com/excitoon))
+- For external MySQL dictionaries, allow to mutualize the MySQL connection pool to “share” it among dictionaries. This option significantly reduces the number of connections to MySQL servers. [\#9409](https://github.com/ClickHouse/ClickHouse/pull/9409) ([Clément Rodriguez](https://github.com/clemrodriguez))
+- Show the nearest query execution time for quantiles in `clickhouse-benchmark` output instead of interpolated values. It's better to show values that correspond to the execution time of some queries. [\#8712](https://github.com/ClickHouse/ClickHouse/pull/8712) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Possibility to add a key and timestamp for the message when inserting data into Kafka. Fixes [\#7198](https://github.com/ClickHouse/ClickHouse/issues/7198) [\#8969](https://github.com/ClickHouse/ClickHouse/pull/8969) ([filimonov](https://github.com/filimonov))
+- If the server is run from a terminal, highlight the thread number, query id and log priority by colors. This improves the readability of correlated log messages for developers. [\#8961](https://github.com/ClickHouse/ClickHouse/pull/8961) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Better exception message while loading tables for an `Ordinary` database [\#9527](https://github.com/ClickHouse/ClickHouse/pull/9527) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Implement `arraySlice` for arrays with aggregate function states. This fixes [\#9388](https://github.com/ClickHouse/ClickHouse/issues/9388) [\#9391](https://github.com/ClickHouse/ClickHouse/pull/9391) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Allow constant functions and constant arrays to be used on the right side of the IN operator. [\#8813](https://github.com/ClickHouse/ClickHouse/pull/8813) ([Anton Popov](https://github.com/CurtizJ))
+- If a ZooKeeper exception happens while fetching data for system.replicas, display it in a separate column. This implements [\#9137](https://github.com/ClickHouse/ClickHouse/issues/9137) [\#9138](https://github.com/ClickHouse/ClickHouse/pull/9138) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Atomically remove MergeTree data parts on destroy. [\#8402](https://github.com/ClickHouse/ClickHouse/pull/8402) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Support row-level security for Distributed tables. [\#8926](https://github.com/ClickHouse/ClickHouse/pull/8926) ([Ivan](https://github.com/abyss7))
+- Now we recognize suffix (like KB, KiB…) in settings values. [\#8072](https://github.com/ClickHouse/ClickHouse/pull/8072) ([Mikhail Korotov](https://github.com/millb))
+- Prevent out of memory while constructing the result of a large JOIN. [\#8637](https://github.com/ClickHouse/ClickHouse/pull/8637) ([Artem Zuikov](https://github.com/4ertus2))
+- Added names of clusters to suggestions in interactive mode in `clickhouse-client`. [\#8709](https://github.com/ClickHouse/ClickHouse/pull/8709) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Initialize query profiler for all threads in a group, e.g. it allows to fully profile insert-queries [\#8820](https://github.com/ClickHouse/ClickHouse/pull/8820) ([Ivan](https://github.com/abyss7))
+- Added the column `exception_code` in the `system.query_log` table. [\#8770](https://github.com/ClickHouse/ClickHouse/pull/8770) ([Mikhail Korotov](https://github.com/millb))
+- Enabled the MySQL compatibility server on port `9004` in the default server configuration file. Fixed the password generation command in the example in the configuration. [\#8771](https://github.com/ClickHouse/ClickHouse/pull/8771) ([Yuriy Baranov](https://github.com/yurriy))
+- Prevent abort on shutdown if the filesystem is readonly. This fixes [\#9094](https://github.com/ClickHouse/ClickHouse/issues/9094) [\#9100](https://github.com/ClickHouse/ClickHouse/pull/9100) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Better exception message when length is required in an HTTP POST query. [\#9453](https://github.com/ClickHouse/ClickHouse/pull/9453) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add `_path` and `_file` virtual columns to the `HDFS` and `File` engines and the `hdfs` and `file` table functions [\#8489](https://github.com/ClickHouse/ClickHouse/pull/8489) ([Olga Khvostikova](https://github.com/stavrolia))
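+
+    For instance, to see which file each row came from (the path and column list are hypothetical):
+
+    ``` sql
+    SELECT _file, count() FROM file('data/*.csv', 'CSV', 'x UInt32') GROUP BY _file;
+    ```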
+- Fix the error `Cannot find column` while inserting into a `MATERIALIZED VIEW` in case a new column was added to the view's internal table. [\#8766](https://github.com/ClickHouse/ClickHouse/pull/8766) [\#8788](https://github.com/ClickHouse/ClickHouse/pull/8788) ([vzakaznikov](https://github.com/vzakaznikov)) [\#8788](https://github.com/ClickHouse/ClickHouse/issues/8788) [\#8806](https://github.com/ClickHouse/ClickHouse/pull/8806) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) [\#8803](https://github.com/ClickHouse/ClickHouse/pull/8803) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix progress over the native client-server protocol, by sending progress after the final update (like logs). This may be relevant only to some third-party tools that use the native protocol. [\#9495](https://github.com/ClickHouse/ClickHouse/pull/9495) ([Azat Khuzhin](https://github.com/azat))
+- Add a system metric tracking the number of client connections using the MySQL protocol ([\#9013](https://github.com/ClickHouse/ClickHouse/issues/9013)). [\#9015](https://github.com/ClickHouse/ClickHouse/pull/9015) ([Eugene Klimov](https://github.com/Slach))
+- From now on, HTTP responses will have the `X-ClickHouse-Timezone` header set to the same timezone value that `SELECT timezone()` would report. [\#9493](https://github.com/ClickHouse/ClickHouse/pull/9493) ([Denis Glazachev](https://github.com/traceon))
+
+#### Performance Improvement {#performance-improvement}
+
+- Improve performance of index analysis with IN [\#9261](https://github.com/ClickHouse/ClickHouse/pull/9261) ([Anton Popov](https://github.com/CurtizJ))
+- Simpler and more efficient code in logical functions + code cleanups. A follow-up to [\#8718](https://github.com/ClickHouse/ClickHouse/issues/8718) [\#8728](https://github.com/ClickHouse/ClickHouse/pull/8728) ([Alexander Kazakov](https://github.com/Akazz))
+- Overall performance improvement (in the range of 5%..200% for affected queries) by ensuring even more strict aliasing with C++20 features. [\#9304](https://github.com/ClickHouse/ClickHouse/pull/9304) ([Amos Bird](https://github.com/amosbird))
+- More strict aliasing for the inner loops of comparison functions. [\#9327](https://github.com/ClickHouse/ClickHouse/pull/9327) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- More strict aliasing for the inner loops of arithmetic functions. [\#9325](https://github.com/ClickHouse/ClickHouse/pull/9325) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- A faster implementation of ColumnVector::replicate(), via which ColumnConst::convertToFullColumn() is implemented. Also useful in tests when materializing constants. [\#9293](https://github.com/ClickHouse/ClickHouse/pull/9293) ([Alexander Kazakov](https://github.com/Akazz))
+- Another minor performance improvement to `ColumnVector::replicate()` (this speeds up the `materialize` function and higher-order functions), a further improvement to [\#9293](https://github.com/ClickHouse/ClickHouse/issues/9293) [\#9442](https://github.com/ClickHouse/ClickHouse/pull/9442) ([Alexander Kazakov](https://github.com/Akazz))
+- Improved performance of the `stochasticLinearRegression` aggregate function. This patch is contributed by Intel. [\#8652](https://github.com/ClickHouse/ClickHouse/pull/8652) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Improve performance of the `reinterpretAsFixedString` function. [\#9342](https://github.com/ClickHouse/ClickHouse/pull/9342) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Do not send blocks to the client for the `Null` format in the processors pipeline. [\#8797](https://github.com/ClickHouse/ClickHouse/pull/8797) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) [\#8767](https://github.com/ClickHouse/ClickHouse/pull/8767) ([Alexander Kuzmenkov](https://github.com/akuzm))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement}
+
+- Exception handling now works correctly on the Windows Subsystem for Linux. See https://github.com/clickhouse-extras/libunwind/pull/3 This fixes [\#6480](https://github.com/ClickHouse/ClickHouse/issues/6480) [\#9564](https://github.com/ClickHouse/ClickHouse/pull/9564) ([sobolevsv](https://github.com/sobolevsv))
+- Replace `readline` with `replxx` for interactive line editing in `clickhouse-client` [\#8416](https://github.com/ClickHouse/ClickHouse/pull/8416) ([Ivan](https://github.com/abyss7))
+- Better build time and fewer template instantiations in FunctionsComparison. [\#9324](https://github.com/ClickHouse/ClickHouse/pull/9324) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added integration with `clang-tidy` in CI. See also [\#6044](https://github.com/ClickHouse/ClickHouse/issues/6044) [\#9566](https://github.com/ClickHouse/ClickHouse/pull/9566) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Now we link ClickHouse in CI using `lld` even for `gcc`. [\#9049](https://github.com/ClickHouse/ClickHouse/pull/9049) ([alesapin](https://github.com/alesapin))
+- Allow to randomize thread scheduling and insert glitches when `THREAD_FUZZER_*` environment variables are set. This helps testing. [\#9459](https://github.com/ClickHouse/ClickHouse/pull/9459) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Enable secure sockets in stateless tests [\#9288](https://github.com/ClickHouse/ClickHouse/pull/9288) ([tavplubix](https://github.com/tavplubix))
+- Make SPLIT\_SHARED\_LIBRARIES=OFF more robust [\#9156](https://github.com/ClickHouse/ClickHouse/pull/9156) ([Azat Khuzhin](https://github.com/azat))
+- Make the “performance\_introspection\_and\_logging” test reliable against random server stalls. This may happen in a CI environment. See also [\#9515](https://github.com/ClickHouse/ClickHouse/issues/9515) [\#9528](https://github.com/ClickHouse/ClickHouse/pull/9528) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Validate XML in the style check. [\#9550](https://github.com/ClickHouse/ClickHouse/pull/9550) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a race condition in the test `00738_lock_for_inner_table`. This test relied on sleep. [\#9555](https://github.com/ClickHouse/ClickHouse/pull/9555) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Remove performance tests of type `once`. This is needed to run all performance tests in statistical comparison mode (more reliable). [\#9557](https://github.com/ClickHouse/ClickHouse/pull/9557) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added a performance test for arithmetic functions. [\#9326](https://github.com/ClickHouse/ClickHouse/pull/9326) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added a performance test for the `sumMap` and `sumMapWithOverflow` aggregate functions. A follow-up for [\#8933](https://github.com/ClickHouse/ClickHouse/issues/8933) [\#8947](https://github.com/ClickHouse/ClickHouse/pull/8947) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Ensure the style of ErrorCodes by the style check. [\#9370](https://github.com/ClickHouse/ClickHouse/pull/9370) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added a script for tests. [\#8796](https://github.com/ClickHouse/ClickHouse/pull/8796) ([alesapin](https://github.com/alesapin))
+- Add the GCC warning `-Wsuggest-override` to locate and fix all places where the `override` keyword must be used. [\#8760](https://github.com/ClickHouse/ClickHouse/pull/8760) ([kreuzerkrieg](https://github.com/kreuzerkrieg))
+- Ignore weak symbols under Mac OS X because they must be defined [\#9538](https://github.com/ClickHouse/ClickHouse/pull/9538) ([ghost](https://github.com/ghost))
+- Normalize the running time of some queries in performance tests. This is done in preparation to run all the performance tests in comparison mode. [\#9565](https://github.com/ClickHouse/ClickHouse/pull/9565) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix some tests to support pytest with query tests [\#9062](https://github.com/ClickHouse/ClickHouse/pull/9062) ([Ivan](https://github.com/abyss7))
+- Enable SSL in the build with MSan, so the server will not fail at startup when running stateless tests [\#9531](https://github.com/ClickHouse/ClickHouse/pull/9531) ([tavplubix](https://github.com/tavplubix))
+- Fix database substitution in test results [\#9384](https://github.com/ClickHouse/ClickHouse/pull/9384) ([Ilya Yatsishin](https://github.com/qoega))
+- Build fixes for miscellaneous platforms [\#9381](https://github.com/ClickHouse/ClickHouse/pull/9381) ([proller](https://github.com/proller)) [\#8755](https://github.com/ClickHouse/ClickHouse/pull/8755) ([proller](https://github.com/proller)) [\#8631](https://github.com/ClickHouse/ClickHouse/pull/8631) ([proller](https://github.com/proller))
+- Added a disks section to the stateless-with-coverage test docker image [\#9213](https://github.com/ClickHouse/ClickHouse/pull/9213) ([Pavel Kovalenko](https://github.com/Jokser))
+- Get rid of in-source-tree files when building with GRPC [\#9588](https://github.com/ClickHouse/ClickHouse/pull/9588) ([Amos Bird](https://github.com/amosbird))
+- Slightly faster build time by removing SessionCleaner from Context. Make the code of SessionCleaner more simple. [\#9232](https://github.com/ClickHouse/ClickHouse/pull/9232) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Updated checking for hung queries in the clickhouse-test script [\#8858](https://github.com/ClickHouse/ClickHouse/pull/8858) ([Alexander Kazakov](https://github.com/Akazz))
+- Removed some useless files from the repository. [\#8843](https://github.com/ClickHouse/ClickHouse/pull/8843) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Changed the type of math perftests from `once` to `loop`. [\#8783](https://github.com/ClickHouse/ClickHouse/pull/8783) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Added a docker image which allows to build an interactive code-browser HTML report for our codebase. [\#8781](https://github.com/ClickHouse/ClickHouse/pull/8781) ([alesapin](https://github.com/alesapin)) See the [Woboq Code Browser](https://clickhouse-test-reports.s3.yandex.net/codebrowser/html_report///ClickHouse/dbms/src/index.html)
+- Suppress some test failures under MSan. [\#8780](https://github.com/ClickHouse/ClickHouse/pull/8780) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Speed up the “exception while insert” test. This test often timed out in the debug-with-coverage build. [\#8711](https://github.com/ClickHouse/ClickHouse/pull/8711) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Updated `libcxx` and `libcxxabi` to master. In preparation for [\#9304](https://github.com/ClickHouse/ClickHouse/issues/9304) [\#9308](https://github.com/ClickHouse/ClickHouse/pull/9308) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix the flacky test `00910_zookeeper_test_alter_compression_codecs`. [\#9525](https://github.com/ClickHouse/ClickHouse/pull/9525) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Clean up duplicated linker flags. Make sure the linker won't look up an unexpected symbol. [\#9433](https://github.com/ClickHouse/ClickHouse/pull/9433) ([Amos Bird](https://github.com/amosbird))
+- Add the `clickhouse-odbc` driver into the test images. This allows to test the interaction of ClickHouse with ClickHouse via its own ODBC driver. [\#9348](https://github.com/ClickHouse/ClickHouse/pull/9348) ([filimonov](https://github.com/filimonov))
+- Fix several bugs in unit tests. [\#9047](https://github.com/ClickHouse/ClickHouse/pull/9047) ([alesapin](https://github.com/alesapin))
+- Enable the `-Wmissing-include-dirs` GCC warning to eliminate all non-existing includes, mostly a result of CMake scripting errors [\#8704](https://github.com/ClickHouse/ClickHouse/pull/8704) ([kreuzerkrieg](https://github.com/kreuzerkrieg))
+- Describe reasons if the query profiler cannot work. This is intended for [\#9049](https://github.com/ClickHouse/ClickHouse/issues/9049) [\#9144](https://github.com/ClickHouse/ClickHouse/pull/9144) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Update OpenSSL to upstream master. Fixed the issue when TLS connections may fail with the message `OpenSSL SSL_read: error:14094438:SSL routines:ssl3_read_bytes:tlsv1 alert internal error` and `SSL Exception: error:2400006E:random number generator::error retrieving entropy`. The issue was present in version 20.1. [\#8956](https://github.com/ClickHouse/ClickHouse/pull/8956) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Update the Dockerfile for the server [\#8893](https://github.com/ClickHouse/ClickHouse/pull/8893) ([Ilya Mazaev](https://github.com/ne-ray))
+- Minor fixes in the build-gcc-from-sources script [\#8774](https://github.com/ClickHouse/ClickHouse/pull/8774) ([Michael Nacharov](https://github.com/mnach))
+- Replace `numbers` with `zeros` in perftests where the `number` column is not used. This will lead to cleaner test results. [\#9600](https://github.com/ClickHouse/ClickHouse/pull/9600) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed a stack overflow issue when using initializer\_list in Column constructors. [\#9367](https://github.com/ClickHouse/ClickHouse/pull/9367) ([ghost](https://github.com/ghost))
+- Upgrade librdkafka to v1.3.0. Enable the bundled `rdkafka` and `gsasl` libraries on Mac OS X. [\#9000](https://github.com/ClickHouse/ClickHouse/pull/9000) ([Andrew Onyshchuk](https://github.com/oandrew))
+- Build fix on GCC 9.2.0 [\#9306](https://github.com/ClickHouse/ClickHouse/pull/9306) ([vxider](https://github.com/Vxider))
+
+## ClickHouse release v20.1 {#clickhouse-release-v20-1}
+
+### ClickHouse release v20.1.8.41, 2020-03-20 {#clickhouse-release-v20-1-8-41-2020-03-20}
+
+#### Bug Fix {#bug-fix-3}
+
+- Fix possible permanent `Cannot schedule a task` errors (due to an unhandled exception in `ParallelAggregatingBlockInputStream::Handler::onFinish/onFinishThread`). This fixes [\#6833](https://github.com/ClickHouse/ClickHouse/issues/6833). [\#9154](https://github.com/ClickHouse/ClickHouse/pull/9154) ([Azat Khuzhin](https://github.com/azat))
+- Fix excessive memory consumption in `ALTER` queries (mutations). This fixes [\#9533](https://github.com/ClickHouse/ClickHouse/issues/9533) and [\#9670](https://github.com/ClickHouse/ClickHouse/issues/9670). [\#9754](https://github.com/ClickHouse/ClickHouse/pull/9754) ([alesapin](https://github.com/alesapin))
+- Fix a bug in backquoting in DDL of external dictionaries. This fixes [\#9619](https://github.com/ClickHouse/ClickHouse/issues/9619). [\#9734](https://github.com/ClickHouse/ClickHouse/pull/9734) ([alesapin](https://github.com/alesapin))
+
+### ClickHouse release v20.1.7.38, 2020-03-18 {#clickhouse-release-v20-1-7-38-2020-03-18}
+
+#### Bug Fix {#bug-fix-4}
+
+- Fixed incorrect internal function names for `sumKahan` and `sumWithOverflow`. It led to an exception while using these functions in remote queries. [\#9636](https://github.com/ClickHouse/ClickHouse/pull/9636) ([Azat Khuzhin](https://github.com/azat)). This issue was in all ClickHouse releases.
+- Allow `ALTER ON CLUSTER` of `Distributed` tables with internal replication. This fixes [\#3268](https://github.com/ClickHouse/ClickHouse/issues/3268). [\#9617](https://github.com/ClickHouse/ClickHouse/pull/9617) ([shinoi2](https://github.com/shinoi2)). This issue was in all ClickHouse releases.
+- Fix possible exceptions `Size of filter doesn't match size of column` and `Invalid number of rows in Chunk` in `MergeTreeRangeReader`. They could appear while executing `PREWHERE` in some cases. Fixes [\#9132](https://github.com/ClickHouse/ClickHouse/issues/9132). [\#9612](https://github.com/ClickHouse/ClickHouse/pull/9612) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed the issue that the timezone was not preserved if you write a simple arithmetic expression like `time + 1` (in contrast to an expression like `time + INTERVAL 1 SECOND`). This fixes [\#5743](https://github.com/ClickHouse/ClickHouse/issues/5743). [\#9323](https://github.com/ClickHouse/ClickHouse/pull/9323) ([alexey-milovidov](https://github.com/alexey-milovidov)). This issue was in all ClickHouse releases.
+- Now it's not possible to create or add columns with simple cyclic aliases like `a DEFAULT b, b DEFAULT a`. [\#9603](https://github.com/ClickHouse/ClickHouse/pull/9603) ([alesapin](https://github.com/alesapin))
+- Fixed the issue when padding at the end of a base64-encoded value can be malformed. Update the base64 library. This fixes [\#9491](https://github.com/ClickHouse/ClickHouse/issues/9491), closes [\#9492](https://github.com/ClickHouse/ClickHouse/issues/9492) [\#9500](https://github.com/ClickHouse/ClickHouse/pull/9500) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix a data race at the destruction of `Poco::HTTPServer`. It could happen when the server is started and immediately shut down. [\#9468](https://github.com/ClickHouse/ClickHouse/pull/9468) ([Anton Popov](https://github.com/CurtizJ))
+- Fix possible crash/wrong number of rows in `LIMIT n WITH TIES` when there are a lot of rows equal to the n-th row. [\#9464](https://github.com/ClickHouse/ClickHouse/pull/9464) ([tavplubix](https://github.com/tavplubix))
+- Fix possible mismatched checksums with column TTLs. [\#9451](https://github.com/ClickHouse/ClickHouse/pull/9451) ([Anton Popov](https://github.com/CurtizJ))
+- Fix a crash when a user tries to `ALTER MODIFY SETTING` for the old-formated `MergeTree` table engines family. [\#9435](https://github.com/ClickHouse/ClickHouse/pull/9435) ([alesapin](https://github.com/alesapin))
+- Now we try to finalize mutations more frequently. [\#9427](https://github.com/ClickHouse/ClickHouse/pull/9427) ([alesapin](https://github.com/alesapin))
+- Fix replication protocol incompatibility introduced in [\#8598](https://github.com/ClickHouse/ClickHouse/issues/8598). [\#9412](https://github.com/ClickHouse/ClickHouse/pull/9412) ([alesapin](https://github.com/alesapin))
+- Fix not(has()) for the bloom\_filter index of array types. [\#9407](https://github.com/ClickHouse/ClickHouse/pull/9407) ([achimbab](https://github.com/achimbab))
+- Fixed the behaviour of the `match` and `extract` functions when the haystack has zero bytes. The behaviour was wrong when the haystack was constant. This fixes [\#9160](https://github.com/ClickHouse/ClickHouse/issues/9160) [\#9163](https://github.com/ClickHouse/ClickHouse/pull/9163) ([alexey-milovidov](https://github.com/alexey-milovidov)) [\#9345](https://github.com/ClickHouse/ClickHouse/pull/9345) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-1}
+
+- Exception handling now works correctly on the Windows Subsystem for Linux. See https://github.com/clickhouse-extras/libunwind/pull/3 This fixes [\#6480](https://github.com/ClickHouse/ClickHouse/issues/6480) [\#9564](https://github.com/ClickHouse/ClickHouse/pull/9564) ([sobolevsv](https://github.com/sobolevsv))
+
+### ClickHouse release v20.1.6.30, 2020-03-05 {#clickhouse-release-v20-1-6-30-2020-03-05}
+
+#### Bug Fix {#bug-fix-5}
+
+- Fix data incompatibility when compressed with the `T64` codec.
+  [\#9039](https://github.com/ClickHouse/ClickHouse/pull/9039) [(abyss7)](https://github.com/abyss7)
+- Fix the order of ranges while reading from a MergeTree table in one thread. Fixes [\#8964](https://github.com/ClickHouse/ClickHouse/issues/8964).
+  [\#9050](https://github.com/ClickHouse/ClickHouse/pull/9050) [(CurtizJ)](https://github.com/CurtizJ)
+- Fix a possible segfault in `MergeTreeRangeReader` while executing `PREWHERE`. Fixes [\#9064](https://github.com/ClickHouse/ClickHouse/issues/9064).
+  [\#9106](https://github.com/ClickHouse/ClickHouse/pull/9106) [(CurtizJ)](https://github.com/CurtizJ)
+- Fix `reinterpretAsFixedString` to return `FixedString` instead of `String`.
+  [\#9052](https://github.com/ClickHouse/ClickHouse/pull/9052) [(oandrew)](https://github.com/oandrew)
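+
+    A quick illustrative way to see the fixed return type:
+
+    ``` sql
+    SELECT toTypeName(reinterpretAsFixedString(toUInt32(1)));  -- now reports a FixedString type
+    ```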
+- Fix `joinGet` with nullable return types. Fixes [\#8919](https://github.com/ClickHouse/ClickHouse/issues/8919)
+  [\#9014](https://github.com/ClickHouse/ClickHouse/pull/9014) [(amosbird)](https://github.com/amosbird)
+- Fixed fuzz test and incorrect behaviour of the bitTestAll/bitTestAny functions.
+  [\#9143](https://github.com/ClickHouse/ClickHouse/pull/9143) [(alexey-milovidov)](https://github.com/alexey-milovidov)
+- Fixed the behaviour of the match and extract functions when the haystack has zero bytes. The behaviour was wrong when the haystack was constant. Fixes [\#9160](https://github.com/ClickHouse/ClickHouse/issues/9160)
+  [\#9163](https://github.com/ClickHouse/ClickHouse/pull/9163) [(alexey-milovidov)](https://github.com/alexey-milovidov)
+- Fixed execution of inversed predicates when a non-strictly monotonic functional index is used. Fixes [\#9034](https://github.com/ClickHouse/ClickHouse/issues/9034)
+  [\#9223](https://github.com/ClickHouse/ClickHouse/pull/9223) [(Akazz)](https://github.com/Akazz)
+- Allow to rewrite `CROSS` to `INNER JOIN` if there's a `[NOT] LIKE` operator in the `WHERE` section. Fixes [\#9191](https://github.com/ClickHouse/ClickHouse/issues/9191)
+  [\#9229](https://github.com/ClickHouse/ClickHouse/pull/9229) [(4ertus2)](https://github.com/4ertus2)
+- Allow the first column(s) in a table with the Log engine to be an alias.
+  [\#9231](https://github.com/ClickHouse/ClickHouse/pull/9231) [(abyss7)](https://github.com/abyss7)
+- Allow comma join with `IN()` inside. Fixes [\#7314](https://github.com/ClickHouse/ClickHouse/issues/7314).
+  [\#9251](https://github.com/ClickHouse/ClickHouse/pull/9251) [(4ertus2)](https://github.com/4ertus2)
+- Improve the `ALTER MODIFY/ADD` queries logic. Now you cannot `ADD` a column without a type, `MODIFY` of a default expression doesn't change the type of the column, and `MODIFY` of a type doesn't lose the default expression value. Fixes [\#8669](https://github.com/ClickHouse/ClickHouse/issues/8669).
+  [\#9227](https://github.com/ClickHouse/ClickHouse/pull/9227) [(alesapin)](https://github.com/alesapin)
+- Fix mutations finalization, when an already done mutation could have the status is\_done=0.
+  [\#9217](https://github.com/ClickHouse/ClickHouse/pull/9217) [(alesapin)](https://github.com/alesapin)
+- Support the “Processors” pipeline for system.numbers and system.numbers\_mt. This also fixes the bug when `max_execution_time` is not respected.
+  [\#7796](https://github.com/ClickHouse/ClickHouse/pull/7796) [(KochetovNicolai)](https://github.com/KochetovNicolai)
+- Fix wrong counting of the `DictCacheKeysRequestedFound` metric.
+  [\#9411](https://github.com/ClickHouse/ClickHouse/pull/9411) [(nikitamikhaylov)](https://github.com/nikitamikhaylov)
+- Added a check for the storage policy in `ATTACH PARTITION FROM`, `REPLACE PARTITION`, `MOVE TO TABLE`, which otherwise could make data of a part inaccessible after restart and prevent ClickHouse from starting.
+  [\#9383](https://github.com/ClickHouse/ClickHouse/pull/9383) [(excitoon)](https://github.com/excitoon)
+- Fixed UBSan report in `MergeTreeIndexSet`. This fixes [\#9250](https://github.com/ClickHouse/ClickHouse/issues/9250)
+  [\#9365](https://github.com/ClickHouse/ClickHouse/pull/9365) [(alexey-milovidov)](https://github.com/alexey-milovidov)
+- Fix possible datarace in BlockIO.
+  [\#9356](https://github.com/ClickHouse/ClickHouse/pull/9356) [(KochetovNicolai)](https://github.com/KochetovNicolai)
+- Support for `UInt64` numbers that don't fit in Int64 in JSON-related functions. Update `SIMDJSON` to master. This fixes [\#9209](https://github.com/ClickHouse/ClickHouse/issues/9209)
+  [\#9344](https://github.com/ClickHouse/ClickHouse/pull/9344) [(alexey-milovidov)](https://github.com/alexey-milovidov)
+- Fix the issue when the amount of free space was not calculated correctly if the data directory is mounted to a separate device. For the default disk, calculate the free space from the `data` subdirectory. This fixes [\#7441](https://github.com/ClickHouse/ClickHouse/issues/7441)
+  [\#9257](https://github.com/ClickHouse/ClickHouse/pull/9257) [(millb)](https://github.com/millb)
+- Fix the issue when TLS connections may fail with the message `OpenSSL SSL_read: error:14094438:SSL routines:ssl3_read_bytes:tlsv1 alert internal error and SSL Exception: error:2400006E:random number generator::error retrieving entropy.` Update OpenSSL to upstream master.
+  [\#8956](https://github.com/ClickHouse/ClickHouse/pull/8956) [(alexey-milovidov)](https://github.com/alexey-milovidov)
+- When executing a `CREATE` query, fold constant expressions in storage engine arguments. Replace an empty database name with the current database. Fixes [\#6508](https://github.com/ClickHouse/ClickHouse/issues/6508), [\#3492](https://github.com/ClickHouse/ClickHouse/issues/3492). Also fix the check for the local address in ClickHouseDictionarySource.
+  [\#9262](https://github.com/ClickHouse/ClickHouse/pull/9262) [(tavplubix)](https://github.com/tavplubix)
+- Fix a segfault in `StorageMerge`, which can happen when reading from StorageFile.
+  [\#9387](https://github.com/ClickHouse/ClickHouse/pull/9387) [(tavplubix)](https://github.com/tavplubix)
+- Prevent losing data in `Kafka` in rare cases when an exception happens after reading the suffix but before commit. Fixes [\#9378](https://github.com/ClickHouse/ClickHouse/issues/9378). Related: [\#7175](https://github.com/ClickHouse/ClickHouse/issues/7175)
+  [\#9507](https://github.com/ClickHouse/ClickHouse/pull/9507) [(filimonov)](https://github.com/filimonov)
+- Fix a bug leading to server termination when trying to use / drop a `Kafka` table created with wrong parameters. Fixes [\#9494](https://github.com/ClickHouse/ClickHouse/issues/9494). Incorporates [\#9507](https://github.com/ClickHouse/ClickHouse/issues/9507).
+  [\#9513](https://github.com/ClickHouse/ClickHouse/pull/9513) [(filimonov)](https://github.com/filimonov)
+
+#### New Feature {#new-feature-1}
+
+- Add the `deduplicate_blocks_in_dependent_materialized_views` option to control the behaviour of idempotent inserts into tables with materialized views. This new feature was added to the bugfix release by a special request from Altinity.
+  [\#9070](https://github.com/ClickHouse/ClickHouse/pull/9070) [(urykhy)](https://github.com/urykhy)
+
+### ClickHouse release v20.1.2.4, 2020-01-22 {#clickhouse-release-v20-1-2-4-2020-01-22}
+
+#### Backward Incompatible Change {#backward-incompatible-change-1}
+
+- Make the setting `merge_tree_uniform_read_distribution` obsolete. The server still recognizes this setting but it has no effect. [\#8308](https://github.com/ClickHouse/ClickHouse/pull/8308) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Changed the return type of the function `greatCircleDistance` to `Float32` because now the result of the calculation is `Float32`. [\#7993](https://github.com/ClickHouse/ClickHouse/pull/7993) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Now it's expected that query parameters are represented in “escaped” format. For example, to pass the string `ab` you have to write `a\tb` or `a\b` and respectively, `a%5Ctb` or `a%5C%09b` in a URL. This is needed to add the possibility to pass NULL as `\N`. This fixes [\#7488](https://github.com/ClickHouse/ClickHouse/issues/7488). [\#8517](https://github.com/ClickHouse/ClickHouse/pull/8517) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Enable the `use_minimalistic_part_header_in_zookeeper` setting for `ReplicatedMergeTree` by default. This will significantly reduce the amount of data stored in ZooKeeper. This setting is supported since version 19.1 and we already use it in production in multiple services without any issues for more than half a year. Disable this setting if you have a chance to downgrade to versions older than 19.1. [\#6850](https://github.com/ClickHouse/ClickHouse/pull/6850) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Data skipping indices are production ready and enabled by default. The settings `allow_experimental_data_skipping_indices`, `allow_experimental_cross_to_join_conversion` and `allow_experimental_multiple_joins_emulation` are now obsolete and do nothing. [\#7974](https://github.com/ClickHouse/ClickHouse/pull/7974) ([alexey-milovidov](https://github.com/alexey-milovidov))
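+
+    As a reminder, a skipping index is declared in the table definition (names and granularity below are illustrative):
+
+    ``` sql
+    CREATE TABLE skip_demo
+    (
+        x UInt32,
+        s String,
+        INDEX idx_s s TYPE bloom_filter GRANULARITY 4
+    ) ENGINE = MergeTree ORDER BY x;
+    ```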
+- Add new `ANY JOIN` logic for `StorageJoin` consistent with the `JOIN` operation. To upgrade without changes in behaviour you need to add `SETTINGS any_join_distinct_right_table_keys = 1` to Engine Join tables metadata or recreate these tables after the upgrade. [\#8400](https://github.com/ClickHouse/ClickHouse/pull/8400) ([Artem Zuikov](https://github.com/4ertus2))
+- Require the server to be restarted to apply changes in the logging configuration. This is a temporary workaround to avoid the bug where the server logs to a deleted log file (see [\#8696](https://github.com/ClickHouse/ClickHouse/issues/8696)). [\#8707](https://github.com/ClickHouse/ClickHouse/pull/8707) ([Alexander Kuzmenkov](https://github.com/akuzm))
+
+#### New Feature {#new-feature-2}
+
+- Added information about part paths to `system.merges`. [\#8043](https://github.com/ClickHouse/ClickHouse/pull/8043) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Add the ability to execute a `SYSTEM RELOAD DICTIONARY` query in `ON CLUSTER` mode. [\#8288](https://github.com/ClickHouse/ClickHouse/pull/8288) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Add the ability to execute `CREATE DICTIONARY` queries in `ON CLUSTER` mode. [\#8163](https://github.com/ClickHouse/ClickHouse/pull/8163) ([alesapin](https://github.com/alesapin))
+- Now a user's profile in `users.xml` can inherit multiple profiles. [\#8343](https://github.com/ClickHouse/ClickHouse/pull/8343) ([Mikhail f. Shiryaev](https://github.com/Felixoid))
+- Added the `system.stack_trace` table that allows to look at stack traces of all server threads. This is useful for developers to introspect server state. This fixes [\#7576](https://github.com/ClickHouse/ClickHouse/issues/7576). [\#8344](https://github.com/ClickHouse/ClickHouse/pull/8344) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add the `DateTime64` datatype with configurable sub-second precision. [\#7170](https://github.com/ClickHouse/ClickHouse/pull/7170) ([Vasily Nemkov](https://github.com/Enmk))
+- Add the table function `clusterAllReplicas` which allows to query all the nodes in the cluster. [\#8493](https://github.com/ClickHouse/ClickHouse/pull/8493) ([Kiran Sunkari](https://github.com/kiransunkari))
+- Add the aggregate function `categoricalInformationValue` which calculates the information value of a discrete feature. [\#8117](https://github.com/ClickHouse/ClickHouse/pull/8117) ([hcz](https://github.com/hczhcz))
+- Speed up parsing of data files in `CSV`, `TSV` and `JSONEachRow` format by doing it in parallel. [\#7780](https://github.com/ClickHouse/ClickHouse/pull/7780) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Add the function `bankerRound` which performs banker's rounding. [\#8112](https://github.com/ClickHouse/ClickHouse/pull/8112) ([hcz](https://github.com/hczhcz))
+- Support more languages in the embedded dictionary for region names: ‘ru’, ‘en’, ‘ua’, ‘uk’, ‘by’, ‘kz’, ‘tr’, ‘de’, ‘uz’, ‘lv’, ‘lt’, ‘et’, ‘pt’, ‘he’, ‘vi’. [\#8189](https://github.com/ClickHouse/ClickHouse/pull/8189) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Improvements in the consistency of `ANY JOIN` logic. Now `t1 ANY LEFT JOIN t2` equals `t2 ANY RIGHT JOIN t1`. [\#7665](https://github.com/ClickHouse/ClickHouse/pull/7665) ([Artem Zuikov](https://github.com/4ertus2))
+- Add the setting `any_join_distinct_right_table_keys` which enables the old behaviour for `ANY INNER JOIN`. [\#7665](https://github.com/ClickHouse/ClickHouse/pull/7665) ([Artem Zuikov](https://github.com/4ertus2))
+- Add new `SEMI` and `ANTI JOIN`. The old `ANY INNER JOIN` behaviour is now available as `SEMI LEFT JOIN`. [\#7665](https://github.com/ClickHouse/ClickHouse/pull/7665) ([Artem Zuikov](https://github.com/4ertus2))
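+
+    For example (table and key names are hypothetical), a semi join keeps each left-side row that has at least one match on the right, without multiplying rows:
+
+    ``` sql
+    SELECT * FROM t1 SEMI LEFT JOIN t2 ON t1.k = t2.k;
+    ```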
+- Added the `Distributed` format for the `File` engine and `file` table function which allows to read from `.bin` files generated by a `Distributed` table. [\#8535](https://github.com/ClickHouse/ClickHouse/pull/8535) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Add an optional reset-column argument for `runningAccumulate` which allows to reset aggregation results for each new key value. [\#8326](https://github.com/ClickHouse/ClickHouse/pull/8326) ([Sergey Kononenko](https://github.com/kononencheg))
+- Add the ability to use ClickHouse as a Prometheus endpoint. [\#7900](https://github.com/ClickHouse/ClickHouse/pull/7900) ([vdimir](https://github.com/Vdimir))
+- Add the section `remote_url_allow_hosts` in `config.xml` which restricts allowed hosts for remote table engines and the table functions `URL`, `S3`, `HDFS`. [\#7154](https://github.com/ClickHouse/ClickHouse/pull/7154) ([Mikhail Korotov](https://github.com/millb))
+- Added the function `greatCircleAngle` which calculates the distance on a sphere in degrees. [\#8105](https://github.com/ClickHouse/ClickHouse/pull/8105) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Changed the Earth radius to be consistent with the H3 library. [\#8105](https://github.com/ClickHouse/ClickHouse/pull/8105) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added `JSONCompactEachRow` and `JSONCompactEachRowWithNamesAndTypes` formats for input and output. [\#7841](https://github.com/ClickHouse/ClickHouse/pull/7841) ([Mikhail Korotov](https://github.com/millb))
+- Added a feature for file-related table engines and table functions (`File`, `S3`, `URL`, `HDFS`) which allows to read and write `gzip` files based on an additional engine parameter or file extension. [\#7840](https://github.com/ClickHouse/ClickHouse/pull/7840) ([Andrey Bodrov](https://github.com/apbodrov))
+- Added the `randomASCII(length)` function, generating a string with a random set of [ASCII](https://en.wikipedia.org/wiki/ASCII#Printable_characters) printable characters. [\#8401](https://github.com/ClickHouse/ClickHouse/pull/8401) ([BayoNet](https://github.com/BayoNet))
+- Added the function `JSONExtractArrayRaw` which returns an array of unparsed JSON array elements from a `JSON` string. [\#8081](https://github.com/ClickHouse/ClickHouse/pull/8081) ([Oleg Matrokhin](https://github.com/errx))
+- Add the `arrayZip` function which allows to combine multiple arrays of equal lengths into one array of tuples. [\#8149](https://github.com/ClickHouse/ClickHouse/pull/8149) ([Winter Zhang](https://github.com/zhang2014))
+- Add the ability to move data between disks according to configured `TTL` expressions for the `*MergeTree` table engines family. [\#8140](https://github.com/ClickHouse/ClickHouse/pull/8140) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Added the new aggregate function `avgWeighted` which allows to calculate the weighted average. [\#7898](https://github.com/ClickHouse/ClickHouse/pull/7898) ([Andrey Bodrov](https://github.com/apbodrov))
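+
+    A self-contained illustration — the weighted average of `x` with weights `w` (both derived from `numbers()` for brevity):
+
+    ``` sql
+    SELECT avgWeighted(x, w) FROM (SELECT number AS x, number AS w FROM numbers(5));
+    ```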
+- Now parallel parsing is enabled by default for the `TSV`, `TSKV`, `CSV` and `JSONEachRow` formats. [\#7894](https://github.com/ClickHouse/ClickHouse/pull/7894) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Add several geo functions from the `H3` library: `h3GetResolution`, `h3EdgeAngle`, `h3EdgeLength`, `h3IsValid` and `h3kRing`. [\#8034](https://github.com/ClickHouse/ClickHouse/pull/8034) ([Konstantin Malanchev](https://github.com/hombit))
+- Added support for brotli (`br`) compression in file-related storages and table functions. This fixes [\#8156](https://github.com/ClickHouse/ClickHouse/issues/8156). [\#8526](https://github.com/ClickHouse/ClickHouse/pull/8526) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add `groupBit*` functions for the `SimpleAggregationFunction` type. [\#8485](https://github.com/ClickHouse/ClickHouse/pull/8485) ([Guillaume Tassery](https://github.com/YiuRULE))
+
+#### Bug Fix {#bug-fix-6}
+
+- Fix rename of tables with the `Distributed` engine. Fixes issue [\#7868](https://github.com/ClickHouse/ClickHouse/issues/7868). [\#8306](https://github.com/ClickHouse/ClickHouse/pull/8306) ([tavplubix](https://github.com/tavplubix))
+- Now dictionaries support `EXPRESSION` for attributes in an arbitrary string in non-ClickHouse SQL dialect. [\#8098](https://github.com/ClickHouse/ClickHouse/pull/8098) ([alesapin](https://github.com/alesapin))
+- Fix broken `INSERT SELECT FROM mysql(...)` query. This fixes [\#8070](https://github.com/ClickHouse/ClickHouse/issues/8070) and [\#7960](https://github.com/ClickHouse/ClickHouse/issues/7960). [\#8234](https://github.com/ClickHouse/ClickHouse/pull/8234) ([tavplubix](https://github.com/tavplubix))
+- Fix the error “Mismatch column sizes” when inserting a default `Tuple` from `JSONEachRow`. This fixes [\#5653](https://github.com/ClickHouse/ClickHouse/issues/5653). [\#8606](https://github.com/ClickHouse/ClickHouse/pull/8606) ([tavplubix](https://github.com/tavplubix))
+- Now an exception will be thrown in case of using `WITH TIES` alongside `LIMIT BY`. Also add the ability to use `TOP` with `LIMIT BY`. This fixes [\#7472](https://github.com/ClickHouse/ClickHouse/issues/7472). [\#7637](https://github.com/ClickHouse/ClickHouse/pull/7637) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Fix an unintended dependency on a fresh glibc version in the `clickhouse-odbc-bridge` binary [\#8046](https://github.com/ClickHouse/ClickHouse/pull/8046) ([Amos Bird](https://github.com/amosbird))
+- Fix a bug in the check function of the `*MergeTree` engines family. Now it doesn't fail when we have an equal amount of rows in the last granule and the last mark (non-final). [\#8047](https://github.com/ClickHouse/ClickHouse/pull/8047) ([alesapin](https://github.com/alesapin))
+- Fix insert into `Enum*` columns after an `ALTER` query, when the underlying numeric type is equal to the table-specified type. This fixes [\#7836](https://github.com/ClickHouse/ClickHouse/issues/7836). [\#7908](https://github.com/ClickHouse/ClickHouse/pull/7908) ([Anton Popov](https://github.com/CurtizJ))
+- Allowed a non-constant negative “size” argument for the function `substring`. It was not allowed by mistake. This fixes [\#4832](https://github.com/ClickHouse/ClickHouse/issues/4832). [\#7703](https://github.com/ClickHouse/ClickHouse/pull/7703) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix a parsing bug when a wrong number of arguments is passed to the `(O|J)DBC` table engine. [\#7709](https://github.com/ClickHouse/ClickHouse/pull/7709) ([alesapin](https://github.com/alesapin))
+- Use the command name of the running clickhouse process when sending logs to syslog. In previous versions, an empty string was used instead of the command name. [\#8460](https://github.com/ClickHouse/ClickHouse/pull/8460) ([Michael Nacharov](https://github.com/mnach))
+- Fix the check of allowed hosts for `localhost`. This PR fixes the solution provided in [\#8241](https://github.com/ClickHouse/ClickHouse/pull/8241). [\#8342](https://github.com/ClickHouse/ClickHouse/pull/8342) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fix a rare crash in the `argMin` and `argMax` functions for long string arguments, when the result is used in the `runningAccumulate` function. This fixes [\#8325](https://github.com/ClickHouse/ClickHouse/issues/8325) [\#8341](https://github.com/ClickHouse/ClickHouse/pull/8341) ([恐竜](https://github.com/769344359))
+- Fix memory overcommit for tables with the `Buffer` engine. [\#8345](https://github.com/ClickHouse/ClickHouse/pull/8345) ([Azat Khuzhin](https://github.com/azat))
+- Fixed a potential bug in functions that can take `NULL` as one of the arguments and return non-NULL. [\#8196](https://github.com/ClickHouse/ClickHouse/pull/8196) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Better metrics calculations in the thread pool for background processes of `MergeTree` table engines. [\#8194](https://github.com/ClickHouse/ClickHouse/pull/8194) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fix the function `IN` inside `WHERE` when a row-level table filter is present. Fixes [\#6687](https://github.com/ClickHouse/ClickHouse/issues/6687) [\#8357](https://github.com/ClickHouse/ClickHouse/pull/8357) ([Ivan](https://github.com/abyss7))
+- Now an exception is thrown if an integral value is not parsed completely for settings values. [\#7678](https://github.com/ClickHouse/ClickHouse/pull/7678) ([Mikhail Korotov](https://github.com/millb))
+- Fix an exception when an aggregate function is used in a query to a distributed table with multiple local shards. [\#8164](https://github.com/ClickHouse/ClickHouse/pull/8164) ([小路](https://github.com/nicelulu))
+- Now the bloom filter can handle zero-length arrays and doesn't perform redundant calculations. [\#8242](https://github.com/ClickHouse/ClickHouse/pull/8242) ([achimbab](https://github.com/achimbab))
+- Fixed checking whether a client host is allowed by matching the client host to `host_regexp` specified in `users.xml`. [\#8241](https://github.com/ClickHouse/ClickHouse/pull/8241) ([Vitaly Baranov](https://github.com/vitlibar))
+- Relax the ambiguous column check that led to false positives in multiple `JOIN ON` sections. [\#8385](https://github.com/ClickHouse/ClickHouse/pull/8385) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed a possible server crash (`std::terminate`) when the server cannot send or write data in `JSON` or `XML` format with values of the `String` data type (that require `UTF-8` validation), or when compressing result data with the Brotli algorithm, or in some other rare cases. This fixes [\#7603](https://github.com/ClickHouse/ClickHouse/issues/7603) [\#8384](https://github.com/ClickHouse/ClickHouse/pull/8384) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix a race condition in `StorageDistributedDirectoryMonitor` found by CI. This fixes [\#8364](https://github.com/ClickHouse/ClickHouse/issues/8364). [\#8383](https://github.com/ClickHouse/ClickHouse/pull/8383) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Now background merges in the `*MergeTree` table engines family preserve the storage policy volume order more accurately. [\#8549](https://github.com/ClickHouse/ClickHouse/pull/8549) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Now the table engine `Kafka` works properly with the `Native` format. This fixes [\#6731](https://github.com/ClickHouse/ClickHouse/issues/6731) [\#7337](https://github.com/ClickHouse/ClickHouse/issues/7337) [\#8003](https://github.com/ClickHouse/ClickHouse/issues/8003). [\#8016](https://github.com/ClickHouse/ClickHouse/pull/8016) ([filimonov](https://github.com/filimonov))
+- Fixed formats with headers (like `CSVWithNames`) which were throwing an exception about EOF for the table engine `Kafka`. [\#8016](https://github.com/ClickHouse/ClickHouse/pull/8016) ([filimonov](https://github.com/filimonov))
+- Fixed a bug with making a set from a subquery in the right part of an `IN` section. This fixes [\#5767](https://github.com/ClickHouse/ClickHouse/issues/5767) and [\#2542](https://github.com/ClickHouse/ClickHouse/issues/2542). [\#7755](https://github.com/ClickHouse/ClickHouse/pull/7755) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Fix reading from storage `File`. [\#7756](https://github.com/ClickHouse/ClickHouse/pull/7756) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed reading of files in the `Parquet` format containing columns of type `list`. [\#8334](https://github.com/ClickHouse/ClickHouse/pull/8334) ([maxulan](https://github.com/maxulan))
+- Fix the error `Not found column` for distributed queries with a `PREWHERE` condition dependent on the sampling key if `max_parallel_replicas > 1`. [\#7913](https://github.com/ClickHouse/ClickHouse/pull/7913) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix the error `Not found column` if a query used `PREWHERE` dependent on a table's alias and the result set was empty because of the primary key condition. [\#7911](https://github.com/ClickHouse/ClickHouse/pull/7911) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed the return type for the functions `rand` and `randConstant` in case of a `Nullable` argument. Now the functions always return `UInt32` and never `Nullable(UInt32)`. [\#8204](https://github.com/ClickHouse/ClickHouse/pull/8204) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Disabled predicate push-down for `WITH FILL` expressions. This fixes [\#7784](https://github.com/ClickHouse/ClickHouse/issues/7784). [\#7789](https://github.com/ClickHouse/ClickHouse/pull/7789) ([Winter Zhang](https://github.com/zhang2014))
+- Fixed an incorrect `count()` result for `SummingMergeTree` when the `FINAL` section is used. [\#3280](https://github.com/ClickHouse/ClickHouse/issues/3280) [\#7786](https://github.com/ClickHouse/ClickHouse/pull/7786) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Fixed a possible incorrect result for constant functions from remote servers. It happened in queries with functions like `version()`, `uptime()`, etc. which return different constant values for different servers. This fixes [\#7666](https://github.com/ClickHouse/ClickHouse/issues/7666). [\#7689](https://github.com/ClickHouse/ClickHouse/pull/7689) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix a complicated bug in push-down predicate optimization which led to wrong results. This fixes a lot of issues in push-down predicate optimization. [\#8503](https://github.com/ClickHouse/ClickHouse/pull/8503) ([Winter Zhang](https://github.com/zhang2014))
+- Fix a crash in the `CREATE TABLE .. AS dictionary` query. [\#8508](https://github.com/ClickHouse/ClickHouse/pull/8508) ([Azat Khuzhin](https://github.com/azat))
+- Several improvements to the ClickHouse grammar in the `.g4` file. [\#8294](https://github.com/ClickHouse/ClickHouse/pull/8294) ([Taiyang Li](https://github.com/taiyang-li))
+- Fix a bug that led to crashes in `JOIN`s with tables with the engine `Join`. This fixes [\#7556](https://github.com/ClickHouse/ClickHouse/issues/7556) [\#8254](https://github.com/ClickHouse/ClickHouse/issues/8254) [\#7915](https://github.com/ClickHouse/ClickHouse/issues/7915) [\#8100](https://github.com/ClickHouse/ClickHouse/issues/8100). [\#8298](https://github.com/ClickHouse/ClickHouse/pull/8298) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix redundant dictionaries reload on `CREATE DATABASE`. [\#7916](https://github.com/ClickHouse/ClickHouse/pull/7916) ([Azat Khuzhin](https://github.com/azat))
+- Limit the maximum number of streams for reading from `StorageFile` and `StorageHDFS`. Fixes https://github.com/ClickHouse/ClickHouse/issues/7650. [\#7981](https://github.com/ClickHouse/ClickHouse/pull/7981) ([alesapin](https://github.com/alesapin))
+- Fix a bug in the `ALTER ... MODIFY ... CODEC` query, when the user specifies both a default expression and a codec. Fixes [8593](https://github.com/ClickHouse/ClickHouse/issues/8593). [\#8614](https://github.com/ClickHouse/ClickHouse/pull/8614) ([alesapin](https://github.com/alesapin))
+- Fix an error in the background merge of columns with `SimpleAggregateFunction(LowCardinality)` type. [\#8613](https://github.com/ClickHouse/ClickHouse/pull/8613) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed type check in the function `toDateTime64`. [\#8375](https://github.com/ClickHouse/ClickHouse/pull/8375) ([Vasily Nemkov](https://github.com/Enmk))
+- Now the server does not crash on `LEFT` or `FULL JOIN` with the Join engine and unsupported `join_use_nulls` settings. [\#8479](https://github.com/ClickHouse/ClickHouse/pull/8479) ([Artem Zuikov](https://github.com/4ertus2))
+- Now the `DROP DICTIONARY IF EXISTS db.dict` query doesn't throw an exception if `db` doesn't exist. [\#8185](https://github.com/ClickHouse/ClickHouse/pull/8185) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fix possible crashes in table functions (`file`, `mysql`, `remote`) caused by usage of a reference to a removed `IStorage` object. Fix incorrect parsing of columns specified at insertion into a table function. [\#7762](https://github.com/ClickHouse/ClickHouse/pull/7762) ([tavplubix](https://github.com/tavplubix))
+- Ensure the network is up before starting `clickhouse-server`. This fixes [\#7507](https://github.com/ClickHouse/ClickHouse/issues/7507). [\#8570](https://github.com/ClickHouse/ClickHouse/pull/8570) ([Zhichang Yu](https://github.com/yuzhichang))
+- Fix timeout handling for secure connections. This fixes [\#8126](https://github.com/ClickHouse/ClickHouse/issues/8126). [\#8128](https://github.com/ClickHouse/ClickHouse/pull/8128) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix `clickhouse-copier`'s redundant contention between concurrent workers. [\#7816](https://github.com/ClickHouse/ClickHouse/pull/7816) ([Ding Xiang Fei](https://github.com/dingxiangfei2009))
+- Now mutations don't skip attached parts, even if their mutation version is larger than the current mutation version. [\#7812](https://github.com/ClickHouse/ClickHouse/pull/7812) ([Zhichang Yu](https://github.com/yuzhichang)) [\#8250](https://github.com/ClickHouse/ClickHouse/pull/8250) ([alesapin](https://github.com/alesapin))
+- Ignore redundant copies of `*MergeTree` data parts after a move to another disk and server restart. [\#7810](https://github.com/ClickHouse/ClickHouse/pull/7810) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fix a crash in `FULL JOIN` with `LowCardinality` in the `JOIN` key. [\#8252](https://github.com/ClickHouse/ClickHouse/pull/8252) ([Artem Zuikov](https://github.com/4ertus2))
+- Forbid using a column name more than once in an insert query like `INSERT INTO tbl (x, y, x)`. This fixes [\#5465](https://github.com/ClickHouse/ClickHouse/issues/5465), [\#7681](https://github.com/ClickHouse/ClickHouse/issues/7681). [\#7685](https://github.com/ClickHouse/ClickHouse/pull/7685) ([alesapin](https://github.com/alesapin))
+- Added a fallback for detecting the number of physical CPU cores on unknown CPUs (using the number of logical CPU cores). This fixes [\#5239](https://github.com/ClickHouse/ClickHouse/issues/5239). [\#7726](https://github.com/ClickHouse/ClickHouse/pull/7726) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix the `There's no column` error for materialized and alias columns. [\#8210](https://github.com/ClickHouse/ClickHouse/pull/8210) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed a crash when the `EXISTS` query was used without the `TABLE` or `DICTIONARY` qualifier, just like `EXISTS t`. This fixes [\#8172](https://github.com/ClickHouse/ClickHouse/issues/8172). This bug was introduced in version 19.17. [\#8213](https://github.com/ClickHouse/ClickHouse/pull/8213) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix a rare bug with the error `"Sizes of columns doesn't match"` that might appear when using a `SimpleAggregateFunction` column. [\#7790](https://github.com/ClickHouse/ClickHouse/pull/7790) ([Boris Granveaud](https://github.com/bgranvea))
+- Fix a bug where users with an empty `allow_databases` got access to all databases (and the same for `allow_dictionaries`). [\#7793](https://github.com/ClickHouse/ClickHouse/pull/7793) ([DeifyTheGod](https://github.com/DeifyTheGod))
[\#7793](https://github.com/ClickHouse/ClickHouse/pull/7793) ([DeifyTheGod](https://github.com/DeifyTheGod)) +- 固定顧客のクラッシュがサーバーで接続しています。 [\#8071](https://github.com/ClickHouse/ClickHouse/pull/8071) ([Azat Khuzhin](https://github.com/azat)) +- 修正 `ORDER BY` 主キー接頭辞と非主キー接尾辞によるソートの場合の動作。 [\#7759](https://github.com/ClickHouse/ClickHouse/pull/7759) ([アントン-ポポフ](https://github.com/CurtizJ)) +- テーブルに修飾列が存在するかどうかを確認します。 この修正 [\#6836](https://github.com/ClickHouse/ClickHouse/issues/6836). [\#7758](https://github.com/ClickHouse/ClickHouse/pull/7758) ([Artem Zuikov](https://github.com/4ertus2)) +- 固定された動作と `ALTER MOVE` merge finishが指定したスーパーパーを移動した直後に実行されます。 修正 [\#8103](https://github.com/ClickHouse/ClickHouse/issues/8103). [\#8104](https://github.com/ClickHouse/ClickHouse/pull/8104) ([Vladimir Chebotarev](https://github.com/excitoon)) +- 使用中のサーバーのクラッシュを修正 `UNION` 異なる数の列を持つ。 修正 [\#7279](https://github.com/ClickHouse/ClickHouse/issues/7279). [\#7929](https://github.com/ClickHouse/ClickHouse/pull/7929) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +- 固定サイズの結果、部分文字列のための機能 `substr` 負のサイズ。 [\#8589](https://github.com/ClickHouse/ClickHouse/pull/8589) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +- 今、サーバーは `MergeTree` が足りないときは無料のスレッドの背景プールがあります。 [\#8588](https://github.com/ClickHouse/ClickHouse/pull/8588) ([tavplubix](https://github.com/tavplubix)) +- 書式設定にマイナータイプミスを修正 `UNION ALL` AST. [\#7999](https://github.com/ClickHouse/ClickHouse/pull/7999) ([litao91](https://github.com/litao91)) +- 固定間違ったブルームフィルタの負の数の結果。 この修正 [\#8317](https://github.com/ClickHouse/ClickHouse/issues/8317). [\#8566](https://github.com/ClickHouse/ClickHouse/pull/8566) ([冬張](https://github.com/zhang2014)) +- Decompressでバッファオーバーフローの可能性を修正 悪意のあるユーザーで製作した圧縮データが読み後のバッファです。 この問題は、Yandexの情報セキュリティチームのEldar Zaitovによって発見されました。 [\#8404](https://github.com/ClickHouse/ClickHouse/pull/8404) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- 整数のオーバーフローのために誤った結果を修正 `arrayIntersect`. [\#7777](https://github.com/ClickHouse/ClickHouse/pull/7777) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +- さて `OPTIMIZE TABLE` クエリを待ちませんがオフラインのレプリカを行います。 [\#8314](https://github.com/ClickHouse/ClickHouse/pull/8314) ([javi santana](https://github.com/javisantana)) +- 固定 `ALTER TTL` のためのパーサ `Replicated*MergeTree` テーブル。 [\#8318](https://github.com/ClickHouse/ClickHouse/pull/8318) ([Vladimir Chebotarev](https://github.com/excitoon)) +- サーバとクライアン [\#8084](https://github.com/ClickHouse/ClickHouse/pull/8084) ([Azat Khuzhin](https://github.com/azat)) +- 修正 `bitmapAnd` 機能エラーが交差に集約ビットマップおよびスカラービットマップ. [\#8082](https://github.com/ClickHouse/ClickHouse/pull/8082) ([越黄](https://github.com/moon03432)) +- 定義をの精製して下さい `ZXid` バグを修正するZooKeeperプログラマーズガイドによると `clickhouse-cluster-copier`. [\#8088](https://github.com/ClickHouse/ClickHouse/pull/8088) ([丁象飛](https://github.com/dingxiangfei2009)) +- `odbc` テーブル関数は今 `external_table_functions_use_nulls` 設定。 [\#7506](https://github.com/ClickHouse/ClickHouse/pull/7506) ([Vasily Nemkov](https://github.com/Enmk)) +- 稀なデータレースにつながるバグを修正しました。 [\#8143](https://github.com/ClickHouse/ClickHouse/pull/8143) ([Alexander Kazakov](https://github.com/Akazz)) +- さて `SYSTEM RELOAD DICTIONARY` 辞書を完全にリロードし、無視します `update_field`. この修正 [\#7440](https://github.com/ClickHouse/ClickHouse/issues/7440). 
[\#8037](https://github.com/ClickHouse/ClickHouse/pull/8037) ([Vitaly Baranov](https://github.com/vitlibar)) +- Create queryに辞書が存在するかどうかを確認する機能を追加します。 [\#8032](https://github.com/ClickHouse/ClickHouse/pull/8032) ([alesapin](https://github.com/alesapin)) +- 修正 `Float*` 解析 `Values` フォーマット。 この修正 [\#7817](https://github.com/ClickHouse/ClickHouse/issues/7817). [\#7870](https://github.com/ClickHouse/ClickHouse/pull/7870) ([tavplubix](https://github.com/tavplubix)) +- 私たちはいくつかのバックグ `*MergeTree` テーブルエンジン家族。 [\#7873](https://github.com/ClickHouse/ClickHouse/pull/7873) ([Vladimir Chebotarev](https://github.com/excitoon)) +- 固定クラッシュの統合運用がテーブル `SimpleAggregateFunction(LowCardinality)` コラム この修正 [\#8515](https://github.com/ClickHouse/ClickHouse/issues/8515). [\#8522](https://github.com/ClickHouse/ClickHouse/pull/8522) ([Azat Khuzhin](https://github.com/azat)) +- すべてのicuロケールのサポートを復元し、定数式の照合順序を適用する機能を追加します。 また、言語名を追加する `system.collations` テーブル。 [\#8051](https://github.com/ClickHouse/ClickHouse/pull/8051) ([alesapin](https://github.com/alesapin)) +- ゼロ最小限の寿命を持つ外部辞書バグを修正しました (`LIFETIME(MIN 0 MAX N)`, `LIFETIME(N)` バックグラウンドで更新しない。 [\#7983](https://github.com/ClickHouse/ClickHouse/pull/7983) ([alesapin](https://github.com/alesapin)) +- 固定したときにクラッシュする場合が外部辞書でclickhouseソースがサブクエリに返します。 [\#8351](https://github.com/ClickHouse/ClickHouse/pull/8351) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +- エンジンでテーブル内のファイル拡張子の誤った解析を修正 `URL`. この修正 [\#8157](https://github.com/ClickHouse/ClickHouse/issues/8157). [\#8419](https://github.com/ClickHouse/ClickHouse/pull/8419) ([Andrey Bodrov](https://github.com/apbodrov)) +- 修正 `CHECK TABLE` のためのクエリ `*MergeTree` キーのないテーブル。 修正 [\#7543](https://github.com/ClickHouse/ClickHouse/issues/7543). [\#7979](https://github.com/ClickHouse/ClickHouse/pull/7979) ([alesapin](https://github.com/alesapin)) +- 固定変換の `Float64` MySQLのタイプに。 [\#8079](https://github.com/ClickHouse/ClickHouse/pull/8079) ([ユーリーバラノフ](https://github.com/yurriy)) +- 今ればいけない完全に落ちてしまったがサーバのクラッシュ、サーバーへの復元とその負荷ます。 [\#8176](https://github.com/ClickHouse/ClickHouse/pull/8176) ([tavplubix](https://github.com/tavplubix)) +- テーブル機能のクラッシュを修正 `file` 存在しないファイルに挿入している間。 この場合、ファイルが作成され、insertが処理されます。 [\#8177](https://github.com/ClickHouse/ClickHouse/pull/8177) ([Olga Khvostikova](https://github.com/stavrolia)) +- ときに発生する可能性がまれなデッドロックを修正 `trace_log` 有効になっています。 [\#7838](https://github.com/ClickHouse/ClickHouse/pull/7838) ([フィリモノフ](https://github.com/filimonov)) +- ほかに異なるタイプで動作する機能を追加 `Date` で `RangeHashed` DDLクエリから作成された外部ディクショナリ。 修正 [7899](https://github.com/ClickHouse/ClickHouse/issues/7899). [\#8275](https://github.com/ClickHouse/ClickHouse/pull/8275) ([alesapin](https://github.com/alesapin)) +- ときの修正クラッシュ `now64()` 別の関数の結果で呼び出されます。 [\#8270](https://github.com/ClickHouse/ClickHouse/pull/8270) ([Vasily Nemkov](https://github.com/Enmk)) +- 固定バグ検出クライアントip接続を通じてmysqlワイヤプロトコルです。 [\#7743](https://github.com/ClickHouse/ClickHouse/pull/7743) ([Dmitry Muzyka](https://github.com/dmitriy-myz)) +- 空の配列の処理を修正 `arraySplit` 機能。 この修正 [\#7708](https://github.com/ClickHouse/ClickHouse/issues/7708). 
[\#7747](https://github.com/ClickHouse/ClickHouse/pull/7747) ([hcz](https://github.com/hczhcz)) +- ときに問題を修正しました `pid-file` 別のランニングの `clickhouse-server` 削除される可能性があります。 [\#8487](https://github.com/ClickHouse/ClickHouse/pull/8487) ([Weiqing Xu](https://github.com/weiqxu)) +- それが持っている場合 `invalidate_query` これは、以前の更新試行時に更新といくつかの例外を停止しました。 [\#8029](https://github.com/ClickHouse/ClickHouse/pull/8029) ([alesapin](https://github.com/alesapin)) +- 関数のエラーを修正しました `arrayReduce` それはにつながる可能性 “double free” そして、集計関数combinatorのエラー `Resample` それはメモリリークの原因となります。 集計関数の追加 `aggThrow`. この関数は、テスト目的で使用できます。 [\#8446](https://github.com/ClickHouse/ClickHouse/pull/8446) ([alexey-milovidov](https://github.com/alexey-milovidov)) + +#### 改善 {#improvement-1} + +- での作業時に改善されたロギング `S3` テーブルエンジン。 [\#8251](https://github.com/ClickHouse/ClickHouse/pull/8251) ([Grigory Pervakov](https://github.com/GrigoryPervakov)) +- 印刷ヘルプメッセージがない場合引数が渡された通話の場合 `clickhouse-local`. この修正 [\#5335](https://github.com/ClickHouse/ClickHouse/issues/5335). [\#8230](https://github.com/ClickHouse/ClickHouse/pull/8230) ([Andrey Nagorny](https://github.com/Melancholic)) +- 設定を追加 `mutations_sync` 待つことができます `ALTER UPDATE/DELETE` 同期クエリ。 [\#8237](https://github.com/ClickHouse/ClickHouse/pull/8237) ([alesapin](https://github.com/alesapin)) +- 相対セットアップを許可する `user_files_path` で `config.xml` (同様の方法で `format_schema_path`). [\#7632](https://github.com/ClickHouse/ClickHouse/pull/7632) ([hcz](https://github.com/hczhcz)) +- 変換関数の不正な型の例外を追加するには `-OrZero` 後置。 [\#7880](https://github.com/ClickHouse/ClickHouse/pull/7880) ([Andrey Konyaev](https://github.com/akonyaev90)) +- 分散クエリでシャードに送信するデータのヘッダーの形式を簡素化します。 [\#8044](https://github.com/ClickHouse/ClickHouse/pull/8044) ([Vitaly Baranov](https://github.com/vitlibar)) +- `Live View` テーブルエンジンリファクタリング。 [\#8519](https://github.com/ClickHouse/ClickHouse/pull/8519) ([vzakaznikov](https://github.com/vzakaznikov)) +- DDLクエリから作成された外部ディクショナリのチェックを追加します。 [\#8127](https://github.com/ClickHouse/ClickHouse/pull/8127) ([alesapin](https://github.com/alesapin)) +- エラーの修正 `Column ... already exists` 使用している間 `FINAL` と `SAMPLE` together, e.g. `select count() from table final sample 1/2`. 修正 [\#5186](https://github.com/ClickHouse/ClickHouse/issues/5186). [\#7907](https://github.com/ClickHouse/ClickHouse/pull/7907) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +- 今の最初の引数を表 `joinGet` 関数はテーブル識別子にすることができます。 [\#7707](https://github.com/ClickHouse/ClickHouse/pull/7707) ([アモスの鳥](https://github.com/amosbird)) +- 使用を許可する `MaterializedView` 上記のサブクエリを使用する `Kafka` テーブル。 [\#8197](https://github.com/ClickHouse/ClickHouse/pull/8197) ([フィリモノフ](https://github.com/filimonov)) +- これで、ディスク間の背景移動がseprateスレッドプールを実行します。 [\#7670](https://github.com/ClickHouse/ClickHouse/pull/7670) ([Vladimir Chebotarev](https://github.com/excitoon)) +- `SYSTEM RELOAD DICTIONARY` 今同期的に実行されます。 [\#8240](https://github.com/ClickHouse/ClickHouse/pull/8240) ([Vitaly Baranov](https://github.com/vitlibar)) +- スタックトレース表示の物理アドレス(オフセットオブジェクトファイルの代わりに仮想メモリのアドレスのオブジェクトファイルが読み込まれ). それは使用をの可能にします `addr2line` binaryが独立した位置でASLRがアクティブな場合。 この修正 [\#8360](https://github.com/ClickHouse/ClickHouse/issues/8360). [\#8387](https://github.com/ClickHouse/ClickHouse/pull/8387) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- 行レベルのセキュリ: `
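+
+A minimal sketch of the duplicate-column check from the `INSERT` fix in the list above; the table `tbl` here is hypothetical:
+
+``` sql
+CREATE TABLE tbl (x Int32, y Int32) ENGINE = Memory;
+
+-- Listing the same column twice in the column list is now rejected with an error
+-- instead of being silently accepted:
+INSERT INTO tbl (x, y, x) VALUES (1, 2, 3);
+```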
+
+#### Improvement {#improvement-1}
+
+- Improved logging when working with the `S3` table engine. [\#8251](https://github.com/ClickHouse/ClickHouse/pull/8251) ([Grigory Pervakov](https://github.com/GrigoryPervakov))
+- A help message is now printed when no arguments are passed when calling `clickhouse-local`. This fixes [\#5335](https://github.com/ClickHouse/ClickHouse/issues/5335). [\#8230](https://github.com/ClickHouse/ClickHouse/pull/8230) ([Andrey Nagorny](https://github.com/Melancholic))
+- Added the setting `mutations_sync`, which allows waiting for `ALTER UPDATE/DELETE` queries synchronously; see the sketch after this list. [\#8237](https://github.com/ClickHouse/ClickHouse/pull/8237) ([alesapin](https://github.com/alesapin))
+- Allow setting up a relative `user_files_path` in `config.xml` (in a way similar to `format_schema_path`). [\#7632](https://github.com/ClickHouse/ClickHouse/pull/7632) ([hcz](https://github.com/hczhcz))
+- Added an exception for illegal types for conversion functions with the `-OrZero` postfix. [\#7880](https://github.com/ClickHouse/ClickHouse/pull/7880) ([Andrey Konyaev](https://github.com/akonyaev90))
+- Simplified the format of the header of data sent to a shard in a distributed query. [\#8044](https://github.com/ClickHouse/ClickHouse/pull/8044) ([Vitaly Baranov](https://github.com/vitlibar))
+- `Live View` table engine refactoring. [\#8519](https://github.com/ClickHouse/ClickHouse/pull/8519) ([vzakaznikov](https://github.com/vzakaznikov))
+- Added additional checks for external dictionaries created from DDL queries. [\#8127](https://github.com/ClickHouse/ClickHouse/pull/8127) ([alesapin](https://github.com/alesapin))
+- Fixed the error `Column ... already exists` while using `FINAL` and `SAMPLE` together, e.g. `select count() from table final sample 1/2`. Fixes [\#5186](https://github.com/ClickHouse/ClickHouse/issues/5186). [\#7907](https://github.com/ClickHouse/ClickHouse/pull/7907) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Now the first argument of the `joinGet` function can be a table identifier. [\#7707](https://github.com/ClickHouse/ClickHouse/pull/7707) ([Amos Bird](https://github.com/amosbird))
+- Allow using `MaterializedView` with subqueries above `Kafka` tables. [\#8197](https://github.com/ClickHouse/ClickHouse/pull/8197) ([filimonov](https://github.com/filimonov))
+- Background moves between disks now run in a separate thread pool. [\#7670](https://github.com/ClickHouse/ClickHouse/pull/7670) ([Vladimir Chebotarev](https://github.com/excitoon))
+- `SYSTEM RELOAD DICTIONARY` now executes synchronously. [\#8240](https://github.com/ClickHouse/ClickHouse/pull/8240) ([Vitaly Baranov](https://github.com/vitlibar))
+- Stack traces now display physical addresses (offsets in the object file) instead of virtual memory addresses (where the object file was loaded). That allows the use of `addr2line` when the binary is position-independent and ASLR is active. This fixes [\#8360](https://github.com/ClickHouse/ClickHouse/issues/8360). [\#8387](https://github.com/ClickHouse/ClickHouse/pull/8387) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Support the new syntax for row-level security filters. Fixes [\#5779](https://github.com/ClickHouse/ClickHouse/issues/5779). [\#8381](https://github.com/ClickHouse/ClickHouse/pull/8381) ([Ivan](https://github.com/abyss7))
+- Now the `cityHash` function can work with `Decimal` and `UUID` types. Fixes [\#5184](https://github.com/ClickHouse/ClickHouse/issues/5184). [\#7693](https://github.com/ClickHouse/ClickHouse/pull/7693) ([Mikhail Korotov](https://github.com/millb))
+- Removed the fixed index granularity (it was 1024) from system logs because it's obsolete after the implementation of adaptive granularity. [\#7698](https://github.com/ClickHouse/ClickHouse/pull/7698) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Enabled the MySQL compatibility server when ClickHouse is compiled without SSL. [\#7852](https://github.com/ClickHouse/ClickHouse/pull/7852) ([Yuriy Baranov](https://github.com/yurriy))
+- Now the server checksums distributed batches, which gives more verbose errors in case of corrupted data in a batch. [\#7914](https://github.com/ClickHouse/ClickHouse/pull/7914) ([Azat Khuzhin](https://github.com/azat))
+- Support `DROP DATABASE`, `DETACH TABLE`, `DROP TABLE` and `ATTACH TABLE` for the `MySQL` database engine. [\#8202](https://github.com/ClickHouse/ClickHouse/pull/8202) ([Winter Zhang](https://github.com/zhang2014))
+- Added authentication in the S3 table function and table engine. [\#7623](https://github.com/ClickHouse/ClickHouse/pull/7623) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Added a check for extra parts of `MergeTree` on different disks, in order not to miss data parts on undefined disks. [\#8118](https://github.com/ClickHouse/ClickHouse/pull/8118) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Enable SSL support for the Mac client and server. [\#8297](https://github.com/ClickHouse/ClickHouse/pull/8297) ([Ivan](https://github.com/abyss7))
+- Now ClickHouse can work as a MySQL federated server (see https://dev.mysql.com/doc/refman/5.7/en/federated-create-server.html). [\#7717](https://github.com/ClickHouse/ClickHouse/pull/7717) ([Maxim Fedotov](https://github.com/MaxFedotov))
+- `clickhouse-client` now only enables `bracketed-paste` when multiquery is on and multiline is off. This fixes [\#7757](https://github.com/ClickHouse/ClickHouse/issues/7757). [\#7761](https://github.com/ClickHouse/ClickHouse/pull/7761) ([Amos Bird](https://github.com/amosbird))
+- Support `Array(Decimal)` in the `if` function. [\#7721](https://github.com/ClickHouse/ClickHouse/pull/7721) ([Artem Zuikov](https://github.com/4ertus2))
+- Support Decimals in the `arrayDifference`, `arrayCumSum` and `arrayCumSumNegative` functions. [\#7724](https://github.com/ClickHouse/ClickHouse/pull/7724) ([Artem Zuikov](https://github.com/4ertus2))
+- Added a `lifetime` column to the `system.dictionaries` table. [\#6820](https://github.com/ClickHouse/ClickHouse/issues/6820) [\#7727](https://github.com/ClickHouse/ClickHouse/pull/7727) ([kekekekule](https://github.com/kekekekule))
+- Improved the check for existing parts on different disks for `*MergeTree` table engines. Addresses [\#7660](https://github.com/ClickHouse/ClickHouse/issues/7660). [\#8440](https://github.com/ClickHouse/ClickHouse/pull/8440) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Integration with the `AWS SDK` for `S3` interactions, which allows using all S3 features out of the box. [\#8011](https://github.com/ClickHouse/ClickHouse/pull/8011) ([Pavel Kovalenko](https://github.com/Jokser))
+- Added support for subqueries in `Live View` tables. [\#7792](https://github.com/ClickHouse/ClickHouse/pull/7792) ([vzakaznikov](https://github.com/vzakaznikov))
+- The check for using `Date` or `DateTime` columns from `TTL` expressions was removed. [\#7920](https://github.com/ClickHouse/ClickHouse/pull/7920) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Information about the disk was added to the `system.detached_parts` table. [\#7833](https://github.com/ClickHouse/ClickHouse/pull/7833) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Now the settings `max_(table|partition)_size_to_drop` can be changed without a restart. [\#7779](https://github.com/ClickHouse/ClickHouse/pull/7779) ([Grigory Pervakov](https://github.com/GrigoryPervakov))
+- Slightly better usability of error messages: users are asked not to remove the lines below `Stack trace:`. [\#7897](https://github.com/ClickHouse/ClickHouse/pull/7897) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Better reading of messages from the `Kafka` engine in various formats, after [\#7935](https://github.com/ClickHouse/ClickHouse/issues/7935). [\#8035](https://github.com/ClickHouse/ClickHouse/pull/8035) ([Ivan](https://github.com/abyss7))
+- Better compatibility with MySQL clients that don't support the `sha2_password` auth plugin. [\#8036](https://github.com/ClickHouse/ClickHouse/pull/8036) ([Yuriy Baranov](https://github.com/yurriy))
+- Support more column types in the MySQL compatibility server. [\#7975](https://github.com/ClickHouse/ClickHouse/pull/7975) ([Yuriy Baranov](https://github.com/yurriy))
+- Implemented the `ORDER BY` optimization for `Merge`, `Buffer` and `Materialized View` storages with underlying `MergeTree` tables. [\#8130](https://github.com/ClickHouse/ClickHouse/pull/8130) ([Anton Popov](https://github.com/CurtizJ))
+- Now we always use the POSIX implementation of `getrandom` for better compatibility with old kernels (< 3.17). [\#7940](https://github.com/ClickHouse/ClickHouse/pull/7940) ([Amos Bird](https://github.com/amosbird))
+- Better check for a valid destination in a move TTL rule. [\#8410](https://github.com/ClickHouse/ClickHouse/pull/8410) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Better checks for broken insert batches for the `Distributed` table engine. [\#7933](https://github.com/ClickHouse/ClickHouse/pull/7933) ([Azat Khuzhin](https://github.com/azat))
+- Added a column with an array of the part names that mutations must process in the future to the `system.mutations` table. [\#8179](https://github.com/ClickHouse/ClickHouse/pull/8179) ([alesapin](https://github.com/alesapin))
+- Parallel merge sort optimization for processors. [\#8552](https://github.com/ClickHouse/ClickHouse/pull/8552) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- The setting `mark_cache_min_lifetime` is now obsolete and does nothing. In previous versions, the mark cache could grow in memory larger than `mark_cache_size` to accommodate data within `mark_cache_min_lifetime` seconds. That led to confusion and higher memory usage than expected, which is especially bad on memory-constrained systems. If you see performance degradation after installing this release, you should increase `mark_cache_size`. [\#8484](https://github.com/ClickHouse/ClickHouse/pull/8484) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Preparation to use `tid` everywhere. This is needed for [\#7477](https://github.com/ClickHouse/ClickHouse/issues/7477). [\#8276](https://github.com/ClickHouse/ClickHouse/pull/8276) ([alexey-milovidov](https://github.com/alexey-milovidov))
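+
+A minimal sketch of the `mutations_sync` setting from the list above; the table `t` and its column `x` are hypothetical:
+
+``` sql
+SET mutations_sync = 1; -- make the next mutation query block until the mutation is finished
+ALTER TABLE t DELETE WHERE x = 0;
+```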
+
+#### Performance Improvement {#performance-improvement-1}
+
+- Performance optimizations in the processors pipeline. [\#7988](https://github.com/ClickHouse/ClickHouse/pull/7988) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Non-blocking updates of expired keys in cache dictionaries (with permission to read the old ones). [\#8303](https://github.com/ClickHouse/ClickHouse/pull/8303) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Compile ClickHouse without `-fno-omit-frame-pointer` globally to spare one more register. [\#8097](https://github.com/ClickHouse/ClickHouse/pull/8097) ([Amos Bird](https://github.com/amosbird))
+- Sped up the `greatCircleDistance` function and added performance tests for it. [\#7307](https://github.com/ClickHouse/ClickHouse/pull/7307) ([Olga Khvostikova](https://github.com/stavrolia))
+- Improved performance of the function `roundDown`. [\#8465](https://github.com/ClickHouse/ClickHouse/pull/8465) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Improved performance of `max`, `min`, `argMin`, `argMax` for the `DateTime64` data type. [\#8199](https://github.com/ClickHouse/ClickHouse/pull/8199) ([Vasily Nemkov](https://github.com/Enmk))
+- Improved performance of sorting without a limit or with a big limit, and of external sorting. [\#8545](https://github.com/ClickHouse/ClickHouse/pull/8545) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Improved performance of formatting floating point numbers by up to 6 times. [\#8542](https://github.com/ClickHouse/ClickHouse/pull/8542) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Improved performance of the `modulo` function. [\#7750](https://github.com/ClickHouse/ClickHouse/pull/7750) ([Amos Bird](https://github.com/amosbird))
+- Optimized `ORDER BY` and merging with a single-column key. [\#8335](https://github.com/ClickHouse/ClickHouse/pull/8335) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Better implementation for `arrayReduce` and the `-Array` and `-State` combinators. [\#7710](https://github.com/ClickHouse/ClickHouse/pull/7710) ([Amos Bird](https://github.com/amosbird))
+- Now `PREWHERE` is optimized to be at least as efficient as `WHERE`. [\#7769](https://github.com/ClickHouse/ClickHouse/pull/7769) ([Amos Bird](https://github.com/amosbird))
+- Improved the way `round` and `roundBankers` handle negative numbers. [\#8229](https://github.com/ClickHouse/ClickHouse/pull/8229) ([hcz](https://github.com/hczhcz))
+- Improved decoding performance of the `DoubleDelta` and `Gorilla` codecs by roughly 30-40%. This fixes [\#7082](https://github.com/ClickHouse/ClickHouse/issues/7082). [\#8019](https://github.com/ClickHouse/ClickHouse/pull/8019) ([Vasily Nemkov](https://github.com/Enmk))
+- Improved performance of `base64`-related functions. [\#8444](https://github.com/ClickHouse/ClickHouse/pull/8444) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added a function `geoDistance`. It is similar to `greatCircleDistance` but uses an approximation to the WGS-84 ellipsoid model. The performance of both functions is nearly the same. [\#8086](https://github.com/ClickHouse/ClickHouse/pull/8086) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Faster `min` and `max` aggregation functions for the `Decimal` data type. [\#8144](https://github.com/ClickHouse/ClickHouse/pull/8144) ([Artem Zuikov](https://github.com/4ertus2))
+- Vectorized processing of `arrayReduce`. [\#7608](https://github.com/ClickHouse/ClickHouse/pull/7608) ([Amos Bird](https://github.com/amosbird))
+- `if` chains are now optimized as `multiIf`; see the sketch after this list. [\#8355](https://github.com/ClickHouse/ClickHouse/pull/8355) ([kamalov-ruslan](https://github.com/kamalov-ruslan))
+- Fixed the performance regression of the `Kafka` table engine introduced in 19.15. This fixes [\#7261](https://github.com/ClickHouse/ClickHouse/issues/7261). [\#7935](https://github.com/ClickHouse/ClickHouse/pull/7935) ([filimonov](https://github.com/filimonov))
+- Removed the “pie” code generation that `gcc` from Debian packages occasionally brings by default. [\#8483](https://github.com/ClickHouse/ClickHouse/pull/8483) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Parallel parsing of data formats. [\#6553](https://github.com/ClickHouse/ClickHouse/pull/6553) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Enabled the optimized parser of `Values` with expressions by default (`input_format_values_deduce_templates_of_expressions=1`). [\#8231](https://github.com/ClickHouse/ClickHouse/pull/8231) ([tavplubix](https://github.com/tavplubix))
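+
+The `if`-to-`multiIf` optimization above flattens nested conditional chains; a sketch of the two equivalent forms, using the built-in `system.numbers` table:
+
+``` sql
+SELECT number AS x,
+       if(x < 3, 'low', if(x < 7, 'mid', 'high'))  AS nested_form,
+       multiIf(x < 3, 'low', x < 7, 'mid', 'high') AS flat_form
+FROM system.numbers
+LIMIT 10;
+```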
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-2}
+
+- Build fixes for `ARM` and in minimal mode. [\#8304](https://github.com/ClickHouse/ClickHouse/pull/8304) ([proller](https://github.com/proller))
+- Added a coverage file flush for `clickhouse-server` when std::atexit is not called. Also slightly improved logging in stateless tests with coverage. [\#8267](https://github.com/ClickHouse/ClickHouse/pull/8267) ([alesapin](https://github.com/alesapin))
+- Updated the LLVM library in contrib. Avoid using LLVM from OS packages. [\#8258](https://github.com/ClickHouse/ClickHouse/pull/8258) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Made the bundled `curl` build fully quiet. [\#8232](https://github.com/ClickHouse/ClickHouse/pull/8232) [\#8203](https://github.com/ClickHouse/ClickHouse/pull/8203) ([Pavel Kovalenko](https://github.com/Jokser))
+- Fixed some `MemorySanitizer` warnings. [\#8235](https://github.com/ClickHouse/ClickHouse/pull/8235) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Use the `add_warning` and `no_warning` macros in `CMakeLists.txt`. [\#8604](https://github.com/ClickHouse/ClickHouse/pull/8604) ([Ivan](https://github.com/abyss7))
+- Added support for Minio S3-compatible object storage (https://min.io/) for better integration tests. [\#7863](https://github.com/ClickHouse/ClickHouse/pull/7863) [\#7875](https://github.com/ClickHouse/ClickHouse/pull/7875) ([Pavel Kovalenko](https://github.com/Jokser))
+- Imported `libc` headers to contrib. It allows making builds more consistent across various systems (only for `x86_64-linux-gnu`). [\#5773](https://github.com/ClickHouse/ClickHouse/pull/5773) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Removed `-fPIC` from some libraries. [\#8464](https://github.com/ClickHouse/ClickHouse/pull/8464) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Cleaned up `CMakeLists.txt` for curl. See https://github.com/ClickHouse/ClickHouse/pull/8011\#issuecomment-569478910 [\#8459](https://github.com/ClickHouse/ClickHouse/pull/8459) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Silenced warnings in the `CapNProto` library. [\#8220](https://github.com/ClickHouse/ClickHouse/pull/8220) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added performance tests for short-string-optimized hash tables. [\#7679](https://github.com/ClickHouse/ClickHouse/pull/7679) ([Amos Bird](https://github.com/amosbird))
+- Now ClickHouse will build on `AArch64` even if `MADV_FREE` is not available. This fixes [\#8027](https://github.com/ClickHouse/ClickHouse/issues/8027). [\#8243](https://github.com/ClickHouse/ClickHouse/pull/8243) ([Amos Bird](https://github.com/amosbird))
+- Updated `zlib-ng` to fix memory sanitizer problems. [\#7182](https://github.com/ClickHouse/ClickHouse/pull/7182) [\#8206](https://github.com/ClickHouse/ClickHouse/pull/8206) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Enabled the internal MySQL library on non-Linux systems, because usage of OS packages is very fragile and usually doesn't work at all. This fixes [\#5765](https://github.com/ClickHouse/ClickHouse/issues/5765). [\#8426](https://github.com/ClickHouse/ClickHouse/pull/8426) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed the build on some systems after enabling `libc++`. This supersedes [\#8374](https://github.com/ClickHouse/ClickHouse/issues/8374). [\#8380](https://github.com/ClickHouse/ClickHouse/pull/8380) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Made `Field` methods more type-safe to find more errors. [\#7386](https://github.com/ClickHouse/ClickHouse/pull/7386) [\#8209](https://github.com/ClickHouse/ClickHouse/pull/8209) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Added missing files to the `libc-headers` submodule. [\#8507](https://github.com/ClickHouse/ClickHouse/pull/8507) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed wrong `JSON` quoting in performance test output. [\#8497](https://github.com/ClickHouse/ClickHouse/pull/8497) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Now the stack trace is displayed for `std::exception` and `Poco::Exception`. In previous versions it was available only for `DB::Exception`. This improves diagnostics. [\#8501](https://github.com/ClickHouse/ClickHouse/pull/8501) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Ported `clock_gettime` and `clock_nanosleep` for fresh glibc versions. [\#8054](https://github.com/ClickHouse/ClickHouse/pull/8054) ([Amos Bird](https://github.com/amosbird))
+- Enabled `part_log` in the example config for developers. [\#8609](https://github.com/ClickHouse/ClickHouse/pull/8609) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed the async nature of reload in `01036_no_superfluous_dict_reload_on_create_database*`. [\#8111](https://github.com/ClickHouse/ClickHouse/pull/8111) ([Azat Khuzhin](https://github.com/azat))
+- Fixed codec performance tests. [\#8615](https://github.com/ClickHouse/ClickHouse/pull/8615) ([Vasily Nemkov](https://github.com/Enmk))
+- Added install scripts for `.tgz` builds and documentation for them. [\#8612](https://github.com/ClickHouse/ClickHouse/pull/8612) [\#8591](https://github.com/ClickHouse/ClickHouse/pull/8591) ([alesapin](https://github.com/alesapin))
+- Removed the old `ZSTD` test (it was created in 2016 to reproduce a bug that the pre-1.0 version of ZSTD had). This fixes [\#8618](https://github.com/ClickHouse/ClickHouse/issues/8618). [\#8619](https://github.com/ClickHouse/ClickHouse/pull/8619) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed the build on Mac OS Catalina. [\#8600](https://github.com/ClickHouse/ClickHouse/pull/8600) ([meo](https://github.com/meob))
+- Increased the number of rows in codec performance tests to make results noticeable. [\#8574](https://github.com/ClickHouse/ClickHouse/pull/8574) ([Vasily Nemkov](https://github.com/Enmk))
+- In debug builds, treat `LOGICAL_ERROR` exceptions as assertion failures, so that they are easier to notice. [\#8475](https://github.com/ClickHouse/ClickHouse/pull/8475) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Made the formats-related performance test more deterministic. [\#8477](https://github.com/ClickHouse/ClickHouse/pull/8477) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Updated `lz4` to fix a MemorySanitizer failure. [\#8181](https://github.com/ClickHouse/ClickHouse/pull/8181) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Suppressed a known MemorySanitizer false positive in exception handling. [\#8182](https://github.com/ClickHouse/ClickHouse/pull/8182) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Updated `gcc` and `g++` to version 9 in `build/docker/build.sh`. [\#7766](https://github.com/ClickHouse/ClickHouse/pull/7766) ([TLightSky](https://github.com/tlightsky))
+- Added a performance test case to test that `PREWHERE` is worse than `WHERE`. [\#7768](https://github.com/ClickHouse/ClickHouse/pull/7768) ([Amos Bird](https://github.com/amosbird))
+- One step towards fixing one flaky test. [\#8621](https://github.com/ClickHouse/ClickHouse/pull/8621) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Avoid MemorySanitizer reports for data coming from `libunwind`. [\#8539](https://github.com/ClickHouse/ClickHouse/pull/8539) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Updated `libc++` to the latest version. [\#8324](https://github.com/ClickHouse/ClickHouse/pull/8324) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Build the ICU library from sources. This fixes [\#6460](https://github.com/ClickHouse/ClickHouse/issues/6460). [\#8219](https://github.com/ClickHouse/ClickHouse/pull/8219) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Switched from `libressl` to `openssl`. ClickHouse should support TLS 1.3 and SNI after this change. This fixes [\#8171](https://github.com/ClickHouse/ClickHouse/issues/8171). [\#8218](https://github.com/ClickHouse/ClickHouse/pull/8218) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a UBSan report when using `chacha20_poly1305` from SSL (happens on connect to https://yandex.ru/). [\#8214](https://github.com/ClickHouse/ClickHouse/pull/8214) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed the mode of the default password file for `.deb` Linux distributions. [\#8075](https://github.com/ClickHouse/ClickHouse/pull/8075) ([proller](https://github.com/proller))
+- Improved the expression for getting the `clickhouse-server` PID in `clickhouse-test`. [\#8063](https://github.com/ClickHouse/ClickHouse/pull/8063) ([Alexander Kazakov](https://github.com/Akazz))
+- Updated contrib/googletest to v1.10.0. [\#8587](https://github.com/ClickHouse/ClickHouse/pull/8587) ([Alexander Burmak](https://github.com/Alex-Burmak))
+- Fixed a ThreadSanitizer report in the `base64` library. Also updated this library to the latest version, but it doesn't matter. This fixes [\#8397](https://github.com/ClickHouse/ClickHouse/issues/8397). [\#8403](https://github.com/ClickHouse/ClickHouse/pull/8403) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed `00600_replace_running_query` for processors. [\#8272](https://github.com/ClickHouse/ClickHouse/pull/8272) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Removed support for `tcmalloc` to make `CMakeLists.txt` simpler. [\#8310](https://github.com/ClickHouse/ClickHouse/pull/8310) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Release gcc builds now use `libc++` instead of `libstdc++`. Recently `libc++` was used only with clang. This will improve consistency of build configurations and portability. [\#8311](https://github.com/ClickHouse/ClickHouse/pull/8311) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Enabled the ICU library for builds with MemorySanitizer. [\#8222](https://github.com/ClickHouse/ClickHouse/pull/8222) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Suppressed warnings from the `CapNProto` library. [\#8224](https://github.com/ClickHouse/ClickHouse/pull/8224) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Removed special cases of code for `tcmalloc`, because it's no longer supported. [\#8225](https://github.com/ClickHouse/ClickHouse/pull/8225) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- In the CI coverage task, kill the server gracefully to allow it to save the coverage report. This fixes the incomplete coverage reports we've been seeing lately. [\#8142](https://github.com/ClickHouse/ClickHouse/pull/8142) ([alesapin](https://github.com/alesapin))
+- Performance tests for all codecs against `Float64` and `UInt64` values. [\#8349](https://github.com/ClickHouse/ClickHouse/pull/8349) ([Vasily Nemkov](https://github.com/Enmk))
+- `termcap` is very much deprecated and leads to various problems (e.g. a missing “up” cap and echoing `^J` instead of multi-line). Favor `terminfo` or bundled `ncurses`. [\#7737](https://github.com/ClickHouse/ClickHouse/pull/7737) ([Amos Bird](https://github.com/amosbird))
+- Fixed the `test_storage_s3` integration test. [\#7734](https://github.com/ClickHouse/ClickHouse/pull/7734) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Support `StorageFile(<format>, null)` to insert a block into the given format file without actually writing to disk. This is required for performance tests. [\#8455](https://github.com/ClickHouse/ClickHouse/pull/8455) ([Amos Bird](https://github.com/amosbird))
+- Added the argument `--print-time` to functional tests, which prints execution time per test. [\#8001](https://github.com/ClickHouse/ClickHouse/pull/8001) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Added asserts to `KeyCondition` while evaluating RPN. This fixes a warning from gcc-9. [\#8279](https://github.com/ClickHouse/ClickHouse/pull/8279) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Dump cmake options in CI builds. [\#8273](https://github.com/ClickHouse/ClickHouse/pull/8273) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Don't generate debug info in some fat libraries. [\#8271](https://github.com/ClickHouse/ClickHouse/pull/8271) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Made `log_to_console.xml` always log to stderr, regardless of whether it is interactive or not. [\#8395](https://github.com/ClickHouse/ClickHouse/pull/8395) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Removed some unused features from the `clickhouse-performance-test` tool. [\#8555](https://github.com/ClickHouse/ClickHouse/pull/8555) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Now we will also search for `lld-X` with the corresponding `clang-X` version. [\#8092](https://github.com/ClickHouse/ClickHouse/pull/8092) ([alesapin](https://github.com/alesapin))
+- Parquet build improvement. [\#8421](https://github.com/ClickHouse/ClickHouse/pull/8421) ([maxulan](https://github.com/maxulan))
+- More GCC warnings. [\#8221](https://github.com/ClickHouse/ClickHouse/pull/8221) ([kreuzerkrieg](https://github.com/kreuzerkrieg))
+- The package for Arch Linux now allows running the ClickHouse server, and not only the client. [\#8534](https://github.com/ClickHouse/ClickHouse/pull/8534) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fixed tests with processors. Tiny performance fixes. [\#7672](https://github.com/ClickHouse/ClickHouse/pull/7672) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Update contrib/protobuf. [\#8256](https://github.com/ClickHouse/ClickHouse/pull/8256) ([Matwey V. Kornilov](https://github.com/matwey))
+- In preparation for switching to c++20, as a new year celebration. “May the C++ force be with ClickHouse.” [\#8447](https://github.com/ClickHouse/ClickHouse/pull/8447) ([Amos Bird](https://github.com/amosbird))
+
+#### Experimental Feature {#experimental-feature-1}
+
+- Added the experimental setting `min_bytes_to_use_mmap_io`; see the sketch after this list. It allows reading big files without copying data from the kernel to userspace. The setting is disabled by default. The recommended threshold is about 64 MB, because mmap/munmap is slow. [\#8520](https://github.com/ClickHouse/ClickHouse/pull/8520) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Reworked quotas as a part of the access control system. Added a new table `system.quotas`, new functions `currentQuota`, `currentQuotaKey`, and new SQL syntax `CREATE QUOTA`, `ALTER QUOTA`, `DROP QUOTA`, `SHOW QUOTA`. [\#7257](https://github.com/ClickHouse/ClickHouse/pull/7257) ([Vitaly Baranov](https://github.com/vitlibar))
+- Allow skipping unknown settings with warnings instead of throwing exceptions. [\#7653](https://github.com/ClickHouse/ClickHouse/pull/7653) ([Vitaly Baranov](https://github.com/vitlibar))
+- Reworked row policies as a part of the access control system. Added a new table `system.row_policies`, a new function `currentRowPolicies()`, and new SQL syntax `CREATE POLICY`, `ALTER POLICY`, `DROP POLICY`, `SHOW CREATE POLICY`, `SHOW POLICIES`. [\#7808](https://github.com/ClickHouse/ClickHouse/pull/7808) ([Vitaly Baranov](https://github.com/vitlibar))
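+
+A minimal sketch of the experimental `min_bytes_to_use_mmap_io` setting from the list above; the table `hits` is hypothetical, and `67108864` is simply the recommended 64 MB threshold expressed in bytes:
+
+``` sql
+SET min_bytes_to_use_mmap_io = 67108864; -- 0 (the default) disables mmap IO
+SELECT count() FROM hits; -- reads of data files larger than the threshold may now use mmap
+```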
+
+#### Security Fix {#security-fix}
+
+- Fixed the possibility of reading the directory structure in tables with the `File` table engine. This fixes [\#8536](https://github.com/ClickHouse/ClickHouse/issues/8536). [\#8537](https://github.com/ClickHouse/ClickHouse/pull/8537) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+## [Changelog for 2019](https://github.com/ClickHouse/ClickHouse/blob/master/docs/en/changelog/2019.md) {#changelog-for-2019}
diff --git a/docs/ja/whats_new/index.md b/docs/ja/whats_new/index.md
deleted file mode 120000
index 92d1bf0f1ee..00000000000
--- a/docs/ja/whats_new/index.md
+++ /dev/null
@@ -1 +0,0 @@
-../../en/whats_new/index.md
\ No newline at end of file
diff --git a/docs/ja/whats_new/index.md b/docs/ja/whats_new/index.md
new file mode 100644
index 00000000000..ac27b70b8bd
--- /dev/null
+++ b/docs/ja/whats_new/index.md
@@ -0,0 +1,8 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_folder_title: What's New
+toc_priority: 72
+---
+
+
diff --git a/docs/ja/whats_new/roadmap.md b/docs/ja/whats_new/roadmap.md
deleted file mode 120000
index 5ef0ebdb1bb..00000000000
--- a/docs/ja/whats_new/roadmap.md
+++ /dev/null
@@ -1 +0,0 @@
-../../en/whats_new/roadmap.md
\ No newline at end of file
diff --git a/docs/ja/whats_new/roadmap.md b/docs/ja/whats_new/roadmap.md
new file mode 100644
index 00000000000..e64d35b1df3
--- /dev/null
+++ b/docs/ja/whats_new/roadmap.md
@@ -0,0 +1,19 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 74
+toc_title: "\u30ED\u30FC\u30C9\u30DE\u30C3\u30D7"
+---
+
+# Roadmap {#roadmap}
+
+## Q1 2020 {#q1-2020}
+
+- Role-based access control
+
+## Q2 2020 {#q2-2020}
+
+- Integration with external authentication services
+- More precise distribution of cluster capacity between users with resource pools
+
+{## [Original article](https://clickhouse.tech/docs/en/roadmap/) ##}
diff --git a/docs/ja/whats_new/security_changelog.md b/docs/ja/whats_new/security_changelog.md
deleted file mode 120000
index 41c8286f879..00000000000
--- a/docs/ja/whats_new/security_changelog.md
+++ /dev/null
@@ -1 +0,0 @@
-../../en/whats_new/security_changelog.md
\ No newline at end of file
diff --git a/docs/ja/whats_new/security_changelog.md b/docs/ja/whats_new/security_changelog.md
new file mode 100644
index 00000000000..6e0bd77e2f9
--- /dev/null
+++ b/docs/ja/whats_new/security_changelog.md
@@ -0,0 +1,76 @@
+---
+machine_translated: true
+machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626
+toc_priority: 76
+toc_title: "\u30BB\u30AD\u30E5\u30EA\u30C6\u30A3\u306E\u5909\u66F4\u5C65\u6B74"
+---
+
+## Fixed in ClickHouse Release 19.14.3.3, 2019-09-10 {#fixed-in-clickhouse-release-19-14-3-3-2019-09-10}
+
+### CVE-2019-15024 {#cve-2019-15024}
+
+An attacker that has write access to ZooKeeper and who can run a custom server available from the network where ClickHouse runs can create a custom-built malicious server that will act as a ClickHouse replica and register it in ZooKeeper. When another replica fetches a data part from the malicious replica, it can force clickhouse-server to write to an arbitrary path on the filesystem.
+
+Credits: Eldar Zaitov of the Yandex information security team
+
+### CVE-2019-16535 {#cve-2019-16535}
+
+An OOB read, OOB write and integer underflow in decompression algorithms can be used to achieve RCE or DoS via the native protocol.
+
+Credits: Eldar Zaitov of the Yandex information security team
+
+### CVE-2019-16536 {#cve-2019-16536}
+
+A stack overflow leading to DoS can be triggered by a malicious authenticated client.
+
+Credits: Eldar Zaitov of the Yandex information security team
+
+## Fixed in ClickHouse Release 19.13.6.1, 2019-09-20 {#fixed-in-clickhouse-release-19-13-6-1-2019-09-20}
+
+### CVE-2019-18657 {#cve-2019-18657}
+
+The table function `url` had a vulnerability that allowed the attacker to inject arbitrary HTTP headers into the request.
+
+Credits: [Nikita Tikhomirov](https://github.com/NSTikhomirov)
+
+## Fixed in ClickHouse Release 18.12.13, 2018-09-10 {#fixed-in-clickhouse-release-18-12-13-2018-09-10}
+
+### CVE-2018-14672 {#cve-2018-14672}
+
+Functions for loading CatBoost models allowed path traversal and reading arbitrary files through error messages.
+
+Credits: Andrey Krasichkov of the Yandex information security team
+
+## Fixed in ClickHouse Release 18.10.3, 2018-08-13 {#fixed-in-clickhouse-release-18-10-3-2018-08-13}
+
+### CVE-2018-14671 {#cve-2018-14671}
+
+unixODBC allowed loading arbitrary shared objects from the file system, which led to a Remote Code Execution vulnerability.
+
+Credits: Andrey Krasichkov and Evgeny Sidorov of the Yandex information security team
+
+## Fixed in ClickHouse Release 1.1.54388, 2018-06-28 {#fixed-in-clickhouse-release-1-1-54388-2018-06-28}
+
+### CVE-2018-14668 {#cve-2018-14668}
+
+The “remote” table function allowed arbitrary symbols in the “user”, “password” and “default\_database” fields, which led to Cross Protocol Request Forgery attacks.
+
+Credits: Andrey Krasichkov of the Yandex information security team
+
+## Fixed in ClickHouse Release 1.1.54390, 2018-07-06 {#fixed-in-clickhouse-release-1-1-54390-2018-07-06}
+
+### CVE-2018-14669 {#cve-2018-14669}
+
+The ClickHouse MySQL client had “LOAD DATA LOCAL INFILE” functionality enabled, which allowed a malicious MySQL database to read arbitrary files from the connected ClickHouse server.
+
+Credits: Andrey Krasichkov and Evgeny Sidorov of the Yandex information security team
+
+## Fixed in ClickHouse Release 1.1.54131, 2017-01-10 {#fixed-in-clickhouse-release-1-1-54131-2017-01-10}
+
+### CVE-2018-14670 {#cve-2018-14670}
+
+Incorrect configuration in the deb package could lead to unauthorized use of the database.
+
+Credits: the UK's National Cyber Security Centre (NCSC)
+
+{## [Original article](https://clickhouse.tech/docs/en/security_changelog/) ##}
diff --git a/docs/toc_es.yml b/docs/toc_es.yml deleted file mode 100644 index 8f322919ebf..00000000000 --- a/docs/toc_es.yml +++ /dev/null @@ -1,242 +0,0 @@ -nav: -- "Implantaci\xF3n": - - "Descripci\xF3n": index.md - - "Caracter\xEDsticas distintivas de ClickHouse": introduction/distinctive_features.md - - "Caracter\xEDsticas de ClickHouse que pueden considerarse desventajas": introduction/features_considered_disadvantages.md - - Rendimiento: introduction/performance.md - - Historia: introduction/history.md - - Adoptante: introduction/adopters.md -- Primeros pasos: - - oculto: getting_started/index.md - - "Instalaci\xF3n": getting_started/install.md - - Tutorial: getting_started/tutorial.md - - Datos De Ejemplo: - - A tiempo: getting_started/example_datasets/ontime.md - - Datos de taxis de Nueva York: getting_started/example_datasets/nyc_taxi.md - - Referencia de Big Data de AMPLab: getting_started/example_datasets/amplab_benchmark.md - - "Nombre de la red inal\xE1mbrica (SSID)": getting_started/example_datasets/wikistat.md - - Registros de clics de Terabyte de Criteo: getting_started/example_datasets/criteo.md - - Estrella Schema Benchmark: getting_started/example_datasets/star_schema.md - - El Yandex.Metrica Datos: getting_started/example_datasets/metrica.md -- Interfaz: - - "Implantaci\xF3n": interfaces/index.md - - "Cliente de l\xEDnea de comandos": interfaces/cli.md - - Interfaz nativa (TCP): interfaces/tcp.md - - Interfaz HTTP: interfaces/http.md - - Interfaz MySQL: interfaces/mysql.md - - Formatos de entrada y salida: interfaces/formats.md - - Controlador JDBC: interfaces/jdbc.md - - Conductor ODBC: interfaces/odbc.md - - Biblioteca de clientes de C++: interfaces/cpp.md - - tercero: - - Bibliotecas de clientes:
interfaces/third-party/client_libraries.md - - "Integraci\xF3n": interfaces/third-party/integrations.md - - Interfaces Visuales: interfaces/third-party/gui.md - - Proxy: interfaces/third-party/proxy.md -- Motor: - - Motores de mesa: - - "Implantaci\xF3n": operations/table_engines/index.md - - Familia MergeTree: - - "M\xE9todo de codificaci\xF3n de datos": operations/table_engines/mergetree.md - - "Replicaci\xF3n de datos": operations/table_engines/replication.md - - "Clave de partici\xF3n personalizada": operations/table_engines/custom_partitioning_key.md - - ReplacingMergeTree: operations/table_engines/replacingmergetree.md - - SummingMergeTree: operations/table_engines/summingmergetree.md - - "Agregaci\xF3nMergeTree": operations/table_engines/aggregatingmergetree.md - - ColapsarMergeTree: operations/table_engines/collapsingmergetree.md - - VersionedCollapsingMergeTree: operations/table_engines/versionedcollapsingmergetree.md - - GraphiteMergeTree: operations/table_engines/graphitemergetree.md - - Familia de registro: - - "Implantaci\xF3n": operations/table_engines/log_family.md - - StripeLog: operations/table_engines/stripelog.md - - Registro: operations/table_engines/log.md - - TinyLog: operations/table_engines/tinylog.md - - "Integraci\xF3n": - - Kafka: operations/table_engines/kafka.md - - MySQL: operations/table_engines/mysql.md - - JDBC: operations/table_engines/jdbc.md - - ODBC: operations/table_engines/odbc.md - - HDFS: operations/table_engines/hdfs.md - - Especial: - - Distribuido: operations/table_engines/distributed.md - - Datos externos: operations/table_engines/external_data.md - - Diccionario: operations/table_engines/dictionary.md - - Fusionar: operations/table_engines/merge.md - - File: operations/table_engines/file.md - - Nulo: operations/table_engines/null.md - - Establecer: operations/table_engines/set.md - - Unir: operations/table_engines/join.md - - URL: operations/table_engines/url.md - - Vista: operations/table_engines/view.md - - "M\xE9todo de codificaci\xF3n de datos": operations/table_engines/materializedview.md - - Memoria: operations/table_engines/memory.md - - "B\xFAfer": operations/table_engines/buffer.md - - GenerateRandom: operations/table_engines/generate.md - - Motores de base de datos: - - "Implantaci\xF3n": database_engines/index.md - - MySQL: database_engines/mysql.md - - Perezoso: database_engines/lazy.md -- Referencia SQL: - - oculto: query_language/index.md - - Sintaxis: query_language/syntax.md - - "Instrucci\xF3n": - - SELECCIONAR: query_language/select.md - - INSERTAR EN: query_language/insert_into.md - - CREAR: query_language/create.md - - ALTERAR: query_language/alter.md - - SISTEMA: query_language/system.md - - MOSTRAR: query_language/show.md - - Otro: query_language/misc.md - - "Funci\xF3n": - - "Implantaci\xF3n": query_language/functions/index.md - - "Aritm\xE9tica": query_language/functions/arithmetic_functions.md - - "Comparaci\xF3n": query_language/functions/comparison_functions.md - - "L\xF3gico": query_language/functions/logical_functions.md - - "Conversi\xF3n de tipo": query_language/functions/type_conversion_functions.md - - Trabajar con fechas y horas: query_language/functions/date_time_functions.md - - Trabajar con cadenas: query_language/functions/string_functions.md - - Para buscar cadenas: query_language/functions/string_search_functions.md - - Para reemplazar en cadenas: query_language/functions/string_replace_functions.md - - 'Condicional ': query_language/functions/conditional_functions.md - - "Matem\xE1tica": 
query_language/functions/math_functions.md - - Redondeo: query_language/functions/rounding_functions.md - - Trabajar con matrices: query_language/functions/array_functions.md - - "Divisi\xF3n y fusi\xF3n de cuerdas y matrices": query_language/functions/splitting_merging_functions.md - - Trozo: query_language/functions/bit_functions.md - - Bits: query_language/functions/bitmap_functions.md - - Hash: query_language/functions/hash_functions.md - - "Generaci\xF3n de n\xFAmeros pseudo-aleatorios": query_language/functions/random_functions.md - - "Codificaci\xF3n": query_language/functions/encoding_functions.md - - Trabajando con UUID: query_language/functions/uuid_functions.md - - Trabajar con URL: query_language/functions/url_functions.md - - Trabajar con direcciones IP: query_language/functions/ip_address_functions.md - - Trabajando con JSON.: query_language/functions/json_functions.md - - Orden superior: query_language/functions/higher_order_functions.md - - Trabajar con diccionarios externos: query_language/functions/ext_dict_functions.md - - Trabajando con Yandex.Diccionarios de Metrica: query_language/functions/ym_dict_functions.md - - "Implementaci\xF3n del operador IN": query_language/functions/in_functions.md - - arrayJoin: query_language/functions/array_join.md - - "Trabajar con coordenadas geogr\xE1ficas": query_language/functions/geo.md - - Trabajar con argumentos Nullable: query_language/functions/functions_for_nulls.md - - "Funciones de aprendizaje autom\xE1tico": query_language/functions/machine_learning_functions.md - - "Introspecci\xF3n": query_language/functions/introspection.md - - Otro: query_language/functions/other_functions.md - - Funciones agregadas: - - "Implantaci\xF3n": query_language/agg_functions/index.md - - Referencia: query_language/agg_functions/reference.md - - Combinadores de funciones agregadas: query_language/agg_functions/combinators.md - - "Funciones agregadas param\xE9tricas": query_language/agg_functions/parametric_functions.md - - Funciones de tabla: - - "Implantaci\xF3n": query_language/table_functions/index.md - - file: query_language/table_functions/file.md - - fusionar: query_language/table_functions/merge.md - - numero: query_language/table_functions/numbers.md - - remoto: query_language/table_functions/remote.md - - URL: query_language/table_functions/url.md - - mysql: query_language/table_functions/mysql.md - - jdbc: query_language/table_functions/jdbc.md - - Nosotros: query_language/table_functions/odbc.md - - Hdfs: query_language/table_functions/hdfs.md - - entrada: query_language/table_functions/input.md - - generateRandom: query_language/table_functions/generate.md - - Diccionario: - - "Implantaci\xF3n": query_language/dicts/index.md - - Diccionarios externos: - - "Descripci\xF3n General": query_language/dicts/external_dicts.md - - "Configuraci\xF3n de un diccionario externo": query_language/dicts/external_dicts_dict.md - - Almacenamiento de diccionarios en la memoria: query_language/dicts/external_dicts_dict_layout.md - - Actualizaciones del diccionario: query_language/dicts/external_dicts_dict_lifetime.md - - Fuentes de diccionarios externos: query_language/dicts/external_dicts_dict_sources.md - - Clave y campos del diccionario: query_language/dicts/external_dicts_dict_structure.md - - "Diccionarios jer\xE1rquicos": query_language/dicts/external_dicts_dict_hierarchical.md - - Diccionarios internos: query_language/dicts/internal_dicts.md - - Operador: query_language/operators.md - - Tipos de datos: - - "Implantaci\xF3n": data_types/index.md 
- - UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64: data_types/int_uint.md - - "Descripci\xF3n del producto": data_types/float.md - - Decimal: data_types/decimal.md - - Booleana: data_types/boolean.md - - Cadena: data_types/string.md - - Cadena fija (N): data_types/fixedstring.md - - UUID: data_types/uuid.md - - Fecha: data_types/date.md - - FechaHora: data_types/datetime.md - - DateTime64: data_types/datetime64.md - - Enum: data_types/enum.md - - Matriz (T): data_types/array.md - - "Agregar funci\xF3n (nombre, types_of_arguments)...)": data_types/nested_data_structures/aggregatefunction.md - - Tuple (T1, T2, ...): data_types/tuple.md - - 'NULL': data_types/nullable.md - - Estructuras de datos anidados: - - oculto: data_types/nested_data_structures/index.md - - Anidado (Nombre1 Tipo1, Nombre2 Tipo2, ...): data_types/nested_data_structures/nested.md - - Tipos de datos especiales: - - oculto: data_types/special_data_types/index.md - - Expresion: data_types/special_data_types/expression.md - - Establecer: data_types/special_data_types/set.md - - Nada: data_types/special_data_types/nothing.md - - Intervalo: data_types/special_data_types/interval.md - - Dominio: - - "Descripci\xF3n": data_types/domains/overview.md - - IPv4: data_types/domains/ipv4.md - - IPv6: data_types/domains/ipv6.md -- Guiar: - - "Descripci\xF3n": guides/index.md - - "Aplicaci\xF3n de modelos CatBoost": guides/apply_catboost_model.md -- "Operaci\xF3n": - - "Implantaci\xF3n": operations/index.md - - Requisito: operations/requirements.md - - Monitoreo: operations/monitoring.md - - "Soluci\xF3n de problemas": operations/troubleshooting.md - - "Actualizaci\xF3n de ClickHouse": operations/update.md - - Derechos de acceso: operations/access_rights.md - - Copia de seguridad de datos: operations/backup.md - - "Archivos de configuraci\xF3n": operations/configuration_files.md - - Cuota: operations/quotas.md - - Tablas del sistema: operations/system_tables.md - - "Optimizaci\xF3n del rendimiento": - - "Generaci\xF3n de perfiles de consultas": operations/performance/sampling_query_profiler.md - - Prueba de hardware: operations/performance_test.md - - "Par\xE1metros de configuraci\xF3n del servidor": - - "Implantaci\xF3n": operations/server_settings/index.md - - "Configuraci\xF3n del servidor": operations/server_settings/settings.md - - "Configuraci\xF3n": - - "Implantaci\xF3n": operations/settings/index.md - - Permisos para consultas: operations/settings/permissions_for_queries.md - - Restricciones en la complejidad de consultas: operations/settings/query_complexity.md - - "Configuraci\xF3n": operations/settings/settings.md - - "Perfiles de configuraci\xF3n": operations/settings/settings_profiles.md - - "Restricciones en la configuraci\xF3n": operations/settings/constraints_on_settings.md - - "Configuraci\xF3n del usuario": operations/settings/settings_users.md - - Utilidad: - - "Descripci\xF3n": operations/utils/index.md - - "M\xE9todo de codificaci\xF3n de datos": operations/utils/clickhouse-copier.md - - Sistema abierto.: operations/utils/clickhouse-local.md - - Sistema abierto.: operations/utils/clickhouse-benchmark.md - - Recomendaciones de uso: operations/tips.md -- Desarrollo: - - oculto: development/index.md - - "La instrucci\xF3n para desarrolladores de ClickHouse para principiantes": development/developer_instruction.md - - "Descripci\xF3n general de la arquitectura ClickHouse": development/architecture.md - - "Examinar el c\xF3digo fuente de ClickHouse": development/browse_code.md - - "C\xF3mo crear ClickHouse en 
Linux": development/build.md - - "C\xF3mo crear ClickHouse en Mac OS X": development/build_osx.md - - "C\xF3mo construir ClickHouse en Linux para Mac OS X": development/build_cross_osx.md - - "C\xF3mo construir ClickHouse en Linux para AARCH64 (ARM64)": development/build_cross_arm.md - - "C\xF3mo escribir c\xF3digo C ++": development/style.md - - "C\xF3mo ejecutar pruebas de ClickHouse": development/tests.md - - Bibliotecas de terceros utilizadas: development/contrib.md -- Comercial: - - Proveedores de servicios en la nube: commercial/cloud.md -- "Qu\xE9 hay de Nuevo": - - Hoja de ruta: roadmap.md - - Changelog: - - Sistema abierto.: changelog/index.md - - Nuestros servicios: changelog/2019.md - - 2018 Nueva York: changelog/2018.md - - "M\xE1s informaci\xF3n": changelog/2017.md - - Seguridad Changelog: security_changelog.md -- Nivel de Cifrado WEP: - - Preguntas generales: faq/general.md - diff --git a/docs/toc_fa.yml b/docs/toc_fa.yml deleted file mode 100644 index 8905a2ab041..00000000000 --- a/docs/toc_fa.yml +++ /dev/null @@ -1,251 +0,0 @@ -nav: - -- 'ﯽﻓﺮﻌﻣ': - - 'ClickHouse چیست؟': 'index.md' - - ' ویژگی های برجسته ClickHouse': 'introduction/distinctive_features.md' - - ' ویژگی های از ClickHouse که می تواند معایبی باشد': 'introduction/features_considered_disadvantages.md' - - 'ﯽﯾﺍﺭﺎﮐ': 'introduction/performance.md' - - 'ﺦﯾﺭﺎﺗ': 'introduction/history.md' - - 'Adopters': 'introduction/adopters.md' - -- 'Getting started': - - 'hidden': 'getting_started/index.md' - - 'ﯼﺯﺍﺪﻧﺍ ﻩﺍﺭ ﻭ ﺐﺼﻧ': 'getting_started/install.md' - - 'ﺵﺯﻮﻣﺁ': 'getting_started/tutorial.md' - - 'ﻪﻧﻮﻤﻧ ﯼﺎﻫ ﻩﺩﺍﺩ ﻪﻋﻮﻤﺠﻣ': - - 'Introduction': 'getting_started/example_datasets/index.md' - - 'OnTime': 'getting_started/example_datasets/ontime.md' - - ' داده های تاکسی New York': 'getting_started/example_datasets/nyc_taxi.md' - - ' بنچمارک AMPLab Big Data': 'getting_started/example_datasets/amplab_benchmark.md' - - 'WikiStat': 'getting_started/example_datasets/wikistat.md' - - ' ترابایت از لاگ های کلیک از سرویس Criteo': 'getting_started/example_datasets/criteo.md' - - ' بنچمارک Star Schema': 'getting_started/example_datasets/star_schema.md' - - 'Yandex.Metrica Data': 'getting_started/example_datasets/metrica.md' - - 'Playground': 'getting_started/playground.md' - -- 'ﻂﺑﺍﺭ': - - 'Interface ها': 'interfaces/index.md' - - ' کلاینت Command-line': 'interfaces/cli.md' - - 'Native interface (TCP)': 'interfaces/tcp.md' - - 'HTTP interface': 'interfaces/http.md' - - 'MySQL Interface': 'interfaces/mysql.md' - - ' فرمت های Input و Output': 'interfaces/formats.md' - - ' درایور JDBC': 'interfaces/jdbc.md' - - ' درایور ODBC': 'interfaces/odbc.md' - - 'C ++ کتابخانه مشتری': 'interfaces/cpp.md' - - 'Third-party': - - 'کتابخانه های مشتری': 'interfaces/third-party/client_libraries.md' - - 'یکپارچگی': 'interfaces/third-party/integrations.md' - - 'رابط های بصری': 'interfaces/third-party/gui.md' - - 'پروکسی': 'interfaces/third-party/proxy.md' - -- 'ﻩﺩﺍﺩ ﻉﺍﻮﻧﺍ': - - 'Introduction': 'data_types/index.md' - - 'UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64': 'data_types/int_uint.md' - - 'Float32, Float64': 'data_types/float.md' - - 'Decimal': 'data_types/decimal.md' - - ' مقادیر Boolean': 'data_types/boolean.md' - - 'String': 'data_types/string.md' - - 'FixedString(N)': 'data_types/fixedstring.md' - - 'UUID': 'data_types/uuid.md' - - 'Date': 'data_types/date.md' - - 'DateTime': 'data_types/datetime.md' - - 'DateTime64': 'data_types/datetime64.md' - - 'Enum': 'data_types/enum.md' - - 'Array(T)': 'data_types/array.md' - - 'AggregateFunction(name, 
types_of_arguments...)': 'data_types/nested_data_structures/aggregatefunction.md' - - 'Tuple(T1, T2, ...)': 'data_types/tuple.md' - - 'Nullable': 'data_types/nullable.md' - - 'Nested data structures': - - 'hidden': 'data_types/nested_data_structures/index.md' - - 'Nested(Name1 Type1, Name2 Type2, ...)': 'data_types/nested_data_structures/nested.md' - - 'Special data types': - - 'hidden': 'data_types/special_data_types/index.md' - - 'Expression': 'data_types/special_data_types/expression.md' - - 'Set': 'data_types/special_data_types/set.md' - - 'Nothing': 'data_types/special_data_types/nothing.md' - - 'Interval': 'data_types/special_data_types/interval.md' - - 'Domains': - - 'Overview': 'data_types/domains/overview.md' - - 'IPv4': 'data_types/domains/ipv4.md' - - 'IPv6': 'data_types/domains/ipv6.md' - -- 'Database Engines': - - 'Introduction': 'database_engines/index.md' - - 'MySQL': 'database_engines/mysql.md' - - 'Lazy': 'database_engines/lazy.md' - -- 'Table Engines': - - 'Introduction': 'operations/table_engines/index.md' - - 'MergeTree Family': - - 'MergeTree': 'operations/table_engines/mergetree.md' - - 'Data Replication': 'operations/table_engines/replication.md' - - 'Custom Partitioning Key': 'operations/table_engines/custom_partitioning_key.md' - - 'ReplacingMergeTree': 'operations/table_engines/replacingmergetree.md' - - 'SummingMergeTree': 'operations/table_engines/summingmergetree.md' - - 'AggregatingMergeTree': 'operations/table_engines/aggregatingmergetree.md' - - 'CollapsingMergeTree': 'operations/table_engines/collapsingmergetree.md' - - 'VersionedCollapsingMergeTree': 'operations/table_engines/versionedcollapsingmergetree.md' - - 'GraphiteMergeTree': 'operations/table_engines/graphitemergetree.md' - - 'Log Family': - - 'Introduction': 'operations/table_engines/log_family.md' - - 'StripeLog': 'operations/table_engines/stripelog.md' - - 'Log': 'operations/table_engines/log.md' - - 'TinyLog': 'operations/table_engines/tinylog.md' - - 'Integrations': - - 'Kafka': 'operations/table_engines/kafka.md' - - 'MySQL': 'operations/table_engines/mysql.md' - - 'JDBC': 'operations/table_engines/jdbc.md' - - 'ODBC': 'operations/table_engines/odbc.md' - - 'HDFS': 'operations/table_engines/hdfs.md' - - 'Special': - - 'Distributed': 'operations/table_engines/distributed.md' - - 'External data': 'operations/table_engines/external_data.md' - - 'Dictionary': 'operations/table_engines/dictionary.md' - - 'Merge': 'operations/table_engines/merge.md' - - 'File': 'operations/table_engines/file.md' - - 'Null': 'operations/table_engines/null.md' - - 'Set': 'operations/table_engines/set.md' - - 'Join': 'operations/table_engines/join.md' - - 'URL': 'operations/table_engines/url.md' - - 'View': 'operations/table_engines/view.md' - - 'MaterializedView': 'operations/table_engines/materializedview.md' - - 'Memory': 'operations/table_engines/memory.md' - - 'Buffer': 'operations/table_engines/buffer.md' - - 'GenerateRandom': 'operations/table_engines/generate.md' - -- 'SQL Reference': - - 'hidden': 'query_language/index.md' - - 'SELECT': 'query_language/select.md' - - 'INSERT INTO': 'query_language/insert_into.md' - - 'CREATE': 'query_language/create.md' - - 'ALTER': 'query_language/alter.md' - - 'SYSTEM': 'query_language/system.md' - - 'SYSTEM': 'query_language/show.md' - - 'Other Kinds of Queries': 'query_language/misc.md' - - 'Functions': - - 'Introduction': 'query_language/functions/index.md' - - 'Arithmetic': 'query_language/functions/arithmetic_functions.md' - - 'Comparison': 
'query_language/functions/comparison_functions.md' - - 'Logical': 'query_language/functions/logical_functions.md' - - 'Type Conversion': 'query_language/functions/type_conversion_functions.md' - - 'Working with Dates and Times': 'query_language/functions/date_time_functions.md' - - 'Working with strings': 'query_language/functions/string_functions.md' - - 'For Searching Strings': 'query_language/functions/string_search_functions.md' - - 'For Replacing in Strings': 'query_language/functions/string_replace_functions.md' - - 'Conditional ': 'query_language/functions/conditional_functions.md' - - 'Mathematical': 'query_language/functions/math_functions.md' - - 'Rounding': 'query_language/functions/rounding_functions.md' - - 'Working with Arrays': 'query_language/functions/array_functions.md' - - 'Splitting and Merging Strings and Arrays': 'query_language/functions/splitting_merging_functions.md' - - 'Bit': 'query_language/functions/bit_functions.md' - - 'Bitmap': 'query_language/functions/bitmap_functions.md' - - 'Hash': 'query_language/functions/hash_functions.md' - - 'Generating Pseudo-Random Numbers': 'query_language/functions/random_functions.md' - - 'Encoding': 'query_language/functions/encoding_functions.md' - - 'Working with UUID': 'query_language/functions/uuid_functions.md' - - 'Working with URLs': 'query_language/functions/url_functions.md' - - 'Working with IP Addresses': 'query_language/functions/ip_address_functions.md' - - 'Working with JSON.': 'query_language/functions/json_functions.md' - - 'Higher-Order': 'query_language/functions/higher_order_functions.md' - - 'Working with External Dictionaries': 'query_language/functions/ext_dict_functions.md' - - 'Working with Yandex.Metrica Dictionaries': 'query_language/functions/ym_dict_functions.md' - - 'Implementing the IN Operator': 'query_language/functions/in_functions.md' - - 'arrayJoin': 'query_language/functions/array_join.md' - - 'Working with geographical coordinates': 'query_language/functions/geo.md' - - 'Working with Nullable arguments': 'query_language/functions/functions_for_nulls.md' - - 'Machine Learning Functions': 'query_language/functions/machine_learning_functions.md' - - 'Introspection': 'query_language/functions/introspection.md' - - 'Other': 'query_language/functions/other_functions.md' - - 'Aggregate Functions': - - 'Introduction': 'query_language/agg_functions/index.md' - - 'Reference': 'query_language/agg_functions/reference.md' - - 'Aggregate function combinators': 'query_language/agg_functions/combinators.md' - - 'Parametric aggregate functions': 'query_language/agg_functions/parametric_functions.md' - - 'Table Functions': - - 'Introduction': 'query_language/table_functions/index.md' - - 'file': 'query_language/table_functions/file.md' - - 'merge': 'query_language/table_functions/merge.md' - - 'numbers': 'query_language/table_functions/numbers.md' - - 'remote': 'query_language/table_functions/remote.md' - - 'url': 'query_language/table_functions/url.md' - - 'mysql': 'query_language/table_functions/mysql.md' - - 'jdbc': 'query_language/table_functions/jdbc.md' - - 'odbc': 'query_language/table_functions/odbc.md' - - 'hdfs': 'query_language/table_functions/hdfs.md' - - 'input': 'query_language/table_functions/input.md' - - 'generateRandom': 'query_language/table_functions/generate.md' - - 'Dictionaries': - - 'Introduction': 'query_language/dicts/index.md' - - 'External Dictionaries': - - 'General Description': 'query_language/dicts/external_dicts.md' - - 'Configuring an External Dictionary': 
'query_language/dicts/external_dicts_dict.md' - - 'Storing Dictionaries in Memory': 'query_language/dicts/external_dicts_dict_layout.md' - - 'Dictionary Updates': 'query_language/dicts/external_dicts_dict_lifetime.md' - - 'Sources of External Dictionaries': 'query_language/dicts/external_dicts_dict_sources.md' - - 'Dictionary Key and Fields': 'query_language/dicts/external_dicts_dict_structure.md' - - 'Hierarchical dictionaries': 'query_language/dicts/external_dicts_dict_hierarchical.md' - - 'Internal Dictionaries': 'query_language/dicts/internal_dicts.md' - - 'Operators': 'query_language/operators.md' - - 'General Syntax': 'query_language/syntax.md' - -- 'Guides': - - 'Overview': 'guides/index.md' - - 'Applying CatBoost Models': 'guides/apply_catboost_model.md' - -- 'Operations': - - 'Introduction': 'operations/index.md' - - 'Requirements': 'operations/requirements.md' - - 'Monitoring': 'operations/monitoring.md' - - 'Troubleshooting': 'operations/troubleshooting.md' - - 'Usage Recommendations': 'operations/tips.md' - - 'ClickHouse Update': 'operations/update.md' - - 'Access Rights': 'operations/access_rights.md' - - 'Data Backup': 'operations/backup.md' - - 'Configuration Files': 'operations/configuration_files.md' - - 'Quotas': 'operations/quotas.md' - - 'System Tables': 'operations/system_tables.md' - - 'Optimizing Performance': - - 'Query Profiling': 'operations/performance/sampling_query_profiler.md' - - 'Testing Hardware': 'operations/performance_test.md' - - 'Server Configuration Parameters': - - 'Introduction': 'operations/server_settings/index.md' - - 'Server Settings': 'operations/server_settings/settings.md' - - 'Settings': - - 'Introduction': 'operations/settings/index.md' - - 'Permissions for Queries': 'operations/settings/permissions_for_queries.md' - - 'Restrictions on Query Complexity': 'operations/settings/query_complexity.md' - - 'Settings': 'operations/settings/settings.md' - - 'Settings Profiles': 'operations/settings/settings_profiles.md' - - 'User Settings': 'operations/settings/settings_users.md' - - 'Constraints on Settings': 'operations/settings/constraints_on_settings.md' - - 'Utilities': - - 'Overview': 'operations/utils/index.md' - - 'clickhouse-copier': 'operations/utils/clickhouse-copier.md' - - 'clickhouse-local': 'operations/utils/clickhouse-local.md' - - 'clickhouse-benchmark': 'operations/utils/clickhouse-benchmark.md' - -- 'F.A.Q.': - - 'General Questions': 'faq/general.md' - -- 'Development': - - 'hidden': 'development/index.md' - - 'The Beginner ClickHouse Developer Instruction': 'development/developer_instruction.md' - - 'Overview of ClickHouse Architecture': 'development/architecture.md' - - 'Browse ClickHouse Source Code': 'development/browse_code.md' - - 'How to Build ClickHouse on Linux': 'development/build.md' - - 'How to Build ClickHouse on Mac OS X': 'development/build_osx.md' - - 'How to Build ClickHouse on Linux for Mac OS X': 'development/build_cross_osx.md' - - 'How to Build ClickHouse on Linux for AARCH64 (ARM64)': 'development/build_cross_arm.md' - - 'How to Write C++ code': 'development/style.md' - - 'How to Run ClickHouse Tests': 'development/tests.md' - - 'Third-Party Libraries Used': 'development/contrib.md' - -- 'What''s New': - - 'Roadmap': 'roadmap.md' - - 'Changelog': - - '2020': 'changelog/index.md' - - '2019': 'changelog/2019.md' - - '2018': 'changelog/2018.md' - - '2017': 'changelog/2017.md' - - 'Security Changelog': 'security_changelog.md' diff --git a/docs/toc_fr.yml b/docs/toc_fr.yml deleted file mode 100644 index 
cecd7327a5f..00000000000 --- a/docs/toc_fr.yml +++ /dev/null @@ -1,244 +0,0 @@ -nav: -- Introduction: - - "Aper\xE7u": index.md - - "Caract\xE9ristiques distinctives de ClickHouse": introduction/distinctive_features.md - - "Caract\xE9ristiques de ClickHouse qui peuvent \xEAtre consid\xE9r\xE9es comme des inconv\xE9nients": introduction/features_considered_disadvantages.md - - Performance: introduction/performance.md - - Histoire: introduction/history.md - - Adoptant: introduction/adopters.md -- Prise En Main: - - hidden: getting_started/index.md - - Installation: getting_started/install.md - - Tutoriel: getting_started/tutorial.md - - "Exemple De Jeux De Donn\xE9es": - - Introduction: getting_started/example_datasets/index.md - - OnTime: getting_started/example_datasets/ontime.md - - "New York Taxi Donn\xE9es": getting_started/example_datasets/nyc_taxi.md - - AMPLab Big Data Benchmark: getting_started/example_datasets/amplab_benchmark.md - - WikiStat: getting_started/example_datasets/wikistat.md - - "T\xE9raoctet click Logs de Criteo": getting_started/example_datasets/criteo.md - - "R\xE9f\xE9rence Du Sch\xE9ma En \xC9toile": getting_started/example_datasets/star_schema.md - - "Yandex.Metrica De Donn\xE9es": getting_started/example_datasets/metrica.md - - "R\xE9cr\xE9ation": getting_started/playground.md -- Interface: - - Introduction: interfaces/index.md - - Client De Ligne De Commande: interfaces/cli.md - - Interface Native (TCP): interfaces/tcp.md - - Interface HTTP: interfaces/http.md - - Interface MySQL: interfaces/mysql.md - - "Formats d'entr\xE9e et de sortie": interfaces/formats.md - - JDBC: interfaces/jdbc.md - - Pilote ODBC: interfaces/odbc.md - - "Biblioth\xE8que Client C++ ": interfaces/cpp.md - - tiers: - - "Biblioth\xE8ques Clientes": interfaces/third-party/client_libraries.md - - "Int\xE9gration": interfaces/third-party/integrations.md - - Les Interfaces Visuelles: interfaces/third-party/gui.md - - Proxy: interfaces/third-party/proxy.md -- Moteur: - - Moteurs De Table: - - Introduction: operations/table_engines/index.md - - Famille MergeTree: - - MergeTree: operations/table_engines/mergetree.md - - "R\xE9plication Des Donn\xE9es": operations/table_engines/replication.md - - "Cl\xE9 De Partitionnement Personnalis\xE9e": operations/table_engines/custom_partitioning_key.md - - ReplacingMergeTree: operations/table_engines/replacingmergetree.md - - SummingMergeTree: operations/table_engines/summingmergetree.md - - AggregatingMergeTree: operations/table_engines/aggregatingmergetree.md - - CollapsingMergeTree: operations/table_engines/collapsingmergetree.md - - VersionedCollapsingMergeTree: operations/table_engines/versionedcollapsingmergetree.md - - GraphiteMergeTree: operations/table_engines/graphitemergetree.md - - Journal De La Famille: - - Introduction: operations/table_engines/log_family.md - - StripeLog: operations/table_engines/stripelog.md - - Journal: operations/table_engines/log.md - - TinyLog: operations/table_engines/tinylog.md - - "Int\xE9gration": - - Kafka: operations/table_engines/kafka.md - - MySQL: operations/table_engines/mysql.md - - JDBC: operations/table_engines/jdbc.md - - ODBC: operations/table_engines/odbc.md - - HDFS: operations/table_engines/hdfs.md - - "Sp\xE9cial": - - "Distribu\xE9": operations/table_engines/distributed.md - - "De donn\xE9es externes": operations/table_engines/external_data.md - - Dictionnaire: operations/table_engines/dictionary.md - - Fusionner: operations/table_engines/merge.md - - Fichier: operations/table_engines/file.md - - 
'NULL': operations/table_engines/null.md - - "D\xE9finir": operations/table_engines/set.md - - Rejoindre: operations/table_engines/join.md - - URL: operations/table_engines/url.md - - Vue: operations/table_engines/view.md - - MaterializedView: operations/table_engines/materializedview.md - - "M\xE9moire": operations/table_engines/memory.md - - Tampon: operations/table_engines/buffer.md - - GenerateRandom: operations/table_engines/generate.md - - "Moteurs De Base De Donn\xE9es": - - Introduction: database_engines/index.md - - MySQL: database_engines/mysql.md - - Paresseux: database_engines/lazy.md -- "R\xE9f\xE9rence SQL": - - hidden: query_language/index.md - - Syntaxe: query_language/syntax.md - - "D\xE9claration": - - SELECT: query_language/select.md - - INSERT INTO: query_language/insert_into.md - - CREATE: query_language/create.md - - ALTER: query_language/alter.md - - SYSTEM: query_language/system.md - - SHOW: query_language/show.md - - Autre: query_language/misc.md - - Fonction: - - Introduction: query_language/functions/index.md - - "Arithm\xE9tique": query_language/functions/arithmetic_functions.md - - Comparaison: query_language/functions/comparison_functions.md - - Logique: query_language/functions/logical_functions.md - - La Conversion De Type: query_language/functions/type_conversion_functions.md - - Travailler avec les Dates et les heures: query_language/functions/date_time_functions.md - - "Travailler avec des cha\xEEnes": query_language/functions/string_functions.md - - "Pour Rechercher Des Cha\xEEnes": query_language/functions/string_search_functions.md - - "Pour remplacer dans les cha\xEEnes": query_language/functions/string_replace_functions.md - - 'Conditionnel ': query_language/functions/conditional_functions.md - - "Math\xE9matique": query_language/functions/math_functions.md - - Arrondi: query_language/functions/rounding_functions.md - - Travailler avec des tableaux: query_language/functions/array_functions.md - - "Fractionnement et fusion de cha\xEEnes et de tableaux": query_language/functions/splitting_merging_functions.md - - Bit: query_language/functions/bit_functions.md - - Bitmap: query_language/functions/bitmap_functions.md - - Hachage: query_language/functions/hash_functions.md - - "La G\xE9n\xE9ration De Nombres Pseudo-Al\xE9atoires": query_language/functions/random_functions.md - - Encodage: query_language/functions/encoding_functions.md - - Travailler avec UUID: query_language/functions/uuid_functions.md - - Travailler avec des URL: query_language/functions/url_functions.md - - Travailler avec des adresses IP: query_language/functions/ip_address_functions.md - - Travailler avec JSON.: query_language/functions/json_functions.md - - "D'Ordre Sup\xE9rieur": query_language/functions/higher_order_functions.md - - Travailler avec des dictionnaires externes: query_language/functions/ext_dict_functions.md - - Travailler avec Yandex.Dictionnaires Metrica: query_language/functions/ym_dict_functions.md - - "Mise en \u0153uvre de L'op\xE9rateur IN": query_language/functions/in_functions.md - - arrayJoin: query_language/functions/array_join.md - - "Travailler avec des coordonn\xE9es g\xE9ographiques": query_language/functions/geo.md - - Travailler avec des arguments nullables: query_language/functions/functions_for_nulls.md - - Fonctions D'Apprentissage Automatique: query_language/functions/machine_learning_functions.md - - Introspection: query_language/functions/introspection.md - - Autre: query_language/functions/other_functions.md - - "Les Fonctions D'Agr\xE9gation": 
- - Introduction: query_language/agg_functions/index.md - - "R\xE9f\xE9rence": query_language/agg_functions/reference.md - - "Combinateurs de fonction d'agr\xE9gat": query_language/agg_functions/combinators.md - - "Fonctions d'agr\xE9gat param\xE9triques": query_language/agg_functions/parametric_functions.md - - Les Fonctions De Table: - - Introduction: query_language/table_functions/index.md - - fichier: query_language/table_functions/file.md - - fusionner: query_language/table_functions/merge.md - - nombre: query_language/table_functions/numbers.md - - distant: query_language/table_functions/remote.md - - URL: query_language/table_functions/url.md - - mysql: query_language/table_functions/mysql.md - - jdbc: query_language/table_functions/jdbc.md - - ODBC: query_language/table_functions/odbc.md - - hdfs: query_language/table_functions/hdfs.md - - "entr\xE9e": query_language/table_functions/input.md - - generateRandom: query_language/table_functions/generate.md - - Dictionnaire: - - Introduction: query_language/dicts/index.md - - Dictionnaires Externes: - - "Description G\xE9n\xE9rale": query_language/dicts/external_dicts.md - - Configuration D'un dictionnaire externe: query_language/dicts/external_dicts_dict.md - - "Stockage des dictionnaires en m\xE9moire": query_language/dicts/external_dicts_dict_layout.md - - "Mises \xC0 Jour Du Dictionnaire": query_language/dicts/external_dicts_dict_lifetime.md - - Sources de dictionnaires externes: query_language/dicts/external_dicts_dict_sources.md - - "Cl\xE9 et champs du dictionnaire": query_language/dicts/external_dicts_dict_structure.md - - "Dictionnaires hi\xE9rarchiques": query_language/dicts/external_dicts_dict_hierarchical.md - - Dictionnaires Internes: query_language/dicts/internal_dicts.md - - "Op\xE9rateur": query_language/operators.md - - "Types De Donn\xE9es": - - Introduction: data_types/index.md - - UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64: data_types/int_uint.md - - Float32, Float64: data_types/float.md - - "D\xE9cimal": data_types/decimal.md - - "Bool\xE9en": data_types/boolean.md - - "Cha\xEEne": data_types/string.md - - FixedString (N): data_types/fixedstring.md - - UUID: data_types/uuid.md - - Date: data_types/date.md - - DateTime: data_types/datetime.md - - DateTime64: data_types/datetime64.md - - Enum: data_types/enum.md - - Array(T): data_types/array.md - - AggregateFunction (nom, types_of_arguments...): data_types/nested_data_structures/aggregatefunction.md - - Tuple (T1, T2,...): data_types/tuple.md - - Nullable: data_types/nullable.md - - "Structures De Donn\xE9es Imbriqu\xE9es": - - hidden: data_types/nested_data_structures/index.md - - "Imbriqu\xE9e(Type1 Nom1, Nom2 Type2, ...)": data_types/nested_data_structures/nested.md - - "Types De Donn\xE9es Sp\xE9ciaux": - - hidden: data_types/special_data_types/index.md - - Expression: data_types/special_data_types/expression.md - - "D\xE9finir": data_types/special_data_types/set.md - - Rien: data_types/special_data_types/nothing.md - - Intervalle: data_types/special_data_types/interval.md - - Domaine: - - "Aper\xE7u": data_types/domains/overview.md - - IPv4: data_types/domains/ipv4.md - - IPv6: data_types/domains/ipv6.md -- Guide: - - "Aper\xE7u": guides/index.md - - "Application Des Mod\xE8les CatBoost": guides/apply_catboost_model.md -- "Op\xE9rations": - - Introduction: operations/index.md - - Exigence: operations/requirements.md - - Surveiller: operations/monitoring.md - - "D\xE9pannage": operations/troubleshooting.md - - "Mise \xC0 Jour De ClickHouse": 
operations/update.md - - "Les Droits D'Acc\xE8s": operations/access_rights.md - - "La Sauvegarde Des Donn\xE9es": operations/backup.md - - Fichiers De Configuration: operations/configuration_files.md - - Quota: operations/quotas.md - - "Les Tables Syst\xE8me": operations/system_tables.md - - Optimisation Des Performances: - - "Profilage De Requ\xEAte": operations/performance/sampling_query_profiler.md - - "Tester Le Mat\xE9riel": operations/performance_test.md - - "Param\xE8tres De Configuration Du Serveur": - - Introduction: operations/server_settings/index.md - - "Les Param\xE8tres Du Serveur": operations/server_settings/settings.md - - "Param\xE8tre": - - Introduction: operations/settings/index.md - - "Autorisations pour les requ\xEAtes": operations/settings/permissions_for_queries.md - - "Restrictions sur la complexit\xE9 des requ\xEAtes": operations/settings/query_complexity.md - - "Param\xE8tre": operations/settings/settings.md - - "Les Param\xE8tres Des Profils": operations/settings/settings_profiles.md - - "Contraintes sur les param\xE8tres": operations/settings/constraints_on_settings.md - - "Les Param\xE8tres De L'Utilisateur": operations/settings/settings_users.md - - Utilitaire: - - "Aper\xE7u": operations/utils/index.md - - clickhouse-copieur: operations/utils/clickhouse-copier.md - - clickhouse-local: operations/utils/clickhouse-local.md - - clickhouse-benchmark: operations/utils/clickhouse-benchmark.md - - Recommandations D'Utilisation: operations/tips.md -- "D\xE9veloppement": - - hidden: development/index.md - - "Le D\xE9butant Clickhouse Developer Instruction": development/developer_instruction.md - - Vue d'ensemble de L'Architecture ClickHouse: development/architecture.md - - Parcourir Le Code Source De ClickHouse: development/browse_code.md - - Comment Construire ClickHouse sur Linux: development/build.md - - Comment Construire ClickHouse sur Mac OS X: development/build_osx.md - - Comment Construire ClickHouse sur Linux pour Mac OS X: development/build_cross_osx.md - - Comment Construire ClickHouse sur Linux pour AARCH64 (ARM64): development/build_cross_arm.md - - "Comment \xE9crire du Code C++ ": development/style.md - - "Comment ex\xE9cuter des Tests ClickHouse": development/tests.md - - "Biblioth\xE8ques Tierces Utilis\xE9es": development/contrib.md -- Commercial: - - Fournisseurs De Services Cloud: commercial/cloud.md -- Ce qui est Nouveau: - - Feuille de route: roadmap.md - - Changelog: - - '2020': changelog/index.md - - '2019': changelog/2019.md - - '2018': changelog/2018.md - - '2017': changelog/2017.md - - "S\xE9curit\xE9 Changelog": security_changelog.md -- F.A.Q.: - - "Questions G\xE9n\xE9rales": faq/general.md - diff --git a/docs/tools/build.py b/docs/tools/build.py index 316a6c3f4dc..cf0bfb23d86 100755 --- a/docs/tools/build.py +++ b/docs/tools/build.py @@ -340,7 +340,7 @@ def write_redirect_html(out_path, to_url): def build_redirect_html(args, from_path, to_path): - for lang in ['en']: # TODO: restore args.lang.split(','): + for lang in ['en', 'es', 'fr', 'ja', 'fa']: # TODO: args.lang.split(','): out_path = os.path.join(args.docs_output_dir, lang, from_path.replace('.md', '/index.html')) version_prefix = args.version_prefix + '/' if args.version_prefix else '/' target_path = to_path.replace('.md', '/') diff --git a/docs/tools/translate/replace-with-translation.sh b/docs/tools/translate/replace-with-translation.sh index 453dd86ba9e..922ac65a921 100755 --- a/docs/tools/translate/replace-with-translation.sh +++ 
b/docs/tools/translate/replace-with-translation.sh @@ -11,7 +11,7 @@ if [[ ! -z $SLEEP ]] then sleep $[ ( $RANDOM % 20 ) + 1 ]s fi -git rm -f "${INPUT}" +rm -f "${INPUT}" mkdir -p $(dirname "${INPUT}") || true YANDEX=1 "${BASE_DIR}/translate.sh" "${TARGET_LANGUAGE}" "${TEMP_FILE}" "${INPUT}" git add "${INPUT}" diff --git a/docs/tools/translate/split_meta.py b/docs/tools/translate/split_meta.py index 021fe273790..c76d3391082 100755 --- a/docs/tools/translate/split_meta.py +++ b/docs/tools/translate/split_meta.py @@ -3,6 +3,7 @@ import os import subprocess import sys +import translate import util @@ -19,6 +20,11 @@ if __name__ == '__main__': ).decode('utf-8').strip() meta['machine_translated'] = True meta['machine_translated_rev'] = rev + title = meta.get('toc_title') + if title: + meta['toc_title'] = translate.translate(title, target_language) + if 'en_copy' in meta: + del meta['en_copy'] with open(content_path, 'w') as f: print(content, file=f) From b441e8a40883606ae745b87dc0d93fad36d1fe5a Mon Sep 17 00:00:00 2001 From: Vasily Nemkov Date: Mon, 6 Jan 2020 07:25:04 +0300 Subject: [PATCH 099/484] Integration test for Distributed over Distributed (from #8640) --- .../__init__.py | 0 .../configs/remote_servers.xml | 18 ++++ .../configs/set_distributed_defaults.xml | 35 +++++++ .../test_distributed_over_distributed/test.py | 98 +++++++++++++++++++ 4 files changed, 151 insertions(+) create mode 100644 tests/integration/test_distributed_over_distributed/__init__.py create mode 100644 tests/integration/test_distributed_over_distributed/configs/remote_servers.xml create mode 100644 tests/integration/test_distributed_over_distributed/configs/set_distributed_defaults.xml create mode 100644 tests/integration/test_distributed_over_distributed/test.py diff --git a/tests/integration/test_distributed_over_distributed/__init__.py b/tests/integration/test_distributed_over_distributed/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_distributed_over_distributed/configs/remote_servers.xml b/tests/integration/test_distributed_over_distributed/configs/remote_servers.xml new file mode 100644 index 00000000000..ebce4697529 --- /dev/null +++ b/tests/integration/test_distributed_over_distributed/configs/remote_servers.xml @@ -0,0 +1,18 @@ + + + + + + node1 + 9000 + + + + + node2 + 9000 + + + + + diff --git a/tests/integration/test_distributed_over_distributed/configs/set_distributed_defaults.xml b/tests/integration/test_distributed_over_distributed/configs/set_distributed_defaults.xml new file mode 100644 index 00000000000..194eb1ebb87 --- /dev/null +++ b/tests/integration/test_distributed_over_distributed/configs/set_distributed_defaults.xml @@ -0,0 +1,35 @@ + + + + 3 + 1000 + 1 + + + 5 + 3000 + 1 + + + + + + + + ::/0 + + default + default + + + + + ::/0 + + delays + default + + + + + diff --git a/tests/integration/test_distributed_over_distributed/test.py b/tests/integration/test_distributed_over_distributed/test.py new file mode 100644 index 00000000000..31d6de55bea --- /dev/null +++ b/tests/integration/test_distributed_over_distributed/test.py @@ -0,0 +1,98 @@ +# This test is a subset of the 01223_dist_on_dist. +# (just in case, with real separate instances). 
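+#
+# Topology (see configs/remote_servers.xml): cluster 'test_cluster' has two
+# shards, node1 and node2. Each node holds base_table (Memory),
+# distributed_table (Distributed over base_table on both shards) and
+# distributed_over_distributed_table (Distributed over distributed_table on
+# both shards).
+#
+# Each node inserts the rows (node, 0, base) and (node, 1, base + 1), where
+# base is 0 on node1 and 10 on node2. A read through the two-level Distributed
+# table fans out twice (2 top-level shards x 2 nested shards), so every base
+# row is returned twice and SELECT SUM(value) yields (0 + 1 + 10 + 11) * 2 = 44.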
+ +from __future__ import print_function + +import itertools +import timeit +import logging + +import pytest + +from helpers.cluster import ClickHouseCluster +from helpers.network import PartitionManager +from helpers.test_tools import TSV + + +cluster = ClickHouseCluster(__file__) + +NODES = {'node' + str(i): cluster.add_instance( + 'node' + str(i), + main_configs=['configs/remote_servers.xml'], + user_configs=['configs/set_distributed_defaults.xml'], +) for i in (1, 2)} + +CREATE_TABLES_SQL = ''' +CREATE TABLE + base_table( + node String, + key Int32, + value Int32 + ) +ENGINE = Memory; + +CREATE TABLE + distributed_table +AS base_table +ENGINE = Distributed(test_cluster, default, base_table); + +CREATE TABLE + distributed_over_distributed_table +AS distributed_table +ENGINE = Distributed('test_cluster', default, distributed_table); +''' + +INSERT_SQL_TEMPLATE = "INSERT INTO base_table VALUES ('{node_id}', {key}, {value})" + +@pytest.fixture(scope="session") +def started_cluster(): + try: + cluster.start() + for node_index, (node_name, node) in enumerate(NODES.items()): + node.query(CREATE_TABLES_SQL) + for i in range(0, 2): + node.query(INSERT_SQL_TEMPLATE.format(node_id=node_name, key=i, value=i + (node_index * 10))) + yield cluster + + finally: + cluster.shutdown() + + +@pytest.mark.parametrize("node", NODES.values()) +@pytest.mark.parametrize("source", ["distributed_over_distributed_table", "cluster('test_cluster', default, distributed_table)"]) +class TestDistributedOverDistributedSuite: + def test_select_with_order_by_node(self, started_cluster, node, source): + assert node.query("SELECT * FROM {source} ORDER BY node, key".format(source=source)) \ + == """node1 0 0 +node1 0 0 +node1 1 1 +node1 1 1 +node2 0 10 +node2 0 10 +node2 1 11 +node2 1 11 +""" + + def test_select_with_order_by_key(self, started_cluster, node, source): + assert node.query("SELECT * FROM {source} ORDER BY key, node".format(source=source)) \ + == """node1 0 0 +node1 0 0 +node2 0 10 +node2 0 10 +node1 1 1 +node1 1 1 +node2 1 11 +node2 1 11 +""" + + def test_select_with_group_by_node(self, started_cluster, node, source): + assert node.query("SELECT node, SUM(value) FROM {source} GROUP BY node ORDER BY node".format(source=source)) \ + == "node1 2\nnode2 42\n" + + def test_select_with_group_by_key(self, started_cluster, node, source): + assert node.query("SELECT key, SUM(value) FROM {source} GROUP BY key ORDER BY key".format(source=source)) \ + == "0 20\n1 24\n" + + def test_select_sum(self, started_cluster, node, source): + assert node.query("SELECT SUM(value) FROM {source}".format(source=source)) \ + == "44\n" From 1777e2fd6b5b0df5e395eeff589c86261676e8ff Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Wed, 1 Apr 2020 21:38:01 +0300 Subject: [PATCH 100/484] Regression for Distributed-over-Distributed when nested table has only one shard --- .../0_stateless/01223_dist_on_dist.reference | 17 +++++++++ .../0_stateless/01223_dist_on_dist.sql | 36 +++++++++++++++++-- 2 files changed, 50 insertions(+), 3 deletions(-) diff --git a/tests/queries/0_stateless/01223_dist_on_dist.reference b/tests/queries/0_stateless/01223_dist_on_dist.reference index 7ca613f70fc..4a5dd8f316c 100644 --- a/tests/queries/0_stateless/01223_dist_on_dist.reference +++ b/tests/queries/0_stateless/01223_dist_on_dist.reference @@ -82,3 +82,20 @@ GROUP BY ORDER BY distributed_aggregation_memory_efficient/group_by_two_level_th 0 1 2 +COUNT +132 +distributed_group_by_no_merge +33 +33 +33 +33 +only one shard in nested +66 +distributed_group_by_no_merge 
+33 +33 +merge() +66 +distributed_group_by_no_merge +33 +33 diff --git a/tests/queries/0_stateless/01223_dist_on_dist.sql b/tests/queries/0_stateless/01223_dist_on_dist.sql index 1c239d6c666..1b9175f622e 100644 --- a/tests/queries/0_stateless/01223_dist_on_dist.sql +++ b/tests/queries/0_stateless/01223_dist_on_dist.sql @@ -1,6 +1,11 @@ -create table if not exists data_01223 (key Int) Engine=Memory(); -create table if not exists dist_layer_01223 as data_01223 Engine=Distributed(test_cluster_two_shards, currentDatabase(), data_01223); -create table if not exists dist_01223 as data_01223 Engine=Distributed(test_cluster_two_shards, currentDatabase(), dist_layer_01223); +drop table if exists merge_dist_01223; +drop table if exists dist_01223; +drop table if exists dist_layer_01223; +drop table if exists data_01223; + +create table data_01223 (key Int) Engine=Memory(); +create table dist_layer_01223 as data_01223 Engine=Distributed(test_cluster_two_shards, currentDatabase(), data_01223); +create table dist_01223 as data_01223 Engine=Distributed(test_cluster_two_shards, currentDatabase(), dist_layer_01223); select * from dist_01223; @@ -53,6 +58,31 @@ group_by_two_level_threshold=1, group_by_two_level_threshold_bytes=1, distributed_aggregation_memory_efficient=1; +select 'COUNT'; +select count() from dist_01223; +select 'distributed_group_by_no_merge'; +select count() from dist_01223 settings distributed_group_by_no_merge=1; + +drop table dist_01223; +drop table dist_layer_01223; + +-- only one shard in nested +select 'only one shard in nested'; +create table dist_layer_01223 as data_01223 Engine=Distributed(test_shard_localhost, currentDatabase(), data_01223); +create table dist_01223 as data_01223 Engine=Distributed(test_cluster_two_shards, currentDatabase(), dist_layer_01223); +select count() from dist_01223; + +select 'distributed_group_by_no_merge'; +select count() from dist_01223 settings distributed_group_by_no_merge=1; + +-- wrap with merge() +select 'merge()'; +create table merge_dist_01223 as dist_01223 engine=Merge(currentDatabase(), 'dist_01223'); +select count() from merge_dist_01223; +select 'distributed_group_by_no_merge'; +select count() from merge_dist_01223 settings distributed_group_by_no_merge=1; + +drop table merge_dist_01223; drop table dist_01223; drop table dist_layer_01223; drop table data_01223; From 8d372b0be7ecda063943e6899a078294257fc2b0 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Wed, 1 Apr 2020 21:38:01 +0300 Subject: [PATCH 101/484] Call getQueryProcessingStage() once, since it is heavy for StorageDistributed Refs: #9808 --- src/Storages/StorageMerge.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/Storages/StorageMerge.cpp b/src/Storages/StorageMerge.cpp index f3322c7dfff..dbc85feef05 100644 --- a/src/Storages/StorageMerge.cpp +++ b/src/Storages/StorageMerge.cpp @@ -287,7 +287,8 @@ Pipes StorageMerge::createSources(const SelectQueryInfo & query_info, const Quer return pipes; } - if (processed_stage <= storage->getQueryProcessingStage(*modified_context, query_info.query)) + auto storage_stage = storage->getQueryProcessingStage(*modified_context, query_info.query); + if (processed_stage <= storage_stage) { /// If there are only virtual columns in query, you must request at least one other column. 
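+            /// NOTE: storage_stage above is computed once and reused by both
+            /// branches below: getQueryProcessingStage() can be expensive, e.g. for
+            /// StorageDistributed it may analyze the query against the cluster
+            /// (see getOptimizedCluster()), which is why the result is cached in a
+            /// local variable instead of the method being called twice.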
if (real_column_names.empty()) @@ -295,7 +296,7 @@ Pipes StorageMerge::createSources(const SelectQueryInfo & query_info, const Quer pipes = storage->read(real_column_names, modified_query_info, *modified_context, processed_stage, max_block_size, UInt32(streams_num)); } - else if (processed_stage > storage->getQueryProcessingStage(*modified_context, query_info.query)) + else if (processed_stage > storage_stage) { modified_query_info.query->as()->replaceDatabaseAndTable(source_database, table_name); From 1232760f7836083121a53434cf19665987c16f71 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Wed, 1 Apr 2020 21:38:01 +0300 Subject: [PATCH 102/484] Fix Distributed-over-Distributed when nested table has only one shard --- src/Interpreters/InterpreterSelectQuery.cpp | 2 +- src/Storages/IStorage.h | 13 ++++++-- src/Storages/LiveView/StorageBlocks.h | 2 +- src/Storages/StorageBuffer.cpp | 4 +-- src/Storages/StorageBuffer.h | 2 +- src/Storages/StorageDistributed.cpp | 37 +++++++++++---------- src/Storages/StorageDistributed.h | 2 +- src/Storages/StorageMaterializedView.cpp | 4 +-- src/Storages/StorageMaterializedView.h | 2 +- src/Storages/StorageMerge.cpp | 6 ++-- src/Storages/StorageMerge.h | 2 +- 11 files changed, 44 insertions(+), 32 deletions(-) diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index 514efb90a00..47079a4732c 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ b/src/Interpreters/InterpreterSelectQuery.cpp @@ -510,7 +510,7 @@ Block InterpreterSelectQuery::getSampleBlockImpl(bool try_move_to_prewhere) } if (storage && !options.only_analyze) - from_stage = storage->getQueryProcessingStage(*context, query_ptr); + from_stage = storage->getQueryProcessingStage(*context, options.to_stage, query_ptr); /// Do I need to perform the first part of the pipeline - running on remote servers during distributed processing. bool first_stage = from_stage < QueryProcessingStage::WithMergeableState diff --git a/src/Storages/IStorage.h b/src/Storages/IStorage.h index d3cede6e5c8..4cdfb3b29a3 100644 --- a/src/Storages/IStorage.h +++ b/src/Storages/IStorage.h @@ -218,9 +218,18 @@ public: * * SelectQueryInfo is required since the stage can depend on the query * (see Distributed() engine and optimize_skip_unused_shards). + * + * QueryProcessingStage::Enum is required for Distributed over Distributed, + * since it can never return Complete for intermediate queries. */ - QueryProcessingStage::Enum getQueryProcessingStage(const Context & context) const { return getQueryProcessingStage(context, {}); } - virtual QueryProcessingStage::Enum getQueryProcessingStage(const Context &, const ASTPtr &) const { return QueryProcessingStage::FetchColumns; } + QueryProcessingStage::Enum getQueryProcessingStage(const Context & context) const + { + return getQueryProcessingStage(context, QueryProcessingStage::Complete, {}); + } + virtual QueryProcessingStage::Enum getQueryProcessingStage(const Context &, QueryProcessingStage::Enum /*to_stage*/, const ASTPtr &) const + { + return QueryProcessingStage::FetchColumns; + } /** Watch live changes to the table. 
* Accepts a list of columns to read, as well as a description of the query, diff --git a/src/Storages/LiveView/StorageBlocks.h b/src/Storages/LiveView/StorageBlocks.h index fd856e27718..a21a9374137 100644 --- a/src/Storages/LiveView/StorageBlocks.h +++ b/src/Storages/LiveView/StorageBlocks.h @@ -26,7 +26,7 @@ public: return std::make_shared(table_id, columns, std::move(pipes), to_stage); } std::string getName() const override { return "Blocks"; } - QueryProcessingStage::Enum getQueryProcessingStage(const Context &, const ASTPtr &) const override { return to_stage; } + QueryProcessingStage::Enum getQueryProcessingStage(const Context &, QueryProcessingStage::Enum /*to_stage*/, const ASTPtr &) const override { return to_stage; } Pipes read( const Names & /*column_names*/, diff --git a/src/Storages/StorageBuffer.cpp b/src/Storages/StorageBuffer.cpp index 7699f8379d9..9f5403fec07 100644 --- a/src/Storages/StorageBuffer.cpp +++ b/src/Storages/StorageBuffer.cpp @@ -135,7 +135,7 @@ private: }; -QueryProcessingStage::Enum StorageBuffer::getQueryProcessingStage(const Context & context, const ASTPtr & query_ptr) const +QueryProcessingStage::Enum StorageBuffer::getQueryProcessingStage(const Context &context, QueryProcessingStage::Enum to_stage, const ASTPtr &query_ptr) const { if (destination_id) { @@ -144,7 +144,7 @@ QueryProcessingStage::Enum StorageBuffer::getQueryProcessingStage(const Context if (destination.get() == this) throw Exception("Destination table is myself. Read will cause infinite loop.", ErrorCodes::INFINITE_LOOP); - return destination->getQueryProcessingStage(context, query_ptr); + return destination->getQueryProcessingStage(context, to_stage, query_ptr); } return QueryProcessingStage::FetchColumns; diff --git a/src/Storages/StorageBuffer.h b/src/Storages/StorageBuffer.h index 7a3d907ae76..93f95692b18 100644 --- a/src/Storages/StorageBuffer.h +++ b/src/Storages/StorageBuffer.h @@ -54,7 +54,7 @@ public: std::string getName() const override { return "Buffer"; } - QueryProcessingStage::Enum getQueryProcessingStage(const Context & context, const ASTPtr &) const override; + QueryProcessingStage::Enum getQueryProcessingStage(const Context &, QueryProcessingStage::Enum /*to_stage*/, const ASTPtr &) const override; Pipes read( const Names & column_names, diff --git a/src/Storages/StorageDistributed.cpp b/src/Storages/StorageDistributed.cpp index b4375dd5b0a..410aee748f7 100644 --- a/src/Storages/StorageDistributed.cpp +++ b/src/Storages/StorageDistributed.cpp @@ -242,6 +242,24 @@ void replaceConstantExpressions(ASTPtr & node, const Context & context, const Na visitor.visit(node); } +QueryProcessingStage::Enum getQueryProcessingStageImpl(const Context & context, QueryProcessingStage::Enum to_stage, const ClusterPtr & cluster) +{ + const Settings & settings = context.getSettingsRef(); + + size_t num_local_shards = cluster->getLocalShardCount(); + size_t num_remote_shards = cluster->getRemoteShardCount(); + size_t result_size = (num_remote_shards * settings.max_parallel_replicas) + num_local_shards; + + if (settings.distributed_group_by_no_merge) + return QueryProcessingStage::Complete; + /// A nested distributed query cannot return the Complete stage, + /// since the parent query needs to aggregate the results afterwards. + if (to_stage == QueryProcessingStage::WithMergeableState) + return QueryProcessingStage::WithMergeableState; + return result_size == 1 ? 
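+        /// The to_stage checks above handle the nested-query case; here a single
+        /// target (result_size == 1: one local shard, or one remote shard with
+        /// max_parallel_replicas = 1) can execute the whole query and return
+        /// Complete, while several targets each return a mergeable state that
+        /// the initiator has to combine.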
QueryProcessingStage::Complete + : QueryProcessingStage::WithMergeableState; +} + } @@ -360,25 +378,10 @@ StoragePtr StorageDistributed::createWithOwnCluster( } -static QueryProcessingStage::Enum getQueryProcessingStageImpl(const Context & context, const ClusterPtr & cluster) -{ - const Settings & settings = context.getSettingsRef(); - - size_t num_local_shards = cluster->getLocalShardCount(); - size_t num_remote_shards = cluster->getRemoteShardCount(); - size_t result_size = (num_remote_shards * settings.max_parallel_replicas) + num_local_shards; - - if (settings.distributed_group_by_no_merge) - return QueryProcessingStage::Complete; - else /// Normal mode. - return result_size == 1 ? QueryProcessingStage::Complete - : QueryProcessingStage::WithMergeableState; -} - -QueryProcessingStage::Enum StorageDistributed::getQueryProcessingStage(const Context & context, const ASTPtr & query_ptr) const +QueryProcessingStage::Enum StorageDistributed::getQueryProcessingStage(const Context &context, QueryProcessingStage::Enum to_stage, const ASTPtr &query_ptr) const { auto cluster = getOptimizedCluster(context, query_ptr); - return getQueryProcessingStageImpl(context, cluster); + return getQueryProcessingStageImpl(context, to_stage, cluster); } Pipes StorageDistributed::read( diff --git a/src/Storages/StorageDistributed.h b/src/Storages/StorageDistributed.h index e12831709f7..81c6b54a63e 100644 --- a/src/Storages/StorageDistributed.h +++ b/src/Storages/StorageDistributed.h @@ -67,7 +67,7 @@ public: bool isRemote() const override { return true; } - QueryProcessingStage::Enum getQueryProcessingStage(const Context & context, const ASTPtr &) const override; + QueryProcessingStage::Enum getQueryProcessingStage(const Context &, QueryProcessingStage::Enum /*to_stage*/, const ASTPtr &) const override; Pipes read( const Names & column_names, diff --git a/src/Storages/StorageMaterializedView.cpp b/src/Storages/StorageMaterializedView.cpp index 3fb25bf8275..d974367bb93 100644 --- a/src/Storages/StorageMaterializedView.cpp +++ b/src/Storages/StorageMaterializedView.cpp @@ -171,9 +171,9 @@ StorageInMemoryMetadata StorageMaterializedView::getInMemoryMetadata() const return result; } -QueryProcessingStage::Enum StorageMaterializedView::getQueryProcessingStage(const Context & context, const ASTPtr & query_ptr) const +QueryProcessingStage::Enum StorageMaterializedView::getQueryProcessingStage(const Context &context, QueryProcessingStage::Enum to_stage, const ASTPtr &query_ptr) const { - return getTargetTable()->getQueryProcessingStage(context, query_ptr); + return getTargetTable()->getQueryProcessingStage(context, to_stage, query_ptr); } Pipes StorageMaterializedView::read( diff --git a/src/Storages/StorageMaterializedView.h b/src/Storages/StorageMaterializedView.h index 6284f791f4f..b4cdabe4af2 100644 --- a/src/Storages/StorageMaterializedView.h +++ b/src/Storages/StorageMaterializedView.h @@ -59,7 +59,7 @@ public: void checkTableCanBeDropped() const override; void checkPartitionCanBeDropped(const ASTPtr & partition) override; - QueryProcessingStage::Enum getQueryProcessingStage(const Context & context, const ASTPtr &) const override; + QueryProcessingStage::Enum getQueryProcessingStage(const Context &, QueryProcessingStage::Enum /*to_stage*/, const ASTPtr &) const override; StoragePtr getTargetTable() const; StoragePtr tryGetTargetTable() const; diff --git a/src/Storages/StorageMerge.cpp b/src/Storages/StorageMerge.cpp index dbc85feef05..abab85bd2b6 100644 --- a/src/Storages/StorageMerge.cpp +++ 
b/src/Storages/StorageMerge.cpp @@ -136,7 +136,7 @@ bool StorageMerge::mayBenefitFromIndexForIn(const ASTPtr & left_in_operand, cons } -QueryProcessingStage::Enum StorageMerge::getQueryProcessingStage(const Context & context, const ASTPtr & query_ptr) const +QueryProcessingStage::Enum StorageMerge::getQueryProcessingStage(const Context &context, QueryProcessingStage::Enum to_stage, const ASTPtr &query_ptr) const { auto stage_in_source_tables = QueryProcessingStage::FetchColumns; @@ -150,7 +150,7 @@ QueryProcessingStage::Enum StorageMerge::getQueryProcessingStage(const Context & if (table.get() != this) { ++selected_table_size; - stage_in_source_tables = std::max(stage_in_source_tables, table->getQueryProcessingStage(context, query_ptr)); + stage_in_source_tables = std::max(stage_in_source_tables, table->getQueryProcessingStage(context, to_stage, query_ptr)); } iterator->next(); @@ -287,7 +287,7 @@ Pipes StorageMerge::createSources(const SelectQueryInfo & query_info, const Quer return pipes; } - auto storage_stage = storage->getQueryProcessingStage(*modified_context, query_info.query); + auto storage_stage = storage->getQueryProcessingStage(*modified_context, QueryProcessingStage::Complete, query_info.query); if (processed_stage <= storage_stage) { /// If there are only virtual columns in query, you must request at least one other column. diff --git a/src/Storages/StorageMerge.h b/src/Storages/StorageMerge.h index 1d2df3cb9ce..b846663bc54 100644 --- a/src/Storages/StorageMerge.h +++ b/src/Storages/StorageMerge.h @@ -31,7 +31,7 @@ public: NameAndTypePair getColumn(const String & column_name) const override; bool hasColumn(const String & column_name) const override; - QueryProcessingStage::Enum getQueryProcessingStage(const Context &, const ASTPtr &) const override; + QueryProcessingStage::Enum getQueryProcessingStage(const Context &, QueryProcessingStage::Enum /*to_stage*/, const ASTPtr &) const override; Pipes read( const Names & column_names, From fbc11d3bf96665aef17d88ea3d1a29680b472193 Mon Sep 17 00:00:00 2001 From: Avogar Date: Sat, 4 Apr 2020 17:02:14 +0300 Subject: [PATCH 103/484] Move files --- dbms/CMakeLists.txt | 602 ------------------ .../metric_log/202004_1_521_104/checksums.txt | Bin 8834 -> 0 bytes .../metric_log/202004_1_521_104/columns.txt | 213 ------- .../metric_log/202004_1_521_104/count.txt | 1 - .../202004_1_521_104/minmax_event_date.idx | 1 - .../metric_log/202004_1_521_104/partition.dat | Bin 4 -> 0 bytes .../metric_log/202004_1_521_104/primary.idx | 1 - .../metric_log/202004_1_526_105/checksums.txt | Bin 8832 -> 0 bytes .../metric_log/202004_1_526_105/columns.txt | 213 ------- .../metric_log/202004_1_526_105/count.txt | 1 - .../202004_1_526_105/minmax_event_date.idx | 1 - .../metric_log/202004_1_526_105/partition.dat | Bin 4 -> 0 bytes .../metric_log/202004_1_526_105/primary.idx | 1 - .../metric_log/202004_522_522_0/checksums.txt | Bin 7190 -> 0 bytes .../metric_log/202004_522_522_0/columns.txt | 213 ------- .../metric_log/202004_522_522_0/count.txt | 1 - .../202004_522_522_0/minmax_event_date.idx | 1 - .../metric_log/202004_522_522_0/partition.dat | Bin 4 -> 0 bytes .../metric_log/202004_522_522_0/primary.idx | 1 - .../metric_log/202004_523_523_0/checksums.txt | Bin 6974 -> 0 bytes .../metric_log/202004_523_523_0/columns.txt | 213 ------- .../metric_log/202004_523_523_0/count.txt | 1 - .../202004_523_523_0/minmax_event_date.idx | 1 - .../metric_log/202004_523_523_0/partition.dat | Bin 4 -> 0 bytes .../metric_log/202004_523_523_0/primary.idx | 1 - 
.../metric_log/202004_524_524_0/checksums.txt | Bin 6745 -> 0 bytes .../metric_log/202004_524_524_0/columns.txt | 213 ------- .../metric_log/202004_524_524_0/count.txt | 1 - .../202004_524_524_0/minmax_event_date.idx | 1 - .../metric_log/202004_524_524_0/partition.dat | Bin 4 -> 0 bytes .../metric_log/202004_524_524_0/primary.idx | 1 - .../metric_log/202004_525_525_0/checksums.txt | Bin 6553 -> 0 bytes .../metric_log/202004_525_525_0/columns.txt | 213 ------- .../metric_log/202004_525_525_0/count.txt | 1 - .../202004_525_525_0/minmax_event_date.idx | 1 - .../metric_log/202004_525_525_0/partition.dat | Bin 4 -> 0 bytes .../metric_log/202004_525_525_0/primary.idx | 1 - .../metric_log/202004_526_526_0/checksums.txt | Bin 6663 -> 0 bytes .../metric_log/202004_526_526_0/columns.txt | 213 ------- .../metric_log/202004_526_526_0/count.txt | 1 - .../202004_526_526_0/minmax_event_date.idx | 1 - .../metric_log/202004_526_526_0/partition.dat | Bin 4 -> 0 bytes .../metric_log/202004_526_526_0/primary.idx | 1 - .../202004_5465_5997_129/checksums.txt | Bin 1284 -> 0 bytes .../text_log/202004_5465_5997_129/columns.txt | 14 - .../text_log/202004_5465_5997_129/count.txt | 1 - .../minmax_event_date.idx | 1 - .../202004_5465_5997_129/partition.dat | Bin 4 -> 0 bytes .../text_log/202004_5465_5997_129/primary.idx | 2 - .../text_log/202004_5998_5998_0/checksums.txt | Bin 1183 -> 0 bytes .../text_log/202004_5998_5998_0/columns.txt | 14 - .../text_log/202004_5998_5998_0/count.txt | 1 - .../202004_5998_5998_0/minmax_event_date.idx | 1 - .../text_log/202004_5998_5998_0/partition.dat | Bin 4 -> 0 bytes .../text_log/202004_5998_5998_0/primary.idx | 1 - .../text_log/202004_5999_5999_0/checksums.txt | Bin 1178 -> 0 bytes .../text_log/202004_5999_5999_0/columns.txt | 14 - .../text_log/202004_5999_5999_0/count.txt | 1 - .../202004_5999_5999_0/minmax_event_date.idx | 1 - .../text_log/202004_5999_5999_0/partition.dat | Bin 4 -> 0 bytes .../text_log/202004_5999_5999_0/primary.idx | 1 - .../text_log/202004_6000_6000_0/checksums.txt | Bin 1180 -> 0 bytes .../text_log/202004_6000_6000_0/columns.txt | 14 - .../text_log/202004_6000_6000_0/count.txt | 1 - .../202004_6000_6000_0/minmax_event_date.idx | 1 - .../text_log/202004_6000_6000_0/partition.dat | Bin 4 -> 0 bytes .../text_log/202004_6000_6000_0/primary.idx | 1 - .../text_log/202004_6001_6001_0/checksums.txt | Bin 1179 -> 0 bytes .../text_log/202004_6001_6001_0/columns.txt | 14 - .../text_log/202004_6001_6001_0/count.txt | 1 - .../202004_6001_6001_0/minmax_event_date.idx | 1 - .../text_log/202004_6001_6001_0/partition.dat | Bin 4 -> 0 bytes .../text_log/202004_6001_6001_0/primary.idx | 1 - src/CMakeLists.txt | 2 + .../Formats/Impl/MsgPackRowInputFormat.cpp | 0 .../Formats/Impl/MsgPackRowInputFormat.h | 0 .../Formats/Impl/MsgPackRowOutputFormat.cpp | 0 .../Formats/Impl/MsgPackRowOutputFormat.h | 0 .../01098_msgpack_format.reference | 0 .../0_stateless/01098_msgpack_format.sh | 0 80 files changed, 2 insertions(+), 2200 deletions(-) delete mode 100644 dbms/CMakeLists.txt delete mode 100644 dbms/programs/server/data/system/metric_log/202004_1_521_104/checksums.txt delete mode 100644 dbms/programs/server/data/system/metric_log/202004_1_521_104/columns.txt delete mode 100644 dbms/programs/server/data/system/metric_log/202004_1_521_104/count.txt delete mode 100644 dbms/programs/server/data/system/metric_log/202004_1_521_104/minmax_event_date.idx delete mode 100644 dbms/programs/server/data/system/metric_log/202004_1_521_104/partition.dat delete mode 100644 
dbms/programs/server/data/system/metric_log/202004_1_521_104/primary.idx delete mode 100644 dbms/programs/server/data/system/metric_log/202004_1_526_105/checksums.txt delete mode 100644 dbms/programs/server/data/system/metric_log/202004_1_526_105/columns.txt delete mode 100644 dbms/programs/server/data/system/metric_log/202004_1_526_105/count.txt delete mode 100644 dbms/programs/server/data/system/metric_log/202004_1_526_105/minmax_event_date.idx delete mode 100644 dbms/programs/server/data/system/metric_log/202004_1_526_105/partition.dat delete mode 100644 dbms/programs/server/data/system/metric_log/202004_1_526_105/primary.idx delete mode 100644 dbms/programs/server/data/system/metric_log/202004_522_522_0/checksums.txt delete mode 100644 dbms/programs/server/data/system/metric_log/202004_522_522_0/columns.txt delete mode 100644 dbms/programs/server/data/system/metric_log/202004_522_522_0/count.txt delete mode 100644 dbms/programs/server/data/system/metric_log/202004_522_522_0/minmax_event_date.idx delete mode 100644 dbms/programs/server/data/system/metric_log/202004_522_522_0/partition.dat delete mode 100644 dbms/programs/server/data/system/metric_log/202004_522_522_0/primary.idx delete mode 100644 dbms/programs/server/data/system/metric_log/202004_523_523_0/checksums.txt delete mode 100644 dbms/programs/server/data/system/metric_log/202004_523_523_0/columns.txt delete mode 100644 dbms/programs/server/data/system/metric_log/202004_523_523_0/count.txt delete mode 100644 dbms/programs/server/data/system/metric_log/202004_523_523_0/minmax_event_date.idx delete mode 100644 dbms/programs/server/data/system/metric_log/202004_523_523_0/partition.dat delete mode 100644 dbms/programs/server/data/system/metric_log/202004_523_523_0/primary.idx delete mode 100644 dbms/programs/server/data/system/metric_log/202004_524_524_0/checksums.txt delete mode 100644 dbms/programs/server/data/system/metric_log/202004_524_524_0/columns.txt delete mode 100644 dbms/programs/server/data/system/metric_log/202004_524_524_0/count.txt delete mode 100644 dbms/programs/server/data/system/metric_log/202004_524_524_0/minmax_event_date.idx delete mode 100644 dbms/programs/server/data/system/metric_log/202004_524_524_0/partition.dat delete mode 100644 dbms/programs/server/data/system/metric_log/202004_524_524_0/primary.idx delete mode 100644 dbms/programs/server/data/system/metric_log/202004_525_525_0/checksums.txt delete mode 100644 dbms/programs/server/data/system/metric_log/202004_525_525_0/columns.txt delete mode 100644 dbms/programs/server/data/system/metric_log/202004_525_525_0/count.txt delete mode 100644 dbms/programs/server/data/system/metric_log/202004_525_525_0/minmax_event_date.idx delete mode 100644 dbms/programs/server/data/system/metric_log/202004_525_525_0/partition.dat delete mode 100644 dbms/programs/server/data/system/metric_log/202004_525_525_0/primary.idx delete mode 100644 dbms/programs/server/data/system/metric_log/202004_526_526_0/checksums.txt delete mode 100644 dbms/programs/server/data/system/metric_log/202004_526_526_0/columns.txt delete mode 100644 dbms/programs/server/data/system/metric_log/202004_526_526_0/count.txt delete mode 100644 dbms/programs/server/data/system/metric_log/202004_526_526_0/minmax_event_date.idx delete mode 100644 dbms/programs/server/data/system/metric_log/202004_526_526_0/partition.dat delete mode 100644 dbms/programs/server/data/system/metric_log/202004_526_526_0/primary.idx delete mode 100644 dbms/programs/server/data/system/text_log/202004_5465_5997_129/checksums.txt 
delete mode 100644 dbms/programs/server/data/system/text_log/202004_5465_5997_129/columns.txt delete mode 100644 dbms/programs/server/data/system/text_log/202004_5465_5997_129/count.txt delete mode 100644 dbms/programs/server/data/system/text_log/202004_5465_5997_129/minmax_event_date.idx delete mode 100644 dbms/programs/server/data/system/text_log/202004_5465_5997_129/partition.dat delete mode 100644 dbms/programs/server/data/system/text_log/202004_5465_5997_129/primary.idx delete mode 100644 dbms/programs/server/data/system/text_log/202004_5998_5998_0/checksums.txt delete mode 100644 dbms/programs/server/data/system/text_log/202004_5998_5998_0/columns.txt delete mode 100644 dbms/programs/server/data/system/text_log/202004_5998_5998_0/count.txt delete mode 100644 dbms/programs/server/data/system/text_log/202004_5998_5998_0/minmax_event_date.idx delete mode 100644 dbms/programs/server/data/system/text_log/202004_5998_5998_0/partition.dat delete mode 100644 dbms/programs/server/data/system/text_log/202004_5998_5998_0/primary.idx delete mode 100644 dbms/programs/server/data/system/text_log/202004_5999_5999_0/checksums.txt delete mode 100644 dbms/programs/server/data/system/text_log/202004_5999_5999_0/columns.txt delete mode 100644 dbms/programs/server/data/system/text_log/202004_5999_5999_0/count.txt delete mode 100644 dbms/programs/server/data/system/text_log/202004_5999_5999_0/minmax_event_date.idx delete mode 100644 dbms/programs/server/data/system/text_log/202004_5999_5999_0/partition.dat delete mode 100644 dbms/programs/server/data/system/text_log/202004_5999_5999_0/primary.idx delete mode 100644 dbms/programs/server/data/system/text_log/202004_6000_6000_0/checksums.txt delete mode 100644 dbms/programs/server/data/system/text_log/202004_6000_6000_0/columns.txt delete mode 100644 dbms/programs/server/data/system/text_log/202004_6000_6000_0/count.txt delete mode 100644 dbms/programs/server/data/system/text_log/202004_6000_6000_0/minmax_event_date.idx delete mode 100644 dbms/programs/server/data/system/text_log/202004_6000_6000_0/partition.dat delete mode 100644 dbms/programs/server/data/system/text_log/202004_6000_6000_0/primary.idx delete mode 100644 dbms/programs/server/data/system/text_log/202004_6001_6001_0/checksums.txt delete mode 100644 dbms/programs/server/data/system/text_log/202004_6001_6001_0/columns.txt delete mode 100644 dbms/programs/server/data/system/text_log/202004_6001_6001_0/count.txt delete mode 100644 dbms/programs/server/data/system/text_log/202004_6001_6001_0/minmax_event_date.idx delete mode 100644 dbms/programs/server/data/system/text_log/202004_6001_6001_0/partition.dat delete mode 100644 dbms/programs/server/data/system/text_log/202004_6001_6001_0/primary.idx rename {dbms/src => src}/Processors/Formats/Impl/MsgPackRowInputFormat.cpp (100%) rename {dbms/src => src}/Processors/Formats/Impl/MsgPackRowInputFormat.h (100%) rename {dbms/src => src}/Processors/Formats/Impl/MsgPackRowOutputFormat.cpp (100%) rename {dbms/src => src}/Processors/Formats/Impl/MsgPackRowOutputFormat.h (100%) rename {dbms/tests => tests}/queries/0_stateless/01098_msgpack_format.reference (100%) rename {dbms/tests => tests}/queries/0_stateless/01098_msgpack_format.sh (100%) diff --git a/dbms/CMakeLists.txt b/dbms/CMakeLists.txt deleted file mode 100644 index cf22c741dd2..00000000000 --- a/dbms/CMakeLists.txt +++ /dev/null @@ -1,602 +0,0 @@ -set(ConfigIncludePath ${CMAKE_CURRENT_BINARY_DIR}/includes/configs CACHE INTERNAL "Path to generated configuration files.") 
-include_directories(${ConfigIncludePath}) - -if (USE_INCLUDE_WHAT_YOU_USE) - set (CMAKE_CXX_INCLUDE_WHAT_YOU_USE ${IWYU_PATH}) -endif () - -if (USE_CLANG_TIDY) - set (CMAKE_CXX_CLANG_TIDY "${CLANG_TIDY_PATH}") -endif () - -if(COMPILER_PIPE) - set(MAX_COMPILER_MEMORY 2500) -else() - set(MAX_COMPILER_MEMORY 1500) -endif() -if(MAKE_STATIC_LIBRARIES) - set(MAX_LINKER_MEMORY 3500) -else() - set(MAX_LINKER_MEMORY 2500) -endif() -include(../cmake/limit_jobs.cmake) - -set (CONFIG_VERSION ${CMAKE_CURRENT_BINARY_DIR}/src/Common/config_version.h) -set (CONFIG_COMMON ${CMAKE_CURRENT_BINARY_DIR}/src/Common/config.h) - -include (cmake/version.cmake) -message (STATUS "Will build ${VERSION_FULL} revision ${VERSION_REVISION} ${VERSION_OFFICIAL}") -configure_file (src/Common/config.h.in ${CONFIG_COMMON}) -configure_file (src/Common/config_version.h.in ${CONFIG_VERSION}) -configure_file (src/Core/config_core.h.in ${CMAKE_CURRENT_BINARY_DIR}/src/Core/include/config_core.h) - -if (NOT MSVC) - set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wextra") -endif () - -if (USE_DEBUG_HELPERS) - set (INCLUDE_DEBUG_HELPERS "-I${ClickHouse_SOURCE_DIR}/base -include ${ClickHouse_SOURCE_DIR}/dbms/src/Core/iostream_debug_helpers.h") - set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${INCLUDE_DEBUG_HELPERS}") -endif () - -# Add some warnings that are not available even with -Wall -Wextra -Wpedantic. - -option (WEVERYTHING "Enables -Weverything option with some exceptions. This is intended for exploration of new compiler warnings that may be found to be useful. Only makes sense for clang." ON) - -if (COMPILER_CLANG) - add_warning(pedantic) - no_warning(gnu-anonymous-struct) - no_warning(nested-anon-types) - no_warning(vla-extension) - no_warning(zero-length-array) - - add_warning(comma) - add_warning(conditional-uninitialized) - add_warning(covered-switch-default) - add_warning(deprecated) - add_warning(embedded-directive) - add_warning(empty-init-stmt) # linux-only - add_warning(extra-semi-stmt) # linux-only - add_warning(extra-semi) - add_warning(gnu-case-range) - add_warning(inconsistent-missing-destructor-override) - add_warning(newline-eof) - add_warning(old-style-cast) - add_warning(range-loop-analysis) - add_warning(redundant-parens) - add_warning(reserved-id-macro) - add_warning(shadow-field) # clang 8+ - add_warning(shadow-uncaptured-local) - add_warning(shadow) - add_warning(string-plus-int) # clang 8+ - add_warning(undef) - add_warning(unreachable-code-return) - add_warning(unreachable-code) - add_warning(unused-exception-parameter) - add_warning(unused-macros) - add_warning(unused-member-function) - add_warning(zero-as-null-pointer-constant) - - if (WEVERYTHING) - add_warning(everything) - no_warning(c++98-compat-pedantic) - no_warning(c++98-compat) - no_warning(c99-extensions) - no_warning(conversion) - no_warning(ctad-maybe-unsupported) # clang 9+, linux-only - no_warning(deprecated-dynamic-exception-spec) - no_warning(disabled-macro-expansion) - no_warning(documentation-unknown-command) - no_warning(double-promotion) - no_warning(exit-time-destructors) - no_warning(float-equal) - no_warning(global-constructors) - no_warning(gnu-anonymous-struct) - no_warning(missing-prototypes) - no_warning(missing-variable-declarations) - no_warning(nested-anon-types) - no_warning(packed) - no_warning(padded) - no_warning(return-std-move-in-c++11) # clang 7+ - no_warning(shift-sign-overflow) - no_warning(sign-conversion) - no_warning(switch-enum) - no_warning(undefined-func-template) - no_warning(unused-template) - 
no_warning(vla-extension) - no_warning(vla) - no_warning(weak-template-vtables) - no_warning(weak-vtables) - no_warning(zero-length-array) - - # TODO Enable conversion, sign-conversion, double-promotion warnings. - endif () -elseif (COMPILER_GCC) - # Add compiler options only to c++ compiler - function(add_cxx_compile_options option) - add_compile_options("$<$,CXX>:${option}>") - endfunction() - # Warn about boolean expression compared with an integer value different from true/false - add_cxx_compile_options(-Wbool-compare) - # Warn whenever a pointer is cast such that the required alignment of the target is increased. - add_cxx_compile_options(-Wcast-align) - # Warn whenever a pointer is cast so as to remove a type qualifier from the target type. - add_cxx_compile_options(-Wcast-qual) - # Warn when deleting a pointer to incomplete type, which may cause undefined behavior at runtime - add_cxx_compile_options(-Wdelete-incomplete) - # Warn if a requested optimization pass is disabled. Code is too big or too complex - add_cxx_compile_options(-Wdisabled-optimization) - # Warn about duplicated conditions in an if-else-if chain - add_cxx_compile_options(-Wduplicated-cond) - # Warn about a comparison between values of different enumerated types - add_cxx_compile_options(-Wenum-compare) - # Warn about uninitialized variables that are initialized with themselves - add_cxx_compile_options(-Winit-self) - # Warn about logical not used on the left hand side operand of a comparison - add_cxx_compile_options(-Wlogical-not-parentheses) - # Warn about suspicious uses of logical operators in expressions - add_cxx_compile_options(-Wlogical-op) - # Warn if there exists a path from the function entry to a use of the variable that is uninitialized. - add_cxx_compile_options(-Wmaybe-uninitialized) - # Warn when the indentation of the code does not reflect the block structure - add_cxx_compile_options(-Wmisleading-indentation) - # Warn if a global function is defined without a previous declaration - disabled because of build times - # add_cxx_compile_options(-Wmissing-declarations) - # Warn if a user-supplied include directory does not exist - add_cxx_compile_options(-Wmissing-include-dirs) - # Obvious - add_cxx_compile_options(-Wnon-virtual-dtor) - # Obvious - add_cxx_compile_options(-Wno-return-local-addr) - # This warning is disabled due to false positives if compiled with libc++: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90037 - #add_cxx_compile_options(-Wnull-dereference) - # Obvious - add_cxx_compile_options(-Wodr) - # Obvious - add_cxx_compile_options(-Wold-style-cast) - # Warn when a function declaration hides virtual functions from a base class - # add_cxx_compile_options(-Woverloaded-virtual) - # Warn about placement new expressions with undefined behavior - add_cxx_compile_options(-Wplacement-new=2) - # Warn about anything that depends on the “size of” a function type or of void - add_cxx_compile_options(-Wpointer-arith) - # Warn if anything is declared more than once in the same scope - add_cxx_compile_options(-Wredundant-decls) - # Member initialization reordering - add_cxx_compile_options(-Wreorder) - # Obvious - add_cxx_compile_options(-Wshadow) - # Warn if left shifting a negative value - add_cxx_compile_options(-Wshift-negative-value) - # Warn about a definition of an unsized deallocation function - add_cxx_compile_options(-Wsized-deallocation) - # Warn when the sizeof operator is applied to a parameter that is declared as an array in a function definition - 
add_cxx_compile_options(-Wsizeof-array-argument) - # Warn for suspicious length parameters to certain string and memory built-in functions if the argument uses sizeof - add_cxx_compile_options(-Wsizeof-pointer-memaccess) - - if (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 9) - # Warn about overriding virtual functions that are not marked with the override keyword - add_cxx_compile_options(-Wsuggest-override) - endif () - - # Warn whenever a switch statement has an index of boolean type and the case values are outside the range of a boolean type - add_cxx_compile_options(-Wswitch-bool) - # Warn if a self-comparison always evaluates to true or false - add_cxx_compile_options(-Wtautological-compare) - # Warn about trampolines generated for pointers to nested functions - add_cxx_compile_options(-Wtrampolines) - # Obvious - add_cxx_compile_options(-Wunused) - # Warn if vector operation is not implemented via SIMD capabilities of the architecture - add_cxx_compile_options(-Wvector-operation-performance) -endif () - -if (COMPILER_GCC) - # If we leave this optimization enabled, gcc-7 replaces a pair of SSE intrinsics (16 byte load, store) with a call to memcpy. - # It leads to slow code. This is compiler bug. It looks like this: - # - # (gdb) bt - #0 memcpy (destination=0x7faa6e9f1638, source=0x7faa81d9e9a8, size=16) at ../libs/libmemcpy/memcpy.h:11 - #1 0x0000000005341c5f in _mm_storeu_si128 (__B=..., __P=<optimized out>) at /usr/lib/gcc/x86_64-linux-gnu/7/include/emmintrin.h:720 - #2 memcpySmallAllowReadWriteOverflow15Impl (n=<optimized out>, src=<optimized out>, dst=<optimized out>) at ../dbms/src/Common/memcpySmall.h:37 - - add_definitions ("-fno-tree-loop-distribute-patterns") -endif () - -add_subdirectory (src) - -set(dbms_headers) -set(dbms_sources) - -add_headers_and_sources(clickhouse_common_io src/Common) -add_headers_and_sources(clickhouse_common_io src/Common/HashTable) -add_headers_and_sources(clickhouse_common_io src/IO) -list (REMOVE_ITEM clickhouse_common_io_sources src/Common/malloc.cpp src/Common/new_delete.cpp) - -if(USE_RDKAFKA) - add_headers_and_sources(dbms src/Storages/Kafka) -endif() - - -list (APPEND clickhouse_common_io_sources ${CONFIG_BUILD}) -list (APPEND clickhouse_common_io_headers ${CONFIG_VERSION} ${CONFIG_COMMON}) - -list (APPEND dbms_sources src/Functions/IFunction.cpp src/Functions/FunctionFactory.cpp src/Functions/FunctionHelpers.cpp src/Functions/extractTimeZoneFromFunctionArguments.cpp) -list (APPEND dbms_headers src/Functions/IFunctionImpl.h src/Functions/FunctionFactory.h src/Functions/FunctionHelpers.h src/Functions/extractTimeZoneFromFunctionArguments.h) - -list (APPEND dbms_sources - src/AggregateFunctions/AggregateFunctionFactory.cpp - src/AggregateFunctions/AggregateFunctionCombinatorFactory.cpp - src/AggregateFunctions/AggregateFunctionState.cpp - src/AggregateFunctions/parseAggregateFunctionParameters.cpp) - -list (APPEND dbms_headers - src/AggregateFunctions/IAggregateFunction.h - src/AggregateFunctions/IAggregateFunctionCombinator.h - src/AggregateFunctions/AggregateFunctionFactory.h - src/AggregateFunctions/AggregateFunctionCombinatorFactory.h - src/AggregateFunctions/AggregateFunctionState.h - src/AggregateFunctions/FactoryHelpers.h - src/AggregateFunctions/parseAggregateFunctionParameters.h) - -list (APPEND dbms_sources src/TableFunctions/ITableFunction.cpp src/TableFunctions/TableFunctionFactory.cpp) -list (APPEND dbms_headers src/TableFunctions/ITableFunction.h src/TableFunctions/TableFunctionFactory.h) -list (APPEND dbms_sources src/Dictionaries/DictionaryFactory.cpp
src/Dictionaries/DictionarySourceFactory.cpp src/Dictionaries/DictionaryStructure.cpp src/Dictionaries/getDictionaryConfigurationFromAST.cpp) -list (APPEND dbms_headers src/Dictionaries/DictionaryFactory.h src/Dictionaries/DictionarySourceFactory.h src/Dictionaries/DictionaryStructure.h src/Dictionaries/getDictionaryConfigurationFromAST.h) - -if (NOT ENABLE_SSL) - list (REMOVE_ITEM clickhouse_common_io_sources src/Common/OpenSSLHelpers.cpp) - list (REMOVE_ITEM clickhouse_common_io_headers src/Common/OpenSSLHelpers.h) -endif () - -add_library(clickhouse_common_io ${clickhouse_common_io_headers} ${clickhouse_common_io_sources}) - -add_library (clickhouse_malloc OBJECT src/Common/malloc.cpp) -set_source_files_properties(src/Common/malloc.cpp PROPERTIES COMPILE_FLAGS "-fno-builtin") - -add_library (clickhouse_new_delete STATIC src/Common/new_delete.cpp) -target_link_libraries (clickhouse_new_delete PRIVATE clickhouse_common_io) - -if (OS_FREEBSD) - target_compile_definitions (clickhouse_common_io PUBLIC CLOCK_MONOTONIC_COARSE=CLOCK_MONOTONIC_FAST) -endif () - -add_subdirectory(src/Common/ZooKeeper) -add_subdirectory(src/Common/Config) - -set (all_modules) -macro(add_object_library name common_path) - if (MAKE_STATIC_LIBRARIES OR NOT SPLIT_SHARED_LIBRARIES) - add_headers_and_sources(dbms ${common_path}) - else () - list (APPEND all_modules ${name}) - add_headers_and_sources(${name} ${common_path}) - add_library(${name} SHARED ${${name}_sources} ${${name}_headers}) - target_link_libraries (${name} PRIVATE -Wl,--unresolved-symbols=ignore-all) - endif () -endmacro() - -add_object_library(clickhouse_access src/Access) -add_object_library(clickhouse_core src/Core) -add_object_library(clickhouse_compression src/Compression) -add_object_library(clickhouse_datastreams src/DataStreams) -add_object_library(clickhouse_datatypes src/DataTypes) -add_object_library(clickhouse_databases src/Databases) -add_object_library(clickhouse_disks src/Disks) -add_object_library(clickhouse_interpreters src/Interpreters) -add_object_library(clickhouse_interpreters_clusterproxy src/Interpreters/ClusterProxy) -add_object_library(clickhouse_columns src/Columns) -add_object_library(clickhouse_storages src/Storages) -add_object_library(clickhouse_storages_distributed src/Storages/Distributed) -add_object_library(clickhouse_storages_mergetree src/Storages/MergeTree) -add_object_library(clickhouse_storages_liveview src/Storages/LiveView) -add_object_library(clickhouse_client src/Client) -add_object_library(clickhouse_formats src/Formats) -add_object_library(clickhouse_processors src/Processors) -add_object_library(clickhouse_processors_executors src/Processors/Executors) -add_object_library(clickhouse_processors_formats src/Processors/Formats) -add_object_library(clickhouse_processors_formats_impl src/Processors/Formats/Impl) -add_object_library(clickhouse_processors_transforms src/Processors/Transforms) -add_object_library(clickhouse_processors_sources src/Processors/Sources) - - -if (MAKE_STATIC_LIBRARIES OR NOT SPLIT_SHARED_LIBRARIES) - add_library (dbms STATIC ${dbms_headers} ${dbms_sources}) - set (all_modules dbms) -else() - add_library (dbms SHARED ${dbms_headers} ${dbms_sources}) - target_link_libraries (dbms PUBLIC ${all_modules}) - list (APPEND all_modules dbms) - # force all split libs to be linked - set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,--no-as-needed") -endif () - -macro (dbms_target_include_directories) - foreach (module ${all_modules}) - target_include_directories (${module} ${ARGN}) - 
endforeach () -endmacro () - -macro (dbms_target_link_libraries) - foreach (module ${all_modules}) - target_link_libraries (${module} ${ARGN}) - endforeach () -endmacro () - -if (USE_EMBEDDED_COMPILER) - dbms_target_link_libraries (PRIVATE ${REQUIRED_LLVM_LIBRARIES}) - dbms_target_include_directories (SYSTEM BEFORE PUBLIC ${LLVM_INCLUDE_DIRS}) -endif () - -if (CMAKE_BUILD_TYPE_UC STREQUAL "RELEASE" OR CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" OR CMAKE_BUILD_TYPE_UC STREQUAL "MINSIZEREL") - # Won't generate debug info for files with heavy template instantiation to achieve faster linking and lower size. - set_source_files_properties( - src/Dictionaries/FlatDictionary.cpp - src/Dictionaries/HashedDictionary.cpp - src/Dictionaries/CacheDictionary.cpp - src/Dictionaries/TrieDictionary.cpp - src/Dictionaries/RangeHashedDictionary.cpp - src/Dictionaries/ComplexKeyHashedDictionary.cpp - src/Dictionaries/ComplexKeyCacheDictionary.cpp - src/Dictionaries/ComplexKeyCacheDictionary_generate1.cpp - src/Dictionaries/ComplexKeyCacheDictionary_generate2.cpp - src/Dictionaries/ComplexKeyCacheDictionary_generate3.cpp - src/Dictionaries/ODBCBlockInputStream.cpp - src/Dictionaries/HTTPDictionarySource.cpp - src/Dictionaries/LibraryDictionarySource.cpp - src/Dictionaries/ExecutableDictionarySource.cpp - src/Dictionaries/ClickHouseDictionarySource.cpp - PROPERTIES COMPILE_FLAGS -g0) -endif () - -# Otherwise it will slow down stack traces printing too much. -set_source_files_properties( - src/Common/Elf.cpp - src/Common/Dwarf.cpp - src/Common/SymbolIndex.cpp - PROPERTIES COMPILE_FLAGS "-O3 ${WITHOUT_COVERAGE}") - -target_link_libraries (clickhouse_common_io - PUBLIC - common - PRIVATE - string_utils - widechar_width - ${LINK_LIBRARIES_ONLY_ON_X86_64} - PUBLIC - ${DOUBLE_CONVERSION_LIBRARIES} - ryu - PUBLIC - ${Poco_Net_LIBRARY} - ${Poco_Util_LIBRARY} - ${Poco_Foundation_LIBRARY} - ${Poco_XML_LIBRARY} -) - -if(RE2_LIBRARY) - target_link_libraries(clickhouse_common_io PUBLIC ${RE2_LIBRARY}) -endif() -if(RE2_ST_LIBRARY) - target_link_libraries(clickhouse_common_io PUBLIC ${RE2_ST_LIBRARY}) -endif() - -target_link_libraries(clickhouse_common_io - PUBLIC - ${CITYHASH_LIBRARIES} - PRIVATE - ${Poco_XML_LIBRARY} - ${ZLIB_LIBRARIES} - ${EXECINFO_LIBRARIES} - PUBLIC - ${Boost_SYSTEM_LIBRARY} - ${Boost_PROGRAM_OPTIONS_LIBRARY} - PUBLIC - roaring -) - -if (USE_RDKAFKA) - dbms_target_link_libraries(PRIVATE ${CPPKAFKA_LIBRARY} ${RDKAFKA_LIBRARY}) - if(NOT USE_INTERNAL_RDKAFKA_LIBRARY) - dbms_target_include_directories(SYSTEM BEFORE PRIVATE ${RDKAFKA_INCLUDE_DIR}) - endif() -endif() - - -if(RE2_INCLUDE_DIR) - target_include_directories(clickhouse_common_io SYSTEM BEFORE PUBLIC ${RE2_INCLUDE_DIR}) -endif() - -if(CPUID_LIBRARY) - target_link_libraries(clickhouse_common_io PRIVATE ${CPUID_LIBRARY}) -endif() - -if(CPUINFO_LIBRARY) - target_link_libraries(clickhouse_common_io PRIVATE ${CPUINFO_LIBRARY}) -endif() - -dbms_target_link_libraries ( - PRIVATE - clickhouse_parsers - clickhouse_common_config - clickhouse_common_zookeeper - string_utils # FIXME: not sure if it's private - PUBLIC - clickhouse_common_io - PRIVATE - clickhouse_dictionaries_embedded - ${LZ4_LIBRARY} - PUBLIC - ${MYSQLXX_LIBRARY} - PRIVATE - ${BTRIE_LIBRARIES} - ${Boost_PROGRAM_OPTIONS_LIBRARY} - ${Boost_FILESYSTEM_LIBRARY} - PUBLIC - ${Boost_SYSTEM_LIBRARY} -) - -target_include_directories(clickhouse_common_io PUBLIC ${CMAKE_CURRENT_BINARY_DIR}/src/Core/include) # uses some includes from core -dbms_target_include_directories(PUBLIC 
${CMAKE_CURRENT_BINARY_DIR}/src/Core/include) - -target_include_directories(clickhouse_common_io SYSTEM PUBLIC ${PCG_RANDOM_INCLUDE_DIR}) -dbms_target_include_directories(SYSTEM PUBLIC ${PCG_RANDOM_INCLUDE_DIR}) - -dbms_target_include_directories(SYSTEM BEFORE PUBLIC ${PDQSORT_INCLUDE_DIR}) - -if (NOT USE_INTERNAL_LZ4_LIBRARY AND LZ4_INCLUDE_DIR) - dbms_target_include_directories(SYSTEM BEFORE PRIVATE ${LZ4_INCLUDE_DIR}) -endif () - -if (ZSTD_LIBRARY) - dbms_target_link_libraries(PRIVATE ${ZSTD_LIBRARY}) - if (NOT USE_INTERNAL_ZSTD_LIBRARY AND ZSTD_INCLUDE_DIR) - dbms_target_include_directories(SYSTEM BEFORE PRIVATE ${ZSTD_INCLUDE_DIR}) - endif () -endif() - -if (NOT USE_INTERNAL_BOOST_LIBRARY) - target_include_directories (clickhouse_common_io SYSTEM BEFORE PUBLIC ${Boost_INCLUDE_DIRS}) -endif () - -if (Poco_SQL_FOUND AND NOT USE_INTERNAL_POCO_LIBRARY) - target_include_directories (clickhouse_common_io SYSTEM PRIVATE ${Poco_SQL_INCLUDE_DIR}) - dbms_target_include_directories (SYSTEM PRIVATE ${Poco_SQL_INCLUDE_DIR}) -endif() - -if (USE_POCO_SQLODBC) - target_link_libraries (clickhouse_common_io PRIVATE ${Poco_SQL_LIBRARY}) - dbms_target_link_libraries (PRIVATE ${Poco_SQLODBC_LIBRARY} ${Poco_SQL_LIBRARY}) - if (NOT USE_INTERNAL_POCO_LIBRARY) - target_include_directories (clickhouse_common_io SYSTEM PRIVATE ${ODBC_INCLUDE_DIRS} ${Poco_SQL_INCLUDE_DIR}) - dbms_target_include_directories (SYSTEM PRIVATE ${ODBC_INCLUDE_DIRS} ${Poco_SQLODBC_INCLUDE_DIR} SYSTEM PUBLIC ${Poco_SQL_INCLUDE_DIR}) - endif() -endif() - -if (Poco_Data_FOUND) - target_include_directories (clickhouse_common_io SYSTEM PRIVATE ${Poco_Data_INCLUDE_DIR}) - dbms_target_include_directories (SYSTEM PRIVATE ${Poco_Data_INCLUDE_DIR}) -endif() - -if (USE_POCO_DATAODBC) - target_link_libraries (clickhouse_common_io PRIVATE ${Poco_Data_LIBRARY}) - dbms_target_link_libraries (PRIVATE ${Poco_DataODBC_LIBRARY}) - if (NOT USE_INTERNAL_POCO_LIBRARY) - dbms_target_include_directories (SYSTEM PRIVATE ${ODBC_INCLUDE_DIRS} ${Poco_DataODBC_INCLUDE_DIR}) - endif() -endif() - -if (USE_POCO_MONGODB) - dbms_target_link_libraries (PRIVATE ${Poco_MongoDB_LIBRARY}) -endif() - -if (USE_POCO_REDIS) - dbms_target_link_libraries (PRIVATE ${Poco_Redis_LIBRARY}) -endif() - -if (USE_POCO_NETSSL) - target_link_libraries (clickhouse_common_io PRIVATE ${Poco_NetSSL_LIBRARY} ${Poco_Crypto_LIBRARY}) - dbms_target_link_libraries (PRIVATE ${Poco_NetSSL_LIBRARY} ${Poco_Crypto_LIBRARY}) -endif() - -if (USE_POCO_JSON) - dbms_target_link_libraries (PRIVATE ${Poco_JSON_LIBRARY}) -endif() - -dbms_target_link_libraries (PRIVATE ${Poco_Foundation_LIBRARY}) - -if (USE_ICU) - dbms_target_link_libraries (PRIVATE ${ICU_LIBRARIES}) - dbms_target_include_directories (SYSTEM PRIVATE ${ICU_INCLUDE_DIRS}) -endif () - -if (USE_CAPNP) - dbms_target_link_libraries (PRIVATE ${CAPNP_LIBRARIES}) -endif () - -if (USE_PARQUET) - dbms_target_link_libraries(PRIVATE ${PARQUET_LIBRARY}) - if (NOT USE_INTERNAL_PARQUET_LIBRARY OR USE_INTERNAL_PARQUET_LIBRARY_NATIVE_CMAKE) - dbms_target_include_directories (SYSTEM BEFORE PRIVATE ${PARQUET_INCLUDE_DIR} ${ARROW_INCLUDE_DIR}) - endif () -endif () - -if (USE_AVRO) - dbms_target_link_libraries(PRIVATE ${AVROCPP_LIBRARY}) - dbms_target_include_directories (SYSTEM BEFORE PRIVATE ${AVROCPP_INCLUDE_DIR}) -endif () - -if (OPENSSL_CRYPTO_LIBRARY) - dbms_target_link_libraries (PRIVATE ${OPENSSL_CRYPTO_LIBRARY}) - target_link_libraries (clickhouse_common_io PRIVATE ${OPENSSL_CRYPTO_LIBRARY}) -endif () - -dbms_target_include_directories (SYSTEM BEFORE PRIVATE 
${DIVIDE_INCLUDE_DIR}) -dbms_target_include_directories (SYSTEM BEFORE PRIVATE ${SPARSEHASH_INCLUDE_DIR}) - -if (USE_PROTOBUF) - dbms_target_link_libraries (PRIVATE ${Protobuf_LIBRARY}) - dbms_target_include_directories (SYSTEM BEFORE PRIVATE ${Protobuf_INCLUDE_DIR}) -endif () - -if (USE_HDFS) - target_link_libraries (clickhouse_common_io PUBLIC ${HDFS3_LIBRARY}) - target_include_directories (clickhouse_common_io SYSTEM BEFORE PUBLIC ${HDFS3_INCLUDE_DIR}) -endif() - -if (USE_AWS_S3) - target_link_libraries (clickhouse_common_io PUBLIC ${AWS_S3_LIBRARY}) - target_include_directories (clickhouse_common_io SYSTEM BEFORE PUBLIC ${AWS_S3_CORE_INCLUDE_DIR}) - target_include_directories (clickhouse_common_io SYSTEM BEFORE PUBLIC ${AWS_S3_INCLUDE_DIR}) -endif() - -if (USE_BROTLI) - target_link_libraries (clickhouse_common_io PRIVATE ${BROTLI_LIBRARY}) - target_include_directories (clickhouse_common_io SYSTEM BEFORE PRIVATE ${BROTLI_INCLUDE_DIR}) -endif() - -if (USE_JEMALLOC) - dbms_target_include_directories (SYSTEM BEFORE PRIVATE ${JEMALLOC_INCLUDE_DIR}) # used in Interpreters/AsynchronousMetrics.cpp - target_include_directories (clickhouse_new_delete SYSTEM BEFORE PRIVATE ${JEMALLOC_INCLUDE_DIR}) - - if(NOT MAKE_STATIC_LIBRARIES AND ${JEMALLOC_LIBRARIES} MATCHES "${CMAKE_STATIC_LIBRARY_SUFFIX}$") - # mallctl in dbms/src/Interpreters/AsynchronousMetrics.cpp - # Actually we link JEMALLOC to almost all libraries. - # This is just hotfix for some uninvestigated problem. - target_link_libraries(clickhouse_interpreters PRIVATE ${JEMALLOC_LIBRARIES}) - endif() -endif () - -dbms_target_include_directories (PUBLIC ${DBMS_INCLUDE_DIR}) -target_include_directories (clickhouse_common_io PUBLIC ${DBMS_INCLUDE_DIR}) - -target_include_directories (clickhouse_common_io SYSTEM BEFORE PUBLIC ${DOUBLE_CONVERSION_INCLUDE_DIR}) - -target_include_directories (clickhouse_common_io SYSTEM BEFORE PUBLIC ${MSGPACK_INCLUDE_DIR}) - -add_subdirectory (programs) -add_subdirectory (tests) - -if (ENABLE_TESTS AND USE_GTEST) - macro (grep_gtest_sources BASE_DIR DST_VAR) - # Cold match files that are not in tests/ directories - file(GLOB_RECURSE "${DST_VAR}" RELATIVE "${BASE_DIR}" "gtest*.cpp") - endmacro() - - # attach all dbms gtest sources - grep_gtest_sources(${ClickHouse_SOURCE_DIR}/dbms dbms_gtest_sources) - add_executable(unit_tests_dbms ${dbms_gtest_sources}) - - # gtest framework has substandard code - target_compile_options(unit_tests_dbms PRIVATE - -Wno-zero-as-null-pointer-constant - -Wno-undef - -Wno-sign-compare - -Wno-used-but-marked-unused - -Wno-missing-noreturn - -Wno-gnu-zero-variadic-macro-arguments - ) - - target_link_libraries(unit_tests_dbms PRIVATE ${GTEST_BOTH_LIBRARIES} clickhouse_functions clickhouse_parsers dbms clickhouse_common_zookeeper string_utils) - add_check(unit_tests_dbms) -endif ()
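A side note on the warning plumbing in the file deleted above: its add_cxx_compile_options() helper wraps each flag in a COMPILE_LANGUAGE generator expression, so GCC warnings that only exist for C++ are never passed when C sources are compiled in the same tree. A minimal self-contained sketch of that idiom, with hypothetical project and target names (not part of this patch):

cmake_minimum_required(VERSION 3.3)  # $<COMPILE_LANGUAGE:...> requires CMake 3.3+
project(cxx_only_warnings C CXX)

# Mirrors the helper from the deleted dbms/CMakeLists.txt: the generator
# expression expands to the flag only while a C++ source file is compiled.
function(add_cxx_compile_options option)
    add_compile_options("$<$<COMPILE_LANGUAGE:CXX>:${option}>")
endfunction()

add_cxx_compile_options(-Wold-style-cast)  # C++-only warning; C files are unaffected
add_executable(demo main.cpp legacy.c)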
diff --git a/dbms/programs/server/data/system/metric_log/202004_1_521_104/checksums.txt b/dbms/programs/server/data/system/metric_log/202004_1_521_104/checksums.txt deleted file mode 100644 index a0fdbe718cb259905a03dfc4cce35070c34c8b7c..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 8834
[8834 bytes of base85-encoded binary patch data omitted]
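For orientation: dbms/programs/server/data is the runtime data directory of a locally started server, and each 202004_* directory below is an on-disk MergeTree data part of the built-in system.metric_log table, evidently committed by accident and cleaned up by this patch. The files inside a part follow the usual MergeTree layout: checksums.txt (binary, per-file checksums), columns.txt (the column list, dumped in full below), count.txt (the row count), primary.idx (the sparse primary-key index), and partition.dat with minmax_event_date.idx (the partition value and its min/max date index). The columns.txt listing that follows also documents the metric_log schema itself: event_date, event_time and milliseconds, then one UInt64 column per ProfileEvent_* counter and one Int64 column per CurrentMetric_* gauge.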
diff --git a/dbms/programs/server/data/system/metric_log/202004_1_521_104/columns.txt b/dbms/programs/server/data/system/metric_log/202004_1_521_104/columns.txt deleted file mode 100644 index 6b901df244b..00000000000 --- a/dbms/programs/server/data/system/metric_log/202004_1_521_104/columns.txt +++ /dev/null @@ -1,213 +0,0 @@ -columns format version: 1 -211 columns: -`event_date` Date -`event_time` DateTime -`milliseconds` UInt64 -`ProfileEvent_Query` UInt64 -`ProfileEvent_SelectQuery` UInt64 -`ProfileEvent_InsertQuery` UInt64 -`ProfileEvent_FileOpen` UInt64 -`ProfileEvent_Seek` UInt64 -`ProfileEvent_ReadBufferFromFileDescriptorRead` UInt64 -`ProfileEvent_ReadBufferFromFileDescriptorReadFailed` UInt64 -`ProfileEvent_ReadBufferFromFileDescriptorReadBytes` UInt64 -`ProfileEvent_WriteBufferFromFileDescriptorWrite` UInt64 -`ProfileEvent_WriteBufferFromFileDescriptorWriteFailed` UInt64 -`ProfileEvent_WriteBufferFromFileDescriptorWriteBytes` UInt64 -`ProfileEvent_ReadBufferAIORead` UInt64 -`ProfileEvent_ReadBufferAIOReadBytes` UInt64 -`ProfileEvent_WriteBufferAIOWrite` UInt64 -`ProfileEvent_WriteBufferAIOWriteBytes` UInt64 -`ProfileEvent_ReadCompressedBytes` UInt64 -`ProfileEvent_CompressedReadBufferBlocks` UInt64
-`ProfileEvent_CompressedReadBufferBytes` UInt64 -`ProfileEvent_UncompressedCacheHits` UInt64 -`ProfileEvent_UncompressedCacheMisses` UInt64 -`ProfileEvent_UncompressedCacheWeightLost` UInt64 -`ProfileEvent_IOBufferAllocs` UInt64 -`ProfileEvent_IOBufferAllocBytes` UInt64 -`ProfileEvent_ArenaAllocChunks` UInt64 -`ProfileEvent_ArenaAllocBytes` UInt64 -`ProfileEvent_FunctionExecute` UInt64 -`ProfileEvent_TableFunctionExecute` UInt64 -`ProfileEvent_MarkCacheHits` UInt64 -`ProfileEvent_MarkCacheMisses` UInt64 -`ProfileEvent_CreatedReadBufferOrdinary` UInt64 -`ProfileEvent_CreatedReadBufferAIO` UInt64 -`ProfileEvent_CreatedReadBufferAIOFailed` UInt64 -`ProfileEvent_CreatedReadBufferMMap` UInt64 -`ProfileEvent_CreatedReadBufferMMapFailed` UInt64 -`ProfileEvent_CreatedWriteBufferOrdinary` UInt64 -`ProfileEvent_CreatedWriteBufferAIO` UInt64 -`ProfileEvent_CreatedWriteBufferAIOFailed` UInt64 -`ProfileEvent_DiskReadElapsedMicroseconds` UInt64 -`ProfileEvent_DiskWriteElapsedMicroseconds` UInt64 -`ProfileEvent_NetworkReceiveElapsedMicroseconds` UInt64 -`ProfileEvent_NetworkSendElapsedMicroseconds` UInt64 -`ProfileEvent_ThrottlerSleepMicroseconds` UInt64 -`ProfileEvent_QueryMaskingRulesMatch` UInt64 -`ProfileEvent_ReplicatedPartFetches` UInt64 -`ProfileEvent_ReplicatedPartFailedFetches` UInt64 -`ProfileEvent_ObsoleteReplicatedParts` UInt64 -`ProfileEvent_ReplicatedPartMerges` UInt64 -`ProfileEvent_ReplicatedPartFetchesOfMerged` UInt64 -`ProfileEvent_ReplicatedPartMutations` UInt64 -`ProfileEvent_ReplicatedPartChecks` UInt64 -`ProfileEvent_ReplicatedPartChecksFailed` UInt64 -`ProfileEvent_ReplicatedDataLoss` UInt64 -`ProfileEvent_InsertedRows` UInt64 -`ProfileEvent_InsertedBytes` UInt64 -`ProfileEvent_DelayedInserts` UInt64 -`ProfileEvent_RejectedInserts` UInt64 -`ProfileEvent_DelayedInsertsMilliseconds` UInt64 -`ProfileEvent_DuplicatedInsertedBlocks` UInt64 -`ProfileEvent_ZooKeeperInit` UInt64 -`ProfileEvent_ZooKeeperTransactions` UInt64 -`ProfileEvent_ZooKeeperList` UInt64 -`ProfileEvent_ZooKeeperCreate` UInt64 -`ProfileEvent_ZooKeeperRemove` UInt64 -`ProfileEvent_ZooKeeperExists` UInt64 -`ProfileEvent_ZooKeeperGet` UInt64 -`ProfileEvent_ZooKeeperSet` UInt64 -`ProfileEvent_ZooKeeperMulti` UInt64 -`ProfileEvent_ZooKeeperCheck` UInt64 -`ProfileEvent_ZooKeeperClose` UInt64 -`ProfileEvent_ZooKeeperWatchResponse` UInt64 -`ProfileEvent_ZooKeeperUserExceptions` UInt64 -`ProfileEvent_ZooKeeperHardwareExceptions` UInt64 -`ProfileEvent_ZooKeeperOtherExceptions` UInt64 -`ProfileEvent_ZooKeeperWaitMicroseconds` UInt64 -`ProfileEvent_ZooKeeperBytesSent` UInt64 -`ProfileEvent_ZooKeeperBytesReceived` UInt64 -`ProfileEvent_DistributedConnectionFailTry` UInt64 -`ProfileEvent_DistributedConnectionMissingTable` UInt64 -`ProfileEvent_DistributedConnectionStaleReplica` UInt64 -`ProfileEvent_DistributedConnectionFailAtAll` UInt64 -`ProfileEvent_CompileAttempt` UInt64 -`ProfileEvent_CompileSuccess` UInt64 -`ProfileEvent_CompileFunction` UInt64 -`ProfileEvent_CompiledFunctionExecute` UInt64 -`ProfileEvent_CompileExpressionsMicroseconds` UInt64 -`ProfileEvent_CompileExpressionsBytes` UInt64 -`ProfileEvent_ExternalSortWritePart` UInt64 -`ProfileEvent_ExternalSortMerge` UInt64 -`ProfileEvent_ExternalAggregationWritePart` UInt64 -`ProfileEvent_ExternalAggregationMerge` UInt64 -`ProfileEvent_ExternalAggregationCompressedBytes` UInt64 -`ProfileEvent_ExternalAggregationUncompressedBytes` UInt64 -`ProfileEvent_SlowRead` UInt64 -`ProfileEvent_ReadBackoff` UInt64 -`ProfileEvent_ReplicaYieldLeadership` UInt64 
-`ProfileEvent_ReplicaPartialShutdown` UInt64 -`ProfileEvent_SelectedParts` UInt64 -`ProfileEvent_SelectedRanges` UInt64 -`ProfileEvent_SelectedMarks` UInt64 -`ProfileEvent_Merge` UInt64 -`ProfileEvent_MergedRows` UInt64 -`ProfileEvent_MergedUncompressedBytes` UInt64 -`ProfileEvent_MergesTimeMilliseconds` UInt64 -`ProfileEvent_MergeTreeDataWriterRows` UInt64 -`ProfileEvent_MergeTreeDataWriterUncompressedBytes` UInt64 -`ProfileEvent_MergeTreeDataWriterCompressedBytes` UInt64 -`ProfileEvent_MergeTreeDataWriterBlocks` UInt64 -`ProfileEvent_MergeTreeDataWriterBlocksAlreadySorted` UInt64 -`ProfileEvent_CannotRemoveEphemeralNode` UInt64 -`ProfileEvent_LeaderElectionAcquiredLeadership` UInt64 -`ProfileEvent_RegexpCreated` UInt64 -`ProfileEvent_ContextLock` UInt64 -`ProfileEvent_StorageBufferFlush` UInt64 -`ProfileEvent_StorageBufferErrorOnFlush` UInt64 -`ProfileEvent_StorageBufferPassedAllMinThresholds` UInt64 -`ProfileEvent_StorageBufferPassedTimeMaxThreshold` UInt64 -`ProfileEvent_StorageBufferPassedRowsMaxThreshold` UInt64 -`ProfileEvent_StorageBufferPassedBytesMaxThreshold` UInt64 -`ProfileEvent_DictCacheKeysRequested` UInt64 -`ProfileEvent_DictCacheKeysRequestedMiss` UInt64 -`ProfileEvent_DictCacheKeysRequestedFound` UInt64 -`ProfileEvent_DictCacheKeysExpired` UInt64 -`ProfileEvent_DictCacheKeysNotFound` UInt64 -`ProfileEvent_DictCacheKeysHit` UInt64 -`ProfileEvent_DictCacheRequestTimeNs` UInt64 -`ProfileEvent_DictCacheRequests` UInt64 -`ProfileEvent_DictCacheLockWriteNs` UInt64 -`ProfileEvent_DictCacheLockReadNs` UInt64 -`ProfileEvent_DistributedSyncInsertionTimeoutExceeded` UInt64 -`ProfileEvent_DataAfterMergeDiffersFromReplica` UInt64 -`ProfileEvent_DataAfterMutationDiffersFromReplica` UInt64 -`ProfileEvent_PolygonsAddedToPool` UInt64 -`ProfileEvent_PolygonsInPoolAllocatedBytes` UInt64 -`ProfileEvent_RWLockAcquiredReadLocks` UInt64 -`ProfileEvent_RWLockAcquiredWriteLocks` UInt64 -`ProfileEvent_RWLockReadersWaitMilliseconds` UInt64 -`ProfileEvent_RWLockWritersWaitMilliseconds` UInt64 -`ProfileEvent_DNSError` UInt64 -`ProfileEvent_RealTimeMicroseconds` UInt64 -`ProfileEvent_UserTimeMicroseconds` UInt64 -`ProfileEvent_SystemTimeMicroseconds` UInt64 -`ProfileEvent_SoftPageFaults` UInt64 -`ProfileEvent_HardPageFaults` UInt64 -`ProfileEvent_VoluntaryContextSwitches` UInt64 -`ProfileEvent_InvoluntaryContextSwitches` UInt64 -`ProfileEvent_OSIOWaitMicroseconds` UInt64 -`ProfileEvent_OSCPUWaitMicroseconds` UInt64 -`ProfileEvent_OSCPUVirtualTimeMicroseconds` UInt64 -`ProfileEvent_OSReadBytes` UInt64 -`ProfileEvent_OSWriteBytes` UInt64 -`ProfileEvent_OSReadChars` UInt64 -`ProfileEvent_OSWriteChars` UInt64 -`ProfileEvent_CreatedHTTPConnections` UInt64 -`ProfileEvent_CannotWriteToWriteBufferDiscard` UInt64 -`ProfileEvent_QueryProfilerSignalOverruns` UInt64 -`CurrentMetric_Query` Int64 -`CurrentMetric_Merge` Int64 -`CurrentMetric_PartMutation` Int64 -`CurrentMetric_ReplicatedFetch` Int64 -`CurrentMetric_ReplicatedSend` Int64 -`CurrentMetric_ReplicatedChecks` Int64 -`CurrentMetric_BackgroundPoolTask` Int64 -`CurrentMetric_BackgroundMovePoolTask` Int64 -`CurrentMetric_BackgroundSchedulePoolTask` Int64 -`CurrentMetric_CacheDictionaryUpdateQueueBatches` Int64 -`CurrentMetric_CacheDictionaryUpdateQueueKeys` Int64 -`CurrentMetric_DiskSpaceReservedForMerge` Int64 -`CurrentMetric_DistributedSend` Int64 -`CurrentMetric_QueryPreempted` Int64 -`CurrentMetric_TCPConnection` Int64 -`CurrentMetric_MySQLConnection` Int64 -`CurrentMetric_HTTPConnection` Int64 -`CurrentMetric_InterserverConnection` Int64 
-`CurrentMetric_OpenFileForRead` Int64 -`CurrentMetric_OpenFileForWrite` Int64 -`CurrentMetric_Read` Int64 -`CurrentMetric_Write` Int64 -`CurrentMetric_SendScalars` Int64 -`CurrentMetric_SendExternalTables` Int64 -`CurrentMetric_QueryThread` Int64 -`CurrentMetric_ReadonlyReplica` Int64 -`CurrentMetric_LeaderReplica` Int64 -`CurrentMetric_MemoryTracking` Int64 -`CurrentMetric_MemoryTrackingInBackgroundProcessingPool` Int64 -`CurrentMetric_MemoryTrackingInBackgroundMoveProcessingPool` Int64 -`CurrentMetric_MemoryTrackingInBackgroundSchedulePool` Int64 -`CurrentMetric_MemoryTrackingForMerges` Int64 -`CurrentMetric_LeaderElection` Int64 -`CurrentMetric_EphemeralNode` Int64 -`CurrentMetric_ZooKeeperSession` Int64 -`CurrentMetric_ZooKeeperWatch` Int64 -`CurrentMetric_ZooKeeperRequest` Int64 -`CurrentMetric_DelayedInserts` Int64 -`CurrentMetric_ContextLockWait` Int64 -`CurrentMetric_StorageBufferRows` Int64 -`CurrentMetric_StorageBufferBytes` Int64 -`CurrentMetric_DictCacheRequests` Int64 -`CurrentMetric_Revision` Int64 -`CurrentMetric_VersionInteger` Int64 -`CurrentMetric_RWLockWaitingReaders` Int64 -`CurrentMetric_RWLockWaitingWriters` Int64 -`CurrentMetric_RWLockActiveReaders` Int64 -`CurrentMetric_RWLockActiveWriters` Int64 -`CurrentMetric_GlobalThread` Int64 -`CurrentMetric_GlobalThreadActive` Int64 -`CurrentMetric_LocalThread` Int64 -`CurrentMetric_LocalThreadActive` Int64 -`CurrentMetric_DistributedFilesToInsert` Int64 diff --git a/dbms/programs/server/data/system/metric_log/202004_1_521_104/count.txt b/dbms/programs/server/data/system/metric_log/202004_1_521_104/count.txt deleted file mode 100644 index 7e72ea4040f..00000000000 --- a/dbms/programs/server/data/system/metric_log/202004_1_521_104/count.txt +++ /dev/null @@ -1 +0,0 @@ -3983 \ No newline at end of file diff --git a/dbms/programs/server/data/system/metric_log/202004_1_521_104/minmax_event_date.idx b/dbms/programs/server/data/system/metric_log/202004_1_521_104/minmax_event_date.idx deleted file mode 100644 index 73ef9660d53..00000000000 --- a/dbms/programs/server/data/system/metric_log/202004_1_521_104/minmax_event_date.idx +++ /dev/null @@ -1 +0,0 @@ -GG \ No newline at end of file diff --git a/dbms/programs/server/data/system/metric_log/202004_1_521_104/partition.dat b/dbms/programs/server/data/system/metric_log/202004_1_521_104/partition.dat deleted file mode 100644 index 870b71ef44bae12efece0406d75f84029c34bbf6..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 4 LcmWd;Wo7^X0GR+S diff --git a/dbms/programs/server/data/system/metric_log/202004_1_521_104/primary.idx b/dbms/programs/server/data/system/metric_log/202004_1_521_104/primary.idx deleted file mode 100644 index 8589c74d308..00000000000 --- a/dbms/programs/server/data/system/metric_log/202004_1_521_104/primary.idx +++ /dev/null @@ -1 +0,0 @@ -G^G^G^G^G^ \ No newline at end of file diff --git a/dbms/programs/server/data/system/metric_log/202004_1_526_105/checksums.txt b/dbms/programs/server/data/system/metric_log/202004_1_526_105/checksums.txt deleted file mode 100644 index b3c0729db40f97eff00486477257162d0fb4c979..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 8832
[8832 bytes of base85-encoded binary patch data omitted]
zRN(PZDy0GMi9%O2EH6W0V22j41?&KY0e_|64sE1?slXAf$20oI0;}-BY|$DZk@}4Q zc4#Fa?OmckqgG>JCm9m7%A64jm7xb7;~ED9s02^r90VRlv1&yTn-Q~P{P6T4gf^MYFJSdkfvsQ(35p125J1xa>EJaDH1YH~@;rr-O=j^63fYVp ztuB$(WwFz-)%$pvPhxMl-TaliBx-Zl5%YI`iX~g2@S0|(MZuVZk?EFn{j&eQtsqe{ z;G#u-P-CY74ir)ZZ!un;?73zQ|d9ax#*^+VPe%&HUy16rN6 zwd;GYB6W4jr+RklyM`wK4Fx1x07&5t$O!`xa#oY^3qV@hLa>|=#({Rbr9yZrIxm~e zWpxVm5N*aHi0!oyJS3PHfQs=+>k zB#1yzn#|^@RZ7KFXoF4xg6go?u>^+T7-=S&$cON*x&E)+$yY1v7hiV!FZZppfoO%U zu^wDm*lOlN%2l`c?PGPlNnR8{1$eR_G}jMs8u7@RaPa59{~|S?zmzy~j+T}BUMR*k z_FJ&I=90OyLvLrNwjt}5_X{P(ruY$bhmR83T&=DsMTc`(rO7gG)I~Cn{?~V|WZ|Lv zamAt>D=Kd)e#t3|oOy?*o&0^gU4HWV*(-Lv{kad_^C1%ms6q~$%55SW=#yQx9v(Nt zEvGGT#oO(L`|r)bqn8@4jSd(-ad5(~W4<*!@9h`DK}mo{^KjmT%>#U|c#T@HP^VS0 zdOco6BLh4jvZO4e;Vke(2l1~$lYy0Z9}a`%*5zl)KP&f_d9Dq6==$^!b9oqSBg9KN zQ@Q7|T9$14Fepqym-STuceD$S3mF2ec=ZA_9;aQOG~kN1;k2$Ga~OLrDr051^TH&<${_8FK+bX7>uQB&| z=iX8RIx+zT0dC|7oyzbU4{f1e7n?1cWp~ImL4JMr`Cpp`id&(GzR!%=sA$|X%zfE^ z#|}Mj=Bo-JHJ4HZ`Bj5_Oe?9$mMcL`6 z*8I!S9qMHl7f(FM_NM2>?&5T2$tWTTBbu%#4{vP~e7$4sPP%RYx8k$7srlGur#$Ss zxZbxa+yeh0R^J`{uEfjv`6z-21IGOj+@qlvn3kkt+1xw>dxnqpQx<}CopB*Z;KD$_ z>Cd2R91Fo(*cq+OeCOm5{l!Eq7hED-SX}}K$1Q*HhjW3-XK97jsS0~qGgEK)TZC|>NxFf)OF;Y3EX6)nGMX{A`QB2vg*-IHh00oHvRRAe<3ZR|l z*7%Z7wLc;4tuI3I4%I~Ng#*Tg@0t8d;qPxQmi!&^E=cqPZ_d^Gg%jottdESG2mU@k z(p$Ha1YrH!CuNW#HxG!am!(ud=1tc8QdxDWgD zW7X}JUlz<#D!W}xBiaaf>C)V8Cs-Seux5U1SBLNRk$^&4?7wFMupC^8o`fMxgMn6& zt{!S>E9gc4M8LZ=2mxtwr9!Q^P1y?W(j7h&egSR3pw%g|*Z~EZnXGO=k>LyIE*@+G zP6^2m0pZEoiRq*ST>^aAfi^0hSIWRDvnFx@`Dg{59|lIhYaNmvM|_vf&cgtAYw?wN zy$CY@>Wc|&TvdZ*PqKpz1OdQzIIDX~n zsgtokvcJx~TzGp(%!U;)HmMN|01u+xgUmWVx2`rF+Q6LF!QTPK= zyD)n^w{$NpURZDV-a05{#<35DlPwp5Hn?c|k%mKSf=^rhy9_K`m-GG!FoQ#ZabxWr zE(2Qb1Y9so?~Tk1Q*549VRL&=W)TF!*X=sg!&Vo&8J`U+IIS+lp`v190)i_ zkL6G|UoZj7APgDbZOyO@M2>`oKuvVY4Qba(06Dfqcx1e+0@z4BFdiBKQE&nhCjoyZ zki{re>iOojg7rpA+NB;k4}`MBL`9wr)*){o!Fd3n3G&eYfDnWNWD6aoy=tI=o$=BHxuo%u!Lx0F{zwi8<4kBI{$T{5N5LizE+U>d&m%{i zc_P{K!PjhQ9CdhS*jCsElV^qZoG|az_V*h`&zfWJvf()p(aG&Tvb7B*@}q{xqjfs1 zoftjGGQutbzcm+-jGQTtQWzAGnFe-)t8qtoWn=3pm*x6H!MAJfW6AVRCL z#evZ{Wrj=8I?!DjG#WUe-|#Df`GO7ZsK(No`N9{N#2a&vU;;o|qDrk+on)NwE#76G zF%_CS%O4U<9h&jY%{L7{+0mUTgMpe+-?=g$J1A)sA#4kdWeknRhRHFRLAxM)5g z36t{#y-B>YM0lb~e*}Z_bI=8vHcRTRB1n*94_bKAA+v!5O(Q9;3CB?g;6W}I%>cHD zBq=Zhbn@(ef+9VM@Dx>U1+H`O9b;MmQ)*796C_^10w|HX-GwA=0z{#V=%VKmNzrOW zo<5FXmSGY&-en2ZziNwj_~(za^3P224F7cj?;D&pW)0KUD15!&e`|Ab__3jZB#sKM zF)G9n0A7G-3BY9h$bVCn1CkD4_AY4MA;kB z#R+Tw9D01*J2&Olem8xq0d=Pb8jh6*MkkRsL<0e7p3;gTWD893lAZkf?ZR2H)W?SFTQ0PasphraIWK>=m#Vk4V8b;AIHr^HC&#poa3p8s&=}@PnvK}8;$pOhTB|KEL{Cz(Y(_fQ_NTCA zewe#Q6@)kuwZMbTIBAX^K?}Pct3bQfzTr`K?qG!_VP=88TRAYc z7y^Nrv5sZ%YGVPQYdw%RbipGzMwQMi7NY(D1P#+Dp(XN#4z|6|LNo@0`bPXERx0tB zGweCwKvxt+*%%+BV4^Z^{5~!f31o7u&TtY34T?P({qE( z=m^lB+ibcE24POYRXo!I_`v`!W15KvUCR?!e?9KAGOy;wjJig1LAgx}%#}^) zCrJ%T@$8ctcwM(8t>x#eUu=j34xQb-iCj!*pYPp!+`=O}KFII*bEgAm-s3qnwbv&6 z+9S39=1GPlaH8YkFE+e^z(^>zPJ1cB7jQr0T&nH1hKYUWHPUgvy~{=ap;k^~3$PqA z9LPU2&E{pr6KiSQ81yv955m&kl&Q2vn*elSGh$ua;rW_-kYvL66z+&UGj3HVT5{$* zEyOg~ydTvabJ))_@Y_BGPFX(W2q2-UTxFJu_r+@pwdw+m0e4l>ZNudgRS!cPm>A(t z)U>iB0{p3+{t=PN{DMne2y5V{&sIIQF=pIMQWpK87y51cy58f?d)7^}gDtSzzdgq{ zHm^DGEOkSBAV}s7ANJ)&q~P+((i)!QSnK*T zP2@jr#rE@#-s9MQE(=(cDie&ESBxF~)N;|Bv6D%QW1-%TUp;5ZUt8|LCfl4n2~uKS=j$eKl+qUdp{X#U~$nQO}X*cW!I` zFQ`k%IN*Y|;H5M$r~$94fx+4JvZq}0?x1_urYh!x`G>%sbM9}%EBL*Dn;F}fp7zJl zmFI@N95)G%=l~R{=Nlk`tEl#1S?ewjcJ{guqP~%R^M42VXNv2p@Zcb^!%NY}1R_Uw z;QQ>OnIn4(P!+}#lYC>IGpHY){5gYlQ+c<(vr1R~F>6lEzMGq0&HSdNm*MdR=a=po zziHp28%g=|?6Tdx@rVv+a~YZ0MgLH3aD{k$ 
zQ{&9aiw|brQd_gLNc?jo!^^~ilPP21VPK}eD;CNnxbcMEb}I;QICEf=&ivig^HdY~tqp$R-qFSIUpYtH_{mMP^^-Ey!A&U{aNQ7AgGY4LSIT?d-7kA%Q0jim( z;BVZt^hD1$9_=)XqA{Yt4J6|j`+!(C%J7N5lwM-_Dm;C~|3I76E z?6gsx5-@G+$n6#|^&HB$lfLf~Xq#3LN=!=Ls=3h2Y)CUPFVTvYtw@%4Wi z9$dCudRmfiSC50ot@%(d-Q@g?xDRGxmum^}p#69q?y|o3nhnlE3 z8eqqaH0pKADAbrX5$p@d!DOROuFAr-w+wgQbp@JgLc-jJSn%+Tf`_>bh>yLg!QzNy zoVl(JZ~CCq409;9ey8koIl${cTncL<#Rk^`;0vnWaB}0ZUW96 zKHNmnb8+I%A!dJAW~gkQI}a{2yPjyHKkY)iCxkwe75u-V%dhX+Mx zT)<5N?aY6lz;@vx#vdOJtl}`|U7E;^bkM2aLuwr#Ssn6koVu}g()`6Z3qv)Lo^cPZ z{S#HUe^rmb5xmtrGzMS_WNyGV$Ny7PmDTto!HMSi zEt!?NqtXnG#l`0)Z0sOl4^BD}f0>~)(t^Psn}LWNhYKBScsvhaja_esz4G4TuXyY9 zLk)$3FH6db?Ki%^5rzrvpzi3Ks#TQ>F@id~|N(%Yke*Zl998QW%jU^#NQo#UJL9`&u5#RhooQqbtB?!r(IO(|7%toeDU!ZWnC*}b7 zER0xXgSui%o-xc~z?fg32%=VF4+gpq}{;kAbJ!9u<@)+$&SE@#=C;S%PW81@5*#lhnzP{>&|t27)F zffKmvx0$jM4)N|c-m=f<`Jtnj-LJi$VL#eVNa(e`Yxn|3@VJzoI4{&s(3x6+TrB-? zGsMpx;tCs|Hq1WZpW7fE8h&qVNZ`(j$4d+Fa6|g8nCJsud2unLE;#rc%M9e2!oU`| z({LiT<_;9ynEw49r)mGazW%?qF3|}lV%Zn}^60lk;(+zd$ggC#Md4I@5JPl`Mt?vC zBr7ym2{Z{omRKVaQqk@MP-6V{Bw?cAEl<0i-^I13%D-j1|%Z6^N(oo7frY5~t<0JIJ$ zm1$zs1^VpI7)1W$@Fx!7n;H^~kjKZZ>{-A8U;R|$<_=hmJ0~edg^tH%VxmgpFi57) z)~ZkRvt?G=qtT!X!fgN)2gI;c6qKl#v=N}=(1v*-LN%Df#{gqVgyNtW)H~oOZU{iJ zXf(is+~HMQ6boJPWyceeso^Q)MS269+ZQ)i!g2Ay6K*{ix^-M(n__X1*m+X_U@7ML zxxSCBzRTXf%Sv0!sS`W?yNdghO2+VDeNSPcM>|N>!9LboE;ui!x>dKt>j-(RxTwOI z(*3F{Z~3~o|5i@-(@wmdJBIfY6Gl_x@d2CGV9;5~)VRa>%FYevLT8>Z=K*i16W`wr z`Eo3nH}EIH--e=4K(@M~5TP|=;CHeOGZc#Yfwd!8RaQ1W@YFkxl`uiVpL}+NkPX9U zlmkc{b^H1KtdmqTHa=kQUkm(Yc-0N%5!?d4DRG?Pa(`(yzj=A@-99(hH$%KqW5b_h zQlNBrnTI!Xko&VHwdXKY|8t?nx*yM|O;po~QW2GDIGCX()7hpKhc+(Q*zL8)|g7C~VP4RbXlOg2 zVTmHRnAb+A9>h@aRb@(8mrpmZ^@Kg;AT)yMRwDtHhF|%*-UiNXCjs`vG9- z8!=I#W3L67<(0*w8lD6{?{qRgnV@{)HT7Y(D2SwBd+yu$B&fRZZSkpW#{&}i11c@C zKy6UHCTx&DNf8}L(7>fPL0_36dozJPL#b_#A9SbV&`CfM{~Ln1xL-Mu>|ChvQYxQF z`hpH!xPzg4!BTvlsnIKx`J@B#;!^yMzL^b|0+j+p5-vziPy!RkqHuyDc&@0fy9={e zOe>+Nqg|N-DYFr0-SOGGK2NKmxV9)8c6PenTk)0n4-W=!!z2TxEbO-T^{2bi+o3~T z>n#^817fzlzn@{v{cIh#)4Or;yDPs3AN@0)S@B!LMff7D;z8xxwuZO~qQIn#LVm3! 
diff --git a/dbms/programs/server/data/system/metric_log/202004_1_526_105/columns.txt b/dbms/programs/server/data/system/metric_log/202004_1_526_105/columns.txt deleted file mode 100644 index 6b901df244b..00000000000 --- a/dbms/programs/server/data/system/metric_log/202004_1_526_105/columns.txt +++ /dev/null @@ -1,213 +0,0 @@
[213 deleted lines omitted: byte-identical to 202004_1_521_104/columns.txt above]
diff --git a/dbms/programs/server/data/system/metric_log/202004_1_526_105/count.txt b/dbms/programs/server/data/system/metric_log/202004_1_526_105/count.txt deleted file mode 100644 index 4af051ca985..00000000000 --- a/dbms/programs/server/data/system/metric_log/202004_1_526_105/count.txt +++ /dev/null @@ -1 +0,0 @@ -4020 \ No newline at end of file
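Two incidental details: count.txt stores the part's row count as plain text, and system.metric_log receives one row per collection interval (one second by default), so the growth from 3983 rows in part 202004_1_521_104 to 4020 rows here corresponds to roughly 37 more seconds of the test server run being merged in.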
diff --git a/dbms/programs/server/data/system/metric_log/202004_1_526_105/minmax_event_date.idx b/dbms/programs/server/data/system/metric_log/202004_1_526_105/minmax_event_date.idx deleted file mode 100644 index 73ef9660d53..00000000000 --- a/dbms/programs/server/data/system/metric_log/202004_1_526_105/minmax_event_date.idx +++ /dev/null @@ -1 +0,0 @@ -GG \ No newline at end of file diff --git a/dbms/programs/server/data/system/metric_log/202004_1_526_105/partition.dat b/dbms/programs/server/data/system/metric_log/202004_1_526_105/partition.dat deleted file mode 100644 index 870b71ef44bae12efece0406d75f84029c34bbf6..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 4 LcmWd;Wo7^X0GR+S diff --git a/dbms/programs/server/data/system/metric_log/202004_1_526_105/primary.idx b/dbms/programs/server/data/system/metric_log/202004_1_526_105/primary.idx deleted file mode 100644 index d1841709c4e..00000000000 --- a/dbms/programs/server/data/system/metric_log/202004_1_526_105/primary.idx +++ /dev/null @@ -1 +0,0 @@ -G^G^G^G^Gȯ^ \ No newline at end of file diff --git a/dbms/programs/server/data/system/metric_log/202004_522_522_0/checksums.txt b/dbms/programs/server/data/system/metric_log/202004_522_522_0/checksums.txt deleted file mode 100644 index d96bfb26c4829bb370ed371e1edcccb2af8c67c0..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 7190
[7190 bytes of base85-encoded binary patch data omitted]
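Part directory names encode partition_minblock_maxblock_level, so the three parts removed here tell a small merge story: 202004_522_522_0 is a single freshly inserted block (level 0) in the 2020-04 partition, 202004_1_521_104 is a level-104 merge covering blocks 1 through 521, and 202004_1_526_105 is a later merge covering blocks 1 through 526, which subsumes both of the others.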
znpCPq#)TI@{rs}$F`JgoM|~%4;`_b~QRD+UDYY21vY7%IL03u}1)aziipRi0VE-#4 zO$xOks4pNgj)EngEorDIEBuxZ7ZPPYr1%?4YDIg3JcnYnd_VoD?z4A~`y#S;E&nMu z@Z+YDrc21Uq@W@6*0)o>uDto{#OMdx#sudb$-Npk>&B!h zlmCj|VR;uBb^9myy|U7ShR)oVvG-~t4-+~zS#Vqr8w zqof^NgP2Mh+|Gs?is?ZN(-*wQ7xwa~8_XzmiegQzTbw5+y(DU3eur;5Nl^+&5GEe# z(M71;(Vxj9%7^``WWuDdkBJnWT_nxg==zqRBkx|_yuWsTzr|dA{d>r`_`emy@6{Ze zg=hcH?$f<`FlGx&22#7tgm5&KVP*n(4r=-&;|2n~up|u_xU1?zoya5dI^A+8W_TnF z0x}B>+yNlj{sHt>&b5xPA-XRSyN{!&S$<{riO4emz}qbl&R zgM^i*LT0&RauKT~M?sF-Vzaj_!IYD%HVA=i8;c3akT_Iye$T)?l2|P|zhs|jx~1;w z{vh7`_4Kde=IVEaFEt{gp}6JlzdcjmKh#T{{%F^GLkF?#%rS~bU-IApvVcPAk*pnQ zqq*7<4#!|oto?x?PD>_H90oBy>xQT+R4OKgBX=ntUIm_O!}E`b{{+;-0)ogB`s*K& zZwszPR_b(&qYR!`N7HX*7EQwDESt?yb%50yXimc3^g>dAFUcA2vGY=$p;H5Jtj}Hg zxL)vGj@fQ^xXURk)rpnbNJF<`(0IAukk}spg|m?+hZLAWyd&4@sxmtt2>(lZ!G>H| z=%}fs+h@7m4GPl4XbcfKAor#KLziSg*?IM~&evFjYcZoYcoREmq;C@Xf#P71waV#m ziB%4}WvoOi{EC<|B)tUk3Ls1Ak;@QMlJBus-BAVMI+mRaybn!8!7zyxo~oz-de^m$ zhP{It%QmRYs~4+0(Q2gUvTt-xH@@@CZXWV z{7+2mkvoo?XOB%^@zqge44s`C%RRWae#^f`+32Tn`iR?MMv#zNO6jqQFoEVy8iN)+ zna^s4(M)@$GDOb-BvCSnSp!R2fW*qwh8Rj5Ugi)!Ma+JX&xDeEv(;ws*9q@I9oEGcTNC-kL znwZ-I-qk;S|6#E>;{bDvTu>n6-nzKDD<&P>WJuE=vb9mkaK7(=h&5Kh+7B{;co zsxyh#1mJ^m!I$;%76vCnWs%ipv(CaH^pP4^=EVqZmheNPKo>)iSZj4UB|5x_qeRbm zJ%bNmc^ViB{9^)f0K1yCxhV| znW4BlWgZ*LNqw|PQbC5UOUMkWiytx^%z*Yxw3*@9P$tu4t(gUZgg#;4{#ftN4`o=o z{~V+>bp95xypO9CIgFp{87j@QnQLAB7~EI=T+`$1<_86%R}Q|Mer^7eRlmKThm0zR z?5_s{{;$WRq~s@OW;FiXiKS?edeNXV0y>Py+rT~3rUVCP{^B0<&w&HUQ)TXt${rx& z+r7`%jUM@2dS+BVy*MZ^q(>kkWz_!fP!o^wlj5$D=txlG$}Bf!!eP9I4w4E`cxgr1 z1#z9@@KW$MeJ+q-xtb2pxl-?3VWga70fKp463x1m!K53~geD(Es%0P_3S>3JR93jn zHn9kY&i_`$ zSTyN^!dquj<%dfZBmW|JDl!46hQN&DxdrCQJJha5Ge}7=VfFNjig!h$JYrgh=p@8Fby?plIL7N zS-qy7{9*X&>l-$%n0_zp7Nx?+@;ID)S73HpN@)(yH+yWm zdg|~#Xb(SsrqjTZ`GI(UXtO3`+vN>^-+!uc4F53xjn$XWjY7slOCryC#yrM4 zDY58r$A<1eri%oPiiHw}{L87aJ5Xi>FGuUcmvKxYNH0+36(4FHd}?({*hu;7rl7e~ zV)r0pndVJ#&0EtRoQXcwkdbqAQ5S@AdvU%Jhin8CDO%=6wlZGa4@2)v*Ewt+yPKA)XM-{;CR>;H z(PO0OAfn9yOh6L%R+2HRYLaJyAESdz*LdsbKrJ4OnM^O|$-JD0UqV?$X6M8csR;$v z+pG?&80zWyUE0uEbe@?&k(Zfw|&bf{%j}HCjmF>v5 z?@!aWk?_ZFV}|c~M{|wY5SUKQZlRm(lp_nwFi~mkW#hFHos;V5y}TM9VQ8r-^(M*i zKA(D%C5p(hNqVar4oI0~fvfutdlOYtMSJ-LDN4)vHO{V{v= zm78ec;dH*WXafqHosduB@9AgN#l7=W1$iZI=J?nXJKo z|BvvEOULZ#*Tvm9hh+i~HipRZ1n|2}hO z#-b3zZu(<-lvb63b1~WjYS$c<;$&cprhDVJs~Kyj7pp{Tow(VbBO_#Lypko9xhPxFOz6+%1I&%1 zHjSkZ?SzO;%EKiUIi*8}TAgl>*>(Wo(*dMAQqvb5L~&k0fmh<~X}kpycrnvK3N~Fy zNyWgD*C9%A3cN(qn&zZjL`lVXE%6XNo`8Lk5}1%PzOs8DjTyukTSG^Y2uLdCa~=7gYm@TazCXVb?~=c0_a^)bUP4u6&&gpA7`k1c-t z;Ins`O}&+;FEqNlegn+)?(N@4Au31nIQ|VFk0ok+SApLEX%TpF>c&JniidsHWtH|wTOD-d9qhd zD!mOH4gMwo(tte~KvCnZr*S_{Ns~f6uNNksorf>;$AhPE=)lHPZ1)TtQGp`~Fk7az2mZ587^Zi{2G{V&kaaSAJj z!YZizC97z&?AMa#+93MOD12sCtmM%l@WOwCCZW(VG+L*~VwJwSRq+qQ^#N!?qDlCy2?i$CcCI>Zg>PG*Bv z$X3YJNaCx<^xA%qSs{wkOr3WSb}v zt^TYWjekgEV2z|bJK@Vxd4HhaqNjHqYQhyl)kH+8 z**|RzRhmUsyQiSUHQr&f_SNC%!DKl2c`FEtBo#_h_9`;huLrW59%Gr3yhqvlCGbi# zl8sbg52Yg!q#0Do;j}P9KWdVfPv9<*(#6T^;B^#_6YrkwAOFU^TXHvsp@}CLKkNA zh^hd-RXs~J)NzFGgPv-8#~wHm(fIzeCBI(SwykT+!`G1UjZMq8^qtmw@z*CmkKMxe zIFxpoC8I#=ZFW*4gfZ&~6e1!GP#e_9asm-;wf30FngbWV2ITpQ5h) zc>uXb4|zx0?m61J;}Qni#S9&ZU^&zpOTh_+)~hld4X(BUmh=R!vdCj|TQ4vg62@|j z?|}NQO+3E_C6>6yuVBdUC=~DQa5dQv9HT5Eh&CF=vh4@;-Wn>#vfZIAyHCP=EHcw( zxZP#W8casfj@%0gCvPi!?@BlrAdtUlv6IVif{#B)GQ%+ik>o^|aNJ+Tk3h<{UPCeB z&|jtW4-{9e!+w$WfGSaYsrrz}tLVQ`?jG)XHyZQK7x}KwF3rsd%&u7R{V)%xM12`- zup)oSrzL z{b+~kQ|sp;E9ZSa!URcfYt0_%pPIg#{@YEn)P6B+>xjD>@+Q?5qKNG+aW7wr3bL9$ z`PlH2wz|m#h%RG-XWXbM-=F?qdusRZH3JS`o!GS!)HQy&Pqkh4XnSSG{&s(KrsU#r 
diff --git a/dbms/programs/server/data/system/metric_log/202004_522_522_0/columns.txt b/dbms/programs/server/data/system/metric_log/202004_522_522_0/columns.txt
deleted file mode 100644
index 6b901df244b..00000000000
--- a/dbms/programs/server/data/system/metric_log/202004_522_522_0/columns.txt
+++ /dev/null
@@ -1,213 +0,0 @@
-columns format version: 1
-211 columns:
-`event_date` Date
-`event_time` DateTime
-`milliseconds` UInt64
[remaining 208 column definitions omitted: ProfileEvent_* counters as UInt64 and CurrentMetric_* gauges as Int64, identical in every metric_log part]
diff --git a/dbms/programs/server/data/system/metric_log/202004_522_522_0/count.txt b/dbms/programs/server/data/system/metric_log/202004_522_522_0/count.txt
deleted file mode 100644
index 301160a9306..00000000000
--- a/dbms/programs/server/data/system/metric_log/202004_522_522_0/count.txt
+++ /dev/null
@@ -1 +0,0 @@
-8
\ No newline at end of file
diff --git a/dbms/programs/server/data/system/metric_log/202004_522_522_0/minmax_event_date.idx b/dbms/programs/server/data/system/metric_log/202004_522_522_0/minmax_event_date.idx
deleted file mode 100644
index fc9f33a367f..00000000000
--- a/dbms/programs/server/data/system/metric_log/202004_522_522_0/minmax_event_date.idx
+++ /dev/null
@@ -1 +0,0 @@
-GG
\ No newline at end of file
diff --git a/dbms/programs/server/data/system/metric_log/202004_522_522_0/partition.dat b/dbms/programs/server/data/system/metric_log/202004_522_522_0/partition.dat
deleted file mode 100644
index 870b71ef44bae12efece0406d75f84029c34bbf6..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 4
LcmWd;Wo7^X0GR+S

diff --git a/dbms/programs/server/data/system/metric_log/202004_522_522_0/primary.idx b/dbms/programs/server/data/system/metric_log/202004_522_522_0/primary.idx
deleted file mode 100644
index 7124d1b7297..00000000000
--- a/dbms/programs/server/data/system/metric_log/202004_522_522_0/primary.idx
+++ /dev/null
@@ -1 +0,0 @@
-G^G^
\ No newline at end of file
diff --git a/dbms/programs/server/data/system/metric_log/202004_523_523_0/checksums.txt b/dbms/programs/server/data/system/metric_log/202004_523_523_0/checksums.txt
deleted file mode 100644
index 03ec0a218cf195cd755046c0806ef98ab235f001..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 6974
[6974-byte binary literal omitted]
diff --git a/dbms/programs/server/data/system/metric_log/202004_523_523_0/columns.txt b/dbms/programs/server/data/system/metric_log/202004_523_523_0/columns.txt
deleted file mode 100644
index 6b901df244b..00000000000
--- a/dbms/programs/server/data/system/metric_log/202004_523_523_0/columns.txt
+++ /dev/null
@@ -1,213 +0,0 @@
-columns format version: 1
-211 columns:
[211 column definitions omitted — identical to 202004_522_522_0/columns.txt above]
diff --git a/dbms/programs/server/data/system/metric_log/202004_523_523_0/count.txt b/dbms/programs/server/data/system/metric_log/202004_523_523_0/count.txt
deleted file mode 100644
index c7930257dfe..00000000000
--- a/dbms/programs/server/data/system/metric_log/202004_523_523_0/count.txt
+++ /dev/null
@@ -1 +0,0 @@
-7
\ No newline at end of file
diff --git a/dbms/programs/server/data/system/metric_log/202004_523_523_0/minmax_event_date.idx b/dbms/programs/server/data/system/metric_log/202004_523_523_0/minmax_event_date.idx
deleted file mode 100644
index fc9f33a367f..00000000000
--- a/dbms/programs/server/data/system/metric_log/202004_523_523_0/minmax_event_date.idx
+++ /dev/null
@@ -1 +0,0 @@
-GG
\ No newline at end of file
diff --git a/dbms/programs/server/data/system/metric_log/202004_523_523_0/partition.dat b/dbms/programs/server/data/system/metric_log/202004_523_523_0/partition.dat
deleted file mode 100644
index 870b71ef44bae12efece0406d75f84029c34bbf6..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 4
LcmWd;Wo7^X0GR+S

diff --git a/dbms/programs/server/data/system/metric_log/202004_523_523_0/primary.idx b/dbms/programs/server/data/system/metric_log/202004_523_523_0/primary.idx
deleted file mode 100644
index 6a0268c1d74..00000000000
--- a/dbms/programs/server/data/system/metric_log/202004_523_523_0/primary.idx
+++ /dev/null
@@ -1 +0,0 @@
-G^G^
\ No newline at end of file
diff --git a/dbms/programs/server/data/system/metric_log/202004_524_524_0/checksums.txt b/dbms/programs/server/data/system/metric_log/202004_524_524_0/checksums.txt
deleted file mode 100644
index d1ab9d1463ba8941700b21934bef7e7997bffdbe..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 6745
[6745-byte binary literal omitted]
diff --git a/dbms/programs/server/data/system/metric_log/202004_524_524_0/columns.txt b/dbms/programs/server/data/system/metric_log/202004_524_524_0/columns.txt
deleted file mode 100644
index 6b901df244b..00000000000
--- a/dbms/programs/server/data/system/metric_log/202004_524_524_0/columns.txt
+++ /dev/null
@@ -1,213 +0,0 @@
-columns format version: 1
-211 columns:
[211 column definitions omitted — identical to 202004_522_522_0/columns.txt above]
diff --git a/dbms/programs/server/data/system/metric_log/202004_524_524_0/count.txt b/dbms/programs/server/data/system/metric_log/202004_524_524_0/count.txt
deleted file mode 100644
index 301160a9306..00000000000
--- a/dbms/programs/server/data/system/metric_log/202004_524_524_0/count.txt
+++ /dev/null
@@ -1 +0,0 @@
-8
\ No newline at end of file
diff --git a/dbms/programs/server/data/system/metric_log/202004_524_524_0/minmax_event_date.idx b/dbms/programs/server/data/system/metric_log/202004_524_524_0/minmax_event_date.idx
deleted file mode 100644
index fc9f33a367f..00000000000
--- a/dbms/programs/server/data/system/metric_log/202004_524_524_0/minmax_event_date.idx
+++ /dev/null
@@ -1 +0,0 @@
-GG
\ No newline at end of file
diff --git a/dbms/programs/server/data/system/metric_log/202004_524_524_0/partition.dat b/dbms/programs/server/data/system/metric_log/202004_524_524_0/partition.dat
deleted file mode 100644
index 870b71ef44bae12efece0406d75f84029c34bbf6..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 4
LcmWd;Wo7^X0GR+S

diff --git a/dbms/programs/server/data/system/metric_log/202004_524_524_0/primary.idx b/dbms/programs/server/data/system/metric_log/202004_524_524_0/primary.idx
deleted file mode 100644
index e28d01e380f..00000000000
--- a/dbms/programs/server/data/system/metric_log/202004_524_524_0/primary.idx
+++ /dev/null
@@ -1 +0,0 @@
-G^G^
\ No newline at end of file
diff --git a/dbms/programs/server/data/system/metric_log/202004_525_525_0/checksums.txt b/dbms/programs/server/data/system/metric_log/202004_525_525_0/checksums.txt
deleted file mode 100644
index 2ed8405afcf801e2198f75c3be6f774997770908..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 6553
[6553-byte binary literal omitted]
z;F|RRw31{ghe!R*Ex3GXDAcZDYdC`?tqjx=HnwCB0y4UyttIdvZwWrhnJtl|$X4N( zyb+-Ju-fAa_=B>`?{m*m7^O>un@E(WL2U=N9=AM^aJI^@&-J4&k+$;uN)V%HVj7Oi zw})LB@%WY#W5h_O!TlOERJ6&i@M482(xF8{!PTS-e7e7REte*{f0nox_>$3H|86NE zCHFoKkSc9R-hj2^d{t>ul<46cjrEtxBT zD70LDX_c)5Se8m}>4V`OiFfH8!tDk1Qm|DzJzh(kNqRi8zs|ggK-Ji)tDP-9Fcn-N zoZ=LaXyCJtK?B2p#gimU|L2LMtc8elkT&xgJF(gVZqIM&ED=XtBN{P!EcO!Nl+#sg zJC_i7jdBVfd8IhfOC*og0vDNt)!`4VAlgYs0t%l2Dt^+3s}~e6-|sHEBs8)8#M*H9 z_r!sZUfX*1n~yeLi~s5LdC-d2NW`LMvyWR>USqEa1pI*%1V++GoNzAiG>I-PcZQs$ zvqSQU^t5u%PG%2Q2K>z%dY}~yNduPxn*>_+93rK#0>S!e0^10EEQ6;&DtigX3u@_T zZXBkb#CK^r^*#^C>5GQAVsSjeK#S;i9O!Nr>`8Gnd zJl#jt#b6m5*C&C-b3#fc90Z8Q2HOEDYmGlt>CRz!yn$AV>Q6w7nS2siwrWi5JoF&8PTW~CW^gx&*Es=}Kv!Pa%n5u?uEgPKUIqyaTQs^pHOGmQIUd(v zL5!B(ecWe-gm*CTj`)K++%Ex{2S8?H~)^y1OW%7~YBRv-rmdklo%j04#9mN%=^ z`*saYzx8~|pXt9*FMa!%tzhn+r1bS?#3RYaRu33tfX9GMM65rKCY;$rNplpL>7b_# zr6J^=deMNL%nq7JAZxRUFt0CN4;IIJk_lTlP#GC0-KS=yKwz~{fC;W4iS;9fo`XIl zx~D-s8Q2Es(_jxdy>c~8#)dxHhuYy=5J%hFeXgS1eAL6Mg%|zdP(_rmM!*xLm>Z6m_2p3c(7#(>;7wm0-Ma9iWpa-*4^lvq8| zF3M<%8y$nx@L8rg@){Y5nuykF*hqlElYFkH;#pr}HV=_FHWg{@75w;V1Xn;_7vY-> zmM0h@9TSm(BUW0i6iQLh`U9ce2#wWBUDPr3ph`6an3AN>Qz|wN17sQg_yU7J1T%9u2jeLmJ1aMxl!taOIGI|rKR9_R|3IIdTfZEXVG+w-_3iq(eKvoidSJ-+Pi+WtylRKkX%-ay^d_i>0%@SP4Fi^Jdx)L7 z@A$S43&uCS#Wp`ktg4y!M!b|cV|U5$-+!KU+*)w6d%;iwm#rYvv%to&6GQbhl%TRg zgf-2nPYYZ=D8CZ!81m*HKy`0ExO<%{XWl1!vK}JV(WlxRXRP_v9r1gQWX=iJWfM|9 zScocj_9r0evU_EECr$Myg*zr>y97DmurOzP{tY zF5pR2G}Ch3jw)ZP-y8OY@N#uLvBW;#^UP2)C7Nyu*&I(FDX^Bh=7s0>;7m{woa331 zV5H+IR|3L?0%R+Quoaz3N~@iLrYS@6#(I9>O;pFx=>BLXFAfBFg|hoIO!P}3<{7i; zHpjLq%$9^N zb@R0Whb~Urb?nJ{{COAVx9cO$CCdc%S0SF>q-+9S1u%uu&6JJ6DK}G&9mU;XGlu!p zjUI;(4>M(M2)Tp*>);m(@<1 z>t=qYw$+&ZM9e(rorQ zMiypfI+sS#nUGgDr=foW;*@aKPPl6Fff!Zn8!*%DYIP@=5|M3~Qi#XP7{|rn^*148 zn-%nX<@ZdbSXZ=A6ljZtE&_9Z>7{wm15H!BV$dc02sriV~@bN2N<5cT4IP zgd#|*@;w0LOC4OvV0V#3-2Z16PGYllX39w%=I;oO773&;l5Awnco(d}9?9QbkE)qvg8i2B-wWs*SHw~8~;BLp*pI+7XaVc_--7GBlDMFCbM zlhyExvHzMw$7antX2KL%?0=-*rBO zj+(3%#D{^UtMv4;hCU3+YUt$Cqa25PO36U1(S#LDo1RMNs#qPCC7y%?@;AW8Fl>tW zDBg_J^2c0*V-O&@)DY-ncs`BC52JT7ZEbiicw0kHjNlA~!rbLeL`7 z=89%%9>`Sd+won*aXTJsE^}p9XvO$cdQTC%SZJmQWPJ*(q#t^m zUi+MI$nBr+`xOk>j}GS0!HIQKJ+jxm*U0V~6S2){q6gMICzAaT0pcb@-UR>TOp~UR z*G6wZn*VabP=Gar^&gppjqZSO7+En8zF1(zVm$TGE@EUQpypNqL4zeGn2>nrI-pU# z!|I6|J)F!!JjdVD#xAK+5>=lj{4hwj&2EZkui~vTiK2gL#J}uaPE8&G=204%lc@RF zCHNPdpq2>96>K>elXJ>N)hwcjN_(?$1eDlid9lMt7wSkcuvuvL8#K)>@0>saTDA#M zaw8VOJf%uZFL(`bB(Yjz#AejUW>AP&{^`fXatZpbq|WKv$0gAHO5qalF^QR*Znb;; z^ZTgTVPc@U>9L$FT>u1RXlGh(S4tI``b{gjRj}CRtMrD0bFL#CUr=-rI6b3Oj{-c5 z8Q;2-X)qa+XQx)7~Bo$;~Jg&yBQD zf20}}<%L`o`K+HNoXMtBM7pXQqLso-EUNiKq1y(V7jLc;lhWv`-AJGZLA5!Bd3&YM z3#6M~KPOt`qd)XYGUssWE;^v&iFO36tCu~EbFdm|sxZ-OQa9ne&eyT0Z+`B!ayB_3T}gcS zx_f8w8J@U!hp8;$u9&m~%zFb9$2|uP6Yhp&~5xX6- zkJ8~0l>5?V8tFm48!@9aBFe9);@Fc2gH&x4e90wVf3T0rh!iN2C~nyxeC;q|J;6Yi z<|r-$*C`8=N-Eks2ZknKeL&!2LNQJ+z7Ba~WCtdJ-uh@xe4kXPxg1$gdRY zTfQH|uHYVjRCaj{D?mbCl}{dNVrjfUI^!rQZ-VEX%7lRx>)_cy5E`n(-jL@j&cIT5 zfs1a?oU=w0Um*Fm(41#E_7mwz-|u!a+Y17xt!4!8ue12BjYeKA_E~&?h$Y#1RLmky zEN}XP&eHL0Iu>^BDFk)wwsu(|z!-`BhorJ{PLQJ9!3J>xH#VIe&Q7JHbmBCkjopoF zh(La=Mb-_r_@YReRV#5!h0Kn%IVD;iT<;nOZcRj1w(I7OE-@!Z400^h_Co2x!1Pr zi6J+(>)!Fam;Bu7_og|ZAmnM@t^CC>eCehG2Nn&J=a;!(DY`N<>jFvp#|wp9K00^z zs`B$m&x}gCbj$$+r*Xg?Vfun&U%dWCQU7CxF$c~y9wWsKd5V8WB>pF diff --git a/dbms/programs/server/data/system/metric_log/202004_525_525_0/columns.txt b/dbms/programs/server/data/system/metric_log/202004_525_525_0/columns.txt deleted file mode 100644 index 6b901df244b..00000000000 --- a/dbms/programs/server/data/system/metric_log/202004_525_525_0/columns.txt +++ 
/dev/null @@ -1,213 +0,0 @@ -columns format version: 1 -211 columns: -`event_date` Date -`event_time` DateTime -`milliseconds` UInt64 -`ProfileEvent_Query` UInt64 -`ProfileEvent_SelectQuery` UInt64 -`ProfileEvent_InsertQuery` UInt64 -`ProfileEvent_FileOpen` UInt64 -`ProfileEvent_Seek` UInt64 -`ProfileEvent_ReadBufferFromFileDescriptorRead` UInt64 -`ProfileEvent_ReadBufferFromFileDescriptorReadFailed` UInt64 -`ProfileEvent_ReadBufferFromFileDescriptorReadBytes` UInt64 -`ProfileEvent_WriteBufferFromFileDescriptorWrite` UInt64 -`ProfileEvent_WriteBufferFromFileDescriptorWriteFailed` UInt64 -`ProfileEvent_WriteBufferFromFileDescriptorWriteBytes` UInt64 -`ProfileEvent_ReadBufferAIORead` UInt64 -`ProfileEvent_ReadBufferAIOReadBytes` UInt64 -`ProfileEvent_WriteBufferAIOWrite` UInt64 -`ProfileEvent_WriteBufferAIOWriteBytes` UInt64 -`ProfileEvent_ReadCompressedBytes` UInt64 -`ProfileEvent_CompressedReadBufferBlocks` UInt64 -`ProfileEvent_CompressedReadBufferBytes` UInt64 -`ProfileEvent_UncompressedCacheHits` UInt64 -`ProfileEvent_UncompressedCacheMisses` UInt64 -`ProfileEvent_UncompressedCacheWeightLost` UInt64 -`ProfileEvent_IOBufferAllocs` UInt64 -`ProfileEvent_IOBufferAllocBytes` UInt64 -`ProfileEvent_ArenaAllocChunks` UInt64 -`ProfileEvent_ArenaAllocBytes` UInt64 -`ProfileEvent_FunctionExecute` UInt64 -`ProfileEvent_TableFunctionExecute` UInt64 -`ProfileEvent_MarkCacheHits` UInt64 -`ProfileEvent_MarkCacheMisses` UInt64 -`ProfileEvent_CreatedReadBufferOrdinary` UInt64 -`ProfileEvent_CreatedReadBufferAIO` UInt64 -`ProfileEvent_CreatedReadBufferAIOFailed` UInt64 -`ProfileEvent_CreatedReadBufferMMap` UInt64 -`ProfileEvent_CreatedReadBufferMMapFailed` UInt64 -`ProfileEvent_CreatedWriteBufferOrdinary` UInt64 -`ProfileEvent_CreatedWriteBufferAIO` UInt64 -`ProfileEvent_CreatedWriteBufferAIOFailed` UInt64 -`ProfileEvent_DiskReadElapsedMicroseconds` UInt64 -`ProfileEvent_DiskWriteElapsedMicroseconds` UInt64 -`ProfileEvent_NetworkReceiveElapsedMicroseconds` UInt64 -`ProfileEvent_NetworkSendElapsedMicroseconds` UInt64 -`ProfileEvent_ThrottlerSleepMicroseconds` UInt64 -`ProfileEvent_QueryMaskingRulesMatch` UInt64 -`ProfileEvent_ReplicatedPartFetches` UInt64 -`ProfileEvent_ReplicatedPartFailedFetches` UInt64 -`ProfileEvent_ObsoleteReplicatedParts` UInt64 -`ProfileEvent_ReplicatedPartMerges` UInt64 -`ProfileEvent_ReplicatedPartFetchesOfMerged` UInt64 -`ProfileEvent_ReplicatedPartMutations` UInt64 -`ProfileEvent_ReplicatedPartChecks` UInt64 -`ProfileEvent_ReplicatedPartChecksFailed` UInt64 -`ProfileEvent_ReplicatedDataLoss` UInt64 -`ProfileEvent_InsertedRows` UInt64 -`ProfileEvent_InsertedBytes` UInt64 -`ProfileEvent_DelayedInserts` UInt64 -`ProfileEvent_RejectedInserts` UInt64 -`ProfileEvent_DelayedInsertsMilliseconds` UInt64 -`ProfileEvent_DuplicatedInsertedBlocks` UInt64 -`ProfileEvent_ZooKeeperInit` UInt64 -`ProfileEvent_ZooKeeperTransactions` UInt64 -`ProfileEvent_ZooKeeperList` UInt64 -`ProfileEvent_ZooKeeperCreate` UInt64 -`ProfileEvent_ZooKeeperRemove` UInt64 -`ProfileEvent_ZooKeeperExists` UInt64 -`ProfileEvent_ZooKeeperGet` UInt64 -`ProfileEvent_ZooKeeperSet` UInt64 -`ProfileEvent_ZooKeeperMulti` UInt64 -`ProfileEvent_ZooKeeperCheck` UInt64 -`ProfileEvent_ZooKeeperClose` UInt64 -`ProfileEvent_ZooKeeperWatchResponse` UInt64 -`ProfileEvent_ZooKeeperUserExceptions` UInt64 -`ProfileEvent_ZooKeeperHardwareExceptions` UInt64 -`ProfileEvent_ZooKeeperOtherExceptions` UInt64 -`ProfileEvent_ZooKeeperWaitMicroseconds` UInt64 -`ProfileEvent_ZooKeeperBytesSent` UInt64 -`ProfileEvent_ZooKeeperBytesReceived` UInt64 
diff --git a/dbms/programs/server/data/system/metric_log/202004_525_525_0/count.txt b/dbms/programs/server/data/system/metric_log/202004_525_525_0/count.txt
deleted file mode 100644
index 301160a9306..00000000000
--- a/dbms/programs/server/data/system/metric_log/202004_525_525_0/count.txt
+++ /dev/null
@@ -1 +0,0 @@
-8
\ No newline at end of file
diff --git a/dbms/programs/server/data/system/metric_log/202004_525_525_0/minmax_event_date.idx b/dbms/programs/server/data/system/metric_log/202004_525_525_0/minmax_event_date.idx
deleted file mode 100644
index fc9f33a367f..00000000000
--- a/dbms/programs/server/data/system/metric_log/202004_525_525_0/minmax_event_date.idx
+++ /dev/null
@@ -1 +0,0 @@
-GG
\ No newline at end of file
diff --git a/dbms/programs/server/data/system/metric_log/202004_525_525_0/partition.dat b/dbms/programs/server/data/system/metric_log/202004_525_525_0/partition.dat
deleted file mode 100644
index 870b71ef44bae12efece0406d75f84029c34bbf6..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 4
LcmWd;Wo7^X0GR+S

diff --git a/dbms/programs/server/data/system/metric_log/202004_525_525_0/primary.idx b/dbms/programs/server/data/system/metric_log/202004_525_525_0/primary.idx
deleted file mode 100644
index 52d0a189ade..00000000000
--- a/dbms/programs/server/data/system/metric_log/202004_525_525_0/primary.idx
+++ /dev/null
@@ -1 +0,0 @@
-G^G¯^
\ No newline at end of file
diff --git a/dbms/programs/server/data/system/metric_log/202004_526_526_0/checksums.txt b/dbms/programs/server/data/system/metric_log/202004_526_526_0/checksums.txt
deleted file mode 100644
index e6411a67832e857be22e81af8decd88872d5aae5..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 6663
[6663-byte binary literal omitted]
zFyQpw;rSJe_#APgz=DB3IbFYd0VT#2urwLn5^SDUfg%Fyz8nb75=cJo8MZ7j# zYiWMtgb4tT5SEn}hl9bWsxz@%AbhUC8k7b@Wn__QjHL&NfhK-|$78*hedxKMxhk@-wt>qwU zK2maD#Rbenh%aRifJstl)Ow-_*AW)eWv-xBmY3C;=-oP!3v40U{|Zg6mG{gfJ}ujd zkh>CL0-jPO<|(W>>f%^6G2t+5Yzrtv>@|(#B1s8-SWx5e^l&lsU8V2?@DU}OnNnEm z_TDj6&5jTw%}j|Tcj+u3x&wQXGY4|B$kaOv$+etic2Ai*=x@1<(0)pRi9i`j1oX60 zoey{x3*HT8Wq&+o<>YEdbnfja!zx{#V7cAj;&r%$TpMssu54jn8HZw2$=*@Se{c#P@w~vra&_r zL!$v9*lShhVf|(zi2#oxD`tH@YyRTTZ7-7dEvQj*K6ANuBU*^8XQ=Q^-&xA%VXQ*bY44Tv3$ENiN~1rLtCL zqAqdJAw85%21L0h`F0aM#1A4SUn8Q9dn}qgiZDphhQSwK;P(25s!T|yA_?P`jltVK z6E-i5bfrmg5x9PR8COu+>DoUb2AdHAA5omqdhtcbo+j5}HW{K1=SIvqTX-=CK#^T| zhr=g7GgjaJpK0t8Zt{EOfZMR&BotJ5Ts$$cWL_ZskrbCb)AhB=jG+~Iz_ZaHG*kxN z0oN(c$P#&hi-gu&O7SUTvjaqPb&fxb29XSNpvPFu+cR>}+cft`kKt+fmJEQn<5 z#Z(+d9N0|t_#H(v*nDgw-BbwbNKtrEA;2_=T_;JbgcGDNd9X2@z?G-4Bg2zuzD~S_ zXd_j46%oj<)!0|}w|hNbV&kHL3{-KKnYt1GeF%&SbOy-Mvya!UyZXqSFQ=b5_4K+0 z&x203%z~{hh~}@GhK?=V_Vuy}i@x4^K+vt2|LgNabCIk1`uPVh&K7>SWzV$}RU6P6 zq#b@CJNyZ)C$8^`hp4kTwD;Dh>mVoKYI|LI)i`GRwN0mQzjWd0-j(vNT}MveL1Lv_ z^Da;MEhqNAlI!A+g_{%VfZ#H9a6>peY{hHK)-El2!v4^z=8sn&1ASXm$?vA&8Mk%k zP5485X3<9WLbdXjQN)&#Iw#$lcjTO<&HP!;k&Le>PZ)V{R{PGCpl!#NtZSR%=k!tdkB5+&a$&Fe-EE&O?jD_gVMEV>iAT$bOlV}}g$f%s9UX+Z`HO{Ys z*aQFo#RUKW_e3HIV{dhCbS`vwbOr7d(SCY#b;|gn2U&T$uTcOFWp-t5bYEm)bY(7L zX>QR0nJZ}Ugb6VhmsYGgE9}6^0ff&2s$t(-nXe%&&mEj$#$6?h4>bVv2W@g|GLQs* zvIn!#N(`@H11E!)%2Rn*05NoFZCC*F7xpy9;jombA2Y|NPR!F&AdHFuh@J&p-e1vb z2Q+s4(Tr;}DU_9100A`s0$2d>2*IZUQ@orHn7sY=xQgmvkN^p6Wp-t3s{ryC^DbCv zxn0#zNUVa}kc>>67}Wv7rvYA3|9DO3!tMTYza%wWx6TSM00K?`@(gqN)LoV$sR{nE?0$K3cMe$Zr@T_tq6O;RZPf01znv0we(O3ED$j!DmCOES8Xu;vz~i zNB|3MWpi_3XJ7&J8r-Q2REozIB&LHfnpNA_sF-720gTMK0Sv9LCI+|>Ut&PX_f-*l zAPh7B0#pF-5578p@UYCe2^~4RB#{bZK;HZ*DGRVRQr{ zA?Ljo!KT!4f-*kmmsIuu3vhC2ZDDeGE@@v9s;7R50kT`#S{$z!lV#1HHuV!5xw0n>Q+Kc(*3awGynp; z0PqMVO4Zdd%jYq4XJ&`8r~fnn402_5X>*eR@CIUOZk+>4TOpwFTQ7E=D&(vkbMu)L z0rdiSF@BG=@ZCiQM0@;at&=ScGXMfq0Pzi^vSqUe!0hmV+Dnm^Th3+x4|8vIa${v* zW@&820q_H#CrUW~Pm*~rYM8mCGt8&SV15Gl0#s9%50B{NBLgep_sq~GmWz5w(W+G9^dPcxhde&>Z$cXQ(9n1lh4 zyb6b4#l7POj&#X|!|6xC+I3n00X6^vS^)D2bY{ElOIfEV>iAT$c>;u_Yo58H(+R;yo#WVe8V za|8eYjRgPz_dy~FV{dhCbS`vwbOOzyi(W%KKezpb?hBiCD{V^{zOsKE;XF15`%1i1PGI}>@C?VI@-JP$Ge^agEmYcen>BWMs@ zZPIs=HpM$Lk~2G005NoFZBzjB6fO|dlp5X+SU-`9w-j{fo~Ku06=7E zV|0iB{TG)3T!M9yPej+{Tw-_B)3}6~0iOX88b9SX!CBn3<<|E1yQ;-h;5GVivBLMLUe^p3LUln%=i0wZ|!`kAo01Itpb8}&5QvviB z(FMEIy`uA%*0z1`K$E}A;hh2A42T)$$rCaIbT?>6>;SPlPz*5u0Z;(&54k0hcTW4= z`c}&gFD+3k!~hU&X=8G4b7f<1Ze(-O0QnVnU^&X5-Vv5ZDDv{(*Xfl0`N^~WOxJ+*ML^%!DlZYTT@>b0$u_vPg#tJ5Kl3xok& z^A)fcOZM|T^fBDNC3X#J4KM%!Pyh`E0kIErZ*_8GWnX4#Y_b9P6+IsbCQXleyLl9j z#@h4A3SI#xmjTk7rzj}s&<2Ig<%%Td6f*z>W&-^e{RY~7LGQ@CvAt)G&cFpDX00AQa4Q2ua zxBxM1X>Pax^c7%6yJe1>$9d+=FR`HPwT&48xB=DRqjVX)ep0W<&s zR{-+~Uy>ZMXq8SS@S1skbts*L01b3#a%Ev;umScKH-~hP{;9PXUHYvEAGU&i?E$y} zflKFW!&pV639u#;@B5Mb4l)1_Qvd@K4IxnM~LZK#$%G{&r;^o&hIu zZ3>Sco@*8>#6p!~w|8q4GXMl)0sR$3!GqB-DA9ohPs}9?v~{?Y0WU_>k2ow9i`n>; x(^kLn4doR$00EN#_ybLjG^`Q&VIU2yv$Bml`+firC;$N?00>|KP{Z2dumET7=Oq9D diff --git a/dbms/programs/server/data/system/text_log/202004_5998_5998_0/columns.txt b/dbms/programs/server/data/system/text_log/202004_5998_5998_0/columns.txt deleted file mode 100644 index 461d26792e1..00000000000 --- a/dbms/programs/server/data/system/text_log/202004_5998_5998_0/columns.txt +++ /dev/null @@ -1,14 +0,0 @@ -columns format version: 1 -12 columns: -`event_date` Date -`event_time` DateTime -`microseconds` UInt32 -`thread_name` LowCardinality(String) -`thread_id` UInt64 -`level` Enum8('Fatal' = 1, 'Critical' = 2, 'Error' = 3, 'Warning' = 4, 'Notice' = 5, 
'Information' = 6, 'Debug' = 7, 'Trace' = 8) -`query_id` String -`logger_name` LowCardinality(String) -`message` String -`revision` UInt32 -`source_file` LowCardinality(String) -`source_line` UInt64 diff --git a/dbms/programs/server/data/system/text_log/202004_5998_5998_0/count.txt b/dbms/programs/server/data/system/text_log/202004_5998_5998_0/count.txt deleted file mode 100644 index b3935607590..00000000000 --- a/dbms/programs/server/data/system/text_log/202004_5998_5998_0/count.txt +++ /dev/null @@ -1 +0,0 @@ -23 \ No newline at end of file diff --git a/dbms/programs/server/data/system/text_log/202004_5998_5998_0/minmax_event_date.idx b/dbms/programs/server/data/system/text_log/202004_5998_5998_0/minmax_event_date.idx deleted file mode 100644 index fc9f33a367f..00000000000 --- a/dbms/programs/server/data/system/text_log/202004_5998_5998_0/minmax_event_date.idx +++ /dev/null @@ -1 +0,0 @@ -GG \ No newline at end of file diff --git a/dbms/programs/server/data/system/text_log/202004_5998_5998_0/partition.dat b/dbms/programs/server/data/system/text_log/202004_5998_5998_0/partition.dat deleted file mode 100644 index 870b71ef44bae12efece0406d75f84029c34bbf6..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 4 LcmWd;Wo7^X0GR+S diff --git a/dbms/programs/server/data/system/text_log/202004_5998_5998_0/primary.idx b/dbms/programs/server/data/system/text_log/202004_5998_5998_0/primary.idx deleted file mode 100644 index 0ba049044b7..00000000000 --- a/dbms/programs/server/data/system/text_log/202004_5998_5998_0/primary.idx +++ /dev/null @@ -1 +0,0 @@ -G^G^ \ No newline at end of file diff --git a/dbms/programs/server/data/system/text_log/202004_5999_5999_0/checksums.txt b/dbms/programs/server/data/system/text_log/202004_5999_5999_0/checksums.txt deleted file mode 100644 index 1e447bba6581ada7b5e7dc8a9e616e7f72ad6436..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1178 zcmV;L1ZDeUXk}w-b9HTVAZBlJZDDjEc4cyNX>V>iAT$aBno}}L()*|b2loLHiQKA! 
zZUg`Ti3I=v_dy~FV{dhCbS`vwbOPvZe4NH(<2?+h%j6?;@(utFWp-t5bYEm)bY(7L zX>KG<5^KjtB_~1)!H2M({uoFB7>xxx$@+wApB5#aBffQ&cn>lF^agEmYcep(Cx^Cw zkbXg%Pl}9Z(T>ej05NoFZBzjB6f3HekS)aX*wiA!U(pgT8^QrFgtZ$@?LWUQ7=iq1 z;izSDQ~&`o00C40@CftBhH|QN-}>ea(j-NrM3DdqY-M(3Y@-136()DH29|63{Lkx9 zqNi?r-Tna#2p2S++fOkd#{DV`j9m2e3M~KuN&xZ^`mmTP!PZYL=a@8U5^7V%+mb)D<`Y0h0ju0~dZ^ zubkA`lX`^3)~Vvs761?^00AQa@dVdz0ptWO?4)uf?g>M*#&{-EQ2bjAF#rKj0Pqi)Uc=AMuYn&g zxUr(>2sa1-5N&B=a&L2GV{dL`bI$M&{cwf;00apU>O=)C!1Q6tGcB>!JU1v8d#8Ggs zhyV?6VRCe7bZKvHE@WYJ1R^2ly%xcy)Nz6`KIfNI_5cfTa%pX0a(OOkWOxk3_EoEC z1I%j>Wbl9oWDnT@3~_a3a(Q2AWV!(L6($<~Gz62}FS^UNktJrbp?m=hl~t!aQ&h>3 zx~8f(YTQt{4KM%!wg3$j0$>bsWp-(EjR5r+VrgzCmE^ax-Ze-dg#c!vjvC5b0Wi3) zV?;D0@hMZ4N5x8Zgl-Km00B?{4F&t4rvI697{cIsgHc0QdvAY7n0%0{UG0uZLrOh47;Q5GVivBLEF%0tC1K zF>GmWxB&DNJlYgrNG}eU`rRWWF^;TeegR-ja%1csk+s^*&zMDmh>^rr00A=q0agI> z38k?ET?IRPqWt$NOc;oEkpK;JXmVv?WUm4C6*o#8#2W)Kq0Aa}fo2D_DvbePg~5L3 zs|LYR>^IeA=8c$=4lw`^QUC)J4(^b diff --git a/dbms/programs/server/data/system/text_log/202004_5999_5999_0/columns.txt b/dbms/programs/server/data/system/text_log/202004_5999_5999_0/columns.txt deleted file mode 100644 index 461d26792e1..00000000000 --- a/dbms/programs/server/data/system/text_log/202004_5999_5999_0/columns.txt +++ /dev/null @@ -1,14 +0,0 @@ -columns format version: 1 -12 columns: -`event_date` Date -`event_time` DateTime -`microseconds` UInt32 -`thread_name` LowCardinality(String) -`thread_id` UInt64 -`level` Enum8('Fatal' = 1, 'Critical' = 2, 'Error' = 3, 'Warning' = 4, 'Notice' = 5, 'Information' = 6, 'Debug' = 7, 'Trace' = 8) -`query_id` String -`logger_name` LowCardinality(String) -`message` String -`revision` UInt32 -`source_file` LowCardinality(String) -`source_line` UInt64 diff --git a/dbms/programs/server/data/system/text_log/202004_5999_5999_0/count.txt b/dbms/programs/server/data/system/text_log/202004_5999_5999_0/count.txt deleted file mode 100644 index 3cacc0b93c9..00000000000 --- a/dbms/programs/server/data/system/text_log/202004_5999_5999_0/count.txt +++ /dev/null @@ -1 +0,0 @@ -12 \ No newline at end of file diff --git a/dbms/programs/server/data/system/text_log/202004_5999_5999_0/minmax_event_date.idx b/dbms/programs/server/data/system/text_log/202004_5999_5999_0/minmax_event_date.idx deleted file mode 100644 index fc9f33a367f..00000000000 --- a/dbms/programs/server/data/system/text_log/202004_5999_5999_0/minmax_event_date.idx +++ /dev/null @@ -1 +0,0 @@ -GG \ No newline at end of file diff --git a/dbms/programs/server/data/system/text_log/202004_5999_5999_0/partition.dat b/dbms/programs/server/data/system/text_log/202004_5999_5999_0/partition.dat deleted file mode 100644 index 870b71ef44bae12efece0406d75f84029c34bbf6..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 4 LcmWd;Wo7^X0GR+S diff --git a/dbms/programs/server/data/system/text_log/202004_5999_5999_0/primary.idx b/dbms/programs/server/data/system/text_log/202004_5999_5999_0/primary.idx deleted file mode 100644 index 53fe4d75d28..00000000000 --- a/dbms/programs/server/data/system/text_log/202004_5999_5999_0/primary.idx +++ /dev/null @@ -1 +0,0 @@ -G^G^ \ No newline at end of file diff --git a/dbms/programs/server/data/system/text_log/202004_6000_6000_0/checksums.txt b/dbms/programs/server/data/system/text_log/202004_6000_6000_0/checksums.txt deleted file mode 100644 index 7bf694ff17f63c39af23ece9f63c2d21d9acc363..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1180 
zcmV;N1Y`SSXk}w-b9HTVAZBlJZDDjEc4cyNX>V>iAT$b~&c&7~?Lnw*lM#gvri;Xa za0CDViv<7x_dy~FV{dhCbS`vwbOIBBc0S#{%h&xA_>!`+G06Z9Wp-t5bYEm)bY(7L zX>KGNe}o_zJ+{#li^38gQ&=wn7Sg;0f;GOJWwmr@H1S*fR1Y!$^agEmYceokZGP1G zo_(FIYg-w&G<{iA05NoFZBzjB6f&ce;aKv4)#*+pA&tiqJpKVJZxuFL(z$TU$-|*x zpL3F@Q~&`o00C40@Cb%3Cla2Axa=t3Z!!?(ARGV*Y-M(3Y@-136(zu&DilScO>#8a zSjyeAy8ZzR!rh{2=|Km`L5QiE+CUHo3M~KuN&xZa(l`@g@dMI2U0UH9hWFAAjsAgf6nA|13cBB+D06=7E zV|0iB{THwSq~h(hJArZ9HhwZRmm6650mT7Y!QX~WEI~Ti&Vv5ZDDv{(*Xfl0`N^~WOxJ+*ML^%!DlZYP(_Ka>x>9NO>ehv*A@fW&-^e*aY2fngE_q(@u*A?^ClmD1!m$ z2L9ZEPfP1osJDX_LQ>JjqZK*;0hIvw0|Hw2KGK=K)3f?&4*`J--vAIO00AQa4Q2ua zxBxM1X>Pax^b|tRE?}C%iVIH}=;(>^kbF@ASQn<^mY8v_*m60iDG6O%=2id!GXMcr z0P_jbLYP3fC(5AKlw4RmO7WnpBm0rnL=ZWkWiX%CfQAcS$zQXu{q0a#3+ zw9igw)V!ZnkHb$=`i>4U01r|C0}~G+00qVa_!NX~!SqfXq5W_FHcgIBKXL&ZAFk~h z)w}~2dJGhWaeMBi6f*z>U;+IVMH4LpcGy8%5GVivBLE0q0Z>)Ga3ugO^WoM2 diff --git a/dbms/programs/server/data/system/text_log/202004_6000_6000_0/columns.txt b/dbms/programs/server/data/system/text_log/202004_6000_6000_0/columns.txt deleted file mode 100644 index 461d26792e1..00000000000 --- a/dbms/programs/server/data/system/text_log/202004_6000_6000_0/columns.txt +++ /dev/null @@ -1,14 +0,0 @@ -columns format version: 1 -12 columns: -`event_date` Date -`event_time` DateTime -`microseconds` UInt32 -`thread_name` LowCardinality(String) -`thread_id` UInt64 -`level` Enum8('Fatal' = 1, 'Critical' = 2, 'Error' = 3, 'Warning' = 4, 'Notice' = 5, 'Information' = 6, 'Debug' = 7, 'Trace' = 8) -`query_id` String -`logger_name` LowCardinality(String) -`message` String -`revision` UInt32 -`source_file` LowCardinality(String) -`source_line` UInt64 diff --git a/dbms/programs/server/data/system/text_log/202004_6000_6000_0/count.txt b/dbms/programs/server/data/system/text_log/202004_6000_6000_0/count.txt deleted file mode 100644 index 9d607966b72..00000000000 --- a/dbms/programs/server/data/system/text_log/202004_6000_6000_0/count.txt +++ /dev/null @@ -1 +0,0 @@ -11 \ No newline at end of file diff --git a/dbms/programs/server/data/system/text_log/202004_6000_6000_0/minmax_event_date.idx b/dbms/programs/server/data/system/text_log/202004_6000_6000_0/minmax_event_date.idx deleted file mode 100644 index fc9f33a367f..00000000000 --- a/dbms/programs/server/data/system/text_log/202004_6000_6000_0/minmax_event_date.idx +++ /dev/null @@ -1 +0,0 @@ -GG \ No newline at end of file diff --git a/dbms/programs/server/data/system/text_log/202004_6000_6000_0/partition.dat b/dbms/programs/server/data/system/text_log/202004_6000_6000_0/partition.dat deleted file mode 100644 index 870b71ef44bae12efece0406d75f84029c34bbf6..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 4 LcmWd;Wo7^X0GR+S diff --git a/dbms/programs/server/data/system/text_log/202004_6000_6000_0/primary.idx b/dbms/programs/server/data/system/text_log/202004_6000_6000_0/primary.idx deleted file mode 100644 index 6ae118ad85b..00000000000 --- a/dbms/programs/server/data/system/text_log/202004_6000_6000_0/primary.idx +++ /dev/null @@ -1 +0,0 @@ -G^G¯^ \ No newline at end of file diff --git a/dbms/programs/server/data/system/text_log/202004_6001_6001_0/checksums.txt b/dbms/programs/server/data/system/text_log/202004_6001_6001_0/checksums.txt deleted file mode 100644 index 54c0f22d4ba7cb899c44d427a2f7c738f54a95c2..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1179 zcmV;M1Z4YTXk}w-b9HTVAZBlJZDDjEc4cyNX>V>iAT$a~vY0*L3UfNd7&#kOD*MEO zZv+4UiUj}w_dy~FV{dhCbS`vwbOIBBc0S#{%h&xA_>!`+G06Z9Wp-t5bYEm)bY(7L 
zX>KGNe}o_zJ+{#li^38gQ&=wn7Sg;0f;GOJWwmr@H1S*fR1Y!$^agEmYceokZGP1G zo_(FIYg-w&G<{iA05NoFZBzjB6f$8}3Cy(yR0+pe3|4kv$kzcZAyVb0u2Ngu%1;Xn z$*%S9Q~&`o00C40@Cb%3Cla2Axa=t3Z!!?(ARGV*Y-M(3Y@-136(vM~X z0vSG21>YMBnQlAR%pT{eNCCM6a=E|^jD0nf=7Thei-n3=3^4!!Pyp}`6UL*l7;b-o z!^7dKEgver01$0yV{&hEWn*t{WOL8}`4vaIO9dQ{jD2>Ad`5Uvge92)ERL4`5md0% zuNF)6AQ9>=WDzs~0aySHsR4o&ZE0?8VR&EC0RdP7@J(rCcmxpSY<8<3(OqXZE5uQ7 zuZRE*aA9(EX>@6CZZ2eDbOa(H=e-uerqpqQGCt>*RQ3Q1aB^vFVRCscX=Hc|#9umF zMEbFzDLEhX%-^ax01R<;Wpa66X=J(p^%W%@eVHnPJ|#kJd`Do*e}@kIa>GXMl-0{s`z1dDV)0&}P*HT2`x=gWdT3jwnR zmn|1|+cX`6-Q80`Tcf?`6*>R`l>qny(E&B?hY-Rre+-+7-}Rfd01zku0V4nnW&#Ab z05NQ7ZnyyS6iTwR6)BVAYPkE&`_F=($=U%}576LYmnWk(xL~Y#BA$J_RsaDr00CA2 z^9j;Im_XP^E$to>QSd|$?vVfubZByAVPvlX_7ym`8QKAV=T1Xcg<}w|wy-<_Sfwzk z{?yl}ug}RFh46%tmJ|IXl&1HX(mH``SvP%dU z*7_Ix|C*YiCM6COGXMm|1N{|6|9reQ!J1E$>HKCjRVPtp0Wig49zpB?5RY%|LiZ-= tW`q?u00EN#_yhZKc1847)l({ut->AzM@s+@C;$N?00>?IP*uKgB>)G|*m?i} diff --git a/dbms/programs/server/data/system/text_log/202004_6001_6001_0/columns.txt b/dbms/programs/server/data/system/text_log/202004_6001_6001_0/columns.txt deleted file mode 100644 index 461d26792e1..00000000000 --- a/dbms/programs/server/data/system/text_log/202004_6001_6001_0/columns.txt +++ /dev/null @@ -1,14 +0,0 @@ -columns format version: 1 -12 columns: -`event_date` Date -`event_time` DateTime -`microseconds` UInt32 -`thread_name` LowCardinality(String) -`thread_id` UInt64 -`level` Enum8('Fatal' = 1, 'Critical' = 2, 'Error' = 3, 'Warning' = 4, 'Notice' = 5, 'Information' = 6, 'Debug' = 7, 'Trace' = 8) -`query_id` String -`logger_name` LowCardinality(String) -`message` String -`revision` UInt32 -`source_file` LowCardinality(String) -`source_line` UInt64 diff --git a/dbms/programs/server/data/system/text_log/202004_6001_6001_0/count.txt b/dbms/programs/server/data/system/text_log/202004_6001_6001_0/count.txt deleted file mode 100644 index 9d607966b72..00000000000 --- a/dbms/programs/server/data/system/text_log/202004_6001_6001_0/count.txt +++ /dev/null @@ -1 +0,0 @@ -11 \ No newline at end of file diff --git a/dbms/programs/server/data/system/text_log/202004_6001_6001_0/minmax_event_date.idx b/dbms/programs/server/data/system/text_log/202004_6001_6001_0/minmax_event_date.idx deleted file mode 100644 index fc9f33a367f..00000000000 --- a/dbms/programs/server/data/system/text_log/202004_6001_6001_0/minmax_event_date.idx +++ /dev/null @@ -1 +0,0 @@ -GG \ No newline at end of file diff --git a/dbms/programs/server/data/system/text_log/202004_6001_6001_0/partition.dat b/dbms/programs/server/data/system/text_log/202004_6001_6001_0/partition.dat deleted file mode 100644 index 870b71ef44bae12efece0406d75f84029c34bbf6..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 4 LcmWd;Wo7^X0GR+S diff --git a/dbms/programs/server/data/system/text_log/202004_6001_6001_0/primary.idx b/dbms/programs/server/data/system/text_log/202004_6001_6001_0/primary.idx deleted file mode 100644 index 72220583214..00000000000 --- a/dbms/programs/server/data/system/text_log/202004_6001_6001_0/primary.idx +++ /dev/null @@ -1 +0,0 @@ -Gů^Gȯ^ \ No newline at end of file diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 154d7c911cf..17a3cf88ecd 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -578,6 +578,8 @@ target_include_directories (clickhouse_common_io PUBLIC ${DBMS_INCLUDE_DIR}) target_include_directories (clickhouse_common_io SYSTEM BEFORE PUBLIC ${DOUBLE_CONVERSION_INCLUDE_DIR}) +target_include_directories (clickhouse_common_io SYSTEM BEFORE PUBLIC 
${MSGPACK_INCLUDE_DIR}) + if (ENABLE_TESTS AND USE_GTEST) macro (grep_gtest_sources BASE_DIR DST_VAR) # Cold match files that are not in tests/ directories diff --git a/dbms/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp b/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp similarity index 100% rename from dbms/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp rename to src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp diff --git a/dbms/src/Processors/Formats/Impl/MsgPackRowInputFormat.h b/src/Processors/Formats/Impl/MsgPackRowInputFormat.h similarity index 100% rename from dbms/src/Processors/Formats/Impl/MsgPackRowInputFormat.h rename to src/Processors/Formats/Impl/MsgPackRowInputFormat.h diff --git a/dbms/src/Processors/Formats/Impl/MsgPackRowOutputFormat.cpp b/src/Processors/Formats/Impl/MsgPackRowOutputFormat.cpp similarity index 100% rename from dbms/src/Processors/Formats/Impl/MsgPackRowOutputFormat.cpp rename to src/Processors/Formats/Impl/MsgPackRowOutputFormat.cpp diff --git a/dbms/src/Processors/Formats/Impl/MsgPackRowOutputFormat.h b/src/Processors/Formats/Impl/MsgPackRowOutputFormat.h similarity index 100% rename from dbms/src/Processors/Formats/Impl/MsgPackRowOutputFormat.h rename to src/Processors/Formats/Impl/MsgPackRowOutputFormat.h diff --git a/dbms/tests/queries/0_stateless/01098_msgpack_format.reference b/tests/queries/0_stateless/01098_msgpack_format.reference similarity index 100% rename from dbms/tests/queries/0_stateless/01098_msgpack_format.reference rename to tests/queries/0_stateless/01098_msgpack_format.reference diff --git a/dbms/tests/queries/0_stateless/01098_msgpack_format.sh b/tests/queries/0_stateless/01098_msgpack_format.sh similarity index 100% rename from dbms/tests/queries/0_stateless/01098_msgpack_format.sh rename to tests/queries/0_stateless/01098_msgpack_format.sh From 092479397a0d3d7cf82728d16edc2054433ba932 Mon Sep 17 00:00:00 2001 From: Avogar Date: Sat, 4 Apr 2020 17:07:11 +0300 Subject: [PATCH 104/484] Remove extra line. 
--- src/Formats/FormatFactory.h | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Formats/FormatFactory.h b/src/Formats/FormatFactory.h index 6e357412571..9199ed89890 100644 --- a/src/Formats/FormatFactory.h +++ b/src/Formats/FormatFactory.h @@ -175,7 +175,6 @@ void registerOutputFormatProcessorTemplate(FormatFactory & factory); void registerInputFormatProcessorMsgPack(FormatFactory & factory); void registerOutputFormatProcessorMsgPack(FormatFactory & factory); - /// File Segmentation Engines for parallel reading void registerFileSegmentationEngineTabSeparated(FormatFactory & factory); From 73b0f8db8c327a1d63cc7ebcc56087a3f9866dae Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 4 Apr 2020 19:17:01 +0300 Subject: [PATCH 105/484] Added results from Denis Glazachev --- website/benchmark_hardware.html | 52 +++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) diff --git a/website/benchmark_hardware.html b/website/benchmark_hardware.html index ac320cd5415..ab75e7ca063 100644 --- a/website/benchmark_hardware.html +++ b/website/benchmark_hardware.html @@ -2376,6 +2376,57 @@ var results = [0.032, 0.009, 0.007] ] }, + + { + "system": "MacBook Pro 2018, 2.7 GHz Quad-Core Intel Core i7, 16 GiB RAM, 1TB SSD", + "time": "2020-04-04 00:00:00", + "result": + [ +[0.002, 0.002, 0.002], +[0.028, 0.031, 0.025], +[0.060, 0.058, 0.047], +[0.125, 0.101, 0.070], +[0.164, 0.185, 0.168], +[0.672, 0.568, 0.557], +[0.072, 0.038, 0.037], +[0.031, 0.021, 0.021], +[0.849, 0.836, 0.820], +[0.941, 0.938, 0.942], +[0.423, 0.444, 0.457], +[0.617, 0.556, 0.555], +[1.761, 1.694, 1.641], +[2.190, 2.277, 2.226], +[1.964, 1.895, 1.934], +[1.956, 1.978, 1.884], +[6.029, 5.977, 5.975], +[3.372, 3.436, 3.439], +[12.883, 12.778, 12.572], +[0.116, 0.080, 0.076], +[1.874, 1.372, 1.467], +[2.321, 2.356, 2.238], +[5.304, 4.955, 4.912], +[2.474, 1.993, 2.033], +[0.744, 0.708, 0.719], +[0.562, 0.568, 0.602], +[0.737, 0.742, 0.719], +[1.547, 1.580, 1.583], +[3.074, 2.665, 2.697], +[5.466, 5.560, 5.693], +[1.658, 1.562, 1.543], +[2.935, 2.802, 2.743], +[19.141, 19.674, 19.212], +[8.738, 8.334, 8.302], +[8.268, 8.276, 8.364], +[3.311, 3.288, 3.243], +[0.182, 0.169, 0.169], +[0.075, 0.066, 0.066], +[0.066, 0.057, 0.053], +[0.353, 0.324, 0.327], +[0.030, 0.018, 0.018], +[0.018, 0.015, 0.015], +[0.011, 0.007, 0.007] + ] + }, ]; @@ -2810,6 +2861,7 @@ Results for AMD EPYC 7502P are from Kostiantyn Velychkovskyi.
    Results for Pinebook Pro are from Aleksey R. @kITerE.
    Results for AMD Ryzen are from Alexey Milovidov. Firefox was running in background.
    Results for Azure E32s are from Piotr Maśko.
    +Results for MacBook Pro are from Denis Glazachev. MacOS Catalina Version 10.15.4 (19E266). For "drop caches", the "Free Up RAM" in CleanMyMac is used.
    Xeon Gold 6230 server is using 4 x SAMSUNG datacenter class SSD in RAID-10.
    Results for Yandex Managed ClickHouse for "cold cache" are biased and should not be compared, because cache was not flushed for every next query.
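
The deadlock addressed by PATCH 108 below has a classic shape: DatabaseCatalog::detachDatabase held databases_mutex while it invoked the database's shutdown(), and StorageMaterializedView::shutdown() called back into DatabaseCatalog::removeDependency, which takes the same non-recursive mutex, so the thread blocked on a lock it already owned (that is the self-deadlock visible in the backtrace). What follows is a minimal C++17 sketch of the pattern and of the narrowed-lock-scope fix; Catalog, shutdown_hooks and the detachDatabase* names are hypothetical stand-ins for illustration, not the real ClickHouse classes.

#include <functional>
#include <map>
#include <mutex>
#include <string>

struct Catalog
{
    std::mutex mutex;  // non-recursive, like databases_mutex
    std::map<std::string, std::function<void()>> shutdown_hooks;

    void removeDependency(const std::string & /*table*/)
    {
        std::lock_guard lock{mutex};  // re-entry point: blocks forever if this thread already holds `mutex`
        // ... erase dependency info ...
    }

    // Buggy shape: the shutdown hook runs while `mutex` is held, so a hook
    // that calls back into removeDependency() self-deadlocks.
    void detachDatabaseBuggy(const std::string & name)
    {
        std::lock_guard lock{mutex};
        auto hook = shutdown_hooks.at(name);
        shutdown_hooks.erase(name);
        hook();  // hook() -> removeDependency() -> lock{mutex} -> deadlock
    }

    // Fixed shape, mirroring the patch: only the map manipulation happens
    // under the lock; the hook is invoked after the lock is released.
    void detachDatabaseFixed(const std::string & name)
    {
        std::function<void()> hook;
        {
            std::lock_guard lock{mutex};
            hook = shutdown_hooks.at(name);
            shutdown_hooks.erase(name);
        }
        hook();  // safe: `mutex` is no longer held
    }
};

The actual patch applies the same scoping idea: everything that touches the databases map stays under databases_mutex, while db->shutdown() runs only after the lock scope has closed.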
From e635b0e9eb39dbb38c52e6d54d99115f527eed10 Mon Sep 17 00:00:00 2001
From: Avogar
Date: Sat, 4 Apr 2020 20:04:41 +0300
Subject: [PATCH 106/484] Fix build error

---
 src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp b/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp
index 32f89c816c5..ee32aeb6bfe 100644
--- a/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp
+++ b/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp
@@ -31,7 +31,7 @@ bool MsgPackRowInputFormat::readObject()
     if (buf.eof())
         return false;
     PeekableReadBufferCheckpoint checkpoint{buf};
-    size_t offset;
+    size_t offset = 0;
     bool need_more_data = true;
     while (need_more_data)
     {

From 21532f6a6d46107622a5f68754505cb977086c21 Mon Sep 17 00:00:00 2001
From: Vxider
Date: Sun, 5 Apr 2020 01:33:51 +0800
Subject: [PATCH 107/484] parallel insert for materialized view

---
 src/Storages/StorageMaterializedView.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/Storages/StorageMaterializedView.h b/src/Storages/StorageMaterializedView.h
index 6284f791f4f..357d3858d0d 100644
--- a/src/Storages/StorageMaterializedView.h
+++ b/src/Storages/StorageMaterializedView.h
@@ -31,6 +31,7 @@ public:
     bool supportsPrewhere() const override { return getTargetTable()->supportsPrewhere(); }
     bool supportsFinal() const override { return getTargetTable()->supportsFinal(); }
     bool supportsIndexForIn() const override { return getTargetTable()->supportsIndexForIn(); }
+    bool supportsParallelInsert() const override { return getTargetTable()->supportsParallelInsert(); }
     bool mayBenefitFromIndexForIn(const ASTPtr & left_in_operand, const Context & query_context) const override
     {
         return getTargetTable()->mayBenefitFromIndexForIn(left_in_operand, query_context);
     }

From e7bbd400c7ba4ebc62c664ebc49cc2b7b8e8e3ea Mon Sep 17 00:00:00 2001
From: Azat Khuzhin
Date: Sun, 5 Apr 2020 14:00:11 +0300
Subject: [PATCH 108/484] Fix deadlock on failed database attach at start with materialized view

This is not a problem for a plain DROP DATABASE query, since it first removes the tables.

(gdb) bt
0  __lll_lock_wait () at ../sysdeps/unix/sysv/linux/x86_64/lowlevellock.S:103
1  0x00007f353c262774 in __GI___pthread_mutex_lock (mutex=mutex@entry=0x11049288 ) at ../nptl/pthread_mutex_lock.c:80
2  0x0000000004fd5d1e in DB::pthread_mutex_lock (arg=arg@entry=0x11049288 ) at ../src/Common/ThreadFuzzer.cpp:253
3  0x000000000c70b3a9 in std::__1::__libcpp_mutex_lock (__m=__m@entry=0x11049288 ) at ../contrib/libcxx/include/__threading_support:322
4  std::__1::mutex::lock (this=this@entry=0x11049288 ) at ../contrib/libcxx/src/mutex.cpp:33
5  0x0000000008acd4e2 in std::__1::lock_guard::lock_guard (__m=..., this=) at ../contrib/libcxx/include/__mutex_base:90
6  DB::DatabaseCatalog::removeDependency (this=this@entry=0x11049280 , from=..., where=...) at ../src/Interpreters/DatabaseCatalog.cpp:388
7  0x000000000975044b in DB::StorageMaterializedView::shutdown (this=0x7f338d178a00) at ../src/Storages/StorageMaterializedView.cpp:362
8  0x0000000008a79602 in DB::DatabaseWithOwnTablesBase::shutdown (this=0x7f353be3cc60) at ../contrib/libcxx/include/__tree:184
9  0x0000000009546196 in DB::DatabaseWithDictionaries::shutdown (this=0x7f353be3cc60) at ../src/Databases/DatabaseWithDictionaries.cpp:265
10 0x0000000008acecdb in DB::DatabaseCatalog::detachDatabase (this=0x11049280 , database_name=..., drop=drop@entry=false, check_empty=check_empty@entry=false) at ../contrib/libcxx/include/memory:3826
11 0x0000000004bf0aa5 in DB::InterpreterCreateQuery::createDatabase (this=0x7ffd6e1bab80, create=...) at ../src/Interpreters/InterpreterCreateQuery.cpp:162
12 0x0000000008b04327 in DB::InterpreterCreateQuery::execute (this=this@entry=0x7ffd6e1bab80) at ../src/Interpreters/InterpreterCreateQuery.cpp:722
13 0x0000000008d0fdaa in DB::executeCreateQuery (has_force_restore_data_flag=false, file_name=..., database=..., context=..., query=...) at ../src/Interpreters/loadMetadata.cpp:48
14 DB::loadDatabase (context=..., database=..., database_path=..., force_restore_data=) at ../src/Interpreters/loadMetadata.cpp:72
15 0x0000000008d103c3 in DB::loadMetadata (context=...) at ../src/Interpreters/loadMetadata.cpp:111
16 0x0000000004f4d25e in DB::Server::main (this=) at ../contrib/libcxx/include/memory:2582
17 0x000000000bbc8963 in Poco::Util::Application::run (this=this@entry=0x7ffd6e1bcc60) at ../contrib/poco/Util/src/Application.cpp:334
18 0x0000000004ffe1de in DB::Server::run (this=0x7ffd6e1bcc60) at ../programs/server/Server.cpp:178
19 0x0000000004ff36cc in mainEntryClickHouseServer (argc=3, argv=0x7f353be58bc0) at ../programs/server/Server.cpp:1060
20 0x0000000004f49b99 in main (argc_=, argv_=) at ../contrib/libcxx/include/vector:655
---
 src/Interpreters/DatabaseCatalog.cpp | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/src/Interpreters/DatabaseCatalog.cpp b/src/Interpreters/DatabaseCatalog.cpp
index 6c860029148..3a2adc15355 100644
--- a/src/Interpreters/DatabaseCatalog.cpp
+++ b/src/Interpreters/DatabaseCatalog.cpp
@@ -244,14 +244,17 @@ DatabasePtr DatabaseCatalog::detachDatabase(const String & database_name, bool d
     if (database_name == TEMPORARY_DATABASE)
         throw Exception("Cannot detach database with temporary tables.", ErrorCodes::DATABASE_ACCESS_DENIED);

-    std::lock_guard lock{databases_mutex};
-    assertDatabaseExistsUnlocked(database_name);
-    auto db = databases.find(database_name)->second;
+    std::shared_ptr db;
+    {
+        std::lock_guard lock{databases_mutex};
+        assertDatabaseExistsUnlocked(database_name);
+        db = databases.find(database_name)->second;

-    if (check_empty && !db->empty(*global_context))
-        throw Exception("New table appeared in database being dropped or detached. Try again.", ErrorCodes::DATABASE_NOT_EMPTY);
+        if (check_empty && !db->empty(*global_context))
+            throw Exception("New table appeared in database being dropped or detached.
Try again.", ErrorCodes::DATABASE_NOT_EMPTY); - databases.erase(database_name); + databases.erase(database_name); + } db->shutdown(); From 646f409b8e579ab0a2b3623320ecb551a024a3df Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 6 Apr 2020 02:51:26 +0300 Subject: [PATCH 109/484] Fix "check style" tool --- base/daemon/BaseDaemon.cpp | 1 - utils/check-style/check-duplicate-includes.sh | 2 +- utils/check-style/check-style | 16 +- utils/compressor/CMakeLists.txt | 3 - utils/compressor/mutator.cpp | 406 ------------------ utils/iotest/iotest.cpp | 2 +- utils/iotest/iotest_aio.cpp | 3 - 7 files changed, 10 insertions(+), 423 deletions(-) delete mode 100644 utils/compressor/mutator.cpp diff --git a/base/daemon/BaseDaemon.cpp b/base/daemon/BaseDaemon.cpp index dc70d06619f..c150dc03014 100644 --- a/base/daemon/BaseDaemon.cpp +++ b/base/daemon/BaseDaemon.cpp @@ -12,7 +12,6 @@ #include #include -#include #include #include #include diff --git a/utils/check-style/check-duplicate-includes.sh b/utils/check-style/check-duplicate-includes.sh index ecef0c76bad..df843ead623 100755 --- a/utils/check-style/check-duplicate-includes.sh +++ b/utils/check-style/check-duplicate-includes.sh @@ -3,4 +3,4 @@ ROOT_PATH=$(git rev-parse --show-toplevel) # Find duplicate include directives -find $ROOT_PATH/dbms -name '*.h' -or -name '*.cpp' | while read file; do grep -P '^#include ' $file | sort | uniq -c | grep -v -P '^\s+1\s' && echo $file; done +find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' | while read file; do grep -P '^#include ' $file | sort | uniq -c | grep -v -P '^\s+1\s' && echo $file; done diff --git a/utils/check-style/check-style b/utils/check-style/check-style index 471488287ab..3fd870e179e 100755 --- a/utils/check-style/check-style +++ b/utils/check-style/check-style @@ -15,7 +15,7 @@ ROOT_PATH=$(git rev-parse --show-toplevel) EXCLUDE_DIRS='build/|integration/|widechar_width/|glibc-compatibility/|memcpy/|consistent-hashing' -find $ROOT_PATH/{dbms,base} -name '*.h' -or -name '*.cpp' 2>/dev/null | +find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' 2>/dev/null | grep -vP $EXCLUDE_DIRS | xargs grep $@ -P '((class|struct|namespace|enum|if|for|while|else|throw|switch).*|\)(\s*const)?(\s*override)?\s*)\{$|\s$|\t|^ {1,3}[^\* ]\S|\t|^\s*(if|else if|if constexpr|else if constexpr|for|while|catch|switch)\(|\( [^\s\\]|\S \)' | # a curly brace not in a new line, but not for the case of C++11 init or agg. initialization | trailing whitespace | number of ws not a multiple of 4, but not in the case of comment continuation | a tab character | missing whitespace after for/if/while... 
before opening brace | whitespaces inside braces @@ -23,7 +23,7 @@ find $ROOT_PATH/{dbms,base} -name '*.h' -or -name '*.cpp' 2>/dev/null | # single-line comment | continuation of a multiline comment | a typical piece of embedded shell code | something like ending of raw string literal # // namespace comments are unneeded -find $ROOT_PATH/{dbms,base} -name '*.h' -or -name '*.cpp' 2>/dev/null | +find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' 2>/dev/null | grep -vP $EXCLUDE_DIRS | xargs grep $@ -P '}\s*//+\s*namespace\s*' @@ -31,23 +31,23 @@ find $ROOT_PATH/{dbms,base} -name '*.h' -or -name '*.cpp' 2>/dev/null | find -L $ROOT_PATH -type l 2>/dev/null | grep -v contrib && echo "^ Broken symlinks found" # Double whitespaces -find $ROOT_PATH/{dbms,base} -name '*.h' -or -name '*.cpp' 2>/dev/null | while read i; do $ROOT_PATH/utils/check-style/double-whitespaces.pl < $i || echo -e "^ File $i contains double whitespaces\n"; done +find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' 2>/dev/null | while read i; do $ROOT_PATH/utils/check-style/double-whitespaces.pl < $i || echo -e "^ File $i contains double whitespaces\n"; done # Unused ErrorCodes # NOTE: to fix automatically, replace echo with: # sed -i "/extern const int $code/d" $file -find $ROOT_PATH/{dbms,base} -name '*.h' -or -name '*.cpp' | xargs grep -l -P 'extern const int [_A-Z]+' | while read file; do grep -P 'extern const int [_A-Z]+;' $file | sed -r -e 's/^.*?extern const int ([_A-Z]+);.*?$/\1/' | while read code; do grep -q "ErrorCodes::$code" $file || echo "ErrorCode $code is defined but not used in file $file"; done; done +find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' | xargs grep -l -P 'extern const int [_A-Z]+' | while read file; do grep -P 'extern const int [_A-Z]+;' $file | sed -r -e 's/^.*?extern const int ([_A-Z]+);.*?$/\1/' | while read code; do grep -q "ErrorCodes::$code" $file || echo "ErrorCode $code is defined but not used in file $file"; done; done # Undefined ErrorCodes # NOTE: to fix automatically, replace echo with: # ( grep -q -F 'namespace ErrorCodes' $file && sed -i -r "0,/(\s*)extern const int [_A-Z]+/s//\1extern const int $code;\n&/" $file || awk '{ print; if (ns == 1) { ns = 2 }; if (ns == 2) { ns = 0; print "namespace ErrorCodes\n{\n extern const int '$code';\n}" } }; /namespace DB/ { ns = 1; };' < $file > ${file}.tmp && mv ${file}.tmp $file ) -find $ROOT_PATH/{dbms,base} -name '*.h' -or -name '*.cpp' | xargs grep -l -P 'ErrorCodes::[_A-Z]+' | while read file; do grep -P 'ErrorCodes::[_A-Z]+' $file | sed -r -e 's/^.*?ErrorCodes::([_A-Z]+).*?$/\1/' | while read code; do grep -q "extern const int $code" $file || echo "ErrorCode $code is used in file $file but not defined"; done; done +find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' | xargs grep -l -P 'ErrorCodes::[_A-Z]+' | while read file; do grep -P 'ErrorCodes::[_A-Z]+' $file | sed -r -e 's/^.*?ErrorCodes::([_A-Z]+).*?$/\1/' | while read code; do grep -q "extern const int $code" $file || echo "ErrorCode $code is used in file $file but not defined"; done; done # Duplicate ErrorCodes -find $ROOT_PATH/{dbms,base} -name '*.h' -or -name '*.cpp' | xargs grep -l -P 'ErrorCodes::[_A-Z]+' | while read file; do grep -P 'extern const int [_A-Z]+;' $file | sort | uniq -c | grep -v -P ' +1 ' && echo "Duplicate ErrorCode in file $file"; done +find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' | xargs grep -l -P 'ErrorCodes::[_A-Z]+' | while read file; do grep -P 
'extern const int [_A-Z]+;' $file | sort | uniq -c | grep -v -P ' +1 ' && echo "Duplicate ErrorCode in file $file"; done # Three or more consecutive empty lines -find $ROOT_PATH/{dbms,base} -name '*.h' -or -name '*.cpp' | while read file; do awk '/^$/ { ++i; if (i > 2) { print "More than two consecutive empty lines in file '$file'" } } /./ { i = 0 }' $file; done +find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' | while read file; do awk '/^$/ { ++i; if (i > 2) { print "More than two consecutive empty lines in file '$file'" } } /./ { i = 0 }' $file; done # Broken XML files (requires libxml2-utils) -find $ROOT_PATH/{dbms,base} -name '*.xml' | xargs xmllint --noout --nonet +find $ROOT_PATH/{src,base,programs,utils} -name '*.xml' | xargs xmllint --noout --nonet diff --git a/utils/compressor/CMakeLists.txt b/utils/compressor/CMakeLists.txt index 3498640acd1..df32330a137 100644 --- a/utils/compressor/CMakeLists.txt +++ b/utils/compressor/CMakeLists.txt @@ -1,5 +1,2 @@ -add_executable (mutator mutator.cpp) -target_link_libraries(mutator PRIVATE clickhouse_common_io) - add_executable (decompress_perf decompress_perf.cpp) target_link_libraries(decompress_perf PRIVATE dbms ${LZ4_LIBRARY}) diff --git a/utils/compressor/mutator.cpp b/utils/compressor/mutator.cpp deleted file mode 100644 index 13c80c292e2..00000000000 --- a/utils/compressor/mutator.cpp +++ /dev/null @@ -1,406 +0,0 @@ -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -/** Quick and dirty implementation of data scrambler. - * - * The task is to replace the data with pseudorandom values. - * But with keeping some probability distributions - * and with maintaining the same compression ratio. - * - * The solution is to operate directly on compressed LZ4 stream. - * The stream consists of independent compressed blocks. - * Each block is a stream of "literals" and "matches". - * Liteal is an instruction to literally put some following bytes, - * and match is an instruction to copy some bytes that was already seen before. - * - * We get literals and apply some scramble operation on it. - * But we keep literal length and matches without changes. - * - * That's how we get pseudorandom data but with keeping - * all repetitive patterns and maintaining the same compression ratio. - * - * Actually, the compression ratio, if you decompress scrambled data and compress again - * become slightly worse, because LZ4 use simple match finder based on value of hash function, - * and it can find different matches due to collisions in hash function. - * - * Scramble operation replace literals with pseudorandom bytes, - * but with some heuristics to keep some sort of data structure. - * - * It's in question, is it scramble data enough and while is it safe to publish scrambled data. - * In general, you should assume that it is not safe. 
- */ - - -#define ML_BITS 4 -#define ML_MASK ((1U<(src); - UInt8 * end = pos + length; - - while (pos < end) - { - if (pos + strlen("https") <= end && 0 == memcmp(pos, "https", strlen("https"))) - { - pos += strlen("https"); - continue; - } - - if (pos + strlen("http") <= end && 0 == memcmp(pos, "http", strlen("http"))) - { - pos += strlen("http"); - continue; - } - - if (pos + strlen("www") <= end && 0 == memcmp(pos, "www", strlen("www"))) - { - pos += strlen("www"); - continue; - } - - if (*pos >= '1' && *pos <= '9') - *pos = rand(generator, '1', '9'); - else if (*pos >= 'a' && *pos <= 'z') - *pos = rand(generator, 'a', 'z'); - else if (*pos >= 'A' && *pos <= 'Z') - *pos = rand(generator, 'A', 'Z'); - else if (*pos >= 0x80 && *pos <= 0xBF) - *pos = rand(generator, *pos & 0xF0U, *pos | 0x0FU); - else if (*pos == '\\') - ++pos; - - ++pos; - } - - pos = static_cast(src); - while (pos < end) - { - if (pos + 3 <= end - && isAlphaASCII(pos[0]) - && !isAlphaASCII(pos[1]) && pos[1] != '\\' && pos[1] >= 0x20 - && isAlphaASCII(pos[2])) - { - auto res = rand(generator, 0, 3); - if (res == 2) - { - std::swap(pos[0], pos[1]); - } - else if (res == 3) - std::swap(pos[1], pos[2]); - - pos += 3; - } - else if (pos + 5 <= end - && pos[0] >= 0xC0 && pos[0] <= 0xDF && pos[1] >= 0x80 && pos[1] <= 0xBF - && pos[2] >= 0x20 && pos[2] < 0x80 && !isAlphaASCII(pos[2]) - && pos[3] >= 0xC0 && pos[3] <= 0xDF && pos[4] >= 0x80 && pos[4] <= 0xBF) - { - auto res = rand(generator, 0, 3); - if (res == 2) - { - std::swap(pos[1], pos[2]); - std::swap(pos[0], pos[1]); - } - else if (res == 3) - { - std::swap(pos[3], pos[2]); - std::swap(pos[4], pos[3]); - } - - pos += 5; - } - else - ++pos; - } -} - - -static void LZ4_copy8(void* dst, const void* src) -{ - memcpy(dst,src,8); -} - -/* customized variant of memcpy, which can overwrite up to 8 bytes beyond dstEnd */ -static void LZ4_wildCopy(void* dstPtr, const void* srcPtr, void* dstEnd) -{ - UInt8* d = (UInt8*)dstPtr; - const UInt8* s = (const UInt8*)srcPtr; - UInt8* const e = (UInt8*)dstEnd; - - do { LZ4_copy8(d,s); d+=8; s+=8; } while (d>ML_BITS)) == RUN_MASK) { - unsigned s; - do { - s = *ip++; - length += s; - } while (s==255); - } - - /* copy literals */ - cpy = op+length; - if (cpy>oend-WILDCOPYLENGTH) - { - if (cpy != oend) goto _output_error; /* Error : block decoding must stop exactly there */ - mutate(generator, ip, length); - memcpy(op, ip, length); - ip += length; - op += length; - break; /* Necessarily EOF, due to parsing restrictions */ - } - mutate(generator, ip, cpy - op); - LZ4_wildCopy(op, ip, cpy); - ip += length; op = cpy; - - /* get offset */ - offset = LZ4_read16(ip); ip+=2; - match = op - offset; - LZ4_write32(op, (UInt32)offset); /* costs ~1%; silence an msan warning when offset==0 */ - - /* get matchlength */ - length = token & ML_MASK; - if (length == ML_MASK) { - unsigned s; - do { - s = *ip++; - length += s; - } while (s==255); - } - length += MINMATCH; - - /* copy match within block */ - cpy = op + length; - if (unlikely(offset<8)) { - const int dec64 = dec64table[offset]; - op[0] = match[0]; - op[1] = match[1]; - op[2] = match[2]; - op[3] = match[3]; - match += dec32table[offset]; - memcpy(op+4, match, 4); - match -= dec64; - } else { LZ4_copy8(op, match); match+=8; } - op += 8; - - if (unlikely(cpy>oend-12)) { - UInt8* const oCopyLimit = oend-(WILDCOPYLENGTH-1); - if (cpy > oend-LASTLITERALS) goto _output_error; /* Error : last LASTLITERALS bytes must be literals (uncompressed) */ - if (op < oCopyLimit) { - LZ4_wildCopy(op, match, oCopyLimit); 
- match += oCopyLimit - op; - op = oCopyLimit; - } - while (op16) LZ4_wildCopy(op+8, match+8, cpy); - } - op=cpy; /* correction */ - } - - return (int) (((const char*)ip)-source); /* Nb of input bytes read */ - - /* Overflow error detected */ -_output_error: - return (int) (-(((const char*)ip)-source))-1; -} - - -namespace DB -{ - -namespace ErrorCodes -{ - extern const int UNKNOWN_COMPRESSION_METHOD; - extern const int TOO_LARGE_SIZE_COMPRESSED; - extern const int CANNOT_DECOMPRESS; -} - -class MutatingCompressedReadBufferBase -{ -protected: - ReadBuffer * compressed_in; - - /// If 'compressed_in' buffer has whole compressed block - then use it. Otherwise copy parts of data to 'own_compressed_buffer'. - PODArray own_compressed_buffer; - /// Points to memory, holding compressed block. - char * compressed_buffer = nullptr; - - size_t readCompressedData(size_t & size_decompressed, size_t & size_compressed_without_checksum) - { - if (compressed_in->eof()) - return 0; - - CityHash_v1_0_2::uint128 checksum; - compressed_in->readStrict(reinterpret_cast(&checksum), sizeof(checksum)); - - own_compressed_buffer.resize(COMPRESSED_BLOCK_HEADER_SIZE); - compressed_in->readStrict(&own_compressed_buffer[0], COMPRESSED_BLOCK_HEADER_SIZE); - - UInt8 method = own_compressed_buffer[0]; /// See CompressedWriteBuffer.h - - size_t & size_compressed = size_compressed_without_checksum; - - if (method == static_cast(CompressionMethodByte::LZ4) || - method == static_cast(CompressionMethodByte::ZSTD) || - method == static_cast(CompressionMethodByte::NONE)) - { - size_compressed = unalignedLoad(&own_compressed_buffer[1]); - size_decompressed = unalignedLoad(&own_compressed_buffer[5]); - } - else - throw Exception("Unknown compression method: " + toString(method), ErrorCodes::UNKNOWN_COMPRESSION_METHOD); - - if (size_compressed > DBMS_MAX_COMPRESSED_SIZE) - throw Exception("Too large size_compressed. Most likely corrupted data.", ErrorCodes::TOO_LARGE_SIZE_COMPRESSED); - - /// Is whole compressed block located in 'compressed_in' buffer? - if (compressed_in->offset() >= COMPRESSED_BLOCK_HEADER_SIZE && - compressed_in->position() + size_compressed - COMPRESSED_BLOCK_HEADER_SIZE <= compressed_in->buffer().end()) - { - compressed_in->position() -= COMPRESSED_BLOCK_HEADER_SIZE; - compressed_buffer = compressed_in->position(); - compressed_in->position() += size_compressed; - } - else - { - own_compressed_buffer.resize(size_compressed); - compressed_buffer = &own_compressed_buffer[0]; - compressed_in->readStrict(compressed_buffer + COMPRESSED_BLOCK_HEADER_SIZE, size_compressed - COMPRESSED_BLOCK_HEADER_SIZE); - } - - return size_compressed + sizeof(checksum); - } - - void decompress(char * to, size_t size_decompressed, size_t size_compressed_without_checksum) - { - UInt8 method = compressed_buffer[0]; /// See CompressedWriteBuffer.h - - if (method == static_cast(CompressionMethodByte::LZ4)) - { - if (LZ4_decompress_mutate(compressed_buffer + COMPRESSED_BLOCK_HEADER_SIZE, to, size_decompressed) < 0) - throw Exception("Cannot LZ4_decompress_fast", ErrorCodes::CANNOT_DECOMPRESS); - } - else - throw Exception("Unknown compression method: " + toString(method), ErrorCodes::UNKNOWN_COMPRESSION_METHOD); - } - -public: - /// 'compressed_in' could be initialized lazily, but before first call of 'readCompressedData'. 
- MutatingCompressedReadBufferBase(ReadBuffer * in = nullptr) - : compressed_in(in), own_compressed_buffer(COMPRESSED_BLOCK_HEADER_SIZE) - { - } -}; - - -class MutatingCompressedReadBuffer : public MutatingCompressedReadBufferBase, public BufferWithOwnMemory -{ -private: - size_t size_compressed = 0; - - bool nextImpl() override - { - size_t size_decompressed; - size_t size_compressed_without_checksum; - size_compressed = readCompressedData(size_decompressed, size_compressed_without_checksum); - if (!size_compressed) - return false; - - memory.resize(size_decompressed); - working_buffer = Buffer(&memory[0], &memory[size_decompressed]); - - decompress(working_buffer.begin(), size_decompressed, size_compressed_without_checksum); - - return true; - } - -public: - MutatingCompressedReadBuffer(ReadBuffer & in_) - : MutatingCompressedReadBufferBase(&in_), BufferWithOwnMemory(0) - { - } -}; - -} - - -int main(int, char **) -try -{ - DB::ReadBufferFromFileDescriptor in(STDIN_FILENO); - DB::MutatingCompressedReadBuffer mutating_in(in); - DB::WriteBufferFromFileDescriptor out(STDOUT_FILENO); - - DB::copyData(mutating_in, out); - - return 0; -} -catch (...) -{ - std::cerr << DB::getCurrentExceptionMessage(true); - return DB::getCurrentExceptionCode(); -} diff --git a/utils/iotest/iotest.cpp b/utils/iotest/iotest.cpp index ea7cd439838..47264bdfc38 100644 --- a/utils/iotest/iotest.cpp +++ b/utils/iotest/iotest.cpp @@ -113,7 +113,7 @@ int mainImpl(int argc, char ** argv) for (int i = 0; argv[2][i]; ++i) { char c = argv[2][i]; - switch(c) + switch (c) { case 'r': mode |= MODE_READ; diff --git a/utils/iotest/iotest_aio.cpp b/utils/iotest/iotest_aio.cpp index 800e605d62e..82c2d12a0b7 100644 --- a/utils/iotest/iotest_aio.cpp +++ b/utils/iotest/iotest_aio.cpp @@ -15,9 +15,6 @@ int main(int, char **) { return 0; } #include #include #include -#include -#include -#include #include #include #include From 110eb599c1a2b0f0bdbd46d7022c52436bf79a0a Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 6 Apr 2020 02:57:24 +0300 Subject: [PATCH 110/484] Fix "check style" tool --- .../client/readpassphrase/readpassphrase.c | 260 +++++++++--------- .../client/readpassphrase/readpassphrase.h | 2 +- src/Common/Arena.h | 4 +- src/Functions/FunctionsBitmap.h | 22 +- src/Storages/StorageTinyLog.cpp | 4 +- utils/check-style/check-style | 9 +- 6 files changed, 153 insertions(+), 148 deletions(-) diff --git a/programs/client/readpassphrase/readpassphrase.c b/programs/client/readpassphrase/readpassphrase.c index 8c56877196c..243701239bf 100644 --- a/programs/client/readpassphrase/readpassphrase.c +++ b/programs/client/readpassphrase/readpassphrase.c @@ -1,8 +1,8 @@ -/* $OpenBSD: readpassphrase.c,v 1.26 2016/10/18 12:47:18 millert Exp $ */ +/* $OpenBSD: readpassphrase.c,v 1.26 2016/10/18 12:47:18 millert Exp $ */ /* * Copyright (c) 2000-2002, 2007, 2010 - * Todd C. Miller + * Todd C. 
Miller * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -53,143 +53,143 @@ static void handler(int); char * readpassphrase(const char *prompt, char *buf, size_t bufsiz, int flags) { - ssize_t nr; - int input, output, save_errno, i, need_restart; - char ch, *p, *end; - struct termios term, oterm; - struct sigaction sa, savealrm, saveint, savehup, savequit, saveterm; - struct sigaction savetstp, savettin, savettou, savepipe; + ssize_t nr; + int input, output, save_errno, i, need_restart; + char ch, *p, *end; + struct termios term, oterm; + struct sigaction sa, savealrm, saveint, savehup, savequit, saveterm; + struct sigaction savetstp, savettin, savettou, savepipe; - /* I suppose we could alloc on demand in this case (XXX). */ - if (bufsiz == 0) { - errno = EINVAL; - return(NULL); - } + /* I suppose we could alloc on demand in this case (XXX). */ + if (bufsiz == 0) { + errno = EINVAL; + return(NULL); + } restart: - for (i = 0; i < NSIG; i++) - signo[i] = 0; - nr = -1; - save_errno = 0; - need_restart = 0; - /* - * Read and write to /dev/tty if available. If not, read from - * stdin and write to stderr unless a tty is required. - */ - if ((flags & RPP_STDIN) || - (input = output = open(_PATH_TTY, O_RDWR)) == -1) { - if (flags & RPP_REQUIRE_TTY) { - errno = ENOTTY; - return(NULL); - } - input = STDIN_FILENO; - output = STDERR_FILENO; - } + for (i = 0; i < NSIG; i++) + signo[i] = 0; + nr = -1; + save_errno = 0; + need_restart = 0; + /* + * Read and write to /dev/tty if available. If not, read from + * stdin and write to stderr unless a tty is required. + */ + if ((flags & RPP_STDIN) || + (input = output = open(_PATH_TTY, O_RDWR)) == -1) { + if (flags & RPP_REQUIRE_TTY) { + errno = ENOTTY; + return(NULL); + } + input = STDIN_FILENO; + output = STDERR_FILENO; + } - /* - * Turn off echo if possible. - * If we are using a tty but are not the foreground pgrp this will - * generate SIGTTOU, so do it *before* installing the signal handlers. - */ - if (input != STDIN_FILENO && tcgetattr(input, &oterm) == 0) { - memcpy(&term, &oterm, sizeof(term)); - if (!(flags & RPP_ECHO_ON)) - term.c_lflag &= ~(ECHO | ECHONL); + /* + * Turn off echo if possible. + * If we are using a tty but are not the foreground pgrp this will + * generate SIGTTOU, so do it *before* installing the signal handlers. + */ + if (input != STDIN_FILENO && tcgetattr(input, &oterm) == 0) { + memcpy(&term, &oterm, sizeof(term)); + if (!(flags & RPP_ECHO_ON)) + term.c_lflag &= ~(ECHO | ECHONL); #ifdef VSTATUS - if (term.c_cc[VSTATUS] != _POSIX_VDISABLE) - term.c_cc[VSTATUS] = _POSIX_VDISABLE; + if (term.c_cc[VSTATUS] != _POSIX_VDISABLE) + term.c_cc[VSTATUS] = _POSIX_VDISABLE; #endif - (void)tcsetattr(input, TCSAFLUSH|TCSASOFT, &term); - } else { - memset(&term, 0, sizeof(term)); - term.c_lflag |= ECHO; - memset(&oterm, 0, sizeof(oterm)); - oterm.c_lflag |= ECHO; - } + (void)tcsetattr(input, TCSAFLUSH|TCSASOFT, &term); + } else { + memset(&term, 0, sizeof(term)); + term.c_lflag |= ECHO; + memset(&oterm, 0, sizeof(oterm)); + oterm.c_lflag |= ECHO; + } - /* - * Catch signals that would otherwise cause the user to end - * up with echo turned off in the shell. Don't worry about - * things like SIGXCPU and SIGVTALRM for now. 
- */ - sigemptyset(&sa.sa_mask); - sa.sa_flags = 0; /* don't restart system calls */ - sa.sa_handler = handler; - (void)sigaction(SIGALRM, &sa, &savealrm); - (void)sigaction(SIGHUP, &sa, &savehup); - (void)sigaction(SIGINT, &sa, &saveint); - (void)sigaction(SIGPIPE, &sa, &savepipe); - (void)sigaction(SIGQUIT, &sa, &savequit); - (void)sigaction(SIGTERM, &sa, &saveterm); - (void)sigaction(SIGTSTP, &sa, &savetstp); - (void)sigaction(SIGTTIN, &sa, &savettin); - (void)sigaction(SIGTTOU, &sa, &savettou); + /* + * Catch signals that would otherwise cause the user to end + * up with echo turned off in the shell. Don't worry about + * things like SIGXCPU and SIGVTALRM for now. + */ + sigemptyset(&sa.sa_mask); + sa.sa_flags = 0; /* don't restart system calls */ + sa.sa_handler = handler; + (void)sigaction(SIGALRM, &sa, &savealrm); + (void)sigaction(SIGHUP, &sa, &savehup); + (void)sigaction(SIGINT, &sa, &saveint); + (void)sigaction(SIGPIPE, &sa, &savepipe); + (void)sigaction(SIGQUIT, &sa, &savequit); + (void)sigaction(SIGTERM, &sa, &saveterm); + (void)sigaction(SIGTSTP, &sa, &savetstp); + (void)sigaction(SIGTTIN, &sa, &savettin); + (void)sigaction(SIGTTOU, &sa, &savettou); - if (!(flags & RPP_STDIN)) - (void)write(output, prompt, strlen(prompt)); - end = buf + bufsiz - 1; - p = buf; - while ((nr = read(input, &ch, 1)) == 1 && ch != '\n' && ch != '\r') { - if (p < end) { - if ((flags & RPP_SEVENBIT)) - ch &= 0x7f; - if (isalpha((unsigned char)ch)) { - if ((flags & RPP_FORCELOWER)) - ch = (char)tolower((unsigned char)ch); - if ((flags & RPP_FORCEUPPER)) - ch = (char)toupper((unsigned char)ch); - } - *p++ = ch; - } - } - *p = '\0'; - save_errno = errno; - if (!(term.c_lflag & ECHO)) - (void)write(output, "\n", 1); + if (!(flags & RPP_STDIN)) + (void)write(output, prompt, strlen(prompt)); + end = buf + bufsiz - 1; + p = buf; + while ((nr = read(input, &ch, 1)) == 1 && ch != '\n' && ch != '\r') { + if (p < end) { + if ((flags & RPP_SEVENBIT)) + ch &= 0x7f; + if (isalpha((unsigned char)ch)) { + if ((flags & RPP_FORCELOWER)) + ch = (char)tolower((unsigned char)ch); + if ((flags & RPP_FORCEUPPER)) + ch = (char)toupper((unsigned char)ch); + } + *p++ = ch; + } + } + *p = '\0'; + save_errno = errno; + if (!(term.c_lflag & ECHO)) + (void)write(output, "\n", 1); - /* Restore old terminal settings and signals. */ - if (memcmp(&term, &oterm, sizeof(term)) != 0) { - const int sigttou = signo[SIGTTOU]; + /* Restore old terminal settings and signals. */ + if (memcmp(&term, &oterm, sizeof(term)) != 0) { + const int sigttou = signo[SIGTTOU]; - /* Ignore SIGTTOU generated when we are not the fg pgrp. */ - while (tcsetattr(input, TCSAFLUSH|TCSASOFT, &oterm) == -1 && - errno == EINTR && !signo[SIGTTOU]) - continue; - signo[SIGTTOU] = sigttou; - } - (void)sigaction(SIGALRM, &savealrm, NULL); - (void)sigaction(SIGHUP, &savehup, NULL); - (void)sigaction(SIGINT, &saveint, NULL); - (void)sigaction(SIGQUIT, &savequit, NULL); - (void)sigaction(SIGPIPE, &savepipe, NULL); - (void)sigaction(SIGTERM, &saveterm, NULL); - (void)sigaction(SIGTSTP, &savetstp, NULL); - (void)sigaction(SIGTTIN, &savettin, NULL); - (void)sigaction(SIGTTOU, &savettou, NULL); - if (input != STDIN_FILENO) - (void)close(input); + /* Ignore SIGTTOU generated when we are not the fg pgrp. 
*/ + while (tcsetattr(input, TCSAFLUSH|TCSASOFT, &oterm) == -1 && + errno == EINTR && !signo[SIGTTOU]) + continue; + signo[SIGTTOU] = sigttou; + } + (void)sigaction(SIGALRM, &savealrm, NULL); + (void)sigaction(SIGHUP, &savehup, NULL); + (void)sigaction(SIGINT, &saveint, NULL); + (void)sigaction(SIGQUIT, &savequit, NULL); + (void)sigaction(SIGPIPE, &savepipe, NULL); + (void)sigaction(SIGTERM, &saveterm, NULL); + (void)sigaction(SIGTSTP, &savetstp, NULL); + (void)sigaction(SIGTTIN, &savettin, NULL); + (void)sigaction(SIGTTOU, &savettou, NULL); + if (input != STDIN_FILENO) + (void)close(input); - /* - * If we were interrupted by a signal, resend it to ourselves - * now that we have restored the signal handlers. - */ - for (i = 0; i < NSIG; i++) { - if (signo[i]) { - kill(getpid(), i); - switch (i) { - case SIGTSTP: - case SIGTTIN: - case SIGTTOU: - need_restart = 1; - } - } - } - if (need_restart) - goto restart; + /* + * If we were interrupted by a signal, resend it to ourselves + * now that we have restored the signal handlers. + */ + for (i = 0; i < NSIG; i++) { + if (signo[i]) { + kill(getpid(), i); + switch (i) { + case SIGTSTP: + case SIGTTIN: + case SIGTTOU: + need_restart = 1; + } + } + } + if (need_restart) + goto restart; - if (save_errno) - errno = save_errno; - return(nr == -1 ? NULL : buf); + if (save_errno) + errno = save_errno; + return(nr == -1 ? NULL : buf); } //DEF_WEAK(readpassphrase); @@ -197,15 +197,15 @@ restart: char * getpass(const char *prompt) { - static char buf[_PASSWORD_LEN + 1]; + static char buf[_PASSWORD_LEN + 1]; - return(readpassphrase(prompt, buf, sizeof(buf), RPP_ECHO_OFF)); + return(readpassphrase(prompt, buf, sizeof(buf), RPP_ECHO_OFF)); } #endif static void handler(int s) { - signo[s] = 1; + signo[s] = 1; } #endif /* HAVE_READPASSPHRASE */ diff --git a/programs/client/readpassphrase/readpassphrase.h b/programs/client/readpassphrase/readpassphrase.h index 272c822423a..0782a1773ea 100644 --- a/programs/client/readpassphrase/readpassphrase.h +++ b/programs/client/readpassphrase/readpassphrase.h @@ -1,4 +1,4 @@ -// /* $OpenBSD: readpassphrase.h,v 1.5 2003/06/17 21:56:23 millert Exp $ */ +// /* $OpenBSD: readpassphrase.h,v 1.5 2003/06/17 21:56:23 millert Exp $ */ /* * Copyright (c) 2000, 2002 Todd C. Miller diff --git a/src/Common/Arena.h b/src/Common/Arena.h index e1556ef73c5..32c0f4c12d1 100644 --- a/src/Common/Arena.h +++ b/src/Common/Arena.h @@ -179,8 +179,8 @@ public: /** Rollback just performed allocation. * Must pass size not more that was just allocated. - * Return the resulting head pointer, so that the caller can assert that - * the allocation it intended to roll back was indeed the last one. + * Return the resulting head pointer, so that the caller can assert that + * the allocation it intended to roll back was indeed the last one. 
*/ void * rollback(size_t size) { diff --git a/src/Functions/FunctionsBitmap.h b/src/Functions/FunctionsBitmap.h index 3eec1d5a354..bf84bfbe47e 100644 --- a/src/Functions/FunctionsBitmap.h +++ b/src/Functions/FunctionsBitmap.h @@ -31,13 +31,13 @@ namespace ErrorCodes * bitmapBuild: integer[] -> bitmap * * Convert bitmap to integer array: - * bitmapToArray: bitmap -> integer[] + * bitmapToArray: bitmap -> integer[] * * Retrun the smallest value in the set: - * bitmapMin: bitmap -> integer + * bitmapMin: bitmap -> integer * * Retrun the greatest value in the set: - * bitmapMax: bitmap -> integer + * bitmapMax: bitmap -> integer * * Return subset in specified range (not include the range_end): * bitmapSubsetInRange: bitmap,integer,integer -> bitmap @@ -49,28 +49,28 @@ namespace ErrorCodes * bitmapTransform: bitmap,integer[],integer[] -> bitmap * * Two bitmap and calculation: - * bitmapAnd: bitmap,bitmap -> bitmap + * bitmapAnd: bitmap,bitmap -> bitmap * * Two bitmap or calculation: - * bitmapOr: bitmap,bitmap -> bitmap + * bitmapOr: bitmap,bitmap -> bitmap * * Two bitmap xor calculation: - * bitmapXor: bitmap,bitmap -> bitmap + * bitmapXor: bitmap,bitmap -> bitmap * * Two bitmap andnot calculation: - * bitmapAndnot: bitmap,bitmap -> bitmap + * bitmapAndnot: bitmap,bitmap -> bitmap * * Retrun bitmap cardinality: - * bitmapCardinality: bitmap -> integer + * bitmapCardinality: bitmap -> integer * * Two bitmap and calculation, return cardinality: - * bitmapAndCardinality: bitmap,bitmap -> integer + * bitmapAndCardinality: bitmap,bitmap -> integer * * Two bitmap or calculation, return cardinality: - * bitmapOrCardinality: bitmap,bitmap -> integer + * bitmapOrCardinality: bitmap,bitmap -> integer * * Two bitmap xor calculation, return cardinality: - * bitmapXorCardinality: bitmap,bitmap -> integer + * bitmapXorCardinality: bitmap,bitmap -> integer * * Two bitmap andnot calculation, return cardinality: * bitmapAndnotCardinality: bitmap,bitmap -> integer diff --git a/src/Storages/StorageTinyLog.cpp b/src/Storages/StorageTinyLog.cpp index f171c694f21..7b89be93f8a 100644 --- a/src/Storages/StorageTinyLog.cpp +++ b/src/Storages/StorageTinyLog.cpp @@ -402,8 +402,8 @@ Pipes StorageTinyLog::read( Pipes pipes; - // When reading, we lock the entire storage, because we only have one file - // per column and can't modify it concurrently. + // When reading, we lock the entire storage, because we only have one file + // per column and can't modify it concurrently. pipes.emplace_back(std::make_shared( max_block_size, Nested::collect(getColumns().getAllPhysical().addTypes(column_names)), *this, context.getSettingsRef().max_read_buffer_size)); diff --git a/utils/check-style/check-style b/utils/check-style/check-style index 3fd870e179e..08b0e81c123 100755 --- a/utils/check-style/check-style +++ b/utils/check-style/check-style @@ -17,11 +17,16 @@ EXCLUDE_DIRS='build/|integration/|widechar_width/|glibc-compatibility/|memcpy/|c find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' 2>/dev/null | grep -vP $EXCLUDE_DIRS | - xargs grep $@ -P '((class|struct|namespace|enum|if|for|while|else|throw|switch).*|\)(\s*const)?(\s*override)?\s*)\{$|\s$|\t|^ {1,3}[^\* ]\S|\t|^\s*(if|else if|if constexpr|else if constexpr|for|while|catch|switch)\(|\( [^\s\\]|\S \)' | -# a curly brace not in a new line, but not for the case of C++11 init or agg. 
initialization | trailing whitespace | number of ws not a multiple of 4, but not in the case of comment continuation | a tab character | missing whitespace after for/if/while... before opening brace | whitespaces inside braces + xargs grep $@ -P '((class|struct|namespace|enum|if|for|while|else|throw|switch).*|\)(\s*const)?(\s*override)?\s*)\{$|\s$|^ {1,3}[^\* ]\S|\t|^\s*(if|else if|if constexpr|else if constexpr|for|while|catch|switch)\(|\( [^\s\\]|\S \)' | +# a curly brace not in a new line, but not for the case of C++11 init or agg. initialization | trailing whitespace | number of ws not a multiple of 4, but not in the case of comment continuation | missing whitespace after for/if/while... before opening brace | whitespaces inside braces grep -v -P '(//|:\s+\*|\$\(\()| \)"' # single-line comment | continuation of a multiline comment | a typical piece of embedded shell code | something like ending of raw string literal +# Tabs +find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' 2>/dev/null | + grep -vP $EXCLUDE_DIRS | + xargs grep $@ -F $'\t' + # // namespace comments are unneeded find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' 2>/dev/null | grep -vP $EXCLUDE_DIRS | From 71b43bfa0a17dc82937605a13ec1d69a8bb0c78c Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 6 Apr 2020 08:13:11 +0000 Subject: [PATCH 111/484] Bump certifi from 2019.11.28 to 2020.4.5.1 in /docs/tools Bumps [certifi](https://github.com/certifi/python-certifi) from 2019.11.28 to 2020.4.5.1. - [Release notes](https://github.com/certifi/python-certifi/releases) - [Commits](https://github.com/certifi/python-certifi/commits) Signed-off-by: dependabot-preview[bot] --- docs/tools/requirements.txt | 2 +- docs/tools/translate/requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/tools/requirements.txt b/docs/tools/requirements.txt index 33cf57d41bb..e43b67a159a 100644 --- a/docs/tools/requirements.txt +++ b/docs/tools/requirements.txt @@ -2,7 +2,7 @@ Babel==2.8.0 backports-abc==0.5 backports.functools-lru-cache==1.6.1 beautifulsoup4==4.8.2 -certifi==2019.11.28 +certifi==2020.4.5.1 chardet==3.0.4 click==7.1.1 closure==20191111 diff --git a/docs/tools/translate/requirements.txt b/docs/tools/translate/requirements.txt index 41b1db836d3..d49220602a0 100644 --- a/docs/tools/translate/requirements.txt +++ b/docs/tools/translate/requirements.txt @@ -1,5 +1,5 @@ Babel==2.8.0 -certifi==2019.11.28 +certifi==2020.4.5.1 chardet==3.0.4 googletrans==2.4.0 idna==2.9 From dd5ddf19d9234622433b6f72791de28f7cb3268b Mon Sep 17 00:00:00 2001 From: alesapin Date: Mon, 6 Apr 2020 11:55:59 +0300 Subject: [PATCH 112/484] Rename missed files --- {dbms => src}/Interpreters/RenameColumnVisitor.cpp | 0 {dbms => src}/Interpreters/RenameColumnVisitor.h | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename {dbms => src}/Interpreters/RenameColumnVisitor.cpp (100%) rename {dbms => src}/Interpreters/RenameColumnVisitor.h (100%) diff --git a/dbms/Interpreters/RenameColumnVisitor.cpp b/src/Interpreters/RenameColumnVisitor.cpp similarity index 100% rename from dbms/Interpreters/RenameColumnVisitor.cpp rename to src/Interpreters/RenameColumnVisitor.cpp diff --git a/dbms/Interpreters/RenameColumnVisitor.h b/src/Interpreters/RenameColumnVisitor.h similarity index 100% rename from dbms/Interpreters/RenameColumnVisitor.h rename to src/Interpreters/RenameColumnVisitor.h From 
1d451082187fd21a52dfba4c79cd5c847554557f Mon Sep 17 00:00:00 2001 From: Vasily Nemkov Date: Mon, 6 Apr 2020 13:27:31 +0300 Subject: [PATCH 113/484] Fixed builds, implementation and tests * Builds shouldn't fail on platforms that do not support SSE2 and SSE4.2 and do not have corresponding headers. * Updated tests to include malicious padding * Fixed reporting tokens that cross or outside of data boundaries. --- .../MergeTree/MergeTreeIndexFullText.cpp | 27 +++++++++++-------- .../tests/gtest_SplitTokenExtractor.cpp | 8 +++++- 2 files changed, 23 insertions(+), 12 deletions(-) diff --git a/dbms/Storages/MergeTree/MergeTreeIndexFullText.cpp b/dbms/Storages/MergeTree/MergeTreeIndexFullText.cpp index af979010dc0..93553e0619e 100644 --- a/dbms/Storages/MergeTree/MergeTreeIndexFullText.cpp +++ b/dbms/Storages/MergeTree/MergeTreeIndexFullText.cpp @@ -19,9 +19,14 @@ #include +#if defined(__SSE2__) #include + +#if defined(__SSE4_2__) #include -#include +#endif + +#endif namespace DB @@ -620,19 +625,19 @@ bool SplitTokenExtractor::next(const char * data, size_t len, size_t * pos, size #if defined(__SSE4_2__) // With the help of https://www.strchr.com/strcmp_and_strlen_using_sse_4.2 - static const auto alnum_chars_ranges = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, + const auto alnum_chars_ranges = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, '\xFF', '\x80', 'z', 'a', 'Z', 'A', '9', '0'); // Every bit represents if `haystack` character is in the ranges (1) or not(0) const int result_bitmask = _mm_cvtsi128_si32(_mm_cmpestrm(alnum_chars_ranges, 8, haystack, haystack_length, _SIDD_CMP_RANGES)); #else // NOTE: -1 and +1 required since SSE2 has no `>=` and `<=` instructions on packed 8-bit integers (epi8). - static const auto number_begin = _mm_set1_epi8('0' - 1); - static const auto number_end = _mm_set1_epi8('9' + 1); - static const auto alpha_lower_begin = _mm_set1_epi8('a' - 1); - static const auto alpha_lower_end = _mm_set1_epi8('z' + 1); - static const auto alpha_upper_begin = _mm_set1_epi8('A' - 1); - static const auto alpha_upper_end = _mm_set1_epi8('Z' + 1); - static const auto zero = _mm_set1_epi8(0); + const auto number_begin = _mm_set1_epi8('0' - 1); + const auto number_end = _mm_set1_epi8('9' + 1); + const auto alpha_lower_begin = _mm_set1_epi8('a' - 1); + const auto alpha_lower_end = _mm_set1_epi8('z' + 1); + const auto alpha_upper_begin = _mm_set1_epi8('A' - 1); + const auto alpha_upper_end = _mm_set1_epi8('Z' + 1); + const auto zero = _mm_set1_epi8(0); // every bit represents if `haystack` character `c` statisfies condition: // (c < 0) || (c > '0' - 1 && c < '9' + 1) || (c > 'a' - 1 && c < 'z' + 1) || (c > 'A' - 1 && c < 'Z' + 1) @@ -669,7 +674,7 @@ bool SplitTokenExtractor::next(const char * data, size_t len, size_t * pos, size // check if there are leftovers in next `haystack` continue; - return true; + break; #else if (isASCII(data[*pos]) && !isAlphaNumericASCII(data[*pos])) { @@ -691,7 +696,7 @@ bool SplitTokenExtractor::next(const char * data, size_t len, size_t * pos, size // Could happen only if string is not padded with zeroes, and we accidentally hopped over end of data. 
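For readers without the SSE background: both branches above classify bytes as token characters, where a token character is any ASCII alphanumeric or any byte with the high bit set (so multi-byte UTF-8 sequences stay inside tokens). A scalar sketch of that predicate, offered as a model of the SIMD code rather than a drop-in replacement:

/// Scalar model of the SIMD byte classifier above: a byte belongs to a token
/// if it is non-ASCII (high bit set) or an ASCII alphanumeric.
static bool is_token_byte(unsigned char c)
{
    return c >= 0x80
        || (c >= '0' && c <= '9')
        || (c >= 'a' && c <= 'z')
        || (c >= 'A' && c <= 'Z');
}

The SSE4.2 path gets the same answer with one _mm_cmpestrm over 16 bytes; the SSE2 path reconstructs it from eight packed comparisons because there are no >= / <= instructions on packed 8-bit integers.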
if (*token_start > len) return false; - *token_len = len - *token_start; + *token_len = std::min(len - *token_start, *token_len); #endif return *token_len > 0; diff --git a/dbms/src/Storages/tests/gtest_SplitTokenExtractor.cpp b/dbms/src/Storages/tests/gtest_SplitTokenExtractor.cpp index b8686f962bc..e2229792020 100644 --- a/dbms/src/Storages/tests/gtest_SplitTokenExtractor.cpp +++ b/dbms/src/Storages/tests/gtest_SplitTokenExtractor.cpp @@ -17,7 +17,7 @@ using namespace DB; struct SplitTokenExtractorTestCase { - const char * description; + const std::string_view description; const std::string source; const std::vector tokens; }; @@ -35,6 +35,12 @@ public: const auto & param = GetParam(); const auto & source = param.source; data = std::make_unique>(source.data(), source.data() + source.size()); + + // add predefined padding that forms tokens to ensure no reads past end of buffer. + const char extra_padding[] = "this is the end \xd1\x8d\xd1\x82\xd0\xbe\xd0\xba\xd0\xbe \xd0\xbd\xd0\xb5\xd1\x86"; + data->insert(data->end(), std::begin(extra_padding), std::end(extra_padding)); + + data->resize(data->size() - sizeof(extra_padding)); } std::unique_ptr> data; From 0117c194c50e6ac399d3eab18eec370f5908d99f Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Mon, 6 Apr 2020 13:33:59 +0300 Subject: [PATCH 114/484] Fix GroupingAggregatedTransform for single-level aggregation. Add test. --- docker/test/stateless/Dockerfile | 1 + ...gingAggregatedMemoryEfficientTransform.cpp | 9 ++++++-- src/Storages/StorageDistributed.cpp | 2 +- tests/config/clusters.xml | 20 ++++++++++++++++ ...tion_memory_efficient_mix_levels.reference | 10 ++++++++ ...ggregation_memory_efficient_mix_levels.sql | 23 +++++++++++++++++++ 6 files changed, 62 insertions(+), 3 deletions(-) create mode 100644 tests/config/clusters.xml create mode 100644 tests/queries/0_stateless/01231_distributed_aggregation_memory_efficient_mix_levels.reference create mode 100644 tests/queries/0_stateless/01231_distributed_aggregation_memory_efficient_mix_levels.sql diff --git a/docker/test/stateless/Dockerfile b/docker/test/stateless/Dockerfile index 2d2025de58b..977c580ef43 100644 --- a/docker/test/stateless/Dockerfile +++ b/docker/test/stateless/Dockerfile @@ -68,6 +68,7 @@ CMD dpkg -i package_folder/clickhouse-common-static_*.deb; \ ln -s /usr/share/clickhouse-test/config/macros.xml /etc/clickhouse-server/config.d/; \ ln -s /usr/share/clickhouse-test/config/disks.xml /etc/clickhouse-server/config.d/; \ ln -s /usr/share/clickhouse-test/config/secure_ports.xml /etc/clickhouse-server/config.d/; \ + ln -s /usr/share/clickhouse-test/config/clusters.xml /etc/clickhouse-server/config.d/; \ ln -s /usr/share/clickhouse-test/config/server.key /etc/clickhouse-server/; \ ln -s /usr/share/clickhouse-test/config/server.crt /etc/clickhouse-server/; \ ln -s /usr/share/clickhouse-test/config/dhparam.pem /etc/clickhouse-server/; \ diff --git a/src/Processors/Transforms/MergingAggregatedMemoryEfficientTransform.cpp b/src/Processors/Transforms/MergingAggregatedMemoryEfficientTransform.cpp index 4c0323fcf6a..cabe74b36e9 100644 --- a/src/Processors/Transforms/MergingAggregatedMemoryEfficientTransform.cpp +++ b/src/Processors/Transforms/MergingAggregatedMemoryEfficientTransform.cpp @@ -275,15 +275,20 @@ void GroupingAggregatedTransform::work() { if (!single_level_chunks.empty()) { - auto & header = getOutputs().front().getHeader(); + auto & header = getInputs().front().getHeader(); auto block = header.cloneWithColumns(single_level_chunks.back().detachColumns()); 
single_level_chunks.pop_back(); auto blocks = params->aggregator.convertBlockToTwoLevel(block); for (auto & cur_block : blocks) { + if (!cur_block) + continue; + Int32 bucket = cur_block.info.bucket_num; - chunks_map[bucket].emplace_back(Chunk(cur_block.getColumns(), cur_block.rows())); + auto chunk_info = std::make_shared(); + chunk_info->bucket_num = bucket; + chunks_map[bucket].emplace_back(Chunk(cur_block.getColumns(), cur_block.rows(), std::move(chunk_info))); } } } diff --git a/src/Storages/StorageDistributed.cpp b/src/Storages/StorageDistributed.cpp index b4375dd5b0a..adf444c3565 100644 --- a/src/Storages/StorageDistributed.cpp +++ b/src/Storages/StorageDistributed.cpp @@ -474,7 +474,7 @@ void StorageDistributed::alter(const AlterCommands & params, const Context & con void StorageDistributed::startup() { if (remote_database.empty() && !remote_table_function_ptr) - LOG_WARNING(log, "Name of remote database is empty. Default database will be used implicitly."); + LOG_INFO(log, "Name of remote database is empty. Default database will be used implicitly."); if (!volume) return; diff --git a/tests/config/clusters.xml b/tests/config/clusters.xml new file mode 100644 index 00000000000..c0babf0ff89 --- /dev/null +++ b/tests/config/clusters.xml @@ -0,0 +1,20 @@ + + + + + + shard_0 + localhost + 9000 + + + + + shard_1 + localhost + 9000 + + + + + \ No newline at end of file diff --git a/tests/queries/0_stateless/01231_distributed_aggregation_memory_efficient_mix_levels.reference b/tests/queries/0_stateless/01231_distributed_aggregation_memory_efficient_mix_levels.reference new file mode 100644 index 00000000000..ac13b3f193e --- /dev/null +++ b/tests/queries/0_stateless/01231_distributed_aggregation_memory_efficient_mix_levels.reference @@ -0,0 +1,10 @@ +0 2 +1 1 +2 1 +3 1 +4 1 +5 1 +6 1 +7 1 +8 1 +9 1 diff --git a/tests/queries/0_stateless/01231_distributed_aggregation_memory_efficient_mix_levels.sql b/tests/queries/0_stateless/01231_distributed_aggregation_memory_efficient_mix_levels.sql new file mode 100644 index 00000000000..6e4feda346f --- /dev/null +++ b/tests/queries/0_stateless/01231_distributed_aggregation_memory_efficient_mix_levels.sql @@ -0,0 +1,23 @@ +create database if not exists shard_0; +create database if not exists shard_1; + +drop table if exists shard_0.shard_01231_distributed_aggregation_memory_efficient; +drop table if exists shard_1.shard_01231_distributed_aggregation_memory_efficient; +drop table if exists ma_dist; + +create table shard_0.shard_01231_distributed_aggregation_memory_efficient (x UInt64) engine = MergeTree order by x; +create table shard_1.shard_01231_distributed_aggregation_memory_efficient (x UInt64) engine = MergeTree order by x; + +insert into shard_0.shard_01231_distributed_aggregation_memory_efficient select * from numbers(1); +insert into shard_1.shard_01231_distributed_aggregation_memory_efficient select * from numbers(10); + +create table ma_dist (x UInt64) ENGINE = Distributed(test_cluster_two_shards_different_databases, '', 'shard_01231_distributed_aggregation_memory_efficient'); + +set distributed_aggregation_memory_efficient = 1; +set group_by_two_level_threshold = 2; +set max_bytes_before_external_group_by = 16; + +select x, count() from ma_dist group by x order by x; + +drop table if exists shard_0.shard_01231_distributed_aggregation_memory_efficient; +drop table if exists shard_1.shard_01231_distributed_aggregation_memory_efficient; From 79024d73a230473203ad0560f5908b59cdac8e95 Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Thu, 2 Apr 
2020 20:27:07 +0300 Subject: [PATCH 115/484] improve performance of index analysis with monotonic functions --- src/Interpreters/Set.cpp | 11 ++ src/Interpreters/Set.h | 2 + src/Storages/MergeTree/IMergeTreeDataPart.cpp | 4 +- src/Storages/MergeTree/KeyCondition.cpp | 121 +++++++----------- src/Storages/MergeTree/KeyCondition.h | 109 ++++++++-------- .../MergeTree/MergeTreeDataSelectExecutor.cpp | 28 ++-- .../MergeTree/MergeTreeIndexMinMax.cpp | 4 +- tests/performance/set_index.xml | 9 +- 8 files changed, 144 insertions(+), 144 deletions(-) diff --git a/src/Interpreters/Set.cpp b/src/Interpreters/Set.cpp index 3c79ea5174d..2ad9f588cf6 100644 --- a/src/Interpreters/Set.cpp +++ b/src/Interpreters/Set.cpp @@ -588,6 +588,14 @@ BoolMask MergeTreeSetIndex::checkInRange(const std::vector & key_ranges, }; } +bool MergeTreeSetIndex::hasMonotonicFunctionsChain() const +{ + for (const auto & mapping : indexes_mapping) + if (!mapping.functions.empty()) + return true; + return false; +} + void ValueWithInfinity::update(const Field & x) { /// Keep at most one element in column. @@ -599,8 +607,11 @@ void ValueWithInfinity::update(const Field & x) const IColumn & ValueWithInfinity::getColumnIfFinite() const { +#ifndef NDEBUG if (type != NORMAL) throw Exception("Trying to get column of infinite type", ErrorCodes::LOGICAL_ERROR); +#endif + return *column; } diff --git a/src/Interpreters/Set.h b/src/Interpreters/Set.h index c9605d4e11e..3a16d9ed094 100644 --- a/src/Interpreters/Set.h +++ b/src/Interpreters/Set.h @@ -227,6 +227,8 @@ public: size_t size() const { return ordered_set.at(0)->size(); } + bool hasMonotonicFunctionsChain() const; + BoolMask checkInRange(const std::vector & key_ranges, const DataTypes & data_types); private: diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp index 3e0caa67518..5d799d257bc 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp @@ -98,8 +98,8 @@ void IMergeTreeDataPart::MinMaxIndex::update(const Block & block, const Names & for (size_t i = 0; i < column_names.size(); ++i) { - Field min_value; - Field max_value; + FieldRef min_value; + FieldRef max_value; const ColumnWithTypeAndName & column = block.getByName(column_names[i]); column.column->getExtremes(min_value, max_value); diff --git a/src/Storages/MergeTree/KeyCondition.cpp b/src/Storages/MergeTree/KeyCondition.cpp index e994d254958..e755c4942a1 100644 --- a/src/Storages/MergeTree/KeyCondition.cpp +++ b/src/Storages/MergeTree/KeyCondition.cpp @@ -338,44 +338,6 @@ inline bool Range::equals(const Field & lhs, const Field & rhs) { return applyVi inline bool Range::less(const Field & lhs, const Field & rhs) { return applyVisitor(FieldVisitorAccurateLess(), lhs, rhs); } -FieldWithInfinity::FieldWithInfinity(const Field & field_) - : field(field_), - type(Type::NORMAL) -{ -} - -FieldWithInfinity::FieldWithInfinity(Field && field_) - : field(std::move(field_)), - type(Type::NORMAL) -{ -} - -FieldWithInfinity::FieldWithInfinity(const Type type_) - : type(type_) -{ -} - -FieldWithInfinity FieldWithInfinity::getMinusInfinity() -{ - return FieldWithInfinity(Type::MINUS_INFINITY); -} - -FieldWithInfinity FieldWithInfinity::getPlusInfinity() -{ - return FieldWithInfinity(Type::PLUS_INFINITY); -} - -bool FieldWithInfinity::operator<(const FieldWithInfinity & other) const -{ - return type < other.type || (type == other.type && type == Type::NORMAL && field < other.field); -} - -bool FieldWithInfinity::operator==(const 
FieldWithInfinity & other) const -{ - return type == other.type && (type != Type::NORMAL || field == other.field); -} - - /** Calculate expressions, that depend only on constants. * For index to work when something like "WHERE Date = toDate(now())" is written. */ @@ -480,24 +442,41 @@ bool KeyCondition::getConstant(const ASTPtr & expr, Block & block_with_constants } -static void applyFunction( +static Field applyFunctionForField( const FunctionBasePtr & func, - const DataTypePtr & arg_type, const Field & arg_value, - DataTypePtr & res_type, Field & res_value) + const DataTypePtr & arg_type, + const Field & arg_value) { - res_type = func->getReturnType(); - Block block { { arg_type->createColumnConst(1, arg_value), arg_type, "x" }, - { nullptr, res_type, "y" } + { nullptr, func->getReturnType(), "y" } }; func->execute(block, {0}, 1, 1); - - block.safeGetByPosition(1).column->get(0, res_value); + return (*block.safeGetByPosition(1).column)[0]; } +static FieldRef applyFunction(FunctionBasePtr & func, const DataTypePtr & current_type, const FieldRef & field) +{ + /// Fallback for fields without block reference. + if (field.isExplicit()) + return applyFunctionForField(func, current_type, field); + + String result_name = "_" + func->getName() + "_" + toString(field.column_idx); + size_t result_idx; + const auto & block = field.block; + if (!block->has(result_name)) + { + result_idx = block->columns(); + field.block->insert({nullptr, func->getReturnType(), result_name}); + func->execute(*block, {field.column_idx}, result_idx, block->rows()); + } + else + result_idx = block->getPositionByName(result_name); + + return {field.block, field.row_idx, result_idx}; +} void KeyCondition::traverseAST(const ASTPtr & node, const Context & context, Block & block_with_constants) { @@ -569,12 +548,8 @@ bool KeyCondition::canConstantBeWrappedByMonotonicFunctions( return false; // Apply the next transformation step - DataTypePtr new_type; - applyFunction(a.function_base, out_type, out_value, new_type, out_value); - if (!new_type) - return false; - - out_type.swap(new_type); + out_value = applyFunctionForField(a.function_base, out_type, out_value); + out_type = a.function_base->getReturnType(); expr_name = a.result_name; // Transformation results in a key expression, accept @@ -957,8 +932,8 @@ String KeyCondition::toString() const template static BoolMask forAnyHyperrectangle( size_t key_size, - const Field * key_left, - const Field * key_right, + const FieldRef * key_left, + const FieldRef * key_right, bool left_bounded, bool right_bounded, std::vector & hyperrectangle, @@ -1049,8 +1024,8 @@ static BoolMask forAnyHyperrectangle( BoolMask KeyCondition::checkInRange( size_t used_key_size, - const Field * left_key, - const Field * right_key, + const FieldRef * left_key, + const FieldRef * right_key, const DataTypes & data_types, bool right_bounded, BoolMask initial_mask) const @@ -1102,19 +1077,12 @@ std::optional KeyCondition::applyMonotonicFunctionsChainToRange( return {}; } - /// Apply the function. 
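The cache key in applyFunction() above is nothing more than a synthesized column name, so later range checks against the same index block reuse the materialized result instead of re-executing the function per mark. A toy model of that memoization, with hypothetical names standing in for Block and FunctionBasePtr:

#include <functional>
#include <map>
#include <string>
#include <vector>

using Column = std::vector<int>;

/// Toy model of the per-block function cache above: results are appended to
/// the block once and found by a synthesized name on every later call.
size_t apply_cached(std::vector<Column> & block,
                    std::map<std::string, size_t> & positions,
                    const std::string & func_name, size_t arg_idx,
                    const std::function<int(int)> & func)
{
    const std::string result_name = "_" + func_name + "_" + std::to_string(arg_idx);
    if (auto it = positions.find(result_name); it != positions.end())
        return it->second;                    /// cache hit: computed earlier for this block
    Column result;
    for (int value : block[arg_idx])
        result.push_back(func(value));        /// evaluated once over the whole column
    block.push_back(std::move(result));
    return positions[result_name] = block.size() - 1;
}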
- DataTypePtr new_type; if (!key_range.left.isNull()) - applyFunction(func, current_type, key_range.left, new_type, key_range.left); + key_range.left = applyFunction(func, current_type, key_range.left); if (!key_range.right.isNull()) - applyFunction(func, current_type, key_range.right, new_type, key_range.right); + key_range.right = applyFunction(func, current_type, key_range.right); - if (!new_type) - { - return {}; - } - - current_type.swap(new_type); + current_type = func->getReturnType(); if (!monotonicity.is_positive) key_range.swapLeftAndRight(); @@ -1220,8 +1188,8 @@ BoolMask KeyCondition::checkInHyperrectangle( BoolMask KeyCondition::checkInRange( size_t used_key_size, - const Field * left_key, - const Field * right_key, + const FieldRef * left_key, + const FieldRef * right_key, const DataTypes & data_types, BoolMask initial_mask) const { @@ -1231,8 +1199,8 @@ BoolMask KeyCondition::checkInRange( bool KeyCondition::mayBeTrueInRange( size_t used_key_size, - const Field * left_key, - const Field * right_key, + const FieldRef * left_key, + const FieldRef * right_key, const DataTypes & data_types) const { return checkInRange(used_key_size, left_key, right_key, data_types, true, BoolMask::consider_only_can_be_true).can_be_true; @@ -1241,7 +1209,7 @@ bool KeyCondition::mayBeTrueInRange( BoolMask KeyCondition::checkAfter( size_t used_key_size, - const Field * left_key, + const FieldRef * left_key, const DataTypes & data_types, BoolMask initial_mask) const { @@ -1251,7 +1219,7 @@ BoolMask KeyCondition::checkAfter( bool KeyCondition::mayBeTrueAfter( size_t used_key_size, - const Field * left_key, + const FieldRef * left_key, const DataTypes & data_types) const { return checkInRange(used_key_size, left_key, nullptr, data_types, false, BoolMask::consider_only_can_be_true).can_be_true; @@ -1382,4 +1350,13 @@ size_t KeyCondition::getMaxKeyColumn() const return res; } +bool KeyCondition::hasMonotonicFunctionsChain() const +{ + for (const auto & element : rpn) + if (!element.monotonic_functions_chain.empty() + || (element.set_index && element.set_index->hasMonotonicFunctionsChain())) + return true; + return false; +} + } diff --git a/src/Storages/MergeTree/KeyCondition.h b/src/Storages/MergeTree/KeyCondition.h index 8667e0aea27..a7cdd1f1e0a 100644 --- a/src/Storages/MergeTree/KeyCondition.h +++ b/src/Storages/MergeTree/KeyCondition.h @@ -15,10 +15,6 @@ namespace DB { -namespace ErrorCodes -{ - extern const int BAD_TYPE_OF_FIELD; -} class IFunction; using FunctionBasePtr = std::shared_ptr<IFunction>; @@ -26,6 +22,35 @@ using FunctionBasePtr = std::shared_ptr<IFunction>; class ExpressionActions; using ExpressionActionsPtr = std::shared_ptr<ExpressionActions>; +/** A field that can be stored in two representations: + * - A standalone field. + * - A field with a reference to its position in a block. + * This is needed for executing functions on ranges during + * index analysis. If a function has been executed once for a field, + * its result is cached for the whole block that the field's reference points to. + */ +struct FieldRef : public Field +{ + using SharedBlock = std::shared_ptr<Block>; + + FieldRef() = default; + + /// Create as explicit field without block. + template <typename T> + FieldRef(const T & value) : Field(value) {} + + /// Create as reference to field in block.
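With the block-backed constructor that follows, a FieldRef is an ordinary Field plus coordinates into a shared block, which is exactly what lets applyFunction() above cache per column rather than per mark. A usage sketch under that assumption; the calls are shown commented out because func and type are elided:

/// Usage sketch of the FieldRef interface declared in this hunk.
auto index_block = std::make_shared<Block>();    /// filled with index columns elsewhere
FieldRef left{index_block, /* row */ 0, /* column */ 0};
FieldRef right{index_block, /* row */ 1, /* column */ 0};
/// applyFunction(func, type, left);    /// materializes "_<func>_0" for the whole block
/// applyFunction(func, type, right);   /// cache hit: same block, same source column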
+ FieldRef(const SharedBlock & block_, size_t row_idx_, size_t column_idx_) + : Field((*block_->getByPosition(column_idx_).column)[row_idx_]), + block(block_), row_idx(row_idx_), column_idx(column_idx_) {} + + bool isExplicit() const { return block == nullptr; } + + SharedBlock block; + size_t row_idx; + size_t column_idx; +}; + /** Range with open or closed ends; possibly unbounded. */ struct Range @@ -35,8 +60,8 @@ private: static bool less(const Field & lhs, const Field & rhs); public: - Field left; /// the left border, if any - Field right; /// the right border, if any + FieldRef left; /// the left border, if any + FieldRef right; /// the right border, if any bool left_bounded = false; /// bounded at the left bool right_bounded = false; /// bounded at the right bool left_included = false; /// includes the left border, if any @@ -46,11 +71,11 @@ public: Range() {} /// One point. - Range(const Field & point) + Range(const FieldRef & point) : left(point), right(point), left_bounded(true), right_bounded(true), left_included(true), right_included(true) {} /// A bounded two-sided range. - Range(const Field & left_, bool left_included_, const Field & right_, bool right_included_) + Range(const FieldRef & left_, bool left_included_, const FieldRef & right_, bool right_included_) : left(left_), right(right_), left_bounded(true), right_bounded(true), left_included(left_included_), right_included(right_included_) @@ -58,7 +83,7 @@ public: shrinkToIncludedIfPossible(); } - static Range createRightBounded(const Field & right_point, bool right_included) + static Range createRightBounded(const FieldRef & right_point, bool right_included) { Range r; r.right = right_point; @@ -68,7 +93,7 @@ public: return r; } - static Range createLeftBounded(const Field & left_point, bool left_included) + static Range createLeftBounded(const FieldRef & left_point, bool left_included) { Range r; r.left = left_point; @@ -84,7 +109,7 @@ public: */ void shrinkToIncludedIfPossible() { - if (left_bounded && !left_included) + if (left.isExplicit() && left_bounded && !left_included) { if (left.getType() == Field::Types::UInt64 && left.get() != std::numeric_limits::max()) { @@ -97,7 +122,7 @@ public: left_included = true; } } - if (right_bounded && !right_included) + if (right.isExplicit() && right_bounded && !right_included) { if (right.getType() == Field::Types::UInt64 && right.get() != std::numeric_limits::min()) { @@ -120,13 +145,13 @@ public: } /// x contained in the range - bool contains(const Field & x) const + bool contains(const FieldRef & x) const { return !leftThan(x) && !rightThan(x); } /// x is to the left - bool rightThan(const Field & x) const + bool rightThan(const FieldRef & x) const { return (left_bounded ? !(less(left, x) || (left_included && equals(x, left))) @@ -134,7 +159,7 @@ public: } /// x is to the right - bool leftThan(const Field & x) const + bool leftThan(const FieldRef & x) const { return (right_bounded ? 
!(less(x, right) || (right_included && equals(x, right))) @@ -195,42 +220,6 @@ public: String toString() const; }; - -/// Class that extends arbitrary objects with infinities, like +-inf for floats -class FieldWithInfinity -{ -public: - enum Type - { - MINUS_INFINITY = -1, - NORMAL = 0, - PLUS_INFINITY = 1 - }; - - explicit FieldWithInfinity(const Field & field_); - FieldWithInfinity(Field && field_); - - static FieldWithInfinity getMinusInfinity(); - static FieldWithInfinity getPlusInfinity(); - - bool operator<(const FieldWithInfinity & other) const; - bool operator==(const FieldWithInfinity & other) const; - - Field getFieldIfFinite() const - { - if (type != NORMAL) - throw Exception("Trying to get field of infinite type", ErrorCodes::BAD_TYPE_OF_FIELD); - return field; - } - -private: - Field field; - Type type; - - FieldWithInfinity(const Type type_); -}; - - /** Condition on the index. * * Consists of the conditions for the key belonging to all possible ranges or sets, @@ -261,8 +250,8 @@ public: /// one of the resulting mask components (see BoolMask::consider_only_can_be_XXX). BoolMask checkInRange( size_t used_key_size, - const Field * left_key, - const Field * right_key, + const FieldRef * left_key, + const FieldRef* right_key, const DataTypes & data_types, BoolMask initial_mask = BoolMask(false, false)) const; @@ -270,7 +259,7 @@ public: /// left_key must contain all the fields in the sort_descr in the appropriate order. BoolMask checkAfter( size_t used_key_size, - const Field * left_key, + const FieldRef * left_key, const DataTypes & data_types, BoolMask initial_mask = BoolMask(false, false)) const; @@ -278,15 +267,15 @@ public: /// This is more efficient than checkInRange(...).can_be_true. bool mayBeTrueInRange( size_t used_key_size, - const Field * left_key, - const Field * right_key, + const FieldRef * left_key, + const FieldRef * right_key, const DataTypes & data_types) const; /// Same as checkAfter, but calculate only may_be_true component of a result. /// This is more efficient than checkAfter(...).can_be_true. bool mayBeTrueAfter( size_t used_key_size, - const Field * left_key, + const FieldRef * left_key, const DataTypes & data_types) const; /// Checks that the index can not be used. @@ -295,6 +284,8 @@ public: /// Get the maximum number of the key element used in the condition. size_t getMaxKeyColumn() const; + bool hasMonotonicFunctionsChain() const; + /// Impose an additional condition: the value in the column `column` must be in the range `range`. /// Returns whether there is such a column in the key. bool addCondition(const String & column, const Range & range); @@ -374,8 +365,8 @@ public: private: BoolMask checkInRange( size_t used_key_size, - const Field * left_key, - const Field * right_key, + const FieldRef * left_key, + const FieldRef * right_key, const DataTypes & data_types, bool right_bounded, BoolMask initial_mask) const; diff --git a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp index 370286117ae..39de45e07e0 100644 --- a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp +++ b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp @@ -1201,11 +1201,23 @@ MarkRanges MergeTreeDataSelectExecutor::markRangesFromPKRange( * If fits, split it into smaller ones and put them on the stack. If not, discard it. * If the segment is already of one mark length, add it to response and discard it. 
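The comment above describes an exclusion search over the sparse primary index. Reduced to a model that ignores the step size, the final mark, and the merging of adjacent ranges, the skeleton is roughly:

#include <functional>
#include <utility>
#include <vector>

using MarkRangeModel = std::pair<size_t, size_t>;    /// [begin, end), in marks

/// Reduced model of the exclusion search described above; `may_be_true`
/// stands in for KeyCondition::mayBeTrueInRange over the range's key bounds.
std::vector<MarkRangeModel> select_ranges(
    size_t marks_count, const std::function<bool(size_t, size_t)> & may_be_true)
{
    std::vector<MarkRangeModel> result;
    std::vector<MarkRangeModel> stack{{0, marks_count}};
    while (!stack.empty())
    {
        auto [begin, end] = stack.back();
        stack.pop_back();
        if (!may_be_true(begin, end))
            continue;                         /// the whole range is excluded
        if (end - begin == 1)
            result.push_back({begin, end});   /// one mark long: goes into the response
        else
        {
            size_t mid = begin + (end - begin) / 2;
            stack.push_back({mid, end});      /// pushed first so the left half pops first
            stack.push_back({begin, mid});
        }
    }
    return result;
}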
*/ - std::vector ranges_stack{ {0, marks_count} }; + std::vector ranges_stack = { {0, marks_count} }; + + auto index_block = std::make_shared(); + for (size_t i = 0; i < used_key_size; ++i) + index_block->insert({index[i], data.primary_key_data_types[i], data.primary_key_columns[i]}); + + std::function create_field_ref; + /// If there is no monotonic functions, there is no need to save block reference. + /// Passing explicit field to FieldRef allows to optimize ranges and shows better performance while reading the field. + if (key_condition.hasMonotonicFunctionsChain()) + create_field_ref = [&index_block](size_t row, size_t column) -> FieldRef { return {index_block, row, column}; }; + else + create_field_ref = [&index](size_t row, size_t column) -> FieldRef { return (*index[column])[row]; }; /// NOTE Creating temporary Field objects to pass to KeyCondition. - Row index_left(used_key_size); - Row index_right(used_key_size); + std::vector index_left(used_key_size); + std::vector index_right(used_key_size); while (!ranges_stack.empty()) { @@ -1216,7 +1228,7 @@ MarkRanges MergeTreeDataSelectExecutor::markRangesFromPKRange( if (range.end == marks_count && !has_final_mark) { for (size_t i = 0; i < used_key_size; ++i) - index[i]->get(range.begin, index_left[i]); + index_left[i] = create_field_ref(range.begin, i); may_be_true = key_condition.mayBeTrueAfter( used_key_size, index_left.data(), data.primary_key_data_types); @@ -1228,8 +1240,8 @@ MarkRanges MergeTreeDataSelectExecutor::markRangesFromPKRange( for (size_t i = 0; i < used_key_size; ++i) { - index[i]->get(range.begin, index_left[i]); - index[i]->get(range.end, index_right[i]); + index_left[i] = create_field_ref(range.begin, i); + index_right[i] = create_field_ref(range.end, i); } may_be_true = key_condition.mayBeTrueInRange( @@ -1254,9 +1266,9 @@ MarkRanges MergeTreeDataSelectExecutor::markRangesFromPKRange( size_t end; for (end = range.end; end > range.begin + step; end -= step) - ranges_stack.push_back(MarkRange(end - step, end)); + ranges_stack.emplace_back(end - step, end); - ranges_stack.push_back(MarkRange(range.begin, end)); + ranges_stack.emplace_back(range.begin, end); } } } diff --git a/src/Storages/MergeTree/MergeTreeIndexMinMax.cpp b/src/Storages/MergeTree/MergeTreeIndexMinMax.cpp index 122f038fee6..220fc70c549 100644 --- a/src/Storages/MergeTree/MergeTreeIndexMinMax.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexMinMax.cpp @@ -100,8 +100,8 @@ void MergeTreeIndexAggregatorMinMax::update(const Block & block, size_t * pos, s size_t rows_read = std::min(limit, block.rows() - *pos); - Field field_min; - Field field_max; + FieldRef field_min; + FieldRef field_max; for (size_t i = 0; i < index.columns.size(); ++i) { const auto & column = block.getByName(index.columns[i]).column; diff --git a/tests/performance/set_index.xml b/tests/performance/set_index.xml index 090d8ac8c08..f158c481d93 100644 --- a/tests/performance/set_index.xml +++ b/tests/performance/set_index.xml @@ -14,7 +14,14 @@ - SELECT count() FROM test_in WHERE a IN (SELECT rand(1) FROM zeros(100000)) SETTINGS max_rows_to_read = 1, read_overflow_mode = 'break' + SELECT count() FROM test_in WHERE a IN (SELECT rand(1) FROM numbers(100000)) SETTINGS max_rows_to_read = 1, read_overflow_mode = 'break' + + SELECT count() FROM test_in WHERE toInt64(a) IN (SELECT toInt64(rand(1)) FROM numbers(100000)) settings max_rows_to_read=1, read_overflow_mode='break' + + + SELECT count() FROM test_in WHERE -toInt64(a) IN (SELECT toInt64(rand(1)) FROM numbers(100000)) settings 
max_rows_to_read=1, read_overflow_mode='break' + + SELECT count() FROM test_in WHERE -toInt64(a) NOT IN (SELECT toInt64(rand(1)) FROM numbers(100000)) settings max_rows_to_read=1, read_overflow_mode='break' SELECT count() FROM numbers(1000) WHERE toString(number) IN ('41577', '83972', '51697', '50014', '37553', '93459', '87438', '95971', '83186', '74326', '67871', '50406', '83678', '29655', '18580', '83905', '61518', '29059', '56700', '82787', '98672', '30884', '81822', '39850', '80852', '57627', '91346', '64522', '17781', '49467', '41099', '41929', '85618', '91389', '68564', '91769', '81219', '52218', '37220', '97097', '2129', '9886', '52049', '34847', '25364', '36429', '76897', '71868', '58121', '71199', '84819', '69991', '34046', '64507', '34892', '24228', '36986', '28588', '51159', '53444', '80531', '9941', '20256', '48103', '32565', '62890', '5379', '60302', '46434', '3205', '18821', '31030', '19794', '71557', '71703', '15024', '14004', '82164', '95659', '40227', '83358', '24395', '9610', '19814', '48491', '66412', '16012', '71586', '42143', '51103', '24463', '89949', '35694', '39193', '63904', '40489', '77144', '94014', '84836', '9980', '46554', '43905', '25588', '25205', '72624', '10249', '35888', '98478', '99030', '26834', '31', '81499', '14847', '82997', '92357', '92893', '17426', '56630', '22252', '68119', '62710', '8740', '82144', '79916', '23391', '30192', '99271', '96435', '44237', '98327', '69481', '16691', '13643', '84554', '38571', '70926', '99283', '79000', '20926', '86495', '4834', '1222', '39486', '57697', '58002', '40790', '15623', '3999', '31515', '12694', '26143', '35951', '54085', '97534', '35329', '73535', '88715', '29572', '75799', '45166', '32066', '48023', '69523', '93150', '8740', '96790', '15534', '63252', '5142', '67045', '93992', '16663', '292', '63924', '6588', '12190', '31506', '69590', '35394', '55168', '65223', '79183', '32600', '69676', '28316', '72111', '53531', '15073', '41127', '73451', '24725', '61647', '65315', '41143', '26493', '95608', '34407', '76098', '53105', '83691', '48755', '35696', '62587', '81826', '3963', '45766', '82751', '12430', '97685', '29919', '78155', '71636', '50215', '89734', '9892', '47151', '54855', '3428', '9712', '52592', '2403', '79602', '81243', '79859', '57361', '82000', '42107', '28860', '99591', '28296', '57337', '64969', '32332', '25535', '30924', '21313', '32554', '17342', '87311', '19825', '24898', '61323', '83209', '79322', '79009', '50746', '33396', '62033', '16548', '17427', '24073', '34640', '52368', '4724', '80408', '40', '33787', '16666', '19665', '86751', '27264', '2241', '88134', '53566', '10589', '79711', '92823', '58972', '91767', '60885', '51659', '7867', '96849', '30360', '20914', '9584', '1250', '22871', '23282', '99312', '4683', '33429', '68361', '82614', '81440', '47863', '69790', '11968', '75210', '66854', '37002', '61142', '71514', '1588', '42336', '11069', '26291', '2261', '71056', '13492', '9133', '91216', '72207', '71586', '86535', '83898', '24392', '45384', '48545', '61972', '503', '80180', '35834', '97025', '70411', '55039', '35430', '27631', '82533', '96831', '74077', '42533', '14451', '26943', '53783', '69489', '71969', '8432', '37230', '61348', '19472', '59115', '9886', '50951', '57109', '7141', '1902', '84130', '4323', '55889', '47784', '2220', '75988', '66988', '63721', '8131', '95601', '95207', '2311', '26541', '50991', '6717', '2969', '71857', '51034', '65958', '94716', '90275', '21012', '46859', '7984', '31131', '46457', '69578', '44540', '7294', '80117', '9925', '60155', '90608', 
'82684', '32193', '87071', '28006', '87604', '24501', '79087', '2848', '29237', '11221', '81319', '40966', '87641', '35325', '78705', '88636', '78717', '62831', '56390', '99271', '43821', '14453', '17923', '62695', '77322', '21038', '67677', '41271', '4376', '65426', '46091', '19887', '97251', '55583', '58763', '3826', '35037', '73533', '64267', '82319', '9836', '42622', '96829', '16363', '10455', '49290', '99992', '98229', '66356', '59087', '73998', '25986', '4279', '56790', '69540', '588', '36620', '60358', '45056', '89297', '42740', '8323', '19245', '82417', '41431', '699', '11554', '73910', '44491', '56019', '68901', '45816', '68126', '89379', '23885', '13263', '56395', '73130', '19089', '23771', '10335', '48547', '16903', '6453', '33560', '89668', '38159', '43177', '90655', '49712', '62', '66920', '34180', '12150', '48564', '39538', '85026', '87195', '14928', '8956', '71157', '53287', '39161', '67583', '83309', '92054', '86977', '56188', '15229', '88170', '60894', '58497', '89254', '40082', '86890', '60161', '97291', '45878', '23368', '14577', '92870', '37017', '97356', '99426', '76061', '89186', '99751', '85153', '61580', '39360', '90107', '25603', '26798', '76224', '6469', '7912', '69838', '16404', '67497', '28965', '80836', '80365', '91249', '48713', '17113', '33090', '40793', '70450', '66689', '83698', '17802', '43869', '13355', '18959', '79411', '87930', '9265', '37504', '44876', '97234', '94149', '35040', '22049', '49248', '6535', '36080', '28346', '94437', '78319', '17961', '89056', '56161', '35810', '41632', '45494', '53351', '89729', '99510', '51584', '59688', '6193', '70809', '51093', '92589', '90247', '34910', '78235', '17362', '49423', '63324', '525', '37638', '72325', '89356', '15298', '59116', '17848', '65429', '27029', '84781', '70247', '8825', '35082', '70451', '22522', '58125', '91879', '90531', '2478', '463', '37902', '54405', '87267', '72688', '22803', '33134', '35177', '84551', '44974', '88375', '76407', '27774', '33849', '19915', '82014', '80434', '26380', '48777', '53811', '14838', '26829', '56441', '99869', '49574', '85476', '19723', '16907', '4018', '37338', '78510', '47912', '13030', '65277', '95716', '67363', '21393', '89887', '78842', '81650', '903', '17436', '30704', '49223', '27198', '25500', '52214', '54258', '70082', '53950', '49312', '43615', '99473', '94348', '53661', '96213', '96346', '62010', '38268', '32861', '75660', '10392', '89491', '68335', '29817', '88706', '24184', '36298', '43440', '21626', '26535', '44560', '46363', '12534', '99070', '95606', '33714', '73070', '8303', '29853', '23014', '99982', '4530', '14955', '45803', '50', '90750', '30394', '81276', '95563', '47314', '58520', '91299', '88944', '54402', '67405', '29253', '47079', '71734', '99728', '17652', '13307', '35556', '18962', '26780', '17771', '53712', '60055', '37628', '35830', '90739', '61151', '41309', '27652', '3051', '53167', '98417', '19382', '36833', '75085', '65374', '87732', '30352', '31776', '32765', '97565', '92199', '49050', '29503', '51024', '18834', '8515', '24069', '96216', '10777', '90680', '18974', '68884', '85305', '36007', '56707', '4212', '47352', '34426', '13185', '92939', '95782', '70577', '58080', '98279', '3906', '5065', '56896', '16382', '31273', '17117', '98602', '12786', '24086', '63970', '72756', '35798', '82367', '7356', '53398', '68503', '2962', '16425', '67334', '68461', '65439', '15620', '70906', '29649', '46461', '74602', '38012', '71714', '16825', '89480', '53386', '88532', '35104', '28556', '82120', '23155', '23347', '24797', '60061', '54962', 
'99427', '82248', '82447', '39968', '63727', '27431', '81511', '91168', '71425', '80740', '84127', '40717', '15503', '15419', '46594', '61263', '19212', '53175', '70724', '74445', '23034', '71818', '40246', '18886', '53066', '4880', '83701', '86107', '87862', '44751', '392', '73440', '90291', '93395', '20894', '38463', '32664', '55158', '20090', '50004', '79070', '98471', '85478', '96615', '68149', '78334', '97752', '73207', '71678', '91238', '96757', '82598', '194', '35797', '45120', '60782', '28721', '17676', '78066', '60957', '11826', '51563', '50516', '16485', '47053', '31738', '48923', '23554', '96850', '42033', '73701', '78607', '45979', '54571', '12415', '31693', '15356', '36902', '9126', '3767', '3295', '90402', '24005', '95350', '67033', '49137', '72606', '51899', '17522', '31957', '44641', '53982', '23767', '68257', '15766', '19995', '2107', '48788', '11765', '91055', '46576', '54651', '50381', '62827', '73636', '46606', '98753', '37631', '70441', '87916', '66983', '33870', '31125', '12904', '57040', '4874', '58632', '42037', '18782', '5998', '18974', '57949', '81010', '90407', '99874', '20462', '89949', '10952', '71454', '95130', '46115', '3518', '13384', '69039', '79482', '22076', '59782', '32042', '40930', '60243', '29298', '6790', '46985', '44398', '85631', '14380', '66179', '2629', '32126', '49833', '14118', '58492', '31493', '81172', '96638', '8745', '89663', '76842', '78633', '41373', '83721', '42886', '11123', '32739', '11051', '1303', '92314', '83324', '85600', '44276', '69064', '56125', '84650', '31028', '12628', '14502', '64764', '39405', '44855', '79046', '51716', '46824', '83389', '1941', '1257', '9280', '73176', '84729', '2579', '63366', '22606', '35541', '51096', '13447', '18355', '68037', '28436', '94116', '81070', '78355', '67897', '5296', '32742', '77645', '91853', '18767', '67949', '40963', '5792', '17278', '25597', '41884', '80829', '7099', '18645', '60295', '12082', '81800', '78415', '18082', '38789', '16295', '72377', '74949', '55583', '66853', '15402', '72977', '15123', '99434', '34999', '21687', '76049', '42987', '83748', '88256', '66688', '21766', '20304', '29271', '10069', '19822', '11792', '42526', '74143', '17289', '30253', '6367', '20888', '12975', '94073', '98639', '30134', '26320', '65507', '69002', '53120', '4550', '38893', '18954', '38283', '54863', '17698', '99670', '10521', '92467', '60994', '18052', '48673', '35811', '87282', '62706', '16061', '53112', '22652', '37780', '55662', '26331', '49410', '79074', '10623', '69577', '79613', '9491', '31229', '43922', '84231', '58409', '36386', '46875', '74431', '76735', '38776', '23350', '7314', '9079', '51519', '98544', '70216', '63380', '90381', '1295', '46901', '58225', '55339', '89918', '75522', '35431', '89460', '49552', '89302', '23068', '28493', '3042', '25194', '59520', '9810', '95706', '81297', '89638', '54794', '94527', '45262', '97932', '78685', '6947', '22818', '48700', '9153', '12289', '22011', '58825', '93854', '65438', '4509', '33741', '28208', '69061', '48578', '40247', '77725', '31837', '39003', '69363', '78113', '76398', '97262', '67795', From 5ada959853275249ad7ef2aec5031a4b6651109b Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Mon, 6 Apr 2020 13:36:56 +0300 Subject: [PATCH 116/484] improve performance of index analysis with monotonic functions --- src/Storages/MergeTree/KeyCondition.h | 8 ++--- .../MergeTree/MergeTreeDataSelectExecutor.cpp | 32 ++++++++++++------- 2 files changed, 24 insertions(+), 16 deletions(-) diff --git a/src/Storages/MergeTree/KeyCondition.h 
b/src/Storages/MergeTree/KeyCondition.h index a7cdd1f1e0a..ffc0d46a2ec 100644 --- a/src/Storages/MergeTree/KeyCondition.h +++ b/src/Storages/MergeTree/KeyCondition.h @@ -31,22 +31,20 @@ using ExpressionActionsPtr = std::shared_ptr; */ struct FieldRef : public Field { - using SharedBlock = std::shared_ptr; - FieldRef() = default; /// Create as explicit field without block. template - FieldRef(const T & value) : Field(value) {} + FieldRef(T && value) : Field(std::forward(value)) {} /// Create as reference to field in block. - FieldRef(const SharedBlock & block_, size_t row_idx_, size_t column_idx_) + FieldRef(Block * block_, size_t row_idx_, size_t column_idx_) : Field((*block_->getByPosition(column_idx_).column)[row_idx_]), block(block_), row_idx(row_idx_), column_idx(column_idx_) {} bool isExplicit() const { return block == nullptr; } - SharedBlock block; + Block * block; size_t row_idx; size_t column_idx; }; diff --git a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp index 39de45e07e0..13e852765b7 100644 --- a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp +++ b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp @@ -1203,17 +1203,27 @@ MarkRanges MergeTreeDataSelectExecutor::markRangesFromPKRange( */ std::vector ranges_stack = { {0, marks_count} }; - auto index_block = std::make_shared(); - for (size_t i = 0; i < used_key_size; ++i) - index_block->insert({index[i], data.primary_key_data_types[i], data.primary_key_columns[i]}); - - std::function create_field_ref; + std::function create_field_ref; /// If there is no monotonic functions, there is no need to save block reference. - /// Passing explicit field to FieldRef allows to optimize ranges and shows better performance while reading the field. + /// Passing explicit field to FieldRef allows to optimize ranges and shows better performance. if (key_condition.hasMonotonicFunctionsChain()) - create_field_ref = [&index_block](size_t row, size_t column) -> FieldRef { return {index_block, row, column}; }; + { + auto index_block = std::make_shared(); + for (size_t i = 0; i < used_key_size; ++i) + index_block->insert({index[i], data.primary_key_data_types[i], data.primary_key_columns[i]}); + + create_field_ref = [index_block](size_t row, size_t column, FieldRef & field) + { + field = {index_block.get(), row, column}; + }; + } else - create_field_ref = [&index](size_t row, size_t column) -> FieldRef { return (*index[column])[row]; }; + { + create_field_ref = [&index](size_t row, size_t column, FieldRef & field) + { + index[column]->get(row, field); + }; + } /// NOTE Creating temporary Field objects to pass to KeyCondition. 
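One subtlety in the hunk above: FieldRef now stores a raw Block *, so create_field_ref captures the shared_ptr by value and thereby pins the index block for as long as the callback itself is alive. A reduced model of that ownership pattern, with hypothetical names:

#include <functional>
#include <memory>
#include <vector>

/// Reduced model: the closure owns the block, so the refs it hands out can
/// hold a raw pointer safely for the closure's lifetime.
struct RefModel { std::vector<int> * block; size_t row; };

std::function<RefModel(size_t)> make_ref_factory()
{
    auto block = std::make_shared<std::vector<int>>(std::vector<int>{1, 2, 3});
    /// The returned closure holds the last reference once this scope exits.
    return [block](size_t row) { return RefModel{block.get(), row}; };
}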
std::vector index_left(used_key_size); @@ -1228,7 +1238,7 @@ MarkRanges MergeTreeDataSelectExecutor::markRangesFromPKRange( if (range.end == marks_count && !has_final_mark) { for (size_t i = 0; i < used_key_size; ++i) - index_left[i] = create_field_ref(range.begin, i); + create_field_ref(range.begin, i, index_left[i]); may_be_true = key_condition.mayBeTrueAfter( used_key_size, index_left.data(), data.primary_key_data_types); @@ -1240,8 +1250,8 @@ MarkRanges MergeTreeDataSelectExecutor::markRangesFromPKRange( for (size_t i = 0; i < used_key_size; ++i) { - index_left[i] = create_field_ref(range.begin, i); - index_right[i] = create_field_ref(range.end, i); + create_field_ref(range.begin, i, index_left[i]); + create_field_ref(range.end, i, index_right[i]); } may_be_true = key_condition.mayBeTrueInRange( From 848678d65698bb0712c9707a609e96282cfc53f1 Mon Sep 17 00:00:00 2001 From: alesapin Date: Mon, 6 Apr 2020 14:02:17 +0300 Subject: [PATCH 117/484] Now layout type for dictionaries DDL with no arguments can be written without brackets --- src/Parsers/ASTDictionary.cpp | 11 +++++-- src/Parsers/ASTDictionary.h | 2 ++ .../ASTFunctionWithKeyValueArguments.cpp | 4 +-- .../ASTFunctionWithKeyValueArguments.h | 7 ++++ src/Parsers/ExpressionElementParsers.cpp | 26 +++++++++++---- src/Parsers/ExpressionElementParsers.h | 7 ++++ src/Parsers/ParserDictionary.cpp | 7 +++- ...tionary_layout_without_arguments.reference | 3 ++ ...10_dictionary_layout_without_arguments.sql | 33 +++++++++++++++++++ 9 files changed, 88 insertions(+), 12 deletions(-) create mode 100644 tests/queries/0_stateless/01110_dictionary_layout_without_arguments.reference create mode 100644 tests/queries/0_stateless/01110_dictionary_layout_without_arguments.sql diff --git a/src/Parsers/ASTDictionary.cpp b/src/Parsers/ASTDictionary.cpp index 5c477c2aab7..9ff600333c5 100644 --- a/src/Parsers/ASTDictionary.cpp +++ b/src/Parsers/ASTDictionary.cpp @@ -24,6 +24,7 @@ void ASTDictionaryRange::formatImpl(const FormatSettings & settings, << "(" << (settings.hilite ? hilite_keyword : "") << "MIN " + << (settings.hilite ? hilite_none : "") << min_attr_name << " " << (settings.hilite ? hilite_keyword : "") << "MAX " @@ -52,6 +53,7 @@ void ASTDictionaryLifetime::formatImpl(const FormatSettings & settings, << "(" << (settings.hilite ? hilite_keyword : "") << "MIN " + << (settings.hilite ? hilite_none : "") << min_sec << " " << (settings.hilite ? hilite_keyword : "") << "MAX " @@ -86,7 +88,9 @@ void ASTDictionaryLayout::formatImpl(const FormatSettings & settings, << Poco::toUpper(layout_type) << (settings.hilite ? hilite_none : ""); - settings.ostr << "("; + if (has_brackets) + settings.ostr << "("; + if (parameter) { settings.ostr << (settings.hilite ? 
hilite_keyword : "") @@ -96,7 +100,10 @@ void ASTDictionaryLayout::formatImpl(const FormatSettings & settings, parameter->second->formatImpl(settings, state, frame); } - settings.ostr << ")"; + + if (has_brackets) + settings.ostr << ")"; + settings.ostr << ")"; } diff --git a/src/Parsers/ASTDictionary.h b/src/Parsers/ASTDictionary.h index e146162cbdf..6982381f14d 100644 --- a/src/Parsers/ASTDictionary.h +++ b/src/Parsers/ASTDictionary.h @@ -33,6 +33,8 @@ public: String layout_type; /// optional parameter (size_in_cells) std::optional parameter; + /// has brackets after layout type + bool has_brackets = true; String getID(char) const override { return "Dictionary layout"; } diff --git a/src/Parsers/ASTFunctionWithKeyValueArguments.cpp b/src/Parsers/ASTFunctionWithKeyValueArguments.cpp index 8fdeb90c25b..0843bddac7d 100644 --- a/src/Parsers/ASTFunctionWithKeyValueArguments.cpp +++ b/src/Parsers/ASTFunctionWithKeyValueArguments.cpp @@ -64,9 +64,9 @@ ASTPtr ASTFunctionWithKeyValueArguments::clone() const void ASTFunctionWithKeyValueArguments::formatImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const { - settings.ostr << (settings.hilite ? hilite_keyword : "") << Poco::toUpper(name) << (settings.hilite ? hilite_none : "") << "("; + settings.ostr << (settings.hilite ? hilite_keyword : "") << Poco::toUpper(name) << (settings.hilite ? hilite_none : "") << (has_brackets ? "(" : ""); elements->formatImpl(settings, state, frame); - settings.ostr << ")"; + settings.ostr << (has_brackets ? ")" : ""); settings.ostr << (settings.hilite ? hilite_none : ""); } diff --git a/src/Parsers/ASTFunctionWithKeyValueArguments.h b/src/Parsers/ASTFunctionWithKeyValueArguments.h index e09e477417f..3f31b4a7c5b 100644 --- a/src/Parsers/ASTFunctionWithKeyValueArguments.h +++ b/src/Parsers/ASTFunctionWithKeyValueArguments.h @@ -44,6 +44,13 @@ public: String name; /// Expression list ASTPtr elements; + /// Has brackets around arguments + bool has_brackets; + + ASTFunctionWithKeyValueArguments(bool has_brackets_ = true) + : has_brackets(has_brackets_) + { + } public: String getID(char delim) const override; diff --git a/src/Parsers/ExpressionElementParsers.cpp b/src/Parsers/ExpressionElementParsers.cpp index c4e43829da9..30fa4a2e9fb 100644 --- a/src/Parsers/ExpressionElementParsers.cpp +++ b/src/Parsers/ExpressionElementParsers.cpp @@ -1400,18 +1400,30 @@ bool ParserFunctionWithKeyValueArguments::parseImpl(Pos & pos, ASTPtr & node, Ex if (!id_parser.parse(pos, identifier, expected)) return false; - if (pos.get().type != TokenType::OpeningRoundBracket) - return false; - ++pos; + bool left_bracket_found = false; + if (pos.get().type != TokenType::OpeningRoundBracket) + { + if (!brackets_can_be_omitted) + return false; + } + else + { + ++pos; + left_bracket_found = true; + } + if (!pairs_list_parser.parse(pos, expr_list_args, expected)) return false; - if (pos.get().type != TokenType::ClosingRoundBracket) - return false; + if (left_bracket_found) + { + if (pos.get().type != TokenType::ClosingRoundBracket) + return false; + ++pos; + } - ++pos; - auto function = std::make_shared(); + auto function = std::make_shared(left_bracket_found); function->name = Poco::toLower(typeid_cast(*identifier.get()).name); function->elements = expr_list_args; function->children.push_back(function->elements); diff --git a/src/Parsers/ExpressionElementParsers.h b/src/Parsers/ExpressionElementParsers.h index b9d8d5db42c..b02b29fb2e5 100644 --- a/src/Parsers/ExpressionElementParsers.h +++ 
b/src/Parsers/ExpressionElementParsers.h @@ -346,9 +346,16 @@ protected: */ class ParserFunctionWithKeyValueArguments : public IParserBase { +public: + ParserFunctionWithKeyValueArguments(bool brackets_can_be_omitted_ = false) + : brackets_can_be_omitted(brackets_can_be_omitted_) {} protected: + const char * getName() const override { return "function with key-value arguments"; } bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override; + + /// brackets for function arguments can be omitted + bool brackets_can_be_omitted; }; /** Data type or table engine, possibly with parameters. For example, UInt8 or see examples from ParserIdentifierWithParameters diff --git a/src/Parsers/ParserDictionary.cpp b/src/Parsers/ParserDictionary.cpp index ca9c2ad031a..8f41882c399 100644 --- a/src/Parsers/ParserDictionary.cpp +++ b/src/Parsers/ParserDictionary.cpp @@ -109,7 +109,7 @@ bool ParserDictionaryRange::parseImpl(Pos & pos, ASTPtr & node, Expected & expec bool ParserDictionaryLayout::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) { - ParserFunctionWithKeyValueArguments key_value_func_p; + ParserFunctionWithKeyValueArguments key_value_func_p(/* brackets_can_be_omitted = */ true); ASTPtr ast_func; if (!key_value_func_p.parse(pos, ast_func, expected)) return false; @@ -121,12 +121,17 @@ bool ParserDictionaryLayout::parseImpl(Pos & pos, ASTPtr & node, Expected & expe return false; res->layout_type = func.name; + res->has_brackets = func.has_brackets; const ASTExpressionList & type_expr_list = func.elements->as(); /// there are no layout with more than 1 parameter if (type_expr_list.children.size() > 1) return false; + /// if layout has params than brackets must be specified + if (type_expr_list.children.size() != 0 && !res->has_brackets) + return false; + if (type_expr_list.children.size() == 1) { const ASTPair * pair = dynamic_cast(type_expr_list.children.at(0).get()); diff --git a/tests/queries/0_stateless/01110_dictionary_layout_without_arguments.reference b/tests/queries/0_stateless/01110_dictionary_layout_without_arguments.reference new file mode 100644 index 00000000000..a0518e78891 --- /dev/null +++ b/tests/queries/0_stateless/01110_dictionary_layout_without_arguments.reference @@ -0,0 +1,3 @@ +World +CREATE DICTIONARY db_for_dict.dict_with_hashed_layout (`key1` UInt64, `value` String) PRIMARY KEY key1 SOURCE(CLICKHOUSE(HOST \'localhost\' PORT 9000 USER \'default\' TABLE \'table_for_dict\' DB \'db_for_dict\')) LIFETIME(MIN 1 MAX 10) LAYOUT(HASHED) +Hello diff --git a/tests/queries/0_stateless/01110_dictionary_layout_without_arguments.sql b/tests/queries/0_stateless/01110_dictionary_layout_without_arguments.sql new file mode 100644 index 00000000000..718e7f295b3 --- /dev/null +++ b/tests/queries/0_stateless/01110_dictionary_layout_without_arguments.sql @@ -0,0 +1,33 @@ +DROP DATABASE IF EXISTS db_for_dict; +CREATE DATABASE db_for_dict; + +CREATE TABLE db_for_dict.table_for_dict +( + key1 UInt64, + value String +) +ENGINE = Memory(); + +INSERT INTO db_for_dict.table_for_dict VALUES (1, 'Hello'), (2, 'World'); + +CREATE DICTIONARY db_for_dict.dict_with_hashed_layout +( + key1 UInt64, + value String +) +PRIMARY KEY key1 +LAYOUT(HASHED) +SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'table_for_dict' DB 'db_for_dict')) +LIFETIME(MIN 1 MAX 10); + +SELECT dictGet('db_for_dict.dict_with_hashed_layout', 'value', toUInt64(2)); + +DETACH DICTIONARY db_for_dict.dict_with_hashed_layout; + +ATTACH DICTIONARY db_for_dict.dict_with_hashed_layout; + +SHOW CREATE DICTIONARY 
db_for_dict.dict_with_hashed_layout; + +SELECT dictGet('db_for_dict.dict_with_hashed_layout', 'value', toUInt64(1)); + +DROP DATABASE IF EXISTS db_for_dict; From 9c5cea3035a2f877426e32f05b8c0571c98f3fd6 Mon Sep 17 00:00:00 2001 From: Vasily Nemkov Date: Mon, 6 Apr 2020 13:43:00 +0300 Subject: [PATCH 118/484] More tests and better token checks. --- .../tests/gtest_SplitTokenExtractor.cpp | 29 +++++++++++++++++-- 1 file changed, 26 insertions(+), 3 deletions(-) diff --git a/dbms/src/Storages/tests/gtest_SplitTokenExtractor.cpp b/dbms/src/Storages/tests/gtest_SplitTokenExtractor.cpp index e2229792020..de78347ebbd 100644 --- a/dbms/src/Storages/tests/gtest_SplitTokenExtractor.cpp +++ b/dbms/src/Storages/tests/gtest_SplitTokenExtractor.cpp @@ -62,16 +62,39 @@ TEST_P(SplitTokenExtractorTest, next) { SCOPED_TRACE(++i); ASSERT_TRUE(token_extractor.next(data->data(), data->size(), &pos, &token_start, &token_len)); - EXPECT_EQ(expected_token, param.source.substr(token_start, token_len)) + + EXPECT_EQ(expected_token, std::string_view(data->data() + token_start, token_len)) << " token_start:" << token_start << " token_len: " << token_len; } - - ASSERT_FALSE(token_extractor.next(data->data(), data->size(), &pos, &token_start, &token_len)); + ASSERT_FALSE(token_extractor.next(data->data(), data->size(), &pos, &token_start, &token_len)) + << "\n\t=> \"" << param.source.substr(token_start, token_len) << "\"" + << "\n\t" << token_start << ", " << token_len << ", " << pos << ", " << data->size(); } // Helper to allow strings with embedded '\0' chars. #define BINARY_STRING(str) std::string{str, sizeof(str) - 1} +INSTANTIATE_TEST_SUITE_P(NoTokens, + SplitTokenExtractorTest, + ::testing::ValuesIn(std::initializer_list{ + { + "Empty input sequence produces no tokens.", + "", + {} + }, + { + "Whitespace only", + " ", + {} + }, + { + "Whitespace only large string", + " \t\v\n\r \t\v\n\r \t\v\n\r \t\v\n\r \t\v\n\r \t\v\n\r \t\v\n\r \t\v\n\r \t\v\n\r \t\v\n\r", + {} + } + }) +); + INSTANTIATE_TEST_SUITE_P(ShortSingleToken, SplitTokenExtractorTest, ::testing::ValuesIn(std::initializer_list{ From f678c3f611d47495b3495c07d3c82fc01be77cc7 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 6 Apr 2020 12:47:28 +0000 Subject: [PATCH 119/484] Bump beautifulsoup4 from 4.8.2 to 4.9.0 in /docs/tools Bumps [beautifulsoup4](http://www.crummy.com/software/BeautifulSoup/bs4/) from 4.8.2 to 4.9.0. Signed-off-by: dependabot-preview[bot] --- docs/tools/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/tools/requirements.txt b/docs/tools/requirements.txt index e43b67a159a..7c0d63129f4 100644 --- a/docs/tools/requirements.txt +++ b/docs/tools/requirements.txt @@ -1,7 +1,7 @@ Babel==2.8.0 backports-abc==0.5 backports.functools-lru-cache==1.6.1 -beautifulsoup4==4.8.2 +beautifulsoup4==4.9.0 certifi==2020.4.5.1 chardet==3.0.4 click==7.1.1 From 8986cf688b719e2e7dcd3fd60cdc5b2afff793c2 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Mon, 6 Apr 2020 16:28:46 +0300 Subject: [PATCH 120/484] Added comment. 
--- .../Transforms/MergingAggregatedMemoryEfficientTransform.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Processors/Transforms/MergingAggregatedMemoryEfficientTransform.cpp b/src/Processors/Transforms/MergingAggregatedMemoryEfficientTransform.cpp index cabe74b36e9..12d289deaed 100644 --- a/src/Processors/Transforms/MergingAggregatedMemoryEfficientTransform.cpp +++ b/src/Processors/Transforms/MergingAggregatedMemoryEfficientTransform.cpp @@ -275,7 +275,7 @@ void GroupingAggregatedTransform::work() { if (!single_level_chunks.empty()) { - auto & header = getInputs().front().getHeader(); + auto & header = getInputs().front().getHeader(); /// Take header from input port. Output header is empty. auto block = header.cloneWithColumns(single_level_chunks.back().detachColumns()); single_level_chunks.pop_back(); auto blocks = params->aggregator.convertBlockToTwoLevel(block); From 2dc1eddfab04348c816ecc5ef3794f8376e491dc Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Mon, 6 Apr 2020 16:35:11 +0300 Subject: [PATCH 121/484] fix FieldRef --- src/Storages/MergeTree/KeyCondition.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Storages/MergeTree/KeyCondition.h b/src/Storages/MergeTree/KeyCondition.h index ffc0d46a2ec..7c8b63eb800 100644 --- a/src/Storages/MergeTree/KeyCondition.h +++ b/src/Storages/MergeTree/KeyCondition.h @@ -44,9 +44,9 @@ struct FieldRef : public Field bool isExplicit() const { return block == nullptr; } - Block * block; - size_t row_idx; - size_t column_idx; + Block * block = nullptr; + size_t row_idx = 0; + size_t column_idx = 0; }; /** Range with open or closed ends; possibly unbounded. From 174136a1e076c9d259cd1aa25280f1793ca7399d Mon Sep 17 00:00:00 2001 From: Artem Zuikov Date: Mon, 6 Apr 2020 16:39:57 +0300 Subject: [PATCH 122/484] inflating cross join (#10029) --- src/Interpreters/InterpreterSelectQuery.cpp | 8 +- src/Interpreters/Join.cpp | 98 +++++++++++++------ src/Interpreters/Join.h | 2 +- .../01109_inflating_cross_join.reference | 1 + .../01109_inflating_cross_join.sql | 7 ++ 5 files changed, 84 insertions(+), 32 deletions(-) create mode 100644 tests/queries/0_stateless/01109_inflating_cross_join.reference create mode 100644 tests/queries/0_stateless/01109_inflating_cross_join.sql diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index 514efb90a00..80a7831475b 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ b/src/Interpreters/InterpreterSelectQuery.cpp @@ -893,7 +893,13 @@ void InterpreterSelectQuery::executeImpl(TPipeline & pipeline, const BlockInputS default_totals = true; } - bool inflating_join = join && !typeid_cast(join.get()); + bool inflating_join = false; + if (join) + { + inflating_join = true; + if (auto * hash_join = typeid_cast(join.get())) + inflating_join = isCross(hash_join->getKind()); + } pipeline.addSimpleTransform([&](const Block & header, QueryPipeline::StreamType type) { diff --git a/src/Interpreters/Join.cpp b/src/Interpreters/Join.cpp index e60f532d517..d5cec54c9ef 100644 --- a/src/Interpreters/Join.cpp +++ b/src/Interpreters/Join.cpp @@ -37,6 +37,16 @@ namespace ErrorCodes extern const int TYPE_MISMATCH; } +namespace +{ + +struct NotProcessedCrossJoin : public ExtraBlock +{ + size_t left_position; + size_t right_block; +}; + +} static ColumnPtr filterWithBlanks(ColumnPtr src_column, const IColumn::Filter & filter, bool inverse_filter = false) { @@ -1055,53 +1065,81 @@ void Join::joinBlockImpl( } } - -void 
Join::joinBlockImplCross(Block & block) const +void Join::joinBlockImplCross(Block & block, ExtraBlockPtr & not_processed) const { - /// Add new columns to the block. + size_t max_joined_block_rows = table_join->maxJoinedBlockRows(); + size_t start_left_row = 0; + size_t start_right_block = 0; + if (not_processed) + { + auto & continuation = static_cast(*not_processed); + start_left_row = continuation.left_position; + start_right_block = continuation.right_block; + not_processed.reset(); + } + size_t num_existing_columns = block.columns(); size_t num_columns_to_add = sample_block_with_columns_to_add.columns(); + ColumnRawPtrs src_left_columns; + MutableColumns dst_columns; + + { + src_left_columns.reserve(num_existing_columns); + dst_columns.reserve(num_existing_columns + num_columns_to_add); + + for (const ColumnWithTypeAndName & left_column : block) + { + src_left_columns.push_back(left_column.column.get()); + dst_columns.emplace_back(src_left_columns.back()->cloneEmpty()); + } + + for (const ColumnWithTypeAndName & right_column : sample_block_with_columns_to_add) + dst_columns.emplace_back(right_column.column->cloneEmpty()); + + for (auto & dst : dst_columns) + dst->reserve(max_joined_block_rows); + } + size_t rows_left = block.rows(); + size_t rows_added = 0; - ColumnRawPtrs src_left_columns(num_existing_columns); - MutableColumns dst_columns(num_existing_columns + num_columns_to_add); - - for (size_t i = 0; i < num_existing_columns; ++i) - { - src_left_columns[i] = block.getByPosition(i).column.get(); - dst_columns[i] = src_left_columns[i]->cloneEmpty(); - } - - for (size_t i = 0; i < num_columns_to_add; ++i) - { - const ColumnWithTypeAndName & src_column = sample_block_with_columns_to_add.getByPosition(i); - dst_columns[num_existing_columns + i] = src_column.column->cloneEmpty(); - block.insert(src_column); - } - - /// NOTE It would be better to use `reserve`, as well as `replicate` methods to duplicate the values of the left block. 
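The hunk below replaces the row-by-row duplication that this NOTE complains about, and it is also where the new memory bound comes from: once `rows_added` exceeds `max_joined_block_rows`, the current (left row, right block) position is stashed in `NotProcessedCrossJoin` and the join resumes from it on the next `joinBlock()` call. A standalone sketch of that resumption pattern (illustration only, with invented names; this is not the ClickHouse code):

```cpp
#include <cstddef>
#include <iostream>
#include <optional>
#include <vector>

/// Emits the cross product of lhs x rhs in chunks of at most max_rows,
/// returning the position at which the next call must resume, or nullopt
/// once the whole product has been produced.
struct ResumePoint { size_t left_pos = 0; size_t right_pos = 0; };

std::optional<ResumePoint> emitChunk(
    const std::vector<int> & lhs, const std::vector<int> & rhs,
    size_t max_rows, ResumePoint start)
{
    size_t emitted = 0;
    for (size_t i = start.left_pos; i < lhs.size(); ++i)
    {
        /// Resume mid-row for the first left row, start from 0 afterwards.
        for (size_t j = (i == start.left_pos ? start.right_pos : 0); j < rhs.size(); ++j)
        {
            std::cout << lhs[i] << ", " << rhs[j] << '\n';
            if (++emitted >= max_rows)
                return ResumePoint{i, j + 1};
        }
    }
    return std::nullopt;
}

int main()
{
    const std::vector<int> lhs{1, 2, 3};
    const std::vector<int> rhs{10, 20};
    std::optional<ResumePoint> state{ResumePoint{}};
    while (state)
        state = emitChunk(lhs, rhs, /* max_rows = */ 4, *state);
}
```

The real code additionally swaps the partially built block into `not_processed`, so the pipeline can hand the remainder back on a later call.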
- - for (size_t i = 0; i < rows_left; ++i) + for (size_t left_row = start_left_row; left_row < rows_left; ++left_row) { + size_t block_number = 0; for (const Block & block_right : data->blocks) { + ++block_number; + if (block_number < start_right_block) + continue; + size_t rows_right = block_right.rows(); + rows_added += rows_right; for (size_t col_num = 0; col_num < num_existing_columns; ++col_num) - for (size_t j = 0; j < rows_right; ++j) - dst_columns[col_num]->insertFrom(*src_left_columns[col_num], i); + dst_columns[col_num]->insertManyFrom(*src_left_columns[col_num], left_row, rows_right); for (size_t col_num = 0; col_num < num_columns_to_add; ++col_num) { - const IColumn * column_right = block_right.getByPosition(col_num).column.get(); - - for (size_t j = 0; j < rows_right; ++j) - dst_columns[num_existing_columns + col_num]->insertFrom(*column_right, j); + const IColumn & column_right = *block_right.getByPosition(col_num).column; + dst_columns[num_existing_columns + col_num]->insertRangeFrom(column_right, 0, rows_right); } } + + start_right_block = 0; + + if (rows_added > max_joined_block_rows) + { + not_processed = std::make_shared( + NotProcessedCrossJoin{{block.cloneEmpty()}, left_row, block_number + 1}); + not_processed->block.swap(block); + break; + } } + for (const ColumnWithTypeAndName & src_column : sample_block_with_columns_to_add) + block.insert(src_column); + block = block.cloneWithColumns(std::move(dst_columns)); } @@ -1160,7 +1198,7 @@ void Join::joinGet(Block & block, const String & column_name) const } -void Join::joinBlock(Block & block, ExtraBlockPtr &) +void Join::joinBlock(Block & block, ExtraBlockPtr & not_processed) { std::shared_lock lock(data->rwlock); @@ -1175,7 +1213,7 @@ void Join::joinBlock(Block & block, ExtraBlockPtr &) /// Joined } else if (kind == ASTTableJoin::Kind::Cross) - joinBlockImplCross(block); + joinBlockImplCross(block, not_processed); else throw Exception("Logical error: unknown combination of JOIN", ErrorCodes::LOGICAL_ERROR); } diff --git a/src/Interpreters/Join.h b/src/Interpreters/Join.h index d9f0cfb55cb..9380649aeee 100644 --- a/src/Interpreters/Join.h +++ b/src/Interpreters/Join.h @@ -379,7 +379,7 @@ private: const Block & block_with_columns_to_add, const Maps & maps) const; - void joinBlockImplCross(Block & block) const; + void joinBlockImplCross(Block & block, ExtraBlockPtr & not_processed) const; template void joinGetImpl(Block & block, const String & column_name, const Maps & maps) const; diff --git a/tests/queries/0_stateless/01109_inflating_cross_join.reference b/tests/queries/0_stateless/01109_inflating_cross_join.reference new file mode 100644 index 00000000000..825319e1c5b --- /dev/null +++ b/tests/queries/0_stateless/01109_inflating_cross_join.reference @@ -0,0 +1 @@ +10000000 diff --git a/tests/queries/0_stateless/01109_inflating_cross_join.sql b/tests/queries/0_stateless/01109_inflating_cross_join.sql new file mode 100644 index 00000000000..315f5c43c1e --- /dev/null +++ b/tests/queries/0_stateless/01109_inflating_cross_join.sql @@ -0,0 +1,7 @@ +SET max_memory_usage = 16000000; + +SET max_joined_block_size_rows = 10000000; +SELECT count(*) FROM numbers(10000) n1 CROSS JOIN numbers(1000) n2; -- { serverError 241 } + +SET max_joined_block_size_rows = 1000; +SELECT count(*) FROM numbers(10000) n1 CROSS JOIN numbers(1000) n2; From 043c86dbb59680346b6ac146ca6f3c26ad19c401 Mon Sep 17 00:00:00 2001 From: BayoNet Date: Mon, 6 Apr 2020 17:10:03 +0300 Subject: [PATCH 123/484] DOCS-499: avgWeighted (#10040) * CLICKHOUSEDOCS-499: 
avgWeighted description is added. * CLICKHOUSEDOCS-499: Update. Co-authored-by: Sergei Shtykov --- .../aggregate_functions/reference.md | 43 +++++++++++++++++++ .../query_language/agg_functions/reference.md | 43 +++++++++++++++++++ 2 files changed, 86 insertions(+) diff --git a/docs/en/sql_reference/aggregate_functions/reference.md b/docs/en/sql_reference/aggregate_functions/reference.md index 2c9eebb803b..d7bc8e963e2 100644 --- a/docs/en/sql_reference/aggregate_functions/reference.md +++ b/docs/en/sql_reference/aggregate_functions/reference.md @@ -527,6 +527,49 @@ Calculates the average. Only works for numbers. The result is always Float64. + +## avgWeighted {#avgweighted} + +Calculates the [weighted arithmetic mean](https://en.wikipedia.org/wiki/Weighted_arithmetic_mean). + +**Syntax** + +```sql +avgWeighted(x, weight) +``` + +**Parameters** + +- `x` — Values. [Integer](../data_types/int_uint.md) or [floating-point](../data_types/float.md). +- `weight` — Weights of the values. [Integer](../data_types/int_uint.md) or [floating-point](../data_types/float.md). + +Types of `x` and `weight` must be the same. + +**Returned value** + +- Weighted mean. +- `NaN` if all the weights are equal to 0. + +Type: [Float64](../data_types/float.md). + +**Example** + +Query: + +``` sql +SELECT avgWeighted(x, weight) +FROM values('x Int8, weight Int8', (4, 1), (1, 0), (10, 2)) +``` + +Result: + +```text +┌─avgWeighted(x, weight)─┐ │ 8 │ └────────────────────────┘ ``` + + ## uniq {#agg_function-uniq} Calculates the approximate number of different values of the argument. diff --git a/docs/ru/query_language/agg_functions/reference.md b/docs/ru/query_language/agg_functions/reference.md index 62c7787fd4c..3a6c3679c54 100644 --- a/docs/ru/query_language/agg_functions/reference.md +++ b/docs/ru/query_language/agg_functions/reference.md @@ -523,6 +523,49 @@ FROM ( Работает только для чисел. Результат всегда Float64. + +## avgWeighted {#avgweighted} + +Вычисляет [среднее арифметическое взвешенное](https://ru.wikipedia.org/wiki/Среднее_арифметическое_взвешенное). + +**Синтаксис** + +```sql +avgWeighted(x, weight) +``` + +**Параметры** + +- `x` — Значения. [Целые числа](../../data_types/int_uint.md) или [числа с плавающей запятой](../../data_types/float.md). +- `weight` — Веса отдельных значений. [Целые числа](../../data_types/int_uint.md) или [числа с плавающей запятой](../../data_types/float.md). + +Типы параметров должны совпадать. + +**Возвращаемое значение** + +- Среднее арифметическое взвешенное. +- `NaN`, если все веса равны 0. + +Тип: [Float64](../../data_types/float.md) + +**Пример** + +Запрос: + +```sql +SELECT avgWeighted(x, weight) +FROM values('x Int8, weight Int8', (4, 1), (1, 0), (10, 2)) +``` + +Результат: + +```text +┌─avgWeighted(x, weight)─┐ │ 8 │ └────────────────────────┘ ``` + + ## uniq {#agg_function-uniq} Приближённо вычисляет количество различных значений аргумента. From 0fb6cf0a4c6ed8c2f2c8a241199a05aaa5722820 Mon Sep 17 00:00:00 2001 From: Ivan Blinkov Date: Mon, 6 Apr 2020 17:10:37 +0300 Subject: [PATCH 124/484] Update README.md --- README.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/README.md b/README.md index cef9b327534..84af1e30a6b 100644 --- a/README.md +++ b/README.md @@ -15,8 +15,6 @@ ClickHouse is an open-source column-oriented database management system that all ## Upcoming Events -* [ClickHouse Online Meetup (in Russian)](https://events.yandex.ru/events/click-house-onlajn-vs-03-04-2020) on April 3, 2020.
-* [Talk on Saint HighLoad++ (online in Russian)](https://www.highload.ru/spb/2020/abstracts/6647) on April 6, 2020. * [ClickHouse in Avito (online in Russian)](https://avitotech.timepad.ru/event/1290051/) on April 9, 2020. * [ClickHouse Workshop in Novosibirsk](https://2020.codefest.ru/lecture/1628) on TBD date. * [Yandex C++ Open-Source Sprints in Moscow](https://events.yandex.ru/events/otkrytyj-kod-v-yandek-28-03-2020) on TBD date. From 4be263c72f813193b92bc6ead30a77e7dfe2ec62 Mon Sep 17 00:00:00 2001 From: alesapin Date: Mon, 6 Apr 2020 17:43:36 +0300 Subject: [PATCH 125/484] Clang-tidy fix --- src/Parsers/ParserDictionary.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Parsers/ParserDictionary.cpp b/src/Parsers/ParserDictionary.cpp index 8f41882c399..2680c700296 100644 --- a/src/Parsers/ParserDictionary.cpp +++ b/src/Parsers/ParserDictionary.cpp @@ -129,7 +129,7 @@ bool ParserDictionaryLayout::parseImpl(Pos & pos, ASTPtr & node, Expected & expe return false; /// if the layout has params then brackets must be specified - if (type_expr_list.children.size() != 0 && !res->has_brackets) + if (!type_expr_list.children.empty() && !res->has_brackets) return false; if (type_expr_list.children.size() == 1) From c17fa34fa5de8ba01fad45ec059c61be9b125d4d Mon Sep 17 00:00:00 2001 From: alesapin Date: Mon, 6 Apr 2020 20:00:29 +0300 Subject: [PATCH 126/484] Fix bug with uncompressed checksums in CHECK TABLE query --- src/Storages/MergeTree/checkDataPart.cpp | 35 ++++++++++++++++------- .../01112_check_table_with_index.reference | 1 + .../01112_check_table_with_index.sql | 15 ++++++++ 3 files changed, 38 insertions(+), 13 deletions(-) create mode 100644 tests/queries/0_stateless/01112_check_table_with_index.reference create mode 100644 tests/queries/0_stateless/01112_check_table_with_index.sql diff --git a/src/Storages/MergeTree/checkDataPart.cpp b/src/Storages/MergeTree/checkDataPart.cpp index 6da051d04ac..03728980c69 100644 --- a/src/Storages/MergeTree/checkDataPart.cpp +++ b/src/Storages/MergeTree/checkDataPart.cpp @@ -99,19 +99,6 @@ IMergeTreeDataPart::Checksums checkDataPart( throw Exception("Unknown type in part " + path, ErrorCodes::UNKNOWN_PART_TYPE); } - for (auto it = disk->iterateDirectory(path); it->isValid(); it->next()) - { - const String & file_name = it->name(); - auto checksum_it = checksums_data.files.find(file_name); - if (checksum_it == checksums_data.files.end() && file_name != "checksums.txt" && file_name != "columns.txt") - { - auto file_buf = disk->readFile(it->path()); - HashingReadBuffer hashing_buf(*file_buf); - hashing_buf.tryIgnore(std::numeric_limits::max()); - checksums_data.files[file_name] = IMergeTreeDataPart::Checksums::Checksum(hashing_buf.count(), hashing_buf.getHash()); - } - } - /// Checksums from file checksums.txt. May be absent. If present, they are subsequently compared with the actual data checksums.
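The bug being fixed here: files were hashed as raw byte streams before `checksums.txt` was even read, so compressed files whose `checksums.txt` entry records an uncompressed size were verified the wrong way. The hunk below therefore moves the per-file loop after the load and branches on that entry. A simplified sketch of the per-file decision (invented types and names, not the actual code):

```cpp
#include <map>
#include <string>

/// A file that checksums.txt lists with a non-zero uncompressed size is a
/// compressed file and must be verified via the compressed-file checksum
/// routine; any other file is hashed as an opaque byte stream.
struct TxtChecksum { size_t file_size = 0; size_t uncompressed_size = 0; };
using ChecksumsFromTxt = std::map<std::string, TxtChecksum>;

bool needsCompressedCheck(const ChecksumsFromTxt & from_txt, const std::string & file_name)
{
    const auto it = from_txt.find(file_name);
    return it != from_txt.end() && it->second.uncompressed_size != 0;
}
```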
IMergeTreeDataPart::Checksums checksums_txt; @@ -122,6 +109,28 @@ IMergeTreeDataPart::Checksums checkDataPart( assertEOF(*buf); } + const auto & checksum_files_txt = checksums_txt.files; + for (auto it = disk->iterateDirectory(path); it->isValid(); it->next()) + { + const String & file_name = it->name(); + auto checksum_it = checksums_data.files.find(file_name); + if (checksum_it == checksums_data.files.end() && file_name != "checksums.txt" && file_name != "columns.txt") + { + auto txt_checksum_it = checksum_files_txt.find(file_name); + if (txt_checksum_it == checksum_files_txt.end() || txt_checksum_it->second.uncompressed_size == 0) + { + auto file_buf = disk->readFile(it->path()); + HashingReadBuffer hashing_buf(*file_buf); + hashing_buf.tryIgnore(std::numeric_limits::max()); + checksums_data.files[file_name] = IMergeTreeDataPart::Checksums::Checksum(hashing_buf.count(), hashing_buf.getHash()); + } + else /// If we have both compressed and uncompressed in txt, then calculate them + { + checksums_data.files[file_name] = checksum_compressed_file(disk, it->path()); + } + } + } + if (is_cancelled()) return {}; diff --git a/tests/queries/0_stateless/01112_check_table_with_index.reference b/tests/queries/0_stateless/01112_check_table_with_index.reference new file mode 100644 index 00000000000..2027ea099a8 --- /dev/null +++ b/tests/queries/0_stateless/01112_check_table_with_index.reference @@ -0,0 +1 @@ +all_1_1_0 1 diff --git a/tests/queries/0_stateless/01112_check_table_with_index.sql b/tests/queries/0_stateless/01112_check_table_with_index.sql new file mode 100644 index 00000000000..e9613df7d1a --- /dev/null +++ b/tests/queries/0_stateless/01112_check_table_with_index.sql @@ -0,0 +1,15 @@ +SET check_query_single_value_result = 'false'; + +DROP TABLE IF EXISTS check_table_with_indices; + +CREATE TABLE check_table_with_indices ( + id UInt64, + data String, + INDEX a (id) type minmax GRANULARITY 3 +) ENGINE = MergeTree() ORDER BY id; + +INSERT INTO check_table_with_indices VALUES (0, 'test'), (1, 'test2'); + +CHECK TABLE check_table_with_indices; + +DROP TABLE check_table_with_indices; From fd4e246f623da5cb75ca4078bb3746c429203668 Mon Sep 17 00:00:00 2001 From: BayoNet Date: Mon, 6 Apr 2020 20:25:59 +0300 Subject: [PATCH 127/484] DOCS-448: system.settings (#10013) * elenbaskakova-DOCSUP-847 (#92) * "docs(system.settings): Table 'system.settings' has been edited" * Update docs/en/operations/system_tables.md Co-Authored-By: BayoNet * "docs(system.settings): Table 'system.settings' has been edited" * Update docs/en/operations/system_tables.md Co-Authored-By: BayoNet * Update docs/en/operations/system_tables.md Co-Authored-By: BayoNet * Update docs/en/operations/system_tables.md Co-Authored-By: BayoNet * Update docs/en/operations/system_tables.md Co-Authored-By: BayoNet * Update docs/en/operations/system_tables.md Co-Authored-By: BayoNet * "docs(system.settings): Table 'system.settings' has been edited" * "docs(system.settings): Table 'system.settings' has been edited" Co-authored-by: elenbaskakova Co-authored-by: BayoNet * CLICKHOUSEDOCS-448: Finalizing before pull. * CLICKHOUSEDOCS-448: Merge fix. * CLICKHOUSEDOCS-448: Fixed. * CLICKHOUSEDOCS-448: Clarification.
Co-authored-by: elenaspb2019 <47083263+elenaspb2019@users.noreply.github.com> Co-authored-by: elenbaskakova Co-authored-by: Sergei Shtykov --- docs/en/operations/system_tables.md | 57 +++++++++++++++++++++-------- docs/ru/operations/system_tables.md | 56 +++++++++++++++++++++------- 2 files changed, 85 insertions(+), 28 deletions(-) diff --git a/docs/en/operations/system_tables.md b/docs/en/operations/system_tables.md index 8d0e4b74b86..34fc37f6415 100644 --- a/docs/en/operations/system_tables.md +++ b/docs/en/operations/system_tables.md @@ -837,30 +837,57 @@ WHERE If this query doesn’t return anything, it means that everything is fine. -## system.settings {#system-settings} +## system.settings {#system-tables-system-settings} -Contains information about settings that are currently in use. -I.e. used for executing the query you are using to read from the system.settings table. +Contains information about session settings for current user. Columns: -- `name` (String) — Setting name. -- `value` (String) — Setting value. -- `description` (String) — Setting description. -- `type` (String) — Setting type (implementation specific string value). -- `changed` (UInt8) — Whether the setting was explicitly defined in the config or explicitly changed. -- `min` (Nullable(String)) — Get minimum allowed value (if any is set via [constraints](settings/constraints_on_settings.md#constraints-on-settings)). -- `max` (Nullable(String)) — Get maximum allowed value (if any is set via [constraints](settings/constraints_on_settings.md#constraints-on-settings)). -- `readonly` (UInt8) — Can user change this setting (for more info, look into [constraints](settings/constraints_on_settings.md#constraints-on-settings)). +- `name` ([String](../data_types/string.md)) — Setting name. +- `value` ([String](../data_types/string.md)) — Setting value. +- `changed` ([UInt8](../data_types/int_uint.md#uint-ranges)) — Shows whether a setting is changed from its default value. +- `description` ([String](../data_types/string.md)) — Short setting description. +- `min` ([Nullable](../data_types/nullable.md)([String](../data_types/string.md))) — Minimum value of the setting, if any is set via [constraints](settings/constraints_on_settings.md#constraints-on-settings). If the setting has no minimum value, contains [NULL](../query_language/syntax.md#null-literal). +- `max` ([Nullable](../data_types/nullable.md)([String](../data_types/string.md))) — Maximum value of the setting, if any is set via [constraints](settings/constraints_on_settings.md#constraints-on-settings). If the setting has no maximum value, contains [NULL](../query_language/syntax.md#null-literal). +- `readonly` ([UInt8](../data_types/int_uint.md#uint-ranges)) — Shows whether the current user can change the setting: + - `0` — Current user can change the setting. + - `1` — Current user can't change the setting. -Example: -``` sql -SELECT name, value +**Example** + +The following example shows how to get information about settings which name contains `min_i`. + +```sql +SELECT * FROM system.settings -WHERE changed +WHERE name LIKE '%min_i%' ``` +```text +┌─name────────────────────────────────────────┬─value─────┬─changed─┬─description───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬─min──┬─max──┬─readonly─┐ +│ min_insert_block_size_rows │ 1048576 │ 0 │ Squash blocks passed to INSERT query to specified size in rows, if blocks are not big enough. 
│ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 0 │ +│ min_insert_block_size_bytes │ 268435456 │ 0 │ Squash blocks passed to INSERT query to specified size in bytes, if blocks are not big enough. │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 0 │ +│ read_backoff_min_interval_between_events_ms │ 1000 │ 0 │ Settings to reduce the number of threads in case of slow reads. Do not pay attention to the event, if the previous one has passed less than a certain amount of time. │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 0 │ +└─────────────────────────────────────────────┴───────────┴─────────┴───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴──────┴──────┴──────────┘ +``` + +Using `WHERE changed` can be useful, for example, when you want to check: + +- Whether settings in configuration files are loaded correctly and are in use. +- Settings that changed in the current session. + +```sql +SELECT * FROM system.settings WHERE changed AND name='load_balancing' +``` + +**See also** + +- [Settings](settings/index.md#settings) +- [Permissions for Queries](settings/permissions_for_queries.md#settings_readonly) +- [Constraints on Settings](settings/constraints_on_settings.md) + +## system.table_engines ``` text ┌─name───────────────────┬─value───────┐ │ max_threads │ 8 │ diff --git a/docs/ru/operations/system_tables.md b/docs/ru/operations/system_tables.md index fc4ed0446e6..a8ccd369562 100644 --- a/docs/ru/operations/system_tables.md +++ b/docs/ru/operations/system_tables.md @@ -882,27 +882,57 @@ WHERE Если этот запрос ничего не возвращает - значит всё хорошо. -## system.settings {#system-settings} +## system.settings {#system-tables-system-settings} -Содержит информацию о настройках, используемых в данный момент. -То есть, используемых для выполнения запроса, с помощью которого вы читаете из таблицы system.settings. +Содержит информацию о сессионных настройках для текущего пользователя. Столбцы: -``` text -name String - имя настройки -value String - значение настройки -changed UInt8 - была ли настройка явно задана в конфиге или изменена явным образом +- `name` ([String](../data_types/string.md)) — имя настройки. +- `value` ([String](../data_types/string.md)) — значение настройки. +- `changed` ([UInt8](../data_types/int_uint.md#uint-ranges)) — показывает, изменена ли настройка по отношению к значению по умолчанию. +- `description` ([String](../data_types/string.md)) — краткое описание настройки. +- `min` ([Nullable](../data_types/nullable.md)([String](../data_types/string.md))) — минимальное значение настройки, если задано [ограничение](settings/constraints_on_settings.md#constraints-on-settings). Если нет, то поле содержит [NULL](../query_language/syntax.md#null-literal). +- `max` ([Nullable](../data_types/nullable.md)([String](../data_types/string.md))) — максимальное значение настройки, если задано [ограничение](settings/constraints_on_settings.md#constraints-on-settings). Если нет, то поле содержит [NULL](../query_language/syntax.md#null-literal). +- `readonly` ([UInt8](../data_types/int_uint.md#uint-ranges)) — Показывает, может ли пользователь изменять настройку: + - `0` — Текущий пользователь может изменять настройку. + - `1` — Текущий пользователь не может изменять настройку. + +**Пример** + +Пример показывает, как получить информацию о настройках, имена которых содержат `min_i`.
+ +```sql +SELECT * +FROM system.settings +WHERE name LIKE '%min_i%' ``` -Пример: - -``` sql -SELECT * -FROM system.settings -WHERE changed +```text +┌─name────────────────────────────────────────┬─value─────┬─changed─┬─description───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬─min──┬─max──┬─readonly─┐ +│ min_insert_block_size_rows │ 1048576 │ 0 │ Squash blocks passed to INSERT query to specified size in rows, if blocks are not big enough. │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 0 │ +│ min_insert_block_size_bytes │ 268435456 │ 0 │ Squash blocks passed to INSERT query to specified size in bytes, if blocks are not big enough. │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 0 │ +│ read_backoff_min_interval_between_events_ms │ 1000 │ 0 │ Settings to reduce the number of threads in case of slow reads. Do not pay attention to the event, if the previous one has passed less than a certain amount of time. │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 0 │ +└─────────────────────────────────────────────┴───────────┴─────────┴───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴──────┴──────┴──────────┘ ``` +Использование `WHERE changed` может быть полезно, например, если необходимо проверить: + +- Что настройки корректно загрузились из конфигурационного файла и используются. +- Настройки, изменённые в текущей сессии. + +```sql +SELECT * FROM system.settings WHERE changed AND name='load_balancing' +``` + + +**Cм. также** + +- [Настройки](settings/index.md#settings) +- [Разрешения для запросов](settings/permissions_for_queries.md#settings_readonly) +- [Ограничения для значений настроек](settings/constraints_on_settings.md) + +## system.table_engines ``` text ┌─name───────────────────┬─value───────┬─changed─┐ │ max_threads │ 8 │ 1 │ From 4fd5ef8bad188a9bd0f4b7bc5d686a0c8761cc39 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Mon, 6 Apr 2020 21:05:45 +0300 Subject: [PATCH 128/484] Review fixes. --- src/Storages/StorageDistributed.cpp | 2 +- ...1231_distributed_aggregation_memory_efficient_mix_levels.sql | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/src/Storages/StorageDistributed.cpp b/src/Storages/StorageDistributed.cpp index adf444c3565..b4375dd5b0a 100644 --- a/src/Storages/StorageDistributed.cpp +++ b/src/Storages/StorageDistributed.cpp @@ -474,7 +474,7 @@ void StorageDistributed::alter(const AlterCommands & params, const Context & con void StorageDistributed::startup() { if (remote_database.empty() && !remote_table_function_ptr) - LOG_INFO(log, "Name of remote database is empty. Default database will be used implicitly."); + LOG_WARNING(log, "Name of remote database is empty. 
Default database will be used implicitly."); if (!volume) return; diff --git a/tests/queries/0_stateless/01231_distributed_aggregation_memory_efficient_mix_levels.sql b/tests/queries/0_stateless/01231_distributed_aggregation_memory_efficient_mix_levels.sql index 6e4feda346f..31f09b35bf3 100644 --- a/tests/queries/0_stateless/01231_distributed_aggregation_memory_efficient_mix_levels.sql +++ b/tests/queries/0_stateless/01231_distributed_aggregation_memory_efficient_mix_levels.sql @@ -1,3 +1,5 @@ +set send_logs_level = 'error'; + create database if not exists shard_0; create database if not exists shard_1; From f0124ffc2b7b7c2a06fb5cbb0c30b8631ff1aba6 Mon Sep 17 00:00:00 2001 From: alesapin Date: Mon, 6 Apr 2020 21:30:51 +0300 Subject: [PATCH 129/484] Fix runner script --- tests/integration/runner | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/runner b/tests/integration/runner index 8cd37e0386c..399c87dcf06 100755 --- a/tests/integration/runner +++ b/tests/integration/runner @@ -10,7 +10,7 @@ import subprocess import sys CUR_FILE_DIR = os.path.dirname(os.path.realpath(__file__)) -DEFAULT_CLICKHOUSE_ROOT = os.path.abspath(os.path.join(CUR_FILE_DIR, "../../../")) +DEFAULT_CLICKHOUSE_ROOT = os.path.abspath(os.path.join(CUR_FILE_DIR, "../../")) CURRENT_WORK_DIR = os.getcwd() CONTAINER_NAME = "clickhouse_integration_tests" From 738e8a7ef8af5066eb99ac714708c72414d4ce69 Mon Sep 17 00:00:00 2001 From: Vasily Nemkov Date: Mon, 6 Apr 2020 20:16:36 +0300 Subject: [PATCH 130/484] Minor test refactoring * using string literal for binary strings * sorted includes --- .../tests/gtest_SplitTokenExtractor.cpp | 21 +++++++------------ 1 file changed, 7 insertions(+), 14 deletions(-) diff --git a/src/Storages/tests/gtest_SplitTokenExtractor.cpp b/src/Storages/tests/gtest_SplitTokenExtractor.cpp index de78347ebbd..9255e5ca817 100644 --- a/src/Storages/tests/gtest_SplitTokenExtractor.cpp +++ b/src/Storages/tests/gtest_SplitTokenExtractor.cpp @@ -5,14 +5,15 @@ #include -#include -#include -#include #include +#include +#include +#include namespace { using namespace DB; +using namespace std::literals::string_literals; } struct SplitTokenExtractorTestCase @@ -71,9 +72,6 @@ TEST_P(SplitTokenExtractorTest, next) << "\n\t" << token_start << ", " << token_len << ", " << pos << ", " << data->size(); } -// Helper to allow strings with embedded '\0' chars. 
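The macro removed just below becomes unnecessary once the test pulls in `std::literals::string_literals` (the `+using namespace ...` line above): the `s` suffix builds the `std::string` from a pointer/length pair taken from the literal itself, so embedded zero bytes survive. A minimal standalone illustration of the difference:

```cpp
#include <cassert>
#include <string>

int main()
{
    using namespace std::literals::string_literals;

    const auto with_nul = "\0abc"s;          /// size() == 4: the leading NUL byte is kept
    const std::string truncated = "\0abc";   /// size() == 0: the const char * ctor stops at '\0'

    assert(with_nul.size() == 4);
    assert(truncated.empty());
}
```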
-#define BINARY_STRING(str) std::string{str, sizeof(str) - 1} - INSTANTIATE_TEST_SUITE_P(NoTokens, SplitTokenExtractorTest, ::testing::ValuesIn(std::initializer_list{ @@ -98,11 +96,6 @@ INSTANTIATE_TEST_SUITE_P(NoTokens, INSTANTIATE_TEST_SUITE_P(ShortSingleToken, SplitTokenExtractorTest, ::testing::ValuesIn(std::initializer_list{ - { - "Empty input sequence produces no tokens.", - "", - {} - }, { "Short single token", "foo", @@ -144,7 +137,7 @@ INSTANTIATE_TEST_SUITE_P(MultipleTokens, }, { "Multiple tokens separated by non-printable chars", - BINARY_STRING("\0abc\1" "123\2XYZ\4"), + "\0abc\1" "123\2XYZ\4"s, { "abc", "123", "XYZ" } @@ -152,7 +145,7 @@ INSTANTIATE_TEST_SUITE_P(MultipleTokens, { "ASCII table is split into numeric, upper case and lower case letters", - BINARY_STRING("\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16" + "\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16" "\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f !\"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNO" "PQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\x7f\x80\x81\x82\x83\x84\x85\x86\x87" "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c" @@ -160,7 +153,7 @@ INSTANTIATE_TEST_SUITE_P(MultipleTokens, "\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0\xc1\xc2\xc3\xc4\xc5\xc6" "\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb" "\xdc\xdd\xde\xdf\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0" - "\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"), + "\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"s, { "0123456789", "ABCDEFGHIJKLMNOPQRSTUVWXYZ", "abcdefghijklmnopqrstuvwxyz", "\x80\x81\x82\x83\x84\x85\x86\x87" From 22964721958118a980f9a1f7bc7021f3231970c3 Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Mon, 6 Apr 2020 21:55:25 +0300 Subject: [PATCH 131/484] Enable access management in stateless tests. 
--- docker/test/stateless/Dockerfile | 1 + tests/config/access_management.xml | 7 +++++++ 2 files changed, 8 insertions(+) create mode 100644 tests/config/access_management.xml diff --git a/docker/test/stateless/Dockerfile b/docker/test/stateless/Dockerfile index 2d2025de58b..e4c7ee0df24 100644 --- a/docker/test/stateless/Dockerfile +++ b/docker/test/stateless/Dockerfile @@ -62,6 +62,7 @@ CMD dpkg -i package_folder/clickhouse-common-static_*.deb; \ ln -s /usr/share/clickhouse-test/config/query_masking_rules.xml /etc/clickhouse-server/config.d/; \ ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/; \ ln -s /usr/share/clickhouse-test/config/readonly.xml /etc/clickhouse-server/users.d/; \ + ln -s /usr/share/clickhouse-test/config/access_management.xml /etc/clickhouse-server/users.d/; \ ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/; \ ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/; \ ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/; \ diff --git a/tests/config/access_management.xml b/tests/config/access_management.xml new file mode 100644 index 00000000000..7e799cb7b10 --- /dev/null +++ b/tests/config/access_management.xml @@ -0,0 +1,7 @@ + + + + 1 + + + From 04e3e3179cd7fb6ea329b252a218333536ebec4a Mon Sep 17 00:00:00 2001 From: alesapin Date: Mon, 6 Apr 2020 22:09:39 +0300 Subject: [PATCH 132/484] Add converting stream to localhost clickhouse dict --- .../ClickHouseDictionarySource.cpp | 15 +++++++++-- ...local_dictionary_type_conversion.reference | 2 ++ ...01113_local_dictionary_type_conversion.sql | 27 +++++++++++++++++++ 3 files changed, 42 insertions(+), 2 deletions(-) create mode 100644 tests/queries/0_stateless/01113_local_dictionary_type_conversion.reference create mode 100644 tests/queries/0_stateless/01113_local_dictionary_type_conversion.sql diff --git a/src/Dictionaries/ClickHouseDictionarySource.cpp b/src/Dictionaries/ClickHouseDictionarySource.cpp index 0894a655724..015d3f499ee 100644 --- a/src/Dictionaries/ClickHouseDictionarySource.cpp +++ b/src/Dictionaries/ClickHouseDictionarySource.cpp @@ -2,6 +2,7 @@ #include #include #include +#include #include #include #include @@ -131,6 +132,7 @@ BlockInputStreamPtr ClickHouseDictionarySource::loadAll() { BlockIO res = executeQuery(load_all_query, context, true); /// FIXME res.in may implicitly use some objects owned be res, but them will be destructed after return + res.in = std::make_shared(context, res.in, sample_block, ConvertingBlockInputStream::MatchColumnsMode::Position); return res.in; } return std::make_shared(pool, load_all_query, sample_block, context); @@ -140,7 +142,11 @@ BlockInputStreamPtr ClickHouseDictionarySource::loadUpdatedAll() { std::string load_update_query = getUpdateFieldAndDate(); if (is_local) - return executeQuery(load_update_query, context, true).in; + { + auto res = executeQuery(load_update_query, context, true); + res.in = std::make_shared(context, res.in, sample_block, ConvertingBlockInputStream::MatchColumnsMode::Position); + return res.in; + } return std::make_shared(pool, load_update_query, sample_block, context); } @@ -183,7 +189,12 @@ std::string ClickHouseDictionarySource::toString() const BlockInputStreamPtr ClickHouseDictionarySource::createStreamForSelectiveLoad(const std::string & query) { if (is_local) - return executeQuery(query, context, true).in; + { + auto res = executeQuery(query, context, true); + res.in = std::make_shared( + context, 
res.in, sample_block, ConvertingBlockInputStream::MatchColumnsMode::Position); + return res.in; + } return std::make_shared(pool, query, sample_block, context); } diff --git a/tests/queries/0_stateless/01113_local_dictionary_type_conversion.reference b/tests/queries/0_stateless/01113_local_dictionary_type_conversion.reference new file mode 100644 index 00000000000..ac390663059 --- /dev/null +++ b/tests/queries/0_stateless/01113_local_dictionary_type_conversion.reference @@ -0,0 +1,2 @@ +First WINDOWS 1 +Second LINUX 2 diff --git a/tests/queries/0_stateless/01113_local_dictionary_type_conversion.sql b/tests/queries/0_stateless/01113_local_dictionary_type_conversion.sql new file mode 100644 index 00000000000..df1f405e286 --- /dev/null +++ b/tests/queries/0_stateless/01113_local_dictionary_type_conversion.sql @@ -0,0 +1,27 @@ +DROP DATABASE IF EXISTS database_for_dict; + +CREATE DATABASE database_for_dict; + +CREATE TABLE database_for_dict.table_for_dict ( + CompanyID String, + OSType Enum('UNKNOWN' = 0, 'WINDOWS' = 1, 'LINUX' = 2, 'ANDROID' = 3, 'MAC' = 4), + SomeID Int32 +) +ENGINE = Memory(); + +INSERT INTO database_for_dict.table_for_dict VALUES ('First', 'WINDOWS', 1), ('Second', 'LINUX', 2); + +CREATE DICTIONARY database_for_dict.dict_with_conversion +( + CompanyID String DEFAULT '', + OSType String DEFAULT '', + SomeID Int32 DEFAULT 0 +) +PRIMARY KEY CompanyID +SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'table_for_dict' DB 'database_for_dict')) +LIFETIME(MIN 1 MAX 20) +LAYOUT(COMPLEX_KEY_HASHED()); + +SELECT * FROM database_for_dict.dict_with_conversion ORDER BY CompanyID; + +DROP DATABASE IF EXISTS database_for_dict; From 2a8867f330506dae80a6043fbdb93cd3d87de9a1 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Mon, 6 Apr 2020 23:47:55 +0300 Subject: [PATCH 133/484] Convenient defaults for perf test runner. --- docker/test/performance-comparison/perf.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docker/test/performance-comparison/perf.py b/docker/test/performance-comparison/perf.py index dc516d7029e..55d93f89c6e 100755 --- a/docker/test/performance-comparison/perf.py +++ b/docker/test/performance-comparison/perf.py @@ -23,8 +23,8 @@ report_stage_end('start') parser = argparse.ArgumentParser(description='Run performance test.') # Explicitly decode files as UTF-8 because sometimes we have Russian characters in queries, and LANG=C is set. parser.add_argument('file', metavar='FILE', type=argparse.FileType('r', encoding='utf-8'), nargs=1, help='test description file') -parser.add_argument('--host', nargs='*', default=['127.0.0.1', '127.0.0.1'], help="Server hostname. Parallel to '--port'.") -parser.add_argument('--port', nargs='*', default=[9001, 9002], help="Server port. Parallel to '--host'.") +parser.add_argument('--host', nargs='*', default=['localhost'], help="Server hostname(s). Corresponds to '--port' options.") +parser.add_argument('--port', nargs='*', default=[9000], help="Server port(s). Corresponds to '--host' options.") parser.add_argument('--runs', type=int, default=int(os.environ.get('CHPC_RUNS', 7)), help='Number of query runs per server. 
Defaults to CHPC_RUNS environment variable.') parser.add_argument('--no-long', type=bool, default=True, help='Skip the tests tagged as long.') args = parser.parse_args() From 2951ed4f1dec9510a23118a78a9a677f3cb3e867 Mon Sep 17 00:00:00 2001 From: Alexander Kazakov Date: Tue, 7 Apr 2020 00:35:20 +0300 Subject: [PATCH 134/484] Corrected Common.RWLockDeadlock test --- src/Common/tests/gtest_rw_lock.cpp | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/src/Common/tests/gtest_rw_lock.cpp b/src/Common/tests/gtest_rw_lock.cpp index dec4c732fd5..facd7a33c1c 100644 --- a/src/Common/tests/gtest_rw_lock.cpp +++ b/src/Common/tests/gtest_rw_lock.cpp @@ -150,9 +150,16 @@ TEST(Common, RWLockDeadlock) usleep(100000); usleep(100000); usleep(100000); + usleep(100000); try { auto holder2 = lock2->getLock(RWLockImpl::Read, "q1"); + if (!holder2) + { + throw Exception( + "Locking attempt timed out! Possible deadlock avoided. Client should retry.", + ErrorCodes::DEADLOCK_AVOIDED); + } } catch (const Exception & e) { @@ -174,9 +181,16 @@ TEST(Common, RWLockDeadlock) auto holder2 = lock2->getLock(RWLockImpl::Read, "q3"); usleep(100000); usleep(100000); + usleep(100000); try { auto holder1 = lock1->getLock(RWLockImpl::Read, "q3"); + if (!holder1) + { + throw Exception( + "Locking attempt timed out! Possible deadlock avoided. Client should retry.", + ErrorCodes::DEADLOCK_AVOIDED); + } } catch (const Exception & e) { From b98bc9afefb61645d30992b3f37dc489613363cd Mon Sep 17 00:00:00 2001 From: Alexander Kazakov Date: Tue, 7 Apr 2020 02:44:45 +0300 Subject: [PATCH 135/484] Reworked RWLockImpl::getLock() + phase-fairness Timeout param added to getLock() method --- src/Common/RWLock.cpp | 295 ++++++++++++++++++++++-------------------- src/Common/RWLock.h | 45 ++++--- 2 files changed, 185 insertions(+), 155 deletions(-) diff --git a/src/Common/RWLock.cpp b/src/Common/RWLock.cpp index 5dfc1b55c63..a282c1c6a91 100644 --- a/src/Common/RWLock.cpp +++ b/src/Common/RWLock.cpp @@ -29,19 +29,17 @@ namespace DB namespace ErrorCodes { extern const int LOGICAL_ERROR; - extern const int DEADLOCK_AVOIDED; } -/** A single-use object that represents lock's ownership +/** A one-time-use object that represents lock ownership * For the purpose of exception safety guarantees LockHolder is to be used in two steps: - * 1. Create an instance (allocating all the memory needed) + * 1. Create an instance (allocating all the needed memory) + * 2. Associate the instance with the lock (attach to the lock and locking request group) */ class RWLockImpl::LockHolderImpl { bool bound{false}; - Type lock_type; String query_id; CurrentMetrics::Increment active_client_increment; RWLock parent; @@ -53,24 +51,30 @@ public: /// Implicit memory allocation for query_id is done here LockHolderImpl(const String & query_id_, Type type) - : lock_type{type}, query_id{query_id_}, - active_client_increment{ + : query_id{query_id_} + , active_client_increment{ type == Type::Read ? CurrentMetrics::RWLockActiveReaders : CurrentMetrics::RWLockActiveWriters} { } - ~LockHolderImpl(); + ~LockHolderImpl() + { + if (bound && parent != nullptr) + parent->unlock(it_group, query_id); + else + active_client_increment.destroy(); + } private: /// A separate method which binds the lock holder to the owned lock /// N.B.
It is very important that this method produces no allocations bool bindWith(RWLock && parent_, GroupsContainer::iterator it_group_) noexcept { - if (bound) + if (bound || parent_ == nullptr) return false; it_group = it_group_; parent = std::move(parent_); - ++it_group->refererrs; + ++it_group->requests; bound = true; return true; } @@ -79,56 +83,27 @@ private: }; -namespace -{ - /// Global information about all read locks that query has. It is needed to avoid some type of deadlocks. - - class QueryLockInfo - { - private: - mutable std::mutex mutex; - std::map queries; - - public: - void add(const String & query_id) - { - std::lock_guard lock(mutex); - - const auto res = queries.emplace(query_id, 1); // may throw - if (!res.second) - ++res.first->second; - } - - void remove(const String & query_id) noexcept - { - std::lock_guard lock(mutex); - - const auto query_it = queries.find(query_id); - if (query_it != queries.cend() && --query_it->second == 0) - queries.erase(query_it); - } - - void check(const String & query_id) const - { - std::lock_guard lock(mutex); - - if (queries.find(query_id) != queries.cend()) - throw Exception("Possible deadlock avoided. Client should retry.", ErrorCodes::DEADLOCK_AVOIDED); - } - }; - - QueryLockInfo all_read_locks; -} - - -/** To guarantee that we do not get any piece of our data corrupted: +/** General algorithm: + * Step 1. Try the FastPath (for both Reads/Writes) + * Step 2. Find ourselves request group: attach to existing or create a new one + * Step 3. Wait/timed wait for ownership signal + * Step 3a. Check if we must handle timeout and exit + * Step 4. Persist lock ownership + * + * To guarantee that we do not get any piece of our data corrupted: * 1. Perform all actions that include allocations before changing lock's internal state * 2. Roll back any changes that make the state inconsistent * * Note: "SM" in the commentaries below stands for STATE MODIFICATION */ -RWLockImpl::LockHolder RWLockImpl::getLock(RWLockImpl::Type type, const String & query_id) +RWLockImpl::LockHolder +RWLockImpl::getLock(RWLockImpl::Type type, const String & query_id, const std::chrono::milliseconds & lock_timeout_ms) { + const auto lock_deadline_tp = + (lock_timeout_ms == std::chrono::milliseconds(0)) + ? std::chrono::time_point::max() + : std::chrono::steady_clock::now() + lock_timeout_ms; + const bool request_has_query_id = query_id != NO_QUERY; Stopwatch watch(CLOCK_MONOTONIC_COARSE); @@ -145,100 +120,111 @@ RWLockImpl::LockHolder RWLockImpl::getLock(RWLockImpl::Type type, const String & /// This object is placed above unique_lock, because it may lock in destructor. auto lock_holder = std::make_shared(query_id, type); - std::unique_lock lock(mutex); + std::unique_lock state_lock(internal_state_mtx); /// The FastPath: /// Check if the same query_id already holds the required lock in which case we can proceed without waiting if (request_has_query_id) { - const auto it_query = owner_queries.find(query_id); - if (it_query != owner_queries.end()) + const auto owner_query_it = owner_queries.find(query_id); + if (owner_query_it != owner_queries.end()) { - const auto current_owner_group = queue.begin(); + if (wrlock_owner != writers_queue.end()) + throw Exception( + "RWLockImpl::getLock(): RWLock is already locked in exclusive mode", + ErrorCodes::LOGICAL_ERROR); - /// XXX: it means we can't upgrade lock from read to write! 
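With the signature introduced above, a zero `lock_timeout_ms` preserves the old wait-forever behaviour, while a non-zero timeout makes `getLock()` hand back an empty holder instead of blocking indefinitely. A hypothetical caller would look roughly like this (usage sketch only; it assumes the project's `RWLockImpl::create()` factory from `src/Common/RWLock.h`):

```cpp
auto lock = DB::RWLockImpl::create();

if (auto holder = lock->getLock(DB::RWLockImpl::Read, "some-query-id",
                                std::chrono::milliseconds(100)))
{
    /// Read under the shared lock; ownership ends when `holder` is destroyed.
}
else
{
    /// Empty holder: the 100 ms deadline expired while a write phase was
    /// active or queued, and our request was rolled back from the queue.
}
```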
+ /// Lock upgrading is not supported if (type == Write) throw Exception( "RWLockImpl::getLock(): Cannot acquire exclusive lock while RWLock is already locked", ErrorCodes::LOGICAL_ERROR); - if (current_owner_group->type == Write) - throw Exception( - "RWLockImpl::getLock(): RWLock is already locked in exclusive mode", - ErrorCodes::LOGICAL_ERROR); - /// N.B. Type is Read here, query_id is not empty and it_query is a valid iterator - all_read_locks.add(query_id); /// SM1: may throw on insertion (nothing to roll back) - ++it_query->second; /// SM2: nothrow - lock_holder->bindWith(shared_from_this(), current_owner_group); /// SM3: nothrow + ++owner_query_it->second; /// SM1: nothrow + lock_holder->bindWith(shared_from_this(), rdlock_owner); /// SM2: nothrow finalize_metrics(); return lock_holder; } } - /** If the query already has any active read lock and tries to acquire another read lock - * but it is not in front of the queue and has to wait, deadlock is possible: - * - * Example (four queries, two RWLocks - 'a' and 'b'): - * - * --> time --> - * - * q1: ra rb - * q2: wa - * q3: rb ra - * q4: wb - * - * We will throw an exception instead. - */ - - if (type == Type::Write || queue.empty() || queue.back().type == Type::Write) + if (type == Type::Write) { - if (type == Type::Read && request_has_query_id && !queue.empty()) - all_read_locks.check(query_id); - - /// Create a new group of locking requests - queue.emplace_back(type); /// SM1: may throw (nothing to roll back) + writers_queue.emplace_back(type); /// SM1: may throw (nothing to roll back) } - else if (request_has_query_id && queue.size() > 1) - all_read_locks.check(query_id); + else if (readers_queue.empty() || + (rdlock_owner == readers_queue.begin() && !writers_queue.empty())) + { + readers_queue.emplace_back(type); /// SM1: may throw (nothing to roll back) + } + GroupsContainer::iterator it_group = + (type == Type::Write) ? std::prev(writers_queue.end()) : std::prev(readers_queue.end()); - GroupsContainer::iterator it_group = std::prev(queue.end()); + if (rdlock_owner == readers_queue.end() && wrlock_owner == writers_queue.end()) + { + if (type == Type::Read) + { + rdlock_owner = it_group; /// SM2: nothrow + } + else + { + wrlock_owner = it_group; /// SM2: nothrow + } + } + else + { + /// Wait until our group becomes the lock owner + const auto predicate = [&] () { return it_group == (type == Read ? rdlock_owner : wrlock_owner); }; - /// We need to reference the associated group before waiting to guarantee - /// that this group does not get deleted prematurely - ++it_group->refererrs; + if (lock_deadline_tp == std::chrono::time_point::max()) + { + ++it_group->requests; + it_group->cv.wait(state_lock, predicate); + --it_group->requests; + } + else + { + ++it_group->requests; + const auto wait_result = it_group->cv.wait_until(state_lock, lock_deadline_tp, predicate); + --it_group->requests; - /// Wait a notification until we will be the only in the group. - it_group->cv.wait(lock, [&] () { return it_group == queue.begin(); }); + /// Step 3a. Check if we must handle timeout and exit + if (!wait_result) /// Wait timed out! 
+ { + if (it_group->requests == 0) + { + /// Roll back SM1 + if (type == Read) + { + readers_queue.erase(it_group); /// Rollback(SM1): nothrow + } + else + { + writers_queue.erase(it_group); /// Rollback(SM1): nothrow + } + } - --it_group->refererrs; + return nullptr; + } + } + } if (request_has_query_id) { try { - if (type == Type::Read) - all_read_locks.add(query_id); /// SM2: may throw on insertion - /// and is safe to roll back unconditionally const auto emplace_res = - owner_queries.emplace(query_id, 1); /// SM3: may throw on insertion + owner_queries.emplace(query_id, 1); /// SM2: may throw on insertion if (!emplace_res.second) - ++emplace_res.first->second; /// SM4: nothrow + ++emplace_res.first->second; /// SM3: nothrow } catch (...) { /// Methods std::list<>::emplace_back() and std::unordered_map<>::emplace() provide strong exception safety - /// We only need to roll back the changes to these objects: all_read_locks and the locking queue - if (type == Type::Read) - all_read_locks.remove(query_id); /// Rollback(SM2): nothrow - - if (it_group->refererrs == 0) - { - const auto next = queue.erase(it_group); /// Rollback(SM1): nothrow - if (next != queue.end()) - next->cv.notify_all(); - } + /// We only need to roll back the changes to these objects: owner_queries and the readers/writers queue + if (it_group->requests == 0) + erase_group(it_group); /// Rollback(SM1): nothrow throw; } @@ -251,10 +237,9 @@ RWLockImpl::LockHolder RWLockImpl::getLock(RWLockImpl::Type type, const String & } -/** The sequence points of acquiring lock's ownership by an instance of LockHolderImpl: - * 1. all_read_locks is updated - * 2. owner_queries is updated - * 3. request group is updated by LockHolderImpl which in turn becomes "bound" +/** The sequence points of acquiring lock ownership by an instance of LockHolderImpl: + * 1. owner_queries is updated + * 2. request group is updated by LockHolderImpl which in turn becomes "bound" * * If by the time when destructor of LockHolderImpl is called the instance has been "bound", * it is guaranteed that all three steps have been executed successfully and the resulting state is consistent. @@ -262,38 +247,74 @@ RWLockImpl::LockHolder RWLockImpl::getLock(RWLockImpl::Type type, const String & * * We do not employ try-catch: if something bad happens, there is nothing we can do =( */ -RWLockImpl::LockHolderImpl::~LockHolderImpl() +void RWLockImpl::unlock(GroupsContainer::iterator owner_group, const String & query_id) noexcept { - if (!bound || parent == nullptr) + std::lock_guard state_lock(internal_state_mtx); + + /// All of theses are Undefined behavior and nothing we can do! + if (rdlock_owner == readers_queue.end() && wrlock_owner == writers_queue.end()) return; - - std::lock_guard lock(parent->mutex); - - /// The associated group must exist (and be the beginning of the queue?) 
- if (parent->queue.empty() || it_group != parent->queue.begin()) + if (rdlock_owner != readers_queue.end() && owner_group != rdlock_owner) + return; + if (wrlock_owner != writers_queue.end() && owner_group != wrlock_owner) return; /// If query_id is not empty it must be listed in parent->owner_queries - if (query_id != RWLockImpl::NO_QUERY) + if (query_id != NO_QUERY) { - const auto owner_it = parent->owner_queries.find(query_id); - if (owner_it != parent->owner_queries.end()) + const auto owner_query_it = owner_queries.find(query_id); + if (owner_query_it != owner_queries.end()) { - if (--owner_it->second == 0) /// SM: nothrow - parent->owner_queries.erase(owner_it); /// SM: nothrow - - if (lock_type == RWLockImpl::Read) - all_read_locks.remove(query_id); /// SM: nothrow + if (--owner_query_it->second == 0) /// SM: nothrow + owner_queries.erase(owner_query_it); /// SM: nothrow } } - /// If we are the last remaining referrer, remove the group and notify the next group - if (--it_group->refererrs == 0) /// SM: nothrow - { - const auto next = parent->queue.erase(it_group); /// SM: nothrow - if (next != parent->queue.end()) - next->cv.notify_all(); - } + /// If we are the last remaining referrer, remove this QNode and notify the next one + if (--owner_group->requests == 0) /// SM: nothrow + erase_group(owner_group); } + +void RWLockImpl::erase_group(GroupsContainer::iterator group_it) noexcept +{ + rdlock_owner = readers_queue.end(); + wrlock_owner = writers_queue.end(); + + if (group_it->type == Read) + { + readers_queue.erase(group_it); + /// Prepare next phase + if (!writers_queue.empty()) + { + wrlock_owner = writers_queue.begin(); + } + else + { + rdlock_owner = readers_queue.begin(); + } + } + else + { + writers_queue.erase(group_it); + /// Prepare next phase + if (!readers_queue.empty()) + { + rdlock_owner = readers_queue.begin(); + } + else + { + wrlock_owner = writers_queue.begin(); + } + } + + if (rdlock_owner != readers_queue.end()) + { + rdlock_owner->cv.notify_all(); + } + else if (wrlock_owner != writers_queue.end()) + { + wrlock_owner->cv.notify_one(); + } +} } diff --git a/src/Common/RWLock.h b/src/Common/RWLock.h index a7084720d6c..81b8551060a 100644 --- a/src/Common/RWLock.h +++ b/src/Common/RWLock.h @@ -2,6 +2,7 @@ #include +#include #include #include #include @@ -19,7 +20,8 @@ using RWLock = std::shared_ptr; /// Implements shared lock with FIFO service -/// Can be acquired recursively (several calls for the same query) in Read mode +/// (Phase Fair RWLock as suggested in https://www.cs.unc.edu/~anderson/papers/rtsj10-for-web.pdf) +/// Can be acquired recursively (for the same query) in Read mode /// /// NOTE: it is important to allow acquiring the same lock in Read mode without waiting if it is already /// acquired by another thread of the same query. Otherwise the following deadlock is possible: @@ -42,37 +44,44 @@ public: friend class LockHolderImpl; using LockHolder = std::shared_ptr; - /// Waits in the queue and returns appropriate lock - /// Empty query_id means the lock is acquired out of the query context (e.g. in a background thread). - LockHolder getLock(Type type, const String & query_id); + /// Empty query_id means the lock is acquired from outside of query context (e.g. in a background thread). + LockHolder getLock(Type type, const String & query_id, + const std::chrono::milliseconds & lock_timeout_ms = std::chrono::milliseconds(0)); /// Use as query_id to acquire a lock outside the query context. 
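The header above now cites the phase-fair reader/writer lock design: waiters are grouped, and read and write phases take turns instead of servicing individual requests strictly FIFO. The toy class below is a deliberately reduced, writer-preferring variant (no `query_id` re-entrancy, no timeouts, and not the real `RWLockImpl`) that only demonstrates the grouped-wakeup mechanics, including the `notify_all`-for-readers versus `notify_one`-for-writers asymmetry used in `erase_group()` above:

```cpp
#include <condition_variable>
#include <mutex>

/// Toy lock showing the "grouped waiters + alternating phases" idea only.
class ToyPhasedRWLock
{
    std::mutex m;
    std::condition_variable readers_cv;
    std::condition_variable writers_cv;
    int active_readers = 0;
    int waiting_writers = 0;
    bool writer_active = false;

public:
    void lockShared()
    {
        std::unique_lock lk(m);
        /// A queued writer blocks new readers, so readers cannot starve
        /// writers; all readers admitted together form one "read phase".
        readers_cv.wait(lk, [this] { return !writer_active && waiting_writers == 0; });
        ++active_readers;
    }

    void unlockShared()
    {
        std::unique_lock lk(m);
        if (--active_readers == 0)
            writers_cv.notify_one();    /// hand over to the next write phase
    }

    void lockExclusive()
    {
        std::unique_lock lk(m);
        ++waiting_writers;
        writers_cv.wait(lk, [this] { return !writer_active && active_readers == 0; });
        --waiting_writers;
        writer_active = true;
    }

    void unlockExclusive()
    {
        std::unique_lock lk(m);
        writer_active = false;
        readers_cv.notify_all();        /// wake the whole next read phase
        writers_cv.notify_one();        /// or the next writer, if no readers may run
    }
};
```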
inline static const String NO_QUERY = String(); + inline static const auto default_locking_timeout = std::chrono::milliseconds(120000); private: - RWLockImpl() = default; - - struct Group; - using GroupsContainer = std::list; - using OwnerQueryIds = std::unordered_map; - - /// Group of locking requests that should be granted concurrently - /// i.e. a group can contain several readers, but only one writer + /// Group of locking requests that should be granted simultaneously + /// i.e. one or several readers or a single writer struct Group { const Type type; - size_t refererrs; + size_t requests; std::condition_variable cv; /// all locking requests of the group wait on this condvar - explicit Group(Type type_) : type{type_}, refererrs{0} {} + explicit Group(Type type_) : type{type_}, requests{0} {} }; - GroupsContainer queue; + using GroupsContainer = std::list; + using OwnerQueryIds = std::unordered_map; + +private: + mutable std::mutex internal_state_mtx; + + GroupsContainer readers_queue; + GroupsContainer writers_queue; + GroupsContainer::iterator rdlock_owner{readers_queue.end()}; /// equals to readers_queue.begin() in read phase + /// or readers_queue.end() otherwise + GroupsContainer::iterator wrlock_owner{writers_queue.end()}; /// equals to writers_queue.begin() in write phase + /// or writers_queue.end() otherwise OwnerQueryIds owner_queries; - mutable std::mutex mutex; +private: + RWLockImpl() = default; + void unlock(GroupsContainer::iterator group_it, const String & query_id) noexcept; + void erase_group(GroupsContainer::iterator group_it) noexcept; }; - - } From 09ce548376e8631cc19f5be00caea3c048f27b94 Mon Sep 17 00:00:00 2001 From: Alexander Kazakov Date: Tue, 7 Apr 2020 02:45:51 +0300 Subject: [PATCH 136/484] All locks in IStorage have timeouts now --- src/Storages/IStorage.cpp | 27 +++++++++++++++++++-------- 1 file changed, 19 insertions(+), 8 deletions(-) diff --git a/src/Storages/IStorage.cpp b/src/Storages/IStorage.cpp index 4d916ca1b46..ee751be6f5a 100644 --- a/src/Storages/IStorage.cpp +++ b/src/Storages/IStorage.cpp @@ -28,6 +28,7 @@ namespace ErrorCodes extern const int TYPE_MISMATCH; extern const int TABLE_IS_DROPPED; extern const int NOT_IMPLEMENTED; + extern const int DEADLOCK_AVOIDED; } IStorage::IStorage(StorageID storage_id_, ColumnsDescription virtuals_) : storage_id(std::move(storage_id_)), virtuals(std::move(virtuals_)) @@ -314,12 +315,22 @@ bool IStorage::isVirtualColumn(const String & column_name) const return getColumns().get(column_name).is_virtual; } +RWLockImpl::LockHolder tryLockTimed(const RWLock & rwlock, RWLockImpl::Type type, const String & query_id) +{ + auto lock_holder = rwlock->getLock(type, query_id, RWLockImpl::default_locking_timeout); + if (!lock_holder) + throw Exception( + "Locking attempt timed out! Possible deadlock avoided. 
Client should retry.", + ErrorCodes::DEADLOCK_AVOIDED); + return std::move(lock_holder); +} + TableStructureReadLockHolder IStorage::lockStructureForShare(bool will_add_new_data, const String & query_id) { TableStructureReadLockHolder result; if (will_add_new_data) - result.new_data_structure_lock = new_data_structure_lock->getLock(RWLockImpl::Read, query_id); - result.structure_lock = structure_lock->getLock(RWLockImpl::Read, query_id); + result.new_data_structure_lock = tryLockTimed(new_data_structure_lock, RWLockImpl::Read, query_id); + result.structure_lock = tryLockTimed(structure_lock, RWLockImpl::Read, query_id); if (is_dropped) throw Exception("Table is dropped", ErrorCodes::TABLE_IS_DROPPED); @@ -329,7 +340,7 @@ TableStructureReadLockHolder IStorage::lockStructureForShare(bool will_add_new_d TableStructureWriteLockHolder IStorage::lockAlterIntention(const String & query_id) { TableStructureWriteLockHolder result; - result.alter_intention_lock = alter_intention_lock->getLock(RWLockImpl::Write, query_id); + result.alter_intention_lock = tryLockTimed(alter_intention_lock, RWLockImpl::Write, query_id); if (is_dropped) throw Exception("Table is dropped", ErrorCodes::TABLE_IS_DROPPED); @@ -342,20 +353,20 @@ void IStorage::lockStructureExclusively(TableStructureWriteLockHolder & lock_hol throw Exception("Alter intention lock for table " + getStorageID().getNameForLogs() + " was not taken. This is a bug.", ErrorCodes::LOGICAL_ERROR); if (!lock_holder.new_data_structure_lock) - lock_holder.new_data_structure_lock = new_data_structure_lock->getLock(RWLockImpl::Write, query_id); - lock_holder.structure_lock = structure_lock->getLock(RWLockImpl::Write, query_id); + lock_holder.new_data_structure_lock = tryLockTimed(new_data_structure_lock, RWLockImpl::Write, query_id); + lock_holder.structure_lock = tryLockTimed(structure_lock, RWLockImpl::Write, query_id); } TableStructureWriteLockHolder IStorage::lockExclusively(const String & query_id) { TableStructureWriteLockHolder result; - result.alter_intention_lock = alter_intention_lock->getLock(RWLockImpl::Write, query_id); + result.alter_intention_lock = tryLockTimed(alter_intention_lock, RWLockImpl::Write, query_id); if (is_dropped) throw Exception("Table is dropped", ErrorCodes::TABLE_IS_DROPPED); - result.new_data_structure_lock = new_data_structure_lock->getLock(RWLockImpl::Write, query_id); - result.structure_lock = structure_lock->getLock(RWLockImpl::Write, query_id); + result.new_data_structure_lock = tryLockTimed(new_data_structure_lock, RWLockImpl::Write, query_id); + result.structure_lock = tryLockTimed(structure_lock, RWLockImpl::Write, query_id); return result; } From 081187dd1fec6a231b5e8334ff24321226865d79 Mon Sep 17 00:00:00 2001 From: Alexander Kazakov Date: Tue, 7 Apr 2020 03:06:48 +0300 Subject: [PATCH 137/484] Unit test for RWLock is set to use timeout for avoiding deadlocks --- src/Common/tests/gtest_rw_lock.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Common/tests/gtest_rw_lock.cpp b/src/Common/tests/gtest_rw_lock.cpp index facd7a33c1c..73987a25508 100644 --- a/src/Common/tests/gtest_rw_lock.cpp +++ b/src/Common/tests/gtest_rw_lock.cpp @@ -153,7 +153,7 @@ TEST(Common, RWLockDeadlock) usleep(100000); try { - auto holder2 = lock2->getLock(RWLockImpl::Read, "q1"); + auto holder2 = lock2->getLock(RWLockImpl::Read, "q1", std::chrono::milliseconds(100)); if (!holder2) { throw Exception( @@ -184,7 +184,7 @@ TEST(Common, RWLockDeadlock) usleep(100000); try { - auto holder1 = 
lock1->getLock(RWLockImpl::Read, "q3"); + auto holder1 = lock1->getLock(RWLockImpl::Read, "q3", std::chrono::milliseconds(100)); if (!holder1) { throw Exception( From 2e804173a9f7bb7c7c9cb6e8de81a9dcdb209df7 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Tue, 7 Apr 2020 03:29:44 +0300 Subject: [PATCH 138/484] performance comparison --- docker/test/performance-comparison/compare.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/test/performance-comparison/compare.sh b/docker/test/performance-comparison/compare.sh index 3044050d12b..7ed2aab66bb 100755 --- a/docker/test/performance-comparison/compare.sh +++ b/docker/test/performance-comparison/compare.sh @@ -148,7 +148,7 @@ function run_tests TIMEFORMAT=$(printf "$test_name\t%%3R\t%%3U\t%%3S\n") # the grep is to filter out set -x output and keep only time output - { time "$script_dir/perf.py" "$test" > "$test_name-raw.tsv" 2> "$test_name-err.log" ; } 2>&1 >/dev/null | grep -v ^+ >> "wall-clock-times.tsv" || continue + { time "$script_dir/perf.py" --host=localhost --port=9001 --host=localhost --port=9002 "$test" > "$test_name-raw.tsv" 2> "$test_name-err.log" ; } 2>&1 >/dev/null | grep -v ^+ >> "wall-clock-times.tsv" || continue # The test completed with zero status, so we treat stderr as warnings mv "$test_name-err.log" "$test_name-warn.log" From 2e76e4d1ed0e25282ea224794f1262be07b63709 Mon Sep 17 00:00:00 2001 From: Alexander Kazakov Date: Tue, 7 Apr 2020 10:15:59 +0300 Subject: [PATCH 139/484] Made clang happy - fixed build --- src/Storages/IStorage.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/IStorage.cpp b/src/Storages/IStorage.cpp index ee751be6f5a..345ac6d5aac 100644 --- a/src/Storages/IStorage.cpp +++ b/src/Storages/IStorage.cpp @@ -322,7 +322,7 @@ RWLockImpl::LockHolder tryLockTimed(const RWLock & rwlock, RWLockImpl::Type type throw Exception( "Locking attempt timed out! Possible deadlock avoided. 
Client should retry.", ErrorCodes::DEADLOCK_AVOIDED); - return std::move(lock_holder); + return lock_holder; } TableStructureReadLockHolder IStorage::lockStructureForShare(bool will_add_new_data, const String & query_id) From d300a7da3ca687c111f97debdfb8535625274476 Mon Sep 17 00:00:00 2001 From: Dmitry Belyavtsev Date: Tue, 7 Apr 2020 15:48:41 +0800 Subject: [PATCH 140/484] Add ruby library clickhouse-activerecord to list in docs (#10069) * Add ruby library clickhouse-activerecord to list in docs * Add library clickhouse-activerecord in docsfor other locales --- docs/en/interfaces/third-party/client_libraries.md | 1 + docs/es/interfaces/third-party/client_libraries.md | 1 + docs/fa/interfaces/third-party/client_libraries.md | 1 + docs/fr/interfaces/third-party/client_libraries.md | 1 + docs/ja/interfaces/third-party/client_libraries.md | 1 + docs/ru/interfaces/third-party/client_libraries.md | 1 + docs/zh/interfaces/third-party/client_libraries.md | 1 + 7 files changed, 7 insertions(+) diff --git a/docs/en/interfaces/third-party/client_libraries.md b/docs/en/interfaces/third-party/client_libraries.md index 1f5c0e31933..f408e4e0193 100644 --- a/docs/en/interfaces/third-party/client_libraries.md +++ b/docs/en/interfaces/third-party/client_libraries.md @@ -34,6 +34,7 @@ toc_title: Client Libraries - [AnyEvent-ClickHouse](https://metacpan.org/release/AnyEvent-ClickHouse) - Ruby - [ClickHouse (Ruby)](https://github.com/shlima/click_house) + - [clickhouse-activerecord](https://github.com/PNixx/clickhouse-activerecord) - R - [clickhouse-r](https://github.com/hannesmuehleisen/clickhouse-r) - [RClickhouse](https://github.com/IMSMWU/RClickhouse) diff --git a/docs/es/interfaces/third-party/client_libraries.md b/docs/es/interfaces/third-party/client_libraries.md index 28c3a9529f4..31e9afcac91 100644 --- a/docs/es/interfaces/third-party/client_libraries.md +++ b/docs/es/interfaces/third-party/client_libraries.md @@ -36,6 +36,7 @@ toc_title: Bibliotecas de clientes - [Cualquier evento-ClickHouse](https://metacpan.org/release/AnyEvent-ClickHouse) - Rubí - [Haga clic en Casa (Ruby)](https://github.com/shlima/click_house) + - [clickhouse-activerecord](https://github.com/PNixx/clickhouse-activerecord) - R - [Sistema abierto.](https://github.com/hannesmuehleisen/clickhouse-r) - [Bienvenidos al Portal de Licitación Electrónica de Licitación Electrónica](https://github.com/IMSMWU/RClickhouse) diff --git a/docs/fa/interfaces/third-party/client_libraries.md b/docs/fa/interfaces/third-party/client_libraries.md index 9d61ab4bd77..dff3dc48717 100644 --- a/docs/fa/interfaces/third-party/client_libraries.md +++ b/docs/fa/interfaces/third-party/client_libraries.md @@ -37,6 +37,7 @@ toc_title: "\u06A9\u062A\u0627\u0628\u062E\u0627\u0646\u0647 \u0647\u0627\u06CC - [هرفنت-کلیکهاوس](https://metacpan.org/release/AnyEvent-ClickHouse) - روبی - [تاتر (روبی)](https://github.com/shlima/click_house) + - [clickhouse-activerecord](https://github.com/PNixx/clickhouse-activerecord) - R - [کلیک تحقیق](https://github.com/hannesmuehleisen/clickhouse-r) - [خانه روستایی](https://github.com/IMSMWU/RClickhouse) diff --git a/docs/fr/interfaces/third-party/client_libraries.md b/docs/fr/interfaces/third-party/client_libraries.md index 940887e4010..e4be3a53646 100644 --- a/docs/fr/interfaces/third-party/client_libraries.md +++ b/docs/fr/interfaces/third-party/client_libraries.md @@ -36,6 +36,7 @@ toc_title: "Biblioth\xE8ques Clientes" - [AnyEvent-ClickHouse](https://metacpan.org/release/AnyEvent-ClickHouse) - Rubis - [ClickHouse 
(Ruby)](https://github.com/shlima/click_house) + - [clickhouse-activerecord](https://github.com/PNixx/clickhouse-activerecord) - R - [clickhouse-r](https://github.com/hannesmuehleisen/clickhouse-r) - [RClickhouse](https://github.com/IMSMWU/RClickhouse) diff --git a/docs/ja/interfaces/third-party/client_libraries.md b/docs/ja/interfaces/third-party/client_libraries.md index bcd0ff895d5..b561e504f7c 100644 --- a/docs/ja/interfaces/third-party/client_libraries.md +++ b/docs/ja/interfaces/third-party/client_libraries.md @@ -36,6 +36,7 @@ toc_title: "\u30AF\u30E9\u30A4\u30A2\u30F3\u30C8" - [AnyEvent-ClickHouse](https://metacpan.org/release/AnyEvent-ClickHouse) - Ruby - [クリックハウス(ruby)](https://github.com/shlima/click_house) + - [clickhouse-activerecord](https://github.com/PNixx/clickhouse-activerecord) - R - [クリックハウス-r](https://github.com/hannesmuehleisen/clickhouse-r) - [Rクリックハウス](https://github.com/IMSMWU/RClickhouse) diff --git a/docs/ru/interfaces/third-party/client_libraries.md b/docs/ru/interfaces/third-party/client_libraries.md index 3ec3901666a..27fb3f67512 100644 --- a/docs/ru/interfaces/third-party/client_libraries.md +++ b/docs/ru/interfaces/third-party/client_libraries.md @@ -29,6 +29,7 @@ - [AnyEvent-ClickHouse](https://metacpan.org/release/AnyEvent-ClickHouse) - Ruby - [ClickHouse (Ruby)](https://github.com/shlima/click_house) + - [clickhouse-activerecord](https://github.com/PNixx/clickhouse-activerecord) - R - [clickhouse-r](https://github.com/hannesmuehleisen/clickhouse-r) - [RClickhouse](https://github.com/IMSMWU/RClickhouse) diff --git a/docs/zh/interfaces/third-party/client_libraries.md b/docs/zh/interfaces/third-party/client_libraries.md index bf74c490092..4814ca5cf9a 100644 --- a/docs/zh/interfaces/third-party/client_libraries.md +++ b/docs/zh/interfaces/third-party/client_libraries.md @@ -28,6 +28,7 @@ - [AnyEvent-ClickHouse](https://metacpan.org/release/AnyEvent-ClickHouse) - Ruby - [ClickHouse (Ruby)](https://github.com/shlima/click_house) + - [clickhouse-activerecord](https://github.com/PNixx/clickhouse-activerecord) - R - [clickhouse-r](https://github.com/hannesmuehleisen/clickhouse-r) - [RClickhouse](https://github.com/IMSMWU/RClickhouse) From f6b31f344dd1c9cb1939dd71e000fea1d5a3f651 Mon Sep 17 00:00:00 2001 From: Ivan <5627721+abyss7@users.noreply.github.com> Date: Tue, 7 Apr 2020 11:33:49 +0300 Subject: [PATCH 141/484] Add cross-compile build for FreeBSD (#9643) * Add toolchain to Docker image --- CMakeLists.txt | 5 +- base/common/time.h | 2 + cmake/arch.cmake | 1 - cmake/find/execinfo.cmake | 8 -- cmake/find/libgsasl.cmake | 6 +- cmake/find/rdkafka.cmake | 2 +- cmake/find/ssl.cmake | 4 +- cmake/find/unwind.cmake | 9 -- cmake/find/zlib.cmake | 4 +- cmake/freebsd/default_libs.cmake | 40 +++++++++ cmake/freebsd/toolchain-x86_64.cmake | 19 ++++ cmake/target.cmake | 3 + docker/packager/binary/Dockerfile | 3 + docker/packager/binary/build.sh | 3 + docker/packager/packager | 7 +- src/CMakeLists.txt | 4 - src/Common/QueryProfiler.cpp | 5 +- src/Common/ThreadFuzzer.cpp | 127 ++++++++++++++------------- src/Common/setThreadName.cpp | 7 +- src/IO/AIO.cpp | 25 ++---- src/IO/AIO.h | 30 +++---- 21 files changed, 181 insertions(+), 133 deletions(-) delete mode 100644 cmake/find/execinfo.cmake create mode 100644 cmake/freebsd/default_libs.cmake create mode 100644 cmake/freebsd/toolchain-x86_64.cmake diff --git a/CMakeLists.txt b/CMakeLists.txt index 00033d28475..92433dcbe34 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -228,7 +228,7 @@ else () set(NOT_UNBUNDLED 1) endif () 
-if (UNBUNDLED OR NOT (OS_LINUX OR OS_DARWIN) OR ARCH_32) +if (UNBUNDLED OR NOT (OS_LINUX OR OS_DARWIN)) # Using system libs can cause a lot of warnings in includes (on macro expansion). option (WERROR "Enable -Werror compiler option" OFF) else () @@ -251,6 +251,8 @@ if (OS_LINUX) include(cmake/linux/default_libs.cmake) elseif (OS_DARWIN) include(cmake/darwin/default_libs.cmake) +elseif (OS_FREEBSD) + include(cmake/freebsd/default_libs.cmake) endif () ###################################### @@ -316,7 +318,6 @@ include (cmake/find/poco.cmake) include (cmake/find/lz4.cmake) include (cmake/find/xxhash.cmake) include (cmake/find/sparsehash.cmake) -include (cmake/find/execinfo.cmake) include (cmake/find/re2.cmake) include (cmake/find/libgsasl.cmake) include (cmake/find/rdkafka.cmake) diff --git a/base/common/time.h b/base/common/time.h index 9a52d8e40b8..1bf588b7cb3 100644 --- a/base/common/time.h +++ b/base/common/time.h @@ -4,4 +4,6 @@ #if defined (OS_DARWIN) # define CLOCK_MONOTONIC_COARSE CLOCK_MONOTONIC +#elif defined (OS_FREEBSD) +# define CLOCK_MONOTONIC_COARSE CLOCK_MONOTONIC_FAST #endif diff --git a/cmake/arch.cmake b/cmake/arch.cmake index ec644b6fe77..57ed42295bb 100644 --- a/cmake/arch.cmake +++ b/cmake/arch.cmake @@ -11,7 +11,6 @@ if (CMAKE_LIBRARY_ARCHITECTURE MATCHES "i386") set (ARCH_I386 1) endif () if ((ARCH_ARM AND NOT ARCH_AARCH64) OR ARCH_I386) - set (ARCH_32 1) message (FATAL_ERROR "32bit platforms are not supported") endif () diff --git a/cmake/find/execinfo.cmake b/cmake/find/execinfo.cmake deleted file mode 100644 index 85cc5cf951a..00000000000 --- a/cmake/find/execinfo.cmake +++ /dev/null @@ -1,8 +0,0 @@ -if (OS_FREEBSD) - find_library (EXECINFO_LIBRARY execinfo) - find_library (ELF_LIBRARY elf) - set (EXECINFO_LIBRARIES ${EXECINFO_LIBRARY} ${ELF_LIBRARY}) - message (STATUS "Using execinfo: ${EXECINFO_LIBRARIES}") -else () - set (EXECINFO_LIBRARIES "") -endif () diff --git a/cmake/find/libgsasl.cmake b/cmake/find/libgsasl.cmake index 589e965e19b..801b63899da 100644 --- a/cmake/find/libgsasl.cmake +++ b/cmake/find/libgsasl.cmake @@ -1,6 +1,4 @@ -if (NOT ARCH_32) - option (USE_INTERNAL_LIBGSASL_LIBRARY "Set to FALSE to use system libgsasl library instead of bundled" ${NOT_UNBUNDLED}) -endif () +option (USE_INTERNAL_LIBGSASL_LIBRARY "Set to FALSE to use system libgsasl library instead of bundled" ${NOT_UNBUNDLED}) if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/libgsasl/src/gsasl.h") if (USE_INTERNAL_LIBGSASL_LIBRARY) @@ -16,7 +14,7 @@ if (NOT USE_INTERNAL_LIBGSASL_LIBRARY) endif () if (LIBGSASL_LIBRARY AND LIBGSASL_INCLUDE_DIR) -elseif (NOT MISSING_INTERNAL_LIBGSASL_LIBRARY AND NOT ARCH_32) +elseif (NOT MISSING_INTERNAL_LIBGSASL_LIBRARY) set (LIBGSASL_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/libgsasl/src ${ClickHouse_SOURCE_DIR}/contrib/libgsasl/linux_x86_64/include) set (USE_INTERNAL_LIBGSASL_LIBRARY 1) set (LIBGSASL_LIBRARY libgsasl) diff --git a/cmake/find/rdkafka.cmake b/cmake/find/rdkafka.cmake index dfab142a3cd..f18674dd440 100644 --- a/cmake/find/rdkafka.cmake +++ b/cmake/find/rdkafka.cmake @@ -1,5 +1,5 @@ # Freebsd: contrib/cppkafka/include/cppkafka/detail/endianness.h:53:23: error: 'betoh16' was not declared in this scope -if (NOT ARCH_ARM AND NOT ARCH_32 AND NOT OS_FREEBSD AND OPENSSL_FOUND) +if (NOT ARCH_ARM AND NOT OS_FREEBSD AND OPENSSL_FOUND) option (ENABLE_RDKAFKA "Enable kafka" ${ENABLE_LIBRARIES}) endif () diff --git a/cmake/find/ssl.cmake b/cmake/find/ssl.cmake index 36f9d1e67ec..efc9127309c 100644 --- a/cmake/find/ssl.cmake +++ 
b/cmake/find/ssl.cmake @@ -2,9 +2,7 @@ option(ENABLE_SSL "Enable ssl" ${ENABLE_LIBRARIES}) if(ENABLE_SSL) -if(NOT ARCH_32) - option(USE_INTERNAL_SSL_LIBRARY "Set to FALSE to use system *ssl library instead of bundled" ${NOT_UNBUNDLED}) -endif() +option(USE_INTERNAL_SSL_LIBRARY "Set to FALSE to use system *ssl library instead of bundled" ${NOT_UNBUNDLED}) if(NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/openssl/README") if(USE_INTERNAL_SSL_LIBRARY) diff --git a/cmake/find/unwind.cmake b/cmake/find/unwind.cmake index d3653973082..c9f5f30a5d6 100644 --- a/cmake/find/unwind.cmake +++ b/cmake/find/unwind.cmake @@ -1,14 +1,5 @@ option (USE_UNWIND "Enable libunwind (better stacktraces)" ${ENABLE_LIBRARIES}) -if (NOT CMAKE_SYSTEM MATCHES "Linux" OR ARCH_ARM OR ARCH_32) - set (USE_UNWIND OFF) -endif () - -if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/libunwind/CMakeLists.txt") - message(WARNING "submodule contrib/libunwind is missing. to fix try run: \n git submodule update --init --recursive") - set (USE_UNWIND OFF) -endif () - if (USE_UNWIND) add_subdirectory(contrib/libunwind-cmake) set (UNWIND_LIBRARIES unwind) diff --git a/cmake/find/zlib.cmake b/cmake/find/zlib.cmake index fb91622e298..f65d379f577 100644 --- a/cmake/find/zlib.cmake +++ b/cmake/find/zlib.cmake @@ -1,6 +1,4 @@ -if (NOT OS_FREEBSD AND NOT ARCH_32) - option (USE_INTERNAL_ZLIB_LIBRARY "Set to FALSE to use system zlib library instead of bundled" ${NOT_UNBUNDLED}) -endif () +option (USE_INTERNAL_ZLIB_LIBRARY "Set to FALSE to use system zlib library instead of bundled" ${NOT_UNBUNDLED}) if (NOT MSVC) set (INTERNAL_ZLIB_NAME "zlib-ng" CACHE INTERNAL "") diff --git a/cmake/freebsd/default_libs.cmake b/cmake/freebsd/default_libs.cmake new file mode 100644 index 00000000000..2bb76c6a761 --- /dev/null +++ b/cmake/freebsd/default_libs.cmake @@ -0,0 +1,40 @@ +set (DEFAULT_LIBS "-nodefaultlibs") + +if (NOT COMPILER_CLANG) + message (FATAL_ERROR "FreeBSD build is supported only for Clang") +endif () + +execute_process (COMMAND ${CMAKE_CXX_COMPILER} --print-file-name=libclang_rt.builtins-${CMAKE_SYSTEM_PROCESSOR}.a OUTPUT_VARIABLE BUILTINS_LIBRARY OUTPUT_STRIP_TRAILING_WHITESPACE) + +set (DEFAULT_LIBS "${DEFAULT_LIBS} ${BUILTINS_LIBRARY} ${COVERAGE_OPTION} -lc -lm -lrt -lpthread") + +message(STATUS "Default libraries: ${DEFAULT_LIBS}") + +set(CMAKE_CXX_STANDARD_LIBRARIES ${DEFAULT_LIBS}) +set(CMAKE_C_STANDARD_LIBRARIES ${DEFAULT_LIBS}) + +# Global libraries + +add_library(global-libs INTERFACE) + +# Unfortunately '-pthread' doesn't work with '-nodefaultlibs'. +# Just make sure we have pthreads at all. 
+set(THREADS_PREFER_PTHREAD_FLAG ON) +find_package(Threads REQUIRED) + +include (cmake/find/unwind.cmake) +include (cmake/find/cxx.cmake) + +add_library(global-group INTERFACE) + +target_link_libraries(global-group INTERFACE + $ +) + +link_libraries(global-group) + +# FIXME: remove when all contribs will get custom cmake lists +install( + TARGETS global-group global-libs + EXPORT global +) diff --git a/cmake/freebsd/toolchain-x86_64.cmake b/cmake/freebsd/toolchain-x86_64.cmake new file mode 100644 index 00000000000..30468731b69 --- /dev/null +++ b/cmake/freebsd/toolchain-x86_64.cmake @@ -0,0 +1,19 @@ +set (CMAKE_SYSTEM_NAME "FreeBSD") +set (CMAKE_SYSTEM_PROCESSOR "x86_64") +set (CMAKE_C_COMPILER_TARGET "x86_64-pc-freebsd12.1") +set (CMAKE_CXX_COMPILER_TARGET "x86_64-pc-freebsd12.1") +set (CMAKE_ASM_COMPILER_TARGET "x86_64-pc-freebsd12.1") +set (CMAKE_SYSROOT "${CMAKE_CURRENT_LIST_DIR}/../toolchain/freebsd-x86_64") + +set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY) # disable linkage check - it doesn't work in CMake + +set (LINKER_NAME "lld" CACHE STRING "" FORCE) + +set (CMAKE_EXE_LINKER_FLAGS_INIT "-fuse-ld=lld") +set (CMAKE_SHARED_LINKER_FLAGS_INIT "-fuse-ld=lld") + +set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE) +set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE) + +set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE) +set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE) diff --git a/cmake/target.cmake b/cmake/target.cmake index 1f40e28e76b..03d470b0aea 100644 --- a/cmake/target.cmake +++ b/cmake/target.cmake @@ -24,6 +24,9 @@ if (CMAKE_CROSSCOMPILING) set (ENABLE_PARQUET OFF CACHE INTERNAL "") set (ENABLE_MYSQL OFF CACHE INTERNAL "") endif () + elseif (OS_FREEBSD) + # FIXME: broken dependencies + set (ENABLE_PROTOBUF OFF CACHE INTERNAL "") else () message (FATAL_ERROR "Trying to cross-compile to unsupported system: ${CMAKE_SYSTEM_NAME}!") endif () diff --git a/docker/packager/binary/Dockerfile b/docker/packager/binary/Dockerfile index 60ac34167b9..54755d7c2f5 100644 --- a/docker/packager/binary/Dockerfile +++ b/docker/packager/binary/Dockerfile @@ -65,5 +65,8 @@ RUN wget https://github.com/phracker/MacOSX-SDKs/releases/download/10.14-beta4/M # It contains all required headers and libraries. Note that it's named as "gcc" but actually we are using clang for cross compiling. 
RUN wget "https://developer.arm.com/-/media/Files/downloads/gnu-a/8.3-2019.03/binrel/gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz?revision=2e88a73f-d233-4f96-b1f4-d8b36e9bb0b9&la=en" -O gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz +# Download toolchain for FreeBSD 12.1 +RUN wget https://clickhouse-datasets.s3.yandex.net/toolchains/toolchains/freebsd-12.1-toolchain.tar.xz + COPY build.sh / CMD ["/bin/bash", "/build.sh"] diff --git a/docker/packager/binary/build.sh b/docker/packager/binary/build.sh index bb9a3fcab5f..a341bbd9840 100755 --- a/docker/packager/binary/build.sh +++ b/docker/packager/binary/build.sh @@ -8,6 +8,9 @@ tar xJf MacOSX10.14.sdk.tar.xz -C build/cmake/toolchain/darwin-x86_64 --strip-co mkdir -p build/cmake/toolchain/linux-aarch64 tar xJf gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz -C build/cmake/toolchain/linux-aarch64 --strip-components=1 +mkdir -p build/cmake/toolchain/freebsd-x86_64 +tar xJf freebsd-12.1-toolchain.tar.xz -C build/cmake/toolchain/freebsd-x86_64 --strip-components=1 + mkdir -p build/build_docker cd build/build_docker ccache --show-stats ||: diff --git a/docker/packager/packager b/docker/packager/packager index 506ac1bc19b..360a358c6e5 100755 --- a/docker/packager/packager +++ b/docker/packager/packager @@ -107,6 +107,7 @@ def parse_env_variables(build_type, compiler, sanitizer, package_type, image_typ CLANG_PREFIX = "clang" DARWIN_SUFFIX = "-darwin" ARM_SUFFIX = "-aarch64" + FREEBSD_SUFFIX = "-freebsd" result = [] cmake_flags = ['$CMAKE_FLAGS', '-DADD_GDB_INDEX_FOR_GOLD=1'] @@ -114,7 +115,8 @@ def parse_env_variables(build_type, compiler, sanitizer, package_type, image_typ is_clang = compiler.startswith(CLANG_PREFIX) is_cross_darwin = compiler.endswith(DARWIN_SUFFIX) is_cross_arm = compiler.endswith(ARM_SUFFIX) - is_cross_compile = is_cross_darwin or is_cross_arm + is_cross_freebsd = compiler.endswith(FREEBSD_SUFFIX) + is_cross_compile = is_cross_darwin or is_cross_arm or is_cross_freebsd # Explicitly use LLD with Clang by default. # Don't force linker for cross-compilation. 
@@ -131,6 +133,9 @@ def parse_env_variables(build_type, compiler, sanitizer, package_type, image_typ elif is_cross_arm: cc = compiler[:-len(ARM_SUFFIX)] cmake_flags.append("-DCMAKE_TOOLCHAIN_FILE=/build/cmake/linux/toolchain-aarch64.cmake") + elif is_cross_freebsd: + cc = compiler[:-len(FREEBSD_SUFFIX)] + cmake_flags.append("-DCMAKE_TOOLCHAIN_FILE=/build/cmake/freebsd/toolchain-x86_64.cmake") else: cc = compiler diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 154d7c911cf..35f90529fd9 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -274,10 +274,6 @@ set_source_files_properties(Common/malloc.cpp PROPERTIES COMPILE_FLAGS "-fno-bui add_library (clickhouse_new_delete STATIC Common/new_delete.cpp) target_link_libraries (clickhouse_new_delete PRIVATE clickhouse_common_io jemalloc) -if (OS_FREEBSD) - target_compile_definitions (clickhouse_common_io PUBLIC CLOCK_MONOTONIC_COARSE=CLOCK_MONOTONIC_FAST) -endif () - add_subdirectory(Common/ZooKeeper) add_subdirectory(Common/Config) diff --git a/src/Common/QueryProfiler.cpp b/src/Common/QueryProfiler.cpp index ac2987a3795..dd9f36fb3ae 100644 --- a/src/Common/QueryProfiler.cpp +++ b/src/Common/QueryProfiler.cpp @@ -23,7 +23,9 @@ namespace DB namespace { +#if defined(OS_LINUX) thread_local size_t write_trace_iteration = 0; +#endif void writeTraceInfo(TraceType trace_type, int /* sig */, siginfo_t * info, void * context) { @@ -53,7 +55,6 @@ namespace } #else UNUSED(info); - UNUSED(write_trace_iteration); #endif const auto signal_context = *reinterpret_cast(context); @@ -110,7 +111,7 @@ QueryProfilerBase::QueryProfilerBase(const UInt64 thread_id, const sev.sigev_notify = SIGEV_THREAD_ID; sev.sigev_signo = pause_signal; -# if defined(__FreeBSD__) +# if defined(OS_FREEBSD) sev._sigev_un._threadid = thread_id; # else sev._sigev_un._tid = thread_id; diff --git a/src/Common/ThreadFuzzer.cpp b/src/Common/ThreadFuzzer.cpp index 2c1bb3c2744..42e133b4561 100644 --- a/src/Common/ThreadFuzzer.cpp +++ b/src/Common/ThreadFuzzer.cpp @@ -2,7 +2,7 @@ #include #include #if defined(OS_LINUX) - #include +# include #endif #include @@ -20,9 +20,9 @@ /// We will also wrap some thread synchronization functions to inject sleep/migration before or after. #if defined(OS_LINUX) -#define FOR_EACH_WRAPPED_FUNCTION(M) \ - M(int, pthread_mutex_lock, pthread_mutex_t * arg) \ - M(int, pthread_mutex_unlock, pthread_mutex_t * arg) +# define FOR_EACH_WRAPPED_FUNCTION(M) \ + M(int, pthread_mutex_lock, pthread_mutex_t * arg) \ + M(int, pthread_mutex_unlock, pthread_mutex_t * arg) #endif namespace DB @@ -67,20 +67,20 @@ static void initFromEnv(std::atomic & what, const char * name) static std::atomic num_cpus = 0; #if defined(OS_LINUX) -#define DEFINE_WRAPPER_PARAMS(RET, NAME, ...) \ - static std::atomic NAME ## _before_yield_probability = 0; \ - static std::atomic NAME ## _before_migrate_probability = 0; \ - static std::atomic NAME ## _before_sleep_probability = 0; \ - static std::atomic NAME ## _before_sleep_time_us = 0; \ - \ - static std::atomic NAME ## _after_yield_probability = 0; \ - static std::atomic NAME ## _after_migrate_probability = 0; \ - static std::atomic NAME ## _after_sleep_probability = 0; \ - static std::atomic NAME ## _after_sleep_time_us = 0; \ +# define DEFINE_WRAPPER_PARAMS(RET, NAME, ...) 
\ + static std::atomic NAME##_before_yield_probability = 0; \ + static std::atomic NAME##_before_migrate_probability = 0; \ + static std::atomic NAME##_before_sleep_probability = 0; \ + static std::atomic NAME##_before_sleep_time_us = 0; \ +\ + static std::atomic NAME##_after_yield_probability = 0; \ + static std::atomic NAME##_after_migrate_probability = 0; \ + static std::atomic NAME##_after_sleep_probability = 0; \ + static std::atomic NAME##_after_sleep_time_us = 0; FOR_EACH_WRAPPED_FUNCTION(DEFINE_WRAPPER_PARAMS) -#undef DEFINE_WRAPPER_PARAMS +# undef DEFINE_WRAPPER_PARAMS #endif void ThreadFuzzer::initConfiguration() @@ -98,20 +98,20 @@ void ThreadFuzzer::initConfiguration() initFromEnv(sleep_time_us, "THREAD_FUZZER_SLEEP_TIME_US"); #if defined(OS_LINUX) -#define INIT_WRAPPER_PARAMS(RET, NAME, ...) \ - initFromEnv(NAME ## _before_yield_probability, "THREAD_FUZZER_" #NAME "_BEFORE_YIELD_PROBABILITY"); \ - initFromEnv(NAME ## _before_migrate_probability, "THREAD_FUZZER_" #NAME "_BEFORE_MIGRATE_PROBABILITY"); \ - initFromEnv(NAME ## _before_sleep_probability, "THREAD_FUZZER_" #NAME "_BEFORE_SLEEP_PROBABILITY"); \ - initFromEnv(NAME ## _before_sleep_time_us, "THREAD_FUZZER_" #NAME "_BEFORE_SLEEP_TIME_US"); \ - \ - initFromEnv(NAME ## _after_yield_probability, "THREAD_FUZZER_" #NAME "_AFTER_YIELD_PROBABILITY"); \ - initFromEnv(NAME ## _after_migrate_probability, "THREAD_FUZZER_" #NAME "_AFTER_MIGRATE_PROBABILITY"); \ - initFromEnv(NAME ## _after_sleep_probability, "THREAD_FUZZER_" #NAME "_AFTER_SLEEP_PROBABILITY"); \ - initFromEnv(NAME ## _after_sleep_time_us, "THREAD_FUZZER_" #NAME "_AFTER_SLEEP_TIME_US"); \ +# define INIT_WRAPPER_PARAMS(RET, NAME, ...) \ + initFromEnv(NAME##_before_yield_probability, "THREAD_FUZZER_" #NAME "_BEFORE_YIELD_PROBABILITY"); \ + initFromEnv(NAME##_before_migrate_probability, "THREAD_FUZZER_" #NAME "_BEFORE_MIGRATE_PROBABILITY"); \ + initFromEnv(NAME##_before_sleep_probability, "THREAD_FUZZER_" #NAME "_BEFORE_SLEEP_PROBABILITY"); \ + initFromEnv(NAME##_before_sleep_time_us, "THREAD_FUZZER_" #NAME "_BEFORE_SLEEP_TIME_US"); \ +\ + initFromEnv(NAME##_after_yield_probability, "THREAD_FUZZER_" #NAME "_AFTER_YIELD_PROBABILITY"); \ + initFromEnv(NAME##_after_migrate_probability, "THREAD_FUZZER_" #NAME "_AFTER_MIGRATE_PROBABILITY"); \ + initFromEnv(NAME##_after_sleep_probability, "THREAD_FUZZER_" #NAME "_AFTER_SLEEP_PROBABILITY"); \ + initFromEnv(NAME##_after_sleep_time_us, "THREAD_FUZZER_" #NAME "_AFTER_SLEEP_TIME_US"); FOR_EACH_WRAPPED_FUNCTION(INIT_WRAPPER_PARAMS) -#undef INIT_WRAPPER_PARAMS +# undef INIT_WRAPPER_PARAMS #endif } @@ -119,20 +119,28 @@ void ThreadFuzzer::initConfiguration() bool ThreadFuzzer::isEffective() const { #if defined(OS_LINUX) -#define CHECK_WRAPPER_PARAMS(RET, NAME, ...) \ - if (NAME ## _before_yield_probability.load(std::memory_order_relaxed)) return true; \ - if (NAME ## _before_migrate_probability.load(std::memory_order_relaxed)) return true; \ - if (NAME ## _before_sleep_probability.load(std::memory_order_relaxed)) return true; \ - if (NAME ## _before_sleep_time_us.load(std::memory_order_relaxed)) return true; \ - \ - if (NAME ## _after_yield_probability.load(std::memory_order_relaxed)) return true; \ - if (NAME ## _after_migrate_probability.load(std::memory_order_relaxed)) return true; \ - if (NAME ## _after_sleep_probability.load(std::memory_order_relaxed)) return true; \ - if (NAME ## _after_sleep_time_us.load(std::memory_order_relaxed)) return true; \ +# define CHECK_WRAPPER_PARAMS(RET, NAME, ...) 
\ + if (NAME##_before_yield_probability.load(std::memory_order_relaxed)) \ + return true; \ + if (NAME##_before_migrate_probability.load(std::memory_order_relaxed)) \ + return true; \ + if (NAME##_before_sleep_probability.load(std::memory_order_relaxed)) \ + return true; \ + if (NAME##_before_sleep_time_us.load(std::memory_order_relaxed)) \ + return true; \ +\ + if (NAME##_after_yield_probability.load(std::memory_order_relaxed)) \ + return true; \ + if (NAME##_after_migrate_probability.load(std::memory_order_relaxed)) \ + return true; \ + if (NAME##_after_sleep_probability.load(std::memory_order_relaxed)) \ + return true; \ + if (NAME##_after_sleep_time_us.load(std::memory_order_relaxed)) \ + return true; FOR_EACH_WRAPPED_FUNCTION(CHECK_WRAPPER_PARAMS) -#undef INIT_WRAPPER_PARAMS +# undef INIT_WRAPPER_PARAMS #endif return cpu_time_period_us != 0 @@ -229,30 +237,29 @@ void ThreadFuzzer::setup() /// NOTE We cannot use dlsym(... RTLD_NEXT), because it will call pthread_mutex_lock and it will lead to infinite recursion. #if defined(OS_LINUX) -#define MAKE_WRAPPER(RET, NAME, ...) \ - extern "C" RET __ ## NAME(__VA_ARGS__); /* NOLINT */ \ - extern "C" RET NAME(__VA_ARGS__) /* NOLINT */ \ - { \ - injection( \ - NAME ## _before_yield_probability.load(std::memory_order_relaxed), \ - NAME ## _before_migrate_probability.load(std::memory_order_relaxed), \ - NAME ## _before_sleep_probability.load(std::memory_order_relaxed), \ - NAME ## _before_sleep_time_us.load(std::memory_order_relaxed)); \ - \ - auto && ret{__ ## NAME(arg)}; \ - \ - injection( \ - NAME ## _after_yield_probability.load(std::memory_order_relaxed), \ - NAME ## _after_migrate_probability.load(std::memory_order_relaxed), \ - NAME ## _after_sleep_probability.load(std::memory_order_relaxed), \ - NAME ## _after_sleep_time_us.load(std::memory_order_relaxed)); \ - \ - return ret; \ - } \ +# define MAKE_WRAPPER(RET, NAME, ...) 
\ + extern "C" RET __##NAME(__VA_ARGS__); /* NOLINT */ \ + extern "C" RET NAME(__VA_ARGS__) /* NOLINT */ \ + { \ + injection( \ + NAME##_before_yield_probability.load(std::memory_order_relaxed), \ + NAME##_before_migrate_probability.load(std::memory_order_relaxed), \ + NAME##_before_sleep_probability.load(std::memory_order_relaxed), \ + NAME##_before_sleep_time_us.load(std::memory_order_relaxed)); \ +\ + auto && ret{__##NAME(arg)}; \ +\ + injection( \ + NAME##_after_yield_probability.load(std::memory_order_relaxed), \ + NAME##_after_migrate_probability.load(std::memory_order_relaxed), \ + NAME##_after_sleep_probability.load(std::memory_order_relaxed), \ + NAME##_after_sleep_time_us.load(std::memory_order_relaxed)); \ +\ + return ret; \ + } - FOR_EACH_WRAPPED_FUNCTION(MAKE_WRAPPER) +FOR_EACH_WRAPPED_FUNCTION(MAKE_WRAPPER) -#undef MAKE_WRAPPER +# undef MAKE_WRAPPER #endif - } diff --git a/src/Common/setThreadName.cpp b/src/Common/setThreadName.cpp index 9774d9b4b86..3c20711a761 100644 --- a/src/Common/setThreadName.cpp +++ b/src/Common/setThreadName.cpp @@ -29,11 +29,10 @@ void setThreadName(const char * name) throw DB::Exception("Thread name cannot be longer than 15 bytes", DB::ErrorCodes::PTHREAD_ERROR); #endif -#if defined(__FreeBSD__) +#if defined(OS_FREEBSD) pthread_set_name_np(pthread_self(), name); - return; - -#elif defined(__APPLE__) + if ((false)) +#elif defined(OS_DARWIN) if (0 != pthread_setname_np(name)) #else if (0 != prctl(PR_SET_NAME, name, 0, 0, 0)) diff --git a/src/IO/AIO.cpp b/src/IO/AIO.cpp index 33fb79fcf95..f0422d04434 100644 --- a/src/IO/AIO.cpp +++ b/src/IO/AIO.cpp @@ -1,12 +1,12 @@ -#if defined(__linux__) - -#include -#include -#include -#include - #include +#if defined(OS_LINUX) + +# include + +# include +# include + /** Small wrappers for asynchronous I/O. */ @@ -53,17 +53,10 @@ AIOContext::~AIOContext() io_destroy(ctx); } -#elif defined(__FreeBSD__) +#elif defined(OS_FREEBSD) -# include -# include -# include -# include -# include # include -# include - /** Small wrappers for asynchronous I/O. */ @@ -123,7 +116,7 @@ int io_submit(int ctx, long nr, struct iocb * iocbpp[]) int io_getevents(int ctx, long, long max_nr, struct kevent * events, struct timespec * timeout) { - return kevent(ctx, NULL, 0, events, max_nr, timeout); + return kevent(ctx, nullptr, 0, events, max_nr, timeout); } diff --git a/src/IO/AIO.h b/src/IO/AIO.h index 7a2b85dc42e..499d1f3bf60 100644 --- a/src/IO/AIO.h +++ b/src/IO/AIO.h @@ -2,20 +2,20 @@ #include -#if defined(__linux__) +#if defined(OS_LINUX) /// https://stackoverflow.com/questions/20759750/resolving-redefinition-of-timespec-in-time-h -#define timespec linux_timespec -#define timeval linux_timeval -#define itimerspec linux_itimerspec -#define sigset_t linux_sigset_t +# define timespec linux_timespec +# define timeval linux_timeval +# define itimerspec linux_itimerspec +# define sigset_t linux_sigset_t -#include +# include -#undef timespec -#undef timeval -#undef itimerspec -#undef sigset_t +# undef timespec +# undef timeval +# undef itimerspec +# undef sigset_t /** Small wrappers for asynchronous I/O. 
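As a point of reference, this is roughly how the Linux branch of these wrappers is driven (an illustrative sketch only: it assumes AIOContext exposes its aio_context_t member as ctx, uses a hypothetical file name, and omits all error handling):

    #include <fcntl.h>
    #include <cstdint>
    #include <IO/AIO.h>

    void read_one_block()
    {
        AIOContext aio(128);                      /// io_setup() in the constructor, io_destroy() in the destructor

        int fd = ::open("/tmp/data.bin", O_RDONLY | O_DIRECT);

        alignas(4096) static char buf[4096];      /// O_DIRECT requires aligned buffers
        iocb request{};
        request.aio_lio_opcode = IOCB_CMD_PREAD;
        request.aio_fildes = fd;
        request.aio_buf = reinterpret_cast<std::uint64_t>(buf);
        request.aio_nbytes = sizeof(buf);
        request.aio_offset = 0;

        iocb * batch[] = {&request};
        io_submit(aio.ctx, 1, batch);             /// enqueue one asynchronous read
        io_event events[1];
        io_getevents(aio.ctx, 1, 1, events, nullptr);  /// block until it completes
    }

On FreeBSD the same calls are emulated on top of kqueue, as the shims above show.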
@@ -39,12 +39,12 @@ struct AIOContext : private boost::noncopyable
     ~AIOContext();
 };

-#elif defined(__FreeBSD__)
+#elif defined(OS_FREEBSD)

-#include
-#include
-#include
-#include
+#    include
+#    include
+#    include
+#    include

 typedef struct kevent io_event;
 typedef int aio_context_t;

From 78f97a7a59fdf0053e2d2e9f235f7f8e5556f414 Mon Sep 17 00:00:00 2001
From: Alexander Kazakov
Date: Tue, 7 Apr 2020 12:23:08 +0300
Subject: [PATCH 142/484] Build fixes

---
 src/Common/RWLock.cpp | 14 +++++++-------
 src/Common/RWLock.h   |  2 +-
 2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/src/Common/RWLock.cpp b/src/Common/RWLock.cpp
index a282c1c6a91..d6b8cbd244f 100644
--- a/src/Common/RWLock.cpp
+++ b/src/Common/RWLock.cpp
@@ -224,7 +224,7 @@ RWLockImpl::getLock(RWLockImpl::Type type, const String & query_id, const std::c
             /// Methods std::list<>::emplace_back() and std::unordered_map<>::emplace() provide strong exception safety
             /// We only need to roll back the changes to these objects: owner_queries and the readers/writers queue
             if (it_group->requests == 0)
-                erase_group(it_group);                   /// Rollback(SM1): nothrow
+                eraseGroup(it_group);                    /// Rollback(SM1): nothrow

             throw;
         }
@@ -247,16 +247,16 @@ RWLockImpl::getLock(RWLockImpl::Type type, const String & query_id, const std::c
  *
  * We do not employ try-catch: if something bad happens, there is nothing we can do =(
  */
-void RWLockImpl::unlock(GroupsContainer::iterator owner_group, const String & query_id) noexcept
+void RWLockImpl::unlock(GroupsContainer::iterator group_it, const String & query_id) noexcept
 {
     std::lock_guard state_lock(internal_state_mtx);

     /// All of these are undefined behavior and there is nothing we can do!
     if (rdlock_owner == readers_queue.end() && wrlock_owner == writers_queue.end())
         return;
-    if (rdlock_owner != readers_queue.end() && owner_group != rdlock_owner)
+    if (rdlock_owner != readers_queue.end() && group_it != rdlock_owner)
         return;
-    if (wrlock_owner != writers_queue.end() && owner_group != wrlock_owner)
+    if (wrlock_owner != writers_queue.end() && group_it != wrlock_owner)
         return;

     /// If query_id is not empty it must be listed in parent->owner_queries
@@ -271,12 +271,12 @@ void RWLockImpl::unlock(GroupsContainer::iterator owner_group, const String & qu
     }

     /// If we are the last remaining referrer, remove this QNode and notify the next one
-    if (--owner_group->requests == 0) /// SM: nothrow
-        erase_group(owner_group);
+    if (--group_it->requests == 0) /// SM: nothrow
+        eraseGroup(group_it);
 }


-void RWLockImpl::erase_group(GroupsContainer::iterator group_it) noexcept
+void RWLockImpl::eraseGroup(GroupsContainer::iterator group_it) noexcept
 {
     rdlock_owner = readers_queue.end();
     wrlock_owner = writers_queue.end();
diff --git a/src/Common/RWLock.h b/src/Common/RWLock.h
index 81b8551060a..edc22cffaa0 100644
--- a/src/Common/RWLock.h
+++ b/src/Common/RWLock.h
@@ -82,6 +82,6 @@ private:
 private:
     RWLockImpl() = default;
     void unlock(GroupsContainer::iterator group_it, const String & query_id) noexcept;
-    void erase_group(GroupsContainer::iterator group_it) noexcept;
+    void eraseGroup(GroupsContainer::iterator group_it) noexcept;
 };
 }

From 8297683972bd84e9508313427260f7623a8ebf70 Mon Sep 17 00:00:00 2001
From: Artem Zuikov
Date: Tue, 7 Apr 2020 12:48:47 +0300
Subject: [PATCH 143/484] renames: AnalyzedJoin -> TableJoin, Join -> HashJoin (#10065)

---
 src/Functions/FunctionJoinGet.cpp             |   3 +-
 src/Functions/FunctionJoinGet.h               |   4 +-
 src/Interpreters/CollectJoinOnKeysVisitor.cpp |   2 +-
 src/Interpreters/CollectJoinOnKeysVisitor.h   |   4 +-
src/Interpreters/ExpressionActions.cpp | 6 +- src/Interpreters/ExpressionActions.h | 6 +- src/Interpreters/ExpressionAnalyzer.cpp | 12 +- src/Interpreters/ExpressionAnalyzer.h | 2 +- src/Interpreters/{Join.cpp => HashJoin.cpp} | 108 +++++++++--------- src/Interpreters/{Join.h => HashJoin.h} | 10 +- src/Interpreters/InterpreterSelectQuery.cpp | 6 +- src/Interpreters/JoinSwitcher.cpp | 8 +- src/Interpreters/JoinSwitcher.h | 6 +- src/Interpreters/MergeJoin.cpp | 4 +- src/Interpreters/MergeJoin.h | 6 +- src/Interpreters/SubqueryForSet.cpp | 2 +- src/Interpreters/SyntaxAnalyzer.cpp | 6 +- src/Interpreters/SyntaxAnalyzer.h | 4 +- .../{AnalyzedJoin.cpp => TableJoin.cpp} | 36 +++--- .../{AnalyzedJoin.h => TableJoin.h} | 8 +- src/Interpreters/joinDispatch.h | 46 ++++---- .../Transforms/CreatingSetsTransform.cpp | 2 +- src/Storages/ReadInOrderOptimizer.cpp | 2 +- src/Storages/StorageJoin.cpp | 20 ++-- src/Storages/StorageJoin.h | 10 +- 25 files changed, 162 insertions(+), 161 deletions(-) rename src/Interpreters/{Join.cpp => HashJoin.cpp} (93%) rename src/Interpreters/{Join.h => HashJoin.h} (98%) rename src/Interpreters/{AnalyzedJoin.cpp => TableJoin.cpp} (83%) rename src/Interpreters/{AnalyzedJoin.h => TableJoin.h} (95%) diff --git a/src/Functions/FunctionJoinGet.cpp b/src/Functions/FunctionJoinGet.cpp index 0860deccb14..1745343cc33 100644 --- a/src/Functions/FunctionJoinGet.cpp +++ b/src/Functions/FunctionJoinGet.cpp @@ -3,7 +3,8 @@ #include #include #include -#include +#include +#include #include diff --git a/src/Functions/FunctionJoinGet.h b/src/Functions/FunctionJoinGet.h index 8bc1f0d1fcb..42ff2b16217 100644 --- a/src/Functions/FunctionJoinGet.h +++ b/src/Functions/FunctionJoinGet.h @@ -6,8 +6,8 @@ namespace DB { class Context; -class Join; -using HashJoinPtr = std::shared_ptr; +class HashJoin; +using HashJoinPtr = std::shared_ptr; class ExecutableFunctionJoinGet final : public IExecutableFunctionImpl { diff --git a/src/Interpreters/CollectJoinOnKeysVisitor.cpp b/src/Interpreters/CollectJoinOnKeysVisitor.cpp index 4648366a1f4..1eddbfc5666 100644 --- a/src/Interpreters/CollectJoinOnKeysVisitor.cpp +++ b/src/Interpreters/CollectJoinOnKeysVisitor.cpp @@ -2,7 +2,7 @@ #include #include -#include +#include namespace DB { diff --git a/src/Interpreters/CollectJoinOnKeysVisitor.h b/src/Interpreters/CollectJoinOnKeysVisitor.h index 68109e460e5..8a1836a97ac 100644 --- a/src/Interpreters/CollectJoinOnKeysVisitor.h +++ b/src/Interpreters/CollectJoinOnKeysVisitor.h @@ -11,7 +11,7 @@ namespace DB { class ASTIdentifier; -class AnalyzedJoin; +class TableJoin; namespace ASOF { @@ -25,7 +25,7 @@ public: struct Data { - AnalyzedJoin & analyzed_join; + TableJoin & analyzed_join; const TableWithColumnNames & left_table; const TableWithColumnNames & right_table; const Aliases & aliases; diff --git a/src/Interpreters/ExpressionActions.cpp b/src/Interpreters/ExpressionActions.cpp index a94638dd22c..435e493ffa9 100644 --- a/src/Interpreters/ExpressionActions.cpp +++ b/src/Interpreters/ExpressionActions.cpp @@ -4,7 +4,7 @@ #include #include #include -#include +#include #include #include #include @@ -147,7 +147,7 @@ ExpressionAction ExpressionAction::arrayJoin(const NameSet & array_joined_column return a; } -ExpressionAction ExpressionAction::ordinaryJoin(std::shared_ptr table_join, JoinPtr join) +ExpressionAction ExpressionAction::ordinaryJoin(std::shared_ptr table_join, JoinPtr join) { ExpressionAction a; a.type = JOIN; @@ -1206,7 +1206,7 @@ bool ExpressionAction::operator==(const ExpressionAction & other) const && 
result_name == other.result_name && argument_names == other.argument_names && same_array_join - && AnalyzedJoin::sameJoin(table_join.get(), other.table_join.get()) + && TableJoin::sameJoin(table_join.get(), other.table_join.get()) && projection == other.projection && is_function_compiled == other.is_function_compiled; } diff --git a/src/Interpreters/ExpressionActions.h b/src/Interpreters/ExpressionActions.h index f36e8b89a9f..0c3027dfbab 100644 --- a/src/Interpreters/ExpressionActions.h +++ b/src/Interpreters/ExpressionActions.h @@ -22,7 +22,7 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; } -class AnalyzedJoin; +class TableJoin; class IJoin; using JoinPtr = std::shared_ptr; @@ -97,7 +97,7 @@ public: std::shared_ptr array_join; /// For JOIN - std::shared_ptr table_join; + std::shared_ptr table_join; JoinPtr join; /// For PROJECT. @@ -114,7 +114,7 @@ public: static ExpressionAction project(const Names & projected_columns_); static ExpressionAction addAliases(const NamesWithAliases & aliased_columns_); static ExpressionAction arrayJoin(const NameSet & array_joined_columns, bool array_join_is_left, const Context & context); - static ExpressionAction ordinaryJoin(std::shared_ptr table_join, JoinPtr join); + static ExpressionAction ordinaryJoin(std::shared_ptr table_join, JoinPtr join); /// Which columns necessary to perform this action. Names getNeededColumns() const; diff --git a/src/Interpreters/ExpressionAnalyzer.cpp b/src/Interpreters/ExpressionAnalyzer.cpp index d1be66df217..6494918c532 100644 --- a/src/Interpreters/ExpressionAnalyzer.cpp +++ b/src/Interpreters/ExpressionAnalyzer.cpp @@ -27,9 +27,9 @@ #include #include #include -#include +#include #include -#include +#include #include #include @@ -502,7 +502,7 @@ bool SelectQueryExpressionAnalyzer::appendJoin(ExpressionActionsChain & chain, b return true; } -static JoinPtr tryGetStorageJoin(const ASTTablesInSelectQueryElement & join_element, std::shared_ptr analyzed_join, +static JoinPtr tryGetStorageJoin(const ASTTablesInSelectQueryElement & join_element, std::shared_ptr analyzed_join, const Context & context) { const auto & table_to_join = join_element.table_expression->as(); @@ -524,19 +524,19 @@ static JoinPtr tryGetStorageJoin(const ASTTablesInSelectQueryElement & join_elem return {}; } -static ExpressionActionsPtr createJoinedBlockActions(const Context & context, const AnalyzedJoin & analyzed_join) +static ExpressionActionsPtr createJoinedBlockActions(const Context & context, const TableJoin & analyzed_join) { ASTPtr expression_list = analyzed_join.rightKeysList(); auto syntax_result = SyntaxAnalyzer(context).analyze(expression_list, analyzed_join.columnsFromJoinedTable()); return ExpressionAnalyzer(expression_list, syntax_result, context).getActions(true, false); } -static std::shared_ptr makeJoin(std::shared_ptr analyzed_join, const Block & sample_block) +static std::shared_ptr makeJoin(std::shared_ptr analyzed_join, const Block & sample_block) { bool allow_merge_join = analyzed_join->allowMergeJoin(); if (analyzed_join->forceHashJoin() || (analyzed_join->preferMergeJoin() && !allow_merge_join)) - return std::make_shared(analyzed_join, sample_block); + return std::make_shared(analyzed_join, sample_block); else if (analyzed_join->forceMergeJoin() || (analyzed_join->preferMergeJoin() && allow_merge_join)) return std::make_shared(analyzed_join, sample_block); return std::make_shared(analyzed_join, sample_block); diff --git a/src/Interpreters/ExpressionAnalyzer.h b/src/Interpreters/ExpressionAnalyzer.h index 
61a3c7dccba..4322a897378 100644 --- a/src/Interpreters/ExpressionAnalyzer.h +++ b/src/Interpreters/ExpressionAnalyzer.h @@ -123,7 +123,7 @@ protected: SyntaxAnalyzerResultPtr syntax; const ConstStoragePtr & storage() const { return syntax->storage; } /// The main table in FROM clause, if exists. - const AnalyzedJoin & analyzedJoin() const { return *syntax->analyzed_join; } + const TableJoin & analyzedJoin() const { return *syntax->analyzed_join; } const NamesAndTypesList & sourceColumns() const { return syntax->required_source_columns; } const std::vector & aggregates() const { return syntax->aggregates; } NamesAndTypesList sourceWithJoinedColumns() const; diff --git a/src/Interpreters/Join.cpp b/src/Interpreters/HashJoin.cpp similarity index 93% rename from src/Interpreters/Join.cpp rename to src/Interpreters/HashJoin.cpp index d5cec54c9ef..a3432ebebba 100644 --- a/src/Interpreters/Join.cpp +++ b/src/Interpreters/HashJoin.cpp @@ -9,9 +9,9 @@ #include -#include +#include #include -#include +#include #include #include @@ -189,7 +189,7 @@ static void changeColumnRepresentation(const ColumnPtr & src_column, ColumnPtr & } -Join::Join(std::shared_ptr table_join_, const Block & right_sample_block, bool any_take_last_row_) +HashJoin::HashJoin(std::shared_ptr table_join_, const Block & right_sample_block, bool any_take_last_row_) : table_join(table_join_) , kind(table_join->kind()) , strictness(table_join->strictness()) @@ -199,13 +199,13 @@ Join::Join(std::shared_ptr table_join_, const Block & right_sample , any_take_last_row(any_take_last_row_) , asof_inequality(table_join->getAsofInequality()) , data(std::make_shared()) - , log(&Logger::get("Join")) + , log(&Logger::get("HashJoin")) { setSampleBlock(right_sample_block); } -Join::Type Join::chooseMethod(const ColumnRawPtrs & key_columns, Sizes & key_sizes) +HashJoin::Type HashJoin::chooseMethod(const ColumnRawPtrs & key_columns, Sizes & key_sizes) { size_t keys_size = key_columns.size(); @@ -282,47 +282,47 @@ static KeyGetter createKeyGetter(const ColumnRawPtrs & key_columns, const Sizes return KeyGetter(key_columns, key_sizes, nullptr); } -template +template struct KeyGetterForTypeImpl; -template struct KeyGetterForTypeImpl +template struct KeyGetterForTypeImpl { using Type = ColumnsHashing::HashMethodOneNumber; }; -template struct KeyGetterForTypeImpl +template struct KeyGetterForTypeImpl { using Type = ColumnsHashing::HashMethodOneNumber; }; -template struct KeyGetterForTypeImpl +template struct KeyGetterForTypeImpl { using Type = ColumnsHashing::HashMethodOneNumber; }; -template struct KeyGetterForTypeImpl +template struct KeyGetterForTypeImpl { using Type = ColumnsHashing::HashMethodOneNumber; }; -template struct KeyGetterForTypeImpl +template struct KeyGetterForTypeImpl { using Type = ColumnsHashing::HashMethodString; }; -template struct KeyGetterForTypeImpl +template struct KeyGetterForTypeImpl { using Type = ColumnsHashing::HashMethodFixedString; }; -template struct KeyGetterForTypeImpl +template struct KeyGetterForTypeImpl { using Type = ColumnsHashing::HashMethodKeysFixed; }; -template struct KeyGetterForTypeImpl +template struct KeyGetterForTypeImpl { using Type = ColumnsHashing::HashMethodKeysFixed; }; -template struct KeyGetterForTypeImpl +template struct KeyGetterForTypeImpl { using Type = ColumnsHashing::HashMethodHashed; }; -template +template struct KeyGetterForType { using Value = typename Data::value_type; @@ -332,7 +332,7 @@ struct KeyGetterForType }; -void Join::init(Type type_) +void HashJoin::init(Type type_) { data->type 
= type_; @@ -342,7 +342,7 @@ void Join::init(Type type_) joinDispatch(kind, strictness, data->maps, [&](auto, auto, auto & map) { map.create(data->type); }); } -size_t Join::getTotalRowCount() const +size_t HashJoin::getTotalRowCount() const { size_t res = 0; @@ -359,7 +359,7 @@ size_t Join::getTotalRowCount() const return res; } -size_t Join::getTotalByteCount() const +size_t HashJoin::getTotalByteCount() const { size_t res = 0; @@ -377,7 +377,7 @@ size_t Join::getTotalByteCount() const return res; } -void Join::setSampleBlock(const Block & block) +void HashJoin::setSampleBlock(const Block & block) { /// You have to restore this lock if you call the function outside of ctor. //std::unique_lock lock(rwlock); @@ -441,7 +441,7 @@ namespace template struct Inserter { - static ALWAYS_INLINE void insertOne(const Join & join, Map & map, KeyGetter & key_getter, Block * stored_block, size_t i, + static ALWAYS_INLINE void insertOne(const HashJoin & join, Map & map, KeyGetter & key_getter, Block * stored_block, size_t i, Arena & pool) { auto emplace_result = key_getter.emplaceKey(map, i, pool); @@ -450,7 +450,7 @@ namespace new (&emplace_result.getMapped()) typename Map::mapped_type(stored_block, i); } - static ALWAYS_INLINE void insertAll(const Join &, Map & map, KeyGetter & key_getter, Block * stored_block, size_t i, Arena & pool) + static ALWAYS_INLINE void insertAll(const HashJoin &, Map & map, KeyGetter & key_getter, Block * stored_block, size_t i, Arena & pool) { auto emplace_result = key_getter.emplaceKey(map, i, pool); @@ -463,7 +463,7 @@ namespace } } - static ALWAYS_INLINE void insertAsof(Join & join, Map & map, KeyGetter & key_getter, Block * stored_block, size_t i, Arena & pool, + static ALWAYS_INLINE void insertAsof(HashJoin & join, Map & map, KeyGetter & key_getter, Block * stored_block, size_t i, Arena & pool, const IColumn * asof_column) { auto emplace_result = key_getter.emplaceKey(map, i, pool); @@ -478,7 +478,7 @@ namespace template void NO_INLINE insertFromBlockImplTypeCase( - Join & join, Map & map, size_t rows, const ColumnRawPtrs & key_columns, + HashJoin & join, Map & map, size_t rows, const ColumnRawPtrs & key_columns, const Sizes & key_sizes, Block * stored_block, ConstNullMapPtr null_map, Arena & pool) { [[maybe_unused]] constexpr bool mapped_one = std::is_same_v || @@ -508,7 +508,7 @@ namespace template void insertFromBlockImplType( - Join & join, Map & map, size_t rows, const ColumnRawPtrs & key_columns, + HashJoin & join, Map & map, size_t rows, const ColumnRawPtrs & key_columns, const Sizes & key_sizes, Block * stored_block, ConstNullMapPtr null_map, Arena & pool) { if (null_map) @@ -520,17 +520,17 @@ namespace template void insertFromBlockImpl( - Join & join, Join::Type type, Maps & maps, size_t rows, const ColumnRawPtrs & key_columns, + HashJoin & join, HashJoin::Type type, Maps & maps, size_t rows, const ColumnRawPtrs & key_columns, const Sizes & key_sizes, Block * stored_block, ConstNullMapPtr null_map, Arena & pool) { switch (type) { - case Join::Type::EMPTY: break; - case Join::Type::CROSS: break; /// Do nothing. We have already saved block, and it is enough. + case HashJoin::Type::EMPTY: break; + case HashJoin::Type::CROSS: break; /// Do nothing. We have already saved block, and it is enough. 
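        /// (APPLY_FOR_JOIN_VARIANTS below expands the M macro once per hash-table
        ///  layout - key8, key16, ..., key_string, keys128, hashed - so the key-type
        ///  dispatch is resolved once per inserted block rather than per row, with
        ///  KeyGetterForType selecting the matching ColumnsHashing method for each layout.)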
#define M(TYPE) \ - case Join::Type::TYPE: \ - insertFromBlockImplType>::Type>(\ + case HashJoin::Type::TYPE: \ + insertFromBlockImplType>::Type>(\ join, *maps.TYPE, rows, key_columns, key_sizes, stored_block, null_map, pool); \ break; APPLY_FOR_JOIN_VARIANTS(M) @@ -539,7 +539,7 @@ namespace } } -void Join::initRequiredRightKeys() +void HashJoin::initRequiredRightKeys() { const Names & left_keys = table_join->keyNamesLeft(); const Names & right_keys = table_join->keyNamesRight(); @@ -558,7 +558,7 @@ void Join::initRequiredRightKeys() } } -void Join::initRightBlockStructure(Block & saved_block_sample) +void HashJoin::initRightBlockStructure(Block & saved_block_sample) { /// We could remove key columns for LEFT | INNER HashJoin but we should keep them for JoinSwitcher (if any). bool save_key_columns = !table_join->forceHashJoin() || isRightOrFull(kind); @@ -580,7 +580,7 @@ void Join::initRightBlockStructure(Block & saved_block_sample) JoinCommon::convertColumnsToNullable(saved_block_sample, (isFull(kind) ? right_table_keys.columns() : 0)); } -Block Join::structureRightBlock(const Block & block) const +Block HashJoin::structureRightBlock(const Block & block) const { Block structured_block; for (auto & sample_column : savedBlockSample().getColumnsWithTypeAndName()) @@ -594,10 +594,10 @@ Block Join::structureRightBlock(const Block & block) const return structured_block; } -bool Join::addJoinedBlock(const Block & source_block, bool check_limits) +bool HashJoin::addJoinedBlock(const Block & source_block, bool check_limits) { if (empty()) - throw Exception("Logical error: Join was not initialized", ErrorCodes::LOGICAL_ERROR); + throw Exception("Logical error: HashJoin was not initialized", ErrorCodes::LOGICAL_ERROR); /// There's no optimization for right side const columns. Remove constness if any. 
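    /// (materializeBlock() below expands any ColumnConst into a full column of
    ///  repeated values, so the hash table always stores real rows.)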
Block block = materializeBlock(source_block); @@ -666,7 +666,7 @@ public: const Block & block, const Block & saved_block_sample, const ColumnsWithTypeAndName & extras, - const Join & join_, + const HashJoin & join_, const ColumnRawPtrs & key_columns_, const Sizes & key_sizes_) : join(join_) @@ -729,7 +729,7 @@ public: } } - const Join & join; + const HashJoin & join; const ColumnRawPtrs & key_columns; const Sizes & key_sizes; size_t rows_to_add; @@ -839,7 +839,7 @@ NO_INLINE IColumn::Filter joinRightColumns(const Map & map, AddedColumns & added if constexpr (is_asof_join) { - const Join & join = added_columns.join; + const HashJoin & join = added_columns.join; if (const RowRef * found = mapped.findAsof(join.getAsofType(), join.getAsofInequality(), asof_column, i)) { setUsed(filter, i); @@ -924,14 +924,14 @@ IColumn::Filter joinRightColumnsSwitchNullability(const Map & map, AddedColumns } template -IColumn::Filter switchJoinRightColumns(const Maps & maps_, AddedColumns & added_columns, Join::Type type, const ConstNullMapPtr & null_map) +IColumn::Filter switchJoinRightColumns(const Maps & maps_, AddedColumns & added_columns, HashJoin::Type type, const ConstNullMapPtr & null_map) { switch (type) { #define M(TYPE) \ - case Join::Type::TYPE: \ + case HashJoin::Type::TYPE: \ return joinRightColumnsSwitchNullability>::Type>(\ + typename KeyGetterForType>::Type>(\ *maps_.TYPE, added_columns, null_map);\ break; APPLY_FOR_JOIN_VARIANTS(M) @@ -946,7 +946,7 @@ IColumn::Filter switchJoinRightColumns(const Maps & maps_, AddedColumns & added_ template -void Join::joinBlockImpl( +void HashJoin::joinBlockImpl( Block & block, const Names & key_names_left, const Block & block_with_columns_to_add, @@ -1065,7 +1065,7 @@ void Join::joinBlockImpl( } } -void Join::joinBlockImplCross(Block & block, ExtraBlockPtr & not_processed) const +void HashJoin::joinBlockImplCross(Block & block, ExtraBlockPtr & not_processed) const { size_t max_joined_block_rows = table_join->maxJoinedBlockRows(); size_t start_left_row = 0; @@ -1158,7 +1158,7 @@ static void checkTypeOfKey(const Block & block_left, const Block & block_right) } -DataTypePtr Join::joinGetReturnType(const String & column_name) const +DataTypePtr HashJoin::joinGetReturnType(const String & column_name) const { std::shared_lock lock(data->rwlock); @@ -1169,7 +1169,7 @@ DataTypePtr Join::joinGetReturnType(const String & column_name) const template -void Join::joinGetImpl(Block & block, const String & column_name, const Maps & maps_) const +void HashJoin::joinGetImpl(Block & block, const String & column_name, const Maps & maps_) const { joinBlockImpl( block, {block.getByPosition(0).name}, {sample_block_with_columns_to_add.getByName(column_name)}, maps_); @@ -1179,7 +1179,7 @@ void Join::joinGetImpl(Block & block, const String & column_name, const Maps & m // TODO: support composite key // TODO: return multiple columns as named tuple // TODO: return array of values when strictness == ASTTableJoin::Strictness::All -void Join::joinGet(Block & block, const String & column_name) const +void HashJoin::joinGet(Block & block, const String & column_name) const { std::shared_lock lock(data->rwlock); @@ -1198,7 +1198,7 @@ void Join::joinGet(Block & block, const String & column_name) const } -void Join::joinBlock(Block & block, ExtraBlockPtr & not_processed) +void HashJoin::joinBlock(Block & block, ExtraBlockPtr & not_processed) { std::shared_lock lock(data->rwlock); @@ -1219,7 +1219,7 @@ void Join::joinBlock(Block & block, ExtraBlockPtr & not_processed) } -void 
Join::joinTotals(Block & block) const +void HashJoin::joinTotals(Block & block) const { JoinCommon::joinTotals(totals, sample_block_with_columns_to_add, key_names_right, block); } @@ -1268,7 +1268,7 @@ struct AdderNonJoined class NonJoinedBlockInputStream : public IBlockInputStream { public: - NonJoinedBlockInputStream(const Join & parent_, const Block & result_sample_block_, UInt64 max_block_size_) + NonJoinedBlockInputStream(const HashJoin & parent_, const Block & result_sample_block_, UInt64 max_block_size_) : parent(parent_) , max_block_size(max_block_size_) , result_sample_block(materializeBlock(result_sample_block_)) @@ -1342,7 +1342,7 @@ protected: } private: - const Join & parent; + const HashJoin & parent; UInt64 max_block_size; Block result_sample_block; @@ -1359,7 +1359,7 @@ private: std::vector> right_lowcard_changes; std::any position; - std::optional nulls_position; + std::optional nulls_position; void setRightIndex(size_t right_pos, size_t result_position) { @@ -1452,7 +1452,7 @@ private: switch (parent.data->type) { #define M(TYPE) \ - case Join::Type::TYPE: \ + case HashJoin::Type::TYPE: \ return fillColumns(*maps.TYPE, columns_keys_and_right); APPLY_FOR_JOIN_VARIANTS(M) #undef M @@ -1523,7 +1523,7 @@ private: }; -BlockInputStreamPtr Join::createStreamWithNonJoinedRows(const Block & result_sample_block, UInt64 max_block_size) const +BlockInputStreamPtr HashJoin::createStreamWithNonJoinedRows(const Block & result_sample_block, UInt64 max_block_size) const { if (table_join->strictness() == ASTTableJoin::Strictness::Asof || table_join->strictness() == ASTTableJoin::Strictness::Semi) @@ -1535,7 +1535,7 @@ BlockInputStreamPtr Join::createStreamWithNonJoinedRows(const Block & result_sam } -bool Join::hasStreamWithNonJoinedRows() const +bool HashJoin::hasStreamWithNonJoinedRows() const { if (table_join->strictness() == ASTTableJoin::Strictness::Asof || table_join->strictness() == ASTTableJoin::Strictness::Semi) diff --git a/src/Interpreters/Join.h b/src/Interpreters/HashJoin.h similarity index 98% rename from src/Interpreters/Join.h rename to src/Interpreters/HashJoin.h index 9380649aeee..24ad2b871c9 100644 --- a/src/Interpreters/Join.h +++ b/src/Interpreters/HashJoin.h @@ -26,7 +26,7 @@ namespace DB { -class AnalyzedJoin; +class TableJoin; namespace JoinStuff { @@ -143,10 +143,10 @@ using MappedAsof = WithFlags; * If it is true, we always generate Nullable column and substitute NULLs for non-joined rows, * as in standard SQL. 
*/ -class Join : public IJoin +class HashJoin : public IJoin { public: - Join(std::shared_ptr table_join_, const Block & right_sample_block, bool any_take_last_row_ = false); + HashJoin(std::shared_ptr table_join_, const Block & right_sample_block, bool any_take_last_row_ = false); bool empty() { return data->type == Type::EMPTY; } @@ -315,7 +315,7 @@ public: Arena pool; }; - void reuseJoinedData(const Join & join) + void reuseJoinedData(const HashJoin & join) { data = join.data; } @@ -329,7 +329,7 @@ private: friend class NonJoinedBlockInputStream; friend class JoinSource; - std::shared_ptr table_join; + std::shared_ptr table_join; ASTTableJoin::Kind kind; ASTTableJoin::Strictness strictness; diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index 80a7831475b..35b33874ac1 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ b/src/Interpreters/InterpreterSelectQuery.cpp @@ -48,8 +48,8 @@ #include #include #include -#include -#include +#include +#include #include #include @@ -897,7 +897,7 @@ void InterpreterSelectQuery::executeImpl(TPipeline & pipeline, const BlockInputS if (join) { inflating_join = true; - if (auto * hash_join = typeid_cast(join.get())) + if (auto * hash_join = typeid_cast(join.get())) inflating_join = isCross(hash_join->getKind()); } diff --git a/src/Interpreters/JoinSwitcher.cpp b/src/Interpreters/JoinSwitcher.cpp index 5636022b563..480d105ebb6 100644 --- a/src/Interpreters/JoinSwitcher.cpp +++ b/src/Interpreters/JoinSwitcher.cpp @@ -1,6 +1,6 @@ #include #include -#include +#include #include #include @@ -17,13 +17,13 @@ static ColumnWithTypeAndName correctNullability(ColumnWithTypeAndName && column, return std::move(column); } -JoinSwitcher::JoinSwitcher(std::shared_ptr table_join_, const Block & right_sample_block_) +JoinSwitcher::JoinSwitcher(std::shared_ptr table_join_, const Block & right_sample_block_) : limits(table_join_->sizeLimits()) , switched(false) , table_join(table_join_) , right_sample_block(right_sample_block_.cloneEmpty()) { - join = std::make_shared(table_join, right_sample_block); + join = std::make_shared(table_join, right_sample_block); if (!limits.hasLimits()) limits.max_bytes = table_join->defaultMaxBytes(); @@ -50,7 +50,7 @@ bool JoinSwitcher::addJoinedBlock(const Block & block, bool) void JoinSwitcher::switchJoin() { - std::shared_ptr joined_data = static_cast(*join).getJoinedData(); + std::shared_ptr joined_data = static_cast(*join).getJoinedData(); BlocksList right_blocks = std::move(joined_data->blocks); /// Destroy old join & create new one. Early destroy for memory saving. diff --git a/src/Interpreters/JoinSwitcher.h b/src/Interpreters/JoinSwitcher.h index ecf042fb7ac..c0f03f08c4c 100644 --- a/src/Interpreters/JoinSwitcher.h +++ b/src/Interpreters/JoinSwitcher.h @@ -4,7 +4,7 @@ #include #include -#include +#include namespace DB { @@ -15,7 +15,7 @@ namespace DB class JoinSwitcher : public IJoin { public: - JoinSwitcher(std::shared_ptr table_join_, const Block & right_sample_block_); + JoinSwitcher(std::shared_ptr table_join_, const Block & right_sample_block_); /// Add block of data from right hand of JOIN into current join object. /// If join-in-memory memory limit exceeded switches to join-on-disk and continue with it. 
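The contract described in the comment above (feed the in-memory join until a size limit trips, then rebuild from the blocks accumulated so far) can be sketched as follows; InMemoryJoin and OnDiskJoin are toy stand-ins for HashJoin and MergeJoin, not the real classes:

#include <cstddef>
#include <memory>
#include <vector>

struct Block {}; /// placeholder for DB::Block

struct IJoinSketch
{
    virtual ~IJoinSketch() = default;
    /// Returns false once a size limit is exceeded.
    virtual bool addJoinedBlock(const Block & block) = 0;
};

struct InMemoryJoin : IJoinSketch
{
    std::size_t max_blocks;
    std::vector<Block> blocks;
    explicit InMemoryJoin(std::size_t max_blocks_) : max_blocks(max_blocks_) {}
    bool addJoinedBlock(const Block & block) override
    {
        blocks.push_back(block);
        return blocks.size() <= max_blocks;
    }
};

struct OnDiskJoin : IJoinSketch
{
    bool addJoinedBlock(const Block &) override { return true; } /// unbounded
};

struct JoinSwitcherSketch : IJoinSketch
{
    std::unique_ptr<IJoinSketch> join = std::make_unique<InMemoryJoin>(2);
    bool switched = false;

    bool addJoinedBlock(const Block & block) override
    {
        if (!switched && !join->addJoinedBlock(block))
            switchJoin();
        return true;
    }

    void switchJoin()
    {
        /// Move the blocks accumulated by the in-memory join into the
        /// on-disk one; the old join is destroyed early to save memory.
        auto & hash = static_cast<InMemoryJoin &>(*join);
        std::vector<Block> saved = std::move(hash.blocks);
        join = std::make_unique<OnDiskJoin>();
        for (const auto & b : saved)
            join->addJoinedBlock(b);
        switched = true;
    }
};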
@@ -72,7 +72,7 @@ private: SizeLimits limits; bool switched; mutable std::mutex switch_mutex; - std::shared_ptr table_join; + std::shared_ptr table_join; const Block right_sample_block; /// Change join-in-memory to join-on-disk moving right hand JOIN data from one to another. diff --git a/src/Interpreters/MergeJoin.cpp b/src/Interpreters/MergeJoin.cpp index fde6ba2003d..1a3a84004dd 100644 --- a/src/Interpreters/MergeJoin.cpp +++ b/src/Interpreters/MergeJoin.cpp @@ -4,7 +4,7 @@ #include #include #include -#include +#include #include #include #include @@ -445,7 +445,7 @@ void MiniLSM::merge(std::function callback) } -MergeJoin::MergeJoin(std::shared_ptr table_join_, const Block & right_sample_block_) +MergeJoin::MergeJoin(std::shared_ptr table_join_, const Block & right_sample_block_) : table_join(table_join_) , size_limits(table_join->sizeLimits()) , right_sample_block(right_sample_block_) diff --git a/src/Interpreters/MergeJoin.h b/src/Interpreters/MergeJoin.h index 74a11fc05e4..d62083df38e 100644 --- a/src/Interpreters/MergeJoin.h +++ b/src/Interpreters/MergeJoin.h @@ -13,7 +13,7 @@ namespace DB { -class AnalyzedJoin; +class TableJoin; class MergeJoinCursor; struct MergeJoinEqualRange; @@ -48,7 +48,7 @@ struct MiniLSM class MergeJoin : public IJoin { public: - MergeJoin(std::shared_ptr table_join_, const Block & right_sample_block); + MergeJoin(std::shared_ptr table_join_, const Block & right_sample_block); bool addJoinedBlock(const Block & block, bool check_limits) override; void joinBlock(Block &, ExtraBlockPtr & not_processed) override; @@ -76,7 +76,7 @@ private: using Cache = LRUCache, BlockByteWeight>; mutable std::shared_mutex rwlock; - std::shared_ptr table_join; + std::shared_ptr table_join; SizeLimits size_limits; SortDescription left_sort_description; SortDescription right_sort_description; diff --git a/src/Interpreters/SubqueryForSet.cpp b/src/Interpreters/SubqueryForSet.cpp index 98f670e5c64..72831970de7 100644 --- a/src/Interpreters/SubqueryForSet.cpp +++ b/src/Interpreters/SubqueryForSet.cpp @@ -1,6 +1,6 @@ #include #include -#include +#include #include #include #include diff --git a/src/Interpreters/SyntaxAnalyzer.cpp b/src/Interpreters/SyntaxAnalyzer.cpp index f93d11fa1da..8a9a63206ba 100644 --- a/src/Interpreters/SyntaxAnalyzer.cpp +++ b/src/Interpreters/SyntaxAnalyzer.cpp @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include /// getSmallestColumn() #include #include @@ -520,7 +520,7 @@ void setJoinStrictness(ASTSelectQuery & select_query, JoinStrictness join_defaul } /// Find the columns that are obtained by JOIN. 
-void collectJoinedColumns(AnalyzedJoin & analyzed_join, const ASTSelectQuery & select_query, +void collectJoinedColumns(TableJoin & analyzed_join, const ASTSelectQuery & select_query, const std::vector & tables, const Aliases & aliases) { const ASTTablesInSelectQueryElement * node = select_query.join(); @@ -795,7 +795,7 @@ SyntaxAnalyzerResultPtr SyntaxAnalyzer::analyzeSelect( const auto & settings = context.getSettingsRef(); const NameSet & source_columns_set = result.source_columns_set; - result.analyzed_join = std::make_shared(settings, context.getTemporaryVolume()); + result.analyzed_join = std::make_shared(settings, context.getTemporaryVolume()); if (remove_duplicates) renameDuplicatedColumns(select_query); diff --git a/src/Interpreters/SyntaxAnalyzer.h b/src/Interpreters/SyntaxAnalyzer.h index 01997a8f1ea..23e8a4b79aa 100644 --- a/src/Interpreters/SyntaxAnalyzer.h +++ b/src/Interpreters/SyntaxAnalyzer.h @@ -11,7 +11,7 @@ namespace DB { class ASTFunction; -class AnalyzedJoin; +class TableJoin; class Context; struct Settings; struct SelectQueryOptions; @@ -20,7 +20,7 @@ using Scalars = std::map; struct SyntaxAnalyzerResult { ConstStoragePtr storage; - std::shared_ptr analyzed_join; + std::shared_ptr analyzed_join; NamesAndTypesList source_columns; NameSet source_columns_set; /// Set of names of source_columns. diff --git a/src/Interpreters/AnalyzedJoin.cpp b/src/Interpreters/TableJoin.cpp similarity index 83% rename from src/Interpreters/AnalyzedJoin.cpp rename to src/Interpreters/TableJoin.cpp index f3ab350c373..30b5e8e4483 100644 --- a/src/Interpreters/AnalyzedJoin.cpp +++ b/src/Interpreters/TableJoin.cpp @@ -1,4 +1,4 @@ -#include +#include #include @@ -15,7 +15,7 @@ namespace ErrorCodes { } -AnalyzedJoin::AnalyzedJoin(const Settings & settings, VolumePtr tmp_volume_) +TableJoin::TableJoin(const Settings & settings, VolumePtr tmp_volume_) : size_limits(SizeLimits{settings.max_rows_in_join, settings.max_bytes_in_join, settings.join_overflow_mode}) , default_max_bytes(settings.default_max_bytes_in_join) , join_use_nulls(settings.join_use_nulls) @@ -29,7 +29,7 @@ AnalyzedJoin::AnalyzedJoin(const Settings & settings, VolumePtr tmp_volume_) join_algorithm = JoinAlgorithm::PREFER_PARTIAL_MERGE; } -void AnalyzedJoin::addUsingKey(const ASTPtr & ast) +void TableJoin::addUsingKey(const ASTPtr & ast) { key_names_left.push_back(ast->getColumnName()); key_names_right.push_back(ast->getAliasOrColumnName()); @@ -42,7 +42,7 @@ void AnalyzedJoin::addUsingKey(const ASTPtr & ast) right_key = renames[right_key]; } -void AnalyzedJoin::addOnKeys(ASTPtr & left_table_ast, ASTPtr & right_table_ast) +void TableJoin::addOnKeys(ASTPtr & left_table_ast, ASTPtr & right_table_ast) { key_names_left.push_back(left_table_ast->getColumnName()); key_names_right.push_back(right_table_ast->getAliasOrColumnName()); @@ -52,7 +52,7 @@ void AnalyzedJoin::addOnKeys(ASTPtr & left_table_ast, ASTPtr & right_table_ast) } /// @return how many times right key appears in ON section. 
-size_t AnalyzedJoin::rightKeyInclusion(const String & name) const +size_t TableJoin::rightKeyInclusion(const String & name) const { if (hasUsing()) return 0; @@ -64,7 +64,7 @@ size_t AnalyzedJoin::rightKeyInclusion(const String & name) const return count; } -void AnalyzedJoin::deduplicateAndQualifyColumnNames(const NameSet & left_table_columns, const String & right_table_prefix) +void TableJoin::deduplicateAndQualifyColumnNames(const NameSet & left_table_columns, const String & right_table_prefix) { NameSet joined_columns; NamesAndTypesList dedup_columns; @@ -90,7 +90,7 @@ void AnalyzedJoin::deduplicateAndQualifyColumnNames(const NameSet & left_table_c columns_from_joined_table.swap(dedup_columns); } -NameSet AnalyzedJoin::getQualifiedColumnsSet() const +NameSet TableJoin::getQualifiedColumnsSet() const { NameSet out; for (const auto & names : original_names) @@ -98,7 +98,7 @@ NameSet AnalyzedJoin::getQualifiedColumnsSet() const return out; } -NamesWithAliases AnalyzedJoin::getNamesWithAliases(const NameSet & required_columns) const +NamesWithAliases TableJoin::getNamesWithAliases(const NameSet & required_columns) const { NamesWithAliases out; for (const auto & column : required_columns) @@ -110,14 +110,14 @@ NamesWithAliases AnalyzedJoin::getNamesWithAliases(const NameSet & required_colu return out; } -ASTPtr AnalyzedJoin::leftKeysList() const +ASTPtr TableJoin::leftKeysList() const { ASTPtr keys_list = std::make_shared(); keys_list->children = key_asts_left; return keys_list; } -ASTPtr AnalyzedJoin::rightKeysList() const +ASTPtr TableJoin::rightKeysList() const { ASTPtr keys_list = std::make_shared(); if (hasOn()) @@ -125,7 +125,7 @@ ASTPtr AnalyzedJoin::rightKeysList() const return keys_list; } -Names AnalyzedJoin::requiredJoinedNames() const +Names TableJoin::requiredJoinedNames() const { NameSet required_columns_set(key_names_right.begin(), key_names_right.end()); for (const auto & joined_column : columns_added_by_join) @@ -134,7 +134,7 @@ Names AnalyzedJoin::requiredJoinedNames() const return Names(required_columns_set.begin(), required_columns_set.end()); } -NameSet AnalyzedJoin::requiredRightKeys() const +NameSet TableJoin::requiredRightKeys() const { NameSet required; for (const auto & name : key_names_right) @@ -144,7 +144,7 @@ NameSet AnalyzedJoin::requiredRightKeys() const return required; } -NamesWithAliases AnalyzedJoin::getRequiredColumns(const Block & sample, const Names & action_required_columns) const +NamesWithAliases TableJoin::getRequiredColumns(const Block & sample, const Names & action_required_columns) const { NameSet required_columns(action_required_columns.begin(), action_required_columns.end()); @@ -155,7 +155,7 @@ NamesWithAliases AnalyzedJoin::getRequiredColumns(const Block & sample, const Na return getNamesWithAliases(required_columns); } -void AnalyzedJoin::addJoinedColumn(const NameAndTypePair & joined_column) +void TableJoin::addJoinedColumn(const NameAndTypePair & joined_column) { if (join_use_nulls && isLeftOrFull(table_join.kind)) { @@ -166,7 +166,7 @@ void AnalyzedJoin::addJoinedColumn(const NameAndTypePair & joined_column) columns_added_by_join.push_back(joined_column); } -void AnalyzedJoin::addJoinedColumnsAndCorrectNullability(Block & sample_block) const +void TableJoin::addJoinedColumnsAndCorrectNullability(Block & sample_block) const { bool right_or_full_join = isRightOrFull(table_join.kind); bool left_or_full_join = isLeftOrFull(table_join.kind); @@ -198,7 +198,7 @@ void AnalyzedJoin::addJoinedColumnsAndCorrectNullability(Block & sample_block) c 
} } -bool AnalyzedJoin::sameJoin(const AnalyzedJoin * x, const AnalyzedJoin * y) +bool TableJoin::sameJoin(const TableJoin * x, const TableJoin * y) { if (!x && !y) return true; @@ -212,7 +212,7 @@ bool AnalyzedJoin::sameJoin(const AnalyzedJoin * x, const AnalyzedJoin * y) && x->columns_added_by_join == y->columns_added_by_join; } -bool AnalyzedJoin::sameStrictnessAndKind(ASTTableJoin::Strictness strictness_, ASTTableJoin::Kind kind_) const +bool TableJoin::sameStrictnessAndKind(ASTTableJoin::Strictness strictness_, ASTTableJoin::Kind kind_) const { if (strictness_ == strictness() && kind_ == kind()) return true; @@ -228,7 +228,7 @@ bool AnalyzedJoin::sameStrictnessAndKind(ASTTableJoin::Strictness strictness_, A return false; } -bool AnalyzedJoin::allowMergeJoin() const +bool TableJoin::allowMergeJoin() const { bool is_any = (strictness() == ASTTableJoin::Strictness::Any); bool is_all = (strictness() == ASTTableJoin::Strictness::All); diff --git a/src/Interpreters/AnalyzedJoin.h b/src/Interpreters/TableJoin.h similarity index 95% rename from src/Interpreters/AnalyzedJoin.h rename to src/Interpreters/TableJoin.h index f1341a16a4c..0b5ed82411a 100644 --- a/src/Interpreters/AnalyzedJoin.h +++ b/src/Interpreters/TableJoin.h @@ -25,7 +25,7 @@ struct Settings; class Volume; using VolumePtr = std::shared_ptr; -class AnalyzedJoin +class TableJoin { /** Query of the form `SELECT expr(x) AS k FROM t1 ANY LEFT JOIN (SELECT expr(x) AS k FROM t2) USING k` * The join is made by column k. @@ -69,10 +69,10 @@ class AnalyzedJoin VolumePtr tmp_volume; public: - AnalyzedJoin(const Settings &, VolumePtr tmp_volume); + TableJoin(const Settings &, VolumePtr tmp_volume); /// for StorageJoin - AnalyzedJoin(SizeLimits limits, bool use_nulls, ASTTableJoin::Kind kind, ASTTableJoin::Strictness strictness, + TableJoin(SizeLimits limits, bool use_nulls, ASTTableJoin::Kind kind, ASTTableJoin::Strictness strictness, const Names & key_names_right_) : size_limits(limits) , default_max_bytes(0) @@ -133,7 +133,7 @@ public: /// StorageJoin overrides key names (cause of different names qualification) void setRightKeys(const Names & keys) { key_names_right = keys; } - static bool sameJoin(const AnalyzedJoin * x, const AnalyzedJoin * y); + static bool sameJoin(const TableJoin * x, const TableJoin * y); }; } diff --git a/src/Interpreters/joinDispatch.h b/src/Interpreters/joinDispatch.h index 840b9b91a66..af16550e17e 100644 --- a/src/Interpreters/joinDispatch.h +++ b/src/Interpreters/joinDispatch.h @@ -3,7 +3,7 @@ #include #include -#include +#include /** Used in implementation of Join to process different data structures. 
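joinDispatch.h pairs each (kind, strictness) combination with a concrete map family at compile time via template specialization. A reduced, self-contained sketch of that trait pattern, with simplified stand-ins for the real map types:

#include <cstddef>
#include <string>
#include <unordered_map>
#include <vector>

/// Simplified stand-ins for the real map families.
using MapsOne = std::unordered_map<std::string, std::size_t>;
using MapsAll = std::unordered_map<std::string, std::vector<std::size_t>>;

enum class Kind { Left, Inner, Right, Full };
enum class Strictness { Any, All };

/// The (kind, strictness) pair selects the hash-table value type at
/// compile time; pairs without a specialization simply fail to instantiate.
template <Kind kind, Strictness strictness>
struct MapGetter;

template <> struct MapGetter<Kind::Left,  Strictness::Any> { using Map = MapsOne; };
template <> struct MapGetter<Kind::Inner, Strictness::Any> { using Map = MapsOne; };
template <> struct MapGetter<Kind::Left,  Strictness::All> { using Map = MapsAll; };
template <> struct MapGetter<Kind::Inner, Strictness::All> { using Map = MapsAll; };

template <Kind kind, Strictness strictness>
void buildTable()
{
    typename MapGetter<kind, strictness>::Map map;
    map["key"]; /// value type differs per specialization, usage does not
}

int main()
{
    buildTable<Kind::Left, Strictness::Any>();
    buildTable<Kind::Inner, Strictness::All>();
}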
@@ -15,37 +15,37 @@ namespace DB template struct MapGetter; -template <> struct MapGetter { using Map = Join::MapsOne; }; -template <> struct MapGetter { using Map = Join::MapsOne; }; -template <> struct MapGetter { using Map = Join::MapsOneFlagged; }; -template <> struct MapGetter { using Map = Join::MapsOneFlagged; }; +template <> struct MapGetter { using Map = HashJoin::MapsOne; }; +template <> struct MapGetter { using Map = HashJoin::MapsOne; }; +template <> struct MapGetter { using Map = HashJoin::MapsOneFlagged; }; +template <> struct MapGetter { using Map = HashJoin::MapsOneFlagged; }; -template <> struct MapGetter { using Map = Join::MapsOne; }; -template <> struct MapGetter { using Map = Join::MapsOneFlagged; }; -template <> struct MapGetter { using Map = Join::MapsAllFlagged; }; -template <> struct MapGetter { using Map = Join::MapsAllFlagged; }; +template <> struct MapGetter { using Map = HashJoin::MapsOne; }; +template <> struct MapGetter { using Map = HashJoin::MapsOneFlagged; }; +template <> struct MapGetter { using Map = HashJoin::MapsAllFlagged; }; +template <> struct MapGetter { using Map = HashJoin::MapsAllFlagged; }; -template <> struct MapGetter { using Map = Join::MapsAll; }; -template <> struct MapGetter { using Map = Join::MapsAll; }; -template <> struct MapGetter { using Map = Join::MapsAllFlagged; }; -template <> struct MapGetter { using Map = Join::MapsAllFlagged; }; +template <> struct MapGetter { using Map = HashJoin::MapsAll; }; +template <> struct MapGetter { using Map = HashJoin::MapsAll; }; +template <> struct MapGetter { using Map = HashJoin::MapsAllFlagged; }; +template <> struct MapGetter { using Map = HashJoin::MapsAllFlagged; }; /// Only SEMI LEFT and SEMI RIGHT are valid. INNER and FULL are here for templates instantiation. -template <> struct MapGetter { using Map = Join::MapsOne; }; -template <> struct MapGetter { using Map = Join::MapsOne; }; -template <> struct MapGetter { using Map = Join::MapsAllFlagged; }; -template <> struct MapGetter { using Map = Join::MapsOne; }; +template <> struct MapGetter { using Map = HashJoin::MapsOne; }; +template <> struct MapGetter { using Map = HashJoin::MapsOne; }; +template <> struct MapGetter { using Map = HashJoin::MapsAllFlagged; }; +template <> struct MapGetter { using Map = HashJoin::MapsOne; }; /// Only SEMI LEFT and SEMI RIGHT are valid. INNER and FULL are here for templates instantiation. 
-template <> struct MapGetter { using Map = Join::MapsOne; }; -template <> struct MapGetter { using Map = Join::MapsOne; }; -template <> struct MapGetter { using Map = Join::MapsAllFlagged; }; -template <> struct MapGetter { using Map = Join::MapsOne; }; +template <> struct MapGetter { using Map = HashJoin::MapsOne; }; +template <> struct MapGetter { using Map = HashJoin::MapsOne; }; +template <> struct MapGetter { using Map = HashJoin::MapsAllFlagged; }; +template <> struct MapGetter { using Map = HashJoin::MapsOne; }; template struct MapGetter { - using Map = Join::MapsAsof; + using Map = HashJoin::MapsAsof; }; @@ -66,7 +66,7 @@ static constexpr std::array KINDS = { }; /// Init specified join map -inline bool joinDispatchInit(ASTTableJoin::Kind kind, ASTTableJoin::Strictness strictness, Join::MapsVariant & maps) +inline bool joinDispatchInit(ASTTableJoin::Kind kind, ASTTableJoin::Strictness strictness, HashJoin::MapsVariant & maps) { return static_for<0, KINDS.size() * STRICTNESSES.size()>([&](auto ij) { diff --git a/src/Processors/Transforms/CreatingSetsTransform.cpp b/src/Processors/Transforms/CreatingSetsTransform.cpp index 79ceae64d92..f5637a21ede 100644 --- a/src/Processors/Transforms/CreatingSetsTransform.cpp +++ b/src/Processors/Transforms/CreatingSetsTransform.cpp @@ -5,7 +5,7 @@ #include #include -#include +#include #include #include diff --git a/src/Storages/ReadInOrderOptimizer.cpp b/src/Storages/ReadInOrderOptimizer.cpp index c05acfa71ab..5bbe5be9928 100644 --- a/src/Storages/ReadInOrderOptimizer.cpp +++ b/src/Storages/ReadInOrderOptimizer.cpp @@ -1,7 +1,7 @@ #include #include #include -#include +#include #include namespace DB diff --git a/src/Storages/StorageJoin.cpp b/src/Storages/StorageJoin.cpp index f5e88b193cd..8912680b1dd 100644 --- a/src/Storages/StorageJoin.cpp +++ b/src/Storages/StorageJoin.cpp @@ -1,6 +1,6 @@ #include #include -#include +#include #include #include #include @@ -9,7 +9,7 @@ #include #include #include -#include +#include #include #include @@ -57,8 +57,8 @@ StorageJoin::StorageJoin( if (!getColumns().hasPhysical(key)) throw Exception{"Key column (" + key + ") does not exist in table declaration.", ErrorCodes::NO_SUCH_COLUMN_IN_TABLE}; - table_join = std::make_shared(limits, use_nulls, kind, strictness, key_names); - join = std::make_shared(table_join, getSampleBlock().sortColumns(), overwrite); + table_join = std::make_shared(limits, use_nulls, kind, strictness, key_names); + join = std::make_shared(table_join, getSampleBlock().sortColumns(), overwrite); restore(); } @@ -70,11 +70,11 @@ void StorageJoin::truncate(const ASTPtr &, const Context &, TableStructureWriteL Poco::File(path + "tmp/").createDirectories(); increment = 0; - join = std::make_shared(table_join, getSampleBlock().sortColumns(), overwrite); + join = std::make_shared(table_join, getSampleBlock().sortColumns(), overwrite); } -HashJoinPtr StorageJoin::getJoin(std::shared_ptr analyzed_join) const +HashJoinPtr StorageJoin::getJoin(std::shared_ptr analyzed_join) const { if (!analyzed_join->sameStrictnessAndKind(strictness, kind)) throw Exception("Table " + getStorageID().getNameForLogs() + " has incompatible type of JOIN.", ErrorCodes::INCOMPATIBLE_TYPE_OF_JOIN); @@ -89,7 +89,7 @@ HashJoinPtr StorageJoin::getJoin(std::shared_ptr analyzed_join) co /// Some HACK to remove wrong names qualifiers: table.column -> column. 
analyzed_join->setRightKeys(key_names); - HashJoinPtr join_clone = std::make_shared(analyzed_join, getSampleBlock().sortColumns()); + HashJoinPtr join_clone = std::make_shared(analyzed_join, getSampleBlock().sortColumns()); join_clone->reuseJoinedData(*join); return join_clone; } @@ -244,7 +244,7 @@ size_t rawSize(const StringRef & t) class JoinSource : public SourceWithProgress { public: - JoinSource(const Join & parent_, UInt64 max_block_size_, Block sample_block_) + JoinSource(const HashJoin & parent_, UInt64 max_block_size_, Block sample_block_) : SourceWithProgress(sample_block_) , parent(parent_) , lock(parent.data->rwlock) @@ -287,7 +287,7 @@ protected: } private: - const Join & parent; + const HashJoin & parent; std::shared_lock lock; UInt64 max_block_size; Block sample_block; @@ -326,7 +326,7 @@ private: switch (parent.data->type) { #define M(TYPE) \ - case Join::Type::TYPE: \ + case HashJoin::Type::TYPE: \ rows_added = fillColumns(*maps.TYPE); \ break; APPLY_FOR_JOIN_VARIANTS_LIMITED(M) diff --git a/src/Storages/StorageJoin.h b/src/Storages/StorageJoin.h index acfc8a8b4e7..f956abb4d3b 100644 --- a/src/Storages/StorageJoin.h +++ b/src/Storages/StorageJoin.h @@ -9,9 +9,9 @@ namespace DB { -class AnalyzedJoin; -class Join; -using HashJoinPtr = std::shared_ptr; +class TableJoin; +class HashJoin; +using HashJoinPtr = std::shared_ptr; /** Allows you save the state for later use on the right side of the JOIN. @@ -31,7 +31,7 @@ public: /// Access the innards. HashJoinPtr & getJoin() { return join; } - HashJoinPtr getJoin(std::shared_ptr analyzed_join) const; + HashJoinPtr getJoin(std::shared_ptr analyzed_join) const; /// Verify that the data structure is suitable for implementing this type of JOIN. void assertCompatible(ASTTableJoin::Kind kind_, ASTTableJoin::Strictness strictness_) const; @@ -53,7 +53,7 @@ private: ASTTableJoin::Strictness strictness; /// ANY | ALL bool overwrite; - std::shared_ptr table_join; + std::shared_ptr table_join; HashJoinPtr join; void insertBlock(const Block & block) override; From 53b5dade5ed488f8be0f0b8e7a624be3e5bb206c Mon Sep 17 00:00:00 2001 From: "philip.han" Date: Mon, 6 Apr 2020 22:30:16 +0900 Subject: [PATCH 144/484] Implement transform_null_in --- src/Core/Settings.h | 1 + src/Functions/in.cpp | 52 ++++++++--- src/Interpreters/ActionsVisitor.cpp | 16 ++-- src/Interpreters/ExpressionAnalyzer.cpp | 2 +- src/Interpreters/NullableUtils.cpp | 9 +- src/Interpreters/NullableUtils.h | 2 +- src/Interpreters/Set.cpp | 19 ++-- src/Interpreters/Set.h | 10 +- src/Interpreters/SyntaxAnalyzer.cpp | 42 +++++++-- src/Interpreters/misc.h | 4 +- src/Storages/StorageSet.cpp | 6 +- .../01231_operator_null_in.reference | 54 +++++++++++ .../0_stateless/01231_operator_null_in.sql | 93 +++++++++++++++++++ 13 files changed, 267 insertions(+), 43 deletions(-) create mode 100644 tests/queries/0_stateless/01231_operator_null_in.reference create mode 100644 tests/queries/0_stateless/01231_operator_null_in.sql diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 753231603b2..29bfa82d89b 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -406,6 +406,7 @@ struct Settings : public SettingsCollection M(SettingBool, validate_polygons, true, "Throw exception if polygon is invalid in function pointInPolygon (e.g. self-tangent, self-intersecting). 
If the setting is false, the function will accept invalid polygons but may silently return a wrong result.", 0) \
    M(SettingUInt64, max_parser_depth, 1000, "Maximum parser depth.", 0) \
    M(SettingSeconds, temporary_live_view_timeout, DEFAULT_TEMPORARY_LIVE_VIEW_TIMEOUT_SEC, "Timeout after which temporary live view is deleted.", 0) \
+    M(SettingBool, transform_null_in, false, "If enabled, NULL values are compared by the 'IN' operator, so that NULL IN (..., NULL) returns 1 instead of 0.", 0) \
    \
    /** Obsolete settings that do nothing but left for compatibility reasons. Remove each one after half a year of obsolescence. */ \
    \
diff --git a/src/Functions/in.cpp b/src/Functions/in.cpp
index 0b25ca201bb..a89535c675a 100644
--- a/src/Functions/in.cpp
+++ b/src/Functions/in.cpp
@@ -21,38 +21,62 @@ namespace ErrorCodes
  * notIn(x, set) - and NOT IN.
  */

-template <bool negative, bool global>
+template <bool negative, bool global, bool null_is_skipped>
 struct FunctionInName;

 template <>
-struct FunctionInName<false, false>
+struct FunctionInName<false, false, true>
 {
     static constexpr auto name = "in";
 };

 template <>
-struct FunctionInName<false, true>
+struct FunctionInName<false, true, true>
 {
     static constexpr auto name = "globalIn";
 };

 template <>
-struct FunctionInName<true, false>
+struct FunctionInName<true, false, true>
 {
     static constexpr auto name = "notIn";
 };

 template <>
-struct FunctionInName<true, true>
+struct FunctionInName<true, true, true>
 {
     static constexpr auto name = "globalNotIn";
 };

-template <bool negative, bool global>
+template <>
+struct FunctionInName<false, false, false>
+{
+    static constexpr auto name = "nullIn";
+};
+
+template <>
+struct FunctionInName<false, true, false>
+{
+    static constexpr auto name = "globalNullIn";
+};
+
+template <>
+struct FunctionInName<true, false, false>
+{
+    static constexpr auto name = "notNullIn";
+};
+
+template <>
+struct FunctionInName<true, true, false>
+{
+    static constexpr auto name = "globalNotNullIn";
+};
+
+template <bool negative, bool global, bool null_is_skipped>
 class FunctionIn : public IFunction
 {
 public:
-    static constexpr auto name = FunctionInName<negative, global>::name;
+    static constexpr auto name = FunctionInName<negative, global, null_is_skipped>::name;
     static FunctionPtr create(const Context &)
     {
         return std::make_shared<FunctionIn>();
@@ -75,6 +99,8 @@ public:

     bool useDefaultImplementationForConstants() const override { return true; }

+    bool useDefaultImplementationForNulls() const override { return null_is_skipped; }
+
     void executeImpl(Block & block, const ColumnNumbers & arguments, size_t result, size_t /*input_rows_count*/) override
     {
         /// NOTE: after updating this code, check that FunctionIgnoreExceptNull returns the same type of column.
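The new useDefaultImplementationForNulls override is the key behavioural switch here: for the null_is_skipped family the framework's default NULL handling short-circuits NULL arguments before the function body runs, while the nullIn family processes the NULLs itself. A rough scalar model of the difference, with std::optional standing in for Nullable columns (evaluateIn is an invented helper, not the real API):

#include <optional>
#include <vector>

/// null_is_skipped == true models plain `in`: the default NULL handling
/// returns NULL without calling the body. false models `nullIn`, which
/// sees the NULLs and can match NULL against NULL.
template <bool null_is_skipped>
std::optional<bool> evaluateIn(const std::optional<int> & x,
                               const std::vector<std::optional<int>> & set)
{
    if (null_is_skipped && !x.has_value())
        return std::nullopt;
    for (const auto & elem : set)
        if (elem == x) /// std::optional equality: two empty optionals compare equal
            return true;
    return false;
}

/// evaluateIn<true>(std::nullopt, {1, std::nullopt})  -> NULL (argument skipped)
/// evaluateIn<false>(std::nullopt, {1, std::nullopt}) -> true (NULL matched NULL)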
@@ -123,10 +149,14 @@ public:

 void registerFunctionsIn(FunctionFactory & factory)
 {
-    factory.registerFunction<FunctionIn<false, false>>();
-    factory.registerFunction<FunctionIn<false, true>>();
-    factory.registerFunction<FunctionIn<true, false>>();
-    factory.registerFunction<FunctionIn<true, true>>();
+    factory.registerFunction<FunctionIn<false, false, true>>();
+    factory.registerFunction<FunctionIn<false, true, true>>();
+    factory.registerFunction<FunctionIn<true, false, true>>();
+    factory.registerFunction<FunctionIn<true, true, true>>();
+    factory.registerFunction<FunctionIn<false, false, false>>();
+    factory.registerFunction<FunctionIn<false, true, false>>();
+    factory.registerFunction<FunctionIn<true, false, false>>();
+    factory.registerFunction<FunctionIn<true, true, false>>();
 }

 }
diff --git a/src/Interpreters/ActionsVisitor.cpp b/src/Interpreters/ActionsVisitor.cpp
index 4e008a81973..f7d64d54f27 100644
--- a/src/Interpreters/ActionsVisitor.cpp
+++ b/src/Interpreters/ActionsVisitor.cpp
@@ -64,7 +64,7 @@ static NamesAndTypesList::iterator findColumn(const String & name, NamesAndTypes
 }

 template <typename Collection>
-static Block createBlockFromCollection(const Collection & collection, const DataTypes & types)
+static Block createBlockFromCollection(const Collection & collection, const DataTypes & types, const Context & context)
 {
     size_t columns_num = types.size();
     MutableColumns columns(columns_num);
@@ -77,7 +77,7 @@
         if (columns_num == 1)
         {
             auto field = convertFieldToType(value, *types[0]);
-            if (!field.isNull())
+            if (!field.isNull() || context.getSettingsRef().transform_null_in)
                 columns[0]->insert(std::move(field));
         }
         else
@@ -100,7 +100,7 @@
             for (; i < tuple_size; ++i)
             {
                 tuple_values[i] = convertFieldToType(tuple[i], *types[i]);
-                if (tuple_values[i].isNull())
+                if (tuple_values[i].isNull() && !context.getSettingsRef().transform_null_in)
                     break;
             }

@@ -170,23 +170,23 @@ SetPtr makeExplicitSet(
     if (left_type_depth == right_type_depth)
     {
         Array array{right_arg_value};
-        block = createBlockFromCollection(array, set_element_types);
+        block = createBlockFromCollection(array, set_element_types, context);
     }
     /// 1 in (1, 2); (1, 2) in ((1, 2), (3, 4)); etc.
     else if (left_type_depth + 1 == right_type_depth)
     {
         auto type_index = right_arg_type->getTypeId();
         if (type_index == TypeIndex::Tuple)
-            block = createBlockFromCollection(DB::get<const Tuple &>(right_arg_value), set_element_types);
+            block = createBlockFromCollection(DB::get<const Tuple &>(right_arg_value), set_element_types, context);
         else if (type_index == TypeIndex::Array)
-            block = createBlockFromCollection(DB::get<const Array &>(right_arg_value), set_element_types);
+            block = createBlockFromCollection(DB::get<const Array &>(right_arg_value), set_element_types, context);
         else
             throw_unsupported_type(right_arg_type);
     }
     else
         throw_unsupported_type(right_arg_type);

-    SetPtr set = std::make_shared<Set>(size_limits, create_ordered_set);
+    SetPtr set = std::make_shared<Set>(size_limits, create_ordered_set, context);

     set->setHeader(block);
     set->insertFromBlock(block);
@@ -654,7 +654,7 @@ SetPtr ActionsMatcher::makeSet(const ASTFunction & node, Data & data, bool no_su
         return subquery_for_set.set;
     }

-    SetPtr set = std::make_shared<Set>(data.set_size_limit, false);
+    SetPtr set = std::make_shared<Set>(data.set_size_limit, false, data.context);

     /** The following happens for GLOBAL INs:
       * - in the addExternalStorage function, the IN (SELECT ...)
subquery is replaced with IN _data1, diff --git a/src/Interpreters/ExpressionAnalyzer.cpp b/src/Interpreters/ExpressionAnalyzer.cpp index d1be66df217..d0b44b91af7 100644 --- a/src/Interpreters/ExpressionAnalyzer.cpp +++ b/src/Interpreters/ExpressionAnalyzer.cpp @@ -291,7 +291,7 @@ void SelectQueryExpressionAnalyzer::tryMakeSetForIndexFromSubquery(const ASTPtr auto interpreter_subquery = interpretSubquery(subquery_or_table_name, context, {}, query_options); BlockIO res = interpreter_subquery->execute(); - SetPtr set = std::make_shared(settings.size_limits_for_set, true); + SetPtr set = std::make_shared(settings.size_limits_for_set, true, context); set->setHeader(res.in->getHeader()); res.in->readPrefix(); diff --git a/src/Interpreters/NullableUtils.cpp b/src/Interpreters/NullableUtils.cpp index fe2801f5d11..5c0202d1de3 100644 --- a/src/Interpreters/NullableUtils.cpp +++ b/src/Interpreters/NullableUtils.cpp @@ -5,7 +5,7 @@ namespace DB { -ColumnPtr extractNestedColumnsAndNullMap(ColumnRawPtrs & key_columns, ConstNullMapPtr & null_map) +ColumnPtr extractNestedColumnsAndNullMap(ColumnRawPtrs & key_columns, ConstNullMapPtr & null_map, bool exact_null) { ColumnPtr null_map_holder; @@ -38,7 +38,12 @@ ColumnPtr extractNestedColumnsAndNullMap(ColumnRawPtrs & key_columns, ConstNullM PaddedPODArray & mutable_null_map = assert_cast(*mutable_null_map_holder).getData(); const PaddedPODArray & other_null_map = column_nullable->getNullMapData(); for (size_t i = 0, size = mutable_null_map.size(); i < size; ++i) - mutable_null_map[i] |= other_null_map[i]; + { + if (exact_null) + mutable_null_map[i] &= other_null_map[i]; + else + mutable_null_map[i] |= other_null_map[i]; + } null_map_holder = std::move(mutable_null_map_holder); } diff --git a/src/Interpreters/NullableUtils.h b/src/Interpreters/NullableUtils.h index ee3193919cd..054835f8bef 100644 --- a/src/Interpreters/NullableUtils.h +++ b/src/Interpreters/NullableUtils.h @@ -8,6 +8,6 @@ namespace DB * In 'null_map' return a map of positions where at least one column was NULL. * @returns ownership column of null_map. */ -ColumnPtr extractNestedColumnsAndNullMap(ColumnRawPtrs & key_columns, ConstNullMapPtr & null_map); +ColumnPtr extractNestedColumnsAndNullMap(ColumnRawPtrs & key_columns, ConstNullMapPtr & null_map, bool exact_null = false); } diff --git a/src/Interpreters/Set.cpp b/src/Interpreters/Set.cpp index 3c79ea5174d..e63eff37047 100644 --- a/src/Interpreters/Set.cpp +++ b/src/Interpreters/Set.cpp @@ -87,6 +87,8 @@ void NO_INLINE Set::insertFromBlockImplCase( { if ((*null_map)[i]) { + has_null = true; + if constexpr (build_filter) { (*out_filter)[i] = false; @@ -138,7 +140,7 @@ void Set::setHeader(const Block & header) /// We will insert to the Set only keys, where all components are not NULL. ConstNullMapPtr null_map{}; - ColumnPtr null_map_holder = extractNestedColumnsAndNullMap(key_columns, null_map); + ColumnPtr null_map_holder = extractNestedColumnsAndNullMap(key_columns, null_map, context.getSettingsRef().transform_null_in); if (fill_set_elements) { @@ -228,7 +230,7 @@ static Field extractValueFromNode(const ASTPtr & node, const IDataType & type, c throw Exception("Incorrect element of set. Must be literal or constant expression.", ErrorCodes::INCORRECT_ELEMENT_OF_SET); } -void Set::createFromAST(const DataTypes & types, ASTPtr node, const Context & context) +void Set::createFromAST(const DataTypes & types, ASTPtr node) { /// Will form a block with values from the set. 
@@ -249,7 +251,7 @@ void Set::createFromAST(const DataTypes & types, ASTPtr node, const Context & co { Field value = extractValueFromNode(elem, *types[0], context); - if (!value.isNull()) + if (!value.isNull() || context.getSettingsRef().transform_null_in) columns[0]->insert(value); } else if (const auto * func = elem->as()) @@ -284,7 +286,7 @@ void Set::createFromAST(const DataTypes & types, ASTPtr node, const Context & co : extractValueFromNode(func->arguments->children[i], *types[i], context); /// If at least one of the elements of the tuple has an impossible (outside the range of the type) value, then the entire tuple too. - if (value.isNull()) + if (value.isNull() && !context.getSettings().transform_null_in) break; tuple_values[i] = value; @@ -348,7 +350,7 @@ ColumnPtr Set::execute(const Block & block, bool negative) const /// We will check existence in Set only for keys, where all components are not NULL. ConstNullMapPtr null_map{}; - ColumnPtr null_map_holder = extractNestedColumnsAndNullMap(key_columns, null_map); + ColumnPtr null_map_holder = extractNestedColumnsAndNullMap(key_columns, null_map, context.getSettingsRef().transform_null_in); executeOrdinary(key_columns, vec_res, negative, null_map); @@ -390,7 +392,12 @@ void NO_INLINE Set::executeImplCase( for (size_t i = 0; i < rows; ++i) { if (has_null_map && (*null_map)[i]) - vec_res[i] = negative; + { + if (has_null) + vec_res[i] = !negative; + else + vec_res[i] = negative; + } else { auto find_result = state.findKey(method.data, i, pool); diff --git a/src/Interpreters/Set.h b/src/Interpreters/Set.h index c9605d4e11e..da20ffc41b6 100644 --- a/src/Interpreters/Set.h +++ b/src/Interpreters/Set.h @@ -30,9 +30,9 @@ public: /// (that is useful only for checking that some value is in the set and may not store the original values), /// store all set elements in explicit form. /// This is needed for subsequent use for index. - Set(const SizeLimits & limits_, bool fill_set_elements_) + Set(const SizeLimits & limits_, bool fill_set_elements_, const Context & context_) : log(&Logger::get("Set")), - limits(limits_), fill_set_elements(fill_set_elements_) + limits(limits_), fill_set_elements(fill_set_elements_), context(context_) { } @@ -45,7 +45,7 @@ public: * 'types' - types of what are on the left hand side of IN. * 'node' - list of values: 1, 2, 3 or list of tuples: (1, 2), (3, 4), (5, 6). */ - void createFromAST(const DataTypes & types, ASTPtr node, const Context & context); + void createFromAST(const DataTypes & types, ASTPtr node); /** Create a Set from stream. * Call setHeader, then call insertFromBlock for each block. @@ -113,6 +113,10 @@ private: /// Do we need to additionally store all elements of the set in explicit form for subsequent use for index. bool fill_set_elements; + const Context & context; + + bool has_null = false; + /// Check if set contains all the data. bool is_created = false; diff --git a/src/Interpreters/SyntaxAnalyzer.cpp b/src/Interpreters/SyntaxAnalyzer.cpp index f93d11fa1da..a1560e26367 100644 --- a/src/Interpreters/SyntaxAnalyzer.cpp +++ b/src/Interpreters/SyntaxAnalyzer.cpp @@ -60,25 +60,40 @@ namespace using LogAST = DebugASTLog; /// set to true to enable logs -/// Select implementation of countDistinct based on settings. +/// Select implementation of a function based on settings. /// Important that it is done as query rewrite. It means rewritten query /// will be sent to remote servers during distributed query execution, /// and on all remote servers, function implementation will be same. 
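The comment above describes the whole mechanism: the rename happens as an AST rewrite, so a distributed query ships the already-customized function names to every remote server. A reduced sketch of such a rename visitor over a toy AST (ASTFunction here is a stand-in, not the real class):

#include <algorithm>
#include <cctype>
#include <memory>
#include <string>
#include <vector>

/// Toy AST node; the real ASTFunction carries much more.
struct ASTFunction
{
    std::string name;
    std::vector<std::shared_ptr<ASTFunction>> children;
};

static std::string toLowerCopy(std::string s)
{
    std::transform(s.begin(), s.end(), s.begin(),
                   [](unsigned char c) { return std::tolower(c); });
    return s;
}

/// In-depth visit: rename every function whose lower-cased name matches.
void customizeFunctions(ASTFunction & node, const std::string & from_lowercase, const std::string & to)
{
    if (toLowerCopy(node.name) == from_lowercase)
        node.name = to;
    for (auto & child : node.children)
        customizeFunctions(*child, from_lowercase, to);
}

/// With transform_null_in enabled, the real normalize() applies four such
/// rewrites: in -> nullIn, notin -> notNullIn, globalin -> globalNullIn,
/// globalnotin -> globalNotNullIn.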
+template struct CustomizeFunctionsData { using TypeToVisit = ASTFunction; - const String & count_distinct; + const String & customized_func_name; void visit(ASTFunction & func, ASTPtr &) { - if (Poco::toLower(func.name) == "countdistinct") - func.name = count_distinct; + if (Poco::toLower(func.name) == func_name) + { + func.name = customized_func_name; + } } }; -using CustomizeFunctionsMatcher = OneTypeMatcher; -using CustomizeFunctionsVisitor = InDepthNodeVisitor; +char countdistinct[] = "countdistinct"; +using CustomizeFunctionsVisitor = InDepthNodeVisitor>, true>; + +char in[] = "in"; +using CustomizeInVisitor = InDepthNodeVisitor>, true>; + +char notIn[] = "notin"; +using CustomizeNotInVisitor = InDepthNodeVisitor>, true>; + +char globalIn[] = "globalin"; +using CustomizeGlobalInVisitor = InDepthNodeVisitor>, true>; + +char globalNotIn[] = "globalnotin"; +using CustomizeGlobalNotInVisitor = InDepthNodeVisitor>, true>; /// Translate qualified names such as db.table.column, table.column, table_alias.column to names' normal form. @@ -889,6 +904,21 @@ void SyntaxAnalyzer::normalize(ASTPtr & query, Aliases & aliases, const Settings CustomizeFunctionsVisitor::Data data{settings.count_distinct_implementation}; CustomizeFunctionsVisitor(data).visit(query); + if (settings.transform_null_in) + { + CustomizeInVisitor::Data data_null_in{"nullIn"}; + CustomizeInVisitor(data_null_in).visit(query); + + CustomizeNotInVisitor::Data data_not_null_in{"notNullIn"}; + CustomizeNotInVisitor(data_not_null_in).visit(query); + + CustomizeGlobalInVisitor::Data data_global_null_in{"globalNullIn"}; + CustomizeGlobalInVisitor(data_global_null_in).visit(query); + + CustomizeGlobalNotInVisitor::Data data_global_not_null_in{"globalNotNullIn"}; + CustomizeGlobalNotInVisitor(data_global_not_null_in).visit(query); + } + /// Creates a dictionary `aliases`: alias -> ASTPtr QueryAliasesVisitor(aliases).visit(query); diff --git a/src/Interpreters/misc.h b/src/Interpreters/misc.h index e2f34375dc0..0fd0e12a4bb 100644 --- a/src/Interpreters/misc.h +++ b/src/Interpreters/misc.h @@ -5,12 +5,12 @@ namespace DB inline bool functionIsInOperator(const std::string & name) { - return name == "in" || name == "notIn"; + return name == "in" || name == "notIn" || name == "nullIn" || name == "notNullIn"; } inline bool functionIsInOrGlobalInOperator(const std::string & name) { - return functionIsInOperator(name) || name == "globalIn" || name == "globalNotIn"; + return functionIsInOperator(name) || name == "globalIn" || name == "globalNotIn" || name == "globalNullIn" || name == "globalNotNullIn"; } inline bool functionIsLikeOperator(const std::string & name) diff --git a/src/Storages/StorageSet.cpp b/src/Storages/StorageSet.cpp index 72ae46787c8..45e1f81b487 100644 --- a/src/Storages/StorageSet.cpp +++ b/src/Storages/StorageSet.cpp @@ -112,7 +112,7 @@ StorageSet::StorageSet( const ConstraintsDescription & constraints_, const Context & context_) : StorageSetOrJoinBase{relative_path_, table_id_, columns_, constraints_, context_}, - set(std::make_shared(SizeLimits(), false)) + set(std::make_shared(SizeLimits(), false, context_)) { Block header = getSampleBlock(); header = header.sortColumns(); @@ -127,7 +127,7 @@ void StorageSet::finishInsert() { set->finishInsert(); } size_t StorageSet::getSize() const { return set->getTotalRowCount(); } -void StorageSet::truncate(const ASTPtr &, const Context &, TableStructureWriteLockHolder &) +void StorageSet::truncate(const ASTPtr &, const Context & context, TableStructureWriteLockHolder &) { 
Poco::File(path).remove(true); Poco::File(path).createDirectories(); @@ -137,7 +137,7 @@ void StorageSet::truncate(const ASTPtr &, const Context &, TableStructureWriteLo header = header.sortColumns(); increment = 0; - set = std::make_shared(SizeLimits(), false); + set = std::make_shared(SizeLimits(), false, context); set->setHeader(header); } diff --git a/tests/queries/0_stateless/01231_operator_null_in.reference b/tests/queries/0_stateless/01231_operator_null_in.reference new file mode 100644 index 00000000000..7432b657191 --- /dev/null +++ b/tests/queries/0_stateless/01231_operator_null_in.reference @@ -0,0 +1,54 @@ +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 diff --git a/tests/queries/0_stateless/01231_operator_null_in.sql b/tests/queries/0_stateless/01231_operator_null_in.sql new file mode 100644 index 00000000000..12361373001 --- /dev/null +++ b/tests/queries/0_stateless/01231_operator_null_in.sql @@ -0,0 +1,93 @@ +DROP TABLE IF EXISTS null_in; +CREATE TABLE null_in (dt DateTime, idx int, i Nullable(int), s Nullable(String)) ENGINE = MergeTree() PARTITION BY dt ORDER BY idx; + +INSERT INTO null_in VALUES (1, 1, 1, '1') (2, 2, NULL, NULL) (3, 3, 3, '3') (4, 4, NULL, NULL) (5, 5, 5, '5'); + +SELECT count() == 2 FROM null_in WHERE i in (1, 3, NULL); +SELECT count() == 2 FROM null_in WHERE i in range(4); +SELECT count() == 2 FROM null_in WHERE s in ('1', '3', NULL); +SELECT count() == 2 FROM null_in WHERE i global in (1, 3, NULL); +SELECT count() == 2 FROM null_in WHERE i global in range(4); +SELECT count() == 2 FROM null_in WHERE s global in ('1', '3', NULL); + +SELECT count() == 1 FROM null_in WHERE i not in (1, 3, NULL); +SELECT count() == 1 FROM null_in WHERE i not in range(4); +SELECT count() == 1 FROM null_in WHERE s not in ('1', '3', NULL); +SELECT count() == 1 FROM null_in WHERE i global not in (1, 3, NULL); +SELECT count() == 1 FROM null_in WHERE i global not in range(4); +SELECT count() == 1 FROM null_in WHERE s global not in ('1', '3', NULL); + +SET transform_null_in = 1; + +SELECT count() == 4 FROM null_in WHERE i in (1, 3, NULL); +SELECT count() == 2 FROM null_in WHERE i in range(4); +SELECT count() == 4 FROM null_in WHERE s in ('1', '3', NULL); +SELECT count() == 4 FROM null_in WHERE i global in (1, 3, NULL); +SELECT count() == 2 FROM null_in WHERE i global in range(4); +SELECT count() == 4 FROM null_in WHERE s global in ('1', '3', NULL); + +SELECT count() == 1 FROM null_in WHERE i not in (1, 3, NULL); +SELECT count() == 3 FROM null_in WHERE i not in range(4); +SELECT count() == 1 FROM null_in WHERE s not in ('1', '3', NULL); +SELECT count() == 1 FROM null_in WHERE i global not in (1, 3, NULL); +SELECT count() == 3 FROM null_in WHERE i global not in range(4); +SELECT count() == 1 FROM null_in WHERE s global not in ('1', '3', NULL); + +SELECT count() == 3 FROM null_in WHERE i not in (1, 3); +SELECT count() == 3 FROM null_in WHERE i not in range(4); +SELECT count() == 3 FROM null_in WHERE s not in ('1', '3'); +SELECT count() == 3 FROM null_in WHERE i global not in (1, 3); +SELECT count() == 3 FROM null_in WHERE i global not in range(4); +SELECT count() == 3 FROM null_in WHERE s global not in ('1', '3'); + +DROP TABLE IF EXISTS null_in; + +DROP TABLE IF EXISTS null_in_subquery; +CREATE TABLE null_in_subquery (dt DateTime, idx int, i Nullable(UInt64)) ENGINE = MergeTree() PARTITION BY dt ORDER BY idx; +INSERT INTO null_in_subquery SELECT number % 3, number, 
number FROM system.numbers LIMIT 99999;
+
+SELECT count() == 33333 FROM null_in_subquery WHERE i in (SELECT i FROM null_in_subquery WHERE dt = 0);
+SELECT count() == 66666 FROM null_in_subquery WHERE i not in (SELECT i FROM null_in_subquery WHERE dt = 1);
+SELECT count() == 33333 FROM null_in_subquery WHERE i global in (SELECT i FROM null_in_subquery WHERE dt = 2);
+SELECT count() == 66666 FROM null_in_subquery WHERE i global not in (SELECT i FROM null_in_subquery WHERE dt = 0);
+
+-- For index column
+SELECT count() == 33333 FROM null_in_subquery WHERE idx in (SELECT idx FROM null_in_subquery WHERE dt = 0);
+SELECT count() == 66666 FROM null_in_subquery WHERE idx not in (SELECT idx FROM null_in_subquery WHERE dt = 1);
+SELECT count() == 33333 FROM null_in_subquery WHERE idx global in (SELECT idx FROM null_in_subquery WHERE dt = 2);
+SELECT count() == 66666 FROM null_in_subquery WHERE idx global not in (SELECT idx FROM null_in_subquery WHERE dt = 0);
+
+INSERT INTO null_in_subquery VALUES (0, 123456780, NULL);
+INSERT INTO null_in_subquery VALUES (1, 123456781, NULL);
+
+SELECT count() == 33335 FROM null_in_subquery WHERE i in (SELECT i FROM null_in_subquery WHERE dt = 0);
+SELECT count() == 66666 FROM null_in_subquery WHERE i not in (SELECT i FROM null_in_subquery WHERE dt = 1);
+SELECT count() == 33333 FROM null_in_subquery WHERE i in (SELECT i FROM null_in_subquery WHERE dt = 2);
+SELECT count() == 66668 FROM null_in_subquery WHERE i not in (SELECT i FROM null_in_subquery WHERE dt = 2);
+SELECT count() == 33335 FROM null_in_subquery WHERE i global in (SELECT i FROM null_in_subquery WHERE dt = 0);
+SELECT count() == 66666 FROM null_in_subquery WHERE i global not in (SELECT i FROM null_in_subquery WHERE dt = 1);
+SELECT count() == 33333 FROM null_in_subquery WHERE i global in (SELECT i FROM null_in_subquery WHERE dt = 2);
+SELECT count() == 66668 FROM null_in_subquery WHERE i global not in (SELECT i FROM null_in_subquery WHERE dt = 2);
+
+DROP TABLE IF EXISTS null_in_subquery;
+
+
+DROP TABLE IF EXISTS null_in_tuple;
+CREATE TABLE null_in_tuple (dt DateTime, idx int, t Tuple(Nullable(UInt64), Nullable(String))) ENGINE = MergeTree() PARTITION BY dt ORDER BY idx;
+INSERT INTO null_in_tuple VALUES (1, 1, (1, '1')) (2, 2, (2, NULL)) (3, 3, (NULL, '3')) (4, 4, (NULL, NULL));
+
+SET transform_null_in = 0;
+
+SELECT arraySort(x -> (x.1, x.2), groupArray(t)) == [(1, '1')] FROM null_in_tuple WHERE t in ((1, '1'), (NULL, NULL));
+SELECT arraySort(x -> (x.1, x.2), groupArray(t)) == [(2, NULL), (NULL, '3'), (NULL, NULL)] FROM null_in_tuple WHERE t not in ((1, '1'), (NULL, NULL));
+SELECT arraySort(x -> (x.1, x.2), groupArray(t)) == [(1, '1')] FROM null_in_tuple WHERE t global in ((1, '1'), (NULL, NULL));
+SELECT arraySort(x -> (x.1, x.2), groupArray(t)) == [(2, NULL), (NULL, '3'), (NULL, NULL)] FROM null_in_tuple WHERE t global not in ((1, '1'), (NULL, NULL));
+
+SET transform_null_in = 1;
+
+SELECT arraySort(x -> (x.1, x.2), groupArray(t)) == [(1, '1'), (NULL, NULL)] FROM null_in_tuple WHERE t in ((1, '1'), (NULL, NULL));
+SELECT arraySort(x -> (x.1, x.2), groupArray(t)) == [(2, NULL), (NULL, '3')] FROM null_in_tuple WHERE t not in ((1, '1'), (NULL, NULL));
+SELECT arraySort(x -> (x.1, x.2), groupArray(t)) == [(1, '1'), (NULL, NULL)] FROM null_in_tuple WHERE t global in ((1, '1'), (NULL, NULL));
+SELECT arraySort(x -> (x.1, x.2), groupArray(t)) == [(2, NULL), (NULL, '3')] FROM null_in_tuple WHERE t global not in ((1, '1'), (NULL, NULL));
+
+DROP TABLE IF EXISTS null_in_tuple;
From
621d26bcf7fb1d04c47144a2c9f4767a0c0b4f38 Mon Sep 17 00:00:00 2001 From: Alexander Kazakov Date: Tue, 7 Apr 2020 14:34:35 +0300 Subject: [PATCH 145/484] Better timeout diagnostics message --- src/Common/RWLock.h | 2 +- src/Storages/IStorage.cpp | 11 ++++++++--- src/Storages/IStorage.h | 3 +++ 3 files changed, 12 insertions(+), 4 deletions(-) diff --git a/src/Common/RWLock.h b/src/Common/RWLock.h index edc22cffaa0..43366192cf8 100644 --- a/src/Common/RWLock.h +++ b/src/Common/RWLock.h @@ -50,7 +50,7 @@ public: /// Use as query_id to acquire a lock outside the query context. inline static const String NO_QUERY = String(); - inline static const auto default_locking_timeout = std::chrono::milliseconds(120000); + inline static const auto default_locking_timeout_ms = std::chrono::milliseconds(120000); private: /// Group of locking requests that should be granted simultaneously diff --git a/src/Storages/IStorage.cpp b/src/Storages/IStorage.cpp index 345ac6d5aac..ab3a750db16 100644 --- a/src/Storages/IStorage.cpp +++ b/src/Storages/IStorage.cpp @@ -315,13 +315,18 @@ bool IStorage::isVirtualColumn(const String & column_name) const return getColumns().get(column_name).is_virtual; } -RWLockImpl::LockHolder tryLockTimed(const RWLock & rwlock, RWLockImpl::Type type, const String & query_id) +RWLockImpl::LockHolder IStorage::tryLockTimed(const RWLock & rwlock, RWLockImpl::Type type, const String & query_id) { - auto lock_holder = rwlock->getLock(type, query_id, RWLockImpl::default_locking_timeout); + auto lock_holder = rwlock->getLock(type, query_id, RWLockImpl::default_locking_timeout_ms); if (!lock_holder) + { + const String type_str = type == RWLockImpl::Type::Read ? "READ" : "WRITE"; throw Exception( - "Locking attempt timed out! Possible deadlock avoided. Client should retry.", + type_str + " locking attempt on \"" + getStorageID().getFullTableName() + + "\" has timed out! (" + toString(RWLockImpl::default_locking_timeout_ms.count()) + "ms) " + "Possible deadlock avoided. Client should retry.", ErrorCodes::DEADLOCK_AVOIDED); + } return lock_holder; } diff --git a/src/Storages/IStorage.h b/src/Storages/IStorage.h index d3cede6e5c8..581fc8a67e7 100644 --- a/src/Storages/IStorage.h +++ b/src/Storages/IStorage.h @@ -195,6 +195,9 @@ private: IndicesDescription indices; ConstraintsDescription constraints; +private: + RWLockImpl::LockHolder tryLockTimed(const RWLock & rwlock, RWLockImpl::Type type, const String & query_id); + public: /// Acquire this lock if you need the table structure to remain constant during the execution of /// the query. 
If will_add_new_data is true, this means that the query will add new data to the table
From 592093b749e4080333ac6563f6fd33a6493a6ac7 Mon Sep 17 00:00:00 2001
From: Anton Popov
Date: Tue, 7 Apr 2020 14:47:08 +0300
Subject: [PATCH 146/484] add check with polymorphic parts

---
 docker/test/stateless/Dockerfile   | 1 +
 tests/config/polymorphic_parts.xml | 5 +++++
 2 files changed, 6 insertions(+)
 create mode 100644 tests/config/polymorphic_parts.xml

diff --git a/docker/test/stateless/Dockerfile b/docker/test/stateless/Dockerfile
index 82e2f3a6373..4ec48ac3fd4 100644
--- a/docker/test/stateless/Dockerfile
+++ b/docker/test/stateless/Dockerfile
@@ -73,6 +73,7 @@ CMD dpkg -i package_folder/clickhouse-common-static_*.deb; \
     ln -s /usr/share/clickhouse-test/config/server.key /etc/clickhouse-server/; \
     ln -s /usr/share/clickhouse-test/config/server.crt /etc/clickhouse-server/; \
     ln -s /usr/share/clickhouse-test/config/dhparam.pem /etc/clickhouse-server/; \
+    if [ -n $USE_POLYMORPHIC_PARTS ] && [ $USE_POLYMORPHIC_PARTS -eq 1 ]; then ln -s /usr/share/clickhouse-test/config/polymorphic_parts.xml /etc/clickhouse-server/config.d/; fi; \
     ln -sf /usr/share/clickhouse-test/config/client_config.xml /etc/clickhouse-client/config.xml; \
     service zookeeper start; sleep 5; \
     service clickhouse-server start && sleep 5 && clickhouse-test --testname --shard --zookeeper $ADDITIONAL_OPTIONS $SKIP_TESTS_OPTION 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee test_output/test_result.txt
diff --git a/tests/config/polymorphic_parts.xml b/tests/config/polymorphic_parts.xml
new file mode 100644
index 00000000000..2924aa5c69d
--- /dev/null
+++ b/tests/config/polymorphic_parts.xml
@@ -0,0 +1,5 @@
+<yandex>
+    <merge_tree>
+        <min_bytes_for_wide_part>10485760</min_bytes_for_wide_part>
+    </merge_tree>
+</yandex>

From 816eccde11f36e22aa65a07efc00c0b5138fe05e Mon Sep 17 00:00:00 2001
From: alesapin
Date: Tue, 7 Apr 2020 16:34:57 +0300
Subject: [PATCH 147/484] Add config to debug test mode

---
 docker/test/stateless_with_coverage/run.sh | 1 +
 1 file changed, 1 insertion(+)

diff --git a/docker/test/stateless_with_coverage/run.sh b/docker/test/stateless_with_coverage/run.sh
index 8848e25c47c..2b55f658a73 100755
--- a/docker/test/stateless_with_coverage/run.sh
+++ b/docker/test/stateless_with_coverage/run.sh
@@ -48,6 +48,7 @@ ln -s /usr/share/clickhouse-test/config/zookeeper.xml /etc/clickhouse-server/con
     ln -s /usr/share/clickhouse-test/config/macros.xml /etc/clickhouse-server/config.d/; \
     ln -s /usr/share/clickhouse-test/config/disks.xml /etc/clickhouse-server/config.d/; \
     ln -s /usr/share/clickhouse-test/config/secure_ports.xml /etc/clickhouse-server/config.d/; \
+    ln -s /usr/share/clickhouse-test/config/clusters.xml /etc/clickhouse-server/config.d/; \
     ln -s /usr/share/clickhouse-test/config/server.key /etc/clickhouse-server/; \
     ln -s /usr/share/clickhouse-test/config/server.crt /etc/clickhouse-server/; \
     ln -s /usr/share/clickhouse-test/config/dhparam.pem /etc/clickhouse-server/; \

From 834e458f26f23faf66c78a6ce0be9248d66de127 Mon Sep 17 00:00:00 2001
From: Olga Khvostikova
Date: Wed, 1 Apr 2020 18:06:20 +0300
Subject: [PATCH 148/484] Fix behaviour of globs in filepath with leading zeros

---
 src/Common/parseGlobs.cpp                        | 10 +++++++++-
 tests/integration/test_globs_in_filepath/test.py |  1 +
 2 files changed, 10 insertions(+), 1 deletion(-)

diff --git a/src/Common/parseGlobs.cpp b/src/Common/parseGlobs.cpp
index 79b441441bc..31e7c2eb612 100644
--- a/src/Common/parseGlobs.cpp
+++ b/src/Common/parseGlobs.cpp
@@ -4,6 +4,7 @@
 #include
 #include
 #include
+#include <iomanip>


 namespace DB
@@ -46,10 +47,17 @@ std::string makeRegexpPatternFromGlobs(const std::string & initial_str_with_globs)
initial_str_with_glob std::istringstream iss_range(buffer); iss_range >> range_begin >> point >> point >> range_end; assert(iss_range.good()); + bool leading_zeros = buffer[0] == '0'; + size_t num_len = std::to_string(range_end).size(); + if (leading_zeros) + oss_for_replacing << std::setfill('0') << std::setw(num_len); oss_for_replacing << range_begin; for (size_t i = range_begin + 1; i <= range_end; ++i) { - oss_for_replacing << '|' << i; + oss_for_replacing << '|'; + if (leading_zeros) + oss_for_replacing << std::setfill('0') << std::setw(num_len); + oss_for_replacing << i; } } else diff --git a/tests/integration/test_globs_in_filepath/test.py b/tests/integration/test_globs_in_filepath/test.py index 70bdb7777fb..c85c39a8838 100644 --- a/tests/integration/test_globs_in_filepath/test.py +++ b/tests/integration/test_globs_in_filepath/test.py @@ -64,6 +64,7 @@ def test_linear_structure(start_cluster): ("file?", "10"), ("nothing*", "0"), ("file{0..9}{0..9}{0..9}", "10"), + ("file{000..999}", "10"), ("file???", "10"), ("file*", "20"), ("a_{file,data}", "4"), From dada1c931cf046d51b1667d62cd5a8571f59f07e Mon Sep 17 00:00:00 2001 From: Olga Khvostikova Date: Wed, 1 Apr 2020 18:18:16 +0300 Subject: [PATCH 149/484] Add docs --- docs/ru/operations/table_engines/hdfs.md | 2 +- docs/ru/query_language/table_functions/file.md | 2 +- docs/ru/query_language/table_functions/hdfs.md | 2 +- docs/zh/operations/table_engines/hdfs.md | 2 +- docs/zh/query_language/table_functions/file.md | 2 +- docs/zh/query_language/table_functions/hdfs.md | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/ru/operations/table_engines/hdfs.md b/docs/ru/operations/table_engines/hdfs.md index b55eba731ff..4f892b1e492 100644 --- a/docs/ru/operations/table_engines/hdfs.md +++ b/docs/ru/operations/table_engines/hdfs.md @@ -54,7 +54,7 @@ SELECT * FROM hdfs_engine_table LIMIT 2 - `*` — Заменяет любое количество любых символов кроме `/`, включая отсутствие символов. - `?` — Заменяет ровно один любой символ. - `{some_string,another_string,yet_another_one}` — Заменяет любую из строк `'some_string', 'another_string', 'yet_another_one'`. -- `{N..M}` — Заменяет любое число в интервале от `N` до `M` включительно. +- `{N..M}` — Заменяет любое число в интервале от `N` до `M` включительно (может содержать ведущие нули). Конструкция с `{}` аналогична табличной функции [remote](../../query_language/table_functions/remote.md). diff --git a/docs/ru/query_language/table_functions/file.md b/docs/ru/query_language/table_functions/file.md index e61fddadd2d..d415b20858b 100644 --- a/docs/ru/query_language/table_functions/file.md +++ b/docs/ru/query_language/table_functions/file.md @@ -50,7 +50,7 @@ LIMIT 2 - `*` — Заменяет любое количество любых символов кроме `/`, включая отсутствие символов. - `?` — Заменяет ровно один любой символ. - `{some_string,another_string,yet_another_one}` — Заменяет любую из строк `'some_string', 'another_string', 'yet_another_one'`. -- `{N..M}` — Заменяет любое число в интервале от `N` до `M` включительно. +- `{N..M}` — Заменяет любое число в интервале от `N` до `M` включительно (может содержать ведущие нули). Конструкция с `{}` аналогична табличной функции [remote](remote.md). 
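Aside: the zero-padding logic added to `makeRegexpPatternFromGlobs` above is easy to check in isolation. Below is a self-contained sketch of the same expansion against the standard library only; `expandRange` is an illustrative re-implementation for clarity, not the ClickHouse function. Note that the padding width is taken from the digit count of `range_end`, so `{000..999}` pads to three digits:

```cpp
#include <iomanip>
#include <iostream>
#include <sstream>
#include <string>

std::string expandRange(const std::string & buffer)   /// e.g. "098..101", the text between { and }
{
    std::istringstream iss_range(buffer);
    size_t range_begin = 0;
    size_t range_end = 0;
    char point;
    iss_range >> range_begin >> point >> point >> range_end;

    bool leading_zeros = buffer[0] == '0';
    size_t num_len = std::to_string(range_end).size();

    std::ostringstream oss;
    for (size_t i = range_begin; i <= range_end; ++i)
    {
        if (i != range_begin)
            oss << '|';
        if (leading_zeros)
            oss << std::setfill('0') << std::setw(num_len);  /// Pad each number to the width of range_end.
        oss << i;
    }
    return oss.str();
}

int main()
{
    std::cout << expandRange("098..101") << '\n';  /// prints "098|099|100|101"
}
```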
diff --git a/docs/ru/query_language/table_functions/hdfs.md b/docs/ru/query_language/table_functions/hdfs.md index ab88545c36a..e90f27a2eca 100644 --- a/docs/ru/query_language/table_functions/hdfs.md +++ b/docs/ru/query_language/table_functions/hdfs.md @@ -38,7 +38,7 @@ LIMIT 2 - `*` — Заменяет любое количество любых символов кроме `/`, включая отсутствие символов. - `?` — Заменяет ровно один любой символ. - `{some_string,another_string,yet_another_one}` — Заменяет любую из строк `'some_string', 'another_string', 'yet_another_one'`. -- `{N..M}` — Заменяет любое число в интервале от `N` до `M` включительно. +- `{N..M}` — Заменяет любое число в интервале от `N` до `M` включительно (может содержать ведущие нули). Конструкция с `{}` аналогична табличной функции [remote](remote.md). diff --git a/docs/zh/operations/table_engines/hdfs.md b/docs/zh/operations/table_engines/hdfs.md index 07bd0800aa5..576bbc49d72 100644 --- a/docs/zh/operations/table_engines/hdfs.md +++ b/docs/zh/operations/table_engines/hdfs.md @@ -62,7 +62,7 @@ Multiple path components can have globs. For being processed file should exists - `*` — Substitutes any number of any characters except `/` including empty string. - `?` — Substitutes any single character. - `{some_string,another_string,yet_another_one}` — Substitutes any of strings `'some_string', 'another_string', 'yet_another_one'`. -- `{N..M}` — Substitutes any number in range from N to M including both borders. +- `{N..M}` — Substitutes any number in range from N to M including both borders (could include leading zeros). Constructions with `{}` are similar to the [remote](../../query_language/table_functions/remote.md) table function. diff --git a/docs/zh/query_language/table_functions/file.md b/docs/zh/query_language/table_functions/file.md index 95c3a9378bc..88bbc2a3453 100644 --- a/docs/zh/query_language/table_functions/file.md +++ b/docs/zh/query_language/table_functions/file.md @@ -61,7 +61,7 @@ Multiple path components can have globs. For being processed file should exists - `*` — Substitutes any number of any characters except `/` including empty string. - `?` — Substitutes any single character. - `{some_string,another_string,yet_another_one}` — Substitutes any of strings `'some_string', 'another_string', 'yet_another_one'`. -- `{N..M}` — Substitutes any number in range from N to M including both borders. +- `{N..M}` — Substitutes any number in range from N to M including both borders (could include leading zeros). Constructions with `{}` are similar to the [remote table function](../../query_language/table_functions/remote.md)). diff --git a/docs/zh/query_language/table_functions/hdfs.md b/docs/zh/query_language/table_functions/hdfs.md index f636b7d19bb..22e64665179 100644 --- a/docs/zh/query_language/table_functions/hdfs.md +++ b/docs/zh/query_language/table_functions/hdfs.md @@ -44,7 +44,7 @@ Multiple path components can have globs. For being processed file should exists - `*` — Substitutes any number of any characters except `/` including empty string. - `?` — Substitutes any single character. - `{some_string,another_string,yet_another_one}` — Substitutes any of strings `'some_string', 'another_string', 'yet_another_one'`. -- `{N..M}` — Substitutes any number in range from N to M including both borders. +- `{N..M}` — Substitutes any number in range from N to M including both borders (could include leading zeros). Constructions with `{}` are similar to the [remote table function](../../query_language/table_functions/remote.md)). 
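To make the documented behaviour concrete: once a `{N..M}` range with leading zeros is translated into a regex alternation, only fixed-width names match. A small check, with the pattern string assumed to match what the code above would emit for this input:

```cpp
#include <cassert>
#include <regex>

int main()
{
    /// Assumed output shape of makeRegexpPatternFromGlobs("file{098..101}").
    std::regex pattern("file(098|099|100|101)");
    assert(std::regex_match("file099", pattern));
    assert(!std::regex_match("file99", pattern));  /// The unpadded name no longer matches.
}
```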
From 385e8839dc82417780353c61e78cee0e3f3fd642 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Tue, 7 Apr 2020 17:15:14 +0300 Subject: [PATCH 150/484] Quote expected strings in parse error messages --- src/IO/ReadHelpers.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/IO/ReadHelpers.cpp b/src/IO/ReadHelpers.cpp index 75682906242..dccb413af2c 100644 --- a/src/IO/ReadHelpers.cpp +++ b/src/IO/ReadHelpers.cpp @@ -74,12 +74,12 @@ UInt128 stringToUUID(const String & str) void NO_INLINE throwAtAssertionFailed(const char * s, ReadBuffer & buf) { WriteBufferFromOwnString out; - out << "Cannot parse input: expected " << escape << s; + out << "Cannot parse input: expected " << quote << s; if (buf.eof()) out << " at end of stream."; else - out << " before: " << escape << String(buf.position(), std::min(SHOW_CHARS_ON_SYNTAX_ERROR, buf.buffer().end() - buf.position())); + out << " before: " << quote << String(buf.position(), std::min(SHOW_CHARS_ON_SYNTAX_ERROR, buf.buffer().end() - buf.position())); throw Exception(out.str(), ErrorCodes::CANNOT_PARSE_INPUT_ASSERTION_FAILED); } From 82a87bc0d2dd84afc32da7efd97e60f36134708b Mon Sep 17 00:00:00 2001 From: alesapin Date: Tue, 7 Apr 2020 18:28:29 +0300 Subject: [PATCH 151/484] Fix replicated tables startup when updating from old version --- src/Storages/StorageReplicatedMergeTree.cpp | 16 +++++- .../test_no_local_metadata_node/__init__.py | 0 .../test_no_local_metadata_node/test.py | 54 +++++++++++++++++++ 3 files changed, 69 insertions(+), 1 deletion(-) create mode 100644 tests/integration/test_no_local_metadata_node/__init__.py create mode 100644 tests/integration/test_no_local_metadata_node/test.py diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 7107328e4ff..2f598630f65 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -256,6 +256,15 @@ StorageReplicatedMergeTree::StorageReplicatedMergeTree( } else { + /// In old tables this node may missing + String replica_metadata; + bool replica_metadata_exists = current_zookeeper->tryGet(replica_path + "/metadata", replica_metadata); + if (!replica_metadata_exists || replica_metadata.empty()) + { + ReplicatedMergeTreeTableMetadata current_metadata(*this); + current_zookeeper->createOrUpdate(replica_path + "/metadata", current_metadata.toString(), zkutil::CreateMode::Persistent); + } + checkTableStructure(replica_path); checkParts(skip_sanity_checks); @@ -263,8 +272,13 @@ StorageReplicatedMergeTree::StorageReplicatedMergeTree( { metadata_version = parse(current_zookeeper->get(replica_path + "/metadata_version")); } - else /// This replica was created on old version, so we have to take version of global node + else { + /// This replica was created with old clickhouse version, so we have + /// to take version of global node. If somebody will alter our + /// table, than we will fill /metadata_version node in zookeeper. + /// Otherwise on the next restart we can again use version from + /// shared metadata node because it was not changed. 
Coordination::Stat metadata_stat; current_zookeeper->get(zookeeper_path + "/metadata", &metadata_stat); metadata_version = metadata_stat.version; diff --git a/tests/integration/test_no_local_metadata_node/__init__.py b/tests/integration/test_no_local_metadata_node/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_no_local_metadata_node/test.py b/tests/integration/test_no_local_metadata_node/test.py new file mode 100644 index 00000000000..ef240cd710c --- /dev/null +++ b/tests/integration/test_no_local_metadata_node/test.py @@ -0,0 +1,54 @@ +import time +import pytest + +from helpers.cluster import ClickHouseCluster + +cluster = ClickHouseCluster(__file__) +node1 = cluster.add_instance('node1', with_zookeeper=True) + +@pytest.fixture(scope="module") +def start_cluster(): + try: + cluster.start() + + yield cluster + finally: + cluster.shutdown() + + +def test_table_start_without_metadata(start_cluster): + node1.query(""" + CREATE TABLE test (date Date) + ENGINE = ReplicatedMergeTree('/clickhouse/table/test_table', '1') + ORDER BY tuple() + """) + + node1.query("INSERT INTO test VALUES(toDate('2019-12-01'))") + + assert node1.query("SELECT date FROM test") == "2019-12-01\n" + + # some fake alter + node1.query("ALTER TABLE test MODIFY COLUMN date Date DEFAULT toDate('2019-10-01')") + + assert node1.query("SELECT date FROM test") == "2019-12-01\n" + + node1.query("DETACH TABLE test") + zk_cli = cluster.get_kazoo_client('zoo1') + + # simulate update from old version + zk_cli.delete("/clickhouse/table/test_table/replicas/1/metadata") + zk_cli.delete("/clickhouse/table/test_table/replicas/1/metadata_version") + + node1.query("ATTACH TABLE test") + + assert node1.query("SELECT date FROM test") == "2019-12-01\n" + + node1.query("ALTER TABLE test MODIFY COLUMN date Date DEFAULT toDate('2019-09-01')") + + node1.query("DETACH TABLE test") + + zk_cli.set("/clickhouse/table/test_table/replicas/1/metadata", "") + + node1.query("ATTACH TABLE test") + + assert node1.query("SELECT date FROM test") == "2019-12-01\n" From 292579cad004d9f0c73f414067a2a80b5b373b3c Mon Sep 17 00:00:00 2001 From: Ivan Blinkov Date: Tue, 7 Apr 2020 18:35:55 +0300 Subject: [PATCH 152/484] Docs fixes/improvements (#10090) --- docs/en/commercial/index.md | 1 + docs/en/operations/system_tables.md | 42 +++++++++++++++-------------- docs/ru/changelog/index.md | 4 --- docs/tools/convert_toc.py | 6 ++--- docs/zh/changelog/index.md | 4 --- website/js/base.js | 10 +++++++ website/js/docs.js | 36 +++++++++++++++++++++---- website/robots.txt | 11 +++----- website/sitemap.xml | 9 ++++--- website/sitemap_static.xml | 4 +-- website/templates/common_js.html | 3 +-- 11 files changed, 80 insertions(+), 50 deletions(-) mode change 100644 => 120000 docs/ru/changelog/index.md mode change 100644 => 120000 docs/zh/changelog/index.md diff --git a/docs/en/commercial/index.md b/docs/en/commercial/index.md index 3824a634726..3e0a0ac236a 100644 --- a/docs/en/commercial/index.md +++ b/docs/en/commercial/index.md @@ -1,4 +1,5 @@ --- +toc_title: Commercial toc_folder_title: Commercial toc_priority: 70 --- diff --git a/docs/en/operations/system_tables.md b/docs/en/operations/system_tables.md index 34fc37f6415..4ef7eda5a94 100644 --- a/docs/en/operations/system_tables.md +++ b/docs/en/operations/system_tables.md @@ -839,32 +839,31 @@ If this query doesn’t return anything, it means that everything is fine. 
## system.settings {#system-tables-system-settings} -Contains information about session settings for current user. +Contains information about session settings for current user. Columns: -- `name` ([String](../data_types/string.md)) — Setting name. -- `value` ([String](../data_types/string.md)) — Setting value. -- `changed` ([UInt8](../data_types/int_uint.md#uint-ranges)) — Shows whether a setting is changed from its default value. -- `description` ([String](../data_types/string.md)) — Short setting description. -- `min` ([Nullable](../data_types/nullable.md)([String](../data_types/string.md))) — Minimum value of the setting, if any is set via [constraints](settings/constraints_on_settings.md#constraints-on-settings). If the setting has no minimum value, contains [NULL](../query_language/syntax.md#null-literal). -- `max` ([Nullable](../data_types/nullable.md)([String](../data_types/string.md))) — Maximum value of the setting, if any is set via [constraints](settings/constraints_on_settings.md#constraints-on-settings). If the setting has no maximum value, contains [NULL](../query_language/syntax.md#null-literal). -- `readonly` ([UInt8](../data_types/int_uint.md#uint-ranges)) — Shows whether the current user can change the setting: - - `0` — Current user can change the setting. - - `1` — Current user can't change the setting. - +- `name` ([String](../sql_reference/data_types/string.md)) — Setting name. +- `value` ([String](../sql_reference/data_types/string.md)) — Setting value. +- `changed` ([UInt8](../sql_reference/data_types/int_uint.md#uint-ranges)) — Shows whether a setting is changed from its default value. +- `description` ([String](../sql_reference/data_types/string.md)) — Short setting description. +- `min` ([Nullable](../sql_reference/data_types/nullable.md)([String](../sql_reference/data_types/string.md))) — Minimum value of the setting, if any is set via [constraints](settings/constraints_on_settings.md#constraints-on-settings). If the setting has no minimum value, contains [NULL](../sql_reference/syntax.md#null-literal). +- `max` ([Nullable](../sql_reference/data_types/nullable.md)([String](../sql_reference/data_types/string.md))) — Maximum value of the setting, if any is set via [constraints](settings/constraints_on_settings.md#constraints-on-settings). If the setting has no maximum value, contains [NULL](../sql_reference/syntax.md#null-literal). +- `readonly` ([UInt8](../sql_reference/data_types/int_uint.md#uint-ranges)) — Shows whether the current user can change the setting: + - `0` — Current user can change the setting. + - `1` — Current user can’t change the setting. **Example** The following example shows how to get information about settings which name contains `min_i`. -```sql +``` sql SELECT * FROM system.settings WHERE name LIKE '%min_i%' ``` -```text +``` text ┌─name────────────────────────────────────────┬─value─────┬─changed─┬─description───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬─min──┬─max──┬─readonly─┐ │ min_insert_block_size_rows │ 1048576 │ 0 │ Squash blocks passed to INSERT query to specified size in rows, if blocks are not big enough. │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 0 │ │ min_insert_block_size_bytes │ 268435456 │ 0 │ Squash blocks passed to INSERT query to specified size in bytes, if blocks are not big enough. 
│ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 0 │ @@ -874,20 +873,23 @@ Using of `WHERE changed` can be useful, for example, when you want to check: -- Whether settings in configuration files are loaded correctly and are in use. -- Settings that changed in the current session. +- Whether settings in configuration files are loaded correctly and are in use. +- Settings that changed in the current session. -```sql + + +``` sql SELECT * FROM system.settings WHERE changed AND name='load_balancing' ``` **See also** -- [Settings](settings/index.md#settings) -- [Permissions for Queries](settings/permissions_for_queries.md#settings_readonly) -- [Constraints on Settings](settings/constraints_on_settings.md) +- [Settings](settings/index.md#settings) +- [Permissions for Queries](settings/permissions_for_queries.md#settings_readonly) +- [Constraints on Settings](settings/constraints_on_settings.md) + +## system.table\_engines {#system.table_engines} -## system.table_engines ``` text ┌─name───────────────────┬─value───────┐ │ max_threads │ 8 │ diff --git a/docs/ru/changelog/index.md b/docs/ru/changelog/index.md deleted file mode 100644 index 1a89e03c333..00000000000 --- a/docs/ru/changelog/index.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -en_copy: true ---- - -../../../CHANGELOG.md \ No newline at end of file diff --git a/docs/ru/changelog/index.md b/docs/ru/changelog/index.md new file mode 120000 index 00000000000..79b747aee1b --- /dev/null +++ b/docs/ru/changelog/index.md @@ -0,0 +1 @@ +../../../CHANGELOG.md \ No newline at end of file diff --git a/docs/tools/convert_toc.py b/docs/tools/convert_toc.py index 9b4c71d2532..18178e3be72 100755 --- a/docs/tools/convert_toc.py +++ b/docs/tools/convert_toc.py @@ -125,9 +125,9 @@ def sync_translation(): lang_meta, lang_content = util.read_md_file(lang_src) en_meta.update(lang_meta) - for src, dst in redirects.items(): - lang_content = lang_content.replace('(' + src, '(' + dst) - lang_content = lang_content.replace('../' + src, '../' + dst) + for src_link, dst_link in redirects.items(): + lang_content = lang_content.replace('(' + src_link, '(' + dst_link) + lang_content = lang_content.replace('../' + src_link, '../' + dst_link) util.write_md_file(lang_dst, en_meta, lang_content) subprocess.check_call(f'git add {lang_dst}', shell=True) diff --git a/docs/zh/changelog/index.md b/docs/zh/changelog/index.md deleted file mode 100644 index 1a89e03c333..00000000000 --- a/docs/zh/changelog/index.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -en_copy: true ---- - -../../../CHANGELOG.md \ No newline at end of file diff --git a/docs/zh/changelog/index.md b/docs/zh/changelog/index.md new file mode 120000 index 00000000000..79b747aee1b --- /dev/null +++ b/docs/zh/changelog/index.md @@ -0,0 +1 @@ +../../../CHANGELOG.md \ No newline at end of file diff --git a/website/js/base.js b/website/js/base.js index d8fc8eaf639..2c43e435f48 100644 --- a/website/js/base.js +++ b/website/js/base.js @@ -38,6 +38,16 @@ window.history.replaceState('', document.title, dst); } }); + + var top_nav = $('#top-nav.sticky-top'); + if (window.location.hash.length > 1 && top_nav.length) { + var offset = $(window.location.hash).offset().top - top_nav.height() * 1.5; + $('html, body').animate({ + scrollTop: offset + }, 70); + } + + (function (d, w, c) { (w[c] = w[c] || []).push(function() { try { diff --git a/website/js/docs.js b/website/js/docs.js index 084053f2c7d..364531f0521 100644 --- a/website/js/docs.js +++ b/website/js/docs.js @@ -1,8 +1,9 @@ function onResize() { var window_height = $(window).height(); - $('#sidebar,
#toc.toc-right').css({ - 'height': (window_height - $('#top-nav').height()) + 'px' - }); + var window_width = $(window).width(); + var is_wide = window_width >= 768; + var docs_top_nav = $('#top-nav.bg-dark-alt'); + $('body').attr('data-offset', window_height.toString()); var sidebar = $('#sidebar'); var languages = $('#languages-dropdown') @@ -12,17 +13,33 @@ function onResize() { } else { single_page_switch.removeClass('float-right'); } - if ($(window).width() >= 768) { + if (is_wide) { sidebar.removeClass('collapse'); languages.detach().appendTo($('#languages-wrapper')); - } else { sidebar.addClass('collapse'); languages.detach().insertBefore(single_page_switch); languages.addClass('float-right'); single_page_switch.removeClass('float-right'); } + if (window_height < 800 && is_wide) { + docs_top_nav.removeClass('sticky-top'); + $('#sidebar, #toc.toc-right').css({ + 'height': window_height, + 'position': 'sticky', + 'top': 0 + }); + } else { + var top_nav_height = docs_top_nav.height(); + docs_top_nav.addClass('sticky-top'); + $('#sidebar, #toc.toc-right').css({ + 'height': (window_height - top_nav_height) + 'px', + 'position': 'fixed', + 'top': top_nav_height + 16 + }); + } } + $(document).ready(function () { onResize(); $('#sidebar .nav-link.active').parents('.collapse').each(function() { @@ -49,6 +66,15 @@ $(document).ready(function () { } }); }); + $('#sidebar').on('shown.bs.collapse', function () { + onResize(); + $('body').on('touchmove', function (e) { + e.preventDefault(); + }); + }); + $('#sidebar').on('hidden.bs.collapse', function () { + $('body').on('touchmove', function () {}); + }); var headers = $('#content h1, #content h2, #content h3, #content h4, #content h5, #content h6'); headers.mouseenter(function() { diff --git a/website/robots.txt b/website/robots.txt index 2af539491b1..f9970836f18 100644 --- a/website/robots.txt +++ b/website/robots.txt @@ -1,18 +1,15 @@ User-agent: * Disallow: /docs/en/single/ -Disallow: /docs/ru/single/ Disallow: /docs/zh/single/ +Disallow: /docs/es/single/ +Disallow: /docs/fr/single/ +Disallow: /docs/ru/single/ Disallow: /docs/ja/single/ Disallow: /docs/fa/single/ Disallow: /docs/v1* Disallow: /docs/v2* Disallow: /docs/v3* -Disallow: /docs/en/search.html -Disallow: /docs/ru/search.html -Disallow: /docs/ja/search.html -Disallow: /docs/zh/search.html -Disallow: /docs/fa/search.html Disallow: /cdn-cgi/ Allow: / Host: https://clickhouse.tech -Sitemap: https://clickhouse.tech/docs/sitemap.xml +Sitemap: https://clickhouse.tech/sitemap.xml diff --git a/website/sitemap.xml b/website/sitemap.xml index 9305d9d0454..a147404ec6f 100644 --- a/website/sitemap.xml +++ b/website/sitemap.xml @@ -3,14 +3,17 @@ https://clickhouse.tech/docs/en/sitemap.xml + + https://clickhouse.tech/docs/zh/sitemap.xml + https://clickhouse.tech/docs/es/sitemap.xml - https://clickhouse.tech/docs/ru/sitemap.xml + https://clickhouse.tech/docs/fr/sitemap.xml - https://clickhouse.tech/docs/zh/sitemap.xml + https://clickhouse.tech/docs/ru/sitemap.xml https://clickhouse.tech/docs/ja/sitemap.xml @@ -19,6 +22,6 @@ https://clickhouse.tech/docs/fa/sitemap.xml - https://clickhouse.tech/docs/sitemap_static.xml + https://clickhouse.tech/sitemap_static.xml diff --git a/website/sitemap_static.xml b/website/sitemap_static.xml index 7a08e066874..751ad4e8ce2 100644 --- a/website/sitemap_static.xml +++ b/website/sitemap_static.xml @@ -6,10 +6,10 @@ https://clickhouse.tech/benchmark.html - daily + weekly https://clickhouse.tech/benchmark_hardware.html - daily + weekly diff --git 
a/website/templates/common_js.html index 52f0e8dae32..b2bed146503 100644 --- a/website/templates/common_js.html +++ b/website/templates/common_js.html @@ -1,6 +1,5 @@ - From 27777c1d2b37627a7593088019a08bc51ab9ad59 Mon Sep 17 00:00:00 2001 From: alesapin Date: Tue, 7 Apr 2020 18:28:29 +0300 Subject: [PATCH 153/484] Better comment --- src/Storages/StorageReplicatedMergeTree.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 2f598630f65..58e9a50a345 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -256,7 +256,7 @@ StorageReplicatedMergeTree::StorageReplicatedMergeTree( } else { - /// In old tables this node may missing + /// In old tables this node may be missing or empty String replica_metadata; bool replica_metadata_exists = current_zookeeper->tryGet(replica_path + "/metadata", replica_metadata); if (!replica_metadata_exists || replica_metadata.empty()) From 2654f131cc19c26623045e9b7e6c2a481fa253c1 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Tue, 7 Apr 2020 20:08:46 +0300 Subject: [PATCH 154/484] Update checkDataPart.cpp --- src/Storages/MergeTree/checkDataPart.cpp | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/Storages/MergeTree/checkDataPart.cpp b/src/Storages/MergeTree/checkDataPart.cpp index 03728980c69..a4cfa6b78b9 100644 --- a/src/Storages/MergeTree/checkDataPart.cpp +++ b/src/Storages/MergeTree/checkDataPart.cpp @@ -63,6 +63,7 @@ IMergeTreeDataPart::Checksums checkDataPart( /// Real checksums based on contents of data. Must correspond to checksums.txt. If not - it means the data is broken. IMergeTreeDataPart::Checksums checksums_data; + /// This function calculates checksums for both the compressed and the decompressed contents of a compressed file. auto checksum_compressed_file = [](const DiskPtr & disk_, const String & file_path) { auto file_buf = disk_->readFile(file_path); @@ -78,6 +79,7 @@ IMergeTreeDataPart::Checksums checkDataPart( }; }; + /// First, calculate checksums for the column data files. if (part_type == MergeTreeDataPartType::COMPACT) { const auto & file_name = MergeTreeDataPartCompact::DATA_FILE_NAME_WITH_EXTENSION; @@ -99,7 +101,7 @@ IMergeTreeDataPart::Checksums checkDataPart( throw Exception("Unknown type in part " + path, ErrorCodes::UNKNOWN_PART_TYPE); } - /// Checksums from file checksums.txt. May be absent. If present, they are subsequently compared with the actual data checksums. + /// Checksums for the rest of the files listed in checksums.txt. May be absent. If present, they are subsequently compared with the actual data checksums. IMergeTreeDataPart::Checksums checksums_txt; if (require_checksums || disk->exists(path + "checksums.txt")) @@ -114,11 +116,14 @@ IMergeTreeDataPart::Checksums checkDataPart( { const String & file_name = it->name(); auto checksum_it = checksums_data.files.find(file_name); + + /// Skip files that we already calculated. Also skip metadata files that are not checksummed. if (checksum_it == checksums_data.files.end() && file_name != "checksums.txt" && file_name != "columns.txt") { auto txt_checksum_it = checksum_files_txt.find(file_name); if (txt_checksum_it == checksum_files_txt.end() || txt_checksum_it->second.uncompressed_size == 0) { + /// The file is not compressed.
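+ /// It is hashed below as one whole-file checksum; compressed files instead go through checksum_compressed_file(), which records both the compressed and the uncompressed hash.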
auto file_buf = disk->readFile(it->path()); HashingReadBuffer hashing_buf(*file_buf); hashing_buf.tryIgnore(std::numeric_limits::max()); From ead277b71705f4f3b7fe257fc3bcd61b79d30909 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Tue, 7 Apr 2020 20:23:26 +0300 Subject: [PATCH 155/484] Update StorageReplicatedMergeTree.cpp --- src/Storages/StorageReplicatedMergeTree.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 58e9a50a345..31456c8d1f1 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -276,7 +276,7 @@ StorageReplicatedMergeTree::StorageReplicatedMergeTree( { /// This replica was created with old clickhouse version, so we have /// to take version of global node. If somebody will alter our - /// table, than we will fill /metadata_version node in zookeeper. + /// table, then we will fill /metadata_version node in zookeeper. /// Otherwise on the next restart we can again use version from /// shared metadata node because it was not changed. Coordination::Stat metadata_stat; From 2704d81aaefca33f2b0d0b1b790acc2bb3badb03 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Tue, 7 Apr 2020 20:38:48 +0300 Subject: [PATCH 156/484] Fix pipeline expansion in PipelineExecutor. --- src/Processors/Executors/PipelineExecutor.cpp | 20 +++++++++---------- src/Processors/Executors/PipelineExecutor.h | 14 ++++++------- src/Processors/Port.h | 4 +++- 3 files changed, 19 insertions(+), 19 deletions(-) diff --git a/src/Processors/Executors/PipelineExecutor.cpp b/src/Processors/Executors/PipelineExecutor.cpp index 9108f5ac2a1..b804fe2e50b 100644 --- a/src/Processors/Executors/PipelineExecutor.cpp +++ b/src/Processors/Executors/PipelineExecutor.cpp @@ -96,7 +96,7 @@ bool PipelineExecutor::addEdges(UInt64 node) { const IProcessor * proc = &it->getOutputPort().getProcessor(); auto output_port_number = proc->getOutputPortNumber(&it->getOutputPort()); - add_edge(*it, proc, graph[node].backEdges, true, from_input, output_port_number, &graph[node].post_updated_input_ports); + add_edge(*it, proc, graph[node].backEdges, true, from_input, output_port_number, graph[node].post_updated_input_ports.get()); } } @@ -111,7 +111,7 @@ bool PipelineExecutor::addEdges(UInt64 node) { const IProcessor * proc = &it->getInputPort().getProcessor(); auto input_port_number = proc->getInputPortNumber(&it->getInputPort()); - add_edge(*it, proc, graph[node].directEdges, false, input_port_number, from_output, &graph[node].post_updated_output_ports); + add_edge(*it, proc, graph[node].directEdges, false, input_port_number, from_output, graph[node].post_updated_output_ports.get()); } } @@ -246,7 +246,7 @@ bool PipelineExecutor::tryAddProcessorToStackIfUpdated(Edge & edge, Queue & queu auto & node = graph[edge.to]; - std::unique_lock lock(node.status_mutex); + std::unique_lock lock(*node.status_mutex); ExecStatus status = node.status; @@ -340,22 +340,22 @@ bool PipelineExecutor::prepareProcessor(UInt64 pid, size_t thread_number, Queue } { - for (auto & edge_id : node.post_updated_input_ports) + for (auto & edge_id : *node.post_updated_input_ports) { auto edge = static_cast(edge_id); updated_back_edges.emplace_back(edge); edge->update_info.trigger(); } - for (auto & edge_id : node.post_updated_output_ports) + for (auto & edge_id : *node.post_updated_output_ports) { auto edge = static_cast(edge_id); updated_direct_edges.emplace_back(edge); 
edge->update_info.trigger(); } - node.post_updated_input_ports.clear(); - node.post_updated_output_ports.clear(); + node.post_updated_input_ports->clear(); + node.post_updated_output_ports->clear(); } } @@ -402,7 +402,7 @@ bool PipelineExecutor::prepareProcessor(UInt64 pid, size_t thread_number, Queue while (!stack.empty()) { auto item = stack.top(); - if (!prepareProcessor(item, thread_number, queue, std::unique_lock(graph[item].status_mutex))) + if (!prepareProcessor(item, thread_number, queue, std::unique_lock(*graph[item].status_mutex))) return false; stack.pop(); @@ -519,7 +519,7 @@ void PipelineExecutor::executeSingleThread(size_t thread_num, size_t num_threads auto prepare_processor = [&](UInt64 pid, Queue & queue) { - if (!prepareProcessor(pid, thread_num, queue, std::unique_lock(graph[pid].status_mutex))) + if (!prepareProcessor(pid, thread_num, queue, std::unique_lock(*graph[pid].status_mutex))) finish(); }; @@ -729,7 +729,7 @@ void PipelineExecutor::executeImpl(size_t num_threads) UInt64 proc = stack.top(); stack.pop(); - prepareProcessor(proc, 0, queue, std::unique_lock(graph[proc].status_mutex)); + prepareProcessor(proc, 0, queue, std::unique_lock(*graph[proc].status_mutex)); while (!queue.empty()) { diff --git a/src/Processors/Executors/PipelineExecutor.h b/src/Processors/Executors/PipelineExecutor.h index 673151bd5eb..c0ce9053e5a 100644 --- a/src/Processors/Executors/PipelineExecutor.h +++ b/src/Processors/Executors/PipelineExecutor.h @@ -104,10 +104,10 @@ private: Edges backEdges; ExecStatus status; - std::mutex status_mutex; + std::unique_ptr status_mutex; - std::vector post_updated_input_ports; - std::vector post_updated_output_ports; + std::unique_ptr post_updated_input_ports; + std::unique_ptr post_updated_output_ports; /// Last state for profiling. IProcessor::Status last_processor_status = IProcessor::Status::NeedData; @@ -124,12 +124,10 @@ private: execution_state->processor = processor; execution_state->processors_id = processor_id; execution_state->has_quota = processor->hasQuota(); - } - Node(Node && other) noexcept - : processor(other.processor), status(other.status) - , execution_state(std::move(other.execution_state)) - { + status_mutex = std::make_unique(); + post_updated_input_ports = std::make_unique(); + post_updated_output_ports = std::make_unique(); } }; diff --git a/src/Processors/Port.h b/src/Processors/Port.h index e200b8c1ecb..63fef27e81a 100644 --- a/src/Processors/Port.h +++ b/src/Processors/Port.h @@ -30,7 +30,9 @@ class Port public: struct UpdateInfo { - std::vector * update_list = nullptr; + using UpdateList = std::vector; + + UpdateList * update_list = nullptr; void * id = nullptr; UInt64 version = 0; UInt64 prev_version = 0; From 6b5b22cc5c4557ac01991a411872a089d865e3d2 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Tue, 7 Apr 2020 20:53:14 +0300 Subject: [PATCH 157/484] Fix build. 
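Context for this one-liner, offered as a hedged sketch rather than a claim about the original design: the previous patch turned Node's status_mutex into a unique_ptr, because std::mutex is neither copyable nor movable while graph nodes live in containers that need movable elements, so every use site now has to dereference the pointer. The pattern in miniature (standalone example, not ClickHouse code):

```cpp
#include <memory>
#include <mutex>
#include <vector>

struct Node
{
    /// Indirection keeps Node movable while each mutex itself stays pinned in memory.
    std::unique_ptr<std::mutex> status_mutex = std::make_unique<std::mutex>();
};

int main()
{
    std::vector<Node> graph;
    graph.emplace_back();  /// OK: Node is move-constructible thanks to unique_ptr.
    std::lock_guard guard(*graph[0].status_mutex);  /// Note the dereference.
}
```

The compile error fixed here is exactly a missed dereference of that pointer.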
--- src/Processors/Executors/PipelineExecutor.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Processors/Executors/PipelineExecutor.cpp b/src/Processors/Executors/PipelineExecutor.cpp index b804fe2e50b..f2d2477991e 100644 --- a/src/Processors/Executors/PipelineExecutor.cpp +++ b/src/Processors/Executors/PipelineExecutor.cpp @@ -221,7 +221,7 @@ bool PipelineExecutor::expandPipeline(Stack & stack, UInt64 pid) if (addEdges(node)) { - std::lock_guard guard(graph[node].status_mutex); + std::lock_guard guard(*graph[node].status_mutex); for (; num_back_edges < graph[node].backEdges.size(); ++num_back_edges) graph[node].updated_input_ports.emplace_back(num_back_edges); From aadb05cbe31418dfd52603957489a8f7316b5290 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Tue, 7 Apr 2020 20:55:39 +0300 Subject: [PATCH 158/484] performance comparison --- docker/test/performance-comparison/compare.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/test/performance-comparison/compare.sh b/docker/test/performance-comparison/compare.sh index 7ed2aab66bb..f89631522f4 100755 --- a/docker/test/performance-comparison/compare.sh +++ b/docker/test/performance-comparison/compare.sh @@ -148,7 +148,7 @@ function run_tests TIMEFORMAT=$(printf "$test_name\t%%3R\t%%3U\t%%3S\n") # the grep is to filter out set -x output and keep only time output - { time "$script_dir/perf.py" --host=localhost --port=9001 --host=localhost --port=9002 "$test" > "$test_name-raw.tsv" 2> "$test_name-err.log" ; } 2>&1 >/dev/null | grep -v ^+ >> "wall-clock-times.tsv" || continue + { time "$script_dir/perf.py" --host localhost localhost --port 9001 9002 -- "$test" > "$test_name-raw.tsv" 2> "$test_name-err.log" ; } 2>&1 >/dev/null | grep -v ^+ >> "wall-clock-times.tsv" || continue # The test completed with zero status, so we treat stderr as warnings mv "$test_name-err.log" "$test_name-warn.log" From 6f3b70009772e72db38a539b7937a5dde1f5d4d4 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Tue, 7 Apr 2020 20:59:13 +0300 Subject: [PATCH 159/484] Added test. --- ...32_preparing_sets_race_condition.reference | 10 ++ .../01232_preparing_sets_race_condition.sh | 117 ++++++++++++++++++ 2 files changed, 127 insertions(+) create mode 100644 tests/queries/0_stateless/01232_preparing_sets_race_condition.reference create mode 100755 tests/queries/0_stateless/01232_preparing_sets_race_condition.sh diff --git a/tests/queries/0_stateless/01232_preparing_sets_race_condition.reference b/tests/queries/0_stateless/01232_preparing_sets_race_condition.reference new file mode 100644 index 00000000000..2fc36ed5c97 --- /dev/null +++ b/tests/queries/0_stateless/01232_preparing_sets_race_condition.reference @@ -0,0 +1,10 @@ +8 +8 +8 +8 +8 +8 +8 +8 +8 +8 diff --git a/tests/queries/0_stateless/01232_preparing_sets_race_condition.sh b/tests/queries/0_stateless/01232_preparing_sets_race_condition.sh new file mode 100755 index 00000000000..5f7b76c0e99 --- /dev/null +++ b/tests/queries/0_stateless/01232_preparing_sets_race_condition.sh @@ -0,0 +1,117 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +. 
$CURDIR/../shell_config.sh + +set -o errexit +set -o pipefail + + +echo " + DROP TABLE if exists tableA; + DROP TABLE if exists tableB; + + create table tableA (id UInt64, col1 UInt64, colDate Date) engine = ReplacingMergeTree(colDate, id, 8192); + create table tableB (id UInt64, Aid UInt64, colDate Date) engine = ReplacingMergeTree(colDate, id, 8192); + + insert into tableA select number, number % 10, addDays(toDate('2020-01-01'), - number % 1000) from numbers(100000); + insert into tableB select number, number % 100000, addDays(toDate('2020-01-01'), number % 90) from numbers(50000000); +" | $CLICKHOUSE_CLIENT -n + +for i in {1..10}; do echo " +SELECT tableName +FROM + ( + SELECT + col1, + 'T1_notJoin1' AS tableName, + count(*) AS c + FROM tableA + GROUP BY col1 + UNION ALL + SELECT + a.col1, + 'T2_filteredAfterJoin1' AS tableName, + count(*) AS c + FROM tableB AS b + INNER JOIN tableA AS a ON a.id = b.Aid + WHERE b.colDate = '2020-01-01' + GROUP BY a.col1 + UNION ALL + SELECT + a.col1, + 'T3_filteredAfterJoin2' AS tableName, + count(*) AS c + FROM tableB AS b + INNER JOIN + tableA AS a + ON a.id = b.Aid + WHERE b.colDate = '2020-01-02' + GROUP BY a.col1 + UNION ALL + SELECT + a.col1, + 'T4_filteredBeforeJoin1' AS tableName, + count(*) AS c + FROM tableA AS a + INNER JOIN + ( + SELECT + Aid + FROM tableB + WHERE colDate = '2020-01-01' + ) AS b ON a.id = b.Aid + GROUP BY a.col1 + UNION ALL + SELECT + a.col1, + 'T5_filteredBeforeJoin2' AS tableName, + count(*) AS c + FROM tableA AS a + INNER JOIN + ( + SELECT + Aid + FROM tableB + WHERE colDate = '2020-01-02' + ) AS b ON a.id = b.Aid + GROUP BY a.col1 + UNION ALL + SELECT + a.col1, + 'T6_filteredAfterJoin3' AS tableName, + count(*) AS c + FROM tableB AS b + INNER JOIN tableA AS a ON a.id = b.Aid + WHERE b.colDate = '2020-01-03' + GROUP BY a.col1 + UNION ALL + SELECT + col1, + 'T7_notJoin2' AS tableName, + count(*) AS c + FROM tableA + GROUP BY col1 + UNION ALL + SELECT + a.col1, + 'T8_filteredBeforeJoin3' AS tableName, + count(*) AS c + FROM tableA AS a + INNER JOIN + ( + SELECT + Aid + FROM tableB + WHERE colDate = '2020-01-03' + ) AS b ON a.id = b.Aid + GROUP BY a.col1 + ) AS a +GROUP BY tableName +ORDER BY tableName ASC; +" | $CLICKHOUSE_CLIENT -n | wc -l ; done; + +echo " + DROP TABLE tableA; + DROP TABLE tableB; +" | $CLICKHOUSE_CLIENT -n From b010ab8ce450b3a7850337d4fac79dc0812699a2 Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Tue, 7 Apr 2020 21:33:19 +0300 Subject: [PATCH 160/484] Enable access management in stateless tests #2. 
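(Editorial note on what the symlink below does, based on the standard test-config layout rather than on anything shown in this diff: files dropped into /etc/clickhouse-server/users.d/ are merged into the main users configuration at startup, and access_management.xml from the shared test configs is expected to set the `access_management` flag for the `default` user, allowing SQL-driven access control statements such as CREATE USER and GRANT to run in the coverage variant of the stateless tests as well.)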
--- docker/test/stateless_with_coverage/run.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/docker/test/stateless_with_coverage/run.sh b/docker/test/stateless_with_coverage/run.sh index 8848e25c47c..18258a32e69 100755 --- a/docker/test/stateless_with_coverage/run.sh +++ b/docker/test/stateless_with_coverage/run.sh @@ -42,6 +42,7 @@ ln -s /usr/share/clickhouse-test/config/zookeeper.xml /etc/clickhouse-server/con ln -s /usr/share/clickhouse-test/config/query_masking_rules.xml /etc/clickhouse-server/config.d/; \ ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/; \ ln -s /usr/share/clickhouse-test/config/readonly.xml /etc/clickhouse-server/users.d/; \ + ln -s /usr/share/clickhouse-test/config/access_management.xml /etc/clickhouse-server/users.d/; \ ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/; \ ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/; \ ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/; \ From d8b76232fe5083c7e2f34d2912d968860d73e078 Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Tue, 7 Apr 2020 21:23:25 +0300 Subject: [PATCH 161/484] Remove useless logging from ExternalLoader. --- src/Interpreters/ExternalLoader.cpp | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/src/Interpreters/ExternalLoader.cpp b/src/Interpreters/ExternalLoader.cpp index 41358da4965..893d9aa61f9 100644 --- a/src/Interpreters/ExternalLoader.cpp +++ b/src/Interpreters/ExternalLoader.cpp @@ -427,8 +427,6 @@ public: if (configs == new_configs) return; - LOG_TRACE(log, "Configuration of reloadable objects has changed"); - configs = new_configs; std::vector removed_names; @@ -437,7 +435,6 @@ public: auto new_config_it = new_configs->find(name); if (new_config_it == new_configs->end()) { - LOG_TRACE(log, "Reloadable object '" << name << "' is removed"); removed_names.emplace_back(name); } else @@ -448,8 +445,6 @@ public: if (!config_is_same) { /// Configuration has been changed. - LOG_TRACE(log, "Configuration has changed for reloadable " - "object '" << info.name << "'"); info.object_config = new_config; if (info.triedToLoad()) @@ -457,7 +452,7 @@ public: /// The object has been tried to load before, so it is currently in use or was in use /// and we should try to reload it with the new config. LOG_TRACE(log, "Will reload '" << name << "'" - " because its configuration has changed and" + " because its configuration has been changed and" " there were attempts to load it before"); startLoading(info, true); } @@ -473,7 +468,7 @@ public: Info & info = infos.emplace(name, Info{name, config}).first->second; if (always_load_everything) { - LOG_TRACE(log, "Will reload new object '" << name << "'" + LOG_TRACE(log, "Will load '" << name << "'" " because always_load_everything flag is set."); startLoading(info); } @@ -482,7 +477,15 @@ public: /// Remove from the map those objects which were removed from the configuration. 
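+ /// Log the unloading only for objects that were actually loaded or are still loading; entries that were never used are dropped silently.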
for (const String & name : removed_names) - infos.erase(name); + { + if (auto it = infos.find(name); it != infos.end()) + { + const auto & info = it->second; + if (info.loaded() || info.isLoading()) + LOG_TRACE(log, "Unloading '" << name << "' because its configuration has been removed or detached"); + infos.erase(it); + } + } /// Maybe we have just added new objects which require to be loaded /// or maybe we have just removed object which were been loaded, From c2f5e3c4ada552a3edd6ea5b9895affa8ff55b75 Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Mon, 23 Mar 2020 00:40:32 +0300 Subject: [PATCH 162/484] Improve declaration of access rights: single place in code instead of three. --- src/Access/AccessFlags.h | 483 +++++++----------- src/Access/AccessType.h | 418 ++++++--------- src/Access/ContextAccess.cpp | 12 +- src/Functions/FunctionsExternalDictionaries.h | 6 +- src/Parsers/ParserGrantQuery.cpp | 10 - 5 files changed, 345 insertions(+), 584 deletions(-) diff --git a/src/Access/AccessFlags.h b/src/Access/AccessFlags.h index 2c5307bbd1a..c8f57fcd419 100644 --- a/src/Access/AccessFlags.h +++ b/src/Access/AccessFlags.h @@ -5,12 +5,19 @@ #include #include #include +#include +#include #include #include namespace DB { +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + /// Represents a combination of access types which can be granted globally, on databases, tables, columns, etc. /// For example "SELECT, CREATE USER" is an access type. class AccessFlags @@ -175,9 +182,10 @@ public: const Flags & getDictionaryFlags() const { return all_flags_for_target[DICTIONARY]; } private: - enum Target + enum NodeType { - UNKNOWN_TARGET, + UNKNOWN = -2, + GROUP = -1, GLOBAL, DATABASE, TABLE, @@ -186,46 +194,190 @@ private: DICTIONARY, }; - static constexpr size_t NUM_TARGETS = static_cast(DICTIONARY) + 1; - struct Node; using NodePtr = std::unique_ptr; - using Nodes = std::vector; - - template - static Nodes nodes(Args&& ... args) - { - Nodes res; - ext::push_back(res, std::move(args)...); - return res; - } struct Node { - std::string_view keyword; - std::vector aliases; + const String keyword; + NodeType node_type; + AccessType type = AccessType::NONE; + Strings aliases; Flags flags; - Target target = UNKNOWN_TARGET; - Nodes children; + std::vector children; - Node(std::string_view keyword_, size_t flag_, Target target_) - : keyword(keyword_), target(target_) + Node(String keyword_, NodeType node_type_ = UNKNOWN) : keyword(std::move(keyword_)), node_type(node_type_) {} + + void setFlag(size_t flag) { flags.set(flag); } + + void addChild(NodePtr child) { - flags.set(flag_); + flags |= child->flags; + children.push_back(std::move(child)); } - - Node(std::string_view keyword_, Nodes children_) - : keyword(keyword_), children(std::move(children_)) - { - for (const auto & child : children) - flags |= child->flags; - } - - template - Node(std::string_view keyword_, NodePtr first_child, Args &&... 
other_children) - : Node(keyword_, nodes(std::move(first_child), std::move(other_children)...)) {} }; + static String replaceUnderscoreWithSpace(const std::string_view & str) + { + String res{str}; + boost::replace_all(res, "_", " "); + return res; + } + + static Strings splitAliases(const std::string_view & str) + { + Strings aliases; + boost::split(aliases, str, boost::is_any_of(",")); + for (auto & alias : aliases) + boost::trim(alias); + return aliases; + } + + static void makeFlagsToKeywordTreeNode( + AccessType type, + const std::string_view & name, + const std::string_view & aliases, + NodeType node_type, + const std::string_view & parent_group_name, + std::unordered_map & nodes, + std::unordered_map & owned_nodes, + size_t & next_flag) + { + NodePtr node; + auto keyword = replaceUnderscoreWithSpace(name); + auto it = owned_nodes.find(keyword); + if (it != owned_nodes.end()) + { + node = std::move(it->second); + owned_nodes.erase(it); + } + else + { + if (nodes.contains(keyword)) + throw Exception(keyword + " declared twice", ErrorCodes::LOGICAL_ERROR); + node = std::make_unique(keyword, node_type); + nodes[node->keyword] = node.get(); + } + + node->type = type; + node->node_type = node_type; + node->aliases = splitAliases(aliases); + if (node_type != GROUP) + node->setFlag(next_flag++); + + bool has_parent_group = (parent_group_name != "NONE"); + if (!has_parent_group) + { + std::string_view keyword_as_string_view = node->keyword; + owned_nodes[keyword_as_string_view] = std::move(node); + return; + } + + auto parent_keyword = replaceUnderscoreWithSpace(parent_group_name); + auto it_parent = nodes.find(parent_keyword); + if (it_parent == nodes.end()) + { + auto parent_node = std::make_unique(parent_keyword); + it_parent = nodes.emplace(parent_node->keyword, parent_node.get()).first; + assert(!owned_nodes.contains(parent_node->keyword)); + std::string_view parent_keyword_as_string_view = parent_node->keyword; + owned_nodes[parent_keyword_as_string_view] = std::move(parent_node); + } + it_parent->second->addChild(std::move(node)); + } + + void makeFlagsToKeywordTree() + { + std::unordered_map owned_nodes; + std::unordered_map nodes; + size_t next_flag = 0; + +#define MAKE_ACCESS_FLAGS_TO_KEYWORD_TREE_NODE(name, aliases, node_type, parent_group_name) \ + makeFlagsToKeywordTreeNode(AccessType::name, #name, aliases, node_type, #parent_group_name, nodes, owned_nodes, next_flag); + + APPLY_FOR_ACCESS_TYPES(MAKE_ACCESS_FLAGS_TO_KEYWORD_TREE_NODE) + +#undef MAKE_ACCESS_FLAGS_TO_KEYWORD_TREE_NODE + + if (!owned_nodes.contains("NONE")) + throw Exception("'NONE' not declared", ErrorCodes::LOGICAL_ERROR); + if (!owned_nodes.contains("ALL")) + throw Exception("'ALL' not declared", ErrorCodes::LOGICAL_ERROR); + + flags_to_keyword_tree = std::move(owned_nodes["ALL"]); + none_node = std::move(owned_nodes["NONE"]); + owned_nodes.erase("ALL"); + owned_nodes.erase("NONE"); + + if (!owned_nodes.empty()) + { + const auto & unused_node = *(owned_nodes.begin()->second); + if (unused_node.node_type == UNKNOWN) + throw Exception("Parent group '" + unused_node.keyword + "' not found", ErrorCodes::LOGICAL_ERROR); + else + throw Exception("Access type '" + unused_node.keyword + "' should have parent group", ErrorCodes::LOGICAL_ERROR); + } + } + + void makeKeywordToFlagsMap(Node * start_node = nullptr) + { + if (!start_node) + { + makeKeywordToFlagsMap(none_node.get()); + start_node = flags_to_keyword_tree.get(); + } + + start_node->aliases.emplace_back(start_node->keyword); + for (auto & alias : 
start_node->aliases) + { + boost::to_upper(alias); + keyword_to_flags_map[alias] = start_node->flags; + } + + for (auto & child : start_node->children) + makeKeywordToFlagsMap(child.get()); + } + + void makeAccessTypeToFlagsMapping(Node * start_node = nullptr) + { + if (!start_node) + { + makeAccessTypeToFlagsMapping(none_node.get()); + start_node = flags_to_keyword_tree.get(); + } + + size_t index = static_cast(start_node->type); + access_type_to_flags_mapping.resize(std::max(index + 1, access_type_to_flags_mapping.size())); + access_type_to_flags_mapping[index] = start_node->flags; + + for (auto & child : start_node->children) + makeAccessTypeToFlagsMapping(child.get()); + } + + void collectAllFlags(const Node * start_node = nullptr) + { + if (!start_node) + { + start_node = flags_to_keyword_tree.get(); + all_flags = start_node->flags; + } + if (start_node->node_type != GROUP) + { + assert(static_cast(start_node->node_type) < std::size(all_flags_for_target)); + all_flags_for_target[start_node->node_type] |= start_node->flags; + } + for (const auto & child : start_node->children) + collectAllFlags(child.get()); + } + + Impl() + { + makeFlagsToKeywordTree(); + makeKeywordToFlagsMap(); + makeAccessTypeToFlagsMapping(); + collectAllFlags(); + } + static void flagsToKeywordsRec(const Flags & flags_, std::vector & keywords, const Node & start_node) { Flags matching_flags = (flags_ & start_node.flags); @@ -243,275 +395,12 @@ private: } } - static NodePtr makeFlagsToKeywordTree() - { - size_t next_flag = 0; - Nodes all; - - auto show_databases = std::make_unique("SHOW DATABASES", next_flag++, DATABASE); - auto show_tables = std::make_unique("SHOW TABLES", next_flag++, TABLE); - auto show_columns = std::make_unique("SHOW COLUMNS", next_flag++, COLUMN); - auto show_dictionaries = std::make_unique("SHOW DICTIONARIES", next_flag++, DICTIONARY); - auto show = std::make_unique("SHOW", std::move(show_databases), std::move(show_tables), std::move(show_columns), std::move(show_dictionaries)); - ext::push_back(all, std::move(show)); - - auto select = std::make_unique("SELECT", next_flag++, COLUMN); - auto insert = std::make_unique("INSERT", next_flag++, COLUMN); - ext::push_back(all, std::move(select), std::move(insert)); - - auto update = std::make_unique("UPDATE", next_flag++, COLUMN); - ext::push_back(update->aliases, "ALTER UPDATE"); - auto delet = std::make_unique("DELETE", next_flag++, TABLE); - ext::push_back(delet->aliases, "ALTER DELETE"); - - auto add_column = std::make_unique("ADD COLUMN", next_flag++, COLUMN); - add_column->aliases.push_back("ALTER ADD COLUMN"); - auto modify_column = std::make_unique("MODIFY COLUMN", next_flag++, COLUMN); - modify_column->aliases.push_back("ALTER MODIFY COLUMN"); - auto drop_column = std::make_unique("DROP COLUMN", next_flag++, COLUMN); - drop_column->aliases.push_back("ALTER DROP COLUMN"); - auto comment_column = std::make_unique("COMMENT COLUMN", next_flag++, COLUMN); - comment_column->aliases.push_back("ALTER COMMENT COLUMN"); - auto clear_column = std::make_unique("CLEAR COLUMN", next_flag++, COLUMN); - clear_column->aliases.push_back("ALTER CLEAR COLUMN"); - auto rename_column = std::make_unique("RENAME COLUMN", next_flag++, COLUMN); - rename_column->aliases.push_back("ALTER RENAME COLUMN"); - - auto alter_column = std::make_unique( - "ALTER COLUMN", - std::move(add_column), - std::move(modify_column), - std::move(drop_column), - std::move(comment_column), - std::move(clear_column), - std::move(rename_column)); - - auto alter_order_by = 
std::make_unique("ALTER ORDER BY", next_flag++, TABLE); - alter_order_by->aliases.push_back("MODIFY ORDER BY"); - alter_order_by->aliases.push_back("ALTER MODIFY ORDER BY"); - auto add_index = std::make_unique("ADD INDEX", next_flag++, TABLE); - add_index->aliases.push_back("ALTER ADD INDEX"); - auto drop_index = std::make_unique("DROP INDEX", next_flag++, TABLE); - drop_index->aliases.push_back("ALTER DROP INDEX"); - auto materialize_index = std::make_unique("MATERIALIZE INDEX", next_flag++, TABLE); - materialize_index->aliases.push_back("ALTER MATERIALIZE INDEX"); - auto clear_index = std::make_unique("CLEAR INDEX", next_flag++, TABLE); - clear_index->aliases.push_back("ALTER CLEAR INDEX"); - auto index = std::make_unique("INDEX", std::move(alter_order_by), std::move(add_index), std::move(drop_index), std::move(materialize_index), std::move(clear_index)); - index->aliases.push_back("ALTER INDEX"); - - auto add_constraint = std::make_unique("ADD CONSTRAINT", next_flag++, TABLE); - add_constraint->aliases.push_back("ALTER ADD CONSTRAINT"); - auto drop_constraint = std::make_unique("DROP CONSTRAINT", next_flag++, TABLE); - drop_constraint->aliases.push_back("ALTER DROP CONSTRAINT"); - auto alter_constraint = std::make_unique("CONSTRAINT", std::move(add_constraint), std::move(drop_constraint)); - alter_constraint->aliases.push_back("ALTER CONSTRAINT"); - - auto modify_ttl = std::make_unique("MODIFY TTL", next_flag++, TABLE); - modify_ttl->aliases.push_back("ALTER MODIFY TTL"); - auto materialize_ttl = std::make_unique("MATERIALIZE TTL", next_flag++, TABLE); - materialize_ttl->aliases.push_back("ALTER MATERIALIZE TTL"); - - auto modify_setting = std::make_unique("MODIFY SETTING", next_flag++, TABLE); - modify_setting->aliases.push_back("ALTER MODIFY SETTING"); - - auto move_partition = std::make_unique("MOVE PARTITION", next_flag++, TABLE); - ext::push_back(move_partition->aliases, "ALTER MOVE PARTITION", "MOVE PART", "ALTER MOVE PART"); - auto fetch_partition = std::make_unique("FETCH PARTITION", next_flag++, TABLE); - ext::push_back(fetch_partition->aliases, "ALTER FETCH PARTITION"); - auto freeze_partition = std::make_unique("FREEZE PARTITION", next_flag++, TABLE); - ext::push_back(freeze_partition->aliases, "ALTER FREEZE PARTITION"); - - auto alter_table = std::make_unique("ALTER TABLE", std::move(update), std::move(delet), std::move(alter_column), std::move(index), std::move(alter_constraint), std::move(modify_ttl), std::move(materialize_ttl), std::move(modify_setting), std::move(move_partition), std::move(fetch_partition), std::move(freeze_partition)); - - auto refresh_view = std::make_unique("REFRESH VIEW", next_flag++, VIEW); - ext::push_back(refresh_view->aliases, "ALTER LIVE VIEW REFRESH"); - auto modify_view_query = std::make_unique("MODIFY VIEW QUERY", next_flag++, VIEW); - auto alter_view = std::make_unique("ALTER VIEW", std::move(refresh_view), std::move(modify_view_query)); - - auto alter = std::make_unique("ALTER", std::move(alter_table), std::move(alter_view)); - ext::push_back(all, std::move(alter)); - - auto create_database = std::make_unique("CREATE DATABASE", next_flag++, DATABASE); - auto create_table = std::make_unique("CREATE TABLE", next_flag++, TABLE); - auto create_view = std::make_unique("CREATE VIEW", next_flag++, VIEW); - auto create_dictionary = std::make_unique("CREATE DICTIONARY", next_flag++, DICTIONARY); - auto create = std::make_unique("CREATE", std::move(create_database), std::move(create_table), std::move(create_view), std::move(create_dictionary)); - 
ext::push_back(all, std::move(create)); - - auto create_temporary_table = std::make_unique("CREATE TEMPORARY TABLE", next_flag++, GLOBAL); - ext::push_back(all, std::move(create_temporary_table)); - - auto drop_database = std::make_unique("DROP DATABASE", next_flag++, DATABASE); - auto drop_table = std::make_unique("DROP TABLE", next_flag++, TABLE); - auto drop_view = std::make_unique("DROP VIEW", next_flag++, VIEW); - auto drop_dictionary = std::make_unique("DROP DICTIONARY", next_flag++, DICTIONARY); - auto drop = std::make_unique("DROP", std::move(drop_database), std::move(drop_table), std::move(drop_view), std::move(drop_dictionary)); - ext::push_back(all, std::move(drop)); - - auto truncate_table = std::make_unique("TRUNCATE TABLE", next_flag++, TABLE); - auto truncate_view = std::make_unique("TRUNCATE VIEW", next_flag++, VIEW); - auto truncate = std::make_unique("TRUNCATE", std::move(truncate_table), std::move(truncate_view)); - ext::push_back(all, std::move(truncate)); - - auto optimize = std::make_unique("OPTIMIZE", next_flag++, TABLE); - optimize->aliases.push_back("OPTIMIZE TABLE"); - ext::push_back(all, std::move(optimize)); - - auto kill_query = std::make_unique("KILL QUERY", next_flag++, GLOBAL); - ext::push_back(all, std::move(kill_query)); - - auto create_user = std::make_unique("CREATE USER", next_flag++, GLOBAL); - auto alter_user = std::make_unique("ALTER USER", next_flag++, GLOBAL); - auto drop_user = std::make_unique("DROP USER", next_flag++, GLOBAL); - auto create_role = std::make_unique("CREATE ROLE", next_flag++, GLOBAL); - auto alter_role = std::make_unique("ALTER ROLE", next_flag++, GLOBAL); - auto drop_role = std::make_unique("DROP ROLE", next_flag++, GLOBAL); - auto create_policy = std::make_unique("CREATE POLICY", next_flag++, GLOBAL); - auto alter_policy = std::make_unique("ALTER POLICY", next_flag++, GLOBAL); - auto drop_policy = std::make_unique("DROP POLICY", next_flag++, GLOBAL); - auto create_quota = std::make_unique("CREATE QUOTA", next_flag++, GLOBAL); - auto alter_quota = std::make_unique("ALTER QUOTA", next_flag++, GLOBAL); - auto drop_quota = std::make_unique("DROP QUOTA", next_flag++, GLOBAL); - auto create_profile = std::make_unique("CREATE SETTINGS PROFILE", next_flag++, GLOBAL); - ext::push_back(create_profile->aliases, "CREATE PROFILE"); - auto alter_profile = std::make_unique("ALTER SETTINGS PROFILE", next_flag++, GLOBAL); - ext::push_back(alter_profile->aliases, "ALTER PROFILE"); - auto drop_profile = std::make_unique("DROP SETTINGS PROFILE", next_flag++, GLOBAL); - ext::push_back(drop_profile->aliases, "DROP PROFILE"); - auto role_admin = std::make_unique("ROLE ADMIN", next_flag++, GLOBAL); - ext::push_back(all, std::move(create_user), std::move(alter_user), std::move(drop_user), std::move(create_role), std::move(alter_role), std::move(drop_role), std::move(create_policy), std::move(alter_policy), std::move(drop_policy), std::move(create_quota), std::move(alter_quota), std::move(drop_quota), std::move(create_profile), std::move(alter_profile), std::move(drop_profile), std::move(role_admin)); - - auto shutdown = std::make_unique("SHUTDOWN", next_flag++, GLOBAL); - ext::push_back(shutdown->aliases, "SYSTEM SHUTDOWN", "SYSTEM KILL"); - auto drop_cache = std::make_unique("DROP CACHE", next_flag++, GLOBAL); - ext::push_back(drop_cache->aliases, "SYSTEM DROP CACHE", "DROP DNS CACHE", "SYSTEM DROP DNS CACHE", "DROP MARK CACHE", "SYSTEM DROP MARK CACHE", "DROP UNCOMPRESSED CACHE", "SYSTEM DROP UNCOMPRESSED CACHE", "DROP COMPILED EXPRESSION CACHE", 
"SYSTEM DROP COMPILED EXPRESSION CACHE"); - auto reload_config = std::make_unique("RELOAD CONFIG", next_flag++, GLOBAL); - ext::push_back(reload_config->aliases, "SYSTEM RELOAD CONFIG"); - auto reload_dictionary = std::make_unique("RELOAD DICTIONARY", next_flag++, GLOBAL); - ext::push_back(reload_dictionary->aliases, "SYSTEM RELOAD DICTIONARY", "RELOAD DICTIONARIES", "SYSTEM RELOAD DICTIONARIES", "RELOAD EMBEDDED DICTIONARIES", "SYSTEM RELOAD EMBEDDED DICTIONARIES"); - auto stop_merges = std::make_unique("STOP MERGES", next_flag++, TABLE); - ext::push_back(stop_merges->aliases, "SYSTEM STOP MERGES", "START MERGES", "SYSTEM START MERGES"); - auto stop_ttl_merges = std::make_unique("STOP TTL MERGES", next_flag++, TABLE); - ext::push_back(stop_ttl_merges->aliases, "SYSTEM STOP TTL MERGES", "START TTL MERGES", "SYSTEM START TTL MERGES"); - auto stop_fetches = std::make_unique("STOP FETCHES", next_flag++, TABLE); - ext::push_back(stop_fetches->aliases, "SYSTEM STOP FETCHES", "START FETCHES", "SYSTEM START FETCHES"); - auto stop_moves = std::make_unique("STOP MOVES", next_flag++, TABLE); - ext::push_back(stop_moves->aliases, "SYSTEM STOP MOVES", "START MOVES", "SYSTEM START MOVES"); - auto stop_distributed_sends = std::make_unique("STOP DISTRIBUTED SENDS", next_flag++, TABLE); - ext::push_back(stop_distributed_sends->aliases, "SYSTEM STOP DISTRIBUTED SENDS", "START DISTRIBUTED SENDS", "SYSTEM START DISTRIBUTED SENDS"); - auto stop_replicated_sends = std::make_unique("STOP REPLICATED SENDS", next_flag++, TABLE); - ext::push_back(stop_replicated_sends->aliases, "SYSTEM STOP REPLICATED SENDS", "START REPLICATED SENDS", "SYSTEM START REPLICATED SENDS"); - auto stop_replication_queues = std::make_unique("STOP REPLICATION QUEUES", next_flag++, TABLE); - ext::push_back(stop_replication_queues->aliases, "SYSTEM STOP REPLICATION QUEUES", "START REPLICATION QUEUES", "SYSTEM START REPLICATION QUEUES"); - auto sync_replica = std::make_unique("SYNC REPLICA", next_flag++, TABLE); - ext::push_back(sync_replica->aliases, "SYSTEM SYNC REPLICA"); - auto restart_replica = std::make_unique("RESTART REPLICA", next_flag++, TABLE); - ext::push_back(restart_replica->aliases, "SYSTEM RESTART REPLICA"); - auto flush_distributed = std::make_unique("FLUSH DISTRIBUTED", next_flag++, TABLE); - ext::push_back(flush_distributed->aliases, "SYSTEM FLUSH DISTRIBUTED"); - auto flush_logs = std::make_unique("FLUSH LOGS", next_flag++, GLOBAL); - ext::push_back(flush_logs->aliases, "SYSTEM FLUSH LOGS"); - auto system = std::make_unique("SYSTEM", std::move(shutdown), std::move(drop_cache), std::move(reload_config), std::move(reload_dictionary), std::move(stop_merges), std::move(stop_ttl_merges), std::move(stop_fetches), std::move(stop_moves), std::move(stop_distributed_sends), std::move(stop_replicated_sends), std::move(stop_replication_queues), std::move(sync_replica), std::move(restart_replica), std::move(flush_distributed), std::move(flush_logs)); - ext::push_back(all, std::move(system)); - - auto dict_get = std::make_unique("dictGet()", next_flag++, DICTIONARY); - dict_get->aliases.push_back("dictHas()"); - dict_get->aliases.push_back("dictGetHierarchy()"); - dict_get->aliases.push_back("dictIsIn()"); - ext::push_back(all, std::move(dict_get)); - - auto address_to_line = std::make_unique("addressToLine()", next_flag++, GLOBAL); - auto address_to_symbol = std::make_unique("addressToSymbol()", next_flag++, GLOBAL); - auto demangle = std::make_unique("demangle()", next_flag++, GLOBAL); - auto introspection = 
std::make_unique("INTROSPECTION", std::move(address_to_line), std::move(address_to_symbol), std::move(demangle)); - ext::push_back(introspection->aliases, "INTROSPECTION FUNCTIONS"); - ext::push_back(all, std::move(introspection)); - - auto file = std::make_unique("file()", next_flag++, GLOBAL); - auto url = std::make_unique("url()", next_flag++, GLOBAL); - auto input = std::make_unique("input()", next_flag++, GLOBAL); - auto values = std::make_unique("values()", next_flag++, GLOBAL); - auto numbers = std::make_unique("numbers()", next_flag++, GLOBAL); - auto zeros = std::make_unique("zeros()", next_flag++, GLOBAL); - auto merge = std::make_unique("merge()", next_flag++, DATABASE); - auto remote = std::make_unique("remote()", next_flag++, GLOBAL); - ext::push_back(remote->aliases, "remoteSecure()", "cluster()"); - auto mysql = std::make_unique("mysql()", next_flag++, GLOBAL); - auto odbc = std::make_unique("odbc()", next_flag++, GLOBAL); - auto jdbc = std::make_unique("jdbc()", next_flag++, GLOBAL); - auto hdfs = std::make_unique("hdfs()", next_flag++, GLOBAL); - auto s3 = std::make_unique("s3()", next_flag++, GLOBAL); - auto table_functions = std::make_unique("TABLE FUNCTIONS", std::move(file), std::move(url), std::move(input), std::move(values), std::move(numbers), std::move(zeros), std::move(merge), std::move(remote), std::move(mysql), std::move(odbc), std::move(jdbc), std::move(hdfs), std::move(s3)); - ext::push_back(all, std::move(table_functions)); - - auto node_all = std::make_unique("ALL", std::move(all)); - node_all->aliases.push_back("ALL PRIVILEGES"); - return node_all; - } - - void makeKeywordToFlagsMap(Node * start_node = nullptr) - { - if (!start_node) - { - start_node = flags_to_keyword_tree.get(); - keyword_to_flags_map["USAGE"] = {}; - keyword_to_flags_map["NONE"] = {}; - keyword_to_flags_map["NO PRIVILEGES"] = {}; - } - start_node->aliases.emplace_back(start_node->keyword); - for (auto & alias : start_node->aliases) - { - boost::to_upper(alias); - keyword_to_flags_map[alias] = start_node->flags; - } - for (auto & child : start_node->children) - makeKeywordToFlagsMap(child.get()); - } - - void makeAccessTypeToFlagsMapping() - { - access_type_to_flags_mapping.resize(MAX_ACCESS_TYPE); - for (auto access_type : ext::range_with_static_cast(0, MAX_ACCESS_TYPE)) - { - auto str = toKeyword(access_type); - auto it = keyword_to_flags_map.find(str); - if (it == keyword_to_flags_map.end()) - { - String uppercased{str}; - boost::to_upper(uppercased); - it = keyword_to_flags_map.find(uppercased); - } - access_type_to_flags_mapping[static_cast(access_type)] = it->second; - } - } - - void collectAllFlags(const Node * start_node = nullptr) - { - if (!start_node) - { - start_node = flags_to_keyword_tree.get(); - all_flags = start_node->flags; - } - if (start_node->target != UNKNOWN_TARGET) - all_flags_for_target[start_node->target] |= start_node->flags; - for (const auto & child : start_node->children) - collectAllFlags(child.get()); - } - - Impl() - { - flags_to_keyword_tree = makeFlagsToKeywordTree(); - makeKeywordToFlagsMap(); - makeAccessTypeToFlagsMapping(); - collectAllFlags(); - } - - std::unique_ptr flags_to_keyword_tree; + NodePtr flags_to_keyword_tree; + NodePtr none_node; std::unordered_map keyword_to_flags_map; std::vector access_type_to_flags_mapping; Flags all_flags; - Flags all_flags_for_target[NUM_TARGETS]; + Flags all_flags_for_target[static_cast(DICTIONARY) + 1]; }; diff --git a/src/Access/AccessType.h b/src/Access/AccessType.h index df8839f64ce..bf8526de535 100644 --- 
a/src/Access/AccessType.h +++ b/src/Access/AccessType.h @@ -11,135 +11,142 @@ namespace DB /// Represents an access type which can be granted on databases, tables, columns, etc. enum class AccessType { - NONE, /// no access - ALL, /// full access +/// Macro M should be defined as M(name, aliases, node_type, parent_group_name) +/// where name is identifier with underscores (instead of spaces); +/// aliases is a string containing comma-separated list; +/// node_type either specifies access type's level (GLOBAL/DATABASE/TABLE/DICTIONARY/VIEW/COLUMNS), +/// or specifies that the access type is a GROUP of other access types; +/// parent_group_name is the name of the group containing this access type (or NONE if there is no such group). +#define APPLY_FOR_ACCESS_TYPES(M) \ + M(SHOW_DATABASES, "", DATABASE, SHOW) /* allows to execute SHOW DATABASES, SHOW CREATE DATABASE, USE <database> */\ + M(SHOW_TABLES, "", TABLE, SHOW) /* allows to execute SHOW TABLES, EXISTS <table>, CHECK <table>
    */\ + M(SHOW_COLUMNS, "", COLUMN, SHOW) /* allows to execute SHOW CREATE TABLE, DESCRIBE */\ + M(SHOW_DICTIONARIES, "", DICTIONARY, SHOW) /* allows to execute SHOW DICTIONARIES, SHOW CREATE DICTIONARY, EXISTS */\ + M(SHOW, "", GROUP, ALL) /* allows to execute SHOW, USE, EXISTS, CHECK, DESCRIBE */\ + \ + M(SELECT, "", COLUMN, ALL) \ + M(INSERT, "", COLUMN, ALL) \ + M(UPDATE, "ALTER UPDATE", COLUMN, ALTER_TABLE) /* allows to execute ALTER UPDATE */\ + M(DELETE, "ALTER DELETE", COLUMN, ALTER_TABLE) /* allows to execute ALTER DELETE */\ + \ + M(ADD_COLUMN, "ALTER ADD COLUMN", COLUMN, ALTER_COLUMN) \ + M(MODIFY_COLUMN, "ALTER MODIFY COLUMN", COLUMN, ALTER_COLUMN) \ + M(DROP_COLUMN, "ALTER DROP COLUMN", COLUMN, ALTER_COLUMN) \ + M(COMMENT_COLUMN, "ALTER COMMENT COLUMN", COLUMN, ALTER_COLUMN) \ + M(CLEAR_COLUMN, "ALTER CLEAR COLUMN", COLUMN, ALTER_COLUMN) \ + M(RENAME_COLUMN, "ALTER RENAME COLUMN", COLUMN, ALTER_COLUMN) \ + M(ALTER_COLUMN, "", GROUP, ALTER_TABLE) /* allow to execute ALTER {ADD|DROP|MODIFY...} COLUMN */\ + \ + M(ALTER_ORDER_BY, "MODIFY ORDER BY, ALTER MODIFY ORDER BY", TABLE, INDEX) \ + M(ADD_INDEX, "ALTER ADD INDEX", TABLE, INDEX) \ + M(DROP_INDEX, "ALTER DROP INDEX", TABLE, INDEX) \ + M(MATERIALIZE_INDEX, "ALTER MATERIALIZE INDEX", TABLE, INDEX) \ + M(CLEAR_INDEX, "ALTER CLEAR INDEX", TABLE, INDEX) \ + M(INDEX, "ALTER INDEX", GROUP, ALTER_TABLE) /* allows to execute ALTER ORDER BY or ALTER {ADD|DROP...} INDEX */\ + \ + M(ADD_CONSTRAINT, "ALTER ADD CONSTRAINT", TABLE, CONSTRAINT) \ + M(DROP_CONSTRAINT, "ALTER DROP CONSTRAINT", TABLE, CONSTRAINT) \ + M(CONSTRAINT, "ALTER CONSTRAINT", GROUP, ALTER_TABLE) /* allows to execute ALTER {ADD|DROP} CONSTRAINT */\ + \ + M(MODIFY_TTL, "ALTER MODIFY TTL", TABLE, ALTER_TABLE) /* allows to execute ALTER MODIFY TTL */\ + M(MATERIALIZE_TTL, "ALTER MATERIALIZE TTL", TABLE, ALTER_TABLE) /* allows to execute ALTER MATERIALIZE TTL */\ + M(MODIFY_SETTING, "ALTER MODIFY SETTING", TABLE, ALTER_TABLE) /* allows to execute ALTER MODIFY SETTING */\ + M(MOVE_PARTITION, "ALTER MOVE PARTITION, MOVE PART, ALTER MOVE PART", TABLE, ALTER_TABLE) \ + M(FETCH_PARTITION, "ALTER FETCH PARTITION", TABLE, ALTER_TABLE) \ + M(FREEZE_PARTITION, "ALTER FREEZE PARTITION", TABLE, ALTER_TABLE) \ + \ + M(ALTER_TABLE, "", GROUP, ALTER) \ + \ + M(REFRESH_VIEW, "ALTER LIVE VIEW REFRESH", VIEW, ALTER_VIEW) \ + M(MODIFY_VIEW_QUERY, "ALTER TABLE MODIFY QUERY", VIEW, ALTER_VIEW) \ + M(ALTER_VIEW, "", GROUP, ALTER) /* allows to execute ALTER LIVE VIEW REFRESH, ALTER TABLE MODIFY QUERY */\ + \ + M(ALTER, "", GROUP, ALL) /* allows to execute ALTER {TABLE|LIVE VIEW} */\ + \ + M(CREATE_DATABASE, "", DATABASE, CREATE) /* allows to execute {CREATE|ATTACH} DATABASE */\ + M(CREATE_TABLE, "", TABLE, CREATE) /* allows to execute {CREATE|ATTACH} {TABLE|VIEW} */\ + M(CREATE_VIEW, "", VIEW, CREATE) /* allows to execute {CREATE|ATTACH} VIEW */\ + M(CREATE_DICTIONARY, "", DICTIONARY, CREATE) /* allows to execute {CREATE|ATTACH} DICTIONARY */\ + M(CREATE_TEMPORARY_TABLE, "", GLOBAL, CREATE) /* allows to create and manipulate temporary tables */ \ + M(CREATE, "", GROUP, ALL) /* allows to execute {CREATE|ATTACH} */ \ + \ + M(DROP_DATABASE, "", DATABASE, DROP) /* allows to execute {DROP|DETACH} DATABASE */\ + M(DROP_TABLE, "", TABLE, DROP) /* allows to execute {DROP|DETACH} TABLE */\ + M(DROP_VIEW, "", VIEW, DROP) /* allows to execute {DROP|DETACH} TABLE for views */\ + M(DROP_DICTIONARY, "", DICTIONARY, DROP) /* allows to execute {DROP|DETACH} DICTIONARY */\ + M(DROP, "", GROUP, ALL) /* allows to 
execute {DROP|DETACH} */\ + \ + M(TRUNCATE_VIEW, "", VIEW, TRUNCATE) \ + M(TRUNCATE_TABLE, "", TABLE, TRUNCATE) \ + M(TRUNCATE, "", GROUP, ALL) \ + M(OPTIMIZE, "OPTIMIZE TABLE", TABLE, ALL) \ + \ + M(KILL_QUERY, "", GLOBAL, ALL) /* allows to kill a query started by another user (anyone can kill his own queries) */\ + \ + M(CREATE_USER, "", GLOBAL, ALL) \ + M(ALTER_USER, "", GLOBAL, ALL) \ + M(DROP_USER, "", GLOBAL, ALL) \ + M(CREATE_ROLE, "", GLOBAL, ALL) \ + M(ALTER_ROLE, "", GLOBAL, ALL) \ + M(DROP_ROLE, "", GLOBAL, ALL) \ + M(ROLE_ADMIN, "", GLOBAL, ALL) /* allows to grant and revoke the roles which are not granted to the current user with admin option */\ + M(CREATE_POLICY, "CREATE ROW POLICY", GLOBAL, ALL) \ + M(ALTER_POLICY, "ALTER ROW POLICY", GLOBAL, ALL) \ + M(DROP_POLICY, "DROP ROW POLICY", GLOBAL, ALL) \ + M(CREATE_QUOTA, "", GLOBAL, ALL) \ + M(ALTER_QUOTA, "", GLOBAL, ALL) \ + M(DROP_QUOTA, "", GLOBAL, ALL) \ + M(CREATE_SETTINGS_PROFILE, "CREATE PROFILE", GLOBAL, ALL) \ + M(ALTER_SETTINGS_PROFILE, "ALTER PROFILE", GLOBAL, ALL) \ + M(DROP_SETTINGS_PROFILE, "DROP PROFILE", GLOBAL, ALL) \ + \ + M(SHUTDOWN, "SYSTEM SHUTDOWN, SYSTEM KILL", GLOBAL, SYSTEM) \ + M(DROP_CACHE, "SYSTEM DROP CACHE, DROP DNS CACHE, SYSTEM DROP DNS CACHE, DROP MARK CACHE, SYSTEM DROP MARK CACHE, DROP UNCOMPRESSED CACHE, SYSTEM DROP UNCOMPRESSED CACHE, DROP COMPILED EXPRESSION CACHE, SYSTEM DROP COMPILED EXPRESSION CACHE", GLOBAL, SYSTEM) \ + M(RELOAD_CONFIG, "SYSTEM RELOAD CONFIG", GLOBAL, SYSTEM) \ + M(RELOAD_DICTIONARY, "SYSTEM RELOAD DICTIONARY, RELOAD DICTIONARIES, SYSTEM RELOAD DICTIONARIES, RELOAD EMBEDDED DICTIONARIES, SYSTEM RELOAD EMBEDDED DICTIONARIES", GLOBAL, SYSTEM) \ + M(STOP_MERGES, "SYSTEM STOP MERGES, START MERGES, SYSTEM START MERGES", TABLE, SYSTEM) \ + M(STOP_TTL_MERGES, "SYSTEM STOP TTL MERGES, START TTL MERGES, SYSTEM START TTL MERGES", TABLE, SYSTEM) \ + M(STOP_FETCHES, "SYSTEM STOP FETCHES, START FETCHES, SYSTEM START FETCHES", TABLE, SYSTEM) \ + M(STOP_MOVES, "SYSTEM STOP MOVES, START MOVES, SYSTEM START MOVES", TABLE, SYSTEM) \ + M(STOP_DISTRIBUTED_SENDS, "SYSTEM STOP DISTRIBUTED SENDS, START DISTRIBUTED SENDS, SYSTEM START DISTRIBUTED SENDS", TABLE, SYSTEM) \ + M(STOP_REPLICATED_SENDS, "SYSTEM STOP REPLICATED SENDS, START REPLICATED SENDS, SYSTEM START REPLICATED SENDS", TABLE, SYSTEM) \ + M(STOP_REPLICATION_QUEUES, "SYSTEM STOP REPLICATION QUEUES, START REPLICATION QUEUES, SYSTEM START REPLICATION QUEUES", TABLE, SYSTEM) \ + M(SYNC_REPLICA, "SYSTEM SYNC REPLICA", TABLE, SYSTEM) \ + M(RESTART_REPLICA, "SYSTEM RESTART REPLICA", TABLE, SYSTEM) \ + M(FLUSH_DISTRIBUTED, "SYSTEM FLUSH DISTRIBUTED", TABLE, SYSTEM) \ + M(FLUSH_LOGS, "SYSTEM FLUSH LOGS", GLOBAL, SYSTEM) \ + M(SYSTEM, "", GROUP, ALL) /* allows to execute SYSTEM {SHUTDOWN|RELOAD CONFIG|...} */ \ + \ + M(dictGet, "dictHas, dictGetHierarchy, dictIsIn", DICTIONARY, ALL) /* allows to execute functions dictGet(), dictHas(), dictGetHierarchy(), dictIsIn() */\ + \ + M(addressToLine, "", GLOBAL, INTROSPECTION) /* allows to execute function addressToLine() */\ + M(addressToSymbol, "", GLOBAL, INTROSPECTION) /* allows to execute function addressToSymbol() */\ + M(demangle, "", GLOBAL, INTROSPECTION) /* allows to execute function demangle() */\ + M(INTROSPECTION, "INTROSPECTION FUNCTIONS", GROUP, ALL) /* allows to execute functions addressToLine(), addressToSymbol(), demangle()*/\ + \ + M(file, "", GLOBAL, TABLE_FUNCTIONS) \ + M(url, "", GLOBAL, TABLE_FUNCTIONS) \ + M(input, "", GLOBAL, TABLE_FUNCTIONS) \ + M(values, "", GLOBAL, 
TABLE_FUNCTIONS) \ + M(numbers, "", GLOBAL, TABLE_FUNCTIONS) \ + M(zeros, "", GLOBAL, TABLE_FUNCTIONS) \ + M(merge, "", GLOBAL, TABLE_FUNCTIONS) \ + M(remote, "remoteSecure, cluster", GLOBAL, TABLE_FUNCTIONS) \ + M(mysql, "", GLOBAL, TABLE_FUNCTIONS) \ + M(odbc, "", GLOBAL, TABLE_FUNCTIONS) \ + M(jdbc, "", GLOBAL, TABLE_FUNCTIONS) \ + M(hdfs, "", GLOBAL, TABLE_FUNCTIONS) \ + M(s3, "", GLOBAL, TABLE_FUNCTIONS) \ + M(TABLE_FUNCTIONS, "", GROUP, ALL) \ + \ + M(ALL, "ALL PRIVILEGES", GROUP, NONE) /* full access */ \ + M(NONE, "USAGE, NO PRIVILEGES", GROUP, NONE) /* no access */ - SHOW_DATABASES, /// allows to execute SHOW DATABASES, SHOW CREATE DATABASE, USE - SHOW_TABLES, /// allows to execute SHOW TABLES, EXISTS
<table>, CHECK <table>
    - SHOW_COLUMNS, /// allows to execute SHOW CREATE TABLE, DESCRIBE - SHOW_DICTIONARIES, /// allows to execute SHOW DICTIONARIES, SHOW CREATE DICTIONARY, EXISTS - SHOW, /// allows to execute SHOW, USE, EXISTS, CHECK, DESCRIBE +#define DECLARE_ACCESS_TYPE_ENUM_CONST(name, aliases, node_type, parent_group_name) \ + name, - SELECT, - INSERT, - UPDATE, /// allows to execute ALTER UPDATE - DELETE, /// allows to execute ALTER DELETE - - ADD_COLUMN, - DROP_COLUMN, - MODIFY_COLUMN, - COMMENT_COLUMN, - CLEAR_COLUMN, - RENAME_COLUMN, - ALTER_COLUMN, /// allow to execute ALTER {ADD|DROP|MODIFY...} COLUMN - - ALTER_ORDER_BY, - ADD_INDEX, - DROP_INDEX, - MATERIALIZE_INDEX, - CLEAR_INDEX, - INDEX, /// allows to execute ALTER ORDER BY or ALTER {ADD|DROP...} INDEX - - ADD_CONSTRAINT, - DROP_CONSTRAINT, - ALTER_CONSTRAINT, /// allows to execute ALTER {ADD|DROP} CONSTRAINT - - MODIFY_TTL, /// allows to execute ALTER MODIFY TTL - MATERIALIZE_TTL, /// allows to execute ALTER MATERIALIZE TTL - MODIFY_SETTING, /// allows to execute ALTER MODIFY SETTING - - MOVE_PARTITION, - FETCH_PARTITION, - FREEZE_PARTITION, - - ALTER_TABLE, /// allows to execute ALTER TABLE ... - - REFRESH_VIEW, /// allows to execute ALTER LIVE VIEW REFRESH - MODIFY_VIEW_QUERY, /// allows to execute ALTER TABLE MODIFY QUERY - ALTER_VIEW, /// allows to execute ALTER LIVE VIEW REFRESH, ALTER TABLE MODIFY QUERY - - ALTER, /// allows to execute ALTER {TABLE|LIVE VIEW} ... - - CREATE_DATABASE, /// allows to execute {CREATE|ATTACH} DATABASE - CREATE_TABLE, /// allows to execute {CREATE|ATTACH} TABLE - CREATE_VIEW, /// allows to execute {CREATE|ATTACH} VIEW - CREATE_DICTIONARY, /// allows to execute {CREATE|ATTACH} DICTIONARY - CREATE_TEMPORARY_TABLE, /// allows to create and manipulate temporary tables and views. - CREATE, /// allows to execute {CREATE|ATTACH} [TEMPORARY] {DATABASE|TABLE|VIEW|DICTIONARY} - - DROP_DATABASE, - DROP_TABLE, - DROP_VIEW, - DROP_DICTIONARY, - DROP, /// allows to execute DROP {DATABASE|TABLE|VIEW|DICTIONARY} - - TRUNCATE_TABLE, - TRUNCATE_VIEW, - TRUNCATE, /// allows to execute TRUNCATE {TABLE|VIEW} - - OPTIMIZE, /// allows to execute OPTIMIZE TABLE - - KILL_QUERY, /// allows to kill a query started by another user (anyone can kill his own queries) - - CREATE_USER, - ALTER_USER, - DROP_USER, - CREATE_ROLE, - ALTER_ROLE, - DROP_ROLE, - CREATE_POLICY, - ALTER_POLICY, - DROP_POLICY, - CREATE_QUOTA, - ALTER_QUOTA, - DROP_QUOTA, - CREATE_SETTINGS_PROFILE, - ALTER_SETTINGS_PROFILE, - DROP_SETTINGS_PROFILE, - - ROLE_ADMIN, /// allows to grant and revoke any roles. 
- - SHUTDOWN, - DROP_CACHE, - RELOAD_CONFIG, - RELOAD_DICTIONARY, - STOP_MERGES, - STOP_TTL_MERGES, - STOP_FETCHES, - STOP_MOVES, - STOP_DISTRIBUTED_SENDS, - STOP_REPLICATED_SENDS, - STOP_REPLICATION_QUEUES, - SYNC_REPLICA, - RESTART_REPLICA, - FLUSH_DISTRIBUTED, - FLUSH_LOGS, - SYSTEM, /// allows to execute SYSTEM {SHUTDOWN|RELOAD CONFIG|...} - - dictGet, /// allows to execute functions dictGet, dictHas, dictGetHierarchy, dictIsIn - dictHas, /// allows to execute functions dictGet, dictHas, dictGetHierarchy, dictIsIn - dictGetHierarchy, /// allows to execute functions dictGet, dictHas, dictGetHierarchy, dictIsIn - dictIsIn, /// allows to execute functions dictGet, dictHas, dictGetHierarchy, dictIsIn - - addressToLine, /// allows to execute function addressToLine - addressToSymbol, /// allows to execute function addressToSymbol - demangle, /// allows to execute function demangle - INTROSPECTION, /// allows to execute functions addressToLine, addressToSymbol, demangle - - file, - url, - input, - values, - numbers, - zeros, - merge, - remote, - mysql, - odbc, - jdbc, - hdfs, - s3, - TABLE_FUNCTIONS, /// allows to execute any table function + APPLY_FOR_ACCESS_TYPES(DECLARE_ACCESS_TYPE_ENUM_CONST) +#undef DECLARE_ACCESS_TYPE_ENUM_CONST }; constexpr size_t MAX_ACCESS_TYPE = static_cast(AccessType::TABLE_FUNCTIONS) + 1; @@ -165,153 +172,26 @@ namespace impl } private: - void addToMapping(AccessType type, const std::string_view & str) + AccessTypeToKeywordConverter() + { +#define INSERT_ACCESS_TYPE_KEYWORD_PAIR_TO_MAPPING(name, aliases, node_type, parent_group_name) \ + insertToMapping(AccessType::name, #name); + + APPLY_FOR_ACCESS_TYPES(INSERT_ACCESS_TYPE_KEYWORD_PAIR_TO_MAPPING) + +#undef INSERT_ACCESS_TYPE_KEYWORD_PAIR_TO_MAPPING + } + + void insertToMapping(AccessType type, const std::string_view & str) { String str2{str}; boost::replace_all(str2, "_", " "); - if (islower(str2[0])) - str2 += "()"; - access_type_to_keyword_mapping[static_cast(type)] = str2; + size_t index = static_cast(type); + access_type_to_keyword_mapping.resize(std::max(index + 1, access_type_to_keyword_mapping.size())); + access_type_to_keyword_mapping[index] = str2; } - AccessTypeToKeywordConverter() - { -#define ACCESS_TYPE_TO_KEYWORD_CASE(type) \ - addToMapping(AccessType::type, #type) - - ACCESS_TYPE_TO_KEYWORD_CASE(NONE); - ACCESS_TYPE_TO_KEYWORD_CASE(ALL); - - ACCESS_TYPE_TO_KEYWORD_CASE(SHOW_DATABASES); - ACCESS_TYPE_TO_KEYWORD_CASE(SHOW_TABLES); - ACCESS_TYPE_TO_KEYWORD_CASE(SHOW_COLUMNS); - ACCESS_TYPE_TO_KEYWORD_CASE(SHOW_DICTIONARIES); - ACCESS_TYPE_TO_KEYWORD_CASE(SHOW); - - ACCESS_TYPE_TO_KEYWORD_CASE(SELECT); - ACCESS_TYPE_TO_KEYWORD_CASE(INSERT); - ACCESS_TYPE_TO_KEYWORD_CASE(UPDATE); - ACCESS_TYPE_TO_KEYWORD_CASE(DELETE); - - ACCESS_TYPE_TO_KEYWORD_CASE(ADD_COLUMN); - ACCESS_TYPE_TO_KEYWORD_CASE(DROP_COLUMN); - ACCESS_TYPE_TO_KEYWORD_CASE(MODIFY_COLUMN); - ACCESS_TYPE_TO_KEYWORD_CASE(COMMENT_COLUMN); - ACCESS_TYPE_TO_KEYWORD_CASE(CLEAR_COLUMN); - ACCESS_TYPE_TO_KEYWORD_CASE(RENAME_COLUMN); - ACCESS_TYPE_TO_KEYWORD_CASE(ALTER_COLUMN); - - ACCESS_TYPE_TO_KEYWORD_CASE(ALTER_ORDER_BY); - ACCESS_TYPE_TO_KEYWORD_CASE(ADD_INDEX); - ACCESS_TYPE_TO_KEYWORD_CASE(DROP_INDEX); - ACCESS_TYPE_TO_KEYWORD_CASE(MATERIALIZE_INDEX); - ACCESS_TYPE_TO_KEYWORD_CASE(CLEAR_INDEX); - ACCESS_TYPE_TO_KEYWORD_CASE(INDEX); - - ACCESS_TYPE_TO_KEYWORD_CASE(ADD_CONSTRAINT); - ACCESS_TYPE_TO_KEYWORD_CASE(DROP_CONSTRAINT); - ACCESS_TYPE_TO_KEYWORD_CASE(ALTER_CONSTRAINT); - - ACCESS_TYPE_TO_KEYWORD_CASE(MODIFY_TTL); - 
ACCESS_TYPE_TO_KEYWORD_CASE(MATERIALIZE_TTL); - ACCESS_TYPE_TO_KEYWORD_CASE(MODIFY_SETTING); - - ACCESS_TYPE_TO_KEYWORD_CASE(MOVE_PARTITION); - ACCESS_TYPE_TO_KEYWORD_CASE(FETCH_PARTITION); - ACCESS_TYPE_TO_KEYWORD_CASE(FREEZE_PARTITION); - - ACCESS_TYPE_TO_KEYWORD_CASE(ALTER_TABLE); - - ACCESS_TYPE_TO_KEYWORD_CASE(REFRESH_VIEW); - ACCESS_TYPE_TO_KEYWORD_CASE(MODIFY_VIEW_QUERY); - ACCESS_TYPE_TO_KEYWORD_CASE(ALTER_VIEW); - - ACCESS_TYPE_TO_KEYWORD_CASE(ALTER); - - ACCESS_TYPE_TO_KEYWORD_CASE(CREATE_DATABASE); - ACCESS_TYPE_TO_KEYWORD_CASE(CREATE_TABLE); - ACCESS_TYPE_TO_KEYWORD_CASE(CREATE_VIEW); - ACCESS_TYPE_TO_KEYWORD_CASE(CREATE_DICTIONARY); - ACCESS_TYPE_TO_KEYWORD_CASE(CREATE_TEMPORARY_TABLE); - ACCESS_TYPE_TO_KEYWORD_CASE(CREATE); - - ACCESS_TYPE_TO_KEYWORD_CASE(DROP_DATABASE); - ACCESS_TYPE_TO_KEYWORD_CASE(DROP_TABLE); - ACCESS_TYPE_TO_KEYWORD_CASE(DROP_VIEW); - ACCESS_TYPE_TO_KEYWORD_CASE(DROP_DICTIONARY); - ACCESS_TYPE_TO_KEYWORD_CASE(DROP); - - ACCESS_TYPE_TO_KEYWORD_CASE(TRUNCATE_TABLE); - ACCESS_TYPE_TO_KEYWORD_CASE(TRUNCATE_VIEW); - ACCESS_TYPE_TO_KEYWORD_CASE(TRUNCATE); - - ACCESS_TYPE_TO_KEYWORD_CASE(OPTIMIZE); - - ACCESS_TYPE_TO_KEYWORD_CASE(KILL_QUERY); - - ACCESS_TYPE_TO_KEYWORD_CASE(CREATE_USER); - ACCESS_TYPE_TO_KEYWORD_CASE(ALTER_USER); - ACCESS_TYPE_TO_KEYWORD_CASE(DROP_USER); - ACCESS_TYPE_TO_KEYWORD_CASE(CREATE_ROLE); - ACCESS_TYPE_TO_KEYWORD_CASE(ALTER_ROLE); - ACCESS_TYPE_TO_KEYWORD_CASE(DROP_ROLE); - ACCESS_TYPE_TO_KEYWORD_CASE(CREATE_POLICY); - ACCESS_TYPE_TO_KEYWORD_CASE(ALTER_POLICY); - ACCESS_TYPE_TO_KEYWORD_CASE(DROP_POLICY); - ACCESS_TYPE_TO_KEYWORD_CASE(CREATE_QUOTA); - ACCESS_TYPE_TO_KEYWORD_CASE(ALTER_QUOTA); - ACCESS_TYPE_TO_KEYWORD_CASE(DROP_QUOTA); - ACCESS_TYPE_TO_KEYWORD_CASE(CREATE_SETTINGS_PROFILE); - ACCESS_TYPE_TO_KEYWORD_CASE(ALTER_SETTINGS_PROFILE); - ACCESS_TYPE_TO_KEYWORD_CASE(DROP_SETTINGS_PROFILE); - ACCESS_TYPE_TO_KEYWORD_CASE(ROLE_ADMIN); - - ACCESS_TYPE_TO_KEYWORD_CASE(SHUTDOWN); - ACCESS_TYPE_TO_KEYWORD_CASE(DROP_CACHE); - ACCESS_TYPE_TO_KEYWORD_CASE(RELOAD_CONFIG); - ACCESS_TYPE_TO_KEYWORD_CASE(RELOAD_DICTIONARY); - ACCESS_TYPE_TO_KEYWORD_CASE(STOP_MERGES); - ACCESS_TYPE_TO_KEYWORD_CASE(STOP_TTL_MERGES); - ACCESS_TYPE_TO_KEYWORD_CASE(STOP_FETCHES); - ACCESS_TYPE_TO_KEYWORD_CASE(STOP_MOVES); - ACCESS_TYPE_TO_KEYWORD_CASE(STOP_DISTRIBUTED_SENDS); - ACCESS_TYPE_TO_KEYWORD_CASE(STOP_REPLICATED_SENDS); - ACCESS_TYPE_TO_KEYWORD_CASE(STOP_REPLICATION_QUEUES); - ACCESS_TYPE_TO_KEYWORD_CASE(SYNC_REPLICA); - ACCESS_TYPE_TO_KEYWORD_CASE(RESTART_REPLICA); - ACCESS_TYPE_TO_KEYWORD_CASE(FLUSH_DISTRIBUTED); - ACCESS_TYPE_TO_KEYWORD_CASE(FLUSH_LOGS); - ACCESS_TYPE_TO_KEYWORD_CASE(SYSTEM); - - ACCESS_TYPE_TO_KEYWORD_CASE(dictGet); - ACCESS_TYPE_TO_KEYWORD_CASE(dictHas); - ACCESS_TYPE_TO_KEYWORD_CASE(dictGetHierarchy); - ACCESS_TYPE_TO_KEYWORD_CASE(dictIsIn); - - ACCESS_TYPE_TO_KEYWORD_CASE(addressToLine); - ACCESS_TYPE_TO_KEYWORD_CASE(addressToSymbol); - ACCESS_TYPE_TO_KEYWORD_CASE(demangle); - ACCESS_TYPE_TO_KEYWORD_CASE(INTROSPECTION); - - ACCESS_TYPE_TO_KEYWORD_CASE(file); - ACCESS_TYPE_TO_KEYWORD_CASE(url); - ACCESS_TYPE_TO_KEYWORD_CASE(input); - ACCESS_TYPE_TO_KEYWORD_CASE(values); - ACCESS_TYPE_TO_KEYWORD_CASE(numbers); - ACCESS_TYPE_TO_KEYWORD_CASE(zeros); - ACCESS_TYPE_TO_KEYWORD_CASE(merge); - ACCESS_TYPE_TO_KEYWORD_CASE(remote); - ACCESS_TYPE_TO_KEYWORD_CASE(mysql); - ACCESS_TYPE_TO_KEYWORD_CASE(odbc); - ACCESS_TYPE_TO_KEYWORD_CASE(jdbc); - ACCESS_TYPE_TO_KEYWORD_CASE(hdfs); - ACCESS_TYPE_TO_KEYWORD_CASE(s3); - 
ACCESS_TYPE_TO_KEYWORD_CASE(TABLE_FUNCTIONS); - -#undef ACCESS_TYPE_TO_KEYWORD_CASE - } - - std::array access_type_to_keyword_mapping; + Strings access_type_to_keyword_mapping; }; } diff --git a/src/Access/ContextAccess.cpp b/src/Access/ContextAccess.cpp index 4c690956358..0d0c23632f8 100644 --- a/src/Access/ContextAccess.cpp +++ b/src/Access/ContextAccess.cpp @@ -404,14 +404,16 @@ boost::shared_ptr ContextAccess::calculateResultAccess(bool static const AccessFlags table_ddl = AccessType::CREATE_DATABASE | AccessType::CREATE_TABLE | AccessType::CREATE_VIEW | AccessType::ALTER_TABLE | AccessType::ALTER_VIEW | AccessType::DROP_DATABASE | AccessType::DROP_TABLE | AccessType::DROP_VIEW | AccessType::TRUNCATE; + static const AccessFlags dictionary_ddl = AccessType::CREATE_DICTIONARY | AccessType::DROP_DICTIONARY; static const AccessFlags table_and_dictionary_ddl = table_ddl | dictionary_ddl; static const AccessFlags write_table_access = AccessType::INSERT | AccessType::OPTIMIZE; - static const AccessFlags all_dcl = AccessType::CREATE_USER | AccessType::CREATE_ROLE | AccessType::CREATE_POLICY - | AccessType::CREATE_QUOTA | AccessType::CREATE_SETTINGS_PROFILE | AccessType::ALTER_USER | AccessType::ALTER_ROLE - | AccessType::ALTER_POLICY | AccessType::ALTER_QUOTA | AccessType::ALTER_SETTINGS_PROFILE | AccessType::DROP_USER - | AccessType::DROP_ROLE | AccessType::DROP_POLICY | AccessType::DROP_QUOTA | AccessType::DROP_SETTINGS_PROFILE - | AccessType::ROLE_ADMIN; + + static const AccessFlags all_dcl = AccessType::CREATE_USER | AccessType::ALTER_USER | AccessType::DROP_USER + | AccessType::CREATE_ROLE | AccessType::ALTER_ROLE | AccessType::DROP_ROLE | AccessType::ROLE_ADMIN + | AccessType::CREATE_POLICY | AccessType::ALTER_POLICY | AccessType::DROP_POLICY + | AccessType::CREATE_SETTINGS_PROFILE | AccessType::ALTER_SETTINGS_PROFILE | AccessType::DROP_SETTINGS_PROFILE + | AccessType::CREATE_QUOTA | AccessType::ALTER_QUOTA | AccessType::DROP_QUOTA; if (readonly_) merged_access->revoke(write_table_access | all_dcl | table_and_dictionary_ddl | AccessType::SYSTEM | AccessType::KILL_QUERY); diff --git a/src/Functions/FunctionsExternalDictionaries.h b/src/Functions/FunctionsExternalDictionaries.h index e1c89dd7d53..fc3c2c583a9 100644 --- a/src/Functions/FunctionsExternalDictionaries.h +++ b/src/Functions/FunctionsExternalDictionaries.h @@ -128,7 +128,7 @@ private: auto dict = dictionaries_loader.getDictionary(dict_name_col->getValue()); const auto dict_ptr = dict.get(); - context.checkAccess(AccessType::dictHas, dict_ptr->getDatabaseOrNoDatabaseTag(), dict_ptr->getName()); + context.checkAccess(AccessType::dictGet, dict_ptr->getDatabaseOrNoDatabaseTag(), dict_ptr->getName()); if (!executeDispatchSimple(block, arguments, result, dict_ptr) && !executeDispatchSimple(block, arguments, result, dict_ptr) && @@ -1652,7 +1652,7 @@ private: auto dict = dictionaries_loader.getDictionary(dict_name_col->getValue()); const auto dict_ptr = dict.get(); - context.checkAccess(AccessType::dictGetHierarchy, dict_ptr->getDatabaseOrNoDatabaseTag(), dict_ptr->getName()); + context.checkAccess(AccessType::dictGet, dict_ptr->getDatabaseOrNoDatabaseTag(), dict_ptr->getName()); if (!executeDispatch(block, arguments, result, dict_ptr) && !executeDispatch(block, arguments, result, dict_ptr) && @@ -1816,7 +1816,7 @@ private: auto dict = dictionaries_loader.getDictionary(dict_name_col->getValue()); const auto dict_ptr = dict.get(); - context.checkAccess(AccessType::dictIsIn, dict_ptr->getDatabaseOrNoDatabaseTag(), dict_ptr->getName()); + 
context.checkAccess(AccessType::dictGet, dict_ptr->getDatabaseOrNoDatabaseTag(), dict_ptr->getName()); if (!executeDispatch(block, arguments, result, dict_ptr) && !executeDispatch(block, arguments, result, dict_ptr) diff --git a/src/Parsers/ParserGrantQuery.cpp b/src/Parsers/ParserGrantQuery.cpp index f8533c27d88..aaf3dca6d78 100644 --- a/src/Parsers/ParserGrantQuery.cpp +++ b/src/Parsers/ParserGrantQuery.cpp @@ -17,15 +17,6 @@ namespace ErrorCodes namespace { - bool parseRoundBrackets(IParser::Pos & pos, Expected & expected) - { - return IParserBase::wrapParseImpl(pos, [&] - { - return ParserToken{TokenType::OpeningRoundBracket}.ignore(pos, expected) - && ParserToken{TokenType::ClosingRoundBracket}.ignore(pos, expected); - }); - } - bool parseAccessFlags(IParser::Pos & pos, Expected & expected, AccessFlags & access_flags) { static constexpr auto is_one_of_access_type_words = [](IParser::Pos & pos_) @@ -63,7 +54,6 @@ namespace return false; } - parseRoundBrackets(pos, expected); return true; }); } From e5d8f0525175d6e7189ee9c01dbfe353b90c8586 Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Thu, 2 Apr 2020 21:31:59 +0300 Subject: [PATCH 163/484] Rename sql command "CREATE POLICY" -> "CREATE ROW POLICY", "CREATE POLICY" is now an alias. --- src/Access/AccessType.h | 6 ++--- src/Access/ContextAccess.cpp | 2 +- .../InterpreterCreateRowPolicyQuery.cpp | 2 +- .../InterpreterDropAccessEntityQuery.cpp | 2 +- src/Parsers/ASTCreateRowPolicyQuery.cpp | 6 ++--- .../test_disk_access_storage/test.py | 2 +- tests/integration/test_row_policy/test.py | 22 +++++++++---------- 7 files changed, 21 insertions(+), 21 deletions(-) diff --git a/src/Access/AccessType.h b/src/Access/AccessType.h index bf8526de535..1d073a4d5db 100644 --- a/src/Access/AccessType.h +++ b/src/Access/AccessType.h @@ -90,9 +90,9 @@ enum class AccessType M(ALTER_ROLE, "", GLOBAL, ALL) \ M(DROP_ROLE, "", GLOBAL, ALL) \ M(ROLE_ADMIN, "", GLOBAL, ALL) /* allows to grant and revoke the roles which are not granted to the current user with admin option */\ - M(CREATE_POLICY, "CREATE ROW POLICY", GLOBAL, ALL) \ - M(ALTER_POLICY, "ALTER ROW POLICY", GLOBAL, ALL) \ - M(DROP_POLICY, "DROP ROW POLICY", GLOBAL, ALL) \ + M(CREATE_ROW_POLICY, "CREATE POLICY", GLOBAL, ALL) \ + M(ALTER_ROW_POLICY, "ALTER POLICY", GLOBAL, ALL) \ + M(DROP_ROW_POLICY, "DROP POLICY", GLOBAL, ALL) \ M(CREATE_QUOTA, "", GLOBAL, ALL) \ M(ALTER_QUOTA, "", GLOBAL, ALL) \ M(DROP_QUOTA, "", GLOBAL, ALL) \ diff --git a/src/Access/ContextAccess.cpp b/src/Access/ContextAccess.cpp index 0d0c23632f8..812dc822e63 100644 --- a/src/Access/ContextAccess.cpp +++ b/src/Access/ContextAccess.cpp @@ -411,7 +411,7 @@ boost::shared_ptr ContextAccess::calculateResultAccess(bool static const AccessFlags all_dcl = AccessType::CREATE_USER | AccessType::ALTER_USER | AccessType::DROP_USER | AccessType::CREATE_ROLE | AccessType::ALTER_ROLE | AccessType::DROP_ROLE | AccessType::ROLE_ADMIN - | AccessType::CREATE_POLICY | AccessType::ALTER_POLICY | AccessType::DROP_POLICY + | AccessType::CREATE_ROW_POLICY | AccessType::ALTER_ROW_POLICY | AccessType::DROP_ROW_POLICY | AccessType::CREATE_SETTINGS_PROFILE | AccessType::ALTER_SETTINGS_PROFILE | AccessType::DROP_SETTINGS_PROFILE | AccessType::CREATE_QUOTA | AccessType::ALTER_QUOTA | AccessType::DROP_QUOTA; diff --git a/src/Interpreters/InterpreterCreateRowPolicyQuery.cpp b/src/Interpreters/InterpreterCreateRowPolicyQuery.cpp index 9ea47aba7bb..9f8cad51140 100644 --- a/src/Interpreters/InterpreterCreateRowPolicyQuery.cpp +++ 
b/src/Interpreters/InterpreterCreateRowPolicyQuery.cpp @@ -65,7 +65,7 @@ BlockIO InterpreterCreateRowPolicyQuery::execute() { const auto & query = query_ptr->as(); auto & access_control = context.getAccessControlManager(); - context.checkAccess(query.alter ? AccessType::ALTER_POLICY : AccessType::CREATE_POLICY); + context.checkAccess(query.alter ? AccessType::ALTER_ROW_POLICY : AccessType::CREATE_ROW_POLICY); std::optional roles_from_query; if (query.roles) diff --git a/src/Interpreters/InterpreterDropAccessEntityQuery.cpp b/src/Interpreters/InterpreterDropAccessEntityQuery.cpp index 12f33250188..191fa233097 100644 --- a/src/Interpreters/InterpreterDropAccessEntityQuery.cpp +++ b/src/Interpreters/InterpreterDropAccessEntityQuery.cpp @@ -37,7 +37,7 @@ namespace case Kind::USER: return AccessType::DROP_USER; case Kind::ROLE: return AccessType::DROP_ROLE; case Kind::QUOTA: return AccessType::DROP_QUOTA; - case Kind::ROW_POLICY: return AccessType::DROP_POLICY; + case Kind::ROW_POLICY: return AccessType::DROP_ROW_POLICY; case Kind::SETTINGS_PROFILE: return AccessType::DROP_SETTINGS_PROFILE; } __builtin_unreachable(); diff --git a/src/Parsers/ASTCreateRowPolicyQuery.cpp b/src/Parsers/ASTCreateRowPolicyQuery.cpp index ac3d859e66f..9102ec1da72 100644 --- a/src/Parsers/ASTCreateRowPolicyQuery.cpp +++ b/src/Parsers/ASTCreateRowPolicyQuery.cpp @@ -122,7 +122,7 @@ namespace String ASTCreateRowPolicyQuery::getID(char) const { - return "CREATE POLICY or ALTER POLICY query"; + return "CREATE ROW POLICY or ALTER ROW POLICY query"; } @@ -136,11 +136,11 @@ void ASTCreateRowPolicyQuery::formatImpl(const FormatSettings & settings, Format { if (attach) { - settings.ostr << (settings.hilite ? hilite_keyword : "") << "ATTACH POLICY"; + settings.ostr << (settings.hilite ? hilite_keyword : "") << "ATTACH ROW POLICY"; } else { - settings.ostr << (settings.hilite ? hilite_keyword : "") << (alter ? "ALTER POLICY" : "CREATE POLICY") + settings.ostr << (settings.hilite ? hilite_keyword : "") << (alter ? "ALTER ROW POLICY" : "CREATE ROW POLICY") << (settings.hilite ? 
hilite_none : ""); } diff --git a/tests/integration/test_disk_access_storage/test.py b/tests/integration/test_disk_access_storage/test.py index d5e1f283167..74c133314ea 100644 --- a/tests/integration/test_disk_access_storage/test.py +++ b/tests/integration/test_disk_access_storage/test.py @@ -40,7 +40,7 @@ def test_create(): def check(): assert instance.query("SHOW CREATE USER u1") == "CREATE USER u1 SETTINGS PROFILE s1\n" assert instance.query("SHOW CREATE USER u2") == "CREATE USER u2 HOST LOCAL DEFAULT ROLE rx\n" - assert instance.query("SHOW CREATE ROW POLICY p ON mydb.mytable") == "CREATE POLICY p ON mydb.mytable FOR SELECT USING a < 1000 TO u1, u2\n" + assert instance.query("SHOW CREATE ROW POLICY p ON mydb.mytable") == "CREATE ROW POLICY p ON mydb.mytable FOR SELECT USING a < 1000 TO u1, u2\n" assert instance.query("SHOW CREATE QUOTA q") == "CREATE QUOTA q KEYED BY \\'none\\' FOR INTERVAL 1 HOUR MAX QUERIES = 100 TO ALL EXCEPT rx\n" assert instance.query("SHOW GRANTS FOR u1") == "" assert instance.query("SHOW GRANTS FOR u2") == "GRANT rx TO u2\n" diff --git a/tests/integration/test_row_policy/test.py b/tests/integration/test_row_policy/test.py index 6db24f5799e..7087e6aafae 100644 --- a/tests/integration/test_row_policy/test.py +++ b/tests/integration/test_row_policy/test.py @@ -228,22 +228,22 @@ def test_dcl_introspection(): assert instance.query("SHOW POLICIES") == "another ON mydb.filtered_table1\nanother ON mydb.filtered_table2\nanother ON mydb.filtered_table3\nanother ON mydb.local\ndefault ON mydb.filtered_table1\ndefault ON mydb.filtered_table2\ndefault ON mydb.filtered_table3\ndefault ON mydb.local\n" assert instance.query("SHOW POLICIES CURRENT") == "default ON mydb.filtered_table1\ndefault ON mydb.filtered_table2\ndefault ON mydb.filtered_table3\ndefault ON mydb.local\n" - assert instance.query("SHOW CREATE POLICY default ON mydb.filtered_table1") == "CREATE POLICY default ON mydb.filtered_table1 FOR SELECT USING a = 1 TO default\n" - assert instance.query("SHOW CREATE POLICY default ON mydb.filtered_table2") == "CREATE POLICY default ON mydb.filtered_table2 FOR SELECT USING ((a + b) < 1) OR ((c - d) > 5) TO default\n" - assert instance.query("SHOW CREATE POLICY default ON mydb.filtered_table3") == "CREATE POLICY default ON mydb.filtered_table3 FOR SELECT USING c = 1 TO default\n" - assert instance.query("SHOW CREATE POLICY default ON mydb.local") == "CREATE POLICY default ON mydb.local FOR SELECT USING 1 TO default\n" + assert instance.query("SHOW CREATE POLICY default ON mydb.filtered_table1") == "CREATE ROW POLICY default ON mydb.filtered_table1 FOR SELECT USING a = 1 TO default\n" + assert instance.query("SHOW CREATE POLICY default ON mydb.filtered_table2") == "CREATE ROW POLICY default ON mydb.filtered_table2 FOR SELECT USING ((a + b) < 1) OR ((c - d) > 5) TO default\n" + assert instance.query("SHOW CREATE POLICY default ON mydb.filtered_table3") == "CREATE ROW POLICY default ON mydb.filtered_table3 FOR SELECT USING c = 1 TO default\n" + assert instance.query("SHOW CREATE POLICY default ON mydb.local") == "CREATE ROW POLICY default ON mydb.local FOR SELECT USING 1 TO default\n" copy_policy_xml('all_rows.xml') assert instance.query("SHOW POLICIES CURRENT") == "default ON mydb.filtered_table1\ndefault ON mydb.filtered_table2\ndefault ON mydb.filtered_table3\n" - assert instance.query("SHOW CREATE POLICY default ON mydb.filtered_table1") == "CREATE POLICY default ON mydb.filtered_table1 FOR SELECT USING 1 TO default\n" - assert instance.query("SHOW CREATE POLICY default 
ON mydb.filtered_table2") == "CREATE POLICY default ON mydb.filtered_table2 FOR SELECT USING 1 TO default\n" - assert instance.query("SHOW CREATE POLICY default ON mydb.filtered_table3") == "CREATE POLICY default ON mydb.filtered_table3 FOR SELECT USING 1 TO default\n" + assert instance.query("SHOW CREATE POLICY default ON mydb.filtered_table1") == "CREATE ROW POLICY default ON mydb.filtered_table1 FOR SELECT USING 1 TO default\n" + assert instance.query("SHOW CREATE POLICY default ON mydb.filtered_table2") == "CREATE ROW POLICY default ON mydb.filtered_table2 FOR SELECT USING 1 TO default\n" + assert instance.query("SHOW CREATE POLICY default ON mydb.filtered_table3") == "CREATE ROW POLICY default ON mydb.filtered_table3 FOR SELECT USING 1 TO default\n" copy_policy_xml('no_rows.xml') assert instance.query("SHOW POLICIES CURRENT") == "default ON mydb.filtered_table1\ndefault ON mydb.filtered_table2\ndefault ON mydb.filtered_table3\n" - assert instance.query("SHOW CREATE POLICY default ON mydb.filtered_table1") == "CREATE POLICY default ON mydb.filtered_table1 FOR SELECT USING NULL TO default\n" - assert instance.query("SHOW CREATE POLICY default ON mydb.filtered_table2") == "CREATE POLICY default ON mydb.filtered_table2 FOR SELECT USING NULL TO default\n" - assert instance.query("SHOW CREATE POLICY default ON mydb.filtered_table3") == "CREATE POLICY default ON mydb.filtered_table3 FOR SELECT USING NULL TO default\n" + assert instance.query("SHOW CREATE POLICY default ON mydb.filtered_table1") == "CREATE ROW POLICY default ON mydb.filtered_table1 FOR SELECT USING NULL TO default\n" + assert instance.query("SHOW CREATE POLICY default ON mydb.filtered_table2") == "CREATE ROW POLICY default ON mydb.filtered_table2 FOR SELECT USING NULL TO default\n" + assert instance.query("SHOW CREATE POLICY default ON mydb.filtered_table3") == "CREATE ROW POLICY default ON mydb.filtered_table3 FOR SELECT USING NULL TO default\n" copy_policy_xml('no_filters.xml') assert instance.query("SHOW POLICIES") == "" @@ -268,7 +268,7 @@ def test_dcl_management(): instance.query("ALTER POLICY pA ON mydb.filtered_table1 RENAME TO pB") assert instance.query("SELECT * FROM mydb.filtered_table1") == "1\t0\n" assert instance.query("SHOW POLICIES CURRENT ON mydb.filtered_table1") == "pB\n" - assert instance.query("SHOW CREATE POLICY pB ON mydb.filtered_table1") == "CREATE POLICY pB ON mydb.filtered_table1 FOR SELECT USING a > b TO default\n" + assert instance.query("SHOW CREATE POLICY pB ON mydb.filtered_table1") == "CREATE ROW POLICY pB ON mydb.filtered_table1 FOR SELECT USING a > b TO default\n" instance.query("DROP POLICY pB ON mydb.filtered_table1") assert instance.query("SELECT * FROM mydb.filtered_table1") == "0\t0\n0\t1\n1\t0\n1\t1\n" From f53b4ad3a8132a16715301d7b86e83ac406f6099 Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Thu, 2 Apr 2020 21:56:56 +0300 Subject: [PATCH 164/484] Replace access types "TRUNCATE_VIEW" and "TRUNCATE_TABLE" with "TRUNCATE". 
--- src/Access/AccessRights.cpp | 6 +----- src/Access/AccessType.h | 4 +--- src/Interpreters/InterpreterDropQuery.cpp | 4 ++-- 3 files changed, 4 insertions(+), 10 deletions(-) diff --git a/src/Access/AccessRights.cpp b/src/Access/AccessRights.cpp index 6f94cfac286..26af20c8a85 100644 --- a/src/Access/AccessRights.cpp +++ b/src/Access/AccessRights.cpp @@ -49,8 +49,7 @@ namespace const AccessFlags create_temporary_table_flag = AccessType::CREATE_TEMPORARY_TABLE; const AccessFlags alter_table_flag = AccessType::ALTER_TABLE; const AccessFlags alter_view_flag = AccessType::ALTER_VIEW; - const AccessFlags truncate_table_flag = AccessType::TRUNCATE_TABLE; - const AccessFlags truncate_view_flag = AccessType::TRUNCATE_VIEW; + const AccessFlags truncate_flag = AccessType::TRUNCATE; const AccessFlags drop_table_flag = AccessType::DROP_TABLE; const AccessFlags drop_view_flag = AccessType::DROP_VIEW; }; @@ -426,9 +425,6 @@ private: if (access & helper.alter_table_flag) implicit_access |= helper.alter_view_flag; - - if (access & helper.truncate_table_flag) - implicit_access |= helper.truncate_view_flag; } final_access = access | implicit_access; diff --git a/src/Access/AccessType.h b/src/Access/AccessType.h index 1d073a4d5db..7c8d18d53f6 100644 --- a/src/Access/AccessType.h +++ b/src/Access/AccessType.h @@ -76,9 +76,7 @@ enum class AccessType M(DROP_DICTIONARY, "", DICTIONARY, DROP) /* allows to execute {DROP|DETACH} DICTIONARY */\ M(DROP, "", GROUP, ALL) /* allows to execute {DROP|DETACH} */\ \ - M(TRUNCATE_VIEW, "", VIEW, TRUNCATE) \ - M(TRUNCATE_TABLE, "", TABLE, TRUNCATE) \ - M(TRUNCATE, "", GROUP, ALL) \ + M(TRUNCATE, "TRUNCATE TABLE", TABLE, ALL) \ M(OPTIMIZE, "OPTIMIZE TABLE", TABLE, ALL) \ \ M(KILL_QUERY, "", GLOBAL, ALL) /* allows to kill a query started by another user (anyone can kill his own queries) */\ diff --git a/src/Interpreters/InterpreterDropQuery.cpp b/src/Interpreters/InterpreterDropQuery.cpp index 42d9528abd5..e3f5d467f38 100644 --- a/src/Interpreters/InterpreterDropQuery.cpp +++ b/src/Interpreters/InterpreterDropQuery.cpp @@ -99,7 +99,7 @@ BlockIO InterpreterDropQuery::executeToTable( } else if (kind == ASTDropQuery::Kind::Truncate) { - context.checkAccess(table->isView() ? AccessType::TRUNCATE_VIEW : AccessType::TRUNCATE_TABLE, table_id); + context.checkAccess(AccessType::TRUNCATE, table_id); table->checkTableCanBeDropped(); /// If table was already dropped by anyone, an exception will be thrown @@ -316,7 +316,7 @@ AccessRightsElements InterpreterDropQuery::getRequiredAccessForDDLOnCluster() co if (drop.kind == ASTDropQuery::Kind::Drop) required_access.emplace_back(AccessType::DROP_TABLE | AccessType::DROP_VIEW, drop.database, drop.table); else if (drop.kind == ASTDropQuery::Kind::Truncate) - required_access.emplace_back(AccessType::TRUNCATE_TABLE | AccessType::TRUNCATE_VIEW, drop.database, drop.table); + required_access.emplace_back(AccessType::TRUNCATE, drop.database, drop.table); else if (drop.kind == ASTDropQuery::Kind::Detach) required_access.emplace_back(AccessType::DROP_TABLE | AccessType::DROP_VIEW, drop.database, drop.table); } From b4d7ef390c42464815b37681b5fb269b8a6ed6c5 Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Fri, 3 Apr 2020 14:54:50 +0300 Subject: [PATCH 165/484] Rename some access types: add ALTER and SYSTEM prefixes. 
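
With the new names, each ALTER sub-command and SYSTEM command carries its parent prefix, and the old spellings remain as aliases (see the alias strings in the M() lines below). A hedged sketch of the resulting grammar, assuming illustrative names u1 and mydb.t:

    GRANT ALTER UPDATE ON mydb.t TO u1;           -- old spelling: UPDATE
    GRANT ALTER MOVE PARTITION ON mydb.t TO u1;   -- old spellings: MOVE PARTITION, MOVE PART
    GRANT SYSTEM RELOAD DICTIONARY ON *.* TO u1;  -- old spellings: RELOAD DICTIONARY, RELOAD DICTIONARIES

Each pair should parse to the same flag, since the keyword-to-flags map is built from the primary keyword together with all of its aliases.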
--- src/Access/AccessRights.cpp | 17 ++- src/Access/AccessType.h | 116 ++++++++++-------- src/Interpreters/InterpreterAlterQuery.cpp | 50 ++++---- .../InterpreterKillQueryQuery.cpp | 2 +- src/Interpreters/InterpreterSystemQuery.cpp | 88 ++++++------- .../01073_grant_and_revoke.reference | 4 +- 6 files changed, 154 insertions(+), 123 deletions(-) diff --git a/src/Access/AccessRights.cpp b/src/Access/AccessRights.cpp index 26af20c8a85..9c3b5e36ec8 100644 --- a/src/Access/AccessRights.cpp +++ b/src/Access/AccessRights.cpp @@ -52,6 +52,10 @@ namespace const AccessFlags truncate_flag = AccessType::TRUNCATE; const AccessFlags drop_table_flag = AccessType::DROP_TABLE; const AccessFlags drop_view_flag = AccessType::DROP_VIEW; + const AccessFlags alter_ttl_flag = AccessType::ALTER_TTL; + const AccessFlags alter_materialize_ttl_flag = AccessType::ALTER_MATERIALIZE_TTL; + const AccessFlags system_reload_dictionary = AccessType::SYSTEM_RELOAD_DICTIONARY; + const AccessFlags system_reload_embedded_dictionaries = AccessType::SYSTEM_RELOAD_EMBEDDED_DICTIONARIES; }; std::string_view checkCurrentDatabase(const std::string_view & current_database) @@ -412,8 +416,14 @@ private: implicit_access |= helper.show_tables_flag; } - if ((level == GLOBAL_LEVEL) && ((access | max_access_among_children) & helper.create_table_flag)) - implicit_access |= helper.create_temporary_table_flag; + if (level == GLOBAL_LEVEL) + { + if ((access | max_access_among_children) & helper.create_table_flag) + implicit_access |= helper.create_temporary_table_flag; + + if (access & helper.system_reload_dictionary) + implicit_access |= helper.system_reload_embedded_dictionaries; + } if (level <= TABLE_LEVEL) { @@ -425,6 +435,9 @@ private: if (access & helper.alter_table_flag) implicit_access |= helper.alter_view_flag; + + if (access & helper.alter_ttl_flag) + implicit_access |= helper.alter_materialize_ttl_flag; } final_access = access | implicit_access; diff --git a/src/Access/AccessType.h b/src/Access/AccessType.h index 7c8d18d53f6..9aa9e29e026 100644 --- a/src/Access/AccessType.h +++ b/src/Access/AccessType.h @@ -18,68 +18,78 @@ enum class AccessType /// or specifies that the access type is a GROUP of other access types; /// parent_group_name is the name of the group containing this access type (or NONE if there is no such group). #define APPLY_FOR_ACCESS_TYPES(M) \ - M(SHOW_DATABASES, "", DATABASE, SHOW) /* allows to execute SHOW DATABASES, SHOW CREATE DATABASE, USE */\ - M(SHOW_TABLES, "", TABLE, SHOW) /* allows to execute SHOW TABLES, EXISTS
<table>, CHECK <table>
    */\ - M(SHOW_COLUMNS, "", COLUMN, SHOW) /* allows to execute SHOW CREATE TABLE, DESCRIBE */\ - M(SHOW_DICTIONARIES, "", DICTIONARY, SHOW) /* allows to execute SHOW DICTIONARIES, SHOW CREATE DICTIONARY, EXISTS */\ + M(SHOW_DATABASES, "", DATABASE, SHOW) /* allows to execute SHOW DATABASES, SHOW CREATE DATABASE, USE ; + implicitly enabled by any grant on the database */\ + M(SHOW_TABLES, "", TABLE, SHOW) /* allows to execute SHOW TABLES, EXISTS
<table>, CHECK <table>
    ; + implicitly enabled by any grant on the table */\ + M(SHOW_COLUMNS, "", COLUMN, SHOW) /* allows to execute SHOW CREATE TABLE, DESCRIBE; + implicitly enabled with any grant on the column */\ + M(SHOW_DICTIONARIES, "", DICTIONARY, SHOW) /* allows to execute SHOW DICTIONARIES, SHOW CREATE DICTIONARY, EXISTS ; + implicitly enabled by any grant on the dictionary */\ M(SHOW, "", GROUP, ALL) /* allows to execute SHOW, USE, EXISTS, CHECK, DESCRIBE */\ \ M(SELECT, "", COLUMN, ALL) \ M(INSERT, "", COLUMN, ALL) \ - M(UPDATE, "ALTER UPDATE", COLUMN, ALTER_TABLE) /* allows to execute ALTER UPDATE */\ - M(DELETE, "ALTER DELETE", COLUMN, ALTER_TABLE) /* allows to execute ALTER DELETE */\ + M(ALTER_UPDATE, "UPDATE", COLUMN, ALTER_TABLE) /* allows to execute ALTER UPDATE */\ + M(ALTER_DELETE, "DELETE", COLUMN, ALTER_TABLE) /* allows to execute ALTER DELETE */\ \ - M(ADD_COLUMN, "ALTER ADD COLUMN", COLUMN, ALTER_COLUMN) \ - M(MODIFY_COLUMN, "ALTER MODIFY COLUMN", COLUMN, ALTER_COLUMN) \ - M(DROP_COLUMN, "ALTER DROP COLUMN", COLUMN, ALTER_COLUMN) \ - M(COMMENT_COLUMN, "ALTER COMMENT COLUMN", COLUMN, ALTER_COLUMN) \ - M(CLEAR_COLUMN, "ALTER CLEAR COLUMN", COLUMN, ALTER_COLUMN) \ - M(RENAME_COLUMN, "ALTER RENAME COLUMN", COLUMN, ALTER_COLUMN) \ + M(ALTER_ADD_COLUMN, "ADD COLUMN", COLUMN, ALTER_COLUMN) \ + M(ALTER_MODIFY_COLUMN, "MODIFY COLUMN", COLUMN, ALTER_COLUMN) \ + M(ALTER_DROP_COLUMN, "DROP COLUMN", COLUMN, ALTER_COLUMN) \ + M(ALTER_COMMENT_COLUMN, "COMMENT COLUMN", COLUMN, ALTER_COLUMN) \ + M(ALTER_CLEAR_COLUMN, "CLEAR COLUMN", COLUMN, ALTER_COLUMN) \ + M(ALTER_RENAME_COLUMN, "RENAME COLUMN", COLUMN, ALTER_COLUMN) \ M(ALTER_COLUMN, "", GROUP, ALTER_TABLE) /* allow to execute ALTER {ADD|DROP|MODIFY...} COLUMN */\ \ - M(ALTER_ORDER_BY, "MODIFY ORDER BY, ALTER MODIFY ORDER BY", TABLE, INDEX) \ - M(ADD_INDEX, "ALTER ADD INDEX", TABLE, INDEX) \ - M(DROP_INDEX, "ALTER DROP INDEX", TABLE, INDEX) \ - M(MATERIALIZE_INDEX, "ALTER MATERIALIZE INDEX", TABLE, INDEX) \ - M(CLEAR_INDEX, "ALTER CLEAR INDEX", TABLE, INDEX) \ - M(INDEX, "ALTER INDEX", GROUP, ALTER_TABLE) /* allows to execute ALTER ORDER BY or ALTER {ADD|DROP...} INDEX */\ + M(ALTER_ORDER_BY, "ALTER MODIFY ORDER BY, MODIFY ORDER BY", TABLE, ALTER_INDEX) \ + M(ALTER_ADD_INDEX, "ADD INDEX", TABLE, ALTER_INDEX) \ + M(ALTER_DROP_INDEX, "DROP INDEX", TABLE, ALTER_INDEX) \ + M(ALTER_MATERIALIZE_INDEX, "MATERIALIZE INDEX", TABLE, ALTER_INDEX) \ + M(ALTER_CLEAR_INDEX, "CLEAR INDEX", TABLE, ALTER_INDEX) \ + M(ALTER_INDEX, "INDEX", GROUP, ALTER_TABLE) /* allows to execute ALTER ORDER BY or ALTER {ADD|DROP...} INDEX */\ \ - M(ADD_CONSTRAINT, "ALTER ADD CONSTRAINT", TABLE, CONSTRAINT) \ - M(DROP_CONSTRAINT, "ALTER DROP CONSTRAINT", TABLE, CONSTRAINT) \ - M(CONSTRAINT, "ALTER CONSTRAINT", GROUP, ALTER_TABLE) /* allows to execute ALTER {ADD|DROP} CONSTRAINT */\ + M(ALTER_ADD_CONSTRAINT, "ADD CONSTRAINT", TABLE, ALTER_CONSTRAINT) \ + M(ALTER_DROP_CONSTRAINT, "DROP CONSTRAINT", TABLE, ALTER_CONSTRAINT) \ + M(ALTER_CONSTRAINT, "CONSTRAINT", GROUP, ALTER_TABLE) /* allows to execute ALTER {ADD|DROP} CONSTRAINT */\ \ - M(MODIFY_TTL, "ALTER MODIFY TTL", TABLE, ALTER_TABLE) /* allows to execute ALTER MODIFY TTL */\ - M(MATERIALIZE_TTL, "ALTER MATERIALIZE TTL", TABLE, ALTER_TABLE) /* allows to execute ALTER MATERIALIZE TTL */\ - M(MODIFY_SETTING, "ALTER MODIFY SETTING", TABLE, ALTER_TABLE) /* allows to execute ALTER MODIFY SETTING */\ - M(MOVE_PARTITION, "ALTER MOVE PARTITION, MOVE PART, ALTER MOVE PART", TABLE, ALTER_TABLE) \ - M(FETCH_PARTITION, "ALTER FETCH 
PARTITION", TABLE, ALTER_TABLE) \ - M(FREEZE_PARTITION, "ALTER FREEZE PARTITION", TABLE, ALTER_TABLE) \ + M(ALTER_TTL, "ALTER MODIFY TTL, MODIFY TTL", TABLE, ALTER_TABLE) /* allows to execute ALTER MODIFY TTL */\ + M(ALTER_MATERIALIZE_TTL, "MATERIALIZE TTL", TABLE, ALTER_TABLE) /* allows to execute ALTER MATERIALIZE TTL; + enabled implicitly by the grant ALTER_TABLE */\ + M(ALTER_SETTINGS, "ALTER SETTING, ALTER MODIFY SETTING, MODIFY SETTING", TABLE, ALTER_TABLE) /* allows to execute ALTER MODIFY SETTING */\ + M(ALTER_MOVE_PARTITION, "ALTER MOVE PART, MOVE PARTITION, MOVE PART", TABLE, ALTER_TABLE) \ + M(ALTER_FETCH_PARTITION, "FETCH PARTITION", TABLE, ALTER_TABLE) \ + M(ALTER_FREEZE_PARTITION, "FREEZE PARTITION", TABLE, ALTER_TABLE) \ \ M(ALTER_TABLE, "", GROUP, ALTER) \ \ - M(REFRESH_VIEW, "ALTER LIVE VIEW REFRESH", VIEW, ALTER_VIEW) \ - M(MODIFY_VIEW_QUERY, "ALTER TABLE MODIFY QUERY", VIEW, ALTER_VIEW) \ - M(ALTER_VIEW, "", GROUP, ALTER) /* allows to execute ALTER LIVE VIEW REFRESH, ALTER TABLE MODIFY QUERY */\ + M(ALTER_VIEW_REFRESH, "ALTER LIVE VIEW REFRESH, REFRESH VIEW", VIEW, ALTER_VIEW) \ + M(ALTER_VIEW_MODIFY_QUERY, "ALTER TABLE MODIFY QUERY", VIEW, ALTER_VIEW) \ + M(ALTER_VIEW, "", GROUP, ALTER) /* allows to execute ALTER VIEW REFRESH, ALTER VIEW MODIFY QUERY; + implicitly enabled by the grant ALTER_TABLE */\ \ M(ALTER, "", GROUP, ALL) /* allows to execute ALTER {TABLE|LIVE VIEW} */\ \ M(CREATE_DATABASE, "", DATABASE, CREATE) /* allows to execute {CREATE|ATTACH} DATABASE */\ M(CREATE_TABLE, "", TABLE, CREATE) /* allows to execute {CREATE|ATTACH} {TABLE|VIEW} */\ - M(CREATE_VIEW, "", VIEW, CREATE) /* allows to execute {CREATE|ATTACH} VIEW */\ + M(CREATE_VIEW, "", VIEW, CREATE) /* allows to execute {CREATE|ATTACH} VIEW; + implicitly enabled by the grant CREATE_TABLE */\ M(CREATE_DICTIONARY, "", DICTIONARY, CREATE) /* allows to execute {CREATE|ATTACH} DICTIONARY */\ - M(CREATE_TEMPORARY_TABLE, "", GLOBAL, CREATE) /* allows to create and manipulate temporary tables */ \ + M(CREATE_TEMPORARY_TABLE, "", GLOBAL, CREATE) /* allows to create and manipulate temporary tables; + implicitly enabled by the grant CREATE_TABLE on any table */ \ M(CREATE, "", GROUP, ALL) /* allows to execute {CREATE|ATTACH} */ \ \ M(DROP_DATABASE, "", DATABASE, DROP) /* allows to execute {DROP|DETACH} DATABASE */\ M(DROP_TABLE, "", TABLE, DROP) /* allows to execute {DROP|DETACH} TABLE */\ - M(DROP_VIEW, "", VIEW, DROP) /* allows to execute {DROP|DETACH} TABLE for views */\ + M(DROP_VIEW, "", VIEW, DROP) /* allows to execute {DROP|DETACH} TABLE for views; + implicitly enabled by the grant DROP_TABLE */\ M(DROP_DICTIONARY, "", DICTIONARY, DROP) /* allows to execute {DROP|DETACH} DICTIONARY */\ M(DROP, "", GROUP, ALL) /* allows to execute {DROP|DETACH} */\ \ M(TRUNCATE, "TRUNCATE TABLE", TABLE, ALL) \ M(OPTIMIZE, "OPTIMIZE TABLE", TABLE, ALL) \ \ - M(KILL_QUERY, "", GLOBAL, ALL) /* allows to kill a query started by another user (anyone can kill his own queries) */\ + M(KILL_QUERY, "", GLOBAL, ALL) /* allows to kill a query started by another user + (anyone can kill his own queries) */\ \ M(CREATE_USER, "", GLOBAL, ALL) \ M(ALTER_USER, "", GLOBAL, ALL) \ @@ -98,21 +108,29 @@ enum class AccessType M(ALTER_SETTINGS_PROFILE, "ALTER PROFILE", GLOBAL, ALL) \ M(DROP_SETTINGS_PROFILE, "DROP PROFILE", GLOBAL, ALL) \ \ - M(SHUTDOWN, "SYSTEM SHUTDOWN, SYSTEM KILL", GLOBAL, SYSTEM) \ - M(DROP_CACHE, "SYSTEM DROP CACHE, DROP DNS CACHE, SYSTEM DROP DNS CACHE, DROP MARK CACHE, SYSTEM DROP MARK CACHE, DROP UNCOMPRESSED CACHE, 
SYSTEM DROP UNCOMPRESSED CACHE, DROP COMPILED EXPRESSION CACHE, SYSTEM DROP COMPILED EXPRESSION CACHE", GLOBAL, SYSTEM) \ - M(RELOAD_CONFIG, "SYSTEM RELOAD CONFIG", GLOBAL, SYSTEM) \ - M(RELOAD_DICTIONARY, "SYSTEM RELOAD DICTIONARY, RELOAD DICTIONARIES, SYSTEM RELOAD DICTIONARIES, RELOAD EMBEDDED DICTIONARIES, SYSTEM RELOAD EMBEDDED DICTIONARIES", GLOBAL, SYSTEM) \ - M(STOP_MERGES, "SYSTEM STOP MERGES, START MERGES, SYSTEM START MERGES", TABLE, SYSTEM) \ - M(STOP_TTL_MERGES, "SYSTEM STOP TTL MERGES, START TTL MERGES, SYSTEM START TTL MERGES", TABLE, SYSTEM) \ - M(STOP_FETCHES, "SYSTEM STOP FETCHES, START FETCHES, SYSTEM START FETCHES", TABLE, SYSTEM) \ - M(STOP_MOVES, "SYSTEM STOP MOVES, START MOVES, SYSTEM START MOVES", TABLE, SYSTEM) \ - M(STOP_DISTRIBUTED_SENDS, "SYSTEM STOP DISTRIBUTED SENDS, START DISTRIBUTED SENDS, SYSTEM START DISTRIBUTED SENDS", TABLE, SYSTEM) \ - M(STOP_REPLICATED_SENDS, "SYSTEM STOP REPLICATED SENDS, START REPLICATED SENDS, SYSTEM START REPLICATED SENDS", TABLE, SYSTEM) \ - M(STOP_REPLICATION_QUEUES, "SYSTEM STOP REPLICATION QUEUES, START REPLICATION QUEUES, SYSTEM START REPLICATION QUEUES", TABLE, SYSTEM) \ - M(SYNC_REPLICA, "SYSTEM SYNC REPLICA", TABLE, SYSTEM) \ - M(RESTART_REPLICA, "SYSTEM RESTART REPLICA", TABLE, SYSTEM) \ - M(FLUSH_DISTRIBUTED, "SYSTEM FLUSH DISTRIBUTED", TABLE, SYSTEM) \ - M(FLUSH_LOGS, "SYSTEM FLUSH LOGS", GLOBAL, SYSTEM) \ + M(SYSTEM_SHUTDOWN, "SYSTEM KILL, SHUTDOWN", GLOBAL, SYSTEM) \ + M(SYSTEM_DROP_DNS_CACHE, "SYSTEM DROP DNS, DROP DNS CACHE, DROP DNS", GLOBAL, SYSTEM_DROP_CACHE) \ + M(SYSTEM_DROP_MARK_CACHE, "SYSTEM DROP MARK, DROP MARK CACHE, DROP MARKS", GLOBAL, SYSTEM_DROP_CACHE) \ + M(SYSTEM_DROP_UNCOMPRESSED_CACHE, "SYSTEM DROP UNCOMPRESSED, DROP UNCOMPRESSED CACHE, DROP UNCOMPRESSED", GLOBAL, SYSTEM_DROP_CACHE) \ + M(SYSTEM_DROP_COMPILED_EXPRESSION_CACHE, "SYSTEM DROP COMPILED EXPRESSION, DROP COMPILED EXPRESSION CACHE, DROP COMPILED EXPRESSIONS", GLOBAL, SYSTEM_DROP_CACHE) \ + M(SYSTEM_DROP_CACHE, "DROP CACHE", GROUP, SYSTEM) \ + M(SYSTEM_RELOAD_CONFIG, "RELOAD CONFIG", GLOBAL, SYSTEM_RELOAD) \ + M(SYSTEM_RELOAD_DICTIONARY, "SYSTEM RELOAD DICTIONARIES, RELOAD DICTIONARY, RELOAD DICTIONARIES", GLOBAL, SYSTEM_RELOAD) \ + M(SYSTEM_RELOAD_EMBEDDED_DICTIONARIES, "RELOAD EMBEDDED DICTIONARIES", GLOBAL, SYSTEM_RELOAD) /* implicitly enabled by the grant SYSTEM_RELOAD_DICTIONARY ON *.* */\ + M(SYSTEM_RELOAD, "", GROUP, SYSTEM) \ + M(SYSTEM_MERGES, "SYSTEM STOP MERGES, SYSTEM START MERGES, STOP_MERGES, START MERGES", TABLE, SYSTEM) \ + M(SYSTEM_TTL_MERGES, "SYSTEM STOP TTL MERGES, SYSTEM START TTL MERGES, STOP TTL MERGES, START TTL MERGES", TABLE, SYSTEM) \ + M(SYSTEM_FETCHES, "SYSTEM STOP FETCHES, SYSTEM START FETCHES, STOP FETCHES, START FETCHES", TABLE, SYSTEM) \ + M(SYSTEM_MOVES, "SYSTEM STOP MOVES, SYSTEM START MOVES, STOP MOVES, START MOVES", TABLE, SYSTEM) \ + M(SYSTEM_DISTRIBUTED_SENDS, "SYSTEM STOP DISTRIBUTED SENDS, SYSTEM START DISTRIBUTED SENDS, STOP DISTRIBUTED SENDS, START DISTRIBUTED SENDS", TABLE, SYSTEM_SENDS) \ + M(SYSTEM_REPLICATED_SENDS, "SYSTEM STOP REPLICATED SENDS, SYSTEM START REPLICATED SENDS, STOP_REPLICATED_SENDS, START REPLICATED SENDS", TABLE, SYSTEM_SENDS) \ + M(SYSTEM_SENDS, "SYSTEM STOP SENDS, SYSTEM START SENDS, STOP SENDS, START SENDS", GROUP, SYSTEM) \ + M(SYSTEM_REPLICATION_QUEUES, "SYSTEM STOP REPLICATION QUEUES, SYSTEM START REPLICATION QUEUES, STOP_REPLICATION_QUEUES, START REPLICATION QUEUES", TABLE, SYSTEM) \ + M(SYSTEM_SYNC_REPLICA, "SYNC REPLICA", TABLE, SYSTEM) \ + M(SYSTEM_RESTART_REPLICA, 
"RESTART REPLICA", TABLE, SYSTEM) \ + M(SYSTEM_FLUSH_DISTRIBUTED, "FLUSH DISTRIBUTED", TABLE, SYSTEM_FLUSH) \ + M(SYSTEM_FLUSH_LOGS, "FLUSH LOGS", GLOBAL, SYSTEM_FLUSH) \ + M(SYSTEM_FLUSH, "", GROUP, SYSTEM) \ M(SYSTEM, "", GROUP, ALL) /* allows to execute SYSTEM {SHUTDOWN|RELOAD CONFIG|...} */ \ \ M(dictGet, "dictHas, dictGetHierarchy, dictIsIn", DICTIONARY, ALL) /* allows to execute functions dictGet(), dictHas(), dictGetHierarchy(), dictIsIn() */\ diff --git a/src/Interpreters/InterpreterAlterQuery.cpp b/src/Interpreters/InterpreterAlterQuery.cpp index 7411537601f..7c6b9678325 100644 --- a/src/Interpreters/InterpreterAlterQuery.cpp +++ b/src/Interpreters/InterpreterAlterQuery.cpp @@ -149,35 +149,35 @@ AccessRightsElements InterpreterAlterQuery::getRequiredAccessForCommand(const AS { case ASTAlterCommand::UPDATE: { - required_access.emplace_back(AccessType::UPDATE, database, table, column_names_from_update_assignments()); + required_access.emplace_back(AccessType::ALTER_UPDATE, database, table, column_names_from_update_assignments()); break; } case ASTAlterCommand::DELETE: { - required_access.emplace_back(AccessType::DELETE, database, table); + required_access.emplace_back(AccessType::ALTER_DELETE, database, table); break; } case ASTAlterCommand::ADD_COLUMN: { - required_access.emplace_back(AccessType::ADD_COLUMN, database, table, column_name_from_col_decl()); + required_access.emplace_back(AccessType::ALTER_ADD_COLUMN, database, table, column_name_from_col_decl()); break; } case ASTAlterCommand::DROP_COLUMN: { if (command.clear_column) - required_access.emplace_back(AccessType::CLEAR_COLUMN, database, table, column_name()); + required_access.emplace_back(AccessType::ALTER_CLEAR_COLUMN, database, table, column_name()); else - required_access.emplace_back(AccessType::DROP_COLUMN, database, table, column_name()); + required_access.emplace_back(AccessType::ALTER_DROP_COLUMN, database, table, column_name()); break; } case ASTAlterCommand::MODIFY_COLUMN: { - required_access.emplace_back(AccessType::MODIFY_COLUMN, database, table, column_name_from_col_decl()); + required_access.emplace_back(AccessType::ALTER_MODIFY_COLUMN, database, table, column_name_from_col_decl()); break; } case ASTAlterCommand::COMMENT_COLUMN: { - required_access.emplace_back(AccessType::COMMENT_COLUMN, database, table, column_name()); + required_access.emplace_back(AccessType::ALTER_COMMENT_COLUMN, database, table, column_name()); break; } case ASTAlterCommand::MODIFY_ORDER_BY: @@ -187,45 +187,45 @@ AccessRightsElements InterpreterAlterQuery::getRequiredAccessForCommand(const AS } case ASTAlterCommand::ADD_INDEX: { - required_access.emplace_back(AccessType::ADD_INDEX, database, table); + required_access.emplace_back(AccessType::ALTER_ADD_INDEX, database, table); break; } case ASTAlterCommand::DROP_INDEX: { if (command.clear_index) - required_access.emplace_back(AccessType::CLEAR_INDEX, database, table); + required_access.emplace_back(AccessType::ALTER_CLEAR_INDEX, database, table); else - required_access.emplace_back(AccessType::DROP_INDEX, database, table); + required_access.emplace_back(AccessType::ALTER_DROP_INDEX, database, table); break; } case ASTAlterCommand::MATERIALIZE_INDEX: { - required_access.emplace_back(AccessType::MATERIALIZE_INDEX, database, table); + required_access.emplace_back(AccessType::ALTER_MATERIALIZE_INDEX, database, table); break; } case ASTAlterCommand::ADD_CONSTRAINT: { - required_access.emplace_back(AccessType::ADD_CONSTRAINT, database, table); + 
required_access.emplace_back(AccessType::ALTER_ADD_CONSTRAINT, database, table); break; } case ASTAlterCommand::DROP_CONSTRAINT: { - required_access.emplace_back(AccessType::DROP_CONSTRAINT, database, table); + required_access.emplace_back(AccessType::ALTER_DROP_CONSTRAINT, database, table); break; } case ASTAlterCommand::MODIFY_TTL: { - required_access.emplace_back(AccessType::MODIFY_TTL, database, table); + required_access.emplace_back(AccessType::ALTER_TTL, database, table); break; } case ASTAlterCommand::MATERIALIZE_TTL: { - required_access.emplace_back(AccessType::MATERIALIZE_TTL, database, table); + required_access.emplace_back(AccessType::ALTER_MATERIALIZE_TTL, database, table); break; } case ASTAlterCommand::MODIFY_SETTING: { - required_access.emplace_back(AccessType::MODIFY_SETTING, database, table); + required_access.emplace_back(AccessType::ALTER_SETTINGS, database, table); break; } case ASTAlterCommand::ATTACH_PARTITION: @@ -236,7 +236,7 @@ AccessRightsElements InterpreterAlterQuery::getRequiredAccessForCommand(const AS case ASTAlterCommand::DROP_PARTITION: [[fallthrough]]; case ASTAlterCommand::DROP_DETACHED_PARTITION: { - required_access.emplace_back(AccessType::DELETE, database, table); + required_access.emplace_back(AccessType::ALTER_DELETE, database, table); break; } case ASTAlterCommand::MOVE_PARTITION: @@ -244,11 +244,11 @@ AccessRightsElements InterpreterAlterQuery::getRequiredAccessForCommand(const AS if ((command.move_destination_type == PartDestinationType::DISK) || (command.move_destination_type == PartDestinationType::VOLUME)) { - required_access.emplace_back(AccessType::MOVE_PARTITION, database, table); + required_access.emplace_back(AccessType::ALTER_MOVE_PARTITION, database, table); } else if (command.move_destination_type == PartDestinationType::TABLE) { - required_access.emplace_back(AccessType::SELECT | AccessType::DELETE, database, table); + required_access.emplace_back(AccessType::SELECT | AccessType::ALTER_DELETE, database, table); required_access.emplace_back(AccessType::INSERT, command.to_database, command.to_table); } break; @@ -256,33 +256,33 @@ AccessRightsElements InterpreterAlterQuery::getRequiredAccessForCommand(const AS case ASTAlterCommand::REPLACE_PARTITION: { required_access.emplace_back(AccessType::SELECT, command.from_database, command.from_table); - required_access.emplace_back(AccessType::DELETE | AccessType::INSERT, database, table); + required_access.emplace_back(AccessType::ALTER_DELETE | AccessType::INSERT, database, table); break; } case ASTAlterCommand::FETCH_PARTITION: { - required_access.emplace_back(AccessType::FETCH_PARTITION, database, table); + required_access.emplace_back(AccessType::ALTER_FETCH_PARTITION, database, table); break; } case ASTAlterCommand::FREEZE_PARTITION: [[fallthrough]]; case ASTAlterCommand::FREEZE_ALL: { - required_access.emplace_back(AccessType::FREEZE_PARTITION, database, table); + required_access.emplace_back(AccessType::ALTER_FREEZE_PARTITION, database, table); break; } case ASTAlterCommand::MODIFY_QUERY: { - required_access.emplace_back(AccessType::MODIFY_VIEW_QUERY, database, table); + required_access.emplace_back(AccessType::ALTER_VIEW_MODIFY_QUERY, database, table); break; } case ASTAlterCommand::LIVE_VIEW_REFRESH: { - required_access.emplace_back(AccessType::REFRESH_VIEW, database, table); + required_access.emplace_back(AccessType::ALTER_VIEW_REFRESH, database, table); break; } case ASTAlterCommand::RENAME_COLUMN: { - required_access.emplace_back(AccessType::RENAME_COLUMN, database, table, 
column_name()); + required_access.emplace_back(AccessType::ALTER_RENAME_COLUMN, database, table, column_name()); break; } case ASTAlterCommand::NO_TYPE: break; diff --git a/src/Interpreters/InterpreterKillQueryQuery.cpp b/src/Interpreters/InterpreterKillQueryQuery.cpp index 196b2b4eef1..b23d88524e1 100644 --- a/src/Interpreters/InterpreterKillQueryQuery.cpp +++ b/src/Interpreters/InterpreterKillQueryQuery.cpp @@ -319,7 +319,7 @@ AccessRightsElements InterpreterKillQueryQuery::getRequiredAccessForDDLOnCluster if (query.type == ASTKillQueryQuery::Type::Query) required_access.emplace_back(AccessType::KILL_QUERY); else if (query.type == ASTKillQueryQuery::Type::Mutation) - required_access.emplace_back(AccessType::UPDATE | AccessType::DELETE | AccessType::MATERIALIZE_INDEX | AccessType::MATERIALIZE_TTL); + required_access.emplace_back(AccessType::ALTER_UPDATE | AccessType::ALTER_DELETE | AccessType::ALTER_MATERIALIZE_INDEX | AccessType::ALTER_MATERIALIZE_TTL); return required_access; } diff --git a/src/Interpreters/InterpreterSystemQuery.cpp b/src/Interpreters/InterpreterSystemQuery.cpp index 9a7d6ae7c5a..056959d372c 100644 --- a/src/Interpreters/InterpreterSystemQuery.cpp +++ b/src/Interpreters/InterpreterSystemQuery.cpp @@ -102,19 +102,19 @@ void executeCommandsAndThrowIfError(Callables && ... commands) AccessType getRequiredAccessType(StorageActionBlockType action_type) { if (action_type == ActionLocks::PartsMerge) - return AccessType::STOP_MERGES; + return AccessType::SYSTEM_MERGES; else if (action_type == ActionLocks::PartsFetch) - return AccessType::STOP_FETCHES; + return AccessType::SYSTEM_FETCHES; else if (action_type == ActionLocks::PartsSend) - return AccessType::STOP_REPLICATED_SENDS; + return AccessType::SYSTEM_REPLICATED_SENDS; else if (action_type == ActionLocks::ReplicationQueue) - return AccessType::STOP_REPLICATION_QUEUES; + return AccessType::SYSTEM_REPLICATION_QUEUES; else if (action_type == ActionLocks::DistributedSend) - return AccessType::STOP_DISTRIBUTED_SENDS; + return AccessType::SYSTEM_DISTRIBUTED_SENDS; else if (action_type == ActionLocks::PartsTTLMerge) - return AccessType::STOP_TTL_MERGES; + return AccessType::SYSTEM_TTL_MERGES; else if (action_type == ActionLocks::PartsMove) - return AccessType::STOP_MOVES; + return AccessType::SYSTEM_MOVES; else throw Exception("Unknown action type: " + std::to_string(action_type), ErrorCodes::LOGICAL_ERROR); } @@ -183,42 +183,42 @@ BlockIO InterpreterSystemQuery::execute() switch (query.type) { case Type::SHUTDOWN: - context.checkAccess(AccessType::SHUTDOWN); + context.checkAccess(AccessType::SYSTEM_SHUTDOWN); if (kill(0, SIGTERM)) throwFromErrno("System call kill(0, SIGTERM) failed", ErrorCodes::CANNOT_KILL); break; case Type::KILL: - context.checkAccess(AccessType::SHUTDOWN); + context.checkAccess(AccessType::SYSTEM_SHUTDOWN); if (kill(0, SIGKILL)) throwFromErrno("System call kill(0, SIGKILL) failed", ErrorCodes::CANNOT_KILL); break; case Type::DROP_DNS_CACHE: - context.checkAccess(AccessType::DROP_CACHE); + context.checkAccess(AccessType::SYSTEM_DROP_DNS_CACHE); DNSResolver::instance().dropCache(); /// Reinitialize clusters to update their resolved_addresses system_context.reloadClusterConfig(); break; case Type::DROP_MARK_CACHE: - context.checkAccess(AccessType::DROP_CACHE); + context.checkAccess(AccessType::SYSTEM_DROP_MARK_CACHE); system_context.dropMarkCache(); break; case Type::DROP_UNCOMPRESSED_CACHE: - context.checkAccess(AccessType::DROP_CACHE); + context.checkAccess(AccessType::SYSTEM_DROP_UNCOMPRESSED_CACHE); 
system_context.dropUncompressedCache(); break; #if USE_EMBEDDED_COMPILER case Type::DROP_COMPILED_EXPRESSION_CACHE: - context.checkAccess(AccessType::DROP_CACHE); + context.checkAccess(AccessType::SYSTEM_DROP_COMPILED_EXPRESSION_CACHE); system_context.dropCompiledExpressionCache(); break; #endif case Type::RELOAD_DICTIONARY: - context.checkAccess(AccessType::RELOAD_DICTIONARY); + context.checkAccess(AccessType::SYSTEM_RELOAD_DICTIONARY); system_context.getExternalDictionariesLoader().loadOrReload(query.target_dictionary); ExternalDictionariesLoader::resetAll(); break; case Type::RELOAD_DICTIONARIES: - context.checkAccess(AccessType::RELOAD_DICTIONARY); + context.checkAccess(AccessType::SYSTEM_RELOAD_DICTIONARY); executeCommandsAndThrowIfError( [&] () { system_context.getExternalDictionariesLoader().reloadAllTriedToLoad(); }, [&] () { system_context.getEmbeddedDictionaries().reload(); } @@ -226,11 +226,11 @@ BlockIO InterpreterSystemQuery::execute() ExternalDictionariesLoader::resetAll(); break; case Type::RELOAD_EMBEDDED_DICTIONARIES: - context.checkAccess(AccessType::RELOAD_DICTIONARY); + context.checkAccess(AccessType::SYSTEM_RELOAD_EMBEDDED_DICTIONARIES); system_context.getEmbeddedDictionaries().reload(); break; case Type::RELOAD_CONFIG: - context.checkAccess(AccessType::RELOAD_CONFIG); + context.checkAccess(AccessType::SYSTEM_RELOAD_CONFIG); system_context.reloadConfig(); break; case Type::STOP_MERGES: @@ -290,7 +290,7 @@ BlockIO InterpreterSystemQuery::execute() ErrorCodes::BAD_ARGUMENTS); break; case Type::FLUSH_LOGS: - context.checkAccess(AccessType::FLUSH_LOGS); + context.checkAccess(AccessType::SYSTEM_FLUSH_LOGS); executeCommandsAndThrowIfError( [&] () { if (auto query_log = context.getQueryLog()) query_log->flush(); }, [&] () { if (auto part_log = context.getPartLog("")) part_log->flush(); }, @@ -313,7 +313,7 @@ BlockIO InterpreterSystemQuery::execute() StoragePtr InterpreterSystemQuery::tryRestartReplica(const StorageID & replica, Context & system_context, bool need_ddl_guard) { - context.checkAccess(AccessType::RESTART_REPLICA, replica); + context.checkAccess(AccessType::SYSTEM_RESTART_REPLICA, replica); auto table_ddl_guard = need_ddl_guard ? 
DatabaseCatalog::instance().getDDLGuard(replica.getDatabaseName(), replica.getTableName()) : nullptr; auto [database, table] = DatabaseCatalog::instance().tryGetDatabaseAndTable(replica); @@ -387,7 +387,7 @@ void InterpreterSystemQuery::restartReplicas(Context & system_context) void InterpreterSystemQuery::syncReplica(ASTSystemQuery &) { - context.checkAccess(AccessType::SYNC_REPLICA, table_id); + context.checkAccess(AccessType::SYSTEM_SYNC_REPLICA, table_id); StoragePtr table = DatabaseCatalog::instance().getTable(table_id); if (auto storage_replicated = dynamic_cast(table.get())) @@ -408,7 +408,7 @@ void InterpreterSystemQuery::syncReplica(ASTSystemQuery &) void InterpreterSystemQuery::flushDistributed(ASTSystemQuery &) { - context.checkAccess(AccessType::FLUSH_DISTRIBUTED, table_id); + context.checkAccess(AccessType::SYSTEM_FLUSH_DISTRIBUTED, table_id); if (auto storage_distributed = dynamic_cast(DatabaseCatalog::instance().getTable(table_id).get())) storage_distributed->flushClusterNodesAllData(); @@ -427,7 +427,7 @@ AccessRightsElements InterpreterSystemQuery::getRequiredAccessForDDLOnCluster() case Type::SHUTDOWN: [[fallthrough]]; case Type::KILL: { - required_access.emplace_back(AccessType::SHUTDOWN); + required_access.emplace_back(AccessType::SYSTEM_SHUTDOWN); break; } case Type::DROP_DNS_CACHE: [[fallthrough]]; @@ -437,107 +437,107 @@ AccessRightsElements InterpreterSystemQuery::getRequiredAccessForDDLOnCluster() #endif case Type::DROP_UNCOMPRESSED_CACHE: { - required_access.emplace_back(AccessType::DROP_CACHE); + required_access.emplace_back(AccessType::SYSTEM_DROP_CACHE); break; } case Type::RELOAD_DICTIONARY: [[fallthrough]]; case Type::RELOAD_DICTIONARIES: [[fallthrough]]; case Type::RELOAD_EMBEDDED_DICTIONARIES: { - required_access.emplace_back(AccessType::RELOAD_DICTIONARY); + required_access.emplace_back(AccessType::SYSTEM_RELOAD_DICTIONARY); break; } case Type::RELOAD_CONFIG: { - required_access.emplace_back(AccessType::RELOAD_CONFIG); + required_access.emplace_back(AccessType::SYSTEM_RELOAD_CONFIG); break; } case Type::STOP_MERGES: [[fallthrough]]; case Type::START_MERGES: { if (query.table.empty()) - required_access.emplace_back(AccessType::STOP_MERGES); + required_access.emplace_back(AccessType::SYSTEM_MERGES); else - required_access.emplace_back(AccessType::STOP_MERGES, query.database, query.table); + required_access.emplace_back(AccessType::SYSTEM_MERGES, query.database, query.table); break; } case Type::STOP_TTL_MERGES: [[fallthrough]]; case Type::START_TTL_MERGES: { if (query.table.empty()) - required_access.emplace_back(AccessType::STOP_TTL_MERGES); + required_access.emplace_back(AccessType::SYSTEM_TTL_MERGES); else - required_access.emplace_back(AccessType::STOP_TTL_MERGES, query.database, query.table); + required_access.emplace_back(AccessType::SYSTEM_TTL_MERGES, query.database, query.table); break; } case Type::STOP_MOVES: [[fallthrough]]; case Type::START_MOVES: { if (query.table.empty()) - required_access.emplace_back(AccessType::STOP_MOVES); + required_access.emplace_back(AccessType::SYSTEM_MOVES); else - required_access.emplace_back(AccessType::STOP_MOVES, query.database, query.table); + required_access.emplace_back(AccessType::SYSTEM_MOVES, query.database, query.table); break; } case Type::STOP_FETCHES: [[fallthrough]]; case Type::START_FETCHES: { if (query.table.empty()) - required_access.emplace_back(AccessType::STOP_FETCHES); + required_access.emplace_back(AccessType::SYSTEM_FETCHES); else - required_access.emplace_back(AccessType::STOP_FETCHES, 
query.database, query.table); + required_access.emplace_back(AccessType::SYSTEM_FETCHES, query.database, query.table); break; } case Type::STOP_DISTRIBUTED_SENDS: [[fallthrough]]; case Type::START_DISTRIBUTED_SENDS: { if (query.table.empty()) - required_access.emplace_back(AccessType::STOP_DISTRIBUTED_SENDS); + required_access.emplace_back(AccessType::SYSTEM_DISTRIBUTED_SENDS); else - required_access.emplace_back(AccessType::STOP_DISTRIBUTED_SENDS, query.database, query.table); + required_access.emplace_back(AccessType::SYSTEM_DISTRIBUTED_SENDS, query.database, query.table); break; } case Type::STOP_REPLICATED_SENDS: [[fallthrough]]; case Type::START_REPLICATED_SENDS: { if (query.table.empty()) - required_access.emplace_back(AccessType::STOP_REPLICATED_SENDS); + required_access.emplace_back(AccessType::SYSTEM_REPLICATED_SENDS); else - required_access.emplace_back(AccessType::STOP_REPLICATED_SENDS, query.database, query.table); + required_access.emplace_back(AccessType::SYSTEM_REPLICATED_SENDS, query.database, query.table); break; } case Type::STOP_REPLICATION_QUEUES: [[fallthrough]]; case Type::START_REPLICATION_QUEUES: { if (query.table.empty()) - required_access.emplace_back(AccessType::STOP_REPLICATION_QUEUES); + required_access.emplace_back(AccessType::SYSTEM_REPLICATION_QUEUES); else - required_access.emplace_back(AccessType::STOP_REPLICATION_QUEUES, query.database, query.table); + required_access.emplace_back(AccessType::SYSTEM_REPLICATION_QUEUES, query.database, query.table); break; } case Type::SYNC_REPLICA: { - required_access.emplace_back(AccessType::SYNC_REPLICA, query.database, query.table); + required_access.emplace_back(AccessType::SYSTEM_SYNC_REPLICA, query.database, query.table); break; } case Type::RESTART_REPLICA: { - required_access.emplace_back(AccessType::RESTART_REPLICA, query.database, query.table); + required_access.emplace_back(AccessType::SYSTEM_RESTART_REPLICA, query.database, query.table); break; } case Type::RESTART_REPLICAS: { - required_access.emplace_back(AccessType::RESTART_REPLICA); + required_access.emplace_back(AccessType::SYSTEM_RESTART_REPLICA); break; } case Type::FLUSH_DISTRIBUTED: { - required_access.emplace_back(AccessType::FLUSH_DISTRIBUTED, query.database, query.table); + required_access.emplace_back(AccessType::SYSTEM_FLUSH_DISTRIBUTED, query.database, query.table); break; } case Type::FLUSH_LOGS: { - required_access.emplace_back(AccessType::FLUSH_LOGS); + required_access.emplace_back(AccessType::SYSTEM_FLUSH_LOGS); break; } case Type::STOP_LISTEN_QUERIES: break; diff --git a/tests/queries/0_stateless/01073_grant_and_revoke.reference b/tests/queries/0_stateless/01073_grant_and_revoke.reference index 4aad0ca65f1..d7d97fa28fe 100644 --- a/tests/queries/0_stateless/01073_grant_and_revoke.reference +++ b/tests/queries/0_stateless/01073_grant_and_revoke.reference @@ -1,11 +1,11 @@ CREATE USER test_user_01073 A B -GRANT DELETE, INSERT ON *.* TO test_user_01073 +GRANT ALTER DELETE, INSERT ON *.* TO test_user_01073 GRANT SELECT ON db1.* TO test_user_01073 GRANT SELECT ON db2.table TO test_user_01073 GRANT SELECT(col1) ON db3.table TO test_user_01073 GRANT SELECT(col1, col2) ON db4.table TO test_user_01073 C -GRANT DELETE ON *.* TO test_user_01073 +GRANT ALTER DELETE ON *.* TO test_user_01073 GRANT SELECT(col1) ON db4.table TO test_user_01073 From 423fa5087a0608507e730a16dcb56465ad415856 Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Sun, 5 Apr 2020 19:28:52 +0300 Subject: [PATCH 166/484] Add SHOW_USERS(SHOW ROLES, etc.) privileges. 
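These privileges gate SHOW CREATE USER/ROLE/ROW POLICY/QUOTA/SETTINGS PROFILE and the corresponding system tables (system.quotas, system.quota_usage, system.row_policies). A minimal usage sketch, assuming a role named auditor; the role and user names are illustrative and not part of this patch:

    GRANT SHOW USERS, SHOW ROLES, SHOW ROW POLICIES, SHOW QUOTAS, SHOW SETTINGS PROFILES ON *.* TO auditor;
    SHOW CREATE USER some_user; -- now checked against the SHOW USERS privilege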
--- src/Access/AccessType.h | 39 +++++++++++-------- src/Access/ContextAccess.cpp | 8 +--- src/Access/UsersConfigAccessStorage.cpp | 9 ++++- ...InterpreterShowCreateAccessEntityQuery.cpp | 17 ++++++++ .../InterpreterShowCreateAccessEntityQuery.h | 2 + .../System/StorageSystemQuotaUsage.cpp | 3 ++ src/Storages/System/StorageSystemQuotas.cpp | 3 ++ .../System/StorageSystemRowPolicies.cpp | 2 + 8 files changed, 59 insertions(+), 24 deletions(-) diff --git a/src/Access/AccessType.h b/src/Access/AccessType.h index 9aa9e29e026..e242000635d 100644 --- a/src/Access/AccessType.h +++ b/src/Access/AccessType.h @@ -91,22 +91,29 @@ enum class AccessType M(KILL_QUERY, "", GLOBAL, ALL) /* allows to kill a query started by another user (anyone can kill his own queries) */\ \ - M(CREATE_USER, "", GLOBAL, ALL) \ - M(ALTER_USER, "", GLOBAL, ALL) \ - M(DROP_USER, "", GLOBAL, ALL) \ - M(CREATE_ROLE, "", GLOBAL, ALL) \ - M(ALTER_ROLE, "", GLOBAL, ALL) \ - M(DROP_ROLE, "", GLOBAL, ALL) \ - M(ROLE_ADMIN, "", GLOBAL, ALL) /* allows to grant and revoke the roles which are not granted to the current user with admin option */\ - M(CREATE_ROW_POLICY, "CREATE POLICY", GLOBAL, ALL) \ - M(ALTER_ROW_POLICY, "ALTER POLICY", GLOBAL, ALL) \ - M(DROP_ROW_POLICY, "DROP POLICY", GLOBAL, ALL) \ - M(CREATE_QUOTA, "", GLOBAL, ALL) \ - M(ALTER_QUOTA, "", GLOBAL, ALL) \ - M(DROP_QUOTA, "", GLOBAL, ALL) \ - M(CREATE_SETTINGS_PROFILE, "CREATE PROFILE", GLOBAL, ALL) \ - M(ALTER_SETTINGS_PROFILE, "ALTER PROFILE", GLOBAL, ALL) \ - M(DROP_SETTINGS_PROFILE, "DROP PROFILE", GLOBAL, ALL) \ + M(CREATE_USER, "", GLOBAL, ACCESS_MANAGEMENT) \ + M(ALTER_USER, "", GLOBAL, ACCESS_MANAGEMENT) \ + M(DROP_USER, "", GLOBAL, ACCESS_MANAGEMENT) \ + M(CREATE_ROLE, "", GLOBAL, ACCESS_MANAGEMENT) \ + M(ALTER_ROLE, "", GLOBAL, ACCESS_MANAGEMENT) \ + M(DROP_ROLE, "", GLOBAL, ACCESS_MANAGEMENT) \ + M(ROLE_ADMIN, "", GLOBAL, ACCESS_MANAGEMENT) /* allows to grant and revoke the roles which are not granted to the current user with admin option */\ + M(CREATE_ROW_POLICY, "CREATE POLICY", GLOBAL, ACCESS_MANAGEMENT) \ + M(ALTER_ROW_POLICY, "ALTER POLICY", GLOBAL, ACCESS_MANAGEMENT) \ + M(DROP_ROW_POLICY, "DROP POLICY", GLOBAL, ACCESS_MANAGEMENT) \ + M(CREATE_QUOTA, "", GLOBAL, ACCESS_MANAGEMENT) \ + M(ALTER_QUOTA, "", GLOBAL, ACCESS_MANAGEMENT) \ + M(DROP_QUOTA, "", GLOBAL, ACCESS_MANAGEMENT) \ + M(CREATE_SETTINGS_PROFILE, "CREATE PROFILE", GLOBAL, ACCESS_MANAGEMENT) \ + M(ALTER_SETTINGS_PROFILE, "ALTER PROFILE", GLOBAL, ACCESS_MANAGEMENT) \ + M(DROP_SETTINGS_PROFILE, "DROP PROFILE", GLOBAL, ACCESS_MANAGEMENT) \ + M(SHOW_USERS, "SHOW CREATE USER", GLOBAL, SHOW_ACCESS) \ + M(SHOW_ROLES, "SHOW CREATE ROLE", GLOBAL, SHOW_ACCESS) \ + M(SHOW_ROW_POLICIES, "SHOW POLICIES, SHOW CREATE ROW POLICY, SHOW CREATE POLICY", GLOBAL, SHOW_ACCESS) \ + M(SHOW_QUOTAS, "SHOW CREATE QUOTA", GLOBAL, SHOW_ACCESS) \ + M(SHOW_SETTINGS_PROFILES, "SHOW PROFILES, SHOW CREATE SETTINGS PROFILE, SHOW CREATE PROFILE", GLOBAL, SHOW_ACCESS) \ + M(SHOW_ACCESS, "", GROUP, ACCESS_MANAGEMENT) \ + M(ACCESS_MANAGEMENT, "", GROUP, ALL) \ \ M(SYSTEM_SHUTDOWN, "SYSTEM KILL, SHUTDOWN", GLOBAL, SYSTEM) \ M(SYSTEM_DROP_DNS_CACHE, "SYSTEM DROP DNS, DROP DNS CACHE, DROP DNS", GLOBAL, SYSTEM_DROP_CACHE) \ diff --git a/src/Access/ContextAccess.cpp b/src/Access/ContextAccess.cpp index 812dc822e63..d6c7abfd4ba 100644 --- a/src/Access/ContextAccess.cpp +++ b/src/Access/ContextAccess.cpp @@ -409,14 +409,8 @@ boost::shared_ptr ContextAccess::calculateResultAccess(bool static const AccessFlags table_and_dictionary_ddl = 
table_ddl | dictionary_ddl; static const AccessFlags write_table_access = AccessType::INSERT | AccessType::OPTIMIZE; - static const AccessFlags all_dcl = AccessType::CREATE_USER | AccessType::ALTER_USER | AccessType::DROP_USER - | AccessType::CREATE_ROLE | AccessType::ALTER_ROLE | AccessType::DROP_ROLE | AccessType::ROLE_ADMIN - | AccessType::CREATE_ROW_POLICY | AccessType::ALTER_ROW_POLICY | AccessType::DROP_ROW_POLICY - | AccessType::CREATE_SETTINGS_PROFILE | AccessType::ALTER_SETTINGS_PROFILE | AccessType::DROP_SETTINGS_PROFILE - | AccessType::CREATE_QUOTA | AccessType::ALTER_QUOTA | AccessType::DROP_QUOTA; - if (readonly_) - merged_access->revoke(write_table_access | all_dcl | table_and_dictionary_ddl | AccessType::SYSTEM | AccessType::KILL_QUERY); + merged_access->revoke(write_table_access | table_and_dictionary_ddl | AccessType::SYSTEM | AccessType::KILL_QUERY | AccessType::ACCESS_MANAGEMENT); if (readonly_ == 1) { diff --git a/src/Access/UsersConfigAccessStorage.cpp b/src/Access/UsersConfigAccessStorage.cpp index 13102528108..0842839dec8 100644 --- a/src/Access/UsersConfigAccessStorage.cpp +++ b/src/Access/UsersConfigAccessStorage.cpp @@ -168,7 +168,14 @@ namespace user->access.grant(AccessFlags::allDictionaryFlags(), IDictionary::NO_DATABASE_TAG, dictionary); } - user->access_with_grant_option = user->access; + user->access_with_grant_option = user->access; /// By default the user can grant everything he has. + + bool access_management = config.getBool(user_config + ".access_management", false); + if (!access_management) + { + user->access.revoke(AccessType::ACCESS_MANAGEMENT); + user->access_with_grant_option.clear(); + } return user; } diff --git a/src/Interpreters/InterpreterShowCreateAccessEntityQuery.cpp b/src/Interpreters/InterpreterShowCreateAccessEntityQuery.cpp index 52126b0507e..d2f435106a8 100644 --- a/src/Interpreters/InterpreterShowCreateAccessEntityQuery.cpp +++ b/src/Interpreters/InterpreterShowCreateAccessEntityQuery.cpp @@ -256,6 +256,7 @@ BlockInputStreamPtr InterpreterShowCreateAccessEntityQuery::executeImpl() ASTPtr InterpreterShowCreateAccessEntityQuery::getCreateQuery(const ASTShowCreateAccessEntityQuery & show_query) const { const auto & access_control = context.getAccessControlManager(); + context.checkAccess(getRequiredAccess()); if (show_query.current_user) { @@ -281,6 +282,22 @@ ASTPtr InterpreterShowCreateAccessEntityQuery::getCreateQuery(const ASTShowCreat } +AccessRightsElements InterpreterShowCreateAccessEntityQuery::getRequiredAccess() const +{ + const auto & show_query = query_ptr->as(); + AccessRightsElements res; + switch (show_query.kind) + { + case Kind::USER: res.emplace_back(AccessType::SHOW_USERS); break; + case Kind::ROLE: res.emplace_back(AccessType::SHOW_ROLES); break; + case Kind::ROW_POLICY: res.emplace_back(AccessType::SHOW_ROW_POLICIES); break; + case Kind::SETTINGS_PROFILE: res.emplace_back(AccessType::SHOW_SETTINGS_PROFILES); break; + case Kind::QUOTA: res.emplace_back(AccessType::SHOW_QUOTAS); break; + } + return res; +} + + ASTPtr InterpreterShowCreateAccessEntityQuery::getAttachQuery(const IAccessEntity & entity) { return getCreateQueryImpl(entity, nullptr, true); diff --git a/src/Interpreters/InterpreterShowCreateAccessEntityQuery.h b/src/Interpreters/InterpreterShowCreateAccessEntityQuery.h index 92025bedb6c..0183c59766f 100644 --- a/src/Interpreters/InterpreterShowCreateAccessEntityQuery.h +++ b/src/Interpreters/InterpreterShowCreateAccessEntityQuery.h @@ -9,6 +9,7 @@ namespace DB { class Context; class 
ASTShowCreateAccessEntityQuery; +class AccessRightsElements; struct IAccessEntity; @@ -30,6 +31,7 @@ public: private: BlockInputStreamPtr executeImpl(); ASTPtr getCreateQuery(const ASTShowCreateAccessEntityQuery & show_query) const; + AccessRightsElements getRequiredAccess() const; ASTPtr query_ptr; const Context & context; diff --git a/src/Storages/System/StorageSystemQuotaUsage.cpp b/src/Storages/System/StorageSystemQuotaUsage.cpp index 53afb1d563a..1f943d02446 100644 --- a/src/Storages/System/StorageSystemQuotaUsage.cpp +++ b/src/Storages/System/StorageSystemQuotaUsage.cpp @@ -8,6 +8,7 @@ #include #include #include +#include #include @@ -40,7 +41,9 @@ NamesAndTypesList StorageSystemQuotaUsage::getNamesAndTypes() void StorageSystemQuotaUsage::fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo &) const { + context.checkAccess(AccessType::SHOW_QUOTAS); const auto & access_control = context.getAccessControlManager(); + for (const auto & info : access_control.getQuotaUsageInfo()) { for (const auto & interval : info.intervals) diff --git a/src/Storages/System/StorageSystemQuotas.cpp b/src/Storages/System/StorageSystemQuotas.cpp index 228339ea305..a22bb11bbc3 100644 --- a/src/Storages/System/StorageSystemQuotas.cpp +++ b/src/Storages/System/StorageSystemQuotas.cpp @@ -9,6 +9,7 @@ #include #include #include +#include #include @@ -54,6 +55,8 @@ NamesAndTypesList StorageSystemQuotas::getNamesAndTypes() void StorageSystemQuotas::fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo &) const { + context.checkAccess(AccessType::SHOW_QUOTAS); + size_t i = 0; auto & name_column = *res_columns[i++]; auto & id_column = *res_columns[i++]; diff --git a/src/Storages/System/StorageSystemRowPolicies.cpp b/src/Storages/System/StorageSystemRowPolicies.cpp index bd302cba3cf..12221cc52de 100644 --- a/src/Storages/System/StorageSystemRowPolicies.cpp +++ b/src/Storages/System/StorageSystemRowPolicies.cpp @@ -7,6 +7,7 @@ #include #include #include +#include #include @@ -33,6 +34,7 @@ NamesAndTypesList StorageSystemRowPolicies::getNamesAndTypes() void StorageSystemRowPolicies::fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo &) const { + context.checkAccess(AccessType::SHOW_ROW_POLICIES); const auto & access_control = context.getAccessControlManager(); std::vector ids = access_control.findAll(); From ccec944ff49173229ce265c49620314bdb73a6b3 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 7 Apr 2020 23:21:43 +0300 Subject: [PATCH 167/484] Fix flacky test, take 3 --- .../tests/gtest_zkutil_test_multi_exception.cpp | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/src/Common/ZooKeeper/tests/gtest_zkutil_test_multi_exception.cpp b/src/Common/ZooKeeper/tests/gtest_zkutil_test_multi_exception.cpp index a07c1ae8983..cd4c6e0a159 100644 --- a/src/Common/ZooKeeper/tests/gtest_zkutil_test_multi_exception.cpp +++ b/src/Common/ZooKeeper/tests/gtest_zkutil_test_multi_exception.cpp @@ -123,14 +123,17 @@ TEST(zkutil, MultiAsync) ops.clear(); auto res = fut.get(); - ASSERT_EQ(res.error, Coordination::ZNODEEXISTS); - ASSERT_EQ(res.responses.size(), 2); + + /// The test is quite heavy. It is normal if session is expired during this test. + /// If we don't check that, the test will be flacky. 
+ if (res.error != Coordination::ZSESSIONEXPIRED && res.error != Coordination::ZCONNECTIONLOSS) + { + ASSERT_EQ(res.error, Coordination::ZNODEEXISTS); + ASSERT_EQ(res.responses.size(), 2); + } } catch (const Coordination::Exception & e) { - /// The test is quite heavy. It is normal if session is expired during this test. - /// If we don't check that, the test will be flacky. - if (e.code != Coordination::ZSESSIONEXPIRED && e.code != Coordination::ZCONNECTIONLOSS) throw; } From b71ff6160a866eaabec5b60b5bb0774e4ced5520 Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Mon, 6 Apr 2020 11:06:17 +0300 Subject: [PATCH 168/484] Add the "access_management" option to users.xml which enables a user to manage access rights. The option is turned off by default. --- programs/server/users.xml | 3 +++ .../configs/users.d/access_management.xml | 7 +++++++ tests/integration/test_authentication/test.py | 2 +- .../configs/users.d/access_management.xml | 7 +++++++ .../configs/users.d/access_management.xml | 7 +++++++ .../test_grant_and_revoke/configs/users.xml | 16 ---------------- .../configs/users.d/access_management.xml | 7 +++++++ tests/integration/test_quota/configs/users.xml | 1 - .../configs/users.d/access_management.xml | 7 +++++++ .../test_row_policy/configs/users.xml | 1 - .../configs/{ => config.d}/remote_servers.xml | 0 .../configs/users.d/access_management.xml | 7 +++++++ .../test.py | 6 +++--- .../configs/users.d/access_management.xml | 7 +++++++ tests/integration/test_settings_profile/test.py | 2 +- tests/queries/server.py | 2 ++ 16 files changed, 59 insertions(+), 23 deletions(-) create mode 100644 tests/integration/test_authentication/configs/users.d/access_management.xml create mode 100644 tests/integration/test_disk_access_storage/configs/users.d/access_management.xml create mode 100644 tests/integration/test_grant_and_revoke/configs/users.d/access_management.xml delete mode 100644 tests/integration/test_grant_and_revoke/configs/users.xml create mode 100644 tests/integration/test_quota/configs/users.d/access_management.xml create mode 100644 tests/integration/test_row_policy/configs/users.d/access_management.xml rename tests/integration/test_settings_constraints_distributed/configs/{ => config.d}/remote_servers.xml (100%) create mode 100644 tests/integration/test_settings_constraints_distributed/configs/users.d/access_management.xml create mode 100644 tests/integration/test_settings_profile/configs/users.d/access_management.xml diff --git a/programs/server/users.xml b/programs/server/users.xml index d631fbb0f8a..3d95269190b 100644 --- a/programs/server/users.xml +++ b/programs/server/users.xml @@ -83,6 +83,9 @@ default + + + diff --git a/tests/integration/test_authentication/configs/users.d/access_management.xml b/tests/integration/test_authentication/configs/users.d/access_management.xml new file mode 100644 index 00000000000..7e799cb7b10 --- /dev/null +++ b/tests/integration/test_authentication/configs/users.d/access_management.xml @@ -0,0 +1,7 @@ +<yandex> + <users> + <default> + <access_management>1</access_management> + </default> + </users> +</yandex> diff --git a/tests/integration/test_authentication/test.py b/tests/integration/test_authentication/test.py index 483b59813e5..b7ffd1ed35b 100644 --- a/tests/integration/test_authentication/test.py +++ b/tests/integration/test_authentication/test.py @@ -2,7 +2,7 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -instance = cluster.add_instance('instance') +instance = cluster.add_instance('instance', config_dir="configs") @pytest.fixture(scope="module", autouse=True) diff --git 
a/tests/integration/test_disk_access_storage/configs/users.d/access_management.xml b/tests/integration/test_disk_access_storage/configs/users.d/access_management.xml new file mode 100644 index 00000000000..7e799cb7b10 --- /dev/null +++ b/tests/integration/test_disk_access_storage/configs/users.d/access_management.xml @@ -0,0 +1,7 @@ +<yandex> + <users> + <default> + <access_management>1</access_management> + </default> + </users> +</yandex> diff --git a/tests/integration/test_grant_and_revoke/configs/users.d/access_management.xml b/tests/integration/test_grant_and_revoke/configs/users.d/access_management.xml new file mode 100644 index 00000000000..7e799cb7b10 --- /dev/null +++ b/tests/integration/test_grant_and_revoke/configs/users.d/access_management.xml @@ -0,0 +1,7 @@ +<yandex> + <users> + <default> + <access_management>1</access_management> + </default> + </users> +</yandex> diff --git a/tests/integration/test_grant_and_revoke/configs/users.xml b/tests/integration/test_grant_and_revoke/configs/users.xml deleted file mode 100644 index fd40c6a4003..00000000000 --- a/tests/integration/test_grant_and_revoke/configs/users.xml +++ /dev/null @@ -1,16 +0,0 @@ - - - - - - - - - - - ::/0 - - default - - - diff --git a/tests/integration/test_quota/configs/users.d/access_management.xml b/tests/integration/test_quota/configs/users.d/access_management.xml new file mode 100644 index 00000000000..7e799cb7b10 --- /dev/null +++ b/tests/integration/test_quota/configs/users.d/access_management.xml @@ -0,0 +1,7 @@ +<yandex> + <users> + <default> + <access_management>1</access_management> + </default> + </users> +</yandex> diff --git a/tests/integration/test_quota/configs/users.xml b/tests/integration/test_quota/configs/users.xml index 15a5364449b..4412345a731 100644 --- a/tests/integration/test_quota/configs/users.xml +++ b/tests/integration/test_quota/configs/users.xml @@ -12,7 +12,6 @@ default myQuota - true diff --git a/tests/integration/test_row_policy/configs/users.d/access_management.xml b/tests/integration/test_row_policy/configs/users.d/access_management.xml new file mode 100644 index 00000000000..7e799cb7b10 --- /dev/null +++ b/tests/integration/test_row_policy/configs/users.d/access_management.xml @@ -0,0 +1,7 @@ +<yandex> + <users> + <default> + <access_management>1</access_management> + </default> + </users> +</yandex> diff --git a/tests/integration/test_row_policy/configs/users.xml b/tests/integration/test_row_policy/configs/users.xml index 313d8084884..ce29b7f7308 100644 --- a/tests/integration/test_row_policy/configs/users.xml +++ b/tests/integration/test_row_policy/configs/users.xml @@ -13,7 +13,6 @@ default default - true diff --git a/tests/integration/test_settings_constraints_distributed/configs/remote_servers.xml b/tests/integration/test_settings_constraints_distributed/configs/config.d/remote_servers.xml similarity index 100% rename from tests/integration/test_settings_constraints_distributed/configs/remote_servers.xml rename to tests/integration/test_settings_constraints_distributed/configs/config.d/remote_servers.xml diff --git a/tests/integration/test_settings_constraints_distributed/configs/users.d/access_management.xml b/tests/integration/test_settings_constraints_distributed/configs/users.d/access_management.xml new file mode 100644 index 00000000000..7e799cb7b10 --- /dev/null +++ b/tests/integration/test_settings_constraints_distributed/configs/users.d/access_management.xml @@ -0,0 +1,7 @@ +<yandex> + <users> + <default> + <access_management>1</access_management> + </default> + </users> +</yandex> diff --git a/tests/integration/test_settings_constraints_distributed/test.py b/tests/integration/test_settings_constraints_distributed/test.py index b23b130b270..a58c037a2fc 100644 --- a/tests/integration/test_settings_constraints_distributed/test.py +++ b/tests/integration/test_settings_constraints_distributed/test.py @@ -8,9 +8,9 @@ from helpers.test_tools import assert_eq_with_retry cluster = ClickHouseCluster(__file__) -node1 = 
cluster.add_instance('node1') -node2 = cluster.add_instance('node2') -distributed = cluster.add_instance('distributed', main_configs=['configs/remote_servers.xml']) +node1 = cluster.add_instance('node1', config_dir="configs") +node2 = cluster.add_instance('node2', config_dir="configs") +distributed = cluster.add_instance('distributed', config_dir="configs") @pytest.fixture(scope="module") diff --git a/tests/integration/test_settings_profile/configs/users.d/access_management.xml b/tests/integration/test_settings_profile/configs/users.d/access_management.xml new file mode 100644 index 00000000000..7e799cb7b10 --- /dev/null +++ b/tests/integration/test_settings_profile/configs/users.d/access_management.xml @@ -0,0 +1,7 @@ +<yandex> + <users> + <default> + <access_management>1</access_management> + </default> + </users> +</yandex> diff --git a/tests/integration/test_settings_profile/test.py b/tests/integration/test_settings_profile/test.py index 7f18327c66a..6866c6b3901 100644 --- a/tests/integration/test_settings_profile/test.py +++ b/tests/integration/test_settings_profile/test.py @@ -2,7 +2,7 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -instance = cluster.add_instance('instance') +instance = cluster.add_instance('instance', config_dir="configs") @pytest.fixture(scope="module", autouse=True) diff --git a/tests/queries/server.py b/tests/queries/server.py index 185b694619e..ea591c5b123 100644 --- a/tests/queries/server.py +++ b/tests/queries/server.py @@ -193,6 +193,8 @@ ServerThread.DEFAULT_USERS_CONFIG = \ default default + + <access_management>1</access_management> From d064ddfe135e8ad5c7da985d8039064e4089387d Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Sun, 5 Apr 2020 19:40:35 +0300 Subject: [PATCH 169/484] Disable MemoryAccessStorage. --- src/Access/AccessControlManager.cpp | 3 +++ tests/queries/server.py | 1 + tests/server-test.xml | 1 + 3 files changed, 5 insertions(+) diff --git a/src/Access/AccessControlManager.cpp b/src/Access/AccessControlManager.cpp index b5e06549c28..f8f15e425ed 100644 --- a/src/Access/AccessControlManager.cpp +++ b/src/Access/AccessControlManager.cpp @@ -23,7 +23,10 @@ namespace std::vector<std::unique_ptr<IAccessStorage>> list; list.emplace_back(std::make_unique()); list.emplace_back(std::make_unique()); + +#if 0 /// Memory access storage is disabled. list.emplace_back(std::make_unique<MemoryAccessStorage>()); +#endif return list; } diff --git a/tests/queries/server.py b/tests/queries/server.py index ea591c5b123..d1ffe9099b5 100644 --- a/tests/queries/server.py +++ b/tests/queries/server.py @@ -118,6 +118,7 @@ ServerThread.DEFAULT_SERVER_CONFIG = \ {tmp_dir}/data/ {tmp_dir}/tmp/ + <access_control_path>{tmp_dir}/data/access/</access_control_path> users.xml 5368709120 diff --git a/tests/server-test.xml b/tests/server-test.xml index d9e547b4d55..c2356ec1ba0 100644 --- a/tests/server-test.xml +++ b/tests/server-test.xml @@ -47,6 +47,7 @@ /tmp/clickhouse/data/ /tmp/clickhouse/tmp/ users.xml + <access_control_path>/tmp/clickhouse/data/access/</access_control_path> 5368709120 default default From bc3e2cb48c71b330e5464f2cfc8ccf4c89c722d8 Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Sun, 5 Apr 2020 19:39:47 +0300 Subject: [PATCH 170/484] Add the option "access_control_path" to server config. 
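With this change the DiskAccessStorage directory is configured in the main server config instead of a separate test-only file. A sketch of the resulting entry, assuming the XML element is named after the option; the path below is the default added to config.xml by this patch:

    <access_control_path>/var/lib/clickhouse/access/</access_control_path>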
--- programs/server/config.xml | 3 +++ .../test_disk_access_storage/configs/access_control_path.xml | 4 ---- tests/integration/test_disk_access_storage/test.py | 2 +- 3 files changed, 4 insertions(+), 5 deletions(-) delete mode 100644 tests/integration/test_disk_access_storage/configs/access_control_path.xml diff --git a/programs/server/config.xml b/programs/server/config.xml index e0d527f9538..f55ab02d903 100644 --- a/programs/server/config.xml +++ b/programs/server/config.xml @@ -128,6 +128,9 @@ /var/lib/clickhouse/user_files/ + + <access_control_path>/var/lib/clickhouse/access/</access_control_path> + users.xml diff --git a/tests/integration/test_disk_access_storage/configs/access_control_path.xml b/tests/integration/test_disk_access_storage/configs/access_control_path.xml deleted file mode 100644 index 7814472ee9b..00000000000 --- a/tests/integration/test_disk_access_storage/configs/access_control_path.xml +++ /dev/null @@ -1,4 +0,0 @@ - - -<access_control_path>/var/lib/clickhouse/access</access_control_path> - diff --git a/tests/integration/test_disk_access_storage/test.py b/tests/integration/test_disk_access_storage/test.py index 74c133314ea..1f6577b9dd1 100644 --- a/tests/integration/test_disk_access_storage/test.py +++ b/tests/integration/test_disk_access_storage/test.py @@ -2,7 +2,7 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -instance = cluster.add_instance('instance', config_dir='configs', main_configs=['configs/access_control_path.xml'], stay_alive=True) +instance = cluster.add_instance('instance', config_dir='configs', stay_alive=True) @pytest.fixture(scope="module", autouse=True) From d0af31bbcfc84f6ee941d09721645e78c5180033 Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Sun, 5 Apr 2020 21:44:28 +0300 Subject: [PATCH 171/484] Use ALTER USER DROP HOST instead of ALTER USER REMOVE HOST for consistency. --- src/Parsers/ASTCreateUserQuery.cpp | 2 +- src/Parsers/ASTCreateUserQuery.h | 2 +- src/Parsers/ParserCreateUserQuery.cpp | 2 +- src/Parsers/ParserCreateUserQuery.h | 2 +- src/Parsers/parseUserName.cpp | 4 ++-- tests/queries/0_stateless/01075_allowed_client_hosts.sql | 6 +++--- 6 files changed, 9 insertions(+), 9 deletions(-) diff --git a/src/Parsers/ASTCreateUserQuery.cpp b/src/Parsers/ASTCreateUserQuery.cpp index 0631d08ae74..c3cbc366d88 100644 --- a/src/Parsers/ASTCreateUserQuery.cpp +++ b/src/Parsers/ASTCreateUserQuery.cpp @@ -195,7 +195,7 @@ void ASTCreateUserQuery::formatImpl(const FormatSettings & format, FormatState & if (add_hosts) formatHosts("ADD", *add_hosts, format); if (remove_hosts) - formatHosts("REMOVE", *remove_hosts, format); + formatHosts("DROP", *remove_hosts, format); if (default_roles) formatDefaultRoles(*default_roles, format); diff --git a/src/Parsers/ASTCreateUserQuery.h b/src/Parsers/ASTCreateUserQuery.h index fc2aa0121ed..643db2660af 100644 --- a/src/Parsers/ASTCreateUserQuery.h +++ b/src/Parsers/ASTCreateUserQuery.h @@ -19,7 +19,7 @@ class ASTSettingsProfileElements; * ALTER USER [IF EXISTS] name * [RENAME TO new_name] * [IDENTIFIED [WITH {PLAINTEXT_PASSWORD|SHA256_PASSWORD|DOUBLE_SHA1_PASSWORD}] BY {'password'|'hash'}] - * [[ADD|REMOVE] HOST {LOCAL | NAME 'name' | NAME REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE] + * [[ADD|DROP] HOST {LOCAL | NAME 'name' | NAME REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE] * [DEFAULT ROLE role [,...] | ALL | ALL EXCEPT role [,...] ] * [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...] 
*/ diff --git a/src/Parsers/ParserCreateUserQuery.cpp b/src/Parsers/ParserCreateUserQuery.cpp index 57f50c34116..acc8586fc84 100644 --- a/src/Parsers/ParserCreateUserQuery.cpp +++ b/src/Parsers/ParserCreateUserQuery.cpp @@ -318,7 +318,7 @@ bool ParserCreateUserQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec if (new_name.empty() && parseRenameTo(pos, expected, new_name, new_host_pattern)) continue; - if (parseHosts(pos, expected, "ADD", add_hosts) || parseHosts(pos, expected, "REMOVE", remove_hosts)) + if (parseHosts(pos, expected, "ADD", add_hosts) || parseHosts(pos, expected, "DROP", remove_hosts)) continue; } diff --git a/src/Parsers/ParserCreateUserQuery.h b/src/Parsers/ParserCreateUserQuery.h index bd6ab74d53f..4b2af34c003 100644 --- a/src/Parsers/ParserCreateUserQuery.h +++ b/src/Parsers/ParserCreateUserQuery.h @@ -14,7 +14,7 @@ namespace DB * ALTER USER [IF EXISTS] name * [RENAME TO new_name] * [IDENTIFIED [WITH {PLAINTEXT_PASSWORD|SHA256_PASSWORD|DOUBLE_SHA1_PASSWORD}] BY {'password'|'hash'}] - * [[ADD|REMOVE] HOST {LOCAL | NAME 'name' | NAME REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE] + * [[ADD|DROP] HOST {LOCAL | NAME 'name' | NAME REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE] * [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...] */ class ParserCreateUserQuery : public IParserBase diff --git a/src/Parsers/parseUserName.cpp b/src/Parsers/parseUserName.cpp index 3993935e386..9558f26cfc4 100644 --- a/src/Parsers/parseUserName.cpp +++ b/src/Parsers/parseUserName.cpp @@ -15,7 +15,7 @@ namespace boost::algorithm::trim(name); - String pattern = "@"; + String pattern = "%"; if (ParserToken{TokenType::At}.ignore(pos, expected)) { @@ -25,7 +25,7 @@ namespace boost::algorithm::trim(pattern); } - if (pattern != "@") + if (pattern != "%") name += '@' + pattern; user_name = std::move(name); diff --git a/tests/queries/0_stateless/01075_allowed_client_hosts.sql b/tests/queries/0_stateless/01075_allowed_client_hosts.sql index 77a16a9f62a..e0b1c0f9905 100644 --- a/tests/queries/0_stateless/01075_allowed_client_hosts.sql +++ b/tests/queries/0_stateless/01075_allowed_client_hosts.sql @@ -21,10 +21,10 @@ SHOW CREATE USER test_user_01075; ALTER USER test_user_01075 ADD HOST IP '127.0.0.1'; SHOW CREATE USER test_user_01075; -ALTER USER test_user_01075 REMOVE HOST IP '2001:0db8:11a3:09d7:1f34:8a2e:07a0:765d'; +ALTER USER test_user_01075 DROP HOST IP '2001:0db8:11a3:09d7:1f34:8a2e:07a0:765d'; SHOW CREATE USER test_user_01075; -ALTER USER test_user_01075 REMOVE HOST NAME 'localhost'; +ALTER USER test_user_01075 DROP HOST NAME 'localhost'; SHOW CREATE USER test_user_01075; ALTER USER test_user_01075 HOST LIKE '@.somesite.com'; @@ -47,7 +47,7 @@ DROP USER test_user_01075; CREATE USER test_user_01075_x@localhost; SHOW CREATE USER test_user_01075_x@localhost; -ALTER USER test_user_01075_x@localhost RENAME TO test_user_01075_x@'@'; +ALTER USER test_user_01075_x@localhost RENAME TO test_user_01075_x@'%'; SHOW CREATE USER test_user_01075_x; ALTER USER test_user_01075_x RENAME TO test_user_01075_x@'192.168.23.15'; From b77e0a5b4e026d547c83c6ac44b6d54b09ff1bde Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Sun, 5 Apr 2020 22:18:30 +0300 Subject: [PATCH 172/484] Avoid writing "HOST ANY" if the host is any by default. 
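A sketch of the intended round-trip behaviour; the user name is illustrative:

    CREATE USER u1;        -- no HOST clause, the host defaults to ANY
    SHOW CREATE USER u1;   -- expected to print just CREATE USER u1, without an explicit HOST ANY clause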
--- src/Access/AllowedClientHosts.h | 6 ++-- src/Parsers/ParserCreateUserQuery.cpp | 14 ++++---- src/Parsers/parseUserName.cpp | 51 +++++++++++---------------- src/Parsers/parseUserName.h | 2 +- 4 files changed, 32 insertions(+), 41 deletions(-) diff --git a/src/Access/AllowedClientHosts.h b/src/Access/AllowedClientHosts.h index 9e89c2b92a1..2baafb2e04a 100644 --- a/src/Access/AllowedClientHosts.h +++ b/src/Access/AllowedClientHosts.h @@ -91,7 +91,7 @@ public: /// Allows IP addresses or host names using LIKE pattern. /// This pattern can contain % and _ wildcard characters. - /// For example, addLikePattern("@") will allow all addresses. + /// For example, addLikePattern("%") will allow all addresses. void addLikePattern(const String & pattern); void removeLikePattern(const String & like_pattern); const std::vector & getLikePatterns() const { return like_patterns; } @@ -298,7 +298,7 @@ inline void AllowedClientHosts::addLikePattern(const String & pattern) { if (boost::iequals(pattern, "localhost") || (pattern == "127.0.0.1") || (pattern == "::1")) local_host = true; - else if ((pattern == "@") || (pattern == "0.0.0.0/0") || (pattern == "::/0")) + else if ((pattern == "%") || (pattern == "0.0.0.0/0") || (pattern == "::/0")) any_host = true; else if (boost::range::find(like_patterns, pattern) == name_regexps.end()) like_patterns.push_back(pattern); @@ -308,7 +308,7 @@ inline void AllowedClientHosts::removeLikePattern(const String & pattern) { if (boost::iequals(pattern, "localhost") || (pattern == "127.0.0.1") || (pattern == "::1")) local_host = false; - else if ((pattern == "@") || (pattern == "0.0.0.0/0") || (pattern == "::/0")) + else if ((pattern == "%") || (pattern == "0.0.0.0/0") || (pattern == "::/0")) any_host = false; else boost::range::remove_erase(like_patterns, pattern); diff --git a/src/Parsers/ParserCreateUserQuery.cpp b/src/Parsers/ParserCreateUserQuery.cpp index acc8586fc84..7564c02da45 100644 --- a/src/Parsers/ParserCreateUserQuery.cpp +++ b/src/Parsers/ParserCreateUserQuery.cpp @@ -23,7 +23,7 @@ namespace ErrorCodes namespace { - bool parseRenameTo(IParserBase::Pos & pos, Expected & expected, String & new_name, String & new_host_pattern) + bool parseRenameTo(IParserBase::Pos & pos, Expected & expected, String & new_name, std::optional & new_host_pattern) { return IParserBase::wrapParseImpl(pos, [&] { @@ -286,12 +286,12 @@ bool ParserCreateUserQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec } String name; - String host_pattern; + std::optional host_pattern; if (!parseUserName(pos, expected, name, host_pattern)) return false; String new_name; - String new_host_pattern; + std::optional new_host_pattern; std::optional authentication; std::optional hosts; std::optional add_hosts; @@ -327,10 +327,10 @@ bool ParserCreateUserQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec if (!hosts) { - if (!alter) - hosts.emplace().addLikePattern(host_pattern); - else if (alter && !new_name.empty()) - hosts.emplace().addLikePattern(new_host_pattern); + if (!alter && host_pattern) + hosts.emplace().addLikePattern(*host_pattern); + else if (alter && new_host_pattern) + hosts.emplace().addLikePattern(*new_host_pattern); } auto query = std::make_shared(); diff --git a/src/Parsers/parseUserName.cpp b/src/Parsers/parseUserName.cpp index 9558f26cfc4..e6b91ba4af3 100644 --- a/src/Parsers/parseUserName.cpp +++ b/src/Parsers/parseUserName.cpp @@ -3,48 +3,39 @@ #include #include + namespace DB { -namespace +bool parseUserName(IParser::Pos & pos, Expected & expected, String & 
user_name, std::optional & host_like_pattern) { - bool parseUserNameImpl(IParser::Pos & pos, Expected & expected, String & user_name, String * host_like_pattern) + String name; + if (!parseIdentifierOrStringLiteral(pos, expected, name)) + return false; + + boost::algorithm::trim(name); + + std::optional pattern; + if (ParserToken{TokenType::At}.ignore(pos, expected)) { - String name; - if (!parseIdentifierOrStringLiteral(pos, expected, name)) + if (!parseIdentifierOrStringLiteral(pos, expected, pattern.emplace())) return false; - boost::algorithm::trim(name); - - String pattern = "%"; - - if (ParserToken{TokenType::At}.ignore(pos, expected)) - { - if (!parseIdentifierOrStringLiteral(pos, expected, pattern)) - return false; - - boost::algorithm::trim(pattern); - } - - if (pattern != "%") - name += '@' + pattern; - - user_name = std::move(name); - if (host_like_pattern) - *host_like_pattern = std::move(pattern); - return true; + boost::algorithm::trim(*pattern); } + + if (pattern && (pattern != "%")) + name += '@' + *pattern; + + user_name = std::move(name); + host_like_pattern = std::move(pattern); + return true; } bool parseUserName(IParser::Pos & pos, Expected & expected, String & user_name) { - return parseUserNameImpl(pos, expected, user_name, nullptr); -} - - -bool parseUserName(IParser::Pos & pos, Expected & expected, String & user_name, String & host_like_pattern) -{ - return parseUserNameImpl(pos, expected, user_name, &host_like_pattern); + std::optional unused_pattern; + return parseUserName(pos, expected, user_name, unused_pattern); } diff --git a/src/Parsers/parseUserName.h b/src/Parsers/parseUserName.h index c3556f4dc59..641aa09d1f3 100644 --- a/src/Parsers/parseUserName.h +++ b/src/Parsers/parseUserName.h @@ -10,7 +10,7 @@ namespace DB /// The `host` can be an ip address, ip subnet, or a host name. /// The % and _ wildcard characters are permitted in `host`. /// These have the same meaning as for pattern-matching operations performed with the LIKE operator. -bool parseUserName(IParser::Pos & pos, Expected & expected, String & user_name, String & host_like_pattern); +bool parseUserName(IParser::Pos & pos, Expected & expected, String & user_name, std::optional & host_like_pattern); bool parseUserName(IParser::Pos & pos, Expected & expected, String & user_name); /// Parses either a user name or the 'CURRENT_USER' keyword (or some of the aliases). From 42b8ed3ec64d7077422afb898db174edf6c191b0 Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Mon, 6 Apr 2020 02:03:20 +0300 Subject: [PATCH 173/484] Implement "ON CLUSTER" clause for access control SQL. 
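A sketch of the syntax enabled by this patch; the cluster, user and database names are illustrative (the added integration test defines its own cluster in configs/config.d/clusters.xml):

    CREATE USER u1 ON CLUSTER some_cluster;
    GRANT ON CLUSTER some_cluster SELECT ON db.* TO u1;
    DROP USER u1 ON CLUSTER some_cluster;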
--- src/Access/ExtendedRoleSet.cpp | 14 +++---- src/Access/ExtendedRoleSet.h | 6 +-- src/Interpreters/Context.cpp | 6 +-- src/Interpreters/Context.h | 2 +- src/Interpreters/DDLWorker.cpp | 5 +++ src/Interpreters/DDLWorker.h | 1 + .../InterpreterCreateQuotaQuery.cpp | 9 +++- .../InterpreterCreateRoleQuery.cpp | 4 ++ .../InterpreterCreateRowPolicyQuery.cpp | 9 +++- .../InterpreterCreateSettingsProfileQuery.cpp | 10 ++++- .../InterpreterCreateUserQuery.cpp | 8 +++- .../InterpreterDropAccessEntityQuery.cpp | 4 ++ src/Interpreters/InterpreterGrantQuery.cpp | 10 ++++- src/Parsers/ASTCreateQuotaQuery.cpp | 10 +++++ src/Parsers/ASTCreateQuotaQuery.h | 5 ++- src/Parsers/ASTCreateRoleQuery.cpp | 2 + src/Parsers/ASTCreateRoleQuery.h | 4 +- src/Parsers/ASTCreateRowPolicyQuery.cpp | 9 ++++ src/Parsers/ASTCreateRowPolicyQuery.h | 5 ++- src/Parsers/ASTCreateSettingsProfileQuery.cpp | 8 ++++ src/Parsers/ASTCreateSettingsProfileQuery.h | 5 ++- src/Parsers/ASTCreateUserQuery.cpp | 2 + src/Parsers/ASTCreateUserQuery.h | 4 +- src/Parsers/ASTDropAccessEntityQuery.cpp | 2 + src/Parsers/ASTDropAccessEntityQuery.h | 4 +- src/Parsers/ASTExtendedRoleSet.cpp | 17 ++++++++ src/Parsers/ASTExtendedRoleSet.h | 1 + src/Parsers/ASTGrantQuery.cpp | 16 ++++++-- src/Parsers/ASTGrantQuery.h | 5 ++- src/Parsers/ParserCreateQuotaQuery.cpp | 8 ++++ src/Parsers/ParserCreateRoleQuery.cpp | 8 ++++ src/Parsers/ParserCreateRowPolicyQuery.cpp | 8 ++++ .../ParserCreateSettingsProfileQuery.cpp | 8 ++++ src/Parsers/ParserCreateUserQuery.cpp | 8 ++++ src/Parsers/ParserDropAccessEntityQuery.cpp | 8 ++++ src/Parsers/ParserGrantQuery.cpp | 8 ++++ .../__init__.py | 0 .../configs/config.d/clusters.xml | 22 ++++++++++ .../configs/users.d/access_management.xml | 7 ++++ .../test_access_control_on_cluster/test.py | 41 +++++++++++++++++++ 40 files changed, 282 insertions(+), 31 deletions(-) create mode 100644 tests/integration/test_access_control_on_cluster/__init__.py create mode 100644 tests/integration/test_access_control_on_cluster/configs/config.d/clusters.xml create mode 100644 tests/integration/test_access_control_on_cluster/configs/users.d/access_management.xml create mode 100644 tests/integration/test_access_control_on_cluster/test.py diff --git a/src/Access/ExtendedRoleSet.cpp b/src/Access/ExtendedRoleSet.cpp index b59dc7ac232..eed475bc3cc 100644 --- a/src/Access/ExtendedRoleSet.cpp +++ b/src/Access/ExtendedRoleSet.cpp @@ -51,25 +51,25 @@ ExtendedRoleSet::ExtendedRoleSet(const boost::container::flat_set & ids_) ExtendedRoleSet::ExtendedRoleSet(const ASTExtendedRoleSet & ast) { - init(ast, nullptr, nullptr); + init(ast, nullptr); } -ExtendedRoleSet::ExtendedRoleSet(const ASTExtendedRoleSet & ast, const UUID & current_user_id) +ExtendedRoleSet::ExtendedRoleSet(const ASTExtendedRoleSet & ast, const std::optional & current_user_id) { - init(ast, nullptr, ¤t_user_id); + init(ast, nullptr, current_user_id); } ExtendedRoleSet::ExtendedRoleSet(const ASTExtendedRoleSet & ast, const AccessControlManager & manager) { - init(ast, &manager, nullptr); + init(ast, &manager); } -ExtendedRoleSet::ExtendedRoleSet(const ASTExtendedRoleSet & ast, const AccessControlManager & manager, const UUID & current_user_id) +ExtendedRoleSet::ExtendedRoleSet(const ASTExtendedRoleSet & ast, const AccessControlManager & manager, const std::optional & current_user_id) { - init(ast, &manager, ¤t_user_id); + init(ast, &manager, current_user_id); } -void ExtendedRoleSet::init(const ASTExtendedRoleSet & ast, const AccessControlManager * manager, const UUID * current_user_id) 
+void ExtendedRoleSet::init(const ASTExtendedRoleSet & ast, const AccessControlManager * manager, const std::optional & current_user_id) { all = ast.all; diff --git a/src/Access/ExtendedRoleSet.h b/src/Access/ExtendedRoleSet.h index 61a4db6e0ae..486b4277337 100644 --- a/src/Access/ExtendedRoleSet.h +++ b/src/Access/ExtendedRoleSet.h @@ -32,9 +32,9 @@ struct ExtendedRoleSet /// The constructor from AST requires the AccessControlManager if `ast.id_mode == false`. ExtendedRoleSet(const ASTExtendedRoleSet & ast); - ExtendedRoleSet(const ASTExtendedRoleSet & ast, const UUID & current_user_id); + ExtendedRoleSet(const ASTExtendedRoleSet & ast, const std::optional & current_user_id); ExtendedRoleSet(const ASTExtendedRoleSet & ast, const AccessControlManager & manager); - ExtendedRoleSet(const ASTExtendedRoleSet & ast, const AccessControlManager & manager, const UUID & current_user_id); + ExtendedRoleSet(const ASTExtendedRoleSet & ast, const AccessControlManager & manager, const std::optional & current_user_id); std::shared_ptr toAST() const; String toString() const; @@ -69,7 +69,7 @@ struct ExtendedRoleSet boost::container::flat_set except_ids; private: - void init(const ASTExtendedRoleSet & ast, const AccessControlManager * manager = nullptr, const UUID * current_user_id = nullptr); + void init(const ASTExtendedRoleSet & ast, const AccessControlManager * manager = nullptr, const std::optional & current_user_id = {}); }; } diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index 4dc72948f8a..5e98a0267ca 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -665,12 +665,10 @@ String Context::getUserName() const return access->getUserName(); } -UUID Context::getUserID() const +std::optional Context::getUserID() const { auto lock = getLock(); - if (!user_id) - throw Exception("No current user", ErrorCodes::LOGICAL_ERROR); - return *user_id; + return user_id; } diff --git a/src/Interpreters/Context.h b/src/Interpreters/Context.h index e5b33e43614..b34a0e0c542 100644 --- a/src/Interpreters/Context.h +++ b/src/Interpreters/Context.h @@ -233,7 +233,7 @@ public: UserPtr getUser() const; String getUserName() const; - UUID getUserID() const; + std::optional getUserID() const; void setCurrentRoles(const std::vector & current_roles_); void setCurrentRolesDefault(); diff --git a/src/Interpreters/DDLWorker.cpp b/src/Interpreters/DDLWorker.cpp index eaee356264d..4a39cc6b8a1 100644 --- a/src/Interpreters/DDLWorker.cpp +++ b/src/Interpreters/DDLWorker.cpp @@ -1377,4 +1377,9 @@ BlockIO executeDDLQueryOnCluster(const ASTPtr & query_ptr_, const Context & cont } +BlockIO executeDDLQueryOnCluster(const ASTPtr & query_ptr_, const Context & context) +{ + return executeDDLQueryOnCluster(query_ptr_, context, {}); +} + } diff --git a/src/Interpreters/DDLWorker.h b/src/Interpreters/DDLWorker.h index 32b7cd5f172..62eba97032e 100644 --- a/src/Interpreters/DDLWorker.h +++ b/src/Interpreters/DDLWorker.h @@ -24,6 +24,7 @@ struct DDLTask; /// Pushes distributed DDL query to the queue BlockIO executeDDLQueryOnCluster(const ASTPtr & query_ptr, const Context & context, AccessRightsElements && query_required_access); +BlockIO executeDDLQueryOnCluster(const ASTPtr & query_ptr, const Context & context); class DDLWorker diff --git a/src/Interpreters/InterpreterCreateQuotaQuery.cpp b/src/Interpreters/InterpreterCreateQuotaQuery.cpp index 4b64615dd36..13e772965ff 100644 --- a/src/Interpreters/InterpreterCreateQuotaQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuotaQuery.cpp @@ -2,6 +2,7 
@@ #include #include #include +#include #include #include #include @@ -76,10 +77,16 @@ void updateQuotaFromQueryImpl(Quota & quota, const ASTCreateQuotaQuery & query, BlockIO InterpreterCreateQuotaQuery::execute() { - const auto & query = query_ptr->as(); + auto & query = query_ptr->as(); auto & access_control = context.getAccessControlManager(); context.checkAccess(query.alter ? AccessType::ALTER_QUOTA : AccessType::CREATE_QUOTA); + if (!query.cluster.empty()) + { + query.replaceCurrentUserTagWithName(context.getUserName()); + return executeDDLQueryOnCluster(query_ptr, context); + } + std::optional roles_from_query; if (query.roles) roles_from_query = ExtendedRoleSet{*query.roles, access_control, context.getUserID()}; diff --git a/src/Interpreters/InterpreterCreateRoleQuery.cpp b/src/Interpreters/InterpreterCreateRoleQuery.cpp index f64462d443b..ed9135b2bb6 100644 --- a/src/Interpreters/InterpreterCreateRoleQuery.cpp +++ b/src/Interpreters/InterpreterCreateRoleQuery.cpp @@ -1,6 +1,7 @@ #include #include #include +#include #include #include @@ -44,6 +45,9 @@ BlockIO InterpreterCreateRoleQuery::execute() else context.checkAccess(AccessType::CREATE_ROLE); + if (!query.cluster.empty()) + return executeDDLQueryOnCluster(query_ptr, context); + std::optional settings_from_query; if (query.settings) settings_from_query = SettingsProfileElements{*query.settings, access_control}; diff --git a/src/Interpreters/InterpreterCreateRowPolicyQuery.cpp b/src/Interpreters/InterpreterCreateRowPolicyQuery.cpp index 9f8cad51140..c3de3876c46 100644 --- a/src/Interpreters/InterpreterCreateRowPolicyQuery.cpp +++ b/src/Interpreters/InterpreterCreateRowPolicyQuery.cpp @@ -3,6 +3,7 @@ #include #include #include +#include #include #include #include @@ -63,10 +64,16 @@ namespace BlockIO InterpreterCreateRowPolicyQuery::execute() { - const auto & query = query_ptr->as(); + auto & query = query_ptr->as(); auto & access_control = context.getAccessControlManager(); context.checkAccess(query.alter ? 
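// Note: the quota, row-policy, settings-profile and grant interpreters all call
// replaceCurrentUserTagWithName() before enqueueing, while CREATE ROLE (above),
// CREATE USER and DROP skip the rewrite — as the hunks suggest, those statements
// name their targets explicitly rather than via a TO CURRENT_USER clause. The
// helper itself (ASTExtendedRoleSet.cpp, further down) is a two-flag rewrite:
//
//     if (current_user)        { names.push_back(current_user_name);        current_user = false; }
//     if (except_current_user) { except_names.push_back(current_user_name); except_current_user = false; }
//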
AccessType::ALTER_ROW_POLICY : AccessType::CREATE_ROW_POLICY); + if (!query.cluster.empty()) + { + query.replaceCurrentUserTagWithName(context.getUserName()); + return executeDDLQueryOnCluster(query_ptr, context); + } + std::optional roles_from_query; if (query.roles) roles_from_query = ExtendedRoleSet{*query.roles, access_control, context.getUserID()}; diff --git a/src/Interpreters/InterpreterCreateSettingsProfileQuery.cpp b/src/Interpreters/InterpreterCreateSettingsProfileQuery.cpp index 9d110a69516..cb0b5587bdc 100644 --- a/src/Interpreters/InterpreterCreateSettingsProfileQuery.cpp +++ b/src/Interpreters/InterpreterCreateSettingsProfileQuery.cpp @@ -1,6 +1,8 @@ #include #include +#include #include +#include #include #include #include @@ -49,13 +51,19 @@ namespace BlockIO InterpreterCreateSettingsProfileQuery::execute() { - const auto & query = query_ptr->as(); + auto & query = query_ptr->as(); auto & access_control = context.getAccessControlManager(); if (query.alter) context.checkAccess(AccessType::ALTER_SETTINGS_PROFILE); else context.checkAccess(AccessType::CREATE_SETTINGS_PROFILE); + if (!query.cluster.empty()) + { + query.replaceCurrentUserTagWithName(context.getUserName()); + return executeDDLQueryOnCluster(query_ptr, context); + } + std::optional settings_from_query; if (query.settings) settings_from_query = SettingsProfileElements{*query.settings, access_control}; diff --git a/src/Interpreters/InterpreterCreateUserQuery.cpp b/src/Interpreters/InterpreterCreateUserQuery.cpp index 5dba1fefc9c..78c7cc222ae 100644 --- a/src/Interpreters/InterpreterCreateUserQuery.cpp +++ b/src/Interpreters/InterpreterCreateUserQuery.cpp @@ -1,10 +1,11 @@ #include #include #include +#include #include +#include #include #include -#include #include #include @@ -67,7 +68,7 @@ namespace BlockIO InterpreterCreateUserQuery::execute() { - const auto & query = query_ptr->as(); + auto & query = query_ptr->as(); auto & access_control = context.getAccessControlManager(); auto access = context.getAccess(); access->checkAccess(query.alter ? 
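// Note: the two-argument executeDDLQueryOnCluster() used by these interpreters
// is the new convenience overload from the DDLWorker.cpp hunk above — it simply
// forwards empty AccessRightsElements, i.e. the interpreter has already done all
// the privilege checking it wants before enqueueing:
//
//     BlockIO executeDDLQueryOnCluster(const ASTPtr & query_ptr_, const Context & context)
//     {
//         return executeDDLQueryOnCluster(query_ptr_, context, {});
//     }
//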
AccessType::ALTER_USER : AccessType::CREATE_USER); @@ -83,6 +84,9 @@ BlockIO InterpreterCreateUserQuery::execute() } } + if (!query.cluster.empty()) + return executeDDLQueryOnCluster(query_ptr, context); + std::optional settings_from_query; if (query.settings) settings_from_query = SettingsProfileElements{*query.settings, access_control}; diff --git a/src/Interpreters/InterpreterDropAccessEntityQuery.cpp b/src/Interpreters/InterpreterDropAccessEntityQuery.cpp index 191fa233097..e67e0659796 100644 --- a/src/Interpreters/InterpreterDropAccessEntityQuery.cpp +++ b/src/Interpreters/InterpreterDropAccessEntityQuery.cpp @@ -1,6 +1,7 @@ #include #include #include +#include #include #include #include @@ -52,6 +53,9 @@ BlockIO InterpreterDropAccessEntityQuery::execute() std::type_index type = getType(query.kind); context.checkAccess(getRequiredAccessType(query.kind)); + if (!query.cluster.empty()) + return executeDDLQueryOnCluster(query_ptr, context); + if (query.kind == Kind::ROW_POLICY) { Strings full_names; diff --git a/src/Interpreters/InterpreterGrantQuery.cpp b/src/Interpreters/InterpreterGrantQuery.cpp index 5d215ff3a93..a5f13dbbbfe 100644 --- a/src/Interpreters/InterpreterGrantQuery.cpp +++ b/src/Interpreters/InterpreterGrantQuery.cpp @@ -1,6 +1,8 @@ #include #include +#include #include +#include #include #include #include @@ -59,7 +61,7 @@ namespace BlockIO InterpreterGrantQuery::execute() { - const auto & query = query_ptr->as(); + auto & query = query_ptr->as(); auto & access_control = context.getAccessControlManager(); auto access = context.getAccess(); access->checkGrantOption(query.access_rights_elements); @@ -72,6 +74,12 @@ BlockIO InterpreterGrantQuery::execute() access->checkAdminOption(role_from_query); } + if (!query.cluster.empty()) + { + query.replaceCurrentUserTagWithName(context.getUserName()); + return executeDDLQueryOnCluster(query_ptr, context); + } + std::vector to_roles = ExtendedRoleSet{*query.to_roles, access_control, context.getUserID()}.getMatchingIDs(access_control); String current_database = context.getCurrentDatabase(); diff --git a/src/Parsers/ASTCreateQuotaQuery.cpp b/src/Parsers/ASTCreateQuotaQuery.cpp index 7613fce6167..8fa0dbb0d31 100644 --- a/src/Parsers/ASTCreateQuotaQuery.cpp +++ b/src/Parsers/ASTCreateQuotaQuery.cpp @@ -135,6 +135,8 @@ void ASTCreateQuotaQuery::formatImpl(const FormatSettings & settings, FormatStat settings.ostr << " " << backQuoteIfNeed(name); + formatOnCluster(settings); + if (!new_name.empty()) formatRenameTo(new_name, settings); @@ -146,4 +148,12 @@ void ASTCreateQuotaQuery::formatImpl(const FormatSettings & settings, FormatStat if (roles && (!roles->empty() || alter)) formatToRoles(*roles, settings); } + + +void ASTCreateQuotaQuery::replaceCurrentUserTagWithName(const String & current_user_name) +{ + if (roles) + roles->replaceCurrentUserTagWithName(current_user_name); +} + } diff --git a/src/Parsers/ASTCreateQuotaQuery.h b/src/Parsers/ASTCreateQuotaQuery.h index 2968c2cc607..09ceaea9825 100644 --- a/src/Parsers/ASTCreateQuotaQuery.h +++ b/src/Parsers/ASTCreateQuotaQuery.h @@ -1,6 +1,7 @@ #pragma once #include +#include #include @@ -25,7 +26,7 @@ class ASTExtendedRoleSet; * UNSET TRACKING} [,...]] * [TO {role [,...] 
| ALL | ALL EXCEPT role [,...]}] */ -class ASTCreateQuotaQuery : public IAST +class ASTCreateQuotaQuery : public IAST, public ASTQueryWithOnCluster { public: bool alter = false; @@ -58,5 +59,7 @@ public: String getID(char) const override; ASTPtr clone() const override; void formatImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const override; + void replaceCurrentUserTagWithName(const String & current_user_name); + ASTPtr getRewrittenASTWithoutOnCluster(const std::string &) const override { return removeOnCluster(clone()); } }; } diff --git a/src/Parsers/ASTCreateRoleQuery.cpp b/src/Parsers/ASTCreateRoleQuery.cpp index 3d69e4dac59..f3873f7a3eb 100644 --- a/src/Parsers/ASTCreateRoleQuery.cpp +++ b/src/Parsers/ASTCreateRoleQuery.cpp @@ -54,6 +54,8 @@ void ASTCreateRoleQuery::formatImpl(const FormatSettings & format, FormatState & format.ostr << " " << backQuoteIfNeed(name); + formatOnCluster(format); + if (!new_name.empty()) formatRenameTo(new_name, format); diff --git a/src/Parsers/ASTCreateRoleQuery.h b/src/Parsers/ASTCreateRoleQuery.h index 69bb9896fa3..ab306dd5dec 100644 --- a/src/Parsers/ASTCreateRoleQuery.h +++ b/src/Parsers/ASTCreateRoleQuery.h @@ -1,6 +1,7 @@ #pragma once #include +#include namespace DB @@ -15,7 +16,7 @@ class ASTSettingsProfileElements; * [RENAME TO new_name] * [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...] */ -class ASTCreateRoleQuery : public IAST +class ASTCreateRoleQuery : public IAST, public ASTQueryWithOnCluster { public: bool alter = false; @@ -33,5 +34,6 @@ public: String getID(char) const override; ASTPtr clone() const override; void formatImpl(const FormatSettings & format, FormatState &, FormatStateStacked) const override; + ASTPtr getRewrittenASTWithoutOnCluster(const std::string &) const override { return removeOnCluster(clone()); } }; } diff --git a/src/Parsers/ASTCreateRowPolicyQuery.cpp b/src/Parsers/ASTCreateRowPolicyQuery.cpp index 9102ec1da72..9b36f5500c1 100644 --- a/src/Parsers/ASTCreateRowPolicyQuery.cpp +++ b/src/Parsers/ASTCreateRowPolicyQuery.cpp @@ -157,6 +157,8 @@ void ASTCreateRowPolicyQuery::formatImpl(const FormatSettings & settings, Format settings.ostr << " " << backQuoteIfNeed(policy_name) << (settings.hilite ? hilite_keyword : "") << " ON " << (settings.hilite ? hilite_none : "") << (database.empty() ? String{} : backQuoteIfNeed(database) + ".") << table_name; + formatOnCluster(settings); + if (!new_policy_name.empty()) formatRenameTo(new_policy_name, settings); @@ -168,4 +170,11 @@ void ASTCreateRowPolicyQuery::formatImpl(const FormatSettings & settings, Format if (roles && (!roles->empty() || alter)) formatToRoles(*roles, settings); } + + +void ASTCreateRowPolicyQuery::replaceCurrentUserTagWithName(const String & current_user_name) +{ + if (roles) + roles->replaceCurrentUserTagWithName(current_user_name); +} } diff --git a/src/Parsers/ASTCreateRowPolicyQuery.h b/src/Parsers/ASTCreateRowPolicyQuery.h index e58ed0ec46c..85ba674eeb0 100644 --- a/src/Parsers/ASTCreateRowPolicyQuery.h +++ b/src/Parsers/ASTCreateRowPolicyQuery.h @@ -1,6 +1,7 @@ #pragma once #include +#include #include #include #include @@ -25,7 +26,7 @@ class ASTExtendedRoleSet; * [WITH CHECK {condition | NONE}] [,...] * [TO {role [,...] 
| ALL | ALL EXCEPT role [,...]}] */ -class ASTCreateRowPolicyQuery : public IAST +class ASTCreateRowPolicyQuery : public IAST, public ASTQueryWithOnCluster { public: bool alter = false; @@ -47,5 +48,7 @@ public: String getID(char) const override; ASTPtr clone() const override; void formatImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const override; + void replaceCurrentUserTagWithName(const String & current_user_name); + ASTPtr getRewrittenASTWithoutOnCluster(const std::string &) const override { return removeOnCluster(clone()); } }; } diff --git a/src/Parsers/ASTCreateSettingsProfileQuery.cpp b/src/Parsers/ASTCreateSettingsProfileQuery.cpp index a5a5556baf3..8db82b0e1cb 100644 --- a/src/Parsers/ASTCreateSettingsProfileQuery.cpp +++ b/src/Parsers/ASTCreateSettingsProfileQuery.cpp @@ -61,6 +61,8 @@ void ASTCreateSettingsProfileQuery::formatImpl(const FormatSettings & format, Fo format.ostr << " " << backQuoteIfNeed(name); + formatOnCluster(format); + if (!new_name.empty()) formatRenameTo(new_name, format); @@ -71,4 +73,10 @@ void ASTCreateSettingsProfileQuery::formatImpl(const FormatSettings & format, Fo formatToRoles(*to_roles, format); } + +void ASTCreateSettingsProfileQuery::replaceCurrentUserTagWithName(const String & current_user_name) +{ + if (to_roles) + to_roles->replaceCurrentUserTagWithName(current_user_name); +} } diff --git a/src/Parsers/ASTCreateSettingsProfileQuery.h b/src/Parsers/ASTCreateSettingsProfileQuery.h index b3a60853e57..cc133397db4 100644 --- a/src/Parsers/ASTCreateSettingsProfileQuery.h +++ b/src/Parsers/ASTCreateSettingsProfileQuery.h @@ -1,6 +1,7 @@ #pragma once #include +#include namespace DB @@ -16,7 +17,7 @@ class ASTExtendedRoleSet; * [RENAME TO new_name] * [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...] */ -class ASTCreateSettingsProfileQuery : public IAST +class ASTCreateSettingsProfileQuery : public IAST, public ASTQueryWithOnCluster { public: bool alter = false; @@ -36,5 +37,7 @@ public: String getID(char) const override; ASTPtr clone() const override; void formatImpl(const FormatSettings & format, FormatState &, FormatStateStacked) const override; + void replaceCurrentUserTagWithName(const String & current_user_name); + ASTPtr getRewrittenASTWithoutOnCluster(const std::string &) const override { return removeOnCluster(clone()); } }; } diff --git a/src/Parsers/ASTCreateUserQuery.cpp b/src/Parsers/ASTCreateUserQuery.cpp index c3cbc366d88..d901ed8f5a1 100644 --- a/src/Parsers/ASTCreateUserQuery.cpp +++ b/src/Parsers/ASTCreateUserQuery.cpp @@ -184,6 +184,8 @@ void ASTCreateUserQuery::formatImpl(const FormatSettings & format, FormatState & format.ostr << " " << backQuoteIfNeed(name); + formatOnCluster(format); + if (!new_name.empty()) formatRenameTo(new_name, format); diff --git a/src/Parsers/ASTCreateUserQuery.h b/src/Parsers/ASTCreateUserQuery.h index 643db2660af..5a5cc0d9550 100644 --- a/src/Parsers/ASTCreateUserQuery.h +++ b/src/Parsers/ASTCreateUserQuery.h @@ -1,6 +1,7 @@ #pragma once #include +#include #include #include @@ -23,7 +24,7 @@ class ASTSettingsProfileElements; * [DEFAULT ROLE role [,...] | ALL | ALL EXCEPT role [,...] ] * [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...] 
*/ -class ASTCreateUserQuery : public IAST +class ASTCreateUserQuery : public IAST, public ASTQueryWithOnCluster { public: bool alter = false; @@ -49,5 +50,6 @@ public: String getID(char) const override; ASTPtr clone() const override; void formatImpl(const FormatSettings & format, FormatState &, FormatStateStacked) const override; + ASTPtr getRewrittenASTWithoutOnCluster(const std::string &) const override { return removeOnCluster(clone()); } }; } diff --git a/src/Parsers/ASTDropAccessEntityQuery.cpp b/src/Parsers/ASTDropAccessEntityQuery.cpp index 3896128ceb5..06a820bfbb5 100644 --- a/src/Parsers/ASTDropAccessEntityQuery.cpp +++ b/src/Parsers/ASTDropAccessEntityQuery.cpp @@ -75,5 +75,7 @@ void ASTDropAccessEntityQuery::formatImpl(const FormatSettings & settings, Forma settings.ostr << ' ' << backQuoteIfNeed(name); } } + + formatOnCluster(settings); } } diff --git a/src/Parsers/ASTDropAccessEntityQuery.h b/src/Parsers/ASTDropAccessEntityQuery.h index 5f0b46bd896..a3b358dcfb9 100644 --- a/src/Parsers/ASTDropAccessEntityQuery.h +++ b/src/Parsers/ASTDropAccessEntityQuery.h @@ -2,6 +2,7 @@ #include #include +#include namespace DB @@ -13,7 +14,7 @@ namespace DB * DROP [ROW] POLICY [IF EXISTS] name [,...] ON [database.]table [,...] * DROP [SETTINGS] PROFILE [IF EXISTS] name [,...] */ -class ASTDropAccessEntityQuery : public IAST +class ASTDropAccessEntityQuery : public IAST, public ASTQueryWithOnCluster { public: enum class Kind @@ -34,5 +35,6 @@ public: String getID(char) const override; ASTPtr clone() const override; void formatImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const override; + ASTPtr getRewrittenASTWithoutOnCluster(const std::string &) const override { return removeOnCluster(clone()); } }; } diff --git a/src/Parsers/ASTExtendedRoleSet.cpp b/src/Parsers/ASTExtendedRoleSet.cpp index 3ac1052897d..9eb06a6a101 100644 --- a/src/Parsers/ASTExtendedRoleSet.cpp +++ b/src/Parsers/ASTExtendedRoleSet.cpp @@ -72,4 +72,21 @@ void ASTExtendedRoleSet::formatImpl(const FormatSettings & settings, FormatState } } } + + +void ASTExtendedRoleSet::replaceCurrentUserTagWithName(const String & current_user_name) +{ + if (current_user) + { + names.push_back(current_user_name); + current_user = false; + } + + if (except_current_user) + { + except_names.push_back(current_user_name); + except_current_user = false; + } +} + } diff --git a/src/Parsers/ASTExtendedRoleSet.h b/src/Parsers/ASTExtendedRoleSet.h index 84190211087..8d619e5d6a0 100644 --- a/src/Parsers/ASTExtendedRoleSet.h +++ b/src/Parsers/ASTExtendedRoleSet.h @@ -18,6 +18,7 @@ public: bool id_mode = false; /// If true then `names` and `except_names` keeps UUIDs, not names. bool empty() const { return names.empty() && !current_user && !all; } + void replaceCurrentUserTagWithName(const String & current_user_name); String getID(char) const override { return "ExtendedRoleSet"; } ASTPtr clone() const override { return std::make_shared(*this); } diff --git a/src/Parsers/ASTGrantQuery.cpp b/src/Parsers/ASTGrantQuery.cpp index 94521d790f2..f91a5416011 100644 --- a/src/Parsers/ASTGrantQuery.cpp +++ b/src/Parsers/ASTGrantQuery.cpp @@ -122,19 +122,22 @@ ASTPtr ASTGrantQuery::clone() const void ASTGrantQuery::formatImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const { settings.ostr << (settings.hilite ? IAST::hilite_keyword : "") << (attach ? "ATTACH " : "") << ((kind == Kind::GRANT) ? "GRANT" : "REVOKE") - << (settings.hilite ? IAST::hilite_none : "") << " "; + << (settings.hilite ? 
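// Note: the reshuffled spaces in this formatter are deliberate — formatOnCluster()
// has to print the ON CLUSTER clause immediately after GRANT/REVOKE, so the
// trailing space moves off the keyword, a leading space is added before
// "GRANT OPTION FOR"/"ADMIN OPTION FOR", and the single separator before the
// rights list is emitted once, below. The resulting clause order matches the
// statements exercised by the integration test at the end of this patch, e.g.:
//
//     REVOKE ON CLUSTER 'cluster' SELECT ON *.* FROM Alex
//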
IAST::hilite_none : ""); + + formatOnCluster(settings); if (kind == Kind::REVOKE) { if (grant_option) - settings.ostr << (settings.hilite ? hilite_keyword : "") << "GRANT OPTION FOR " << (settings.hilite ? hilite_none : ""); + settings.ostr << (settings.hilite ? hilite_keyword : "") << " GRANT OPTION FOR" << (settings.hilite ? hilite_none : ""); else if (admin_option) - settings.ostr << (settings.hilite ? hilite_keyword : "") << "ADMIN OPTION FOR " << (settings.hilite ? hilite_none : ""); + settings.ostr << (settings.hilite ? hilite_keyword : "") << " ADMIN OPTION FOR" << (settings.hilite ? hilite_none : ""); } if ((!!roles + !access_rights_elements.empty()) != 1) throw Exception("Either roles or access rights elements should be set", ErrorCodes::LOGICAL_ERROR); + settings.ostr << " "; if (roles) roles->format(settings); else @@ -150,4 +153,11 @@ void ASTGrantQuery::formatImpl(const FormatSettings & settings, FormatState &, F settings.ostr << (settings.hilite ? hilite_keyword : "") << " WITH ADMIN OPTION" << (settings.hilite ? hilite_none : ""); } } + + +void ASTGrantQuery::replaceCurrentUserTagWithName(const String & current_user_name) +{ + if (to_roles) + to_roles->replaceCurrentUserTagWithName(current_user_name); +} } diff --git a/src/Parsers/ASTGrantQuery.h b/src/Parsers/ASTGrantQuery.h index 95b5f0b8448..e1ad8dc5dc5 100644 --- a/src/Parsers/ASTGrantQuery.h +++ b/src/Parsers/ASTGrantQuery.h @@ -2,6 +2,7 @@ #include #include +#include namespace DB @@ -15,7 +16,7 @@ class ASTExtendedRoleSet; * GRANT role [,...] TO {user_name | role_name | CURRENT_USER} [,...] [WITH ADMIN OPTION] * REVOKE [ADMIN OPTION FOR] role [,...] FROM {user_name | role_name | CURRENT_USER} [,...] | ALL | ALL EXCEPT {user_name | role_name | CURRENT_USER} [,...] */ -class ASTGrantQuery : public IAST +class ASTGrantQuery : public IAST, public ASTQueryWithOnCluster { public: enum class Kind @@ -34,5 +35,7 @@ public: String getID(char) const override; ASTPtr clone() const override; void formatImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const override; + void replaceCurrentUserTagWithName(const String & current_user_name); + ASTPtr getRewrittenASTWithoutOnCluster(const std::string &) const override { return removeOnCluster(clone()); } }; } diff --git a/src/Parsers/ParserCreateQuotaQuery.cpp b/src/Parsers/ParserCreateQuotaQuery.cpp index 9a6afec6941..66e72ee4968 100644 --- a/src/Parsers/ParserCreateQuotaQuery.cpp +++ b/src/Parsers/ParserCreateQuotaQuery.cpp @@ -238,6 +238,13 @@ bool ParserCreateQuotaQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expe if (!parseIdentifierOrStringLiteral(pos, expected, name)) return false; + String cluster; + if (ParserKeyword{"ON"}.ignore(pos, expected)) + { + if (!ASTQueryWithOnCluster::parse(pos, cluster, expected)) + return false; + } + String new_name; std::optional key_type; std::vector all_limits; @@ -266,6 +273,7 @@ bool ParserCreateQuotaQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expe query->if_exists = if_exists; query->if_not_exists = if_not_exists; query->or_replace = or_replace; + query->cluster = std::move(cluster); query->name = std::move(name); query->new_name = std::move(new_name); query->key_type = key_type; diff --git a/src/Parsers/ParserCreateRoleQuery.cpp b/src/Parsers/ParserCreateRoleQuery.cpp index e2b42c976b4..05143108480 100644 --- a/src/Parsers/ParserCreateRoleQuery.cpp +++ b/src/Parsers/ParserCreateRoleQuery.cpp @@ -80,6 +80,13 @@ bool ParserCreateRoleQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec if 
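// Note: the same fragment is spliced into each access-entity parser in this
// patch (quota above; role here; row policy, settings profile, user, drop and
// grant below). After the leading ON keyword is matched,
// ASTQueryWithOnCluster::parse() consumes CLUSTER plus the cluster name into
// `cluster`, which is later moved into query->cluster:
//
//     String cluster;
//     if (ParserKeyword{"ON"}.ignore(pos, expected))
//     {
//         if (!ASTQueryWithOnCluster::parse(pos, cluster, expected))
//             return false;
//     }
//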
(!parseRoleName(pos, expected, name)) return false; + String cluster; + if (ParserKeyword{"ON"}.ignore(pos, expected)) + { + if (!ASTQueryWithOnCluster::parse(pos, cluster, expected)) + return false; + } + String new_name; std::shared_ptr settings; while (true) @@ -101,6 +108,7 @@ bool ParserCreateRoleQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec query->if_exists = if_exists; query->if_not_exists = if_not_exists; query->or_replace = or_replace; + query->cluster = std::move(cluster); query->name = std::move(name); query->new_name = std::move(new_name); query->settings = std::move(settings); diff --git a/src/Parsers/ParserCreateRowPolicyQuery.cpp b/src/Parsers/ParserCreateRowPolicyQuery.cpp index ab0fbc87e12..8bfe54b87b2 100644 --- a/src/Parsers/ParserCreateRowPolicyQuery.cpp +++ b/src/Parsers/ParserCreateRowPolicyQuery.cpp @@ -243,6 +243,13 @@ bool ParserCreateRowPolicyQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & || !parseDatabaseAndTableName(pos, expected, database, table_name)) return false; + String cluster; + if (ParserKeyword{"ON"}.ignore(pos, expected)) + { + if (!ASTQueryWithOnCluster::parse(pos, cluster, expected)) + return false; + } + String new_policy_name; std::optional is_restrictive; std::vector> conditions; @@ -272,6 +279,7 @@ bool ParserCreateRowPolicyQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & query->if_exists = if_exists; query->if_not_exists = if_not_exists; query->or_replace = or_replace; + query->cluster = std::move(cluster); query->name_parts = std::move(name_parts); query->new_policy_name = std::move(new_policy_name); query->is_restrictive = is_restrictive; diff --git a/src/Parsers/ParserCreateSettingsProfileQuery.cpp b/src/Parsers/ParserCreateSettingsProfileQuery.cpp index c7c9e064f6c..4d3ed2f6e63 100644 --- a/src/Parsers/ParserCreateSettingsProfileQuery.cpp +++ b/src/Parsers/ParserCreateSettingsProfileQuery.cpp @@ -96,6 +96,13 @@ bool ParserCreateSettingsProfileQuery::parseImpl(Pos & pos, ASTPtr & node, Expec if (!parseIdentifierOrStringLiteral(pos, expected, name)) return false; + String cluster; + if (ParserKeyword{"ON"}.ignore(pos, expected)) + { + if (!ASTQueryWithOnCluster::parse(pos, cluster, expected)) + return false; + } + String new_name; std::shared_ptr settings; while (true) @@ -120,6 +127,7 @@ bool ParserCreateSettingsProfileQuery::parseImpl(Pos & pos, ASTPtr & node, Expec query->if_exists = if_exists; query->if_not_exists = if_not_exists; query->or_replace = or_replace; + query->cluster = std::move(cluster); query->name = std::move(name); query->new_name = std::move(new_name); query->settings = std::move(settings); diff --git a/src/Parsers/ParserCreateUserQuery.cpp b/src/Parsers/ParserCreateUserQuery.cpp index 7564c02da45..3968c26d42e 100644 --- a/src/Parsers/ParserCreateUserQuery.cpp +++ b/src/Parsers/ParserCreateUserQuery.cpp @@ -290,6 +290,13 @@ bool ParserCreateUserQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec if (!parseUserName(pos, expected, name, host_pattern)) return false; + String cluster; + if (ParserKeyword{"ON"}.ignore(pos, expected)) + { + if (!ASTQueryWithOnCluster::parse(pos, cluster, expected)) + return false; + } + String new_name; std::optional new_host_pattern; std::optional authentication; @@ -341,6 +348,7 @@ bool ParserCreateUserQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec query->if_exists = if_exists; query->if_not_exists = if_not_exists; query->or_replace = or_replace; + query->cluster = std::move(cluster); query->name = std::move(name); query->new_name = 
std::move(new_name); query->authentication = std::move(authentication); diff --git a/src/Parsers/ParserDropAccessEntityQuery.cpp b/src/Parsers/ParserDropAccessEntityQuery.cpp index 23e18d7d32c..ecda1691240 100644 --- a/src/Parsers/ParserDropAccessEntityQuery.cpp +++ b/src/Parsers/ParserDropAccessEntityQuery.cpp @@ -117,10 +117,18 @@ bool ParserDropAccessEntityQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & return false; } + String cluster; + if (ParserKeyword{"ON"}.ignore(pos, expected)) + { + if (!ASTQueryWithOnCluster::parse(pos, cluster, expected)) + return false; + } + auto query = std::make_shared(kind); node = query; query->if_exists = if_exists; + query->cluster = std::move(cluster); query->names = std::move(names); query->row_policies_names = std::move(row_policies_names); diff --git a/src/Parsers/ParserGrantQuery.cpp b/src/Parsers/ParserGrantQuery.cpp index aaf3dca6d78..f6eecbe5dba 100644 --- a/src/Parsers/ParserGrantQuery.cpp +++ b/src/Parsers/ParserGrantQuery.cpp @@ -259,6 +259,13 @@ bool ParserGrantQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) else return false; + String cluster; + if (ParserKeyword{"ON"}.ignore(pos, expected)) + { + if (!ASTQueryWithOnCluster::parse(pos, cluster, expected)) + return false; + } + bool grant_option = false; bool admin_option = false; if (kind == Kind::REVOKE) @@ -296,6 +303,7 @@ bool ParserGrantQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) query->kind = kind; query->attach = attach; + query->cluster = std::move(cluster); query->access_rights_elements = std::move(elements); query->roles = std::move(roles); query->to_roles = std::move(to_roles); diff --git a/tests/integration/test_access_control_on_cluster/__init__.py b/tests/integration/test_access_control_on_cluster/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_access_control_on_cluster/configs/config.d/clusters.xml b/tests/integration/test_access_control_on_cluster/configs/config.d/clusters.xml new file mode 100644 index 00000000000..741f862d162 --- /dev/null +++ b/tests/integration/test_access_control_on_cluster/configs/config.d/clusters.xml @@ -0,0 +1,22 @@ + + + + + + ch1 + 9000 + + + ch2 + 9000 + + + + + ch3 + 9000 + + + + + diff --git a/tests/integration/test_access_control_on_cluster/configs/users.d/access_management.xml b/tests/integration/test_access_control_on_cluster/configs/users.d/access_management.xml new file mode 100644 index 00000000000..7e799cb7b10 --- /dev/null +++ b/tests/integration/test_access_control_on_cluster/configs/users.d/access_management.xml @@ -0,0 +1,7 @@ + + + + 1 + + + diff --git a/tests/integration/test_access_control_on_cluster/test.py b/tests/integration/test_access_control_on_cluster/test.py new file mode 100644 index 00000000000..6ca4ac15398 --- /dev/null +++ b/tests/integration/test_access_control_on_cluster/test.py @@ -0,0 +1,41 @@ +import time +import pytest +from helpers.cluster import ClickHouseCluster +from helpers.client import QueryRuntimeException + +cluster = ClickHouseCluster(__file__) +ch1 = cluster.add_instance('ch1', config_dir="configs", with_zookeeper=True) +ch2 = cluster.add_instance('ch2', config_dir="configs", with_zookeeper=True) +ch3 = cluster.add_instance('ch3', config_dir="configs", with_zookeeper=True) + +@pytest.fixture(scope="module", autouse=True) +def started_cluster(): + try: + cluster.start() + yield cluster + + finally: + cluster.shutdown() + + +def test_access_control_on_cluster(): + ch1.query("CREATE USER Alex ON CLUSTER 
'cluster'") + assert ch1.query("SHOW CREATE USER Alex") == "CREATE USER Alex\n" + assert ch2.query("SHOW CREATE USER Alex") == "CREATE USER Alex\n" + assert ch3.query("SHOW CREATE USER Alex") == "CREATE USER Alex\n" + + ch2.query("GRANT ON CLUSTER 'cluster' SELECT ON *.* TO Alex") + assert ch1.query("SHOW GRANTS FOR Alex") == "GRANT SELECT ON *.* TO Alex\n" + assert ch2.query("SHOW GRANTS FOR Alex") == "GRANT SELECT ON *.* TO Alex\n" + assert ch3.query("SHOW GRANTS FOR Alex") == "GRANT SELECT ON *.* TO Alex\n" + + ch3.query("REVOKE ON CLUSTER 'cluster' SELECT ON *.* FROM Alex") + assert ch1.query("SHOW GRANTS FOR Alex") == "" + assert ch2.query("SHOW GRANTS FOR Alex") == "" + assert ch3.query("SHOW GRANTS FOR Alex") == "" + + ch2.query("DROP USER Alex ON CLUSTER 'cluster'") + assert "User `Alex` not found" in ch1.query_and_get_error("SHOW CREATE USER Alex") + assert "User `Alex` not found" in ch2.query_and_get_error("SHOW CREATE USER Alex") + assert "User `Alex` not found" in ch3.query_and_get_error("SHOW CREATE USER Alex") + From e573549945b6caa9ec60a5db46c7cbe8f689bea2 Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Mon, 6 Apr 2020 08:19:40 +0300 Subject: [PATCH 174/484] Rework access rights for table functions. --- src/Access/AccessType.h | 25 ++-- src/Access/ContextAccess.cpp | 2 +- src/Common/XDBCBridgeHelper.h | 9 ++ src/Interpreters/InterpreterCreateQuery.cpp | 9 +- src/Storages/StorageDistributed.cpp | 3 + src/Storages/StorageFactory.cpp | 9 ++ src/Storages/StorageFactory.h | 4 + src/Storages/StorageFile.cpp | 122 +++++++++--------- src/Storages/StorageHDFS.cpp | 3 + src/Storages/StorageMySQL.cpp | 3 + src/Storages/StorageS3.cpp | 3 + src/Storages/StorageURL.cpp | 3 + src/Storages/StorageXDBC.cpp | 3 + src/TableFunctions/ITableFunction.cpp | 4 + src/TableFunctions/ITableFunction.h | 1 + src/TableFunctions/ITableFunctionFileLike.cpp | 4 - src/TableFunctions/ITableFunctionFileLike.h | 2 - src/TableFunctions/ITableFunctionXDBC.cpp | 2 - src/TableFunctions/ITableFunctionXDBC.h | 7 +- src/TableFunctions/TableFunctionFile.cpp | 5 - src/TableFunctions/TableFunctionFile.h | 5 +- .../TableFunctionGenerateRandom.h | 1 + src/TableFunctions/TableFunctionHDFS.cpp | 5 - src/TableFunctions/TableFunctionHDFS.h | 2 +- src/TableFunctions/TableFunctionInput.cpp | 3 - src/TableFunctions/TableFunctionInput.h | 1 + src/TableFunctions/TableFunctionMerge.cpp | 3 - src/TableFunctions/TableFunctionMerge.h | 1 + src/TableFunctions/TableFunctionMySQL.cpp | 3 - src/TableFunctions/TableFunctionMySQL.h | 1 + src/TableFunctions/TableFunctionNumbers.cpp | 3 - src/TableFunctions/TableFunctionNumbers.h | 1 + src/TableFunctions/TableFunctionRemote.cpp | 3 - src/TableFunctions/TableFunctionRemote.h | 1 + src/TableFunctions/TableFunctionS3.cpp | 3 - src/TableFunctions/TableFunctionS3.h | 2 + src/TableFunctions/TableFunctionURL.cpp | 5 - src/TableFunctions/TableFunctionURL.h | 2 +- src/TableFunctions/TableFunctionValues.cpp | 3 - src/TableFunctions/TableFunctionValues.h | 1 + src/TableFunctions/TableFunctionZeros.cpp | 3 - src/TableFunctions/TableFunctionZeros.h | 1 + 42 files changed, 142 insertions(+), 134 deletions(-) diff --git a/src/Access/AccessType.h b/src/Access/AccessType.h index e242000635d..d0665a6e55f 100644 --- a/src/Access/AccessType.h +++ b/src/Access/AccessType.h @@ -147,20 +147,15 @@ enum class AccessType M(demangle, "", GLOBAL, INTROSPECTION) /* allows to execute function demangle() */\ M(INTROSPECTION, "INTROSPECTION FUNCTIONS", GROUP, ALL) /* allows to execute functions addressToLine(), 
addressToSymbol(), demangle()*/\ \ - M(file, "", GLOBAL, TABLE_FUNCTIONS) \ - M(url, "", GLOBAL, TABLE_FUNCTIONS) \ - M(input, "", GLOBAL, TABLE_FUNCTIONS) \ - M(values, "", GLOBAL, TABLE_FUNCTIONS) \ - M(numbers, "", GLOBAL, TABLE_FUNCTIONS) \ - M(zeros, "", GLOBAL, TABLE_FUNCTIONS) \ - M(merge, "", GLOBAL, TABLE_FUNCTIONS) \ - M(remote, "remoteSecure, cluster", GLOBAL, TABLE_FUNCTIONS) \ - M(mysql, "", GLOBAL, TABLE_FUNCTIONS) \ - M(odbc, "", GLOBAL, TABLE_FUNCTIONS) \ - M(jdbc, "", GLOBAL, TABLE_FUNCTIONS) \ - M(hdfs, "", GLOBAL, TABLE_FUNCTIONS) \ - M(s3, "", GLOBAL, TABLE_FUNCTIONS) \ - M(TABLE_FUNCTIONS, "", GROUP, ALL) \ + M(FILE, "", GLOBAL, SOURCES) \ + M(URL, "", GLOBAL, SOURCES) \ + M(REMOTE, "", GLOBAL, SOURCES) \ + M(MYSQL, "", GLOBAL, SOURCES) \ + M(ODBC, "", GLOBAL, SOURCES) \ + M(JDBC, "", GLOBAL, SOURCES) \ + M(HDFS, "", GLOBAL, SOURCES) \ + M(S3, "", GLOBAL, SOURCES) \ + M(SOURCES, "", GROUP, ALL) \ \ M(ALL, "ALL PRIVILEGES", GROUP, NONE) /* full access */ \ M(NONE, "USAGE, NO PRIVILEGES", GROUP, NONE) /* no access */ @@ -172,8 +167,6 @@ enum class AccessType #undef DECLARE_ACCESS_TYPE_ENUM_CONST }; -constexpr size_t MAX_ACCESS_TYPE = static_cast(AccessType::TABLE_FUNCTIONS) + 1; - std::string_view toString(AccessType type); diff --git a/src/Access/ContextAccess.cpp b/src/Access/ContextAccess.cpp index d6c7abfd4ba..14775f7a4de 100644 --- a/src/Access/ContextAccess.cpp +++ b/src/Access/ContextAccess.cpp @@ -416,7 +416,7 @@ boost::shared_ptr ContextAccess::calculateResultAccess(bool { /// Table functions are forbidden in readonly mode. /// For example, for readonly = 2 - allowed. - merged_access->revoke(AccessType::CREATE_TEMPORARY_TABLE | AccessType::TABLE_FUNCTIONS); + merged_access->revoke(AccessType::CREATE_TEMPORARY_TABLE); } if (!allow_ddl_ && !grant_option) diff --git a/src/Common/XDBCBridgeHelper.h b/src/Common/XDBCBridgeHelper.h index 613d1bed8d7..b9d1f2cdcdf 100644 --- a/src/Common/XDBCBridgeHelper.h +++ b/src/Common/XDBCBridgeHelper.h @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include @@ -230,6 +231,10 @@ struct JDBCBridgeMixin { return "JDBC"; } + static AccessType getSourceAccessType() + { + return AccessType::JDBC; + } static std::unique_ptr startBridge(const Poco::Util::AbstractConfiguration &, const Poco::Logger *, const Poco::Timespan &) { @@ -253,6 +258,10 @@ struct ODBCBridgeMixin { return "ODBC"; } + static AccessType getSourceAccessType() + { + return AccessType::ODBC; + } static std::unique_ptr startBridge(const Poco::Util::AbstractConfiguration & config, Poco::Logger * log, const Poco::Timespan & http_timeout) { diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index f15796688e1..b605ce85bc2 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -765,7 +765,14 @@ AccessRightsElements InterpreterCreateQuery::getRequiredAccess() const } if (!create.to_table.empty()) - required_access.emplace_back(AccessType::INSERT, create.to_database, create.to_table); + required_access.emplace_back(AccessType::SELECT | AccessType::INSERT, create.to_database, create.to_table); + + if (create.storage && create.storage->engine) + { + auto source_access_type = StorageFactory::instance().getSourceAccessType(create.storage->engine->name); + if (source_access_type != AccessType::NONE) + required_access.emplace_back(source_access_type); + } return required_access; } diff --git a/src/Storages/StorageDistributed.cpp 
b/src/Storages/StorageDistributed.cpp index b4375dd5b0a..b7466a7aa71 100644 --- a/src/Storages/StorageDistributed.cpp +++ b/src/Storages/StorageDistributed.cpp @@ -807,6 +807,9 @@ void registerStorageDistributed(StorageFactory & factory) storage_policy, args.relative_data_path, args.attach); + }, + { + .source_access_type = AccessType::REMOTE, }); } diff --git a/src/Storages/StorageFactory.cpp b/src/Storages/StorageFactory.cpp index 9fb548c3893..0a8ceb4b8e5 100644 --- a/src/Storages/StorageFactory.cpp +++ b/src/Storages/StorageFactory.cpp @@ -189,4 +189,13 @@ StorageFactory & StorageFactory::instance() return ret; } + +AccessType StorageFactory::getSourceAccessType(const String & table_engine) const +{ + auto it = storages.find(table_engine); + if (it == storages.end()) + return AccessType::NONE; + return it->second.features.source_access_type; +} + } diff --git a/src/Storages/StorageFactory.h b/src/Storages/StorageFactory.h index e64d8647dd8..de9060769cb 100644 --- a/src/Storages/StorageFactory.h +++ b/src/Storages/StorageFactory.h @@ -6,6 +6,7 @@ #include #include #include +#include #include @@ -54,6 +55,7 @@ public: bool supports_ttl = false; bool supports_replication = false; bool supports_deduplication = false; + AccessType source_access_type = AccessType::NONE; }; using CreatorFn = std::function; @@ -83,6 +85,7 @@ public: .supports_ttl = false, .supports_replication = false, .supports_deduplication = false, + .source_access_type = AccessType::NONE, }); const Storages & getAllStorages() const @@ -108,6 +111,7 @@ public: return result; } + AccessType getSourceAccessType(const String & table_engine) const; private: Storages storages; diff --git a/src/Storages/StorageFile.cpp b/src/Storages/StorageFile.cpp index 48341ce918d..d1332016150 100644 --- a/src/Storages/StorageFile.cpp +++ b/src/Storages/StorageFile.cpp @@ -541,67 +541,71 @@ void StorageFile::truncate(const ASTPtr & /*query*/, const Context & /* context void registerStorageFile(StorageFactory & factory) { - factory.registerStorage("File", [](const StorageFactory::Arguments & args) - { - ASTs & engine_args = args.engine_args; - - if (!(engine_args.size() >= 1 && engine_args.size() <= 3)) // NOLINT - throw Exception( - "Storage File requires from 1 to 3 arguments: name of used format, source and compression_method.", - ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); - - engine_args[0] = evaluateConstantExpressionOrIdentifierAsLiteral(engine_args[0], args.local_context); - String format_name = engine_args[0]->as().value.safeGet(); - - String compression_method; - StorageFile::CommonArguments common_args{args.table_id, format_name, compression_method, - args.columns, args.constraints, args.context}; - - if (engine_args.size() == 1) /// Table in database - return StorageFile::create(args.relative_data_path, common_args); - - /// Will use FD if engine_args[1] is int literal or identifier with std* name - int source_fd = -1; - String source_path; - - if (auto opt_name = tryGetIdentifierName(engine_args[1])) + factory.registerStorage( + "File", + [](const StorageFactory::Arguments & args) { - if (*opt_name == "stdin") - source_fd = STDIN_FILENO; - else if (*opt_name == "stdout") - source_fd = STDOUT_FILENO; - else if (*opt_name == "stderr") - source_fd = STDERR_FILENO; + ASTs & engine_args = args.engine_args; + + if (!(engine_args.size() >= 1 && engine_args.size() <= 3)) // NOLINT + throw Exception( + "Storage File requires from 1 to 3 arguments: name of used format, source and compression_method.", + 
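// Note: each "source" engine now declares its AccessType through the new
// StorageFeatures::source_access_type field when it registers itself; this
// registration, for instance, ends with { .source_access_type = AccessType::FILE }.
// In sketch form (creator_fn standing in for the lambda shown here):
//
//     factory.registerStorage("File", creator_fn, { .source_access_type = AccessType::FILE });
//
// InterpreterCreateQuery::getRequiredAccess() (hunk above) then adds that type
// to the required rights whenever a table is created over such an engine;
// engines that declare nothing keep the default AccessType::NONE.
//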
ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); + + engine_args[0] = evaluateConstantExpressionOrIdentifierAsLiteral(engine_args[0], args.local_context); + String format_name = engine_args[0]->as().value.safeGet(); + + String compression_method; + StorageFile::CommonArguments common_args{ + args.table_id, format_name, compression_method, args.columns, args.constraints, args.context}; + + if (engine_args.size() == 1) /// Table in database + return StorageFile::create(args.relative_data_path, common_args); + + /// Will use FD if engine_args[1] is int literal or identifier with std* name + int source_fd = -1; + String source_path; + + if (auto opt_name = tryGetIdentifierName(engine_args[1])) + { + if (*opt_name == "stdin") + source_fd = STDIN_FILENO; + else if (*opt_name == "stdout") + source_fd = STDOUT_FILENO; + else if (*opt_name == "stderr") + source_fd = STDERR_FILENO; + else + throw Exception( + "Unknown identifier '" + *opt_name + "' in second arg of File storage constructor", ErrorCodes::UNKNOWN_IDENTIFIER); + } + else if (const auto * literal = engine_args[1]->as()) + { + auto type = literal->value.getType(); + if (type == Field::Types::Int64) + source_fd = static_cast(literal->value.get()); + else if (type == Field::Types::UInt64) + source_fd = static_cast(literal->value.get()); + else if (type == Field::Types::String) + source_path = literal->value.get(); + else + throw Exception("Second argument must be path or file descriptor", ErrorCodes::BAD_ARGUMENTS); + } + + if (engine_args.size() == 3) + { + engine_args[2] = evaluateConstantExpressionOrIdentifierAsLiteral(engine_args[2], args.local_context); + compression_method = engine_args[2]->as().value.safeGet(); + } else - throw Exception("Unknown identifier '" + *opt_name + "' in second arg of File storage constructor", - ErrorCodes::UNKNOWN_IDENTIFIER); - } - else if (const auto * literal = engine_args[1]->as()) + compression_method = "auto"; + + if (0 <= source_fd) /// File descriptor + return StorageFile::create(source_fd, common_args); + else /// User's file + return StorageFile::create(source_path, args.context.getUserFilesPath(), common_args); + }, { - auto type = literal->value.getType(); - if (type == Field::Types::Int64) - source_fd = static_cast(literal->value.get()); - else if (type == Field::Types::UInt64) - source_fd = static_cast(literal->value.get()); - else if (type == Field::Types::String) - source_path = literal->value.get(); - else - throw Exception("Second argument must be path or file descriptor", ErrorCodes::BAD_ARGUMENTS); - } - - if (engine_args.size() == 3) - { - engine_args[2] = evaluateConstantExpressionOrIdentifierAsLiteral(engine_args[2], args.local_context); - compression_method = engine_args[2]->as().value.safeGet(); - } - else - compression_method = "auto"; - - if (0 <= source_fd) /// File descriptor - return StorageFile::create(source_fd, common_args); - else /// User's file - return StorageFile::create(source_path, args.context.getUserFilesPath(), common_args); - }); + .source_access_type = AccessType::FILE, + }); } - } diff --git a/src/Storages/StorageHDFS.cpp b/src/Storages/StorageHDFS.cpp index 192fb658154..c9c41cf9d54 100644 --- a/src/Storages/StorageHDFS.cpp +++ b/src/Storages/StorageHDFS.cpp @@ -339,6 +339,9 @@ void registerStorageHDFS(StorageFactory & factory) } else compression_method = "auto"; return StorageHDFS::create(url, args.table_id, format_name, args.columns, args.constraints, args.context, compression_method); + }, + { + .source_access_type = AccessType::HDFS, }); } diff --git 
a/src/Storages/StorageMySQL.cpp b/src/Storages/StorageMySQL.cpp index 6645b41376a..055e3f8f264 100644 --- a/src/Storages/StorageMySQL.cpp +++ b/src/Storages/StorageMySQL.cpp @@ -248,6 +248,9 @@ void registerStorageMySQL(StorageFactory & factory) args.columns, args.constraints, args.context); + }, + { + .source_access_type = AccessType::MYSQL, }); } diff --git a/src/Storages/StorageS3.cpp b/src/Storages/StorageS3.cpp index 4c359cafda7..679f343d0da 100644 --- a/src/Storages/StorageS3.cpp +++ b/src/Storages/StorageS3.cpp @@ -348,6 +348,9 @@ void registerStorageS3(StorageFactory & factory) compression_method = "auto"; return StorageS3::create(s3_uri, access_key_id, secret_access_key, args.table_id, format_name, min_upload_part_size, args.columns, args.constraints, args.context); + }, + { + .source_access_type = AccessType::S3, }); } diff --git a/src/Storages/StorageURL.cpp b/src/Storages/StorageURL.cpp index aaccccebef3..6c6f79b50e7 100644 --- a/src/Storages/StorageURL.cpp +++ b/src/Storages/StorageURL.cpp @@ -252,6 +252,9 @@ void registerStorageURL(StorageFactory & factory) format_name, args.columns, args.constraints, args.context, compression_method); + }, + { + .source_access_type = AccessType::URL, }); } } diff --git a/src/Storages/StorageXDBC.cpp b/src/Storages/StorageXDBC.cpp index cedd2abf38f..dd449e490aa 100644 --- a/src/Storages/StorageXDBC.cpp +++ b/src/Storages/StorageXDBC.cpp @@ -134,6 +134,9 @@ namespace args.context, bridge_helper); + }, + { + .source_access_type = BridgeHelperMixin::getSourceAccessType(), }); } } diff --git a/src/TableFunctions/ITableFunction.cpp b/src/TableFunctions/ITableFunction.cpp index 233da7495d8..6a784c062da 100644 --- a/src/TableFunctions/ITableFunction.cpp +++ b/src/TableFunctions/ITableFunction.cpp @@ -1,4 +1,7 @@ #include +#include +#include +#include #include @@ -13,6 +16,7 @@ namespace DB StoragePtr ITableFunction::execute(const ASTPtr & ast_function, const Context & context, const std::string & table_name) const { ProfileEvents::increment(ProfileEvents::TableFunctionExecute); + context.checkAccess(AccessType::CREATE_TEMPORARY_TABLE | StorageFactory::instance().getSourceAccessType(getStorageTypeName())); return executeImpl(ast_function, context, table_name); } diff --git a/src/TableFunctions/ITableFunction.h b/src/TableFunctions/ITableFunction.h index 9a9525d5887..0bbd7e787a5 100644 --- a/src/TableFunctions/ITableFunction.h +++ b/src/TableFunctions/ITableFunction.h @@ -38,6 +38,7 @@ public: private: virtual StoragePtr executeImpl(const ASTPtr & ast_function, const Context & context, const std::string & table_name) const = 0; + virtual const char * getStorageTypeName() const = 0; }; using TableFunctionPtr = std::shared_ptr; diff --git a/src/TableFunctions/ITableFunctionFileLike.cpp b/src/TableFunctions/ITableFunctionFileLike.cpp index eca507a4003..46a64cef785 100644 --- a/src/TableFunctions/ITableFunctionFileLike.cpp +++ b/src/TableFunctions/ITableFunctionFileLike.cpp @@ -10,8 +10,6 @@ #include -#include - #include #include @@ -65,8 +63,6 @@ StoragePtr ITableFunctionFileLike::executeImpl(const ASTPtr & ast_function, cons if (args.size() == 4) compression_method = args[3]->as().value.safeGet(); - context.checkAccess(getRequiredAccessType()); - /// Create table StoragePtr storage = getStorage(filename, format, columns, const_cast(context), table_name, compression_method); diff --git a/src/TableFunctions/ITableFunctionFileLike.h b/src/TableFunctions/ITableFunctionFileLike.h index e80bf158f8e..a18ca8ea4c8 100644 --- 
a/src/TableFunctions/ITableFunctionFileLike.h +++ b/src/TableFunctions/ITableFunctionFileLike.h @@ -5,7 +5,6 @@ namespace DB { -enum class AccessType; class ColumnsDescription; /* @@ -17,6 +16,5 @@ private: StoragePtr executeImpl(const ASTPtr & ast_function, const Context & context, const std::string & table_name) const override; virtual StoragePtr getStorage( const String & source, const String & format, const ColumnsDescription & columns, Context & global_context, const std::string & table_name, const String & compression_method) const = 0; - virtual AccessType getRequiredAccessType() const = 0; }; } diff --git a/src/TableFunctions/ITableFunctionXDBC.cpp b/src/TableFunctions/ITableFunctionXDBC.cpp index 50236b65445..adf0c9240bc 100644 --- a/src/TableFunctions/ITableFunctionXDBC.cpp +++ b/src/TableFunctions/ITableFunctionXDBC.cpp @@ -60,8 +60,6 @@ StoragePtr ITableFunctionXDBC::executeImpl(const ASTPtr & ast_function, const Co remote_table_name = args[1]->as().value.safeGet(); } - context.checkAccess(getRequiredAccessType()); - /* Infer external table structure */ /// Have to const_cast, because bridges store their commands inside context BridgeHelperPtr helper = createBridgeHelper(const_cast(context), context.getSettingsRef().http_receive_timeout.value, connection_string); diff --git a/src/TableFunctions/ITableFunctionXDBC.h b/src/TableFunctions/ITableFunctionXDBC.h index 211bac281c8..262c237bac2 100644 --- a/src/TableFunctions/ITableFunctionXDBC.h +++ b/src/TableFunctions/ITableFunctionXDBC.h @@ -2,7 +2,6 @@ #include #include -#include #include #include #include @@ -22,8 +21,6 @@ private: virtual BridgeHelperPtr createBridgeHelper(Context & context, const Poco::Timespan & http_timeout_, const std::string & connection_string_) const = 0; - - virtual AccessType getRequiredAccessType() const = 0; }; class TableFunctionJDBC : public ITableFunctionXDBC @@ -43,7 +40,7 @@ private: return std::make_shared>(context, http_timeout_, connection_string_); } - AccessType getRequiredAccessType() const override { return AccessType::jdbc; } + const char * getStorageTypeName() const override { return "JDBC"; } }; class TableFunctionODBC : public ITableFunctionXDBC @@ -63,6 +60,6 @@ private: return std::make_shared>(context, http_timeout_, connection_string_); } - AccessType getRequiredAccessType() const override { return AccessType::odbc; } + const char * getStorageTypeName() const override { return "ODBC"; } }; } diff --git a/src/TableFunctions/TableFunctionFile.cpp b/src/TableFunctions/TableFunctionFile.cpp index c27999e6199..0a68ed59aa2 100644 --- a/src/TableFunctions/TableFunctionFile.cpp +++ b/src/TableFunctions/TableFunctionFile.cpp @@ -15,11 +15,6 @@ StoragePtr TableFunctionFile::getStorage( return StorageFile::create(source, global_context.getUserFilesPath(), args); } -AccessType TableFunctionFile::getRequiredAccessType() const -{ - return AccessType::file; -} - void registerTableFunctionFile(TableFunctionFactory & factory) { factory.registerFunction(); diff --git a/src/TableFunctions/TableFunctionFile.h b/src/TableFunctions/TableFunctionFile.h index 558d5305674..ead924f6828 100644 --- a/src/TableFunctions/TableFunctionFile.h +++ b/src/TableFunctions/TableFunctionFile.h @@ -24,6 +24,5 @@ public: private: StoragePtr getStorage( const String & source, const String & format, const ColumnsDescription & columns, Context & global_context, const std::string & table_name, const std::string & compression_method) const override; - AccessType getRequiredAccessType() const override; -}; -} + const char * 
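// Note: the per-function checkAccess(AccessType::file / url / ...) calls are
// deleted throughout; ITableFunction::execute() (hunk above) now performs one
// centralized check, mapping a table function to its storage's source access
// type via the new getStorageTypeName() virtual:
//
//     context.checkAccess(AccessType::CREATE_TEMPORARY_TABLE
//         | StorageFactory::instance().getSourceAccessType(getStorageTypeName()));
//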
getStorageTypeName() const override { return "File"; } +};} diff --git a/src/TableFunctions/TableFunctionGenerateRandom.h b/src/TableFunctions/TableFunctionGenerateRandom.h index 042a5c59dbe..b0919608737 100644 --- a/src/TableFunctions/TableFunctionGenerateRandom.h +++ b/src/TableFunctions/TableFunctionGenerateRandom.h @@ -15,6 +15,7 @@ public: std::string getName() const override { return name; } private: StoragePtr executeImpl(const ASTPtr & ast_function, const Context & context, const std::string & table_name) const override; + const char * getStorageTypeName() const override { return "GenerateRandom"; } }; diff --git a/src/TableFunctions/TableFunctionHDFS.cpp b/src/TableFunctions/TableFunctionHDFS.cpp index 8af41b2e772..3bd6cd3ed76 100644 --- a/src/TableFunctions/TableFunctionHDFS.cpp +++ b/src/TableFunctions/TableFunctionHDFS.cpp @@ -4,7 +4,6 @@ #if USE_HDFS #include #include -#include #include #include @@ -22,10 +21,6 @@ StoragePtr TableFunctionHDFS::getStorage( compression_method); } -AccessType TableFunctionHDFS::getRequiredAccessType() const -{ - return AccessType::hdfs; -} #if USE_HDFS void registerTableFunctionHDFS(TableFunctionFactory & factory) diff --git a/src/TableFunctions/TableFunctionHDFS.h b/src/TableFunctions/TableFunctionHDFS.h index 4bdb6703d31..443ce0aa93b 100644 --- a/src/TableFunctions/TableFunctionHDFS.h +++ b/src/TableFunctions/TableFunctionHDFS.h @@ -25,7 +25,7 @@ public: private: StoragePtr getStorage( const String & source, const String & format, const ColumnsDescription & columns, Context & global_context, const std::string & table_name, const String & compression_method) const override; - AccessType getRequiredAccessType() const override; + const char * getStorageTypeName() const override { return "HDFS"; } }; } diff --git a/src/TableFunctions/TableFunctionInput.cpp b/src/TableFunctions/TableFunctionInput.cpp index 41bb292c2b2..e8f3453da06 100644 --- a/src/TableFunctions/TableFunctionInput.cpp +++ b/src/TableFunctions/TableFunctionInput.cpp @@ -10,7 +10,6 @@ #include #include #include -#include #include #include "registerTableFunctions.h" @@ -37,8 +36,6 @@ StoragePtr TableFunctionInput::executeImpl(const ASTPtr & ast_function, const Co throw Exception("Table function '" + getName() + "' requires exactly 1 argument: structure", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); - context.checkAccess(AccessType::input); - String structure = evaluateConstantExpressionOrIdentifierAsLiteral(args[0], context)->as().value.safeGet(); auto columns = parseColumnsListFromString(structure, context); StoragePtr storage = StorageInput::create(StorageID(getDatabaseName(), table_name), columns); diff --git a/src/TableFunctions/TableFunctionInput.h b/src/TableFunctions/TableFunctionInput.h index 24e5c5b2118..92c2e3a6e54 100644 --- a/src/TableFunctions/TableFunctionInput.h +++ b/src/TableFunctions/TableFunctionInput.h @@ -16,5 +16,6 @@ public: private: StoragePtr executeImpl(const ASTPtr & ast_function, const Context & context, const std::string & table_name) const override; + const char * getStorageTypeName() const override { return "Input"; } }; } diff --git a/src/TableFunctions/TableFunctionMerge.cpp b/src/TableFunctions/TableFunctionMerge.cpp index 1ced074761b..cd924270f7c 100644 --- a/src/TableFunctions/TableFunctionMerge.cpp +++ b/src/TableFunctions/TableFunctionMerge.cpp @@ -6,7 +6,6 @@ #include #include #include -#include #include #include #include @@ -68,8 +67,6 @@ StoragePtr TableFunctionMerge::executeImpl(const ASTPtr & ast_function, const Co String source_database = 
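// Note: merge() loses its per-database check (the removed
// checkAccess(AccessType::merge, source_database) just below); with the rework
// it relies on the centralized check, and since "Merge" declares no source
// access type, only CREATE TEMPORARY TABLE is required here — per-table SELECT
// rights are presumably still enforced when the underlying tables are read.
//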
args[0]->as().value.safeGet(); String table_name_regexp = args[1]->as().value.safeGet(); - context.checkAccess(AccessType::merge, source_database); - auto res = StorageMerge::create( StorageID(getDatabaseName(), table_name), ColumnsDescription{chooseColumns(source_database, table_name_regexp, context)}, diff --git a/src/TableFunctions/TableFunctionMerge.h b/src/TableFunctions/TableFunctionMerge.h index 43d4b692bc8..b11a9551d34 100644 --- a/src/TableFunctions/TableFunctionMerge.h +++ b/src/TableFunctions/TableFunctionMerge.h @@ -17,6 +17,7 @@ public: std::string getName() const override { return name; } private: StoragePtr executeImpl(const ASTPtr & ast_function, const Context & context, const std::string & table_name) const override; + const char * getStorageTypeName() const override { return "Merge"; } }; diff --git a/src/TableFunctions/TableFunctionMySQL.cpp b/src/TableFunctions/TableFunctionMySQL.cpp index 11f797d4ecf..be707c3520d 100644 --- a/src/TableFunctions/TableFunctionMySQL.cpp +++ b/src/TableFunctions/TableFunctionMySQL.cpp @@ -4,7 +4,6 @@ #include #include #include -#include #include #include #include @@ -57,8 +56,6 @@ StoragePtr TableFunctionMySQL::executeImpl(const ASTPtr & ast_function, const Co std::string user_name = args[3]->as().value.safeGet(); std::string password = args[4]->as().value.safeGet(); - context.checkAccess(AccessType::mysql); - bool replace_query = false; std::string on_duplicate_clause; if (args.size() >= 6) diff --git a/src/TableFunctions/TableFunctionMySQL.h b/src/TableFunctions/TableFunctionMySQL.h index fd5b0219df6..850affc5887 100644 --- a/src/TableFunctions/TableFunctionMySQL.h +++ b/src/TableFunctions/TableFunctionMySQL.h @@ -20,6 +20,7 @@ public: } private: StoragePtr executeImpl(const ASTPtr & ast_function, const Context & context, const std::string & table_name) const override; + const char * getStorageTypeName() const override { return "MySQL"; } }; } diff --git a/src/TableFunctions/TableFunctionNumbers.cpp b/src/TableFunctions/TableFunctionNumbers.cpp index bb414f4783f..c8c0fe96092 100644 --- a/src/TableFunctions/TableFunctionNumbers.cpp +++ b/src/TableFunctions/TableFunctionNumbers.cpp @@ -5,7 +5,6 @@ #include #include #include -#include #include #include #include "registerTableFunctions.h" @@ -33,8 +32,6 @@ StoragePtr TableFunctionNumbers::executeImpl(const ASTPtr & ast_f UInt64 offset = arguments.size() == 2 ? evaluateArgument(context, arguments[0]) : 0; UInt64 length = arguments.size() == 2 ? 
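// Note: getStorageTypeName() returns "SystemNumbers" for this function, and the
// system storages are not registered in StorageFactory, so getSourceAccessType()
// falls back to AccessType::NONE (see the StorageFactory.cpp hunk above) and the
// centralized check reduces to CREATE TEMPORARY TABLE. The same applies to
// zeros(), values(), input() and generateRandom().
//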
evaluateArgument(context, arguments[1]) : evaluateArgument(context, arguments[0]); - context.checkAccess(AccessType::numbers); - auto res = StorageSystemNumbers::create(StorageID(getDatabaseName(), table_name), multithreaded, length, offset, false); res->startup(); return res; diff --git a/src/TableFunctions/TableFunctionNumbers.h b/src/TableFunctions/TableFunctionNumbers.h index e5ab38ccad8..c3efbc426ef 100644 --- a/src/TableFunctions/TableFunctionNumbers.h +++ b/src/TableFunctions/TableFunctionNumbers.h @@ -19,6 +19,7 @@ public: std::string getName() const override { return name; } private: StoragePtr executeImpl(const ASTPtr & ast_function, const Context & context, const std::string & table_name) const override; + const char * getStorageTypeName() const override { return "SystemNumbers"; } UInt64 evaluateArgument(const Context & context, ASTPtr & argument) const; }; diff --git a/src/TableFunctions/TableFunctionRemote.cpp b/src/TableFunctions/TableFunctionRemote.cpp index 202f8be4703..cfeb3907136 100644 --- a/src/TableFunctions/TableFunctionRemote.cpp +++ b/src/TableFunctions/TableFunctionRemote.cpp @@ -6,7 +6,6 @@ #include #include #include -#include #include #include #include @@ -132,8 +131,6 @@ StoragePtr TableFunctionRemote::executeImpl(const ASTPtr & ast_function, const C if (arg_num < args.size()) throw Exception(help_message, ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); - context.checkAccess(AccessType::remote); - /// ExpressionAnalyzer will be created in InterpreterSelectQuery that will meet these `Identifier` when processing the request. /// We need to mark them as the name of the database or table, because the default value is column. for (auto ast : args) diff --git a/src/TableFunctions/TableFunctionRemote.h b/src/TableFunctions/TableFunctionRemote.h index ef2e5cf190c..2dd58a8a6a7 100644 --- a/src/TableFunctions/TableFunctionRemote.h +++ b/src/TableFunctions/TableFunctionRemote.h @@ -22,6 +22,7 @@ public: private: StoragePtr executeImpl(const ASTPtr & ast_function, const Context & context, const std::string & table_name) const override; + const char * getStorageTypeName() const override { return "Distributed"; } std::string name; bool is_cluster_function; diff --git a/src/TableFunctions/TableFunctionS3.cpp b/src/TableFunctions/TableFunctionS3.cpp index 73121d342e2..0cf9914ed2b 100644 --- a/src/TableFunctions/TableFunctionS3.cpp +++ b/src/TableFunctions/TableFunctionS3.cpp @@ -4,7 +4,6 @@ #include #include -#include #include #include #include @@ -64,8 +63,6 @@ StoragePtr TableFunctionS3::executeImpl(const ASTPtr & ast_function, const Conte else compression_method = "auto"; - context.checkAccess(AccessType::s3); - ColumnsDescription columns = parseColumnsListFromString(structure, context); /// Create table diff --git a/src/TableFunctions/TableFunctionS3.h b/src/TableFunctions/TableFunctionS3.h index a49033da1b4..aef4e28ca76 100644 --- a/src/TableFunctions/TableFunctionS3.h +++ b/src/TableFunctions/TableFunctionS3.h @@ -38,6 +38,8 @@ private: Context & global_context, const std::string & table_name, const String & compression_method); + + const char * getStorageTypeName() const override { return "S3"; } }; } diff --git a/src/TableFunctions/TableFunctionURL.cpp b/src/TableFunctions/TableFunctionURL.cpp index a78ac2f2114..59978ae08b1 100644 --- a/src/TableFunctions/TableFunctionURL.cpp +++ b/src/TableFunctions/TableFunctionURL.cpp @@ -16,11 +16,6 @@ StoragePtr TableFunctionURL::getStorage( return StorageURL::create(uri, StorageID(getDatabaseName(), table_name), format, columns, 
ConstraintsDescription{}, global_context, compression_method); } -AccessType TableFunctionURL::getRequiredAccessType() const -{ - return AccessType::url; -} - void registerTableFunctionURL(TableFunctionFactory & factory) { factory.registerFunction(); diff --git a/src/TableFunctions/TableFunctionURL.h b/src/TableFunctions/TableFunctionURL.h index ea0ca842b48..61dca561f0c 100644 --- a/src/TableFunctions/TableFunctionURL.h +++ b/src/TableFunctions/TableFunctionURL.h @@ -20,6 +20,6 @@ public: private: StoragePtr getStorage( const String & source, const String & format, const ColumnsDescription & columns, Context & global_context, const std::string & table_name, const String & compression_method) const override; - AccessType getRequiredAccessType() const override; + const char * getStorageTypeName() const override { return "URL"; } }; } diff --git a/src/TableFunctions/TableFunctionValues.cpp b/src/TableFunctions/TableFunctionValues.cpp index 6f568fbea60..4e166b10d8f 100644 --- a/src/TableFunctions/TableFunctionValues.cpp +++ b/src/TableFunctions/TableFunctionValues.cpp @@ -14,7 +14,6 @@ #include #include -#include #include #include #include @@ -75,8 +74,6 @@ StoragePtr TableFunctionValues::executeImpl(const ASTPtr & ast_function, const C throw Exception("Table function '" + getName() + "' requires 2 or more arguments: structure and values.", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); - context.checkAccess(AccessType::values); - /// Parsing first argument as table structure and creating a sample block std::string structure = args[0]->as().value.safeGet(); diff --git a/src/TableFunctions/TableFunctionValues.h b/src/TableFunctions/TableFunctionValues.h index f02dc69162f..3cc3687dab5 100644 --- a/src/TableFunctions/TableFunctionValues.h +++ b/src/TableFunctions/TableFunctionValues.h @@ -14,6 +14,7 @@ public: std::string getName() const override { return name; } private: StoragePtr executeImpl(const ASTPtr & ast_function, const Context & context, const std::string & table_name) const override; + const char * getStorageTypeName() const override { return "Values"; } }; diff --git a/src/TableFunctions/TableFunctionZeros.cpp b/src/TableFunctions/TableFunctionZeros.cpp index d69e533c0d9..13436f04e1c 100644 --- a/src/TableFunctions/TableFunctionZeros.cpp +++ b/src/TableFunctions/TableFunctionZeros.cpp @@ -5,7 +5,6 @@ #include #include #include -#include #include #include #include "registerTableFunctions.h" @@ -32,8 +31,6 @@ StoragePtr TableFunctionZeros::executeImpl(const ASTPtr & ast_fun UInt64 length = evaluateArgument(context, arguments[0]); - context.checkAccess(AccessType::zeros); - auto res = StorageSystemZeros::create(StorageID(getDatabaseName(), table_name), multithreaded, length); res->startup(); return res; diff --git a/src/TableFunctions/TableFunctionZeros.h b/src/TableFunctions/TableFunctionZeros.h index c8f3cbabc0e..71570c23a89 100644 --- a/src/TableFunctions/TableFunctionZeros.h +++ b/src/TableFunctions/TableFunctionZeros.h @@ -19,6 +19,7 @@ public: std::string getName() const override { return name; } private: StoragePtr executeImpl(const ASTPtr & ast_function, const Context & context, const std::string & table_name) const override; + const char * getStorageTypeName() const override { return "SystemZeros"; } UInt64 evaluateArgument(const Context & context, ASTPtr & argument) const; }; From 78b0bb6a3fdb12d28c2ff77edda5caebc7ae8c4f Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 8 Apr 2020 00:38:31 +0300 Subject: [PATCH 175/484] Remove unwanted output from unit tests --- 
src/Columns/tests/gtest_weak_hash_32.cpp | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/src/Columns/tests/gtest_weak_hash_32.cpp b/src/Columns/tests/gtest_weak_hash_32.cpp index 4fa420db678..c79188e9e88 100644 --- a/src/Columns/tests/gtest_weak_hash_32.cpp +++ b/src/Columns/tests/gtest_weak_hash_32.cpp @@ -17,8 +17,10 @@ #include #include +#include #include + using namespace DB; template @@ -69,6 +71,8 @@ void checkColumn( std::unordered_map map; size_t num_collisions = 0; + std::stringstream collisions_str; + for (size_t i = 0; i < eq_class.size(); ++i) { auto & val = eq_class[i]; @@ -82,12 +86,16 @@ void checkColumn( if (num_collisions <= max_collisions_to_print) { - std::cout << "Collision:\n"; - std::cout << print_for_row(it->second) << '\n'; - std::cout << print_for_row(i) << std::endl; + collisions_str << "Collision:\n"; + collisions_str << print_for_row(it->second) << '\n'; + collisions_str << print_for_row(i) << std::endl; } - else if (num_collisions > allowed_collisions) + + if (num_collisions > allowed_collisions) + { + std::cerr << collisions_str.rdbuf(); break; + } } } From 7a69664e7ee98a0ac5c7ade3c4cf069a9bdbc936 Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Wed, 8 Apr 2020 01:16:16 +0300 Subject: [PATCH 176/484] fix stateless tests --- docker/test/stateless/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/test/stateless/Dockerfile b/docker/test/stateless/Dockerfile index 4ec48ac3fd4..7fade12466d 100644 --- a/docker/test/stateless/Dockerfile +++ b/docker/test/stateless/Dockerfile @@ -73,7 +73,7 @@ CMD dpkg -i package_folder/clickhouse-common-static_*.deb; \ ln -s /usr/share/clickhouse-test/config/server.key /etc/clickhouse-server/; \ ln -s /usr/share/clickhouse-test/config/server.crt /etc/clickhouse-server/; \ ln -s /usr/share/clickhouse-test/config/dhparam.pem /etc/clickhouse-server/; \ - if [ -n $USE_POLYMORPHIC_PARTS ] && [ $USE_POLYMORPHIC_PARTS -eq 1 ]; ln -s /usr/share/clickhouse-test/config/polymorphic_parts.xml /etc/clickhouse-server/config.d/; fi; \ + if [ -n $USE_POLYMORPHIC_PARTS ] && [ $USE_POLYMORPHIC_PARTS -eq 1 ]; then ln -s /usr/share/clickhouse-test/config/polymorphic_parts.xml /etc/clickhouse-server/config.d/; fi; \ ln -sf /usr/share/clickhouse-test/config/client_config.xml /etc/clickhouse-client/config.xml; \ service zookeeper start; sleep 5; \ service clickhouse-server start && sleep 5 && clickhouse-test --testname --shard --zookeeper $ADDITIONAL_OPTIONS $SKIP_TESTS_OPTION 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee test_output/test_result.txt From 7bef5a6edbd232c45e2b7091147f2a201205e5e3 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Wed, 8 Apr 2020 02:13:53 +0300 Subject: [PATCH 177/484] Update checkDataPart.cpp --- src/Storages/MergeTree/checkDataPart.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/MergeTree/checkDataPart.cpp b/src/Storages/MergeTree/checkDataPart.cpp index a4cfa6b78b9..52d9a2750c7 100644 --- a/src/Storages/MergeTree/checkDataPart.cpp +++ b/src/Storages/MergeTree/checkDataPart.cpp @@ -116,7 +116,7 @@ IMergeTreeDataPart::Checksums checkDataPart( { const String & file_name = it->name(); auto checksum_it = checksums_data.files.find(file_name); - + /// Skip files that we already calculated. Also skip metadata files that are not checksummed.
if (checksum_it == checksums_data.files.end() && file_name != "checksums.txt" && file_name != "columns.txt") { From b2fa9d875019419007c1749017fa2a7bc7704de2 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Wed, 8 Apr 2020 02:02:16 +0300 Subject: [PATCH 178/484] Fix SIGSEGV on INSERT into Distributed on different struct with underlying --- .../Distributed/DistributedBlockOutputStream.cpp | 7 ++++++- ...967_insert_into_distributed_different_types.reference | 0 .../00967_insert_into_distributed_different_types.sql | 9 +++++++++ 3 files changed, 15 insertions(+), 1 deletion(-) create mode 100644 tests/queries/0_stateless/00967_insert_into_distributed_different_types.reference create mode 100644 tests/queries/0_stateless/00967_insert_into_distributed_different_types.sql diff --git a/src/Storages/Distributed/DistributedBlockOutputStream.cpp b/src/Storages/Distributed/DistributedBlockOutputStream.cpp index 2aba27dfc67..af17a026927 100644 --- a/src/Storages/Distributed/DistributedBlockOutputStream.cpp +++ b/src/Storages/Distributed/DistributedBlockOutputStream.cpp @@ -305,7 +305,9 @@ ThreadPool::Job DistributedBlockOutputStream::runWritingJob(DistributedBlockOutp job.local_context = std::make_unique(context); InterpreterInsertQuery interp(query_ast, *job.local_context); - job.stream = interp.execute().out; + auto block_io = interp.execute(); + assertBlocksHaveEqualStructure(block_io.out->getHeader(), shard_block, "flushing shard block for " + storage.getStorageID().getNameForLogs()); + job.stream = block_io.out; job.stream->writePrefix(); } @@ -544,6 +546,9 @@ void DistributedBlockOutputStream::writeToLocal(const Block & block, const size_ InterpreterInsertQuery interp(query_ast, context); auto block_io = interp.execute(); + + assertBlocksHaveEqualStructure(block_io.out->getHeader(), block, "flushing " + storage.getStorageID().getNameForLogs()); + block_io.out->writePrefix(); for (size_t i = 0; i < repeats; ++i) diff --git a/tests/queries/0_stateless/00967_insert_into_distributed_different_types.reference b/tests/queries/0_stateless/00967_insert_into_distributed_different_types.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/00967_insert_into_distributed_different_types.sql b/tests/queries/0_stateless/00967_insert_into_distributed_different_types.sql new file mode 100644 index 00000000000..6b23c72981a --- /dev/null +++ b/tests/queries/0_stateless/00967_insert_into_distributed_different_types.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS dist_00967; +DROP TABLE IF EXISTS underlying_00967; + +CREATE TABLE dist_00967 (key UInt64) Engine=Distributed('test_shard_localhost', currentDatabase(), underlying_00967); +-- fails for TinyLog()/MergeTree()/... 
but not for Memory() +CREATE TABLE underlying_00967 (key Nullable(UInt64)) Engine=TinyLog(); +INSERT INTO dist_00967 SELECT toUInt64(number) FROM system.numbers LIMIT 1; -- { serverError 171; } + +SELECT * FROM dist_00967; From 1b1661ab5028f6390c3a4a9fa4d5b1eee9de2eb1 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Wed, 8 Apr 2020 02:47:58 +0300 Subject: [PATCH 179/484] Update performance_test.md --- docs/en/operations/performance_test.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/operations/performance_test.md b/docs/en/operations/performance_test.md index efe07bfe4c6..2af28147dbe 100644 --- a/docs/en/operations/performance_test.md +++ b/docs/en/operations/performance_test.md @@ -9,7 +9,7 @@ With this instruction you can run basic ClickHouse performance test on any serve 1. Go to “commits” page: https://github.com/ClickHouse/ClickHouse/commits/master -2. Click on the first green check mark or red cross with green “ClickHouse Build Check” and click on the “Details” link near “ClickHouse Build Check”. There is no such link in some commits, for example commits with documentation. In this case, choose the nearest commit having this link. +2. Click on the first green check mark or red cross with green “ClickHouse Build Check” and click on the “Details” link near “ClickHouse Build Check”. There is no such link in some commits, for example commits with documentation. In this case, choose the nearest commit having this link. 3. Copy the link to “clickhouse” binary for amd64 or aarch64. From 1b704425b6fce6af25aff753d8d0394ec0255587 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Wed, 8 Apr 2020 03:42:47 +0300 Subject: [PATCH 180/484] Update StorageBuffer.cpp --- src/Storages/StorageBuffer.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/StorageBuffer.cpp b/src/Storages/StorageBuffer.cpp index 9f5403fec07..ede162a180a 100644 --- a/src/Storages/StorageBuffer.cpp +++ b/src/Storages/StorageBuffer.cpp @@ -135,7 +135,7 @@ private: }; -QueryProcessingStage::Enum StorageBuffer::getQueryProcessingStage(const Context &context, QueryProcessingStage::Enum to_stage, const ASTPtr &query_ptr) const +QueryProcessingStage::Enum StorageBuffer::getQueryProcessingStage(const Context & context, QueryProcessingStage::Enum to_stage, const ASTPtr & query_ptr) const { if (destination_id) { From ebe9ae4fabe3af09b44e7a4b8cfea5c005d78b59 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Wed, 8 Apr 2020 03:45:11 +0300 Subject: [PATCH 181/484] Update StorageMerge.cpp --- src/Storages/StorageMerge.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/StorageMerge.cpp b/src/Storages/StorageMerge.cpp index abab85bd2b6..06c80a613c2 100644 --- a/src/Storages/StorageMerge.cpp +++ b/src/Storages/StorageMerge.cpp @@ -136,7 +136,7 @@ bool StorageMerge::mayBenefitFromIndexForIn(const ASTPtr & left_in_operand, cons } -QueryProcessingStage::Enum StorageMerge::getQueryProcessingStage(const Context &context, QueryProcessingStage::Enum to_stage, const ASTPtr &query_ptr) const +QueryProcessingStage::Enum StorageMerge::getQueryProcessingStage(const Context & context, QueryProcessingStage::Enum to_stage, const ASTPtr & query_ptr) const { auto stage_in_source_tables = QueryProcessingStage::FetchColumns; From 37fe63f8d8c9fcd89192d04ca5dc51b09f64e5ee Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Wed, 8 Apr 2020 03:49:09 +0300 Subject: [PATCH 182/484] Update S3Common.cpp --- src/IO/S3Common.cpp | 2 +- 1 file changed, 1 insertion(+), 
1 deletion(-) diff --git a/src/IO/S3Common.cpp b/src/IO/S3Common.cpp index 0d83a3df2b5..19eb2b42360 100644 --- a/src/IO/S3Common.cpp +++ b/src/IO/S3Common.cpp @@ -108,7 +108,7 @@ namespace S3 /// Case when bucket name represented in domain name of S3 URL. /// E.g. (https://bucket-name.s3.Region.amazonaws.com/key) /// https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#virtual-hosted-style-access - static const RE2 virtual_hosted_style_pattern("(.+\\.)?s3[.\\-][a-z0-9-.]+"); + static const RE2 virtual_hosted_style_pattern("(.+\\.)?s3[.\\-][a-z0-9\\-.]+"); /// Case when bucket name and key represented in path of S3 URL. /// E.g. (https://s3.Region.amazonaws.com/bucket-name/key) /// https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#path-style-access From f8500cf5d58546752d9e10c636832c1caccdf52d Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 8 Apr 2020 04:09:01 +0300 Subject: [PATCH 183/484] Fix unit test --- src/Common/parseGlobs.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Common/parseGlobs.cpp b/src/Common/parseGlobs.cpp index 31e7c2eb612..71ddbbd92ea 100644 --- a/src/Common/parseGlobs.cpp +++ b/src/Common/parseGlobs.cpp @@ -46,7 +46,7 @@ std::string makeRegexpPatternFromGlobs(const std::string & initial_str_with_glob char point; std::istringstream iss_range(buffer); iss_range >> range_begin >> point >> point >> range_end; - assert(iss_range.good()); + assert(!iss_range.fail()); bool leading_zeros = buffer[0] == '0'; size_t num_len = std::to_string(range_end).size(); if (leading_zeros) From 97c2d17e99a2ec9f59e9c1ff6b60e66d6f2b829c Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 8 Apr 2020 05:32:33 +0300 Subject: [PATCH 184/484] ThreadFuzzer: do not wrap pthread functions under thread and memory sanitizers --- src/Common/ThreadFuzzer.cpp | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/src/Common/ThreadFuzzer.cpp b/src/Common/ThreadFuzzer.cpp index 42e133b4561..60766e32361 100644 --- a/src/Common/ThreadFuzzer.cpp +++ b/src/Common/ThreadFuzzer.cpp @@ -8,6 +8,7 @@ #include +#include #include #include @@ -18,8 +19,15 @@ #include + /// We will also wrap some thread synchronization functions to inject sleep/migration before or after. -#if defined(OS_LINUX) +#if defined(OS_LINUX) && !defined(THREAD_SANITIZER) && !defined(MEMORY_SANITIZER) + #define THREAD_FUZZER_WRAP_PTHREAD 1 +#else + #define THREAD_FUZZER_WRAP_PTHREAD 0 +#endif + +#if THREAD_FUZZER_WRAP_PTHREAD # define FOR_EACH_WRAPPED_FUNCTION(M) \ M(int, pthread_mutex_lock, pthread_mutex_t * arg) \ M(int, pthread_mutex_unlock, pthread_mutex_t * arg) @@ -66,7 +74,7 @@ static void initFromEnv(std::atomic & what, const char * name) static std::atomic num_cpus = 0; -#if defined(OS_LINUX) +#if THREAD_FUZZER_WRAP_PTHREAD # define DEFINE_WRAPPER_PARAMS(RET, NAME, ...) \ static std::atomic NAME##_before_yield_probability = 0; \ static std::atomic NAME##_before_migrate_probability = 0; \ @@ -97,7 +105,7 @@ void ThreadFuzzer::initConfiguration() initFromEnv(sleep_probability, "THREAD_FUZZER_SLEEP_PROBABILITY"); initFromEnv(sleep_time_us, "THREAD_FUZZER_SLEEP_TIME_US"); -#if defined(OS_LINUX) +#if THREAD_FUZZER_WRAP_PTHREAD # define INIT_WRAPPER_PARAMS(RET, NAME, ...) 
\ initFromEnv(NAME##_before_yield_probability, "THREAD_FUZZER_" #NAME "_BEFORE_YIELD_PROBABILITY"); \ initFromEnv(NAME##_before_migrate_probability, "THREAD_FUZZER_" #NAME "_BEFORE_MIGRATE_PROBABILITY"); \ @@ -118,7 +126,7 @@ void ThreadFuzzer::initConfiguration() bool ThreadFuzzer::isEffective() const { -#if defined(OS_LINUX) +#if THREAD_FUZZER_WRAP_PTHREAD # define CHECK_WRAPPER_PARAMS(RET, NAME, ...) \ if (NAME##_before_yield_probability.load(std::memory_order_relaxed)) \ return true; \ @@ -236,7 +244,7 @@ void ThreadFuzzer::setup() /// We expect that for every function like pthread_mutex_lock there is the same function with two underscores prefix. /// NOTE We cannot use dlsym(... RTLD_NEXT), because it will call pthread_mutex_lock and it will lead to infinite recursion. -#if defined(OS_LINUX) +#if THREAD_FUZZER_WRAP_PTHREAD # define MAKE_WRAPPER(RET, NAME, ...) \ extern "C" RET __##NAME(__VA_ARGS__); /* NOLINT */ \ extern "C" RET NAME(__VA_ARGS__) /* NOLINT */ \ From a6194364ea6f0ec3613f83188adf8ba889e40ae4 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Wed, 8 Apr 2020 05:54:08 +0300 Subject: [PATCH 185/484] Update MergeTreeDataSelectExecutor.cpp --- src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp index 13e852765b7..816af8db3e9 100644 --- a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp +++ b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp @@ -1204,7 +1204,7 @@ MarkRanges MergeTreeDataSelectExecutor::markRangesFromPKRange( std::vector ranges_stack = { {0, marks_count} }; std::function create_field_ref; - /// If there is no monotonic functions, there is no need to save block reference. + /// If there are no monotonic functions, there is no need to save block reference. /// Passing explicit field to FieldRef allows to optimize ranges and shows better performance. if (key_condition.hasMonotonicFunctionsChain()) { From 94a621060d8af8607e3604e7c65c7e0b718c8182 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Wed, 8 Apr 2020 05:55:03 +0300 Subject: [PATCH 186/484] Update KeyCondition.h --- src/Storages/MergeTree/KeyCondition.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/MergeTree/KeyCondition.h b/src/Storages/MergeTree/KeyCondition.h index 7c8b63eb800..4863ffa6f34 100644 --- a/src/Storages/MergeTree/KeyCondition.h +++ b/src/Storages/MergeTree/KeyCondition.h @@ -22,7 +22,7 @@ using FunctionBasePtr = std::shared_ptr; class ExpressionActions; using ExpressionActionsPtr = std::shared_ptr; -/** A field, that can be stored in two reperesenation: +/** A field, that can be stored in two reperesenations: * - A standalone field. * - A field with reference to it's position in block. * It's needed for execution functions on ranges during From 723a1f41e2516f5d17d03b7240accc3a8f07e4a5 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Wed, 8 Apr 2020 05:55:22 +0300 Subject: [PATCH 187/484] Update KeyCondition.h --- src/Storages/MergeTree/KeyCondition.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/MergeTree/KeyCondition.h b/src/Storages/MergeTree/KeyCondition.h index 4863ffa6f34..db0f9e68c60 100644 --- a/src/Storages/MergeTree/KeyCondition.h +++ b/src/Storages/MergeTree/KeyCondition.h @@ -24,7 +24,7 @@ using ExpressionActionsPtr = std::shared_ptr; /** A field, that can be stored in two reperesenations: * - A standalone field. 
- * - A field with reference to it's position in block. + * - A field with reference to its position in block. * It's needed for execution functions on ranges during * index analysis. If function was executed once for field, * it's result would be cached for all block for which field's reference points to. From a42d875a68ae50358095a63355c323a35b199a48 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Wed, 8 Apr 2020 05:55:39 +0300 Subject: [PATCH 188/484] Update KeyCondition.h --- src/Storages/MergeTree/KeyCondition.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/MergeTree/KeyCondition.h b/src/Storages/MergeTree/KeyCondition.h index db0f9e68c60..4c67ef258bf 100644 --- a/src/Storages/MergeTree/KeyCondition.h +++ b/src/Storages/MergeTree/KeyCondition.h @@ -27,7 +27,7 @@ using ExpressionActionsPtr = std::shared_ptr; * - A field with reference to its position in block. * It's needed for execution functions on ranges during * index analysis. If function was executed once for field, - * it's result would be cached for all block for which field's reference points to. + * its result would be cached for all block for which field's reference points to. */ struct FieldRef : public Field { From a46a61c970d40845fa49115eaa8c7f53d93651bf Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Wed, 8 Apr 2020 05:56:25 +0300 Subject: [PATCH 189/484] Update KeyCondition.h --- src/Storages/MergeTree/KeyCondition.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Storages/MergeTree/KeyCondition.h b/src/Storages/MergeTree/KeyCondition.h index 4c67ef258bf..3a3768f0e4c 100644 --- a/src/Storages/MergeTree/KeyCondition.h +++ b/src/Storages/MergeTree/KeyCondition.h @@ -24,10 +24,10 @@ using ExpressionActionsPtr = std::shared_ptr; /** A field, that can be stored in two reperesenations: * - A standalone field. - * - A field with reference to its position in block. - * It's needed for execution functions on ranges during + * - A field with reference to its position in a block. + * It's needed for execution of functions on ranges during * index analysis. If function was executed once for field, - * its result would be cached for all block for which field's reference points to. + * its result would be cached for whole block for which field's reference points to. 
*/ struct FieldRef : public Field { From 0b5cc8058094c4189ccc63c32c909c4096fadb42 Mon Sep 17 00:00:00 2001 From: Amos Bird Date: Tue, 7 Apr 2020 22:52:32 +0800 Subject: [PATCH 190/484] joinGetOrNull --- src/Functions/FunctionJoinGet.cpp | 33 +++++++++++++------ src/Functions/FunctionJoinGet.h | 9 +++-- src/Interpreters/HashJoin.cpp | 33 ++++++++++++------- src/Interpreters/HashJoin.h | 6 ++-- .../01240_join_get_or_null.reference | 1 + .../0_stateless/01240_join_get_or_null.sql | 7 ++++ 6 files changed, 62 insertions(+), 27 deletions(-) create mode 100644 tests/queries/0_stateless/01240_join_get_or_null.reference create mode 100644 tests/queries/0_stateless/01240_join_get_or_null.sql diff --git a/src/Functions/FunctionJoinGet.cpp b/src/Functions/FunctionJoinGet.cpp index 1745343cc33..3e18d657ac0 100644 --- a/src/Functions/FunctionJoinGet.cpp +++ b/src/Functions/FunctionJoinGet.cpp @@ -60,7 +60,8 @@ static auto getJoin(const ColumnsWithTypeAndName & arguments, const Context & co return std::make_pair(storage_join, attr_name); } -FunctionBaseImplPtr JoinGetOverloadResolver::build(const ColumnsWithTypeAndName & arguments, const DataTypePtr &) const +template +FunctionBaseImplPtr JoinGetOverloadResolver::build(const ColumnsWithTypeAndName & arguments, const DataTypePtr &) const { auto [storage_join, attr_name] = getJoin(arguments, context); auto join = storage_join->getJoin(); @@ -70,40 +71,52 @@ FunctionBaseImplPtr JoinGetOverloadResolver::build(const ColumnsWithTypeAndName for (size_t i = 0; i < arguments.size(); ++i) data_types[i] = arguments[i].type; - auto return_type = join->joinGetReturnType(attr_name); - return std::make_unique(table_lock, storage_join, join, attr_name, data_types, return_type); + auto return_type = join->joinGetReturnType(attr_name, or_null); + return std::make_unique>(table_lock, storage_join, join, attr_name, data_types, return_type); } -DataTypePtr JoinGetOverloadResolver::getReturnType(const ColumnsWithTypeAndName & arguments) const +template +DataTypePtr JoinGetOverloadResolver::getReturnType(const ColumnsWithTypeAndName & arguments) const { auto [storage_join, attr_name] = getJoin(arguments, context); auto join = storage_join->getJoin(); - return join->joinGetReturnType(attr_name); + return join->joinGetReturnType(attr_name, or_null); } -void ExecutableFunctionJoinGet::execute(Block & block, const ColumnNumbers & arguments, size_t result, size_t input_rows_count) +template +void ExecutableFunctionJoinGet::execute(Block & block, const ColumnNumbers & arguments, size_t result, size_t input_rows_count) { auto ctn = block.getByPosition(arguments[2]); if (isColumnConst(*ctn.column)) ctn.column = ctn.column->cloneResized(1); ctn.name = ""; // make sure the key name never collide with the join columns Block key_block = {ctn}; - join->joinGet(key_block, attr_name); + join->joinGet(key_block, attr_name, or_null); auto & result_ctn = key_block.getByPosition(1); if (isColumnConst(*ctn.column)) result_ctn.column = ColumnConst::create(result_ctn.column, input_rows_count); block.getByPosition(result) = result_ctn; } -ExecutableFunctionImplPtr FunctionJoinGet::prepare(const Block &, const ColumnNumbers &, size_t) const +template +ExecutableFunctionImplPtr FunctionJoinGet::prepare(const Block &, const ColumnNumbers &, size_t) const { - return std::make_unique(join, attr_name); + return std::make_unique>(join, attr_name); } void registerFunctionJoinGet(FunctionFactory & factory) { - factory.registerFunction(); + // joinGet + factory.registerFunction>(); + // joinGetOrNull + 
factory.registerFunction>(); } +template class ExecutableFunctionJoinGet; +template class ExecutableFunctionJoinGet; +template class FunctionJoinGet; +template class FunctionJoinGet; +template class JoinGetOverloadResolver; +template class JoinGetOverloadResolver; } diff --git a/src/Functions/FunctionJoinGet.h b/src/Functions/FunctionJoinGet.h index 42ff2b16217..f233ccd8a4f 100644 --- a/src/Functions/FunctionJoinGet.h +++ b/src/Functions/FunctionJoinGet.h @@ -9,13 +9,14 @@ class Context; class HashJoin; using HashJoinPtr = std::shared_ptr; +template class ExecutableFunctionJoinGet final : public IExecutableFunctionImpl { public: ExecutableFunctionJoinGet(HashJoinPtr join_, String attr_name_) : join(std::move(join_)), attr_name(std::move(attr_name_)) {} - static constexpr auto name = "joinGet"; + static constexpr auto name = or_null ? "joinGetOrNull" : "joinGet"; bool useDefaultImplementationForNulls() const override { return false; } bool useDefaultImplementationForConstants() const override { return true; } @@ -30,10 +31,11 @@ private: const String attr_name; }; +template class FunctionJoinGet final : public IFunctionBaseImpl { public: - static constexpr auto name = "joinGet"; + static constexpr auto name = or_null ? "joinGetOrNull" : "joinGet"; FunctionJoinGet(TableStructureReadLockHolder table_lock_, StoragePtr storage_join_, HashJoinPtr join_, String attr_name_, @@ -63,10 +65,11 @@ private: DataTypePtr return_type; }; +template class JoinGetOverloadResolver final : public IFunctionOverloadResolverImpl { public: - static constexpr auto name = "joinGet"; + static constexpr auto name = or_null ? "joinGetOrNull" : "joinGet"; static FunctionOverloadResolverImplPtr create(const Context & context) { return std::make_unique(context); } explicit JoinGetOverloadResolver(const Context & context_) : context(context_) {} diff --git a/src/Interpreters/HashJoin.cpp b/src/Interpreters/HashJoin.cpp index a3432ebebba..d8c0d239c96 100644 --- a/src/Interpreters/HashJoin.cpp +++ b/src/Interpreters/HashJoin.cpp @@ -681,12 +681,10 @@ public: type_name.reserve(num_columns_to_add); right_indexes.reserve(num_columns_to_add); - for (size_t i = 0; i < num_columns_to_add; ++i) + for (auto & src_column : block_with_columns_to_add) { - const ColumnWithTypeAndName & src_column = sample_block_with_columns_to_add.safeGetByPosition(i); - - /// Don't insert column if it's in left block or not explicitly required. 
- if (!block.has(src_column.name) && block_with_columns_to_add.has(src_column.name)) + /// Don't insert column if it's in left block + if (!block.has(src_column.name)) addColumn(src_column); } @@ -1158,28 +1156,36 @@ static void checkTypeOfKey(const Block & block_left, const Block & block_right) } -DataTypePtr HashJoin::joinGetReturnType(const String & column_name) const +DataTypePtr HashJoin::joinGetReturnType(const String & column_name, bool or_null) const { std::shared_lock lock(data->rwlock); if (!sample_block_with_columns_to_add.has(column_name)) throw Exception("StorageJoin doesn't contain column " + column_name, ErrorCodes::LOGICAL_ERROR); - return sample_block_with_columns_to_add.getByName(column_name).type; + auto ctn = sample_block_with_columns_to_add.getByName(column_name); + if (or_null) + { + if (!ctn.type->canBeInsideNullable()) + throw Exception("Type " + ctn.type->getName() + " cannot be inside Nullable", ErrorCodes::LOGICAL_ERROR); + else + ctn.type = makeNullable(ctn.type); + } + return ctn.type; } template -void HashJoin::joinGetImpl(Block & block, const String & column_name, const Maps & maps_) const +void HashJoin::joinGetImpl(Block & block, const Block & block_with_columns_to_add, const Maps & maps_) const { joinBlockImpl( - block, {block.getByPosition(0).name}, {sample_block_with_columns_to_add.getByName(column_name)}, maps_); + block, {block.getByPosition(0).name}, block_with_columns_to_add, maps_); } // TODO: support composite key // TODO: return multiple columns as named tuple // TODO: return array of values when strictness == ASTTableJoin::Strictness::All -void HashJoin::joinGet(Block & block, const String & column_name) const +void HashJoin::joinGet(Block & block, const String & column_name, bool or_null) const { std::shared_lock lock(data->rwlock); @@ -1188,10 +1194,15 @@ void HashJoin::joinGet(Block & block, const String & column_name) const checkTypeOfKey(block, right_table_keys); + auto ctn = sample_block_with_columns_to_add.getByName(column_name); + if (or_null) + ctn.type = makeNullable(ctn.type); + ctn.column = ctn.type->createColumn(); + if ((strictness == ASTTableJoin::Strictness::Any || strictness == ASTTableJoin::Strictness::RightAny) && kind == ASTTableJoin::Kind::Left) { - joinGetImpl(block, column_name, std::get(data->maps)); + joinGetImpl(block, {ctn}, std::get(data->maps)); } else throw Exception("joinGet only supports StorageJoin of type Left Any", ErrorCodes::LOGICAL_ERROR); diff --git a/src/Interpreters/HashJoin.h b/src/Interpreters/HashJoin.h index 24ad2b871c9..b769cfc61c5 100644 --- a/src/Interpreters/HashJoin.h +++ b/src/Interpreters/HashJoin.h @@ -161,10 +161,10 @@ public: void joinBlock(Block & block, ExtraBlockPtr & not_processed) override; /// Infer the return type for joinGet function - DataTypePtr joinGetReturnType(const String & column_name) const; + DataTypePtr joinGetReturnType(const String & column_name, bool or_null) const; /// Used by joinGet function that turns StorageJoin into a dictionary - void joinGet(Block & block, const String & column_name) const; + void joinGet(Block & block, const String & column_name, bool or_null) const; /** Keep "totals" (separate part of dataset, see WITH TOTALS) to use later.
*/ @@ -382,7 +382,7 @@ private: void joinBlockImplCross(Block & block, ExtraBlockPtr & not_processed) const; template - void joinGetImpl(Block & block, const String & column_name, const Maps & maps) const; + void joinGetImpl(Block & block, const Block & block_with_columns_to_add, const Maps & maps_) const; static Type chooseMethod(const ColumnRawPtrs & key_columns, Sizes & key_sizes); }; diff --git a/tests/queries/0_stateless/01240_join_get_or_null.reference b/tests/queries/0_stateless/01240_join_get_or_null.reference new file mode 100644 index 00000000000..dec7d2fabd2 --- /dev/null +++ b/tests/queries/0_stateless/01240_join_get_or_null.reference @@ -0,0 +1 @@ +\N diff --git a/tests/queries/0_stateless/01240_join_get_or_null.sql b/tests/queries/0_stateless/01240_join_get_or_null.sql new file mode 100644 index 00000000000..d1b9a07540a --- /dev/null +++ b/tests/queries/0_stateless/01240_join_get_or_null.sql @@ -0,0 +1,7 @@ +DROP TABLE IF EXISTS join_test; + +CREATE TABLE join_test (id UInt16, num UInt16) engine = Join(ANY, LEFT, id); + +SELECT joinGetOrNull('join_test', 'num', 500); + +DROP TABLE join_test; From 3116b2d29b4008bd4a9784816986c9c35b03981e Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sat, 4 Apr 2020 02:05:16 +0300 Subject: [PATCH 191/484] Make SHOW CREATE TABLE multiline (because it is more readable and also MySQL-like) --- src/Interpreters/InterpreterShowCreateQuery.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Interpreters/InterpreterShowCreateQuery.cpp b/src/Interpreters/InterpreterShowCreateQuery.cpp index 8bee0b88fe8..4161b3500bd 100644 --- a/src/Interpreters/InterpreterShowCreateQuery.cpp +++ b/src/Interpreters/InterpreterShowCreateQuery.cpp @@ -73,7 +73,7 @@ BlockInputStreamPtr InterpreterShowCreateQuery::executeImpl() throw Exception("Unable to show the create query of " + show_query->table + ". Maybe it was created by the system.", ErrorCodes::THERE_IS_NO_QUERY); std::stringstream stream; - formatAST(*create_query, stream, false, true); + formatAST(*create_query, stream, false, false); String res = stream.str(); MutableColumnPtr column = ColumnString::create(); From e2beecb8bb6af65d85fe79cc2b1e6f10b857dcb1 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Wed, 8 Apr 2020 07:43:42 +0000 Subject: [PATCH 192/484] Bump pymdown-extensions from 6.3 to 7.0 in /docs/tools Bumps [pymdown-extensions](https://github.com/facelessuser/pymdown-extensions) from 6.3 to 7.0.
- [Release notes](https://github.com/facelessuser/pymdown-extensions/releases) - [Commits](https://github.com/facelessuser/pymdown-extensions/compare/6.3.0...7.0.0) Signed-off-by: dependabot-preview[bot] --- docs/tools/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/tools/requirements.txt b/docs/tools/requirements.txt index 7c0d63129f4..587bcabb8fb 100644 --- a/docs/tools/requirements.txt +++ b/docs/tools/requirements.txt @@ -25,7 +25,7 @@ nose==1.3.7 protobuf==3.11.3 numpy==1.18.2 Pygments==2.5.2 -pymdown-extensions==6.3 +pymdown-extensions==7.0 python-slugify==1.2.6 PyYAML==5.3.1 repackage==0.7.3 From ed25ac8b12bbd0a5fb9a4551bda52cd8e27a56d6 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Wed, 8 Apr 2020 10:43:10 +0300 Subject: [PATCH 193/484] Rewrite access_control_path in path.xml Follow-up for: #9811 --- programs/server/config.d/path.xml | 1 + 1 file changed, 1 insertion(+) diff --git a/programs/server/config.d/path.xml b/programs/server/config.d/path.xml index 14b7deb9de0..8db1d18e8c7 100644 --- a/programs/server/config.d/path.xml +++ b/programs/server/config.d/path.xml @@ -3,4 +3,5 @@ ./tmp/ ./user_files/ ./format_schemas/ + ./access/ From 9b1c5c8b51bcdc636bff338b4834ecdee476e183 Mon Sep 17 00:00:00 2001 From: Pavel Kovalenko Date: Wed, 8 Apr 2020 11:44:26 +0300 Subject: [PATCH 194/484] Fix regex for S3 URI to pass clang-tidy check. --- src/IO/S3Common.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/IO/S3Common.cpp b/src/IO/S3Common.cpp index 19eb2b42360..3aca41a9c9a 100644 --- a/src/IO/S3Common.cpp +++ b/src/IO/S3Common.cpp @@ -108,7 +108,7 @@ namespace S3 /// Case when bucket name represented in domain name of S3 URL. /// E.g. (https://bucket-name.s3.Region.amazonaws.com/key) /// https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#virtual-hosted-style-access - static const RE2 virtual_hosted_style_pattern("(.+\\.)?s3[.\\-][a-z0-9\\-.]+"); + static const RE2 virtual_hosted_style_pattern(R"((.+\.)?s3[.\-][a-z0-9\-.]+)"); /// Case when bucket name and key represented in path of S3 URL. /// E.g. 
(https://s3.Region.amazonaws.com/bucket-name/key) /// https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#path-style-access From 623f29f9f21465af69eb2321da7f4b4b9c362146 Mon Sep 17 00:00:00 2001 From: Vadim Date: Wed, 8 Apr 2020 13:48:11 +0500 Subject: [PATCH 195/484] Update playground.md (#10118) fix grammar --- docs/ru/getting_started/playground.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/getting_started/playground.md b/docs/ru/getting_started/playground.md index b0b4a2de43a..3ddd066b2ed 100644 --- a/docs/ru/getting_started/playground.md +++ b/docs/ru/getting_started/playground.md @@ -5,7 +5,7 @@ ClickHouse Playground позволяет моментально выполнит Запросы выполняются под пользователем с правами `readonly` для которого есть следующие ограничения: - запрещены DDL запросы -- запроещены INSERT запросы +- запрещены INSERT запросы Также установлены следующие опции: - [`max_result_bytes=10485760`](../operations/settings/query_complexity/#max-result-bytes) From f6f4fb0000a99c6f7b70a8d2baf921beed612cba Mon Sep 17 00:00:00 2001 From: Ivan Blinkov Date: Wed, 8 Apr 2020 12:09:59 +0300 Subject: [PATCH 196/484] Documentation issues fixes (#10112) --- docs/en/faq/general.md | 4 +- docs/en/getting_started/tutorial.md | 2 +- docs/en/interfaces/formats.md | 4 +- docs/en/operations/settings/settings.md | 2 +- docs/en/operations/system_tables.md | 2 +- .../operations/utilities/clickhouse-local.md | 2 +- .../sql_reference/functions/url_functions.md | 3 +- docs/fa/sql_reference/table_functions/jdbc.md | 1 - docs/tools/build.py | 43 ++-------------- docs/tools/nav.py | 50 +++++++++++++++++++ docs/tools/translate/filter.py | 5 +- docs/tools/translate/split_meta.py | 3 ++ 12 files changed, 70 insertions(+), 51 deletions(-) create mode 100644 docs/tools/nav.py diff --git a/docs/en/faq/general.md b/docs/en/faq/general.md index 5d6fe1e7014..a382daf3f0e 100644 --- a/docs/en/faq/general.md +++ b/docs/en/faq/general.md @@ -27,7 +27,7 @@ NLS_LANG=RUSSIAN_RUSSIA.UTF8 ### Using INTO OUTFILE Clause {#using-into-outfile-clause} -Add an [INTO OUTFILE](../query_language/select/#into-outfile-clause) clause to your query. +Add an [INTO OUTFILE](../sql_reference/statements/select.md#into-outfile-clause) clause to your query. For example: @@ -35,7 +35,7 @@ For example: SELECT * FROM table INTO OUTFILE 'file' ``` -By default, ClickHouse uses the [TabSeparated](../interfaces/formats.md#tabseparated) format for output data. To select the [data format](../interfaces/formats.md), use the [FORMAT clause](../query_language/select/#format-clause). +By default, ClickHouse uses the [TabSeparated](../interfaces/formats.md#tabseparated) format for output data. To select the [data format](../interfaces/formats.md), use the [FORMAT clause](../sql_reference/statements/select.md#format-clause). For example: diff --git a/docs/en/getting_started/tutorial.md b/docs/en/getting_started/tutorial.md index 60a912f253a..08cca45d21d 100644 --- a/docs/en/getting_started/tutorial.md +++ b/docs/en/getting_started/tutorial.md @@ -484,7 +484,7 @@ FORMAT TSV max_insert_block_size 1048576 0 "The maximum block size for insertion, if we control the creation of blocks for insertion." ``` -Optionally you can [OPTIMIZE](../query_language/misc/#misc_operations-optimize) the tables after import. Tables that are configured with an engine from MergeTree-family always do merges of data parts in the background to optimize data storage (or at least check if it makes sense). 
These queries force the table engine to do storage optimization right now instead of some time later: +Optionally you can [OPTIMIZE](../sql_reference/statements/misc.md#misc_operations-optimize) the tables after import. Tables that are configured with an engine from MergeTree-family always do merges of data parts in the background to optimize data storage (or at least check if it makes sense). These queries force the table engine to do storage optimization right now instead of some time later: ``` bash clickhouse-client --query "OPTIMIZE TABLE tutorial.hits_v1 FINAL" diff --git a/docs/en/interfaces/formats.md b/docs/en/interfaces/formats.md index 05cffd40cf5..aadd229490f 100644 --- a/docs/en/interfaces/formats.md +++ b/docs/en/interfaces/formats.md @@ -1028,7 +1028,7 @@ The root schema of input Avro file must be of `record` type. To find the correspondence between table columns and fields of Avro schema ClickHouse compares their names. This comparison is case-sensitive. Unused fields are skipped. -Data types of ClickHouse table columns can differ from the corresponding fields of the Avro data inserted. When inserting data, ClickHouse interprets data types according to the table above and then [casts](../query_language/functions/type_conversion_functions/#type_conversion_function-cast) the data to corresponding column type. +Data types of ClickHouse table columns can differ from the corresponding fields of the Avro data inserted. When inserting data, ClickHouse interprets data types according to the table above and then [casts](../sql_reference/functions/type_conversion_functions.md#type_conversion_function-cast) the data to corresponding column type. ### Selecting Data {#selecting-data-1} @@ -1170,7 +1170,7 @@ ClickHouse supports configurable precision of the `Decimal` type. The `INSERT` q Unsupported ORC data types: `DATE32`, `TIME32`, `FIXED_SIZE_BINARY`, `JSON`, `UUID`, `ENUM`. -The data types of ClickHouse table columns don’t have to match the corresponding ORC data fields. When inserting data, ClickHouse interprets data types according to the table above and then [casts](../query_language/functions/type_conversion_functions/#type_conversion_function-cast) the data to the data type set for the ClickHouse table column. +The data types of ClickHouse table columns don’t have to match the corresponding ORC data fields. When inserting data, ClickHouse interprets data types according to the table above and then [casts](../sql_reference/functions/type_conversion_functions.md#type_conversion_function-cast) the data to the data type set for the ClickHouse table column. ### Inserting Data {#inserting-data-2} diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index ad8153b7e7e..69c444ebaef 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -134,7 +134,7 @@ Default value: 0. ## max\_http\_get\_redirects {#setting-max_http_get_redirects} -Limits the maximum number of HTTP GET redirect hops for [URL](../../engines/table_engines/special/url.md)-engine tables. The setting applies to both types of tables: those created by the [CREATE TABLE](../../query_language/create/#create-table-query) query and by the [url](../../sql_reference/table_functions/url.md) table function. +Limits the maximum number of HTTP GET redirect hops for [URL](../../engines/table_engines/special/url.md)-engine tables. 
The setting applies to both types of tables: those created by the [CREATE TABLE](../../sql_reference/statements/create.md#create-table-query) query and by the [url](../../sql_reference/table_functions/url.md) table function. Possible values: diff --git a/docs/en/operations/system_tables.md b/docs/en/operations/system_tables.md index 4ef7eda5a94..1c7d9546ff8 100644 --- a/docs/en/operations/system_tables.md +++ b/docs/en/operations/system_tables.md @@ -145,7 +145,7 @@ This system table is used for implementing the `SHOW DATABASES` query. ## system.detached\_parts {#system_tables-detached_parts} -Contains information about detached parts of [MergeTree](../engines/table_engines/mergetree_family/mergetree.md) tables. The `reason` column specifies why the part was detached. For user-detached parts, the reason is empty. Such parts can be attached with [ALTER TABLE ATTACH PARTITION\|PART](../query_language/query_language/alter/#alter_attach-partition) command. For the description of other columns, see [system.parts](#system_tables-parts). If part name is invalid, values of some columns may be `NULL`. Such parts can be deleted with [ALTER TABLE DROP DETACHED PART](../query_language/query_language/alter/#alter_drop-detached). +Contains information about detached parts of [MergeTree](../engines/table_engines/mergetree_family/mergetree.md) tables. The `reason` column specifies why the part was detached. For user-detached parts, the reason is empty. Such parts can be attached with [ALTER TABLE ATTACH PARTITION\|PART](../sql_reference/statements/alter.md#alter_attach-partition) command. For the description of other columns, see [system.parts](#system_tables-parts). If part name is invalid, values of some columns may be `NULL`. Such parts can be deleted with [ALTER TABLE DROP DETACHED PART](../sql_reference/statements/alter.md#alter_drop-detached). ## system.dictionaries {#system-dictionaries} diff --git a/docs/es/operations/utilities/clickhouse-local.md b/docs/es/operations/utilities/clickhouse-local.md index c469afbb770..d96eda80048 100644 --- a/docs/es/operations/utilities/clickhouse-local.md +++ b/docs/es/operations/utilities/clickhouse-local.md @@ -2,7 +2,7 @@ machine_translated: true machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa toc_priority: 60 -toc_title: Sistema abierto. +toc_title: Sistema abierto --- # Sistema abierto. 
{#clickhouse-local} diff --git a/docs/fa/sql_reference/functions/url_functions.md b/docs/fa/sql_reference/functions/url_functions.md index 61e3b5dddf3..d2a4e5589c6 100644 --- a/docs/fa/sql_reference/functions/url_functions.md +++ b/docs/fa/sql_reference/functions/url_functions.md @@ -2,8 +2,7 @@ machine_translated: true machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 toc_priority: 54 -toc_title: "\u06A9\u0627\u0631 \u0628\u0627 \u0646\u0634\u0627\u0646\u06CC\u0647\u0627\ - \u06CC \u0627\u06CC\u0646\u062A\u0631\u0646\u062A\u06CC" +toc_title: "\u06A9\u0627\u0631 \u0628\u0627 \u0646\u0634\u0627\u0646\u06CC\u0647\u0627\ \u06CC \u0627\u06CC\u0646\u062A\u0631\u0646\u062A\u06CC" --- # توابع برای کار با نشانیهای اینترنتی {#functions-for-working-with-urls} diff --git a/docs/fa/sql_reference/table_functions/jdbc.md b/docs/fa/sql_reference/table_functions/jdbc.md index 4fd500df33c..766d90fa5f3 100644 --- a/docs/fa/sql_reference/table_functions/jdbc.md +++ b/docs/fa/sql_reference/table_functions/jdbc.md @@ -2,7 +2,6 @@ machine_translated: true machine_translated_rev: d734a8e46ddd7465886ba4133bff743c55190626 toc_priority: 43 -toc_title: "\u062C\u0633\u062A\u062C\u0648" --- # جستجو {#table-function-jdbc} diff --git a/docs/tools/build.py b/docs/tools/build.py index cf0bfb23d86..65b9f9f8c04 100755 --- a/docs/tools/build.py +++ b/docs/tools/build.py @@ -1,7 +1,6 @@ #!/usr/bin/env python3 import argparse -import collections import datetime import logging import os @@ -22,6 +21,7 @@ from mkdocs.commands import build as mkdocs_build from concatenate import concatenate import mdx_clickhouse +import nav import test import util import website @@ -41,39 +41,6 @@ class ClickHouseMarkdown(markdown.extensions.Extension): markdown.extensions.ClickHouseMarkdown = ClickHouseMarkdown -def build_nav_entry(root): - if root.endswith('images'): - return None, None, None - result_items = [] - index_meta, _ = util.read_md_file(os.path.join(root, 'index.md')) - current_title = index_meta.get('toc_folder_title', index_meta.get('toc_title', 'hidden')) - for filename in os.listdir(root): - path = os.path.join(root, filename) - if os.path.isdir(path): - prio, title, payload = build_nav_entry(path) - if title and payload: - result_items.append((prio, title, payload)) - elif filename.endswith('.md'): - path = os.path.join(root, filename) - meta, _ = util.read_md_file(path) - path = path.split('/', 2)[-1] - title = meta.get('toc_title', 'hidden') - prio = meta.get('toc_priority', 9999) - result_items.append((prio, title, path)) - result_items = sorted(result_items, key=lambda x: (x[0], x[1])) - result = collections.OrderedDict([(item[1], item[2]) for item in result_items]) - return index_meta.get('toc_priority', 10000), current_title, result - - -def build_nav(lang, args): - docs_dir = os.path.join(args.docs_dir, lang) - _, _, nav = build_nav_entry(docs_dir) - result = [] - for key, value in nav.items(): - result.append({key: value}) - return result - - def build_for_lang(lang, args): logging.info(f'Building {lang} docs') os.environ['SINGLE_PAGE'] = '0' @@ -120,10 +87,10 @@ def build_for_lang(lang, args): site_names = { 'en': 'ClickHouse %s Documentation', + 'zh': 'ClickHouse文档 %s', 'es': 'Documentación de ClickHouse %s', 'fr': 'Documentation ClickHouse %s', 'ru': 'Документация ClickHouse %s', - 'zh': 'ClickHouse文档 %s', 'ja': 'ClickHouseドキュメント %s', 'fa': 'مستندات %sClickHouse' } @@ -185,11 +152,9 @@ def build_for_lang(lang, args): ) if os.path.exists(config_path): - nav = None raw_config['config_file'] = config_path 
else: - nav = build_nav(lang, args) - raw_config['nav'] = nav + raw_config['nav'] = nav.build_nav(lang, args) cfg = config.load_config(**raw_config) @@ -202,7 +167,7 @@ def build_for_lang(lang, args): mkdocs_build.build(cfg) if not args.skip_single_page: - build_single_page_version(lang, args, nav, cfg) + build_single_page_version(lang, args, raw_config.get('nav'), cfg) mdx_clickhouse.PatchedMacrosPlugin.disabled = False diff --git a/docs/tools/nav.py b/docs/tools/nav.py new file mode 100644 index 00000000000..2d99d4df3fe --- /dev/null +++ b/docs/tools/nav.py @@ -0,0 +1,50 @@ +import collections +import logging +import os + +import util + + +def find_first_header(content): + for line in content.split('\n'): + if line.startswith('#'): + no_hash = line.lstrip('#') + return no_hash.split('{', 1)[0].strip() + + +def build_nav_entry(root): + if root.endswith('images'): + return None, None, None + result_items = [] + index_meta, _ = util.read_md_file(os.path.join(root, 'index.md')) + current_title = index_meta.get('toc_folder_title', index_meta.get('toc_title', 'hidden')) + for filename in os.listdir(root): + path = os.path.join(root, filename) + if os.path.isdir(path): + prio, title, payload = build_nav_entry(path) + if title and payload: + result_items.append((prio, title, payload)) + elif filename.endswith('.md'): + path = os.path.join(root, filename) + meta, content = util.read_md_file(path) + path = path.split('/', 2)[-1] + title = meta.get('toc_title', find_first_header(content)) + if title: + title = title.strip().rstrip('.') + else: + title = meta.get('toc_folder_title', 'hidden') + prio = meta.get('toc_priority', 9999) + logging.debug(f'Nav entry: {prio}, {title}, {path}') + result_items.append((prio, title, path)) + result_items = sorted(result_items, key=lambda x: (x[0], x[1])) + result = collections.OrderedDict([(item[1], item[2]) for item in result_items]) + return index_meta.get('toc_priority', 10000), current_title, result + + +def build_nav(lang, args): + docs_dir = os.path.join(args.docs_dir, lang) + _, _, nav = build_nav_entry(docs_dir) + result = [] + for key, value in nav.items(): + result.append({key: value}) + return result diff --git a/docs/tools/translate/filter.py b/docs/tools/translate/filter.py index 2e0b624f398..4376e7dc7f9 100755 --- a/docs/tools/translate/filter.py +++ b/docs/tools/translate/filter.py @@ -44,7 +44,10 @@ def process_buffer(buffer, new_value, item=None, is_header=False): if text.endswith(' ') and not translated_text.endswith(' '): translated_text = translated_text + ' ' - title_case = False # is_header and translate.default_target_language == 'en' and text[0].isupper() + if is_header and translated_text.endswith('.'): + translated_text = translated_text.rstrip('.') + + title_case = False # is_header and translate.default_target_language == 'en' and text[0].isupper() title_case_whitelist = {'a', 'an', 'the', 'and', 'or'} for token in translated_text.split(' '): if title_case and not token.isupper(): diff --git a/docs/tools/translate/split_meta.py b/docs/tools/translate/split_meta.py index c76d3391082..b38b93e10b4 100755 --- a/docs/tools/translate/split_meta.py +++ b/docs/tools/translate/split_meta.py @@ -23,6 +23,9 @@ if __name__ == '__main__': title = meta.get('toc_title') if title: meta['toc_title'] = translate.translate(title, target_language) + folder_title = meta.get('toc_folder_title') + if folder_title: + meta['toc_folder_title'] = translate.translate(folder_title, target_language) if 'en_copy' in meta: del meta['en_copy'] From 
ded306f0168870a0792f48da8f991b5f7ba9458b Mon Sep 17 00:00:00 2001 From: alesapin Date: Wed, 8 Apr 2020 12:51:04 +0300 Subject: [PATCH 197/484] Fix adaptive granularity compatibility --- src/Storages/StorageReplicatedMergeTree.cpp | 9 +++- .../__init__.py | 0 .../test.py | 49 +++++++++++++++++++ 3 files changed, 57 insertions(+), 1 deletion(-) create mode 100644 tests/integration/test_adaptive_granularity_different_settings/__init__.py create mode 100644 tests/integration/test_adaptive_granularity_different_settings/test.py diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 31456c8d1f1..8ce65aca3e0 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -246,6 +246,11 @@ StorageReplicatedMergeTree::StorageReplicatedMergeTree( createTableIfNotExists(); + /// We have to check granularity on other replicas. If it's fixed we + /// must create our new replica with fixed granularity and store this + /// information in /replica/metadata. + other_replicas_fixed_granularity = checkFixedGranualrityInZookeeper(); + checkTableStructure(zookeeper_path); Coordination::Stat metadata_stat; @@ -256,11 +261,14 @@ StorageReplicatedMergeTree::StorageReplicatedMergeTree( } else { + /// In old tables this node may be missing or empty String replica_metadata; bool replica_metadata_exists = current_zookeeper->tryGet(replica_path + "/metadata", replica_metadata); if (!replica_metadata_exists || replica_metadata.empty()) { + /// We have to check shared node granularity before we create ours. + other_replicas_fixed_granularity = checkFixedGranualrityInZookeeper(); ReplicatedMergeTreeTableMetadata current_metadata(*this); current_zookeeper->createOrUpdate(replica_path + "/metadata", current_metadata.toString(), zkutil::CreateMode::Persistent); } @@ -291,7 +299,6 @@ StorageReplicatedMergeTree::StorageReplicatedMergeTree( createNewZooKeeperNodes(); - other_replicas_fixed_granularity = checkFixedGranualrityInZookeeper(); } diff --git a/tests/integration/test_adaptive_granularity_different_settings/__init__.py b/tests/integration/test_adaptive_granularity_different_settings/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_adaptive_granularity_different_settings/test.py b/tests/integration/test_adaptive_granularity_different_settings/test.py new file mode 100644 index 00000000000..b066c437e06 --- /dev/null +++ b/tests/integration/test_adaptive_granularity_different_settings/test.py @@ -0,0 +1,49 @@ +import pytest +from helpers.cluster import ClickHouseCluster + +cluster = ClickHouseCluster(__file__) + +node1 = cluster.add_instance('node1', with_zookeeper=True) +node2 = cluster.add_instance('node2', with_zookeeper=True) + +@pytest.fixture(scope="module") +def start_cluster(): + try: + cluster.start() + + yield cluster + finally: + cluster.shutdown() + + +def test_attach_detach(start_cluster): + + node1.query(""" + CREATE TABLE test (key UInt64) + ENGINE = ReplicatedMergeTree('/clickhouse/test', '1') + ORDER BY tuple() + SETTINGS index_granularity_bytes = 0""") + + node1.query("INSERT INTO test VALUES (1), (2)") + + node2.query(""" + CREATE TABLE test (key UInt64) + ENGINE = ReplicatedMergeTree('/clickhouse/test', '2') + ORDER BY tuple()""") + + node2.query("INSERT INTO test VALUES (3), (4)") + + node1.query("SYSTEM SYNC REPLICA test") + node2.query("SYSTEM SYNC REPLICA test") + + assert node1.query("SELECT COUNT() FROM test") == "4\n" + assert node2.query("SELECT
COUNT() FROM test") == "4\n" + + node1.query("DETACH TABLE test") + node2.query("DETACH TABLE test") + + node1.query("ATTACH TABLE test") + node2.query("ATTACH TABLE test") + + assert node1.query("SELECT COUNT() FROM test") == "4\n" + assert node2.query("SELECT COUNT() FROM test") == "4\n" From cf14718588b94fa722fb2a89463589759044f3e1 Mon Sep 17 00:00:00 2001 From: Ivan Blinkov Date: Wed, 8 Apr 2020 12:57:44 +0300 Subject: [PATCH 198/484] Update tests.md --- docs/en/development/tests.md | 1 - 1 file changed, 1 deletion(-) diff --git a/docs/en/development/tests.md b/docs/en/development/tests.md index 10cd3163a6c..2507940791d 100644 --- a/docs/en/development/tests.md +++ b/docs/en/development/tests.md @@ -247,4 +247,3 @@ We don’t use Travis CI due to the limit on time and computational power. We don’t use Jenkins. It was used before and now we are happy we are not using Jenkins. [Original article](https://clickhouse.tech/docs/en/development/tests/) -velopment/tests/) From 3603c98c819857fd3b88faa403474bec32daff99 Mon Sep 17 00:00:00 2001 From: Pavel Kovalenko Date: Wed, 8 Apr 2020 14:59:24 +0300 Subject: [PATCH 199/484] Move gtest for S3 URI to proper folder. --- {dbms/src => src}/IO/tests/gtest_s3_uri.cpp | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename {dbms/src => src}/IO/tests/gtest_s3_uri.cpp (100%) diff --git a/dbms/src/IO/tests/gtest_s3_uri.cpp b/src/IO/tests/gtest_s3_uri.cpp similarity index 100% rename from dbms/src/IO/tests/gtest_s3_uri.cpp rename to src/IO/tests/gtest_s3_uri.cpp From f1cb928737f137ecc747e7e53bafe71391ce3b3b Mon Sep 17 00:00:00 2001 From: Amos Bird Date: Wed, 8 Apr 2020 13:27:46 +0800 Subject: [PATCH 200/484] more monotonicity for toString() --- src/Functions/FunctionHelpers.h | 6 ++++++ src/Functions/FunctionsConversion.h | 8 +++++--- .../01234_to_string_monotonic.reference | 2 ++ .../0_stateless/01234_to_string_monotonic.sql | 14 ++++++++++++++ 4 files changed, 27 insertions(+), 3 deletions(-) create mode 100644 tests/queries/0_stateless/01234_to_string_monotonic.reference create mode 100644 tests/queries/0_stateless/01234_to_string_monotonic.sql diff --git a/src/Functions/FunctionHelpers.h b/src/Functions/FunctionHelpers.h index 00957935448..34aa0add6e1 100644 --- a/src/Functions/FunctionHelpers.h +++ b/src/Functions/FunctionHelpers.h @@ -24,6 +24,12 @@ const Type * checkAndGetDataType(const IDataType * data_type) return typeid_cast(data_type); } +template +bool checkDataTypes(const IDataType * data_type) +{ + return (... || typeid_cast(data_type)); +} + template const ColumnConst * checkAndGetColumnConst(const IColumn * column) { diff --git a/src/Functions/FunctionsConversion.h b/src/Functions/FunctionsConversion.h index 100737b43c7..d201b967fb1 100644 --- a/src/Functions/FunctionsConversion.h +++ b/src/Functions/FunctionsConversion.h @@ -1496,10 +1496,12 @@ struct ToStringMonotonicity IFunction::Monotonicity positive(true, true); IFunction::Monotonicity not_monotonic; - /// `toString` function is monotonous if the argument is Date or DateTime, or non-negative numbers with the same number of symbols. + auto type_ptr = &type; + if (auto * low_cardinality_type = checkAndGetDataType(type_ptr)) + type_ptr = low_cardinality_type->getDictionaryType().get(); - if (checkAndGetDataType(&type) - || typeid_cast(&type)) + /// `toString` function is monotonous if the argument is Date or DateTime or String, or non-negative numbers with the same number of symbols. 
+ if (checkDataTypes(type_ptr)) return positive; if (left.isNull() || right.isNull()) diff --git a/tests/queries/0_stateless/01234_to_string_monotonic.reference b/tests/queries/0_stateless/01234_to_string_monotonic.reference new file mode 100644 index 00000000000..75404a347a4 --- /dev/null +++ b/tests/queries/0_stateless/01234_to_string_monotonic.reference @@ -0,0 +1,2 @@ +1234 +1234 diff --git a/tests/queries/0_stateless/01234_to_string_monotonic.sql b/tests/queries/0_stateless/01234_to_string_monotonic.sql new file mode 100644 index 00000000000..87324fdda27 --- /dev/null +++ b/tests/queries/0_stateless/01234_to_string_monotonic.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS test1; +DROP TABLE IF EXISTS test2; + +CREATE TABLE test1 (s String) ENGINE = MergeTree ORDER BY s SETTINGS index_granularity = 1; +CREATE TABLE test2 (s LowCardinality(String)) ENGINE = MergeTree ORDER BY s SETTINGS index_granularity = 1; + +INSERT INTO test1 SELECT toString(number) FROM numbers(10000); +INSERT INTO test2 SELECT toString(number) FROM numbers(10000); + +SELECT s FROM test1 WHERE toString(s) = '1234' SETTINGS max_rows_to_read = 2; +SELECT s FROM test2 WHERE toString(s) = '1234' SETTINGS max_rows_to_read = 2; + +DROP TABLE test1; +DROP TABLE test2; From b3b0731c36653111616c94d1ad265dbcdc0b3242 Mon Sep 17 00:00:00 2001 From: BohuTANG <172204+BohuTANG@users.noreply.github.com> Date: Wed, 8 Apr 2020 21:21:55 +0800 Subject: [PATCH 201/484] Documentation: change testsies to queries in tests.md (#10122) --- docs/en/development/tests.md | 6 +++--- docs/es/development/tests.md | 6 +++--- docs/fa/development/tests.md | 6 +++--- docs/fr/development/tests.md | 6 +++--- docs/ja/development/tests.md | 6 +++--- docs/ru/development/tests.md | 6 +++--- 6 files changed, 18 insertions(+), 18 deletions(-) diff --git a/docs/en/development/tests.md b/docs/en/development/tests.md index 2507940791d..d1ff946959e 100644 --- a/docs/en/development/tests.md +++ b/docs/en/development/tests.md @@ -11,7 +11,7 @@ Functional tests are the most simple and convenient to use. Most of ClickHouse f Each functional test sends one or multiple queries to the running ClickHouse server and compares the result with reference. -Tests are located in `testsies` directory. There are two subdirectories: `stateless` and `stateful`. Stateless tests run queries without any preloaded test data - they often create small synthetic datasets on the fly, within the test itself. Stateful tests require preloaded test data from Yandex.Metrica and not available to general public. We tend to use only `stateless` tests and avoid adding new `stateful` tests. +Tests are located in `queries` directory. There are two subdirectories: `stateless` and `stateful`. Stateless tests run queries without any preloaded test data - they often create small synthetic datasets on the fly, within the test itself. Stateful tests require preloaded test data from Yandex.Metrica and not available to general public. We tend to use only `stateless` tests and avoid adding new `stateful` tests. Each test can be one of two types: `.sql` and `.sh`. `.sql` test is the simple SQL script that is piped to `clickhouse-client --multiquery --testmode`. `.sh` test is a script that is run by itself. @@ -19,7 +19,7 @@ To run all tests, use `testskhouse-test` tool. Look `--help` for the list of pos The most simple way to invoke functional tests is to copy `clickhouse-client` to `/usr/bin/`, run `clickhouse-server` and then run `./clickhouse-test` from its own directory. 
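(Editor's aside, not part of this patch: a stateless test is usually just a self-contained SQL script plus its expected output. A minimal sketch, assuming a hypothetical file `queries/0_stateless/00000_hypothetical_sum.sql`:)

```sql
-- The test creates its own data, checks one thing, and cleans up after itself.
DROP TABLE IF EXISTS hypothetical_sum;
CREATE TABLE hypothetical_sum (x UInt8) ENGINE = Memory;
INSERT INTO hypothetical_sum VALUES (1), (2), (3);
SELECT sum(x) FROM hypothetical_sum;
DROP TABLE hypothetical_sum;
```

(The matching `00000_hypothetical_sum.reference` would then contain the single line `6`.)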
-To add new test, create a `.sql` or `.sh` file in `testsies/0_stateless` directory, check it manually and then generate `.reference` file in the following way: `clickhouse-client -n --testmode < 00000_test.sql > 00000_test.reference` or `./00000_test.sh > ./00000_test.reference`. +To add new test, create a `.sql` or `.sh` file in `queries/0_stateless` directory, check it manually and then generate `.reference` file in the following way: `clickhouse-client -n --testmode < 00000_test.sql > 00000_test.reference` or `./00000_test.sh > ./00000_test.reference`. Tests should use (create, drop, etc) only tables in `test` database that is assumed to be created beforehand; also tests can use temporary tables. @@ -34,7 +34,7 @@ disable these groups of tests using `--no-zookeeper`, `--no-shard` and ## Known bugs {#known-bugs} -If we know some bugs that can be easily reproduced by functional tests, we place prepared functional tests in `testsies/bugs` directory. These tests will be moved to `teststests_stateless` when bugs are fixed. +If we know some bugs that can be easily reproduced by functional tests, we place prepared functional tests in `queries/bugs` directory. These tests will be moved to `teststests_stateless` when bugs are fixed. ## Integration Tests {#integration-tests} diff --git a/docs/es/development/tests.md b/docs/es/development/tests.md index 2d1996e0535..388931e9436 100644 --- a/docs/es/development/tests.md +++ b/docs/es/development/tests.md @@ -13,7 +13,7 @@ Las pruebas funcionales son las más simples y cómodas de usar. La mayoría de Cada prueba funcional envía una o varias consultas al servidor ClickHouse en ejecución y compara el resultado con la referencia. -Las pruebas se encuentran en `testsies` directorio. Hay dos subdirectorios: `stateless` y `stateful`. Las pruebas sin estado ejecutan consultas sin datos de prueba precargados: a menudo crean pequeños conjuntos de datos sintéticos sobre la marcha, dentro de la prueba misma. Las pruebas estatales requieren datos de prueba precargados de Yandex.Métrica y no está disponible para el público en general. Tendemos a usar sólo `stateless` pruebas y evitar la adición de nuevos `stateful` prueba. +Las pruebas se encuentran en `queries` directorio. Hay dos subdirectorios: `stateless` y `stateful`. Las pruebas sin estado ejecutan consultas sin datos de prueba precargados: a menudo crean pequeños conjuntos de datos sintéticos sobre la marcha, dentro de la prueba misma. Las pruebas estatales requieren datos de prueba precargados de Yandex.Métrica y no está disponible para el público en general. Tendemos a usar sólo `stateless` pruebas y evitar la adición de nuevos `stateful` prueba. Cada prueba puede ser de dos tipos: `.sql` y `.sh`. `.sql` test es el script SQL simple que se canaliza a `clickhouse-client --multiquery --testmode`. `.sh` test es un script que se ejecuta por sí mismo. @@ -21,7 +21,7 @@ Para ejecutar todas las pruebas, use `testskhouse-test` herramienta. Mira `--hel La forma más sencilla de invocar pruebas funcionales es copiar `clickhouse-client` a `/usr/bin/`, ejecutar `clickhouse-server` y luego ejecutar `./clickhouse-test` de su propio directorio. -Para agregar una nueva prueba, cree un `.sql` o `.sh` archivo en `testsies/0_stateless` directorio, compruébelo manualmente y luego genere `.reference` archivo de la siguiente manera: `clickhouse-client -n --testmode < 00000_test.sql > 00000_test.reference` o `./00000_test.sh > ./00000_test.reference`. 
+Para agregar una nueva prueba, cree un `.sql` o `.sh` archivo en `queries/0_stateless` directorio, compruébelo manualmente y luego genere `.reference` archivo de la siguiente manera: `clickhouse-client -n --testmode < 00000_test.sql > 00000_test.reference` o `./00000_test.sh > ./00000_test.reference`. Las pruebas deben usar (crear, soltar, etc.) solo tablas en `test` base de datos que se supone que se crea de antemano; también las pruebas pueden usar tablas temporales. @@ -36,7 +36,7 @@ deshabilitar estos grupos de pruebas utilizando `--no-zookeeper`, `--no-shard` y ## Bugs conocidos {#known-bugs} -Si conocemos algunos errores que se pueden reproducir fácilmente mediante pruebas funcionales, colocamos pruebas funcionales preparadas en `testsies/bugs` directorio. Estas pruebas se moverán a `teststests_stateless` cuando se corrigen errores. +Si conocemos algunos errores que se pueden reproducir fácilmente mediante pruebas funcionales, colocamos pruebas funcionales preparadas en `queries/bugs` directorio. Estas pruebas se moverán a `teststests_stateless` cuando se corrigen errores. ## Pruebas de integración {#integration-tests} diff --git a/docs/fa/development/tests.md b/docs/fa/development/tests.md index 86bcd4d53c2..874ac3063b9 100644 --- a/docs/fa/development/tests.md +++ b/docs/fa/development/tests.md @@ -14,7 +14,7 @@ toc_title: "\u0646\u062D\u0648\u0647 \u0627\u062C\u0631\u0627\u06CC \u062A\u0633 هر تست عملکردی یک یا چند نمایش داده شد به سرور در حال اجرا تاتر می فرستد و نتیجه را با مرجع مقایسه می کند. -تست ها در واقع `testsies` فهرست راهنما. دو زیرشاخه وجود دارد: `stateless` و `stateful`. تست های بدون تابعیت بدون هیچ گونه داده های تست پیش بارگذاری شده نمایش داده می شوند-اغلب مجموعه داده های مصنوعی کوچک را در پرواز در داخل تست خود ایجاد می کنند. تست های نفرت انگیز نیاز به داده های تست از قبل نصب شده از یاندکس.متریکا و در دسترس عموم نیست. ما تمایل به استفاده از تنها `stateless` تست ها و جلوگیری از اضافه کردن جدید `stateful` تستها +تست ها در واقع `queries` فهرست راهنما. دو زیرشاخه وجود دارد: `stateless` و `stateful`. تست های بدون تابعیت بدون هیچ گونه داده های تست پیش بارگذاری شده نمایش داده می شوند-اغلب مجموعه داده های مصنوعی کوچک را در پرواز در داخل تست خود ایجاد می کنند. تست های نفرت انگیز نیاز به داده های تست از قبل نصب شده از یاندکس.متریکا و در دسترس عموم نیست. ما تمایل به استفاده از تنها `stateless` تست ها و جلوگیری از اضافه کردن جدید `stateful` تستها هر تست می تواند یکی از دو نوع باشد: `.sql` و `.sh`. `.sql` تست اسکریپت ساده مربع است که به لوله کشی است `clickhouse-client --multiquery --testmode`. `.sh` تست یک اسکریپت است که به خودی خود اجرا است. @@ -22,7 +22,7 @@ toc_title: "\u0646\u062D\u0648\u0647 \u0627\u062C\u0631\u0627\u06CC \u062A\u0633 ساده ترین راه برای فراخوانی تست های کاربردی کپی است `clickhouse-client` به `/usr/bin/` فرار کن `clickhouse-server` و سپس اجرا کنید `./clickhouse-test` از دایرکتوری خود را. -برای اضافه کردن تست جدید, ایجاد یک `.sql` یا `.sh` پرونده در `testsies/0_stateless` فهرست راهنما را به صورت دستی بررسی کنید و سپس تولید کنید `.reference` پرونده به روش زیر: `clickhouse-client -n --testmode < 00000_test.sql > 00000_test.reference` یا `./00000_test.sh > ./00000_test.reference`. +برای اضافه کردن تست جدید, ایجاد یک `.sql` یا `.sh` پرونده در `queries/0_stateless` فهرست راهنما را به صورت دستی بررسی کنید و سپس تولید کنید `.reference` پرونده به روش زیر: `clickhouse-client -n --testmode < 00000_test.sql > 00000_test.reference` یا `./00000_test.sh > ./00000_test.reference`. 
تست باید استفاده کنید (ساختن, قطره, و غیره) تنها جداول در `test` پایگاه داده است که فرض بر این است که از قبل ایجاد می شود; همچنین تست می توانید جداول موقت استفاده. @@ -37,7 +37,7 @@ toc_title: "\u0646\u062D\u0648\u0647 \u0627\u062C\u0631\u0627\u06CC \u062A\u0633 ## اشکالات شناخته شده {#known-bugs} -اگر ما می دانیم برخی از اشکالات است که می تواند به راحتی توسط تست های کاربردی تکثیر, ما تست های عملکردی تهیه شده در `testsies/bugs` فهرست راهنما. این تست خواهد شد به نقل مکان کرد `teststests_stateless` هنگامی که اشکالات ثابت هستند. +اگر ما می دانیم برخی از اشکالات است که می تواند به راحتی توسط تست های کاربردی تکثیر, ما تست های عملکردی تهیه شده در `queries/bugs` فهرست راهنما. این تست خواهد شد به نقل مکان کرد `teststests_stateless` هنگامی که اشکالات ثابت هستند. ## تست های ادغام {#integration-tests} diff --git a/docs/fr/development/tests.md b/docs/fr/development/tests.md index 9c79c65ba9d..e5c8a50fa31 100644 --- a/docs/fr/development/tests.md +++ b/docs/fr/development/tests.md @@ -13,7 +13,7 @@ Les tests fonctionnels sont les plus simples et pratiques à utiliser. La plupar Chaque test fonctionnel envoie une ou plusieurs requêtes au serveur clickhouse en cours d'exécution et compare le résultat avec la référence. -Les Tests sont situés dans `testsies` répertoire. Il y a deux sous-répertoires: `stateless` et `stateful`. Les tests sans état exécutent des requêtes sans données de test préchargées - ils créent souvent de petits ensembles de données synthétiques à la volée, dans le test lui-même. Les tests avec État nécessitent des données de test préchargées de Yandex.Metrica et non disponible pour le grand public. Nous avons tendance à utiliser uniquement `stateless` tests et éviter d'ajouter de nouveaux `stateful` test. +Les Tests sont situés dans `queries` répertoire. Il y a deux sous-répertoires: `stateless` et `stateful`. Les tests sans état exécutent des requêtes sans données de test préchargées - ils créent souvent de petits ensembles de données synthétiques à la volée, dans le test lui-même. Les tests avec État nécessitent des données de test préchargées de Yandex.Metrica et non disponible pour le grand public. Nous avons tendance à utiliser uniquement `stateless` tests et éviter d'ajouter de nouveaux `stateful` test. Chaque test peut être de deux types: `.sql` et `.sh`. `.sql` test est le script SQL simple qui est canalisé vers `clickhouse-client --multiquery --testmode`. `.sh` test est un script qui est exécuté par lui-même. @@ -21,7 +21,7 @@ Pour exécuter tous les tests, utilisez `testskhouse-test` outil. Regarder `--he Le moyen le plus simple d'invoquer des tests fonctionnels est de copier `clickhouse-client` de `/usr/bin/`, exécuter `clickhouse-server` et puis exécutez `./clickhouse-test` à partir de son propre répertoire. -Pour ajouter un nouveau test, créez un `.sql` ou `.sh` fichier dans `testsies/0_stateless` répertoire, vérifiez-le manuellement, puis générez `.reference` fichier de la façon suivante: `clickhouse-client -n --testmode < 00000_test.sql > 00000_test.reference` ou `./00000_test.sh > ./00000_test.reference`. +Pour ajouter un nouveau test, créez un `.sql` ou `.sh` fichier dans `queries/0_stateless` répertoire, vérifiez-le manuellement, puis générez `.reference` fichier de la façon suivante: `clickhouse-client -n --testmode < 00000_test.sql > 00000_test.reference` ou `./00000_test.sh > ./00000_test.reference`. 
Les Tests doivent utiliser (create, drop, etc) uniquement des tables dans `test` base de données supposée être créée au préalable; les tests peuvent également utiliser des tables temporaires. @@ -36,7 +36,7 @@ désactivez ces groupes de tests en utilisant `--no-zookeeper`, `--no-shard` et ## Bugs connus {#known-bugs} -Si nous connaissons des bugs qui peuvent être facilement reproduits par des tests fonctionnels, nous plaçons des tests fonctionnels préparés dans `testsies/bugs` répertoire. Ces tests seront déplacés à `teststests_stateless` quand les bugs sont corrigés. +Si nous connaissons des bugs qui peuvent être facilement reproduits par des tests fonctionnels, nous plaçons des tests fonctionnels préparés dans `queries/bugs` répertoire. Ces tests seront déplacés à `teststests_stateless` quand les bugs sont corrigés. ## Les Tests D'Intégration {#integration-tests} diff --git a/docs/ja/development/tests.md b/docs/ja/development/tests.md index 80901a859e7..27b8870461e 100644 --- a/docs/ja/development/tests.md +++ b/docs/ja/development/tests.md @@ -13,7 +13,7 @@ toc_title: "ClickHouse\u30C6\u30B9\u30C8\u3092\u5B9F\u884C\u3059\u308B\u65B9\u6C 各機能テストは、実行中のclickhouseサーバーに一つまたは複数のクエリを送信し、参照と結果を比較します。 -テストは `testsies` ディレクトリ。 つのサブディレクトリがあります: `stateless` と `stateful`. ステートレステストでは、プリロードされたテストデータを使用せずにクエリを実行します。 ステートフルテストでは、Yandexのテストデータが必要です。メトリカと一般市民には利用できません。 我々は唯一の使用する傾向があります `stateless` テストと新しい追加を避ける `stateful` テスト +テストは `queries` ディレクトリ。 つのサブディレクトリがあります: `stateless` と `stateful`. ステートレステストでは、プリロードされたテストデータを使用せずにクエリを実行します。 ステートフルテストでは、Yandexのテストデータが必要です。メトリカと一般市民には利用できません。 我々は唯一の使用する傾向があります `stateless` テストと新しい追加を避ける `stateful` テスト それぞれの試験できるの種類: `.sql` と `.sh`. `.sql` testは、パイプ処理される単純なSQLスクリプトです `clickhouse-client --multiquery --testmode`. `.sh` テストは、単独で実行されるスクリプトです。 @@ -21,7 +21,7 @@ toc_title: "ClickHouse\u30C6\u30B9\u30C8\u3092\u5B9F\u884C\u3059\u308B\u65B9\u6C 機能テストを呼び出す最も簡単な方法は、コピーすることです `clickhouse-client` に `/usr/bin/`、実行 `clickhouse-server` そして、実行 `./clickhouse-test` 独自のディレクトリから。 -新しいテストを追加するには、 `.sql` または `.sh` ファイル `testsies/0_stateless` ディレクトリは、手動でチェックしてから生成 `.reference` 次の方法でファイル: `clickhouse-client -n --testmode < 00000_test.sql > 00000_test.reference` または `./00000_test.sh > ./00000_test.reference`. +新しいテストを追加するには、 `.sql` または `.sh` ファイル `queries/0_stateless` ディレクトリは、手動でチェックしてから生成 `.reference` 次の方法でファイル: `clickhouse-client -n --testmode < 00000_test.sql > 00000_test.reference` または `./00000_test.sh > ./00000_test.reference`. テストでは、(create、dropなど)テーブルのみを使用する必要があります `test` テストでは一時テーブルを使用することもできます。 @@ -36,7 +36,7 @@ toc_title: "ClickHouse\u30C6\u30B9\u30C8\u3092\u5B9F\u884C\u3059\u308B\u65B9\u6C ## 既知のバグ {#known-bugs} -機能テストで簡単に再現できるいくつかのバグを知っていれば、準備された機能テストを `testsies/bugs` ディレクトリ。 これらのテストはに移動されます `teststests_stateless` バグが修正されたとき。 +機能テストで簡単に再現できるいくつかのバグを知っていれば、準備された機能テストを `queries/bugs` ディレクトリ。 これらのテストはに移動されます `teststests_stateless` バグが修正されたとき。 ## 統合テスト {#integration-tests} diff --git a/docs/ru/development/tests.md b/docs/ru/development/tests.md index c9181349a2b..c703d6cd5b3 100644 --- a/docs/ru/development/tests.md +++ b/docs/ru/development/tests.md @@ -10,7 +10,7 @@ Functional tests are the most simple and convenient to use. Most of ClickHouse f Each functional test sends one or multiple queries to the running ClickHouse server and compares the result with reference. -Tests are located in `testsies` directory. There are two subdirectories: `stateless` and `stateful`. 
Stateless tests run queries without any preloaded test data - they often create small synthetic datasets on the fly, within the test itself. Stateful tests require preloaded test data from Yandex.Metrica and not available to general public. We tend to use only `stateless` tests and avoid adding new `stateful` tests. +Tests are located in `queries` directory. There are two subdirectories: `stateless` and `stateful`. Stateless tests run queries without any preloaded test data - they often create small synthetic datasets on the fly, within the test itself. Stateful tests require preloaded test data from Yandex.Metrica and not available to general public. We tend to use only `stateless` tests and avoid adding new `stateful` tests. Each test can be one of two types: `.sql` and `.sh`. `.sql` test is the simple SQL script that is piped to `clickhouse-client --multiquery --testmode`. `.sh` test is a script that is run by itself. @@ -18,7 +18,7 @@ To run all tests, use `testskhouse-test` tool. Look `--help` for the list of pos The most simple way to invoke functional tests is to copy `clickhouse-client` to `/usr/bin/`, run `clickhouse-server` and then run `./clickhouse-test` from its own directory. -To add new test, create a `.sql` or `.sh` file in `testsies/0_stateless` directory, check it manually and then generate `.reference` file in the following way: `clickhouse-client -n --testmode < 00000_test.sql > 00000_test.reference` or `./00000_test.sh > ./00000_test.reference`. +To add new test, create a `.sql` or `.sh` file in `queries/0_stateless` directory, check it manually and then generate `.reference` file in the following way: `clickhouse-client -n --testmode < 00000_test.sql > 00000_test.reference` or `./00000_test.sh > ./00000_test.reference`. Tests should use (create, drop, etc) only tables in `test` database that is assumed to be created beforehand; also tests can use temporary tables. @@ -33,7 +33,7 @@ disable these groups of tests using `--no-zookeeper`, `--no-shard` and ## Known bugs {#known-bugs} -If we know some bugs that can be easily reproduced by functional tests, we place prepared functional tests in `testsies/bugs` directory. These tests will be moved to `teststests_stateless` when bugs are fixed. +If we know some bugs that can be easily reproduced by functional tests, we place prepared functional tests in `queries/bugs` directory. These tests will be moved to `teststests_stateless` when bugs are fixed. 
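(Editor's aside, not part of this patch: for tests that are expected to fail, the `--testmode` flag mentioned above lets a script annotate the anticipated error inline. A hypothetical sketch, assuming error code 395, FUNCTION_THROW_IF_VALUE_IS_NON_ZERO:)

```sql
-- A negative test: the hint tells clickhouse-client --testmode that this
-- error code is expected, so the test passes when the query fails this way.
SELECT throwIf(1); -- { serverError 395 }
```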
## Integration Tests {#integration-tests} From 7376428a6b123ae05c1707b60c85c7ebdd1aa62e Mon Sep 17 00:00:00 2001 From: elenaspb2019 <47083263+elenaspb2019@users.noreply.github.com> Date: Wed, 8 Apr 2020 16:22:32 +0300 Subject: [PATCH 202/484] elenbaskakova-DOCSUP-780 (#87) * "docs(system.dictionaries): Table 'system.dictionaries' has been edited" * "docs(system.dictionaries): Table 'system.dictionaries' has been edited" * "docs(system.dictionaries): Table 'system.dictionaries' has been edited" * "docs(system.dictionaries): Table 'system.dictionaries' has been edited" * "docs(system.dictionaries): Table 'system.dictionaries' has been edited" * "docs(system.dictionaries): Table 'system.dictionaries' has been edited" * Update docs/en/operations/system_tables.md Co-Authored-By: BayoNet * "docs(system.dictionaries): Table 'system.dictionaries' has been edited" * "docs(system.dictionaries): Table 'system.dictionaries' has been edited" * "docs(system.dictionaries): Table 'system.dictionaries' has been edited" * Update docs/en/operations/system_tables.md Co-Authored-By: BayoNet * Update docs/en/operations/system_tables.md Co-Authored-By: BayoNet * Update docs/en/operations/system_tables.md Co-Authored-By: BayoNet * Update docs/en/operations/system_tables.md Co-Authored-By: BayoNet * "docs(system.settings): Table 'system.settings' has been edited" * "docs(system.dictionaries): Table 'system.dictionaries' has been edited" * "docs(system.dictionaries): Table 'system.dictionaries has been edited" Co-authored-by: elenbaskakova Co-authored-by: BayoNet --- docs/en/operations/system_tables.md | 73 ++++++++++++---- .../dicts/external_dicts_dict_lifetime.md | 83 +++++++++++++++++++ docs/ru/operations/system_tables.md | 71 ++++++++++++---- 3 files changed, 195 insertions(+), 32 deletions(-) create mode 100644 docs/en/query_language/dicts/external_dicts_dict_lifetime.md diff --git a/docs/en/operations/system_tables.md b/docs/en/operations/system_tables.md index 8d0e4b74b86..fd6c70fb076 100644 --- a/docs/en/operations/system_tables.md +++ b/docs/en/operations/system_tables.md @@ -147,27 +147,68 @@ This system table is used for implementing the `SHOW DATABASES` query. Contains information about detached parts of [MergeTree](../engines/table_engines/mergetree_family/mergetree.md) tables. The `reason` column specifies why the part was detached. For user-detached parts, the reason is empty. Such parts can be attached with [ALTER TABLE ATTACH PARTITION\|PART](../query_language/query_language/alter/#alter_attach-partition) command. For the description of other columns, see [system.parts](#system_tables-parts). If part name is invalid, values of some columns may be `NULL`. Such parts can be deleted with [ALTER TABLE DROP DETACHED PART](../query_language/query_language/alter/#alter_drop-detached). -## system.dictionaries {#system-dictionaries} +## system.dictionaries {#system_tables-dictionaries} -Contains information about external dictionaries. +Contains information about [external dictionaries](../query_language/dicts/external_dicts.md). Columns: -- `name` (String) — Dictionary name. -- `type` (String) — Dictionary type: Flat, Hashed, Cache. -- `origin` (String) — Path to the configuration file that describes the dictionary. -- `attribute.names` (Array(String)) — Array of attribute names provided by the dictionary. -- `attribute.types` (Array(String)) — Corresponding array of attribute types that are provided by the dictionary. -- `has_hierarchy` (UInt8) — Whether the dictionary is hierarchical. 
-- `bytes_allocated` (UInt64) — The amount of RAM the dictionary uses. -- `hit_rate` (Float64) — For cache dictionaries, the percentage of uses for which the value was in the cache. -- `element_count` (UInt64) — The number of items stored in the dictionary. -- `load_factor` (Float64) — The percentage filled in the dictionary (for a hashed dictionary, the percentage filled in the hash table). -- `creation_time` (DateTime) — The time when the dictionary was created or last successfully reloaded. -- `last_exception` (String) — Text of the error that occurs when creating or reloading the dictionary if the dictionary couldn’t be created. -- `source` (String) — Text describing the data source for the dictionary. +- `database` ([String](../data_types/string.md)) — Database name where the dictionary is located. Only for dictionaries created by DDL query, for others is always an empty string. +- `name` ([String](../data_types/string.md)) — [Dictionary name](../query_language/dicts/external_dicts_dict.md). +- `status` ([Enum8](../data_types/enum.md)) — Dictionary status. Possible values: + - `NOT_LOADED` — Dictionary was not loaded because it was not used. + - `LOADED` — Dictionary loaded successfully. + - `FAILED` — Unable to load the dictionary as a result of an error. + - `LOADING` — Dictionary is loading now. + - `LOADED_AND_RELOADING` — Dictionary is loaded successfully, and is being reloaded right now (frequent reasons: [SYSTEM RELOAD DICTIONARY](../query_language/system.md#query_language-system-reload-dictionary) query, timeout, dictionary config has changed). + - `FAILED_AND_RELOADING` — Could not load the dictionary as a result of an error and is loading now. +- `origin` ([String](../data_types/string.md)) — Path to the configuration file that describes the dictionary. +- `type` ([String](../data_types/string.md)) — Type of a dictionary allocation. [Storing Dictionaries in Memory](../query_language/dicts/external_dicts_dict_layout.md). +- `key` — [Key type](../query_language/dicts/external_dicts_dict_structure.md#ext_dict_structure-key): Numeric Key ([UInt64](../data_types/int_uint.md#uint-ranges)) or Сomposite key ([String](../data_types/string.md)) — form "(type 1, type 2, ..., type n)". +- `attribute.names` ([Array](../data_types/array.md)([String](../data_types/string.md))) — Array of [attribute names](../query_language/dicts/external_dicts_dict_structure.md#ext_dict_structure-attributes) provided by the dictionary. +- `attribute.types` ([Array](../data_types/array.md)([String](../data_types/string.md))) — Corresponding array of [attribute types](../query_language/dicts/external_dicts_dict_structure.md#ext_dict_structure-attributes) that are provided by the dictionary. +- `bytes_allocated` ([UInt64](../data_types/int_uint.md#uint-ranges)) — Amount of RAM allocated for the dictionary. +- `query_count` ([UInt64](../data_types/int_uint.md#uint-ranges)) — Number of queries since the dictionary was loaded or since the last successful reboot. +- `hit_rate` ([Float64](../data_types/float.md)) — For cache dictionaries, the percentage of uses for which the value was in the cache. +- `element_count` ([UInt64](../data_types/int_uint.md#uint-ranges)) — Number of items stored in the dictionary. +- `load_factor` ([Float64](../data_types/float.md)) — Percentage filled in the dictionary (for a hashed dictionary, the percentage filled in the hash table). 
+- `source` ([String](../data_types/string.md)) — Text describing the [data source](../query_language/dicts/external_dicts_dict_sources.md) for the dictionary. +- `lifetime_min` ([UInt64](../data_types/int_uint.md#uint-ranges)) — Minimum [lifetime](../query_language/dicts/external_dicts_dict_lifetime.md) of the dictionary in memory, after which ClickHouse tries to reload the dictionary (if `invalidate_query` is set, then only if it has changed). Set in seconds. +- `lifetime_max` ([UInt64](../data_types/int_uint.md#uint-ranges)) — Maximum [lifetime](../query_language/dicts/external_dicts_dict_lifetime.md) of the dictionary in memory, after which ClickHouse tries to reload the dictionary (if `invalidate_query` is set, then only if it has changed). Set in seconds. +- `loading_start_time` ([DateTime](../data_types/datetime.md)) — Start time for loading the dictionary. +- `last_successful_update_time` ([DateTime](../data_types/datetime.md)) — End time for loading or updating the dictionary. Helps to monitor some troubles with external sources and investigate causes. +- `loading_duration` ([Float32](../data_types/float.md)) — Duration of a dictionary loading. +- `last_exception` ([String](../data_types/string.md)) — Text of the error that occurs when creating or reloading the dictionary if the dictionary couldn't be created. -Note that the amount of memory used by the dictionary is not proportional to the number of items stored in it. So for flat and cached dictionaries, all the memory cells are pre-assigned, regardless of how full the dictionary actually is. + +**Example** + +Configure the dictionary. + +```sql +CREATE DICTIONARY dictdb.dict +( + `key` Int64 DEFAULT -1, + `value_default` String DEFAULT 'world', + `value_expression` String DEFAULT 'xxx' EXPRESSION 'toString(127 * 172)' +) +PRIMARY KEY key +SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'dicttbl' DB 'dictdb')) +LIFETIME(MIN 0 MAX 1) +LAYOUT(FLAT()) +``` + +Make sure that the dictionary is loaded. + +```sql +SELECT * FROM system.dictionaries +``` + +```text +┌─database─┬─name─┬─status─┬─origin──────┬─type─┬─key────┬─attribute.names──────────────────────┬─attribute.types─────┬─bytes_allocated─┬─query_count─┬─hit_rate─┬─element_count─┬───────────load_factor─┬─source─────────────────────┬─lifetime_min─┬─lifetime_max─┬──loading_start_time─┌──last_successful_update_time─┬──────loading_duration─┬─last_exception─┐ +│ dictdb │ dict │ LOADED │ dictdb.dict │ Flat │ UInt64 │ ['value_default','value_expression'] │ ['String','String'] │ 74032 │ 0 │ 1 │ 1 │ 0.0004887585532746823 │ ClickHouse: dictdb.dicttbl │ 0 │ 1 │ 2020-03-04 04:17:34 │ 2020-03-04 04:30:34 │ 0.002 │ │ +└──────────┴──────┴────────┴─────────────┴──────┴────────┴──────────────────────────────────────┴─────────────────────┴─────────────────┴─────────────┴──────────┴───────────────┴───────────────────────┴────────────────────────────┴──────────────┴──────────────┴─────────────────────┴──────────────────────────────┘───────────────────────┴────────────────┘ +``` ## system.events {#system_tables-events} diff --git a/docs/en/query_language/dicts/external_dicts_dict_lifetime.md b/docs/en/query_language/dicts/external_dicts_dict_lifetime.md new file mode 100644 index 00000000000..7f7fb08b0ef --- /dev/null +++ b/docs/en/query_language/dicts/external_dicts_dict_lifetime.md @@ -0,0 +1,83 @@ + +# Dictionary Updates + +ClickHouse periodically updates the dictionaries. 
The update interval for fully downloaded dictionaries and the invalidation interval for cached dictionaries are defined in the `<lifetime>` tag in seconds.
+
+Dictionary updates (other than loading for first use) do not block queries. During updates, the old version of a dictionary is used. If an error occurs during an update, the error is written to the server log, and queries continue using the old version of dictionaries.
+
+Example of settings:
+
+```xml
+<dictionary>
+    ...
+    <lifetime>300</lifetime>
+    ...
+</dictionary>
+```
+
+```sql
+CREATE DICTIONARY (...)
+...
+LIFETIME(300)
+...
+```
+
+Setting `<lifetime>0</lifetime>` (`LIFETIME(0)`) prevents dictionaries from updating.
+
+You can set a time interval for upgrades, and ClickHouse will choose a uniformly random time within this range. This is necessary in order to distribute the load on the dictionary source when upgrading on a large number of servers.
+
+Example of settings:
+
+```xml
+<dictionary>
+    ...
+    <lifetime>
+        <min>300</min>
+        <max>360</max>
+    </lifetime>
+    ...
+</dictionary>
+```
+
+or
+
+```sql
+LIFETIME(MIN 300 MAX 360)
+```
+If `<min>0</min>` and `<max>0</max>`, ClickHouse does not reload the dictionary by timeout.
+In this case, ClickHouse can reload the dictionary earlier if the dictionary configuration file was changed or the `SYSTEM RELOAD DICTIONARY` command was executed.
+
+When upgrading the dictionaries, the ClickHouse server applies different logic depending on the type of [source](external_dicts_dict_sources.md):
+
+- For a text file, it checks the time of modification. If the time differs from the previously recorded time, the dictionary is updated.
+- For MyISAM tables, the time of modification is checked using a `SHOW TABLE STATUS` query.
+- Dictionaries from other sources are updated every time by default.
+
+For MySQL (InnoDB), ODBC and ClickHouse sources, you can set up a query that will update the dictionaries only if they really changed, rather than each time. To do this, follow these steps:
+
+- The dictionary table must have a field that always changes when the source data is updated.
+- The settings of the source must specify a query that retrieves the changing field. The ClickHouse server interprets the query result as a row, and if this row has changed relative to its previous state, the dictionary is updated. Specify the query in the `<invalidate_query>` field in the settings for the [source](external_dicts_dict_sources.md).
+
+Example of settings:
+
+```xml
+<dictionary>
+    ...
+    <odbc>
+      ...
+      <invalidate_query>SELECT update_time FROM dictionary_source where id = 1</invalidate_query>
+    </odbc>
+    ...
+</dictionary>
+```
+
+or
+
+```sql
+...
+SOURCE(ODBC(... invalidate_query 'SELECT update_time FROM dictionary_source where id = 1'))
+...
+```
+
+
+[Original article](https://clickhouse.yandex/docs/en/query_language/dicts/external_dicts_dict_lifetime/)
diff --git a/docs/ru/operations/system_tables.md b/docs/ru/operations/system_tables.md
index fc4ed0446e6..89d922793ec 100644
--- a/docs/ru/operations/system_tables.md
+++ b/docs/ru/operations/system_tables.md
@@ -135,27 +135,66 @@ SELECT * FROM system.contributors WHERE name='Olga Khvostikova'
 Такие куски могут быть присоединены с помощью [ALTER TABLE ATTACH PARTITION\|PART](../query_language/query_language/alter/#alter_attach-partition). Остальные столбцы описаны в [system.parts](#system_tables-parts).
 Если имя куска некорректно, значения некоторых столбцов могут быть `NULL`. Такие куски могут быть удалены с помощью [ALTER TABLE DROP DETACHED PART](../query_language/query_language/alter/#alter_drop-detached).
-## system.dictionaries {#system-dictionaries}
+## system.dictionaries {#system_tables-dictionaries}

-Содержит информацию о внешних словарях.
+Содержит информацию о [внешних словарях](../query_language/dicts/external_dicts.md). Столбцы: -- `name String` — Имя словаря. -- `type String` — Тип словаря: Flat, Hashed, Cache. -- `origin String` — Путь к конфигурационному файлу, в котором описан словарь. -- `attribute.names Array(String)` — Массив имён атрибутов, предоставляемых словарём. -- `attribute.types Array(String)` — Соответствующий массив типов атрибутов, предоставляемых словарём. -- `has_hierarchy UInt8` — Является ли словарь иерархическим. -- `bytes_allocated UInt64` — Количество оперативной памяти, которое использует словарь. -- `hit_rate Float64` — Для cache-словарей - доля использований, для которых значение было в кэше. -- `element_count UInt64` — Количество хранящихся в словаре элементов. -- `load_factor Float64` — Доля заполненности словаря (для hashed словаря - доля заполнения хэш-таблицы). -- `creation_time DateTime` — Время создания или последней успешной перезагрузки словаря. -- `last_exception String` — Текст ошибки, возникшей при создании или перезагрузке словаря, если словарь не удалось создать. -- `source String` - Текст, описывающий источник данных для словаря. +- `database` ([String](../data_types/string.md)) — Имя базы данных, в которой находится словарь. Только для словарей, созданных с помощью DDL-запроса, для остальных — всегда пустая строка. +- `name` ([String](../data_types/string.md)) — [Имя словаря](../query_language/dicts/external_dicts_dict.md). +- `status` ([Enum8](../data_types/enum.md)) — Статус словаря. Возможные значения: + - `NOT_LOADED` — Словарь не загружен, потому что не использовался. + - `LOADED` — Словарь загружен успешно. + - `FAILED` — Словарь не загружен в результате ошибки. + - `LOADING` — Словарь в процессе загрузки. + - `LOADED_AND_RELOADING` — Словарь загружен успешно, сейчас перезагружается (частые причины: запрос [SYSTEM RELOAD DICTIONARY](../query_language/system.md#query_language-system-reload-dictionary), таймаут, изменение настроек словаря). + - `FAILED_AND_RELOADING` — Словарь не загружен в результате ошибки, сейчас перезагружается. +- `origin` ([String](../data_types/string.md)) — Путь к конфигурационному файлу, описывающему словарь. +- `type` ([String](../data_types/string.md)) — Тип размещения словаря. [Хранение словарей в памяти](../query_language/dicts/external_dicts_dict_layout.md). +- `key` — [Тип ключа](../query_language/dicts/external_dicts_dict_structure.md#ext_dict_structure-key): Числовой ключ ([UInt64](../data_types/int_uint.md#uint-ranges)) или Составной ключ ([String](../data_types/string.md)) — строка вида "(тип 1, тип 2, ..., тип n)". +- `attribute.names` ([Array](../data_types/array.md)([String](../data_types/string.md))) — Массив [имен атрибутов](../query_language/dicts/external_dicts_dict_structure.md#ext_dict_structure-attributes), предоставляемых справочником. +- `attribute.types` ([Array](../data_types/array.md)([String](../data_types/string.md))) — Соответствующий массив [типов атрибутов](../query_language/dicts/external_dicts_dict_structure.md#ext_dict_structure-attributes), предоставляемых справочником. +- `bytes_allocated` ([UInt64](../data_types/int_uint.md#uint-ranges)) — Объем оперативной памяти, используемый словарем. +- `query_count` ([UInt64](../data_types/int_uint.md#uint-ranges)) — Количество запросов с момента загрузки словаря или с момента последней успешной перезагрузки. +- `hit_rate` ([Float64](../data_types/float.md)) — Для cache-словарей — процент закэшированных значений. 
+- `element_count` ([UInt64](../data_types/int_uint.md#uint-ranges)) — Количество элементов, хранящихся в словаре. +- `load_factor` ([Float64](../data_types/float.md)) — Процент заполнения словаря (для хэшированного словаря — процент заполнения хэш-таблицы). +- `source` ([String](../data_types/string.md)) — Текст, описывающий [источник данных](../query_language/dicts/external_dicts_dict_sources.md) для словаря. +- `lifetime_min` ([UInt64](../data_types/int_uint.md#uint-ranges)) — Минимальное [время обновления](../query_language/dicts/external_dicts_dict_lifetime.md) словаря в памяти, по истечении которого Clickhouse попытается перезагрузить словарь (если задано `invalidate_query`, то только если он изменился). Задается в секундах. +- `lifetime_max` ([UInt64](../data_types/int_uint.md#uint-ranges)) — Максимальное [время обновления](../query_language/dicts/external_dicts_dict_lifetime.md) словаря в памяти, по истечении которого Clickhouse попытается перезагрузить словарь (если задано `invalidate_query`, то только если он изменился). Задается в секундах. +- `loading_start_time` ([DateTime](../data_types/datetime.md)) — Время начала загрузки словаря. +- `loading_duration` ([Float32](../data_types/float.md)) — Время, затраченное на загрузку словаря. +- `last_exception` ([String](../data_types/string.md)) — Текст ошибки, возникающей при создании или перезагрузке словаря, если словарь не удалось создать. -Заметим, что количество оперативной памяти, которое использует словарь, не является пропорциональным количеству элементов, хранящихся в словаре. Так, для flat и cached словарей, все ячейки памяти выделяются заранее, независимо от реальной заполненности словаря. +**Пример** + +Настройте словарь. + +```sql +CREATE DICTIONARY dictdb.dict +( + `key` Int64 DEFAULT -1, + `value_default` String DEFAULT 'world', + `value_expression` String DEFAULT 'xxx' EXPRESSION 'toString(127 * 172)' +) +PRIMARY KEY key +SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'dicttbl' DB 'dictdb')) +LIFETIME(MIN 0 MAX 1) +LAYOUT(FLAT()) +``` + +Убедитесь, что словарь загружен. + +```sql +SELECT * FROM system.dictionaries +``` + +```text +┌─database─┬─name─┬─status─┬─origin──────┬─type─┬─key────┬─attribute.names──────────────────────┬─attribute.types─────┬─bytes_allocated─┬─query_count─┬─hit_rate─┬─element_count─┬───────────load_factor─┬─source─────────────────────┬─lifetime_min─┬─lifetime_max─┬──loading_start_time─┌──last_successful_update_time─┬──────loading_duration─┬─last_exception─┐ +│ dictdb │ dict │ LOADED │ dictdb.dict │ Flat │ UInt64 │ ['value_default','value_expression'] │ ['String','String'] │ 74032 │ 0 │ 1 │ 1 │ 0.0004887585532746823 │ ClickHouse: dictdb.dicttbl │ 0 │ 1 │ 2020-03-04 04:17:34 │ 2020-03-04 04:30:34 │ 0.002 │ │ +└──────────┴──────┴────────┴─────────────┴──────┴────────┴──────────────────────────────────────┴─────────────────────┴─────────────────┴─────────────┴──────────┴───────────────┴───────────────────────┴────────────────────────────┴──────────────┴──────────────┴─────────────────────┴──────────────────────────────┘───────────────────────┴────────────────┘ +``` ## system.events {#system_tables-events} From 1aeacfb071fdd8c2c304cce5bdb07df8448b7c68 Mon Sep 17 00:00:00 2001 From: Sergei Shtykov Date: Wed, 8 Apr 2020 16:30:02 +0300 Subject: [PATCH 203/484] Updated fit new structure of en docs. 
---
 .../dicts/external_dicts_dict_lifetime.md     | 83 ------------------
 .../external_dicts_dict_lifetime.md           |  5 ++
 2 files changed, 5 insertions(+), 83 deletions(-)
 delete mode 100644 docs/en/query_language/dicts/external_dicts_dict_lifetime.md

diff --git a/docs/en/query_language/dicts/external_dicts_dict_lifetime.md b/docs/en/query_language/dicts/external_dicts_dict_lifetime.md
deleted file mode 100644
index 7f7fb08b0ef..00000000000
--- a/docs/en/query_language/dicts/external_dicts_dict_lifetime.md
+++ /dev/null
@@ -1,83 +0,0 @@
-
-# Dictionary Updates
-
-ClickHouse periodically updates the dictionaries. The update interval for fully downloaded dictionaries and the invalidation interval for cached dictionaries are defined in the `<lifetime>` tag in seconds.
-
-Dictionary updates (other than loading for first use) do not block queries. During updates, the old version of a dictionary is used. If an error occurs during an update, the error is written to the server log, and queries continue using the old version of dictionaries.
-
-Example of settings:
-
-```xml
-<dictionary>
-    ...
-    <lifetime>300</lifetime>
-    ...
-</dictionary>
-```
-
-```sql
-CREATE DICTIONARY (...)
-...
-LIFETIME(300)
-...
-```
-
-Setting `<lifetime>0</lifetime>` (`LIFETIME(0)`) prevents dictionaries from updating.
-
-You can set a time interval for upgrades, and ClickHouse will choose a uniformly random time within this range. This is necessary in order to distribute the load on the dictionary source when upgrading on a large number of servers.
-
-Example of settings:
-
-```xml
-<dictionary>
-    ...
-    <lifetime>
-        <min>300</min>
-        <max>360</max>
-    </lifetime>
-    ...
-</dictionary>
-```
-
-or
-
-```sql
-LIFETIME(MIN 300 MAX 360)
-```
-If `<min>0</min>` and `<max>0</max>`, ClickHouse does not reload the dictionary by timeout.
-In this case, ClickHouse can reload the dictionary earlier if the dictionary configuration file was changed or the `SYSTEM RELOAD DICTIONARY` command was executed.
-
-When upgrading the dictionaries, the ClickHouse server applies different logic depending on the type of [source](external_dicts_dict_sources.md):
-
-- For a text file, it checks the time of modification. If the time differs from the previously recorded time, the dictionary is updated.
-- For MyISAM tables, the time of modification is checked using a `SHOW TABLE STATUS` query.
-- Dictionaries from other sources are updated every time by default.
-
-For MySQL (InnoDB), ODBC and ClickHouse sources, you can set up a query that will update the dictionaries only if they really changed, rather than each time. To do this, follow these steps:
-
-- The dictionary table must have a field that always changes when the source data is updated.
-- The settings of the source must specify a query that retrieves the changing field. The ClickHouse server interprets the query result as a row, and if this row has changed relative to its previous state, the dictionary is updated. Specify the query in the `<invalidate_query>` field in the settings for the [source](external_dicts_dict_sources.md).
-
-Example of settings:
-
-```xml
-<dictionary>
-    ...
-    <odbc>
-      ...
-      <invalidate_query>SELECT update_time FROM dictionary_source where id = 1</invalidate_query>
-    </odbc>
-    ...
-</dictionary>
-```
-
-or
-
-```sql
-...
-SOURCE(ODBC(... invalidate_query 'SELECT update_time FROM dictionary_source where id = 1'))
-...
-``` - - -[Original article](https://clickhouse.yandex/docs/en/query_language/dicts/external_dicts_dict_lifetime/) diff --git a/docs/en/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_lifetime.md b/docs/en/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_lifetime.md index 645c6347f66..97d5b6e4474 100644 --- a/docs/en/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_lifetime.md +++ b/docs/en/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_lifetime.md @@ -49,6 +49,11 @@ or LIFETIME(MIN 300 MAX 360) ``` +If `0` and `0`, ClickHouse does not reload the dictionary by timeout. +In this case, ClickHouse can reload the dictionary earlier if the dictionary configuration file was changed or the `SYSTEM RELOAD DICTIONARY` command was executed. + +When upgrading the dictionaries, the ClickHouse server applies different logic depending on the type of [source](external_dicts_dict_sources.md): + When upgrading the dictionaries, the ClickHouse server applies different logic depending on the type of [source](external_dicts_dict_sources.md): - For a text file, it checks the time of modification. If the time differs from the previously recorded time, the dictionary is updated. From 4d298fd42092f9aab67e2b9175f6fd23a35abe29 Mon Sep 17 00:00:00 2001 From: Ivan Blinkov Date: Wed, 8 Apr 2020 17:22:25 +0300 Subject: [PATCH 204/484] Update zh docs and fix en docs (#10125) --- docs/en/commercial/index.md | 3 +- docs/en/development/architecture.md | 6 +- docs/en/development/build_cross_arm.md | 2 +- docs/en/development/contrib.md | 2 +- docs/en/development/developer_instruction.md | 14 +- docs/en/development/tests.md | 8 +- docs/en/engines/index.md | 1 + docs/en/engines/table_engines/index.md | 2 +- .../mergetree_family/collapsingmergetree.md | 2 +- .../mergetree_family/graphitemergetree.md | 2 +- .../mergetree_family/mergetree.md | 2 +- .../mergetree_family/replacingmergetree.md | 2 +- .../mergetree_family/replication.md | 4 +- .../mergetree_family/summingmergetree.md | 4 +- .../versionedcollapsingmergetree.md | 2 +- docs/en/engines/table_engines/special/file.md | 4 +- .../engines/table_engines/special/generate.md | 2 +- docs/en/engines/table_engines/special/url.md | 2 +- docs/en/faq/index.md | 1 + docs/en/getting_started/install.md | 2 +- docs/en/getting_started/playground.md | 2 +- docs/en/interfaces/formats.md | 36 +- docs/en/interfaces/http.md | 144 +- docs/en/interfaces/mysql.md | 2 +- .../third-party/client_libraries.md | 2 +- .../en/interfaces/third-party/integrations.md | 2 +- docs/en/introduction/adopters.md | 6 +- docs/en/introduction/distinctive_features.md | 2 +- .../features_considered_disadvantages.md | 2 +- docs/en/introduction/index.md | 1 + docs/en/operations/backup.md | 2 +- docs/en/operations/monitoring.md | 2 +- docs/en/operations/performance_test.md | 20 +- docs/en/operations/troubleshooting.md | 6 +- .../external_dicts_dict_sources.md | 2 +- docs/en/sql_reference/operators.md | 6 +- docs/en/whats_new/changelog/2017.md | 2 +- docs/en/whats_new/changelog/2018.md | 8 +- docs/en/whats_new/changelog/2019.md | 6 +- docs/en/whats_new/index.md | 1 + docs/toc_zh.yml | 251 -- docs/tools/convert_toc.py | 6 +- docs/tools/nav.py | 8 +- docs/tools/translate/filter.py | 19 +- .../remove_machine_translated_meta.py | 21 + docs/zh/changelog/2017.md | 265 --- docs/zh/changelog/2018.md | 1060 --------- docs/zh/changelog/2019.md | 2071 ---------------- docs/zh/changelog/index.md | 666 +++++- docs/zh/commercial/cloud.md | 25 +- 
docs/zh/commercial/index.md | 9 + docs/zh/data_types/datetime64.md | 101 - docs/zh/data_types/int_uint.md | 17 - docs/zh/data_types/uuid.md | 74 - docs/zh/database_engines/lazy.md | 15 - docs/zh/development/architecture.md | 7 +- docs/zh/development/browse_code.md | 13 +- docs/zh/development/build.md | 7 +- docs/zh/development/build_cross_arm.md | 24 +- docs/zh/development/build_cross_osx.md | 3 +- docs/zh/development/build_osx.md | 3 +- docs/zh/development/contrib.md | 65 +- docs/zh/development/developer_instruction.md | 7 +- docs/zh/development/index.md | 1 + docs/zh/development/style.md | 7 +- docs/zh/development/tests.md | 17 +- .../{ => engines}/database_engines/index.md | 3 +- docs/zh/engines/database_engines/lazy.md | 18 + .../{ => engines}/database_engines/mysql.md | 35 +- docs/zh/engines/index.md | 8 + .../table_engines/index.md | 63 +- .../table_engines/integrations/hdfs.md | 123 + .../table_engines/integrations/index.md | 8 + .../table_engines/integrations}/jdbc.md | 37 +- .../table_engines/integrations}/kafka.md | 11 +- .../table_engines/integrations}/mysql.md | 5 +- .../table_engines/integrations/odbc.md | 132 ++ .../engines/table_engines/log_family/index.md | 8 + .../table_engines/log_family}/log.md | 5 +- .../table_engines/log_family}/log_family.md | 5 +- .../table_engines/log_family}/stripelog.md | 3 +- .../table_engines/log_family}/tinylog.md | 3 +- .../mergetree_family}/aggregatingmergetree.md | 7 +- .../mergetree_family}/collapsingmergetree.md | 5 +- .../custom_partitioning_key.md | 11 +- .../mergetree_family}/graphitemergetree.md | 69 +- .../table_engines/mergetree_family/index.md | 8 + .../mergetree_family}/mergetree.md | 31 +- .../mergetree_family}/replacingmergetree.md | 7 +- .../mergetree_family}/replication.md | 11 +- .../mergetree_family}/summingmergetree.md | 9 +- .../versionedcollapsingmergetree.md | 238 ++ .../table_engines/special}/buffer.md | 5 +- .../table_engines/special}/dictionary.md | 5 +- .../table_engines/special}/distributed.md | 9 +- .../table_engines/special}/external_data.md | 5 +- .../table_engines/special}/file.md | 11 +- .../engines/table_engines/special/generate.md | 61 + .../zh/engines/table_engines/special/index.md | 8 + .../table_engines/special}/join.md | 11 +- .../table_engines/special/materializedview.md | 6 + .../table_engines/special}/memory.md | 5 +- .../table_engines/special}/merge.md | 3 +- .../table_engines/special}/null.md | 3 +- .../table_engines/special}/set.md | 5 +- .../table_engines/special}/url.md | 7 +- .../table_engines/special}/view.md | 5 +- docs/zh/faq/general.md | 7 +- docs/zh/faq/index.md | 8 + .../example_datasets/amplab_benchmark.md | 3 +- .../example_datasets/criteo.md | 3 +- .../getting_started/example_datasets/index.md | 28 +- .../example_datasets/metrica.md | 29 +- .../example_datasets/nyc_taxi.md | 43 +- .../example_datasets/ontime.md | 5 +- .../example_datasets/star_schema.md | 7 +- .../example_datasets/wikistat.md | 3 +- docs/zh/getting_started/index.md | 1 + docs/zh/getting_started/install.md | 17 +- docs/zh/getting_started/playground.md | 51 +- docs/zh/getting_started/tutorial.md | 147 +- docs/zh/guides/apply_catboost_model.md | 113 +- docs/zh/guides/index.md | 16 +- docs/zh/index.md | 31 +- docs/zh/interfaces/cli.md | 5 +- docs/zh/interfaces/cpp.md | 5 +- docs/zh/interfaces/formats.md | 389 ++-- docs/zh/interfaces/http.md | 5 +- docs/zh/interfaces/index.md | 1 + docs/zh/interfaces/jdbc.md | 3 +- docs/zh/interfaces/mysql.md | 25 +- docs/zh/interfaces/odbc.md | 1 + docs/zh/interfaces/tcp.md | 1 + 
.../third-party/client_libraries.md | 41 +- docs/zh/interfaces/third-party/gui.md | 15 +- docs/zh/interfaces/third-party/index.md | 8 + .../zh/interfaces/third-party/integrations.md | 51 +- docs/zh/interfaces/third-party/proxy.md | 5 +- docs/zh/introduction/adopters.md | 149 +- docs/zh/introduction/distinctive_features.md | 3 +- .../features_considered_disadvantages.md | 1 + docs/zh/introduction/history.md | 3 +- docs/zh/introduction/index.md | 8 + docs/zh/introduction/performance.md | 3 +- docs/zh/operations/access_rights.md | 35 +- docs/zh/operations/backup.md | 41 +- docs/zh/operations/configuration_files.md | 29 +- docs/zh/operations/index.md | 5 +- docs/zh/operations/monitoring.md | 5 +- .../optimizing_performance/index.md | 8 + .../sampling_query_profiler.md | 64 + .../performance/sampling_query_profiler.md | 61 - ...sampling_query_profiler_example_result.txt | 4 - docs/zh/operations/performance_test.md | 35 +- docs/zh/operations/quotas.md | 33 +- docs/zh/operations/requirements.md | 57 +- .../server_configuration_parameters/index.md | 12 + .../settings.md | 872 +++++++ docs/zh/operations/server_settings/index.md | 11 - .../zh/operations/server_settings/settings.md | 869 ------- .../settings/constraints_on_settings.md | 23 +- docs/zh/operations/settings/index.md | 27 +- .../settings/permissions_for_queries.md | 53 +- .../operations/settings/query_complexity.md | 137 +- docs/zh/operations/settings/settings.md | 897 +++---- .../operations/settings/settings_profiles.md | 25 +- docs/zh/operations/settings/settings_users.md | 85 +- docs/zh/operations/system_tables.md | 776 +++--- docs/zh/operations/table_engines/generate.md | 58 - docs/zh/operations/table_engines/hdfs.md | 120 - .../table_engines/materializedview.md | 5 - docs/zh/operations/table_engines/odbc.md | 129 - .../versionedcollapsingmergetree.md | 235 -- docs/zh/operations/tips.md | 131 +- docs/zh/operations/troubleshooting.md | 105 +- docs/zh/operations/update.md | 13 +- .../clickhouse-benchmark.md | 73 +- .../{utils => utilities}/clickhouse-copier.md | 43 +- .../{utils => utilities}/clickhouse-local.md | 42 +- docs/zh/operations/utilities/index.md | 8 + docs/zh/operations/utils/index.md | 7 - .../agg_functions/combinators.md | 163 -- docs/zh/query_language/agg_functions/index.md | 58 - .../query_language/agg_functions/reference.md | 1834 --------------- docs/zh/query_language/alter.md | 502 ---- .../zh/query_language/dicts/external_dicts.md | 53 - .../dicts/external_dicts_dict.md | 50 - .../dicts/external_dicts_dict_hierarchical.md | 67 - .../dicts/external_dicts_dict_layout.md | 370 --- .../dicts/external_dicts_dict_lifetime.md | 83 - .../dicts/external_dicts_dict_structure.md | 172 -- docs/zh/query_language/dicts/index.md | 18 - .../zh/query_language/dicts/internal_dicts.md | 52 - .../functions/ext_dict_functions.md | 46 - .../functions/machine_learning_functions.md | 15 - docs/zh/query_language/index.md | 13 - docs/zh/query_language/misc.md | 249 -- docs/zh/query_language/syntax.md | 184 -- docs/zh/query_language/system.md | 110 - .../query_language/table_functions/index.md | 34 - .../query_language/table_functions/input.md | 44 - .../zh/query_language/table_functions/jdbc.md | 26 - .../query_language/table_functions/merge.md | 11 - .../query_language/table_functions/numbers.md | 27 - .../zh/query_language/table_functions/odbc.md | 105 - .../query_language/table_functions/remote.md | 80 - docs/zh/query_language/table_functions/url.md | 23 - .../aggregate_functions/combinators.md | 166 ++ .../aggregate_functions/index.md | 
62 + .../parametric_functions.md | 219 +- .../aggregate_functions/reference.md | 1878 +++++++++++++++ .../data_types}/aggregatefunction.md | 5 +- .../{ => sql_reference}/data_types/array.md | 5 +- .../{ => sql_reference}/data_types/boolean.md | 3 +- .../zh/{ => sql_reference}/data_types/date.md | 3 +- .../data_types/datetime.md | 3 +- .../zh/sql_reference/data_types/datetime64.md | 104 + .../{ => sql_reference}/data_types/decimal.md | 21 +- .../sql_reference/data_types/domains/index.md | 8 + .../data_types/domains/ipv4.md | 1 + .../data_types/domains/ipv6.md | 1 + .../data_types/domains/overview.md | 3 +- .../zh/{ => sql_reference}/data_types/enum.md | 7 +- .../data_types/fixedstring.md | 7 +- .../{ => sql_reference}/data_types/float.md | 7 +- .../{ => sql_reference}/data_types/index.md | 1 + docs/zh/sql_reference/data_types/int_uint.md | 18 + .../nested_data_structures/index.md | 1 + .../nested_data_structures/nested.md | 1 + .../data_types/nullable.md | 7 +- .../special_data_types/expression.md | 3 +- .../data_types/special_data_types/index.md | 3 +- .../data_types/special_data_types/interval.md | 39 +- .../data_types/special_data_types/nothing.md | 5 +- .../data_types/special_data_types/set.md | 3 +- .../{ => sql_reference}/data_types/string.md | 3 +- .../{ => sql_reference}/data_types/tuple.md | 5 +- docs/zh/sql_reference/data_types/uuid.md | 77 + .../external_dictionaries/external_dicts.md | 56 + .../external_dicts_dict.md | 53 + .../external_dicts_dict_hierarchical.md | 70 + .../external_dicts_dict_layout.md | 373 +++ .../external_dicts_dict_lifetime.md | 86 + .../external_dicts_dict_sources.md | 159 +- .../external_dicts_dict_structure.md | 175 ++ .../external_dictionaries/index.md | 8 + docs/zh/sql_reference/dictionaries/index.md | 22 + .../dictionaries/internal_dicts.md | 55 + .../functions/arithmetic_functions.md | 31 +- .../functions/array_functions.md | 75 +- .../functions/array_join.md | 3 +- .../functions/bit_functions.md | 21 +- .../functions/bitmap_functions.md | 19 +- .../functions/comparison_functions.md | 19 +- .../functions/conditional_functions.md | 5 +- .../functions/date_time_functions.md | 133 +- .../functions/encoding_functions.md | 5 +- .../functions/ext_dict_functions.md | 47 + .../functions/functions_for_nulls.md | 17 +- .../functions/geo.md | 17 +- .../functions/hash_functions.md | 13 +- .../functions/higher_order_functions.md | 21 +- .../functions/in_functions.md | 9 +- .../functions/index.md | 9 +- .../functions/introspection.md | 123 +- .../functions/ip_address_functions.md | 17 +- .../functions/json_functions.md | 15 +- .../functions/logical_functions.md | 9 +- .../functions/machine_learning_functions.md | 16 + .../functions/math_functions.md | 11 +- .../functions/other_functions.md | 77 +- .../functions/random_functions.md | 3 +- .../functions/rounding_functions.md | 23 +- .../functions/splitting_merging_functions.md | 11 +- .../functions/string_functions.md | 41 +- .../functions/string_replace_functions.md | 21 +- .../functions/string_search_functions.md | 51 +- .../functions/type_conversion_functions.md | 49 +- .../functions/url_functions.md | 35 +- .../functions/uuid_functions.md | 15 +- .../functions/ym_dict_functions.md | 67 +- docs/zh/sql_reference/index.md | 18 + .../operators.md | 35 +- docs/zh/sql_reference/statements/alter.md | 505 ++++ .../statements}/create.md | 73 +- docs/zh/sql_reference/statements/index.md | 8 + .../statements}/insert_into.md | 7 +- docs/zh/sql_reference/statements/misc.md | 252 ++ .../statements}/select.md | 63 +- 
.../statements}/show.md | 41 +- docs/zh/sql_reference/statements/system.md | 113 + docs/zh/sql_reference/syntax.md | 187 ++ .../table_functions/file.md | 59 +- .../table_functions/generate.md | 21 +- .../table_functions/hdfs.md | 55 +- .../zh/sql_reference/table_functions/index.md | 38 + .../zh/sql_reference/table_functions/input.md | 47 + docs/zh/sql_reference/table_functions/jdbc.md | 29 + .../zh/sql_reference/table_functions/merge.md | 14 + .../table_functions/mysql.md | 41 +- .../sql_reference/table_functions/numbers.md | 30 + docs/zh/sql_reference/table_functions/odbc.md | 108 + .../sql_reference/table_functions/remote.md | 83 + docs/zh/sql_reference/table_functions/url.md | 26 + docs/zh/whats_new/changelog/2017.md | 268 +++ docs/zh/whats_new/changelog/2018.md | 1063 +++++++++ docs/zh/whats_new/changelog/2019.md | 2074 +++++++++++++++++ docs/zh/whats_new/changelog/index.md | 665 ++++++ docs/zh/whats_new/index.md | 8 + docs/zh/{ => whats_new}/roadmap.md | 1 + docs/zh/{ => whats_new}/security_changelog.md | 1 + 311 files changed, 14602 insertions(+), 12936 deletions(-) delete mode 100644 docs/toc_zh.yml create mode 100755 docs/tools/translate/remove_machine_translated_meta.py delete mode 100644 docs/zh/changelog/2017.md delete mode 100644 docs/zh/changelog/2018.md delete mode 100644 docs/zh/changelog/2019.md mode change 120000 => 100644 docs/zh/changelog/index.md create mode 100644 docs/zh/commercial/index.md delete mode 100644 docs/zh/data_types/datetime64.md delete mode 100644 docs/zh/data_types/int_uint.md delete mode 100644 docs/zh/data_types/uuid.md delete mode 100644 docs/zh/database_engines/lazy.md rename docs/zh/{ => engines}/database_engines/index.md (69%) create mode 100644 docs/zh/engines/database_engines/lazy.md rename docs/zh/{ => engines}/database_engines/mysql.md (63%) create mode 100644 docs/zh/engines/index.md rename docs/zh/{operations => engines}/table_engines/index.md (50%) create mode 100644 docs/zh/engines/table_engines/integrations/hdfs.md create mode 100644 docs/zh/engines/table_engines/integrations/index.md rename docs/zh/{operations/table_engines => engines/table_engines/integrations}/jdbc.md (56%) rename docs/zh/{operations/table_engines => engines/table_engines/integrations}/kafka.md (85%) rename docs/zh/{operations/table_engines => engines/table_engines/integrations}/mysql.md (77%) create mode 100644 docs/zh/engines/table_engines/integrations/odbc.md create mode 100644 docs/zh/engines/table_engines/log_family/index.md rename docs/zh/{operations/table_engines => engines/table_engines/log_family}/log.md (84%) rename docs/zh/{operations/table_engines => engines/table_engines/log_family}/log_family.md (93%) rename docs/zh/{operations/table_engines => engines/table_engines/log_family}/stripelog.md (97%) rename docs/zh/{operations/table_engines => engines/table_engines/log_family}/tinylog.md (91%) rename docs/zh/{operations/table_engines => engines/table_engines/mergetree_family}/aggregatingmergetree.md (89%) rename docs/zh/{operations/table_engines => engines/table_engines/mergetree_family}/collapsingmergetree.md (98%) rename docs/zh/{operations/table_engines => engines/table_engines/mergetree_family}/custom_partitioning_key.md (87%) rename docs/zh/{operations/table_engines => engines/table_engines/mergetree_family}/graphitemergetree.md (50%) create mode 100644 docs/zh/engines/table_engines/mergetree_family/index.md rename docs/zh/{operations/table_engines => engines/table_engines/mergetree_family}/mergetree.md (88%) rename docs/zh/{operations/table_engines => 
engines/table_engines/mergetree_family}/replacingmergetree.md (92%) rename docs/zh/{operations/table_engines => engines/table_engines/mergetree_family}/replication.md (94%) rename docs/zh/{operations/table_engines => engines/table_engines/mergetree_family}/summingmergetree.md (86%) create mode 100644 docs/zh/engines/table_engines/mergetree_family/versionedcollapsingmergetree.md rename docs/zh/{operations/table_engines => engines/table_engines/special}/buffer.md (97%) rename docs/zh/{operations/table_engines => engines/table_engines/special}/dictionary.md (95%) rename docs/zh/{operations/table_engines => engines/table_engines/special}/distributed.md (93%) rename docs/zh/{operations/table_engines => engines/table_engines/special}/external_data.md (94%) rename docs/zh/{operations/table_engines => engines/table_engines/special}/file.md (74%) create mode 100644 docs/zh/engines/table_engines/special/generate.md create mode 100644 docs/zh/engines/table_engines/special/index.md rename docs/zh/{operations/table_engines => engines/table_engines/special}/join.md (75%) create mode 100644 docs/zh/engines/table_engines/special/materializedview.md rename docs/zh/{operations/table_engines => engines/table_engines/special}/memory.md (89%) rename docs/zh/{operations/table_engines => engines/table_engines/special}/merge.md (99%) rename docs/zh/{operations/table_engines => engines/table_engines/special}/null.md (69%) rename docs/zh/{operations/table_engines => engines/table_engines/special}/set.md (87%) rename docs/zh/{operations/table_engines => engines/table_engines/special}/url.md (94%) rename docs/zh/{operations/table_engines => engines/table_engines/special}/view.md (68%) create mode 100644 docs/zh/faq/index.md create mode 100644 docs/zh/interfaces/third-party/index.md create mode 100644 docs/zh/introduction/index.md create mode 100644 docs/zh/operations/optimizing_performance/index.md create mode 100644 docs/zh/operations/optimizing_performance/sampling_query_profiler.md delete mode 100644 docs/zh/operations/performance/sampling_query_profiler.md create mode 100644 docs/zh/operations/server_configuration_parameters/index.md create mode 100644 docs/zh/operations/server_configuration_parameters/settings.md delete mode 100644 docs/zh/operations/server_settings/index.md delete mode 100644 docs/zh/operations/server_settings/settings.md delete mode 100644 docs/zh/operations/table_engines/generate.md delete mode 100644 docs/zh/operations/table_engines/hdfs.md delete mode 100644 docs/zh/operations/table_engines/materializedview.md delete mode 100644 docs/zh/operations/table_engines/odbc.md delete mode 100644 docs/zh/operations/table_engines/versionedcollapsingmergetree.md rename docs/zh/operations/{utils => utilities}/clickhouse-benchmark.md (50%) rename docs/zh/operations/{utils => utilities}/clickhouse-copier.md (76%) rename docs/zh/operations/{utils => utilities}/clickhouse-local.md (55%) create mode 100644 docs/zh/operations/utilities/index.md delete mode 100644 docs/zh/operations/utils/index.md delete mode 100644 docs/zh/query_language/agg_functions/combinators.md delete mode 100644 docs/zh/query_language/agg_functions/index.md delete mode 100644 docs/zh/query_language/agg_functions/reference.md delete mode 100644 docs/zh/query_language/alter.md delete mode 100644 docs/zh/query_language/dicts/external_dicts.md delete mode 100644 docs/zh/query_language/dicts/external_dicts_dict.md delete mode 100644 docs/zh/query_language/dicts/external_dicts_dict_hierarchical.md delete mode 100644 
docs/zh/query_language/dicts/external_dicts_dict_layout.md delete mode 100644 docs/zh/query_language/dicts/external_dicts_dict_lifetime.md delete mode 100644 docs/zh/query_language/dicts/external_dicts_dict_structure.md delete mode 100644 docs/zh/query_language/dicts/index.md delete mode 100644 docs/zh/query_language/dicts/internal_dicts.md delete mode 100644 docs/zh/query_language/functions/ext_dict_functions.md delete mode 100644 docs/zh/query_language/functions/machine_learning_functions.md delete mode 100644 docs/zh/query_language/index.md delete mode 100644 docs/zh/query_language/misc.md delete mode 100644 docs/zh/query_language/syntax.md delete mode 100644 docs/zh/query_language/system.md delete mode 100644 docs/zh/query_language/table_functions/index.md delete mode 100644 docs/zh/query_language/table_functions/input.md delete mode 100644 docs/zh/query_language/table_functions/jdbc.md delete mode 100644 docs/zh/query_language/table_functions/merge.md delete mode 100644 docs/zh/query_language/table_functions/numbers.md delete mode 100644 docs/zh/query_language/table_functions/odbc.md delete mode 100644 docs/zh/query_language/table_functions/remote.md delete mode 100644 docs/zh/query_language/table_functions/url.md create mode 100644 docs/zh/sql_reference/aggregate_functions/combinators.md create mode 100644 docs/zh/sql_reference/aggregate_functions/index.md rename docs/zh/{query_language/agg_functions => sql_reference/aggregate_functions}/parametric_functions.md (57%) create mode 100644 docs/zh/sql_reference/aggregate_functions/reference.md rename docs/zh/{data_types/nested_data_structures => sql_reference/data_types}/aggregatefunction.md (82%) rename docs/zh/{ => sql_reference}/data_types/array.md (91%) rename docs/zh/{ => sql_reference}/data_types/boolean.md (73%) rename docs/zh/{ => sql_reference}/data_types/date.md (94%) rename docs/zh/{ => sql_reference}/data_types/datetime.md (95%) create mode 100644 docs/zh/sql_reference/data_types/datetime64.md rename docs/zh/{ => sql_reference}/data_types/decimal.md (85%) create mode 100644 docs/zh/sql_reference/data_types/domains/index.md rename docs/zh/{ => sql_reference}/data_types/domains/ipv4.md (99%) rename docs/zh/{ => sql_reference}/data_types/domains/ipv6.md (99%) rename docs/zh/{ => sql_reference}/data_types/domains/overview.md (98%) rename docs/zh/{ => sql_reference}/data_types/enum.md (95%) rename docs/zh/{ => sql_reference}/data_types/fixedstring.md (82%) rename docs/zh/{ => sql_reference}/data_types/float.md (89%) rename docs/zh/{ => sql_reference}/data_types/index.md (99%) create mode 100644 docs/zh/sql_reference/data_types/int_uint.md rename docs/zh/{ => sql_reference}/data_types/nested_data_structures/index.md (97%) rename docs/zh/{ => sql_reference}/data_types/nested_data_structures/nested.md (99%) rename docs/zh/{ => sql_reference}/data_types/nullable.md (72%) rename docs/zh/{ => sql_reference}/data_types/special_data_types/expression.md (64%) rename docs/zh/{ => sql_reference}/data_types/special_data_types/index.md (71%) rename docs/zh/{ => sql_reference}/data_types/special_data_types/interval.md (51%) rename docs/zh/{ => sql_reference}/data_types/special_data_types/nothing.md (61%) rename docs/zh/{ => sql_reference}/data_types/special_data_types/set.md (72%) rename docs/zh/{ => sql_reference}/data_types/string.md (97%) rename docs/zh/{ => sql_reference}/data_types/tuple.md (86%) create mode 100644 docs/zh/sql_reference/data_types/uuid.md create mode 100644 
docs/zh/sql_reference/dictionaries/external_dictionaries/external_dicts.md create mode 100644 docs/zh/sql_reference/dictionaries/external_dictionaries/external_dicts_dict.md create mode 100644 docs/zh/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_hierarchical.md create mode 100644 docs/zh/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_layout.md create mode 100644 docs/zh/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_lifetime.md rename docs/zh/{query_language/dicts => sql_reference/dictionaries/external_dictionaries}/external_dicts_dict_sources.md (70%) create mode 100644 docs/zh/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md create mode 100644 docs/zh/sql_reference/dictionaries/external_dictionaries/index.md create mode 100644 docs/zh/sql_reference/dictionaries/index.md create mode 100644 docs/zh/sql_reference/dictionaries/internal_dicts.md rename docs/zh/{query_language => sql_reference}/functions/arithmetic_functions.md (79%) rename docs/zh/{query_language => sql_reference}/functions/array_functions.md (88%) rename docs/zh/{query_language => sql_reference}/functions/array_join.md (93%) rename docs/zh/{query_language => sql_reference}/functions/bit_functions.md (58%) rename docs/zh/{query_language => sql_reference}/functions/bitmap_functions.md (96%) rename docs/zh/{query_language => sql_reference}/functions/comparison_functions.md (69%) rename docs/zh/{query_language => sql_reference}/functions/conditional_functions.md (91%) rename docs/zh/{query_language => sql_reference}/functions/date_time_functions.md (66%) rename docs/zh/{query_language => sql_reference}/functions/encoding_functions.md (95%) create mode 100644 docs/zh/sql_reference/functions/ext_dict_functions.md rename docs/zh/{query_language => sql_reference}/functions/functions_for_nulls.md (93%) rename docs/zh/{query_language => sql_reference}/functions/geo.md (89%) rename docs/zh/{query_language => sql_reference}/functions/hash_functions.md (92%) rename docs/zh/{query_language => sql_reference}/functions/higher_order_functions.md (76%) rename docs/zh/{query_language => sql_reference}/functions/in_functions.md (65%) rename docs/zh/{query_language => sql_reference}/functions/index.md (91%) rename docs/zh/{query_language => sql_reference}/functions/introspection.md (63%) rename docs/zh/{query_language => sql_reference}/functions/ip_address_functions.md (92%) rename docs/zh/{query_language => sql_reference}/functions/json_functions.md (92%) rename docs/zh/{query_language => sql_reference}/functions/logical_functions.md (70%) create mode 100644 docs/zh/sql_reference/functions/machine_learning_functions.md rename docs/zh/{query_language => sql_reference}/functions/math_functions.md (88%) rename docs/zh/{query_language => sql_reference}/functions/other_functions.md (85%) rename docs/zh/{query_language => sql_reference}/functions/random_functions.md (98%) rename docs/zh/{query_language => sql_reference}/functions/rounding_functions.md (79%) rename docs/zh/{query_language => sql_reference}/functions/splitting_merging_functions.md (63%) rename docs/zh/{query_language => sql_reference}/functions/string_functions.md (78%) rename docs/zh/{query_language => sql_reference}/functions/string_replace_functions.md (75%) rename docs/zh/{query_language => sql_reference}/functions/string_search_functions.md (63%) rename docs/zh/{query_language => sql_reference}/functions/type_conversion_functions.md (67%) rename docs/zh/{query_language => 
sql_reference}/functions/url_functions.md (69%) rename docs/zh/{query_language => sql_reference}/functions/uuid_functions.md (87%) rename docs/zh/{query_language => sql_reference}/functions/ym_dict_functions.md (56%) create mode 100644 docs/zh/sql_reference/index.md rename docs/zh/{query_language => sql_reference}/operators.md (80%) create mode 100644 docs/zh/sql_reference/statements/alter.md rename docs/zh/{query_language => sql_reference/statements}/create.md (68%) create mode 100644 docs/zh/sql_reference/statements/index.md rename docs/zh/{query_language => sql_reference/statements}/insert_into.md (89%) create mode 100644 docs/zh/sql_reference/statements/misc.md rename docs/zh/{query_language => sql_reference/statements}/select.md (91%) rename docs/zh/{query_language => sql_reference/statements}/show.md (52%) create mode 100644 docs/zh/sql_reference/statements/system.md create mode 100644 docs/zh/sql_reference/syntax.md rename docs/zh/{query_language => sql_reference}/table_functions/file.md (52%) rename docs/zh/{query_language => sql_reference}/table_functions/generate.md (76%) rename docs/zh/{query_language => sql_reference}/table_functions/hdfs.md (53%) create mode 100644 docs/zh/sql_reference/table_functions/index.md create mode 100644 docs/zh/sql_reference/table_functions/input.md create mode 100644 docs/zh/sql_reference/table_functions/jdbc.md create mode 100644 docs/zh/sql_reference/table_functions/merge.md rename docs/zh/{query_language => sql_reference}/table_functions/mysql.md (59%) create mode 100644 docs/zh/sql_reference/table_functions/numbers.md create mode 100644 docs/zh/sql_reference/table_functions/odbc.md create mode 100644 docs/zh/sql_reference/table_functions/remote.md create mode 100644 docs/zh/sql_reference/table_functions/url.md create mode 100644 docs/zh/whats_new/changelog/2017.md create mode 100644 docs/zh/whats_new/changelog/2018.md create mode 100644 docs/zh/whats_new/changelog/2019.md create mode 100644 docs/zh/whats_new/changelog/index.md create mode 100644 docs/zh/whats_new/index.md rename docs/zh/{ => whats_new}/roadmap.md (99%) rename docs/zh/{ => whats_new}/security_changelog.md (99%) diff --git a/docs/en/commercial/index.md b/docs/en/commercial/index.md index 3e0a0ac236a..f9065c7cd50 100644 --- a/docs/en/commercial/index.md +++ b/docs/en/commercial/index.md @@ -1,6 +1,7 @@ --- -toc_title: Commercial toc_folder_title: Commercial toc_priority: 70 +toc_title: Commercial --- + diff --git a/docs/en/development/architecture.md b/docs/en/development/architecture.md index 1932e3e31d8..cfd852637fe 100644 --- a/docs/en/development/architecture.md +++ b/docs/en/development/architecture.md @@ -41,7 +41,7 @@ Various functions on columns can be implemented in a generic, non-efficient way ## Block {#block} -A `Block` is a container that represents a subset (chunk) of a table in memory. It is just a set of triples: `(IColumn, IDataType, column name)`. During query execution, data is processed by `Block`s. If we have a `Block`, we have data (in the `IColumn` object), we have information about its type (in `IDataType`) that tells us how to deal with that column, and we have the column name. It could be either the original column name from the table or some artificial name assigned for getting temporary results of calculations. +A `Block` is a container that represents a subset (chunk) of a table in memory. It is just a set of triples: `(IColumn, IDataType, column name)`. During query execution, data is processed by `Block`s. 
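(A minimal sketch of that `(IColumn, IDataType, column name)` triple, with heavily simplified stand-ins for illustration only; the real interfaces are far richer than this:)

``` cpp
#include <memory>
#include <string>
#include <vector>

// Simplified stand-ins: the data itself, and the type that knows how to
// interpret it. Names follow the text above; the bodies are illustrative.
struct IColumn { virtual ~IColumn() = default; };
struct IDataType { virtual ~IDataType() = default; };

// The triple described above: (column, type, name).
struct ColumnWithTypeAndName
{
    std::shared_ptr<const IColumn> column;
    std::shared_ptr<const IDataType> type;
    std::string name;  // original column name, or an artificial one for temporaries
};

// A Block is essentially an ordered collection of such triples.
using Block = std::vector<ColumnWithTypeAndName>;

// Operations are immutable: computing a function over columns appends a new
// result column and never modifies the argument columns.
Block appendResultColumn(Block block, ColumnWithTypeAndName result)
{
    block.push_back(std::move(result));
    return block;
}
```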
If we have a `Block`, we have data (in the `IColumn` object), we have information about its type (in `IDataType`) that tells us how to deal with that column, and we have the column name. It could be either the original column name from the table or some artificial name assigned for getting temporary results of calculations. When we calculate some function over columns in a block, we add another column with its result to the block, and we don’t touch columns for arguments of the function because operations are immutable. Later, unneeded columns can be removed from the block, but not modified. It is convenient for the elimination of common subexpressions. @@ -77,7 +77,7 @@ For byte-oriented input/output, there are `ReadBuffer` and `WriteBuffer` abstrac Implementations of `ReadBuffer`/`WriteBuffer` are used for working with files and file descriptors and network sockets, for implementing compression (`CompressedWriteBuffer` is initialized with another WriteBuffer and performs compression before writing data to it), and for other purposes – the names `ConcatReadBuffer`, `LimitReadBuffer`, and `HashingWriteBuffer` speak for themselves. -Read/WriteBuffers only deal with bytes. There are functions from `ReadHelpers` and `WriteHelpers` header files to help with formatting input/output. For example, there are helpers to write a number in decimal format. +Read/WriteBuffers only deal with bytes. There are functions from `ReadHelpers` and `WriteHelpers` header files to help with formatting input/output. For example, there are helpers to write a number in decimal format. Let’s look at what happens when you want to write a result set in `JSON` format to stdout. You have a result set ready to be fetched from `IBlockInputStream`. You create `WriteBufferFromFileDescriptor(STDOUT_FILENO)` to write bytes to stdout. You create `JSONRowOutputStream`, initialized with that `WriteBuffer`, to write rows in `JSON` to stdout. You create `BlockOutputStreamFromRowOutputStream` on top of it, to represent it as `IBlockOutputStream`. Then you call `copyData` to transfer data from `IBlockInputStream` to `IBlockOutputStream`, and everything works. Internally, `JSONRowOutputStream` will write various JSON delimiters and call the `IDataType::serializeTextJSON` method with a reference to `IColumn` and the row number as arguments. Consequently, `IDataType::serializeTextJSON` will call a method from `WriteHelpers.h`: for example, `writeText` for numeric types and `writeJSONString` for `DataTypeString`. @@ -155,7 +155,7 @@ The server initializes the `Context` class with the necessary environment for qu We maintain full backward and forward compatibility for the server TCP protocol: old clients can talk to new servers, and new clients can talk to old servers. But we don’t want to maintain it eternally, and we are removing support for old versions after about one year. !!! note "Note" - For most external applications, we recommend using the HTTP interface because it is simple and easy to use. The TCP protocol is more tightly linked to internal data structures: it uses an internal format for passing blocks of data, and it uses custom framing for compressed data. We haven’t released a C library for that protocol because it requires linking most of the ClickHouse codebase, which is not practical. + For most external applications, we recommend using the HTTP interface because it is simple and easy to use. 
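(A toy, self-contained sketch of the `WriteBuffer` composition walked through above; the class names follow the text, everything else is invented for illustration and is far simpler than the real ClickHouse I/O classes:)

``` cpp
#include <cstdio>
#include <string>
#include <vector>

// Byte-oriented sink, as described above: it deals only with bytes.
struct WriteBuffer
{
    virtual void write(const std::string & bytes) = 0;
    virtual ~WriteBuffer() = default;
};

// Pushes bytes to a file descriptor (stdout here, via FILE * for brevity).
struct WriteBufferFromFileDescriptor : WriteBuffer
{
    explicit WriteBufferFromFileDescriptor(std::FILE * out_) : out(out_) {}
    void write(const std::string & bytes) override { std::fwrite(bytes.data(), 1, bytes.size(), out); }
    std::FILE * out;
};

// Row-oriented JSON writer layered on top of any WriteBuffer: it emits the
// JSON delimiters and delegates the actual byte output to the buffer below.
struct JSONRowOutputStream
{
    explicit JSONRowOutputStream(WriteBuffer & buf_) : buf(buf_) {}
    void writeRow(const std::vector<std::string> & values)
    {
        buf.write("[");
        for (size_t i = 0; i < values.size(); ++i)
            buf.write((i ? ", \"" : "\"") + values[i] + "\"");
        buf.write("]\n");
    }
    WriteBuffer & buf;
};

int main()
{
    WriteBufferFromFileDescriptor out(stdout);  // bytes -> stdout
    JSONRowOutputStream json(out);              // rows -> JSON -> bytes
    json.writeRow({"1", "hello"});              // prints: ["1", "hello"]
}
```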
The TCP protocol is more tightly linked to internal data structures: it uses an internal format for passing blocks of data, and it uses custom framing for compressed data. We haven’t released a C library for that protocol because it requires linking most of the ClickHouse codebase, which is not practical. ## Distributed Query Execution {#distributed-query-execution} diff --git a/docs/en/development/build_cross_arm.md b/docs/en/development/build_cross_arm.md index 24a7c8363f6..b2b5fa6b1e0 100644 --- a/docs/en/development/build_cross_arm.md +++ b/docs/en/development/build_cross_arm.md @@ -3,7 +3,7 @@ toc_priority: 67 toc_title: How to Build ClickHouse on Linux for AARCH64 (ARM64) --- -# How to Build ClickHouse on Linux for AARCH64 (ARM64) architecture {#how-to-build-clickhouse-on-linux-for-aarch64-arm64-architecture} +# How to Build ClickHouse on Linux for AARCH64 (ARM64) Architecture {#how-to-build-clickhouse-on-linux-for-aarch64-arm64-architecture} This is for the case when you have a Linux machine and want to use it to build a `clickhouse` binary that will run on another Linux machine with the AARCH64 CPU architecture. This is intended for continuous integration checks that run on Linux servers. diff --git a/docs/en/development/contrib.md b/docs/en/development/contrib.md index 9c594d3e03f..22f84370b6c 100644 --- a/docs/en/development/contrib.md +++ b/docs/en/development/contrib.md @@ -6,7 +6,7 @@ toc_title: Third-Party Libraries Used # Third-Party Libraries Used {#third-party-libraries-used} | Library | License | -|-------------|--------------------------------------------------------------------------------------| +|---------------------|----------------------------------------------------------------------------------------------------------------------------------------------| | base64 | [BSD 2-Clause License](https://github.com/aklomp/base64/blob/a27c565d1b6c676beaf297fe503c4518185666f7/LICENSE) | | boost | [Boost Software License 1.0](https://github.com/ClickHouse-Extras/boost-extra/blob/6883b40449f378019aec792f9983ce3afc7ff16e/LICENSE_1_0.txt) | | brotli | [MIT](https://github.com/google/brotli/blob/master/LICENSE) | diff --git a/docs/en/development/developer_instruction.md b/docs/en/development/developer_instruction.md index 5cac7203d87..60a949893d7 100644 --- a/docs/en/development/developer_instruction.md +++ b/docs/en/development/developer_instruction.md @@ -5,15 +5,15 @@ toc_title: The Beginner ClickHouse Developer Instruction Building of ClickHouse is supported on Linux, FreeBSD and Mac OS X. -# If you use Windows {#if-you-use-windows} +# If You Use Windows {#if-you-use-windows} If you use Windows, you need to create a virtual machine with Ubuntu. To start working with a virtual machine, please install VirtualBox. You can download Ubuntu from the website: https://www.ubuntu.com/\#download. Please create a virtual machine from the downloaded image (you should reserve at least 4GB of RAM for it). To run a command-line terminal in Ubuntu, please locate a program containing the word “terminal” in its name (gnome-terminal, konsole etc.) or just press Ctrl+Alt+T. -# If you use a 32-bit system {#if-you-use-a-32-bit-system} +# If You Use a 32-bit System {#if-you-use-a-32-bit-system} ClickHouse cannot work or build on a 32-bit system. You should acquire access to a 64-bit system, and then you can continue reading.
-# Creating a repository on GitHub {#creating-a-repository-on-github} +# Creating a Repository on GitHub {#creating-a-repository-on-github} To start working with the ClickHouse repository you will need a GitHub account. @@ -33,7 +33,7 @@ To do that in Ubuntu you would run in the command line terminal: A brief manual on using Git can be found here: https://services.github.com/on-demand/downloads/github-git-cheat-sheet.pdf. For a detailed manual on Git see https://git-scm.com/book/en/v2. -# Cloning a repository to your development machine {#cloning-a-repository-to-your-development-machine} +# Cloning a Repository to Your Development Machine {#cloning-a-repository-to-your-development-machine} Next, you need to download the source files onto your working machine. This is called “cloning a repository” because it creates a local copy of the repository on your working machine. @@ -77,7 +77,7 @@ You can also add original ClickHouse repo’s address to your local repository t After successfully running this command you will be able to pull updates from the main ClickHouse repo by running `git pull upstream master`. -## Working with submodules {#working-with-submodules} +## Working with Submodules {#working-with-submodules} Working with submodules in git can be painful. The following commands will help you manage them: @@ -145,7 +145,7 @@ Mac OS X build is supported only for Clang. Just run `brew install llvm` If you decide to use Clang, you can also install `libc++` and `lld`, if you know what they are. Using `ccache` is also recommended. -# The Building process {#the-building-process} +# The Building Process {#the-building-process} Now that you are ready to build ClickHouse, we recommend that you create a separate directory `build` inside `ClickHouse` that will contain all of the build artefacts: @@ -202,7 +202,7 @@ Upon successful build you get an executable file `ClickHouse//program ls -l programs/clickhouse -# Running the built executable of ClickHouse {#running-the-built-executable-of-clickhouse} +# Running the Built Executable of ClickHouse {#running-the-built-executable-of-clickhouse} To run the server under the current user you need to navigate to `ClickHouse/programs/server/` (located outside of `build`) and run: diff --git a/docs/en/development/tests.md b/docs/en/development/tests.md index d1ff946959e..02620b92367 100644 --- a/docs/en/development/tests.md +++ b/docs/en/development/tests.md @@ -32,7 +32,7 @@ meaning. `long` is for tests that run slightly longer than one second. You can disable these groups of tests using `--no-zookeeper`, `--no-shard` and `--no-long` options, respectively. -## Known bugs {#known-bugs} +## Known Bugs {#known-bugs} If we know some bugs that can be easily reproduced by functional tests, we place prepared functional tests in the `queries/bugs` directory. These tests will be moved to `teststests_stateless` when the bugs are fixed. @@ -58,7 +58,7 @@ Each test runs one or multiple queries (possibly with combinations of parameters) If you want to improve the performance of ClickHouse in some scenario, and if improvements can be observed on simple queries, it is highly recommended to write a performance test. It always makes sense to use `perf top` or other perf tools during your tests. -## Test Tools And Scripts {#test-tools-and-scripts} +## Test Tools and Scripts {#test-tools-and-scripts} Some programs in the `tests` directory are not prepared tests, but test tools.
For example, for `Lexer` there is a tool `src/Parsers/tests/lexer` that just does tokenization of stdin and writes the colorized result to stdout. You can use these kinds of tools as code examples and for exploration and manual testing. @@ -163,11 +163,11 @@ For example, build with system packages is bad practice, because we cannot guara Though we cannot run all tests on all variants of builds, we want to check at least that various build variants are not broken. For this purpose we use build tests. -## Testing For Protocol Compatibility {#testing-for-protocol-compatibility} +## Testing for Protocol Compatibility {#testing-for-protocol-compatibility} When we extend the ClickHouse network protocol, we test manually that the old clickhouse-client works with the new clickhouse-server and the new clickhouse-client works with the old clickhouse-server (simply by running binaries from the corresponding packages). -## Help From The Compiler {#help-from-the-compiler} +## Help from the Compiler {#help-from-the-compiler} The main ClickHouse code (located in the `dbms` directory) is built with `-Wall -Wextra -Werror` and with some additional warnings enabled. These options are not, however, enabled for third-party libraries. diff --git a/docs/en/engines/index.md b/docs/en/engines/index.md index 37b3f5998c8..c4b0b299858 100644 --- a/docs/en/engines/index.md +++ b/docs/en/engines/index.md @@ -3,3 +3,4 @@ toc_folder_title: Engines toc_priority: 25 --- + diff --git a/docs/en/engines/table_engines/index.md b/docs/en/engines/table_engines/index.md index f5ef56382c5..00f53a6f9f0 100644 --- a/docs/en/engines/table_engines/index.md +++ b/docs/en/engines/table_engines/index.md @@ -17,7 +17,7 @@ The table engine (type of table) determines: ## Engine Families {#engine-families} -### Mergetree {#mergetree} +### MergeTree {#mergetree} The most universal and functional table engines for high-load tasks. The property shared by these engines is quick data insertion with subsequent background data processing. `MergeTree` family engines support data replication (with [Replicated\*](mergetree_family/replication.md) versions of engines), partitioning, and other features not supported in other engines. diff --git a/docs/en/engines/table_engines/mergetree_family/collapsingmergetree.md b/docs/en/engines/table_engines/mergetree_family/collapsingmergetree.md index 61e0d82d32f..973377c0b58 100644 --- a/docs/en/engines/table_engines/mergetree_family/collapsingmergetree.md +++ b/docs/en/engines/table_engines/mergetree_family/collapsingmergetree.md @@ -3,7 +3,7 @@ toc_priority: 36 toc_title: CollapsingMergeTree --- -# Collapsingmergetree {#table_engine-collapsingmergetree} +# CollapsingMergeTree {#table_engine-collapsingmergetree} The engine inherits from [MergeTree](mergetree.md) and adds the logic of row collapsing to the data parts merge algorithm. diff --git a/docs/en/engines/table_engines/mergetree_family/graphitemergetree.md b/docs/en/engines/table_engines/mergetree_family/graphitemergetree.md index 7785af9b6d3..ebfdfe4685a 100644 --- a/docs/en/engines/table_engines/mergetree_family/graphitemergetree.md +++ b/docs/en/engines/table_engines/mergetree_family/graphitemergetree.md @@ -3,7 +3,7 @@ toc_priority: 38 toc_title: GraphiteMergeTree --- -# Graphitemergetree {#graphitemergetree} +# GraphiteMergeTree {#graphitemergetree} This engine is designed for thinning and aggregating/averaging (rollup) [Graphite](http://graphite.readthedocs.io/en/latest/index.html) data. It may be helpful to developers who want to use ClickHouse as a data store for Graphite.
diff --git a/docs/en/engines/table_engines/mergetree_family/mergetree.md b/docs/en/engines/table_engines/mergetree_family/mergetree.md index aabc0b45487..fc0b6e63158 100644 --- a/docs/en/engines/table_engines/mergetree_family/mergetree.md +++ b/docs/en/engines/table_engines/mergetree_family/mergetree.md @@ -3,7 +3,7 @@ toc_priority: 30 toc_title: MergeTree --- -# Mergetree {#table_engines-mergetree} +# MergeTree {#table_engines-mergetree} The `MergeTree` engine and other engines of this family (`*MergeTree`) are the most robust ClickHouse table engines. diff --git a/docs/en/engines/table_engines/mergetree_family/replacingmergetree.md b/docs/en/engines/table_engines/mergetree_family/replacingmergetree.md index 5bdad0d4074..7fbd4edec68 100644 --- a/docs/en/engines/table_engines/mergetree_family/replacingmergetree.md +++ b/docs/en/engines/table_engines/mergetree_family/replacingmergetree.md @@ -3,7 +3,7 @@ toc_priority: 33 toc_title: ReplacingMergeTree --- -# Replacingmergetree {#replacingmergetree} +# ReplacingMergeTree {#replacingmergetree} The engine differs from [MergeTree](mergetree.md#table_engines-mergetree) in that it removes duplicate entries with the same primary key value (or more accurately, with the same [sorting key](mergetree.md) value). diff --git a/docs/en/engines/table_engines/mergetree_family/replication.md b/docs/en/engines/table_engines/mergetree_family/replication.md index 8a4edb7ac8e..9de68fdb9ed 100644 --- a/docs/en/engines/table_engines/mergetree_family/replication.md +++ b/docs/en/engines/table_engines/mergetree_family/replication.md @@ -186,7 +186,7 @@ An alternative recovery option is to delete information about the lost replica f There is no restriction on network bandwidth during recovery. Keep this in mind if you are restoring many replicas at once. -## Converting From Mergetree To Replicatedmergetree {#converting-from-mergetree-to-replicatedmergetree} +## Converting From MergeTree To ReplicatedMergeTree {#converting-from-mergetree-to-replicatedmergetree} We use the term `MergeTree` to refer to all table engines in the `MergeTree family`, the same as for `ReplicatedMergeTree`. @@ -198,7 +198,7 @@ Rename the existing MergeTree table, then create a `ReplicatedMergeTree` table w Move the data from the old table to the `detached` subdirectory inside the directory with the new table data (`/var/lib/clickhouse/data/db_name/table_name/`). Then run `ALTER TABLE ATTACH PARTITION` on one of the replicas to add these data parts to the working set. -## Converting From Replicatedmergetree To Mergetree {#converting-from-replicatedmergetree-to-mergetree} +## Converting From ReplicatedMergeTree To MergeTree {#converting-from-replicatedmergetree-to-mergetree} Create a MergeTree table with a different name. Move all the data from the directory with the `ReplicatedMergeTree` table data to the new table’s data directory. Then delete the `ReplicatedMergeTree` table and restart the server. diff --git a/docs/en/engines/table_engines/mergetree_family/summingmergetree.md b/docs/en/engines/table_engines/mergetree_family/summingmergetree.md index 5c007ee0b7a..c8f76b60c53 100644 --- a/docs/en/engines/table_engines/mergetree_family/summingmergetree.md +++ b/docs/en/engines/table_engines/mergetree_family/summingmergetree.md @@ -3,7 +3,7 @@ toc_priority: 34 toc_title: SummingMergeTree --- -# Summingmergetree {#summingmergetree} +# SummingMergeTree {#summingmergetree} The engine inherits from [MergeTree](mergetree.md#table_engines-mergetree). 
The difference is that when merging data parts for `SummingMergeTree` tables ClickHouse replaces all the rows with the same primary key (or more accurately, with the same [sorting key](mergetree.md)) with one row which contains summarized values for the columns with the numeric data type. If the sorting key is composed in a way that a single key value corresponds to a large number of rows, this significantly reduces storage volume and speeds up data selection. @@ -94,7 +94,7 @@ SELECT key, sum(value) FROM summtt GROUP BY key ## Data Processing {#data-processing} -When data are inserted into a table, they are saved as-is. Clickhouse merges the inserted parts of data periodically and this is when rows with the same primary key are summed and replaced with one for each resulting part of data. +When data are inserted into a table, they are saved as-is. ClickHouse merges the inserted parts of data periodically and this is when rows with the same primary key are summed and replaced with one for each resulting part of data. ClickHouse can merge the data parts so that different resulting parts of data can consist of rows with the same primary key, i.e. the summation will be incomplete. Therefore, in `SELECT` queries, an aggregate function [sum()](../../../sql_reference/aggregate_functions/reference.md#agg_function-sum) and a `GROUP BY` clause should be used, as described in the example above (a toy sketch of this merge-time behaviour follows below). diff --git a/docs/en/engines/table_engines/mergetree_family/versionedcollapsingmergetree.md b/docs/en/engines/table_engines/mergetree_family/versionedcollapsingmergetree.md index 2c7a28263f8..f9a95dbd626 100644 --- a/docs/en/engines/table_engines/mergetree_family/versionedcollapsingmergetree.md +++ b/docs/en/engines/table_engines/mergetree_family/versionedcollapsingmergetree.md @@ -3,7 +3,7 @@ toc_priority: 37 toc_title: VersionedCollapsingMergeTree --- -# Versionedcollapsingmergetree {#versionedcollapsingmergetree} +# VersionedCollapsingMergeTree {#versionedcollapsingmergetree} This engine: diff --git a/docs/en/engines/table_engines/special/file.md b/docs/en/engines/table_engines/special/file.md index f0a9cd4e7e6..b9a79ed2e3e 100644 --- a/docs/en/engines/table_engines/special/file.md +++ b/docs/en/engines/table_engines/special/file.md @@ -14,7 +14,7 @@ Usage examples: - Convert data from one format to another. - Updating data in ClickHouse via editing a file on a disk. -## Usage In Clickhouse Server {#usage-in-clickhouse-server} +## Usage In ClickHouse Server {#usage-in-clickhouse-server} ``` sql File(Format) ``` @@ -65,7 +65,7 @@ SELECT * FROM file_engine_table └──────┴───────┘ ``` -## Usage In Clickhouse-local {#usage-in-clickhouse-local} +## Usage In ClickHouse-local {#usage-in-clickhouse-local} In [clickhouse-local](../../../operations/utilities/clickhouse-local.md), the File engine accepts a file path in addition to `Format`. Default input/output streams can be specified using numeric or human-readable names like `0` or `stdin`, `1` or `stdout`. **Example:** diff --git a/docs/en/engines/table_engines/special/generate.md b/docs/en/engines/table_engines/special/generate.md index d4b573c6839..6e592674346 100644 --- a/docs/en/engines/table_engines/special/generate.md +++ b/docs/en/engines/table_engines/special/generate.md @@ -12,7 +12,7 @@ Usage examples: - Use in tests to populate a reproducible large table. - Generate random input for fuzzing tests.
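(The toy sketch referenced in the `SummingMergeTree` hunk above: a plain-C++ model of merge-time summation, assuming nothing about the real merge code:)

``` cpp
#include <iostream>
#include <map>
#include <string>
#include <utility>
#include <vector>

// One row: (sorting key, numeric value). Merging parts collapses rows that
// share a key into a single row holding the sum, as described above.
using Row = std::pair<std::string, long long>;

std::vector<Row> mergeParts(const std::vector<std::vector<Row>> & parts)
{
    std::map<std::string, long long> summed;
    for (const auto & part : parts)
        for (const auto & [key, value] : part)
            summed[key] += value;
    return {summed.begin(), summed.end()};
}

int main()
{
    // Two data parts with an overlapping key; the merge sums them.
    for (const auto & [key, value] : mergeParts({{{"a", 1}, {"b", 2}}, {{"a", 10}}}))
        std::cout << key << " " << value << "\n";  // a 11, b 2
    // Until *all* parts are merged, a SELECT can still see several partially
    // summed rows per key, hence sum() + GROUP BY in queries.
}
```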
-## Usage In Clickhouse Server {#usage-in-clickhouse-server} +## Usage In ClickHouse Server {#usage-in-clickhouse-server} ``` sql ENGINE = GenerateRandom(random_seed, max_string_length, max_array_length) diff --git a/docs/en/engines/table_engines/special/url.md b/docs/en/engines/table_engines/special/url.md index 3c3e8d45f4c..db679b97c54 100644 --- a/docs/en/engines/table_engines/special/url.md +++ b/docs/en/engines/table_engines/special/url.md @@ -8,7 +8,7 @@ toc_title: URL Manages data on a remote HTTP/HTTPS server. This engine is similar to the [File](file.md) engine. -## Using the Engine In the Clickhouse Server {#using-the-engine-in-the-clickhouse-server} +## Using the Engine In the ClickHouse Server {#using-the-engine-in-the-clickhouse-server} The `format` must be one that ClickHouse can use in `SELECT` queries and, if necessary, in `INSERTs`. For the full list of supported formats, see diff --git a/docs/en/faq/index.md b/docs/en/faq/index.md index 2f872ea45b0..2ee9d51e83b 100644 --- a/docs/en/faq/index.md +++ b/docs/en/faq/index.md @@ -3,3 +3,4 @@ toc_folder_title: F.A.Q. toc_priority: 76 --- + diff --git a/docs/en/getting_started/install.md b/docs/en/getting_started/install.md index 363fc928679..316cc5a47ef 100644 --- a/docs/en/getting_started/install.md +++ b/docs/en/getting_started/install.md @@ -69,7 +69,7 @@ sudo yum install clickhouse-server clickhouse-client You can also download and install packages manually from here: https://repo.clickhouse.tech/rpm/stable/x86\_64. -### From tgz archives {#from-tgz-archives} +### From Tgz Archives {#from-tgz-archives} It is recommended to use official pre-compiled `tgz` archives for all Linux distributions, where installation of `deb` or `rpm` packages is not possible. diff --git a/docs/en/getting_started/playground.md b/docs/en/getting_started/playground.md index d7453e51eaa..353724b6e3e 100644 --- a/docs/en/getting_started/playground.md +++ b/docs/en/getting_started/playground.md @@ -32,7 +32,7 @@ You can make queries to playground using any HTTP client, for example [curl](htt More information about software products that support ClickHouse is available [here](../interfaces/index.md). | Parameter | Value | -|:------|:------------------------| +|:----------|:--------------------------------------| | Endpoint | https://play-api.clickhouse.tech:8443 | | User | `playground` | | Password | `clickhouse` | diff --git a/docs/en/interfaces/formats.md b/docs/en/interfaces/formats.md index aadd229490f..51985ba89e1 100644 --- a/docs/en/interfaces/formats.md +++ b/docs/en/interfaces/formats.md @@ -11,7 +11,7 @@ results of a `SELECT`, and to perform `INSERT`s into a file-backed table. The supported formats are: | Format | Input | Output | -|---------------------------------------|-----|------| +|-----------------------------------------------------------------|-------|--------| | [TabSeparated](#tabseparated) | ✔ | ✔ | | [TabSeparatedRaw](#tabseparatedraw) | ✗ | ✔ | | [TabSeparatedWithNames](#tabseparatedwithnames) | ✔ | ✔ | @@ -993,21 +993,21 @@ ClickHouse Avro format supports reading and writing [Avro data files](http://avr The table below shows supported data types and how they match ClickHouse [data types](../sql_reference/data_types/index.md) in `INSERT` and `SELECT` queries. 
-| Avro data type `INSERT` | ClickHouse data type | Avro data type `SELECT` | -|---------------------------|-------------------------------------------------------|------------------| +| Avro data type `INSERT` | ClickHouse data type | Avro data type `SELECT` | +|---------------------------------------------|-----------------------------------------------------------------------------------------------------------------------|------------------------------| | `boolean`, `int`, `long`, `float`, `double` | [Int(8\|16\|32)](../sql_reference/data_types/int_uint.md), [UInt(8\|16\|32)](../sql_reference/data_types/int_uint.md) | `int` | | `boolean`, `int`, `long`, `float`, `double` | [Int64](../sql_reference/data_types/int_uint.md), [UInt64](../sql_reference/data_types/int_uint.md) | `long` | -| `boolean`, `int`, `long`, `float`, `double` | [Float32](../sql_reference/data_types/float.md) | `float` | -| `boolean`, `int`, `long`, `float`, `double` | [Float64](../sql_reference/data_types/float.md) | `double` | -| `bytes`, `string`, `fixed`, `enum` | [String](../sql_reference/data_types/string.md) | `bytes` | -| `bytes`, `string`, `fixed` | [FixedString(N)](../sql_reference/data_types/fixedstring.md) | `fixed(N)` | -| `enum` | [Enum(8\|16)](../sql_reference/data_types/enum.md) | `enum` | -| `array(T)` | [Array(T)](../sql_reference/data_types/array.md) | `array(T)` | -| `union(null, T)`, `union(T, null)` | [Nullable(T)](../sql_reference/data_types/date.md) | `union(null, T)` | -| `null` | [Nullable(Nothing)](../sql_reference/data_types/special_data_types/nothing.md) | `null` | -| `int (date)` \* | [Date](../sql_reference/data_types/date.md) | `int (date)` \* | -| `long (timestamp-millis)` \* | [DateTime64(3)](../sql_reference/data_types/datetime.md) | `long (timestamp-millis)` \* | -| `long (timestamp-micros)` \* | [DateTime64(6)](../sql_reference/data_types/datetime.md) | `long (timestamp-micros)` \* | +| `boolean`, `int`, `long`, `float`, `double` | [Float32](../sql_reference/data_types/float.md) | `float` | +| `boolean`, `int`, `long`, `float`, `double` | [Float64](../sql_reference/data_types/float.md) | `double` | +| `bytes`, `string`, `fixed`, `enum` | [String](../sql_reference/data_types/string.md) | `bytes` | +| `bytes`, `string`, `fixed` | [FixedString(N)](../sql_reference/data_types/fixedstring.md) | `fixed(N)` | +| `enum` | [Enum(8\|16)](../sql_reference/data_types/enum.md) | `enum` | +| `array(T)` | [Array(T)](../sql_reference/data_types/array.md) | `array(T)` | +| `union(null, T)`, `union(T, null)` | [Nullable(T)](../sql_reference/data_types/date.md) | `union(null, T)` | +| `null` | [Nullable(Nothing)](../sql_reference/data_types/special_data_types/nothing.md) | `null` | +| `int (date)` \* | [Date](../sql_reference/data_types/date.md) | `int (date)` \* | +| `long (timestamp-millis)` \* | [DateTime64(3)](../sql_reference/data_types/datetime.md) | `long (timestamp-millis)` \* | +| `long (timestamp-micros)` \* | [DateTime64(6)](../sql_reference/data_types/datetime.md) | `long (timestamp-micros)` \* | \* [Avro logical types](http://avro.apache.org/docs/current/spec.html#Logical+Types) @@ -1101,8 +1101,8 @@ SELECT * FROM topic1_stream; The table below shows supported data types and how they match ClickHouse [data types](../sql_reference/data_types/index.md) in `INSERT` and `SELECT` queries. 
-| Parquet data type (`INSERT`) | ClickHouse data type | Parquet data type (`SELECT`) | -|------------------|---------------------------|------------------| +| Parquet data type (`INSERT`) | ClickHouse data type | Parquet data type (`SELECT`) | +|------------------------------|-----------------------------------------------------------|------------------------------| | `UINT8`, `BOOL` | [UInt8](../sql_reference/data_types/int_uint.md) | `UINT8` | | `INT8` | [Int8](../sql_reference/data_types/int_uint.md) | `INT8` | | `UINT16` | [UInt16](../sql_reference/data_types/int_uint.md) | `UINT16` | @@ -1149,8 +1149,8 @@ To exchange data with Hadoop, you can use [HDFS table engine](../engines/table_e The table below shows supported data types and how they match ClickHouse [data types](../sql_reference/data_types/index.md) in `INSERT` queries. -| ORC data type (`INSERT`) | ClickHouse data type | -|----------------|-------------------------| +| ORC data type (`INSERT`) | ClickHouse data type | +|--------------------------|-----------------------------------------------------| | `UINT8`, `BOOL` | [UInt8](../sql_reference/data_types/int_uint.md) | | `INT8` | [Int8](../sql_reference/data_types/int_uint.md) | | `UINT16` | [UInt16](../sql_reference/data_types/int_uint.md) | diff --git a/docs/en/interfaces/http.md b/docs/en/interfaces/http.md index df2ee5e19e5..0e18e00bb0d 100644 --- a/docs/en/interfaces/http.md +++ b/docs/en/interfaces/http.md @@ -286,31 +286,35 @@ $ curl -sS "
<address>?param_id=2&param_phrase=test" -d "SELECT * FROM table WHER ## Predefined HTTP Interface {#predefined_http_interface} ClickHouse supports specific queries through the HTTP interface. For example, you can write data to a table as follows: - -```bash + +``` bash $ echo '(4),(5),(6)' | curl 'http://localhost:8123/?query=INSERT%20INTO%20t%20VALUES' --data-binary @- ``` -ClickHouse also supports Predefined HTTP Interface which can help you more easy integration with third party tools like [Prometheus exporter](https://github.com/percona-lab/clickhouse_exporter). +ClickHouse also supports a Predefined HTTP Interface, which makes it easier to integrate with third-party tools like [Prometheus exporter](https://github.com/percona-lab/clickhouse_exporter). Example: - -* First of all, add this section to server configuration file: - + +- First of all, add this section to the server configuration file: + + + ``` xml - /metrics - GET - - SELECT * FROM system.metrics LIMIT 5 FORMAT Template SETTINGS format_template_resultset = 'prometheus_template_output_format_resultset', format_template_row = 'prometheus_template_output_format_row', format_template_rows_between_delimiter = '\n' - + /metrics + GET + + SELECT * FROM system.metrics LIMIT 5 FORMAT Template SETTINGS format_template_resultset = 'prometheus_template_output_format_resultset', format_template_row = 'prometheus_template_output_format_row', format_template_rows_between_delimiter = '\n' + ``` - -* You can now request the url directly for data in the Prometheus format: - + +- You can now request the url directly for data in the Prometheus format: + + + ``` bash curl -vvv 'http://localhost:8123/metrics' * Trying ::1... @@ -319,7 +323,7 @@ curl -vvv 'http://localhost:8123/metrics' > Host: localhost:8123 > User-Agent: curl/7.47.0 > Accept: */* -> +> < HTTP/1.1 200 OK < Date: Wed, 27 Nov 2019 08:54:25 GMT < Connection: Keep-Alive @@ -329,7 +333,7 @@ curl -vvv 'http://localhost:8123/metrics' < X-ClickHouse-Query-Id: f39235f6-6ed7-488c-ae07-c7ceafb960f6 < Keep-Alive: timeout=3 < X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0"} -< +< # HELP "Query" "Number of executing queries" # TYPE "Query" counter "Query" 1 @@ -337,19 +341,19 @@ curl -vvv 'http://localhost:8123/metrics' # HELP "Merge" "Number of executing background merges" # TYPE "Merge" counter "Merge" 0 - + # HELP "PartMutation" "Number of mutations (ALTER DELETE/UPDATE)" # TYPE "PartMutation" counter "PartMutation" 0 - + # HELP "ReplicatedFetch" "Number of data parts being fetched from replica" # TYPE "ReplicatedFetch" counter "ReplicatedFetch" 0 - + # HELP "ReplicatedSend" "Number of data parts being sent to replicas" # TYPE "ReplicatedSend" counter "ReplicatedSend" 0 - + * Connection #0 to host localhost left intact ``` @@ -357,26 +361,24 @@ As you can see from the example, if `<http_handlers>` is configured in the confi Now `<http_handlers>` can configure `<root_handler>`, `<ping_handler>`, `<replicas_status_handler>`, `<predefined_query_handler>` and `<dynamic_query_handler>`. -## root_handler +## root\_handler {#root_handler} - `<root_handler>` returns the specified content for the root path request. The specific return content is configured by `http_server_default_response` in config.xml. if not specified, return **Ok.** +`<root_handler>` returns the specified content for the root path request. The specific return content is configured by `http_server_default_response` in config.xml. If it is not specified, **Ok.** is returned. If `http_server_default_response` is not defined and an HTTP request is sent to ClickHouse,
the result is as follows: -```xml +``` xml ``` -``` -$ curl 'http://localhost:8123' -Ok. -``` + $ curl 'http://localhost:8123' + Ok. If `http_server_default_response` is defined and an HTTP request is sent to ClickHouse, the result is as follows: -```xml +``` xml
    <![CDATA[<html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>]]>
    @@ -384,35 +386,33 @@ Ok. ``` -``` -$ curl 'http://localhost:8123' -
    % -``` + $ curl 'http://localhost:8123' +
% -## ping_handler +## ping\_handler {#ping_handler} `<ping_handler>` can be used to probe the health of the current ClickHouse Server. When the ClickHouse HTTP Server is normal, accessing ClickHouse through `<ping_handler>` will return **Ok.**. Example: -```xml +``` xml <http_handlers> <ping_handler>/ping</ping_handler> </http_handlers> ``` -```bash +``` bash $ curl 'http://localhost:8123/ping' Ok. ``` -## replicas_status_handler +## replicas\_status\_handler {#replicas_status_handler} -`<replicas_status_handler>` is used to detect the state of the replica node and return **Ok.** if the replica node has no delay. If there is a delay, return the specific delay. The value of `<replicas_status_handler>` supports customization. If you do not specify `<replicas_status_handler>`, ClickHouse default setting `<replicas_status_handler>` is **/replicas_status**. +`<replicas_status_handler>` is used to detect the state of the replica node and returns **Ok.** if the replica node has no delay. If there is a delay, the specific delay is returned. The value of `<replicas_status_handler>` supports customization. If you do not specify it, the ClickHouse default setting is **/replicas\_status**. Example: -```xml +``` xml <http_handlers> <replicas_status_handler>/replicas_status</replicas_status_handler> </http_handlers> ``` @@ -420,90 +420,90 @@ No delay case: -```bash +``` bash $ curl 'http://localhost:8123/replicas_status' Ok. ``` Delayed case: -```bash +``` bash $ curl 'http://localhost:8123/replicas_status' db.stats: Absolute delay: 22. Relative delay: 22. ``` -## predefined_query_handler +## predefined\_query\_handler {#predefined_query_handler} You can configure `<method>`, `<headers>`, `<url>` and `<queries>` in `<predefined_query_handler>`. `<method>` is responsible for matching the method part of the HTTP request. `<method>` fully conforms to the definition of [method](https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods) in the HTTP protocol. It is an optional configuration. If it is not defined in the configuration file, it does not match the method portion of the HTTP request. -`<url>` is responsible for matching the url part of the HTTP request. It is compatible with [RE2](https://github.com/google/re2)'s regular expressions. It is an optional configuration. If it is not defined in the configuration file, it does not match the url portion of the HTTP request +`<url>` is responsible for matching the url part of the HTTP request. It is compatible with [RE2](https://github.com/google/re2)’s regular expressions. It is an optional configuration. If it is not defined in the configuration file, it does not match the url portion of the HTTP request. -`<headers>` is responsible for matching the header part of the HTTP request. It is compatible with RE2's regular expressions. It is an optional configuration. If it is not defined in the configuration file, it does not match the header portion of the HTTP request +`<headers>` is responsible for matching the header part of the HTTP request. It is compatible with RE2’s regular expressions. It is an optional configuration. If it is not defined in the configuration file, it does not match the header portion of the HTTP request. `<queries>` value is a predefined query of `<predefined_query_handler>`, which is executed by ClickHouse when an HTTP request is matched and the result of the query is returned. It is a required configuration. -`<predefined_query_handler>` supports setting Settings and query_params values. +`<predefined_query_handler>` supports setting Settings and query\_params values. The following example defines the values of `max_threads` and `max_alter_threads` settings, then queries the system table to check whether these settings were set successfully.
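(To make the RE2-based `<url>` and `<headers>` matching described above concrete, a small standalone sketch; the pattern below is a hypothetical stand-in in the spirit of the configuration example that follows, not the exact expression from it:)

``` cpp
#include <iostream>
#include <string>
#include <re2/re2.h>  // link with -lre2

int main()
{
    // Hypothetical rule: capture two trailing path segments as the named
    // groups name_1/name_2, which a handler could then feed to its queries.
    RE2 pattern("/query_param_with_url/\\w+/(?P<name_1>[^/]+)(?:/(?P<name_2>[^/]+))?");

    std::string name_1, name_2;
    if (RE2::FullMatch("/query_param_with_url/1/max_threads/max_alter_threads",
                       pattern, &name_1, &name_2))
        std::cout << name_1 << " " << name_2 << "\n";  // max_threads max_alter_threads
}
```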
Example: -```xml +``` xml - - GET - - TEST_HEADER_VALUE - [^/]+)(/(?P[^/]+))?]]> - - [^/]+)(/(?P[^/]+))?]]> - - SELECT value FROM system.settings WHERE name = {name_1:String} - SELECT name, value FROM system.settings WHERE name = {name_2:String} - - + + GET + + TEST_HEADER_VALUE + [^/]+)(/(?P[^/]+))?]]> + + [^/]+)(/(?P[^/]+))?]]> + + SELECT value FROM system.settings WHERE name = {name_1:String} + SELECT name, value FROM system.settings WHERE name = {name_2:String} + + ``` -```bash +``` bash $ curl -H ‘XXX:TEST_HEADER_VALUE’ -H ‘PARAMS_XXX:max_threads’ ‘http://localhost:8123/query_param_with_url/1/max_threads/max_alter_threads?max_threads=1&max_alter_threads=2’ 1 -max_alter_threads 2 +max_alter_threads 2 ``` !!! note "Note" In one ``, one `` supports only one `` of an insert type. -## dynamic_query_handler +## dynamic\_query\_handler {#dynamic_query_handler} -`` than `` increased `` . +`` differs from `` in that it adds ``. ClickHouse extracts and executes the value corresponding to the `` value in the url of the HTTP request. The ClickHouse default setting for `` is `/query`. It is an optional configuration. If it is not defined in the configuration file, the parameter is not passed in. -To experiment with this functionality, the example defines the values of max_threads and max_alter_threads and queries whether the Settings were set successfully. +To experiment with this functionality, the example defines the values of max\_threads and max\_alter\_threads and checks whether the settings were set successfully. The difference is that in `` the query is written in the configuration file, while in `` the query is passed as a parameter of the HTTP request. Example: -```xml +``` xml - - - TEST_HEADER_VALUE_DYNAMIC - [^/]+)(/(?P[^/]+))?]]> - - query_param - + + + TEST_HEADER_VALUE_DYNAMIC + [^/]+)(/(?P[^/]+))?]]> + + query_param + ``` -```bash +``` bash $ curl -H ‘XXX:TEST_HEADER_VALUE_DYNAMIC’ -H ‘PARAMS_XXX:max_threads’ ‘http://localhost:8123/?query_param=SELECT%20value%20FROM%20system.settings%20where%20name%20=%20%7Bname_1:String%7D%20OR%20name%20=%20%7Bname_2:String%7D&max_threads=1&max_alter_threads=2&param_name_2=max_alter_threads’ 1 2 ``` -[Original article](https://clickhouse.tech/docs/en/interfaces/http_interface/) \ No newline at end of file +[Original article](https://clickhouse.tech/docs/en/interfaces/http_interface/) diff --git a/docs/en/interfaces/mysql.md b/docs/en/interfaces/mysql.md index 5f469079d54..9f9d7f89a87 100644 --- a/docs/en/interfaces/mysql.md +++ b/docs/en/interfaces/mysql.md @@ -3,7 +3,7 @@ toc_priority: 20 toc_title: MySQL Interface --- -# MySQL interface {#mysql-interface} +# MySQL Interface {#mysql-interface} ClickHouse supports MySQL wire protocol.
It can be enabled by [mysql\_port](../operations/server_configuration_parameters/settings.md#server_configuration_parameters-mysql_port) setting in configuration file: diff --git a/docs/en/interfaces/third-party/client_libraries.md b/docs/en/interfaces/third-party/client_libraries.md index f408e4e0193..b049c37641c 100644 --- a/docs/en/interfaces/third-party/client_libraries.md +++ b/docs/en/interfaces/third-party/client_libraries.md @@ -37,7 +37,7 @@ toc_title: Client Libraries - [clickhouse-activerecord](https://github.com/PNixx/clickhouse-activerecord) - R - [clickhouse-r](https://github.com/hannesmuehleisen/clickhouse-r) - - [RClickhouse](https://github.com/IMSMWU/RClickhouse) + - [RClickHouse](https://github.com/IMSMWU/RClickHouse) - Java - [clickhouse-client-java](https://github.com/VirtusAI/clickhouse-client-java) - [clickhouse-client](https://github.com/Ecwid/clickhouse-client) diff --git a/docs/en/interfaces/third-party/integrations.md b/docs/en/interfaces/third-party/integrations.md index ab84b3f4c19..ff0b7fe2e19 100644 --- a/docs/en/interfaces/third-party/integrations.md +++ b/docs/en/interfaces/third-party/integrations.md @@ -74,7 +74,7 @@ toc_title: Integrations - [pandahouse](https://github.com/kszucs/pandahouse) - R - [dplyr](https://db.rstudio.com/dplyr/) - - [RClickhouse](https://github.com/IMSMWU/RClickhouse) (uses [clickhouse-cpp](https://github.com/artpaul/clickhouse-cpp)) + - [RClickHouse](https://github.com/IMSMWU/RClickHouse) (uses [clickhouse-cpp](https://github.com/artpaul/clickhouse-cpp)) - Java - [Hadoop](http://hadoop.apache.org) - [clickhouse-hdfs-loader](https://github.com/jaykelin/clickhouse-hdfs-loader) (uses [JDBC](../../sql_reference/table_functions/jdbc.md)) diff --git a/docs/en/introduction/adopters.md b/docs/en/introduction/adopters.md index 9a1103c19a5..f7006ae15c8 100644 --- a/docs/en/introduction/adopters.md +++ b/docs/en/introduction/adopters.md @@ -9,7 +9,7 @@ toc_title: Adopters The following list of companies using ClickHouse and their success stories is assembled from public sources, thus might differ from current reality. We’d appreciate it if you share the story of adopting ClickHouse in your company and [add it to the list](https://github.com/ClickHouse/ClickHouse/edit/master/docs/en/introduction/adopters.md), but please make sure you won’t have any NDA issues by doing so. Providing updates with publications from other companies is also useful. 
| Company | Industry | Usecase | Cluster Size | (Un)Compressed Data Size\* | Reference | -|-----------------------------------------------|---------------------|---------------|------------------------------------|------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------| +|-----------------------------------------------------------------------------|---------------------------------|-----------------------|------------------------------------------------------------|------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | [2gis](https://2gis.ru) | Maps | Monitoring | — | — | [Talk in Russian, July 2019](https://youtu.be/58sPkXfq6nw) | | [Aloha Browser](https://alohabrowser.com/) | Mobile App | Browser backend | — | — | [Slides in Russian, May 2019](https://github.com/yandex/clickhouse-presentations/blob/master/meetup22/aloha.pdf) | | [Amadeus](https://amadeus.com/) | Travel | Analytics | — | — | [Press Release, April 2018](https://www.altinity.com/blog/2018/4/5/amadeus-technologies-launches-investment-and-insights-tool-based-on-machine-learning-and-strategy-algorithms) | @@ -20,7 +20,7 @@ toc_title: Adopters | [Bloomberg](https://www.bloomberg.com/) | Finance, Media | Monitoring | 102 servers | — | [Slides, May 2018](https://www.slideshare.net/Altinity/http-analytics-for-6m-requests-per-second-using-clickhouse-by-alexander-bocharov) | | [Bloxy](https://bloxy.info) | Blockchain | Analytics | — | — | [Slides in Russian, August 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/4_bloxy.pptx) | | `Dataliance/UltraPower` | Telecom | Analytics | — | — | [Slides in Chinese, January 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup12/telecom.pdf) | -| [CARTO](https://carto.com/) | Business Intelligence | Geo analytics | — | — | [Geospatial processing with Clickhouse](https://carto.com/blog/geospatial-processing-with-clickhouse/) | +| [CARTO](https://carto.com/) | Business Intelligence | Geo analytics | — | — | [Geospatial processing with ClickHouse](https://carto.com/blog/geospatial-processing-with-clickhouse/) | | [CERN](http://public.web.cern.ch/public/) | Research | Experiment | — | — | [Press release, April 2012](https://www.yandex.com/company/press_center/press_releases/2012/2012-04-10/) | | [Cisco](http://cisco.com/) | Networking | Traffic analysis | — | — | [Lightning talk, October 2019](https://youtu.be/-hI1vDR2oPY?t=5057) | | [Citadel Securities](https://www.citadelsecurities.com/) | Finance | — | — | — | [Contribution, March 2019](https://github.com/ClickHouse/ClickHouse/pull/4774) | @@ -64,7 +64,7 @@ toc_title: Adopters | [Splunk](https://www.splunk.com/) | Business Analytics | Main product | — | — | [Slides in English, January 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup12/splunk.pdf) | | [Spotify](https://www.spotify.com) | Music | Experimentation | — | — | [Slides, July 2018](https://www.slideshare.net/glebus/using-clickhouse-for-experimentation-104247173) | | [Tencent](https://www.tencent.com) | Big Data | Data processing | — | — | [Slides in Chinese, October 
2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/5.%20ClickHouse大数据集群应用_李俊飞腾讯网媒事业部.pdf) | -| [Uber](https://www.uber.com) | Taxi | Logging | — | — | [Slides, February 2020](https://presentations.clickhouse.tech/meetup40/uber.pdf) | +| [Uber](https://www.uber.com) | Taxi | Logging | — | — | [Slides, February 2020](https://presentations.clickhouse.tech/meetup40/uber.pdf) | | [VKontakte](https://vk.com) | Social Network | Statistics, Logging | — | — | [Slides in Russian, August 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/3_vk.pdf) | | [Wisebits](https://wisebits.com/) | IT Solutions | Analytics | — | — | [Slides in Russian, May 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup22/strategies.pdf) | | [Xiaoxin Tech.](https://www.xiaoheiban.cn/) | Education | Common purpose | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/sync-clickhouse-with-mysql-mongodb.pptx) | diff --git a/docs/en/introduction/distinctive_features.md b/docs/en/introduction/distinctive_features.md index 9704c654723..31770b0bf79 100644 --- a/docs/en/introduction/distinctive_features.md +++ b/docs/en/introduction/distinctive_features.md @@ -60,7 +60,7 @@ ClickHouse provides various ways to trade accuracy for performance: 2. Running a query based on a part (sample) of data and getting an approximated result. In this case, proportionally less data is retrieved from the disk. 3. Running an aggregation for a limited number of random keys, instead of for all keys. Under certain conditions for key distribution in the data, this provides a reasonably accurate result while using fewer resources. -## Data replication and data integrity support {#data-replication-and-data-integrity-support} +## Data Replication and Data Integrity Support {#data-replication-and-data-integrity-support} ClickHouse uses asynchronous multi-master replication. After being written to any available replica, all the remaining replicas retrieve their copy in the background. The system maintains identical data on different replicas. Recovery after most failures is performed automatically, or semi-automatically in complex cases. diff --git a/docs/en/introduction/features_considered_disadvantages.md b/docs/en/introduction/features_considered_disadvantages.md index 597dad30663..e295b5570ab 100644 --- a/docs/en/introduction/features_considered_disadvantages.md +++ b/docs/en/introduction/features_considered_disadvantages.md @@ -3,7 +3,7 @@ toc_priority: 5 toc_title: ClickHouse Features that Can Be Considered Disadvantages --- -# ClickHouse Features that Can be Considered Disadvantages {#clickhouse-features-that-can-be-considered-disadvantages} +# ClickHouse Features that Can Be Considered Disadvantages {#clickhouse-features-that-can-be-considered-disadvantages} 1. No full-fledged transactions. 2. Lack of ability to modify or delete already inserted data with high rate and low latency. There are batch deletes and updates available to clean up or modify data, for example to comply with [GDPR](https://gdpr-info.eu). 
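The batch deletes and updates mentioned in point 2 are issued as `ALTER TABLE ... DELETE/UPDATE` mutations. A minimal Python sketch of such a cleanup over the HTTP interface; the `visits` table and `user_id` column are hypothetical:

``` python
import urllib.request

# Hypothetical GDPR-style cleanup: batch-delete one user's rows.
# Table and column names are illustrative; the mutation is accepted
# immediately and applied to the data parts asynchronously.
query = "ALTER TABLE visits DELETE WHERE user_id = 123"
request = urllib.request.Request('http://localhost:8123/', data=query.encode('utf-8'))
with urllib.request.urlopen(request, timeout=10) as response:
    print(response.getcode(), response.read().decode('utf-8'))
```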
diff --git a/docs/en/introduction/index.md b/docs/en/introduction/index.md index 1a089551261..ba80f9c2640 100644 --- a/docs/en/introduction/index.md +++ b/docs/en/introduction/index.md @@ -3,3 +3,4 @@ toc_folder_title: Introduction toc_priority: 1 --- + diff --git a/docs/en/operations/backup.md b/docs/en/operations/backup.md index 9510ef4b709..27418dff884 100644 --- a/docs/en/operations/backup.md +++ b/docs/en/operations/backup.md @@ -28,7 +28,7 @@ Some local filesystems provide snapshot functionality (for example, [ZFS](https: For smaller volumes of data, a simple `INSERT INTO ... SELECT ...` to remote tables might work as well. -## Manipulations With Parts {#manipulations-with-parts} +## Manipulations with Parts {#manipulations-with-parts} ClickHouse allows using the `ALTER TABLE ... FREEZE PARTITION ...` query to create a local copy of table partitions. This is implemented using hardlinks to the `/var/lib/clickhouse/shadow/` folder, so it usually does not consume extra disk space for old data. The created copies of files are not handled by ClickHouse server, so you can just leave them there: you will have a simple backup that doesn’t require any additional external system, but it will still be prone to hardware issues. For this reason, it’s better to remotely copy them to another location and then remove the local copies. Distributed filesystems and object stores are still good options for this, but normal attached file servers with a large enough capacity might work as well (in this case the transfer will occur via the network filesystem or maybe [rsync](https://en.wikipedia.org/wiki/Rsync)). diff --git a/docs/en/operations/monitoring.md b/docs/en/operations/monitoring.md index 2e848dbb313..363e9cc4bff 100644 --- a/docs/en/operations/monitoring.md +++ b/docs/en/operations/monitoring.md @@ -22,7 +22,7 @@ It is highly recommended to set up monitoring for: - Utilization of storage system, RAM and network. -## Clickhouse Server Metrics {#clickhouse-server-metrics} +## ClickHouse Server Metrics {#clickhouse-server-metrics} ClickHouse server has embedded instruments for self-state monitoring. diff --git a/docs/en/operations/performance_test.md b/docs/en/operations/performance_test.md index 2af28147dbe..d955b50fa02 100644 --- a/docs/en/operations/performance_test.md +++ b/docs/en/operations/performance_test.md @@ -3,7 +3,7 @@ toc_priority: 54 toc_title: Testing Hardware --- -# How To Test Your Hardware With ClickHouse {#how-to-test-your-hardware-with-clickhouse} +# How to Test Your Hardware with ClickHouse {#how-to-test-your-hardware-with-clickhouse} With this instruction, you can run a basic ClickHouse performance test on any server without installing ClickHouse packages. @@ -24,7 +24,7 @@ With this instruction you can run basic ClickHouse performance test on any serve # Then do: chmod a+x clickhouse -5. Download configs: +1. Download configs: @@ -34,7 +34,7 @@ With this instruction you can run basic ClickHouse performance test on any serve wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.d/path.xml -O config.d/path.xml wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.d/log_to_console.xml -O config.d/log_to_console.xml -6. Download benchmark files: +1. Download benchmark files: @@ -42,7 +42,7 @@ With this instruction you can run basic ClickHouse performance test on any serve chmod a+x benchmark-new.sh wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/clickhouse/queries.sql -7.
Download test data according to the [Yandex.Metrica dataset](../getting_started/example_datasets/metrica.md) instruction (“hits” table containing 100 million rows). +1. Download test data according to the [Yandex.Metrica dataset](../getting_started/example_datasets/metrica.md) instruction (“hits” table containing 100 million rows). @@ -50,31 +50,31 @@ With this instruction you can run basic ClickHouse performance test on any serve tar xvf hits_100m_obfuscated_v1.tar.xz -C . mv hits_100m_obfuscated_v1/* . -8. Run the server: +1. Run the server: ./clickhouse server -9. Check the data: ssh to the server in another terminal +1. Check the data: ssh to the server in another terminal ./clickhouse client --query "SELECT count() FROM hits_100m_obfuscated" 100000000 -10. Edit the benchmark-new.sh, change “clickhouse-client” to “./clickhouse client” and add “–max\_memory\_usage 100000000000” parameter. +1. Edit benchmark-new.sh, change “clickhouse-client” to “./clickhouse client” and add the “--max\_memory\_usage 100000000000” parameter. mcedit benchmark-new.sh -11. Run the benchmark: +1. Run the benchmark: ./benchmark-new.sh hits_100m_obfuscated -12. Send the numbers and the info about your hardware configuration to clickhouse-feedback@yandex-team.com +1. Send the numbers and the info about your hardware configuration to clickhouse-feedback@yandex-team.com -All the results are published here: https://clickhouse.tech/benchmark_hardware.html +All the results are published here: https://clickhouse.tech/benchmark\_hardware.html diff --git a/docs/en/operations/troubleshooting.md b/docs/en/operations/troubleshooting.md index 3cfcee39a22..b6afedfca7e 100644 --- a/docs/en/operations/troubleshooting.md +++ b/docs/en/operations/troubleshooting.md @@ -12,12 +12,12 @@ toc_title: Troubleshooting ## Installation {#troubleshooting-installation-errors} -### You Cannot Get Deb Packages From Clickhouse Repository With Apt-get {#you-cannot-get-deb-packages-from-clickhouse-repository-with-apt-get} +### You Cannot Get Deb Packages from ClickHouse Repository with Apt-get {#you-cannot-get-deb-packages-from-clickhouse-repository-with-apt-get} - Check firewall settings. - If you cannot access the repository for any reason, download packages as described in the [Getting started](../getting_started/index.md) article and install them manually using the `sudo dpkg -i ` command. You will also need the `tzdata` package. -## Connecting To the Server {#troubleshooting-accepts-no-connections} +## Connecting to the Server {#troubleshooting-accepts-no-connections} Possible issues: @@ -137,7 +137,7 @@ If you start `clickhouse-client` with the `stack-trace` parameter, ClickHouse re You might see a message about a broken connection. In this case, you can repeat the query. If the connection breaks every time you perform the query, check the server logs for errors. -## Efficiency Of Query Processing {#troubleshooting-too-slow} +## Efficiency of Query Processing {#troubleshooting-too-slow} If you see that ClickHouse is working too slowly, you need to profile the load on the server resources and network for your queries.
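A practical starting point for such profiling is the `system.query_log` table. A minimal sketch (assuming query logging is enabled with the `log_queries` setting and the default HTTP port):

``` python
import urllib.parse
import urllib.request

# Fetch the slowest recent queries from system.query_log; assumes query
# logging is enabled (log_queries = 1) and the default HTTP port 8123.
query = (
    "SELECT query_duration_ms, read_rows, memory_usage, query "
    "FROM system.query_log "
    "WHERE type = 'QueryFinish' "
    "ORDER BY query_duration_ms DESC "
    "LIMIT 5 FORMAT TSV"
)
url = 'http://localhost:8123/?' + urllib.parse.urlencode({'query': query})
with urllib.request.urlopen(url, timeout=10) as response:
    print(response.read().decode('utf-8'))
```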
diff --git a/docs/en/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md b/docs/en/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md index 174341f697e..1d3b78635d0 100644 --- a/docs/en/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md +++ b/docs/en/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md @@ -488,7 +488,7 @@ SOURCE(MYSQL( )) ``` -### Clickhouse {#dicts-external_dicts_dict_sources-clickhouse} +### ClickHouse {#dicts-external_dicts_dict_sources-clickhouse} Example of settings: diff --git a/docs/en/sql_reference/operators.md b/docs/en/sql_reference/operators.md index 6414a1d6469..418a9e32771 100644 --- a/docs/en/sql_reference/operators.md +++ b/docs/en/sql_reference/operators.md @@ -58,7 +58,7 @@ Groups of operators are listed in order of priority (the higher it is in the lis `a NOT BETWEEN b AND c` – The same as `a < b OR a > c`. -## Operators For Working With Data Sets {#operators-for-working-with-data-sets} +## Operators for Working with Data Sets {#operators-for-working-with-data-sets} *See [IN operators](statements/select.md#select-in-operators).* @@ -70,7 +70,7 @@ Groups of operators are listed in order of priority (the higher it is in the lis `a GLOBAL NOT IN ...` – The `globalNotIn(a, b)` function. -## Operators For Working With Dates and Times {#operators-datetime} +## Operators for Working with Dates and Times {#operators-datetime} ### EXTRACT {#operator-extract} @@ -231,7 +231,7 @@ Sometimes this doesn’t work the way you expect. For example, `SELECT 4 > 2 > 3 For efficiency, the `and` and `or` functions accept any number of arguments. The corresponding chains of `AND` and `OR` operators are transformed to a single call of these functions. -## Checking For `NULL` {#checking-for-null} +## Checking for `NULL` {#checking-for-null} ClickHouse supports the `IS NULL` and `IS NOT NULL` operators. diff --git a/docs/en/whats_new/changelog/2017.md b/docs/en/whats_new/changelog/2017.md index 68744e5334a..df632d72d83 100644 --- a/docs/en/whats_new/changelog/2017.md +++ b/docs/en/whats_new/changelog/2017.md @@ -152,7 +152,7 @@ This release contains bug fixes for the previous release 1.1.54276: - Fixed parsing when inserting in RowBinary format if input data starts with’;’. - Errors during runtime compilation of certain aggregate functions (e.g. `groupArray()`). -### Clickhouse Release 1.1.54276, 2017-08-16 {#clickhouse-release-1-1-54276-2017-08-16} +### ClickHouse Release 1.1.54276, 2017-08-16 {#clickhouse-release-1-1-54276-2017-08-16} #### New features: {#new-features-4} diff --git a/docs/en/whats_new/changelog/2018.md b/docs/en/whats_new/changelog/2018.md index f8eef5a911d..108332ce56b 100644 --- a/docs/en/whats_new/changelog/2018.md +++ b/docs/en/whats_new/changelog/2018.md @@ -839,7 +839,7 @@ toc_title: '2018' - Restored the behavior for queries like `SELECT * FROM remote('server2', default.table) WHERE col IN (SELECT col2 FROM default.table)` when the right side of the `IN` should use a remote `default.table` instead of a local one. This behavior was broken in version 1.1.54358. - Removed extraneous error-level logging of `Not found column ... in block`. -### Clickhouse Release 1.1.54362, 2018-03-11 {#clickhouse-release-1-1-54362-2018-03-11} +### ClickHouse Release 1.1.54362, 2018-03-11 {#clickhouse-release-1-1-54362-2018-03-11} #### New features: {#new-features-16} @@ -929,13 +929,13 @@ toc_title: '2018' - Removed the `strict_insert_defaults` setting. 
If you were using this functionality, write to `clickhouse-feedback@yandex-team.com`. - Removed the `UnsortedMergeTree` engine. -### Clickhouse Release 1.1.54343, 2018-02-05 {#clickhouse-release-1-1-54343-2018-02-05} +### ClickHouse Release 1.1.54343, 2018-02-05 {#clickhouse-release-1-1-54343-2018-02-05} - Added macros support for defining cluster names in distributed DDL queries and constructors of Distributed tables: `CREATE TABLE distr ON CLUSTER '{cluster}' (...) ENGINE = Distributed('{cluster}', 'db', 'table')`. - Now queries like `SELECT ... FROM table WHERE expr IN (subquery)` are processed using the `table` index. - Improved processing of duplicates when inserting to Replicated tables, so they no longer slow down execution of the replication queue. -### Clickhouse Release 1.1.54342, 2018-01-22 {#clickhouse-release-1-1-54342-2018-01-22} +### ClickHouse Release 1.1.54342, 2018-01-22 {#clickhouse-release-1-1-54342-2018-01-22} This release contains bug fixes for the previous release 1.1.54337: @@ -947,7 +947,7 @@ This release contains bug fixes for the previous release 1.1.54337: - Buffer tables now work correctly when MATERIALIZED columns are present in the destination table (by zhang2014). - Fixed a bug in implementation of NULL. -### Clickhouse Release 1.1.54337, 2018-01-18 {#clickhouse-release-1-1-54337-2018-01-18} +### ClickHouse Release 1.1.54337, 2018-01-18 {#clickhouse-release-1-1-54337-2018-01-18} #### New features: {#new-features-17} diff --git a/docs/en/whats_new/changelog/2019.md b/docs/en/whats_new/changelog/2019.md index ec2c14055cb..5b0405f0b0a 100644 --- a/docs/en/whats_new/changelog/2019.md +++ b/docs/en/whats_new/changelog/2019.md @@ -128,11 +128,11 @@ toc_title: '2019' ## ClickHouse release v19.16 {#clickhouse-release-v19-16} -#### Clickhouse release v19.16.14.65, 2020-03-25 +#### ClickHouse release v19.16.14.65, 2020-03-25 * Fixed up a bug in batched calculations of ternary logical OPs on multiple arguments (more than 10). [#8718](https://github.com/ClickHouse/ClickHouse/pull/8718) ([Alexander Kazakov](https://github.com/Akazz)) This bugfix was backported to version 19.16 by a special request from Altinity. -#### Clickhouse release v19.16.14.65, 2020-03-05 {#clickhouse-release-v19-16-14-65-2020-03-05} +#### ClickHouse release v19.16.14.65, 2020-03-05 {#clickhouse-release-v19-16-14-65-2020-03-05} - Fix distributed subqueries incompatibility with older CH versions. Fixes [\#7851](https://github.com/ClickHouse/ClickHouse/issues/7851) [(tabplubix)](https://github.com/tavplubix) @@ -1865,7 +1865,7 @@ toc_title: '2019' - Added tool for converting an old month-partitioned part to the custom-partitioned format. [\#4195](https://github.com/ClickHouse/ClickHouse/pull/4195) ([Alex Zatelepin](https://github.com/ztlpn)) - Added docs about two datasets in s3. [\#4144](https://github.com/ClickHouse/ClickHouse/pull/4144) ([alesapin](https://github.com/alesapin)) - Added script which creates changelog from pull requests description. [\#4169](https://github.com/ClickHouse/ClickHouse/pull/4169) [\#4173](https://github.com/ClickHouse/ClickHouse/pull/4173) ([KochetovNicolai](https://github.com/KochetovNicolai)) ([KochetovNicolai](https://github.com/KochetovNicolai)) -- Added puppet module for Clickhouse. +- Added puppet module for ClickHouse.
[\#4182](https://github.com/ClickHouse/ClickHouse/pull/4182) ([Maxim Fedotov](https://github.com/MaxFedotov)) - Added docs for a group of undocumented functions. [\#4168](https://github.com/ClickHouse/ClickHouse/pull/4168) ([Winter Zhang](https://github.com/zhang2014)) - ARM build fixes. [\#4210](https://github.com/ClickHouse/ClickHouse/pull/4210)[\#4306](https://github.com/ClickHouse/ClickHouse/pull/4306) [\#4291](https://github.com/ClickHouse/ClickHouse/pull/4291) ([proller](https://github.com/proller)) ([proller](https://github.com/proller)) - Dictionary tests now able to run from `ctest`. [\#4189](https://github.com/ClickHouse/ClickHouse/pull/4189) ([proller](https://github.com/proller)) diff --git a/docs/en/whats_new/index.md b/docs/en/whats_new/index.md index 3b8886a9f0a..0901166b887 100644 --- a/docs/en/whats_new/index.md +++ b/docs/en/whats_new/index.md @@ -3,3 +3,4 @@ toc_folder_title: What's New toc_priority: 72 --- + diff --git a/docs/toc_zh.yml b/docs/toc_zh.yml deleted file mode 100644 index f4aa73d91da..00000000000 --- a/docs/toc_zh.yml +++ /dev/null @@ -1,251 +0,0 @@ -nav: - -- '介绍': - - '概貌': 'index.md' - - 'ClickHouse的独特功能': 'introduction/distinctive_features.md' - - 'ClickHouse功能可被视为缺点': 'introduction/features_considered_disadvantages.md' - - '性能': 'introduction/performance.md' - - '历史': 'introduction/history.md' - - '使用者': 'introduction/adopters.md' - -- '入门指南': - - 'hidden': 'getting_started/index.md' - - '安装': 'getting_started/install.md' - - '教程': 'getting_started/tutorial.md' - - '示例数据集': - - '介绍': 'getting_started/example_datasets/index.md' - - '航班飞行数据': 'getting_started/example_datasets/ontime.md' - - '纽约市出租车数据': 'getting_started/example_datasets/nyc_taxi.md' - - 'AMPLab大数据基准测试': 'getting_started/example_datasets/amplab_benchmark.md' - - '维基访问数据': 'getting_started/example_datasets/wikistat.md' - - 'Criteo TB级别点击日志': 'getting_started/example_datasets/criteo.md' - - 'Star Schema基准测试': 'getting_started/example_datasets/star_schema.md' - - 'Yandex.Metrica': 'getting_started/example_datasets/metrica.md' - - 'Playground': 'getting_started/playground.md' - -- '客户端': - - '介绍': 'interfaces/index.md' - - '命令行客户端接口': 'interfaces/cli.md' - - '原生客户端接口 (TCP)': 'interfaces/tcp.md' - - 'HTTP 客户端接口': 'interfaces/http.md' - - 'MySQL 客户端接口': 'interfaces/mysql.md' - - '输入输出格式': 'interfaces/formats.md' - - 'JDBC 驱动': 'interfaces/jdbc.md' - - 'ODBC 驱动': 'interfaces/odbc.md' - - 'C ++客户端库': 'interfaces/cpp.md' - - '第三方': - - '客户端库': 'interfaces/third-party/client_libraries.md' - - '集成': 'interfaces/third-party/integrations.md' - - '可视界面': 'interfaces/third-party/gui.md' - - '代理': 'interfaces/third-party/proxy.md' - -- '数据类型': - - '介绍': 'data_types/index.md' - - 'UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64': 'data_types/int_uint.md' - - 'Float32, Float64': 'data_types/float.md' - - 'Decimal': 'data_types/decimal.md' - - 'Boolean values': 'data_types/boolean.md' - - 'String': 'data_types/string.md' - - 'FixedString(N)': 'data_types/fixedstring.md' - - 'UUID': 'data_types/uuid.md' - - 'Date': 'data_types/date.md' - - 'DateTime64': 'data_types/datetime64.md' - - 'DateTime': 'data_types/datetime.md' - - 'Enum': 'data_types/enum.md' - - 'Array(T)': 'data_types/array.md' - - 'AggregateFunction(name, types_of_arguments...)': 'data_types/nested_data_structures/aggregatefunction.md' - - 'Tuple(T1, T2, ...)': 'data_types/tuple.md' - - 'Nullable': 'data_types/nullable.md' - - '嵌套数据结构': - - 'hidden': 'data_types/nested_data_structures/index.md' - - 'Nested(Name1 Type1, Name2 Type2, 
...)': 'data_types/nested_data_structures/nested.md' - - '特殊数据类型': - - 'hidden': 'data_types/special_data_types/index.md' - - 'Expression': 'data_types/special_data_types/expression.md' - - 'Set': 'data_types/special_data_types/set.md' - - 'Nothing': 'data_types/special_data_types/nothing.md' - - 'Interval': 'data_types/special_data_types/interval.md' - - 'Domain类型': - - '介绍': 'data_types/domains/overview.md' - - 'IPv4': 'data_types/domains/ipv4.md' - - 'IPv6': 'data_types/domains/ipv6.md' - -- '数据库引擎': - - '介绍': 'database_engines/index.md' - - 'MySQL': 'database_engines/mysql.md' - - 'Lazy': 'database_engines/lazy.md' - -- '表引擎': - - '介绍': 'operations/table_engines/index.md' - - 'MergeTree': - - 'MergeTree': 'operations/table_engines/mergetree.md' - - 'Data Replication': 'operations/table_engines/replication.md' - - 'Custom Partitioning Key': 'operations/table_engines/custom_partitioning_key.md' - - 'ReplacingMergeTree': 'operations/table_engines/replacingmergetree.md' - - 'SummingMergeTree': 'operations/table_engines/summingmergetree.md' - - 'AggregatingMergeTree': 'operations/table_engines/aggregatingmergetree.md' - - 'CollapsingMergeTree': 'operations/table_engines/collapsingmergetree.md' - - 'VersionedCollapsingMergeTree': 'operations/table_engines/versionedcollapsingmergetree.md' - - 'GraphiteMergeTree': 'operations/table_engines/graphitemergetree.md' - - 'Log': - - '介绍': 'operations/table_engines/log_family.md' - - 'StripeLog': 'operations/table_engines/stripelog.md' - - 'Log': 'operations/table_engines/log.md' - - 'TinyLog': 'operations/table_engines/tinylog.md' - - '外部表引擎': - - 'Kafka': 'operations/table_engines/kafka.md' - - 'MySQL': 'operations/table_engines/mysql.md' - - 'JDBC': 'operations/table_engines/jdbc.md' - - 'ODBC': 'operations/table_engines/odbc.md' - - 'HDFS': 'operations/table_engines/hdfs.md' - - '其他表引擎': - - 'Distributed': 'operations/table_engines/distributed.md' - - 'External data': 'operations/table_engines/external_data.md' - - 'Dictionary': 'operations/table_engines/dictionary.md' - - 'Merge': 'operations/table_engines/merge.md' - - 'File': 'operations/table_engines/file.md' - - 'Null': 'operations/table_engines/null.md' - - 'Set': 'operations/table_engines/set.md' - - 'Join': 'operations/table_engines/join.md' - - 'URL': 'operations/table_engines/url.md' - - 'View': 'operations/table_engines/view.md' - - 'MaterializedView': 'operations/table_engines/materializedview.md' - - 'Memory': 'operations/table_engines/memory.md' - - 'Buffer': 'operations/table_engines/buffer.md' - - 'GenerateRandom': 'operations/table_engines/generate.md' - -- 'SQL语法': - - 'hidden': 'query_language/index.md' - - 'SELECT': 'query_language/select.md' - - 'INSERT INTO': 'query_language/insert_into.md' - - 'CREATE': 'query_language/create.md' - - 'ALTER': 'query_language/alter.md' - - 'SYSTEM': 'query_language/system.md' - - 'SHOW': 'query_language/show.md' - - '其他类型的查询': 'query_language/misc.md' - - '函数': - - '介绍': 'query_language/functions/index.md' - - '算术函数': 'query_language/functions/arithmetic_functions.md' - - '比较函数': 'query_language/functions/comparison_functions.md' - - '逻辑函数': 'query_language/functions/logical_functions.md' - - '类型转换函数': 'query_language/functions/type_conversion_functions.md' - - '时间日期函数': 'query_language/functions/date_time_functions.md' - - '字符串函数': 'query_language/functions/string_functions.md' - - '字符串搜索函数': 'query_language/functions/string_search_functions.md' - - '字符串替换函数': 'query_language/functions/string_replace_functions.md' - - '条件函数 ': 
'query_language/functions/conditional_functions.md' - - '数学函数': 'query_language/functions/math_functions.md' - - '取整函数': 'query_language/functions/rounding_functions.md' - - '数组函数': 'query_language/functions/array_functions.md' - - '字符串拆分合并函数': 'query_language/functions/splitting_merging_functions.md' - - '位操作函数': 'query_language/functions/bit_functions.md' - - '位图函数': 'query_language/functions/bitmap_functions.md' - - 'Hash函数': 'query_language/functions/hash_functions.md' - - '随机函数': 'query_language/functions/random_functions.md' - - '编码函数': 'query_language/functions/encoding_functions.md' - - 'UUID函数': 'query_language/functions/uuid_functions.md' - - 'URL函数': 'query_language/functions/url_functions.md' - - 'IP函数': 'query_language/functions/ip_address_functions.md' - - 'JSON函数': 'query_language/functions/json_functions.md' - - '高阶函数': 'query_language/functions/higher_order_functions.md' - - '字典函数': 'query_language/functions/ext_dict_functions.md' - - 'Yandex.Metrica字典函数': 'query_language/functions/ym_dict_functions.md' - - 'IN运算符相关函数': 'query_language/functions/in_functions.md' - - 'arrayJoin函数': 'query_language/functions/array_join.md' - - 'GEO函数': 'query_language/functions/geo.md' - - 'Nullable处理函数': 'query_language/functions/functions_for_nulls.md' - - '机器学习函数': 'query_language/functions/machine_learning_functions.md' - - 'Introspection': 'query_language/functions/introspection.md' - - '其他函数': 'query_language/functions/other_functions.md' - - '聚合函数': - - '介绍': 'query_language/agg_functions/index.md' - - '函数列表': 'query_language/agg_functions/reference.md' - - '聚合函数组合子': 'query_language/agg_functions/combinators.md' - - '参数化聚合函数': 'query_language/agg_functions/parametric_functions.md' - - '表引擎函数': - - '介绍': 'query_language/table_functions/index.md' - - 'file': 'query_language/table_functions/file.md' - - 'merge': 'query_language/table_functions/merge.md' - - 'numbers': 'query_language/table_functions/numbers.md' - - 'remote': 'query_language/table_functions/remote.md' - - 'url': 'query_language/table_functions/url.md' - - 'mysql': 'query_language/table_functions/mysql.md' - - 'jdbc': 'query_language/table_functions/jdbc.md' - - 'odbc': 'query_language/table_functions/odbc.md' - - 'hdfs': 'query_language/table_functions/hdfs.md' - - 'input': 'query_language/table_functions/input.md' - - 'generateRandom': 'query_language/table_functions/generate.md' - - '字典': - - '介绍': 'query_language/dicts/index.md' - - '外部字典': - - '介绍': 'query_language/dicts/external_dicts.md' - - '配置外部字典': 'query_language/dicts/external_dicts_dict.md' - - '字典的内存布局': 'query_language/dicts/external_dicts_dict_layout.md' - - '字典的刷新策略': 'query_language/dicts/external_dicts_dict_lifetime.md' - - '字典的外部数据源': 'query_language/dicts/external_dicts_dict_sources.md' - - '字典的键和字段值': 'query_language/dicts/external_dicts_dict_structure.md' - - 'Hierarchical dictionaries': 'query_language/dicts/external_dicts_dict_hierarchical.md' - - '内部字典': 'query_language/dicts/internal_dicts.md' - - '操作符': 'query_language/operators.md' - - '语法说明': 'query_language/syntax.md' - -- 'Guides': - - 'Overview': 'guides/index.md' - - 'Applying CatBoost Models': 'guides/apply_catboost_model.md' - -- '运维': - - '介绍': 'operations/index.md' - - '环境要求': 'operations/requirements.md' - - '监控': 'operations/monitoring.md' - - '故障排查': 'operations/troubleshooting.md' - - '使用建议': 'operations/tips.md' - - '版本升级': 'operations/update.md' - - '访问权限控制': 'operations/access_rights.md' - - '数据备份': 'operations/backup.md' - - '配置文件': 'operations/configuration_files.md' - - 
'配额': 'operations/quotas.md' - - '系统表': 'operations/system_tables.md' - - '优化性能': - - '查询分析': 'operations/performance/sampling_query_profiler.md' - - '测试硬件': 'operations/performance_test.md' - - 'Server参数配置': - - '介绍': 'operations/server_settings/index.md' - - 'Server参数说明': 'operations/server_settings/settings.md' - - 'Settings配置': - - '介绍': 'operations/settings/index.md' - - '查询权限管理': 'operations/settings/permissions_for_queries.md' - - '查询复杂性的限制': 'operations/settings/query_complexity.md' - - 'Setting列表': 'operations/settings/settings.md' - - 'Setting配置组': 'operations/settings/settings_profiles.md' - - '用户配置': 'operations/settings/settings_users.md' - - 'Settings的约束': 'operations/settings/constraints_on_settings.md' - - '常用工具': - - '介绍': 'operations/utils/index.md' - - 'clickhouse-copier': 'operations/utils/clickhouse-copier.md' - - 'clickhouse-local': 'operations/utils/clickhouse-local.md' - - 'clickhouse-benchmark': 'operations/utils/clickhouse-benchmark.md' - -- '常见问题': - - '一般的问题': 'faq/general.md' - -- '开发者指南': - - 'hidden': 'development/index.md' - - '开发者指南': 'development/developer_instruction.md' - - 'ClickHouse架构概述': 'development/architecture.md' - - 'ClickHouse Code Browser': 'development/browse_code.md' - - '如何在Linux中编译ClickHouse': 'development/build.md' - - '如何在Mac OS X中编译ClickHouse': 'development/build_osx.md' - - '如何在Linux中编译Mac OS X ClickHouse': 'development/build_cross_osx.md' - - '如何在Linux中编译AARCH64 (ARM64) ClickHouse': 'development/build_cross_arm.md' - - '如何编写C++代码': 'development/style.md' - - '如何运行ClickHouse测试': 'development/tests.md' - - '使用的第三方库': 'development/contrib.md' - -- '新功能特性': - - '路线图': 'roadmap.md' - - '更新日志': - - '2020': 'changelog/index.md' - - '2019': 'changelog/2019.md' - - '2018': 'changelog/2018.md' - - '2017': 'changelog/2017.md' - - '安全更改日志': 'security_changelog.md' diff --git a/docs/tools/convert_toc.py b/docs/tools/convert_toc.py index 18178e3be72..9bfc347d244 100755 --- a/docs/tools/convert_toc.py +++ b/docs/tools/convert_toc.py @@ -8,7 +8,7 @@ import yaml import util -lang = 'ru' +lang = 'zh' base_dir = os.path.join(os.path.dirname(__file__), '..') en_dir = os.path.join(base_dir, 'en') docs_dir = os.path.join(base_dir, lang) @@ -57,7 +57,7 @@ def process_md_file(title, idx, original_path, proper_path): if original_path != proper_md_path: subprocess.check_call(f'git add {proper_md_path}', shell=True) if os.path.exists(original_path): - subprocess.check_call(f'git rm {original_path}', shell=True) + subprocess.check_call(f'rm {original_path}', shell=True) def process_toc_entry(entry, path, idx): @@ -131,7 +131,7 @@ def sync_translation(): util.write_md_file(lang_dst, en_meta, lang_content) subprocess.check_call(f'git add {lang_dst}', shell=True) - subprocess.check_call(f'git rm {lang_src}', shell=True) + subprocess.check_call(f'rm {lang_src}', shell=True) if __name__ == '__main__': diff --git a/docs/tools/nav.py b/docs/tools/nav.py index 2d99d4df3fe..56d47d58d07 100644 --- a/docs/tools/nav.py +++ b/docs/tools/nav.py @@ -16,8 +16,8 @@ def build_nav_entry(root): if root.endswith('images'): return None, None, None result_items = [] - index_meta, _ = util.read_md_file(os.path.join(root, 'index.md')) - current_title = index_meta.get('toc_folder_title', index_meta.get('toc_title', 'hidden')) + index_meta, index_content = util.read_md_file(os.path.join(root, 'index.md')) + current_title = index_meta.get('toc_folder_title', index_meta.get('toc_title', find_first_header(index_content))) for filename in os.listdir(root): path = os.path.join(root, filename) 
if os.path.isdir(path): @@ -46,5 +46,7 @@ def build_nav(lang, args): _, _, nav = build_nav_entry(docs_dir) result = [] for key, value in nav.items(): - result.append({key: value}) + if key and value: + result.append({key: value}) + print('result', result) return result diff --git a/docs/tools/translate/filter.py b/docs/tools/translate/filter.py index 4376e7dc7f9..b5424f20921 100755 --- a/docs/tools/translate/filter.py +++ b/docs/tools/translate/filter.py @@ -36,7 +36,7 @@ def process_buffer(buffer, new_value, item=None, is_header=False): debug(f'Translate: "{text}" -> "{translated_text}"') if text and text[0].isupper() and not translated_text[0].isupper(): - translated_text = translated_text.capitalize() + translated_text = translated_text[0].upper() + translated_text[1:] if text.startswith(' ') and not translated_text.startswith(' '): translated_text = ' ' + translated_text @@ -47,12 +47,19 @@ def process_buffer(buffer, new_value, item=None, is_header=False): if is_header and translated_text.endswith('.'): translated_text = translated_text.rstrip('.') - title_case = False # is_header and translate.default_target_language == 'en' and text[0].isupper() - title_case_whitelist = {'a', 'an', 'the', 'and', 'or'} + title_case = is_header and translate.default_target_language == 'en' and text[0].isupper() + title_case_whitelist = { + 'a', 'an', 'the', 'and', 'or', 'that', + 'of', 'on', 'for', 'from', 'with', 'to', 'in' + } + is_first_iteration = True for token in translated_text.split(' '): - if title_case and not token.isupper(): - if token not in title_case_whitelist: - token = token.capitalize() + if title_case and token.isascii() and not token.isupper(): + if len(token) > 1 and token.lower() not in title_case_whitelist: + token = token[0].upper() + token[1:] + elif not is_first_iteration: + token = token.lower() + is_first_iteration = False new_value.append(pandocfilters.Str(token)) new_value.append(pandocfilters.Space()) diff --git a/docs/tools/translate/remove_machine_translated_meta.py b/docs/tools/translate/remove_machine_translated_meta.py new file mode 100755 index 00000000000..26cfde97f1e --- /dev/null +++ b/docs/tools/translate/remove_machine_translated_meta.py @@ -0,0 +1,21 @@ +#!/usr/bin/env python3 +import os +import sys +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..')) +import convert_toc +import util + + +if __name__ == '__main__': + path = sys.argv[1][2:] + convert_toc.init_redirects() + try: + path = convert_toc.redirects[path] + except KeyError: + pass + meta, content = util.read_md_file(path) + if 'machine_translated' in meta: + del meta['machine_translated'] + if 'machine_translated_rev' in meta: + del meta['machine_translated_rev'] + util.write_md_file(path, meta, content) diff --git a/docs/zh/changelog/2017.md b/docs/zh/changelog/2017.md deleted file mode 100644 index 95156754100..00000000000 --- a/docs/zh/changelog/2017.md +++ /dev/null @@ -1,265 +0,0 @@ ---- -en_copy: true ---- - -### ClickHouse release 1.1.54327, 2017-12-21 {#clickhouse-release-1-1-54327-2017-12-21} - -This release contains bug fixes for the previous release 1.1.54318: - -- Fixed bug with possible race condition in replication that could lead to data loss. This issue affects versions 1.1.54310 and 1.1.54318. If you use one of these versions with Replicated tables, the update is strongly recommended. This issue shows in logs in Warning messages like `Part ... from own log doesn't exist.` The issue is relevant even if you don’t see these messages in logs. 
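The heading renames running through the documentation diffs above (for example, “How to Test Your Hardware with ClickHouse”) follow the title-case rule this patch adds to `docs/tools/filter.py`; a standalone Python sketch of the same rule:

``` python
# Standalone sketch of the title-case rule added to docs/tools/filter.py:
# capitalize every word except short connectives, which are lowercased
# unless they open the heading.
TITLE_CASE_WHITELIST = {
    'a', 'an', 'the', 'and', 'or', 'that',
    'of', 'on', 'for', 'from', 'with', 'to', 'in',
}

def title_case(text):
    result = []
    for position, token in enumerate(text.split(' ')):
        if token.isascii() and not token.isupper():
            if len(token) > 1 and token.lower() not in TITLE_CASE_WHITELIST:
                token = token[0].upper() + token[1:]
            elif position > 0:
                token = token.lower()
        result.append(token)
    return ' '.join(result)

print(title_case('How to test your hardware with ClickHouse'))
# -> How to Test Your Hardware with ClickHouse
```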
- -### ClickHouse release 1.1.54318, 2017-11-30 {#clickhouse-release-1-1-54318-2017-11-30} - -This release contains bug fixes for the previous release 1.1.54310: - -- Fixed incorrect row deletions during merges in the SummingMergeTree engine -- Fixed a memory leak in unreplicated MergeTree engines -- Fixed performance degradation with frequent inserts in MergeTree engines -- Fixed an issue that was causing the replication queue to stop running -- Fixed rotation and archiving of server logs - -### ClickHouse release 1.1.54310, 2017-11-01 {#clickhouse-release-1-1-54310-2017-11-01} - -#### New features: {#new-features} - -- Custom partitioning key for the MergeTree family of table engines. -- [Kafka](https://clickhouse.yandex/docs/en/operations/table_engines/kafka/) table engine. -- Added support for loading [CatBoost](https://catboost.yandex/) models and applying them to data stored in ClickHouse. -- Added support for time zones with non-integer offsets from UTC. -- Added support for arithmetic operations with time intervals. -- The range of values for the Date and DateTime types is extended to the year 2105. -- Added the `CREATE MATERIALIZED VIEW x TO y` query (specifies an existing table for storing the data of a materialized view). -- Added the `ATTACH TABLE` query without arguments. -- The processing logic for Nested columns with names ending in -Map in a SummingMergeTree table was extracted to the sumMap aggregate function. You can now specify such columns explicitly. -- Max size of the IP trie dictionary is increased to 128M entries. -- Added the getSizeOfEnumType function. -- Added the sumWithOverflow aggregate function. -- Added support for the Cap’n Proto input format. -- You can now customize compression level when using the zstd algorithm. - -#### Backward incompatible changes: {#backward-incompatible-changes} - -- Creation of temporary tables with an engine other than Memory is not allowed. -- Explicit creation of tables with the View or MaterializedView engine is not allowed. -- During table creation, a new check verifies that the sampling key expression is included in the primary key. - -#### Bug fixes: {#bug-fixes} - -- Fixed hangups when synchronously inserting into a Distributed table. -- Fixed nonatomic adding and removing of parts in Replicated tables. -- Data inserted into a materialized view is not subjected to unnecessary deduplication. -- Executing a query to a Distributed table for which the local replica is lagging and remote replicas are unavailable does not result in an error anymore. -- Users don’t need access permissions to the `default` database to create temporary tables anymore. -- Fixed crashing when specifying the Array type without arguments. -- Fixed hangups when the disk volume containing server logs is full. -- Fixed an overflow in the toRelativeWeekNum function for the first week of the Unix epoch. - -#### Build improvements: {#build-improvements} - -- Several third-party libraries (notably Poco) were updated and converted to git submodules. - -### ClickHouse release 1.1.54304, 2017-10-19 {#clickhouse-release-1-1-54304-2017-10-19} - -#### New features: {#new-features-1} - -- TLS support in the native protocol (to enable, set `tcp_ssl_port` in `config.xml` ). - -#### Bug fixes: {#bug-fixes-1} - -- `ALTER` for replicated tables now tries to start running as soon as possible. 
-- Fixed crashing when reading data with the setting `preferred_block_size_bytes=0.` -- Fixed crashes of `clickhouse-client` when pressing `Page Down` -- Correct interpretation of certain complex queries with `GLOBAL IN` and `UNION ALL` -- `FREEZE PARTITION` always works atomically now. -- Empty POST requests now return a response with code 411. -- Fixed interpretation errors for expressions like `CAST(1 AS Nullable(UInt8)).` -- Fixed an error when reading `Array(Nullable(String))` columns from `MergeTree` tables. -- Fixed crashing when parsing queries like `SELECT dummy AS dummy, dummy AS b` -- Users are updated correctly with invalid `users.xml` -- Correct handling when an executable dictionary returns a non-zero response code. - -### ClickHouse release 1.1.54292, 2017-09-20 {#clickhouse-release-1-1-54292-2017-09-20} - -#### New features: {#new-features-2} - -- Added the `pointInPolygon` function for working with coordinates on a coordinate plane. -- Added the `sumMap` aggregate function for calculating the sum of arrays, similar to `SummingMergeTree`. -- Added the `trunc` function. Improved performance of the rounding functions (`round`, `floor`, `ceil`, `roundToExp2`) and corrected the logic of how they work. Changed the logic of the `roundToExp2` function for fractions and negative numbers. -- The ClickHouse executable file is now less dependent on the libc version. The same ClickHouse executable file can run on a wide variety of Linux systems. There is still a dependency when using compiled queries (with the setting `compile = 1` , which is not used by default). -- Reduced the time needed for dynamic compilation of queries. - -#### Bug fixes: {#bug-fixes-2} - -- Fixed an error that sometimes produced `part ... intersects previous part` messages and weakened replica consistency. -- Fixed an error that caused the server to lock up if ZooKeeper was unavailable during shutdown. -- Removed excessive logging when restoring replicas. -- Fixed an error in the UNION ALL implementation. -- Fixed an error in the concat function that occurred if the first column in a block has the Array type. -- Progress is now displayed correctly in the system.merges table. - -### ClickHouse release 1.1.54289, 2017-09-13 {#clickhouse-release-1-1-54289-2017-09-13} - -#### New features: {#new-features-3} - -- `SYSTEM` queries for server administration: `SYSTEM RELOAD DICTIONARY`, `SYSTEM RELOAD DICTIONARIES`, `SYSTEM DROP DNS CACHE`, `SYSTEM SHUTDOWN`, `SYSTEM KILL`. -- Added functions for working with arrays: `concat`, `arraySlice`, `arrayPushBack`, `arrayPushFront`, `arrayPopBack`, `arrayPopFront`. -- Added `root` and `identity` parameters for the ZooKeeper configuration. This allows you to isolate individual users on the same ZooKeeper cluster. -- Added aggregate functions `groupBitAnd`, `groupBitOr`, and `groupBitXor` (for compatibility, they are also available under the names `BIT_AND`, `BIT_OR`, and `BIT_XOR`). -- External dictionaries can be loaded from MySQL by specifying a socket in the filesystem. -- External dictionaries can be loaded from MySQL over SSL (`ssl_cert`, `ssl_key`, `ssl_ca` parameters). -- Added the `max_network_bandwidth_for_user` setting to restrict the overall bandwidth use for queries per user. -- Support for `DROP TABLE` for temporary tables. -- Support for reading `DateTime` values in Unix timestamp format from the `CSV` and `JSONEachRow` formats. -- Lagging replicas in distributed queries are now excluded by default (the default threshold is 5 minutes). 
-- FIFO locking is used during ALTER: an ALTER query isn’t blocked indefinitely for continuously running queries. -- Option to set `umask` in the config file. -- Improved performance for queries with `DISTINCT` . - -#### Bug fixes: {#bug-fixes-3} - -- Improved the process for deleting old nodes in ZooKeeper. Previously, old nodes sometimes didn’t get deleted if there were very frequent inserts, which caused the server to be slow to shut down, among other things. -- Fixed randomization when choosing hosts for the connection to ZooKeeper. -- Fixed the exclusion of lagging replicas in distributed queries if the replica is localhost. -- Fixed an error where a data part in a `ReplicatedMergeTree` table could be broken after running `ALTER MODIFY` on an element in a `Nested` structure. -- Fixed an error that could cause SELECT queries to “hang”. -- Improvements to distributed DDL queries. -- Fixed the query `CREATE TABLE ... AS `. -- Resolved the deadlock in the `ALTER ... CLEAR COLUMN IN PARTITION` query for `Buffer` tables. -- Fixed the invalid default value for `Enum` s (0 instead of the minimum) when using the `JSONEachRow` and `TSKV` formats. -- Resolved the appearance of zombie processes when using a dictionary with an `executable` source. -- Fixed segfault for the HEAD query. - -#### Improved workflow for developing and assembling ClickHouse: {#improved-workflow-for-developing-and-assembling-clickhouse} - -- You can use `pbuilder` to build ClickHouse. -- You can use `libc++` instead of `libstdc++` for builds on Linux. -- Added instructions for using static code analysis tools: `Coverage`, `clang-tidy`, `cppcheck`. - -#### Please note when upgrading: {#please-note-when-upgrading} - -- There is now a higher default value for the MergeTree setting `max_bytes_to_merge_at_max_space_in_pool` (the maximum total size of data parts to merge, in bytes): it has increased from 100 GiB to 150 GiB. This might result in large merges running after the server upgrade, which could cause an increased load on the disk subsystem. If the free space available on the server is less than twice the total amount of the merges that are running, this will cause all other merges to stop running, including merges of small data parts. As a result, INSERT queries will fail with the message “Merges are processing significantly slower than inserts.” Use the `SELECT * FROM system.merges` query to monitor the situation. You can also check the `DiskSpaceReservedForMerge` metric in the `system.metrics` table, or in Graphite. You don’t need to do anything to fix this, since the issue will resolve itself once the large merges finish. If you find this unacceptable, you can restore the previous value for the `max_bytes_to_merge_at_max_space_in_pool` setting. To do this, go to the section in config.xml, set ``` ``107374182400 ``` and restart the server. - -### ClickHouse release 1.1.54284, 2017-08-29 {#clickhouse-release-1-1-54284-2017-08-29} - -- This is a bugfix release for the previous 1.1.54282 release. It fixes leaks in the parts directory in ZooKeeper. - -### ClickHouse release 1.1.54282, 2017-08-23 {#clickhouse-release-1-1-54282-2017-08-23} - -This release contains bug fixes for the previous release 1.1.54276: - -- Fixed `DB::Exception: Assertion violation: !_path.empty()` when inserting into a Distributed table. -- Fixed parsing when inserting in RowBinary format if input data starts with’;’. -- Errors during runtime compilation of certain aggregate functions (e.g. `groupArray()`). 
- -### Clickhouse Release 1.1.54276, 2017-08-16 {#clickhouse-release-1-1-54276-2017-08-16} - -#### New features: {#new-features-4} - -- Added an optional WITH section for a SELECT query. Example query: `WITH 1+1 AS a SELECT a, a*a` -- INSERT can be performed synchronously in a Distributed table: OK is returned only after all the data is saved on all the shards. This is activated by the setting insert\_distributed\_sync=1. -- Added the UUID data type for working with 16-byte identifiers. -- Added aliases of CHAR, FLOAT and other types for compatibility with the Tableau. -- Added the functions toYYYYMM, toYYYYMMDD, and toYYYYMMDDhhmmss for converting time into numbers. -- You can use IP addresses (together with the hostname) to identify servers for clustered DDL queries. -- Added support for non-constant arguments and negative offsets in the function `substring(str, pos, len).` -- Added the max\_size parameter for the `groupArray(max_size)(column)` aggregate function, and optimized its performance. - -#### Main changes: {#main-changes} - -- Security improvements: all server files are created with 0640 permissions (can be changed via config parameter). -- Improved error messages for queries with invalid syntax. -- Significantly reduced memory consumption and improved performance when merging large sections of MergeTree data. -- Significantly increased the performance of data merges for the ReplacingMergeTree engine. -- Improved performance for asynchronous inserts from a Distributed table by combining multiple source inserts. To enable this functionality, use the setting distributed\_directory\_monitor\_batch\_inserts=1. - -#### Backward incompatible changes: {#backward-incompatible-changes-1} - -- Changed the binary format of aggregate states of `groupArray(array_column)` functions for arrays. - -#### Complete list of changes: {#complete-list-of-changes} - -- Added the `output_format_json_quote_denormals` setting, which enables outputting nan and inf values in JSON format. -- Optimized stream allocation when reading from a Distributed table. -- Settings can be configured in readonly mode if the value doesn’t change. -- Added the ability to retrieve non-integer granules of the MergeTree engine in order to meet restrictions on the block size specified in the preferred\_block\_size\_bytes setting. The purpose is to reduce the consumption of RAM and increase cache locality when processing queries from tables with large columns. -- Efficient use of indexes that contain expressions like `toStartOfHour(x)` for conditions like `toStartOfHour(x) op сonstexpr.` -- Added new settings for MergeTree engines (the merge\_tree section in config.xml): - - replicated\_deduplication\_window\_seconds sets the number of seconds allowed for deduplicating inserts in Replicated tables. - - cleanup\_delay\_period sets how often to start cleanup to remove outdated data. - - replicated\_can\_become\_leader can prevent a replica from becoming the leader (and assigning merges). -- Accelerated cleanup to remove outdated data from ZooKeeper. -- Multiple improvements and fixes for clustered DDL queries. Of particular interest is the new setting distributed\_ddl\_task\_timeout, which limits the time to wait for a response from the servers in the cluster. If a ddl request has not been performed on all hosts, a response will contain a timeout error and a request will be executed in an async mode. -- Improved display of stack traces in the server logs. -- Added the “none” value for the compression method. 
-- You can use multiple dictionaries\_config sections in config.xml.
-- It is possible to connect to MySQL through a socket in the file system.
-- The system.parts table has a new column with information about the size of marks, in bytes.
-
-#### Bug fixes: {#bug-fixes-4}
-
-- Distributed tables using a Merge table now work correctly for a SELECT query with a condition on the `_table` field.
-- Fixed a rare race condition in ReplicatedMergeTree when checking data parts.
-- Fixed possible freezing on “leader election” when starting a server.
-- The max\_replica\_delay\_for\_distributed\_queries setting was ignored when using a local replica of the data source. This has been fixed.
-- Fixed incorrect behavior of `ALTER TABLE CLEAR COLUMN IN PARTITION` when attempting to clean a non-existing column.
-- Fixed an exception in the multiIf function when using empty arrays or strings.
-- Fixed excessive memory allocations when deserializing Native format.
-- Fixed incorrect auto-update of Trie dictionaries.
-- Fixed an exception when running queries with a GROUP BY clause from a Merge table when using SAMPLE.
-- Fixed a crash of GROUP BY when using distributed\_aggregation\_memory\_efficient=1.
-- Now you can specify the database.table on the right side of IN and JOIN.
-- Too many threads were used for parallel aggregation. This has been fixed.
-- Fixed how the “if” function works with FixedString arguments.
-- SELECT worked incorrectly from a Distributed table for shards with a weight of 0. This has been fixed.
-- Running `CREATE VIEW IF EXISTS` no longer causes crashes.
-- Fixed incorrect behavior when input\_format\_skip\_unknown\_fields=1 is set and there are negative numbers.
-- Fixed an infinite loop in the `dictGetHierarchy()` function if there is some invalid data in the dictionary.
-- Fixed `Syntax error: unexpected (...)` errors when running distributed queries with subqueries in an IN or JOIN clause and Merge tables.
-- Fixed an incorrect interpretation of a SELECT query from Dictionary tables.
-- Fixed the “Cannot mremap” error when using arrays in IN and JOIN clauses with more than 2 billion elements.
-- Fixed the failover for dictionaries with MySQL as the source.
-
-#### Improved workflow for developing and assembling ClickHouse: {#improved-workflow-for-developing-and-assembling-clickhouse-1}
-
-- Builds can be assembled in Arcadia.
-- You can use gcc 7 to compile ClickHouse.
-- Parallel builds using ccache+distcc are faster now.
-
-### ClickHouse release 1.1.54245, 2017-07-04 {#clickhouse-release-1-1-54245-2017-07-04}
-
-#### New features: {#new-features-5}
-
-- Distributed DDL (for example, `CREATE TABLE ON CLUSTER`)
-- The replicated query `ALTER TABLE CLEAR COLUMN IN PARTITION`.
-- The engine for Dictionary tables (access to dictionary data in the form of a table).
-- Dictionary database engine (this type of database automatically has Dictionary tables available for all the connected external dictionaries).
-- You can check for updates to the dictionary by sending a request to the source.
-- Qualified column names.
-- Quoting identifiers using double quotation marks.
-- Sessions in the HTTP interface.
-- The OPTIMIZE query for a Replicated table can run not only on the leader.
-
-#### Backward incompatible changes: {#backward-incompatible-changes-2}
-
-- Removed SET GLOBAL.
-
-#### Minor changes: {#minor-changes}
-
-- Now after an alert is triggered, the log prints the full stack trace.
-- Relaxed the verification of the number of damaged/extra data parts at startup (there were too many false positives).
-
-#### Bug fixes: {#bug-fixes-5}
-
-- Fixed a bad connection “sticking” when inserting into a Distributed table.
-- GLOBAL IN now works for a query from a Merge table that looks at a Distributed table.
-- The incorrect number of cores was detected on a Google Compute Engine virtual machine. This has been fixed.
-- Changes in how an executable source of cached external dictionaries works.
-- Fixed the comparison of strings containing null characters.
-- Fixed the comparison of Float32 primary key fields with constants.
-- Previously, an incorrect estimate of the size of a field could lead to overly large allocations.
-- Fixed a crash when querying a Nullable column added to a table using ALTER.
-- Fixed a crash when sorting by a Nullable column, if the number of rows is less than LIMIT.
-- Fixed an ORDER BY subquery consisting of only constant values.
-- Previously, a Replicated table could remain in an invalid state after a failed DROP TABLE.
-- Aliases for scalar subqueries with empty results are no longer lost.
-- Now a query that used compilation does not fail with an error if the .so file gets damaged.
diff --git a/docs/zh/changelog/2018.md b/docs/zh/changelog/2018.md
deleted file mode 100644
index 49bef18cbf3..00000000000
--- a/docs/zh/changelog/2018.md
+++ /dev/null
@@ -1,1060 +0,0 @@
----
-en_copy: true
----
-
-## ClickHouse release 18.16 {#clickhouse-release-18-16}
-
-### ClickHouse release 18.16.1, 2018-12-21 {#clickhouse-release-18-16-1-2018-12-21}
-
-#### Bug fixes: {#bug-fixes}
-
-- Fixed an error that led to problems with updating dictionaries with the ODBC source. [\#3825](https://github.com/ClickHouse/ClickHouse/issues/3825), [\#3829](https://github.com/ClickHouse/ClickHouse/issues/3829)
-- JIT compilation of aggregate functions now works with LowCardinality columns. [\#3838](https://github.com/ClickHouse/ClickHouse/issues/3838)
-
-#### Improvements: {#improvements}
-
-- Added the `low_cardinality_allow_in_native_format` setting (enabled by default). When disabled, LowCardinality columns will be converted to ordinary columns for SELECT queries and ordinary columns will be expected for INSERT queries. [\#3879](https://github.com/ClickHouse/ClickHouse/pull/3879)
-
-#### Build improvements: {#build-improvements}
-
-- Fixes for builds on macOS and ARM.
-
-### ClickHouse release 18.16.0, 2018-12-14 {#clickhouse-release-18-16-0-2018-12-14}
-
-#### New features: {#new-features}
-
-- `DEFAULT` expressions are evaluated for missing fields when loading data in semi-structured input formats (`JSONEachRow`, `TSKV`). The feature is enabled with the `insert_sample_with_metadata` setting. [\#3555](https://github.com/ClickHouse/ClickHouse/pull/3555)
-- The `ALTER TABLE` query now has the `MODIFY ORDER BY` action for changing the sorting key when adding or removing a table column. This is useful for tables in the `MergeTree` family that perform additional tasks when merging based on this sorting key, such as `SummingMergeTree`, `AggregatingMergeTree`, and so on. [\#3581](https://github.com/ClickHouse/ClickHouse/pull/3581) [\#3755](https://github.com/ClickHouse/ClickHouse/pull/3755)
-- For tables in the `MergeTree` family, now you can specify a different sorting key (`ORDER BY`) and index (`PRIMARY KEY`). The sorting key can be longer than the index. [\#3581](https://github.com/ClickHouse/ClickHouse/pull/3581)
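-
-A minimal sketch of the two entries above (the table and column names are hypothetical; the `PRIMARY KEY` tuple must remain a prefix of the `ORDER BY` tuple):
-
-``` sql
-CREATE TABLE user_actions
-(
-    `CounterID` UInt32,
-    `EventDate` Date,
-    `UserID` UInt64
-)
-ENGINE = MergeTree
-PRIMARY KEY (CounterID, EventDate)
-ORDER BY (CounterID, EventDate, UserID);
-
--- MODIFY ORDER BY can extend the sorting key together with ADD COLUMN:
-ALTER TABLE user_actions
-    ADD COLUMN Banner String,
-    MODIFY ORDER BY (CounterID, EventDate, UserID, Banner);
-```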
-- Added the `hdfs` table function and the `HDFS` table engine for importing and exporting data to HDFS. [chenxing-xc](https://github.com/ClickHouse/ClickHouse/pull/3617)
-- Added functions for working with base64: `base64Encode`, `base64Decode`, `tryBase64Decode`. [Alexander Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/3350)
-- Now you can use a parameter to configure the precision of the `uniqCombined` aggregate function (select the number of HyperLogLog cells). [\#3406](https://github.com/ClickHouse/ClickHouse/pull/3406)
-- Added the `system.contributors` table that contains the names of everyone who made commits in ClickHouse. [\#3452](https://github.com/ClickHouse/ClickHouse/pull/3452)
-- Added the ability to omit the partition for the `ALTER TABLE ... FREEZE` query in order to back up all partitions at once. [\#3514](https://github.com/ClickHouse/ClickHouse/pull/3514)
-- Added `dictGet` and `dictGetOrDefault` functions that don’t require specifying the type of return value. The type is determined automatically from the dictionary description. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3564)
-- Now you can specify comments for a column in the table description and change it using `ALTER`. [\#3377](https://github.com/ClickHouse/ClickHouse/pull/3377)
-- Reading is supported for `Join` type tables with simple keys. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3728)
-- Now you can specify the options `join_use_nulls`, `max_rows_in_join`, `max_bytes_in_join`, and `join_overflow_mode` when creating a `Join` type table. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3728)
-- Added the `joinGet` function that allows you to use a `Join` type table like a dictionary. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3728)
-- Added the `partition_key`, `sorting_key`, `primary_key`, and `sampling_key` columns to the `system.tables` table in order to provide information about table keys. [\#3609](https://github.com/ClickHouse/ClickHouse/pull/3609)
-- Added the `is_in_partition_key`, `is_in_sorting_key`, `is_in_primary_key`, and `is_in_sampling_key` columns to the `system.columns` table. [\#3609](https://github.com/ClickHouse/ClickHouse/pull/3609)
-- Added the `min_time` and `max_time` columns to the `system.parts` table. These columns are populated when the partitioning key is an expression consisting of `DateTime` columns. [Emmanuel Donin de Rosière](https://github.com/ClickHouse/ClickHouse/pull/3800)
-
-#### Bug fixes: {#bug-fixes-1}
-
-- Fixes and performance improvements for the `LowCardinality` data type. `GROUP BY` using `LowCardinality(Nullable(...))`. Getting the values of `extremes`. Processing higher-order functions. `LEFT ARRAY JOIN`. Distributed `GROUP BY`. Functions that return `Array`. Execution of `ORDER BY`. Writing to `Distributed` tables (nicelulu). Backward compatibility for `INSERT` queries from old clients that implement the `Native` protocol. Support for `LowCardinality` for `JOIN`. Improved performance when working in a single stream.
[\#3823](https://github.com/ClickHouse/ClickHouse/pull/3823) [\#3803](https://github.com/ClickHouse/ClickHouse/pull/3803) [\#3799](https://github.com/ClickHouse/ClickHouse/pull/3799) [\#3769](https://github.com/ClickHouse/ClickHouse/pull/3769) [\#3744](https://github.com/ClickHouse/ClickHouse/pull/3744) [\#3681](https://github.com/ClickHouse/ClickHouse/pull/3681) [\#3651](https://github.com/ClickHouse/ClickHouse/pull/3651) [\#3649](https://github.com/ClickHouse/ClickHouse/pull/3649) [\#3641](https://github.com/ClickHouse/ClickHouse/pull/3641) [\#3632](https://github.com/ClickHouse/ClickHouse/pull/3632) [\#3568](https://github.com/ClickHouse/ClickHouse/pull/3568) [\#3523](https://github.com/ClickHouse/ClickHouse/pull/3523) [\#3518](https://github.com/ClickHouse/ClickHouse/pull/3518) -- Fixed how the `select_sequential_consistency` option works. Previously, when this setting was enabled, an incomplete result was sometimes returned after beginning to write to a new partition. [\#2863](https://github.com/ClickHouse/ClickHouse/pull/2863) -- Databases are correctly specified when executing DDL `ON CLUSTER` queries and `ALTER UPDATE/DELETE`. [\#3772](https://github.com/ClickHouse/ClickHouse/pull/3772) [\#3460](https://github.com/ClickHouse/ClickHouse/pull/3460) -- Databases are correctly specified for subqueries inside a VIEW. [\#3521](https://github.com/ClickHouse/ClickHouse/pull/3521) -- Fixed a bug in `PREWHERE` with `FINAL` for `VersionedCollapsingMergeTree`. [7167bfd7](https://github.com/ClickHouse/ClickHouse/commit/7167bfd7b365538f7a91c4307ad77e552ab4e8c1) -- Now you can use `KILL QUERY` to cancel queries that have not started yet because they are waiting for the table to be locked. [\#3517](https://github.com/ClickHouse/ClickHouse/pull/3517) -- Corrected date and time calculations if the clocks were moved back at midnight (this happens in Iran, and happened in Moscow from 1981 to 1983). Previously, this led to the time being reset a day earlier than necessary, and also caused incorrect formatting of the date and time in text format. [\#3819](https://github.com/ClickHouse/ClickHouse/pull/3819) -- Fixed bugs in some cases of `VIEW` and subqueries that omit the database. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3521) -- Fixed a race condition when simultaneously reading from a `MATERIALIZED VIEW` and deleting a `MATERIALIZED VIEW` due to not locking the internal `MATERIALIZED VIEW`. [\#3404](https://github.com/ClickHouse/ClickHouse/pull/3404) [\#3694](https://github.com/ClickHouse/ClickHouse/pull/3694) -- Fixed the error `Lock handler cannot be nullptr.` [\#3689](https://github.com/ClickHouse/ClickHouse/pull/3689) -- Fixed query processing when the `compile_expressions` option is enabled (it’s enabled by default). Nondeterministic constant expressions like the `now` function are no longer unfolded. [\#3457](https://github.com/ClickHouse/ClickHouse/pull/3457) -- Fixed a crash when specifying a non-constant scale argument in `toDecimal32/64/128` functions. -- Fixed an error when trying to insert an array with `NULL` elements in the `Values` format into a column of type `Array` without `Nullable` (if `input_format_values_interpret_expressions` = 1). [\#3487](https://github.com/ClickHouse/ClickHouse/pull/3487) [\#3503](https://github.com/ClickHouse/ClickHouse/pull/3503) -- Fixed continuous error logging in `DDLWorker` if ZooKeeper is not available. 
[8f50c620](https://github.com/ClickHouse/ClickHouse/commit/8f50c620334988b28018213ec0092fe6423847e2) -- Fixed the return type for `quantile*` functions from `Date` and `DateTime` types of arguments. [\#3580](https://github.com/ClickHouse/ClickHouse/pull/3580) -- Fixed the `WITH` clause if it specifies a simple alias without expressions. [\#3570](https://github.com/ClickHouse/ClickHouse/pull/3570) -- Fixed processing of queries with named sub-queries and qualified column names when `enable_optimize_predicate_expression` is enabled. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3588) -- Fixed the error `Attempt to attach to nullptr thread group` when working with materialized views. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3623) -- Fixed a crash when passing certain incorrect arguments to the `arrayReverse` function. [73e3a7b6](https://github.com/ClickHouse/ClickHouse/commit/73e3a7b662161d6005e7727d8a711b930386b871) -- Fixed the buffer overflow in the `extractURLParameter` function. Improved performance. Added correct processing of strings containing zero bytes. [141e9799](https://github.com/ClickHouse/ClickHouse/commit/141e9799e49201d84ea8e951d1bed4fb6d3dacb5) -- Fixed buffer overflow in the `lowerUTF8` and `upperUTF8` functions. Removed the ability to execute these functions over `FixedString` type arguments. [\#3662](https://github.com/ClickHouse/ClickHouse/pull/3662) -- Fixed a rare race condition when deleting `MergeTree` tables. [\#3680](https://github.com/ClickHouse/ClickHouse/pull/3680) -- Fixed a race condition when reading from `Buffer` tables and simultaneously performing `ALTER` or `DROP` on the target tables. [\#3719](https://github.com/ClickHouse/ClickHouse/pull/3719) -- Fixed a segfault if the `max_temporary_non_const_columns` limit was exceeded. [\#3788](https://github.com/ClickHouse/ClickHouse/pull/3788) - -#### Improvements: {#improvements-1} - -- The server does not write the processed configuration files to the `/etc/clickhouse-server/` directory. Instead, it saves them in the `preprocessed_configs` directory inside `path`. This means that the `/etc/clickhouse-server/` directory doesn’t have write access for the `clickhouse` user, which improves security. [\#2443](https://github.com/ClickHouse/ClickHouse/pull/2443) -- The `min_merge_bytes_to_use_direct_io` option is set to 10 GiB by default. A merge that forms large parts of tables from the MergeTree family will be performed in `O_DIRECT` mode, which prevents excessive page cache eviction. [\#3504](https://github.com/ClickHouse/ClickHouse/pull/3504) -- Accelerated server start when there is a very large number of tables. [\#3398](https://github.com/ClickHouse/ClickHouse/pull/3398) -- Added a connection pool and HTTP `Keep-Alive` for connections between replicas. [\#3594](https://github.com/ClickHouse/ClickHouse/pull/3594) -- If the query syntax is invalid, the `400 Bad Request` code is returned in the `HTTP` interface (500 was returned previously). [31bc680a](https://github.com/ClickHouse/ClickHouse/commit/31bc680ac5f4bb1d0360a8ba4696fa84bb47d6ab) -- The `join_default_strictness` option is set to `ALL` by default for compatibility. [120e2cbe](https://github.com/ClickHouse/ClickHouse/commit/120e2cbe2ff4fbad626c28042d9b28781c805afe) -- Removed logging to `stderr` from the `re2` library for invalid or complex regular expressions. 
[\#3723](https://github.com/ClickHouse/ClickHouse/pull/3723)
-- Additions for the `Kafka` table engine: checks for subscriptions before beginning to read from Kafka, and the kafka\_max\_block\_size setting for the table. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3396)
-- The `cityHash64`, `farmHash64`, `metroHash64`, `sipHash64`, `halfMD5`, `murmurHash2_32`, `murmurHash2_64`, `murmurHash3_32`, and `murmurHash3_64` functions now work for any number of arguments and for arguments in the form of tuples. [\#3451](https://github.com/ClickHouse/ClickHouse/pull/3451) [\#3519](https://github.com/ClickHouse/ClickHouse/pull/3519)
-- The `arrayReverse` function now works with any types of arrays. [73e3a7b6](https://github.com/ClickHouse/ClickHouse/commit/73e3a7b662161d6005e7727d8a711b930386b871)
-- Added an optional parameter: the slot size for the `timeSlots` function. [Kirill Shvakov](https://github.com/ClickHouse/ClickHouse/pull/3724)
-- For `FULL` and `RIGHT JOIN`, the `max_block_size` setting is used for a stream of non-joined data from the right table. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3699)
-- Added the `--secure` command line parameter in `clickhouse-benchmark` and `clickhouse-performance-test` to enable TLS. [\#3688](https://github.com/ClickHouse/ClickHouse/pull/3688) [\#3690](https://github.com/ClickHouse/ClickHouse/pull/3690)
-- Type conversion is now performed when the structure of a `Buffer` type table does not match the structure of the destination table. [Vitaly Baranov](https://github.com/ClickHouse/ClickHouse/pull/3603)
-- Added the `tcp_keep_alive_timeout` option to enable keep-alive packets after inactivity for the specified time interval. [\#3441](https://github.com/ClickHouse/ClickHouse/pull/3441)
-- Removed unnecessary quoting of values for the partition key in the `system.parts` table if it consists of a single column. [\#3652](https://github.com/ClickHouse/ClickHouse/pull/3652)
-- The modulo function works for `Date` and `DateTime` data types. [\#3385](https://github.com/ClickHouse/ClickHouse/pull/3385)
-- Added synonyms for the `POWER`, `LN`, `LCASE`, `UCASE`, `REPLACE`, `LOCATE`, `SUBSTR`, and `MID` functions. [\#3774](https://github.com/ClickHouse/ClickHouse/pull/3774) [\#3763](https://github.com/ClickHouse/ClickHouse/pull/3763) Some function names are case-insensitive for compatibility with the SQL standard. Added syntactic sugar `SUBSTRING(expr FROM start FOR length)` for compatibility with SQL (see the sketch below). [\#3804](https://github.com/ClickHouse/ClickHouse/pull/3804)
-- Added the ability to `mlock` memory pages corresponding to `clickhouse-server` executable code to prevent it from being forced out of memory. This feature is disabled by default. [\#3553](https://github.com/ClickHouse/ClickHouse/pull/3553)
-- Improved performance when reading from `O_DIRECT` (with the `min_bytes_to_use_direct_io` option enabled). [\#3405](https://github.com/ClickHouse/ClickHouse/pull/3405)
-- Improved performance of the `dictGet...OrDefault` function for a constant key argument and a non-constant default argument. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3563)
-- The `firstSignificantSubdomain` function now processes the domains `gov`, `mil`, and `edu`. [Igor Hatarist](https://github.com/ClickHouse/ClickHouse/pull/3601) Improved performance. [\#3628](https://github.com/ClickHouse/ClickHouse/pull/3628)
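-
-Hypothetical calls illustrating the `SUBSTRING` sugar and the expanded domain list; the second result is an assumption based on the entry above:
-
-``` sql
-SELECT SUBSTRING('ClickHouse' FROM 6 FOR 5);                      -- 'House'
-SELECT firstSignificantSubdomain('https://www.example.gov.uk/');  -- expected: 'example'
-```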
-- Ability to specify custom environment variables for starting `clickhouse-server` using the `SYS-V init.d` script by defining `CLICKHOUSE_PROGRAM_ENV` in `/etc/default/clickhouse`. [Pavlo Bashynskyi](https://github.com/ClickHouse/ClickHouse/pull/3612)
-- Correct return code for the clickhouse-server init script. [\#3516](https://github.com/ClickHouse/ClickHouse/pull/3516)
-- The `system.metrics` table now has the `VersionInteger` metric, and `system.build_options` has the added line `VERSION_INTEGER`, which contains the numeric form of the ClickHouse version, such as `18016000`. [\#3644](https://github.com/ClickHouse/ClickHouse/pull/3644)
-- Removed the ability to compare the `Date` type with a number to avoid potential errors like `date = 2018-12-17`, where quotes around the date are omitted by mistake. [\#3687](https://github.com/ClickHouse/ClickHouse/pull/3687)
-- Fixed the behavior of stateful functions like `rowNumberInAllBlocks`. They previously output a result that was one number larger due to starting during query analysis. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3729)
-- If the `force_restore_data` file can’t be deleted, an error message is displayed. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3794)
-
-#### Build improvements: {#build-improvements-1}
-
-- Updated the `jemalloc` library, which fixes a potential memory leak. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3557)
-- Profiling with `jemalloc` is enabled by default for debug builds. [2cc82f5c](https://github.com/ClickHouse/ClickHouse/commit/2cc82f5cbe266421cd4c1165286c2c47e5ffcb15)
-- Added the ability to run integration tests when only `Docker` is installed on the system. [\#3650](https://github.com/ClickHouse/ClickHouse/pull/3650)
-- Added a fuzz test of expressions in SELECT queries. [\#3442](https://github.com/ClickHouse/ClickHouse/pull/3442)
-- Added a stress test for commits, which performs functional tests in parallel and in random order to detect more race conditions. [\#3438](https://github.com/ClickHouse/ClickHouse/pull/3438)
-- Improved the method for starting clickhouse-server in a Docker image. [Elghazal Ahmed](https://github.com/ClickHouse/ClickHouse/pull/3663)
-- For a Docker image, added support for initializing databases using files in the `/docker-entrypoint-initdb.d` directory. [Konstantin Lebedev](https://github.com/ClickHouse/ClickHouse/pull/3695)
-- Fixes for builds on ARM. [\#3709](https://github.com/ClickHouse/ClickHouse/pull/3709)
-
-#### Backward incompatible changes: {#backward-incompatible-changes}
-
-- Removed the ability to compare the `Date` type with a number. Instead of `toDate('2018-12-18') = 17883`, you must use explicit type conversion `= toDate(17883)`. [\#3687](https://github.com/ClickHouse/ClickHouse/pull/3687)
-
-## ClickHouse release 18.14 {#clickhouse-release-18-14}
-
-### ClickHouse release 18.14.19, 2018-12-19 {#clickhouse-release-18-14-19-2018-12-19}
-
-#### Bug fixes: {#bug-fixes-2}
-
-- Fixed an error that led to problems with updating dictionaries with the ODBC source. [\#3825](https://github.com/ClickHouse/ClickHouse/issues/3825), [\#3829](https://github.com/ClickHouse/ClickHouse/issues/3829)
-- Databases are correctly specified when executing DDL `ON CLUSTER` queries. [\#3460](https://github.com/ClickHouse/ClickHouse/pull/3460)
-- Fixed a segfault if the `max_temporary_non_const_columns` limit was exceeded.
[\#3788](https://github.com/ClickHouse/ClickHouse/pull/3788)
-
-#### Build improvements: {#build-improvements-2}
-
-- Fixes for builds on ARM.
-
-### ClickHouse release 18.14.18, 2018-12-04 {#clickhouse-release-18-14-18-2018-12-04}
-
-#### Bug fixes: {#bug-fixes-3}
-
-- Fixed an error in the `dictGet...` function for dictionaries of type `range`, if one of the arguments is constant and the other is not. [\#3751](https://github.com/ClickHouse/ClickHouse/pull/3751)
-- Fixed an error that caused the messages `netlink: '...': attribute type 1 has an invalid length` to be printed in the Linux kernel log; this happened only on sufficiently recent versions of the Linux kernel. [\#3749](https://github.com/ClickHouse/ClickHouse/pull/3749)
-- Fixed segfault in the function `empty` for an argument of `FixedString` type. [Daniel, Dao Quang Minh](https://github.com/ClickHouse/ClickHouse/pull/3703)
-- Fixed excessive memory allocation when using a large value of the `max_query_size` setting (a memory chunk of `max_query_size` bytes was preallocated at once). [\#3720](https://github.com/ClickHouse/ClickHouse/pull/3720)
-
-#### Build changes: {#build-changes}
-
-- Fixed build with LLVM/Clang libraries of version 7 from the OS packages (these libraries are used for runtime query compilation). [\#3582](https://github.com/ClickHouse/ClickHouse/pull/3582)
-
-### ClickHouse release 18.14.17, 2018-11-30 {#clickhouse-release-18-14-17-2018-11-30}
-
-#### Bug fixes: {#bug-fixes-4}
-
-- Fixed cases when the ODBC bridge process did not terminate with the main server process. [\#3642](https://github.com/ClickHouse/ClickHouse/pull/3642)
-- Fixed synchronous insertion into the `Distributed` table with a column list that differs from the column list of the remote table. [\#3673](https://github.com/ClickHouse/ClickHouse/pull/3673)
-- Fixed a rare race condition that can lead to a crash when dropping a MergeTree table. [\#3643](https://github.com/ClickHouse/ClickHouse/pull/3643)
-- Fixed a query deadlock in the case when query thread creation fails with the `Resource temporarily unavailable` error. [\#3643](https://github.com/ClickHouse/ClickHouse/pull/3643)
-- Fixed parsing of the `ENGINE` clause when the `CREATE AS table` syntax was used and the `ENGINE` clause was specified before the `AS table` (the error resulted in ignoring the specified engine). [\#3692](https://github.com/ClickHouse/ClickHouse/pull/3692)
-
-### ClickHouse release 18.14.15, 2018-11-21 {#clickhouse-release-18-14-15-2018-11-21}
-
-#### Bug fixes: {#bug-fixes-5}
-
-- The size of a memory chunk was overestimated while deserializing a column of type `Array(String)`, which led to “Memory limit exceeded” errors. The issue appeared in version 18.12.13. [\#3589](https://github.com/ClickHouse/ClickHouse/issues/3589)
-
-### ClickHouse release 18.14.14, 2018-11-20 {#clickhouse-release-18-14-14-2018-11-20}
-
-#### Bug fixes: {#bug-fixes-6}
-
-- Fixed `ON CLUSTER` queries when the cluster is configured as secure (the `<secure>` flag). [\#3599](https://github.com/ClickHouse/ClickHouse/pull/3599)
-
-#### Build changes: {#build-changes-1}
-
-- Fixed build problems (llvm-7 from the system, macOS). [\#3582](https://github.com/ClickHouse/ClickHouse/pull/3582)
-
-### ClickHouse release 18.14.13, 2018-11-08 {#clickhouse-release-18-14-13-2018-11-08}
-
-#### Bug fixes: {#bug-fixes-7}
-
-- Fixed the `Block structure mismatch in MergingSorted stream` error. [\#3162](https://github.com/ClickHouse/ClickHouse/issues/3162)
-- Fixed `ON CLUSTER` queries in the case when secure connections were turned on in the cluster config (the `<secure>` flag).
[\#3465](https://github.com/ClickHouse/ClickHouse/pull/3465)
-- Fixed an error in queries that used `SAMPLE`, `PREWHERE` and alias columns. [\#3543](https://github.com/ClickHouse/ClickHouse/pull/3543)
-- Fixed a rare `unknown compression method` error when the `min_bytes_to_use_direct_io` setting was enabled. [\#3544](https://github.com/ClickHouse/ClickHouse/pull/3544)
-
-#### Performance improvements: {#performance-improvements}
-
-- Fixed performance regression of queries with `GROUP BY` of columns of UInt16 or Date type when executing on AMD EPYC processors. [Igor Lapko](https://github.com/ClickHouse/ClickHouse/pull/3512)
-- Fixed performance regression of queries that process long strings. [\#3530](https://github.com/ClickHouse/ClickHouse/pull/3530)
-
-#### Build improvements: {#build-improvements-3}
-
-- Improvements for simplifying the Arcadia build. [\#3475](https://github.com/ClickHouse/ClickHouse/pull/3475), [\#3535](https://github.com/ClickHouse/ClickHouse/pull/3535)
-
-### ClickHouse release 18.14.12, 2018-11-02 {#clickhouse-release-18-14-12-2018-11-02}
-
-#### Bug fixes: {#bug-fixes-8}
-
-- Fixed a crash on joining two unnamed subqueries. [\#3505](https://github.com/ClickHouse/ClickHouse/pull/3505)
-- Fixed generating incorrect queries (with an empty `WHERE` clause) when querying external databases. [hotid](https://github.com/ClickHouse/ClickHouse/pull/3477)
-- Fixed using an incorrect timeout value in ODBC dictionaries. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3511)
-
-### ClickHouse release 18.14.11, 2018-10-29 {#clickhouse-release-18-14-11-2018-10-29}
-
-#### Bug fixes: {#bug-fixes-9}
-
-- Fixed the error `Block structure mismatch in UNION stream: different number of columns` in LIMIT queries. [\#2156](https://github.com/ClickHouse/ClickHouse/issues/2156)
-- Fixed errors when merging data in tables containing arrays inside Nested structures. [\#3397](https://github.com/ClickHouse/ClickHouse/pull/3397)
-- Fixed incorrect query results if the `merge_tree_uniform_read_distribution` setting is disabled (it is enabled by default). [\#3429](https://github.com/ClickHouse/ClickHouse/pull/3429)
-- Fixed an error on inserts to a Distributed table in Native format. [\#3411](https://github.com/ClickHouse/ClickHouse/issues/3411)
-
-### ClickHouse release 18.14.10, 2018-10-23 {#clickhouse-release-18-14-10-2018-10-23}
-
-- The `compile_expressions` setting (JIT compilation of expressions) is disabled by default. [\#3410](https://github.com/ClickHouse/ClickHouse/pull/3410)
-- The `enable_optimize_predicate_expression` setting is disabled by default.
-
-### ClickHouse release 18.14.9, 2018-10-16 {#clickhouse-release-18-14-9-2018-10-16}
-
-#### New features: {#new-features-1}
-
-- The `WITH CUBE` modifier for `GROUP BY` (the alternative syntax `GROUP BY CUBE(...)` is also available). [\#3172](https://github.com/ClickHouse/ClickHouse/pull/3172)
-- Added the `formatDateTime` function. [Alexandr Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/2770)
-- Added the `JDBC` table engine and `jdbc` table function (requires installing clickhouse-jdbc-bridge). [Alexandr Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/3210)
-- Added functions for working with the ISO week number: `toISOWeek`, `toISOYear`, `toStartOfISOYear`, and `toDayOfYear`. [\#3146](https://github.com/ClickHouse/ClickHouse/pull/3146)
-- Now you can use `Nullable` columns for `MySQL` and `ODBC` tables. [\#3362](https://github.com/ClickHouse/ClickHouse/pull/3362)
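-
-A sketch of the entry above with a hypothetical MySQL source; the `MySQL` engine arguments are `('host:port', 'database', 'table', 'user', 'password')`:
-
-``` sql
-CREATE TABLE mysql_users
-(
-    `id` UInt32,
-    `email` Nullable(String)  -- NULL values from MySQL are now preserved
-)
-ENGINE = MySQL('mysql-host:3306', 'app_db', 'users', 'reader', 'secret');
-
-SELECT count() FROM mysql_users WHERE email IS NULL;
-```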
-- Nested data structures can be read as nested objects in `JSONEachRow` format. Added the `input_format_import_nested_json` setting. [Veloman Yunkan](https://github.com/ClickHouse/ClickHouse/pull/3144)
-- Parallel processing is available for many `MATERIALIZED VIEW`s when inserting data. See the `parallel_view_processing` setting. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3208)
-- Added the `SYSTEM FLUSH LOGS` query (forced log flushes to system tables such as `query_log`). [\#3321](https://github.com/ClickHouse/ClickHouse/pull/3321)
-- Now you can use pre-defined `database` and `table` macros when declaring `Replicated` tables. [\#3251](https://github.com/ClickHouse/ClickHouse/pull/3251)
-- Added the ability to read `Decimal` type values in engineering notation (indicating powers of ten). [\#3153](https://github.com/ClickHouse/ClickHouse/pull/3153)
-
-#### Experimental features: {#experimental-features}
-
-- Optimization of the GROUP BY clause for `LowCardinality` data types. [\#3138](https://github.com/ClickHouse/ClickHouse/pull/3138)
-- Optimized calculation of expressions for `LowCardinality` data types. [\#3200](https://github.com/ClickHouse/ClickHouse/pull/3200)
-
-#### Improvements: {#improvements-2}
-
-- Significantly reduced memory consumption for queries with `ORDER BY` and `LIMIT`. See the `max_bytes_before_remerge_sort` setting. [\#3205](https://github.com/ClickHouse/ClickHouse/pull/3205)
-- In the absence of `JOIN` (`LEFT`, `INNER`, …), `INNER JOIN` is assumed. [\#3147](https://github.com/ClickHouse/ClickHouse/pull/3147)
-- Qualified asterisks work correctly in queries with `JOIN`. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3202)
-- The `ODBC` table engine correctly chooses the method for quoting identifiers in the SQL dialect of a remote database. [Alexandr Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/3210)
-- The `compile_expressions` setting (JIT compilation of expressions) is enabled by default.
-- Fixed behavior for simultaneous DROP DATABASE/TABLE IF EXISTS and CREATE DATABASE/TABLE IF NOT EXISTS. Previously, a `CREATE DATABASE ... IF NOT EXISTS` query could return the error message “File … already exists”, and the `CREATE TABLE ... IF NOT EXISTS` and `DROP TABLE IF EXISTS` queries could return `Table ... is creating or attaching right now`. [\#3101](https://github.com/ClickHouse/ClickHouse/pull/3101)
-- LIKE and IN expressions with a constant right half are passed to the remote server when querying from MySQL or ODBC tables. [\#3182](https://github.com/ClickHouse/ClickHouse/pull/3182)
-- Comparisons with constant expressions in a WHERE clause are passed to the remote server when querying from MySQL and ODBC tables. Previously, only comparisons with constants were passed. [\#3182](https://github.com/ClickHouse/ClickHouse/pull/3182)
-- Correct calculation of row width in the terminal for `Pretty` formats, including strings with hieroglyphs. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3257).
-- `ON CLUSTER` can be specified for `ALTER UPDATE` queries.
-- Improved performance for reading data in `JSONEachRow` format. [\#3332](https://github.com/ClickHouse/ClickHouse/pull/3332)
-- Added synonyms for the `LENGTH` and `CHARACTER_LENGTH` functions for compatibility. The `CONCAT` function is no longer case-sensitive. [\#3306](https://github.com/ClickHouse/ClickHouse/pull/3306)
-- Added the `TIMESTAMP` synonym for the `DateTime` type. [\#3390](https://github.com/ClickHouse/ClickHouse/pull/3390)
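-
-A small illustration of the synonym with hypothetical names; the `TIMESTAMP` column type below is read as `DateTime`:
-
-``` sql
-CREATE TABLE events_compat
-(
-    `id` UInt64,
-    `created_at` TIMESTAMP
-)
-ENGINE = MergeTree
-ORDER BY id;
-```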
-- There is always space reserved for query\_id in the server logs, even if the log line is not related to a query. This makes it easier to parse server text logs with third-party tools.
-- Memory consumption by a query is logged when it exceeds the next level of an integer number of gigabytes. [\#3205](https://github.com/ClickHouse/ClickHouse/pull/3205)
-- Added compatibility mode for the case when the client library that uses the Native protocol sends fewer columns by mistake than the server expects for the INSERT query. This scenario was possible when using the clickhouse-cpp library. Previously, this scenario caused the server to crash. [\#3171](https://github.com/ClickHouse/ClickHouse/pull/3171)
-- In a user-defined WHERE expression in `clickhouse-copier`, you can now use a `partition_key` alias (for additional filtering by source table partition). This is useful if the partitioning scheme changes during copying, but only changes slightly. [\#3166](https://github.com/ClickHouse/ClickHouse/pull/3166)
-- The workflow of the `Kafka` engine has been moved to a background thread pool in order to automatically reduce the speed of data reading at high loads. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3215).
-- Support for reading `Tuple` and `Nested` values of structures like `struct` in the `Cap'n'Proto` format. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3216)
-- The list of top-level domains for the `firstSignificantSubdomain` function now includes the domain `biz`. [decaseal](https://github.com/ClickHouse/ClickHouse/pull/3219)
-- In the configuration of external dictionaries, `null_value` is interpreted as the value of the default data type. [\#3330](https://github.com/ClickHouse/ClickHouse/pull/3330)
-- Support for the `intDiv` and `intDivOrZero` functions for `Decimal`. [b48402e8](https://github.com/ClickHouse/ClickHouse/commit/b48402e8712e2b9b151e0eef8193811d433a1264)
-- Support for the `Date`, `DateTime`, `UUID`, and `Decimal` types as a key for the `sumMap` aggregate function. [\#3281](https://github.com/ClickHouse/ClickHouse/pull/3281)
-- Support for the `Decimal` data type in external dictionaries. [\#3324](https://github.com/ClickHouse/ClickHouse/pull/3324)
-- Support for the `Decimal` data type in `SummingMergeTree` tables. [\#3348](https://github.com/ClickHouse/ClickHouse/pull/3348)
-- Added specializations for `UUID` in `if`. [\#3366](https://github.com/ClickHouse/ClickHouse/pull/3366)
-- Reduced the number of `open` and `close` system calls when reading from a `MergeTree` table. [\#3283](https://github.com/ClickHouse/ClickHouse/pull/3283)
-- A `TRUNCATE TABLE` query can be executed on any replica (the query is passed to the leader replica). [Kirill Shvakov](https://github.com/ClickHouse/ClickHouse/pull/3375)
-
-#### Bug fixes: {#bug-fixes-10}
-
-- Fixed an issue with `Dictionary` tables for `range_hashed` dictionaries. This error occurred in version 18.12.17. [\#1702](https://github.com/ClickHouse/ClickHouse/pull/1702)
-- Fixed an error when loading `range_hashed` dictionaries (the message `Unsupported type Nullable (...)`). This error occurred in version 18.12.17. [\#3362](https://github.com/ClickHouse/ClickHouse/pull/3362)
-- Fixed errors in the `pointInPolygon` function due to the accumulation of inaccurate calculations for polygons with a large number of vertices located close to each other. [\#3331](https://github.com/ClickHouse/ClickHouse/pull/3331) [\#3341](https://github.com/ClickHouse/ClickHouse/pull/3341)
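-
-For reference, basic usage is unchanged by the accuracy fix above (a small convex polygon; the point lies inside it):
-
-``` sql
-SELECT pointInPolygon((3., 3.), [(6., 0.), (8., 4.), (5., 8.), (0., 2.)]) AS inside;  -- inside = 1
-```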
-- If after merging data parts, the checksum for the resulting part differs from the result of the same merge in another replica, the result of the merge is deleted and the data part is downloaded from the other replica (this is the correct behavior). But after downloading the data part, it couldn’t be added to the working set because of an error that the part already exists (because the data part was deleted with some delay after the merge). This led to cyclical attempts to download the same data. [\#3194](https://github.com/ClickHouse/ClickHouse/pull/3194)
-- Fixed incorrect calculation of total memory consumption by queries (because of incorrect calculation, the `max_memory_usage_for_all_queries` setting worked incorrectly and the `MemoryTracking` metric had an incorrect value). This error occurred in version 18.12.13. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3344)
-- Fixed the functionality of `CREATE TABLE ... ON CLUSTER ... AS SELECT ...`. This error occurred in version 18.12.13. [\#3247](https://github.com/ClickHouse/ClickHouse/pull/3247)
-- Fixed unnecessary preparation of data structures for `JOIN`s on the server that initiates the query if the `JOIN` is only performed on remote servers. [\#3340](https://github.com/ClickHouse/ClickHouse/pull/3340)
-- Fixed bugs in the `Kafka` engine: deadlocks after exceptions when starting to read data, and locks upon completion [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3215).
-- For `Kafka` tables, the optional `schema` parameter was not passed (the schema of the `Cap'n'Proto` format). [Vojtech Splichal](https://github.com/ClickHouse/ClickHouse/pull/3150)
-- If the ensemble of ZooKeeper servers has servers that accept the connection but then immediately close it instead of responding to the handshake, ClickHouse chooses to connect to another server. Previously, this produced the error `Cannot read all data. Bytes read: 0. Bytes expected: 4.` and the server couldn’t start. [8218cf3a](https://github.com/ClickHouse/ClickHouse/commit/8218cf3a5f39a43401953769d6d12a0bb8d29da9)
-- If the ensemble of ZooKeeper servers contains servers for which the DNS query returns an error, these servers are ignored. [17b8e209](https://github.com/ClickHouse/ClickHouse/commit/17b8e209221061325ad7ba0539f03c6e65f87f29)
-- Fixed type conversion between `Date` and `DateTime` when inserting data in the `VALUES` format (if `input_format_values_interpret_expressions = 1`). Previously, the conversion was performed between the numerical value of the number of days in Unix Epoch time and the Unix timestamp, which led to unexpected results. [\#3229](https://github.com/ClickHouse/ClickHouse/pull/3229)
-- Corrected type conversion between `Decimal` and integer numbers. [\#3211](https://github.com/ClickHouse/ClickHouse/pull/3211)
-- Fixed errors in the `enable_optimize_predicate_expression` setting. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3231)
-- Fixed a parsing error in CSV format with floating-point numbers if a non-default CSV separator is used, such as `;`. [\#3155](https://github.com/ClickHouse/ClickHouse/pull/3155)
-- Fixed the `arrayCumSumNonNegative` function (it does not accumulate negative values if the accumulator is less than zero). [Aleksey Studnev](https://github.com/ClickHouse/ClickHouse/pull/3163)
-- Fixed how `Merge` tables work on top of `Distributed` tables when using `PREWHERE`.
[\#3165](https://github.com/ClickHouse/ClickHouse/pull/3165) -- Bug fixes in the `ALTER UPDATE` query. -- Fixed bugs in the `odbc` table function that appeared in version 18.12. [\#3197](https://github.com/ClickHouse/ClickHouse/pull/3197) -- Fixed the operation of aggregate functions with `StateArray` combinators. [\#3188](https://github.com/ClickHouse/ClickHouse/pull/3188) -- Fixed a crash when dividing a `Decimal` value by zero. [69dd6609](https://github.com/ClickHouse/ClickHouse/commit/69dd6609193beb4e7acd3e6ad216eca0ccfb8179) -- Fixed output of types for operations using `Decimal` and integer arguments. [\#3224](https://github.com/ClickHouse/ClickHouse/pull/3224) -- Fixed the segfault during `GROUP BY` on `Decimal128`. [3359ba06](https://github.com/ClickHouse/ClickHouse/commit/3359ba06c39fcd05bfdb87d6c64154819621e13a) -- The `log_query_threads` setting (logging information about each thread of query execution) now takes effect only if the `log_queries` option (logging information about queries) is set to 1. Since the `log_query_threads` option is enabled by default, information about threads was previously logged even if query logging was disabled. [\#3241](https://github.com/ClickHouse/ClickHouse/pull/3241) -- Fixed an error in the distributed operation of the quantiles aggregate function (the error message `Not found column quantile...`). [292a8855](https://github.com/ClickHouse/ClickHouse/commit/292a885533b8e3b41ce8993867069d14cbd5a664) -- Fixed the compatibility problem when working on a cluster of version 18.12.17 servers and older servers at the same time. For distributed queries with GROUP BY keys of both fixed and non-fixed length, if there was a large amount of data to aggregate, the returned data was not always fully aggregated (two different rows contained the same aggregation keys). [\#3254](https://github.com/ClickHouse/ClickHouse/pull/3254) -- Fixed handling of substitutions in `clickhouse-performance-test`, if the query contains only part of the substitutions declared in the test. [\#3263](https://github.com/ClickHouse/ClickHouse/pull/3263) -- Fixed an error when using `FINAL` with `PREWHERE`. [\#3298](https://github.com/ClickHouse/ClickHouse/pull/3298) -- Fixed an error when using `PREWHERE` over columns that were added during `ALTER`. [\#3298](https://github.com/ClickHouse/ClickHouse/pull/3298) -- Added a check for the absence of `arrayJoin` for `DEFAULT` and `MATERIALIZED` expressions. Previously, `arrayJoin` led to an error when inserting data. [\#3337](https://github.com/ClickHouse/ClickHouse/pull/3337) -- Added a check for the absence of `arrayJoin` in a `PREWHERE` clause. Previously, this led to messages like `Size ... doesn't match` or `Unknown compression method` when executing queries. [\#3357](https://github.com/ClickHouse/ClickHouse/pull/3357) -- Fixed segfault that could occur in rare cases after optimization that replaced AND chains from equality evaluations with the corresponding IN expression. [liuyimin-bytedance](https://github.com/ClickHouse/ClickHouse/pull/3339) -- Minor corrections to `clickhouse-benchmark`: previously, client information was not sent to the server; now the number of queries executed is calculated more accurately when shutting down and for limiting the number of iterations. [\#3351](https://github.com/ClickHouse/ClickHouse/pull/3351) [\#3352](https://github.com/ClickHouse/ClickHouse/pull/3352) - -#### Backward incompatible changes: {#backward-incompatible-changes-1} - -- Removed the `allow_experimental_decimal_type` option. 
The `Decimal` data type is available by default. [\#3329](https://github.com/ClickHouse/ClickHouse/pull/3329)
-
-## ClickHouse release 18.12 {#clickhouse-release-18-12}
-
-### ClickHouse release 18.12.17, 2018-09-16 {#clickhouse-release-18-12-17-2018-09-16}
-
-#### New features: {#new-features-2}
-
-- `invalidate_query` (the ability to specify a query to check whether an external dictionary needs to be updated) is implemented for the `clickhouse` source. [\#3126](https://github.com/ClickHouse/ClickHouse/pull/3126)
-- Added the ability to use `UInt*`, `Int*`, and `DateTime` data types (along with the `Date` type) as a `range_hashed` external dictionary key that defines the boundaries of ranges. Now `NULL` can be used to designate an open range. [Vasily Nemkov](https://github.com/ClickHouse/ClickHouse/pull/3123)
-- The `Decimal` type now supports `var*` and `stddev*` aggregate functions. [\#3129](https://github.com/ClickHouse/ClickHouse/pull/3129)
-- The `Decimal` type now supports mathematical functions (`exp`, `sin`, and so on). [\#3129](https://github.com/ClickHouse/ClickHouse/pull/3129)
-- The `system.part_log` table now has the `partition_id` column. [\#3089](https://github.com/ClickHouse/ClickHouse/pull/3089)
-
-#### Bug fixes: {#bug-fixes-11}
-
-- `Merge` now works correctly on `Distributed` tables. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3159)
-- Fixed incompatibility (unnecessary dependency on the `glibc` version) that made it impossible to run ClickHouse on `Ubuntu Precise` and older versions. The incompatibility arose in version 18.12.13. [\#3130](https://github.com/ClickHouse/ClickHouse/pull/3130)
-- Fixed errors in the `enable_optimize_predicate_expression` setting. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3107)
-- Fixed a minor issue with backwards compatibility that appeared when working with a cluster of replicas on versions earlier than 18.12.13 and simultaneously creating a new replica of a table on a server with a newer version (shown in the message `Can not clone replica, because the ... updated to new ClickHouse version`, which is logical, but shouldn’t happen). [\#3122](https://github.com/ClickHouse/ClickHouse/pull/3122)
-
-#### Backward incompatible changes: {#backward-incompatible-changes-2}
-
-- The `enable_optimize_predicate_expression` option is enabled by default (which is rather optimistic). If query analysis errors occur that are related to searching for the column names, set `enable_optimize_predicate_expression` to 0. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3107)
-
-### ClickHouse release 18.12.14, 2018-09-13 {#clickhouse-release-18-12-14-2018-09-13}
-
-#### New features: {#new-features-3}
-
-- Added support for `ALTER UPDATE` queries. [\#3035](https://github.com/ClickHouse/ClickHouse/pull/3035)
-- Added the `allow_ddl` option, which restricts the user’s access to DDL queries. [\#3104](https://github.com/ClickHouse/ClickHouse/pull/3104)
-- Added the `min_merge_bytes_to_use_direct_io` option for `MergeTree` engines, which allows you to set a threshold for the total size of the merge (when above the threshold, data part files will be handled using O\_DIRECT). [\#3117](https://github.com/ClickHouse/ClickHouse/pull/3117)
-- The `system.merges` system table now contains the `partition_id` column. [\#3099](https://github.com/ClickHouse/ClickHouse/pull/3099)
-
-#### Improvements {#improvements-3}
-
-- If a data part remains unchanged during mutation, it isn’t downloaded by replicas.
[\#3103](https://github.com/ClickHouse/ClickHouse/pull/3103)
-- Autocomplete is available for names of settings when working with `clickhouse-client`. [\#3106](https://github.com/ClickHouse/ClickHouse/pull/3106)
-
-#### Bug fixes: {#bug-fixes-12}
-
-- Added a check for the sizes of arrays that are elements of `Nested` type fields when inserting. [\#3118](https://github.com/ClickHouse/ClickHouse/pull/3118)
-- Fixed an error updating external dictionaries with the `ODBC` source and `hashed` storage. This error occurred in version 18.12.13.
-- Fixed a crash when creating a temporary table from a query with an `IN` condition. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3098)
-- Fixed an error in aggregate functions for arrays that can have `NULL` elements. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3097)
-
-### ClickHouse release 18.12.13, 2018-09-10 {#clickhouse-release-18-12-13-2018-09-10}
-
-#### New features: {#new-features-4}
-
-- Added the `DECIMAL(digits, scale)` data type (`Decimal32(scale)`, `Decimal64(scale)`, `Decimal128(scale)`). To enable it, use the setting `allow_experimental_decimal_type`. [\#2846](https://github.com/ClickHouse/ClickHouse/pull/2846) [\#2970](https://github.com/ClickHouse/ClickHouse/pull/2970) [\#3008](https://github.com/ClickHouse/ClickHouse/pull/3008) [\#3047](https://github.com/ClickHouse/ClickHouse/pull/3047)
-- New `WITH ROLLUP` modifier for `GROUP BY` (alternative syntax: `GROUP BY ROLLUP(...)`). [\#2948](https://github.com/ClickHouse/ClickHouse/pull/2948)
-- In queries with JOIN, the star character expands to a list of columns in all tables, in compliance with the SQL standard. You can restore the old behavior by setting `asterisk_left_columns_only` to 1 on the user configuration level. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2787)
-- Added support for JOIN with table functions. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2907)
-- Autocomplete by pressing Tab in clickhouse-client. [Sergey Shcherbin](https://github.com/ClickHouse/ClickHouse/pull/2447)
-- Ctrl+C in clickhouse-client clears a query that was entered. [\#2877](https://github.com/ClickHouse/ClickHouse/pull/2877)
-- Added the `join_default_strictness` setting (values: `''`, `'any'`, `'all'`). This allows you to not specify `ANY` or `ALL` for `JOIN`. [\#2982](https://github.com/ClickHouse/ClickHouse/pull/2982)
-- Each line of the server log related to query processing shows the query ID. [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482)
-- Now you can get query execution logs in clickhouse-client (use the `send_logs_level` setting). With distributed query processing, logs are cascaded from all the servers. [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482)
-- The `system.query_log` and `system.processes` (`SHOW PROCESSLIST`) tables now have information about all changed settings when you run a query (the nested structure of the `Settings` data). Added the `log_query_settings` setting. [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482)
-- The `system.query_log` and `system.processes` tables now show information about the number of threads that are participating in query execution (see the `thread_numbers` column). [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482)
-- Added `ProfileEvents` counters that measure the time spent on reading and writing over the network and reading and writing to disk, the number of network errors, and the time spent waiting when network bandwidth is limited. [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482)
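-
-One way to peek at the new counters server-wide; the `LIKE` patterns below are assumptions about counter names, and per-query values also land in `system.query_log`:
-
-``` sql
-SELECT event, value
-FROM system.events
-WHERE event LIKE 'Network%' OR event LIKE 'Disk%'
-ORDER BY event;
-```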
-- Added `ProfileEvents` counters that contain the system metrics from rusage (you can use them to get information about CPU usage in userspace and the kernel, page faults, and context switches), as well as taskstats metrics (use these to obtain information about I/O wait time, CPU wait time, and the amount of data read and recorded, both with and without page cache). [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482)
-- The `ProfileEvents` counters are applied globally and for each query, as well as for each query execution thread, which allows you to profile resource consumption by query in detail. [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482)
-- Added the `system.query_thread_log` table, which contains information about each query execution thread. Added the `log_query_threads` setting. [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482)
-- The `system.metrics` and `system.events` tables now have built-in documentation. [\#3016](https://github.com/ClickHouse/ClickHouse/pull/3016)
-- Added the `arrayEnumerateDense` function. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2975)
-- Added the `arrayCumSumNonNegative` and `arrayDifference` functions. [Aleksey Studnev](https://github.com/ClickHouse/ClickHouse/pull/2942)
-- Added the `retention` aggregate function. [Sundy Li](https://github.com/ClickHouse/ClickHouse/pull/2887)
-- Now you can add (merge) states of aggregate functions by using the plus operator, and multiply the states of aggregate functions by a nonnegative constant. [\#3062](https://github.com/ClickHouse/ClickHouse/pull/3062) [\#3034](https://github.com/ClickHouse/ClickHouse/pull/3034)
-- Tables in the MergeTree family now have the virtual column `_partition_id`. [\#3089](https://github.com/ClickHouse/ClickHouse/pull/3089)
-
-#### Experimental features: {#experimental-features-1}
-
-- Added the `LowCardinality(T)` data type. This data type automatically creates a local dictionary of values and allows data processing without unpacking the dictionary. [\#2830](https://github.com/ClickHouse/ClickHouse/pull/2830)
-- Added a cache of JIT-compiled functions and a counter for the number of uses before compiling. To JIT compile expressions, enable the `compile_expressions` setting. [\#2990](https://github.com/ClickHouse/ClickHouse/pull/2990) [\#3077](https://github.com/ClickHouse/ClickHouse/pull/3077)
-
-#### Improvements: {#improvements-4}
-
-- Fixed the problem with unlimited accumulation of the replication log when there are abandoned replicas. Added an effective recovery mode for replicas with a long lag.
-- Improved performance of `GROUP BY` with multiple aggregation fields when one of them is a string and the others are fixed length.
-- Improved performance when using `PREWHERE` and with implicit transfer of expressions in `PREWHERE`.
-- Improved parsing performance for text formats (`CSV`, `TSV`). [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2977) [\#2980](https://github.com/ClickHouse/ClickHouse/pull/2980)
-- Improved performance of reading strings and arrays in binary formats. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2955)
-- Increased performance and reduced memory consumption for queries to `system.tables` and `system.columns` when there is a very large number of tables on a single server.
[\#2953](https://github.com/ClickHouse/ClickHouse/pull/2953)
-- Fixed a performance problem in the case of a large stream of queries that result in an error (the `_dl_addr` function is visible in `perf top`, but the server isn’t using much CPU). [\#2938](https://github.com/ClickHouse/ClickHouse/pull/2938)
-- Conditions are cast into the View (when `enable_optimize_predicate_expression` is enabled). [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2907)
-- Improvements to the functionality for the `UUID` data type. [\#3074](https://github.com/ClickHouse/ClickHouse/pull/3074) [\#2985](https://github.com/ClickHouse/ClickHouse/pull/2985)
-- The `UUID` data type is supported in external dictionaries. [The-Alchemist](https://github.com/ClickHouse/ClickHouse/pull/2822)
-- The `visitParamExtractRaw` function works correctly with nested structures. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2974)
-- When the `input_format_skip_unknown_fields` setting is enabled, object fields in `JSONEachRow` format are skipped correctly. [BlahGeek](https://github.com/ClickHouse/ClickHouse/pull/2958)
-- For a `CASE` expression with conditions, you can now omit `ELSE`, which is equivalent to `ELSE NULL`. [\#2920](https://github.com/ClickHouse/ClickHouse/pull/2920)
-- The operation timeout can now be configured when working with ZooKeeper. [urykhy](https://github.com/ClickHouse/ClickHouse/pull/2971)
-- You can specify an offset for `LIMIT n, m` as `LIMIT n OFFSET m`. [\#2840](https://github.com/ClickHouse/ClickHouse/pull/2840)
-- You can use the `SELECT TOP n` syntax as an alternative for `LIMIT`. [\#2840](https://github.com/ClickHouse/ClickHouse/pull/2840)
-- Increased the size of the queue to write to system tables, so the `SystemLog parameter queue is full` error doesn’t happen as often.
-- The `windowFunnel` aggregate function now supports events that meet multiple conditions. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2801)
-- Duplicate columns can be used in a `USING` clause for `JOIN`. [\#3006](https://github.com/ClickHouse/ClickHouse/pull/3006)
-- `Pretty` formats now have a limit on column alignment by width. Use the `output_format_pretty_max_column_pad_width` setting. If a value is wider, it will still be displayed in its entirety, but the other cells in the table will not be too wide. [\#3003](https://github.com/ClickHouse/ClickHouse/pull/3003)
-- The `odbc` table function now allows you to specify the database/schema name. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2885)
-- Added the ability to use a username specified in the `clickhouse-client` config file. [Vladimir Kozbin](https://github.com/ClickHouse/ClickHouse/pull/2909)
-- The `ZooKeeperExceptions` counter has been split into three counters: `ZooKeeperUserExceptions`, `ZooKeeperHardwareExceptions`, and `ZooKeeperOtherExceptions`.
-- `ALTER DELETE` queries work for materialized views.
-- Added randomization when running the cleanup thread periodically for `ReplicatedMergeTree` tables in order to avoid periodic load spikes when there are a very large number of `ReplicatedMergeTree` tables.
-- Support for `ATTACH TABLE ... ON CLUSTER` queries. [\#3025](https://github.com/ClickHouse/ClickHouse/pull/3025)
-
-#### Bug fixes: {#bug-fixes-13}
-
-- Fixed an issue with `Dictionary` tables (throws the `Size of offsets doesn't match size of column` or `Unknown compression method` exception). This bug appeared in version 18.10.3.
- Fixed a bug when merging `CollapsingMergeTree` tables if one of the data parts is empty (these parts are formed during merge or `ALTER DELETE` if all data was deleted), and the `vertical` algorithm was used for the merge. [\#3049](https://github.com/ClickHouse/ClickHouse/pull/3049)
- Fixed a race condition during `DROP` or `TRUNCATE` for `Memory` tables with a simultaneous `SELECT`, which could lead to server crashes. This bug appeared in version 1.1.54388. [\#3038](https://github.com/ClickHouse/ClickHouse/pull/3038)
- Fixed the possibility of data loss when inserting in `Replicated` tables if the `Session is expired` error is returned (data loss can be detected by the `ReplicatedDataLoss` metric). This error occurred in version 1.1.54378. [\#2939](https://github.com/ClickHouse/ClickHouse/pull/2939) [\#2949](https://github.com/ClickHouse/ClickHouse/pull/2949) [\#2964](https://github.com/ClickHouse/ClickHouse/pull/2964)
- Fixed a segfault during `JOIN ... ON`. [\#3000](https://github.com/ClickHouse/ClickHouse/pull/3000)
- Fixed an error in searching for column names when the `WHERE` expression consists entirely of a qualified column name, such as `WHERE table.column`. [\#2994](https://github.com/ClickHouse/ClickHouse/pull/2994)
- Fixed the “Not found column” error that occurred when executing distributed queries if a single column consisting of an IN expression with a subquery is requested from a remote server. [\#3087](https://github.com/ClickHouse/ClickHouse/pull/3087)
- Fixed the `Block structure mismatch in UNION stream: different number of columns` error that occurred for distributed queries if one of the shards is local and the other is not, and optimization of the move to `PREWHERE` is triggered. [\#2226](https://github.com/ClickHouse/ClickHouse/pull/2226) [\#3037](https://github.com/ClickHouse/ClickHouse/pull/3037) [\#3055](https://github.com/ClickHouse/ClickHouse/pull/3055) [\#3065](https://github.com/ClickHouse/ClickHouse/pull/3065) [\#3073](https://github.com/ClickHouse/ClickHouse/pull/3073) [\#3090](https://github.com/ClickHouse/ClickHouse/pull/3090) [\#3093](https://github.com/ClickHouse/ClickHouse/pull/3093)
- Fixed the `pointInPolygon` function for certain cases of non-convex polygons. [\#2910](https://github.com/ClickHouse/ClickHouse/pull/2910)
- Fixed the incorrect result when comparing `nan` with integers. [\#3024](https://github.com/ClickHouse/ClickHouse/pull/3024)
- Fixed an error in the `zlib-ng` library that could lead to segfault in rare cases. [\#2854](https://github.com/ClickHouse/ClickHouse/pull/2854)
- Fixed a memory leak when inserting into a table with `AggregateFunction` columns, if the state of the aggregate function is not simple (allocates memory separately), and if a single insertion request results in multiple small blocks. [\#3084](https://github.com/ClickHouse/ClickHouse/pull/3084)
- Fixed a race condition when creating and deleting the same `Buffer` or `MergeTree` table simultaneously.
- Fixed the possibility of a segfault when comparing tuples made up of certain non-trivial types, such as tuples. [\#2989](https://github.com/ClickHouse/ClickHouse/pull/2989)
- Fixed the possibility of a segfault when running certain `ON CLUSTER` queries. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2960)
- Fixed an error in the `arrayDistinct` function for `Nullable` array elements. [\#2845](https://github.com/ClickHouse/ClickHouse/pull/2845) [\#2937](https://github.com/ClickHouse/ClickHouse/pull/2937)
- The `enable_optimize_predicate_expression` option now correctly supports cases with `SELECT *`. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2929)
- Fixed the segfault when re-initializing the ZooKeeper session. [\#2917](https://github.com/ClickHouse/ClickHouse/pull/2917)
- Fixed potential blocking when working with ZooKeeper.
- Fixed incorrect code for adding nested data structures in a `SummingMergeTree`.
- When allocating memory for states of aggregate functions, alignment is correctly taken into account, which makes it possible to use operations that require alignment when implementing states of aggregate functions. [chenxing-xc](https://github.com/ClickHouse/ClickHouse/pull/2808)

#### Security fix: {#security-fix}

- Safe use of ODBC data sources. Interaction with ODBC drivers uses a separate `clickhouse-odbc-bridge` process. Errors in third-party ODBC drivers no longer cause problems with server stability or vulnerabilities. [\#2828](https://github.com/ClickHouse/ClickHouse/pull/2828) [\#2879](https://github.com/ClickHouse/ClickHouse/pull/2879) [\#2886](https://github.com/ClickHouse/ClickHouse/pull/2886) [\#2893](https://github.com/ClickHouse/ClickHouse/pull/2893) [\#2921](https://github.com/ClickHouse/ClickHouse/pull/2921)
- Fixed incorrect validation of the file path in the `catBoostPool` table function. [\#2894](https://github.com/ClickHouse/ClickHouse/pull/2894)
- The contents of system tables (`tables`, `databases`, `parts`, `columns`, `parts_columns`, `merges`, `mutations`, `replicas`, and `replication_queue`) are filtered according to the user’s configured access to databases (`allow_databases`). [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2856)

#### Backward incompatible changes: {#backward-incompatible-changes-3}

- In queries with JOIN, the star character expands to a list of columns in all tables, in compliance with the SQL standard. You can restore the old behavior by setting `asterisk_left_columns_only` to 1 on the user configuration level.

#### Build changes: {#build-changes-2}

- Most integration tests can now be run by commit.
- Code style checks can also be run by commit.
- The `memcpy` implementation is chosen correctly when building on CentOS7/Fedora. [Etienne Champetier](https://github.com/ClickHouse/ClickHouse/pull/2912)
- When using clang to build, some warnings from `-Weverything` have been added, in addition to the regular `-Wall -Wextra -Werror`. [\#2957](https://github.com/ClickHouse/ClickHouse/pull/2957)
- Debugging the build uses the `jemalloc` debug option.
- The interface of the library for interacting with ZooKeeper is declared abstract. [\#2950](https://github.com/ClickHouse/ClickHouse/pull/2950)

## ClickHouse release 18.10 {#clickhouse-release-18-10}

### ClickHouse release 18.10.3, 2018-08-13 {#clickhouse-release-18-10-3-2018-08-13}

#### New features: {#new-features-5}

- HTTPS can be used for replication. [\#2760](https://github.com/ClickHouse/ClickHouse/pull/2760)
- Added the functions `murmurHash2_64`, `murmurHash3_32`, `murmurHash3_64`, and `murmurHash3_128` in addition to the existing `murmurHash2_32` (see the example below). [\#2791](https://github.com/ClickHouse/ClickHouse/pull/2791)
- Support for Nullable types in the ClickHouse ODBC driver (`ODBCDriver2` output format). [\#2834](https://github.com/ClickHouse/ClickHouse/pull/2834)
- Support for `UUID` in the key columns.
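
A minimal sketch of the new hash functions mentioned above (the input string is arbitrary; `hex` is used only to make the 128-bit `FixedString(16)` result readable):

```sql
SELECT
    murmurHash2_32('ClickHouse') AS h2_32,
    murmurHash2_64('ClickHouse') AS h2_64,
    murmurHash3_32('ClickHouse') AS h3_32,
    murmurHash3_64('ClickHouse') AS h3_64,
    hex(murmurHash3_128('ClickHouse')) AS h3_128 -- FixedString(16), shown as hex
```
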

#### Improvements: {#improvements-5}

- Clusters can be removed without restarting the server when they are deleted from the config files. [\#2777](https://github.com/ClickHouse/ClickHouse/pull/2777)
- External dictionaries can be removed without restarting the server when they are removed from config files. [\#2779](https://github.com/ClickHouse/ClickHouse/pull/2779)
- Added `SETTINGS` support for the `Kafka` table engine. [Alexander Marshalov](https://github.com/ClickHouse/ClickHouse/pull/2781)
- Improvements for the `UUID` data type (not yet complete). [\#2618](https://github.com/ClickHouse/ClickHouse/pull/2618)
- Support for empty parts after merges in the `SummingMergeTree`, `CollapsingMergeTree` and `VersionedCollapsingMergeTree` engines. [\#2815](https://github.com/ClickHouse/ClickHouse/pull/2815)
- Old records of completed mutations are deleted (`ALTER DELETE`). [\#2784](https://github.com/ClickHouse/ClickHouse/pull/2784)
- Added the `system.merge_tree_settings` table. [Kirill Shvakov](https://github.com/ClickHouse/ClickHouse/pull/2841)
- The `system.tables` table now has dependency columns: `dependencies_database` and `dependencies_table`. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2851)
- Added the `max_partition_size_to_drop` config option. [\#2782](https://github.com/ClickHouse/ClickHouse/pull/2782)
- Added the `output_format_json_escape_forward_slashes` option. [Alexander Bocharov](https://github.com/ClickHouse/ClickHouse/pull/2812)
- Added the `max_fetch_partition_retries_count` setting. [\#2831](https://github.com/ClickHouse/ClickHouse/pull/2831)
- Added the `prefer_localhost_replica` setting for disabling the preference for a local replica and going to a local replica without inter-process interaction. [\#2832](https://github.com/ClickHouse/ClickHouse/pull/2832)
- The `quantileExact` aggregate function returns `nan` in the case of aggregation on an empty `Float32` or `Float64` set. [Sundy Li](https://github.com/ClickHouse/ClickHouse/pull/2855)

#### Bug fixes: {#bug-fixes-14}

- Removed unnecessary escaping of the connection string parameters for ODBC, which made it impossible to establish a connection. This error occurred in version 18.6.0.
- Fixed the logic for processing `REPLACE PARTITION` commands in the replication queue. If there are two `REPLACE` commands for the same partition, the incorrect logic could cause one of them to remain in the replication queue and not be executed. [\#2814](https://github.com/ClickHouse/ClickHouse/pull/2814)
- Fixed a merge bug when all data parts were empty (parts that were formed from a merge or from `ALTER DELETE` if all data was deleted). This bug appeared in version 18.1.0. [\#2930](https://github.com/ClickHouse/ClickHouse/pull/2930)
- Fixed an error for concurrent `Set` or `Join`. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2823)
- Fixed the `Block structure mismatch in UNION stream: different number of columns` error that occurred for `UNION ALL` queries inside a sub-query if one of the `SELECT` queries contains duplicate column names. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2094)
- Fixed a memory leak if an exception occurred when connecting to a MySQL server.
- Fixed incorrect clickhouse-client response code in case of a query error.
- Fixed incorrect behavior of materialized views containing DISTINCT. [\#2795](https://github.com/ClickHouse/ClickHouse/issues/2795)

#### Backward incompatible changes: {#backward-incompatible-changes-4}

- Removed support for CHECK TABLE queries for Distributed tables.

#### Build changes: {#build-changes-3}

- The allocator has been replaced: `jemalloc` is now used instead of `tcmalloc`. In some scenarios, this increases speed up to 20%. However, there are queries that have slowed by up to 20%. Memory consumption has been reduced by approximately 10% in some scenarios, with improved stability. Under highly concurrent loads, CPU usage in userspace and in the kernel shows just a slight increase. [\#2773](https://github.com/ClickHouse/ClickHouse/pull/2773)
- Use of libressl from a submodule. [\#1983](https://github.com/ClickHouse/ClickHouse/pull/1983) [\#2807](https://github.com/ClickHouse/ClickHouse/pull/2807)
- Use of unixodbc from a submodule. [\#2789](https://github.com/ClickHouse/ClickHouse/pull/2789)
- Use of mariadb-connector-c from a submodule. [\#2785](https://github.com/ClickHouse/ClickHouse/pull/2785)
- Added functional test files to the repository that depend on the availability of test data (for the time being, without the test data itself).

## ClickHouse release 18.6 {#clickhouse-release-18-6}

### ClickHouse release 18.6.0, 2018-08-02 {#clickhouse-release-18-6-0-2018-08-02}

#### New features: {#new-features-6}

- Added support for ON expressions for the JOIN ON syntax:
  `JOIN ON Expr([table.]column ...) = Expr([table.]column, ...) [AND Expr([table.]column, ...) = Expr([table.]column, ...) ...]`
  The expression must be a chain of equalities joined by the AND operator. Each side of the equality can be an arbitrary expression over the columns of one of the tables. The use of fully qualified column names is supported (`table.name`, `database.table.name`, `table_alias.name`, `subquery_alias.name`) for the right table (a short runnable sketch follows below). [\#2742](https://github.com/ClickHouse/ClickHouse/pull/2742)
- HTTPS can be enabled for replication. [\#2760](https://github.com/ClickHouse/ClickHouse/pull/2760)

#### Improvements: {#improvements-6}

- The server passes the patch component of its version to the client. Data about the patch version component is in `system.processes` and `query_log`. [\#2646](https://github.com/ClickHouse/ClickHouse/pull/2646)

## ClickHouse release 18.5 {#clickhouse-release-18-5}

### ClickHouse release 18.5.1, 2018-07-31 {#clickhouse-release-18-5-1-2018-07-31}

#### New features: {#new-features-7}

- Added the hash function `murmurHash2_32` [\#2756](https://github.com/ClickHouse/ClickHouse/pull/2756).

#### Improvements: {#improvements-7}

- Now you can use the `from_env` [\#2741](https://github.com/ClickHouse/ClickHouse/pull/2741) attribute to set values in config files from environment variables.
- Added case-insensitive versions of the `coalesce`, `ifNull`, and `nullIf` functions [\#2752](https://github.com/ClickHouse/ClickHouse/pull/2752).

#### Bug fixes: {#bug-fixes-15}

- Fixed a possible bug when starting a replica [\#2759](https://github.com/ClickHouse/ClickHouse/pull/2759).

## ClickHouse release 18.4 {#clickhouse-release-18-4}

### ClickHouse release 18.4.0, 2018-07-28 {#clickhouse-release-18-4-0-2018-07-28}

#### New features: {#new-features-8}

- Added system tables: `formats`, `data_type_families`, `aggregate_function_combinators`, `table_functions`, `table_engines`, `collations` [\#2721](https://github.com/ClickHouse/ClickHouse/pull/2721).
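
To make the expression-equality `JOIN ON` syntax from 18.6.0 above concrete, here is a minimal, self-contained sketch; the two joined tables are derived from `numbers()` purely for illustration:

```sql
SELECT a.n, b.n
FROM (SELECT number AS n FROM numbers(5)) AS a
ALL INNER JOIN (SELECT number AS n FROM numbers(5)) AS b
    ON a.n + 1 = b.n -- each side of the equality is an expression over one table's columns
```
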
- Added the ability to use a table function instead of a table as an argument of a `remote` or `cluster` table function [\#2708](https://github.com/ClickHouse/ClickHouse/pull/2708).
- Support for `HTTP Basic` authentication in the replication protocol [\#2727](https://github.com/ClickHouse/ClickHouse/pull/2727).
- The `has` function now allows searching for a numeric value in an array of `Enum` values [Maxim Khrisanfov](https://github.com/ClickHouse/ClickHouse/pull/2699).
- Support for adding arbitrary message separators when reading from `Kafka` [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2701).

#### Improvements: {#improvements-8}

- The `ALTER TABLE t DELETE WHERE` query does not rewrite data parts that were not affected by the WHERE condition [\#2694](https://github.com/ClickHouse/ClickHouse/pull/2694).
- The `use_minimalistic_checksums_in_zookeeper` option for `ReplicatedMergeTree` tables is enabled by default. This setting was added in version 1.1.54378, 2018-04-16. Versions that are older than 1.1.54378 can no longer be installed.
- Support for running `KILL` and `OPTIMIZE` queries that specify `ON CLUSTER` [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2689).

#### Bug fixes: {#bug-fixes-16}

- Fixed the error `Column ... is not under an aggregate function and not in GROUP BY` for aggregation with an IN expression. This bug appeared in version 18.1.0. ([bbdd780b](https://github.com/ClickHouse/ClickHouse/commit/bbdd780be0be06a0f336775941cdd536878dd2c2))
- Fixed a bug in the `windowFunnel` aggregate function [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2735).
- Fixed a bug in the `anyHeavy` aggregate function ([a2101df2](https://github.com/ClickHouse/ClickHouse/commit/a2101df25a6a0fba99aa71f8793d762af2b801ee)).
- Fixed server crash when using the `countArray()` aggregate function.

#### Backward incompatible changes: {#backward-incompatible-changes-5}

- Parameters for the `Kafka` engine were changed from `Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format[, kafka_schema, kafka_num_consumers])` to `Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format[, kafka_row_delimiter, kafka_schema, kafka_num_consumers])`. If your tables use the `kafka_schema` or `kafka_num_consumers` parameters, you have to manually edit the metadata files `path/metadata/database/table.sql` and add the `kafka_row_delimiter` parameter with the value `''`.

## ClickHouse release 18.1 {#clickhouse-release-18-1}

### ClickHouse release 18.1.0, 2018-07-23 {#clickhouse-release-18-1-0-2018-07-23}

#### New features: {#new-features-9}

- Support for the `ALTER TABLE t DELETE WHERE` query for non-replicated MergeTree tables ([\#2634](https://github.com/ClickHouse/ClickHouse/pull/2634)).
- Support for arbitrary types for the `uniq*` family of aggregate functions ([\#2010](https://github.com/ClickHouse/ClickHouse/issues/2010)).
- Support for arbitrary types in comparison operators ([\#2026](https://github.com/ClickHouse/ClickHouse/issues/2026)).
- The `users.xml` file allows setting a subnet mask in the format `10.0.0.1/255.255.255.0`. This is necessary for using masks for IPv6 networks with zeros in the middle ([\#2637](https://github.com/ClickHouse/ClickHouse/pull/2637)).
- Added the `arrayDistinct` function ([\#2670](https://github.com/ClickHouse/ClickHouse/pull/2670)).
- The SummingMergeTree engine can now work with AggregateFunction type columns ([Constantin S. Pan](https://github.com/ClickHouse/ClickHouse/pull/2566)).

#### Improvements: {#improvements-9}

- Changed the numbering scheme for release versions. Now the first part contains the year of release (A.D., Moscow timezone, minus 2000), the second part contains the number for major changes (increases for most releases), and the third part is the patch version. Releases are still backward compatible, unless otherwise stated in the changelog.
- Faster conversions of floating-point numbers to a string ([Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2664)).
- If some rows were skipped during an insert due to parsing errors (this is possible with the `input_allow_errors_num` and `input_allow_errors_ratio` settings enabled), the number of skipped rows is now written to the server log ([Leonardo Cecchi](https://github.com/ClickHouse/ClickHouse/pull/2669)).

#### Bug fixes: {#bug-fixes-17}

- Fixed the TRUNCATE command for temporary tables ([Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2624)).
- Fixed a rare deadlock in the ZooKeeper client library that occurred when there was a network error while reading the response ([c315200](https://github.com/ClickHouse/ClickHouse/commit/c315200e64b87e44bdf740707fc857d1fdf7e947)).
- Fixed an error during a CAST to Nullable types ([\#1322](https://github.com/ClickHouse/ClickHouse/issues/1322)).
- Fixed the incorrect result of the `maxIntersection()` function when the boundaries of intervals coincided ([Michael Furmur](https://github.com/ClickHouse/ClickHouse/pull/2657)).
- Fixed incorrect transformation of the OR expression chain in a function argument ([chenxing-xc](https://github.com/ClickHouse/ClickHouse/pull/2663)).
- Fixed performance degradation for queries containing `IN (subquery)` expressions inside another subquery ([\#2571](https://github.com/ClickHouse/ClickHouse/issues/2571)).
- Fixed incompatibility between servers with different versions in distributed queries that use a `CAST` function that isn’t in uppercase letters ([fe8c4d6](https://github.com/ClickHouse/ClickHouse/commit/fe8c4d64e434cacd4ceef34faa9005129f2190a5)).
- Added missing quoting of identifiers for queries to an external DBMS ([\#2635](https://github.com/ClickHouse/ClickHouse/issues/2635)).

#### Backward incompatible changes: {#backward-incompatible-changes-6}

- Converting a string containing the number zero to DateTime does not work. Example: `SELECT toDateTime('0')`. This is also the reason that `DateTime DEFAULT '0'` does not work in tables, as well as `0` in dictionaries. Solution: replace `0` with `0000-00-00 00:00:00`.

## ClickHouse release 1.1 {#clickhouse-release-1-1}

### ClickHouse release 1.1.54394, 2018-07-12 {#clickhouse-release-1-1-54394-2018-07-12}

#### New features: {#new-features-10}

- Added the `histogram` aggregate function (see the short example below) ([Mikhail Surin](https://github.com/ClickHouse/ClickHouse/pull/2521)).
- Now `OPTIMIZE TABLE ... FINAL` can be used without specifying partitions for `ReplicatedMergeTree` ([Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2600)).

#### Bug fixes: {#bug-fixes-18}

- Fixed a problem with a very small timeout for sockets (one second) for reading and writing when sending and downloading replicated data, which made it impossible to download larger parts if there is a load on the network or disk (it resulted in cyclical attempts to download parts). This error occurred in version 1.1.54388.
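
Returning to the `histogram` function added above, a minimal sketch; the bin boundaries are chosen adaptively, so treat the exact output as illustrative:

```sql
SELECT histogram(5)(number) AS bins
FROM numbers(100)
-- Returns an array of (lower, upper, height) tuples describing 5 adaptive bins.
```
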
- Fixed issues when using chroot in ZooKeeper if you inserted duplicate data blocks in the table.
- The `has` function now works correctly for an array with Nullable elements ([\#2115](https://github.com/ClickHouse/ClickHouse/issues/2115)).
- The `system.tables` table now works correctly when used in distributed queries. The `metadata_modification_time` and `engine_full` columns are now non-virtual. Fixed an error that occurred if only these columns were queried from the table.
- Fixed how an empty `TinyLog` table works after inserting an empty data block ([\#2563](https://github.com/ClickHouse/ClickHouse/issues/2563)).
- The `system.zookeeper` table works if the value of the node in ZooKeeper is NULL.

### ClickHouse release 1.1.54390, 2018-07-06 {#clickhouse-release-1-1-54390-2018-07-06}

#### New features: {#new-features-11}

- Queries can be sent in `multipart/form-data` format (in the `query` field), which is useful if external data is also sent for query processing ([Olga Hvostikova](https://github.com/ClickHouse/ClickHouse/pull/2490)).
- Added the ability to enable or disable processing single or double quotes when reading data in CSV format. You can configure this in the `format_csv_allow_single_quotes` and `format_csv_allow_double_quotes` settings ([Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2574)).
- Now `OPTIMIZE TABLE ... FINAL` can be used without specifying the partition for non-replicated variants of `MergeTree` ([Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2599)).

#### Improvements: {#improvements-10}

- Improved performance, reduced memory consumption, and correct memory consumption tracking with use of the IN operator when a table index could be used ([\#2584](https://github.com/ClickHouse/ClickHouse/pull/2584)).
- Removed redundant checking of checksums when adding a data part. This is important when there are a large number of replicas, because in these cases the total number of checks was equal to N^2.
- Added support for `Array(Tuple(...))` arguments for the `arrayEnumerateUniq` function ([\#2573](https://github.com/ClickHouse/ClickHouse/pull/2573)).
- Added `Nullable` support for the `runningDifference` function ([\#2594](https://github.com/ClickHouse/ClickHouse/pull/2594)).
- Improved query analysis performance when there is a very large number of expressions ([\#2572](https://github.com/ClickHouse/ClickHouse/pull/2572)).
- Faster selection of data parts for merging in `ReplicatedMergeTree` tables. Faster recovery of the ZooKeeper session ([\#2597](https://github.com/ClickHouse/ClickHouse/pull/2597)).
- The `format_version.txt` file for `MergeTree` tables is re-created if it is missing, which makes sense if ClickHouse is launched after copying the directory structure without files ([Ciprian Hacman](https://github.com/ClickHouse/ClickHouse/pull/2593)).

#### Bug fixes: {#bug-fixes-19}

- Fixed a bug when working with ZooKeeper that could make it impossible to recover the session and readonly states of tables before restarting the server.
- Fixed a bug when working with ZooKeeper that could result in old nodes not being deleted if the session is interrupted.
- Fixed an error in the `quantileTDigest` function for Float arguments (this bug was introduced in version 1.1.54388) ([Mikhail Surin](https://github.com/ClickHouse/ClickHouse/pull/2553)).
- Fixed a bug in the index for MergeTree tables if the primary key column is located inside the function for converting types between signed and unsigned integers of the same size ([\#2603](https://github.com/ClickHouse/ClickHouse/pull/2603)).
- Fixed segfault if `macros` are used but they aren’t in the config file ([\#2570](https://github.com/ClickHouse/ClickHouse/pull/2570)).
- Fixed switching to the default database when reconnecting the client ([\#2583](https://github.com/ClickHouse/ClickHouse/pull/2583)).
- Fixed a bug that occurred when the `use_index_for_in_with_subqueries` setting was disabled.

#### Security fix: {#security-fix-1}

- Sending files is no longer possible when connected to MySQL (`LOAD DATA LOCAL INFILE`).

### ClickHouse release 1.1.54388, 2018-06-28 {#clickhouse-release-1-1-54388-2018-06-28}

#### New features: {#new-features-12}

- Support for the `ALTER TABLE t DELETE WHERE` query for replicated tables. Added the `system.mutations` table to track the progress of this type of query.
- Support for the `ALTER TABLE t [REPLACE|ATTACH] PARTITION` query for \*MergeTree tables.
- Support for the `TRUNCATE TABLE` query ([Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2260)).
- Several new `SYSTEM` queries for replicated tables (`RESTART REPLICAS`, `SYNC REPLICA`, `[STOP|START] [MERGES|FETCHES|SENDS REPLICATED|REPLICATION QUEUES]`).
- Added the ability to write to a table with the MySQL engine and the corresponding table function ([sundy-li](https://github.com/ClickHouse/ClickHouse/pull/2294)).
- Added the `url()` table function and the `URL` table engine ([Alexander Sapin](https://github.com/ClickHouse/ClickHouse/pull/2501)).
- Added the `windowFunnel` aggregate function ([sundy-li](https://github.com/ClickHouse/ClickHouse/pull/2352)).
- New `startsWith` and `endsWith` functions for strings ([Vadim Plakhtinsky](https://github.com/ClickHouse/ClickHouse/pull/2429)).
- The `numbers()` table function now allows you to specify the offset (see the short example below) ([Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2535)).
- The password to `clickhouse-client` can be entered interactively.
- Server logs can now be sent to syslog ([Alexander Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/2459)).
- Support for logging in dictionaries with a shared library source ([Alexander Sapin](https://github.com/ClickHouse/ClickHouse/pull/2472)).
- Support for custom CSV delimiters ([Ivan Zhukov](https://github.com/ClickHouse/ClickHouse/pull/2263)).
- Added the `date_time_input_format` setting. If you switch this setting to `'best_effort'`, DateTime values will be read in a wide range of formats.
- Added the `clickhouse-obfuscator` utility for data obfuscation. Usage example: publishing data used in performance tests.

#### Experimental features: {#experimental-features-2}

- Added the ability to calculate `and` arguments only where they are needed ([Anastasia Tsarkova](https://github.com/ClickHouse/ClickHouse/pull/2272)).
- JIT compilation to native code is now available for some expressions ([pyos](https://github.com/ClickHouse/ClickHouse/pull/2277)).

#### Bug fixes: {#bug-fixes-20}

- Duplicates no longer appear for a query with `DISTINCT` and `ORDER BY`.
- Queries with `ARRAY JOIN` and `arrayFilter` no longer return an incorrect result.
- Fixed an error when reading an array column from a Nested structure ([\#2066](https://github.com/ClickHouse/ClickHouse/issues/2066)).
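
A brief sketch combining two of the new features listed above, the `startsWith`/`endsWith` string functions and the `numbers()` offset argument (values are illustrative):

```sql
SELECT
    number,                                 -- 5, 6, 7: numbers(offset, count)
    startsWith(toString(number), '5') AS s, -- 1 only for 5
    endsWith(toString(number), '7')   AS e  -- 1 only for 7
FROM numbers(5, 3)
```
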
- Fixed an error when analyzing queries with a HAVING clause like `HAVING tuple IN (...)`.
- Fixed an error when analyzing queries with recursive aliases.
- Fixed an error when reading from ReplacingMergeTree with a condition in PREWHERE that filters all rows ([\#2525](https://github.com/ClickHouse/ClickHouse/issues/2525)).
- User profile settings were not applied when using sessions in the HTTP interface.
- Fixed how settings are applied from the command line parameters in clickhouse-local.
- The ZooKeeper client library now uses the session timeout received from the server.
- Fixed a bug in the ZooKeeper client library when the client waited for the server response longer than the timeout.
- Fixed pruning of parts for queries with conditions on partition key columns ([\#2342](https://github.com/ClickHouse/ClickHouse/issues/2342)).
- Merges are now possible after `CLEAR COLUMN IN PARTITION` ([\#2315](https://github.com/ClickHouse/ClickHouse/issues/2315)).
- Type mapping in the ODBC table function has been fixed ([sundy-li](https://github.com/ClickHouse/ClickHouse/pull/2268)).
- Type comparisons have been fixed for `DateTime` with and without the time zone ([Alexander Bocharov](https://github.com/ClickHouse/ClickHouse/pull/2400)).
- Fixed syntactic parsing and formatting of the `CAST` operator.
- Fixed insertion into a materialized view for the Distributed table engine ([Babacar Diassé](https://github.com/ClickHouse/ClickHouse/pull/2411)).
- Fixed a race condition when writing data from the `Kafka` engine to materialized views ([Yangkuan Liu](https://github.com/ClickHouse/ClickHouse/pull/2448)).
- Fixed SSRF in the `remote()` table function.
- Fixed exit behavior of `clickhouse-client` in multiline mode ([\#2510](https://github.com/ClickHouse/ClickHouse/issues/2510)).

#### Improvements: {#improvements-11}

- Background tasks in replicated tables are now performed in a thread pool instead of in separate threads ([Silviu Caragea](https://github.com/ClickHouse/ClickHouse/pull/1722)).
- Improved LZ4 compression performance.
- Faster analysis for queries with a large number of JOINs and subqueries.
- The DNS cache is now updated automatically when there are too many network errors.
- Table inserts no longer occur if the insert into one of the materialized views is not possible because it has too many parts.
- Corrected the discrepancy in the event counters `Query`, `SelectQuery`, and `InsertQuery`.
- Expressions like `tuple IN (SELECT tuple)` are allowed if the tuple types match (see the sketch below).
- A server with replicated tables can start even if you haven’t configured ZooKeeper.
- When calculating the number of available CPU cores, limits on cgroups are now taken into account ([Atri Sharma](https://github.com/ClickHouse/ClickHouse/pull/2325)).
- Added chown for config directories in the systemd config file ([Mikhail Shiryaev](https://github.com/ClickHouse/ClickHouse/pull/2421)).

#### Build changes: {#build-changes-4}

- The gcc8 compiler can be used for builds.
- Added the ability to build llvm from submodule.
- The version of the librdkafka library has been updated to v0.11.4.
- Added the ability to use the system libcpuid library. The library version has been updated to 0.4.0.
- Fixed the build using the vectorclass library ([Babacar Diassé](https://github.com/ClickHouse/ClickHouse/pull/2274)).
- Cmake now generates files for ninja by default (like when using `-G Ninja`).
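
Returning to the improvement above that allows `tuple IN (SELECT tuple)` when the tuple types match, a minimal sketch (the `toUInt64` casts make the element types line up):

```sql
SELECT number
FROM numbers(5)
WHERE (number, number * 2) IN (SELECT toUInt64(2), toUInt64(4)) -- matches only number = 2
```
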
- Added the ability to use the libtinfo library instead of libtermcap ([Georgy Kondratiev](https://github.com/ClickHouse/ClickHouse/pull/2519)).
- Fixed a header file conflict in Fedora Rawhide ([\#2520](https://github.com/ClickHouse/ClickHouse/issues/2520)).

#### Backward incompatible changes: {#backward-incompatible-changes-7}

- Removed escaping in `Vertical` and `Pretty*` formats and deleted the `VerticalRaw` format.
- If servers with version 1.1.54388 (or newer) and servers with an older version are used simultaneously in a distributed query and the query has the `cast(x, 'Type')` expression without the `AS` keyword and doesn’t have the word `cast` in uppercase, an exception will be thrown with a message like `Not found column cast(0, 'UInt8') in block`. Solution: Update the server on the entire cluster.

### ClickHouse release 1.1.54385, 2018-06-01 {#clickhouse-release-1-1-54385-2018-06-01}

#### Bug fixes: {#bug-fixes-21}

- Fixed an error that in some cases caused ZooKeeper operations to block.

### ClickHouse release 1.1.54383, 2018-05-22 {#clickhouse-release-1-1-54383-2018-05-22}

#### Bug fixes: {#bug-fixes-22}

- Fixed a slowdown of the replication queue if a table has many replicas.

### ClickHouse release 1.1.54381, 2018-05-14 {#clickhouse-release-1-1-54381-2018-05-14}

#### Bug fixes: {#bug-fixes-23}

- Fixed a leak of nodes in ZooKeeper when ClickHouse loses its connection to the ZooKeeper server.

### ClickHouse release 1.1.54380, 2018-04-21 {#clickhouse-release-1-1-54380-2018-04-21}

#### New features: {#new-features-13}

- Added the table function `file(path, format, structure)`. An example reading bytes from `/dev/urandom`: `ln -s /dev/urandom /var/lib/clickhouse/user_files/random`, then `clickhouse-client -q "SELECT * FROM file('random', 'RowBinary', 'd UInt8') LIMIT 10"`.

#### Improvements: {#improvements-12}

- Subqueries can be wrapped in `()` brackets to enhance query readability. For example: `(SELECT 1) UNION ALL (SELECT 1)`.
- Simple `SELECT` queries from the `system.processes` table are not included in the `max_concurrent_queries` limit.

#### Bug fixes: {#bug-fixes-24}

- Fixed incorrect behavior of the `IN` operator when selecting from a `MATERIALIZED VIEW`.
- Fixed incorrect filtering by partition index in expressions like `partition_key_column IN (...)`.
- Fixed inability to execute `OPTIMIZE` query on non-leader replica if `RENAME` was performed on the table.
- Fixed the authorization error when executing `OPTIMIZE` or `ALTER` queries on a non-leader replica.
- Fixed freezing of `KILL QUERY`.
- Fixed an error in the ZooKeeper client library which led to loss of watches, freezing of the distributed DDL queue, and slowdowns in the replication queue if a non-empty `chroot` prefix is used in the ZooKeeper configuration.

#### Backward incompatible changes: {#backward-incompatible-changes-8}

- Removed support for expressions like `(a, b) IN (SELECT (a, b))` (you can use the equivalent expression `(a, b) IN (SELECT a, b)`). In previous releases, these expressions led to undetermined `WHERE` filtering or caused errors.

### ClickHouse release 1.1.54378, 2018-04-16 {#clickhouse-release-1-1-54378-2018-04-16}

#### New features: {#new-features-14}

- Logging level can be changed without restarting the server.
- Added the `SHOW CREATE DATABASE` query.
- The `query_id` can be passed to `clickhouse-client` (elBroom).
- New setting: `max_network_bandwidth_for_all_users`.
- Added support for `ALTER TABLE ... PARTITION ...` for `MATERIALIZED VIEW`.
- Added information about the size of data parts in uncompressed form in the system table.
- Server-to-server encryption support for distributed tables (`<secure>1</secure>` in the replica config in `<remote_servers>`).
- Table-level configuration for the `ReplicatedMergeTree` family to minimize the amount of data stored in ZooKeeper: `use_minimalistic_checksums_in_zookeeper = 1`.
- Configuration of the `clickhouse-client` prompt. By default, server names are now output to the prompt. The server’s display name can be changed. It’s also sent in the `X-ClickHouse-Display-Name` HTTP header (Kirill Shvakov).
- Multiple comma-separated `topics` can be specified for the `Kafka` engine (Tobias Adamson).
- When a query is stopped by `KILL QUERY` or `replace_running_query`, the client receives the `Query was canceled` exception instead of an incomplete result.

#### Improvements: {#improvements-13}

- `ALTER TABLE ... DROP/DETACH PARTITION` queries are run at the front of the replication queue.
- `SELECT ... FINAL` and `OPTIMIZE ... FINAL` can be used even when the table has a single data part.
- A `query_log` table is recreated on the fly if it was deleted manually (Kirill Shvakov).
- The `lengthUTF8` function runs faster (zhang2014).
- Improved performance of synchronous inserts in `Distributed` tables (`insert_distributed_sync = 1`) when there is a very large number of shards.
- The server accepts the `send_timeout` and `receive_timeout` settings from the client and applies them when connecting to the client (they are applied in reverse order: the server socket’s `send_timeout` is set to the `receive_timeout` value received from the client, and vice versa).
- More robust crash recovery for asynchronous insertion into `Distributed` tables.
- The return type of the `countEqual` function changed from `UInt32` to `UInt64` (谢磊).

#### Bug fixes: {#bug-fixes-25}

- Fixed an error with `IN` when the left side of the expression is `Nullable`.
- Correct results are now returned when using tuples with `IN` when some of the tuple components are in the table index.
- The `max_execution_time` limit now works correctly with distributed queries.
- Fixed errors when calculating the size of composite columns in the `system.columns` table.
- Fixed an error when creating a temporary table `CREATE TEMPORARY TABLE IF NOT EXISTS`.
- Fixed errors in `StorageKafka` (\#2075).
- Fixed server crashes from invalid arguments of certain aggregate functions.
- `Too many parts` state is less likely to happen when inserting into aggregated materialized views (\#2084).
- Fixed the error that prevented the `DETACH DATABASE` query from stopping background tasks for `ReplicatedMergeTree` tables.
- Corrected recursive handling of substitutions in the config if a substitution must be followed by another substitution on the same level.
- Corrected the syntax in the metadata file when creating a `VIEW` that uses a query with `UNION ALL`.
- `SummingMergeTree` now works correctly for summation of nested data structures with a composite key.
- Fixed the possibility of a race condition when choosing the leader for `ReplicatedMergeTree` tables.

#### Build changes: {#build-changes-5}

- The build supports `ninja` instead of `make` and uses `ninja` by default for building releases.
- Renamed packages: `clickhouse-server-base` to `clickhouse-common-static`; `clickhouse-server-common` to `clickhouse-server`; `clickhouse-common-dbg` to `clickhouse-common-static-dbg`. To install, use `clickhouse-server clickhouse-client`. Packages with the old names will still load in the repositories for backward compatibility.

#### Backward incompatible changes: {#backward-incompatible-changes-9}

- Removed the special interpretation of an IN expression if an array is specified on the left side. Previously, the expression `arr IN (set)` was interpreted as “at least one `arr` element belongs to the `set`”. To get the same behavior in the new version, write `arrayExists(x -> x IN (set), arr)`.
- Disabled the incorrect use of the socket option `SO_REUSEPORT`, which was incorrectly enabled by default in the Poco library. Note that on Linux there is no longer any reason to simultaneously specify the addresses `::` and `0.0.0.0` for listen – use just `::`, which allows listening to the connection both over IPv4 and IPv6 (with the default kernel config settings). You can also revert to the behavior from previous versions by specifying `<listen_reuse_port>1</listen_reuse_port>` in the config.

### ClickHouse release 1.1.54370, 2018-03-16 {#clickhouse-release-1-1-54370-2018-03-16}

#### New features: {#new-features-15}

- Added the `system.macros` table and auto updating of macros when the config file is changed.
- Added the `SYSTEM RELOAD CONFIG` query.
- Added the `maxIntersections(left_col, right_col)` aggregate function, which returns the maximum number of simultaneously intersecting intervals `[left; right]`. The `maxIntersectionsPosition(left, right)` function returns the beginning of the “maximum” interval (see the example at the end of this section). ([Michael Furmur](https://github.com/ClickHouse/ClickHouse/pull/2012))

#### Improvements: {#improvements-14}

- When inserting data in a `Replicated` table, fewer requests are made to `ZooKeeper` (and most of the user-level errors have disappeared from the `ZooKeeper` log).
- Added the ability to create aliases for data sets. Example: `WITH (1, 2, 3) AS set SELECT number IN set FROM system.numbers LIMIT 10`.

#### Bug fixes: {#bug-fixes-26}

- Fixed the `Illegal PREWHERE` error when reading from Merge tables for `Distributed` tables.
- Added fixes that allow you to start clickhouse-server in IPv4-only Docker containers.
- Fixed a race condition when reading from the `system.parts_columns` table.
- Removed double buffering during a synchronous insert to a `Distributed` table, which could have caused the connection to timeout.
- Fixed a bug that caused excessively long waits for an unavailable replica before beginning a `SELECT` query.
- Fixed incorrect dates in the `system.parts` table.
- Fixed a bug that made it impossible to insert data in a `Replicated` table if `chroot` was non-empty in the configuration of the `ZooKeeper` cluster.
- Fixed the vertical merging algorithm for an empty `ORDER BY` table.
- Restored the ability to use dictionaries in queries to remote tables, even if these dictionaries are not present on the requestor server. This functionality was lost in release 1.1.54362.
- Restored the behavior for queries like `SELECT * FROM remote('server2', default.table) WHERE col IN (SELECT col2 FROM default.table)` when the right side of the `IN` should use a remote `default.table` instead of a local one. This behavior was broken in version 1.1.54358.
- Removed extraneous error-level logging of `Not found column ... in block`.
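
A minimal, self-contained sketch of the new interval functions from this release (the three test intervals are made up for illustration):

```sql
SELECT
    maxIntersections(l, r) AS max_overlapping,          -- 2: [1,5] and [2,6] overlap
    maxIntersectionsPosition(l, r) AS overlap_starts_at -- 2: the overlap begins at l = 2
FROM
(
    SELECT
        arrayJoin([(1, 5), (2, 6), (7, 9)]) AS iv,
        tupleElement(iv, 1) AS l,
        tupleElement(iv, 2) AS r
)
```
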

### Clickhouse Release 1.1.54362, 2018-03-11 {#clickhouse-release-1-1-54362-2018-03-11}

#### New features: {#new-features-16}

- Aggregation without `GROUP BY` for an empty set (such as `SELECT count(*) FROM table WHERE 0`) now returns a result with one row with null values for aggregate functions, in compliance with the SQL standard. To restore the old behavior (return an empty result), set `empty_result_for_aggregation_by_empty_set` to 1.
- Added type conversion for `UNION ALL`. Different alias names are allowed in `SELECT` positions in `UNION ALL`, in compliance with the SQL standard.
- Arbitrary expressions are supported in `LIMIT BY` clauses. Previously, it was only possible to use columns resulting from `SELECT`.
- An index of `MergeTree` tables is used when `IN` is applied to a tuple of expressions from the columns of the primary key. Example: `WHERE (UserID, EventDate) IN ((123, '2000-01-01'), ...)` (Anastasiya Tsarkova).
- Added the `clickhouse-copier` tool for copying between clusters and resharding data (beta).
- Added consistent hashing functions: `yandexConsistentHash`, `jumpConsistentHash`, `sumburConsistentHash`. They can be used as a sharding key in order to reduce the amount of network traffic during subsequent reshardings.
- Added functions: `arrayAny`, `arrayAll`, `hasAny`, `hasAll`, `arrayIntersect`, `arrayResize`.
- Added the `arrayCumSum` function (Javi Santana).
- Added the `parseDateTimeBestEffort`, `parseDateTimeBestEffortOrZero`, and `parseDateTimeBestEffortOrNull` functions to read the DateTime from a string containing text in a wide variety of possible formats (see the short example below).
- Data can be partially reloaded from external dictionaries during updating (load just the records in which the value of the specified field is greater than in the previous download) (Arsen Hakobyan).
- Added the `cluster` table function. Example: `cluster(cluster_name, db, table)`. The `remote` table function can accept the cluster name as the first argument, if it is specified as an identifier.
- The `remote` and `cluster` table functions can be used in `INSERT` queries.
- Added the `create_table_query` and `engine_full` virtual columns to the `system.tables` table. The `metadata_modification_time` column is virtual.
- Added the `data_path` and `metadata_path` columns to the `system.tables` and `system.databases` tables, and added the `path` column to the `system.parts` and `system.parts_columns` tables.
- Added additional information about merges in the `system.part_log` table.
- An arbitrary partitioning key can be used for the `system.query_log` table (Kirill Shvakov).
- The `SHOW TABLES` query now also shows temporary tables. Added temporary tables and the `is_temporary` column to `system.tables` (zhang2014).
- Added `DROP TEMPORARY TABLE` and `EXISTS TEMPORARY TABLE` queries (zhang2014).
- Support for `SHOW CREATE TABLE` for temporary tables (zhang2014).
- Added the `system_profile` configuration parameter for the settings used by internal processes.
- Support for loading `object_id` as an attribute in `MongoDB` dictionaries (Pavel Litvinenko).
- Reading `null` as the default value when loading data for an external dictionary with the `MongoDB` source (Pavel Litvinenko).
- Reading `DateTime` values in the `Values` format from a Unix timestamp without single quotes.
- Failover is supported in `remote` table functions for cases when some of the replicas are missing the requested table.
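
A quick sketch of the best-effort DateTime parsers mentioned above (the input strings are arbitrary examples):

```sql
SELECT
    parseDateTimeBestEffort('Sat, 18 Aug 2018 07:22:16 GMT') AS parsed,
    parseDateTimeBestEffortOrZero('not a date')              AS or_zero, -- the zero DateTime
    parseDateTimeBestEffortOrNull('not a date')              AS or_null  -- NULL
```
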
- Configuration settings can be overridden in the command line when you run `clickhouse-server`. Example: `clickhouse-server -- --logger.level=information`.
- Implemented the `empty` function for a `FixedString` argument: the function returns 1 if the string consists entirely of null bytes (zhang2014). See the short example below.
- Added the `listen_try` configuration parameter for listening to at least one of the listen addresses without quitting, if some of the addresses can’t be listened to (useful for systems with disabled support for IPv4 or IPv6).
- Added the `VersionedCollapsingMergeTree` table engine.
- Support for rows and arbitrary numeric types for the `library` dictionary source.
- `MergeTree` tables can be used without a primary key (you need to specify `ORDER BY tuple()`).
- A `Nullable` type can be `CAST` to a non-`Nullable` type if the argument is not `NULL`.
- `RENAME TABLE` can be performed for `VIEW`.
- Added the `throwIf` function.
- Added the `odbc_default_field_size` option, which allows you to extend the maximum size of the value loaded from an ODBC source (by default, it is 1024).
- The `system.processes` table and `SHOW PROCESSLIST` now have the `is_cancelled` and `peak_memory_usage` columns.

#### Improvements: {#improvements-15}

- Limits and quotas on the result are no longer applied to intermediate data for `INSERT SELECT` queries or for `SELECT` subqueries.
- Fewer false triggers of `force_restore_data` when checking the status of `Replicated` tables when the server starts.
- Added the `allow_distributed_ddl` option.
- Nondeterministic functions are not allowed in expressions for `MergeTree` table keys.
- Files with substitutions from `config.d` directories are loaded in alphabetical order.
- Improved performance of the `arrayElement` function in the case of a constant multidimensional array with an empty array as one of the elements. Example: `[[1], []][x]`.
- The server starts faster now when using configuration files with very large substitutions (for instance, very large lists of IP networks).
- When running a query, table-valued functions run once. Previously, `remote` and `mysql` table-valued functions performed the same query twice to retrieve the table structure from a remote server.
- The `MkDocs` documentation generator is used.
- When you try to delete a table column that `DEFAULT`/`MATERIALIZED` expressions of other columns depend on, an exception is thrown (zhang2014).
- Added the ability to parse an empty line in text formats as the number 0 for `Float` data types. This feature was previously available but was lost in release 1.1.54342.
- `Enum` values can be used in `min`, `max`, `sum` and some other functions. In these cases, it uses the corresponding numeric values. This feature was previously available but was lost in release 1.1.54337.
- Added `max_expanded_ast_elements` to restrict the size of the AST after recursively expanding aliases.

#### Bug fixes: {#bug-fixes-27}

- Fixed cases when unnecessary columns were removed from subqueries in error, or not removed from subqueries containing `UNION ALL`.
- Fixed a bug in merges for `ReplacingMergeTree` tables.
- Fixed synchronous insertions in `Distributed` tables (`insert_distributed_sync = 1`).
- Fixed segfault for certain uses of `FULL` and `RIGHT JOIN` with duplicate columns in subqueries.
- Fixed segfault for certain uses of `replace_running_query` and `KILL QUERY`.
- Fixed the order of the `source` and `last_exception` columns in the `system.dictionaries` table.
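
A minimal sketch of the `empty` behavior on `FixedString` described in the feature list above (values chosen for illustration):

```sql
SELECT
    empty(toFixedString('\0\0\0\0', 4)) AS all_null_bytes, -- 1: only null bytes
    empty(toFixedString('abcd', 4))     AS has_data        -- 0
```
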
- Fixed a bug when the `DROP DATABASE` query did not delete the file with metadata.
- Fixed the `DROP DATABASE` query for `Dictionary` databases.
- Fixed the low precision of `uniqHLL12` and `uniqCombined` functions for cardinalities greater than 100 million items (Alex Bocharov).
- Fixed the calculation of implicit default values when it was necessary to simultaneously calculate explicit default expressions in `INSERT` queries (zhang2014).
- Fixed a rare case when a query to a `MergeTree` table couldn’t finish (chenxing-xc).
- Fixed a crash that occurred when running a `CHECK` query for `Distributed` tables if all shards are local (chenxing.xc).
- Fixed a slight performance regression with functions that use regular expressions.
- Fixed a performance regression when creating multidimensional arrays from complex expressions.
- Fixed a bug that could cause an extra `FORMAT` section to appear in an `.sql` file with metadata.
- Fixed a bug that caused the `max_table_size_to_drop` limit to apply when trying to delete a `MATERIALIZED VIEW` looking at an explicitly specified table.
- Fixed incompatibility with old clients (old clients were sometimes sent data with the `DateTime('timezone')` type, which they do not understand).
- Fixed a bug when reading `Nested` column elements of structures that were added using `ALTER` but that are empty for the old partitions, when the conditions for these columns moved to `PREWHERE`.
- Fixed a bug when filtering tables by virtual `_table` columns in queries to `Merge` tables.
- Fixed a bug when using `ALIAS` columns in `Distributed` tables.
- Fixed a bug that made dynamic compilation impossible for queries with aggregate functions from the `quantile` family.
- Fixed a race condition in the query execution pipeline that occurred in very rare cases when using `Merge` tables with a large number of tables, and when using `GLOBAL` subqueries.
- Fixed a crash when passing arrays of different sizes to an `arrayReduce` function when using aggregate functions from multiple arguments.
- Prohibited the use of queries with `UNION ALL` in a `MATERIALIZED VIEW`.
- Fixed an error during initialization of the `part_log` system table when the server starts (by default, `part_log` is disabled).

#### Backward incompatible changes: {#backward-incompatible-changes-10}

- Removed the `distributed_ddl_allow_replicated_alter` option. This behavior is enabled by default.
- Removed the `strict_insert_defaults` setting. If you were using this functionality, write to `clickhouse-feedback@yandex-team.com`.
- Removed the `UnsortedMergeTree` engine.

### Clickhouse Release 1.1.54343, 2018-02-05 {#clickhouse-release-1-1-54343-2018-02-05}

- Added macros support for defining cluster names in distributed DDL queries and constructors of Distributed tables: `CREATE TABLE distr ON CLUSTER '{cluster}' (...) ENGINE = Distributed('{cluster}', 'db', 'table')`.
- Now queries like `SELECT ... FROM table WHERE expr IN (subquery)` are processed using the `table` index.
- Improved processing of duplicates when inserting to Replicated tables, so they no longer slow down execution of the replication queue.

### Clickhouse Release 1.1.54342, 2018-01-22 {#clickhouse-release-1-1-54342-2018-01-22}

This release contains bug fixes for the previous release 1.1.54337:

- Fixed a regression in 1.1.54337: if the default user has readonly access, then the server refuses to start up with the message `Cannot create database in readonly mode`.
- Fixed a regression in 1.1.54337: on systems with systemd, logs are always written to syslog regardless of the configuration; the watchdog script still uses init.d.
- Fixed a regression in 1.1.54337: wrong default configuration in the Docker image.
- Fixed nondeterministic behavior of GraphiteMergeTree (you can see it in log messages `Data after merge is not byte-identical to the data on another replicas`).
- Fixed a bug that may lead to inconsistent merges after OPTIMIZE query to Replicated tables (you may see it in log messages `Part ... intersects the previous part`).
- Buffer tables now work correctly when MATERIALIZED columns are present in the destination table (by zhang2014).
- Fixed a bug in the implementation of NULL.

### Clickhouse Release 1.1.54337, 2018-01-18 {#clickhouse-release-1-1-54337-2018-01-18}

#### New features: {#new-features-17}

- Added support for storage of multi-dimensional arrays and tuples (`Tuple` data type) in tables.
- Support for table functions for `DESCRIBE` and `INSERT` queries. Added support for subqueries in `DESCRIBE`. Examples: `DESC TABLE remote('host', default.hits)`; `DESC TABLE (SELECT 1)`; `INSERT INTO TABLE FUNCTION remote('host', default.hits)`. Support for `INSERT INTO TABLE` in addition to `INSERT INTO`.
- Improved support for time zones. The `DateTime` data type can be annotated with the timezone that is used for parsing and formatting in text formats. Example: `DateTime('Europe/Moscow')`. When timezones are specified in functions for `DateTime` arguments, the return type will track the timezone, and the value will be displayed as expected.
- Added the functions `toTimeZone`, `timeDiff`, `toQuarter`, `toRelativeQuarterNum`. The `toRelativeHour`/`Minute`/`Second` functions can take a value of type `Date` as an argument. The `now` function name is case-sensitive.
- Added the `toStartOfFifteenMinutes` function (Kirill Shvakov).
- Added the `clickhouse format` tool for formatting queries.
- Added the `format_schema_path` configuration parameter (Marek Vavruşa). It is used for specifying a schema in `Cap'n Proto` format. Schema files can be located only in the specified directory.
- Added support for config substitutions (`incl` and `conf.d`) for configuration of external dictionaries and models (Pavel Yakunin).
- Added a column with documentation for the `system.settings` table (Kirill Shvakov).
- Added the `system.parts_columns` table with information about column sizes in each data part of `MergeTree` tables.
- Added the `system.models` table with information about loaded `CatBoost` machine learning models.
- Added the `mysql` and `odbc` table functions and corresponding `MySQL` and `ODBC` table engines for accessing remote databases. This functionality is in the beta stage.
- Added the possibility to pass an argument of type `AggregateFunction` for the `groupArray` aggregate function (so you can create an array of states of some aggregate function).
- Removed restrictions on various combinations of aggregate function combinators. For example, you can use `avgForEachIf` as well as `avgIfForEach` aggregate functions, which have different behaviors.
- The `-ForEach` aggregate function combinator is extended for the case of aggregate functions of multiple arguments.
- Added support for aggregate functions of `Nullable` arguments even for cases when the function returns a non-`Nullable` result (added with the contribution of Silviu Caragea). Example: `groupArray`, `groupUniqArray`, `topK`.
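
A small sketch of the last item above, aggregating a `Nullable` column (the data is made up; NULL inputs are skipped by the aggregate functions):

```sql
SELECT
    groupArray(x)     AS values_without_nulls,
    groupUniqArray(x) AS distinct_values,
    topK(2)(x)        AS top_two
FROM
(
    SELECT nullIf(number % 3, 2) AS x -- 0, 1, NULL, 0, 1, NULL, ...
    FROM numbers(9)
)
```
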
- Added the `max_client_network_bandwidth` setting for `clickhouse-client` (Kirill Shvakov).
- Users with the `readonly = 2` setting are allowed to work with TEMPORARY tables (CREATE, DROP, INSERT…) (Kirill Shvakov).
- Added support for using multiple consumers with the `Kafka` engine. Extended configuration options for `Kafka` (Marek Vavruša).
- Added the `intExp3` and `intExp4` functions.
- Added the `sumKahan` aggregate function (see the short example below).
- Added the `to*Number*OrNull` functions, where `*Number*` is a numeric type.
- Added support for `WITH` clauses for an `INSERT SELECT` query (author: zhang2014).
- Added settings: `http_connection_timeout`, `http_send_timeout`, `http_receive_timeout`. In particular, these settings are used for downloading data parts for replication. Changing these settings allows for faster failover if the network is overloaded.
- Added support for `ALTER` for tables of type `Null` (Anastasiya Tsarkova).
- The `reinterpretAsString` function is extended for all data types that are stored contiguously in memory.
- Added the `--silent` option for the `clickhouse-local` tool. It suppresses printing query execution info in stderr.
- Added support for reading values of type `Date` from text in a format where the month and/or day of the month is specified using a single digit instead of two digits (Amos Bird).

#### Performance optimizations: {#performance-optimizations}

- Improved performance of aggregate functions `min`, `max`, `any`, `anyLast`, `anyHeavy`, `argMin`, `argMax` from string arguments.
- Improved performance of the functions `isInfinite`, `isFinite`, `isNaN`, `roundToExp2`.
- Improved performance of parsing and formatting `Date` and `DateTime` type values in text format.
- Improved performance and precision of parsing floating point numbers.
- Lowered memory usage for `JOIN` in the case when the left and right parts have columns with identical names that are not contained in `USING`.
- Improved performance of aggregate functions `varSamp`, `varPop`, `stddevSamp`, `stddevPop`, `covarSamp`, `covarPop`, `corr` by reducing computational stability. The old functions are available under the names `varSampStable`, `varPopStable`, `stddevSampStable`, `stddevPopStable`, `covarSampStable`, `covarPopStable`, `corrStable`.

#### Bug fixes: {#bug-fixes-28}

- Fixed data deduplication after running a `DROP` or `DETACH PARTITION` query. In the previous version, dropping a partition and inserting the same data again was not working because inserted blocks were considered duplicates.
- Fixed a bug that could lead to incorrect interpretation of the `WHERE` clause for `CREATE MATERIALIZED VIEW` queries with `POPULATE`.
- Fixed a bug in using the `root_path` parameter in the `zookeeper_servers` configuration.
- Fixed unexpected results of passing the `Date` argument to `toStartOfDay`.
- Fixed the `addMonths` and `subtractMonths` functions and the arithmetic for `INTERVAL n MONTH` in cases when the result has the previous year.
- Added missing support for the `UUID` data type for `DISTINCT`, `JOIN`, and `uniq` aggregate functions and external dictionaries (Evgeniy Ivanov). Support for `UUID` is still incomplete.
- Fixed `SummingMergeTree` behavior in cases when the rows summed to zero.
- Various fixes for the `Kafka` engine (Marek Vavruša).
- Fixed incorrect behavior of the `Join` table engine (Amos Bird).
- Fixed incorrect allocator behavior under FreeBSD and OS X.
- The `extractAll` function now supports empty matches.
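
A quick sketch of `sumKahan` from the feature list above, comparing plain and compensated summation on values that accumulate floating-point rounding error:

```sql
SELECT
    sum(x)      AS plain_sum,       -- may drift slightly from 100000
    sumKahan(x) AS compensated_sum  -- Kahan summation compensates the rounding error
FROM
(
    SELECT 0.1 AS x
    FROM numbers(1000000)
)
```
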
- Fixed an error that blocked usage of `libressl` instead of `openssl`.
- Fixed the `CREATE TABLE AS SELECT` query from temporary tables.
- Fixed non-atomicity of updating the replication queue. This could lead to replicas being out of sync until the server restarts.
- Fixed possible overflow in `gcd`, `lcm` and `modulo` (`%` operator) (Maks Skorokhod).
- `-preprocessed` files are now created after changing `umask` (`umask` can be changed in the config).
- Fixed a bug in the background check of parts (`MergeTreePartChecker`) when using a custom partition key.
- Fixed parsing of tuples (values of the `Tuple` data type) in text formats.
- Improved error messages about incompatible types passed to `multiIf`, `array` and some other functions.
- Redesigned support for `Nullable` types. Fixed bugs that may lead to a server crash. Fixed almost all other bugs related to `NULL` support: incorrect type conversions in INSERT SELECT, insufficient support for Nullable in HAVING and PREWHERE, `join_use_nulls` mode, Nullable types as arguments of `OR` operator, etc.
- Fixed various bugs related to internal semantics of data types. Examples: unnecessary summing of `Enum` type fields in `SummingMergeTree`; alignment of `Enum` types in `Pretty` formats, etc.
- Stricter checks for allowed combinations of composite columns.
- Fixed the overflow when specifying a very large parameter for the `FixedString` data type.
- Fixed a bug in the `topK` aggregate function in a generic case.
- Added the missing check for equality of array sizes in arguments of n-ary variants of aggregate functions with an `-Array` combinator.
- Fixed a bug in `--pager` for `clickhouse-client` (author: ks1322).
- Fixed the precision of the `exp10` function.
- Fixed the behavior of the `visitParamExtract` function for better compliance with documentation.
- Fixed the crash when incorrect data types are specified.
- Fixed the behavior of `DISTINCT` in the case when all columns are constants.
- Fixed query formatting in the case of using the `tupleElement` function with a complex constant expression as the tuple element index.
- Fixed a bug in `Dictionary` tables for `range_hashed` dictionaries.
- Fixed a bug that led to excessive rows in the result of `FULL` and `RIGHT JOIN` (Amos Bird).
- Fixed a server crash when creating and removing temporary files in `config.d` directories during config reload.
- Fixed the `SYSTEM DROP DNS CACHE` query: the cache was flushed but addresses of cluster nodes were not updated.
- Fixed the behavior of `MATERIALIZED VIEW` after executing `DETACH TABLE` for the table under the view (Marek Vavruša).

#### Build improvements: {#build-improvements-4}

- The `pbuilder` tool is used for builds. The build process is almost completely independent of the build host environment.
- A single build is used for different OS versions. Packages and binaries have been made compatible with a wide range of Linux systems.
- Added the `clickhouse-test` package. It can be used to run functional tests.
- The source tarball can now be published to the repository. It can be used to reproduce the build without using GitHub.
- Added limited integration with Travis CI. Due to limits on build time in Travis, only the debug build is tested and a limited subset of tests are run.
- Added support for `Cap'n'Proto` in the default build.
- Changed the format of documentation sources from `reStructuredText` to `Markdown`.
- Added support for `systemd` (Vladimir Smirnov).
  It is disabled by default due to incompatibility with some OS images and can be enabled manually.
- For dynamic code generation, `clang` and `lld` are embedded into the `clickhouse` binary. They can also be invoked as `clickhouse clang` and `clickhouse lld`.
- Removed usage of GNU extensions from the code. Enabled the `-Wextra` option. When building with `clang` the default is `libc++` instead of `libstdc++`.
- Extracted `clickhouse_parsers` and `clickhouse_common_io` libraries to speed up builds of various tools.

#### Backward incompatible changes: {#backward-incompatible-changes-11}

- The format for marks in `Log` type tables that contain `Nullable` columns was changed in a backward incompatible way. If you have these tables, you should convert them to the `TinyLog` type before starting up the new server version. To do this, replace `ENGINE = Log` with `ENGINE = TinyLog` in the corresponding `.sql` file in the `metadata` directory (a sketch of this edit follows these notes). If your table doesn’t have `Nullable` columns or if the type of your table is not `Log`, then you don’t need to do anything.
- Removed the `experimental_allow_extended_storage_definition_syntax` setting. Now this feature is enabled by default.
- The `runningIncome` function was renamed to `runningDifferenceStartingWithFirstValue` to avoid confusion.
- Removed the `FROM ARRAY JOIN arr` syntax when ARRAY JOIN is specified directly after FROM with no table (Amos Bird).
- Removed the `BlockTabSeparated` format that was used solely for demonstration purposes.
- Changed the state format for aggregate functions `varSamp`, `varPop`, `stddevSamp`, `stddevPop`, `covarSamp`, `covarPop`, `corr`. If you have stored states of these aggregate functions in tables (using the `AggregateFunction` data type or materialized views with corresponding states), please write to clickhouse-feedback@yandex-team.com.
- In previous server versions there was an undocumented feature: if an aggregate function depends on parameters, you can still specify it without parameters in the AggregateFunction data type. Example: `AggregateFunction(quantiles, UInt64)` instead of `AggregateFunction(quantiles(0.5, 0.9), UInt64)`. This feature was lost. Although it was undocumented, we plan to support it again in future releases.
- Enum data types cannot be used in min/max aggregate functions. This ability will be returned in the next release.

#### Please note when upgrading: {#please-note-when-upgrading}

- When doing a rolling update on a cluster, at the point when some of the replicas are running the old version of ClickHouse and some are running the new version, replication is temporarily stopped and the message `unknown parameter 'shard'` appears in the log. Replication will continue after all replicas of the cluster are updated.
- If different versions of ClickHouse are running on the cluster servers, it is possible that distributed queries using the following functions will have incorrect results: `varSamp`, `varPop`, `stddevSamp`, `stddevPop`, `covarSamp`, `covarPop`, `corr`. You should update all cluster nodes.
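As a sketch of the `Log` → `TinyLog` conversion described in the backward incompatible changes above (the path and table definition are illustrative, not taken from a real installation):

```sql
-- Before, in metadata/default/events.sql (edit while the server is stopped):
ATTACH TABLE events (d Date, v Nullable(Int32)) ENGINE = Log
-- After the edit, start the new server version:
ATTACH TABLE events (d Date, v Nullable(Int32)) ENGINE = TinyLog
```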
- -## [Changelog for 2017](https://github.com/ClickHouse/ClickHouse/blob/master/docs/en/changelog/2017.md) diff --git a/docs/zh/changelog/2019.md b/docs/zh/changelog/2019.md deleted file mode 100644 index 01a0756af14..00000000000 --- a/docs/zh/changelog/2019.md +++ /dev/null @@ -1,2071 +0,0 @@ ---- -en_copy: true ---- - -## ClickHouse release v19.17 {#clickhouse-release-v19-17} - -### ClickHouse release v19.17.6.36, 2019-12-27 {#clickhouse-release-v19-17-6-36-2019-12-27} - -#### Bug Fix {#bug-fix} - -- Fixed potential buffer overflow in decompress. Malicious user can pass fabricated compressed data that could cause read after buffer. This issue was found by Eldar Zaitov from Yandex information security team. [\#8404](https://github.com/ClickHouse/ClickHouse/pull/8404) ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Fixed possible server crash (`std::terminate`) when the server cannot send or write data in JSON or XML format with values of String data type (that require UTF-8 validation) or when compressing result data with Brotli algorithm or in some other rare cases. [\#8384](https://github.com/ClickHouse/ClickHouse/pull/8384) ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Fixed dictionaries with source from a clickhouse `VIEW`, now reading such dictionaries doesn’t cause the error `There is no query`. [\#8351](https://github.com/ClickHouse/ClickHouse/pull/8351) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -- Fixed checking if a client host is allowed by host\_regexp specified in users.xml. [\#8241](https://github.com/ClickHouse/ClickHouse/pull/8241), [\#8342](https://github.com/ClickHouse/ClickHouse/pull/8342) ([Vitaly Baranov](https://github.com/vitlibar)) -- `RENAME TABLE` for a distributed table now renames the folder containing inserted data before sending to shards. This fixes an issue with successive renames `tableA->tableB`, `tableC->tableA`. [\#8306](https://github.com/ClickHouse/ClickHouse/pull/8306) ([tavplubix](https://github.com/tavplubix)) -- `range_hashed` external dictionaries created by DDL queries now allow ranges of arbitrary numeric types. [\#8275](https://github.com/ClickHouse/ClickHouse/pull/8275) ([alesapin](https://github.com/alesapin)) -- Fixed `INSERT INTO table SELECT ... FROM mysql(...)` table function. [\#8234](https://github.com/ClickHouse/ClickHouse/pull/8234) ([tavplubix](https://github.com/tavplubix)) -- Fixed segfault in `INSERT INTO TABLE FUNCTION file()` while inserting into a file which doesn’t exist. Now in this case file would be created and then insert would be processed. [\#8177](https://github.com/ClickHouse/ClickHouse/pull/8177) ([Olga Khvostikova](https://github.com/stavrolia)) -- Fixed bitmapAnd error when intersecting an aggregated bitmap and a scalar bitmap. [\#8082](https://github.com/ClickHouse/ClickHouse/pull/8082) ([Yue Huang](https://github.com/moon03432)) -- Fixed segfault when `EXISTS` query was used without `TABLE` or `DICTIONARY` qualifier, just like `EXISTS t`. [\#8213](https://github.com/ClickHouse/ClickHouse/pull/8213) ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Fixed return type for functions `rand` and `randConstant` in case of nullable argument. Now functions always return `UInt32` and never `Nullable(UInt32)`. [\#8204](https://github.com/ClickHouse/ClickHouse/pull/8204) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -- Fixed `DROP DICTIONARY IF EXISTS db.dict`, now it doesn’t throw exception if `db` doesn’t exist. 
  [\#8185](https://github.com/ClickHouse/ClickHouse/pull/8185) ([Vitaly Baranov](https://github.com/vitlibar))
- If a table wasn’t completely dropped because of server crash, the server will try to restore and load it. [\#8176](https://github.com/ClickHouse/ClickHouse/pull/8176) ([tavplubix](https://github.com/tavplubix))
- Fixed a trivial count query for a distributed table if there are more than two shards with a local table. [\#8164](https://github.com/ClickHouse/ClickHouse/pull/8164) ([小路](https://github.com/nicelulu))
- Fixed a bug that led to a data race in DB::BlockStreamProfileInfo::calculateRowsBeforeLimit() [\#8143](https://github.com/ClickHouse/ClickHouse/pull/8143) ([Alexander Kazakov](https://github.com/Akazz))
- Fixed `ALTER table MOVE part` executed immediately after merging the specified part, which could cause moving a part which the specified part merged into. Now it correctly moves the specified part. [\#8104](https://github.com/ClickHouse/ClickHouse/pull/8104) ([Vladimir Chebotarev](https://github.com/excitoon))
- Expressions for dictionaries can be specified as strings now. This is useful for calculation of attributes while extracting data from non-ClickHouse sources, because it allows using non-ClickHouse syntax for those expressions. [\#8098](https://github.com/ClickHouse/ClickHouse/pull/8098) ([alesapin](https://github.com/alesapin))
- Fixed a very rare race in `clickhouse-copier` because of an overflow in ZXid. [\#8088](https://github.com/ClickHouse/ClickHouse/pull/8088) ([Ding Xiang Fei](https://github.com/dingxiangfei2009))
- Fixed the bug when, after a query failed (due to “Too many simultaneous queries”, for example), it would not read external tables info, and the next request would interpret this info as the beginning of the next query, causing an error like `Unknown packet from client`. [\#8084](https://github.com/ClickHouse/ClickHouse/pull/8084) ([Azat Khuzhin](https://github.com/azat))
- Avoid null dereference after “Unknown packet X from server”. [\#8071](https://github.com/ClickHouse/ClickHouse/pull/8071) ([Azat Khuzhin](https://github.com/azat))
- Restore support of all ICU locales, add the ability to apply collations for constant expressions and add language name to system.collations table. [\#8051](https://github.com/ClickHouse/ClickHouse/pull/8051) ([alesapin](https://github.com/alesapin))
- Number of streams for read from `StorageFile` and `StorageHDFS` is now limited, to avoid exceeding the memory limit. [\#7981](https://github.com/ClickHouse/ClickHouse/pull/7981) ([alesapin](https://github.com/alesapin))
- Fixed `CHECK TABLE` query for `*MergeTree` tables without key. [\#7979](https://github.com/ClickHouse/ClickHouse/pull/7979) ([alesapin](https://github.com/alesapin))
- Removed the mutation number from a part name in case there were no mutations. This removal improved compatibility with older versions. [\#8250](https://github.com/ClickHouse/ClickHouse/pull/8250) ([alesapin](https://github.com/alesapin))
- Fixed the bug that mutations are skipped for some attached parts because their `data_version` is larger than the table mutation version. [\#7812](https://github.com/ClickHouse/ClickHouse/pull/7812) ([Zhichang Yu](https://github.com/yuzhichang))
- Allow starting the server with redundant copies of parts after moving them to another device.
[\#7810](https://github.com/ClickHouse/ClickHouse/pull/7810) ([Vladimir Chebotarev](https://github.com/excitoon)) -- Fixed the error “Sizes of columns doesn’t match” that might appear when using aggregate function columns. [\#7790](https://github.com/ClickHouse/ClickHouse/pull/7790) ([Boris Granveaud](https://github.com/bgranvea)) -- Now an exception will be thrown in case of using WITH TIES alongside LIMIT BY. And now it’s possible to use TOP with LIMIT BY. [\#7637](https://github.com/ClickHouse/ClickHouse/pull/7637) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) -- Fix dictionary reload if it has `invalidate_query`, which stopped updates and some exception on previous update tries. [\#8029](https://github.com/ClickHouse/ClickHouse/pull/8029) ([alesapin](https://github.com/alesapin)) - -### ClickHouse release v19.17.4.11, 2019-11-22 {#clickhouse-release-v19-17-4-11-2019-11-22} - -#### Backward Incompatible Change {#backward-incompatible-change} - -- Using column instead of AST to store scalar subquery results for better performance. Setting `enable_scalar_subquery_optimization` was added in 19.17 and it was enabled by default. It leads to errors like [this](https://github.com/ClickHouse/ClickHouse/issues/7851) during upgrade to 19.17.2 or 19.17.3 from previous versions. This setting was disabled by default in 19.17.4, to make possible upgrading from 19.16 and older versions without errors. [\#7392](https://github.com/ClickHouse/ClickHouse/pull/7392) ([Amos Bird](https://github.com/amosbird)) - -#### New Feature {#new-feature} - -- Add the ability to create dictionaries with DDL queries. [\#7360](https://github.com/ClickHouse/ClickHouse/pull/7360) ([alesapin](https://github.com/alesapin)) -- Make `bloom_filter` type of index supporting `LowCardinality` and `Nullable` [\#7363](https://github.com/ClickHouse/ClickHouse/issues/7363) [\#7561](https://github.com/ClickHouse/ClickHouse/pull/7561) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -- Add function `isValidJSON` to check that passed string is a valid json. [\#5910](https://github.com/ClickHouse/ClickHouse/issues/5910) [\#7293](https://github.com/ClickHouse/ClickHouse/pull/7293) ([Vdimir](https://github.com/Vdimir)) -- Implement `arrayCompact` function [\#7328](https://github.com/ClickHouse/ClickHouse/pull/7328) ([Memo](https://github.com/Joeywzr)) -- Created function `hex` for Decimal numbers. It works like `hex(reinterpretAsString())`, but doesn’t delete last zero bytes. [\#7355](https://github.com/ClickHouse/ClickHouse/pull/7355) ([Mikhail Korotov](https://github.com/millb)) -- Add `arrayFill` and `arrayReverseFill` functions, which replace elements by other elements in front/back of them in the array. [\#7380](https://github.com/ClickHouse/ClickHouse/pull/7380) ([hcz](https://github.com/hczhcz)) -- Add `CRC32IEEE()`/`CRC64()` support [\#7480](https://github.com/ClickHouse/ClickHouse/pull/7480) ([Azat Khuzhin](https://github.com/azat)) -- Implement `char` function similar to one in [mysql](https://dev.mysql.com/doc/refman/8.0/en/string-functions.html#function_char) [\#7486](https://github.com/ClickHouse/ClickHouse/pull/7486) ([sundyli](https://github.com/sundy-li)) -- Add `bitmapTransform` function. 
It transforms an array of values in a bitmap to another array of values, the result is a new bitmap [\#7598](https://github.com/ClickHouse/ClickHouse/pull/7598) ([Zhichang Yu](https://github.com/yuzhichang)) -- Implemented `javaHashUTF16LE()` function [\#7651](https://github.com/ClickHouse/ClickHouse/pull/7651) ([achimbab](https://github.com/achimbab)) -- Add `_shard_num` virtual column for the Distributed engine [\#7624](https://github.com/ClickHouse/ClickHouse/pull/7624) ([Azat Khuzhin](https://github.com/azat)) - -#### Experimental Feature {#experimental-feature} - -- Support for processors (new query execution pipeline) in `MergeTree`. [\#7181](https://github.com/ClickHouse/ClickHouse/pull/7181) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) - -#### Bug Fix {#bug-fix-1} - -- Fix incorrect float parsing in `Values` [\#7817](https://github.com/ClickHouse/ClickHouse/issues/7817) [\#7870](https://github.com/ClickHouse/ClickHouse/pull/7870) ([tavplubix](https://github.com/tavplubix)) -- Fix rare deadlock which can happen when trace\_log is enabled. [\#7838](https://github.com/ClickHouse/ClickHouse/pull/7838) ([filimonov](https://github.com/filimonov)) -- Prevent message duplication when producing Kafka table has any MVs selecting from it [\#7265](https://github.com/ClickHouse/ClickHouse/pull/7265) ([Ivan](https://github.com/abyss7)) -- Support for `Array(LowCardinality(Nullable(String)))` in `IN`. Resolves [\#7364](https://github.com/ClickHouse/ClickHouse/issues/7364) [\#7366](https://github.com/ClickHouse/ClickHouse/pull/7366) ([achimbab](https://github.com/achimbab)) -- Add handling of `SQL_TINYINT` and `SQL_BIGINT`, and fix handling of `SQL_FLOAT` data source types in ODBC Bridge. [\#7491](https://github.com/ClickHouse/ClickHouse/pull/7491) ([Denis Glazachev](https://github.com/traceon)) -- Fix aggregation (`avg` and quantiles) over empty decimal columns [\#7431](https://github.com/ClickHouse/ClickHouse/pull/7431) ([Andrey Konyaev](https://github.com/akonyaev90)) -- Fix `INSERT` into Distributed with `MATERIALIZED` columns [\#7377](https://github.com/ClickHouse/ClickHouse/pull/7377) ([Azat Khuzhin](https://github.com/azat)) -- Make `MOVE PARTITION` work if some parts of partition are already on destination disk or volume [\#7434](https://github.com/ClickHouse/ClickHouse/pull/7434) ([Vladimir Chebotarev](https://github.com/excitoon)) -- Fixed bug with hardlinks failing to be created during mutations in `ReplicatedMergeTree` in multi-disk configurations. [\#7558](https://github.com/ClickHouse/ClickHouse/pull/7558) ([Vladimir Chebotarev](https://github.com/excitoon)) -- Fixed a bug with a mutation on a MergeTree when whole part remains unchanged and best space is being found on another disk [\#7602](https://github.com/ClickHouse/ClickHouse/pull/7602) ([Vladimir Chebotarev](https://github.com/excitoon)) -- Fixed bug with `keep_free_space_ratio` not being read from disks configuration [\#7645](https://github.com/ClickHouse/ClickHouse/pull/7645) ([Vladimir Chebotarev](https://github.com/excitoon)) -- Fix bug with table contains only `Tuple` columns or columns with complex paths. Fixes [7541](https://github.com/ClickHouse/ClickHouse/issues/7541). [\#7545](https://github.com/ClickHouse/ClickHouse/pull/7545) ([alesapin](https://github.com/alesapin)) -- Do not account memory for Buffer engine in max\_memory\_usage limit [\#7552](https://github.com/ClickHouse/ClickHouse/pull/7552) ([Azat Khuzhin](https://github.com/azat)) -- Fix final mark usage in `MergeTree` tables ordered by `tuple()`. 
In rare cases it could lead to `Can't adjust last granule` error while select. [\#7639](https://github.com/ClickHouse/ClickHouse/pull/7639) ([Anton Popov](https://github.com/CurtizJ)) -- Fix bug in mutations that have predicate with actions that require context (for example functions for json), which may lead to crashes or strange exceptions. [\#7664](https://github.com/ClickHouse/ClickHouse/pull/7664) ([alesapin](https://github.com/alesapin)) -- Fix mismatch of database and table names escaping in `data/` and `shadow/` directories [\#7575](https://github.com/ClickHouse/ClickHouse/pull/7575) ([Alexander Burmak](https://github.com/Alex-Burmak)) -- Support duplicated keys in RIGHT\|FULL JOINs, e.g. `ON t.x = u.x AND t.x = u.y`. Fix crash in this case. [\#7586](https://github.com/ClickHouse/ClickHouse/pull/7586) ([Artem Zuikov](https://github.com/4ertus2)) -- Fix `Not found column in block` when joining on expression with RIGHT or FULL JOIN. [\#7641](https://github.com/ClickHouse/ClickHouse/pull/7641) ([Artem Zuikov](https://github.com/4ertus2)) -- One more attempt to fix infinite loop in `PrettySpace` format [\#7591](https://github.com/ClickHouse/ClickHouse/pull/7591) ([Olga Khvostikova](https://github.com/stavrolia)) -- Fix bug in `concat` function when all arguments were `FixedString` of the same size. [\#7635](https://github.com/ClickHouse/ClickHouse/pull/7635) ([alesapin](https://github.com/alesapin)) -- Fixed exception in case of using 1 argument while defining S3, URL and HDFS storages. [\#7618](https://github.com/ClickHouse/ClickHouse/pull/7618) ([Vladimir Chebotarev](https://github.com/excitoon)) -- Fix scope of the InterpreterSelectQuery for views with query [\#7601](https://github.com/ClickHouse/ClickHouse/pull/7601) ([Azat Khuzhin](https://github.com/azat)) - -#### Improvement {#improvement} - -- `Nullable` columns recognized and NULL-values handled correctly by ODBC-bridge [\#7402](https://github.com/ClickHouse/ClickHouse/pull/7402) ([Vasily Nemkov](https://github.com/Enmk)) -- Write current batch for distributed send atomically [\#7600](https://github.com/ClickHouse/ClickHouse/pull/7600) ([Azat Khuzhin](https://github.com/azat)) -- Throw an exception if we cannot detect table for column name in query. [\#7358](https://github.com/ClickHouse/ClickHouse/pull/7358) ([Artem Zuikov](https://github.com/4ertus2)) -- Add `merge_max_block_size` setting to `MergeTreeSettings` [\#7412](https://github.com/ClickHouse/ClickHouse/pull/7412) ([Artem Zuikov](https://github.com/4ertus2)) -- Queries with `HAVING` and without `GROUP BY` assume group by constant. So, `SELECT 1 HAVING 1` now returns a result. [\#7496](https://github.com/ClickHouse/ClickHouse/pull/7496) ([Amos Bird](https://github.com/amosbird)) -- Support parsing `(X,)` as tuple similar to python. [\#7501](https://github.com/ClickHouse/ClickHouse/pull/7501), [\#7562](https://github.com/ClickHouse/ClickHouse/pull/7562) ([Amos Bird](https://github.com/amosbird)) -- Make `range` function behaviors almost like pythonic one. 
[\#7518](https://github.com/ClickHouse/ClickHouse/pull/7518) ([sundyli](https://github.com/sundy-li)) -- Add `constraints` columns to table `system.settings` [\#7553](https://github.com/ClickHouse/ClickHouse/pull/7553) ([Vitaly Baranov](https://github.com/vitlibar)) -- Better Null format for tcp handler, so that it’s possible to use `select ignore() from table format Null` for perf measure via clickhouse-client [\#7606](https://github.com/ClickHouse/ClickHouse/pull/7606) ([Amos Bird](https://github.com/amosbird)) -- Queries like `CREATE TABLE ... AS (SELECT (1, 2))` are parsed correctly [\#7542](https://github.com/ClickHouse/ClickHouse/pull/7542) ([hcz](https://github.com/hczhcz)) - -#### Performance Improvement {#performance-improvement} - -- The performance of aggregation over short string keys is improved. [\#6243](https://github.com/ClickHouse/ClickHouse/pull/6243) ([Alexander Kuzmenkov](https://github.com/akuzm), [Amos Bird](https://github.com/amosbird)) -- Run another pass of syntax/expression analysis to get potential optimizations after constant predicates are folded. [\#7497](https://github.com/ClickHouse/ClickHouse/pull/7497) ([Amos Bird](https://github.com/amosbird)) -- Use storage meta info to evaluate trivial `SELECT count() FROM table;` [\#7510](https://github.com/ClickHouse/ClickHouse/pull/7510) ([Amos Bird](https://github.com/amosbird), [alexey-milovidov](https://github.com/alexey-milovidov)) -- Vectorize processing `arrayReduce` similar to Aggregator `addBatch`. [\#7608](https://github.com/ClickHouse/ClickHouse/pull/7608) ([Amos Bird](https://github.com/amosbird)) -- Minor improvements in performance of `Kafka` consumption [\#7475](https://github.com/ClickHouse/ClickHouse/pull/7475) ([Ivan](https://github.com/abyss7)) - -#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement} - -- Add support for cross-compiling to the CPU architecture AARCH64. Refactor packager script. [\#7370](https://github.com/ClickHouse/ClickHouse/pull/7370) [\#7539](https://github.com/ClickHouse/ClickHouse/pull/7539) ([Ivan](https://github.com/abyss7)) -- Unpack darwin-x86\_64 and linux-aarch64 toolchains into mounted Docker volume when building packages [\#7534](https://github.com/ClickHouse/ClickHouse/pull/7534) ([Ivan](https://github.com/abyss7)) -- Update Docker Image for Binary Packager [\#7474](https://github.com/ClickHouse/ClickHouse/pull/7474) ([Ivan](https://github.com/abyss7)) -- Fixed compile errors on MacOS Catalina [\#7585](https://github.com/ClickHouse/ClickHouse/pull/7585) ([Ernest Poletaev](https://github.com/ernestp)) -- Some refactoring in query analysis logic: split complex class into several simple ones. 
  [\#7454](https://github.com/ClickHouse/ClickHouse/pull/7454) ([Artem Zuikov](https://github.com/4ertus2))
- Fix build without submodules [\#7295](https://github.com/ClickHouse/ClickHouse/pull/7295) ([proller](https://github.com/proller))
- Better `add_globs` in CMake files [\#7418](https://github.com/ClickHouse/ClickHouse/pull/7418) ([Amos Bird](https://github.com/amosbird))
- Remove hardcoded paths in `unwind` target [\#7460](https://github.com/ClickHouse/ClickHouse/pull/7460) ([Konstantin Podshumok](https://github.com/podshumok))
- Allow to use mysql format without ssl [\#7524](https://github.com/ClickHouse/ClickHouse/pull/7524) ([proller](https://github.com/proller))

#### Other {#other}

- Added ANTLR4 grammar for ClickHouse SQL dialect [\#7595](https://github.com/ClickHouse/ClickHouse/issues/7595) [\#7596](https://github.com/ClickHouse/ClickHouse/pull/7596) ([alexey-milovidov](https://github.com/alexey-milovidov))

## ClickHouse release v19.16 {#clickhouse-release-v19-16}

#### Clickhouse release v19.16.14.65, 2020-03-25

* Fixed up a bug in batched calculations of ternary logical OPs on multiple arguments (more than 10). [#8718](https://github.com/ClickHouse/ClickHouse/pull/8718) ([Alexander Kazakov](https://github.com/Akazz)) This bugfix was backported to version 19.16 by a special request from Altinity.

#### Clickhouse release v19.16.14.65, 2020-03-05 {#clickhouse-release-v19-16-14-65-2020-03-05}

- Fix distributed subqueries incompatibility with older CH versions. Fixes [\#7851](https://github.com/ClickHouse/ClickHouse/issues/7851) [(tavplubix)](https://github.com/tavplubix)
- When executing `CREATE` query, fold constant expressions in storage engine arguments. Replace empty database name with current database. Fixes [\#6508](https://github.com/ClickHouse/ClickHouse/issues/6508), [\#3492](https://github.com/ClickHouse/ClickHouse/issues/3492). Also fix check for local address in `ClickHouseDictionarySource`. [\#9262](https://github.com/ClickHouse/ClickHouse/pull/9262) [(tavplubix)](https://github.com/tavplubix)
- Now background merges in `*MergeTree` table engines family preserve storage policy volume order more accurately. [\#8549](https://github.com/ClickHouse/ClickHouse/pull/8549) ([Vladimir Chebotarev](https://github.com/excitoon))
- Prevent losing data in `Kafka` in rare cases when exception happens after reading suffix but before commit. Fixes [\#9378](https://github.com/ClickHouse/ClickHouse/issues/9378). Related: [\#7175](https://github.com/ClickHouse/ClickHouse/issues/7175) [\#9507](https://github.com/ClickHouse/ClickHouse/pull/9507) [(filimonov)](https://github.com/filimonov)
- Fix bug leading to server termination when trying to use / drop `Kafka` table created with wrong parameters. Fixes [\#9494](https://github.com/ClickHouse/ClickHouse/issues/9494). Incorporates [\#9507](https://github.com/ClickHouse/ClickHouse/issues/9507). [\#9513](https://github.com/ClickHouse/ClickHouse/pull/9513) [(filimonov)](https://github.com/filimonov)
- Allow using `MaterializedView` with subqueries above `Kafka` tables. [\#8197](https://github.com/ClickHouse/ClickHouse/pull/8197) ([filimonov](https://github.com/filimonov))

#### New Feature {#new-feature-1}

- Add `deduplicate_blocks_in_dependent_materialized_views` option to control the behaviour of idempotent inserts into tables with materialized views. This new feature was added to the bugfix release by a special request from Altinity. [\#9070](https://github.com/ClickHouse/ClickHouse/pull/9070) [(urykhy)](https://github.com/urykhy)
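A hedged sketch of the new option; it assumes `src` is a `Replicated*MergeTree` table with a materialized view selecting from it, so that insert deduplication applies (see the linked PR for the exact semantics):

```sql
SET deduplicate_blocks_in_dependent_materialized_views = 1;
-- Retrying an identical insert is now intended to stay idempotent
-- for the dependent materialized view as well, not only for src.
INSERT INTO src VALUES (1, 'a');
INSERT INTO src VALUES (1, 'a');
```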
### ClickHouse release v19.16.2.2, 2019-10-30 {#clickhouse-release-v19-16-2-2-2019-10-30}

#### Backward Incompatible Change {#backward-incompatible-change-1}

- Add missing arity validation for count/countIf. [\#7095](https://github.com/ClickHouse/ClickHouse/issues/7095) [\#7298](https://github.com/ClickHouse/ClickHouse/pull/7298) ([Vdimir](https://github.com/Vdimir))
- Remove legacy `asterisk_left_columns_only` setting (it was disabled by default). [\#7335](https://github.com/ClickHouse/ClickHouse/pull/7335) ([Artem Zuikov](https://github.com/4ertus2))
- Format strings for Template data format are now specified in files. [\#7118](https://github.com/ClickHouse/ClickHouse/pull/7118) ([tavplubix](https://github.com/tavplubix))

#### New Feature {#new-feature-2}

- Introduce uniqCombined64() to calculate cardinality greater than UINT\_MAX. [\#7213](https://github.com/ClickHouse/ClickHouse/pull/7213), [\#7222](https://github.com/ClickHouse/ClickHouse/pull/7222) ([Azat Khuzhin](https://github.com/azat))
- Support Bloom filter indexes on Array columns. [\#6984](https://github.com/ClickHouse/ClickHouse/pull/6984) ([achimbab](https://github.com/achimbab))
- Add a function `getMacro(name)` that returns String with the value of corresponding `<macros>` from server configuration. [\#7240](https://github.com/ClickHouse/ClickHouse/pull/7240) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Set two configuration options for a dictionary based on an HTTP source: `credentials` and `http-headers`. [\#7092](https://github.com/ClickHouse/ClickHouse/pull/7092) ([Guillaume Tassery](https://github.com/YiuRULE))
- Add a new ProfileEvent `Merge` that counts the number of launched background merges. [\#7093](https://github.com/ClickHouse/ClickHouse/pull/7093) ([Mikhail Korotov](https://github.com/millb))
- Add fullHostName function that returns a fully qualified domain name. [\#7263](https://github.com/ClickHouse/ClickHouse/issues/7263) [\#7291](https://github.com/ClickHouse/ClickHouse/pull/7291) ([sundyli](https://github.com/sundy-li))
- Add function `arraySplit` and `arrayReverseSplit` which split an array by “cut off” conditions. They are useful in time sequence handling. [\#7294](https://github.com/ClickHouse/ClickHouse/pull/7294) ([hcz](https://github.com/hczhcz))
- Add new functions that return the Array of all matched indices in multiMatch family of functions. [\#7299](https://github.com/ClickHouse/ClickHouse/pull/7299) ([Danila Kutenin](https://github.com/danlark1))
- Add a new database engine `Lazy` that is optimized for storing a large number of small -Log tables. [\#7171](https://github.com/ClickHouse/ClickHouse/pull/7171) ([Nikita Vasilev](https://github.com/nikvas0))
- Add aggregate functions groupBitmapAnd, -Or, -Xor for bitmap columns. [\#7109](https://github.com/ClickHouse/ClickHouse/pull/7109) ([Zhichang Yu](https://github.com/yuzhichang))
- Add aggregate function combinators -OrNull and -OrDefault, which return null or default values when there is nothing to aggregate. [\#7331](https://github.com/ClickHouse/ClickHouse/pull/7331) ([hcz](https://github.com/hczhcz))
- Introduce CustomSeparated data format that supports custom escaping and delimiter rules. [\#7118](https://github.com/ClickHouse/ClickHouse/pull/7118) ([tavplubix](https://github.com/tavplubix))
- Support Redis as source of external dictionary.
[\#4361](https://github.com/ClickHouse/ClickHouse/pull/4361) [\#6962](https://github.com/ClickHouse/ClickHouse/pull/6962) ([comunodi](https://github.com/comunodi), [Anton - Popov](https://github.com/CurtizJ)) - -#### Bug Fix {#bug-fix-2} - -- Fix wrong query result if it has `WHERE IN (SELECT ...)` section and `optimize_read_in_order` is - used. [\#7371](https://github.com/ClickHouse/ClickHouse/pull/7371) ([Anton - Popov](https://github.com/CurtizJ)) -- Disabled MariaDB authentication plugin, which depends on files outside of project. - [\#7140](https://github.com/ClickHouse/ClickHouse/pull/7140) ([Yuriy - Baranov](https://github.com/yurriy)) -- Fix exception `Cannot convert column ... because it is constant but values of constants are different in source and result` which could rarely happen when functions `now()`, `today()`, - `yesterday()`, `randConstant()` are used. - [\#7156](https://github.com/ClickHouse/ClickHouse/pull/7156) ([Nikolai - Kochetov](https://github.com/KochetovNicolai)) -- Fixed issue of using HTTP keep alive timeout instead of TCP keep alive timeout. - [\#7351](https://github.com/ClickHouse/ClickHouse/pull/7351) ([Vasily - Nemkov](https://github.com/Enmk)) -- Fixed a segmentation fault in groupBitmapOr (issue [\#7109](https://github.com/ClickHouse/ClickHouse/issues/7109)). - [\#7289](https://github.com/ClickHouse/ClickHouse/pull/7289) ([Zhichang - Yu](https://github.com/yuzhichang)) -- For materialized views the commit for Kafka is called after all data were written. - [\#7175](https://github.com/ClickHouse/ClickHouse/pull/7175) ([Ivan](https://github.com/abyss7)) -- Fixed wrong `duration_ms` value in `system.part_log` table. It was ten times off. - [\#7172](https://github.com/ClickHouse/ClickHouse/pull/7172) ([Vladimir - Chebotarev](https://github.com/excitoon)) -- A quick fix to resolve crash in LIVE VIEW table and re-enabling all LIVE VIEW tests. - [\#7201](https://github.com/ClickHouse/ClickHouse/pull/7201) - ([vzakaznikov](https://github.com/vzakaznikov)) -- Serialize NULL values correctly in min/max indexes of MergeTree parts. - [\#7234](https://github.com/ClickHouse/ClickHouse/pull/7234) ([Alexander - Kuzmenkov](https://github.com/akuzm)) -- Don’t put virtual columns to .sql metadata when table is created as `CREATE TABLE AS`. - [\#7183](https://github.com/ClickHouse/ClickHouse/pull/7183) ([Ivan](https://github.com/abyss7)) -- Fix segmentation fault in `ATTACH PART` query. - [\#7185](https://github.com/ClickHouse/ClickHouse/pull/7185) - ([alesapin](https://github.com/alesapin)) -- Fix wrong result for some queries given by the optimization of empty IN subqueries and empty - INNER/RIGHT JOIN. [\#7284](https://github.com/ClickHouse/ClickHouse/pull/7284) ([Nikolai - Kochetov](https://github.com/KochetovNicolai)) -- Fixing AddressSanitizer error in the LIVE VIEW getHeader() method. - [\#7271](https://github.com/ClickHouse/ClickHouse/pull/7271) - ([vzakaznikov](https://github.com/vzakaznikov)) - -#### Improvement {#improvement-1} - -- Add a message in case of queue\_wait\_max\_ms wait takes place. - [\#7390](https://github.com/ClickHouse/ClickHouse/pull/7390) ([Azat - Khuzhin](https://github.com/azat)) -- Made setting `s3_min_upload_part_size` table-level. - [\#7059](https://github.com/ClickHouse/ClickHouse/pull/7059) ([Vladimir - Chebotarev](https://github.com/excitoon)) -- Check TTL in StorageFactory. 
[\#7304](https://github.com/ClickHouse/ClickHouse/pull/7304) - ([sundyli](https://github.com/sundy-li)) -- Squash left-hand blocks in partial merge join (optimization). - [\#7122](https://github.com/ClickHouse/ClickHouse/pull/7122) ([Artem - Zuikov](https://github.com/4ertus2)) -- Do not allow non-deterministic functions in mutations of Replicated table engines, because this - can introduce inconsistencies between replicas. - [\#7247](https://github.com/ClickHouse/ClickHouse/pull/7247) ([Alexander - Kazakov](https://github.com/Akazz)) -- Disable memory tracker while converting exception stack trace to string. It can prevent the loss - of error messages of type `Memory limit exceeded` on server, which caused the `Attempt to read after eof` exception on client. [\#7264](https://github.com/ClickHouse/ClickHouse/pull/7264) - ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -- Miscellaneous format improvements. Resolves - [\#6033](https://github.com/ClickHouse/ClickHouse/issues/6033), - [\#2633](https://github.com/ClickHouse/ClickHouse/issues/2633), - [\#6611](https://github.com/ClickHouse/ClickHouse/issues/6611), - [\#6742](https://github.com/ClickHouse/ClickHouse/issues/6742) - [\#7215](https://github.com/ClickHouse/ClickHouse/pull/7215) - ([tavplubix](https://github.com/tavplubix)) -- ClickHouse ignores values on the right side of IN operator that are not convertible to the left - side type. Make it work properly for compound types – Array and Tuple. - [\#7283](https://github.com/ClickHouse/ClickHouse/pull/7283) ([Alexander - Kuzmenkov](https://github.com/akuzm)) -- Support missing inequalities for ASOF JOIN. It’s possible to join less-or-equal variant and strict - greater and less variants for ASOF column in ON syntax. - [\#7282](https://github.com/ClickHouse/ClickHouse/pull/7282) ([Artem - Zuikov](https://github.com/4ertus2)) -- Optimize partial merge join. [\#7070](https://github.com/ClickHouse/ClickHouse/pull/7070) - ([Artem Zuikov](https://github.com/4ertus2)) -- Do not use more than 98K of memory in uniqCombined functions. - [\#7236](https://github.com/ClickHouse/ClickHouse/pull/7236), - [\#7270](https://github.com/ClickHouse/ClickHouse/pull/7270) ([Azat - Khuzhin](https://github.com/azat)) -- Flush parts of right-hand joining table on disk in PartialMergeJoin (if there is not enough - memory). Load data back when needed. [\#7186](https://github.com/ClickHouse/ClickHouse/pull/7186) - ([Artem Zuikov](https://github.com/4ertus2)) - -#### Performance Improvement {#performance-improvement-1} - -- Speed up joinGet with const arguments by avoiding data duplication. - [\#7359](https://github.com/ClickHouse/ClickHouse/pull/7359) ([Amos - Bird](https://github.com/amosbird)) -- Return early if the subquery is empty. - [\#7007](https://github.com/ClickHouse/ClickHouse/pull/7007) ([小路](https://github.com/nicelulu)) -- Optimize parsing of SQL expression in Values. - [\#6781](https://github.com/ClickHouse/ClickHouse/pull/6781) - ([tavplubix](https://github.com/tavplubix)) - -#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-1} - -- Disable some contribs for cross-compilation to Mac OS. - [\#7101](https://github.com/ClickHouse/ClickHouse/pull/7101) ([Ivan](https://github.com/abyss7)) -- Add missing linking with PocoXML for clickhouse\_common\_io. - [\#7200](https://github.com/ClickHouse/ClickHouse/pull/7200) ([Azat - Khuzhin](https://github.com/azat)) -- Accept multiple test filter arguments in clickhouse-test. 
- [\#7226](https://github.com/ClickHouse/ClickHouse/pull/7226) ([Alexander - Kuzmenkov](https://github.com/akuzm)) -- Enable musl and jemalloc for ARM. [\#7300](https://github.com/ClickHouse/ClickHouse/pull/7300) - ([Amos Bird](https://github.com/amosbird)) -- Added `--client-option` parameter to `clickhouse-test` to pass additional parameters to client. - [\#7277](https://github.com/ClickHouse/ClickHouse/pull/7277) ([Nikolai - Kochetov](https://github.com/KochetovNicolai)) -- Preserve existing configs on rpm package upgrade. - [\#7103](https://github.com/ClickHouse/ClickHouse/pull/7103) - ([filimonov](https://github.com/filimonov)) -- Fix errors detected by PVS. [\#7153](https://github.com/ClickHouse/ClickHouse/pull/7153) ([Artem - Zuikov](https://github.com/4ertus2)) -- Fix build for Darwin. [\#7149](https://github.com/ClickHouse/ClickHouse/pull/7149) - ([Ivan](https://github.com/abyss7)) -- glibc 2.29 compatibility. [\#7142](https://github.com/ClickHouse/ClickHouse/pull/7142) ([Amos - Bird](https://github.com/amosbird)) -- Make sure dh\_clean does not touch potential source files. - [\#7205](https://github.com/ClickHouse/ClickHouse/pull/7205) ([Amos - Bird](https://github.com/amosbird)) -- Attempt to avoid conflict when updating from altinity rpm - it has config file packaged separately - in clickhouse-server-common. [\#7073](https://github.com/ClickHouse/ClickHouse/pull/7073) - ([filimonov](https://github.com/filimonov)) -- Optimize some header files for faster rebuilds. - [\#7212](https://github.com/ClickHouse/ClickHouse/pull/7212), - [\#7231](https://github.com/ClickHouse/ClickHouse/pull/7231) ([Alexander - Kuzmenkov](https://github.com/akuzm)) -- Add performance tests for Date and DateTime. [\#7332](https://github.com/ClickHouse/ClickHouse/pull/7332) ([Vasily - Nemkov](https://github.com/Enmk)) -- Fix some tests that contained non-deterministic mutations. - [\#7132](https://github.com/ClickHouse/ClickHouse/pull/7132) ([Alexander - Kazakov](https://github.com/Akazz)) -- Add build with MemorySanitizer to CI. [\#7066](https://github.com/ClickHouse/ClickHouse/pull/7066) - ([Alexander Kuzmenkov](https://github.com/akuzm)) -- Avoid use of uninitialized values in MetricsTransmitter. - [\#7158](https://github.com/ClickHouse/ClickHouse/pull/7158) ([Azat - Khuzhin](https://github.com/azat)) -- Fix some issues in Fields found by MemorySanitizer. - [\#7135](https://github.com/ClickHouse/ClickHouse/pull/7135), - [\#7179](https://github.com/ClickHouse/ClickHouse/pull/7179) ([Alexander - Kuzmenkov](https://github.com/akuzm)), [\#7376](https://github.com/ClickHouse/ClickHouse/pull/7376) - ([Amos Bird](https://github.com/amosbird)) -- Fix undefined behavior in murmurhash32. [\#7388](https://github.com/ClickHouse/ClickHouse/pull/7388) ([Amos - Bird](https://github.com/amosbird)) -- Fix undefined behavior in StoragesInfoStream. [\#7384](https://github.com/ClickHouse/ClickHouse/pull/7384) - ([tavplubix](https://github.com/tavplubix)) -- Fixed constant expressions folding for external database engines (MySQL, ODBC, JDBC). In previous - versions it wasn’t working for multiple constant expressions and was not working at all for Date, - DateTime and UUID. This fixes [\#7245](https://github.com/ClickHouse/ClickHouse/issues/7245) - [\#7252](https://github.com/ClickHouse/ClickHouse/pull/7252) - ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Fixing ThreadSanitizer data race error in the LIVE VIEW when accessing no\_users\_thread variable. 
- [\#7353](https://github.com/ClickHouse/ClickHouse/pull/7353) - ([vzakaznikov](https://github.com/vzakaznikov)) -- Get rid of malloc symbols in libcommon - [\#7134](https://github.com/ClickHouse/ClickHouse/pull/7134), - [\#7065](https://github.com/ClickHouse/ClickHouse/pull/7065) ([Amos - Bird](https://github.com/amosbird)) -- Add global flag ENABLE\_LIBRARIES for disabling all libraries. - [\#7063](https://github.com/ClickHouse/ClickHouse/pull/7063) - ([proller](https://github.com/proller)) - -#### Code cleanup {#code-cleanup} - -- Generalize configuration repository to prepare for DDL for Dictionaries. [\#7155](https://github.com/ClickHouse/ClickHouse/pull/7155) - ([alesapin](https://github.com/alesapin)) -- Parser for dictionaries DDL without any semantic. - [\#7209](https://github.com/ClickHouse/ClickHouse/pull/7209) - ([alesapin](https://github.com/alesapin)) -- Split ParserCreateQuery into different smaller parsers. - [\#7253](https://github.com/ClickHouse/ClickHouse/pull/7253) - ([alesapin](https://github.com/alesapin)) -- Small refactoring and renaming near external dictionaries. - [\#7111](https://github.com/ClickHouse/ClickHouse/pull/7111) - ([alesapin](https://github.com/alesapin)) -- Refactor some code to prepare for role-based access control. [\#7235](https://github.com/ClickHouse/ClickHouse/pull/7235) ([Vitaly - Baranov](https://github.com/vitlibar)) -- Some improvements in DatabaseOrdinary code. - [\#7086](https://github.com/ClickHouse/ClickHouse/pull/7086) ([Nikita - Vasilev](https://github.com/nikvas0)) -- Do not use iterators in find() and emplace() methods of hash tables. - [\#7026](https://github.com/ClickHouse/ClickHouse/pull/7026) ([Alexander - Kuzmenkov](https://github.com/akuzm)) -- Fix getMultipleValuesFromConfig in case when parameter root is not empty. [\#7374](https://github.com/ClickHouse/ClickHouse/pull/7374) - ([Mikhail Korotov](https://github.com/millb)) -- Remove some copy-paste (TemporaryFile and TemporaryFileStream) - [\#7166](https://github.com/ClickHouse/ClickHouse/pull/7166) ([Artem - Zuikov](https://github.com/4ertus2)) -- Improved code readability a little bit (`MergeTreeData::getActiveContainingPart`). - [\#7361](https://github.com/ClickHouse/ClickHouse/pull/7361) ([Vladimir - Chebotarev](https://github.com/excitoon)) -- Wait for all scheduled jobs, which are using local objects, if `ThreadPool::schedule(...)` throws - an exception. Rename `ThreadPool::schedule(...)` to `ThreadPool::scheduleOrThrowOnError(...)` and - fix comments to make obvious that it may throw. - [\#7350](https://github.com/ClickHouse/ClickHouse/pull/7350) - ([tavplubix](https://github.com/tavplubix)) - -## ClickHouse release 19.15 {#clickhouse-release-19-15} - -### ClickHouse release 19.15.4.10, 2019-10-31 {#clickhouse-release-19-15-4-10-2019-10-31} - -#### Bug Fix {#bug-fix-3} - -- Added handling of SQL\_TINYINT and SQL\_BIGINT, and fix handling of SQL\_FLOAT data source types in ODBC Bridge. - [\#7491](https://github.com/ClickHouse/ClickHouse/pull/7491) ([Denis Glazachev](https://github.com/traceon)) -- Allowed to have some parts on destination disk or volume in MOVE PARTITION. - [\#7434](https://github.com/ClickHouse/ClickHouse/pull/7434) ([Vladimir Chebotarev](https://github.com/excitoon)) -- Fixed NULL-values in nullable columns through ODBC-bridge. - [\#7402](https://github.com/ClickHouse/ClickHouse/pull/7402) ([Vasily Nemkov](https://github.com/Enmk)) -- Fixed INSERT into Distributed non local node with MATERIALIZED columns. 
  [\#7377](https://github.com/ClickHouse/ClickHouse/pull/7377) ([Azat Khuzhin](https://github.com/azat))
- Fixed function getMultipleValuesFromConfig. [\#7374](https://github.com/ClickHouse/ClickHouse/pull/7374) ([Mikhail Korotov](https://github.com/millb))
- Fixed issue of using HTTP keep alive timeout instead of TCP keep alive timeout. [\#7351](https://github.com/ClickHouse/ClickHouse/pull/7351) ([Vasily Nemkov](https://github.com/Enmk))
- Wait for all jobs to finish on exception (fixes rare segfaults). [\#7350](https://github.com/ClickHouse/ClickHouse/pull/7350) ([tavplubix](https://github.com/tavplubix))
- Don’t push to MVs when inserting into Kafka table. [\#7265](https://github.com/ClickHouse/ClickHouse/pull/7265) ([Ivan](https://github.com/abyss7))
- Disable memory tracker for exception stack. [\#7264](https://github.com/ClickHouse/ClickHouse/pull/7264) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fixed bad code in transforming query for external database. [\#7252](https://github.com/ClickHouse/ClickHouse/pull/7252) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Avoid use of uninitialized values in MetricsTransmitter. [\#7158](https://github.com/ClickHouse/ClickHouse/pull/7158) ([Azat Khuzhin](https://github.com/azat))
- Added example config with macros for tests ([alexey-milovidov](https://github.com/alexey-milovidov))

### ClickHouse release 19.15.3.6, 2019-10-09 {#clickhouse-release-19-15-3-6-2019-10-09}

#### Bug Fix {#bug-fix-4}

- Fixed bad\_variant in hashed dictionary. ([alesapin](https://github.com/alesapin))
- Fixed up bug with segmentation fault in ATTACH PART query. ([alesapin](https://github.com/alesapin))
- Fixed time calculation in `MergeTreeData`. ([Vladimir Chebotarev](https://github.com/excitoon))
- Commit to Kafka explicitly after the writing is finalized. [\#7175](https://github.com/ClickHouse/ClickHouse/pull/7175) ([Ivan](https://github.com/abyss7))
- Serialize NULL values correctly in min/max indexes of MergeTree parts. [\#7234](https://github.com/ClickHouse/ClickHouse/pull/7234) ([Alexander Kuzmenkov](https://github.com/akuzm))

### ClickHouse release 19.15.2.2, 2019-10-01 {#clickhouse-release-19-15-2-2-2019-10-01}

#### New Feature {#new-feature-3}

- Tiered storage: support to use multiple storage volumes for tables with MergeTree engine. It’s possible to store fresh data on SSD and automatically move old data to HDD. ([example](https://clickhouse.github.io/clickhouse-presentations/meetup30/new_features/#12)). [\#4918](https://github.com/ClickHouse/ClickHouse/pull/4918) ([Igr](https://github.com/ObjatieGroba)) [\#6489](https://github.com/ClickHouse/ClickHouse/pull/6489) ([alesapin](https://github.com/alesapin))
- Add table function `input` for reading incoming data in `INSERT SELECT` query (a sketch appears just below). [\#5450](https://github.com/ClickHouse/ClickHouse/pull/5450) ([palasonic1](https://github.com/palasonic1)) [\#6832](https://github.com/ClickHouse/ClickHouse/pull/6832) ([Anton Popov](https://github.com/CurtizJ))
- Add a `sparse_hashed` dictionary layout, that is functionally equivalent to the `hashed` layout, but is more memory efficient. It uses about half as much memory at the cost of slower value retrieval. [\#6894](https://github.com/ClickHouse/ClickHouse/pull/6894) ([Azat Khuzhin](https://github.com/azat))
- Implement ability to define list of users for access to dictionaries. Only the currently connected database can be used. [\#6907](https://github.com/ClickHouse/ClickHouse/pull/6907) ([Guillaume Tassery](https://github.com/YiuRULE))
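A hedged sketch of the `input` table function mentioned above; `target` and the column list are illustrative, and the rows to be transformed are sent by the client after the query:

```sql
INSERT INTO target
SELECT lower(col1), col2 * col2
FROM input('col1 String, col2 UInt32')
FORMAT CSV
```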
- Add `LIMIT` option to `SHOW` query. [\#6944](https://github.com/ClickHouse/ClickHouse/pull/6944) ([Philipp Malkovsky](https://github.com/malkfilipp))
- Add `bitmapSubsetLimit(bitmap, range_start, limit)` function, that returns subset of the smallest `limit` values in set that is no smaller than `range_start`. [\#6957](https://github.com/ClickHouse/ClickHouse/pull/6957) ([Zhichang Yu](https://github.com/yuzhichang))
- Add `bitmapMin` and `bitmapMax` functions. [\#6970](https://github.com/ClickHouse/ClickHouse/pull/6970) ([Zhichang Yu](https://github.com/yuzhichang))
- Add function `repeat` related to [issue-6648](https://github.com/ClickHouse/ClickHouse/issues/6648) [\#6999](https://github.com/ClickHouse/ClickHouse/pull/6999) ([flynn](https://github.com/ucasFL))

#### Experimental Feature {#experimental-feature-1}

- Implement (in memory) Merge Join variant that does not change current pipeline. Result is partially sorted by merge key. Set `partial_merge_join = 1` to use this feature. The Merge Join is still in development. [\#6940](https://github.com/ClickHouse/ClickHouse/pull/6940) ([Artem Zuikov](https://github.com/4ertus2))
- Add `S3` engine and table function. It is still in development (no authentication support yet). [\#5596](https://github.com/ClickHouse/ClickHouse/pull/5596) ([Vladimir Chebotarev](https://github.com/excitoon))

#### Improvement {#improvement-2}

- Every message read from Kafka is inserted atomically. This resolves almost all known issues with Kafka engine. [\#6950](https://github.com/ClickHouse/ClickHouse/pull/6950) ([Ivan](https://github.com/abyss7))
- Improvements for failover of Distributed queries. Shorten recovery time, also it is now configurable and can be seen in `system.clusters`. [\#6399](https://github.com/ClickHouse/ClickHouse/pull/6399) ([Vasily Nemkov](https://github.com/Enmk))
- Support numeric values for Enums directly in `IN` section. \#6766 [\#6941](https://github.com/ClickHouse/ClickHouse/pull/6941) ([dimarub2000](https://github.com/dimarub2000))
- Support (optional, disabled by default) redirects on URL storage. [\#6914](https://github.com/ClickHouse/ClickHouse/pull/6914) ([maqroll](https://github.com/maqroll))
- Add information message when client with an older version connects to a server. [\#6893](https://github.com/ClickHouse/ClickHouse/pull/6893) ([Philipp Malkovsky](https://github.com/malkfilipp))
- Remove maximum backoff sleep time limit for sending data in Distributed tables [\#6895](https://github.com/ClickHouse/ClickHouse/pull/6895) ([Azat Khuzhin](https://github.com/azat))
- Add ability to send profile events (counters) with cumulative values to graphite. It can be enabled under `<events_cumulative>` in server `config.xml`. [\#6969](https://github.com/ClickHouse/ClickHouse/pull/6969) ([Azat Khuzhin](https://github.com/azat))
- Automatically cast type `T` to `LowCardinality(T)` while inserting data in column of type `LowCardinality(T)` in Native format via HTTP. [\#6891](https://github.com/ClickHouse/ClickHouse/pull/6891) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Add ability to use function `hex` without using `reinterpretAsString` for `Float32`, `Float64`. [\#7024](https://github.com/ClickHouse/ClickHouse/pull/7024) ([Mikhail Korotov](https://github.com/millb))
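For illustration, `hex` can now be applied to floats directly, without `reinterpretAsString`; the result is the value’s little-endian byte layout (output shown under the assumption of an IEEE 754 `Float32`):

```sql
SELECT hex(toFloat32(1.5)) AS h;
-- h = '0000C03F': the in-memory bytes of 0x3FC00000
```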
#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-2}

- Add gdb-index to clickhouse binary with debug info. It will speed up startup time of `gdb`. [\#6947](https://github.com/ClickHouse/ClickHouse/pull/6947) ([alesapin](https://github.com/alesapin))
- Speed up deb packaging with patched dpkg-deb which uses `pigz`. [\#6960](https://github.com/ClickHouse/ClickHouse/pull/6960) ([alesapin](https://github.com/alesapin))
- Set `enable_fuzzing = 1` to enable libfuzzer instrumentation of all the project code. [\#7042](https://github.com/ClickHouse/ClickHouse/pull/7042) ([kyprizel](https://github.com/kyprizel))
- Add split build smoke test in CI. [\#7061](https://github.com/ClickHouse/ClickHouse/pull/7061) ([alesapin](https://github.com/alesapin))
- Add build with MemorySanitizer to CI. [\#7066](https://github.com/ClickHouse/ClickHouse/pull/7066) ([Alexander Kuzmenkov](https://github.com/akuzm))
- Replace `libsparsehash` with `sparsehash-c11` [\#6965](https://github.com/ClickHouse/ClickHouse/pull/6965) ([Azat Khuzhin](https://github.com/azat))

#### Bug Fix {#bug-fix-5}

- Fixed performance degradation of index analysis on complex keys on large tables. This fixes \#6924. [\#7075](https://github.com/ClickHouse/ClickHouse/pull/7075) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix logical error causing segfaults when selecting from Kafka empty topic. [\#6909](https://github.com/ClickHouse/ClickHouse/pull/6909) ([Ivan](https://github.com/abyss7))
- Fix too early MySQL connection close in `MySQLBlockInputStream.cpp`. [\#6882](https://github.com/ClickHouse/ClickHouse/pull/6882) ([Clément Rodriguez](https://github.com/clemrodriguez))
- Returned support for very old Linux kernels (fix [\#6841](https://github.com/ClickHouse/ClickHouse/issues/6841)) [\#6853](https://github.com/ClickHouse/ClickHouse/pull/6853) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix possible data loss in `insert select` query in case of empty block in input stream. \#6834 \#6862 [\#6911](https://github.com/ClickHouse/ClickHouse/pull/6911) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fix for function `arrayEnumerateUniqRanked` with empty arrays in params [\#6928](https://github.com/ClickHouse/ClickHouse/pull/6928) ([proller](https://github.com/proller))
- Fix complex queries with array joins and global subqueries. [\#6934](https://github.com/ClickHouse/ClickHouse/pull/6934) ([Ivan](https://github.com/abyss7))
- Fix `Unknown identifier` error in ORDER BY and GROUP BY with multiple JOINs [\#7022](https://github.com/ClickHouse/ClickHouse/pull/7022) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed `MSan` warning while executing function with `LowCardinality` argument. [\#7062](https://github.com/ClickHouse/ClickHouse/pull/7062) ([Nikolai Kochetov](https://github.com/KochetovNicolai))

#### Backward Incompatible Change {#backward-incompatible-change-2}

- Changed serialization format of bitmap\* aggregate function states to improve performance. Serialized states of bitmap\* from previous versions cannot be read. [\#6908](https://github.com/ClickHouse/ClickHouse/pull/6908) ([Zhichang Yu](https://github.com/yuzhichang))

## ClickHouse release 19.14 {#clickhouse-release-19-14}

### ClickHouse release 19.14.7.15, 2019-10-02 {#clickhouse-release-19-14-7-15-2019-10-02}

#### Bug Fix {#bug-fix-6}

- This release also contains all bug fixes from 19.11.12.69.
- Fixed compatibility for distributed queries between 19.14 and earlier versions. This fixes [\#7068](https://github.com/ClickHouse/ClickHouse/issues/7068).
[\#7069](https://github.com/ClickHouse/ClickHouse/pull/7069) ([alexey-milovidov](https://github.com/alexey-milovidov))

### ClickHouse release 19.14.6.12, 2019-09-19 {#clickhouse-release-19-14-6-12-2019-09-19}

#### Bug Fix {#bug-fix-7}

- Fix for function `arrayEnumerateUniqRanked` with empty arrays in params. [\#6928](https://github.com/ClickHouse/ClickHouse/pull/6928) ([proller](https://github.com/proller))
- Fixed subquery name in queries with `ARRAY JOIN` and `GLOBAL IN subquery` with alias. Use the subquery alias for the external table name if it is specified. [\#6934](https://github.com/ClickHouse/ClickHouse/pull/6934) ([Ivan](https://github.com/abyss7))

#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-3}

- Fix [flapping](https://clickhouse-test-reports.s3.yandex.net/6944/aab95fd5175a513413c7395a73a82044bdafb906/functional_stateless_tests_(debug).html) test `00715_fetch_merged_or_mutated_part_zookeeper` by rewriting it as a shell script, because it needs to wait for mutations to apply. [\#6977](https://github.com/ClickHouse/ClickHouse/pull/6977) ([Alexander Kazakov](https://github.com/Akazz))
- Fixed UBSan and MemSan failure in function `groupUniqArray` with an empty array argument. It was caused by placing an empty `PaddedPODArray` into a hash table zero cell, because the constructor for the zero cell value was not called. [\#6937](https://github.com/ClickHouse/ClickHouse/pull/6937) ([Amos Bird](https://github.com/amosbird))

### ClickHouse release 19.14.3.3, 2019-09-10 {#clickhouse-release-19-14-3-3-2019-09-10}

#### New Feature {#new-feature-4}

- `WITH FILL` modifier for `ORDER BY` (continuation of [\#5069](https://github.com/ClickHouse/ClickHouse/issues/5069); see the example below). [\#6610](https://github.com/ClickHouse/ClickHouse/pull/6610) ([Anton Popov](https://github.com/CurtizJ))
- `WITH TIES` modifier for `LIMIT` (continuation of [\#5069](https://github.com/ClickHouse/ClickHouse/issues/5069)). [\#6610](https://github.com/ClickHouse/ClickHouse/pull/6610) ([Anton Popov](https://github.com/CurtizJ))
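A minimal sketch of the two modifiers on synthetic data (results in comments):

```sql
-- WITH FILL generates rows for the missing values of the sort key
SELECT number AS n
FROM numbers(10)
WHERE number % 3 = 0
ORDER BY n WITH FILL;    -- returns n = 0..9; 1, 2, 4, 5, 7, 8 are generated fill rows

-- WITH TIES also returns rows that tie with the last row on the ORDER BY key
SELECT number % 3 AS n
FROM numbers(9)
ORDER BY n
LIMIT 4 WITH TIES;       -- returns six rows: all rows with n = 0 and n = 1
```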
- Parse unquoted `NULL` literal as NULL (if setting `format_csv_unquoted_null_literal_as_null=1`). Initialize null fields with default values if the data type of this field is not nullable (if setting `input_format_null_as_default=1`). [\#5990](https://github.com/ClickHouse/ClickHouse/issues/5990) [\#6055](https://github.com/ClickHouse/ClickHouse/pull/6055) ([tavplubix](https://github.com/tavplubix))
- Support for wildcards in paths of table functions `file` and `hdfs`. If the path contains wildcards, the table will be readonly. Example of usage: `select * from hdfs('hdfs://hdfs1:9000/some_dir/another_dir/*/file{0..9}{0..9}')` and `select * from file('some_dir/{some_file,another_file,yet_another}.tsv', 'TSV', 'value UInt32')`. [\#6092](https://github.com/ClickHouse/ClickHouse/pull/6092) ([Olga Khvostikova](https://github.com/stavrolia))
- New `system.metric_log` table which stores values of `system.events` and `system.metrics` with a specified time interval. [\#6363](https://github.com/ClickHouse/ClickHouse/issues/6363) [\#6467](https://github.com/ClickHouse/ClickHouse/pull/6467) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) [\#6530](https://github.com/ClickHouse/ClickHouse/pull/6530) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Allow writing ClickHouse text logs to the `system.text_log` table. [\#6037](https://github.com/ClickHouse/ClickHouse/issues/6037) [\#6103](https://github.com/ClickHouse/ClickHouse/pull/6103) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) [\#6164](https://github.com/ClickHouse/ClickHouse/pull/6164) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Show private symbols in stack traces (this is done via parsing symbol tables of ELF files). Added information about file and line number in stack traces if debug info is present. Sped up symbol name lookup by indexing the symbols present in the program. Added new SQL functions for introspection: `demangle` and `addressToLine`. Renamed function `symbolizeAddress` to `addressToSymbol` for consistency. Function `addressToSymbol` returns the mangled name for performance reasons and you have to apply `demangle`. Added setting `allow_introspection_functions` which is turned off by default. [\#6201](https://github.com/ClickHouse/ClickHouse/pull/6201) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Table function `values` (the name is case-insensitive). It allows reading from the `VALUES` list proposed in [\#5984](https://github.com/ClickHouse/ClickHouse/issues/5984). Example: `SELECT * FROM VALUES('a UInt64, s String', (1, 'one'), (2, 'two'), (3, 'three'))`. [\#6217](https://github.com/ClickHouse/ClickHouse/issues/6217). [\#6209](https://github.com/ClickHouse/ClickHouse/pull/6209) ([dimarub2000](https://github.com/dimarub2000))
- Added an ability to alter storage settings. Syntax: `ALTER TABLE <table> MODIFY SETTING <setting> = <value>`. [\#6366](https://github.com/ClickHouse/ClickHouse/pull/6366) [\#6669](https://github.com/ClickHouse/ClickHouse/pull/6669) [\#6685](https://github.com/ClickHouse/ClickHouse/pull/6685) ([alesapin](https://github.com/alesapin))
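A sketch of the new syntax, assuming a `MergeTree` table `t`; `parts_to_throw_insert` is just one illustrative storage-level setting, and which settings are alterable depends on the engine:

```sql
-- Change a storage setting of an existing table in place
ALTER TABLE t MODIFY SETTING parts_to_throw_insert = 500;
```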
- Support for removing detached parts. Syntax: `ALTER TABLE <table> DROP DETACHED PART '<part_name>'`. [\#6158](https://github.com/ClickHouse/ClickHouse/pull/6158) ([tavplubix](https://github.com/tavplubix))
- Table constraints. Allows adding constraints to a table definition which will be checked at insert. [\#5273](https://github.com/ClickHouse/ClickHouse/pull/5273) ([Gleb Novikov](https://github.com/NanoBjorn)) [\#6652](https://github.com/ClickHouse/ClickHouse/pull/6652) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Support for cascaded materialized views. [\#6324](https://github.com/ClickHouse/ClickHouse/pull/6324) ([Amos Bird](https://github.com/amosbird))
- Turn on query profiler by default to sample every query execution thread once a second. [\#6283](https://github.com/ClickHouse/ClickHouse/pull/6283) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Input format `ORC`. [\#6454](https://github.com/ClickHouse/ClickHouse/pull/6454) [\#6703](https://github.com/ClickHouse/ClickHouse/pull/6703) ([akonyaev90](https://github.com/akonyaev90))
- Added two new functions: `sigmoid` and `tanh` (that are useful for machine learning applications). [\#6254](https://github.com/ClickHouse/ClickHouse/pull/6254) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Functions `hasToken(haystack, token)` and `hasTokenCaseInsensitive(haystack, token)` to check if the given token is in the haystack. A token is a maximal-length substring between two non-alphanumeric ASCII characters (or boundaries of the haystack). The token must be a constant string. Supported by the tokenbf\_v1 index specialization (see the example below). [\#6596](https://github.com/ClickHouse/ClickHouse/pull/6596), [\#6662](https://github.com/ClickHouse/ClickHouse/pull/6662) ([Vasily Nemkov](https://github.com/Enmk))
- New function `neighbor(value, offset[, default_value])`. Allows reaching the previous/next value within a column in a block of data. [\#5925](https://github.com/ClickHouse/ClickHouse/pull/5925) ([Alex Krash](https://github.com/alex-krash)) [6685365ab8c5b74f9650492c88a012596eb1b0c6](https://github.com/ClickHouse/ClickHouse/commit/6685365ab8c5b74f9650492c88a012596eb1b0c6) [341e2e4587a18065c2da1ca888c73389f48ce36c](https://github.com/ClickHouse/ClickHouse/commit/341e2e4587a18065c2da1ca888c73389f48ce36c) [Alexey Milovidov](https://github.com/alexey-milovidov)
- Created a function `currentUser()`, returning the login of the authorized user. Added alias `user()` for compatibility with MySQL. [\#6470](https://github.com/ClickHouse/ClickHouse/pull/6470) ([Alex Krash](https://github.com/alex-krash))
- New aggregate functions `quantilesExactInclusive` and `quantilesExactExclusive` which were proposed in [\#5885](https://github.com/ClickHouse/ClickHouse/issues/5885). [\#6477](https://github.com/ClickHouse/ClickHouse/pull/6477) ([dimarub2000](https://github.com/dimarub2000))
- Function `bitmapRange(bitmap, range_begin, range_end)` which returns a new set with the specified range (not including `range_end`). [\#6314](https://github.com/ClickHouse/ClickHouse/pull/6314) ([Zhichang Yu](https://github.com/yuzhichang))
- Function `geohashesInBox(longitude_min, latitude_min, longitude_max, latitude_max, precision)` which creates an array of precision-long strings of geohash boxes covering the provided area. [\#6127](https://github.com/ClickHouse/ClickHouse/pull/6127) ([Vasily Nemkov](https://github.com/Enmk))
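Quick sketches of two of the functions above (results in comments):

```sql
-- hasToken: the token must be a constant string; matching is done between
-- non-alphanumeric ASCII boundaries
SELECT hasToken('Hello, world!', 'world');                 -- 1
SELECT hasTokenCaseInsensitive('Hello, world!', 'WORLD');  -- 1

-- neighbor: peek at a row before/after the current one within the same block;
-- out-of-block positions yield the default value (0 here)
SELECT number, neighbor(number, 1) AS next
FROM numbers(4);                                           -- next = 1, 2, 3, 0
```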
- Implement support for `INSERT` query with `Kafka` tables. [\#6012](https://github.com/ClickHouse/ClickHouse/pull/6012) ([Ivan](https://github.com/abyss7))
- Added support for `_partition` and `_timestamp` virtual columns to the Kafka engine. [\#6400](https://github.com/ClickHouse/ClickHouse/pull/6400) ([Ivan](https://github.com/abyss7))
- Possibility to remove sensitive data from `query_log`, server logs, and the process list with regexp-based rules. [\#5710](https://github.com/ClickHouse/ClickHouse/pull/5710) ([filimonov](https://github.com/filimonov))

#### Experimental Feature {#experimental-feature-2}

- Input and output data format `Template`. It allows specifying a custom format string for input and output. [\#4354](https://github.com/ClickHouse/ClickHouse/issues/4354) [\#6727](https://github.com/ClickHouse/ClickHouse/pull/6727) ([tavplubix](https://github.com/tavplubix))
- Implementation of `LIVE VIEW` tables that were originally proposed in [\#2898](https://github.com/ClickHouse/ClickHouse/pull/2898), prepared in [\#3925](https://github.com/ClickHouse/ClickHouse/issues/3925), and then updated in [\#5541](https://github.com/ClickHouse/ClickHouse/issues/5541). See [\#5541](https://github.com/ClickHouse/ClickHouse/issues/5541) for a detailed description. [\#5541](https://github.com/ClickHouse/ClickHouse/issues/5541) ([vzakaznikov](https://github.com/vzakaznikov)) [\#6425](https://github.com/ClickHouse/ClickHouse/pull/6425) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) [\#6656](https://github.com/ClickHouse/ClickHouse/pull/6656) ([vzakaznikov](https://github.com/vzakaznikov)) Note that the `LIVE VIEW` feature may be removed in future versions.

#### Bug Fix {#bug-fix-8}

- This release also contains all bug fixes from 19.13 and 19.11.
- Fix segmentation fault when the table has skip indices and a vertical merge happens. [\#6723](https://github.com/ClickHouse/ClickHouse/pull/6723) ([alesapin](https://github.com/alesapin))
- Fix per-column TTL with non-trivial column defaults. Previously, in case of a forced TTL merge with the `OPTIMIZE ... FINAL` query, expired values were replaced by type defaults instead of user-specified column defaults. [\#6796](https://github.com/ClickHouse/ClickHouse/pull/6796) ([Anton Popov](https://github.com/CurtizJ))
- Fix Kafka message duplication on normal server restart. [\#6597](https://github.com/ClickHouse/ClickHouse/pull/6597) ([Ivan](https://github.com/abyss7))
- Fixed infinite loop when reading Kafka messages. Do not pause/resume the consumer on subscription at all; otherwise it may get paused indefinitely in some scenarios. [\#6354](https://github.com/ClickHouse/ClickHouse/pull/6354) ([Ivan](https://github.com/abyss7))
- Fix `Key expression contains comparison between inconvertible types` exception in `bitmapContains` function. [\#6136](https://github.com/ClickHouse/ClickHouse/issues/6136) [\#6146](https://github.com/ClickHouse/ClickHouse/issues/6146) [\#6156](https://github.com/ClickHouse/ClickHouse/pull/6156) ([dimarub2000](https://github.com/dimarub2000))
- Fix segfault with enabled `optimize_skip_unused_shards` and a missing sharding key. [\#6384](https://github.com/ClickHouse/ClickHouse/pull/6384) ([Anton Popov](https://github.com/CurtizJ))
- Fixed wrong code in mutations that may lead to memory corruption.
Fixed segfault with read of address `0x14c0` that may happen due to concurrent `DROP TABLE` and `SELECT` from `system.parts` or `system.parts_columns`. Fixed race condition in preparation of mutation queries. Fixed deadlock caused by `OPTIMIZE` of Replicated tables and concurrent modification operations like ALTERs. [\#6514](https://github.com/ClickHouse/ClickHouse/pull/6514) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Removed extra verbose logging in the MySQL interface. [\#6389](https://github.com/ClickHouse/ClickHouse/pull/6389) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Return the ability to parse boolean settings from ‘true’ and ‘false’ in the configuration file. [\#6278](https://github.com/ClickHouse/ClickHouse/pull/6278) ([alesapin](https://github.com/alesapin))
- Fix crash in `quantile` and `median` functions over `Nullable(Decimal128)`. [\#6378](https://github.com/ClickHouse/ClickHouse/pull/6378) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed possible incomplete result returned by a `SELECT` query with a `WHERE` condition on a primary key that contained a conversion to Float type. It was caused by incorrect checking of monotonicity in the `toFloat` function. [\#6248](https://github.com/ClickHouse/ClickHouse/issues/6248) [\#6374](https://github.com/ClickHouse/ClickHouse/pull/6374) ([dimarub2000](https://github.com/dimarub2000))
- Check `max_expanded_ast_elements` setting for mutations. Clear mutations after `TRUNCATE TABLE`. [\#6205](https://github.com/ClickHouse/ClickHouse/pull/6205) ([Winter Zhang](https://github.com/zhang2014))
- Fix JOIN results for key columns when used with `join_use_nulls`. Attach NULLs instead of column defaults. [\#6249](https://github.com/ClickHouse/ClickHouse/pull/6249) ([Artem Zuikov](https://github.com/4ertus2))
- Fix for skip indices with vertical merge and alter. Fix for `Bad size of marks file` exception. [\#6594](https://github.com/ClickHouse/ClickHouse/issues/6594) [\#6713](https://github.com/ClickHouse/ClickHouse/pull/6713) ([alesapin](https://github.com/alesapin))
- Fix rare crash in `ALTER MODIFY COLUMN` and vertical merge when one of the merged/altered parts is empty (0 rows). [\#6746](https://github.com/ClickHouse/ClickHouse/issues/6746) [\#6780](https://github.com/ClickHouse/ClickHouse/pull/6780) ([alesapin](https://github.com/alesapin))
- Fixed bug in conversion of `LowCardinality` types in `AggregateFunctionFactory`. This fixes [\#6257](https://github.com/ClickHouse/ClickHouse/issues/6257). [\#6281](https://github.com/ClickHouse/ClickHouse/pull/6281) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fix wrong behavior and possible segfaults in `topK` and `topKWeighted` aggregate functions. [\#6404](https://github.com/ClickHouse/ClickHouse/pull/6404) ([Anton Popov](https://github.com/CurtizJ))
- Fixed unsafe code around the `getIdentifier` function. [\#6401](https://github.com/ClickHouse/ClickHouse/issues/6401) [\#6409](https://github.com/ClickHouse/ClickHouse/pull/6409) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed bug in the MySQL wire protocol (used while connecting to ClickHouse from the MySQL client). Caused by a heap buffer overflow in `PacketPayloadWriteBuffer`. [\#6212](https://github.com/ClickHouse/ClickHouse/pull/6212) ([Yuriy Baranov](https://github.com/yurriy))
- Fixed memory leak in `bitmapSubsetInRange` function. [\#6819](https://github.com/ClickHouse/ClickHouse/pull/6819) ([Zhichang Yu](https://github.com/yuzhichang))
- Fix rare bug when a mutation is executed after a granularity change.
[\#6816](https://github.com/ClickHouse/ClickHouse/pull/6816) ([alesapin](https://github.com/alesapin))
- Allow protobuf message with all fields by default. [\#6132](https://github.com/ClickHouse/ClickHouse/pull/6132) ([Vitaly Baranov](https://github.com/vitlibar))
- Resolve a bug in the `nullIf` function when the second argument is `NULL`. [\#6446](https://github.com/ClickHouse/ClickHouse/pull/6446) ([Guillaume Tassery](https://github.com/YiuRULE))
- Fix rare bug with wrong memory allocation/deallocation in complex key cache dictionaries with string fields which leads to infinite memory consumption (looks like a memory leak). The bug reproduced when the string size was a power of two starting from eight (8, 16, 32, etc.). [\#6447](https://github.com/ClickHouse/ClickHouse/pull/6447) ([alesapin](https://github.com/alesapin))
- Fixed Gorilla encoding on small sequences which caused exception `Cannot write after end of buffer`. [\#6398](https://github.com/ClickHouse/ClickHouse/issues/6398) [\#6444](https://github.com/ClickHouse/ClickHouse/pull/6444) ([Vasily Nemkov](https://github.com/Enmk))
- Allow using non-nullable types in JOINs with `join_use_nulls` enabled. [\#6705](https://github.com/ClickHouse/ClickHouse/pull/6705) ([Artem Zuikov](https://github.com/4ertus2))
- Disable `Poco::AbstractConfiguration` substitutions in queries in `clickhouse-client`. [\#6706](https://github.com/ClickHouse/ClickHouse/pull/6706) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Avoid deadlock in `REPLACE PARTITION`. [\#6677](https://github.com/ClickHouse/ClickHouse/pull/6677) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed possible segfault when using `arrayReduce` with constant arguments. [\#6242](https://github.com/ClickHouse/ClickHouse/issues/6242) [\#6326](https://github.com/ClickHouse/ClickHouse/pull/6326) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix inconsistent parts which can appear if a replica was restored after `DROP PARTITION`. [\#6522](https://github.com/ClickHouse/ClickHouse/issues/6522) [\#6523](https://github.com/ClickHouse/ClickHouse/pull/6523) ([tavplubix](https://github.com/tavplubix))
- Fixed hang in `JSONExtractRaw` function. [\#6195](https://github.com/ClickHouse/ClickHouse/issues/6195) [\#6198](https://github.com/ClickHouse/ClickHouse/pull/6198) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix bug with incorrect skip indices serialization and aggregation with adaptive granularity. [\#6594](https://github.com/ClickHouse/ClickHouse/issues/6594). [\#6748](https://github.com/ClickHouse/ClickHouse/pull/6748) ([alesapin](https://github.com/alesapin))
- Fix `WITH ROLLUP` and `WITH CUBE` modifiers of `GROUP BY` with two-level aggregation. [\#6225](https://github.com/ClickHouse/ClickHouse/pull/6225) ([Anton Popov](https://github.com/CurtizJ))
- Fix bug with writing secondary index marks with adaptive granularity. [\#6126](https://github.com/ClickHouse/ClickHouse/pull/6126) ([alesapin](https://github.com/alesapin))
- Fix initialization order during server startup. Since `StorageMergeTree::background_task_handle` is initialized in `startup()`, `MergeTreeBlockOutputStream::write()` may try to use it before initialization. Just check if it is initialized. [\#6080](https://github.com/ClickHouse/ClickHouse/pull/6080) ([Ivan](https://github.com/abyss7))
- Clear the data buffer from a previous read operation that completed with an error.
[\#6026](https://github.com/ClickHouse/ClickHouse/pull/6026) ([Nikolay](https://github.com/bopohaa))
- Fix bug with enabling adaptive granularity when creating a new replica for a Replicated\*MergeTree table. [\#6394](https://github.com/ClickHouse/ClickHouse/issues/6394) [\#6452](https://github.com/ClickHouse/ClickHouse/pull/6452) ([alesapin](https://github.com/alesapin))
- Fixed possible crash during server startup in case an exception happened in `libunwind` during an exception at access to the uninitialized `ThreadStatus` structure. [\#6456](https://github.com/ClickHouse/ClickHouse/pull/6456) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
- Fix crash in `yandexConsistentHash` function. Found by fuzz test. [\#6304](https://github.com/ClickHouse/ClickHouse/issues/6304) [\#6305](https://github.com/ClickHouse/ClickHouse/pull/6305) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed the possibility of hanging queries when the server is overloaded and the global thread pool becomes nearly full. This has a higher chance of happening on clusters with a large number of shards (hundreds), because distributed queries allocate a thread per connection to each shard. For example, this issue may reproduce if a cluster of 330 shards is processing 30 concurrent distributed queries. This issue affects all versions starting from 19.2. [\#6301](https://github.com/ClickHouse/ClickHouse/pull/6301) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed logic of `arrayEnumerateUniqRanked` function. [\#6423](https://github.com/ClickHouse/ClickHouse/pull/6423) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix segfault when decoding symbol table. [\#6603](https://github.com/ClickHouse/ClickHouse/pull/6603) ([Amos Bird](https://github.com/amosbird))
- Fixed irrelevant exception in cast of `LowCardinality(Nullable)` to a not-Nullable column in case it doesn’t contain NULLs (e.g. in a query like `SELECT CAST(CAST('Hello' AS LowCardinality(Nullable(String))) AS String)`). [\#6094](https://github.com/ClickHouse/ClickHouse/issues/6094) [\#6119](https://github.com/ClickHouse/ClickHouse/pull/6119) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Removed extra quoting of description in `system.settings` table. [\#6696](https://github.com/ClickHouse/ClickHouse/issues/6696) [\#6699](https://github.com/ClickHouse/ClickHouse/pull/6699) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Avoid possible deadlock in `TRUNCATE` of a Replicated table. [\#6695](https://github.com/ClickHouse/ClickHouse/pull/6695) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix reading in order of sorting key. [\#6189](https://github.com/ClickHouse/ClickHouse/pull/6189) ([Anton Popov](https://github.com/CurtizJ))
- Fix `ALTER TABLE ... UPDATE` query for tables with `enable_mixed_granularity_parts=1`. [\#6543](https://github.com/ClickHouse/ClickHouse/pull/6543) ([alesapin](https://github.com/alesapin))
- Fix bug opened by [\#4405](https://github.com/ClickHouse/ClickHouse/pull/4405) (since 19.4.0). Reproduces in queries to Distributed tables over MergeTree tables when we don’t query any columns (`SELECT 1`). [\#6236](https://github.com/ClickHouse/ClickHouse/pull/6236) ([alesapin](https://github.com/alesapin))
- Fixed overflow in integer division of a signed type by an unsigned type. The behaviour was exactly as in the C or C++ language (integer promotion rules), which may be surprising.
Please note that the overflow is still possible when dividing a large signed number by a large unsigned number or vice versa (but that case is less usual). The issue existed in all server versions. [\#6214](https://github.com/ClickHouse/ClickHouse/issues/6214) [\#6233](https://github.com/ClickHouse/ClickHouse/pull/6233) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Limit maximum sleep time for throttling when `max_execution_speed` or `max_execution_speed_bytes` is set. Fixed false errors like `Estimated query execution time (inf seconds) is too long`. [\#5547](https://github.com/ClickHouse/ClickHouse/issues/5547) [\#6232](https://github.com/ClickHouse/ClickHouse/pull/6232) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed issues with using `MATERIALIZED` columns and aliases in `MaterializedView`. [\#448](https://github.com/ClickHouse/ClickHouse/issues/448) [\#3484](https://github.com/ClickHouse/ClickHouse/issues/3484) [\#3450](https://github.com/ClickHouse/ClickHouse/issues/3450) [\#2878](https://github.com/ClickHouse/ClickHouse/issues/2878) [\#2285](https://github.com/ClickHouse/ClickHouse/issues/2285) [\#3796](https://github.com/ClickHouse/ClickHouse/pull/3796) ([Amos Bird](https://github.com/amosbird)) [\#6316](https://github.com/ClickHouse/ClickHouse/pull/6316) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix `FormatFactory` behaviour for input streams which are not implemented as processors. [\#6495](https://github.com/ClickHouse/ClickHouse/pull/6495) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fixed typo. [\#6631](https://github.com/ClickHouse/ClickHouse/pull/6631) ([Alex Ryndin](https://github.com/alexryndin))
- Fixed a typo in an error message (is -\> are). [\#6839](https://github.com/ClickHouse/ClickHouse/pull/6839) ([Denis Zhuravlev](https://github.com/den-crane))
- Fixed error while parsing a column list from a string if a type contained a comma (this issue was relevant for `File`, `URL`, `HDFS` storages) [\#6217](https://github.com/ClickHouse/ClickHouse/issues/6217). [\#6209](https://github.com/ClickHouse/ClickHouse/pull/6209) ([dimarub2000](https://github.com/dimarub2000))

#### Security Fix {#security-fix}

- This release also contains all security fixes from 19.13 and 19.11.
- Fixed the possibility of a fabricated query causing a server crash due to stack overflow in the SQL parser. Fixed the possibility of stack overflow in Merge and Distributed tables, materialized views, and conditions for row-level security that involve subqueries. [\#6433](https://github.com/ClickHouse/ClickHouse/pull/6433) ([alexey-milovidov](https://github.com/alexey-milovidov))

#### Improvement {#improvement-3}

- Correct implementation of ternary logic for `AND/OR`. [\#6048](https://github.com/ClickHouse/ClickHouse/pull/6048) ([Alexander Kazakov](https://github.com/Akazz))
- Now values and rows with expired TTL will be removed after an `OPTIMIZE ... FINAL` query from old parts without TTL infos or with outdated TTL infos, e.g. after an `ALTER ... MODIFY TTL` query. Added queries `SYSTEM STOP/START TTL MERGES` to disallow/allow assigning merges with TTL and filtering expired values in all merges (see the sketch below). [\#6274](https://github.com/ClickHouse/ClickHouse/pull/6274) ([Anton Popov](https://github.com/CurtizJ))
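A sketch of the new TTL merge controls, assuming a table `t` that has a TTL expression:

```sql
-- Temporarily disallow assigning merges that filter expired TTL values
SYSTEM STOP TTL MERGES;

-- ... and allow them again; OPTIMIZE ... FINAL then also removes values
-- whose TTL infos are missing or outdated
SYSTEM START TTL MERGES;
OPTIMIZE TABLE t FINAL;
```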
- Possibility to change the location of the ClickHouse history file for the client using the `CLICKHOUSE_HISTORY_FILE` env variable. [\#6840](https://github.com/ClickHouse/ClickHouse/pull/6840) ([filimonov](https://github.com/filimonov))
- Remove `dry_run` flag from `InterpreterSelectQuery`. [\#6375](https://github.com/ClickHouse/ClickHouse/pull/6375) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Support `ASOF JOIN` with `ON` section. [\#6211](https://github.com/ClickHouse/ClickHouse/pull/6211) ([Artem Zuikov](https://github.com/4ertus2))
- Better support of skip indexes for mutations and replication. Support for the `MATERIALIZE/CLEAR INDEX ... IN PARTITION` query. `UPDATE x = x` recalculates all indices that use column `x`. [\#5053](https://github.com/ClickHouse/ClickHouse/pull/5053) ([Nikita Vasilev](https://github.com/nikvas0))
- Allow to `ATTACH` live views (for example, at the server startup) regardless of the `allow_experimental_live_view` setting. [\#6754](https://github.com/ClickHouse/ClickHouse/pull/6754) ([alexey-milovidov](https://github.com/alexey-milovidov))
- For stack traces gathered by the query profiler, do not include stack frames generated by the query profiler itself. [\#6250](https://github.com/ClickHouse/ClickHouse/pull/6250) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Now table functions `values`, `file`, `url`, `hdfs` have support for ALIAS columns. [\#6255](https://github.com/ClickHouse/ClickHouse/pull/6255) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Throw an exception if a `config.d` file doesn’t have the same root element as the config file. [\#6123](https://github.com/ClickHouse/ClickHouse/pull/6123) ([dimarub2000](https://github.com/dimarub2000))
- Print extra info in the exception message for `no space left on device`. [\#6182](https://github.com/ClickHouse/ClickHouse/issues/6182), [\#6252](https://github.com/ClickHouse/ClickHouse/issues/6252) [\#6352](https://github.com/ClickHouse/ClickHouse/pull/6352) ([tavplubix](https://github.com/tavplubix))
- When determining shards of a `Distributed` table to be covered by a read query (for `optimize_skip_unused_shards` = 1), ClickHouse now checks conditions from both `prewhere` and `where` clauses of the select statement. [\#6521](https://github.com/ClickHouse/ClickHouse/pull/6521) ([Alexander Kazakov](https://github.com/Akazz))
- Enabled `SIMDJSON` for machines without AVX2 but with SSE 4.2 and the PCLMUL instruction set. [\#6285](https://github.com/ClickHouse/ClickHouse/issues/6285) [\#6320](https://github.com/ClickHouse/ClickHouse/pull/6320) ([alexey-milovidov](https://github.com/alexey-milovidov))
- ClickHouse can work on filesystems without `O_DIRECT` support (such as ZFS and BtrFS) without additional tuning. [\#4449](https://github.com/ClickHouse/ClickHouse/issues/4449) [\#6730](https://github.com/ClickHouse/ClickHouse/pull/6730) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Support predicate push-down for the final subquery. [\#6120](https://github.com/ClickHouse/ClickHouse/pull/6120) ([TCeason](https://github.com/TCeason)) [\#6162](https://github.com/ClickHouse/ClickHouse/pull/6162) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Better `JOIN ON` keys extraction. [\#6131](https://github.com/ClickHouse/ClickHouse/pull/6131) ([Artem Zuikov](https://github.com/4ertus2))
- Updated `SIMDJSON`. [\#6285](https://github.com/ClickHouse/ClickHouse/issues/6285). [\#6306](https://github.com/ClickHouse/ClickHouse/pull/6306) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Optimize selection of the smallest column for a `SELECT count()` query. [\#6344](https://github.com/ClickHouse/ClickHouse/pull/6344) ([Amos Bird](https://github.com/amosbird))
- Added `strict` parameter in `windowFunnel()`. When `strict` is set, `windowFunnel()` applies conditions only to unique values (see the sketch below). [\#6548](https://github.com/ClickHouse/ClickHouse/pull/6548) ([achimbab](https://github.com/achimbab))
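A sketch of the new mode, assuming a hypothetical table `events(user_id UInt64, event_time DateTime, event String)`:

```sql
-- How far each user gets through a three-step funnel within 3600 seconds;
-- with 'strict', conditions are applied only to unique values (per the entry above)
SELECT user_id,
       windowFunnel(3600, 'strict')(event_time,
           event = 'view', event = 'cart', event = 'buy') AS level
FROM events
GROUP BY user_id;
```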
- Safer interface of `mysqlxx::Pool`. [\#6150](https://github.com/ClickHouse/ClickHouse/pull/6150) ([avasiliev](https://github.com/avasiliev))
- The width of option lines when executing with the `--help` option now corresponds to the terminal size. [\#6590](https://github.com/ClickHouse/ClickHouse/pull/6590) ([dimarub2000](https://github.com/dimarub2000))
- Disable “read in order” optimization for aggregation without keys. [\#6599](https://github.com/ClickHouse/ClickHouse/pull/6599) ([Anton Popov](https://github.com/CurtizJ))
- HTTP status code for `INCORRECT_DATA` and `TYPE_MISMATCH` error codes was changed from the default `500 Internal Server Error` to `400 Bad Request`. [\#6271](https://github.com/ClickHouse/ClickHouse/pull/6271) ([Alexander Rodin](https://github.com/a-rodin))
- Move the Join object from `ExpressionAction` into `AnalyzedJoin`. `ExpressionAnalyzer` and `ExpressionAction` do not know about the `Join` class anymore. Its logic is hidden by the `AnalyzedJoin` interface. [\#6801](https://github.com/ClickHouse/ClickHouse/pull/6801) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed possible deadlock of distributed queries when one of the shards is localhost but the query is sent via a network connection. [\#6759](https://github.com/ClickHouse/ClickHouse/pull/6759) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Changed semantics of multiple-table `RENAME` to avoid possible deadlocks. [\#6757](https://github.com/ClickHouse/ClickHouse/issues/6757). [\#6756](https://github.com/ClickHouse/ClickHouse/pull/6756) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Rewritten MySQL compatibility server to prevent loading the full packet payload in memory. Decreased memory consumption for each connection to approximately `2 * DBMS_DEFAULT_BUFFER_SIZE` (read/write buffers). [\#5811](https://github.com/ClickHouse/ClickHouse/pull/5811) ([Yuriy Baranov](https://github.com/yurriy))
- Move AST alias interpreting logic out of the parser, which doesn’t have to know anything about query semantics. [\#6108](https://github.com/ClickHouse/ClickHouse/pull/6108) ([Artem Zuikov](https://github.com/4ertus2))
- Slightly safer parsing of `NamesAndTypesList`. [\#6408](https://github.com/ClickHouse/ClickHouse/issues/6408). [\#6410](https://github.com/ClickHouse/ClickHouse/pull/6410) ([alexey-milovidov](https://github.com/alexey-milovidov))
- `clickhouse-copier`: Allow using `where_condition` from the config with the `partition_key` alias in a query for checking partition existence (earlier it was used only in data reading queries). [\#6577](https://github.com/ClickHouse/ClickHouse/pull/6577) ([proller](https://github.com/proller))
- Added optional message argument in `throwIf` (see the example below). ([\#5772](https://github.com/ClickHouse/ClickHouse/issues/5772)) [\#6329](https://github.com/ClickHouse/ClickHouse/pull/6329) ([Vdimir](https://github.com/Vdimir))
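A minimal example of the new argument (the message text is illustrative):

```sql
-- Without the second argument, throwIf reports a generic error;
-- the optional message customizes the exception text
SELECT throwIf(number = 3, 'number 3 is not allowed')
FROM numbers(10);
```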
- A server exception raised while sending insertion data is now processed in the client as well. [\#5891](https://github.com/ClickHouse/ClickHouse/issues/5891) [\#6711](https://github.com/ClickHouse/ClickHouse/pull/6711) ([dimarub2000](https://github.com/dimarub2000))
- Added a metric `DistributedFilesToInsert` that shows the total number of files in the filesystem that are selected to be sent to remote servers by Distributed tables. The number is summed across all shards. [\#6600](https://github.com/ClickHouse/ClickHouse/pull/6600) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Move most of the JOIN preparation logic from `ExpressionAction/ExpressionAnalyzer` to `AnalyzedJoin`. [\#6785](https://github.com/ClickHouse/ClickHouse/pull/6785) ([Artem Zuikov](https://github.com/4ertus2))
- Fix TSan [warning](https://clickhouse-test-reports.s3.yandex.net/6399/c1c1d1daa98e199e620766f1bd06a5921050a00d/functional_stateful_tests_(thread).html) ‘lock-order-inversion’. [\#6740](https://github.com/ClickHouse/ClickHouse/pull/6740) ([Vasily Nemkov](https://github.com/Enmk))
- Better information messages about the lack of Linux capabilities. Logging fatal errors with the “fatal” level, which will make them easier to find in `system.text_log`. [\#6441](https://github.com/ClickHouse/ClickHouse/pull/6441) ([alexey-milovidov](https://github.com/alexey-milovidov))
- When dumping temporary data to disk to restrict memory usage during `GROUP BY` or `ORDER BY` is enabled, the free disk space was not checked. The fix adds a new setting `min_free_disk_space`: when the free disk space is smaller than this threshold, the query stops and throws `ErrorCodes::NOT_ENOUGH_SPACE`. [\#6678](https://github.com/ClickHouse/ClickHouse/pull/6678) ([Weiqing Xu](https://github.com/weiqxu)) [\#6691](https://github.com/ClickHouse/ClickHouse/pull/6691) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Removed recursive rwlock by thread. It makes no sense, because threads are reused between queries. A `SELECT` query may acquire a lock in one thread, hold a lock from another thread and exit from the first thread. At the same time, the first thread can be reused by a `DROP` query. This leads to false “Attempt to acquire exclusive lock recursively” messages. [\#6771](https://github.com/ClickHouse/ClickHouse/pull/6771) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Split `ExpressionAnalyzer.appendJoin()`. Prepare a place in `ExpressionAnalyzer` for `MergeJoin`. [\#6524](https://github.com/ClickHouse/ClickHouse/pull/6524) ([Artem Zuikov](https://github.com/4ertus2))
- Added `mysql_native_password` authentication plugin to the MySQL compatibility server. [\#6194](https://github.com/ClickHouse/ClickHouse/pull/6194) ([Yuriy Baranov](https://github.com/yurriy))
- Fewer `clock_gettime` calls; fixed ABI compatibility between debug/release in `Allocator` (insignificant issue). [\#6197](https://github.com/ClickHouse/ClickHouse/pull/6197) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Move `collectUsedColumns` from `ExpressionAnalyzer` to `SyntaxAnalyzer`. `SyntaxAnalyzer` makes `required_source_columns` itself now. [\#6416](https://github.com/ClickHouse/ClickHouse/pull/6416) ([Artem Zuikov](https://github.com/4ertus2))
- Add setting `joined_subquery_requires_alias` to require aliases for subselects and table functions in `FROM` when more than one table is present (i.e. queries with JOINs); see the sketch below. [\#6733](https://github.com/ClickHouse/ClickHouse/pull/6733) ([Artem Zuikov](https://github.com/4ertus2))
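A sketch of a query that satisfies the setting; the subquery alias `s` and table function alias `n` are required once it is enabled:

```sql
SET joined_subquery_requires_alias = 1;

SELECT s.number, n.number
FROM (SELECT number FROM numbers(3)) AS s
JOIN numbers(3) AS n ON s.number = n.number;
```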
- Extract `GetAggregatesVisitor` class from `ExpressionAnalyzer`. [\#6458](https://github.com/ClickHouse/ClickHouse/pull/6458) ([Artem Zuikov](https://github.com/4ertus2))
- `system.query_log`: change data type of `type` column to `Enum`. [\#6265](https://github.com/ClickHouse/ClickHouse/pull/6265) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
- Static linking of `sha256_password` authentication plugin. [\#6512](https://github.com/ClickHouse/ClickHouse/pull/6512) ([Yuriy Baranov](https://github.com/yurriy))
- Avoid extra dependency for the setting `compile` to work. In previous versions, the user could get errors like `cannot open crti.o`, `unable to find library -lc` etc. [\#6309](https://github.com/ClickHouse/ClickHouse/pull/6309) ([alexey-milovidov](https://github.com/alexey-milovidov))
- More validation of the input that may come from a malicious replica. [\#6303](https://github.com/ClickHouse/ClickHouse/pull/6303) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Now the `clickhouse-obfuscator` file is available in the `clickhouse-client` package. In previous versions it was available as `clickhouse obfuscator` (with whitespace). [\#5816](https://github.com/ClickHouse/ClickHouse/issues/5816) [\#6609](https://github.com/ClickHouse/ClickHouse/pull/6609) ([dimarub2000](https://github.com/dimarub2000))
- Fixed deadlock when we have at least two queries that read at least two tables in different order and another query that performs a DDL operation on one of the tables. Fixed another very rare deadlock. [\#6764](https://github.com/ClickHouse/ClickHouse/pull/6764) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added `os_thread_ids` column to `system.processes` and `system.query_log` for better debugging possibilities. [\#6763](https://github.com/ClickHouse/ClickHouse/pull/6763) ([alexey-milovidov](https://github.com/alexey-milovidov))
- A workaround for PHP mysqlnd extension bugs which occur when `sha256_password` is used as the default authentication plugin (described in [\#6031](https://github.com/ClickHouse/ClickHouse/issues/6031)). [\#6113](https://github.com/ClickHouse/ClickHouse/pull/6113) ([Yuriy Baranov](https://github.com/yurriy))
- Remove unneeded place with changed nullability columns. [\#6693](https://github.com/ClickHouse/ClickHouse/pull/6693) ([Artem Zuikov](https://github.com/4ertus2))
- Set default value of `queue_max_wait_ms` to zero, because the current value (five seconds) makes no sense. There are rare circumstances when this setting has any use. Added settings `replace_running_query_max_wait_ms`, `kafka_max_wait_ms` and `connection_pool_max_wait_ms` for disambiguation. [\#6692](https://github.com/ClickHouse/ClickHouse/pull/6692) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Extract `SelectQueryExpressionAnalyzer` from `ExpressionAnalyzer`. Keep the latter for non-select queries. [\#6499](https://github.com/ClickHouse/ClickHouse/pull/6499) ([Artem Zuikov](https://github.com/4ertus2))
- Removed duplicated input and output formats. [\#6239](https://github.com/ClickHouse/ClickHouse/pull/6239) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Allow the user to override `poll_interval` and `idle_connection_timeout` settings on connection. [\#6230](https://github.com/ClickHouse/ClickHouse/pull/6230) ([alexey-milovidov](https://github.com/alexey-milovidov))
- `MergeTree` now has an additional option `ttl_only_drop_parts` (disabled by default) to avoid partial pruning of parts, so that they are dropped completely when all rows in a part are expired (see the sketch below). [\#6191](https://github.com/ClickHouse/ClickHouse/pull/6191) ([Sergi Vladykin](https://github.com/svladykin))
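A sketch of the option on a hypothetical table; once every row of a part is expired, the whole part is dropped instead of being rewritten:

```sql
CREATE TABLE events
(
    `d` DateTime,
    `x` UInt32
)
ENGINE = MergeTree
ORDER BY d
TTL d + INTERVAL 1 MONTH
SETTINGS ttl_only_drop_parts = 1;
```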
- Type checks for set index functions. Throw an exception if a function got a wrong type. This fixes a fuzz test with UBSan. [\#6511](https://github.com/ClickHouse/ClickHouse/pull/6511) ([Nikita Vasilev](https://github.com/nikvas0))

#### Performance Improvement {#performance-improvement-2}

- Optimize queries with an `ORDER BY expressions` clause, where `expressions` have a coinciding prefix with the sorting key in `MergeTree` tables. This optimization is controlled by the `optimize_read_in_order` setting. [\#6054](https://github.com/ClickHouse/ClickHouse/pull/6054) [\#6629](https://github.com/ClickHouse/ClickHouse/pull/6629) ([Anton Popov](https://github.com/CurtizJ))
- Allow using multiple threads during part loading and removal. [\#6372](https://github.com/ClickHouse/ClickHouse/issues/6372) [\#6074](https://github.com/ClickHouse/ClickHouse/issues/6074) [\#6438](https://github.com/ClickHouse/ClickHouse/pull/6438) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Implemented batch variant of updating aggregate function states. It may lead to performance benefits. [\#6435](https://github.com/ClickHouse/ClickHouse/pull/6435) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Using the `FastOps` library for functions `exp`, `log`, `sigmoid`, `tanh`. FastOps is a fast vector math library from Michael Parakhin (Yandex CTO). Improved performance of `exp` and `log` functions more than 6 times. The functions `exp` and `log` from a `Float32` argument will return `Float32` (in previous versions they always returned `Float64`). Now `exp(nan)` may return `inf`. The result of the `exp` and `log` functions may not be the nearest machine-representable number to the true answer. [\#6254](https://github.com/ClickHouse/ClickHouse/pull/6254) ([alexey-milovidov](https://github.com/alexey-milovidov)) Using the Danila Kutenin variant to make FastOps work. [\#6317](https://github.com/ClickHouse/ClickHouse/pull/6317) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Disable consecutive key optimization for `UInt8/16`. [\#6298](https://github.com/ClickHouse/ClickHouse/pull/6298) [\#6701](https://github.com/ClickHouse/ClickHouse/pull/6701) ([akuzm](https://github.com/akuzm))
- Improved performance of the `simdjson` library by getting rid of dynamic allocation in `ParsedJson::Iterator`. [\#6479](https://github.com/ClickHouse/ClickHouse/pull/6479) ([Vitaly Baranov](https://github.com/vitlibar))
- Pre-fault pages when allocating memory with `mmap()`. [\#6667](https://github.com/ClickHouse/ClickHouse/pull/6667) ([akuzm](https://github.com/akuzm))
- Fix performance bug in `Decimal` comparison. [\#6380](https://github.com/ClickHouse/ClickHouse/pull/6380) ([Artem Zuikov](https://github.com/4ertus2))

#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-4}

- Remove Compiler (runtime template instantiation) because we’ve won over its performance. [\#6646](https://github.com/ClickHouse/ClickHouse/pull/6646) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added performance test to show degradation of performance in gcc-9 in a more isolated way. [\#6302](https://github.com/ClickHouse/ClickHouse/pull/6302) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added table function `numbers_mt`, which is a multithreaded version of `numbers` (see the sketch below). Updated performance tests with hash functions. [\#6554](https://github.com/ClickHouse/ClickHouse/pull/6554) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
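A sketch of the kind of benchmark query `numbers_mt` is useful for; `cityHash64` is just an illustrative hash function here:

```sql
-- numbers_mt is the multithreaded counterpart of numbers;
-- ignore() forces evaluation of the hash while returning 0
SELECT count()
FROM numbers_mt(100000000)
WHERE NOT ignore(cityHash64(number));
```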
- Comparison mode in `clickhouse-benchmark`. [\#6220](https://github.com/ClickHouse/ClickHouse/issues/6220) [\#6343](https://github.com/ClickHouse/ClickHouse/pull/6343) ([dimarub2000](https://github.com/dimarub2000))
- Best effort for printing stack traces. Also added `SIGPROF` as a debugging signal to print the stack trace of a running thread. [\#6529](https://github.com/ClickHouse/ClickHouse/pull/6529) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Every function in its own file, part 10. [\#6321](https://github.com/ClickHouse/ClickHouse/pull/6321) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Remove duplicated const `TABLE_IS_READ_ONLY`. [\#6566](https://github.com/ClickHouse/ClickHouse/pull/6566) ([filimonov](https://github.com/filimonov))
- Formatting changes for `StringHashMap` PR [\#5417](https://github.com/ClickHouse/ClickHouse/issues/5417). [\#6700](https://github.com/ClickHouse/ClickHouse/pull/6700) ([akuzm](https://github.com/akuzm))
- Better subquery for join creation in `ExpressionAnalyzer`. [\#6824](https://github.com/ClickHouse/ClickHouse/pull/6824) ([Artem Zuikov](https://github.com/4ertus2))
- Remove a redundant condition (found by PVS Studio). [\#6775](https://github.com/ClickHouse/ClickHouse/pull/6775) ([akuzm](https://github.com/akuzm))
- Separate the hash table interface for `ReverseIndex`. [\#6672](https://github.com/ClickHouse/ClickHouse/pull/6672) ([akuzm](https://github.com/akuzm))
- Refactoring of settings. [\#6689](https://github.com/ClickHouse/ClickHouse/pull/6689) ([alesapin](https://github.com/alesapin))
- Add comments for `set` index functions. [\#6319](https://github.com/ClickHouse/ClickHouse/pull/6319) ([Nikita Vasilev](https://github.com/nikvas0))
- Increase OOM score in debug version on Linux. [\#6152](https://github.com/ClickHouse/ClickHouse/pull/6152) ([akuzm](https://github.com/akuzm))
- HDFS HA now works in debug build. [\#6650](https://github.com/ClickHouse/ClickHouse/pull/6650) ([Weiqing Xu](https://github.com/weiqxu))
- Added a test to `transform_query_for_external_database`. [\#6388](https://github.com/ClickHouse/ClickHouse/pull/6388) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Add test for multiple materialized views for Kafka table. [\#6509](https://github.com/ClickHouse/ClickHouse/pull/6509) ([Ivan](https://github.com/abyss7))
- Make a better build scheme. [\#6500](https://github.com/ClickHouse/ClickHouse/pull/6500) ([Ivan](https://github.com/abyss7))
- Fixed `test_external_dictionaries` integration test in case it was executed under a non-root user. [\#6507](https://github.com/ClickHouse/ClickHouse/pull/6507) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- The bug reproduces when the total size of written packets exceeds `DBMS_DEFAULT_BUFFER_SIZE`. [\#6204](https://github.com/ClickHouse/ClickHouse/pull/6204) ([Yuriy Baranov](https://github.com/yurriy))
- Added a test for the `RENAME` table race condition. [\#6752](https://github.com/ClickHouse/ClickHouse/pull/6752) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Avoid data race on Settings in `KILL QUERY`. [\#6753](https://github.com/ClickHouse/ClickHouse/pull/6753) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Add integration test for handling errors by a cache dictionary.
[\#6755](https://github.com/ClickHouse/ClickHouse/pull/6755) ([Vitaly Baranov](https://github.com/vitlibar))
- Disable parsing of ELF object files on Mac OS, because it makes no sense. [\#6578](https://github.com/ClickHouse/ClickHouse/pull/6578) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Attempt to make the changelog generator better. [\#6327](https://github.com/ClickHouse/ClickHouse/pull/6327) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Adding the `-Wshadow` switch to GCC. [\#6325](https://github.com/ClickHouse/ClickHouse/pull/6325) ([kreuzerkrieg](https://github.com/kreuzerkrieg))
- Removed obsolete code for `mimalloc` support. [\#6715](https://github.com/ClickHouse/ClickHouse/pull/6715) ([alexey-milovidov](https://github.com/alexey-milovidov))
- `zlib-ng` determines x86 capabilities and saves this info to global variables. This is done in the `deflateInit` call, which may be made by different threads simultaneously. To avoid multithreaded writes, do it on library startup. [\#6141](https://github.com/ClickHouse/ClickHouse/pull/6141) ([akuzm](https://github.com/akuzm))
- Regression test for a bug in JOIN which was fixed in [\#5192](https://github.com/ClickHouse/ClickHouse/issues/5192). [\#6147](https://github.com/ClickHouse/ClickHouse/pull/6147) ([Bakhtiyor Ruziev](https://github.com/theruziev))
- Fixed MSan report. [\#6144](https://github.com/ClickHouse/ClickHouse/pull/6144) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix flapping TTL test. [\#6782](https://github.com/ClickHouse/ClickHouse/pull/6782) ([Anton Popov](https://github.com/CurtizJ))
- Fixed false data race in `MergeTreeDataPart::is_frozen` field. [\#6583](https://github.com/ClickHouse/ClickHouse/pull/6583) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed timeouts in fuzz test. In the previous version, it managed to find a false hangup in the query `SELECT * FROM numbers_mt(gccMurmurHash(''))`. [\#6582](https://github.com/ClickHouse/ClickHouse/pull/6582) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added debug checks to `static_cast` of columns. [\#6581](https://github.com/ClickHouse/ClickHouse/pull/6581) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Support for Oracle Linux in official RPM packages. [\#6356](https://github.com/ClickHouse/ClickHouse/issues/6356) [\#6585](https://github.com/ClickHouse/ClickHouse/pull/6585) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Changed json perftests from `once` to `loop` type. [\#6536](https://github.com/ClickHouse/ClickHouse/pull/6536) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- `odbc-bridge.cpp` defines `main()` so it should not be included in `clickhouse-lib`. [\#6538](https://github.com/ClickHouse/ClickHouse/pull/6538) ([Orivej Desh](https://github.com/orivej))
- Test for crash in `FULL|RIGHT JOIN` with NULLs in the right table’s keys. [\#6362](https://github.com/ClickHouse/ClickHouse/pull/6362) ([Artem Zuikov](https://github.com/4ertus2))
- Added a test for the limit on expansion of aliases, just in case. [\#6442](https://github.com/ClickHouse/ClickHouse/pull/6442) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Switched from `boost::filesystem` to `std::filesystem` where appropriate. [\#6253](https://github.com/ClickHouse/ClickHouse/pull/6253) [\#6385](https://github.com/ClickHouse/ClickHouse/pull/6385) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added RPM packages to the website.
[\#6251](https://github.com/ClickHouse/ClickHouse/pull/6251) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Add a test for the fixed `Unknown identifier` exception in the `IN` section. [\#6708](https://github.com/ClickHouse/ClickHouse/pull/6708) ([Artem Zuikov](https://github.com/4ertus2))
- Simplify `shared_ptr_helper` because people face difficulties understanding it. [\#6675](https://github.com/ClickHouse/ClickHouse/pull/6675) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added performance tests for the fixed Gorilla and DoubleDelta codecs. [\#6179](https://github.com/ClickHouse/ClickHouse/pull/6179) ([Vasily Nemkov](https://github.com/Enmk))
- Split the integration test `test_dictionaries` into 4 separate tests. [\#6776](https://github.com/ClickHouse/ClickHouse/pull/6776) ([Vitaly Baranov](https://github.com/vitlibar))
- Fix PVS-Studio warning in `PipelineExecutor`. [\#6777](https://github.com/ClickHouse/ClickHouse/pull/6777) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Allow using the `library` dictionary source with ASan. [\#6482](https://github.com/ClickHouse/ClickHouse/pull/6482) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added an option to generate a changelog from a list of PRs. [\#6350](https://github.com/ClickHouse/ClickHouse/pull/6350) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Lock the `TinyLog` storage when reading. [\#6226](https://github.com/ClickHouse/ClickHouse/pull/6226) ([akuzm](https://github.com/akuzm))
- Check for broken symlinks in CI. [\#6634](https://github.com/ClickHouse/ClickHouse/pull/6634) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Increase timeout for the “stack overflow” test because it may take a long time in a debug build. [\#6637](https://github.com/ClickHouse/ClickHouse/pull/6637) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added a check for double whitespaces. [\#6643](https://github.com/ClickHouse/ClickHouse/pull/6643) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix `new/delete` memory tracking when built with sanitizers. Tracking is not clear. It only prevents memory limit exceptions in tests. [\#6450](https://github.com/ClickHouse/ClickHouse/pull/6450) ([Artem Zuikov](https://github.com/4ertus2))
- Enable back the check of undefined symbols while linking. [\#6453](https://github.com/ClickHouse/ClickHouse/pull/6453) ([Ivan](https://github.com/abyss7))
- Avoid rebuilding `hyperscan` every day. [\#6307](https://github.com/ClickHouse/ClickHouse/pull/6307) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed UBSan report in `ProtobufWriter`. [\#6163](https://github.com/ClickHouse/ClickHouse/pull/6163) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Don’t allow using the query profiler with sanitizers because it is not compatible. [\#6769](https://github.com/ClickHouse/ClickHouse/pull/6769) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Add test for reloading a dictionary after a failure, by timer. [\#6114](https://github.com/ClickHouse/ClickHouse/pull/6114) ([Vitaly Baranov](https://github.com/vitlibar))
- Fix inconsistency in `PipelineExecutor::prepareProcessor` argument type. [\#6494](https://github.com/ClickHouse/ClickHouse/pull/6494) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Added a test for bad URIs. [\#6493](https://github.com/ClickHouse/ClickHouse/pull/6493) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added more checks to the `CAST` function.
This should get more information about the segmentation fault in the fuzz test. [\#6346](https://github.com/ClickHouse/ClickHouse/pull/6346) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Added `gcc-9` support to the `docker/builder` container that builds the image locally. [\#6333](https://github.com/ClickHouse/ClickHouse/pull/6333) ([Gleb Novikov](https://github.com/NanoBjorn))
- Test for primary key with `LowCardinality(String)`. [\#5044](https://github.com/ClickHouse/ClickHouse/issues/5044) [\#6219](https://github.com/ClickHouse/ClickHouse/pull/6219) ([dimarub2000](https://github.com/dimarub2000))
- Fixed tests affected by slow printing of stack traces. [\#6315](https://github.com/ClickHouse/ClickHouse/pull/6315) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Add a test case for the crash in `groupUniqArray` fixed in [\#6029](https://github.com/ClickHouse/ClickHouse/pull/6029). [\#4402](https://github.com/ClickHouse/ClickHouse/issues/4402) [\#6129](https://github.com/ClickHouse/ClickHouse/pull/6129) ([akuzm](https://github.com/akuzm))
- Fixed index mutation tests. [\#6645](https://github.com/ClickHouse/ClickHouse/pull/6645) ([Nikita Vasilev](https://github.com/nikvas0))
- In performance test, do not read the query log for queries we didn’t run. [\#6427](https://github.com/ClickHouse/ClickHouse/pull/6427) ([akuzm](https://github.com/akuzm))
- A materialized view can now be created with any LowCardinality types regardless of the setting about suspicious LowCardinality types. [\#6428](https://github.com/ClickHouse/ClickHouse/pull/6428) ([Olga Khvostikova](https://github.com/stavrolia))
- Updated tests for the `send_logs_level` setting. [\#6207](https://github.com/ClickHouse/ClickHouse/pull/6207) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fix build under gcc-8.2. [\#6196](https://github.com/ClickHouse/ClickHouse/pull/6196) ([Max Akhmedov](https://github.com/zlobober))
- Fix build with internal libc++. [\#6724](https://github.com/ClickHouse/ClickHouse/pull/6724) ([Ivan](https://github.com/abyss7))
- Fix shared build with the `rdkafka` library. [\#6101](https://github.com/ClickHouse/ClickHouse/pull/6101) ([Ivan](https://github.com/abyss7))
- Fixes for Mac OS build (incomplete). [\#6390](https://github.com/ClickHouse/ClickHouse/pull/6390) ([alexey-milovidov](https://github.com/alexey-milovidov)) [\#6429](https://github.com/ClickHouse/ClickHouse/pull/6429) ([alex-zaitsev](https://github.com/alex-zaitsev))
- Fix “splitted” build. [\#6618](https://github.com/ClickHouse/ClickHouse/pull/6618) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Other build fixes: [\#6186](https://github.com/ClickHouse/ClickHouse/pull/6186) ([Amos Bird](https://github.com/amosbird)) [\#6486](https://github.com/ClickHouse/ClickHouse/pull/6486) [\#6348](https://github.com/ClickHouse/ClickHouse/pull/6348) ([vxider](https://github.com/Vxider)) [\#6744](https://github.com/ClickHouse/ClickHouse/pull/6744) ([Ivan](https://github.com/abyss7)) [\#6016](https://github.com/ClickHouse/ClickHouse/pull/6016) [\#6421](https://github.com/ClickHouse/ClickHouse/pull/6421) [\#6491](https://github.com/ClickHouse/ClickHouse/pull/6491) ([proller](https://github.com/proller))

#### Backward Incompatible Change {#backward-incompatible-change-3}

- Removed rarely used table function `catBoostPool` and storage `CatBoostPool`. If you have used this table function, please write an email to `clickhouse-feedback@yandex-team.com`. Note that CatBoost integration remains and will be supported.
[\#6279](https://github.com/ClickHouse/ClickHouse/pull/6279) ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Disable `ANY RIGHT JOIN` and `ANY FULL JOIN` by default. Set `any_join_distinct_right_table_keys` setting to enable them. [\#5126](https://github.com/ClickHouse/ClickHouse/issues/5126) [\#6351](https://github.com/ClickHouse/ClickHouse/pull/6351) ([Artem Zuikov](https://github.com/4ertus2)) - -## ClickHouse release 19.13 {#clickhouse-release-19-13} - -### ClickHouse release 19.13.6.51, 2019-10-02 {#clickhouse-release-19-13-6-51-2019-10-02} - -#### Bug Fix {#bug-fix-9} - -- This release also contains all bug fixes from 19.11.12.69. - -### ClickHouse release 19.13.5.44, 2019-09-20 {#clickhouse-release-19-13-5-44-2019-09-20} - -#### Bug Fix {#bug-fix-10} - -- This release also contains all bug fixes from 19.14.6.12. -- Fixed possible inconsistent state of table while executing `DROP` query for replicated table while zookeeper is not accessible. [\#6045](https://github.com/ClickHouse/ClickHouse/issues/6045) [\#6413](https://github.com/ClickHouse/ClickHouse/pull/6413) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) -- Fix for data race in StorageMerge [\#6717](https://github.com/ClickHouse/ClickHouse/pull/6717) ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Fix bug introduced in query profiler which leads to endless recv from socket. [\#6386](https://github.com/ClickHouse/ClickHouse/pull/6386) ([alesapin](https://github.com/alesapin)) -- Fix excessive CPU usage while executing `JSONExtractRaw` function over a boolean value. [\#6208](https://github.com/ClickHouse/ClickHouse/pull/6208) ([Vitaly Baranov](https://github.com/vitlibar)) -- Fixes the regression while pushing to materialized view. [\#6415](https://github.com/ClickHouse/ClickHouse/pull/6415) ([Ivan](https://github.com/abyss7)) -- Table function `url` had the vulnerability allowed the attacker to inject arbitrary HTTP headers in the request. This issue was found by [Nikita Tikhomirov](https://github.com/NSTikhomirov). [\#6466](https://github.com/ClickHouse/ClickHouse/pull/6466) ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Fix useless `AST` check in Set index. [\#6510](https://github.com/ClickHouse/ClickHouse/issues/6510) [\#6651](https://github.com/ClickHouse/ClickHouse/pull/6651) ([Nikita Vasilev](https://github.com/nikvas0)) -- Fixed parsing of `AggregateFunction` values embedded in query. [\#6575](https://github.com/ClickHouse/ClickHouse/issues/6575) [\#6773](https://github.com/ClickHouse/ClickHouse/pull/6773) ([Zhichang Yu](https://github.com/yuzhichang)) -- Fixed wrong behaviour of `trim` functions family. [\#6647](https://github.com/ClickHouse/ClickHouse/pull/6647) ([alexey-milovidov](https://github.com/alexey-milovidov)) - -### ClickHouse release 19.13.4.32, 2019-09-10 {#clickhouse-release-19-13-4-32-2019-09-10} - -#### Bug Fix {#bug-fix-11} - -- This release also contains all bug security fixes from 19.11.9.52 and 19.11.10.54. -- Fixed data race in `system.parts` table and `ALTER` query. [\#6245](https://github.com/ClickHouse/ClickHouse/issues/6245) [\#6513](https://github.com/ClickHouse/ClickHouse/pull/6513) ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Fixed mismatched header in streams happened in case of reading from empty distributed table with sample and prewhere. 
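A minimal sketch of re-enabling the disabled join kinds; the tables `t1` and `t2` are hypothetical:

```sql
-- ANY RIGHT JOIN and ANY FULL JOIN are now disabled by default;
-- this setting restores the old behaviour for the current session.
SET any_join_distinct_right_table_keys = 1;

SELECT t1.key, t2.value
FROM t1
ANY FULL JOIN t2 USING (key);
```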
## ClickHouse release 19.13 {#clickhouse-release-19-13}

### ClickHouse release 19.13.6.51, 2019-10-02 {#clickhouse-release-19-13-6-51-2019-10-02}

#### Bug Fix {#bug-fix-9}

- This release also contains all bug fixes from 19.11.12.69.

### ClickHouse release 19.13.5.44, 2019-09-20 {#clickhouse-release-19-13-5-44-2019-09-20}

#### Bug Fix {#bug-fix-10}

- This release also contains all bug fixes from 19.14.6.12.
- Fixed a possible inconsistent state of a table while executing a `DROP` query for a replicated table while ZooKeeper is not accessible. [\#6045](https://github.com/ClickHouse/ClickHouse/issues/6045) [\#6413](https://github.com/ClickHouse/ClickHouse/pull/6413) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
- Fix for a data race in StorageMerge. [\#6717](https://github.com/ClickHouse/ClickHouse/pull/6717) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix a bug introduced in the query profiler which led to an endless `recv` from a socket. [\#6386](https://github.com/ClickHouse/ClickHouse/pull/6386) ([alesapin](https://github.com/alesapin))
- Fix excessive CPU usage while executing the `JSONExtractRaw` function over a boolean value. [\#6208](https://github.com/ClickHouse/ClickHouse/pull/6208) ([Vitaly Baranov](https://github.com/vitlibar))
- Fixes a regression while pushing to a materialized view. [\#6415](https://github.com/ClickHouse/ClickHouse/pull/6415) ([Ivan](https://github.com/abyss7))
- The table function `url` had a vulnerability that allowed an attacker to inject arbitrary HTTP headers into the request. This issue was found by [Nikita Tikhomirov](https://github.com/NSTikhomirov). [\#6466](https://github.com/ClickHouse/ClickHouse/pull/6466) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix useless `AST` check in Set index. [\#6510](https://github.com/ClickHouse/ClickHouse/issues/6510) [\#6651](https://github.com/ClickHouse/ClickHouse/pull/6651) ([Nikita Vasilev](https://github.com/nikvas0))
- Fixed parsing of `AggregateFunction` values embedded in a query. [\#6575](https://github.com/ClickHouse/ClickHouse/issues/6575) [\#6773](https://github.com/ClickHouse/ClickHouse/pull/6773) ([Zhichang Yu](https://github.com/yuzhichang))
- Fixed wrong behaviour of the `trim` family of functions. [\#6647](https://github.com/ClickHouse/ClickHouse/pull/6647) ([alexey-milovidov](https://github.com/alexey-milovidov))

### ClickHouse release 19.13.4.32, 2019-09-10 {#clickhouse-release-19-13-4-32-2019-09-10}

#### Bug Fix {#bug-fix-11}

- This release also contains all security bug fixes from 19.11.9.52 and 19.11.10.54.
- Fixed a data race in the `system.parts` table and the `ALTER` query. [\#6245](https://github.com/ClickHouse/ClickHouse/issues/6245) [\#6513](https://github.com/ClickHouse/ClickHouse/pull/6513) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed mismatched headers in streams that happened when reading from an empty distributed table with sample and prewhere. [\#6167](https://github.com/ClickHouse/ClickHouse/issues/6167) ([Lixiang Qian](https://github.com/fancyqlx)) [\#6823](https://github.com/ClickHouse/ClickHouse/pull/6823) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fixed a crash when using the `IN` clause with a subquery with a tuple. [\#6125](https://github.com/ClickHouse/ClickHouse/issues/6125) [\#6550](https://github.com/ClickHouse/ClickHouse/pull/6550) ([tavplubix](https://github.com/tavplubix))
- Fix the case with identical column names in the `GLOBAL JOIN ON` section. [\#6181](https://github.com/ClickHouse/ClickHouse/pull/6181) ([Artem Zuikov](https://github.com/4ertus2))
- Fix a crash when casting types to `Decimal` that do not support it. Throw an exception instead. [\#6297](https://github.com/ClickHouse/ClickHouse/pull/6297) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed a crash in the `extractAll()` function. [\#6644](https://github.com/ClickHouse/ClickHouse/pull/6644) ([Artem Zuikov](https://github.com/4ertus2))
- Query transformation for the `MySQL`, `ODBC`, `JDBC` table functions now works properly for `SELECT WHERE` queries with multiple `AND` expressions. [\#6381](https://github.com/ClickHouse/ClickHouse/issues/6381) [\#6676](https://github.com/ClickHouse/ClickHouse/pull/6676) ([dimarub2000](https://github.com/dimarub2000))
- Added previous declaration checks for MySQL 8 integration. [\#6569](https://github.com/ClickHouse/ClickHouse/pull/6569) ([Rafael David Tinoco](https://github.com/rafaeldtinoco))

#### Security Fix {#security-fix-1}

- Fix two vulnerabilities in codecs in the decompression phase (a malicious user can fabricate compressed data that leads to a buffer overflow in decompression). [\#6670](https://github.com/ClickHouse/ClickHouse/pull/6670) ([Artem Zuikov](https://github.com/4ertus2))

### ClickHouse release 19.13.3.26, 2019-08-22 {#clickhouse-release-19-13-3-26-2019-08-22}

#### Bug Fix {#bug-fix-12}

- Fix the `ALTER TABLE ... UPDATE` query for tables with `enable_mixed_granularity_parts=1`. [\#6543](https://github.com/ClickHouse/ClickHouse/pull/6543) ([alesapin](https://github.com/alesapin))
- Fix an NPE when using the IN clause with a subquery with a tuple. [\#6125](https://github.com/ClickHouse/ClickHouse/issues/6125) [\#6550](https://github.com/ClickHouse/ClickHouse/pull/6550) ([tavplubix](https://github.com/tavplubix))
- Fixed an issue where, if a stale replica became alive, it could still have data parts that were removed by DROP PARTITION. [\#6522](https://github.com/ClickHouse/ClickHouse/issues/6522) [\#6523](https://github.com/ClickHouse/ClickHouse/pull/6523) ([tavplubix](https://github.com/tavplubix))
- Fixed an issue with parsing CSV. [\#6426](https://github.com/ClickHouse/ClickHouse/issues/6426) [\#6559](https://github.com/ClickHouse/ClickHouse/pull/6559) ([tavplubix](https://github.com/tavplubix))
- Fixed a data race in the system.parts table and the ALTER query. This fixes [\#6245](https://github.com/ClickHouse/ClickHouse/issues/6245). [\#6513](https://github.com/ClickHouse/ClickHouse/pull/6513) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed wrong code in mutations that could lead to memory corruption. Fixed a segfault with a read of address `0x14c0` that could happen due to concurrent `DROP TABLE` and `SELECT` from `system.parts` or `system.parts_columns`. Fixed a race condition in the preparation of mutation queries. Fixed a deadlock caused by `OPTIMIZE` of Replicated tables and concurrent modification operations like ALTERs. [\#6514](https://github.com/ClickHouse/ClickHouse/pull/6514) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed possible data loss after an `ALTER DELETE` query on a table with a skipping index. [\#6224](https://github.com/ClickHouse/ClickHouse/issues/6224) [\#6282](https://github.com/ClickHouse/ClickHouse/pull/6282) ([Nikita Vasilev](https://github.com/nikvas0))

#### Security Fix {#security-fix-2}

- If an attacker has write access to ZooKeeper and is able to run a custom server available from the network where ClickHouse runs, they can create a custom-built malicious server that acts as a ClickHouse replica and register it in ZooKeeper. When another replica fetches a data part from the malicious replica, it can force clickhouse-server to write to an arbitrary path on the filesystem. Found by Eldar Zaitov, information security team at Yandex. [\#6247](https://github.com/ClickHouse/ClickHouse/pull/6247) ([alexey-milovidov](https://github.com/alexey-milovidov))

### ClickHouse release 19.13.2.19, 2019-08-14 {#clickhouse-release-19-13-2-19-2019-08-14}

#### New Feature {#new-feature-5}

- Sampling profiler on query level. [Example](https://gist.github.com/alexey-milovidov/92758583dd41c24c360fdb8d6a4da194). [\#4247](https://github.com/ClickHouse/ClickHouse/issues/4247) ([laplab](https://github.com/laplab)) [\#6124](https://github.com/ClickHouse/ClickHouse/pull/6124) ([alexey-milovidov](https://github.com/alexey-milovidov)) [\#6250](https://github.com/ClickHouse/ClickHouse/pull/6250) [\#6283](https://github.com/ClickHouse/ClickHouse/pull/6283) [\#6386](https://github.com/ClickHouse/ClickHouse/pull/6386)
- Allow specifying a list of columns with the `COLUMNS('regexp')` expression, which works like a more sophisticated variant of the `*` asterisk; see the example after this list. [\#5951](https://github.com/ClickHouse/ClickHouse/pull/5951) ([mfridental](https://github.com/mfridental)), ([alexey-milovidov](https://github.com/alexey-milovidov))
- `CREATE TABLE AS table_function()` is now possible. [\#6057](https://github.com/ClickHouse/ClickHouse/pull/6057) ([dimarub2000](https://github.com/dimarub2000))
- The Adam optimizer for stochastic gradient descent is used by default in the `stochasticLinearRegression()` and `stochasticLogisticRegression()` aggregate functions, because it shows good quality with almost no tuning. [\#6000](https://github.com/ClickHouse/ClickHouse/pull/6000) ([Quid37](https://github.com/Quid37))
- Added functions for working with the custom week number. [\#5212](https://github.com/ClickHouse/ClickHouse/pull/5212) ([Andy Yang](https://github.com/andyyzh))
- `RENAME` queries now work with all storages. [\#5953](https://github.com/ClickHouse/ClickHouse/pull/5953) ([Ivan](https://github.com/abyss7))
- Now the client receives logs from the server with any desired level by setting `send_logs_level`, regardless of the log level specified in the server settings. [\#5964](https://github.com/ClickHouse/ClickHouse/pull/5964) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
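A small sketch of the `COLUMNS('regexp')` expression from the list above; the table `col_names` and its columns are hypothetical:

```sql
-- COLUMNS('regexp') expands to all columns whose names match the regexp.
SELECT COLUMNS('^price') FROM col_names;

-- It can be mixed with ordinary expressions in the same select list.
SELECT COLUMNS('_at$'), id FROM col_names;
```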
#### Backward Incompatible Change {#backward-incompatible-change-4}

- The setting `input_format_defaults_for_omitted_fields` is enabled by default. Inserts into Distributed tables need this setting to be the same across the cluster (you need to set it before a rolling update). It enables the calculation of complex default expressions for omitted fields in `JSONEachRow` and `CSV*` formats. It should be the expected behaviour, but may lead to a negligible performance difference. [\#6043](https://github.com/ClickHouse/ClickHouse/pull/6043) ([Artem Zuikov](https://github.com/4ertus2)), [\#5625](https://github.com/ClickHouse/ClickHouse/pull/5625) ([akuzm](https://github.com/akuzm))

#### Experimental features {#experimental-features}

- New query processing pipeline. Use the `experimental_use_processors=1` option to enable it. Use it at your own risk. [\#4914](https://github.com/ClickHouse/ClickHouse/pull/4914) ([Nikolai Kochetov](https://github.com/KochetovNicolai))

#### Bug Fix {#bug-fix-13}

- Kafka integration has been fixed in this version.
- Fixed `DoubleDelta` encoding of `Int64` for large `DoubleDelta` values, improved `DoubleDelta` encoding for random data for `Int32`. [\#5998](https://github.com/ClickHouse/ClickHouse/pull/5998) ([Vasily Nemkov](https://github.com/Enmk))
- Fixed overestimation of `max_rows_to_read` if the setting `merge_tree_uniform_read_distribution` is set to 0. [\#6019](https://github.com/ClickHouse/ClickHouse/pull/6019) ([alexey-milovidov](https://github.com/alexey-milovidov))

#### Improvement {#improvement-4}

- Throws an exception if a `config.d` file doesn’t have the same root element as the config file. [\#6123](https://github.com/ClickHouse/ClickHouse/pull/6123) ([dimarub2000](https://github.com/dimarub2000))

#### Performance Improvement {#performance-improvement-3}

- Optimize `count()`. Now it uses the smallest column (if possible). [\#6028](https://github.com/ClickHouse/ClickHouse/pull/6028) ([Amos Bird](https://github.com/amosbird))

#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-5}

- Report memory usage in performance tests. [\#5899](https://github.com/ClickHouse/ClickHouse/pull/5899) ([akuzm](https://github.com/akuzm))
- Fix build with external `libcxx`. [\#6010](https://github.com/ClickHouse/ClickHouse/pull/6010) ([Ivan](https://github.com/abyss7))
- Fix shared build with the `rdkafka` library. [\#6101](https://github.com/ClickHouse/ClickHouse/pull/6101) ([Ivan](https://github.com/abyss7))

## ClickHouse release 19.11 {#clickhouse-release-19-11}

### ClickHouse release 19.11.13.74, 2019-11-01 {#clickhouse-release-19-11-13-74-2019-11-01}

#### Bug Fix {#bug-fix-14}

- Fixed a rare crash in `ALTER MODIFY COLUMN` and vertical merge when one of the merged/altered parts is empty (0 rows). [\#6780](https://github.com/ClickHouse/ClickHouse/pull/6780) ([alesapin](https://github.com/alesapin))
- Manual update of `SIMDJSON`. This fixes possible flooding of stderr files with bogus json diagnostic messages. [\#7548](https://github.com/ClickHouse/ClickHouse/pull/7548) ([Alexander Kazakov](https://github.com/Akazz))
- Fixed a bug with the `mrk` file extension for mutations. ([alesapin](https://github.com/alesapin))

### ClickHouse release 19.11.12.69, 2019-10-02 {#clickhouse-release-19-11-12-69-2019-10-02}

#### Bug Fix {#bug-fix-15}

- Fixed performance degradation of index analysis on complex keys on large tables. This fixes [\#6924](https://github.com/ClickHouse/ClickHouse/issues/6924). [\#7075](https://github.com/ClickHouse/ClickHouse/pull/7075) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Avoid rare SIGSEGV while sending data in tables with the Distributed engine (`Failed to send batch: file with index XXXXX is absent`). [\#7032](https://github.com/ClickHouse/ClickHouse/pull/7032) ([Azat Khuzhin](https://github.com/azat))
- Fix `Unknown identifier` with multiple joins. This fixes [\#5254](https://github.com/ClickHouse/ClickHouse/issues/5254). [\#7022](https://github.com/ClickHouse/ClickHouse/pull/7022) ([Artem Zuikov](https://github.com/4ertus2))

### ClickHouse release 19.11.11.57, 2019-09-13 {#clickhouse-release-19-11-11-57-2019-09-13}

- Fix a logical error causing segfaults when selecting from an empty Kafka topic. [\#6902](https://github.com/ClickHouse/ClickHouse/issues/6902) [\#6909](https://github.com/ClickHouse/ClickHouse/pull/6909) ([Ivan](https://github.com/abyss7))
- Fix for the function `arrayEnumerateUniqRanked` with empty arrays in params. [\#6928](https://github.com/ClickHouse/ClickHouse/pull/6928) ([proller](https://github.com/proller))

### ClickHouse release 19.11.10.54, 2019-09-10 {#clickhouse-release-19-11-10-54-2019-09-10}

#### Bug Fix {#bug-fix-16}

- Store offsets for Kafka messages manually to be able to commit them all at once for all partitions. Fixes potential duplication in the “one consumer - many partitions” scenario. [\#6872](https://github.com/ClickHouse/ClickHouse/pull/6872) ([Ivan](https://github.com/abyss7))

### ClickHouse release 19.11.9.52, 2019-09-6 {#clickhouse-release-19-11-9-52-2019-09-6}

- Improve error handling in cache dictionaries. [\#6737](https://github.com/ClickHouse/ClickHouse/pull/6737) ([Vitaly Baranov](https://github.com/vitlibar))
- Fixed a bug in the function `arrayEnumerateUniqRanked`. [\#6779](https://github.com/ClickHouse/ClickHouse/pull/6779) ([proller](https://github.com/proller))
- Fix the `JSONExtract` function while extracting a `Tuple` from JSON; see the example after this list. [\#6718](https://github.com/ClickHouse/ClickHouse/pull/6718) ([Vitaly Baranov](https://github.com/vitlibar))
- Fixed possible data loss after an `ALTER DELETE` query on a table with a skipping index. [\#6224](https://github.com/ClickHouse/ClickHouse/issues/6224) [\#6282](https://github.com/ClickHouse/ClickHouse/pull/6282) ([Nikita Vasilev](https://github.com/nikvas0))
- Fixed performance test. [\#6392](https://github.com/ClickHouse/ClickHouse/pull/6392) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Parquet: Fix reading boolean columns. [\#6579](https://github.com/ClickHouse/ClickHouse/pull/6579) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed wrong behaviour of the `nullIf` function for constant arguments. [\#6518](https://github.com/ClickHouse/ClickHouse/pull/6518) ([Guillaume Tassery](https://github.com/YiuRULE)) [\#6580](https://github.com/ClickHouse/ClickHouse/pull/6580) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix the Kafka message duplication problem on normal server restart. [\#6597](https://github.com/ClickHouse/ClickHouse/pull/6597) ([Ivan](https://github.com/abyss7))
- Fixed an issue where a long `ALTER UPDATE` or `ALTER DELETE` could prevent regular merges from running. Prevent mutations from executing if there are not enough free threads available. [\#6502](https://github.com/ClickHouse/ClickHouse/issues/6502) [\#6617](https://github.com/ClickHouse/ClickHouse/pull/6617) ([tavplubix](https://github.com/tavplubix))
- Fixed an error with processing “timezone” in the server configuration file. [\#6709](https://github.com/ClickHouse/ClickHouse/pull/6709) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix Kafka tests. [\#6805](https://github.com/ClickHouse/ClickHouse/pull/6805) ([Ivan](https://github.com/abyss7))
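A small illustration of the `JSONExtract` fix referenced above, using the typed-`Tuple` form of the function:

```sql
-- Extract a whole JSON object into a typed Tuple in one call.
SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}',
                   'Tuple(String, Array(Float64))');
-- Returns ('hello', [-100, 200, 300])
```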
#### Security Fix {#security-fix-3}

- If an attacker has write access to ZooKeeper and is able to run a custom server available from the network where ClickHouse runs, they can create a custom-built malicious server that acts as a ClickHouse replica and register it in ZooKeeper. When another replica fetches a data part from the malicious replica, it can force clickhouse-server to write to an arbitrary path on the filesystem. Found by Eldar Zaitov, information security team at Yandex. [\#6247](https://github.com/ClickHouse/ClickHouse/pull/6247) ([alexey-milovidov](https://github.com/alexey-milovidov))

### ClickHouse release 19.11.8.46, 2019-08-22 {#clickhouse-release-19-11-8-46-2019-08-22}

#### Bug Fix {#bug-fix-17}

- Fix the `ALTER TABLE ... UPDATE` query for tables with `enable_mixed_granularity_parts=1`. [\#6543](https://github.com/ClickHouse/ClickHouse/pull/6543) ([alesapin](https://github.com/alesapin))
- Fix an NPE when using the IN clause with a subquery with a tuple. [\#6125](https://github.com/ClickHouse/ClickHouse/issues/6125) [\#6550](https://github.com/ClickHouse/ClickHouse/pull/6550) ([tavplubix](https://github.com/tavplubix))
- Fixed an issue where, if a stale replica became alive, it could still have data parts that were removed by DROP PARTITION. [\#6522](https://github.com/ClickHouse/ClickHouse/issues/6522) [\#6523](https://github.com/ClickHouse/ClickHouse/pull/6523) ([tavplubix](https://github.com/tavplubix))
- Fixed an issue with parsing CSV. [\#6426](https://github.com/ClickHouse/ClickHouse/issues/6426) [\#6559](https://github.com/ClickHouse/ClickHouse/pull/6559) ([tavplubix](https://github.com/tavplubix))
- Fixed a data race in the system.parts table and the ALTER query. This fixes [\#6245](https://github.com/ClickHouse/ClickHouse/issues/6245). [\#6513](https://github.com/ClickHouse/ClickHouse/pull/6513) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed wrong code in mutations that could lead to memory corruption. Fixed a segfault with a read of address `0x14c0` that could happen due to concurrent `DROP TABLE` and `SELECT` from `system.parts` or `system.parts_columns`. Fixed a race condition in the preparation of mutation queries. Fixed a deadlock caused by `OPTIMIZE` of Replicated tables and concurrent modification operations like ALTERs. [\#6514](https://github.com/ClickHouse/ClickHouse/pull/6514) ([alexey-milovidov](https://github.com/alexey-milovidov))

### ClickHouse release 19.11.7.40, 2019-08-14 {#clickhouse-release-19-11-7-40-2019-08-14}

#### Bug fix {#bug-fix-18}

- Kafka integration has been fixed in this version.
- Fix a segfault when using `arrayReduce` for constant arguments. [\#6326](https://github.com/ClickHouse/ClickHouse/pull/6326) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed `toFloat()` monotonicity. [\#6374](https://github.com/ClickHouse/ClickHouse/pull/6374) ([dimarub2000](https://github.com/dimarub2000))
- Fix a segfault with enabled `optimize_skip_unused_shards` and a missing sharding key. [\#6384](https://github.com/ClickHouse/ClickHouse/pull/6384) ([CurtizJ](https://github.com/CurtizJ))
- Fixed the logic of the `arrayEnumerateUniqRanked` function. [\#6423](https://github.com/ClickHouse/ClickHouse/pull/6423) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Removed extra verbose logging from the MySQL handler. [\#6389](https://github.com/ClickHouse/ClickHouse/pull/6389) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix wrong behaviour and possible segfaults in the `topK` and `topKWeighted` aggregate functions; see the example after this list. [\#6404](https://github.com/ClickHouse/ClickHouse/pull/6404) ([CurtizJ](https://github.com/CurtizJ))
- Do not expose virtual columns in the `system.columns` table. This is required for backward compatibility. [\#6406](https://github.com/ClickHouse/ClickHouse/pull/6406) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix a bug with memory allocation for string fields in the complex key cache dictionary. [\#6447](https://github.com/ClickHouse/ClickHouse/pull/6447) ([alesapin](https://github.com/alesapin))
- Fix a bug with enabling adaptive granularity when creating a new replica for a `Replicated*MergeTree` table. [\#6452](https://github.com/ClickHouse/ClickHouse/pull/6452) ([alesapin](https://github.com/alesapin))
- Fix an infinite loop when reading Kafka messages. [\#6354](https://github.com/ClickHouse/ClickHouse/pull/6354) ([abyss7](https://github.com/abyss7))
- Fixed the possibility of a fabricated query causing a server crash due to stack overflow in the SQL parser, and the possibility of stack overflow in `Merge` and `Distributed` tables. [\#6433](https://github.com/ClickHouse/ClickHouse/pull/6433) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a Gorilla encoding error on small sequences. [\#6444](https://github.com/ClickHouse/ClickHouse/pull/6444) ([Enmk](https://github.com/Enmk))
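For reference, typical usage of the aggregate functions mentioned in the `topK`/`topKWeighted` fix above; the table `hits` and its columns are hypothetical:

```sql
-- Approximate top-3 most frequent domains.
SELECT topK(3)(domain) FROM hits;

-- Same, but each row contributes `weight` instead of 1.
SELECT topKWeighted(3)(domain, weight) FROM hits;
```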
#### Improvement {#improvement-5}

- Allow the user to override the `poll_interval` and `idle_connection_timeout` settings on connection. [\#6230](https://github.com/ClickHouse/ClickHouse/pull/6230) ([alexey-milovidov](https://github.com/alexey-milovidov))

### ClickHouse release 19.11.5.28, 2019-08-05 {#clickhouse-release-19-11-5-28-2019-08-05}

#### Bug fix {#bug-fix-19}

- Fixed the possibility of hanging queries when the server is overloaded. [\#6301](https://github.com/ClickHouse/ClickHouse/pull/6301) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix FPE in the yandexConsistentHash function. This fixes [\#6304](https://github.com/ClickHouse/ClickHouse/issues/6304). [\#6126](https://github.com/ClickHouse/ClickHouse/pull/6126) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a bug in the conversion of `LowCardinality` types in `AggregateFunctionFactory`. This fixes [\#6257](https://github.com/ClickHouse/ClickHouse/issues/6257). [\#6281](https://github.com/ClickHouse/ClickHouse/pull/6281) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fix parsing of `bool` settings from `true` and `false` strings in configuration files. [\#6278](https://github.com/ClickHouse/ClickHouse/pull/6278) ([alesapin](https://github.com/alesapin))
- Fix a rare bug with incompatible stream headers in queries to a `Distributed` table over a `MergeTree` table when part of the `WHERE` moves to `PREWHERE`. [\#6236](https://github.com/ClickHouse/ClickHouse/pull/6236) ([alesapin](https://github.com/alesapin))
- Fixed overflow in integer division of a signed type by an unsigned type. This fixes [\#6214](https://github.com/ClickHouse/ClickHouse/issues/6214). [\#6233](https://github.com/ClickHouse/ClickHouse/pull/6233) ([alexey-milovidov](https://github.com/alexey-milovidov))

#### Backward Incompatible Change {#backward-incompatible-change-5}

- `Kafka` is still broken.

### ClickHouse release 19.11.4.24, 2019-08-01 {#clickhouse-release-19-11-4-24-2019-08-01}

#### Bug Fix {#bug-fix-20}

- Fix a bug with writing secondary index marks with adaptive granularity. [\#6126](https://github.com/ClickHouse/ClickHouse/pull/6126) ([alesapin](https://github.com/alesapin))
- Fix the `WITH ROLLUP` and `WITH CUBE` modifiers of `GROUP BY` with two-level aggregation. [\#6225](https://github.com/ClickHouse/ClickHouse/pull/6225) ([Anton Popov](https://github.com/CurtizJ))
- Fixed a hang in the `JSONExtractRaw` function. Fixed [\#6195](https://github.com/ClickHouse/ClickHouse/issues/6195). [\#6198](https://github.com/ClickHouse/ClickHouse/pull/6198) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix a segfault in ExternalLoader::reloadOutdated(). [\#6082](https://github.com/ClickHouse/ClickHouse/pull/6082) ([Vitaly Baranov](https://github.com/vitlibar))
- Fixed the case when the server may close listening sockets but not shut down, and continue serving remaining queries. You may end up with two running clickhouse-server processes. Sometimes, the server may return an error `bad_function_call` for remaining queries. [\#6231](https://github.com/ClickHouse/ClickHouse/pull/6231) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a useless and incorrect condition on the update field for initial loading of external dictionaries via ODBC, MySQL, ClickHouse and HTTP. This fixes [\#6069](https://github.com/ClickHouse/ClickHouse/issues/6069). [\#6083](https://github.com/ClickHouse/ClickHouse/pull/6083) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed an irrelevant exception in the cast of `LowCardinality(Nullable)` to a not-Nullable column in case it doesn’t contain Nulls (e.g. in a query like `SELECT CAST(CAST('Hello' AS LowCardinality(Nullable(String))) AS String)`). [\#6094](https://github.com/ClickHouse/ClickHouse/issues/6094) [\#6119](https://github.com/ClickHouse/ClickHouse/pull/6119) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fix a non-deterministic result of the “uniq” aggregate function in extremely rare cases. The bug was present in all ClickHouse versions. [\#6058](https://github.com/ClickHouse/ClickHouse/pull/6058) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a segfault when setting a slightly too high CIDR in the function `IPv6CIDRToRange`. [\#6068](https://github.com/ClickHouse/ClickHouse/pull/6068) ([Guillaume Tassery](https://github.com/YiuRULE))
- Fixed a small memory leak when the server throws many exceptions from many different contexts. [\#6144](https://github.com/ClickHouse/ClickHouse/pull/6144) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix the situation when the consumer got paused before subscription and was not resumed afterwards. [\#6075](https://github.com/ClickHouse/ClickHouse/pull/6075) ([Ivan](https://github.com/abyss7)) Note that Kafka is broken in this version.
- Clearing the Kafka data buffer from the previous read operation that completed with an error. [\#6026](https://github.com/ClickHouse/ClickHouse/pull/6026) ([Nikolay](https://github.com/bopohaa)) Note that Kafka is broken in this version.
- Since `StorageMergeTree::background_task_handle` is initialized in `startup()`, `MergeTreeBlockOutputStream::write()` may try to use it before initialization. Just check if it is initialized. [\#6080](https://github.com/ClickHouse/ClickHouse/pull/6080) ([Ivan](https://github.com/abyss7))

#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-6}

- Added official `rpm` packages. [\#5740](https://github.com/ClickHouse/ClickHouse/pull/5740) ([proller](https://github.com/proller)) ([alesapin](https://github.com/alesapin))
- Add the ability to build `.rpm` and `.tgz` packages with the `packager` script. [\#5769](https://github.com/ClickHouse/ClickHouse/pull/5769) ([alesapin](https://github.com/alesapin))
- Fixes for the “Arcadia” build system. [\#6223](https://github.com/ClickHouse/ClickHouse/pull/6223) ([proller](https://github.com/proller))

#### Backward Incompatible Change {#backward-incompatible-change-6}

- `Kafka` is broken in this version.

### ClickHouse release 19.11.3.11, 2019-07-18 {#clickhouse-release-19-11-3-11-2019-07-18}

#### New Feature {#new-feature-6}

- Added support for prepared statements. [\#5331](https://github.com/ClickHouse/ClickHouse/pull/5331/) ([Alexander](https://github.com/sanych73)) [\#5630](https://github.com/ClickHouse/ClickHouse/pull/5630) ([alexey-milovidov](https://github.com/alexey-milovidov))
- `DoubleDelta` and `Gorilla` column codecs; see the example after this list. [\#5600](https://github.com/ClickHouse/ClickHouse/pull/5600) ([Vasily Nemkov](https://github.com/Enmk))
- Added the `os_thread_priority` setting that allows controlling the “nice” value of query processing threads, which is used by the OS to adjust dynamic scheduling priority. It requires `CAP_SYS_NICE` capabilities to work. This implements [\#5858](https://github.com/ClickHouse/ClickHouse/issues/5858). [\#5909](https://github.com/ClickHouse/ClickHouse/pull/5909) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Implement `_topic`, `_offset`, `_key` columns for the Kafka engine. [\#5382](https://github.com/ClickHouse/ClickHouse/pull/5382) ([Ivan](https://github.com/abyss7)) Note that Kafka is broken in this version.
- Add the aggregate function combinator `-Resample`. [\#5590](https://github.com/ClickHouse/ClickHouse/pull/5590) ([hcz](https://github.com/hczhcz))
- Aggregate functions `groupArrayMovingSum(win_size)(x)` and `groupArrayMovingAvg(win_size)(x)`, which calculate a moving sum/average with or without window-size limitation. [\#5595](https://github.com/ClickHouse/ClickHouse/pull/5595) ([inv2004](https://github.com/inv2004))
- Add the synonym `arrayFlatten` \<-\> `flatten`. [\#5764](https://github.com/ClickHouse/ClickHouse/pull/5764) ([hcz](https://github.com/hczhcz))
- Integrate the H3 function `geoToH3` from Uber. [\#4724](https://github.com/ClickHouse/ClickHouse/pull/4724) ([Remen Ivan](https://github.com/BHYCHIK)) [\#5805](https://github.com/ClickHouse/ClickHouse/pull/5805) ([alexey-milovidov](https://github.com/alexey-milovidov))
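A sketch of how the new column codecs from the list above are declared; the table is hypothetical:

```sql
-- DoubleDelta suits slowly changing integer sequences (e.g. timestamps);
-- Gorilla suits floating-point gauge-style values.
CREATE TABLE metrics
(
    ts    DateTime CODEC(DoubleDelta),
    value Float64  CODEC(Gorilla)
)
ENGINE = MergeTree
ORDER BY ts;
```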
#### Bug Fix {#bug-fix-21}

- Implement a DNS cache with asynchronous update. A separate thread resolves all hosts and updates the DNS cache with a period (setting `dns_cache_update_period`). It should help when the IPs of hosts change frequently. [\#5857](https://github.com/ClickHouse/ClickHouse/pull/5857) ([Anton Popov](https://github.com/CurtizJ))
- Fix a segfault in the `Delta` codec which affects columns with values less than 32 bits in size. The bug led to random memory corruption. [\#5786](https://github.com/ClickHouse/ClickHouse/pull/5786) ([alesapin](https://github.com/alesapin))
- Fix a segfault in TTL merge with non-physical columns in the block. [\#5819](https://github.com/ClickHouse/ClickHouse/pull/5819) ([Anton Popov](https://github.com/CurtizJ))
- Fix a rare bug in checking a part with a `LowCardinality` column. Previously `checkDataPart` always failed for a part with a `LowCardinality` column. [\#5832](https://github.com/ClickHouse/ClickHouse/pull/5832) ([alesapin](https://github.com/alesapin))
- Avoid hanging connections when the server thread pool is full. It is important for connections from the `remote` table function, or connections to a shard without replicas when there is a long connection timeout. This fixes [\#5878](https://github.com/ClickHouse/ClickHouse/issues/5878). [\#5881](https://github.com/ClickHouse/ClickHouse/pull/5881) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Support for constant arguments to the `evalMLModel` function. This fixes [\#5817](https://github.com/ClickHouse/ClickHouse/issues/5817). [\#5820](https://github.com/ClickHouse/ClickHouse/pull/5820) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed the issue when ClickHouse determines the default time zone as `UCT` instead of `UTC`. This fixes [\#5804](https://github.com/ClickHouse/ClickHouse/issues/5804). [\#5828](https://github.com/ClickHouse/ClickHouse/pull/5828) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed buffer underflow in `visitParamExtractRaw`. This fixes [\#5901](https://github.com/ClickHouse/ClickHouse/issues/5901). [\#5902](https://github.com/ClickHouse/ClickHouse/pull/5902) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Now distributed `DROP/ALTER/TRUNCATE/OPTIMIZE ON CLUSTER` queries will be executed directly on the leader replica. [\#5757](https://github.com/ClickHouse/ClickHouse/pull/5757) ([alesapin](https://github.com/alesapin))
- Fix `coalesce` for `ColumnConst` with `ColumnNullable` + related changes. [\#5755](https://github.com/ClickHouse/ClickHouse/pull/5755) ([Artem Zuikov](https://github.com/4ertus2))
- Fix the `ReadBufferFromKafkaConsumer` so that it keeps reading new messages after `commit()` even if it was stalled before. [\#5852](https://github.com/ClickHouse/ClickHouse/pull/5852) ([Ivan](https://github.com/abyss7))
- Fix `FULL` and `RIGHT` JOIN results when joining on `Nullable` keys in the right table. [\#5859](https://github.com/ClickHouse/ClickHouse/pull/5859) ([Artem Zuikov](https://github.com/4ertus2))
- Possible fix of infinite sleeping of low-priority queries. [\#5842](https://github.com/ClickHouse/ClickHouse/pull/5842) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix a race condition which caused some queries to not appear in query\_log after a `SYSTEM FLUSH LOGS` query. [\#5456](https://github.com/ClickHouse/ClickHouse/issues/5456) [\#5685](https://github.com/ClickHouse/ClickHouse/pull/5685) ([Anton Popov](https://github.com/CurtizJ))
- Fixed a `heap-use-after-free` ASan warning in ClusterCopier caused by a watch that tried to use an already removed copier object. [\#5871](https://github.com/ClickHouse/ClickHouse/pull/5871) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fixed a wrong `StringRef` pointer returned by some implementations of `IColumn::deserializeAndInsertFromArena`. This bug affected only unit tests. [\#5973](https://github.com/ClickHouse/ClickHouse/pull/5973) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Prevent source and intermediate array join columns from masking columns with the same name. [\#5941](https://github.com/ClickHouse/ClickHouse/pull/5941) ([Artem Zuikov](https://github.com/4ertus2))
- Fix insert and select queries to the MySQL engine with MySQL-style identifier quoting. [\#5704](https://github.com/ClickHouse/ClickHouse/pull/5704) ([Winter Zhang](https://github.com/zhang2014))
- Now the `CHECK TABLE` query can work with the MergeTree engine family. It returns the check status and a message, if any, for each part (or file in the case of simpler engines); see the example after this list. Also, fix a bug in the fetch of a broken part. [\#5865](https://github.com/ClickHouse/ClickHouse/pull/5865) ([alesapin](https://github.com/alesapin))
- Fix SPLIT\_SHARED\_LIBRARIES runtime. [\#5793](https://github.com/ClickHouse/ClickHouse/pull/5793) ([Danila Kutenin](https://github.com/danlark1))
- Fixed time zone initialization when `/etc/localtime` is a relative symlink like `../usr/share/zoneinfo/Europe/Moscow`. [\#5922](https://github.com/ClickHouse/ClickHouse/pull/5922) ([alexey-milovidov](https://github.com/alexey-milovidov))
- clickhouse-copier: Fix use-after-free on shutdown. [\#5752](https://github.com/ClickHouse/ClickHouse/pull/5752) ([proller](https://github.com/proller))
- Updated `simdjson`. Fixed the issue that some invalid JSONs with zero bytes were successfully parsed. [\#5938](https://github.com/ClickHouse/ClickHouse/pull/5938) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix shutdown of SystemLogs. [\#5802](https://github.com/ClickHouse/ClickHouse/pull/5802) ([Anton Popov](https://github.com/CurtizJ))
- Fix hanging when the condition in invalidate\_query depends on a dictionary. [\#6011](https://github.com/ClickHouse/ClickHouse/pull/6011) ([Vitaly Baranov](https://github.com/vitlibar))
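A quick illustration of the `CHECK TABLE` support mentioned in the list above; any MergeTree-family table (here the hypothetical `metrics`) works:

```sql
-- Per the item above, returns the check status and an optional message
-- for each part of the table.
CHECK TABLE metrics;
```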
#### Improvement {#improvement-6}

- Allow unresolvable addresses in the cluster configuration. They will be considered unavailable, and resolution will be retried at every connection attempt. This is especially useful for Kubernetes. This fixes [\#5714](https://github.com/ClickHouse/ClickHouse/issues/5714). [\#5924](https://github.com/ClickHouse/ClickHouse/pull/5924) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Close idle TCP connections (with a one-hour timeout by default). This is especially important for large clusters with multiple distributed tables on every server, because every server can possibly keep a connection pool to every other server, and after peak query concurrency, connections will stall. This fixes [\#5879](https://github.com/ClickHouse/ClickHouse/issues/5879). [\#5880](https://github.com/ClickHouse/ClickHouse/pull/5880) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Better quality of the `topK` function. Changed the SavingSpace set behaviour to remove the last element if the new element has a bigger weight. [\#5833](https://github.com/ClickHouse/ClickHouse/issues/5833) [\#5850](https://github.com/ClickHouse/ClickHouse/pull/5850) ([Guillaume Tassery](https://github.com/YiuRULE))
- URL functions that work with domains now also work for incomplete URLs without a scheme. [\#5725](https://github.com/ClickHouse/ClickHouse/pull/5725) ([alesapin](https://github.com/alesapin))
- Checksums added to the `system.parts_columns` table. [\#5874](https://github.com/ClickHouse/ClickHouse/pull/5874) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
- Added the `Enum` data type as a synonym for `Enum8` or `Enum16`. [\#5886](https://github.com/ClickHouse/ClickHouse/pull/5886) ([dimarub2000](https://github.com/dimarub2000))
- Full bit transpose variant for the `T64` codec. Could lead to better compression with `zstd`. [\#5742](https://github.com/ClickHouse/ClickHouse/pull/5742) ([Artem Zuikov](https://github.com/4ertus2))
- A condition on the `startsWith` function can now use the primary key; see the example after this list. This fixes [\#5310](https://github.com/ClickHouse/ClickHouse/issues/5310) and [\#5882](https://github.com/ClickHouse/ClickHouse/issues/5882). [\#5919](https://github.com/ClickHouse/ClickHouse/pull/5919) ([dimarub2000](https://github.com/dimarub2000))
- Allow using `clickhouse-copier` with a cross-replication cluster topology by permitting an empty database name. [\#5745](https://github.com/ClickHouse/ClickHouse/pull/5745) ([nvartolomei](https://github.com/nvartolomei))
- Use `UTC` as the default timezone on a system without `tzdata` (e.g. a bare Docker container). Before this patch, the error message `Could not determine local time zone` was printed, and the server or client refused to start. [\#5827](https://github.com/ClickHouse/ClickHouse/pull/5827) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Returned back support for a floating point argument in the function `quantileTiming` for backward compatibility. [\#5911](https://github.com/ClickHouse/ClickHouse/pull/5911) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Show which table is missing a column in error messages. [\#5768](https://github.com/ClickHouse/ClickHouse/pull/5768) ([Ivan](https://github.com/abyss7))
- Disallow running queries with the same query\_id by different users. [\#5430](https://github.com/ClickHouse/ClickHouse/pull/5430) ([proller](https://github.com/proller))
- More robust code for sending metrics to Graphite. It will work even during long multiple `RENAME TABLE` operations. [\#5875](https://github.com/ClickHouse/ClickHouse/pull/5875) ([alexey-milovidov](https://github.com/alexey-milovidov))
- More informative error messages will be displayed when the ThreadPool cannot schedule a task for execution. This fixes [\#5305](https://github.com/ClickHouse/ClickHouse/issues/5305). [\#5801](https://github.com/ClickHouse/ClickHouse/pull/5801) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Inverted ngramSearch to be more intuitive. [\#5807](https://github.com/ClickHouse/ClickHouse/pull/5807) ([Danila Kutenin](https://github.com/danlark1))
- Add user parsing in the HDFS engine builder. [\#5946](https://github.com/ClickHouse/ClickHouse/pull/5946) ([akonyaev90](https://github.com/akonyaev90))
- Update the default value of the `max_ast_elements` parameter. [\#5933](https://github.com/ClickHouse/ClickHouse/pull/5933) ([Artem Konovalov](https://github.com/izebit))
- Added a notion of obsolete settings. The obsolete setting `allow_experimental_low_cardinality_type` can be used with no effect. [0f15c01c6802f7ce1a1494c12c846be8c98944cd](https://github.com/ClickHouse/ClickHouse/commit/0f15c01c6802f7ce1a1494c12c846be8c98944cd) [Alexey Milovidov](https://github.com/alexey-milovidov)
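A sketch of the `startsWith` primary-key optimization noted above, with a hypothetical table:

```sql
CREATE TABLE urls (url String, hits UInt64)
ENGINE = MergeTree ORDER BY url;

-- This condition can now be evaluated using the primary key on `url`
-- instead of a full scan.
SELECT count() FROM urls WHERE startsWith(url, 'https://');
```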
#### Performance Improvement {#performance-improvement-4}

- Increase the number of streams to SELECT from a Merge table for a more uniform distribution of threads. Added the setting `max_streams_multiplier_for_merge_tables`. This fixes [\#5797](https://github.com/ClickHouse/ClickHouse/issues/5797). [\#5915](https://github.com/ClickHouse/ClickHouse/pull/5915) ([alexey-milovidov](https://github.com/alexey-milovidov))

#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-7}

- Add a backward compatibility test for client-server interaction with different versions of ClickHouse. [\#5868](https://github.com/ClickHouse/ClickHouse/pull/5868) ([alesapin](https://github.com/alesapin))
- Test coverage information in every commit and pull request. [\#5896](https://github.com/ClickHouse/ClickHouse/pull/5896) ([alesapin](https://github.com/alesapin))
- Cooperate with the address sanitizer to support our custom allocators (`Arena` and `ArenaWithFreeLists`) for better debugging of “use-after-free” errors. [\#5728](https://github.com/ClickHouse/ClickHouse/pull/5728) ([akuzm](https://github.com/akuzm))
- Switch to the [LLVM libunwind implementation](https://github.com/llvm-mirror/libunwind) for C++ exception handling and for stack trace printing. [\#4828](https://github.com/ClickHouse/ClickHouse/pull/4828) ([Nikita Lapkov](https://github.com/laplab))
- Add two more warnings from -Weverything. [\#5923](https://github.com/ClickHouse/ClickHouse/pull/5923) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Allow building ClickHouse with Memory Sanitizer. [\#3949](https://github.com/ClickHouse/ClickHouse/pull/3949) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a ubsan report about the `bitTest` function in the fuzz test. [\#5943](https://github.com/ClickHouse/ClickHouse/pull/5943) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Docker: added the possibility to init a ClickHouse instance which requires authentication. [\#5727](https://github.com/ClickHouse/ClickHouse/pull/5727) ([Korviakov Andrey](https://github.com/shurshun))
- Update librdkafka to version 1.1.0. [\#5872](https://github.com/ClickHouse/ClickHouse/pull/5872) ([Ivan](https://github.com/abyss7))
- Add a global timeout for integration tests and disable some of them in the tests code. [\#5741](https://github.com/ClickHouse/ClickHouse/pull/5741) ([alesapin](https://github.com/alesapin))
- Fix some ThreadSanitizer failures. [\#5854](https://github.com/ClickHouse/ClickHouse/pull/5854) ([akuzm](https://github.com/akuzm))
- The `--no-undefined` option forces the linker to check all external names for existence while linking. It’s very useful to track real dependencies between libraries in the split build mode. [\#5855](https://github.com/ClickHouse/ClickHouse/pull/5855) ([Ivan](https://github.com/abyss7))
- Added a performance test for [\#5797](https://github.com/ClickHouse/ClickHouse/issues/5797). [\#5914](https://github.com/ClickHouse/ClickHouse/pull/5914) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed compatibility with gcc-7. [\#5840](https://github.com/ClickHouse/ClickHouse/pull/5840) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added support for gcc-9. This fixes [\#5717](https://github.com/ClickHouse/ClickHouse/issues/5717). [\#5774](https://github.com/ClickHouse/ClickHouse/pull/5774) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed an error when libunwind could be linked incorrectly. [\#5948](https://github.com/ClickHouse/ClickHouse/pull/5948) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a few warnings found by PVS-Studio. [\#5921](https://github.com/ClickHouse/ClickHouse/pull/5921) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added initial support for the `clang-tidy` static analyzer. [\#5806](https://github.com/ClickHouse/ClickHouse/pull/5806) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Convert BSD/Linux endian macros (‘be64toh’ and ‘htobe64’) to the Mac OS X equivalents. [\#5785](https://github.com/ClickHouse/ClickHouse/pull/5785) ([Fu Chen](https://github.com/fredchenbj))
- Improved integration tests guide. [\#5796](https://github.com/ClickHouse/ClickHouse/pull/5796) ([Vladimir Chebotarev](https://github.com/excitoon))
- Fix the build on macOS with gcc9. [\#5822](https://github.com/ClickHouse/ClickHouse/pull/5822) ([filimonov](https://github.com/filimonov))
- Fix a hard-to-spot typo: aggreAGte -\> aggregate. [\#5753](https://github.com/ClickHouse/ClickHouse/pull/5753) ([akuzm](https://github.com/akuzm))
- Fix the FreeBSD build. [\#5760](https://github.com/ClickHouse/ClickHouse/pull/5760) ([proller](https://github.com/proller))
- Add a link to the experimental YouTube channel to the website. [\#5845](https://github.com/ClickHouse/ClickHouse/pull/5845) ([Ivan Blinkov](https://github.com/blinkov))
- CMake: add an option for coverage flags: WITH\_COVERAGE. [\#5776](https://github.com/ClickHouse/ClickHouse/pull/5776) ([proller](https://github.com/proller))
- Fix the initial size of some inline PODArray’s. [\#5787](https://github.com/ClickHouse/ClickHouse/pull/5787) ([akuzm](https://github.com/akuzm))
- clickhouse-server.postinst: fix OS detection for CentOS 6. [\#5788](https://github.com/ClickHouse/ClickHouse/pull/5788) ([proller](https://github.com/proller))
- Added Arch Linux package generation. [\#5719](https://github.com/ClickHouse/ClickHouse/pull/5719) ([Vladimir Chebotarev](https://github.com/excitoon))
- Split Common/config.h by libs (dbms). [\#5715](https://github.com/ClickHouse/ClickHouse/pull/5715) ([proller](https://github.com/proller))
- Fixes for the “Arcadia” build platform. [\#5795](https://github.com/ClickHouse/ClickHouse/pull/5795) ([proller](https://github.com/proller))
- Fixes for unconventional builds (gcc9, no submodules). [\#5792](https://github.com/ClickHouse/ClickHouse/pull/5792) ([proller](https://github.com/proller))
- Require an explicit type in unalignedStore because it was proven to be bug-prone. [\#5791](https://github.com/ClickHouse/ClickHouse/pull/5791) ([akuzm](https://github.com/akuzm))
- Fixes the macOS build. [\#5830](https://github.com/ClickHouse/ClickHouse/pull/5830) ([filimonov](https://github.com/filimonov))
- Performance test concerning the new JIT feature with a bigger dataset, as requested here: [\#5263](https://github.com/ClickHouse/ClickHouse/issues/5263). [\#5887](https://github.com/ClickHouse/ClickHouse/pull/5887) ([Guillaume Tassery](https://github.com/YiuRULE))
- Run stateful tests in the stress test. [12693e568722f11e19859742f56428455501fd2a](https://github.com/ClickHouse/ClickHouse/commit/12693e568722f11e19859742f56428455501fd2a) ([alesapin](https://github.com/alesapin))

#### Backward Incompatible Change {#backward-incompatible-change-7}

- `Kafka` is broken in this version.
- Enable `adaptive_index_granularity` = 10MB by default for new `MergeTree` tables. If you created new MergeTree tables on version 19.11+, downgrading to versions prior to 19.6 will be impossible. [\#5628](https://github.com/ClickHouse/ClickHouse/pull/5628) ([alesapin](https://github.com/alesapin))
- Removed obsolete undocumented embedded dictionaries that were used by Yandex.Metrica. The functions `OSIn`, `SEIn`, `OSToRoot`, `SEToRoot`, `OSHierarchy`, `SEHierarchy` are no longer available. If you are using these functions, write an email to clickhouse-feedback@yandex-team.com. Note: at the last moment we decided to keep these functions for a while. [\#5780](https://github.com/ClickHouse/ClickHouse/pull/5780) ([alexey-milovidov](https://github.com/alexey-milovidov))

## ClickHouse release 19.10 {#clickhouse-release-19-10}

### ClickHouse release 19.10.1.5, 2019-07-12 {#clickhouse-release-19-10-1-5-2019-07-12}

#### New Feature {#new-feature-7}

- Add a new column codec: `T64`. Made for (U)IntX/EnumX/Date(Time)/DecimalX columns. It should be good for columns with constant or small-range values. The codec itself allows enlarging or shrinking the data type without re-compression. [\#5557](https://github.com/ClickHouse/ClickHouse/pull/5557) ([Artem Zuikov](https://github.com/4ertus2))
- Add the database engine `MySQL` that allows viewing all the tables in a remote MySQL server; see the example after this list. [\#5599](https://github.com/ClickHouse/ClickHouse/pull/5599) ([Winter Zhang](https://github.com/zhang2014))
- `bitmapContains` implementation. It’s 2x faster than `bitmapHasAny` if the second bitmap contains one element. [\#5535](https://github.com/ClickHouse/ClickHouse/pull/5535) ([Zhichang Yu](https://github.com/yuzhichang))
- Support for the `crc32` function (with behaviour exactly as in MySQL or PHP). Do not use it if you need a hash function. [\#5661](https://github.com/ClickHouse/ClickHouse/pull/5661) ([Remen Ivan](https://github.com/BHYCHIK))
- Implemented `SYSTEM START/STOP DISTRIBUTED SENDS` queries to control asynchronous inserts into `Distributed` tables. [\#4935](https://github.com/ClickHouse/ClickHouse/pull/4935) ([Winter Zhang](https://github.com/zhang2014))
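A sketch of the new `MySQL` database engine from the list above; the host, credentials, database, and table names are placeholders:

```sql
-- All tables of the remote MySQL database become visible in ClickHouse.
CREATE DATABASE mysql_db
ENGINE = MySQL('mysql-host:3306', 'shop', 'user', 'password');

SELECT * FROM mysql_db.orders LIMIT 10;
```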
#### Bug Fix {#bug-fix-22}

- Ignore query execution limits and max parts size for merge limits while executing mutations. [\#5659](https://github.com/ClickHouse/ClickHouse/pull/5659) ([Anton Popov](https://github.com/CurtizJ))
- Fix a bug which could lead to deduplication of normal blocks (extremely rare) and insertion of duplicate blocks (more often). [\#5549](https://github.com/ClickHouse/ClickHouse/pull/5549) ([alesapin](https://github.com/alesapin))
- Fix of the function `arrayEnumerateUniqRanked` for arguments with empty arrays. [\#5559](https://github.com/ClickHouse/ClickHouse/pull/5559) ([proller](https://github.com/proller))
- Don’t subscribe to Kafka topics without the intent to poll any messages. [\#5698](https://github.com/ClickHouse/ClickHouse/pull/5698) ([Ivan](https://github.com/abyss7))
- Make the setting `join_use_nulls` have no effect on types that cannot be inside Nullable. [\#5700](https://github.com/ClickHouse/ClickHouse/pull/5700) ([Olga Khvostikova](https://github.com/stavrolia))
- Fixed `Incorrect size of index granularity` errors. [\#5720](https://github.com/ClickHouse/ClickHouse/pull/5720) ([coraxster](https://github.com/coraxster))
- Fix Float to Decimal conversion overflow. [\#5607](https://github.com/ClickHouse/ClickHouse/pull/5607) ([coraxster](https://github.com/coraxster))
- Flush the buffer when the `WriteBufferFromHDFS` destructor is called. This fixes writing into `HDFS`. [\#5684](https://github.com/ClickHouse/ClickHouse/pull/5684) ([Xindong Peng](https://github.com/eejoin))

#### Improvement {#improvement-7}

- Treat empty cells in `CSV` as default values when the setting `input_format_defaults_for_omitted_fields` is enabled. [\#5625](https://github.com/ClickHouse/ClickHouse/pull/5625) ([akuzm](https://github.com/akuzm))
- Non-blocking loading of external dictionaries. [\#5567](https://github.com/ClickHouse/ClickHouse/pull/5567) ([Vitaly Baranov](https://github.com/vitlibar))
- Network timeouts can be dynamically changed for already established connections according to the settings. [\#4558](https://github.com/ClickHouse/ClickHouse/pull/4558) ([Konstantin Podshumok](https://github.com/podshumok))
- Using “public\_suffix\_list” for the functions `firstSignificantSubdomain` and `cutToFirstSignificantSubdomain`; see the example after this list. It uses a perfect hash table generated by `gperf` with a list generated from the file https://publicsuffix.org/list/public\_suffix\_list.dat (for example, now we recognize the domain `ac.uk` as non-significant). [\#5030](https://github.com/ClickHouse/ClickHouse/pull/5030) ([Guillaume Tassery](https://github.com/YiuRULE))
- Adopted the `IPv6` data type in system tables; unified client info columns in `system.processes` and `system.query_log`. [\#5640](https://github.com/ClickHouse/ClickHouse/pull/5640) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Using sessions for connections with the MySQL compatibility protocol. \#5476 [\#5646](https://github.com/ClickHouse/ClickHouse/pull/5646) ([Yuriy Baranov](https://github.com/yurriy))
- Support more `ALTER` queries `ON CLUSTER`. [\#5593](https://github.com/ClickHouse/ClickHouse/pull/5593) [\#5613](https://github.com/ClickHouse/ClickHouse/pull/5613) ([sundyli](https://github.com/sundy-li))
- Support the `` section in the `clickhouse-local` config file. [\#5540](https://github.com/ClickHouse/ClickHouse/pull/5540) ([proller](https://github.com/proller))
- Allow running queries with the `remote` table function in `clickhouse-local`. [\#5627](https://github.com/ClickHouse/ClickHouse/pull/5627) ([proller](https://github.com/proller))
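An illustration of the public-suffix-aware behaviour described in the list above; the outputs shown are assumptions based on the item's `ac.uk` example:

```sql
-- 'com.tr' is a public suffix, so the significant part is 'clickhouse'.
SELECT firstSignificantSubdomain('https://news.clickhouse.com.tr/');
-- -> 'clickhouse'

-- 'ac.uk' is now treated as non-significant, so it is kept as the suffix.
SELECT cutToFirstSignificantSubdomain('https://any.ac.uk/path');
-- -> 'any.ac.uk'
```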
#### Performance Improvement {#performance-improvement-5}

- Add the possibility to write the final mark at the end of MergeTree columns. It allows avoiding useless reads for keys that are out of the table data range. It is enabled only if adaptive index granularity is in use. [\#5624](https://github.com/ClickHouse/ClickHouse/pull/5624) ([alesapin](https://github.com/alesapin))
- Improved performance of MergeTree tables on very slow filesystems by reducing the number of `stat` syscalls. [\#5648](https://github.com/ClickHouse/ClickHouse/pull/5648) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed the performance degradation in reading from MergeTree tables that was introduced in version 19.6. Fixes \#5631. [\#5633](https://github.com/ClickHouse/ClickHouse/pull/5633) ([alexey-milovidov](https://github.com/alexey-milovidov))

#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-8}

- Implemented `TestKeeper` as an implementation of the ZooKeeper interface used for testing. [\#5643](https://github.com/ClickHouse/ClickHouse/pull/5643) ([alexey-milovidov](https://github.com/alexey-milovidov)) ([levushkin aleksej](https://github.com/alexey-milovidov))
- From now on, `.sql` tests can be run isolated by server, in parallel, with a random database. It allows running them faster, adding new tests with custom server configurations, and being sure that different tests don’t affect each other. [\#5554](https://github.com/ClickHouse/ClickHouse/pull/5554) ([Ivan](https://github.com/abyss7))
- Remove `` and `` from performance tests. [\#5672](https://github.com/ClickHouse/ClickHouse/pull/5672) ([Olga Khvostikova](https://github.com/stavrolia))
- Fixed the “select\_format” performance test for `Pretty` formats. [\#5642](https://github.com/ClickHouse/ClickHouse/pull/5642) ([alexey-milovidov](https://github.com/alexey-milovidov))

## ClickHouse release 19.9 {#clickhouse-release-19-9}

### ClickHouse release 19.9.3.31, 2019-07-05 {#clickhouse-release-19-9-3-31-2019-07-05}

#### Bug Fix {#bug-fix-23}

- Fix a segfault in the Delta codec which affects columns with values less than 32 bits in size. The bug led to random memory corruption. [\#5786](https://github.com/ClickHouse/ClickHouse/pull/5786) ([alesapin](https://github.com/alesapin))
- Fix a rare bug in checking a part with a LowCardinality column. [\#5832](https://github.com/ClickHouse/ClickHouse/pull/5832) ([alesapin](https://github.com/alesapin))
- Fix a segfault in TTL merge with non-physical columns in the block. [\#5819](https://github.com/ClickHouse/ClickHouse/pull/5819) ([Anton Popov](https://github.com/CurtizJ))
- Fix potential infinite sleeping of low-priority queries. [\#5842](https://github.com/ClickHouse/ClickHouse/pull/5842) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix how ClickHouse determines the default time zone as UCT instead of UTC. [\#5828](https://github.com/ClickHouse/ClickHouse/pull/5828) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix a bug about executing distributed DROP/ALTER/TRUNCATE/OPTIMIZE ON CLUSTER queries on a follower replica before the leader replica. Now they will be executed directly on the leader replica. [\#5757](https://github.com/ClickHouse/ClickHouse/pull/5757) ([alesapin](https://github.com/alesapin))
- Fix a race condition which caused some queries to not appear in query\_log instantly after a SYSTEM FLUSH LOGS query. [\#5685](https://github.com/ClickHouse/ClickHouse/pull/5685) ([Anton Popov](https://github.com/CurtizJ))
- Added missing support for constant arguments to the `evalMLModel` function. [\#5820](https://github.com/ClickHouse/ClickHouse/pull/5820) ([alexey-milovidov](https://github.com/alexey-milovidov))

### ClickHouse release 19.9.2.4, 2019-06-24 {#clickhouse-release-19-9-2-4-2019-06-24}

#### New Feature {#new-feature-8}

- Print information about frozen parts in the `system.parts` table. [\#5471](https://github.com/ClickHouse/ClickHouse/pull/5471) ([proller](https://github.com/proller))
- Ask for the client password on clickhouse-client start on a tty if it is not set in the arguments. [\#5092](https://github.com/ClickHouse/ClickHouse/pull/5092) ([proller](https://github.com/proller))
- Implement the `dictGet` and `dictGetOrDefault` functions for Decimal types; see the sketch below. [\#5394](https://github.com/ClickHouse/ClickHouse/pull/5394) ([Artem Zuikov](https://github.com/4ertus2))
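A sketch of the `dictGet`/`dictGetOrDefault` calls for a Decimal attribute; the dictionary name, attribute, and keys are hypothetical:

```sql
-- Look up a Decimal attribute by key; fall back to a default when missing.
SELECT
    dictGet('price_dict', 'price', toUInt64(42)) AS price,
    dictGetOrDefault('price_dict', 'price', toUInt64(9999),
                     toDecimal64(0, 2)) AS price_or_default;
```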

#### Improvement {#improvement-8}

- Debian init: add a service stop timeout. [\#5522](https://github.com/ClickHouse/ClickHouse/pull/5522) ([proller](https://github.com/proller))
- Add a setting, forbidden by default, to create tables with suspicious types for LowCardinality (see the sketch after this list). [\#5448](https://github.com/ClickHouse/ClickHouse/pull/5448) ([Olga Khvostikova](https://github.com/stavrolia))
- Regression functions return model weights when not used as State in the `evalMLMethod` function. [\#5411](https://github.com/ClickHouse/ClickHouse/pull/5411) ([Quid37](https://github.com/Quid37))
- Rename and improve regression methods. [\#5492](https://github.com/ClickHouse/ClickHouse/pull/5492) ([Quid37](https://github.com/Quid37))
- Clearer interfaces of string searchers. [\#5586](https://github.com/ClickHouse/ClickHouse/pull/5586) ([Danila Kutenin](https://github.com/danlark1))
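
The entry does not name the new setting; assuming it is `allow_suspicious_low_cardinality_types`, a sketch of the guarded behaviour:

```sql
-- By default this now fails, because LowCardinality over a small fixed-size
-- type is usually counterproductive:
CREATE TABLE t (x LowCardinality(UInt64)) ENGINE = Memory;  -- throws

-- Opting in explicitly (assumed setting name):
SET allow_suspicious_low_cardinality_types = 1;
CREATE TABLE t (x LowCardinality(UInt64)) ENGINE = Memory;  -- succeeds
```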

#### Bug Fix {#bug-fix-24}

- Fix potential data loss in Kafka. [\#5445](https://github.com/ClickHouse/ClickHouse/pull/5445) ([Ivan](https://github.com/abyss7))
- Fix potential infinite loop in `PrettySpace` format when called with zero columns. [\#5560](https://github.com/ClickHouse/ClickHouse/pull/5560) ([Olga Khvostikova](https://github.com/stavrolia))
- Fixed UInt32 overflow bug in linear models. Allow evaluating ML models for a non-const model argument. [\#5516](https://github.com/ClickHouse/ClickHouse/pull/5516) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- `ALTER TABLE ... DROP INDEX IF EXISTS ...` should not raise an exception if the provided index does not exist. [\#5524](https://github.com/ClickHouse/ClickHouse/pull/5524) ([Gleb Novikov](https://github.com/NanoBjorn))
- Fix segfault with `bitmapHasAny` in scalar subqueries. [\#5528](https://github.com/ClickHouse/ClickHouse/pull/5528) ([Zhichang Yu](https://github.com/yuzhichang))
- Fixed error when the replication connection pool doesn’t retry resolving the host, even when the DNS cache was dropped. [\#5534](https://github.com/ClickHouse/ClickHouse/pull/5534) ([alesapin](https://github.com/alesapin))
- Fixed `ALTER ... MODIFY TTL` on ReplicatedMergeTree. [\#5539](https://github.com/ClickHouse/ClickHouse/pull/5539) ([Anton Popov](https://github.com/CurtizJ))
- Fix INSERT into a Distributed table with a MATERIALIZED column. [\#5429](https://github.com/ClickHouse/ClickHouse/pull/5429) ([Azat Khuzhin](https://github.com/azat))
- Fix bad alloc when truncating Join storage. [\#5437](https://github.com/ClickHouse/ClickHouse/pull/5437) ([TCeason](https://github.com/TCeason))
- In recent versions of the tzdata package some files are symlinks now. This broke the mechanism for detecting the default timezone and gave wrong names for some timezones. Now at least we force the timezone name to the contents of TZ if provided. [\#5443](https://github.com/ClickHouse/ClickHouse/pull/5443) ([Ivan](https://github.com/abyss7))
- Fix some extremely rare cases with the MultiVolnitsky searcher when the constant needles in sum are at least 16KB long. The algorithm missed or overwrote previous results, which can lead to an incorrect result of `multiSearchAny`. [\#5588](https://github.com/ClickHouse/ClickHouse/pull/5588) ([Danila Kutenin](https://github.com/danlark1))
- Fix the issue when settings for ExternalData requests couldn’t use ClickHouse settings. Also, for now, the settings `date_time_input_format` and `low_cardinality_allow_in_native_format` cannot be used because of the ambiguity of names (in external data they can be interpreted as a table format and in the query they can be a setting). [\#5455](https://github.com/ClickHouse/ClickHouse/pull/5455) ([Danila Kutenin](https://github.com/danlark1))
- Fix bug when parts were removed only from FS without dropping them from ZooKeeper. [\#5520](https://github.com/ClickHouse/ClickHouse/pull/5520) ([alesapin](https://github.com/alesapin))
- Remove debug logging from the MySQL protocol. [\#5478](https://github.com/ClickHouse/ClickHouse/pull/5478) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Skip ZNONODE during DDL query processing. [\#5489](https://github.com/ClickHouse/ClickHouse/pull/5489) ([Azat Khuzhin](https://github.com/azat))
- Fix mixed `UNION ALL` result column type. There were cases with inconsistent data and column types of resulting columns. [\#5503](https://github.com/ClickHouse/ClickHouse/pull/5503) ([Artem Zuikov](https://github.com/4ertus2))
- Throw an exception on wrong integers in `dictGetT` functions instead of a crash. [\#5446](https://github.com/ClickHouse/ClickHouse/pull/5446) ([Artem Zuikov](https://github.com/4ertus2))
- Fix wrong element\_count and load\_factor for hashed dictionaries in the `system.dictionaries` table. [\#5440](https://github.com/ClickHouse/ClickHouse/pull/5440) ([Azat Khuzhin](https://github.com/azat))

#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-9}

- Fixed build without `Brotli` HTTP compression support (`ENABLE_BROTLI=OFF` cmake variable). [\#5521](https://github.com/ClickHouse/ClickHouse/pull/5521) ([Anton Yuzhaninov](https://github.com/citrin))
- Include roaring.h as roaring/roaring.h. [\#5523](https://github.com/ClickHouse/ClickHouse/pull/5523) ([Orivej Desh](https://github.com/orivej))
- Fix gcc9 warnings in hyperscan (\#line directive is evil!). [\#5546](https://github.com/ClickHouse/ClickHouse/pull/5546) ([Danila Kutenin](https://github.com/danlark1))
- Fix all warnings when compiling with gcc-9. Fix some contrib issues. Fix a gcc9 ICE and submit it to bugzilla. [\#5498](https://github.com/ClickHouse/ClickHouse/pull/5498) ([Danila Kutenin](https://github.com/danlark1))
- Fixed linking with lld. [\#5477](https://github.com/ClickHouse/ClickHouse/pull/5477) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Remove unused specializations in dictionaries. [\#5452](https://github.com/ClickHouse/ClickHouse/pull/5452) ([Artem Zuikov](https://github.com/4ertus2))
- Improve performance tests for formatting and parsing tables for different types of files. [\#5497](https://github.com/ClickHouse/ClickHouse/pull/5497) ([Olga Khvostikova](https://github.com/stavrolia))
- Fixes for parallel test runs. [\#5506](https://github.com/ClickHouse/ClickHouse/pull/5506) ([proller](https://github.com/proller))
- Docker: use configs from clickhouse-test. [\#5531](https://github.com/ClickHouse/ClickHouse/pull/5531) ([proller](https://github.com/proller))
- Fix compilation for FreeBSD. [\#5447](https://github.com/ClickHouse/ClickHouse/pull/5447) ([proller](https://github.com/proller))
- Upgrade boost to 1.70. [\#5570](https://github.com/ClickHouse/ClickHouse/pull/5570) ([proller](https://github.com/proller))
- Fix building ClickHouse as a submodule. [\#5574](https://github.com/ClickHouse/ClickHouse/pull/5574) ([proller](https://github.com/proller))
- Improve JSONExtract performance tests. [\#5444](https://github.com/ClickHouse/ClickHouse/pull/5444) ([Vitaly Baranov](https://github.com/vitlibar))

## ClickHouse release 19.8 {#clickhouse-release-19-8}

### ClickHouse release 19.8.3.8, 2019-06-11 {#clickhouse-release-19-8-3-8-2019-06-11}

#### New Features {#new-features}

- Added functions to work with JSON. [\#4686](https://github.com/ClickHouse/ClickHouse/pull/4686) ([hcz](https://github.com/hczhcz))
[\#5124](https://github.com/ClickHouse/ClickHouse/pull/5124). ([Vitaly Baranov](https://github.com/vitlibar))
- Add a function `basename` with behaviour similar to the basename function that exists in a lot of languages (`os.path.basename` in Python, `basename` in PHP, etc…). It works with both UNIX-like and Windows paths. [\#5136](https://github.com/ClickHouse/ClickHouse/pull/5136) ([Guillaume Tassery](https://github.com/YiuRULE))
- Added `LIMIT n, m BY` or `LIMIT m OFFSET n BY` syntax to set an offset of n for the LIMIT BY clause (see the sketch after this list). [\#5138](https://github.com/ClickHouse/ClickHouse/pull/5138) ([Anton Popov](https://github.com/CurtizJ))
- Added new data type `SimpleAggregateFunction`, which allows having columns with light aggregation in an `AggregatingMergeTree`. This can only be used with simple functions like `any`, `anyLast`, `sum`, `min`, `max`. [\#4629](https://github.com/ClickHouse/ClickHouse/pull/4629) ([Boris Granveaud](https://github.com/bgranvea))
- Added support for non-constant arguments in the `ngramDistance` function. [\#5198](https://github.com/ClickHouse/ClickHouse/pull/5198) ([Danila Kutenin](https://github.com/danlark1))
- Added functions `skewPop`, `skewSamp`, `kurtPop` and `kurtSamp` to compute sequence skewness, sample skewness, kurtosis and sample kurtosis respectively. [\#5200](https://github.com/ClickHouse/ClickHouse/pull/5200) ([hcz](https://github.com/hczhcz))
- Support the rename operation for `MaterializeView` storage. [\#5209](https://github.com/ClickHouse/ClickHouse/pull/5209) ([Guillaume Tassery](https://github.com/YiuRULE))
- Added a server which allows connecting to ClickHouse using the MySQL client. [\#4715](https://github.com/ClickHouse/ClickHouse/pull/4715) ([Yuriy Baranov](https://github.com/yurriy))
- Add `toDecimal*OrZero` and `toDecimal*OrNull` functions. [\#5291](https://github.com/ClickHouse/ClickHouse/pull/5291) ([Artem Zuikov](https://github.com/4ertus2))
- Support Decimal types in functions: `quantile`, `quantiles`, `median`, `quantileExactWeighted`, `quantilesExactWeighted`, `medianExactWeighted`. [\#5304](https://github.com/ClickHouse/ClickHouse/pull/5304) ([Artem Zuikov](https://github.com/4ertus2))
- Added `toValidUTF8` function, which replaces all invalid UTF-8 characters by the replacement character � (U+FFFD). [\#5322](https://github.com/ClickHouse/ClickHouse/pull/5322) ([Danila Kutenin](https://github.com/danlark1))
- Added `format` function. Formats a constant pattern (a simplified Python format pattern) with the strings listed in the arguments. [\#5330](https://github.com/ClickHouse/ClickHouse/pull/5330) ([Danila Kutenin](https://github.com/danlark1))
- Added `system.detached_parts` table containing information about detached parts of `MergeTree` tables. [\#5353](https://github.com/ClickHouse/ClickHouse/pull/5353) ([akuzm](https://github.com/akuzm))
- Added `ngramSearch` function to calculate the non-symmetric difference between needle and haystack. [\#5418](https://github.com/ClickHouse/ClickHouse/pull/5418)[\#5422](https://github.com/ClickHouse/ClickHouse/pull/5422) ([Danila Kutenin](https://github.com/danlark1))
- Implementation of basic machine learning methods (stochastic linear regression and logistic regression) using the aggregate functions interface. Has different strategies for updating model weights (simple gradient descent, momentum method, Nesterov method). Also supports mini-batches of custom size.
[\#4943](https://github.com/ClickHouse/ClickHouse/pull/4943) ([Quid37](https://github.com/Quid37))
- Implementation of `geohashEncode` and `geohashDecode` functions. [\#5003](https://github.com/ClickHouse/ClickHouse/pull/5003) ([Vasily Nemkov](https://github.com/Enmk))
- Added aggregate function `timeSeriesGroupSum`, which can aggregate different time series whose sample timestamps are not aligned. It uses linear interpolation between two sample timestamps and then sums the time series together. Added aggregate function `timeSeriesGroupRateSum`, which calculates the rate of time series and then sums the rates together. [\#4542](https://github.com/ClickHouse/ClickHouse/pull/4542) ([Yangkuan Liu](https://github.com/LiuYangkuan))
- Added functions `IPv4CIDRtoIPv4Range` and `IPv6CIDRtoIPv6Range` to calculate the lower and higher bounds for an IP in a subnet using a CIDR. [\#5095](https://github.com/ClickHouse/ClickHouse/pull/5095) ([Guillaume Tassery](https://github.com/YiuRULE))
- Add an `X-ClickHouse-Summary` header when we send a query using HTTP with the setting `send_progress_in_http_headers` enabled. Returns the usual information of X-ClickHouse-Progress, with additional information like how many rows and bytes were inserted in the query. [\#5116](https://github.com/ClickHouse/ClickHouse/pull/5116) ([Guillaume Tassery](https://github.com/YiuRULE))
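
A minimal sketch of the new LIMIT BY offset syntax, assuming a hypothetical `pages(domain, page, hits)` table; the two spellings are equivalent:

```sql
-- Per domain: skip the single most-viewed page, keep the next two.
SELECT domain, page, hits
FROM pages
ORDER BY domain, hits DESC
LIMIT 1, 2 BY domain;

-- The same query with the alternative spelling:
SELECT domain, page, hits
FROM pages
ORDER BY domain, hits DESC
LIMIT 2 OFFSET 1 BY domain;
```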

#### Improvements {#improvements}

- Added `max_parts_in_total` setting for the MergeTree family of tables (default: 100 000) that prevents unsafe specification of the partition key \#5166. [\#5171](https://github.com/ClickHouse/ClickHouse/pull/5171) ([alexey-milovidov](https://github.com/alexey-milovidov))
- `clickhouse-obfuscator`: derive the seed for individual columns by combining the initial seed with the column name, not the column position. This is intended to transform datasets with multiple related tables, so that tables remain JOINable after transformation. [\#5178](https://github.com/ClickHouse/ClickHouse/pull/5178) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added functions `JSONExtractRaw`, `JSONExtractKeyAndValues`. Renamed functions `jsonExtract` to `JSONExtract`. When something goes wrong these functions return the corresponding values, not `NULL`. Modified function `JSONExtract`, now it gets the return type from its last parameter and doesn’t inject nullables. Implemented fallback to RapidJSON in case AVX2 instructions are not available. Simdjson library updated to a new version. [\#5235](https://github.com/ClickHouse/ClickHouse/pull/5235) ([Vitaly Baranov](https://github.com/vitlibar))
- Now `if` and `multiIf` functions don’t rely on the condition’s `Nullable`, but rely on the branches for SQL compatibility. [\#5238](https://github.com/ClickHouse/ClickHouse/pull/5238) ([Jian Wu](https://github.com/janplus))
- The `In` predicate now generates a `Null` result from `Null` input, like the `Equal` function. [\#5152](https://github.com/ClickHouse/ClickHouse/pull/5152) ([Jian Wu](https://github.com/janplus))
- Check the time limit every (flush\_interval / poll\_timeout) number of rows from Kafka. This allows breaking the reading from the Kafka consumer more frequently and checking the time limits for the top-level streams. [\#5249](https://github.com/ClickHouse/ClickHouse/pull/5249) ([Ivan](https://github.com/abyss7))
- Link rdkafka with bundled SASL. It should allow using SASL SCRAM authentication. [\#5253](https://github.com/ClickHouse/ClickHouse/pull/5253) ([Ivan](https://github.com/abyss7))
- Batched version of RowRefList for ALL JOINS. [\#5267](https://github.com/ClickHouse/ClickHouse/pull/5267) ([Artem Zuikov](https://github.com/4ertus2))
- clickhouse-server: more informative listen error messages. [\#5268](https://github.com/ClickHouse/ClickHouse/pull/5268) ([proller](https://github.com/proller))
- Support dictionaries in clickhouse-copier for functions in ``. [\#5270](https://github.com/ClickHouse/ClickHouse/pull/5270) ([proller](https://github.com/proller))
- Add a new setting `kafka_commit_every_batch` to regulate the Kafka committing policy. It allows setting the commit mode: after every batch of messages is handled, or after the whole block is written to the storage. It’s a trade-off between losing some messages and reading them twice in some extreme situations (see the sketch after this list). [\#5308](https://github.com/ClickHouse/ClickHouse/pull/5308) ([Ivan](https://github.com/abyss7))
- Make `windowFunnel` support other unsigned integer types. [\#5320](https://github.com/ClickHouse/ClickHouse/pull/5320) ([sundyli](https://github.com/sundy-li))
- Allow shadowing the virtual column `_table` in the Merge engine. [\#5325](https://github.com/ClickHouse/ClickHouse/pull/5325) ([Ivan](https://github.com/abyss7))
- Make `sequenceMatch` aggregate functions support other unsigned integer types. [\#5339](https://github.com/ClickHouse/ClickHouse/pull/5339) ([sundyli](https://github.com/sundy-li))
- Better error messages if a checksum mismatch is most likely caused by hardware failures. [\#5355](https://github.com/ClickHouse/ClickHouse/pull/5355) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Check that underlying tables support sampling for `StorageMerge`. [\#5366](https://github.com/ClickHouse/ClickHouse/pull/5366) ([Ivan](https://github.com/abyss7))
- Close MySQL connections after their usage in external dictionaries. It is related to issue \#893. [\#5395](https://github.com/ClickHouse/ClickHouse/pull/5395) ([Clément Rodriguez](https://github.com/clemrodriguez))
- Improvements of the MySQL wire protocol. Changed the name of the format to MySQLWire. Using RAII for calling RSA\_free. Disabling SSL if the context cannot be created. [\#5419](https://github.com/ClickHouse/ClickHouse/pull/5419) ([Yuriy Baranov](https://github.com/yurriy))
- clickhouse-client: allow running with an inaccessible history file (read-only, no disk space, file is a directory, …). [\#5431](https://github.com/ClickHouse/ClickHouse/pull/5431) ([proller](https://github.com/proller))
- Respect query settings in asynchronous INSERTs into Distributed tables. [\#4936](https://github.com/ClickHouse/ClickHouse/pull/4936) ([TCeason](https://github.com/TCeason))
- Renamed functions `leastSqr` to `simpleLinearRegression`, `LinearRegression` to `linearRegression`, `LogisticRegression` to `logisticRegression`. [\#5391](https://github.com/ClickHouse/ClickHouse/pull/5391) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
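
A minimal sketch of the new setting in a Kafka engine definition; the broker, topic, and consumer group names are placeholders:

```sql
CREATE TABLE queue
(
    ts DateTime,
    message String
)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'localhost:9092',
         kafka_topic_list = 'events',
         kafka_group_name = 'clickhouse-consumer',
         kafka_format = 'JSONEachRow',
         kafka_commit_every_batch = 1;  -- commit per handled batch instead of per written block
```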

#### Performance Improvements {#performance-improvements}

- Parallelize processing of parts of non-replicated MergeTree tables in the ALTER MODIFY query. [\#4639](https://github.com/ClickHouse/ClickHouse/pull/4639) ([Ivan Kush](https://github.com/IvanKush))
- Optimizations in regular expression extraction. [\#5193](https://github.com/ClickHouse/ClickHouse/pull/5193) [\#5191](https://github.com/ClickHouse/ClickHouse/pull/5191) ([Danila Kutenin](https://github.com/danlark1))
- Do not add the right join key column to the join result if it’s used only in the join on section. [\#5260](https://github.com/ClickHouse/ClickHouse/pull/5260) ([Artem Zuikov](https://github.com/4ertus2))
- Freeze the Kafka buffer after the first empty response. This avoids multiple invocations of `ReadBuffer::next()` for an empty result in some row-parsing streams. [\#5283](https://github.com/ClickHouse/ClickHouse/pull/5283) ([Ivan](https://github.com/abyss7))
- `concat` function optimization for multiple arguments. [\#5357](https://github.com/ClickHouse/ClickHouse/pull/5357) ([Danila Kutenin](https://github.com/danlark1))
- Query optimisation. Allow pushing down IN statements while rewriting comma/cross joins into inner ones. [\#5396](https://github.com/ClickHouse/ClickHouse/pull/5396) ([Artem Zuikov](https://github.com/4ertus2))
- Upgrade our LZ4 implementation with the reference one to get faster decompression. [\#5070](https://github.com/ClickHouse/ClickHouse/pull/5070) ([Danila Kutenin](https://github.com/danlark1))
- Implemented MSD radix sort (based on kxsort), and partial sorting. [\#5129](https://github.com/ClickHouse/ClickHouse/pull/5129) ([Evgenii Pravda](https://github.com/kvinty))

#### Bug Fixes {#bug-fixes}

- Fix push require columns with join. [\#5192](https://github.com/ClickHouse/ClickHouse/pull/5192) ([Winter Zhang](https://github.com/zhang2014))
- Fixed a bug where, when ClickHouse is run by systemd, the command `sudo service clickhouse-server forcerestart` was not working as expected. [\#5204](https://github.com/ClickHouse/ClickHouse/pull/5204) ([proller](https://github.com/proller))
- Fix HTTP error codes in DataPartsExchange (the interserver HTTP server on port 9009 always returned code 200, even on errors). [\#5216](https://github.com/ClickHouse/ClickHouse/pull/5216) ([proller](https://github.com/proller))
- Fix SimpleAggregateFunction for Strings longer than MAX\_SMALL\_STRING\_SIZE. [\#5311](https://github.com/ClickHouse/ClickHouse/pull/5311) ([Azat Khuzhin](https://github.com/azat))
- Fix error for `Decimal` to `Nullable(Decimal)` conversion in IN. Support other Decimal-to-Decimal conversions (including different scales). [\#5350](https://github.com/ClickHouse/ClickHouse/pull/5350) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed FPU clobbering in the simdjson library that led to wrong calculation of the `uniqHLL` and `uniqCombined` aggregate functions and math functions such as `log`. [\#5354](https://github.com/ClickHouse/ClickHouse/pull/5354) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed handling of mixed const/nonconst cases in JSON functions. [\#5435](https://github.com/ClickHouse/ClickHouse/pull/5435) ([Vitaly Baranov](https://github.com/vitlibar))
- Fix `retention` function. Now all conditions that are satisfied in a row of data are added to the data state. [\#5119](https://github.com/ClickHouse/ClickHouse/pull/5119) ([小路](https://github.com/nicelulu))
- Fix the result type for `quantileExact` with Decimals. [\#5304](https://github.com/ClickHouse/ClickHouse/pull/5304) ([Artem Zuikov](https://github.com/4ertus2))

#### Documentation {#documentation}

- Translate documentation for `CollapsingMergeTree` to Chinese. [\#5168](https://github.com/ClickHouse/ClickHouse/pull/5168) ([张风啸](https://github.com/AlexZFX))
- Translate some documentation about table engines to Chinese.
[\#5134](https://github.com/ClickHouse/ClickHouse/pull/5134) [\#5328](https://github.com/ClickHouse/ClickHouse/pull/5328) ([never lee](https://github.com/neverlee))

#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements}

- Fix some sanitizer reports that show probable use-after-free. [\#5139](https://github.com/ClickHouse/ClickHouse/pull/5139) [\#5143](https://github.com/ClickHouse/ClickHouse/pull/5143) [\#5393](https://github.com/ClickHouse/ClickHouse/pull/5393) ([Ivan](https://github.com/abyss7))
- Move performance tests out of separate directories for convenience. [\#5158](https://github.com/ClickHouse/ClickHouse/pull/5158) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix incorrect performance tests. [\#5255](https://github.com/ClickHouse/ClickHouse/pull/5255) ([alesapin](https://github.com/alesapin))
- Added a tool to calculate checksums caused by bit flips to debug hardware issues. [\#5334](https://github.com/ClickHouse/ClickHouse/pull/5334) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Make the runner script more usable. [\#5340](https://github.com/ClickHouse/ClickHouse/pull/5340)[\#5360](https://github.com/ClickHouse/ClickHouse/pull/5360) ([filimonov](https://github.com/filimonov))
- Add a small instruction on how to write performance tests. [\#5408](https://github.com/ClickHouse/ClickHouse/pull/5408) ([alesapin](https://github.com/alesapin))
- Add the ability to make substitutions in create, fill and drop queries in performance tests. [\#5367](https://github.com/ClickHouse/ClickHouse/pull/5367) ([Olga Khvostikova](https://github.com/stavrolia))

## ClickHouse release 19.7 {#clickhouse-release-19-7}

### ClickHouse release 19.7.5.29, 2019-07-05 {#clickhouse-release-19-7-5-29-2019-07-05}

#### Bug Fix {#bug-fix-25}

- Fix performance regression in some queries with JOIN. [\#5192](https://github.com/ClickHouse/ClickHouse/pull/5192) ([Winter Zhang](https://github.com/zhang2014))

### ClickHouse release 19.7.5.27, 2019-06-09 {#clickhouse-release-19-7-5-27-2019-06-09}

#### New features {#new-features-1}

- Added bitmap related functions `bitmapHasAny` and `bitmapHasAll`, analogous to the `hasAny` and `hasAll` functions for arrays (see the sketch after this list). [\#5279](https://github.com/ClickHouse/ClickHouse/pull/5279) ([Sergi Vladykin](https://github.com/svladykin))
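
A minimal sketch of the two functions on bitmaps built inline with `bitmapBuild`:

```sql
SELECT
    bitmapHasAny(bitmapBuild([1, 2, 3]), bitmapBuild([3, 4, 5])) AS any_shared,  -- 1
    bitmapHasAll(bitmapBuild([1, 2, 3]), bitmapBuild([3, 4, 5])) AS all_shared;  -- 0
```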

#### Bug Fixes {#bug-fixes-1}

- Fix segfault on `minmax` INDEX with Null values. [\#5246](https://github.com/ClickHouse/ClickHouse/pull/5246) ([Nikita Vasilev](https://github.com/nikvas0))
- Mark all input columns in LIMIT BY as required output. It fixes the ‘Not found column’ error in some distributed queries. [\#5407](https://github.com/ClickHouse/ClickHouse/pull/5407) ([Constantin S. Pan](https://github.com/kvap))
- Fix the “Column ‘0’ already exists” error in `SELECT .. PREWHERE` on a column with DEFAULT. [\#5397](https://github.com/ClickHouse/ClickHouse/pull/5397) ([proller](https://github.com/proller))
- Fix `ALTER MODIFY TTL` query on `ReplicatedMergeTree`. [\#5539](https://github.com/ClickHouse/ClickHouse/pull/5539/commits) ([Anton Popov](https://github.com/CurtizJ))
- Don’t crash the server when Kafka consumers have failed to start. [\#5285](https://github.com/ClickHouse/ClickHouse/pull/5285) ([Ivan](https://github.com/abyss7))
- Fixed bitmap functions producing wrong results. [\#5359](https://github.com/ClickHouse/ClickHouse/pull/5359) ([Andy Yang](https://github.com/andyyzh))
- Fix element\_count for hashed dictionaries (do not include duplicates). [\#5440](https://github.com/ClickHouse/ClickHouse/pull/5440) ([Azat Khuzhin](https://github.com/azat))
- Use the contents of the environment variable TZ as the name for the timezone. It helps to correctly detect the default timezone in some cases. [\#5443](https://github.com/ClickHouse/ClickHouse/pull/5443) ([Ivan](https://github.com/abyss7))
- Do not try to convert integers in `dictGetT` functions, because it doesn’t work correctly. Throw an exception instead. [\#5446](https://github.com/ClickHouse/ClickHouse/pull/5446) ([Artem Zuikov](https://github.com/4ertus2))
- Fix settings in ExternalData HTTP requests. [\#5455](https://github.com/ClickHouse/ClickHouse/pull/5455) ([Danila Kutenin](https://github.com/danlark1))
- Fix bug when parts were removed only from FS without dropping them from ZooKeeper. [\#5520](https://github.com/ClickHouse/ClickHouse/pull/5520) ([alesapin](https://github.com/alesapin))
- Fix segmentation fault in the `bitmapHasAny` function. [\#5528](https://github.com/ClickHouse/ClickHouse/pull/5528) ([Zhichang Yu](https://github.com/yuzhichang))
- Fixed error when the replication connection pool doesn’t retry resolving the host, even when the DNS cache was dropped. [\#5534](https://github.com/ClickHouse/ClickHouse/pull/5534) ([alesapin](https://github.com/alesapin))
- Fixed `DROP INDEX IF EXISTS` query. Now the `ALTER TABLE ... DROP INDEX IF EXISTS ...` query doesn’t raise an exception if the provided index does not exist. [\#5524](https://github.com/ClickHouse/ClickHouse/pull/5524) ([Gleb Novikov](https://github.com/NanoBjorn))
- Fix union all supertype column. There were cases with inconsistent data and column types of resulting columns. [\#5503](https://github.com/ClickHouse/ClickHouse/pull/5503) ([Artem Zuikov](https://github.com/4ertus2))
- Skip ZNONODE during DDL query processing. Previously, if another node removed the znode in the task queue, the node that had not yet processed it but had already gotten the list of children would terminate the DDLWorker thread. [\#5489](https://github.com/ClickHouse/ClickHouse/pull/5489) ([Azat Khuzhin](https://github.com/azat))
- Fix INSERT into a Distributed() table with a MATERIALIZED column. [\#5429](https://github.com/ClickHouse/ClickHouse/pull/5429) ([Azat Khuzhin](https://github.com/azat))

### ClickHouse release 19.7.3.9, 2019-05-30 {#clickhouse-release-19-7-3-9-2019-05-30}

#### New Features {#new-features-2}

- Allow limiting the range of a setting that can be specified by the user. These constraints can be set up in the user settings profile. [\#4931](https://github.com/ClickHouse/ClickHouse/pull/4931) ([Vitaly Baranov](https://github.com/vitlibar))
- Add a second version of the `groupUniqArray` function with an optional `max_size` parameter that limits the size of the resulting array (see the sketch after this list). This behavior is similar to the `groupArray(max_size)(x)` function. [\#5026](https://github.com/ClickHouse/ClickHouse/pull/5026) ([Guillaume Tassery](https://github.com/YiuRULE))
- For TSVWithNames/CSVWithNames input file formats, the column order can now be determined from the file header. This is controlled by the `input_format_with_names_use_header` parameter. [\#5081](https://github.com/ClickHouse/ClickHouse/pull/5081) ([Alexander](https://github.com/Akazz))
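
A minimal sketch of the size-capped variant; at most three of the distinct values end up in the resulting array:

```sql
SELECT groupUniqArray(3)(number % 10) AS sample_values
FROM numbers(100);
-- Returns an array of at most 3 of the distinct values 0..9.
```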

#### Bug Fixes {#bug-fixes-2}

- Crash with uncompressed\_cache + JOIN during merge (\#5197). [\#5133](https://github.com/ClickHouse/ClickHouse/pull/5133) ([Danila Kutenin](https://github.com/danlark1))
- Segmentation fault on a clickhouse-client query to system tables. \#5066 [\#5127](https://github.com/ClickHouse/ClickHouse/pull/5127) ([Ivan](https://github.com/abyss7))
- Data loss on heavy load via KafkaEngine (\#4736). [\#5080](https://github.com/ClickHouse/ClickHouse/pull/5080) ([Ivan](https://github.com/abyss7))
- Fixed a very rare data race condition that could happen when executing a query with UNION ALL involving at least two SELECTs from system.columns, system.tables, system.parts, system.parts\_tables or tables of the Merge family while performing ALTER of columns of the related tables concurrently. [\#5189](https://github.com/ClickHouse/ClickHouse/pull/5189) ([alexey-milovidov](https://github.com/alexey-milovidov))

#### Performance Improvements {#performance-improvements-1}

- Use radix sort for sorting by a single numeric column in `ORDER BY` without `LIMIT`. [\#5106](https://github.com/ClickHouse/ClickHouse/pull/5106), [\#4439](https://github.com/ClickHouse/ClickHouse/pull/4439) ([Evgenii Pravda](https://github.com/kvinty), [alexey-milovidov](https://github.com/alexey-milovidov))

#### Documentation {#documentation-1}

- Translate documentation for some table engines to Chinese. [\#5107](https://github.com/ClickHouse/ClickHouse/pull/5107), [\#5094](https://github.com/ClickHouse/ClickHouse/pull/5094), [\#5087](https://github.com/ClickHouse/ClickHouse/pull/5087) ([张风啸](https://github.com/AlexZFX)), [\#5068](https://github.com/ClickHouse/ClickHouse/pull/5068) ([never lee](https://github.com/neverlee))

#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-1}

- Print UTF-8 characters properly in `clickhouse-test`. [\#5084](https://github.com/ClickHouse/ClickHouse/pull/5084) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Add a command line parameter for clickhouse-client to always load suggestion data. [\#5102](https://github.com/ClickHouse/ClickHouse/pull/5102) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Resolve some of the PVS-Studio warnings. [\#5082](https://github.com/ClickHouse/ClickHouse/pull/5082) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Update LZ4. [\#5040](https://github.com/ClickHouse/ClickHouse/pull/5040) ([Danila Kutenin](https://github.com/danlark1))
- Add gperf to build requirements for the upcoming pull request \#5030. [\#5110](https://github.com/ClickHouse/ClickHouse/pull/5110) ([proller](https://github.com/proller))

## ClickHouse release 19.6 {#clickhouse-release-19-6}

### ClickHouse release 19.6.3.18, 2019-06-13 {#clickhouse-release-19-6-3-18-2019-06-13}

#### Bug Fixes {#bug-fixes-3}

- Fixed IN condition pushdown for queries from table functions `mysql` and `odbc` and the corresponding table engines. This fixes \#3540 and \#2384. [\#5313](https://github.com/ClickHouse/ClickHouse/pull/5313) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix deadlock in ZooKeeper. [\#5297](https://github.com/ClickHouse/ClickHouse/pull/5297) ([github1youlc](https://github.com/github1youlc))
- Allow quoted decimals in CSV.
[\#5284](https://github.com/ClickHouse/ClickHouse/pull/5284) ([Artem Zuikov](https://github.com/4ertus2))
- Disallow conversion from float Inf/NaN into Decimals (throw an exception). [\#5282](https://github.com/ClickHouse/ClickHouse/pull/5282) ([Artem Zuikov](https://github.com/4ertus2))
- Fix data race in the rename query. [\#5247](https://github.com/ClickHouse/ClickHouse/pull/5247) ([Winter Zhang](https://github.com/zhang2014))
- Temporarily disable LFAlloc. Usage of LFAlloc might lead to a lot of MAP\_FAILED in allocating UncompressedCache and, as a result, to crashes of queries on highly loaded servers. [cfdba93](https://github.com/ClickHouse/ClickHouse/commit/cfdba938ce22f16efeec504f7f90206a515b1280) ([Danila Kutenin](https://github.com/danlark1))

### ClickHouse release 19.6.2.11, 2019-05-13 {#clickhouse-release-19-6-2-11-2019-05-13}

#### New Features {#new-features-3}

- TTL expressions for columns and tables (see the sketch after this list). [\#4212](https://github.com/ClickHouse/ClickHouse/pull/4212) ([Anton Popov](https://github.com/CurtizJ))
- Added support for `brotli` compression for HTTP responses (Accept-Encoding: br). [\#4388](https://github.com/ClickHouse/ClickHouse/pull/4388) ([Mikhail](https://github.com/fandyushin))
- Added new function `isValidUTF8` for checking whether a set of bytes is correctly UTF-8 encoded. [\#4934](https://github.com/ClickHouse/ClickHouse/pull/4934) ([Danila Kutenin](https://github.com/danlark1))
- Add a new load balancing policy `first_or_random` which sends queries to the first specified host and, if it is inaccessible, sends queries to random hosts of the shard. Useful for cross-replication topology setups. [\#5012](https://github.com/ClickHouse/ClickHouse/pull/5012) ([nvartolomei](https://github.com/nvartolomei))
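
A minimal sketch of column- and table-level TTL, assuming a hypothetical `events` table; an expired column value is reset to its default, an expired row is deleted:

```sql
CREATE TABLE events
(
    d Date,
    user_id UInt64,
    details String TTL d + INTERVAL 7 DAY  -- column TTL: value is cleared after a week
)
ENGINE = MergeTree
ORDER BY (d, user_id)
TTL d + INTERVAL 30 DAY;                   -- table TTL: rows are deleted after a month
```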

#### Experimental Features {#experimental-features-1}

- Add setting `index_granularity_bytes` (adaptive index granularity) for the MergeTree\* tables family. [\#4826](https://github.com/ClickHouse/ClickHouse/pull/4826) ([alesapin](https://github.com/alesapin))

#### Improvements {#improvements-1}

- Added support for non-constant and negative size and length arguments for the `substringUTF8` function. [\#4989](https://github.com/ClickHouse/ClickHouse/pull/4989) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Disable push-down to the right table in left join, the left table in right join, and both tables in full join. This fixes wrong JOIN results in some cases. [\#4846](https://github.com/ClickHouse/ClickHouse/pull/4846) ([Ivan](https://github.com/abyss7))
- `clickhouse-copier`: auto-upload the task configuration from the `--task-file` option. [\#4876](https://github.com/ClickHouse/ClickHouse/pull/4876) ([proller](https://github.com/proller))
- Added a typos handler for the storage factory and table functions factory. [\#4891](https://github.com/ClickHouse/ClickHouse/pull/4891) ([Danila Kutenin](https://github.com/danlark1))
- Support asterisks and qualified asterisks for multiple joins without subqueries. [\#4898](https://github.com/ClickHouse/ClickHouse/pull/4898) ([Artem Zuikov](https://github.com/4ertus2))
- Make the missing column error message more user friendly. [\#4915](https://github.com/ClickHouse/ClickHouse/pull/4915) ([Artem Zuikov](https://github.com/4ertus2))

#### Performance Improvements {#performance-improvements-2}

- Significant speedup of ASOF JOIN. [\#4924](https://github.com/ClickHouse/ClickHouse/pull/4924) ([Martijn Bakker](https://github.com/Gladdy))

#### Backward Incompatible Changes {#backward-incompatible-changes}

- HTTP header `Query-Id` was renamed to `X-ClickHouse-Query-Id` for consistency. [\#4972](https://github.com/ClickHouse/ClickHouse/pull/4972) ([Mikhail](https://github.com/fandyushin))

#### Bug Fixes {#bug-fixes-4}

- Fixed potential null pointer dereference in `clickhouse-copier`. [\#4900](https://github.com/ClickHouse/ClickHouse/pull/4900) ([proller](https://github.com/proller))
- Fixed error on queries with JOIN + ARRAY JOIN. [\#4938](https://github.com/ClickHouse/ClickHouse/pull/4938) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed hanging on start of the server when a dictionary depends on another dictionary via a database with engine=Dictionary. [\#4962](https://github.com/ClickHouse/ClickHouse/pull/4962) ([Vitaly Baranov](https://github.com/vitlibar))
- Partially fix distributed\_product\_mode = local. It is now possible to allow columns of local tables in where/having/order by/… via table aliases. An exception is thrown if the table does not have an alias. It is not yet possible to access the columns without table aliases. [\#4986](https://github.com/ClickHouse/ClickHouse/pull/4986) ([Artem Zuikov](https://github.com/4ertus2))
- Fix potentially wrong result for `SELECT DISTINCT` with `JOIN`. [\#5001](https://github.com/ClickHouse/ClickHouse/pull/5001) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed a very rare data race condition that could happen when executing a query with UNION ALL involving at least two SELECTs from system.columns, system.tables, system.parts, system.parts\_tables or tables of the Merge family while performing ALTER of columns of the related tables concurrently. [\#5189](https://github.com/ClickHouse/ClickHouse/pull/5189) ([alexey-milovidov](https://github.com/alexey-milovidov))

#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-2}

- Fixed test failures when running clickhouse-server on a different host. [\#4713](https://github.com/ClickHouse/ClickHouse/pull/4713) ([Vasily Nemkov](https://github.com/Enmk))
- clickhouse-test: disable color control sequences in non-tty environments. [\#4937](https://github.com/ClickHouse/ClickHouse/pull/4937) ([alesapin](https://github.com/alesapin))
- clickhouse-test: allow using any test database (remove the `test.` qualification where possible). [\#5008](https://github.com/ClickHouse/ClickHouse/pull/5008) ([proller](https://github.com/proller))
- Fix ubsan errors. [\#5037](https://github.com/ClickHouse/ClickHouse/pull/5037) ([Vitaly Baranov](https://github.com/vitlibar))
- Yandex LFAlloc was added to ClickHouse to allocate MarkCache and UncompressedCache data in different ways to catch segfaults more reliably. [\#4995](https://github.com/ClickHouse/ClickHouse/pull/4995) ([Danila Kutenin](https://github.com/danlark1))
- A Python util to help with backports and changelogs.
[\#4949](https://github.com/ClickHouse/ClickHouse/pull/4949) ([Ivan](https://github.com/abyss7))

## ClickHouse release 19.5 {#clickhouse-release-19-5}

### ClickHouse release 19.5.4.22, 2019-05-13 {#clickhouse-release-19-5-4-22-2019-05-13}

#### Bug fixes {#bug-fixes-5}

- Fixed possible crash in bitmap\* functions. [\#5220](https://github.com/ClickHouse/ClickHouse/pull/5220) [\#5228](https://github.com/ClickHouse/ClickHouse/pull/5228) ([Andy Yang](https://github.com/andyyzh))
- Fixed a very rare data race condition that could happen when executing a query with UNION ALL involving at least two SELECTs from system.columns, system.tables, system.parts, system.parts\_tables or tables of the Merge family while performing ALTER of columns of the related tables concurrently. [\#5189](https://github.com/ClickHouse/ClickHouse/pull/5189) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed error `Set for IN is not created yet in case of using single LowCardinality column in the left part of IN`. This error happened if a LowCardinality column was part of the primary key. \#5031 [\#5154](https://github.com/ClickHouse/ClickHouse/pull/5154) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Modification of the retention function: if a row satisfied both the first and the Nth condition, only the first satisfied condition was added to the data state. Now all conditions that are satisfied in a row of data are added to the data state. [\#5119](https://github.com/ClickHouse/ClickHouse/pull/5119) ([小路](https://github.com/nicelulu))

### ClickHouse release 19.5.3.8, 2019-04-18 {#clickhouse-release-19-5-3-8-2019-04-18}

#### Bug fixes {#bug-fixes-6}

- Fixed the type of setting `max_partitions_per_insert_block` from boolean to UInt64. [\#5028](https://github.com/ClickHouse/ClickHouse/pull/5028) ([Mohammad Hossein Sekhavat](https://github.com/mhsekhavat))

### ClickHouse release 19.5.2.6, 2019-04-15 {#clickhouse-release-19-5-2-6-2019-04-15}

#### New Features {#new-features-4}

- [Hyperscan](https://github.com/intel/hyperscan) multiple regular expression matching was added (functions `multiMatchAny`, `multiMatchAnyIndex`, `multiFuzzyMatchAny`, `multiFuzzyMatchAnyIndex`). [\#4780](https://github.com/ClickHouse/ClickHouse/pull/4780), [\#4841](https://github.com/ClickHouse/ClickHouse/pull/4841) ([Danila Kutenin](https://github.com/danlark1))
- `multiSearchFirstPosition` function was added. [\#4780](https://github.com/ClickHouse/ClickHouse/pull/4780) ([Danila Kutenin](https://github.com/danlark1))
- Implement the predefined expression filter per row for tables. [\#4792](https://github.com/ClickHouse/ClickHouse/pull/4792) ([Ivan](https://github.com/abyss7))
- A new type of data skipping indices based on bloom filters (can be used for the `equal`, `in` and `like` functions). [\#4499](https://github.com/ClickHouse/ClickHouse/pull/4499) ([Nikita Vasilev](https://github.com/nikvas0))
- Added `ASOF JOIN` which allows running queries that join to the most recent value known (see the sketch after this list). [\#4774](https://github.com/ClickHouse/ClickHouse/pull/4774) [\#4867](https://github.com/ClickHouse/ClickHouse/pull/4867) [\#4863](https://github.com/ClickHouse/ClickHouse/pull/4863) [\#4875](https://github.com/ClickHouse/ClickHouse/pull/4875) ([Martijn Bakker](https://github.com/Gladdy), [Artem Zuikov](https://github.com/4ertus2))
- Rewrite multiple `COMMA JOIN` to `CROSS JOIN`. Then rewrite them to `INNER JOIN` if possible. [\#4661](https://github.com/ClickHouse/ClickHouse/pull/4661) ([Artem Zuikov](https://github.com/4ertus2))
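
A minimal sketch of `ASOF JOIN`, assuming hypothetical `trades` and `quotes` tables keyed by symbol and time; each trade is matched with the closest earlier (or equal) quote:

```sql
SELECT t.symbol, t.time, t.price, q.bid, q.ask
FROM trades AS t
ASOF JOIN quotes AS q
USING (symbol, time);  -- equality on symbol, closest match on the last column, time
```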

#### Improvement {#improvement-9}

- `topK` and `topKWeighted` now support a custom `loadFactor` (fixes issue [\#4252](https://github.com/ClickHouse/ClickHouse/issues/4252)). [\#4634](https://github.com/ClickHouse/ClickHouse/pull/4634) ([Kirill Danshin](https://github.com/kirillDanshin))
- Allow using `parallel_replicas_count > 1` even for tables without sampling (the setting is simply ignored for them). In previous versions it led to an exception. [\#4637](https://github.com/ClickHouse/ClickHouse/pull/4637) ([Alexey Elymanov](https://github.com/digitalist))
- Support for `CREATE OR REPLACE VIEW`. Allows creating a view or setting a new definition in a single statement (see the sketch after this list). [\#4654](https://github.com/ClickHouse/ClickHouse/pull/4654) ([Boris Granveaud](https://github.com/bgranvea))
- The `Buffer` table engine now supports `PREWHERE`. [\#4671](https://github.com/ClickHouse/ClickHouse/pull/4671) ([Yangkuan Liu](https://github.com/LiuYangkuan))
- Add ability to start a replicated table without metadata in ZooKeeper in `readonly` mode. [\#4691](https://github.com/ClickHouse/ClickHouse/pull/4691) ([alesapin](https://github.com/alesapin))
- Fixed flicker of the progress bar in clickhouse-client. The issue was most noticeable when using `FORMAT Null` with streaming queries. [\#4811](https://github.com/ClickHouse/ClickHouse/pull/4811) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Allow disabling functions with the `hyperscan` library on a per-user basis to limit potentially excessive and uncontrolled resource usage. [\#4816](https://github.com/ClickHouse/ClickHouse/pull/4816) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Add version number logging in all errors. [\#4824](https://github.com/ClickHouse/ClickHouse/pull/4824) ([proller](https://github.com/proller))
- Added a restriction to the `multiMatch` functions which requires the string size to fit into `unsigned int`. Also added a number-of-arguments limit to the `multiSearch` functions. [\#4834](https://github.com/ClickHouse/ClickHouse/pull/4834) ([Danila Kutenin](https://github.com/danlark1))
- Improved usage of scratch space and error handling in Hyperscan. [\#4866](https://github.com/ClickHouse/ClickHouse/pull/4866) ([Danila Kutenin](https://github.com/danlark1))
- Fill `system.graphite_detentions` from a table config of `*GraphiteMergeTree` engine tables. [\#4584](https://github.com/ClickHouse/ClickHouse/pull/4584) ([Mikhail f. Shiryaev](https://github.com/Felixoid))
- Rename `trigramDistance` function to `ngramDistance` and add more functions with `CaseInsensitive` and `UTF`. [\#4602](https://github.com/ClickHouse/ClickHouse/pull/4602) ([Danila Kutenin](https://github.com/danlark1))
- Improved data skipping indices calculation. [\#4640](https://github.com/ClickHouse/ClickHouse/pull/4640) ([Nikita Vasilev](https://github.com/nikvas0))
- Keep ordinary, `DEFAULT`, `MATERIALIZED` and `ALIAS` columns in a single list (fixes issue [\#2867](https://github.com/ClickHouse/ClickHouse/issues/2867)). [\#4707](https://github.com/ClickHouse/ClickHouse/pull/4707) ([Alex Zatelepin](https://github.com/ztlpn))
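
A minimal sketch of `CREATE OR REPLACE VIEW`, assuming a hypothetical base table `t`:

```sql
CREATE OR REPLACE VIEW v AS SELECT a, b FROM t;          -- creates the view
CREATE OR REPLACE VIEW v AS SELECT a, b, a + b FROM t;   -- replaces its definition in one statement
```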

#### Bug Fix {#bug-fix-26}

- Avoid `std::terminate` in case of memory allocation failure. Now the `std::bad_alloc` exception is thrown as expected. [\#4665](https://github.com/ClickHouse/ClickHouse/pull/4665) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixes capnproto reading from buffer. Sometimes files weren’t loaded successfully over HTTP. [\#4674](https://github.com/ClickHouse/ClickHouse/pull/4674) ([Vladislav](https://github.com/smirnov-vs))
- Fix error `Unknown log entry type: 0` after an `OPTIMIZE TABLE FINAL` query. [\#4683](https://github.com/ClickHouse/ClickHouse/pull/4683) ([Amos Bird](https://github.com/amosbird))
- Wrong arguments to the `hasAny` or `hasAll` functions could lead to a segfault. [\#4698](https://github.com/ClickHouse/ClickHouse/pull/4698) ([alexey-milovidov](https://github.com/alexey-milovidov))
- A deadlock could happen while executing a `DROP DATABASE dictionary` query. [\#4701](https://github.com/ClickHouse/ClickHouse/pull/4701) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix undefined behavior in the `median` and `quantile` functions. [\#4702](https://github.com/ClickHouse/ClickHouse/pull/4702) ([hcz](https://github.com/hczhcz))
- Fix compression level detection when `network_compression_method` is in lowercase. Broken in v19.1. [\#4706](https://github.com/ClickHouse/ClickHouse/pull/4706) ([proller](https://github.com/proller))
- Fixed ignoring of the `UTC` setting (fixes issue [\#4658](https://github.com/ClickHouse/ClickHouse/issues/4658)). [\#4718](https://github.com/ClickHouse/ClickHouse/pull/4718) ([proller](https://github.com/proller))
- Fix `histogram` function behaviour with `Distributed` tables. [\#4741](https://github.com/ClickHouse/ClickHouse/pull/4741) ([olegkv](https://github.com/olegkv))
- Fixed tsan report `destroy of a locked mutex`. [\#4742](https://github.com/ClickHouse/ClickHouse/pull/4742) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a TSan report on shutdown due to a race condition in system logs usage. Fixed potential use-after-free on shutdown when part\_log is enabled. [\#4758](https://github.com/ClickHouse/ClickHouse/pull/4758) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix rechecking parts in `ReplicatedMergeTreeAlterThread` in case of error. [\#4772](https://github.com/ClickHouse/ClickHouse/pull/4772) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Arithmetic operations on intermediate aggregate function states were not working for constant arguments (such as subquery results). [\#4776](https://github.com/ClickHouse/ClickHouse/pull/4776) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Always backquote column names in metadata. Otherwise it’s impossible to create a table with a column named `index` (the server won’t restart due to a malformed `ATTACH` query in metadata). [\#4782](https://github.com/ClickHouse/ClickHouse/pull/4782) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix crash in `ALTER ... MODIFY ORDER BY` on a `Distributed` table. [\#4790](https://github.com/ClickHouse/ClickHouse/pull/4790) ([TCeason](https://github.com/TCeason))
- Fix segfault in `JOIN ON` with enabled `enable_optimize_predicate_expression`. [\#4794](https://github.com/ClickHouse/ClickHouse/pull/4794) ([Winter Zhang](https://github.com/zhang2014))
- Fix bug with adding an extraneous row after consuming a protobuf message from Kafka. [\#4808](https://github.com/ClickHouse/ClickHouse/pull/4808) ([Vitaly Baranov](https://github.com/vitlibar))
- Fix crash of `JOIN` on not-nullable vs nullable column. Fix `NULLs` in right keys in `ANY JOIN` + `join_use_nulls`. [\#4815](https://github.com/ClickHouse/ClickHouse/pull/4815) ([Artem Zuikov](https://github.com/4ertus2))
- Fix segmentation fault in `clickhouse-copier`.
[\#4835](https://github.com/ClickHouse/ClickHouse/pull/4835) ([proller](https://github.com/proller))
- Fixed race condition in `SELECT` from `system.tables` if the table is renamed or altered concurrently. [\#4836](https://github.com/ClickHouse/ClickHouse/pull/4836) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a data race when fetching a data part that is already obsolete. [\#4839](https://github.com/ClickHouse/ClickHouse/pull/4839) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a rare data race that can happen during `RENAME` of a MergeTree family table. [\#4844](https://github.com/ClickHouse/ClickHouse/pull/4844) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed segmentation fault in the `arrayIntersect` function. It could happen if the function was called with mixed constant and ordinary arguments. [\#4847](https://github.com/ClickHouse/ClickHouse/pull/4847) ([Lixiang Qian](https://github.com/fancyqlx))
- Fixed reading from an `Array(LowCardinality)` column in the rare case when the column contained a long sequence of empty arrays. [\#4850](https://github.com/ClickHouse/ClickHouse/pull/4850) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fix crash in `FULL/RIGHT JOIN` when joining on nullable vs not-nullable columns. [\#4855](https://github.com/ClickHouse/ClickHouse/pull/4855) ([Artem Zuikov](https://github.com/4ertus2))
- Fix the `No message received` exception while fetching parts between replicas. [\#4856](https://github.com/ClickHouse/ClickHouse/pull/4856) ([alesapin](https://github.com/alesapin))
- Fixed the `arrayIntersect` function returning a wrong result in case of several repeated values in a single array. [\#4871](https://github.com/ClickHouse/ClickHouse/pull/4871) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fix a race condition during concurrent `ALTER COLUMN` queries that could lead to a server crash (fixes issue [\#3421](https://github.com/ClickHouse/ClickHouse/issues/3421)). [\#4592](https://github.com/ClickHouse/ClickHouse/pull/4592) ([Alex Zatelepin](https://github.com/ztlpn))
- Fix incorrect result in `FULL/RIGHT JOIN` with a const column. [\#4723](https://github.com/ClickHouse/ClickHouse/pull/4723) ([Artem Zuikov](https://github.com/4ertus2))
- Fix duplicates in `GLOBAL JOIN` with asterisk. [\#4705](https://github.com/ClickHouse/ClickHouse/pull/4705) ([Artem Zuikov](https://github.com/4ertus2))
- Fix parameter deduction in `ALTER MODIFY` of a column `CODEC` when the column type is not specified. [\#4883](https://github.com/ClickHouse/ClickHouse/pull/4883) ([alesapin](https://github.com/alesapin))
- Functions `cutQueryStringAndFragment()` and `queryStringAndFragment()` now work correctly when `URL` contains a fragment and no query. [\#4894](https://github.com/ClickHouse/ClickHouse/pull/4894) ([Vitaly Baranov](https://github.com/vitlibar))
- Fix a rare bug when the setting `min_bytes_to_use_direct_io` is greater than zero, which occurs when a thread has to seek backward in a column file. [\#4897](https://github.com/ClickHouse/ClickHouse/pull/4897) ([alesapin](https://github.com/alesapin))
- Fix wrong argument types for aggregate functions with `LowCardinality` arguments (fixes issue [\#4919](https://github.com/ClickHouse/ClickHouse/issues/4919)). [\#4922](https://github.com/ClickHouse/ClickHouse/pull/4922) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fix wrong name qualification in `GLOBAL JOIN`.
[\#4969](https://github.com/ClickHouse/ClickHouse/pull/4969) ([Artem Zuikov](https://github.com/4ertus2))
- Fix the `toISOWeek` function result for year 1970. [\#4988](https://github.com/ClickHouse/ClickHouse/pull/4988) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix `DROP`, `TRUNCATE` and `OPTIMIZE` query duplication when executed `ON CLUSTER` for the `ReplicatedMergeTree*` tables family. [\#4991](https://github.com/ClickHouse/ClickHouse/pull/4991) ([alesapin](https://github.com/alesapin))

#### Backward Incompatible Change {#backward-incompatible-change-8}

- Rename the setting `insert_sample_with_metadata` to `input_format_defaults_for_omitted_fields`. [\#4771](https://github.com/ClickHouse/ClickHouse/pull/4771) ([Artem Zuikov](https://github.com/4ertus2))
- Added setting `max_partitions_per_insert_block` (with value 100 by default). If an inserted block contains a larger number of partitions, an exception is thrown. Set it to 0 if you want to remove the limit (not recommended; see the sketch after this list). [\#4845](https://github.com/ClickHouse/ClickHouse/pull/4845) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Multi-search functions were renamed (`multiPosition` to `multiSearchAllPositions`, `multiSearch` to `multiSearchAny`, `firstMatch` to `multiSearchFirstIndex`). [\#4780](https://github.com/ClickHouse/ClickHouse/pull/4780) ([Danila Kutenin](https://github.com/danlark1))
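
A minimal sketch of working with the new limit:

```sql
-- An INSERT whose block spans more than this many partitions now throws.
SET max_partitions_per_insert_block = 100;  -- the default

-- Lifting the limit entirely (not recommended):
SET max_partitions_per_insert_block = 0;
```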

#### Performance Improvement {#performance-improvement-6}

- Optimize the Volnitsky searcher by inlining, giving about a 5-10% search improvement for queries with many needles or many similar bigrams. [\#4862](https://github.com/ClickHouse/ClickHouse/pull/4862) ([Danila Kutenin](https://github.com/danlark1))
- Fix a performance issue when the setting `use_uncompressed_cache` is greater than zero, which appeared when all read data was contained in the cache. [\#4913](https://github.com/ClickHouse/ClickHouse/pull/4913) ([alesapin](https://github.com/alesapin))

#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-10}

- Hardening the debug build: more granular memory mappings and ASLR; add memory protection for the mark cache and index. This allows finding more memory-stomping bugs in cases when ASan and MSan cannot do it. [\#4632](https://github.com/ClickHouse/ClickHouse/pull/4632) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Add support for cmake variables `ENABLE_PROTOBUF`, `ENABLE_PARQUET` and `ENABLE_BROTLI` which allow enabling/disabling the above features (same as we can do for librdkafka, mysql, etc). [\#4669](https://github.com/ClickHouse/ClickHouse/pull/4669) ([Silviu Caragea](https://github.com/silviucpp))
- Add ability to print the process list and stacktraces of all threads if some queries hang after a test run. [\#4675](https://github.com/ClickHouse/ClickHouse/pull/4675) ([alesapin](https://github.com/alesapin))
- Add retries on the `Connection loss` error in `clickhouse-test`. [\#4682](https://github.com/ClickHouse/ClickHouse/pull/4682) ([alesapin](https://github.com/alesapin))
- Add a FreeBSD build with vagrant and a build with thread sanitizer to the packager script. [\#4712](https://github.com/ClickHouse/ClickHouse/pull/4712) [\#4748](https://github.com/ClickHouse/ClickHouse/pull/4748) ([alesapin](https://github.com/alesapin))
- Now the user is asked for a password for the `'default'` user during installation. [\#4725](https://github.com/ClickHouse/ClickHouse/pull/4725) ([proller](https://github.com/proller))
- Suppress a warning in the `rdkafka` library. [\#4740](https://github.com/ClickHouse/ClickHouse/pull/4740) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Allow building without SSL. [\#4750](https://github.com/ClickHouse/ClickHouse/pull/4750) ([proller](https://github.com/proller))
- Add a way to launch the clickhouse-server image as a custom user. [\#4753](https://github.com/ClickHouse/ClickHouse/pull/4753) ([Mikhail f. Shiryaev](https://github.com/Felixoid))
- Upgrade contrib boost to 1.69. [\#4793](https://github.com/ClickHouse/ClickHouse/pull/4793) ([proller](https://github.com/proller))
- Disable usage of `mremap` when compiled with Thread Sanitizer. Surprisingly enough, TSan does not intercept `mremap` (though it does intercept `mmap` and `munmap`), which leads to false positives. Fixed the TSan report in stateful tests. [\#4859](https://github.com/ClickHouse/ClickHouse/pull/4859) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Add a test that checks using a format schema via the HTTP interface. [\#4864](https://github.com/ClickHouse/ClickHouse/pull/4864) ([Vitaly Baranov](https://github.com/vitlibar))

## ClickHouse release 19.4 {#clickhouse-release-19-4}

### ClickHouse release 19.4.4.33, 2019-04-17 {#clickhouse-release-19-4-4-33-2019-04-17}

#### Bug Fixes {#bug-fixes-7}

- Avoid `std::terminate` in case of memory allocation failure. Now the `std::bad_alloc` exception is thrown as expected. [\#4665](https://github.com/ClickHouse/ClickHouse/pull/4665) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixes capnproto reading from buffer. Sometimes files weren’t loaded successfully over HTTP. [\#4674](https://github.com/ClickHouse/ClickHouse/pull/4674) ([Vladislav](https://github.com/smirnov-vs))
- Fix error `Unknown log entry type: 0` after an `OPTIMIZE TABLE FINAL` query. [\#4683](https://github.com/ClickHouse/ClickHouse/pull/4683) ([Amos Bird](https://github.com/amosbird))
- Wrong arguments to the `hasAny` or `hasAll` functions could lead to a segfault. [\#4698](https://github.com/ClickHouse/ClickHouse/pull/4698) ([alexey-milovidov](https://github.com/alexey-milovidov))
- A deadlock could happen while executing a `DROP DATABASE dictionary` query. [\#4701](https://github.com/ClickHouse/ClickHouse/pull/4701) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix undefined behavior in the `median` and `quantile` functions. [\#4702](https://github.com/ClickHouse/ClickHouse/pull/4702) ([hcz](https://github.com/hczhcz))
- Fix compression level detection when `network_compression_method` is in lowercase. Broken in v19.1. [\#4706](https://github.com/ClickHouse/ClickHouse/pull/4706) ([proller](https://github.com/proller))
- Fixed ignoring of the `UTC` setting (fixes issue [\#4658](https://github.com/ClickHouse/ClickHouse/issues/4658)). [\#4718](https://github.com/ClickHouse/ClickHouse/pull/4718) ([proller](https://github.com/proller))
- Fix `histogram` function behaviour with `Distributed` tables. [\#4741](https://github.com/ClickHouse/ClickHouse/pull/4741) ([olegkv](https://github.com/olegkv))
- Fixed tsan report `destroy of a locked mutex`. [\#4742](https://github.com/ClickHouse/ClickHouse/pull/4742) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a TSan report on shutdown due to a race condition in system logs usage. Fixed potential use-after-free on shutdown when part\_log is enabled. [\#4758](https://github.com/ClickHouse/ClickHouse/pull/4758) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix rechecking parts in `ReplicatedMergeTreeAlterThread` in case of error.
- Fix recheck of parts in `ReplicatedMergeTreeAlterThread` in case of error. [\#4772](https://github.com/ClickHouse/ClickHouse/pull/4772) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Arithmetic operations on intermediate aggregate function states were not working for constant arguments (such as subquery results). [\#4776](https://github.com/ClickHouse/ClickHouse/pull/4776) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Always backquote column names in metadata. Otherwise it’s impossible to create a table with a column named `index` (the server won’t restart due to a malformed `ATTACH` query in metadata). [\#4782](https://github.com/ClickHouse/ClickHouse/pull/4782) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix crash in `ALTER ... MODIFY ORDER BY` on a `Distributed` table. [\#4790](https://github.com/ClickHouse/ClickHouse/pull/4790) ([TCeason](https://github.com/TCeason))
- Fix segfault in `JOIN ON` with enabled `enable_optimize_predicate_expression`. [\#4794](https://github.com/ClickHouse/ClickHouse/pull/4794) ([Winter Zhang](https://github.com/zhang2014))
- Fix bug with adding an extraneous row after consuming a protobuf message from Kafka. [\#4808](https://github.com/ClickHouse/ClickHouse/pull/4808) ([Vitaly Baranov](https://github.com/vitlibar))
- Fix segmentation fault in `clickhouse-copier`. [\#4835](https://github.com/ClickHouse/ClickHouse/pull/4835) ([proller](https://github.com/proller))
- Fixed race condition in `SELECT` from `system.tables` if the table is renamed or altered concurrently. [\#4836](https://github.com/ClickHouse/ClickHouse/pull/4836) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed data race when fetching a data part that is already obsolete. [\#4839](https://github.com/ClickHouse/ClickHouse/pull/4839) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed rare data race that can happen during `RENAME` of a MergeTree family table. [\#4844](https://github.com/ClickHouse/ClickHouse/pull/4844) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed segmentation fault in function `arrayIntersect`. The segmentation fault could happen if the function was called with mixed constant and ordinary arguments. [\#4847](https://github.com/ClickHouse/ClickHouse/pull/4847) ([Lixiang Qian](https://github.com/fancyqlx))
- Fixed reading from `Array(LowCardinality)` column in the rare case when the column contained a long sequence of empty arrays. [\#4850](https://github.com/ClickHouse/ClickHouse/pull/4850) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fix `No message received` exception while fetching parts between replicas. [\#4856](https://github.com/ClickHouse/ClickHouse/pull/4856) ([alesapin](https://github.com/alesapin))
- Fixed `arrayIntersect` function wrong result in case of several repeated values in a single array. [\#4871](https://github.com/ClickHouse/ClickHouse/pull/4871) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fix a race condition during concurrent `ALTER COLUMN` queries that could lead to a server crash (fixes issue [\#3421](https://github.com/ClickHouse/ClickHouse/issues/3421)). [\#4592](https://github.com/ClickHouse/ClickHouse/pull/4592) ([Alex Zatelepin](https://github.com/ztlpn))
- Fix parameter deduction in `ALTER MODIFY` of column `CODEC` when the column type is not specified. [\#4883](https://github.com/ClickHouse/ClickHouse/pull/4883) ([alesapin](https://github.com/alesapin))
- Functions `cutQueryStringAndFragment()` and `queryStringAndFragment()` now work correctly when `URL` contains a fragment and no query. [\#4894](https://github.com/ClickHouse/ClickHouse/pull/4894) ([Vitaly Baranov](https://github.com/vitlibar))
- Fix rare bug when setting `min_bytes_to_use_direct_io` is greater than zero, which occurs when a thread has to seek backward in a column file. [\#4897](https://github.com/ClickHouse/ClickHouse/pull/4897) ([alesapin](https://github.com/alesapin))
- Fix wrong argument types for aggregate functions with `LowCardinality` arguments (fixes issue [\#4919](https://github.com/ClickHouse/ClickHouse/issues/4919)). [\#4922](https://github.com/ClickHouse/ClickHouse/pull/4922) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fix function `toISOWeek` result for year 1970. [\#4988](https://github.com/ClickHouse/ClickHouse/pull/4988) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix `DROP`, `TRUNCATE` and `OPTIMIZE` queries duplication when executed `ON CLUSTER` for the `ReplicatedMergeTree*` tables family. [\#4991](https://github.com/ClickHouse/ClickHouse/pull/4991) ([alesapin](https://github.com/alesapin))

#### Improvements {#improvements-2}

- Keep ordinary, `DEFAULT`, `MATERIALIZED` and `ALIAS` columns in a single list (fixes issue [\#2867](https://github.com/ClickHouse/ClickHouse/issues/2867)). [\#4707](https://github.com/ClickHouse/ClickHouse/pull/4707) ([Alex Zatelepin](https://github.com/ztlpn))

### ClickHouse release 19.4.3.11, 2019-04-02 {#clickhouse-release-19-4-3-11-2019-04-02}

#### Bug Fixes {#bug-fixes-8}

- Fix crash in `FULL/RIGHT JOIN` when joining on nullable vs not nullable. [\#4855](https://github.com/ClickHouse/ClickHouse/pull/4855) ([Artem Zuikov](https://github.com/4ertus2))
- Fix segmentation fault in `clickhouse-copier`. [\#4835](https://github.com/ClickHouse/ClickHouse/pull/4835) ([proller](https://github.com/proller))

#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-11}

- Add a way to launch clickhouse-server image from a custom user. [\#4753](https://github.com/ClickHouse/ClickHouse/pull/4753) ([Mikhail f. Shiryaev](https://github.com/Felixoid))

### ClickHouse release 19.4.2.7, 2019-03-30 {#clickhouse-release-19-4-2-7-2019-03-30}

#### Bug Fixes {#bug-fixes-9}

- Fixed reading from `Array(LowCardinality)` column in the rare case when the column contained a long sequence of empty arrays. [\#4850](https://github.com/ClickHouse/ClickHouse/pull/4850) ([Nikolai Kochetov](https://github.com/KochetovNicolai))

### ClickHouse release 19.4.1.3, 2019-03-19 {#clickhouse-release-19-4-1-3-2019-03-19}

#### Bug Fixes {#bug-fixes-10}

- Fixed remote queries which contain both `LIMIT BY` and `LIMIT`. Previously, if `LIMIT BY` and `LIMIT` were used in a remote query, `LIMIT` could happen before `LIMIT BY`, which led to an over-filtered result. [\#4708](https://github.com/ClickHouse/ClickHouse/pull/4708) ([Constantin S. Pan](https://github.com/kvap))

### ClickHouse release 19.4.0.49, 2019-03-09 {#clickhouse-release-19-4-0-49-2019-03-09}

#### New Features {#new-features-5}

- Added full support for `Protobuf` format (input and output, nested data structures). [\#4174](https://github.com/ClickHouse/ClickHouse/pull/4174) [\#4493](https://github.com/ClickHouse/ClickHouse/pull/4493) ([Vitaly Baranov](https://github.com/vitlibar))
- Added bitmap functions with Roaring Bitmaps (see the example after this list). [\#4207](https://github.com/ClickHouse/ClickHouse/pull/4207) ([Andy Yang](https://github.com/andyyzh)) [\#4568](https://github.com/ClickHouse/ClickHouse/pull/4568) ([Vitaly Baranov](https://github.com/vitlibar))
- Parquet format support. [\#4448](https://github.com/ClickHouse/ClickHouse/pull/4448) ([proller](https://github.com/proller))
- N-gram distance was added for fuzzy string comparison. It is similar to q-gram metrics in the R language. [\#4466](https://github.com/ClickHouse/ClickHouse/pull/4466) ([Danila Kutenin](https://github.com/danlark1))
- Combine rules for graphite rollup from dedicated aggregation and retention patterns. [\#4426](https://github.com/ClickHouse/ClickHouse/pull/4426) ([Mikhail f. Shiryaev](https://github.com/Felixoid))
- Added `max_execution_speed` and `max_execution_speed_bytes` to limit resource usage. Added `min_execution_speed_bytes` setting to complement `min_execution_speed`. [\#4430](https://github.com/ClickHouse/ClickHouse/pull/4430) ([Winter Zhang](https://github.com/zhang2014))
- Implemented function `flatten` (see the example after this list). [\#4555](https://github.com/ClickHouse/ClickHouse/pull/4555) [\#4409](https://github.com/ClickHouse/ClickHouse/pull/4409) ([alexey-milovidov](https://github.com/alexey-milovidov), [kzon](https://github.com/kzon))
- Added functions `arrayEnumerateDenseRanked` and `arrayEnumerateUniqRanked` (like `arrayEnumerateUniq`, but they allow fine-tuning the array depth to look inside multidimensional arrays). [\#4475](https://github.com/ClickHouse/ClickHouse/pull/4475) ([proller](https://github.com/proller)) [\#4601](https://github.com/ClickHouse/ClickHouse/pull/4601) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Multiple JOINs with some restrictions: no asterisks, no complex aliases in ON/WHERE/GROUP BY/… [\#4462](https://github.com/ClickHouse/ClickHouse/pull/4462) ([Artem Zuikov](https://github.com/4ertus2))
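
For illustration, two of the new functions on hypothetical literal values (results shown in comments):

```sql
SELECT flatten([[[1, 2]], [[3, 4], [5]]]) AS flat;  -- [1, 2, 3, 4, 5]

SELECT bitmapToArray(bitmapAnd(bitmapBuild([1, 2, 3]), bitmapBuild([3, 4, 5]))) AS intersection;  -- [3]
```
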
#### Bug Fixes {#bug-fixes-11}

- This release also contains all bug fixes from 19.3 and 19.1.
- Fixed bug in data skipping indices: the order of granules after INSERT was incorrect. [\#4407](https://github.com/ClickHouse/ClickHouse/pull/4407) ([Nikita Vasilev](https://github.com/nikvas0))
- Fixed `set` index for `Nullable` and `LowCardinality` columns. Before this fix, a `set` index with a `Nullable` or `LowCardinality` column led to the error `Data type must be deserialized with multiple streams` while selecting. [\#4594](https://github.com/ClickHouse/ClickHouse/pull/4594) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Correctly set update\_time on full `executable` dictionary update. [\#4551](https://github.com/ClickHouse/ClickHouse/pull/4551) ([Tema Novikov](https://github.com/temoon))
- Fix broken progress bar in 19.3. [\#4627](https://github.com/ClickHouse/ClickHouse/pull/4627) ([filimonov](https://github.com/filimonov))
- Fixed inconsistent values of MemoryTracker when a memory region was shrunk, in certain cases. [\#4619](https://github.com/ClickHouse/ClickHouse/pull/4619) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed undefined behaviour in ThreadPool. [\#4612](https://github.com/ClickHouse/ClickHouse/pull/4612) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a very rare crash with the message `mutex lock failed: Invalid argument` that could happen when a MergeTree table was dropped concurrently with a SELECT. [\#4608](https://github.com/ClickHouse/ClickHouse/pull/4608) ([Alex Zatelepin](https://github.com/ztlpn))
- ODBC driver compatibility with `LowCardinality` data type. [\#4381](https://github.com/ClickHouse/ClickHouse/pull/4381) ([proller](https://github.com/proller))
- FreeBSD: Fixup for `AIOcontextPool: Found io_event with unknown id 0` error. [\#4438](https://github.com/ClickHouse/ClickHouse/pull/4438) ([urgordeadbeef](https://github.com/urgordeadbeef))
- `system.part_log` table was created regardless of configuration. [\#4483](https://github.com/ClickHouse/ClickHouse/pull/4483) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix undefined behaviour in `dictIsIn` function for cache dictionaries. [\#4515](https://github.com/ClickHouse/ClickHouse/pull/4515) ([alesapin](https://github.com/alesapin))
- Fixed a deadlock when a SELECT query locks the same table multiple times (e.g. from different threads or when executing multiple subqueries) and there is a concurrent DDL query. [\#4535](https://github.com/ClickHouse/ClickHouse/pull/4535) ([Alex Zatelepin](https://github.com/ztlpn))
- Disable compile\_expressions by default until we get our own `llvm` contrib and can test it with `clang` and `asan`. [\#4579](https://github.com/ClickHouse/ClickHouse/pull/4579) ([alesapin](https://github.com/alesapin))
- Prevent `std::terminate` when `invalidate_query` for a `clickhouse` external dictionary source has returned a wrong resultset (empty, or more than one row, or more than one column). Fixed the issue when `invalidate_query` was performed every five seconds regardless of the `lifetime`. [\#4583](https://github.com/ClickHouse/ClickHouse/pull/4583) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Avoid deadlock when the `invalidate_query` for a dictionary with `clickhouse` source involved the `system.dictionaries` table or the `Dictionaries` database (rare case). [\#4599](https://github.com/ClickHouse/ClickHouse/pull/4599) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixes for CROSS JOIN with empty WHERE. [\#4598](https://github.com/ClickHouse/ClickHouse/pull/4598) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed segfault in function `replicate` when a constant argument is passed. [\#4603](https://github.com/ClickHouse/ClickHouse/pull/4603) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix lambda function with predicate optimizer. [\#4408](https://github.com/ClickHouse/ClickHouse/pull/4408) ([Winter Zhang](https://github.com/zhang2014))
- Multiple JOINs, multiple fixes. [\#4595](https://github.com/ClickHouse/ClickHouse/pull/4595) ([Artem Zuikov](https://github.com/4ertus2))

#### Improvements {#improvements-3}

- Support aliases in the JOIN ON section for right table columns, as in the sketch after this list. [\#4412](https://github.com/ClickHouse/ClickHouse/pull/4412) ([Artem Zuikov](https://github.com/4ertus2))
- The result of multiple JOINs needs correct result names to be used in subselects. Replace flat aliases with source names in the result. [\#4474](https://github.com/ClickHouse/ClickHouse/pull/4474) ([Artem Zuikov](https://github.com/4ertus2))
- Improve push-down logic for joined statements. [\#4387](https://github.com/ClickHouse/ClickHouse/pull/4387) ([Ivan](https://github.com/abyss7))
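
A minimal sketch of an alias on the right table used inside `JOIN ON`; the tables `t1`, `t2` and their columns are hypothetical:

```sql
SELECT t1.id, r.value
FROM t1
JOIN t2 AS r ON t1.id = r.id;  -- the right-table alias `r` can now be referenced in ON
```
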
#### Performance Improvements {#performance-improvements-3}

- Improved heuristics of the “move to PREWHERE” optimization. [\#4405](https://github.com/ClickHouse/ClickHouse/pull/4405) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Use proper lookup tables that use HashTable’s API for 8-bit and 16-bit keys. [\#4536](https://github.com/ClickHouse/ClickHouse/pull/4536) ([Amos Bird](https://github.com/amosbird))
- Improved performance of string comparison. [\#4564](https://github.com/ClickHouse/ClickHouse/pull/4564) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Cleanup the distributed DDL queue in a separate thread so that it doesn’t slow down the main loop that processes distributed DDL tasks. [\#4502](https://github.com/ClickHouse/ClickHouse/pull/4502) ([Alex Zatelepin](https://github.com/ztlpn))
- When `min_bytes_to_use_direct_io` is set to 1, not every file was opened with O\_DIRECT mode because the data size to read was sometimes underestimated by the size of one compressed block. [\#4526](https://github.com/ClickHouse/ClickHouse/pull/4526) ([alexey-milovidov](https://github.com/alexey-milovidov))

#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-12}

- Added support for clang-9. [\#4604](https://github.com/ClickHouse/ClickHouse/pull/4604) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix wrong `__asm__` instructions (again). [\#4621](https://github.com/ClickHouse/ClickHouse/pull/4621) ([Konstantin Podshumok](https://github.com/podshumok))
- Add ability to specify settings for `clickhouse-performance-test` from the command line. [\#4437](https://github.com/ClickHouse/ClickHouse/pull/4437) ([alesapin](https://github.com/alesapin))
- Add dictionaries tests to integration tests. [\#4477](https://github.com/ClickHouse/ClickHouse/pull/4477) ([alesapin](https://github.com/alesapin))
- Added queries from the benchmark on the website to automated performance tests. [\#4496](https://github.com/ClickHouse/ClickHouse/pull/4496) ([alexey-milovidov](https://github.com/alexey-milovidov))
- `xxhash.h` does not exist in external lz4 because it is an implementation detail and its symbols are namespaced with the `XXH_NAMESPACE` macro. When lz4 is external, xxHash has to be external too, and the dependents have to link to it. [\#4495](https://github.com/ClickHouse/ClickHouse/pull/4495) ([Orivej Desh](https://github.com/orivej))
- Fixed a case when the `quantileTiming` aggregate function could be called with a negative or floating point argument (this fixes a fuzz test with undefined behaviour sanitizer). [\#4506](https://github.com/ClickHouse/ClickHouse/pull/4506) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Spelling error correction. [\#4531](https://github.com/ClickHouse/ClickHouse/pull/4531) ([sdk2](https://github.com/sdk2))
- Fix compilation on Mac. [\#4371](https://github.com/ClickHouse/ClickHouse/pull/4371) ([Vitaly Baranov](https://github.com/vitlibar))
- Build fixes for FreeBSD and various unusual build configurations. [\#4444](https://github.com/ClickHouse/ClickHouse/pull/4444) ([proller](https://github.com/proller))

## ClickHouse release 19.3 {#clickhouse-release-19-3}

### ClickHouse release 19.3.9.1, 2019-04-02 {#clickhouse-release-19-3-9-1-2019-04-02}

#### Bug Fixes {#bug-fixes-12}

- Fix crash in `FULL/RIGHT JOIN` when joining on nullable vs not nullable. [\#4855](https://github.com/ClickHouse/ClickHouse/pull/4855) ([Artem Zuikov](https://github.com/4ertus2))
- Fix segmentation fault in `clickhouse-copier`. [\#4835](https://github.com/ClickHouse/ClickHouse/pull/4835) ([proller](https://github.com/proller))
- Fixed reading from `Array(LowCardinality)` column in the rare case when the column contained a long sequence of empty arrays. [\#4850](https://github.com/ClickHouse/ClickHouse/pull/4850) ([Nikolai Kochetov](https://github.com/KochetovNicolai))

#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-13}

- Add a way to launch clickhouse-server image from a custom user. [\#4753](https://github.com/ClickHouse/ClickHouse/pull/4753) ([Mikhail f. Shiryaev](https://github.com/Felixoid))

### ClickHouse release 19.3.7, 2019-03-12 {#clickhouse-release-19-3-7-2019-03-12}

#### Bug fixes {#bug-fixes-13}

- Fixed error in \#3920. This error manifests itself as random cache corruption (messages `Unknown codec family code`, `Cannot seek through file`) and segfaults. This bug first appeared in version 19.1 and is present in versions up to 19.1.10 and 19.3.6. [\#4623](https://github.com/ClickHouse/ClickHouse/pull/4623) ([alexey-milovidov](https://github.com/alexey-milovidov))

### ClickHouse release 19.3.6, 2019-03-02 {#clickhouse-release-19-3-6-2019-03-02}

#### Bug fixes {#bug-fixes-14}

- When there are more than 1000 threads in a thread pool, `std::terminate` may happen on thread exit. [Azat Khuzhin](https://github.com/azat) [\#4485](https://github.com/ClickHouse/ClickHouse/pull/4485) [\#4505](https://github.com/ClickHouse/ClickHouse/pull/4505) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Now it’s possible to create `ReplicatedMergeTree*` tables with comments on columns without defaults, and tables with column codecs without comments and defaults. Also fix comparison of codecs. [\#4523](https://github.com/ClickHouse/ClickHouse/pull/4523) ([alesapin](https://github.com/alesapin))
- Fixed crash on JOIN with array or tuple. [\#4552](https://github.com/ClickHouse/ClickHouse/pull/4552) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed crash in clickhouse-copier with the message `ThreadStatus not created`. [\#4540](https://github.com/ClickHouse/ClickHouse/pull/4540) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed hangup on server shutdown if distributed DDLs were used. [\#4472](https://github.com/ClickHouse/ClickHouse/pull/4472) ([Alex Zatelepin](https://github.com/ztlpn))
- Incorrect column numbers were printed in the error message about text format parsing for columns with numbers greater than 10. [\#4484](https://github.com/ClickHouse/ClickHouse/pull/4484) ([alexey-milovidov](https://github.com/alexey-milovidov))

#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-3}

- Fixed build with AVX enabled. [\#4527](https://github.com/ClickHouse/ClickHouse/pull/4527) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Enable extended accounting and IO accounting based on a known-good version instead of the kernel under which it is compiled. [\#4541](https://github.com/ClickHouse/ClickHouse/pull/4541) ([nvartolomei](https://github.com/nvartolomei))
- Allow skipping the setting of core\_dump.size\_limit, warning instead of throwing if setting the limit fails. [\#4473](https://github.com/ClickHouse/ClickHouse/pull/4473) ([proller](https://github.com/proller))
- Removed the `inline` tags of `void readBinary(...)` in `Field.cpp`. Also merged redundant `namespace DB` blocks. [\#4530](https://github.com/ClickHouse/ClickHouse/pull/4530) ([hcz](https://github.com/hczhcz))

### ClickHouse release 19.3.5, 2019-02-21 {#clickhouse-release-19-3-5-2019-02-21}

#### Bug fixes {#bug-fixes-15}
- Fixed bug with large http insert queries processing. [\#4454](https://github.com/ClickHouse/ClickHouse/pull/4454) ([alesapin](https://github.com/alesapin))
- Fixed backward incompatibility with old versions due to a wrong implementation of the `send_logs_level` setting. [\#4445](https://github.com/ClickHouse/ClickHouse/pull/4445) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed backward incompatibility of table function `remote` introduced with column comments. [\#4446](https://github.com/ClickHouse/ClickHouse/pull/4446) ([alexey-milovidov](https://github.com/alexey-milovidov))

### ClickHouse release 19.3.4, 2019-02-16 {#clickhouse-release-19-3-4-2019-02-16}

#### Improvements {#improvements-4}

- Table index size is not accounted for memory limits when doing an `ATTACH TABLE` query. Avoided the possibility that a table cannot be attached after being detached. [\#4396](https://github.com/ClickHouse/ClickHouse/pull/4396) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Slightly raised the limit on max string and array size received from ZooKeeper. This makes it possible to continue working with an increased size of `CLIENT_JVMFLAGS=-Djute.maxbuffer=...` on ZooKeeper. [\#4398](https://github.com/ClickHouse/ClickHouse/pull/4398) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Allow repairing an abandoned replica even if it already has a huge number of nodes in its queue. [\#4399](https://github.com/ClickHouse/ClickHouse/pull/4399) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Add one required argument to the `SET` index (the maximum number of stored rows); see the example after the next list. [\#4386](https://github.com/ClickHouse/ClickHouse/pull/4386) ([Nikita Vasilev](https://github.com/nikvas0))

#### Bug Fixes {#bug-fixes-16}

- Fixed `WITH ROLLUP` result for group by a single `LowCardinality` key. [\#4384](https://github.com/ClickHouse/ClickHouse/pull/4384) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fixed bug in the set index (dropping a granule if it contains more than `max_rows` rows). [\#4386](https://github.com/ClickHouse/ClickHouse/pull/4386) ([Nikita Vasilev](https://github.com/nikvas0))
- A lot of FreeBSD build fixes. [\#4397](https://github.com/ClickHouse/ClickHouse/pull/4397) ([proller](https://github.com/proller))
- Fixed alias substitution in queries with a subquery containing the same alias (issue [\#4110](https://github.com/ClickHouse/ClickHouse/issues/4110)). [\#4351](https://github.com/ClickHouse/ClickHouse/pull/4351) ([Artem Zuikov](https://github.com/4ertus2))
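
A sketch of a `set` index with the now-required max-rows argument; the table, its columns, and the experimental-setting guard are illustrative assumptions for the 19.3 era:

```sql
SET allow_experimental_data_skipping_indices = 1;  -- assumed guard setting of this era

CREATE TABLE skip_index_example
(
    id UInt64,
    tag String,
    INDEX tag_set tag TYPE set(1000) GRANULARITY 4  -- set() now takes the max number of stored rows
)
ENGINE = MergeTree
ORDER BY id;
```
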
#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-4}

- Add ability to run `clickhouse-server` for stateless tests in a docker image. [\#4347](https://github.com/ClickHouse/ClickHouse/pull/4347) ([Vasily Nemkov](https://github.com/Enmk))

### ClickHouse release 19.3.3, 2019-02-13 {#clickhouse-release-19-3-3-2019-02-13}

#### New Features {#new-features-6}

- Added the `KILL MUTATION` statement that allows removing mutations that are for some reason stuck. Added `latest_failed_part`, `latest_fail_time`, `latest_fail_reason` fields to the `system.mutations` table for easier troubleshooting. [\#4287](https://github.com/ClickHouse/ClickHouse/pull/4287) ([Alex Zatelepin](https://github.com/ztlpn))
- Added aggregate function `entropy` which computes Shannon entropy. [\#4238](https://github.com/ClickHouse/ClickHouse/pull/4238) ([Quid37](https://github.com/Quid37))
- Added ability to send queries `INSERT INTO tbl VALUES (....` to the server without splitting on `query` and `data` parts. [\#4301](https://github.com/ClickHouse/ClickHouse/pull/4301) ([alesapin](https://github.com/alesapin))
- A generic implementation of the `arrayWithConstant` function was added. [\#4322](https://github.com/ClickHouse/ClickHouse/pull/4322) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Implemented `NOT BETWEEN` comparison operator (see the example after this list). [\#4228](https://github.com/ClickHouse/ClickHouse/pull/4228) ([Dmitry Naumov](https://github.com/nezed))
- Implement `sumMapFiltered` in order to be able to limit the number of keys for which values will be summed by `sumMap`. [\#4129](https://github.com/ClickHouse/ClickHouse/pull/4129) ([Léo Ercolanelli](https://github.com/ercolanelli-leo))
- Added support of `Nullable` types in the `mysql` table function. [\#4198](https://github.com/ClickHouse/ClickHouse/pull/4198) ([Emmanuel Donin de Rosière](https://github.com/edonin))
- Support for arbitrary constant expressions in the `LIMIT` clause. [\#4246](https://github.com/ClickHouse/ClickHouse/pull/4246) ([k3box](https://github.com/k3box))
- Added `topKWeighted` aggregate function that takes an additional argument with an (unsigned integer) weight. [\#4245](https://github.com/ClickHouse/ClickHouse/pull/4245) ([Andrew Golman](https://github.com/andrewgolman))
- `StorageJoin` now supports the `join_any_take_last_row` setting that allows overwriting existing values of the same key. [\#3973](https://github.com/ClickHouse/ClickHouse/pull/3973) ([Amos Bird](https://github.com/amosbird))
- Added function `toStartOfInterval`. [\#4304](https://github.com/ClickHouse/ClickHouse/pull/4304) ([Vitaly Baranov](https://github.com/vitlibar))
- Added `RowBinaryWithNamesAndTypes` format. [\#4200](https://github.com/ClickHouse/ClickHouse/pull/4200) ([Oleg V. Kozlyuk](https://github.com/DarkWanderer))
- Added `IPv4` and `IPv6` data types. More effective implementations of `IPv*` functions. [\#3669](https://github.com/ClickHouse/ClickHouse/pull/3669) ([Vasily Nemkov](https://github.com/Enmk))
- Added function `toStartOfTenMinutes()`. [\#4298](https://github.com/ClickHouse/ClickHouse/pull/4298) ([Vitaly Baranov](https://github.com/vitlibar))
- Added `Protobuf` output format. [\#4005](https://github.com/ClickHouse/ClickHouse/pull/4005) [\#4158](https://github.com/ClickHouse/ClickHouse/pull/4158) ([Vitaly Baranov](https://github.com/vitlibar))
- Added brotli support for the HTTP interface for data import (INSERTs). [\#4235](https://github.com/ClickHouse/ClickHouse/pull/4235) ([Mikhail](https://github.com/fandyushin))
- Added hints when a user makes a typo in a function name or type in the command line client. [\#4239](https://github.com/ClickHouse/ClickHouse/pull/4239) ([Danila Kutenin](https://github.com/danlark1))
- Added `Query-Id` to the Server’s HTTP Response header. [\#4231](https://github.com/ClickHouse/ClickHouse/pull/4231) ([Mikhail](https://github.com/fandyushin))
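
A few of the new features sketched on hypothetical values (expected results in comments):

```sql
SELECT number FROM numbers(10) WHERE number NOT BETWEEN 3 AND 7;  -- 0, 1, 2, 8, 9

SELECT toStartOfInterval(now(), INTERVAL 15 MINUTE) AS bucket;

SELECT sumMapFiltered([1, 3])(k, v)
FROM (SELECT [1, 2, 3] AS k, [10, 20, 30] AS v);  -- ([1, 3], [10, 30])
```
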
#### Experimental features {#experimental-features-2}

- Added `minmax` and `set` data skipping indices for the MergeTree table engines family. [\#4143](https://github.com/ClickHouse/ClickHouse/pull/4143) ([Nikita Vasilev](https://github.com/nikvas0))
- Added conversion of `CROSS JOIN` to `INNER JOIN` if possible. [\#4221](https://github.com/ClickHouse/ClickHouse/pull/4221) [\#4266](https://github.com/ClickHouse/ClickHouse/pull/4266) ([Artem Zuikov](https://github.com/4ertus2))

#### Bug Fixes {#bug-fixes-17}

- Fixed `Not found column` for duplicate columns in `JOIN ON` section. [\#4279](https://github.com/ClickHouse/ClickHouse/pull/4279) ([Artem Zuikov](https://github.com/4ertus2))
- Make `START REPLICATED SENDS` command start replicated sends. [\#4229](https://github.com/ClickHouse/ClickHouse/pull/4229) ([nvartolomei](https://github.com/nvartolomei))
- Fixed aggregate functions execution with `Array(LowCardinality)` arguments. [\#4055](https://github.com/ClickHouse/ClickHouse/pull/4055) ([KochetovNicolai](https://github.com/KochetovNicolai))
- Fixed wrong behaviour when doing an `INSERT ... SELECT ... FROM file(...)` query where the file has `CSVWithNames` or `TSVWithNames` format and the first data row is missing. [\#4297](https://github.com/ClickHouse/ClickHouse/pull/4297) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed crash on dictionary reload if the dictionary is not available. This bug appeared in 19.1.6. [\#4188](https://github.com/ClickHouse/ClickHouse/pull/4188) ([proller](https://github.com/proller))
- Fixed `ALL JOIN` with duplicates in the right table. [\#4184](https://github.com/ClickHouse/ClickHouse/pull/4184) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed segmentation fault with `use_uncompressed_cache=1` and an exception with wrong uncompressed size. This bug appeared in 19.1.6. [\#4186](https://github.com/ClickHouse/ClickHouse/pull/4186) ([alesapin](https://github.com/alesapin))
- Fixed `compile_expressions` bug with comparison of big (more than int16) dates. [\#4341](https://github.com/ClickHouse/ClickHouse/pull/4341) ([alesapin](https://github.com/alesapin))
- Fixed infinite loop when selecting from table function `numbers(0)`. [\#4280](https://github.com/ClickHouse/ClickHouse/pull/4280) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Temporarily disable predicate optimization for `ORDER BY`. [\#3890](https://github.com/ClickHouse/ClickHouse/pull/3890) ([Winter Zhang](https://github.com/zhang2014))
- Fixed `Illegal instruction` error when using base64 functions on old CPUs. This error has been reproduced only when ClickHouse was compiled with gcc-8. [\#4275](https://github.com/ClickHouse/ClickHouse/pull/4275) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed `No message received` error when interacting with the PostgreSQL ODBC Driver through a TLS connection. Also fixes a segfault when using the MySQL ODBC Driver. [\#4170](https://github.com/ClickHouse/ClickHouse/pull/4170) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed incorrect result when `Date` and `DateTime` arguments are used in branches of the conditional operator (function `if`). Added a generic case for function `if`. [\#4243](https://github.com/ClickHouse/ClickHouse/pull/4243) ([alexey-milovidov](https://github.com/alexey-milovidov))
- ClickHouse dictionaries now load within the `clickhouse` process. [\#4166](https://github.com/ClickHouse/ClickHouse/pull/4166) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed deadlock when `SELECT` from a table with `File` engine was retried after a `No such file or directory` error. [\#4161](https://github.com/ClickHouse/ClickHouse/pull/4161) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed race condition when selecting from `system.tables` may give a `table doesn't exist` error. [\#4313](https://github.com/ClickHouse/ClickHouse/pull/4313) ([alexey-milovidov](https://github.com/alexey-milovidov))
- `clickhouse-client` could segfault on exit while loading data for command line suggestions if it was run in interactive mode. [\#4317](https://github.com/ClickHouse/ClickHouse/pull/4317) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a bug when the execution of mutations containing `IN` operators was producing incorrect results. [\#4099](https://github.com/ClickHouse/ClickHouse/pull/4099) ([Alex Zatelepin](https://github.com/ztlpn))
- Fixed an error: if there is a database with `Dictionary` engine, all dictionaries are forced to load at server startup, and if there is a dictionary with ClickHouse source from localhost, that dictionary cannot load. [\#4255](https://github.com/ClickHouse/ClickHouse/pull/4255) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed an error when the system tried to create system logs again at server shutdown. [\#4254](https://github.com/ClickHouse/ClickHouse/pull/4254) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Correctly return the right type and properly handle locks in the `joinGet` function. [\#4153](https://github.com/ClickHouse/ClickHouse/pull/4153) ([Amos Bird](https://github.com/amosbird))
- Added `sumMapWithOverflow` function. [\#4151](https://github.com/ClickHouse/ClickHouse/pull/4151) ([Léo Ercolanelli](https://github.com/ercolanelli-leo))
- Fixed segfault with `allow_experimental_multiple_joins_emulation`. [52de2c](https://github.com/ClickHouse/ClickHouse/commit/52de2cd927f7b5257dd67e175f0a5560a48840d0) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed bug with incorrect `Date` and `DateTime` comparison. [\#4237](https://github.com/ClickHouse/ClickHouse/pull/4237) ([valexey](https://github.com/valexey))
- Fixed fuzz test under undefined behavior sanitizer: added parameter type check for the `quantile*Weighted` family of functions. [\#4145](https://github.com/ClickHouse/ClickHouse/pull/4145) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed rare race condition when removal of old data parts could fail with a `File not found` error. [\#4378](https://github.com/ClickHouse/ClickHouse/pull/4378) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix install package with missing /etc/clickhouse-server/config.xml. [\#4343](https://github.com/ClickHouse/ClickHouse/pull/4343) ([proller](https://github.com/proller))

#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-5}

- Debian package: correct the /etc/clickhouse-server/preprocessed link according to config. [\#4205](https://github.com/ClickHouse/ClickHouse/pull/4205) ([proller](https://github.com/proller))
- Various build fixes for FreeBSD. [\#4225](https://github.com/ClickHouse/ClickHouse/pull/4225) ([proller](https://github.com/proller))
- Added ability to create, fill and drop tables in perftest. [\#4220](https://github.com/ClickHouse/ClickHouse/pull/4220) ([alesapin](https://github.com/alesapin))
- Added a script to check for duplicate includes. [\#4326](https://github.com/ClickHouse/ClickHouse/pull/4326) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added ability to run queries by index in performance tests. [\#4264](https://github.com/ClickHouse/ClickHouse/pull/4264) ([alesapin](https://github.com/alesapin))
- A package with debug symbols is suggested to be installed. [\#4274](https://github.com/ClickHouse/ClickHouse/pull/4274) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Refactoring of performance-test. Better logging and signals handling. [\#4171](https://github.com/ClickHouse/ClickHouse/pull/4171) ([alesapin](https://github.com/alesapin))
- Added docs to anonymized Yandex.Metrika datasets. [\#4164](https://github.com/ClickHouse/ClickHouse/pull/4164) ([alesapin](https://github.com/alesapin))
- Added a tool for converting an old month-partitioned part to the custom-partitioned format. [\#4195](https://github.com/ClickHouse/ClickHouse/pull/4195) ([Alex Zatelepin](https://github.com/ztlpn))
- Added docs about two datasets in s3. [\#4144](https://github.com/ClickHouse/ClickHouse/pull/4144) ([alesapin](https://github.com/alesapin))
- Added a script which creates a changelog from pull requests descriptions. [\#4169](https://github.com/ClickHouse/ClickHouse/pull/4169) [\#4173](https://github.com/ClickHouse/ClickHouse/pull/4173) ([KochetovNicolai](https://github.com/KochetovNicolai))
- Added puppet module for ClickHouse. [\#4182](https://github.com/ClickHouse/ClickHouse/pull/4182) ([Maxim Fedotov](https://github.com/MaxFedotov))
- Added docs for a group of undocumented functions. [\#4168](https://github.com/ClickHouse/ClickHouse/pull/4168) ([Winter Zhang](https://github.com/zhang2014))
- ARM build fixes. [\#4210](https://github.com/ClickHouse/ClickHouse/pull/4210) [\#4306](https://github.com/ClickHouse/ClickHouse/pull/4306) [\#4291](https://github.com/ClickHouse/ClickHouse/pull/4291) ([proller](https://github.com/proller))
- Dictionary tests are now able to run from `ctest`. [\#4189](https://github.com/ClickHouse/ClickHouse/pull/4189) ([proller](https://github.com/proller))
- Now `/etc/ssl` is used as the default directory with SSL certificates. [\#4167](https://github.com/ClickHouse/ClickHouse/pull/4167) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added checking of SSE and AVX instructions at start. [\#4234](https://github.com/ClickHouse/ClickHouse/pull/4234) ([Igr](https://github.com/igron99))
- Init script will wait for the server until it starts. [\#4281](https://github.com/ClickHouse/ClickHouse/pull/4281) ([proller](https://github.com/proller))

#### Backward Incompatible Changes {#backward-incompatible-changes-1}

- Removed `allow_experimental_low_cardinality_type` setting. `LowCardinality` data types are production ready. [\#4323](https://github.com/ClickHouse/ClickHouse/pull/4323) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Reduce mark cache size and uncompressed cache size according to the available memory amount. [\#4240](https://github.com/ClickHouse/ClickHouse/pull/4240) ([Lopatin Konstantin](https://github.com/k-lopatin))
- Added keyword `INDEX` in `CREATE TABLE` query. A column with name `index` must be quoted with backticks or double quotes: `` `index` `` (see the example after this list). [\#4143](https://github.com/ClickHouse/ClickHouse/pull/4143) ([Nikita Vasilev](https://github.com/nikvas0))
- `sumMap` now promotes the result type instead of overflowing. The old `sumMap` behavior can be obtained by using the `sumMapWithOverflow` function. [\#4151](https://github.com/ClickHouse/ClickHouse/pull/4151) ([Léo Ercolanelli](https://github.com/ercolanelli-leo))
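
A sketch of the quoting now required for a column named `index` (the table and its columns are hypothetical):

```sql
CREATE TABLE with_index_column
(
    `index` UInt32,  -- `index` is now a keyword and must be quoted
    value String
)
ENGINE = MergeTree
ORDER BY `index`;
```
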
#### Performance Improvements {#performance-improvements-4}

- `std::sort` replaced by `pdqsort` for queries without `LIMIT`. [\#4236](https://github.com/ClickHouse/ClickHouse/pull/4236) ([Evgenii Pravda](https://github.com/kvinty))
- Now the server reuses threads from the global thread pool. This affects performance in some corner cases. [\#4150](https://github.com/ClickHouse/ClickHouse/pull/4150) ([alexey-milovidov](https://github.com/alexey-milovidov))

#### Improvements {#improvements-5}

- Implemented AIO support for FreeBSD. [\#4305](https://github.com/ClickHouse/ClickHouse/pull/4305) ([urgordeadbeef](https://github.com/urgordeadbeef))
- `SELECT * FROM a JOIN b USING a, b` now returns `a` and `b` columns only from the left table. [\#4141](https://github.com/ClickHouse/ClickHouse/pull/4141) ([Artem Zuikov](https://github.com/4ertus2))
- Allow the `-C` option of the client to work the same as the `-c` option. [\#4232](https://github.com/ClickHouse/ClickHouse/pull/4232) ([syominsergey](https://github.com/syominsergey))
- Now the option `--password` used without a value requires the password from stdin. [\#4230](https://github.com/ClickHouse/ClickHouse/pull/4230) ([BSD\_Conqueror](https://github.com/bsd-conqueror))
- Added highlighting of unescaped metacharacters in string literals that contain `LIKE` expressions or regexps. [\#4327](https://github.com/ClickHouse/ClickHouse/pull/4327) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added cancelling of HTTP read-only queries if the client socket goes away. [\#4213](https://github.com/ClickHouse/ClickHouse/pull/4213) ([nvartolomei](https://github.com/nvartolomei))
- Now the server reports progress to keep client connections alive. [\#4215](https://github.com/ClickHouse/ClickHouse/pull/4215) ([Ivan](https://github.com/abyss7))
- Slightly better message with the reason for an OPTIMIZE query with the `optimize_throw_if_noop` setting enabled. [\#4294](https://github.com/ClickHouse/ClickHouse/pull/4294) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added support of the `--version` option for the clickhouse server. [\#4251](https://github.com/ClickHouse/ClickHouse/pull/4251) ([Lopatin Konstantin](https://github.com/k-lopatin))
- Added `--help/-h` option to `clickhouse-server`. [\#4233](https://github.com/ClickHouse/ClickHouse/pull/4233) ([Yuriy Baranov](https://github.com/yurriy))
- Added support for scalar subqueries with an aggregate function state result (see the sketch after this list). [\#4348](https://github.com/ClickHouse/ClickHouse/pull/4348) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Improved server shutdown time and ALTERs waiting time. [\#4372](https://github.com/ClickHouse/ClickHouse/pull/4372) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added info about the replicated\_can\_become\_leader setting to system.replicas and added logging if the replica won’t try to become leader. [\#4379](https://github.com/ClickHouse/ClickHouse/pull/4379) ([Alex Zatelepin](https://github.com/ztlpn))
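
A minimal sketch of a scalar subquery yielding an aggregate function state that is then merged; the numbers and alias are illustrative:

```sql
SELECT uniqMerge(s) AS unique_count
FROM (SELECT (SELECT uniqState(number) FROM numbers(100)) AS s);  -- expected: 100
```
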
## ClickHouse release 19.1 {#clickhouse-release-19-1}

### ClickHouse release 19.1.14, 2019-03-14 {#clickhouse-release-19-1-14-2019-03-14}

- Fixed error `Column ... queried more than once` that may happen if the setting `asterisk_left_columns_only` is set to 1 in case of using `GLOBAL JOIN` with `SELECT *` (rare case). The issue does not exist in 19.3 and newer. [6bac7d8d](https://github.com/ClickHouse/ClickHouse/pull/4692/commits/6bac7d8d11a9b0d6de0b32b53c47eb2f6f8e7062) ([Artem Zuikov](https://github.com/4ertus2))

### ClickHouse release 19.1.13, 2019-03-12 {#clickhouse-release-19-1-13-2019-03-12}

This release contains exactly the same set of patches as 19.3.7.

### ClickHouse release 19.1.10, 2019-03-03 {#clickhouse-release-19-1-10-2019-03-03}

This release contains exactly the same set of patches as 19.3.6.

## ClickHouse release 19.1 {#clickhouse-release-19-1-1}

### ClickHouse release 19.1.9, 2019-02-21 {#clickhouse-release-19-1-9-2019-02-21}

#### Bug fixes {#bug-fixes-18}

- Fixed backward incompatibility with old versions due to a wrong implementation of the `send_logs_level` setting. [\#4445](https://github.com/ClickHouse/ClickHouse/pull/4445) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed backward incompatibility of table function `remote` introduced with column comments. [\#4446](https://github.com/ClickHouse/ClickHouse/pull/4446) ([alexey-milovidov](https://github.com/alexey-milovidov))

### ClickHouse release 19.1.8, 2019-02-16 {#clickhouse-release-19-1-8-2019-02-16}

#### Bug Fixes {#bug-fixes-19}

- Fix install package with missing /etc/clickhouse-server/config.xml. [\#4343](https://github.com/ClickHouse/ClickHouse/pull/4343) ([proller](https://github.com/proller))

## ClickHouse release 19.1 {#clickhouse-release-19-1-2}

### ClickHouse release 19.1.7, 2019-02-15 {#clickhouse-release-19-1-7-2019-02-15}

#### Bug Fixes {#bug-fixes-20}

- Correctly return the right type and properly handle locks in the `joinGet` function. [\#4153](https://github.com/ClickHouse/ClickHouse/pull/4153) ([Amos Bird](https://github.com/amosbird))
- Fixed an error when the system tried to create system logs again at server shutdown. [\#4254](https://github.com/ClickHouse/ClickHouse/pull/4254) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed an error: if there is a database with `Dictionary` engine, all dictionaries are forced to load at server startup, and if there is a dictionary with ClickHouse source from localhost, that dictionary cannot load. [\#4255](https://github.com/ClickHouse/ClickHouse/pull/4255) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a bug when the execution of mutations containing `IN` operators was producing incorrect results. [\#4099](https://github.com/ClickHouse/ClickHouse/pull/4099) ([Alex Zatelepin](https://github.com/ztlpn))
- `clickhouse-client` could segfault on exit while loading data for command line suggestions if it was run in interactive mode. [\#4317](https://github.com/ClickHouse/ClickHouse/pull/4317) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed race condition when selecting from `system.tables` may give a `table doesn't exist` error. [\#4313](https://github.com/ClickHouse/ClickHouse/pull/4313) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed deadlock when `SELECT` from a table with `File` engine was retried after a `No such file or directory` error. [\#4161](https://github.com/ClickHouse/ClickHouse/pull/4161) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed an issue: local ClickHouse dictionaries were loaded via TCP, but should load within the process. [\#4166](https://github.com/ClickHouse/ClickHouse/pull/4166) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed `No message received` error when interacting with the PostgreSQL ODBC Driver through a TLS connection. Also fixes a segfault when using the MySQL ODBC Driver. [\#4170](https://github.com/ClickHouse/ClickHouse/pull/4170) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Temporarily disable predicate optimization for `ORDER BY`. [\#3890](https://github.com/ClickHouse/ClickHouse/pull/3890) ([Winter Zhang](https://github.com/zhang2014))
- Fixed infinite loop when selecting from table function `numbers(0)`. [\#4280](https://github.com/ClickHouse/ClickHouse/pull/4280) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed `compile_expressions` bug with comparison of big (more than int16) dates. [\#4341](https://github.com/ClickHouse/ClickHouse/pull/4341) ([alesapin](https://github.com/alesapin))
- Fixed segmentation fault with `uncompressed_cache=1` and an exception with wrong uncompressed size. [\#4186](https://github.com/ClickHouse/ClickHouse/pull/4186) ([alesapin](https://github.com/alesapin))
- Fixed `ALL JOIN` with duplicates in the right table. [\#4184](https://github.com/ClickHouse/ClickHouse/pull/4184) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed wrong behaviour when doing an `INSERT ... SELECT ... FROM file(...)` query where the file has `CSVWithNames` or `TSVWithNames` format and the first data row is missing. [\#4297](https://github.com/ClickHouse/ClickHouse/pull/4297) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed aggregate functions execution with `Array(LowCardinality)` arguments. [\#4055](https://github.com/ClickHouse/ClickHouse/pull/4055) ([KochetovNicolai](https://github.com/KochetovNicolai))
- Debian package: correct the /etc/clickhouse-server/preprocessed link according to config. [\#4205](https://github.com/ClickHouse/ClickHouse/pull/4205) ([proller](https://github.com/proller))
- Fixed fuzz test under undefined behavior sanitizer: added parameter type check for the `quantile*Weighted` family of functions. [\#4145](https://github.com/ClickHouse/ClickHouse/pull/4145) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Make `START REPLICATED SENDS` command start replicated sends. [\#4229](https://github.com/ClickHouse/ClickHouse/pull/4229) ([nvartolomei](https://github.com/nvartolomei))
- Fixed `Not found column` for duplicate columns in JOIN ON section. [\#4279](https://github.com/ClickHouse/ClickHouse/pull/4279) ([Artem Zuikov](https://github.com/4ertus2))
- Now `/etc/ssl` is used as the default directory with SSL certificates. [\#4167](https://github.com/ClickHouse/ClickHouse/pull/4167) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed crash on dictionary reload if the dictionary is not available. [\#4188](https://github.com/ClickHouse/ClickHouse/pull/4188) ([proller](https://github.com/proller))
- Fixed bug with incorrect `Date` and `DateTime` comparison. [\#4237](https://github.com/ClickHouse/ClickHouse/pull/4237) ([valexey](https://github.com/valexey))
- Fixed incorrect result when `Date` and `DateTime` arguments are used in branches of the conditional operator (function `if`). Added a generic case for function `if`. [\#4243](https://github.com/ClickHouse/ClickHouse/pull/4243) ([alexey-milovidov](https://github.com/alexey-milovidov))

### ClickHouse release 19.1.6, 2019-01-24 {#clickhouse-release-19-1-6-2019-01-24}

#### New Features {#new-features-7}

- Custom per-column compression codecs for tables (see the example after this list). [\#3899](https://github.com/ClickHouse/ClickHouse/pull/3899) [\#4111](https://github.com/ClickHouse/ClickHouse/pull/4111) ([alesapin](https://github.com/alesapin), [Winter Zhang](https://github.com/zhang2014), [Anatoly](https://github.com/Sindbag))
- Added compression codec `Delta`. [\#4052](https://github.com/ClickHouse/ClickHouse/pull/4052) ([alesapin](https://github.com/alesapin))
- Allow to `ALTER` compression codecs. [\#4054](https://github.com/ClickHouse/ClickHouse/pull/4054) ([alesapin](https://github.com/alesapin))
- Added functions `left`, `right`, `trim`, `ltrim`, `rtrim`, `timestampadd`, `timestampsub` for SQL standard compatibility. [\#3826](https://github.com/ClickHouse/ClickHouse/pull/3826) ([Ivan Blinkov](https://github.com/blinkov))
- Support for writing to `HDFS` tables and the `hdfs` table function. [\#4084](https://github.com/ClickHouse/ClickHouse/pull/4084) ([alesapin](https://github.com/alesapin))
- Added functions to search for multiple constant strings in a big haystack: `multiPosition`, `multiSearch`, `firstMatch`, also with `-UTF8`, `-CaseInsensitive`, and `-CaseInsensitiveUTF8` variants. [\#4053](https://github.com/ClickHouse/ClickHouse/pull/4053) ([Danila Kutenin](https://github.com/danlark1))
- Pruning of unused shards if the `SELECT` query filters by sharding key (setting `optimize_skip_unused_shards`). [\#3851](https://github.com/ClickHouse/ClickHouse/pull/3851) ([Gleb Kanterov](https://github.com/kanterov), [Ivan](https://github.com/abyss7))
- Allow `Kafka` engine to ignore some number of parsing errors per block. [\#4094](https://github.com/ClickHouse/ClickHouse/pull/4094) ([Ivan](https://github.com/abyss7))
- Added support for `CatBoost` multiclass models evaluation. Function `modelEvaluate` returns a tuple with per-class raw predictions for multiclass models. `libcatboostmodel.so` should be built with [\#607](https://github.com/catboost/catboost/pull/607). [\#3959](https://github.com/ClickHouse/ClickHouse/pull/3959) ([KochetovNicolai](https://github.com/KochetovNicolai))
- Added functions `filesystemAvailable`, `filesystemFree`, `filesystemCapacity`. [\#4097](https://github.com/ClickHouse/ClickHouse/pull/4097) ([Boris Granveaud](https://github.com/bgranvea))
- Added hashing functions `xxHash64` and `xxHash32`. [\#3905](https://github.com/ClickHouse/ClickHouse/pull/3905) ([filimonov](https://github.com/filimonov))
- Added `gccMurmurHash` hashing function (GCC flavoured Murmur hash) which uses the same hash seed as [gcc](https://github.com/gcc-mirror/gcc/blob/41d6b10e96a1de98e90a7c0378437c3255814b16/libstdc%2B%2B-v3/include/bits/functional_hash.h#L191). [\#4000](https://github.com/ClickHouse/ClickHouse/pull/4000) ([sundyli](https://github.com/sundy-li))
- Added hashing functions `javaHash`, `hiveHash`. [\#3811](https://github.com/ClickHouse/ClickHouse/pull/3811) ([shangshujie365](https://github.com/shangshujie365))
- Added table function `remoteSecure`. The function works like `remote`, but uses a secure connection. [\#4088](https://github.com/ClickHouse/ClickHouse/pull/4088) ([proller](https://github.com/proller))
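
A sketch of per-column codecs and altering them; the table, its columns and the codec choices are hypothetical:

```sql
CREATE TABLE codecs_example
(
    dt DateTime CODEC(Delta, LZ4),  -- delta-encode, then compress with LZ4
    value Float64 CODEC(ZSTD)
)
ENGINE = MergeTree
ORDER BY dt;

ALTER TABLE codecs_example MODIFY COLUMN value Float64 CODEC(LZ4);
```
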
#### Experimental features {#experimental-features-3}

- Added multiple JOINs emulation (`allow_experimental_multiple_joins_emulation` setting). [\#3946](https://github.com/ClickHouse/ClickHouse/pull/3946) ([Artem Zuikov](https://github.com/4ertus2))

#### Bug Fixes {#bug-fixes-21}

- Made the `compiled_expression_cache_size` setting limited by default to lower memory consumption. [\#4041](https://github.com/ClickHouse/ClickHouse/pull/4041) ([alesapin](https://github.com/alesapin))
- Fix a bug that led to hangups in threads that perform ALTERs of Replicated tables and in the thread that updates configuration from ZooKeeper. [\#2947](https://github.com/ClickHouse/ClickHouse/issues/2947) [\#3891](https://github.com/ClickHouse/ClickHouse/issues/3891) [\#3934](https://github.com/ClickHouse/ClickHouse/pull/3934) ([Alex Zatelepin](https://github.com/ztlpn))
- Fixed a race condition when executing a distributed ALTER task. The race condition led to more than one replica trying to execute the task and all replicas except one failing with a ZooKeeper error. [\#3904](https://github.com/ClickHouse/ClickHouse/pull/3904) ([Alex Zatelepin](https://github.com/ztlpn))
- Fix a bug when `from_zk` config elements weren’t refreshed after a request to ZooKeeper timed out. [\#2947](https://github.com/ClickHouse/ClickHouse/issues/2947) [\#3947](https://github.com/ClickHouse/ClickHouse/pull/3947) ([Alex Zatelepin](https://github.com/ztlpn))
- Fix bug with wrong prefix for IPv4 subnet masks. [\#3945](https://github.com/ClickHouse/ClickHouse/pull/3945) ([alesapin](https://github.com/alesapin))
- Fixed crash (`std::terminate`) in rare cases when a new thread cannot be created due to exhausted resources. [\#3956](https://github.com/ClickHouse/ClickHouse/pull/3956) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a bug in `remote` table function execution when wrong restrictions were used in `getStructureOfRemoteTable`. [\#4009](https://github.com/ClickHouse/ClickHouse/pull/4009) ([alesapin](https://github.com/alesapin))
- Fix a leak of netlink sockets. They were placed in a pool where they were never deleted, and new sockets were created at the start of a new thread when all current sockets were in use. [\#4017](https://github.com/ClickHouse/ClickHouse/pull/4017) ([Alex Zatelepin](https://github.com/ztlpn))
- Fix bug with closing the `/proc/self/fd` directory earlier than all fds were read from `/proc` after forking the `odbc-bridge` subprocess. [\#4120](https://github.com/ClickHouse/ClickHouse/pull/4120) ([alesapin](https://github.com/alesapin))
- Fixed String to UInt monotonic conversion in case of usage of String in the primary key. [\#3870](https://github.com/ClickHouse/ClickHouse/pull/3870) ([Winter Zhang](https://github.com/zhang2014))
- Fixed error in calculation of integer conversion function monotonicity. [\#3921](https://github.com/ClickHouse/ClickHouse/pull/3921) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed segfault in `arrayEnumerateUniq`, `arrayEnumerateDense` functions in case of some invalid arguments. [\#3909](https://github.com/ClickHouse/ClickHouse/pull/3909) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix UB in StorageMerge. [\#3910](https://github.com/ClickHouse/ClickHouse/pull/3910) ([Amos Bird](https://github.com/amosbird))
- Fixed segfault in functions `addDays`, `subtractDays`. [\#3913](https://github.com/ClickHouse/ClickHouse/pull/3913) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed error: functions `round`, `floor`, `trunc`, `ceil` may return bogus results when executed on an integer argument and a large negative scale. [\#3914](https://github.com/ClickHouse/ClickHouse/pull/3914) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a bug induced by ‘kill query sync’ which led to a core dump. [\#3916](https://github.com/ClickHouse/ClickHouse/pull/3916) ([muVulDeePecker](https://github.com/fancyqlx))
- Fix bug with long delay after an empty replication queue. [\#3928](https://github.com/ClickHouse/ClickHouse/pull/3928) [\#3932](https://github.com/ClickHouse/ClickHouse/pull/3932) ([alesapin](https://github.com/alesapin))
- Fixed excessive memory usage in case of inserting into a table with a `LowCardinality` primary key. [\#3955](https://github.com/ClickHouse/ClickHouse/pull/3955) ([KochetovNicolai](https://github.com/KochetovNicolai))
- Fixed `LowCardinality` serialization for `Native` format in case of empty arrays. [\#3907](https://github.com/ClickHouse/ClickHouse/issues/3907) [\#4011](https://github.com/ClickHouse/ClickHouse/pull/4011) ([KochetovNicolai](https://github.com/KochetovNicolai))
- Fixed incorrect result while using distinct by a single LowCardinality numeric column. [\#3895](https://github.com/ClickHouse/ClickHouse/issues/3895) [\#4012](https://github.com/ClickHouse/ClickHouse/pull/4012) ([KochetovNicolai](https://github.com/KochetovNicolai))
- Fixed specialized aggregation with a LowCardinality key (in case when the `compile` setting is enabled). [\#3886](https://github.com/ClickHouse/ClickHouse/pull/3886) ([KochetovNicolai](https://github.com/KochetovNicolai))
- Fix user and password forwarding for replicated tables queries. [\#3957](https://github.com/ClickHouse/ClickHouse/pull/3957) ([alesapin](https://github.com/alesapin)) ([小路](https://github.com/nicelulu))
- Fixed very rare race condition that can happen when listing tables in the Dictionary database while reloading dictionaries. [\#3970](https://github.com/ClickHouse/ClickHouse/pull/3970) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed incorrect result when HAVING was used with ROLLUP or CUBE. [\#3756](https://github.com/ClickHouse/ClickHouse/issues/3756) [\#3837](https://github.com/ClickHouse/ClickHouse/pull/3837) ([Sam Chou](https://github.com/reflection))
- Fixed column aliases for queries with `JOIN ON` syntax and distributed tables. [\#3980](https://github.com/ClickHouse/ClickHouse/pull/3980) ([Winter Zhang](https://github.com/zhang2014))
- Fixed error in internal implementation of `quantileTDigest` (found by Artem Vakhrushev). This error never happens in ClickHouse and was relevant only for those who use the ClickHouse codebase as a library directly. [\#3935](https://github.com/ClickHouse/ClickHouse/pull/3935) ([alexey-milovidov](https://github.com/alexey-milovidov))

#### Improvements {#improvements-6}

- Support for `IF NOT EXISTS` in `ALTER TABLE ADD COLUMN` statements along with `IF EXISTS` in `DROP/MODIFY/CLEAR/COMMENT COLUMN`. [\#3900](https://github.com/ClickHouse/ClickHouse/pull/3900) ([Boris Granveaud](https://github.com/bgranvea))
- Function `parseDateTimeBestEffort`: support for formats `DD.MM.YYYY`, `DD.MM.YY`, `DD-MM-YYYY`, `DD-Mon-YYYY`, `DD/Month/YYYY` and similar (see the example after this list). [\#3922](https://github.com/ClickHouse/ClickHouse/pull/3922) ([alexey-milovidov](https://github.com/alexey-milovidov))
- `CapnProtoInputStream` now supports jagged structures. [\#4063](https://github.com/ClickHouse/ClickHouse/pull/4063) ([Odin Hultgren Van Der Horst](https://github.com/Miniwoffer))
- Usability improvement: added a check that the server process is started from the data directory’s owner. Do not allow starting the server from root if the data belongs to a non-root user. [\#3785](https://github.com/ClickHouse/ClickHouse/pull/3785) ([sergey-v-galtsev](https://github.com/sergey-v-galtsev))
- Better logic of checking required columns during analysis of queries with JOINs. [\#3930](https://github.com/ClickHouse/ClickHouse/pull/3930) ([Artem Zuikov](https://github.com/4ertus2))
- Decreased the number of connections in case of a large number of Distributed tables in a single server. [\#3726](https://github.com/ClickHouse/ClickHouse/pull/3726) ([Winter Zhang](https://github.com/zhang2014))
- Supported totals row for `WITH TOTALS` query for ODBC driver. [\#3836](https://github.com/ClickHouse/ClickHouse/pull/3836) ([Maksim Koritckiy](https://github.com/nightweb))
- Allowed to use `Enum`s as integers inside the if function. [\#3875](https://github.com/ClickHouse/ClickHouse/pull/3875) ([Ivan](https://github.com/abyss7))
- Added `low_cardinality_allow_in_native_format` setting. If disabled, do not use the `LowCardinality` type in `Native` format. [\#3879](https://github.com/ClickHouse/ClickHouse/pull/3879) ([KochetovNicolai](https://github.com/KochetovNicolai))
- Removed some redundant objects from compiled expressions cache to lower memory usage. [\#4042](https://github.com/ClickHouse/ClickHouse/pull/4042) ([alesapin](https://github.com/alesapin))
- Added check that a `SET send_logs_level = 'value'` query accepts an appropriate value. [\#3873](https://github.com/ClickHouse/ClickHouse/pull/3873) ([Sabyanin Maxim](https://github.com/s-mx))
- Fixed data type check in type conversion functions. [\#3896](https://github.com/ClickHouse/ClickHouse/pull/3896) ([Winter Zhang](https://github.com/zhang2014))
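
Two of the newly supported `parseDateTimeBestEffort` formats, as an illustration on hypothetical literals:

```sql
SELECT
    parseDateTimeBestEffort('24.12.2018 12:30:00') AS dd_mm_yyyy,
    parseDateTimeBestEffort('24-Dec-2018') AS dd_mon_yyyy;
```
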

#### Performance Improvements {#performance-improvements-5}

- Added a MergeTree setting `use_minimalistic_part_header_in_zookeeper`. If enabled, Replicated tables will store compact part metadata in a single part znode. This can dramatically reduce ZooKeeper snapshot size (especially if the tables have a lot of columns). Note that after enabling this setting you will not be able to downgrade to a version that doesn’t support it (see the example after this list). [\#3960](https://github.com/ClickHouse/ClickHouse/pull/3960) ([Alex Zatelepin](https://github.com/ztlpn))
- Added a DFA-based implementation of the functions `sequenceMatch` and `sequenceCount` for the case when the pattern doesn’t contain time. [\#4004](https://github.com/ClickHouse/ClickHouse/pull/4004) ([Léo Ercolanelli](https://github.com/ercolanelli-leo))
- Performance improvement for integer number serialization. [\#3968](https://github.com/ClickHouse/ClickHouse/pull/3968) ([Amos Bird](https://github.com/amosbird))
- Zero left padding of PODArray so that the -1 element is always valid and zeroed. It’s used for branchless calculation of offsets. [\#3920](https://github.com/ClickHouse/ClickHouse/pull/3920) ([Amos Bird](https://github.com/amosbird))
- Reverted the `jemalloc` version which led to performance degradation. [\#4018](https://github.com/ClickHouse/ClickHouse/pull/4018) ([alexey-milovidov](https://github.com/alexey-milovidov))
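The setting can be enabled per table in the `SETTINGS` clause at creation time. A sketch with hypothetical table, ZooKeeper path and replica names; keep in mind the one-way nature of the change described above:

```sql
CREATE TABLE hits_replica
(
    `CounterID` UInt32,
    `EventDate` Date
)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/01/hits', 'replica1')
ORDER BY (CounterID, EventDate)
SETTINGS use_minimalistic_part_header_in_zookeeper = 1
```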

#### Backward Incompatible Changes {#backward-incompatible-changes-2}

- Removed the undocumented feature `ALTER MODIFY PRIMARY KEY`, because it was superseded by the `ALTER MODIFY ORDER BY` command (see the example after this list). [\#3887](https://github.com/ClickHouse/ClickHouse/pull/3887) ([Alex Zatelepin](https://github.com/ztlpn))
- Removed the function `shardByHash`. [\#3833](https://github.com/ClickHouse/ClickHouse/pull/3833) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Forbid using scalar subqueries with a result of type `AggregateFunction`. [\#3865](https://github.com/ClickHouse/ClickHouse/pull/3865) ([Ivan](https://github.com/abyss7))
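For reference, the replacement command changes the sorting key directly. A sketch for a hypothetical table `t` whose current sorting key is `(a, b)`; the key may only be extended by columns added in the same `ALTER` query:

```sql
ALTER TABLE t
    ADD COLUMN c UInt32 DEFAULT 0,
    MODIFY ORDER BY (a, b, c)
```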

#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-6}

- Added support for PowerPC (`ppc64le`) builds. [\#4132](https://github.com/ClickHouse/ClickHouse/pull/4132) ([Danila Kutenin](https://github.com/danlark1))
- Stateful functional tests are run on a publicly available dataset. [\#3969](https://github.com/ClickHouse/ClickHouse/pull/3969) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed an error when the server cannot start with the `bash: /usr/bin/clickhouse-extract-from-config: Operation not permitted` message within Docker or systemd-nspawn. [\#4136](https://github.com/ClickHouse/ClickHouse/pull/4136) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Updated the `rdkafka` library to v1.0.0-RC5. Used cppkafka instead of the raw C interface. [\#4025](https://github.com/ClickHouse/ClickHouse/pull/4025) ([Ivan](https://github.com/abyss7))
- Updated the `mariadb-client` library. Fixed one of the issues found by UBSan. [\#3924](https://github.com/ClickHouse/ClickHouse/pull/3924) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Some fixes for UBSan builds. [\#3926](https://github.com/ClickHouse/ClickHouse/pull/3926) [\#3021](https://github.com/ClickHouse/ClickHouse/pull/3021) [\#3948](https://github.com/ClickHouse/ClickHouse/pull/3948) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added per-commit runs of tests with the UBSan build.
- Added per-commit runs of the PVS-Studio static analyzer.
- Fixed bugs found by PVS-Studio. [\#4013](https://github.com/ClickHouse/ClickHouse/pull/4013) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed glibc compatibility issues. [\#4100](https://github.com/ClickHouse/ClickHouse/pull/4100) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Moved Docker images to 18.10 and added a compatibility file for glibc \>= 2.28. [\#3965](https://github.com/ClickHouse/ClickHouse/pull/3965) ([alesapin](https://github.com/alesapin))
- Added an env variable for users who don’t want to chown directories in the server Docker image. [\#3967](https://github.com/ClickHouse/ClickHouse/pull/3967) ([alesapin](https://github.com/alesapin))
- Enabled most of the warnings from `-Weverything` in clang. Enabled `-Wpedantic`. [\#3986](https://github.com/ClickHouse/ClickHouse/pull/3986) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added a few more warnings that are available only in clang 8. [\#3993](https://github.com/ClickHouse/ClickHouse/pull/3993) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Link to `libLLVM` rather than to individual LLVM libs when using shared linking. [\#3989](https://github.com/ClickHouse/ClickHouse/pull/3989) ([Orivej Desh](https://github.com/orivej))
- Added sanitizer variables for test images. [\#4072](https://github.com/ClickHouse/ClickHouse/pull/4072) ([alesapin](https://github.com/alesapin))
- The `clickhouse-server` debian package will recommend the `libcap2-bin` package, which provides the `setcap` tool for setting capabilities. This is optional. [\#4093](https://github.com/ClickHouse/ClickHouse/pull/4093) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Improved compilation time, fixed includes. [\#3898](https://github.com/ClickHouse/ClickHouse/pull/3898) ([proller](https://github.com/proller))
- Added performance tests for hash functions. [\#3918](https://github.com/ClickHouse/ClickHouse/pull/3918) ([filimonov](https://github.com/filimonov))
- Fixed cyclic library dependencies. [\#3958](https://github.com/ClickHouse/ClickHouse/pull/3958) ([proller](https://github.com/proller))
- Improved compilation with low available memory. [\#4030](https://github.com/ClickHouse/ClickHouse/pull/4030) ([proller](https://github.com/proller))
- Added a test script to reproduce performance degradation in `jemalloc`. [\#4036](https://github.com/ClickHouse/ClickHouse/pull/4036) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed misspellings in comments and string literals under `dbms`. [\#4122](https://github.com/ClickHouse/ClickHouse/pull/4122) ([maiha](https://github.com/maiha))
- Fixed typos in comments. [\#4089](https://github.com/ClickHouse/ClickHouse/pull/4089) ([Evgenii Pravda](https://github.com/kvinty))

## [Changelog for 2018](https://github.com/ClickHouse/ClickHouse/blob/master/docs/en/changelog/2018.md)

diff --git a/docs/zh/changelog/index.md b/docs/zh/changelog/index.md deleted file mode 120000 index 79b747aee1b..00000000000 --- a/docs/zh/changelog/index.md +++ /dev/null @@ -1 +0,0 @@ -../../../CHANGELOG.md \ No newline at end of file diff --git a/docs/zh/changelog/index.md b/docs/zh/changelog/index.md new file mode 100644 index 00000000000..90bb7abe0b0 --- /dev/null +++ b/docs/zh/changelog/index.md @@ -0,0 +1,665 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +--- + +## ClickHouse版本v20.3 {#clickhouse-release-v20-3} + +### ClickHouse版本v20.3.4.10,2020-03-20 {#clickhouse-release-v20-3-4-10-2020-03-20} + +#### 错误修复 {#bug-fix} + +- 此版本还包含20.1.8.41的所有错误修复 +- 修复丢失 `rows_before_limit_at_least` 用于通过http进行查询(使用处理器管道)。 这修复 [\#9730](https://github.com/ClickHouse/ClickHouse/issues/9730). [\#9757](https://github.com/ClickHouse/ClickHouse/pull/9757) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) + +### ClickHouse版本v20.3.3.6,2020-03-17 {#clickhouse-release-v20-3-3-6-2020-03-17} + +#### 错误修复 {#bug-fix-1} + +- 此版本还包含20.1.7.38的所有错误修复 +- 修复复制中的错误,如果用户在以前的版本上执行了突变,则不允许复制工作。 这修复 [\#9645](https://github.com/ClickHouse/ClickHouse/issues/9645). [\#9652](https://github.com/ClickHouse/ClickHouse/pull/9652) ([阿利沙平](https://github.com/alesapin)). 它使版本20.3再次向后兼容。 +- 添加设置 `use_compact_format_in_distributed_parts_names` 它允许写文件 `INSERT` 查询到 `Distributed` 表格格式更紧凑。 这修复 [\#9647](https://github.com/ClickHouse/ClickHouse/issues/9647). [\#9653](https://github.com/ClickHouse/ClickHouse/pull/9653) ([阿利沙平](https://github.com/alesapin)). 它使版本20.3再次向后兼容。 + +### ClickHouse版本v20.3.2.1,2020-03-12 {#clickhouse-release-v20-3-2-1-2020-03-12} + +#### 向后不兼容的更改 {#backward-incompatible-change} + +- 修正了这个问题 `file name too long` 当发送数据 `Distributed` 大量副本的表。 修复了服务器日志中显示副本凭据的问题。 磁盘上的目录名格式已更改为 `[shard{shard_index}[_replica{replica_index}]]`. [\#8911](https://github.com/ClickHouse/ClickHouse/pull/8911) ([米哈伊尔\*科罗托夫](https://github.com/millb))升级到新版本后,您将无法在没有人工干预的情况下降级,因为旧的服务器版本无法识别新的目录格式。 如果要降级,则必须手动将相应的目录重命名为旧格式。 仅当您使用了异步时,此更改才相关 `INSERT`s到 `Distributed` 表。 在版本20.3.3中,我们将介绍一个设置,让您逐渐启用新格式。 +- 更改了mutation命令的复制日志条目的格式。 在安装新版本之前,您必须等待旧的突变处理。 +- 实现简单的内存分析器,将堆栈跟踪转储到 `system.trace_log` 超过软分配限制的每N个字节 [\#8765](https://github.com/ClickHouse/ClickHouse/pull/8765) ([伊万](https://github.com/abyss7)) [\#9472](https://github.com/ClickHouse/ClickHouse/pull/9472) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov))列 `system.trace_log` 从改名 `timer_type` 到 `trace_type`.
这将需要改变第三方性能分析和flamegraph处理工具。 +- 在任何地方使用操作系统线程id,而不是内部线程编号。 这修复 [\#7477](https://github.com/ClickHouse/ClickHouse/issues/7477) 老 `clickhouse-client` 无法接收从服务器发送的日志,当设置 `send_logs_level` 已启用,因为结构化日志消息的名称和类型已更改。 另一方面,不同的服务器版本可以相互发送不同类型的日志。 当你不使用 `send_logs_level` 设置,你不应该关心。 [\#8954](https://github.com/ClickHouse/ClickHouse/pull/8954) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 删除 `indexHint` 功能 [\#9542](https://github.com/ClickHouse/ClickHouse/pull/9542) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 删除 `findClusterIndex`, `findClusterValue` 功能。 这修复 [\#8641](https://github.com/ClickHouse/ClickHouse/issues/8641). 如果您正在使用这些功能,请发送电子邮件至 `clickhouse-feedback@yandex-team.com` [\#9543](https://github.com/ClickHouse/ClickHouse/pull/9543) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 现在不允许创建列或添加列 `SELECT` 子查询作为默认表达式。 [\#9481](https://github.com/ClickHouse/ClickHouse/pull/9481) ([阿利沙平](https://github.com/alesapin)) +- 需要联接中的子查询的别名。 [\#9274](https://github.com/ClickHouse/ClickHouse/pull/9274) ([Artem Zuikov](https://github.com/4ertus2)) +- 改进 `ALTER MODIFY/ADD` 查询逻辑。 现在你不能 `ADD` 不带类型的列, `MODIFY` 默认表达式不改变列的类型和 `MODIFY` type不会丢失默认表达式值。 修复 [\#8669](https://github.com/ClickHouse/ClickHouse/issues/8669). [\#9227](https://github.com/ClickHouse/ClickHouse/pull/9227) ([阿利沙平](https://github.com/alesapin)) +- 要求重新启动服务器以应用日志记录配置中的更改。 这是一种临时解决方法,可以避免服务器将日志记录到已删除的日志文件中的错误(请参阅 [\#8696](https://github.com/ClickHouse/ClickHouse/issues/8696)). [\#8707](https://github.com/ClickHouse/ClickHouse/pull/8707) ([Alexander Kuzmenkov](https://github.com/akuzm)) +- 设置 `experimental_use_processors` 默认情况下启用。 此设置允许使用新的查询管道。 这是内部重构,我们期望没有明显的变化。 如果您将看到任何问题,请将其设置为返回零。 [\#8768](https://github.com/ClickHouse/ClickHouse/pull/8768) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) + +#### 新功能 {#new-feature} + +- 添加 `Avro` 和 `AvroConfluent` 输入/输出格式 [\#8571](https://github.com/ClickHouse/ClickHouse/pull/8571) ([安德鲁Onyshchuk](https://github.com/oandrew)) [\#8957](https://github.com/ClickHouse/ClickHouse/pull/8957) ([安德鲁Onyshchuk](https://github.com/oandrew)) [\#8717](https://github.com/ClickHouse/ClickHouse/pull/8717) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 过期密钥的多线程和非阻塞更新 `cache` 字典(可选的权限读取旧的)。 [\#8303](https://github.com/ClickHouse/ClickHouse/pull/8303) ([尼基塔\*米哈伊洛夫](https://github.com/nikitamikhaylov)) +- 添加查询 `ALTER ... MATERIALIZE TTL`. 它运行突变,强制通过TTL删除过期的数据,并重新计算所有部分有关ttl的元信息。 [\#8775](https://github.com/ClickHouse/ClickHouse/pull/8775) ([安东\*波波夫](https://github.com/CurtizJ)) +- 如果需要,从HashJoin切换到MergeJoin(在磁盘上 [\#9082](https://github.com/ClickHouse/ClickHouse/pull/9082) ([Artem Zuikov](https://github.com/4ertus2)) +- 已添加 `MOVE PARTITION` 命令 `ALTER TABLE` [\#4729](https://github.com/ClickHouse/ClickHouse/issues/4729) [\#6168](https://github.com/ClickHouse/ClickHouse/pull/6168) ([纪尧姆\*塔瑟里](https://github.com/YiuRULE)) +- 动态地从配置文件重新加载存储配置。 [\#8594](https://github.com/ClickHouse/ClickHouse/pull/8594) ([Vladimir Chebotarev](https://github.com/excitoon)) +- 允许更改 `storage_policy` 为了不那么富有的人。 [\#8107](https://github.com/ClickHouse/ClickHouse/pull/8107) ([Vladimir Chebotarev](https://github.com/excitoon)) +- 增加了对s3存储和表功能的globs/通配符的支持。 [\#8851](https://github.com/ClickHouse/ClickHouse/pull/8851) ([Vladimir Chebotarev](https://github.com/excitoon)) +- 执行 `bitAnd`, `bitOr`, `bitXor`, `bitNot` 为 `FixedString(N)` 数据类型。 [\#9091](https://github.com/ClickHouse/ClickHouse/pull/9091) ([纪尧姆\*塔瑟里](https://github.com/YiuRULE)) +- 添加功能 `bitCount`. 这修复 [\#8702](https://github.com/ClickHouse/ClickHouse/issues/8702). 
[\#8708](https://github.com/ClickHouse/ClickHouse/pull/8708) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) [\#8749](https://github.com/ClickHouse/ClickHouse/pull/8749) ([ikopylov](https://github.com/ikopylov)) +- 添加 `generateRandom` 表函数生成具有给定模式的随机行。 允许用数据填充任意测试表。 [\#8994](https://github.com/ClickHouse/ClickHouse/pull/8994) ([Ilya Yatsishin](https://github.com/qoega)) +- `JSONEachRowFormat`:当对象包含在顶层数组中时,支持特殊情况。 [\#8860](https://github.com/ClickHouse/ClickHouse/pull/8860) ([克鲁格洛夫\*帕维尔](https://github.com/Avogar)) +- 现在可以创建一个列 `DEFAULT` 取决于默认列的表达式 `ALIAS` 表达。 [\#9489](https://github.com/ClickHouse/ClickHouse/pull/9489) ([阿利沙平](https://github.com/alesapin)) +- 允许指定 `--limit` 超过源数据大小 `clickhouse-obfuscator`. 数据将以不同的随机种子重复。 [\#9155](https://github.com/ClickHouse/ClickHouse/pull/9155) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 已添加 `groupArraySample` 功能(类似于 `groupArray`)与reservior采样算法。 [\#8286](https://github.com/ClickHouse/ClickHouse/pull/8286) ([阿莫斯鸟](https://github.com/amosbird)) +- 现在,您可以监视更新队列的大小 `cache`/`complex_key_cache` 通过系统指标字典。 [\#9413](https://github.com/ClickHouse/ClickHouse/pull/9413) ([尼基塔\*米哈伊洛夫](https://github.com/nikitamikhaylov)) +- 允许使用CRLF作为CSV输出格式的行分隔符与设置 `output_format_csv_crlf_end_of_line` 设置为1 [\#8934](https://github.com/ClickHouse/ClickHouse/pull/8934) [\#8935](https://github.com/ClickHouse/ClickHouse/pull/8935) [\#8963](https://github.com/ClickHouse/ClickHouse/pull/8963) ([米哈伊尔\*科罗托夫](https://github.com/millb)) +- 实现的更多功能 [H3](https://github.com/uber/h3) API: `h3GetBaseCell`, `h3HexAreaM2`, `h3IndexesAreNeighbors`, `h3ToChildren`, `h3ToString` 和 `stringToH3` [\#8938](https://github.com/ClickHouse/ClickHouse/pull/8938) ([Nico Mandery](https://github.com/nmandery)) +- 引入新设置: `max_parser_depth` 控制最大堆栈大小并允许大型复杂查询。 这修复 [\#6681](https://github.com/ClickHouse/ClickHouse/issues/6681) 和 [\#7668](https://github.com/ClickHouse/ClickHouse/issues/7668). [\#8647](https://github.com/ClickHouse/ClickHouse/pull/8647) ([马克西姆\*斯米尔诺夫](https://github.com/qMBQx8GH)) +- 添加设置 `force_optimize_skip_unused_shards` 如果无法跳过未使用的分片,则设置为抛出 [\#8805](https://github.com/ClickHouse/ClickHouse/pull/8805) ([Azat Khuzhin](https://github.com/azat)) +- 允许配置多个磁盘/卷用于存储数据发送 `Distributed` 发动机 [\#8756](https://github.com/ClickHouse/ClickHouse/pull/8756) ([Azat Khuzhin](https://github.com/azat)) +- 支持存储策略 (``)用于存储临时数据。 [\#8750](https://github.com/ClickHouse/ClickHouse/pull/8750) ([Azat Khuzhin](https://github.com/azat)) +- 已添加 `X-ClickHouse-Exception-Code` 如果在发送数据之前引发异常,则设置的HTTP头。 这实现了 [\#4971](https://github.com/ClickHouse/ClickHouse/issues/4971). [\#8786](https://github.com/ClickHouse/ClickHouse/pull/8786) ([米哈伊尔\*科罗托夫](https://github.com/millb)) +- 添加功能 `ifNotFinite`. 这只是一个句法糖: `ifNotFinite(x, y) = isFinite(x) ? x : y`. [\#8710](https://github.com/ClickHouse/ClickHouse/pull/8710) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 已添加 `last_successful_update_time` 列中 `system.dictionaries` 表 [\#9394](https://github.com/ClickHouse/ClickHouse/pull/9394) ([尼基塔\*米哈伊洛夫](https://github.com/nikitamikhaylov)) +- 添加 `blockSerializedSize` 功能(磁盘大小不压缩) [\#8952](https://github.com/ClickHouse/ClickHouse/pull/8952) ([Azat Khuzhin](https://github.com/azat)) +- 添加功能 `moduloOrZero` [\#9358](https://github.com/ClickHouse/ClickHouse/pull/9358) ([hcz](https://github.com/hczhcz)) +- 添加系统表 `system.zeros` 和 `system.zeros_mt` 以及故事功能 `zeros()` 和 `zeros_mt()`. 表(和表函数)包含具有名称的单列 `zero` 和类型 `UInt8`. 
此列包含零。 为了测试目的,需要它作为生成许多行的最快方法。 这修复 [\#6604](https://github.com/ClickHouse/ClickHouse/issues/6604) [\#9593](https://github.com/ClickHouse/ClickHouse/pull/9593) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) + +#### 实验特点 {#experimental-feature} + +- 添加新的紧凑格式的部件 `MergeTree`-家庭表中的所有列都存储在一个文件中。 它有助于提高小型和频繁插入的性能。 旧的格式(每列一个文件)现在被称为wide。 数据存储格式由设置控制 `min_bytes_for_wide_part` 和 `min_rows_for_wide_part`. [\#8290](https://github.com/ClickHouse/ClickHouse/pull/8290) ([安东\*波波夫](https://github.com/CurtizJ)) +- 支持S3存储 `Log`, `TinyLog` 和 `StripeLog` 桌子 [\#8862](https://github.com/ClickHouse/ClickHouse/pull/8862) ([帕维尔\*科瓦连科](https://github.com/Jokser)) + +#### 错误修复 {#bug-fix-2} + +- 修正了日志消息中不一致的空格。 [\#9322](https://github.com/ClickHouse/ClickHouse/pull/9322) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修复在创建表时将未命名元组数组展平为嵌套结构的错误。 [\#8866](https://github.com/ClickHouse/ClickHouse/pull/8866) ([achulkov2](https://github.com/achulkov2)) +- 修复了以下问题 “Too many open files” 如果有太多的文件匹配glob模式可能会发生错误 `File` 表或 `file` 表功能。 现在文件懒洋洋地打开。 这修复 [\#8857](https://github.com/ClickHouse/ClickHouse/issues/8857) [\#8861](https://github.com/ClickHouse/ClickHouse/pull/8861) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 删除临时表现在只删除临时表。 [\#8907](https://github.com/ClickHouse/ClickHouse/pull/8907) ([维塔利\*巴拉诺夫](https://github.com/vitlibar)) +- 当我们关闭服务器或分离/附加表时删除过时的分区。 [\#8602](https://github.com/ClickHouse/ClickHouse/pull/8602) ([纪尧姆\*塔瑟里](https://github.com/YiuRULE)) +- 默认磁盘如何计算可用空间 `data` 子目录。 修复了可用空间量计算不正确的问题,如果 `data` 目录被安装到一个单独的设备(罕见的情况)。 这修复 [\#7441](https://github.com/ClickHouse/ClickHouse/issues/7441) [\#9257](https://github.com/ClickHouse/ClickHouse/pull/9257) ([米哈伊尔\*科罗托夫](https://github.com/millb)) +- 允许逗号(交叉)与IN()内部连接。 [\#9251](https://github.com/ClickHouse/ClickHouse/pull/9251) ([Artem Zuikov](https://github.com/4ertus2)) +- 如果在WHERE部分中有\[NOT\]LIKE运算符,则允许将CROSS重写为INNER JOIN。 [\#9229](https://github.com/ClickHouse/ClickHouse/pull/9229) ([Artem Zuikov](https://github.com/4ertus2)) +- 修复后可能不正确的结果 `GROUP BY` 启用设置 `distributed_aggregation_memory_efficient`. 修复 [\#9134](https://github.com/ClickHouse/ClickHouse/issues/9134). [\#9289](https://github.com/ClickHouse/ClickHouse/pull/9289) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 找到的键在缓存字典的指标中被计为错过。 [\#9411](https://github.com/ClickHouse/ClickHouse/pull/9411) ([尼基塔\*米哈伊洛夫](https://github.com/nikitamikhaylov)) +- 修复引入的复制协议不兼容 [\#8598](https://github.com/ClickHouse/ClickHouse/issues/8598). [\#9412](https://github.com/ClickHouse/ClickHouse/pull/9412) ([阿利沙平](https://github.com/alesapin)) +- 在固定的竞争条件 `queue_task_handle` 在启动 `ReplicatedMergeTree` 桌子 [\#9552](https://github.com/ClickHouse/ClickHouse/pull/9552) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 令牌 `NOT` 没有工作 `SHOW TABLES NOT LIKE` 查询 [\#8727](https://github.com/ClickHouse/ClickHouse/issues/8727) [\#8940](https://github.com/ClickHouse/ClickHouse/pull/8940) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 添加范围检查功能 `h3EdgeLengthM`. 
如果没有这个检查,缓冲区溢出是可能的。 [\#8945](https://github.com/ClickHouse/ClickHouse/pull/8945) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修复了多个参数(超过10)的三元逻辑运算批量计算中的错误。 [\#8718](https://github.com/ClickHouse/ClickHouse/pull/8718) ([亚历山大\*卡扎科夫](https://github.com/Akazz)) +- 修复PREWHERE优化的错误,这可能导致段错误或 `Inconsistent number of columns got from MergeTreeRangeReader` 例外。 [\#9024](https://github.com/ClickHouse/ClickHouse/pull/9024) ([安东\*波波夫](https://github.com/CurtizJ)) +- 修复意外 `Timeout exceeded while reading from socket` 异常,在实际超时之前以及启用查询探查器时,在安全连接上随机发生。 还添加 `connect_timeout_with_failover_secure_ms` 设置(默认100ms),这是类似于 `connect_timeout_with_failover_ms`,但用于安全连接(因为SSL握手比普通TCP连接慢) [\#9026](https://github.com/ClickHouse/ClickHouse/pull/9026) ([tavplubix](https://github.com/tavplubix)) +- 修复突变最终确定的错误,当突变可能处于以下状态时 `parts_to_do=0` 和 `is_done=0`. [\#9022](https://github.com/ClickHouse/ClickHouse/pull/9022) ([阿利沙平](https://github.com/alesapin)) +- 使用新的任何连接逻辑 `partial_merge_join` 设置。 有可能使 `ANY|ALL|SEMI LEFT` 和 `ALL INNER` 加入与 `partial_merge_join=1` 现在 [\#8932](https://github.com/ClickHouse/ClickHouse/pull/8932) ([Artem Zuikov](https://github.com/4ertus2)) +- Shard现在将从发起者获得的设置夹到shard的constaints,而不是抛出异常。 此修补程序允许将查询发送到具有另一个约束的分片。 [\#9447](https://github.com/ClickHouse/ClickHouse/pull/9447) ([维塔利\*巴拉诺夫](https://github.com/vitlibar)) +- 修正了内存管理问题 `MergeTreeReadPool`. [\#8791](https://github.com/ClickHouse/ClickHouse/pull/8791) ([Vladimir Chebotarev](https://github.com/excitoon)) +- 修复 `toDecimal*OrNull()` 使用字符串调用时的函数系列 `e`. 修复 [\#8312](https://github.com/ClickHouse/ClickHouse/issues/8312) [\#8764](https://github.com/ClickHouse/ClickHouse/pull/8764) ([Artem Zuikov](https://github.com/4ertus2)) +- 请确保 `FORMAT Null` 不向客户端发送数据。 [\#8767](https://github.com/ClickHouse/ClickHouse/pull/8767) ([Alexander Kuzmenkov](https://github.com/akuzm)) +- 修复时间戳中的错误 `LiveViewBlockInputStream` 不会更新。 `LIVE VIEW` 是一个实验特征。 [\#8644](https://github.com/ClickHouse/ClickHouse/pull/8644) ([vxider](https://github.com/Vxider)) [\#8625](https://github.com/ClickHouse/ClickHouse/pull/8625) ([vxider](https://github.com/Vxider)) +- 固定 `ALTER MODIFY TTL` 不允许删除旧ttl表达式的错误行为。 [\#8422](https://github.com/ClickHouse/ClickHouse/pull/8422) ([Vladimir Chebotarev](https://github.com/excitoon)) +- 修复了MergeTreeIndexSet中的UBSan报告。 这修复 [\#9250](https://github.com/ClickHouse/ClickHouse/issues/9250) [\#9365](https://github.com/ClickHouse/ClickHouse/pull/9365) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 固定的行为 `match` 和 `extract` 当干草堆有零字节的函数。 当干草堆不变时,这种行为是错误的。 这修复 [\#9160](https://github.com/ClickHouse/ClickHouse/issues/9160) [\#9163](https://github.com/ClickHouse/ClickHouse/pull/9163) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) [\#9345](https://github.com/ClickHouse/ClickHouse/pull/9345) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 避免从apache Avro第三方库中的析构函数抛出。 [\#9066](https://github.com/ClickHouse/ClickHouse/pull/9066) ([安德鲁Onyshchuk](https://github.com/oandrew)) +- 不要提交从轮询的批次 `Kafka` 部分,因为它可能会导致数据漏洞。 [\#8876](https://github.com/ClickHouse/ClickHouse/pull/8876) ([filimonov](https://github.com/filimonov)) +- 修复 `joinGet` 使用可为空的返回类型。 https://github.com/ClickHouse/ClickHouse/issues/8919 [\#9014](https://github.com/ClickHouse/ClickHouse/pull/9014) ([阿莫斯鸟](https://github.com/amosbird)) +- 修复压缩时的数据不兼容 `T64` 编解ec [\#9016](https://github.com/ClickHouse/ClickHouse/pull/9016) ([Artem Zuikov](https://github.com/4ertus2))修复数据类型id `T64` 在受影响的版本中导致错误(de)压缩的压缩编解ec。 [\#9033](https://github.com/ClickHouse/ClickHouse/pull/9033) ([Artem 
Zuikov](https://github.com/4ertus2)) +- 添加设置 `enable_early_constant_folding` 并禁用它在某些情况下,导致错误。 [\#9010](https://github.com/ClickHouse/ClickHouse/pull/9010) ([Artem Zuikov](https://github.com/4ertus2)) +- 使用VIEW修复下推谓词优化器并启用测试 [\#9011](https://github.com/ClickHouse/ClickHouse/pull/9011) ([张冬](https://github.com/zhang2014)) +- 修复段错误 `Merge` 表,从读取时可能发生 `File` 储存 [\#9387](https://github.com/ClickHouse/ClickHouse/pull/9387) ([tavplubix](https://github.com/tavplubix)) +- 添加了对存储策略的检查 `ATTACH PARTITION FROM`, `REPLACE PARTITION`, `MOVE TO TABLE`. 否则,它可以使部分数据重新启动后无法访问,并阻止ClickHouse启动。 [\#9383](https://github.com/ClickHouse/ClickHouse/pull/9383) ([Vladimir Chebotarev](https://github.com/excitoon)) +- 修复改变,如果有TTL设置表。 [\#8800](https://github.com/ClickHouse/ClickHouse/pull/8800) ([安东\*波波夫](https://github.com/CurtizJ)) +- 修复在以下情况下可能发生的竞争条件 `SYSTEM RELOAD ALL DICTIONARIES` 在某些字典被修改/添加/删除时执行。 [\#8801](https://github.com/ClickHouse/ClickHouse/pull/8801) ([维塔利\*巴拉诺夫](https://github.com/vitlibar)) +- 在以前的版本 `Memory` 数据库引擎使用空数据路径,因此在以下位置创建表 `path` directory (e.g. `/var/lib/clickhouse/`), not in data directory of database (e.g. `/var/lib/clickhouse/db_name`). [\#8753](https://github.com/ClickHouse/ClickHouse/pull/8753) ([tavplubix](https://github.com/tavplubix)) +- 修复了关于缺少默认磁盘或策略的错误日志消息。 [\#9530](https://github.com/ClickHouse/ClickHouse/pull/9530) ([Vladimir Chebotarev](https://github.com/excitoon)) +- 修复数组类型的bloom\_filter索引的not(has())。 [\#9407](https://github.com/ClickHouse/ClickHouse/pull/9407) ([achimbab](https://github.com/achimbab)) +- 允许表中的第一列 `Log` 引擎是别名 [\#9231](https://github.com/ClickHouse/ClickHouse/pull/9231) ([伊万](https://github.com/abyss7)) +- 从读取时修复范围的顺序 `MergeTree` 表中的一个线程。 它可能会导致例外 `MergeTreeRangeReader` 或错误的查询结果。 [\#9050](https://github.com/ClickHouse/ClickHouse/pull/9050) ([安东\*波波夫](https://github.com/CurtizJ)) +- 赂眉露\>\> `reinterpretAsFixedString` 返回 `FixedString` 而不是 `String`. [\#9052](https://github.com/ClickHouse/ClickHouse/pull/9052) ([安德鲁Onyshchuk](https://github.com/oandrew)) +- 避免极少数情况下,当用户可以得到错误的错误消息 (`Success` 而不是详细的错误描述)。 [\#9457](https://github.com/ClickHouse/ClickHouse/pull/9457) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 使用时不要崩溃 `Template` 使用空行模板格式化。 [\#8785](https://github.com/ClickHouse/ClickHouse/pull/8785) ([Alexander Kuzmenkov](https://github.com/akuzm)) +- 系统表的元数据文件可能在错误的位置创建 [\#8653](https://github.com/ClickHouse/ClickHouse/pull/8653) ([tavplubix](https://github.com/tavplubix))修复 [\#8581](https://github.com/ClickHouse/ClickHouse/issues/8581). +- 修复缓存字典中exception\_ptr上的数据竞赛 [\#8303](https://github.com/ClickHouse/ClickHouse/issues/8303). [\#9379](https://github.com/ClickHouse/ClickHouse/pull/9379) ([尼基塔\*米哈伊洛夫](https://github.com/nikitamikhaylov)) +- 不要为查询引发异常 `ATTACH TABLE IF NOT EXISTS`. 
以前它是抛出,如果表已经存在,尽管 `IF NOT EXISTS` 条款 [\#8967](https://github.com/ClickHouse/ClickHouse/pull/8967) ([安东\*波波夫](https://github.com/CurtizJ)) +- 修复了异常消息中丢失的关闭paren。 [\#8811](https://github.com/ClickHouse/ClickHouse/pull/8811) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 避免消息 `Possible deadlock avoided` 在clickhouse客户端在交互模式下启动。 [\#9455](https://github.com/ClickHouse/ClickHouse/pull/9455) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修复了base64编码值末尾填充格式错误的问题。 更新base64库。 这修复 [\#9491](https://github.com/ClickHouse/ClickHouse/issues/9491),关闭 [\#9492](https://github.com/ClickHouse/ClickHouse/issues/9492) [\#9500](https://github.com/ClickHouse/ClickHouse/pull/9500) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 防止丢失数据 `Kafka` 在极少数情况下,在读取后缀之后但在提交之前发生异常。 修复 [\#9378](https://github.com/ClickHouse/ClickHouse/issues/9378) [\#9507](https://github.com/ClickHouse/ClickHouse/pull/9507) ([filimonov](https://github.com/filimonov)) +- 在固定的异常 `DROP TABLE IF EXISTS` [\#8663](https://github.com/ClickHouse/ClickHouse/pull/8663) ([尼基塔\*瓦西列夫](https://github.com/nikvas0)) +- 修复当用户尝试崩溃 `ALTER MODIFY SETTING` 对于老格式化 `MergeTree` 表引擎家族. [\#9435](https://github.com/ClickHouse/ClickHouse/pull/9435) ([阿利沙平](https://github.com/alesapin)) +- 支持在JSON相关函数中不适合Int64的UInt64号码。 更新SIMDJSON掌握。 这修复 [\#9209](https://github.com/ClickHouse/ClickHouse/issues/9209) [\#9344](https://github.com/ClickHouse/ClickHouse/pull/9344) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 当使用非严格单调函数索引时,固定执行反转谓词。 [\#9223](https://github.com/ClickHouse/ClickHouse/pull/9223) ([亚历山大\*卡扎科夫](https://github.com/Akazz)) +- 不要试图折叠 `IN` 常量在 `GROUP BY` [\#8868](https://github.com/ClickHouse/ClickHouse/pull/8868) ([阿莫斯鸟](https://github.com/amosbird)) +- 修复bug `ALTER DELETE` 突变导致索引损坏。 这修复 [\#9019](https://github.com/ClickHouse/ClickHouse/issues/9019) 和 [\#8982](https://github.com/ClickHouse/ClickHouse/issues/8982). 另外修复极其罕见的竞争条件 `ReplicatedMergeTree` `ALTER` 查询。 [\#9048](https://github.com/ClickHouse/ClickHouse/pull/9048) ([阿利沙平](https://github.com/alesapin)) +- 当设置 `compile_expressions` 被启用,你可以得到 `unexpected column` 在 `LLVMExecutableFunction` 当我们使用 `Nullable` 类型 [\#8910](https://github.com/ClickHouse/ClickHouse/pull/8910) ([纪尧姆\*塔瑟里](https://github.com/YiuRULE)) +- 多个修复 `Kafka` 引擎:1)修复在消费者组重新平衡期间出现的重复项。 2)修复罕见 ‘holes’ 当数据从一个轮询的几个分区轮询并部分提交时出现(现在我们总是处理/提交整个轮询的消息块)。 3)通过块大小修复刷新(在此之前,只有超时刷新才能正常工作)。 4)更好的订阅程序(与分配反馈)。 5)使测试工作得更快(默认时间间隔和超时)。 由于数据之前没有被块大小刷新(根据文档),pr可能会导致默认设置的性能下降(由于更频繁和更小的刷新不太理想)。 如果您在更改后遇到性能问题-请增加 `kafka_max_block_size` 在表中的更大的值(例如 `CREATE TABLE ...Engine=Kafka ... SETTINGS ... kafka_max_block_size=524288`). 修复 [\#7259](https://github.com/ClickHouse/ClickHouse/issues/7259) [\#8917](https://github.com/ClickHouse/ClickHouse/pull/8917) ([filimonov](https://github.com/filimonov)) +- 修复 `Parameter out of bound` 在PREWHERE优化之后的某些查询中出现异常。 [\#8914](https://github.com/ClickHouse/ClickHouse/pull/8914) ([Baudouin Giard](https://github.com/bgiard)) +- 修正了函数参数混合常量的情况 `arrayZip`. [\#8705](https://github.com/ClickHouse/ClickHouse/pull/8705) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 执行时 `CREATE` 查询,在存储引擎参数中折叠常量表达式。 将空数据库名称替换为当前数据库。 修复 [\#6508](https://github.com/ClickHouse/ClickHouse/issues/6508), [\#3492](https://github.com/ClickHouse/ClickHouse/issues/3492) [\#9262](https://github.com/ClickHouse/ClickHouse/pull/9262) ([tavplubix](https://github.com/tavplubix)) +- 现在不可能创建或添加具有简单循环别名的列,如 `a DEFAULT b, b DEFAULT a`. 
[\#9603](https://github.com/ClickHouse/ClickHouse/pull/9603) ([阿利沙平](https://github.com/alesapin)) +- 修正了双重移动可能会损坏原始部分的错误。 这是相关的,如果你使用 `ALTER TABLE MOVE` [\#8680](https://github.com/ClickHouse/ClickHouse/pull/8680) ([Vladimir Chebotarev](https://github.com/excitoon)) +- 允许 `interval` 用于正确解析的标识符,而无需反引号。 当一个查询不能被执行,即使固定的问题 `interval` 标识符用反引号或双引号括起来。 这修复 [\#9124](https://github.com/ClickHouse/ClickHouse/issues/9124). [\#9142](https://github.com/ClickHouse/ClickHouse/pull/9142) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修正了模糊测试和不正确的行为 `bitTestAll`/`bitTestAny` 功能。 [\#9143](https://github.com/ClickHouse/ClickHouse/pull/9143) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修复可能的崩溃/错误的行数 `LIMIT n WITH TIES` 当有很多行等于第n行时。 [\#9464](https://github.com/ClickHouse/ClickHouse/pull/9464) ([tavplubix](https://github.com/tavplubix)) +- 使用enabled编写的部件修复突变 `insert_quorum`. [\#9463](https://github.com/ClickHouse/ClickHouse/pull/9463) ([阿利沙平](https://github.com/alesapin)) +- 修复数据竞赛破坏 `Poco::HTTPServer`. 当服务器启动并立即关闭时,可能会发生这种情况。 [\#9468](https://github.com/ClickHouse/ClickHouse/pull/9468) ([安东\*波波夫](https://github.com/CurtizJ)) +- 修复运行时显示误导性错误消息的错误 `SHOW CREATE TABLE a_table_that_does_not_exist`. [\#8899](https://github.com/ClickHouse/ClickHouse/pull/8899) ([achulkov2](https://github.com/achulkov2)) +- 固定 `Parameters are out of bound` 例外在一些罕见的情况下,当我们在一个常数 `SELECT` 条款时,我们有一个 `ORDER BY` 和一个 `LIMIT` 条款 [\#8892](https://github.com/ClickHouse/ClickHouse/pull/8892) ([纪尧姆\*塔瑟里](https://github.com/YiuRULE)) +- 修复突变定稿,当已经完成突变可以有状态 `is_done=0`. [\#9217](https://github.com/ClickHouse/ClickHouse/pull/9217) ([阿利沙平](https://github.com/alesapin)) +- 防止执行 `ALTER ADD INDEX` 对于旧语法的MergeTree表,因为它不起作用。 [\#8822](https://github.com/ClickHouse/ClickHouse/pull/8822) ([米哈伊尔\*科罗托夫](https://github.com/millb)) +- 在服务器启动时不要访问表,这 `LIVE VIEW` 取决于,所以服务器将能够启动。 也删除 `LIVE VIEW` 分离时的依赖关系 `LIVE VIEW`. `LIVE VIEW` 是一个实验特征。 [\#8824](https://github.com/ClickHouse/ClickHouse/pull/8824) ([tavplubix](https://github.com/tavplubix)) +- 修复可能的段错误 `MergeTreeRangeReader`,同时执行 `PREWHERE`. [\#9106](https://github.com/ClickHouse/ClickHouse/pull/9106) ([安东\*波波夫](https://github.com/CurtizJ)) +- 修复与列Ttl可能不匹配的校验和。 [\#9451](https://github.com/ClickHouse/ClickHouse/pull/9451) ([安东\*波波夫](https://github.com/CurtizJ)) +- 修正了一个错误,当部分没有被移动的情况下,只有一个卷的TTL规则在后台。 [\#8672](https://github.com/ClickHouse/ClickHouse/pull/8672) ([Vladimir Chebotarev](https://github.com/excitoon)) +- 修正了这个问题 `Method createColumn() is not implemented for data type Set`. 这修复 [\#7799](https://github.com/ClickHouse/ClickHouse/issues/7799). [\#8674](https://github.com/ClickHouse/ClickHouse/pull/8674) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 现在我们将尝试更频繁地完成突变。 [\#9427](https://github.com/ClickHouse/ClickHouse/pull/9427) ([阿利沙平](https://github.com/alesapin)) +- 修复 `intDiv` 减一个常数 [\#9351](https://github.com/ClickHouse/ClickHouse/pull/9351) ([hcz](https://github.com/hczhcz)) +- 修复可能的竞争条件 `BlockIO`. 
[\#9356](https://github.com/ClickHouse/ClickHouse/pull/9356) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 修复尝试使用/删除时导致服务器终止的错误 `Kafka` 使用错误的参数创建的表。 [\#9513](https://github.com/ClickHouse/ClickHouse/pull/9513) ([filimonov](https://github.com/filimonov)) +- 增加了解决方法,如果操作系统返回错误的结果 `timer_create` 功能。 [\#8837](https://github.com/ClickHouse/ClickHouse/pull/8837) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 在使用固定错误 `min_marks_for_seek` 参数。 修复了分布式表中没有分片键时的错误消息,并且我们尝试跳过未使用的分片。 [\#8908](https://github.com/ClickHouse/ClickHouse/pull/8908) ([Azat Khuzhin](https://github.com/azat)) + +#### 改进 {#improvement} + +- 执行 `ALTER MODIFY/DROP` 对突变的顶部查询 `ReplicatedMergeTree*` 引擎家族. 现在 `ALTERS` 仅在元数据更新阶段阻止,之后不阻止。 [\#8701](https://github.com/ClickHouse/ClickHouse/pull/8701) ([阿利沙平](https://github.com/alesapin)) +- 添加重写交叉到内部连接的能力 `WHERE` 包含未编译名称的部分。 [\#9512](https://github.com/ClickHouse/ClickHouse/pull/9512) ([Artem Zuikov](https://github.com/4ertus2)) +- 赂眉露\>\> `SHOW TABLES` 和 `SHOW DATABASES` 查询支持 `WHERE` 表达式和 `FROM`/`IN` [\#9076](https://github.com/ClickHouse/ClickHouse/pull/9076) ([sundyli](https://github.com/sundy-li)) +- 添加了一个设置 `deduplicate_blocks_in_dependent_materialized_views`. [\#9070](https://github.com/ClickHouse/ClickHouse/pull/9070) ([urykhy](https://github.com/urykhy)) +- 在最近的变化之后,MySQL客户端开始以十六进制打印二进制字符串,从而使它们不可读 ([\#9032](https://github.com/ClickHouse/ClickHouse/issues/9032)). ClickHouse中的解决方法是将字符串列标记为UTF-8,这并不总是如此,但通常是这种情况。 [\#9079](https://github.com/ClickHouse/ClickHouse/pull/9079) ([尤里\*巴拉诺夫](https://github.com/yurriy)) +- 添加对字符串和FixedString键的支持 `sumMap` [\#8903](https://github.com/ClickHouse/ClickHouse/pull/8903) ([Baudouin Giard](https://github.com/bgiard)) +- 支持SummingMergeTree地图中的字符串键 [\#8933](https://github.com/ClickHouse/ClickHouse/pull/8933) ([Baudouin Giard](https://github.com/bgiard)) +- 即使线程已抛出异常,也向线程池发送线程终止信号 [\#8736](https://github.com/ClickHouse/ClickHouse/pull/8736) ([丁香飞](https://github.com/dingxiangfei2009)) +- 允许设置 `query_id` 在 `clickhouse-benchmark` [\#9416](https://github.com/ClickHouse/ClickHouse/pull/9416) ([安东\*波波夫](https://github.com/CurtizJ)) +- 不要让奇怪的表达 `ALTER TABLE ... PARTITION partition` 查询。 这个地址 [\#7192](https://github.com/ClickHouse/ClickHouse/issues/7192) [\#8835](https://github.com/ClickHouse/ClickHouse/pull/8835) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 表 `system.table_engines` 现在提供有关功能支持的信息(如 `supports_ttl` 或 `supports_sort_order`). [\#8830](https://github.com/ClickHouse/ClickHouse/pull/8830) ([Max Akhmedov](https://github.com/zlobober)) +- 启用 `system.metric_log` 默认情况下。 它将包含具有ProfileEvents值的行,CurrentMetrics收集与 “collect\_interval\_milliseconds” 间隔(默认情况下为一秒)。 该表非常小(通常以兆字节为单位),默认情况下收集此数据是合理的。 [\#9225](https://github.com/ClickHouse/ClickHouse/pull/9225) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- Initialize query profiler for all threads in a group, e.g. it allows to fully profile insert-queries. 
Fixes [\#6964](https://github.com/ClickHouse/ClickHouse/issues/6964) [\#8874](https://github.com/ClickHouse/ClickHouse/pull/8874) ([伊万](https://github.com/abyss7)) +- 现在是暂时的 `LIVE VIEW` 创建者 `CREATE LIVE VIEW name WITH TIMEOUT [42] ...` 而不是 `CREATE TEMPORARY LIVE VIEW ...`,因为以前的语法不符合 `CREATE TEMPORARY TABLE ...` [\#9131](https://github.com/ClickHouse/ClickHouse/pull/9131) ([tavplubix](https://github.com/tavplubix)) +- 添加text\_log。级别配置参数,以限制进入 `system.text_log` 表 [\#8809](https://github.com/ClickHouse/ClickHouse/pull/8809) ([Azat Khuzhin](https://github.com/azat)) +- 允许根据TTL规则将下载的部分放入磁盘/卷 [\#8598](https://github.com/ClickHouse/ClickHouse/pull/8598) ([Vladimir Chebotarev](https://github.com/excitoon)) +- 对于外部MySQL字典,允许将MySQL连接池共同化为 “share” 他们在字典中。 此选项显着减少到MySQL服务器的连接数。 [\#9409](https://github.com/ClickHouse/ClickHouse/pull/9409) ([Clément Rodriguez](https://github.com/clemrodriguez)) +- 显示分位数的最近查询执行时间 `clickhouse-benchmark` 输出而不是插值值。 最好显示与某些查询的执行时间相对应的值。 [\#8712](https://github.com/ClickHouse/ClickHouse/pull/8712) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 可以在将数据插入到Kafka时为消息添加密钥和时间戳。 修复 [\#7198](https://github.com/ClickHouse/ClickHouse/issues/7198) [\#8969](https://github.com/ClickHouse/ClickHouse/pull/8969) ([filimonov](https://github.com/filimonov)) +- 如果服务器从终端运行,请按颜色突出显示线程号,查询id和日志优先级。 这是为了提高开发人员相关日志消息的可读性。 [\#8961](https://github.com/ClickHouse/ClickHouse/pull/8961) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 更好的异常消息,同时加载表 `Ordinary` 数据库。 [\#9527](https://github.com/ClickHouse/ClickHouse/pull/9527) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 执行 `arraySlice` 对于具有聚合函数状态的数组。 这修复 [\#9388](https://github.com/ClickHouse/ClickHouse/issues/9388) [\#9391](https://github.com/ClickHouse/ClickHouse/pull/9391) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 允许在in运算符的右侧使用常量函数和常量数组。 [\#8813](https://github.com/ClickHouse/ClickHouse/pull/8813) ([安东\*波波夫](https://github.com/CurtizJ)) +- 如果在获取系统数据时发生了zookeeper异常。副本,将其显示在单独的列中。 这实现了 [\#9137](https://github.com/ClickHouse/ClickHouse/issues/9137) [\#9138](https://github.com/ClickHouse/ClickHouse/pull/9138) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 原子删除destroy上的MergeTree数据部分。 [\#8402](https://github.com/ClickHouse/ClickHouse/pull/8402) ([Vladimir Chebotarev](https://github.com/excitoon)) +- 支持分布式表的行级安全性。 [\#8926](https://github.com/ClickHouse/ClickHouse/pull/8926) ([伊万](https://github.com/abyss7)) +- Now we recognize suffix (like KB, KiB…) in settings values. [\#8072](https://github.com/ClickHouse/ClickHouse/pull/8072) ([米哈伊尔\*科罗托夫](https://github.com/millb)) +- 在构建大型连接的结果时防止内存不足。 [\#8637](https://github.com/ClickHouse/ClickHouse/pull/8637) ([Artem Zuikov](https://github.com/4ertus2)) +- 在交互模式下为建议添加群集名称 `clickhouse-client`. [\#8709](https://github.com/ClickHouse/ClickHouse/pull/8709) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- Initialize query profiler for all threads in a group, e.g. 
it allows to fully profile insert-queries [\#8820](https://github.com/ClickHouse/ClickHouse/pull/8820) ([伊万](https://github.com/abyss7)) +- 添加列 `exception_code` 在 `system.query_log` 桌子 [\#8770](https://github.com/ClickHouse/ClickHouse/pull/8770) ([米哈伊尔\*科罗托夫](https://github.com/millb)) +- 在端口上启用MySQL兼容服务器 `9004` 在默认服务器配置文件中。 在配置的例子固定密码生成命令。 [\#8771](https://github.com/ClickHouse/ClickHouse/pull/8771) ([尤里\*巴拉诺夫](https://github.com/yurriy)) +- 如果文件系统是只读的,请防止在关闭时中止。 这修复 [\#9094](https://github.com/ClickHouse/ClickHouse/issues/9094) [\#9100](https://github.com/ClickHouse/ClickHouse/pull/9100) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 当HTTP POST查询中需要长度时,更好的异常消息。 [\#9453](https://github.com/ClickHouse/ClickHouse/pull/9453) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 添加 `_path` 和 `_file` 虚拟列 `HDFS` 和 `File` 发动机和 `hdfs` 和 `file` 表函数 [\#8489](https://github.com/ClickHouse/ClickHouse/pull/8489) ([Olga Khvostikova](https://github.com/stavrolia)) +- 修复错误 `Cannot find column` 同时插入到 `MATERIALIZED VIEW` 在情况下,如果新列被添加到视图的内部表。 [\#8766](https://github.com/ClickHouse/ClickHouse/pull/8766) [\#8788](https://github.com/ClickHouse/ClickHouse/pull/8788) ([vzakaznikov](https://github.com/vzakaznikov)) [\#8788](https://github.com/ClickHouse/ClickHouse/issues/8788) [\#8806](https://github.com/ClickHouse/ClickHouse/pull/8806) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) [\#8803](https://github.com/ClickHouse/ClickHouse/pull/8803) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 通过最终更新后发送进度(如日志)修复本机客户端-服务器协议的进度。 这可能仅与使用本机协议的某些第三方工具相关。 [\#9495](https://github.com/ClickHouse/ClickHouse/pull/9495) ([Azat Khuzhin](https://github.com/azat)) +- 添加系统指标跟踪使用MySQL协议的客户端连接数 ([\#9013](https://github.com/ClickHouse/ClickHouse/issues/9013)). [\#9015](https://github.com/ClickHouse/ClickHouse/pull/9015) ([尤金\*克里莫夫](https://github.com/Slach)) +- 从现在开始,HTTP响应将有 `X-ClickHouse-Timezone` 标题设置为相同的时区值 `SELECT timezone()` 会报告。 [\#9493](https://github.com/ClickHouse/ClickHouse/pull/9493) ([Denis Glazachev](https://github.com/traceon)) + +#### 性能改进 {#performance-improvement} + +- 使用IN提高分析指标的性能 [\#9261](https://github.com/ClickHouse/ClickHouse/pull/9261) ([安东\*波波夫](https://github.com/CurtizJ)) +- 逻辑函数+代码清理更简单,更有效的代码。 跟进到 [\#8718](https://github.com/ClickHouse/ClickHouse/issues/8718) [\#8728](https://github.com/ClickHouse/ClickHouse/pull/8728) ([亚历山大\*卡扎科夫](https://github.com/Akazz)) +- 整体性能改善(范围为5%。.通过确保使用C++20功能进行更严格的别名处理,对于受影响的查询来说,这是200%)。 [\#9304](https://github.com/ClickHouse/ClickHouse/pull/9304) ([阿莫斯鸟](https://github.com/amosbird)) +- 比较函数的内部循环更严格的别名。 [\#9327](https://github.com/ClickHouse/ClickHouse/pull/9327) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 对于算术函数的内部循环更严格的别名。 [\#9325](https://github.com/ClickHouse/ClickHouse/pull/9325) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- ColumnVector::replicate()的实现速度快约3倍,通过该实现ColumnConst::convertToFullColumn()。 在实现常数时,也将在测试中有用。 [\#9293](https://github.com/ClickHouse/ClickHouse/pull/9293) ([亚历山大\*卡扎科夫](https://github.com/Akazz)) +- 另一个小的性能改进 `ColumnVector::replicate()` (这加快了 `materialize` 函数和高阶函数),甚至进一步改进 [\#9293](https://github.com/ClickHouse/ClickHouse/issues/9293) [\#9442](https://github.com/ClickHouse/ClickHouse/pull/9442) ([亚历山大\*卡扎科夫](https://github.com/Akazz)) +- 改进的性能 `stochasticLinearRegression` 聚合函数。 此补丁由英特尔贡献。 [\#8652](https://github.com/ClickHouse/ClickHouse/pull/8652) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 提高性能 `reinterpretAsFixedString` 功能。 [\#9342](https://github.com/ClickHouse/ClickHouse/pull/9342) 
([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 不要向客户端发送块 `Null` 处理器管道中的格式。 [\#8797](https://github.com/ClickHouse/ClickHouse/pull/8797) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) [\#8767](https://github.com/ClickHouse/ClickHouse/pull/8767) ([Alexander Kuzmenkov](https://github.com/akuzm)) + +#### 构建/测试/包装改进 {#buildtestingpackaging-improvement} + +- 异常处理现在可以在适用于Linux的Windows子系统上正常工作。 看https://github.com/ClickHouse-Extras/libunwind/pull/3 这修复 [\#6480](https://github.com/ClickHouse/ClickHouse/issues/6480) [\#9564](https://github.com/ClickHouse/ClickHouse/pull/9564) ([sobolevsv](https://github.com/sobolevsv)) +- 替换 `readline` 与 `replxx` 对于在交互式线编辑 `clickhouse-client` [\#8416](https://github.com/ClickHouse/ClickHouse/pull/8416) ([伊万](https://github.com/abyss7)) +- 在FunctionsComparison中更好的构建时间和更少的模板实例化。 [\#9324](https://github.com/ClickHouse/ClickHouse/pull/9324) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 增加了与集成 `clang-tidy` 在线人 另请参阅 [\#6044](https://github.com/ClickHouse/ClickHouse/issues/6044) [\#9566](https://github.com/ClickHouse/ClickHouse/pull/9566) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 现在我们使用CI链接ClickHouse `lld` 即使是 `gcc`. [\#9049](https://github.com/ClickHouse/ClickHouse/pull/9049) ([阿利沙平](https://github.com/alesapin)) +- 允许随机线程调度和插入毛刺时 `THREAD_FUZZER_*` 设置环境变量。 这有助于测试。 [\#9459](https://github.com/ClickHouse/ClickHouse/pull/9459) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 在无状态测试中启用安全套接字 [\#9288](https://github.com/ClickHouse/ClickHouse/pull/9288) ([tavplubix](https://github.com/tavplubix)) +- 使SPLIT\_SHARED\_LIBRARIES=OFF更强大 [\#9156](https://github.com/ClickHouse/ClickHouse/pull/9156) ([Azat Khuzhin](https://github.com/azat)) +- 赂眉露\>\> “performance\_introspection\_and\_logging” 测试可靠的随机服务器卡住。 这可能发生在CI环境中。 另请参阅 [\#9515](https://github.com/ClickHouse/ClickHouse/issues/9515) [\#9528](https://github.com/ClickHouse/ClickHouse/pull/9528) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 在样式检查中验证XML。 [\#9550](https://github.com/ClickHouse/ClickHouse/pull/9550) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修正了测试中的竞争条件 `00738_lock_for_inner_table`. 这个测试依赖于睡眠。 [\#9555](https://github.com/ClickHouse/ClickHouse/pull/9555) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 删除类型的性能测试 `once`. 
这是在统计比较模式下运行所有性能测试(更可靠)所需的。 [\#9557](https://github.com/ClickHouse/ClickHouse/pull/9557) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 增加了算术函数的性能测试。 [\#9326](https://github.com/ClickHouse/ClickHouse/pull/9326) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 增加了性能测试 `sumMap` 和 `sumMapWithOverflow` 聚合函数。 后续行动 [\#8933](https://github.com/ClickHouse/ClickHouse/issues/8933) [\#8947](https://github.com/ClickHouse/ClickHouse/pull/8947) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 通过样式检查确保错误代码的样式。 [\#9370](https://github.com/ClickHouse/ClickHouse/pull/9370) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 为测试历史添加脚本。 [\#8796](https://github.com/ClickHouse/ClickHouse/pull/8796) ([阿利沙平](https://github.com/alesapin)) +- 添加GCC警告 `-Wsuggest-override` 找到并修复所有地方 `override` 必须使用关键字。 [\#8760](https://github.com/ClickHouse/ClickHouse/pull/8760) ([kreuzerkrieg](https://github.com/kreuzerkrieg)) +- 在Mac OS X下忽略弱符号,因为它必须被定义 [\#9538](https://github.com/ClickHouse/ClickHouse/pull/9538) ([已删除用户](https://github.com/ghost)) +- 规范性能测试中某些查询的运行时间。 这是在准备在比较模式下运行所有性能测试时完成的。 [\#9565](https://github.com/ClickHouse/ClickHouse/pull/9565) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修复一些测试,以支持pytest与查询测试 [\#9062](https://github.com/ClickHouse/ClickHouse/pull/9062) ([伊万](https://github.com/abyss7)) +- 使用MSan在生成中启用SSL,因此在运行无状态测试时,服务器不会在启动时失败 [\#9531](https://github.com/ClickHouse/ClickHouse/pull/9531) ([tavplubix](https://github.com/tavplubix)) +- 修复测试结果中的数据库替换 [\#9384](https://github.com/ClickHouse/ClickHouse/pull/9384) ([Ilya Yatsishin](https://github.com/qoega)) +- 针对其他平台构建修复程序 [\#9381](https://github.com/ClickHouse/ClickHouse/pull/9381) ([proller](https://github.com/proller)) [\#8755](https://github.com/ClickHouse/ClickHouse/pull/8755) ([proller](https://github.com/proller)) [\#8631](https://github.com/ClickHouse/ClickHouse/pull/8631) ([proller](https://github.com/proller)) +- 将磁盘部分添加到无状态复盖率测试docker映像 [\#9213](https://github.com/ClickHouse/ClickHouse/pull/9213) ([帕维尔\*科瓦连科](https://github.com/Jokser)) +- 使用GRPC构建时,摆脱源代码树中的文件 [\#9588](https://github.com/ClickHouse/ClickHouse/pull/9588) ([阿莫斯鸟](https://github.com/amosbird)) +- 通过从上下文中删除SessionCleaner来缩短构建时间。 让SessionCleaner的代码更简单。 [\#9232](https://github.com/ClickHouse/ClickHouse/pull/9232) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 更新了clickhouse-test脚本中挂起查询的检查 [\#8858](https://github.com/ClickHouse/ClickHouse/pull/8858) ([亚历山大\*卡扎科夫](https://github.com/Akazz)) +- 从存储库中删除了一些无用的文件。 [\#8843](https://github.com/ClickHouse/ClickHouse/pull/8843) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 更改类型的数学perftests从 `once` 到 `loop`. 
[\#8783](https://github.com/ClickHouse/ClickHouse/pull/8783) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 添加Docker镜像,它允许为我们的代码库构建交互式代码浏览器HTML报告。 [\#8781](https://github.com/ClickHouse/ClickHouse/pull/8781) ([阿利沙平](https://github.com/alesapin))见 [Woboq代码浏览器](https://clickhouse-test-reports.s3.yandex.net/codebrowser/html_report///ClickHouse/dbms/index.html) +- 抑制MSan下的一些测试失败。 [\#8780](https://github.com/ClickHouse/ClickHouse/pull/8780) ([Alexander Kuzmenkov](https://github.com/akuzm)) +- 加速 “exception while insert” 测试。 此测试通常在具有覆盖率的调试版本中超时。 [\#8711](https://github.com/ClickHouse/ClickHouse/pull/8711) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 更新 `libcxx` 和 `libcxxabi` 至master,为 [\#9304](https://github.com/ClickHouse/ClickHouse/issues/9304) 做准备 [\#9308](https://github.com/ClickHouse/ClickHouse/pull/9308) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修复flaky测试 `00910_zookeeper_test_alter_compression_codecs`. [\#9525](https://github.com/ClickHouse/ClickHouse/pull/9525) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 清理重复的链接器标志。 确保链接器不会查找意想不到的符号。 [\#9433](https://github.com/ClickHouse/ClickHouse/pull/9433) ([阿莫斯鸟](https://github.com/amosbird)) +- 添加 `clickhouse-odbc` 驱动程序进入测试镜像。 这允许通过自己的ODBC驱动程序测试ClickHouse与ClickHouse的交互。 [\#9348](https://github.com/ClickHouse/ClickHouse/pull/9348) ([filimonov](https://github.com/filimonov)) +- 修复单元测试中的几个错误。 [\#9047](https://github.com/ClickHouse/ClickHouse/pull/9047) ([阿利沙平](https://github.com/alesapin)) +- 启用 `-Wmissing-include-dirs` GCC警告消除所有不存在的包括-主要是由于CMake脚本错误 [\#8704](https://github.com/ClickHouse/ClickHouse/pull/8704) ([kreuzerkrieg](https://github.com/kreuzerkrieg)) +- 描述查询探查器无法工作的原因。 这是用于 [\#9049](https://github.com/ClickHouse/ClickHouse/issues/9049) [\#9144](https://github.com/ClickHouse/ClickHouse/pull/9144) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 将OpenSSL更新至上游master。 修复了TLS连接可能会失败并显示消息的问题 `OpenSSL SSL_read: error:14094438:SSL routines:ssl3_read_bytes:tlsv1 alert internal error` 和 `SSL Exception: error:2400006E:random number generator::error retrieving entropy`. 该问题出现在版本20.1中。 [\#8956](https://github.com/ClickHouse/ClickHouse/pull/8956) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 更新服务器的Dockerfile [\#8893](https://github.com/ClickHouse/ClickHouse/pull/8893) ([Ilya Mazaev](https://github.com/ne-ray)) +- Build-gcc-from-sources脚本中的小修复 [\#8774](https://github.com/ClickHouse/ClickHouse/pull/8774) ([Michael Nacharov](https://github.com/mnach)) +- 替换 `numbers` 到 `zeros` 在perftests其中 `number` 不使用列。 这将导致更干净的测试结果。 [\#9600](https://github.com/ClickHouse/ClickHouse/pull/9600) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 修复列构造函数中使用initializer\_list时堆栈溢出问题。 [\#9367](https://github.com/ClickHouse/ClickHouse/pull/9367) ([已删除用户](https://github.com/ghost)) +- 将librdkafka升级到v1.3.0。 在mac OS X上启用捆绑的 `rdkafka` 和 `gsasl` 库。 [\#9000](https://github.com/ClickHouse/ClickHouse/pull/9000) ([安德鲁Onyshchuk](https://github.com/oandrew)) +- 在GCC9.2.0上构建修复程序 [\#9306](https://github.com/ClickHouse/ClickHouse/pull/9306) ([vxider](https://github.com/Vxider)) + +## ClickHouse版本v20.1 {#clickhouse-release-v20-1} + +### ClickHouse版本v20.1.8.41,2020-03-20 {#clickhouse-release-v20-1-8-41-2020-03-20} + +#### 错误修复 {#bug-fix-3} + +- 修复可能的永久性 `Cannot schedule a task` 错误(由于未处理的异常 `ParallelAggregatingBlockInputStream::Handler::onFinish/onFinishThread`). 这修复 [\#6833](https://github.com/ClickHouse/ClickHouse/issues/6833).
[\#9154](https://github.com/ClickHouse/ClickHouse/pull/9154) ([Azat Khuzhin](https://github.com/azat)) +- 修复过多的内存消耗 `ALTER` 查询(突变)。 这修复 [\#9533](https://github.com/ClickHouse/ClickHouse/issues/9533) 和 [\#9670](https://github.com/ClickHouse/ClickHouse/issues/9670). [\#9754](https://github.com/ClickHouse/ClickHouse/pull/9754) ([阿利沙平](https://github.com/alesapin)) +- 修复外部字典DDL中反引用的错误。 这修复 [\#9619](https://github.com/ClickHouse/ClickHouse/issues/9619). [\#9734](https://github.com/ClickHouse/ClickHouse/pull/9734) ([阿利沙平](https://github.com/alesapin)) + +### ClickHouse释放v20.1.7.38,2020-03-18 {#clickhouse-release-v20-1-7-38-2020-03-18} + +#### 错误修复 {#bug-fix-4} + +- 修正了不正确的内部函数名称 `sumKahan` 和 `sumWithOverflow`. 在远程查询中使用此函数时,我会导致异常。 [\#9636](https://github.com/ClickHouse/ClickHouse/pull/9636) ([Azat Khuzhin](https://github.com/azat)). 这个问题是在所有ClickHouse版本。 +- 允许 `ALTER ON CLUSTER` 的 `Distributed` 具有内部复制的表。 这修复 [\#3268](https://github.com/ClickHouse/ClickHouse/issues/3268). [\#9617](https://github.com/ClickHouse/ClickHouse/pull/9617) ([shinoi2](https://github.com/shinoi2)). 这个问题是在所有ClickHouse版本。 +- 修复可能的异常 `Size of filter doesn't match size of column` 和 `Invalid number of rows in Chunk` 在 `MergeTreeRangeReader`. 它们可能在执行时出现 `PREWHERE` 在某些情况下。 修复 [\#9132](https://github.com/ClickHouse/ClickHouse/issues/9132). [\#9612](https://github.com/ClickHouse/ClickHouse/pull/9612) ([安东\*波波夫](https://github.com/CurtizJ)) +- 修复了这个问题:如果你编写一个简单的算术表达式,则不会保留时区 `time + 1` (与像这样的表达形成对比 `time + INTERVAL 1 SECOND`). 这修复 [\#5743](https://github.com/ClickHouse/ClickHouse/issues/5743). [\#9323](https://github.com/ClickHouse/ClickHouse/pull/9323) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)). 这个问题是在所有ClickHouse版本。 +- 现在不可能创建或添加具有简单循环别名的列,如 `a DEFAULT b, b DEFAULT a`. [\#9603](https://github.com/ClickHouse/ClickHouse/pull/9603) ([阿利沙平](https://github.com/alesapin)) +- 修复了base64编码值末尾填充格式错误的问题。 更新base64库。 这修复 [\#9491](https://github.com/ClickHouse/ClickHouse/issues/9491),关闭 [\#9492](https://github.com/ClickHouse/ClickHouse/issues/9492) [\#9500](https://github.com/ClickHouse/ClickHouse/pull/9500) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修复数据竞赛破坏 `Poco::HTTPServer`. 当服务器启动并立即关闭时,可能会发生这种情况。 [\#9468](https://github.com/ClickHouse/ClickHouse/pull/9468) ([安东\*波波夫](https://github.com/CurtizJ)) +- 修复可能的崩溃/错误的行数 `LIMIT n WITH TIES` 当有很多行等于第n行时。 [\#9464](https://github.com/ClickHouse/ClickHouse/pull/9464) ([tavplubix](https://github.com/tavplubix)) +- 修复与列Ttl可能不匹配的校验和。 [\#9451](https://github.com/ClickHouse/ClickHouse/pull/9451) ([安东\*波波夫](https://github.com/CurtizJ)) +- 修复当用户尝试崩溃 `ALTER MODIFY SETTING` 对于老格式化 `MergeTree` 表引擎家族. [\#9435](https://github.com/ClickHouse/ClickHouse/pull/9435) ([阿利沙平](https://github.com/alesapin)) +- 现在我们将尝试更频繁地完成突变。 [\#9427](https://github.com/ClickHouse/ClickHouse/pull/9427) ([阿利沙平](https://github.com/alesapin)) +- 修复引入的复制协议不兼容 [\#8598](https://github.com/ClickHouse/ClickHouse/issues/8598). 
[\#9412](https://github.com/ClickHouse/ClickHouse/pull/9412) ([阿利沙平](https://github.com/alesapin)) +- 修复数组类型的bloom\_filter索引的not(has())。 [\#9407](https://github.com/ClickHouse/ClickHouse/pull/9407) ([achimbab](https://github.com/achimbab)) +- 固定的行为 `match` 和 `extract` 当干草堆有零字节的函数。 当干草堆不变时,这种行为是错误的。 这修复 [\#9160](https://github.com/ClickHouse/ClickHouse/issues/9160) [\#9163](https://github.com/ClickHouse/ClickHouse/pull/9163) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) [\#9345](https://github.com/ClickHouse/ClickHouse/pull/9345) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) + +#### 构建/测试/包装改进 {#buildtestingpackaging-improvement-1} + +- 异常处理现在可以在适用于Linux的Windows子系统上正常工作。 看https://github.com/ClickHouse-Extras/libunwind/pull/3 这修复 [\#6480](https://github.com/ClickHouse/ClickHouse/issues/6480) [\#9564](https://github.com/ClickHouse/ClickHouse/pull/9564) ([sobolevsv](https://github.com/sobolevsv)) + +### ClickHouse释放v20.1.6.30,2020-03-05 {#clickhouse-release-v20-1-6-30-2020-03-05} + +#### 错误修复 {#bug-fix-5} + +- 修复压缩时的数据不兼容 `T64` 编解ec + [\#9039](https://github.com/ClickHouse/ClickHouse/pull/9039) [(abyss7)](https://github.com/abyss7) +- 在一个线程中从MergeTree表中读取时修复范围顺序。 修复 [\#8964](https://github.com/ClickHouse/ClickHouse/issues/8964). + [\#9050](https://github.com/ClickHouse/ClickHouse/pull/9050) [(CurtizJ))](https://github.com/CurtizJ) +- 修复可能的段错误 `MergeTreeRangeReader`,同时执行 `PREWHERE`. 修复 [\#9064](https://github.com/ClickHouse/ClickHouse/issues/9064). + [\#9106](https://github.com/ClickHouse/ClickHouse/pull/9106) [(CurtizJ))](https://github.com/CurtizJ) +- 修复 `reinterpretAsFixedString` 返回 `FixedString` 而不是 `String`. + [\#9052](https://github.com/ClickHouse/ClickHouse/pull/9052) [(oandrew)](https://github.com/oandrew) +- 修复 `joinGet` 使用可为空的返回类型。 修复 [\#8919](https://github.com/ClickHouse/ClickHouse/issues/8919) + [\#9014](https://github.com/ClickHouse/ClickHouse/pull/9014) [(amosbird)](https://github.com/amosbird) +- 修复bittestall/bitTestAny函数的模糊测试和不正确的行为。 + [\#9143](https://github.com/ClickHouse/ClickHouse/pull/9143) [(阿列克谢-米洛维多夫)](https://github.com/alexey-milovidov) +- 修复当干草堆有零字节时匹配和提取函数的行为。 当干草堆不变时,这种行为是错误的。 修复 [\#9160](https://github.com/ClickHouse/ClickHouse/issues/9160) + [\#9163](https://github.com/ClickHouse/ClickHouse/pull/9163) [(阿列克谢-米洛维多夫)](https://github.com/alexey-milovidov) +- 当使用非严格单调函数索引时,固定执行反转谓词。 修复 [\#9034](https://github.com/ClickHouse/ClickHouse/issues/9034) + [\#9223](https://github.com/ClickHouse/ClickHouse/pull/9223) [(Akazz)](https://github.com/Akazz) +- 允许重写 `CROSS` 到 `INNER JOIN` 如果有 `[NOT] LIKE` 操作员在 `WHERE` 科。 修复 [\#9191](https://github.com/ClickHouse/ClickHouse/issues/9191) + [\#9229](https://github.com/ClickHouse/ClickHouse/pull/9229) [(4ertus2)](https://github.com/4ertus2) +- 允许使用日志引擎的表中的第一列成为别名。 + [\#9231](https://github.com/ClickHouse/ClickHouse/pull/9231) [(abyss7)](https://github.com/abyss7) +- 允许逗号加入 `IN()` 进去 修复 [\#7314](https://github.com/ClickHouse/ClickHouse/issues/7314). + [\#9251](https://github.com/ClickHouse/ClickHouse/pull/9251) [(4ertus2)](https://github.com/4ertus2) +- 改进 `ALTER MODIFY/ADD` 查询逻辑。 现在你不能 `ADD` 不带类型的列, `MODIFY` 默认表达式不改变列的类型和 `MODIFY` type不会丢失默认表达式值。 修复 [\#8669](https://github.com/ClickHouse/ClickHouse/issues/8669). 
+ [\#9227](https://github.com/ClickHouse/ClickHouse/pull/9227) [(alesapin)](https://github.com/alesapin) +- 修复突变最终确定,当已经完成突变时可以具有状态is\_done=0。 + [\#9217](https://github.com/ClickHouse/ClickHouse/pull/9217) [(alesapin)](https://github.com/alesapin) +- 碌莽禄Support: “Processors” 管道系统.数字和系统.numbers\_mt 这也修复了错误时 `max_execution_time` 不被尊重。 + [\#7796](https://github.com/ClickHouse/ClickHouse/pull/7796) [(KochetovNicolai)](https://github.com/KochetovNicolai) +- 修复错误的计数 `DictCacheKeysRequestedFound` 公制。 + [\#9411](https://github.com/ClickHouse/ClickHouse/pull/9411) [(nikitamikhaylov)](https://github.com/nikitamikhaylov) +- 添加了对存储策略的检查 `ATTACH PARTITION FROM`, `REPLACE PARTITION`, `MOVE TO TABLE` 否则可能使部分数据在重新启动后无法访问,并阻止ClickHouse启动。 + [\#9383](https://github.com/ClickHouse/ClickHouse/pull/9383) [(excitoon)](https://github.com/excitoon) +- 在固定的瑞银报告 `MergeTreeIndexSet`. 这修复 [\#9250](https://github.com/ClickHouse/ClickHouse/issues/9250) + [\#9365](https://github.com/ClickHouse/ClickHouse/pull/9365) [(阿列克谢-米洛维多夫)](https://github.com/alexey-milovidov) +- 在BlockIO中修复可能的数据集。 + [\#9356](https://github.com/ClickHouse/ClickHouse/pull/9356) [(KochetovNicolai)](https://github.com/KochetovNicolai) +- 支持 `UInt64` 在JSON相关函数中不适合Int64的数字。 更新 `SIMDJSON` 为了主人 这修复 [\#9209](https://github.com/ClickHouse/ClickHouse/issues/9209) + [\#9344](https://github.com/ClickHouse/ClickHouse/pull/9344) [(阿列克谢-米洛维多夫)](https://github.com/alexey-milovidov) +- 如果将数据目录挂载到单独的设备,则修复可用空间量计算不正确时的问题。 对于默认磁盘,计算数据子目录的可用空间。 这修复 [\#7441](https://github.com/ClickHouse/ClickHouse/issues/7441) + [\#9257](https://github.com/ClickHouse/ClickHouse/pull/9257) [(米尔布)](https://github.com/millb) +- 修复TLS连接可能会失败并显示消息时的问题 `OpenSSL SSL_read: error:14094438:SSL routines:ssl3_read_bytes:tlsv1 alert internal error and SSL Exception: error:2400006E:random number generator::error retrieving entropy.` 将OpenSSL更新到上游主机。 + [\#8956](https://github.com/ClickHouse/ClickHouse/pull/8956) [(阿列克谢-米洛维多夫)](https://github.com/alexey-milovidov) +- 执行时 `CREATE` 查询,在存储引擎参数中折叠常量表达式。 将空数据库名称替换为当前数据库。 修复 [\#6508](https://github.com/ClickHouse/ClickHouse/issues/6508), [\#3492](https://github.com/ClickHouse/ClickHouse/issues/3492). 还修复了ClickHouseDictionarySource中检查本地地址。 + [\#9262](https://github.com/ClickHouse/ClickHouse/pull/9262) [(tabplubix)](https://github.com/tavplubix) +- 修复段错误 `StorageMerge`,从StorageFile读取时可能发生。 + [\#9387](https://github.com/ClickHouse/ClickHouse/pull/9387) [(tabplubix)](https://github.com/tavplubix) +- 防止丢失数据 `Kafka` 在极少数情况下,在读取后缀之后但在提交之前发生异常。 修复 [\#9378](https://github.com/ClickHouse/ClickHouse/issues/9378). 相关: [\#7175](https://github.com/ClickHouse/ClickHouse/issues/7175) + [\#9507](https://github.com/ClickHouse/ClickHouse/pull/9507) [(菲利蒙诺夫)](https://github.com/filimonov) +- 修复尝试使用/删除时导致服务器终止的错误 `Kafka` 使用错误的参数创建的表。 修复 [\#9494](https://github.com/ClickHouse/ClickHouse/issues/9494). 结合 [\#9507](https://github.com/ClickHouse/ClickHouse/issues/9507). 
+ [\#9513](https://github.com/ClickHouse/ClickHouse/pull/9513) [(菲利蒙诺夫)](https://github.com/filimonov) + +#### 新功能 {#new-feature-1} + +- 添加 `deduplicate_blocks_in_dependent_materialized_views` 用于控制具有实例化视图的表中幂等插入的行为的选项。 这个新功能是由Altinity的特殊要求添加到错误修正版本中的。 + [\#9070](https://github.com/ClickHouse/ClickHouse/pull/9070) [(urykhy)](https://github.com/urykhy) + +### ClickHouse版本v20.1.2.4,2020-01-22 {#clickhouse-release-v20-1-2-4-2020-01-22} + +#### 向后不兼容的更改 {#backward-incompatible-change-1} + +- 使设置 `merge_tree_uniform_read_distribution` 过时了 服务器仍可识别此设置,但无效。 [\#8308](https://github.com/ClickHouse/ClickHouse/pull/8308) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 更改函数的返回类型 `greatCircleDistance` 到 `Float32` 因为现在计算的结果是 `Float32`. [\#7993](https://github.com/ClickHouse/ClickHouse/pull/7993) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 现在预计查询参数表示为 “escaped” 格式。 例如,要传递字符串 `ab` 你必须写 `a\tb` 或 `a\b` 并分别, `a%5Ctb` 或 `a%5C%09b` 在URL中。 这是需要添加传递NULL作为的可能性 `\N`. 这修复 [\#7488](https://github.com/ClickHouse/ClickHouse/issues/7488). [\#8517](https://github.com/ClickHouse/ClickHouse/pull/8517) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 启用 `use_minimalistic_part_header_in_zookeeper` 设置 `ReplicatedMergeTree` 默认情况下。 这将显着减少存储在ZooKeeper中的数据量。 自19.1版本以来支持此设置,我们已经在多个服务的生产中使用它,半年以上没有任何问题。 如果您有机会降级到19.1以前的版本,请禁用此设置。 [\#6850](https://github.com/ClickHouse/ClickHouse/pull/6850) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 数据跳过索引已准备就绪并默认启用。 设置 `allow_experimental_data_skipping_indices`, `allow_experimental_cross_to_join_conversion` 和 `allow_experimental_multiple_joins_emulation` 现在已经过时,什么也不做。 [\#7974](https://github.com/ClickHouse/ClickHouse/pull/7974) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 添加新建 `ANY JOIN` 逻辑 `StorageJoin` 符合 `JOIN` 操作。 要在不改变行为的情况下进行升级,您需要添加 `SETTINGS any_join_distinct_right_table_keys = 1` 引擎联接表元数据或在升级后重新创建这些表。 [\#8400](https://github.com/ClickHouse/ClickHouse/pull/8400) ([Artem Zuikov](https://github.com/4ertus2)) +- 要求重新启动服务器以应用日志记录配置中的更改。 这是一种临时解决方法,可以避免服务器将日志记录到已删除的日志文件中的错误(请参阅 [\#8696](https://github.com/ClickHouse/ClickHouse/issues/8696)). [\#8707](https://github.com/ClickHouse/ClickHouse/pull/8707) ([Alexander Kuzmenkov](https://github.com/akuzm)) + +#### 新功能 {#new-feature-2} + +- 添加了有关部件路径的信息 `system.merges`. [\#8043](https://github.com/ClickHouse/ClickHouse/pull/8043) ([Vladimir Chebotarev](https://github.com/excitoon)) +- 添加执行能力 `SYSTEM RELOAD DICTIONARY` 查询中 `ON CLUSTER` 模式 [\#8288](https://github.com/ClickHouse/ClickHouse/pull/8288) ([纪尧姆\*塔瑟里](https://github.com/YiuRULE)) +- 添加执行能力 `CREATE DICTIONARY` 查询中 `ON CLUSTER` 模式 [\#8163](https://github.com/ClickHouse/ClickHouse/pull/8163) ([阿利沙平](https://github.com/alesapin)) +- 现在用户的个人资料 `users.xml` 可以继承多个配置文件。 [\#8343](https://github.com/ClickHouse/ClickHouse/pull/8343) ([Mikhail f. Shiryaev](https://github.com/Felixoid)) +- 已添加 `system.stack_trace` 允许查看所有服务器线程的堆栈跟踪的表。 这对于开发人员反省服务器状态非常有用。 这修复 [\#7576](https://github.com/ClickHouse/ClickHouse/issues/7576). 
[\#8344](https://github.com/ClickHouse/ClickHouse/pull/8344) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 添加 `DateTime64` 具有可配置子秒精度的数据类型。 [\#7170](https://github.com/ClickHouse/ClickHouse/pull/7170) ([瓦西里\*内姆科夫](https://github.com/Enmk)) +- 添加表函数 `clusterAllReplicas` 这允许查询集群中的所有节点。 [\#8493](https://github.com/ClickHouse/ClickHouse/pull/8493) ([kiran sunkari](https://github.com/kiransunkari)) +- 添加聚合函数 `categoricalInformationValue` 其计算出离散特征的信息值。 [\#8117](https://github.com/ClickHouse/ClickHouse/pull/8117) ([hcz](https://github.com/hczhcz)) +- 加快数据文件的解析 `CSV`, `TSV` 和 `JSONEachRow` 通过并行进行格式化。 [\#7780](https://github.com/ClickHouse/ClickHouse/pull/7780) ([Alexander Kuzmenkov](https://github.com/akuzm)) +- 添加功能 `bankerRound` 它执行银行家的四舍五入。 [\#8112](https://github.com/ClickHouse/ClickHouse/pull/8112) ([hcz](https://github.com/hczhcz)) +- 支持区域名称的嵌入式字典中的更多语言: ‘ru’, ‘en’, ‘ua’, ‘uk’, ‘by’, ‘kz’, ‘tr’, ‘de’, ‘uz’, ‘lv’, ‘lt’, ‘et’, ‘pt’, ‘he’, ‘vi’. [\#8189](https://github.com/ClickHouse/ClickHouse/pull/8189) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 改进的一致性 `ANY JOIN` 逻辑 现在 `t1 ANY LEFT JOIN t2` 等于 `t2 ANY RIGHT JOIN t1`. [\#7665](https://github.com/ClickHouse/ClickHouse/pull/7665) ([Artem Zuikov](https://github.com/4ertus2)) +- 添加设置 `any_join_distinct_right_table_keys` 这使旧的行为 `ANY INNER JOIN`. [\#7665](https://github.com/ClickHouse/ClickHouse/pull/7665) ([Artem Zuikov](https://github.com/4ertus2)) +- 添加新建 `SEMI` 和 `ANTI JOIN`. 老 `ANY INNER JOIN` 行为现在可作为 `SEMI LEFT JOIN`. [\#7665](https://github.com/ClickHouse/ClickHouse/pull/7665) ([Artem Zuikov](https://github.com/4ertus2)) +- 已添加 `Distributed` 格式 `File` 发动机和 `file` 表函数,它允许从读 `.bin` 通过异步插入生成的文件 `Distributed` 桌子 [\#8535](https://github.com/ClickHouse/ClickHouse/pull/8535) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 添加可选的重置列参数 `runningAccumulate` 这允许为每个新的键值重置聚合结果。 [\#8326](https://github.com/ClickHouse/ClickHouse/pull/8326) ([谢尔盖\*科诺年科](https://github.com/kononencheg)) +- 添加使用ClickHouse作为普罗米修斯端点的能力。 [\#7900](https://github.com/ClickHouse/ClickHouse/pull/7900) ([vdimir](https://github.com/Vdimir)) +- 添加部分 `` 在 `config.xml` 这将限制允许的主机用于远程表引擎和表函数 `URL`, `S3`, `HDFS`. [\#7154](https://github.com/ClickHouse/ClickHouse/pull/7154) ([米哈伊尔\*科罗托夫](https://github.com/millb)) +- 添加功能 `greatCircleAngle` 它计算球体上的距离(以度为单位)。 [\#8105](https://github.com/ClickHouse/ClickHouse/pull/8105) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 改变地球半径与h3库一致。 [\#8105](https://github.com/ClickHouse/ClickHouse/pull/8105) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 已添加 `JSONCompactEachRow` 和 `JSONCompactEachRowWithNamesAndTypes` 输入和输出格式。 [\#7841](https://github.com/ClickHouse/ClickHouse/pull/7841) ([米哈伊尔\*科罗托夫](https://github.com/millb)) +- 增加了与文件相关的表引擎和表函数的功能 (`File`, `S3`, `URL`, `HDFS`)它允许读取和写入 `gzip` 基于附加引擎参数或文件扩展名的文件。 [\#7840](https://github.com/ClickHouse/ClickHouse/pull/7840) ([安德烈\*博德罗夫](https://github.com/apbodrov)) +- 添加了 `randomASCII(length)` 函数,生成一个字符串与一个随机集 [ASCII](https://en.wikipedia.org/wiki/ASCII#Printable_characters) 可打印字符。 [\#8401](https://github.com/ClickHouse/ClickHouse/pull/8401) ([刺刀](https://github.com/BayoNet)) +- 添加功能 `JSONExtractArrayRaw` 它返回从未解析的json数组元素上的数组 `JSON` 字符串。 [\#8081](https://github.com/ClickHouse/ClickHouse/pull/8081) ([Oleg Matrokhin](https://github.com/errx)) +- 添加 `arrayZip` 函数允许将多个长度相等的数组合成一个元组数组。 [\#8149](https://github.com/ClickHouse/ClickHouse/pull/8149) ([张冬](https://github.com/zhang2014)) +- 添加根据配置的磁盘之间移动数据的能力 `TTL`-表达式为 `*MergeTree` 表引擎家族. 
[\#8140](https://github.com/ClickHouse/ClickHouse/pull/8140) ([Vladimir Chebotarev](https://github.com/excitoon)) +- 增加了新的聚合功能 `avgWeighted` 其允许计算加权平均值。 [\#7898](https://github.com/ClickHouse/ClickHouse/pull/7898) ([安德烈\*博德罗夫](https://github.com/apbodrov)) +- 现在并行解析默认启用 `TSV`, `TSKV`, `CSV` 和 `JSONEachRow` 格式。 [\#7894](https://github.com/ClickHouse/ClickHouse/pull/7894) ([尼基塔\*米哈伊洛夫](https://github.com/nikitamikhaylov)) +- 从添加几个地理功能 `H3` 图书馆: `h3GetResolution`, `h3EdgeAngle`, `h3EdgeLength`, `h3IsValid` 和 `h3kRing`. [\#8034](https://github.com/ClickHouse/ClickHouse/pull/8034) ([Konstantin Malanchev](https://github.com/hombit)) +- 增加了对brotli的支持 (`br`)压缩文件相关的存储和表函数。 这修复 [\#8156](https://github.com/ClickHouse/ClickHouse/issues/8156). [\#8526](https://github.com/ClickHouse/ClickHouse/pull/8526) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 添加 `groupBit*` 功能的 `SimpleAggregationFunction` 类型。 [\#8485](https://github.com/ClickHouse/ClickHouse/pull/8485) ([纪尧姆\*塔瑟里](https://github.com/YiuRULE)) + +#### 错误修复 {#bug-fix-6} + +- 修复重命名表 `Distributed` 引擎 修复问题 [\#7868](https://github.com/ClickHouse/ClickHouse/issues/7868). [\#8306](https://github.com/ClickHouse/ClickHouse/pull/8306) ([tavplubix](https://github.com/tavplubix)) +- 现在字典支持 `EXPRESSION` 对于非ClickHouse SQL方言中任意字符串中的属性。 [\#8098](https://github.com/ClickHouse/ClickHouse/pull/8098) ([阿利沙平](https://github.com/alesapin)) +- 修复损坏 `INSERT SELECT FROM mysql(...)` 查询。 这修复 [\#8070](https://github.com/ClickHouse/ClickHouse/issues/8070) 和 [\#7960](https://github.com/ClickHouse/ClickHouse/issues/7960). [\#8234](https://github.com/ClickHouse/ClickHouse/pull/8234) ([tavplubix](https://github.com/tavplubix)) +- 修复错误 “Mismatch column sizes” 插入默认值时 `Tuple` 从 `JSONEachRow`. 这修复 [\#5653](https://github.com/ClickHouse/ClickHouse/issues/5653). [\#8606](https://github.com/ClickHouse/ClickHouse/pull/8606) ([tavplubix](https://github.com/tavplubix)) +- 现在将在使用的情况下抛出一个异常 `WITH TIES` 旁边的 `LIMIT BY`. 还增加了使用能力 `TOP` 与 `LIMIT BY`. 这修复 [\#7472](https://github.com/ClickHouse/ClickHouse/issues/7472). [\#7637](https://github.com/ClickHouse/ClickHouse/pull/7637) ([尼基塔\*米哈伊洛夫](https://github.com/nikitamikhaylov)) +- 从新鲜的glibc版本中修复unintendent依赖关系 `clickhouse-odbc-bridge` 二进制 [\#8046](https://github.com/ClickHouse/ClickHouse/pull/8046) ([阿莫斯鸟](https://github.com/amosbird)) +- 修正错误的检查功能 `*MergeTree` 引擎家族. 现在,当我们在最后一个颗粒和最后一个标记(非最终)中有相同数量的行时,它不会失败。 [\#8047](https://github.com/ClickHouse/ClickHouse/pull/8047) ([阿利沙平](https://github.com/alesapin)) +- 修复插入 `Enum*` 列后 `ALTER` 查询,当基础数值类型等于表指定类型时。 这修复 [\#7836](https://github.com/ClickHouse/ClickHouse/issues/7836). [\#7908](https://github.com/ClickHouse/ClickHouse/pull/7908) ([安东\*波波夫](https://github.com/CurtizJ)) +- 允许非常数负 “size” 函数的参数 `substring`. 这是不允许的错误。 这修复 [\#4832](https://github.com/ClickHouse/ClickHouse/issues/4832). [\#7703](https://github.com/ClickHouse/ClickHouse/pull/7703) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修复当错误数量的参数传递到解析错误 `(O|J)DBC` 表引擎。 [\#7709](https://github.com/ClickHouse/ClickHouse/pull/7709) ([阿利沙平](https://github.com/alesapin)) +- 将日志发送到syslog时使用正在运行的clickhouse进程的命令名。 在以前的版本中,使用空字符串而不是命令名称。 [\#8460](https://github.com/ClickHouse/ClickHouse/pull/8460) ([Michael Nacharov](https://github.com/mnach)) +- 修复检查允许的主机 `localhost`. 这个公关修复了在提供的解决方案 [\#8241](https://github.com/ClickHouse/ClickHouse/pull/8241). 
[\#8342](https://github.com/ClickHouse/ClickHouse/pull/8342) ([维塔利\*巴拉诺夫](https://github.com/vitlibar)) +- 修复罕见的崩溃 `argMin` 和 `argMax` 长字符串参数的函数,当结果被用于 `runningAccumulate` 功能。 这修复 [\#8325](https://github.com/ClickHouse/ClickHouse/issues/8325) [\#8341](https://github.com/ClickHouse/ClickHouse/pull/8341) ([恐龙](https://github.com/769344359)) +- 修复表的内存过度使用 `Buffer` 引擎 [\#8345](https://github.com/ClickHouse/ClickHouse/pull/8345) ([Azat Khuzhin](https://github.com/azat)) +- 修正了可以采取的功能中的潜在错误 `NULL` 作为参数之一,并返回非NULL。 [\#8196](https://github.com/ClickHouse/ClickHouse/pull/8196) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 在线程池中更好地计算后台进程的指标 `MergeTree` 表引擎. [\#8194](https://github.com/ClickHouse/ClickHouse/pull/8194) ([Vladimir Chebotarev](https://github.com/excitoon)) +- 修复功能 `IN` 里面 `WHERE` 存在行级表筛选器时的语句。 修复 [\#6687](https://github.com/ClickHouse/ClickHouse/issues/6687) [\#8357](https://github.com/ClickHouse/ClickHouse/pull/8357) ([伊万](https://github.com/abyss7)) +- 现在,如果整数值没有完全解析设置值,则会引发异常。 [\#7678](https://github.com/ClickHouse/ClickHouse/pull/7678) ([米哈伊尔\*科罗托夫](https://github.com/millb)) +- 修复当聚合函数用于查询具有两个以上本地分片的分布式表时出现的异常。 [\#8164](https://github.com/ClickHouse/ClickHouse/pull/8164) ([小路](https://github.com/nicelulu)) +- 现在,bloom filter可以处理零长度数组,并且不执行冗余计算。 [\#8242](https://github.com/ClickHouse/ClickHouse/pull/8242) ([achimbab](https://github.com/achimbab)) +- 修正了通过匹配客户端主机来检查客户端主机是否允许 `host_regexp` 在指定 `users.xml`. [\#8241](https://github.com/ClickHouse/ClickHouse/pull/8241) ([维塔利\*巴拉诺夫](https://github.com/vitlibar)) +- 放松不明确的列检查,导致多个误报 `JOIN ON` 科。 [\#8385](https://github.com/ClickHouse/ClickHouse/pull/8385) ([Artem Zuikov](https://github.com/4ertus2)) +- 修正了可能的服务器崩溃 (`std::terminate`)当服务器不能发送或写入数据 `JSON` 或 `XML` 格式与值 `String` 数据类型(需要 `UTF-8` 验证)或使用Brotli算法或其他一些罕见情况下压缩结果数据时。 这修复 [\#7603](https://github.com/ClickHouse/ClickHouse/issues/7603) [\#8384](https://github.com/ClickHouse/ClickHouse/pull/8384) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修复竞争条件 `StorageDistributedDirectoryMonitor` 被线人发现 这修复 [\#8364](https://github.com/ClickHouse/ClickHouse/issues/8364). [\#8383](https://github.com/ClickHouse/ClickHouse/pull/8383) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 现在背景合并 `*MergeTree` 表引擎家族更准确地保留存储策略卷顺序。 [\#8549](https://github.com/ClickHouse/ClickHouse/pull/8549) ([Vladimir Chebotarev](https://github.com/excitoon)) +- 现在表引擎 `Kafka` 与正常工作 `Native` 格式。 这修复 [\#6731](https://github.com/ClickHouse/ClickHouse/issues/6731) [\#7337](https://github.com/ClickHouse/ClickHouse/issues/7337) [\#8003](https://github.com/ClickHouse/ClickHouse/issues/8003). [\#8016](https://github.com/ClickHouse/ClickHouse/pull/8016) ([filimonov](https://github.com/filimonov)) +- 固定格式与标题(如 `CSVWithNames`)这是抛出关于EOF表引擎的异常 `Kafka`. [\#8016](https://github.com/ClickHouse/ClickHouse/pull/8016) ([filimonov](https://github.com/filimonov)) +- 修复了从子查询右侧部分制作set的错误 `IN` 科。 这修复 [\#5767](https://github.com/ClickHouse/ClickHouse/issues/5767) 和 [\#2542](https://github.com/ClickHouse/ClickHouse/issues/2542). [\#7755](https://github.com/ClickHouse/ClickHouse/pull/7755) ([尼基塔\*米哈伊洛夫](https://github.com/nikitamikhaylov)) +- 从存储读取时修复可能的崩溃 `File`. [\#7756](https://github.com/ClickHouse/ClickHouse/pull/7756) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 在固定的文件读取 `Parquet` 包含类型列的格式 `list`. [\#8334](https://github.com/ClickHouse/ClickHouse/pull/8334) ([马苏兰](https://github.com/maxulan)) +- 修复错误 `Not found column` 对于分布式查询 `PREWHERE` 条件取决于采样键if `max_parallel_replicas > 1`. 
[\#7913](https://github.com/ClickHouse/ClickHouse/pull/7913) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 修复错误 `Not found column` 如果使用查询 `PREWHERE` 依赖于表的别名,结果集由于主键条件而为空。 [\#7911](https://github.com/ClickHouse/ClickHouse/pull/7911) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 函数的固定返回类型 `rand` 和 `randConstant` 在情况下 `Nullable` 争论。 现在函数总是返回 `UInt32` 而且从来没有 `Nullable(UInt32)`. [\#8204](https://github.com/ClickHouse/ClickHouse/pull/8204) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 禁用谓词下推 `WITH FILL` 表达。 这修复 [\#7784](https://github.com/ClickHouse/ClickHouse/issues/7784). [\#7789](https://github.com/ClickHouse/ClickHouse/pull/7789) ([张冬](https://github.com/zhang2014)) +- 修正错误 `count()` 结果 `SummingMergeTree` 当 `FINAL` 部分被使用。 [\#3280](https://github.com/ClickHouse/ClickHouse/issues/3280) [\#7786](https://github.com/ClickHouse/ClickHouse/pull/7786) ([尼基塔\*米哈伊洛夫](https://github.com/nikitamikhaylov)) +- 修复来自远程服务器的常量函数可能不正确的结果。 它发生在具有以下功能的查询中 `version()`, `uptime()` 等。 它为不同的服务器返回不同的常量值。 这修复 [\#7666](https://github.com/ClickHouse/ClickHouse/issues/7666). [\#7689](https://github.com/ClickHouse/ClickHouse/pull/7689) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 修复下推谓词优化中导致错误结果的复杂错误。 这解决了下推谓词优化的很多问题。 [\#8503](https://github.com/ClickHouse/ClickHouse/pull/8503) ([张冬](https://github.com/zhang2014)) +- 修复崩溃 `CREATE TABLE .. AS dictionary` 查询。 [\#8508](https://github.com/ClickHouse/ClickHouse/pull/8508) ([Azat Khuzhin](https://github.com/azat)) +- 一些改进ClickHouse语法 `.g4` 文件 [\#8294](https://github.com/ClickHouse/ClickHouse/pull/8294) ([太阳里](https://github.com/taiyang-li)) +- 修复导致崩溃的错误 `JOIN`s与表与发动机 `Join`. 这修复 [\#7556](https://github.com/ClickHouse/ClickHouse/issues/7556) [\#8254](https://github.com/ClickHouse/ClickHouse/issues/8254) [\#7915](https://github.com/ClickHouse/ClickHouse/issues/7915) [\#8100](https://github.com/ClickHouse/ClickHouse/issues/8100). [\#8298](https://github.com/ClickHouse/ClickHouse/pull/8298) ([Artem Zuikov](https://github.com/4ertus2)) +- 修复冗余字典重新加载 `CREATE DATABASE`. [\#7916](https://github.com/ClickHouse/ClickHouse/pull/7916) ([Azat Khuzhin](https://github.com/azat)) +- 限制从读取流的最大数量 `StorageFile` 和 `StorageHDFS`. 修复https://github.com/ClickHouse/ClickHouse/issues/7650. [\#7981](https://github.com/ClickHouse/ClickHouse/pull/7981) ([阿利沙平](https://github.com/alesapin)) +- 修复bug `ALTER ... MODIFY ... CODEC` 查询,当用户同时指定默认表达式和编解ec。 修复 [8593](https://github.com/ClickHouse/ClickHouse/issues/8593). [\#8614](https://github.com/ClickHouse/ClickHouse/pull/8614) ([阿利沙平](https://github.com/alesapin)) +- 修复列的后台合并错误 `SimpleAggregateFunction(LowCardinality)` 类型。 [\#8613](https://github.com/ClickHouse/ClickHouse/pull/8613) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 固定类型签入功能 `toDateTime64`. [\#8375](https://github.com/ClickHouse/ClickHouse/pull/8375) ([瓦西里\*内姆科夫](https://github.com/Enmk)) +- 现在服务器不崩溃 `LEFT` 或 `FULL JOIN` 与和加入引擎和不支持 `join_use_nulls` 设置。 [\#8479](https://github.com/ClickHouse/ClickHouse/pull/8479) ([Artem Zuikov](https://github.com/4ertus2)) +- 现在 `DROP DICTIONARY IF EXISTS db.dict` 查询不会抛出异常,如果 `db` 根本不存在 [\#8185](https://github.com/ClickHouse/ClickHouse/pull/8185) ([维塔利\*巴拉诺夫](https://github.com/vitlibar)) +- 修复表函数中可能出现的崩溃 (`file`, `mysql`, `remote`)引用删除引起的 `IStorage` 对象。 修复插入表函数时指定的列的不正确解析。 [\#7762](https://github.com/ClickHouse/ClickHouse/pull/7762) ([tavplubix](https://github.com/tavplubix)) +- 确保网络启动前 `clickhouse-server`. 这修复 [\#7507](https://github.com/ClickHouse/ClickHouse/issues/7507). 
[\#8570](https://github.com/ClickHouse/ClickHouse/pull/8570) ([余志昌](https://github.com/yuzhichang)) +- 修复安全连接的超时处理,因此查询不会无限挂起。 这修复 [\#8126](https://github.com/ClickHouse/ClickHouse/issues/8126). [\#8128](https://github.com/ClickHouse/ClickHouse/pull/8128) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修复 `clickhouse-copier`并发工人之间的冗余争用。 [\#7816](https://github.com/ClickHouse/ClickHouse/pull/7816) ([丁香飞](https://github.com/dingxiangfei2009)) +- 现在突变不会跳过附加的部分,即使它们的突变版本比当前的突变版本大。 [\#7812](https://github.com/ClickHouse/ClickHouse/pull/7812) ([余志昌](https://github.com/yuzhichang)) [\#8250](https://github.com/ClickHouse/ClickHouse/pull/8250) ([阿利沙平](https://github.com/alesapin)) +- 忽略冗余副本 `*MergeTree` 数据部分移动到另一个磁盘和服务器重新启动后。 [\#7810](https://github.com/ClickHouse/ClickHouse/pull/7810) ([Vladimir Chebotarev](https://github.com/excitoon)) +- 修复崩溃 `FULL JOIN` 与 `LowCardinality` 在 `JOIN` 钥匙 [\#8252](https://github.com/ClickHouse/ClickHouse/pull/8252) ([Artem Zuikov](https://github.com/4ertus2)) +- 禁止在插入查询中多次使用列名,如 `INSERT INTO tbl (x, y, x)`. 这修复 [\#5465](https://github.com/ClickHouse/ClickHouse/issues/5465), [\#7681](https://github.com/ClickHouse/ClickHouse/issues/7681). [\#7685](https://github.com/ClickHouse/ClickHouse/pull/7685) ([阿利沙平](https://github.com/alesapin)) +- 增加了回退,用于检测未知Cpu的物理CPU内核数量(使用逻辑CPU内核数量)。 这修复 [\#5239](https://github.com/ClickHouse/ClickHouse/issues/5239). [\#7726](https://github.com/ClickHouse/ClickHouse/pull/7726) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修复 `There's no column` 实例化列和别名列出错。 [\#8210](https://github.com/ClickHouse/ClickHouse/pull/8210) ([Artem Zuikov](https://github.com/4ertus2)) +- 固定切断崩溃时 `EXISTS` 查询没有使用 `TABLE` 或 `DICTIONARY` 预选赛 就像 `EXISTS t`. 这修复 [\#8172](https://github.com/ClickHouse/ClickHouse/issues/8172). 此错误在版本19.17中引入。 [\#8213](https://github.com/ClickHouse/ClickHouse/pull/8213) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修复罕见错误 `"Sizes of columns doesn't match"` 使用时可能会出现 `SimpleAggregateFunction` 列。 [\#7790](https://github.com/ClickHouse/ClickHouse/pull/7790) ([Boris Granveaud](https://github.com/bgranvea)) +- 修正错误,其中用户空 `allow_databases` 可以访问所有数据库(和相同的 `allow_dictionaries`). [\#7793](https://github.com/ClickHouse/ClickHouse/pull/7793) ([DeifyTheGod](https://github.com/DeifyTheGod)) +- 修复客户端崩溃时,服务器已经从客户端断开连接。 [\#8071](https://github.com/ClickHouse/ClickHouse/pull/8071) ([Azat Khuzhin](https://github.com/azat)) +- 修复 `ORDER BY` 在按主键前缀和非主键后缀排序的情况下的行为。 [\#7759](https://github.com/ClickHouse/ClickHouse/pull/7759) ([安东\*波波夫](https://github.com/CurtizJ)) +- 检查表中是否存在合格列。 这修复 [\#6836](https://github.com/ClickHouse/ClickHouse/issues/6836). [\#7758](https://github.com/ClickHouse/ClickHouse/pull/7758) ([Artem Zuikov](https://github.com/4ertus2)) +- 固定行为 `ALTER MOVE` 合并完成后立即运行移动指定的超部分。 修复 [\#8103](https://github.com/ClickHouse/ClickHouse/issues/8103). [\#8104](https://github.com/ClickHouse/ClickHouse/pull/8104) ([Vladimir Chebotarev](https://github.com/excitoon)) +- 使用时修复可能的服务器崩溃 `UNION` 具有不同数量的列。 修复 [\#7279](https://github.com/ClickHouse/ClickHouse/issues/7279). [\#7929](https://github.com/ClickHouse/ClickHouse/pull/7929) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 修复函数结果子字符串的大小 `substr` 负大小。 [\#8589](https://github.com/ClickHouse/ClickHouse/pull/8589) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 现在服务器不执行部分突变 `MergeTree` 如果后台池中没有足够的可用线程。 [\#8588](https://github.com/ClickHouse/ClickHouse/pull/8588) ([tavplubix](https://github.com/tavplubix)) +- 修复格式化时的小错字 `UNION ALL` AST. 
[\#7999](https://github.com/ClickHouse/ClickHouse/pull/7999) ([litao91](https://github.com/litao91)) +- 修正了负数不正确的布隆过滤结果。 这修复 [\#8317](https://github.com/ClickHouse/ClickHouse/issues/8317). [\#8566](https://github.com/ClickHouse/ClickHouse/pull/8566) ([张冬](https://github.com/zhang2014)) +- 在解压缩固定潜在的缓冲区溢出。 恶意用户可以传递捏造的压缩数据,这将导致缓冲区后读取。 这个问题是由Yandex信息安全团队的Eldar Zaitov发现的。 [\#8404](https://github.com/ClickHouse/ClickHouse/pull/8404) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修复因整数溢出而导致的错误结果 `arrayIntersect`. [\#7777](https://github.com/ClickHouse/ClickHouse/pull/7777) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 现在 `OPTIMIZE TABLE` query不会等待脱机副本执行该操作。 [\#8314](https://github.com/ClickHouse/ClickHouse/pull/8314) ([javi santana](https://github.com/javisantana)) +- 固定 `ALTER TTL` 解析器 `Replicated*MergeTree` 桌子 [\#8318](https://github.com/ClickHouse/ClickHouse/pull/8318) ([Vladimir Chebotarev](https://github.com/excitoon)) +- 修复服务器和客户端之间的通信,以便服务器在查询失败后读取临时表信息。 [\#8084](https://github.com/ClickHouse/ClickHouse/pull/8084) ([Azat Khuzhin](https://github.com/azat)) +- 修复 `bitmapAnd` 在聚合位图和标量位图相交时出现函数错误。 [\#8082](https://github.com/ClickHouse/ClickHouse/pull/8082) ([黄月](https://github.com/moon03432)) +- 完善的定义 `ZXid` 根据动物园管理员的程序员指南,它修复了错误 `clickhouse-cluster-copier`. [\#8088](https://github.com/ClickHouse/ClickHouse/pull/8088) ([丁香飞](https://github.com/dingxiangfei2009)) +- `odbc` 表函数现在尊重 `external_table_functions_use_nulls` 设置。 [\#7506](https://github.com/ClickHouse/ClickHouse/pull/7506) ([瓦西里\*内姆科夫](https://github.com/Enmk)) +- 修正了导致罕见的数据竞赛的错误。 [\#8143](https://github.com/ClickHouse/ClickHouse/pull/8143) ([亚历山大\*卡扎科夫](https://github.com/Akazz)) +- 现在 `SYSTEM RELOAD DICTIONARY` 完全重新加载字典,忽略 `update_field`. 这修复 [\#7440](https://github.com/ClickHouse/ClickHouse/issues/7440). [\#8037](https://github.com/ClickHouse/ClickHouse/pull/8037) ([维塔利\*巴拉诺夫](https://github.com/vitlibar)) +- 添加检查字典是否存在于创建查询的能力。 [\#8032](https://github.com/ClickHouse/ClickHouse/pull/8032) ([阿利沙平](https://github.com/alesapin)) +- 修复 `Float*` 解析中 `Values` 格式。 这修复 [\#7817](https://github.com/ClickHouse/ClickHouse/issues/7817). [\#7870](https://github.com/ClickHouse/ClickHouse/pull/7870) ([tavplubix](https://github.com/tavplubix)) +- 修复崩溃时,我们不能在一些后台操作保留空间 `*MergeTree` 表引擎家族. [\#7873](https://github.com/ClickHouse/ClickHouse/pull/7873) ([Vladimir Chebotarev](https://github.com/excitoon)) +- 修复表包含合并操作时的崩溃 `SimpleAggregateFunction(LowCardinality)` 列。 这修复 [\#8515](https://github.com/ClickHouse/ClickHouse/issues/8515). [\#8522](https://github.com/ClickHouse/ClickHouse/pull/8522) ([Azat Khuzhin](https://github.com/azat)) +- 恢复对所有ICU区域设置的支持,并添加对常量表达式应用排序规则的功能。 还添加语言名称 `system.collations` 桌子 [\#8051](https://github.com/ClickHouse/ClickHouse/pull/8051) ([阿利沙平](https://github.com/alesapin)) +- 修正错误时,外部字典与零最小寿命 (`LIFETIME(MIN 0 MAX N)`, `LIFETIME(N)`)不要在后台更新。 [\#7983](https://github.com/ClickHouse/ClickHouse/pull/7983) ([阿利沙平](https://github.com/alesapin)) +- 修复当clickhouse源外部字典在查询中有子查询时崩溃。 [\#8351](https://github.com/ClickHouse/ClickHouse/pull/8351) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 修复文件扩展名不正确的解析表与引擎 `URL`. 这修复 [\#8157](https://github.com/ClickHouse/ClickHouse/issues/8157). [\#8419](https://github.com/ClickHouse/ClickHouse/pull/8419) ([安德烈\*博德罗夫](https://github.com/apbodrov)) +- 修复 `CHECK TABLE` 查询为 `*MergeTree` 表没有关键. 修复 [\#7543](https://github.com/ClickHouse/ClickHouse/issues/7543). 
[\#7979](https://github.com/ClickHouse/ClickHouse/pull/7979) ([阿利沙平](https://github.com/alesapin)) +- 固定转换 `Float64` 到MySQL类型。 [\#8079](https://github.com/ClickHouse/ClickHouse/pull/8079) ([尤里\*巴拉诺夫](https://github.com/yurriy)) +- 现在,如果表没有完全删除,因为服务器崩溃,服务器将尝试恢复并加载它。 [\#8176](https://github.com/ClickHouse/ClickHouse/pull/8176) ([tavplubix](https://github.com/tavplubix)) +- 修复了表函数中的崩溃 `file` 同时插入到不存在的文件。 现在在这种情况下,文件将被创建,然后插入将被处理。 [\#8177](https://github.com/ClickHouse/ClickHouse/pull/8177) ([Olga Khvostikova](https://github.com/stavrolia)) +- 修复罕见的死锁时,可能发生 `trace_log` 处于启用状态。 [\#7838](https://github.com/ClickHouse/ClickHouse/pull/7838) ([filimonov](https://github.com/filimonov)) +- 添加能力与不同类型的工作,除了 `Date` 在 `RangeHashed` 从DDL查询创建的外部字典。 修复 [7899](https://github.com/ClickHouse/ClickHouse/issues/7899). [\#8275](https://github.com/ClickHouse/ClickHouse/pull/8275) ([阿利沙平](https://github.com/alesapin)) +- 修复崩溃时 `now64()` 用另一个函数的结果调用。 [\#8270](https://github.com/ClickHouse/ClickHouse/pull/8270) ([瓦西里\*内姆科夫](https://github.com/Enmk)) +- 修正了通过mysql有线协议检测客户端IP连接的错误。 [\#7743](https://github.com/ClickHouse/ClickHouse/pull/7743) ([Dmitry Muzyka](https://github.com/dmitriy-myz)) +- 修复空阵列处理 `arraySplit` 功能。 这修复 [\#7708](https://github.com/ClickHouse/ClickHouse/issues/7708). [\#7747](https://github.com/ClickHouse/ClickHouse/pull/7747) ([hcz](https://github.com/hczhcz)) +- 修复了以下问题 `pid-file` 另一个运行 `clickhouse-server` 可能会被删除。 [\#8487](https://github.com/ClickHouse/ClickHouse/pull/8487) ([徐伟清](https://github.com/weiqxu)) +- 修复字典重新加载,如果它有 `invalidate_query`,停止更新,并在以前的更新尝试一些异常。 [\#8029](https://github.com/ClickHouse/ClickHouse/pull/8029) ([阿利沙平](https://github.com/alesapin)) +- 修正了功能错误 `arrayReduce` 这可能会导致 “double free” 和聚合函数组合器中的错误 `Resample` 这可能会导致内存泄漏。 添加聚合功能 `aggThrow`. 此功能可用于测试目的。 [\#8446](https://github.com/ClickHouse/ClickHouse/pull/8446) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) + +#### 改进 {#improvement-1} + +- 改进了使用时的日志记录 `S3` 表引擎。 [\#8251](https://github.com/ClickHouse/ClickHouse/pull/8251) ([Grigory Pervakov](https://github.com/GrigoryPervakov)) +- 在调用时未传递任何参数时打印帮助消息 `clickhouse-local`. 这修复 [\#5335](https://github.com/ClickHouse/ClickHouse/issues/5335). [\#8230](https://github.com/ClickHouse/ClickHouse/pull/8230) ([安德烈\*纳戈尔尼](https://github.com/Melancholic)) +- 添加设置 `mutations_sync` 这允许等待 `ALTER UPDATE/DELETE` 同步查询。 [\#8237](https://github.com/ClickHouse/ClickHouse/pull/8237) ([阿利沙平](https://github.com/alesapin)) +- 允许设置相对 `user_files_path` 在 `config.xml` (在类似的方式 `format_schema_path`). [\#7632](https://github.com/ClickHouse/ClickHouse/pull/7632) ([hcz](https://github.com/hczhcz)) +- 为转换函数添加非法类型的异常 `-OrZero` 后缀 [\#7880](https://github.com/ClickHouse/ClickHouse/pull/7880) ([安德烈\*科尼亚耶夫](https://github.com/akonyaev90)) +- 简化在分布式查询中发送到分片的数据头的格式。 [\#8044](https://github.com/ClickHouse/ClickHouse/pull/8044) ([维塔利\*巴拉诺夫](https://github.com/vitlibar)) +- `Live View` 表引擎重构。 [\#8519](https://github.com/ClickHouse/ClickHouse/pull/8519) ([vzakaznikov](https://github.com/vzakaznikov)) +- 为从DDL查询创建的外部字典添加额外的检查。 [\#8127](https://github.com/ClickHouse/ClickHouse/pull/8127) ([阿利沙平](https://github.com/alesapin)) +- 修复错误 `Column ... already exists` 使用时 `FINAL` 和 `SAMPLE` together, e.g. `select count() from table final sample 1/2`. 修复 [\#5186](https://github.com/ClickHouse/ClickHouse/issues/5186). 
[\#7907](https://github.com/ClickHouse/ClickHouse/pull/7907) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai))
+- 现在 `joinGet` 函数的第一个参数可以是表标识符。 [\#7707](https://github.com/ClickHouse/ClickHouse/pull/7707) ([阿莫斯鸟](https://github.com/amosbird))
+- 允许在 `Kafka` 表之上的 `MaterializedView` 中使用子查询。 [\#8197](https://github.com/ClickHouse/ClickHouse/pull/8197) ([filimonov](https://github.com/filimonov))
+- 现在磁盘之间的后台数据移动在单独的线程池中运行。 [\#7670](https://github.com/ClickHouse/ClickHouse/pull/7670) ([Vladimir Chebotarev](https://github.com/excitoon))
+- `SYSTEM RELOAD DICTIONARY` 现在同步执行。 [\#8240](https://github.com/ClickHouse/ClickHouse/pull/8240) ([维塔利\*巴拉诺夫](https://github.com/vitlibar))
+- 堆栈跟踪现在显示物理地址(对象文件中的偏移量),而不是虚拟内存地址(对象文件被加载的位置)。 这样在二进制文件位置无关且ASLR处于活动状态时,仍可以使用 `addr2line`。 这修复 [\#8360](https://github.com/ClickHouse/ClickHouse/issues/8360). [\#8387](https://github.com/ClickHouse/ClickHouse/pull/8387) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov))
+- 支持行级安全筛选器的新语法: `<table name="table_name">…</table>`. 修复 [\#5779](https://github.com/ClickHouse/ClickHouse/issues/5779). [\#8381](https://github.com/ClickHouse/ClickHouse/pull/8381) ([伊万](https://github.com/abyss7))
+- 现在 `cityHash` 函数可以处理 `Decimal` 和 `UUID` 类型。 修复 [\#5184](https://github.com/ClickHouse/ClickHouse/issues/5184). [\#7693](https://github.com/ClickHouse/ClickHouse/pull/7693) ([米哈伊尔\*科罗托夫](https://github.com/millb))
+- 从系统日志中删除了固定的索引粒度(此前为1024),因为在实现自适应粒度之后它已经过时。 [\#7698](https://github.com/ClickHouse/ClickHouse/pull/7698) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov))
+- 当ClickHouse在没有SSL的情况下编译时,也启用MySQL兼容服务器。 [\#7852](https://github.com/ClickHouse/ClickHouse/pull/7852) ([尤里\*巴拉诺夫](https://github.com/yurriy))
+- 现在服务器会校验分布式批处理数据,这在批处理数据损坏时提供更详细的错误信息。 [\#7914](https://github.com/ClickHouse/ClickHouse/pull/7914) ([Azat Khuzhin](https://github.com/azat))
+- 为 `MySQL` 数据库引擎支持 `DROP DATABASE`, `DETACH TABLE`, `DROP TABLE` 和 `ATTACH TABLE`。 [\#8202](https://github.com/ClickHouse/ClickHouse/pull/8202) ([张冬](https://github.com/zhang2014))
+- 在S3表函数和表引擎中添加身份验证。 [\#7623](https://github.com/ClickHouse/ClickHouse/pull/7623) ([Vladimir Chebotarev](https://github.com/excitoon))
+- 增加了对不同磁盘上额外 `MergeTree` 数据部分的检查,以避免遗漏位于未定义磁盘上的数据部分。 [\#8118](https://github.com/ClickHouse/ClickHouse/pull/8118) ([Vladimir Chebotarev](https://github.com/excitoon))
+- 为Mac客户端和服务器启用SSL支持。 [\#8297](https://github.com/ClickHouse/ClickHouse/pull/8297) ([伊万](https://github.com/abyss7))
+- 现在ClickHouse可以作为MySQL联合服务器使用(参见https://dev.mysql.com/doc/refman/5.7/en/federated-create-server.html)。 [\#7717](https://github.com/ClickHouse/ClickHouse/pull/7717) ([Maxim Fedotov](https://github.com/MaxFedotov))
+- `clickhouse-client` 现在仅在多查询打开且多行关闭时启用 `bracketed-paste`。 这修复 [\#7757](https://github.com/ClickHouse/ClickHouse/issues/7757). [\#7761](https://github.com/ClickHouse/ClickHouse/pull/7761) ([阿莫斯鸟](https://github.com/amosbird))
+- 在 `if` 函数中支持 `Array(Decimal)`。 [\#7721](https://github.com/ClickHouse/ClickHouse/pull/7721) ([Artem Zuikov](https://github.com/4ertus2))
+- 在 `arrayDifference`, `arrayCumSum` 和 `arrayCumSumNegative` 函数中支持小数类型。 [\#7724](https://github.com/ClickHouse/ClickHouse/pull/7724) ([Artem Zuikov](https://github.com/4ertus2))
+- 已将 `lifetime` 列添加到 `system.dictionaries` 表。 [\#6820](https://github.com/ClickHouse/ClickHouse/issues/6820) [\#7727](https://github.com/ClickHouse/ClickHouse/pull/7727) ([kekekekule](https://github.com/kekekekule))
+- 改进了对 `*MergeTree` 表引擎在不同磁盘上现有数据部分的检查。 解决 [\#7660](https://github.com/ClickHouse/ClickHouse/issues/7660). [\#8440](https://github.com/ClickHouse/ClickHouse/pull/8440) ([Vladimir Chebotarev](https://github.com/excitoon))
+- 集成 `AWS SDK` 用于 `S3` 交互,允许开箱即用地使用所有S3功能。 [\#8011](https://github.com/ClickHouse/ClickHouse/pull/8011) ([帕维尔\*科瓦连科](https://github.com/Jokser))
+- 增加了对 `Live View` 表中子查询的支持。 [\#7792](https://github.com/ClickHouse/ClickHouse/pull/7792) ([vzakaznikov](https://github.com/vzakaznikov))
+- 移除了 `TTL` 表达式必须使用 `Date` 或 `DateTime` 列的检查。 [\#7920](https://github.com/ClickHouse/ClickHouse/pull/7920) ([Vladimir Chebotarev](https://github.com/excitoon))
+- 有关磁盘的信息已添加到 `system.detached_parts` 表。 [\#7833](https://github.com/ClickHouse/ClickHouse/pull/7833) ([Vladimir Chebotarev](https://github.com/excitoon))
+- 现在设置 `max_(table|partition)_size_to_drop` 无需重新启动即可更改。 [\#7779](https://github.com/ClickHouse/ClickHouse/pull/7779) ([Grigory Pervakov](https://github.com/GrigoryPervakov))
+- 错误消息的可用性略有改善。 要求用户不要删除 `Stack trace:` 下面的行。 
[\#7897](https://github.com/ClickHouse/ClickHouse/pull/7897) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 更好地阅读消息 `Kafka` 引擎在各种格式后 [\#7935](https://github.com/ClickHouse/ClickHouse/issues/7935). [\#8035](https://github.com/ClickHouse/ClickHouse/pull/8035) ([伊万](https://github.com/abyss7)) +- 与不支持MySQL客户端更好的兼容性 `sha2_password` 验证插件。 [\#8036](https://github.com/ClickHouse/ClickHouse/pull/8036) ([尤里\*巴拉诺夫](https://github.com/yurriy)) +- 支持MySQL兼容性服务器中的更多列类型。 [\#7975](https://github.com/ClickHouse/ClickHouse/pull/7975) ([尤里\*巴拉诺夫](https://github.com/yurriy)) +- 执行 `ORDER BY` 优化 `Merge`, `Buffer` 和 `Materilized View` 存储与底层 `MergeTree` 桌子 [\#8130](https://github.com/ClickHouse/ClickHouse/pull/8130) ([安东\*波波夫](https://github.com/CurtizJ)) +- 现在我们总是使用POSIX实现 `getrandom` 与旧内核更好的兼容性(\<3.17)。 [\#7940](https://github.com/ClickHouse/ClickHouse/pull/7940) ([阿莫斯鸟](https://github.com/amosbird)) +- 更好地检查移动ttl规则中的有效目标。 [\#8410](https://github.com/ClickHouse/ClickHouse/pull/8410) ([Vladimir Chebotarev](https://github.com/excitoon)) +- 更好地检查损坏的刀片批次 `Distributed` 表引擎。 [\#7933](https://github.com/ClickHouse/ClickHouse/pull/7933) ([Azat Khuzhin](https://github.com/azat)) +- 添加带有部件名称数组的列,这些部件将来必须处理突变 `system.mutations` 桌子 [\#8179](https://github.com/ClickHouse/ClickHouse/pull/8179) ([阿利沙平](https://github.com/alesapin)) +- 处理器的并行合并排序优化。 [\#8552](https://github.com/ClickHouse/ClickHouse/pull/8552) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 设置 `mark_cache_min_lifetime` 现在已经过时了,什么也不做。 在以前的版本中,标记缓存可以在内存中增长大于 `mark_cache_size` 以容纳内的数据 `mark_cache_min_lifetime` 秒。 这导致了混乱和比预期更高的内存使用率,这在内存受限的系统上尤其糟糕。 如果您在安装此版本后会看到性能下降,则应增加 `mark_cache_size`. [\#8484](https://github.com/ClickHouse/ClickHouse/pull/8484) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 准备使用 `tid` 到处都是 这是必要的 [\#7477](https://github.com/ClickHouse/ClickHouse/issues/7477). [\#8276](https://github.com/ClickHouse/ClickHouse/pull/8276) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) + +#### 性能改进 {#performance-improvement-1} + +- 处理器管道中的性能优化。 [\#7988](https://github.com/ClickHouse/ClickHouse/pull/7988) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 缓存字典中过期密钥的非阻塞更新(具有读取旧密钥的权限)。 [\#8303](https://github.com/ClickHouse/ClickHouse/pull/8303) ([尼基塔\*米哈伊洛夫](https://github.com/nikitamikhaylov)) +- 没有编译ClickHouse `-fno-omit-frame-pointer` 在全球范围内多余一个寄存器。 [\#8097](https://github.com/ClickHouse/ClickHouse/pull/8097) ([阿莫斯鸟](https://github.com/amosbird)) +- 加速 `greatCircleDistance` 功能,并为它添加性能测试。 [\#7307](https://github.com/ClickHouse/ClickHouse/pull/7307) ([Olga Khvostikova](https://github.com/stavrolia)) +- 改进的功能性能 `roundDown`. 
[\#8465](https://github.com/ClickHouse/ClickHouse/pull/8465) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 改进的性能 `max`, `min`, `argMin`, `argMax` 为 `DateTime64` 数据类型。 [\#8199](https://github.com/ClickHouse/ClickHouse/pull/8199) ([瓦西里\*内姆科夫](https://github.com/Enmk)) +- 改进了无限制或大限制和外部排序的排序性能。 [\#8545](https://github.com/ClickHouse/ClickHouse/pull/8545) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 改进的性能格式化浮点数高达6倍。 [\#8542](https://github.com/ClickHouse/ClickHouse/pull/8542) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 改进的性能 `modulo` 功能。 [\#7750](https://github.com/ClickHouse/ClickHouse/pull/7750) ([阿莫斯鸟](https://github.com/amosbird)) +- 优化 `ORDER BY` 并与单列键合并。 [\#8335](https://github.com/ClickHouse/ClickHouse/pull/8335) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 更好地实施 `arrayReduce`, `-Array` 和 `-State` 组合子 [\#7710](https://github.com/ClickHouse/ClickHouse/pull/7710) ([阿莫斯鸟](https://github.com/amosbird)) +- 现在 `PREWHERE` 应优化为至少一样高效 `WHERE`. [\#7769](https://github.com/ClickHouse/ClickHouse/pull/7769) ([阿莫斯鸟](https://github.com/amosbird)) +- 改进方式 `round` 和 `roundBankers` 处理负数。 [\#8229](https://github.com/ClickHouse/ClickHouse/pull/8229) ([hcz](https://github.com/hczhcz)) +- 改进的解码性能 `DoubleDelta` 和 `Gorilla` 编解码器大约30-40%。 这修复 [\#7082](https://github.com/ClickHouse/ClickHouse/issues/7082). [\#8019](https://github.com/ClickHouse/ClickHouse/pull/8019) ([瓦西里\*内姆科夫](https://github.com/Enmk)) +- 改进的性能 `base64` 相关功能。 [\#8444](https://github.com/ClickHouse/ClickHouse/pull/8444) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 增加了一个功能 `geoDistance`. 它类似于 `greatCircleDistance` 但使用近似于WGS-84椭球模型。 两个功能的性能几乎相同。 [\#8086](https://github.com/ClickHouse/ClickHouse/pull/8086) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 更快 `min` 和 `max` 聚合函数 `Decimal` 数据类型。 [\#8144](https://github.com/ClickHouse/ClickHouse/pull/8144) ([Artem Zuikov](https://github.com/4ertus2)) +- 矢量化处理 `arrayReduce`. [\#7608](https://github.com/ClickHouse/ClickHouse/pull/7608) ([阿莫斯鸟](https://github.com/amosbird)) +- `if` 链现在优化为 `multiIf`. [\#8355](https://github.com/ClickHouse/ClickHouse/pull/8355) ([kamalov-ruslan](https://github.com/kamalov-ruslan)) +- 修复性能回归 `Kafka` 表引擎在19.15中引入。 这修复 [\#7261](https://github.com/ClickHouse/ClickHouse/issues/7261). [\#7935](https://github.com/ClickHouse/ClickHouse/pull/7935) ([filimonov](https://github.com/filimonov)) +- 已删除 “pie” 代码生成 `gcc` 从Debian软件包偶尔带来默认情况下。 [\#8483](https://github.com/ClickHouse/ClickHouse/pull/8483) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 并行解析数据格式 [\#6553](https://github.com/ClickHouse/ClickHouse/pull/6553) ([尼基塔\*米哈伊洛夫](https://github.com/nikitamikhaylov)) +- 启用优化的解析器 `Values` 默认使用表达式 (`input_format_values_deduce_templates_of_expressions=1`). 
[\#8231](https://github.com/ClickHouse/ClickHouse/pull/8231) ([tavplubix](https://github.com/tavplubix)) + +#### 构建/测试/包装改进 {#buildtestingpackaging-improvement-2} + +- 构建修复 `ARM` 而在最小模式。 [\#8304](https://github.com/ClickHouse/ClickHouse/pull/8304) ([proller](https://github.com/proller)) +- 添加复盖文件刷新 `clickhouse-server` 当不调用std::atexit时。 还略微改进了无状态测试的复盖率日志记录。 [\#8267](https://github.com/ClickHouse/ClickHouse/pull/8267) ([阿利沙平](https://github.com/alesapin)) +- 更新contrib中的LLVM库。 避免从操作系统包中使用LLVM。 [\#8258](https://github.com/ClickHouse/ClickHouse/pull/8258) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 使bund绑 `curl` 建立完全安静。 [\#8232](https://github.com/ClickHouse/ClickHouse/pull/8232) [\#8203](https://github.com/ClickHouse/ClickHouse/pull/8203) ([帕维尔\*科瓦连科](https://github.com/Jokser)) +- 修复一些 `MemorySanitizer` 警告。 [\#8235](https://github.com/ClickHouse/ClickHouse/pull/8235) ([Alexander Kuzmenkov](https://github.com/akuzm)) +- 使用 `add_warning` 和 `no_warning` 宏 `CMakeLists.txt`. [\#8604](https://github.com/ClickHouse/ClickHouse/pull/8604) ([伊万](https://github.com/abyss7)) +- 添加对Minio S3兼容对象的支持(https://min.io/)为了更好的集成测试。 [\#7863](https://github.com/ClickHouse/ClickHouse/pull/7863) [\#7875](https://github.com/ClickHouse/ClickHouse/pull/7875) ([帕维尔\*科瓦连科](https://github.com/Jokser)) +- 导入 `libc` 标题到contrib。 它允许在各种系统中使构建更加一致(仅适用于 `x86_64-linux-gnu`). [\#5773](https://github.com/ClickHouse/ClickHouse/pull/5773) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 删除 `-fPIC` 从一些图书馆。 [\#8464](https://github.com/ClickHouse/ClickHouse/pull/8464) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 清洁 `CMakeLists.txt` 对于卷曲。 看https://github.com/ClickHouse/ClickHouse/pull/8011\#issuecomment-569478910 [\#8459](https://github.com/ClickHouse/ClickHouse/pull/8459) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 无声警告 `CapNProto` 图书馆. [\#8220](https://github.com/ClickHouse/ClickHouse/pull/8220) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 为短字符串优化哈希表添加性能测试。 [\#7679](https://github.com/ClickHouse/ClickHouse/pull/7679) ([阿莫斯鸟](https://github.com/amosbird)) +- 现在ClickHouse将建立在 `AArch64` 即使 `MADV_FREE` 不可用。 这修复 [\#8027](https://github.com/ClickHouse/ClickHouse/issues/8027). [\#8243](https://github.com/ClickHouse/ClickHouse/pull/8243) ([阿莫斯鸟](https://github.com/amosbird)) +- 更新 `zlib-ng` 来解决记忆消毒的问题 [\#7182](https://github.com/ClickHouse/ClickHouse/pull/7182) [\#8206](https://github.com/ClickHouse/ClickHouse/pull/8206) ([Alexander Kuzmenkov](https://github.com/akuzm)) +- 在非Linux系统上启用内部MySQL库,因为操作系统包的使用非常脆弱,通常根本不起作用。 这修复 [\#5765](https://github.com/ClickHouse/ClickHouse/issues/5765). [\#8426](https://github.com/ClickHouse/ClickHouse/pull/8426) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修复了启用后在某些系统上构建的问题 `libc++`. 这取代了 [\#8374](https://github.com/ClickHouse/ClickHouse/issues/8374). [\#8380](https://github.com/ClickHouse/ClickHouse/pull/8380) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 赂眉露\>\> `Field` 方法更类型安全,以找到更多的错误。 [\#7386](https://github.com/ClickHouse/ClickHouse/pull/7386) [\#8209](https://github.com/ClickHouse/ClickHouse/pull/8209) ([Alexander Kuzmenkov](https://github.com/akuzm)) +- 添加丢失的文件到 `libc-headers` 子模块。 [\#8507](https://github.com/ClickHouse/ClickHouse/pull/8507) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修复错误 `JSON` 引用性能测试输出。 [\#8497](https://github.com/ClickHouse/ClickHouse/pull/8497) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 现在堆栈跟踪显示 `std::exception` 和 `Poco::Exception`. 在以前的版本中,它仅适用于 `DB::Exception`. 
这改进了诊断。 [\#8501](https://github.com/ClickHouse/ClickHouse/pull/8501) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 移植 `clock_gettime` 和 `clock_nanosleep` 对于新鲜的glibc版本。 [\#8054](https://github.com/ClickHouse/ClickHouse/pull/8054) ([阿莫斯鸟](https://github.com/amosbird)) +- 启用 `part_log` 在示例配置开发人员。 [\#8609](https://github.com/ClickHouse/ClickHouse/pull/8609) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修复重新加载的异步性质 `01036_no_superfluous_dict_reload_on_create_database*`. [\#8111](https://github.com/ClickHouse/ClickHouse/pull/8111) ([Azat Khuzhin](https://github.com/azat)) +- 固定编解码器性能测试。 [\#8615](https://github.com/ClickHouse/ClickHouse/pull/8615) ([瓦西里\*内姆科夫](https://github.com/Enmk)) +- 添加安装脚本 `.tgz` 为他们构建和文档。 [\#8612](https://github.com/ClickHouse/ClickHouse/pull/8612) [\#8591](https://github.com/ClickHouse/ClickHouse/pull/8591) ([阿利沙平](https://github.com/alesapin)) +- 删除旧 `ZSTD` 测试(它是在2016年创建的,以重现zstd1.0版本之前的错误)。 这修复 [\#8618](https://github.com/ClickHouse/ClickHouse/issues/8618). [\#8619](https://github.com/ClickHouse/ClickHouse/pull/8619) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 固定构建在Mac OS卡特琳娜。 [\#8600](https://github.com/ClickHouse/ClickHouse/pull/8600) ([meo](https://github.com/meob)) +- 增加编解码器性能测试中的行数,以使结果显着。 [\#8574](https://github.com/ClickHouse/ClickHouse/pull/8574) ([瓦西里\*内姆科夫](https://github.com/Enmk)) +- 在调试版本中,处理 `LOGICAL_ERROR` 异常作为断言失败,使得它们更容易被注意到。 [\#8475](https://github.com/ClickHouse/ClickHouse/pull/8475) ([Alexander Kuzmenkov](https://github.com/akuzm)) +- 使与格式相关的性能测试更具确定性。 [\#8477](https://github.com/ClickHouse/ClickHouse/pull/8477) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 更新 `lz4` 来修复记忆消毒器的故障 [\#8181](https://github.com/ClickHouse/ClickHouse/pull/8181) ([Alexander Kuzmenkov](https://github.com/akuzm)) +- 在异常处理中抑制已知MemorySanitizer误报。 [\#8182](https://github.com/ClickHouse/ClickHouse/pull/8182) ([Alexander Kuzmenkov](https://github.com/akuzm)) +- 更新 `gcc` 和 `g++` 到版本9在 `build/docker/build.sh` [\#7766](https://github.com/ClickHouse/ClickHouse/pull/7766) ([TLightSky](https://github.com/tlightsky)) +- 添加性能测试用例来测试 `PREWHERE` 比 `WHERE`. [\#7768](https://github.com/ClickHouse/ClickHouse/pull/7768) ([阿莫斯鸟](https://github.com/amosbird)) +- 在修复一个笨拙的测试方面取得了进展。 [\#8621](https://github.com/ClickHouse/ClickHouse/pull/8621) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 避免从MemorySanitizer报告数据 `libunwind`. [\#8539](https://github.com/ClickHouse/ClickHouse/pull/8539) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 更新 `libc++` 到最新版本。 [\#8324](https://github.com/ClickHouse/ClickHouse/pull/8324) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 从源头构建ICU库。 这修复 [\#6460](https://github.com/ClickHouse/ClickHouse/issues/6460). [\#8219](https://github.com/ClickHouse/ClickHouse/pull/8219) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 从切换 `libressl` 到 `openssl`. ClickHouse应在此更改后支持TLS1.3和SNI。 这修复 [\#8171](https://github.com/ClickHouse/ClickHouse/issues/8171). [\#8218](https://github.com/ClickHouse/ClickHouse/pull/8218) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 使用时固定的UBSan报告 `chacha20_poly1305` 从SSL(发生在连接到https://yandex.ru/)。 [\#8214](https://github.com/ClickHouse/ClickHouse/pull/8214) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修复默认密码文件的模式 `.deb` linux发行版。 [\#8075](https://github.com/ClickHouse/ClickHouse/pull/8075) ([proller](https://github.com/proller)) +- 改进的表达式获取 `clickhouse-server` PID输入 `clickhouse-test`. 
[\#8063](https://github.com/ClickHouse/ClickHouse/pull/8063) ([亚历山大\*卡扎科夫](https://github.com/Akazz)) +- 更新contrib/googletest到v1.10.0。 [\#8587](https://github.com/ClickHouse/ClickHouse/pull/8587) ([Alexander Burmak](https://github.com/Alex-Burmak)) +- 修复了ThreadSaninitizer报告 `base64` 图书馆. 还将此库更新到最新版本,但无关紧要。 这修复 [\#8397](https://github.com/ClickHouse/ClickHouse/issues/8397). [\#8403](https://github.com/ClickHouse/ClickHouse/pull/8403) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 修复 `00600_replace_running_query` 对于处理器。 [\#8272](https://github.com/ClickHouse/ClickHouse/pull/8272) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 删除支持 `tcmalloc` 为了使 `CMakeLists.txt` 更简单 [\#8310](https://github.com/ClickHouse/ClickHouse/pull/8310) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 发布海湾合作委员会构建现在使用 `libc++` 而不是 `libstdc++`. 最近 `libc++` 只与叮当一起使用。 这将提高构建配置的一致性和可移植性。 [\#8311](https://github.com/ClickHouse/ClickHouse/pull/8311) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 使用MemorySanitizer启用ICU库进行构建。 [\#8222](https://github.com/ClickHouse/ClickHouse/pull/8222) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 禁止从警告 `CapNProto` 图书馆. [\#8224](https://github.com/ClickHouse/ClickHouse/pull/8224) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 删除代码的特殊情况 `tcmalloc`,因为它不再受支持。 [\#8225](https://github.com/ClickHouse/ClickHouse/pull/8225) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 在CI coverage任务中,优雅地终止服务器以允许它保存coverage报告。 这修复了我们最近看到的不完整的复盖率报告。 [\#8142](https://github.com/ClickHouse/ClickHouse/pull/8142) ([阿利沙平](https://github.com/alesapin)) +- 针对所有编解码器的性能测试 `Float64` 和 `UInt64` 值。 [\#8349](https://github.com/ClickHouse/ClickHouse/pull/8349) ([瓦西里\*内姆科夫](https://github.com/Enmk)) +- `termcap` 非常不推荐使用,并导致各种问题(f.g.missing “up” 帽和呼应 `^J` 而不是多行)。 帮个忙 `terminfo` 或bund绑 `ncurses`. 
[\#7737](https://github.com/ClickHouse/ClickHouse/pull/7737) ([阿莫斯鸟](https://github.com/amosbird)) +- 修复 `test_storage_s3` 集成测试。 [\#7734](https://github.com/ClickHouse/ClickHouse/pull/7734) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 碌莽禄Support: `StorageFile(, null)` 将块插入给定格式的文件而不实际写入磁盘。 这是性能测试所必需的。 [\#8455](https://github.com/ClickHouse/ClickHouse/pull/8455) ([阿莫斯鸟](https://github.com/amosbird)) +- 添加参数 `--print-time` 功能测试打印每个测试的执行时间。 [\#8001](https://github.com/ClickHouse/ClickHouse/pull/8001) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 添加断言 `KeyCondition` 同时评估RPN。 这将修复来自gcc-9的警告。 [\#8279](https://github.com/ClickHouse/ClickHouse/pull/8279) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 在CI构建中转储cmake选项。 [\#8273](https://github.com/ClickHouse/ClickHouse/pull/8273) ([Alexander Kuzmenkov](https://github.com/akuzm)) +- 不要为某些fat库生成调试信息。 [\#8271](https://github.com/ClickHouse/ClickHouse/pull/8271) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 赂眉露\>\> `log_to_console.xml` 始终登录到stderr,无论它是否交互。 [\#8395](https://github.com/ClickHouse/ClickHouse/pull/8395) ([Alexander Kuzmenkov](https://github.com/akuzm)) +- 删除了一些未使用的功能 `clickhouse-performance-test` 工具 [\#8555](https://github.com/ClickHouse/ClickHouse/pull/8555) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 现在我们也将搜索 `lld-X` 与相应的 `clang-X` 版本。 [\#8092](https://github.com/ClickHouse/ClickHouse/pull/8092) ([阿利沙平](https://github.com/alesapin)) +- 实木复合地板建设改善。 [\#8421](https://github.com/ClickHouse/ClickHouse/pull/8421) ([马苏兰](https://github.com/maxulan)) +- 更多海湾合作委员会警告 [\#8221](https://github.com/ClickHouse/ClickHouse/pull/8221) ([kreuzerkrieg](https://github.com/kreuzerkrieg)) +- Arch Linux的软件包现在允许运行ClickHouse服务器,而不仅仅是客户端。 [\#8534](https://github.com/ClickHouse/ClickHouse/pull/8534) ([Vladimir Chebotarev](https://github.com/excitoon)) +- 修复与处理器的测试。 微小的性能修复。 [\#7672](https://github.com/ClickHouse/ClickHouse/pull/7672) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) +- 更新contrib/protobuf。 [\#8256](https://github.com/ClickHouse/ClickHouse/pull/8256) ([Matwey V.Kornilov](https://github.com/matwey)) +- 在准备切换到c++20作为新年庆祝活动。 “May the C++ force be with ClickHouse.” [\#8447](https://github.com/ClickHouse/ClickHouse/pull/8447) ([阿莫斯鸟](https://github.com/amosbird)) + +#### 实验特点 {#experimental-feature-1} + +- 增加了实验设置 `min_bytes_to_use_mmap_io`. 它允许读取大文件,而无需将数据从内核复制到用户空间。 默认情况下禁用该设置。 建议的阈值大约是64MB,因为mmap/munmap很慢。 [\#8520](https://github.com/ClickHouse/ClickHouse/pull/8520) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- 返工配额作为访问控制系统的一部分。 增加了新表 `system.quotas`,新功能 `currentQuota`, `currentQuotaKey`,新的SQL语法 `CREATE QUOTA`, `ALTER QUOTA`, `DROP QUOTA`, `SHOW QUOTA`. [\#7257](https://github.com/ClickHouse/ClickHouse/pull/7257) ([维塔利\*巴拉诺夫](https://github.com/vitlibar)) +- 允许跳过带有警告的未知设置,而不是引发异常。 [\#7653](https://github.com/ClickHouse/ClickHouse/pull/7653) ([维塔利\*巴拉诺夫](https://github.com/vitlibar)) +- 重新设计的行策略作为访问控制系统的一部分。 增加了新表 `system.row_policies`,新功能 `currentRowPolicies()`,新的SQL语法 `CREATE POLICY`, `ALTER POLICY`, `DROP POLICY`, `SHOW CREATE POLICY`, `SHOW POLICIES`. [\#7808](https://github.com/ClickHouse/ClickHouse/pull/7808) ([维塔利\*巴拉诺夫](https://github.com/vitlibar)) + +#### 安全修复 {#security-fix} + +- 修正了读取目录结构中的表的可能性 `File` 表引擎。 这修复 [\#8536](https://github.com/ClickHouse/ClickHouse/issues/8536). 
[\#8537](https://github.com/ClickHouse/ClickHouse/pull/8537) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov))

## [更新日志2019](https://github.com/ClickHouse/ClickHouse/blob/master/docs/en/changelog/2019.md) {#changelog-for-2019}
diff --git a/docs/zh/commercial/cloud.md b/docs/zh/commercial/cloud.md
index f096bdb92cf..765c352d098 100644
--- a/docs/zh/commercial/cloud.md
+++ b/docs/zh/commercial/cloud.md
@@ -1,20 +1,21 @@
 ---
-en_copy: true
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
 ---

-# ClickHouse Cloud Service Providers {#clickhouse-cloud-service-providers}
+# ClickHouse云服务提供商 {#clickhouse-cloud-service-providers}

-!!! info "Info"
-    If you have launched a public cloud with managed ClickHouse service, feel free to [open a pull-request](https://github.com/ClickHouse/ClickHouse/edit/master/docs/en/commercial/cloud.md) adding it to the following list.
+!!! info "信息"
+    如果您已经启动了带有托管ClickHouse服务的公共云,请随时 [打开拉取请求](https://github.com/ClickHouse/ClickHouse/edit/master/docs/en/commercial/cloud.md) 将其添加到以下列表。

-## Yandex Cloud {#yandex-cloud}
+## Yandex云 {#yandex-cloud}

-[Yandex Managed Service for ClickHouse](https://cloud.yandex.com/services/managed-clickhouse?utm_source=referrals&utm_medium=clickhouseofficialsite&utm_campaign=link3) provides the following key features:
+[Yandex的ClickHouse托管服务](https://cloud.yandex.com/services/managed-clickhouse?utm_source=referrals&utm_medium=clickhouseofficialsite&utm_campaign=link3) 提供以下主要功能:

-- Fully managed ZooKeeper service for [ClickHouse replication](../operations/table_engines/replication.md)
-- Multiple storage type choices
-- Replicas in different availability zones
-- Encryption and isolation
-- Automated maintenance
+- 全托管的ZooKeeper服务,用于 [ClickHouse复制](../engines/table_engines/mergetree_family/replication.md)
+- 多种存储类型选择
+- 不同可用区中的副本
+- 加密和隔离
+- 自动化维护

-{## [Original article](https://clickhouse.tech/docs/en/commercial/cloud/) ##}
+{## [原始文章](https://clickhouse.tech/docs/en/commercial/cloud/) ##}
diff --git a/docs/zh/commercial/index.md b/docs/zh/commercial/index.md
new file mode 100644
index 00000000000..ec704207201
--- /dev/null
+++ b/docs/zh/commercial/index.md
@@ -0,0 +1,9 @@
+---
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_folder_title: "\u5546\u4E1A"
+toc_priority: 70
+toc_title: "\u5546\u4E1A"
+---
+
+
diff --git a/docs/zh/data_types/datetime64.md b/docs/zh/data_types/datetime64.md
deleted file mode 100644
index e28390bbdd4..00000000000
--- a/docs/zh/data_types/datetime64.md
+++ /dev/null
@@ -1,101 +0,0 @@
----
-en_copy: true
----
-
-# DateTime64 {#data_type-datetime64}
-
-Allows to store an instant in time that can be expressed as a calendar date and a time of a day, with defined sub-second precision.
-
-Tick size (precision): 10^(-precision) seconds
-
-Syntax:
-
-``` sql
-DateTime64(precision, [timezone])
-```
-
-Internally, stores data as a number of ‘ticks’ since epoch start (1970-01-01 00:00:00 UTC) as Int64. The tick resolution is determined by the precision parameter. Additionally, the `DateTime64` type can store a time zone that is the same for the entire column, which affects how `DateTime64` values are displayed in text format and how values specified as strings are parsed (‘2020-01-01 05:00:01.000’). The time zone is not stored in the rows of the table (or in the resultset), but in the column metadata. See details in [DateTime](datetime.md). 
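-
-A minimal sketch of the tick arithmetic described above (assuming a UTC server time zone; the literal tick values are illustrative and match the worked example in the next section):
-
-``` sql
--- precision 3 gives a tick of 10^-3 s: '2019-01-01 00:00:00.123' UTC
--- is stored as the Int64 tick count 1546300800123
-SELECT toDateTime64('2019-01-01 00:00:00.123', 3) AS ms;
--- precision 0 gives a tick of 1 s: stored as the Int64 tick count 1546300800
-SELECT toDateTime64('2019-01-01 00:00:00', 0) AS s;
-```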
- -## Examples {#examples} - -**1.** Creating a table with `DateTime64`-type column and inserting data into it: - -``` sql -CREATE TABLE dt -( - `timestamp` DateTime64(3, 'Europe/Moscow'), - `event_id` UInt8 -) -ENGINE = TinyLog -``` - -``` sql -INSERT INTO dt Values (1546300800000, 1), ('2019-01-01 00:00:00', 2) -``` - -``` sql -SELECT * FROM dt -``` - -``` text -┌───────────────timestamp─┬─event_id─┐ -│ 2019-01-01 03:00:00.000 │ 1 │ -│ 2019-01-01 00:00:00.000 │ 2 │ -└─────────────────────────┴──────────┘ -``` - -- When inserting datetime as an integer, it is treated as an appropriately scaled Unix Timestamp (UTC). `1546300800000` (with precision 3) represents `'2019-01-01 00:00:00'` UTC. However, as `timestamp` column has `Europe/Moscow` (UTC+3) timezone specified, when outputting as a string the value will be shown as `'2019-01-01 03:00:00'` -- When inserting string value as datetime, it is treated as being in column timezone. `'2019-01-01 00:00:00'` will be treated as being in `Europe/Moscow` timezone and stored as `1546290000000`. - -**2.** Filtering on `DateTime64` values - -``` sql -SELECT * FROM dt WHERE timestamp = toDateTime64('2019-01-01 00:00:00', 3, 'Europe/Moscow') -``` - -``` text -┌───────────────timestamp─┬─event_id─┐ -│ 2019-01-01 00:00:00.000 │ 2 │ -└─────────────────────────┴──────────┘ -``` - -Unlike `DateTime`, `DateTime64` values are not converted from `String` automatically - -**3.** Getting a time zone for a `DateTime64`-type value: - -``` sql -SELECT toDateTime64(now(), 3, 'Europe/Moscow') AS column, toTypeName(column) AS x -``` - -``` text -┌──────────────────column─┬─x──────────────────────────────┐ -│ 2019-10-16 04:12:04.000 │ DateTime64(3, 'Europe/Moscow') │ -└─────────────────────────┴────────────────────────────────┘ -``` - -**4.** Timezone conversion - -``` sql -SELECT -toDateTime64(timestamp, 3, 'Europe/London') as lon_time, -toDateTime64(timestamp, 3, 'Europe/Moscow') as mos_time -FROM dt -``` - -``` text -┌───────────────lon_time──┬────────────────mos_time─┐ -│ 2019-01-01 00:00:00.000 │ 2019-01-01 03:00:00.000 │ -│ 2018-12-31 21:00:00.000 │ 2019-01-01 00:00:00.000 │ -└─────────────────────────┴─────────────────────────┘ -``` - -## See Also {#see-also} - -- [Type conversion functions](../query_language/functions/type_conversion_functions.md) -- [Functions for working with dates and times](../query_language/functions/date_time_functions.md) -- [Functions for working with arrays](../query_language/functions/array_functions.md) -- [The `date_time_input_format` setting](../operations/settings/settings.md#settings-date_time_input_format) -- [The `timezone` server configuration parameter](../operations/server_settings/settings.md#server_settings-timezone) -- [Operators for working with dates and times](../query_language/operators.md#operators-datetime) -- [`Date` data type](date.md) -- [`DateTime` data type](datetime.md) diff --git a/docs/zh/data_types/int_uint.md b/docs/zh/data_types/int_uint.md deleted file mode 100644 index 4e01ad017ca..00000000000 --- a/docs/zh/data_types/int_uint.md +++ /dev/null @@ -1,17 +0,0 @@ -# UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64 {#uint8-uint16-uint32-uint64-int8-int16-int32-int64} - -固定长度的整型,包括有符号整型或无符号整型。 - -## 整型范围 {#zheng-xing-fan-wei} - -- Int8 - \[-128 : 127\] -- Int16 - \[-32768 : 32767\] -- Int32 - \[-2147483648 : 2147483647\] -- Int64 - \[-9223372036854775808 : 9223372036854775807\] - -## 无符号整型范围 {#wu-fu-hao-zheng-xing-fan-wei} - -- UInt8 - \[0 : 255\] -- UInt16 - \[0 : 65535\] -- UInt32 - \[0 : 
4294967295\] -- UInt64 - \[0 : 18446744073709551615\] diff --git a/docs/zh/data_types/uuid.md b/docs/zh/data_types/uuid.md deleted file mode 100644 index 4546be19371..00000000000 --- a/docs/zh/data_types/uuid.md +++ /dev/null @@ -1,74 +0,0 @@ ---- -en_copy: true ---- - -# UUID {#uuid-data-type} - -A universally unique identifier (UUID) is a 16-byte number used to identify records. For detailed information about the UUID, see [Wikipedia](https://en.wikipedia.org/wiki/Universally_unique_identifier). - -The example of UUID type value is represented below: - -``` text -61f0c404-5cb3-11e7-907b-a6006ad3dba0 -``` - -If you do not specify the UUID column value when inserting a new record, the UUID value is filled with zero: - -``` text -00000000-0000-0000-0000-000000000000 -``` - -## How to generate {#how-to-generate} - -To generate the UUID value, ClickHouse provides the [generateUUIDv4](../query_language/functions/uuid_functions.md) function. - -## Usage example {#usage-example} - -**Example 1** - -This example demonstrates creating a table with the UUID type column and inserting a value into the table. - -``` sql -CREATE TABLE t_uuid (x UUID, y String) ENGINE=TinyLog -``` - -``` sql -INSERT INTO t_uuid SELECT generateUUIDv4(), 'Example 1' -``` - -``` sql -SELECT * FROM t_uuid -``` - -``` text -┌────────────────────────────────────x─┬─y─────────┐ -│ 417ddc5d-e556-4d27-95dd-a34d84e46a50 │ Example 1 │ -└──────────────────────────────────────┴───────────┘ -``` - -**Example 2** - -In this example, the UUID column value is not specified when inserting a new record. - -``` sql -INSERT INTO t_uuid (y) VALUES ('Example 2') -``` - -``` sql -SELECT * FROM t_uuid -``` - -``` text -┌────────────────────────────────────x─┬─y─────────┐ -│ 417ddc5d-e556-4d27-95dd-a34d84e46a50 │ Example 1 │ -│ 00000000-0000-0000-0000-000000000000 │ Example 2 │ -└──────────────────────────────────────┴───────────┘ -``` - -## Restrictions {#restrictions} - -The UUID data type only supports functions which [String](string.md) data type also supports (for example, [min](../query_language/agg_functions/reference.md#agg_function-min), [max](../query_language/agg_functions/reference.md#agg_function-max), and [count](../query_language/agg_functions/reference.md#agg_function-count)). - -The UUID data type is not supported by arithmetic operations (for example, [abs](../query_language/functions/arithmetic_functions.md#arithm_func-abs)) or aggregate functions, such as [sum](../query_language/agg_functions/reference.md#agg_function-sum) and [avg](../query_language/agg_functions/reference.md#agg_function-avg). - -[Original article](https://clickhouse.tech/docs/en/data_types/uuid/) diff --git a/docs/zh/database_engines/lazy.md b/docs/zh/database_engines/lazy.md deleted file mode 100644 index 45c5fd602d7..00000000000 --- a/docs/zh/database_engines/lazy.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -en_copy: true ---- - -# Lazy {#lazy} - -Keeps tables in RAM only `expiration_time_in_seconds` seconds after last access. Can be used only with \*Log tables. - -It’s optimized for storing many small \*Log tables, for which there is a long time interval between accesses. 
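-
-A minimal usage sketch follows (the `events` table and the 3600-second expiration are illustrative only; `Log` stands in for any \*Log engine):
-
-    CREATE DATABASE testlazy ENGINE = Lazy(3600);
-    CREATE TABLE testlazy.events (ts DateTime, msg String) ENGINE = Log;
-    INSERT INTO testlazy.events VALUES (now(), 'hello');
-    -- the table is loaded into RAM on access and dropped from RAM
-    -- 3600 seconds after the last access
-    SELECT * FROM testlazy.events;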
-
-## Creating a Database {#creating-a-database}
-
-    CREATE DATABASE testlazy ENGINE = Lazy(expiration_time_in_seconds);
-
-[Original article](https://clickhouse.tech/docs/en/database_engines/lazy/)
diff --git a/docs/zh/development/architecture.md b/docs/zh/development/architecture.md
index 22eaaf583d6..66d5cac13b5 100644
--- a/docs/zh/development/architecture.md
+++ b/docs/zh/development/architecture.md
@@ -1,3 +1,4 @@
+
 # ClickHouse 架构概述 {#clickhouse-jia-gou-gai-shu}
 
 ClickHouse 是一个真正的列式数据库管理系统(DBMS)。在 ClickHouse 中,数据始终是按列存储的,包括矢量(向量或列块)执行的过程。只要有可能,操作都是基于矢量进行分派的,而不是单个的值,这被称为«矢量化查询执行»,它有利于降低实际的数据处理开销。
@@ -12,7 +13,7 @@ ClickHouse 是一个真正的列式数据库管理系统(DBMS)。在 ClickHous
 
 不同的 `IColumn` 实现(`ColumnUInt8`、`ColumnString` 等)负责不同的列内存布局。内存布局通常是一个连续的数组。对于数据类型为整型的列,只是一个连续的数组,比如 `std::vector`。对于 `String` 列和 `Array` 列,则由两个向量组成:其中一个向量连续存储所有的 `String` 或数组元素,另一个存储每一个 `String` 或 `Array` 的起始元素在第一个向量中的偏移。而 `ColumnConst` 则仅在内存中存储一个值,但是看起来像一个列。
 
-## Field {#field}
+## 字段 {#field}
 
 尽管如此,有时候也可能需要处理单个值。表示单个值,可以使用 `Field`。`Field` 是 `UInt64`、`Int64`、`Float64`、`String` 和 `Array` 组成的联合。`IColumn` 拥有 `operator[]` 方法来获取第 `n` 个值成为一个 `Field`,同时也拥有 `insert` 方法将一个 `Field` 追加到一个列的末尾。这些方法并不高效,因为它们需要处理表示单一值的临时 `Field` 对象,但是有更高效的方法比如 `insertFrom` 和 `insertRangeFrom` 等。
@@ -115,7 +116,7 @@ ClickHouse 是一个真正的列式数据库管理系统(DBMS)。在 ClickHous
 
 普通函数不会改变行数 - 它们的执行看起来就像是独立地处理每一行数据。实际上,函数不会作用于一个单独的行上,而是作用在以 `Block` 为单位的数据上,以实现向量查询执行。
 
-还有一些杂项函数,比如 [blockSize](../query_language/functions/other_functions.md#function-blocksize)、[rowNumberInBlock](../query_language/functions/other_functions.md#function-rownumberinblock),以及 [runningAccumulate](../query_language/functions/other_functions.md#function-runningaccumulate),它们对块进行处理,并且不遵从行的独立性。
+还有一些杂项函数,比如 [blockSize](../sql_reference/functions/other_functions.md#function-blocksize)、[rowNumberInBlock](../sql_reference/functions/other_functions.md#function-rownumberinblock),以及 [runningAccumulate](../sql_reference/functions/other_functions.md#function-runningaccumulate),它们对块进行处理,并且不遵从行的独立性。
 
 ClickHouse 具有强类型,因此隐式类型转换不会发生。如果函数不支持某个特定的类型组合,则会抛出异常。但函数可以通过重载以支持许多不同的类型组合。比如,`plus` 函数(用于实现 `+` 运算符)支持任意数字类型的组合:`UInt8` + `Float32`,`UInt16` + `Int8` 等。同时,一些可变参数的函数能够级接收任意数目的参数,比如 `concat` 函数。
@@ -159,7 +160,7 @@ ClickHouse 具有强类型,因此隐式类型转换不会发生。如果函数
 
 分布式查询执行没有全局查询计划。每个节点都有针对自己的工作部分的本地查询计划。我们仅有简单的一次性分布式查询执行:将查询发送给远程节点,然后合并结果。但是对于具有高基数的 `GROUP BY` 或具有大量临时数据的 `JOIN` 这样困难的查询的来说,这是不可行的:在这种情况下,我们需要在服务器之间«改组»数据,这需要额外的协调。ClickHouse 不支持这类查询执行,我们需要在这方面进行努力。
 
-## Merge Tree {#merge-tree}
+## 合并树 {#merge-tree}
 
 `MergeTree` 是一系列支持按主键索引的存储引擎。主键可以是一个任意的列或表达式的元组。`MergeTree` 表中的数据存储于«分块»中。每一个分块以主键序存储数据(数据按主键元组的字典序排序)。表的所有列都存储在这些«分块»中分离的 `column.bin` 文件中。`column.bin` 文件由压缩块组成,每一个块通常是 64 KB 到 1 MB 大小的未压缩数据,具体取决于平均值大小。这些块由一个接一个连续放置的列值组成。每一列的列值顺序相同(顺序由主键定义),因此当你按多列进行迭代时,你能够得到相应列的值。
diff --git a/docs/zh/development/browse_code.md b/docs/zh/development/browse_code.md
index c3016d5e1dc..10d3ffecd15 100644
--- a/docs/zh/development/browse_code.md
+++ b/docs/zh/development/browse_code.md
@@ -1,11 +1,14 @@
 ---
-en_copy: true
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_priority: 63
+toc_title: "\u6D4F\u89C8ClickHouse\u6E90\u4EE3\u7801"
 ---
 
-# Browse ClickHouse Source Code {#browse-clickhouse-source-code}
+# 浏览ClickHouse源代码 {#browse-clickhouse-source-code}
 
-You can use **Woboq** online code browser available [here](https://clickhouse-test-reports.s3.yandex.net/codebrowser/html_report///ClickHouse/dbms/index.html). It provides code navigation and semantic highlighting, search and indexing. The code snapshot is updated daily.
+您可以使用 [这里](https://clickhouse-test-reports.s3.yandex.net/codebrowser/html_report///ClickHouse/src/index.html) 提供的 **Woboq** 在线代码浏览器。它提供了代码导航和语义高亮、搜索和索引功能。代码快照每天更新。
 
-Also, you can browse sources on [GitHub](https://github.com/ClickHouse/ClickHouse) as usual.
+此外,您也可以像往常一样在 [GitHub](https://github.com/ClickHouse/ClickHouse) 上浏览源代码。
 
-If you’re interested what IDE to use, we recommend CLion, QT Creator, VS Code and KDevelop (with caveats). You can use any favourite IDE. Vim and Emacs also count.
+至于使用什么 IDE,我们推荐 CLion、QT Creator、VS Code 和 KDevelop(有一些注意事项)。您也可以使用任何自己喜欢的 IDE。Vim 和 Emacs 也算。
diff --git a/docs/zh/development/build.md b/docs/zh/development/build.md
index 6a46d6f2cc7..05581985a35 100644
--- a/docs/zh/development/build.md
+++ b/docs/zh/development/build.md
@@ -1,3 +1,4 @@
+
 # 如何构建 ClickHouse 发布包 {#ru-he-gou-jian-clickhouse-fa-bu-bao}
 
 ## 安装 Git 和 Pbuilder {#an-zhuang-git-he-pbuilder}
@@ -32,12 +33,12 @@ cd ClickHouse
 sudo apt-get install git cmake ninja-build
 ```
 
-Or cmake3 instead of cmake on older systems.
 或者在早期版本的系统中用 cmake3 替代 cmake
 
 ## 安装 GCC 9 {#an-zhuang-gcc-9}
 
-There are several ways to do this.
+有几种方法可以做到这一点。
 
 ### 安装 PPA 包 {#an-zhuang-ppa-bao}
@@ -79,6 +80,6 @@ cd ..
 ```
 
 若要创建一个执行文件, 执行 `ninja clickhouse`。
-这个命令会使得 `programs/clickhouse` 文件可执行,您可以使用 `client` or `server` 参数运行。
+这个命令会使得 `programs/clickhouse` 文件可执行,您可以使用 `client` 或 `server` 参数运行。
 
 [来源文章](https://clickhouse.tech/docs/en/development/build/)
diff --git a/docs/zh/development/build_cross_arm.md b/docs/zh/development/build_cross_arm.md
index 0936a3133b2..1061fddfacd 100644
--- a/docs/zh/development/build_cross_arm.md
+++ b/docs/zh/development/build_cross_arm.md
@@ -1,17 +1,21 @@
 ---
-en_copy: true
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_priority: 67
+toc_title: "\u5982\u4F55\u5728Linux\u4E0A\u6784\u5EFAClickHouse for AARCH64\uFF08\
+  ARM64)"
 ---
 
-# How to Build ClickHouse on Linux for AARCH64 (ARM64) architecture {#how-to-build-clickhouse-on-linux-for-aarch64-arm64-architecture}
+# 如何在Linux上为AARCH64(ARM64)架构构建ClickHouse {#how-to-build-clickhouse-on-linux-for-aarch64-arm64-architecture}
 
-This is for the case when you have Linux machine and want to use it to build `clickhouse` binary that will run on another Linux machine with AARCH64 CPU architecture. This is intended for continuous integration checks that run on Linux servers.
+本文适用于这种情况:你有一台 Linux 机器,并希望用它来构建能在另一台 AARCH64 CPU 架构的 Linux 机器上运行的 `clickhouse` 二进制文件。这适用于在 Linux 服务器上运行的持续集成检查。
 
-The cross-build for AARCH64 is based on the [Build instructions](build.md), follow them first.
+AARCH64 的交叉构建基于 [构建说明](build.md),请先阅读并遵循其中的步骤。
 
-# Install Clang-8 {#install-clang-8}
+# 安装Clang-8 {#install-clang-8}
 
-Follow the instructions from https://apt.llvm.org/ for your Ubuntu or Debian setup.
-For example, in Ubuntu Bionic you can use the following commands:
+按照 https://apt.llvm.org/ 上的说明为您的 Ubuntu 或 Debian 进行设置。
+例如,在Ubuntu Bionic中,您可以使用以下命令: ``` bash echo "deb [trusted=yes] http://apt.llvm.org/bionic/ llvm-toolchain-bionic-8 main" | sudo tee /etc/apt/sources.list.d/llvm.list @@ -19,7 +23,7 @@ sudo apt-get update sudo apt-get install clang-8 ``` -# Install Cross-Compilation Toolset {#install-cross-compilation-toolset} +# 安装交叉编译工具集 {#install-cross-compilation-toolset} ``` bash cd ClickHouse @@ -28,7 +32,7 @@ wget 'https://developer.arm.com/-/media/Files/downloads/gnu-a/8.3-2019.03/binrel tar xJf gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz -C build-aarch64/cmake/toolchain/linux-aarch64 --strip-components=1 ``` -# Build ClickHouse {#build-clickhouse} +# 建立ClickHouse {#build-clickhouse} ``` bash cd ClickHouse @@ -37,4 +41,4 @@ CC=clang-8 CXX=clang++-8 cmake . -Bbuild-arm64 -DCMAKE_TOOLCHAIN_FILE=cmake/linu ninja -C build-arm64 ``` -The resulting binary will run only on Linux with the AARCH64 CPU architecture. +生成的二进制文件将仅在具有AARCH64CPU体系结构的Linux上运行。 diff --git a/docs/zh/development/build_cross_osx.md b/docs/zh/development/build_cross_osx.md index 20577d1213a..c74ff934c0d 100644 --- a/docs/zh/development/build_cross_osx.md +++ b/docs/zh/development/build_cross_osx.md @@ -1,10 +1,11 @@ + # 如何在Linux中编译Mac OS X ClickHouse {#ru-he-zai-linuxzhong-bian-yi-mac-os-x-clickhouse} Linux机器也可以编译运行在OS X系统的`clickhouse`二进制包,这可以用于在Linux上跑持续集成测试。如果要在Mac OS X上直接构建ClickHouse,请参考另外一篇指南: https://clickhouse.tech/docs/zh/development/build\_osx/ Mac OS X的交叉编译基于以下构建说明,请首先遵循它们。 -# Install Clang-8 {#install-clang-8} +# 安装Clang-8 {#install-clang-8} 按照https://apt.llvm.org/中的说明进行Ubuntu或Debian安装。 例如,安装Bionic的命令如下: diff --git a/docs/zh/development/build_osx.md b/docs/zh/development/build_osx.md index e471b716a33..0c1c840912e 100644 --- a/docs/zh/development/build_osx.md +++ b/docs/zh/development/build_osx.md @@ -1,3 +1,4 @@ + # 在 Mac OS X 中编译 ClickHouse {#zai-mac-os-x-zhong-bian-yi-clickhouse} ClickHouse 支持在 Mac OS X 10.12 版本中编译。若您在用更早的操作系统版本,可以尝试在指令中使用 `Gentoo Prefix` 和 `clang sl`. @@ -43,7 +44,7 @@ cd .. 
 为此,请创建以下文件:
 
-/Library/LaunchDaemons/limit.maxfiles.plist:
+/Library/LaunchDaemons/limit.maxfiles.plist:
 
 ``` xml
diff --git a/docs/zh/development/contrib.md b/docs/zh/development/contrib.md
index 5491cc76f6f..e282856c0e8 100644
--- a/docs/zh/development/contrib.md
+++ b/docs/zh/development/contrib.md
@@ -1,34 +1,35 @@
+
 # 使用的三方库 {#shi-yong-de-san-fang-ku}
 
-| Library | License |
-|---------------------|----------------------------------------------------------------------------------------------------------------------------------------------|
-| base64 | [BSD 2-Clause License](https://github.com/aklomp/base64/blob/a27c565d1b6c676beaf297fe503c4518185666f7/LICENSE) |
-| boost | [Boost Software License 1.0](https://github.com/ClickHouse-Extras/boost-extra/blob/6883b40449f378019aec792f9983ce3afc7ff16e/LICENSE_1_0.txt) |
-| brotli | [MIT](https://github.com/google/brotli/blob/master/LICENSE) |
-| capnproto | [MIT](https://github.com/capnproto/capnproto/blob/master/LICENSE) |
-| cctz | [Apache License 2.0](https://github.com/google/cctz/blob/4f9776a310f4952454636363def82c2bf6641d5f/LICENSE.txt) |
-| double-conversion | [BSD 3-Clause License](https://github.com/google/double-conversion/blob/cf2f0f3d547dc73b4612028a155b80536902ba02/LICENSE) |
-| FastMemcpy | [MIT](https://github.com/ClickHouse/ClickHouse/blob/master/libs/libmemcpy/impl/LICENSE) |
-| googletest | [BSD 3-Clause License](https://github.com/google/googletest/blob/master/LICENSE) |
-| hyperscan | [BSD 3-Clause License](https://github.com/intel/hyperscan/blob/master/LICENSE) |
-| libbtrie | [BSD 2-Clause License](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/libbtrie/LICENSE) |
-| libcxxabi | [BSD + MIT](https://github.com/ClickHouse/ClickHouse/blob/master/libs/libglibc-compatibility/libcxxabi/LICENSE.TXT) |
-| libdivide | [Zlib License](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/libdivide/LICENSE.txt) |
-| libgsasl | [LGPL v2.1](https://github.com/ClickHouse-Extras/libgsasl/blob/3b8948a4042e34fb00b4fb987535dc9e02e39040/LICENSE) |
-| libhdfs3 | [Apache License 2.0](https://github.com/ClickHouse-Extras/libhdfs3/blob/bd6505cbb0c130b0db695305b9a38546fa880e5a/LICENSE.txt) |
-| libmetrohash | [Apache License 2.0](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/libmetrohash/LICENSE) |
-| libpcg-random | [Apache License 2.0](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/libpcg-random/LICENSE-APACHE.txt) |
-| libressl | [OpenSSL License](https://github.com/ClickHouse-Extras/ssl/blob/master/COPYING) |
-| librdkafka | [BSD 2-Clause License](https://github.com/edenhill/librdkafka/blob/363dcad5a23dc29381cc626620e68ae418b3af19/LICENSE) |
-| libwidechar\_width | [CC0 1.0 Universal](https://github.com/ClickHouse/ClickHouse/blob/master/libs/libwidechar_width/LICENSE) |
-| llvm | [BSD 3-Clause License](https://github.com/ClickHouse-Extras/llvm/blob/163def217817c90fb982a6daf384744d8472b92b/llvm/LICENSE.TXT) |
-| lz4 | [BSD 2-Clause License](https://github.com/lz4/lz4/blob/c10863b98e1503af90616ae99725ecd120265dfb/LICENSE) |
-| mariadb-connector-c | [LGPL v2.1](https://github.com/ClickHouse-Extras/mariadb-connector-c/blob/3.1/COPYING.LIB) |
-| murmurhash | [Public Domain](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/murmurhash/LICENSE) |
-| pdqsort | [Zlib License](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/pdqsort/license.txt) |
-| poco | [Boost Software License - Version 1.0](https://github.com/ClickHouse-Extras/poco/blob/fe5505e56c27b6ecb0dcbc40c49dc2caf4e9637f/LICENSE) |
-| protobuf | [BSD 3-Clause License](https://github.com/ClickHouse-Extras/protobuf/blob/12735370922a35f03999afff478e1c6d7aa917a4/LICENSE) |
-| re2 | [BSD 3-Clause License](https://github.com/google/re2/blob/7cf8b88e8f70f97fd4926b56aa87e7f53b2717e0/LICENSE) |
-| UnixODBC | [LGPL v2.1](https://github.com/ClickHouse-Extras/UnixODBC/tree/b0ad30f7f6289c12b76f04bfb9d466374bb32168) |
-| zlib-ng | [Zlib License](https://github.com/ClickHouse-Extras/zlib-ng/blob/develop/LICENSE.md) |
-| zstd | [BSD 3-Clause License](https://github.com/facebook/zstd/blob/dev/LICENSE) |
+| 库 | 许可证 |
+|--------------------|-------------------------------------------------------------------------------------------------------------------------------------|
+| base64 | [BSD 2-Clause License](https://github.com/aklomp/base64/blob/a27c565d1b6c676beaf297fe503c4518185666f7/LICENSE) |
+| boost | [Boost Software License 1.0](https://github.com/ClickHouse-Extras/boost-extra/blob/6883b40449f378019aec792f9983ce3afc7ff16e/LICENSE_1_0.txt) |
+| brotli | [MIT](https://github.com/google/brotli/blob/master/LICENSE) |
+| capnproto | [MIT](https://github.com/capnproto/capnproto/blob/master/LICENSE) |
+| cctz | [Apache License 2.0](https://github.com/google/cctz/blob/4f9776a310f4952454636363def82c2bf6641d5f/LICENSE.txt) |
+| double-conversion | [BSD 3-Clause License](https://github.com/google/double-conversion/blob/cf2f0f3d547dc73b4612028a155b80536902ba02/LICENSE) |
+| FastMemcpy | [MIT](https://github.com/ClickHouse/ClickHouse/blob/master/libs/libmemcpy/impl/LICENSE) |
+| googletest | [BSD 3-Clause License](https://github.com/google/googletest/blob/master/LICENSE) |
+| hyperscan | [BSD 3-Clause License](https://github.com/intel/hyperscan/blob/master/LICENSE) |
+| libbtrie | [BSD 2-Clause License](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/libbtrie/LICENSE) |
+| libcxxabi | [BSD + MIT](https://github.com/ClickHouse/ClickHouse/blob/master/libs/libglibc-compatibility/libcxxabi/LICENSE.TXT) |
+| libdivide | [Zlib License](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/libdivide/LICENSE.txt) |
+| libgsasl | [LGPL v2.1](https://github.com/ClickHouse-Extras/libgsasl/blob/3b8948a4042e34fb00b4fb987535dc9e02e39040/LICENSE) |
+| libhdfs3 | [Apache License 2.0](https://github.com/ClickHouse-Extras/libhdfs3/blob/bd6505cbb0c130b0db695305b9a38546fa880e5a/LICENSE.txt) |
+| libmetrohash | [Apache License 2.0](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/libmetrohash/LICENSE) |
+| libpcg-random | [Apache License 2.0](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/libpcg-random/LICENSE-APACHE.txt) |
+| libressl | [OpenSSL License](https://github.com/ClickHouse-Extras/ssl/blob/master/COPYING) |
+| librdkafka | [BSD 2-Clause License](https://github.com/edenhill/librdkafka/blob/363dcad5a23dc29381cc626620e68ae418b3af19/LICENSE) |
+| libwidechar\_width | [CC0 1.0 Universal](https://github.com/ClickHouse/ClickHouse/blob/master/libs/libwidechar_width/LICENSE) |
+| llvm | [BSD 3-Clause License](https://github.com/ClickHouse-Extras/llvm/blob/163def217817c90fb982a6daf384744d8472b92b/llvm/LICENSE.TXT) |
+| lz4 | [BSD 2-Clause License](https://github.com/lz4/lz4/blob/c10863b98e1503af90616ae99725ecd120265dfb/LICENSE) |
+| mariadb-connector-c | [LGPL v2.1](https://github.com/ClickHouse-Extras/mariadb-connector-c/blob/3.1/COPYING.LIB) |
+| murmurhash | [Public Domain](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/murmurhash/LICENSE) |
+| pdqsort | [Zlib License](https://github.com/ClickHouse/ClickHouse/blob/master/contrib/pdqsort/license.txt) |
+| poco | [Boost Software License - Version 1.0](https://github.com/ClickHouse-Extras/poco/blob/fe5505e56c27b6ecb0dcbc40c49dc2caf4e9637f/LICENSE) |
+| protobuf | [BSD 3-Clause License](https://github.com/ClickHouse-Extras/protobuf/blob/12735370922a35f03999afff478e1c6d7aa917a4/LICENSE) |
+| re2 | [BSD 3-Clause License](https://github.com/google/re2/blob/7cf8b88e8f70f97fd4926b56aa87e7f53b2717e0/LICENSE) |
+| UnixODBC | [LGPL v2.1](https://github.com/ClickHouse-Extras/UnixODBC/tree/b0ad30f7f6289c12b76f04bfb9d466374bb32168) |
+| zlib-ng | [Zlib License](https://github.com/ClickHouse-Extras/zlib-ng/blob/develop/LICENSE.md) |
+| zstd | [BSD 3-Clause License](https://github.com/facebook/zstd/blob/dev/LICENSE) |
diff --git a/docs/zh/development/developer_instruction.md b/docs/zh/development/developer_instruction.md
index 6d865afb2c4..f39ab665ee6 100644
--- a/docs/zh/development/developer_instruction.md
+++ b/docs/zh/development/developer_instruction.md
@@ -1,3 +1,4 @@
+
 ClickHouse支持Linux,FreeBSD 及 Mac OS X 系统。
 
 # Windows使用指引 {#windowsshi-yong-zhi-yin}
@@ -67,9 +68,9 @@
 
 命令执行成功后,可以通过执行`git pull upstream master`,从ClickHouse的主分支中拉取更新。
 
-## Working with submodules {#working-with-submodules}
+## 使用子模块 {#working-with-submodules}
 
-Working with submodules in git could be painful. Next commands will help to manage it:
+在git中使用子模块可能会很痛苦。 接下来的命令将有助于管理它:
 
     # ! each command accepts --recursive
    # Update remote URLs for submodules. Barely rare case
    # Add new submodules
    # Update existing submodules to the state they are supposed to be
    # Two last commands could be merged together
    git submodule update --init
 
-The next commands would help you to reset all submodules to the initial state (!WARING! - any chenges inside will be deleted):
+接下来的命令将帮助您将所有子模块重置为初始状态(警告:其中的任何更改都将被删除):
 
    # Synchronizes submodules' remote URL with .gitmodules
    git submodule sync --recursive
diff --git a/docs/zh/development/index.md b/docs/zh/development/index.md
index 187ee1b3e25..cf3b2fae1d9 100644
--- a/docs/zh/development/index.md
+++ b/docs/zh/development/index.md
@@ -1,3 +1,4 @@
+
 # ClickHouse 开发 {#clickhouse-kai-fa}
 
 [来源文章](https://clickhouse.tech/docs/en/development/)
diff --git a/docs/zh/development/style.md b/docs/zh/development/style.md
index 4d374f9b2e8..10c036fef3b 100644
--- a/docs/zh/development/style.md
+++ b/docs/zh/development/style.md
@@ -1,3 +1,4 @@
+
 # 如何编写 C++ 代码 {#ru-he-bian-xie-c-dai-ma}
 
 ## 一般建议 {#yi-ban-jian-yi}
@@ -200,7 +201,7 @@ std::cerr << static_cast(c) << std::endl;
 
 for (Names::const_iterator it = column_names.begin(); it != column_names.end(); ++it)
 ```
 
-## Comments {#comments}
+## 注释 {#comments}
 
 **1.** 请务必为所有非常重要的代码部分添加注释。
@@ -297,7 +298,7 @@ void executeQuery(
 /// for
 ```
 
-## Names {#names}
+## 命名 {#names}
 
 **1.** 在变量和类成员的名称中使用带下划线的小写字母。
@@ -623,7 +624,7 @@ Loader() {}
 
 **18.** 编码。
 
-在所有情况下使用 UTF-8 编码。使用 `std::string` and `char *`。不要使用 `std::wstring` 和 `wchar_t`。
+在所有情况下使用 UTF-8 编码。使用 `std::string` 和 `char *`。不要使用 `std::wstring` 和 `wchar_t`。
 
 **19.** 日志。
diff --git a/docs/zh/development/tests.md b/docs/zh/development/tests.md
index b3a3468e31c..f54e273a77a 100644
--- a/docs/zh/development/tests.md
+++ b/docs/zh/development/tests.md
@@ -1,3 +1,4 @@
+
 # ClickHouse 测试 {#clickhouse-ce-shi}
 
 ## 功能性测试 {#gong-neng-xing-ce-shi}
@@ -14,7 +15,7 @@
 
 调用功能测试最简单的方法是将 `clickhouse-client` 复制到`/usr/bin/`,运行`clickhouse-server`,然后从自己的目录运行`./ clickhouse-test`。
 
-要添加新测试,请在 `tests/queries/0_stateless` 目录内添加新的 `.sql` 或 `.sh` 文件,手动检查,然后按以下方式生成 `.reference` 文件: `clickhouse-client -n --testmode < 00000_test.sql > 00000_test.reference` or `./00000_test.sh > ./00000_test.reference`。
+要添加新测试,请在 `tests/queries/0_stateless` 目录内添加新的 `.sql` 或 `.sh` 文件,手动检查,然后按以下方式生成 `.reference` 文件: `clickhouse-client -n --testmode < 00000_test.sql > 00000_test.reference` 或 `./00000_test.sh > ./00000_test.reference`。
 
 测试应该只使用(创建,删除等)`test` 数据库中的表,这些表假定是事先创建的; 测试也可以使用临时表。
@@ -152,24 +153,24 @@ Clang 有更多有用的警告 - 您可以使用 `-Weverything` 查找它们并
 
 对于生产构建,使用 gcc(它仍然生成比 clang 稍高效的代码)。对于开发来说,clang 通常更方便使用。您可以使用调试模式在自己的机器上构建(以节省笔记本电脑的电量),但请注意,由于更好的控制流程和过程分析,编译器使用 `-O3` 会生成更多警告。 当使用 clang 构建时,使用 `libc++` 而不是 `libstdc++`,并且在使用调试模式构建时,使用调试版本的 `libc++`,它允许在运行时捕获更多错误。
 
-## Sanitizers {#sanitizers}
+## Sanitizers {#sanitizers}
 
-**Address sanitizer**.
+**Address sanitizer**.
 我们在每个提交的基础上在 ASan 下运行功能和集成测试。
 
-**Valgrind (Memcheck)**.
+**Valgrind (Memcheck)**.
 我们在 Valgrind 过夜进行功能测试。 这需要几个小时。 目前在 `re2` 库中有一个已知的误报,请参阅 [文章](https://research.swtch.com/sparse)。
 
-**Thread sanitizer**.
+**Thread sanitizer**.
 我们在 TSan 下进行功能测试。ClickHouse 必须通过所有测试。在 TSan 下运行不是自动化的,只是偶尔执行。
 
-**Memory sanitizer**.
+**Memory sanitizer**.
 目前我们不使用 MSan。
 
-**Undefined behaviour sanitizer.**
+**Undefined behaviour sanitizer.**
 我们仍然不会在每次提交的基础上使用 UBSan。 有一些地方需要解决。
 
-**Debug allocator.**
+**调试分配器。**
 您可以使用 `DEBUG_TCMALLOC` CMake 选项启用 `tcmalloc` 的调试版本。我们在每次提交的基础上使用调试分配器运行测试。
 
 更多请参阅 `tests/instructions/sanitizers.txt`。
diff --git a/docs/zh/database_engines/index.md b/docs/zh/engines/database_engines/index.md
similarity index 69%
rename from docs/zh/database_engines/index.md
rename to docs/zh/engines/database_engines/index.md
index 95c7ea2c319..2431b96a43d 100644
--- a/docs/zh/database_engines/index.md
+++ b/docs/zh/engines/database_engines/index.md
@@ -1,8 +1,9 @@
+
 # 数据库引擎 {#shu-ju-ku-yin-qing}
 
 您使用的所有表都是由数据库引擎所提供的
 
-默认情况下,ClickHouse使用自己的数据库引擎,该引擎提供可配置的[表引擎](../operations/table_engines/index.md)和[所有支持的SQL语法](../query_language/syntax.md).
+默认情况下,ClickHouse使用自己的数据库引擎,该引擎提供可配置的[表引擎](../../engines/table_engines/index.md)和[所有支持的SQL语法](../../sql_reference/syntax.md)。
 除此之外,您还可以选择使用以下的数据库引擎:
 
diff --git a/docs/zh/engines/database_engines/lazy.md b/docs/zh/engines/database_engines/lazy.md
new file mode 100644
index 00000000000..6b094c8793d
--- /dev/null
+++ b/docs/zh/engines/database_engines/lazy.md
@@ -0,0 +1,18 @@
+---
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_priority: 31
+toc_title: "Lazy"
+---
+
+# Lazy {#lazy}
+
+仅在最后一次访问后的 `expiration_time_in_seconds` 秒内将表保留在 RAM 中。只能与 \*Log 表一起使用。
+
+它针对存储大量访问间隔较长的小型 \*Log 表的场景进行了优化。
+
+## 创建数据库 {#creating-a-database}
+
+    CREATE DATABASE testlazy ENGINE = Lazy(expiration_time_in_seconds);
+
+[原始文章](https://clickhouse.tech/docs/en/database_engines/lazy/)
diff --git a/docs/zh/database_engines/mysql.md b/docs/zh/engines/database_engines/mysql.md
similarity index 63%
rename from docs/zh/database_engines/mysql.md
rename to docs/zh/engines/database_engines/mysql.md
index 9467269a2cc..78844154bce 100644
--- a/docs/zh/database_engines/mysql.md
+++ b/docs/zh/engines/database_engines/mysql.md
@@ -1,3 +1,4 @@
+
 # MySQL {#mysql}
 
 MySQL引擎用于将远程的MySQL服务器中的表映射到ClickHouse中,并允许您对表进行`INSERT`和`SELECT`查询,以方便您在ClickHouse与MySQL之间进行数据交换。
@@ -28,25 +29,25 @@ ENGINE = MySQL('host:port', 'database', 'user', 'password')
 
 ## 支持的类型对应 {#zhi-chi-de-lei-xing-dui-ying}
 
-| MySQL | ClickHouse |
-|----------------------------------|---------------------------------------------|
-| UNSIGNED TINYINT | [UInt8](../data_types/int_uint.md) |
-| TINYINT | [Int8](../data_types/int_uint.md) |
-| UNSIGNED SMALLINT | [UInt16](../data_types/int_uint.md) |
-| SMALLINT | [Int16](../data_types/int_uint.md) |
-| UNSIGNED INT, UNSIGNED MEDIUMINT | [UInt32](../data_types/int_uint.md) |
-| INT, MEDIUMINT | [Int32](../data_types/int_uint.md) |
-| UNSIGNED BIGINT | [UInt64](../data_types/int_uint.md) |
-| BIGINT | [Int64](../data_types/int_uint.md) |
-| FLOAT | [Float32](../data_types/float.md) |
-| DOUBLE | [Float64](../data_types/float.md) |
-| DATE | [Date](../data_types/date.md) |
-| DATETIME, TIMESTAMP | [DateTime](../data_types/datetime.md) |
-| BINARY | [FixedString](../data_types/fixedstring.md) |
+| MySQL | ClickHouse |
+|----------------------------------|-------------------------------------------------------------|
+| UNSIGNED TINYINT | [UInt8](../../sql_reference/data_types/int_uint.md) |
+| TINYINT | [Int8](../../sql_reference/data_types/int_uint.md) |
+| UNSIGNED SMALLINT | [UInt16](../../sql_reference/data_types/int_uint.md) |
+| SMALLINT | [Int16](../../sql_reference/data_types/int_uint.md) |
+| UNSIGNED INT, UNSIGNED MEDIUMINT | [UInt32](../../sql_reference/data_types/int_uint.md) |
+| INT, MEDIUMINT | [Int32](../../sql_reference/data_types/int_uint.md) |
+| UNSIGNED BIGINT | [UInt64](../../sql_reference/data_types/int_uint.md) |
+| BIGINT | [Int64](../../sql_reference/data_types/int_uint.md) |
+| FLOAT | [Float32](../../sql_reference/data_types/float.md) |
+| DOUBLE | [Float64](../../sql_reference/data_types/float.md) |
+| DATE | [Date](../../sql_reference/data_types/date.md) |
+| DATETIME, TIMESTAMP | [DateTime](../../sql_reference/data_types/datetime.md) |
+| BINARY | [FixedString](../../sql_reference/data_types/fixedstring.md) |
 
-其他的MySQL数据类型将全部都转换为[String](../data_types/string.md)。
+其他的MySQL数据类型将全部都转换为[String](../../sql_reference/data_types/string.md)。
 
-同时以上的所有类型都支持[Nullable](../data_types/nullable.md)。
+同时以上的所有类型都支持[Nullable](../../sql_reference/data_types/nullable.md)。
 
 ## 使用示例 {#shi-yong-shi-li}
diff --git a/docs/zh/engines/index.md b/docs/zh/engines/index.md
new file mode 100644
index 00000000000..41d2a7e3d8d
--- /dev/null
+++ b/docs/zh/engines/index.md
@@ -0,0 +1,8 @@
+---
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_folder_title: "\u5F15\u64CE"
+toc_priority: 25
+---
+
+
diff --git a/docs/zh/operations/table_engines/index.md b/docs/zh/engines/table_engines/index.md
similarity index 50%
rename from docs/zh/operations/table_engines/index.md
rename to docs/zh/engines/table_engines/index.md
index 6a3a752561c..9603ebe78c8 100644
--- a/docs/zh/operations/table_engines/index.md
+++ b/docs/zh/engines/table_engines/index.md
@@ -1,3 +1,4 @@
+
 # 表引擎 {#biao-yin-qing}
 
 表引擎(即表的类型)决定了:
@@ -13,54 +14,54 @@
 
 ## MergeTree {#mergetree}
 
-适用于高负载任务的最通用和功能最强大的表引擎。这些引擎的共同特点是可以快速插入数据并进行后续的后台数据处理。 MergeTree系列引擎支持数据复制(使用[Replicated\*](replication.md) 的引擎版本),分区和一些其他引擎不支持的其他功能。
+适用于高负载任务的最通用和功能最强大的表引擎。这些引擎的共同特点是可以快速插入数据并进行后续的后台数据处理。 MergeTree系列引擎支持数据复制(使用[Replicated\*](mergetree_family/replication.md) 的引擎版本),分区和一些其他引擎不支持的其他功能。
 
 该类型的引擎:
-\* [MergeTree](mergetree.md)
-\* [ReplacingMergeTree](replacingmergetree.md)
-\* [SummingMergeTree](summingmergetree.md)
-\* [AggregatingMergeTree](aggregatingmergetree.md)
-\* [CollapsingMergeTree](collapsingmergetree.md)
-\* [VersionedCollapsingMergeTree](versionedcollapsingmergetree.md)
-\* [GraphiteMergeTree](graphitemergetree.md)
+\* [MergeTree](mergetree_family/mergetree.md)
+\* [ReplacingMergeTree](mergetree_family/replacingmergetree.md)
+\* [SummingMergeTree](mergetree_family/summingmergetree.md)
+\* [AggregatingMergeTree](mergetree_family/aggregatingmergetree.md)
+\* [CollapsingMergeTree](mergetree_family/collapsingmergetree.md)
+\* [VersionedCollapsingMergeTree](mergetree_family/versionedcollapsingmergetree.md)
+\* [GraphiteMergeTree](mergetree_family/graphitemergetree.md)
 
-## Log {#log}
+## 日志 {#log}
 
-具有最小功能的[轻量级引擎](log_family.md)。当您需要快速写入许多小表(最多约100万行)并在以后整体读取它们时,该类型的引擎是最有效的。
+具有最小功能的[轻量级引擎](log_family/index.md)。当您需要快速写入许多小表(最多约100万行)并在以后整体读取它们时,该类型的引擎是最有效的。
 
 该类型的引擎:
 
-- \[TinyLog\](tinylog/)
-- \[StripeLog\](stripelog/)
-- [Log](#log)(log/)
+- [TinyLog](log_family/tinylog.md)
+- [StripeLog](log_family/stripelog.md)
+- [Log](log_family/log.md)
 
-## Integration engines {#integration-engines}
+## 集成引擎 {#integration-engines}
 
 用于与其他的数据存储与处理系统集成的引擎。
 
 该类型的引擎:
 
-- [Kafka](kafka.md)
-- [MySQL](mysql.md)
-- [ODBC](odbc.md)
-- [JDBC](jdbc.md)
-- [HDFS](hdfs.md)
+- [Kafka](integrations/kafka.md)
+- [MySQL](integrations/mysql.md)
+- [ODBC](integrations/odbc.md)
+- [JDBC](integrations/jdbc.md)
+- [HDFS](integrations/hdfs.md)
 
 ## 用于其他特定功能的引擎 {#yong-yu-qi-ta-te-ding-gong-neng-de-yin-qing}
 
 该类型的引擎:
 
-- [Distributed](distributed.md)
-- [MaterializedView](materializedview.md)
-- [Dictionary](dictionary.md)
-- [Merge](merge.md)
-- [File](file.md)
-- [Null](null.md)
-- [Set](set.md)
-- [Join](join.md)
-- [URL](url.md)
-- [View](view.md)
-- [Memory](memory.md)
-- [Buffer](buffer.md)
+- [Distributed](special/distributed.md)
+- [MaterializedView](special/materializedview.md)
+- [Dictionary](special/dictionary.md)
+- [Merge](special/merge.md)
+- [File](special/file.md)
+- [Null](special/null.md)
+- [Set](special/set.md)
+- [Join](special/join.md)
+- [URL](special/url.md)
+- [View](special/view.md)
+- [Memory](special/memory.md)
+- [Buffer](special/buffer.md)
 
 # 虚拟列 {#xu-ni-lie}
diff --git a/docs/zh/engines/table_engines/integrations/hdfs.md b/docs/zh/engines/table_engines/integrations/hdfs.md
new file mode 100644
index 00000000000..5cd60a855bc
--- /dev/null
+++ b/docs/zh/engines/table_engines/integrations/hdfs.md
@@ -0,0 +1,123 @@
+---
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_priority: 36
+toc_title: HDFS
+---
+
+# HDFS {#table_engines-hdfs}
+
+该引擎通过允许在 ClickHouse 中管理 [HDFS](https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html) 上的数据,提供了与 [Apache Hadoop](https://en.wikipedia.org/wiki/Apache_Hadoop) 生态系统的集成。这个引擎类似
+于 [File](../special/file.md) 和 [URL](../special/url.md) 引擎,但提供了 Hadoop 特有的功能。
+
+## 用法 {#usage}
+
+``` sql
+ENGINE = HDFS(URI, format)
+```
+
+`URI` 参数是 HDFS 中完整的文件 URI。
+`format` 参数指定一种可用的文件格式。 执行
+`SELECT` 查询时,格式必须支持输入;执行
+`INSERT` 查询时,格式必须支持输出。 可用的格式列在
+[格式](../../../interfaces/formats.md#formats) 一节中。
+`URI` 的路径部分可以包含 glob 通配符。 在这种情况下,表将是只读的。
+
+**示例:**
+
+**1.** 设置 `hdfs_engine_table` 表:
+
+``` sql
+CREATE TABLE hdfs_engine_table (name String, value UInt32) ENGINE=HDFS('hdfs://hdfs1:9000/other_storage', 'TSV')
+```
+
+**2.** 填充文件:
+
+``` sql
+INSERT INTO hdfs_engine_table VALUES ('one', 1), ('two', 2), ('three', 3)
+```
+
+**3.** 查询数据:
+
+``` sql
+SELECT * FROM hdfs_engine_table LIMIT 2
+```
+
+``` text
+┌─name─┬─value─┐
+│ one │ 1 │
+│ two │ 2 │
+└──────┴───────┘
+```
+
+## 实现细节 {#implementation-details}
+
+- 读取和写入可以并行
+- 不支持:
+    - `ALTER` 和 `SELECT...SAMPLE` 操作。
+    - 索引。
+    - 复制。
+
+**路径中的通配符**
+
+多个路径组件都可以包含 glob 通配符。 要处理的文件必须存在,并且与整个路径模式匹配。 文件列表在 `SELECT` 时确定(而不是在 `CREATE` 时)。
+
+- `*` — 匹配除 `/` 以外的任意数量的任意字符,包括空字符串。
+- `?` — 匹配任意单个字符。
+- `{some_string,another_string,yet_another_one}` — 匹配 `'some_string', 'another_string', 'yet_another_one'` 中的任意一个字符串。
+- `{N..M}` — 匹配从 N 到 M(含两端)范围内的任意数字。
+
+带 `{}` 的构造类似于 [remote](../../../sql_reference/table_functions/remote.md) 表函数。
+
+**示例**
+
+1. 假设我们在HDFS上有几个TSV格式的文件,其中包含以下 URI:
+
+- ‘hdfs://hdfs1:9000/some\_dir/some\_file\_1’
+- ‘hdfs://hdfs1:9000/some\_dir/some\_file\_2’
+- ‘hdfs://hdfs1:9000/some\_dir/some\_file\_3’
+- ‘hdfs://hdfs1:9000/another\_dir/some\_file\_1’
+- ‘hdfs://hdfs1:9000/another\_dir/some\_file\_2’
+- ‘hdfs://hdfs1:9000/another\_dir/some\_file\_3’
+
+1. 有几种方法可以创建由所有六个文件组成的表:
+
+
+
+``` sql
+CREATE TABLE table_with_range (name String, value UInt32) ENGINE = HDFS('hdfs://hdfs1:9000/{some,another}_dir/some_file_{1..3}', 'TSV')
+```
+
+另一种方式:
+
+``` sql
+CREATE TABLE table_with_question_mark (name String, value UInt32) ENGINE = HDFS('hdfs://hdfs1:9000/{some,another}_dir/some_file_?', 'TSV')
+```
+
+表由两个目录中的所有文件组成(所有文件都应满足查询中描述的格式和模式):
+
+``` sql
+CREATE TABLE table_with_asterisk (name String, value UInt32) ENGINE = HDFS('hdfs://hdfs1:9000/{some,another}_dir/*', 'TSV')
+```
+
+!!! warning "警告"
+    如果文件列表包含带有前导零的数字范围,请单独使用带有大括号的构造或使用 `?`.
+
+**示例**
+
+创建包含名为 `file000`, `file001`, … , `file999` 的文件的表:
+
+``` sql
+CREATE TABLE big_table (name String, value UInt32) ENGINE = HDFS('hdfs://hdfs1:9000/big_dir/file{0..9}{0..9}{0..9}', 'CSV')
+```
+
+## 虚拟列 {#virtual-columns}
+
+- `_path` — 文件路径。
+- `_file` — 文件名。
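+
+一个简单的补充示例(仅作示意,假设使用上文定义的 `table_with_asterisk` 表):虚拟列可以像普通列一样被查询和过滤:
+
+``` sql
+-- 读取每一行数据所来自的文件路径和文件名
+SELECT _path, _file, name, value
+FROM table_with_asterisk
+WHERE _file = 'some_file_1'
+```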
+
+**另请参阅**
+
+- [虚拟列](../index.md#table_engines-virtual_columns)
+
+[原始文章](https://clickhouse.tech/docs/en/operations/table_engines/hdfs/)
diff --git a/docs/zh/engines/table_engines/integrations/index.md b/docs/zh/engines/table_engines/integrations/index.md
new file mode 100644
index 00000000000..b488c83d1bd
--- /dev/null
+++ b/docs/zh/engines/table_engines/integrations/index.md
@@ -0,0 +1,8 @@
+---
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_folder_title: "\u96C6\u6210"
+toc_priority: 30
+---
+
+
diff --git a/docs/zh/operations/table_engines/jdbc.md b/docs/zh/engines/table_engines/integrations/jdbc.md
similarity index 56%
rename from docs/zh/operations/table_engines/jdbc.md
rename to docs/zh/engines/table_engines/integrations/jdbc.md
index 576c7182907..00363bb988a 100644
--- a/docs/zh/operations/table_engines/jdbc.md
+++ b/docs/zh/engines/table_engines/integrations/jdbc.md
@@ -1,16 +1,19 @@
 ---
-en_copy: true
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_priority: 34
+toc_title: JDBC
 ---
 
 # JDBC {#table-engine-jdbc}
 
-Allows ClickHouse to connect to external databases via [JDBC](https://en.wikipedia.org/wiki/Java_Database_Connectivity).
+允许 ClickHouse 通过 [JDBC](https://en.wikipedia.org/wiki/Java_Database_Connectivity) 连接到外部数据库。
 
-To implement the JDBC connection, ClickHouse uses the separate program [clickhouse-jdbc-bridge](https://github.com/alex-krash/clickhouse-jdbc-bridge) that should run as a daemon.
+要实现 JDBC 连接,ClickHouse 使用单独的程序 [clickhouse-jdbc-bridge](https://github.com/alex-krash/clickhouse-jdbc-bridge),它应该作为守护进程运行。
 
-This engine supports the [Nullable](../../data_types/nullable.md) data type.
+该引擎支持 [Nullable](../../../sql_reference/data_types/nullable.md) 数据类型。
 
-## Creating a Table {#creating-a-table}
+## 创建表 {#creating-a-table}
 
 ``` sql
 CREATE TABLE [IF NOT EXISTS] [db.]table_name
(
    name1 [type1],
    name2 [type2],
    ...
)
 ENGINE = JDBC(dbms_uri, external_database, external_table)
 ```
 
-**Engine Parameters**
+**引擎参数**
 
 - `dbms_uri` — URI of an external DBMS.
 
-    Format: `jdbc:://:/?user=&password=`.
-    Example for MySQL: `jdbc:mysql://localhost:3306/?user=root&password=root`.
+    格式: `jdbc:://:/?user=&password=`.
+    MySQL 示例: `jdbc:mysql://localhost:3306/?user=root&password=root`.
 
 - `external_database` — Database in an external DBMS.
 
 - `external_table` — Name of the table in `external_database`.
 
-## Usage Example {#usage-example}
+## 用法示例 {#usage-example}
 
-Creating a table in MySQL server by connecting directly with it’s console client:
+通过 MySQL 控制台客户端直接连接,在 MySQL 服务器中创建一个表:
 
 ``` text
 mysql> CREATE TABLE `test`.`test` (
@@ -48,15 +51,15 @@ mysql> insert into test (`int_id`, `float`) VALUES (1,2);
 Query OK, 1 row affected (0,00 sec)
 
 mysql> select * from test;
-+--------+--------------+-------+----------------+
++--------+--------------+-------+----------------+
 | int_id | int_nullable | float | float_nullable |
-+--------+--------------+-------+----------------+
++--------+--------------+-------+----------------+
 | 1 | NULL | 2 | NULL |
-+--------+--------------+-------+----------------+
++--------+--------------+-------+----------------+
 1 row in set (0,00 sec)
 ```
 
-Creating a table in ClickHouse server and selecting data from it:
+在 ClickHouse 服务器中创建表并从中选择数据:
 
 ``` sql
 CREATE TABLE jdbc_table
@@ -80,8 +83,8 @@
 FROM jdbc_table
 └────────┴──────────────┴───────┴────────────────┘
 
-## See Also {#see-also}
+## 另请参阅 {#see-also}
 
-- [JDBC table function](../../query_language/table_functions/jdbc.md).
+- [JDBC 表函数](../../../sql_reference/table_functions/jdbc.md)。
 
-[Original article](https://clickhouse.tech/docs/en/operations/table_engines/jdbc/)
+[原始文章](https://clickhouse.tech/docs/en/operations/table_engines/jdbc/)
diff --git a/docs/zh/operations/table_engines/kafka.md b/docs/zh/engines/table_engines/integrations/kafka.md
similarity index 85%
rename from docs/zh/operations/table_engines/kafka.md
rename to docs/zh/engines/table_engines/integrations/kafka.md
index e992a76519e..53bde650dfc 100644
--- a/docs/zh/operations/table_engines/kafka.md
+++ b/docs/zh/engines/table_engines/integrations/kafka.md
@@ -1,4 +1,5 @@
-# Kafka {#kafka}
+
+# Kafka {#kafka}
 
 此引擎与 [Apache Kafka](http://kafka.apache.org/) 结合使用。
 
 Kafka 特性:
@@ -36,7 +37,7 @@ Kafka 特性:
 可选参数:
 
 - `kafka_row_delimiter` - 每个消息体(记录)之间的分隔符。
-- `kafka_schema` – 如果解析格式需要一个 schema 时,此参数必填。例如,[Cap’n Proto](https://capnproto.org/) 需要 schema 文件路径以及根对象 `schema.capnp:Message` 的名字。
+- `kafka_schema` – 如果解析格式需要一个 schema 时,此参数必填。例如,[Cap’n Proto](https://capnproto.org/) 需要 schema 文件路径以及根对象 `schema.capnp:Message` 的名字。
 - `kafka_num_consumers` – 单个表的消费者数量。默认值是:`1`,如果一个消费者的吞吐量不足,则指定更多的消费者。消费者的总数不应该超过 topic 中分区的数量,因为每个分区只能分配一个消费者。
 
 示例:
@@ -103,7 +104,7 @@
 
     SELECT level, sum(total) FROM daily GROUP BY level;
 
-为了提高性能,接受的消息被分组为 [max\_insert\_block\_size](../settings/settings.md#settings-max_insert_block_size) 大小的块。如果未在 [stream\_flush\_interval\_ms](../settings/settings.md) 毫秒内形成块,则不关心块的完整性,都会将数据刷新到表中。
+为了提高性能,接受的消息被分组为 [max\_insert\_block\_size](../../../operations/settings/settings.md#settings-max_insert_block_size) 大小的块。如果未在 [stream\_flush\_interval\_ms](../../../operations/settings/settings.md) 毫秒内形成块,则不关心块的完整性,都会将数据刷新到表中。
 
 停止接收主题数据或更改转换逻辑,请 detach 物化视图:
 
@@ -130,6 +131,6 @@
 
 ```
 
-有关详细配置选项列表,请参阅 [librdkafka configuration reference](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md)。在 ClickHouse 配置中使用下划线 (`_`) ,并不是使用点 (`.`)。例如,`check.crcs=true` 将是 `true`。
+有关详细配置选项列表,请参阅 [librdkafka 配置参考](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md)。在 ClickHouse 配置中使用下划线 (`_`) 而不是点 (`.`)。例如,`check.crcs=true` 将是 `<check_crcs>true</check_crcs>`。
 
-[Original article](https://clickhouse.tech/docs/zh/operations/table_engines/kafka/)
+[原始文章](https://clickhouse.tech/docs/zh/operations/table_engines/kafka/)
diff --git a/docs/zh/operations/table_engines/mysql.md b/docs/zh/engines/table_engines/integrations/mysql.md
similarity index 77%
rename from docs/zh/operations/table_engines/mysql.md
rename to docs/zh/engines/table_engines/integrations/mysql.md
index e97f5f12106..bfd3e6445a5 100644
--- a/docs/zh/operations/table_engines/mysql.md
+++ b/docs/zh/engines/table_engines/integrations/mysql.md
@@ -1,3 +1,4 @@
+
 # MySQL {#mysql}
 
 MySQL 引擎可以对存储在远程 MySQL 服务器上的数据执行 `SELECT` 查询。
@@ -20,6 +21,6 @@
 
 其余条件以及 `LIMIT` 采样约束语句仅在对MySQL的查询完成后才在ClickHouse中执行。
 
-`MySQL` 引擎不支持 [Nullable](../../data_types/nullable.md) 数据类型,因此,当从MySQL表中读取数据时,`NULL` 将转换为指定列类型的默认值(通常为0或空字符串)。
+`MySQL` 引擎不支持 [Nullable](../../../sql_reference/data_types/nullable.md) 数据类型,因此,当从MySQL表中读取数据时,`NULL` 将转换为指定列类型的默认值(通常为0或空字符串)。
 
-[Original article](https://clickhouse.tech/docs/zh/operations/table_engines/mysql/)
+[原始文章](https://clickhouse.tech/docs/zh/operations/table_engines/mysql/)
diff --git a/docs/zh/engines/table_engines/integrations/odbc.md b/docs/zh/engines/table_engines/integrations/odbc.md
new file mode 100644
index 00000000000..1488ab0d856
--- /dev/null
+++ b/docs/zh/engines/table_engines/integrations/odbc.md
@@ -0,0 +1,132 @@
+---
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_priority: 35
+toc_title: ODBC
+---
+
+# ODBC {#table-engine-odbc}
+
+允许 ClickHouse 通过 [ODBC](https://en.wikipedia.org/wiki/Open_Database_Connectivity) 连接到外部数据库。
+
+为了安全地实现 ODBC 连接,ClickHouse 使用单独的程序 `clickhouse-odbc-bridge`。如果直接在 `clickhouse-server` 进程中加载 ODBC 驱动程序,驱动程序的问题可能会导致 ClickHouse 服务器崩溃。ClickHouse 会在需要时自动启动 `clickhouse-odbc-bridge`。ODBC 桥程序与 `clickhouse-server` 来自同一个安装包。
+
+该引擎支持 [Nullable](../../../sql_reference/data_types/nullable.md) 数据类型。
+
+## 创建表 {#creating-a-table}
+
+``` sql
+CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
+(
+    name1 [type1],
+    name2 [type2],
+    ...
+)
+ENGINE = ODBC(connection_settings, external_database, external_table)
+```
+
+详细说明请参阅 [CREATE TABLE](../../../sql_reference/statements/create.md#create-table-query) 查询。
+
+表结构可以与源表结构不同:
+
+- 列名应与源表中的列名相同,但您可以按任何顺序使用其中的一些列。
+- 列类型可能与源表中的列类型不同。 ClickHouse 会尝试将值[转换](../../../sql_reference/functions/type_conversion_functions.md#type_conversion_function-cast)为 ClickHouse 的数据类型。
+
+**引擎参数**
+
+- `connection_settings` — Name of the section with connection settings in the `odbc.ini` 文件
+- `external_database` — Name of a database in an external DBMS.
+- `external_table` — Name of a table in the `external_database`.
+
+## 用法示例 {#usage-example}
+
+**通过ODBC从本地MySQL安装中检索数据**
+
+此示例在 Ubuntu Linux 18.04 和 MySQL 服务器 5.7 上检验通过。
+
+确保已安装 unixODBC 和 MySQL Connector。
+
+默认情况下(如果从软件包安装),ClickHouse 以用户 `clickhouse` 的身份启动。因此,您需要在 MySQL 服务器中创建并配置此用户。
+
+``` bash
+$ sudo mysql
+```
+
+``` sql
+mysql> CREATE USER 'clickhouse'@'localhost' IDENTIFIED BY 'clickhouse';
+mysql> GRANT ALL PRIVILEGES ON *.* TO 'clickhouse'@'clickhouse' WITH GRANT OPTION;
+```
+
+然后在 `/etc/odbc.ini` 中配置连接。
+
+``` bash
+$ cat /etc/odbc.ini
+[mysqlconn]
+DRIVER = /usr/local/lib/libmyodbc5w.so
+SERVER = 127.0.0.1
+PORT = 3306
+DATABASE = test
+USERNAME = clickhouse
+PASSWORD = clickhouse
+```
+
+您可以使用 unixODBC 安装中的 `isql` 实用程序检查连接。
+
+``` bash
+$ isql -v mysqlconn
++-------------------------+
+| Connected! |
+| |
+...
+``` + +MySQL中的表: + +``` text +mysql> CREATE TABLE `test`.`test` ( + -> `int_id` INT NOT NULL AUTO_INCREMENT, + -> `int_nullable` INT NULL DEFAULT NULL, + -> `float` FLOAT NOT NULL, + -> `float_nullable` FLOAT NULL DEFAULT NULL, + -> PRIMARY KEY (`int_id`)); +Query OK, 0 rows affected (0,09 sec) + +mysql> insert into test (`int_id`, `float`) VALUES (1,2); +Query OK, 1 row affected (0,00 sec) + +mysql> select * from test; ++------+----------+-----+----------+ +| int_id | int_nullable | float | float_nullable | ++------+----------+-----+----------+ +| 1 | NULL | 2 | NULL | ++------+----------+-----+----------+ +1 row in set (0,00 sec) +``` + +ClickHouse中的表,从MySQL表中检索数据: + +``` sql +CREATE TABLE odbc_t +( + `int_id` Int32, + `float_nullable` Nullable(Float32) +) +ENGINE = ODBC('DSN=mysqlconn', 'test', 'test') +``` + +``` sql +SELECT * FROM odbc_t +``` + +``` text +┌─int_id─┬─float_nullable─┐ +│ 1 │ ᴺᵁᴸᴸ │ +└────────┴────────────────┘ +``` + +## 另请参阅 {#see-also} + +- [ODBC外部字典](../../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md#dicts-external_dicts_dict_sources-odbc) +- [ODBC表函数](../../../sql_reference/table_functions/odbc.md) + +[原始文章](https://clickhouse.tech/docs/en/operations/table_engines/odbc/) diff --git a/docs/zh/engines/table_engines/log_family/index.md b/docs/zh/engines/table_engines/log_family/index.md new file mode 100644 index 00000000000..78557921c09 --- /dev/null +++ b/docs/zh/engines/table_engines/log_family/index.md @@ -0,0 +1,8 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_folder_title: "\u65E5\u5FD7\u7CFB\u5217" +toc_priority: 29 +--- + + diff --git a/docs/zh/operations/table_engines/log.md b/docs/zh/engines/table_engines/log_family/log.md similarity index 84% rename from docs/zh/operations/table_engines/log.md rename to docs/zh/engines/table_engines/log_family/log.md index 852575181cd..90f892615c9 100644 --- a/docs/zh/operations/table_engines/log.md +++ b/docs/zh/engines/table_engines/log_family/log.md @@ -1,5 +1,6 @@ -# Log {#log} + +# 日志 {#log} 日志与 TinyLog 的不同之处在于,«标记» 的小文件与列文件存在一起。这些标记写在每个数据块上,并且包含偏移量,这些偏移量指示从哪里开始读取文件以便跳过指定的行数。这使得可以在多个线程中读取表数据。对于并发数据访问,可以同时执行读取操作,而写入操作则阻塞读取和其它写入。Log 引擎不支持索引。同样,如果写入表失败,则该表将被破坏,并且从该表读取将返回错误。Log 引擎适用于临时数据,write-once 表以及测试或演示目的。 -[Original article](https://clickhouse.tech/docs/zh/operations/table_engines/log/) +[原始文章](https://clickhouse.tech/docs/zh/operations/table_engines/log/) diff --git a/docs/zh/operations/table_engines/log_family.md b/docs/zh/engines/table_engines/log_family/log_family.md similarity index 93% rename from docs/zh/operations/table_engines/log_family.md rename to docs/zh/engines/table_engines/log_family/log_family.md index 9ce3ab95b55..60cecab5faf 100644 --- a/docs/zh/operations/table_engines/log_family.md +++ b/docs/zh/engines/table_engines/log_family/log_family.md @@ -1,3 +1,4 @@ + # 日志引擎系列 {#table_engines-log-engine-family} 这些引擎是为了需要写入许多小数据量(少于一百万行)的表的场景而开发的。 @@ -5,7 +6,7 @@ 这系列的引擎有: - [StripeLog](stripelog.md) -- [Log](log.md) +- [日志](log.md) - [TinyLog](tinylog.md) ## 共同属性 {#table_engines-log-engine-family-common-properties} @@ -16,7 +17,7 @@ - 写入时将数据追加在文件末尾。 -- 不支持[突变](../../query_language/alter.md#alter-mutations)操作。 +- 不支持[突变](../../../engines/table_engines/log_family/log_family.md#alter-mutations)操作。 - 不支持索引。 diff --git a/docs/zh/operations/table_engines/stripelog.md b/docs/zh/engines/table_engines/log_family/stripelog.md similarity index 97% rename from docs/zh/operations/table_engines/stripelog.md rename to 
docs/zh/engines/table_engines/log_family/stripelog.md index 1a5edfd23bf..ab4deb67ebb 100644 --- a/docs/zh/operations/table_engines/stripelog.md +++ b/docs/zh/engines/table_engines/log_family/stripelog.md @@ -1,3 +1,4 @@ + # StripeLog {#table_engines-stripelog} 该引擎属于日志引擎系列。请在[日志引擎系列](log_family.md)文章中查看引擎的共同属性和差异。 @@ -13,7 +14,7 @@ ... ) ENGINE = StripeLog -查看[建表](../../query_language/create.md#create-table-query)请求的详细说明。 +查看[建表](../../../engines/table_engines/log_family/stripelog.md#create-table-query)请求的详细说明。 ## 写数据 {#table_engines-stripelog-writing-the-data} diff --git a/docs/zh/operations/table_engines/tinylog.md b/docs/zh/engines/table_engines/log_family/tinylog.md similarity index 91% rename from docs/zh/operations/table_engines/tinylog.md rename to docs/zh/engines/table_engines/log_family/tinylog.md index 7c9d524d5e6..9a1b27fd418 100644 --- a/docs/zh/operations/table_engines/tinylog.md +++ b/docs/zh/engines/table_engines/log_family/tinylog.md @@ -1,3 +1,4 @@ + # TinyLog {#tinylog} 最简单的表引擎,用于将数据存储在磁盘上。每列都存储在单独的压缩文件中。写入时,数据将附加到文件末尾。 @@ -10,4 +11,4 @@ 在 Yandex.Metrica 中,TinyLog 表用于小批量处理的中间数据。 -[Original article](https://clickhouse.tech/docs/zh/operations/table_engines/tinylog/) +[原始文章](https://clickhouse.tech/docs/zh/operations/table_engines/tinylog/) diff --git a/docs/zh/operations/table_engines/aggregatingmergetree.md b/docs/zh/engines/table_engines/mergetree_family/aggregatingmergetree.md similarity index 89% rename from docs/zh/operations/table_engines/aggregatingmergetree.md rename to docs/zh/engines/table_engines/mergetree_family/aggregatingmergetree.md index 2b18b2fbe48..2d898a5d168 100644 --- a/docs/zh/operations/table_engines/aggregatingmergetree.md +++ b/docs/zh/engines/table_engines/mergetree_family/aggregatingmergetree.md @@ -1,10 +1,11 @@ + # AggregatingMergeTree {#aggregatingmergetree} 该引擎继承自 [MergeTree](mergetree.md),并改变了数据片段的合并逻辑。 ClickHouse 会将相同主键的所有行(在一个数据片段内)替换为单个存储一系列聚合函数状态的行。 可以使用 `AggregatingMergeTree` 表来做增量数据统计聚合,包括物化视图的数据聚合。 -引擎需使用 [AggregateFunction](../../data_types/nested_data_structures/aggregatefunction.md) 类型来处理所有列。 +引擎需使用 [AggregateFunction](../../../engines/table_engines/mergetree_family/aggregatingmergetree.md) 类型来处理所有列。 如果要按一组规则来合并减少行数,则使用 `AggregatingMergeTree` 是合适的。 @@ -23,7 +24,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] [SETTINGS name=value, ...] 
``` -语句参数的说明,请参阅 [语句描述](../../query_language/create.md)。 +语句参数的说明,请参阅 [语句描述](../../../engines/table_engines/mergetree_family/aggregatingmergetree.md)。 **子句** @@ -50,7 +51,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] ## SELECT 和 INSERT {#select-he-insert} -插入数据,需使用带有聚合 -State- 函数的 [INSERT SELECT](../../query_language/insert_into.md) 语句。 +插入数据,需使用带有聚合 -State- 函数的 [INSERT SELECT](../../../engines/table_engines/mergetree_family/aggregatingmergetree.md) 语句。 从 `AggregatingMergeTree` 表中查询数据时,需使用 `GROUP BY` 子句并且要使用与插入时相同的聚合函数,但后缀要改为 `-Merge` 。 在 `SELECT` 查询的结果中,对于 ClickHouse 的所有输出格式 `AggregateFunction` 类型的值都实现了特定的二进制表示法。如果直接用 `SELECT` 导出这些数据,例如如用 `TabSeparated` 格式,那么这些导出数据也能直接用 `INSERT` 语句加载导入。 diff --git a/docs/zh/operations/table_engines/collapsingmergetree.md b/docs/zh/engines/table_engines/mergetree_family/collapsingmergetree.md similarity index 98% rename from docs/zh/operations/table_engines/collapsingmergetree.md rename to docs/zh/engines/table_engines/mergetree_family/collapsingmergetree.md index dd48fdd58ab..85b5ce076e8 100644 --- a/docs/zh/operations/table_engines/collapsingmergetree.md +++ b/docs/zh/engines/table_engines/mergetree_family/collapsingmergetree.md @@ -1,4 +1,5 @@ -# CollapsingMergeTree {#table_engine-collapsingmergetree} + +# 折叠树 {#table_engine-collapsingmergetree} 该引擎继承于 [MergeTree](mergetree.md),并在数据块合并算法中添加了折叠行的逻辑。 @@ -21,7 +22,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] [SETTINGS name=value, ...] ``` -请求参数的描述,参考[请求参数](../../query_language/create.md)。 +请求参数的描述,参考[请求参数](../../../engines/table_engines/mergetree_family/collapsingmergetree.md)。 **CollapsingMergeTree 参数** diff --git a/docs/zh/operations/table_engines/custom_partitioning_key.md b/docs/zh/engines/table_engines/mergetree_family/custom_partitioning_key.md similarity index 87% rename from docs/zh/operations/table_engines/custom_partitioning_key.md rename to docs/zh/engines/table_engines/mergetree_family/custom_partitioning_key.md index 1a8cea2d0e2..3844506c782 100644 --- a/docs/zh/operations/table_engines/custom_partitioning_key.md +++ b/docs/zh/engines/table_engines/mergetree_family/custom_partitioning_key.md @@ -1,6 +1,7 @@ + # 自定义分区键 {#zi-ding-yi-fen-qu-jian} -[MergeTree](mergetree.md) 系列的表(包括 [可复制表](replication.md) )可以使用分区。基于 MergeTree 表的 [物化视图](materializedview.md) 也支持分区。 +[MergeTree](mergetree.md) 系列的表(包括 [可复制表](replication.md) )可以使用分区。基于 MergeTree 表的 [物化视图](../special/materializedview.md) 也支持分区。 一个分区是指按指定规则逻辑组合一起的表的记录集。可以按任意标准进行分区,如按月,按日或按事件类型。为了减少需要操作的数据,每个分区都是分开存储的。访问数据时,ClickHouse 尽量使用这些分区的最小子集。 @@ -33,7 +34,7 @@ ORDER BY (CounterID, StartDate, intHash32(UserID)); !!! 
attention "注意"
    那些有相同分区表达式值的数据片段才会合并。这意味着 **你不应该用太精细的分区方案**(超过一千个分区)。否则,会因为文件系统中的文件数量和需要打开的文件描述符过多,导致 `SELECT` 查询效率不佳。
 
-可以通过 [system.parts](../system_tables.md#system_tables-parts) 表查看表片段和分区信息。例如,假设我们有一个 `visits` 表,按月分区。对 `system.parts` 表执行 `SELECT`:
+可以通过 [system.parts](../../../operations/system_tables.md#system_tables-parts) 表查看表片段和分区信息。例如,假设我们有一个 `visits` 表,按月分区。对 `system.parts` 表执行 `SELECT`:
 
``` sql
SELECT
    partition,
    name,
    active
FROM system.parts
WHERE table = 'visits'
```
 
 `active` 列为片段状态。`1` 激活状态;`0` 非激活状态。非激活片段是那些在合并到较大片段之后剩余的源数据片段。损坏的数据片段也表示为非活动状态。
 
-正如在示例中所看到的,同一分区中有几个独立的片段(例如,`201901_1_3_1`和`201901_1_9_2`)。这意味着这些片段尚未合并。ClickHouse 大约在插入后15分钟定期报告合并操作,合并插入的数据片段。此外,你也可以使用 [OPTIMIZE](../../query_language/misc.md#misc_operations-optimize) 语句直接执行合并。例:
+正如在示例中所看到的,同一分区中有几个独立的片段(例如,`201901_1_3_1`和`201901_1_9_2`)。这意味着这些片段尚未合并。ClickHouse 大约在插入后15分钟定期报告合并操作,合并插入的数据片段。此外,你也可以使用 [OPTIMIZE](../../../sql_reference/statements/misc.md#misc_operations-optimize) 语句直接执行合并。例:
 
``` sql
OPTIMIZE TABLE visits PARTITION 201902;
```
 
 drwxr-xr-x 2 clickhouse clickhouse 4096 Feb 1 16:48 detached
 
 文件夹 ‘201901\_1\_1\_0’,‘201901\_1\_7\_1’ 等是片段的目录。每个片段都与一个对应的分区相关,并且只包含这个月的数据(本例中的表按月分区)。
 
-`detached` 目录存放着使用 [DETACH](#alter_detach-partition) 语句从表中分离的片段。损坏的片段也会移到该目录,而不是删除。服务器不使用`detached`目录中的片段。可以随时添加,删除或修改此目录中的数据 – 在运行 [ATTACH](../../query_language/alter.md#alter_attach-partition) 语句前,服务器不会感知到。
+`detached` 目录存放着使用 [DETACH](#alter_detach-partition) 语句从表中分离的片段。损坏的片段也会移到该目录,而不是删除。服务器不使用`detached`目录中的片段。可以随时添加,删除或修改此目录中的数据 – 在运行 [ATTACH](../../../sql_reference/statements/alter.md#alter_attach-partition) 语句前,服务器不会感知到。
 
 注意,在操作服务器时,你不能手动更改文件系统上的片段集或其数据,因为服务器不会感知到这些修改。对于非复制表,可以在服务器停止时执行这些操作,但不建议这样做。对于复制表,在任何情况下都不要更改片段文件。
 
-ClickHouse 支持对分区执行这些操作:删除分区,从一个表复制到另一个表,或创建备份。了解分区的所有操作,请参阅 [分区和片段的操作](../../query_language/alter.md#alter_manipulations-with-partitions) 一节。
+ClickHouse 支持对分区执行这些操作:删除分区,从一个表复制到另一个表,或创建备份。了解分区的所有操作,请参阅 [分区和片段的操作](../../../sql_reference/statements/alter.md#alter_manipulations-with-partitions) 一节。
 
 [来源文章](https://clickhouse.tech/docs/en/operations/table_engines/custom_partitioning_key/)
diff --git a/docs/zh/operations/table_engines/graphitemergetree.md b/docs/zh/engines/table_engines/mergetree_family/graphitemergetree.md
similarity index 50%
rename from docs/zh/operations/table_engines/graphitemergetree.md
rename to docs/zh/engines/table_engines/mergetree_family/graphitemergetree.md
index 6916441acd0..b578414a203 100644
--- a/docs/zh/operations/table_engines/graphitemergetree.md
+++ b/docs/zh/engines/table_engines/mergetree_family/graphitemergetree.md
@@ -1,16 +1,19 @@
 ---
-en_copy: true
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_priority: 38
+toc_title: GraphiteMergeTree
 ---
 
 # GraphiteMergeTree {#graphitemergetree}
 
-This engine is designed for thinning and aggregating/averaging (rollup) [Graphite](http://graphite.readthedocs.io/en/latest/index.html) data. It may be helpful to developers who want to use ClickHouse as a data store for Graphite.
+此引擎专为细化和聚合/平均(rollup)[Graphite](http://graphite.readthedocs.io/en/latest/index.html) 数据而设计。对于想要使用 ClickHouse 作为 Graphite 数据存储的开发人员来说,它可能会有所帮助。
 
-You can use any ClickHouse table engine to store the Graphite data if you don’t need rollup, but if you need a rollup use `GraphiteMergeTree`. The engine reduces the volume of storage and increases the efficiency of queries from Graphite.
+如果不需要汇总(rollup),您可以使用任何 ClickHouse 表引擎来存储 Graphite 数据;但如果需要汇总,请使用 `GraphiteMergeTree`。该引擎可以减少存储量,并提高来自 Graphite 的查询的效率。
 
-The engine inherits properties from [MergeTree](mergetree.md).
+该引擎继承了 [MergeTree](mergetree.md) 的属性。
 
-## Creating a Table {#creating-table}
+## 创建表 {#creating-table}
 
``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
(
    Path String,
    Time DateTime,
    Value <Numeric_type>,
    Version <Numeric_type>
    ...
) ENGINE = GraphiteMergeTree(config_section)
[PARTITION BY expr]
[ORDER BY expr]
[SAMPLE BY expr]
[SETTINGS name=value, ...]
```
 
-See a detailed description of the [CREATE TABLE](../../query_language/create.md#create-table-query) query.
+详细说明请参阅 [CREATE TABLE](../../../sql_reference/statements/create.md#create-table-query) 查询。
 
-A table for the Graphite data should have the following columns for the following data:
+存储 Graphite 数据的表应具有包含以下数据的列:
 
-- Metric name (Graphite sensor). Data type: `String`.
+- 指标名称(Graphite 传感器)。 数据类型: `String`.
 
-- Time of measuring the metric. Data type: `DateTime`.
+- 测量指标的时间。 数据类型: `DateTime`.
 
-- Value of the metric. Data type: any numeric.
+- 指标的值。 数据类型:任意数字类型。
 
-- Version of the metric. Data type: any numeric.
+- 指标的版本。 数据类型:任意数字类型。
 
-    ClickHouse saves the rows with the highest version or the last written if versions are the same. Other rows are deleted during the merge of data parts.
+    ClickHouse 会保存版本最高的行;如果版本相同,则保存最后写入的行。 其他行会在数据片段合并期间被删除。
 
-The names of these columns should be set in the rollup configuration.
+应在汇总配置中设置这些列的名称。
 
-**GraphiteMergeTree parameters**
+**GraphiteMergeTree 参数**
 
- `config_section` — Name of the section in the configuration file, where are the rules of rollup set.
 
-**Query clauses**
+**查询子句**
 
-When creating a `GraphiteMergeTree` table, the same [clauses](mergetree.md#table_engine-mergetree-creating-a-table) are required, as when creating a `MergeTree` table.
+创建 `GraphiteMergeTree` 表时,需要与创建 `MergeTree` 表时相同的[子句](mergetree.md#table_engine-mergetree-creating-a-table)。下面给出一个简单的示例。
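 
+一个最小的建表示例(仅作示意,假设服务器配置中存在名为 `graphite_rollup` 的汇总配置节,且列名采用默认的 `Path`、`Time`、`Value`、`Timestamp`;表名 `graphite_data` 为假设的示例名称):
+
+``` sql
+-- 示例:列名与默认的汇总配置列名保持一致
+CREATE TABLE graphite_data
+(
+    Path String,
+    Time DateTime,
+    Value Float64,
+    Timestamp UInt32
+)
+ENGINE = GraphiteMergeTree('graphite_rollup')
+PARTITION BY toYYYYMM(Time)
+ORDER BY (Path, Time)
+```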
    -Deprecated Method for Creating a Table +不推荐使用的创建表的方法 -!!! attention "Attention" - Do not use this method in new projects and, if possible, switch the old projects to the method described above. +!!! attention "注意" + 不要在新项目中使用此方法,如果可能的话,请将旧项目切换到上述方法。 ``` sql CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] @@ -70,31 +73,31 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] ) ENGINE [=] GraphiteMergeTree(date-column [, sampling_expression], (primary, key), index_granularity, config_section) ``` -All of the parameters excepting `config_section` have the same meaning as in `MergeTree`. +所有参数除外 `config_section` 具有相同的含义 `MergeTree`. - `config_section` — Name of the section in the configuration file, where are the rules of rollup set.
-## Rollup configuration {#rollup-configuration}
+## 汇总配置 {#rollup-configuration}
 
-The settings for rollup are defined by the [graphite\_rollup](../server_settings/settings.md#server_settings-graphite_rollup) parameter in the server configuration. The name of the parameter could be any. You can create several configurations and use them for different tables.
+汇总的设置由服务器配置中的 [graphite\_rollup](../../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-graphite_rollup) 参数定义。 参数的名称可以任意取。 您可以创建多个配置并将它们用于不同的表。
 
-Rollup configuration structure:
+汇总配置结构:
 
      required-columns
      patterns
 
-### Required Columns {#required-columns}
+### 必填列 {#required-columns}
 
- `path_column_name` — The name of the column storing the metric name (Graphite sensor). Default value: `Path`.
- `time_column_name` — The name of the column storing the time of measuring the metric. Default value: `Time`.
-- `value_column_name` — The name of the column storing the value of the metric at the time set in `time_column_name`. Default value: `Value`.
+- `value_column_name` — The name of the column storing the value of the metric at the time set in `time_column_name`. 默认值: `Value`.
- `version_column_name` — The name of the column storing the version of the metric. Default value: `Timestamp`.
 
-### Patterns {#patterns}
+### 模式 {#patterns}
 
-Structure of the `patterns` section:
+`patterns` 部分的结构:
 
``` text
pattern
...
default
...
```
 
-!!! warning "Attention"
-    Patterns must be strictly ordered:
+!!! warning "注意"
+    模式必须严格按以下顺序排列:
 
       1. Patterns without `function` or `retention`.
       1. Patterns with both `function` and `retention`.
       1. Pattern `default`.
 
-When processing a row, ClickHouse checks the rules in the `pattern` sections. Each of `pattern` (including `default`) sections can contain `function` parameter for aggregation, `retention` parameters or both. If the metric name matches the `regexp`, the rules from the `pattern` section (or sections) are applied; otherwise, the rules from the `default` section are used.
+在处理行时,ClickHouse 会依次检查 `pattern` 部分中的规则。 每个 `pattern`(包括 `default`)部分都可以包含用于聚合的 `function` 参数、`retention` 参数,或两者兼有。 如果指标名称匹配某个 `regexp`,则应用该 `pattern` 部分的规则;否则,使用 `default` 部分的规则。
 
-Fields for `pattern` and `default` sections:
+`pattern` 和 `default` 部分的字段:
 
- `regexp`– A pattern for the metric name.
- `age` – The minimum age of the data in seconds.
- `precision`– How precisely to define the age of the data in seconds. Should be a divisor for 86400 (seconds in a day).
- `function` – The name of the aggregating function to apply to data whose age falls within the range `[age, age + precision]`.
-### Configuration Example {#configuration-example} +### 配置示例 {#configuration-example} ``` xml @@ -168,4 +171,4 @@ Fields for `pattern` and `default` sections: ``` -[Original article](https://clickhouse.tech/docs/en/operations/table_engines/graphitemergetree/) +[原始文章](https://clickhouse.tech/docs/en/operations/table_engines/graphitemergetree/) diff --git a/docs/zh/engines/table_engines/mergetree_family/index.md b/docs/zh/engines/table_engines/mergetree_family/index.md new file mode 100644 index 00000000000..1cbf6104dc3 --- /dev/null +++ b/docs/zh/engines/table_engines/mergetree_family/index.md @@ -0,0 +1,8 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_folder_title: "\u6885\u6811\u5BB6\u65CF" +toc_priority: 28 +--- + + diff --git a/docs/zh/operations/table_engines/mergetree.md b/docs/zh/engines/table_engines/mergetree_family/mergetree.md similarity index 88% rename from docs/zh/operations/table_engines/mergetree.md rename to docs/zh/engines/table_engines/mergetree_family/mergetree.md index 61d36fea9fa..0778ab2487d 100644 --- a/docs/zh/operations/table_engines/mergetree.md +++ b/docs/zh/engines/table_engines/mergetree_family/mergetree.md @@ -1,3 +1,4 @@ + # MergeTree {#table_engines-mergetree} Clickhouse 中最强大的表引擎当属 `MergeTree` (合并树)引擎及该系列(`*MergeTree`)中的其他引擎。 @@ -23,7 +24,7 @@ Clickhouse 中最强大的表引擎当属 `MergeTree` (合并树)引擎及 需要的话,你可以给表设置一个采样方法。 !!! 注意 "注意" - [Merge](merge.md) 引擎并不属于 `*MergeTree` 系列。 + [合并](../special/merge.md) 引擎并不属于 `*MergeTree` 系列。 ## 建表 {#table_engine-mergetree-creating-a-table} @@ -41,7 +42,7 @@ Clickhouse 中最强大的表引擎当属 `MergeTree` (合并树)引擎及 [SAMPLE BY expr] [SETTINGS name=value, ...] -请求参数的描述,参考 [请求描述](../../query_language/create.md) 。 +请求参数的描述,参考 [请求描述](../../../engines/table_engines/mergetree_family/mergetree.md) 。 @@ -51,7 +52,7 @@ Clickhouse 中最强大的表引擎当属 `MergeTree` (合并树)引擎及 - `PARTITION BY` — [分区键](custom_partitioning_key.md) 。 - 要按月分区,可以使用表达式 `toYYYYMM(date_column)` ,这里的 `date_column` 是一个 [Date](../../data_types/date.md) 类型的列。这里该分区名格式会是 `"YYYYMM"` 这样。 + 要按月分区,可以使用表达式 `toYYYYMM(date_column)` ,这里的 `date_column` 是一个 [Date](../../../engines/table_engines/mergetree_family/mergetree.md) 类型的列。这里该分区名格式会是 `"YYYYMM"` 这样。 - `ORDER BY` — 表的排序键。 @@ -72,7 +73,7 @@ Clickhouse 中最强大的表引擎当属 `MergeTree` (合并树)引擎及 - `index_granularity` — 索引粒度。即索引中相邻『标记』间的数据行数。默认值,8192 。该列表中所有可用的参数可以从这里查看 [MergeTreeSettings.h](https://github.com/ClickHouse/ClickHouse/blob/master/src/Storages/MergeTree/MergeTreeSettings.h) 。 - `index_granularity_bytes` — 索引粒度,以字节为单位,默认值: 10Mb。如果仅按数据行数限制索引粒度, 请设置为0(不建议)。 - `enable_mixed_granularity_parts` — 启用或禁用通过 `index_granularity_bytes` 控制索引粒度的大小。在19.11版本之前, 只有 `index_granularity` 配置能够用于限制索引粒度的大小。当从大表(数十或数百兆)中查询数据时候,`index_granularity_bytes` 配置能够提升ClickHouse的性能。如果你的表内数据量很大,可以开启这项配置用以提升`SELECT` 查询的性能。 - - `use_minimalistic_part_header_in_zookeeper` — 数据片段头在 ZooKeeper 中的存储方式。如果设置了 `use_minimalistic_part_header_in_zookeeper=1` ,ZooKeeper 会存储更少的数据。更多信息参考『服务配置参数』这章中的 [设置描述](../server_settings/settings.md#server-settings-use_minimalistic_part_header_in_zookeeper) 。 + - `use_minimalistic_part_header_in_zookeeper` — 数据片段头在 ZooKeeper 中的存储方式。如果设置了 `use_minimalistic_part_header_in_zookeeper=1` ,ZooKeeper 会存储更少的数据。更多信息参考『服务配置参数』这章中的 [设置描述](../../../operations/server_configuration_parameters/settings.md#server-settings-use_minimalistic_part_header_in_zookeeper) 。 - `min_merge_bytes_to_use_direct_io` — 使用直接 I/O 来操作磁盘的合并操作时要求的最小数据量。合并数据片段时,ClickHouse 会计算要被合并的所有数据的总存储空间。如果大小超过了 `min_merge_bytes_to_use_direct_io` 设置的字节数,则 ClickHouse 将使用直接 
I/O 接口(`O_DIRECT` 选项)对磁盘读写。如果设置 `min_merge_bytes_to_use_direct_io = 0` ,则会禁用直接 I/O。默认值:`10 * 1024 * 1024 * 1024` 字节。 - `merge_with_ttl_timeout` — TTL合并频率的最小间隔时间。默认值: 86400 (1 天)。 @@ -85,7 +86,7 @@ Clickhouse 中最强大的表引擎当属 `MergeTree` (合并树)引擎及 示例中,我们设为按月分区。 -同时我们设置了一个按用户ID哈希的抽样表达式。这让你可以有该表中每个 `CounterID` 和 `EventDate` 下面的数据的伪随机分布。如果你在查询时指定了 [SAMPLE](../../query_language/select.md#select-sample-clause) 子句。 ClickHouse会返回对于用户子集的一个均匀的伪随机数据采样。 +同时我们设置了一个按用户ID哈希的抽样表达式。这让你可以有该表中每个 `CounterID` 和 `EventDate` 下面的数据的伪随机分布。如果你在查询时指定了 [SAMPLE](../../../engines/table_engines/mergetree_family/mergetree.md#select-sample-clause) 子句。 ClickHouse会返回对于用户子集的一个均匀的伪随机数据采样。 `index_granularity` 可省略,默认值为 8192 。 @@ -105,9 +106,9 @@ Clickhouse 中最强大的表引擎当属 `MergeTree` (合并树)引擎及 **MergeTree() 参数** -- `date-column` — 类型为 [Date](../../data_types/date.md) 的列名。ClickHouse 会自动依据这个列按月创建分区。分区名格式为 `"YYYYMM"` 。 +- `date-column` — 类型为 [日期](../../../engines/table_engines/mergetree_family/mergetree.md) 的列名。ClickHouse 会自动依据这个列按月创建分区。分区名格式为 `"YYYYMM"` 。 - `sampling_expression` — 采样表达式。 -- `(primary, key)` — 主键。类型 — [Tuple()](../../data_types/tuple.md) +- `(primary, key)` — 主键。类型 — [元组()](../../../engines/table_engines/mergetree_family/mergetree.md) - `index_granularity` — 索引粒度。即索引中相邻『标记』间的数据行数。设为 8192 可以适用大部分场景。 **示例** @@ -168,7 +169,7 @@ ClickHouse 不要求主键惟一。所以,你可以插入多条具有相同主 ClickHouse 以主键排序片段数据,所以,数据的一致性越高,压缩越好。 -- [CollapsingMergeTree](collapsingmergetree.md#table_engine-collapsingmergetree) 和 [SummingMergeTree](summingmergetree.md) 引擎里,数据合并时,会有额外的处理逻辑。 +- [折叠树](collapsingmergetree.md#table_engine-collapsingmergetree) 和 [SummingMergeTree](summingmergetree.md) 引擎里,数据合并时,会有额外的处理逻辑。 在这种情况下,指定一个跟主键不同的 *排序键* 也是有意义的。 @@ -191,7 +192,7 @@ ClickHouse 不要求主键惟一。所以,你可以插入多条具有相同主 这种情况下,主键中仅预留少量列保证高效范围扫描, 剩下的维度列放到排序键元组里。这样是合理的。 -[排序键的修改](../../query_language/alter.md) 是轻量级的操作,因为一个新列同时被加入到表里和排序键后时,已存在的数据片段并不需要修改。由于旧的排序键是新排序键的前缀,并且刚刚添加的列中没有数据,因此在表修改时的数据对于新旧的排序键来说都是有序的。 +[排序键的修改](../../../engines/table_engines/mergetree_family/mergetree.md) 是轻量级的操作,因为一个新列同时被加入到表里和排序键后时,已存在的数据片段并不需要修改。由于旧的排序键是新排序键的前缀,并且刚刚添加的列中没有数据,因此在表修改时的数据对于新旧的排序键来说都是有序的。 ### 索引和分区在查询中的应用 {#suo-yin-he-fen-qu-zai-cha-xun-zhong-de-ying-yong} @@ -221,7 +222,7 @@ ClickHouse 会依据主键索引剪掉不符合的数据,依据按月分区的 SELECT count() FROM table WHERE CounterID = 34 OR URL LIKE '%upyachka%' ``` -要检查 ClickHouse 执行一个查询时能否使用索引,可设置 [force\_index\_by\_date](../settings/settings.md#settings-force_index_by_date) 和 [force\_primary\_key](../settings/settings.md) 。 +要检查 ClickHouse 执行一个查询时能否使用索引,可设置 [force\_index\_by\_date](../../../operations/settings/settings.md#settings-force_index_by_date) 和 [force\_primary\_key](../../../operations/settings/settings.md) 。 按月分区的分区键是只能读取包含适当范围日期的数据块。这种情况下,数据块会包含很多天(最多整月)的数据。在块中,数据按主键排序,主键第一列可能不包含日期。因此,仅使用日期而没有带主键前缀条件的查询将会导致读取超过这个日期范围。 @@ -299,14 +300,14 @@ INDEX sample_index3 (lower(str), str) TYPE ngrambf_v1(3, 256, 2, 0) GRANULARITY TTL可以设置值的生命周期,它既可以为整张表设置,也可以为每个列字段单独设置。如果`TTL`同时作用于表和字段,ClickHouse会使用先到期的那个。 -被设置TTL的表,必须拥有[Date](../../data_types/date.md) 或 [DateTime](../../data_types/datetime.md) 类型的字段。要定义数据的生命周期,需要在这个日期字段上使用操作符,例如: +被设置TTL的表,必须拥有[日期](../../../engines/table_engines/mergetree_family/mergetree.md) 或 [日期时间](../../../engines/table_engines/mergetree_family/mergetree.md) 类型的字段。要定义数据的生命周期,需要在这个日期字段上使用操作符,例如: ``` sql TTL time_column TTL time_column + interval ``` -要定义`interval`, 需要使用 [time interval](../../query_language/operators.md#operators-datetime) 操作符。 +要定义`interval`, 需要使用 [时间间隔](../../../engines/table_engines/mergetree_family/mergetree.md#operators-datetime) 操作符。 ``` sql TTL 
date_time + INTERVAL 1 MONTH
@@ -385,10 +386,10 @@ ALTER TABLE example_table

当ClickHouse发现数据过期时, 它将会执行一个计划外的合并。要控制这类合并的频率, 你可以设置 [merge\_with\_ttl\_timeout](#mergetree_setting-merge_with_ttl_timeout)。如果该值被设置的太低, 它将导致执行许多的计划外合并,这可能会消耗大量资源。

-如果在合并的时候执行`SELECT` 查询, 则可能会得到过期的数据。为了避免这种情况,可以在`SELECT`之前使用 [OPTIMIZE](../../query_language/misc.md#misc_operations-optimize) 查询。
+如果在合并的时候执行`SELECT` 查询, 则可能会得到过期的数据。为了避免这种情况,可以在`SELECT`之前使用 [OPTIMIZE](../../../engines/table_engines/mergetree_family/mergetree.md#misc_operations-optimize) 查询。

-## Using Multiple Block Devices for Data Storage {#table_engine-mergetree-multiple-volumes}
+## 使用多个块设备进行数据存储 {#table_engine-mergetree-multiple-volumes}

-### Configuration {#table_engine-mergetree-multiple-volumes-configure}
+### 配置 {#table_engine-mergetree-multiple-volumes-configure}

[来源文章](https://clickhouse.tech/docs/en/operations/table_engines/mergetree/)
diff --git a/docs/zh/operations/table_engines/replacingmergetree.md b/docs/zh/engines/table_engines/mergetree_family/replacingmergetree.md
similarity index 92%
rename from docs/zh/operations/table_engines/replacingmergetree.md
rename to docs/zh/engines/table_engines/mergetree_family/replacingmergetree.md
index 66c3246f272..720560bf1a4 100644
--- a/docs/zh/operations/table_engines/replacingmergetree.md
+++ b/docs/zh/engines/table_engines/mergetree_family/replacingmergetree.md
@@ -1,4 +1,5 @@
-# ReplacingMergeTree {#replacingmergetree}
+
+# ReplacingMergeTree {#replacingmergetree}

该引擎和[MergeTree](mergetree.md)的不同之处在于它会删除具有相同主键的重复项。

@@ -21,9 +22,9 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
    [SETTINGS name=value, ...]
```

-请求参数的描述,参考[请求参数](../../query_language/create.md)。
+请求参数的描述,参考[请求参数](../../../engines/table_engines/mergetree_family/replacingmergetree.md)。

-**ReplacingMergeTree Parameters**
+**ReplacingMergeTree 参数**

- `ver` — 版本列。类型为 `UInt*`, `Date` 或 `DateTime`。可选参数。

diff --git a/docs/zh/operations/table_engines/replication.md b/docs/zh/engines/table_engines/mergetree_family/replication.md
similarity index 94%
rename from docs/zh/operations/table_engines/replication.md
rename to docs/zh/engines/table_engines/mergetree_family/replication.md
index c2f4d3eb849..e518eb805c4 100644
--- a/docs/zh/operations/table_engines/replication.md
+++ b/docs/zh/engines/table_engines/mergetree_family/replication.md
@@ -1,3 +1,4 @@
+
# 数据副本 {#table_engines-replication}

只有 MergeTree 系列里的表可支持副本:

@@ -7,14 +8,14 @@
- ReplicatedReplacingMergeTree
- ReplicatedAggregatingMergeTree
- ReplicatedCollapsingMergeTree
-- ReplicatedVersionedCollapsingMergeTree
+- ReplicatedVersionedCollapsingMergeTree
- ReplicatedGraphiteMergeTree

副本是表级别的,不是整个服务器级的。所以,服务器里可以同时有复制表和非复制表。

副本不依赖分片。每个分片有它自己的独立副本。

-对于 `INSERT` 和 `ALTER` 语句操作数据的会在压缩的情况下被复制(更多信息,看 [ALTER](../../query_language/alter.md#query_language_queries_alter) )。
+对于 `INSERT` 和 `ALTER` 语句操作数据的会在压缩的情况下被复制(更多信息,看 [ALTER](../../../engines/table_engines/mergetree_family/replication.md#query_language_queries_alter) )。

而 `CREATE`,`DROP`,`ATTACH`,`DETACH` 和 `RENAME` 语句只会在单个服务器上执行,不会被复制。

@@ -47,7 +48,7 @@

如果配置文件中没有设置 ZooKeeper ,则无法创建复制表,并且任何现有的复制表都将变为只读。

-`SELECT` 查询并不需要借助 ZooKeeper ,复本并不影响 `SELECT` 的性能,查询复制表与非复制表速度是一样的。查询分布式表时,ClickHouse的处理方式可通过设置 [max\_replica\_delay\_for\_distributed\_queries](../settings/settings.md#settings-max_replica_delay_for_distributed_queries) 和 [fallback\_to\_stale\_replicas\_for\_distributed\_queries](../settings/settings.md) 修改。
+`SELECT` 查询并不需要借助 ZooKeeper ,副本并不影响 `SELECT` 的性能,查询复制表与非复制表速度是一样的。查询分布式表时,ClickHouse的处理方式可通过设置
[max\_replica\_delay\_for\_distributed\_queries](../../../operations/settings/settings.md#settings-max_replica_delay_for_distributed_queries) 和 [fallback\_to\_stale\_replicas\_for\_distributed\_queries](../../../operations/settings/settings.md) 修改。 对于每个 `INSERT` 语句,会通过几个事务将十来个记录添加到 ZooKeeper。(确切地说,这是针对每个插入的数据块; 每个 INSERT 语句的每 `max_insert_block_size = 1048576` 行和最后剩余的都各算作一个块。)相比非复制表,写 zk 会导致 `INSERT` 的延迟略长一些。但只要你按照建议每秒不超过一个 `INSERT` 地批量插入数据,不会有任何问题。一个 ZooKeeper 集群能给整个 ClickHouse 集群支撑协调每秒几百个 `INSERT`。数据插入的吞吐量(每秒的行数)可以跟不用复制的数据一样高。 @@ -59,7 +60,7 @@ 单个数据块写入是原子的。 INSERT 的数据按每块最多 `max_insert_block_size = 1048576` 行进行分块,换句话说,如果 `INSERT` 插入的行少于 1048576,则该 INSERT 是原子的。 -数据块会去重。对于被多次写的相同数据块(大小相同且具有相同顺序的相同行的数据块),该块仅会写入一次。这样设计的原因是万一在网络故障时客户端应用程序不知道数据是否成功写入DB,此时可以简单地重复 `INSERT` 。把相同的数据发送给多个副本 INSERT 并不会有问题。因为这些 `INSERT` 是完全相同的(会被去重)。去重参数参看服务器设置 [merge\_tree](../server_settings/settings.md) 。(注意:Replicated\*MergeTree 才会去重,不需要 zookeeper 的不带 MergeTree 不会去重) +数据块会去重。对于被多次写的相同数据块(大小相同且具有相同顺序的相同行的数据块),该块仅会写入一次。这样设计的原因是万一在网络故障时客户端应用程序不知道数据是否成功写入DB,此时可以简单地重复 `INSERT` 。把相同的数据发送给多个副本 INSERT 并不会有问题。因为这些 `INSERT` 是完全相同的(会被去重)。去重参数参看服务器设置 [merge\_tree](../../../operations/server_configuration_parameters/settings.md) 。(注意:Replicated\*MergeTree 才会去重,不需要 zookeeper 的不带 MergeTree 不会去重) 在复制期间,只有要插入的源数据通过网络传输。进一步的数据转换(合并)会在所有副本上以相同的方式进行处理执行。这样可以最大限度地减少网络使用,这意味着即使副本在不同的数据中心,数据同步也能工作良好。(能在不同数据中心中的同步数据是副本机制的主要目标。) @@ -119,7 +120,7 @@ CREATE TABLE table_name `{layer}-{shard}` 是分片标识部分。在此示例中,由于 Yandex.Metrica 集群使用了两级分片,所以它是由两部分组成的。但对于大多数情况来说,你只需保留 {shard} 占位符即可,它会替换展开为分片标识。 `table_name` 是该表在 ZooKeeper 中的名称。使其与 ClickHouse 中的表名相同比较好。 这里它被明确定义,跟 ClickHouse 表名不一样,它并不会被 RENAME 语句修改。 -*HINT*: you could add a database name in front of `table_name` as well. E.g. `db_name.table_name` +*HINT*:你可以在前面添加一个数据库名称 `table_name` 也是 例如。 `db_name.table_name` 副本名称用于标识同一个表分片的不同副本。你可以使用服务器名称,如上例所示。同个分片中不同副本的副本名称要唯一。 diff --git a/docs/zh/operations/table_engines/summingmergetree.md b/docs/zh/engines/table_engines/mergetree_family/summingmergetree.md similarity index 86% rename from docs/zh/operations/table_engines/summingmergetree.md rename to docs/zh/engines/table_engines/mergetree_family/summingmergetree.md index 326ccb7118e..73576b00346 100644 --- a/docs/zh/operations/table_engines/summingmergetree.md +++ b/docs/zh/engines/table_engines/mergetree_family/summingmergetree.md @@ -1,3 +1,4 @@ + # SummingMergeTree {#summingmergetree} 该引擎继承自 [MergeTree](mergetree.md)。区别在于,当合并 `SummingMergeTree` 表的数据片段时,ClickHouse 会把所有具有相同主键的行合并为一行,该行包含了被合并的行中具有数值数据类型的列的汇总值。如果主键的组合方式使得单个键值对应于大量的行,则可以显著的减少存储空间并加快数据查询的速度。 @@ -17,7 +18,7 @@ [SAMPLE BY expr] [SETTINGS name=value, ...] 
-请求参数的描述,参考 [请求描述](../../query_language/create.md)。
+请求参数的描述,参考 [请求描述](../../../engines/table_engines/mergetree_family/summingmergetree.md)。

**SummingMergeTree 的参数**

@@ -83,7 +84,7 @@ SELECT key, sum(value) FROM summtt GROUP BY key

当数据被插入到表中时,他们将被原样保存。ClickHouse 定期合并插入的数据片段,并在这个时候对所有具有相同主键的行中的列进行汇总,将这些行替换为包含汇总数据的一行记录。

-ClickHouse 会按片段合并数据,以至于不同的数据片段中会包含具有相同主键的行,即单个汇总片段将会是不完整的。因此,聚合函数 [sum()](../../query_language/agg_functions/reference.md#agg_function-sum) 和 `GROUP BY` 子句应该在(`SELECT`)查询语句中被使用,如上文中的例子所述。
+ClickHouse 会按片段合并数据,以至于不同的数据片段中会包含具有相同主键的行,即单个汇总片段将会是不完整的。因此,聚合函数 [sum()](../../../engines/table_engines/mergetree_family/summingmergetree.md#agg_function-sum) 和 `GROUP BY` 子句应该在(`SELECT`)查询语句中被使用,如上文中的例子所述。

### 汇总的通用规则 {#hui-zong-de-tong-yong-gui-ze}

@@ -97,7 +98,7 @@

### AggregateFunction 列中的汇总 {#aggregatefunction-lie-zhong-de-hui-zong}

-对于 [AggregateFunction 类型](../../data_types/nested_data_structures/aggregatefunction.md)的列,ClickHouse 根据对应函数表现为 [AggregatingMergeTree](aggregatingmergetree.md) 引擎的聚合。
+对于 [AggregateFunction 类型](../../../engines/table_engines/mergetree_family/summingmergetree.md)的列,ClickHouse 根据对应函数表现为 [AggregatingMergeTree](aggregatingmergetree.md) 引擎的聚合。

### 嵌套结构 {#qian-tao-jie-gou}

@@ -117,7 +118,7 @@

    [(1, 100)] + [(1, 150), (2, 150)] -> [(1, 250), (2, 150)]
    [(1, 100), (2, 150)] + [(1, -100)] -> [(2, 150)]

-请求数据时,使用 [sumMap(key, value)](../../query_language/agg_functions/reference.md) 函数来对 `Map` 进行聚合。
+请求数据时,使用 [sumMap(key,value)](../../../engines/table_engines/mergetree_family/summingmergetree.md) 函数来对 `Map` 进行聚合。

对于嵌套数据结构,你无需在列的元组中指定列以进行汇总。

diff --git a/docs/zh/engines/table_engines/mergetree_family/versionedcollapsingmergetree.md b/docs/zh/engines/table_engines/mergetree_family/versionedcollapsingmergetree.md
new file mode 100644
index 00000000000..37f11bc21ad
--- /dev/null
+++ b/docs/zh/engines/table_engines/mergetree_family/versionedcollapsingmergetree.md
@@ -0,0 +1,238 @@
+---
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_priority: 37
+toc_title: "\u7248\u672C\u6298\u53E0\u5408\u5E76\u6811"
+---
+
+# VersionedCollapsingMergeTree {#versionedcollapsingmergetree}
+
+这个引擎:
+
+- 允许快速写入不断变化的对象状态。
+- 在后台删除旧的对象状态。 这显著降低了存储体积。
+
+有关详细信息,请参阅下文的 [折叠](#table_engines-versionedcollapsingmergetree) 部分。
+
+引擎继承自 [MergeTree](mergetree.md#table_engines-mergetree) 并将折叠行的逻辑添加到合并数据部分的算法中。 `VersionedCollapsingMergeTree` 与 [CollapsingMergeTree](collapsingmergetree.md) 用途相同,但使用不同的折叠算法,允许以多个线程的任何顺序插入数据。 特别是, `Version` 列有助于正确折叠行,即使它们以错误的顺序插入。 相比之下, `CollapsingMergeTree` 只允许严格连续插入。
+
+## 创建表 {#creating-a-table}
+
+``` sql
+CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
+(
+    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
+    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
+    ...
+) ENGINE = VersionedCollapsingMergeTree(sign, version)
+[PARTITION BY expr]
+[ORDER BY expr]
+[SAMPLE BY expr]
+[SETTINGS name=value, ...]
+```
+
+有关查询参数的说明,请参阅 [查询说明](../../../sql_reference/statements/create.md).
+
+**引擎参数**
+
+``` sql
+VersionedCollapsingMergeTree(sign, version)
+```
+
+- `sign` — Name of the column with the type of row: `1` 是一个 “state” 行, `-1` 是一个 “cancel” 行。
+
+    列数据类型应为 `Int8`.
+
+- `version` — Name of the column with the version of the object state.
+
+    列数据类型应为 `UInt*`.
+
+**查询子句**
+
+创建 `VersionedCollapsingMergeTree` 表时,需要与创建 `MergeTree` 表时相同的 [子句](mergetree.md)。
+
+
+
+不推荐使用的创建表的方法
+
+!!! attention "注意"
+    不要在新项目中使用此方法。 如果可能,请将旧项目切换到上述方法。
+
+``` sql
+CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
+(
+    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
+    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
+    ...
+) ENGINE [=] VersionedCollapsingMergeTree(date-column [, sampling_expression], (primary, key), index_granularity, sign, version)
+```
+
+除 `sign` 和 `version` 外,所有参数的含义都与 `MergeTree` 中相同。
+
+- `sign` — Name of the column with the type of row: `1` 是一个 “state” 行, `-1` 是一个 “cancel” 行。
+
+    Column Data Type — `Int8`.
+
+- `version` — Name of the column with the version of the object state.
+
+    列数据类型应为 `UInt*`.
+
+
+
+## 折叠 {#table_engines-versionedcollapsingmergetree}
+
+### 数据 {#data}
+
+考虑一种情况,您需要为某个对象保存不断变化的数据。 对于一个对象有一行,并在发生更改时更新该行是合理的。 但是,对于数据库管理系统来说,更新操作非常昂贵且速度很慢,因为它需要重写存储中的数据。 如果需要快速写入数据,则不能接受更新,但可以按如下顺序将更改写入对象。
+
+写入行时使用 `Sign` 列。 如果 `Sign = 1`,则表示该行是一个对象的状态(我们称之为 “state” 行)。 如果 `Sign = -1`,则表示取消具有相同属性的对象状态(我们称之为 “cancel” 行)。 还需要使用 `Version` 列,它应该用单独的数字标识对象的每个状态。
+
+例如,我们要计算用户在某个网站上访问了多少页面以及他们在那里的时间。 在某个时间点,我们用用户活动的状态写下面的行:
+
+``` text
+┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐
+│ 4324182021466249494 │         5 │      146 │    1 │       1 |
+└─────────────────────┴───────────┴──────────┴──────┴─────────┘
+```
+
+在稍后的某个时候,我们注册用户活动的变化,并用以下两行写入它。
+
+``` text
+┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐
+│ 4324182021466249494 │         5 │      146 │   -1 │       1 |
+│ 4324182021466249494 │         6 │      185 │    1 │       2 |
+└─────────────────────┴───────────┴──────────┴──────┴─────────┘
+```
+
+第一行取消对象(用户)的先前状态。 它应该复制已取消状态的所有字段,除了 `Sign`.
+
+第二行包含当前状态。
+
+因为我们只需要用户活动的最后一个状态,行
+
+``` text
+┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐
+│ 4324182021466249494 │         5 │      146 │    1 │       1 |
+│ 4324182021466249494 │         5 │      146 │   -1 │       1 |
+└─────────────────────┴───────────┴──────────┴──────┴─────────┘
+```
+
+可以删除,折叠对象的无效(旧)状态。 `VersionedCollapsingMergeTree` 在合并数据部分时执行此操作。
+
+要了解为什么每次更改都需要两行,请参阅 [算法](#table_engines-versionedcollapsingmergetree-algorithm).
+
+**使用注意事项**
+
+1. 写入数据的程序应该记住对象的状态以便取消它。 “cancel” 行应该是 “state” 行的副本,但 `Sign` 相反。 这增加了存储的初始大小,但允许快速写入数据。
+2. 列中长时间增长的数组由于写入负载而降低了引擎的效率。 数据越简单,效率就越高。
+3. `SELECT` 结果很大程度上取决于对象变化历史的一致性。 准备插入数据时要准确。 您可以通过不一致的数据获得不可预测的结果,例如会话深度等非负指标的负值。
+
+### 算法 {#table_engines-versionedcollapsingmergetree-algorithm}
+
+当ClickHouse合并数据部分时,它会删除具有相同主键和版本、但 `Sign` 相反的每对行。 行的顺序并不重要。
+
+当ClickHouse插入数据时,它会按主键对行进行排序。 如果 `Version` 列不在主键中,ClickHouse将其隐式添加到主键作为最后一个字段并使用它进行排序。
+
+## 选择数据 {#selecting-data}
+
+ClickHouse不保证具有相同主键的所有行都将位于相同的结果数据部分中,甚至位于相同的物理服务器上。 对于写入数据和随后合并数据部分都是如此。 此外,ClickHouse使用多个线程处理 `SELECT` 查询,并且无法预测结果中的行顺序。 这意味着,如果需要从 `VersionedCollapsingMergeTree` 表得到完全 “collapsed” 的数据,就必须进行聚合。
+
+要完成折叠,请使用带 `GROUP BY` 子句的查询以及考虑符号的聚合函数。 例如,要计算数量,请使用 `sum(Sign)` 而不是 `count()`. 要计算某个值的总和,请使用 `sum(Sign * x)` 而不是 `sum(x)`,并添加 `HAVING sum(Sign) > 0`.
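+例如,一个示意性的查询如下(表结构与下文“使用示例”一节中的 `UAct` 表一致,仅作演示):
+
+``` sql
+SELECT
+    UserID,
+    sum(Sign) AS visits,                 -- 代替 count()
+    sum(PageViews * Sign) AS PageViews   -- 代替 sum(PageViews)
+FROM UAct
+GROUP BY UserID
+HAVING sum(Sign) > 0
+```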
+ +聚合 `count`, `sum` 和 `avg` 可以这样计算。 聚合 `uniq` 如果对象至少具有一个非折叠状态,则可以计算。 聚合 `min` 和 `max` 无法计算是因为 `VersionedCollapsingMergeTree` 不保存折叠状态值的历史记录。 + +如果您需要提取数据 “collapsing” 但是,如果没有聚合(例如,要检查是否存在其最新值与某些条件匹配的行),则可以使用 `FINAL` 修饰符 `FROM` 条款 这种方法效率低下,不应与大型表一起使用。 + +## 使用示例 {#example-of-use} + +示例数据: + +``` text +┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐ +│ 4324182021466249494 │ 5 │ 146 │ 1 │ 1 | +│ 4324182021466249494 │ 5 │ 146 │ -1 │ 1 | +│ 4324182021466249494 │ 6 │ 185 │ 1 │ 2 | +└─────────────────────┴───────────┴──────────┴──────┴─────────┘ +``` + +创建表: + +``` sql +CREATE TABLE UAct +( + UserID UInt64, + PageViews UInt8, + Duration UInt8, + Sign Int8, + Version UInt8 +) +ENGINE = VersionedCollapsingMergeTree(Sign, Version) +ORDER BY UserID +``` + +插入数据: + +``` sql +INSERT INTO UAct VALUES (4324182021466249494, 5, 146, 1, 1) +``` + +``` sql +INSERT INTO UAct VALUES (4324182021466249494, 5, 146, -1, 1),(4324182021466249494, 6, 185, 1, 2) +``` + +我们用两个 `INSERT` 查询以创建两个不同的数据部分。 如果我们使用单个查询插入数据,ClickHouse将创建一个数据部分,并且永远不会执行任何合并。 + +获取数据: + +``` sql +SELECT * FROM UAct +``` + +``` text +┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐ +│ 4324182021466249494 │ 5 │ 146 │ 1 │ 1 │ +└─────────────────────┴───────────┴──────────┴──────┴─────────┘ +┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐ +│ 4324182021466249494 │ 5 │ 146 │ -1 │ 1 │ +│ 4324182021466249494 │ 6 │ 185 │ 1 │ 2 │ +└─────────────────────┴───────────┴──────────┴──────┴─────────┘ +``` + +我们在这里看到了什么,折叠的部分在哪里? +我们使用两个创建了两个数据部分 `INSERT` 查询。 该 `SELECT` 查询是在两个线程中执行的,结果是行的随机顺序。 +由于数据部分尚未合并,因此未发生折叠。 ClickHouse在我们无法预测的未知时间点合并数据部分。 + +这就是为什么我们需要聚合: + +``` sql +SELECT + UserID, + sum(PageViews * Sign) AS PageViews, + sum(Duration * Sign) AS Duration, + Version +FROM UAct +GROUP BY UserID, Version +HAVING sum(Sign) > 0 +``` + +``` text +┌──────────────UserID─┬─PageViews─┬─Duration─┬─Version─┐ +│ 4324182021466249494 │ 6 │ 185 │ 2 │ +└─────────────────────┴───────────┴──────────┴─────────┘ +``` + +如果我们不需要聚合,并希望强制折叠,我们可以使用 `FINAL` 修饰符 `FROM` 条款 + +``` sql +SELECT * FROM UAct FINAL +``` + +``` text +┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐ +│ 4324182021466249494 │ 6 │ 185 │ 1 │ 2 │ +└─────────────────────┴───────────┴──────────┴──────┴─────────┘ +``` + +这是一个非常低效的方式来选择数据。 不要把它用于大桌子。 + +[原始文章](https://clickhouse.tech/docs/en/operations/table_engines/versionedcollapsingmergetree/) diff --git a/docs/zh/operations/table_engines/buffer.md b/docs/zh/engines/table_engines/special/buffer.md similarity index 97% rename from docs/zh/operations/table_engines/buffer.md rename to docs/zh/engines/table_engines/special/buffer.md index 70fc8c6cbb2..6b53883be7b 100644 --- a/docs/zh/operations/table_engines/buffer.md +++ b/docs/zh/engines/table_engines/special/buffer.md @@ -1,4 +1,5 @@ -# Buffer {#buffer} + +# 缓冲区 {#buffer} 缓冲数据写入 RAM 中,周期性地将数据刷新到另一个表。在读取操作时,同时从缓冲区和另一个表读取数据。 @@ -50,4 +51,4 @@ PREWHERE,FINAL 和 SAMPLE 对缓冲表不起作用。这些条件将传递到 请注意,一次插入一行数据是没有意义的,即使对于 Buffer 表也是如此。这将只产生每秒几千行的速度,而插入更大的数据块每秒可以产生超过一百万行(参见 «性能» 部分)。 -[Original article](https://clickhouse.tech/docs/zh/operations/table_engines/buffer/) +[原始文章](https://clickhouse.tech/docs/zh/operations/table_engines/buffer/) diff --git a/docs/zh/operations/table_engines/dictionary.md b/docs/zh/engines/table_engines/special/dictionary.md similarity index 95% rename from docs/zh/operations/table_engines/dictionary.md rename to docs/zh/engines/table_engines/special/dictionary.md index 3bd6b9d78b6..27da9b40e52 100644 --- 
a/docs/zh/operations/table_engines/dictionary.md +++ b/docs/zh/engines/table_engines/special/dictionary.md @@ -1,4 +1,5 @@ -# Dictionary {#dictionary} + +# 字典 {#dictionary} `Dictionary` 引擎将字典数据展示为一个ClickHouse的表。 @@ -57,7 +58,7 @@ WHERE name = 'products' │ products │ Flat │ UInt64 │ ['title'] │ ['String'] │ 23065376 │ 175032 │ ODBC: .products │ └──────────┴──────┴────────┴─────────────────┴─────────────────┴─────────────────┴───────────────┴─────────────────┘ -你可以使用 [dictGet\*](../../query_language/functions/ext_dict_functions.md) 函数来获取这种格式的字典数据。 +你可以使用 [dictGet\*](../../../engines/table_engines/special/dictionary.md) 函数来获取这种格式的字典数据。 当你需要获取原始数据,或者是想要使用 `JOIN` 操作的时候,这种视图并没有什么帮助。对于这些情况,你可以使用 `Dictionary` 引擎,它可以将字典数据展示在表中。 diff --git a/docs/zh/operations/table_engines/distributed.md b/docs/zh/engines/table_engines/special/distributed.md similarity index 93% rename from docs/zh/operations/table_engines/distributed.md rename to docs/zh/engines/table_engines/special/distributed.md index b81e52348e6..f31dae7c1ef 100644 --- a/docs/zh/operations/table_engines/distributed.md +++ b/docs/zh/engines/table_engines/special/distributed.md @@ -1,4 +1,5 @@ -# Distributed {#distributed} + +# 分布 {#distributed} **分布式引擎本身不存储数据**, 但可以在多个服务器上进行分布式查询。 读是自动并行的。读取时,远程服务器表的索引(如果有的话)会被使用。 @@ -51,7 +52,7 @@ logs – 服务器配置文件中的集群名称。 ``` -这里定义了一个名为‘logs’的集群,它由两个分片组成,每个分片包含两个副本。 +这里定义了一个名为'logs'的集群,它由两个分片组成,每个分片包含两个副本。 分片是指包含数据不同部分的服务器(要读取所有数据,必须访问所有分片)。 副本是存储复制数据的服务器(要读取所有数据,访问任一副本上的数据即可)。 @@ -98,7 +99,7 @@ logs – 服务器配置文件中的集群名称。 若此参数设置为«false»(默认值),写操作会将数据写入所有副本。实质上,这意味着要分布式表本身来复制数据。这种方式不如使用复制表的好,因为不会检查副本的一致性,并且随着时间的推移,副本数据可能会有些不一样。 -选择将一行数据发送到哪个分片的方法是,首先计算分片表达式,然后将这个计算结果除以所有分片的权重总和得到余数。该行会发送到那个包含该余数的从’prev\_weight’到’prev\_weights + weight’的半闭半开区间对应的分片上,其中 ‘prev\_weights’ 是该分片前面的所有分片的权重和,‘weight’ 是该分片的权重。例如,如果有两个分片,第一个分片权重为9,而第二个分片权重为10,则余数在 \[0,9) 中的行发给第一个分片,余数在 \[9,19) 中的行发给第二个分片。 +选择将一行数据发送到哪个分片的方法是,首先计算分片表达式,然后将这个计算结果除以所有分片的权重总和得到余数。该行会发送到那个包含该余数的从'prev\_weight'到'prev\_weights + weight'的半闭半开区间对应的分片上,其中 ‘prev\_weights’ 是该分片前面的所有分片的权重和,‘weight’ 是该分片的权重。例如,如果有两个分片,第一个分片权重为9,而第二个分片权重为10,则余数在 \[0,9) 中的行发给第一个分片,余数在 \[9,19) 中的行发给第二个分片。 分片表达式可以是由常量和表列组成的任何返回整数表达式。例如,您可以使用表达式 ‘rand()’ 来随机分配数据,或者使用 ‘UserID’ 来按用户 ID 的余数分布(相同用户的数据将分配到单个分片上,这可降低带有用户信息的 IN 和 JOIN 的语句运行的复杂度)。如果该列数据分布不够均匀,可以将其包装在散列函数中:intHash64(UserID)。 @@ -117,4 +118,4 @@ SELECT 查询会被发送到所有分片,并且无论数据在分片中如何 启用 max\_parallel\_replicas 选项后,会在分表的所有副本上并行查询处理。更多信息,请参阅«设置,max\_parallel\_replicas»部分。 -[Original article](https://clickhouse.tech/docs/en/operations/table_engines/distributed/) +[原始文章](https://clickhouse.tech/docs/en/operations/table_engines/distributed/) diff --git a/docs/zh/operations/table_engines/external_data.md b/docs/zh/engines/table_engines/special/external_data.md similarity index 94% rename from docs/zh/operations/table_engines/external_data.md rename to docs/zh/engines/table_engines/special/external_data.md index d993a796e52..399ffd8c0f3 100644 --- a/docs/zh/operations/table_engines/external_data.md +++ b/docs/zh/engines/table_engines/special/external_data.md @@ -1,4 +1,5 @@ -# External Data for Query Processing {#external-data-for-query-processing} + +# 用于查询处理的外部数据 {#external-data-for-query-processing} ClickHouse 允许向服务器发送处理查询所需的数据以及 SELECT 查询。这些数据放在一个临时表中(请参阅 «临时表» 一节),可以在查询中使用(例如,在 IN 操作符中)。 @@ -58,4 +59,4 @@ curl -F 'passwd=@passwd.tsv;' 'http://localhost:8123/?query=SELECT+shell,+count( 对于分布式查询,将临时表发送到所有远程服务器。 -[Original article](https://clickhouse.tech/docs/zh/operations/table_engines/external_data/) 
+[原始文章](https://clickhouse.tech/docs/zh/operations/table_engines/external_data/)
diff --git a/docs/zh/operations/table_engines/file.md b/docs/zh/engines/table_engines/special/file.md
similarity index 74%
rename from docs/zh/operations/table_engines/file.md
rename to docs/zh/engines/table_engines/special/file.md
index 10293130088..71c96f8ab43 100644
--- a/docs/zh/operations/table_engines/file.md
+++ b/docs/zh/engines/table_engines/special/file.md
@@ -1,4 +1,5 @@
-# File(InputFormat) {#table_engines-file}
+
+# 文件(输入格式) {#table_engines-file}

数据源是以 Clickhouse 支持的一种输入格式(TabSeparated,Native等)存储数据的文件。

@@ -12,13 +13,13 @@

    File(Format)

-选用的 `Format` 需要支持 `INSERT` 或 `SELECT` 。有关支持格式的完整列表,请参阅 [格式](../../interfaces/formats.md#formats)。
+选用的 `Format` 需要支持 `INSERT` 或 `SELECT` 。有关支持格式的完整列表,请参阅 [格式](../../../interfaces/formats.md#formats)。

-ClickHouse 不支持给 `File` 指定文件系统路径。它使用服务器配置中 [path](../server_settings/settings.md) 设定的文件夹。
+ClickHouse 不支持给 `File` 指定文件系统路径。它使用服务器配置中 [路径](../../../operations/server_configuration_parameters/settings.md) 设定的文件夹。

使用 `File(Format)` 创建表时,它会在该文件夹中创建空的子目录。当数据写入该表时,它会写到该子目录中的 `data.Format` 文件中。

-你也可以在服务器文件系统中手动创建这些子文件夹和文件,然后通过 [ATTACH](../../query_language/misc.md) 将其创建为具有对应名称的表,这样你就可以从该文件中查询数据了。
+你也可以在服务器文件系统中手动创建这些子文件夹和文件,然后通过 [ATTACH](../../../engines/table_engines/special/file.md) 将其创建为具有对应名称的表,这样你就可以从该文件中查询数据了。

!!! 注意 "注意"
    注意这个功能,因为 ClickHouse 不会跟踪这些文件在外部的更改。在 ClickHouse 中和 ClickHouse 外部同时写入会造成结果是不确定的。

@@ -54,7 +55,7 @@ SELECT * FROM file_engine_table

## 在 Clickhouse-local 中的使用 {#zai-clickhouse-local-zhong-de-shi-yong}

-使用 [clickhouse-local](../utils/clickhouse-local.md) 时,File 引擎除了 `Format` 之外,还可以接受文件路径参数。可以使用数字或人类可读的名称来指定标准输入/输出流,例如 `0` 或 `stdin`,`1` 或 `stdout`。
+使用 [clickhouse-local](../../../engines/table_engines/special/file.md) 时,File 引擎除了 `Format` 之外,还可以接受文件路径参数。可以使用数字或人类可读的名称来指定标准输入/输出流,例如 `0` 或 `stdin`,`1` 或 `stdout`。

**例如:**

``` bash
diff --git a/docs/zh/engines/table_engines/special/generate.md b/docs/zh/engines/table_engines/special/generate.md
new file mode 100644
index 00000000000..6a31e270066
--- /dev/null
+++ b/docs/zh/engines/table_engines/special/generate.md
@@ -0,0 +1,61 @@
+---
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_priority: 46
+toc_title: GenerateRandom
+---
+
+# GenerateRandom {#table_engines-generate}
+
+GenerateRandom表引擎为给定的表结构生成随机数据。
+
+使用示例:
+
+- 在测试中填充可重复的大表。
+- 为模糊测试生成随机输入。
+
+## 在ClickHouse服务器中的使用 {#usage-in-clickhouse-server}
+
+``` sql
+ENGINE = GenerateRandom(random_seed, max_string_length, max_array_length)
+```
+
+`max_array_length` 和 `max_string_length` 参数分别指定生成数据中所有数组列和字符串列的最大长度。
+
+生成表引擎仅支持 `SELECT` 查询。
+
+它支持可以存储在表中的所有 [数据类型](../../../sql_reference/data_types/index.md),但 `LowCardinality` 和 `AggregateFunction` 除外。
+
+**示例:**
+
+**1.** 设置 `generate_engine_table` 表:
+
+``` sql
+CREATE TABLE generate_engine_table (name String, value UInt32) ENGINE = GenerateRandom(1, 5, 3)
+```
+
+**2.** 查询数据:
+
+``` sql
+SELECT * FROM generate_engine_table LIMIT 3
+```
+
+``` text
+┌─name─┬──────value─┐
+│ c4xJ │ 1412771199 │
+│ r    │ 1791099446 │
+│ 7#$  │  124312908 │
+└──────┴────────────┘
+```
+
+## 实施细节 {#details-of-implementation}
+
+- 不支持:
+    - `ALTER`
+    - `SELECT ... SAMPLE`
+    - `INSERT`
+    - 索引
+    - 复制
+
+[原始文章](https://clickhouse.tech/docs/en/operations/table_engines/generate/)
diff --git a/docs/zh/engines/table_engines/special/index.md b/docs/zh/engines/table_engines/special/index.md
new file mode 100644
index 00000000000..7be40b75fb5
--- /dev/null
+++ b/docs/zh/engines/table_engines/special/index.md
@@ -0,0 +1,8 @@
+---
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_folder_title: "\u7279\u522B"
+toc_priority: 31
+---
+
+
diff --git a/docs/zh/operations/table_engines/join.md b/docs/zh/engines/table_engines/special/join.md
similarity index 75%
rename from docs/zh/operations/table_engines/join.md
rename to docs/zh/engines/table_engines/special/join.md
index 024530cc0b7..33cc0685a52 100644
--- a/docs/zh/operations/table_engines/join.md
+++ b/docs/zh/engines/table_engines/special/join.md
@@ -1,10 +1,11 @@
-# Join {#join}
+
+# Join {#join}

加载好的 JOIN 表数据会常驻内存中。

    Join(ANY|ALL, LEFT|INNER, k1[, k2, ...])

-引擎参数:`ANY|ALL` – 连接修饰;`LEFT|INNER` – 连接类型。更多信息可参考 [JOIN子句](../../query_language/select.md#select-join)。
+引擎参数:`ANY|ALL` – 连接修饰;`LEFT|INNER` – 连接类型。更多信息可参考 [JOIN子句](../../../engines/table_engines/special/join.md#select-join)。

这些参数设置不用带引号,但必须与要 JOIN 表匹配。 k1,k2,……是 USING 子句中要用于连接的关键列。

此引擎表不能用于 GLOBAL JOIN 。

@@ -13,9 +14,9 @@

跟 Set 引擎类似,Join 引擎把数据存储在磁盘中。

-### Limitations and Settings {#join-limitations-and-settings}
+### 限制和设置 {#join-limitations-and-settings}

-When creating a table, the following settings are applied:
+创建表时,将应用以下设置:

- join\_use\_nulls
- max\_rows\_in\_join
- join\_overflow\_mode
- join\_any\_take\_last\_row

-The `Join`-engine tables can’t be used in `GLOBAL JOIN` operations.
+`Join` 引擎表不能用于 `GLOBAL JOIN` 操作。

[来源文章](https://clickhouse.tech/docs/en/operations/table_engines/join/)
diff --git a/docs/zh/engines/table_engines/special/materializedview.md b/docs/zh/engines/table_engines/special/materializedview.md
new file mode 100644
index 00000000000..5dc4e261fbd
--- /dev/null
+++ b/docs/zh/engines/table_engines/special/materializedview.md
@@ -0,0 +1,6 @@
+
+# 物化视图 {#wu-hua-shi-tu}
+
+物化视图的使用(更多信息请参阅 [CREATE TABLE](../../../engines/table_engines/special/materializedview.md) )。它需要使用一个不同的引擎来存储数据,这个引擎要在创建物化视图时指定。当从表中读取时,它就会使用该引擎。
+
+[来源文章](https://clickhouse.tech/docs/en/operations/table_engines/materializedview/)
diff --git a/docs/zh/operations/table_engines/memory.md b/docs/zh/engines/table_engines/special/memory.md
similarity index 89%
rename from docs/zh/operations/table_engines/memory.md
rename to docs/zh/engines/table_engines/special/memory.md
index a48308f7b17..3fd29813d00 100644
--- a/docs/zh/operations/table_engines/memory.md
+++ b/docs/zh/engines/table_engines/special/memory.md
@@ -1,7 +1,8 @@
-# Memory {#memory}
+
+# 内存 {#memory}

Memory 引擎以未压缩的形式将数据存储在 RAM 中。数据完全以读取时获得的形式存储。换句话说,从这张表中读取是很轻松的。并发数据访问是同步的。锁范围小:读写操作不会相互阻塞。不支持索引。阅读是并行化的。在简单查询上达到最大生产率(超过10 GB /秒),因为没有磁盘读取,不需要解压缩或反序列化数据。(值得注意的是,在许多情况下,与 MergeTree 引擎的性能几乎一样高)。重新启动服务器时,表中的数据消失,表将变为空。通常,使用此表引擎是不合理的。但是,它可用于测试,以及在相对较少的行(最多约100,000,000)上需要最高性能的查询。

Memory 引擎是由系统用于临时表进行外部数据的查询(请参阅 «外部数据用于请求处理» 部分),以及用于实现 `GLOBAL IN`(请参见 «IN 运算符» 部分)。

-[Original article](https://clickhouse.tech/docs/zh/operations/table_engines/memory/)
+[原始文章](https://clickhouse.tech/docs/zh/operations/table_engines/memory/)
diff --git a/docs/zh/operations/table_engines/merge.md b/docs/zh/engines/table_engines/special/merge.md
similarity index 99%
rename from docs/zh/operations/table_engines/merge.md
rename to docs/zh/engines/table_engines/special/merge.md
index bbcbf8772b4..e4ee3fe92a5 100644
--- a/docs/zh/operations/table_engines/merge.md
+++ b/docs/zh/engines/table_engines/special/merge.md
@@ -1,4 +1,5 @@
-# Merge {#merge}
+
+# 合并 {#merge}

`Merge` 引擎 (不要跟 `MergeTree` 引擎混淆) 本身不存储数据,但可用于同时从任意多个其他的表中读取数据。
读是自动并行的,不支持写入。读取时,那些被真正读取到数据的表的索引(如果有的话)会被使用。
diff --git a/docs/zh/operations/table_engines/null.md b/docs/zh/engines/table_engines/special/null.md
similarity index 69%
rename from docs/zh/operations/table_engines/null.md
rename to docs/zh/engines/table_engines/special/null.md
index 94f731f756d..3fd891db393 100644
--- a/docs/zh/operations/table_engines/null.md
+++ b/docs/zh/engines/table_engines/special/null.md
@@ -1,7 +1,8 @@
+
# Null {#null}

当写入 Null 类型的表时,将忽略数据。从 Null 类型的表中读取时,返回空。

但是,可以在 Null 类型的表上创建物化视图。写入表的数据将转发到视图中。

-[Original article](https://clickhouse.tech/docs/en/operations/table_engines/null/)
+[原始文章](https://clickhouse.tech/docs/en/operations/table_engines/null/)
diff --git a/docs/zh/operations/table_engines/set.md b/docs/zh/engines/table_engines/special/set.md
similarity index 87%
rename from docs/zh/operations/table_engines/set.md
rename to docs/zh/engines/table_engines/special/set.md
index e9be9ab7e56..b6ef859b85a 100644
--- a/docs/zh/operations/table_engines/set.md
+++ b/docs/zh/engines/table_engines/special/set.md
@@ -1,4 +1,5 @@
-# Set {#set}
+
+# 集合 {#set}

始终存在于 RAM 中的数据集。它适用于IN运算符的右侧(请参见 «IN运算符» 部分)。

@@ -8,4 +9,4 @@

对于强制服务器重启,磁盘上的数据块可能会丢失或损坏。在数据块损坏的情况下,可能需要手动删除包含损坏数据的文件。

-[Original article](https://clickhouse.tech/docs/en/operations/table_engines/set/)
+[原始文章](https://clickhouse.tech/docs/en/operations/table_engines/set/)
diff --git a/docs/zh/operations/table_engines/url.md b/docs/zh/engines/table_engines/special/url.md
similarity index 94%
rename from docs/zh/operations/table_engines/url.md
rename to docs/zh/engines/table_engines/special/url.md
index c2ce37adf21..0e02693aea0 100644
--- a/docs/zh/operations/table_engines/url.md
+++ b/docs/zh/engines/table_engines/special/url.md
@@ -1,13 +1,14 @@
-# URL(URL, Format) {#table_engines-url}
+
+# URL(URL,格式) {#table_engines-url}

用于管理远程 HTTP/HTTPS 服务器上的数据。该引擎类似
-[File](file.md) 引擎。
+[文件](file.md) 引擎。

## 在 ClickHouse 服务器中使用引擎 {#zai-clickhouse-fu-wu-qi-zhong-shi-yong-yin-qing}

`Format` 必须是 ClickHouse 可以用于 `SELECT` 查询的一种格式,若有必要,还要可用于 `INSERT` 。有关支持格式的完整列表,请查看
-[Formats](../../interfaces/formats.md#formats)。
+[格式](../../../interfaces/formats.md#formats)。

`URL` 必须符合统一资源定位符的结构。指定的URL必须指向一个 HTTP 或 HTTPS 服务器。对于服务端响应,
diff --git a/docs/zh/operations/table_engines/view.md b/docs/zh/engines/table_engines/special/view.md
similarity index 68%
rename from docs/zh/operations/table_engines/view.md
rename to docs/zh/engines/table_engines/special/view.md
index 5d15fc74218..a17dab21ce2 100644
--- a/docs/zh/operations/table_engines/view.md
+++ b/docs/zh/engines/table_engines/special/view.md
@@ -1,5 +1,6 @@
-# View {#view}
+
+# 视图 {#view}

用于构建视图(有关更多信息,请参阅 `CREATE VIEW 查询`)。 它不存储数据,仅存储指定的 `SELECT` 查询。 从表中读取时,它会运行此查询(并从查询中删除所有不必要的列)。

-[Original article](https://clickhouse.tech/docs/en/operations/table_engines/view/)
+[原始文章](https://clickhouse.tech/docs/en/operations/table_engines/view/)
diff --git a/docs/zh/faq/general.md b/docs/zh/faq/general.md
index 17f4fe9b11b..b81d521fa80 100644
--- a/docs/zh/faq/general.md
+++ b/docs/zh/faq/general.md
@@ -1,3 +1,4 @@
+
# 常见问题 {#chang-jian-wen-ti}

## 为什么不使用MapReduce之类的产品呢? {#wei-shi-yao-bu-shi-yong-mapreducezhi-lei-de-chan-pin-ni}

@@ -8,11 +9,11 @@ 大多数MapReduce系统允许您在集群上执行任意代码。但是,声明性查询语言更适合OLAP,以便快速运行实验。例如,Hadoop包含Hive和Pig,Cloudera Impala或Shark(过时)for Spark,以及Spark SQL、Presto和Apache Drill。与专业系统相比,运行此类任务时的性能非常不理想,所以将这些系统用作Web接口的后端服务是不现实的,因为延迟相对较高。

-## What to do if I have a problem with encodings when using Oracle through ODBC? {#oracle-odbc-encodings}
+## 如果我在通过ODBC使用Oracle时遇到编码问题,该怎么办? {#oracle-odbc-encodings}

-If you use Oracle through ODBC driver as a source of external dictionaries, you need to set up correctly value for the `NLS_LANG` variable in the `/etc/default/clickhouse`. For more details see the [Oracle NLS\_LANG FAQ](https://www.oracle.com/technetwork/products/globalization/nls-lang-099431.html).
+如果您通过ODBC驱动程序使用Oracle作为外部字典的源,则需要在 `/etc/default/clickhouse` 中为 `NLS_LANG` 变量设置正确的值。 欲了解更多详情,请参阅 [Oracle NLS\_LANG 常见问题](https://www.oracle.com/technetwork/products/globalization/nls-lang-099431.html).

-**Example**
+**示例**

     NLS_LANG=CHINESE_CHINA.ZHS16GBK

diff --git a/docs/zh/faq/index.md b/docs/zh/faq/index.md
new file mode 100644
index 00000000000..7c0b25dbec0
--- /dev/null
+++ b/docs/zh/faq/index.md
@@ -0,0 +1,8 @@
+---
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_folder_title: F.A.Q.
+toc_priority: 76
+---
+
+
diff --git a/docs/zh/getting_started/example_datasets/amplab_benchmark.md b/docs/zh/getting_started/example_datasets/amplab_benchmark.md
index fc78daa6a46..4c3b26819b1 100644
--- a/docs/zh/getting_started/example_datasets/amplab_benchmark.md
+++ b/docs/zh/getting_started/example_datasets/amplab_benchmark.md
@@ -1,3 +1,4 @@
+
# AMPLab 大数据基准测试 {#amplab-da-shu-ju-ji-zhun-ce-shi}

参考 https://amplab.cs.berkeley.edu/benchmark/

@@ -119,4 +120,4 @@ ORDER BY totalRevenue DESC LIMIT 1
```

-[Original article](https://clickhouse.tech/docs/en/getting_started/example_datasets/amplab_benchmark/)
+[原始文章](https://clickhouse.tech/docs/en/getting_started/example_datasets/amplab_benchmark/)
diff --git a/docs/zh/getting_started/example_datasets/criteo.md b/docs/zh/getting_started/example_datasets/criteo.md
index 6083566113a..0ae2650b390 100644
--- a/docs/zh/getting_started/example_datasets/criteo.md
+++ b/docs/zh/getting_started/example_datasets/criteo.md
@@ -1,3 +1,4 @@
+
# Criteo TB级别点击日志 {#criteo-tbji-bie-dian-ji-ri-zhi}

可以从http://labs.criteo.com/downloads/download-terabyte-click-logs/上下载数据

@@ -71,4 +72,4 @@ INSERT INTO criteo SELECT date, clicked, int1, int2, int3, int4, int5, int6, int
DROP TABLE criteo_log;
```

-[Original article](https://clickhouse.tech/docs/en/getting_started/example_datasets/criteo/)
+[原始文章](https://clickhouse.tech/docs/en/getting_started/example_datasets/criteo/)
diff --git a/docs/zh/getting_started/example_datasets/index.md b/docs/zh/getting_started/example_datasets/index.md
index a07ff8b0010..4faf3b0ecfc 100644
--- a/docs/zh/getting_started/example_datasets/index.md
+++ b/docs/zh/getting_started/example_datasets/index.md
@@ -1,18 +1,22 @@
---
-en_copy: true
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_folder_title: "\u793A\u4F8B\u6570\u636E\u96C6"
+toc_priority: 12
+toc_title: "\u5BFC\u8A00"
---

-# Example Datasets
+# 示例数据集 {#example-datasets}

-This section describes how to obtain example datasets and import them into ClickHouse.
-For some datasets example queries are also available.
+本节介绍如何获取示例数据集并将其导入ClickHouse。
+对于某些数据集,还提供了示例查询。

-* [Anonymized Yandex.Metrica Dataset](metrica.md)
-* [Star Schema Benchmark](star_schema.md)
-* [WikiStat](wikistat.md)
-* [Terabyte of Click Logs from Criteo](criteo.md)
-* [AMPLab Big Data Benchmark](amplab_benchmark.md)
-* [New York Taxi Data](nyc_taxi.md)
-* [OnTime](ontime.md)
+- [匿名的 Yandex.Metrica 数据集](metrica.md)
+- [星型架构基准测试](star_schema.md)
+- [WikiStat](wikistat.md)
+- [来自Criteo的TB级点击日志](criteo.md)
+- [AMPLab大数据基准](amplab_benchmark.md)
+- [纽约出租车数据](nyc_taxi.md)
+- [OnTime](ontime.md)

-[Original article](https://clickhouse.tech/docs/en/getting_started/example_datasets)
+[原始文章](https://clickhouse.tech/docs/en/getting_started/example_datasets)
diff --git a/docs/zh/getting_started/example_datasets/metrica.md b/docs/zh/getting_started/example_datasets/metrica.md
index d2a91a02a89..f7e0c86d324 100644
--- a/docs/zh/getting_started/example_datasets/metrica.md
+++ b/docs/zh/getting_started/example_datasets/metrica.md
@@ -1,16 +1,19 @@
---
-en_copy: true
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_priority: 21
+toc_title: "Yandex.Metrica \u6570\u636E"
---

-# Anonymized Yandex.Metrica Data {#anonymized-yandex-metrica-data}
+# 匿名的 Yandex.Metrica 数据 {#anonymized-yandex-metrica-data}

-Dataset consists of two tables containing anonymized data about hits (`hits_v1`) and visits (`visits_v1`) of Yandex.Metrica. You can read more about Yandex.Metrica in [ClickHouse history](../../introduction/history.md) section.
+数据集由两个表组成,分别包含 Yandex.Metrica 的匿名点击数据 (`hits_v1`) 和访问数据 (`visits_v1`)。 关于 Yandex.Metrica 的更多信息,请参阅 [ClickHouse历史](../../introduction/history.md) 一节。

-The dataset consists of two tables, either of them can be downloaded as a compressed `tsv.xz` file or as prepared partitions. In addition to that, an extended version of the `hits` table containing 100 million rows is available as TSV at https://clickhouse-datasets.s3.yandex.net/hits/tsv/hits_100m_obfuscated_v1.tsv.xz and as prepared partitions at https://clickhouse-datasets.s3.yandex.net/hits/partitions/hits_100m_obfuscated_v1.tar.xz.
+这两个表都可以下载为压缩的 `tsv.xz` 文件或预先准备好的分区。 除此之外,包含1亿行的 `hits` 扩展版本表以TSV格式提供于 https://clickhouse-datasets.s3.yandex.net/hits/tsv/hits\_100m\_obfuscated\_v1.tsv.xz ,预先准备好的分区提供于 https://clickhouse-datasets.s3.yandex.net/hits/partitions/hits\_100m\_obfuscated\_v1.tar.xz 。
-## Obtaining Tables from Prepared Partitions {#obtaining-tables-from-prepared-partitions} +## 从准备好的分区获取表 {#obtaining-tables-from-prepared-partitions} -Download and import hits table: +下载和导入点击表: ``` bash curl -O https://clickhouse-datasets.s3.yandex.net/hits/partitions/hits_v1.tar @@ -20,7 +23,7 @@ sudo service clickhouse-server restart clickhouse-client --query "SELECT COUNT(*) FROM datasets.hits_v1" ``` -Download and import visits: +下载和导入访问: ``` bash curl -O https://clickhouse-datasets.s3.yandex.net/visits/partitions/visits_v1.tar @@ -30,9 +33,9 @@ sudo service clickhouse-server restart clickhouse-client --query "SELECT COUNT(*) FROM datasets.visits_v1" ``` -## Obtaining Tables from Compressed TSV File {#obtaining-tables-from-compressed-tsv-file} +## 从压缩TSV文件获取表 {#obtaining-tables-from-compressed-tsv-file} -Download and import hits from compressed TSV file: +从压缩的TSV文件下载并导入命中: ``` bash curl https://clickhouse-datasets.s3.yandex.net/hits/tsv/hits_v1.tsv.xz | unxz --threads=`nproc` > hits_v1.tsv @@ -46,7 +49,7 @@ clickhouse-client --query "OPTIMIZE TABLE datasets.hits_v1 FINAL" clickhouse-client --query "SELECT COUNT(*) FROM datasets.hits_v1" ``` -Download and import visits from compressed tsv-file: +从压缩tsv文件下载和导入访问: ``` bash curl https://clickhouse-datasets.s3.yandex.net/visits/tsv/visits_v1.tsv.xz | unxz --threads=`nproc` > visits_v1.tsv @@ -60,8 +63,8 @@ clickhouse-client --query "OPTIMIZE TABLE datasets.visits_v1 FINAL" clickhouse-client --query "SELECT COUNT(*) FROM datasets.visits_v1" ``` -## Example Queries {#example-queries} +## 查询示例 {#example-queries} -[ClickHouse tutorial](../../getting_started/tutorial.md) is based on Yandex.Metrica dataset and the recommended way to get started with this dataset is to just go through tutorial. +[点击教程](../../getting_started/tutorial.md) 是基于Yandex的。Metrica数据集和开始使用此数据集的推荐方式是通过教程。 -Additional examples of queries to these tables can be found among [stateful tests](https://github.com/ClickHouse/ClickHouse/tree/master/tests/queries/1_stateful) of ClickHouse (they are named `test.hists` and `test.visits` there). +查询这些表的其他示例可以在 [有状态测试](https://github.com/ClickHouse/ClickHouse/tree/master/tests/queries/1_stateful) ClickHouse的(它们被命名为 `test.hists` 和 `test.visits` 那里)。 diff --git a/docs/zh/getting_started/example_datasets/nyc_taxi.md b/docs/zh/getting_started/example_datasets/nyc_taxi.md index 50dcbed0988..e486dbef9a7 100644 --- a/docs/zh/getting_started/example_datasets/nyc_taxi.md +++ b/docs/zh/getting_started/example_datasets/nyc_taxi.md @@ -1,3 +1,4 @@ + # 纽约市出租车数据 {#niu-yue-shi-chu-zu-che-shu-ju} 纽约市出租车数据有以下两个方式获取: @@ -259,7 +260,7 @@ FROM trips ``` 这需要3030秒,速度约为每秒428,000行。 -要加快速度,可以使用`Log`引擎替换’MergeTree\`引擎来创建表。 在这种情况下,下载速度超过200秒。 +要加快速度,可以使用`Log`引擎替换'MergeTree\`引擎来创建表。 在这种情况下,下载速度超过200秒。 这个表需要使用126GB的磁盘空间。 @@ -285,7 +286,7 @@ $ sudo service clickhouse-server restart $ clickhouse-client --query "select count(*) from datasets.trips_mergetree" ``` -!!! info "Info" +!!! info "信息" 如果要运行下面的SQL查询,必须使用完整的表名, `datasets.trips_mergetree`。 @@ -297,7 +298,7 @@ Q1: SELECT cab_type, count(*) FROM trips_mergetree GROUP BY cab_type ``` -0.490 seconds. +0.490秒 Q2: @@ -305,7 +306,7 @@ Q2: SELECT passenger_count, avg(total_amount) FROM trips_mergetree GROUP BY passenger_count ``` -1.224 seconds. +1.224秒 Q3: @@ -313,7 +314,7 @@ Q3: SELECT passenger_count, toYear(pickup_date) AS year, count(*) FROM trips_mergetree GROUP BY passenger_count, year ``` -2.104 seconds. 
+2.104秒 Q4: @@ -324,11 +325,11 @@ GROUP BY passenger_count, year, distance ORDER BY year, count(*) DESC ``` -3.593 seconds. +3.593秒 我们使用的是如下配置的服务器: -Two Intel(R) Xeon(R) CPU E5-2650 v2 @ 2.60GHz, 16 physical kernels total,128 GiB RAM,8x6 TB HD on hardware RAID-5 +两个英特尔(R)至强(R)CPU E5-2650v2@2.60GHz,总共有16个物理内核,128GiB RAM,硬件RAID-5上的8X6TB HD 执行时间是取三次运行中最好的值,但是从第二次查询开始,查询就讲从文件系统的缓存中读取数据。同时在每次读取和处理后不在进行缓存。 @@ -356,29 +357,29 @@ INSERT INTO trips_mergetree_x3 SELECT * FROM trips_mergetree 在三台服务器集群中运行的结果: -Q1: 0.212 seconds. -Q2: 0.438 seconds. -Q3: 0.733 seconds. -Q4: 1.241 seconds. +Q1:0.212秒. +Q2:0.438秒。 +Q3:0.733秒。 +Q4:1.241秒. 不出意料,查询是线性扩展的。 我们同时在140台服务器的集群中运行的结果: -Q1: 0.028 sec. -Q2: 0.043 sec. -Q3: 0.051 sec. -Q4: 0.072 sec. +Q1:0.028秒。 +Q2:0.043秒。 +Q3:0.051秒。 +Q4:0.072秒。 在这种情况下,查询处理时间首先由网络延迟确定。 我们使用位于芬兰的Yandex数据中心中的客户端去位于俄罗斯的集群上运行查询,这增加了大约20毫秒的延迟。 ## 总结 {#zong-jie} -| servers | Q1 | Q2 | Q3 | Q4 | -|---------|-------|-------|-------|-------| -| 1 | 0.490 | 1.224 | 2.104 | 3.593 | -| 3 | 0.212 | 0.438 | 0.733 | 1.241 | -| 140 | 0.028 | 0.043 | 0.051 | 0.072 | +| 服务器 | Q1 | Q2 | Q3 | Q4 | +|--------|-------|-------|-------|-------| +| 1 | 0.490 | 1.224 | 2.104 | 3.593 | +| 3 | 0.212 | 0.438 | 0.733 | 1.241 | +| 140 | 0.028 | 0.043 | 0.051 | 0.072 | -[Original article](https://clickhouse.tech/docs/en/getting_started/example_datasets/nyc_taxi/) +[原始文章](https://clickhouse.tech/docs/en/getting_started/example_datasets/nyc_taxi/) diff --git a/docs/zh/getting_started/example_datasets/ontime.md b/docs/zh/getting_started/example_datasets/ontime.md index 6db294b12fb..31684129bb7 100644 --- a/docs/zh/getting_started/example_datasets/ontime.md +++ b/docs/zh/getting_started/example_datasets/ontime.md @@ -1,3 +1,4 @@ + # 航班飞行数据 {#hang-ban-fei-xing-shu-ju} 航班飞行数据有以下两个方式获取: @@ -156,7 +157,7 @@ $ sudo service clickhouse-server restart $ clickhouse-client --query "select count(*) from datasets.ontime" ``` -!!! info "Info" +!!! 
info "信息" 如果要运行下面的SQL查询,必须使用完整的表名, `datasets.ontime`。 @@ -356,7 +357,7 @@ ORDER by rate DESC LIMIT 1000; ``` -Bonus: +奖金: ``` sql SELECT avg(cnt) diff --git a/docs/zh/getting_started/example_datasets/star_schema.md b/docs/zh/getting_started/example_datasets/star_schema.md index 4680fe652b2..4fed13923ff 100644 --- a/docs/zh/getting_started/example_datasets/star_schema.md +++ b/docs/zh/getting_started/example_datasets/star_schema.md @@ -1,4 +1,5 @@ -# Star Schema Benchmark {#star-schema-benchmark} + +# 星型架构基准测试 {#star-schema-benchmark} 编译 dbgen: @@ -110,7 +111,7 @@ FROM lineorder l ALTER TABLE lineorder_flat DROP COLUMN C_CUSTKEY, DROP COLUMN S_SUPPKEY, DROP COLUMN P_PARTKEY; ``` -Running the queries: +运行查询: Q1.1 @@ -190,4 +191,4 @@ Q4.3 SELECT toYear(LO_ORDERDATE) AS year, S_CITY, P_BRAND, sum(LO_REVENUE - LO_SUPPLYCOST) AS profit FROM lineorder_flat WHERE S_NATION = 'UNITED STATES' AND (year = 1997 OR year = 1998) AND P_CATEGORY = 'MFGR#14' GROUP BY year, S_CITY, P_BRAND ORDER BY year, S_CITY, P_BRAND; ``` -[Original article](https://clickhouse.tech/docs/en/getting_started/example_datasets/star_schema/) +[原始文章](https://clickhouse.tech/docs/en/getting_started/example_datasets/star_schema/) diff --git a/docs/zh/getting_started/example_datasets/wikistat.md b/docs/zh/getting_started/example_datasets/wikistat.md index aacbdbf37f0..c2681a912e2 100644 --- a/docs/zh/getting_started/example_datasets/wikistat.md +++ b/docs/zh/getting_started/example_datasets/wikistat.md @@ -1,3 +1,4 @@ + # 维基访问数据 {#wei-ji-fang-wen-shu-ju} 参考: http://dumps.wikimedia.org/other/pagecounts-raw/ @@ -25,4 +26,4 @@ $ cat links.txt | while read link; do wget http://dumps.wikimedia.org/other/page $ ls -1 /opt/wikistat/ | grep gz | while read i; do echo $i; gzip -cd /opt/wikistat/$i | ./wikistat-loader --time="$(echo -n $i | sed -r 's/pagecounts-([0-9]{4})([0-9]{2})([0-9]{2})-([0-9]{2})([0-9]{2})([0-9]{2})\.gz/\1-\2-\3 \4-00-00/')" | clickhouse-client --query="INSERT INTO wikistat FORMAT TabSeparated"; done ``` -[Original article](https://clickhouse.tech/docs/en/getting_started/example_datasets/wikistat/) +[原始文章](https://clickhouse.tech/docs/en/getting_started/example_datasets/wikistat/) diff --git a/docs/zh/getting_started/index.md b/docs/zh/getting_started/index.md index d6830aa6c84..35ae08bde7b 100644 --- a/docs/zh/getting_started/index.md +++ b/docs/zh/getting_started/index.md @@ -1,3 +1,4 @@ + # 入门 {#ru-men} 如果您是ClickHouse的新手,并希望亲身体验它的性能,首先您需要通过 [安装过程](install.md). 
diff --git a/docs/zh/getting_started/install.md b/docs/zh/getting_started/install.md index 55aeff892a2..6a9aae286ad 100644 --- a/docs/zh/getting_started/install.md +++ b/docs/zh/getting_started/install.md @@ -1,3 +1,4 @@ + ## 系统要求 {#xi-tong-yao-qiu} ClickHouse可以在任何具有x86\_64,AArch64或PowerPC64LE CPU架构的Linux,FreeBSD或Mac OS X上运行。 @@ -21,7 +22,7 @@ $ grep -q sse4_2 /proc/cpuinfo && echo "SSE 4.2 supported" || echo "SSE 4.2 not deb http://repo.yandex.ru/clickhouse/deb/stable/ main/ ``` -如果你想使用最新的测试版本,请使用’testing’替换’stable’。 +如果你想使用最新的测试版本,请使用'testing'替换'stable'。 然后运行: @@ -34,8 +35,8 @@ sudo apt-get install clickhouse-client clickhouse-server 你也可以从这里手动下载安装包:https://repo.yandex.ru/clickhouse/deb/stable/main/。 -ClickHouse包含访问控制配置,它们位于`users.xml`文件中(与’config.xml’同目录)。 -默认情况下,允许从任何地方使用默认的‘default’用户无密码的访问ClickHouse。参考‘user/default/networks’。 +ClickHouse包含访问控制配置,它们位于`users.xml`文件中(与'config.xml'同目录)。 +默认情况下,允许从任何地方使用默认的'default'用户无密码的访问ClickHouse。参考'user/default/networks'。 有关更多信息,请参考«Configuration files»部分。 ### 来自RPM包 {#from-rpm-packages} @@ -62,7 +63,7 @@ sudo yum install clickhouse-server clickhouse-client ### 来自Docker {#from-docker-image} -要在Docker中运行ClickHouse,请遵循[Docker Hub](https://hub.docker.com/r/yandex/clickhouse-server/)上的指南。那些图像使用官方的`deb`包。 +要在Docker中运行ClickHouse,请遵循[码头工人中心](https://hub.docker.com/r/yandex/clickhouse-server/)上的指南。那些图像使用官方的`deb`包。 ### 使用源码安装 {#from-sources} @@ -84,7 +85,7 @@ Server: programs/clickhouse-server ``` (它们可以在server config中配置。) -为需要的用户运行‘chown’ +为需要的用户运行'chown' 日志的路径可以在server config (src/programs/server/config.xml)中配置。 @@ -107,7 +108,7 @@ clickhouse-server --config-file=/etc/clickhouse-server/config.xml ``` 在这种情况下,日志将被打印到控制台中,这在开发过程中很方便。 -如果配置文件在当前目录中,你可以不指定‘–config-file’参数。它默认使用‘./config.xml’。 +如果配置文件在当前目录中,你可以不指定'–config-file'参数。它默认使用'./config.xml'。 你可以使用命令行客户端连接到服务: @@ -115,7 +116,7 @@ clickhouse-server --config-file=/etc/clickhouse-server/config.xml clickhouse-client ``` -默认情况下它使用‘default’用户无密码的与localhost:9000服务建立连接。 +默认情况下它使用'default'用户无密码的与localhost:9000服务建立连接。 客户端也可以用于连接远程服务,例如: ``` bash @@ -149,4 +150,4 @@ SELECT 1 为了继续进行实验,你可以尝试下载测试数据集。 -[Original article](https://clickhouse.tech/docs/en/getting_started/install/) +[原始文章](https://clickhouse.tech/docs/en/getting_started/install/) diff --git a/docs/zh/getting_started/playground.md b/docs/zh/getting_started/playground.md index 186cb9030c2..a09d615ba21 100644 --- a/docs/zh/getting_started/playground.md +++ b/docs/zh/getting_started/playground.md @@ -1,44 +1,47 @@ --- -en_copy: true +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 14 +toc_title: "\u266A\u64CD\u573A\u266A" --- -# ClickHouse Playground {#clickhouse-playground} +# ツ环板Playgroundョツ嘉ッ {#clickhouse-playground} -[ClickHouse Playground](https://play.clickhouse.tech?file=welcome) allows people to experiment with ClickHouse by running queries instantly, without setting up their server or cluster. -Several example datasets are available in the Playground as well as sample queries that show ClickHouse features. +[ツ环板Playgroundョツ嘉ッ](https://play.clickhouse.tech?file=welcome) 允许人们通过即时运行查询来尝试ClickHouse,而无需设置他们的服务器或集群。 +Playground中提供了几个示例数据集以及显示ClickHouse要素的示例查询。 -The queries are executed as a read-only user. 
It implies some limitations: +查询以只读用户身份执行。 这意味着一些局限性: -- DDL queries are not allowed -- INSERT queries are not allowed +- 不允许DDL查询 +- 不允许插入查询 -The following settings are also enforced: +还强制执行以下设置: - [`max_result_bytes=10485760`](../operations/settings/query_complexity/#max-result-bytes) - [`max_result_rows=2000`](../operations/settings/query_complexity/#setting-max_result_rows) - [`result_overflow_mode=break`](../operations/settings/query_complexity/#result-overflow-mode) - [`max_execution_time=60000`](../operations/settings/query_complexity/#max-execution-time) -ClickHouse Playground gives the experience of m2.small -[Managed Service for ClickHouse](https://cloud.yandex.com/services/managed-clickhouse) -instance hosted in [Yandex.Cloud](https://cloud.yandex.com/). -More information about [cloud providers](../commercial/cloud.md). +ClickHouse游乐场给m2的经验。小 +[管理服务ClickHouse](https://cloud.yandex.com/services/managed-clickhouse) +实例托管在 [Yandex云](https://cloud.yandex.com/). +更多信息 [云提供商](../commercial/cloud.md). -ClickHouse Playground web interface makes requests via ClickHouse [HTTP API](../interfaces/http.md). -The Playground backend is just a ClickHouse cluster without any additional server-side application. -ClickHouse HTTPS endpoint is also available as a part of the Playground. +ClickHouse游乐场网的界面使请求通过ClickHouse [HTTP API](../interfaces/http.md). +Playground后端只是一个ClickHouse集群,没有任何额外的服务器端应用程序。 +隆隆隆隆路虏脢..陇.貌.垄拢卢虏禄.陇.貌路.隆拢脳枚脢虏 -You can make queries to playground using any HTTP client, for example [curl](https://curl.haxx.se) or [wget](https://www.gnu.org/software/wget/), or set up a connection using [JDBC](../interfaces/jdbc.md) or [ODBC](../interfaces/odbc.md) drivers. -More information about software products that support ClickHouse is available [here](../interfaces/index.md). +您可以使用任何HTTP客户端向playground进行查询,例如 [卷曲的](https://curl.haxx.se) 或 [wget](https://www.gnu.org/software/wget/),或使用以下方式建立连接 [JDBC](../interfaces/jdbc.md) 或 [ODBC](../interfaces/odbc.md) 司机 +有关支持ClickHouse的软件产品的更多信息,请访问 [这里](../interfaces/index.md). -| Parameter | Value | -|:----------|:--------------------------------------| -| Endpoint | https://play-api.clickhouse.tech:8443 | -| User | `playground` | -| Password | `clickhouse` | +| 参数 | 价值 | +|:-----|:--------------------------------------| +| 端点 | https://play-api.克莱克豪斯技术:8443 | +| 用户 | `playground` | +| 密码 | `clickhouse` | -Note that this endpoint requires a secure connection. +请注意,此端点需要安全连接。 -Example: +示例: ``` bash curl "https://play-api.clickhouse.tech:8443/?query=SELECT+'Play+ClickHouse!';&user=playground&password=clickhouse&database=datasets" diff --git a/docs/zh/getting_started/tutorial.md b/docs/zh/getting_started/tutorial.md index 4f23dbe756d..4fc2fbdf290 100644 --- a/docs/zh/getting_started/tutorial.md +++ b/docs/zh/getting_started/tutorial.md @@ -1,18 +1,21 @@ --- -en_copy: true +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 12 +toc_title: "\u6559\u7A0B" --- -# ClickHouse Tutorial {#clickhouse-tutorial} +# 点击教程 {#clickhouse-tutorial} -## What to Expect from This Tutorial? {#what-to-expect-from-this-tutorial} +## 从本教程中可以期待什么? {#what-to-expect-from-this-tutorial} -By going through this tutorial, you’ll learn how to set up a simple ClickHouse cluster. It’ll be small, but fault-tolerant and scalable. Then we will use one of the example datasets to fill it with data and execute some demo queries. 
+通过本教程,您将学习如何搭建一个简单的ClickHouse集群。 它规模很小,但具备容错性和可扩展性。 然后,我们将使用其中一个示例数据集填充数据,并执行一些演示查询。

-## Single Node Setup {#single-node-setup}
+## 单节点设置 {#single-node-setup}

-To postpone the complexities of a distributed environment, we’ll start with deploying ClickHouse on a single server or virtual machine. ClickHouse is usually installed from [deb](index.md#install-from-deb-packages) or [rpm](index.md#from-rpm-packages) packages, but there are [alternatives](index.md#from-docker-image) for the operating systems that do no support them.
+为了先避开分布式环境的复杂性,我们将从在单个服务器或虚拟机上部署ClickHouse开始。 ClickHouse通常从 [deb](index.md#install-from-deb-packages) 或 [rpm](index.md#from-rpm-packages) 包安装,对于不支持它们的操作系统,也有 [替代方案](index.md#from-docker-image)。

-For example, you have chosen `deb` packages and executed:
+例如,假设您选择了 `deb` 包,则执行:

``` bash
sudo apt-get install dirmngr
@@ -24,48 +27,48 @@ sudo apt-get update
sudo apt-get install -y clickhouse-server clickhouse-client
```

-What do we have in the packages that got installed:
+安装的软件包中包含哪些内容:

-- `clickhouse-client` package contains [clickhouse-client](../interfaces/cli.md) application, interactive ClickHouse console client.
-- `clickhouse-common` package contains a ClickHouse executable file.
-- `clickhouse-server` package contains configuration files to run ClickHouse as a server.
+- `clickhouse-client` 包包含 [clickhouse-client](../interfaces/cli.md) 应用程序,即交互式ClickHouse控制台客户端。
+- `clickhouse-common` 包包含一个ClickHouse可执行文件。
+- `clickhouse-server` 包包含要作为服务器运行ClickHouse的配置文件。

-Server config files are located in `/etc/clickhouse-server/`. Before going further, please notice the `<path>` element in `config.xml`. Path determines the location for data storage, so it should be located on volume with large disk capacity; the default value is `/var/lib/clickhouse/`. If you want to adjust the configuration, it’s not handy to directly edit `config.xml` file, considering it might get rewritten on future package updates. The recommended way to override the config elements is to create [files in config.d directory](../operations/configuration_files.md) which serve as “patches” to config.xml.
+服务器配置文件位于 `/etc/clickhouse-server/`。 在进一步讨论之前,请注意 `config.xml` 中的 `<path>` 元素。 它决定了数据存储的位置,因此应该位于磁盘容量较大的卷上;默认值为 `/var/lib/clickhouse/`。 如果你想调整配置,考虑到 `config.xml` 文件可能会在未来的软件包更新中被重写,直接编辑它并不方便。 覆盖配置元素的推荐方法是在 [config.d目录中创建文件](../operations/configuration_files.md),它们可以作为 `config.xml` 的“补丁”。

-As you might have noticed, `clickhouse-server` is not launched automatically after package installation. It won’t be automatically restarted after updates, either. The way you start the server depends on your init system, usually, it is:
+你可能已经注意到, `clickhouse-server` 在安装软件包后不会自动启动, 它也不会在更新后自动重新启动。 启动服务器的方式取决于您的init系统,通常情况下是:

``` bash
sudo service clickhouse-server start
```

-or
+或

``` bash
sudo /etc/init.d/clickhouse-server start
```

-The default location for server logs is `/var/log/clickhouse-server/`. The server is ready to handle client connections once it logs the `Ready for connections` message.
+服务器日志的默认位置是 `/var/log/clickhouse-server/`。 一旦服务器记录了 `Ready for connections` 消息,它就已准备好处理客户端连接。

-Once the `clickhouse-server` is up and running, we can use `clickhouse-client` to connect to the server and run some test queries like `SELECT "Hello, world!";`.
+一旦 `clickhouse-server` 运行起来,我们就可以用 `clickhouse-client` 连接到服务器,并运行一些测试查询,如 `SELECT "Hello, world!";`。
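(补充示例,非本补丁原文:假设服务端已在本机以默认端口运行,可以用下面的命令快速验证连接是否正常。)

``` bash
# 验证服务端是否就绪(假设使用默认的 localhost:9000)
clickhouse-client --query "SELECT version()"
clickhouse-client --query "SELECT 'Hello, world!'"
```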
    -Quick tips for clickhouse-client -Interactive mode: +Clickhouse-客户端的快速提示 +交互模式: ``` bash clickhouse-client clickhouse-client --host=... --port=... --user=... --password=... ``` -Enable multiline queries: +启用多行查询: ``` bash clickhouse-client -m clickhouse-client --multiline ``` -Run queries in batch-mode: +以批处理模式运行查询: ``` bash clickhouse-client --query='SELECT 1' @@ -73,7 +76,7 @@ echo 'SELECT 1' | clickhouse-client clickhouse-client <<< 'SELECT 1' ``` -Insert data from a file in specified format: +从指定格式的文件中插入数据: ``` bash clickhouse-client --query='INSERT INTO table VALUES' < data.txt @@ -82,39 +85,39 @@ clickhouse-client --query='INSERT INTO table FORMAT TabSeparated' < data.tsv
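(补充示例,非本补丁原文:假设有一个第一行为列名的 CSV 文件,可以用 CSVWithNames 格式直接导入,列名会与表结构自动对应。)

``` bash
# 第一行为列名的 CSV 文件可以使用 CSVWithNames 格式插入
clickhouse-client --query="INSERT INTO table FORMAT CSVWithNames" < data.csv
```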
    -## Import Sample Dataset {#import-sample-dataset} +## 导入示例数据集 {#import-sample-dataset} -Now it’s time to fill our ClickHouse server with some sample data. In this tutorial, we’ll use the anonymized data of Yandex.Metrica, the first service that runs ClickHouse in production way before it became open-source (more on that in [history section](../introduction/history.md)). There are [multiple ways to import Yandex.Metrica dataset](example_datasets/metrica.md), and for the sake of the tutorial, we’ll go with the most realistic one. +现在是时候用一些示例数据填充我们的ClickHouse服务器。 在本教程中,我们将使用Yandex的匿名数据。Metrica,在成为开源之前以生产方式运行ClickHouse的第一个服务(更多关于这一点 [历史科](../introduction/history.md)). 有 [多种导入Yandex的方式。梅里卡数据集](example_datasets/metrica.md),为了本教程,我们将使用最现实的一个。 -### Download and Extract Table Data {#download-and-extract-table-data} +### 下载并提取表数据 {#download-and-extract-table-data} ``` bash curl https://clickhouse-datasets.s3.yandex.net/hits/tsv/hits_v1.tsv.xz | unxz --threads=`nproc` > hits_v1.tsv curl https://clickhouse-datasets.s3.yandex.net/visits/tsv/visits_v1.tsv.xz | unxz --threads=`nproc` > visits_v1.tsv ``` -The extracted files are about 10GB in size. +提取的文件大小约为10GB。 -### Create Tables {#create-tables} +### 创建表 {#create-tables} -As in most databases management systems, ClickHouse logically groups tables into “databases”. There’s a `default` database, but we’ll create a new one named `tutorial`: +与大多数数据库管理系统一样,ClickHouse在逻辑上将表分组为 “databases”. 有一个 `default` 数据库,但我们将创建一个名为新的 `tutorial`: ``` bash clickhouse-client --query "CREATE DATABASE IF NOT EXISTS tutorial" ``` -Syntax for creating tables is way more complicated compared to databases (see [reference](../query_language/create.md). In general `CREATE TABLE` statement has to specify three key things: +与数据库相比,创建表的语法要复杂得多(请参阅 [参考资料](../sql_reference/statements/create.md). 一般 `CREATE TABLE` 声明必须指定三个关键的事情: -1. Name of table to create. -2. Table schema, i.e. list of columns and their [data types](../data_types/index.md). -3. [Table engine](../operations/table_engines/index.md) and it’s settings, which determines all the details on how queries to this table will be physically executed. +1. 要创建的表的名称。 +2. Table schema, i.e. list of columns and their [数据类型](../sql_reference/data_types/index.md). +3. [表引擎](../engines/table_engines/index.md) 它是settings,它决定了如何物理执行对此表的查询的所有细节。 -Yandex.Metrica is a web analytics service, and sample dataset doesn’t cover its full functionality, so there are only two tables to create: +YandexMetrica是一个网络分析服务,样本数据集不包括其全部功能,因此只有两个表可以创建: -- `hits` is a table with each action done by all users on all websites covered by the service. -- `visits` is a table that contains pre-built sessions instead of individual actions. +- `hits` 是一个表格,其中包含所有用户在服务所涵盖的所有网站上完成的每个操作。 +- `visits` 是一个包含预先构建的会话而不是单个操作的表。 -Let’s see and execute the real create table queries for these tables: +让我们看看并执行这些表的实际创建表查询: ``` sql CREATE TABLE tutorial.hits_v1 @@ -457,22 +460,22 @@ SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192 ``` -You can execute those queries using the interactive mode of `clickhouse-client` (just launch it in a terminal without specifying a query in advance) or try some [alternative interface](../interfaces/index.md) if you want. +您可以使用以下交互模式执行这些查询 `clickhouse-client` (只需在终端中启动它,而不需要提前指定查询)或尝试一些 [替代接口](../interfaces/index.md) 如果你愿意的话 -As we can see, `hits_v1` uses the [basic MergeTree engine](../operations/table_engines/mergetree.md), while the `visits_v1` uses the [Collapsing](../operations/table_engines/collapsingmergetree.md) variant. 
+正如我们所看到的, `hits_v1` 使用 [基本MergeTree引擎](../engines/table_engines/mergetree_family/mergetree.md),而 `visits_v1` 使用 [崩溃](../engines/table_engines/mergetree_family/collapsingmergetree.md) 变体。 -### Import Data {#import-data} +### 导入数据 {#import-data} -Data import to ClickHouse is done via [INSERT INTO](../query_language/insert_into.md) query like in many other SQL databases. However, data is usually provided in one of the [supported serialization formats](../interfaces/formats.md) instead of `VALUES` clause (which is also supported). +数据导入到ClickHouse是通过以下方式完成的 [INSERT INTO](../sql_reference/statements/insert_into.md) 查询像许多其他SQL数据库。 然而,数据通常是在一个提供 [支持的序列化格式](../interfaces/formats.md) 而不是 `VALUES` 子句(也支持)。 -The files we downloaded earlier are in tab-separated format, so here’s how to import them via console client: +我们之前下载的文件是以制表符分隔的格式,所以这里是如何通过控制台客户端导入它们: ``` bash clickhouse-client --query "INSERT INTO tutorial.hits_v1 FORMAT TSV" --max_insert_block_size=100000 < hits_v1.tsv clickhouse-client --query "INSERT INTO tutorial.visits_v1 FORMAT TSV" --max_insert_block_size=100000 < visits_v1.tsv ``` -ClickHouse has a lot of [settings to tune](../operations/settings/index.md) and one way to specify them in console client is via arguments, as we can see with `--max_insert_block_size`. The easiest way to figure out what settings are available, what do they mean and what the defaults are is to query the `system.settings` table: +ClickHouse有很多 [要调整的设置](../operations/settings/index.md) 在控制台客户端中指定它们的一种方法是通过参数,我们可以看到 `--max_insert_block_size`. 找出可用的设置,它们意味着什么以及默认值的最简单方法是查询 `system.settings` 表: ``` sql SELECT name, value, changed, description @@ -483,23 +486,23 @@ FORMAT TSV max_insert_block_size 1048576 0 "The maximum block size for insertion, if we control the creation of blocks for insertion." ``` -Optionally you can [OPTIMIZE](../query_language/misc/#misc_operations-optimize) the tables after import. Tables that are configured with an engine from MergeTree-family always do merges of data parts in the background to optimize data storage (or at least check if it makes sense). These queries force the table engine to do storage optimization right now instead of some time later: +您也可以 [OPTIMIZE](../sql_reference/statements/misc.md#misc_operations-optimize) 导入后的表。 使用MergeTree-family引擎配置的表总是在后台合并数据部分以优化数据存储(或至少检查是否有意义)。 这些查询强制表引擎立即进行存储优化,而不是稍后进行一段时间: ``` bash clickhouse-client --query "OPTIMIZE TABLE tutorial.hits_v1 FINAL" clickhouse-client --query "OPTIMIZE TABLE tutorial.visits_v1 FINAL" ``` -These queries start an I/O and CPU intensive operation, so if the table consistently receives new data, it’s better to leave it alone and let merges run in the background. +这些查询开始一个I/O和CPU密集型操作,所以如果表一直接收到新数据,最好不要管它,让合并在后台运行。 -Now we can check if the table import was successful: +现在我们可以检查表导入是否成功: ``` bash clickhouse-client --query "SELECT COUNT(*) FROM tutorial.hits_v1" clickhouse-client --query "SELECT COUNT(*) FROM tutorial.visits_v1" ``` -## Example Queries {#example-queries} +## 查询示例 {#example-queries} ``` sql SELECT @@ -521,18 +524,18 @@ FROM tutorial.visits_v1 WHERE (CounterID = 912887) AND (toYYYYMM(StartDate) = 201403) AND (domain(StartURL) = 'yandex.ru') ``` -## Cluster Deployment {#cluster-deployment} +## 集群部署 {#cluster-deployment} -ClickHouse cluster is a homogenous cluster. Steps to set up: +ClickHouse集群是一个同质集群。 设置步骤: -1. Install ClickHouse server on all machines of the cluster -2. Set up cluster configs in configuration files -3. Create local tables on each instance -4. 
Create a [Distributed table](../operations/table_engines/distributed.md) +1. 在群集的所有计算机上安装ClickHouse服务器 +2. 在配置文件中设置群集配置 +3. 在每个实例上创建本地表 +4. 创建一个 [分布式表](../engines/table_engines/special/distributed.md) -[Distributed table](../operations/table_engines/distributed.md) is actually a kind of “view” to local tables of ClickHouse cluster. SELECT query from a distributed table executes using resources of all cluster’s shards. You may specify configs for multiple clusters and create multiple distributed tables providing views to different clusters. +[分布式表](../engines/table_engines/special/distributed.md) 实际上是一种 “view” 到ClickHouse集群的本地表。 从分布式表中选择查询使用集群所有分片的资源执行。 您可以为多个集群指定configs,并创建多个分布式表,为不同的集群提供视图。 -Example config for a cluster with three shards, one replica each: +具有三个分片的集群的示例配置,每个分片一个副本: ``` xml @@ -559,37 +562,37 @@ Example config for a cluster with three shards, one replica each: ``` -For further demonstration, let’s create a new local table with the same `CREATE TABLE` query that we used for `hits_v1`, but different table name: +为了进一步演示,让我们创建一个新的本地表 `CREATE TABLE` 我们用于查询 `hits_v1`,但不同的表名: ``` sql CREATE TABLE tutorial.hits_local (...) ENGINE = MergeTree() ... ``` -Creating a distributed table providing a view into local tables of the cluster: +创建提供集群本地表视图的分布式表: ``` sql CREATE TABLE tutorial.hits_all AS tutorial.hits_local ENGINE = Distributed(perftest_3shards_1replicas, tutorial, hits_local, rand()); ``` -A common practice is to create similar Distributed tables on all machines of the cluster. It allows running distributed queries on any machine of the cluster. Also there’s an alternative option to create temporary distributed table for a given SELECT query using [remote](../query_language/table_functions/remote.md) table function. +常见的做法是在集群的所有计算机上创建类似的分布式表。 它允许在群集的任何计算机上运行分布式查询。 还有一个替代选项可以使用以下方法为给定的SELECT查询创建临时分布式表 [远程](../sql_reference/table_functions/remote.md) 表功能。 -Let’s run [INSERT SELECT](../query_language/insert_into.md) into the Distributed table to spread the table to multiple servers. +我们走吧 [INSERT SELECT](../sql_reference/statements/insert_into.md) 将该表传播到多个服务器。 ``` sql INSERT INTO tutorial.hits_all SELECT * FROM tutorial.hits_v1; ``` -!!! warning "Notice" - This approach is not suitable for the sharding of large tables. There’s a separate tool [clickhouse-copier](../operations/utils/clickhouse-copier.md) that can re-shard arbitrary large tables. +!!! warning "碌莽禄Notice:" + 这种方法不适合大型表的分片。 有一个单独的工具 [ツ环板-ョツ嘉ッツ偲](../operations/utilities/clickhouse-copier.md) 这可以重新分片任意大表。 -As you could expect, computationally heavy queries run N times faster if they utilize 3 servers instead of one. +正如您所期望的那样,如果计算量大的查询使用3台服务器而不是一个,则运行速度快N倍。 -In this case, we have used a cluster with 3 shards, and each contains a single replica. +在这种情况下,我们使用了具有3个分片的集群,每个分片都包含一个副本。 -To provide resilience in a production environment, we recommend that each shard should contain 2-3 replicas spread between multiple availability zones or datacenters (or at least racks). Note that ClickHouse supports an unlimited number of replicas. +为了在生产环境中提供弹性,我们建议每个分片应包含分布在多个可用区或数据中心(或至少机架)之间的2-3个副本。 请注意,ClickHouse支持无限数量的副本。 -Example config for a cluster of one shard containing three replicas: +包含三个副本的一个分片集群的示例配置: ``` xml @@ -613,12 +616,12 @@ Example config for a cluster of one shard containing three replicas: ``` -To enable native replication [ZooKeeper](http://zookeeper.apache.org/) is required. ClickHouse takes care of data consistency on all replicas and runs restore procedure after failure automatically. 
It’s recommended to deploy the ZooKeeper cluster on separate servers (where no other processes including ClickHouse are running). +启用本机复制 [动物园管理员](http://zookeeper.apache.org/) 是必需的。 ClickHouse负责所有副本的数据一致性,并在失败后自动运行恢复过程。 建议将ZooKeeper集群部署在单独的服务器上(其中没有其他进程,包括ClickHouse正在运行)。 -!!! note "Note" - ZooKeeper is not a strict requirement: in some simple cases, you can duplicate the data by writing it into all the replicas from your application code. This approach is **not** recommended, in this case, ClickHouse won’t be able to guarantee data consistency on all replicas. Thus it becomes the responsibility of your application. +!!! note "注" + ZooKeeper不是一个严格的requirement:在某些简单的情况下,您可以通过将数据写入应用程序代码中的所有副本来复制数据。 这种方法是 **不** 建议,在这种情况下,ClickHouse将无法保证所有副本上的数据一致性。 因此,它成为您的应用程序的责任。 -ZooKeeper locations are specified in the configuration file: +ZooKeeper位置在配置文件中指定: ``` xml @@ -637,7 +640,7 @@ ZooKeeper locations are specified in the configuration file: ``` -Also, we need to set macros for identifying each shard and replica which are used on table creation: +此外,我们需要设置宏来识别每个用于创建表的分片和副本: ``` xml @@ -646,7 +649,7 @@ Also, we need to set macros for identifying each shard and replica which are use ``` -If there are no replicas at the moment on replicated table creation, a new first replica is instantiated. If there are already live replicas, the new replica clones data from existing ones. You have an option to create all replicated tables first, and then insert data to it. Another option is to create some replicas and add the others after or during data insertion. +如果在创建复制表时没有副本,则会实例化新的第一个副本。 如果已有实时副本,则新副本将克隆现有副本中的数据。 您可以选择首先创建所有复制的表,然后向其中插入数据。 另一种选择是创建一些副本,并在数据插入之后或期间添加其他副本。 ``` sql CREATE TABLE tutorial.hits_replica (...) @@ -657,12 +660,12 @@ ENGINE = ReplcatedMergeTree( ... ``` -Here we use [ReplicatedMergeTree](../operations/table_engines/replication.md) table engine. In parameters we specify ZooKeeper path containing shard and replica identifiers. +在这里,我们使用 [ReplicatedMergeTree](../engines/table_engines/mergetree_family/replication.md) 表引擎。 在参数中,我们指定包含分片和副本标识符的ZooKeeper路径。 ``` sql INSERT INTO tutorial.hits_replica SELECT * FROM tutorial.hits_local; ``` -Replication operates in multi-master mode. Data can be loaded into any replica, and the system then syncs it with other instances automatically. Replication is asynchronous so at a given moment, not all replicas may contain recently inserted data. At least one replica should be up to allow data ingestion. Others will sync up data and repair consistency once they will become active again. Note that this approach allows for the low possibility of a loss of recently inserted data. 
+复制以多主(multi-master)模式运行。 数据可以加载到任何副本中,系统会自动将其与其他实例同步。 复制是异步的,因此在给定时刻,并非所有副本都一定包含最近插入的数据。 至少需要有一个副本在线才能进行数据摄取, 其余副本会在重新变为活跃状态后同步数据并修复一致性。 请注意,这种方法存在较低的丢失最近插入数据的可能性。

-[Original article](https://clickhouse.tech/docs/en/getting_started/tutorial/)
+[原始文章](https://clickhouse.tech/docs/en/getting_started/tutorial/)
diff --git a/docs/zh/guides/apply_catboost_model.md b/docs/zh/guides/apply_catboost_model.md
index 62eb386147f..4ac7d926961 100644
--- a/docs/zh/guides/apply_catboost_model.md
+++ b/docs/zh/guides/apply_catboost_model.md
@@ -1,40 +1,43 @@
---
-en_copy: true
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_priority: 41
+toc_title: "\u5E94\u7528CatBoost\u6A21\u578B"
---

-# Applying a Catboost Model in ClickHouse {#applying-catboost-model-in-clickhouse}
+# 在ClickHouse中应用CatBoost模型 {#applying-catboost-model-in-clickhouse}

-[CatBoost](https://catboost.ai) is a free and open-source gradient boosting library developed at [Yandex](https://yandex.com/company/) for machine learning.
+[CatBoost](https://catboost.ai) 是由 [Yandex](https://yandex.com/company/) 开发的一个自由、开源的梯度提升(gradient boosting)机器学习库。

-With this instruction, you will learn to apply pre-trained models in ClickHouse by running model inference from SQL.
+通过本指南,您将学习如何通过从SQL运行模型推理,在ClickHouse中应用预训练模型。

-To apply a CatBoost model in ClickHouse:
+在ClickHouse中应用CatBoost模型的步骤:

-1. [Create a Table](#create-table).
-2. [Insert the Data to the Table](#insert-data-to-table).
-3. [Integrate CatBoost into ClickHouse](#integrate-catboost-into-clickhouse) (Optional step).
-4. [Run the Model Inference from SQL](#run-model-inference).
+1. [创建表](#create-table).
+2. [将数据插入到表中](#insert-data-to-table).
+3. [将CatBoost集成到ClickHouse中](#integrate-catboost-into-clickhouse) (可选步骤)。
+4. [从SQL运行模型推理](#run-model-inference).

-For more information about training CatBoost models, see [Training and applying models](https://catboost.ai/docs/features/training.html#training).
+有关训练CatBoost模型的详细信息,请参阅 [训练和应用模型](https://catboost.ai/docs/features/training.html#training)。

-## Prerequisites {#prerequisites}
+## 先决条件 {#prerequisites}

-If you don’t have the [Docker](https://docs.docker.com/install/) yet, install it.
+如果你还没有安装 [Docker](https://docs.docker.com/install/),请先安装它。

-!!! note "Note"
-    [Docker](https://www.docker.com) is a software platform that allows you to create containers that isolate a CatBoost and ClickHouse installation from the rest of the system.
+!!! note "注"
+    [Docker](https://www.docker.com) 是一个软件平台,允许您创建容器,将CatBoost和ClickHouse的安装与系统的其余部分隔离。

-Before applying a CatBoost model:
+在应用CatBoost模型之前:

-**1.** Pull the [Docker image](https://hub.docker.com/r/yandex/tutorial-catboost-clickhouse) from the registry:
+**1.** 从注册表拉取 [Docker镜像](https://hub.docker.com/r/yandex/tutorial-catboost-clickhouse):

``` bash
$ docker pull yandex/tutorial-catboost-clickhouse
```

-This Docker image contains everything you need to run CatBoost and ClickHouse: code, runtime, libraries, environment variables, and configuration files.
+此Docker镜像包含运行CatBoost和ClickHouse所需的所有内容:代码、运行时、库、环境变量和配置文件。

-**2.** Make sure the Docker image has been successfully pulled:
+**2.** 确保已成功拉取Docker镜像:

``` bash
$ docker image ls
@@ -42,26 +45,26 @@ REPOSITORY                            TAG                 IMAGE ID            CR
yandex/tutorial-catboost-clickhouse   latest              622e4d17945b        22 hours ago        1.37GB
```

-**3.** Start a Docker container based on this image:
+**3.** 基于此镜像启动一个Docker容器:

``` bash
$ docker run -it -p 8888:8888 yandex/tutorial-catboost-clickhouse
```

-## 1. Create a Table {#create-table}
+## 1.
创建表 {#create-table} -To create a ClickHouse table for the training sample: +为训练样本创建ClickHouse表: -**1.** Start ClickHouse console client in the interactive mode: +**1.** 在交互模式下启动ClickHouse控制台客户端: ``` bash $ clickhouse client ``` -!!! note "Note" - The ClickHouse server is already running inside the Docker container. +!!! note "注" + ClickHouse服务器已经在Docker容器内运行。 -**2.** Create the table using the command: +**2.** 使用以下命令创建表: ``` sql :) CREATE TABLE amazon_train @@ -81,29 +84,29 @@ $ clickhouse client ENGINE = MergeTree ORDER BY date ``` -**3.** Exit from ClickHouse console client: +**3.** 从ClickHouse控制台客户端退出: ``` sql :) exit ``` -## 2. Insert the Data to the Table {#insert-data-to-table} +## 2. 将数据插入到表中 {#insert-data-to-table} -To insert the data: +插入数据: -**1.** Run the following command: +**1.** 运行以下命令: ``` bash $ clickhouse client --host 127.0.0.1 --query 'INSERT INTO amazon_train FORMAT CSVWithNames' < ~/amazon/train.csv ``` -**2.** Start ClickHouse console client in the interactive mode: +**2.** 在交互模式下启动ClickHouse控制台客户端: ``` bash $ clickhouse client ``` -**3.** Make sure the data has been uploaded: +**3.** 确保数据已上传: ``` sql :) SELECT count() FROM amazon_train @@ -113,27 +116,27 @@ FROM amazon_train +-count()-+ | 65538 | -+---------+ ++-------+ ``` -## 3. Integrate CatBoost into ClickHouse {#integrate-catboost-into-clickhouse} +## 3. 碌莽禄into拢Integrate010-68520682\ {#integrate-catboost-into-clickhouse} -!!! note "Note" - **Optional step.** The Docker image contains everything you need to run CatBoost and ClickHouse. +!!! note "注" + **可选步骤。** Docker映像包含运行CatBoost和ClickHouse所需的所有内容。 -To integrate CatBoost into ClickHouse: +碌莽禄to拢integrate010-68520682\: -**1.** Build the evaluation library. +**1.** 构建评估库。 -The fastest way to evaluate a CatBoost model is compile `libcatboostmodel.` library. For more information about how to build the library, see [CatBoost documentation](https://catboost.ai/docs/concepts/c-plus-plus-api_dynamic-c-pluplus-wrapper.html). +评估CatBoost模型的最快方法是编译 `libcatboostmodel.` 图书馆. 有关如何构建库的详细信息,请参阅 [CatBoost文件](https://catboost.ai/docs/concepts/c-plus-plus-api_dynamic-c-pluplus-wrapper.html). -**2.** Create a new directory anywhere and with any name, for example, `data` and put the created library in it. The Docker image already contains the library `data/libcatboostmodel.so`. +**2.** 例如,在任何地方和任何名称创建一个新目录, `data` 并将创建的库放入其中。 Docker映像已经包含了库 `data/libcatboostmodel.so`. -**3.** Create a new directory for config model anywhere and with any name, for example, `models`. +**3.** 例如,在任何地方和任何名称为config model创建一个新目录, `models`. -**4.** Create a model configuration file with any name, for example, `models/amazon_model.xml`. +**4.** 创建具有任意名称的模型配置文件,例如, `models/amazon_model.xml`. -**5.** Describe the model configuration: +**5.** 描述模型配置: ``` xml @@ -150,7 +153,7 @@ The fastest way to evaluate a CatBoost model is compile `libcatboostmodel. ``` -**6.** Add the path to CatBoost and the model configuration to the ClickHouse configuration: +**6.** 将CatBoost的路径和模型配置添加到ClickHouse配置: ``` xml @@ -158,11 +161,11 @@ The fastest way to evaluate a CatBoost model is compile `libcatboostmodel./home/catboost/models/*_model.xml ``` -## 4. Run the Model Inference from SQL {#run-model-inference} +## 4. 从SQL运行模型推理 {#run-model-inference} -For test model run the ClickHouse client `$ clickhouse client`. +对于测试模型,运行ClickHouse客户端 `$ clickhouse client`. -Let’s make sure that the model is working: +让我们确保模型正常工作: ``` sql :) SELECT @@ -181,10 +184,10 @@ FROM amazon_train LIMIT 10 ``` -!!! 
note "Note" - Function [modelEvaluate](../query_language/functions/other_functions.md#function-modelevaluate) returns tuple with per-class raw predictions for multiclass models. +!!! note "注" + 功能 [模型值](../sql_reference/functions/other_functions.md#function-modelevaluate) 返回带有多类模型的每类原始预测的元组。 -Let’s predict the probability: +让我们预测一下: ``` sql :) SELECT @@ -204,10 +207,10 @@ FROM amazon_train LIMIT 10 ``` -!!! note "Note" - More info about [exp()](../query_language/functions/math_functions.md) function. +!!! note "注" + 更多信息 [exp()](../sql_reference/functions/math_functions.md) 功能。 -Let’s calculate LogLoss on the sample: +让我们计算样本的LogLoss: ``` sql :) SELECT -avg(tg * log(prob) + (1 - tg) * log(1 - prob)) AS logloss @@ -230,7 +233,7 @@ FROM ) ``` -!!! note "Note" - More info about [avg()](../query_language/agg_functions/reference.md#agg_function-avg) and [log()](../query_language/functions/math_functions.md) functions. +!!! note "注" + 更多信息 [avg()](../sql_reference/aggregate_functions/reference.md#agg_function-avg) 和 [日志()](../sql_reference/functions/math_functions.md) 功能。 -[Original article](https://clickhouse.tech/docs/en/guides/apply_catboost_model/) +[原始文章](https://clickhouse.tech/docs/en/guides/apply_catboost_model/) diff --git a/docs/zh/guides/index.md b/docs/zh/guides/index.md index c1968730961..00fe071434d 100644 --- a/docs/zh/guides/index.md +++ b/docs/zh/guides/index.md @@ -1,12 +1,16 @@ --- -en_copy: true +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_folder_title: "\u6307\u5357" +toc_priority: 38 +toc_title: "\u6982\u8FF0" --- -# ClickHouse Guides {#clickhouse-guides} +# ClickHouse指南 {#clickhouse-guides} -List of detailed step-by-step instructions that help to solve various tasks using ClickHouse: +详细的一步一步的说明,帮助解决使用ClickHouse的各种任务列表: -- [Tutorial on simple cluster set-up](../getting_started/tutorial.md) -- [Applying a CatBoost model in ClickHouse](apply_catboost_model.md) +- [简单集群设置教程](../getting_started/tutorial.md) +- [在ClickHouse中应用CatBoost模型](apply_catboost_model.md) -[Original article](https://clickhouse.tech/docs/en/guides/) +[原始文章](https://clickhouse.tech/docs/en/guides/) diff --git a/docs/zh/index.md b/docs/zh/index.md index b10fafebe93..cb9ccf0420a 100644 --- a/docs/zh/index.md +++ b/docs/zh/index.md @@ -1,36 +1,41 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +--- + # 什么是ClickHouse? 
{#shi-yao-shi-clickhouse}

ClickHouse是一个用于联机分析(OLAP)的列式数据库管理系统(DBMS)。

在传统的行式数据库系统中,数据按如下顺序存储:

-| Row | WatchID     | JavaEnable | Title              | GoodEvent | EventTime           |
-|-----|-------------|------------|--------------------|-----------|---------------------|
-| \#0 | 89354350662 | 1          | Investor Relations | 1         | 2016-05-18 05:19:20 |
-| \#1 | 90329509958 | 0          | Contact us         | 1         | 2016-05-18 08:10:20 |
-| \#2 | 89953706054 | 1          | Mission            | 1         | 2016-05-18 07:38:00 |
-| \#N | …           | …          | …                  | …         | …                   |
+| 行  | WatchID     | JavaEnable | Title      | GoodEvent | EventTime           |
+|-----|-------------|------------|------------|-----------|---------------------|
+| \#0 | 89354350662 | 1          | 投资者关系 | 1         | 2016-05-18 05:19:20 |
+| \#1 | 90329509958 | 0          | 联系我们   | 1         | 2016-05-18 08:10:20 |
+| \#2 | 89953706054 | 1          | 任务       | 1         | 2016-05-18 07:38:00 |
+| \#N | …           | …          | …          | …         | …                   |

处于同一行中的数据总是被物理的存储在一起。

常见的行式数据库系统有: MySQL、Postgres和MS SQL Server。
-{: .grey }
+{: .grey }

在列式数据库系统中,数据按如下的顺序存储:

-| Row:        | \#0                 | \#1                 | \#2                 | \#N |
+| 行:         | \#0                 | \#1                 | \#2                 | \#N |
|-------------|---------------------|---------------------|---------------------|-----|
-| WatchID:    | 89354350662         | 90329509958         | 89953706054         | …   |
+| WatchID:    | 89354350662         | 90329509958         | 89953706054         | …   |
| JavaEnable: | 1                   | 0                   | 1                   | …   |
-| Title:      | Investor Relations  | Contact us          | Mission             | …   |
+| Title:      | 投资者关系          | 联系我们            | 任务                | …   |
| GoodEvent:  | 1                   | 1                   | 1                   | …   |
-| EventTime:  | 2016-05-18 05:19:20 | 2016-05-18 08:10:20 | 2016-05-18 07:38:00 | …   |
+| EventTime:  | 2016-05-18 05:19:20 | 2016-05-18 08:10:20 | 2016-05-18 07:38:00 | …   |

该示例中只展示了数据在列式数据库中数据的排列顺序。
对于存储而言,列式数据库总是将同一列的数据存储在一起,不同列的数据也总是分开存储。

常见的列式数据库有: Vertica、 Paraccel (Actian Matrix,Amazon Redshift)、 Sybase IQ、 Exasol、 Infobright、 InfiniDB、 MonetDB (VectorWise, Actian Vector)、 LucidDB、 SAP HANA、 Google Dremel、 Google PowerDrill、 Druid、 kdb+。
-{: .grey }
+{: .grey }

不同的存储方式适合不同的场景,这里的查询场景包括: 进行了哪些查询,多久查询一次以及各类查询的比例; 每种查询读取多少数据————行、列和字节;读取数据和写入数据之间的关系;使用的数据集大小以及如何使用本地的数据集;是否使用事务,以及它们是如何进行隔离的;数据的复制机制与数据的完整性要求;每种类型的查询要求的延迟与吞吐量等等。

@@ -68,7 +73,7 @@ ClickHouse是一个用于联机分析(OLAP)的列式数据库管理系统(DBMS)

 看到差别了么?下面将详细介绍为什么会发生这种情况。

-### Input/output {#inputoutput}
+### 输入/输出 {#inputoutput}

 1. 针对分析类查询,通常只需要读取表的一小部分列。在列式数据库中你可以只读取你需要的数据。例如,如果只需要读取100列中的5列,这将帮助你最少减少20倍的I/O消耗。
 2. 由于数据总是打包成批量读取的,所以压缩是非常容易的。同时数据按列分别存储这也更容易压缩。这进一步降低了I/O的体积。
diff --git a/docs/zh/interfaces/cli.md b/docs/zh/interfaces/cli.md
index 7e858ce458b..fef8e404aef 100644
--- a/docs/zh/interfaces/cli.md
+++ b/docs/zh/interfaces/cli.md
@@ -1,3 +1,4 @@
+
 # 命令行客户端 {#ming-ling-xing-ke-hu-duan}

 通过命令行来访问 ClickHouse,您可以使用 `clickhouse-client`
@@ -48,7 +49,7 @@ cat file.csv | clickhouse-client --database=test --query="INSERT INTO test FORMA
 命令行客户端是基于 `replxx`。换句话说,它可以使用我们熟悉的快捷键方式来操作以及保留历史命令。 历史命令会写入在 `~/.clickhouse-client-history` 中。

-默认情况下,输出的格式是 `PrettyCompact`。您可以通过 FORMAT 设置根据不同查询来修改格式,或者通过在查询末尾指定 `\G` 字符,或通过在命令行中使用 `--format` or `--vertical` 参数,或使用客户端的配置文件。
+默认情况下,输出的格式是 `PrettyCompact`。您可以通过 FORMAT 设置根据不同查询来修改格式,或者通过在查询末尾指定 `\G` 字符,或通过在命令行中使用 `--format` 或 `--vertical` 参数,或使用客户端的配置文件。

 若要退出客户端,使用 Ctrl+D (或 Ctrl+C),或者输入以下其中一个命令:`exit`, `quit`, `logout`, `учше`, `йгше`, `дщпщге`, `exit;`, `quit;`, `logout;`, `учшеж`, `йгшеж`, `дщпщгеж`, `q`, `й`, `q`, `Q`, `:q`, `й`, `Й`, `Жй`
@@ -61,7 +62,7 @@ cat file.csv | clickhouse-client --database=test --query="INSERT INTO test FORMA

 您可以通过 Ctrl+C 来取消一个长时间的查询。然而,您依然需要等待服务端来中止请求。在某个阶段去取消查询是不可能的。如果您不等待并再次按下 Ctrl + C,客户端将会退出。

-命令行客户端允许通过外部数据 (外部临时表) 来查询。更多相关信息,请参考 «[外部数据查询处理](../operations/table_engines/external_data.md)».
+命令行客户端允许通过外部数据 (外部临时表) 来查询。更多相关信息,请参考 «[外部数据查询处理](../engines/table_engines/special/external_data.md)»。
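(补充示例,非本补丁原文:下面的片段演示外部临时表的典型用法,文件名与表结构均为假设。)

``` bash
# 将本地文件 ids.tsv 作为名为 ext 的外部临时表传入查询
clickhouse-client --external --file=ids.tsv --name=ext --structure="id UInt64" \
    --query="SELECT count() FROM ext"
```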
+命令行客户端允许通过外部数据 (外部临时表) 来查询。更多相关信息,请参考 «[外部数据查询处理](../engines/table_engines/special/external_data.md)». ## 配置 {#interfaces_cli_configuration} diff --git a/docs/zh/interfaces/cpp.md b/docs/zh/interfaces/cpp.md index 6f162036e01..4aa4f15a456 100644 --- a/docs/zh/interfaces/cpp.md +++ b/docs/zh/interfaces/cpp.md @@ -1,5 +1,6 @@ + # C ++客户端库 {#c-ke-hu-duan-ku} -请参阅以下网站的自述文件[clickhouse-cpp](https://github.com/ClickHouse/clickhouse-cpp)资料库。 +请参阅以下网站的自述文件[ツ暗ェツ氾环催ツ団](https://github.com/ClickHouse/clickhouse-cpp)资料库。 -[Original article](https://clickhouse.tech/docs/zh/interfaces/cpp/) +[原始文章](https://clickhouse.tech/docs/zh/interfaces/cpp/) diff --git a/docs/zh/interfaces/formats.md b/docs/zh/interfaces/formats.md index 80ca5fdf221..64c1940df86 100644 --- a/docs/zh/interfaces/formats.md +++ b/docs/zh/interfaces/formats.md @@ -1,3 +1,4 @@ + # 输入输出格式 {#formats} ClickHouse 可以接受多种数据格式,可以在 (`INSERT`) 以及 (`SELECT`) 请求中使用。 @@ -10,19 +11,19 @@ ClickHouse 可以接受多种数据格式,可以在 (`INSERT`) 以及 (`SELECT | [TabSeparatedRaw](#tabseparatedraw) | ✗ | ✔ | | [TabSeparatedWithNames](#tabseparatedwithnames) | ✔ | ✔ | | [TabSeparatedWithNamesAndTypes](#tabseparatedwithnamesandtypes) | ✔ | ✔ | -| [Template](#format-template) | ✔ | ✔ | +| [模板](#format-template) | ✔ | ✔ | | [TemplateIgnoreSpaces](#templateignorespaces) | ✔ | ✗ | | [CSV](#csv) | ✔ | ✔ | | [CSVWithNames](#csvwithnames) | ✔ | ✔ | -| [CustomSeparated](#format-customseparated) | ✔ | ✔ | -| [Values](#data-format-values) | ✔ | ✔ | -| [Vertical](#vertical) | ✗ | ✔ | +| [自定义分离](#format-customseparated) | ✔ | ✔ | +| [值](#data-format-values) | ✔ | ✔ | +| [垂直](#vertical) | ✗ | ✔ | | VerticalRaw | ✗ | ✔ | | [JSON](#json) | ✗ | ✔ | | [JSONCompact](#jsoncompact) | ✗ | ✔ | | [JSONEachRow](#jsoneachrow) | ✔ | ✔ | | [TSKV](#tskv) | ✔ | ✔ | -| [Pretty](#pretty) | ✗ | ✔ | +| [漂亮](#pretty) | ✗ | ✔ | | [PrettyCompact](#prettycompact) | ✗ | ✔ | | [PrettyCompactMonoBlock](#prettycompactmonoblock) | ✗ | ✔ | | [PrettyNoEscapes](#prettynoescapes) | ✗ | ✔ | @@ -30,11 +31,11 @@ ClickHouse 可以接受多种数据格式,可以在 (`INSERT`) 以及 (`SELECT | [Protobuf](#protobuf) | ✔ | ✔ | | [Avro](#data-format-avro) | ✔ | ✔ | | [AvroConfluent](#data-format-avro-confluent) | ✔ | ✗ | -| [Parquet](#data-format-parquet) | ✔ | ✔ | +| [镶木地板](#data-format-parquet) | ✔ | ✔ | | [ORC](#data-format-orc) | ✔ | ✗ | | [RowBinary](#rowbinary) | ✔ | ✔ | | [RowBinaryWithNamesAndTypes](#rowbinarywithnamesandtypes) | ✔ | ✔ | -| [Native](#native) | ✔ | ✔ | +| [本地人](#native) | ✔ | ✔ | | [Null](#null) | ✗ | ✔ | | [XML](#xml) | ✗ | ✔ | | [CapnProto](#capnproto) | ✔ | ✔ | @@ -70,7 +71,7 @@ SELECT EventDate, count() AS c FROM test.hits GROUP BY EventDate WITH TOTALS ORD 整数以十进制形式写入。数字在开头可以包含额外的 `+` 字符(解析时忽略,格式化时不记录)。非负数不能包含负号。 读取时,允许将空字符串解析为零,或者(对于带符号的类型)将仅包含负号的字符串解析为零。 不符合相应数据类型的数字可能会被解析为不同的数字,而不会显示错误消息。 -浮点数以十进制形式写入。点号用作小数点分隔符。支持指数等符号,如’inf’,‘+ inf’,‘-inf’和’nan’。 浮点数的输入可以以小数点开始或结束。 +浮点数以十进制形式写入。点号用作小数点分隔符。支持指数等符号,如'inf',‘+ inf’,‘-inf’和’nan’。 浮点数的输入可以以小数点开始或结束。 格式化的时候,浮点数的精确度可能会丢失。 解析的时候,没有严格需要去读取与机器可以表示的最接近的数值。 @@ -96,7 +97,7 @@ SELECT EventDate, count() AS c FROM test.hits GROUP BY EventDate WITH TOTALS ORD 数组写在方括号内的逗号分隔值列表中。 通常情况下,数组中的数字项目会被拼凑,但日期,带时间的日期以及字符串将使用与上面相同的转义规则用单引号引起来。 -[NULL](../query_language/syntax.md) 将输出为 `\N`。 +[NULL](../sql_reference/syntax.md) 将输出为 `\N`。 ## TabSeparatedRaw {#tabseparatedraw} @@ -120,13 +121,13 @@ SELECT EventDate, count() AS c FROM test.hits GROUP BY EventDate WITH TOTALS ORD 这种格式也可以使用名称 `TSVWithNamesAndTypes` 来表示。 -## Template {#format-template} +## 模板 {#format-template} -This format allows to specify a 
custom format string with placeholders for values with specified escaping rule. +此格式允许为具有指定转义规则的值指定带有占位符的自定义格式字符串。 -It uses settings `format_schema`, `format_schema_rows`, `format_schema_rows_between_delimiter` and some settings of other formats (e.g. `output_format_json_quote_64bit_integers` when using `JSON` escaping, see further) +它使用设置 `format_schema`, `format_schema_rows`, `format_schema_rows_between_delimiter` and some settings of other formats (e.g. `output_format_json_quote_64bit_integers` 使用时 `JSON` 逃跑,进一步查看) -Format string `format_schema_rows` specifies rows format with the following syntax: +格式字符串 `format_schema_rows` 使用以下语法指定行格式: `delimiter_1${column_1:serializeAs_1}delimiter_2${column_2:serializeAs_2} ... delimiter_N`, @@ -150,25 +151,25 @@ Format string `format_schema_rows` specifies rows format with the following synt `Search phrase: 'bathroom interior design', count: 2166, ad price: $3;` -The `format_schema_rows_between_delimiter` setting specifies delimiter between rows, which is printed (or expected) after every row except the last one (`\n` by default) +该 `format_schema_rows_between_delimiter` setting指定行之间的分隔符,该分隔符在除最后一行之外的每一行之后打印(或预期) (`\n` 默认情况下) -Format string `format_schema` has the same syntax as `format_schema_rows` and allows to specify a prefix, a suffix and a way to print some additional information. It contains the following placeholders instead of column names: +格式字符串 `format_schema` 具有相同的语法 `format_schema_rows` 并允许指定前缀,后缀和打印一些附加信息的方式。 它包含以下占位符而不是列名: -- `data` is the rows with data in `format_schema_rows` format, separated by `format_schema_rows_between_delimiter`. This placeholder must be the first placeholder in the format string. -- `totals` is the row with total values in `format_schema_rows` format (when using WITH TOTALS) -- `min` is the row with minimum values in `format_schema_rows` format (when extremes is set to 1) -- `max` is the row with maximum values in `format_schema_rows` format (when extremes is set to 1) -- `rows` is the total number of output rows -- `rows_before_limit` is the minimal number of rows there would have been without LIMIT. Output only if the query contains LIMIT. If the query contains GROUP BY, rows\_before\_limit\_at\_least is the exact number of rows there would have been without a LIMIT. -- `time` is the request execution time in seconds -- `rows_read` is the number of rows have been read -- `bytes_read` is the number of bytes (uncompressed) have been read +- `data` 包含数据的行 `format_schema_rows` 格式,由分隔 `format_schema_rows_between_delimiter`. 此占位符必须是格式字符串中的第一个占位符。 +- `totals` 是包含总值的行 `format_schema_rows` 格式(与总计一起使用时) +- `min` 是具有最小值的行 `format_schema_rows` 格式(当极值设置为1时) +- `max` 是具有最大值的行 `format_schema_rows` 格式(当极值设置为1时) +- `rows` 输出行总数 +- `rows_before_limit` 是没有限制的最小行数。 仅当查询包含LIMIT时输出。 如果查询包含GROUP BY,则rows\_before\_limit\_at\_least是没有限制的确切行数。 +- `time` 请求执行时间以秒为单位 +- `rows_read` 已读取的行数 +- `bytes_read` 被读取的字节数(未压缩) -The placeholders `data`, `totals`, `min` and `max` must not have escaping rule specified (or `None` must be specified explicitly). The remaining placeholders may have any escaping rule specified. -If the `format_schema` setting is an empty string, `${data}` is used as default value. -For insert queries format allows to skip some columns or some fields if prefix or suffix (see example). 
+占位符 `data`, `totals`, `min` 和 `max` 必须没有指定转义规则(或 `None` 必须明确指定)。 其余的占位符可能具有指定的任何转义规则。 +如果 `format_schema` 设置为空字符串, `${data}` 用作默认值。 +对于插入查询格式允许跳过一些列或一些字段,如果前缀或后缀(见示例)。 -`Select` example: +`Select` 示例: ``` sql SELECT SearchPhrase, count() AS c FROM test.hits GROUP BY SearchPhrase ORDER BY c DESC LIMIT 5 @@ -210,7 +211,7 @@ format_schema_rows_between_delimiter = '\n ' ``` -`Insert` example: +`Insert` 示例: Some header Page views: 5, User id: 4324182021466249494, Useless field: hello, Duration: 146, Sign: -1 @@ -223,14 +224,14 @@ format_schema = 'Some header\n${data}\nTotal rows: ${:CSV}\n', format_schema_rows = 'Page views: ${PageViews:CSV}, User id: ${UserID:CSV}, Useless field: ${:CSV}, Duration: ${Duration:CSV}, Sign: ${Sign:CSV}' ``` -`PageViews`, `UserID`, `Duration` and `Sign` inside placeholders are names of columns in the table. Values after `Useless field` in rows and after `\nTotal rows:` in suffix will be ignored. -All delimiters in the input data must be strictly equal to delimiters in specified format strings. +`PageViews`, `UserID`, `Duration` 和 `Sign` 占位符内部是表中列的名称。 值后 `Useless field` 在行和之后 `\nTotal rows:` in后缀将被忽略。 +输入数据中的所有分隔符必须严格等于指定格式字符串中的分隔符。 ## TemplateIgnoreSpaces {#templateignorespaces} -This format is suitable only for input. -Similar to `Template`, but skips whitespace characters between delimiters and values in the input stream. However, if format strings contain whitespace characters, these characters will be expected in the input stream. Also allows to specify empty placeholders (`${}` or `${:None}`) to split some delimiter into separate parts to ignore spaces between them. Such placeholders are used only for skipping whitespace characters. -It’s possible to read `JSON` using this format, if values of columns have the same order in all rows. For example, the following request can be used for inserting data from output example of format [JSON](#json): +此格式仅适用于输入。 +类似于 `Template`,但跳过输入流中的分隔符和值之间的空格字符。 但是,如果格式字符串包含空格字符,则在输入流中将需要这些字符。 还允许指定空占位符 (`${}` 或 `${:None}`)将一些分隔符分成单独的部分,以忽略它们之间的空格。 此类占位符仅用于跳过空格字符。 +可以阅读 `JSON` 如果列的值在所有行中具有相同的顺序,则使用此格式。 例如,以下请求可用于从格式的输出示例中插入数据 [JSON](#json): ``` sql INSERT INTO table_name FORMAT TemplateIgnoreSpaces SETTINGS @@ -254,7 +255,7 @@ format_schema_rows_between_delimiter = ',' SearchPhrase=curtain designs count()=1064 SearchPhrase=baku count()=1000 -[NULL](../query_language/syntax.md) 输出为 `\N`。 +[NULL](../sql_reference/syntax.md) 输出为 `\N`。 ``` sql SELECT * FROM t_null FORMAT TSKV @@ -288,10 +289,10 @@ CSV 格式是和 TabSeparated 一样的方式输出总数和极值。 会输出带头部行,和 `TabSeparatedWithNames` 一样。 -## CustomSeparated {#format-customseparated} +## 自定义分离 {#format-customseparated} -Similar to [Template](#format-template), but it prints or reads all columns and uses escaping rule from setting `format_custom_escaping_rule` and delimiters from settings `format_custom_field_delimiter`, `format_custom_row_before_delimiter`, `format_custom_row_after_delimiter`, `format_custom_row_between_delimiter`, `format_custom_result_before_delimiter` and `format_custom_result_after_delimiter`, not from format strings. -There is also `CustomSeparatedIgnoreSpaces` format, which is similar to `TemplateIgnoreSpaces`. 
+类似于 [模板](#format-template),但它打印或读取所有列,并使用从设置转义规则 `format_custom_escaping_rule` 从设置和分隔符 `format_custom_field_delimiter`, `format_custom_row_before_delimiter`, `format_custom_row_after_delimiter`, `format_custom_row_between_delimiter`, `format_custom_result_before_delimiter` 和 `format_custom_result_after_delimiter`,而不是从格式字符串。 +也有 `CustomSeparatedIgnoreSpaces` 格式,这是类似于 `TemplateIgnoreSpaces`. ## JSON {#json} @@ -378,7 +379,7 @@ JSON 与 JavaScript 兼容。为了确保这一点,一些字符被另外转义 该格式仅适用于输出查询结果,但不适用于解析输入(将数据插入到表中)。 -ClickHouse 支持 [NULL](../query_language/syntax.md), 在 JSON 格式中以 `null` 输出来表示. +ClickHouse 支持 [NULL](../sql_reference/syntax.md), 在 JSON 格式中以 `null` 输出来表示. 参考 JSONEachRow 格式。 @@ -449,23 +450,23 @@ ClickHouse 支持 [NULL](../query_language/syntax.md), 在 JSON 格式中以 `nu 对于解析,任何顺序都支持不同列的值。可以省略某些值 - 它们被视为等于它们的默认值。在这种情况下,零和空行被用作默认值。 作为默认值,不支持表中指定的复杂值。元素之间的空白字符被忽略。如果在对象之后放置逗号,它将被忽略。对象不一定必须用新行分隔。 -### Usage of Nested Structures {#jsoneachrow-nested} +### 嵌套结构的使用 {#jsoneachrow-nested} -If you have a table with the [Nested](../data_types/nested_data_structures/nested.md) data type columns, you can insert JSON data having the same structure. Enable this functionality with the [input\_format\_import\_nested\_json](../operations/settings/settings.md#settings-input_format_import_nested_json) setting. +如果你有一张桌子 [嵌套式](../sql_reference/data_types/nested_data_structures/nested.md) 数据类型列,可以插入具有相同结构的JSON数据。 启用此功能与 [input\_format\_import\_nested\_json](../operations/settings/settings.md#settings-input_format_import_nested_json) 设置。 -For example, consider the following table: +例如,请考虑下表: ``` sql CREATE TABLE json_each_row_nested (n Nested (s String, i Int32) ) ENGINE = Memory ``` -As you can find in the `Nested` data type description, ClickHouse treats each component of the nested structure as a separate column, `n.s` and `n.i` for our table. So you can insert the data the following way: +正如你可以在找到 `Nested` 数据类型说明,ClickHouse将嵌套结构的每个组件视为单独的列, `n.s` 和 `n.i` 为了我们的桌子 所以你可以通过以下方式插入数据: ``` sql INSERT INTO json_each_row_nested FORMAT JSONEachRow {"n.s": ["abc", "def"], "n.i": [1, 23]} ``` -To insert data as hierarchical JSON object set [input\_format\_import\_nested\_json=1](../operations/settings/settings.md#settings-input_format_import_nested_json). +将数据作为分层JSON对象集插入 [input\_format\_import\_nested\_json=1](../operations/settings/settings.md#settings-input_format_import_nested_json). ``` json { @@ -476,7 +477,7 @@ To insert data as hierarchical JSON object set [input\_format\_import\_nested\_j } ``` -Without this setting ClickHouse throws the exception. 
+如果没有此设置,ClickHouse将引发异常。 ``` sql SELECT name, value FROM system.settings WHERE name = 'input_format_import_nested_json' @@ -508,7 +509,7 @@ SELECT * FROM json_each_row_nested └───────────────┴────────┘ ``` -## Native {#native} +## 本地人 {#native} 最高性能的格式。 据通过二进制格式的块进行写入和读取。对于每个块,该块中的行数,列数,列名称和类型以及列的部分将被相继记录。 换句话说,这种格式是 «列式»的 - 它不会将列转换为行。 这是用于在服务器之间进行交互的本地界面中使用的格式,用于使用命令行客户端和 C++ 客户端。 @@ -519,13 +520,13 @@ SELECT * FROM json_each_row_nested 没有输出。但是,查询已处理完毕,并且在使用命令行客户端时,数据将传输到客户端。这仅用于测试,包括生产力测试。 显然,这种格式只适用于输出,不适用于解析。 -## Pretty {#pretty} +## 漂亮 {#pretty} 将数据以表格形式输出,也可以使用 ANSI 转义字符在终端中设置颜色。 它会绘制一个完整的表格,每行数据在终端中占用两行。 每一个结果块都会以单独的表格输出。这是很有必要的,以便结果块不用缓冲结果输出(缓冲在可以预见结果集宽度的时候是很有必要的)。 -[NULL](../query_language/syntax.md) 输出为 `ᴺᵁᴸᴸ`。 +[NULL](../sql_reference/syntax.md) 输出为 `ᴺᵁᴸᴸ`。 ``` sql SELECT * FROM t_null @@ -610,29 +611,29 @@ FixedString 被简单地表示为一个字节序列。 数组表示为 varint 长度(无符号 [LEB128](https://en.wikipedia.org/wiki/LEB128)),后跟有序的数组元素。 -对于 [NULL](../query_language/syntax.md#null-literal) 的支持, 一个为 1 或 0 的字节会加在每个 [Nullable](../data_types/nullable.md) 值前面。如果为 1, 那么该值就是 `NULL`。 如果为 0,则不为 `NULL`。 +对于 [NULL](../sql_reference/syntax.md#null-literal) 的支持, 一个为 1 或 0 的字节会加在每个 [可为空](../sql_reference/data_types/nullable.md) 值前面。如果为 1, 那么该值就是 `NULL`。 如果为 0,则不为 `NULL`。 ## RowBinaryWithNamesAndTypes {#rowbinarywithnamesandtypes} -Similar to [RowBinary](#rowbinary), but with added header: +类似于 [RowBinary](#rowbinary),但添加了标题: -- [LEB128](https://en.wikipedia.org/wiki/LEB128)-encoded number of columns (N) -- N `String`s specifying column names -- N `String`s specifying column types +- [LEB128](https://en.wikipedia.org/wiki/LEB128)-编码列数(N) +- N `String`s指定列名 +- N `String`s指定列类型 -## Values {#data-format-values} +## 值 {#data-format-values} -在括号中打印每一行。行由逗号分隔。最后一行之后没有逗号。括号内的值也用逗号分隔。数字以十进制格式输出,不含引号。 数组以方括号输出。带有时间的字符串,日期和时间用引号包围输出。转义字符的解析规则与 [TabSeparated](#tabseparated) 格式类似。 在格式化过程中,不插入额外的空格,但在解析过程中,空格是被允许并跳过的(除了数组值之外的空格,这是不允许的)。[NULL](../query_language/syntax.md) 为 `NULL`。 +在括号中打印每一行。行由逗号分隔。最后一行之后没有逗号。括号内的值也用逗号分隔。数字以十进制格式输出,不含引号。 数组以方括号输出。带有时间的字符串,日期和时间用引号包围输出。转义字符的解析规则与 [TabSeparated](#tabseparated) 格式类似。 在格式化过程中,不插入额外的空格,但在解析过程中,空格是被允许并跳过的(除了数组值之外的空格,这是不允许的)。[NULL](../sql_reference/syntax.md) 为 `NULL`。 以 Values 格式传递数据时需要转义的最小字符集是:单引号和反斜线。 这是 `INSERT INTO t VALUES ...` 中可以使用的格式,但您也可以将其用于查询结果。 -## Vertical {#vertical} +## 垂直 {#vertical} 使用指定的列名在单独的行上打印每个值。如果每行都包含大量列,则此格式便于打印一行或几行。 -[NULL](../query_language/syntax.md) 输出为 `ᴺᵁᴸᴸ`。 +[NULL](../sql_reference/syntax.md) 输出为 `ᴺᵁᴸᴸ`。 示例: @@ -747,9 +748,9 @@ SELECT * FROM t_null FORMAT Vertical ## CapnProto {#capnproto} -Cap’n Proto 是一种二进制消息格式,类似 Protocol Buffers 和 Thriftis,但与 JSON 或 MessagePack 格式不一样。 +Cap'n Proto 是一种二进制消息格式,类似 Protocol Buffers 和 Thriftis,但与 JSON 或 MessagePack 格式不一样。 -Cap’n Proto 消息格式是严格类型的,而不是自我描述,这意味着它们不需要外部的描述。这种格式可以实时地应用,并针对每个查询进行缓存。 +Cap'n Proto 消息格式是严格类型的,而不是自我描述,这意味着它们不需要外部的描述。这种格式可以实时地应用,并针对每个查询进行缓存。 ``` sql SELECT SearchPhrase, count() AS c FROM test.hits @@ -763,18 +764,18 @@ SELECT SearchPhrase, count() AS c FROM test.hits c @1 :Uint64; } -格式文件存储的目录可以在服务配置中的 [format\_schema\_path](../operations/server_settings/settings.md) 指定。 +格式文件存储的目录可以在服务配置中的 [format\_schema\_path](../operations/server_configuration_parameters/settings.md) 指定。 -Cap’n Proto 反序列化是很高效的,通常不会增加系统的负载。 +Cap'n Proto 反序列化是很高效的,通常不会增加系统的负载。 ## Protobuf {#protobuf} -Protobuf - is a [Protocol Buffers](https://developers.google.com/protocol-buffers/) format. +Protobuf-是一个 [协议缓冲区](https://developers.google.com/protocol-buffers/) 格式。 -This format requires an external format schema. 
The schema is cached between queries. -ClickHouse supports both `proto2` and `proto3` syntaxes. Repeated/optional/required fields are supported. +此格式需要外部格式架构。 在查询之间缓存架构。 +ClickHouse支持 `proto2` 和 `proto3` 语法 支持重复/可选/必填字段。 -Usage examples: +使用示例: ``` sql SELECT * FROM test.table FORMAT Protobuf SETTINGS format_schema = 'schemafile:MessageType' @@ -784,7 +785,7 @@ SELECT * FROM test.table FORMAT Protobuf SETTINGS format_schema = 'schemafile:Me cat protobuf_messages.bin | clickhouse-client --query "INSERT INTO test.table FORMAT Protobuf SETTINGS format_schema='schemafile:MessageType'" ``` -where the file `schemafile.proto` looks like this: +哪里的文件 `schemafile.proto` 看起来像这样: ``` capnp syntax = "proto3"; @@ -797,11 +798,11 @@ message MessageType { }; ``` -To find the correspondence between table columns and fields of Protocol Buffers’ message type ClickHouse compares their names. -This comparison is case-insensitive and the characters `_` (underscore) and `.` (dot) are considered as equal. -If types of a column and a field of Protocol Buffers’ message are different the necessary conversion is applied. +要查找协议缓冲区的消息类型的表列和字段之间的对应关系,ClickHouse比较它们的名称。 +这种比较是不区分大小写和字符 `_` (下划线)和 `.` (点)被认为是相等的。 +如果协议缓冲区消息的列和字段的类型不同,则应用必要的转换。 -Nested messages are supported. For example, for the field `z` in the following message type +支持嵌套消息。 例如,对于字段 `z` 在下面的消息类型 ``` capnp message MessageType { @@ -815,10 +816,10 @@ message MessageType { }; ``` -ClickHouse tries to find a column named `x.y.z` (or `x_y_z` or `X.y_Z` and so on). -Nested messages are suitable to input or output a [nested data structures](../data_types/nested_data_structures/nested.md). +ClickHouse尝试找到一个名为 `x.y.z` (或 `x_y_z` 或 `X.y_Z` 等)。 +嵌套消息适用于输入或输出一个 [嵌套数据结构](../sql_reference/data_types/nested_data_structures/nested.md). -Default values defined in a protobuf schema like this +在protobuf模式中定义的默认值,如下所示 ``` capnp syntax = "proto2"; @@ -828,91 +829,91 @@ message MessageType { } ``` -are not applied; the [table defaults](../query_language/create.md#create-default-values) are used instead of them. +不应用;该 [表默认值](../sql_reference/statements/create.md#create-default-values) 用来代替它们。 -ClickHouse inputs and outputs protobuf messages in the `length-delimited` format. -It means before every message should be written its length as a [varint](https://developers.google.com/protocol-buffers/docs/encoding#varints). -See also [how to read/write length-delimited protobuf messages in popular languages](https://cwiki.apache.org/confluence/display/GEODE/Delimiting+Protobuf+Messages). +ClickHouse在输入和输出protobuf消息 `length-delimited` 格式。 +这意味着每个消息之前,应该写它的长度作为一个 [varint](https://developers.google.com/protocol-buffers/docs/encoding#varints). +另请参阅 [如何在流行语言中读取/写入长度分隔的protobuf消息](https://cwiki.apache.org/confluence/display/GEODE/Delimiting+Protobuf+Messages). ## Avro {#data-format-avro} -[Apache Avro](http://avro.apache.org/) is a row-oriented data serialization framework developed within Apache’s Hadoop project. +[Apache Avro](http://avro.apache.org/) 是在Apache Hadoop项目中开发的面向行的数据序列化框架。 -ClickHouse Avro format supports reading and writing [Avro data files](http://avro.apache.org/docs/current/spec.html#Object+Container+Files). +ClickHouse Avro格式支持读取和写入 [Avro数据文件](http://avro.apache.org/docs/current/spec.html#Object+Container+Files). 
-### Data Types Matching {#data_types-matching} +### 数据类型匹配{\#sql\_reference/data\_types-matching} {#data-types-matching-sql_referencedata_types-matching} -The table below shows supported data types and how they match ClickHouse [data types](../data_types/index.md) in `INSERT` and `SELECT` queries. +下表显示了支持的数据类型以及它们如何匹配ClickHouse [数据类型](../sql_reference/data_types/index.md) 在 `INSERT` 和 `SELECT` 查询。 -| Avro data type `INSERT` | ClickHouse data type | Avro data type `SELECT` | -|---------------------------------------------|-------------------------------------------------------------------------------------------|------------------------------| -| `boolean`, `int`, `long`, `float`, `double` | [Int(8\|16\|32)](../data_types/int_uint.md), [UInt(8\|16\|32)](../data_types/int_uint.md) | `int` | -| `boolean`, `int`, `long`, `float`, `double` | [Int64](../data_types/int_uint.md), [UInt64](../data_types/int_uint.md) | `long` | -| `boolean`, `int`, `long`, `float`, `double` | [Float32](../data_types/float.md) | `float` | -| `boolean`, `int`, `long`, `float`, `double` | [Float64](../data_types/float.md) | `double` | -| `bytes`, `string`, `fixed`, `enum` | [String](../data_types/string.md) | `bytes` | -| `bytes`, `string`, `fixed` | [FixedString(N)](../data_types/fixedstring.md) | `fixed(N)` | -| `enum` | [Enum(8\|16)](../data_types/enum.md) | `enum` | -| `array(T)` | [Array(T)](../data_types/array.md) | `array(T)` | -| `union(null, T)`, `union(T, null)` | [Nullable(T)](../data_types/date.md) | `union(null, T)` | -| `null` | [Nullable(Nothing)](../data_types/special_data_types/nothing.md) | `null` | -| `int (date)` \* | [Date](../data_types/date.md) | `int (date)` \* | -| `long (timestamp-millis)` \* | [DateTime64(3)](../data_types/datetime.md) | `long (timestamp-millis)` \* | -| `long (timestamp-micros)` \* | [DateTime64(6)](../data_types/datetime.md) | `long (timestamp-micros)` \* | +| Avro数据类型 `INSERT` | ClickHouse数据类型 | Avro数据类型 `SELECT` | +|---------------------------------------------|-------------------------------------------------------------------------------------------------------------------|------------------------------| +| `boolean`, `int`, `long`, `float`, `double` | [Int(8/16/32)](../sql_reference/data_types/int_uint.md), [UInt(8/16/32)](../sql_reference/data_types/int_uint.md) | `int` | +| `boolean`, `int`, `long`, `float`, `double` | [Int64](../sql_reference/data_types/int_uint.md), [UInt64](../sql_reference/data_types/int_uint.md) | `long` | +| `boolean`, `int`, `long`, `float`, `double` | [Float32](../sql_reference/data_types/float.md) | `float` | +| `boolean`, `int`, `long`, `float`, `double` | [Float64](../sql_reference/data_types/float.md) | `double` | +| `bytes`, `string`, `fixed`, `enum` | [字符串](../sql_reference/data_types/string.md) | `bytes` | +| `bytes`, `string`, `fixed` | [固定字符串(N)](../sql_reference/data_types/fixedstring.md) | `fixed(N)` | +| `enum` | [枚举(8/16)](../sql_reference/data_types/enum.md) | `enum` | +| `array(T)` | [阵列(T)](../sql_reference/data_types/array.md) | `array(T)` | +| `union(null, T)`, `union(T, null)` | [可为空(T)](../sql_reference/data_types/date.md) | `union(null, T)` | +| `null` | [可为空(无)](../sql_reference/data_types/special_data_types/nothing.md) | `null` | +| `int (date)` \* | [日期](../sql_reference/data_types/date.md) | `int (date)` \* | +| `long (timestamp-millis)` \* | [DateTime64(3)](../sql_reference/data_types/datetime.md) | `long (timestamp-millis)` \* | +| `long (timestamp-micros)` \* | 
[DateTime64(6)](../sql_reference/data_types/datetime.md)                                           | `long (timestamp-micros)` \* |

\* [Avro逻辑类型](http://avro.apache.org/docs/current/spec.html#Logical+Types)

不支持的Avro数据类型: `record` (非根), `map`

不支持的Avro逻辑数据类型: `uuid`, `time-millis`, `time-micros`, `duration`

### 插入数据 {#inserting-data}

将Avro文件中的数据插入ClickHouse表:

``` bash
$ cat file.avro | clickhouse-client --query="INSERT INTO {some_table} FORMAT Avro"
```

输入Avro文件的根模式必须是 `record` 类型。

要查找Avro schema的表列和字段之间的对应关系,ClickHouse会比较它们的名称。 此比较区分大小写。
未使用的字段会被跳过。

ClickHouse表列的数据类型可能与插入的Avro数据的相应字段不同。 插入数据时,ClickHouse根据上表解释数据类型,然后将数据 [转换](../query_language/functions/type_conversion_functions/#type_conversion_function-cast) 为相应的列类型。

### 选择数据 {#selecting-data}

从ClickHouse表中选择数据到Avro文件:

``` bash
$ clickhouse-client --query="SELECT * FROM {some_table} FORMAT Avro" > file.avro
```

列名必须:

- 以 `[A-Za-z_]` 开头
- 随后只包含 `[A-Za-z0-9_]`

输出Avro文件的压缩方式和同步间隔可以分别通过 [output\_format\_avro\_codec](../operations/settings/settings.md#settings-output_format_avro_codec) 和 [output\_format\_avro\_sync\_interval](../operations/settings/settings.md#settings-output_format_avro_sync_interval) 配置。

## AvroConfluent {#data-format-avro-confluent}

AvroConfluent支持解码常与 [Kafka](https://kafka.apache.org/) 和 [Confluent模式注册表](https://docs.confluent.io/current/schema-registry/index.html) 一起使用的单对象(single-object)Avro消息。

每个Avro消息都嵌入了一个架构id,该架构id可以在架构注册表的帮助下解析为实际架构。

-Schemas are cached once resolved.
模式解析后会进行缓存。

架构注册表URL通过 [format\_avro\_schema\_registry\_url](../operations/settings/settings.md#settings-format_avro_schema_registry_url) 配置。

### 数据类型匹配 {#data_types-matching-1}

与 [Avro](#data-format-avro) 相同。

### 用法 {#usage}

要快速验证架构解析,您可以使用 [kafkacat](https://github.com/edenhill/kafkacat) 与 [clickhouse-local](../operations/utilities/clickhouse-local.md):

``` bash
$ kafkacat -b kafka-broker  -C -t topic1 -o beginning -f '%s' -c 3 | clickhouse-local   --input-format AvroConfluent --format_avro_schema_registry_url 'http://schema-registry' -S "field1 Int64, field2 String"  -q 'select *  from table'
1 a
2 b
3 c
```

将 `AvroConfluent` 与 [Kafka](../engines/table_engines/integrations/kafka.md) 一起使用:

``` sql
CREATE TABLE topic1_stream
@@ -941,123 +942,123 @@ SET format_avro_schema_registry_url = 'http://schema-registry';
SELECT * FROM topic1_stream;
```

-!!! note "Warning"
-    Setting `format_avro_schema_registry_url` needs to be configured in `users.xml` to maintain it’s value after a restart.
+!!! note "警告"
+    设置 `format_avro_schema_registry_url` 需要在 `users.xml` 中配置,以便在重启后保持其值。

-## Parquet {#data-format-parquet}
+## Parquet {#data-format-parquet}

-[Apache Parquet](http://parquet.apache.org/) is a columnar storage format widespread in the Hadoop ecosystem. ClickHouse supports read and write operations for this format.
+[Apache Parquet](http://parquet.apache.org/) 是Hadoop生态系统中普遍存在的列式存储格式。 ClickHouse支持此格式的读写操作。

-### Data Types Matching {#data_types-matching-2}
+### 数据类型匹配 {#data_types-matching-2}

-The table below shows supported data types and how they match ClickHouse [data types](../data_types/index.md) in `INSERT` and `SELECT` queries.
+下表显示了支持的数据类型,以及它们在 `INSERT` 和 `SELECT` 查询中如何匹配ClickHouse的 [数据类型](../sql_reference/data_types/index.md)。

-| Parquet data type (`INSERT`) | ClickHouse data type | Parquet data type (`SELECT`) |
-|------------------------------|---------------------------------------------|------------------------------|
-| `UINT8`, `BOOL` | [UInt8](../data_types/int_uint.md) | `UINT8` |
-| `INT8` | [Int8](../data_types/int_uint.md) | `INT8` |
-| `UINT16` | [UInt16](../data_types/int_uint.md) | `UINT16` |
-| `INT16` | [Int16](../data_types/int_uint.md) | `INT16` |
-| `UINT32` | [UInt32](../data_types/int_uint.md) | `UINT32` |
-| `INT32` | [Int32](../data_types/int_uint.md) | `INT32` |
-| `UINT64` | [UInt64](../data_types/int_uint.md) | `UINT64` |
-| `INT64` | [Int64](../data_types/int_uint.md) | `INT64` |
-| `FLOAT`, `HALF_FLOAT` | [Float32](../data_types/float.md) | `FLOAT` |
-| `DOUBLE` | [Float64](../data_types/float.md) | `DOUBLE` |
-| `DATE32` | [Date](../data_types/date.md) | `UINT16` |
-| `DATE64`, `TIMESTAMP` | [DateTime](../data_types/datetime.md) | `UINT32` |
-| `STRING`, `BINARY` | [String](../data_types/string.md) | `STRING` |
-| — | [FixedString](../data_types/fixedstring.md) | `STRING` |
-| `DECIMAL` | [Decimal](../data_types/decimal.md) | `DECIMAL` |
+| Parquet数据类型 (`INSERT`) | ClickHouse数据类型 | Parquet数据类型 (`SELECT`) |
+|---|---|---|
+| `UINT8`, `BOOL` | [UInt8](../sql_reference/data_types/int_uint.md) | `UINT8` |
+| `INT8` | [Int8](../sql_reference/data_types/int_uint.md) | `INT8` |
+| `UINT16` | [UInt16](../sql_reference/data_types/int_uint.md) | `UINT16` |
+| `INT16` | [Int16](../sql_reference/data_types/int_uint.md) | `INT16` |
+| `UINT32` | [UInt32](../sql_reference/data_types/int_uint.md) | `UINT32` |
+| `INT32` | [Int32](../sql_reference/data_types/int_uint.md) | `INT32` |
+| `UINT64` | [UInt64](../sql_reference/data_types/int_uint.md) | `UINT64` |
+| `INT64` | [Int64](../sql_reference/data_types/int_uint.md) | `INT64` |
+| `FLOAT`, `HALF_FLOAT` | [Float32](../sql_reference/data_types/float.md) | `FLOAT` |
+| `DOUBLE` | [Float64](../sql_reference/data_types/float.md) | `DOUBLE` |
+| `DATE32` | [日期](../sql_reference/data_types/date.md) | `UINT16` |
+| `DATE64`, `TIMESTAMP` | [日期时间](../sql_reference/data_types/datetime.md) | `UINT32` |
+| `STRING`, `BINARY` | [字符串](../sql_reference/data_types/string.md) | `STRING` |
+| — | [固定字符串](../sql_reference/data_types/fixedstring.md) | `STRING` |
+| `DECIMAL` | [十进制](../sql_reference/data_types/decimal.md) | `DECIMAL` |

-ClickHouse supports configurable precision of `Decimal` type. The `INSERT` query treats the Parquet `DECIMAL` type as the ClickHouse `Decimal128` type.
+ClickHouse支持可配置精度的 `Decimal` 类型。`INSERT` 查询将Parquet的 `DECIMAL` 类型视为ClickHouse的 `Decimal128` 类型。
-Unsupported Parquet data types: `DATE32`, `TIME32`, `FIXED_SIZE_BINARY`, `JSON`, `UUID`, `ENUM`.
+不支持的Parquet数据类型: `DATE32`, `TIME32`, `FIXED_SIZE_BINARY`, `JSON`, `UUID`, `ENUM`.
-Data types of a ClickHouse table columns can differ from the corresponding fields of the Parquet data inserted. When inserting data, ClickHouse interprets data types according to the table above and then [cast](../query_language/functions/type_conversion_functions/#type_conversion_function-cast) the data to that data type which is set for the ClickHouse table column.
+ClickHouse表列的数据类型可能与插入的Parquet数据的相应字段不同。插入数据时,ClickHouse根据上表解释数据类型,然后将数据 [转换](../query_language/functions/type_conversion_functions/#type_conversion_function-cast) 为ClickHouse表列所设置的数据类型。
-### Inserting and Selecting Data {#inserting-and-selecting-data}
+### 插入和选择数据 {#inserting-and-selecting-data}
-You can insert Parquet data from a file into ClickHouse table by the following command:
+您可以通过以下命令将文件中的Parquet数据插入到ClickHouse表中:

``` bash
$ cat {filename} | clickhouse-client --query="INSERT INTO {some_table} FORMAT Parquet"
```

-You can select data from a ClickHouse table and save them into some file in the Parquet format by the following command:
+您可以通过以下命令从ClickHouse表中选择数据,并将其保存为Parquet格式的文件:

``` bash
$ clickhouse-client --query="SELECT * FROM {some_table} FORMAT Parquet" > {some_file.pq}
```

-To exchange data with Hadoop, you can use [HDFS table engine](../operations/table_engines/hdfs.md).
+要与Hadoop交换数据,您可以使用 [HDFS表引擎](../engines/table_engines/integrations/hdfs.md),示例见下。
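下面是一个假设性示例(HDFS地址与文件路径仅作演示),展示如何创建一张通过HDFS表引擎读写Parquet文件的表:

``` sql
-- 假设性示例:指向 HDFS 上某个 Parquet 文件的表
CREATE TABLE hdfs_parquet (a Int32, b String)
ENGINE = HDFS('hdfs://namenode:9000/some_dir/data.parquet', 'Parquet')
```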
## ORC {#data-format-orc}

-[Apache ORC](https://orc.apache.org/) is a columnar storage format widespread in the Hadoop ecosystem. You can only insert data in this format to ClickHouse.
+[Apache ORC](https://orc.apache.org/) 是Hadoop生态系统中广泛使用的列式存储格式。此格式的数据只能插入到ClickHouse中。
-### Data Types Matching {#data_types-matching-3}
+### 数据类型匹配 {#data_types-matching-3}
-The table below shows supported data types and how they match ClickHouse [data types](../data_types/index.md) in `INSERT` queries.
+下表显示了支持的数据类型,以及它们在 `INSERT` 查询中如何匹配ClickHouse的 [数据类型](../sql_reference/data_types/index.md)。

-| ORC data type (`INSERT`) | ClickHouse data type |
-|--------------------------|---------------------------------------|
-| `UINT8`, `BOOL` | [UInt8](../data_types/int_uint.md) |
-| `INT8` | [Int8](../data_types/int_uint.md) |
-| `UINT16` | [UInt16](../data_types/int_uint.md) |
-| `INT16` | [Int16](../data_types/int_uint.md) |
-| `UINT32` | [UInt32](../data_types/int_uint.md) |
-| `INT32` | [Int32](../data_types/int_uint.md) |
-| `UINT64` | [UInt64](../data_types/int_uint.md) |
-| `INT64` | [Int64](../data_types/int_uint.md) |
-| `FLOAT`, `HALF_FLOAT` | [Float32](../data_types/float.md) |
-| `DOUBLE` | [Float64](../data_types/float.md) |
-| `DATE32` | [Date](../data_types/date.md) |
-| `DATE64`, `TIMESTAMP` | [DateTime](../data_types/datetime.md) |
-| `STRING`, `BINARY` | [String](../data_types/string.md) |
-| `DECIMAL` | [Decimal](../data_types/decimal.md) |
+| ORC数据类型 (`INSERT`) | ClickHouse数据类型 |
+|---|---|
+| `UINT8`, `BOOL` | [UInt8](../sql_reference/data_types/int_uint.md) |
+| `INT8` | [Int8](../sql_reference/data_types/int_uint.md) |
+| `UINT16` | [UInt16](../sql_reference/data_types/int_uint.md) |
+| `INT16` | [Int16](../sql_reference/data_types/int_uint.md) |
+| `UINT32` | [UInt32](../sql_reference/data_types/int_uint.md) |
+| `INT32` | [Int32](../sql_reference/data_types/int_uint.md) |
+| `UINT64` | [UInt64](../sql_reference/data_types/int_uint.md) |
+| `INT64` | [Int64](../sql_reference/data_types/int_uint.md) |
+| `FLOAT`, `HALF_FLOAT` | [Float32](../sql_reference/data_types/float.md) |
+| `DOUBLE` | [Float64](../sql_reference/data_types/float.md) |
+| `DATE32` | [日期](../sql_reference/data_types/date.md) |
+| `DATE64`, `TIMESTAMP` | [日期时间](../sql_reference/data_types/datetime.md) |
+| `STRING`, `BINARY` | [字符串](../sql_reference/data_types/string.md) |
+| `DECIMAL` | [十进制](../sql_reference/data_types/decimal.md) |

-ClickHouse supports configurable precision of the `Decimal` type. The `INSERT` query treats the ORC `DECIMAL` type as the ClickHouse `Decimal128` type.
+ClickHouse支持可配置精度的 `Decimal` 类型。`INSERT` 查询将ORC的 `DECIMAL` 类型视为ClickHouse的 `Decimal128` 类型。
-Unsupported ORC data types: `DATE32`, `TIME32`, `FIXED_SIZE_BINARY`, `JSON`, `UUID`, `ENUM`.
+不支持的ORC数据类型: `DATE32`, `TIME32`, `FIXED_SIZE_BINARY`, `JSON`, `UUID`, `ENUM`.
-The data types of ClickHouse table columns don’t have to match the corresponding ORC data fields. When inserting data, ClickHouse interprets data types according to the table above and then [casts](../query_language/functions/type_conversion_functions/#type_conversion_function-cast) the data to the data type set for the ClickHouse table column.
+ClickHouse表列的数据类型不必与相应的ORC数据字段匹配。插入数据时,ClickHouse根据上表解释数据类型,然后将数据 [转换](../query_language/functions/type_conversion_functions/#type_conversion_function-cast) 为ClickHouse表列所设置的数据类型。
-### Inserting Data {#inserting-data-1}
+### 插入数据 {#inserting-data-1}
-You can insert ORC data from a file into ClickHouse table by the following command:
+您可以通过以下命令将文件中的ORC数据插入到ClickHouse表中:

``` bash
$ cat filename.orc | clickhouse-client --query="INSERT INTO some_table FORMAT ORC"
```

-To exchange data with Hadoop, you can use [HDFS table engine](../operations/table_engines/hdfs.md).
+要与Hadoop交换数据,您可以使用 [HDFS表引擎](../engines/table_engines/integrations/hdfs.md)。
-## Format Schema {#formatschema}
+## 格式模式 {#formatschema}
-The file name containing the format schema is set by the setting `format_schema`.
-It’s required to set this setting when it is used one of the formats `Cap'n Proto` and `Protobuf`.
-The format schema is a combination of a file name and the name of a message type in this file, delimited by colon,
+包含格式模式的文件名由设置 `format_schema` 指定。
+使用 `Cap'n Proto` 或 `Protobuf` 格式时必须设置此项。
+格式模式是文件名与该文件中消息类型名称的组合,以冒号分隔,
e.g. `schemafile.proto:MessageType`.
-If the file has the standard extension for the format (for example, `.proto` for `Protobuf`),
-it can be omitted and in this case the format schema looks like `schemafile:MessageType`.
+如果文件具有该格式的标准扩展名(例如,`Protobuf` 对应 `.proto`),
+则扩展名可以省略,此时格式模式形如 `schemafile:MessageType`。
-If you input or output data via the [client](../interfaces/cli.md) in the interactive mode, the file name specified in the format schema
-can contain an absolute path or a path relative to the current directory on the client.
-If you use the client in the batch mode, the path to the schema must be relative due to security reasons.
+如果通过 [客户端](../interfaces/cli.md) 以交互模式输入或输出数据,格式模式中指定的文件名
+可以是绝对路径,也可以是相对于客户端当前目录的路径。
+如果以批处理模式使用客户端,出于安全原因,模式文件的路径必须是相对路径。
-If you input or output data via the [HTTP interface](../interfaces/http.md) the file name specified in the format schema
-should be located in the directory specified in [format\_schema\_path](../operations/server_settings/settings.md#server_settings-format_schema_path)
-in the server configuration.
+如果通过 [HTTP接口](../interfaces/http.md) 输入或输出数据,格式模式中指定的文件
+应位于服务器配置中 [format\_schema\_path](../operations/server_configuration_parameters/settings.md#server_configuration_parameters-format_schema_path)
+所指定的目录中。
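例如,下面是一个假设性示例(模式文件名与消息类型仅作演示),展示如何在导入时通过 `format_schema` 指定Protobuf模式:

``` bash
# 假设性示例:按 schemafile.proto 中的 MessageType 解析输入的 Protobuf 数据
$ cat data.bin | clickhouse-client --query="INSERT INTO {some_table} FORMAT Protobuf" \
    --format_schema='schemafile.proto:MessageType'
```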
-[Original article](https://clickhouse.tech/docs/en/interfaces/formats/)
+[原始文章](https://clickhouse.tech/docs/en/interfaces/formats/)
-## Skipping Errors {#skippingerrors}
+## 跳过错误 {#skippingerrors}
-Some formats such as `CSV`, `TabSeparated`, `TSKV`, `JSONEachRow`, `Template`, `CustomSeparated` and `Protobuf` can skip broken row if parsing error occurred and continue parsing from the beginning of next row. See [input\_format\_allow\_errors\_num](../operations/settings/settings.md#settings-input_format_allow_errors_num) and
-[input\_format\_allow\_errors\_ratio](../operations/settings/settings.md#settings-input_format_allow_errors_ratio) settings.
-Limitations:
-- In case of parsing error `JSONEachRow` skips all data until the new line (or EOF), so rows must be delimited by `\n` to count errors correctly.
-- `Template` and `CustomSeparated` use delimiter after the last column and delimiter between rows to find the beginning of next row, so skipping errors works only if at least one of them is not empty.
+`CSV`、`TabSeparated`、`TSKV`、`JSONEachRow`、`Template`、`CustomSeparated` 和 `Protobuf` 等格式在发生解析错误时可以跳过损坏的行,并从下一行的开头继续解析。参见 [input\_format\_allow\_errors\_num](../operations/settings/settings.md#settings-input_format_allow_errors_num) 和
+[input\_format\_allow\_errors\_ratio](../operations/settings/settings.md#settings-input_format_allow_errors_ratio) 设置(用法示例见此列表之后)。
+限制:
+- 发生解析错误时,`JSONEachRow` 会跳过直到换行符(或EOF)为止的所有数据,因此行必须以 `\n` 分隔才能正确统计错误数。
+- `Template` 和 `CustomSeparated` 依靠最后一列之后的分隔符以及行间分隔符来定位下一行的开头,因此只有当两者至少有一个不为空时,跳过错误才有效。
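下面是一个假设性示例(阈值取值仅作演示),展示如何在导入时容忍少量损坏行:

``` bash
# 假设性示例:导入 CSV 时最多允许 10 行、且不超过 10% 的解析错误
$ cat data.csv | clickhouse-client --query="INSERT INTO {some_table} FORMAT CSV" \
    --input_format_allow_errors_num=10 \
    --input_format_allow_errors_ratio=0.1
```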
[来源文章](https://clickhouse.tech/docs/zh/interfaces/formats/)
diff --git a/docs/zh/interfaces/http.md b/docs/zh/interfaces/http.md
index 1dfbe87b7e0..ca8a9076fba 100644
--- a/docs/zh/interfaces/http.md
+++ b/docs/zh/interfaces/http.md
@@ -1,3 +1,4 @@
+
 # HTTP 客户端 {#http-ke-hu-duan}
 HTTP 接口可以让你通过任何平台和编程语言来使用 ClickHouse。我们用 Java 和 Perl 以及 shell 脚本来访问它。在其他的部门中,HTTP 接口会用在 Perl,Python 以及 Go 中。HTTP 接口比 TCP 原生接口更为局限,但是却有更好的兼容性。
@@ -17,7 +18,7 @@ Ok.
 当使用 GET 方法请求时,`readonly` 会被设置。换句话说,若要作修改数据的查询,只能发送 POST 方法的请求。可以将查询通过 POST 主体发送,也可以通过 URL 参数发送。
-Examples:
+例如:

``` bash
$ curl 'http://localhost:8123/?query=SELECT%201'
```

@@ -200,7 +201,7 @@ $ echo 'SELECT number FROM system.numbers LIMIT 10' | curl 'http://localhost:812
 可选的 `quota_key` 参数可能当做 quota key 传入(或者任何字符串)。更多信息,参见 «[配额](../operations/quotas.md#quotas)» 部分。
-HTTP 接口允许传入额外的数据(外部临时表)来查询。更多信息,参见 «[外部数据查询处理](../operations/table_engines/external_data.md)» 部分。
+HTTP 接口允许传入额外的数据(外部临时表)来查询。更多信息,参见 «[外部数据查询处理](../engines/table_engines/special/external_data.md)» 部分。
## 响应缓冲 {#xiang-ying-huan-chong}
diff --git a/docs/zh/interfaces/index.md b/docs/zh/interfaces/index.md
index df0313cc3d2..a4131e833e7 100644
--- a/docs/zh/interfaces/index.md
+++ b/docs/zh/interfaces/index.md
@@ -1,3 +1,4 @@
+
 # 客户端 {#interfaces}
 ClickHouse提供了两个网络接口(两者都可以选择包装在TLS中以提高安全性):
diff --git a/docs/zh/interfaces/jdbc.md b/docs/zh/interfaces/jdbc.md
index a2aac229cca..932ab53b9af 100644
--- a/docs/zh/interfaces/jdbc.md
+++ b/docs/zh/interfaces/jdbc.md
@@ -1,8 +1,9 @@
+
 # JDBC 驱动 {#jdbc-qu-dong}
 - **[官方JDBC 的驱动](https://github.com/ClickHouse/clickhouse-jdbc)**
 - 三方提供的 JDBC 驱动:
-    - [ClickHouse-Native-JDBC](https://github.com/housepower/ClickHouse-Native-JDBC)
+    - [ClickHouse-Native-JDBC](https://github.com/housepower/ClickHouse-Native-JDBC)
     - [clickhouse4j](https://github.com/blynkkk/clickhouse4j)
[来源文章](https://clickhouse.tech/docs/zh/interfaces/jdbc/)
diff --git a/docs/zh/interfaces/mysql.md b/docs/zh/interfaces/mysql.md
index 668c0b7b9c3..8996ad6ae6f 100644
--- a/docs/zh/interfaces/mysql.md
+++ b/docs/zh/interfaces/mysql.md
@@ -1,22 +1,25 @@
 ---
-en_copy: true
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_priority: 20
+toc_title: "MySQL\u63A5\u53E3"
 ---
-# MySQL interface {#mysql-interface}
+# MySQL接口 {#mysql-interface}
-ClickHouse supports MySQL wire protocol. It can be enabled by [mysql\_port](../operations/server_settings/settings.md#server_settings-mysql_port) setting in configuration file:
+ClickHouse支持MySQL网络协议。可以通过在配置文件中设置 [mysql\_port](../operations/server_configuration_parameters/settings.md#server_configuration_parameters-mysql_port) 来启用它:

``` xml
<mysql_port>9004</mysql_port>
```

-Example of connecting using command-line tool `mysql`:
+使用命令行工具 `mysql` 连接的示例:

``` bash
$ mysql --protocol tcp -u default -P 9004
```

-Output if a connection succeeded:
+如果连接成功,则输出:

``` text
Welcome to the MySQL monitor.  Commands end with ; or \g.
```

@@ -34,13 +37,13 @@ Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
 mysql>
 ```

-For compatibility with all MySQL clients, it is recommended to specify user password with [double SHA1](../operations/settings/settings_users.md#password_double_sha1_hex) in configuration file.
-If user password is specified using [SHA256](../operations/settings/settings_users.md#password_sha256_hex), some clients won’t be able to authenticate (mysqljs and old versions of command-line tool mysql).
+为了与所有MySQL客户端兼容,建议在配置文件中使用 [双SHA1](../operations/settings/settings_users.md#password_double_sha1_hex) 哈希指定用户密码。
+如果使用 [SHA256](../operations/settings/settings_users.md#password_sha256_hex) 指定用户密码,部分客户端(mysqljs和旧版本的命令行工具mysql)将无法进行身份验证。
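下面是一个最小化的假设性片段(哈希值为占位符,需替换为实际计算结果),展示如何在 `users.xml` 中以双SHA1形式配置密码:

``` xml
<!-- 假设性示例:users.xml 片段,密码以双 SHA1 的十六进制形式给出 -->
<!-- 可用 shell 计算:echo -n "密码" | sha1sum | tr -d '-' | xxd -r -p | sha1sum | tr -d '-' -->
<users>
    <default>
        <password_double_sha1_hex>此处替换为双SHA1哈希值</password_double_sha1_hex>
    </default>
</users>
```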
-Restrictions:
+限制:
-- prepared queries are not supported
-- some data types are sent as strings
+- 不支持预处理查询(prepared statements)
+- 某些数据类型以字符串形式发送
-[Original article](https://clickhouse.tech/docs/en/interfaces/mysql/)
+[原始文章](https://clickhouse.tech/docs/en/interfaces/mysql/)
diff --git a/docs/zh/interfaces/odbc.md b/docs/zh/interfaces/odbc.md
index b45c54f8507..5cba3a499f1 100644
--- a/docs/zh/interfaces/odbc.md
+++ b/docs/zh/interfaces/odbc.md
@@ -1,3 +1,4 @@
+
 # ODBC 驱动 {#odbc-qu-dong}
 - ClickHouse官方有 ODBC 的驱动。 见 [这里](https://github.com/ClickHouse/clickhouse-odbc)。
diff --git a/docs/zh/interfaces/tcp.md b/docs/zh/interfaces/tcp.md
index b783a8c3959..b926a63c476 100644
--- a/docs/zh/interfaces/tcp.md
+++ b/docs/zh/interfaces/tcp.md
@@ -1,3 +1,4 @@
+
 # 原生客户端接口(TCP) {#yuan-sheng-ke-hu-duan-jie-kou-tcp}
 原生协议用于 [命令行客户端](cli.md)、分布式查询处理期间的服务器间通信,以及其他C++程序。不幸的是,原生ClickHouse协议还没有正式的规范,但可以通过ClickHouse源代码( [从这里开始](https://github.com/ClickHouse/ClickHouse/tree/master/src/Client) )和/或拦截并分析TCP流量对其进行逆向工程。
diff --git a/docs/zh/interfaces/third-party/client_libraries.md b/docs/zh/interfaces/third-party/client_libraries.md
index 4814ca5cf9a..8e48bb8735e 100644
--- a/docs/zh/interfaces/third-party/client_libraries.md
+++ b/docs/zh/interfaces/third-party/client_libraries.md
@@ -1,3 +1,4 @@
+
 # 第三方开发的库 {#di-san-fang-kai-fa-de-ku}
!!! warning "放弃"
@@ -5,46 +6,46 @@
 - Python
     - [infi.clickhouse\_orm](https://github.com/Infinidat/infi.clickhouse_orm)
-    - [clickhouse-driver](https://github.com/mymarilyn/clickhouse-driver)
-    - [clickhouse-client](https://github.com/yurial/clickhouse-client)
+    - [clickhouse-driver](https://github.com/mymarilyn/clickhouse-driver)
+    - [clickhouse-client](https://github.com/yurial/clickhouse-client)
 - PHP
     - [smi2/phpclickhouse](https://packagist.org/packages/smi2/phpClickHouse)
-    - [8bitov/clickhouse-php-client](https://packagist.org/packages/8bitov/clickhouse-php-client)
-    - [bozerkins/clickhouse-client](https://packagist.org/packages/bozerkins/clickhouse-client)
-    - [simpod/clickhouse-client](https://packagist.org/packages/simpod/clickhouse-client)
+    - [8bitov/clickhouse-php-client](https://packagist.org/packages/8bitov/clickhouse-php-client)
+    - [bozerkins/clickhouse-client](https://packagist.org/packages/bozerkins/clickhouse-client)
+    - [simpod/clickhouse-client](https://packagist.org/packages/simpod/clickhouse-client)
     - [seva-code/php-click-house-client](https://packagist.org/packages/seva-code/php-click-house-client)
-    - [SeasClick C++ client](https://github.com/SeasX/SeasClick)
-- Go
+    - [SeasClick C++ client](https://github.com/SeasX/SeasClick)
+- Go
     - [clickhouse](https://github.com/kshvakov/clickhouse/)
-    - [go-clickhouse](https://github.com/roistat/go-clickhouse)
-    - [mailrugo-clickhouse](https://github.com/mailru/go-clickhouse)
+    - [go-clickhouse](https://github.com/roistat/go-clickhouse)
+    - [mailrugo-clickhouse](https://github.com/mailru/go-clickhouse)
     - [golang-clickhouse](https://github.com/leprosus/golang-clickhouse)
 - NodeJs
-    - [clickhouse (NodeJs)](https://github.com/TimonKK/clickhouse)
-    - [node-clickhouse](https://github.com/apla/node-clickhouse)
+    - [clickhouse (NodeJs)](https://github.com/TimonKK/clickhouse)
+    - [node-clickhouse](https://github.com/apla/node-clickhouse)
 - Perl
     - [perl-DBD-ClickHouse](https://github.com/elcamlost/perl-DBD-ClickHouse)
     - [HTTP-ClickHouse](https://metacpan.org/release/HTTP-ClickHouse)
-    - [AnyEvent-ClickHouse](https://metacpan.org/release/AnyEvent-ClickHouse)
+    - [AnyEvent-ClickHouse](https://metacpan.org/release/AnyEvent-ClickHouse)
 - Ruby
-    - [ClickHouse (Ruby)](https://github.com/shlima/click_house)
-    - [clickhouse-activerecord](https://github.com/PNixx/clickhouse-activerecord)
+    - [ClickHouse (Ruby)](https://github.com/shlima/click_house)
+    - [clickhouse-activerecord](https://github.com/PNixx/clickhouse-activerecord)
 - R
     - [clickhouse-r](https://github.com/hannesmuehleisen/clickhouse-r)
     - [RClickhouse](https://github.com/IMSMWU/RClickhouse)
 - Java
     - [clickhouse-client-java](https://github.com/VirtusAI/clickhouse-client-java)
-- Scala
-    - [clickhouse-scala-client](https://github.com/crobox/clickhouse-scala-client)
+- Scala
+    - [clickhouse-scala-client](https://github.com/crobox/clickhouse-scala-client)
 - Kotlin
     - [AORM](https://github.com/TanVD/AORM)
 - C\#
-    - [ClickHouse.Ado](https://github.com/killwort/ClickHouse-Net)
+    - [ClickHouse.Ado](https://github.com/killwort/ClickHouse-Net)
     - [ClickHouse.Net](https://github.com/ilyabreev/ClickHouse.Net)
-    - [ClickHouse.Client](https://github.com/DarkWanderer/ClickHouse.Client)
-- Elixir
+    - [ClickHouse.Client](https://github.com/DarkWanderer/ClickHouse.Client)
+- Elixir
     - [clickhousex](https://github.com/appodeal/clickhousex/)
-- Nim
+- Nim
     - [nim-clickhouse](https://github.com/leonardoce/nim-clickhouse)
[来源文章](https://clickhouse.tech/docs/zh/interfaces/third-party/client_libraries/)
diff --git a/docs/zh/interfaces/third-party/gui.md b/docs/zh/interfaces/third-party/gui.md
index 83656d18858..bbbd78f650e 100644
--- a/docs/zh/interfaces/third-party/gui.md
+++ b/docs/zh/interfaces/third-party/gui.md
@@ -1,3 +1,4 @@
+
 # 第三方开发的可视化界面 {#di-san-fang-kai-fa-de-ke-shi-hua-jie-mian}
## 开源 {#kai-yuan}
@@ -37,9 +38,9 @@ ClickHouse Web 界面 [Tabix](https://github.com/tabixio/tabix).
 - 集群管理
 - 监控副本情况以及 Kafka 引擎表
-### LightHouse {#lighthouse}
+### LightHouse {#lighthouse}
-[LightHouse](https://github.com/VKCOM/lighthouse) 是ClickHouse的轻量级Web界面。
+[LightHouse](https://github.com/VKCOM/lighthouse) 是ClickHouse的轻量级Web界面。
 特征:
@@ -57,9 +58,9 @@ ClickHouse Web 界面 [Tabix](https://github.com/tabixio/tabix).
 - 表格预览。
 - 自动完成。
-### clickhouse-cli {#clickhouse-cli}
+### clickhouse-cli {#clickhouse-cli}
-[clickhouse-cli](https://github.com/hatarist/clickhouse-cli) 是ClickHouse的替代命令行客户端,用Python 3编写。
+[clickhouse-cli](https://github.com/hatarist/clickhouse-cli) 是ClickHouse的替代命令行客户端,用Python 3编写。
 特征:
@@ -68,15 +69,15 @@ ClickHouse Web 界面 [Tabix](https://github.com/tabixio/tabix).
 - 寻呼机支持数据输出。
 - 自定义PostgreSQL类命令。
-### clickhouse-flamegraph {#clickhouse-flamegraph}
+### clickhouse-flamegraph {#clickhouse-flamegraph}
[clickhouse-flamegraph](https://github.com/Slach/clickhouse-flamegraph) 是一个将 `system.trace_log` 可视化为 [flamegraph](http://www.brendangregg.com/flamegraphs.html) 的专用工具。
## 商业 {#shang-ye}
-### Holistics Software {#holistics-software}
+### Holistics Software {#holistics-software}
-[Holistics](https://www.holistics.io/) 在2019年被Gartner FrontRunners列为可用性最高排名第二的商业智能工具之一。 Holistics是一个基于SQL的全栈数据平台和商业智能工具,用于设置您的分析流程。
+[Holistics](https://www.holistics.io/) 在2019年被Gartner FrontRunners列为可用性最高排名第二的商业智能工具之一。 Holistics是一个基于SQL的全栈数据平台和商业智能工具,用于设置您的分析流程。
 特征:
diff --git a/docs/zh/interfaces/third-party/index.md b/docs/zh/interfaces/third-party/index.md
new file mode 100644
index 00000000000..fab8cb364e8
--- /dev/null
+++ b/docs/zh/interfaces/third-party/index.md
@@ -0,0 +1,8 @@
+---
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_folder_title: "\u7B2C\u4E09\u65B9"
+toc_priority: 24
+---
+
+
diff --git a/docs/zh/interfaces/third-party/integrations.md b/docs/zh/interfaces/third-party/integrations.md
index 4bfe367e1f1..aac3d7a1b11 100644
--- a/docs/zh/interfaces/third-party/integrations.md
+++ b/docs/zh/interfaces/third-party/integrations.md
@@ -1,3 +1,4 @@
+
 # 第三方集成库 {#di-san-fang-ji-cheng-ku}
!!! warning "声明"
@@ -9,7 +10,7 @@
 - [MySQL](https://www.mysql.com)
     - [ProxySQL](https://github.com/sysown/proxysql/wiki/ClickHouse-Support)
     - [clickhouse-mysql-data-reader](https://github.com/Altinity/clickhouse-mysql-data-reader)
-    - [horgh-replicator](https://github.com/larsnovikov/horgh-replicator)
+    - [horgh-replicator](https://github.com/larsnovikov/horgh-replicator)
 - [PostgreSQL](https://www.postgresql.org)
     - [clickhousedb\_fdw](https://github.com/Percona-Lab/clickhousedb_fdw)
     - [infi.clickhouse\_fdw](https://github.com/Infinidat/infi.clickhouse_fdw) (使用 [infi.clickhouse\_orm](https://github.com/Infinidat/infi.clickhouse_orm))
     - [pg2ch](https://github.com/mkabilov/pg2ch)
@@ -17,70 +18,70 @@
 - [MSSQL](https://en.wikipedia.org/wiki/Microsoft_SQL_Server)
     - [ClickHouseMightrator](https://github.com/zlzforever/ClickHouseMigrator)
 - 消息队列
-    - [Kafka](https://kafka.apache.org)
-        - [clickhouse\_sinker](https://github.com/housepower/clickhouse_sinker) (使用 [Go client](https://github.com/kshvakov/clickhouse/))
+    - [Kafka](https://kafka.apache.org)
+        - [clickhouse\_sinker](https://github.com/housepower/clickhouse_sinker) (使用 [Go客户端](https://github.com/kshvakov/clickhouse/))
 - 对象存储
     - [S3](https://en.wikipedia.org/wiki/Amazon_S3)
-        - [clickhouse-backup](https://github.com/AlexAkulov/clickhouse-backup)
+        - [clickhouse-backup](https://github.com/AlexAkulov/clickhouse-backup)
 - 容器编排
     - [Kubernetes](https://kubernetes.io)
-        - [clickhouse-operator](https://github.com/Altinity/clickhouse-operator)
+        - [clickhouse-operator](https://github.com/Altinity/clickhouse-operator)
 - 配置管理
-    - [puppet](https://puppet.com)
-        - [innogames/clickhouse](https://forge.puppet.com/innogames/clickhouse)
+    - [puppet](https://puppet.com)
+        - [innogames/clickhouse](https://forge.puppet.com/innogames/clickhouse)
         - [mfedotov/clickhouse](https://forge.puppet.com/mfedotov/clickhouse)
 - 监控
-    - [Graphite](https://graphiteapp.org)
+    - [Graphite](https://graphiteapp.org)
         - [graphouse](https://github.com/yandex/graphouse)
-        - [carbon-clickhouse](https://github.com/lomik/carbon-clickhouse)
-        - [graphite-clickhouse](https://github.com/lomik/graphite-clickhouse)
-        - [graphite-ch-optimizer](https://github.com/innogames/graphite-ch-optimizer) - optimizes staled partitions in [\*GraphiteMergeTree](../../operations/table_engines/graphitemergetree.md#graphitemergetree) if rules from [rollup configuration](../../operations/table_engines/graphitemergetree.md#rollup-configuration) could be applied
+        - [carbon-clickhouse](https://github.com/lomik/carbon-clickhouse)
+        - [graphite-clickhouse](https://github.com/lomik/graphite-clickhouse)
+        - [graphite-ch-optimizer](https://github.com/innogames/graphite-ch-optimizer) - 当 [汇总配置](../../engines/table_engines/mergetree_family/graphitemergetree.md#rollup-configuration) 中的规则适用时,优化 [\*GraphiteMergeTree](../../engines/table_engines/mergetree_family/graphitemergetree.md#graphitemergetree) 中的陈旧分区
     - [Grafana](https://grafana.com/)
         - [clickhouse-grafana](https://github.com/Vertamedia/clickhouse-grafana)
-    - [Prometheus](https://prometheus.io/)
+    - [Prometheus](https://prometheus.io/)
         - [clickhouse\_exporter](https://github.com/f1yegor/clickhouse_exporter)
         - [PromHouse](https://github.com/Percona-Lab/PromHouse)
-        - [clickhouse\_exporter](https://github.com/hot-wifi/clickhouse_exporter) (uses [Go client](https://github.com/kshvakov/clickhouse/))
+        - [clickhouse\_exporter](https://github.com/hot-wifi/clickhouse_exporter) (使用 [Go客户端](https://github.com/kshvakov/clickhouse/))
     - [Nagios](https://www.nagios.org/)
         - [check\_clickhouse](https://github.com/exogroup/check_clickhouse/)
         - [check\_clickhouse.py](https://github.com/innogames/igmonplugins/blob/master/src/check_clickhouse.py)
     - [Zabbix](https://www.zabbix.com)
-        - [clickhouse-zabbix-template](https://github.com/Altinity/clickhouse-zabbix-template)
+        - [clickhouse-zabbix-template](https://github.com/Altinity/clickhouse-zabbix-template)
     - [Sematext](https://sematext.com/)
         - [clickhouse集成](https://github.com/sematext/sematext-agent-integrations/tree/master/clickhouse)
 - 记录
     - [rsyslog](https://www.rsyslog.com/)
-        - [omclickhouse](https://www.rsyslog.com/doc/master/configuration/modules/omclickhouse.html)
+        - [omclickhouse](https://www.rsyslog.com/doc/master/configuration/modules/omclickhouse.html)
     - [fluentd](https://www.fluentd.org)
         - [loghouse](https://github.com/flant/loghouse) (对于 [Kubernetes](https://kubernetes.io))
     - [logagent](https://www.sematext.com/logagent)
-        - [logagent output-plugin-clickhouse](https://sematext.com/docs/logagent/output-plugin-clickhouse/)
+        - [logagent output-plugin-clickhouse](https://sematext.com/docs/logagent/output-plugin-clickhouse/)
 - 地理
     - [MaxMind](https://dev.maxmind.com/geoip/)
-        - [clickhouse-maxmind-geoip](https://github.com/AlexeyKupershtokh/clickhouse-maxmind-geoip)
+        - [clickhouse-maxmind-geoip](https://github.com/AlexeyKupershtokh/clickhouse-maxmind-geoip)
## 编程语言生态系统 {#bian-cheng-yu-yan-sheng-tai-xi-tong}
 - Python
     - [SQLAlchemy](https://www.sqlalchemy.org)
-        - [sqlalchemy-clickhouse](https://github.com/cloudflare/sqlalchemy-clickhouse) (使用 [infi.clickhouse\_orm](https://github.com/Infinidat/infi.clickhouse_orm))
-    - [pandas](https://pandas.pydata.org)
+        - [sqlalchemy-clickhouse](https://github.com/cloudflare/sqlalchemy-clickhouse) (使用 [infi.clickhouse\_orm](https://github.com/Infinidat/infi.clickhouse_orm))
+    - [pandas](https://pandas.pydata.org)
         - [pandahouse](https://github.com/kszucs/pandahouse)
 - R
     - [dplyr](https://db.rstudio.com/dplyr/)
-        - [RClickhouse](https://github.com/IMSMWU/RClickhouse) (使用 [clickhouse-cpp](https://github.com/artpaul/clickhouse-cpp))
+        - [RClickhouse](https://github.com/IMSMWU/RClickhouse) (使用 [clickhouse-cpp](https://github.com/artpaul/clickhouse-cpp))
 - Java
     - [Hadoop](http://hadoop.apache.org)
-        - [clickhouse-hdfs-loader](https://github.com/jaykelin/clickhouse-hdfs-loader) (使用 [JDBC](../../query_language/table_functions/jdbc.md))
-- Scala
+        - [clickhouse-hdfs-loader](https://github.com/jaykelin/clickhouse-hdfs-loader) (使用 [JDBC](../../sql_reference/table_functions/jdbc.md))
+- Scala
     - [Akka](https://akka.io)
-        - [clickhouse-scala-client](https://github.com/crobox/clickhouse-scala-client)
+        - [clickhouse-scala-client](https://github.com/crobox/clickhouse-scala-client)
 - C\#
     - [ADO.NET](https://docs.microsoft.com/en-us/dotnet/framework/data/adonet/ado-net-overview)
-        - [ClickHouse.Ado](https://github.com/killwort/ClickHouse-Net)
+        - [ClickHouse.Ado](https://github.com/killwort/ClickHouse-Net)
         - [ClickHouse.Net](https://github.com/ilyabreev/ClickHouse.Net)
         - [ClickHouse.Net.Migrations](https://github.com/ilyabreev/ClickHouse.Net.Migrations)
-- Elixir
+- Elixir
     - [Ecto](https://github.com/elixir-ecto/ecto)
         - [clickhouse\_ecto](https://github.com/appodeal/clickhouse_ecto)
diff --git a/docs/zh/interfaces/third-party/proxy.md b/docs/zh/interfaces/third-party/proxy.md
index 727bff00cbb..e954444c46f 100644
--- a/docs/zh/interfaces/third-party/proxy.md
+++ b/docs/zh/interfaces/third-party/proxy.md
@@ -1,3 +1,4 @@
+
 # 来自第三方开发人员的代理服务器 {#lai-zi-di-san-fang-kai-fa-ren-yuan-de-dai-li-fu-wu-qi}
 [chproxy](https://github.com/Vertamedia/chproxy) 是ClickHouse数据库的http代理和负载均衡器。
@@ -22,9 +23,9 @@
 在Go中实现。
-## ClickHouse-Bulk {#clickhouse-bulk}
+## ClickHouse-Bulk {#clickhouse-bulk}
-[ClickHouse-Bulk](https://github.com/nikepan/clickhouse-bulk) 是一个简单的ClickHouse插入收集器。
+[ClickHouse-Bulk](https://github.com/nikepan/clickhouse-bulk) 是一个简单的ClickHouse插入收集器。
 特征:
diff --git a/docs/zh/introduction/adopters.md b/docs/zh/introduction/adopters.md
index ef841b2fa05..8a69e67264e 100644
--- a/docs/zh/introduction/adopters.md
+++ b/docs/zh/introduction/adopters.md
@@ -1,79 +1,82 @@
 ---
-en_copy: true
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_priority: 8
+toc_title: "\u91C7\u7528\u8005"
 ---
-# ClickHouse Adopters {#clickhouse-adopters}
+# ClickHouse采用者 {#clickhouse-adopters}
-!!! warning "Disclaimer"
-    The following list of companies using ClickHouse and their success stories is assembled from public sources, thus might differ from current reality. We’d appreciate it if you share the story of adopting ClickHouse in your company and [add it to the list](https://github.com/ClickHouse/ClickHouse/edit/master/docs/en/introduction/adopters.md), but please make sure you won’t have any NDA issues by doing so. Providing updates with publications from other companies is also useful.
+!!! warning "免责声明"
+    以下使用ClickHouse的公司及其成功案例的名单来自公开来源,因此可能与当前实际情况有所不同。
如果您分享您公司采用ClickHouse的故事,我们将不胜感激 [将其添加到列表](https://github.com/ClickHouse/ClickHouse/edit/master/docs/en/introduction/adopters.md),但请确保你不会有任何保密协议的问题,这样做。 提供来自其他公司的出版物的更新也很有用。 -| Company | Industry | Usecase | Cluster Size | (Un)Compressed Data Size\* | Reference | -|-----------------------------------------------------------------------------|---------------------------------|-----------------------|------------------------------------------------------------|------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| [2gis](https://2gis.ru) | Maps | Monitoring | — | — | [Talk in Russian, July 2019](https://youtu.be/58sPkXfq6nw) | -| [Aloha Browser](https://alohabrowser.com/) | Mobile App | Browser backend | — | — | [Slides in Russian, May 2019](https://github.com/yandex/clickhouse-presentations/blob/master/meetup22/aloha.pdf) | -| [Amadeus](https://amadeus.com/) | Travel | Analytics | — | — | [Press Release, April 2018](https://www.altinity.com/blog/2018/4/5/amadeus-technologies-launches-investment-and-insights-tool-based-on-machine-learning-and-strategy-algorithms) | -| [Appsflyer](https://www.appsflyer.com) | Mobile analytics | Main product | — | — | [Talk in Russian, July 2019](https://www.youtube.com/watch?v=M3wbRlcpBbY) | -| [ArenaData](https://arenadata.tech/) | Data Platform | Main product | — | — | [Slides in Russian, December 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup38/indexes.pdf) | -| [Badoo](https://badoo.com) | Dating | Timeseries | — | — | [Slides in Russian, December 2019](https://presentations.clickhouse.tech/meetup38/forecast.pdf) | -| [Benocs](https://www.benocs.com/) | Network Telemetry and Analytics | Main Product | — | — | [Slides in English, October 2017](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup9/lpm.pdf) | -| [Bloomberg](https://www.bloomberg.com/) | Finance, Media | Monitoring | 102 servers | — | [Slides, May 2018](https://www.slideshare.net/Altinity/http-analytics-for-6m-requests-per-second-using-clickhouse-by-alexander-bocharov) | -| [Bloxy](https://bloxy.info) | Blockchain | Analytics | — | — | [Slides in Russian, August 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/4_bloxy.pptx) | -| `Dataliance/UltraPower` | Telecom | Analytics | — | — | [Slides in Chinese, January 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup12/telecom.pdf) | -| [CARTO](https://carto.com/) | Business Intelligence | Geo analytics | — | — | [Geospatial processing with Clickhouse](https://carto.com/blog/geospatial-processing-with-clickhouse/) | -| [CERN](http://public.web.cern.ch/public/) | Research | Experiment | — | — | [Press release, April 2012](https://www.yandex.com/company/press_center/press_releases/2012/2012-04-10/) | -| [Cisco](http://cisco.com/) | Networking | Traffic analysis | — | — | [Lightning talk, October 2019](https://youtu.be/-hI1vDR2oPY?t=5057) | -| [Citadel Securities](https://www.citadelsecurities.com/) | Finance | — | — | — | [Contribution, March 2019](https://github.com/ClickHouse/ClickHouse/pull/4774) | -| [Citymobil](https://city-mobil.ru) | Taxi | Analytics | — | — | [Blog Post in Russian, March 2020](https://habr.com/en/company/citymobil/blog/490660/) | -| 
[ContentSquare](https://contentsquare.com) | Web analytics | Main product | — | — | [Blog post in French, November 2018](http://souslecapot.net/2018/11/21/patrick-chatain-vp-engineering-chez-contentsquare-penser-davantage-amelioration-continue-que-revolution-constante/) | -| [Cloudflare](https://cloudflare.com) | CDN | Traffic analysis | 36 servers | — | [Blog post, May 2017](https://blog.cloudflare.com/how-cloudflare-analyzes-1m-dns-queries-per-second/), [Blog post, March 2018](https://blog.cloudflare.com/http-analytics-for-6m-requests-per-second-using-clickhouse/) | -| [Corunet](https://coru.net/) | Analytics | Main product | — | — | [Slides in English, April 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup21/predictive_models.pdf) | -| [CraiditX 氪信](https://creditx.com) | Finance AI | Analysis | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/udf.pptx) | -| [Criteo/Storetail](https://www.criteo.com/) | Retail | Main product | — | — | [Slides in English, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup18/3_storetail.pptx) | -| [Deutsche Bank](https://db.com) | Finance | BI Analytics | — | — | [Slides in English, October 2019](https://bigdatadays.ru/wp-content/uploads/2019/10/D2-H3-3_Yakunin-Goihburg.pdf) | -| [Diva-e](https://www.diva-e.com) | Digital consulting | Main Product | — | — | [Slides in English, September 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup29/ClickHouse-MeetUp-Unusual-Applications-sd-2019-09-17.pdf) | -| [Exness](https://www.exness.com) | Trading | Metrics, Logging | — | — | [Talk in Russian, May 2019](https://youtu.be/_rpU-TvSfZ8?t=3215) | -| [Geniee](https://geniee.co.jp) | Ad network | Main product | — | — | [Blog post in Japanese, July 2017](https://tech.geniee.co.jp/entry/2017/07/20/160100) | -| [HUYA](https://www.huya.com/) | Video Streaming | Analytics | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/7.%20ClickHouse万亿数据分析实践%20李本旺(sundy-li)%20虎牙.pdf) | -| [Idealista](https://www.idealista.com) | Real Estate | Analytics | — | — | [Blog Post in English, April 2019](https://clickhouse.yandex/blog/en/clickhouse-meetup-in-madrid-on-april-2-2019) | -| [Infovista](https://www.infovista.com/) | Networks | Analytics | — | — | [Slides in English, October 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup30/infovista.pdf) | -| [InnoGames](https://www.innogames.com) | Games | Metrics, Logging | — | — | [Slides in Russian, September 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/graphite_and_clickHouse.pdf) | -| [Integros](https://integros.com) | Platform for video services | Analytics | — | — | [Slides in Russian, May 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup22/strategies.pdf) | -| [Kodiak Data](https://www.kodiakdata.com/) | Clouds | Main product | — | — | [Slides in Engish, April 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup13/kodiak_data.pdf) | -| [Kontur](https://kontur.ru) | Software Development | Metrics | — | — | [Talk in Russian, November 2018](https://www.youtube.com/watch?v=U4u4Bd0FtrY) | -| [LifeStreet](https://lifestreet.com/) | Ad network | Main product | 75 servers (3 replicas) | 5.27 PiB | [Blog post in Russian, February 2017](https://habr.com/en/post/322620/) | -| [Mail.ru Cloud 
Solutions](https://mcs.mail.ru/) | Cloud services | Main product | — | — | [Running ClickHouse Instance, in Russian](https://mcs.mail.ru/help/db-create/clickhouse#) | -| [MessageBird](https://www.messagebird.com) | Telecommunications | Statistics | — | — | [Slides in English, November 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup20/messagebird.pdf) | -| [MGID](https://www.mgid.com/) | Ad network | Web-analytics | — | — | [Our experience in implementing analytical DBMS ClickHouse, in Russian](http://gs-studio.com/news-about-it/32777----clickhouse---c) | -| [OneAPM](https://www.oneapm.com/) | Monitorings and Data Analysis | Main product | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/8.%20clickhouse在OneAPM的应用%20杜龙.pdf) | -| [Pragma Innovation](http://www.pragma-innovation.fr/) | Telemetry and Big Data Analysis | Main product | — | — | [Slides in English, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup18/4_pragma_innovation.pdf) | -| [QINGCLOUD](https://www.qingcloud.com/) | Cloud services | Main product | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/4.%20Cloud%20%2B%20TSDB%20for%20ClickHouse%20张健%20QingCloud.pdf) | -| [Qrator](https://qrator.net) | DDoS protection | Main product | — | — | [Blog Post, March 2019](https://blog.qrator.net/en/clickhouse-ddos-mitigation_37/) | -| [Beijing PERCENT Information Technology Co., Ltd.](https://www.percent.cn/) | Analytics | Main Product | — | — | [Slides in Chinese, June 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup24/4.%20ClickHouse万亿数据双中心的设计与实践%20.pdf) | -| [Rambler](https://rambler.ru) | Internet services | Analytics | — | — | [Talk in Russian, April 2018](https://medium.com/@ramblertop/разработка-api-clickhouse-для-рамблер-топ-100-f4c7e56f3141) | -| [Tencent](https://www.tencent.com) | Messaging | Logging | — | — | [Talk in Chinese, November 2019](https://youtu.be/T-iVQRuw-QY?t=5050) | -| [Traffic Stars](https://trafficstars.com/) | AD network | — | — | — | [Slides in Russian, May 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup15/lightning/ninja.pdf) | -| [S7 Airlines](https://www.s7.ru) | Airlines | Metrics, Logging | — | — | [Talk in Russian, March 2019](https://www.youtube.com/watch?v=nwG68klRpPg&t=15s) | -| [SEMrush](https://www.semrush.com/) | Marketing | Main product | — | — | [Slides in Russian, August 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/5_semrush.pdf) | -| [scireum GmbH](https://www.scireum.de/) | e-Commerce | Main product | — | — | [Talk in German, February 2020](https://www.youtube.com/watch?v=7QWAn5RbyR4) | -| [Sentry](https://sentry.io/) | Software developer | Backend for product | — | — | [Blog Post in English, May 2019](https://blog.sentry.io/2019/05/16/introducing-snuba-sentrys-new-search-infrastructure) | -| [SGK](http://www.sgk.gov.tr/wps/portal/sgk/tr) | Goverment Social Security | Analytics | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup35/ClickHouse%20Meetup-Ramazan%20POLAT.pdf) | -| [seo.do](https://seo.do/) | Analytics | Main product | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup35/CH%20Presentation-%20Metehan%20Çetinkaya.pdf) | -| 
[Sina](http://english.sina.com/index.html) | News | — | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/6.%20ClickHouse最佳实践%20高鹏_新浪.pdf) | -| [SMI2](https://smi2.ru/) | News | Analytics | — | — | [Blog Post in Russian, November 2017](https://habr.com/ru/company/smi2/blog/314558/) | -| [Splunk](https://www.splunk.com/) | Business Analytics | Main product | — | — | [Slides in English, January 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup12/splunk.pdf) | -| [Spotify](https://www.spotify.com) | Music | Experimentation | — | — | [Slides, July 2018](https://www.slideshare.net/glebus/using-clickhouse-for-experimentation-104247173) | -| [Tencent](https://www.tencent.com) | Big Data | Data processing | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/5.%20ClickHouse大数据集群应用_李俊飞腾讯网媒事业部.pdf) | -| [Uber](https://www.uber.com) | Taxi | Logging | — | — | [Slides, February 2020](https://presentations.clickhouse.tech/meetup40/uber.pdf) | -| [VKontakte](https://vk.com) | Social Network | Statistics, Logging | — | — | [Slides in Russian, August 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/3_vk.pdf) | -| [Wisebits](https://wisebits.com/) | IT Solutions | Analytics | — | — | [Slides in Russian, May 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup22/strategies.pdf) | -| [Xiaoxin Tech.](https://www.xiaoheiban.cn/) | Education | Common purpose | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/sync-clickhouse-with-mysql-mongodb.pptx) | -| [Ximalaya](https://www.ximalaya.com/) | Audio sharing | OLAP | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/ximalaya.pdf) | -| [Yandex Cloud](https://cloud.yandex.ru/services/managed-clickhouse) | Public Cloud | Main product | — | — | [Talk in Russian, December 2019](https://www.youtube.com/watch?v=pgnak9e_E0o) | -| [Yandex DataLens](https://cloud.yandex.ru/services/datalens) | Business Intelligence | Main product | — | — | [Slides in Russian, December 2019](https://presentations.clickhouse.tech/meetup38/datalens.pdf) | -| [Yandex Market](https://market.yandex.ru/) | e-Commerce | Metrics, Logging | — | — | [Talk in Russian, January 2019](https://youtu.be/_l1qP0DyBcA?t=478) | -| [Yandex Metrica](https://metrica.yandex.com) | Web analytics | Main product | 360 servers in one cluster, 1862 servers in one department | 66.41 PiB / 5.68 PiB | [Slides, February 2020](https://presentations.clickhouse.tech/meetup40/introduction/#13) | -| [ЦВТ](https://htc-cs.ru/) | Software Development | Metrics, Logging | — | — | [Blog Post, March 2019, in Russian](https://vc.ru/dev/62715-kak-my-stroili-monitoring-na-prometheus-clickhouse-i-elk) | -| [МКБ](https://mkb.ru/) | Bank | Web-system monitoring | — | — | [Slides in Russian, September 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/mkb.pdf) | -| [金数据](https://jinshuju.net) | BI Analytics | Main product | — | — | [Slides in Chinese, October 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup24/3.%20金数据数据架构调整方案Public.pdf) | +| 公司简介 | 行业 | 用例 | 群集大小 | (Un)压缩数据大小\* | 参考资料 | 
+|---|---|---|---|---|---|
+| [2gis](https://2gis.ru) | 地图 | 监测 | — | — | [俄语讲座,2019年7月](https://youtu.be/58sPkXfq6nw) |
+| [Aloha Browser](https://alohabrowser.com/) | 移动应用程序 | 浏览器后端 | — | — | [俄语幻灯片,2019年5月](https://github.com/yandex/clickhouse-presentations/blob/master/meetup22/aloha.pdf) |
+| [Amadeus](https://amadeus.com/) | 旅游 | 分析 | — | — | [新闻稿,2018年4月](https://www.altinity.com/blog/2018/4/5/amadeus-technologies-launches-investment-and-insights-tool-based-on-machine-learning-and-strategy-algorithms) |
+| [Appsflyer](https://www.appsflyer.com) | 移动分析 | 主要产品 | — | — | [俄语讲座,2019年7月](https://www.youtube.com/watch?v=M3wbRlcpBbY) |
+| [ArenaData](https://arenadata.tech/) | 数据平台 | 主要产品 | — | — | [俄语幻灯片,2019年12月](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup38/indexes.pdf) |
+| [Badoo](https://badoo.com) | 约会 | 时间序列 | — | — | [俄语幻灯片,2019年12月](https://presentations.clickhouse.tech/meetup38/forecast.pdf) |
+| [Benocs](https://www.benocs.com/) | 网络遥测和分析 | 主要产品 | — | — | [英文幻灯片,2017年10月](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup9/lpm.pdf) |
+| [彭博](https://www.bloomberg.com/) | 金融、媒体 | 监测 | 102台服务器 | — | [幻灯片,2018年5月](https://www.slideshare.net/Altinity/http-analytics-for-6m-requests-per-second-using-clickhouse-by-alexander-bocharov) |
+| [Bloxy](https://bloxy.info) | 区块链 | 分析 | — | — | [俄语幻灯片,2018年8月](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/4_bloxy.pptx) |
+| `Dataliance/UltraPower` | 电信 | 分析 | — | — | [中文幻灯片,2018年1月](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup12/telecom.pdf) |
+| [CARTO](https://carto.com/) | 商业智能 | 地理分析 | — | — | [使用ClickHouse进行地理空间处理](https://carto.com/blog/geospatial-processing-with-clickhouse/) |
+| [CERN](http://public.web.cern.ch/public/) | 研究 | 实验 | — | — | [新闻稿,2012年4月](https://www.yandex.com/company/press_center/press_releases/2012/2012-04-10/) |
+| [Cisco](http://cisco.com/) | 网络 | 流量分析 | — | — | [闪电演讲,2019年10月](https://youtu.be/-hI1vDR2oPY?t=5057) |
+| [Citadel Securities](https://www.citadelsecurities.com/) | 金融 | — | — | — | [代码贡献,2019年3月](https://github.com/ClickHouse/ClickHouse/pull/4774) |
+| [Citymobil](https://city-mobil.ru) | 出租车 | 分析 | — | — | [俄语博客文章,2020年3月](https://habr.com/en/company/citymobil/blog/490660/) |
+| [ContentSquare](https://contentsquare.com) | 网站分析 | 主要产品 | — | — | [法语博客文章,2018年11月](http://souslecapot.net/2018/11/21/patrick-chatain-vp-engineering-chez-contentsquare-penser-davantage-amelioration-continue-que-revolution-constante/) |
+| [Cloudflare](https://cloudflare.com) | CDN | 流量分析 | 36台服务器 | — | [博客文章,2017年5月](https://blog.cloudflare.com/how-cloudflare-analyzes-1m-dns-queries-per-second/), [博客文章,2018年3月](https://blog.cloudflare.com/http-analytics-for-6m-requests-per-second-using-clickhouse/) |
+| [Corunet](https://coru.net/) | 分析 | 主要产品 | — | — | [英文幻灯片,2019年4月](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup21/predictive_models.pdf) |
+| [CraiditX 氪信](https://creditx.com) | 金融AI | 分析 | — | — | [英文幻灯片,2019年11月](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/udf.pptx) |
+| [Criteo/Storetail](https://www.criteo.com/) | 零售 | 主要产品 | — | — | [英文幻灯片,2018年10月](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup18/3_storetail.pptx) |
+| [德意志银行](https://db.com) | 金融 | 商业智能分析 | — | — | [英文幻灯片,2019年10月](https://bigdatadays.ru/wp-content/uploads/2019/10/D2-H3-3_Yakunin-Goihburg.pdf) |
+| [Diva-e](https://www.diva-e.com) | 数字咨询 | 主要产品 | — | — | [英文幻灯片,2019年9月](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup29/ClickHouse-MeetUp-Unusual-Applications-sd-2019-09-17.pdf) |
+| [Exness](https://www.exness.com) | 交易 | 指标,日志记录 | — | — | [俄语讲座,2019年5月](https://youtu.be/_rpU-TvSfZ8?t=3215) |
+| [Geniee](https://geniee.co.jp) | 广告网络 | 主要产品 | — | — | [日语博客文章,2017年7月](https://tech.geniee.co.jp/entry/2017/07/20/160100) |
+| [HUYA](https://www.huya.com/) | 视频流 | 分析 | — | — | [中文幻灯片,2018年10月](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/7.%20ClickHouse万亿数据分析实践%20李本旺(sundy-li)%20虎牙.pdf) |
+| [Idealista](https://www.idealista.com) | 房地产 | 分析 | — | — | [英文博客文章,2019年4月](https://clickhouse.yandex/blog/en/clickhouse-meetup-in-madrid-on-april-2-2019) |
+| [Infovista](https://www.infovista.com/) | 网络 | 分析 | — | — | [英文幻灯片,2019年10月](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup30/infovista.pdf) |
+| [InnoGames](https://www.innogames.com) | 游戏 | 指标,日志记录 | — | — | [俄语幻灯片,2019年9月](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/graphite_and_clickHouse.pdf) |
+| [Integros](https://integros.com) | 视频服务平台 | 分析 | — | — | [俄语幻灯片,2019年5月](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup22/strategies.pdf) |
+| [Kodiak Data](https://www.kodiakdata.com/) | 云 | 主要产品 | — | — | [英文幻灯片,2018年4月](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup13/kodiak_data.pdf) |
+| [Kontur](https://kontur.ru) | 软件开发 | 指标 | — | — | [俄语讲座,2018年11月](https://www.youtube.com/watch?v=U4u4Bd0FtrY) |
+| [LifeStreet](https://lifestreet.com/) | 广告网络 | 主要产品 | 75台服务器(3个副本) | 5.27PiB | [俄语博客文章,2017年2月](https://habr.com/en/post/322620/) |
+| [Mail.ru Cloud Solutions](https://mcs.mail.ru/) | 云服务 | 主要产品 | — | — | [运行ClickHouse实例(俄语)](https://mcs.mail.ru/help/db-create/clickhouse#) |
+| [MessageBird](https://www.messagebird.com) | 电信 | 统计 | — | — | [英文幻灯片,2018年11月](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup20/messagebird.pdf) |
+| [MGID](https://www.mgid.com/) | 广告网络 | 网络分析 | — | — | [我们实施分析型DBMS ClickHouse的经验(俄语)](http://gs-studio.com/news-about-it/32777----clickhouse---c) |
+| [OneAPM](https://www.oneapm.com/) | 监测和数据分析 | 主要产品 | — | — | [中文幻灯片,2018年10月](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/8.%20clickhouse在OneAPM的应用%20杜龙.pdf) |
+| [Pragma Innovation](http://www.pragma-innovation.fr/) | 遥测和大数据分析 | 主要产品 | — | — | [英文幻灯片,2018年10月](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup18/4_pragma_innovation.pdf) |
+| [QINGCLOUD](https://www.qingcloud.com/) | 云服务 | 主要产品 | — | — | [中文幻灯片,2018年10月](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/4.%20Cloud%20%2B%20TSDB%20for%20ClickHouse%20张健%20QingCloud.pdf) |
+| [Qrator](https://qrator.net) | DDoS防护 | 主要产品 | — | — | [博客文章,2019年3月](https://blog.qrator.net/en/clickhouse-ddos-mitigation_37/) |
+| [Beijing PERCENT Information Technology Co., Ltd.](https://www.percent.cn/) | 分析 | 主要产品 | — | — | [中文幻灯片,2019年6月](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup24/4.%20ClickHouse万亿数据双中心的设计与实践%20.pdf) |
+| [Rambler](https://rambler.ru) | 互联网服务 | 分析 | — | — | [俄语讲座,2018年4月](https://medium.com/@ramblertop/разработка-api-clickhouse-для-рамблер-топ-100-f4c7e56f3141) |
+| [腾讯](https://www.tencent.com) | 消息传递 | 日志记录 | — | — | [中文讲座,2019年11月](https://youtu.be/T-iVQRuw-QY?t=5050) |
+| [Traffic Stars](https://trafficstars.com/) | 广告网络 | — | — | — | [俄语幻灯片,2018年5月](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup15/lightning/ninja.pdf) |
+| [S7航空公司](https://www.s7.ru) | 航空公司 | 指标,日志记录 | — | — | [俄语讲座,2019年3月](https://www.youtube.com/watch?v=nwG68klRpPg&t=15s) |
+| [SEMrush](https://www.semrush.com/) | 营销 | 主要产品 | — | — | [俄语幻灯片,2018年8月](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/5_semrush.pdf) |
+| [scireum GmbH](https://www.scireum.de/) | 电子商务 | 主要产品 | — | — | [德语讲座,2020年2月](https://www.youtube.com/watch?v=7QWAn5RbyR4) |
+| [Sentry](https://sentry.io/) | 软件开发 | 产品后端 | — | — | [英文博客文章,2019年5月](https://blog.sentry.io/2019/05/16/introducing-snuba-sentrys-new-search-infrastructure) |
+| [SGK](http://www.sgk.gov.tr/wps/portal/sgk/tr) | 政府社会保障 | 分析 | — | — | [英文幻灯片,2019年11月](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup35/ClickHouse%20Meetup-Ramazan%20POLAT.pdf) |
+| [seo.do](https://seo.do/) | 分析 | 主要产品 | — | — | [英文幻灯片,2019年11月](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup35/CH%20Presentation-%20Metehan%20Çetinkaya.pdf) |
+| [新浪](http://english.sina.com/index.html) | 新闻 | — | — | — | [中文幻灯片,2018年10月](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/6.%20ClickHouse最佳实践%20高鹏_新浪.pdf) |
+| [SMI2](https://smi2.ru/) | 新闻 | 分析 | — | — | [俄语博客文章,2017年11月](https://habr.com/ru/company/smi2/blog/314558/) |
+| [Splunk](https://www.splunk.com/) | 业务分析 | 主要产品 | — | — | [英文幻灯片,2018年1月](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup12/splunk.pdf) |
+| [Spotify](https://www.spotify.com) | 音乐 | 实验 | — | — | [幻灯片,2018年7月](https://www.slideshare.net/glebus/using-clickhouse-for-experimentation-104247173) |
+| [腾讯](https://www.tencent.com) | 大数据 | 数据处理 | — | — | [中文幻灯片,2018年10月](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/5.%20ClickHouse大数据集群应用_李俊飞腾讯网媒事业部.pdf) |
+| [优步](https://www.uber.com) | 出租车 | 日志记录 | — | — | [幻灯片,2020年2月](https://presentations.clickhouse.tech/meetup40/uber.pdf) |
+| [VKontakte](https://vk.com) | 社交网络 | 统计,日志记录 | — | — | [俄语幻灯片,2018年8月](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/3_vk.pdf) |
+| [Wisebits](https://wisebits.com/) | IT解决方案 | 分析 | — | — | [俄语幻灯片,2019年5月](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup22/strategies.pdf) |
+| [小新科技](https://www.xiaoheiban.cn/) | 教育 | 通用 | — | — | [英文幻灯片,2019年11月](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/sync-clickhouse-with-mysql-mongodb.pptx) |
+| [喜马拉雅](https://www.ximalaya.com/) | 音频共享 | OLAP | — | — | [英文幻灯片,2019年11月](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/ximalaya.pdf) |
+| [Yandex云](https://cloud.yandex.ru/services/managed-clickhouse) | 公有云 | 主要产品 | — | — | [俄语讲座,2019年12月](https://www.youtube.com/watch?v=pgnak9e_E0o) |
+| [Yandex DataLens](https://cloud.yandex.ru/services/datalens) | 商业智能 | 主要产品 | — | — | [俄语幻灯片,2019年12月](https://presentations.clickhouse.tech/meetup38/datalens.pdf) |
+| [Yandex Market](https://market.yandex.ru/) | 电子商务 | 指标,日志记录 | — | — | [俄语讲座,2019年1月](https://youtu.be/_l1qP0DyBcA?t=478) |
+| [Yandex Metrica](https://metrica.yandex.com) | 网站分析 | 主要产品
| 一个集群中的360台服务器,一个部门中的1862台服务器 | 66.41PiB/5.68PiB | [幻灯片,二月2020](https://presentations.clickhouse.tech/meetup40/introduction/#13) | +| [ЦВТ](https://htc-cs.ru/) | 软件开发 | 指标,日志记录 | — | — | [博客文章,三月2019,在俄罗斯](https://vc.ru/dev/62715-kak-my-stroili-monitoring-na-prometheus-clickhouse-i-elk) | +| [МКБ](https://mkb.ru/) | 银行 | 网络系统监控 | — | — | [俄罗斯幻灯片,2019年9月](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/mkb.pdf) | +| [金数据](https://jinshuju.net) | 商业智能分析 | 主要产品 | — | — | [中文幻灯片,2019年10月](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup24/3.%20金数据数据架构调整方案Public.pdf) | -[Original article](https://clickhouse.tech/docs/en/introduction/adopters/) +[原始文章](https://clickhouse.tech/docs/en/introduction/adopters/) diff --git a/docs/zh/introduction/distinctive_features.md b/docs/zh/introduction/distinctive_features.md index 250a1a20e87..3b1e7a8c716 100644 --- a/docs/zh/introduction/distinctive_features.md +++ b/docs/zh/introduction/distinctive_features.md @@ -1,3 +1,4 @@ + # ClickHouse的独特功能 {#clickhousede-du-te-gong-neng} ## 真正的列式数据库管理系统 {#zhen-zheng-de-lie-shi-shu-ju-ku-guan-li-xi-tong} @@ -59,6 +60,6 @@ ClickHouse提供各种各样在允许牺牲数据精度的情况下对查询进 ClickHouse使用异步的多主复制技术。当数据被写入任何一个可用副本后,系统会在后台将数据分发给其他副本,以保证系统在不同副本上保持相同的数据。在大多数情况下ClickHouse能在故障后自动恢复,在一些少数的复杂情况下需要手动恢复。 -更多信息,参见 [数据复制](../operations/table_engines/replication.md)。 +更多信息,参见 [数据复制](../engines/table_engines/mergetree_family/replication.md)。 [来源文章](https://clickhouse.tech/docs/en/introduction/distinctive_features/) diff --git a/docs/zh/introduction/features_considered_disadvantages.md b/docs/zh/introduction/features_considered_disadvantages.md index 04cd34c6ffc..efc967e90ac 100644 --- a/docs/zh/introduction/features_considered_disadvantages.md +++ b/docs/zh/introduction/features_considered_disadvantages.md @@ -1,3 +1,4 @@ + # ClickHouse的限制 {#clickhouseke-yi-ren-wei-shi-que-dian-de-gong-neng} 1. 
没有完整的事务支持。
diff --git a/docs/zh/introduction/history.md b/docs/zh/introduction/history.md
index 7c1a058ea76..673e070addb 100644
--- a/docs/zh/introduction/history.md
+++ b/docs/zh/introduction/history.md
@@ -1,6 +1,7 @@
+
 # ClickHouse历史 {#clickhouseli-shi}
-ClickHouse最初是为 [Yandex.Metrica](https://metrica.yandex.com/) [世界第二大Web分析平台](http://w3techs.com/technologies/overview/traffic_analysis/all) 而开发的。多年来一直作为该系统的核心组件被该系统持续使用着。目前为止,该系统在ClickHouse中有超过13万亿条记录,并且每天超过200多亿个事件被处理。它允许直接从原始数据中动态查询并生成报告。本文简要介绍了ClickHouse在其早期发展阶段的目标。
+ClickHouse最初是为 [Yandex.Metrica](https://metrica.yandex.com/) [世界第二大Web分析平台](http://w3techs.com/technologies/overview/traffic_analysis/all) 而开发的。多年来一直作为该系统的核心组件被该系统持续使用着。目前为止,该系统在ClickHouse中有超过13万亿条记录,并且每天超过200多亿个事件被处理。它允许直接从原始数据中动态查询并生成报告。本文简要介绍了ClickHouse在其早期发展阶段的目标。
 Yandex.Metrica基于用户定义的字段,对实时访问、连接会话,生成实时的统计报表。这种需求往往需要复杂聚合方式,比如对访问用户进行去重。构建报表的数据,是实时接收存储的新数据。
diff --git a/docs/zh/introduction/index.md b/docs/zh/introduction/index.md
new file mode 100644
index 00000000000..4bc6a76857a
--- /dev/null
+++ b/docs/zh/introduction/index.md
@@ -0,0 +1,8 @@
+---
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_folder_title: "\u5BFC\u8A00"
+toc_priority: 1
+---
+
+
diff --git a/docs/zh/introduction/performance.md b/docs/zh/introduction/performance.md
index ed44ec760bb..9c5ce29df6f 100644
--- a/docs/zh/introduction/performance.md
+++ b/docs/zh/introduction/performance.md
@@ -1,4 +1,5 @@
-# Performance {#performance}
+
+# 性能 {#performance}
 根据Yandex的内部测试结果,ClickHouse表现出了比同类可比较产品更优的性能。你可以在 [这里](https://clickhouse.tech/benchmark.html) 查看具体的测试结果。
diff --git a/docs/zh/operations/access_rights.md b/docs/zh/operations/access_rights.md
index 1c648a29f26..0178001e74f 100644
--- a/docs/zh/operations/access_rights.md
+++ b/docs/zh/operations/access_rights.md
@@ -1,8 +1,9 @@
-# Access Rights {#access-rights}
-Users and access rights are set up in the user config. This is usually `users.xml`.
+# 访问权限 {#access-rights}
-Users are recorded in the `users` section. Here is a fragment of the `users.xml` file:
+用户和访问权限在用户配置中设置,通常是 `users.xml`。
+
+用户记录在 `users` 部分中。 下面是 `users.xml` 文件的一个片段:
 ``` xml
@@ -59,15 +60,15 @@ Users are recorded in the `users` section. Here is a fragment of the `users.xml`
 ```
-You can see a declaration from two users: `default`and`web`. We added the `web` user separately.
+您可以看到两个用户的声明: `default` 和 `web`。 `web` 用户是我们单独添加的。
-The `default` user is chosen in cases when the username is not passed. The `default` user is also used for distributed query processing, if the configuration of the server or cluster doesn’t specify the `user` and `password` (see the section on the [Distributed](../operations/table_engines/distributed.md) engine).
+在未传入用户名时会选择 `default` 用户。 如果服务器或集群的配置没有指定 `user` 和 `password`,`default` 用户也会用于分布式查询处理(参见 [分布式](../engines/table_engines/special/distributed.md) 引擎一节)。
 The user that is used for exchanging information between servers combined in a cluster must not have substantial restrictions or quotas – otherwise, distributed queries will fail.
-The password is specified in clear text (not recommended) or in SHA-256. The hash isn’t salted. In this regard, you should not consider these passwords as providing security against potential malicious attacks. Rather, they are necessary for protection from employees.
+密码以明文(不推荐)或SHA-256形式指定。 哈希没有加盐。 因此,您不应指望这些密码能防御潜在的恶意攻击;它们只是用来防范内部员工的误用。
-A list of networks is specified that access is allowed from. 
In this example, the list of networks for both users is loaded from a separate file (`/etc/metrika.xml`) containing the `networks` substitution. Here is a fragment of it:
+指定允许访问的网络列表。 在此示例中,两个用户的网络列表从包含 `networks` 替换的单独文件 (`/etc/metrika.xml`) 中加载。 下面是它的一个片段:
 ``` xml
@@ -81,21 +82,21 @@ A list of networks is specified that access is allowed from. In this example, th
 ```
-You could define this list of networks directly in `users.xml`, or in a file in the `users.d` directory (for more information, see the section «[Configuration files](configuration_files.md#configuration_files)»).
+您也可以直接在 `users.xml` 中定义此网络列表,或者在 `users.d` 目录下的文件中定义(更多信息,请参阅 «[配置文件](configuration_files.md#configuration_files)» 一节)。
-The config includes comments explaining how to open access from everywhere.
+该配置中包含注释,说明如何开放来自任何地方的访问。
-For use in production, only specify `ip` elements (IP addresses and their masks), since using `host` and `hoost_regexp` might cause extra latency.
+在生产环境中,请仅指定 `ip` 元素(IP地址及其掩码),因为使用 `host` 和 `host_regexp` 可能会导致额外的延迟。
-Next the user settings profile is specified (see the section «[Settings profiles](settings/settings_profiles.md)»). You can specify the default profile, `default'`. The profile can have any name. You can specify the same profile for different users. The most important thing you can write in the settings profile is `readonly=1`, which ensures read-only access.
-Then specify the quota to be used (see the section «[Quotas](quotas.md#quotas)»). You can specify the default quota: `default`. It is set in the config by default to only count resource usage, without restricting it. The quota can have any name. You can specify the same quota for different users – in this case, resource usage is calculated for each user individually.
+接下来指定用户的设置配置文件(参见 «[设置配置文件](settings/settings_profiles.md)» 一节)。 您可以指定默认配置文件 `default`。 配置文件可以有任何名称。 您可以为不同的用户指定相同的配置文件。 您可以在设置配置文件中写入的最重要的内容是 `readonly=1`,它确保只读访问。
+然后指定要使用的配额(参见 «[配额](quotas.md#quotas)» 一节)。 您可以指定默认配额 `default`。 它在配置中默认设置为只统计资源使用情况而不加限制。 配额可以有任何名称。 您可以为不同的用户指定相同的配额——在这种情况下,会为每个用户单独计算资源使用量。
-In the optional `<allow_databases>` section, you can also specify a list of databases that the user can access. By default, all databases are available to the user. You can specify the `default` database. In this case, the user will receive access to the database by default.
+在可选的 `<allow_databases>` 部分中,您还可以指定用户可以访问的数据库列表。 默认情况下,所有数据库都可供用户使用。 您可以指定 `default` 数据库,这样用户将默认获得对该数据库的访问权限。
-Access to the `system` database is always allowed (since this database is used for processing queries).
+对 `system` 数据库的访问始终是允许的(因为该数据库用于处理查询)。
-The user can get a list of all databases and tables in them by using `SHOW` queries or system tables, even if access to individual databases isn’t allowed.
+即使未被允许访问某些数据库,用户也可以通过 `SHOW` 查询或系统表获取所有数据库及其中的表的列表。
-Database access is not related to the [readonly](settings/permissions_for_queries.md#settings_readonly) setting. You can’t grant full access to one database and `readonly` access to another one. 
+数据库访问与 [只读](settings/permissions_for_queries.md#settings_readonly) 设置无关。 您不能授予对一个数据库的完全访问权限,而对另一个数据库授予 `readonly` 访问权限。
-[Original article](https://clickhouse.tech/docs/en/operations/access_rights/)
+[原始文章](https://clickhouse.tech/docs/en/operations/access_rights/)
diff --git a/docs/zh/operations/backup.md b/docs/zh/operations/backup.md
index 90efb613098..256ddddd2c2 100644
--- a/docs/zh/operations/backup.md
+++ b/docs/zh/operations/backup.md
@@ -1,38 +1,41 @@
---
-en_copy: true
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_priority: 49
+toc_title: "\u6570\u636E\u5907\u4EFD"
---
-# Data Backup {#data-backup}
+# 数据备份 {#data-backup}
-While [replication](table_engines/replication.md) provides protection from hardware failures, it does not protect against human errors: accidental deletion of data, deletion of the wrong table or a table on the wrong cluster, and software bugs that result in incorrect data processing or data corruption. In many cases mistakes like these will affect all replicas. ClickHouse has built-in safeguards to prevent some types of mistakes — for example, by default [you can’t just drop tables with a MergeTree-like engine containing more than 50 Gb of data](https://github.com/ClickHouse/ClickHouse/blob/v18.14.18-stable/programs/server/config.xml#L322-L330). However, these safeguards don’t cover all possible cases and can be circumvented.
+虽然 [复制](../engines/table_engines/mergetree_family/replication.md) 可以防范硬件故障,但它并不能防范人为错误:数据的意外删除、删错了表或在错误的集群上删表,以及导致数据处理错误或数据损坏的软件缺陷。 在许多情况下,这类错误会影响所有副本。 ClickHouse内置了防范某些类型错误的保护措施——例如,默认情况下 [您不能使用类似MergeTree的引擎删除包含超过50Gb数据的表](https://github.com/ClickHouse/ClickHouse/blob/v18.14.18-stable/programs/server/config.xml#L322-L330)。 但是,这些保障措施并不涵盖所有可能的情况,而且可以被规避。
-In order to effectively mitigate possible human errors, you should carefully prepare a strategy for backing up and restoring your data **in advance**.
+为了有效地减少可能的人为错误,您应该**提前**仔细准备备份和还原数据的策略。
-Each company has different resources available and business requirements, so there’s no universal solution for ClickHouse backups and restores that will fit every situation. What works for one gigabyte of data likely won’t work for tens of petabytes. There are a variety of possible approaches with their own pros and cons, which will be discussed below. It is a good idea to use several approaches instead of just one in order to compensate for their various shortcomings.
+每家公司都有不同的可用资源和业务需求,因此没有适合各种情况的ClickHouse备份和恢复通用解决方案。 适用于1GB数据的方法未必适用于几十PB的数据。 下面将讨论多种各有利弊的可行方法。 最好同时使用几种方法而不是只用一种,以弥补各自的缺点。
+!!! 
note "注" + 请记住,如果您备份了某些内容并且从未尝试过还原它,那么当您实际需要它时(或者至少需要比业务能够容忍的时间更长),恢复可能无法正常工作。 因此,无论您选择哪种备份方法,请确保自动还原过程,并定期在备用ClickHouse群集上练习。 -## Duplicating Source Data Somewhere Else {#duplicating-source-data-somewhere-else} +## 将源数据复制到其他地方 {#duplicating-source-data-somewhere-else} -Often data that is ingested into ClickHouse is delivered through some sort of persistent queue, such as [Apache Kafka](https://kafka.apache.org). In this case it is possible to configure an additional set of subscribers that will read the same data stream while it is being written to ClickHouse and store it in cold storage somewhere. Most companies already have some default recommended cold storage, which could be an object store or a distributed filesystem like [HDFS](https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html). +通常被摄入到ClickHouse的数据是通过某种持久队列传递的,例如 [Apache Kafka](https://kafka.apache.org). 在这种情况下,可以配置一组额外的订阅服务器,这些订阅服务器将在写入ClickHouse时读取相同的数据流,并将其存储在冷存储中。 大多数公司已经有一些默认的推荐冷存储,可能是对象存储或分布式文件系统,如 [HDFS](https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html). -## Filesystem Snapshots {#filesystem-snapshots} +## 文件系统快照 {#filesystem-snapshots} -Some local filesystems provide snapshot functionality (for example, [ZFS](https://en.wikipedia.org/wiki/ZFS)), but they might not be the best choice for serving live queries. A possible solution is to create additional replicas with this kind of filesystem and exclude them from the [Distributed](table_engines/distributed.md) tables that are used for `SELECT` queries. Snapshots on such replicas will be out of reach of any queries that modify data. As a bonus, these replicas might have special hardware configurations with more disks attached per server, which would be cost-effective. +某些本地文件系统提供快照功能(例如, [ZFS](https://en.wikipedia.org/wiki/ZFS)),但它们可能不是提供实时查询的最佳选择。 一个可能的解决方案是使用这种文件系统创建额外的副本,并将它们从 [分布](../engines/table_engines/special/distributed.md) 用于以下目的的表 `SELECT` 查询。 任何修改数据的查询都无法访问此类副本上的快照。 作为奖励,这些副本可能具有特殊的硬件配置,每个服务器附加更多的磁盘,这将是经济高效的。 -## clickhouse-copier {#clickhouse-copier} +## ツ环板-ョツ嘉ッツ偲 {#clickhouse-copier} -[clickhouse-copier](utils/clickhouse-copier.md) is a versatile tool that was initially created to re-shard petabyte-sized tables. It can also be used for backup and restore purposes because it reliably copies data between ClickHouse tables and clusters. +[ツ环板-ョツ嘉ッツ偲](utilities/clickhouse-copier.md) 是一个多功能工具,最初创建用于重新分片pb大小的表。 它还可用于备份和还原目的,因为它可以在ClickHouse表和集群之间可靠地复制数据。 -For smaller volumes of data, a simple `INSERT INTO ... SELECT ...` to remote tables might work as well. +对于较小的数据量,一个简单的 `INSERT INTO ... SELECT ...` 到远程表也可以工作。 -## Manipulations with Parts {#manipulations-with-parts} +## 部件操作 {#manipulations-with-parts} -ClickHouse allows using the `ALTER TABLE ... FREEZE PARTITION ...` query to create a local copy of table partitions. This is implemented using hardlinks to the `/var/lib/clickhouse/shadow/` folder, so it usually does not consume extra disk space for old data. The created copies of files are not handled by ClickHouse server, so you can just leave them there: you will have a simple backup that doesn’t require any additional external system, but it will still be prone to hardware issues. For this reason, it’s better to remotely copy them to another location and then remove the local copies. 
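+下面给出一个最小的示意查询(其中 `backup-host`、`backup_db.hits`、`db.hits` 等名称均为假设):在备份服务器上运行,通过 `remote` 表函数从源服务器拉取数据。
+
+``` sql
+-- 在备份服务器上执行;假设 backup_db.hits 已按相同的表结构创建
+-- 'source-host:9000' 以及库名、表名、账号均为示意值
+INSERT INTO backup_db.hits
+SELECT *
+FROM remote('source-host:9000', 'db', 'hits', 'default', '');
+```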
-## Manipulations with Parts {#manipulations-with-parts}
+## 部件操作 {#manipulations-with-parts}
-ClickHouse allows using the `ALTER TABLE ... FREEZE PARTITION ...` query to create a local copy of table partitions. This is implemented using hardlinks to the `/var/lib/clickhouse/shadow/` folder, so it usually does not consume extra disk space for old data. The created copies of files are not handled by ClickHouse server, so you can just leave them there: you will have a simple backup that doesn’t require any additional external system, but it will still be prone to hardware issues. For this reason, it’s better to remotely copy them to another location and then remove the local copies. Distributed filesystems and object stores are still a good options for this, but normal attached file servers with a large enough capacity might work as well (in this case the transfer will occur via the network filesystem or maybe [rsync](https://en.wikipedia.org/wiki/Rsync)).
+ClickHouse允许使用 `ALTER TABLE ... FREEZE PARTITION ...` 查询来创建表分区的本地副本。 这是通过指向 `/var/lib/clickhouse/shadow/` 文件夹的硬链接实现的,所以它通常不会为旧数据占用额外的磁盘空间。 创建的文件副本不由ClickHouse服务器处理,所以你可以把它们留在那里:你将获得一个不需要任何额外外部系统的简单备份,但它仍然容易受到硬件问题的影响。 出于这个原因,最好将它们远程复制到另一个位置,然后删除本地副本。 分布式文件系统和对象存储仍然是不错的选择,但是具有足够大容量的普通附加文件服务器也可以工作(在这种情况下,传输将通过网络文件系统或 [rsync](https://en.wikipedia.org/wiki/Rsync) 进行)。
-For more information about queries related to partition manipulations, see the [ALTER documentation](../query_language/alter.md#alter_manipulations-with-partitions).
+有关与分区操作相关的查询的详细信息,请参阅 [ALTER 文档](../sql_reference/statements/alter.md#alter_manipulations-with-partitions)。
-A third-party tool is available to automate this approach: [clickhouse-backup](https://github.com/AlexAkulov/clickhouse-backup).
+有一个第三方工具可以自动化此方法: [clickhouse-backup](https://github.com/AlexAkulov/clickhouse-backup)。
-[Original article](https://clickhouse.tech/docs/en/operations/backup/)
+[原始文章](https://clickhouse.tech/docs/en/operations/backup/)
diff --git a/docs/zh/operations/configuration_files.md b/docs/zh/operations/configuration_files.md
index b0c3d22fdaf..6505cfb1fb9 100644
--- a/docs/zh/operations/configuration_files.md
+++ b/docs/zh/operations/configuration_files.md
@@ -1,24 +1,25 @@
-# Configuration Files {#configuration_files}
-The main server config file is `config.xml`. It resides in the `/etc/clickhouse-server/` directory.
+# 配置文件 {#configuration_files}
-Individual settings can be overridden in the `*.xml` and `*.conf` files in the `conf.d` and `config.d` directories next to the config file.
+主服务器配置文件是 `config.xml`,位于 `/etc/clickhouse-server/` 目录中。
-The `replace` or `remove` attributes can be specified for the elements of these config files.
+单个设置可以在配置文件旁边的 `conf.d` 和 `config.d` 目录中的 `*.xml` 和 `*.conf` 文件里被覆盖。
-If neither is specified, it combines the contents of elements recursively, replacing values of duplicate children.
+可以为这些配置文件中的元素指定 `replace` 或 `remove` 属性。
-If `replace` is specified, it replaces the entire element with the specified one.
+如果两者都未指定,则递归组合元素的内容,替换重复子项的值。
-If `remove` is specified, it deletes the element.
+如果指定了 `replace`,则将整个元素替换为指定的元素。
-The config can also define «substitutions». If an element has the `incl` attribute, the corresponding substitution from the file will be used as the value. By default, the path to the file with substitutions is `/etc/metrika.xml`. This can be changed in the [include\_from](server_settings/settings.md#server_settings-include_from) element in the server config. The substitution values are specified in `/yandex/substitution_name` elements in this file. If a substitution specified in `incl` does not exist, it is recorded in the log. To prevent ClickHouse from logging missing substitutions, specify the `optional="true"` attribute (for example, settings for [macros](#macros) server\_settings/settings.md)).
+如果指定了 `remove`,则删除该元素。
-Substitutions can also be performed from ZooKeeper. To do this, specify the attribute `from_zk = "/path/to/node"`. The element value is replaced with the contents of the node at `/path/to/node` in ZooKeeper. You can also put an entire XML subtree on the ZooKeeper node and it will be fully inserted into the source element.
+该配置还可以定义«替换»。 
如果某元素具有 `incl` 属性,则会使用文件中相应的替换作为其值。 默认情况下,包含替换的文件的路径为 `/etc/metrika.xml`。 这可以通过服务器配置中的 [include\_from](server_configuration_parameters/settings.md#server_configuration_parameters-include_from) 元素更改。 替换值在该文件的 `/yandex/substitution_name` 元素中指定。 如果 `incl` 中指定的替换不存在,则会记录到日志中。 要防止ClickHouse记录缺失的替换,请指定 `optional="true"` 属性(例如,[宏](#macros) 的设置,见 server\_settings/settings.md)。
-The `config.xml` file can specify a separate config with user settings, profiles, and quotas. The relative path to this config is set in the ‘users\_config’ element. By default, it is `users.xml`. If `users_config` is omitted, the user settings, profiles, and quotas are specified directly in `config.xml`.
+替换也可以从ZooKeeper执行。 为此,请指定属性 `from_zk = "/path/to/node"`。 元素值会被替换为ZooKeeper中 `/path/to/node` 节点的内容。 您还可以将整个XML子树放在ZooKeeper节点上,它将被完整插入到源元素中。
-In addition, `users_config` may have overrides in files from the `users_config.d` directory (for example, `users.d`) and substitutions. For example, you can have separate config file for each user like this:
+`config.xml` 文件可以指定一个单独的配置,其中包含用户设置、配置文件和配额。 这个配置的相对路径在 ‘users\_config’ 元素中设置。 默认情况下,它是 `users.xml`。 如果省略 `users_config`,则用户设置、配置文件和配额直接在 `config.xml` 中指定。
+
+此外, `users_config` 可以被 `users_config.d` 目录中的文件(例如 `users.d`)覆盖,并可以使用替换。 例如,您可以像下面这样为每个用户提供单独的配置文件:
 ``` xml
 $ cat /etc/clickhouse-server/users.d/alice.xml
@@ -36,8 +37,8 @@ $ cat /etc/clickhouse-server/users.d/alice.xml
 ```
-For each config file, the server also generates `file-preprocessed.xml` files when starting. These files contain all the completed substitutions and overrides, and they are intended for informational use. If ZooKeeper substitutions were used in the config files but ZooKeeper is not available on the server start, the server loads the configuration from the preprocessed file.
+服务器启动时还会为每个配置文件生成 `file-preprocessed.xml` 文件。 这些文件包含所有已完成的替换和覆盖,仅供参考。 如果配置文件中使用了ZooKeeper替换,但服务器启动时ZooKeeper不可用,则服务器将从预处理的文件中加载配置。
-The server tracks changes in config files, as well as files and ZooKeeper nodes that were used when performing substitutions and overrides, and reloads the settings for users and clusters on the fly. This means that you can modify the cluster, users, and their settings without restarting the server. 
+服务器跟踪配置文件中的更改,以及执行替换和覆盖时使用的文件和ZooKeeper节点,并动态重新加载用户和集群的设置。 这意味着您可以在不重新启动服务器的情况下修改集群、用户及其设置。
-[Original article](https://clickhouse.tech/docs/en/operations/configuration_files/)
+[原始文章](https://clickhouse.tech/docs/en/operations/configuration_files/)
diff --git a/docs/zh/operations/index.md b/docs/zh/operations/index.md
index 596ec065f40..4d31fce45af 100644
--- a/docs/zh/operations/index.md
+++ b/docs/zh/operations/index.md
@@ -1,3 +1,4 @@
-# Operations {#operations}
-[Original article](https://clickhouse.tech/docs/en/operations/)
+# 操作 {#operations}
+
+[原始文章](https://clickhouse.tech/docs/en/operations/)
diff --git a/docs/zh/operations/monitoring.md b/docs/zh/operations/monitoring.md
index 97cb8329b2b..6683903f531 100644
--- a/docs/zh/operations/monitoring.md
+++ b/docs/zh/operations/monitoring.md
@@ -1,3 +1,4 @@
+
 # 监控 {#jian-kong}
 可以监控到:
@@ -28,9 +29,9 @@ ClickHouse 收集的指标项:
 - 服务用于计算的资源占用的各种指标。
 - 关于查询处理的常见统计信息。
-可以在 [system.metrics](system_tables.md#system_tables-metrics) ,[system.events](system_tables.md#system_tables-events) 以及[system.asynchronous\_metrics](system_tables.md#system_tables-asynchronous_metrics) 等系统表查看所有的指标项。
+可以在 [system.metrics](system_tables.md#system_tables-metrics) ,[system.events](system_tables.md#system_tables-events) 以及[system.asynchronous\_metrics](system_tables.md#system_tables-asynchronous_metrics) 等系统表查看所有的指标项。
-可以配置ClickHouse 往 [Graphite](https://github.com/graphite-project)导入指标。 参考 [Graphite section](server_settings/settings.md#server_settings-graphite) 配置文件。在配置指标导出之前,需要参考Graphite[官方教程](https://graphite.readthedocs.io/en/latest/install.html)搭建服务。
+可以配置ClickHouse 往 [Graphite](https://github.com/graphite-project)导入指标。 参考 [Graphite 部分](server_configuration_parameters/settings.md#server_configuration_parameters-graphite) 配置文件。在配置指标导出之前,需要参考Graphite[官方教程](https://graphite.readthedocs.io/en/latest/install.html)搭建服务。
 此外,您可以通过HTTP API监视服务器可用性。 将HTTP GET请求发送到 `/ping`。 如果服务器可用,它将以 `200 OK` 响应。
diff --git a/docs/zh/operations/optimizing_performance/index.md b/docs/zh/operations/optimizing_performance/index.md
new file mode 100644
index 00000000000..786a7200b28
--- /dev/null
+++ b/docs/zh/operations/optimizing_performance/index.md
@@ -0,0 +1,8 @@
+---
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_folder_title: "\u4F18\u5316\u6027\u80FD"
+toc_priority: 52
+---
+
+
diff --git a/docs/zh/operations/optimizing_performance/sampling_query_profiler.md b/docs/zh/operations/optimizing_performance/sampling_query_profiler.md
new file mode 100644
index 00000000000..6f0eef0a1ed
--- /dev/null
+++ b/docs/zh/operations/optimizing_performance/sampling_query_profiler.md
@@ -0,0 +1,64 @@
+---
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_priority: 54
+toc_title: "\u67E5\u8BE2\u5206\u6790"
+---
+
+# 采样查询探查器 {#sampling-query-profiler}
+
+ClickHouse自带一个采样探查器,可用来分析查询执行。 使用探查器,您可以找到查询执行期间使用最频繁的源代码例程。 您可以跟踪CPU时间和包括空闲时间在内的挂钟时间。
+
+使用探查器:
+
+- 设置 [trace\_log](../server_configuration_parameters/settings.md#server_configuration_parameters-trace_log) 服务器配置部分。
+
+    该部分配置 [trace\_log](../../operations/system_tables.md#system_tables-trace_log) 系统表,其中包含探查器运行的结果。 它默认已配置。 请记住,此表中的数据仅对正在运行的服务器有效。 服务器重新启动后,ClickHouse不会清理该表,所有存储的虚拟内存地址都可能失效。
+
+- 设置 [query\_profiler\_cpu\_time\_period\_ns](../settings/settings.md#query_profiler_cpu_time_period_ns) 或 [query\_profiler\_real\_time\_period\_ns](../settings/settings.md#query_profiler_real_time_period_ns) 设置。 这两种设置可以同时使用。
+
+    这些设置允许您配置探查器计时器。 由于这些是会话设置,您可以为整个服务器、单个用户或用户配置文件、交互式会话以及每个单独的查询获取不同的采样频率(参见此列表后面的示例)。 
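+下面是一个最小示例(其中的取值仅为示意),演示如何在会话级别调整探查器计时器:
+
+``` sql
+-- 将CPU计时器的采样周期设为10毫秒(服务器默认值为1秒);数值仅为示意
+SET query_profiler_cpu_time_period_ns = 10000000;
+-- 实时(挂钟)计时器可以同时启用
+SET query_profiler_real_time_period_ns = 10000000;
+```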
+默认采样频率为每秒一个采样,CPU和实时定时器都启用。 该频率足以收集有关ClickHouse集群的信息。 同时,在此频率下,探查器不会影响ClickHouse服务器的性能。 如果您需要分析每个单独的查询,请尝试使用更高的采样频率。
+
+要分析 `trace_log` 系统表:
+
+- 安装 `clickhouse-common-static-dbg` 包。 参见 [从DEB软件包安装](../../getting_started/install.md#install-from-deb-packages)。
+
+- 通过 [allow\_introspection\_functions](../settings/settings.md#settings-allow_introspection_functions) 设置启用内省函数。
+
+    出于安全原因,内省函数默认被禁用。
+
+- 使用 `addressToLine`、`addressToSymbol` 和 `demangle` [内省函数](../../sql_reference/functions/introspection.md) 获取函数名称及其在ClickHouse代码中的位置。 要获取某个查询的性能剖析,您需要聚合 `trace_log` 表中的数据。 您可以按单个函数或按整个堆栈跟踪聚合数据(两种聚合方式的示例见下文)。
+
+如果需要将 `trace_log` 信息可视化,可以尝试 [flamegraph](../../interfaces/third-party/gui/#clickhouse-flamegraph) 和 [speedscope](https://github.com/laplab/clickhouse-speedscope)。
+
+## 示例 {#example}
+
+在这个例子中,我们:
+
+- 按查询标识符和当前日期过滤 `trace_log` 数据。
+
+- 按堆栈跟踪聚合。
+
+- 使用内省函数,我们将得到一个包含以下内容的报告:
+
+    - 符号名称和相应的源代码函数。
+    - 这些函数的源代码位置。
+
+``` sql
+SELECT
+    count(),
+    arrayStringConcat(arrayMap(x -> concat(demangle(addressToSymbol(x)), '\n    ', addressToLine(x)), trace), '\n') AS sym
+FROM system.trace_log
+WHERE (query_id = 'ebca3574-ad0a-400a-9cbc-dca382f5998c') AND (event_date = today())
+GROUP BY trace
+ORDER BY count() DESC
+LIMIT 10
+```
+
+``` text
+{% include "operations/performance/sampling_query_profiler_example_result.txt" %}
+```
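+作为补充,下面是一个按单个函数(而不是整个堆栈跟踪)聚合的示意查询,过滤条件仅为示意:
+
+``` sql
+-- 用 arrayJoin 展开堆栈中的每个地址,然后按符号名聚合
+SELECT
+    count(),
+    demangle(addressToSymbol(arrayJoin(trace))) AS symbol
+FROM system.trace_log
+WHERE event_date = today()
+GROUP BY symbol
+ORDER BY count() DESC
+LIMIT 10
+```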
diff --git a/docs/zh/operations/performance/sampling_query_profiler.md b/docs/zh/operations/performance/sampling_query_profiler.md
deleted file mode 100644
index 25368fcd883..00000000000
--- a/docs/zh/operations/performance/sampling_query_profiler.md
+++ /dev/null
@@ -1,61 +0,0 @@
----
-en_copy: true
----
-
-# Sampling Query Profiler {#sampling-query-profiler}
-
-ClickHouse runs sampling profiler that allows analyzing query execution. Using profiler you can find source code routines that used the most frequently during query execution. You can trace CPU time and wall-clock time spent including idle time.
-
-To use profiler:
-
-- Setup the [trace\_log](../server_settings/settings.md#server_settings-trace_log) section of the server configuration.
-
-    This section configures the [trace\_log](../system_tables.md#system_tables-trace_log) system table containing the results of the profiler functioning. It is configured by default. Remember that data in this table is valid only for a running server. After the server restart, ClickHouse doesn’t clean up the table and all the stored virtual memory address may become invalid.
-
-- Setup the [query\_profiler\_cpu\_time\_period\_ns](../settings/settings.md#query_profiler_cpu_time_period_ns) or [query\_profiler\_real\_time\_period\_ns](../settings/settings.md#query_profiler_real_time_period_ns) settings. Both settings can be used simultaneously.
-
-    These settings allow you to configure profiler timers. As these are the session settings, you can get different sampling frequency for the whole server, individual users or user profiles, for your interactive session, and for each individual query.
-
-The default sampling frequency is one sample per second and both CPU and real timers are enabled. This frequency allows collecting enough information about ClickHouse cluster. At the same time, working with this frequency, profiler doesn’t affect ClickHouse server’s performance. If you need to profile each individual query try to use higher sampling frequency.
-
-To analyze the `trace_log` system table:
-
-- Install the `clickhouse-common-static-dbg` package. See [Install from DEB Packages](../../getting_started/install.md#install-from-deb-packages).
-
-- Allow introspection functions by the [allow\_introspection\_functions](../settings/settings.md#settings-allow_introspection_functions) setting.
-
-    For security reasons, introspection functions are disabled by default.
-
-- Use the `addressToLine`, `addressToSymbol` and `demangle` [introspection functions](../../query_language/functions/introspection.md) to get function names and their positions in ClickHouse code. To get a profile for some query, you need to aggregate data from the `trace_log` table. You can aggregate data by individual functions or by the whole stack traces.
-
-If you need to visualize `trace_log` info, try [flamegraph](../../interfaces/third-party/gui/#clickhouse-flamegraph) and [speedscope](https://github.com/laplab/clickhouse-speedscope).
-
-## Example {#example}
-
-In this example we:
-
-- Filtering `trace_log` data by a query identifier and the current date.
-
-- Aggregating by stack trace.
-
-- Using introspection functions, we will get a report of:
-
-    - Names of symbols and corresponding source code functions.
-    - Source code locations of these functions.
-
-``` sql
-SELECT
-    count(),
-    arrayStringConcat(arrayMap(x -> concat(demangle(addressToSymbol(x)), '\n    ', addressToLine(x)), trace), '\n') AS sym
-FROM system.trace_log
-WHERE (query_id = 'ebca3574-ad0a-400a-9cbc-dca382f5998c') AND (event_date = today())
-GROUP BY trace
-ORDER BY count() DESC
-LIMIT 10
-```
-
-``` text
-{% include "operations/performance/sampling_query_profiler_example_result.txt" %}
-```
diff --git a/docs/zh/operations/performance/sampling_query_profiler_example_result.txt b/docs/zh/operations/performance/sampling_query_profiler_example_result.txt
index a5f6d71ca95..56c2fdf9c65 100644
--- a/docs/zh/operations/performance/sampling_query_profiler_example_result.txt
+++ b/docs/zh/operations/performance/sampling_query_profiler_example_result.txt
@@ -1,7 +1,3 @@
----
-en_copy: true
----
-
 Row 1:
 ──────
 count(): 6344
diff --git a/docs/zh/operations/performance_test.md b/docs/zh/operations/performance_test.md
index ae4c5752703..f567a9528a0 100644
--- a/docs/zh/operations/performance_test.md
+++ b/docs/zh/operations/performance_test.md
@@ -1,18 +1,21 @@
---
-en_copy: true
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_priority: 54
+toc_title: "\u6D4B\u8BD5\u786C\u4EF6"
---
-# How To Test Your Hardware With ClickHouse {#how-to-test-your-hardware-with-clickhouse}
+# 如何使用ClickHouse测试您的硬件 {#how-to-test-your-hardware-with-clickhouse}
-With this instruction you can run basic ClickHouse performance test on any server without installation of ClickHouse packages.
+使用此指南,您可以在任何服务器上运行基本的ClickHouse性能测试,而无需安装ClickHouse软件包。
-1.  Go to “commits” page: https://github.com/ClickHouse/ClickHouse/commits/master
+1.  转到 “commits” 页面:https://github.com/ClickHouse/ClickHouse/commits/master
-2.  Click on the first green check mark or red cross with green “ClickHouse Build Check” and click on the “Details” link near “ClickHouse Build Check”.
+2.  点击第一个带有绿色 “ClickHouse Build Check” 的绿色对勾或红色叉号,然后点击 “ClickHouse Build Check” 旁边的 “Details” 链接。 有些提交没有这样的链接,例如文档相关的提交。 在这种情况下,请选择最近的一个带有此链接的提交。
-3.  Copy the link to “clickhouse” binary for amd64 or aarch64.
+3.  复制amd64或aarch64版 “clickhouse” 二进制文件的链接。
-4.  ssh to the server and download it with wget:
+4.  ssh到服务器并使用wget下载它:
      # Then do:
      chmod a+x clickhouse
-1.  
下载配置:
      wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.xml
      wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/users.xml
      mkdir config.d
      wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.d/path.xml -O config.d/path.xml
      wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.d/log_to_console.xml -O config.d/log_to_console.xml
-1.  Download benchmark files:
+1.  下载基准测试文件:
      wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/clickhouse/benchmark-new.sh
      chmod a+x benchmark-new.sh
      wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/clickhouse/queries.sql
-1.  Download test data according to the [Yandex.Metrica dataset](../getting_started/example_datasets/metrica.md) instruction (“hits” table containing 100 million rows).
+1.  根据 [Yandex.Metrica 数据集](../getting_started/example_datasets/metrica.md) 的说明下载测试数据(“hits” 表包含1亿行)。
      wget https://clickhouse-datasets.s3.yandex.net/hits/partitions/hits_100m_obfuscated_v1.tar.xz
      tar xvf hits_100m_obfuscated_v1.tar.xz -C .
      mv hits_100m_obfuscated_v1/* .
-1.  Run the server:
      ./clickhouse server
-1.  Check the data: ssh to the server in another terminal
      ./clickhouse client --query "SELECT count() FROM hits_100m_obfuscated"
      100000000
-1.  Edit the benchmark-new.sh, change “clickhouse-client” to “./clickhouse client” and add “–max\_memory\_usage 100000000000” parameter.
      mcedit benchmark-new.sh
-1.  Run the benchmark:
      ./benchmark-new.sh hits_100m_obfuscated
-1.  Send the numbers and the info about your hardware configuration to clickhouse-feedback@yandex-team.com
+1.  运行服务器:
      ./clickhouse server
+1.  检查数据:在另一个终端ssh到服务器
      ./clickhouse client --query "SELECT count() FROM hits_100m_obfuscated"
      100000000
+1.  编辑benchmark-new.sh,将 “clickhouse-client” 改为 “./clickhouse client”,并添加 “–max\_memory\_usage 100000000000” 参数。
      mcedit benchmark-new.sh
+1.  运行基准测试:
      ./benchmark-new.sh hits_100m_obfuscated
+1.  将测试结果数据和硬件配置信息发送到clickhouse-feedback@yandex-team.com
-All the results are published here: https://clickhouse.tech/benchmark\_hardware.html
+所有结果都在这里公布:https://clickhouse.tech/benchmark\_hardware.html
diff --git a/docs/zh/operations/quotas.md b/docs/zh/operations/quotas.md
index 06f25f57016..3838da00e56 100644
--- a/docs/zh/operations/quotas.md
+++ b/docs/zh/operations/quotas.md
@@ -1,16 +1,17 @@
-# Quotas {#quotas}
-Quotas allow you to limit resource usage over a period of time, or simply track the use of resources.
-Quotas are set up in the user config. This is usually ‘users.xml’.
+# 配额 {#quotas}
+
+配额允许您在一段时间内限制资源使用情况,或者只是跟踪资源的使用。
+配额在用户配置中设置,通常是 ‘users.xml’。
 The system also has a feature for limiting the complexity of a single query. See the section «Restrictions on query complexity»).
-In contrast to query complexity restrictions, quotas:
+与查询复杂性限制相比,配额:
-- Place restrictions on a set of queries that can be run over a period of time, instead of limiting a single query.
-- Account for resources spent on all remote servers for distributed query processing.
+- 对可以在一段时间内运行的一组查询设置限制,而不是限制单个查询。
+- 统计分布式查询处理时在所有远程服务器上消耗的资源。
-Let’s look at the section of the ‘users.xml’ file that defines quotas.
+让我们来看看 ‘users.xml’ 文件中定义配额的部分。
 ``` xml
@@ -32,8 +33,8 @@ Let’s look at the section of the ‘users.xml’ file that defines quotas.
 ```
-By default, the quota just tracks resource consumption for each hour, without limiting usage.
-The resource consumption calculated for each interval is output to the server log after each request. 
+默认情况下,配额只跟踪每小时的资源消耗,而不限制使用情况。
+每次请求后,计算出的每个时间间隔的资源消耗将输出到服务器日志中。
 ``` xml
@@ -61,11 +62,11 @@ The resource consumption calculated for each interval is output to the server lo
 ```
-For the ‘statbox’ quota, restrictions are set for every hour and for every 24 hours (86,400 seconds). The time interval is counted starting from an implementation-defined fixed moment in time. In other words, the 24-hour interval doesn’t necessarily begin at midnight.
+对于 ‘statbox’ 配额,限制按每小时和每24小时(86,400秒)设置。 时间间隔从一个由实现定义的固定时刻开始计数。 换句话说,24小时间隔不一定从午夜开始。
-When the interval ends, all collected values are cleared. For the next hour, the quota calculation starts over.
+间隔结束时,将清除所有收集的值。 在下一个小时内,配额计算将重新开始。
-Here are the amounts that can be restricted:
+以下是可以限制的资源量:
 `queries` – The total number of requests.
 `errors` – The number of queries that threw an exception.
 `result_rows` – The total number of rows given as the result.
 `read_rows` – The total number of source rows read from tables for running the query, on all remote servers.
 `execution_time` – The total query execution time, in seconds (wall time).
-If the limit is exceeded for at least one time interval, an exception is thrown with a text about which restriction was exceeded, for which interval, and when the new interval begins (when queries can be sent again).
+如果在至少一个时间间隔内超出限制,则会引发异常,其中包含有关超出了哪个限制、哪个时间间隔以及新时间间隔何时开始(何时可以再次发送查询)的文本。
 Quotas can use the «quota key» feature in order to report on resources for multiple keys independently. Here is an example of this:
 ``` xml
@@ -96,10 +97,10 @@ Quotas can use the «quota key» feature in order to report on resources for mul
 ```
-The quota is assigned to users in the ‘users’ section of the config. See the section «Access rights». For distributed query processing, the accumulated amounts are stored on the requestor server. So if the user goes to another server, the quota there will «start over».
+配额在配置的 ‘users’ 部分中分配给用户。 参见 «访问权限» 一节。 对于分布式查询处理,累计量存储在发起请求的服务器上。 因此,如果用户转到另一台服务器,那里的配额将«重新开始»。
-When the server is restarted, quotas are reset.
+服务器重新启动时,将重置配额。
-[Original article](https://clickhouse.tech/docs/en/operations/quotas/)
+[原始文章](https://clickhouse.tech/docs/en/operations/quotas/)
diff --git a/docs/zh/operations/requirements.md b/docs/zh/operations/requirements.md
index 9dd5553a241..d48de98f85c 100644
--- a/docs/zh/operations/requirements.md
+++ b/docs/zh/operations/requirements.md
@@ -1,58 +1,61 @@
---
-en_copy: true
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_priority: 44
+toc_title: "\u8981\u6C42"
---
-# Requirements {#requirements}
+# 要求 {#requirements}
## CPU {#cpu}
-For installation from prebuilt deb packages, use a CPU with x86\_64 architecture and support for SSE 4.2 instructions. To run ClickHouse with processors that do not support SSE 4.2 or have AArch64 or PowerPC64LE architecture, you should build ClickHouse from sources.
+对于从预构建的deb包进行安装,请使用具有x86\_64架构并支持SSE4.2指令的CPU。 要使用不支持SSE4.2或具有AArch64或PowerPC64LE体系结构的处理器运行ClickHouse,您应该从源代码构建ClickHouse。
-ClickHouse implements parallel data processing and uses all the hardware resources available. When choosing a processor, take into account that ClickHouse works more efficiently at configurations with a large number of cores but a lower clock rate than at configurations with fewer cores and a higher clock rate. For example, 16 cores with 2600 MHz is preferable to 8 cores with 3600 MHz.
+ClickHouse实现并行数据处理并使用所有可用的硬件资源。 在选择处理器时,请考虑到ClickHouse在内核数量多但时钟频率较低的配置中,比在内核较少而时钟频率较高的配置中工作效率更高。 例如,2600MHz的16核优于3600MHz的8核。
-Use of **Turbo Boost** and **hyper-threading** technologies is recommended. It significantly improves performance with a typical load. 
+建议使用 **Turbo Boost** 和 **超线程(hyper-threading)** 技术。 它能显著提高典型工作负载的性能。
## RAM {#ram}
-We recommend to use a minimum of 4GB of RAM in order to perform non-trivial queries. The ClickHouse server can run with a much smaller amount of RAM, but it requires memory for processing queries.
+我们建议使用至少4GB的RAM来执行非平凡的查询。 ClickHouse服务器可以用少得多的RAM运行,但它需要内存来处理查询。
-The required volume of RAM depends on:
-- The complexity of queries.
-- The amount of data that is processed in queries.
+所需的RAM容量取决于:
+- 查询的复杂性。
+- 查询中处理的数据量。
-To calculate the required volume of RAM, you should estimate the size of temporary data for [GROUP BY](../query_language/select.md#select-group-by-clause), [DISTINCT](../query_language/select.md#select-distinct), [JOIN](../query_language/select.md#select-join) and other operations you use.
+要计算所需的RAM容量,您应该估计 [GROUP BY](../sql_reference/statements/select.md#select-group-by-clause)、[DISTINCT](../sql_reference/statements/select.md#select-distinct)、[JOIN](../sql_reference/statements/select.md#select-join) 以及您使用的其他操作所需的临时数据大小。
-ClickHouse can use external memory for temporary data. See [GROUP BY in External Memory](../query_language/select.md#select-group-by-in-external-memory) for details.
+ClickHouse可以使用外部存储器来存储临时数据。 详情参见 [在外部存储器中分组](../sql_reference/statements/select.md#select-group-by-in-external-memory)。
-## Swap File {#swap-file}
+## 交换文件 {#swap-file}
-Disable the swap file for production environments.
+禁用生产环境的交换文件。
-## Storage Subsystem {#storage-subsystem}
+## 存储子系统 {#storage-subsystem}
-You need to have 2GB of free disk space to install ClickHouse.
+您需要有2GB的可用磁盘空间来安装ClickHouse。
-The volume of storage required for your data should be calculated separately. Assessment should include:
-- Estimation of the data volume.
-    You can take a sample of the data and get the average size of a row from it. Then multiply the value by the number of rows you plan to store.
-- The data compression coefficient.
-    To estimate the data compression coefficient, load a sample of your data into ClickHouse and compare the actual size of the data with the size of the table stored. For example, clickstream data is usually compressed by 6-10 times.
+数据所需的存储量应单独计算。 评估应包括:
+- 估计数据量。 您可以取一份数据样本并从中得到行的平均大小。 然后将该值乘以计划存储的行数。
+- 数据压缩系数。 要估计数据压缩系数,请将数据样本加载到ClickHouse中,并将数据的实际大小与存储后表的大小进行比较。 例如,点击流数据通常可以压缩6-10倍。
-To calculate the final volume of data to be stored, apply the compression coefficient to the estimated data volume. If you plan to store data in several replicas, then multiply the estimated volume by the number of replicas.
+要计算要存储的最终数据量,请将压缩系数应用于估计的数据量。 如果计划将数据存储在多个副本中,则将估计的数据量乘以副本数。
-## Network {#network}
+## 网络 {#network}
-If possible, use networks of 10G or higher class.
+如果可能的话,使用10G或更高级别的网络。
-The network bandwidth is critical for processing distributed queries with a large amount of intermediate data. In addition, network speed affects replication processes.
+网络带宽对于处理具有大量中间数据的分布式查询至关重要。 此外,网络速度会影响复制过程。
-## Software {#software}
-ClickHouse is developed for the Linux family of operating systems. The recommended Linux distribution is Ubuntu. The `tzdata` package should be installed in the system.
+## 软件 {#software}
+ClickHouse主要是为Linux系列操作系统开发的。 推荐的Linux发行版是Ubuntu。 系统中应安装 `tzdata` 软件包。
-ClickHouse can also work in other operating system families. See details in the [Getting started](../getting_started/index.md) section of the documentation. 
+ClickHouse也可以在其他操作系统家族中工作。 详情参见文档的 [入门](../getting_started/index.md) 部分。
diff --git a/docs/zh/operations/server_configuration_parameters/index.md b/docs/zh/operations/server_configuration_parameters/index.md
new file mode 100644
index 00000000000..cf3f158b37c
--- /dev/null
+++ b/docs/zh/operations/server_configuration_parameters/index.md
@@ -0,0 +1,12 @@
+
+# 服务器配置参数 {#server-settings}
+
+本节包含无法在会话或查询级别更改的服务器设置的说明。
+
+这些设置存储在ClickHouse服务器上的 `config.xml` 文件中。
+
+其他设置在 «[设置](../settings/index.md#settings)» 部分中介绍。
+
+在研究这些设置之前,请先阅读 [配置文件](../configuration_files.md#configuration_files) 部分,并注意替换的使用(`incl` 和 `optional` 属性)。
+
+[原始文章](https://clickhouse.tech/docs/en/operations/server_configuration_parameters/)
diff --git a/docs/zh/operations/server_configuration_parameters/settings.md b/docs/zh/operations/server_configuration_parameters/settings.md
new file mode 100644
index 00000000000..b78f8173741
--- /dev/null
+++ b/docs/zh/operations/server_configuration_parameters/settings.md
@@ -0,0 +1,872 @@
+---
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_priority: 57
+toc_title: "\u670D\u52A1\u5668\u8BBE\u7F6E"
+---
+
+# 服务器设置 {#server-settings}
+
+## builtin\_dictionaries\_reload\_interval {#builtin-dictionaries-reload-interval}
+
+重新加载内置字典之前的时间间隔(以秒为单位)。
+
+ClickHouse每x秒重新加载一次内置字典。 这样就可以在不重启服务器的情况下 “即时” 编辑字典。
+
+默认值:3600.
+
+**示例**
+
+``` xml
+3600
+```
+
+## 压缩 {#server-settings-compression}
+
+[MergeTree](../../engines/table_engines/mergetree_family/mergetree.md) 引擎表的数据压缩设置。
+
+!!! warning "警告"
+    如果您刚开始使用ClickHouse,请不要使用它。
+
+配置模板:
+
+``` xml
+ ... ... ... ... 
+```
+
+`<case>` 字段:
+
+- `min_part_size` – The minimum size of a data part.
+- `min_part_size_ratio` – The ratio of the data part size to the table size.
+- `method` – Compression method. Acceptable values: `lz4` 或 `zstd`.
+
+您可以配置多个 `<case>` 部分。
+
+满足条件时的操作:
+
+- 如果数据部分与某个条件集匹配,ClickHouse将使用指定的压缩方法。
+- 如果数据部分匹配多个条件集,ClickHouse将使用第一个匹配的条件集。
+
+如果数据部分不满足任何条件,ClickHouse使用 `lz4` 压缩。
+
+**示例**
+
+``` xml
+ 10000000000 0.01 zstd 
+```
+
+## default\_database {#default-database}
+
+默认数据库。
+
+要获取数据库列表,请使用 [SHOW DATABASES](../../sql_reference/statements/show.md#show-databases) 查询。
+
+**示例**
+
+``` xml
+default
+```
+
+## default\_profile {#default-profile}
+
+默认设置配置文件。
+
+设置配置文件位于 `user_config` 参数指定的文件中。
+
+**示例**
+
+``` xml
+default
+```
+
+## dictionaries\_config {#server_configuration_parameters-dictionaries_config}
+
+外部字典的配置文件的路径。
+
+路径:
+
+- 指定相对于服务器配置文件的绝对路径或相对路径。
+- 路径可以包含通配符\*和?.
+
+另请参阅 “[外部字典](../../sql_reference/dictionaries/external_dictionaries/external_dicts.md)”.
+
+**示例**
+
+``` xml
+*_dictionary.xml
+```
+
+## dictionaries\_lazy\_load {#server_configuration_parameters-dictionaries_lazy_load}
+
+延迟加载字典。
+
+如果为 `true`,则每个字典在第一次使用时创建。 如果字典创建失败,则使用该字典的函数将引发异常。
+
+如果为 `false`,则服务器启动时创建所有字典;如果出现错误,服务器将关闭。
+
+默认值为 `true`.
+
+**示例**
+
+``` xml
+true
+```
+
+## format\_schema\_path {#server_configuration_parameters-format_schema_path}
+
+包含输入数据架构的目录路径,例如 [CapnProto](../../interfaces/formats.md#capnproto) 格式输入数据的架构。
+
+**示例**
+
+``` xml
+  format_schemas/
+```
+
+## Graphite {#server_configuration_parameters-graphite}
+
+将数据发送到 [Graphite](https://github.com/graphite-project)。
+
+设置:
+
+- host – The Graphite server.
+- port – The port on the Graphite server.
+- interval – The interval for sending, in seconds.
+- timeout – The timeout for sending data, in seconds.
+- root\_path – Prefix for keys. 
+- metrics – 从 [system.metrics](../../operations/system_tables.md#system_tables-metrics) 表发送数据。
+- events – 从 [system.events](../../operations/system_tables.md#system_tables-events) 表发送该时间段内累积的增量数据。
+- events\_cumulative – 从 [system.events](../../operations/system_tables.md#system_tables-events) 表发送累计数据。
+- asynchronous\_metrics – 从 [system.asynchronous\_metrics](../../operations/system_tables.md#system_tables-asynchronous_metrics) 表发送数据。
+
+您可以配置多个 `<graphite>` 子句。 例如,您可以用它以不同的时间间隔发送不同的数据。
+
+**示例**
+
+``` xml
+ localhost 42000 0.1 60 one_min true true false true 
+```
+
+## graphite\_rollup {#server_configuration_parameters-graphite-rollup}
+
+用于精简(rollup)Graphite数据的设置。
+
+有关详细信息,请参阅 [GraphiteMergeTree](../../engines/table_engines/mergetree_family/graphitemergetree.md).
+
+**示例**
+
+``` xml
+ max 0 60 3600 300 86400 3600 
+```
+
+## http\_port/https\_port {#http-porthttps-port}
+
+通过HTTP连接到服务器的端口。
+
+如果指定了 `https_port`,则必须配置 [openSSL](#server_configuration_parameters-openssl)。
+
+如果指定了 `http_port`,即使设置了OpenSSL配置,也会被忽略。
+
+**示例**
+
+``` xml
+0000
+```
+
+## http\_server\_default\_response {#server_configuration_parameters-http_server_default_response}
+
+访问ClickHouse HTTP(s)服务器时默认显示的页面。
+默认值为 “Ok.” (最后有换行符)
+
+**示例**
+
+访问 `http://localhost: http_port` 时打开 `https://tabix.io/`。
+
+``` xml
+
    ]]> +
+```
+
+## include\_from {#server_configuration_parameters-include_from}
+
+带替换的文件的路径。
+
+有关详细信息,请参阅 “[配置文件](../configuration_files.md#configuration_files)” 部分。
+
+**示例**
+
+``` xml
+/etc/metrica.xml
+```
+
+## interserver\_http\_port {#interserver-http-port}
+
+用于在ClickHouse服务器之间交换数据的端口。
+
+**示例**
+
+``` xml
+9009
+```
+
+## interserver\_http\_host {#interserver-http-host}
+
+其他服务器可用于访问此服务器的主机名。
+
+如果省略,则按照与 `hostname -f` 命令相同的方式确定。
+
+可用于摆脱对特定网络接口的绑定。
+
+**示例**
+
+``` xml
+example.yandex.ru
+```
+
+## interserver\_http\_credentials {#server-settings-interserver-http-credentials}
+
+在使用复制\*引擎进行 [复制](../../engines/table_engines/mergetree_family/replication.md) 时用于身份验证的用户名和密码。 这些凭据仅用于副本之间的通信,与ClickHouse客户端的凭据无关。 服务器用这些凭据校验副本的连接,并在连接其他副本时使用相同的凭据。 因此,这些凭据应该为集群中的所有副本设置相同。
+默认情况下,不使用身份验证。
+
+本节包含以下参数:
+
+- `user` — username.
+- `password` — password.
+
+**示例**
+
+``` xml
+ admin 222 
+```
+
+## keep\_alive\_timeout {#keep-alive-timeout}
+
+ClickHouse在关闭连接之前等待传入请求的秒数。 默认为3秒。
+
+**示例**
+
+``` xml
+3
+```
+
+## listen\_host {#server_configuration_parameters-listen_host}
+
+限制请求可以来自哪些主机。 如果您希望服务器响应所有主机的请求,请指定 `::`。
+
+例:
+
+``` xml
+::1
+127.0.0.1
+```
+
+## 记录器 {#server_configuration_parameters-logger}
+
+日志记录设置。
+
+键:
+
+- level – Logging level. Acceptable values: `trace`, `debug`, `information`, `warning`, `error`.
+- log – The log file. Contains all the entries according to `level`.
+- errorlog – Error log file.
+- size – Size of the file. Applies to `log`和`errorlog`. 一旦文件达到 `size`,ClickHouse会将其归档并重命名,并在原位置创建一个新的日志文件。
+- count – The number of archived log files that ClickHouse stores.
+
+**示例**
+
+``` xml
+ trace /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.err.log 1000M 10 
+```
+
+还支持写入系统日志。 配置示例:
+
+``` xml
+ 1 
    syslog.remote:10514
    + myhost.local + LOG_LOCAL6 + syslog +
    +
+```
+
+键:
+
+- use\_syslog — Required setting if you want to write to the syslog.
+- address — The host\[:port\] of syslogd. If omitted, the local daemon is used.
+- hostname — Optional. The name of the host that logs are sent from.
+- facility — [系统日志设施关键字](https://en.wikipedia.org/wiki/Syslog#Facility),大写形式,带 “LOG\_” 前缀: (`LOG_USER`, `LOG_DAEMON`, `LOG_LOCAL3`,等等)。
+    默认值:如果指定了 `address`,则为 `LOG_USER`,否则为 `LOG_DAEMON`。
+- format – Message format. Possible values: `bsd` 和 `syslog`。
+
+## 宏 {#macros}
+
+复制表的参数替换。
+
+如果不使用复制表,则可以省略。
+
+有关详细信息,请参阅 “[创建复制表](../../engines/table_engines/mergetree_family/replication.md)” 部分。
+
+**示例**
+
+``` xml
+
+```
+
+## mark\_cache\_size {#server-mark-cache-size}
+
+供 [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md) 家族的表引擎使用的标记缓存的近似大小(以字节为单位)。
+
+缓存由服务器共享,内存按需分配。 缓存大小必须至少为5368709120。
+
+**示例**
+
+``` xml
+5368709120
+```
+
+## max\_concurrent\_queries {#max-concurrent-queries}
+
+同时处理的请求的最大数量。
+
+**示例**
+
+``` xml
+100
+```
+
+## max\_connections {#max-connections}
+
+入站连接的最大数量。
+
+**示例**
+
+``` xml
+4096
+```
+
+## max\_open\_files {#max-open-files}
+
+打开文件的最大数量。
+
+默认情况下: `maximum`.
+
+我们建议在Mac OS X中使用此选项,因为 `getrlimit()` 函数返回一个不正确的值。
+
+**示例**
+
+``` xml
+262144
+```
+
+## max\_table\_size\_to\_drop {#max-table-size-to-drop}
+
+限制删除表。
+
+如果一个 [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md) 表的大小超过 `max_table_size_to_drop`(以字节为单位),则无法使用DROP查询将其删除。
+
+如果仍然需要在不重新启动ClickHouse服务器的情况下删除表,请创建 `/flags/force_drop_table` 文件并运行DROP查询。
+
+默认值:50GB。
+
+值0表示您可以删除所有表而不受任何限制。
+
+**示例**
+
+``` xml
+0
+```
+
+## merge\_tree {#server_configuration_parameters-merge_tree}
+
+对 [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md) 中的表进行微调。
+
+有关详细信息,请参阅 MergeTreeSettings.h 头文件。
+
+**示例**
+
+``` xml
+ 5 
+```
+
+## openSSL {#server_configuration_parameters-openssl}
+
+SSL客户端/服务器配置。
+
+对SSL的支持由 `libpoco` 库提供。 该接口在文件 [SSLManager.h](https://github.com/ClickHouse-Extras/poco/blob/master/NetSSL_OpenSSL/include/Poco/Net/SSLManager.h) 中描述
+
+服务器/客户端设置的键:
+
+- privateKeyFile – The path to the file with the secret key of the PEM certificate. The file may contain a key and certificate at the same time.
+- certificateFile – The path to the client/server certificate file in PEM format. You can omit it if `privateKeyFile` 包含证书。
+- caConfig – The path to the file or directory that contains trusted root certificates.
+- verificationMode – The method for checking the node's certificates. Details are in the description of the [Context](https://github.com/ClickHouse-Extras/poco/blob/master/NetSSL_OpenSSL/include/Poco/Net/Context.h) 类。 可能的值: `none`, `relaxed`, `strict`, `once`.
+- verificationDepth – The maximum length of the verification chain. Verification will fail if the certificate chain length exceeds the set value.
+- loadDefaultCAFile – Indicates that built-in CA certificates for OpenSSL will be used. Acceptable values: `true`, `false`. \|
+- cipherList – Supported OpenSSL encryptions. For example: `ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH`.
+- cacheSessions – Enables or disables caching sessions. Must be used in combination with `sessionIdContext`. 可接受的值: `true`, `false`.
+- sessionIdContext – A unique set of random characters that the server appends to each generated identifier. The length of the string must not exceed `SSL_MAX_SSL_SESSION_ID_LENGTH`. 始终建议使用此参数,因为当服务器缓存会话、客户端也请求缓存时,它有助于避免出现问题。 默认值: `${application.name}`.
+- sessionCacheSize – The maximum number of sessions that the server caches. Default value: 1024\*20. 
0 – Unlimited sessions.
+- sessionTimeout – Time for caching the session on the server.
+- extendedVerification – Automatically extended verification of certificates after the session ends. Acceptable values: `true`, `false`.
+- requireTLSv1 – Require a TLSv1 connection. Acceptable values: `true`, `false`.
+- requireTLSv1\_1 – Require a TLSv1.1 connection. Acceptable values: `true`, `false`.
+- requireTLSv1\_2 – Require a TLSv1.2 connection. Acceptable values: `true`, `false`.
+- fips – Activates OpenSSL FIPS mode. Supported if the library's OpenSSL version supports FIPS.
+- privateKeyPassphraseHandler – Class (PrivateKeyPassphraseHandler subclass) that requests the passphrase for accessing the private key. For example: ``, `KeyFileHandler`, `test`, ``.
+- invalidCertificateHandler – Class (a subclass of CertificateHandler) for verifying invalid certificates. For example: ` ConsoleCertificateHandler ` .
+- disableProtocols – Protocols that are not allowed to use.
+- preferServerCiphers – Preferred server ciphers on the client.
+
+**设置示例:**
+
+``` xml
+ /etc/clickhouse-server/server.crt /etc/clickhouse-server/server.key /etc/clickhouse-server/dhparam.pem none true true sslv2,sslv3 true true true sslv2,sslv3 true RejectCertificateHandler 
+```
+
+## part\_log {#server_configuration_parameters-part-log}
+
+记录与 [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md) 相关联的事件,例如数据的添加或合并。 您可以使用日志来模拟合并算法并比较它们的特征。 您可以将合并过程可视化。
+
+查询记录在 [system.part\_log](../../operations/system_tables.md#system_tables-part-log) 表中,而不是在一个单独的文件里。 您可以通过 `table` 参数配置该表的名称(见下文)。
+
+使用以下参数配置日志记录:
+
+- `database` – Name of the database.
+- `table` – Name of the system table.
+- `partition_by` – Sets a [自定义分区键](../../engines/table_engines/mergetree_family/custom_partitioning_key.md).
+- `flush_interval_milliseconds` – Interval for flushing data from the buffer in memory to the table.
+
+**示例**
+
+``` xml
+ system + part_log
    + toMonday(event_date) + 7500 +
+```
+
+## 路径 {#server_configuration_parameters-path}
+
+包含数据的目录的路径。
+
+!!! note "注"
+    尾部斜杠是强制性的。
+
+**示例**
+
+``` xml
+/var/lib/clickhouse/
+```
+
+## query\_log {#server_configuration_parameters-query-log}
+
+用于记录通过 [log\_queries=1](../settings/settings.md) 设置接收到的查询的设置。
+
+查询记录在 [system.query\_log](../../operations/system_tables.md#system_tables-query_log) 表中,而不是在一个单独的文件里。 您可以通过 `table` 参数更改表的名称(见下文)。
+
+使用以下参数配置日志记录:
+
+- `database` – Name of the database.
+- `table` – Name of the system table the queries will be logged in.
+- `partition_by` – 为表设置 [自定义分区键](../../engines/table_engines/mergetree_family/custom_partitioning_key.md)。
+- `flush_interval_milliseconds` – Interval for flushing data from the buffer in memory to the table.
+
+如果该表不存在,ClickHouse将创建它。 如果在ClickHouse服务器更新时查询日志的结构发生了更改,则会重命名具有旧结构的表,并自动创建新表。
+
+**示例**
+
+``` xml
+ system + query_log
    + toMonday(event_date) + 7500 +
+```
+
+## query\_thread\_log {#server_configuration_parameters-query-thread-log}
+
+用于记录通过 [log\_query\_threads=1](../settings/settings.md#settings-log-query-threads) 设置接收到的查询线程的设置。
+
+查询记录在 [system.query\_thread\_log](../../operations/system_tables.md#system_tables-query-thread-log) 表中,而不是在一个单独的文件里。 您可以通过 `table` 参数更改表的名称(见下文)。
+
+使用以下参数配置日志记录:
+
+- `database` – Name of the database.
+- `table` – Name of the system table the queries will be logged in.
+- `partition_by` – 为系统表设置 [自定义分区键](../../engines/table_engines/mergetree_family/custom_partitioning_key.md)。
+- `flush_interval_milliseconds` – Interval for flushing data from the buffer in memory to the table.
+
+如果该表不存在,ClickHouse将创建它。 如果更新ClickHouse服务器时查询线程日志的结构发生了更改,则会重命名具有旧结构的表,并自动创建新表。
+
+**示例**
+
+``` xml
+ system + query_thread_log
    + toMonday(event_date) + 7500 +
+```
+
+## trace\_log {#server_configuration_parameters-trace_log}
+
+用于 [trace\_log](../../operations/system_tables.md#system_tables-trace_log) 系统表运行的设置。
+
+参数:
+
+- `database` — Database for storing a table.
+- `table` — Table name.
+- `partition_by` — 系统表的 [自定义分区键](../../engines/table_engines/mergetree_family/custom_partitioning_key.md)。
+- `flush_interval_milliseconds` — Interval for flushing data from the buffer in memory to the table.
+
+默认服务器配置文件 `config.xml` 包含以下设置部分:
+
+``` xml
+ system + trace_log
    + toYYYYMM(event_date) + 7500 +
+```
+
+## query\_masking\_rules {#query-masking-rules}
+
+基于正则表达式的规则。在把查询及所有日志消息存入服务器日志、`system.query_log`、`system.text_log`、`system.processes` 表以及发送给客户端的日志之前,这些规则会先应用于它们。 这可以防止SQL查询中的敏感数据(如姓名、电子邮件、个人标识符或信用卡号码)泄漏到日志中。
+
+**示例**
+
+``` xml
+ hide SSN (^|\D)\d{3}-\d{2}-\d{4}($|\D) 000-00-0000 
+```
+
+配置字段:
+- `name` -规则的名称(可选)
+- `regexp` -RE2兼容正则表达式(强制性)
+- `replace` -敏感数据的替换字符串(可选,默认情况下-六个星号)
+
+屏蔽规则应用于整个查询(以防止敏感数据从格式错误/不可解析的查询泄漏)。
+
+`system.events` 表有一个计数器 `QueryMaskingRulesMatch`,记录查询屏蔽规则匹配的总数。
+
+对于分布式查询,必须单独配置每个服务器,否则传递给其他节点的子查询将不经屏蔽地被存储。
+
+## remote\_servers {#server-settings-remote-servers}
+
+供 [分布式](../../engines/table_engines/special/distributed.md) 表引擎和 `cluster` 表函数使用的集群配置。
+
+**示例**
+
+``` xml
+
+```
+
+关于 `incl` 属性的值,请参阅 “[配置文件](../configuration_files.md#configuration_files)” 部分。
+
+**另请参阅**
+
+- [skip\_unavailable\_shards](../settings/settings.md#settings-skip_unavailable_shards)
+
+## 时区 {#server_configuration_parameters-timezone}
+
+服务器的时区。
+
+指定为UTC时区或地理位置(例如,非洲/阿比让)的IANA标识符。
+
+当DateTime字段输出为文本格式(打印在屏幕上或文件中)时,以及从字符串获取DateTime时,时区对于字符串和DateTime格式之间的转换是必需的。 此外,如果在输入参数中没有收到时区,则时区用于处理时间和日期的函数。
+
+**示例**
+
+``` xml
+Europe/Moscow
+```
+
+## tcp\_port {#server_configuration_parameters-tcp_port}
+
+通过TCP协议与客户端通信的端口。
+
+**示例**
+
+``` xml
+9000
+```
+
+## tcp\_port\_secure {#server_configuration_parameters-tcp_port-secure}
+
+TCP端口,用于与客户端进行安全通信。 与 [OpenSSL](#server_configuration_parameters-openssl) 设置配合使用。
+
+**可能的值**
+
+整数。
+
+**默认值**
+
+``` xml
+9440
+```
+
+## mysql\_port {#server_configuration_parameters-mysql_port}
+
+通过MySQL协议与客户端通信的端口。
+
+**可能的值**
+
+整数。
+
+示例
+
+``` xml
+9004
+```
+
+## tmp\_path {#server-settings-tmp_path}
+
+用于处理大型查询的临时数据的路径。
+
+!!! note "注"
+    尾部斜杠是强制性的。
+
+**示例**
+
+``` xml
+/var/lib/clickhouse/tmp/
+```
+
+## tmp\_policy {#server-settings-tmp-policy}
+
+[`storage_configuration`](../../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-multiple-volumes) 中用于存储临时文件的策略。
+如果未设置,则使用 [`tmp_path`](#server-settings-tmp_path),否则忽略 `tmp_path`。
+
+!!! note "注"
+    - `move_factor` 被忽略
+- `keep_free_space_bytes` 被忽略
+- `max_data_part_size_bytes` 被忽略
+- 该策略中必须只有一个卷
+
+## uncompressed\_cache\_size {#server-settings-uncompressed_cache_size}
+
+供 [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md) 家族的表引擎使用的未压缩数据缓存大小(以字节为单位)。
+
+服务器有一个共享缓存。 内存按需分配。 只有在启用 [use\_uncompressed\_cache](../settings/settings.md#setting-use_uncompressed_cache) 选项时才使用缓存。
+
+在个别情况下,未压缩的缓存对于非常短的查询是有利的。
+
+**示例**
+
+``` xml
+8589934592
+```
+
+## user\_files\_path {#server_configuration_parameters-user_files_path}
+
+包含用户文件的目录。 在表函数 [file()](../../sql_reference/table_functions/file.md) 中使用。
+
+**示例**
+
+``` xml
+/var/lib/clickhouse/user_files/
+```
+
+## users\_config {#users-config}
+
+包含以下内容的文件的路径:
+
+- 用户配置。
+- 访问权限。
+- 设置配置文件。
+- 配额设置。
+
+**示例**
+
+``` xml
+users.xml
+```
+
+## ZooKeeper {#server-settings_zookeeper}
+
+包含允许ClickHouse与 [ZooKeeper](http://zookeeper.apache.org/) 集群交互的设置。
+
+ClickHouse使用ZooKeeper在使用复制表时存储副本的元数据。 如果未使用复制表,则可以省略此部分参数。
+
+本节包含以下参数:
+
+- `node` — ZooKeeper endpoint. You can set multiple endpoints.
+
+    例如:
+
+``` xml
+ example_host 2181 
+```
+
+      The `index` attribute specifies the node order when trying to connect to the ZooKeeper cluster.
+
+- `session_timeout` — Maximum timeout for the client session in milliseconds. 
+- `root` — The [znode](http://zookeeper.apache.org/doc/r3.5.5/zookeeperOver.html#Nodes+and+ephemeral+nodes) 用作ClickHouse服务器所使用znode的根节点。 可选。
+- `identity` — User and password, that can be required by ZooKeeper to give access to requested znodes. Optional.
+
+**配置示例**
+
+``` xml
+<zookeeper>
+    <node>
+        <host>example1</host>
+        <port>2181</port>
+    </node>
+    <node>
+        <host>example2</host>
+        <port>2181</port>
+    </node>
+    <session_timeout_ms>30000</session_timeout_ms>
+    <operation_timeout_ms>10000</operation_timeout_ms>
+    <!-- Optional. Chroot suffix. Should exist. -->
+    <root>/path/to/zookeeper/node</root>
+    <!-- Optional. Zookeeper digest ACL string. -->
+    <identity>user:password</identity>
+</zookeeper>
+```
+
+**另请参阅**
+
+- [复制](../../engines/table_engines/mergetree_family/replication.md)
+- [ZooKeeper 程序员指南](http://zookeeper.apache.org/doc/current/zookeeperProgrammers.html)
+
+## use\_minimalistic\_part\_header\_in\_zookeeper {#server-settings-use_minimalistic_part_header_in_zookeeper}
+
+ZooKeeper中数据部分头的存储方法。
+
+此设置仅适用于 `MergeTree` 家族。它可以指定:
+
+- 全局地,在 `config.xml` 文件的 [merge\_tree](#server_configuration_parameters-merge_tree) 一节中。
+
+    ClickHouse使用服务器上所有表的设置。 您可以随时更改设置。 当设置更改时,现有表会更改其行为。
+
+- 对于每个表。
+
+    创建表时,指定相应的 [引擎设置](../../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-creating-a-table)。 即使全局设置更改,具有此设置的现有表的行为也不会更改。
+
+**可能的值**
+
+- 0 — Functionality is turned off.
+- 1 — Functionality is turned on.
+
+如果 `use_minimalistic_part_header_in_zookeeper = 1`,则 [复制](../../engines/table_engines/mergetree_family/replication.md) 表会把数据部分的头紧凑地存储在单个 `znode` 中。 如果表包含许多列,则此存储方法显着减少了Zookeeper中存储的数据量。
+
+!!! attention "注意"
+    应用 `use_minimalistic_part_header_in_zookeeper = 1` 之后,您不能将ClickHouse服务器降级到不支持此设置的版本。 在集群中的服务器上升级ClickHouse时要小心。 不要一次升级所有服务器。 在测试环境中或在集群的几台服务器上测试ClickHouse的新版本更安全。
+
+    Data part headers already stored with this setting can't be restored to their previous (non-compact) representation.
+
+**默认值:** 0.
+
+## disable\_internal\_dns\_cache {#server-settings-disable-internal-dns-cache}
+
+禁用内部DNS缓存。 推荐用于在基础设施频繁变化的系统(如Kubernetes)中运行ClickHouse。
+
+**默认值:** 0.
+
+## dns\_cache\_update\_period {#server-settings-dns-cache-update-period}
+
+更新存储在ClickHouse内部DNS缓存中的IP地址的周期(以秒为单位)。
+更新是在一个单独的系统线程中异步执行的。
+
+**默认值**: 15.
+
+[原始文章](https://clickhouse.tech/docs/en/operations/server_configuration_parameters/settings/)
diff --git a/docs/zh/operations/server_settings/index.md b/docs/zh/operations/server_settings/index.md
deleted file mode 100644
index 4a1276a2ce1..00000000000
--- a/docs/zh/operations/server_settings/index.md
+++ /dev/null
@@ -1,11 +0,0 @@
-# Server configuration parameters {#server-settings}
-
-This section contains descriptions of server settings that cannot be changed at the session or query level.
-
-These settings are stored in the `config.xml` file on the ClickHouse server.
-
-Other settings are described in the «[Settings](../settings/index.md#settings)» section.
-
-Before studying the settings, read the [Configuration files](../configuration_files.md#configuration_files) section and note the use of substitutions (the `incl` and `optional` attributes).
-
-[Original article](https://clickhouse.tech/docs/en/operations/server_settings/)
diff --git a/docs/zh/operations/server_settings/settings.md b/docs/zh/operations/server_settings/settings.md
deleted file mode 100644
index bfc1aca7217..00000000000
--- a/docs/zh/operations/server_settings/settings.md
+++ /dev/null
@@ -1,869 +0,0 @@
----
-en_copy: true
----
-
-# Server Settings {#server-settings}
-
-## builtin\_dictionaries\_reload\_interval {#builtin-dictionaries-reload-interval}
-
-The interval in seconds before reloading built-in dictionaries.
-
-ClickHouse reloads built-in dictionaries every x seconds. This makes it possible to edit dictionaries “on the fly” without restarting the server.
-
-Default value: 3600.
- -**Example** - -``` xml -3600 -``` - -## compression {#server-settings-compression} - -Data compression settings for [MergeTree](../table_engines/mergetree.md)-engine tables. - -!!! warning "Warning" - Don’t use it if you have just started using ClickHouse. - -Configuration template: - -``` xml - - - ... - ... - ... - - ... - -``` - -`` fields: - -- `min_part_size` – The minimum size of a data part. -- `min_part_size_ratio` – The ratio of the data part size to the table size. -- `method` – Compression method. Acceptable values: `lz4` or `zstd`. - -You can configure multiple `` sections. - -Actions when conditions are met: - -- If a data part matches a condition set, ClickHouse uses the specified compression method. -- If a data part matches multiple condition sets, ClickHouse uses the first matched condition set. - -If no conditions met for a data part, ClickHouse uses the `lz4` compression. - -**Example** - -``` xml - - - 10000000000 - 0.01 - zstd - - -``` - -## default\_database {#default-database} - -The default database. - -To get a list of databases, use the [SHOW DATABASES](../../query_language/show.md#show-databases) query. - -**Example** - -``` xml -default -``` - -## default\_profile {#default-profile} - -Default settings profile. - -Settings profiles are located in the file specified in the parameter `user_config`. - -**Example** - -``` xml -default -``` - -## dictionaries\_config {#server_settings-dictionaries_config} - -The path to the config file for external dictionaries. - -Path: - -- Specify the absolute path or the path relative to the server config file. -- The path can contain wildcards \* and ?. - -See also “[External dictionaries](../../query_language/dicts/external_dicts.md)”. - -**Example** - -``` xml -*_dictionary.xml -``` - -## dictionaries\_lazy\_load {#server_settings-dictionaries_lazy_load} - -Lazy loading of dictionaries. - -If `true`, then each dictionary is created on first use. If dictionary creation failed, the function that was using the dictionary throws an exception. - -If `false`, all dictionaries are created when the server starts, and if there is an error, the server shuts down. - -The default is `true`. - -**Example** - -``` xml -true -``` - -## format\_schema\_path {#server_settings-format_schema_path} - -The path to the directory with the schemes for the input data, such as schemas for the [CapnProto](../../interfaces/formats.md#capnproto) format. - -**Example** - -``` xml - - format_schemas/ -``` - -## graphite {#server_settings-graphite} - -Sending data to [Graphite](https://github.com/graphite-project). - -Settings: - -- host – The Graphite server. -- port – The port on the Graphite server. -- interval – The interval for sending, in seconds. -- timeout – The timeout for sending data, in seconds. -- root\_path – Prefix for keys. -- metrics – Sending data from the [system.metrics](../system_tables.md#system_tables-metrics) table. -- events – Sending deltas data accumulated for the time period from the [system.events](../system_tables.md#system_tables-events) table. -- events\_cumulative – Sending cumulative data from the [system.events](../system_tables.md#system_tables-events) table. -- asynchronous\_metrics – Sending data from the [system.asynchronous\_metrics](../system_tables.md#system_tables-asynchronous_metrics) table. - -You can configure multiple `` clauses. For instance, you can use this for sending different data at different intervals. 
- -**Example** - -``` xml - - localhost - 42000 - 0.1 - 60 - one_min - true - true - false - true - -``` - -## graphite\_rollup {#server_settings-graphite-rollup} - -Settings for thinning data for Graphite. - -For more details, see [GraphiteMergeTree](../table_engines/graphitemergetree.md). - -**Example** - -``` xml - - - max - - 0 - 60 - - - 3600 - 300 - - - 86400 - 3600 - - - -``` - -## http\_port/https\_port {#http-porthttps-port} - -The port for connecting to the server over HTTP(s). - -If `https_port` is specified, [openSSL](#server_settings-openssl) must be configured. - -If `http_port` is specified, the OpenSSL configuration is ignored even if it is set. - -**Example** - -``` xml -0000 -``` - -## http\_server\_default\_response {#server_settings-http_server_default_response} - -The page that is shown by default when you access the ClickHouse HTTP(s) server. -The default value is “Ok.” (with a line feed at the end) - -**Example** - -Opens `https://tabix.io/` when accessing `http://localhost: http_port`. - -``` xml - -
    ]]> -
    -``` - -## include\_from {#server_settings-include_from} - -The path to the file with substitutions. - -For more information, see the section “[Configuration files](../configuration_files.md#configuration_files)”. - -**Example** - -``` xml -/etc/metrica.xml -``` - -## interserver\_http\_port {#interserver-http-port} - -Port for exchanging data between ClickHouse servers. - -**Example** - -``` xml -9009 -``` - -## interserver\_http\_host {#interserver-http-host} - -The hostname that can be used by other servers to access this server. - -If omitted, it is defined in the same way as the `hostname-f` command. - -Useful for breaking away from a specific network interface. - -**Example** - -``` xml -example.yandex.ru -``` - -## interserver\_http\_credentials {#server-settings-interserver-http-credentials} - -The username and password used to authenticate during [replication](../table_engines/replication.md) with the Replicated\* engines. These credentials are used only for communication between replicas and are unrelated to credentials for ClickHouse clients. The server is checking these credentials for connecting replicas and use the same credentials when connecting to other replicas. So, these credentials should be set the same for all replicas in a cluster. -By default, the authentication is not used. - -This section contains the following parameters: - -- `user` — username. -- `password` — password. - -**Example** - -``` xml - - admin - 222 - -``` - -## keep\_alive\_timeout {#keep-alive-timeout} - -The number of seconds that ClickHouse waits for incoming requests before closing the connection. Defaults to 3 seconds. - -**Example** - -``` xml -3 -``` - -## listen\_host {#server_settings-listen_host} - -Restriction on hosts that requests can come from. If you want the server to answer all of them, specify `::`. - -Examples: - -``` xml -::1 -127.0.0.1 -``` - -## logger {#server_settings-logger} - -Logging settings. - -Keys: - -- level – Logging level. Acceptable values: `trace`, `debug`, `information`, `warning`, `error`. -- log – The log file. Contains all the entries according to `level`. -- errorlog – Error log file. -- size – Size of the file. Applies to `log`and`errorlog`. Once the file reaches `size`, ClickHouse archives and renames it, and creates a new log file in its place. -- count – The number of archived log files that ClickHouse stores. - -**Example** - -``` xml - - trace - /var/log/clickhouse-server/clickhouse-server.log - /var/log/clickhouse-server/clickhouse-server.err.log - 1000M - 10 - -``` - -Writing to the syslog is also supported. Config example: - -``` xml - - 1 - -
    syslog.remote:10514
    - myhost.local - LOG_LOCAL6 - syslog -
    -
    -``` - -Keys: - -- use\_syslog — Required setting if you want to write to the syslog. -- address — The host\[:port\] of syslogd. If omitted, the local daemon is used. -- hostname — Optional. The name of the host that logs are sent from. -- facility — [The syslog facility keyword](https://en.wikipedia.org/wiki/Syslog#Facility) in uppercase letters with the “LOG\_” prefix: (`LOG_USER`, `LOG_DAEMON`, `LOG_LOCAL3`, and so on). - Default value: `LOG_USER` if `address` is specified, `LOG_DAEMON otherwise.` -- format – Message format. Possible values: `bsd` and `syslog.` - -## macros {#macros} - -Parameter substitutions for replicated tables. - -Can be omitted if replicated tables are not used. - -For more information, see the section “[Creating replicated tables](../../operations/table_engines/replication.md)”. - -**Example** - -``` xml - -``` - -## mark\_cache\_size {#server-mark-cache-size} - -Approximate size (in bytes) of the cache of marks used by table engines of the [MergeTree](../table_engines/mergetree.md) family. - -The cache is shared for the server and memory is allocated as needed. The cache size must be at least 5368709120. - -**Example** - -``` xml -5368709120 -``` - -## max\_concurrent\_queries {#max-concurrent-queries} - -The maximum number of simultaneously processed requests. - -**Example** - -``` xml -100 -``` - -## max\_connections {#max-connections} - -The maximum number of inbound connections. - -**Example** - -``` xml -4096 -``` - -## max\_open\_files {#max-open-files} - -The maximum number of open files. - -By default: `maximum`. - -We recommend using this option in Mac OS X since the `getrlimit()` function returns an incorrect value. - -**Example** - -``` xml -262144 -``` - -## max\_table\_size\_to\_drop {#max-table-size-to-drop} - -Restriction on deleting tables. - -If the size of a [MergeTree](../table_engines/mergetree.md) table exceeds `max_table_size_to_drop` (in bytes), you can’t delete it using a DROP query. - -If you still need to delete the table without restarting the ClickHouse server, create the `/flags/force_drop_table` file and run the DROP query. - -Default value: 50 GB. - -The value 0 means that you can delete all tables without any restrictions. - -**Example** - -``` xml -0 -``` - -## merge\_tree {#server_settings-merge_tree} - -Fine tuning for tables in the [MergeTree](../table_engines/mergetree.md). - -For more information, see the MergeTreeSettings.h header file. - -**Example** - -``` xml - - 5 - -``` - -## openSSL {#server_settings-openssl} - -SSL client/server configuration. - -Support for SSL is provided by the `libpoco` library. The interface is described in the file [SSLManager.h](https://github.com/ClickHouse-Extras/poco/blob/master/NetSSL_OpenSSL/include/Poco/Net/SSLManager.h) - -Keys for server/client settings: - -- privateKeyFile – The path to the file with the secret key of the PEM certificate. The file may contain a key and certificate at the same time. -- certificateFile – The path to the client/server certificate file in PEM format. You can omit it if `privateKeyFile` contains the certificate. -- caConfig – The path to the file or directory that contains trusted root certificates. -- verificationMode – The method for checking the node’s certificates. Details are in the description of the [Context](https://github.com/ClickHouse-Extras/poco/blob/master/NetSSL_OpenSSL/include/Poco/Net/Context.h) class. Possible values: `none`, `relaxed`, `strict`, `once`. -- verificationDepth – The maximum length of the verification chain. 
Verification will fail if the certificate chain length exceeds the set value. -- loadDefaultCAFile – Indicates that built-in CA certificates for OpenSSL will be used. Acceptable values: `true`, `false`. \| -- cipherList – Supported OpenSSL encryptions. For example: `ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH`. -- cacheSessions – Enables or disables caching sessions. Must be used in combination with `sessionIdContext`. Acceptable values: `true`, `false`. -- sessionIdContext – A unique set of random characters that the server appends to each generated identifier. The length of the string must not exceed `SSL_MAX_SSL_SESSION_ID_LENGTH`. This parameter is always recommended since it helps avoid problems both if the server caches the session and if the client requested caching. Default value: `${application.name}`. -- sessionCacheSize – The maximum number of sessions that the server caches. Default value: 1024\*20. 0 – Unlimited sessions. -- sessionTimeout – Time for caching the session on the server. -- extendedVerification – Automatically extended verification of certificates after the session ends. Acceptable values: `true`, `false`. -- requireTLSv1 – Require a TLSv1 connection. Acceptable values: `true`, `false`. -- requireTLSv1\_1 – Require a TLSv1.1 connection. Acceptable values: `true`, `false`. -- requireTLSv1 – Require a TLSv1.2 connection. Acceptable values: `true`, `false`. -- fips – Activates OpenSSL FIPS mode. Supported if the library’s OpenSSL version supports FIPS. -- privateKeyPassphraseHandler – Class (PrivateKeyPassphraseHandler subclass) that requests the passphrase for accessing the private key. For example: ``, `KeyFileHandler`, `test`, ``. -- invalidCertificateHandler – Class (a subclass of CertificateHandler) for verifying invalid certificates. For example: ` ConsoleCertificateHandler ` . -- disableProtocols – Protocols that are not allowed to use. -- preferServerCiphers – Preferred server ciphers on the client. - -**Example of settings:** - -``` xml - - - - /etc/clickhouse-server/server.crt - /etc/clickhouse-server/server.key - - /etc/clickhouse-server/dhparam.pem - none - true - true - sslv2,sslv3 - true - - - true - true - sslv2,sslv3 - true - - - - RejectCertificateHandler - - - -``` - -## part\_log {#server_settings-part-log} - -Logging events that are associated with [MergeTree](../table_engines/mergetree.md). For instance, adding or merging data. You can use the log to simulate merge algorithms and compare their characteristics. You can visualize the merge process. - -Queries are logged in the [system.part\_log](../system_tables.md#system_tables-part-log) table, not in a separate file. You can configure the name of this table in the `table` parameter (see below). - -Use the following parameters to configure logging: - -- `database` – Name of the database. -- `table` – Name of the system table. -- `partition_by` – Sets a [custom partitioning key](../../operations/table_engines/custom_partitioning_key.md). -- `flush_interval_milliseconds` – Interval for flushing data from the buffer in memory to the table. - -**Example** - -``` xml - - system - part_log
    - toMonday(event_date) - 7500 -
    -``` - -## path {#server_settings-path} - -The path to the directory containing data. - -!!! note "Note" - The trailing slash is mandatory. - -**Example** - -``` xml -/var/lib/clickhouse/ -``` - -## query\_log {#server_settings-query-log} - -Setting for logging queries received with the [log\_queries=1](../settings/settings.md) setting. - -Queries are logged in the [system.query\_log](../system_tables.md#system_tables-query_log) table, not in a separate file. You can change the name of the table in the `table` parameter (see below). - -Use the following parameters to configure logging: - -- `database` – Name of the database. -- `table` – Name of the system table the queries will be logged in. -- `partition_by` – Sets a [custom partitioning key](../../operations/table_engines/custom_partitioning_key.md) for a table. -- `flush_interval_milliseconds` – Interval for flushing data from the buffer in memory to the table. - -If the table doesn’t exist, ClickHouse will create it. If the structure of the query log changed when the ClickHouse server was updated, the table with the old structure is renamed, and a new table is created automatically. - -**Example** - -``` xml - - system - query_log
    - toMonday(event_date) - 7500 -
    -``` - -## query\_thread\_log {#server_settings-query-thread-log} - -Setting for logging threads of queries received with the [log\_query\_threads=1](../settings/settings.md#settings-log-query-threads) setting. - -Queries are logged in the [system.query\_thread\_log](../system_tables.md#system_tables-query-thread-log) table, not in a separate file. You can change the name of the table in the `table` parameter (see below). - -Use the following parameters to configure logging: - -- `database` – Name of the database. -- `table` – Name of the system table the queries will be logged in. -- `partition_by` – Sets a [custom partitioning key](../../operations/table_engines/custom_partitioning_key.md) for a system table. -- `flush_interval_milliseconds` – Interval for flushing data from the buffer in memory to the table. - -If the table doesn’t exist, ClickHouse will create it. If the structure of the query thread log changed when the ClickHouse server was updated, the table with the old structure is renamed, and a new table is created automatically. - -**Example** - -``` xml - - system - query_thread_log
    - toMonday(event_date) - 7500 -
    -``` - -## trace\_log {#server_settings-trace_log} - -Settings for the [trace\_log](../system_tables.md#system_tables-trace_log) system table operation. - -Parameters: - -- `database` — Database for storing a table. -- `table` — Table name. -- `partition_by` — [Custom partitioning key](../../operations/table_engines/custom_partitioning_key.md) for a system table. -- `flush_interval_milliseconds` — Interval for flushing data from the buffer in memory to the table. - -The default server configuration file `config.xml` contains the following settings section: - -``` xml - - system - trace_log
    - toYYYYMM(event_date) - 7500 -
    -``` - -## query\_masking\_rules {#query-masking-rules} - -Regexp-based rules, which will be applied to queries as well as all log messages before storing them in server logs, -`system.query_log`, `system.text_log`, `system.processes` table, and in logs sent to the client. That allows preventing -sensitive data leakage from SQL queries (like names, emails, personal -identifiers or credit card numbers) to logs. - -**Example** - -``` xml - - - hide SSN - (^|\D)\d{3}-\d{2}-\d{4}($|\D) - 000-00-0000 - - -``` - -Config fields: -- `name` - name for the rule (optional) -- `regexp` - RE2 compatible regular expression (mandatory) -- `replace` - substitution string for sensitive data (optional, by default - six asterisks) - -The masking rules are applied to the whole query (to prevent leaks of sensitive data from malformed / non-parsable queries). - -`system.events` table have counter `QueryMaskingRulesMatch` which have an overall number of query masking rules matches. - -For distributed queries each server have to be configured separately, otherwise, subqueries passed to other -nodes will be stored without masking. - -## remote\_servers {#server-settings-remote-servers} - -Configuration of clusters used by the [Distributed](../../operations/table_engines/distributed.md) table engine and by the `cluster` table function. - -**Example** - -``` xml - -``` - -For the value of the `incl` attribute, see the section “[Configuration files](../configuration_files.md#configuration_files)”. - -**See Also** - -- [skip\_unavailable\_shards](../settings/settings.md#settings-skip_unavailable_shards) - -## timezone {#server_settings-timezone} - -The server’s time zone. - -Specified as an IANA identifier for the UTC timezone or geographic location (for example, Africa/Abidjan). - -The time zone is necessary for conversions between String and DateTime formats when DateTime fields are output to text format (printed on the screen or in a file), and when getting DateTime from a string. Besides, the time zone is used in functions that work with the time and date if they didn’t receive the time zone in the input parameters. - -**Example** - -``` xml -Europe/Moscow -``` - -## tcp\_port {#server_settings-tcp_port} - -Port for communicating with clients over the TCP protocol. - -**Example** - -``` xml -9000 -``` - -## tcp\_port\_secure {#server_settings-tcp_port-secure} - -TCP port for secure communication with clients. Use it with [OpenSSL](#server_settings-openssl) settings. - -**Possible values** - -Positive integer. - -**Default value** - -``` xml -9440 -``` - -## mysql\_port {#server_settings-mysql_port} - -Port for communicating with clients over MySQL protocol. - -**Possible values** - -Positive integer. - -Example - -``` xml -9004 -``` - -## tmp\_path {#server-settings-tmp_path} - -Path to temporary data for processing large queries. - -!!! note "Note" - The trailing slash is mandatory. - -**Example** - -``` xml -/var/lib/clickhouse/tmp/ -``` - -## tmp\_policy {#server-settings-tmp-policy} - -Policy from [`storage_configuration`](../table_engines/mergetree.md#table_engine-mergetree-multiple-volumes) to store temporary files. -If not set [`tmp_path`](#server-settings-tmp_path) is used, otherwise it is ignored. - -!!! 
note "Note" - - `move_factor` is ignored -- `keep_free_space_bytes` is ignored -- `max_data_part_size_bytes` is ignored -- you must have exactly one volume in that policy - -## uncompressed\_cache\_size {#server-settings-uncompressed_cache_size} - -Cache size (in bytes) for uncompressed data used by table engines from the [MergeTree](../table_engines/mergetree.md). - -There is one shared cache for the server. Memory is allocated on demand. The cache is used if the option [use\_uncompressed\_cache](../settings/settings.md#setting-use_uncompressed_cache) is enabled. - -The uncompressed cache is advantageous for very short queries in individual cases. - -**Example** - -``` xml -8589934592 -``` - -## user\_files\_path {#server_settings-user_files_path} - -The directory with user files. Used in the table function [file()](../../query_language/table_functions/file.md). - -**Example** - -``` xml -/var/lib/clickhouse/user_files/ -``` - -## users\_config {#users-config} - -Path to the file that contains: - -- User configurations. -- Access rights. -- Settings profiles. -- Quota settings. - -**Example** - -``` xml -users.xml -``` - -## zookeeper {#server-settings_zookeeper} - -Contains settings that allow ClickHouse to interact with a [ZooKeeper](http://zookeeper.apache.org/) cluster. - -ClickHouse uses ZooKeeper for storing metadata of replicas when using replicated tables. If replicated tables are not used, this section of parameters can be omitted. - -This section contains the following parameters: - -- `node` — ZooKeeper endpoint. You can set multiple endpoints. - - For example: - - - -``` xml - - example_host - 2181 - -``` - - The `index` attribute specifies the node order when trying to connect to the ZooKeeper cluster. - -- `session_timeout` — Maximum timeout for the client session in milliseconds. -- `root` — The [znode](http://zookeeper.apache.org/doc/r3.5.5/zookeeperOver.html#Nodes+and+ephemeral+nodes) that is used as the root for znodes used by the ClickHouse server. Optional. -- `identity` — User and password, that can be required by ZooKeeper to give access to requested znodes. Optional. - -**Example configuration** - -``` xml - - - example1 - 2181 - - - example2 - 2181 - - 30000 - 10000 - - /path/to/zookeeper/node - - user:password - -``` - -**See Also** - -- [Replication](../../operations/table_engines/replication.md) -- [ZooKeeper Programmer’s Guide](http://zookeeper.apache.org/doc/current/zookeeperProgrammers.html) - -## use\_minimalistic\_part\_header\_in\_zookeeper {#server-settings-use_minimalistic_part_header_in_zookeeper} - -Storage method for data part headers in ZooKeeper. - -This setting only applies to the `MergeTree` family. It can be specified: - -- Globally in the [merge\_tree](#server_settings-merge_tree) section of the `config.xml` file. - - ClickHouse uses the setting for all the tables on the server. You can change the setting at any time. Existing tables change their behaviour when the setting changes. - -- For each table. - - When creating a table, specify the corresponding [engine setting](../table_engines/mergetree.md#table_engine-mergetree-creating-a-table). The behaviour of an existing table with this setting does not change, even if the global setting changes. - -**Possible values** - -- 0 — Functionality is turned off. -- 1 — Functionality is turned on. - -If `use_minimalistic_part_header_in_zookeeper = 1`, then [replicated](../table_engines/replication.md) tables store the headers of the data parts compactly using a single `znode`. 
If the table contains many columns, this storage method significantly reduces the volume of the data stored in Zookeeper. - -!!! attention "Attention" - After applying `use_minimalistic_part_header_in_zookeeper = 1`, you can’t downgrade the ClickHouse server to a version that doesn’t support this setting. Be careful when upgrading ClickHouse on servers in a cluster. Don’t upgrade all the servers at once. It is safer to test new versions of ClickHouse in a test environment, or on just a few servers of a cluster. - - Data part headers already stored with this setting can't be restored to their previous (non-compact) representation. - -**Default value:** 0. - -## disable\_internal\_dns\_cache {#server-settings-disable-internal-dns-cache} - -Disables the internal DNS cache. Recommended for operating ClickHouse in systems -with frequently changing infrastructure such as Kubernetes. - -**Default value:** 0. - -## dns\_cache\_update\_period {#server-settings-dns-cache-update-period} - -The period of updating IP addresses stored in the ClickHouse internal DNS cache (in seconds). -The update is performed asynchronously, in a separate system thread. - -**Default value**: 15. - -[Original article](https://clickhouse.tech/docs/en/operations/server_settings/settings/) diff --git a/docs/zh/operations/settings/constraints_on_settings.md b/docs/zh/operations/settings/constraints_on_settings.md index b0037813199..c9e572dd907 100644 --- a/docs/zh/operations/settings/constraints_on_settings.md +++ b/docs/zh/operations/settings/constraints_on_settings.md @@ -1,11 +1,14 @@ --- -en_copy: true +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 62 +toc_title: "\u5BF9\u8BBE\u7F6E\u7684\u9650\u5236" --- -# Constraints on Settings {#constraints-on-settings} +# 对设置的限制 {#constraints-on-settings} -The constraints on settings can be defined in the `profiles` section of the `user.xml` configuration file and prohibit users from changing some of the settings with the `SET` query. -The constraints are defined as the following: +在设置的约束可以在定义 `profiles` 一节 `user.xml` 配置文件,并禁止用户更改一些设置与 `SET` 查询。 +约束定义如下: ``` xml @@ -29,10 +32,10 @@ The constraints are defined as the following: ``` -If the user tries to violate the constraints an exception is thrown and the setting isn’t changed. -There are supported three types of constraints: `min`, `max`, `readonly`. The `min` and `max` constraints specify upper and lower boundaries for a numeric setting and can be used in combination. The `readonly` constraint specifies that the user cannot change the corresponding setting at all. +如果用户试图违反约束,将引发异常,并且设置不会更改。 +支持三种类型的约束: `min`, `max`, `readonly`. 该 `min` 和 `max` 约束指定数值设置的上边界和下边界,并且可以组合使用。 该 `readonly` constraint指定用户根本无法更改相应的设置。 -**Example:** Let `users.xml` includes lines: +**示例:** 让 `users.xml` 包括行: ``` xml @@ -53,7 +56,7 @@ There are supported three types of constraints: `min`, `max`, `readonly`. The `m ``` -The following queries all throw exceptions: +以下查询都会引发异常: ``` sql SET max_memory_usage=20000000001; @@ -67,6 +70,6 @@ Code: 452, e.displayText() = DB::Exception: Setting max_memory_usage should not Code: 452, e.displayText() = DB::Exception: Setting force_index_by_date should not be changed. ``` -**Note:** the `default` profile has special handling: all the constraints defined for the `default` profile become the default constraints, so they restrict all the users until they’re overridden explicitly for these users. 
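To make the constraint example in this file concrete, here is a sketch of the `users.xml` fragment it describes, with the tags written out explicitly; the profile name `default` and the numeric bounds are the ones quoted in the example and error messages above:

``` xml
<profiles>
    <default>
        <max_memory_usage>10000000000</max_memory_usage>
        <force_index_by_date>0</force_index_by_date>
        <constraints>
            <max_memory_usage>
                <min>5000000000</min>
                <max>10000000000</max>
            </max_memory_usage>
            <force_index_by_date>
                <readonly/>
            </force_index_by_date>
        </constraints>
    </default>
</profiles>
```

With this profile in place, `SET max_memory_usage = 20000000001` fails with error code 452, as shown in the output above.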
+**注:** 该 `default` 配置文件具有特殊的处理:所有定义的约束 `default` 配置文件成为默认约束,因此它们限制所有用户,直到为这些用户显式复盖它们。 -[Original article](https://clickhouse.tech/docs/en/operations/settings/constraints_on_settings/) +[原始文章](https://clickhouse.tech/docs/en/operations/settings/constraints_on_settings/) diff --git a/docs/zh/operations/settings/index.md b/docs/zh/operations/settings/index.md index 6d3d96dfbf3..9c598d241d1 100644 --- a/docs/zh/operations/settings/index.md +++ b/docs/zh/operations/settings/index.md @@ -1,25 +1,28 @@ -# Settings {#settings} +--- +toc_folder_title: "\u8bbe\u7f6e" +--- +# 设置 {#settings} -There are multiple ways to make all the settings described below. -Settings are configured in layers, so each subsequent layer redefines the previous settings. +有多种方法可以进行以下所述的所有设置。 +设置是在图层中配置的,因此每个后续图层都会重新定义以前的设置。 -Ways to configure settings, in order of priority: +按优先级顺序配置设置的方法: -- Settings in the `users.xml` server configuration file. +- 在设置 `users.xml` 服务器配置文件。 Set in the element ``. -- Session settings. +- 会话设置。 Send ` SET setting=value` from the ClickHouse console client in interactive mode. - Similarly, you can use ClickHouse sessions in the HTTP protocol. To do this, you need to specify the `session_id` HTTP parameter. + 同样,您可以在HTTP协议中使用ClickHouse会话。 要做到这一点,你需要指定 `session_id` HTTP参数。 -- Query settings. +- 查询设置。 - - When starting the ClickHouse console client in non-interactive mode, set the startup parameter `--setting=value`. - - When using the HTTP API, pass CGI parameters (`URL?setting_1=value&setting_2=value...`). + - 在非交互模式下启动ClickHouse控制台客户端时,设置startup参数 `--setting=value`. + - 使用HTTP API时,请传递CGI参数 (`URL?setting_1=value&setting_2=value...`). -Settings that can only be made in the server config file are not covered in this section. +本节不介绍只能在服务器配置文件中进行的设置。 -[Original article](https://clickhouse.tech/docs/en/operations/settings/) +[原始文章](https://clickhouse.tech/docs/en/operations/settings/) diff --git a/docs/zh/operations/settings/permissions_for_queries.md b/docs/zh/operations/settings/permissions_for_queries.md index 60942e1926b..a7d2e843b66 100644 --- a/docs/zh/operations/settings/permissions_for_queries.md +++ b/docs/zh/operations/settings/permissions_for_queries.md @@ -1,58 +1,61 @@ --- -en_copy: true +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 58 +toc_title: "\u67E5\u8BE2\u6743\u9650" --- -# Permissions for Queries {#permissions_for_queries} +# 查询权限 {#permissions_for_queries} -Queries in ClickHouse can be divided into several types: +ClickHouse中的查询可以分为几种类型: -1. Read data queries: `SELECT`, `SHOW`, `DESCRIBE`, `EXISTS`. -2. Write data queries: `INSERT`, `OPTIMIZE`. -3. Change settings query: `SET`, `USE`. -4. [DDL](https://en.wikipedia.org/wiki/Data_definition_language) queries: `CREATE`, `ALTER`, `RENAME`, `ATTACH`, `DETACH`, `DROP` `TRUNCATE`. +1. 读取数据查询: `SELECT`, `SHOW`, `DESCRIBE`, `EXISTS`. +2. 写入数据查询: `INSERT`, `OPTIMIZE`. +3. 更改设置查询: `SET`, `USE`. +4. [DDL](https://en.wikipedia.org/wiki/Data_definition_language) 查询: `CREATE`, `ALTER`, `RENAME`, `ATTACH`, `DETACH`, `DROP` `TRUNCATE`. 5. `KILL QUERY`. -The following settings regulate user permissions by the type of query: +以下设置按查询类型规范用户权限: -- [readonly](#settings_readonly) — Restricts permissions for all types of queries except DDL queries. +- [只读](#settings_readonly) — Restricts permissions for all types of queries except DDL queries. - [allow\_ddl](#settings_allow_ddl) — Restricts permissions for DDL queries. -`KILL QUERY` can be performed with any settings. 
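The two permission settings described in the following sections are applied through a settings profile. A sketch of such a profile (the profile name `web` is hypothetical; the value semantics are defined below):

``` xml
<profiles>
    <!-- hypothetical read-mostly profile -->
    <web>
        <!-- 2: read queries and SET are allowed, writes are prohibited -->
        <readonly>2</readonly>
        <!-- 0: DDL queries are prohibited -->
        <allow_ddl>0</allow_ddl>
    </web>
</profiles>
```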
+`KILL QUERY` 可以与任何设置进行。 -## readonly {#settings_readonly} +## 只读 {#settings_readonly} -Restricts permissions for reading data, write data and change settings queries. +限制读取数据、写入数据和更改设置查询的权限。 -See how the queries are divided into types [above](#permissions_for_queries). +查看查询如何划分为多种类型 [以上](#permissions_for_queries). -Possible values: +可能的值: - 0 — All queries are allowed. - 1 — Only read data queries are allowed. - 2 — Read data and change settings queries are allowed. -After setting `readonly = 1`, the user can’t change `readonly` and `allow_ddl` settings in the current session. +设置后 `readonly = 1`,用户无法更改 `readonly` 和 `allow_ddl` 当前会话中的设置。 -When using the `GET` method in the [HTTP interface](../../interfaces/http.md), `readonly = 1` is set automatically. To modify data, use the `POST` method. +使用时 `GET` 方法中的 [HTTP接口](../../interfaces/http.md), `readonly = 1` 自动设置。 要修改数据,请使用 `POST` 方法。 -Setting `readonly = 1` prohibit the user from changing all the settings. There is a way to prohibit the user -from changing only specific settings, for details see [constraints on settings](constraints_on_settings.md). +设置 `readonly = 1` 禁止用户更改所有设置。 有一种方法可以禁止用户 +从只更改特定设置,有关详细信息,请参阅 [对设置的限制](constraints_on_settings.md). -Default value: 0 +默认值:0 ## allow\_ddl {#settings_allow_ddl} -Allows or denies [DDL](https://en.wikipedia.org/wiki/Data_definition_language) queries. +允许或拒绝 [DDL](https://en.wikipedia.org/wiki/Data_definition_language) 查询。 -See how the queries are divided into types [above](#permissions_for_queries). +查看查询如何划分为多种类型 [以上](#permissions_for_queries). -Possible values: +可能的值: - 0 — DDL queries are not allowed. - 1 — DDL queries are allowed. -You can’t execute `SET allow_ddl = 1` if `allow_ddl = 0` for the current session. +你不能执行 `SET allow_ddl = 1` 如果 `allow_ddl = 0` 对于当前会话。 -Default value: 1 +默认值:1 -[Original article](https://clickhouse.tech/docs/en/operations/settings/permissions_for_queries/) +[原始文章](https://clickhouse.tech/docs/en/operations/settings/permissions_for_queries/) diff --git a/docs/zh/operations/settings/query_complexity.md b/docs/zh/operations/settings/query_complexity.md index ccb8acd3da5..b17f5b7aa70 100644 --- a/docs/zh/operations/settings/query_complexity.md +++ b/docs/zh/operations/settings/query_complexity.md @@ -1,193 +1,194 @@ -# Restrictions on query complexity {#restrictions-on-query-complexity} -Restrictions on query complexity are part of the settings. -They are used in order to provide safer execution from the user interface. -Almost all the restrictions only apply to SELECTs.For distributed query processing, restrictions are applied on each server separately. +# 查询复杂性的限制 {#restrictions-on-query-complexity} + +对查询复杂性的限制是设置的一部分。 +它们被用来从用户界面提供更安全的执行。 +几乎所有的限制只适用于选择。对于分布式查询处理,每个服务器上分别应用限制。 Restrictions on the «maximum amount of something» can take the value 0, which means «unrestricted». -Most restrictions also have an ‘overflow\_mode’ setting, meaning what to do when the limit is exceeded. -It can take one of two values: `throw` or `break`. Restrictions on aggregation (group\_by\_overflow\_mode) also have the value `any`. +大多数限制也有一个 ‘overflow\_mode’ 设置,这意味着超过限制时该怎么做。 +它可以采用以下两个值之一: `throw` 或 `break`. 对聚合的限制(group\_by\_overflow\_mode)也具有以下值 `any`. `throw` – Throw an exception (default). `break` – Stop executing the query and return the partial result, as if the source data ran out. -`any (only for group_by_overflow_mode)` – Continuing aggregation for the keys that got into the set, but don’t add new keys to the set. 
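As an illustration of the `any` mode described above, a profile sketch that turns GROUP BY into an approximation once a threshold of unique keys is reached (the threshold value is arbitrary):

``` xml
<profiles>
    <default>
        <max_rows_to_group_by>1000000</max_rows_to_group_by>
        <!-- 'any': keep aggregating keys already in the set, silently drop new keys -->
        <group_by_overflow_mode>any</group_by_overflow_mode>
    </default>
</profiles>
```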
+`any (only for group_by_overflow_mode)` – Continuing aggregation for the keys that got into the set, but don't add new keys to the set. -## readonly {#query-complexity-readonly} +## 只读 {#query-complexity-readonly} -With a value of 0, you can execute any queries. -With a value of 1, you can only execute read requests (such as SELECT and SHOW). Requests for writing and changing settings (INSERT, SET) are prohibited. -With a value of 2, you can process read queries (SELECT, SHOW) and change settings (SET). +值为0时,可以执行任何查询。 +如果值为1,则只能执行读取请求(如SELECT和SHOW)。 禁止写入和更改设置(插入,设置)的请求。 +值为2时,可以处理读取查询(选择、显示)和更改设置(设置)。 -After enabling readonly mode, you can’t disable it in the current session. +启用只读模式后,您无法在当前会话中禁用它。 -When using the GET method in the HTTP interface, ‘readonly = 1’ is set automatically. In other words, for queries that modify data, you can only use the POST method. You can send the query itself either in the POST body, or in the URL parameter. +在HTTP接口中使用GET方法时, ‘readonly = 1’ 自动设置。 换句话说,对于修改数据的查询,您只能使用POST方法。 您可以在POST正文或URL参数中发送查询本身。 ## max\_memory\_usage {#settings_max_memory_usage} -The maximum amount of RAM to use for running a query on a single server. +用于在单个服务器上运行查询的最大RAM量。 -In the default configuration file, the maximum is 10 GB. +在默认配置文件中,最大值为10GB。 -The setting doesn’t consider the volume of available memory or the total volume of memory on the machine. -The restriction applies to a single query within a single server. -You can use `SHOW PROCESSLIST` to see the current memory consumption for each query. -In addition, the peak memory consumption is tracked for each query and written to the log. +该设置不考虑计算机上的可用内存量或内存总量。 +该限制适用于单个服务器中的单个查询。 +您可以使用 `SHOW PROCESSLIST` 查看每个查询的当前内存消耗。 +此外,还会跟踪每个查询的内存消耗峰值并将其写入日志。 -Memory usage is not monitored for the states of certain aggregate functions. +不监视某些聚合函数的状态的内存使用情况。 -Memory usage is not fully tracked for states of the aggregate functions `min`, `max`, `any`, `anyLast`, `argMin`, `argMax` from `String` and `Array` arguments. +未完全跟踪聚合函数的状态的内存使用情况 `min`, `max`, `any`, `anyLast`, `argMin`, `argMax` 从 `String` 和 `Array` 争论。 -Memory consumption is also restricted by the parameters `max_memory_usage_for_user` and `max_memory_usage_for_all_queries`. +内存消耗也受到参数的限制 `max_memory_usage_for_user` 和 `max_memory_usage_for_all_queries`. ## max\_memory\_usage\_for\_user {#max-memory-usage-for-user} -The maximum amount of RAM to use for running a user’s queries on a single server. +用于在单个服务器上运行用户查询的最大RAM量。 -Default values are defined in [Settings.h](https://github.com/ClickHouse/ClickHouse/blob/master/src/Interpreters/Settings.h#L244). By default, the amount is not restricted (`max_memory_usage_for_user = 0`). +默认值定义在 [设置。h](https://github.com/ClickHouse/ClickHouse/blob/master/src/Interpreters/Settings.h#L244). 默认情况下,金额不受限制 (`max_memory_usage_for_user = 0`). -See also the description of [max\_memory\_usage](#settings_max_memory_usage). +另请参阅说明 [max\_memory\_usage](#settings_max_memory_usage). ## max\_memory\_usage\_for\_all\_queries {#max-memory-usage-for-all-queries} -The maximum amount of RAM to use for running all queries on a single server. +用于在单个服务器上运行所有查询的最大RAM数量。 -Default values are defined in [Settings.h](https://github.com/ClickHouse/ClickHouse/blob/master/src/Interpreters/Settings.h#L245). By default, the amount is not restricted (`max_memory_usage_for_all_queries = 0`). +默认值定义在 [设置。h](https://github.com/ClickHouse/ClickHouse/blob/master/src/Interpreters/Settings.h#L245). 默认情况下,金额不受限制 (`max_memory_usage_for_all_queries = 0`). 
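The three memory limits discussed here can be pinned together in a settings profile. A sketch, with byte values chosen purely for illustration:

``` xml
<profiles>
    <default>
        <!-- limit for a single query -->
        <max_memory_usage>10000000000</max_memory_usage>
        <!-- limit across one user's concurrent queries -->
        <max_memory_usage_for_user>30000000000</max_memory_usage_for_user>
        <!-- limit across all queries on the server -->
        <max_memory_usage_for_all_queries>60000000000</max_memory_usage_for_all_queries>
    </default>
</profiles>
```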
-See also the description of [max\_memory\_usage](#settings_max_memory_usage). +另请参阅说明 [max\_memory\_usage](#settings_max_memory_usage). ## max\_rows\_to\_read {#max-rows-to-read} -The following restrictions can be checked on each block (instead of on each row). That is, the restrictions can be broken a little. -When running a query in multiple threads, the following restrictions apply to each thread separately. +可以在每个块(而不是每行)上检查以下限制。 也就是说,限制可以打破一点。 +在多个线程中运行查询时,以下限制单独应用于每个线程。 -Maximum number of rows that can be read from a table when running a query. +运行查询时可从表中读取的最大行数。 ## max\_bytes\_to\_read {#max-bytes-to-read} -Maximum number of bytes (uncompressed data) that can be read from a table when running a query. +运行查询时可以从表中读取的最大字节数(未压缩数据)。 ## read\_overflow\_mode {#read-overflow-mode} -What to do when the volume of data read exceeds one of the limits: ‘throw’ or ‘break’. By default, throw. +读取的数据量超过其中一个限制时该怎么办: ‘throw’ 或 ‘break’. 默认情况下,扔。 ## max\_rows\_to\_group\_by {#max-rows-to-group-by} -Maximum number of unique keys received from aggregation. This setting lets you limit memory consumption when aggregating. +从聚合接收的唯一密钥的最大数量。 此设置允许您在聚合时限制内存消耗。 ## group\_by\_overflow\_mode {#group-by-overflow-mode} -What to do when the number of unique keys for aggregation exceeds the limit: ‘throw’, ‘break’, or ‘any’. By default, throw. -Using the ‘any’ value lets you run an approximation of GROUP BY. The quality of this approximation depends on the statistical nature of the data. +当聚合的唯一键数超过限制时该怎么办: ‘throw’, ‘break’,或 ‘any’. 默认情况下,扔。 +使用 ‘any’ 值允许您运行GROUP BY的近似值。 这种近似值的质量取决于数据的统计性质。 ## max\_rows\_to\_sort {#max-rows-to-sort} -Maximum number of rows before sorting. This allows you to limit memory consumption when sorting. +排序前的最大行数。 这允许您在排序时限制内存消耗。 ## max\_bytes\_to\_sort {#max-bytes-to-sort} -Maximum number of bytes before sorting. +排序前的最大字节数。 ## sort\_overflow\_mode {#sort-overflow-mode} -What to do if the number of rows received before sorting exceeds one of the limits: ‘throw’ or ‘break’. By default, throw. +如果排序前收到的行数超过其中一个限制,该怎么办: ‘throw’ 或 ‘break’. 默认情况下,扔。 ## max\_result\_rows {#max-result-rows} -Limit on the number of rows in the result. Also checked for subqueries, and on remote servers when running parts of a distributed query. +限制结果中的行数。 还检查子查询,并在运行分布式查询的部分时在远程服务器上。 ## max\_result\_bytes {#max-result-bytes} -Limit on the number of bytes in the result. The same as the previous setting. +限制结果中的字节数。 与之前的设置相同。 ## result\_overflow\_mode {#result-overflow-mode} -What to do if the volume of the result exceeds one of the limits: ‘throw’ or ‘break’. By default, throw. -Using ‘break’ is similar to using LIMIT. +如果结果的体积超过其中一个限制,该怎么办: ‘throw’ 或 ‘break’. 默认情况下,扔。 +使用 ‘break’ 类似于使用限制。 ## max\_execution\_time {#max-execution-time} -Maximum query execution time in seconds. -At this time, it is not checked for one of the sorting stages, or when merging and finalizing aggregate functions. +最大查询执行时间(以秒为单位)。 +此时,不会检查其中一个排序阶段,也不会在合并和最终确定聚合函数时进行检查。 ## timeout\_overflow\_mode {#timeout-overflow-mode} -What to do if the query is run longer than ‘max\_execution\_time’: ‘throw’ or ‘break’. By default, throw. +如果查询的运行时间长于 ‘max\_execution\_time’: ‘throw’ 或 ‘break’. 默认情况下,扔。 ## min\_execution\_speed {#min-execution-speed} -Minimal execution speed in rows per second. Checked on every data block when ‘timeout\_before\_checking\_execution\_speed’ expires. If the execution speed is lower, an exception is thrown. 
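For illustration, a profile sketch that bounds both execution time and throughput using the settings above (all values are hypothetical):

``` xml
<profiles>
    <default>
        <max_execution_time>600</max_execution_time>
        <!-- return the partial result instead of throwing when time runs out -->
        <timeout_overflow_mode>break</timeout_overflow_mode>
        <!-- require at least 1,000,000 rows/s ... -->
        <min_execution_speed>1000000</min_execution_speed>
        <!-- ... but only start checking after this grace period, in seconds -->
        <timeout_before_checking_execution_speed>15</timeout_before_checking_execution_speed>
    </default>
</profiles>
```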
+以每秒行为单位的最小执行速度。 检查每个数据块时 ‘timeout\_before\_checking\_execution\_speed’ 到期。 如果执行速度较低,则会引发异常。 ## timeout\_before\_checking\_execution\_speed {#timeout-before-checking-execution-speed} -Checks that execution speed is not too slow (no less than ‘min\_execution\_speed’), after the specified time in seconds has expired. +检查执行速度是不是太慢(不低于 ‘min\_execution\_speed’),在指定的时间以秒为单位已过期之后。 ## max\_columns\_to\_read {#max-columns-to-read} -Maximum number of columns that can be read from a table in a single query. If a query requires reading a greater number of columns, it throws an exception. +单个查询中可从表中读取的最大列数。 如果查询需要读取更多列,则会引发异常。 ## max\_temporary\_columns {#max-temporary-columns} -Maximum number of temporary columns that must be kept in RAM at the same time when running a query, including constant columns. If there are more temporary columns than this, it throws an exception. +运行查询时必须同时保留在RAM中的最大临时列数,包括常量列。 如果有比这更多的临时列,它会引发异常。 ## max\_temporary\_non\_const\_columns {#max-temporary-non-const-columns} -The same thing as ‘max\_temporary\_columns’, but without counting constant columns. -Note that constant columns are formed fairly often when running a query, but they require approximately zero computing resources. +同样的事情 ‘max\_temporary\_columns’,但不计数常数列。 +请注意,常量列在运行查询时经常形成,但它们需要大约零计算资源。 ## max\_subquery\_depth {#max-subquery-depth} -Maximum nesting depth of subqueries. If subqueries are deeper, an exception is thrown. By default, 100. +子查询的最大嵌套深度。 如果子查询更深,则会引发异常。 默认情况下,100。 ## max\_pipeline\_depth {#max-pipeline-depth} -Maximum pipeline depth. Corresponds to the number of transformations that each data block goes through during query processing. Counted within the limits of a single server. If the pipeline depth is greater, an exception is thrown. By default, 1000. +最大管道深度。 对应于查询处理期间每个数据块经历的转换数。 在单个服务器的限制范围内计算。 如果管道深度较大,则会引发异常。 默认情况下,1000。 ## max\_ast\_depth {#max-ast-depth} -Maximum nesting depth of a query syntactic tree. If exceeded, an exception is thrown. -At this time, it isn’t checked during parsing, but only after parsing the query. That is, a syntactic tree that is too deep can be created during parsing, but the query will fail. By default, 1000. +查询语法树的最大嵌套深度。 如果超出,将引发异常。 +此时,在解析过程中不会对其进行检查,而是仅在解析查询之后进行检查。 也就是说,在分析过程中可以创建一个太深的语法树,但查询将失败。 默认情况下,1000。 ## max\_ast\_elements {#max-ast-elements} -Maximum number of elements in a query syntactic tree. If exceeded, an exception is thrown. -In the same way as the previous setting, it is checked only after parsing the query. By default, 50,000. +查询语法树中的最大元素数。 如果超出,将引发异常。 +与前面的设置相同,只有在解析查询后才会检查它。 默认情况下,50,000。 ## max\_rows\_in\_set {#max-rows-in-set} -Maximum number of rows for a data set in the IN clause created from a subquery. +从子查询创建的IN子句中数据集的最大行数。 ## max\_bytes\_in\_set {#max-bytes-in-set} -Maximum number of bytes (uncompressed data) used by a set in the IN clause created from a subquery. +从子查询创建的IN子句中的集合使用的最大字节数(未压缩数据)。 ## set\_overflow\_mode {#set-overflow-mode} -What to do when the amount of data exceeds one of the limits: ‘throw’ or ‘break’. By default, throw. +当数据量超过其中一个限制时该怎么办: ‘throw’ 或 ‘break’. 默认情况下,扔。 ## max\_rows\_in\_distinct {#max-rows-in-distinct} -Maximum number of different rows when using DISTINCT. +使用DISTINCT时的最大不同行数。 ## max\_bytes\_in\_distinct {#max-bytes-in-distinct} -Maximum number of bytes used by a hash table when using DISTINCT. +使用DISTINCT时哈希表使用的最大字节数。 ## distinct\_overflow\_mode {#distinct-overflow-mode} -What to do when the amount of data exceeds one of the limits: ‘throw’ or ‘break’. By default, throw. 
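A combined sketch for the DISTINCT limits above and the GLOBAL IN transfer limits that follow (values illustrative only):

``` xml
<profiles>
    <default>
        <max_rows_in_distinct>10000000</max_rows_in_distinct>
        <!-- stop and return a partial result instead of failing -->
        <distinct_overflow_mode>break</distinct_overflow_mode>
        <max_rows_to_transfer>1000000</max_rows_to_transfer>
        <!-- fail the query outright when GLOBAL IN transfers too much -->
        <transfer_overflow_mode>throw</transfer_overflow_mode>
    </default>
</profiles>
```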
+当数据量超过其中一个限制时该怎么办: ‘throw’ 或 ‘break’. 默认情况下,扔。 ## max\_rows\_to\_transfer {#max-rows-to-transfer} -Maximum number of rows that can be passed to a remote server or saved in a temporary table when using GLOBAL IN. +使用GLOBAL IN时,可以传递到远程服务器或保存在临时表中的最大行数。 ## max\_bytes\_to\_transfer {#max-bytes-to-transfer} -Maximum number of bytes (uncompressed data) that can be passed to a remote server or saved in a temporary table when using GLOBAL IN. +使用GLOBAL IN时,可以传递到远程服务器或保存在临时表中的最大字节数(未压缩数据)。 ## transfer\_overflow\_mode {#transfer-overflow-mode} -What to do when the amount of data exceeds one of the limits: ‘throw’ or ‘break’. By default, throw. +当数据量超过其中一个限制时该怎么办: ‘throw’ 或 ‘break’. 默认情况下,扔。 -[Original article](https://clickhouse.tech/docs/en/operations/settings/query_complexity/) +[原始文章](https://clickhouse.tech/docs/en/operations/settings/query_complexity/) diff --git a/docs/zh/operations/settings/settings.md b/docs/zh/operations/settings/settings.md index 0475642124a..8e0a60d4f03 100644 --- a/docs/zh/operations/settings/settings.md +++ b/docs/zh/operations/settings/settings.md @@ -1,191 +1,194 @@ --- -en_copy: true +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 60 +toc_title: "\u8BBE\u7F6E" --- -# Settings {#settings} +# 设置 {#settings} -## distributed\_product\_mode {#distributed-product-mode} +## 分布\_产品\_模式 {#distributed-product-mode} -Changes the behavior of [distributed subqueries](../../query_language/select.md). +改变的行为 [分布式子查询](../../sql_reference/statements/select.md). ClickHouse applies this setting when the query contains the product of distributed tables, i.e. when the query for a distributed table contains a non-GLOBAL subquery for the distributed table. -Restrictions: +限制: -- Only applied for IN and JOIN subqueries. -- Only if the FROM section uses a distributed table containing more than one shard. -- If the subquery concerns a distributed table containing more than one shard. -- Not used for a table-valued [remote](../../query_language/table_functions/remote.md) function. +- 仅适用于IN和JOIN子查询。 +- 仅当FROM部分使用包含多个分片的分布式表时。 +- 如果子查询涉及包含多个分片的分布式表。 +- 不用于表值 [远程](../../sql_reference/table_functions/remote.md) 功能。 -Possible values: +可能的值: -- `deny` — Default value. Prohibits using these types of subqueries (returns the “Double-distributed in/JOIN subqueries is denied” exception). +- `deny` — Default value. Prohibits using these types of subqueries (returns the “Double-distributed in/JOIN subqueries is denied” 例外)。 - `local` — Replaces the database and table in the subquery with local ones for the destination server (shard), leaving the normal `IN`/`JOIN.` -- `global` — Replaces the `IN`/`JOIN` query with `GLOBAL IN`/`GLOBAL JOIN.` +- `global` — Replaces the `IN`/`JOIN` 查询与 `GLOBAL IN`/`GLOBAL JOIN.` - `allow` — Allows the use of these types of subqueries. ## enable\_optimize\_predicate\_expression {#enable-optimize-predicate-expression} -Turns on predicate pushdown in `SELECT` queries. +打开谓词下推 `SELECT` 查询。 -Predicate pushdown may significantly reduce network traffic for distributed queries. +谓词下推可以显着减少分布式查询的网络流量。 -Possible values: +可能的值: - 0 — Disabled. - 1 — Enabled. -Default value: 1. +默认值:1。 -Usage +用途 -Consider the following queries: +请考虑以下查询: 1. `SELECT count() FROM test_table WHERE date = '2018-10-10'` 2. 
`SELECT count() FROM (SELECT * FROM test_table) WHERE date = '2018-10-10'` -If `enable_optimize_predicate_expression = 1`, then the execution time of these queries is equal because ClickHouse applies `WHERE` to the subquery when processing it. +如果 `enable_optimize_predicate_expression = 1`,则这些查询的执行时间相等,因为ClickHouse应用 `WHERE` 对子查询进行处理。 -If `enable_optimize_predicate_expression = 0`, then the execution time of the second query is much longer, because the `WHERE` clause applies to all the data after the subquery finishes. +如果 `enable_optimize_predicate_expression = 0`,那么第二个查询的执行时间要长得多,因为 `WHERE` 子句适用于子查询完成后的所有数据。 ## fallback\_to\_stale\_replicas\_for\_distributed\_queries {#settings-fallback_to_stale_replicas_for_distributed_queries} -Forces a query to an out-of-date replica if updated data is not available. See [Replication](../table_engines/replication.md). +如果更新的数据不可用,则强制对过期副本进行查询。 看 [复制](../../engines/table_engines/mergetree_family/replication.md). -ClickHouse selects the most relevant from the outdated replicas of the table. +ClickHouse从表的过时副本中选择最相关的副本。 -Used when performing `SELECT` from a distributed table that points to replicated tables. +执行时使用 `SELECT` 从指向复制表的分布式表。 -By default, 1 (enabled). +默认情况下,1(已启用)。 ## force\_index\_by\_date {#settings-force_index_by_date} -Disables query execution if the index can’t be used by date. +如果索引不能按日期使用,则禁用查询执行。 -Works with tables in the MergeTree family. +适用于MergeTree系列中的表。 -If `force_index_by_date=1`, ClickHouse checks whether the query has a date key condition that can be used for restricting data ranges. If there is no suitable condition, it throws an exception. However, it does not check whether the condition reduces the amount of data to read. For example, the condition `Date != ' 2000-01-01 '` is acceptable even when it matches all the data in the table (i.e., running the query requires a full scan). For more information about ranges of data in MergeTree tables, see [MergeTree](../table_engines/mergetree.md). +如果 `force_index_by_date=1`,ClickHouse检查查询是否具有可用于限制数据范围的date键条件。 如果没有合适的条件,则会引发异常。 但是,它不检查条件是否减少了要读取的数据量。 例如,条件 `Date != ' 2000-01-01 '` 即使它与表中的所有数据匹配(即运行查询需要完全扫描),也是可以接受的。 有关MergeTree表中数据范围的详细信息,请参阅 [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md). ## force\_primary\_key {#force-primary-key} -Disables query execution if indexing by the primary key is not possible. +如果无法按主键编制索引,则禁用查询执行。 -Works with tables in the MergeTree family. +适用于MergeTree系列中的表。 -If `force_primary_key=1`, ClickHouse checks to see if the query has a primary key condition that can be used for restricting data ranges. If there is no suitable condition, it throws an exception. However, it does not check whether the condition reduces the amount of data to read. For more information about data ranges in MergeTree tables, see [MergeTree](../table_engines/mergetree.md). +如果 `force_primary_key=1`,ClickHouse检查查询是否具有可用于限制数据范围的主键条件。 如果没有合适的条件,则会引发异常。 但是,它不检查条件是否减少了要读取的数据量。 有关MergeTree表中数据范围的详细信息,请参阅 [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md). ## format\_schema {#format-schema} -This parameter is useful when you are using formats that require a schema definition, such as [Cap’n Proto](https://capnproto.org/) or [Protobuf](https://developers.google.com/protocol-buffers/). The value depends on the format. +当您使用需要架构定义的格式时,此参数非常有用,例如 [普罗托船长](https://capnproto.org/) 或 [Protobuf](https://developers.google.com/protocol-buffers/). 
该值取决于格式。 ## fsync\_metadata {#fsync-metadata} -Enables or disables [fsync](http://pubs.opengroup.org/onlinepubs/9699919799/functions/fsync.html) when writing `.sql` files. Enabled by default. +启用或禁用 [fsync](http://pubs.opengroup.org/onlinepubs/9699919799/functions/fsync.html) 写作时 `.sql` 文件 默认情况下启用。 -It makes sense to disable it if the server has millions of tiny tables that are constantly being created and destroyed. +如果服务器有数百万个不断创建和销毁的小表,那么禁用它是有意义的。 ## enable\_http\_compression {#settings-enable_http_compression} -Enables or disables data compression in the response to an HTTP request. +在对HTTP请求的响应中启用或禁用数据压缩。 -For more information, read the [HTTP interface description](../../interfaces/http.md). +欲了解更多信息,请阅读 [HTTP接口描述](../../interfaces/http.md). -Possible values: +可能的值: - 0 — Disabled. - 1 — Enabled. -Default value: 0. +默认值:0。 ## http\_zlib\_compression\_level {#settings-http_zlib_compression_level} -Sets the level of data compression in the response to an HTTP request if [enable\_http\_compression = 1](#settings-enable_http_compression). +在以下情况下,设置对HTTP请求的响应中的数据压缩级别 [enable\_http\_compression=1](#settings-enable_http_compression). -Possible values: Numbers from 1 to 9. +可能的值:数字从1到9。 -Default value: 3. +默认值:3。 ## http\_native\_compression\_disable\_checksumming\_on\_decompress {#settings-http_native_compression_disable_checksumming_on_decompress} -Enables or disables checksum verification when decompressing the HTTP POST data from the client. Used only for ClickHouse native compression format (not used with `gzip` or `deflate`). +在从客户端解压缩HTTP POST数据时启用或禁用校验和验证。 仅用于ClickHouse原生压缩格式(不用于 `gzip` 或 `deflate`). -For more information, read the [HTTP interface description](../../interfaces/http.md). +欲了解更多信息,请阅读 [HTTP接口描述](../../interfaces/http.md). -Possible values: +可能的值: - 0 — Disabled. - 1 — Enabled. -Default value: 0. +默认值:0。 ## send\_progress\_in\_http\_headers {#settings-send_progress_in_http_headers} -Enables or disables `X-ClickHouse-Progress` HTTP response headers in `clickhouse-server` responses. +启用或禁用 `X-ClickHouse-Progress` Http响应头 `clickhouse-server` 答复。 -For more information, read the [HTTP interface description](../../interfaces/http.md). +欲了解更多信息,请阅读 [HTTP接口描述](../../interfaces/http.md). -Possible values: +可能的值: - 0 — Disabled. - 1 — Enabled. -Default value: 0. +默认值:0。 ## max\_http\_get\_redirects {#setting-max_http_get_redirects} -Limits the maximum number of HTTP GET redirect hops for [URL](../table_engines/url.md)-engine tables. The setting applies to both types of tables: those created by the [CREATE TABLE](../../query_language/create/#create-table-query) query and by the [url](../../query_language/table_functions/url.md) table function. +限制HTTP GET重定向跳数的最大数量 [URL](../../engines/table_engines/special/url.md)-发动机表。 该设置适用于两种类型的表:由 [CREATE TABLE](../../sql_reference/statements/create.md#create-table-query) 查询和由 [url](../../sql_reference/table_functions/url.md) 表功能。 -Possible values: +可能的值: -- Any positive integer number of hops. +- 跳数的任何正整数。 - 0 — No hops allowed. -Default value: 0. +默认值:0。 ## input\_format\_allow\_errors\_num {#settings-input_format_allow_errors_num} -Sets the maximum number of acceptable errors when reading from text formats (CSV, TSV, etc.). +设置从文本格式(CSV,TSV等)读取时可接受的错误的最大数量。). -The default value is 0. +默认值为0。 -Always pair it with `input_format_allow_errors_ratio`. +总是与它配对 `input_format_allow_errors_ratio`. 
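As a sketch, the pairing of the two error-tolerance settings could look like this in a profile (the thresholds are arbitrary):

``` xml
<profiles>
    <default>
        <!-- tolerate up to 100 malformed input rows ... -->
        <input_format_allow_errors_num>100</input_format_allow_errors_num>
        <!-- ... as long as they are no more than 1% of the rows read -->
        <input_format_allow_errors_ratio>0.01</input_format_allow_errors_ratio>
    </default>
</profiles>
```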
## input\_format\_allow\_errors\_ratio {#settings-input_format_allow_errors_ratio}

-Sets the maximum percentage of errors allowed when reading from text formats (CSV, TSV, etc.).
-The percentage of errors is set as a floating-point number between 0 and 1.
+设置从文本格式(CSV、TSV等)读取时允许的最大错误百分比。
+错误百分比设置为介于0和1之间的浮点数。

-The default value is 0.
+默认值为0。

-Always pair it with `input_format_allow_errors_num`.
+请始终将其与 `input_format_allow_errors_num` 配对使用。

-If an error occurred while reading rows but the error counter is still less than `input_format_allow_errors_ratio`, ClickHouse ignores the row and moves on to the next one.
+如果在读取行时发生错误,但错误计数器仍小于 `input_format_allow_errors_ratio`,ClickHouse会忽略该行并继续处理下一行。

-If both `input_format_allow_errors_num` and `input_format_allow_errors_ratio` are exceeded, ClickHouse throws an exception.
+如果 `input_format_allow_errors_num` 和 `input_format_allow_errors_ratio` 都被超出,ClickHouse会引发异常。

## input\_format\_values\_interpret\_expressions {#settings-input_format_values_interpret_expressions}

-Enables or disables the full SQL parser if the fast stream parser can’t parse the data. This setting is used only for the [Values](../../interfaces/formats.md#data-format-values) format at the data insertion. For more information about syntax parsing, see the [Syntax](../../query_language/syntax.md) section.
+如果快速流解析器无法解析数据,则启用或禁用完整SQL解析器。此设置仅在数据插入时用于 [Values](../../interfaces/formats.md#data-format-values) 格式。有关语法分析的详细信息,请参阅 [语法](../../sql_reference/syntax.md) 一节。

-Possible values:
+可能的值:

- 0 — Disabled.

-    In this case, you must provide formatted data. See the [Formats](../../interfaces/formats.md) section.
+    在这种情况下,您必须提供格式化的数据。请参阅 [格式](../../interfaces/formats.md) 一节。

- 1 — Enabled.

-    In this case, you can use an SQL expression as a value, but data insertion is much slower this way. If you insert only formatted data, then ClickHouse behaves as if the setting value is 0.
+    在这种情况下,您可以使用SQL表达式作为值,但这种方式的数据插入速度要慢得多。如果仅插入格式化的数据,则ClickHouse的行为就好像设置值为0。

-Default value: 1.
+默认值:1。

-Example of Use
+使用示例

-Insert the [DateTime](../../data_types/datetime.md) type value with the different settings.
+使用不同的设置插入 [DateTime](../../sql_reference/data_types/datetime.md) 类型的值。

``` sql
SET input_format_values_interpret_expressions = 0;
@@ -206,7 +209,7 @@ INSERT INTO datetime_t VALUES (now())
Ok.
```

-The last query is equivalent to the following:
+最后一个查询等效于以下内容:

``` sql
SET input_format_values_interpret_expressions = 0;
@@ -219,21 +222,21 @@ Ok.
```
## input\_format\_values\_deduce\_templates\_of\_expressions {#settings-input_format_values_deduce_templates_of_expressions}

-Enables or disables template deduction for an SQL expressions in [Values](../../interfaces/formats.md#data-format-values) format. It allows to parse and interpret expressions in `Values` much faster if expressions in consecutive rows have the same structure. ClickHouse will try to deduce template of an expression, parse the following rows using this template and evaluate the expression on a batch of successfully parsed rows. For the following query:
+启用或禁用对 [Values](../../interfaces/formats.md#data-format-values) 格式中SQL表达式的模板推导。如果连续行中的表达式具有相同的结构,它可以大大加快 `Values` 中表达式的解析和解释速度。ClickHouse会尝试推导表达式的模板,使用该模板解析后续行,并在一批成功解析的行上对表达式求值。对于以下查询:

``` sql
INSERT INTO test VALUES (lower('Hello')), (lower('world')), (lower('INSERT')), (upper('Values')), ...
```

-- if `input_format_values_interpret_expressions=1` and `format_values_deduce_templates_of_expressions=0` expressions will be interpreted separately for each row (this is very slow for large number of rows)
-- if `input_format_values_interpret_expressions=0` and `format_values_deduce_templates_of_expressions=1` expressions in the first, second and third rows will be parsed using template `lower(String)` and interpreted together, expression is the forth row will be parsed with another template (`upper(String)`)
-- if `input_format_values_interpret_expressions=1` and `format_values_deduce_templates_of_expressions=1` - the same as in previous case, but also allows fallback to interpreting expressions separately if it’s not possible to deduce template.
+- 如果 `input_format_values_interpret_expressions=1` 且 `format_values_deduce_templates_of_expressions=0`,则每行的表达式将被单独解释(对于大量行来说,这非常慢)
+- 如果 `input_format_values_interpret_expressions=0` 且 `format_values_deduce_templates_of_expressions=1`,则第一、二、三行中的表达式将使用模板 `lower(String)` 解析并一起解释,第四行中的表达式将使用另一个模板 (`upper(String)`) 解析
+- 如果 `input_format_values_interpret_expressions=1` 且 `format_values_deduce_templates_of_expressions=1`,则与前一种情况相同,但当无法推导模板时,还允许回退到单独解释表达式。

-Enabled by default.
+默认情况下启用。

## input\_format\_values\_accurate\_types\_of\_literals {#settings-input-format-values-accurate-types-of-literals}

-This setting is used only when `input_format_values_deduce_templates_of_expressions = 1`. It can happen, that expressions for some column have the same structure, but contain numeric literals of different types, e.g
+此设置仅在 `input_format_values_deduce_templates_of_expressions = 1` 时使用。可能出现某些列的表达式具有相同的结构、但包含不同类型的数字字面量的情况,例如

``` sql
(..., abs(0), ...),             -- UInt64 literal
@@ -241,266 +244,266 @@ This setting is used only when `input_format_values_deduce_templates_of_expressi
(..., abs(-1), ...),            -- Int64 literal
```

-When this setting is enabled, ClickHouse will check the actual type of literal and will use an expression template of the corresponding type. In some cases, it may significantly slow down expression evaluation in `Values`.
-When disabled, ClickHouse may use more general type for some literals (e.g. `Float64` or `Int64` instead of `UInt64` for `42`), but it may cause overflow and precision issues.
-Enabled by default.
+启用此设置后,ClickHouse会检查字面量的实际类型,并使用相应类型的表达式模板。在某些情况下,这可能会显著减慢 `Values` 中的表达式求值速度。
+禁用此设置后,ClickHouse可能会对某些字面量使用更通用的类型(例如,对 `42` 使用 `Float64` 或 `Int64` 而不是 `UInt64`),但这可能会导致溢出和精度问题。
+默认情况下启用。

## input\_format\_defaults\_for\_omitted\_fields {#session_settings-input_format_defaults_for_omitted_fields}

-When performing `INSERT` queries, replace omitted input column values with default values of the respective columns. This option only applies to [JSONEachRow](../../interfaces/formats.md#jsoneachrow), [CSV](../../interfaces/formats.md#csv) and [TabSeparated](../../interfaces/formats.md#tabseparated) formats.
+执行 `INSERT` 查询时,将省略的输入列值替换为相应列的默认值。此选项仅适用于 [JSONEachRow](../../interfaces/formats.md#jsoneachrow)、[CSV](../../interfaces/formats.md#csv) 和 [TabSeparated](../../interfaces/formats.md#tabseparated) 格式。
note "Note" - When this option is enabled, extended table metadata are sent from server to client. It consumes additional computing resources on the server and can reduce performance. +!!! note "注" + 启用此选项后,扩展表元数据将从服务器发送到客户端。 它会消耗服务器上的额外计算资源,并可能降低性能。 -Possible values: +可能的值: - 0 — Disabled. - 1 — Enabled. -Default value: 1. +默认值:1。 ## input\_format\_tsv\_empty\_as\_default {#settings-input-format-tsv-empty-as-default} -When enabled, replace empty input fields in TSV with default values. For complex default expressions `input_format_defaults_for_omitted_fields` must be enabled too. +启用后,将TSV中的空输入字段替换为默认值。 对于复杂的默认表达式 `input_format_defaults_for_omitted_fields` 必须启用了。 -Disabled by default. +默认情况下禁用。 ## input\_format\_null\_as\_default {#settings-input-format-null-as-default} -Enables or disables using default values if input data contain `NULL`, but data type of the corresponding column in not `Nullable(T)` (for text input formats). +如果输入数据包含 `NULL`,但相应列的数据类型不 `Nullable(T)` (对于文本输入格式)。 ## input\_format\_skip\_unknown\_fields {#settings-input-format-skip-unknown-fields} -Enables or disables skipping insertion of extra data. +启用或禁用跳过额外数据的插入。 -When writing data, ClickHouse throws an exception if input data contain columns that do not exist in the target table. If skipping is enabled, ClickHouse doesn’t insert extra data and doesn’t throw an exception. +写入数据时,如果输入数据包含目标表中不存在的列,ClickHouse将引发异常。 如果启用了跳过,ClickHouse不会插入额外的数据,也不会引发异常。 -Supported formats: +支持的格式: - [JSONEachRow](../../interfaces/formats.md#jsoneachrow) - [CSVWithNames](../../interfaces/formats.md#csvwithnames) - [TabSeparatedWithNames](../../interfaces/formats.md#tabseparatedwithnames) - [TSKV](../../interfaces/formats.md#tskv) -Possible values: +可能的值: - 0 — Disabled. - 1 — Enabled. -Default value: 0. +默认值:0。 ## input\_format\_import\_nested\_json {#settings-input_format_import_nested_json} -Enables or disables the insertion of JSON data with nested objects. +启用或禁用具有嵌套对象的JSON数据的插入。 -Supported formats: +支持的格式: - [JSONEachRow](../../interfaces/formats.md#jsoneachrow) -Possible values: +可能的值: - 0 — Disabled. - 1 — Enabled. -Default value: 0. +默认值:0。 -See also: +另请参阅: -- [Usage of Nested Structures](../../interfaces/formats.md#jsoneachrow-nested) with the `JSONEachRow` format. +- [嵌套结构的使用](../../interfaces/formats.md#jsoneachrow-nested) 与 `JSONEachRow` 格式。 ## input\_format\_with\_names\_use\_header {#settings-input-format-with-names-use-header} -Enables or disables checking the column order when inserting data. +启用或禁用插入数据时检查列顺序。 -To improve insert performance, we recommend disabling this check if you are sure that the column order of the input data is the same as in the target table. +为了提高插入性能,如果您确定输入数据的列顺序与目标表中的列顺序相同,建议禁用此检查。 -Supported formats: +支持的格式: - [CSVWithNames](../../interfaces/formats.md#csvwithnames) - [TabSeparatedWithNames](../../interfaces/formats.md#tabseparatedwithnames) -Possible values: +可能的值: - 0 — Disabled. - 1 — Enabled. -Default value: 1. +默认值:1。 ## date\_time\_input\_format {#settings-date_time_input_format} -Allows choosing a parser of the text representation of date and time. +允许选择日期和时间的文本表示的解析器。 -The setting doesn’t apply to [date and time functions](../../query_language/functions/date_time_functions.md). +该设置不适用于 [日期和时间功能](../../sql_reference/functions/date_time_functions.md). -Possible values: +可能的值: - `'best_effort'` — Enables extended parsing. - ClickHouse can parse the basic `YYYY-MM-DD HH:MM:SS` format and all [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) date and time formats. 
- `'basic'` — Use basic parser.

-    ClickHouse can parse only the basic `YYYY-MM-DD HH:MM:SS` format. For example, `'2019-08-20 10:18:56'`.
+    ClickHouse只能解析基本的 `YYYY-MM-DD HH:MM:SS` 格式。例如 `'2019-08-20 10:18:56'`。

-Default value: `'basic'`.
+默认值: `'basic'`。

-See also:
+另请参阅:

-- [DateTime data type.](../../data_types/datetime.md)
-- [Functions for working with dates and times.](../../query_language/functions/date_time_functions.md)
+- [DateTime数据类型。](../../sql_reference/data_types/datetime.md)
+- [用于处理日期和时间的函数。](../../sql_reference/functions/date_time_functions.md)

## join\_default\_strictness {#settings-join_default_strictness}

-Sets default strictness for [JOIN clauses](../../query_language/select.md#select-join).
+为 [JOIN子句](../../sql_reference/statements/select.md#select-join) 设置默认的严格性。

-Possible values:
+可能的值:

-- `ALL` — If the right table has several matching rows, ClickHouse creates a [Cartesian product](https://en.wikipedia.org/wiki/Cartesian_product) from matching rows. This is the normal `JOIN` behaviour from standard SQL.
-- `ANY` — If the right table has several matching rows, only the first one found is joined. If the right table has only one matching row, the results of `ANY` and `ALL` are the same.
+- `ALL` — 如果右表有多个匹配行,ClickHouse会根据匹配的行创建 [笛卡尔积](https://en.wikipedia.org/wiki/Cartesian_product)。这是标准SQL中正常的 `JOIN` 行为。
+- `ANY` — 如果右表有多个匹配行,则只联接找到的第一个匹配行。如果右表只有一个匹配行,则 `ANY` 和 `ALL` 的结果相同。
- `ASOF` — For joining sequences with an uncertain match.
-- `Empty string` — If `ALL` or `ANY` is not specified in the query, ClickHouse throws an exception.
+- `Empty string` — 如果查询中未指定 `ALL` 或 `ANY`,ClickHouse会引发异常。

-Default value: `ALL`.
+默认值: `ALL`。

## join\_any\_take\_last\_row {#settings-join_any_take_last_row}

-Changes behaviour of join operations with `ANY` strictness.
+更改 `ANY` 严格性下联接操作的行为。

-!!! warning "Attention"
-    This setting applies only for `JOIN` operations with [Join](../table_engines/join.md) engine tables.
+!!! warning "注意"
+    此设置仅适用于使用 [Join](../../engines/table_engines/special/join.md) 引擎表的 `JOIN` 操作。

-Possible values:
+可能的值:

- 0 — If the right table has more than one matching row, only the first one found is joined.
- 1 — If the right table has more than one matching row, only the last one found is joined.

-Default value: 0.
+默认值:0。

-See also:
+另请参阅:

-- [JOIN clause](../../query_language/select.md#select-join)
-- [Join table engine](../table_engines/join.md)
+- [JOIN子句](../../sql_reference/statements/select.md#select-join)
+- [Join表引擎](../../engines/table_engines/special/join.md)
- [join\_default\_strictness](#settings-join_default_strictness)
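As an illustration, a hedged sketch of how the default strictness affects a bare `JOIN` (the tables `t1`, `t2` and the columns `id`, `value` are hypothetical):

``` sql
SET join_default_strictness = 'ANY';
-- the bare JOIN below is now executed as ANY JOIN:
-- at most one matching right-side row is taken per left-side row
SELECT t1.id, t2.value
FROM t1
JOIN t2 USING (id);
```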
## join\_use\_nulls {#join_use_nulls}

-Sets the type of [JOIN](../../query_language/select.md) behavior. When merging tables, empty cells may appear. ClickHouse fills them differently based on this setting.
+设置 [JOIN](../../sql_reference/statements/select.md) 行为的类型。合并表时,可能会出现空单元格。ClickHouse会根据此设置以不同的方式填充它们。

-Possible values:
+可能的值:

- 0 — The empty cells are filled with the default value of the corresponding field type.
-- 1 — `JOIN` behaves the same way as in standard SQL. The type of the corresponding field is converted to [Nullable](../../data_types/nullable.md#data_type-nullable), and empty cells are filled with [NULL](../../query_language/syntax.md).
+- 1 — `JOIN` 的行为方式与标准SQL中相同。相应字段的类型将转换为 [可为空](../../sql_reference/data_types/nullable.md#data_type-nullable),并且空单元格将填充为 [NULL](../../sql_reference/syntax.md)。

-Default value: 0.
+默认值:0。

## max\_block\_size {#setting-max_block_size}

-In ClickHouse, data is processed by blocks (sets of column parts). The internal processing cycles for a single block are efficient enough, but there are noticeable expenditures on each block. The `max_block_size` setting is a recommendation for what size of the block (in a count of rows) to load from tables. The block size shouldn’t be too small, so that the expenditures on each block are still noticeable, but not too large so that the query with LIMIT that is completed after the first block is processed quickly. The goal is to avoid consuming too much memory when extracting a large number of columns in multiple threads and to preserve at least some cache locality.
+在ClickHouse中,数据由块(列部分的集合)处理。单个块的内部处理周期足够高效,但每个块都有明显的开销。`max_block_size` 设置是对从表中加载的块大小(行数)的建议。块大小不应太小,以免每个块上的开销过于明显;也不应太大,以便带LIMIT的查询在处理完第一个块后能快速完成。目标是避免在多个线程中提取大量列时占用过多内存,并至少保留一定的缓存局部性。

-Default value: 65,536.
+默认值:65,536。

-Blocks the size of `max_block_size` are not always loaded from the table. If it is obvious that less data needs to be retrieved, a smaller block is processed.
+并不总是从表中加载大小为 `max_block_size` 的块。如果显然需要检索的数据较少,则处理较小的块。

## preferred\_block\_size\_bytes {#preferred-block-size-bytes}

-Used for the same purpose as `max_block_size`, but it sets the recommended block size in bytes by adapting it to the number of rows in the block.
-However, the block size cannot be more than `max_block_size` rows.
-By default: 1,000,000. It only works when reading from MergeTree engines.
+用途与 `max_block_size` 相同,但它通过适应块中的行数,以字节为单位设置推荐的块大小。
+但是,块大小不能超过 `max_block_size` 行。
+默认情况下:1,000,000。仅在从MergeTree引擎读取时有效。

## merge\_tree\_min\_rows\_for\_concurrent\_read {#setting-merge-tree-min-rows-for-concurrent-read}

-If the number of rows to be read from a file of a [MergeTree](../table_engines/mergetree.md) table exceeds `merge_tree_min_rows_for_concurrent_read` then ClickHouse tries to perform a concurrent reading from this file on several threads.
+如果从 [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md) 表的文件中读取的行数超过 `merge_tree_min_rows_for_concurrent_read`,则ClickHouse会尝试在多个线程上对该文件执行并发读取。

-Possible values:
+可能的值:

-- Any positive integer.
+- 任何正整数。

-Default value: 163840.
+默认值:163840。

## merge\_tree\_min\_bytes\_for\_concurrent\_read {#setting-merge-tree-min-bytes-for-concurrent-read}

-If the number of bytes to read from one file of a [MergeTree](../table_engines/mergetree.md)-engine table exceeds `merge_tree_min_bytes_for_concurrent_read`, then ClickHouse tries to concurrently read from this file in several threads.
+如果从 [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md) 引擎表的一个文件中读取的字节数超过 `merge_tree_min_bytes_for_concurrent_read`,则ClickHouse会尝试在多个线程中并发读取此文件。

-Possible value:
+可能的值:

-- Any positive integer.
+- 任何正整数。

-Default value: 251658240.
+默认值:251658240。
## merge\_tree\_min\_rows\_for\_seek {#setting-merge-tree-min-rows-for-seek}

-If the distance between two data blocks to be read in one file is less than `merge_tree_min_rows_for_seek` rows, then ClickHouse does not seek through the file but reads the data sequentially.
+如果要在一个文件中读取的两个数据块之间的距离小于 `merge_tree_min_rows_for_seek` 行,则ClickHouse不会在文件中寻道,而是按顺序读取数据。

-Possible values:
+可能的值:

-- Any positive integer.
+- 任何正整数。

-Default value: 0.
+默认值:0。

## merge\_tree\_min\_bytes\_for\_seek {#setting-merge-tree-min-bytes-for-seek}

-If the distance between two data blocks to be read in one file is less than `merge_tree_min_bytes_for_seek` bytes, then ClickHouse sequentially reads a range of file that contains both blocks, thus avoiding extra seek.
+如果要在一个文件中读取的两个数据块之间的距离小于 `merge_tree_min_bytes_for_seek` 字节,则ClickHouse会顺序读取包含这两个块的文件范围,从而避免额外的寻道。

-Possible values:
+可能的值:

-- Any positive integer.
+- 任何正整数。

-Default value: 0.
+默认值:0。

## merge\_tree\_coarse\_index\_granularity {#setting-merge-tree-coarse-index-granularity}

-When searching for data, ClickHouse checks the data marks in the index file. If ClickHouse finds that required keys are in some range, it divides this range into `merge_tree_coarse_index_granularity` subranges and searches the required keys there recursively.
+搜索数据时,ClickHouse会检查索引文件中的数据标记。如果ClickHouse发现所需的键位于某个范围内,它会将该范围划分为 `merge_tree_coarse_index_granularity` 个子范围,并在其中递归地搜索所需的键。

-Possible values:
+可能的值:

-- Any positive even integer.
+- 任何正偶数。

-Default value: 8.
+默认值:8。

## merge\_tree\_max\_rows\_to\_use\_cache {#setting-merge-tree-max-rows-to-use-cache}

-If ClickHouse should read more than `merge_tree_max_rows_to_use_cache` rows in one query, it doesn’t use the cache of uncompressed blocks.
+如果ClickHouse在一个查询中需要读取超过 `merge_tree_max_rows_to_use_cache` 行,则不使用未压缩块的缓存。

-The cache of uncompressed blocks stores data extracted for queries. ClickHouse uses this cache to speed up responses to repeated small queries. This setting protects the cache from trashing by queries that read a large amount of data. The [uncompressed\_cache\_size](../server_settings/settings.md#server-settings-uncompressed_cache_size) server setting defines the size of the cache of uncompressed blocks.
+未压缩块的缓存存储为查询提取的数据。ClickHouse使用此缓存来加快对重复的小查询的响应。此设置可保护缓存不被读取大量数据的查询冲刷。[uncompressed\_cache\_size](../server_configuration_parameters/settings.md#server-settings-uncompressed_cache_size) 服务器设置定义了未压缩块缓存的大小。

-Possible values:
+可能的值:

-- Any positive integer.
+- 任何正整数。

Default value: 128 ✕ 8192.

## merge\_tree\_max\_bytes\_to\_use\_cache {#setting-merge-tree-max-bytes-to-use-cache}

-If ClickHouse should read more than `merge_tree_max_bytes_to_use_cache` bytes in one query, it doesn’t use the cache of uncompressed blocks.
+如果ClickHouse在一个查询中需要读取超过 `merge_tree_max_bytes_to_use_cache` 字节,则不使用未压缩块的缓存。

-The cache of uncompressed blocks stores data extracted for queries. ClickHouse uses this cache to speed up responses to repeated small queries. This setting protects the cache from trashing by queries that read a large amount of data. The [uncompressed\_cache\_size](../server_settings/settings.md#server-settings-uncompressed_cache_size) server setting defines the size of the cache of uncompressed blocks.
+未压缩块的缓存存储为查询提取的数据。ClickHouse使用此缓存来加快对重复的小查询的响应。此设置可保护缓存不被读取大量数据的查询冲刷。[uncompressed\_cache\_size](../server_configuration_parameters/settings.md#server-settings-uncompressed_cache_size) 服务器设置定义了未压缩块缓存的大小。

-Possible value:
+可能的值:

-- Any positive integer.
+- 任何正整数。

-Default value: 2013265920.
+默认值:2013265920。
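A short sketch of how these thresholds interact with the uncompressed cache, described further below under use\_uncompressed\_cache (the table `hits` and column `EventDate` are hypothetical, and the row limit is illustrative):

``` sql
SET use_uncompressed_cache = 1;
SET merge_tree_max_rows_to_use_cache = 1048576; -- queries reading more rows bypass the cache
SELECT count() FROM hits WHERE EventDate = today();
```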
## min\_bytes\_to\_use\_direct\_io {#settings-min-bytes-to-use-direct-io}

-The minimum data volume required for using direct I/O access to the storage disk.
+使用直接I/O访问存储磁盘所需的最小数据量。

-ClickHouse uses this setting when reading data from tables. If the total storage volume of all the data to be read exceeds `min_bytes_to_use_direct_io` bytes, then ClickHouse reads the data from the storage disk with the `O_DIRECT` option.
+ClickHouse在从表中读取数据时使用此设置。如果要读取的所有数据的总存储量超过 `min_bytes_to_use_direct_io` 字节,则ClickHouse会使用 `O_DIRECT` 选项从存储磁盘读取数据。

-Possible values:
+可能的值:

- 0 — Direct I/O is disabled.
-- Positive integer.
+- 正整数。

-Default value: 0.
+默认值:0。

## log\_queries {#settings-log-queries}

-Setting up query logging.
+设置查询日志记录。

-Queries sent to ClickHouse with this setup are logged according to the rules in the [query\_log](../server_settings/settings.md#server_settings-query-log) server configuration parameter.
+使用此设置发送到ClickHouse的查询将根据 [query\_log](../server_configuration_parameters/settings.md#server_configuration_parameters-query-log) 服务器配置参数中的规则记录。

-Example:
+示例:

``` text
log_queries=1
```

## log\_query\_threads {#settings-log-query-threads}

-Setting up query threads logging.
+设置查询线程日志记录。

-Queries’ threads runned by ClickHouse with this setup are logged according to the rules in the [query\_thread\_log](../server_settings/settings.md#server_settings-query-thread-log) server configuration parameter.
+使用此设置时,由ClickHouse运行的查询线程将根据 [query\_thread\_log](../server_configuration_parameters/settings.md#server_configuration_parameters-query-thread-log) 服务器配置参数中的规则记录。

-Example:
+示例:

``` text
log_query_threads=1
```

## max\_insert\_block\_size {#settings-max_insert_block_size}

-The size of blocks to form for insertion into a table.
-This setting only applies in cases when the server forms the blocks.
-For example, for an INSERT via the HTTP interface, the server parses the data format and forms blocks of the specified size.
-But when using clickhouse-client, the client parses the data itself, and the ‘max\_insert\_block\_size’ setting on the server doesn’t affect the size of the inserted blocks.
-The setting also doesn’t have a purpose when using INSERT SELECT, since data is inserted using the same blocks that are formed after SELECT.
+要插入到表中的块的大小。
+此设置仅适用于由服务器形成块的情况。
+例如,对于通过HTTP接口进行的INSERT,服务器会解析数据格式并形成指定大小的块。
+但是当使用clickhouse-client时,由客户端自行解析数据,服务器上的 ‘max\_insert\_block\_size’ 设置不会影响插入块的大小。
+使用INSERT SELECT时,该设置也没有意义,因为数据是使用SELECT之后形成的相同块插入的。

-Default value: 1,048,576.
+默认值:1,048,576。

-The default is slightly more than `max_block_size`. The reason for this is because certain table engines (`*MergeTree`) form a data part on the disk for each inserted block, which is a fairly large entity. Similarly, `*MergeTree` tables sort data during insertion and a large enough block size allow sorting more data in RAM.
+默认值略高于 `max_block_size`。这是因为某些表引擎(`*MergeTree`)会在磁盘上为每个插入的块形成一个数据部分,这是一个相当大的实体。同样,`*MergeTree` 表会在插入期间对数据进行排序,足够大的块大小允许在RAM中对更多数据进行排序。

## max\_replica\_delay\_for\_distributed\_queries {#settings-max_replica_delay_for_distributed_queries}

-Disables lagging replicas for distributed queries. See [Replication](../../operations/table_engines/replication.md).
+禁用分布式查询的滞后副本。参见 [复制](../../engines/table_engines/mergetree_family/replication.md)。

-Sets the time in seconds. If a replica lags more than the set value, this replica is not used.
+以秒为单位设置时间。如果副本滞后超过设定值,则不使用该副本。

-Default value: 300.
+默认值:300。

-Used when performing `SELECT` from a distributed table that points to replicated tables.
+当对指向复制表的分布式表执行 `SELECT` 时使用。
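For example (the values are illustrative), the following combination skips replicas lagging by more than five minutes, while still answering from stale replicas if no fresh one is left:

``` sql
SET max_replica_delay_for_distributed_queries = 300;        -- skip replicas lagging > 300 s
SET fallback_to_stale_replicas_for_distributed_queries = 1; -- last resort: use a stale replica
```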
## max\_threads {#settings-max_threads}

-The maximum number of query processing threads, excluding threads for retrieving data from remote servers (see the ‘max\_distributed\_connections’ parameter).
+查询处理线程的最大数量,不包括用于从远程服务器检索数据的线程(请参阅 ‘max\_distributed\_connections’ 参数)。

-This parameter applies to threads that perform the same stages of the query processing pipeline in parallel.
-For example, when reading from a table, if it is possible to evaluate expressions with functions, filter with WHERE and pre-aggregate for GROUP BY in parallel using at least ‘max\_threads’ number of threads, then ‘max\_threads’ are used.
+此参数适用于并行执行查询处理管道同一阶段的线程。
+例如,从表中读取时,如果可以并行地用函数对表达式求值、用WHERE进行过滤并为GROUP BY进行预聚合,且至少能用到 ‘max\_threads’ 个线程,则会使用 ‘max\_threads’ 个线程。

-Default value: the number of physical CPU cores.
+默认值:物理CPU内核数。

-If less than one SELECT query is normally run on a server at a time, set this parameter to a value slightly less than the actual number of processor cores.
+如果服务器上通常一次运行的SELECT查询少于一个,请将此参数设置为略小于实际处理器内核数的值。

-For queries that are completed quickly because of a LIMIT, you can set a lower ‘max\_threads’. For example, if the necessary number of entries are located in every block and max\_threads = 8, then 8 blocks are retrieved, although it would have been enough to read just one.
+对于因LIMIT而快速完成的查询,可以设置较低的 ‘max\_threads’。例如,如果每个块中都有所需数量的条目且max\_threads = 8,则会检索8个块,尽管只读取一个块就足够了。

-The smaller the `max_threads` value, the less memory is consumed.
+`max_threads` 值越小,消耗的内存越少。

## max\_insert\_threads {#settings-max-insert-threads}

-The maximum number of threads to execute the `INSERT SELECT` query.
+执行 `INSERT SELECT` 查询的最大线程数。

-Possible values:
+可能的值:

-- 0 (or 1) — `INSERT SELECT` no parallel execution.
-- Positive integer. Bigger than 1.
+- 0 (or 1) — `INSERT SELECT` 不并行执行。
+- 大于1的正整数。

-Default value: 0.
+默认值:0。

-Parallel `INSERT SELECT` has effect only if the `SELECT` part is executed in parallel, see [max\_threads](#settings-max_threads) setting.
-Higher values will lead to higher memory usage.
+并行的 `INSERT SELECT` 仅在 `SELECT` 部分并行执行时才有效,请参阅 [max\_threads](#settings-max_threads) 设置。
+值越高,内存使用率越高。

## max\_compress\_block\_size {#max-compress-block-size}

-The maximum size of blocks of uncompressed data before compressing for writing to a table. By default, 1,048,576 (1 MiB). If the size is reduced, the compression rate is significantly reduced, the compression and decompression speed increases slightly due to cache locality, and memory consumption is reduced. There usually isn’t any reason to change this setting.
+压缩写入表之前,未压缩数据块的最大大小。默认情况下为1,048,576(1 MiB)。如果减小该值,压缩率会显著降低,由于缓存局部性,压缩和解压缩速度会略有提高,并且内存消耗会减少。通常没有任何理由更改此设置。

-Don’t confuse blocks for compression (a chunk of memory consisting of bytes) with blocks for query processing (a set of rows from a table).
+不要将用于压缩的块(由字节组成的内存块)与用于查询处理的块(表中的一组行)混淆。

## min\_compress\_block\_size {#min-compress-block-size}

-For [MergeTree](../table_engines/mergetree.md)" tables. In order to reduce latency when processing queries, a block is compressed when writing the next mark if its size is at least ‘min\_compress\_block\_size’. By default, 65,536.
+适用于 [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md) 表。为了减少处理查询时的延迟,在写入下一个标记时,如果块的大小至少为 ‘min\_compress\_block\_size’,则会压缩该块。默认情况下为65,536。

-The actual size of the block, if the uncompressed data is less than ‘max\_compress\_block\_size’, is no less than this value and no less than the volume of data for one mark.
+如果未压缩的数据小于 ‘max\_compress\_block\_size’,则块的实际大小不小于该值,且不小于一个标记的数据量。
-Let’s look at an example. Assume that ‘index\_granularity’ was set to 8192 during table creation.
+让我们来看一个例子。假设在创建表时将 ‘index\_granularity’ 设置为8192。

-We are writing a UInt32-type column (4 bytes per value). When writing 8192 rows, the total will be 32 KB of data. Since min\_compress\_block\_size = 65,536, a compressed block will be formed for every two marks.
+我们正在写入一个UInt32类型的列(每个值4个字节)。写入8192行时,总共为32 KB的数据。由于min\_compress\_block\_size = 65,536,将每两个标记形成一个压缩块。

-We are writing a URL column with the String type (average size of 60 bytes per value). When writing 8192 rows, the average will be slightly less than 500 KB of data. Since this is more than 65,536, a compressed block will be formed for each mark. In this case, when reading data from the disk in the range of a single mark, extra data won’t be decompressed.
+我们正在写入一个String类型的URL列(每个值的平均大小为60字节)。写入8192行时,平均略少于500 KB的数据。由于这超过了65,536,将为每个标记形成一个压缩块。在这种情况下,从磁盘读取单个标记范围内的数据时,不会解压缩额外的数据。

-There usually isn’t any reason to change this setting.
+通常没有任何理由更改此设置。

## max\_query\_size {#settings-max_query_size}

-The maximum part of a query that can be taken to RAM for parsing with the SQL parser.
-The INSERT query also contains data for INSERT that is processed by a separate stream parser (that consumes O(1) RAM), which is not included in this restriction.
+可以放入RAM中用SQL解析器解析的查询的最大部分。
+INSERT查询还包含由单独的流解析器(消耗O(1)的RAM)处理的INSERT数据,这些数据不包含在此限制中。

-Default value: 256 KiB.
+默认值:256 KiB。

## interactive\_delay {#interactive-delay}

-The interval in microseconds for checking whether request execution has been cancelled and sending the progress.
+以微秒为单位的间隔,用于检查请求执行是否已被取消并发送进度。

-Default value: 100,000 (checks for cancelling and sends the progress ten times per second).
+默认值:100,000(检查取消并每秒发送十次进度)。

-## connect\_timeout, receive\_timeout, send\_timeout {#connect-timeout-receive-timeout-send-timeout}
+## connect\_timeout,receive\_timeout,send\_timeout {#connect-timeout-receive-timeout-send-timeout}

-Timeouts in seconds on the socket used for communicating with the client.
+用于与客户端通信的套接字上的超时,以秒为单位。

-Default value: 10, 300, 300.
+默认值:10,300,300。

## cancel\_http\_readonly\_queries\_on\_client\_close {#cancel-http-readonly-queries-on-client-close}

Cancels HTTP read-only queries (e.g. SELECT) when a client closes the connection without waiting for the response.

-Default value: 0
+默认值:0

## poll\_interval {#poll-interval}

-Lock in a wait loop for the specified number of seconds.
+在等待循环中锁定指定的秒数。

-Default value: 10.
+默认值:10。

## max\_distributed\_connections {#max-distributed-connections}

-The maximum number of simultaneous connections with remote servers for distributed processing of a single query to a single Distributed table. We recommend setting a value no less than the number of servers in the cluster.
+对单个分布式表的单个查询进行分布式处理时,与远程服务器同时连接的最大数量。我们建议设置不小于集群中服务器数量的值。

-Default value: 1024.
+默认值:1024。

-The following parameters are only used when creating Distributed tables (and when launching a server), so there is no reason to change them at runtime.
+以下参数仅在创建分布式表(以及启动服务器)时使用,因此没有理由在运行时更改它们。

## distributed\_connections\_pool\_size {#distributed-connections-pool-size}

-The maximum number of simultaneous connections with remote servers for distributed processing of all queries to a single Distributed table. We recommend setting a value no less than the number of servers in the cluster.
+对单个分布式表的所有查询进行分布式处理时,与远程服务器同时连接的最大数量。我们建议设置不小于集群中服务器数量的值。

-Default value: 1024.
+默认值:1024。
## connect\_timeout\_with\_failover\_ms {#connect-timeout-with-failover-ms}

-The timeout in milliseconds for connecting to a remote server for a Distributed table engine, if the ‘shard’ and ‘replica’ sections are used in the cluster definition.
-If unsuccessful, several attempts are made to connect to various replicas.
+如果集群定义中使用了 ‘shard’ 和 ‘replica’ 部分,则为分布式表引擎连接到远程服务器的超时,以毫秒为单位。
+如果不成功,将多次尝试连接到各个副本。

-Default value: 50.
+默认值:50。

## connections\_with\_failover\_max\_tries {#connections-with-failover-max-tries}

-The maximum number of connection attempts with each replica for the Distributed table engine.
+分布式表引擎对每个副本的最大连接尝试次数。

-Default value: 3.
+默认值:3。

-## extremes {#extremes}
+## 极值 {#extremes}

-Whether to count extreme values (the minimums and maximums in columns of a query result). Accepts 0 or 1. By default, 0 (disabled).
-For more information, see the section “Extreme values”.
+是否计算极值(查询结果列中的最小值和最大值)。接受0或1。默认情况下为0(禁用)。
+有关详细信息,请参阅 “Extreme values” 部分。

## use\_uncompressed\_cache {#setting-use_uncompressed_cache}

-Whether to use a cache of uncompressed blocks. Accepts 0 or 1. By default, 0 (disabled).
-Using the uncompressed cache (only for tables in the MergeTree family) can significantly reduce latency and increase throughput when working with a large number of short queries. Enable this setting for users who send frequent short requests. Also pay attention to the [uncompressed\_cache\_size](../server_settings/settings.md#server-settings-uncompressed_cache_size) configuration parameter (only set in the config file) – the size of uncompressed cache blocks. By default, it is 8 GiB. The uncompressed cache is filled in as needed and the least-used data is automatically deleted.
+是否使用未压缩块的缓存。接受0或1。默认情况下为0(禁用)。
+使用未压缩缓存(仅适用于MergeTree系列中的表)可以在处理大量短查询时显著减少延迟并提高吞吐量。为频繁发送短请求的用户启用此设置。另请注意 [uncompressed\_cache\_size](../server_configuration_parameters/settings.md#server-settings-uncompressed_cache_size) 配置参数(仅在配置文件中设置),即未压缩缓存块的大小。默认情况下为8 GiB。未压缩缓存会按需填充,最少使用的数据会被自动删除。

-For queries that read at least a somewhat large volume of data (one million rows or more), the uncompressed cache is disabled automatically to save space for truly small queries. This means that you can keep the ‘use\_uncompressed\_cache’ setting always set to 1.
+对于至少读取较大数据量(一百万行或更多)的查询,会自动禁用未压缩缓存,以便为真正的小查询节省空间。这意味着您可以让 ‘use\_uncompressed\_cache’ 设置始终保持为1。

## replace\_running\_query {#replace-running-query}

-When using the HTTP interface, the ‘query\_id’ parameter can be passed. This is any string that serves as the query identifier.
-If a query from the same user with the same ‘query\_id’ already exists at this time, the behaviour depends on the ‘replace\_running\_query’ parameter.
+使用HTTP接口时,可以传递 ‘query\_id’ 参数。它可以是任何用作查询标识符的字符串。
+如果此时已存在来自同一用户、具有相同 ‘query\_id’ 的查询,则行为取决于 ‘replace\_running\_query’ 参数。

-`0` (default) – Throw an exception (don’t allow the query to run if a query with the same ‘query\_id’ is already running).
+`0` (默认)– 抛出异常(如果具有相同 ‘query\_id’ 的查询已在运行,则不允许该查询运行)。

`1` – Cancel the old query and start running the new one.

-Yandex.Metrica uses this parameter set to 1 for implementing suggestions for segmentation conditions. After entering the next character, if the old query hasn’t finished yet, it should be cancelled.
+Yandex.Metrica将此参数设置为1,用于实现分段条件的建议。输入下一个字符后,如果旧查询尚未完成,则应将其取消。
## stream\_flush\_interval\_ms {#stream-flush-interval-ms}

-Works for tables with streaming in the case of a timeout, or when a thread generates [max\_insert\_block\_size](#settings-max_insert_block_size) rows.
+适用于流式处理的表:在超时的情况下,或当一个线程生成 [max\_insert\_block\_size](#settings-max_insert_block_size) 行时进行刷新。

-The default value is 7500.
+默认值为7500。

-The smaller the value, the more often data is flushed into the table. Setting the value too low leads to poor performance.
+值越小,数据刷新到表中的频率就越高。将该值设置得太低会导致性能不佳。

## load\_balancing {#settings-load_balancing}

-Specifies the algorithm of replicas selection that is used for distributed query processing.
+指定用于分布式查询处理的副本选择算法。

-ClickHouse supports the following algorithms of choosing replicas:
+ClickHouse支持以下选择副本的算法:

-- [Random](#load_balancing-random) (by default)
-- [Nearest hostname](#load_balancing-nearest_hostname)
-- [In order](#load_balancing-in_order)
-- [First or random](#load_balancing-first_or_random)
+- [随机](#load_balancing-random)(默认情况下)
+- [最近的主机名](#load_balancing-nearest_hostname)
+- [按顺序](#load_balancing-in_order)
+- [第一个或随机](#load_balancing-first_or_random)

-### Random (by default) {#load_balancing-random}
+### 随机(默认情况下) {#load_balancing-random}

``` sql
load_balancing = random
```

-The number of errors is counted for each replica. The query is sent to the replica with the fewest errors, and if there are several of these, to anyone of them.
-Disadvantages: Server proximity is not accounted for; if the replicas have different data, you will also get different data.
+对每个副本计算错误数。查询会被发送到错误最少的副本;如果有多个这样的副本,则发送到其中任意一个。
+缺点:不考虑服务器邻近度;如果副本具有不同的数据,您也会得到不同的数据。

-### Nearest Hostname {#load_balancing-nearest_hostname}
+### 最近的主机名 {#load_balancing-nearest_hostname}

``` sql
load_balancing = nearest_hostname
```

-The number of errors is counted for each replica. Every 5 minutes, the number of errors is integrally divided by 2. Thus, the number of errors is calculated for a recent time with exponential smoothing. If there is one replica with a minimal number of errors (i.e. errors occurred recently on the other replicas), the query is sent to it. If there are multiple replicas with the same minimal number of errors, the query is sent to the replica with a hostname that is most similar to the server’s hostname in the config file (for the number of different characters in identical positions, up to the minimum length of both hostnames).
+The number of errors is counted for each replica. Every 5 minutes, the number of errors is integrally divided by 2. Thus, the number of errors is calculated for a recent time with exponential smoothing. If there is one replica with a minimal number of errors (i.e. errors occurred recently on the other replicas), the query is sent to it. If there are multiple replicas with the same minimal number of errors, the query is sent to the replica with a hostname that is most similar to the server's hostname in the config file (for the number of different characters in identical positions, up to the minimum length of both hostnames).

-For instance, example01-01-1 and example01-01-2.yandex.ru are different in one position, while example01-01-1 and example01-02-2 differ in two places.
-This method might seem primitive, but it doesn’t require external data about network topology, and it doesn’t compare IP addresses, which would be complicated for our IPv6 addresses.
+例如,example01-01-1 和 example01-01-2.yandex.ru 在一个位置上不同,而 example01-01-1 和 example01-02-2 在两个位置上不同。
+这种方法可能看起来很原始,但它不需要有关网络拓扑的外部数据,也不比较IP地址,这对于我们的IPv6地址来说会很复杂。
-Thus, if there are equivalent replicas, the closest one by name is preferred.
-We can also assume that when sending a query to the same server, in the absence of failures, a distributed query will also go to the same servers. So even if different data is placed on the replicas, the query will return mostly the same results.
+因此,如果存在等效副本,则优先选择按名称最接近的副本。
+我们还可以假设,在向同一台服务器发送查询时,在没有失败的情况下,分布式查询也会转到相同的服务器。因此,即使副本上放置了不同的数据,查询也会返回大致相同的结果。

-### In Order {#load_balancing-in_order}
+### 按顺序 {#load_balancing-in_order}

``` sql
load_balancing = in_order
```

-Replicas with the same number of errors are accessed in the same order as they are specified in the configuration.
-This method is appropriate when you know exactly which replica is preferable.
+具有相同错误数的副本按照配置中指定的顺序访问。
+当您确切知道哪个副本更可取时,此方法是合适的。

-### First or Random {#load_balancing-first_or_random}
+### 第一个或随机 {#load_balancing-first_or_random}

``` sql
load_balancing = first_or_random
```

-This algorithm chooses the first replica in the set or a random replica if the first is unavailable. It’s effective in cross-replication topology setups, but useless in other configurations.
+此算法选择集合中的第一个副本;如果第一个副本不可用,则选择随机副本。它在交叉复制拓扑设置中有效,但在其他配置中无用。

-The `first_or_random` algorithm solves the problem of the `in_order` algorithm. With `in_order`, if one replica goes down, the next one gets a double load while the remaining replicas handle the usual amount of traffic. When using the `first_or_random` algorithm, the load is evenly distributed among replicas that are still available.
+`first_or_random` 算法解决了 `in_order` 算法的问题。使用 `in_order` 时,如果一个副本出现故障,下一个副本将承受双倍负载,而其余副本则处理通常的流量。使用 `first_or_random` 算法时,负载会均匀分布在仍然可用的副本之间。

## prefer\_localhost\_replica {#settings-prefer-localhost-replica}

-Enables/disables preferable using the localhost replica when processing distributed queries.
+在处理分布式查询时,启用/禁用优先使用localhost副本。

-Possible values:
+可能的值:

- 1 — ClickHouse always sends a query to the localhost replica if it exists.
-- 0 — ClickHouse uses the balancing strategy specified by the [load\_balancing](#settings-load_balancing) setting.
+- 0 — ClickHouse使用由 [load\_balancing](#settings-load_balancing) 设置指定的均衡策略。

-Default value: 1.
+默认值:1。

-!!! warning "Warning"
-    Disable this setting if you use [max\_parallel\_replicas](#settings-max_parallel_replicas).
+!!! warning "警告"
+    如果您使用 [max\_parallel\_replicas](#settings-max_parallel_replicas),请禁用此设置。

## totals\_mode {#totals-mode}

-How to calculate TOTALS when HAVING is present, as well as when max\_rows\_to\_group\_by and group\_by\_overflow\_mode = ‘any’ are present.
-See the section “WITH TOTALS modifier”.
+当HAVING存在时,以及当max\_rows\_to\_group\_by和group\_by\_overflow\_mode = ‘any’ 存在时,如何计算TOTALS。
+请参阅 “WITH TOTALS modifier” 部分。

## totals\_auto\_threshold {#totals-auto-threshold}

-The threshold for `totals_mode = 'auto'`.
-See the section “WITH TOTALS modifier”.
+`totals_mode = 'auto'` 的阈值。
+请参阅 “WITH TOTALS modifier” 部分。

## max\_parallel\_replicas {#settings-max_parallel_replicas}

-The maximum number of replicas for each shard when executing a query.
-For consistency (to get different parts of the same data split), this option only works when the sampling key is set.
-Replica lag is not controlled.
+执行查询时每个分片的最大副本数。
+为了保持一致性(以获取同一数据拆分的不同部分),此选项仅在设置了采样键时有效。
+副本滞后不受控制。

-## compile {#compile}
+## 编译 {#compile}

-Enable compilation of queries. By default, 0 (disabled).
+启用查询的编译。默认情况下为0(禁用)。
-The compilation is only used for part of the query-processing pipeline: for the first stage of aggregation (GROUP BY).
-If this portion of the pipeline was compiled, the query may run faster due to deployment of short cycles and inlining aggregate function calls. The maximum performance improvement (up to four times faster in rare cases) is seen for queries with multiple simple aggregate functions. Typically, the performance gain is insignificant. In very rare cases, it may slow down query execution.
+编译仅用于查询处理管道的一部分:聚合的第一阶段(GROUP BY)。
+如果编译了管道的这一部分,则由于使用了短循环并内联了聚合函数调用,查询可能运行得更快。对于具有多个简单聚合函数的查询,可以看到最大的性能提升(在极少数情况下可快四倍)。通常,性能提升微不足道。在极少数情况下,它可能会减慢查询执行速度。

## min\_count\_to\_compile {#min-count-to-compile}

-How many times to potentially use a compiled chunk of code before running compilation. By default, 3.
+在运行编译之前,可能使用某个已编译代码块的次数。默认情况下为3。

For testing, the value can be set to 0: compilation runs synchronously and the query waits for the end of the compilation process before continuing execution. For all other cases, use values ​​starting with 1. Compilation normally takes about 5-10 seconds.

-If the value is 1 or more, compilation occurs asynchronously in a separate thread. The result will be used as soon as it is ready, including queries that are currently running.
+如果该值为1或更大,则编译会在单独的线程中异步进行。结果一准备好就会被使用,包括当前正在运行的查询。

-Compiled code is required for each different combination of aggregate functions used in the query and the type of keys in the GROUP BY clause.
+对于查询中使用的聚合函数的每种不同组合以及GROUP BY子句中键的类型,都需要编译代码。

-The results of the compilation are saved in the build directory in the form of .so files. There is no restriction on the number of compilation results since they don’t use very much space. Old results will be used after server restarts, except in the case of a server upgrade – in this case, the old results are deleted.
+The results of the compilation are saved in the build directory in the form of .so files. There is no restriction on the number of compilation results since they don't use very much space. Old results will be used after server restarts, except in the case of a server upgrade – in this case, the old results are deleted.

## output\_format\_json\_quote\_64bit\_integers {#session_settings-output_format_json_quote_64bit_integers}

-If the value is true, integers appear in quotes when using JSON\* Int64 and UInt64 formats (for compatibility with most JavaScript implementations); otherwise, integers are output without the quotes.
+如果该值为true,则在使用JSON\* Int64和UInt64格式时,整数会以引号形式输出(为了与大多数JavaScript实现兼容);否则,整数不带引号输出。

## format\_csv\_delimiter {#settings-format_csv_delimiter}

-The character interpreted as a delimiter in the CSV data. By default, the delimiter is `,`.
+被解释为CSV数据中分隔符的字符。默认情况下,分隔符为 `,`。

## input\_format\_csv\_unquoted\_null\_literal\_as\_null {#settings-input_format_csv_unquoted_null_literal_as_null}

-For CSV input format enables or disables parsing of unquoted `NULL` as literal (synonym for `\N`).
+对于CSV输入格式,启用或禁用将不带引号的 `NULL` 解析为字面量(`\N` 的同义词)。

## output\_format\_csv\_crlf\_end\_of\_line {#settings-output-format-csv-crlf-end-of-line}

-Use DOS/Windows-style line separator (CRLF) in CSV instead of Unix style (LF).
+在CSV中使用DOS/Windows样式的行分隔符(CRLF)而不是Unix样式(LF)。

## output\_format\_tsv\_crlf\_end\_of\_line {#settings-output-format-tsv-crlf-end-of-line}

-Use DOC/Windows-style line separator (CRLF) in TSV instead of Unix style (LF).
+在TSV中使用DOS/Windows样式的行分隔符(CRLF)而不是Unix样式(LF)。
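A small sketch combining the CSV output settings above (the semicolon delimiter is just an example):

``` sql
SET format_csv_delimiter = ';';
SET output_format_csv_crlf_end_of_line = 1;
SELECT 1 AS id, 'hello' AS msg FORMAT CSV; -- semicolon-separated fields, CRLF line endings
```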
## insert\_quorum {#settings-insert_quorum}

-Enables the quorum writes.
+启用仲裁写入。

-- If `insert_quorum < 2`, the quorum writes are disabled.
-- If `insert_quorum >= 2`, the quorum writes are enabled.
+- 如果 `insert_quorum < 2`,则禁用仲裁写入。
+- 如果 `insert_quorum >= 2`,则启用仲裁写入。

-Default value: 0.
+默认值:0。

-Quorum writes
+仲裁写入

-`INSERT` succeeds only when ClickHouse manages to correctly write data to the `insert_quorum` of replicas during the `insert_quorum_timeout`. If for any reason the number of replicas with successful writes does not reach the `insert_quorum`, the write is considered failed and ClickHouse will delete the inserted block from all the replicas where data has already been written.
+只有当ClickHouse在 `insert_quorum_timeout` 期间成功将数据正确写入 `insert_quorum` 个副本时,`INSERT` 才会成功。如果由于任何原因,成功写入的副本数量没有达到 `insert_quorum`,则写入被视为失败,ClickHouse将从所有已写入数据的副本中删除插入的块。

-All the replicas in the quorum are consistent, i.e., they contain data from all previous `INSERT` queries. The `INSERT` sequence is linearized.
+仲裁中的所有副本都是一致的,即它们包含所有先前 `INSERT` 查询的数据。`INSERT` 序列是线性化的。

-When reading the data written from the `insert_quorum`, you can use the [select\_sequential\_consistency](#settings-select_sequential_consistency) option.
+读取使用 `insert_quorum` 写入的数据时,您可以使用 [select\_sequential\_consistency](#settings-select_sequential_consistency) 选项。

-ClickHouse generates an exception
+在以下情况下,ClickHouse会生成异常

-- If the number of available replicas at the time of the query is less than the `insert_quorum`.
-- At an attempt to write data when the previous block has not yet been inserted in the `insert_quorum` of replicas. This situation may occur if the user tries to perform an `INSERT` before the previous one with the `insert_quorum` is completed.
+- 如果查询时可用副本的数量少于 `insert_quorum`。
+- 在前一个块尚未插入到 `insert_quorum` 个副本时尝试写入数据。如果用户在前一个使用 `insert_quorum` 的 `INSERT` 完成之前尝试执行 `INSERT`,就可能发生这种情况。

-See also:
+另请参阅:

- [insert\_quorum\_timeout](#settings-insert_quorum_timeout)
- [select\_sequential\_consistency](#settings-select_sequential_consistency)

## insert\_quorum\_timeout {#settings-insert_quorum-timeout}

-Write to quorum timeout in seconds. If the timeout has passed and no write has taken place yet, ClickHouse will generate an exception and the client must repeat the query to write the same block to the same or any other replica.
+写入仲裁的超时,以秒为单位。如果超时已过但尚未发生写入,ClickHouse将生成异常,客户端必须重复该查询,以将同一块写入同一副本或任何其他副本。

-Default value: 60 seconds.
+默认值:60秒。

-See also:
+另请参阅:

- [insert\_quorum](#settings-insert_quorum)
- [select\_sequential\_consistency](#settings-select_sequential_consistency)

## select\_sequential\_consistency {#settings-select_sequential_consistency}

-Enables or disables sequential consistency for `SELECT` queries:
+为 `SELECT` 查询启用或禁用顺序一致性:

-Possible values:
+可能的值:

- 0 — Disabled.
- 1 — Enabled.

-Default value: 0.
+默认值:0。

-Usage
+用法

-When sequential consistency is enabled, ClickHouse allows the client to execute the `SELECT` query only for those replicas that contain data from all previous `INSERT` queries executed with `insert_quorum`. If the client refers to a partial replica, ClickHouse will generate an exception. The SELECT query will not include data that has not yet been written to the quorum of replicas.
+启用顺序一致性时,ClickHouse只允许客户端对包含所有先前使用 `insert_quorum` 执行的 `INSERT` 查询数据的副本执行 `SELECT` 查询。如果客户端引用了部分副本,ClickHouse将生成异常。SELECT查询将不包括尚未写入副本仲裁的数据。

-See also:
+另请参阅:

- [insert\_quorum](#settings-insert_quorum)
- [insert\_quorum\_timeout](#settings-insert_quorum_timeout)
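A hedged sketch of quorum writes combined with sequentially consistent reads; `replicated_table` is a hypothetical Replicated\* table with at least two replicas:

``` sql
SET insert_quorum = 2;                   -- require 2 replicas to confirm the write
SET insert_quorum_timeout = 60;          -- give up after the timeout described above
INSERT INTO replicated_table VALUES (1);

SET select_sequential_consistency = 1;   -- read only replicas holding all quorum inserts
SELECT count() FROM replicated_table;
```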
## insert\_deduplicate {#settings-insert-deduplicate}

-Enables or disables block deduplication of `INSERT` (for Replicated\* tables).
+启用或禁用 `INSERT` 的块重复数据删除(对于Replicated\*表)。

-Possible values:
+可能的值:

- 0 — Disabled.
- 1 — Enabled.

-Default value: 1.
+默认值:1。

-By default, blocks inserted into replicated tables by the `INSERT` statement are deduplicated (see \[Data Replication\] (../ table\_engines/replication.md).
+默认情况下,通过 `INSERT` 语句插入到复制表中的块会进行重复数据删除(请参阅\[数据复制\](../engines/table\_engines/mergetree\_family/replication.md))。

## deduplicate\_blocks\_in\_dependent\_materialized\_views {#settings-deduplicate-blocks-in-dependent-materialized-views}

-Enables or disables the deduplication check for materialized views that receive data from Replicated\* tables.
+启用或禁用对从Replicated\*表接收数据的物化视图的重复数据删除检查。

-Possible values:
+可能的值:

0 — Disabled.
1 — Enabled.

-Default value: 0.
+默认值:0。

-Usage
+用法

-By default, deduplication is not performed for materialized views but is done upstream, in the source table.
-If an INSERTed block is skipped due to deduplication in the source table, there will be no insertion into attached materialized views. This behaviour exists to enable insertion of highly aggregated data into materialized views, for cases where inserted blocks are the same after materialized view aggregation but derived from different INSERTs into the source table.
-At the same time, this behaviour “breaks” `INSERT` idempotency. If an `INSERT` into the main table was successful and `INSERT` into a materialized view failed (e.g. because of communication failure with Zookeeper) a client will get an error and can retry the operation. However, the materialized view won’t receive the second insert because it will be discarded by deduplication in the main (source) table. The setting `deduplicate_blocks_in_dependent_materialized_views` allows for changing this behaviour. On retry, a materialized view will receive the repeat insert and will perform deduplication check by itself,
-ignoring check result for the source table, and will insert rows lost because of the first failure.
+默认情况下,不对物化视图执行重复数据删除,而是在上游的源表中完成。
+如果插入的块由于源表中的重复数据删除而被跳过,则不会向附加的物化视图插入数据。这种行为的存在,是为了允许将高度聚合的数据插入物化视图,适用于插入的块在物化视图聚合之后相同、但源自对源表的不同INSERT的情况。
+与此同时,这种行为“破坏”了 `INSERT` 的幂等性。如果插入主表的 `INSERT` 成功,而插入物化视图的 `INSERT` 失败(例如由于与Zookeeper的通信失败),客户端将收到错误并可以重试该操作。但是,物化视图不会收到第二次插入,因为它会被主(源)表中的重复数据删除丢弃。设置 `deduplicate_blocks_in_dependent_materialized_views` 允许更改此行为。重试时,物化视图将收到重复的插入,并自行执行重复数据删除检查,
+忽略源表的检查结果,并插入由于第一次失败而丢失的行。

## max\_network\_bytes {#settings-max-network-bytes}

-Limits the data volume (in bytes) that is received or transmitted over the network when executing a query. This setting applies to every individual query.
+限制执行查询时通过网络接收或传输的数据量(以字节为单位)。此设置适用于每个单独的查询。

-Possible values:
+可能的值:

-- Positive integer.
+- 正整数。
- 0 — Data volume control is disabled.

-Default value: 0.
+默认值:0。

## max\_network\_bandwidth {#settings-max-network-bandwidth}

-Limits the speed of the data exchange over the network in bytes per second. This setting applies to every query.
+限制通过网络进行数据交换的速度,以每秒字节数为单位。此设置适用于每个查询。

-Possible values:
+可能的值:

-- Positive integer.
+- 正整数。
- 0 — Bandwidth control is disabled.
-Default value: 0.
+默认值:0。

## max\_network\_bandwidth\_for\_user {#settings-max-network-bandwidth-for-user}

-Limits the speed of the data exchange over the network in bytes per second. This setting applies to all concurrently running queries performed by a single user.
+限制通过网络进行数据交换的速度,以每秒字节数为单位。此设置适用于单个用户执行的所有并发运行的查询。

-Possible values:
+可能的值:

-- Positive integer.
+- 正整数。
- 0 — Control of the data speed is disabled.

-Default value: 0.
+默认值:0。

## max\_network\_bandwidth\_for\_all\_users {#settings-max-network-bandwidth-for-all-users}

-Limits the speed that data is exchanged at over the network in bytes per second. This setting applies to all concurrently running queries on the server.
+限制通过网络交换数据的速度,以每秒字节数为单位。此设置适用于服务器上所有并发运行的查询。

-Possible values:
+可能的值:

-- Positive integer.
+- 正整数。
- 0 — Control of the data speed is disabled.

-Default value: 0.
+默认值:0。

## count\_distinct\_implementation {#settings-count_distinct_implementation}

-Specifies which of the `uniq*` functions should be used to perform the [COUNT(DISTINCT …)](../../query_language/agg_functions/reference.md#agg_function-count) construction.
+指定应使用哪个 `uniq*` 函数来执行 [COUNT(DISTINCT …)](../../sql_reference/aggregate_functions/reference.md#agg_function-count) 结构。

-Possible values:
+可能的值:

-- [uniq](../../query_language/agg_functions/reference.md#agg_function-uniq)
-- [uniqCombined](../../query_language/agg_functions/reference.md#agg_function-uniqcombined)
-- [uniqCombined64](../../query_language/agg_functions/reference.md#agg_function-uniqcombined64)
-- [uniqHLL12](../../query_language/agg_functions/reference.md#agg_function-uniqhll12)
-- [uniqExact](../../query_language/agg_functions/reference.md#agg_function-uniqexact)
+- [uniq](../../sql_reference/aggregate_functions/reference.md#agg_function-uniq)
+- [uniqCombined](../../sql_reference/aggregate_functions/reference.md#agg_function-uniqcombined)
+- [uniqCombined64](../../sql_reference/aggregate_functions/reference.md#agg_function-uniqcombined64)
+- [uniqHLL12](../../sql_reference/aggregate_functions/reference.md#agg_function-uniqhll12)
+- [uniqExact](../../sql_reference/aggregate_functions/reference.md#agg_function-uniqexact)

-Default value: `uniqExact`.
+默认值: `uniqExact`。
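For instance, a sketch of routing `COUNT(DISTINCT ...)` through a cheaper approximate function (the `visits` table and `user_id` column are hypothetical):

``` sql
SET count_distinct_implementation = 'uniqCombined';
-- the aggregate below is executed as uniqCombined(user_id)
SELECT COUNT(DISTINCT user_id) FROM visits;
```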
## skip\_unavailable\_shards {#settings-skip_unavailable_shards}

-Enables or disables silently skipping of unavailable shards.
+启用或禁用静默跳过不可用分片。

-Shard is considered unavailable if all its replicas are unavailable. A replica is unavailable in the following cases:
+如果分片的所有副本都不可用,则该分片被视为不可用。副本在以下情况下不可用:

-- ClickHouse can’t connect to replica for any reason.
+- ClickHouse出于任何原因无法连接到副本。

-    When connecting to a replica, ClickHouse performs several attempts. If all these attempts fail, the replica is considered unavailable.
+    连接到副本时,ClickHouse会执行多次尝试。如果所有这些尝试都失败,则认为副本不可用。

-- Replica can’t be resolved through DNS.
+- 副本无法通过DNS解析。

-    If replica’s hostname can’t be resolved through DNS, it can indicate the following situations:
+    如果无法通过DNS解析副本的主机名,则可能表明以下情况:

-    - Replica’s host has no DNS record. It can occur in systems with dynamic DNS, for example, [Kubernetes](https://kubernetes.io), where nodes can be unresolvable during downtime, and this is not an error.
+    - 副本的主机没有DNS记录。这可能发生在具有动态DNS的系统中,例如 [Kubernetes](https://kubernetes.io),其中节点在停机期间可能无法解析,这不是错误。

-    - Configuration error. ClickHouse configuration file contains a wrong hostname.
+    - 配置错误。ClickHouse配置文件包含错误的主机名。

-Possible values:
+可能的值:

- 1 — skipping enabled.

-    If a shard is unavailable, ClickHouse returns a result based on partial data and doesn’t report node availability issues.
+    如果分片不可用,ClickHouse将基于部分数据返回结果,并且不报告节点可用性问题。

- 0 — skipping disabled.

-    If a shard is unavailable, ClickHouse throws an exception.
+    如果分片不可用,ClickHouse将引发异常。

-Default value: 0.
+默认值:0。

## optimize\_skip\_unused\_shards {#settings-optimize_skip_unused_shards}

-Enables or disables skipping of unused shards for SELECT queries that have sharding key condition in PREWHERE/WHERE (assumes that the data is distributed by sharding key, otherwise do nothing).
+对于在PREWHERE/WHERE中具有分片键条件的SELECT查询,启用或禁用跳过未使用的分片(假定数据是按分片键分布的,否则不执行任何操作)。

-Default value: 0
+默认值:0

## force\_optimize\_skip\_unused\_shards {#settings-force_optimize_skip_unused_shards}

-Enables or disables query execution if [`optimize_skip_unused_shards`](#settings-optimize_skip_unused_shards) enabled and skipping of unused shards is not possible. If the skipping is not possible and the setting is enabled exception will be thrown.
+如果启用了 [`optimize_skip_unused_shards`](#settings-optimize_skip_unused_shards) 但无法跳过未使用的分片,则启用或禁用查询执行。如果无法跳过且该设置已启用,则会抛出异常。

-Possible values:
+可能的值:

-- 0 - Disabled (do not throws)
-- 1 - Disable query execution only if the table has sharding key
-- 2 - Disable query execution regardless sharding key is defined for the table
+- 0 - 禁用(不抛出)
+- 1 - 仅当表具有分片键时才禁用查询执行
+- 2 - 无论是否为表定义了分片键,都禁用查询执行

-Default value: 0
+默认值:0

## force\_optimize\_skip\_unused\_shards\_no\_nested {#settings-force_optimize_skip_unused_shards_no_nested}

-Reset [`optimize_skip_unused_shards`](#settings-force_optimize_skip_unused_shards) for nested `Distributed` table
+对于嵌套的 `Distributed` 表,重置 [`optimize_skip_unused_shards`](#settings-force_optimize_skip_unused_shards)

-Possible values:
+可能的值:

- 1 — Enabled.
- 0 — Disabled.

-Default value: 0.
+默认值:0。

## optimize\_throw\_if\_noop {#setting-optimize_throw_if_noop}

-Enables or disables throwing an exception if an [OPTIMIZE](../../query_language/misc.md#misc_operations-optimize) query didn’t perform a merge.
+如果 [OPTIMIZE](../../sql_reference/statements/misc.md#misc_operations-optimize) 查询未执行合并,则启用或禁用抛出异常。

-By default, `OPTIMIZE` returns successfully even if it didn’t do anything. This setting lets you differentiate these situations and get the reason in an exception message.
+默认情况下,即使 `OPTIMIZE` 没有做任何事情,也会成功返回。此设置允许您区分这些情况,并在异常消息中获取原因。

-Possible values:
+可能的值:

- 1 — Throwing an exception is enabled.
- 0 — Throwing an exception is disabled.

-Default value: 0.
+默认值:0。

## distributed\_replica\_error\_half\_life {#settings-distributed_replica_error_half_life}

-- Type: seconds
-- Default value: 60 seconds
+- 类型:秒
+- 默认值:60秒

-Controls how fast errors in distributed tables are zeroed. If a replica is unavailable for some time, accumulates 5 errors, and distributed\_replica\_error\_half\_life is set to 1 second, then the replica is considered normal 3 seconds after last error.
+控制分布式表中的错误清零的速度。如果某个副本在一段时间内不可用并累积了5个错误,且distributed\_replica\_error\_half\_life设置为1秒,则该副本在最后一次错误发生3秒后被视为正常。

-See also:
+另请参阅:

-- [Table engine Distributed](../../operations/table_engines/distributed.md)
+- [Distributed表引擎](../../engines/table_engines/special/distributed.md)
- [distributed\_replica\_error\_cap](#settings-distributed_replica_error_cap)

## distributed\_replica\_error\_cap {#settings-distributed_replica_error_cap}

-- Type: unsigned int
-- Default value: 1000
+- 类型:无符号整数
+- 默认值:1000

-Error count of each replica is capped at this value, preventing a single replica from accumulating too many errors.
+每个副本的错误计数上限为此值,从而防止单个副本累积过多错误。
## distributed\_replica\_error\_cap {#settings-distributed_replica_error_cap}

-- Type: unsigned int
-- Default value: 1000
+- 类型:无符号int
+- 默认值:1000

-Error count of each replica is capped at this value, preventing a single replica from accumulating too many errors.
+每个副本的错误计数上限为此值,从而防止单个副本累积过多错误。

-See also:
+另请参阅:

-- [Table engine Distributed](../../operations/table_engines/distributed.md)
+- [Distributed 表引擎](../../engines/table_engines/special/distributed.md)
- [distributed\_replica\_error\_half\_life](#settings-distributed_replica_error_half_life)

## distributed\_directory\_monitor\_sleep\_time\_ms {#distributed_directory_monitor_sleep_time_ms}

-Base interval for the [Distributed](../table_engines/distributed.md) table engine to send data. The actual interval grows exponentially in the event of errors.
+[Distributed](../../engines/table_engines/special/distributed.md) 表引擎发送数据的基本间隔。 发生错误时,实际间隔呈指数级增长。

-Possible values:
+可能的值:

-- A positive integer number of milliseconds.
+- 正整数毫秒数。

-Default value: 100 milliseconds.
+默认值:100毫秒。

## distributed\_directory\_monitor\_max\_sleep\_time\_ms {#distributed_directory_monitor_max_sleep_time_ms}

-Maximum interval for the [Distributed](../table_engines/distributed.md) table engine to send data. Limits exponential growth of the interval set in the [distributed\_directory\_monitor\_sleep\_time\_ms](#distributed_directory_monitor_sleep_time_ms) setting.
+[Distributed](../../engines/table_engines/special/distributed.md) 表引擎发送数据的最大间隔。 限制 [distributed\_directory\_monitor\_sleep\_time\_ms](#distributed_directory_monitor_sleep_time_ms) 设置中所设间隔的指数增长。

-Possible values:
+可能的值:

-- A positive integer number of milliseconds.
+- 正整数毫秒数。

-Default value: 30000 milliseconds (30 seconds).
+默认值:30000毫秒(30秒)。

## distributed\_directory\_monitor\_batch\_inserts {#distributed_directory_monitor_batch_inserts}

-Enables/disables sending of inserted data in batches.
+启用/禁用批量发送插入的数据。

-When batch sending is enabled, the [Distributed](../table_engines/distributed.md) table engine tries to send multiple files of inserted data in one operation instead of sending them separately. Batch sending improves cluster performance by better-utilizing server and network resources.
+当启用批量发送时,[Distributed](../../engines/table_engines/special/distributed.md) 表引擎会尝试在一次操作中发送多个插入数据的文件,而不是单独发送它们。 批量发送通过更好地利用服务器和网络资源来提高集群性能。

-Possible values:
+可能的值:

- 1 — Enabled.
- 0 — Disabled.

-Default value: 0.
+默认值:0。

## os\_thread\_priority {#setting-os-thread-priority}

-Sets the priority ([nice](https://en.wikipedia.org/wiki/Nice_(Unix))) for threads that execute queries. The OS scheduler considers this priority when choosing the next thread to run on each available CPU core.
+为执行查询的线程设置优先级([nice](https://en.wikipedia.org/wiki/Nice_(Unix)))。 当选择下一个要在每个可用CPU内核上运行的线程时,操作系统调度程序会考虑此优先级。

-!!! warning "Warning"
-    To use this setting, you need to set the `CAP_SYS_NICE` capability. The `clickhouse-server` package sets it up during installation. Some virtual environments don’t allow you to set the `CAP_SYS_NICE` capability. In this case, `clickhouse-server` shows a message about it at the start.
+!!! warning "警告"
+    要使用此设置,您需要设置 `CAP_SYS_NICE` 能力。 `clickhouse-server` 软件包会在安装过程中设置它。 某些虚拟环境不允许您设置 `CAP_SYS_NICE` 能力。 在这种情况下,`clickhouse-server` 会在启动时显示相关消息。

-Possible values:
+可能的值:

-- You can set values in the range `[-20, 19]`.
+- 可以设置 `[-20, 19]` 范围内的值。

-Lower values mean higher priority. Threads with low `nice` priority values are executed more frequently than threads with high values. High values are preferable for long-running non-interactive queries because it allows them to quickly give up resources in favour of short interactive queries when they arrive.
+值越低意味着优先级越高。 `nice` 优先级值低的线程比值高的线程执行得更频繁。 对于长时间运行的非交互式查询,较高的值更为可取,因为这使它们能在短的交互式查询到来时快速让出资源。

-Default value: 0.
+默认值:0。
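As a sketch — assuming the `CAP_SYS_NICE` capability noted above is available — a long batch query can be deprioritized for one session:

``` sql
SET os_thread_priority = 19;  -- lowest priority; interactive queries win the CPU
```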
## query\_profiler\_real\_time\_period\_ns {#query_profiler_real_time_period_ns}

-Sets the period for a real clock timer of the [query profiler](../../operations/performance/sampling_query_profiler.md). Real clock timer counts wall-clock time.
+设置 [查询探查器](../../operations/optimizing_performance/sampling_query_profiler.md) 实时时钟计时器的周期。 实时时钟计时器按挂钟时间计数。

-Possible values:
+可能的值:

-- Positive integer number, in nanoseconds.
+- 正整数,以纳秒为单位。

-    Recommended values:
+    推荐值:

    - 10000000 (100 times a second) nanoseconds and less for single queries.
    - 1000000000 (once a second) for cluster-wide profiling.

-- 0 for turning off the timer.
+- 0 表示关闭计时器。

-Type: [UInt64](../../data_types/int_uint.md).
+类型: [UInt64](../../sql_reference/data_types/int_uint.md).

-Default value: 1000000000 nanoseconds (once a second).
+默认值:1000000000纳秒(每秒一次)。

-See also:
+另请参阅:

-- System table [trace\_log](../system_tables.md#system_tables-trace_log)
+- 系统表 [trace\_log](../../operations/system_tables.md#system_tables-trace_log)

## query\_profiler\_cpu\_time\_period\_ns {#query_profiler_cpu_time_period_ns}

-Sets the period for a CPU clock timer of the [query profiler](../../operations/performance/sampling_query_profiler.md). This timer counts only CPU time.
+设置 [查询探查器](../../operations/optimizing_performance/sampling_query_profiler.md) CPU时钟计时器的周期。 此计时器仅计算CPU时间。

-Possible values:
+可能的值:

-- A positive integer number of nanoseconds.
+- 正整数纳秒数。

-    Recommended values:
+    推荐值:

    - 10000000 (100 times a second) nanoseconds and more for single queries.
    - 1000000000 (once a second) for cluster-wide profiling.

-- 0 for turning off the timer.
+- 0 表示关闭计时器。

-Type: [UInt64](../../data_types/int_uint.md).
+类型: [UInt64](../../sql_reference/data_types/int_uint.md).

-Default value: 1000000000 nanoseconds.
+默认值:1000000000纳秒。

-See also:
+另请参阅:

-- System table [trace\_log](../system_tables.md#system_tables-trace_log)
+- 系统表 [trace\_log](../../operations/system_tables.md#system_tables-trace_log)

## allow\_introspection\_functions {#settings-allow_introspection_functions}

-Enables or disables [introspection functions](../../query_language/functions/introspection.md) for query profiling.
+启用或禁用用于查询分析的 [内省函数](../../sql_reference/functions/introspection.md)。

-Possible values:
+可能的值:

- 1 — Introspection functions enabled.
- 0 — Introspection functions disabled.

-Default value: 0.
+默认值:0。

-**See Also**
+**另请参阅**

-- [Sampling Query Profiler](../performance/sampling_query_profiler.md)
-- System table [trace\_log](../system_tables.md#system_tables-trace_log)
+- [采样查询探查器](../optimizing_performance/sampling_query_profiler.md)
+- 系统表 [trace\_log](../../operations/system_tables.md#system_tables-trace_log)

## input\_format\_parallel\_parsing {#input-format-parallel-parsing}

-- Type: bool
-- Default value: True
+- 类型:布尔
+- 默认值:True

-Enable order-preserving parallel parsing of data formats. Supported only for TSV, TSKV, CSV and JSONEachRow formats.
+启用数据格式的保序并行解析。 仅支持TSV、TSKV、CSV和JSONEachRow格式。

## min\_chunk\_bytes\_for\_parallel\_parsing {#min-chunk-bytes-for-parallel-parsing}

-- Type: unsigned int
-- Default value: 1 MiB
+- 类型:无符号int
+- 默认值:1MiB

-The minimum chunk size in bytes, which each thread will parse in parallel.
+每个线程并行解析的最小块大小(以字节为单位)。
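A minimal profiling sketch tying these settings together (it assumes the `trace_log` server section is enabled; see the system tables documentation further below):

``` sql
SET query_profiler_cpu_time_period_ns = 10000000;  -- sample CPU time 100 times per second
SET allow_introspection_functions = 1;
SELECT
    count() AS samples,
    arrayStringConcat(arrayMap(x -> demangle(addressToSymbol(x)), trace), '\n') AS stack
FROM system.trace_log
GROUP BY trace
ORDER BY samples DESC
LIMIT 5;
```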
## output\_format\_avro\_codec {#settings-output_format_avro_codec}

-Sets the compression codec used for output Avro file.
+设置用于输出Avro文件的压缩编解码器。

-Type: string
+类型:字符串

-Possible values:
+可能的值:

- `null` — No compression
- `deflate` — Compress with Deflate (zlib)
-- `snappy` — Compress with [Snappy](https://google.github.io/snappy/)
+- `snappy` — Compress with [Snappy](https://google.github.io/snappy/)

-Default value: `snappy` (if available) or `deflate`.
+默认值: `snappy` (如果可用)或 `deflate`.

## output\_format\_avro\_sync\_interval {#settings-output_format_avro_sync_interval}

-Sets minimum data size (in bytes) between synchronization markers for output Avro file.
+设置输出Avro文件的同步标记之间的最小数据大小(以字节为单位)。

-Type: unsigned int
+类型:无符号int

-Possible values: 32 (32 bytes) - 1073741824 (1 GiB)
+可能的值:32(32字节)- 1073741824(1GiB)

-Default value: 32768 (32 KiB)
+默认值:32768(32KiB)

## format\_avro\_schema\_registry\_url {#settings-format_avro_schema_registry_url}

-Sets Confluent Schema Registry URL to use with [AvroConfluent](../../interfaces/formats.md#data-format-avro-confluent) format
+设置与 [AvroConfluent](../../interfaces/formats.md#data-format-avro-confluent) 格式一起使用的 Confluent Schema Registry URL

-Type: URL
+类型:URL

-Default value: Empty
+默认值:空

-[Original article](https://clickhouse.tech/docs/en/operations/settings/settings/)
+[原始文章](https://clickhouse.tech/docs/en/operations/settings/settings/)
diff --git a/docs/zh/operations/settings/settings_profiles.md b/docs/zh/operations/settings/settings_profiles.md
index 3c694c0889e..21379a65ebc 100644
--- a/docs/zh/operations/settings/settings_profiles.md
+++ b/docs/zh/operations/settings/settings_profiles.md
@@ -1,23 +1,26 @@
---
-en_copy: true
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_priority: 61
+toc_title: "\u8BBE\u7F6E\u914D\u7F6E\u6587\u4EF6"
---

-# Settings Profiles {#settings-profiles}
+# 设置配置文件 {#settings-profiles}

-A settings profile is a collection of settings grouped under the same name. Each ClickHouse user has a profile.
-To apply all the settings in a profile, set the `profile` setting.
+设置配置文件是以相同名称分组的设置的集合。 每个ClickHouse用户都有一个配置文件。
+要应用配置文件中的所有设置,请设置 `profile` 设置。

-Example:
+示例:

-Install the `web` profile.
+应用 `web` 配置文件。

``` sql
SET profile = 'web'
```

-Settings profiles are declared in the user config file. This is usually `users.xml`.
+设置配置文件在用户配置文件中声明,通常是 `users.xml`。

-Example:
+示例:

``` xml

@@ -61,8 +64,8 @@ Example:

```

-The example specifies two profiles: `default` and `web`. The `default` profile has a special purpose: it must always be present and is applied when starting the server. In other words, the `default` profile contains default settings. The `web` profile is a regular profile that can be set using the `SET` query or using a URL parameter in an HTTP query.
+该示例指定了两个配置文件: `default` 和 `web`。 `default` 配置文件有一个特殊用途:它必须始终存在,并在启动服务器时应用。 换句话说,`default` 配置文件包含默认设置。 `web` 配置文件是一个常规配置文件,可以通过 `SET` 查询或在HTTP查询中使用URL参数来设置。

-Settings profiles can inherit from each other. To use inheritance, indicate one or multiple `profile` settings before the other settings that are listed in the profile. In case when one setting is defined in different profiles, the latest defined is used.
+设置配置文件可以彼此继承。 要使用继承,请指示一个或多个 `profile` 配置文件中列出的其他设置之前的设置。 如果在不同的配置文件中定义了一个设置,则使用最新定义。 -[Original article](https://clickhouse.tech/docs/en/operations/settings/settings_profiles/) +[原始文章](https://clickhouse.tech/docs/en/operations/settings/settings_profiles/) diff --git a/docs/zh/operations/settings/settings_users.md b/docs/zh/operations/settings/settings_users.md index 8b852380f5b..2dba689d08f 100644 --- a/docs/zh/operations/settings/settings_users.md +++ b/docs/zh/operations/settings/settings_users.md @@ -1,12 +1,15 @@ --- -en_copy: true +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 63 +toc_title: "\u7528\u6237\u8BBE\u7F6E" --- -# User Settings {#user-settings} +# 用户设置 {#user-settings} -The `users` section of the `user.xml` configuration file contains user settings. +该 `users` 一节 `user.xml` 配置文件包含用户设置。 -Structure of the `users` section: +的结构 `users` 科: ``` xml @@ -35,74 +38,74 @@ Structure of the `users` section: ``` -### user\_name/password {#user-namepassword} +### 用户名称/密码 {#user-namepassword} -Password can be specified in plaintext or in SHA256 (hex format). +密码可以以明文或SHA256(十六进制格式)指定。 -- To assign a password in plaintext (**not recommended**), place it in a `password` element. +- 以明文形式分配密码 (**不推荐**),把它放在一个 `password` 元素。 - For example, `qwerty`. The password can be left blank. + 例如, `qwerty`. 密码可以留空。 -- To assign a password using its SHA256 hash, place it in a `password_sha256_hex` element. +- 要使用其SHA256散列分配密码,请将其放置在 `password_sha256_hex` 元素。 - For example, `65e84be33532fb784c48129675f9eff3a682b27168c0ea744b2cf58ee02337c5`. + 例如, `65e84be33532fb784c48129675f9eff3a682b27168c0ea744b2cf58ee02337c5`. - Example of how to generate a password from shell: + 如何从shell生成密码的示例: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha256sum | tr -d '-' - The first line of the result is the password. The second line is the corresponding SHA256 hash. + 结果的第一行是密码。 第二行是相应的SHA256哈希。 -- For compatibility with MySQL clients, password can be specified in double SHA1 hash. Place it in `password_double_sha1_hex` element. +- 为了与MySQL客户端兼容,密码可以在双SHA1哈希中指定。 放进去 `password_double_sha1_hex` 元素。 - For example, `08b4a0f1de6ad37da17359e592c8d74788a83eb0`. + 例如, `08b4a0f1de6ad37da17359e592c8d74788a83eb0`. - Example of how to generate a password from shell: + 如何从shell生成密码的示例: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha1sum | tr -d '-' | xxd -r -p | sha1sum | tr -d '-' - The first line of the result is the password. The second line is the corresponding double SHA1 hash. + 结果的第一行是密码。 第二行是相应的双SHA1哈希。 -### user\_name/networks {#user-namenetworks} +### 用户名称/网络 {#user-namenetworks} -List of networks from which the user can connect to the ClickHouse server. +用户可以从中连接到ClickHouse服务器的网络列表。 -Each element of the list can have one of the following forms: +列表中的每个元素都可以具有以下形式之一: - `` — IP address or network mask. - Examples: `213.180.204.3`, `10.0.0.1/8`, `10.0.0.1/255.255.255.0`, `2a02:6b8::3`, `2a02:6b8::3/64`, `2a02:6b8::3/ffff:ffff:ffff:ffff::`. + 例: `213.180.204.3`, `10.0.0.1/8`, `10.0.0.1/255.255.255.0`, `2a02:6b8::3`, `2a02:6b8::3/64`, `2a02:6b8::3/ffff:ffff:ffff:ffff::`. - `` — Hostname. - Example: `example01.host.ru`. + 示例: `example01.host.ru`. - To check access, a DNS query is performed, and all returned IP addresses are compared to the peer address. + 要检查访问,将执行DNS查询,并将所有返回的IP地址与对等地址进行比较。 - `` — Regular expression for hostnames. 
- Example, `^example\d\d-\d\d-\d\.host\.ru$` + 示例, `^example\d\d-\d\d-\d\.host\.ru$` - To check access, a [DNS PTR query](https://en.wikipedia.org/wiki/Reverse_DNS_lookup) is performed for the peer address and then the specified regexp is applied. Then, another DNS query is performed for the results of the PTR query and all the received addresses are compared to the peer address. We strongly recommend that regexp ends with $. + 要检查访问,a [DNS PTR查询](https://en.wikipedia.org/wiki/Reverse_DNS_lookup) 对对等体地址执行,然后应用指定的正则表达式。 然后,对PTR查询的结果执行另一个DNS查询,并将所有接收到的地址与对等地址进行比较。 我们强烈建议正则表达式以$结尾。 -All results of DNS requests are cached until the server restarts. +DNS请求的所有结果都将被缓存,直到服务器重新启动。 -**Examples** +**例** -To open access for user from any network, specify: +要从任何网络打开用户的访问权限,请指定: ``` xml ::/0 ``` -!!! warning "Warning" - It’s insecure to open access from any network unless you have a firewall properly configured or the server is not directly connected to Internet. +!!! warning "警告" + 从任何网络开放访问是不安全的,除非你有一个防火墙正确配置或服务器没有直接连接到互联网。 -To open access only from localhost, specify: +若要仅从本地主机打开访问权限,请指定: ``` xml ::1 @@ -111,22 +114,22 @@ To open access only from localhost, specify: ### user\_name/profile {#user-nameprofile} -You can assign a settings profile for the user. Settings profiles are configured in a separate section of the `users.xml` file. For more information, see [Profiles of Settings](settings_profiles.md). +您可以为用户分配设置配置文件。 设置配置文件在单独的部分配置 `users.xml` 文件 有关详细信息,请参阅 [设置配置文件](settings_profiles.md). -### user\_name/quota {#user-namequota} +### 用户名称/配额 {#user-namequota} -Quotas allow you to track or limit resource usage over a period of time. Quotas are configured in the `quotas` -section of the `users.xml` configuration file. +配额允许您在一段时间内跟踪或限制资源使用情况。 配额在配置 `quotas` +一节 `users.xml` 配置文件。 -You can assign a quotas set for the user. For a detailed description of quotas configuration, see [Quotas](../quotas.md#quotas). +您可以为用户分配配额。 有关配额配置的详细说明,请参阅 [配额](../quotas.md#quotas). -### user\_name/databases {#user-namedatabases} +### 用户名/数据库 {#user-namedatabases} -In this section, you can you can limit rows that are returned by ClickHouse for `SELECT` queries made by the current user, thus implementing basic row-level security. +在本节中,您可以限制ClickHouse返回的行 `SELECT` 由当前用户进行的查询,从而实现基本的行级安全性。 -**Example** +**示例** -The following configuration forces that user `user1` can only see the rows of `table1` as the result of `SELECT` queries, where the value of the `id` field is 1000. +以下配置强制该用户 `user1` 只能看到的行 `table1` 作为结果 `SELECT` 查询,其中的值 `id` 场是1000。 ``` xml @@ -140,6 +143,6 @@ The following configuration forces that user `user1` can only see the rows of `t ``` -The `filter` can be any expression resulting in a [UInt8](../../data_types/int_uint.md)-type value. It usually contains comparisons and logical operators. Rows from `database_name.table1` where filter results to 0 are not returned for this user. The filtering is incompatible with `PREWHERE` operations and disables `WHERE→PREWHERE` optimization. 
+`filter` 可以是任何结果为 [UInt8](../../sql_reference/data_types/int_uint.md) 类型值的表达式。 它通常包含比较和逻辑运算符。 对于该用户,`database_name.table1` 中筛选结果为0的行不会被返回。 过滤与 `PREWHERE` 操作不兼容,并会禁用 `WHERE→PREWHERE` 优化。

-[Original article](https://clickhouse.tech/docs/en/operations/settings/settings_users/)
+[原始文章](https://clickhouse.tech/docs/en/operations/settings/settings_users/)
diff --git a/docs/zh/operations/system_tables.md b/docs/zh/operations/system_tables.md
index af47b99222a..ba762ddb562 100644
--- a/docs/zh/operations/system_tables.md
+++ b/docs/zh/operations/system_tables.md
@@ -1,25 +1,28 @@
---
-en_copy: true
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_priority: 52
+toc_title: "\u7CFB\u7EDF\u8868"
---

-# System tables {#system-tables}
+# 系统表 {#system-tables}

-System tables are used for implementing part of the system’s functionality, and for providing access to information about how the system is working.
-You can’t delete a system table (but you can perform DETACH).
-System tables don’t have files with data on the disk or files with metadata. The server creates all the system tables when it starts.
-System tables are read-only.
-They are located in the ‘system’ database.
+系统表用于实现系统的部分功能,并提供有关系统如何工作的信息。
+您无法删除系统表(但可以执行DETACH)。
+系统表在磁盘上既没有数据文件,也没有元数据文件。 服务器在启动时创建所有系统表。
+系统表是只读的。
+它们位于 ‘system’ 数据库中。

-## system.asynchronous\_metrics {#system_tables-asynchronous_metrics}
+## 系统。asynchronous\_metrics {#system_tables-asynchronous_metrics}

-Contains metrics that are calculated periodically in the background. For example, the amount of RAM in use.
+包含在后台定期计算的指标。 例如,正在使用的RAM量。

-Columns:
+列:

-- `metric` ([String](../data_types/string.md)) — Metric name.
-- `value` ([Float64](../data_types/float.md)) — Metric value.
+- `metric` ([字符串](../sql_reference/data_types/string.md)) — Metric name.
+- `value` ([Float64](../sql_reference/data_types/float.md)) — Metric value.

-**Example**
+**示例**

``` sql
SELECT * FROM system.asynchronous_metrics LIMIT 10
@@ -40,18 +43,18 @@ SELECT * FROM system.asynchronous_metrics LIMIT 10
└─────────────────────────────────────────┴────────────┘
```

-**See Also**
+**另请参阅**

-- [Monitoring](monitoring.md) — Base concepts of ClickHouse monitoring.
-- [system.metrics](#system_tables-metrics) — Contains instantly calculated metrics.
-- [system.events](#system_tables-events) — Contains a number of events that have occurred.
-- [system.metric\_log](#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` and `system.events`.
+- [监测](monitoring.md) — Base concepts of ClickHouse monitoring.
+- [系统。指标](#system_tables-metrics) — Contains instantly calculated metrics.
+- [系统。活动](#system_tables-events) — Contains a number of events that have occurred.
+- [系统。metric\_log](#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` and `system.events`.
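A small follow-up sketch to the example above — metric names vary between server versions, so the pattern is only illustrative:

``` sql
SELECT metric, value
FROM system.asynchronous_metrics
WHERE metric LIKE '%Cache%';
```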
-## system.clusters {#system-clusters}
+## 系统。集群 {#system-clusters}

-Contains information about clusters available in the config file and the servers in them.
+包含配置文件中可用的集群及其服务器的信息。

-Columns:
+列:

- `cluster` (String) — The cluster name.
- `shard_num` (UInt32) — The shard number in the cluster, starting from 1.
- `shard_weight` (UInt32) — The relative weight of the shard when writing data.
- `replica_num` (UInt32) — The replica number in the shard, starting from 1.
- `host_name` (String) — The host name, as specified in the config.
- `host_address` (String) — The host IP address obtained from DNS.
- `port` (UInt16) — The port to use for connecting to the server.
- `user` (String) — The name of the user for connecting to the server.
-- `errors_count` (UInt32) - number of times this host failed to reach replica.
-- `estimated_recovery_time` (UInt32) - seconds left until replica error count is zeroed and it is considered to be back to normal.
+- `errors_count` (UInt32) — 此主机未能连接到副本的次数。
+- `estimated_recovery_time` (UInt32) — 距副本错误计数归零并被视为恢复正常还剩的秒数。

-Please note that `errors_count` is updated once per query to the cluster, but `estimated_recovery_time` is recalculated on-demand. So there could be a case of non-zero `errors_count` and zero `estimated_recovery_time`, that next query will zero `errors_count` and try to use replica as if it has no errors.
+请注意,`errors_count` 在每次对集群的查询时更新一次,而 `estimated_recovery_time` 则按需重新计算。 因此可能出现 `errors_count` 非零而 `estimated_recovery_time` 为零的情况;此时下一个查询会将 `errors_count` 归零,并把该副本当作没有错误一样尝试使用。

-**See also**
+**另请参阅**

-- [Table engine Distributed](table_engines/distributed.md)
-- [distributed\_replica\_error\_cap setting](settings/settings.md#settings-distributed_replica_error_cap)
-- [distributed\_replica\_error\_half\_life setting](settings/settings.md#settings-distributed_replica_error_half_life)
+- [Distributed 表引擎](../engines/table_engines/special/distributed.md)
+- [distributed\_replica\_error\_cap设置](settings/settings.md#settings-distributed_replica_error_cap)
+- [distributed\_replica\_error\_half\_life设置](settings/settings.md#settings-distributed_replica_error_half_life)

-## system.columns {#system-columns}
+## 系统。列 {#system-columns}

-Contains information about columns in all the tables.
+包含所有表中列的信息。

-You can use this table to get information similar to the [DESCRIBE TABLE](../query_language/misc.md#misc-describe-table) query, but for multiple tables at once.
+您可以使用此表获得类似 [DESCRIBE TABLE](../sql_reference/statements/misc.md#misc-describe-table) 查询的信息,但可以一次针对多个表。

-The `system.columns` table contains the following columns (the column type is shown in brackets):
+`system.columns` 表包含以下列(括号中显示列类型):

- `database` (String) — Database name.
- `table` (String) — Table name.
- `name` (String) — Column name.
- `type` (String) — Column type.
-- `default_kind` (String) — Expression type (`DEFAULT`, `MATERIALIZED`, `ALIAS`) for the default value, or an empty string if it is not defined.
+- `default_kind` (String) — 默认值的表达式类型(`DEFAULT`, `MATERIALIZED`, `ALIAS`);如果未定义,则为空字符串。
- `default_expression` (String) — Expression for the default value, or an empty string if it is not defined.
- `data_compressed_bytes` (UInt64) — The size of compressed data, in bytes.
- `data_uncompressed_bytes` (UInt64) — The size of decompressed data, in bytes.
@@ -95,15 +98,15 @@ The `system.columns` table contains the following columns (the column type is sh
- `is_in_primary_key` (UInt8) — Flag that indicates whether the column is in the primary key expression.
- `is_in_sampling_key` (UInt8) — Flag that indicates whether the column is in the sampling key expression.
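A small usage sketch, restricted to a table that exists on every server:

``` sql
SELECT name, type, is_in_primary_key
FROM system.columns
WHERE database = 'system' AND table = 'one';
```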
-## system.contributors {#system-contributors}
+## 系统。贡献者 {#system-contributors}

-Contains information about contributors. All contributors in random order. The order is random at query execution time.
+包含有关贡献者的信息。 所有贡献者按随机顺序排列。 该顺序在查询执行时是随机的。

-Columns:
+列:

- `name` (String) — Contributor (author) name from git log.

-**Example**
+**示例**

``` sql
SELECT * FROM system.contributors LIMIT 10
@@ -124,7 +127,7 @@ SELECT * FROM system.contributors LIMIT 10
└──────────────────┘
```

-To find out yourself in the table, use a query:
+要在表中找到你自己,请使用查询:

``` sql
SELECT * FROM system.contributors WHERE name='Olga Khvostikova'
@@ -136,21 +139,21 @@ SELECT * FROM system.contributors WHERE name='Olga Khvostikova'
└──────────────────┘
```

-## system.databases {#system-databases}
+## 系统。数据库 {#system-databases}

-This table contains a single String column called ‘name’ – the name of a database.
-Each database that the server knows about has a corresponding entry in the table.
-This system table is used for implementing the `SHOW DATABASES` query.
+此表包含单个名为 ‘name’ 的String列 – 数据库的名称。
+服务器知道的每个数据库在表中都有相应的条目。
+该系统表用于实现 `SHOW DATABASES` 查询。

-## system.detached\_parts {#system_tables-detached_parts}
+## 系统。detached\_parts {#system_tables-detached_parts}

-Contains information about detached parts of [MergeTree](table_engines/mergetree.md) tables. The `reason` column specifies why the part was detached. For user-detached parts, the reason is empty. Such parts can be attached with [ALTER TABLE ATTACH PARTITION\|PART](../query_language/query_language/alter/#alter_attach-partition) command. For the description of other columns, see [system.parts](#system_tables-parts). If part name is invalid, values of some columns may be `NULL`. Such parts can be deleted with [ALTER TABLE DROP DETACHED PART](../query_language/query_language/alter/#alter_drop-detached).
+包含 [MergeTree](../engines/table_engines/mergetree_family/mergetree.md) 表的分离(detached)部件的信息。 `reason` 列指明部件被分离的原因。 对于用户分离的部件,原因为空。 这类部件可以用 [ALTER TABLE ATTACH PARTITION\|PART](../sql_reference/statements/alter.md#alter_attach-partition) 命令重新附加。 有关其他列的说明,请参阅 [系统。零件](#system_tables-parts)。 如果部件名称无效,某些列的值可能为 `NULL`。 这类部件可以用 [ALTER TABLE DROP DETACHED PART](../sql_reference/statements/alter.md#alter_drop-detached) 删除。

-## system.dictionaries {#system-dictionaries}
+## 系统。字典 {#system-dictionaries}

-Contains information about external dictionaries.
+包含外部字典的信息。

-Columns:
+列:

- `name` (String) — Dictionary name.
- `type` (String) — Dictionary type: Flat, Hashed, Cache.
- `origin` (String) — Path to the configuration file that describes the dictionary.
- `attribute.names` (Array(String)) — Array of attribute names provided by the dictionary.
- `attribute.types` (Array(String)) — Corresponding array of attribute types that are provided by the dictionary.
- `has_hierarchy` (UInt8) — Whether the dictionary is hierarchical.
- `bytes_allocated` (UInt64) — The amount of RAM the dictionary uses.
- `hit_rate` (Float64) — For cache dictionaries, the percentage of uses for which the value was in the cache.
- `element_count` (UInt64) — The number of items stored in the dictionary.
- `load_factor` (Float64) — The percentage filled in the dictionary (for a hashed dictionary, the percentage filled in the hash table).
- `creation_time` (DateTime) — The time when the dictionary was created or last successfully reloaded.
-- `last_exception` (String) — Text of the error that occurs when creating or reloading the dictionary if the dictionary couldn’t be created.
+- `last_exception` (String) — Text of the error that occurs when creating or reloading the dictionary if the dictionary couldn't be created.
- `source` (String) — Text describing the data source for the dictionary.

-Note that the amount of memory used by the dictionary is not proportional to the number of items stored in it. So for flat and cached dictionaries, all the memory cells are pre-assigned, regardless of how full the dictionary actually is.
+请注意,字典使用的内存量与其中存储的项目数量不成正比。 因此,对于平面(flat)和缓存(cached)字典,所有内存单元都是预先分配的,而不管字典实际有多满。
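A usage sketch (the result is simply empty unless external dictionaries are configured on the server):

``` sql
SELECT name, type, element_count, load_factor
FROM system.dictionaries;
```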
-## system.events {#system_tables-events}
+## 系统。活动 {#system_tables-events}

-Contains information about the number of events that have occurred in the system. For example, in the table, you can find how many `SELECT` queries were processed since the ClickHouse server started.
+包含系统中已发生事件数的信息。 例如,您可以在表中找到自ClickHouse服务器启动以来已处理了多少个 `SELECT` 查询。

-Columns:
+列:

-- `event` ([String](../data_types/string.md)) — Event name.
-- `value` ([UInt64](../data_types/int_uint.md)) — Number of events occurred.
-- `description` ([String](../data_types/string.md)) — Event description.
+- `event` ([字符串](../sql_reference/data_types/string.md)) — Event name.
+- `value` ([UInt64](../sql_reference/data_types/int_uint.md)) — Number of events occurred.
+- `description` ([字符串](../sql_reference/data_types/string.md)) — Event description.

-**Example**
+**示例**

``` sql
SELECT * FROM system.events LIMIT 5
@@ -194,43 +197,43 @@ SELECT * FROM system.events LIMIT 5
└───────────────────────────────────────┴───────┴────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
```

-**See Also**
+**另请参阅**

-- [system.asynchronous\_metrics](#system_tables-asynchronous_metrics) — Contains periodically calculated metrics.
-- [system.metrics](#system_tables-metrics) — Contains instantly calculated metrics.
-- [system.metric\_log](#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` and `system.events`.
-- [Monitoring](monitoring.md) — Base concepts of ClickHouse monitoring.
+- [系统。asynchronous\_metrics](#system_tables-asynchronous_metrics) — Contains periodically calculated metrics.
+- [系统。指标](#system_tables-metrics) — Contains instantly calculated metrics.
+- [系统。metric\_log](#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` and `system.events`.
+- [监测](monitoring.md) — Base concepts of ClickHouse monitoring.

-## system.functions {#system-functions}
+## 系统。功能 {#system-functions}

-Contains information about normal and aggregate functions.
+包含普通函数和聚合函数的信息。

-Columns:
+列:

- `name`(`String`) – The name of the function.
- `is_aggregate`(`UInt8`) — Whether the function is aggregate.

-## system.graphite\_retentions {#system-graphite-retentions}
+## 系统。graphite\_retentions {#system-graphite-retentions}

-Contains information about parameters [graphite\_rollup](server_settings/settings.md#server_settings-graphite_rollup) which are used in tables with [\*GraphiteMergeTree](table_engines/graphitemergetree.md) engines.
+包含使用 [\*GraphiteMergeTree](../engines/table_engines/mergetree_family/graphitemergetree.md) 引擎的表所用 [graphite\_rollup](server_configuration_parameters/settings.md#server_configuration_parameters-graphite_rollup) 参数的信息。

-Columns:
+列:

-- `config_name` (String) - `graphite_rollup` parameter name.
-- `regexp` (String) - A pattern for the metric name.
-- `function` (String) - The name of the aggregating function.
-- `age` (UInt64) - The minimum age of the data in seconds.
-- `precision` (UInt64) - How precisely to define the age of the data in seconds.
-- `priority` (UInt16) - Pattern priority.
-- `is_default` (UInt8) - Whether the pattern is the default.
-- `Tables.database` (Array(String)) - Array of names of database tables that use the `config_name` parameter.
-- `Tables.table` (Array(String)) - Array of table names that use the `config_name` parameter.
+- `config_name` (字符串) — `graphite_rollup` 参数名称。
+- `regexp` (String) — 指标名称的模式。
+- `function` (String) — 聚合函数的名称。
+- `age` (UInt64) — 数据的最小期限(以秒为单位)。
+- `precision` (UInt64) — 定义数据年龄(以秒为单位)的精确程度。
+- `priority` (UInt16) — 模式优先级。
+- `is_default` (UInt8) — 模式是否为默认值。
+- `Tables.database` (Array(String)) — 使用 `config_name` 参数的数据库表名称数组。
+- `Tables.table` (Array(String)) — 使用 `config_name` 参数的表名称数组。
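A usage sketch for the columns just listed (the result is empty unless a `graphite_rollup` section is configured on the server):

``` sql
SELECT config_name, regexp, function, age, precision
FROM system.graphite_retentions;
```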
-## system.merges {#system-merges}
+## 系统。合并 {#system-merges}

-Contains information about merges and part mutations currently in process for tables in the MergeTree family.
+包含MergeTree系列表当前正在进行的合并和部件突变(mutation)的信息。

-Columns:
+列:

- `database` (String) — The name of the database the table is in.
- `table` (String) — Table name.
- `elapsed` (Float64) — The time elapsed (in seconds) since the merge started.
- `progress` (Float64) — The percentage of completed work from 0 to 1.
- `num_parts` (UInt64) — The number of pieces to be merged.
- `result_part_name` (String) — The name of the part that will be formed as the result of merging.
-- `is_mutation` (UInt8) - 1 if this process is a part mutation.
+- `is_mutation` (UInt8) — 如果此过程是部件突变,则为1。
- `total_size_bytes_compressed` (UInt64) — The total size of the compressed data in the merged chunks.
- `total_size_marks` (UInt64) — The total number of marks in the merged parts.
- `bytes_read_uncompressed` (UInt64) — Number of bytes read, uncompressed.
- `rows_read` (UInt64) — Number of rows read.
- `bytes_written_uncompressed` (UInt64) — Number of bytes written, uncompressed.
- `rows_written` (UInt64) — Number of rows written.

-## system.metrics {#system_tables-metrics}
+## 系统。指标 {#system_tables-metrics}

-Contains metrics which can be calculated instantly, or have a current value. For example, the number of simultaneously processed queries or the current replica delay. This table is always up to date.
+包含可以立即计算或具有当前值的指标。 例如,同时处理的查询的数量或当前副本的延迟。 此表始终是最新的。

-Columns:
+列:

-- `metric` ([String](../data_types/string.md)) — Metric name.
-- `value` ([Int64](../data_types/int_uint.md)) — Metric value.
-- `description` ([String](../data_types/string.md)) — Metric description.
+- `metric` ([字符串](../sql_reference/data_types/string.md)) — Metric name.
+- `value` ([Int64](../sql_reference/data_types/int_uint.md)) — Metric value.
+- `description` ([字符串](../sql_reference/data_types/string.md)) — Metric description.

-The list of supported metrics you can find in the [dbms/Common/CurrentMetrics.cpp](https://github.com/ClickHouse/ClickHouse/blob/master/dbms/Common/CurrentMetrics.cpp) source file of ClickHouse.
+支持的指标列表可以在ClickHouse的源文件 [src/Common/CurrentMetrics.cpp](https://github.com/ClickHouse/ClickHouse/blob/master/src/Common/CurrentMetrics.cpp) 中找到。

-**Example**
+**示例**

``` sql
SELECT * FROM system.metrics LIMIT 10
@@ -279,17 +282,17 @@ SELECT * FROM system.metrics LIMIT 10
└────────────────────────────┴───────┴──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
```

-**See Also**
+**另请参阅**

-- [system.asynchronous\_metrics](#system_tables-asynchronous_metrics) — Contains periodically calculated metrics.
+- [系统。asynchronous\_metrics](#system_tables-asynchronous_metrics) — Contains periodically calculated metrics.
+- [系统。活动](#system_tables-events) — Contains a number of events that occurred.
+- [系统。metric\_log](#system_tables-metric_log) — Contains a history of metrics values from tables `system.metrics` and `system.events`.
+- [监测](monitoring.md) — Base concepts of ClickHouse monitoring.
## 系统。metric\_log {#system_tables-metric_log}

-Contains history of metrics values from tables `system.metrics` and `system.events`, periodically flushed to disk.
-To turn on metrics history collection on `system.metric_log`, create `/etc/clickhouse-server/config.d/metric_log.xml` with following content:
+包含 `system.metrics` 和 `system.events` 表中指标值的历史记录,定期刷新到磁盘。
+要在 `system.metric_log` 中打开指标历史记录收集,请创建具有以下内容的 `/etc/clickhouse-server/config.d/metric_log.xml`:

``` xml

@@ -302,7 +305,7 @@ To turn on metrics history collection on `system.metric_log`, create `/etc/click

```

-**Example**
+**示例**

``` sql
SELECT * FROM system.metric_log LIMIT 1 FORMAT Vertical;
@@ -335,50 +338,50 @@ CurrentMetric_ReplicatedChecks: 0
...
```

-**See also**
+**另请参阅**

-- [system.asynchronous\_metrics](#system_tables-asynchronous_metrics) — Contains periodically calculated metrics.
-- [system.events](#system_tables-events) — Contains a number of events that occurred.
-- [system.metrics](#system_tables-metrics) — Contains instantly calculated metrics.
-- [Monitoring](monitoring.md) — Base concepts of ClickHouse monitoring.
+- [系统。asynchronous\_metrics](#system_tables-asynchronous_metrics) — Contains periodically calculated metrics.
+- [系统。活动](#system_tables-events) — Contains a number of events that occurred.
+- [系统。指标](#system_tables-metrics) — Contains instantly calculated metrics.
+- [监测](monitoring.md) — Base concepts of ClickHouse monitoring.

-## system.numbers {#system-numbers}
+## 系统。数字 {#system-numbers}

-This table contains a single UInt64 column named ‘number’ that contains almost all the natural numbers starting from zero.
-You can use this table for tests, or if you need to do a brute force search.
-Reads from this table are not parallelized.
+此表包含单个名为 ‘number’ 的UInt64列,其中包含几乎所有从零开始的自然数。
+您可以将此表用于测试,或在需要进行暴力搜索时使用。
+从此表中读取的内容不是并行的。

-## system.numbers\_mt {#system-numbers-mt}
+## 系统。numbers\_mt {#system-numbers-mt}

-The same as ‘system.numbers’ but reads are parallelized. The numbers can be returned in any order.
-Used for tests.
+与 ‘system.numbers’ 相同,但读取是并行的。 这些数字可以以任何顺序返回。
+用于测试。

-## system.one {#system-one}
+## 系统。一 {#system-one}

-This table contains a single row with a single ‘dummy’ UInt8 column containing the value 0.
-This table is used if a SELECT query doesn’t specify the FROM clause.
-This is similar to the DUAL table found in other DBMSs.
+此表包含单独一行,其中只有一个值为0的 ‘dummy’ UInt8列。
+如果SELECT查询未指定FROM子句,则使用此表。
+这与其他DBMS中的DUAL表类似。

-## system.parts {#system_tables-parts}
+## 系统。零件 {#system_tables-parts}

-Contains information about parts of [MergeTree](table_engines/mergetree.md) tables.
+包含 [MergeTree](../engines/table_engines/mergetree_family/mergetree.md) 表的部件(part)信息。

-Each row describes one data part.
+每行描述一个数据部件。

-Columns:
+列:

-- `partition` (String) – The partition name. To learn what a partition is, see the description of the [ALTER](../query_language/alter.md#query_language_queries_alter) query.
+- `partition` (String) – The partition name. To learn what a partition is, see the description of the [ALTER](../sql_reference/statements/alter.md#query_language_queries_alter) 查询。

-    Formats:
+    格式:

-    - `YYYYMM` for automatic partitioning by month.
-    - `any_string` when partitioning manually.
+ - `YYYYMM` 用于按月自动分区。 + - `any_string` 手动分区时。 - `name` (`String`) – Name of the data part. -- `active` (`UInt8`) – Flag that indicates whether the data part is active. If a data part is active, it’s used in a table. Otherwise, it’s deleted. Inactive data parts remain after merging. +- `active` (`UInt8`) – Flag that indicates whether the data part is active. If a data part is active, it's used in a table. Otherwise, it's deleted. Inactive data parts remain after merging. -- `marks` (`UInt64`) – The number of marks. To get the approximate number of rows in a data part, multiply `marks` by the index granularity (usually 8192) (this hint doesn’t work for adaptive granularity). +- `marks` (`UInt64`) – The number of marks. To get the approximate number of rows in a data part, multiply `marks` 通过索引粒度(通常为8192)(此提示不适用于自适应粒度)。 - `rows` (`UInt64`) – The number of rows. @@ -418,7 +421,7 @@ Columns: - `primary_key_bytes_in_memory_allocated` (`UInt64`) – The amount of memory (in bytes) reserved for primary key values. -- `is_frozen` (`UInt8`) – Flag that shows that a partition data backup exists. 1, the backup exists. 0, the backup doesn’t exist. For more details, see [FREEZE PARTITION](../query_language/alter.md#alter_freeze-partition) +- `is_frozen` (`UInt8`) – Flag that shows that a partition data backup exists. 1, the backup exists. 0, the backup doesn't exist. For more details, see [FREEZE PARTITION](../sql_reference/statements/alter.md#alter_freeze-partition) - `database` (`String`) – Name of the database. @@ -430,29 +433,29 @@ Columns: - `disk` (`String`) – Name of a disk that stores the data part. -- `hash_of_all_files` (`String`) – [sipHash128](../query_language/functions/hash_functions.md#hash_functions-siphash128) of compressed files. +- `hash_of_all_files` (`String`) – [sipHash128](../sql_reference/functions/hash_functions.md#hash_functions-siphash128) 的压缩文件。 -- `hash_of_uncompressed_files` (`String`) – [sipHash128](../query_language/functions/hash_functions.md#hash_functions-siphash128) of uncompressed files (files with marks, index file etc.). +- `hash_of_uncompressed_files` (`String`) – [sipHash128](../sql_reference/functions/hash_functions.md#hash_functions-siphash128) 未压缩的文件(带标记的文件,索引文件等。). -- `uncompressed_hash_of_compressed_files` (`String`) – [sipHash128](../query_language/functions/hash_functions.md#hash_functions-siphash128) of data in the compressed files as if they were uncompressed. +- `uncompressed_hash_of_compressed_files` (`String`) – [sipHash128](../sql_reference/functions/hash_functions.md#hash_functions-siphash128) 压缩文件中的数据,就好像它们是未压缩的。 - `bytes` (`UInt64`) – Alias for `bytes_on_disk`. - `marks_size` (`UInt64`) – Alias for `marks_bytes`. -## system.part\_log {#system_tables-part-log} +## 系统。part\_log {#system_tables-part-log} -The `system.part_log` table is created only if the [part\_log](server_settings/settings.md#server_settings-part-log) server setting is specified. +该 `system.part_log` 表只有当创建 [part\_log](server_configuration_parameters/settings.md#server_configuration_parameters-part-log) 指定了服务器设置。 -This table contains information about events that occurred with [data parts](table_engines/custom_partitioning_key.md) in the [MergeTree](table_engines/mergetree.md) family tables, such as adding or merging data. 
+此表包含与以下情况发生的事件有关的信息 [数据部分](../engines/table_engines/mergetree_family/custom_partitioning_key.md) 在 [MergeTree](../engines/table_engines/mergetree_family/mergetree.md) 家庭表,例如添加或合并数据。 -The `system.part_log` table contains the following columns: +该 `system.part_log` 表包含以下列: - `event_type` (Enum) — Type of the event that occurred with the data part. Can have one of the following values: - `NEW_PART` — Inserting of a new data part. - `MERGE_PARTS` — Merging of data parts. - `DOWNLOAD_PART` — Downloading a data part. - - `REMOVE_PART` — Removing or detaching a data part using [DETACH PARTITION](../query_language/alter.md#alter_detach-partition). + - `REMOVE_PART` — Removing or detaching a data part using [DETACH PARTITION](../sql_reference/statements/alter.md#alter_detach-partition). - `MUTATE_PART` — Mutating of a data part. - `MOVE_PART` — Moving the data part from the one disk to another one. - `event_date` (Date) — Event date. @@ -461,7 +464,7 @@ The `system.part_log` table contains the following columns: - `database` (String) — Name of the database the data part is in. - `table` (String) — Name of the table the data part is in. - `part_name` (String) — Name of the data part. -- `partition_id` (String) — ID of the partition that the data part was inserted to. The column takes the ‘all’ value if the partitioning is by `tuple()`. +- `partition_id` (String) — ID of the partition that the data part was inserted to. The column takes the ‘all’ 值,如果分区是由 `tuple()`. - `rows` (UInt64) — The number of rows in the data part. - `size_in_bytes` (UInt64) — Size of the data part in bytes. - `merged_from` (Array(String)) — An array of names of the parts which the current part was made up from (after the merge). @@ -471,36 +474,36 @@ The `system.part_log` table contains the following columns: - `error` (UInt16) — The code number of the occurred error. - `exception` (String) — Text message of the occurred error. -The `system.part_log` table is created after the first inserting data to the `MergeTree` table. +该 `system.part_log` 表的第一个插入数据到后创建 `MergeTree` 桌子 -## system.processes {#system_tables-processes} +## 系统。流程 {#system_tables-processes} -This system table is used for implementing the `SHOW PROCESSLIST` query. +该系统表用于实现 `SHOW PROCESSLIST` 查询。 -Columns: +列: -- `user` (String) – The user who made the query. Keep in mind that for distributed processing, queries are sent to remote servers under the `default` user. The field contains the username for a specific query, not for a query that this query initiated. -- `address` (String) – The IP address the request was made from. The same for distributed processing. To track where a distributed query was originally made from, look at `system.processes` on the query requestor server. +- `user` (String) – The user who made the query. Keep in mind that for distributed processing, queries are sent to remote servers under the `default` 用户。 该字段包含特定查询的用户名,而不是此查询启动的查询的用户名。 +- `address` (String) – The IP address the request was made from. The same for distributed processing. To track where a distributed query was originally made from, look at `system.processes` 查询请求者服务器上。 - `elapsed` (Float64) – The time in seconds since request execution started. - `rows_read` (UInt64) – The number of rows read from the table. For distributed processing, on the requestor server, this is the total for all remote servers. - `bytes_read` (UInt64) – The number of uncompressed bytes read from the table. For distributed processing, on the requestor server, this is the total for all remote servers. 
- `total_rows_approx` (UInt64) – The approximation of the total number of rows that should be read. For distributed processing, on the requestor server, this is the total for all remote servers. It can be updated during request processing, when new sources to process become known. -- `memory_usage` (UInt64) – Amount of RAM the request uses. It might not include some types of dedicated memory. See the [max\_memory\_usage](../operations/settings/query_complexity.md#settings_max_memory_usage) setting. -- `query` (String) – The query text. For `INSERT`, it doesn’t include the data to insert. +- `memory_usage` (UInt64) – Amount of RAM the request uses. It might not include some types of dedicated memory. See the [max\_memory\_usage](../operations/settings/query_complexity.md#settings_max_memory_usage) 设置。 +- `query` (String) – The query text. For `INSERT`,它不包括要插入的数据。 - `query_id` (String) – Query ID, if defined. -## system.text\_log {#system-tables-text-log} +## 系统。text\_log {#system-tables-text-log} -Contains logging entries. Logging level which goes to this table can be limited with `text_log.level` server setting. +包含日志记录条目。 进入该表的日志记录级别可以通过以下方式进行限制 `text_log.level` 服务器设置。 -Columns: +列: -- `event_date` (`Date`) - Date of the entry. -- `event_time` (`DateTime`) - Time of the entry. -- `microseconds` (`UInt32`) - Microseconds of the entry. +- `event_date` (`Date`)-条目的日期。 +- `event_time` (`DateTime`)-条目的时间。 +- `microseconds` (`UInt32`)-条目的微秒。 - `thread_name` (String) — Name of the thread from which the logging was done. - `thread_id` (UInt64) — OS thread ID. -- `level` (`Enum8`) - Entry level. +- `level` (`Enum8`)-入门级。 - `'Fatal' = 1` - `'Critical' = 2` - `'Error' = 3` @@ -509,30 +512,30 @@ Columns: - `'Information' = 6` - `'Debug' = 7` - `'Trace' = 8` -- `query_id` (`String`) - ID of the query. +- `query_id` (`String`)-查询的ID。 - `logger_name` (`LowCardinality(String)`) - Name of the logger (i.e. `DDLWorker`) -- `message` (`String`) - The message itself. -- `revision` (`UInt32`) - ClickHouse revision. -- `source_file` (`LowCardinality(String)`) - Source file from which the logging was done. -- `source_line` (`UInt64`) - Source line from which the logging was done. +- `message` (`String`)-消息本身。 +- `revision` (`UInt32`)-ClickHouse修订。 +- `source_file` (`LowCardinality(String)`)-从中完成日志记录的源文件。 +- `source_line` (`UInt64`)-从中完成日志记录的源代码行。 -## system.query\_log {#system_tables-query_log} +## 系统。query\_log {#system_tables-query_log} -Contains information about execution of queries. For each query, you can see processing start time, duration of processing, error messages and other information. +包含有关查询执行的信息。 对于每个查询,您可以看到处理开始时间,处理持续时间,错误消息和其他信息。 -!!! note "Note" - The table doesn’t contain input data for `INSERT` queries. +!!! note "注" + 该表不包含以下内容的输入数据 `INSERT` 查询。 -ClickHouse creates this table only if the [query\_log](server_settings/settings.md#server_settings-query-log) server parameter is specified. This parameter sets the logging rules, such as the logging interval or the name of the table the queries will be logged in. +ClickHouse仅在以下情况下创建此表 [query\_log](server_configuration_parameters/settings.md#server_configuration_parameters-query-log) 指定服务器参数。 此参数设置日志记录规则,例如日志记录间隔或将记录查询的表的名称。 -To enable query logging, set the [log\_queries](settings/settings.md#settings-log-queries) parameter to 1. For details, see the [Settings](settings/settings.md) section. 
+要启用查询日志记录,请将 [log\_queries](settings/settings.md#settings-log-queries) 参数设置为1。 有关详细信息,请参阅 [设置](settings/settings.md) 部分。

该 `system.query_log` 表注册两种查询:

1. 客户端直接运行的初始查询。
2. 由其他查询启动的子查询(用于分布式查询执行)。 对于这些类型的查询,有关父查询的信息显示在 `initial_*` 列中。

列:

- `type` (`Enum8`) — Type of event that occurred when executing the query. Values:
    - `'QueryStart' = 1` — Successful start of query execution.
    - `'QueryFinish' = 2` — Successful end of query execution.
    - `'ExceptionBeforeStart' = 3` — Exception before the start of query execution.
    - `'ExceptionWhileProcessing' = 4` — Exception during the query execution.
- `event_date` (Date) — Query starting date.
- `event_time` (DateTime) — Query starting time.
- `query_start_time` (DateTime) — Start time of query execution.
- `query_duration_ms` (UInt64) — Duration of query execution.
- `read_rows` (UInt64) — Number of read rows.
- `read_bytes` (UInt64) — Number of read bytes.
-- `written_rows` (UInt64) — For `INSERT` queries, the number of written rows. For other queries, the column value is 0.
-- `written_bytes` (UInt64) — For `INSERT` queries, the number of written bytes. For other queries, the column value is 0.
+- `written_rows` (UInt64) — For `INSERT` 查询,写入的行数。 对于其他查询,列值为0。
+- `written_bytes` (UInt64) — For `INSERT` 查询,写入的字节数。 对于其他查询,列值为0。
- `result_rows` (UInt64) — Number of rows in the result.
- `result_bytes` (UInt64) — Number of bytes in the result.
- `memory_usage` (UInt64) — Memory consumption by the query.
- `query` (String) — Query string.
- `exception` (String) — Exception message.
- `stack_trace` (String) — Stack trace (a list of methods called before the error occurred). An empty string, if the query is completed successfully.
- `is_initial_query` (UInt8) — Query type. Possible values:
    - 1 — Query was initiated by the client.
    - 0 — Query was initiated by another query for distributed query execution.
- `user` (String) — Name of the user who initiated the current query.
- `query_id` (String) — ID of the query.
- `address` (IPv6) — IP address that was used to make the query.
- `port` (UInt16) — The client port that was used to make the query.
- `initial_user` (String) — Name of the user who ran the initial query (for distributed query execution).
- `initial_query_id` (String) — ID of the initial query (for distributed query execution).
- `initial_address` (IPv6) — IP address that the parent query was launched from.
- `initial_port` (UInt16) — The client port that was used to make the parent query.
- `interface` (UInt8) — Interface that the query was initiated from. Possible values:
    - 1 — TCP.
    - 2 — HTTP.
-- `os_user` (String) — OS’s username who runs [clickhouse-client](../interfaces/cli.md).
-- `client_hostname` (String) — Hostname of the client machine where the [clickhouse-client](../interfaces/cli.md) or another TCP client is run.
-- `client_name` (String) — The [clickhouse-client](../interfaces/cli.md) or another TCP client name.
-- `client_revision` (UInt32) — Revision of the [clickhouse-client](../interfaces/cli.md) or another TCP client.
-- `client_version_major` (UInt32) — Major version of the [clickhouse-client](../interfaces/cli.md) or another TCP client.
-- `client_version_minor` (UInt32) — Minor version of the [clickhouse-client](../interfaces/cli.md) or another TCP client.
-- `client_version_patch` (UInt32) — Patch component of the [clickhouse-client](../interfaces/cli.md) or another TCP client version.
+- `os_user` (String) — OS's username who runs [clickhouse-client](../interfaces/cli.md).
+- `client_hostname` (String) — Hostname of the client machine where the [clickhouse-client](../interfaces/cli.md) 或者运行另一个TCP客户端。
+- `client_name` (String) — The [clickhouse-client](../interfaces/cli.md) 或另一个TCP客户端名称。
+- `client_revision` (UInt32) — Revision of the [clickhouse-client](../interfaces/cli.md) 或另一个TCP客户端。
+- `client_version_major` (UInt32) — Major version of the [clickhouse-client](../interfaces/cli.md) 或另一个TCP客户端。
+- `client_version_minor` (UInt32) — Minor version of the [clickhouse-client](../interfaces/cli.md) 或另一个TCP客户端。
+- `client_version_patch` (UInt32) — Patch component of the [clickhouse-client](../interfaces/cli.md) 或另一个TCP客户端版本。
- `http_method` (UInt8) — HTTP method that initiated the query. Possible values:
    - 0 — The query was launched from the TCP interface.
-    - 1 — `GET` method was used.
-    - 2 — `POST` method was used.
+    - 1 — `GET` 方法被使用。
+    - 2 — `POST` 方法被使用。
-- `http_user_agent` (String) — The `UserAgent` header passed in the HTTP request.
-- `quota_key` (String) — The “quota key” specified in the [quotas](quotas.md) setting (see `keyed`).
+ - 1 — `GET` 方法被使用。 + - 2 — `POST` 方法被使用。 +- `http_user_agent` (String) — The `UserAgent` http请求中传递的标头。 +- `quota_key` (String) — The “quota key” 在指定 [配额](quotas.md) 设置(见 `keyed`). - `revision` (UInt32) — ClickHouse revision. - `thread_numbers` (Array(UInt32)) — Number of threads that are participating in query execution. -- `ProfileEvents.Names` (Array(String)) — Counters that measure different metrics. The description of them could be found in the table [system.events](#system_tables-events) -- `ProfileEvents.Values` (Array(UInt64)) — Values of metrics that are listed in the `ProfileEvents.Names` column. -- `Settings.Names` (Array(String)) — Names of settings that were changed when the client ran the query. To enable logging changes to settings, set the `log_query_settings` parameter to 1. -- `Settings.Values` (Array(String)) — Values of settings that are listed in the `Settings.Names` column. +- `ProfileEvents.Names` (Array(String)) — Counters that measure different metrics. The description of them could be found in the table [系统。活动](#system_tables-events) +- `ProfileEvents.Values` (Array(UInt64)) — Values of metrics that are listed in the `ProfileEvents.Names` 列。 +- `Settings.Names` (Array(String)) — Names of settings that were changed when the client ran the query. To enable logging changes to settings, set the `log_query_settings` 参数为1。 +- `Settings.Values` (Array(String)) — Values of settings that are listed in the `Settings.Names` 列。 -Each query creates one or two rows in the `query_log` table, depending on the status of the query: +每个查询创建一个或两个行中 `query_log` 表,具体取决于查询的状态: -1. If the query execution is successful, two events with types 1 and 2 are created (see the `type` column). -2. If an error occurred during query processing, two events with types 1 and 4 are created. -3. If an error occurred before launching the query, a single event with type 3 is created. +1. 如果查询执行成功,将创建两个类型为1和2的事件(请参阅 `type` 列)。 +2. 如果在查询处理过程中发生错误,将创建两个类型为1和4的事件。 +3. 如果在启动查询之前发生错误,将创建类型为3的单个事件。 -By default, logs are added to the table at intervals of 7.5 seconds. You can set this interval in the [query\_log](server_settings/settings.md#server_settings-query-log) server setting (see the `flush_interval_milliseconds` parameter). To flush the logs forcibly from the memory buffer into the table, use the `SYSTEM FLUSH LOGS` query. +默认情况下,日志以7.5秒的间隔添加到表中。 您可以在设置此时间间隔 [query\_log](server_configuration_parameters/settings.md#server_configuration_parameters-query-log) 服务器设置(请参阅 `flush_interval_milliseconds` 参数)。 要强制将日志从内存缓冲区刷新到表中,请使用 `SYSTEM FLUSH LOGS` 查询。 -When the table is deleted manually, it will be automatically created on the fly. Note that all the previous logs will be deleted. +当手动删除表时,它将自动动态创建。 请注意,所有以前的日志将被删除。 -!!! note "Note" - The storage period for logs is unlimited. Logs aren’t automatically deleted from the table. You need to organize the removal of outdated logs yourself. +!!! note "注" + 日志的存储周期是无限的。 日志不会自动从表中删除。 您需要自己组织删除过时的日志。 -You can specify an arbitrary partitioning key for the `system.query_log` table in the [query\_log](server_settings/settings.md#server_settings-query-log) server setting (see the `partition_by` parameter). +您可以指定一个任意的分区键 `system.query_log` 表中的 [query\_log](server_configuration_parameters/settings.md#server_configuration_parameters-query-log) 服务器设置(请参阅 `partition_by` 参数)。 -## system.query\_thread\_log {#system_tables-query-thread-log} +## 系统。query\_thread\_log {#system_tables-query-thread-log} -The table contains information about each query execution thread. 
-## system.query\_thread\_log {#system_tables-query-thread-log}
+## 系统。query\_thread\_log {#system_tables-query-thread-log}

-The table contains information about each query execution thread.
+该表包含有关每个查询执行线程的信息。

-ClickHouse creates this table only if the [query\_thread\_log](server_settings/settings.md#server_settings-query-thread-log) server parameter is specified. This parameter sets the logging rules, such as the logging interval or the name of the table the queries will be logged in.
+仅当指定了 [query\_thread\_log](server_configuration_parameters/settings.md#server_configuration_parameters-query-thread-log) 服务器参数时,ClickHouse才会创建此表。 此参数设置日志记录规则,例如日志记录间隔或将记录查询的表的名称。

-To enable query logging, set the [log\_query\_threads](settings/settings.md#settings-log-query-threads) parameter to 1. For details, see the [Settings](settings/settings.md) section.
+要启用查询日志记录,请将 [log\_query\_threads](settings/settings.md#settings-log-query-threads) 参数设置为1。 有关详细信息,请参阅 [设置](settings/settings.md) 部分。

-Columns:
+列:

- `event_date` (Date) — the date when the thread has finished execution of the query.
- `event_time` (DateTime) — the date and time when the thread has finished execution of the query.
- `query_start_time` (DateTime) — Start time of query execution.
- `query_duration_ms` (UInt64) — Duration of query execution.
- `read_rows` (UInt64) — Number of read rows.
- `read_bytes` (UInt64) — Number of read bytes.
-- `written_rows` (UInt64) — For `INSERT` queries, the number of written rows. For other queries, the column value is 0.
-- `written_bytes` (UInt64) — For `INSERT` queries, the number of written bytes. For other queries, the column value is 0.
+- `written_rows` (UInt64) — For `INSERT` 查询,写入的行数。 对于其他查询,列值为0。
+- `written_bytes` (UInt64) — For `INSERT` 查询,写入的字节数。 对于其他查询,列值为0。
- `memory_usage` (Int64) — The difference between the amount of allocated and freed memory in context of this thread.
- `peak_memory_usage` (Int64) — The maximum difference between the amount of allocated and freed memory in context of this thread.
- `thread_name` (String) — Name of the thread.
- `user` (String) — Name of the user who initiated the current query.
- `query_id` (String) — ID of the query.
- `address` (IPv6) — IP address that was used to make the query.
- `port` (UInt16) — The client port that was used to make the query.
- `initial_user` (String) — Name of the user who ran the initial query (for distributed query execution).
- `initial_query_id` (String) — ID of the initial query (for distributed query execution).
- `initial_address` (IPv6) — IP address that the parent query was launched from.
- `initial_port` (UInt16) — The client port that was used to make the parent query.
- `interface` (UInt8) — Interface that the query was initiated from. Possible values:
    - 1 — TCP.
    - 2 — HTTP.
-- `os_user` (String) — OS’s username who runs [clickhouse-client](../interfaces/cli.md).
+- `client_hostname` (String) — Hostname of the client machine where the [clickhouse-client](../interfaces/cli.md) 或另一个TCP客户端运行的客户端机器的主机名。
+- `client_name` (String) — The [clickhouse-client](../interfaces/cli.md) 或另一个TCP客户端的名称。
+- `client_revision` (UInt32) — Revision of the [clickhouse-client](../interfaces/cli.md) 或另一个TCP客户端。
+- `client_version_major` (UInt32) — Major version of the [clickhouse-client](../interfaces/cli.md) 或另一个TCP客户端。
+- `client_version_minor` (UInt32) — Minor version of the [clickhouse-client](../interfaces/cli.md) 或另一个TCP客户端。
+- `client_version_patch` (UInt32) — Patch component of the [clickhouse-client](../interfaces/cli.md) 或另一个TCP客户端版本。
 - `http_method` (UInt8) — HTTP method that initiated the query. Possible values:
     - 0 — The query was launched from the TCP interface.
-    - 1 — `GET` method was used.
-    - 2 — `POST` method was used.
-- `http_user_agent` (String) — The `UserAgent` header passed in the HTTP request.
-- `quota_key` (String) — The “quota key” specified in the [quotas](quotas.md) setting (see `keyed`).
+    - 1 — `GET` 方法被使用。
+    - 2 — `POST` 方法被使用。
+- `http_user_agent` (String) — The `UserAgent` HTTP请求中传递的标头。
+- `quota_key` (String) — The “quota key” 在 [配额](quotas.md) 设置中指定(参见 `keyed`)。
 - `revision` (UInt32) — ClickHouse revision.
-- `ProfileEvents.Names` (Array(String)) — Counters that measure different metrics for this thread. The description of them could be found in the table [system.events](#system_tables-events)
-- `ProfileEvents.Values` (Array(UInt64)) — Values of metrics for this thread that are listed in the `ProfileEvents.Names` column.
+- `ProfileEvents.Names` (Array(String)) — Counters that measure different metrics for this thread. The description of them could be found in the table [system.events](#system_tables-events)
+- `ProfileEvents.Values` (Array(UInt64)) — Values of metrics for this thread that are listed in the `ProfileEvents.Names` 列。

-By default, logs are added to the table at intervals of 7.5 seconds. You can set this interval in the [query\_thread\_log](server_settings/settings.md#server_settings-query-thread-log) server setting (see the `flush_interval_milliseconds` parameter). To flush the logs forcibly from the memory buffer into the table, use the `SYSTEM FLUSH LOGS` query.
+默认情况下,日志以7.5秒的间隔添加到表中。 可以在 [query\_thread\_log](server_configuration_parameters/settings.md#server_configuration_parameters-query-thread-log) 服务器设置中调整此时间间隔(请参阅 `flush_interval_milliseconds` 参数)。 要强制将日志从内存缓冲区刷新到表中,请使用 `SYSTEM FLUSH LOGS` 查询。

-When the table is deleted manually, it will be automatically created on the fly. Note that all the previous logs will be deleted.
+手动删除该表后,它会被自动重新创建。 请注意,所有以前的日志都会被删除。

-!!! note "Note"
-    The storage period for logs is unlimited. Logs aren’t automatically deleted from the table. You need to organize the removal of outdated logs yourself.
+!!! note "注"
+    日志的存储期限是无限的。 日志不会自动从表中删除。 您需要自行安排清理过时的日志。

-You can specify an arbitrary partitioning key for the `system.query_thread_log` table in the [query\_thread\_log](server_settings/settings.md#server_settings-query-thread-log) server setting (see the `partition_by` parameter).
+您可以在 [query\_thread\_log](server_configuration_parameters/settings.md#server_configuration_parameters-query-thread-log) 服务器设置中为 `system.query_thread_log` 表指定任意分区键(请参阅 `partition_by` 参数)。
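+作为补充(以下查询仅作示意,假设已启用 `log_query_threads`),可以查看今天各查询线程的峰值内存用量:
+
+``` sql
+SELECT thread_name, thread_number, peak_memory_usage
+FROM system.query_thread_log
+WHERE event_date = today()
+ORDER BY event_time DESC
+LIMIT 10
+```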
-## system.trace\_log {#system_tables-trace_log}
+## system.trace\_log {#system_tables-trace_log}

-Contains stack traces collected by the sampling query profiler.
+包含采样查询探查器收集的堆栈跟踪。

-ClickHouse creates this table when the [trace\_log](server_settings/settings.md#server_settings-trace_log) server configuration section is set. Also the [query\_profiler\_real\_time\_period\_ns](settings/settings.md#query_profiler_real_time_period_ns) and [query\_profiler\_cpu\_time\_period\_ns](settings/settings.md#query_profiler_cpu_time_period_ns) settings should be set.
+当设置了 [trace\_log](server_configuration_parameters/settings.md#server_configuration_parameters-trace_log) 服务器配置部分时,ClickHouse会创建此表。 此外,还应设置 [query\_profiler\_real\_time\_period\_ns](settings/settings.md#query_profiler_real_time_period_ns) 和 [query\_profiler\_cpu\_time\_period\_ns](settings/settings.md#query_profiler_cpu_time_period_ns) 这两个设置。

-To analyze logs, use the `addressToLine`, `addressToSymbol` and `demangle` introspection functions.
+要分析日志,请使用 `addressToLine`、`addressToSymbol` 和 `demangle` 自省函数。

-Columns:
+列:

-- `event_date`([Date](../data_types/date.md)) — Date of sampling moment.
+- `event_date`([日期](../sql_reference/data_types/date.md)) — Date of sampling moment.

-- `event_time`([DateTime](../data_types/datetime.md)) — Timestamp of sampling moment.
+- `event_time`([日期时间](../sql_reference/data_types/datetime.md)) — Timestamp of sampling moment.

-- `revision`([UInt32](../data_types/int_uint.md)) — ClickHouse server build revision.
+- `revision`([UInt32](../sql_reference/data_types/int_uint.md)) — ClickHouse server build revision.

-    When connecting to server by `clickhouse-client`, you see the string similar to `Connected to ClickHouse server version 19.18.1 revision 54429.`. This field contains the `revision`, but not the `version` of a server.
+    通过 `clickhouse-client` 连接到服务器时,你会看到类似 `Connected to ClickHouse server version 19.18.1 revision 54429.` 的字符串。 该字段包含的是服务器的 `revision`,而不是 `version`。

-- `timer_type`([Enum8](../data_types/enum.md)) — Timer type:
+- `timer_type`([Enum8](../sql_reference/data_types/enum.md)) — Timer type:

-    - `Real` represents wall-clock time.
-    - `CPU` represents CPU time.
+    - `Real` 表示挂钟时间。
+    - `CPU` 表示CPU时间。

-- `thread_number`([UInt32](../data_types/int_uint.md)) — Thread identifier.
+- `thread_number`([UInt32](../sql_reference/data_types/int_uint.md)) — Thread identifier.

-- `query_id`([String](../data_types/string.md)) — Query identifier that can be used to get details about a query that was running from the [query\_log](#system_tables-query_log) system table.
+- `query_id`([字符串](../sql_reference/data_types/string.md)) — Query identifier that can be used to get details about a query that was running from the [query\_log](#system_tables-query_log) 系统表。

-- `trace`([Array(UInt64)](../data_types/array.md)) — Stack trace at the moment of sampling. Each element is a virtual memory address inside ClickHouse server process.
+- `trace`([数组(UInt64)](../sql_reference/data_types/array.md)) — Stack trace at the moment of sampling. Each element is a virtual memory address inside ClickHouse server process.
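+作为补充(以下查询仅作示意,且假设会话已设置 `allow_introspection_functions = 1`),可以用上述自省函数把 `trace` 中的地址符号化:
+
+``` sql
+SELECT arrayStringConcat(arrayMap(x -> demangle(addressToSymbol(x)), trace), '\n') AS trace_symbols
+FROM system.trace_log
+LIMIT 1
+```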
-**Example**
+**示例**

``` sql
SELECT * FROM system.trace_log LIMIT 1 \G
```

@@ -714,12 +717,12 @@ query_id: acc4d61f-5bd1-4a3e-bc91-2180be37c915
 trace: [94222141367858,94222152240175,94222152325351,94222152329944,94222152330796,94222151449980,94222144088167,94222151682763,94222144088167,94222151682763,94222144088167,94222144058283,94222144059248,94222091840750,94222091842302,94222091831228,94222189631488,140509950166747,140509942945935]
 ```

-## system.replicas {#system_tables-replicas}
+## system.replicas {#system_tables-replicas}

-Contains information and status for replicated tables residing on the local server.
-This table can be used for monitoring. The table contains a row for every Replicated\* table.
+包含驻留在本地服务器上的复制表的信息和状态。
+此表可用于监控。 该表为每个 Replicated\* 表包含一行。

-Example:
+示例:

``` sql
SELECT *
@@ -763,46 +766,46 @@ total_replicas: 2
 active_replicas: 2
 ```

-Columns:
+列:

-- `database` (`String`) - Database name
-- `table` (`String`) - Table name
-- `engine` (`String`) - Table engine name
-- `is_leader` (`UInt8`) - Whether the replica is the leader.
-    Only one replica at a time can be the leader. The leader is responsible for selecting background merges to perform.
-    Note that writes can be performed to any replica that is available and has a session in ZK, regardless of whether it is a leader.
-- `can_become_leader` (`UInt8`) - Whether the replica can be elected as a leader.
-- `is_readonly` (`UInt8`) - Whether the replica is in read-only mode.
-    This mode is turned on if the config doesn’t have sections with ZooKeeper, if an unknown error occurred when reinitializing sessions in ZooKeeper, and during session reinitialization in ZooKeeper.
-- `is_session_expired` (`UInt8`) - the session with ZooKeeper has expired. Basically the same as `is_readonly`.
-- `future_parts` (`UInt32`) - The number of data parts that will appear as the result of INSERTs or merges that haven’t been done yet.
-- `parts_to_check` (`UInt32`) - The number of data parts in the queue for verification. A part is put in the verification queue if there is suspicion that it might be damaged.
-- `zookeeper_path` (`String`) - Path to table data in ZooKeeper.
-- `replica_name` (`String`) - Replica name in ZooKeeper. Different replicas of the same table have different names.
-- `replica_path` (`String`) - Path to replica data in ZooKeeper. The same as concatenating ‘zookeeper\_path/replicas/replica\_path’.
-- `columns_version` (`Int32`) - Version number of the table structure. Indicates how many times ALTER was performed. If replicas have different versions, it means some replicas haven’t made all of the ALTERs yet.
-- `queue_size` (`UInt32`) - Size of the queue for operations waiting to be performed. Operations include inserting blocks of data, merges, and certain other actions. It usually coincides with `future_parts`.
-- `inserts_in_queue` (`UInt32`) - Number of inserts of blocks of data that need to be made. Insertions are usually replicated fairly quickly. If this number is large, it means something is wrong.
-- `merges_in_queue` (`UInt32`) - The number of merges waiting to be made. Sometimes merges are lengthy, so this value may be greater than zero for a long time.
-- `part_mutations_in_queue` (`UInt32`) - The number of mutations waiting to be made.
-- `queue_oldest_time` (`DateTime`) - If `queue_size` greater than 0, shows when the oldest operation was added to the queue.
-- `inserts_oldest_time` (`DateTime`) - See `queue_oldest_time`
-- `merges_oldest_time` (`DateTime`) - See `queue_oldest_time`
-- `part_mutations_oldest_time` (`DateTime`) - See `queue_oldest_time`
+- `database` (`String`)-数据库名称
+- `table` (`String`)-表名
+- `engine` (`String`)-表引擎名称
+- `is_leader` (`UInt8`)-副本是否是领导者。
+    一次只有一个副本可以成为领导者。 领导者负责选择要执行的后台合并。
+    请注意,可以对任何可用且在ZooKeeper中具有会话的副本执行写操作,而不管该副本是否为领导者。
+- `can_become_leader` (`UInt8`)-副本是否可以被选举为领导者。
+- `is_readonly` (`UInt8`)-副本是否处于只读模式。
+    如果配置中没有ZooKeeper的相关部分、在ZooKeeper中重新初始化会话时发生了未知错误,以及在ZooKeeper中重新初始化会话期间,此模式都会开启。
+- `is_session_expired` (`UInt8`)-与ZooKeeper的会话是否已过期。 基本上与 `is_readonly` 相同。
+- `future_parts` (`UInt32`)-由尚未完成的插入或合并将产生的数据部分的数量。
+- `parts_to_check` (`UInt32`)-队列中等待验证的数据部分的数量。 如果怀疑某个数据部分可能已损坏,则会将其放入验证队列。
+- `zookeeper_path` (`String`)-ZooKeeper中表数据的路径。
+- `replica_name` (`String`)-ZooKeeper中的副本名称。 同一表的不同副本具有不同的名称。
+- `replica_path` (`String`)-ZooKeeper中副本数据的路径。 等同于拼接 ‘zookeeper\_path/replicas/replica\_path’。
+- `columns_version` (`Int32`)-表结构的版本号。 指示执行过多少次ALTER。 如果各副本的版本不同,说明某些副本尚未执行全部的ALTER。
+- `queue_size` (`UInt32`)-等待执行的操作的队列大小。 操作包括插入数据块、合并和某些其他操作。 它通常与 `future_parts` 一致。
+- `inserts_in_queue` (`UInt32`)-需要插入的数据块的数量。 插入通常复制得相当快。 如果这个数字很大,说明出了问题。
+- `merges_in_queue` (`UInt32`)-等待进行的合并的数量。 有时合并耗时很长,因此此值可能长时间大于零。
+- `part_mutations_in_queue` (`UInt32`)-等待进行的突变的数量。
+- `queue_oldest_time` (`DateTime`)-如果 `queue_size` 大于0,显示最旧的操作是何时被加入队列的。
+- `inserts_oldest_time` (`DateTime`)-参见 `queue_oldest_time`
+- `merges_oldest_time` (`DateTime`)-参见 `queue_oldest_time`
+- `part_mutations_oldest_time` (`DateTime`)-参见 `queue_oldest_time`

-The next 4 columns have a non-zero value only where there is an active session with ZK.
+接下来的4列只有在与ZooKeeper存在活动会话时才具有非零值。

-- `log_max_index` (`UInt64`) - Maximum entry number in the log of general activity.
-- `log_pointer` (`UInt64`) - Maximum entry number in the log of general activity that the replica copied to its execution queue, plus one. If `log_pointer` is much smaller than `log_max_index`, something is wrong.
-- `last_queue_update` (`DateTime`) - When the queue was updated last time.
-- `absolute_delay` (`UInt64`) - How big lag in seconds the current replica has.
-- `total_replicas` (`UInt8`) - The total number of known replicas of this table.
-- `active_replicas` (`UInt8`) - The number of replicas of this table that have a session in ZooKeeper (i.e., the number of functioning replicas).
+- `log_max_index` (`UInt64`)-常规活动日志中的最大条目编号。
+- `log_pointer` (`UInt64`)-副本已复制到其执行队列的常规活动日志中的最大条目编号加一。 如果 `log_pointer` 远小于 `log_max_index`,则说明出了问题。
+- `last_queue_update` (`DateTime`)-上次更新队列的时间。
+- `absolute_delay` (`UInt64`)-当前副本滞后的秒数。
+- `total_replicas` (`UInt8`)-此表的已知副本总数。
+- `active_replicas` (`UInt8`)-此表在ZooKeeper中具有会话的副本的数量(即正常运行的副本的数量)。

-If you request all the columns, the table may work a bit slowly, since several reads from ZooKeeper are made for each row.
-If you don’t request the last 4 columns (log\_max\_index, log\_pointer, total\_replicas, active\_replicas), the table works quickly.
+如果请求所有列,该表的查询可能会比较慢,因为每一行都要从ZooKeeper进行多次读取。
+如果不请求最后4列(log\_max\_index、log\_pointer、total\_replicas、active\_replicas),该表的查询会很快。

-For example, you can check that everything is working correctly like this:
+例如,您可以像下面这样检查一切是否正常工作:

``` sql
SELECT
@@ -834,32 +837,61 @@ WHERE
     OR active_replicas < total_replicas
 ```

-If this query doesn’t return anything, it means that everything is fine.
+如果这个查询没有返回任何结果,说明一切正常。
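+作为补充(仅作示意),如果只想监控复制延迟,可以按 `absolute_delay` 排序查看:
+
+``` sql
+SELECT database, table, absolute_delay
+FROM system.replicas
+ORDER BY absolute_delay DESC
+LIMIT 10
+```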
-## system.settings {#system-settings}
+## system.settings {#system-tables-system-settings}

-Contains information about settings that are currently in use.
-I.e. used for executing the query you are using to read from the system.settings table.
+包含当前用户会话设置的信息。

-Columns:
+列:

-- `name` (String) — Setting name.
-- `value` (String) — Setting value.
-- `description` (String) — Setting description.
-- `type` (String) — Setting type (implementation specific string value).
-- `changed` (UInt8) — Whether the setting was explicitly defined in the config or explicitly changed.
-- `min` (Nullable(String)) — Get minimum allowed value (if any is set via [constraints](settings/constraints_on_settings.md#constraints-on-settings)).
-- `max` (Nullable(String)) — Get maximum allowed value (if any is set via [constraints](settings/constraints_on_settings.md#constraints-on-settings)).
-- `readonly` (UInt8) — Can user change this setting (for more info, look into [constraints](settings/constraints_on_settings.md#constraints-on-settings)).
+- `name` ([字符串](../sql_reference/data_types/string.md)) — Setting name.
+- `value` ([字符串](../sql_reference/data_types/string.md)) — Setting value.
+- `changed` ([UInt8](../sql_reference/data_types/int_uint.md#uint-ranges)) — Shows whether a setting is changed from its default value.
+- `description` ([字符串](../sql_reference/data_types/string.md)) — Short setting description.
+- `min` ([可为空](../sql_reference/data_types/nullable.md)([字符串](../sql_reference/data_types/string.md))) — Minimum value of the setting, if any is set via [约束](settings/constraints_on_settings.md#constraints-on-settings). 如果设置没有最小值,则包含 [NULL](../sql_reference/syntax.md#null-literal)。
+- `max` ([可为空](../sql_reference/data_types/nullable.md)([字符串](../sql_reference/data_types/string.md))) — Maximum value of the setting, if any is set via [约束](settings/constraints_on_settings.md#constraints-on-settings). 如果设置没有最大值,则包含 [NULL](../sql_reference/syntax.md#null-literal)。
+- `readonly` ([UInt8](../sql_reference/data_types/int_uint.md#uint-ranges)) — Shows whether the current user can change the setting:
+    - `0` — Current user can change the setting.
+    - `1` — Current user can't change the setting.

-Example:
+**示例**
+
+下面的示例演示如何获取名称中包含 `min_i` 的设置的信息。

``` sql
-SELECT name, value
+SELECT *
 FROM system.settings
-WHERE changed
+WHERE name LIKE '%min_i%'
```

+``` text
+┌─name────────────────────────────────────────┬─value─────┬─changed─┬─description───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬─min──┬─max──┬─readonly─┐
+│ min_insert_block_size_rows                  │ 1048576   │       0 │ Squash blocks passed to INSERT query to specified size in rows, if blocks are not big enough.                                                                          │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │        0 │
+│ min_insert_block_size_bytes                 │ 268435456 │       0 │ Squash blocks passed to INSERT query to specified size in bytes, if blocks are not big enough.                                                                         │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │        0 │
+│ read_backoff_min_interval_between_events_ms │ 1000      │       0 │ Settings to reduce the number of threads in case of slow reads. Do not pay attention to the event, if the previous one has passed less than a certain amount of time. │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │        0 │
+└─────────────────────────────────────────────┴───────────┴─────────┴─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴──────┴──────┴──────────┘
+```
+
+使用 `WHERE changed` 会很有用,例如,当你想检查:
+
+- 配置文件中的设置是否被正确加载并正在生效。
+- 在当前会话中被更改过的设置。
+
+``` sql
+SELECT * FROM system.settings WHERE changed AND name='load_balancing'
+```
+
+**另请参阅**
+
+- [设置](settings/index.md#settings)
+- [查询权限](settings/permissions_for_queries.md#settings_readonly)
+- [对设置的限制](settings/constraints_on_settings.md)

``` text
┌─name───────────────────┬─value───────┐
│ max_threads            │ 8           │
@@ -869,11 +901,11 @@ WHERE changed
 └────────────────────────┴─────────────┘
 ```

-## system.merge\_tree\_settings {#system-merge_tree_settings}
+## system.merge\_tree\_settings {#system-merge_tree_settings}

-Contains information about settings for `MergeTree` tables.
+包含 `MergeTree` 表的设置信息。

-Columns:
+列:

 - `name` (String) — Setting name.
 - `value` (String) — Setting value.
 - `description` (String) — Setting description.
 - `type` (String) — Setting type (implementation specific string value).
 - `changed` (UInt8) — Whether the setting was explicitly defined in the config or explicitly changed.
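+作为补充(仅作示意),可以列出被显式修改过的MergeTree设置:
+
+``` sql
+SELECT name, value FROM system.merge_tree_settings WHERE changed
+```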
-## system.table\_engines {#system-table-engines}
+## system.table\_engines {#system-table-engines}

-Contains description of table engines supported by server and their feature support information.
+包含服务器支持的表引擎的描述及其功能支持信息。

-This table contains the following columns (the column type is shown in brackets):
+此表包含以下列(列类型显示在括号中):

 - `name` (String) — The name of table engine.
-- `supports_settings` (UInt8) — Flag that indicates if table engine supports `SETTINGS` clause.
-- `supports_skipping_indices` (UInt8) — Flag that indicates if table engine supports [skipping indices](table_engines/mergetree/#table_engine-mergetree-data_skipping-indexes).
-- `supports_ttl` (UInt8) — Flag that indicates if table engine supports [TTL](table_engines/mergetree/#table_engine-mergetree-ttl).
-- `supports_sort_order` (UInt8) — Flag that indicates if table engine supports clauses `PARTITION_BY`, `PRIMARY_KEY`, `ORDER_BY` and `SAMPLE_BY`.
-- `supports_replication` (UInt8) — Flag that indicates if table engine supports [data replication](table_engines/replication/).
+- `supports_settings` (UInt8) — Flag that indicates if table engine supports `SETTINGS` 子句。
+- `supports_skipping_indices` (UInt8) — Flag that indicates if table engine supports [跳数索引](../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-data_skipping-indexes).
+- `supports_ttl` (UInt8) — Flag that indicates if table engine supports [TTL](../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-ttl).
+- `supports_sort_order` (UInt8) — Flag that indicates if table engine supports clauses `PARTITION_BY`, `PRIMARY_KEY`, `ORDER_BY` 和 `SAMPLE_BY`.
+- `supports_replication` (UInt8) — Flag that indicates if table engine supports [数据复制](../engines/table_engines/mergetree_family/replication.md).
 - `supports_duduplication` (UInt8) — Flag that indicates if table engine supports data deduplication.

-Example:
+示例:

``` sql
SELECT *
@@ -911,56 +943,72 @@ WHERE name in ('Kafka', 'MergeTree', 'ReplicatedCollapsingMergeTree')
 └───────────────────────────────┴───────────────────┴───────────────────────────┴─────────────────────┴──────────────┴──────────────────────┴────────────────────────┘
 ```

-**See also**
+**另请参阅**

-- MergeTree family [query clauses](table_engines/mergetree.md#mergetree-query-clauses)
-- Kafka [settings](table_engines/kafka.md#table_engine-kafka-creating-a-table)
-- Join [settings](table_engines/join.md#join-limitations-and-settings)
+- MergeTree 家族的 [查询子句](../engines/table_engines/mergetree_family/mergetree.md#mergetree-query-clauses)
+- Kafka 的 [设置](../engines/table_engines/integrations/kafka.md#table_engine-kafka-creating-a-table)
+- Join 的 [设置](../engines/table_engines/special/join.md#join-limitations-and-settings)

-## system.tables {#system-tables}
+## system.tables {#system-tables}

-Contains metadata of each table that the server knows about. Detached tables are not shown in `system.tables`.
+包含服务器已知的每个表的元数据。 分离(detached)的表不会显示在 `system.tables` 中。

-This table contains the following columns (the column type is shown in brackets):
+此表包含以下列(列类型显示在括号中):

 - `database` (String) — The name of the database the table is in.

 - `name` (String) — Table name.

 - `engine` (String) — Table engine name (without parameters).

-- `is_temporary` (UInt8) - Flag that indicates whether the table is temporary.
-- `data_path` (String) - Path to the table data in the file system.
-- `metadata_path` (String) - Path to the table metadata in the file system.
-- `metadata_modification_time` (DateTime) - Time of latest modification of the table metadata.
-- `dependencies_database` (Array(String)) - Database dependencies.
-- `dependencies_table` (Array(String)) - Table dependencies ([MaterializedView](table_engines/materializedview.md) tables based on the current table).
-- `create_table_query` (String) - The query that was used to create the table.
-- `engine_full` (String) - Parameters of the table engine.
-- `partition_key` (String) - The partition key expression specified in the table.
-- `sorting_key` (String) - The sorting key expression specified in the table.
-- `primary_key` (String) - The primary key expression specified in the table.
-- `sampling_key` (String) - The sampling key expression specified in the table.
-- `storage_policy` (String) - The storage policy:
-    - [MergeTree](table_engines/mergetree.md#table_engine-mergetree-multiple-volumes)
-    - [Distributed](table_engines/distributed.md#distributed)
-- `total_rows` (Nullable(UInt64)) - Total number of rows, if it is possible to quickly determine exact number of rows in the table, otherwise `Null` (including underying `Buffer` table).
-- `total_bytes` (Nullable(UInt64)) - Total number of bytes, if it is possible to quickly determine exact number of bytes for the table on storage, otherwise `Null` (**does not** includes any underlying storage).
-    - If the table stores data on disk, returns used space on disk (i.e. compressed).
-    - If the table stores data in memory, returns approximated number of used bytes in memory.
-The `system.tables` table is used in `SHOW TABLES` query implementation.
-## system.zookeeper {#system-zookeeper}
-The table does not exist if ZooKeeper is not configured. Allows reading data from the ZooKeeper cluster defined in the config.
-The query must have a ‘path’ equality condition in the WHERE clause. This is the path in ZooKeeper for the children that you want to get data for.
-The query `SELECT * FROM system.zookeeper WHERE path = '/clickhouse'` outputs data for all children on the `/clickhouse` node.
-To output data for all root nodes, write path = ‘/’.
-If the path specified in ‘path’ doesn’t exist, an exception will be thrown.
-Columns:
+- `is_temporary` (UInt8) - 指示表是否是临时表的标志。
+
+- `data_path` (String) - 文件系统中表数据的路径。
+
+- `metadata_path` (String) - 文件系统中表元数据的路径。
+
+- `metadata_modification_time` (DateTime) - 表元数据的最新修改时间。
+
+- `dependencies_database` (Array(String)) - 数据库依赖关系。
+
+- `dependencies_table` (Array(String)) - 表依赖关系(基于当前表的 [MaterializedView](../engines/table_engines/special/materializedview.md) 表)。
+
+- `create_table_query` (String) - 用于创建表的查询。
+
+- `engine_full` (String) - 表引擎的参数。
+
+- `partition_key` (String) - 表中指定的分区键表达式。
+
+- `sorting_key` (String) - 表中指定的排序键表达式。
+
+- `primary_key` (String) - 表中指定的主键表达式。
+
+- `sampling_key` (String) - 表中指定的采样键表达式。
+
+- `storage_policy` (String) - 存储策略:
+
+    - [MergeTree](../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-multiple-volumes)
+    - [Distributed](../engines/table_engines/special/distributed.md#distributed)
+
+- `total_rows` (Nullable(UInt64)) - 总行数。如果可以快速确定表中的确切行数则为该值,否则为 `Null`(包括底层的 `Buffer` 表)。
+
+- `total_bytes` (Nullable(UInt64)) - 总字节数。如果可以快速确定表在存储上的确切字节数则为该值,否则为 `Null`(**不**包括任何底层存储)。
+
+    - 如果表将数据存储在磁盘上,返回磁盘上占用的空间(即压缩后的大小)。
+    - 如果表将数据存储在内存中,返回内存中使用的近似字节数。
+
+`system.tables` 表用于实现 `SHOW TABLES` 查询。
+
+## system.zookeeper {#system-zookeeper}
+
+如果未配置ZooKeeper,则该表不存在。 它允许从配置中定义的ZooKeeper集群读取数据。
+查询必须在 WHERE 子句中带有 ‘path’ 的等值条件。 这是ZooKeeper中您想要获取其子节点数据的路径。
+
+查询 `SELECT * FROM system.zookeeper WHERE path = '/clickhouse'` 会输出 `/clickhouse` 节点上所有子节点的数据。
+要输出所有根节点的数据,请写 path = ‘/’。
+如果 ‘path’ 中指定的路径不存在,将引发异常。
+
+列:

 - `name` (String) — The name of the node.
 - `path` (String) — The path to the node.
 - `value` (String) — Node value.
 - `dataLength` (Int32) — Size of the value.
 - `numChildren` (Int32) — Number of descendants.
 - `czxid` (Int64) — ID of the transaction that created the node.
 - `mzxid` (Int64) — ID of the transaction that last changed the node.
 - `pzxid` (Int64) — ID of the transaction that last deleted or added descendants.
 - `ctime` (DateTime) — Time of node creation.
 - `mtime` (DateTime) — Time of the last modification of the node.
 - `version` (Int32) — Node version: the number of times the node was changed.
 - `cversion` (Int32) — Number of added or removed descendants.
 - `aversion` (Int32) — Number of changes to the ACL.
 - `ephemeralOwner` (Int64) — For ephemeral nodes, the ID of the session that owns this node.

-Example:
+示例:

``` sql
SELECT *
@@ -1022,57 +1070,57 @@ pzxid: 987021252247
 path:           /clickhouse/tables/01-08/visits/replicas
 ```

-## system.mutations {#system_tables-mutations}
+## system.mutations {#system_tables-mutations}

-The table contains information about [mutations](../query_language/alter.md#alter-mutations) of MergeTree tables and their progress. Each mutation command is represented by a single row. The table has the following columns:
+该表包含有关MergeTree表的 [突变](../sql_reference/statements/alter.md#alter-mutations) 及其进度的信息。 每个突变命令由一行表示。 该表具有以下列:

-**database**, **table** - The name of the database and table to which the mutation was applied.
+**database**, **table** - 应用突变的数据库和表的名称。

-**mutation\_id** - The ID of the mutation. For replicated tables these IDs correspond to znode names in the `/mutations/` directory in ZooKeeper. For unreplicated tables the IDs correspond to file names in the data directory of the table.
+**mutation\_id** - 突变的ID。 对于复制表,这些ID对应于ZooKeeper中 `/mutations/` 目录下的znode名称。 对于非复制表,ID对应于表数据目录中的文件名。

-**command** - The mutation command string (the part of the query after `ALTER TABLE [db.]table`).
+**command** - 突变命令字符串(查询中 `ALTER TABLE [db.]table` 之后的部分)。

-**create\_time** - When this mutation command was submitted for execution.
+**create\_time** - 此突变命令被提交执行的时间。

-**block\_numbers.partition\_id**, **block\_numbers.number** - A nested column. For mutations of replicated tables, it contains one record for each partition: the partition ID and the block number that was acquired by the mutation (in each partition, only parts that contain blocks with numbers less than the block number acquired by the mutation in that partition will be mutated). In non-replicated tables, block numbers in all partitions form a single sequence. This means that for mutations of non-replicated tables, the column will contain one record with a single block number acquired by the mutation.
+**block\_numbers.partition\_id**, **block\_numbers.number** - 嵌套列。 对于复制表的突变,它为每个分区包含一条记录:分区ID和突变所获取的块编号(在每个分区中,只有包含编号小于该分区中突变所获取块编号的块的数据部分才会被突变)。 在非复制表中,所有分区中的块编号构成一个序列。 这意味着对于非复制表的突变,该列将包含一条记录,其中是突变所获取的单个块编号。

-**parts\_to\_do** - The number of data parts that need to be mutated for the mutation to finish.
+**parts\_to\_do** - 为完成突变而需要被突变的数据部分的数量。

-**is\_done** - Is the mutation done? Note that even if `parts_to_do = 0` it is possible that a mutation of a replicated table is not done yet because of a long-running INSERT that will create a new data part that will need to be mutated.
+**is\_done** - 突变是否已完成? 请注意,即使 `parts_to_do = 0`,复制表的突变也可能尚未完成,因为长时间运行的INSERT会创建需要被突变的新数据部分。

-If there were problems with mutating some parts, the following columns contain additional information:
+如果突变某些数据部分时出现问题,以下列将包含附加信息:

-**latest\_failed\_part** - The name of the most recent part that could not be mutated.
+**latest\_failed\_part** - 最近一个无法被突变的数据部分的名称。

-**latest\_fail\_time** - The time of the most recent part mutation failure.
+**latest\_fail\_time** - 最近一次数据部分突变失败的时间。

-**latest\_fail\_reason** - The exception message that caused the most recent part mutation failure.
+**latest\_fail\_reason** - 导致最近一次数据部分突变失败的异常消息。

-## system.disks {#system_tables-disks}
+## system.disks {#system_tables-disks}

-Contains information about disks defined in the [server configuration](table_engines/mergetree.md#table_engine-mergetree-multiple-volumes_configure).
+包含 [服务器配置](../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-multiple-volumes_configure) 中定义的磁盘的信息。

-Columns:
+列:

-- `name` ([String](../data_types/string.md)) — Name of a disk in the server configuration.
-- `path` ([String](../data_types/string.md)) — Path to the mount point in the file system.
-- `free_space` ([UInt64](../data_types/int_uint.md)) — Free space on disk in bytes.
-- `total_space` ([UInt64](../data_types/int_uint.md)) — Disk volume in bytes.
-- `keep_free_space` ([UInt64](../data_types/int_uint.md)) — Amount of disk space that should stay free on disk in bytes. Defined in the `keep_free_space_bytes` parameter of disk configuration.
+- `name` ([字符串](../sql_reference/data_types/string.md)) — Name of a disk in the server configuration.
+- `path` ([字符串](../sql_reference/data_types/string.md)) — Path to the mount point in the file system.
+- `free_space` ([UInt64](../sql_reference/data_types/int_uint.md)) — Free space on disk in bytes.
+- `total_space` ([UInt64](../sql_reference/data_types/int_uint.md)) — Disk volume in bytes.
+- `keep_free_space` ([UInt64](../sql_reference/data_types/int_uint.md)) — Amount of disk space that should stay free on disk in bytes. Defined in the `keep_free_space_bytes` 磁盘配置参数。

-## system.storage\_policies {#system_tables-storage_policies}
+## system.storage\_policies {#system_tables-storage_policies}

-Contains information about storage policies and volumes defined in the [server configuration](table_engines/mergetree.md#table_engine-mergetree-multiple-volumes_configure).
+包含有关存储策略和卷中定义的信息 [服务器配置](../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-multiple-volumes_configure). -Columns: +列: -- `policy_name` ([String](../data_types/string.md)) — Name of the storage policy. -- `volume_name` ([String](../data_types/string.md)) — Volume name defined in the storage policy. -- `volume_priority` ([UInt64](../data_types/int_uint.md)) — Volume order number in the configuration. -- `disks` ([Array(String)](../data_types/array.md)) — Disk names, defined in the storage policy. -- `max_data_part_size` ([UInt64](../data_types/int_uint.md)) — Maximum size of a data part that can be stored on volume disks (0 — no limit). -- `move_factor` ([Float64](../data_types/float.md)) — Ratio of free disk space. When the ratio exceeds the value of configuration parameter, ClickHouse start to move data to the next volume in order. +- `policy_name` ([字符串](../sql_reference/data_types/string.md)) — Name of the storage policy. +- `volume_name` ([字符串](../sql_reference/data_types/string.md)) — Volume name defined in the storage policy. +- `volume_priority` ([UInt64](../sql_reference/data_types/int_uint.md)) — Volume order number in the configuration. +- `disks` ([数组(字符串)](../sql_reference/data_types/array.md)) — Disk names, defined in the storage policy. +- `max_data_part_size` ([UInt64](../sql_reference/data_types/int_uint.md)) — Maximum size of a data part that can be stored on volume disks (0 — no limit). +- `move_factor` ([Float64](../sql_reference/data_types/float.md)) — Ratio of free disk space. When the ratio exceeds the value of configuration parameter, ClickHouse start to move data to the next volume in order. -If the storage policy contains more then one volume, then information for each volume is stored in the individual row of the table. +如果存储策略包含多个卷,则每个卷的信息将存储在表的单独行中。 -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/) +[原始文章](https://clickhouse.tech/docs/en/operations/system_tables/) diff --git a/docs/zh/operations/table_engines/generate.md b/docs/zh/operations/table_engines/generate.md deleted file mode 100644 index 051369d2e1c..00000000000 --- a/docs/zh/operations/table_engines/generate.md +++ /dev/null @@ -1,58 +0,0 @@ ---- -en_copy: true ---- - -# GenerateRandom {#table_engines-generate} - -The GenerateRandom table engine produces random data for given table schema. - -Usage examples: - -- Use in test to populate reproducible large table. -- Generate random input for fuzzing tests. - -## Usage in ClickHouse Server {#usage-in-clickhouse-server} - -``` sql -ENGINE = GenerateRandom(random_seed, max_string_length, max_array_length) -``` - -The `max_array_length` and `max_string_length` parameters specify maximum length of all -array columns and strings correspondingly in generated data. - -Generate table engine supports only `SELECT` queries. - -It supports all [DataTypes](../../data_types/index.md) that can be stored in a table except `LowCardinality` and `AggregateFunction`. - -**Example:** - -**1.** Set up the `generate_engine_table` table: - -``` sql -CREATE TABLE generate_engine_table (name String, value UInt32) ENGINE = GenerateRandom(1, 5, 3) -``` - -**2.** Query the data: - -``` sql -SELECT * FROM generate_engine_table LIMIT 3 -``` - -``` text -┌─name─┬──────value─┐ -│ c4xJ │ 1412771199 │ -│ r │ 1791099446 │ -│ 7#$ │ 124312908 │ -└──────┴────────────┘ -``` - -## Details of Implementation {#details-of-implementation} - -- Not supported: - - `ALTER` - - `SELECT ... 
SAMPLE` - - `INSERT` - - Indices - - Replication - -[Original article](https://clickhouse.tech/docs/en/operations/table_engines/generate/) diff --git a/docs/zh/operations/table_engines/hdfs.md b/docs/zh/operations/table_engines/hdfs.md deleted file mode 100644 index 576bbc49d72..00000000000 --- a/docs/zh/operations/table_engines/hdfs.md +++ /dev/null @@ -1,120 +0,0 @@ ---- -en_copy: true ---- - -# HDFS {#table_engines-hdfs} - -This engine provides integration with [Apache Hadoop](https://en.wikipedia.org/wiki/Apache_Hadoop) ecosystem by allowing to manage data on [HDFS](https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html)via ClickHouse. This engine is similar -to the [File](file.md) and [URL](url.md) engines, but provides Hadoop-specific features. - -## Usage {#usage} - -``` sql -ENGINE = HDFS(URI, format) -``` - -The `URI` parameter is the whole file URI in HDFS. -The `format` parameter specifies one of the available file formats. To perform -`SELECT` queries, the format must be supported for input, and to perform -`INSERT` queries – for output. The available formats are listed in the -[Formats](../../interfaces/formats.md#formats) section. -The path part of `URI` may contain globs. In this case the table would be readonly. - -**Example:** - -**1.** Set up the `hdfs_engine_table` table: - -``` sql -CREATE TABLE hdfs_engine_table (name String, value UInt32) ENGINE=HDFS('hdfs://hdfs1:9000/other_storage', 'TSV') -``` - -**2.** Fill file: - -``` sql -INSERT INTO hdfs_engine_table VALUES ('one', 1), ('two', 2), ('three', 3) -``` - -**3.** Query the data: - -``` sql -SELECT * FROM hdfs_engine_table LIMIT 2 -``` - -``` text -┌─name─┬─value─┐ -│ one │ 1 │ -│ two │ 2 │ -└──────┴───────┘ -``` - -## Implementation Details {#implementation-details} - -- Reads and writes can be parallel -- Not supported: - - `ALTER` and `SELECT...SAMPLE` operations. - - Indexes. - - Replication. - -**Globs in path** - -Multiple path components can have globs. For being processed file should exists and matches to the whole path pattern. Listing of files determines during `SELECT` (not at `CREATE` moment). - -- `*` — Substitutes any number of any characters except `/` including empty string. -- `?` — Substitutes any single character. -- `{some_string,another_string,yet_another_one}` — Substitutes any of strings `'some_string', 'another_string', 'yet_another_one'`. -- `{N..M}` — Substitutes any number in range from N to M including both borders (could include leading zeros). - -Constructions with `{}` are similar to the [remote](../../query_language/table_functions/remote.md) table function. - -**Example** - -1. Suppose we have several files in TSV format with the following URIs on HDFS: - -- ‘hdfs://hdfs1:9000/some\_dir/some\_file\_1’ -- ‘hdfs://hdfs1:9000/some\_dir/some\_file\_2’ -- ‘hdfs://hdfs1:9000/some\_dir/some\_file\_3’ -- ‘hdfs://hdfs1:9000/another\_dir/some\_file\_1’ -- ‘hdfs://hdfs1:9000/another\_dir/some\_file\_2’ -- ‘hdfs://hdfs1:9000/another\_dir/some\_file\_3’ - -1. 
There are several ways to make a table consisting of all six files: - - - -``` sql -CREATE TABLE table_with_range (name String, value UInt32) ENGINE = HDFS('hdfs://hdfs1:9000/{some,another}_dir/some_file_{1..3}', 'TSV') -``` - -Another way: - -``` sql -CREATE TABLE table_with_question_mark (name String, value UInt32) ENGINE = HDFS('hdfs://hdfs1:9000/{some,another}_dir/some_file_?', 'TSV') -``` - -Table consists of all the files in both directories (all files should satisfy format and schema described in query): - -``` sql -CREATE TABLE table_with_asterisk (name String, value UInt32) ENGINE = HDFS('hdfs://hdfs1:9000/{some,another}_dir/*', 'TSV') -``` - -!!! warning "Warning" - If the listing of files contains number ranges with leading zeros, use the construction with braces for each digit separately or use `?`. - -**Example** - -Create table with files named `file000`, `file001`, … , `file999`: - -``` sql -CREARE TABLE big_table (name String, value UInt32) ENGINE = HDFS('hdfs://hdfs1:9000/big_dir/file{0..9}{0..9}{0..9}', 'CSV') -``` - -## Virtual Columns {#virtual-columns} - -- `_path` — Path to the file. -- `_file` — Name of the file. - -**See Also** - -- [Virtual columns](https://clickhouse.tech/docs/en/operations/table_engines/#table_engines-virtual_columns) - -[Original article](https://clickhouse.tech/docs/en/operations/table_engines/hdfs/) diff --git a/docs/zh/operations/table_engines/materializedview.md b/docs/zh/operations/table_engines/materializedview.md deleted file mode 100644 index b22700fe3c6..00000000000 --- a/docs/zh/operations/table_engines/materializedview.md +++ /dev/null @@ -1,5 +0,0 @@ -# 物化视图 {#wu-hua-shi-tu} - -物化视图的使用(更多信息请参阅 [CREATE TABLE](../../query_language/create.md) )。它需要使用一个不同的引擎来存储数据,这个引擎要在创建物化视图时指定。当从表中读取时,它就会使用该引擎。 - -[来源文章](https://clickhouse.tech/docs/en/operations/table_engines/materializedview/) diff --git a/docs/zh/operations/table_engines/odbc.md b/docs/zh/operations/table_engines/odbc.md deleted file mode 100644 index 69003623e0a..00000000000 --- a/docs/zh/operations/table_engines/odbc.md +++ /dev/null @@ -1,129 +0,0 @@ ---- -en_copy: true ---- - -# ODBC {#table-engine-odbc} - -Allows ClickHouse to connect to external databases via [ODBC](https://en.wikipedia.org/wiki/Open_Database_Connectivity). - -To safely implement ODBC connections, ClickHouse uses a separate program `clickhouse-odbc-bridge`. If the ODBC driver is loaded directly from `clickhouse-server`, driver problems can crash the ClickHouse server. ClickHouse automatically starts `clickhouse-odbc-bridge` when it is required. The ODBC bridge program is installed from the same package as the `clickhouse-server`. - -This engine supports the [Nullable](../../data_types/nullable.md) data type. - -## Creating a Table {#creating-a-table} - -``` sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] -( - name1 [type1], - name2 [type2], - ... -) -ENGINE = ODBC(connection_settings, external_database, external_table) -``` - -See a detailed description of the [CREATE TABLE](../../query_language/create.md#create-table-query) query. - -The table structure can differ from the source table structure: - -- Column names should be the same as in the source table, but you can use just some of these columns and in any order. -- Column types may differ from those in the source table. ClickHouse tries to [cast](../../query_language/functions/type_conversion_functions.md#type_conversion_function-cast) values to the ClickHouse data types. 
- -**Engine Parameters** - -- `connection_settings` — Name of the section with connection settings in the `odbc.ini` file. -- `external_database` — Name of a database in an external DBMS. -- `external_table` — Name of a table in the `external_database`. - -## Usage Example {#usage-example} - -**Retrieving data from the local MySQL installation via ODBC** - -This example is checked for Ubuntu Linux 18.04 and MySQL server 5.7. - -Ensure that unixODBC and MySQL Connector are installed. - -By default (if installed from packages), ClickHouse starts as user `clickhouse`. Thus, you need to create and configure this user in the MySQL server. - -``` bash -$ sudo mysql -``` - -``` sql -mysql> CREATE USER 'clickhouse'@'localhost' IDENTIFIED BY 'clickhouse'; -mysql> GRANT ALL PRIVILEGES ON *.* TO 'clickhouse'@'clickhouse' WITH GRANT OPTION; -``` - -Then configure the connection in `/etc/odbc.ini`. - -``` bash -$ cat /etc/odbc.ini -[mysqlconn] -DRIVER = /usr/local/lib/libmyodbc5w.so -SERVER = 127.0.0.1 -PORT = 3306 -DATABASE = test -USERNAME = clickhouse -PASSWORD = clickhouse -``` - -You can check the connection using the `isql` utility from the unixODBC installation. - -``` bash -$ isql -v mysqlconn -+---------------------------------------+ -| Connected! | -| | -... -``` - -Table in MySQL: - -``` text -mysql> CREATE TABLE `test`.`test` ( - -> `int_id` INT NOT NULL AUTO_INCREMENT, - -> `int_nullable` INT NULL DEFAULT NULL, - -> `float` FLOAT NOT NULL, - -> `float_nullable` FLOAT NULL DEFAULT NULL, - -> PRIMARY KEY (`int_id`)); -Query OK, 0 rows affected (0,09 sec) - -mysql> insert into test (`int_id`, `float`) VALUES (1,2); -Query OK, 1 row affected (0,00 sec) - -mysql> select * from test; -+--------+--------------+-------+----------------+ -| int_id | int_nullable | float | float_nullable | -+--------+--------------+-------+----------------+ -| 1 | NULL | 2 | NULL | -+--------+--------------+-------+----------------+ -1 row in set (0,00 sec) -``` - -Table in ClickHouse, retrieving data from the MySQL table: - -``` sql -CREATE TABLE odbc_t -( - `int_id` Int32, - `float_nullable` Nullable(Float32) -) -ENGINE = ODBC('DSN=mysqlconn', 'test', 'test') -``` - -``` sql -SELECT * FROM odbc_t -``` - -``` text -┌─int_id─┬─float_nullable─┐ -│ 1 │ ᴺᵁᴸᴸ │ -└────────┴────────────────┘ -``` - -## See Also {#see-also} - -- [ODBC external dictionaries](../../query_language/dicts/external_dicts_dict_sources.md#dicts-external_dicts_dict_sources-odbc) -- [ODBC table function](../../query_language/table_functions/odbc.md) - -[Original article](https://clickhouse.tech/docs/en/operations/table_engines/odbc/) diff --git a/docs/zh/operations/table_engines/versionedcollapsingmergetree.md b/docs/zh/operations/table_engines/versionedcollapsingmergetree.md deleted file mode 100644 index 29f6d44d748..00000000000 --- a/docs/zh/operations/table_engines/versionedcollapsingmergetree.md +++ /dev/null @@ -1,235 +0,0 @@ ---- -en_copy: true ---- - -# VersionedCollapsingMergeTree {#versionedcollapsingmergetree} - -This engine: - -- Allows quick writing of object states that are continually changing. -- Deletes old object states in the background. This significantly reduces the volume of storage. - -See the section [Collapsing](#table_engines_versionedcollapsingmergetree) for details. - -The engine inherits from [MergeTree](mergetree.md#table_engines-mergetree) and adds the logic for collapsing rows to the algorithm for merging data parts. 
`VersionedCollapsingMergeTree` serves the same purpose as [CollapsingMergeTree](collapsingmergetree.md) but uses a different collapsing algorithm that allows inserting the data in any order with multiple threads. In particular, the `Version` column helps to collapse the rows properly even if they are inserted in the wrong order. In contrast, `CollapsingMergeTree` allows only strictly consecutive insertion. - -## Creating a Table {#creating-a-table} - -``` sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] -( - name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], - name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], - ... -) ENGINE = VersionedCollapsingMergeTree(sign, version) -[PARTITION BY expr] -[ORDER BY expr] -[SAMPLE BY expr] -[SETTINGS name=value, ...] -``` - -For a description of query parameters, see the [query description](../../query_language/create.md). - -**Engine Parameters** - -``` sql -VersionedCollapsingMergeTree(sign, version) -``` - -- `sign` — Name of the column with the type of row: `1` is a “state” row, `-1` is a “cancel” row. - - The column data type should be `Int8`. - -- `version` — Name of the column with the version of the object state. - - The column data type should be `UInt*`. - -**Query Clauses** - -When creating a `VersionedCollapsingMergeTree` table, the same [clauses](mergetree.md) are required as when creating a `MergeTree` table. - -
    - -Deprecated Method for Creating a Table - -!!! attention "Attention" - Do not use this method in new projects. If possible, switch the old projects to the method described above. - -``` sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] -( - name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], - name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], - ... -) ENGINE [=] VersionedCollapsingMergeTree(date-column [, sampling_expression], (primary, key), index_granularity, sign, version) -``` - -All of the parameters except `sign` and `version` have the same meaning as in `MergeTree`. - -- `sign` — Name of the column with the type of row: `1` is a “state” row, `-1` is a “cancel” row. - - Column Data Type — `Int8`. - -- `version` — Name of the column with the version of the object state. - - The column data type should be `UInt*`. - -
    - -## Collapsing {#table_engines-versionedcollapsingmergetree} - -### Data {#data} - -Consider a situation where you need to save continually changing data for some object. It is reasonable to have one row for an object and update the row whenever there are changes. However, the update operation is expensive and slow for a DBMS because it requires rewriting the data in the storage. Update is not acceptable if you need to write data quickly, but you can write the changes to an object sequentially as follows. - -Use the `Sign` column when writing the row. If `Sign = 1` it means that the row is a state of an object (let’s call it the “state” row). If `Sign = -1` it indicates the cancellation of the state of an object with the same attributes (let’s call it the “cancel” row). Also use the `Version` column, which should identify each state of an object with a separate number. - -For example, we want to calculate how many pages users visited on some site and how long they were there. At some point in time we write the following row with the state of user activity: - -``` text -┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐ -│ 4324182021466249494 │ 5 │ 146 │ 1 │ 1 | -└─────────────────────┴───────────┴──────────┴──────┴─────────┘ -``` - -At some point later we register the change of user activity and write it with the following two rows. - -``` text -┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐ -│ 4324182021466249494 │ 5 │ 146 │ -1 │ 1 | -│ 4324182021466249494 │ 6 │ 185 │ 1 │ 2 | -└─────────────────────┴───────────┴──────────┴──────┴─────────┘ -``` - -The first row cancels the previous state of the object (user). It should copy all of the fields of the canceled state except `Sign`. - -The second row contains the current state. - -Because we need only the last state of user activity, the rows - -``` text -┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐ -│ 4324182021466249494 │ 5 │ 146 │ 1 │ 1 | -│ 4324182021466249494 │ 5 │ 146 │ -1 │ 1 | -└─────────────────────┴───────────┴──────────┴──────┴─────────┘ -``` - -can be deleted, collapsing the invalid (old) state of the object. `VersionedCollapsingMergeTree` does this while merging the data parts. - -To find out why we need two rows for each change, see [Algorithm](#table_engines-versionedcollapsingmergetree-algorithm). - -**Notes on Usage** - -1. The program that writes the data should remember the state of an object in order to cancel it. The “cancel” string should be a copy of the “state” string with the opposite `Sign`. This increases the initial size of storage but allows to write the data quickly. -2. Long growing arrays in columns reduce the efficiency of the engine due to the load for writing. The more straightforward the data, the better the efficiency. -3. `SELECT` results depend strongly on the consistency of the history of object changes. Be accurate when preparing data for inserting. You can get unpredictable results with inconsistent data, such as negative values for non-negative metrics like session depth. - -### Algorithm {#table_engines-versionedcollapsingmergetree-algorithm} - -When ClickHouse merges data parts, it deletes each pair of rows that have the same primary key and version and different `Sign`. The order of rows does not matter. - -When ClickHouse inserts data, it orders rows by the primary key. If the `Version` column is not in the primary key, ClickHouse adds it to the primary key implicitly as the last field and uses it for ordering. 
- -## Selecting Data {#selecting-data} - -ClickHouse doesn’t guarantee that all of the rows with the same primary key will be in the same resulting data part or even on the same physical server. This is true both for writing the data and for subsequent merging of the data parts. In addition, ClickHouse processes `SELECT` queries with multiple threads, and it cannot predict the order of rows in the result. This means that aggregation is required if there is a need to get completely “collapsed” data from a `VersionedCollapsingMergeTree` table. - -To finalize collapsing, write a query with a `GROUP BY` clause and aggregate functions that account for the sign. For example, to calculate quantity, use `sum(Sign)` instead of `count()`. To calculate the sum of something, use `sum(Sign * x)` instead of `sum(x)`, and add `HAVING sum(Sign) > 0`. - -The aggregates `count`, `sum` and `avg` can be calculated this way. The aggregate `uniq` can be calculated if an object has at least one non-collapsed state. The aggregates `min` and `max` can’t be calculated because `VersionedCollapsingMergeTree` does not save the history of values of collapsed states. - -If you need to extract the data with “collapsing” but without aggregation (for example, to check whether rows are present whose newest values match certain conditions), you can use the `FINAL` modifier for the `FROM` clause. This approach is inefficient and should not be used with large tables. - -## Example of Use {#example-of-use} - -Example data: - -``` text -┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐ -│ 4324182021466249494 │ 5 │ 146 │ 1 │ 1 | -│ 4324182021466249494 │ 5 │ 146 │ -1 │ 1 | -│ 4324182021466249494 │ 6 │ 185 │ 1 │ 2 | -└─────────────────────┴───────────┴──────────┴──────┴─────────┘ -``` - -Creating the table: - -``` sql -CREATE TABLE UAct -( - UserID UInt64, - PageViews UInt8, - Duration UInt8, - Sign Int8, - Version UInt8 -) -ENGINE = VersionedCollapsingMergeTree(Sign, Version) -ORDER BY UserID -``` - -Inserting the data: - -``` sql -INSERT INTO UAct VALUES (4324182021466249494, 5, 146, 1, 1) -``` - -``` sql -INSERT INTO UAct VALUES (4324182021466249494, 5, 146, -1, 1),(4324182021466249494, 6, 185, 1, 2) -``` - -We use two `INSERT` queries to create two different data parts. If we insert the data with a single query, ClickHouse creates one data part and will never perform any merge. - -Getting the data: - -``` sql -SELECT * FROM UAct -``` - -``` text -┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐ -│ 4324182021466249494 │ 5 │ 146 │ 1 │ 1 │ -└─────────────────────┴───────────┴──────────┴──────┴─────────┘ -┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐ -│ 4324182021466249494 │ 5 │ 146 │ -1 │ 1 │ -│ 4324182021466249494 │ 6 │ 185 │ 1 │ 2 │ -└─────────────────────┴───────────┴──────────┴──────┴─────────┘ -``` - -What do we see here and where are the collapsed parts? -We created two data parts using two `INSERT` queries. The `SELECT` query was performed in two threads, and the result is a random order of rows. -Collapsing did not occur because the data parts have not been merged yet. ClickHouse merges data parts at an unknown point in time which we cannot predict. 
- -This is why we need aggregation: - -``` sql -SELECT - UserID, - sum(PageViews * Sign) AS PageViews, - sum(Duration * Sign) AS Duration, - Version -FROM UAct -GROUP BY UserID, Version -HAVING sum(Sign) > 0 -``` - -``` text -┌──────────────UserID─┬─PageViews─┬─Duration─┬─Version─┐ -│ 4324182021466249494 │ 6 │ 185 │ 2 │ -└─────────────────────┴───────────┴──────────┴─────────┘ -``` - -If we don’t need aggregation and want to force collapsing, we can use the `FINAL` modifier for the `FROM` clause. - -``` sql -SELECT * FROM UAct FINAL -``` - -``` text -┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┬─Version─┐ -│ 4324182021466249494 │ 6 │ 185 │ 1 │ 2 │ -└─────────────────────┴───────────┴──────────┴──────┴─────────┘ -``` - -This is a very inefficient way to select data. Don’t use it for large tables. - -[Original article](https://clickhouse.tech/docs/en/operations/table_engines/versionedcollapsingmergetree/) diff --git a/docs/zh/operations/tips.md b/docs/zh/operations/tips.md index 8980d74e8b2..05509399d85 100644 --- a/docs/zh/operations/tips.md +++ b/docs/zh/operations/tips.md @@ -1,126 +1,127 @@ -# Usage Recommendations {#usage-recommendations} + +# 使用建议 {#usage-recommendations} ## CPU {#cpu} -The SSE 4.2 instruction set must be supported. Modern processors (since 2008) support it. +必须支持SSE4.2指令集。 现代处理器(自2008年以来)支持它。 -When choosing a processor, prefer a large number of cores and slightly slower clock rate over fewer cores and a higher clock rate. -For example, 16 cores with 2600 MHz is better than 8 cores with 3600 MHz. +选择处理器时,与较少的内核和较高的时钟速率相比,更喜欢大量内核和稍慢的时钟速率。 +例如,具有2600MHz的16核心比具有3600MHz的8核心更好。 -## Hyper-threading {#hyper-threading} +## 超线程 {#hyper-threading} -Don’t disable hyper-threading. It helps for some queries, but not for others. +不要禁用超线程。 它有助于某些查询,但不适用于其他查询。 -## Turbo Boost {#turbo-boost} +## 涡轮增压 {#turbo-boost} -Turbo Boost is highly recommended. It significantly improves performance with a typical load. -You can use `turbostat` to view the CPU’s actual clock rate under a load. +强烈推荐涡轮增压。 它显着提高了典型负载的性能。 +您可以使用 `turbostat` 要查看负载下的CPU的实际时钟速率。 -## CPU Scaling Governor {#cpu-scaling-governor} +## CPU缩放调控器 {#cpu-scaling-governor} -Always use the `performance` scaling governor. The `on-demand` scaling governor works much worse with constantly high demand. +始终使用 `performance` 缩放调控器。 该 `on-demand` 随着需求的不断增加,缩放调节器的工作要糟糕得多。 ``` bash echo 'performance' | sudo tee /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor ``` -## CPU Limitations {#cpu-limitations} +## CPU限制 {#cpu-limitations} -Processors can overheat. Use `dmesg` to see if the CPU’s clock rate was limited due to overheating. -The restriction can also be set externally at the datacenter level. You can use `turbostat` to monitor it under a load. +处理器可能会过热。 使用 `dmesg` 看看CPU的时钟速率是否由于过热而受到限制。 +此限制也可以在数据中心级别的外部设置。 您可以使用 `turbostat` 在负载下监视它。 ## RAM {#ram} -For small amounts of data (up to ~200 GB compressed), it is best to use as much memory as the volume of data. -For large amounts of data and when processing interactive (online) queries, you should use a reasonable amount of RAM (128 GB or more) so the hot data subset will fit in the cache of pages. -Even for data volumes of ~50 TB per server, using 128 GB of RAM significantly improves query performance compared to 64 GB. +对于少量数据(高达-200GB压缩),最好使用与数据量一样多的内存。 +对于大量数据和处理交互式(在线)查询时,应使用合理数量的RAM(128GB或更多),以便热数据子集适合页面缓存。 +即使对于每台服务器约50TB的数据量,使用128GB的RAM与64GB相比显着提高了查询性能。 -## Swap File {#swap-file} +## 交换文件 {#swap-file} -Always disable the swap file. 
The only reason for not doing this is if you are using ClickHouse on your personal laptop. +始终禁用交换文件。 不这样做的唯一原因是,如果您使用的ClickHouse在您的个人笔记本电脑。 -## Huge Pages {#huge-pages} +## 巨大的页面 {#huge-pages} -Always disable transparent huge pages. It interferes with memory allocators, which leads to significant performance degradation. +始终禁用透明巨大的页面。 它会干扰内存分alloc,从而导致显着的性能下降。 ``` bash echo 'never' | sudo tee /sys/kernel/mm/transparent_hugepage/enabled ``` -Use `perf top` to watch the time spent in the kernel for memory management. -Permanent huge pages also do not need to be allocated. +使用 `perf top` 观看内核中用于内存管理的时间。 +永久巨大的页面也不需要被分配。 -## Storage Subsystem {#storage-subsystem} +## 存储子系统 {#storage-subsystem} -If your budget allows you to use SSD, use SSD. -If not, use HDD. SATA HDDs 7200 RPM will do. +如果您的预算允许您使用SSD,请使用SSD。 +如果没有,请使用硬盘。 SATA硬盘7200转就行了。 -Give preference to a lot of servers with local hard drives over a smaller number of servers with attached disk shelves. -But for storing archives with rare queries, shelves will work. +优先选择带有本地硬盘驱动器的大量服务器,而不是带有附加磁盘架的小量服务器。 +但是对于存储具有罕见查询的档案,货架将起作用。 ## RAID {#raid} -When using HDD, you can combine their RAID-10, RAID-5, RAID-6 or RAID-50. -For Linux, software RAID is better (with `mdadm`). We don’t recommend using LVM. -When creating RAID-10, select the `far` layout. -If your budget allows, choose RAID-10. +当使用硬盘,你可以结合他们的RAID-10,RAID-5,RAID-6或RAID-50。 +对于Linux,软件RAID更好(与 `mdadm`). 我们不建议使用LVM。 +当创建RAID-10,选择 `far` 布局。 +如果您的预算允许,请选择RAID-10。 -If you have more than 4 disks, use RAID-6 (preferred) or RAID-50, instead of RAID-5. -When using RAID-5, RAID-6 or RAID-50, always increase stripe\_cache\_size, since the default value is usually not the best choice. +如果您有超过4个磁盘,请使用RAID-6(首选)或RAID-50,而不是RAID-5。 +当使用RAID-5、RAID-6或RAID-50时,始终增加stripe\_cache\_size,因为默认值通常不是最佳选择。 ``` bash echo 4096 | sudo tee /sys/block/md2/md/stripe_cache_size ``` -Calculate the exact number from the number of devices and the block size, using the formula: `2 * num_devices * chunk_size_in_bytes / 4096`. +使用以下公式,从设备数量和块大小计算确切数量: `2 * num_devices * chunk_size_in_bytes / 4096`. -A block size of 1025 KB is sufficient for all RAID configurations. -Never set the block size too small or too large. +1025KB的块大小足以满足所有RAID配置。 +切勿将块大小设置得太小或太大。 -You can use RAID-0 on SSD. -Regardless of RAID use, always use replication for data security. +您可以在SSD上使用RAID-0。 +无论使用何种RAID,始终使用复制来保证数据安全。 -Enable NCQ with a long queue. For HDD, choose the CFQ scheduler, and for SSD, choose noop. Don’t reduce the ‘readahead’ setting. -For HDD, enable the write cache. +使用长队列启用NCQ。 对于HDD,选择CFQ调度程序,对于SSD,选择noop。 不要减少 ‘readahead’ 设置。 +对于HDD,启用写入缓存。 -## File System {#file-system} +## 文件系统 {#file-system} -Ext4 is the most reliable option. Set the mount options `noatime, nobarrier`. -XFS is also suitable, but it hasn’t been as thoroughly tested with ClickHouse. -Most other file systems should also work fine. File systems with delayed allocation work better. +Ext4是最可靠的选择。 设置挂载选项 `noatime, nobarrier`. +XFS也是合适的,但它还没有经过ClickHouse的彻底测试。 +大多数其他文件系统也应该正常工作。 具有延迟分配的文件系统工作得更好。 -## Linux Kernel {#linux-kernel} +## Linux内核 {#linux-kernel} -Don’t use an outdated Linux kernel. +不要使用过时的Linux内核。 -## Network {#network} +## 网络 {#network} -If you are using IPv6, increase the size of the route cache. -The Linux kernel prior to 3.2 had a multitude of problems with IPv6 implementation. +如果您使用的是IPv6,请增加路由缓存的大小。 +3.2之前的Linux内核在IPv6实现方面遇到了许多问题。 -Use at least a 10 GB network, if possible. 

-A block size of 1025 KB is sufficient for all RAID configurations.
-Never set the block size too small or too large.
+1025KB的块大小足以满足所有RAID配置。
+切勿将块大小设置得太小或太大。

-You can use RAID-0 on SSD.
-Regardless of RAID use, always use replication for data security.
+您可以在SSD上使用RAID-0。
+无论使用何种RAID,都要始终使用复制来保证数据安全。

-Enable NCQ with a long queue. For HDD, choose the CFQ scheduler, and for SSD, choose noop. Don’t reduce the ‘readahead’ setting.
-For HDD, enable the write cache.
+启用长队列的NCQ。对于HDD,选择CFQ调度器;对于SSD,选择noop。不要调低 ‘readahead’ 设置。
+对于HDD,启用写缓存。

-## File System {#file-system}
+## 文件系统 {#file-system}

-Ext4 is the most reliable option. Set the mount options `noatime, nobarrier`.
-XFS is also suitable, but it hasn’t been as thoroughly tested with ClickHouse.
-Most other file systems should also work fine. File systems with delayed allocation work better.
+Ext4是最可靠的选择。设置挂载选项 `noatime, nobarrier`。
+XFS也合适,但它在ClickHouse上还没有经过同样彻底的测试。
+大多数其他文件系统也应该可以正常工作。具有延迟分配的文件系统效果更好。

-## Linux Kernel {#linux-kernel}
+## Linux内核 {#linux-kernel}

-Don’t use an outdated Linux kernel.
+不要使用过时的Linux内核。

-## Network {#network}
+## 网络 {#network}

-If you are using IPv6, increase the size of the route cache.
-The Linux kernel prior to 3.2 had a multitude of problems with IPv6 implementation.
+如果您使用的是IPv6,请增加路由缓存的大小。
+3.2之前的Linux内核在IPv6实现方面有许多问题。

-Use at least a 10 GB network, if possible. 1 Gb will also work, but it will be much worse for patching replicas with tens of terabytes of data, or for processing distributed queries with a large amount of intermediate data.
+如果可能,请至少使用10GB网络。1Gb也可以工作,但在为副本补齐数十TB数据、或处理带有大量中间数据的分布式查询时,效果会差得多。

-## ZooKeeper {#zookeeper}
+## ZooKeeper {#zookeeper}

-You are probably already using ZooKeeper for other purposes. You can use the same installation of ZooKeeper, if it isn’t already overloaded.
+您可能已经出于其他目的在使用ZooKeeper。如果现有的ZooKeeper安装尚未过载,可以复用同一套安装。

 It's best to use a fresh version of ZooKeeper – 3.4.9 or later. The version in stable Linux distributions may be outdated.

 You should never use manually written scripts to transfer data between different ZooKeeper clusters, because the result will be incorrect for sequential nodes. Never use the «zkcopy» utility for the same reason: https://github.com/ksprojects/zkcopy/issues/15

-If you want to divide an existing ZooKeeper cluster into two, the correct way is to increase the number of its replicas and then reconfigure it as two independent clusters.
+如果要将现有的ZooKeeper集群一分为二,正确的做法是先增加其副本数量,然后将其重新配置为两个独立的集群。

-Do not run ZooKeeper on the same servers as ClickHouse. Because ZooKeeper is very sensitive for latency and ClickHouse may utilize all available system resources.
+不要在与ClickHouse相同的服务器上运行ZooKeeper。因为ZooKeeper对延迟非常敏感,而ClickHouse可能会占用所有可用的系统资源。

-With the default settings, ZooKeeper is a time bomb:
+使用默认设置时,ZooKeeper就是一个定时炸弹:

-> The ZooKeeper server won’t delete files from old snapshots and logs when using the default configuration (see autopurge), and this is the responsibility of the operator.
+> 使用默认配置时,ZooKeeper服务器不会删除旧快照和日志文件(请参阅autopurge),这是操作员的责任。

-This bomb must be defused.
+必须拆除这个炸弹。
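+
+A hedged sketch of the `autopurge` settings referred to above (values are illustrative; both keys are standard ZooKeeper configuration):
+
+``` bash
+autopurge.snapRetainCount=3
+autopurge.purgeInterval=1
+```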

-The ZooKeeper (3.5.1) configuration below is used in the Yandex.Metrica production environment as of May 20, 2017:
+下面的ZooKeeper(3.5.1)配置截至2017年5月20日用于Yandex.Metrica生产环境:

 zoo.cfg:

 ``` bash
 # http://hadoop.apache.org/zookeeper/docs/current/zookeeperAdmin.html
@@ -176,12 +177,12 @@ standaloneEnabled=false
 dynamicConfigFile=/etc/zookeeper-{{ '{{' }} cluster['name'] {{ '}}' }}/conf/zoo.cfg.dynamic
 ```

-Java version:
+Java版本:

     Java(TM) SE Runtime Environment (build 1.8.0_25-b17)
     Java HotSpot(TM) 64-Bit Server VM (build 25.25-b02, mixed mode)

-JVM parameters:
+JVM参数:

 ``` bash
 NAME=zookeeper-{{ '{{' }} cluster['name'] {{ '}}' }}
@@ -222,7 +223,7 @@ JAVA_OPTS="-Xms{{ '{{' }} cluster.get('xms','128M') {{ '}}' }} \
     -XX:+CMSParallelRemarkEnabled"
 ```

 Salt init:

     description "zookeeper-{{ '{{' }} cluster['name'] {{ '}}' }} centralized coordination service"

@@ -251,4 +252,4 @@ Salt init:
     -Dzookeeper.root.logger=${ZOO_LOG4J_PROP} $ZOOMAIN $ZOOCFG
 end script

-[Original article](https://clickhouse.tech/docs/en/operations/tips/)
+[原始文章](https://clickhouse.tech/docs/en/operations/tips/)
diff --git a/docs/zh/operations/troubleshooting.md b/docs/zh/operations/troubleshooting.md
index d48e2b4b7f6..db7bf6c6bb9 100644
--- a/docs/zh/operations/troubleshooting.md
+++ b/docs/zh/operations/troubleshooting.md
@@ -1,66 +1,69 @@
 ---
-en_copy: true
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_priority: 46
+toc_title: "\u7591\u96BE\u89E3\u7B54"
 ---

-# Troubleshooting {#troubleshooting}
+# 疑难解答 {#troubleshooting}

-- [Installation](#troubleshooting-installation-errors)
-- [Connecting to the server](#troubleshooting-accepts-no-connections)
-- [Query processing](#troubleshooting-does-not-process-queries)
-- [Efficiency of query processing](#troubleshooting-too-slow)
+- [安装](#troubleshooting-installation-errors)
+- [连接到服务器](#troubleshooting-accepts-no-connections)
+- [查询处理](#troubleshooting-does-not-process-queries)
+- [查询处理效率](#troubleshooting-too-slow)

-## Installation {#troubleshooting-installation-errors}
+## 安装 {#troubleshooting-installation-errors}

-### You Cannot Get Deb Packages from ClickHouse Repository With apt-get {#you-cannot-get-deb-packages-from-clickhouse-repository-with-apt-get}
+### 您无法使用apt-get从ClickHouse存储库获取Deb软件包 {#you-cannot-get-deb-packages-from-clickhouse-repository-with-apt-get}

-- Check firewall settings.
-- If you cannot access the repository for any reason, download packages as described in the [Getting started](../getting_started/index.md) article and install them manually using the `sudo dpkg -i <packages>` command. You will also need the `tzdata` package.
+- 检查防火墙设置。
+- 如果由于某种原因无法访问存储库,请按照 [开始](../getting_started/index.md) 一文中的说明下载软件包,然后使用 `sudo dpkg -i <packages>` 命令手动安装。您还需要 `tzdata` 包。

-## Connecting to the Server {#troubleshooting-accepts-no-connections}
+## 连接到服务器 {#troubleshooting-accepts-no-connections}

-Possible issues:
+可能出现的问题:

-- The server is not running.
-- Unexpected or wrong configuration parameters.
+- 服务器未运行。
+- 意外或错误的配置参数。

-### Server Is Not Running {#server-is-not-running}
+### 服务器未运行 {#server-is-not-running}

-**Check if server is running**
+**检查服务器是否正在运行**

-Command:
+命令:

 ``` bash
 $ sudo service clickhouse-server status
 ```

-If the server is not running, start it with the command:
+如果服务器没有运行,请使用以下命令启动它:

 ``` bash
 $ sudo service clickhouse-server start
 ```

-**Check logs**
+**检查日志**

-The main log of `clickhouse-server` is in `/var/log/clickhouse-server/clickhouse-server.log` by default.
+默认情况下,`clickhouse-server` 的主日志位于 `/var/log/clickhouse-server/clickhouse-server.log`。

-If the server started successfully, you should see the strings:
+如果服务器成功启动,您应该看到如下字符串:

 - `<Information> Application: starting up.` — Server started.
 - `<Information> Application: Ready for connections.` — Server is running and ready for connections.

-If `clickhouse-server` start failed with a configuration error, you should see the `<Error>` string with an error description. For example:
+如果 `clickhouse-server` 因配置错误启动失败,您应该看到带有错误描述的 `<Error>` 字符串。例如:

 ``` text
 2019.01.11 15:23:25.549505 [ 45 ] {} <Error> ExternalDictionaries: Failed reloading 'event2id' external dictionary: Poco::Exception. Code: 1000, e.code() = 111, e.displayText() = Connection refused, e.what() = Connection refused
 ```

-If you don’t see an error at the end of the file, look through the entire file starting from the string:
+如果在文件末尾没有看到错误,请从以下字符串开始查看整个文件:

 ``` text
 Application: starting up.
 ```

-If you try to start a second instance of `clickhouse-server` on the server, you see the following log:
+如果您尝试在服务器上启动第二个 `clickhouse-server` 实例,您将看到以下日志:

 ``` text
 2019.01.11 15:25:11.151730 [ 1 ] {} <Information> : Starting ClickHouse 19.1.0 with revision 54413
@@ -76,68 +79,68 @@ Revision: 54413
 2019.01.11 15:25:11.156716 [ 2 ] {} <Information> BaseDaemon: Stop SignalListener thread
 ```

-**See system.d logs**
+**查看system.d日志**

-If you don’t find any useful information in `clickhouse-server` logs or there aren’t any logs, you can view `system.d` logs using the command:
+如果在 `clickhouse-server` 日志中找不到任何有用的信息,或者根本没有日志,可以使用以下命令查看 `system.d` 日志:

 ``` bash
 $ sudo journalctl -u clickhouse-server
 ```

-**Start clickhouse-server in interactive mode**
+**在交互模式下启动clickhouse-server**

 ``` bash
 $ sudo -u clickhouse /usr/bin/clickhouse-server --config-file /etc/clickhouse-server/config.xml
 ```

-This command starts the server as an interactive app with standard parameters of the autostart script. In this mode `clickhouse-server` prints all the event messages in the console.
+此命令以交互式应用程序的形式启动服务器,并使用自动启动脚本的标准参数。在这种模式下,`clickhouse-server` 会在控制台打印所有事件消息。

-### Configuration Parameters {#configuration-parameters}
+### 配置参数 {#configuration-parameters}

-Check:
+检查:

-- Docker settings.
+- Docker设置。

-    If you run ClickHouse in Docker in an IPv6 network, make sure that `network=host` is set.
+    如果在IPv6网络的Docker中运行ClickHouse,请确保已设置 `network=host`。

-- Endpoint settings.
+- 端点设置。

-    Check [listen\_host](server_settings/settings.md#server_settings-listen_host) and [tcp\_port](server_settings/settings.md#server_settings-tcp_port) settings.
+    检查 [listen\_host](server_configuration_parameters/settings.md#server_configuration_parameters-listen_host) 和 [tcp\_port](server_configuration_parameters/settings.md#server_configuration_parameters-tcp_port) 设置。(另见此检查列表之后的连通性检查示例。)

-    ClickHouse server accepts localhost connections only by default.
+    ClickHouse服务器默认仅接受localhost连接。

-- HTTP protocol settings.
+- HTTP协议设置。

-    Check protocol settings for the HTTP API.
+    检查HTTP API的协议设置。

-- Secure connection settings.
+- 安全连接设置。

-    Check:
+    检查:

-    - The [tcp\_port\_secure](server_settings/settings.md#server_settings-tcp_port_secure) setting.
-    - Settings for [SSL certificates](server_settings/settings.md#server_settings-openssl).
+    - [tcp\_port\_secure](server_configuration_parameters/settings.md#server_configuration_parameters-tcp_port_secure) 设置。
+    - [SSL证书](server_configuration_parameters/settings.md#server_configuration_parameters-openssl) 的设置。

-    Use proper parameters while connecting. For example, use the `port_secure` parameter with `clickhouse_client`.
+    连接时使用正确的参数。例如,在 `clickhouse_client` 中使用 `port_secure` 参数。

-- User settings.
+- 用户设置。

-    You might be using the wrong user name or password.
+    您可能使用了错误的用户名或密码。
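+
+A hedged sketch of quick end-to-end connectivity checks (illustrative commands; `/ping` is the standard HTTP health handler):
+
+``` bash
+curl http://localhost:8123/ping          # expects "Ok." if the HTTP interface is up
+clickhouse-client --query "SELECT 1"     # checks the native TCP interface
+```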

-## Query Processing {#troubleshooting-does-not-process-queries}
+## 查询处理 {#troubleshooting-does-not-process-queries}

-If ClickHouse is not able to process the query, it sends an error description to the client. In the `clickhouse-client` you get a description of the error in the console. If you are using the HTTP interface, ClickHouse sends the error description in the response body. For example:
+如果ClickHouse无法处理查询,它会向客户端发送错误描述。在 `clickhouse-client` 中,您会在控制台看到错误描述。如果使用HTTP接口,ClickHouse会在响应正文中发送错误描述。例如:

 ``` bash
 $ curl 'http://localhost:8123/' --data-binary "SELECT a"
 Code: 47, e.displayText() = DB::Exception: Unknown identifier: a. Note that there are no tables (FROM clause) in your query, context: required_names: 'a' source_tables: table_aliases: private_aliases: column_aliases: public_columns: 'a' masked_columns: array_join_columns: source_columns: , e.what() = DB::Exception
 ```

-If you start `clickhouse-client` with the `stack-trace` parameter, ClickHouse returns the server stack trace with the description of an error.
+如果使用 `stack-trace` 参数启动 `clickhouse-client`,ClickHouse会返回包含错误描述的服务器堆栈跟踪。

-You might see a message about a broken connection. In this case, you can repeat the query. If the connection breaks every time you perform the query, check the server logs for errors.
+您可能会看到关于连接中断的消息。在这种情况下,可以重复查询。如果每次执行查询时连接都中断,请检查服务器日志中是否存在错误。

-## Efficiency of Query Processing {#troubleshooting-too-slow}
+## 查询处理效率 {#troubleshooting-too-slow}

-If you see that ClickHouse is working too slowly, you need to profile the load on the server resources and network for your queries.
+如果您发现ClickHouse运行速度太慢,则需要针对您的查询分析服务器资源和网络的负载。

-You can use the clickhouse-benchmark utility to profile queries. It shows the number of queries processed per second, the number of rows processed per second, and percentiles of query processing times.
+您可以使用clickhouse-benchmark实用程序来分析查询。它会显示每秒处理的查询数、每秒处理的行数以及查询处理时间的百分位数。
diff --git a/docs/zh/operations/update.md b/docs/zh/operations/update.md
index b09eb707e77..a465a8110eb 100644
--- a/docs/zh/operations/update.md
+++ b/docs/zh/operations/update.md
@@ -1,10 +1,13 @@
 ---
-en_copy: true
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_priority: 47
+toc_title: "ClickHouse\u66F4\u65B0"
 ---

-# ClickHouse Update {#clickhouse-update}
+# ClickHouse更新 {#clickhouse-update}

-If ClickHouse was installed from deb packages, execute the following commands on the server:
+如果ClickHouse是从deb包安装的,请在服务器上执行以下命令:

 ``` bash
 $ sudo apt-get update
@@ -12,6 +15,6 @@ $ sudo apt-get install clickhouse-client clickhouse-server
 $ sudo service clickhouse-server restart
 ```

-If you installed ClickHouse using something other than the recommended deb packages, use the appropriate update method.
+如果您不是通过推荐的deb包安装的ClickHouse,请使用相应的更新方法。

-ClickHouse does not support a distributed update. The operation should be performed consecutively on each separate server. Do not update all the servers on a cluster simultaneously, or the cluster will be unavailable for some time.
+ClickHouse不支持分布式更新。该操作应在每台服务器上依次执行。不要同时更新集群上的所有服务器,否则集群将在一段时间内不可用。
diff --git a/docs/zh/operations/utils/clickhouse-benchmark.md b/docs/zh/operations/utilities/clickhouse-benchmark.md
similarity index 50%
rename from docs/zh/operations/utils/clickhouse-benchmark.md
rename to docs/zh/operations/utilities/clickhouse-benchmark.md
index 1d8ac3dec46..809f4ebe2a1 100644
--- a/docs/zh/operations/utils/clickhouse-benchmark.md
+++ b/docs/zh/operations/utilities/clickhouse-benchmark.md
@@ -1,63 +1,66 @@
 ---
-en_copy: true
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_priority: 61
+toc_title: "clickhouse-benchmark"
 ---

 # clickhouse-benchmark {#clickhouse-benchmark}

-Connects to a ClickHouse server and repeatedly sends specified queries.
+连接到ClickHouse服务器并重复发送指定的查询。

-Syntax:
+语法:

 ``` bash
 $ echo "single query" | clickhouse-benchmark [keys]
 ```

-or
+或

 ``` bash
 $ clickhouse-benchmark [keys] <<< "single query"
 ```

-If you want to send a set of queries, create a text file and place each query on the individual string in this file. For example:
+如果要发送一组查询,请创建一个文本文件,并将每个查询单独放在文件中的一行。例如:

 ``` sql
 SELECT * FROM system.numbers LIMIT 10000000
 SELECT 1
 ```

-Then pass this file to a standard input of `clickhouse-benchmark`.
+然后将此文件传递给 `clickhouse-benchmark` 的标准输入。

 ``` bash
 clickhouse-benchmark [keys] < queries_file
 ```

-## Keys {#clickhouse-benchmark-keys}
+## 选项 {#clickhouse-benchmark-keys}

-- `-c N`, `--concurrency=N` — Number of queries that `clickhouse-benchmark` sends simultaneously. Default value: 1.
+- `-c N`, `--concurrency=N` — `clickhouse-benchmark` 同时发送的查询数。默认值:1。
 - `-d N`, `--delay=N` — Interval in seconds between intermediate reports (set 0 to disable reports). Default value: 1.
-- `-h WORD`, `--host=WORD` — Server host. Default value: `localhost`. For the [comparison mode](#clickhouse-benchmark-comparison-mode) you can use multiple `-h` keys.
-- `-p N`, `--port=N` — Server port. Default value: 9000. For the [comparison mode](#clickhouse-benchmark-comparison-mode) you can use multiple `-p` keys.
+- `-h WORD`, `--host=WORD` — Server host. Default value: `localhost`. 对于 [比较模式](#clickhouse-benchmark-comparison-mode),可以使用多个 `-h` 选项。
+- `-p N`, `--port=N` — Server port. Default value: 9000. 对于 [比较模式](#clickhouse-benchmark-comparison-mode),可以使用多个 `-p` 选项。
 - `-i N`, `--iterations=N` — Total number of queries. Default value: 0.
 - `-r`, `--randomize` — Random order of queries execution if there is more than one input query.
 - `-s`, `--secure` — Using TLS connection.
-- `-t N`, `--timelimit=N` — Time limit in seconds. `clickhouse-benchmark` stops sending queries when the specified time limit is reached. Default value: 0 (time limit disabled).
+- `-t N`, `--timelimit=N` — Time limit in seconds. `clickhouse-benchmark` 在达到指定的时间限制时停止发送查询。默认值:0(禁用时间限制)。
-- `--confidence=N` — Level of confidence for T-test. Possible values: 0 (80%), 1 (90%), 2 (95%), 3 (98%), 4 (99%), 5 (99.5%). Default value: 5. In the [comparison mode](#clickhouse-benchmark-comparison-mode) `clickhouse-benchmark` performs the [Independent two-sample Student’s t-test](https://en.wikipedia.org/wiki/Student%27s_t-test#Independent_two-sample_t-test) to determine whether the two distributions aren’t different with the selected level of confidence.
+- `--confidence=N` — Level of confidence for T-test. Possible values: 0 (80%), 1 (90%), 2 (95%), 3 (98%), 4 (99%), 5 (99.5%). Default value: 5. 在 [比较模式](#clickhouse-benchmark-comparison-mode) 下,`clickhouse-benchmark` 会执行 [独立双样本Student t检验](https://en.wikipedia.org/wiki/Student%27s_t-test#Independent_two-sample_t-test),以判断在所选置信水平下两个分布是否没有差异。
 - `--cumulative` — Printing cumulative data instead of data per interval.
 - `--database=DATABASE_NAME` — ClickHouse database name. Default value: `default`.
-- `--json=FILEPATH` — JSON output. When the key is set, `clickhouse-benchmark` outputs a report to the specified JSON-file.
+- `--json=FILEPATH` — JSON output. When the key is set, `clickhouse-benchmark` 会将报告输出到指定的JSON文件。
 - `--user=USERNAME` — ClickHouse user name. Default value: `default`.
 - `--password=PSWD` — ClickHouse user password. Default value: empty string.
-- `--stacktrace` — Stack traces output. When the key is set, `clickhouse-benchmark` outputs stack traces of exceptions.
+- `--stacktrace` — Stack traces output. When the key is set, `clickhouse-benchmark` 会输出异常的堆栈跟踪。
-- `--stage=WORD` — Query processing stage at server. ClickHouse stops query processing and returns answer to `clickhouse-benchmark` at the specified stage. Possible values: `complete`, `fetch_columns`, `with_mergeable_state`. Default value: `complete`.
+- `--stage=WORD` — Query processing stage at server. ClickHouse会在指定的阶段停止查询处理并将结果返回给 `clickhouse-benchmark`。可能的值:`complete`、`fetch_columns`、`with_mergeable_state`。默认值:`complete`。
 - `--help` — Shows the help message.

-If you want to apply some [settings](../../operations/settings/index.md) for queries, pass them as a key `--<session setting name>= SETTING_VALUE`. For example, `--max_memory_usage=1048576`.
+如果要对查询应用某些 [设置](../../operations/settings/index.md),请将它们作为选项传递:`--<session setting name>= SETTING_VALUE`。例如,`--max_memory_usage=1048576`。
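+
+For example, a hedged end-to-end sketch combining the pieces above (query and values are illustrative):
+
+``` bash
+echo "SELECT * FROM system.numbers LIMIT 10000000" | clickhouse-benchmark -i 10 --max_memory_usage=1048576
+```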

-## Output {#clickhouse-benchmark-output}
+## 输出 {#clickhouse-benchmark-output}

-By default, `clickhouse-benchmark` reports for each `--delay` interval.
+默认情况下,`clickhouse-benchmark` 在每个 `--delay` 间隔输出一次报告。

-Example of the report:
+报告示例:

 ``` text
 Queries executed: 10.
@@ -80,29 +83,29 @@ localhost:9000, queries 10, QPS: 6.772, RPS: 67904487.440, MiB/s: 518.070, resul

 99.990%     0.150 sec.
 ```

-In the report you can find:
+在报告中,您可以找到:

-- Number of queries in the `Queries executed:` field.
+- `Queries executed:` 字段中的查询数量。

-- Status string containing (in order):
+- 状态字符串,按顺序包含:

-    - Endpoint of ClickHouse server.
-    - Number of processed queries.
-    - QPS: How many queries the server performed per second during a period specified in the `--delay` argument.
-    - RPS: How many rows the server read per second during a period specified in the `--delay` argument.
-    - MiB/s: How many mebibytes the server read per second during a period specified in the `--delay` argument.
-    - result RPS: How many rows the server placed into the result of a query per second during a period specified in the `--delay` argument.
-    - result MiB/s: How many mebibytes the server placed into the result of a query per second during a period specified in the `--delay` argument.
+    - ClickHouse服务器的端点。
+    - 已处理的查询数。
+    - QPS:在 `--delay` 参数指定的时间段内,服务器每秒执行的查询数。
+    - RPS:在 `--delay` 参数指定的时间段内,服务器每秒读取的行数。
+    - MiB/s:在 `--delay` 参数指定的时间段内,服务器每秒读取的mebibytes数。
+    - result RPS:在 `--delay` 参数指定的时间段内,服务器每秒写入查询结果的行数。
+    - result MiB/s:在 `--delay` 参数指定的时间段内,服务器每秒写入查询结果的mebibytes数。

-- Percentiles of queries execution time.
+- 查询执行时间的百分位数。

-## Comparison mode {#clickhouse-benchmark-comparison-mode}
+## 比较模式 {#clickhouse-benchmark-comparison-mode}

-`clickhouse-benchmark` can compare performances for two running ClickHouse servers.
+`clickhouse-benchmark` 可以比较两个正在运行的ClickHouse服务器的性能。

-To use the comparison mode, specify endpoints of both servers by two pairs of `--host`, `--port` keys. Keys matched together by position in arguments list, the first `--host` is matched with the first `--port` and so on. `clickhouse-benchmark` establishes connections to both servers, then sends queries. Each query addressed to a randomly selected server. The results are shown for each server separately.
+要使用比较模式,请通过两对 `--host`、`--port` 选项指定两个服务器的端点。选项按其在参数列表中的位置配对:第一个 `--host` 与第一个 `--port` 配对,依此类推。`clickhouse-benchmark` 会建立到两个服务器的连接,然后发送查询。每个查询都发往随机选择的服务器。结果按服务器分别显示。

-## Example {#clickhouse-benchmark-example}
+## 示例 {#clickhouse-benchmark-example}

 ``` bash
 $ echo "SELECT * FROM system.numbers LIMIT 10000000 OFFSET 10000000" | clickhouse-benchmark -i 10
diff --git a/docs/zh/operations/utils/clickhouse-copier.md b/docs/zh/operations/utilities/clickhouse-copier.md
similarity index 76%
rename from docs/zh/operations/utils/clickhouse-copier.md
rename to docs/zh/operations/utilities/clickhouse-copier.md
index 1a1b8599dba..9e982188499 100644
--- a/docs/zh/operations/utils/clickhouse-copier.md
+++ b/docs/zh/operations/utilities/clickhouse-copier.md
@@ -1,40 +1,41 @@
 # clickhouse-copier {#clickhouse-copier}

-Copies data from the tables in one cluster to tables in another (or the same) cluster.
+将数据从一个集群中的表复制到另一个(或同一个)集群中的表。

-You can run multiple `clickhouse-copier` instances on different servers to perform the same job. ZooKeeper is used for syncing the processes.
+您可以在不同的服务器上运行多个 `clickhouse-copier` 实例来执行同一项作业。ZooKeeper用于同步这些进程。

-After starting, `clickhouse-copier`:
+启动后,`clickhouse-copier`:

-- Connects to ZooKeeper and receives:
+- 连接到ZooKeeper并接收:

-    - Copying jobs.
-    - The state of the copying jobs.
+    - 复制作业。
+    - 复制作业的状态。

-- It performs the jobs.
+- 执行这些作业。

     Each running process chooses the "closest" shard of the source cluster and copies the data into the destination cluster, resharding the data if necessary.

-`clickhouse-copier` tracks the changes in ZooKeeper and applies them on the fly.
+`clickhouse-copier` 会跟踪ZooKeeper中的更改,并实时应用它们。

-To reduce network traffic, we recommend running `clickhouse-copier` on the same server where the source data is located.
+为了减少网络流量,我们建议在源数据所在的服务器上运行 `clickhouse-copier`。

-## Running clickhouse-copier {#running-clickhouse-copier}
+## 运行clickhouse-copier {#running-clickhouse-copier}

-The utility should be run manually:
+该实用程序应手动运行:

 ``` bash
 clickhouse-copier copier --daemon --config zookeeper.xml --task-path /task/path --base-dir /path/to/dir
 ```

-Parameters:
+参数:

-- `daemon` — Starts `clickhouse-copier` in daemon mode.
-- `config` — The path to the `zookeeper.xml` file with the parameters for the connection to ZooKeeper.
-- `task-path` — The path to the ZooKeeper node. This node is used for syncing `clickhouse-copier` processes and storing tasks. Tasks are stored in `$task-path/description`.
-- `base-dir` — The path to logs and auxiliary files. When it starts, `clickhouse-copier` creates `clickhouse-copier_YYYYMMHHSS_<PID>` subdirectories in `$base-dir`. If this parameter is omitted, the directories are created in the directory where `clickhouse-copier` was launched.
+- `daemon` — 以守护进程模式启动 `clickhouse-copier`。
+- `config` — `zookeeper.xml` 文件的路径,其中包含连接ZooKeeper的参数。
+- `task-path` — ZooKeeper节点的路径。该节点用于同步 `clickhouse-copier` 进程和存储任务。任务存储在 `$task-path/description` 中。
+- `base-dir` — 日志和辅助文件的路径。启动时,`clickhouse-copier` 会在 `$base-dir` 中创建 `clickhouse-copier_YYYYMMHHSS_<PID>` 子目录。如果省略此参数,则在启动 `clickhouse-copier` 的目录中创建这些目录。

-## Format of zookeeper.xml {#format-of-zookeeper-xml}
+## zookeeper.xml的格式 {#format-of-zookeeper-xml}

 ``` xml
@@ -53,7 +54,7 @@ Parameters:
 ```

-## Configuration of copying tasks {#configuration-of-copying-tasks}
+## 复制任务的配置 {#configuration-of-copying-tasks}

 ``` xml
@@ -162,6 +163,6 @@ Parameters:
 ```

-`clickhouse-copier` tracks the changes in `/task/path/description` and applies them on the fly. For instance, if you change the value of `max_workers`, the number of processes running tasks will also change.
+`clickhouse-copier` 会跟踪 `/task/path/description` 中的更改并实时应用。例如,如果更改 `max_workers` 的值,运行任务的进程数也会随之变化。

-[Original article](https://clickhouse.tech/docs/en/operations/utils/clickhouse-copier/)
+[原始文章](https://clickhouse.tech/docs/en/operations/utils/clickhouse-copier/)
diff --git a/docs/zh/operations/utils/clickhouse-local.md b/docs/zh/operations/utilities/clickhouse-local.md
similarity index 55%
rename from docs/zh/operations/utils/clickhouse-local.md
rename to docs/zh/operations/utilities/clickhouse-local.md
index 159e914f446..e29d8f6c4ac 100644
--- a/docs/zh/operations/utils/clickhouse-local.md
+++ b/docs/zh/operations/utilities/clickhouse-local.md
@@ -1,41 +1,41 @@
 # clickhouse-local {#clickhouse-local}

-The `clickhouse-local` program enables you to perform fast processing on local files, without having to deploy and configure the ClickHouse server.
+`clickhouse-local` 程序使您能够对本地文件进行快速处理,而无需部署和配置ClickHouse服务器。

-Accepts data that represent tables and queries them using [ClickHouse SQL dialect](../../query_language/index.md).
+接受表示表的数据,并使用 [ClickHouse SQL方言](../../query_language/index.md) 查询它们。

-`clickhouse-local` uses the same core as ClickHouse server, so it supports most of the features and the same set of formats and table engines.
+`clickhouse-local` 使用与ClickHouse server相同的核心,因此它支持大多数功能以及相同的格式和表引擎。

-By default `clickhouse-local` does not have access to data on the same host, but it supports loading server configuration using `--config-file` argument.
+默认情况下,`clickhouse-local` 无法访问同一主机上的数据,但支持使用 `--config-file` 参数加载服务器配置。

-!!! warning "Warning"
-    It is not recommended to load production server configuration into `clickhouse-local` because data can be damaged in case of human error.
+!!! warning "警告"
+    不建议将生产服务器的配置加载到 `clickhouse-local`,因为人为错误可能导致数据损坏。

-## Usage {#usage}
+## 用法 {#usage}

-Basic usage:
+基本用法:

 ``` bash
 clickhouse-local --structure "table_structure" --input-format "format_of_incoming_data" -q "query"
 ```

-Arguments:
+参数:

 - `-S`, `--structure` — table structure for input data.
-- `-if`, `--input-format` — input format, `TSV` by default.
-- `-f`, `--file` — path to data, `stdin` by default.
-- `-q` `--query` — queries to execute with `;` as delimiter.
-- `-N`, `--table` — table name where to put output data, `table` by default.
-- `-of`, `--format`, `--output-format` — output format, `TSV` by default.
+- `-if`, `--input-format` — 输入格式,默认为 `TSV`。
+- `-f`, `--file` — 数据路径,默认为 `stdin`。
+- `-q` `--query` — 要执行的查询,以 `;` 作为分隔符。
+- `-N`, `--table` — 存放输出数据的表名,默认为 `table`。
+- `-of`, `--format`, `--output-format` — 输出格式,默认为 `TSV`。
 - `--stacktrace` — whether to dump debug output in case of exception.
 - `--verbose` — more details on query execution.
-- `-s` — disables `stderr` logging.
+- `-s` — 禁用 `stderr` 日志记录。
 - `--config-file` — path to configuration file in same format as for ClickHouse server, by default the configuration empty.
 - `--help` — arguments references for `clickhouse-local`.

-Also there are arguments for each ClickHouse configuration variable which are more commonly used instead of `--config-file`.
+此外,每个ClickHouse配置变量都有对应的命令行参数,它们比 `--config-file` 更常用。

-## Examples {#examples}
+## 示例 {#examples}

 ``` bash
 echo -e "1,2\n3,4" | clickhouse-local -S "a Int64, b Int64" -if "CSV" -q "SELECT * FROM table"
 Read 2 rows, 32.00 B in 0.000 sec., 5182 rows/sec., 80.97 KiB/sec.
 1 2
 3 4
 ```

-Previous example is the same as:
+前面的示例等价于:

 ``` bash
 $ echo -e "1,2\n3,4" | clickhouse-local -q "CREATE TABLE table (a Int64, b Int64) ENGINE = File(CSV, stdin); SELECT a, b FROM table; DROP TABLE table"
 Read 2 rows, 32.00 B in 0.000 sec., 4987 rows/sec., 77.93 KiB/sec.
 1 2
 3 4
 ```

-Now let’s output memory usage for each Unix user:
+现在让我们输出每个Unix用户的内存占用:

 ``` bash
 $ ps aux | tail -n +2 | awk '{ printf("%s\t%s\n", $1, $4) }' | clickhouse-local -S "user String, mem Float64" -q "SELECT user, round(sum(mem), 2) as memTotal FROM table GROUP BY user ORDER BY memTotal DESC FORMAT Pretty"
@@ -68,4 +68,4 @@ Read 186 rows, 4.15 KiB in 0.035 sec., 5302 rows/sec., 118.34 KiB/sec.
 ...
 ```

-[Original article](https://clickhouse.tech/docs/en/operations/utils/clickhouse-local/)
+[原始文章](https://clickhouse.tech/docs/en/operations/utils/clickhouse-local/)
diff --git a/docs/zh/operations/utilities/index.md b/docs/zh/operations/utilities/index.md
new file mode 100644
index 00000000000..8d70ef4a6bb
--- /dev/null
+++ b/docs/zh/operations/utilities/index.md
@@ -0,0 +1,8 @@
+
+# ClickHouse实用工具 {#clickhouse-utility}
+
+- [clickhouse-local](clickhouse-local.md) — Allows running SQL queries on data without stopping the ClickHouse server, similar to how `awk` does this.
+- [clickhouse-copier](clickhouse-copier.md) — Copies (and reshards) data from one cluster to another cluster.
+- [clickhouse-benchmark](clickhouse-benchmark.md) — Loads server with the custom queries and settings.
+
+[原始文章](https://clickhouse.tech/docs/en/operations/utils/)
diff --git a/docs/zh/operations/utils/index.md b/docs/zh/operations/utils/index.md
deleted file mode 100644
index ebc1396d031..00000000000
--- a/docs/zh/operations/utils/index.md
+++ /dev/null
@@ -1,7 +0,0 @@
-# ClickHouse Utility {#clickhouse-utility}
-
-- [clickhouse-local](clickhouse-local.md) — Allows running SQL queries on data without stopping the ClickHouse server, similar to how `awk` does this.
-- [clickhouse-copier](clickhouse-copier.md) — Copies (and reshards) data from one cluster to another cluster.
-- [clickhouse-benchmark](clickhouse-benchmark.md) — Loads server with the custom queries and settings.
-
-[Original article](https://clickhouse.tech/docs/en/operations/utils/)
diff --git a/docs/zh/query_language/agg_functions/combinators.md b/docs/zh/query_language/agg_functions/combinators.md
deleted file mode 100644
index a173e56fbea..00000000000
--- a/docs/zh/query_language/agg_functions/combinators.md
+++ /dev/null
@@ -1,163 +0,0 @@
----
-en_copy: true
----
-
-# Aggregate function combinators {#aggregate_functions_combinators}
-
-The name of an aggregate function can have a suffix appended to it. This changes the way the aggregate function works.
-
-## -If {#agg-functions-combinator-if}
-
-The suffix -If can be appended to the name of any aggregate function. In this case, the aggregate function accepts an extra argument – a condition (UInt8 type).
The aggregate function processes only the rows that trigger the condition. If the condition was not triggered even once, it returns a default value (usually zeros or empty strings).
-
-Examples: `sumIf(column, cond)`, `countIf(cond)`, `avgIf(x, cond)`, `quantilesTimingIf(level1, level2)(x, cond)`, `argMinIf(arg, val, cond)` and so on.
-
-With conditional aggregate functions, you can calculate aggregates for several conditions at once, without using subqueries and `JOIN`s. For example, in Yandex.Metrica, conditional aggregate functions are used to implement the segment comparison functionality.
-
-## -Array {#agg-functions-combinator-array}
-
-The -Array suffix can be appended to any aggregate function. In this case, the aggregate function takes arguments of the ‘Array(T)’ type (arrays) instead of ‘T’ type arguments. If the aggregate function accepts multiple arguments, these must be arrays of equal length. When processing arrays, the aggregate function works like the original aggregate function across all array elements.
-
-Example 1: `sumArray(arr)` - Totals all the elements of all ‘arr’ arrays. In this example, it could have been written more simply: `sum(arraySum(arr))`.
-
-Example 2: `uniqArray(arr)` – Counts the number of unique elements in all ‘arr’ arrays. This could be done in an easier way: `uniq(arrayJoin(arr))`, but it’s not always possible to add ‘arrayJoin’ to a query.
-
--If and -Array can be combined. However, ‘Array’ must come first, then ‘If’. Examples: `uniqArrayIf(arr, cond)`, `quantilesTimingArrayIf(level1, level2)(arr, cond)`. Due to this order, the ‘cond’ argument won’t be an array.
-
-## -State {#agg-functions-combinator-state}
-
-If you apply this combinator, the aggregate function doesn’t return the resulting value (such as the number of unique values for the [uniq](reference.md#agg_function-uniq) function), but an intermediate state of the aggregation (for `uniq`, this is the hash table for calculating the number of unique values). This is an `AggregateFunction(...)` that can be used for further processing or stored in a table to finish aggregating later.
-
-To work with these states, use:
-
-- [AggregatingMergeTree](../../operations/table_engines/aggregatingmergetree.md) table engine.
-- [finalizeAggregation](../functions/other_functions.md#function-finalizeaggregation) function.
-- [runningAccumulate](../functions/other_functions.md#function-runningaccumulate) function.
-- [-Merge](#aggregate_functions_combinators-merge) combinator.
-- [-MergeState](#aggregate_functions_combinators-mergestate) combinator.
-
-## -Merge {#aggregate_functions_combinators-merge}
-
-If you apply this combinator, the aggregate function takes the intermediate aggregation state as an argument, combines the states to finish aggregation, and returns the resulting value.
-
-## -MergeState {#aggregate_functions_combinators-mergestate}
-
-Merges the intermediate aggregation states in the same way as the -Merge combinator. However, it doesn’t return the resulting value, but an intermediate aggregation state, similar to the -State combinator.
-
-## -ForEach {#agg-functions-combinator-foreach}
-
-Converts an aggregate function for tables into an aggregate function for arrays that aggregates the corresponding array items and returns an array of results. For example, `sumForEach` for the arrays `[1, 2]`, `[3, 4, 5]` and `[6, 7]` returns the result `[10, 13, 5]` after adding together the corresponding array items.
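-
-As a quick, hedged sketch of the above (the `UNION ALL` wrapping is just one way to produce those three rows; the result `[10,13,5]` follows from the description):
-
-``` sql
-SELECT sumForEach(arr)
-FROM
-(
-    SELECT [1, 2] AS arr
-    UNION ALL SELECT [3, 4, 5]
-    UNION ALL SELECT [6, 7]
-)
-```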
-
-## -OrDefault {#agg-functions-combinator-ordefault}
-
-Fills the default value of the aggregate function’s return type if there is nothing to aggregate.
-
-``` sql
-SELECT avg(number), avgOrDefault(number) FROM numbers(0)
-```
-
-``` text
-┌─avg(number)─┬─avgOrDefault(number)─┐
-│         nan │                    0 │
-└─────────────┴──────────────────────┘
-```
-
-## -OrNull {#agg-functions-combinator-ornull}
-
-Fills `null` if there is nothing to aggregate. The return column will be nullable.
-
-``` sql
-SELECT avg(number), avgOrNull(number) FROM numbers(0)
-```
-
-``` text
-┌─avg(number)─┬─avgOrNull(number)─┐
-│         nan │              ᴺᵁᴸᴸ │
-└─────────────┴───────────────────┘
-```
-
--OrDefault and -OrNull can be combined with other combinators. It is useful when the aggregate function does not accept the empty input.
-
-``` sql
-SELECT avgOrNullIf(x, x > 10)
-FROM
-(
-    SELECT toDecimal32(1.23, 2) AS x
-)
-```
-
-``` text
-┌─avgOrNullIf(x, greater(x, 10))─┐
-│                           ᴺᵁᴸᴸ │
-└────────────────────────────────┘
-```
-
-## -Resample {#agg-functions-combinator-resample}
-
-Lets you divide data into groups, and then separately aggregates the data in those groups. Groups are created by splitting the values from one column into intervals.
-
-``` sql
-<aggFunction>Resample(start, stop, step)(<aggFunction_params>, resampling_key)
-```
-
-**Parameters**
-
-- `start` — Starting value of the whole required interval for `resampling_key` values.
-- `stop` — Ending value of the whole required interval for `resampling_key` values. The whole interval doesn’t include the `stop` value `[start, stop)`.
-- `step` — Step for separating the whole interval into subintervals. The `aggFunction` is executed over each of those subintervals independently.
-- `resampling_key` — Column whose values are used for separating data into intervals.
-- `aggFunction_params` — `aggFunction` parameters.
-
-**Returned values**
-
-- Array of `aggFunction` results for each subinterval.
-
-**Example**
-
-Consider the `people` table with the following data:
-
-``` text
-┌─name───┬─age─┬─wage─┐
-│ John   │  16 │   10 │
-│ Alice  │  30 │   15 │
-│ Mary   │  35 │    8 │
-│ Evelyn │  48 │ 11.5 │
-│ David  │  62 │  9.9 │
-│ Brian  │  60 │   16 │
-└────────┴─────┴──────┘
-```
-
-Let’s get the names of the people whose age lies in the intervals of `[30,60)` and `[60,75)`. Since we use integer representation for age, we get ages in the `[30, 59]` and `[60,74]` intervals.
-
-To aggregate names in an array, we use the [groupArray](reference.md#agg_function-grouparray) aggregate function. It takes one argument. In our case, it’s the `name` column. The `groupArrayResample` function should use the `age` column to aggregate names by age. To define the required intervals, we pass the `30, 75, 30` arguments into the `groupArrayResample` function.
-
-``` sql
-SELECT groupArrayResample(30, 75, 30)(name, age) FROM people
-```
-
-``` text
-┌─groupArrayResample(30, 75, 30)(name, age)─────┐
-│ [['Alice','Mary','Evelyn'],['David','Brian']] │
-└───────────────────────────────────────────────┘
-```
-
-Consider the results.
-
-`John` is out of the sample because he’s too young. Other people are distributed according to the specified age intervals.
-
-Now let’s count the total number of people and their average wage in the specified age intervals.
- -``` sql -SELECT - countResample(30, 75, 30)(name, age) AS amount, - avgResample(30, 75, 30)(wage, age) AS avg_wage -FROM people -``` - -``` text -┌─amount─┬─avg_wage──────────────────┐ -│ [3,2] │ [11.5,12.949999809265137] │ -└────────┴───────────────────────────┘ -``` - -[Original article](https://clickhouse.tech/docs/en/query_language/agg_functions/combinators/) diff --git a/docs/zh/query_language/agg_functions/index.md b/docs/zh/query_language/agg_functions/index.md deleted file mode 100644 index c439ddb1e6a..00000000000 --- a/docs/zh/query_language/agg_functions/index.md +++ /dev/null @@ -1,58 +0,0 @@ ---- -en_copy: true ---- - -# Aggregate functions {#aggregate-functions} - -Aggregate functions work in the [normal](http://www.sql-tutorial.com/sql-aggregate-functions-sql-tutorial) way as expected by database experts. - -ClickHouse also supports: - -- [Parametric aggregate functions](parametric_functions.md#aggregate_functions_parametric), which accept other parameters in addition to columns. -- [Combinators](combinators.md#aggregate_functions_combinators), which change the behavior of aggregate functions. - -## NULL processing {#null-processing} - -During aggregation, all `NULL`s are skipped. - -**Examples:** - -Consider this table: - -``` text -┌─x─┬────y─┐ -│ 1 │ 2 │ -│ 2 │ ᴺᵁᴸᴸ │ -│ 3 │ 2 │ -│ 3 │ 3 │ -│ 3 │ ᴺᵁᴸᴸ │ -└───┴──────┘ -``` - -Let’s say you need to total the values in the `y` column: - -``` sql -SELECT sum(y) FROM t_null_big -``` - - ┌─sum(y)─┐ - │ 7 │ - └────────┘ - -The `sum` function interprets `NULL` as `0`. In particular, this means that if the function receives input of a selection where all the values are `NULL`, then the result will be `0`, not `NULL`. - -Now you can use the `groupArray` function to create an array from the `y` column: - -``` sql -SELECT groupArray(y) FROM t_null_big -``` - -``` text -┌─groupArray(y)─┐ -│ [2,2,3] │ -└───────────────┘ -``` - -`groupArray` does not include `NULL` in the resulting array. - -[Original article](https://clickhouse.tech/docs/en/query_language/agg_functions/) diff --git a/docs/zh/query_language/agg_functions/reference.md b/docs/zh/query_language/agg_functions/reference.md deleted file mode 100644 index 31de8bf1226..00000000000 --- a/docs/zh/query_language/agg_functions/reference.md +++ /dev/null @@ -1,1834 +0,0 @@ ---- -en_copy: true ---- - -# Function Reference {#function-reference} - -## count {#agg_function-count} - -Counts the number of rows or not-NULL values. - -ClickHouse supports the following syntaxes for `count`: -- `count(expr)` or `COUNT(DISTINCT expr)`. -- `count()` or `COUNT(*)`. The `count()` syntax is ClickHouse-specific. - -**Parameters** - -The function can take: - -- Zero parameters. -- One [expression](../syntax.md#syntax-expressions). - -**Returned value** - -- If the function is called without parameters it counts the number of rows. -- If the [expression](../syntax.md#syntax-expressions) is passed, then the function counts how many times this expression returned not null. If the expression returns a [Nullable](../../data_types/nullable.md)-type value, then the result of `count` stays not `Nullable`. The function returns 0 if the expression returned `NULL` for all the rows. - -In both cases the type of the returned value is [UInt64](../../data_types/int_uint.md). - -**Details** - -ClickHouse supports the `COUNT(DISTINCT ...)` syntax. 
The behavior of this construction depends on the [count\_distinct\_implementation](../../operations/settings/settings.md#settings-count_distinct_implementation) setting. It defines which of the [uniq\*](#agg_function-uniq) functions is used to perform the operation. The default is the [uniqExact](#agg_function-uniqexact) function. - -The `SELECT count() FROM table` query is not optimized, because the number of entries in the table is not stored separately. It chooses a small column from the table and counts the number of values in it. - -**Examples** - -Example 1: - -``` sql -SELECT count() FROM t -``` - -``` text -┌─count()─┐ -│ 5 │ -└─────────┘ -``` - -Example 2: - -``` sql -SELECT name, value FROM system.settings WHERE name = 'count_distinct_implementation' -``` - -``` text -┌─name──────────────────────────┬─value─────┐ -│ count_distinct_implementation │ uniqExact │ -└───────────────────────────────┴───────────┘ -``` - -``` sql -SELECT count(DISTINCT num) FROM t -``` - -``` text -┌─uniqExact(num)─┐ -│ 3 │ -└────────────────┘ -``` - -This example shows that `count(DISTINCT num)` is performed by the `uniqExact` function according to the `count_distinct_implementation` setting value. - -## any(x) {#agg_function-any} - -Selects the first encountered value. -The query can be executed in any order and even in a different order each time, so the result of this function is indeterminate. -To get a determinate result, you can use the ‘min’ or ‘max’ function instead of ‘any’. - -In some cases, you can rely on the order of execution. This applies to cases when SELECT comes from a subquery that uses ORDER BY. - -When a `SELECT` query has the `GROUP BY` clause or at least one aggregate function, ClickHouse (in contrast to MySQL) requires that all expressions in the `SELECT`, `HAVING`, and `ORDER BY` clauses be calculated from keys or from aggregate functions. In other words, each column selected from the table must be used either in keys or inside aggregate functions. To get behavior like in MySQL, you can put the other columns in the `any` aggregate function. - -## anyHeavy(x) {#anyheavyx} - -Selects a frequently occurring value using the [heavy hitters](http://www.cs.umd.edu/~samir/498/karp.pdf) algorithm. If there is a value that occurs more than in half the cases in each of the query’s execution threads, this value is returned. Normally, the result is nondeterministic. - -``` sql -anyHeavy(column) -``` - -**Arguments** - -- `column` – The column name. - -**Example** - -Take the [OnTime](../../getting_started/example_datasets/ontime.md) data set and select any frequently occurring value in the `AirlineID` column. - -``` sql -SELECT anyHeavy(AirlineID) AS res -FROM ontime -``` - -``` text -┌───res─┐ -│ 19690 │ -└───────┘ -``` - -## anyLast(x) {#anylastx} - -Selects the last value encountered. -The result is just as indeterminate as for the `any` function. - -## groupBitAnd {#groupbitand} - -Applies bitwise `AND` for series of numbers. - -``` sql -groupBitAnd(expr) -``` - -**Parameters** - -`expr` – An expression that results in `UInt*` type. - -**Return value** - -Value of the `UInt*` type. - -**Example** - -Test data: - -``` text -binary decimal -00101100 = 44 -00011100 = 28 -00001101 = 13 -01010101 = 85 -``` - -Query: - -``` sql -SELECT groupBitAnd(num) FROM t -``` - -Where `num` is the column with the test data. - -Result: - -``` text -binary decimal -00000100 = 4 -``` - -## groupBitOr {#groupbitor} - -Applies bitwise `OR` for series of numbers. 
-
-``` sql
-groupBitOr(expr)
-```
-
-**Parameters**
-
-`expr` – An expression that results in `UInt*` type.
-
-**Return value**
-
-Value of the `UInt*` type.
-
-**Example**
-
-Test data:
-
-``` text
-binary     decimal
-00101100 = 44
-00011100 = 28
-00001101 = 13
-01010101 = 85
-```
-
-Query:
-
-``` sql
-SELECT groupBitOr(num) FROM t
-```
-
-Where `num` is the column with the test data.
-
-Result:
-
-``` text
-binary     decimal
-01111101 = 125
-```
-
-## groupBitXor {#groupbitxor}
-
-Applies bitwise `XOR` for series of numbers.
-
-``` sql
-groupBitXor(expr)
-```
-
-**Parameters**
-
-`expr` – An expression that results in `UInt*` type.
-
-**Return value**
-
-Value of the `UInt*` type.
-
-**Example**
-
-Test data:
-
-``` text
-binary     decimal
-00101100 = 44
-00011100 = 28
-00001101 = 13
-01010101 = 85
-```
-
-Query:
-
-``` sql
-SELECT groupBitXor(num) FROM t
-```
-
-Where `num` is the column with the test data.
-
-Result:
-
-``` text
-binary     decimal
-01101000 = 104
-```
-
-## groupBitmap {#groupbitmap}
-
-Performs bitmap or aggregate calculations on an unsigned integer column and returns the cardinality as a value of the UInt64 type. With the -State suffix appended, it returns a [bitmap object](../functions/bitmap_functions.md) instead.
-
-``` sql
-groupBitmap(expr)
-```
-
-**Parameters**
-
-`expr` – An expression that results in `UInt*` type.
-
-**Return value**
-
-Value of the `UInt64` type.
-
-**Example**
-
-Test data:
-
-``` text
-UserID
-1
-1
-2
-3
-```
-
-Query:
-
-``` sql
-SELECT groupBitmap(UserID) as num FROM t
-```
-
-Result:
-
-``` text
-num
-3
-```
-
-## min(x) {#agg_function-min}
-
-Calculates the minimum.
-
-## max(x) {#agg_function-max}
-
-Calculates the maximum.
-
-## argMin(arg, val) {#agg-function-argmin}
-
-Calculates the ‘arg’ value for a minimal ‘val’ value. If there are several different values of ‘arg’ for minimal values of ‘val’, the first of these values encountered is output.
-
-**Example:**
-
-``` text
-┌─user─────┬─salary─┐
-│ director │   5000 │
-│ manager  │   3000 │
-│ worker   │   1000 │
-└──────────┴────────┘
-```
-
-``` sql
-SELECT argMin(user, salary) FROM salary
-```
-
-``` text
-┌─argMin(user, salary)─┐
-│ worker               │
-└──────────────────────┘
-```
-
-## argMax(arg, val) {#agg-function-argmax}
-
-Calculates the ‘arg’ value for a maximum ‘val’ value. If there are several different values of ‘arg’ for maximum values of ‘val’, the first of these values encountered is output.
-
-## sum(x) {#agg_function-sum}
-
-Calculates the sum.
-Only works for numbers.
-
-## sumWithOverflow(x) {#sumwithoverflowx}
-
-Computes the sum of the numbers, using the same data type for the result as for the input parameters. If the sum exceeds the maximum value for this data type, the function returns an error.
-
-Only works for numbers.
-
-## sumMap(key, value) {#agg_functions-summap}
-
-Totals the ‘value’ array according to the keys specified in the ‘key’ array.
-The number of elements in ‘key’ and ‘value’ must be the same for each row that is totaled.
-Returns a tuple of two arrays: keys in sorted order, and values summed for the corresponding keys.
-
-Example:
-
-``` sql
-CREATE TABLE sum_map(
-    date Date,
-    timeslot DateTime,
-    statusMap Nested(
-        status UInt16,
-        requests UInt64
-    )
-) ENGINE = Log;
-INSERT INTO sum_map VALUES
-    ('2000-01-01', '2000-01-01 00:00:00', [1, 2, 3], [10, 10, 10]),
-    ('2000-01-01', '2000-01-01 00:00:00', [3, 4, 5], [10, 10, 10]),
-    ('2000-01-01', '2000-01-01 00:01:00', [4, 5, 6], [10, 10, 10]),
-    ('2000-01-01', '2000-01-01 00:01:00', [6, 7, 8], [10, 10, 10]);
-SELECT
-    timeslot,
-    sumMap(statusMap.status, statusMap.requests)
-FROM sum_map
-GROUP BY timeslot
-```
-
-``` text
-┌────────────timeslot─┬─sumMap(statusMap.status, statusMap.requests)─┐
-│ 2000-01-01 00:00:00 │ ([1,2,3,4,5],[10,10,20,10,10])               │
-│ 2000-01-01 00:01:00 │ ([4,5,6,7,8],[10,10,20,10,10])               │
-└─────────────────────┴──────────────────────────────────────────────┘
-```
-
-## skewPop {#skewpop}
-
-Computes the [skewness](https://en.wikipedia.org/wiki/Skewness) of a sequence.
-
-``` sql
-skewPop(expr)
-```
-
-**Parameters**
-
-`expr` — [Expression](../syntax.md#syntax-expressions) returning a number.
-
-**Returned value**
-
-The skewness of the given distribution. Type — [Float64](../../data_types/float.md)
-
-**Example**
-
-``` sql
-SELECT skewPop(value) FROM series_with_value_column
-```
-
-## skewSamp {#skewsamp}
-
-Computes the [sample skewness](https://en.wikipedia.org/wiki/Skewness) of a sequence.
-
-It represents an unbiased estimate of the skewness of a random variable if passed values form its sample.
-
-``` sql
-skewSamp(expr)
-```
-
-**Parameters**
-
-`expr` — [Expression](../syntax.md#syntax-expressions) returning a number.
-
-**Returned value**
-
-The skewness of the given distribution. Type — [Float64](../../data_types/float.md). If `n <= 1` (`n` is the size of the sample), then the function returns `nan`.
-
-**Example**
-
-``` sql
-SELECT skewSamp(value) FROM series_with_value_column
-```
-
-## kurtPop {#kurtpop}
-
-Computes the [kurtosis](https://en.wikipedia.org/wiki/Kurtosis) of a sequence.
-
-``` sql
-kurtPop(expr)
-```
-
-**Parameters**
-
-`expr` — [Expression](../syntax.md#syntax-expressions) returning a number.
-
-**Returned value**
-
-The kurtosis of the given distribution. Type — [Float64](../../data_types/float.md)
-
-**Example**
-
-``` sql
-SELECT kurtPop(value) FROM series_with_value_column
-```
-
-## kurtSamp {#kurtsamp}
-
-Computes the [sample kurtosis](https://en.wikipedia.org/wiki/Kurtosis) of a sequence.
-
-It represents an unbiased estimate of the kurtosis of a random variable if passed values form its sample.
-
-``` sql
-kurtSamp(expr)
-```
-
-**Parameters**
-
-`expr` — [Expression](../syntax.md#syntax-expressions) returning a number.
-
-**Returned value**
-
-The kurtosis of the given distribution. Type — [Float64](../../data_types/float.md). If `n <= 1` (`n` is the size of the sample), then the function returns `nan`.
-
-**Example**
-
-``` sql
-SELECT kurtSamp(value) FROM series_with_value_column
-```
-
-## timeSeriesGroupSum(uid, timestamp, value) {#agg-function-timeseriesgroupsum}
-
-`timeSeriesGroupSum` can aggregate time series whose sample timestamps are not aligned.
-It uses linear interpolation between two sample timestamps and then sums the time series together.
-
-- `uid` is the time series unique id, `UInt64`.
-- `timestamp` has the Int64 type in order to support millisecond or microsecond precision.
-- `value` is the metric.
-
-The function returns an array of tuples with `(timestamp, aggregated_value)` pairs.
-
-Before using this function make sure `timestamp` is in ascending order.
-
-Example:
-
-``` text
-┌─uid─┬─timestamp─┬─value─┐
-│   1 │         2 │   0.2 │
-│   1 │         7 │   0.7 │
-│   1 │        12 │   1.2 │
-│   1 │        17 │   1.7 │
-│   1 │        25 │   2.5 │
-│   2 │         3 │   0.6 │
-│   2 │         8 │   1.6 │
-│   2 │        12 │   2.4 │
-│   2 │        18 │   3.6 │
-│   2 │        24 │   4.8 │
-└─────┴───────────┴───────┘
-```
-
-``` sql
-CREATE TABLE time_series(
-    uid       UInt64,
-    timestamp Int64,
-    value     Float64
-) ENGINE = Memory;
-INSERT INTO time_series VALUES
-    (1,2,0.2),(1,7,0.7),(1,12,1.2),(1,17,1.7),(1,25,2.5),
-    (2,3,0.6),(2,8,1.6),(2,12,2.4),(2,18,3.6),(2,24,4.8);
-
-SELECT timeSeriesGroupSum(uid, timestamp, value)
-FROM (
-    SELECT * FROM time_series order by timestamp ASC
-);
-```
-
-And the result will be:
-
-``` text
-[(2,0.2),(3,0.9),(7,2.1),(8,2.4),(12,3.6),(17,5.1),(18,5.4),(24,7.2),(25,2.5)]
-```
-
-## timeSeriesGroupRateSum(uid, ts, val) {#agg-function-timeseriesgroupratesum}
-
-Similarly to timeSeriesGroupSum, timeSeriesGroupRateSum calculates the rate of the time series and then sums the rates together.
-Also, timestamps should be in ascending order before using this function.
-
-Using this function, the result for the case above will be:
-
-``` text
-[(2,0),(3,0.1),(7,0.3),(8,0.3),(12,0.3),(17,0.3),(18,0.3),(24,0.3),(25,0.1)]
-```
-
-## avg(x) {#agg_function-avg}
-
-Calculates the average.
-Only works for numbers.
-The result is always Float64.
-
-## uniq {#agg_function-uniq}
-
-Calculates the approximate number of different values of the argument.
-
-``` sql
-uniq(x[, ...])
-```
-
-**Parameters**
-
-The function takes a variable number of parameters. Parameters can be `Tuple`, `Array`, `Date`, `DateTime`, `String`, or numeric types.
-
-**Returned value**
-
-- A [UInt64](../../data_types/int_uint.md)-type number.
-
-**Implementation details**
-
-Function:
-
-- Calculates a hash for all parameters in the aggregate, then uses it in calculations.
-
-- Uses an adaptive sampling algorithm. For the calculation state, the function uses a sample of element hash values up to 65536.
-
-    This algorithm is very accurate and very efficient on the CPU. When the query contains several of these functions, using `uniq` is almost as fast as using other aggregate functions.
-
-- Provides the result deterministically (it doesn’t depend on the query processing order).
-
-We recommend using this function in almost all scenarios.
-
-**See Also**
-
-- [uniqCombined](#agg_function-uniqcombined)
-- [uniqCombined64](#agg_function-uniqcombined64)
-- [uniqHLL12](#agg_function-uniqhll12)
-- [uniqExact](#agg_function-uniqexact)
-
-## uniqCombined {#agg_function-uniqcombined}
-
-Calculates the approximate number of different argument values.
-
-``` sql
-uniqCombined(HLL_precision)(x[, ...])
-```
-
-The `uniqCombined` function is a good choice for calculating the number of different values.
-
-**Parameters**
-
-The function takes a variable number of parameters. Parameters can be `Tuple`, `Array`, `Date`, `DateTime`, `String`, or numeric types.
-
-`HLL_precision` is the base-2 logarithm of the number of cells in [HyperLogLog](https://en.wikipedia.org/wiki/HyperLogLog). Optional, you can use the function as `uniqCombined(x[, ...])`. The default value for `HLL_precision` is 17, which is effectively 96 KiB of space (2^17 cells, 6 bits each).
-
-**Returned value**
-
-- A [UInt64](../../data_types/int_uint.md)-type number.
-
-**Implementation details**
-
-Function:
-
-- Calculates a hash (64-bit hash for `String` and 32-bit otherwise) for all parameters in the aggregate, then uses it in calculations.
-
-- Uses a combination of three algorithms: array, hash table, and HyperLogLog with an error correction table.
-
-    For a small number of distinct elements, an array is used. When the set size is larger, a hash table is used. For a larger number of elements, HyperLogLog is used, which will occupy a fixed amount of memory.
-
-- Provides the result deterministically (it doesn’t depend on the query processing order).
-
-!!! note "Note"
-    Since it uses 32-bit hash for non-`String` type, the result will have very high error for cardinalities significantly larger than `UINT_MAX` (error will rise quickly after a few tens of billions of distinct values), hence in this case you should use [uniqCombined64](#agg_function-uniqcombined64)
-
-Compared to the [uniq](#agg_function-uniq) function, the `uniqCombined`:
-
-- Consumes several times less memory.
-- Calculates with several times higher accuracy.
-- Usually has slightly lower performance. In some scenarios, `uniqCombined` can perform better than `uniq`, for example, with distributed queries that transmit a large number of aggregation states over the network.
-
-**See Also**
-
-- [uniq](#agg_function-uniq)
-- [uniqCombined64](#agg_function-uniqcombined64)
-- [uniqHLL12](#agg_function-uniqhll12)
-- [uniqExact](#agg_function-uniqexact)
-
-## uniqCombined64 {#agg_function-uniqcombined64}
-
-Same as [uniqCombined](#agg_function-uniqcombined), but uses 64-bit hash for all data types.
-
-## uniqHLL12 {#agg_function-uniqhll12}
-
-Calculates the approximate number of different argument values, using the [HyperLogLog](https://en.wikipedia.org/wiki/HyperLogLog) algorithm.
-
-``` sql
-uniqHLL12(x[, ...])
-```
-
-**Parameters**
-
-The function takes a variable number of parameters. Parameters can be `Tuple`, `Array`, `Date`, `DateTime`, `String`, or numeric types.
-
-**Returned value**
-
-- A [UInt64](../../data_types/int_uint.md)-type number.
-
-**Implementation details**
-
-Function:
-
-- Calculates a hash for all parameters in the aggregate, then uses it in calculations.
-
-- Uses the HyperLogLog algorithm to approximate the number of different argument values.
-
-    2^12 5-bit cells are used. The size of the state is slightly more than 2.5 KB. The result is not very accurate (up to ~10% error) for small data sets (<10K elements). However, the result is fairly accurate for high-cardinality data sets (10K-100M), with a maximum error of ~1.6%. Starting from 100M, the estimation error increases, and the function will return very inaccurate results for data sets with extremely high cardinality (1B+ elements).
-
-- Provides the result deterministically (it doesn’t depend on the query processing order).
-
-We don’t recommend using this function. In most cases, use the [uniq](#agg_function-uniq) or [uniqCombined](#agg_function-uniqcombined) function.
-
-**See Also**
-
-- [uniq](#agg_function-uniq)
-- [uniqCombined](#agg_function-uniqcombined)
-- [uniqExact](#agg_function-uniqexact)
-
-## uniqExact {#agg_function-uniqexact}
-
-Calculates the exact number of different argument values.
-
-``` sql
-uniqExact(x[, ...])
-```
-
-Use the `uniqExact` function if you absolutely need an exact result. Otherwise use the [uniq](#agg_function-uniq) function.
-
-The `uniqExact` function uses more memory than `uniq`, because the size of the state has unbounded growth as the number of different values increases.
-
-**Parameters**
-
-The function takes a variable number of parameters. Parameters can be `Tuple`, `Array`, `Date`, `DateTime`, `String`, or numeric types.
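-
-**Example**
-
-A minimal sketch (not from the original text) comparing the exact and approximate counters on the same input:
-
-``` sql
-SELECT uniqExact(number), uniq(number) FROM numbers(1000000)
-```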
- -**See Also** - -- [uniq](#agg_function-uniq) -- [uniqCombined](#agg_function-uniqcombined) -- [uniqHLL12](#agg_function-uniqhll12) - -## groupArray(x), groupArray(max\_size)(x) {#agg_function-grouparray} - -Creates an array of argument values. -Values can be added to the array in any (indeterminate) order. - -The second version (with the `max_size` parameter) limits the size of the resulting array to `max_size` elements. -For example, `groupArray (1) (x)` is equivalent to `[any (x)]`. - -In some cases, you can still rely on the order of execution. This applies to cases when `SELECT` comes from a subquery that uses `ORDER BY`. - -## groupArrayInsertAt(value, position) {#grouparrayinsertatvalue-position} - -Inserts a value into the array in the specified position. - -!!! note "Note" - This function uses zero-based positions, contrary to the conventional one-based positions for SQL arrays. - -Accepts the value and position as input. If several values ​​are inserted into the same position, any of them might end up in the resulting array (the first one will be used in the case of single-threaded execution). If no value is inserted into a position, the position is assigned the default value. - -Optional parameters: - -- The default value for substituting in empty positions. -- The length of the resulting array. This allows you to receive arrays of the same size for all the aggregate keys. When using this parameter, the default value must be specified. - -## groupArrayMovingSum {#agg_function-grouparraymovingsum} - -Calculates the moving sum of input values. - -``` sql -groupArrayMovingSum(numbers_for_summing) -groupArrayMovingSum(window_size)(numbers_for_summing) -``` - -The function can take the window size as a parameter. If left unspecified, the function takes the window size equal to the number of rows in the column. - -**Parameters** - -- `numbers_for_summing` — [Expression](../syntax.md#syntax-expressions) resulting in a numeric data type value. -- `window_size` — Size of the calculation window. - -**Returned values** - -- Array of the same size and type as the input data. - -**Example** - -The sample table: - -``` sql -CREATE TABLE t -( - `int` UInt8, - `float` Float32, - `dec` Decimal32(2) -) -ENGINE = TinyLog -``` - -``` text -┌─int─┬─float─┬──dec─┐ -│ 1 │ 1.1 │ 1.10 │ -│ 2 │ 2.2 │ 2.20 │ -│ 4 │ 4.4 │ 4.40 │ -│ 7 │ 7.77 │ 7.77 │ -└─────┴───────┴──────┘ -``` - -The queries: - -``` sql -SELECT - groupArrayMovingSum(int) AS I, - groupArrayMovingSum(float) AS F, - groupArrayMovingSum(dec) AS D -FROM t -``` - -``` text -┌─I──────────┬─F───────────────────────────────┬─D──────────────────────┐ -│ [1,3,7,14] │ [1.1,3.3000002,7.7000003,15.47] │ [1.10,3.30,7.70,15.47] │ -└────────────┴─────────────────────────────────┴────────────────────────┘ -``` - -``` sql -SELECT - groupArrayMovingSum(2)(int) AS I, - groupArrayMovingSum(2)(float) AS F, - groupArrayMovingSum(2)(dec) AS D -FROM t -``` - -``` text -┌─I──────────┬─F───────────────────────────────┬─D──────────────────────┐ -│ [1,3,6,11] │ [1.1,3.3000002,6.6000004,12.17] │ [1.10,3.30,6.60,12.17] │ -└────────────┴─────────────────────────────────┴────────────────────────┘ -``` - -## groupArrayMovingAvg {#agg_function-grouparraymovingavg} - -Calculates the moving average of input values. - -``` sql -groupArrayMovingAvg(numbers_for_summing) -groupArrayMovingAvg(window_size)(numbers_for_summing) -``` - -The function can take the window size as a parameter. 
If left unspecified, the function takes the window size equal to the number of rows in the column.
-
-**Parameters**
-
-- `numbers_for_summing` — [Expression](../syntax.md#syntax-expressions) resulting in a numeric data type value.
-- `window_size` — Size of the calculation window.
-
-**Returned values**
-
-- Array of the same size and type as the input data.
-
-The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero). It truncates the decimal places insignificant for the resulting data type.
-
-**Example**
-
-The sample table `t`:
-
-``` sql
-CREATE TABLE t
-(
-    `int` UInt8,
-    `float` Float32,
-    `dec` Decimal32(2)
-)
-ENGINE = TinyLog
-```
-
-``` text
-┌─int─┬─float─┬──dec─┐
-│   1 │   1.1 │ 1.10 │
-│   2 │   2.2 │ 2.20 │
-│   4 │   4.4 │ 4.40 │
-│   7 │  7.77 │ 7.77 │
-└─────┴───────┴──────┘
-```
-
-The queries:
-
-``` sql
-SELECT
-    groupArrayMovingAvg(int) AS I,
-    groupArrayMovingAvg(float) AS F,
-    groupArrayMovingAvg(dec) AS D
-FROM t
-```
-
-``` text
-┌─I─────────┬─F───────────────────────────────────┬─D─────────────────────┐
-│ [0,0,1,3] │ [0.275,0.82500005,1.9250001,3.8675] │ [0.27,0.82,1.92,3.86] │
-└───────────┴─────────────────────────────────────┴───────────────────────┘
-```
-
-``` sql
-SELECT
-    groupArrayMovingAvg(2)(int) AS I,
-    groupArrayMovingAvg(2)(float) AS F,
-    groupArrayMovingAvg(2)(dec) AS D
-FROM t
-```
-
-``` text
-┌─I─────────┬─F────────────────────────────────┬─D─────────────────────┐
-│ [0,1,3,5] │ [0.55,1.6500001,3.3000002,6.085] │ [0.55,1.65,3.30,6.08] │
-└───────────┴──────────────────────────────────┴───────────────────────┘
-```
-
-## groupUniqArray(x), groupUniqArray(max\_size)(x) {#groupuniqarrayx-groupuniqarraymax-sizex}
-
-Creates an array from different argument values. Memory consumption is the same as for the `uniqExact` function.
-
-The second version (with the `max_size` parameter) limits the size of the resulting array to `max_size` elements.
-For example, `groupUniqArray(1)(x)` is equivalent to `[any(x)]`.
-
-## quantile {#quantile}
-
-Computes an approximate [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence.
-
-This function applies [reservoir sampling](https://en.wikipedia.org/wiki/Reservoir_sampling) with a reservoir size up to 8192 and a random number generator for sampling. The result is non-deterministic. To get an exact quantile, use the [quantileExact](#quantileexact) function.
-
-When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](#quantiles) function.
-
-**Syntax**
-
-``` sql
-quantile(level)(expr)
-```
-
-Alias: `median`.
-
-**Parameters**
-
-- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` value in the range of `[0.01, 0.99]`. Default value: 0.5. At `level=0.5` the function calculates the [median](https://en.wikipedia.org/wiki/Median).
-- `expr` — Expression over the column values resulting in numeric [data types](../../data_types/index.md#data_types), [Date](../../data_types/date.md) or [DateTime](../../data_types/datetime.md).
-
-**Returned value**
-
-- Approximate quantile of the specified level.
-
-Type:
-
-- [Float64](../../data_types/float.md) for numeric data type input.
-- [Date](../../data_types/date.md) if input values have the `Date` type.
-- [DateTime](../../data_types/datetime.md) if input values have the `DateTime` type.
-
-**Example**
-
-Input table:
-
-``` text
-┌─val─┐
-│   1 │
-│   1 │
-│   2 │
-│   3 │
-└─────┘
-```
-
-Query:
-
-``` sql
-SELECT quantile(val) FROM t
-```
-
-Result:
-
-``` text
-┌─quantile(val)─┐
-│           1.5 │
-└───────────────┘
-```
-
-**See Also**
-
-- [median](#median)
-- [quantiles](#quantiles)
-
-## quantileDeterministic {#quantiledeterministic}
-
-Computes an approximate [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence.
-
-This function applies [reservoir sampling](https://en.wikipedia.org/wiki/Reservoir_sampling) with a reservoir size up to 8192 and a deterministic sampling algorithm. The result is deterministic. To get an exact quantile, use the [quantileExact](#quantileexact) function.
-
-When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](#quantiles) function.
-
-**Syntax**
-
-``` sql
-quantileDeterministic(level)(expr, determinator)
-```
-
-Alias: `medianDeterministic`.
-
-**Parameters**
-
-- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` value in the range of `[0.01, 0.99]`. Default value: 0.5. At `level=0.5` the function calculates the [median](https://en.wikipedia.org/wiki/Median).
-- `expr` — Expression over the column values resulting in numeric [data types](../../data_types/index.md#data_types), [Date](../../data_types/date.md) or [DateTime](../../data_types/datetime.md).
-- `determinator` — Number whose hash is used instead of a random number generator in the reservoir sampling algorithm to make the result of sampling deterministic. As a determinator you can use any deterministic positive number, for example, a user id or an event id. If the same determinator value occurs too often, the function works incorrectly.
-
-**Returned value**
-
-- Approximate quantile of the specified level.
-
-Type:
-
-- [Float64](../../data_types/float.md) for numeric data type input.
-- [Date](../../data_types/date.md) if input values have the `Date` type.
-- [DateTime](../../data_types/datetime.md) if input values have the `DateTime` type.
-
-**Example**
-
-Input table:
-
-``` text
-┌─val─┐
-│   1 │
-│   1 │
-│   2 │
-│   3 │
-└─────┘
-```
-
-Query:
-
-``` sql
-SELECT quantileDeterministic(val, 1) FROM t
-```
-
-Result:
-
-``` text
-┌─quantileDeterministic(val, 1)─┐
-│                           1.5 │
-└───────────────────────────────┘
-```
-
-**See Also**
-
-- [median](#median)
-- [quantiles](#quantiles)
-
-## quantileExact {#quantileexact}
-
-Exactly computes the [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence.
-
-To get the exact value, all the passed values are combined into an array, which is then partially sorted. Therefore, the function consumes `O(n)` memory, where `n` is the number of values that were passed. However, for a small number of values, the function is very effective.
-
-When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](#quantiles) function.
-
-**Syntax**
-
-``` sql
-quantileExact(level)(expr)
-```
-
-Alias: `medianExact`.
-
-**Parameters**
-
-- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` value in the range of `[0.01, 0.99]`. Default value: 0.5.
At `level=0.5` the function calculates [median](https://en.wikipedia.org/wiki/Median). -- `expr` — Expression over the column values resulting in numeric [data types](../../data_types/index.md#data_types), [Date](../../data_types/date.md) or [DateTime](../../data_types/datetime.md). - -**Returned value** - -- Quantile of the specified level. - -Type: - -- [Float64](../../data_types/float.md) for numeric data type input. -- [Date](../../data_types/date.md) if input values have the `Date` type. -- [DateTime](../../data_types/datetime.md) if input values have the `DateTime` type. - -**Example** - -Query: - -``` sql -SELECT quantileExact(number) FROM numbers(10) -``` - -Result: - -``` text -┌─quantileExact(number)─┐ -│ 5 │ -└───────────────────────┘ -``` - -**See Also** - -- [median](#median) -- [quantiles](#quantiles) - -## quantileExactWeighted {#quantileexactweighted} - -Exactly computes the [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence, taking into account the weight of each element. - -To get exact value, all the passed values ​​are combined into an array, which is then partially sorted. Each value is counted with its weight, as if it is present `weight` times. A hash table is used in the algorithm. Because of this, if the passed values ​​are frequently repeated, the function consumes less RAM than [quantileExact](#quantileexact). You can use this function instead of `quantileExact` and specify the weight 1. - -When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](#quantiles) function. - -**Syntax** - -``` sql -quantileExactWeighted(level)(expr, weight) -``` - -Alias: `medianExactWeighted`. - -**Parameters** - -- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` value in the range of `[0.01, 0.99]`. Default value: 0.5. At `level=0.5` the function calculates [median](https://en.wikipedia.org/wiki/Median). -- `expr` — Expression over the column values resulting in numeric [data types](../../data_types/index.md#data_types), [Date](../../data_types/date.md) or [DateTime](../../data_types/datetime.md). -- `weight` — Column with weights of sequence members. Weight is a number of value occurrences. - -**Returned value** - -- Quantile of the specified level. - -Type: - -- [Float64](../../data_types/float.md) for numeric data type input. -- [Date](../../data_types/date.md) if input values have the `Date` type. -- [DateTime](../../data_types/datetime.md) if input values have the `DateTime` type. - -**Example** - -Input table: - -``` text -┌─n─┬─val─┐ -│ 0 │ 3 │ -│ 1 │ 2 │ -│ 2 │ 1 │ -│ 5 │ 4 │ -└───┴─────┘ -``` - -Query: - -``` sql -SELECT quantileExactWeighted(n, val) FROM t -``` - -Result: - -``` text -┌─quantileExactWeighted(n, val)─┐ -│ 1 │ -└───────────────────────────────┘ -``` - -**See Also** - -- [median](#median) -- [quantiles](#quantiles) - -## quantileTiming {#quantiletiming} - -With the determined precision computes the [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence. - -The result is deterministic (it doesn’t depend on the query processing order). The function is optimized for working with sequences which describe distributions like loading web pages times or backend response times. 
- -When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](#quantiles) function. - -**Syntax** - -``` sql -quantileTiming(level)(expr) -``` - -Alias: `medianTiming`. - -**Parameters** - -- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` value in the range of `[0.01, 0.99]`. Default value: 0.5. At `level=0.5` the function calculates [median](https://en.wikipedia.org/wiki/Median). - -- `expr` — [Expression](../syntax.md#syntax-expressions) over a column values returning a [Float\*](../../data_types/float.md)-type number. - - - If negative values are passed to the function, the behavior is undefined. - - If the value is greater than 30,000 (a page loading time of more than 30 seconds), it is assumed to be 30,000. - -**Accuracy** - -The calculation is accurate if: - -- Total number of values doesn’t exceed 5670. -- Total number of values exceeds 5670, but the page loading time is less than 1024ms. - -Otherwise, the result of the calculation is rounded to the nearest multiple of 16 ms. - -!!! note "Note" - For calculating page loading time quantiles, this function is more effective and accurate than [quantile](#quantile). - -**Returned value** - -- Quantile of the specified level. - -Type: `Float32`. - -!!! note "Note" - If no values are passed to the function (when using `quantileTimingIf`), [NaN](../../data_types/float.md#data_type-float-nan-inf) is returned. The purpose of this is to differentiate these cases from cases that result in zero. See [ORDER BY clause](../select.md#select-order-by) for notes on sorting `NaN` values. - -**Example** - -Input table: - -``` text -┌─response_time─┐ -│ 72 │ -│ 112 │ -│ 126 │ -│ 145 │ -│ 104 │ -│ 242 │ -│ 313 │ -│ 168 │ -│ 108 │ -└───────────────┘ -``` - -Query: - -``` sql -SELECT quantileTiming(response_time) FROM t -``` - -Result: - -``` text -┌─quantileTiming(response_time)─┐ -│ 126 │ -└───────────────────────────────┘ -``` - -**See Also** - -- [median](#median) -- [quantiles](#quantiles) - -## quantileTimingWeighted {#quantiletimingweighted} - -With the determined precision computes the [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence according to the weight of each sequence member. - -The result is deterministic (it doesn’t depend on the query processing order). The function is optimized for working with sequences which describe distributions like loading web pages times or backend response times. - -When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](#quantiles) function. - -**Syntax** - -``` sql -quantileTimingWeighted(level)(expr, weight) -``` - -Alias: `medianTimingWeighted`. - -**Parameters** - -- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` value in the range of `[0.01, 0.99]`. Default value: 0.5. At `level=0.5` the function calculates [median](https://en.wikipedia.org/wiki/Median). - -- `expr` — [Expression](../syntax.md#syntax-expressions) over a column values returning a [Float\*](../../data_types/float.md)-type number. - - - If negative values are passed to the function, the behavior is undefined. 
- - If the value is greater than 30,000 (a page loading time of more than 30 seconds), it is assumed to be 30,000. - -- `weight` — Column with weights of sequence elements. Weight is a number of value occurrences. - -**Accuracy** - -The calculation is accurate if: - -- Total number of values doesn’t exceed 5670. -- Total number of values exceeds 5670, but the page loading time is less than 1024ms. - -Otherwise, the result of the calculation is rounded to the nearest multiple of 16 ms. - -!!! note "Note" - For calculating page loading time quantiles, this function is more effective and accurate than [quantile](#quantile). - -**Returned value** - -- Quantile of the specified level. - -Type: `Float32`. - -!!! note "Note" - If no values are passed to the function (when using `quantileTimingIf`), [NaN](../../data_types/float.md#data_type-float-nan-inf) is returned. The purpose of this is to differentiate these cases from cases that result in zero. See [ORDER BY clause](../select.md#select-order-by) for notes on sorting `NaN` values. - -**Example** - -Input table: - -``` text -┌─response_time─┬─weight─┐ -│ 68 │ 1 │ -│ 104 │ 2 │ -│ 112 │ 3 │ -│ 126 │ 2 │ -│ 138 │ 1 │ -│ 162 │ 1 │ -└───────────────┴────────┘ -``` - -Query: - -``` sql -SELECT quantileTimingWeighted(response_time, weight) FROM t -``` - -Result: - -``` text -┌─quantileTimingWeighted(response_time, weight)─┐ -│ 112 │ -└───────────────────────────────────────────────┘ -``` - -**See Also** - -- [median](#median) -- [quantiles](#quantiles) - -## quantileTDigest {#quantiletdigest} - -Computes an approximate [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence using the [t-digest](https://github.com/tdunning/t-digest/blob/master/docs/t-digest-paper/histo.pdf) algorithm. - -The maximum error is 1%. Memory consumption is `log(n)`, where `n` is a number of values. The result depends on the order of running the query, and is nondeterministic. - -The performance of the function is lower than performance of [quantile](#quantile) or [quantileTiming](#quantiletiming). In terms of the ratio of State size to precision, this function is much better than `quantile`. - -When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](#quantiles) function. - -**Syntax** - -``` sql -quantileTDigest(level)(expr) -``` - -Alias: `medianTDigest`. - -**Parameters** - -- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` value in the range of `[0.01, 0.99]`. Default value: 0.5. At `level=0.5` the function calculates [median](https://en.wikipedia.org/wiki/Median). -- `expr` — Expression over the column values resulting in numeric [data types](../../data_types/index.md#data_types), [Date](../../data_types/date.md) or [DateTime](../../data_types/datetime.md). - -**Returned value** - -- Approximate quantile of the specified level. - -Type: - -- [Float64](../../data_types/float.md) for numeric data type input. -- [Date](../../data_types/date.md) if input values have the `Date` type. -- [DateTime](../../data_types/datetime.md) if input values have the `DateTime` type. 
-
-**Example**
-
-Query:
-
-``` sql
-SELECT quantileTDigest(number) FROM numbers(10)
-```
-
-Result:
-
-``` text
-┌─quantileTDigest(number)─┐
-│                     4.5 │
-└─────────────────────────┘
-```
-
-**See Also**
-
-- [median](#median)
-- [quantiles](#quantiles)
-
-## quantileTDigestWeighted {#quantiletdigestweighted}
-
-Computes an approximate [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence using the [t-digest](https://github.com/tdunning/t-digest/blob/master/docs/t-digest-paper/histo.pdf) algorithm. The function takes into account the weight of each sequence member. The maximum error is 1%. Memory consumption is `log(n)`, where `n` is the number of values.
-
-The performance of the function is lower than the performance of [quantile](#quantile) or [quantileTiming](#quantiletiming). In terms of the ratio of state size to precision, this function is much better than `quantile`.
-
-The result depends on the order of running the query, and is nondeterministic.
-
-When using multiple `quantile*` functions with different levels in a query, the internal states are not combined (that is, the query works less efficiently than it could). In this case, use the [quantiles](#quantiles) function.
-
-**Syntax**
-
-``` sql
-quantileTDigestWeighted(level)(expr, weight)
-```
-
-Alias: `medianTDigestWeighted`.
-
-**Parameters**
-
-- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` value in the range of `[0.01, 0.99]`. Default value: 0.5. At `level=0.5` the function calculates the [median](https://en.wikipedia.org/wiki/Median).
-- `expr` — Expression over the column values resulting in numeric [data types](../../data_types/index.md#data_types), [Date](../../data_types/date.md) or [DateTime](../../data_types/datetime.md).
-- `weight` — Column with weights of sequence elements. Weight is the number of value occurrences.
-
-**Returned value**
-
-- Approximate quantile of the specified level.
-
-Type:
-
-- [Float64](../../data_types/float.md) for numeric data type input.
-- [Date](../../data_types/date.md) if input values have the `Date` type.
-- [DateTime](../../data_types/datetime.md) if input values have the `DateTime` type.
-
-**Example**
-
-Query:
-
-``` sql
-SELECT quantileTDigestWeighted(number, 1) FROM numbers(10)
-```
-
-Result:
-
-``` text
-┌─quantileTDigestWeighted(number, 1)─┐
-│                                4.5 │
-└────────────────────────────────────┘
-```
-
-**See Also**
-
-- [median](#median)
-- [quantiles](#quantiles)
-
-## median {#median}
-
-The `median*` functions are the aliases for the corresponding `quantile*` functions. They calculate the median of a numeric data sample.
-
-Functions:
-
-- `median` — Alias for [quantile](#quantile).
-- `medianDeterministic` — Alias for [quantileDeterministic](#quantiledeterministic).
-- `medianExact` — Alias for [quantileExact](#quantileexact).
-- `medianExactWeighted` — Alias for [quantileExactWeighted](#quantileexactweighted).
-- `medianTiming` — Alias for [quantileTiming](#quantiletiming).
-- `medianTimingWeighted` — Alias for [quantileTimingWeighted](#quantiletimingweighted).
-- `medianTDigest` — Alias for [quantileTDigest](#quantiletdigest).
-- `medianTDigestWeighted` — Alias for [quantileTDigestWeighted](#quantiletdigestweighted).
-
-**Example**
-
-Input table:
-
-``` text
-┌─val─┐
-│   1 │
-│   1 │
-│   2 │
-│   3 │
-└─────┘
-```
-
-Query:
-
-``` sql
-SELECT medianDeterministic(val, 1) FROM t
-```
-
-Result:
-
-``` text
-┌─medianDeterministic(val, 1)─┐
-│                         1.5 │
-└─────────────────────────────┘
-```
-
-## quantiles(level1, level2, …)(x) {#quantiles}
-
-All the quantile functions also have corresponding quantiles functions: `quantiles`, `quantilesDeterministic`, `quantilesTiming`, `quantilesTimingWeighted`, `quantilesExact`, `quantilesExactWeighted`, `quantilesTDigest`. These functions calculate all the quantiles of the listed levels in one pass, and return an array of the resulting values.
-
-## varSamp(x) {#varsampx}
-
-Calculates the amount `Σ((x - x̅)^2) / (n - 1)`, where `n` is the sample size and `x̅` is the average value of `x`.
-
-It represents an unbiased estimate of the variance of a random variable if the passed values form its sample.
-
-Returns `Float64`. When `n <= 1`, returns `+∞`.
-
-## varPop(x) {#varpopx}
-
-Calculates the amount `Σ((x - x̅)^2) / n`, where `n` is the sample size and `x̅` is the average value of `x`.
-
-In other words, dispersion for a set of values. Returns `Float64`.
-
-## stddevSamp(x) {#stddevsampx}
-
-The result is equal to the square root of `varSamp(x)`.
-
-## stddevPop(x) {#stddevpopx}
-
-The result is equal to the square root of `varPop(x)`.
-
-## topK(N)(x) {#topknx}
-
-Returns an array of the approximately most frequent values in the specified column. The resulting array is sorted in descending order of approximate frequency of values (not by the values themselves).
-
-Implements the [Filtered Space-Saving](http://www.l2f.inesc-id.pt/~fmmb/wiki/uploads/Work/misnis.ref0a.pdf) algorithm for analyzing TopK, based on the reduce-and-combine algorithm from [Parallel Space Saving](https://arxiv.org/pdf/1401.0702.pdf).
-
-``` sql
-topK(N)(column)
-```
-
-This function doesn’t provide a guaranteed result. In certain situations, errors might occur and it might return frequent values that aren’t the most frequent values.
-
-We recommend using values of `N < 10`; performance is reduced with large `N` values. Maximum value of `N = 65536`.
-
-**Parameters**
-
-- `N` — The number of elements to return. If the parameter is omitted, the default value 10 is used.
-
-**Arguments**
-
-- `x` — The value whose frequency is calculated.
-
-**Example**
-
-Take the [OnTime](../../getting_started/example_datasets/ontime.md) data set and select the three most frequently occurring values in the `AirlineID` column.
-
-``` sql
-SELECT topK(3)(AirlineID) AS res
-FROM ontime
-```
-
-``` text
-┌─res─────────────────┐
-│ [19393,19790,19805] │
-└─────────────────────┘
-```
-
-## topKWeighted {#topkweighted}
-
-Similar to `topK`, but takes one additional integer argument, `weight`. Each value is accounted for `weight` times in the frequency calculation.
-
-**Syntax**
-
-``` sql
-topKWeighted(N)(x, weight)
-```
-
-**Parameters**
-
-- `N` — The number of elements to return.
-
-**Arguments**
-
-- `x` — The value.
-- `weight` — The weight. [UInt8](../../data_types/int_uint.md).
-
-**Returned value**
-
-Returns an array of the values with maximum approximate sum of weights.
-
-**Example**
-
-Query:
-
-``` sql
-SELECT topKWeighted(10)(number, number) FROM numbers(1000)
-```
-
-Result:
-
-``` text
-┌─topKWeighted(10)(number, number)──────────┐
-│ [999,998,997,996,995,994,993,992,991,990] │
-└───────────────────────────────────────────┘
-```
-
-## covarSamp(x, y) {#covarsampx-y}
-
-Calculates the value of `Σ((x - x̅)(y - y̅)) / (n - 1)`.
-
-Returns Float64. When `n <= 1`, returns +∞.
-
-## covarPop(x, y) {#covarpopx-y}
-
-Calculates the value of `Σ((x - x̅)(y - y̅)) / n`.
-
-## corr(x, y) {#corrx-y}
-
-Calculates the Pearson correlation coefficient: `Σ((x - x̅)(y - y̅)) / sqrt(Σ((x - x̅)^2) * Σ((y - y̅)^2))`.
-
-## categoricalInformationValue {#categoricalinformationvalue}
-
-Calculates the value of `(P(tag = 1) - P(tag = 0))(log(P(tag = 1)) - log(P(tag = 0)))` for each category.
-
-``` sql
-categoricalInformationValue(category1, category2, ..., tag)
-```
-
-The result indicates how a discrete (categorical) feature `[category1, category2, ...]` contributes to a learning model that predicts the value of `tag`.
-
-## simpleLinearRegression {#simplelinearregression}
-
-Performs simple (unidimensional) linear regression.
-
-``` sql
-simpleLinearRegression(x, y)
-```
-
-Parameters:
-
-- `x` — Column with explanatory variable values.
-- `y` — Column with dependent variable values.
-
-Returned values:
-
-Constants `(a, b)` of the resulting line `y = a*x + b`.
-
-**Examples**
-
-``` sql
-SELECT arrayReduce('simpleLinearRegression', [0, 1, 2, 3], [0, 1, 2, 3])
-```
-
-``` text
-┌─arrayReduce('simpleLinearRegression', [0, 1, 2, 3], [0, 1, 2, 3])─┐
-│ (1,0)                                                             │
-└───────────────────────────────────────────────────────────────────┘
-```
-
-``` sql
-SELECT arrayReduce('simpleLinearRegression', [0, 1, 2, 3], [3, 4, 5, 6])
-```
-
-``` text
-┌─arrayReduce('simpleLinearRegression', [0, 1, 2, 3], [3, 4, 5, 6])─┐
-│ (1,3)                                                             │
-└───────────────────────────────────────────────────────────────────┘
-```
-
-## stochasticLinearRegression {#agg_functions-stochasticlinearregression}
-
-This function implements stochastic linear regression. It supports custom parameters for the learning rate, L2 regularization coefficient and mini-batch size, and has several methods for updating weights ([Adam](https://en.wikipedia.org/wiki/Stochastic_gradient_descent#Adam) (used by default), [simple SGD](https://en.wikipedia.org/wiki/Stochastic_gradient_descent), [Momentum](https://en.wikipedia.org/wiki/Stochastic_gradient_descent#Momentum), [Nesterov](https://mipt.ru/upload/medialibrary/d7e/41-91.pdf)).
-
-### Parameters {#agg_functions-stochasticlinearregression-parameters}
-
-There are 4 customizable parameters. They are passed to the function sequentially, but there is no need to pass all four: default values will be used. However, a good model requires some parameter tuning.
-
-``` text
-stochasticLinearRegression(1.0, 1.0, 10, 'SGD')
-```
-
-1. `learning rate` — the coefficient on the step length when a gradient descent step is performed. A learning rate that is too big may cause infinite weights of the model. Default is `0.00001`.
-2. `l2 regularization coefficient`, which may help to prevent overfitting. Default is `0.1`.
-3. `mini-batch size` sets the number of elements whose gradients are computed and summed to perform one step of gradient descent. Pure stochastic descent uses one element; however, small batches (about 10 elements) make gradient steps more stable. Default is `15`.
-4. `method for updating weights`: `Adam` (by default), `SGD`, `Momentum`, or `Nesterov`.
`Momentum` and `Nesterov` require a little more computation and memory, but they happen to be useful in terms of the convergence speed and stability of stochastic gradient methods.
-
-### Usage {#agg_functions-stochasticlinearregression-usage}
-
-`stochasticLinearRegression` is used in two steps: fitting the model and predicting on new data. In order to fit the model and save its state for later usage, we use the `-State` combinator, which saves the state (the model weights, etc).
-To predict, we use the function [evalMLMethod](../functions/machine_learning_functions.md#machine_learning_methods-evalmlmethod), which takes a state as an argument as well as the features to predict on.
-
-**1.** Fitting
-
-The following query may be used:
-
-``` sql
-CREATE TABLE IF NOT EXISTS train_data
-(
-    param1 Float64,
-    param2 Float64,
-    target Float64
-) ENGINE = Memory;
-
-CREATE TABLE your_model ENGINE = Memory AS SELECT
-stochasticLinearRegressionState(0.1, 0.0, 5, 'SGD')(target, param1, param2)
-AS state FROM train_data;
-```
-
-Here we also need to insert data into the `train_data` table. The number of parameters is not fixed; it depends only on the number of arguments passed into `stochasticLinearRegressionState`. They all must be numeric values.
-Note that the column with the target value (which we would like to learn to predict) is inserted as the first argument.
-
-**2.** Predicting
-
-After saving a state into the table, we may use it multiple times for prediction, or even merge it with other states to create new, even better models.
-
-``` sql
-WITH (SELECT state FROM your_model) AS model SELECT
-evalMLMethod(model, param1, param2) FROM test_data
-```
-
-The query will return a column of predicted values. Note that the first argument of `evalMLMethod` is an `AggregateFunctionState` object; the following arguments are columns of features.
-
-`test_data` is a table like `train_data`, but it may not contain the target value.
-
-### Notes {#agg_functions-stochasticlinearregression-notes}
-
-1. To merge two models, the user may create a query such as `SELECT state1 + state2 FROM your_models`, where the `your_models` table contains both models. This query will return a new `AggregateFunctionState` object.
-
-2. The user may fetch the weights of the created model for their own purposes without saving the model if no `-State` combinator is used, for example with `SELECT stochasticLinearRegression(0.01)(target, param1, param2) FROM train_data`. Such a query will fit the model and return its weights: first come the weights that correspond to the parameters of the model, and the last one is the bias. So, in the example above, the query will return a column with 3 values.
-
-**See Also**
-
-- [stochasticLogisticRegression](#agg_functions-stochasticlogisticregression)
-- [Difference between linear and logistic regressions](https://stackoverflow.com/questions/12146914/what-is-the-difference-between-linear-regression-and-logistic-regression)
-
-## stochasticLogisticRegression {#agg_functions-stochasticlogisticregression}
-
-This function implements stochastic logistic regression. It can be used for binary classification problems, supports the same custom parameters as stochasticLinearRegression and works the same way.
-
-### Parameters {#agg_functions-stochasticlogisticregression-parameters}
-
-Parameters are exactly the same as in stochasticLinearRegression:
-`learning rate`, `l2 regularization coefficient`, `mini-batch size`, `method for updating weights`.
-For more information see [parameters](#agg_functions-stochasticlinearregression-parameters).
-
-``` text
-stochasticLogisticRegression(1.0, 1.0, 10, 'SGD')
-```
-
-1. Fitting
-
-    See the `Fitting` section in the [stochasticLinearRegression](#stochasticlinearregression-usage-fitting) description.
-
-    Predicted labels have to be in \[-1, 1\].
-
-2. Predicting
-
-    Using the saved state, we can predict the probability of an object having the label `1`.
-
-    ``` sql
-    WITH (SELECT state FROM your_model) AS model SELECT
-    evalMLMethod(model, param1, param2) FROM test_data
-    ```
-
-    The query will return a column of probabilities. Note that the first argument of `evalMLMethod` is an `AggregateFunctionState` object; the following arguments are columns of features.
-
-    We can also set a probability bound, which assigns elements to different labels.
-
-    ``` sql
-    SELECT ans < 1.1 AND ans > 0.5 FROM
-    (WITH (SELECT state FROM your_model) AS model SELECT
-    evalMLMethod(model, param1, param2) AS ans FROM test_data)
-    ```
-
-    Then the result will be labels.
-
-    `test_data` is a table like `train_data`, but it may not contain the target value.
-
-**See Also**
-
-- [stochasticLinearRegression](#agg_functions-stochasticlinearregression)
-- [Difference between linear and logistic regressions.](https://stackoverflow.com/questions/12146914/what-is-the-difference-between-linear-regression-and-logistic-regression)
-
-## groupBitmapAnd {#groupbitmapand}
-
-Calculates the AND of a bitmap column and returns the cardinality as a value of type UInt64. If the `-State` suffix is added, it returns a [bitmap object](../functions/bitmap_functions.md).
-
-``` sql
-groupBitmapAnd(expr)
-```
-
-**Parameters**
-
-`expr` – An expression that results in the `AggregateFunction(groupBitmap, UInt*)` type.
-
-**Return value**
-
-Value of the `UInt64` type.
-
-**Example**
-
-``` sql
-DROP TABLE IF EXISTS bitmap_column_expr_test2;
-CREATE TABLE bitmap_column_expr_test2
-(
-    tag_id String,
-    z AggregateFunction(groupBitmap, UInt32)
-)
-ENGINE = MergeTree
-ORDER BY tag_id;
-
-INSERT INTO bitmap_column_expr_test2 VALUES ('tag1', bitmapBuild(cast([1,2,3,4,5,6,7,8,9,10] as Array(UInt32))));
-INSERT INTO bitmap_column_expr_test2 VALUES ('tag2', bitmapBuild(cast([6,7,8,9,10,11,12,13,14,15] as Array(UInt32))));
-INSERT INTO bitmap_column_expr_test2 VALUES ('tag3', bitmapBuild(cast([2,4,6,8,10,12] as Array(UInt32))));
-
-SELECT groupBitmapAnd(z) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%');
-┌─groupBitmapAnd(z)─┐
-│                 3 │
-└───────────────────┘
-
-SELECT arraySort(bitmapToArray(groupBitmapAndState(z))) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%');
-┌─arraySort(bitmapToArray(groupBitmapAndState(z)))─┐
-│ [6,8,10]                                         │
-└──────────────────────────────────────────────────┘
-```
-
-## groupBitmapOr {#groupbitmapor}
-
-Calculates the OR of a bitmap column and returns the cardinality as a value of type UInt64. If the `-State` suffix is added, it returns a [bitmap object](../functions/bitmap_functions.md). This is equivalent to `groupBitmapMerge`.
-
-``` sql
-groupBitmapOr(expr)
-```
-
-**Parameters**
-
-`expr` – An expression that results in the `AggregateFunction(groupBitmap, UInt*)` type.
-
-**Return value**
-
-Value of the `UInt64` type.
-
-**Example**
-
-``` sql
-DROP TABLE IF EXISTS bitmap_column_expr_test2;
-CREATE TABLE bitmap_column_expr_test2
-(
-    tag_id String,
-    z AggregateFunction(groupBitmap, UInt32)
-)
-ENGINE = MergeTree
-ORDER BY tag_id;
-
-INSERT INTO bitmap_column_expr_test2 VALUES ('tag1', bitmapBuild(cast([1,2,3,4,5,6,7,8,9,10] as Array(UInt32))));
-INSERT INTO bitmap_column_expr_test2 VALUES ('tag2', bitmapBuild(cast([6,7,8,9,10,11,12,13,14,15] as Array(UInt32))));
-INSERT INTO bitmap_column_expr_test2 VALUES ('tag3', bitmapBuild(cast([2,4,6,8,10,12] as Array(UInt32))));
-
-SELECT groupBitmapOr(z) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%');
-┌─groupBitmapOr(z)─┐
-│               15 │
-└──────────────────┘
-
-SELECT arraySort(bitmapToArray(groupBitmapOrState(z))) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%');
-┌─arraySort(bitmapToArray(groupBitmapOrState(z)))─┐
-│ [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]           │
-└─────────────────────────────────────────────────┘
-```
-
-## groupBitmapXor {#groupbitmapxor}
-
-Calculates the XOR of a bitmap column and returns the cardinality as a value of type UInt64. If the `-State` suffix is added, it returns a [bitmap object](../functions/bitmap_functions.md).
-
-``` sql
-groupBitmapXor(expr)
-```
-
-**Parameters**
-
-`expr` – An expression that results in the `AggregateFunction(groupBitmap, UInt*)` type.
-
-**Return value**
-
-Value of the `UInt64` type.
-
-**Example**
-
-``` sql
-DROP TABLE IF EXISTS bitmap_column_expr_test2;
-CREATE TABLE bitmap_column_expr_test2
-(
-    tag_id String,
-    z AggregateFunction(groupBitmap, UInt32)
-)
-ENGINE = MergeTree
-ORDER BY tag_id;
-
-INSERT INTO bitmap_column_expr_test2 VALUES ('tag1', bitmapBuild(cast([1,2,3,4,5,6,7,8,9,10] as Array(UInt32))));
-INSERT INTO bitmap_column_expr_test2 VALUES ('tag2', bitmapBuild(cast([6,7,8,9,10,11,12,13,14,15] as Array(UInt32))));
-INSERT INTO bitmap_column_expr_test2 VALUES ('tag3', bitmapBuild(cast([2,4,6,8,10,12] as Array(UInt32))));
-
-SELECT groupBitmapXor(z) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%');
-┌─groupBitmapXor(z)─┐
-│                10 │
-└───────────────────┘
-
-SELECT arraySort(bitmapToArray(groupBitmapXorState(z))) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%');
-┌─arraySort(bitmapToArray(groupBitmapXorState(z)))─┐
-│ [1,3,5,6,8,10,11,13,14,15]                       │
-└──────────────────────────────────────────────────┘
-```
-
-[Original article](https://clickhouse.tech/docs/en/query_language/agg_functions/reference/)
diff --git a/docs/zh/query_language/alter.md b/docs/zh/query_language/alter.md
deleted file mode 100644
index a2b05037315..00000000000
--- a/docs/zh/query_language/alter.md
+++ /dev/null
@@ -1,502 +0,0 @@
----
-en_copy: true
----
-
-## ALTER {#query_language_queries_alter}
-
-The `ALTER` query is only supported for `*MergeTree` tables, as well as `Merge` and `Distributed`. The query has several variations.
-
-### Column Manipulations {#column-manipulations}
-
-Changing the table structure.
-
-``` sql
-ALTER TABLE [db].name [ON CLUSTER cluster] ADD|DROP|CLEAR|COMMENT|MODIFY COLUMN ...
-```
-
-In the query, specify a list of one or more comma-separated actions.
-Each action is an operation on a column.
-
-The following actions are supported:
-
-- [ADD COLUMN](#alter_add-column) — Adds a new column to the table.
-- [DROP COLUMN](#alter_drop-column) — Deletes the column.
-- [CLEAR COLUMN](#alter_clear-column) — Resets column values.
-- [COMMENT COLUMN](#alter_comment-column) — Adds a text comment to the column.
-- [MODIFY COLUMN](#alter_modify-column) — Changes the column’s type, default expression and TTL.
- -These actions are described in detail below. - -#### ADD COLUMN {#alter_add-column} - -``` sql -ADD COLUMN [IF NOT EXISTS] name [type] [default_expr] [codec] [AFTER name_after] -``` - -Adds a new column to the table with the specified `name`, `type`, [`codec`](create.md#codecs) and `default_expr` (see the section [Default expressions](create.md#create-default-values)). - -If the `IF NOT EXISTS` clause is included, the query won’t return an error if the column already exists. If you specify `AFTER name_after` (the name of another column), the column is added after the specified one in the list of table columns. Otherwise, the column is added to the end of the table. Note that there is no way to add a column to the beginning of a table. For a chain of actions, `name_after` can be the name of a column that is added in one of the previous actions. - -Adding a column just changes the table structure, without performing any actions with data. The data doesn’t appear on the disk after `ALTER`. If the data is missing for a column when reading from the table, it is filled in with default values (by performing the default expression if there is one, or using zeros or empty strings). The column appears on the disk after merging data parts (see [MergeTree](../operations/table_engines/mergetree.md)). - -This approach allows us to complete the `ALTER` query instantly, without increasing the volume of old data. - -Example: - -``` sql -ALTER TABLE visits ADD COLUMN browser String AFTER user_id -``` - -#### DROP COLUMN {#alter_drop-column} - -``` sql -DROP COLUMN [IF EXISTS] name -``` - -Deletes the column with the name `name`. If the `IF EXISTS` clause is specified, the query won’t return an error if the column doesn’t exist. - -Deletes data from the file system. Since this deletes entire files, the query is completed almost instantly. - -Example: - -``` sql -ALTER TABLE visits DROP COLUMN browser -``` - -#### CLEAR COLUMN {#alter_clear-column} - -``` sql -CLEAR COLUMN [IF EXISTS] name IN PARTITION partition_name -``` - -Resets all data in a column for a specified partition. Read more about setting the partition name in the section [How to specify the partition expression](#alter-how-to-specify-part-expr). - -If the `IF EXISTS` clause is specified, the query won’t return an error if the column doesn’t exist. - -Example: - -``` sql -ALTER TABLE visits CLEAR COLUMN browser IN PARTITION tuple() -``` - -#### COMMENT COLUMN {#alter_comment-column} - -``` sql -COMMENT COLUMN [IF EXISTS] name 'comment' -``` - -Adds a comment to the column. If the `IF EXISTS` clause is specified, the query won’t return an error if the column doesn’t exist. - -Each column can have one comment. If a comment already exists for the column, a new comment overwrites the previous comment. - -Comments are stored in the `comment_expression` column returned by the [DESCRIBE TABLE](misc.md#misc-describe-table) query. - -Example: - -``` sql -ALTER TABLE visits COMMENT COLUMN browser 'The table shows the browser used for accessing the site.' -``` - -#### MODIFY COLUMN {#alter_modify-column} - -``` sql -MODIFY COLUMN [IF EXISTS] name [type] [default_expr] [TTL] -``` - -This query changes the `name` column properties: - -- Type - -- Default expression - -- TTL - - For examples of columns TTL modifying, see [Column TTL](../operations/table_engines/mergetree.md#mergetree-column-ttl). - -If the `IF EXISTS` clause is specified, the query won’t return an error if the column doesn’t exist. 
- -When changing the type, values are converted as if the [toType](functions/type_conversion_functions.md) functions were applied to them. If only the default expression is changed, the query doesn’t do anything complex, and is completed almost instantly. - -Example: - -``` sql -ALTER TABLE visits MODIFY COLUMN browser Array(String) -``` - -Changing the column type is the only complex action – it changes the contents of files with data. For large tables, this may take a long time. - -There are several processing stages: - -- Preparing temporary (new) files with modified data. -- Renaming old files. -- Renaming the temporary (new) files to the old names. -- Deleting the old files. - -Only the first stage takes time. If there is a failure at this stage, the data is not changed. -If there is a failure during one of the successive stages, data can be restored manually. The exception is if the old files were deleted from the file system but the data for the new files did not get written to the disk and was lost. - -The `ALTER` query for changing columns is replicated. The instructions are saved in ZooKeeper, then each replica applies them. All `ALTER` queries are run in the same order. The query waits for the appropriate actions to be completed on the other replicas. However, a query to change columns in a replicated table can be interrupted, and all actions will be performed asynchronously. - -#### ALTER Query Limitations {#alter-query-limitations} - -The `ALTER` query lets you create and delete separate elements (columns) in nested data structures, but not whole nested data structures. To add a nested data structure, you can add columns with a name like `name.nested_name` and the type `Array(T)`. A nested data structure is equivalent to multiple array columns with a name that has the same prefix before the dot. - -There is no support for deleting columns in the primary key or the sampling key (columns that are used in the `ENGINE` expression). Changing the type for columns that are included in the primary key is only possible if this change does not cause the data to be modified (for example, you are allowed to add values to an Enum or to change a type from `DateTime` to `UInt32`). - -If the `ALTER` query is not sufficient to make the table changes you need, you can create a new table, copy the data to it using the [INSERT SELECT](insert_into.md#insert_query_insert-select) query, then switch the tables using the [RENAME](misc.md#misc_operations-rename) query and delete the old table. You can use the [clickhouse-copier](../operations/utils/clickhouse-copier.md) as an alternative to the `INSERT SELECT` query. - -The `ALTER` query blocks all reads and writes for the table. In other words, if a long `SELECT` is running at the time of the `ALTER` query, the `ALTER` query will wait for it to complete. At the same time, all new queries to the same table will wait while this `ALTER` is running. - -For tables that don’t store data themselves (such as `Merge` and `Distributed`), `ALTER` just changes the table structure, and does not change the structure of subordinate tables. For example, when running ALTER for a `Distributed` table, you will also need to run `ALTER` for the tables on all remote servers. 
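-
-For example, a minimal sketch of this pattern (the cluster name `my_cluster` and the tables `visits_local` and `visits_all` are hypothetical):
-
-``` sql
--- Change the underlying local tables on every server of the cluster.
-ALTER TABLE visits_local ON CLUSTER my_cluster ADD COLUMN browser String;
--- Then change the structure of the Distributed table itself.
-ALTER TABLE visits_all ON CLUSTER my_cluster ADD COLUMN browser String;
-```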
-
-### Manipulations With Key Expressions {#manipulations-with-key-expressions}
-
-The following command is supported:
-
-``` sql
-MODIFY ORDER BY new_expression
-```
-
-It only works for tables in the [`MergeTree`](../operations/table_engines/mergetree.md) family (including
-[replicated](../operations/table_engines/replication.md) tables). The command changes the
-[sorting key](../operations/table_engines/mergetree.md) of the table
-to `new_expression` (an expression or a tuple of expressions). The primary key remains the same.
-
-The command is lightweight in the sense that it only changes metadata. To keep the property that data part
-rows are ordered by the sorting key expression, you cannot add expressions containing existing columns
-to the sorting key (only columns added by the `ADD COLUMN` command in the same `ALTER` query).
-
-### Manipulations With Data Skipping Indices {#manipulations-with-data-skipping-indices}
-
-It only works for tables in the [`*MergeTree`](../operations/table_engines/mergetree.md) family (including
-[replicated](../operations/table_engines/replication.md) tables). The following operations
-are available:
-
-- `ALTER TABLE [db].name ADD INDEX name expression TYPE type GRANULARITY value AFTER name [AFTER name2]` - Adds the index description to the table’s metadata.
-
-- `ALTER TABLE [db].name DROP INDEX name` - Removes the index description from the table’s metadata and deletes the index files from disk.
-
-These commands are lightweight in the sense that they only change metadata or remove files.
-They are also replicated (syncing indices metadata through ZooKeeper).
-
-### Manipulations with constraints {#manipulations-with-constraints}
-
-See more on [constraints](create.md#constraints).
-
-Constraints can be added or deleted using the following syntax:
-
-``` sql
-ALTER TABLE [db].name ADD CONSTRAINT constraint_name CHECK expression;
-ALTER TABLE [db].name DROP CONSTRAINT constraint_name;
-```
-
-Queries will add or remove metadata about constraints from the table, so they are processed immediately.
-
-The constraint check *will not be executed* on existing data if it was added.
-
-All changes on replicated tables are broadcast to ZooKeeper, so they will be applied on the other replicas as well.
-
-### Manipulations With Partitions and Parts {#alter_manipulations-with-partitions}
-
-The following operations with [partitions](../operations/table_engines/custom_partitioning_key.md) are available:
-
-- [DETACH PARTITION](#alter_detach-partition) – Moves a partition to the `detached` directory and forgets it.
-- [DROP PARTITION](#alter_drop-partition) – Deletes a partition.
-- [ATTACH PART\|PARTITION](#alter_attach-partition) – Adds a part or partition from the `detached` directory to the table.
-- [ATTACH PARTITION FROM](#alter_attach-partition-from) – Copies the data partition from one table to another and adds it to the existing data.
-- [REPLACE PARTITION](#alter_replace-partition) - Copies the data partition from one table to another, replacing the existing partition.
-- [MOVE PARTITION TO TABLE](#alter_move_to_table-partition) - Moves the data partition from one table to another.
-- [CLEAR COLUMN IN PARTITION](#alter_clear-column-partition) - Resets the value of a specified column in a partition.
-- [CLEAR INDEX IN PARTITION](#alter_clear-index-partition) - Resets the specified secondary index in a partition.
-- [FREEZE PARTITION](#alter_freeze-partition) – Creates a backup of a partition.
-- [FETCH PARTITION](#alter_fetch-partition) – Downloads a partition from another server.
-- [MOVE PARTITION\|PART](#alter_move-partition) – Moves a partition/data part to another disk or volume.
-
-#### DETACH PARTITION {#alter_detach-partition}
-
-``` sql
-ALTER TABLE table_name DETACH PARTITION partition_expr
-```
-
-Moves all data for the specified partition to the `detached` directory. The server forgets about the detached data partition as if it does not exist. The server will not know about this data until you make the [ATTACH](#alter_attach-partition) query.
-
-Example:
-
-``` sql
-ALTER TABLE visits DETACH PARTITION 201901
-```
-
-Read about setting the partition expression in the section [How to specify the partition expression](#alter-how-to-specify-part-expr).
-
-After the query is executed, you can do whatever you want with the data in the `detached` directory — delete it from the file system, or just leave it.
-
-This query is replicated – it moves the data to the `detached` directory on all replicas. Note that you can execute this query only on a leader replica. To find out if a replica is a leader, perform the `SELECT` query to the [system.replicas](../operations/system_tables.md#system_tables-replicas) table. Alternatively, it is easier to make a `DETACH` query on all replicas - all the replicas throw an exception, except the leader replica.
-
-#### DROP PARTITION {#alter_drop-partition}
-
-``` sql
-ALTER TABLE table_name DROP PARTITION partition_expr
-```
-
-Deletes the specified partition from the table. This query tags the partition as inactive and deletes the data completely, in approximately 10 minutes.
-
-Read about setting the partition expression in the section [How to specify the partition expression](#alter-how-to-specify-part-expr).
-
-The query is replicated – it deletes data on all replicas.
-
-#### DROP DETACHED PARTITION\|PART {#alter_drop-detached}
-
-``` sql
-ALTER TABLE table_name DROP DETACHED PARTITION|PART partition_expr
-```
-
-Removes the specified part or all parts of the specified partition from `detached`.
-Read more about setting the partition expression in the section [How to specify the partition expression](#alter-how-to-specify-part-expr).
-
-#### ATTACH PARTITION\|PART {#alter_attach-partition}
-
-``` sql
-ALTER TABLE table_name ATTACH PARTITION|PART partition_expr
-```
-
-Adds data to the table from the `detached` directory. It is possible to add data for an entire partition or for a separate part. Examples:
-
-``` sql
-ALTER TABLE visits ATTACH PARTITION 201901;
-ALTER TABLE visits ATTACH PART 201901_2_2_0;
-```
-
-Read more about setting the partition expression in the section [How to specify the partition expression](#alter-how-to-specify-part-expr).
-
-This query is replicated. The replica-initiator checks whether there is data in the `detached` directory. If data exists, the query checks its integrity. If everything is correct, the query adds the data to the table. All other replicas download the data from the replica-initiator.
-
-So you can put data in the `detached` directory on one replica, and use the `ALTER ... ATTACH` query to add it to the table on all replicas.
-
-#### ATTACH PARTITION FROM {#alter_attach-partition-from}
-
-``` sql
-ALTER TABLE table2 ATTACH PARTITION partition_expr FROM table1
-```
-
-This query copies the data partition from `table1` to `table2` and adds it to the existing data in `table2`. Note that data won’t be deleted from `table1`.
-
-For the query to run successfully, the following conditions must be met:
-
-- Both tables must have the same structure.
-- Both tables must have the same partition key.
-
-#### REPLACE PARTITION {#alter_replace-partition}
-
-``` sql
-ALTER TABLE table2 REPLACE PARTITION partition_expr FROM table1
-```
-
-This query copies the data partition from `table1` to `table2` and replaces the existing partition in `table2`. Note that data won’t be deleted from `table1`.
-
-For the query to run successfully, the following conditions must be met:
-
-- Both tables must have the same structure.
-- Both tables must have the same partition key.
-
-#### MOVE PARTITION TO TABLE {#alter_move_to_table-partition}
-
-``` sql
-ALTER TABLE table_source MOVE PARTITION partition_expr TO TABLE table_dest
-```
-
-This query moves the data partition from `table_source` to `table_dest`, deleting the data from `table_source`.
-
-For the query to run successfully, the following conditions must be met:
-
-- Both tables must have the same structure.
-- Both tables must have the same partition key.
-- Both tables must be of the same engine family (replicated or non-replicated).
-- Both tables must have the same storage policy.
-
-#### CLEAR COLUMN IN PARTITION {#alter_clear-column-partition}
-
-``` sql
-ALTER TABLE table_name CLEAR COLUMN column_name IN PARTITION partition_expr
-```
-
-Resets all values in the specified column in a partition. If the `DEFAULT` clause was specified when creating the table, this query sets the column to the specified default value.
-
-Example:
-
-``` sql
-ALTER TABLE visits CLEAR COLUMN hour IN PARTITION 201902
-```
-
-#### FREEZE PARTITION {#alter_freeze-partition}
-
-``` sql
-ALTER TABLE table_name FREEZE [PARTITION partition_expr]
-```
-
-This query creates a local backup of a specified partition. If the `PARTITION` clause is omitted, the query creates the backup of all partitions at once.
-
-!!! note "Note"
-    The entire backup process is performed without stopping the server.
-
-Note that for old-style tables you can specify the prefix of the partition name (for example, ‘2019’); then the query creates the backup for all the corresponding partitions. Read about setting the partition expression in the section [How to specify the partition expression](#alter-how-to-specify-part-expr).
-
-At the time of execution, for a data snapshot, the query creates hardlinks to the table data. The hardlinks are placed in the directory `/var/lib/clickhouse/shadow/N/...`, where:
-
-- `/var/lib/clickhouse/` is the working ClickHouse directory specified in the config.
-- `N` is the incremental number of the backup.
-
-!!! note "Note"
-    If you use [a set of disks for data storage in a table](../operations/table_engines/mergetree.md#table_engine-mergetree-multiple-volumes), the `shadow/N` directory appears on every disk, storing the data parts matched by the `PARTITION` expression.
-
-The same structure of directories is created inside the backup as inside `/var/lib/clickhouse/`. The query performs ‘chmod’ for all files, forbidding writing into them.
-
-After creating the backup, you can copy the data from `/var/lib/clickhouse/shadow/` to the remote server and then delete it from the local server. Note that the `ALTER t FREEZE PARTITION` query is not replicated. It creates a local backup only on the local server.
-
-The query creates the backup almost instantly (but first it waits for the current queries to the corresponding table to finish running).
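-
-For illustration, a minimal usage sketch (reusing the `visits` table and the sample partition `201902` from the examples above):
-
-``` sql
-ALTER TABLE visits FREEZE PARTITION 201902
-```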
-
-`ALTER TABLE t FREEZE PARTITION` copies only the data, not table metadata. To make a backup of table metadata, copy the file `/var/lib/clickhouse/metadata/database/table.sql`
-
-To restore data from a backup, do the following:
-
-1. Create the table if it does not exist. To view the query, use the .sql file (replace `ATTACH` in it with `CREATE`).
-2. Copy the data from the `data/database/table/` directory inside the backup to the `/var/lib/clickhouse/data/database/table/detached/` directory.
-3. Run `ALTER TABLE t ATTACH PARTITION` queries to add the data to a table.
-
-Restoring from a backup doesn’t require stopping the server.
-
-For more information about backups and restoring data, see the [Data Backup](../operations/backup.md) section.
-
-#### CLEAR INDEX IN PARTITION {#alter_clear-index-partition}
-
-``` sql
-ALTER TABLE table_name CLEAR INDEX index_name IN PARTITION partition_expr
-```
-
-The query works similarly to `CLEAR COLUMN`, but it resets an index instead of column data.
-
-#### FETCH PARTITION {#alter_fetch-partition}
-
-``` sql
-ALTER TABLE table_name FETCH PARTITION partition_expr FROM 'path-in-zookeeper'
-```
-
-Downloads a partition from another server. This query only works for replicated tables.
-
-The query does the following:
-
-1. Downloads the partition from the specified shard. In ‘path-in-zookeeper’ you must specify a path to the shard in ZooKeeper.
-2. Then the query puts the downloaded data to the `detached` directory of the `table_name` table. Use the [ATTACH PARTITION\|PART](#alter_attach-partition) query to add the data to the table.
-
-For example:
-
-``` sql
-ALTER TABLE users FETCH PARTITION 201902 FROM '/clickhouse/tables/01-01/visits';
-ALTER TABLE users ATTACH PARTITION 201902;
-```
-
-Note that:
-
-- The `ALTER ... FETCH PARTITION` query isn’t replicated. It places the partition to the `detached` directory only on the local server.
-- The `ALTER TABLE ... ATTACH` query is replicated. It adds the data to all replicas. The data is added to one of the replicas from the `detached` directory, and to the others – from neighboring replicas.
-
-Before downloading, the system checks if the partition exists and the table structure matches. The most appropriate replica is selected automatically from the healthy replicas.
-
-Although the query is called `ALTER TABLE`, it does not change the table structure and does not immediately change the data available in the table.
-
-#### MOVE PARTITION\|PART {#alter_move-partition}
-
-Moves partitions or data parts to another volume or disk for `MergeTree`-engine tables. See [Using Multiple Block Devices for Data Storage](../operations/table_engines/mergetree.md#table_engine-mergetree-multiple-volumes).
-
-``` sql
-ALTER TABLE table_name MOVE PARTITION|PART partition_expr TO DISK|VOLUME 'disk_name'
-```
-
-The `ALTER TABLE t MOVE` query:
-
-- Is not replicated, because different replicas can have different storage policies.
-- Returns an error if the specified disk or volume is not configured. The query also returns an error if the data-moving conditions specified in the storage policy can’t be applied.
-- Can return an error when the data to be moved has already been moved by a background process, a concurrent `ALTER TABLE t MOVE` query, or as a result of background data merging. A user shouldn’t perform any additional actions in this case.
-
-Example:
-
-``` sql
-ALTER TABLE hits MOVE PART '20190301_14343_16206_438' TO VOLUME 'slow'
-ALTER TABLE hits MOVE PARTITION '2019-09-01' TO DISK 'fast_ssd'
-```
-
-#### How To Set Partition Expression {#alter-how-to-specify-part-expr}
-
-You can specify the partition expression in `ALTER ... PARTITION` queries in different ways:
-
-- As a value from the `partition` column of the `system.parts` table. For example, `ALTER TABLE visits DETACH PARTITION 201901`.
-- As the expression from the table column. Constants and constant expressions are supported. For example, `ALTER TABLE visits DETACH PARTITION toYYYYMM(toDate('2019-01-25'))`.
-- Using the partition ID. The partition ID is a string identifier of the partition (human-readable, if possible) that is used as the name of the partition in the file system and in ZooKeeper. The partition ID must be specified in the `PARTITION ID` clause, in single quotes. For example, `ALTER TABLE visits DETACH PARTITION ID '201901'`.
-- In the [ALTER ATTACH PART](#alter_attach-partition) and [DROP DETACHED PART](#alter_drop-detached) queries, to specify the name of a part, use a string literal with a value from the `name` column of the [system.detached\_parts](../operations/system_tables.md#system_tables-detached_parts) table. For example, `ALTER TABLE visits ATTACH PART '201901_1_1_0'`.
-
-Usage of quotes when specifying the partition depends on the type of the partition expression. For example, for the `String` type, you have to specify its name in quotes (`'`). For the `Date` and `Int*` types no quotes are needed.
-
-For old-style tables, you can specify the partition either as a number `201901` or a string `'201901'`. The syntax for the new-style tables is stricter with types (similar to the parser for the VALUES input format).
-
-All the rules above are also true for the [OPTIMIZE](misc.md#misc_operations-optimize) query. If you need to specify the only partition when optimizing a non-partitioned table, set the expression `PARTITION tuple()`. For example:
-
-``` sql
-OPTIMIZE TABLE table_not_partitioned PARTITION tuple() FINAL;
-```
-
-The examples of `ALTER ... PARTITION` queries are demonstrated in the tests [`00502_custom_partitioning_local`](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00502_custom_partitioning_local.sql) and [`00502_custom_partitioning_replicated_zookeeper`](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper.sql).
-
-### Manipulations with Table TTL {#manipulations-with-table-ttl}
-
-You can change [table TTL](../operations/table_engines/mergetree.md#mergetree-table-ttl) with a query of the following form:
-
-``` sql
-ALTER TABLE table-name MODIFY TTL ttl-expression
-```
-
-### Synchronicity of ALTER Queries {#synchronicity-of-alter-queries}
-
-For non-replicated tables, all `ALTER` queries are performed synchronously. For replicated tables, the query just adds instructions for the appropriate actions to `ZooKeeper`, and the actions themselves are performed as soon as possible. However, the query can wait for these actions to be completed on all the replicas.
-
-For `ALTER ... ATTACH|DETACH|DROP` queries, you can use the `replication_alter_partitions_sync` setting to set up waiting.
-Possible values: `0` – do not wait; `1` – only wait for own execution (default); `2` – wait for all.
-
-### Mutations {#alter-mutations}
-
-Mutations are an ALTER query variant that allows changing or deleting rows in a table.
-In contrast to standard `UPDATE` and `DELETE` queries that are intended for point data changes, mutations are intended for heavy operations that change a lot of rows in a table. Supported for the `MergeTree` family of table engines, including the engines with replication support.
-
-Existing tables are ready for mutations as-is (no conversion necessary), but after the first mutation is applied to a table, its metadata format becomes incompatible with previous server versions and falling back to a previous version becomes impossible.
-
-Currently available commands:
-
-``` sql
-ALTER TABLE [db.]table DELETE WHERE filter_expr
-```
-
-The `filter_expr` must be of type `UInt8`. The query deletes rows in the table for which this expression takes a non-zero value.
-
-``` sql
-ALTER TABLE [db.]table UPDATE column1 = expr1 [, ...] WHERE filter_expr
-```
-
-The `filter_expr` must be of type `UInt8`. This query updates values of the specified columns to the values of the corresponding expressions in rows for which the `filter_expr` takes a non-zero value. Values are cast to the column type using the `CAST` operator. Updating columns that are used in the calculation of the primary or the partition key is not supported.
-
-``` sql
-ALTER TABLE [db.]table MATERIALIZE INDEX name IN PARTITION partition_name
-```
-
-The query rebuilds the secondary index `name` in the partition `partition_name`.
-
-One query can contain several commands separated by commas.
-
-For \*MergeTree tables, mutations are executed by rewriting whole data parts. There is no atomicity - parts are substituted for mutated parts as soon as they are ready, and a `SELECT` query that started executing during a mutation will see data from parts that have already been mutated along with data from parts that have not been mutated yet.
-
-Mutations are totally ordered by their creation order and are applied to each part in that order. Mutations are also partially ordered with INSERTs - data that was inserted into the table before the mutation was submitted will be mutated, and data that was inserted after that will not be mutated. Note that mutations do not block INSERTs in any way.
-
-A mutation query returns immediately after the mutation entry is added (for replicated tables, to ZooKeeper; for non-replicated tables, to the filesystem). The mutation itself executes asynchronously using the system profile settings. To track the progress of mutations you can use the [`system.mutations`](../operations/system_tables.md#system_tables-mutations) table. A mutation that was successfully submitted will continue to execute even if ClickHouse servers are restarted. There is no way to roll back the mutation once it is submitted, but if the mutation is stuck for some reason it can be cancelled with the [`KILL MUTATION`](misc.md#kill-mutation) query.
-
-Entries for finished mutations are not deleted right away (the number of preserved entries is determined by the `finished_mutations_to_keep` storage engine parameter). Older mutation entries are deleted.
-
-[Original article](https://clickhouse.tech/docs/en/query_language/alter/)
diff --git a/docs/zh/query_language/dicts/external_dicts.md b/docs/zh/query_language/dicts/external_dicts.md
deleted file mode 100644
index ef41a48f95f..00000000000
--- a/docs/zh/query_language/dicts/external_dicts.md
+++ /dev/null
@@ -1,53 +0,0 @@
----
-en_copy: true
----
-
-# External Dictionaries {#dicts-external-dicts}
-
-You can add your own dictionaries from various data sources.
-The data source for a dictionary can be a local text or executable file, an HTTP(s) resource, or another DBMS. For more information, see “[Sources for external dictionaries](external_dicts_dict_sources.md)”.
-
-ClickHouse:
-
-- Fully or partially stores dictionaries in RAM.
-- Periodically updates dictionaries and dynamically loads missing values. In other words, dictionaries can be loaded dynamically.
-- Allows creating external dictionaries from XML files or [DDL queries](../create.md#create-dictionary-query).
-
-The configuration of external dictionaries can be located in one or more XML files. The path to the configuration is specified in the [dictionaries\_config](../../operations/server_settings/settings.md#server_settings-dictionaries_config) parameter.
-
-Dictionaries can be loaded at server startup or at first use, depending on the [dictionaries\_lazy\_load](../../operations/server_settings/settings.md#server_settings-dictionaries_lazy_load) setting.
-
-The dictionary configuration file has the following format:
-
-``` xml
-<yandex>
-    <comment>An optional element with any content. Ignored by the ClickHouse server.</comment>
-
-    <!--Optional element. File name with substitutions-->
-    <include_from>/etc/metrika.xml</include_from>
-
-    <dictionary>
-        <!-- Dictionary configuration -->
-    </dictionary>
-
-    <!-- There can be any number of <dictionary> sections in the configuration file. -->
-</yandex>
-```
-
-You can [configure](external_dicts_dict.md) any number of dictionaries in the same file.
-
-[DDL queries for dictionaries](../create.md#create-dictionary-query) don’t require any additional records in the server configuration. They allow working with dictionaries as first-class entities, like tables or views.
-
-!!! attention "Attention"
-    You can convert values for a small dictionary by describing it in a `SELECT` query (see the [transform](../functions/other_functions.md) function). This functionality is not related to external dictionaries.
-
-## See also {#ext-dicts-see-also}
-
-- [Configuring an External Dictionary](external_dicts_dict.md)
-- [Storing Dictionaries in Memory](external_dicts_dict_layout.md)
-- [Dictionary Updates](external_dicts_dict_lifetime.md)
-- [Sources of External Dictionaries](external_dicts_dict_sources.md)
-- [Dictionary Key and Fields](external_dicts_dict_structure.md)
-- [Functions for Working with External Dictionaries](../functions/ext_dict_functions.md)
-
-[Original article](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts/)
diff --git a/docs/zh/query_language/dicts/external_dicts_dict.md b/docs/zh/query_language/dicts/external_dicts_dict.md
deleted file mode 100644
index 0519cd381f4..00000000000
--- a/docs/zh/query_language/dicts/external_dicts_dict.md
+++ /dev/null
@@ -1,50 +0,0 @@
----
-en_copy: true
----
-
-# Configuring an External Dictionary {#dicts-external-dicts-dict}
-
-If a dictionary is configured using an XML file, then the dictionary configuration has the following structure:
-
-``` xml
-<dictionary>
-    <name>dict_name</name>
-
-    <structure>
-      <!-- Complex key configuration -->
-    </structure>
-
-    <source>
-      <!-- Source configuration -->
-    </source>
-
-    <layout>
-      <!-- Memory layout configuration -->
-    </layout>
-
-    <lifetime>
-      <!-- Lifetime of dictionary in memory -->
-    </lifetime>
-</dictionary>
-```
-
-Corresponding [DDL-query](../create.md#create-dictionary-query) has the following structure:
-
-``` sql
-CREATE DICTIONARY dict_name
-(
-    ... -- attributes
-)
-PRIMARY KEY ... -- complex or single key configuration
-SOURCE(...) -- Source configuration
-LAYOUT(...) -- Memory layout configuration
-LIFETIME(...) -- Lifetime of dictionary in memory
-```
-
-- `name` – The identifier that can be used to access the dictionary. Use the characters `[a-zA-Z0-9_\-]`.
-- [source](external_dicts_dict_sources.md) — Source of the dictionary.
-- [layout](external_dicts_dict_layout.md) — Dictionary layout in memory.
-- [structure](external_dicts_dict_structure.md) — Structure of the dictionary. A key and attributes that can be retrieved by this key.
-- [lifetime](external_dicts_dict_lifetime.md) — Frequency of dictionary updates.
-
-[Original article](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict/)
diff --git a/docs/zh/query_language/dicts/external_dicts_dict_hierarchical.md b/docs/zh/query_language/dicts/external_dicts_dict_hierarchical.md
deleted file mode 100644
index 1a1232f95cd..00000000000
--- a/docs/zh/query_language/dicts/external_dicts_dict_hierarchical.md
+++ /dev/null
@@ -1,67 +0,0 @@
----
-en_copy: true
----
-
-# Hierarchical Dictionaries {#hierarchical-dictionaries}
-
-ClickHouse supports hierarchical dictionaries with a [numeric key](external_dicts_dict_structure.md#ext_dict-numeric-key).
-
-Look at the following hierarchical structure:
-
-``` text
-0 (Common parent)
-│
-├── 1 (Russia)
-│   │
-│   └── 2 (Moscow)
-│       │
-│       └── 3 (Center)
-│
-└── 4 (Great Britain)
-    │
-    └── 5 (London)
-```
-
-This hierarchy can be expressed as the following dictionary table.
-
-| region\_id | parent\_region | region\_name  |
-|------------|----------------|---------------|
-| 1          | 0              | Russia        |
-| 2          | 1              | Moscow        |
-| 3          | 2              | Center        |
-| 4          | 0              | Great Britain |
-| 5          | 4              | London        |
-
-This table contains a column `parent_region` that contains the key of the nearest parent for the element.
-
-ClickHouse supports the [hierarchical](external_dicts_dict_structure.md#hierarchical-dict-attr) property for [external dictionary](index.md) attributes. This property allows you to configure a hierarchical dictionary similar to the one described above.
-
-The [dictGetHierarchy](../functions/ext_dict_functions.md#dictgethierarchy) function allows you to get the parent chain of an element.
-
-For our example, the structure of the dictionary can be the following:
-
-``` xml
-<dictionary>
-    <structure>
-        <id>
-            <name>region_id</name>
-        </id>
-
-        <attribute>
-            <name>parent_region</name>
-            <type>UInt64</type>
-            <null_value>0</null_value>
-            <hierarchical>true</hierarchical>
-        </attribute>
-
-        <attribute>
-            <name>region_name</name>
-            <type>String</type>
-            <null_value></null_value>
-        </attribute>
-
-    </structure>
-</dictionary>
-```
-
-[Original article](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_hierarchical/)
diff --git a/docs/zh/query_language/dicts/external_dicts_dict_layout.md b/docs/zh/query_language/dicts/external_dicts_dict_layout.md
deleted file mode 100644
index c6aa101da46..00000000000
--- a/docs/zh/query_language/dicts/external_dicts_dict_layout.md
+++ /dev/null
@@ -1,370 +0,0 @@
----
-en_copy: true
----
-
-# Storing Dictionaries in Memory {#dicts-external-dicts-dict-layout}
-
-There are a variety of ways to store dictionaries in memory.
-
-We recommend [flat](#flat), [hashed](#dicts-external_dicts_dict_layout-hashed) and [complex\_key\_hashed](#complex-key-hashed), which provide optimal processing speed.
-
-Caching is not recommended because of potentially poor performance and difficulties in selecting optimal parameters. Read more in the section “[cache](#cache)”.
-
-There are several ways to improve dictionary performance:
-
-- Call the function for working with the dictionary after `GROUP BY`.
-- Mark attributes to extract as injective. An attribute is called injective if different attribute values correspond to different keys. So when `GROUP BY` uses a function that fetches an attribute value by the key, this function is automatically taken out of `GROUP BY`.
-
-ClickHouse generates an exception for errors with dictionaries. Examples of errors:
-
-- The dictionary being accessed could not be loaded.
-- Error querying a `cached` dictionary.
-
-You can view the list of external dictionaries and their statuses in the `system.dictionaries` table.
-
-The configuration looks like this:
-
-``` xml
-<yandex>
-    <dictionary>
-        ...
-        <layout>
-            <layout_type>
-                <!-- layout settings -->
-            </layout_type>
-        </layout>
-        ...
-    </dictionary>
-</yandex>
-```
-
-Corresponding [DDL-query](../create.md#create-dictionary-query):
-
-``` sql
-CREATE DICTIONARY (...)
-...
-LAYOUT(LAYOUT_TYPE(param value)) -- layout settings
-...
-```
-
-## Ways to Store Dictionaries in Memory {#ways-to-store-dictionaries-in-memory}
-
-- [flat](#flat)
-- [hashed](#dicts-external_dicts_dict_layout-hashed)
-- [sparse\_hashed](#dicts-external_dicts_dict_layout-sparse_hashed)
-- [cache](#cache)
-- [range\_hashed](#range-hashed)
-- [complex\_key\_hashed](#complex-key-hashed)
-- [complex\_key\_cache](#complex-key-cache)
-- [ip\_trie](#ip-trie)
-
-### flat {#flat}
-
-The dictionary is completely stored in memory in the form of flat arrays. The amount of memory used is proportional to the size of the largest key (in space used).
-
-The dictionary key has the `UInt64` type and the key value is limited to 500,000. If a larger key is discovered when creating the dictionary, ClickHouse throws an exception and does not create the dictionary.
-
-All types of sources are supported. When updating, data (from a file or from a table) is read in its entirety.
-
-This method provides the best performance among all available methods of storing the dictionary.
-
-Configuration example:
-
-``` xml
-<layout>
-  <flat />
-</layout>
-```
-
-or
-
-``` sql
-LAYOUT(FLAT())
-```
-
-### hashed {#dicts-external_dicts_dict_layout-hashed}
-
-The dictionary is completely stored in memory in the form of a hash table. The dictionary can contain any number of elements with any identifiers. In practice, the number of keys can reach tens of millions of items.
-
-All types of sources are supported. When updating, data (from a file or from a table) is read in its entirety.
-
-Configuration example:
-
-``` xml
-<layout>
-  <hashed />
-</layout>
-```
-
-or
-
-``` sql
-LAYOUT(HASHED())
-```
-
-### sparse\_hashed {#dicts-external_dicts_dict_layout-sparse_hashed}
-
-Similar to `hashed`, but uses less memory in favor of more CPU usage.
-
-Configuration example:
-
-``` xml
-<layout>
-  <sparse_hashed />
-</layout>
-```
-
-``` sql
-LAYOUT(SPARSE_HASHED())
-```
-
-### complex\_key\_hashed {#complex-key-hashed}
-
-This type of storage is for use with composite [keys](external_dicts_dict_structure.md). Similar to `hashed`.
-
-Configuration example:
-
-``` xml
-<layout>
-  <complex_key_hashed />
-</layout>
-```
-
-``` sql
-LAYOUT(COMPLEX_KEY_HASHED())
-```
-
-### range\_hashed {#range-hashed}
-
-The dictionary is stored in memory in the form of a hash table with an ordered array of ranges and their corresponding values.
-
-This storage method works the same way as `hashed` and allows using date/time (arbitrary numeric type) ranges in addition to the key.
-
-Example: The table contains discounts for each advertiser in the format:
-
-``` text
-+---------------|---------------------|-------------------|--------+
-| advertiser id | discount start date | discount end date | amount |
-+===============+=====================+===================+========+
-| 123           | 2015-01-01          | 2015-01-15        | 0.15   |
-+---------------|---------------------|-------------------|--------+
-| 123           | 2015-01-16          | 2015-01-31        | 0.25   |
-+---------------|---------------------|-------------------|--------+
-| 456           | 2015-01-01          | 2015-01-15        | 0.05   |
-+---------------|---------------------|-------------------|--------+
-```
-
-To work with date ranges, define the `range_min` and `range_max` elements in the [structure](external_dicts_dict_structure.md). These elements must contain the elements `name` and `type` (if `type` is not specified, the default type Date is used). `type` can be any numeric type (Date / DateTime / UInt64 / Int32 / others).
-
-Example:
-
-``` xml
-<structure>
-    <id>
-        <name>Id</name>
-    </id>
-    <range_min>
-        <name>first</name>
-        <type>Date</type>
-    </range_min>
-    <range_max>
-        <name>last</name>
-        <type>Date</type>
-    </range_max>
-    ...
-```
-
-or
-
-``` sql
-CREATE DICTIONARY somedict (
-    id UInt64,
-    first Date,
-    last Date
-)
-PRIMARY KEY id
-LAYOUT(RANGE_HASHED())
-RANGE(MIN first MAX last)
-```
-
-To work with these dictionaries, you need to pass an additional argument to the `dictGetT` function, for which a range is selected:
-
-``` sql
-dictGetT('dict_name', 'attr_name', id, date)
-```
-
-This function returns the value for the specified `id`s and the date range that includes the passed date.
-
-Details of the algorithm:
-
-- If the `id` is not found or a range is not found for the `id`, it returns the default value for the dictionary.
-- If there are overlapping ranges, any of them can be used.
-- If the range delimiter is `NULL` or an invalid date (such as 1900-01-01 or 2039-01-01), the range is left open. The range can be open on both sides.
-
-Configuration example:
-
-``` xml
-<yandex>
-    <dictionary>
-        ...
-        <layout>
-            <range_hashed />
-        </layout>
-        <structure>
-            <id>
-                <name>Abcdef</name>
-            </id>
-            <range_min>
-                <name>StartTimeStamp</name>
-                <type>UInt64</type>
-            </range_min>
-            <range_max>
-                <name>EndTimeStamp</name>
-                <type>UInt64</type>
-            </range_max>
-            <attribute>
-                <name>XXXType</name>
-                <type>String</type>
-                <null_value />
-            </attribute>
-        </structure>
-        ...
-    </dictionary>
-</yandex>
-```
-
-or
-
-``` sql
-CREATE DICTIONARY somedict(
-    Abcdef UInt64,
-    StartTimeStamp UInt64,
-    EndTimeStamp UInt64,
-    XXXType String DEFAULT ''
-)
-PRIMARY KEY Abcdef
-RANGE(MIN StartTimeStamp MAX EndTimeStamp)
-```
-
-### cache {#cache}
-
-The dictionary is stored in a cache that has a fixed number of cells. These cells contain frequently used elements.
-
-When searching for a dictionary, the cache is searched first. For each block of data, all keys that are not found in the cache or are outdated are requested from the source using `SELECT attrs... FROM db.table WHERE id IN (k1, k2, ...)`. The received data is then written to the cache.
-
-For cache dictionaries, the expiration [lifetime](external_dicts_dict_lifetime.md) of data in the cache can be set. If more time than `lifetime` has passed since loading the data in a cell, the cell’s value is not used, and it is re-requested the next time it needs to be used.
-This is the least effective of all the ways to store dictionaries. The speed of the cache depends strongly on correct settings and the usage scenario. A cache-type dictionary performs well only when the hit rates are high enough (recommended 99% and higher). You can view the average hit rate in the `system.dictionaries` table.
-
-To improve cache performance, use a subquery with `LIMIT`, and call the function with the dictionary externally.
-
-Supported [sources](external_dicts_dict_sources.md): MySQL, ClickHouse, executable, HTTP.
-
-Example of settings:
-
-``` xml
-<layout>
-    <cache>
-        <!-- The size of the cache, in number of cells. Rounded up to a power of two. -->
-        <size_in_cells>1000000000</size_in_cells>
-    </cache>
-</layout>
-```
-
-or
-
-``` sql
-LAYOUT(CACHE(SIZE_IN_CELLS 1000000000))
-```
-
-Set a large enough cache size. You need to experiment to select the number of cells:
-
-1. Set some value.
-2. Run queries until the cache is completely full.
-3. Assess memory consumption using the `system.dictionaries` table.
-4. Increase or decrease the number of cells until the required memory consumption is reached.
-
-!!! warning "Warning"
-    Do not use ClickHouse as a source, because it is slow to process queries with random reads.
-
-### complex\_key\_cache {#complex-key-cache}
-
-This type of storage is for use with composite [keys](external_dicts_dict_structure.md). Similar to `cache`.
-
-### ip\_trie {#ip-trie}
-
-This type of storage is for mapping network prefixes (IP addresses) to metadata such as ASN.
-
-Example: The table contains network prefixes and their corresponding AS number and country code:
-
-``` text
-  +-----------------|-------|--------+
-  | prefix          | asn   | cca2   |
-  +=================+=======+========+
-  | 202.79.32.0/20  | 17501 | NP     |
-  +-----------------|-------|--------+
-  | 2620:0:870::/48 | 3856  | US     |
-  +-----------------|-------|--------+
-  | 2a02:6b8:1::/48 | 13238 | RU     |
-  +-----------------|-------|--------+
-  | 2001:db8::/32   | 65536 | ZZ     |
-  +-----------------|-------|--------+
-```
-
-When using this type of layout, the structure must have a composite key.
-
-Example:
-
-``` xml
-<structure>
-    <key>
-        <attribute>
-            <name>prefix</name>
-            <type>String</type>
-        </attribute>
-    </key>
-    <attribute>
-            <name>asn</name>
-            <type>UInt32</type>
-            <null_value />
-    </attribute>
-    <attribute>
-            <name>cca2</name>
-            <type>String</type>
-            <null_value>??</null_value>
-    </attribute>
-    ...
-```
-
-or
-
-``` sql
-CREATE DICTIONARY somedict (
-    prefix String,
-    asn UInt32,
-    cca2 String DEFAULT '??'
-)
-PRIMARY KEY prefix
-```
-
-The key must have only one `String`-type attribute that contains an allowed IP prefix. Other types are not supported yet.
-
-For queries, you must use the same functions (`dictGetT` with a tuple) as for dictionaries with composite keys:
-
-``` sql
-dictGetT('dict_name', 'attr_name', tuple(ip))
-```
-
-The function takes either `UInt32` for IPv4, or `FixedString(16)` for IPv6:
-
-``` sql
-dictGetString('prefix', 'asn', tuple(IPv6StringToNum('2001:db8::1')))
-```
-
-Other types are not supported yet. The function returns the attribute for the prefix that corresponds to this IP address. If there are overlapping prefixes, the most specific one is returned.
-
-Data is stored in a `trie`. It must completely fit into RAM.
-
-[Original article](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_layout/)
diff --git a/docs/zh/query_language/dicts/external_dicts_dict_lifetime.md b/docs/zh/query_language/dicts/external_dicts_dict_lifetime.md
deleted file mode 100644
index 3a90e437681..00000000000
--- a/docs/zh/query_language/dicts/external_dicts_dict_lifetime.md
+++ /dev/null
@@ -1,83 +0,0 @@
----
-en_copy: true
----
-
-# Dictionary Updates {#dictionary-updates}
-
-ClickHouse periodically updates the dictionaries. The update interval for fully downloaded dictionaries and the invalidation interval for cached dictionaries are defined in the `<lifetime>` tag, in seconds.
-
-Dictionary updates (other than loading for first use) do not block queries. During updates, the old version of a dictionary is used. If an error occurs during an update, the error is written to the server log, and queries continue using the old version of dictionaries.
-
-Example of settings:
-
-``` xml
-<dictionary>
-    ...
-    <lifetime>300</lifetime>
-    ...
-</dictionary>
-```
-
-``` sql
-CREATE DICTIONARY (...)
-...
-LIFETIME(300)
-...
-```
-
-Setting `0` (`LIFETIME(0)`) prevents dictionaries from updating.
-
-You can set a time interval for upgrades, and ClickHouse will choose a uniformly random time within this range. This is necessary in order to distribute the load on the dictionary source when upgrading on a large number of servers.
-
-Example of settings:
-
-``` xml
-<dictionary>
-    ...
-    <lifetime>
-        <min>300</min>
-        <max>360</max>
-    </lifetime>
-    ...
-</dictionary>
-```
-
-or
-
-``` sql
-LIFETIME(MIN 300 MAX 360)
-```
-
-When upgrading the dictionaries, the ClickHouse server applies different logic depending on the type of [source](external_dicts_dict_sources.md):
-
-- For a text file, it checks the time of modification. If the time differs from the previously recorded time, the dictionary is updated.
-- For MyISAM tables, the time of modification is checked using a `SHOW TABLE STATUS` query.
-- Dictionaries from other sources are updated every time by default.
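-
-For example, a hypothetical DDL-defined dictionary that ties the lifetime settings above to a source and layout (the dictionary, table and column names here are assumptions for illustration, not from the original configs):
-
-``` sql
-CREATE DICTIONARY discounts_dict (
-    advertiser_id UInt64,
-    amount Float64
-)
-PRIMARY KEY advertiser_id
-SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' DB 'default' TABLE 'discounts'))
-LAYOUT(HASHED())
-LIFETIME(MIN 300 MAX 360) -- updates happen at a random moment within this range
-```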
-
-For MySQL (InnoDB), ODBC and ClickHouse sources, you can set up a query that will update the dictionaries only if they really changed, rather than each time. To do this, follow these steps:
-
-- The dictionary table must have a field that always changes when the source data is updated.
-- The settings of the source must specify a query that retrieves the changing field. The ClickHouse server interprets the query result as a row, and if this row has changed relative to its previous state, the dictionary is updated. Specify the query in the `<invalidate_query>` field in the settings for the [source](external_dicts_dict_sources.md).
-
-Example of settings:
-
-``` xml
-<dictionary>
-    ...
-    <odbc>
-      ...
-      <invalidate_query>SELECT update_time FROM dictionary_source where id = 1</invalidate_query>
-    </odbc>
-    ...
-</dictionary>
-```
-
-or
-
-``` sql
-...
-SOURCE(ODBC(... invalidate_query 'SELECT update_time FROM dictionary_source where id = 1'))
-...
-```
-
-[Original article](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_lifetime/)
diff --git a/docs/zh/query_language/dicts/external_dicts_dict_structure.md b/docs/zh/query_language/dicts/external_dicts_dict_structure.md
deleted file mode 100644
index acb0ce36875..00000000000
--- a/docs/zh/query_language/dicts/external_dicts_dict_structure.md
+++ /dev/null
@@ -1,172 +0,0 @@
----
-en_copy: true
----
-
-# Dictionary Key and Fields {#dictionary-key-and-fields}
-
-The `<structure>` clause describes the dictionary key and fields available for queries.
-
-XML description:
-
-``` xml
-<dictionary>
-    <structure>
-        <id>
-            <name>Id</name>
-        </id>
-
-        <attribute>
-            <!-- Attribute parameters -->
-        </attribute>
-
-        ...
-
-    </structure>
-</dictionary>
-```
-
-Attributes are described in the elements:
-
-- `<id>` — [Key column](external_dicts_dict_structure.md#ext_dict_structure-key).
-- `<attribute>` — [Data column](external_dicts_dict_structure.md#ext_dict_structure-attributes). There can be multiple attributes.
-
-DDL query:
-
-``` sql
-CREATE DICTIONARY dict_name (
-    Id UInt64,
-    -- attributes
-)
-PRIMARY KEY Id
-...
-```
-
-Attributes are described in the query body:
-
-- `PRIMARY KEY` — [Key column](external_dicts_dict_structure.md#ext_dict_structure-key)
-- `AttrName AttrType` — [Data column](external_dicts_dict_structure.md#ext_dict_structure-attributes). There can be multiple attributes.
-
-## Key {#ext_dict_structure-key}
-
-ClickHouse supports the following types of keys:
-
-- Numeric key. `UInt64`. Defined in the `<id>` tag or using the `PRIMARY KEY` keyword.
-- Composite key. Set of values of different types. Defined in the `<key>` tag or using the `PRIMARY KEY` keyword.
-
-An XML structure can contain either `<id>` or `<key>`. A DDL query must contain a single `PRIMARY KEY`.
-
-!!! warning "Warning"
-    You must not describe the key as an attribute.
-
-### Numeric Key {#ext_dict-numeric-key}
-
-Type: `UInt64`.
-
-Configuration example:
-
-``` xml
-<id>
-    <name>Id</name>
-</id>
-```
-
-Configuration fields:
-
-- `name` – The name of the column with keys.
-
-For DDL-query:
-
-``` sql
-CREATE DICTIONARY (
-    Id UInt64,
-    ...
-)
-PRIMARY KEY Id
-...
-```
-
-- `PRIMARY KEY` – The name of the column with keys.
-
-### Composite Key {#composite-key}
-
-The key can be a `tuple` of fields of any types. The [layout](external_dicts_dict_layout.md) in this case must be `complex_key_hashed` or `complex_key_cache`.
-
-!!! tip "Tip"
-    A composite key can consist of a single element. This makes it possible to use a string as the key, for instance.
-
-The key structure is set in the element `<key>`. Key fields are specified in the same format as the dictionary [attributes](external_dicts_dict_structure.md). Example:
-
-``` xml
-<structure>
-    <key>
-        <attribute>
-            <name>field1</name>
-            <type>String</type>
-        </attribute>
-        <attribute>
-            <name>field2</name>
-            <type>UInt32</type>
-        </attribute>
-        ...
-    </key>
-...
-```
-
-or
-
-``` sql
-CREATE DICTIONARY (
-    field1 String,
-    field2 UInt32
-    ...
-)
-PRIMARY KEY field1, field2
-...
-```
-
-For a query to the `dictGet*` function, a tuple is passed as the key. Example: `dictGetString('dict_name', 'attr_name', tuple('string for field1', num_for_field2))`.
-
-## Attributes {#ext_dict_structure-attributes}
-
-Configuration example:
-
-``` xml
-<structure>
-    ...
-    <attribute>
-        <name>Name</name>
-        <type>ClickHouseDataType</type>
-        <null_value></null_value>
-        <expression>rand64()</expression>
-        <hierarchical>true</hierarchical>
-        <injective>true</injective>
-        <is_object_id>true</is_object_id>
-    </attribute>
-</structure>
-```
-
-or
-
-``` sql
-CREATE DICTIONARY somename (
-    Name ClickHouseDataType DEFAULT '' EXPRESSION rand64() HIERARCHICAL INJECTIVE IS_OBJECT_ID
-)
-```
-
-Configuration fields:
-
-| Tag | Description | Required |
-|----------------|-------------|----------|
-| `name` | Column name. | Yes |
-| `type` | ClickHouse data type. ClickHouse tries to cast the value from the dictionary to the specified data type. For example, for MySQL, the field might be `TEXT`, `VARCHAR`, or `BLOB` in the MySQL source table, but it can be uploaded as `String` in ClickHouse. [Nullable](../../data_types/nullable.md) is not supported. | Yes |
-| `null_value` | Default value for a non-existing element. In the example, it is an empty string. You cannot use `NULL` in this field. | Yes |
-| `expression` | [Expression](../syntax.md#syntax-expressions) that ClickHouse executes on the value. The expression can be a column name in the remote SQL database. Thus, you can use it to create an alias for the remote column. Default value: no expression. | No |
-| `hierarchical` | If `true`, the attribute contains the value of a parent key for the current key. See [Hierarchical Dictionaries](external_dicts_dict_hierarchical.md). Default value: `false`. | No |
-| `injective` | Flag that shows whether the `id -> attribute` image is [injective](https://en.wikipedia.org/wiki/Injective_function). If `true`, ClickHouse can automatically apply requests to injective dictionaries after the `GROUP BY` clause, which usually significantly reduces the number of such requests. Default value: `false`. | No |
-| `is_object_id` | Flag that shows whether the query is executed for a MongoDB document by `ObjectID`. Default value: `false`. | No |
-
-## See Also {#see-also}
-
-- [Functions for working with external dictionaries](../functions/ext_dict_functions.md).
-
-[Original article](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_structure/)
diff --git a/docs/zh/query_language/dicts/index.md b/docs/zh/query_language/dicts/index.md
deleted file mode 100644
index 9c7883cf7a1..00000000000
--- a/docs/zh/query_language/dicts/index.md
+++ /dev/null
@@ -1,18 +0,0 @@
----
-en_copy: true
----
-
-# Dictionaries {#dictionaries}
-
-A dictionary is a mapping (`key -> attributes`) that is convenient for various types of reference lists.
-
-ClickHouse supports special functions for working with dictionaries that can be used in queries. It is easier and more efficient to use dictionaries with functions than a `JOIN` with reference tables.
-
-[NULL](../syntax.md#null) values can’t be stored in a dictionary.
-
-ClickHouse supports:
-
-- [Built-in dictionaries](internal_dicts.md#internal_dicts) with a specific [set of functions](../functions/ym_dict_functions.md).
-- [Plug-in (external) dictionaries](external_dicts.md) with a [set of functions](../functions/ext_dict_functions.md).
-
-[Original article](https://clickhouse.tech/docs/en/query_language/dicts/)
diff --git a/docs/zh/query_language/dicts/internal_dicts.md b/docs/zh/query_language/dicts/internal_dicts.md
deleted file mode 100644
index a7ac9fe7d8c..00000000000
--- a/docs/zh/query_language/dicts/internal_dicts.md
+++ /dev/null
@@ -1,52 +0,0 @@
----
-en_copy: true
----
-
-# Internal dictionaries {#internal_dicts}
-
-ClickHouse contains a built-in feature for working with a geobase.
-
-This allows you to:
-
-- Use a region’s ID to get its name in the desired language.
-- Use a region’s ID to get the ID of a city, area, federal district, country, or continent.
-- Check whether a region is part of another region.
-- Get a chain of parent regions.
-
-All the functions support “translocality,” the ability to simultaneously use different perspectives on region ownership. For more information, see the section “Functions for working with Yandex.Metrica dictionaries”.
-
-The internal dictionaries are disabled in the default package.
-To enable them, uncomment the parameters `path_to_regions_hierarchy_file` and `path_to_regions_names_files` in the server configuration file.
-
-The geobase is loaded from text files.
-
-Place the `regions_hierarchy*.txt` files into the `path_to_regions_hierarchy_file` directory. This configuration parameter must contain the path to the `regions_hierarchy.txt` file (the default regional hierarchy), and the other files (`regions_hierarchy_ua.txt`) must be located in the same directory.
-
-Put the `regions_names_*.txt` files in the `path_to_regions_names_files` directory.
-
-You can also create these files yourself. The file format is as follows:
-
-`regions_hierarchy*.txt`: TabSeparated (no header), columns:
-
-- region ID (`UInt32`)
-- parent region ID (`UInt32`)
-- region type (`UInt8`): 1 - continent, 3 - country, 4 - federal district, 5 - region, 6 - city; other types don’t have values
-- population (`UInt32`) — optional column
-
-`regions_names_*.txt`: TabSeparated (no header), columns:
-
-- region ID (`UInt32`)
-- region name (`String`) — Can’t contain tabs or line feeds, even escaped ones.
-
-A flat array is used for storing in RAM. For this reason, IDs shouldn’t be more than a million.
-
-Dictionaries can be updated without restarting the server. However, the set of available dictionaries is not updated.
-
-For updates, the file modification times are checked. If a file has changed, the dictionary is updated.
-The interval to check for changes is configured in the `builtin_dictionaries_reload_interval` parameter.
-Dictionary updates (other than loading at first use) do not block queries. During updates, queries use the old versions of dictionaries. If an error occurs during an update, the error is written to the server log, and queries continue using the old version of dictionaries.
-
-We recommend periodically updating the dictionaries with the geobase. During an update, generate new files and write them to a separate location. When everything is ready, rename them to the files used by the server.
-
-There are also functions for working with OS identifiers and Yandex.Metrica search engines, but they shouldn’t be used.
-
-[Original article](https://clickhouse.tech/docs/en/query_language/dicts/internal_dicts/)
diff --git a/docs/zh/query_language/functions/ext_dict_functions.md b/docs/zh/query_language/functions/ext_dict_functions.md
deleted file mode 100644
index c1d5d9b60ba..00000000000
--- a/docs/zh/query_language/functions/ext_dict_functions.md
+++ /dev/null
@@ -1,46 +0,0 @@
-# Functions for Working with External Dictionaries {#zi-dian-han-shu}
-
-For information on connecting and configuring external dictionaries, see [External dictionaries](../dicts/external_dicts.md).
-
-## dictGetUInt8, dictGetUInt16, dictGetUInt32, dictGetUInt64 {#dictgetuint8-dictgetuint16-dictgetuint32-dictgetuint64}
-
-## dictGetInt8, dictGetInt16, dictGetInt32, dictGetInt64 {#dictgetint8-dictgetint16-dictgetint32-dictgetint64}
-
-## dictGetFloat32, dictGetFloat64 {#dictgetfloat32-dictgetfloat64}
-
-## dictGetDate, dictGetDateTime {#dictgetdate-dictgetdatetime}
-
-## dictGetUUID {#dictgetuuid}
-
-## dictGetString {#dictgetstring}
-
-`dictGetT('dict_name', 'attr_name', id)`
-
-- Gets the value of the `attr_name` attribute from the `dict_name` dictionary using the ‘id’ key. `dict_name` and `attr_name` are constant strings. `id` must be UInt64.
-  If there is no `id` key in the dictionary, the default value specified in the dictionary description is returned.
-
-## dictGetTOrDefault {#ext_dict_functions-dictgettordefault}
-
-`dictGetTOrDefault('dict_name', 'attr_name', id, default)`
-
-Same as the `dictGetT` functions, but the default value is taken from the last argument of the function.
-
-## dictIsIn {#dictisin}
-
-`dictIsIn ('dict_name', child_id, ancestor_id)`
-
-- For the ‘dict\_name’ hierarchical dictionary, checks whether the ‘child\_id’ key is located inside ‘ancestor\_id’ (or matches ‘ancestor\_id’). Returns UInt8.
-
-## dictGetHierarchy {#dictgethierarchy}
-
-`dictGetHierarchy('dict_name', id)`
-
-- For the ‘dict\_name’ hierarchical dictionary, returns an array of dictionary keys starting from ‘id’ and continuing along the chain of parent elements. Returns Array(UInt64).
-
-## dictHas {#dicthas}
-
-`dictHas('dict_name', id)`
-
-- Checks whether the dictionary has the specified `id`. Returns 0 if it does not exist, or 1 if it does.
-
-[Original article](https://clickhouse.tech/docs/en/query_language/functions/ext_dict_functions/)
diff --git a/docs/zh/query_language/functions/machine_learning_functions.md b/docs/zh/query_language/functions/machine_learning_functions.md
deleted file mode 100644
index e9fe2622a57..00000000000
--- a/docs/zh/query_language/functions/machine_learning_functions.md
+++ /dev/null
@@ -1,15 +0,0 @@
-# Machine Learning Functions {#ji-qi-xue-xi-han-shu}
-
-## evalMLMethod (prediction) {#machine_learning_methods-evalmlmethod}
-
-To get predictions from fitted regression models, use the `evalMLMethod` function. See the link in `linearRegression`.
-
-## Stochastic Linear Regression {#stochastic-linear-regression}
-
-The `stochasticLinearRegression` aggregate function implements the stochastic gradient descent method using a linear model and the MSE loss function. Use `evalMLMethod` to predict on new data.
-See examples and notes [here](../agg_functions/reference.md#agg_functions-stochasticlinearregression).
-
-## Stochastic Logistic Regression {#stochastic-logistic-regression}
-
-The `stochasticLogisticRegression` aggregate function implements the stochastic gradient descent method for binary classification problems. Use `evalMLMethod` to predict on new data.
-See examples and notes [here](../agg_functions/reference.md#agg_functions-stochasticlogisticregression).
diff --git a/docs/zh/query_language/index.md b/docs/zh/query_language/index.md
deleted file mode 100644
index 07950fb56a6..00000000000
--- a/docs/zh/query_language/index.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-en_copy: true
----
-
-# SQL Reference {#sql-reference}
-
-- [SELECT](select.md)
-- [INSERT INTO](insert_into.md)
-- [CREATE](create.md)
-- [ALTER](alter.md#query_language_queries_alter)
-- [Other types of queries](misc.md)
-
-[Original article](https://clickhouse.tech/docs/en/query_language/)
diff --git a/docs/zh/query_language/misc.md b/docs/zh/query_language/misc.md
deleted file mode 100644
index 152dc0dd3b4..00000000000
--- a/docs/zh/query_language/misc.md
+++ /dev/null
@@ -1,249 +0,0 @@
----
-en_copy: true
----
-
-# Miscellaneous Queries {#miscellaneous-queries}
-
-## ATTACH {#attach}
-
-This query is exactly the same as `CREATE`, but
-
-- Instead of the word `CREATE` it uses the word `ATTACH`.
-- The query does not create data on the disk, but assumes that data is already in the appropriate places, and just adds information about the table to the server.
-  After executing an ATTACH query, the server will know about the existence of the table.
-
-If the table was previously detached (`DETACH`), meaning that its structure is known, you can use a shorthand form without defining the structure.
-
-``` sql
-ATTACH TABLE [IF NOT EXISTS] [db.]name [ON CLUSTER cluster]
-```
-
-This query is used when starting the server. The server stores table metadata as files with `ATTACH` queries, which it simply runs at launch (with the exception of system tables, which are explicitly created on the server).
-
-## CHECK TABLE {#check-table}
-
-Checks if the data in the table is corrupted.
-
-``` sql
-CHECK TABLE [db.]name
-```
-
-The `CHECK TABLE` query compares actual file sizes with the expected values which are stored on the server. If the file sizes do not match the stored values, it means the data is corrupted. This can be caused, for example, by a system crash during query execution.
-
-The query response contains the `result` column with a single row. The row has a value of
-[Boolean](../data_types/boolean.md) type:
-
-- 0 - The data in the table is corrupted.
-- 1 - The data maintains integrity.
-
-The `CHECK TABLE` query supports the following table engines:
-
-- [Log](../operations/table_engines/log.md)
-- [TinyLog](../operations/table_engines/tinylog.md)
-- [StripeLog](../operations/table_engines/stripelog.md)
-- [MergeTree family](../operations/table_engines/mergetree.md)
-
-Performing the check over tables with other table engines causes an exception.
-
-Engines from the `*Log` family don’t provide automatic data recovery on failure. Use the `CHECK TABLE` query to track data loss in a timely manner.
-
-For `MergeTree` family engines, the `CHECK TABLE` query shows a check status for every individual data part of a table on the local server.
-
-**If the data is corrupted**
-
-If the table is corrupted, you can copy the non-corrupted data to another table (see the sketch after this list). To do this:
-
-1. Create a new table with the same structure as the damaged table. To do this, execute the query `CREATE TABLE <new_table_name> AS <damaged_table_name>`.
-2. Set the [max\_threads](../operations/settings/settings.md#settings-max_threads) value to 1 to process the next query in a single thread. To do this, run the query `SET max_threads = 1`.
-3. Execute the query `INSERT INTO <new_table_name> SELECT * FROM <damaged_table_name>`. This query copies the non-corrupted data from the damaged table to another table. Only the data before the corrupted part will be copied.
-4. Restart the `clickhouse-client` to reset the `max_threads` value.
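-
-A minimal sketch of this sequence, assuming a hypothetical damaged table `events`:
-
-``` sql
-CREATE TABLE events_recovered AS events;          -- same structure as the damaged table
-SET max_threads = 1;                              -- copy in a single thread
-INSERT INTO events_recovered SELECT * FROM events;
-```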
-
-## DESCRIBE TABLE {#misc-describe-table}
-
-``` sql
-DESC|DESCRIBE TABLE [db.]table [INTO OUTFILE filename] [FORMAT format]
-```
-
-Returns the following `String` type columns:
-
-- `name` — Column name.
-- `type`— Column type.
-- `default_type` — Clause that is used in [default expression](create.md#create-default-values) (`DEFAULT`, `MATERIALIZED` or `ALIAS`). Column contains an empty string, if the default expression isn’t specified.
-- `default_expression` — Value specified in the `DEFAULT` clause.
-- `comment_expression` — Comment text.
-
-Nested data structures are output in “expanded” format. Each column is shown separately, with the name after a dot.
-
-## DETACH {#detach}
-
-Deletes information about the ‘name’ table from the server. The server stops knowing about the table’s existence.
-
-``` sql
-DETACH TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster]
-```
-
-This does not delete the table’s data or metadata. On the next server launch, the server will read the metadata and find out about the table again.
-Similarly, a “detached” table can be re-attached using the `ATTACH` query (with the exception of system tables, which do not have metadata stored for them).
-
-There is no `DETACH DATABASE` query.
-
-## DROP {#drop}
-
-This query has two types: `DROP DATABASE` and `DROP TABLE`.
-
-``` sql
-DROP DATABASE [IF EXISTS] db [ON CLUSTER cluster]
-```
-
-Deletes all tables inside the ‘db’ database, then deletes the ‘db’ database itself.
-If `IF EXISTS` is specified, it doesn’t return an error if the database doesn’t exist.
-
-``` sql
-DROP [TEMPORARY] TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster]
-```
-
-Deletes the table.
-If `IF EXISTS` is specified, it doesn’t return an error if the table doesn’t exist or the database doesn’t exist.
-
-    DROP DICTIONARY [IF EXISTS] [db.]name
-
-Deletes the dictionary.
-If `IF EXISTS` is specified, it doesn’t return an error if the table doesn’t exist or the database doesn’t exist.
-
-## EXISTS {#exists}
-
-``` sql
-EXISTS [TEMPORARY] [TABLE|DICTIONARY] [db.]name [INTO OUTFILE filename] [FORMAT format]
-```
-
-Returns a single `UInt8`-type column, which contains the single value `0` if the table or database doesn’t exist, or `1` if the table exists in the specified database.
-
-## KILL QUERY {#kill-query}
-
-``` sql
-KILL QUERY [ON CLUSTER cluster]
-  WHERE <where expression to SELECT FROM system.processes query>
-  [SYNC|ASYNC|TEST]
-  [FORMAT format]
-```
-
-Attempts to forcibly terminate the currently running queries.
-The queries to terminate are selected from the system.processes table using the criteria defined in the `WHERE` clause of the `KILL` query.
-
-Examples:
-
-``` sql
--- Forcibly terminates all queries with the specified query_id:
-KILL QUERY WHERE query_id='2-857d-4a57-9ee0-327da5d60a90'
-
--- Synchronously terminates all queries run by 'username':
-KILL QUERY WHERE user='username' SYNC
-```
-
-Read-only users can only stop their own queries.
-
-By default, the asynchronous version of queries is used (`ASYNC`), which doesn’t wait for confirmation that queries have stopped.
-
-The synchronous version (`SYNC`) waits for all queries to stop and displays information about each process as it stops.
-The response contains the `kill_status` column, which can take the following values:
-
-1. ‘finished’ – The query was terminated successfully.
-2. ‘waiting’ – Waiting for the query to end after sending it a signal to terminate.
-3. The other values explain why the query can’t be stopped.
-
-A test query (`TEST`) only checks the user’s rights and displays a list of queries to stop.
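-
-For example, a dry run that only lists the queries that would be terminated for the `username` user from the examples above:
-
-``` sql
-KILL QUERY WHERE user='username' TEST
-```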
-
-## KILL MUTATION {#kill-mutation}
-
-``` sql
-KILL MUTATION [ON CLUSTER cluster]
-  WHERE <where expression to SELECT FROM system.mutations query>
-  [TEST]
-  [FORMAT format]
-```
-
-Tries to cancel and remove [mutations](alter.md#alter-mutations) that are currently executing. Mutations to cancel are selected from the [`system.mutations`](../operations/system_tables.md#system_tables-mutations) table using the filter specified by the `WHERE` clause of the `KILL` query.
-
-A test query (`TEST`) only checks the user’s rights and displays a list of queries to stop.
-
-Examples:
-
-``` sql
--- Cancel and remove all mutations of the single table:
-KILL MUTATION WHERE database = 'default' AND table = 'table'
-
--- Cancel the specific mutation:
-KILL MUTATION WHERE database = 'default' AND table = 'table' AND mutation_id = 'mutation_3.txt'
-```
-
-The query is useful when a mutation is stuck and cannot finish (e.g. if some function in the mutation query throws an exception when applied to the data contained in the table).
-
-Changes already made by the mutation are not rolled back.
-
-## OPTIMIZE {#misc_operations-optimize}
-
-``` sql
-OPTIMIZE TABLE [db.]name [ON CLUSTER cluster] [PARTITION partition | PARTITION ID 'partition_id'] [FINAL] [DEDUPLICATE]
-```
-
-This query tries to initialize an unscheduled merge of data parts for tables with a table engine from the [MergeTree](../operations/table_engines/mergetree.md) family.
-
-The `OPTIMIZE` query is also supported for the [MaterializedView](../operations/table_engines/materializedview.md) and the [Buffer](../operations/table_engines/buffer.md) engines. Other table engines aren’t supported.
-
-When `OPTIMIZE` is used with the [ReplicatedMergeTree](../operations/table_engines/replication.md) family of table engines, ClickHouse creates a task for merging and waits for execution on all nodes (if the `replication_alter_partitions_sync` setting is enabled).
-
-- If `OPTIMIZE` doesn’t perform a merge for any reason, it doesn’t notify the client. To enable notifications, use the [optimize\_throw\_if\_noop](../operations/settings/settings.md#setting-optimize_throw_if_noop) setting.
-- If you specify a `PARTITION`, only the specified partition is optimized. [How to set partition expression](alter.md#alter-how-to-specify-part-expr).
-- If you specify `FINAL`, optimization is performed even when all the data is already in one part.
-- If you specify `DEDUPLICATE`, then completely identical rows will be deduplicated (all columns are compared); this makes sense only for the MergeTree engine.
-
-!!! warning "Warning"
-    `OPTIMIZE` can’t fix the “Too many parts” error.
-
-## RENAME {#misc_operations-rename}
-
-Renames one or more tables.
-
-``` sql
-RENAME TABLE [db11.]name11 TO [db12.]name12, [db21.]name21 TO [db22.]name22, ... [ON CLUSTER cluster]
-```
-
-All tables are renamed under global locking. Renaming tables is a lightweight operation. If you indicate another database after TO, the table is moved to that database. However, the directories with databases must reside in the same file system (otherwise, an error is returned).
-
-## SET {#query-set}
-
-``` sql
-SET param = value
-```
-
-Assigns `value` to the `param` [setting](../operations/settings/index.md) for the current session. You cannot change [server settings](../operations/server_settings/index.md) this way.
-
-You can also set all the values from the specified settings profile in a single query.
-
-``` sql
-SET profile = 'profile-name-from-the-settings-file'
-```
-
-For more information, see [Settings](../operations/settings/settings.md).
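-
-For example, a session-level change of the `max_threads` setting mentioned earlier (the value 8 is an arbitrary illustration):
-
-``` sql
-SET max_threads = 8
-```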
-
-## TRUNCATE {#truncate}
-
-``` sql
-TRUNCATE TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster]
-```
-
-Removes all data from a table. When the clause `IF EXISTS` is omitted, the query returns an error if the table does not exist.
-
-The `TRUNCATE` query is not supported for [View](../operations/table_engines/view.md), [File](../operations/table_engines/file.md), [URL](../operations/table_engines/url.md) and [Null](../operations/table_engines/null.md) table engines.
-
-## USE {#use}
-
-``` sql
-USE db
-```
-
-Lets you set the current database for the session.
-The current database is used for searching for tables if the database is not explicitly defined in the query with a dot before the table name.
-This query can’t be made when using the HTTP protocol, since there is no concept of a session.
-
-[Original article](https://clickhouse.tech/docs/en/query_language/misc/)
diff --git a/docs/zh/query_language/syntax.md b/docs/zh/query_language/syntax.md
deleted file mode 100644
index fb86f56e7bd..00000000000
--- a/docs/zh/query_language/syntax.md
+++ /dev/null
@@ -1,184 +0,0 @@
----
-en_copy: true
----
-
-# Syntax {#syntax}
-
-There are two types of parsers in the system: the full SQL parser (a recursive descent parser), and the data format parser (a fast stream parser).
-In all cases except the `INSERT` query, only the full SQL parser is used.
-The `INSERT` query uses both parsers:
-
-``` sql
-INSERT INTO t VALUES (1, 'Hello, world'), (2, 'abc'), (3, 'def')
-```
-
-The `INSERT INTO t VALUES` fragment is parsed by the full parser, and the data `(1, 'Hello, world'), (2, 'abc'), (3, 'def')` is parsed by the fast stream parser. You can also turn on the full parser for the data by using the [input\_format\_values\_interpret\_expressions](../operations/settings/settings.md#settings-input_format_values_interpret_expressions) setting. When `input_format_values_interpret_expressions = 1`, ClickHouse first tries to parse values with the fast stream parser. If it fails, ClickHouse tries to use the full parser for the data, treating it like an SQL [expression](#syntax-expressions).
-
-Data can have any format. When a query is received, the server calculates no more than [max\_query\_size](../operations/settings/settings.md#settings-max_query_size) bytes of the request in RAM (by default, 1 MB), and the rest is stream parsed.
-This means the system doesn’t have problems with large `INSERT` queries, like MySQL does.
-
-When using the `Values` format in an `INSERT` query, it may seem that data is parsed the same as expressions in a `SELECT` query, but this is not true. The `Values` format is much more limited.
-
-Next we will cover the full parser. For more information about format parsers, see the [Formats](../interfaces/formats.md) section.
-
-## Spaces {#spaces}
-
-There may be any number of space symbols between syntactical constructions (including the beginning and end of a query). Space symbols include the space, tab, line feed, CR, and form feed.
-
-## Comments {#comments}
-
-SQL-style and C-style comments are supported.
-SQL-style comments: from `--` to the end of the line. The space after `--` can be omitted.
-C-style comments: from `/*` to `*/`. These comments can be multiline. Spaces are not required here, either.
-
-## Keywords {#syntax-keywords}
-
-Keywords are case-insensitive when they correspond to:
-
-- The SQL standard. For example, `SELECT`, `select` and `SeLeCt` are all valid.
-- An implementation in some popular DBMS (MySQL or Postgres). For example, `DateTime` is the same as `datetime`.
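-
-For example, since keywords that correspond to the SQL standard are case-insensitive, the following trivial statements are all parsed identically:
-
-``` sql
-SELECT 1;
-select 1;
-SeLeCt 1; -- the keyword is recognized regardless of case
-```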
-
-Whether a data type name is case-sensitive can be checked in the `system.data_type_families` table.
-
-In contrast to standard SQL, all other keywords (including function names) are **case-sensitive**.
-
-Keywords are not reserved (they are just parsed as keywords in the corresponding context). If you use [identifiers](#syntax-identifiers) that are the same as keywords, enclose them in quotes. For example, the query `SELECT "FROM" FROM table_name` is valid if the table `table_name` has a column with the name `"FROM"`.
-
-## Identifiers {#syntax-identifiers}
-
-Identifiers are:
-
-- Cluster, database, table, partition and column names.
-- Functions.
-- Data types.
-- [Expression aliases](#syntax-expression_aliases).
-
-Identifiers can be quoted or non-quoted. It is recommended to use non-quoted identifiers.
-
-Non-quoted identifiers must match the regex `^[a-zA-Z_][0-9a-zA-Z_]*$` and cannot be equal to [keywords](#syntax-keywords). Examples: `x, _1, X_y__Z123_.`
-
-If you want to use identifiers that are the same as keywords, or you want to use other symbols in identifiers, quote them using double quotes or backticks, for example, `"id"`, `` `id` ``.
-
-## Literals {#literals}
-
-There are numeric, string, compound and `NULL` literals.
-
-### Numeric {#numeric}
-
-A numeric literal is parsed as follows:
-
-- First as a 64-bit signed number, using the [strtoll](https://en.cppreference.com/w/cpp/string/byte/strtol) function.
-- If unsuccessful, as a 64-bit unsigned number, using the [strtoull](https://en.cppreference.com/w/cpp/string/byte/strtoul) function.
-- If unsuccessful, as a floating-point number using the [strtod](https://en.cppreference.com/w/cpp/string/byte/strtof) function.
-- Otherwise, an error is returned.
-
-The corresponding value will have the smallest type that the value fits in.
-For example, 1 is parsed as `UInt8`, but 256 is parsed as `UInt16`. For more information, see [Data types](../data_types/index.md).
-
-Examples: `1`, `18446744073709551615`, `0xDEADBEEF`, `01`, `0.1`, `1e100`, `-1e-100`, `inf`, `nan`.
-
-### String {#syntax-string-literal}
-
-Only string literals in single quotes are supported. The enclosed characters can be backslash-escaped. The following escape sequences have a corresponding special value: `\b`, `\f`, `\r`, `\n`, `\t`, `\0`, `\a`, `\v`, `\xHH`. In all other cases, escape sequences in the format `\c`, where `c` is any character, are converted to `c`. This means that you can use the sequences `\'` and `\\`. The value will have the [String](../data_types/string.md) type.
-
-The minimum set of characters that you need to escape in string literals: `'` and `\`. A single quote can be escaped with a single quote; the literals `'It\'s'` and `'It''s'` are equal.
-
-### Compound {#compound}
-
-Constructions are supported for arrays: `[1, 2, 3]` and tuples: `(1, 'Hello, world!', 2)`.
-Actually, these are not literals, but expressions with the array creation operator and the tuple creation operator, respectively.
-An array must consist of at least one item, and a tuple must have at least two items.
-Tuples have a special purpose for use in the `IN` clause of a `SELECT` query. Tuples can be obtained as the result of a query, but they can’t be saved to a database (with the exception of [Memory](../operations/table_engines/memory.md) tables).
-
-### NULL {#null-literal}
-
-Indicates that the value is missing.
-
-In order to store `NULL` in a table field, it must be of the [Nullable](../data_types/nullable.md) type.
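-
-For example, a minimal sketch of storing `NULL` (the table name `t_null` is hypothetical):
-
-``` sql
-CREATE TABLE t_null (x Int32, y Nullable(Int32)) ENGINE = TinyLog;
-INSERT INTO t_null VALUES (1, NULL), (2, 3); -- NULL is only allowed in the Nullable column
-```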
- -Depending on the data format (input or output), `NULL` may have a different representation. For more information, see the documentation for [data formats](../interfaces/formats.md#formats). - -There are many nuances to processing `NULL`. For example, if at least one of the arguments of a comparison operation is `NULL`, the result of this operation will also be `NULL`. The same is true for multiplication, addition, and other operations. For more information, read the documentation for each operation. - -In queries, you can check `NULL` using the [IS NULL](operators.md#operator-is-null) and [IS NOT NULL](operators.md) operators and the related functions `isNull` and `isNotNull`. - -## Functions {#functions} - -Functions are written like an identifier with a list of arguments (possibly empty) in brackets. In contrast to standard SQL, the brackets are required, even for an empty arguments list. Example: `now()`. -There are regular and aggregate functions (see the section “Aggregate functions”). Some aggregate functions can contain two lists of arguments in brackets. Example: `quantile (0.9) (x)`. These aggregate functions are called “parametric” functions, and the arguments in the first list are called “parameters”. The syntax of aggregate functions without parameters is the same as for regular functions. - -## Operators {#operators} - -Operators are converted to their corresponding functions during query parsing, taking their priority and associativity into account. -For example, the expression `1 + 2 * 3 + 4` is transformed to `plus(plus(1, multiply(2, 3)), 4)`. - -## Data Types and Database Table Engines {#data_types-and-database-table-engines} - -Data types and table engines in the `CREATE` query are written the same way as identifiers or functions. In other words, they may or may not contain an arguments list in brackets. For more information, see the sections “Data types,” “Table engines,” and “CREATE”. - -## Expression Aliases {#syntax-expression_aliases} - -An alias is a user-defined name for an expression in a query. - -``` sql -expr AS alias -``` - -- `AS` — The keyword for defining aliases. You can define the alias for a table name or a column name in a `SELECT` clause without using the `AS` keyword. - - For example, `SELECT table_name_alias.column_name FROM table_name table_name_alias`. - - In the [CAST](functions/type_conversion_functions.md#type_conversion_function-cast) function, the `AS` keyword has another meaning. See the description of the function. - -- `expr` — Any expression supported by ClickHouse. - - For example, `SELECT column_name * 2 AS double FROM some_table`. - -- `alias` — Name for `expr`. Aliases should comply with the [identifiers](#syntax-identifiers) syntax. - - For example, `SELECT "table t".column_name FROM table_name AS "table t"`. - -### Notes on Usage {#notes-on-usage} - -Aliases are global for a query or subquery and you can define an alias in any part of a query for any expression. For example, `SELECT (1 AS n) + 2, n`. - -Aliases are not visible in subqueries and between subqueries. For example, while executing the query `SELECT (SELECT sum(b.a) + num FROM b) - a.a AS num FROM a` ClickHouse generates the exception `Unknown identifier: num`. - -If an alias is defined for the result columns in the `SELECT` clause of a subquery, these columns are visible in the outer query. For example, `SELECT n + m FROM (SELECT 1 AS n, 2 AS m)`. - -Be careful with aliases that are the same as column or table names. 
Let’s consider the following example: - -``` sql -CREATE TABLE t -( - a Int, - b Int -) -ENGINE = TinyLog() -``` - -``` sql -SELECT - argMax(a, b), - sum(b) AS b -FROM t -``` - -``` text -Received exception from server (version 18.14.17): -Code: 184. DB::Exception: Received from localhost:9000, 127.0.0.1. DB::Exception: Aggregate function sum(b) is found inside another aggregate function in query. -``` - -In this example, we declared table `t` with column `b`. Then, when selecting data, we defined the `sum(b) AS b` alias. As aliases are global, ClickHouse substituted the literal `b` in the expression `argMax(a, b)` with the expression `sum(b)`. This substitution caused the exception. - -## Asterisk {#asterisk} - -In a `SELECT` query, an asterisk can replace the expression. For more information, see the section “SELECT”. - -## Expressions {#syntax-expressions} - -An expression is a function, identifier, literal, application of an operator, expression in brackets, subquery, or asterisk. It can also contain an alias. -A list of expressions is one or more expressions separated by commas. -Functions and operators, in turn, can have expressions as arguments. - -[Original article](https://clickhouse.tech/docs/en/query_language/syntax/) diff --git a/docs/zh/query_language/system.md b/docs/zh/query_language/system.md deleted file mode 100644 index a6b72d63ead..00000000000 --- a/docs/zh/query_language/system.md +++ /dev/null @@ -1,110 +0,0 @@ ---- -en_copy: true ---- - -# SYSTEM Queries {#query-language-system} - -- [RELOAD DICTIONARIES](#query_language-system-reload-dictionaries) -- [RELOAD DICTIONARY](#query_language-system-reload-dictionary) -- [DROP DNS CACHE](#query_language-system-drop-dns-cache) -- [DROP MARK CACHE](#query_language-system-drop-mark-cache) -- [FLUSH LOGS](#query_language-system-flush_logs) -- [RELOAD CONFIG](#query_language-system-reload-config) -- [SHUTDOWN](#query_language-system-shutdown) -- [KILL](#query_language-system-kill) -- [STOP DISTRIBUTED SENDS](#query_language-system-stop-distributed-sends) -- [FLUSH DISTRIBUTED](#query_language-system-flush-distributed) -- [START DISTRIBUTED SENDS](#query_language-system-start-distributed-sends) -- [STOP MERGES](#query_language-system-stop-merges) -- [START MERGES](#query_language-system-start-merges) - -## RELOAD DICTIONARIES {#query_language-system-reload-dictionaries} - -Reloads all dictionaries that have been successfully loaded before. -By default, dictionaries are loaded lazily (see [dictionaries\_lazy\_load](../operations/server_settings/settings.md#server_settings-dictionaries_lazy_load)), so instead of being loaded automatically at startup, they are initialized on first access through dictGet function or SELECT from tables with ENGINE = Dictionary. The `SYSTEM RELOAD DICTIONARIES` query reloads such dictionaries (LOADED). -Always returns `Ok.` regardless of the result of the dictionary update. - -## RELOAD DICTIONARY dictionary\_name {#query_language-system-reload-dictionary} - -Completely reloads a dictionary `dictionary_name`, regardless of the state of the dictionary (LOADED / NOT\_LOADED / FAILED). -Always returns `Ok.` regardless of the result of updating the dictionary. -The status of the dictionary can be checked by querying the `system.dictionaries` table. - -``` sql -SELECT name, status FROM system.dictionaries; -``` - -## DROP DNS CACHE {#query_language-system-drop-dns-cache} - -Resets ClickHouse’s internal DNS cache. 
Sometimes (for old ClickHouse versions) it is necessary to use this command when changing the infrastructure (changing the IP address of another ClickHouse server or the server used by dictionaries).
-
-For more convenient (automatic) cache management, see the disable\_internal\_dns\_cache and dns\_cache\_update\_period parameters.
-
-## DROP MARK CACHE {#query_language-system-drop-mark-cache}
-
-Resets the mark cache. Used in development of ClickHouse and performance tests.
-
-## FLUSH LOGS {#query_language-system-flush_logs}
-
-Flushes buffers of log messages to system tables (e.g. system.query\_log). Allows you to avoid waiting 7.5 seconds when debugging.
-
-## RELOAD CONFIG {#query_language-system-reload-config}
-
-Reloads the ClickHouse configuration. Used when the configuration is stored in ZooKeeper.
-
-## SHUTDOWN {#query_language-system-shutdown}
-
-Normally shuts down ClickHouse (like `service clickhouse-server stop` / `kill {$pid_clickhouse-server}`)
-
-## KILL {#query_language-system-kill}
-
-Aborts the ClickHouse process (like `kill -9 {$pid_clickhouse-server}`)
-
-## Managing Distributed Tables {#query-language-system-distributed}
-
-ClickHouse can manage [distributed](../operations/table_engines/distributed.md) tables. When a user inserts data into these tables, ClickHouse first creates a queue of the data that should be sent to cluster nodes, then asynchronously sends it. You can manage queue processing with the [STOP DISTRIBUTED SENDS](#query_language-system-stop-distributed-sends), [FLUSH DISTRIBUTED](#query_language-system-flush-distributed), and [START DISTRIBUTED SENDS](#query_language-system-start-distributed-sends) queries. You can also synchronously insert distributed data with the `insert_distributed_sync` setting.
-
-### STOP DISTRIBUTED SENDS {#query_language-system-stop-distributed-sends}
-
-Disables background data distribution when inserting data into distributed tables.
-
-``` sql
-SYSTEM STOP DISTRIBUTED SENDS [db.]<distributed_table_name>
-```
-
-### FLUSH DISTRIBUTED {#query_language-system-flush-distributed}
-
-Forces ClickHouse to send data to cluster nodes synchronously. If any nodes are unavailable, ClickHouse throws an exception and stops query execution. You can retry the query until it succeeds, which will happen when all nodes are back online.
-
-``` sql
-SYSTEM FLUSH DISTRIBUTED [db.]<distributed_table_name>
-```
-
-### START DISTRIBUTED SENDS {#query_language-system-start-distributed-sends}
-
-Enables background data distribution when inserting data into distributed tables.
-
-``` sql
-SYSTEM START DISTRIBUTED SENDS [db.]<distributed_table_name>
-```
-
-### STOP MERGES {#query_language-system-stop-merges}
-
-Makes it possible to stop background merges for tables in the MergeTree family:
-
-``` sql
-SYSTEM STOP MERGES [[db.]merge_tree_family_table_name]
-```
-
-!!! note "Note"
-    A `DETACH / ATTACH` of a table starts background merges for that table again, even if merges were previously stopped for all MergeTree tables.
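-
-For example (a sketch; `db.merge_tree_table` is a placeholder name for any MergeTree-family table):
-
-``` sql
-SYSTEM STOP MERGES db.merge_tree_table;  -- merges for this table are now paused
-DETACH TABLE db.merge_tree_table;
-ATTACH TABLE db.merge_tree_table;        -- background merges for the table are running again
-```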
-
-### START MERGES {#query_language-system-start-merges}
-
-Makes it possible to start background merges for tables in the MergeTree family:
-
-``` sql
-SYSTEM START MERGES [[db.]merge_tree_family_table_name]
-```
-
-[Original article](https://clickhouse.tech/docs/en/query_language/system/) 
diff --git a/docs/zh/query_language/table_functions/index.md b/docs/zh/query_language/table_functions/index.md
deleted file mode 100644
index ba231a6eeea..00000000000
--- a/docs/zh/query_language/table_functions/index.md
+++ /dev/null
@@ -1,34 +0,0 @@
----
-en_copy: true
----
-
-# Table Functions {#table-functions}
-
-Table functions are methods for constructing tables.
-
-You can use table functions in:
-
-- The [FROM](../select.md#select-from) clause of the `SELECT` query.
-
-    This is a method for creating a temporary table that is available only in the current query. The table is deleted when the query finishes.
-
-- The [CREATE TABLE AS \<table\_function()\>](../create.md#create-table-query) query.
-
-    It's one of the methods of creating a table.
-
-!!! warning "Warning"
-    You can’t use table functions if the [allow\_ddl](../../operations/settings/permissions_for_queries.md#settings_allow_ddl) setting is disabled.
-
-| Function              | Description                                                                                                                        |
-|-----------------------|------------------------------------------------------------------------------------------------------------------------------------|
-| [file](file.md)       | Creates a [File](../../operations/table_engines/file.md)-engine table.                                                              |
-| [merge](merge.md)     | Creates a [Merge](../../operations/table_engines/merge.md)-engine table.                                                            |
-| [numbers](numbers.md) | Creates a table with a single column filled with integer numbers.                                                                   |
-| [remote](remote.md)   | Allows you to access remote servers without creating a [Distributed](../../operations/table_engines/distributed.md)-engine table.   |
-| [url](url.md)         | Creates a [URL](../../operations/table_engines/url.md)-engine table.                                                                |
-| [mysql](mysql.md)     | Creates a [MySQL](../../operations/table_engines/mysql.md)-engine table.                                                            |
-| [jdbc](jdbc.md)       | Creates a [JDBC](../../operations/table_engines/jdbc.md)-engine table.                                                              |
-| [odbc](odbc.md)       | Creates an [ODBC](../../operations/table_engines/odbc.md)-engine table.                                                             |
-| [hdfs](hdfs.md)       | Creates an [HDFS](../../operations/table_engines/hdfs.md)-engine table.                                                             |
-
-[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/) 
diff --git a/docs/zh/query_language/table_functions/input.md b/docs/zh/query_language/table_functions/input.md
deleted file mode 100644
index 7536a9bffc2..00000000000
--- a/docs/zh/query_language/table_functions/input.md
+++ /dev/null
@@ -1,44 +0,0 @@
----
-en_copy: true
----
-
-# input {#input}
-
-`input(structure)` - a table function that allows you to efficiently convert and insert data sent to the
-server with one structure into a table with another structure.
-
-`structure` - the structure of the data sent to the server, in the following format: `'column1_name column1_type, column2_name column2_type, ...'`.
-For example, `'id UInt32, name String'`.
-
-This function can be used only in an `INSERT SELECT` query, and only once, but it otherwise behaves like an ordinary table function
-(for example, it can be used in a subquery, etc.).
-
-Data can be sent in any way, as for an ordinary `INSERT` query, in any available [format](../../interfaces/formats.md#formats)
-that must be specified at the end of the query (unlike an ordinary `INSERT SELECT`).
-
-The main feature of this function is that when the server receives data from the client, it simultaneously converts the data
-according to the list of expressions in the `SELECT` clause and inserts it into the target table. A temporary table
-with all the transferred data is not created.
-
-**Examples**
-
-- Let the `test` table have the following structure `(a String, b String)`,
-    and let the data in `data.csv` have a different structure `(col1 String, col2 Date, col3 Int32)`. The query to insert
-    data from `data.csv` into the `test` table with simultaneous conversion looks like this:
-
-
-
-``` bash
-$ cat data.csv | clickhouse-client --query="INSERT INTO test SELECT lower(col1), col3 * col3 FROM input('col1 String, col2 Date, col3 Int32') FORMAT CSV";
-```
-
-- If `data.csv` contains data with the same structure `test_structure` as the table `test`, then these two queries are equivalent:
-
-
-
-``` bash
-$ cat data.csv | clickhouse-client --query="INSERT INTO test FORMAT CSV"
-$ cat data.csv | clickhouse-client --query="INSERT INTO test SELECT * FROM input('test_structure') FORMAT CSV"
-```
-
-[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/input/) 
diff --git a/docs/zh/query_language/table_functions/jdbc.md b/docs/zh/query_language/table_functions/jdbc.md
deleted file mode 100644
index e1ba7b362bd..00000000000
--- a/docs/zh/query_language/table_functions/jdbc.md
+++ /dev/null
@@ -1,26 +0,0 @@
----
-en_copy: true
----
-
-# jdbc {#table-function-jdbc}
-
-`jdbc(jdbc_connection_uri, schema, table)` - returns a table that is connected via a JDBC driver.
-
-This table function requires the separate `clickhouse-jdbc-bridge` program to be running.
-It supports Nullable types (based on the DDL of the remote table that is queried).
-
-**Examples**
-
-``` sql
-SELECT * FROM jdbc('jdbc:mysql://localhost:3306/?user=root&password=root', 'schema', 'table')
-```
-
-``` sql
-SELECT * FROM jdbc('mysql://localhost:3306/?user=root&password=root', 'schema', 'table')
-```
-
-``` sql
-SELECT * FROM jdbc('datasource://mysql-local', 'schema', 'table')
-```
-
-[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/jdbc/) 
diff --git a/docs/zh/query_language/table_functions/merge.md b/docs/zh/query_language/table_functions/merge.md
deleted file mode 100644
index 3638fad418d..00000000000
--- a/docs/zh/query_language/table_functions/merge.md
+++ /dev/null
@@ -1,11 +0,0 @@
----
-en_copy: true
----
-
-# merge {#merge}
-
-`merge(db_name, 'tables_regexp')` – Creates a temporary Merge table. For more information, see the section “Table engines, Merge”.
-
-The table structure is taken from the first table encountered that matches the regular expression.
-
-[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/merge/) 
diff --git a/docs/zh/query_language/table_functions/numbers.md b/docs/zh/query_language/table_functions/numbers.md
deleted file mode 100644
index 5aec0b3c96b..00000000000
--- a/docs/zh/query_language/table_functions/numbers.md
+++ /dev/null
@@ -1,27 +0,0 @@
----
-en_copy: true
----
-
-# numbers {#numbers}
-
-`numbers(N)` – Returns a table with a single ‘number’ column (UInt64) that contains the integers from 0 to N-1.
-`numbers(N, M)` – Returns a table with a single ‘number’ column (UInt64) that contains the integers from N to (N + M - 1).
-
-Similar to the `system.numbers` table, it can be used for testing and for generating successive values; `numbers(N, M)` is more efficient than `system.numbers`.
- -The following queries are equivalent: - -``` sql -SELECT * FROM numbers(10); -SELECT * FROM numbers(0, 10); -SELECT * FROM system.numbers LIMIT 10; -``` - -Examples: - -``` sql --- Generate a sequence of dates from 2010-01-01 to 2010-12-31 -select toDate('2010-01-01') + number as d FROM numbers(365); -``` - -[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/numbers/) diff --git a/docs/zh/query_language/table_functions/odbc.md b/docs/zh/query_language/table_functions/odbc.md deleted file mode 100644 index 8c972b1f93a..00000000000 --- a/docs/zh/query_language/table_functions/odbc.md +++ /dev/null @@ -1,105 +0,0 @@ ---- -en_copy: true ---- - -# odbc {#table-functions-odbc} - -Returns table that is connected via [ODBC](https://en.wikipedia.org/wiki/Open_Database_Connectivity). - -``` sql -odbc(connection_settings, external_database, external_table) -``` - -Parameters: - -- `connection_settings` — Name of the section with connection settings in the `odbc.ini` file. -- `external_database` — Name of a database in an external DBMS. -- `external_table` — Name of a table in the `external_database`. - -To safely implement ODBC connections, ClickHouse uses a separate program `clickhouse-odbc-bridge`. If the ODBC driver is loaded directly from `clickhouse-server`, driver problems can crash the ClickHouse server. ClickHouse automatically starts `clickhouse-odbc-bridge` when it is required. The ODBC bridge program is installed from the same package as the `clickhouse-server`. - -The fields with the `NULL` values from the external table are converted into the default values for the base data type. For example, if a remote MySQL table field has the `INT NULL` type it is converted to 0 (the default value for ClickHouse `Int32` data type). - -## Usage example {#usage-example} - -**Getting data from the local MySQL installation via ODBC** - -This example is checked for Ubuntu Linux 18.04 and MySQL server 5.7. - -Ensure that unixODBC and MySQL Connector are installed. - -By default (if installed from packages), ClickHouse starts as user `clickhouse`. Thus you need to create and configure this user in the MySQL server. - -``` bash -$ sudo mysql -``` - -``` sql -mysql> CREATE USER 'clickhouse'@'localhost' IDENTIFIED BY 'clickhouse'; -mysql> GRANT ALL PRIVILEGES ON *.* TO 'clickhouse'@'clickhouse' WITH GRANT OPTION; -``` - -Then configure the connection in `/etc/odbc.ini`. - -``` bash -$ cat /etc/odbc.ini -[mysqlconn] -DRIVER = /usr/local/lib/libmyodbc5w.so -SERVER = 127.0.0.1 -PORT = 3306 -DATABASE = test -USERNAME = clickhouse -PASSWORD = clickhouse -``` - -You can check the connection using the `isql` utility from the unixODBC installation. - -``` bash -$ isql -v mysqlconn -+---------------------------------------+ -| Connected! | -| | -... 
-``` - -Table in MySQL: - -``` text -mysql> CREATE TABLE `test`.`test` ( - -> `int_id` INT NOT NULL AUTO_INCREMENT, - -> `int_nullable` INT NULL DEFAULT NULL, - -> `float` FLOAT NOT NULL, - -> `float_nullable` FLOAT NULL DEFAULT NULL, - -> PRIMARY KEY (`int_id`)); -Query OK, 0 rows affected (0,09 sec) - -mysql> insert into test (`int_id`, `float`) VALUES (1,2); -Query OK, 1 row affected (0,00 sec) - -mysql> select * from test; -+--------+--------------+-------+----------------+ -| int_id | int_nullable | float | float_nullable | -+--------+--------------+-------+----------------+ -| 1 | NULL | 2 | NULL | -+--------+--------------+-------+----------------+ -1 row in set (0,00 sec) -``` - -Retrieving data from the MySQL table in ClickHouse: - -``` sql -SELECT * FROM odbc('DSN=mysqlconn', 'test', 'test') -``` - -``` text -┌─int_id─┬─int_nullable─┬─float─┬─float_nullable─┐ -│ 1 │ 0 │ 2 │ 0 │ -└────────┴──────────────┴───────┴────────────────┘ -``` - -## See Also {#see-also} - -- [ODBC external dictionaries](../../query_language/dicts/external_dicts_dict_sources.md#dicts-external_dicts_dict_sources-odbc) -- [ODBC table engine](../../operations/table_engines/odbc.md). - -[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/jdbc/) diff --git a/docs/zh/query_language/table_functions/remote.md b/docs/zh/query_language/table_functions/remote.md deleted file mode 100644 index e8c751af7e2..00000000000 --- a/docs/zh/query_language/table_functions/remote.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -en_copy: true ---- - -# remote, remoteSecure {#remote-remotesecure} - -Allows you to access remote servers without creating a `Distributed` table. - -Signatures: - -``` sql -remote('addresses_expr', db, table[, 'user'[, 'password']]) -remote('addresses_expr', db.table[, 'user'[, 'password']]) -``` - -`addresses_expr` – An expression that generates addresses of remote servers. This may be just one server address. The server address is `host:port`, or just `host`. The host can be specified as the server name, or as the IPv4 or IPv6 address. An IPv6 address is specified in square brackets. The port is the TCP port on the remote server. If the port is omitted, it uses `tcp_port` from the server’s config file (by default, 9000). - -!!! important "Important" - The port is required for an IPv6 address. - -Examples: - -``` text -example01-01-1 -example01-01-1:9000 -localhost -127.0.0.1 -[::]:9000 -[2a02:6b8:0:1111::11]:9000 -``` - -Multiple addresses can be comma-separated. In this case, ClickHouse will use distributed processing, so it will send the query to all specified addresses (like to shards with different data). - -Example: - -``` text -example01-01-1,example01-02-1 -``` - -Part of the expression can be specified in curly brackets. The previous example can be written as follows: - -``` text -example01-0{1,2}-1 -``` - -Curly brackets can contain a range of numbers separated by two dots (non-negative integers). In this case, the range is expanded to a set of values that generate shard addresses. If the first number starts with zero, the values are formed with the same zero alignment. The previous example can be written as follows: - -``` text -example01-{01..02}-1 -``` - -If you have multiple pairs of curly brackets, it generates the direct product of the corresponding sets. - -Addresses and parts of addresses in curly brackets can be separated by the pipe symbol (\|). 
In this case, the corresponding sets of addresses are interpreted as replicas, and the query will be sent to the first healthy replica. However, the replicas are iterated in the order currently set in the [load\_balancing](../../operations/settings/settings.md) setting. - -Example: - -``` text -example01-{01..02}-{1|2} -``` - -This example specifies two shards that each have two replicas. - -The number of addresses generated is limited by a constant. Right now this is 1000 addresses. - -Using the `remote` table function is less optimal than creating a `Distributed` table, because in this case, the server connection is re-established for every request. In addition, if host names are set, the names are resolved, and errors are not counted when working with various replicas. When processing a large number of queries, always create the `Distributed` table ahead of time, and don’t use the `remote` table function. - -The `remote` table function can be useful in the following cases: - -- Accessing a specific server for data comparison, debugging, and testing. -- Queries between various ClickHouse clusters for research purposes. -- Infrequent distributed requests that are made manually. -- Distributed requests where the set of servers is re-defined each time. - -If the user is not specified, `default` is used. -If the password is not specified, an empty password is used. - -`remoteSecure` - same as `remote` but with secured connection. Default port — [tcp\_port\_secure](../../operations/server_settings/settings.md#server_settings-tcp_port_secure) from config or 9440. - -[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/remote/) diff --git a/docs/zh/query_language/table_functions/url.md b/docs/zh/query_language/table_functions/url.md deleted file mode 100644 index e1250b438ab..00000000000 --- a/docs/zh/query_language/table_functions/url.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -en_copy: true ---- - -# url {#url} - -`url(URL, format, structure)` - returns a table created from the `URL` with given -`format` and `structure`. - -URL - HTTP or HTTPS server address, which can accept `GET` and/or `POST` requests. - -format - [format](../../interfaces/formats.md#formats) of the data. - -structure - table structure in `'UserID UInt64, Name String'` format. Determines column names and types. - -**Example** - -``` sql --- getting the first 3 lines of a table that contains columns of String and UInt32 type from HTTP-server which answers in CSV format. -SELECT * FROM url('http://127.0.0.1:12345/', CSV, 'column1 String, column2 UInt32') LIMIT 3 -``` - -[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/url/) diff --git a/docs/zh/sql_reference/aggregate_functions/combinators.md b/docs/zh/sql_reference/aggregate_functions/combinators.md new file mode 100644 index 00000000000..a8be457ab23 --- /dev/null +++ b/docs/zh/sql_reference/aggregate_functions/combinators.md @@ -0,0 +1,166 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 37 +toc_title: "\u805A\u5408\u51FD\u6570\u7EC4\u5408\u5668" +--- + +# 聚合函数组合器 {#aggregate_functions_combinators} + +聚合函数的名称可以附加一个后缀。 这改变了聚合函数的工作方式。 + +## -如果 {#agg-functions-combinator-if} + +The suffix -If can be appended to the name of any aggregate function. In this case, the aggregate function accepts an extra argument – a condition (Uint8 type). The aggregate function processes only the rows that trigger the condition. 
If the condition was not triggered even once, it returns a default value (usually zeros or empty strings). + +例: `sumIf(column, cond)`, `countIf(cond)`, `avgIf(x, cond)`, `quantilesTimingIf(level1, level2)(x, cond)`, `argMinIf(arg, val, cond)` 等等。 + +使用条件聚合函数,您可以一次计算多个条件的聚合,而无需使用子查询和 `JOIN`例如,在Yandex的。Metrica,条件聚合函数用于实现段比较功能。 + +## -阵列 {#agg-functions-combinator-array} + +-Array后缀可以附加到任何聚合函数。 在这种情况下,聚合函数采用的参数 ‘Array(T)’ 类型(数组)而不是 ‘T’ 类型参数。 如果聚合函数接受多个参数,则它必须是长度相等的数组。 在处理数组时,聚合函数的工作方式与所有数组元素的原始聚合函数类似。 + +示例1: `sumArray(arr)` -总计所有的所有元素 ‘arr’ 阵列。 在这个例子中,它可以更简单地编写: `sum(arraySum(arr))`. + +示例2: `uniqArray(arr)` – Counts the number of unique elements in all ‘arr’ 阵列。 这可以做一个更简单的方法: `uniq(arrayJoin(arr))`,但它并不总是可以添加 ‘arrayJoin’ 到查询。 + +-如果和-阵列可以组合。 然而, ‘Array’ 必须先来,然后 ‘If’. 例: `uniqArrayIf(arr, cond)`, `quantilesTimingArrayIf(level1, level2)(arr, cond)`. 由于这个顺序,该 ‘cond’ 参数不会是数组。 + +## -州 {#agg-functions-combinator-state} + +如果应用此combinator,则聚合函数不会返回结果值(例如唯一值的数量 [uniq](reference.md#agg_function-uniq) 函数),但聚合的中间状态(用于 `uniq`,这是用于计算唯一值的数量的散列表)。 这是一个 `AggregateFunction(...)` 可用于进一步处理或存储在表中以完成聚合。 + +要使用这些状态,请使用: + +- [AggregatingMergeTree](../../engines/table_engines/mergetree_family/aggregatingmergetree.md) 表引擎。 +- [最后聚会](../../sql_reference/functions/other_functions.md#function-finalizeaggregation) 功能。 +- [跑累积](../../sql_reference/functions/other_functions.md#function-runningaccumulate) 功能。 +- [-合并](#aggregate_functions_combinators_merge) combinator +- [-MergeState](#aggregate_functions_combinators_mergestate) combinator + +## -合并 {#aggregate_functions_combinators-merge} + +如果应用此组合器,则聚合函数将中间聚合状态作为参数,组合状态以完成聚合,并返回结果值。 + +## -MergeState {#aggregate_functions_combinators-mergestate} + +以与-Merge combinator相同的方式合并中间聚合状态。 但是,它不会返回结果值,而是返回中间聚合状态,类似于-State combinator。 + +## -ForEach {#agg-functions-combinator-foreach} + +将表的聚合函数转换为聚合相应数组项并返回结果数组的数组的聚合函数。 例如, `sumForEach` 对于数组 `[1, 2]`, `[3, 4, 5]`和`[6, 7]`返回结果 `[10, 13, 5]` 之后将相应的数组项添加在一起。 + +## -OrDefault {#agg-functions-combinator-ordefault} + +如果没有要聚合的内容,则填充聚合函数的返回类型的默认值。 + +``` sql +SELECT avg(number), avgOrDefault(number) FROM numbers(0) +``` + +``` text +┌─avg(number)─┬─avgOrDefault(number)─┐ +│ nan │ 0 │ +└─────────────┴──────────────────────┘ +``` + +## -OrNull {#agg-functions-combinator-ornull} + +填充 `null` 如果没有什么聚合。 返回列将为空。 + +``` sql +SELECT avg(number), avgOrNull(number) FROM numbers(0) +``` + +``` text +┌─avg(number)─┬─avgOrNull(number)─┐ +│ nan │ ᴺᵁᴸᴸ │ +└─────────────┴───────────────────┘ +``` + +-OrDefault和-OrNull可以与其他组合器相结合。 当聚合函数不接受空输入时,它很有用。 + +``` sql +SELECT avgOrNullIf(x, x > 10) +FROM +( + SELECT toDecimal32(1.23, 2) AS x +) +``` + +``` text +┌─avgOrNullIf(x, greater(x, 10))─┐ +│ ᴺᵁᴸᴸ │ +└────────────────────────────────┘ +``` + +## -重新采样 {#agg-functions-combinator-resample} + +允许您将数据划分为组,然后单独聚合这些组中的数据。 通过将一列中的值拆分为间隔来创建组。 + +``` sql +Resample(start, end, step)(, resampling_key) +``` + +**参数** + +- `start` — Starting value of the whole required interval for `resampling_key` 值。 +- `stop` — Ending value of the whole required interval for `resampling_key` 值。 整个时间间隔不包括 `stop` 价值 `[start, stop)`. +- `step` — Step for separating the whole interval into subintervals. The `aggFunction` 在每个子区间上独立执行。 +- `resampling_key` — Column whose values are used for separating data into intervals. 
+- `aggFunction_params` — `aggFunction` 参数。 + +**返回值** + +- 阵列 `aggFunction` 每个子区间的结果。 + +**示例** + +考虑一下 `people` 具有以下数据的表: + +``` text +┌─name───┬─age─┬─wage─┐ +│ John │ 16 │ 10 │ +│ Alice │ 30 │ 15 │ +│ Mary │ 35 │ 8 │ +│ Evelyn │ 48 │ 11.5 │ +│ David │ 62 │ 9.9 │ +│ Brian │ 60 │ 16 │ +└────────┴─────┴──────┘ +``` + +让我们得到的人的名字,他们的年龄在于的时间间隔 `[30,60)` 和 `[60,75)`. 由于我们使用整数表示的年龄,我们得到的年龄 `[30, 59]` 和 `[60,74]` 间隔。 + +要在数组中聚合名称,我们使用 [groupArray](reference.md#agg_function-grouparray) 聚合函数。 这需要一个参数。 在我们的例子中,它是 `name` 列。 该 `groupArrayResample` 函数应该使用 `age` 按年龄聚合名称的列。 要定义所需的时间间隔,我们通过 `30, 75, 30` 参数到 `groupArrayResample` 功能。 + +``` sql +SELECT groupArrayResample(30, 75, 30)(name, age) FROM people +``` + +``` text +┌─groupArrayResample(30, 75, 30)(name, age)─────┐ +│ [['Alice','Mary','Evelyn'],['David','Brian']] │ +└───────────────────────────────────────────────┘ +``` + +考虑结果。 + +`Jonh` 是因为他太年轻了 其他人按照指定的年龄间隔进行分配。 + +现在让我们计算指定年龄间隔内的总人数和平均工资。 + +``` sql +SELECT + countResample(30, 75, 30)(name, age) AS amount, + avgResample(30, 75, 30)(wage, age) AS avg_wage +FROM people +``` + +``` text +┌─amount─┬─avg_wage──────────────────┐ +│ [3,2] │ [11.5,12.949999809265137] │ +└────────┴───────────────────────────┘ +``` + +[原始文章](https://clickhouse.tech/docs/en/query_language/agg_functions/combinators/) diff --git a/docs/zh/sql_reference/aggregate_functions/index.md b/docs/zh/sql_reference/aggregate_functions/index.md new file mode 100644 index 00000000000..7e53c8c8c53 --- /dev/null +++ b/docs/zh/sql_reference/aggregate_functions/index.md @@ -0,0 +1,62 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_folder_title: "\u805A\u5408\u51FD\u6570" +toc_priority: 33 +toc_title: "\u5BFC\u8A00" +--- + +# 聚合函数 {#aggregate-functions} + +聚合函数在 [正常](http://www.sql-tutorial.com/sql-aggregate-functions-sql-tutorial) 方式如预期的数据库专家。 + +ClickHouse还支持: + +- [参数聚合函数](parametric_functions.md#aggregate_functions_parametric),它接受除列之外的其他参数。 +- [组合器](combinators.md#aggregate_functions_combinators),这改变了聚合函数的行为。 + +## 空处理 {#null-processing} + +在聚合过程中,所有 `NULL`s被跳过。 + +**例:** + +考虑这个表: + +``` text +┌─x─┬────y─┐ +│ 1 │ 2 │ +│ 2 │ ᴺᵁᴸᴸ │ +│ 3 │ 2 │ +│ 3 │ 3 │ +│ 3 │ ᴺᵁᴸᴸ │ +└───┴──────┘ +``` + +比方说,你需要在总的值 `y` 列: + +``` sql +SELECT sum(y) FROM t_null_big +``` + + ┌─sum(y)─┐ + │ 7 │ + └────────┘ + +该 `sum` 函数解释 `NULL` 作为 `0`. 特别是,这意味着,如果函数接收输入的选择,其中所有的值 `NULL`,那么结果将是 `0`,不 `NULL`. 
+ +现在你可以使用 `groupArray` 函数从创建一个数组 `y` 列: + +``` sql +SELECT groupArray(y) FROM t_null_big +``` + +``` text +┌─groupArray(y)─┐ +│ [2,2,3] │ +└───────────────┘ +``` + +`groupArray` 不包括 `NULL` 在生成的数组中。 + +[原始文章](https://clickhouse.tech/docs/en/query_language/agg_functions/) diff --git a/docs/zh/query_language/agg_functions/parametric_functions.md b/docs/zh/sql_reference/aggregate_functions/parametric_functions.md similarity index 57% rename from docs/zh/query_language/agg_functions/parametric_functions.md rename to docs/zh/sql_reference/aggregate_functions/parametric_functions.md index d4e29feff0e..18adcd93487 100644 --- a/docs/zh/query_language/agg_functions/parametric_functions.md +++ b/docs/zh/sql_reference/aggregate_functions/parametric_functions.md @@ -1,29 +1,32 @@ --- -en_copy: true +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 38 +toc_title: "\u53C2\u6570\u805A\u5408\u51FD\u6570" --- -# Parametric aggregate functions {#aggregate_functions_parametric} +# 参数聚合函数 {#aggregate_functions_parametric} Some aggregate functions can accept not only argument columns (used for compression), but a set of parameters – constants for initialization. The syntax is two pairs of brackets instead of one. The first is for parameters, and the second is for arguments. -## histogram {#histogram} +## 直方图 {#histogram} -Calculates an adaptive histogram. It doesn’t guarantee precise results. +计算自适应直方图。 它不能保证精确的结果。 ``` sql histogram(number_of_bins)(values) ``` -The functions uses [A Streaming Parallel Decision Tree Algorithm](http://jmlr.org/papers/volume11/ben-haim10a/ben-haim10a.pdf). The borders of histogram bins are adjusted as new data enters a function. In common case, the widths of bins are not equal. +该函数使用 [流式并行决策树算法](http://jmlr.org/papers/volume11/ben-haim10a/ben-haim10a.pdf). 当新数据输入函数时,hist图分区的边界将被调整。 在通常情况下,箱的宽度不相等。 -**Parameters** +**参数** `number_of_bins` — Upper limit for the number of bins in the histogram. The function automatically calculates the number of bins. It tries to reach the specified number of bins, but if it fails, it uses fewer bins. -`values` — [Expression](../syntax.md#syntax-expressions) resulting in input values. +`values` — [表达式](../syntax.md#syntax-expressions) 导致输入值。 -**Returned values** +**返回值** -- [Array](../../data_types/array.md) of [Tuples](../../data_types/tuple.md) of the following format: +- [阵列](../../sql_reference/data_types/array.md) 的 [元组](../../sql_reference/data_types/tuple.md) 下面的格式: ``` [(lower_1, upper_1, height_1), ... (lower_N, upper_N, height_N)] @@ -33,7 +36,7 @@ The functions uses [A Streaming Parallel Decision Tree Algorithm](http://jmlr.or - `upper` — Upper bound of the bin. - `height` — Calculated height of the bin. -**Example** +**示例** ``` sql SELECT histogram(5)(number + 1) @@ -50,7 +53,7 @@ FROM ( └─────────────────────────────────────────────────────────────────────────┘ ``` -You can visualize a histogram with the [bar](../functions/other_functions.md#function-bar) function, for example: +您可以使用 [酒吧](../../sql_reference/functions/other_functions.md#function-bar) 功能,例如: ``` sql WITH histogram(5)(rand() % 100) AS hist @@ -75,46 +78,46 @@ FROM └────────┴───────┘ ``` -In this case, you should remember that you don’t know the histogram bin borders. +在这种情况下,您应该记住您不知道直方图bin边界。 ## sequenceMatch(pattern)(timestamp, cond1, cond2, …) {#function-sequencematch} -Checks whether the sequence contains an event chain that matches the pattern. 
+检查序列是否包含与模式匹配的事件链。 ``` sql sequenceMatch(pattern)(timestamp, cond1, cond2, ...) ``` -!!! warning "Warning" - Events that occur at the same second may lay in the sequence in an undefined order affecting the result. +!!! warning "警告" + 在同一秒钟发生的事件可能以未定义的顺序排列在序列中,影响结果。 -**Parameters** +**参数** -- `pattern` — Pattern string. See [Pattern syntax](#sequence-function-pattern-syntax). +- `pattern` — Pattern string. See [模式语法](#sequence-function-pattern-syntax). -- `timestamp` — Column considered to contain time data. Typical data types are `Date` and `DateTime`. You can also use any of the supported [UInt](../../data_types/int_uint.md) data types. +- `timestamp` — Column considered to contain time data. Typical data types are `Date` 和 `DateTime`. 您还可以使用任何支持的 [UInt](../../sql_reference/data_types/int_uint.md) 数据类型。 -- `cond1`, `cond2` — Conditions that describe the chain of events. Data type: `UInt8`. You can pass up to 32 condition arguments. The function takes only the events described in these conditions into account. If the sequence contains data that isn’t described in a condition, the function skips them. +- `cond1`, `cond2` — Conditions that describe the chain of events. Data type: `UInt8`. 最多可以传递32个条件参数。 该函数只考虑这些条件中描述的事件。 如果序列包含未在条件中描述的数据,则函数将跳过这些数据。 -**Returned values** +**返回值** -- 1, if the pattern is matched. -- 0, if the pattern isn’t matched. +- 1,如果模式匹配。 +- 0,如果模式不匹配。 -Type: `UInt8`. +类型: `UInt8`. -**Pattern syntax** +**模式语法** -- `(?N)` — Matches the condition argument at position `N`. Conditions are numbered in the `[1, 32]` range. For example, `(?1)` matches the argument passed to the `cond1` parameter. +- `(?N)` — Matches the condition argument at position `N`. 条件在编号 `[1, 32]` 范围。 例如, `(?1)` 匹配传递给 `cond1` 参数。 -- `.*` — Matches any number of events. You don’t need conditional arguments to match this element of the pattern. +- `.*` — Matches any number of events. You don't need conditional arguments to match this element of the pattern. -- `(?t operator value)` — Sets the time in seconds that should separate two events. For example, pattern `(?1)(?t>1800)(?2)` matches events that occur more than 1800 seconds from each other. An arbitrary number of any events can lay between these events. You can use the `>=`, `>`, `<`, `<=` operators. +- `(?t operator value)` — Sets the time in seconds that should separate two events. For example, pattern `(?1)(?t>1800)(?2)` 匹配彼此发生超过1800秒的事件。 这些事件之间可以存在任意数量的任何事件。 您可以使用 `>=`, `>`, `<`, `<=` 运营商。 -**Examples** +**例** -Consider data in the `t` table: +考虑在数据 `t` 表: ``` text ┌─time─┬─number─┐ @@ -124,7 +127,7 @@ Consider data in the `t` table: └──────┴────────┘ ``` -Perform the query: +执行查询: ``` sql SELECT sequenceMatch('(?1)(?2)')(time, number = 1, number = 2) FROM t @@ -136,7 +139,7 @@ SELECT sequenceMatch('(?1)(?2)')(time, number = 1, number = 2) FROM t └───────────────────────────────────────────────────────────────────────┘ ``` -The function found the event chain where number 2 follows number 1. It skipped number 3 between them, because the number is not described as an event. If we want to take this number into account when searching for the event chain given in the example, we should make a condition for it. 
+该函数找到了数字2跟随数字1的事件链。 它跳过了它们之间的数字3,因为该数字没有被描述为事件。 如果我们想在搜索示例中给出的事件链时考虑这个数字,我们应该为它创建一个条件。 ``` sql SELECT sequenceMatch('(?1)(?2)')(time, number = 1, number = 2, number = 3) FROM t @@ -148,7 +151,7 @@ SELECT sequenceMatch('(?1)(?2)')(time, number = 1, number = 2, number = 3) FROM └──────────────────────────────────────────────────────────────────────────────────────────┘ ``` -In this case, the function couldn’t find the event chain matching the pattern, because the event for number 3 occured between 1 and 2. If in the same case we checked the condition for number 4, the sequence would match the pattern. +在这种情况下,函数找不到与模式匹配的事件链,因为数字3的事件发生在1和2之间。 如果在相同的情况下,我们检查了数字4的条件,则序列将与模式匹配。 ``` sql SELECT sequenceMatch('(?1)(?2)')(time, number = 1, number = 2, number = 4) FROM t @@ -160,38 +163,38 @@ SELECT sequenceMatch('(?1)(?2)')(time, number = 1, number = 2, number = 4) FROM └──────────────────────────────────────────────────────────────────────────────────────────┘ ``` -**See Also** +**另请参阅** - [sequenceCount](#function-sequencecount) ## sequenceCount(pattern)(time, cond1, cond2, …) {#function-sequencecount} -Counts the number of event chains that matched the pattern. The function searches event chains that don’t overlap. It starts to search for the next chain after the current chain is matched. +计数与模式匹配的事件链的数量。 该函数搜索不重叠的事件链。 当前链匹配后,它开始搜索下一个链。 -!!! warning "Warning" - Events that occur at the same second may lay in the sequence in an undefined order affecting the result. +!!! warning "警告" + 在同一秒钟发生的事件可能以未定义的顺序排列在序列中,影响结果。 ``` sql sequenceCount(pattern)(timestamp, cond1, cond2, ...) ``` -**Parameters** +**参数** -- `pattern` — Pattern string. See [Pattern syntax](#sequence-function-pattern-syntax). +- `pattern` — Pattern string. See [模式语法](#sequence-function-pattern-syntax). -- `timestamp` — Column considered to contain time data. Typical data types are `Date` and `DateTime`. You can also use any of the supported [UInt](../../data_types/int_uint.md) data types. +- `timestamp` — Column considered to contain time data. Typical data types are `Date` 和 `DateTime`. 您还可以使用任何支持的 [UInt](../../sql_reference/data_types/int_uint.md) 数据类型。 -- `cond1`, `cond2` — Conditions that describe the chain of events. Data type: `UInt8`. You can pass up to 32 condition arguments. The function takes only the events described in these conditions into account. If the sequence contains data that isn’t described in a condition, the function skips them. +- `cond1`, `cond2` — Conditions that describe the chain of events. Data type: `UInt8`. 最多可以传递32个条件参数。 该函数只考虑这些条件中描述的事件。 如果序列包含未在条件中描述的数据,则函数将跳过这些数据。 -**Returned values** +**返回值** -- Number of non-overlapping event chains that are matched. +- 匹配的非重叠事件链数。 -Type: `UInt64`. +类型: `UInt64`. -**Example** +**示例** -Consider data in the `t` table: +考虑在数据 `t` 表: ``` text ┌─time─┬─number─┐ @@ -204,7 +207,7 @@ Consider data in the `t` table: └──────┴────────┘ ``` -Count how many times the number 2 occurs after the number 1 with any amount of other numbers between them: +计算数字2在数字1之后出现的次数以及它们之间的任何其他数字: ``` sql SELECT sequenceCount('(?1).*(?2)')(time, number = 1, number = 2) FROM t @@ -216,55 +219,55 @@ SELECT sequenceCount('(?1).*(?2)')(time, number = 1, number = 2) FROM t └─────────────────────────────────────────────────────────────────────────┘ ``` -**See Also** +**另请参阅** - [sequenceMatch](#function-sequencematch) ## windowFunnel {#windowfunnel} -Searches for event chains in a sliding time window and calculates the maximum number of events that occurred from the chain. 
+搜索滑动时间窗中的事件链,并计算从链中发生的最大事件数。 -The function works according to the algorithm: +该函数根据算法工作: -- The function searches for data that triggers the first condition in the chain and sets the event counter to 1. This is the moment when the sliding window starts. +- 该函数搜索触发链中的第一个条件并将事件计数器设置为1的数据。 这是滑动窗口启动的时刻。 -- If events from the chain occur sequentially within the window, the counter is incremented. If the sequence of events is disrupted, the counter isn’t incremented. +- 如果来自链的事件在窗口内顺序发生,则计数器将递增。 如果事件序列中断,则计数器不会增加。 -- If the data has multiple event chains at varying points of completion, the function will only output the size of the longest chain. +- 如果数据在不同的完成点具有多个事件链,则该函数将仅输出最长链的大小。 -**Syntax** +**语法** ``` sql windowFunnel(window, [mode])(timestamp, cond1, cond2, ..., condN) ``` -**Parameters** +**参数** - `window` — Length of the sliding window in seconds. -- `mode` - It is an optional argument. - - `'strict'` - When the `'strict'` is set, the windowFunnel() applies conditions only for the unique values. -- `timestamp` — Name of the column containing the timestamp. Data types supported: [Date](../../data_types/date.md), [DateTime](../../data_types/datetime.md#data_type-datetime) and other unsigned integer types (note that even though timestamp supports the `UInt64` type, it’s value can’t exceed the Int64 maximum, which is 2^63 - 1). -- `cond` — Conditions or data describing the chain of events. [UInt8](../../data_types/int_uint.md). +- `mode` -这是一个可选的参数。 + - `'strict'` -当 `'strict'` 设置时,windowFunnel()仅对唯一值应用条件。 +- `timestamp` — Name of the column containing the timestamp. Data types supported: [日期](../../sql_reference/data_types/date.md), [日期时间](../../sql_reference/data_types/datetime.md#data_type-datetime) 和其他无符号整数类型(请注意,即使时间戳支持 `UInt64` 类型,它的值不能超过Int64最大值,即2^63-1)。 +- `cond` — Conditions or data describing the chain of events. [UInt8](../../sql_reference/data_types/int_uint.md). -**Returned value** +**返回值** -The maximum number of consecutive triggered conditions from the chain within the sliding time window. -All the chains in the selection are analyzed. +滑动时间窗口内连续触发条件链的最大数目。 +对选择中的所有链进行了分析。 -Type: `Integer`. +类型: `Integer`. -**Example** +**示例** -Determine if a set period of time is enough for the user to select a phone and purchase it twice in the online store. +确定设定的时间段是否足以让用户选择手机并在在线商店中购买两次。 -Set the following chain of events: +设置以下事件链: -1. The user logged in to their account on the store (`eventID = 1003`). -2. The user searches for a phone (`eventID = 1007, product = 'phone'`). -3. The user placed an order (`eventID = 1009`). -4. The user made the order again (`eventID = 1010`). +1. 用户登录到其在应用商店中的帐户 (`eventID = 1003`). +2. 用户搜索手机 (`eventID = 1007, product = 'phone'`). +3. 用户下了订单 (`eventID = 1009`). +4. 用户再次下订单 (`eventID = 1010`). -Input table: +输入表: ``` text ┌─event_date─┬─user_id─┬───────────timestamp─┬─eventID─┬─product─┐ @@ -281,9 +284,9 @@ Input table: └────────────┴─────────┴─────────────────────┴─────────┴─────────┘ ``` -Find out how far the user `user_id` could get through the chain in a period in January-February of 2019. +了解用户有多远 `user_id` 可以在2019的1-2月期间通过链条。 -Query: +查询: ``` sql SELECT @@ -302,7 +305,7 @@ GROUP BY level ORDER BY level ASC ``` -Result: +结果: ``` text ┌─level─┬─c─┐ @@ -310,35 +313,35 @@ Result: └───────┴───┘ ``` -## retention {#retention} +## 保留 {#retention} -The function takes as arguments a set of conditions from 1 to 32 arguments of type `UInt8` that indicate whether a certain condition was met for the event. 
-Any condition can be specified as an argument (as in [WHERE](../../query_language/select.md#select-where)). +该函数将一组条件作为参数,类型为1到32个参数 `UInt8` 表示事件是否满足特定条件。 +任何条件都可以指定为参数(如 [WHERE](../../sql_reference/statements/select.md#select-where)). -The conditions, except the first, apply in pairs: the result of the second will be true if the first and second are true, of the third if the first and fird are true, etc. +除了第一个以外,条件成对适用:如果第一个和第二个是真的,第二个结果将是真的,如果第一个和fird是真的,第三个结果将是真的,等等。 -**Syntax** +**语法** ``` sql retention(cond1, cond2, ..., cond32); ``` -**Parameters** +**参数** -- `cond` — an expression that returns a `UInt8` result (1 or 0). +- `cond` — an expression that returns a `UInt8` 结果(1或0)。 -**Returned value** +**返回值** -The array of 1 or 0. +数组为1或0。 - 1 — condition was met for the event. -- 0 — condition wasn’t met for the event. +- 0 — condition wasn't met for the event. -Type: `UInt8`. +类型: `UInt8`. -**Example** +**示例** -Let’s consider an example of calculating the `retention` function to determine site traffic. +让我们考虑计算的一个例子 `retention` 功能,以确定网站流量。 **1.** Сreate a table to illustrate an example. @@ -350,15 +353,15 @@ INSERT INTO retention_test SELECT '2020-01-02', number FROM numbers(10); INSERT INTO retention_test SELECT '2020-01-03', number FROM numbers(15); ``` -Input table: +输入表: -Query: +查询: ``` sql SELECT * FROM retention_test ``` -Result: +结果: ``` text ┌───────date─┬─uid─┐ @@ -399,9 +402,9 @@ Result: └────────────┴─────┘ ``` -**2.** Group users by unique ID `uid` using the `retention` function. +**2.** 按唯一ID对用户进行分组 `uid` 使用 `retention` 功能。 -Query: +查询: ``` sql SELECT @@ -413,7 +416,7 @@ GROUP BY uid ORDER BY uid ASC ``` -Result: +结果: ``` text ┌─uid─┬─r───────┐ @@ -435,9 +438,9 @@ Result: └─────┴─────────┘ ``` -**3.** Calculate the total number of site visits per day. +**3.** 计算每天的现场访问总数。 -Query: +查询: ``` sql SELECT @@ -455,7 +458,7 @@ FROM ) ``` -Result: +结果: ``` text ┌─r1─┬─r2─┬─r3─┐ @@ -463,34 +466,34 @@ Result: └────┴────┴────┘ ``` -Where: +哪里: -- `r1`- the number of unique visitors who visited the site during 2020-01-01 (the `cond1` condition). -- `r2`- the number of unique visitors who visited the site during a specific time period between 2020-01-01 and 2020-01-02 (`cond1` and `cond2` conditions). -- `r3`- the number of unique visitors who visited the site during a specific time period between 2020-01-01 and 2020-01-03 (`cond1` and `cond3` conditions). +- `r1`-2020-01-01期间访问该网站的独立访问者数量( `cond1` 条件)。 +- `r2`-在2020-01-01和2020-01-02之间的特定时间段内访问该网站的唯一访问者的数量 (`cond1` 和 `cond2` 条件)。 +- `r3`-在2020-01-01和2020-01-03之间的特定时间段内访问该网站的唯一访问者的数量 (`cond1` 和 `cond3` 条件)。 ## uniqUpTo(N)(x) {#uniquptonx} Calculates the number of different argument values ​​if it is less than or equal to N. If the number of different argument values is greater than N, it returns N + 1. -Recommended for use with small Ns, up to 10. The maximum value of N is 100. +建议使用小Ns,高达10。 N的最大值为100。 -For the state of an aggregate function, it uses the amount of memory equal to 1 + N \* the size of one value of bytes. -For strings, it stores a non-cryptographic hash of 8 bytes. That is, the calculation is approximated for strings. +对于聚合函数的状态,它使用的内存量等于1+N\*一个字节值的大小。 +对于字符串,它存储8个字节的非加密哈希。 也就是说,计算是近似的字符串。 -The function also works for several arguments. +该函数也适用于多个参数。 -It works as fast as possible, except for cases when a large N value is used and the number of unique values is slightly less than N. 
+它的工作速度尽可能快,除了使用较大的N值并且唯一值的数量略小于N的情况。 -Usage example: +用法示例: ``` text Problem: Generate a report that shows only keywords that produced at least 5 unique users. Solution: Write in the GROUP BY query SearchPhrase HAVING uniqUpTo(4)(UserID) >= 5 ``` -[Original article](https://clickhouse.tech/docs/en/query_language/agg_functions/parametric_functions/) +[原始文章](https://clickhouse.tech/docs/en/query_language/agg_functions/parametric_functions/) -## sumMapFiltered(keys\_to\_keep)(keys, values) {#summapfilteredkeys-to-keepkeys-values} +## sumMapFiltered(keys\_to\_keep)(键值) {#summapfilteredkeys-to-keepkeys-values} -Same behavior as [sumMap](reference.md#agg_functions-summap) except that an array of keys is passed as a parameter. This can be especially useful when working with a high cardinality of keys. +同样的行为 [sumMap](reference.md#agg_functions-summap) 除了一个键数组作为参数传递。 这在使用高基数密钥时尤其有用。 diff --git a/docs/zh/sql_reference/aggregate_functions/reference.md b/docs/zh/sql_reference/aggregate_functions/reference.md new file mode 100644 index 00000000000..b8071860d41 --- /dev/null +++ b/docs/zh/sql_reference/aggregate_functions/reference.md @@ -0,0 +1,1878 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 36 +toc_title: "\u53C2\u8003\u8D44\u6599" +--- + +# 函数参考 {#function-reference} + +## 计数 {#agg_function-count} + +计数行数或非空值。 + +ClickHouse支持以下语法 `count`: +- `count(expr)` 或 `COUNT(DISTINCT expr)`. +- `count()` 或 `COUNT(*)`. 该 `count()` 语法是ClickHouse特定的。 + +**参数** + +该功能可以采取: + +- 零参数。 +- 一 [表达式](../syntax.md#syntax-expressions). + +**返回值** + +- 如果没有参数调用函数,它会计算行数。 +- 如果 [表达式](../syntax.md#syntax-expressions) 被传递,则该函数计数此表达式返回的次数非null。 如果表达式返回 [可为空](../../sql_reference/data_types/nullable.md)-键入值,然后结果 `count` 保持不 `Nullable`. 如果返回表达式,则该函数返回0 `NULL` 对于所有的行。 + +在这两种情况下,返回值的类型为 [UInt64](../../sql_reference/data_types/int_uint.md). + +**详细信息** + +ClickHouse支持 `COUNT(DISTINCT ...)` 语法 这种结构的行为取决于 [count\_distinct\_implementation](../../operations/settings/settings.md#settings-count_distinct_implementation) 设置。 它定义了其中的 [uniq\*](#agg_function-uniq) 函数用于执行操作。 默认值为 [uniqExact](#agg_function-uniqexact) 功能。 + +该 `SELECT count() FROM table` 查询未被优化,因为表中的条目数没有单独存储。 它从表中选择一个小列并计算其中的值数。 + +**例** + +示例1: + +``` sql +SELECT count() FROM t +``` + +``` text +┌─count()─┐ +│ 5 │ +└─────────┘ +``` + +示例2: + +``` sql +SELECT name, value FROM system.settings WHERE name = 'count_distinct_implementation' +``` + +``` text +┌─name──────────────────────────┬─value─────┐ +│ count_distinct_implementation │ uniqExact │ +└───────────────────────────────┴───────────┘ +``` + +``` sql +SELECT count(DISTINCT num) FROM t +``` + +``` text +┌─uniqExact(num)─┐ +│ 3 │ +└────────────────┘ +``` + +这个例子表明 `count(DISTINCT num)` 由执行 `uniqExact` 根据功能 `count_distinct_implementation` 设定值。 + +## 任何(x) {#agg_function-any} + +选择第一个遇到的值。 +查询可以以任何顺序执行,甚至每次都以不同的顺序执行,因此此函数的结果是不确定的。 +要获得确定的结果,您可以使用 ‘min’ 或 ‘max’ 功能,而不是 ‘any’. + +在某些情况下,可以依靠执行的顺序。 这适用于SELECT来自使用ORDER BY的子查询的情况。 + +当一个 `SELECT` 查询具有 `GROUP BY` 子句或至少一个聚合函数,ClickHouse(相对于MySQL)要求在所有表达式 `SELECT`, `HAVING`,和 `ORDER BY` 子句可以从键或聚合函数计算。 换句话说,从表中选择的每个列必须在键或聚合函数内使用。 要获得像MySQL这样的行为,您可以将其他列放在 `any` 聚合函数。 + +## anyHeavy(x) {#anyheavyx} + +使用选择一个频繁出现的值 [重打者](http://www.cs.umd.edu/~samir/498/karp.pdf) 算法。 如果某个值在查询的每个执行线程中出现的情况超过一半,则返回此值。 通常情况下,结果是不确定的。 + +``` sql +anyHeavy(column) +``` + +**参数** + +- `column` – The column name. 
+ +**示例** + +就拿 [时间](../../getting_started/example_datasets/ontime.md) 数据集,并选择在任何频繁出现的值 `AirlineID` 列。 + +``` sql +SELECT anyHeavy(AirlineID) AS res +FROM ontime +``` + +``` text +┌───res─┐ +│ 19690 │ +└───────┘ +``` + +## anyLast(x) {#anylastx} + +选择遇到的最后一个值。 +其结果是一样不确定的 `any` 功能。 + +## 集团比特 {#groupbitand} + +按位应用 `AND` 对于一系列的数字。 + +``` sql +groupBitAnd(expr) +``` + +**参数** + +`expr` – An expression that results in `UInt*` 类型。 + +**返回值** + +的价值 `UInt*` 类型。 + +**示例** + +测试数据: + +``` text +binary decimal +00101100 = 44 +00011100 = 28 +00001101 = 13 +01010101 = 85 +``` + +查询: + +``` sql +SELECT groupBitAnd(num) FROM t +``` + +哪里 `num` 是包含测试数据的列。 + +结果: + +``` text +binary decimal +00000100 = 4 +``` + +## groupBitOr {#groupbitor} + +按位应用 `OR` 对于一系列的数字。 + +``` sql +groupBitOr(expr) +``` + +**参数** + +`expr` – An expression that results in `UInt*` 类型。 + +**返回值** + +的价值 `UInt*` 类型。 + +**示例** + +测试数据: + +``` text +binary decimal +00101100 = 44 +00011100 = 28 +00001101 = 13 +01010101 = 85 +``` + +查询: + +``` sql +SELECT groupBitOr(num) FROM t +``` + +哪里 `num` 是包含测试数据的列。 + +结果: + +``` text +binary decimal +01111101 = 125 +``` + +## groupBitXor {#groupbitxor} + +按位应用 `XOR` 对于一系列的数字。 + +``` sql +groupBitXor(expr) +``` + +**参数** + +`expr` – An expression that results in `UInt*` 类型。 + +**返回值** + +的价值 `UInt*` 类型。 + +**示例** + +测试数据: + +``` text +binary decimal +00101100 = 44 +00011100 = 28 +00001101 = 13 +01010101 = 85 +``` + +查询: + +``` sql +SELECT groupBitXor(num) FROM t +``` + +哪里 `num` 是包含测试数据的列。 + +结果: + +``` text +binary decimal +01101000 = 104 +``` + +## groupBitmap {#groupbitmap} + +从无符号整数列的位图或聚合计算,返回UInt64类型的基数,如果添加后缀状态,则返回 [位图对象](../../sql_reference/functions/bitmap_functions.md). + +``` sql +groupBitmap(expr) +``` + +**参数** + +`expr` – An expression that results in `UInt*` 类型。 + +**返回值** + +的价值 `UInt64` 类型。 + +**示例** + +测试数据: + +``` text +UserID +1 +1 +2 +3 +``` + +查询: + +``` sql +SELECT groupBitmap(UserID) as num FROM t +``` + +结果: + +``` text +num +3 +``` + +## min(x) {#agg_function-min} + +计算最小值。 + +## max(x) {#agg_function-max} + +计算最大值。 + +## argMin(arg,val) {#agg-function-argmin} + +计算 ‘arg’ 最小值的值 ‘val’ 价值。 如果有几个不同的值 ‘arg’ 对于最小值 ‘val’,遇到的第一个值是输出。 + +**示例:** + +``` text +┌─user─────┬─salary─┐ +│ director │ 5000 │ +│ manager │ 3000 │ +│ worker │ 1000 │ +└──────────┴────────┘ +``` + +``` sql +SELECT argMin(user, salary) FROM salary +``` + +``` text +┌─argMin(user, salary)─┐ +│ worker │ +└──────────────────────┘ +``` + +## argMax(arg,val) {#agg-function-argmax} + +计算 ‘arg’ 最大值 ‘val’ 价值。 如果有几个不同的值 ‘arg’ 对于最大值 ‘val’,遇到的第一个值是输出。 + +## sum(x) {#agg_function-sum} + +计算总和。 +只适用于数字。 + +## sumWithOverflow(x) {#sumwithoverflowx} + +使用与输入参数相同的数据类型计算数字的总和。 如果总和超过此数据类型的最大值,则函数返回错误。 + +只适用于数字。 + +## sumMap(key,value) {#agg_functions-summap} + +总计 ‘value’ 数组根据在指定的键 ‘key’ 阵列。 +元素的数量 ‘key’ 和 ‘value’ 总计的每一行必须相同。 +Returns a tuple of two arrays: keys in sorted order, and values ​​summed for the corresponding keys. 
+ +示例: + +``` sql +CREATE TABLE sum_map( + date Date, + timeslot DateTime, + statusMap Nested( + status UInt16, + requests UInt64 + ) +) ENGINE = Log; +INSERT INTO sum_map VALUES + ('2000-01-01', '2000-01-01 00:00:00', [1, 2, 3], [10, 10, 10]), + ('2000-01-01', '2000-01-01 00:00:00', [3, 4, 5], [10, 10, 10]), + ('2000-01-01', '2000-01-01 00:01:00', [4, 5, 6], [10, 10, 10]), + ('2000-01-01', '2000-01-01 00:01:00', [6, 7, 8], [10, 10, 10]); +SELECT + timeslot, + sumMap(statusMap.status, statusMap.requests) +FROM sum_map +GROUP BY timeslot +``` + +``` text +┌────────────timeslot─┬─sumMap(statusMap.status, statusMap.requests)─┐ +│ 2000-01-01 00:00:00 │ ([1,2,3,4,5],[10,10,20,10,10]) │ +│ 2000-01-01 00:01:00 │ ([4,5,6,7,8],[10,10,20,10,10]) │ +└─────────────────────┴──────────────────────────────────────────────┘ +``` + +## skewPop {#skewpop} + +计算 [歪斜](https://en.wikipedia.org/wiki/Skewness) 的序列。 + +``` sql +skewPop(expr) +``` + +**参数** + +`expr` — [表达式](../syntax.md#syntax-expressions) 返回一个数字。 + +**返回值** + +The skewness of the given distribution. Type — [Float64](../../sql_reference/data_types/float.md) + +**示例** + +``` sql +SELECT skewPop(value) FROM series_with_value_column +``` + +## skewSamp {#skewsamp} + +计算 [样品偏度](https://en.wikipedia.org/wiki/Skewness) 的序列。 + +它表示随机变量的偏度的无偏估计,如果传递的值形成其样本。 + +``` sql +skewSamp(expr) +``` + +**参数** + +`expr` — [表达式](../syntax.md#syntax-expressions) 返回一个数字。 + +**返回值** + +The skewness of the given distribution. Type — [Float64](../../sql_reference/data_types/float.md). 如果 `n <= 1` (`n` 是样本的大小),则该函数返回 `nan`. + +**示例** + +``` sql +SELECT skewSamp(value) FROM series_with_value_column +``` + +## kurtPop {#kurtpop} + +计算 [峰度](https://en.wikipedia.org/wiki/Kurtosis) 的序列。 + +``` sql +kurtPop(expr) +``` + +**参数** + +`expr` — [表达式](../syntax.md#syntax-expressions) 返回一个数字。 + +**返回值** + +The kurtosis of the given distribution. Type — [Float64](../../sql_reference/data_types/float.md) + +**示例** + +``` sql +SELECT kurtPop(value) FROM series_with_value_column +``` + +## kurtSamp {#kurtsamp} + +计算 [峰度样本](https://en.wikipedia.org/wiki/Kurtosis) 的序列。 + +它表示随机变量峰度的无偏估计,如果传递的值形成其样本。 + +``` sql +kurtSamp(expr) +``` + +**参数** + +`expr` — [表达式](../syntax.md#syntax-expressions) 返回一个数字。 + +**返回值** + +The kurtosis of the given distribution. Type — [Float64](../../sql_reference/data_types/float.md). 如果 `n <= 1` (`n` 是样本的大小),则该函数返回 `nan`. + +**示例** + +``` sql +SELECT kurtSamp(value) FROM series_with_value_column +``` + +## timeSeriesGroupSum(uid,timestamp,value) {#agg-function-timeseriesgroupsum} + +`timeSeriesGroupSum` 可以聚合不同的时间序列,即采样时间戳不对齐。 +它将在两个采样时间戳之间使用线性插值,然后将时间序列和在一起。 + +- `uid` 是时间序列唯一id, `UInt64`. 
+- `timestamp` 是 `Int64` 类型,以便支持毫秒或微秒精度。
- `value` 是指标值。

函数返回由 `(timestamp, aggregated_value)` 对组成的元组数组。

在使用此函数之前,请确保 `timestamp` 已按升序排列。

示例:

``` text
┌─uid─┬─timestamp─┬─value─┐
│ 1 │ 2 │ 0.2 │
│ 1 │ 7 │ 0.7 │
│ 1 │ 12 │ 1.2 │
│ 1 │ 17 │ 1.7 │
│ 1 │ 25 │ 2.5 │
│ 2 │ 3 │ 0.6 │
│ 2 │ 8 │ 1.6 │
│ 2 │ 12 │ 2.4 │
│ 2 │ 18 │ 3.6 │
│ 2 │ 24 │ 4.8 │
└─────┴───────────┴───────┘
```

``` sql
CREATE TABLE time_series(
    uid UInt64,
    timestamp Int64,
    value Float64
) ENGINE = Memory;
INSERT INTO time_series VALUES
    (1,2,0.2),(1,7,0.7),(1,12,1.2),(1,17,1.7),(1,25,2.5),
    (2,3,0.6),(2,8,1.6),(2,12,2.4),(2,18,3.6),(2,24,4.8);

SELECT timeSeriesGroupSum(uid, timestamp, value)
FROM (
    SELECT * FROM time_series order by timestamp ASC
);
```

其结果将是:

``` text
[(2,0.2),(3,0.9),(7,2.1),(8,2.4),(12,3.6),(17,5.1),(18,5.4),(24,7.2),(25,2.5)]
```

## timeSeriesGroupRateSum(uid,ts,val) {#agg-function-timeseriesgroupratesum}

与 timeSeriesGroupSum 类似,timeSeriesGroupRateSum 会先计算每个时间序列的变化率,然后将这些速率求和。
同样,使用此函数之前,时间戳应按升序排列。

使用此函数,上述示例的结果将是:

``` text
[(2,0),(3,0.1),(7,0.3),(8,0.3),(12,0.3),(17,0.3),(18,0.3),(24,0.3),(25,0.1)]
```

## avg(x) {#agg_function-avg}

计算平均值。
只适用于数字。
结果总是 Float64。

## avgWeighted {#avgweighted}

计算 [加权算术平均值](https://en.wikipedia.org/wiki/Weighted_arithmetic_mean)。

**语法**

``` sql
avgWeighted(x, weight)
```

**参数**

- `x` — 值。 [整数](../data_types/int_uint.md) 或 [浮点](../data_types/float.md)。
- `weight` — 值的权重。 [整数](../data_types/int_uint.md) 或 [浮点](../data_types/float.md)。

`x` 和 `weight` 的类型必须相同。

**返回值**

- 加权平均值。
- 如果所有权重都等于 0,则返回 `NaN`。

类型: [Float64](../data_types/float.md).

**示例**

查询:

``` sql
SELECT avgWeighted(x, w)
FROM values('x Int8, w Int8', (4, 1), (1, 0), (10, 2))
```

结果:

``` text
┌─avgWeighted(x, w)─┐
│ 8 │
└───────────────────┘
```

## uniq {#agg_function-uniq}

计算参数的不同值的近似数量。

``` sql
uniq(x[, ...])
```

**参数**

该函数采用可变数量的参数。 参数可以是 `Tuple`, `Array`, `Date`, `DateTime`, `String`,或数字类型。

**返回值**

- [UInt64](../../sql_reference/data_types/int_uint.md) 类型的数字。

**实现细节**

该函数:

- 为聚合中的所有参数计算哈希值,然后在计算中使用它。

- 使用自适应采样算法。 对于计算状态,该函数最多使用 65536 个元素哈希值的样本。

    该算法非常精确,并且对 CPU 非常高效。当查询包含多个此类函数时,使用 `uniq` 几乎与使用其他聚合函数一样快。

- 确定性地提供结果(不依赖于查询处理顺序)。

我们建议在几乎所有场景中使用此函数。

**另请参阅**

- [uniqCombined](#agg_function-uniqcombined)
- [uniqCombined64](#agg_function-uniqcombined64)
- [uniqHLL12](#agg_function-uniqhll12)
- [uniqExact](#agg_function-uniqexact)

## uniqCombined {#agg_function-uniqcombined}

计算不同参数值的近似数量。

``` sql
uniqCombined(HLL_precision)(x[, ...])
```

对于计算不同值的数量,`uniqCombined` 函数是一个不错的选择。

**参数**

该函数采用可变数量的参数。 参数可以是 `Tuple`, `Array`, `Date`, `DateTime`, `String`,或数字类型。

`HLL_precision` 是 [HyperLogLog](https://en.wikipedia.org/wiki/HyperLogLog) 中单元格数量以 2 为底的对数。该参数可选,您可以将该函数用作 `uniqCombined(x[, ...])`。`HLL_precision` 的默认值是 17,实际占用 96 KiB 的空间(2^17 个单元,每个 6 比特)。

**返回值**

- [UInt64](../../sql_reference/data_types/int_uint.md) 类型的数字。

**实现细节**

该函数:

- 为聚合中的所有参数计算哈希值(对 `String` 使用 64 位哈希,其他类型使用 32 位),然后在计算中使用它。

- 组合使用三种算法:数组、哈希表和带纠错表的 HyperLogLog。

    对于少量不同的元素,使用数组。当集合变大时,使用哈希表。对于更多的元素,则使用 HyperLogLog,它占用固定大小的内存。

- 确定性地提供结果(不依赖于查询处理顺序)。

!!!
note "注"
    因为它对非 `String` 类型使用 32 位哈希,所以当基数显著大于 `UINT_MAX` 时,结果会有非常高的误差(在几百亿个不同值之后误差会迅速增大)。因此在这种情况下,您应该使用 [uniqCombined64](#agg_function-uniqcombined64)。

与 [uniq](#agg_function-uniq) 函数相比,`uniqCombined`:

- 内存消耗少几倍。
- 计算精度高几倍。
- 性能通常略低。 在某些场景下,`uniqCombined` 可以比 `uniq` 表现得更好,例如需要通过网络传输大量聚合状态的分布式查询。

**另请参阅**

- [uniq](#agg_function-uniq)
- [uniqCombined64](#agg_function-uniqcombined64)
- [uniqHLL12](#agg_function-uniqhll12)
- [uniqExact](#agg_function-uniqexact)

## uniqCombined64 {#agg_function-uniqcombined64}

与 [uniqCombined](#agg_function-uniqcombined) 相同,但对所有数据类型使用 64 位哈希。

## uniqHLL12 {#agg_function-uniqhll12}

使用 [HyperLogLog](https://en.wikipedia.org/wiki/HyperLogLog) 算法计算不同参数值的近似数量。

``` sql
uniqHLL12(x[, ...])
```

**参数**

该函数采用可变数量的参数。 参数可以是 `Tuple`, `Array`, `Date`, `DateTime`, `String`,或数字类型。

**返回值**

- [UInt64](../../sql_reference/data_types/int_uint.md) 类型的数字。

**实现细节**

该函数:

- 为聚合中的所有参数计算哈希值,然后在计算中使用它。

- 使用 HyperLogLog 算法来近似计算不同参数值的数量。

    使用 2^12 个 5 比特单元。状态的大小略大于 2.5 KB。对于小数据集(<10K 个元素),结果不是很准确(误差高达约 10%)。但是,对于高基数数据集(10K–100M),结果相当准确,最大误差约为 1.6%。从 100M 开始,估计误差会增大,对于基数极高的数据集(1B+ 个元素),该函数将返回非常不准确的结果。

- 确定性地提供结果(不依赖于查询处理顺序)。

我们不建议使用此函数。 在大多数情况下,请使用 [uniq](#agg_function-uniq) 或 [uniqCombined](#agg_function-uniqcombined) 函数。

**另请参阅**

- [uniq](#agg_function-uniq)
- [uniqCombined](#agg_function-uniqcombined)
- [uniqExact](#agg_function-uniqexact)

## uniqExact {#agg_function-uniqexact}

计算不同参数值的精确数量。

``` sql
uniqExact(x[, ...])
```

如果您绝对需要精确的结果,请使用 `uniqExact` 函数。 否则请使用 [uniq](#agg_function-uniq) 函数。

`uniqExact` 函数比 `uniq` 使用更多的内存,因为状态的大小会随着不同值数量的增加而无限增长。

**参数**

该函数采用可变数量的参数。 参数可以是 `Tuple`, `Array`, `Date`, `DateTime`, `String`,或数字类型。

**另请参阅**

- [uniq](#agg_function-uniq)
- [uniqCombined](#agg_function-uniqcombined)
- [uniqHLL12](#agg_function-uniqhll12)

## groupArray(x), groupArray(max\_size)(x) {#agg_function-grouparray}

创建参数值的数组。
值可以按任意(不确定)顺序添加到数组中。

第二个版本(带 `max_size` 参数)将结果数组的大小限制为 `max_size` 个元素。
例如,`groupArray(1)(x)` 相当于 `[any(x)]`。

在某些情况下,您仍然可以依赖执行顺序。 例如,当 `SELECT` 来自使用了 `ORDER BY` 的子查询时。

## groupArrayInsertAt(value, position) {#grouparrayinsertatvalue-position}

将值插入到数组中的指定位置。

!!! note "注"
    此函数使用从零开始的位置,与传统 SQL 数组的从一开始的位置相反。

接受值和位置作为输入。 如果有多个值被插入到同一位置,其中任何一个都可能出现在结果数组中(单线程执行时使用第一个值)。 如果某个位置没有插入任何值,则该位置被赋予默认值。

可选参数:

- 用于替换空位置的默认值。
- 生成数组的长度。 这允许您为所有聚合键获得相同大小的数组。 使用此参数时,必须指定默认值。

## groupArrayMovingSum {#agg_function-grouparraymovingsum}

计算输入值的移动和。

``` sql
groupArrayMovingSum(numbers_for_summing)
groupArrayMovingSum(window_size)(numbers_for_summing)
```

该函数可以将窗口大小作为参数。 如果未指定,则窗口大小等于列中的行数。

**参数**

- `numbers_for_summing` — 生成数值数据类型值的 [表达式](../syntax.md#syntax-expressions)。
- `window_size` — 计算窗口的大小。
+

**返回值**

- 与输入数据大小和类型相同的数组。

**示例**

示例表:

``` sql
CREATE TABLE t
(
    `int` UInt8,
    `float` Float32,
    `dec` Decimal32(2)
)
ENGINE = TinyLog
```

``` text
┌─int─┬─float─┬──dec─┐
│ 1 │ 1.1 │ 1.10 │
│ 2 │ 2.2 │ 2.20 │
│ 4 │ 4.4 │ 4.40 │
│ 7 │ 7.77 │ 7.77 │
└─────┴───────┴──────┘
```

查询:

``` sql
SELECT
    groupArrayMovingSum(int) AS I,
    groupArrayMovingSum(float) AS F,
    groupArrayMovingSum(dec) AS D
FROM t
```

``` text
┌─I──────────┬─F───────────────────────────────┬─D──────────────────────┐
│ [1,3,7,14] │ [1.1,3.3000002,7.7000003,15.47] │ [1.10,3.30,7.70,15.47] │
└────────────┴─────────────────────────────────┴────────────────────────┘
```

``` sql
SELECT
    groupArrayMovingSum(2)(int) AS I,
    groupArrayMovingSum(2)(float) AS F,
    groupArrayMovingSum(2)(dec) AS D
FROM t
```

``` text
┌─I──────────┬─F───────────────────────────────┬─D──────────────────────┐
│ [1,3,6,11] │ [1.1,3.3000002,6.6000004,12.17] │ [1.10,3.30,6.60,12.17] │
└────────────┴─────────────────────────────────┴────────────────────────┘
```

## groupArrayMovingAvg {#agg_function-grouparraymovingavg}

计算输入值的移动平均值。

``` sql
groupArrayMovingAvg(numbers_for_summing)
groupArrayMovingAvg(window_size)(numbers_for_summing)
```

该函数可以将窗口大小作为参数。 如果未指定,则窗口大小等于列中的行数。

**参数**

- `numbers_for_summing` — 生成数值数据类型值的 [表达式](../syntax.md#syntax-expressions)。
- `window_size` — 计算窗口的大小。

**返回值**

- 与输入数据大小和类型相同的数组。

该函数使用 [向零舍入](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero),会截断对结果数据类型无意义的小数位。

**示例**

示例表:

``` sql
CREATE TABLE t
(
    `int` UInt8,
    `float` Float32,
    `dec` Decimal32(2)
)
ENGINE = TinyLog
```

``` text
┌─int─┬─float─┬──dec─┐
│ 1 │ 1.1 │ 1.10 │
│ 2 │ 2.2 │ 2.20 │
│ 4 │ 4.4 │ 4.40 │
│ 7 │ 7.77 │ 7.77 │
└─────┴───────┴──────┘
```

查询:

``` sql
SELECT
    groupArrayMovingAvg(int) AS I,
    groupArrayMovingAvg(float) AS F,
    groupArrayMovingAvg(dec) AS D
FROM t
```

``` text
┌─I─────────┬─F───────────────────────────────────┬─D─────────────────────┐
│ [0,0,1,3] │ [0.275,0.82500005,1.9250001,3.8675] │ [0.27,0.82,1.92,3.86] │
└───────────┴─────────────────────────────────────┴───────────────────────┘
```

``` sql
SELECT
    groupArrayMovingAvg(2)(int) AS I,
    groupArrayMovingAvg(2)(float) AS F,
    groupArrayMovingAvg(2)(dec) AS D
FROM t
```

``` text
┌─I─────────┬─F────────────────────────────────┬─D─────────────────────┐
│ [0,1,3,5] │ [0.55,1.6500001,3.3000002,6.085] │ [0.55,1.65,3.30,6.08] │
└───────────┴──────────────────────────────────┴───────────────────────┘
```

## groupUniqArray(x), groupUniqArray(max\_size)(x) {#groupuniqarrayx-groupuniqarraymax-sizex}

从不同的参数值创建一个数组。 内存消耗与 `uniqExact` 函数相同。

第二个版本(带 `max_size` 参数)将结果数组的大小限制为 `max_size` 个元素。
例如,`groupUniqArray(1)(x)` 相当于 `[any(x)]`。

## 分位数 {#quantile}

计算数字数据序列的近似 [分位数](https://en.wikipedia.org/wiki/Quantile)。

此函数使用 [水塘抽样](https://en.wikipedia.org/wiki/Reservoir_sampling),水塘大小最大为 8192,并使用随机数发生器进行采样。 结果是非确定性的。 要获得精确的分位数,请使用 [quantileExact](#quantileexact) 函数。

当在一个查询中使用多个具有不同级别的 `quantile*` 函数时,它们的内部状态不会被合并(也就是说,查询的效率低于理论上可达到的水平)。 在这种情况下,请使用 [分位数](#quantiles) 函数。

**语法**

``` sql
quantile(level)(expr)
```

别名: `median`.

**参数**

- `level` — 分位数级别。 可选参数。 从 0 到 1 的浮点常量。 建议使用 `[0.01, 0.99]` 范围内的 `level` 值。 默认值:0.5。 当 `level=0.5` 时,该函数计算 [中位数](https://en.wikipedia.org/wiki/Median)。
+- `expr` — Expression over the column values resulting in numeric [数据类型](../../sql_reference/data_types/index.md#data_types), [日期](../../sql_reference/data_types/date.md) 或 [日期时间](../../sql_reference/data_types/datetime.md). + +**返回值** + +- 指定电平的近似分位数。 + +类型: + +- [Float64](../../sql_reference/data_types/float.md) 对于数字数据类型输入。 +- [日期](../../sql_reference/data_types/date.md) 如果输入值具有 `Date` 类型。 +- [日期时间](../../sql_reference/data_types/datetime.md) 如果输入值具有 `DateTime` 类型。 + +**示例** + +输入表: + +``` text +┌─val─┐ +│ 1 │ +│ 1 │ +│ 2 │ +│ 3 │ +└─────┘ +``` + +查询: + +``` sql +SELECT quantile(val) FROM t +``` + +结果: + +``` text +┌─quantile(val)─┐ +│ 1.5 │ +└───────────────┘ +``` + +**另请参阅** + +- [中位数](#median) +- [分位数](#quantiles) + +## 量化确定 {#quantiledeterministic} + +计算近似值 [分位数](https://en.wikipedia.org/wiki/Quantile) 的数字数据序列。 + +此功能适用 [油藏采样](https://en.wikipedia.org/wiki/Reservoir_sampling) 与储层大小高达8192和采样的确定性算法。 结果是确定性的。 要获得精确的分位数,请使用 [quantileExact](#quantileexact) 功能。 + +当使用多个 `quantile*` 在查询中具有不同级别的函数,内部状态不会被组合(即查询的工作效率低于它可以)。 在这种情况下,使用 [分位数](#quantiles) 功能。 + +**语法** + +``` sql +quantileDeterministic(level)(expr, determinator) +``` + +别名: `medianDeterministic`. + +**参数** + +- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` 值的范围 `[0.01, 0.99]`. 默认值:0.5。 在 `level=0.5` 该函数计算 [中位数](https://en.wikipedia.org/wiki/Median). +- `expr` — Expression over the column values resulting in numeric [数据类型](../../sql_reference/data_types/index.md#data_types), [日期](../../sql_reference/data_types/date.md) 或 [日期时间](../../sql_reference/data_types/datetime.md). +- `determinator` — Number whose hash is used instead of a random number generator in the reservoir sampling algorithm to make the result of sampling deterministic. As a determinator you can use any deterministic positive number, for example, a user id or an event id. If the same determinator value occures too often, the function works incorrectly. + +**返回值** + +- 指定电平的近似分位数。 + +类型: + +- [Float64](../../sql_reference/data_types/float.md) 对于数字数据类型输入。 +- [日期](../../sql_reference/data_types/date.md) 如果输入值具有 `Date` 类型。 +- [日期时间](../../sql_reference/data_types/datetime.md) 如果输入值具有 `DateTime` 类型。 + +**示例** + +输入表: + +``` text +┌─val─┐ +│ 1 │ +│ 1 │ +│ 2 │ +│ 3 │ +└─────┘ +``` + +查询: + +``` sql +SELECT quantileDeterministic(val, 1) FROM t +``` + +结果: + +``` text +┌─quantileDeterministic(val, 1)─┐ +│ 1.5 │ +└───────────────────────────────┘ +``` + +**另请参阅** + +- [中位数](#median) +- [分位数](#quantiles) + +## quantileExact {#quantileexact} + +正是计算 [分位数](https://en.wikipedia.org/wiki/Quantile) 的数字数据序列。 + +To get exact value, all the passed values ​​are combined into an array, which is then partially sorted. Therefore, the function consumes `O(n)` 内存,其中 `n` 是传递的多个值。 然而,对于少量的值,该函数是非常有效的。 + +当使用多个 `quantile*` 在查询中具有不同级别的函数,内部状态不会被组合(即查询的工作效率低于它可以)。 在这种情况下,使用 [分位数](#quantiles) 功能。 + +**语法** + +``` sql +quantileExact(level)(expr) +``` + +别名: `medianExact`. + +**参数** + +- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` 值的范围 `[0.01, 0.99]`. 默认值:0.5。 在 `level=0.5` 该函数计算 [中位数](https://en.wikipedia.org/wiki/Median). +- `expr` — Expression over the column values resulting in numeric [数据类型](../../sql_reference/data_types/index.md#data_types), [日期](../../sql_reference/data_types/date.md) 或 [日期时间](../../sql_reference/data_types/datetime.md). 
+ +**返回值** + +- 指定电平的分位数。 + +类型: + +- [Float64](../../sql_reference/data_types/float.md) 对于数字数据类型输入。 +- [日期](../../sql_reference/data_types/date.md) 如果输入值具有 `Date` 类型。 +- [日期时间](../../sql_reference/data_types/datetime.md) 如果输入值具有 `DateTime` 类型。 + +**示例** + +查询: + +``` sql +SELECT quantileExact(number) FROM numbers(10) +``` + +结果: + +``` text +┌─quantileExact(number)─┐ +│ 5 │ +└───────────────────────┘ +``` + +**另请参阅** + +- [中位数](#median) +- [分位数](#quantiles) + +## 分位数加权 {#quantileexactweighted} + +正是计算 [分位数](https://en.wikipedia.org/wiki/Quantile) 数值数据序列,考虑到每个元素的权重。 + +To get exact value, all the passed values ​​are combined into an array, which is then partially sorted. Each value is counted with its weight, as if it is present `weight` times. A hash table is used in the algorithm. Because of this, if the passed values ​​are frequently repeated, the function consumes less RAM than [quantileExact](#quantileexact). 您可以使用此功能,而不是 `quantileExact` 并指定重量1。 + +当使用多个 `quantile*` 在查询中具有不同级别的函数,内部状态不会被组合(即查询的工作效率低于它可以)。 在这种情况下,使用 [分位数](#quantiles) 功能。 + +**语法** + +``` sql +quantileExactWeighted(level)(expr, weight) +``` + +别名: `medianExactWeighted`. + +**参数** + +- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` 值的范围 `[0.01, 0.99]`. 默认值:0.5。 在 `level=0.5` 该函数计算 [中位数](https://en.wikipedia.org/wiki/Median). +- `expr` — Expression over the column values resulting in numeric [数据类型](../../sql_reference/data_types/index.md#data_types), [日期](../../sql_reference/data_types/date.md) 或 [日期时间](../../sql_reference/data_types/datetime.md). +- `weight` — Column with weights of sequence members. Weight is a number of value occurrences. + +**返回值** + +- 指定电平的分位数。 + +类型: + +- [Float64](../../sql_reference/data_types/float.md) 对于数字数据类型输入。 +- [日期](../../sql_reference/data_types/date.md) 如果输入值具有 `Date` 类型。 +- [日期时间](../../sql_reference/data_types/datetime.md) 如果输入值具有 `DateTime` 类型。 + +**示例** + +输入表: + +``` text +┌─n─┬─val─┐ +│ 0 │ 3 │ +│ 1 │ 2 │ +│ 2 │ 1 │ +│ 5 │ 4 │ +└───┴─────┘ +``` + +查询: + +``` sql +SELECT quantileExactWeighted(n, val) FROM t +``` + +结果: + +``` text +┌─quantileExactWeighted(n, val)─┐ +│ 1 │ +└───────────────────────────────┘ +``` + +**另请参阅** + +- [中位数](#median) +- [分位数](#quantiles) + +## 分位定时 {#quantiletiming} + +随着确定的精度计算 [分位数](https://en.wikipedia.org/wiki/Quantile) 的数字数据序列。 + +结果是确定性的(它不依赖于查询处理顺序)。 该函数针对描述加载网页时间或后端响应时间等分布的序列进行了优化。 + +当使用多个 `quantile*` 在查询中具有不同级别的函数,内部状态不会被组合(即查询的工作效率低于它可以)。 在这种情况下,使用 [分位数](#quantiles) 功能。 + +**语法** + +``` sql +quantileTiming(level)(expr) +``` + +别名: `medianTiming`. + +**参数** + +- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` 值的范围 `[0.01, 0.99]`. 默认值:0.5。 在 `level=0.5` 该函数计算 [中位数](https://en.wikipedia.org/wiki/Median). + +- `expr` — [表达式](../syntax.md#syntax-expressions) 在一个列值返回 [浮动\*](../../sql_reference/data_types/float.md)-键入号码。 + + - If negative values are passed to the function, the behavior is undefined. + - If the value is greater than 30,000 (a page loading time of more than 30 seconds), it is assumed to be 30,000. + +**精度** + +计算是准确的,如果: + +- 值的总数不超过5670。 +- 总数值超过5670,但页面加载时间小于1024ms。 + +否则,计算结果将四舍五入到16毫秒的最接近倍数。 + +!!! note "注" + 对于计算页面加载时间分位数,此函数比 [分位数](#quantile). + +**返回值** + +- 指定电平的分位数。 + +类型: `Float32`. + +!!! 
note "注" + 如果没有值传递给函数(当使用 `quantileTimingIf`), [阿南](../../sql_reference/data_types/float.md#data_type-float-nan-inf) 被返回。 这样做的目的是将这些案例与导致零的案例区分开来。 看 [按条款订购](../statements/select.md#select-order-by) 对于排序注意事项 `NaN` 值。 + +**示例** + +输入表: + +``` text +┌─response_time─┐ +│ 72 │ +│ 112 │ +│ 126 │ +│ 145 │ +│ 104 │ +│ 242 │ +│ 313 │ +│ 168 │ +│ 108 │ +└───────────────┘ +``` + +查询: + +``` sql +SELECT quantileTiming(response_time) FROM t +``` + +结果: + +``` text +┌─quantileTiming(response_time)─┐ +│ 126 │ +└───────────────────────────────┘ +``` + +**另请参阅** + +- [中位数](#median) +- [分位数](#quantiles) + +## 分位时间加权 {#quantiletimingweighted} + +随着确定的精度计算 [分位数](https://en.wikipedia.org/wiki/Quantile) 根据每个序列成员的权重对数字数据序列进行处理。 + +结果是确定性的(它不依赖于查询处理顺序)。 该函数针对描述加载网页时间或后端响应时间等分布的序列进行了优化。 + +当使用多个 `quantile*` 在查询中具有不同级别的函数,内部状态不会被组合(即查询的工作效率低于它可以)。 在这种情况下,使用 [分位数](#quantiles) 功能。 + +**语法** + +``` sql +quantileTimingWeighted(level)(expr, weight) +``` + +别名: `medianTimingWeighted`. + +**参数** + +- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` 值的范围 `[0.01, 0.99]`. 默认值:0.5。 在 `level=0.5` 该函数计算 [中位数](https://en.wikipedia.org/wiki/Median). + +- `expr` — [表达式](../syntax.md#syntax-expressions) 在一个列值返回 [浮动\*](../../sql_reference/data_types/float.md)-键入号码。 + + - If negative values are passed to the function, the behavior is undefined. + - If the value is greater than 30,000 (a page loading time of more than 30 seconds), it is assumed to be 30,000. + +- `weight` — Column with weights of sequence elements. Weight is a number of value occurrences. + +**精度** + +计算是准确的,如果: + +- 值的总数不超过5670。 +- 总数值超过5670,但页面加载时间小于1024ms。 + +否则,计算结果将四舍五入到16毫秒的最接近倍数。 + +!!! note "注" + 对于计算页面加载时间分位数,此函数比 [分位数](#quantile). + +**返回值** + +- 指定电平的分位数。 + +类型: `Float32`. + +!!! note "注" + 如果没有值传递给函数(当使用 `quantileTimingIf`), [阿南](../../sql_reference/data_types/float.md#data_type-float-nan-inf) 被返回。 这样做的目的是将这些案例与导致零的案例区分开来。 看 [按条款订购](../statements/select.md#select-order-by) 对于排序注意事项 `NaN` 值。 + +**示例** + +输入表: + +``` text +┌─response_time─┬─weight─┐ +│ 68 │ 1 │ +│ 104 │ 2 │ +│ 112 │ 3 │ +│ 126 │ 2 │ +│ 138 │ 1 │ +│ 162 │ 1 │ +└───────────────┴────────┘ +``` + +查询: + +``` sql +SELECT quantileTimingWeighted(response_time, weight) FROM t +``` + +结果: + +``` text +┌─quantileTimingWeighted(response_time, weight)─┐ +│ 112 │ +└───────────────────────────────────────────────┘ +``` + +**另请参阅** + +- [中位数](#median) +- [分位数](#quantiles) + +## quantileTDigest {#quantiletdigest} + +计算近似值 [分位数](https://en.wikipedia.org/wiki/Quantile) 使用的数字数据序列 [t-digest](https://github.com/tdunning/t-digest/blob/master/docs/t-digest-paper/histo.pdf) 算法。 + +最大误差为1%。 内存消耗 `log(n)`,哪里 `n` 是多个值。 结果取决于运行查询的顺序,并且是不确定的。 + +该功能的性能低于性能 [分位数](#quantile) 或 [分位定时](#quantiletiming). 在状态大小与精度的比率方面,这个函数比 `quantile`. + +当使用多个 `quantile*` 在查询中具有不同级别的函数,内部状态不会被组合(即查询的工作效率低于它可以)。 在这种情况下,使用 [分位数](#quantiles) 功能。 + +**语法** + +``` sql +quantileTDigest(level)(expr) +``` + +别名: `medianTDigest`. + +**参数** + +- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` 值的范围 `[0.01, 0.99]`. 默认值:0.5。 在 `level=0.5` 该函数计算 [中位数](https://en.wikipedia.org/wiki/Median). +- `expr` — Expression over the column values resulting in numeric [数据类型](../../sql_reference/data_types/index.md#data_types), [日期](../../sql_reference/data_types/date.md) 或 [日期时间](../../sql_reference/data_types/datetime.md). 
+ +**返回值** + +- 指定电平的近似分位数。 + +类型: + +- [Float64](../../sql_reference/data_types/float.md) 对于数字数据类型输入。 +- [日期](../../sql_reference/data_types/date.md) 如果输入值具有 `Date` 类型。 +- [日期时间](../../sql_reference/data_types/datetime.md) 如果输入值具有 `DateTime` 类型。 + +**示例** + +查询: + +``` sql +SELECT quantileTDigest(number) FROM numbers(10) +``` + +结果: + +``` text +┌─quantileTDigest(number)─┐ +│ 4.5 │ +└─────────────────────────┘ +``` + +**另请参阅** + +- [中位数](#median) +- [分位数](#quantiles) + +## quantileTDigestWeighted {#quantiletdigestweighted} + +计算近似值 [分位数](https://en.wikipedia.org/wiki/Quantile) 使用的数字数据序列 [t-digest](https://github.com/tdunning/t-digest/blob/master/docs/t-digest-paper/histo.pdf) 算法。 该函数考虑了每个序列成员的权重。 最大误差为1%。 内存消耗 `log(n)`,哪里 `n` 是多个值。 + +该功能的性能低于性能 [分位数](#quantile) 或 [分位定时](#quantiletiming). 在状态大小与精度的比率方面,这个函数比 `quantile`. + +结果取决于运行查询的顺序,并且是不确定的。 + +当使用多个 `quantile*` 在查询中具有不同级别的函数,内部状态不会被组合(即查询的工作效率低于它可以)。 在这种情况下,使用 [分位数](#quantiles) 功能。 + +**语法** + +``` sql +quantileTDigest(level)(expr) +``` + +别名: `medianTDigest`. + +**参数** + +- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. We recommend using a `level` 值的范围 `[0.01, 0.99]`. 默认值:0.5。 在 `level=0.5` 该函数计算 [中位数](https://en.wikipedia.org/wiki/Median). +- `expr` — Expression over the column values resulting in numeric [数据类型](../../sql_reference/data_types/index.md#data_types), [日期](../../sql_reference/data_types/date.md) 或 [日期时间](../../sql_reference/data_types/datetime.md). +- `weight` — Column with weights of sequence elements. Weight is a number of value occurrences. + +**返回值** + +- 指定电平的近似分位数。 + +类型: + +- [Float64](../../sql_reference/data_types/float.md) 对于数字数据类型输入。 +- [日期](../../sql_reference/data_types/date.md) 如果输入值具有 `Date` 类型。 +- [日期时间](../../sql_reference/data_types/datetime.md) 如果输入值具有 `DateTime` 类型。 + +**示例** + +查询: + +``` sql +SELECT quantileTDigestWeighted(number, 1) FROM numbers(10) +``` + +结果: + +``` text +┌─quantileTDigestWeighted(number, 1)─┐ +│ 4.5 │ +└────────────────────────────────────┘ +``` + +**另请参阅** + +- [中位数](#median) +- [分位数](#quantiles) + +## 中位数 {#median} + +该 `median*` 函数是相应的别名 `quantile*` 功能。 它们计算数字数据样本的中位数。 + +功能: + +- `median` — Alias for [分位数](#quantile). +- `medianDeterministic` — Alias for [量化确定](#quantiledeterministic). +- `medianExact` — Alias for [quantileExact](#quantileexact). +- `medianExactWeighted` — Alias for [分位数加权](#quantileexactweighted). +- `medianTiming` — Alias for [分位定时](#quantiletiming). +- `medianTimingWeighted` — Alias for [分位时间加权](#quantiletimingweighted). +- `medianTDigest` — Alias for [quantileTDigest](#quantiletdigest). +- `medianTDigestWeighted` — Alias for [quantileTDigestWeighted](#quantiletdigestweighted). + +**示例** + +输入表: + +``` text +┌─val─┐ +│ 1 │ +│ 1 │ +│ 2 │ +│ 3 │ +└─────┘ +``` + +查询: + +``` sql +SELECT medianDeterministic(val, 1) FROM t +``` + +结果: + +``` text +┌─medianDeterministic(val, 1)─┐ +│ 1.5 │ +└─────────────────────────────┘ +``` + +## quantiles(level1, level2, …)(x) {#quantiles} + +所有分位数函数也具有相应的分位数函数: `quantiles`, `quantilesDeterministic`, `quantilesTiming`, `quantilesTimingWeighted`, `quantilesExact`, `quantilesExactWeighted`, `quantilesTDigest`. 这些函数在一遍中计算所列电平的所有分位数,并返回结果值的数组。 + +## varSamp(x) {#varsampx} + +计算金额 `Σ((x - x̅)^2) / (n - 1)`,哪里 `n` 是样本大小和 `x̅`是平均值 `x`. + +它表示随机变量的方差的无偏估计,如果传递的值形成其样本。 + +返回 `Float64`. 当 `n <= 1`,返回 `+∞`. + +## varPop(x) {#varpopx} + +计算金额 `Σ((x - x̅)^2) / n`,哪里 `n` 是样本大小和 `x̅`是平均值 `x`. + +换句话说,分散为一组值。 返回 `Float64`. + +## stddevSamp(x) {#stddevsampx} + +结果等于平方根 `varSamp(x)`. 
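
作为示意,假设输入为 1、2、3、4 这四个值(均值 x̅ = 2.5,Σ((x - x̅)^2) = 5),可以用下面这个最小的查询对比 `varSamp`、`varPop` 与 `stddevSamp` 的结果:

``` sql
SELECT
    varSamp(x),    -- Σ((x - x̅)^2) / (n - 1) = 5 / 3 ≈ 1.667
    varPop(x),     -- Σ((x - x̅)^2) / n = 5 / 4 = 1.25
    stddevSamp(x)  -- sqrt(varSamp(x)) ≈ 1.291
FROM (SELECT arrayJoin([1, 2, 3, 4]) AS x)
```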
+ +## stddevPop(x) {#stddevpopx} + +结果等于平方根 `varPop(x)`. + +## topK(N)(x) {#topknx} + +返回指定列中近似最常见值的数组。 生成的数组按值的近似频率降序排序(而不是值本身)。 + +实现了 [过滤节省空间](http://www.l2f.inesc-id.pt/~fmmb/wiki/uploads/Work/misnis.ref0a.pdf) 基于reduce-and-combine算法的TopK分析算法 [并行节省空间](https://arxiv.org/pdf/1401.0702.pdf). + +``` sql +topK(N)(column) +``` + +此函数不提供保证的结果。 在某些情况下,可能会发生错误,并且可能会返回不是最常见值的常见值。 + +我们建议使用 `N < 10` 值;性能降低了大 `N` 值。 的最大值 `N = 65536`. + +**参数** + +- ‘N’ 是要返回的元素数。 + +如果省略该参数,则使用默认值10。 + +**参数** + +- ' x ' – The value to calculate frequency. + +**示例** + +就拿 [时间](../../getting_started/example_datasets/ontime.md) 数据集,并选择在三个最频繁出现的值 `AirlineID` 列。 + +``` sql +SELECT topK(3)(AirlineID) AS res +FROM ontime +``` + +``` text +┌─res─────────────────┐ +│ [19393,19790,19805] │ +└─────────────────────┘ +``` + +## topKWeighted {#topkweighted} + +类似于 `topK` 但需要一个整数类型的附加参数 - `weight`. 每一价值是占 `weight` 次频率计算。 + +**语法** + +``` sql +topKWeighted(N)(x, weight) +``` + +**参数** + +- `N` — The number of elements to return. + +**参数** + +- `x` – The value. +- `weight` — The weight. [UInt8](../../sql_reference/data_types/int_uint.md). + +**返回值** + +返回具有最大近似权重总和的值数组。 + +**示例** + +查询: + +``` sql +SELECT topKWeighted(10)(number, number) FROM numbers(1000) +``` + +结果: + +``` text +┌─topKWeighted(10)(number, number)──────────┐ +│ [999,998,997,996,995,994,993,992,991,990] │ +└───────────────────────────────────────────┘ +``` + +## covarSamp(x,y) {#covarsampx-y} + +计算的值 `Σ((x - x̅)(y - y̅)) / (n - 1)`. + +返回Float64。 当 `n <= 1`, returns +∞. + +## covarPop(x,y) {#covarpopx-y} + +计算的值 `Σ((x - x̅)(y - y̅)) / n`. + +## corr(x,y) {#corrx-y} + +计算Pearson相关系数: `Σ((x - x̅)(y - y̅)) / sqrt(Σ((x - x̅)^2) * Σ((y - y̅)^2))`. + +## categoricalInformationValue {#categoricalinformationvalue} + +计算的值 `(P(tag = 1) - P(tag = 0))(log(P(tag = 1)) - log(P(tag = 0)))` 对于每个类别。 + +``` sql +categoricalInformationValue(category1, category2, ..., tag) +``` + +结果指示离散(分类)要素如何使用 `[category1, category2, ...]` 有助于预测的价值的学习模型 `tag`. + +## simpleLinearRegression {#simplelinearregression} + +执行简单(一维)线性回归。 + +``` sql +simpleLinearRegression(x, y) +``` + +参数: + +- `x` — Column with dependent variable values. +- `y` — Column with explanatory variable values. + +返回值: + +常量 `(a, b)` 结果行的 `y = a*x + b`. + +**例** + +``` sql +SELECT arrayReduce('simpleLinearRegression', [0, 1, 2, 3], [0, 1, 2, 3]) +``` + +``` text +┌─arrayReduce('simpleLinearRegression', [0, 1, 2, 3], [0, 1, 2, 3])─┐ +│ (1,0) │ +└───────────────────────────────────────────────────────────────────┘ +``` + +``` sql +SELECT arrayReduce('simpleLinearRegression', [0, 1, 2, 3], [3, 4, 5, 6]) +``` + +``` text +┌─arrayReduce('simpleLinearRegression', [0, 1, 2, 3], [3, 4, 5, 6])─┐ +│ (1,3) │ +└───────────────────────────────────────────────────────────────────┘ +``` + +## 随机指标线上回归 {#agg_functions-stochasticlinearregression} + +该函数实现随机线性回归。 它支持自定义参数的学习率,L2正则化系数,迷你批量大小,并具有更新权重的方法很少 ([亚当](https://en.wikipedia.org/wiki/Stochastic_gradient_descent#Adam) (默认使用), [简单SGD](https://en.wikipedia.org/wiki/Stochastic_gradient_descent), [动量](https://en.wikipedia.org/wiki/Stochastic_gradient_descent#Momentum), [Nesterov](https://mipt.ru/upload/medialibrary/d7e/41-91.pdf)). + +### 参数 {#agg_functions-stochasticlinearregression-parameters} + +有4个可自定义的参数。 它们按顺序传递给函数,但是没有必要传递所有四个默认值将被使用,但是好的模型需要一些参数调整。 + +``` text +stochasticLinearRegression(1.0, 1.0, 10, 'SGD') +``` + +1. `learning rate` 当执行梯度下降步骤时,步长上的系数。 过大的学习率可能会导致模型的权重无限大。 默认值为 `0.00001`. +2. `l2 regularization coefficient` 这可能有助于防止过度拟合。 默认值为 `0.1`. +3. 
`mini-batch size` 设置元素的数量,这些元素将被计算和求和以执行梯度下降的一个步骤。 纯随机下降使用一个元素,但是具有小批量(约10个元素)使梯度步骤更稳定。 默认值为 `15`. +4. `method for updating weights` 他们是: `Adam` (默认情况下), `SGD`, `Momentum`, `Nesterov`. `Momentum` 和 `Nesterov` 需要更多的计算和内存,但是它们恰好在收敛速度和随机梯度方法的稳定性方面是有用的。 + +### 用途 {#agg_functions-stochasticlinearregression-usage} + +`stochasticLinearRegression` 用于两个步骤:拟合模型和预测新数据。 为了拟合模型并保存其状态以供以后使用,我们使用 `-State` combinator,它基本上保存了状态(模型权重等)。 +为了预测我们使用函数 [evalMLMethod](../functions/machine_learning_functions.md#machine_learning_methods-evalmlmethod),这需要一个状态作为参数以及特征来预测。 + + + +**1.** 适合 + +可以使用这种查询。 + +``` sql +CREATE TABLE IF NOT EXISTS train_data +( + param1 Float64, + param2 Float64, + target Float64 +) ENGINE = Memory; + +CREATE TABLE your_model ENGINE = Memory AS SELECT +stochasticLinearRegressionState(0.1, 0.0, 5, 'SGD')(target, param1, param2) +AS state FROM train_data; +``` + +在这里,我们还需要将数据插入到 `train_data` 桌子 参数的数量不是固定的,它只取决于参数的数量,传递到 `linearRegressionState`. 它们都必须是数值。 +请注意,带有目标值的列(我们想要学习预测)被插入作为第一个参数。 + +**2.** 预测 + +在将状态保存到表中之后,我们可以多次使用它进行预测,甚至与其他状态合并并创建新的更好的模型。 + +``` sql +WITH (SELECT state FROM your_model) AS model SELECT +evalMLMethod(model, param1, param2) FROM test_data +``` + +查询将返回一列预测值。 请注意,第一个参数 `evalMLMethod` 是 `AggregateFunctionState` 对象,接下来是要素列。 + +`test_data` 是一个像表 `train_data` 但可能不包含目标值。 + +### 注 {#agg_functions-stochasticlinearregression-notes} + +1. 要合并两个模型,用户可以创建这样的查询: + `sql SELECT state1 + state2 FROM your_models` + 哪里 `your_models` 表包含这两个模型。 此查询将返回new `AggregateFunctionState` 对象。 + +2. 如果没有,用户可以获取创建的模型的权重用于自己的目的,而不保存模型 `-State` 使用combinator。 + `sql SELECT stochasticLinearRegression(0.01)(target, param1, param2) FROM train_data` + 这种查询将拟合模型并返回其权重-首先是权重,它对应于模型的参数,最后一个是偏差。 所以在上面的例子中,查询将返回一个具有3个值的列。 + +**另请参阅** + +- [stochasticLogisticRegression](#agg_functions-stochasticlogisticregression) +- [线性回归和逻辑回归之间的区别](https://stackoverflow.com/questions/12146914/what-is-the-difference-between-linear-regression-and-logistic-regression) + +## stochasticLogisticRegression {#agg_functions-stochasticlogisticregression} + +该函数实现随机逻辑回归。 它可以用于二进制分类问题,支持与stochasticLinearRegression相同的自定义参数,并以相同的方式工作。 + +### 参数 {#agg_functions-stochasticlogisticregression-parameters} + +参数与stochasticLinearRegression中的参数完全相同: +`learning rate`, `l2 regularization coefficient`, `mini-batch size`, `method for updating weights`. +欲了解更多信息,请参阅 [参数](#agg_functions-stochasticlinearregression-parameters). + +``` text +stochasticLogisticRegression(1.0, 1.0, 10, 'SGD') +``` + +1. 适合 + + + + See the `Fitting` section in the [stochasticLinearRegression](#stochasticlinearregression-usage-fitting) description. + + Predicted labels have to be in \[-1, 1\]. + +1. 预测 + + + + Using saved state we can predict probability of object having label `1`. + + ``` sql + WITH (SELECT state FROM your_model) AS model SELECT + evalMLMethod(model, param1, param2) FROM test_data + ``` + + The query will return a column of probabilities. Note that first argument of `evalMLMethod` is `AggregateFunctionState` object, next are columns of features. + + We can also set a bound of probability, which assigns elements to different labels. + + ``` sql + SELECT ans < 1.1 AND ans > 0.5 FROM + (WITH (SELECT state FROM your_model) AS model SELECT + evalMLMethod(model, param1, param2) AS ans FROM test_data) + ``` + + Then the result will be labels. + + `test_data` is a table like `train_data` but may not contain target value. 
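
上文第 1 步的拟合查询可以直接套用 stochasticLinearRegression 的写法,只需把聚合函数换成带 `-State` 组合器的 `stochasticLogisticRegressionState`。下面是一个最小的示意查询,沿用前文示例中假设的 `train_data` 表结构(注意目标值须在 \[-1, 1\] 内):

``` sql
CREATE TABLE your_model ENGINE = Memory AS SELECT
stochasticLogisticRegressionState(0.1, 0.0, 5, 'SGD')(target, param1, param2)
AS state FROM train_data;
```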
+ +**另请参阅** + +- [随机指标线上回归](#agg_functions-stochasticlinearregression) +- [线性回归和逻辑回归之间的差异。](https://stackoverflow.com/questions/12146914/what-is-the-difference-between-linear-regression-and-logistic-regression) + +## groupBitmapAnd {#groupbitmapand} + +计算位图列的AND,返回UInt64类型的基数,如果添加后缀状态,则返回 [位图对象](../../sql_reference/functions/bitmap_functions.md). + +``` sql +groupBitmapAnd(expr) +``` + +**参数** + +`expr` – An expression that results in `AggregateFunction(groupBitmap, UInt*)` 类型。 + +**返回值** + +的价值 `UInt64` 类型。 + +**示例** + +``` sql +DROP TABLE IF EXISTS bitmap_column_expr_test2; +CREATE TABLE bitmap_column_expr_test2 +( + tag_id String, + z AggregateFunction(groupBitmap, UInt32) +) +ENGINE = MergeTree +ORDER BY tag_id; + +INSERT INTO bitmap_column_expr_test2 VALUES ('tag1', bitmapBuild(cast([1,2,3,4,5,6,7,8,9,10] as Array(UInt32)))); +INSERT INTO bitmap_column_expr_test2 VALUES ('tag2', bitmapBuild(cast([6,7,8,9,10,11,12,13,14,15] as Array(UInt32)))); +INSERT INTO bitmap_column_expr_test2 VALUES ('tag3', bitmapBuild(cast([2,4,6,8,10,12] as Array(UInt32)))); + +SELECT groupBitmapAnd(z) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%'); +┌─groupBitmapAnd(z)─┐ +│ 3 │ +└───────────────────┘ + +SELECT arraySort(bitmapToArray(groupBitmapAndState(z))) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%'); +┌─arraySort(bitmapToArray(groupBitmapAndState(z)))─┐ +│ [6,8,10] │ +└──────────────────────────────────────────────────┘ +``` + +## groupBitmapOr {#groupbitmapor} + +计算位图列的OR,返回UInt64类型的基数,如果添加后缀状态,则返回 [位图对象](../../sql_reference/functions/bitmap_functions.md). 这相当于 `groupBitmapMerge`. + +``` sql +groupBitmapOr(expr) +``` + +**参数** + +`expr` – An expression that results in `AggregateFunction(groupBitmap, UInt*)` 类型。 + +**返回值** + +的价值 `UInt64` 类型。 + +**示例** + +``` sql +DROP TABLE IF EXISTS bitmap_column_expr_test2; +CREATE TABLE bitmap_column_expr_test2 +( + tag_id String, + z AggregateFunction(groupBitmap, UInt32) +) +ENGINE = MergeTree +ORDER BY tag_id; + +INSERT INTO bitmap_column_expr_test2 VALUES ('tag1', bitmapBuild(cast([1,2,3,4,5,6,7,8,9,10] as Array(UInt32)))); +INSERT INTO bitmap_column_expr_test2 VALUES ('tag2', bitmapBuild(cast([6,7,8,9,10,11,12,13,14,15] as Array(UInt32)))); +INSERT INTO bitmap_column_expr_test2 VALUES ('tag3', bitmapBuild(cast([2,4,6,8,10,12] as Array(UInt32)))); + +SELECT groupBitmapOr(z) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%'); +┌─groupBitmapOr(z)─┐ +│ 15 │ +└──────────────────┘ + +SELECT arraySort(bitmapToArray(groupBitmapOrState(z))) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%'); +┌─arraySort(bitmapToArray(groupBitmapOrState(z)))─┐ +│ [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15] │ +└─────────────────────────────────────────────────┘ +``` + +## groupBitmapXor {#groupbitmapxor} + +计算位图列的XOR,返回UInt64类型的基数,如果添加后缀状态,则返回 [位图对象](../../sql_reference/functions/bitmap_functions.md). 
+

``` sql
groupBitmapXor(expr)
```

**参数**

`expr` – 结果为 `AggregateFunction(groupBitmap, UInt*)` 类型的表达式。

**返回值**

`UInt64` 类型的值。

**示例**

``` sql
DROP TABLE IF EXISTS bitmap_column_expr_test2;
CREATE TABLE bitmap_column_expr_test2
(
    tag_id String,
    z AggregateFunction(groupBitmap, UInt32)
)
ENGINE = MergeTree
ORDER BY tag_id;

INSERT INTO bitmap_column_expr_test2 VALUES ('tag1', bitmapBuild(cast([1,2,3,4,5,6,7,8,9,10] as Array(UInt32))));
INSERT INTO bitmap_column_expr_test2 VALUES ('tag2', bitmapBuild(cast([6,7,8,9,10,11,12,13,14,15] as Array(UInt32))));
INSERT INTO bitmap_column_expr_test2 VALUES ('tag3', bitmapBuild(cast([2,4,6,8,10,12] as Array(UInt32))));

SELECT groupBitmapXor(z) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%');
┌─groupBitmapXor(z)─┐
│ 10 │
└───────────────────┘

SELECT arraySort(bitmapToArray(groupBitmapXorState(z))) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%');
┌─arraySort(bitmapToArray(groupBitmapXorState(z)))─┐
│ [1,3,5,6,8,10,11,13,14,15] │
└──────────────────────────────────────────────────┘
```

[原始文章](https://clickhouse.tech/docs/en/query_language/agg_functions/reference/)
diff --git a/docs/zh/data_types/nested_data_structures/aggregatefunction.md b/docs/zh/sql_reference/data_types/aggregatefunction.md
similarity index 82%
rename from docs/zh/data_types/nested_data_structures/aggregatefunction.md
rename to docs/zh/sql_reference/data_types/aggregatefunction.md
index 3153150d2bd..e1fb7b1d133 100644
--- a/docs/zh/data_types/nested_data_structures/aggregatefunction.md
+++ b/docs/zh/sql_reference/data_types/aggregatefunction.md
@@ -1,3 +1,4 @@
+
 # AggregateFunction(name, types\_of\_arguments…) {#data-type-aggregatefunction}
 
 聚合函数的中间状态,可以通过聚合函数名称加`-State`后缀的形式得到它。与此同时,当您需要访问该类型的最终状态数据时,您需要以相同的聚合函数名加`-Merge`后缀的形式来得到最终状态数据。
 
@@ -23,7 +24,7 @@ CREATE TABLE t
 ) ENGINE = ...
``` -上述中的[uniq](../../query_language/agg_functions/reference.md#agg_function-uniq), anyIf ([any](../../query_language/agg_functions/reference.md#agg_function-any)+[If](../../query_language/agg_functions/combinators.md#agg-functions-combinator-if)) 以及 [quantiles](../../query_language/agg_functions/reference.md) 都为ClickHouse中支持的聚合函数。 +上述中的[uniq](../../sql_reference/data_types/aggregatefunction.md#agg_function-uniq), anyIf ([任何](../../sql_reference/data_types/aggregatefunction.md#agg_function-any)+[如果](../../sql_reference/data_types/aggregatefunction.md#agg-functions-combinator-if)) 以及 [分位数](../../sql_reference/data_types/aggregatefunction.md) 都为ClickHouse中支持的聚合函数。 ## 使用指南 {#shi-yong-zhi-nan} @@ -58,6 +59,6 @@ SELECT uniqMerge(state) FROM (SELECT uniqState(UserID) AS state FROM table GROUP ## 使用示例 {#shi-yong-shi-li} -请参阅 [AggregatingMergeTree](../../operations/table_engines/aggregatingmergetree.md) 的说明 +请参阅 [AggregatingMergeTree](../../sql_reference/data_types/aggregatefunction.md) 的说明 [来源文章](https://clickhouse.tech/docs/en/data_types/nested_data_structures/aggregatefunction/) diff --git a/docs/zh/data_types/array.md b/docs/zh/sql_reference/data_types/array.md similarity index 91% rename from docs/zh/data_types/array.md rename to docs/zh/sql_reference/data_types/array.md index 774210b0d29..7a35647d20e 100644 --- a/docs/zh/data_types/array.md +++ b/docs/zh/sql_reference/data_types/array.md @@ -1,4 +1,5 @@ -# Array(T) {#data-type-array} + +# 阵列(T) {#data-type-array} 由 `T` 类型元素组成的数组。 @@ -42,7 +43,7 @@ ## 使用数据类型 {#shi-yong-shu-ju-lei-xing} -ClickHouse会自动检测数组元素,并根据元素计算出存储这些元素最小的数据类型。如果在元素中存在 [NULL](../query_language/syntax.md#null-literal) 或存在 [Nullable](nullable.md#data_type-nullable) 类型元素,那么数组的元素类型将会变成 [Nullable](nullable.md)。 +ClickHouse会自动检测数组元素,并根据元素计算出存储这些元素最小的数据类型。如果在元素中存在 [NULL](../../sql_reference/data_types/array.md#null-literal) 或存在 [可为空](nullable.md#data_type-nullable) 类型元素,那么数组的元素类型将会变成 [可为空](nullable.md)。 如果 ClickHouse 无法确定数据类型,它将产生异常。当尝试同时创建一个包含字符串和数字的数组时会发生这种情况 (`SELECT array(1, 'a')`)。 diff --git a/docs/zh/data_types/boolean.md b/docs/zh/sql_reference/data_types/boolean.md similarity index 73% rename from docs/zh/data_types/boolean.md rename to docs/zh/sql_reference/data_types/boolean.md index 1918bb1c56b..26c8ac5cdd5 100644 --- a/docs/zh/data_types/boolean.md +++ b/docs/zh/sql_reference/data_types/boolean.md @@ -1,3 +1,4 @@ -# Boolean Values {#boolean-values} + +# 布尔值 {#boolean-values} 没有单独的类型来存储布尔值。可以使用 UInt8 类型,取值限制为 0 或 1。 diff --git a/docs/zh/data_types/date.md b/docs/zh/sql_reference/data_types/date.md similarity index 94% rename from docs/zh/data_types/date.md rename to docs/zh/sql_reference/data_types/date.md index 96ee60d53a8..18bdb507f37 100644 --- a/docs/zh/data_types/date.md +++ b/docs/zh/sql_reference/data_types/date.md @@ -1,4 +1,5 @@ -# Date {#date} + +# 日期 {#date} 日期类型,用两个字节存储,表示从 1970-01-01 (无符号) 到当前的日期值。允许存储从 Unix 纪元开始到编译阶段定义的上限阈值常量(目前上限是2106年,但最终完全支持的年份为2105)。最小值输出为0000-00-00。 diff --git a/docs/zh/data_types/datetime.md b/docs/zh/sql_reference/data_types/datetime.md similarity index 95% rename from docs/zh/data_types/datetime.md rename to docs/zh/sql_reference/data_types/datetime.md index 50c5964360c..1122131614b 100644 --- a/docs/zh/data_types/datetime.md +++ b/docs/zh/sql_reference/data_types/datetime.md @@ -1,4 +1,5 @@ -# DateTime {#data_type-datetime} + +# 日期时间 {#data_type-datetime} 时间戳类型。用四个字节(无符号的)存储 Unix 时间戳)。允许存储与日期类型相同的范围内的值。最小值为 0000-00-00 00:00:00。时间戳类型值精确到秒(没有闰秒)。 diff --git a/docs/zh/sql_reference/data_types/datetime64.md 
b/docs/zh/sql_reference/data_types/datetime64.md new file mode 100644 index 00000000000..dd87486cee5 --- /dev/null +++ b/docs/zh/sql_reference/data_types/datetime64.md @@ -0,0 +1,104 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 49 +toc_title: DateTime64 +--- + +# Datetime64 {#data_type-datetime64} + +允许存储时间instant间,可以表示为日历日期和一天中的时间,具有定义的亚秒精度 + +刻度尺寸(精度):10-精度 秒 + +语法: + +``` sql +DateTime64(precision, [timezone]) +``` + +在内部,存储数据作为一些 ‘ticks’ 自纪元开始(1970-01-01 00:00:00UTC)作为Int64. 刻度分辨率由precision参数确定。 此外,该 `DateTime64` 类型可以存储时区是相同的整个列,影响如何的值 `DateTime64` 类型值以文本格式显示,以及如何解析指定为字符串的值 (‘2020-01-01 05:00:01.000’). 时区不存储在表的行中(或resultset中),而是存储在列元数据中。 查看详细信息 [日期时间](datetime.md). + +## 例 {#examples} + +**1.** 创建一个表 `DateTime64`-输入列并将数据插入其中: + +``` sql +CREATE TABLE dt +( + `timestamp` DateTime64(3, 'Europe/Moscow'), + `event_id` UInt8 +) +ENGINE = TinyLog +``` + +``` sql +INSERT INTO dt Values (1546300800000, 1), ('2019-01-01 00:00:00', 2) +``` + +``` sql +SELECT * FROM dt +``` + +``` text +┌───────────────timestamp─┬─event_id─┐ +│ 2019-01-01 03:00:00.000 │ 1 │ +│ 2019-01-01 00:00:00.000 │ 2 │ +└─────────────────────────┴──────────┘ +``` + +- 将日期时间作为整数插入时,将其视为适当缩放的Unix时间戳(UTC)。 `1546300800000` (精度为3)表示 `'2019-01-01 00:00:00'` UTC. 然而,作为 `timestamp` 列有 `Europe/Moscow` (UTC+3)指定的时区,当输出为字符串时,该值将显示为 `'2019-01-01 03:00:00'` +- 当插入字符串值作为日期时间时,它被视为处于列时区。 `'2019-01-01 00:00:00'` 将被视为 `Europe/Moscow` 时区并存储为 `1546290000000`. + +**2.** 过滤 `DateTime64` 值 + +``` sql +SELECT * FROM dt WHERE timestamp = toDateTime64('2019-01-01 00:00:00', 3, 'Europe/Moscow') +``` + +``` text +┌───────────────timestamp─┬─event_id─┐ +│ 2019-01-01 00:00:00.000 │ 2 │ +└─────────────────────────┴──────────┘ +``` + +不像 `DateTime`, `DateTime64` 值不转换为 `String` 自动 + +**3.** 获取一个时区 `DateTime64`-类型值: + +``` sql +SELECT toDateTime64(now(), 3, 'Europe/Moscow') AS column, toTypeName(column) AS x +``` + +``` text +┌──────────────────column─┬─x──────────────────────────────┐ +│ 2019-10-16 04:12:04.000 │ DateTime64(3, 'Europe/Moscow') │ +└─────────────────────────┴────────────────────────────────┘ +``` + +**4.** 时区转换 + +``` sql +SELECT +toDateTime64(timestamp, 3, 'Europe/London') as lon_time, +toDateTime64(timestamp, 3, 'Europe/Moscow') as mos_time +FROM dt +``` + +``` text +┌───────────────lon_time──┬────────────────mos_time─┐ +│ 2019-01-01 00:00:00.000 │ 2019-01-01 03:00:00.000 │ +│ 2018-12-31 21:00:00.000 │ 2019-01-01 00:00:00.000 │ +└─────────────────────────┴─────────────────────────┘ +``` + +## 另请参阅 {#see-also} + +- [类型转换函数](../../sql_reference/functions/type_conversion_functions.md) +- [用于处理日期和时间的函数](../../sql_reference/functions/date_time_functions.md) +- [用于处理数组的函数](../../sql_reference/functions/array_functions.md) +- [该 `date_time_input_format` 设置](../../operations/settings/settings.md#settings-date_time_input_format) +- [该 `timezone` 服务器配置参数](../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-timezone) +- [使用日期和时间的操作员](../../sql_reference/operators.md#operators-datetime) +- [`Date` 数据类型](date.md) +- [`DateTime` 数据类型](datetime.md) diff --git a/docs/zh/data_types/decimal.md b/docs/zh/sql_reference/data_types/decimal.md similarity index 85% rename from docs/zh/data_types/decimal.md rename to docs/zh/sql_reference/data_types/decimal.md index 1503da90d89..24bc1f70415 100644 --- a/docs/zh/data_types/decimal.md +++ b/docs/zh/sql_reference/data_types/decimal.md @@ -1,4 +1,5 @@ -# Decimal(P, S), Decimal32(S), Decimal64(S), Decimal128(S) 
{#decimalp-s-decimal32s-decimal64s-decimal128s} + +# Decimal(P,S),Decimal32(S),Decimal64(S),Decimal128(S) {#decimalp-s-decimal32s-decimal64s-decimal128s} 有符号的定点数,可在加、减和乘法运算过程中保持精度。对于除法,最低有效数字会被丢弃(不舍入)。 @@ -8,15 +9,15 @@ - S - 规模。有效范围:\[0:P\],决定数字的小数部分中包含的小数位数。 对于不同的 P 参数值 Decimal 表示,以下例子都是同义的: -- P from \[ 1 : 9 \] - for Decimal32(S) -- P from \[ 10 : 18 \] - for Decimal64(S) -- P from \[ 19 : 38 \] - for Decimal128(S) +-P从\[1:9\]-对于Decimal32(S) +-P从\[10:18\]-对于Decimal64(小号) +-P从\[19:38\]-对于Decimal128(S) ## 十进制值范围 {#shi-jin-zhi-zhi-fan-wei} -- Decimal32(S) - ( -1 \* 10^(9 - S), 1 \* 10^(9 - S) ) -- Decimal64(S) - ( -1 \* 10^(18 - S), 1 \* 10^(18 - S) ) -- Decimal128(S) - ( -1 \* 10^(38 - S), 1 \* 10^(38 - S) ) +- Decimal32(S) - ( -1 \* 10^(9 - S),1\*10^(9-S) ) +- Decimal64(S) - ( -1 \* 10^(18 - S),1\*10^(18-S) ) +- Decimal128(S) - ( -1 \* 10^(38 - S),1\*10^(38-S) ) 例如,Decimal32(4) 可以表示 -99999.9999 至 99999.9999 的数值,步长为0.0001。 @@ -30,9 +31,9 @@ 对Decimal的二进制运算导致更宽的结果类型(无论参数的顺序如何)。 -- Decimal64(S1) Decimal32(S2) -\> Decimal64(S) -- Decimal128(S1) Decimal32(S2) -\> Decimal128(S) -- Decimal128(S1) Decimal64(S2) -\> Decimal128(S) +- Decimal64(S1) Decimal32(S2)-\>Decimal64(S) +- Decimal128(S1) Decimal32(S2)-\>Decimal128(S) +- Decimal128(S1) Decimal64(S2)-\>Decimal128(S) 精度变化的规则: diff --git a/docs/zh/sql_reference/data_types/domains/index.md b/docs/zh/sql_reference/data_types/domains/index.md new file mode 100644 index 00000000000..7df13d51e54 --- /dev/null +++ b/docs/zh/sql_reference/data_types/domains/index.md @@ -0,0 +1,8 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_folder_title: "\u57DF" +toc_priority: 56 +--- + + diff --git a/docs/zh/data_types/domains/ipv4.md b/docs/zh/sql_reference/data_types/domains/ipv4.md similarity index 99% rename from docs/zh/data_types/domains/ipv4.md rename to docs/zh/sql_reference/data_types/domains/ipv4.md index 65c066fb487..26ed4d84922 100644 --- a/docs/zh/data_types/domains/ipv4.md +++ b/docs/zh/sql_reference/data_types/domains/ipv4.md @@ -1,3 +1,4 @@ + ## IPv4 {#ipv4} `IPv4`是与`UInt32`类型保持二进制兼容的Domain类型,其用于存储IPv4地址的值。它提供了更为紧凑的二进制存储的同时支持识别可读性更加友好的输入输出格式。 diff --git a/docs/zh/data_types/domains/ipv6.md b/docs/zh/sql_reference/data_types/domains/ipv6.md similarity index 99% rename from docs/zh/data_types/domains/ipv6.md rename to docs/zh/sql_reference/data_types/domains/ipv6.md index bc0f95932aa..b147fb6db84 100644 --- a/docs/zh/data_types/domains/ipv6.md +++ b/docs/zh/sql_reference/data_types/domains/ipv6.md @@ -1,3 +1,4 @@ + ## IPv6 {#ipv6} `IPv6`是与`FixedString(16)`类型保持二进制兼容的Domain类型,其用于存储IPv6地址的值。它提供了更为紧凑的二进制存储的同时支持识别可读性更加友好的输入输出格式。 diff --git a/docs/zh/data_types/domains/overview.md b/docs/zh/sql_reference/data_types/domains/overview.md similarity index 98% rename from docs/zh/data_types/domains/overview.md rename to docs/zh/sql_reference/data_types/domains/overview.md index 6c59860132e..b330bad18c0 100644 --- a/docs/zh/data_types/domains/overview.md +++ b/docs/zh/sql_reference/data_types/domains/overview.md @@ -1,4 +1,5 @@ -# Domains {#domains} + +# 域 {#domains} Domain类型是特定实现的类型,它总是与某个现存的基础类型保持二进制兼容的同时添加一些额外的特性,以能够在维持磁盘数据不变的情况下使用这些额外的特性。目前ClickHouse暂不支持自定义domain类型。 diff --git a/docs/zh/data_types/enum.md b/docs/zh/sql_reference/data_types/enum.md similarity index 95% rename from docs/zh/data_types/enum.md rename to docs/zh/sql_reference/data_types/enum.md index 034406a303b..87ada143638 100644 --- a/docs/zh/data_types/enum.md +++ b/docs/zh/sql_reference/data_types/enum.md @@ -1,4 +1,5 @@ -# Enum8, 
Enum16 {#enum8-enum16} + +# Enum8,Enum16 {#enum8-enum16} 包括 `Enum8` 和 `Enum16` 类型。`Enum` 保存 `'string'= integer` 的对应关系。在 ClickHouse 中,尽管用户使用的是字符串常量,但所有含有 `Enum` 数据类型的操作都是按照包含整数的值来执行。这在性能方面比使用 `String` 数据类型更有效。 @@ -65,9 +66,9 @@ `Enum8` 类型的每个值范围是 `-128 ... 127`,`Enum16` 类型的每个值范围是 `-32768 ... 32767`。所有的字符串或者数字都必须是不一样的。允许存在空字符串。如果某个 Enum 类型被指定了(在表定义的时候),数字可以是任意顺序。然而,顺序并不重要。 -`Enum` 中的字符串和数值都不能是 [NULL](../query_language/syntax.md)。 +`Enum` 中的字符串和数值都不能是 [NULL](../../sql_reference/data_types/enum.md)。 -`Enum` 包含在 [Nullable](nullable.md) 类型中。因此,如果您使用此查询创建一个表 +`Enum` 包含在 [可为空](nullable.md) 类型中。因此,如果您使用此查询创建一个表 CREATE TABLE t_enum_nullable ( diff --git a/docs/zh/data_types/fixedstring.md b/docs/zh/sql_reference/data_types/fixedstring.md similarity index 82% rename from docs/zh/data_types/fixedstring.md rename to docs/zh/sql_reference/data_types/fixedstring.md index 27945b74fc8..c8e71e69303 100644 --- a/docs/zh/data_types/fixedstring.md +++ b/docs/zh/sql_reference/data_types/fixedstring.md @@ -1,4 +1,5 @@ -# FixedString {#fixedstring} + +# 固定字符串 {#fixedstring} 固定长度 N 的字符串(N 必须是严格的正自然数)。 @@ -23,7 +24,7 @@ 当向ClickHouse中插入数据时, -- 如果字符串包含的字节数少于\`N’,将对字符串末尾进行空字节填充。 +- 如果字符串包含的字节数少于\`N',将对字符串末尾进行空字节填充。 - 如果字符串包含的字节数大于`N`,将抛出`Too large value for FixedString(N)`异常。 当做数据查询时,ClickHouse不会删除字符串末尾的空字节。 如果使用`WHERE`子句,则须要手动添加空字节以匹配`FixedString`的值。 以下示例阐明了如何将`WHERE`子句与`FixedString`一起使用。 @@ -51,6 +52,6 @@ WHERE a = 'b\0' 这种方式与MySQL的`CHAR`类型的方式不同(MySQL中使用空格填充字符串,并在输出时删除空格)。 -请注意,`FixedString(N)`的长度是个常量。仅由空字符组成的字符串,函数[length](../query_language/functions/array_functions.md#array_functions-length)返回值为`N`,而函数[empty](../query_language/functions/string_functions.md#string_functions-empty)的返回值为`1`。 +请注意,`FixedString(N)`的长度是个常量。仅由空字符组成的字符串,函数[长度](../../sql_reference/data_types/fixedstring.md#array_functions-length)返回值为`N`,而函数[空](../../sql_reference/data_types/fixedstring.md#string_functions-empty)的返回值为`1`。 [来源文章](https://clickhouse.tech/docs/en/data_types/fixedstring/) diff --git a/docs/zh/data_types/float.md b/docs/zh/sql_reference/data_types/float.md similarity index 89% rename from docs/zh/data_types/float.md rename to docs/zh/sql_reference/data_types/float.md index f43000ffa35..bdc8093a9a9 100644 --- a/docs/zh/data_types/float.md +++ b/docs/zh/sql_reference/data_types/float.md @@ -1,4 +1,5 @@ -# Float32, Float64 {#float32-float64} + +# Float32,Float64 {#float32-float64} [浮点数](https://en.wikipedia.org/wiki/IEEE_754)。 @@ -29,7 +30,7 @@ SELECT 1 - 0.9 - 当一行行阅读浮点数的时候,浮点数的结果可能不是机器最近显示的数值。 -## NaN and Inf {#data_type-float-nan-inf} +## 南和Inf {#data_type-float-nan-inf} 与标准SQL相比,ClickHouse 支持以下类别的浮点数: @@ -67,4 +68,4 @@ SELECT -0.5 / 0 │ nan │ └──────────────┘ -可以在 [ORDER BY 子句](../query_language/select.md) 查看更多关于 `NaN` 排序的规则。 +可以在 [ORDER BY 子句](../../sql_reference/data_types/float.md) 查看更多关于 `NaN` 排序的规则。 diff --git a/docs/zh/data_types/index.md b/docs/zh/sql_reference/data_types/index.md similarity index 99% rename from docs/zh/data_types/index.md rename to docs/zh/sql_reference/data_types/index.md index 70aa976cb11..8df3911ab36 100644 --- a/docs/zh/data_types/index.md +++ b/docs/zh/sql_reference/data_types/index.md @@ -1,3 +1,4 @@ + # 数据类型 {#data_types} ClickHouse 可以在数据表中存储多种数据类型。 diff --git a/docs/zh/sql_reference/data_types/int_uint.md b/docs/zh/sql_reference/data_types/int_uint.md new file mode 100644 index 00000000000..b74bbcf178f --- /dev/null +++ b/docs/zh/sql_reference/data_types/int_uint.md @@ -0,0 +1,18 @@ + +# UInt8,UInt16,UInt32,UInt64,Int8,Int16,Int32,Int64 {#uint8-uint16-uint32-uint64-int8-int16-int32-int64} + 
+固定长度的整型,包括有符号整型或无符号整型。 + +## 整型范围 {#zheng-xing-fan-wei} + +- Int8-\[-128:127\] +- Int16-\[-32768:32767\] +- Int32-\[-2147483648:2147483647\] +- Int64-\[-9223372036854775808:9223372036854775807\] + +## 无符号整型范围 {#wu-fu-hao-zheng-xing-fan-wei} + +- UInt8-\[0:255\] +- UInt16-\[0:65535\] +- UInt32-\[0:4294967295\] +- UInt64-\[0:18446744073709551615\] diff --git a/docs/zh/data_types/nested_data_structures/index.md b/docs/zh/sql_reference/data_types/nested_data_structures/index.md similarity index 97% rename from docs/zh/data_types/nested_data_structures/index.md rename to docs/zh/sql_reference/data_types/nested_data_structures/index.md index 3914064674e..fdeb9fe6ac5 100644 --- a/docs/zh/data_types/nested_data_structures/index.md +++ b/docs/zh/sql_reference/data_types/nested_data_structures/index.md @@ -1 +1,2 @@ + # 嵌套数据结构 {#qian-tao-shu-ju-jie-gou} diff --git a/docs/zh/data_types/nested_data_structures/nested.md b/docs/zh/sql_reference/data_types/nested_data_structures/nested.md similarity index 99% rename from docs/zh/data_types/nested_data_structures/nested.md rename to docs/zh/sql_reference/data_types/nested_data_structures/nested.md index d2fd1e3a630..6ac26c0eeba 100644 --- a/docs/zh/data_types/nested_data_structures/nested.md +++ b/docs/zh/sql_reference/data_types/nested_data_structures/nested.md @@ -1,3 +1,4 @@ + # Nested(Name1 Type1, Name2 Type2, …) {#nestedname1-type1-name2-type2} 嵌套数据结构类似于嵌套表。嵌套数据结构的参数(列名和类型)与 CREATE 查询类似。每个表可以包含任意多行嵌套数据结构。 diff --git a/docs/zh/data_types/nullable.md b/docs/zh/sql_reference/data_types/nullable.md similarity index 72% rename from docs/zh/data_types/nullable.md rename to docs/zh/sql_reference/data_types/nullable.md index ae4a2066fd7..6ece5f3c178 100644 --- a/docs/zh/data_types/nullable.md +++ b/docs/zh/sql_reference/data_types/nullable.md @@ -1,8 +1,9 @@ -# Nullable(TypeName) {#data_type-nullable} -允许用特殊标记 ([NULL](../query_language/syntax.md)) 表示«缺失值»,可以与 `TypeName` 的正常值存放一起。例如,`Nullable(Int8)` 类型的列可以存储 `Int8` 类型值,而没有值的行将存储 `NULL`。 +# 可为空(类型名称) {#data_type-nullable} -对于 `TypeName`,不能使用复合数据类型 [Array](array.md) 和 [Tuple](tuple.md)。复合数据类型可以包含 `Nullable` 类型值,例如`Array(Nullable(Int8))`。 +允许用特殊标记 ([NULL](../../sql_reference/data_types/nullable.md)) 表示«缺失值»,可以与 `TypeName` 的正常值存放一起。例如,`Nullable(Int8)` 类型的列可以存储 `Int8` 类型值,而没有值的行将存储 `NULL`。 + +对于 `TypeName`,不能使用复合数据类型 [阵列](array.md) 和 [元组](tuple.md)。复合数据类型可以包含 `Nullable` 类型值,例如`Array(Nullable(Int8))`。 `Nullable` 类型字段不能包含在表索引中。 diff --git a/docs/zh/data_types/special_data_types/expression.md b/docs/zh/sql_reference/data_types/special_data_types/expression.md similarity index 64% rename from docs/zh/data_types/special_data_types/expression.md rename to docs/zh/sql_reference/data_types/special_data_types/expression.md index 86b4d5591c7..d4fb3257f60 100644 --- a/docs/zh/data_types/special_data_types/expression.md +++ b/docs/zh/sql_reference/data_types/special_data_types/expression.md @@ -1,3 +1,4 @@ -# Expression {#expression} + +# 表达式 {#expression} 用于表示高阶函数中的Lambd表达式。 diff --git a/docs/zh/data_types/special_data_types/index.md b/docs/zh/sql_reference/data_types/special_data_types/index.md similarity index 71% rename from docs/zh/data_types/special_data_types/index.md rename to docs/zh/sql_reference/data_types/special_data_types/index.md index 5963c377f01..64d93783cb9 100644 --- a/docs/zh/data_types/special_data_types/index.md +++ b/docs/zh/sql_reference/data_types/special_data_types/index.md @@ -1,3 +1,4 @@ -# Special Data Types {#special-data-types} + +# 特殊数据类型 {#special-data-types} 
特殊数据类型的值既不能存在表中也不能在结果中输出,但可用于查询的中间结果。 diff --git a/docs/zh/data_types/special_data_types/interval.md b/docs/zh/sql_reference/data_types/special_data_types/interval.md similarity index 51% rename from docs/zh/data_types/special_data_types/interval.md rename to docs/zh/sql_reference/data_types/special_data_types/interval.md index 8a37476579c..7a7ac888775 100644 --- a/docs/zh/data_types/special_data_types/interval.md +++ b/docs/zh/sql_reference/data_types/special_data_types/interval.md @@ -1,20 +1,23 @@ --- -en_copy: true +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 61 +toc_title: "\u95F4\u9694" --- -# Interval {#data-type-interval} +# 间隔 {#data-type-interval} -The family of data types representing time and date intervals. The resulting types of the [INTERVAL](../../query_language/operators.md#operator-interval) operator. +表示时间和日期间隔的数据类型族。 由此产生的类型 [INTERVAL](../../../sql_reference/operators.md#operator-interval) 接线员 -!!! warning "Warning" - `Interval` data type values can’t be stored in tables. +!!! warning "警告" + `Interval` 数据类型值不能存储在表中。 -Structure: +结构: -- Time interval as an unsigned integer value. -- Type of an interval. +- 时间间隔作为无符号整数值。 +- 间隔的类型。 -Supported interval types: +支持的时间间隔类型: - `SECOND` - `MINUTE` @@ -25,7 +28,7 @@ Supported interval types: - `QUARTER` - `YEAR` -For each interval type, there is a separate data type. For example, the `DAY` interval corresponds to the `IntervalDay` data type: +对于每个间隔类型,都有一个单独的数据类型。 例如, `DAY` 间隔对应于 `IntervalDay` 数据类型: ``` sql SELECT toTypeName(INTERVAL 4 DAY) @@ -37,9 +40,9 @@ SELECT toTypeName(INTERVAL 4 DAY) └──────────────────────────────┘ ``` -## Usage Remarks {#data-type-interval-usage-remarks} +## 使用说明 {#data-type-interval-usage-remarks} -You can use `Interval`-type values in arithmetical operations with [Date](../../data_types/date.md) and [DateTime](../../data_types/datetime.md)-type values. For example, you can add 4 days to the current time: +您可以使用 `Interval`-在算术运算类型值 [日期](../../../sql_reference/data_types/date.md) 和 [日期时间](../../../sql_reference/data_types/datetime.md)-类型值。 例如,您可以将4天添加到当前时间: ``` sql SELECT now() as current_date_time, current_date_time + INTERVAL 4 DAY @@ -51,9 +54,9 @@ SELECT now() as current_date_time, current_date_time + INTERVAL 4 DAY └─────────────────────┴───────────────────────────────┘ ``` -Intervals with different types can’t be combined. You can’t use intervals like `4 DAY 1 HOUR`. Specify intervals in units that are smaller or equal to the smallest unit of the interval, for example, the interval `1 day and an hour` interval can be expressed as `25 HOUR` or `90000 SECOND`. +不同类型的间隔不能合并。 你不能使用间隔,如 `4 DAY 1 HOUR`. 以小于或等于间隔的最小单位的单位指定间隔,例如,间隔 `1 day and an hour` 间隔可以表示为 `25 HOUR` 或 `90000 SECOND`. -You can’t perform arithmetical operations with `Interval`-type values, but you can add intervals of different types consequently to values in `Date` or `DateTime` data types. 
For example: +你不能执行算术运算 `Interval`-类型值,但你可以添加不同类型的时间间隔,因此值 `Date` 或 `DateTime` 数据类型。 例如: ``` sql SELECT now() AS current_date_time, current_date_time + INTERVAL 4 DAY + INTERVAL 3 HOUR @@ -65,7 +68,7 @@ SELECT now() AS current_date_time, current_date_time + INTERVAL 4 DAY + INTERVAL └─────────────────────┴────────────────────────────────────────────────────────┘ ``` -The following query causes an exception: +以下查询将导致异常: ``` sql select now() AS current_date_time, current_date_time + (INTERVAL 4 DAY + INTERVAL 3 HOUR) @@ -76,7 +79,7 @@ Received exception from server (version 19.14.1): Code: 43. DB::Exception: Received from localhost:9000. DB::Exception: Wrong argument types for function plus: if one argument is Interval, then another must be Date or DateTime.. ``` -## See Also {#see-also} +## 另请参阅 {#see-also} -- [INTERVAL](../../query_language/operators.md#operator-interval) operator -- [toInterval](../../query_language/functions/type_conversion_functions.md#function-tointerval) type convertion functions +- [INTERVAL](../../../sql_reference/operators.md#operator-interval) 接线员 +- [toInterval](../../../sql_reference/functions/type_conversion_functions.md#function-tointerval) 类型转换函数 diff --git a/docs/zh/data_types/special_data_types/nothing.md b/docs/zh/sql_reference/data_types/special_data_types/nothing.md similarity index 61% rename from docs/zh/data_types/special_data_types/nothing.md rename to docs/zh/sql_reference/data_types/special_data_types/nothing.md index 7a6bf0e035b..ebc2b572983 100644 --- a/docs/zh/data_types/special_data_types/nothing.md +++ b/docs/zh/sql_reference/data_types/special_data_types/nothing.md @@ -1,8 +1,9 @@ -# Nothing {#nothing} + +# 没什么 {#nothing} 此数据类型的唯一目的是表示不是期望值的情况。 所以不能创建一个 `Nothing` 类型的值。 -例如,文本 [NULL](../../query_language/syntax.md#null-literal) 的类型为 `Nullable(Nothing)`。详情请见 [Nullable](../../data_types/nullable.md)。 +例如,文本 [NULL](../../../sql_reference/data_types/special_data_types/nothing.md#null-literal) 的类型为 `Nullable(Nothing)`。详情请见 [可为空](../../../sql_reference/data_types/special_data_types/nothing.md)。 `Nothing` 类型也可以用来表示空数组: diff --git a/docs/zh/data_types/special_data_types/set.md b/docs/zh/sql_reference/data_types/special_data_types/set.md similarity index 72% rename from docs/zh/data_types/special_data_types/set.md rename to docs/zh/sql_reference/data_types/special_data_types/set.md index d1f2ad368ee..0e1f9c6cc35 100644 --- a/docs/zh/data_types/special_data_types/set.md +++ b/docs/zh/sql_reference/data_types/special_data_types/set.md @@ -1,3 +1,4 @@ -# Set {#set} + +# 设置 {#set} 可以用在 IN 表达式的右半部分。 diff --git a/docs/zh/data_types/string.md b/docs/zh/sql_reference/data_types/string.md similarity index 97% rename from docs/zh/data_types/string.md rename to docs/zh/sql_reference/data_types/string.md index 742452ee0bf..3c9226787a2 100644 --- a/docs/zh/data_types/string.md +++ b/docs/zh/sql_reference/data_types/string.md @@ -1,4 +1,5 @@ -# String {#string} + +# 字符串 {#string} 字符串可以任意长度的。它可以包含任意的字节集,包含空字节。因此,字符串类型可以代替其他 DBMSs 中的 VARCHAR、BLOB、CLOB 等类型。 diff --git a/docs/zh/data_types/tuple.md b/docs/zh/sql_reference/data_types/tuple.md similarity index 86% rename from docs/zh/data_types/tuple.md rename to docs/zh/sql_reference/data_types/tuple.md index 4efeb651e76..e3520722c97 100644 --- a/docs/zh/data_types/tuple.md +++ b/docs/zh/sql_reference/data_types/tuple.md @@ -1,8 +1,9 @@ + # Tuple(T1, T2, …) {#tuplet1-t2} 元组,其中每个元素都有单独的 [类型](index.md#data_types)。 -不能在表中存储元组(除了内存表)。它们可以用于临时列分组。在查询中,IN 表达式和带特定参数的 lambda 函数可以来对临时列进行分组。更多信息,请参阅 [IN 
操作符](../query_language/select.md) and [Higher order functions](../query_language/functions/higher_order_functions.md)。 +不能在表中存储元组(除了内存表)。它们可以用于临时列分组。在查询中,IN 表达式和带特定参数的 lambda 函数可以来对临时列进行分组。更多信息,请参阅 [IN 操作符](../../sql_reference/data_types/tuple.md) 和 [高阶函数](../../sql_reference/data_types/tuple.md)。 元组可以是查询的结果。在这种情况下,对于JSON以外的文本格式,括号中的值是逗号分隔的。在JSON格式中,元组作为数组输出(在方括号中)。 @@ -28,7 +29,7 @@ ## 元组中的数据类型 {#yuan-zu-zhong-de-shu-ju-lei-xing} -在动态创建元组时,ClickHouse 会自动为元组的每一个参数赋予最小可表达的类型。如果参数为 [NULL](../query_language/syntax.md#null-literal),那这个元组对应元素是 [Nullable](nullable.md)。 +在动态创建元组时,ClickHouse 会自动为元组的每一个参数赋予最小可表达的类型。如果参数为 [NULL](../../sql_reference/data_types/tuple.md#null-literal),那这个元组对应元素是 [可为空](nullable.md)。 自动数据类型检测示例: diff --git a/docs/zh/sql_reference/data_types/uuid.md b/docs/zh/sql_reference/data_types/uuid.md new file mode 100644 index 00000000000..4c35fcf2d9c --- /dev/null +++ b/docs/zh/sql_reference/data_types/uuid.md @@ -0,0 +1,77 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 46 +toc_title: UUID +--- + +# UUID {#uuid-data-type} + +通用唯一标识符(UUID)是用于标识记录的16字节数。 有关UUID的详细信息,请参阅 [维基百科](https://en.wikipedia.org/wiki/Universally_unique_identifier). + +UUID类型值的示例如下所示: + +``` text +61f0c404-5cb3-11e7-907b-a6006ad3dba0 +``` + +如果在插入新记录时未指定UUID列值,则UUID值将用零填充: + +``` text +00000000-0000-0000-0000-000000000000 +``` + +## 如何生成 {#how-to-generate} + +要生成UUID值,ClickHouse提供了 [generateuidv4](../../sql_reference/functions/uuid_functions.md) 功能。 + +## 用法示例 {#usage-example} + +**示例1** + +此示例演示如何创建具有UUID类型列的表并将值插入到表中。 + +``` sql +CREATE TABLE t_uuid (x UUID, y String) ENGINE=TinyLog +``` + +``` sql +INSERT INTO t_uuid SELECT generateUUIDv4(), 'Example 1' +``` + +``` sql +SELECT * FROM t_uuid +``` + +``` text +┌────────────────────────────────────x─┬─y─────────┐ +│ 417ddc5d-e556-4d27-95dd-a34d84e46a50 │ Example 1 │ +└──────────────────────────────────────┴───────────┘ +``` + +**示例2** + +在此示例中,插入新记录时未指定UUID列值。 + +``` sql +INSERT INTO t_uuid (y) VALUES ('Example 2') +``` + +``` sql +SELECT * FROM t_uuid +``` + +``` text +┌────────────────────────────────────x─┬─y─────────┐ +│ 417ddc5d-e556-4d27-95dd-a34d84e46a50 │ Example 1 │ +│ 00000000-0000-0000-0000-000000000000 │ Example 2 │ +└──────────────────────────────────────┴───────────┘ +``` + +## 限制 {#restrictions} + +UUID数据类型仅支持以下功能 [字符串](string.md) 数据类型也支持(例如, [min](../../sql_reference/aggregate_functions/reference.md#agg_function-min), [max](../../sql_reference/aggregate_functions/reference.md#agg_function-max),和 [计数](../../sql_reference/aggregate_functions/reference.md#agg_function-count)). + +算术运算不支持UUID数据类型(例如, [abs](../../sql_reference/functions/arithmetic_functions.md#arithm_func-abs))或聚合函数,例如 [sum](../../sql_reference/aggregate_functions/reference.md#agg_function-sum) 和 [avg](../../sql_reference/aggregate_functions/reference.md#agg_function-avg). 
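+
+下面用一个简短示例说明上述限制(沿用本节前面创建的 `t_uuid` 表;这只是示意性的草图):
+
+``` sql
+-- 可以执行:count 是字符串类型同样支持的函数
+SELECT count(x) FROM t_uuid
+
+-- 下面的查询会抛出异常:UUID 不支持 sum 等算术聚合函数
+-- SELECT sum(x) FROM t_uuid
+```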
+ +[原始文章](https://clickhouse.tech/docs/en/data_types/uuid/) diff --git a/docs/zh/sql_reference/dictionaries/external_dictionaries/external_dicts.md b/docs/zh/sql_reference/dictionaries/external_dictionaries/external_dicts.md new file mode 100644 index 00000000000..afbdd082576 --- /dev/null +++ b/docs/zh/sql_reference/dictionaries/external_dictionaries/external_dicts.md @@ -0,0 +1,56 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 39 +toc_title: "\u6982\u8FF0" +--- + +# 外部字典 {#dicts-external-dicts} + +您可以从各种数据源添加自己的字典。 字典的数据源可以是本地文本或可执行文件、HTTP(s)资源或其他DBMS。 有关详细信息,请参阅 “[外部字典的来源](external_dicts_dict_sources.md)”. + +ClickHouse: + +- 完全或部分存储在RAM中的字典。 +- 定期更新字典并动态加载缺失的值。 换句话说,字典可以动态加载。 +- 允许创建外部字典与xml文件或 [DDL查询](../../statements/create.md#create-dictionary-query). + +外部字典的配置可以位于一个或多个xml文件中。 配置的路径在指定 [dictionaries\_config](../../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-dictionaries_config) 参数。 + +字典可以在服务器启动或首次使用时加载,具体取决于 [dictionaries\_lazy\_load](../../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-dictionaries_lazy_load) 设置。 + +字典配置文件具有以下格式: + +``` xml + + An optional element with any content. Ignored by the ClickHouse server. + + + /etc/metrika.xml + + + + + + + + +``` + +你可以 [配置](external_dicts_dict.md) 同一文件中的任意数量的字典。 + +[字典的DDL查询](../../statements/create.md#create-dictionary-query) 在服务器配置中不需要任何其他记录。 它们允许使用字典作为一流的实体,如表或视图。 + +!!! attention "注意" + 您可以通过在一个小字典中描述它来转换小字典的值 `SELECT` 查询(见 [变换](../../../sql_reference/functions/other_functions.md) 功能)。 此功能与外部字典无关。 + +## 另请参阅 {#ext-dicts-see-also} + +- [配置外部字典](external_dicts_dict.md) +- [在内存中存储字典](external_dicts_dict_layout.md) +- [字典更新](external_dicts_dict_lifetime.md) +- [外部字典的来源](external_dicts_dict_sources.md) +- [字典键和字段](external_dicts_dict_structure.md) +- [使用外部字典的函数](../../../sql_reference/functions/ext_dict_functions.md) + +[原始文章](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts/) diff --git a/docs/zh/sql_reference/dictionaries/external_dictionaries/external_dicts_dict.md b/docs/zh/sql_reference/dictionaries/external_dictionaries/external_dicts_dict.md new file mode 100644 index 00000000000..df64d31d2a9 --- /dev/null +++ b/docs/zh/sql_reference/dictionaries/external_dictionaries/external_dicts_dict.md @@ -0,0 +1,53 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 40 +toc_title: "\u914D\u7F6E\u5916\u90E8\u5B57\u5178" +--- + +# 配置外部字典 {#dicts-external-dicts-dict} + +如果使用xml文件配置字典,则比字典配置具有以下结构: + +``` xml + + dict_name + + + + + + + + + + + + + + + + + +``` + +相应的 [DDL-查询](../../statements/create.md#create-dictionary-query) 具有以下结构: + +``` sql +CREATE DICTIONARY dict_name +( + ... -- attributes +) +PRIMARY KEY ... -- complex or single key configuration +SOURCE(...) -- Source configuration +LAYOUT(...) -- Memory layout configuration +LIFETIME(...) -- Lifetime of dictionary in memory +``` + +- `name` – The identifier that can be used to access the dictionary. Use the characters `[a-zA-Z0-9_\-]`. +- [来源](external_dicts_dict_sources.md) — Source of the dictionary. +- [布局](external_dicts_dict_layout.md) — Dictionary layout in memory. +- [结构](external_dicts_dict_structure.md) — Structure of the dictionary . A key and attributes that can be retrieved by this key. +- [使用寿命](external_dicts_dict_lifetime.md) — Frequency of dictionary updates. 
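+
+下面是一个把上述各部分组合在一起的假设性完整示例(其中的字典名、来源表和字段仅用于演示):
+
+``` sql
+CREATE DICTIONARY dict_name
+(
+    id UInt64,
+    value String DEFAULT ''
+)
+PRIMARY KEY id
+SOURCE(CLICKHOUSE(host 'localhost' port 9000 user 'default' password '' db 'default' table 'source_table'))
+LAYOUT(FLAT())
+LIFETIME(MIN 300 MAX 360)
+```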
+ +[原始文章](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict/) diff --git a/docs/zh/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_hierarchical.md b/docs/zh/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_hierarchical.md new file mode 100644 index 00000000000..925e5f6c8f4 --- /dev/null +++ b/docs/zh/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_hierarchical.md @@ -0,0 +1,70 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 45 +toc_title: "\u5206\u5C42\u5B57\u5178" +--- + +# 分层字典 {#hierarchical-dictionaries} + +ClickHouse支持分层字典与 [数字键](external_dicts_dict_structure.md#ext_dict-numeric-key). + +看看下面的层次结构: + +``` text +0 (Common parent) +│ +├── 1 (Russia) +│ │ +│ └── 2 (Moscow) +│ │ +│ └── 3 (Center) +│ +└── 4 (Great Britain) + │ + └── 5 (London) +``` + +这种层次结构可以表示为下面的字典表。 + +| region\_id | parent\_region | region\_name | +|------------|----------------|--------------| +| 1 | 0 | 俄罗斯 | +| 2 | 1 | 莫斯科 | +| 3 | 2 | 中心 | +| 4 | 0 | 英国 | +| 5 | 4 | 伦敦 | + +此表包含一列 `parent_region` 包含该元素的最近父项的键。 + +ClickHouse支持 [等级](external_dicts_dict_structure.md#hierarchical-dict-attr) 属性为 [外部字典](index.md) 属性。 此属性允许您配置类似于上述的分层字典。 + +该 [独裁主义](../../../sql_reference/functions/ext_dict_functions.md#dictgethierarchy) 函数允许您获取元素的父链。 + +对于我们的例子,dictionary的结构可以是以下内容: + +``` xml + + + + region_id + + + + parent_region + UInt64 + 0 + true + + + + region_name + String + + + + + +``` + +[原始文章](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_hierarchical/) diff --git a/docs/zh/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_layout.md b/docs/zh/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_layout.md new file mode 100644 index 00000000000..4dcf5f4c1b0 --- /dev/null +++ b/docs/zh/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_layout.md @@ -0,0 +1,373 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 41 +toc_title: "\u5728\u5185\u5B58\u4E2D\u5B58\u50A8\u5B57\u5178" +--- + +# 在内存中存储字典 {#dicts-external-dicts-dict-layout} + +有多种方法可以将字典存储在内存中。 + +我们建议 [平](#flat), [散列](#dicts-external_dicts_dict_layout-hashed) 和 [complex\_key\_hashed](#complex-key-hashed). 其提供最佳的处理速度。 + +不建议使用缓存,因为性能可能较差,并且难以选择最佳参数。 阅读更多的部分 “[缓存](#cache)”. + +有几种方法可以提高字典性能: + +- 调用该函数以使用后的字典 `GROUP BY`. +- 将要提取的属性标记为"注射"。 如果不同的属性值对应于不同的键,则称为注射属性。 所以当 `GROUP BY` 使用由键获取属性值的函数,此函数会自动取出 `GROUP BY`. + +ClickHouse为字典中的错误生成异常。 错误示例: + +- 无法加载正在访问的字典。 +- 查询错误 `cached` 字典 + +您可以查看外部字典的列表及其状态 `system.dictionaries` 桌子 + +配置如下所示: + +``` xml + + + ... + + + + + + ... + + +``` + +相应的 [DDL-查询](../../statements/create.md#create-dictionary-query): + +``` sql +CREATE DICTIONARY (...) +... +LAYOUT(LAYOUT_TYPE(param value)) -- layout settings +... +``` + +## 在内存中存储字典的方法 {#ways-to-store-dictionaries-in-memory} + +- [平](#flat) +- [散列](#dicts-external_dicts_dict_layout-hashed) +- [sparse\_hashed](#dicts-external_dicts_dict_layout-sparse_hashed) +- [缓存](#cache) +- [range\_hashed](#range-hashed) +- [complex\_key\_hashed](#complex-key-hashed) +- [complex\_key\_cache](#complex-key-cache) +- [ip\_trie](#ip-trie) + +### 平 {#flat} + +字典以平面数组的形式完全存储在内存中。 字典使用多少内存? 
量与最大键的大小(在使用的空间中)成正比。 + +字典键具有 `UInt64` 类型和值限制为500,000。 如果在创建字典时发现较大的键,ClickHouse将引发异常,不会创建字典。 + +支持所有类型的来源。 更新时,数据(来自文件或表)将完整读取。 + +此方法在存储字典的所有可用方法中提供了最佳性能。 + +配置示例: + +``` xml + + + +``` + +或 + +``` sql +LAYOUT(FLAT()) +``` + +### 散列 {#dicts-external_dicts_dict_layout-hashed} + +该字典以哈希表的形式完全存储在内存中。 字典中可以包含任意数量的带有任意标识符的元素,在实践中,键的数量可以达到数千万项。 + +支持所有类型的来源。 更新时,数据(来自文件或表)将完整读取。 + +配置示例: + +``` xml + + + +``` + +或 + +``` sql +LAYOUT(HASHED()) +``` + +### sparse\_hashed {#dicts-external_dicts_dict_layout-sparse_hashed} + +类似于 `hashed`,但使用更少的内存,有利于更多的CPU使用率。 + +配置示例: + +``` xml + + + +``` + +``` sql +LAYOUT(SPARSE_HASHED()) +``` + +### complex\_key\_hashed {#complex-key-hashed} + +这种类型的存储是用于复合 [键](external_dicts_dict_structure.md). 类似于 `hashed`. + +配置示例: + +``` xml + + + +``` + +``` sql +LAYOUT(COMPLEX_KEY_HASHED()) +``` + +### range\_hashed {#range-hashed} + +字典以哈希表的形式存储在内存中,其中包含有序范围及其相应值的数组。 + +此存储方法的工作方式与散列方式相同,除了键之外,还允许使用日期/时间(任意数字类型)范围。 + +示例:该表格包含每个广告客户的折扣,格式为: + +``` text ++---------|-------------|-------------|------+ +| advertiser id | discount start date | discount end date | amount | ++===============+=====================+===================+========+ +| 123 | 2015-01-01 | 2015-01-15 | 0.15 | ++---------|-------------|-------------|------+ +| 123 | 2015-01-16 | 2015-01-31 | 0.25 | ++---------|-------------|-------------|------+ +| 456 | 2015-01-01 | 2015-01-15 | 0.05 | ++---------|-------------|-------------|------+ +``` + +要对日期范围使用示例,请定义 `range_min` 和 `range_max` 中的元素 [结构](external_dicts_dict_structure.md). 这些元素必须包含元素 `name` 和`type` (如果 `type` 如果没有指定,则默认类型将使用-Date)。 `type` 可以是任何数字类型(Date/DateTime/UInt64/Int32/others)。 + +示例: + +``` xml + + + Id + + + first + Date + + + last + Date + + ... +``` + +或 + +``` sql +CREATE DICTIONARY somedict ( + id UInt64, + first Date, + last Date +) +PRIMARY KEY id +LAYOUT(RANGE_HASHED()) +RANGE(MIN first MAX last) +``` + +要使用这些字典,您需要将附加参数传递给 `dictGetT` 函数,为其选择一个范围: + +``` sql +dictGetT('dict_name', 'attr_name', id, date) +``` + +此函数返回指定的值 `id`s和包含传递日期的日期范围。 + +算法的详细信息: + +- 如果 `id` 未找到或范围未找到 `id`,它返回字典的默认值。 +- 如果存在重叠范围,则可以使用任意范围。 +- 如果范围分隔符是 `NULL` 或无效日期(如1900-01-01或2039-01-01),范围保持打开状态。 范围可以在两侧打开。 + +配置示例: + +``` xml + + + + ... + + + + + + + + Abcdef + + + StartTimeStamp + UInt64 + + + EndTimeStamp + UInt64 + + + XXXType + String + + + + + + +``` + +或 + +``` sql +CREATE DICTIONARY somedict( + Abcdef UInt64, + StartTimeStamp UInt64, + EndTimeStamp UInt64, + XXXType String DEFAULT '' +) +PRIMARY KEY Abcdef +RANGE(MIN StartTimeStamp MAX EndTimeStamp) +``` + +### 缓存 {#cache} + +字典存储在具有固定数量的单元格的缓存中。 这些单元格包含经常使用的元素。 + +搜索字典时,首先搜索缓存。 对于每个数据块,所有在缓存中找不到或过期的密钥都从源请求,使用 `SELECT attrs... FROM db.table WHERE id IN (k1, k2, ...)`. 然后将接收到的数据写入高速缓存。 + +对于缓存字典,过期 [使用寿命](external_dicts_dict_lifetime.md) 可以设置高速缓存中的数据。 如果更多的时间比 `lifetime` 自从在单元格中加载数据以来,单元格的值不被使用,并且在下次需要使用时重新请求它。 +这是存储字典的所有方法中最不有效的。 缓存的速度在很大程度上取决于正确的设置和使用场景。 缓存类型字典只有在命中率足够高(推荐99%或更高)时才能表现良好。 您可以查看平均命中率 `system.dictionaries` 桌子 + +要提高缓存性能,请使用以下子查询 `LIMIT`,并从外部调用字典函数。 + +支持 [来源](external_dicts_dict_sources.md):MySQL的,ClickHouse的,可执行文件,HTTP. + +设置示例: + +``` xml + + + + 1000000000 + + +``` + +或 + +``` sql +LAYOUT(CACHE(SIZE_IN_CELLS 1000000000)) +``` + +设置足够大的缓存大小。 你需要尝试选择细胞的数量: + +1. 设置一些值。 +2. 运行查询,直到缓存完全满。 +3. 使用评估内存消耗 `system.dictionaries` 桌子 +4. 增加或减少单元数,直到达到所需的内存消耗。 + +!!! warning "警告" + 不要使用ClickHouse作为源,因为处理随机读取的查询速度很慢。 + +### complex\_key\_cache {#complex-key-cache} + +这种类型的存储是用于复合 [键](external_dicts_dict_structure.md). 类似于 `cache`. 
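+
+配置方式与 `cache` 相同,同样需要指定单元格数量(以下数值仅为演示):
+
+``` sql
+LAYOUT(COMPLEX_KEY_CACHE(SIZE_IN_CELLS 1000000000))
+```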
+ +### ip\_trie {#ip-trie} + +这种类型的存储用于将网络前缀(IP地址)映射到ASN等元数据。 + +示例:该表包含网络前缀及其对应的AS号码和国家代码: + +``` text + +-----------|-----|------+ + | prefix | asn | cca2 | + +=================+=======+========+ + | 202.79.32.0/20 | 17501 | NP | + +-----------|-----|------+ + | 2620:0:870::/48 | 3856 | US | + +-----------|-----|------+ + | 2a02:6b8:1::/48 | 13238 | RU | + +-----------|-----|------+ + | 2001:db8::/32 | 65536 | ZZ | + +-----------|-----|------+ +``` + +使用此类布局时,结构必须具有复合键。 + +示例: + +``` xml + + + + prefix + String + + + + asn + UInt32 + + + + cca2 + String + ?? + + ... +``` + +或 + +``` sql +CREATE DICTIONARY somedict ( + prefix String, + asn UInt32, + cca2 String DEFAULT '??' +) +PRIMARY KEY prefix +``` + +该键必须只有一个包含允许的IP前缀的字符串类型属性。 还不支持其他类型。 + +对于查询,必须使用相同的函数 (`dictGetT` 与元组)至于具有复合键的字典: + +``` sql +dictGetT('dict_name', 'attr_name', tuple(ip)) +``` + +该函数采用任一 `UInt32` 对于IPv4,或 `FixedString(16)` 碌莽禄Ipv6拢IPv6: + +``` sql +dictGetString('prefix', 'asn', tuple(IPv6StringToNum('2001:db8::1'))) +``` + +还不支持其他类型。 该函数返回与此IP地址对应的前缀的属性。 如果有重叠的前缀,则返回最具体的前缀。 + +数据存储在一个 `trie`. 它必须完全适合RAM。 + +[原始文章](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_layout/) diff --git a/docs/zh/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_lifetime.md b/docs/zh/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_lifetime.md new file mode 100644 index 00000000000..66ff7124ba1 --- /dev/null +++ b/docs/zh/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_lifetime.md @@ -0,0 +1,86 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 42 +toc_title: "\u5B57\u5178\u66F4\u65B0" +--- + +# 字典更新 {#dictionary-updates} + +ClickHouse定期更新字典。 完全下载字典的更新间隔和缓存字典的无效间隔在 `` 在几秒钟内标记。 + +字典更新(除首次使用的加载之外)不会阻止查询。 在更新期间,将使用旧版本的字典。 如果在更新过程中发生错误,则将错误写入服务器日志,并使用旧版本的字典继续查询。 + +设置示例: + +``` xml + + ... + 300 + ... + +``` + +``` sql +CREATE DICTIONARY (...) +... +LIFETIME(300) +... +``` + +设置 `0` (`LIFETIME(0)`)防止字典更新。 + +您可以设置升级的时间间隔,ClickHouse将在此范围内选择一个统一的随机时间。 为了在大量服务器上升级时分配字典源上的负载,这是必要的。 + +设置示例: + +``` xml + + ... + + 300 + 360 + + ... + +``` + +或 + +``` sql +LIFETIME(MIN 300 MAX 360) +``` + +升级字典时,ClickHouse服务器根据字典的类型应用不同的逻辑 [来源](external_dicts_dict_sources.md): + +- 对于文本文件,它检查修改的时间。 如果时间与先前记录的时间不同,则更新字典。 +- 对于MyISAM表,修改的时间使用检查 `SHOW TABLE STATUS` 查询。 +- 默认情况下,每次都会更新来自其他来源的字典。 + +对于MySQL(InnoDB),ODBC和ClickHouse源代码,您可以设置一个查询,只有在字典真正改变时才会更新字典,而不是每次都更新。 为此,请按照下列步骤操作: + +- 字典表必须具有在源数据更新时始终更改的字段。 +- 源的设置必须指定检索更改字段的查询。 ClickHouse服务器将查询结果解释为一行,如果此行相对于其以前的状态发生了更改,则更新字典。 指定查询 `` 字段中的设置 [来源](external_dicts_dict_sources.md). + +设置示例: + +``` xml + + ... + + ... + SELECT update_time FROM dictionary_source where id = 1 + + ... + +``` + +或 + +``` sql +... +SOURCE(ODBC(... invalidate_query 'SELECT update_time FROM dictionary_source where id = 1')) +... 
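+-- 补充说明:只有当 invalidate_query 的返回结果与上一次不同时,ClickHouse 才会更新字典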
+``` + +[原始文章](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_lifetime/) diff --git a/docs/zh/query_language/dicts/external_dicts_dict_sources.md b/docs/zh/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md similarity index 70% rename from docs/zh/query_language/dicts/external_dicts_dict_sources.md rename to docs/zh/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md index 37d050a8e72..c8173749b33 100644 --- a/docs/zh/query_language/dicts/external_dicts_dict_sources.md +++ b/docs/zh/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md @@ -1,12 +1,15 @@ --- -en_copy: true +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 43 +toc_title: "\u5916\u90E8\u5B57\u5178\u7684\u6765\u6E90" --- -# Sources of External Dictionaries {#dicts-external-dicts-dict-sources} +# 外部字典的来源 {#dicts-external-dicts-dict-sources} -An external dictionary can be connected from many different sources. +外部字典可以从许多不同的来源连接。 -If dictionary is configured using xml-file, the configuration looks like this: +如果使用xml-file配置字典,则配置如下所示: ``` xml @@ -23,7 +26,7 @@ If dictionary is configured using xml-file, the configuration looks like this: ``` -In case of [DDL-query](../create.md#create-dictionary-query), equal configuration will looks like: +在情况下 [DDL-查询](../../statements/create.md#create-dictionary-query),相等的配置将看起来像: ``` sql CREATE DICTIONARY dict_name (...) @@ -32,12 +35,12 @@ SOURCE(SOURCE_TYPE(param1 val1 ... paramN valN)) -- Source configuration ... ``` -The source is configured in the `source` section. +源配置在 `source` 科。 -Types of sources (`source_type`): +来源类型 (`source_type`): -- [Local file](#dicts-external_dicts_dict_sources-local_file) -- [Executable file](#dicts-external_dicts_dict_sources-executable) +- [本地文件](#dicts-external_dicts_dict_sources-local_file) +- [可执行文件](#dicts-external_dicts_dict_sources-executable) - [HTTP(s)](#dicts-external_dicts_dict_sources-http) - DBMS - [ODBC](#dicts-external_dicts_dict_sources-odbc) @@ -46,9 +49,9 @@ Types of sources (`source_type`): - [MongoDB](#dicts-external_dicts_dict_sources-mongodb) - [Redis](#dicts-external_dicts_dict_sources-redis) -## Local File {#dicts-external_dicts_dict_sources-local_file} +## 本地文件 {#dicts-external_dicts_dict_sources-local_file} -Example of settings: +设置示例: ``` xml @@ -59,22 +62,22 @@ Example of settings: ``` -or +或 ``` sql SOURCE(FILE(path '/opt/dictionaries/os.tsv' format 'TabSeparated')) ``` -Setting fields: +设置字段: - `path` – The absolute path to the file. -- `format` – The file format. All the formats described in “[Formats](../../interfaces/formats.md#formats)” are supported. +- `format` – The file format. All the formats described in “[格式](../../../interfaces/formats.md#formats)” 支持。 -## Executable File {#dicts-external_dicts_dict_sources-executable} +## 可执行文件 {#dicts-external_dicts_dict_sources-executable} -Working with executable files depends on [how the dictionary is stored in memory](external_dicts_dict_layout.md). If the dictionary is stored using `cache` and `complex_key_cache`, ClickHouse requests the necessary keys by sending a request to the executable file’s STDIN. Otherwise, ClickHouse starts executable file and treats its output as dictionary data. +使用可执行文件取决于 [字典如何存储在内存中](external_dicts_dict_layout.md). 
如果字典存储使用 `cache` 和 `complex_key_cache`,ClickHouse通过向可执行文件的STDIN发送请求来请求必要的密钥。 否则,ClickHouse将启动可执行文件并将其输出视为字典数据。 -Example of settings: +设置示例: ``` xml @@ -85,22 +88,22 @@ Example of settings: ``` -or +或 ``` sql SOURCE(EXECUTABLE(command 'cat /opt/dictionaries/os.tsv' format 'TabSeparated')) ``` -Setting fields: +设置字段: - `command` – The absolute path to the executable file, or the file name (if the program directory is written to `PATH`). -- `format` – The file format. All the formats described in “[Formats](../../interfaces/formats.md#formats)” are supported. +- `format` – The file format. All the formats described in “[格式](../../../interfaces/formats.md#formats)” 支持。 -## HTTP(s) {#dicts-external_dicts_dict_sources-http} +## Http(s) {#dicts-external_dicts_dict_sources-http} -Working with an HTTP(s) server depends on [how the dictionary is stored in memory](external_dicts_dict_layout.md). If the dictionary is stored using `cache` and `complex_key_cache`, ClickHouse requests the necessary keys by sending a request via the `POST` method. +使用HTTP(s)服务器取决于 [字典如何存储在内存中](external_dicts_dict_layout.md). 如果字典存储使用 `cache` 和 `complex_key_cache`,ClickHouse通过通过发送请求请求必要的密钥 `POST` 方法。 -Example of settings: +设置示例: ``` xml @@ -121,7 +124,7 @@ Example of settings: ``` -or +或 ``` sql SOURCE(HTTP( @@ -132,12 +135,12 @@ SOURCE(HTTP( )) ``` -In order for ClickHouse to access an HTTPS resource, you must [configure openSSL](../../operations/server_settings/settings.md#server_settings-openssl) in the server configuration. +为了让ClickHouse访问HTTPS资源,您必须 [配置openSSL](../../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-openssl) 在服务器配置中。 -Setting fields: +设置字段: - `url` – The source URL. -- `format` – The file format. All the formats described in “[Formats](../../interfaces/formats.md#formats)” are supported. +- `format` – The file format. All the formats described in “[格式](../../../interfaces/formats.md#formats)” 支持。 - `credentials` – Basic HTTP authentication. Optional parameter. - `user` – Username required for the authentication. - `password` – Password required for the authentication. @@ -148,9 +151,9 @@ Setting fields: ## ODBC {#dicts-external_dicts_dict_sources-odbc} -You can use this method to connect any database that has an ODBC driver. +您可以使用此方法连接具有ODBC驱动程序的任何数据库。 -Example of settings: +设置示例: ``` xml @@ -163,7 +166,7 @@ Example of settings: ``` -or +或 ``` sql SOURCE(ODBC( @@ -174,25 +177,25 @@ SOURCE(ODBC( )) ``` -Setting fields: +设置字段: -- `db` – Name of the database. Omit it if the database name is set in the `` parameters. +- `db` – Name of the database. Omit it if the database name is set in the `` 参数。 - `table` – Name of the table and schema if exists. - `connection_string` – Connection string. -- `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [Updating dictionaries](external_dicts_dict_lifetime.md). +- `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [更新字典](external_dicts_dict_lifetime.md). -ClickHouse receives quoting symbols from ODBC-driver and quote all settings in queries to driver, so it’s necessary to set table name accordingly to table name case in database. +ClickHouse接收来自ODBC-driver的引用符号,并将查询中的所有设置引用到driver,因此有必要根据数据库中的表名大小写设置表名。 -If you have a problems with encodings when using Oracle, see the corresponding [FAQ](../../faq/general.md#oracle-odbc-encodings) article. 
+如果您在使用Oracle时遇到编码问题,请参阅相应的 [FAQ](../../../faq/general.md#oracle-odbc-encodings) 文章. -### Known vulnerability of the ODBC dictionary functionality {#known-vulnerability-of-the-odbc-dictionary-functionality} +### ODBC字典功能的已知漏洞 {#known-vulnerability-of-the-odbc-dictionary-functionality} -!!! attention "Attention" - When connecting to the database through the ODBC driver connection parameter `Servername` can be substituted. In this case values of `USERNAME` and `PASSWORD` from `odbc.ini` are sent to the remote server and can be compromised. +!!! attention "注意" + 通过ODBC驱动程序连接参数连接到数据库时 `Servername` 可以取代。 在这种情况下,值 `USERNAME` 和 `PASSWORD` 从 `odbc.ini` 被发送到远程服务器,并且可能会受到损害。 -**Example of insecure use** +**不安全使用示例** -Let’s configure unixODBC for PostgreSQL. Content of `/etc/odbc.ini`: +让我们为PostgreSQL配置unixODBC。 的内容 `/etc/odbc.ini`: ``` text [gregtest] @@ -205,25 +208,25 @@ USERNAME = test PASSWORD = test ``` -If you then make a query such as +如果然后进行查询,例如 ``` sql SELECT * FROM odbc('DSN=gregtest;Servername=some-server.com', 'test_db'); ``` -ODBC driver will send values of `USERNAME` and `PASSWORD` from `odbc.ini` to `some-server.com`. +ODBC驱动程序将发送的值 `USERNAME` 和 `PASSWORD` 从 `odbc.ini` 到 `some-server.com`. -### Example of Connecting PostgreSQL {#example-of-connecting-postgresql} +### 连接Postgresql的示例 {#example-of-connecting-postgresql} -Ubuntu OS. +Ubuntu操作系统。 -Installing unixODBC and the ODBC driver for PostgreSQL: +为PostgreSQL安装unixODBC和ODBC驱动程序: ``` bash $ sudo apt-get install -y unixodbc odbcinst odbc-postgresql ``` -Configuring `/etc/odbc.ini` (or `~/.odbc.ini`): +配置 `/etc/odbc.ini` (或 `~/.odbc.ini`): ``` text [DEFAULT] @@ -244,7 +247,7 @@ Configuring `/etc/odbc.ini` (or `~/.odbc.ini`): ConnSettings = ``` -The dictionary configuration in ClickHouse: +ClickHouse中的字典配置: ``` xml @@ -279,7 +282,7 @@ The dictionary configuration in ClickHouse: ``` -or +或 ``` sql CREATE DICTIONARY table_name ( @@ -292,19 +295,19 @@ LAYOUT(HASHED()) LIFETIME(MIN 300 MAX 360) ``` -You may need to edit `odbc.ini` to specify the full path to the library with the driver `DRIVER=/usr/local/lib/psqlodbcw.so`. +您可能需要编辑 `odbc.ini` 使用驱动程序指定库的完整路径 `DRIVER=/usr/local/lib/psqlodbcw.so`. -### Example of Connecting MS SQL Server {#example-of-connecting-ms-sql-server} +### 连接MS SQL Server的示例 {#example-of-connecting-ms-sql-server} -Ubuntu OS. +Ubuntu操作系统。 -Installing the driver: : +安装驱动程序: : ``` bash $ sudo apt-get install tdsodbc freetds-bin sqsh ``` -Configuring the driver: +配置驱动程序: ``` bash $ cat /etc/freetds/freetds.conf @@ -339,7 +342,7 @@ Configuring the driver: Port = 1433 ``` -Configuring the dictionary in ClickHouse: +在ClickHouse中配置字典: ``` xml @@ -375,7 +378,7 @@ Configuring the dictionary in ClickHouse: ``` -or +或 ``` sql CREATE DICTIONARY test ( @@ -390,9 +393,9 @@ LIFETIME(MIN 300 MAX 360) ## DBMS {#dbms} -### MySQL {#dicts-external_dicts_dict_sources-mysql} +### Mysql {#dicts-external_dicts_dict_sources-mysql} -Example of settings: +设置示例: ``` xml @@ -416,7 +419,7 @@ Example of settings: ``` -or +或 ``` sql SOURCE(MYSQL( @@ -432,7 +435,7 @@ SOURCE(MYSQL( )) ``` -Setting fields: +设置字段: - `port` – The port on the MySQL server. You can specify it for all replicas, or for each one individually (inside ``). @@ -449,13 +452,13 @@ Setting fields: - `table` – Name of the table. -- `where` – The selection criteria. The syntax for conditions is the same as for `WHERE` clause in MySQL, for example, `id > 10 AND id < 20`. Optional parameter. +- `where` – The selection criteria. 
The syntax for conditions is the same as for `WHERE` 例如,mysql中的子句, `id > 10 AND id < 20`. 可选参数。 -- `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [Updating dictionaries](external_dicts_dict_lifetime.md). +- `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [更新字典](external_dicts_dict_lifetime.md). -MySQL can be connected on a local host via sockets. To do this, set `host` and `socket`. +MySQL可以通过套接字在本地主机上连接。 要做到这一点,设置 `host` 和 `socket`. -Example of settings: +设置示例: ``` xml @@ -472,7 +475,7 @@ Example of settings: ``` -or +或 ``` sql SOURCE(MYSQL( @@ -489,7 +492,7 @@ SOURCE(MYSQL( ### ClickHouse {#dicts-external_dicts_dict_sources-clickhouse} -Example of settings: +设置示例: ``` xml @@ -505,7 +508,7 @@ Example of settings: ``` -or +或 ``` sql SOURCE(CLICKHOUSE( @@ -519,20 +522,20 @@ SOURCE(CLICKHOUSE( )) ``` -Setting fields: +设置字段: -- `host` – The ClickHouse host. If it is a local host, the query is processed without any network activity. To improve fault tolerance, you can create a [Distributed](../../operations/table_engines/distributed.md) table and enter it in subsequent configurations. +- `host` – The ClickHouse host. If it is a local host, the query is processed without any network activity. To improve fault tolerance, you can create a [分布](../../../engines/table_engines/special/distributed.md) 表并在后续配置中输入它。 - `port` – The port on the ClickHouse server. - `user` – Name of the ClickHouse user. - `password` – Password of the ClickHouse user. - `db` – Name of the database. - `table` – Name of the table. - `where` – The selection criteria. May be omitted. -- `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [Updating dictionaries](external_dicts_dict_lifetime.md). +- `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [更新字典](external_dicts_dict_lifetime.md). -### MongoDB {#dicts-external_dicts_dict_sources-mongodb} +### Mongodb {#dicts-external_dicts_dict_sources-mongodb} -Example of settings: +设置示例: ``` xml @@ -547,7 +550,7 @@ Example of settings: ``` -or +或 ``` sql SOURCE(MONGO( @@ -560,7 +563,7 @@ SOURCE(MONGO( )) ``` -Setting fields: +设置字段: - `host` – The MongoDB host. - `port` – The port on the MongoDB server. @@ -571,7 +574,7 @@ Setting fields: ### Redis {#dicts-external_dicts_dict_sources-redis} -Example of settings: +设置示例: ``` xml @@ -584,7 +587,7 @@ Example of settings: ``` -or +或 ``` sql SOURCE(REDIS( @@ -595,11 +598,11 @@ SOURCE(REDIS( )) ``` -Setting fields: +设置字段: - `host` – The Redis host. - `port` – The port on the Redis server. -- `storage_type` – The structure of internal Redis storage using for work with keys. `simple` is for simple sources and for hashed single key sources, `hash_map` is for hashed sources with two keys. Ranged sources and cache sources with complex key are unsupported. May be omitted, default value is `simple`. +- `storage_type` – The structure of internal Redis storage using for work with keys. `simple` 适用于简单源和散列单键源, `hash_map` 用于具有两个键的散列源。 不支持具有复杂键的范围源和缓存源。 可以省略,默认值为 `simple`. - `db_index` – The specific numeric index of Redis logical database. May be omitted, default value is 0. 
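+
+例如,对于具有两个键的散列源,可以将 `storage_type` 设置为 `hash_map`(以下连接参数仅为演示):
+
+``` sql
+SOURCE(REDIS(
+    host 'localhost'
+    port 6379
+    storage_type 'hash_map'
+    db_index 0
+))
+```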
-[Original article](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_sources/) +[原始文章](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_sources/) diff --git a/docs/zh/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md b/docs/zh/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md new file mode 100644 index 00000000000..0ac0226aa50 --- /dev/null +++ b/docs/zh/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md @@ -0,0 +1,175 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 44 +toc_title: "\u5B57\u5178\u952E\u548C\u5B57\u6BB5" +--- + +# 字典键和字段 {#dictionary-key-and-fields} + +该 `` 子句描述可用于查询的字典键和字段。 + +XML描述: + +``` xml + + + + Id + + + + + + + ... + + + +``` + +属性在元素中描述: + +- `` — [键列](external_dicts_dict_structure.md#ext_dict_structure-key). +- `` — [数据列](external_dicts_dict_structure.md#ext_dict_structure-attributes). 可以有多个属性。 + +DDL查询: + +``` sql +CREATE DICTIONARY dict_name ( + Id UInt64, + -- attributes +) +PRIMARY KEY Id +... +``` + +查询正文中描述了属性: + +- `PRIMARY KEY` — [键列](external_dicts_dict_structure.md#ext_dict_structure-key) +- `AttrName AttrType` — [数据列](external_dicts_dict_structure.md#ext_dict_structure-attributes). 可以有多个属性。 + +## 键 {#ext_dict_structure-key} + +ClickHouse支持以下类型的键: + +- 数字键。 `UInt64`. 在定义 `` 标记或使用 `PRIMARY KEY` 关键字。 +- 复合密钥。 组不同类型的值。 在标签中定义 `` 或 `PRIMARY KEY` 关键字。 + +Xml结构可以包含 `` 或 ``. DDL-查询必须包含单个 `PRIMARY KEY`. + +!!! warning "警告" + 不能将键描述为属性。 + +### 数字键 {#ext_dict-numeric-key} + +类型: `UInt64`. + +配置示例: + +``` xml + + Id + +``` + +配置字段: + +- `name` – The name of the column with keys. + +对于DDL-查询: + +``` sql +CREATE DICTIONARY ( + Id UInt64, + ... +) +PRIMARY KEY Id +... +``` + +- `PRIMARY KEY` – The name of the column with keys. + +### 复合密钥 {#composite-key} + +关键可以是一个 `tuple` 从任何类型的字段。 该 [布局](external_dicts_dict_layout.md) 在这种情况下,必须是 `complex_key_hashed` 或 `complex_key_cache`. + +!!! tip "提示" + 复合键可以由单个元素组成。 例如,这使得可以使用字符串作为键。 + +键结构在元素中设置 ``. 键字段的格式与字典的格式相同 [属性](external_dicts_dict_structure.md). 示例: + +``` xml + + + + field1 + String + + + field2 + UInt32 + + ... + +... +``` + +或 + +``` sql +CREATE DICTIONARY ( + field1 String, + field2 String + ... +) +PRIMARY KEY field1, field2 +... +``` + +对于查询 `dictGet*` 函数中,一个元组作为键传递。 示例: `dictGetString('dict_name', 'attr_name', tuple('string for field1', num_for_field2))`. + +## 属性 {#ext_dict_structure-attributes} + +配置示例: + +``` xml + + ... + + Name + ClickHouseDataType + + rand64() + true + true + true + + +``` + +或 + +``` sql +CREATE DICTIONARY somename ( + Name ClickHouseDataType DEFAULT '' EXPRESSION rand64() HIERARCHICAL INJECTIVE IS_OBJECT_ID +) +``` + +配置字段: + +| 标签 | 产品描述 | 必填项 | +|------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------| +| `name` | 列名称。 | 是 | +| `type` | ClickHouse数据类型。
    ClickHouse尝试将字典中的值转换为指定的数据类型。 例如,对于MySQL,该字段在MySQL源表中可能是 `TEXT`、`VARCHAR` 或 `BLOB` 类型,但可以以 `String` 类型上传到ClickHouse中。
    [可为空](../../../sql_reference/data_types/nullable.md) 不支持。 | 是 | +| `null_value` | 非现有元素的默认值。
    在示例中,它是一个空字符串。 此字段中不能使用 `NULL`。 | 是 |
| `expression` | ClickHouse对该值执行的[表达式](../../syntax.md#syntax-expressions)。
    表达式可以是远程SQL数据库中的列名。 因此,您可以使用它为远程列创建别名。

    默认值:无表达式。 | 非也。 |
| `hierarchical` | 如果为 `true`,该属性包含当前键的父键值。 参见[分层字典](external_dicts_dict_hierarchical.md)。

    默认值: `false`. | 非也。 |
| `injective` | 标志,表示 `id -> attribute` 映射是否为[单射](https://en.wikipedia.org/wiki/Injective_function)。
    如果为 `true`,ClickHouse可以自动把对字典的请求移到 `GROUP BY` 子句之后执行。 通常这会显著减少此类请求的数量。

    默认值: `false`. | 非也。 |
| `is_object_id` | 标志,表示是否通过 `ObjectID` 对MongoDB文档执行查询。

    默认值: `false`. | 非也。 | + +## 另请参阅 {#see-also} + +- [使用外部字典的函数](../../../sql_reference/functions/ext_dict_functions.md). + +[原始文章](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_structure/) diff --git a/docs/zh/sql_reference/dictionaries/external_dictionaries/index.md b/docs/zh/sql_reference/dictionaries/external_dictionaries/index.md new file mode 100644 index 00000000000..25d86ecda96 --- /dev/null +++ b/docs/zh/sql_reference/dictionaries/external_dictionaries/index.md @@ -0,0 +1,8 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_folder_title: "\u5916\u90E8\u5B57\u5178" +toc_priority: 37 +--- + + diff --git a/docs/zh/sql_reference/dictionaries/index.md b/docs/zh/sql_reference/dictionaries/index.md new file mode 100644 index 00000000000..9c9817ad0ad --- /dev/null +++ b/docs/zh/sql_reference/dictionaries/index.md @@ -0,0 +1,22 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_folder_title: "\u5B57\u5178" +toc_priority: 35 +toc_title: "\u5BFC\u8A00" +--- + +# 字典 {#dictionaries} + +字典是一个映射 (`key -> attributes`)这是方便各种类型的参考清单。 + +ClickHouse支持使用可用于查询的字典的特殊功能。 这是更容易和更有效地使用字典与功能比 `JOIN` 与参考表。 + +[NULL](../syntax.md#null) 值不能存储在字典中。 + +ClickHouse支持: + +- [内置字典](internal_dicts.md#internal_dicts) 具有特定的 [功能集](../../sql_reference/functions/ym_dict_functions.md). +- [插件(外部)字典](external_dictionaries/external_dicts.md) 用一个 [职能净额](../../sql_reference/functions/ext_dict_functions.md). + +[原始文章](https://clickhouse.tech/docs/en/query_language/dicts/) diff --git a/docs/zh/sql_reference/dictionaries/internal_dicts.md b/docs/zh/sql_reference/dictionaries/internal_dicts.md new file mode 100644 index 00000000000..bcede3c14ad --- /dev/null +++ b/docs/zh/sql_reference/dictionaries/internal_dicts.md @@ -0,0 +1,55 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 39 +toc_title: "\u5185\u90E8\u5B57\u5178" +--- + +# 内部字典 {#internal_dicts} + +ClickHouse包含用于处理地理数据库的内置功能。 + +这使您可以: + +- 使用区域的ID以所需语言获取其名称。 +- 使用区域ID获取城市、地区、联邦区、国家或大陆的ID。 +- 检查一个区域是否属于另一个区域。 +- 获取父区域链。 + +所有功能支持 “translocality,” 能够同时使用不同的角度对区域所有权。 有关详细信息,请参阅部分 “Functions for working with Yandex.Metrica dictionaries”. + +在默认包中禁用内部字典。 +要启用它们,请取消注释参数 `path_to_regions_hierarchy_file` 和 `path_to_regions_names_files` 在服务器配置文件中。 + +Geobase从文本文件加载。 + +将 `regions_hierarchy*.txt` 文件到 `path_to_regions_hierarchy_file` 目录。 此配置参数必须包含指向 `regions_hierarchy.txt` 文件(默认区域层次结构)和其他文件 (`regions_hierarchy_ua.txt`)必须位于同一目录中。 + +把 `regions_names_*.txt` 在文件 `path_to_regions_names_files` 目录。 + +您也可以自己创建这些文件。 文件格式如下: + +`regions_hierarchy*.txt`:TabSeparated(无标题),列: + +- 地区ID (`UInt32`) +- 父区域ID (`UInt32`) +- 区域类型 (`UInt8`):1-大陆,3-国家,4-联邦区,5-地区,6-城市;其他类型没有价值 +- 人口 (`UInt32`) — optional column + +`regions_names_*.txt`:TabSeparated(无标题),列: + +- 地区ID (`UInt32`) +- 地区名称 (`String`) — Can't contain tabs or line feeds, even escaped ones. 
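+
+例如,`regions_hierarchy.txt` 中的一行可能如下所示(各列以制表符分隔;数值为假设的演示数据,依次表示:地区ID、父地区ID、地区类型、人口):
+
+``` text
+3	2	5	12000000
+```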
+ +平面阵列用于存储在RAM中。 出于这个原因,Id不应该超过一百万。 + +字典可以在不重新启动服务器的情况下更新。 但是,不会更新可用字典集。 +对于更新,将检查文件修改时间。 如果文件已更改,则更新字典。 +检查更改的时间间隔在 `builtin_dictionaries_reload_interval` 参数。 +字典更新(首次使用时加载除外)不会阻止查询。 在更新期间,查询使用旧版本的字典。 如果在更新过程中发生错误,则将错误写入服务器日志,并使用旧版本的字典继续查询。 + +我们建议定期使用geobase更新字典。 在更新期间,生成新文件并将其写入单独的位置。 一切准备就绪后,将其重命名为服务器使用的文件。 + +还有与操作系统标识符和Yandex的工作功能。Metrica搜索引擎,但他们不应该被使用。 + +[原始文章](https://clickhouse.tech/docs/en/query_language/dicts/internal_dicts/) diff --git a/docs/zh/query_language/functions/arithmetic_functions.md b/docs/zh/sql_reference/functions/arithmetic_functions.md similarity index 79% rename from docs/zh/query_language/functions/arithmetic_functions.md rename to docs/zh/sql_reference/functions/arithmetic_functions.md index 08d13b15af1..66bd42ec63a 100644 --- a/docs/zh/query_language/functions/arithmetic_functions.md +++ b/docs/zh/sql_reference/functions/arithmetic_functions.md @@ -1,3 +1,4 @@ + # 算术函数 {#suan-zhu-han-shu} 对于所有算术函数,结果类型为结果适合的最小数字类型(如果存在这样的类型)。最小数字类型是根据数字的位数,是否有符号以及是否是浮点类型而同时进行的。如果没有足够的位,则采用最高位类型。 @@ -16,59 +17,59 @@ SELECT toTypeName(0), toTypeName(0 + 0), toTypeName(0 + 0 + 0), toTypeName(0 + 0 溢出的产生方式与C++相同。 -## plus(a, b), a + b {#plusa-b-a-b} +## 加(a,b),a+b {#plusa-b-a-b} 计算数字的总和。 您还可以将Date或DateTime与整数进行相加。在Date的情况下,添加的整数意味着添加相应的天数。对于DateTime,这意味这添加相应的描述。 -## minus(a, b), a - b {#minusa-b-a-b} +## 减(a,b),a-b {#minusa-b-a-b} 计算数字之间的差,结果总是有符号的。 -您还可以将Date或DateTime与整数进行相减。见上面的’plus’。 +您还可以将Date或DateTime与整数进行相减。见上面的'plus'。 -## multiply(a, b), a \* b {#multiplya-b-a-b} +## 乘(a,b),a\*b {#multiplya-b-a-b} 计算数字的乘积。 -## divide(a, b), a / b {#dividea-b-a-b} +## 除以(a,b),a/b {#dividea-b-a-b} 计算数字的商。结果类型始终是浮点类型。 -它不是整数除法。对于整数除法,请使用’intDiv’函数。 -当除以零时,你得到’inf’,‘- inf’或’nan’。 +它不是整数除法。对于整数除法,请使用'intDiv'函数。 +当除以零时,你得到'inf',‘- inf’或’nan’。 -## intDiv(a, b) {#intdiva-b} +## intDiv(a,b) {#intdiva-b} 计算整数数字的商,向下舍入(按绝对值)。 除以零或将最小负数除以-1时抛出异常。 -## intDivOrZero(a, b) {#intdivorzeroa-b} +## intDivOrZero(a,b) {#intdivorzeroa-b} -与’intDiv’的不同之处在于它在除以零或将最小负数除以-1时返回零。 +与'intDiv'的不同之处在于它在除以零或将最小负数除以-1时返回零。 -## modulo(a, b), a % b {#moduloa-b-a-b} +## 模(a,b),a%b {#moduloa-b-a-b} 计算除法后的余数。 如果参数是浮点数,则通过删除小数部分将它们预转换为整数。 其余部分与C++中的含义相同。截断除法用于负数。 除以零或将最小负数除以-1时抛出异常。 -## negate(a), -a {#negatea-a} +## 否定(a),-a {#negatea-a} 计算一个数字的 用反转符号计算一个数字。结果始终是签名的。 -Calculates a number with the reverse sign. The result is always signed. 
+计算具有反向符号的数字。 结果始终签名。 ## abs(a) {#arithm_func-abs} 计算数字(a)的绝对值。也就是说,如果a &lt; 0,它返回-a。对于无符号类型,它不执行任何操作。对于有符号整数类型,它返回无符号数。 -## gcd(a, b) {#gcda-b} +## gcd(a,b) {#gcda-b} 返回数字的最大公约数。 除以零或将最小负数除以-1时抛出异常。 -## lcm(a, b) {#lcma-b} +## lcm(a,b) {#lcma-b} 返回数字的最小公倍数。 除以零或将最小负数除以-1时抛出异常。 diff --git a/docs/zh/query_language/functions/array_functions.md b/docs/zh/sql_reference/functions/array_functions.md similarity index 88% rename from docs/zh/query_language/functions/array_functions.md rename to docs/zh/sql_reference/functions/array_functions.md index 7f0d734a7c9..cb8f7347b72 100644 --- a/docs/zh/query_language/functions/array_functions.md +++ b/docs/zh/sql_reference/functions/array_functions.md @@ -1,6 +1,7 @@ + # 数组函数 {#shu-zu-han-shu} -## empty {#empty} +## 空 {#empty} 对于空数组返回1,对于非空数组返回0。 结果类型是UInt8。 @@ -12,21 +13,21 @@ 结果类型是UInt8。 该函数也适用于字符串。 -## length {#array_functions-length} +## 长度 {#array_functions-length} 返回数组中的元素个数。 结果类型是UInt64。 该函数也适用于字符串。 -## emptyArrayUInt8, emptyArrayUInt16, emptyArrayUInt32, emptyArrayUInt64 {#emptyarrayuint8-emptyarrayuint16-emptyarrayuint32-emptyarrayuint64} +## emptyArrayUInt8,emptyArrayUInt16,emptyArrayUInt32,emptyArrayUInt64 {#emptyarrayuint8-emptyarrayuint16-emptyarrayuint32-emptyarrayuint64} -## emptyArrayInt8, emptyArrayInt16, emptyArrayInt32, emptyArrayInt64 {#emptyarrayint8-emptyarrayint16-emptyarrayint32-emptyarrayint64} +## emptyArrayInt8,emptyArrayInt16,emptyArrayInt32,emptyArrayInt64 {#emptyarrayint8-emptyarrayint16-emptyarrayint32-emptyarrayint64} -## emptyArrayFloat32, emptyArrayFloat64 {#emptyarrayfloat32-emptyarrayfloat64} +## emptyArrayFloat32,emptyArrayFloat64 {#emptyarrayfloat32-emptyarrayfloat64} -## emptyArrayDate, emptyArrayDateTime {#emptyarraydate-emptyarraydatetime} +## 空空漫步,空空漫步时间 {#emptyarraydate-emptyarraydatetime} -## emptyArrayString {#emptyarraystring} +## 空字符串 {#emptyarraystring} 不接受任何参数并返回适当类型的空数组。 @@ -34,7 +35,7 @@ 接受一个空数组并返回一个仅包含一个默认值元素的数组。 -## range(N) {#rangen} +## 范围(N) {#rangen} 返回从0到N-1的数字数组。 以防万一,如果在数据块中创建总长度超过100,000,000个元素的数组,则抛出异常。 @@ -42,8 +43,8 @@ ## array(x1, …), operator \[x1, …\] {#arrayx1-operator-x1} 使用函数的参数作为数组元素创建一个数组。 -参数必须是常量,并且具有最小公共类型的类型。必须至少传递一个参数,否则将不清楚要创建哪种类型的数组。也就是说,你不能使用这个函数来创建一个空数组(为此,使用上面描述的’emptyArray  \*’函数)。 -返回’Array(T)’类型的结果,其中’T’是传递的参数中最小的公共类型。 +参数必须是常量,并且具有最小公共类型的类型。必须至少传递一个参数,否则将不清楚要创建哪种类型的数组。也就是说,你不能使用这个函数来创建一个空数组(为此,使用上面描述的'emptyArray  \*'函数)。 +返回'Array(T)'类型的结果,其中'T'是传递的参数中最小的公共类型。 ## arrayConcat {#arrayconcat} @@ -53,7 +54,7 @@ **参数** -- `arrays` – 任意数量的[Array](../../data_types/array.md)类型的参数. +- `arrays` – 任意数量的[阵列](../../sql_reference/functions/array_functions.md)类型的参数. **示例** @@ -66,7 +67,7 @@ SELECT arrayConcat([1, 2], [3, 4], [5, 6]) AS res │ [1,2,3,4,5,6] │ └───────────────┘ -## arrayElement(arr, n), operator arr\[n\] {#arrayelementarr-n-operator-arrn} +## arrayElement(arr,n),运算符arr\[n\] {#arrayelementarr-n-operator-arrn} 从数组`arr`中获取索引为«n»的元素。 `n`必须是任何整数类型。 数组中的索引从一开始。 @@ -74,9 +75,9 @@ SELECT arrayConcat([1, 2], [3, 4], [5, 6]) AS res 如果索引超出数组的边界,则返回默认值(数字为0,字符串为空字符串等)。 -## has(arr, elem) {#hasarr-elem} +## 有(arr,elem) {#hasarr-elem} -检查’arr’数组是否具有’elem’元素。 +检查'arr'数组是否具有'elem'元素。 如果元素不在数组中,则返回0;如果在,则返回1。 `NULL` 值的处理。 @@ -111,17 +112,17 @@ SELECT arrayConcat([1, 2], [3, 4], [5, 6]) AS res **示例** -`SELECT hasAll([], [])` returns 1. +`SELECT hasAll([], [])` 返回1。 -`SELECT hasAll([1, Null], [Null])` returns 1. +`SELECT hasAll([1, Null], [Null])` 返回1。 -`SELECT hasAll([1.0, 2, 3, 4], [1, 3])` returns 1. 
+`SELECT hasAll([1.0, 2, 3, 4], [1, 3])` 返回1。 -`SELECT hasAll(['a', 'b'], ['a'])` returns 1. +`SELECT hasAll(['a', 'b'], ['a'])` 返回1。 -`SELECT hasAll([1], ['a'])` returns 0. +`SELECT hasAll([1], ['a'])` 返回0。 -`SELECT hasAll([[1, 2], [3, 4]], [[1, 2], [3, 5]])` returns 0. +`SELECT hasAll([[1, 2], [3, 4]], [[1, 2], [3, 5]])` 返回0。 ## hasAny {#hasany} @@ -146,19 +147,19 @@ SELECT arrayConcat([1, 2], [3, 4], [5, 6]) AS res **示例** -`SELECT hasAny([1], [])` returns `0`. +`SELECT hasAny([1], [])` 返回 `0`. -`SELECT hasAny([Null], [Null, 1])` returns `1`. +`SELECT hasAny([Null], [Null, 1])` 返回 `1`. -`SELECT hasAny([-128, 1., 512], [1])` returns `1`. +`SELECT hasAny([-128, 1., 512], [1])` 返回 `1`. -`SELECT hasAny([[1, 2], [3, 4]], ['a', 'c'])` returns `0`. +`SELECT hasAny([[1, 2], [3, 4]], ['a', 'c'])` 返回 `0`. -`SELECT hasAll([[1, 2], [3, 4]], [[1, 2], [1, 2]])` returns `1`. +`SELECT hasAll([[1, 2], [3, 4]], [[1, 2], [1, 2]])` 返回 `1`. -## indexOf(arr, x) {#indexofarr-x} +## indexOf(arr,x) {#indexofarr-x} -返回数组中第一个‘x’元素的索引(从1开始),如果‘x’元素不存在在数组中,则返回0。 +返回数组中第一个'x'元素的索引(从1开始),如果'x'元素不存在在数组中,则返回0。 示例: @@ -172,7 +173,7 @@ SELECT arrayConcat([1, 2], [3, 4], [5, 6]) AS res 设置为«NULL»的元素将作为普通的元素值处理。 -## countEqual(arr, x) {#countequalarr-x} +## countEqual(arr,x) {#countequalarr-x} 返回数组中等于x的元素的个数。相当于arrayCount(elem - \> elem = x,arr)。 @@ -186,7 +187,7 @@ SELECT arrayConcat([1, 2], [3, 4], [5, 6]) AS res │ 2 │ └──────────────────────────────────────┘ -## arrayEnumerate(arr) {#array_functions-arrayenumerate} +## ツ暗ェツ氾环催ツ団ツ法ツ人) {#array_functions-arrayenumerate} 返回 Array \[1, 2, 3, …, length (arr) \] @@ -324,7 +325,7 @@ SELECT arrayPopFront([1, 2, 3]) AS res **参数** - `array` – 数组。 -- `single_value` – 单个值。只能将数字添加到带数字的数组中,并且只能将字符串添加到字符串数组中。添加数字时,ClickHouse会自动为数组的数据类型设置`single_value`类型。有关ClickHouse中数据类型的更多信息,请参阅«[数据类型](../../data_types/index.md#data_types)»。可以是’NULL`。该函数向数组添加一个«NULL»元素,数组元素的类型转换为`Nullable\`。 +- `single_value` – 单个值。只能将数字添加到带数字的数组中,并且只能将字符串添加到字符串数组中。添加数字时,ClickHouse会自动为数组的数据类型设置`single_value`类型。有关ClickHouse中数据类型的更多信息,请参阅«[数据类型](../../sql_reference/functions/array_functions.md#data_types)»。可以是'NULL`。该函数向数组添加一个«NULL»元素,数组元素的类型转换为`Nullable\`。 **示例** @@ -345,7 +346,7 @@ SELECT arrayPushBack(['a'], 'b') AS res **参数** - `array` – 数组。 -- `single_value` – 单个值。只能将数字添加到带数字的数组中,并且只能将字符串添加到字符串数组中。添加数字时,ClickHouse会自动为数组的数据类型设置`single_value`类型。有关ClickHouse中数据类型的更多信息,请参阅«[数据类型](../../data_types/index.md#data_types)»。可以是’NULL`。该函数向数组添加一个«NULL»元素,数组元素的类型转换为`Nullable\`。 +- `single_value` – 单个值。只能将数字添加到带数字的数组中,并且只能将字符串添加到字符串数组中。添加数字时,ClickHouse会自动为数组的数据类型设置`single_value`类型。有关ClickHouse中数据类型的更多信息,请参阅«[数据类型](../../sql_reference/functions/array_functions.md#data_types)»。可以是'NULL`。该函数向数组添加一个«NULL»元素,数组元素的类型转换为`Nullable\`。 **示例** @@ -369,7 +370,7 @@ SELECT arrayPushFront(['b'], 'a') AS res - `size` — 数组所需的长度。 - 如果`size`小于数组的原始大小,则数组将从右侧截断。 - 如果`size`大于数组的初始大小,则使用`extender`值或数组项的数据类型的默认值将数组扩展到右侧。 -- `extender` — 扩展数组的值。可以是’NULL\`。 +- `extender` — 扩展数组的值。可以是'NULL\`。 **返回值:** @@ -476,7 +477,7 @@ SELECT arraySort((x, y) -> y, ['hello', 'world'], [2, 1]) as res; │ ['world', 'hello'] │ └────────────────────┘ -这里,在第二个数组(\[2, 1\])中定义了第一个数组(\[‘hello’,‘world’\])的相应元素的排序键,即\[‘hello’ -\> 2,‘world’ -\> 1\]。 由于lambda函数中没有使用`x`,因此源数组中的实际值不会影响结果的顺序。所以,‘world’将是结果中的第一个元素,‘hello’将是结果中的第二个元素。 +这里,在第二个数组(\[2, 1\])中定义了第一个数组(\[‘hello’,‘world’\])的相应元素的排序键,即\[‘hello’ -\> 2,‘world’ -\> 1\]。 由于lambda函数中没有使用`x`,因此源数组中的实际值不会影响结果的顺序。所以,'world'将是结果中的第一个元素,'hello'将是结果中的第二个元素。 其他示例如下所示。 @@ -501,7 +502,7 @@ SELECT arraySort((x, y) -> -y, [0, 1, 2], [1, 2, 3]) 
as res; ``` !!! 注意 "注意" - 为了提高排序效率, 使用了[Schwartzian transform](https://en.wikipedia.org/wiki/Schwartzian_transform)。 + 为了提高排序效率, 使用了[施瓦茨变换](https://en.wikipedia.org/wiki/Schwartzian_transform)。 ## arrayReverseSort(\[func,\] arr, …) {#array_functions-reverse-sort} @@ -555,7 +556,7 @@ SELECT arrayReverseSort((x) -> -x, [1, 2, 3]) as res; └─────────┘ 数组按以下方式排序: -The array is sorted in the following way: +数组按以下方式排序: 1. 首先,根据lambda函数的调用结果对源数组(\[1, 2, 3\])进行排序。 结果是\[3, 2, 1\]。 2. 反转上一步获得的数组。 所以,最终的结果是\[1, 2, 3\]。 @@ -606,7 +607,7 @@ SELECT arrayReverseSort((x, y) -> -y, [4, 3, 5], [1, 2, 3]) AS res; 如果要获取数组中唯一项的列表,可以使用arrayReduce(‘groupUniqArray’,arr)。 -## arrayJoin(arr) {#array-functions-join} +## arryjoin(arr) {#array-functions-join} 一个特殊的功能。请参见[«ArrayJoin函数»](array_join.md#functions_arrayjoin)部分。 @@ -658,7 +659,7 @@ SELECT arrayReduce(‘agg\_func’,arr1,…) - 将聚合函数`agg_func`应用于数组`arr1 ...`。如果传递了多个数组,则相应位置上的元素将作为多个参数传递给聚合函数。例如:SELECT arrayReduce(‘max’,\[1,2,3\])= 3 -## arrayReverse(arr) {#arrayreversearr} +## ツ暗ェツ氾环催ツ団ツ法ツ人) {#arrayreversearr} 返回与源数组大小相同的数组,包含反转源数组的所有元素的结果。 diff --git a/docs/zh/query_language/functions/array_join.md b/docs/zh/sql_reference/functions/array_join.md similarity index 93% rename from docs/zh/query_language/functions/array_join.md rename to docs/zh/sql_reference/functions/array_join.md index b7a4855efa5..1788b44f3e5 100644 --- a/docs/zh/query_language/functions/array_join.md +++ b/docs/zh/sql_reference/functions/array_join.md @@ -1,10 +1,11 @@ + # arrayJoin函数 {#functions_arrayjoin} 这是一个非常有用的函数。 普通函数不会更改结果集的行数,而只是计算每行中的值(map)。 聚合函数将多行压缩到一行中(fold或reduce)。 -’arrayJoin’函数获取每一行并将他们展开到多行(unfold)。 +'arrayJoin'函数获取每一行并将他们展开到多行(unfold)。 此函数将数组作为参数,并将该行在结果集中复制数组元素个数。 除了应用此函数的列中的值之外,简单地复制列中的所有值;它被替换为相应的数组值。 diff --git a/docs/zh/query_language/functions/bit_functions.md b/docs/zh/sql_reference/functions/bit_functions.md similarity index 58% rename from docs/zh/query_language/functions/bit_functions.md rename to docs/zh/sql_reference/functions/bit_functions.md index a9ded6b0930..1b280c8babd 100644 --- a/docs/zh/query_language/functions/bit_functions.md +++ b/docs/zh/sql_reference/functions/bit_functions.md @@ -1,29 +1,30 @@ + # 位操作函数 {#wei-cao-zuo-han-shu} 位操作函数适用于UInt8,UInt16,UInt32,UInt64,Int8,Int16,Int32,Int64,Float32或Float64中的任何类型。 结果类型是一个整数,其位数等于其参数的最大位。如果至少有一个参数为有符数字,则结果为有符数字。如果参数是浮点数,则将其强制转换为Int64。 -## bitAnd(a, b) {#bitanda-b} +## bitAnd(a,b) {#bitanda-b} -## bitOr(a, b) {#bitora-b} +## bitOr(a,b) {#bitora-b} -## bitXor(a, b) {#bitxora-b} +## bitXor(a,b) {#bitxora-b} ## bitNot(a) {#bitnota} -## bitShiftLeft(a, b) {#bitshiftlefta-b} +## bitShiftLeft(a,b) {#bitshiftlefta-b} -## bitShiftRight(a, b) {#bitshiftrighta-b} +## bitShiftRight(a,b) {#bitshiftrighta-b} -## bitRotateLeft(a, b) {#bitrotatelefta-b} +## bitRotateLeft(a,b) {#bitrotatelefta-b} -## bitRotateRight(a, b) {#bitrotaterighta-b} +## bitRotateRight(a,b) {#bitrotaterighta-b} -## bitTest(a, b) {#bittesta-b} +## bitTest(a,b) {#bittesta-b} -## bitTestAll(a, b) {#bittestalla-b} +## bitTestAll(a,b) {#bittestalla-b} -## bitTestAny(a, b) {#bittestanya-b} +## bitTestAny(a,b) {#bittestanya-b} [来源文章](https://clickhouse.tech/docs/en/query_language/functions/bit_functions/) diff --git a/docs/zh/query_language/functions/bitmap_functions.md b/docs/zh/sql_reference/functions/bitmap_functions.md similarity index 96% rename from docs/zh/query_language/functions/bitmap_functions.md rename to docs/zh/sql_reference/functions/bitmap_functions.md index 498212bc1fe..3415b590644 100644 --- 
a/docs/zh/query_language/functions/bitmap_functions.md +++ b/docs/zh/sql_reference/functions/bitmap_functions.md @@ -1,3 +1,4 @@ + # 位图函数 {#wei-tu-han-shu} 位图函数用于对两个位图对象进行计算,对于任何一个位图函数,它都将返回一个位图对象,例如and,or,xor,not等等。 @@ -6,7 +7,7 @@ 我们使用RoaringBitmap实际存储位图对象,当基数小于或等于32时,它使用Set保存。当基数大于32时,它使用RoaringBitmap保存。这也是为什么低基数集的存储更快的原因。 -有关RoaringBitmap的更多信息,请参阅:[CRoaring](https://github.com/RoaringBitmap/CRoaring)。 +有关RoaringBitmap的更多信息,请参阅:[呻吟声](https://github.com/RoaringBitmap/CRoaring)。 ## bitmapBuild {#bitmapbuild} @@ -153,7 +154,7 @@ SELECT bitmapHasAll(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res │ 0 │ └─────┘ -## bitmapAnd {#bitmapand} +## 位图和 {#bitmapand} 为两个位图对象进行与操作,返回一个新的位图对象。 @@ -174,13 +175,13 @@ SELECT bitmapToArray(bitmapAnd(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS re │ [3] │ └─────┘ -## bitmapOr {#bitmapor} +## 位图 {#bitmapor} 为两个位图对象进行或操作,返回一个新的位图对象。 bitmapOr(bitmap1,bitmap2) -**Parameters** +**参数** - `bitmap1` – 位图对象。 - `bitmap2` – 位图对象。 @@ -243,7 +244,7 @@ SELECT bitmapToArray(bitmapAndnot(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS bitmapCardinality(bitmap) -**Parameters** +**参数** - `bitmap` – 位图对象。 @@ -263,7 +264,7 @@ SELECT bitmapCardinality(bitmapBuild([1, 2, 3, 4, 5])) AS res bitmapMin(bitmap) -**Parameters** +**参数** - `bitmap` – 位图对象。 @@ -283,7 +284,7 @@ SELECT bitmapMin(bitmapBuild([1, 2, 3, 4, 5])) AS res bitmapMax(bitmap) -**Parameters** +**参数** - `bitmap` – 位图对象。 @@ -297,7 +298,7 @@ SELECT bitmapMax(bitmapBuild([1, 2, 3, 4, 5])) AS res │ 5 │ └─────┘ -## bitmapAndCardinality {#bitmapandcardinality} +## 位图和标准性 {#bitmapandcardinality} 为两个位图对象进行与操作,返回结果位图的基数。 @@ -360,7 +361,7 @@ SELECT bitmapXorCardinality(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res; │ 4 │ └─────┘ -## bitmapAndnotCardinality {#bitmapandnotcardinality} +## 位图和非标准性 {#bitmapandnotcardinality} 计算两个位图的差异,返回结果位图的基数。 diff --git a/docs/zh/query_language/functions/comparison_functions.md b/docs/zh/sql_reference/functions/comparison_functions.md similarity index 69% rename from docs/zh/query_language/functions/comparison_functions.md rename to docs/zh/sql_reference/functions/comparison_functions.md index ce8c3728b5b..a73d983f386 100644 --- a/docs/zh/query_language/functions/comparison_functions.md +++ b/docs/zh/sql_reference/functions/comparison_functions.md @@ -1,13 +1,14 @@ + # 比较函数 {#bi-jiao-han-shu} 比较函数始终返回0或1(UInt8)。 可以比较以下类型: -- Numbers +- 数字 - String 和 FixedString -- Date -- DateTime +- 日期 +- 日期时间 以上每个组内的类型均可互相比较,但是对于不同组的类型间不能够进行比较。 @@ -17,16 +18,16 @@ 注意。直到1.1.54134版本,有符号和无符号数字的比较方式与C++相同。换句话说,在SELECT 9223372036854775807 &gt; -1 等情况下,您可能会得到错误的结果。 此行为在版本1.1.54134中已更改,现在在数学上是正确的。 -## equals, a = b and a == b operator {#equals-a-b-and-a-b-operator} +## 等于,a=b和a==b运算符 {#equals-a-b-and-a-b-operator} -## notEquals, a ! operator= b and a `<>` b {#notequals-a-operator-b-and-a-b} +## notEquals,a! 
运算符=b和a `<>` b {#notequals-a-operator-b-and-a-b} -## less, `< operator` {#less-operator} +## 少, `< operator` {#less-operator} -## greater, `> operator` {#greater-operator} +## 更大, `> operator` {#greater-operator} -## lessOrEquals, `<= operator` {#lessorequals-operator} +## 出租等级, `<= operator` {#lessorequals-operator} -## greaterOrEquals, `>= operator` {#greaterorequals-operator} +## 伟大的等级, `>= operator` {#greaterorequals-operator} [来源文章](https://clickhouse.tech/docs/en/query_language/functions/comparison_functions/) diff --git a/docs/zh/query_language/functions/conditional_functions.md b/docs/zh/sql_reference/functions/conditional_functions.md similarity index 91% rename from docs/zh/query_language/functions/conditional_functions.md rename to docs/zh/sql_reference/functions/conditional_functions.md index 175656b8374..d9721fa3f60 100644 --- a/docs/zh/query_language/functions/conditional_functions.md +++ b/docs/zh/sql_reference/functions/conditional_functions.md @@ -1,13 +1,14 @@ + # 条件函数 {#tiao-jian-han-shu} -## if(cond, then, else), cond ? operator then : else {#ifcond-then-else-cond-operator-then-else} +## 如果(cond,那么,否则),cond? 运算符然后:else {#ifcond-then-else-cond-operator-then-else} 如果`cond != 0`则返回`then`,如果`cond = 0`则返回`else`。 `cond`必须是`UInt8`类型,`then`和`else`必须存在最低的共同类型。 `then`和`else`可以是`NULL` -## multiIf {#multiif} +## 多 {#multiif} 允许您在查询中更紧凑地编写[CASE](../operators.md#operator_case)运算符。 diff --git a/docs/zh/query_language/functions/date_time_functions.md b/docs/zh/sql_reference/functions/date_time_functions.md similarity index 66% rename from docs/zh/query_language/functions/date_time_functions.md rename to docs/zh/sql_reference/functions/date_time_functions.md index fe9961d7658..ca974f563db 100644 --- a/docs/zh/query_language/functions/date_time_functions.md +++ b/docs/zh/sql_reference/functions/date_time_functions.md @@ -1,3 +1,4 @@ + # 时间日期函数 {#shi-jian-ri-qi-han-shu} 支持时区。 @@ -22,11 +23,11 @@ SELECT 将Date或DateTime转换为指定的时区。 -## toYear {#toyear} +## 玩一年 {#toyear} 将Date或DateTime转换为包含年份编号(AD)的UInt16类型的数字。 -## toQuarter {#toquarter} +## 到四分钟 {#toquarter} 将Date或DateTime转换为包含季度编号的UInt8类型的数字。 @@ -34,15 +35,15 @@ SELECT 将Date或DateTime转换为包含月份编号(1-12)的UInt8类型的数字。 -## toDayOfYear {#todayofyear} +## 今天一年 {#todayofyear} 将Date或DateTime转换为包含一年中的某一天的编号的UInt16(1-366)类型的数字。 -## toDayOfMonth {#todayofmonth} +## 今天月 {#todayofmonth} 将Date或DateTime转换为包含一月中的某一天的编号的UInt8(1-31)类型的数字。 -## toDayOfWeek {#todayofweek} +## 今天一周 {#todayofweek} 将Date或DateTime转换为包含一周中的某一天的编号的UInt8(周一是1, 周日是7)类型的数字。 @@ -55,7 +56,7 @@ SELECT 将DateTime转换为包含一小时中分钟数(0-59)的UInt8数字。 -## toSecond {#tosecond} +## 秒 {#tosecond} 将DateTime转换为包含一分钟中秒数(0-59)的UInt8数字。 闰秒不计算在内。 @@ -64,22 +65,22 @@ SELECT 将DateTime转换为unix时间戳。 -## toStartOfYear {#tostartofyear} +## 开始一年 {#tostartofyear} 将Date或DateTime向前取整到本年的第一天。 返回Date类型。 -## toStartOfISOYear {#tostartofisoyear} +## 今年开始 {#tostartofisoyear} 将Date或DateTime向前取整到ISO本年的第一天。 返回Date类型。 -## toStartOfQuarter {#tostartofquarter} +## 四分之一开始 {#tostartofquarter} 将Date或DateTime向前取整到本季度的第一天。 返回Date类型。 -## toStartOfMonth {#tostartofmonth} +## 到月份开始 {#tostartofmonth} 将Date或DateTime向前取整到本月的第一天。 返回Date类型。 @@ -92,31 +93,31 @@ SELECT 将Date或DateTime向前取整到本周的星期一。 返回Date类型。 -## toStartOfDay {#tostartofday} +## 今天开始 {#tostartofday} 将DateTime向前取整到当日的开始。 -## toStartOfHour {#tostartofhour} +## 开始一小时 {#tostartofhour} 将DateTime向前取整到当前小时的开始。 -## toStartOfMinute {#tostartofminute} +## to startofminute {#tostartofminute} 将DateTime向前取整到当前分钟的开始。 -## toStartOfFiveMinute {#tostartoffiveminute} +## to startoffiveminute 
{#tostartoffiveminute} 将DateTime以五分钟为单位向前取整到最接近的时间点。 -## toStartOfTenMinutes {#tostartoftenminutes} +## 开始分钟 {#tostartoftenminutes} 将DateTime以十分钟为单位向前取整到最接近的时间点。 -## toStartOfFifteenMinutes {#tostartoffifteenminutes} +## 开始几分钟 {#tostartoffifteenminutes} 将DateTime以十五分钟为单位向前取整到最接近的时间点。 -## toStartOfInterval(time\_or\_data, INTERVAL x unit \[, time\_zone\]) {#tostartofintervaltime-or-data-interval-x-unit-time-zone} +## toStartOfInterval(time\_or\_data,间隔x单位\[,time\_zone\]) {#tostartofintervaltime-or-data-interval-x-unit-time-zone} 这是名为`toStartOf*`的所有函数的通用函数。例如, `toStartOfInterval(t,INTERVAL 1 year)`返回与`toStartOfYear(t)`相同的结果, @@ -168,31 +169,31 @@ SELECT 将Date或DateTime转换为包含ISO周数的UInt8类型的编号。 -## now {#now} +## 现在 {#now} 不接受任何参数并在请求执行时的某一刻返回当前时间(DateTime)。 此函数返回一个常量,即时请求需要很长时间能够完成。 -## today {#today} +## 今天 {#today} 不接受任何参数并在请求执行时的某一刻返回当前日期(Date)。 -其功能与’toDate(now())’相同。 +其功能与'toDate(now())'相同。 -## yesterday {#yesterday} +## 昨天 {#yesterday} 不接受任何参数并在请求执行时的某一刻返回昨天的日期(Date)。 -其功能与’today() - 1’相同。 +其功能与'today() - 1'相同。 -## timeSlot {#timeslot} +## 时隙 {#timeslot} 将时间向前取整半小时。 此功能用于Yandex.Metrica,因为如果跟踪标记显示单个用户的连续综合浏览量在时间上严格超过此数量,则半小时是将会话分成两个会话的最短时间。这意味着(tag id,user id,time slot)可用于搜索相应会话中包含的综合浏览量。 -## toYYYYMM {#toyyyymm} +## toyyymm {#toyyyymm} 将Date或DateTime转换为包含年份和月份编号的UInt32类型的数字(YYYY \* 100 + MM)。 -## toYYYYMMDD {#toyyyymmdd} +## toyyymmdd {#toyyyymmdd} 将Date或DateTime转换为包含年份和月份编号的UInt32类型的数字(YYYY \* 10000 + MM \* 100 + DD)。 @@ -200,7 +201,7 @@ SELECT 将Date或DateTime转换为包含年份和月份编号的UInt64类型的数字(YYYY \* 10000000000 + MM \* 100000000 + DD \* 1000000 + hh \* 10000 + mm \* 100 + ss)。 -## addYears, addMonths, addWeeks, addDays, addHours, addMinutes, addSeconds, addQuarters {#addyears-addmonths-addweeks-adddays-addhours-addminutes-addseconds-addquarters} +## 隆隆隆隆路虏脢,,陇,貌,垄拢卢虏禄quar陇,貌路,隆拢脳枚脢虏,麓脢,脱,,,录,禄庐戮,utes, {#addyears-addmonths-addweeks-adddays-addhours-addminutes-addseconds-addquarters} 函数将一段时间间隔添加到Date/DateTime,然后返回Date/DateTime。例如: @@ -217,7 +218,7 @@ SELECT │ 2019-01-01 │ 2019-01-01 00:00:00 │ └─────────────────────┴──────────────────────────┘ -## subtractYears, subtractMonths, subtractWeeks, subtractDays, subtractHours, subtractMinutes, subtractSeconds, subtractQuarters {#subtractyears-subtractmonths-subtractweeks-subtractdays-subtracthours-subtractminutes-subtractseconds-subtractquarters} +## subtractYears,subtractMonths,subtractWeeks,subtractDays,subtractours,subtractMinutes,subtractSeconds,subtractQuarters {#subtractyears-subtractmonths-subtractweeks-subtractdays-subtracthours-subtractminutes-subtractseconds-subtractquarters} 函数将Date/DateTime减去一段时间间隔,然后返回Date/DateTime。例如: @@ -234,59 +235,59 @@ SELECT │ 2018-01-01 │ 2018-01-01 00:00:00 │ └──────────────────────────┴───────────────────────────────┘ -## dateDiff(‘unit’, t1, t2, \[timezone\]) {#datediffunit-t1-t2-timezone} +## dateDiff(‘unit’,t1,t2,\[时区\]) {#datediffunit-t1-t2-timezone} -返回以’unit’为单位表示的两个时间之间的差异,例如`'hours'`。 ‘t1’和’t2’可以是Date或DateTime,如果指定’timezone’,它将应用于两个参数。如果不是,则使用来自数据类型’t1’和’t2’的时区。如果时区不相同,则结果将是未定义的。 +返回以'unit'为单位表示的两个时间之间的差异,例如`'hours'`。 ‘t1’和’t2’可以是Date或DateTime,如果指定’timezone’,它将应用于两个参数。如果不是,则使用来自数据类型't1'和't2'的时区。如果时区不相同,则结果将是未定义的。 -Supported unit values: +支持的单位值: -| unit | -|---------| -| second | -| minute | -| hour | -| day | -| week | -| month | -| quarter | -| year | +| 单位 | +|------| +| 第二 | +| 分钟 | +| 小时 | +| 日 | +| 周 | +| 月 | +| 季 | +| 年 | -## timeSlots(StartTime, Duration,\[, Size\]) {#timeslotsstarttime-duration-size} +## 时隙(开始时间,持续时间,\[,大小\]) {#timeslotsstarttime-duration-size} 
它返回一个时间数组,其中包括从从«StartTime»开始到«StartTime + Duration 秒»内的所有符合«size»(以秒为单位)步长的时间点。其中«size»是一个可选参数,默认为1800。 例如,`timeSlots(toDateTime('2012-01-01 12:20:00'),600) = [toDateTime('2012-01-01 12:00:00'),toDateTime('2012-01-01 12:30:00' )]`。 这对于搜索在相应会话中综合浏览量是非常有用的。 -## formatDateTime(Time, Format\[, Timezone\]) {#formatdatetimetime-format-timezone} +## formatDateTime(时间,格式\[,时区\]) {#formatdatetimetime-format-timezone} 函数根据给定的格式字符串来格式化时间。请注意:格式字符串必须是常量表达式,例如:单个结果列不能有多种格式字符串。 支持的格式修饰符: («Example» 列是对`2018-01-02 22:33:44`的格式化结果) -| Modifier | Description | Example | -|----------|---------------------------------------------------------|------------| -| %C | year divided by 100 and truncated to integer (00-99) | 20 | -| %d | day of the month, zero-padded (01-31) | 02 | -| %D | Short MM/DD/YY date, equivalent to %m/%d/%y | 01/02/2018 | -| %e | day of the month, space-padded ( 1-31) | 2 | -| %F | short YYYY-MM-DD date, equivalent to %Y-%m-%d | 2018-01-02 | -| %H | hour in 24h format (00-23) | 22 | -| %I | hour in 12h format (01-12) | 10 | -| %j | day of the year (001-366) | 002 | -| %m | month as a decimal number (01-12) | 01 | -| %M | minute (00-59) | 33 | -| %n | new-line character (‘’) | | -| %p | AM or PM designation | PM | -| %R | 24-hour HH:MM time, equivalent to %H:%M | 22:33 | -| %S | second (00-59) | 44 | -| %t | horizontal-tab character (’) | | -| %T | ISO 8601 time format (HH:MM:SS), equivalent to %H:%M:%S | 22:33:44 | -| %u | ISO 8601 weekday as number with Monday as 1 (1-7) | 2 | -| %V | ISO 8601 week number (01-53) | 01 | -| %w | weekday as a decimal number with Sunday as 0 (0-6) | 2 | -| %y | Year, last two digits (00-99) | 18 | -| %Y | Year | 2018 | -| %% | a % sign | % | +| 修饰符 | 产品描述 | 示例 | +|--------|-------------------------------------------|------------| +| %C | 年除以100并截断为整数(00-99) | 20 | +| %d | 月中的一天,零填充(01-31) | 02 | +| %D | 短MM/DD/YY日期,相当于%m/%d/%y | 01/02/2018 | +| %e | 月中的一天,空格填充(1-31) | 2 | +| %F | 短YYYY-MM-DD日期,相当于%Y-%m-%d | 2018-01-02 | +| %H | 24小时格式(00-23) | 22 | +| %I | 小时12h格式(01-12) | 10 | +| %j | 一年(001-366) | 002 | +| %m | 月份为十进制数(01-12) | 01 | +| %M | 分钟(00-59) | 33 | +| %n | 换行符(") | | +| %p | AM或PM指定 | PM | +| %R | 24小时HH:MM时间,相当于%H:%M | 22:33 | +| %S | 第二(00-59) | 44 | +| %t | 水平制表符(') | | +| %T | ISO8601时间格式(HH:MM:SS),相当于%H:%M:%S | 22:33:44 | +| %u | ISO8601平日as编号,星期一为1(1-7) | 2 | +| %V | ISO8601周编号(01-53) | 01 | +| %w | 周日为十进制数,周日为0(0-6) | 2 | +| %y | 年份,最后两位数字(00-99) | 18 | +| %Y | 年 | 2018 | +| %% | %符号 | % | [来源文章](https://clickhouse.tech/docs/en/query_language/functions/date_time_functions/) diff --git a/docs/zh/query_language/functions/encoding_functions.md b/docs/zh/sql_reference/functions/encoding_functions.md similarity index 95% rename from docs/zh/query_language/functions/encoding_functions.md rename to docs/zh/sql_reference/functions/encoding_functions.md index 589edd75450..42d10c4408f 100644 --- a/docs/zh/query_language/functions/encoding_functions.md +++ b/docs/zh/sql_reference/functions/encoding_functions.md @@ -1,3 +1,4 @@ + # 编码函数 {#bian-ma-han-shu} ## hex {#hex} @@ -17,11 +18,11 @@ 接受FixedString(16)值。返回包含36个字符的文本格式的字符串。 -## bitmaskToList(num) {#bitmasktolistnum} +## 位掩码列表(num) {#bitmasktolistnum} 接受一个整数。返回一个字符串,其中包含一组2的幂列表,其列表中的所有值相加等于这个整数。列表使用逗号分割,按升序排列。 -## bitmaskToArray(num) {#bitmasktoarraynum} +## 位掩码阵列(num) {#bitmasktoarraynum} 接受一个整数。返回一个UInt64类型数组,其中包含一组2的幂列表,其列表中的所有值相加等于这个整数。数组中的数字按升序排列。 diff --git a/docs/zh/sql_reference/functions/ext_dict_functions.md b/docs/zh/sql_reference/functions/ext_dict_functions.md new file mode 100644 index 
00000000000..23077618722 --- /dev/null +++ b/docs/zh/sql_reference/functions/ext_dict_functions.md @@ -0,0 +1,47 @@ + +# 字典函数 {#zi-dian-han-shu} + +有关连接和配置外部词典的信息,请参阅[外部词典](../../sql_reference/functions/ext_dict_functions.md)。 + +## dictGetUInt8,dictGetUInt16,dictGetUInt32,dictGetUInt64 {#dictgetuint8-dictgetuint16-dictgetuint32-dictgetuint64} + +## dictGetInt8,dictGetInt16,dictGetInt32,dictGetInt64 {#dictgetint8-dictgetint16-dictgetint32-dictgetint64} + +## dictGetFloat32,dictGetFloat64 {#dictgetfloat32-dictgetfloat64} + +## dictGetDate,dictGetDateTime {#dictgetdate-dictgetdatetime} + +## dictgetuid {#dictgetuuid} + +## dictGetString {#dictgetstring} + +`dictGetT('dict_name', 'attr_name', id)` + +- 使用'id'键获取dict\_name字典中attr\_name属性的值。`dict_name`和`attr_name`是常量字符串。`id`必须是UInt64。 + 如果字典中没有`id`键,则返回字典描述中指定的默认值。 + +## dictGetTOrDefault {#ext_dict_functions-dictgettordefault} + +`dictGetTOrDefault('dict_name', 'attr_name', id, default)` + +与`dictGetT`函数相同,但默认值取自函数的最后一个参数。 + +## dictIsIn {#dictisin} + +`dictIsIn ('dict_name', child_id, ancestor_id)` + +- 对于'dict\_name'分层字典,查找'child\_id'键是否位于'ancestor\_id'内(或匹配'ancestor\_id')。返回UInt8。 + +## 独裁主义 {#dictgethierarchy} + +`dictGetHierarchy('dict_name', id)` + +- 对于'dict\_name'分层字典,返回从'id'开始并沿父元素链继续的字典键数组。返回Array(UInt64) + +## dictHas {#dicthas} + +`dictHas('dict_name', id)` + +- 检查字典是否存在指定的`id`。如果不存在,则返回0;如果存在,则返回1。 + +[来源文章](https://clickhouse.tech/docs/en/query_language/functions/ext_dict_functions/) diff --git a/docs/zh/query_language/functions/functions_for_nulls.md b/docs/zh/sql_reference/functions/functions_for_nulls.md similarity index 93% rename from docs/zh/query_language/functions/functions_for_nulls.md rename to docs/zh/sql_reference/functions/functions_for_nulls.md index d6db2906e92..9252d8bfeb0 100644 --- a/docs/zh/query_language/functions/functions_for_nulls.md +++ b/docs/zh/sql_reference/functions/functions_for_nulls.md @@ -1,3 +1,4 @@ + # Nullable处理函数 {#nullablechu-li-han-shu} ## isNull {#isnull} @@ -76,7 +77,7 @@ 1 rows in set. Elapsed: 0.010 sec. -## coalesce {#coalesce} +## 合并 {#coalesce} 检查从左到右是否传递了«NULL»参数并返回第一个非`'NULL`参数。 @@ -88,8 +89,8 @@ **返回值** -- 第一个非’NULL\`参数。 -- `NULL`,如果所有参数都是’NULL\`。 +- 第一个非'NULL\`参数。 +- `NULL`,如果所有参数都是'NULL\`。 **示例** @@ -125,12 +126,12 @@ **参数:** - `x` — 要检查«NULL»的值。 -- `alt` — 如果`x`为’NULL\`,函数返回的值。 +- `alt` — 如果`x`为'NULL\`,函数返回的值。 **返回值** -- The value `x`, if `x` is not `NULL`. -- The value `alt`, if `x` is `NULL`. +- 价值 `x`,如果 `x` 不是 `NULL`. +- 价值 `alt`,如果 `x` 是 `NULL`. 
**示例** @@ -177,7 +178,7 @@ ## assumeNotNull {#assumenotnull} -将[Nullable](../../data_types/nullable.md)类型的值转换为非`Nullable`类型的值。 +将[可为空](../../sql_reference/functions/functions_for_nulls.md)类型的值转换为非`Nullable`类型的值。 assumeNotNull(x) @@ -221,7 +222,7 @@ │ Int8 │ └──────────────────────────────┘ -## toNullable {#tonullable} +## 可调整 {#tonullable} 将参数的类型转换为`Nullable`。 diff --git a/docs/zh/query_language/functions/geo.md b/docs/zh/sql_reference/functions/geo.md similarity index 89% rename from docs/zh/query_language/functions/geo.md rename to docs/zh/sql_reference/functions/geo.md index 3e6e6aa6b64..3f6e6a3bb10 100644 --- a/docs/zh/query_language/functions/geo.md +++ b/docs/zh/sql_reference/functions/geo.md @@ -1,6 +1,7 @@ + # GEO函数 {#geohan-shu} -## greatCircleDistance {#greatcircledistance} +## 大圆形距离 {#greatcircledistance} 使用[great-circle distance公式](https://en.wikipedia.org/wiki/Great-circle_distance)计算地球表面两点之间的距离。 @@ -35,7 +36,7 @@ SELECT greatCircleDistance(55.755831, 37.617673, -55.755831, -37.617673) └───────────────────────────────────────────────────────────────────┘ ``` -## pointInEllipses {#pointinellipses} +## 尖尖的人 {#pointinellipses} 检查指定的点是否至少包含在指定的一个椭圆中。 下述中的坐标是几何图形在笛卡尔坐标系中的位置。 @@ -78,8 +79,8 @@ pointInPolygon((x, y), [(a, b), (c, d) ...], ...) **输入参数** -- `(x, y)` — 平面上某个点的坐标。[Tuple](../../data_types/tuple.md)类型,包含坐标的两个数字。 -- `[(a, b), (c, d) ...]` — 多边形的顶点。[Array](../../data_types/array.md)类型。每个顶点由一对坐标`(a, b)`表示。顶点可以按顺时针或逆时针指定。顶点的个数应该大于等于3。同时只能是常量的。 +- `(x, y)` — 平面上某个点的坐标。[元组](../../sql_reference/functions/geo.md)类型,包含坐标的两个数字。 +- `[(a, b), (c, d) ...]` — 多边形的顶点。[阵列](../../sql_reference/functions/geo.md)类型。每个顶点由一对坐标`(a, b)`表示。顶点可以按顺时针或逆时针指定。顶点的个数应该大于等于3。同时只能是常量的。 - 该函数还支持镂空的多边形(切除部分)。如果需要,可以使用函数的其他参数定义需要切除部分的多边形。(The function does not support non-simply-connected polygons.) 
**返回值** @@ -163,16 +164,16 @@ geoToH3(lon, lat, resolution) **输入值** -- `lon` — 经度。 [Float64](../../data_types/float.md)类型。 -- `lat` — 纬度。 [Float64](../../data_types/float.md)类型。 -- `resolution` — 索引的分辨率。 取值范围为: `[0, 15]`。 [UInt8](../../data_types/int_uint.md)类型。 +- `lon` — 经度。 [Float64](../../sql_reference/functions/geo.md)类型。 +- `lat` — 纬度。 [Float64](../../sql_reference/functions/geo.md)类型。 +- `resolution` — 索引的分辨率。 取值范围为: `[0, 15]`。 [UInt8](../../sql_reference/functions/geo.md)类型。 **返回值** - H3中六边形的索引值。 - 发生异常时返回0。 -[UInt64](../../data_types/int_uint.md)类型。 +[UInt64](../../sql_reference/functions/geo.md)类型。 **示例** diff --git a/docs/zh/query_language/functions/hash_functions.md b/docs/zh/sql_reference/functions/hash_functions.md similarity index 92% rename from docs/zh/query_language/functions/hash_functions.md rename to docs/zh/sql_reference/functions/hash_functions.md index 835da4a9204..9dc4aa9b794 100644 --- a/docs/zh/query_language/functions/hash_functions.md +++ b/docs/zh/sql_reference/functions/hash_functions.md @@ -1,3 +1,4 @@ + # Hash函数 {#hashhan-shu} Hash函数可以用于将元素不可逆的伪随机打乱。 @@ -6,12 +7,12 @@ Hash函数可以用于将元素不可逆的伪随机打乱。 计算字符串的MD5。然后获取结果的前8个字节并将它们作为UInt64(大端)返回。 此函数相当低效(500万个短字符串/秒/核心)。 -如果您不需要一定使用MD5,请使用‘sipHash64’函数。 +如果您不需要一定使用MD5,请使用'sipHash64'函数。 ## MD5 {#md5} 计算字符串的MD5并将结果放入FixedString(16)中返回。 -如果您只是需要一个128位的hash,同时不需要一定使用MD5,请使用‘sipHash128’函数。 +如果您只是需要一个128位的hash,同时不需要一定使用MD5,请使用'sipHash128'函数。 如果您要获得与md5sum程序相同的输出结果,请使用lower(hex(MD5(s)))。 ## sipHash64 {#siphash64} @@ -56,7 +57,7 @@ SipHash是一种加密哈希函数。它的处理性能至少比MD5快三倍。 我们建议仅在必须使用这些Hash函数且无法更改的情况下使用这些函数。 即使在这些情况下,我们仍建议将函数采用在写入数据时使用预计算的方式将其计算完毕。而不是在SELECT中计算它们。 -## URLHash(url\[, N\]) {#urlhashurl-n} +## URLHash(url\[,N\]) {#urlhashurl-n} 一种快速的非加密哈希函数,用于规范化的从URL获得的字符串。 `URLHash(s)` - 从一个字符串计算一个哈希,如果结尾存在尾随符号`/`,`?`或`#`则忽略。 @@ -93,19 +94,19 @@ URL的层级与URLHierarchy中的层级相同。 此函数被用于Yandex.Metric 接受UInt64类型的参数。返回Int32。 有关更多信息,请参见链接:[JumpConsistentHash](https://arxiv.org/pdf/1406.2294.pdf) -## murmurHash2\_32, murmurHash2\_64 {#murmurhash2-32-murmurhash2-64} +## murmurHash2\_32,murmurHash2\_64 {#murmurhash2-32-murmurhash2-64} 计算字符串的MurmurHash2。 接受一个String类型的参数。返回UInt64或UInt32。 有关更多信息,请参阅链接:[MurmurHash2](https://github.com/aappleby/smhasher) -## murmurHash3\_32, murmurHash3\_64, murmurHash3\_128 {#murmurhash3-32-murmurhash3-64-murmurhash3-128} +## murmurHash3\_32,murmurHash3\_64,murmurHash3\_128 {#murmurhash3-32-murmurhash3-64-murmurhash3-128} 计算字符串的MurmurHash3。 接受一个String类型的参数。返回UInt64或UInt32或FixedString(16)。 有关更多信息,请参阅链接:[MurmurHash3](https://github.com/aappleby/smhasher) -## xxHash32, xxHash64 {#xxhash32-xxhash64} +## xxHash32,xxHash64 {#xxhash32-xxhash64} 计算字符串的xxHash。 接受一个String类型的参数。返回UInt64或UInt32。 diff --git a/docs/zh/query_language/functions/higher_order_functions.md b/docs/zh/sql_reference/functions/higher_order_functions.md similarity index 76% rename from docs/zh/query_language/functions/higher_order_functions.md rename to docs/zh/sql_reference/functions/higher_order_functions.md index 9e84a73f0b8..6d090e7330d 100644 --- a/docs/zh/query_language/functions/higher_order_functions.md +++ b/docs/zh/sql_reference/functions/higher_order_functions.md @@ -1,3 +1,4 @@ + # 高阶函数 {#gao-jie-han-shu} ## `->` 运算符, lambda(params, expr) 函数 {#yun-suan-fu-lambdaparams-expr-han-shu} @@ -10,17 +11,17 @@ 高阶函数可以接受多个参数的lambda函数作为其参数,在这种情况下,高阶函数需要同时传递几个长度相等的数组,这些数组将被传递给lambda参数。 -除了’arrayMap’和’arrayFilter’以外的所有其他函数,都可以省略第一个参数(lambda函数)。在这种情况下,默认返回数组元素本身。 +除了'arrayMap'和'arrayFilter'以外的所有其他函数,都可以省略第一个参数(lambda函数)。在这种情况下,默认返回数组元素本身。 ### arrayMap(func, arr1, …) 
{#higher_order_functions-array-map} 将arr -将从’func’函数的原始应用程序获得的数组返回到’arr’数组中的每个元素。 -Returns an array obtained from the original application of the ‘func’ function to each element in the ‘arr’ array. +将从'func'函数的原始应用程序获得的数组返回到'arr'数组中的每个元素。 +返回从原始应用程序获得的数组 ‘func’ 函数中的每个元素 ‘arr’ 阵列。 ### arrayFilter(func, arr1, …) {#arrayfilterfunc-arr1} -Returns an array containing only the elements in ‘arr1’ for which ‘func’ returns something other than 0. +返回一个仅包含以下元素的数组 ‘arr1’ 对于哪个 ‘func’ 返回0以外的内容。 示例: @@ -47,27 +48,27 @@ SELECT ### arrayCount(\[func,\] arr1, …) {#arraycountfunc-arr1} -返回数组arr中非零元素的数量,如果指定了‘func’,则通过‘func’的返回值确定元素是否为非零元素。 +返回数组arr中非零元素的数量,如果指定了'func',则通过'func'的返回值确定元素是否为非零元素。 ### arrayExists(\[func,\] arr1, …) {#arrayexistsfunc-arr1} -返回数组‘arr’中是否存在非零元素,如果指定了‘func’,则使用‘func’的返回值确定元素是否为非零元素。 +返回数组'arr'中是否存在非零元素,如果指定了'func',则使用'func'的返回值确定元素是否为非零元素。 ### arrayAll(\[func,\] arr1, …) {#arrayallfunc-arr1} -返回数组‘arr’中是否存在为零的元素,如果指定了‘func’,则使用‘func’的返回值确定元素是否为零元素。 +返回数组'arr'中是否存在为零的元素,如果指定了'func',则使用'func'的返回值确定元素是否为零元素。 ### arraySum(\[func,\] arr1, …) {#arraysumfunc-arr1} -计算arr数组的总和,如果指定了‘func’,则通过‘func’的返回值计算数组的总和。 +计算arr数组的总和,如果指定了'func',则通过'func'的返回值计算数组的总和。 ### arrayFirst(func, arr1, …) {#arrayfirstfunc-arr1} -返回数组中第一个匹配的元素,函数使用‘func’匹配所有元素,直到找到第一个匹配的元素。 +返回数组中第一个匹配的元素,函数使用'func'匹配所有元素,直到找到第一个匹配的元素。 ### arrayFirstIndex(func, arr1, …) {#arrayfirstindexfunc-arr1} -返回数组中第一个匹配的元素的下标索引,函数使用‘func’匹配所有元素,直到找到第一个匹配的元素。 +返回数组中第一个匹配的元素的下标索引,函数使用'func'匹配所有元素,直到找到第一个匹配的元素。 ### arrayCumSum(\[func,\] arr1, …) {#arraycumsumfunc-arr1} diff --git a/docs/zh/query_language/functions/in_functions.md b/docs/zh/sql_reference/functions/in_functions.md similarity index 65% rename from docs/zh/query_language/functions/in_functions.md rename to docs/zh/sql_reference/functions/in_functions.md index 60df3e25db1..f4f358bad9a 100644 --- a/docs/zh/query_language/functions/in_functions.md +++ b/docs/zh/sql_reference/functions/in_functions.md @@ -1,8 +1,9 @@ + # IN运算符相关函数 {#inyun-suan-fu-xiang-guan-han-shu} -## in, notIn, globalIn, globalNotIn {#in-notin-globalin-globalnotin} +## in,notIn,globalIn,globalNotIn {#in-notin-globalin-globalnotin} -请参阅[IN 运算符](../select.md#select-in-operators)部分。 +请参阅[IN 运算符](../statements/select.md#select-in-operators)部分。 ## tuple(x, y, …), operator (x, y, …) {#tuplex-y-operator-x-y} @@ -10,10 +11,10 @@ 对于具有类型T1,T2,…的列,它返回包含这些列的元组(T1,T2,…)。 执行该函数没有任何成本。 元组通常用作IN运算符的中间参数值,或用于创建lambda函数的形参列表。 元组不能写入表。 -## tupleElement(tuple, n), operator x.N {#tupleelementtuple-n-operator-x-n} +## 元组元素(元组,n),运算符x.N {#tupleelementtuple-n-operator-x-n} 函数用于从元组中获取列。 -’N’是列索引,从1开始。N必须是常量正整数常数,并且不大于元组的大小。 +'N'是列索引,从1开始。N必须是常量正整数常数,并且不大于元组的大小。 执行该函数没有任何成本。 [来源文章](https://clickhouse.tech/docs/en/query_language/functions/in_functions/) diff --git a/docs/zh/query_language/functions/index.md b/docs/zh/sql_reference/functions/index.md similarity index 91% rename from docs/zh/query_language/functions/index.md rename to docs/zh/sql_reference/functions/index.md index 8d178592e92..1f61a1f2919 100644 --- a/docs/zh/query_language/functions/index.md +++ b/docs/zh/sql_reference/functions/index.md @@ -1,10 +1,11 @@ + # 函数 {#han-shu} ClickHouse中至少存在两种类型的函数 - 常规函数(它们称之为«函数»)和聚合函数。 常规函数的工作就像分别为每一行执行一次函数计算一样(对于每一行,函数的结果不依赖于其他行)。 聚合函数则从各行累积一组值(即函数的结果以来整个结果集)。 在本节中,我们将讨论常规函数。 有关聚合函数,请参阅«聚合函数»一节。 - \* - ’arrayJoin’函数与表函数均属于第三种类型的函数。 \* + \* - 'arrayJoin'函数与表函数均属于第三种类型的函数。 \* ## 强类型 {#qiang-lei-xing} @@ -22,7 +23,7 @@ ClickHouse中至少存在两种类型的函数 - 常规函数(它们称之为 为了简单起见,某些函数的某些参数只能是常量。 例如,LIKE运算符的右参数必须是常量。 几乎所有函数都为常量参数返回常量。 除了用于生成随机数的函数。 
-’now’函数为在不同时间运行的查询返回不同的值,但结果被视为常量,因为常量在单个查询中很重要。 +'now'函数为在不同时间运行的查询返回不同的值,但结果被视为常量,因为常量在单个查询中很重要。 常量表达式也被视为常量(例如,LIKE运算符的右半部分可以由多个常量构造)。 对于常量和非常量参数,可以以不同方式实现函数(执行不同的代码)。 但是,对于包含相同数据的常量和非常量参数它们的结果应该是一致的。 @@ -54,8 +55,8 @@ ClickHouse中至少存在两种类型的函数 - 常规函数(它们称之为 这意味着可以在不同的服务器上执行功能。 例如,在查询`SELECT f(sum(g(x)))FROM distributed_table GROUP BY h(y)中,` -- 如果`distributed_table`至少有两个分片,则在远程服务器上执行函数’g’和’h’,并在请求服务器上执行函数’f’。 -- 如果`distributed_table`只有一个分片,则在该分片的服务器上执行所有’f’,’g’和’h’功能。 +- 如果`distributed_table`至少有两个分片,则在远程服务器上执行函数'g'和'h',并在请求服务器上执行函数'f'。 +- 如果`distributed_table`只有一个分片,则在该分片的服务器上执行所有'f','g'和'h'功能。 函数的结果通常不依赖于它在哪个服务器上执行。但是,有时这很重要。 例如,使用字典的函数时将使用运行它们的服务器上存在的字典。 diff --git a/docs/zh/query_language/functions/introspection.md b/docs/zh/sql_reference/functions/introspection.md similarity index 63% rename from docs/zh/query_language/functions/introspection.md rename to docs/zh/sql_reference/functions/introspection.md index bb1d884d15b..f0c907b3e67 100644 --- a/docs/zh/query_language/functions/introspection.md +++ b/docs/zh/sql_reference/functions/introspection.md @@ -1,61 +1,64 @@ --- -en_copy: true +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 65 +toc_title: "\u81EA\u7701" --- -# Introspection Functions {#introspection-functions} +# 内省功能 {#introspection-functions} -You can use functions described in this chapter to introspect [ELF](https://en.wikipedia.org/wiki/Executable_and_Linkable_Format) and [DWARF](https://en.wikipedia.org/wiki/DWARF) for query profiling. +您可以使用本章中描述的函数来反省 [ELF](https://en.wikipedia.org/wiki/Executable_and_Linkable_Format) 和 [DWARF](https://en.wikipedia.org/wiki/DWARF) 用于查询分析。 -!!! warning "Warning" - These functions are slow and may impose security considerations. +!!! warning "警告" + 这些功能很慢,可能会强加安全考虑。 -For proper operation of introspection functions: +对于内省功能的正确操作: -- Install the `clickhouse-common-static-dbg` package. +- 安装 `clickhouse-common-static-dbg` 包。 -- Set the [allow\_introspection\_functions](../../operations/settings/settings.md#settings-allow_introspection_functions) setting to 1. +- 设置 [allow\_introspection\_functions](../../operations/settings/settings.md#settings-allow_introspection_functions) 设置为1。 For security reasons introspection functions are disabled by default. -ClickHouse saves profiler reports to the [trace\_log](../../operations/system_tables.md#system_tables-trace_log) system table. Make sure the table and profiler are configured properly. +ClickHouse将探查器报告保存到 [trace\_log](../../operations/system_tables.md#system_tables-trace_log) 系统表. 确保正确配置了表和探查器。 ## addressToLine {#addresstoline} -Converts virtual memory address inside ClickHouse server process to the filename and the line number in ClickHouse source code. +将ClickHouse服务器进程内的虚拟内存地址转换为ClickHouse源代码中的文件名和行号。 -If you use official ClickHouse packages, you need to install the `clickhouse-common-static-dbg` package. +如果您使用官方的ClickHouse软件包,您需要安装 `clickhouse-common-static-dbg` 包。 -**Syntax** +**语法** ``` sql addressToLine(address_of_binary_instruction) ``` -**Parameters** +**参数** -- `address_of_binary_instruction` ([UInt64](../../data_types/int_uint.md)) — Address of instruction in a running process. +- `address_of_binary_instruction` ([UInt64](../../sql_reference/data_types/int_uint.md)) — Address of instruction in a running process. -**Returned value** +**返回值** -- Source code filename and the line number in this file delimited by colon. 
+- 源代码文件名和此文件中用冒号分隔的行号。 - For example, `/build/obj-x86_64-linux-gnu/../dbms/Common/ThreadPool.cpp:199`, where `199` is a line number. + For example, `/build/obj-x86_64-linux-gnu/../src/Common/ThreadPool.cpp:199`, where `199` is a line number. -- Name of a binary, if the function couldn’t find the debug information. +- 二进制文件的名称,如果函数找不到调试信息。 -- Empty string, if the address is not valid. +- 空字符串,如果地址无效。 -Type: [String](../../data_types/string.md). +类型: [字符串](../../sql_reference/data_types/string.md). -**Example** +**示例** -Enabling introspection functions: +启用内省功能: ``` sql SET allow_introspection_functions=1 ``` -Selecting the first string from the `trace_log` system table: +从中选择第一个字符串 `trace_log` 系统表: ``` sql SELECT * FROM system.trace_log LIMIT 1 \G @@ -73,9 +76,9 @@ query_id: 421b6855-1858-45a5-8f37-f383409d6d72 trace: [140658411141617,94784174532828,94784076370703,94784076372094,94784076361020,94784175007680,140658411116251,140658403895439] ``` -The `trace` field contains the stack trace at the moment of sampling. +该 `trace` 字段包含采样时的堆栈跟踪。 -Getting the source code filename and the line number for a single address: +获取单个地址的源代码文件名和行号: ``` sql SELECT addressToLine(94784076370703) \G @@ -84,10 +87,10 @@ SELECT addressToLine(94784076370703) \G ``` text Row 1: ────── -addressToLine(94784076370703): /build/obj-x86_64-linux-gnu/../dbms/Common/ThreadPool.cpp:199 +addressToLine(94784076370703): /build/obj-x86_64-linux-gnu/../src/Common/ThreadPool.cpp:199 ``` -Applying the function to the whole stack trace: +将函数应用于整个堆栈跟踪: ``` sql SELECT @@ -97,15 +100,15 @@ LIMIT 1 \G ``` -The [arrayMap](higher_order_functions.md#higher_order_functions-array-map) function allows to process each individual element of the `trace` array by the `addressToLine` function. The result of this processing you see in the `trace_source_code_lines` column of output. +该 [arrayMap](higher_order_functions.md#higher_order_functions-array-map) 功能允许处理的每个单独的元素 `trace` 阵列由 `addressToLine` 功能。 这种处理的结果,你在看 `trace_source_code_lines` 列的输出。 ``` text Row 1: ────── trace_source_code_lines: /lib/x86_64-linux-gnu/libpthread-2.27.so /usr/lib/debug/usr/bin/clickhouse -/build/obj-x86_64-linux-gnu/../dbms/Common/ThreadPool.cpp:199 -/build/obj-x86_64-linux-gnu/../dbms/Common/ThreadPool.h:155 +/build/obj-x86_64-linux-gnu/../src/Common/ThreadPool.cpp:199 +/build/obj-x86_64-linux-gnu/../src/Common/ThreadPool.h:155 /usr/include/c++/9/bits/atomic_base.h:551 /usr/lib/debug/usr/bin/clickhouse /lib/x86_64-linux-gnu/libpthread-2.27.so @@ -114,34 +117,34 @@ trace_source_code_lines: /lib/x86_64-linux-gnu/libpthread-2.27.so ## addressToSymbol {#addresstosymbol} -Converts virtual memory address inside ClickHouse server process to the symbol from ClickHouse object files. +将ClickHouse服务器进程内的虚拟内存地址转换为ClickHouse对象文件中的符号。 -**Syntax** +**语法** ``` sql addressToSymbol(address_of_binary_instruction) ``` -**Parameters** +**参数** -- `address_of_binary_instruction` ([UInt64](../../data_types/int_uint.md)) — Address of instruction in a running process. +- `address_of_binary_instruction` ([UInt64](../../sql_reference/data_types/int_uint.md)) — Address of instruction in a running process. -**Returned value** +**返回值** -- Symbol from ClickHouse object files. -- Empty string, if the address is not valid. +- 来自ClickHouse对象文件的符号。 +- 空字符串,如果地址无效。 -Type: [String](../../data_types/string.md). +类型: [字符串](../../sql_reference/data_types/string.md). 
-**Example** +**示例** -Enabling introspection functions: +启用内省功能: ``` sql SET allow_introspection_functions=1 ``` -Selecting the first string from the `trace_log` system table: +从中选择第一个字符串 `trace_log` 系统表: ``` sql SELECT * FROM system.trace_log LIMIT 1 \G @@ -159,9 +162,9 @@ query_id: 724028bf-f550-45aa-910d-2af6212b94ac trace: [94138803686098,94138815010911,94138815096522,94138815101224,94138815102091,94138814222988,94138806823642,94138814457211,94138806823642,94138814457211,94138806823642,94138806795179,94138806796144,94138753770094,94138753771646,94138753760572,94138852407232,140399185266395,140399178045583] ``` -The `trace` field contains the stack trace at the moment of sampling. +该 `trace` 字段包含采样时的堆栈跟踪。 -Getting a symbol for a single address: +获取单个地址的符号: ``` sql SELECT addressToSymbol(94138803686098) \G @@ -173,7 +176,7 @@ Row 1: addressToSymbol(94138803686098): _ZNK2DB24IAggregateFunctionHelperINS_20AggregateFunctionSumImmNS_24AggregateFunctionSumDataImEEEEE19addBatchSinglePlaceEmPcPPKNS_7IColumnEPNS_5ArenaE ``` -Applying the function to the whole stack trace: +将函数应用于整个堆栈跟踪: ``` sql SELECT @@ -183,7 +186,7 @@ LIMIT 1 \G ``` -The [arrayMap](higher_order_functions.md#higher_order_functions-array-map) function allows to process each individual element of the `trace` array by the `addressToSymbols` function. The result of this processing you see in the `trace_symbols` column of output. +该 [arrayMap](higher_order_functions.md#higher_order_functions-array-map) 功能允许处理的每个单独的元素 `trace` 阵列由 `addressToSymbols` 功能。 这种处理的结果,你在看 `trace_symbols` 列的输出。 ``` text Row 1: @@ -211,34 +214,34 @@ clone ## demangle {#demangle} -Converts a symbol that you can get using the [addressToSymbol](#addresstosymbol) function to the C++ function name. +转换一个符号,您可以使用 [addressToSymbol](#addresstosymbol) 函数到C++函数名。 -**Syntax** +**语法** ``` sql demangle(symbol) ``` -**Parameters** +**参数** -- `symbol` ([String](../../data_types/string.md)) — Symbol from an object file. +- `symbol` ([字符串](../../sql_reference/data_types/string.md)) — Symbol from an object file. -**Returned value** +**返回值** -- Name of the C++ function. -- Empty string if a symbol is not valid. +- C++函数的名称。 +- 如果符号无效,则为空字符串。 -Type: [String](../../data_types/string.md). +类型: [字符串](../../sql_reference/data_types/string.md). -**Example** +**示例** -Enabling introspection functions: +启用内省功能: ``` sql SET allow_introspection_functions=1 ``` -Selecting the first string from the `trace_log` system table: +从中选择第一个字符串 `trace_log` 系统表: ``` sql SELECT * FROM system.trace_log LIMIT 1 \G @@ -256,9 +259,9 @@ query_id: 724028bf-f550-45aa-910d-2af6212b94ac trace: [94138803686098,94138815010911,94138815096522,94138815101224,94138815102091,94138814222988,94138806823642,94138814457211,94138806823642,94138814457211,94138806823642,94138806795179,94138806796144,94138753770094,94138753771646,94138753760572,94138852407232,140399185266395,140399178045583] ``` -The `trace` field contains the stack trace at the moment of sampling. 
+该 `trace` 字段包含采样时的堆栈跟踪。 -Getting a function name for a single address: +获取单个地址的函数名称: ``` sql SELECT demangle(addressToSymbol(94138803686098)) \G @@ -270,7 +273,7 @@ Row 1: demangle(addressToSymbol(94138803686098)): DB::IAggregateFunctionHelper > >::addBatchSinglePlace(unsigned long, char*, DB::IColumn const**, DB::Arena*) const ``` -Applying the function to the whole stack trace: +将函数应用于整个堆栈跟踪: ``` sql SELECT @@ -280,7 +283,7 @@ LIMIT 1 \G ``` -The [arrayMap](higher_order_functions.md#higher_order_functions-array-map) function allows to process each individual element of the `trace` array by the `demangle` function. The result of this processing you see in the `trace_functions` column of output. +该 [arrayMap](higher_order_functions.md#higher_order_functions-array-map) 功能允许处理的每个单独的元素 `trace` 阵列由 `demangle` 功能。 这种处理的结果,你在看 `trace_functions` 列的输出。 ``` text Row 1: diff --git a/docs/zh/query_language/functions/ip_address_functions.md b/docs/zh/sql_reference/functions/ip_address_functions.md similarity index 92% rename from docs/zh/query_language/functions/ip_address_functions.md rename to docs/zh/sql_reference/functions/ip_address_functions.md index 0e012f90f84..17f4c4a5991 100644 --- a/docs/zh/query_language/functions/ip_address_functions.md +++ b/docs/zh/sql_reference/functions/ip_address_functions.md @@ -1,3 +1,4 @@ + # IP函数 {#iphan-shu} ## IPv4NumToString(num) {#ipv4numtostringnum} @@ -37,7 +38,7 @@ LIMIT 10 │ 83.149.48.xxx │ 17406 │ └────────────────┴───────┘ -由于使用’xxx’是不规范的,因此将来可能会更改。我们建议您不要依赖此格式。 +由于使用'xxx'是不规范的,因此将来可能会更改。我们建议您不要依赖此格式。 ### IPv6NumToString(x) {#ipv6numtostringx} @@ -117,7 +118,7 @@ SELECT IPv6NumToString(IPv4ToIPv6(IPv4StringToNum('192.168.0.1'))) AS addr │ ::ffff:192.168.0.1 │ └────────────────────┘ -## cutIPv6(x, bitsToCutForIPv6, bitsToCutForIPv4) {#cutipv6x-bitstocutforipv6-bitstocutforipv4} +## cutIPv6(x,bitsToCutForIPv6,bitsToCutForIPv4) {#cutipv6x-bitstocutforipv6-bitstocutforipv4} 接受一个FixedString(16)类型的IPv6地址,返回一个String,这个String中包含了删除指定位之后的地址的文本格式。例如: @@ -134,7 +135,7 @@ SELECT │ 2001:db8:ac10:fe01:feed:babe:cafe:0 │ ::ffff:192.168.0.0 │ └─────────────────────────────────────┴─────────────────────┘ -## IPv4CIDRToRange(ipv4, cidr), {#ipv4cidrtorangeipv4-cidr} +## ツ古カツ益ツ催ツ団ツ法ツ人), {#ipv4cidrtorangeipv4-cidr} 接受一个IPv4地址以及一个UInt8类型的CIDR。返回包含子网最低范围以及最高范围的元组。 @@ -146,7 +147,7 @@ SELECT IPv4CIDRToRange(toIPv4('192.168.5.2'), 16) │ ('192.168.0.0','192.168.255.255') │ └────────────────────────────────────────────┘ -## IPv6CIDRToRange(ipv6, cidr), {#ipv6cidrtorangeipv6-cidr} +## ツ暗ェツ氾环催ツ団ツ法ツ人), {#ipv6cidrtorangeipv6-cidr} 接受一个IPv6地址以及一个UInt8类型的CIDR。返回包含子网最低范围以及最高范围的元组。 @@ -158,9 +159,9 @@ SELECT IPv6CIDRToRange(toIPv6('2001:0db8:0000:85a3:0000:0000:ac1f:8001'), 32); │ ('2001:db8::','2001:db8:ffff:ffff:ffff:ffff:ffff:ffff') │ └────────────────────────────────────────────────────────────────────────┘ -## toIPv4(string) {#toipv4string} +## toIPv4(字符串) {#toipv4string} -`IPv4StringToNum()`的别名,它采用字符串形式的IPv4地址并返回[IPv4](../../data_types/domains/ipv4.md)类型的值,该二进制值等于`IPv4StringToNum()`返回的值。 +`IPv4StringToNum()`的别名,它采用字符串形式的IPv4地址并返回[IPv4](../../sql_reference/functions/ip_address_functions.md)类型的值,该二进制值等于`IPv4StringToNum()`返回的值。 ``` sql WITH @@ -186,9 +187,9 @@ SELECT │ ABE1822D │ ABE1822D │ └───────────────────────────────────┴──────────────────────────┘ -## toIPv6(string) {#toipv6string} +## toIPv6(字符串) {#toipv6string} -`IPv6StringToNum()`的别名,它采用字符串形式的IPv6地址并返回[IPv6](../../data_types/domains/ipv6.md)类型的值,该二进制值等于`IPv6StringToNum()`返回的值。 
+`IPv6StringToNum()`的别名,它采用字符串形式的IPv6地址并返回[IPv6](../../sql_reference/functions/ip_address_functions.md)类型的值,该二进制值等于`IPv6StringToNum()`返回的值。 ``` sql WITH diff --git a/docs/zh/query_language/functions/json_functions.md b/docs/zh/sql_reference/functions/json_functions.md similarity index 92% rename from docs/zh/query_language/functions/json_functions.md rename to docs/zh/sql_reference/functions/json_functions.md index 5203ae91291..ca76edde09c 100644 --- a/docs/zh/query_language/functions/json_functions.md +++ b/docs/zh/sql_reference/functions/json_functions.md @@ -1,3 +1,4 @@ + # JSON函数 {#jsonhan-shu} 在Yandex.Metrica中,用户使用JSON作为访问参数。为了处理这些JSON,实现了一些函数。(尽管在大多数情况下,JSON是预先进行额外处理的,并将结果值放在单独的列中。)所有的这些函数都进行了尽可能的假设。以使函数能够尽快的完成工作。 @@ -9,27 +10,27 @@ 3. 函数可以随意的在多层嵌套结构下查找字段。如果存在多个匹配字段,则返回第一个匹配字段。 4. JSON除字符串文本外不存在空格字符。 -## visitParamHas(params, name) {#visitparamhasparams-name} +## ツ环板(ョツ嘉ッツ偲青visャツ静ャツ青サツ催ャツ渉) {#visitparamhasparams-name} 检查是否存在«name»名称的字段 -## visitParamExtractUInt(params, name) {#visitparamextractuintparams-name} +## 访问paramextractuint(参数,名称) {#visitparamextractuintparams-name} 将名为«name»的字段的值解析成UInt64。如果这是一个字符串字段,函数将尝试从字符串的开头解析一个数字。如果该字段不存在,或无法从它中解析到数字,则返回0。 -## visitParamExtractInt(params, name) {#visitparamextractintparams-name} +## visitParamExtractInt(参数,名称) {#visitparamextractintparams-name} 与visitParamExtractUInt相同,但返回Int64。 -## visitParamExtractFloat(params, name) {#visitparamextractfloatparams-name} +## 访问paramextractfloat(参数,名称) {#visitparamextractfloatparams-name} 与visitParamExtractUInt相同,但返回Float64。 -## visitParamExtractBool(params, name) {#visitparamextractboolparams-name} +## ツ环板(ョツ嘉ッツ偲青妥-ツ姪(不ツ督ョツ産) {#visitparamextractboolparams-name} 解析true/false值。其结果是UInt8类型的。 -## visitParamExtractRaw(params, name) {#visitparamextractrawparams-name} +## 掳胫((禄脢鹿脷露胫鲁隆鹿((酶-11-16""\[脪陆(,,,) {#visitparamextractrawparams-name} 返回字段的值,包含空格符。 @@ -38,7 +39,7 @@ visitParamExtractRaw('{"abc":"\\n\\u0000"}', 'abc') = '"\\n\\u0000"' visitParamExtractRaw('{"abc":{"def":[1,2,3]}}', 'abc') = '{"def":[1,2,3]}' -## visitParamExtractString(params, name) {#visitparamextractstringparams-name} +## visitParamExtractString(参数,名称) {#visitparamextractstringparams-name} 使用双引号解析字符串。这个值没有进行转义。如果转义失败,它将返回一个空白字符串。 diff --git a/docs/zh/query_language/functions/logical_functions.md b/docs/zh/sql_reference/functions/logical_functions.md similarity index 70% rename from docs/zh/query_language/functions/logical_functions.md rename to docs/zh/sql_reference/functions/logical_functions.md index 2f2a61f57a6..18a383edbdb 100644 --- a/docs/zh/query_language/functions/logical_functions.md +++ b/docs/zh/sql_reference/functions/logical_functions.md @@ -1,15 +1,16 @@ + # 逻辑函数 {#luo-ji-han-shu} 逻辑函数可以接受任何数字类型的参数,并返回UInt8类型的0或1。 当向函数传递零时,函数将判定为«false»,否则,任何其他非零的值都将被判定为«true»。 -## and, AND operator {#and-and-operator} +## 和,和运营商 {#and-and-operator} -## or, OR operator {#or-or-operator} +## 或,或运营商 {#or-or-operator} -## not, NOT operator {#not-not-operator} +## 不是,不是运营商 {#not-not-operator} -## xor {#xor} +## 异或 {#xor} [来源文章](https://clickhouse.tech/docs/en/query_language/functions/logical_functions/) diff --git a/docs/zh/sql_reference/functions/machine_learning_functions.md b/docs/zh/sql_reference/functions/machine_learning_functions.md new file mode 100644 index 00000000000..0bdea52c59f --- /dev/null +++ b/docs/zh/sql_reference/functions/machine_learning_functions.md @@ -0,0 +1,16 @@ + +# 机器学习函数 {#ji-qi-xue-xi-han-shu} + +## evalMLMethod(预测) {#machine_learning_methods-evalmlmethod} + +使用拟合回归模型的预测请使用`evalMLMethod`函数。 
请参阅`linearRegression`中的链接。 + +## 随机线性回归 {#stochastic-linear-regression} + +`stochasticLinearRegression`聚合函数使用线性模型和MSE损失函数实现随机梯度下降法。 使用`evalMLMethod`来预测新数据。 +请参阅示例和注释[此处](../../sql_reference/functions/machine_learning_functions.md#agg_functions-stochasticlinearregression)。 + +## 随机逻辑回归 {#stochastic-logistic-regression} + +`stochasticLogisticRegression`聚合函数实现了二元分类问题的随机梯度下降法。 使用`evalMLMethod`来预测新数据。 +请参阅示例和注释[此处](../../sql_reference/functions/machine_learning_functions.md#agg_functions-stochasticlogisticregression)。 diff --git a/docs/zh/query_language/functions/math_functions.md b/docs/zh/sql_reference/functions/math_functions.md similarity index 88% rename from docs/zh/query_language/functions/math_functions.md rename to docs/zh/sql_reference/functions/math_functions.md index 38b3115e396..fef88389b86 100644 --- a/docs/zh/query_language/functions/math_functions.md +++ b/docs/zh/sql_reference/functions/math_functions.md @@ -1,3 +1,4 @@ + # 数学函数 {#shu-xue-han-shu} 以下所有的函数都返回一个Float64类型的数值。返回结果总是以尽可能最大精度返回,但还是可能与机器中可表示最接近该值的数字不同。 @@ -14,7 +15,7 @@ 接受一个数值类型的参数并返回它的指数。 -## log(x), ln(x) {#logx-lnx} +## log(x),ln(x) {#logx-lnx} 接受一个数值类型的参数并返回它的自然对数。 @@ -44,7 +45,7 @@ ## erf(x) {#erfx} -如果’x’是非负数,那么erf(x / σ√2)是具有正态分布且标准偏差为«σ»的随机变量的值与预期值之间的距离大于«x»。 +如果'x'是非负数,那么erf(x / σ√2)是具有正态分布且标准偏差为«σ»的随机变量的值与预期值之间的距离大于«x»。 示例 (三西格玛准则): @@ -76,7 +77,7 @@ SELECT erf(3 / sqrt(2)) 返回x的三角余弦值。 -## tan(x) {#tanx} +## 谭(x) {#tanx} 返回x的三角正切值。 @@ -88,11 +89,11 @@ SELECT erf(3 / sqrt(2)) 返回x的反三角余弦值。 -## atan(x) {#atanx} +## 阿坦(x) {#atanx} 返回x的反三角正切值。 -## pow(x, y), power(x, y) {#powx-y-powerx-y} +## pow(x,y),power(x,y) {#powx-y-powerx-y} 接受x和y两个参数。返回x的y次方。 diff --git a/docs/zh/query_language/functions/other_functions.md b/docs/zh/sql_reference/functions/other_functions.md similarity index 85% rename from docs/zh/query_language/functions/other_functions.md rename to docs/zh/sql_reference/functions/other_functions.md index 8383c57150c..e0c7e47be58 100644 --- a/docs/zh/query_language/functions/other_functions.md +++ b/docs/zh/sql_reference/functions/other_functions.md @@ -1,6 +1,7 @@ + # 其他函数 {#qi-ta-han-shu} -## hostName() {#hostname} +## 主机名() {#hostname} 返回一个字符串,其中包含执行此函数的主机的名称。 对于分布式处理,如果在远程服务器上执行此函数,则将返回远程服务器主机的名称。 @@ -12,7 +13,7 @@ **参数** -- `expr` — 任何一个返回[String](../../data_types/string.md)结果的表达式。[String](../../data_types/string.md) +- `expr` — 任何一个返回[字符串](../../sql_reference/functions/other_functions.md)结果的表达式。[字符串](../../sql_reference/functions/other_functions.md) **返回值** @@ -60,10 +61,10 @@ SELECT 'some-file-name' AS a, basename(a) 以文本格式(以制表符分隔)向控制台输出值时,计算近似宽度。 系统使用此函数实现Pretty格式。 -Calculates the approximate width when outputting values to the console in text format (tab-separated). -This function is used by the system for implementing Pretty formats. +以文本格式(制表符分隔)将值输出到控制台时,计算近似宽度。 +这个函数被系统用于实现漂亮的格式。 -`NULL` is represented as a string corresponding to `NULL` in `Pretty` formats. +`NULL` 表示为对应于 `NULL` 在 `Pretty` 格式。 SELECT visibleWidth(NULL) @@ -77,12 +78,12 @@ This function is used by the system for implementing Pretty formats. 如果将`NULL`作为参数传递给函数,那么它返回`Nullable(Nothing)`类型,它对应于ClickHouse中的内部`NULL`。 -## blockSize() {#function-blocksize} +## 块大小() {#function-blocksize} 获取Block的大小。 在ClickHouse中,查询始终工作在Block(包含列的部分的集合)上。此函数允许您获取调用其的块的大小。 -## materialize(x) {#materializex} +## 实现(x) {#materializex} 将一个常量列变为一个非常量列。 在ClickHouse中,非常量列和常量列在内存中的表示方式不同。尽管函数对于常量列和非常量总是返回相同的结果,但它们的工作方式可能完全不同(执行不同的代码)。此函数用于调试这种行为。 @@ -92,15 +93,15 @@ This function is used by the system for implementing Pretty formats. 
接受任何参数,包括`NULL`。始终返回0。 但是,函数的参数总是被计算的。该函数可以用于基准测试。 -## sleep(seconds) {#sleepseconds} +## 睡眠(秒) {#sleepseconds} -在每个Block上休眠’seconds’秒。可以是整数或浮点数。 +在每个Block上休眠'seconds'秒。可以是整数或浮点数。 -## sleepEachRow(seconds) {#sleepeachrowseconds} +## sleepEachRow(秒) {#sleepeachrowseconds} -在每行上休眠’seconds’秒。可以是整数或浮点数。 +在每行上休眠'seconds'秒。可以是整数或浮点数。 -## currentDatabase() {#currentdatabase} +## 当前数据库() {#currentdatabase} 返回当前数据库的名称。 当您需要在CREATE TABLE中的表引擎参数中指定数据库,您可以使用此函数。 @@ -119,11 +120,11 @@ This function is used by the system for implementing Pretty formats. ## hasColumnInTable(\[‘hostname’\[, ‘username’\[, ‘password’\]\],\] ‘database’, ‘table’, ‘column’) {#hascolumnintablehostname-username-password-database-table-column} -Accepts constant strings: database name, table name, and column name. Returns a UInt8 constant expression equal to 1 if there is a column, otherwise 0. If the hostname parameter is set, the test will run on a remote server. -The function throws an exception if the table does not exist. -For elements in a nested data structure, the function checks for the existence of a column. For the nested data structure itself, the function returns 0. +接受常量字符串:数据库名称、表名称和列名称。 如果存在列,则返回等于1的UInt8常量表达式,否则返回0。 如果设置了hostname参数,则测试将在远程服务器上运行。 +如果表不存在,该函数将引发异常。 +对于嵌套数据结构中的元素,该函数检查是否存在列。 对于嵌套数据结构本身,函数返回0。 -## bar {#function-bar} +## 酒吧 {#function-bar} 使用unicode构建图表。 @@ -176,7 +177,7 @@ ORDER BY h ASC │ 23 │ 400397 │ █████████████▎ │ └────┴────────┴────────────────────┘ -## transform {#transform} +## 变换 {#transform} 根据定义,将某些元素转换为其他元素。 此函数有两种使用方式: @@ -187,9 +188,9 @@ ORDER BY h ASC `array_from` – 用于转换的常量数组。 -`array_to` – 将‘from’中的值转换为的常量数组。 +`array_to` – 将'from'中的值转换为的常量数组。 -`default` – 如果‘x’不等于‘from’中的任何值,则默认转换的值。 +`default` – 如果'x'不等于'from'中的任何值,则默认转换的值。 `array_from` 和 `array_to` – 拥有相同大小的数组。 @@ -201,7 +202,7 @@ ORDER BY h ASC 对于相同的字母(T或U),如果数值类型,那么它们不可不完全匹配的,只需要具备共同的类型即可。 例如,第一个参数是Int64类型,第二个参数是Array(UInt16)类型。 -如果’x’值等于’array\_from’数组中的一个元素,它将从’array\_to’数组返回一个对应的元素(下标相同)。否则,它返回’default’。如果’array\_from’匹配到了多个元素,则返回第一个匹配的元素。 +如果'x'值等于'array\_from'数组中的一个元素,它将从'array\_to'数组返回一个对应的元素(下标相同)。否则,它返回'default'。如果'array\_from'匹配到了多个元素,则返回第一个匹配的元素。 示例: @@ -223,8 +224,8 @@ ORDER BY c DESC 1. 
`transform(x, array_from, array_to)` -与第一种不同在于省略了’default’参数。 -如果’x’值等于’array\_from’数组中的一个元素,它将从’array\_to’数组返回相应的元素(下标相同)。 否则,它返回’x’。 +与第一种不同在于省略了'default'参数。 +如果'x'值等于'array\_from'数组中的一个元素,它将从'array\_to'数组返回相应的元素(下标相同)。 否则,它返回'x'。 类型约束: @@ -273,23 +274,23 @@ SELECT │ 192851925 │ 183.92 MiB │ └────────────────┴────────────┘ -## least(a, b) {#leasta-b} +## 至少(a,b) {#leasta-b} 返回a和b中的最小值。 -## greatest(a, b) {#greatesta-b} +## 最伟大(a,b) {#greatesta-b} 返回a和b的最大值。 -## uptime() {#uptime} +## 碌莽禄time拢time() {#uptime} 返回服务正常运行的秒数。 -## version() {#version} +## 版本() {#version} 以字符串形式返回服务器的版本。 -## timezone() {#timezone} +## 时区() {#timezone} 返回服务器的时区。 @@ -305,7 +306,7 @@ SELECT 返回行所在结果集中的序列号。此函数仅考虑受影响的Block。 -## runningDifference(x) {#other_functions-runningdifference} +## 运行差异(x) {#other_functions-runningdifference} 计算数据块中相邻行的值之间的差异。 对于第一行返回0,并为每个后续行返回与前一行的差异。 @@ -340,9 +341,9 @@ FROM │ 1110 │ 2016-11-24 00:00:10 │ 1 │ └─────────┴─────────────────────┴───────┘ -## runningDifferenceStartingWithFirstValue {#runningdifferencestartingwithfirstvalue} +## 运行差异启动与第一值 {#runningdifferencestartingwithfirstvalue} -与[runningDifference](./other_functions.md#other_functions-runningdifference)相同,区别在于第一行返回第一行的值,后续每个后续行返回与上一行的差值。 +与[运行差异](./other_functions.md#other_functions-runningdifference)相同,区别在于第一行返回第一行的值,后续每个后续行返回与上一行的差值。 ## MACNumToString(num) {#macnumtostringnum} @@ -358,7 +359,7 @@ FROM ## getSizeOfEnumType {#getsizeofenumtype} -返回[Enum](../../data_types/enum.md)中的枚举数量。 +返回[枚举](../../sql_reference/functions/other_functions.md)中的枚举数量。 getSizeOfEnumType(value) @@ -453,7 +454,7 @@ FROM - 数值类型返回`0`。 - 字符串类型返回空的字符串。 -- [Nullable](../../data_types/nullable.md)类型返回`ᴺᵁᴸᴸ`。 +- [可为空](../../sql_reference/functions/other_functions.md)类型返回`ᴺᵁᴸᴸ`。 **示例** @@ -477,7 +478,7 @@ FROM 1 rows in set. Elapsed: 0.002 sec. 
-## replicate {#replicate} +## 复制 {#replicate} 使用单个值填充一个数组。 @@ -502,26 +503,26 @@ FROM │ [1,1,1] │ └───────────────────────────────┘ -## filesystemAvailable {#filesystemavailable} +## 文件系统可用 {#filesystemavailable} 返回磁盘的剩余空间信息(以字节为单位)。使用配置文件中的path配置评估此信息。 -## filesystemCapacity {#filesystemcapacity} +## 文件系统容量 {#filesystemcapacity} 返回磁盘的容量信息,以字节为单位。使用配置文件中的path配置评估此信息。 -## finalizeAggregation {#function-finalizeaggregation} +## 最后聚会 {#function-finalizeaggregation} 获取聚合函数的状态。返回聚合结果(最终状态)。 -## runningAccumulate {#function-runningaccumulate} +## 跑累积 {#function-runningaccumulate} 获取聚合函数的状态并返回其具体的值。这是从第一行到当前行的所有行累计的结果。 例如,获取聚合函数的状态(示例runningAccumulate(uniqState(UserID))),对于数据块的每一行,返回所有先前行和当前行的状态合并后的聚合函数的结果。 因此,函数的结果取决于分区中数据块的顺序以及数据块中行的顺序。 -## joinGet(‘join\_storage\_table\_name’, ‘get\_column’, join\_key) {#joingetjoin-storage-table-name-get-column-join-key} +## joinGet(‘join\_storage\_table\_name’, ‘get\_column’,join\_key) {#joingetjoin-storage-table-name-get-column-join-key} 使用指定的连接键从Join类型引擎的表中获取数据。 diff --git a/docs/zh/query_language/functions/random_functions.md b/docs/zh/sql_reference/functions/random_functions.md similarity index 98% rename from docs/zh/query_language/functions/random_functions.md rename to docs/zh/sql_reference/functions/random_functions.md index 31283cce08a..1db2f4a8438 100644 --- a/docs/zh/query_language/functions/random_functions.md +++ b/docs/zh/sql_reference/functions/random_functions.md @@ -1,3 +1,4 @@ + # 随机函数 {#sui-ji-han-shu} 随机函数使用非加密方式生成伪随机数字。 @@ -6,7 +7,7 @@ 您可以向它传递任何类型的参数,但传递的参数将不会使用在任何随机数生成过程中。 此参数的唯一目的是防止公共子表达式消除,以便在相同的查询中使用相同的随机函数生成不同的随机数。 -## rand {#rand} +## 兰德 {#rand} 返回一个UInt32类型的随机数字,所有UInt32类型的数字被生成的概率均相等。此函数线性同于的方式生成随机数。 diff --git a/docs/zh/query_language/functions/rounding_functions.md b/docs/zh/sql_reference/functions/rounding_functions.md similarity index 79% rename from docs/zh/query_language/functions/rounding_functions.md rename to docs/zh/sql_reference/functions/rounding_functions.md index fb421be3b28..773f969090d 100644 --- a/docs/zh/query_language/functions/rounding_functions.md +++ b/docs/zh/sql_reference/functions/rounding_functions.md @@ -1,22 +1,23 @@ + # 取整函数 {#qu-zheng-han-shu} -## floor(x\[, N\]) {#floorx-n} +## 楼(x\[,N\]) {#floorx-n} 返回小于或等于x的最大舍入数。该函数使用参数乘1/10N,如果1/10N不精确,则选择最接近的精确的适当数据类型的数。 -‘N’是一个整数常量,可选参数。默认为0,这意味着不对其进行舍入。 -‘N’可以是负数。 +'N'是一个整数常量,可选参数。默认为0,这意味着不对其进行舍入。 +'N'可以是负数。 示例: `floor(123.45, 1) = 123.4, floor(123.45, -1) = 120.` `x`是任何数字类型。结果与其为相同类型。 -对于整数参数,使用负‘N’值进行舍入是有意义的(对于非负«N»,该函数不执行任何操作)。 +对于整数参数,使用负'N'值进行舍入是有意义的(对于非负«N»,该函数不执行任何操作)。 如果取整导致溢出(例如,floor(-128,-1)),则返回特定于实现的结果。 -## ceil(x\[, N\]), ceiling(x\[, N\]) {#ceilx-n-ceilingx-n} +## ceil(x\[,N\]),天花板(x\[,N\]) {#ceilx-n-ceilingx-n} -返回大于或等于’x’的最小舍入数。在其他方面,它与’floor’功能相同(见上文)。 +返回大于或等于'x'的最小舍入数。在其他方面,它与'floor'功能相同(见上文)。 -## round(x\[, N\]) {#rounding_functions-round} +## 圆形(x\[,N\]) {#rounding_functions-round} 将值取整到指定的小数位数。 @@ -26,7 +27,7 @@ **参数:** -- `expression` — 要进行取整的数字。可以是任何返回数字[类型](../../data_types/index.md#data_types)的[表达式](../syntax.md#syntax-expressions)。 +- `expression` — 要进行取整的数字。可以是任何返回数字[类型](../../sql_reference/functions/rounding_functions.md#data_types)的[表达式](../syntax.md#syntax-expressions)。 - `decimal-places` — 整数类型。 - 如果`decimal-places > 0`,则该函数将值舍入小数点右侧。 - 如果`decimal-places < 0`,则该函数将小数点左侧的值四舍五入。 @@ -71,15 +72,15 @@ SELECT number / 2 AS x, round(x) FROM system.numbers LIMIT 3 接受一个数字。如果数字小于1,则返回0。否则,它将数字向下舍入到最接近的(整个非负)2的x次幂。 -## roundDuration(num) {#rounddurationnum} +## 圆形饱和度(num) {#rounddurationnum} 
接受一个数字。如果数字小于1,则返回0。否则,它将数字向下舍入为集合中的数字:1,10,30,60,120,180,240,300,600,1200,1800,3600,7200,18000,36000。此函数用于Yandex.Metrica报表中计算会话的持续时长。 -## roundAge(num) {#roundagenum} +## 圆数(num) {#roundagenum} 接受一个数字。如果数字小于18,则返回0。否则,它将数字向下舍入为集合中的数字:18,25,35,45,55。此函数用于Yandex.Metrica报表中用户年龄的计算。 -## roundDown(num, arr) {#rounddownnum-arr} +## roundDown(num,arr) {#rounddownnum-arr} 接受一个数字,将其向下舍入到指定数组中的元素。如果该值小于数组中的最低边界,则返回最低边界。 diff --git a/docs/zh/query_language/functions/splitting_merging_functions.md b/docs/zh/sql_reference/functions/splitting_merging_functions.md similarity index 63% rename from docs/zh/query_language/functions/splitting_merging_functions.md rename to docs/zh/sql_reference/functions/splitting_merging_functions.md index 7477e89441e..d217ea19f0d 100644 --- a/docs/zh/query_language/functions/splitting_merging_functions.md +++ b/docs/zh/sql_reference/functions/splitting_merging_functions.md @@ -1,17 +1,18 @@ + # 字符串拆分合并函数 {#zi-fu-chuan-chai-fen-he-bing-han-shu} -## splitByChar(separator, s) {#splitbycharseparator-s} +## splitByChar(分隔符,s) {#splitbycharseparator-s} -将字符串以‘separator’拆分成多个子串。‘separator’必须为仅包含一个字符的字符串常量。 +将字符串以'separator'拆分成多个子串。'separator'必须为仅包含一个字符的字符串常量。 返回拆分后的子串的数组。 如果分隔符出现在字符串的开头或结尾,或者如果有多个连续的分隔符,则将在对应位置填充空的子串。 -## splitByString(separator, s) {#splitbystringseparator-s} +## splitByString(分隔符,s) {#splitbystringseparator-s} 与上面相同,但它使用多个字符的字符串作为分隔符。 该字符串必须为非空。 -## arrayStringConcat(arr\[, separator\]) {#arraystringconcatarr-separator} +## arrayStringConcat(arr\[,分隔符\]) {#arraystringconcatarr-separator} -使用separator将数组中列出的字符串拼接起来。‘separator’是一个可选参数:一个常量字符串,默认情况下设置为空字符串。 +使用separator将数组中列出的字符串拼接起来。'separator'是一个可选参数:一个常量字符串,默认情况下设置为空字符串。 返回拼接后的字符串。 ## alphaTokens(s) {#alphatokenss} diff --git a/docs/zh/query_language/functions/string_functions.md b/docs/zh/sql_reference/functions/string_functions.md similarity index 78% rename from docs/zh/query_language/functions/string_functions.md rename to docs/zh/sql_reference/functions/string_functions.md index a2b5355ae8c..c04305b9d67 100644 --- a/docs/zh/query_language/functions/string_functions.md +++ b/docs/zh/sql_reference/functions/string_functions.md @@ -1,6 +1,7 @@ + # 字符串函数 {#zi-fu-chuan-han-shu} -## empty {#string-functions-empty} +## 空 {#string-functions-empty} 对于空字符串返回1,对于非空字符串返回0。 结果类型是UInt8。 @@ -13,32 +14,32 @@ 结果类型是UInt8。 该函数也适用于数组。 -## length {#length} +## 长度 {#length} 返回字符串的字节长度。 结果类型是UInt64。 该函数也适用于数组。 -## lengthUTF8 {#lengthutf8} +## 长度8 {#lengthutf8} 假定字符串以UTF-8编码组成的文本,返回此字符串的Unicode字符长度。如果传入的字符串不是UTF-8编码,则函数可能返回一个预期外的值(不会抛出异常)。 结果类型是UInt64。 -## char\_length, CHAR\_LENGTH {#char-length-char-length} +## char\_length,CHAR\_LENGTH {#char-length-char-length} 假定字符串以UTF-8编码组成的文本,返回此字符串的Unicode字符长度。如果传入的字符串不是UTF-8编码,则函数可能返回一个预期外的值(不会抛出异常)。 结果类型是UInt64。 -## character\_length, CHARACTER\_LENGTH {#character-length-character-length} +## 字符长度,字符长度 {#character-length-character-length} 假定字符串以UTF-8编码组成的文本,返回此字符串的Unicode字符长度。如果传入的字符串不是UTF-8编码,则函数可能返回一个预期外的值(不会抛出异常)。 结果类型是UInt64。 -## lower, lcase {#lower-lcase} +## 低一点 {#lower-lcase} 将字符串中的ASCII转换为小写。 -## upper, ucase {#upper-ucase} +## 上,ucase {#upper-ucase} 将字符串中的ASCII转换为大写。 @@ -68,7 +69,7 @@ 参数: -- input\_string — 任何一个[String](../../data_types/string.md)类型的对象。 +- input\_string — 任何一个[字符串](../../sql_reference/functions/string_functions.md)类型的对象。 返回值: 有效的UTF-8字符串。 @@ -84,7 +85,7 @@ SELECT toValidUTF8('\x61\xF0\x80\x80\x80b') └───────────────────────┘ ``` -## reverse {#reverse} +## 反向 {#reverse} 反转字符串。 @@ -118,25 +119,25 @@ SELECT format('{} {}', 'Hello', 'World') 
与[concat](./string_functions.md#concat-s1-s2)相同,区别在于,你需要保证concat(s1, s2, s3) -\> s4是单射的,它将用于GROUP BY的优化。 -## substring(s, offset, length), mid(s, offset, length), substr(s, offset, length) {#substrings-offset-length-mids-offset-length-substrs-offset-length} +## 子串(s,offset,length),mid(s,offset,length),substr(s,offset,length) {#substrings-offset-length-mids-offset-length-substrs-offset-length} -以字节为单位截取指定位置字符串,返回以‘offset’位置为开头,长度为‘length’的子串。‘offset’从1开始(与标准SQL相同)。‘offset’和‘length’参数必须是常量。 +以字节为单位截取指定位置字符串,返回以'offset'位置为开头,长度为'length'的子串。'offset'从1开始(与标准SQL相同)。'offset'和'length'参数必须是常量。 -## substringUTF8(s, offset, length) {#substringutf8s-offset-length} +## substringf8(s,offset,length) {#substringutf8s-offset-length} -与‘substring’相同,但其操作单位为Unicode字符,函数假设字符串是以UTF-8进行编码的文本。如果不是则可能返回一个预期外的结果(不会抛出异常)。 +与'substring'相同,但其操作单位为Unicode字符,函数假设字符串是以UTF-8进行编码的文本。如果不是则可能返回一个预期外的结果(不会抛出异常)。 -## appendTrailingCharIfAbsent(s, c) {#appendtrailingcharifabsents-c} +## appendTrailingCharIfAbsent(s,c) {#appendtrailingcharifabsents-c} -如果‘s’字符串非空并且末尾不包含‘c’字符,则将‘c’字符附加到末尾。 +如果's'字符串非空并且末尾不包含'c'字符,则将'c'字符附加到末尾。 -## convertCharset(s, from, to) {#convertcharsets-from-to} +## convertCharset(s,from,to) {#convertcharsets-from-to} -返回从‘from’中的编码转换为‘to’中的编码的字符串‘s’。 +返回从'from'中的编码转换为'to'中的编码的字符串's'。 ## base64Encode(s) {#base64encodes} -将字符串‘s’编码成base64 +将字符串's'编码成base64 ## base64Decode(s) {#base64decodes} @@ -146,11 +147,11 @@ SELECT format('{} {}', 'Hello', 'World') 使用base64将字符串解码成原始字符串。但如果出现错误,将返回空字符串。 -## endsWith(s, suffix) {#endswiths-suffix} +## endsWith(s,后缀) {#endswiths-suffix} 返回是否以指定的后缀结尾。如果字符串以指定的后缀结束,则返回1,否则返回0。 -## startsWith(s, prefix) {#startswiths-prefix} +## 开始使用(s,前缀) {#startswiths-prefix} 返回是否以指定的前缀开头。如果字符串以指定的前缀开头,则返回1,否则返回0。 diff --git a/docs/zh/query_language/functions/string_replace_functions.md b/docs/zh/sql_reference/functions/string_replace_functions.md similarity index 75% rename from docs/zh/query_language/functions/string_replace_functions.md rename to docs/zh/sql_reference/functions/string_replace_functions.md index e70dcade3a0..04b110a2cef 100644 --- a/docs/zh/query_language/functions/string_replace_functions.md +++ b/docs/zh/sql_reference/functions/string_replace_functions.md @@ -1,20 +1,21 @@ + # 字符串替换函数 {#zi-fu-chuan-ti-huan-han-shu} -## replaceOne(haystack, pattern, replacement) {#replaceonehaystack-pattern-replacement} +## replaceOne(大海捞针,模式,更换) {#replaceonehaystack-pattern-replacement} -用‘replacement’子串替换‘haystack’中与‘pattern’子串第一个匹配的匹配项(如果存在)。 -‘pattern’和‘replacement’必须是常量。 +用'replacement'子串替换'haystack'中与'pattern'子串第一个匹配的匹配项(如果存在)。 +'pattern'和'replacement'必须是常量。 -## replaceAll(haystack, pattern, replacement), replace(haystack, pattern, replacement) {#replaceallhaystack-pattern-replacement-replacehaystack-pattern-replacement} +## replaceAll(大海捞针,模式,替换),替换(大海捞针,模式,替换) {#replaceallhaystack-pattern-replacement-replacehaystack-pattern-replacement} -用‘replacement’子串替换‘haystack’中出现的所有‘pattern’子串。 +用'replacement'子串替换'haystack'中出现的所有'pattern'子串。 -## replaceRegexpOne(haystack, pattern, replacement) {#replaceregexponehaystack-pattern-replacement} +## replaceRegexpOne(大海捞针,模式,更换) {#replaceregexponehaystack-pattern-replacement} -使用‘pattern’正则表达式替换。 ‘pattern’可以是任意一个有效的re2正则表达式。 +使用'pattern'正则表达式替换。 ‘pattern’可以是任意一个有效的re2正则表达式。 如果存在与正则表达式匹配的匹配项,仅替换第一个匹配项。 同时‘replacement’可以指定为正则表达式中的捕获组。可以包含`\0-\9`。 -在这种情况下,函数将使用正则表达式的整个匹配项替换‘\\0’。使用其他与之对应的子模式替换对应的‘\\1-\\9’。要在模版中使用‘’字符,请使用‘’将其转义。 +在这种情况下,函数将使用正则表达式的整个匹配项替换‘\\0’。使用其他与之对应的子模式替换对应的'\\1-\\9'。要在模版中使用''字符,请使用''将其转义。 另外还请记住,字符串文字需要额外的转义。 示例1.将日期转换为美国格式: @@ 
-46,7 +47,7 @@ SELECT replaceRegexpOne('Hello, World!', '.*', '\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0') │ Hello, World!Hello, World!Hello, World!Hello, World!Hello, World!Hello, World!Hello, World!Hello, World!Hello, World!Hello, World! │ └────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ -## replaceRegexpAll(haystack, pattern, replacement) {#replaceregexpallhaystack-pattern-replacement} +## replaceRegexpAll(大海捞针,模式,替换) {#replaceregexpallhaystack-pattern-replacement} 与replaceRegexpOne相同,但会替换所有出现的匹配项。例如: @@ -72,7 +73,7 @@ SELECT replaceRegexpAll('Hello, World!', '^', 'here: ') AS res ## regexpQuoteMeta(s) {#regexpquotemetas} 该函数用于在字符串中的某些预定义字符之前添加反斜杠。 -预定义字符:‘0’,‘\\’,‘\|’,‘(’,‘)’,‘^’,‘$’,‘。’,‘\[’,’\]’,‘?’,‘\*’,‘+’,‘{’,‘:’,’ - ’。 +预定义字符:‘0’,‘\\’,‘\|’,‘(’,‘)’,‘^’,‘$’,‘。’,‘\[’,'\]',‘?’,‘\*’,‘+’,‘{’,‘:’,' - '。 这个实现与re2 :: RE2 :: QuoteMeta略有不同。它以\\0而不是00转义零字节,它只转义所需的字符。 有关详细信息,请参阅链接:\[RE2\](https://github.com/google/re2/blob/master/re2/re2.cc\#L473) diff --git a/docs/zh/query_language/functions/string_search_functions.md b/docs/zh/sql_reference/functions/string_search_functions.md similarity index 63% rename from docs/zh/query_language/functions/string_search_functions.md rename to docs/zh/sql_reference/functions/string_search_functions.md index 8a27c460966..e0f5e06a357 100644 --- a/docs/zh/query_language/functions/string_search_functions.md +++ b/docs/zh/sql_reference/functions/string_search_functions.md @@ -1,48 +1,49 @@ + # 字符串搜索函数 {#zi-fu-chuan-sou-suo-han-shu} 下列所有函数在默认的情况下区分大小写。对于不区分大小写的搜索,存在单独的变体。 -## position(haystack, needle), locate(haystack, needle) {#positionhaystack-needle-locatehaystack-needle} +## 位置(大海捞针),定位(大海捞针) {#positionhaystack-needle-locatehaystack-needle} 在字符串`haystack`中搜索子串`needle`。 返回子串的位置(以字节为单位),从1开始,如果未找到子串,则返回0。 对于不区分大小写的搜索,请使用函数`positionCaseInsensitive`。 -## positionUTF8(haystack, needle) {#positionutf8haystack-needle} +## positionUTF8(大海捞针) {#positionutf8haystack-needle} 与`position`相同,但位置以Unicode字符返回。此函数工作在UTF-8编码的文本字符集中。如非此编码的字符集,则返回一些非预期结果(他不会抛出异常)。 对于不区分大小写的搜索,请使用函数`positionCaseInsensitiveUTF8`。 -## multiSearchAllPositions(haystack, \[needle1, needle2, …, needlen\]) {#multisearchallpositionshaystack-needle1-needle2-needlen} +## 多搜索分配(干草堆,\[针1,针2, …, needlen\]) {#multisearchallpositionshaystack-needle1-needle2-needlen} -与`position`相同,但函数返回一个数组,其中包含所有匹配needlei的位置。 +与`position`相同,但函数返回一个数组,其中包含所有匹配needle的位置。 对于不区分大小写的搜索或/和UTF-8格式,使用函数`multiSearchAllPositionsCaseInsensitive,multiSearchAllPositionsUTF8,multiSearchAllPositionsCaseInsensitiveUTF8`。 -## multiSearchFirstPosition(haystack, \[needle1, needle2, …, needlen\]) {#multisearchfirstpositionhaystack-needle1-needle2-needlen} +## multiSearchFirstPosition(大海捞针,\[针1,针2, …, needlen\]) {#multisearchfirstpositionhaystack-needle1-needle2-needlen} 与`position`相同,但返回在`haystack`中与needles字符串匹配的最左偏移。 对于不区分大小写的搜索或/和UTF-8格式,使用函数`multiSearchFirstPositionCaseInsensitive,multiSearchFirstPositionUTF8,multiSearchFirstPositionCaseInsensitiveUTF8`。 -## multiSearchFirstIndex(haystack, \[needle1, needle2, …, needlen\]) {#multisearchfirstindexhaystack-needle1-needle2-needlen} +## multiSearchFirstIndex(大海捞针,\[针1,针2, …, needlen\]) {#multisearchfirstindexhaystack-needle1-needle2-needlen} -返回在字符串`haystack`中最先查找到的needlei的索引`i`(从1开始),没有找到任何匹配项则返回0。 +返回在字符串`haystack`中最先查找到的needle的索引`i`(从1开始),没有找到任何匹配项则返回0。 对于不区分大小写的搜索或/和UTF-8格式,使用函数`multiSearchFirstIndexCaseInsensitive,multiSearchFirstIndexUTF8,multiSearchFirstIndexCaseInsensitiveUTF8`。 -## multiSearchAny(haystack, 
\[needle1, needle2, …, needlen\]) {#multisearchanyhaystack-needle1-needle2-needlen} +## 多搜索(大海捞针,\[针1,针2, …, needlen\]) {#multisearchanyhaystack-needle1-needle2-needlen} -如果`haystack`中至少存在一个needlei匹配则返回1,否则返回0。 +如果`haystack`中至少存在一个needle匹配则返回1,否则返回0。 对于不区分大小写的搜索或/和UTF-8格式,使用函数`multiSearchAnyCaseInsensitive,multiSearchAnyUTF8,multiSearchAnyCaseInsensitiveUTF8`。 !!! note "注意" 在所有`multiSearch*`函数中,由于实现规范,needles的数量应小于28。 -## match(haystack, pattern) {#matchhaystack-pattern} +## 匹配(大海捞针,模式) {#matchhaystack-pattern} 检查字符串是否与`pattern`正则表达式匹配。`pattern`可以是一个任意的`re2`正则表达式。 `re2`正则表达式的[语法](https://github.com/google/re2/wiki/Syntax)比Perl正则表达式的语法存在更多限制。 @@ -53,22 +54,22 @@ 正则表达式与字符串一起使用,就像它是一组字节一样。正则表达式中不能包含空字节。 对于在字符串中搜索子字符串的模式,最好使用LIKE或«position»,因为它们更加高效。 -## multiMatchAny(haystack, \[pattern1, pattern2, …, patternn\]) {#multimatchanyhaystack-pattern1-pattern2-patternn} +## multiMatchAny(大海捞针,\[模式1,模式2, …, patternn\]) {#multimatchanyhaystack-pattern1-pattern2-patternn} -与`match`相同,但如果所有正则表达式都不匹配,则返回0;如果任何模式匹配,则返回1。它使用[hyperscan](https://github.com/intel/hyperscan)库。对于在字符串中搜索子字符串的模式,最好使用«multisearchany»,因为它更高效。 +与`match`相同,但如果所有正则表达式都不匹配,则返回0;如果任何模式匹配,则返回1。它使用[超扫描](https://github.com/intel/hyperscan)库。对于在字符串中搜索子字符串的模式,最好使用«multisearchany»,因为它更高效。 !!! note "注意" 任何`haystack`字符串的长度必须小于232\字节,否则抛出异常。这种限制是因为hyperscan API而产生的。 -## multiMatchAnyIndex(haystack, \[pattern1, pattern2, …, patternn\]) {#multimatchanyindexhaystack-pattern1-pattern2-patternn} +## multiMatchAnyIndex(大海捞针,\[模式1,模式2, …, patternn\]) {#multimatchanyindexhaystack-pattern1-pattern2-patternn} 与`multiMatchAny`相同,但返回与haystack匹配的任何内容的索引位置。 -## multiFuzzyMatchAny(haystack, distance, \[pattern1, pattern2, …, patternn\]) {#multifuzzymatchanyhaystack-distance-pattern1-pattern2-patternn} +## multiFuzzyMatchAny(干草堆,距离,\[模式1,模式2, …, patternn\]) {#multifuzzymatchanyhaystack-distance-pattern1-pattern2-patternn} 与`multiMatchAny`相同,但如果在haystack能够查找到任何模式匹配能够在指定的[编辑距离](https://en.wikipedia.org/wiki/Edit_distance)内进行匹配,则返回1。此功能也处于实验模式,可能非常慢。有关更多信息,请参阅[hyperscan文档](https://intel.github.io/hyperscan/dev-reference/compilation.html#approximate-matching)。 -## multiFuzzyMatchAnyIndex(haystack, distance, \[pattern1, pattern2, …, patternn\]) {#multifuzzymatchanyindexhaystack-distance-pattern1-pattern2-patternn} +## multiFuzzyMatchAnyIndex(大海捞针,距离,\[模式1,模式2, …, patternn\]) {#multifuzzymatchanyindexhaystack-distance-pattern1-pattern2-patternn} 与`multiFuzzyMatchAny`相同,但返回匹配项的匹配能容的索引位置。 @@ -78,15 +79,15 @@ !!! 
note "注意" 如要关闭所有hyperscan函数的使用,请设置`SET allow_hyperscan = 0;`。 -## extract(haystack, pattern) {#extracthaystack-pattern} +## 提取(大海捞针,图案) {#extracthaystack-pattern} -使用正则表达式截取字符串。如果‘haystack’与‘pattern’不匹配,则返回空字符串。如果正则表达式中不包含子模式,它将获取与整个正则表达式匹配的子串。否则,它将获取与第一个子模式匹配的子串。 +使用正则表达式截取字符串。如果'haystack'与'pattern'不匹配,则返回空字符串。如果正则表达式中不包含子模式,它将获取与整个正则表达式匹配的子串。否则,它将获取与第一个子模式匹配的子串。 -## extractAll(haystack, pattern) {#extractallhaystack-pattern} +## extractAll(大海捞针,图案) {#extractallhaystack-pattern} -使用正则表达式提取字符串的所有片段。如果‘haystack’与‘pattern’正则表达式不匹配,则返回一个空字符串。否则返回所有与正则表达式匹配的字符串数组。通常,行为与‘extract’函数相同(它采用第一个子模式,如果没有子模式,则采用整个表达式)。 +使用正则表达式提取字符串的所有片段。如果'haystack'与'pattern'正则表达式不匹配,则返回一个空字符串。否则返回所有与正则表达式匹配的字符串数组。通常,行为与'extract'函数相同(它采用第一个子模式,如果没有子模式,则采用整个表达式)。 -## like(haystack, pattern), haystack LIKE pattern operator {#likehaystack-pattern-haystack-like-pattern-operator} +## 像(干草堆,模式),干草堆像模式运算符 {#likehaystack-pattern-haystack-like-pattern-operator} 检查字符串是否与简单正则表达式匹配。 正则表达式可以包含的元符号有`%`和`_`。 @@ -98,19 +99,19 @@ 可以使用反斜杠(`\`)来对元符号进行转义。请参阅«match»函数说明中有关转义的说明。 对于像`%needle%`这样的正则表达式,改函数与`position`函数一样快。 -对于其他正则表达式,函数与‘match’函数相同。 +对于其他正则表达式,函数与'match'函数相同。 -## notLike(haystack, pattern), haystack NOT LIKE pattern operator {#notlikehaystack-pattern-haystack-not-like-pattern-operator} +## 不喜欢(干草堆,模式),干草堆不喜欢模式运算符 {#notlikehaystack-pattern-haystack-not-like-pattern-operator} -与‘like’函数返回相反的结果。 +与'like'函数返回相反的结果。 -## ngramDistance(haystack, needle) {#ngramdistancehaystack-needle} +## 大海捞针) {#ngramdistancehaystack-needle} 基于4-gram计算`haystack`和`needle`之间的距离:计算两个4-gram集合之间的对称差异,并用它们的基数和对其进行归一化。返回0到1之间的任何浮点数 – 越接近0则表示越多的字符串彼此相似。如果常量的`needle`或`haystack`超过32KB,函数将抛出异常。如果非常量的`haystack`或`needle`字符串超过32Kb,则距离始终为1。 对于不区分大小写的搜索或/和UTF-8格式,使用函数`ngramDistanceCaseInsensitive,ngramDistanceUTF8,ngramDistanceCaseInsensitiveUTF8`。 -## ngramSearch(haystack, needle) {#ngramsearchhaystack-needle} +## ツ暗ェツ氾环催ツ団ツ法ツ人) {#ngramsearchhaystack-needle} 与`ngramDistance`相同,但计算`needle`和`haystack`之间的非对称差异——`needle`的n-gram减去`needle`归一化n-gram。可用于模糊字符串搜索。 diff --git a/docs/zh/query_language/functions/type_conversion_functions.md b/docs/zh/sql_reference/functions/type_conversion_functions.md similarity index 67% rename from docs/zh/query_language/functions/type_conversion_functions.md rename to docs/zh/sql_reference/functions/type_conversion_functions.md index a9c97589c9f..56375eabc07 100644 --- a/docs/zh/query_language/functions/type_conversion_functions.md +++ b/docs/zh/sql_reference/functions/type_conversion_functions.md @@ -1,16 +1,17 @@ + # 类型转换函数 {#lei-xing-zhuan-huan-han-shu} -## toUInt8, toUInt16, toUInt32, toUInt64 {#touint8-touint16-touint32-touint64} +## toUInt8,toUInt16,toUInt32,toUInt64 {#touint8-touint16-touint32-touint64} -## toInt8, toInt16, toInt32, toInt64 {#toint8-toint16-toint32-toint64} +## toInt8,toInt16,toInt32,toInt64 {#toint8-toint16-toint32-toint64} -## toFloat32, toFloat64 {#tofloat32-tofloat64} +## toFloat32,toFloat64 {#tofloat32-tofloat64} -## toDate, toDateTime {#todate-todatetime} +## 今天,今天 {#todate-todatetime} -## toUInt8OrZero, toUInt16OrZero, toUInt32OrZero, toUInt64OrZero, toInt8OrZero, toInt16OrZero, toInt32OrZero, toInt64OrZero, toFloat32OrZero, toFloat64OrZero, toDateOrZero, toDateTimeOrZero {#touint8orzero-touint16orzero-touint32orzero-touint64orzero-toint8orzero-toint16orzero-toint32orzero-toint64orzero-tofloat32orzero-tofloat64orzero-todateorzero-todatetimeorzero} +## 
toUInt8OrZero,toUInt16OrZero,toUInt32OrZero,toUInt64OrZero,toInt8OrZero,toInt16OrZero,toInt32OrZero,toInt64OrZero,toFloat32OrZero,toFloat64OrZero,toDateOrZero,toDateTimeOrZero {#touint8orzero-touint16orzero-touint32orzero-touint64orzero-toint8orzero-toint16orzero-toint32orzero-toint64orzero-tofloat32orzero-tofloat64orzero-todateorzero-todatetimeorzero} -## toUInt8OrNull, toUInt16OrNull, toUInt32OrNull, toUInt64OrNull, toInt8OrNull, toInt16OrNull, toInt32OrNull, toInt64OrNull, toFloat32OrNull, toFloat64OrNull, toDateOrNull, toDateTimeOrNull {#touint8ornull-touint16ornull-touint32ornull-touint64ornull-toint8ornull-toint16ornull-toint32ornull-toint64ornull-tofloat32ornull-tofloat64ornull-todateornull-todatetimeornull} +## toUInt8OrNull,toUInt16OrNull,toUInt32OrNull,toUInt64OrNull,toInt8OrNull,toInt16OrNull,toInt32OrNull,toInt64OrNull,toFloat32OrNull,toFloat64OrNull,toDateOrNull,toDateTimeOrNull {#touint8ornull-touint16ornull-touint32ornull-touint64ornull-toint8ornull-toint16ornull-toint32ornull-toint64ornull-tofloat32ornull-tofloat64ornull-todateornull-todatetimeornull} ## toString {#tostring} @@ -27,7 +28,7 @@ toDate/toDateTime函数的日期和日期时间格式定义如下: YYYY-MM-DD YYYY-MM-DD hh:mm:ss -例外的是,如果将UInt32、Int32、UInt64或Int64类型的数值转换为Date类型,并且其对应的值大于等于65536,则该数值将被解析成unix时间戳(而不是对应的天数)。这意味着允许写入‘toDate(unix\_timestamp)’这种常见情况,否则这将是错误的,并且需要编写更加繁琐的‘toDate(toDateTime(unix\_timestamp))’。 +例外的是,如果将UInt32、Int32、UInt64或Int64类型的数值转换为Date类型,并且其对应的值大于等于65536,则该数值将被解析成unix时间戳(而不是对应的天数)。这意味着允许写入'toDate(unix\_timestamp)'这种常见情况,否则这将是错误的,并且需要编写更加繁琐的'toDate(toDateTime(unix\_timestamp))'。 Date与DateTime之间的转换以更为自然的方式进行:通过添加空的time或删除time。 @@ -47,11 +48,11 @@ SELECT 另请参阅`toUnixTimestamp`函数。 -## toDecimal32(value, S), toDecimal64(value, S), toDecimal128(value, S) {#todecimal32value-s-todecimal64value-s-todecimal128value-s} +## toDecimal32(value,S),toDecimal64(value,S),toDecimal128(value,S) {#todecimal32value-s-todecimal64value-s-todecimal128value-s} -将`value`转换为精度为`S`的[Decimal](../../data_types/decimal.md)。`value`可以是数字或字符串。`S`参数为指定的小数位数。 +将`value`转换为精度为`S`的[Decimal](../../sql_reference/data_types/decimal.md)。`value`可以是数字或字符串。`S`参数为指定的小数位数。 -## toFixedString(s, N) {#tofixedstrings-n} +## toFixedString(s,N) {#tofixedstrings-n} 将String类型的参数转换为FixedString(N)类型的值(具有固定长度N的字符串)。N必须是一个常量。 如果字符串的字节数少于N,则向右填充空字节。如果字符串的字节数多于N,则抛出异常。 @@ -78,17 +79,17 @@ SELECT toFixedString('foo\0bar', 8) AS s, toStringCutToZero(s) AS s_cut │ foo\0bar\0 │ foo │ └────────────┴───────┘ -## reinterpretAsUInt8, reinterpretAsUInt16, reinterpretAsUInt32, reinterpretAsUInt64 {#reinterpretasuint8-reinterpretasuint16-reinterpretasuint32-reinterpretasuint64} +## reinterpretAsUInt8,reinterpretAsUInt16,reinterpretAsUInt32,reinterpretAsUInt64 {#reinterpretasuint8-reinterpretasuint16-reinterpretasuint32-reinterpretasuint64} -## reinterpretAsInt8, reinterpretAsInt16, reinterpretAsInt32, reinterpretAsInt64 {#reinterpretasint8-reinterpretasint16-reinterpretasint32-reinterpretasint64} +## reinterpretAsInt8,reinterpretAsInt16,reinterpretAsInt32,reinterpretAsInt64 {#reinterpretasint8-reinterpretasint16-reinterpretasint32-reinterpretasint64} -## reinterpretAsFloat32, reinterpretAsFloat64 {#reinterpretasfloat32-reinterpretasfloat64} +## reinterpretAsFloat32,reinterpretAsFloat64 {#reinterpretasfloat32-reinterpretasfloat64} -## reinterpretAsDate, reinterpretAsDateTime {#reinterpretasdate-reinterpretasdatetime} +## reinterpretAsDate,reinterpretAsDateTime {#reinterpretasdate-reinterpretasdatetime} 这些函数接受一个字符串,并将放在字符串开头的字节解释为主机顺序中的数字(little
endian)。如果字符串不够长,则函数就像使用必要数量的空字节填充字符串一样。如果字符串比需要的长,则忽略额外的字节。Date被解释为Unix时间戳的天数,DateTime被解释为Unix时间戳。 -## reinterpretAsString {#reinterpretasstring} +## reinterpretAsString {#reinterpretasstring} 此函数接受数字、Date或DateTime,并返回一个字符串,其中包含表示主机顺序(小端)的相应值的字节。从末尾删除空字节。例如,UInt32类型值255是一个字节长的字符串。 @@ -96,9 +97,9 @@ SELECT toFixedString('foo\0bar', 8) AS s, toStringCutToZero(s) AS s_cut 此函数接受数字、Date或DateTime,并返回包含表示主机顺序(小端)的相应值的字节的FixedString。从末尾删除空字节。例如,UInt32类型值255是一个长度为一个字节的FixedString。 -## CAST(x, t) {#type_conversion_function-cast} +## CAST(x, t) {#type_conversion_function-cast} -将‘x’转换为‘t’数据类型。还支持语法CAST(x AS t) +将'x'转换为't'数据类型。还支持语法CAST(x AS t) 示例: @@ -117,7 +118,7 @@ SELECT 将参数转换为FixedString(N),仅适用于String或FixedString(N)类型的参数。 -支持将数据转换为[Nullable](../../data_types/nullable.md)。例如: +支持将数据转换为[Nullable](../../sql_reference/data_types/nullable.md)。例如: SELECT toTypeName(x) FROM t_null │ Nullable(UInt16) │ └─────────────────────────────────────────┘ -## toIntervalYear, toIntervalQuarter, toIntervalMonth, toIntervalWeek, toIntervalDay, toIntervalHour, toIntervalMinute, toIntervalSecond {#function-tointerval} +## toIntervalYear,toIntervalQuarter,toIntervalMonth,toIntervalWeek,toIntervalDay,toIntervalHour,toIntervalMinute,toIntervalSecond {#function-tointerval} 将数字类型参数转换为Interval类型(时间区间)。 Interval类型实际上是非常有用的,您可以使用此类型的数据直接与Date或DateTime执行算术运算。同时,ClickHouse为Interval类型数据的声明提供了更方便的语法。例如: @@ -152,18 +153,18 @@ SELECT │ 2019-01-08 │ 2019-01-08 │ └───────────────────────────┴──────────────────────────────┘ -## parseDateTimeBestEffort {#type_conversion_functions-parsedatetimebesteffort} +## parseDateTimeBestEffort {#type_conversion_functions-parsedatetimebesteffort} 将数字类型参数解析为Date或DateTime类型。 与toDate和toDateTime不同,parseDateTimeBestEffort可以处理更复杂的日期格式。 有关详细信息,请参阅链接:[复杂日期格式](https://xkcd.com/1179/)。 -## parseDateTimeBestEffortOrNull {#parsedatetimebesteffortornull} +## parseDateTimeBestEffortOrNull {#parsedatetimebesteffortornull} -与[parseDateTimeBestEffort](#type_conversion_functions-parsedatetimebesteffort)相同,但它遇到无法处理的日期格式时返回null。 +与[parseDateTimeBestEffort](#type_conversion_functions-parsedatetimebesteffort)相同,但它遇到无法处理的日期格式时返回null。 -## parseDateTimeBestEffortOrZero {#parsedatetimebesteffortorzero} +## parseDateTimeBestEffortOrZero {#parsedatetimebesteffortorzero} -与[parseDateTimeBestEffort](#type_conversion_functions-parsedatetimebesteffort)相同,但它遇到无法处理的日期格式时返回零Date或零DateTime。 +与[parseDateTimeBestEffort](#type_conversion_functions-parsedatetimebesteffort)相同,但它遇到无法处理的日期格式时返回零Date或零DateTime。 [来源文章](https://clickhouse.tech/docs/en/query_language/functions/type_conversion_functions/) diff --git a/docs/zh/query_language/functions/url_functions.md b/docs/zh/sql_reference/functions/url_functions.md similarity index 69% rename from docs/zh/query_language/functions/url_functions.md rename to docs/zh/sql_reference/functions/url_functions.md index df8b1cb69c4..53295221e51 100644 --- a/docs/zh/query_language/functions/url_functions.md +++ b/docs/zh/sql_reference/functions/url_functions.md @@ -1,3 +1,4 @@ + # URL函数 {#urlhan-shu} 所有这些功能都不遵循RFC。它们被最大程度简化以提高性能。 @@ -6,25 +7,25 @@ 如果URL中没有要截取的内容则返回空字符串。 -### protocol {#protocol} +### protocol {#protocol} 返回URL的协议。例如: http、ftp、mailto、magnet… -### domain {#domain} +### domain {#domain} 获取域名。 -### domainWithoutWWW {#domainwithoutwww} +### domainWithoutWWW {#domainwithoutwww} -返回域名并删除第一个‘www.’。 +返回域名并删除第一个'www.'。 ### topLevelDomain {#topleveldomain} 返回顶级域名。例如:.ru。 -### firstSignificantSubdomain {#firstsignificantsubdomain} +### firstSignificantSubdomain {#firstsignificantsubdomain}
-返回«第一个有效子域名»。这并不是一个标准概念,仅用于Yandex.Metrica。如果顶级域名为‘com’,‘net’,‘org’或者‘co’则第一个有效子域名为二级域名。否则则返回三级域名。例如,irstSignificantSubdomain (’https://news.yandex.ru/‘) = ’yandex’, firstSignificantSubdomain (‘https://news.yandex.com.tr/’) = ‘yandex’。一些实现细节在未来可能会进行改变。 +返回«第一个有效子域名»。这并不是一个标准概念,仅用于Yandex.Metrica。如果顶级域名为'com'、'net'、'org'或者'co',则第一个有效子域名为二级域名。否则返回三级域名。例如,firstSignificantSubdomain ('https://news.yandex.ru/') = 'yandex', firstSignificantSubdomain ('https://news.yandex.com.tr/') = 'yandex'。一些实现细节在未来可能会进行改变。 ### cutToFirstSignificantSubdomain {#cuttofirstsignificantsubdomain} @@ -32,7 +33,7 @@ 例如, `cutToFirstSignificantSubdomain('https://news.yandex.com.tr/') = 'yandex.com.tr'`. -### path {#path} +### path {#path} 返回URL路径。例如:`/top/news.html`,不包含请求参数。 @@ -40,21 +41,21 @@ 与上面相同,但包括请求参数和fragment。例如:/top/news.html?page=2\#comments -### queryString {#querystring} +### queryString {#querystring} 返回请求参数。例如:page=1&lr=213。请求参数不包含最初的问号,以及\# 和\# 之后的所有内容。 -### fragment {#fragment} +### fragment {#fragment} 返回URL的fragment标识。fragment不包含\#。 -### queryStringAndFragment {#querystringandfragment} +### queryStringAndFragment {#querystringandfragment} 返回请求参数和fragment标识。例如:page=1\#29390。 -### extractURLParameter(URL, name) {#extracturlparameterurl-name} +### extractURLParameter(URL,name) {#extracturlparameterurl-name} -返回URL请求参数中名称为‘name’的参数。如果不存在则返回一个空字符串。如果存在多个匹配项则返回第一个相匹配的。此函数假设参数名称与参数值在url中的编码方式相同。 +返回URL请求参数中名称为'name'的参数。如果不存在则返回一个空字符串。如果存在多个匹配项则返回第一个相匹配的。此函数假设参数名称与参数值在url中的编码方式相同。 ### extractURLParameters(URL) {#extracturlparametersurl} @@ -68,7 +69,7 @@ 返回一个数组,其中包含以/切割的URL的所有内容。?将被包含在URL路径以及请求参数中。连续的分割符号被记为一个。 -### URLPathHierarchy(URL) {#urlpathhierarchyurl} +### URLPathHierarchy(URL) {#urlpathhierarchyurl} 与上面相同,但结果不包含协议和host部分。 /element(root)不包括在内。该函数用于在Yandex.Metric中实现导出URL的树形结构。 @@ -97,7 +98,7 @@ SELECT decodeURLComponent('http://127.0.0.1:8123/?query=SELECT%201%3B') AS Decod ### cutWWW {#cutwww} -删除开始的第一个’www.’。 +删除开始的第一个'www.'。 ### cutQueryString {#cutquerystring} 删除请求参数。问号也会被删除。 @@ -107,12 +108,12 @@ 删除fragment标识。\#同样也会被删除。 -### cutQueryStringAndFragment {#cutquerystringandfragment} +### cutQueryStringAndFragment {#cutquerystringandfragment} 删除请求参数以及fragment标识。问号以及\#也会被删除。 -### cutURLParameter(URL, name) {#cuturlparameterurl-name} +### cutURLParameter(URL,name) {#cuturlparameterurl-name} -删除URL中名称为‘name’的参数。改函数假设参数名称以及参数值经过URL相同的编码。 +删除URL中名称为'name'的参数。该函数假设参数名称以及参数值经过URL相同的编码。 [来源文章](https://clickhouse.tech/docs/en/query_language/functions/url_functions/) diff --git a/docs/zh/query_language/functions/uuid_functions.md b/docs/zh/sql_reference/functions/uuid_functions.md similarity index 87% rename from docs/zh/query_language/functions/uuid_functions.md rename to docs/zh/sql_reference/functions/uuid_functions.md index 2cb2ff30872..306a55f08a0 100644 --- a/docs/zh/query_language/functions/uuid_functions.md +++ b/docs/zh/sql_reference/functions/uuid_functions.md @@ -1,8 +1,9 @@ + # UUID函数 {#uuidhan-shu} 下面列出了所有UUID的相关函数 -## generateUUIDv4 {#uuid-function-generate} +## generateUUIDv4 {#uuid-function-generate} 生成一个UUID([版本4](https://tools.ietf.org/html/rfc4122#section-4.4))。 @@ -30,7 +31,7 @@ UUID类型的值。 └──────────────────────────────────────┘ ``` -## toUUID (x) {#touuid-x} +## toUUID(x) {#touuid-x} 将String类型的值转换为UUID类型的值。 @@ -54,7 +55,7 @@ UUID类型的值 ## UUIDStringToNum {#uuidstringtonum} -接受一个String类型的值,其中包含36个字符且格式为`xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`,将其转换为UUID的数值并以[FixedString(16)](../../data_types/fixedstring.md)将其返回。
+接受一个String类型的值,其中包含36个字符且格式为`xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`,将其转换为UUID的数值并以[FixedString(16)](../../sql_reference/data_types/fixedstring.md)将其返回。 ``` sql UUIDStringToNum(String) ``` **返回值** -FixedString(16) +FixedString(16) **使用示例** @@ -78,7 +79,7 @@ FixedString(16) ## UUIDNumToString {#uuidnumtostring} -接受一个[FixedString(16)](../../data_types/fixedstring.md)类型的值,返回其对应的String表现形式。 +接受一个[FixedString(16)](../../sql_reference/data_types/fixedstring.md)类型的值,返回其对应的String表现形式。 ``` sql UUIDNumToString(FixedString(16)) ``` **返回值** -String. +字符串。 **使用示例** @@ -102,6 +103,6 @@ SELECT ## 另请参阅 {#ling-qing-can-yue} -- [dictGetUUID](ext_dict_functions.md) +- [dictGetUUID](ext_dict_functions.md) [来源文章](https://clickhouse.tech/docs/en/query_language/functions/uuid_function/) diff --git a/docs/zh/query_language/functions/ym_dict_functions.md b/docs/zh/sql_reference/functions/ym_dict_functions.md similarity index 56% rename from docs/zh/query_language/functions/ym_dict_functions.md rename to docs/zh/sql_reference/functions/ym_dict_functions.md index 6d03ae228e8..87492ec9d12 100644 --- a/docs/zh/query_language/functions/ym_dict_functions.md +++ b/docs/zh/sql_reference/functions/ym_dict_functions.md @@ -1,36 +1,37 @@ -# Functions for working with Yandex.Metrica dictionaries {#functions-for-working-with-yandex-metrica-dictionaries} -In order for the functions below to work, the server config must specify the paths and addresses for getting all the Yandex.Metrica dictionaries. The dictionaries are loaded at the first call of any of these functions. If the reference lists can’t be loaded, an exception is thrown. +# 使用 Yandex.Metrica 字典的函数 {#functions-for-working-with-yandex-metrica-dictionaries} + +为了使下面的函数正常工作,服务器配置必须指定获取所有 Yandex.Metrica 字典的路径和地址。字典会在任一函数首次调用时加载。 如果无法加载引用列表,则会引发异常。 For information about creating reference lists, see the section «Dictionaries». ## 多个地理数据库 {#multiple-geobases} -ClickHouse supports working with multiple alternative geobases (regional hierarchies) simultaneously, in order to support various perspectives on which countries certain regions belong to. +ClickHouse支持同时使用多个备选地理数据库(区域层次结构),以支持某些区域所属国家的多种观点。 -The ‘clickhouse-server’ config specifies the file with the regional hierarchy::`/opt/geo/regions_hierarchy.txt` +'clickhouse-server' 配置文件指定了包含区域层次结构的文件:`/opt/geo/regions_hierarchy.txt` -Besides this file, it also searches for files nearby that have the \_ symbol and any suffix appended to the name (before the file extension). -For example, it will also find the file `/opt/geo/regions_hierarchy_ua.txt`, if present. +除了这个文件,它还会搜索附近名称中带有 \_ 符号和任意后缀(位于文件扩展名之前)的文件。 +例如,如果存在,它还会找到文件 `/opt/geo/regions_hierarchy_ua.txt`。 -`ua` is called the dictionary key. For a dictionary without a suffix, the key is an empty string. +`ua` 被称为字典键。 对于没有后缀的字典,键是空字符串。 -All the dictionaries are re-loaded in runtime (once every certain number of seconds, as defined in the builtin\_dictionaries\_reload\_interval config parameter, or once an hour by default). However, the list of available dictionaries is defined one time, when the server starts. +所有字典都会在运行时重新加载(每隔一定秒数重新加载一次,由 builtin\_dictionaries\_reload\_interval 配置参数定义,默认为每小时一次)。 但是,可用字典的列表只在服务器启动时定义一次。 All functions for working with regions have an optional argument at the end – the dictionary key. It is referred to as the geobase.
-Example: +示例: regionToCountry(RegionID) – Uses the default dictionary: /opt/geo/regions_hierarchy.txt regionToCountry(RegionID, '') – Uses the default dictionary: /opt/geo/regions_hierarchy.txt regionToCountry(RegionID, 'ua') – Uses the dictionary for the 'ua' key: /opt/geo/regions_hierarchy_ua.txt -### regionToCity(id\[, geobase\]) {#regiontocityid-geobase} +### regionToCity(id\[, geobase\]) {#regiontocityid-geobase} Accepts a UInt32 number – the region ID from the Yandex geobase. If this region is a city or part of a city, it returns the region ID for the appropriate city. Otherwise, returns 0. -### regionToArea(id\[, geobase\]) {#regiontoareaid-geobase} +### regionToArea(id\[, geobase\]) {#regiontoareaid-geobase} -Converts a region to an area (type 5 in the geobase). In every other way, this function is the same as ‘regionToCity’. +将区域转换为大区(area,地理数据库中的类型5)。 其他方面与 'regionToCity' 函数相同。 ``` sql SELECT DISTINCT regionToName(regionToArea(toUInt32(number), 'ua')) @@ -56,9 +57,9 @@ LIMIT 15 │ Tula region │ └──────────────────────────────────────────────────────┘ -### regionToDistrict(id\[, geobase\]) {#regiontodistrictid-geobase} +### regionToDistrict(id\[,geobase\]) {#regiontodistrictid-geobase} -Converts a region to a federal district (type 4 in the geobase). In every other way, this function is the same as ‘regionToCity’. +将区域转换为联邦区(地理数据库中的类型4)。 其他方面与 'regionToCity' 函数相同。 ``` sql SELECT DISTINCT regionToName(regionToDistrict(toUInt32(number), 'ua')) @@ -84,37 +85,37 @@ LIMIT 15 │ Federation of Bosnia and Herzegovina │ └──────────────────────────────────────────────────────────┘ -### regionToCountry(id\[, geobase\]) {#regiontocountryid-geobase} +### regionToCountry(id\[, geobase\]) {#regiontocountryid-geobase} -Converts a region to a country. In every other way, this function is the same as ‘regionToCity’. -Example: `regionToCountry(toUInt32(213)) = 225` converts Moscow (213) to Russia (225). +将区域转换为国家。 其他方面与 'regionToCity' 函数相同。 +示例: `regionToCountry(toUInt32(213)) = 225` 将莫斯科(213)转换为俄罗斯(225)。 -### regionToContinent(id\[, geobase\]) {#regiontocontinentid-geobase} +### regionToContinent(id\[, geobase\]) {#regiontocontinentid-geobase} -Converts a region to a continent. In every other way, this function is the same as ‘regionToCity’. -Example: `regionToContinent(toUInt32(213)) = 10001` converts Moscow (213) to Eurasia (10001). +将区域转换为大陆。 其他方面与 'regionToCity' 函数相同。 +示例: `regionToContinent(toUInt32(213)) = 10001` 将莫斯科(213)转换为欧亚大陆(10001)。 -### regionToPopulation(id\[, geobase\]) {#regiontopopulationid-geobase} +### regionToPopulation(id\[, geobase\]) {#regiontopopulationid-geobase} -Gets the population for a region. +获取区域的人口。 The population can be recorded in files with the geobase. See the section «External dictionaries». -If the population is not recorded for the region, it returns 0. -In the Yandex geobase, the population might be recorded for child regions, but not for parent regions. +如果没有为该区域记录人口,则返回0。 +在Yandex地理数据库中,可能会为子区域记录人口,但不会为父区域记录人口。 -### regionIn(lhs, rhs\[, geobase\]) {#regioninlhs-rhs-geobase} +### regionIn(lhs, rhs\[, geobase\]) {#regioninlhs-rhs-geobase} -Checks whether a ‘lhs’ region belongs to a ‘rhs’ region. Returns a UInt8 number equal to 1 if it belongs, or 0 if it doesn’t belong. +检查 'lhs' 区域是否属于 'rhs' 区域。 如果属于,则返回等于1的UInt8数字;如果不属于,则返回0。 The relationship is reflexive – any region also belongs to itself.
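下面是一个示意查询(假设服务器已按上文配置了地理数据库;213 和 225 是本页示例中使用的莫斯科和俄罗斯的区域 ID):

``` sql
SELECT regionIn(toUInt32(213), toUInt32(225)) AS moscow_in_russia -- 预期返回 1
```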
-### regionHierarchy(id\[, geobase\]) {#regionhierarchyid-geobase} +### regionHierarchy(id\[, geobase\]) {#regionhierarchyid-geobase} Accepts a UInt32 number – the region ID from the Yandex geobase. Returns an array of region IDs consisting of the passed region and all parents along the chain. -Example: `regionHierarchy(toUInt32(213)) = [213,1,3,225,10001,10000]`. +示例: `regionHierarchy(toUInt32(213)) = [213,1,3,225,10001,10000]`. -### regionToName(id\[, lang\]) {#regiontonameid-lang} +### regionToName(id\[, lang\]) {#regiontonameid-lang} -Accepts a UInt32 number – the region ID from the Yandex geobase. A string with the name of the language can be passed as a second argument. Supported languages are: ru, en, ua, uk, by, kz, tr. If the second argument is omitted, the language ‘ru’ is used. If the language is not supported, an exception is thrown. Returns a string – the name of the region in the corresponding language. If the region with the specified ID doesn’t exist, an empty string is returned. +Accepts a UInt32 number – the region ID from the Yandex geobase. A string with the name of the language can be passed as a second argument. Supported languages are: ru, en, ua, uk, by, kz, tr. If the second argument is omitted, the language ‘ru’ is used. If the language is not supported, an exception is thrown. Returns a string – the name of the region in the corresponding language. If the region with the specified ID doesn't exist, an empty string is returned. -`ua` and `uk` both mean Ukrainian. +`ua` 和 `uk` 都表示乌克兰语。 -[Original article](https://clickhouse.tech/docs/en/query_language/functions/ym_dict_functions/) +[原始文章](https://clickhouse.tech/docs/en/query_language/functions/ym_dict_functions/) diff --git a/docs/zh/sql_reference/index.md b/docs/zh/sql_reference/index.md new file mode 100644 index 00000000000..aed704442ab --- /dev/null +++ b/docs/zh/sql_reference/index.md @@ -0,0 +1,18 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_folder_title: "SQL\u53C2\u8003" +toc_hidden: true +toc_priority: 28 +toc_title: "\u9690\u85CF" +--- + +# SQL参考 {#sql-reference} + +- [SELECT](statements/select.md) +- [INSERT INTO](statements/insert_into.md) +- [CREATE](statements/create.md) +- [ALTER](statements/alter.md#query_language_queries_alter) +- [其他类型的查询](statements/misc.md) + +[原始文章](https://clickhouse.tech/docs/en/query_language/) diff --git a/docs/zh/query_language/operators.md b/docs/zh/sql_reference/operators.md similarity index 80% rename from docs/zh/query_language/operators.md rename to docs/zh/sql_reference/operators.md index 2e73f85d5ec..fb5c7d0ee38 100644 --- a/docs/zh/query_language/operators.md +++ b/docs/zh/sql_reference/operators.md @@ -1,3 +1,4 @@ + # 操作符 {#cao-zuo-fu} 所有的操作符(运算符)都会在查询时依据他们的优先级及其结合顺序在被解析时转换为对应的函数。下面按优先级从高到低列出各组运算符及其对应的函数: @@ -52,7 +53,7 @@ ## 集合关系运算符 {#ji-he-guan-xi-yun-suan-fu} -*详见此节 [IN 相关操作符](select.md#select-in-operators) 。* +*详见此节 [IN 相关操作符](statements/select.md#select-in-operators) 。* `a IN ...` – 对应函数 `in(a, b)` @@ -80,9 +81,9 @@ 注意: -条件运算符会先计算表达式b和表达式c的值,再根据表达式a的真假,返回相应的值。如果表达式b和表达式c是 [arrayJoin()](functions/array_join.md#functions_arrayjoin) 函数,则不管表达式a是真是假,每行都会被复制展开。 +条件运算符会先计算表达式b和表达式c的值,再根据表达式a的真假,返回相应的值。如果表达式b和表达式c是 [arrayJoin()](../sql_reference/functions/array_join.md#functions_arrayjoin) 函数,则不管表达式a是真是假,每行都会被复制展开。 -## Operators for Working with Dates and Times {#operators-datetime} +## 使用日期和时间的运算符 {#operators-datetime} ### EXTRACT {#operator-extract} ``` sql EXTRACT(part FROM date); ``` -Extracts a part from a given date.
For example, you can retrieve a month from a given date, or a second from a time. +从给定日期中提取部件。 例如,您可以从给定日期检索一个月,或从时间检索一秒钟。 -The `part` parameter specifies which part of the date to retrieve. The following values are available: +该 `part` 参数指定要检索的日期部分。 以下值可用: - `DAY` — The day of the month. Possible values: 1–31. - `MONTH` — The number of a month. Possible values: 1–12. @@ -101,11 +102,11 @@ The `part` parameter specifies which part of the date to retrieve. The following - `MINUTE` — The minute. Possible values: 0–59. - `HOUR` — The hour. Possible values: 0–23. -The `part` parameter is case-insensitive. +该 `part` 参数不区分大小写。 -The `date` parameter specifies the date or the time to process. Either [Date](../data_types/date.md) or [DateTime](../data_types/datetime.md) type is supported. +该 `date` 参数指定要处理的日期或时间。 无论是 [日期](../sql_reference/data_types/date.md) 或 [日期时间](../sql_reference/data_types/datetime.md) 支持类型。 -Examples: +例: ``` sql SELECT EXTRACT(DAY FROM toDate('2017-06-15')); @@ -113,7 +114,7 @@ SELECT EXTRACT(MONTH FROM toDate('2017-06-15')); SELECT EXTRACT(YEAR FROM toDate('2017-06-15')); ``` -In the following example we create a table and insert into it a value with the `DateTime` type. +在下面的例子中,我们创建一个表,并在其中插入一个值 `DateTime` 类型。 ``` sql CREATE TABLE test.Orders @@ -146,13 +147,13 @@ FROM test.Orders; └───────────┴────────────┴──────────┴───────────┴─────────────┴─────────────┘ ``` -You can see more examples in [tests](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00619_extract.sql). +你可以看到更多的例子 [测试](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00619_extract.sql). ### INTERVAL {#operator-interval} -Creates an [Interval](../data_types/special_data_types/interval.md)-type value that should be used in arithmetical operations with [Date](../data_types/date.md) and [DateTime](../data_types/datetime.md)-type values. 
+创建一个 [间隔](../sql_reference/operators.md)-应在算术运算中使用的类型值 [日期](../sql_reference/data_types/date.md) 和 [日期时间](../sql_reference/data_types/datetime.md)-类型值。 -Example: +示例: ``` sql SELECT now() AS current_date_time, current_date_time + INTERVAL 4 DAY + INTERVAL 3 HOUR @@ -164,10 +165,10 @@ SELECT now() AS current_date_time, current_date_time + INTERVAL 4 DAY + INTERVAL └─────────────────────┴────────────────────────────────────────────────────────┘ ``` -**See Also** +**另请参阅** -- [Interval](../data_types/special_data_types/interval.md) data type -- [toInterval](functions/type_conversion_functions.md#function-tointerval) type convertion functions +- [间隔](../sql_reference/operators.md) 数据类型 +- [toInterval](../sql_reference/operators.md#function-tointerval) 类型转换函数 ## CASE条件表达式 {#operator_case} @@ -216,7 +217,7 @@ ClickHouse 支持 `IS NULL` 和 `IS NOT NULL` 。 ### IS NULL {#operator-is-null} -- 对于 [Nullable](../data_types/nullable.md) 类型的值, `IS NULL` 会返回: +- 对于 [可为空](../sql_reference/operators.md) 类型的值, `IS NULL` 会返回: - `1` 值为 `NULL` - `0` 否则 - 对于其他类型的值, `IS NULL` 总会返回 `0` @@ -239,7 +240,7 @@ WHERE isNull(y) ### IS NOT NULL {#is-not-null} -- 对于 [Nullable](../data_types/nullable.md) 类型的值, `IS NOT NULL` 会返回: +- 对于 [可为空](../sql_reference/operators.md) 类型的值, `IS NOT NULL` 会返回: - `0` 值为 `NULL` - `1` 否则 - 对于其他类型的值,`IS NOT NULL` 总会返回 `1` diff --git a/docs/zh/sql_reference/statements/alter.md b/docs/zh/sql_reference/statements/alter.md new file mode 100644 index 00000000000..ee8911edea2 --- /dev/null +++ b/docs/zh/sql_reference/statements/alter.md @@ -0,0 +1,505 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 36 +toc_title: ALTER +--- + +## ALTER {#query_language_queries_alter} + +该 `ALTER` 查询仅支持 `*MergeTree` 表,以及 `Merge`和`Distributed`. 查询有几个变体。 + +### 列操作 {#column-manipulations} + +更改表结构。 + +``` sql +ALTER TABLE [db].name [ON CLUSTER cluster] ADD|DROP|CLEAR|COMMENT|MODIFY COLUMN ... +``` + +在查询中,指定一个或多个逗号分隔操作的列表。 +每个操作都是对列的操作。 + +支持以下操作: + +- [ADD COLUMN](#alter_add-column) — Adds a new column to the table. +- [DROP COLUMN](#alter_drop-column) — Deletes the column. +- [CLEAR COLUMN](#alter_clear-column) — Resets column values. +- [COMMENT COLUMN](#alter_comment-column) — Adds a text comment to the column. +- [MODIFY COLUMN](#alter_modify-column) — Changes column's type, default expression and TTL. + +下面详细描述这些动作。 + +#### ADD COLUMN {#alter_add-column} + +``` sql +ADD COLUMN [IF NOT EXISTS] name [type] [default_expr] [codec] [AFTER name_after] +``` + +将一个新列添加到表中,并指定 `name`, `type`, [`codec`](create.md#codecs) 和 `default_expr` (请参阅部分 [默认表达式](create.md#create-default-values)). + +如果 `IF NOT EXISTS` 如果列已经存在,则查询不会返回错误。 如果您指定 `AFTER name_after` (另一列的名称),该列被添加在表列表中指定的一列之后。 否则,该列将添加到表的末尾。 请注意,没有办法将列添加到表的开头。 为了一系列的行动, `name_after` 可将该名称一栏,加入一个以前的行动。 + +添加列只是更改表结构,而不对数据执行任何操作。 数据不会出现在磁盘上后 `ALTER`. 如果从表中读取某一列的数据缺失,则将使用默认值填充该列(如果存在默认表达式,则执行默认表达式,或使用零或空字符串)。 合并数据部分后,该列将出现在磁盘上(请参阅 [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md)). + +这种方法使我们能够完成 `ALTER` 即时查询,不增加旧数据量。 + +示例: + +``` sql +ALTER TABLE visits ADD COLUMN browser String AFTER user_id +``` + +#### DROP COLUMN {#alter_drop-column} + +``` sql +DROP COLUMN [IF EXISTS] name +``` + +删除具有名称的列 `name`. 
如果指定了 `IF EXISTS` 子句,当该列不存在时,查询不会返回错误。 + +从文件系统中删除数据。 由于这将删除整个文件,查询几乎立即完成。 + +示例: + +``` sql +ALTER TABLE visits DROP COLUMN browser +``` + +#### CLEAR COLUMN {#alter_clear-column} + +``` sql +CLEAR COLUMN [IF EXISTS] name IN PARTITION partition_name +``` + +重置指定分区的列中的所有数据。 关于如何设置分区名称,请参阅 [如何指定分区表达式](#alter-how-to-specify-part-expr)。 + +如果指定了 `IF EXISTS` 子句,当该列不存在时,查询不会返回错误。 + +示例: + +``` sql +ALTER TABLE visits CLEAR COLUMN browser IN PARTITION tuple() +``` + +#### COMMENT COLUMN {#alter_comment-column} + +``` sql +COMMENT COLUMN [IF EXISTS] name 'comment' +``` + +向列添加注释。 如果指定了 `IF EXISTS` 子句,当该列不存在时,查询不会返回错误。 + +每列可以有一个注释。 如果列的注释已存在,则新注释将覆盖以前的注释。 + +注释存储在由 [DESCRIBE TABLE](misc.md#misc-describe-table) 查询返回的 `comment_expression` 列中。 + +示例: + +``` sql +ALTER TABLE visits COMMENT COLUMN browser 'The table shows the browser used for accessing the site.' +``` + +#### MODIFY COLUMN {#alter_modify-column} + +``` sql +MODIFY COLUMN [IF EXISTS] name [type] [default_expr] [TTL] +``` + +此查询更改 `name` 列的属性: + +- 类型 + +- 默认表达式 + +- TTL + + For examples of columns TTL modifying, see [Column TTL](../engines/table_engines/mergetree_family/mergetree.md#mergetree-column-ttl). + +如果指定了 `IF EXISTS` 子句,当该列不存在时,查询不会返回错误。 + +更改类型时,会使用 [toType](../../sql_reference/functions/type_conversion_functions.md) 函数对值进行转换。 如果仅更改默认表达式,则查询不会执行任何复杂的操作,并且几乎立即完成。 + +示例: + +``` sql +ALTER TABLE visits MODIFY COLUMN browser Array(String) +``` + +Changing the column type is the only complex action – it changes the contents of files with data. For large tables, this may take a long time. + +有几个处理阶段: + +- 准备具有修改数据的临时(新)文件。 +- 重命名旧文件。 +- 将临时(新)文件重命名为旧名称。 +- 删除旧文件。 + +只有第一阶段需要时间。 如果在此阶段出现故障,则不会更改数据。 +如果在其中一个连续阶段中出现故障,可以手动恢复数据。 例外情况是,如果旧文件从文件系统中删除,但新文件的数据没有写入磁盘并丢失。 + +更改列的 `ALTER` 查询会被复制。 这些指令保存在ZooKeeper中,然后每个副本应用它们。 全部 `ALTER` 查询以相同的顺序运行。 查询会等待其他副本完成相应的操作。 但是,更改复制表中的列的查询可能会中断,并且所有操作都将异步执行。 + +#### 更改查询限制 {#alter-query-limitations} + +该 `ALTER` 查询允许您在嵌套数据结构中创建和删除单独的元素(列),但不能创建整个嵌套数据结构。 要添加嵌套数据结构,可以添加名称形如 `name.nested_name`、类型为 `Array(T)` 的列(参见本小节末尾的示意示例)。 嵌套数据结构等效于名称在点之前具有相同前缀的多个数组列。 + +不支持删除主键或采样键中的列(即在 `ENGINE` 表达式中使用的列)。 只有在此更改不会导致数据被修改时,才可以更改主键中包含的列的类型(例如,允许向枚举添加值,或将类型从 `DateTime` 更改为 `UInt32`)。
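下面是一个示意性示例,演示如何为嵌套数据结构添加新元素(表名 `visits` 沿用上文示例,嵌套结构 `goals` 及其列名均为假设):

``` sql
-- 假设表中已存在嵌套结构 goals(即 goals.id 等 Array 列);
-- 为其添加一个新元素,只需添加同前缀的 Array 列
ALTER TABLE visits ADD COLUMN `goals.reach` Array(Int32) AFTER `goals.id`
```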
如果 `ALTER` 查询不足以完成您需要的表更改,您可以创建一个新表,使用 [INSERT SELECT](insert_into.md#insert_query_insert-select) 查询将数据复制过去,然后使用 [RENAME](misc.md#misc_operations-rename) 查询切换这两个表,再删除旧表。 您可以使用 [clickhouse-copier](../../operations/utilities/clickhouse-copier.md) 作为 `INSERT SELECT` 查询的替代方案。 + +该 `ALTER` 查询阻止对表的所有读取和写入。 换句话说,如果在 `ALTER` 查询时有一个长时间运行的 `SELECT`,`ALTER` 查询将等待它完成;同时,对同一个表的所有新查询将等待该 `ALTER` 运行完成。 + +对于本身不存储数据的表(例如 `Merge` 和 `Distributed`),`ALTER` 只是改变了表结构,并不改变从属表的结构。 例如,对 `Distributed` 表运行 ALTER 时,你还需要对所有远程服务器上的表运行 `ALTER`。 + +### 使用键表达式进行操作 {#manipulations-with-key-expressions} + +支持以下命令: + +``` sql +MODIFY ORDER BY new_expression +``` + +它只适用于 [`MergeTree`](../../engines/table_engines/mergetree_family/mergetree.md) 家族的表(包括 +[复制](../../engines/table_engines/mergetree_family/replication.md) 表)。 该命令将表的 +[排序键](../../engines/table_engines/mergetree_family/mergetree.md) 更改 +为 `new_expression`(表达式或表达式元组)。 主键保持不变。 + +该命令是轻量级的,因为它只更改元数据。 为了保持数据部件中的行按排序键表达式排序这一属性, +您不能向排序键添加包含现有列的表达式 +(只能添加在同一个 `ALTER` 查询中由 `ADD COLUMN` 命令新增的列)。 + +### 使用数据跳过索引进行操作 {#manipulations-with-data-skipping-indices} + +它只适用于 [`*MergeTree`](../../engines/table_engines/mergetree_family/mergetree.md) 家族的表(包括 +[复制](../../engines/table_engines/mergetree_family/replication.md) 表)。 以下操作 +可用: + +- `ALTER TABLE [db].name ADD INDEX name expression TYPE type GRANULARITY value AFTER name [AFTER name2]` -将索引描述添加到表元数据。 + +- `ALTER TABLE [db].name DROP INDEX name` -从表元数据中删除索引描述并从磁盘中删除索引文件。 + +这些命令是轻量级的,因为它们只更改元数据或删除文件。 +此外,它们会被复制(通过ZooKeeper同步索引元数据)。 + +### 使用约束进行操作 {#manipulations-with-constraints} + +查看更多 [约束](create.md#constraints) + +可以使用以下语法添加或删除约束: + +``` sql +ALTER TABLE [db].name ADD CONSTRAINT constraint_name CHECK expression; +ALTER TABLE [db].name DROP CONSTRAINT constraint_name; +``` + +查询将从表中添加或删除有关约束的元数据,以便立即处理它们。 + +如果约束是后来添加的,*不会* 对现有数据执行约束检查。 + +复制表上的所有更改都广播到ZooKeeper,因此将应用于其他副本。 + +### 分区和部件的操作 {#alter_manipulations-with-partitions} + +以下与 [分区](../../engines/table_engines/mergetree_family/custom_partitioning_key.md) 相关的操作可用: + +- [DETACH PARTITION](#alter_detach-partition) – Moves a partition to the `detached` 目录并忘记它。 +- [DROP PARTITION](#alter_drop-partition) – Deletes a partition. +- [ATTACH PART\|PARTITION](#alter_attach-partition) – Adds a part or partition from the `detached` 目录到表。 +- [ATTACH PARTITION FROM](#alter_attach-partition-from) – Copies the data partition from one table to another and adds. +- [REPLACE PARTITION](#alter_replace-partition) -将数据分区从一个表复制到另一个表并替换。 +- [MOVE PARTITION TO TABLE](#alter_move_to_table-partition) -将数据分区从一个表移动到另一个表。 +- [CLEAR COLUMN IN PARTITION](#alter_clear-column-partition) -重置分区中指定列的值。 +- [CLEAR INDEX IN PARTITION](#alter_clear-index-partition) -重置分区中指定的二级索引。 +- [FREEZE PARTITION](#alter_freeze-partition) – Creates a backup of a partition. +- [FETCH PARTITION](#alter_fetch-partition) – Downloads a partition from another server. +- [MOVE PARTITION\|PART](#alter_move-partition) – Move partition/data part to another disk or volume. + + + +#### DETACH PARTITION {#alter_detach-partition} + +``` sql +ALTER TABLE table_name DETACH PARTITION partition_expr +``` + +将指定分区的所有数据移动到 `detached` 目录。 服务器会忘记分离的数据分区,就好像它不存在一样。 在你执行 [ATTACH](#alter_attach-partition) 查询之前,服务器不会知道这些数据的存在。 + +示例: + +``` sql +ALTER TABLE visits DETACH PARTITION 201901 +``` + +请参阅 [如何指定分区表达式](#alter-how-to-specify-part-expr) 一节,了解如何设置分区表达式。
执行查询后,您可以对 `detached` 目录中的数据进行任何操作:从文件系统中删除它,或者就这样保留它。 + +此查询会被复制 – 它将数据移动到所有副本的 `detached` 目录。 请注意,您只能对领导(leader)副本执行此查询。 要确定副本是否为领导者,请对 [system.replicas](../../operations/system_tables.md#system_tables-replicas) 表执行 `SELECT` 查询。 或者,更简单的做法是对所有副本执行 `DETACH` 查询:除领导副本之外,所有副本都会引发异常。 + +#### DROP PARTITION {#alter_drop-partition} + +``` sql +ALTER TABLE table_name DROP PARTITION partition_expr +``` + +从表中删除指定的分区。 此查询将分区标记为非活动分区,并在大约10分钟内完全删除数据。 + +请参阅 [如何指定分区表达式](#alter-how-to-specify-part-expr) 一节,了解如何设置分区表达式。 + +The query is replicated – it deletes data on all replicas. + +#### DROP DETACHED PARTITION\|PART {#alter_drop-detached} + +``` sql +ALTER TABLE table_name DROP DETACHED PARTITION|PART partition_expr +``` + +从 `detached` 中删除指定分区的指定部件或所有部件。 +请参阅 [如何指定分区表达式](#alter-how-to-specify-part-expr) 一节,了解如何设置分区表达式。 + +#### ATTACH PARTITION\|PART {#alter_attach-partition} + +``` sql +ALTER TABLE table_name ATTACH PARTITION|PART partition_expr +``` + +将数据从 `detached` 目录添加到表中。 可以为整个分区或单独的部件添加数据。 例: + +``` sql +ALTER TABLE visits ATTACH PARTITION 201901; +ALTER TABLE visits ATTACH PART 201901_2_2_0; +``` + +请参阅 [如何指定分区表达式](#alter-how-to-specify-part-expr) 一节,了解如何设置分区表达式。 + +此查询会被复制。 副本发起程序会检查 `detached` 目录中是否有数据。 如果数据存在,则查询将检查其完整性。 如果一切正确,则查询将数据添加到表中。 所有其他副本都从副本发起程序下载数据。 + +因此,你可以将数据放到某个副本的 `detached` 目录中,然后使用 `ALTER ... ATTACH` 查询将其添加到所有副本的表中。 + +#### ATTACH PARTITION FROM {#alter_attach-partition-from} + +``` sql +ALTER TABLE table2 ATTACH PARTITION partition_expr FROM table1 +``` + +此查询将数据分区从 `table1` 复制到 `table2`,并追加到 `table2` 的现有数据中。 请注意,数据不会从 `table1` 中删除。 + +要使查询成功运行,必须满足以下条件: + +- 两个表必须具有相同的结构。 +- 两个表必须具有相同的分区键。 + +#### REPLACE PARTITION {#alter_replace-partition} + +``` sql +ALTER TABLE table2 REPLACE PARTITION partition_expr FROM table1 +``` + +此查询将数据分区从 `table1` 复制到 `table2`,并替换 `table2` 中的现有分区。 请注意,数据不会从 `table1` 中删除。 + +要使查询成功运行,必须满足以下条件: + +- 两个表必须具有相同的结构。 +- 两个表必须具有相同的分区键。 + +#### MOVE PARTITION TO TABLE {#alter_move_to_table-partition} + +``` sql +ALTER TABLE table_source MOVE PARTITION partition_expr TO TABLE table_dest +``` + +此查询将数据分区从 `table_source` 移动到 `table_dest`,并从 `table_source` 中删除这些数据。 + +要使查询成功运行,必须满足以下条件: + +- 两个表必须具有相同的结构。 +- 两个表必须具有相同的分区键。 +- 两个表必须是相同的引擎系列。 (已复制或未复制) +- 两个表必须具有相同的存储策略。 + +#### CLEAR COLUMN IN PARTITION {#alter_clear-column-partition} + +``` sql +ALTER TABLE table_name CLEAR COLUMN column_name IN PARTITION partition_expr +``` + +重置分区中指定列的所有值。 如果建表时确定了 `DEFAULT` 子句,此查询会将列值设置为指定的默认值。 + +示例: + +``` sql +ALTER TABLE visits CLEAR COLUMN hour in PARTITION 201902 +``` + +#### FREEZE PARTITION {#alter_freeze-partition} + +``` sql +ALTER TABLE table_name FREEZE [PARTITION partition_expr] +``` + +此查询创建指定分区的本地备份。 如果省略 `PARTITION` 子句,查询会一次性创建所有分区的备份。 + +!!! note "注" + 整个备份过程在不停止服务器的情况下执行。 + +请注意,对于旧式表,您可以指定分区名称的前缀(例如,'2019'),此时查询会为所有相应的分区创建备份。 请参阅 [如何指定分区表达式](#alter-how-to-specify-part-expr) 一节,了解如何设置分区表达式。 + +在执行时,查询会为数据快照创建指向表数据的硬链接。 硬链接放置在 `/var/lib/clickhouse/shadow/N/...` 目录中,其中: + +- `/var/lib/clickhouse/` 是配置中指定的工作ClickHouse目录。 +- `N` 是备份的增量编号。 + +!!! note "注" + 如果您使用 [用于在表中存储数据的一组磁盘](../../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-multiple-volumes),每个磁盘上都会出现 `shadow/N` 目录,其中存储与 `PARTITION` 表达式匹配的数据部件。 + +备份内部创建的目录结构与 `/var/lib/clickhouse/` 内部的目录结构相同。
查询会对所有文件执行 'chmod',禁止对其写入。 + +创建备份后,您可以将数据从 `/var/lib/clickhouse/shadow/` 复制到别处,然后将其从本地服务器中删除。 请注意,`ALTER t FREEZE PARTITION` 查询不会被复制。 它仅在本地服务器上创建本地备份。 + +查询几乎立即创建备份(但首先它会等待对相应表的当前查询完成运行)。 + +`ALTER TABLE t FREEZE PARTITION` 仅复制数据,而不复制表元数据。 若要备份表元数据,请复制文件 `/var/lib/clickhouse/metadata/database/table.sql` + +要从备份还原数据,请执行以下操作: + +1. 如果表不存在,则创建该表。 要查看查询,请使用 .sql 文件(将其中的 `ATTACH` 替换为 `CREATE`)。 +2. 将数据从备份中的 `data/database/table/` 目录复制到 `/var/lib/clickhouse/data/database/table/detached/` 目录。 +3. 运行 `ALTER TABLE t ATTACH PARTITION` 查询,将数据添加到表中。 + +从备份还原不需要停止服务器。 + +有关备份和还原数据的详细信息,请参阅 [数据备份](../../operations/backup.md) 一节。 + +#### CLEAR INDEX IN PARTITION {#alter_clear-index-partition} + +``` sql +ALTER TABLE table_name CLEAR INDEX index_name IN PARTITION partition_expr +``` + +查询的工作原理类似于 `CLEAR COLUMN`,但它重置的是索引而不是列数据。 + +#### FETCH PARTITION {#alter_fetch-partition} + +``` sql +ALTER TABLE table_name FETCH PARTITION partition_expr FROM 'path-in-zookeeper' +``` + +从另一台服务器下载分区。 此查询仅适用于复制表。 + +查询执行以下操作: + +1. 从指定的分片下载分区。 在 'path-in-zookeeper' 中,您必须指定分片在ZooKeeper中的路径。 +2. 然后,查询将下载的数据放到 `table_name` 表的 `detached` 目录中。 使用 [ATTACH PARTITION\|PART](#alter_attach-partition) 查询将数据添加到表中。 + +例如: + +``` sql +ALTER TABLE users FETCH PARTITION 201902 FROM '/clickhouse/tables/01-01/visits'; +ALTER TABLE users ATTACH PARTITION 201902; +``` + +请注意: + +- `ALTER ... FETCH PARTITION` 查询不会被复制。 它仅将分区放置在本地服务器的 `detached` 目录中。 +- `ALTER TABLE ... ATTACH` 查询会被复制。 它将数据添加到所有副本:其中一个副本的数据来自 `detached` 目录,其他副本则从相邻的副本下载。 + +在下载之前,系统会检查分区是否存在并且表结构匹配。 系统会从正常副本中自动选择最合适的副本。 + +虽然该查询名为 `ALTER TABLE`,但它不会更改表结构,也不会立即更改表中可用的数据。 + +#### MOVE PARTITION\|PART {#alter_move-partition} + +将 `MergeTree` 引擎表的分区或数据部件移动到另一个卷或磁盘。 参见 [使用多个块设备进行数据存储](../../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-multiple-volumes)。 + +``` sql +ALTER TABLE table_name MOVE PARTITION|PART partition_expr TO DISK|VOLUME 'disk_name' +``` + +`ALTER TABLE t MOVE` 查询: + +- 不会被复制,因为不同的副本可能具有不同的存储策略。 +- 如果未配置指定的磁盘或卷,则返回错误。 如果无法应用存储策略中指定的数据移动条件,查询还会返回错误。 +- 当要移动的数据已经被后台进程、并发的 `ALTER TABLE t MOVE` 查询或后台数据合并移动时,可能返回错误。 在这种情况下,用户不需要执行任何额外的操作。 + +示例: + +``` sql +ALTER TABLE hits MOVE PART '20190301_14343_16206_438' TO VOLUME 'slow' +ALTER TABLE hits MOVE PARTITION '2019-09-01' TO DISK 'fast_ssd' +``` + +#### 如何设置分区表达式 {#alter-how-to-specify-part-expr} + +您可以通过以下几种方式在 `ALTER ... PARTITION` 查询中指定分区表达式: + +- 作为 `system.parts` 表 `partition` 列中的值。 例如, `ALTER TABLE visits DETACH PARTITION 201901`. +- 作为来自表列的表达式。 支持常量和常量表达式。 例如, `ALTER TABLE visits DETACH PARTITION toYYYYMM(toDate('2019-01-25'))`. +- 使用分区ID。 分区ID是用作文件系统和ZooKeeper中分区名称的分区的字符串标识符(如果可能的话,人类可读)。 分区ID必须在 `PARTITION ID` 子句中指定,并用单引号括起来。 例如, `ALTER TABLE visits DETACH PARTITION ID '201901'`. +- 在 [ALTER ATTACH PART](#alter_attach-partition) 和 [DROP DETACHED PART](#alter_drop-detached) 查询中,要指定部件的名称,请使用来自 [system.detached\_parts](../../operations/system_tables.md#system_tables-detached_parts) 表 `name` 列的字符串文字。 例如, `ALTER TABLE visits ATTACH PART '201901_1_1_0'`. + +指定分区时引号的使用取决于分区表达式的类型。 例如,对于 `String` 类型,你必须在引号(`'`)中指定其名称。 对于 `Date` 和 `Int*` 类型则不需要引号。 + +对于旧式表,您可以将分区指定为数字 `201901` 或者字符串 `'201901'`。 新样式表在类型上的语法更严格(类似于VALUES输入格式的解析器)。 + +上述所有规则也适用于 [OPTIMIZE](misc.md#misc_operations-optimize) 查询。 如果在优化非分区表时需要指定唯一的分区,请设置表达式 `PARTITION tuple()`. 例如: + +``` sql +OPTIMIZE TABLE table_not_partitioned PARTITION tuple() FINAL; +``` + +`ALTER ...
PARTITION` 查询的示例在测试 [`00502_custom_partitioning_local`](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00502_custom_partitioning_local.sql) 和 [`00502_custom_partitioning_replicated_zookeeper`](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper.sql) 中演示。 + +### 使用表TTL进行操作 {#manipulations-with-table-ttl} + +你可以使用以下形式的查询更改 [表TTL](../../engines/table_engines/mergetree_family/mergetree.md#mergetree-table-ttl): + +``` sql +ALTER TABLE table-name MODIFY TTL ttl-expression +``` + +### ALTER查询的同步性 {#synchronicity-of-alter-queries} + +对于不可复制的表,所有 `ALTER` 查询都是同步执行的。 对于可复制的表,查询仅将相应操作的指令添加到 `ZooKeeper`,操作本身会尽快执行。 但是,查询可以等待在所有副本上完成这些操作。 + +对于 `ALTER ... ATTACH|DETACH|DROP` 查询,您可以使用 `replication_alter_partitions_sync` 设置来配置等待行为。 +可能的值: `0` – do not wait; `1` – only wait for own execution (default); `2` – wait for all. + +### 突变 {#alter-mutations} + +突变(mutation)是允许更改或删除表中行的ALTER查询变体。 与用于点数据更改的标准 `UPDATE` 和 `DELETE` 查询相比,突变适用于更改表中大量行的繁重操作。 `MergeTree` 系列表引擎支持突变,包括具有复制支持的引擎。 + +现有表可以按原样进行突变(无需转换),但是在将第一次突变应用于表之后,其元数据格式将与以前的服务器版本不兼容,并且无法回退到以前的版本。 + +当前可用的命令: + +``` sql +ALTER TABLE [db.]table DELETE WHERE filter_expr +``` + +`filter_expr` 必须是 `UInt8` 类型。 查询删除表中使此表达式取非零值的行。 + +``` sql +ALTER TABLE [db.]table UPDATE column1 = expr1 [, ...] WHERE filter_expr +``` + +`filter_expr` 必须是 `UInt8` 类型。 此查询在 `filter_expr` 取非零值的行中,将指定列的值更新为相应表达式的值。 使用 `CAST` 运算符将值转换为列类型。 不支持更新用于计算主键或分区键的列。 + +``` sql +ALTER TABLE [db.]table MATERIALIZE INDEX name IN PARTITION partition_name +``` + +查询将重新生成分区 `partition_name` 中的二级索引 `name`。 + +一个查询可以包含多个用逗号分隔的命令。 + +对于\*MergeTree表,突变通过重写整个数据部件来执行。 该操作没有原子性:部件一准备好就会被已突变的部件替换,在突变期间开始执行的 `SELECT` 查询将同时看到来自已突变部件的数据和尚未突变部件的数据。 + +突变完全按其创建顺序排序,并以该顺序应用于每个部件。 突变与插入之间也有部分排序:在提交突变之前插入表中的数据会被突变,之后插入的数据则不会。 请注意,突变不会以任何方式阻塞插入。 + +突变查询在添加突变条目后立即返回(对于复制表,条目添加到ZooKeeper;对于非复制表,添加到文件系统)。 突变本身使用系统配置文件设置异步执行。 要跟踪突变的进度,您可以使用 [`system.mutations`](../../operations/system_tables.md#system_tables-mutations) 表。 即使重新启动ClickHouse服务器,成功提交的突变仍将继续执行。 突变一旦提交就无法回滚,但如果突变由于某种原因卡住,可以使用 [`KILL MUTATION`](misc.md#kill-mutation) 查询取消它。 + +已完成突变的条目不会立即删除(保留条目的数量由存储引擎参数 `finished_mutations_to_keep` 决定),更早的突变条目会被删除。 + +[原始文章](https://clickhouse.tech/docs/en/query_language/alter/) diff --git a/docs/zh/query_language/create.md b/docs/zh/sql_reference/statements/create.md similarity index 68% rename from docs/zh/query_language/create.md rename to docs/zh/sql_reference/statements/create.md index 94c4ea3669c..1697df692b5 100644 --- a/docs/zh/query_language/create.md +++ b/docs/zh/sql_reference/statements/create.md @@ -1,3 +1,4 @@ + ## CREATE DATABASE {#create-database} 该查询用于根据指定名称创建数据库。 @@ -22,7 +23,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] ) ENGINE = engine ``` -在指定的‘db’数据库中创建一个名为‘name’的表,如果查询中没有包含‘db’,则默认使用当前选择的数据库作为‘db’。后面的是包含在括号中的表结构以及表引擎的声明。 +在指定的'db'数据库中创建一个名为'name'的表,如果查询中没有包含'db',则默认使用当前选择的数据库作为'db'。后面的是包含在括号中的表结构以及表引擎的声明。 其中表结构声明是一个包含一组列描述声明的组合。如果表引擎是支持索引的,那么可以在表引擎的参数中对其进行说明。 在最简单的情况下,列描述是指`名称 类型`这样的子句。例如: `RegionID UInt32`。 @@ -42,16 +43,16 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name ENGINE = engine AS SELECT ...
以上所有情况,如果指定了`IF NOT EXISTS`,那么在该表已经存在的情况下,查询不会返回任何错误。在这种情况下,查询几乎不会做任何事情。 -在`ENGINE`子句后还可能存在一些其他的子句,更详细的信息可以参考 [表引擎](../operations/table_engines/index.md) 中关于建表的描述。 +在`ENGINE`子句后还可能存在一些其他的子句,更详细的信息可以参考 [表引擎](../../sql_reference/statements/create.md) 中关于建表的描述。 ### 默认值 {#create-default-values} 在列描述中你可以通过以下方式之一为列指定默认表达式:`DEFAULT expr`,`MATERIALIZED expr`,`ALIAS expr`。 示例:`URLDomain String DEFAULT domain(URL)`。 -如果在列描述中未定义任何默认表达式,那么系统将会根据类型设置对应的默认值,如:数值类型为零、字符串类型为空字符串、数组类型为空数组、日期类型为‘0000-00-00’以及时间类型为‘0000-00-00 00:00:00’。不支持使用NULL作为普通类型的默认值。 +如果在列描述中未定义任何默认表达式,那么系统将会根据类型设置对应的默认值,如:数值类型为零、字符串类型为空字符串、数组类型为空数组、日期类型为'0000-00-00'以及时间类型为'0000-00-00 00:00:00'。不支持使用NULL作为普通类型的默认值。 -如果定义了默认表达式,则可以不定义列的类型。如果没有明确的定义类的类型,则使用默认表达式的类型。例如:`EventDate DEFAULT toDate(EventTime)` - 最终‘EventDate’将使用‘Date’作为类型。 +如果定义了默认表达式,则可以不定义列的类型。如果没有明确的定义列的类型,则使用默认表达式的类型。例如:`EventDate DEFAULT toDate(EventTime)` - 最终'EventDate'将使用'Date'作为类型。 如果同时指定了默认表达式与列的类型,则将使用类型转换函数将默认表达式转换为指定的类型。例如:`Hits UInt32 DEFAULT 0`与`Hits UInt32 DEFAULT toUInt32(0)`意思相同。 @@ -65,7 +66,7 @@ 物化表达式,被该表达式指定的列不能包含在INSERT的列表中,因为它总是被计算出来的。 对于INSERT而言,不需要考虑这些列。 -另外,在SELECT查询中如果包含星号,此列不会被用来替换星号,这是因为考虑到数据转储,在使用`SELECT *`查询出的结果总能够被’INSERT’回表。 +另外,在SELECT查询中如果包含星号,此列不会被用来替换星号,这是因为考虑到数据转储,在使用`SELECT *`查询出的结果总能够被'INSERT'回表。 `ALIAS expr` @@ -79,9 +80,9 @@ 不能够为nested类型的列设置默认值。 -### Constraints {#constraints} +### 约束 {#constraints} -Along with columns descriptions constraints could be defined: +除了列描述之外,还可以定义约束: ``` sql CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] ) ENGINE = engine ``` -`boolean_expr_1` could by any boolean expression. If constraints are defined for the table, each of them will be checked for every row in `INSERT` query. If any constraint is not satisfied — server will raise an exception with constraint name and checking expression. +`boolean_expr_1` 可以是任何布尔表达式。 如果为表定义了约束,则会对 `INSERT` 查询中的每一行检查每个约束。如果任何约束不满足,服务器将引发包含约束名称和检查表达式的异常。 +添加大量约束会对大型 `INSERT` 查询的性能产生负面影响。 -### TTL Expression {#ttl-expression} +### TTL表达式 {#ttl-expression} -Defines storage time for values. Can be specified only for MergeTree-family tables. For the detailed description, see [TTL for columns and tables](../operations/table_engines/mergetree.md#table_engine-mergetree-ttl). +定义值的存储时间。 只能为MergeTree系列表指定。 有关详细说明,请参阅 [列和表的TTL](../../sql_reference/statements/create.md#table_engine-mergetree-ttl). -### Column Compression Codecs {#codecs} +### 列压缩编解码器 {#codecs} -By default, ClickHouse applies the compression method, defined in [server settings](../operations/server_settings/settings.md#server-settings-compression), to columns. You can also define the compression method for each individual column in the `CREATE TABLE` query. +默认情况下,ClickHouse会对列应用 [服务器设置](../../sql_reference/statements/create.md#server-settings-compression) 中定义的压缩方法。 您还可以在 `CREATE TABLE` 查询中为每个单独的列定义压缩方法。 ``` sql CREATE TABLE codec_example ENGINE = ... ``` -If a codec is specified, the default codec doesn’t apply. Codecs can be combined in a pipeline, for example, `CODEC(Delta, ZSTD)`.
To select the best codec combination for you project, pass benchmarks similar to described in the Altinity [New Encodings to Improve ClickHouse Efficiency](https://www.altinity.com/blog/2019/7/new-encodings-to-improve-clickhouse) article. +如果指定了编解码器,则默认编解码器不适用。 编解码器可以组合在一个流水线中,例如, `CODEC(Delta, ZSTD)`. 要为您的项目选择最佳的编解码器组合,请运行与Altinity文章 [新编码提高ClickHouse效率](https://www.altinity.com/blog/2019/7/new-encodings-to-improve-clickhouse) 中描述类似的基准测试。 !!! warning "警告" 您无法使用 `lz4` 之类的外部实用程序解压缩ClickHouse数据库文件。 请改用专门的 [clickhouse-compressor](https://github.com/ClickHouse/ClickHouse/tree/master/programs/compressor) 实用程序。 下列表引擎支持压缩: - [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md) 家族 - [Log](../../engines/table_engines/log_family/log.md) 家族 - [Set](../../engines/table_engines/special/set.md) - [Join](../../engines/table_engines/special/join.md) ClickHouse支持通用编解码器和专用编解码器。 #### 专用编解码器 {#create-query-specialized-codecs} 这些编解码器旨在通过利用数据的特定特征使压缩更有效。 其中一些编解码器本身不压缩数据,而是对数据进行预处理,使得使用通用编解码器压缩时效果更好。 专用编解码器: - `Delta(delta_bytes)` — Compression approach in which raw values are replaced by the difference of two neighboring values, except for the first value that stays unchanged. Up to `delta_bytes` are used for storing delta values, so `delta_bytes` is the maximum size of raw values. Possible `delta_bytes` values: 1, 2, 4, 8. The default value for `delta_bytes` is `sizeof(type)` if equal to 1, 2, 4, or 8. In all other cases, it’s 1. - `DoubleDelta` — Calculates delta of deltas and writes it in compact binary form. Optimal compression rates are achieved for monotonic sequences with a constant stride, such as time series data. Can be used with any fixed-width type. Implements the algorithm used in Gorilla TSDB, extending it to support 64-bit types. Uses 1 extra bit for 32-byte deltas: 5-bit prefixes instead of 4-bit prefixes. For additional information, see Compressing Time Stamps in [Gorilla: A Fast, Scalable, In-Memory Time Series Database](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf). - `Gorilla` — Calculates XOR between current and previous value and writes it in compact binary form. Efficient when storing a series of floating point values that change slowly, because the best compression rate is achieved when neighboring values are binary equal. Implements the algorithm used in Gorilla TSDB, extending it to support 64-bit types. For additional information, see Compressing Values in [Gorilla: A Fast, Scalable, In-Memory Time Series Database](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf).
- `T64` — Compression approach that crops unused high bits of values in integer data types (including `Enum`, `Date` and `DateTime`). At each step of its algorithm, codec takes a block of 64 values, puts them into 64x64 bit matrix, transposes it, crops the unused bits of values and returns the rest as a sequence. Unused bits are the bits, that don’t differ between maximum and minimum values in the whole data part for which the compression is used. +- `Delta(delta_bytes)` — Compression approach in which raw values are replaced by the difference of two neighboring values, except for the first value that stays unchanged. 最多使用 `delta_bytes` 个字节存储增量值,所以 `delta_bytes` 是原始值的最大大小。 可能的 `delta_bytes` 值:1、2、4、8。 如果 `sizeof(type)` 等于1、2、4或8,则 `delta_bytes` 的默认值为 `sizeof(type)`;在所有其他情况下为1。 +- `DoubleDelta` — Calculates delta of deltas and writes it in compact binary form. Optimal compression rates are achieved for monotonic sequences with a constant stride, such as time series data. Can be used with any fixed-width type. Implements the algorithm used in Gorilla TSDB, extending it to support 64-bit types. Uses 1 extra bit for 32-byte deltas: 5-bit prefixes instead of 4-bit prefixes. For additional information, see Compressing Time Stamps in [Gorilla:一个快速、可扩展的内存时间序列数据库](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf). +- `Gorilla` — Calculates XOR between current and previous value and writes it in compact binary form. Efficient when storing a series of floating point values that change slowly, because the best compression rate is achieved when neighboring values are binary equal. Implements the algorithm used in Gorilla TSDB, extending it to support 64-bit types. For additional information, see Compressing Values in [Gorilla:一个快速、可扩展的内存时间序列数据库](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf). +- `T64` — Compression approach that crops unused high bits of values in integer data types (including `Enum`, `Date` 和 `DateTime`). 在算法的每个步骤中,编解码器采用64个值的块,将它们放入64x64位矩阵中,对其进行转置,裁剪未使用的值位并将其余部分作为序列返回。 未使用的位是指在应用压缩的整个数据部件中,最大值和最小值之间没有区别的位。 `DoubleDelta` 和 `Gorilla` 编解码器在Gorilla TSDB中用作其压缩算法的组件。 当存在一系列随时间戳缓慢变化的值时,Gorilla方法是有效的。 时间戳由 `DoubleDelta` 编解码器有效压缩,而值由 `Gorilla` 编解码器有效压缩。 例如,要获取有效存储的表,可以在以下配置中创建它: ``` sql CREATE TABLE codec_example ( ENGINE = MergeTree() ``` #### 通用编解码器 {#create-query-common-purpose-codecs} 编解码器: - `NONE` — No compression. - `LZ4` — Lossless [data compression algorithm](https://github.com/lz4/lz4) used by default. Applies LZ4 fast compression. - `LZ4HC[(level)]` — LZ4 HC (high compression) algorithm with configurable level. Default level: 9. Setting `level <= 0` applies the default level. Possible levels: \[1, 12\]. Recommended level range: \[4, 9\]. - `ZSTD[(level)]` — [ZSTD compression algorithm](https://en.wikipedia.org/wiki/Zstandard) with configurable `level`. Possible levels: \[1, 22\]. Default value: 1.
+- `LZ4` — Lossless [数据压缩算法](https://github.com/lz4/lz4) 默认情况下使用。 应用LZ4快速压缩。 +- `LZ4HC[(level)]` — LZ4 HC (high compression) algorithm with configurable level. Default level: 9. Setting `level <= 0` 应用默认级别。 可能的水平:\[1,12\]。 推荐级别范围:\[4,9\]。 +- `ZSTD[(level)]` — [ZSTD压缩算法](https://en.wikipedia.org/wiki/Zstandard) 可配置 `level`. 可能的水平:\[1,22\]。 默认值:1。 -High compression levels are useful for asymmetric scenarios, like compress once, decompress repeatedly. Higher levels mean better compression and higher CPU usage. +高压缩级别对于非对称场景非常有用,例如压缩一次,重复解压缩。 更高的级别意味着更好的压缩和更高的CPU使用率。 ## 临时表 {#lin-shi-biao} diff --git a/docs/zh/sql_reference/statements/index.md b/docs/zh/sql_reference/statements/index.md new file mode 100644 index 00000000000..bb04551dea1 --- /dev/null +++ b/docs/zh/sql_reference/statements/index.md @@ -0,0 +1,8 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_folder_title: "\u53D1\u8A00" +toc_priority: 31 +--- + + diff --git a/docs/zh/query_language/insert_into.md b/docs/zh/sql_reference/statements/insert_into.md similarity index 89% rename from docs/zh/query_language/insert_into.md rename to docs/zh/sql_reference/statements/insert_into.md index b271f62bb03..a59730f5750 100644 --- a/docs/zh/query_language/insert_into.md +++ b/docs/zh/sql_reference/statements/insert_into.md @@ -1,3 +1,4 @@ + ## INSERT {#insert} INSERT查询主要用于向系统中添加数据. @@ -13,9 +14,9 @@ INSERT INTO [db.]table [(c1, c2, c3)] VALUES (v11, v12, v13), (v21, v22, v23), . - 如果存在`DEFAULT`表达式,根据`DEFAULT`表达式计算被填充的值。 - 如果没有定义`DEFAULT`表达式,则填充零或空字符串。 -如果 [strict\_insert\_defaults=1](../operations/settings/settings.md),你必须在查询中列出所有没有定义`DEFAULT`表达式的列。 +如果 [strict\_insert\_defaults=1](../../operations/settings/settings.md),你必须在查询中列出所有没有定义`DEFAULT`表达式的列。 -数据可以以ClickHouse支持的任何 [输入输出格式](../interfaces/formats.md#formats) 传递给INSERT。格式的名称必须显示的指定在查询中: +数据可以以ClickHouse支持的任何 [输入输出格式](../../interfaces/formats.md#formats) 传递给INSERT。格式的名称必须显示的指定在查询中: ``` sql INSERT INTO [db.]table [(c1, c2, c3)] FORMAT format_name data_set @@ -37,7 +38,7 @@ INSERT INTO t FORMAT TabSeparated 22 Qwerty ``` -在使用命令行客户端或HTTP客户端时,你可以将具体的查询语句与数据分开发送。更多具体信息,请参考«[客户端](../interfaces/index.md#interfaces)»部分。 +在使用命令行客户端或HTTP客户端时,你可以将具体的查询语句与数据分开发送。更多具体信息,请参考«[客户端](../../interfaces/index.md#interfaces)»部分。 ### 使用`SELECT`的结果写入 {#insert_query_insert-select} diff --git a/docs/zh/sql_reference/statements/misc.md b/docs/zh/sql_reference/statements/misc.md new file mode 100644 index 00000000000..e50f08464b7 --- /dev/null +++ b/docs/zh/sql_reference/statements/misc.md @@ -0,0 +1,252 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 39 +toc_title: "\u5176\u4ED6" +--- + +# 杂项查询 {#miscellaneous-queries} + +## ATTACH {#attach} + +这个查询是完全一样的 `CREATE`,但是 + +- 而不是这个词 `CREATE` 它使用这个词 `ATTACH`. 
- 查询不会在磁盘上创建数据,而是假定数据已经在适当的位置,只是将有关表的信息添加到服务器。 执行 ATTACH 查询后,服务器将知道表的存在。 + +如果表之前已分离 (`DETACH`),意味着其结构是已知的,可以使用简写形式而不必指定结构。 + +``` sql +ATTACH TABLE [IF NOT EXISTS] [db.]name [ON CLUSTER cluster] +``` + +启动服务器时会使用此查询。 服务器将表元数据作为带有 `ATTACH` 查询的文件存储,并在启动时简单地运行这些查询(服务器上显式创建的系统表除外)。 + +## CHECK TABLE {#check-table} + +检查表中的数据是否已损坏。 + +``` sql +CHECK TABLE [db.]name +``` + +`CHECK TABLE` 查询将实际文件大小与存储在服务器上的预期值进行比较。 如果文件大小与存储的值不匹配,则表示数据已损坏。 例如,这可能是由查询执行期间的系统崩溃引起的。 + +查询响应包含单行的 `result` 列。 该行的值为 +[布尔值](../../sql_reference/data_types/boolean.md) 类型: + +- 0-表中的数据已损坏。 +- 1-数据保持完整性。 + +`CHECK TABLE` 查询支持下列表引擎: + +- [Log](../../engines/table_engines/log_family/log.md) +- [TinyLog](../../engines/table_engines/log_family/tinylog.md) +- [StripeLog](../../engines/table_engines/log_family/stripelog.md) +- [MergeTree 家族](../../engines/table_engines/mergetree_family/mergetree.md) + +对使用其他表引擎的表执行会导致异常。 + +`*Log` 家族的引擎不提供故障时的自动数据恢复。 使用 `CHECK TABLE` 查询以及时跟踪数据丢失。 + +对于 `MergeTree` 家族引擎,`CHECK TABLE` 查询显示本地服务器上表的每个单独数据部件的检查状态。 + +**如果数据已损坏** + +如果表已损坏,则可以将未损坏的数据复制到另一个表。 要做到这一点: + +1. 创建具有与损坏的表相同结构的新表。 要执行此操作,请执行查询 `CREATE TABLE <new_table_name> AS <damaged_table_name>`. +2. 设置 [max\_threads](../../operations/settings/settings.md#settings-max_threads) 值为1以在单个线程中处理下一个查询。 要执行此操作,请运行查询 `SET max_threads = 1`. +3. 执行查询 `INSERT INTO <new_table_name> SELECT * FROM <damaged_table_name>`. 此请求将未损坏的数据从损坏的表复制到另一个表。 只有损坏部分之前的数据才会被复制。 +4. 重新启动 `clickhouse-client` 以重置 `max_threads` 值。 + +## DESCRIBE TABLE {#misc-describe-table} + +``` sql +DESC|DESCRIBE TABLE [db.]table [INTO OUTFILE filename] [FORMAT format] +``` + +返回以下 `String` 类型的列: + +- `name` — Column name. +- `type`— Column type. +- `default_type` — Clause that is used in [默认表达式](create.md#create-default-values) (`DEFAULT`, `MATERIALIZED` 或 `ALIAS`). 如果未指定默认表达式,则该列包含一个空字符串。 +- `default_expression` — Value specified in the `DEFAULT` 子句 +- `comment_expression` — Comment text. + +嵌套的数据结构以 “expanded” 格式输出。 每列分别显示,名称后面带一个点。 + +## DETACH {#detach} + +从服务器删除有关 'name' 表的信息。 服务器将不再知道该表的存在。 + +``` sql +DETACH TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster] +``` + +这不会删除表的数据或元数据。 在下一次服务器启动时,服务器将读取元数据并再次查找有关表的信息。 +同样,一个 “detached” 表可以使用 `ATTACH` 查询重新连接(系统表除外,它们没有存储元数据)。 + +没有 `DETACH DATABASE` 查询。 + +## DROP {#drop} + +此查询有两种类型: `DROP DATABASE` 和 `DROP TABLE`. + +``` sql +DROP DATABASE [IF EXISTS] db [ON CLUSTER cluster] +``` + +删除 'db' 数据库中的所有表,然后删除 'db' 数据库本身。 +如果指定了 `IF EXISTS`,当数据库不存在时,不会返回错误。 + +``` sql +DROP [TEMPORARY] TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster] +``` + +删除表。 +如果指定了 `IF EXISTS`,当表不存在或数据库不存在时,不会返回错误。 + + DROP DICTIONARY [IF EXISTS] [db.]name + +删除字典。 +如果指定了 `IF EXISTS`,当表不存在或数据库不存在时,不会返回错误。 + +## EXISTS {#exists} + +``` sql +EXISTS [TEMPORARY] [TABLE|DICTIONARY] [db.]name [INTO OUTFILE filename] [FORMAT format] +``` + +返回单个 `UInt8` 类型的列,其中包含单个值:如果表或数据库不存在则为 `0`,如果该表存在于指定的数据库中则为 `1`。 + +## KILL QUERY {#kill-query} + +``` sql +KILL QUERY [ON CLUSTER cluster] + WHERE <where expression to SELECT FROM system.processes query> + [SYNC|ASYNC|TEST] + [FORMAT format] +``` + +尝试强制终止当前正在运行的查询。 +要终止的查询是根据 `KILL` 查询的 `WHERE` 子句中定义的条件,从 system.processes 表中选择的。 + +例: + +``` sql +-- Forcibly terminates all queries with the specified query_id: +KILL QUERY WHERE query_id='2-857d-4a57-9ee0-327da5d60a90' + +-- Synchronously terminates all queries run by 'username': +KILL QUERY WHERE user='username' SYNC +``` + +只读用户只能停止自己的查询。 + +默认情况下,使用异步版本的查询 (`ASYNC`),不等待确认查询已停止。 + +同步版本 (`SYNC`)等待所有查询停止,并在停止时显示有关每个进程的信息。 +响应包含 `kill_status` 列,它可以采用以下值: + +1. ‘finished’ – The query was terminated successfully. +2. ‘waiting’ – Waiting for the query to end after sending it a signal to terminate. +3.
The other values ​​explain why the query can't be stopped. + +测试查询 (`TEST`)仅检查用户的权限并显示要停止的查询列表。 + +## KILL MUTATION {#kill-mutation} + +``` sql +KILL MUTATION [ON CLUSTER cluster] + WHERE + [TEST] + [FORMAT format] +``` + +尝试取消和删除 [突变](alter.md#alter-mutations) 当前正在执行。 要取消的突变选自 [`system.mutations`](../../operations/system_tables.md#system_tables-mutations) 表使用由指定的过滤器 `WHERE` 《公约》条款 `KILL` 查询。 + +测试查询 (`TEST`)仅检查用户的权限并显示要停止的查询列表。 + +例: + +``` sql +-- Cancel and remove all mutations of the single table: +KILL MUTATION WHERE database = 'default' AND table = 'table' + +-- Cancel the specific mutation: +KILL MUTATION WHERE database = 'default' AND table = 'table' AND mutation_id = 'mutation_3.txt' +``` + +The query is useful when a mutation is stuck and cannot finish (e.g. if some function in the mutation query throws an exception when applied to the data contained in the table). + +已经由突变所做的更改不会回滚。 + +## OPTIMIZE {#misc_operations-optimize} + +``` sql +OPTIMIZE TABLE [db.]name [ON CLUSTER cluster] [PARTITION partition | PARTITION ID 'partition_id'] [FINAL] [DEDUPLICATE] +``` + +此查询尝试使用来自表引擎的表初始化表的数据部分的非计划合并 [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md) 家人 + +该 `OPTMIZE` 查询也支持 [MaterializedView](../../engines/table_engines/special/materializedview.md) 和 [缓冲区](../../engines/table_engines/special/buffer.md) 引擎 不支持其他表引擎。 + +当 `OPTIMIZE` 与使用 [ReplicatedMergeTree](../../engines/table_engines/mergetree_family/replication.md) 表引擎的家族,ClickHouse创建合并任务,并等待在所有节点上执行(如果 `replication_alter_partitions_sync` 设置已启用)。 + +- 如果 `OPTIMIZE` 出于任何原因不执行合并,它不通知客户端。 要启用通知,请使用 [optimize\_throw\_if\_noop](../../operations/settings/settings.md#setting-optimize_throw_if_noop) 设置。 +- 如果您指定 `PARTITION`,仅优化指定的分区。 [如何设置分区表达式](alter.md#alter-how-to-specify-part-expr). +- 如果您指定 `FINAL`,即使所有数据已经在一个部分中,也会执行优化。 +- 如果您指定 `DEDUPLICATE`,然后完全相同的行将被重复数据删除(所有列进行比较),这仅适用于MergeTree引擎。 + +!!! warning "警告" + `OPTIMIZE` 无法修复 “Too many parts” 错误 + +## RENAME {#misc_operations-rename} + +重命名一个或多个表。 + +``` sql +RENAME TABLE [db11.]name11 TO [db12.]name12, [db21.]name21 TO [db22.]name22, ... [ON CLUSTER cluster] +``` + +所有表都在全局锁定下重命名。 重命名表是一个轻型操作。 如果您在TO之后指定了另一个数据库,则表将被移动到此数据库。 但是,包含数据库的目录必须位于同一文件系统中(否则,将返回错误)。 + +## SET {#query-set} + +``` sql +SET param = value +``` + +分配 `value` 到 `param` [设置](../../operations/settings/index.md) 对于当前会话。 你不能改变 [服务器设置](../../operations/server_configuration_parameters/index.md) 这边 + +您还可以在单个查询中设置指定设置配置文件中的所有值。 + +``` sql +SET profile = 'profile-name-from-the-settings-file' +``` + +有关详细信息,请参阅 [设置](../../operations/settings/settings.md). + +## TRUNCATE {#truncate} + +``` sql +TRUNCATE TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster] +``` + +从表中删除所有数据。 当条款 `IF EXISTS` 如果该表不存在,则查询返回错误。 + +该 `TRUNCATE` 查询不支持 [查看](../../engines/table_engines/special/view.md), [文件](../../engines/table_engines/special/file.md), [URL](../../engines/table_engines/special/url.md) 和 [Null](../../engines/table_engines/special/null.md) 表引擎. 
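+
+下面是一个简单示例(其中 `db.hits` 仅为假设的表名),演示配合 `IF EXISTS` 清空一个可能不存在的表:
+
+``` sql
+-- 示意:若表存在则删除其全部数据;由于使用了 IF EXISTS,表不存在时不会报错
+TRUNCATE TABLE IF EXISTS db.hits
+```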
+ +## USE {#use} + +``` sql +USE db +``` + +用于设置会话的当前数据库。 +当前数据库用于搜索表,如果数据库没有在查询中明确定义与表名之前的点。 +使用HTTP协议时无法进行此查询,因为没有会话的概念。 + +[原始文章](https://clickhouse.tech/docs/en/query_language/misc/) diff --git a/docs/zh/query_language/select.md b/docs/zh/sql_reference/statements/select.md similarity index 91% rename from docs/zh/query_language/select.md rename to docs/zh/sql_reference/statements/select.md index 8400e963f3c..9f4e71c3343 100644 --- a/docs/zh/query_language/select.md +++ b/docs/zh/sql_reference/statements/select.md @@ -1,3 +1,4 @@ + # SELECT 查询语法 {#select-cha-xun-yu-fa} `SELECT` 语句用于执行数据的检索。 @@ -35,7 +36,7 @@ FROM子句规定了将从哪个表、或子查询、或表函数中读取数据 可以使用包含在括号里的子查询来替代表。 在这种情况下,子查询的处理将会构建在外部的查询内部。 -不同于SQL标准,子查询后无需指定别名。为了兼容,你可以在子查询后添加‘AS 别名’,但是指定的名字不能被使用在任何地方。 +不同于SQL标准,子查询后无需指定别名。为了兼容,你可以在子查询后添加'AS 别名',但是指定的名字不能被使用在任何地方。 也可以使用表函数来代替表,有关信息,参见«表函数»。 @@ -50,10 +51,10 @@ FROM子句规定了将从哪个表、或子查询、或表函数中读取数据 `SAMPLE`子句可以使用`SAMPLE k`来表示,其中k可以是0到1的小数值,或者是一个足够大的正整数值。 -当k为0到1的小数时,查询将使用’k’作为百分比选取数据。例如,`SAMPLE 0.1`查询只会检索数据总量的10%。 -当k为一个足够大的正整数时,查询将使用’k’作为最大样本数。例如, `SAMPLE 10000000`查询只会检索最多10,000,000行数据。 +当k为0到1的小数时,查询将使用'k'作为百分比选取数据。例如,`SAMPLE 0.1`查询只会检索数据总量的10%。 +当k为一个足够大的正整数时,查询将使用'k'作为最大样本数。例如, `SAMPLE 10000000`查询只会检索最多10,000,000行数据。 -Example: +示例: ``` sql SELECT @@ -346,15 +347,15 @@ FROM 在使用`ALL`修饰符对JOIN进行修饰时,如果右表中存在多个与左表关联的数据,那么系统则将右表中所有可以与左表关联的数据全部返回在结果中。这与SQL标准的JOIN行为相同。 在使用`ANY`修饰符对JOIN进行修饰时,如果右表中存在多个与左表关联的数据,那么系统仅返回第一个与左表匹配的结果。如果左表与右表一一对应,不存在多余的行时,`ANY`与`ALL`的结果相同。 -你可以在会话中通过设置 [join\_default\_strictness](../operations/settings/settings.md) 来指定默认的JOIN修饰符。 +你可以在会话中通过设置 [join\_default\_strictness](../../operations/settings/settings.md) 来指定默认的JOIN修饰符。 -**`GLOBAL` distribution** +**`GLOBAL` 分布** 当使用普通的`JOIN`时,查询将被发送给远程的服务器。并在这些远程服务器上生成右表并与它们关联。换句话说,右表来自于各个服务器本身。 当使用`GLOBAL ... 
JOIN`,首先会在请求服务器上计算右表并以临时表的方式将其发送到所有服务器。这时每台服务器将直接使用它进行计算。 -使用`GLOBAL`时需要小心。更多信息,参阅 [Distributed subqueries](#select-distributed-subqueries) 部分。 +使用`GLOBAL`时需要小心。更多信息,参阅 [分布式子查询](#select-distributed-subqueries) 部分。 **使用建议** @@ -402,7 +403,7 @@ LIMIT 10 └───────────┴────────┴────────┘ 子查询不允许您设置别名或在其他地方引用它们。 -`USING`中指定的列必须在两个子查询中具有相同的名称,而其他列必须具有不同的名称。您可以通过使用别名的方式来更改子查询中的列名(示例中就分别使用了’hits’与’visits’别名)。 +`USING`中指定的列必须在两个子查询中具有相同的名称,而其他列必须具有不同的名称。您可以通过使用别名的方式来更改子查询中的列名(示例中就分别使用了'hits'与'visits'别名)。 `USING`子句用于指定要进行链接的一个或多个列,系统会将这些列在两张表中相等的值连接起来。如果列是一个列表,不需要使用括号包裹。同时JOIN不支持其他更复杂的Join方式。 @@ -410,17 +411,17 @@ LIMIT 10 只能在查询中指定一个`JOIN`。若要运行多个`JOIN`,你可以将它们放入子查询中。 -每次运行相同的`JOIN`查询,总是会再次计算 - 没有缓存结果。 为了避免这种情况,可以使用‘Join’引擎,它是一个预处理的Join数据结构,总是保存在内存中。更多信息,参见«Join引擎»部分。 +每次运行相同的`JOIN`查询,总是会再次计算 - 没有缓存结果。 为了避免这种情况,可以使用'Join'引擎,它是一个预处理的Join数据结构,总是保存在内存中。更多信息,参见«Join引擎»部分。 在一些场景下,使用`IN`代替`JOIN`将会得到更高的效率。在各种类型的JOIN中,最高效的是`ANY LEFT JOIN`,然后是`ANY INNER JOIN`,效率最差的是`ALL LEFT JOIN`以及`ALL INNER JOIN`。 -如果你需要使用`JOIN`来关联一些纬度表(包含纬度属性的一些相对比较小的表,例如广告活动的名称),那么`JOIN`可能不是好的选择,因为语法负责,并且每次查询都将重新访问这些表。对于这种情况,您应该使用«外部字典»的功能来替换`JOIN`。更多信息,参见 [外部字典](dicts/external_dicts.md) 部分。 +如果你需要使用`JOIN`来关联一些纬度表(包含纬度属性的一些相对比较小的表,例如广告活动的名称),那么`JOIN`可能不是好的选择,因为语法负责,并且每次查询都将重新访问这些表。对于这种情况,您应该使用«外部字典»的功能来替换`JOIN`。更多信息,参见 [外部字典](../../sql_reference/statements/select.md) 部分。 #### Null的处理 {#nullde-chu-li} -JOIN的行为受 [join\_use\_nulls](../operations/settings/settings.md) 的影响。当`join_use_nulls=1`时,`JOIN`的工作与SQL标准相同。 +JOIN的行为受 [join\_use\_nulls](../../operations/settings/settings.md) 的影响。当`join_use_nulls=1`时,`JOIN`的工作与SQL标准相同。 -如果JOIN的key是 [Nullable](../data_types/nullable.md) 类型的字段,则其中至少一个存在 [NULL](syntax.md) 值的key不会被关联。 +如果JOIN的key是 [可为空](../../sql_reference/statements/select.md) 类型的字段,则其中至少一个存在 [NULL](../syntax.md) 值的key不会被关联。 ### WHERE 子句 {#select-where} @@ -444,7 +445,7 @@ PREWHERE 仅支持`*MergeTree`系列引擎。 值得注意的是,PREWHERE不适合用于已经存在于索引中的列,因为当列已经存在于索引中的情况下,只有满足索引的数据块才会被读取。 -如果将’optimize\_move\_to\_prewhere’设置为1,并且在查询中不包含PREWHERE,则系统将自动的把适合PREWHERE表达式的部分从WHERE中抽离到PREWHERE中。 +如果将'optimize\_move\_to\_prewhere'设置为1,并且在查询中不包含PREWHERE,则系统将自动的把适合PREWHERE表达式的部分从WHERE中抽离到PREWHERE中。 ### GROUP BY 子句 {#select-group-by-clause} @@ -455,7 +456,7 @@ SELECT,HAVING,ORDER BY子句中的表达式列表必须来自于这些«key 如果查询表达式列表中仅包含聚合函数,则可以省略GROUP BY子句,这时会假定将所有数据聚合成一组空«key»。 -Example: +示例: ``` sql SELECT @@ -467,9 +468,9 @@ FROM hits 与SQL标准不同的是,如果表中不存在任何数据(可能表本身中就不存在任何数据,或者由于被WHERE条件过滤掉了),将返回一个空结果,而不是一个包含聚合函数初始值的结果。 -与MySQL不同的是(实际上这是符合SQL标准的),你不能够获得一个不在key中的非聚合函数列(除了常量表达式)。但是你可以使用‘any’(返回遇到的第一个值)、max、min等聚合函数使它工作。 +与MySQL不同的是(实际上这是符合SQL标准的),你不能够获得一个不在key中的非聚合函数列(除了常量表达式)。但是你可以使用'any'(返回遇到的第一个值)、max、min等聚合函数使它工作。 -Example: +示例: ``` sql SELECT @@ -488,7 +489,7 @@ GROUP BY子句会为遇到的每一个不同的key计算一组聚合函数的值 #### NULL 处理 {#null-chu-li} -对于GROUP BY子句,ClickHouse将 [NULL](syntax.md) 解释为一个值,并且支持`NULL=NULL`。 +对于GROUP BY子句,ClickHouse将 [NULL](../syntax.md) 解释为一个值,并且支持`NULL=NULL`。 下面这个例子将说明这将意味着什么。 @@ -520,9 +521,9 @@ GROUP BY子句会为遇到的每一个不同的key计算一组聚合函数的值 该行仅在JSON\*, TabSeparated\*, Pretty\*输出格式中与其他行分开输出。 -在JSON\*输出格式中,这行将出现在Json的‘totals’字段中。在TabSeparated\*输出格式中,这行将位于其他结果之后,同时与其他结果使用空白行分隔。在Pretty\*输出格式中,这行将作为单独的表在所有结果之后输出。 +在JSON\*输出格式中,这行将出现在Json的'totals'字段中。在TabSeparated\*输出格式中,这行将位于其他结果之后,同时与其他结果使用空白行分隔。在Pretty\*输出格式中,这行将作为单独的表在所有结果之后输出。 -当`WITH TOTALS`与HAVING子句同时存在时,它的行为受‘totals\_mode’配置的影响。 +当`WITH TOTALS`与HAVING子句同时存在时,它的行为受'totals\_mode'配置的影响。 默认情况下,`totals_mode = 'before_having'`,这时`WITH TOTALS`将会在HAVING前计算最多不超过`max_rows_to_group_by`行的数据。 在`group_by_overflow_mode = 'any'`并指定了`max_rows_to_group_by`的情况下,`WITH 
TOTALS`的行为受`totals_mode`的影响。 @@ -531,7 +532,7 @@ GROUP BY子句会为遇到的每一个不同的key计算一组聚合函数的值 `after_having_inclusive` - 在HAVING后进行计算,计算不少于`max_rows_to_group_by`行的数据。 -`after_having_auto` - 在HAVING后进行计算,采用统计通过HAVING的行数,在超过不超过‘max\_rows\_to\_group\_by’指定值(默认为50%)的情况下,包含所有行的结果。否则排除这些结果。 +`after_having_auto` - 在HAVING后进行计算,采用统计通过HAVING的行数,在超过不超过'max\_rows\_to\_group\_by'指定值(默认为50%)的情况下,包含所有行的结果。否则排除这些结果。 `totals_auto_threshold` - 默认 0.5,是`after_having_auto`的参数。 @@ -637,9 +638,9 @@ WHERE于HAVING不同之处在于WHERE在聚合前(GROUP BY)执行,HAVING在聚 如果你在ORDER BY子句后面存在LIMIT并给定了较小的数值,则将会使用较少的内存。否则,内存的使用量将与需要排序的数据成正比。对于分布式查询,如果省略了GROUP BY,则在远程服务器上执行部分排序,最后在请求服务器上合并排序结果。这意味这对于分布式查询而言,要排序的数据量可以大于单台服务器的内存。 -如果没有足够的内存,可以使用外部排序(在磁盘中创建一些临时文件)。可以使用`max_bytes_before_external_sort`来设置外部排序,如果你讲它设置为0(默认),则表示禁用外部排序功能。如果启用该功能。当要排序的数据量达到所指定的字节数时,当前排序的结果会被转存到一个临时文件中去。当全部数据读取完毕后,所有的临时文件将会合并成最终输出结果。这些临时文件将会写到config文件配置的/var/lib/clickhouse/tmp/目录中(默认值,你可以通过修改’tmp\_path’配置调整该目录的位置)。 +如果没有足够的内存,可以使用外部排序(在磁盘中创建一些临时文件)。可以使用`max_bytes_before_external_sort`来设置外部排序,如果你讲它设置为0(默认),则表示禁用外部排序功能。如果启用该功能。当要排序的数据量达到所指定的字节数时,当前排序的结果会被转存到一个临时文件中去。当全部数据读取完毕后,所有的临时文件将会合并成最终输出结果。这些临时文件将会写到config文件配置的/var/lib/clickhouse/tmp/目录中(默认值,你可以通过修改'tmp\_path'配置调整该目录的位置)。 -查询运行使用的内存要高于‘max\_bytes\_before\_external\_sort’,为此,这个配置必须要远远小于‘max\_memory\_usage’配置的值。例如,如果你的服务器有128GB的内存去运行一个查询,那么推荐你将‘max\_memory\_usage’设置为100GB,‘max\_bytes\_before\_external\_sort’设置为80GB。 +查询运行使用的内存要高于'max\_bytes\_before\_external\_sort',为此,这个配置必须要远远小于'max\_memory\_usage'配置的值。例如,如果你的服务器有128GB的内存去运行一个查询,那么推荐你将'max\_memory\_usage'设置为100GB,'max\_bytes\_before\_external\_sort'设置为80GB。 外部排序效率要远低于在内存中排序。 @@ -661,14 +662,14 @@ WHERE于HAVING不同之处在于WHERE在聚合前(GROUP BY)执行,HAVING在聚 在SELECT表达式中存在Array类型的列时,不能使用DISTINCT。 -`DISTINCT`可以与 [NULL](syntax.md)一起工作,就好像`NULL`仅是一个特殊的值一样,并且`NULL=NULL`。换而言之,在`DISTINCT`的结果中,与`NULL`不同的组合仅能出现一次。 +`DISTINCT`可以与 [NULL](../syntax.md)一起工作,就好像`NULL`仅是一个特殊的值一样,并且`NULL=NULL`。换而言之,在`DISTINCT`的结果中,与`NULL`不同的组合仅能出现一次。 ### LIMIT 子句 {#limit-zi-ju} LIMIT m 用于在查询结果中选择前m行数据。 LIMIT n, m 用于在查询结果中选择从n行开始的m行数据。 -‘n’与‘m’必须是正整数。 +'n'与'm'必须是正整数。 如果没有指定ORDER BY子句,则结果可能是任意的顺序,并且是不确定的。 @@ -730,11 +731,11 @@ SELECT (CounterID, UserID) IN ((34, 123), (101500, 456)) FROM ... 如果左侧是单个列并且是一个索引,并且右侧是一组常量时,系统将使用索引来处理查询。 不要在列表中列出太多的值(百万)。如果数据集很大,将它们放入临时表中(可以参考«»), 然后使用子查询。 -Don’t list too many values explicitly (i.e. millions). If a data set is large, put it in a temporary table (for example, see the section «External data for query processing»), then use a subquery. +Don't list too many values explicitly (i.e. millions). If a data set is large, put it in a temporary table (for example, see the section «External data for query processing»), then use a subquery. 右侧可以是一个由常量表达式组成的元组列表(像上面的例子一样),或者是一个数据库中的表的名称,或是一个包含在括号中的子查询。 -如果右侧是一个表的名字(例如,`UserID IN users`),这相当于`UserID IN (SELECT * FROM users)`。在查询与外部数据表组合使用时可以使用该方法。例如,查询与包含user IDS的‘users’临时表一起被发送的同时需要对结果进行过滤时。 +如果右侧是一个表的名字(例如,`UserID IN users`),这相当于`UserID IN (SELECT * FROM users)`。在查询与外部数据表组合使用时可以使用该方法。例如,查询与包含user IDS的'users'临时表一起被发送的同时需要对结果进行过滤时。 如果操作符的右侧是一个Set引擎的表时(数据总是在内存中准备好),则不会每次都为查询创建新的数据集。 @@ -779,7 +780,7 @@ IN子句中的子查询仅在单个服务器上运行一次。不能够是相关 #### NULL 处理 {#null-chu-li-1} -在处理中,IN操作符总是假定 [NULL](syntax.md) 值的操作结果总是等于`0`,而不管`NULL`位于左侧还是右侧。`NULL`值不应该包含在任何数据集中,它们彼此不能够对应,并且不能够比较。 +在处理中,IN操作符总是假定 [NULL](../syntax.md) 值的操作结果总是等于`0`,而不管`NULL`位于左侧还是右侧。`NULL`值不应该包含在任何数据集中,它们彼此不能够对应,并且不能够比较。 下面的示例中有一个`t_null`表: @@ -809,7 +810,7 @@ IN子句中的子查询仅在单个服务器上运行一次。不能够是相关 对于带有子查询的(类似与JOINs)IN中,有两种选择:普通的`IN`/`JOIN`与`GLOBAL IN` / `GLOBAL JOIN`。它们对于分布式查询的处理运行方式是不同的。 !!! 
注意 "注意" - 请记住,下面描述的算法可能因为根据 [settings](../operations/settings/settings.md) 配置的不同而不同。 + 请记住,下面描述的算法可能因为根据 [设置](../../operations/settings/settings.md) 配置的不同而不同。 当使用普通的IN时,查询总是被发送到远程的服务器,并且在每个服务器中运行«IN»或«JOIN»子句中的子查询。 @@ -905,13 +906,13 @@ SELECT uniq(UserID) FROM local_table WHERE CounterID = 101500 AND UserID GLOBAL 另外,在`GLOBAL IN`子句中使用本地表也是有用的,比如,本地表仅在请求服务器上可用,并且您希望在远程服务器上使用来自本地表的数据。 -### Extreme Values {#extreme-values} +### 极端值 {#extreme-values} -除了结果外,你还可以获得结果列的最大值与最小值,可以将**extremes**配置设置成1来做到这一点。最大值最小值的计算是针对于数字类型,日期类型进行计算的,对于其他列,将会输出默认值。 +除了结果外,你还可以获得结果列的最大值与最小值,可以将**极端**配置设置成1来做到这一点。最大值最小值的计算是针对于数字类型,日期类型进行计算的,对于其他列,将会输出默认值。 额外计算的两行结果 - 最大值与最小值,这两行额外的结果仅在JSON\*, TabSeparated\*, and Pretty\* 格式与其他行分开的输出方式输出,不支持其他输出格式。 -在JSON\*格式中,Extreme值在单独的’extremes’字段中。在TabSeparated\*格式中,在其他结果与’totals’之后输出,并使用空行与其分隔。在Pretty\* 格式中,将在其他结果与’totals’后以单独的表格输出。 +在JSON\*格式中,Extreme值在单独的'extremes'字段中。在TabSeparated\*格式中,在其他结果与'totals'之后输出,并使用空行与其分隔。在Pretty\* 格式中,将在其他结果与'totals'后以单独的表格输出。 如果在计算Extreme值的同时包含LIMIT。extremes的计算结果将包含offset跳过的行。在流式的请求中,它可能还包含多余LIMIT的少量行的值。 diff --git a/docs/zh/query_language/show.md b/docs/zh/sql_reference/statements/show.md similarity index 52% rename from docs/zh/query_language/show.md rename to docs/zh/sql_reference/statements/show.md index 840a2fc9766..f60452f97a3 100644 --- a/docs/zh/query_language/show.md +++ b/docs/zh/sql_reference/statements/show.md @@ -1,8 +1,11 @@ --- -en_copy: true +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 38 +toc_title: SHOW --- -# SHOW Queries {#show-queries} +# 显示查询 {#show-queries} ## SHOW CREATE TABLE {#show-create-table} @@ -10,7 +13,7 @@ en_copy: true SHOW CREATE [TEMPORARY] [TABLE|DICTIONARY] [db.]table [INTO OUTFILE filename] [FORMAT format] ``` -Returns a single `String`-type ‘statement’ column, which contains a single value – the `CREATE` query used for creating the specified object. +返回单 `String`-类型 ‘statement’ column, which contains a single value – the `CREATE` 用于创建指定对象的查询。 ## SHOW DATABASES {#show-databases} @@ -18,8 +21,8 @@ Returns a single `String`-type ‘statement’ column, which contains a single v SHOW DATABASES [INTO OUTFILE filename] [FORMAT format] ``` -Prints a list of all databases. -This query is identical to `SELECT name FROM system.databases [INTO OUTFILE filename] [FORMAT format]`. +打印所有数据库的列表。 +这个查询是相同的 `SELECT name FROM system.databases [INTO OUTFILE filename] [FORMAT format]`. ## SHOW PROCESSLIST {#show-processlist} @@ -27,11 +30,11 @@ This query is identical to `SELECT name FROM system.databases [INTO OUTFILE file SHOW PROCESSLIST [INTO OUTFILE filename] [FORMAT format] ``` -Outputs the content of the [system.processes](../operations/system_tables.md#system_tables-processes) table, that contains a list of queries that is being processed at the moment, excepting `SHOW PROCESSLIST` queries. +输出的内容 [系统。流程](../../operations/system_tables.md#system_tables-processes) 表,包含目前正在处理的查询列表,除了 `SHOW PROCESSLIST` 查询。 -The `SELECT * FROM system.processes` query returns data about all the current queries. +该 `SELECT * FROM system.processes` 查询返回有关所有当前查询的数据。 -Tip (execute in the console): +提示(在控制台中执行): ``` bash $ watch -n1 "clickhouse-client --query='SHOW PROCESSLIST'" @@ -39,23 +42,23 @@ $ watch -n1 "clickhouse-client --query='SHOW PROCESSLIST'" ## SHOW TABLES {#show-tables} -Displays a list of tables. 
+显示表的列表。 ``` sql SHOW [TEMPORARY] TABLES [{FROM | IN} ] [LIKE '' | WHERE expr] [LIMIT ] [INTO OUTFILE ] [FORMAT ] ``` -If the `FROM` clause is not specified, the query returns the list of tables from the current database. +如果 `FROM` 如果未指定子句,则查询返回当前数据库中的表列表。 -You can get the same results as the `SHOW TABLES` query in the following way: +你可以得到相同的结果 `SHOW TABLES` 通过以下方式进行查询: ``` sql SELECT name FROM system.tables WHERE database = [AND name LIKE ] [LIMIT ] [INTO OUTFILE ] [FORMAT ] ``` -**Example** +**示例** -The following query selects the first two rows from the list of tables in the `system` database, whose names contain `co`. +下面的查询从表的列表中选择前两行 `system` 数据库,其名称包含 `co`. ``` sql SHOW TABLES FROM system LIKE '%co%' LIMIT 2 @@ -70,23 +73,23 @@ SHOW TABLES FROM system LIKE '%co%' LIMIT 2 ## SHOW DICTIONARIES {#show-dictionaries} -Displays a list of [external dictionaries](dicts/external_dicts.md). +显示列表 [外部字典](../../sql_reference/dictionaries/external_dictionaries/external_dicts.md). ``` sql SHOW DICTIONARIES [FROM ] [LIKE ''] [LIMIT ] [INTO OUTFILE ] [FORMAT ] ``` -If the `FROM` clause is not specified, the query returns the list of dictionaries from the current database. +如果 `FROM` 如果未指定子句,则查询从当前数据库返回字典列表。 -You can get the same results as the `SHOW DICTIONARIES` query in the following way: +你可以得到相同的结果 `SHOW DICTIONARIES` 通过以下方式进行查询: ``` sql SELECT name FROM system.dictionaries WHERE database = [AND name LIKE ] [LIMIT ] [INTO OUTFILE ] [FORMAT ] ``` -**Example** +**示例** -The following query selects the first two rows from the list of tables in the `system` database, whose names contain `reg`. +下面的查询从表的列表中选择前两行 `system` 数据库,其名称包含 `reg`. ``` sql SHOW DICTIONARIES FROM db LIKE '%reg%' LIMIT 2 @@ -99,4 +102,4 @@ SHOW DICTIONARIES FROM db LIKE '%reg%' LIMIT 2 └──────────────┘ ``` -[Original article](https://clickhouse.tech/docs/en/query_language/show/) +[原始文章](https://clickhouse.tech/docs/en/query_language/show/) diff --git a/docs/zh/sql_reference/statements/system.md b/docs/zh/sql_reference/statements/system.md new file mode 100644 index 00000000000..06d4f6dc1cb --- /dev/null +++ b/docs/zh/sql_reference/statements/system.md @@ -0,0 +1,113 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 37 +toc_title: SYSTEM +--- + +# 系统查询 {#query-language-system} + +- [RELOAD DICTIONARIES](#query_language-system-reload-dictionaries) +- [RELOAD DICTIONARY](#query_language-system-reload-dictionary) +- [DROP DNS CACHE](#query_language-system-drop-dns-cache) +- [DROP MARK CACHE](#query_language-system-drop-mark-cache) +- [FLUSH LOGS](#query_language-system-flush_logs) +- [RELOAD CONFIG](#query_language-system-reload-config) +- [SHUTDOWN](#query_language-system-shutdown) +- [KILL](#query_language-system-kill) +- [STOP DISTRIBUTED SENDS](#query_language-system-stop-distributed-sends) +- [FLUSH DISTRIBUTED](#query_language-system-flush-distributed) +- [START DISTRIBUTED SENDS](#query_language-system-start-distributed-sends) +- [STOP MERGES](#query_language-system-stop-merges) +- [START MERGES](#query_language-system-start-merges) + +## RELOAD DICTIONARIES {#query_language-system-reload-dictionaries} + +重新加载之前已成功加载的所有字典。 +默认情况下,字典是懒惰加载的(请参阅 [dictionaries\_lazy\_load](../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-dictionaries_lazy_load)),所以不是在启动时自动加载,而是通过dictGet函数在第一次访问时初始化,或者从ENGINE=Dictionary的表中选择。 该 `SYSTEM RELOAD DICTIONARIES` 查询重新加载这样的字典(加载)。 +总是返回 `Ok.` 无论字典更新的结果如何。 + +## 重新加载字典Dictionary\_name 
{#query_language-system-reload-dictionary} + +完全重新加载字典 `dictionary_name`,与字典的状态无关(LOADED/NOT\_LOADED/FAILED)。 +总是返回 `Ok.` 无论更新字典的结果如何。 +字典的状态可以通过查询 `system.dictionaries` 桌子 + +``` sql +SELECT name, status FROM system.dictionaries; +``` + +## DROP DNS CACHE {#query_language-system-drop-dns-cache} + +重置ClickHouse的内部DNS缓存。 有时(对于旧的ClickHouse版本)在更改基础架构(更改另一个ClickHouse服务器或字典使用的服务器的IP地址)时需要使用此命令。 + +有关更方便(自动)缓存管理,请参阅disable\_internal\_dns\_cache、dns\_cache\_update\_period参数。 + +## DROP MARK CACHE {#query_language-system-drop-mark-cache} + +重置标记缓存。 用于开发ClickHouse和性能测试。 + +## FLUSH LOGS {#query_language-system-flush_logs} + +Flushes buffers of log messages to system tables (e.g. system.query\_log). Allows you to not wait 7.5 seconds when debugging. + +## RELOAD CONFIG {#query_language-system-reload-config} + +重新加载ClickHouse配置。 当配置存储在ZooKeeeper中时使用。 + +## SHUTDOWN {#query_language-system-shutdown} + +通常关闭ClickHouse(如 `service clickhouse-server stop` / `kill {$pid_clickhouse-server}`) + +## KILL {#query_language-system-kill} + +中止ClickHouse进程(如 `kill -9 {$ pid_clickhouse-server}`) + +## 管理分布式表 {#query-language-system-distributed} + +ClickHouse可以管理 [分布](../../engines/table_engines/special/distributed.md) 桌子 当用户将数据插入到这些表中时,ClickHouse首先创建应发送到群集节点的数据队列,然后异步发送它。 您可以使用 [STOP DISTRIBUTED SENDS](#query_language-system-stop-distributed-sends), [FLUSH DISTRIBUTED](#query_language-system-flush-distributed),和 [START DISTRIBUTED SENDS](#query_language-system-start-distributed-sends) 查询。 您也可以同步插入分布式数据与 `insert_distributed_sync` 设置。 + +### STOP DISTRIBUTED SENDS {#query_language-system-stop-distributed-sends} + +将数据插入分布式表时禁用后台数据分发。 + +``` sql +SYSTEM STOP DISTRIBUTED SENDS [db.] +``` + +### FLUSH DISTRIBUTED {#query_language-system-flush-distributed} + +强制ClickHouse将数据同步发送到群集节点。 如果任何节点不可用,ClickHouse将引发异常并停止查询执行。 您可以重试查询,直到查询成功,这将在所有节点恢复联机时发生。 + +``` sql +SYSTEM FLUSH DISTRIBUTED [db.] +``` + +### START DISTRIBUTED SENDS {#query_language-system-start-distributed-sends} + +将数据插入分布式表时启用后台数据分发。 + +``` sql +SYSTEM START DISTRIBUTED SENDS [db.] +``` + +### STOP MERGES {#query_language-system-stop-merges} + +提供停止MergeTree系列中表的后台合并的可能性: + +``` sql +SYSTEM STOP MERGES [[db.]merge_tree_family_table_name] +``` + +!!! note "注" + `DETACH / ATTACH` 即使在之前所有MergeTree表的合并已停止的情况下,table也会为表启动后台合并。 + +### START MERGES {#query_language-system-start-merges} + +为MergeTree系列中的表提供启动后台合并的可能性: + +``` sql +SYSTEM START MERGES [[db.]merge_tree_family_table_name] +``` + +[原始文章](https://clickhouse.tech/docs/en/query_language/system/) diff --git a/docs/zh/sql_reference/syntax.md b/docs/zh/sql_reference/syntax.md new file mode 100644 index 00000000000..ab9009def47 --- /dev/null +++ b/docs/zh/sql_reference/syntax.md @@ -0,0 +1,187 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 31 +toc_title: "\u8BED\u6CD5" +--- + +# 语法 {#syntax} + +系统中有两种类型的解析器:完整SQL解析器(递归下降解析器)和数据格式解析器(快速流解析器)。 +在所有情况下,除了 `INSERT` 查询时,只使用完整的SQL解析器。 +该 `INSERT` 查询使用的分析程序: + +``` sql +INSERT INTO t VALUES (1, 'Hello, world'), (2, 'abc'), (3, 'def') +``` + +该 `INSERT INTO t VALUES` 片段由完整的解析器解析,并且数据 `(1, 'Hello, world'), (2, 'abc'), (3, 'def')` 由快速流解析器解析。 您也可以通过使用 [input\_format\_values\_interpret\_expressions](../operations/settings/settings.md#settings-input_format_values_interpret_expressions) 设置。 当 `input_format_values_interpret_expressions = 1`,ClickHouse首先尝试使用fast stream解析器解析值。 如果失败,ClickHouse将尝试对数据使用完整的解析器,将其视为SQL [表达式](#syntax-expressions). 
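+
+例如(假设存在表 `t`,其前两列的类型依次为 `DateTime` 和 `String`),下面 VALUES 中的 `now()` 不是字面量,只有在启用该设置后才会交给完整解析器求值:
+
+``` sql
+SET input_format_values_interpret_expressions = 1;
+-- now() 是表达式而非字面量,由完整的 SQL 解析器求值
+INSERT INTO t VALUES (now(), 'Hello, world');
+```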
+ +数据可以有任何格式。 当接收到查询时,服务器计算不超过 [max\_query\_size](../operations/settings/settings.md#settings-max_query_size) RAM中请求的字节(默认为1MB),其余的是流解析。 +这意味着系统没有大的问题 `INSERT` 查询,就像MySQL一样。 + +使用时 `Values` 格式为 `INSERT` 查询,它可能看起来数据被解析相同的表达式 `SELECT` 查询,但事实并非如此。 该 `Values` 格式更为有限。 + +接下来我们将介绍完整的解析器。 有关格式解析器的详细信息,请参阅 [格式](../interfaces/formats.md) 科。 + +## 空间 {#spaces} + +语法结构之间可能有任意数量的空格符号(包括查询的开始和结束)。 空格符号包括空格、制表符、换行符、CR和换页符。 + +## 评论 {#comments} + +支持SQL样式和C样式注释。 +SQL风格的评论:来自 `--` 直到最后 后的空间 `--` 可以省略。 +C风格的评论:来自 `/*` 到 `*/`. 这些注释可以是多行。 这里也不需要空格。 + +## 关键词 {#syntax-keywords} + +当关键字对应于以下关键字时,不区分大小写: + +- SQL标准。 例如, `SELECT`, `select` 和 `SeLeCt` 都是有效的。 +- 在一些流行的DBMS(MySQL或Postgres)中实现。 例如, `DateTime` 是一样的 `datetime`. + +数据类型名称是否区分大小写可以在 `system.data_type_families` 桌子 + +与标准SQL相比,所有其他关键字(包括函数名称)都是 **区分大小写**. + +不保留关键字(它们只是在相应的上下文中解析为关键字)。 如果您使用 [标识符](#syntax-identifiers) 与关键字相同,将它们括在引号中。 例如,查询 `SELECT "FROM" FROM table_name` 是有效的,如果表 `table_name` 具有名称的列 `"FROM"`. + +## 标识符 {#syntax-identifiers} + +标识符是: + +- 集群、数据库、表、分区和列名称。 +- 功能。 +- 数据类型。 +- [表达式别名](#syntax-expression_aliases). + +标识符可以是引号或非引号。 建议使用非引号标识符。 + +非引号标识符必须与正则表达式匹配 `^[a-zA-Z_][0-9a-zA-Z_]*$` 并且不能等于 [关键词](#syntax-keywords). 例: `x, _1, X_y__Z123_.` + +如果要使用与关键字相同的标识符,或者要在标识符中使用其他符号,请使用双引号或反引号对其进行引用,例如, `"id"`, `` `id` ``. + +## 文字数 {#literals} + +有:数字,字符串,复合和 `NULL` 文字。 + +### 数字 {#numeric} + +数值文本尝试进行分析: + +- 首先作为一个64位有符号的数字,使用 [strtoull](https://en.cppreference.com/w/cpp/string/byte/strtoul) 功能。 +- 如果不成功,作为64位无符号数,使用 [strtoll](https://en.cppreference.com/w/cpp/string/byte/strtol) 功能。 +- 如果不成功,作为一个浮点数使用 [strtod](https://en.cppreference.com/w/cpp/string/byte/strtof) 功能。 +- 否则,将返回错误。 + +相应的值将具有该值适合的最小类型。 +例如,1被解析为 `UInt8`,但256被解析为 `UInt16`. 有关详细信息,请参阅 [数据类型](../sql_reference/data_types/index.md). + +例: `1`, `18446744073709551615`, `0xDEADBEEF`, `01`, `0.1`, `1e100`, `-1e-100`, `inf`, `nan`. + +### 字符串 {#syntax-string-literal} + +仅支持单引号中的字符串文字。 封闭的字符可以反斜杠转义。 以下转义序列具有相应的特殊值: `\b`, `\f`, `\r`, `\n`, `\t`, `\0`, `\a`, `\v`, `\xHH`. 在所有其他情况下,转义序列的格式为 `\c`,哪里 `c` 是任何字符,被转换为 `c`. 这意味着您可以使用序列 `\'`和`\\`. 该值将具有 [字符串](../sql_reference/data_types/string.md) 类型。 + +在字符串文字中需要转义的最小字符集: `'` 和 `\`. 单引号可以用单引号,文字转义 `'It\'s'` 和 `'It''s'` 是平等的。 + +### 化合物 {#compound} + +数组支持构造: `[1, 2, 3]` 和元组: `(1, 'Hello, world!', 2)`.. +实际上,这些不是文字,而是分别具有数组创建运算符和元组创建运算符的表达式。 +数组必须至少包含一个项目,元组必须至少包含两个项目。 +元组有一个特殊的用途用于 `IN` a条款 `SELECT` 查询。 元组可以作为查询的结果获得,但它们不能保存到数据库(除了 [记忆](../engines/table_engines/special/memory.md) 表)。 + +### NULL {#null-literal} + +指示该值丢失。 + +为了存储 `NULL` 在表字段中,它必须是 [可为空](../sql_reference/data_types/nullable.md) 类型。 + +根据数据格式(输入或输出), `NULL` 可能有不同的表示。 有关详细信息,请参阅以下文档 [数据格式](../interfaces/formats.md#formats). + +处理有许多细微差别 `NULL`. 例如,如果比较操作的至少一个参数是 `NULL`,此操作的结果也将是 `NULL`. 对于乘法,加法和其他操作也是如此。 有关详细信息,请阅读每个操作的文档。 + +在查询中,您可以检查 `NULL` 使用 [IS NULL](operators.md#operator-is-null) 和 [IS NOT NULL](operators.md) 运算符及相关功能 `isNull` 和 `isNotNull`. + +## 功能 {#functions} + +函数像标识符一样写入,并在括号中包含一个参数列表(可能是空的)。 与标准SQL相比,括号是必需的,即使是空的参数列表。 示例: `now()`. +有常规函数和聚合函数(请参阅部分 “Aggregate functions”). 某些聚合函数可以包含括号中的两个参数列表。 示例: `quantile (0.9) (x)`. 这些聚合函数被调用 “parametric” 函数,并在第一个列表中的参数被调用 “parameters”. 不带参数的聚合函数的语法与常规函数的语法相同。 + +## 运营商 {#operators} + +在查询解析过程中,运算符会转换为相应的函数,同时考虑它们的优先级和关联性。 +例如,表达式 `1 + 2 * 3 + 4` 转化为 `plus(plus(1, multiply(2, 3)), 4)`. + +## 数据类型和数据库表引擎 {#data_types-and-database-table-engines} + +数据类型和表引擎 `CREATE` 查询的编写方式与标识符或函数相同。 换句话说,它们可能包含也可能不包含括在括号中的参数列表。 有关详细信息,请参阅部分 “Data types,” “Table engines,” 和 “CREATE”. 
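+
+下面给出一个简单示意(表名与列名均为假设):数据类型与表引擎都像函数一样书写,括号中的参数列表可以存在,也可以为空:
+
+``` sql
+CREATE TABLE example
+(
+    d DateTime,          -- 不带参数的数据类型
+    s FixedString(16)    -- 带参数的数据类型
+)
+ENGINE = MergeTree()     -- 表引擎,此处参数列表为空
+ORDER BY d
+```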
+ +## 表达式别名 {#syntax-expression_aliases} + +别名是查询中表达式的用户定义名称。 + +``` sql +expr AS alias +``` + +- `AS` — The keyword for defining aliases. You can define the alias for a table name or a column name in a `SELECT` 子句不使用 `AS` 关键字。 + + For example, `SELECT table_name_alias.column_name FROM table_name table_name_alias`. + + In the [CAST](sql_reference/functions/type_conversion_functions.md#type_conversion_function-cast) function, the `AS` keyword has another meaning. See the description of the function. + +- `expr` — Any expression supported by ClickHouse. + + For example, `SELECT column_name * 2 AS double FROM some_table`. + +- `alias` — Name for `expr`. 别名应符合 [标识符](#syntax-identifiers) 语法 + + For example, `SELECT "table t".column_name FROM table_name AS "table t"`. + +### 使用注意事项 {#notes-on-usage} + +别名对于查询或子查询是全局的,您可以在查询的任何部分中为任何表达式定义别名。 例如, `SELECT (1 AS n) + 2, n`. + +别名在子查询和子查询之间不可见。 例如,在执行查询时 `SELECT (SELECT sum(b.a) + num FROM b) - a.a AS num FROM a` ClickHouse生成异常 `Unknown identifier: num`. + +如果为结果列定义了别名 `SELECT` 子查询的子句,这些列在外部查询中可见。 例如, `SELECT n + m FROM (SELECT 1 AS n, 2 AS m)`. + +小心使用与列或表名相同的别名。 让我们考虑以下示例: + +``` sql +CREATE TABLE t +( + a Int, + b Int +) +ENGINE = TinyLog() +``` + +``` sql +SELECT + argMax(a, b), + sum(b) AS b +FROM t +``` + +``` text +Received exception from server (version 18.14.17): +Code: 184. DB::Exception: Received from localhost:9000, 127.0.0.1. DB::Exception: Aggregate function sum(b) is found inside another aggregate function in query. +``` + +在这个例子中,我们声明表 `t` 带柱 `b`. 然后,在选择数据时,我们定义了 `sum(b) AS b` 别名 由于别名是全局的,ClickHouse替换了文字 `b` 在表达式中 `argMax(a, b)` 用表达式 `sum(b)`. 这种替换导致异常。 + +## 星号 {#asterisk} + +在一个 `SELECT` 查询中,星号可以替换表达式。 有关详细信息,请参阅部分 “SELECT”. + +## 表达式 {#syntax-expressions} + +表达式是函数、标识符、文字、运算符的应用程序、括号中的表达式、子查询或星号。 它还可以包含别名。 +表达式列表是一个或多个用逗号分隔的表达式。 +函数和运算符,反过来,可以有表达式作为参数。 + +[原始文章](https://clickhouse.tech/docs/en/query_language/syntax/) diff --git a/docs/zh/query_language/table_functions/file.md b/docs/zh/sql_reference/table_functions/file.md similarity index 52% rename from docs/zh/query_language/table_functions/file.md rename to docs/zh/sql_reference/table_functions/file.md index 88bbc2a3453..b3c93f7f1fd 100644 --- a/docs/zh/query_language/table_functions/file.md +++ b/docs/zh/sql_reference/table_functions/file.md @@ -1,28 +1,31 @@ --- -en_copy: true +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 37 +toc_title: "\u6587\u4EF6" --- -# file {#file} +# 文件 {#file} -Creates a table from a file. This table function is similar to [url](url.md) and [hdfs](hdfs.md) ones. +从文件创建表。 此表函数类似于 [url](url.md) 和 [hdfs](hdfs.md) 一些的。 ``` sql file(path, format, structure) ``` -**Input parameters** +**输入参数** -- `path` — The relative path to the file from [user\_files\_path](../../operations/server_settings/settings.md#server_settings-user_files_path). Path to file support following globs in readonly mode: `*`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, \``'abc', 'def'` — strings. -- `format` — The [format](../../interfaces/formats.md#formats) of the file. +- `path` — The relative path to the file from [user\_files\_path](../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-user_files_path). 只读模式下的globs后的文件支持路径: `*`, `?`, `{abc,def}` 和 `{N..M}` 哪里 `N`, `M` — numbers, \``'abc', 'def'` — strings. +- `format` — The [格式](../../interfaces/formats.md#formats) 的文件。 - `structure` — Structure of the table. 
Format `'column1_name column1_type, column2_name column2_type, ...'`. -**Returned value** +**返回值** -A table with the specified structure for reading or writing data in the specified file. +具有指定结构的表,用于读取或写入指定文件中的数据。 -**Example** +**示例** -Setting `user_files_path` and the contents of the file `test.csv`: +设置 `user_files_path` 和文件的内容 `test.csv`: ``` bash $ grep user_files_path /etc/clickhouse-server/config.xml @@ -34,7 +37,7 @@ $ cat /var/lib/clickhouse/user_files/test.csv 78,43,45 ``` -Table from`test.csv` and selection of the first two rows from it: +表从`test.csv` 并从中选择前两行: ``` sql SELECT * @@ -54,20 +57,20 @@ LIMIT 2 SELECT * FROM file('test.csv', 'CSV', 'column1 UInt32, column2 UInt32, column3 UInt32') LIMIT 10 ``` -**Globs in path** +**路径中的水珠** -Multiple path components can have globs. For being processed file should exists and matches to the whole path pattern (not only suffix or prefix). +多个路径组件可以具有globs。 对于正在处理的文件应该存在并匹配到整个路径模式(不仅后缀或前缀)。 -- `*` — Substitutes any number of any characters except `/` including empty string. +- `*` — Substitutes any number of any characters except `/` 包括空字符串。 - `?` — Substitutes any single character. - `{some_string,another_string,yet_another_one}` — Substitutes any of strings `'some_string', 'another_string', 'yet_another_one'`. -- `{N..M}` — Substitutes any number in range from N to M including both borders (could include leading zeros). +- `{N..M}` — Substitutes any number in range from N to M including both borders. -Constructions with `{}` are similar to the [remote table function](../../query_language/table_functions/remote.md)). +建筑与 `{}` 类似于 [远程表功能](../../sql_reference/table_functions/remote.md)). -**Example** +**示例** -1. Suppose we have several files with the following relative paths: +1. 假设我们有几个具有以下相对路径的文件: - ‘some\_dir/some\_file\_1’ - ‘some\_dir/some\_file\_2’ @@ -76,7 +79,7 @@ Constructions with `{}` are similar to the [remote table function](../../query_l - ‘another\_dir/some\_file\_2’ - ‘another\_dir/some\_file\_3’ -1. Query the amount of rows in these files: +1. 查询这些文件中的行数: @@ -85,7 +88,7 @@ SELECT count(*) FROM file('{some,another}_dir/some_file_{1..3}', 'TSV', 'name String, value UInt32') ``` -1. Query the amount of rows in all files of these two directories: +1. 查询这两个目录的所有文件中的行数: @@ -94,25 +97,25 @@ SELECT count(*) FROM file('{some,another}_dir/*', 'TSV', 'name String, value UInt32') ``` -!!! warning "Warning" - If your listing of files contains number ranges with leading zeros, use the construction with braces for each digit separately or use `?`. +!!! warning "警告" + 如果您的文件列表包含带前导零的数字范围,请单独使用带大括号的构造或使用 `?`. -**Example** +**示例** -Query the data from files named `file000`, `file001`, … , `file999`: +从名为 `file000`, `file001`, … , `file999`: ``` sql SELECT count(*) FROM file('big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name String, value UInt32') ``` -## Virtual Columns {#virtual-columns} +## 虚拟列 {#virtual-columns} - `_path` — Path to the file. - `_file` — Name of the file. 
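+
+例如,下面的查询按来源文件统计行数(路径与表结构沿用上文示例,仅为示意):
+
+``` sql
+SELECT _file, count() AS rows
+FROM file('big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name String, value UInt32')
+GROUP BY _file
+```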
-**See Also** +**另请参阅** -- [Virtual columns](https://clickhouse.tech/docs/en/operations/table_engines/#table_engines-virtual_columns) +- [虚拟列](https://clickhouse.tech/docs/en/operations/table_engines/#table_engines-virtual_columns) -[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/file/) +[原始文章](https://clickhouse.tech/docs/en/query_language/table_functions/file/) diff --git a/docs/zh/query_language/table_functions/generate.md b/docs/zh/sql_reference/table_functions/generate.md similarity index 76% rename from docs/zh/query_language/table_functions/generate.md rename to docs/zh/sql_reference/table_functions/generate.md index 273b5bd7e23..84c711711d5 100644 --- a/docs/zh/query_language/table_functions/generate.md +++ b/docs/zh/sql_reference/table_functions/generate.md @@ -1,18 +1,21 @@ --- -en_copy: true +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 47 +toc_title: generateRandom --- # generateRandom {#generaterandom} -Generates random data with given schema. -Allows to populate test tables with data. -Supports all data types that can be stored in table except `LowCardinality` and `AggregateFunction`. +使用给定的模式生成随机数据。 +允许用数据填充测试表。 +支持可以存储在表中的所有数据类型,除了 `LowCardinality` 和 `AggregateFunction`. ``` sql generateRandom('name TypeName[, name TypeName]...', [, 'random_seed'[, 'max_string_length'[, 'max_array_length']]]); ``` -**Parameters** +**参数** - `name` — Name of corresponding column. - `TypeName` — Type of corresponding column. @@ -21,11 +24,11 @@ generateRandom('name TypeName[, name TypeName]...', [, 'random_seed'[, 'max_stri - `max_string_length` — Maximum string length for all generated strings. Defaults to `10`. - `random_seed` — Specify random seed manually to produce stable results. If NULL — seed is randomly generated. -**Returned Value** +**返回值** -A table object with requested schema. +具有请求架构的表对象。 -## Usage Example {#usage-example} +## 用法示例 {#usage-example} ``` sql SELECT * FROM generateRandom('a Array(Int8), d Decimal32(4), c Tuple(DateTime64(3), UUID)', 1, 10, 2); @@ -39,4 +42,4 @@ SELECT * FROM generateRandom('a Array(Int8), d Decimal32(4), c Tuple(DateTime64( └──────────┴──────────────┴────────────────────────────────────────────────────────────────────┘ ``` -[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/generate/) +[原始文章](https://clickhouse.tech/docs/en/query_language/table_functions/generate/) diff --git a/docs/zh/query_language/table_functions/hdfs.md b/docs/zh/sql_reference/table_functions/hdfs.md similarity index 53% rename from docs/zh/query_language/table_functions/hdfs.md rename to docs/zh/sql_reference/table_functions/hdfs.md index 22e64665179..2cf79c31c83 100644 --- a/docs/zh/query_language/table_functions/hdfs.md +++ b/docs/zh/sql_reference/table_functions/hdfs.md @@ -1,28 +1,31 @@ --- -en_copy: true +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 45 +toc_title: hdfs --- # hdfs {#hdfs} -Creates a table from files in HDFS. This table function is similar to [url](url.md) and [file](file.md) ones. +从HDFS中的文件创建表。 此表函数类似于 [url](url.md) 和 [文件](file.md) 一些的。 ``` sql hdfs(URI, format, structure) ``` -**Input parameters** +**输入参数** -- `URI` — The relative URI to the file in HDFS. Path to file support following globs in readonly mode: `*`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, \``'abc', 'def'` — strings. -- `format` — The [format](../../interfaces/formats.md#formats) of the file. 
+- `URI` — The relative URI to the file in HDFS. Path to file support following globs in readonly mode: `*`, `?`, `{abc,def}` 和 `{N..M}` 哪里 `N`, `M` — numbers, \``'abc', 'def'` — strings. +- `format` — The [格式](../../interfaces/formats.md#formats) 的文件。 - `structure` — Structure of the table. Format `'column1_name column1_type, column2_name column2_type, ...'`. -**Returned value** +**返回值** -A table with the specified structure for reading or writing data in the specified file. +具有指定结构的表,用于读取或写入指定文件中的数据。 -**Example** +**示例** -Table from `hdfs://hdfs1:9000/test` and selection of the first two rows from it: +表从 `hdfs://hdfs1:9000/test` 并从中选择前两行: ``` sql SELECT * @@ -37,20 +40,20 @@ LIMIT 2 └─────────┴─────────┴─────────┘ ``` -**Globs in path** +**路径中的水珠** -Multiple path components can have globs. For being processed file should exists and matches to the whole path pattern (not only suffix or prefix). +多个路径组件可以具有globs。 对于正在处理的文件应该存在并匹配到整个路径模式(不仅后缀或前缀)。 -- `*` — Substitutes any number of any characters except `/` including empty string. +- `*` — Substitutes any number of any characters except `/` 包括空字符串。 - `?` — Substitutes any single character. - `{some_string,another_string,yet_another_one}` — Substitutes any of strings `'some_string', 'another_string', 'yet_another_one'`. -- `{N..M}` — Substitutes any number in range from N to M including both borders (could include leading zeros). +- `{N..M}` — Substitutes any number in range from N to M including both borders. -Constructions with `{}` are similar to the [remote table function](../../query_language/table_functions/remote.md)). +建筑与 `{}` 类似于 [远程表功能](../../sql_reference/table_functions/remote.md)). -**Example** +**示例** -1. Suppose that we have several files with following URIs on HDFS: +1. 假设我们在HDFS上有几个具有以下Uri的文件: - ‘hdfs://hdfs1:9000/some\_dir/some\_file\_1’ - ‘hdfs://hdfs1:9000/some\_dir/some\_file\_2’ @@ -59,7 +62,7 @@ Constructions with `{}` are similar to the [remote table function](../../query_l - ‘hdfs://hdfs1:9000/another\_dir/some\_file\_2’ - ‘hdfs://hdfs1:9000/another\_dir/some\_file\_3’ -1. Query the amount of rows in these files: +1. 查询这些文件中的行数: @@ -68,7 +71,7 @@ SELECT count(*) FROM hdfs('hdfs://hdfs1:9000/{some,another}_dir/some_file_{1..3}', 'TSV', 'name String, value UInt32') ``` -1. Query the amount of rows in all files of these two directories: +1. 查询这两个目录的所有文件中的行数: @@ -77,25 +80,25 @@ SELECT count(*) FROM hdfs('hdfs://hdfs1:9000/{some,another}_dir/*', 'TSV', 'name String, value UInt32') ``` -!!! warning "Warning" - If your listing of files contains number ranges with leading zeros, use the construction with braces for each digit separately or use `?`. +!!! warning "警告" + 如果您的文件列表包含带前导零的数字范围,请单独使用带大括号的构造或使用 `?`. -**Example** +**示例** -Query the data from files named `file000`, `file001`, … , `file999`: +从名为 `file000`, `file001`, … , `file999`: ``` sql SELECT count(*) FROM hdfs('hdfs://hdfs1:9000/big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name String, value UInt32') ``` -## Virtual Columns {#virtual-columns} +## 虚拟列 {#virtual-columns} - `_path` — Path to the file. - `_file` — Name of the file. 
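+
+例如,下面的查询在结果中输出每行来源文件的完整路径(URI 与表结构沿用上文示例,仅为示意):
+
+``` sql
+SELECT _path, name, value
+FROM hdfs('hdfs://hdfs1:9000/another_dir/some_file_{1..3}', 'TSV', 'name String, value UInt32')
+LIMIT 5
+```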
-**See Also** +**另请参阅** -- [Virtual columns](https://clickhouse.tech/docs/en/operations/table_engines/#table_engines-virtual_columns) +- [虚拟列](https://clickhouse.tech/docs/en/operations/table_engines/#table_engines-virtual_columns) -[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/hdfs/) +[原始文章](https://clickhouse.tech/docs/en/query_language/table_functions/hdfs/) diff --git a/docs/zh/sql_reference/table_functions/index.md b/docs/zh/sql_reference/table_functions/index.md new file mode 100644 index 00000000000..38ef9bf1f4b --- /dev/null +++ b/docs/zh/sql_reference/table_functions/index.md @@ -0,0 +1,38 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_folder_title: "\u8868\u51FD\u6570" +toc_priority: 34 +toc_title: "\u5BFC\u8A00" +--- + +# 表函数 {#table-functions} + +表函数是构造表的方法。 + +您可以使用表函数: + +- [FROM](../statements/select.md#select-from) 《公约》条款 `SELECT` 查询。 + + The method for creating a temporary table that is available only in the current query. The table is deleted when the query finishes. + +- [创建表为\](../statements/create.md#create-table-query) 查询。 + + It's one of the methods of creating a table. + +!!! warning "警告" + 你不能使用表函数,如果 [allow\_ddl](../../operations/settings/permissions_for_queries.md#settings_allow_ddl) 设置被禁用。 + +| 功能 | 产品描述 | +|--------------------|--------------------------------------------------------------------------------------------------------| +| [文件](file.md) | 创建一个 [文件](../../engines/table_engines/special/file.md)-发动机表。 | +| [合并](merge.md) | 创建一个 [合并](../../engines/table_engines/special/merge.md)-发动机表。 | +| [数字](numbers.md) | 创建一个包含整数填充的单列的表。 | +| [远程](remote.md) | 允许您访问远程服务器,而无需创建 [分布](../../engines/table_engines/special/distributed.md)-发动机表。 | +| [url](url.md) | 创建一个 [Url](../../engines/table_engines/special/url.md)-发动机表。 | +| [mysql](mysql.md) | 创建一个 [MySQL](../../engines/table_engines/integrations/mysql.md)-发动机表。 | +| [jdbc](jdbc.md) | 创建一个 [JDBC](../../engines/table_engines/integrations/jdbc.md)-发动机表。 | +| [odbc](odbc.md) | 创建一个 [ODBC](../../engines/table_engines/integrations/odbc.md)-发动机表。 | +| [hdfs](hdfs.md) | 创建一个 [HDFS](../../engines/table_engines/integrations/hdfs.md)-发动机表。 | + +[原始文章](https://clickhouse.tech/docs/en/query_language/table_functions/) diff --git a/docs/zh/sql_reference/table_functions/input.md b/docs/zh/sql_reference/table_functions/input.md new file mode 100644 index 00000000000..72f71576729 --- /dev/null +++ b/docs/zh/sql_reference/table_functions/input.md @@ -0,0 +1,47 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 46 +toc_title: "\u8F93\u5165" +--- + +# 输入 {#input} + +`input(structure)` -表功能,允许有效地转换和插入数据发送到 +服务器与给定结构的表与另一种结构。 + +`structure` -以下格式发送到服务器的数据结构 `'column1_name column1_type, column2_name column2_type, ...'`. +例如, `'id UInt32, name String'`. + +此功能只能用于 `INSERT SELECT` 查询,只有一次,但其他行为像普通表函数 +(例如,它可以用于子查询等。). + +数据可以以任何方式像普通发送 `INSERT` 查询并传递任何可用 [格式](../../interfaces/formats.md#formats) +必须在查询结束时指定(不像普通 `INSERT SELECT`). + +这个功能的主要特点是,当服务器从客户端接收数据时,它同时将其转换 +根据表达式中的列表 `SELECT` 子句并插入到目标表中。 临时表 +不创建所有传输的数据。 + +**例** + +- 让 `test` 表具有以下结构 `(a String, b String)` + 和数据 `data.csv` 具有不同的结构 `(col1 String, col2 Date, col3 Int32)`. 
查询插入 + 从数据 `data.csv` 进 `test` 同时转换的表如下所示: + + + +``` bash +$ cat data.csv | clickhouse-client --query="INSERT INTO test SELECT lower(col1), col3 * col3 FROM input('col1 String, col2 Date, col3 Int32') FORMAT CSV"; +``` + +- 如果 `data.csv` 包含相同结构的数据 `test_structure` 作为表 `test` 那么这两个查询是相等的: + + + +``` bash +$ cat data.csv | clickhouse-client --query="INSERT INTO test FORMAT CSV" +$ cat data.csv | clickhouse-client --query="INSERT INTO test SELECT * FROM input('test_structure') FORMAT CSV" +``` + +[原始文章](https://clickhouse.tech/docs/en/query_language/table_functions/input/) diff --git a/docs/zh/sql_reference/table_functions/jdbc.md b/docs/zh/sql_reference/table_functions/jdbc.md new file mode 100644 index 00000000000..e2268b42e28 --- /dev/null +++ b/docs/zh/sql_reference/table_functions/jdbc.md @@ -0,0 +1,29 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 43 +toc_title: jdbc +--- + +# jdbc {#table-function-jdbc} + +`jdbc(jdbc_connection_uri, schema, table)` -返回通过JDBC驱动程序连接的表。 + +此表函数需要单独的 `clickhouse-jdbc-bridge` 程序正在运行。 +它支持可空类型(基于查询的远程表的DDL)。 + +**例** + +``` sql +SELECT * FROM jdbc('jdbc:mysql://localhost:3306/?user=root&password=root', 'schema', 'table') +``` + +``` sql +SELECT * FROM jdbc('mysql://localhost:3306/?user=root&password=root', 'schema', 'table') +``` + +``` sql +SELECT * FROM jdbc('datasource://mysql-local', 'schema', 'table') +``` + +[原始文章](https://clickhouse.tech/docs/en/query_language/table_functions/jdbc/) diff --git a/docs/zh/sql_reference/table_functions/merge.md b/docs/zh/sql_reference/table_functions/merge.md new file mode 100644 index 00000000000..7304c447b1f --- /dev/null +++ b/docs/zh/sql_reference/table_functions/merge.md @@ -0,0 +1,14 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 38 +toc_title: "\u5408\u5E76" +--- + +# 合并 {#merge} + +`merge(db_name, 'tables_regexp')` – Creates a temporary Merge table. For more information, see the section “Table engines, Merge”. + +表结构取自与正则表达式匹配的第一个表。 + +[原始文章](https://clickhouse.tech/docs/en/query_language/table_functions/merge/) diff --git a/docs/zh/query_language/table_functions/mysql.md b/docs/zh/sql_reference/table_functions/mysql.md similarity index 59% rename from docs/zh/query_language/table_functions/mysql.md rename to docs/zh/sql_reference/table_functions/mysql.md index 5a8e8d4fd96..3cdf3047aac 100644 --- a/docs/zh/query_language/table_functions/mysql.md +++ b/docs/zh/sql_reference/table_functions/mysql.md @@ -1,16 +1,19 @@ --- -en_copy: true +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 42 +toc_title: mysql --- # mysql {#mysql} -Allows `SELECT` queries to be performed on data that is stored on a remote MySQL server. +允许 `SELECT` 要对存储在远程MySQL服务器上的数据执行的查询。 ``` sql mysql('host:port', 'database', 'table', 'user', 'password'[, replace_query, 'on_duplicate_clause']); ``` -**Parameters** +**参数** - `host:port` — MySQL server address. @@ -22,25 +25,25 @@ mysql('host:port', 'database', 'table', 'user', 'password'[, replace_query, 'on_ - `password` — User password. -- `replace_query` — Flag that converts `INSERT INTO` queries to `REPLACE INTO`. If `replace_query=1`, the query is replaced. +- `replace_query` — Flag that converts `INSERT INTO` 查询到 `REPLACE INTO`. 如果 `replace_query=1`,查询被替换。 -- `on_duplicate_clause` — The `ON DUPLICATE KEY on_duplicate_clause` expression that is added to the `INSERT` query. 
+- `on_duplicate_clause` — The `ON DUPLICATE KEY on_duplicate_clause` 表达式被添加到 `INSERT` 查询。 Example: `INSERT INTO t (c1,c2) VALUES ('a', 2) ON DUPLICATE KEY UPDATE c2 = c2 + 1`, where `on_duplicate_clause` is `UPDATE c2 = c2 + 1`. See the MySQL documentation to find which `on_duplicate_clause` you can use with the `ON DUPLICATE KEY` clause. To specify `on_duplicate_clause` you need to pass `0` to the `replace_query` parameter. If you simultaneously pass `replace_query = 1` and `on_duplicate_clause`, ClickHouse generates an exception. -Simple `WHERE` clauses such as `=, !=, >, >=, <, <=` are currently executed on the MySQL server. +简单 `WHERE` 条款如 `=, !=, >, >=, <, <=` 当前在MySQL服务器上执行。 -The rest of the conditions and the `LIMIT` sampling constraint are executed in ClickHouse only after the query to MySQL finishes. +其余的条件和 `LIMIT` 只有在对MySQL的查询完成后,才会在ClickHouse中执行采样约束。 -**Returned Value** +**返回值** -A table object with the same columns as the original MySQL table. +与原始MySQL表具有相同列的table对象。 -## Usage Example {#usage-example} +## 用法示例 {#usage-example} -Table in MySQL: +MySQL中的表: ``` text mysql> CREATE TABLE `test`.`test` ( @@ -55,15 +58,15 @@ mysql> insert into test (`int_id`, `float`) VALUES (1,2); Query OK, 1 row affected (0,00 sec) mysql> select * from test; -+--------+--------------+-------+----------------+ ++------+----------+-----+----------+ | int_id | int_nullable | float | float_nullable | -+--------+--------------+-------+----------------+ ++------+----------+-----+----------+ | 1 | NULL | 2 | NULL | -+--------+--------------+-------+----------------+ ++------+----------+-----+----------+ 1 row in set (0,00 sec) ``` -Selecting data from ClickHouse: +从ClickHouse中选择数据: ``` sql SELECT * FROM mysql('localhost:3306', 'test', 'test', 'bayonet', '123') @@ -75,9 +78,9 @@ SELECT * FROM mysql('localhost:3306', 'test', 'test', 'bayonet', '123') └────────┴──────────────┴───────┴────────────────┘ ``` -## See Also {#see-also} +## 另请参阅 {#see-also} -- [The ‘MySQL’ table engine](../../operations/table_engines/mysql.md) -- [Using MySQL as a source of external dictionary](../dicts/external_dicts_dict_sources.md#dicts-external_dicts_dict_sources-mysql) +- [该 ‘MySQL’ 表引擎](../../engines/table_engines/integrations/mysql.md) +- [使用MySQL作为外部字典的来源](../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md#dicts-external_dicts_dict_sources-mysql) -[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/mysql/) +[原始文章](https://clickhouse.tech/docs/en/query_language/table_functions/mysql/) diff --git a/docs/zh/sql_reference/table_functions/numbers.md b/docs/zh/sql_reference/table_functions/numbers.md new file mode 100644 index 00000000000..aaee632d5dc --- /dev/null +++ b/docs/zh/sql_reference/table_functions/numbers.md @@ -0,0 +1,30 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 39 +toc_title: "\u6570\u5B57" +--- + +# 数字 {#numbers} + +`numbers(N)` – Returns a table with the single ‘number’ 包含从0到N-1的整数的列(UInt64)。 +`numbers(N, M)` -返回一个表与单 ‘number’ 包含从N到(N+M-1)的整数的列(UInt64)。 + +类似于 `system.numbers` 表,它可以用于测试和生成连续的值, `numbers(N, M)` 比 `system.numbers`. 
+ +以下查询是等效的: + +``` sql +SELECT * FROM numbers(10); +SELECT * FROM numbers(0, 10); +SELECT * FROM system.numbers LIMIT 10; +``` + +例: + +``` sql +-- Generate a sequence of dates from 2010-01-01 to 2010-12-31 +select toDate('2010-01-01') + number as d FROM numbers(365); +``` + +[原始文章](https://clickhouse.tech/docs/en/query_language/table_functions/numbers/) diff --git a/docs/zh/sql_reference/table_functions/odbc.md b/docs/zh/sql_reference/table_functions/odbc.md new file mode 100644 index 00000000000..ad7503fd551 --- /dev/null +++ b/docs/zh/sql_reference/table_functions/odbc.md @@ -0,0 +1,108 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 44 +toc_title: odbc +--- + +# odbc {#table-functions-odbc} + +返回通过连接的表 [ODBC](https://en.wikipedia.org/wiki/Open_Database_Connectivity). + +``` sql +odbc(connection_settings, external_database, external_table) +``` + +参数: + +- `connection_settings` — Name of the section with connection settings in the `odbc.ini` 文件 +- `external_database` — Name of a database in an external DBMS. +- `external_table` — Name of a table in the `external_database`. + +为了安全地实现ODBC连接,ClickHouse使用单独的程序 `clickhouse-odbc-bridge`. 如果直接从ODBC驱动程序加载 `clickhouse-server`,驱动程序问题可能会导致ClickHouse服务器崩溃。 ClickHouse自动启动 `clickhouse-odbc-bridge` 当它是必需的。 ODBC桥程序是从相同的软件包作为安装 `clickhouse-server`. + +与字段 `NULL` 外部表中的值将转换为基数据类型的默认值。 例如,如果远程MySQL表字段具有 `INT NULL` 键入它将转换为0(ClickHouse的默认值 `Int32` 数据类型)。 + +## 用法示例 {#usage-example} + +**通过ODBC从本地MySQL安装获取数据** + +此示例检查Ubuntu Linux18.04和MySQL服务器5.7。 + +确保安装了unixODBC和MySQL连接器。 + +默认情况下(如果从软件包安装),ClickHouse以用户身份启动 `clickhouse`. 因此,您需要在MySQL服务器中创建和配置此用户。 + +``` bash +$ sudo mysql +``` + +``` sql +mysql> CREATE USER 'clickhouse'@'localhost' IDENTIFIED BY 'clickhouse'; +mysql> GRANT ALL PRIVILEGES ON *.* TO 'clickhouse'@'clickhouse' WITH GRANT OPTION; +``` + +然后配置连接 `/etc/odbc.ini`. + +``` bash +$ cat /etc/odbc.ini +[mysqlconn] +DRIVER = /usr/local/lib/libmyodbc5w.so +SERVER = 127.0.0.1 +PORT = 3306 +DATABASE = test +USERNAME = clickhouse +PASSWORD = clickhouse +``` + +您可以使用 `isql` unixodbc安装中的实用程序。 + +``` bash +$ isql -v mysqlconn ++-------------------------+ +| Connected! | +| | +... +``` + +MySQL中的表: + +``` text +mysql> CREATE TABLE `test`.`test` ( + -> `int_id` INT NOT NULL AUTO_INCREMENT, + -> `int_nullable` INT NULL DEFAULT NULL, + -> `float` FLOAT NOT NULL, + -> `float_nullable` FLOAT NULL DEFAULT NULL, + -> PRIMARY KEY (`int_id`)); +Query OK, 0 rows affected (0,09 sec) + +mysql> insert into test (`int_id`, `float`) VALUES (1,2); +Query OK, 1 row affected (0,00 sec) + +mysql> select * from test; ++------+----------+-----+----------+ +| int_id | int_nullable | float | float_nullable | ++------+----------+-----+----------+ +| 1 | NULL | 2 | NULL | ++------+----------+-----+----------+ +1 row in set (0,00 sec) +``` + +从ClickHouse中的MySQL表中检索数据: + +``` sql +SELECT * FROM odbc('DSN=mysqlconn', 'test', 'test') +``` + +``` text +┌─int_id─┬─int_nullable─┬─float─┬─float_nullable─┐ +│ 1 │ 0 │ 2 │ 0 │ +└────────┴──────────────┴───────┴────────────────┘ +``` + +## 另请参阅 {#see-also} + +- [ODBC外部字典](../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md#dicts-external_dicts_dict_sources-odbc) +- [ODBC表引擎](../../engines/table_engines/integrations/odbc.md). 
+ +[原始文章](https://clickhouse.tech/docs/en/query_language/table_functions/jdbc/) diff --git a/docs/zh/sql_reference/table_functions/remote.md b/docs/zh/sql_reference/table_functions/remote.md new file mode 100644 index 00000000000..be6e9138fb4 --- /dev/null +++ b/docs/zh/sql_reference/table_functions/remote.md @@ -0,0 +1,83 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 40 +toc_title: "\u8FDC\u7A0B" +--- + +# 远程,远程安全 {#remote-remotesecure} + +允许您访问远程服务器,而无需创建 `Distributed` 桌子 + +签名: + +``` sql +remote('addresses_expr', db, table[, 'user'[, 'password']]) +remote('addresses_expr', db.table[, 'user'[, 'password']]) +``` + +`addresses_expr` – An expression that generates addresses of remote servers. This may be just one server address. The server address is `host:port`,或者只是 `host`. 主机可以指定为服务器名称,也可以指定为IPv4或IPv6地址。 IPv6地址在方括号中指定。 端口是远程服务器上的TCP端口。 如果省略端口,它使用 `tcp_port` 从服务器的配置文件(默认情况下,9000)。 + +!!! important "重要事项" + IPv6地址需要该端口。 + +例: + +``` text +example01-01-1 +example01-01-1:9000 +localhost +127.0.0.1 +[::]:9000 +[2a02:6b8:0:1111::11]:9000 +``` + +多个地址可以用逗号分隔。 在这种情况下,ClickHouse将使用分布式处理,因此它将将查询发送到所有指定的地址(如具有不同数据的分片)。 + +示例: + +``` text +example01-01-1,example01-02-1 +``` + +表达式的一部分可以用大括号指定。 前面的示例可以写成如下: + +``` text +example01-0{1,2}-1 +``` + +大括号可以包含由两个点(非负整数)分隔的数字范围。 在这种情况下,范围将扩展为生成分片地址的一组值。 如果第一个数字以零开头,则使用相同的零对齐形成值。 前面的示例可以写成如下: + +``` text +example01-{01..02}-1 +``` + +如果您有多对大括号,它会生成相应集合的直接乘积。 + +大括号中的地址和部分地址可以用管道符号(\|)分隔。 在这种情况下,相应的地址集被解释为副本,并且查询将被发送到第一个正常副本。 但是,副本将按照当前设置的顺序进行迭代 [load\_balancing](../../operations/settings/settings.md) 设置。 + +示例: + +``` text +example01-{01..02}-{1|2} +``` + +此示例指定两个分片,每个分片都有两个副本。 + +生成的地址数由常量限制。 现在这是1000个地址。 + +使用 `remote` 表函数比创建一个不太优化 `Distributed` 表,因为在这种情况下,服务器连接被重新建立为每个请求。 此外,如果设置了主机名,则会解析这些名称,并且在使用各种副本时不会计算错误。 在处理大量查询时,始终创建 `Distributed` 表的时间提前,不要使用 `remote` 表功能。 + +该 `remote` 表函数可以在以下情况下是有用的: + +- 访问特定服务器进行数据比较、调试和测试。 +- 查询之间的各种ClickHouse群集用于研究目的。 +- 手动发出的罕见分布式请求。 +- 每次重新定义服务器集的分布式请求。 + +如果未指定用户, `default` 被使用。 +如果未指定密码,则使用空密码。 + +`remoteSecure` -相同 `remote` but with secured connection. Default port — [tcp\_port\_secure](../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-tcp_port_secure) 从配置或9440. + +[原始文章](https://clickhouse.tech/docs/en/query_language/table_functions/remote/) diff --git a/docs/zh/sql_reference/table_functions/url.md b/docs/zh/sql_reference/table_functions/url.md new file mode 100644 index 00000000000..d220bb05c2c --- /dev/null +++ b/docs/zh/sql_reference/table_functions/url.md @@ -0,0 +1,26 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 41 +toc_title: url +--- + +# url {#url} + +`url(URL, format, structure)` -返回从创建的表 `URL` 与给定 +`format` 和 `structure`. + +URL-HTTP或HTTPS服务器地址,它可以接受 `GET` 和/或 `POST` 请求。 + +格式 - [格式](../../interfaces/formats.md#formats) 的数据。 + +结构-表结构 `'UserID UInt64, Name String'` 格式。 确定列名称和类型。 + +**示例** + +``` sql +-- getting the first 3 lines of a table that contains columns of String and UInt32 type from HTTP-server which answers in CSV format. 
+SELECT * FROM url('http://127.0.0.1:12345/', CSV, 'column1 String, column2 UInt32') LIMIT 3 +``` + +[原始文章](https://clickhouse.tech/docs/en/query_language/table_functions/url/) diff --git a/docs/zh/whats_new/changelog/2017.md b/docs/zh/whats_new/changelog/2017.md new file mode 100644 index 00000000000..ed77ead9023 --- /dev/null +++ b/docs/zh/whats_new/changelog/2017.md @@ -0,0 +1,268 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 79 +toc_title: '2017' +--- + +### ClickHouse释放1.1.54327,2017-12-21 {#clickhouse-release-1-1-54327-2017-12-21} + +此版本包含以前版本1.1.54318的错误修复: + +- 修复了可能导致数据丢失的复制中可能存在的争用条件的错误。 此问题影响版本1.1.54310和1.1.54318。 如果将其中一个版本用于复制的表,则强烈建议进行更新。 此问题显示在日志中的警告消息,如 `Part ... from own log doesn't exist.` 即使您在日志中没有看到这些消息,问题也是相关的。 + +### 碌莽禄,拢,010-68520682\ {#clickhouse-release-1-1-54318-2017-11-30} + +此版本包含以前版本1.1.54310的错误修复: + +- 修复了SummingMergeTree引擎中合并过程中错误的行删除 +- 修复了未复制的MergeTree引擎中的内存泄漏 +- 修复了MergeTree引擎中频繁插入的性能下降 +- 修复了导致复制队列停止运行的问题 +- 固定服务器日志的轮换和归档 + +### ClickHouse释放1.1.54310,2017-11-01 {#clickhouse-release-1-1-54310-2017-11-01} + +#### 新功能: {#new-features} + +- MergeTree表引擎系列的自定义分区键。 +- [卡夫卡](https://clickhouse.yandex/docs/en/operations/table_engines/kafka/) 表引擎。 +- 增加了对加载的支持 [CatBoost](https://catboost.yandex/) 模型并将其应用到ClickHouse中存储的数据。 +- 增加了对UTC非整数偏移的时区的支持。 +- 增加了对具有时间间隔的算术运算的支持。 +- 日期和日期时间类型的值范围扩展到2105年。 +- 添加了 `CREATE MATERIALIZED VIEW x TO y` 查询(指定用于存储实例化视图数据的现有表)。 +- 添加了 `ATTACH TABLE` 不带参数的查询。 +- 将SummingMergeTree表中名称以-Map结尾的嵌套列的处理逻辑提取到sumMap聚合函数中。 现在,您可以显式指定此类列。 +- IP trie字典的最大大小增加到128M条目。 +- 添加了getSizeOfEnumType函数。 +- 添加了sumWithOverflow聚合函数。 +- 增加了对Cap'n Proto输入格式的支持。 +- 使用zstd算法时,您现在可以自定义压缩级别。 + +#### 向后不兼容的更改: {#backward-incompatible-changes} + +- 不允许使用内存以外的引擎创建临时表。 +- 不允许使用View或MaterializedView引擎显式创建表。 +- 在创建表期间,新检查将验证采样键表达式是否包含在主键中。 + +#### 错误修复: {#bug-fixes} + +- 修复了同步插入到分布式表中时的挂断问题。 +- 修复了复制表中部分的非原子添加和删除。 +- 插入到实例化视图中的数据不会遭受不必要的重复数据删除。 +- 对本地副本滞后且远程副本不可用的分布式表执行查询不会再导致错误。 +- 用户不需要访问权限 `default` 数据库创建临时表了。 +- 修复了在指定数组类型时不带参数的崩溃。 +- 修复了包含服务器日志的磁盘卷已满时的挂机问题。 +- 修复了unix时代的第一周toRelativeWeekNum函数的溢出。 + +#### 构建改进: {#build-improvements} + +- 几个第三方库(特别是Poco)被更新并转换为git子模块。 + +### ClickHouse释放1.1.54304,2017-10-19 {#clickhouse-release-1-1-54304-2017-10-19} + +#### 新功能: {#new-features-1} + +- 本机协议中的TLS支持(要启用,请设置 `tcp_ssl_port` 在 `config.xml` ). + +#### 错误修复: {#bug-fixes-1} + +- `ALTER` 对于复制的表现在尝试尽快开始运行。 +- 使用设置读取数据时修复崩溃 `preferred_block_size_bytes=0.` +- 固定的崩溃 `clickhouse-client` 按下时 `Page Down` +- 正确解释某些复杂的查询 `GLOBAL IN` 和 `UNION ALL` +- `FREEZE PARTITION` 现在总是以原子方式工作。 +- 空POST请求现在返回代码411的响应。 +- 修正了像表达式的解释错误 `CAST(1 AS Nullable(UInt8)).` +- 修正了读取时的错误 `Array(Nullable(String))` 从列 `MergeTree` 桌子 +- 修正了解析查询时崩溃,如 `SELECT dummy AS dummy, dummy AS b` +- 用户正确更新无效 `users.xml` +- 可执行字典返回非零响应代码时的正确处理。 + +### ClickHouse释放1.1.54292,2017-09-20 {#clickhouse-release-1-1-54292-2017-09-20} + +#### 新功能: {#new-features-2} + +- 添加了 `pointInPolygon` 用于处理坐标平面上的坐标的函数。 +- 添加了 `sumMap` 用于计算数组总和的聚合函数,类似于 `SummingMergeTree`. +- 添加了 `trunc` 功能。 改进舍入函数的性能 (`round`, `floor`, `ceil`, `roundToExp2`)并corrected正了他们如何工作的逻辑。 改变的逻辑 `roundToExp2` 分数和负数的功能。 +- ClickHouse可执行文件现在对libc版本的依赖性较低。 同样的ClickHouse可执行文件可以在各种各样的Linux系统上运行。 使用编译的查询(使用设置)时仍然存在依赖关系 `compile = 1` ,默认情况下不使用)。 +- 减少了动态编译查询所需的时间。 + +#### 错误修复: {#bug-fixes-2} + +- 修正了有时产生的错误 `part ... 
+
+#### 错误修复: {#bug-fixes-2}
+
+- 修复了有时产生 `part ... intersects previous part` 错误消息并削弱副本一致性的错误。
+- 修复了如果ZooKeeper在关闭过程中不可用会导致服务器锁死的错误。
+- 恢复副本时删除了过多的日志记录。
+- 修复了UNION ALL实现中的错误。
+- 修复了当块中第一列为数组类型时concat函数中的错误。
+- 进度现在可以在 system.merges 表中正确显示。
+
+### ClickHouse释放1.1.54289,2017-09-13 {#clickhouse-release-1-1-54289-2017-09-13}
+
+#### 新功能: {#new-features-3}
+
+- `SYSTEM` 服务器管理查询: `SYSTEM RELOAD DICTIONARY`, `SYSTEM RELOAD DICTIONARIES`, `SYSTEM DROP DNS CACHE`, `SYSTEM SHUTDOWN`, `SYSTEM KILL`.
+- 添加了用于处理数组的函数: `concat`, `arraySlice`, `arrayPushBack`, `arrayPushFront`, `arrayPopBack`, `arrayPopFront`.
+- 为ZooKeeper配置添加了 `root` 和 `identity` 参数。 这允许您隔离同一个ZooKeeper集群上的单个用户。
+- 添加了聚合函数 `groupBitAnd`、`groupBitOr` 和 `groupBitXor`(为了兼容,它们也可以使用 `BIT_AND`、`BIT_OR` 和 `BIT_XOR` 这些名称)。
+- 通过在文件系统中指定套接字,可以从MySQL加载外部字典。
+- 外部字典可以通过SSL从MySQL加载(`ssl_cert`, `ssl_key`, `ssl_ca` 参数)。
+- 添加了 `max_network_bandwidth_for_user` 设置,用于限制每个用户查询的总带宽使用。
+- 支持对临时表执行 `DROP TABLE`。
+- 支持从 `CSV` 和 `JSONEachRow` 格式中读取Unix时间戳格式的 `DateTime` 值。
+- 分布式查询中的滞后副本现在默认被排除(默认阈值为5分钟)。
+- 在ALTER期间使用FIFO锁定:对于连续运行的查询,ALTER查询不会被无限期阻塞。
+- 可以在配置文件中设置 `umask` 选项。
+- 改进了带 `DISTINCT` 的查询的性能。
+
+#### 错误修复: {#bug-fixes-3}
+
+- 改进了在ZooKeeper中删除旧节点的过程。 以前,如果插入非常频繁,旧节点有时不会被删除,这会导致服务器关闭缓慢等问题。
+- 修复了连接ZooKeeper时主机选择的随机化。
+- 修复了当滞后副本是localhost时分布式查询对其的排除。
+- 修复了对 `Nested` 结构中的元素运行 `ALTER MODIFY` 后,`ReplicatedMergeTree` 表中的数据部分可能损坏的错误。
+- 修复了可能导致SELECT查询“挂起”的错误。
+- 对分布式DDL查询的改进。
+- 修复了 `CREATE TABLE ... AS` 查询。
+- 解决了 `Buffer` 表的 `ALTER ... CLEAR COLUMN IN PARTITION` 查询中的死锁。
+- 修复了使用 `JSONEachRow` 和 `TSKV` 格式时 `Enum` 的无效默认值(是0而不是最小值)。
+- 解决了使用带 `executable` 源的字典时出现僵尸进程的问题。
+- 修复了HEAD查询的段错误。
+
+#### 改进开发和组装ClickHouse的工作流程: {#improved-workflow-for-developing-and-assembling-clickhouse}
+
+- 您可以使用 `pbuilder` 构建ClickHouse。
+- 在Linux上构建时,您可以使用 `libc++` 代替 `libstdc++`。
+- 添加了使用静态代码分析工具的说明: `Coverage`, `clang-tidy`, `cppcheck`.
+
+#### 升级时请注意: {#please-note-when-upgrading}
+
+- MergeTree设置 `max_bytes_to_merge_at_max_space_in_pool`(要合并的数据部分的最大总大小,以字节为单位)现在有了更高的默认值:从100GiB增加到150GiB。 这可能会导致服务器升级后运行大型合并,从而增加磁盘子系统的负载。 如果服务器上的可用空间小于正在运行的合并总量的两倍,则所有其他合并(包括小数据部分的合并)都将停止运行。 因此,插入查询将失败,并显示消息 “Merges are processing significantly slower than inserts.” 请使用 `SELECT * FROM system.merges` 查询监控情况。 您还可以检查 `system.metrics` 表或Graphite中的 `DiskSpaceReservedForMerge` 度量。 您不需要做任何事情来解决这个问题,因为一旦大型合并完成,问题就会自行解决。 如果您觉得这不可接受,可以恢复 `max_bytes_to_merge_at_max_space_in_pool` 设置以前的值:在config.xml的 `<merge_tree>` 配置部分中设置 ``` <max_bytes_to_merge_at_max_space_in_pool>107374182400</max_bytes_to_merge_at_max_space_in_pool> ``` 并重新启动服务器。
+
+### ClickHouse释放1.1.54284,2017-08-29 {#clickhouse-release-1-1-54284-2017-08-29}
+
+- 这是之前1.1.54282版本的错误修复版本。 它修复了ZooKeeper中部件目录的泄漏。
+
+### ClickHouse释放1.1.54282,2017-08-23 {#clickhouse-release-1-1-54282-2017-08-23}
+
+此版本包含以前版本1.1.54276的错误修复:
+
+- 修复了插入分布式表时的 `DB::Exception: Assertion violation: !_path.empty()`。
+- 修复了输入数据以 ';' 开头时以RowBinary格式插入的解析。
+- 修复了某些聚合函数(例如 `groupArray()`)在运行时编译期间的错误。(示意示例见下文。)
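+
+作为补充,下面是一个会用到此类聚合函数运行时编译路径的最小查询示意(表 `t` 与列 `k`、`x` 为演示假设的名称;运行时编译仅在启用 `compile = 1` 设置时生效):
+
+``` sql
+-- 使用 groupArray 的聚合查询;启用 compile = 1 时可能触发运行时编译
+SELECT k, groupArray(x) AS xs
+FROM t
+GROUP BY k
+```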
+ +### 碌莽禄,拢,010-68520682\ {#clickhouse-release-1-1-54276-2017-08-16} + +#### 新功能: {#new-features-4} + +- 为选择查询添加了一个可选的WITH部分。 查询示例: `WITH 1+1 AS a SELECT a, a*a` +- INSERT可以在分布式表中同步执行:仅在所有分片上保存所有数据后才返回OK。 这是由设置insert\_distributed\_sync=1激活的。 +- 添加了用于处理16字节标识符的UUID数据类型。 +- 添加了CHAR,FLOAT和其他类型的别名,以便与Tableau兼容。 +- 添加了toyyyymm,toYYYYMMDD和toyyyyymmddhhmmss将时间转换为数字的功能。 +- 您可以使用IP地址(与主机名一起使用)来标识群集DDL查询的服务器。 +- 增加了对函数中非常量参数和负偏移的支持 `substring(str, pos, len).` +- 添加了max\_size参数 `groupArray(max_size)(column)` 聚合函数,并优化了其性能。 + +#### 主要变化: {#main-changes} + +- 安全性改进:所有服务器文件都使用0640权限创建(可以通过更改 配置参数)。 +- 改进了语法无效的查询的错误消息。 +- 在合并mergetree大部分数据时,显着降低了内存消耗并提高了性能。 +- 显着提高了ReplacingMergeTree引擎的数据合并性能。 +- 通过组合多个源插入来改进来自分布式表的异步插入的性能。 要启用此功能,请使用设置distributed\_directory\_monitor\_batch\_inserts=1。 + +#### 向后不兼容的更改: {#backward-incompatible-changes-1} + +- 改变聚合状态的二进制格式 `groupArray(array_column)` 数组的函数。 + +#### 更改的完整列表: {#complete-list-of-changes} + +- 添加了 `output_format_json_quote_denormals` 设置,允许以JSON格式输出nan和inf值。 +- 从分布式表读取时优化流分配。 +- 如果值没有更改,可以在只读模式下配置设置。 +- 添加了检索MergeTree引擎的非整数颗粒的功能,以满足preferred\_block\_size\_bytes设置中指定的块大小的限制。 其目的是在处理来自具有大列的表的查询时减少RAM消耗并增加缓存局部性。 +- 高效使用包含如下表达式的索引 `toStartOfHour(x)` 对于像条件 `toStartOfHour(x) op сonstexpr.` +- 添加了MergeTree引擎的新设置(配置中的merge\_tree部分。xml): + - replicated\_deduplication\_window\_seconds设置复制表中重复数据删除插入所允许的秒数。 + - cleanup\_delay\_period设置启动清理以删除过时数据的频率。 + - replicated\_can\_become\_leader可以防止副本成为领导者(并分配合并)。 +- 加速清理,从ZooKeeper中删除过时的数据。 +- 针对群集DDL查询的多个改进和修复。 特别令人感兴趣的是新设置distributed\_ddl\_task\_timeout,它限制了等待群集中服务器响应的时间。 如果未在所有主机上执行ddl请求,则响应将包含超时错误,并且请求将以异步模式执行。 +- 改进了服务器日志中堆栈跟踪的显示。 +- 添加了 “none” 压缩方法的值。 +- 您可以在config中使用多个dictionaries\_config部分。xml +- 可以通过文件系统中的套接字连接到MySQL。 +- 系统。部件表有一个新的列,其中包含有关标记大小的信息,以字节为单位。 + +#### 错误修复: {#bug-fixes-4} + +- 使用合并表的分布式表现在可以正确地用于具有条件的SELECT查询 `_table` 场。 +- 修复了检查数据部分时ReplicatedMergeTree中罕见的争用条件。 +- 固定可能冻结 “leader election” 启动服务器时。 +- 使用数据源的本地副本时,将忽略max\_replica\_delay\_for\_distributed\_queries设置。 这已被修复。 +- 修正了不正确的行为 `ALTER TABLE CLEAR COLUMN IN PARTITION` 尝试清除不存在的列时。 +- 修复了multif函数中使用空数组或字符串时的异常。 +- 修正了反序列化本机格式时过多的内存分配。 +- 修正了Trie字典的不正确的自动更新。 +- 修复了使用SAMPLE从合并表中使用GROUP BY子句运行查询时的异常。 +- 修复了使用distributed\_aggregation\_memory\_efficient=1时组的崩溃。 +- 现在,您可以指定数据库。表在右侧的IN和JOIN。 +- 用于并行聚合的线程太多。 这已被修复。 +- 固定如何 “if” 函数与FixedString参数一起使用。 +- 为权重为0的分片从分布式表中选择工作不正确。 这已被修复。 +- 运行 `CREATE VIEW IF EXISTS no longer causes crashes.` +- 修正了input\_format\_skip\_unknown\_fields=1设置并且有负数时的不正确行为。 +- 修正了一个无限循环 `dictGetHierarchy()` 如果字典中有一些无效的数据,则函数。 +- 固定 `Syntax error: unexpected (...)` 在IN或JOIN子句和合并表中使用子查询运行分布式查询时出错。 +- 修复了从字典表中选择查询的不正确解释。 +- 修正了 “Cannot mremap” 在IN和JOIN子句中使用包含超过20亿个元素的数组时出错。 +- 修复了以MySQL为源的字典的故障转移。 + +#### 改进开发和组装ClickHouse的工作流程: {#improved-workflow-for-developing-and-assembling-clickhouse-1} + +- 构建可以在阿卡迪亚组装。 +- 您可以使用gcc7来编译ClickHouse。 +- 现在使用ccache+distcc的并行构建速度更快。 + +### 碌莽禄,拢,010-68520682\戮卤篓拢,010-68520682\ {#clickhouse-release-1-1-54245-2017-07-04} + +#### 新功能: {#new-features-5} + +- 分布式的DDL(例如, `CREATE TABLE ON CLUSTER`) +- 复制的查询 `ALTER TABLE CLEAR COLUMN IN PARTITION.` +- 字典表的引擎(以表格形式访问字典数据)。 +- 字典数据库引擎(这种类型的数据库会自动为所有连接的外部字典提供字典表)。 +- 您可以通过向源发送请求来检查字典的更新。 +- 限定列名称 +- 使用双引号引用标识符。 +- Http接口中的会话。 +- 复制表的优化查询不仅可以在leader上运行。 + +#### 向后不兼容的更改: {#backward-incompatible-changes-2} + +- 删除设置全局。 + +#### 小的变化: {#minor-changes} + +- 现在,在触发警报之后,日志将打印完整的堆栈跟踪。 +- 在启动时放宽对损坏/额外数据部件数量的验证(有太多误报)。 + +#### 错误修复: {#bug-fixes-5} + +- 修复了连接错误 “sticking” 当插入到分布式表中。 +- GLOBAL IN现在适用于查看分布式表的合并表中的查询。 +- 在Google Compute Engine虚拟机上检测到不正确的内核数。 这已被修复。 +- 缓存外部字典的可执行源如何工作的更改。 
+- 修复了包含空字符的字符串的比较。 +- 修正了Float32主键字段与常量的比较。 +- 以前,对字段大小的不正确估计可能导致分配过大。 +- 修复了使用ALTER查询添加到表中的可空列时的崩溃。 +- 修复了按可空列排序时的崩溃,如果行数小于限制。 +- 修复了仅由常量值组成的子查询的顺序。 +- 以前,复制的表在丢弃表失败后可能仍处于无效状态。 +- 具有空结果的标量子查询的别名不再丢失。 +- 现在如果.so文件被损坏,使用编译的查询不会失败并出现错误。 diff --git a/docs/zh/whats_new/changelog/2018.md b/docs/zh/whats_new/changelog/2018.md new file mode 100644 index 00000000000..b62d8372d1a --- /dev/null +++ b/docs/zh/whats_new/changelog/2018.md @@ -0,0 +1,1063 @@ +--- +machine_translated: true +machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 +toc_priority: 78 +toc_title: '2018' +--- + +## ClickHouse释放18.16 {#clickhouse-release-18-16} + +### 碌莽禄,拢,010-68520682\ {#clickhouse-release-18-16-1-2018-12-21} + +#### 错误修复: {#bug-fixes} + +- 修复了导致使用ODBC源更新字典时出现问题的错误。 [\#3825](https://github.com/ClickHouse/ClickHouse/issues/3825), [\#3829](https://github.com/ClickHouse/ClickHouse/issues/3829) +- 聚集函数的JIT编译现在适用于低心率列。 [\#3838](https://github.com/ClickHouse/ClickHouse/issues/3838) + +#### 改进: {#improvements} + +- 添加了 `low_cardinality_allow_in_native_format` 设置(默认情况下启用)。 如果禁用,则选择查询的LowCardinality列将转换为普通列,插入查询将需要普通列。 [\#3879](https://github.com/ClickHouse/ClickHouse/pull/3879) + +#### 构建改进: {#build-improvements} + +- 修复了基于macOS和ARM的构建。 + +### 碌莽禄,拢,010-68520682\ {#clickhouse-release-18-16-0-2018-12-14} + +#### 新功能: {#new-features} + +- `DEFAULT` 在以半结构化输入格式加载数据时,会计算表达式是否缺少字段 (`JSONEachRow`, `TSKV`). 该功能与启用 `insert_sample_with_metadata` 设置。 [\#3555](https://github.com/ClickHouse/ClickHouse/pull/3555) +- 该 `ALTER TABLE` 查询现在有 `MODIFY ORDER BY` 用于在添加或删除表列时更改排序键的操作。 这是在表有用 `MergeTree` 基于此排序键合并时执行其他任务的系列,例如 `SummingMergeTree`, `AggregatingMergeTree`,等等。 [\#3581](https://github.com/ClickHouse/ClickHouse/pull/3581) [\#3755](https://github.com/ClickHouse/ClickHouse/pull/3755) +- 对于在表 `MergeTree` 家庭,现在你可以指定一个不同的排序键 (`ORDER BY`)和索引 (`PRIMARY KEY`). 排序键可以长于索引。 [\#3581](https://github.com/ClickHouse/ClickHouse/pull/3581) +- 添加了 `hdfs` 表功能和 `HDFS` 用于将数据导入和导出到HDFS的表引擎。 [晨兴-xc](https://github.com/ClickHouse/ClickHouse/pull/3617) +- 增加了使用base64的功能: `base64Encode`, `base64Decode`, `tryBase64Decode`. [Alexander Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/3350) +- 现在,您可以使用一个参数来配置的精度 `uniqCombined` 聚合函数(选择HyperLogLog单元格的数量)。 [\#3406](https://github.com/ClickHouse/ClickHouse/pull/3406) +- 添加了 `system.contributors` 包含在ClickHouse中进行提交的所有人的名称的表。 [\#3452](https://github.com/ClickHouse/ClickHouse/pull/3452) +- 增加了省略分区的能力 `ALTER TABLE ... FREEZE` 查询以便一次备份所有分区。 [\#3514](https://github.com/ClickHouse/ClickHouse/pull/3514) +- 已添加 `dictGet` 和 `dictGetOrDefault` 不需要指定返回值类型的函数。 该类型是从字典描述自动确定的。 [阿莫斯鸟](https://github.com/ClickHouse/ClickHouse/pull/3564) +- 现在,您可以在表描述中为列指定注释,并使用以下方式对其进行更改 `ALTER`. 
[\#3377](https://github.com/ClickHouse/ClickHouse/pull/3377) +- 阅读支持 `Join` 使用简单键键入表格。 [阿莫斯鸟](https://github.com/ClickHouse/ClickHouse/pull/3728) +- 现在,您可以指定选项 `join_use_nulls`, `max_rows_in_join`, `max_bytes_in_join`,和 `join_overflow_mode` 当创建一个 `Join` 键入表。 [阿莫斯鸟](https://github.com/ClickHouse/ClickHouse/pull/3728) +- 添加了 `joinGet` 功能,允许您使用 `Join` 像字典一样键入表格。 [阿莫斯鸟](https://github.com/ClickHouse/ClickHouse/pull/3728) +- 添加了 `partition_key`, `sorting_key`, `primary_key`,和 `sampling_key` 列到 `system.tables` 表以便提供关于表键的信息。 [\#3609](https://github.com/ClickHouse/ClickHouse/pull/3609) +- 添加了 `is_in_partition_key`, `is_in_sorting_key`, `is_in_primary_key`,和 `is_in_sampling_key` 列到 `system.columns` 桌子 [\#3609](https://github.com/ClickHouse/ClickHouse/pull/3609) +- 添加了 `min_time` 和 `max_time` 列到 `system.parts` 桌子 当分区键是由以下表达式组成的表达式时,将填充这些列 `DateTime` 列。 [Emmanuel Donin de Rosière](https://github.com/ClickHouse/ClickHouse/pull/3800) + +#### 错误修复: {#bug-fixes-1} + +- 修复和性能改进 `LowCardinality` 数据类型。 `GROUP BY` 使用 `LowCardinality(Nullable(...))`. 获取的值 `extremes`. 处理高阶函数。 `LEFT ARRAY JOIN`. 分布 `GROUP BY`. 返回的函数 `Array`. 执行 `ORDER BY`. 写入 `Distributed` 表(nicelulu)。 向后兼容 `INSERT` 从实现旧客户端的查询 `Native` 协议 支持 `LowCardinality` 为 `JOIN`. 在单个流中工作时提高性能。 [\#3823](https://github.com/ClickHouse/ClickHouse/pull/3823) [\#3803](https://github.com/ClickHouse/ClickHouse/pull/3803) [\#3799](https://github.com/ClickHouse/ClickHouse/pull/3799) [\#3769](https://github.com/ClickHouse/ClickHouse/pull/3769) [\#3744](https://github.com/ClickHouse/ClickHouse/pull/3744) [\#3681](https://github.com/ClickHouse/ClickHouse/pull/3681) [\#3651](https://github.com/ClickHouse/ClickHouse/pull/3651) [\#3649](https://github.com/ClickHouse/ClickHouse/pull/3649) [\#3641](https://github.com/ClickHouse/ClickHouse/pull/3641) [\#3632](https://github.com/ClickHouse/ClickHouse/pull/3632) [\#3568](https://github.com/ClickHouse/ClickHouse/pull/3568) [\#3523](https://github.com/ClickHouse/ClickHouse/pull/3523) [\#3518](https://github.com/ClickHouse/ClickHouse/pull/3518) +- 固定如何 `select_sequential_consistency` 选项工作。 以前,启用此设置时,在开始写入新分区后,有时会返回不完整的结果。 [\#2863](https://github.com/ClickHouse/ClickHouse/pull/2863) +- 执行DDL时正确指定数据库 `ON CLUSTER` 查询和 `ALTER UPDATE/DELETE`. [\#3772](https://github.com/ClickHouse/ClickHouse/pull/3772) [\#3460](https://github.com/ClickHouse/ClickHouse/pull/3460) +- 为视图中的子查询正确指定了数据库。 [\#3521](https://github.com/ClickHouse/ClickHouse/pull/3521) +- 修正了一个错误 `PREWHERE` 与 `FINAL` 为 `VersionedCollapsingMergeTree`. [7167bfd7](https://github.com/ClickHouse/ClickHouse/commit/7167bfd7b365538f7a91c4307ad77e552ab4e8c1) +- 现在你可以使用 `KILL QUERY` 取消尚未启动的查询,因为它们正在等待锁定表。 [\#3517](https://github.com/ClickHouse/ClickHouse/pull/3517) +- 更正日期和时间计算,如果时钟被移回午夜(这发生在伊朗,并发生在莫斯科1981年至1983年)。 以前,这导致时间比必要的时间早一天重置,并且还导致文本格式的日期和时间格式不正确。 [\#3819](https://github.com/ClickHouse/ClickHouse/pull/3819) +- 修正了某些情况下的错误 `VIEW` 和省略数据库的子查询。 [张冬](https://github.com/ClickHouse/ClickHouse/pull/3521) +- 修正了一个争用条件时,同时从读取 `MATERIALIZED VIEW` 和删除 `MATERIALIZED VIEW` 由于不锁定内部 `MATERIALIZED VIEW`. 
[\#3404](https://github.com/ClickHouse/ClickHouse/pull/3404) [\#3694](https://github.com/ClickHouse/ClickHouse/pull/3694) +- 修正了错误 `Lock handler cannot be nullptr.` [\#3689](https://github.com/ClickHouse/ClickHouse/pull/3689) +- 固定查询处理时 `compile_expressions` 选项已启用(默认情况下启用)。 非确定性常量表达式,如 `now` 功能不再展开。 [\#3457](https://github.com/ClickHouse/ClickHouse/pull/3457) +- 修复了在指定非常量比例参数时发生的崩溃 `toDecimal32/64/128` 功能。 +- 修复了尝试插入数组时的错误 `NULL` 中的元素 `Values` 格式化为类型的列 `Array` 没有 `Nullable` (如果 `input_format_values_interpret_expressions` = 1). [\#3487](https://github.com/ClickHouse/ClickHouse/pull/3487) [\#3503](https://github.com/ClickHouse/ClickHouse/pull/3503) +- 固定连续错误登录 `DDLWorker` 如果动物园管理员不可用。 [8f50c620](https://github.com/ClickHouse/ClickHouse/commit/8f50c620334988b28018213ec0092fe6423847e2) +- 修正了返回类型 `quantile*` 从功能 `Date` 和 `DateTime` 参数的类型。 [\#3580](https://github.com/ClickHouse/ClickHouse/pull/3580) +- 修正了 `WITH` 子句,如果它指定了一个没有表达式的简单别名。 [\#3570](https://github.com/ClickHouse/ClickHouse/pull/3570) +- 固定处理具有命名子查询和限定列名的查询时 `enable_optimize_predicate_expression` 被启用。 [张冬](https://github.com/ClickHouse/ClickHouse/pull/3588) +- 修正了错误 `Attempt to attach to nullptr thread group` 使用实例化视图时。 [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3623) +- 修正了传递某些不正确的参数时崩溃 `arrayReverse` 功能。 [73e3a7b6](https://github.com/ClickHouse/ClickHouse/commit/73e3a7b662161d6005e7727d8a711b930386b871) +- 修正了缓冲区溢出 `extractURLParameter` 功能。 改进的性能。 添加了包含零字节的字符串的正确处理。 [141e9799](https://github.com/ClickHouse/ClickHouse/commit/141e9799e49201d84ea8e951d1bed4fb6d3dacb5) +- 在固定缓冲区溢出 `lowerUTF8` 和 `upperUTF8` 功能。 删除了执行这些功能的能力 `FixedString` 类型参数。 [\#3662](https://github.com/ClickHouse/ClickHouse/pull/3662) +- 修复了删除时罕见的竞争条件 `MergeTree` 桌子 [\#3680](https://github.com/ClickHouse/ClickHouse/pull/3680) +- 修正了从读取时的争用条件 `Buffer` 表和同时执行 `ALTER` 或 `DROP` 在目标桌上。 [\#3719](https://github.com/ClickHouse/ClickHouse/pull/3719) +- 修正了一个段错误,如果 `max_temporary_non_const_columns` 超过限制。 [\#3788](https://github.com/ClickHouse/ClickHouse/pull/3788) + +#### 改进: {#improvements-1} + +- 服务器不会将处理后的配置文件写入 `/etc/clickhouse-server/` 目录。 相反,它将它们保存在 `preprocessed_configs` 里面的目录 `path`. 
这意味着 `/etc/clickhouse-server/` 目录没有写访问权限 `clickhouse` 用户,从而提高了安全性。 [\#2443](https://github.com/ClickHouse/ClickHouse/pull/2443) +- 该 `min_merge_bytes_to_use_direct_io` 默认情况下,选项设置为10GiB。 将在MergeTree系列中执行形成大部分表的合并 `O_DIRECT` 模式,这可以防止过多的页高速缓存逐出。 [\#3504](https://github.com/ClickHouse/ClickHouse/pull/3504) +- 当表数量非常多时,加速服务器启动。 [\#3398](https://github.com/ClickHouse/ClickHouse/pull/3398) +- 添加了连接池和HTTP `Keep-Alive` 用于副本之间的连接。 [\#3594](https://github.com/ClickHouse/ClickHouse/pull/3594) +- 如果查询语法无效,则 `400 Bad Request` 代码在返回 `HTTP` 接口(500以前返回)。 [31bc680a](https://github.com/ClickHouse/ClickHouse/commit/31bc680ac5f4bb1d0360a8ba4696fa84bb47d6ab) +- 该 `join_default_strictness` 选项设置为 `ALL` 默认情况下为兼容性。 [120e2cbe](https://github.com/ClickHouse/ClickHouse/commit/120e2cbe2ff4fbad626c28042d9b28781c805afe) +- 删除日志记录 `stderr` 从 `re2` 无效或复杂正则表达式的库。 [\#3723](https://github.com/ClickHouse/ClickHouse/pull/3723) +- 添加的 `Kafka` 表引擎:在开始从Kafka读取之前检查订阅;表的kafka\_max\_block\_size设置。 [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3396) +- 该 `cityHash64`, `farmHash64`, `metroHash64`, `sipHash64`, `halfMD5`, `murmurHash2_32`, `murmurHash2_64`, `murmurHash3_32`,和 `murmurHash3_64` 函数现在适用于任意数量的参数和元组形式的参数。 [\#3451](https://github.com/ClickHouse/ClickHouse/pull/3451) [\#3519](https://github.com/ClickHouse/ClickHouse/pull/3519) +- 该 `arrayReverse` 函数现在适用于任何类型的数组。 [73e3a7b6](https://github.com/ClickHouse/ClickHouse/commit/73e3a7b662161d6005e7727d8a711b930386b871) +- 增加了一个可选参数:插槽大小的 `timeSlots` 功能。 [基里尔\*什瓦科夫](https://github.com/ClickHouse/ClickHouse/pull/3724) +- 为 `FULL` 和 `RIGHT JOIN`,该 `max_block_size` 设置用于右表中未连接的数据流。 [阿莫斯鸟](https://github.com/ClickHouse/ClickHouse/pull/3699) +- 添加了 `--secure` 命令行参数 `clickhouse-benchmark` 和 `clickhouse-performance-test` 启用TLS。 [\#3688](https://github.com/ClickHouse/ClickHouse/pull/3688) [\#3690](https://github.com/ClickHouse/ClickHouse/pull/3690) +- 类型转换时的结构 `Buffer` 表的类型与目标表的结构不匹配。 [维塔利\*巴拉诺夫](https://github.com/ClickHouse/ClickHouse/pull/3603) +- 添加了 `tcp_keep_alive_timeout` 在指定的时间间隔内不活动后启用保持活动数据包的选项。 [\#3441](https://github.com/ClickHouse/ClickHouse/pull/3441) +- 删除不必要的引用值的分区键中 `system.parts` 表,如果它由单列组成。 [\#3652](https://github.com/ClickHouse/ClickHouse/pull/3652) +- 模函数适用于 `Date` 和 `DateTime` 数据类型。 [\#3385](https://github.com/ClickHouse/ClickHouse/pull/3385) +- 添加同义词的 `POWER`, `LN`, `LCASE`, `UCASE`, `REPLACE`, `LOCATE`, `SUBSTR`,和 `MID` 功能。 [\#3774](https://github.com/ClickHouse/ClickHouse/pull/3774) [\#3763](https://github.com/ClickHouse/ClickHouse/pull/3763) 为了与SQL标准兼容,某些函数名称不区分大小写。 添加语法糖 `SUBSTRING(expr FROM start FOR length)` 对于与SQL的兼容性。 [\#3804](https://github.com/ClickHouse/ClickHouse/pull/3804) +- 增加了以下能力 `mlock` 对应于存储器页 `clickhouse-server` 可执行代码,以防止它被强制出内存。 默认情况下禁用此功能。 [\#3553](https://github.com/ClickHouse/ClickHouse/pull/3553) +- 从读取时改进的性能 `O_DIRECT` (与 `min_bytes_to_use_direct_io` 选项启用)。 [\#3405](https://github.com/ClickHouse/ClickHouse/pull/3405) +- 的改进的性能 `dictGet...OrDefault` 常量键参数和非常量默认参数的函数。 [阿莫斯鸟](https://github.com/ClickHouse/ClickHouse/pull/3563) +- 该 `firstSignificantSubdomain` 功能现在处理域 `gov`, `mil`,和 `edu`. [Igor Hatarist](https://github.com/ClickHouse/ClickHouse/pull/3601) 改进的性能。 [\#3628](https://github.com/ClickHouse/ClickHouse/pull/3628) +- 能够指定用于启动的自定义环境变量 `clickhouse-server` 使用 `SYS-V init.d` 通过定义脚本 `CLICKHOUSE_PROGRAM_ENV` 在 `/etc/default/clickhouse`. 
+ [Pavlo Bashynskyi](https://github.com/ClickHouse/ClickHouse/pull/3612) +- Clickhouse-server init脚本的正确返回代码。 [\#3516](https://github.com/ClickHouse/ClickHouse/pull/3516) +- 该 `system.metrics` 表现在有 `VersionInteger` 公制和 `system.build_options` 有添加的行 `VERSION_INTEGER`,其中包含ClickHouse版本的数字形式,例如 `18016000`. [\#3644](https://github.com/ClickHouse/ClickHouse/pull/3644) +- 删除比较的能力 `Date` 输入一个数字,以避免潜在的错误,如 `date = 2018-12-17`,其中日期周围的引号被错误省略。 [\#3687](https://github.com/ClickHouse/ClickHouse/pull/3687) +- 修正了有状态函数的行为,如 `rowNumberInAllBlocks`. 他们之前输出的结果是由于在查询分析期间启动而大一个数字。 [阿莫斯鸟](https://github.com/ClickHouse/ClickHouse/pull/3729) +- 如果 `force_restore_data` 文件无法删除,将显示错误消息。 [阿莫斯鸟](https://github.com/ClickHouse/ClickHouse/pull/3794) + +#### 构建改进: {#build-improvements-1} + +- 更新了 `jemalloc` 库,它修复了潜在的内存泄漏。 [阿莫斯鸟](https://github.com/ClickHouse/ClickHouse/pull/3557) +- 分析与 `jemalloc` 默认情况下为了调试生成启用。 [2cc82f5c](https://github.com/ClickHouse/ClickHouse/commit/2cc82f5cbe266421cd4c1165286c2c47e5ffcb15) +- 增加了运行集成测试的能力,当只 `Docker` 安装在系统上。 [\#3650](https://github.com/ClickHouse/ClickHouse/pull/3650) +- 在SELECT查询中添加了模糊表达式测试。 [\#3442](https://github.com/ClickHouse/ClickHouse/pull/3442) +- 为提交添加了一个压力测试,它以并行和随机顺序执行功能测试,以检测更多的竞争条件。 [\#3438](https://github.com/ClickHouse/ClickHouse/pull/3438) +- 改进了在Docker映像中启动clickhouse-server的方法。 [Elghazal Ahmed](https://github.com/ClickHouse/ClickHouse/pull/3663) +- 对于Docker映像,增加了对使用数据库中的文件初始化数据库的支持 `/docker-entrypoint-initdb.d` 目录。 [康斯坦丁\*列别杰夫](https://github.com/ClickHouse/ClickHouse/pull/3695) +- 修复了基于ARM的构建。 [\#3709](https://github.com/ClickHouse/ClickHouse/pull/3709) + +#### 向后不兼容的更改: {#backward-incompatible-changes} + +- 删除比较的能力 `Date` 用数字键入。 而不是 `toDate('2018-12-18') = 17883`,必须使用显式类型转换 `= toDate(17883)` [\#3687](https://github.com/ClickHouse/ClickHouse/pull/3687) + +## ClickHouse释放18.14 {#clickhouse-release-18-14} + +### 碌莽禄,拢,010-68520682\ {#clickhouse-release-18-14-19-2018-12-19} + +#### 错误修复: {#bug-fixes-2} + +- 修复了导致使用ODBC源更新字典时出现问题的错误。 [\#3825](https://github.com/ClickHouse/ClickHouse/issues/3825), [\#3829](https://github.com/ClickHouse/ClickHouse/issues/3829) +- 执行DDL时正确指定数据库 `ON CLUSTER` 查询。 [\#3460](https://github.com/ClickHouse/ClickHouse/pull/3460) +- 修正了一个段错误,如果 `max_temporary_non_const_columns` 超过限制。 [\#3788](https://github.com/ClickHouse/ClickHouse/pull/3788) + +#### 构建改进: {#build-improvements-2} + +- 修复了基于ARM的构建。 + +### 碌莽禄,拢,010-68520682\戮卤篓拢,010-68520682\ {#clickhouse-release-18-14-18-2018-12-04} + +#### 错误修复: {#bug-fixes-3} + +- 修正错误 `dictGet...` 类型字典的函数 `range`,如果其中一个参数是恒定的,而另一个则不是。 [\#3751](https://github.com/ClickHouse/ClickHouse/pull/3751) +- 修复了导致消息的错误 `netlink: '...': attribute type 1 has an invalid length` 要打印在Linux内核日志中,这只发生在足够新鲜的Linux内核版本上。 [\#3749](https://github.com/ClickHouse/ClickHouse/pull/3749) +- 在功能固定段错误 `empty` 对于争论 `FixedString` 类型。 [丹尼尔,道广明](https://github.com/ClickHouse/ClickHouse/pull/3703) +- 修正了使用大值时过多的内存分配 `max_query_size` 设置(内存块 `max_query_size` 字节被预先分配一次)。 [\#3720](https://github.com/ClickHouse/ClickHouse/pull/3720) + +#### 构建更改: {#build-changes} + +- 使用操作系统包中的版本7的LLVM/Clang库修复构建(这些库用于运行时查询编译)。 [\#3582](https://github.com/ClickHouse/ClickHouse/pull/3582) + +### 碌莽禄,拢,010-68520682\戮卤篓拢,010-68520682\ {#clickhouse-release-18-14-17-2018-11-30} + +#### 错误修复: {#bug-fixes-4} + +- 修复了ODBC桥进程未与主服务器进程终止的情况。 [\#3642](https://github.com/ClickHouse/ClickHouse/pull/3642) +- 固定同步插入 `Distributed` 具有不同于远程表的列列表的列列表的表。 [\#3673](https://github.com/ClickHouse/ClickHouse/pull/3673) +- 修复了丢弃MergeTree表时可能导致崩溃的罕见竞争条件。 
[\#3643](https://github.com/ClickHouse/ClickHouse/pull/3643) +- 修复了查询线程创建失败时的查询死锁 `Resource temporarily unavailable` 错误 [\#3643](https://github.com/ClickHouse/ClickHouse/pull/3643) +- 修正了解析 `ENGINE` 条款时 `CREATE AS table` 语法被使用和 `ENGINE` 子句之前指定 `AS table` (错误导致忽略指定的引擎)。 [\#3692](https://github.com/ClickHouse/ClickHouse/pull/3692) + +### 碌莽禄,拢,010-68520682\戮卤篓拢,010-68520682\ {#clickhouse-release-18-14-15-2018-11-21} + +#### 错误修复: {#bug-fixes-5} + +- 反序列化类型的列时,高估了内存块的大小 `Array(String)` 这导致 “Memory limit exceeded” 错误。 该问题出现在版本18.12.13中。 [\#3589](https://github.com/ClickHouse/ClickHouse/issues/3589) + +### 碌莽禄,拢,010-68520682\戮卤篓拢,010-68520682\ {#clickhouse-release-18-14-14-2018-11-20} + +#### 错误修复: {#bug-fixes-6} + +- 固定 `ON CLUSTER` 当群集配置为安全时进行查询(标志 ``). [\#3599](https://github.com/ClickHouse/ClickHouse/pull/3599) + +#### 构建更改: {#build-changes-1} + +- 固定的问题(llvm-7从系统,macos) [\#3582](https://github.com/ClickHouse/ClickHouse/pull/3582) + +### 碌莽禄,拢,010-68520682\戮卤篓拢,010-68520682\ {#clickhouse-release-18-14-13-2018-11-08} + +#### 错误修复: {#bug-fixes-7} + +- 修正了 `Block structure mismatch in MergingSorted stream` 错误 [\#3162](https://github.com/ClickHouse/ClickHouse/issues/3162) +- 固定 `ON CLUSTER` 查询的情况下,当安全连接被打开的群集配置( `` 标志)。 [\#3465](https://github.com/ClickHouse/ClickHouse/pull/3465) +- 修复了查询中使用的错误 `SAMPLE`, `PREWHERE` 和别名列。 [\#3543](https://github.com/ClickHouse/ClickHouse/pull/3543) +- 修正了一个罕见的 `unknown compression method` 错误时 `min_bytes_to_use_direct_io` 设置已启用。 [3544](https://github.com/ClickHouse/ClickHouse/pull/3544) + +#### 性能改进: {#performance-improvements} + +- 查询的固定性能回归 `GROUP BY` 在AMD EPYC处理器上执行时,uint16或Date类型的列。 [Igor Lapko](https://github.com/ClickHouse/ClickHouse/pull/3512) +- 修正了处理长字符串的查询的性能回归。 [\#3530](https://github.com/ClickHouse/ClickHouse/pull/3530) + +#### 构建改进: {#build-improvements-3} + +- 简化阿卡迪亚构建的改进。 [\#3475](https://github.com/ClickHouse/ClickHouse/pull/3475), [\#3535](https://github.com/ClickHouse/ClickHouse/pull/3535) + +### 碌莽禄,拢,010-68520682\戮卤篓拢,010-68520682\ {#clickhouse-release-18-14-12-2018-11-02} + +#### 错误修复: {#bug-fixes-8} + +- 修复了加入两个未命名的子查询时的崩溃。 [\#3505](https://github.com/ClickHouse/ClickHouse/pull/3505) +- 修正了生成不正确的查询(用空 `WHERE` 子句)查询外部数据库时。 [hotid](https://github.com/ClickHouse/ClickHouse/pull/3477) +- 修正了在ODBC字典中使用不正确的超时值。 [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3511) + +### 碌莽禄,拢,010-68520682\戮卤篓拢,010-68520682\ {#clickhouse-release-18-14-11-2018-10-29} + +#### 错误修复: {#bug-fixes-9} + +- 修正了错误 `Block structure mismatch in UNION stream: different number of columns` 在限制查询。 [\#2156](https://github.com/ClickHouse/ClickHouse/issues/2156) +- 修复了在嵌套结构中包含数组的表中合并数据时出现的错误。 [\#3397](https://github.com/ClickHouse/ClickHouse/pull/3397) +- 修正了不正确的查询结果,如果 `merge_tree_uniform_read_distribution` 设置被禁用(默认情况下启用)。 [\#3429](https://github.com/ClickHouse/ClickHouse/pull/3429) +- 修复了在本机格式的分布式表中插入错误。 [\#3411](https://github.com/ClickHouse/ClickHouse/issues/3411) + +### 碌莽禄,拢,010-68520682\戮卤篓拢,010-68520682\ {#clickhouse-release-18-14-10-2018-10-23} + +- 该 `compile_expressions` 默认情况下禁用设置(表达式的JIT编译)。 [\#3410](https://github.com/ClickHouse/ClickHouse/pull/3410) +- 该 `enable_optimize_predicate_expression` 默认情况下禁用设置。 + +### 碌莽禄,拢,010-68520682\戮卤篓拢,010-68520682\ {#clickhouse-release-18-14-9-2018-10-16} + +#### 新功能: {#new-features-1} + +- 该 `WITH CUBE` 修饰符 `GROUP BY` (替代语法 `GROUP BY CUBE(...)` 也可用)。 [\#3172](https://github.com/ClickHouse/ClickHouse/pull/3172) +- 添加了 `formatDateTime` 功能。 [Alexandr Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/2770) +- 添加了 
`JDBC` 表引擎和 `jdbc` 表功能(需要安装clickhouse-jdbc桥)。 [Alexandr Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/3210) +- 增加了使用ISO周编号的功能: `toISOWeek`, `toISOYear`, `toStartOfISOYear`,和 `toDayOfYear`. [\#3146](https://github.com/ClickHouse/ClickHouse/pull/3146) +- 现在你可以使用 `Nullable` 列 `MySQL` 和 `ODBC` 桌子 [\#3362](https://github.com/ClickHouse/ClickHouse/pull/3362) +- 嵌套的数据结构可以被读取为嵌套的对象 `JSONEachRow` 格式。 添加了 `input_format_import_nested_json` 设置。 [维罗曼\*云坎](https://github.com/ClickHouse/ClickHouse/pull/3144) +- 并行处理可用于许多 `MATERIALIZED VIEW`s插入数据时。 见 `parallel_view_processing` 设置。 [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3208) +- 添加了 `SYSTEM FLUSH LOGS` 查询(强制日志刷新到系统表,如 `query_log`) [\#3321](https://github.com/ClickHouse/ClickHouse/pull/3321) +- 现在,您可以使用预定义 `database` 和 `table` 声明时的宏 `Replicated` 桌子 [\#3251](https://github.com/ClickHouse/ClickHouse/pull/3251) +- 增加了阅读的能力 `Decimal` 工程表示法中的类型值(表示十的幂)。 [\#3153](https://github.com/ClickHouse/ClickHouse/pull/3153) + +#### 实验特点: {#experimental-features} + +- 对GROUP BY子句进行优化 `LowCardinality data types.` [\#3138](https://github.com/ClickHouse/ClickHouse/pull/3138) +- 表达式的优化计算 `LowCardinality data types.` [\#3200](https://github.com/ClickHouse/ClickHouse/pull/3200) + +#### 改进: {#improvements-2} + +- 显着减少查询的内存消耗 `ORDER BY` 和 `LIMIT`. 见 `max_bytes_before_remerge_sort` 设置。 [\#3205](https://github.com/ClickHouse/ClickHouse/pull/3205) +- 在没有 `JOIN` (`LEFT`, `INNER`, …), `INNER JOIN` 是假定的。 [\#3147](https://github.com/ClickHouse/ClickHouse/pull/3147) +- 限定星号在以下查询中正常工作 `JOIN`. [张冬](https://github.com/ClickHouse/ClickHouse/pull/3202) +- 该 `ODBC` 表引擎正确地选择用于引用远程数据库的SQL方言中的标识符的方法。 [Alexandr Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/3210) +- 该 `compile_expressions` 默认情况下启用设置(表达式的JIT编译)。 +- 修复了同时删除数据库/表(如果存在)和创建数据库/表(如果不存在)的行为。 前情提要 `CREATE DATABASE ... IF NOT EXISTS` 查询可能会返回错误消息 “File … already exists” 和 `CREATE TABLE ... IF NOT EXISTS` 和 `DROP TABLE IF EXISTS` 查询可能会返回 `Table ... is creating or attaching right now`. [\#3101](https://github.com/ClickHouse/ClickHouse/pull/3101) +- 当从MySQL或ODBC表中查询时,LIKE和IN表达式具有常量右半部分被传递到远程服务器。 [\#3182](https://github.com/ClickHouse/ClickHouse/pull/3182) +- 当从MySQL和ODBC表查询时,与WHERE子句中常量表达式的比较会传递给远程服务器。 以前,只通过与常量的比较。 [\#3182](https://github.com/ClickHouse/ClickHouse/pull/3182) +- 正确计算终端中的行宽 `Pretty` 格式,包括带有象形文字的字符串。 [阿莫斯鸟](https://github.com/ClickHouse/ClickHouse/pull/3257). +- `ON CLUSTER` 可以指定 `ALTER UPDATE` 查询。 +- 提高了读取数据的性能 `JSONEachRow` 格式。 [\#3332](https://github.com/ClickHouse/ClickHouse/pull/3332) +- 添加同义词的 `LENGTH` 和 `CHARACTER_LENGTH` 功能的兼容性。 该 `CONCAT` 函数不再区分大小写。 [\#3306](https://github.com/ClickHouse/ClickHouse/pull/3306) +- 添加了 `TIMESTAMP` 的同义词 `DateTime` 类型。 [\#3390](https://github.com/ClickHouse/ClickHouse/pull/3390) +- 服务器日志中始终为query\_id保留空间,即使日志行与查询无关。 这使得使用第三方工具更容易分析服务器文本日志。 +- 当查询超过整数千兆字节的下一级别时,会记录查询的内存消耗。 [\#3205](https://github.com/ClickHouse/ClickHouse/pull/3205) +- 为使用本机协议的客户端库错误发送的列少于服务器预期的插入查询时的情况添加了兼容模式。 使用clickhouse-cpp库时,这种情况是可能的。 以前,此方案会导致服务器崩溃。 [\#3171](https://github.com/ClickHouse/ClickHouse/pull/3171) +- 在用户定义的WHERE表达式中 `clickhouse-copier`,您现在可以使用 `partition_key` 别名(用于按源表分区进行其他过滤)。 如果分区方案在复制过程中发生更改,但仅稍有更改,这很有用。 [\#3166](https://github.com/ClickHouse/ClickHouse/pull/3166) +- 的工作流程 `Kafka` 引擎已被移动到后台线程池中,以便在高负载下自动降低数据读取速度。 [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3215). +- 支持阅读 `Tuple` 和 `Nested` 结构的值,如 `struct` 在 `Cap'n'Proto format`. 
[Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3216) +- 顶级域名列表 `firstSignificantSubdomain` 功能现在包括域 `biz`. [decaseal](https://github.com/ClickHouse/ClickHouse/pull/3219) +- 在外部字典的配置, `null_value` 被解释为默认数据类型的值。 [\#3330](https://github.com/ClickHouse/ClickHouse/pull/3330) +- 支持 `intDiv` 和 `intDivOrZero` 功能 `Decimal`. [b48402e8](https://github.com/ClickHouse/ClickHouse/commit/b48402e8712e2b9b151e0eef8193811d433a1264) +- 支持 `Date`, `DateTime`, `UUID`,和 `Decimal` 类型作为键 `sumMap` 聚合函数。 [\#3281](https://github.com/ClickHouse/ClickHouse/pull/3281) +- 支持 `Decimal` 外部字典中的数据类型。 [\#3324](https://github.com/ClickHouse/ClickHouse/pull/3324) +- 支持 `Decimal` 数据类型in `SummingMergeTree` 桌子 [\#3348](https://github.com/ClickHouse/ClickHouse/pull/3348) +- 增加了专业化 `UUID` 在 `if`. [\#3366](https://github.com/ClickHouse/ClickHouse/pull/3366) +- 减少的数量 `open` 和 `close` 从读取时系统调用 `MergeTree table`. [\#3283](https://github.com/ClickHouse/ClickHouse/pull/3283) +- A `TRUNCATE TABLE` 查询可以在任何副本上执行(将查询传递给领导副本)。 [基里尔\*什瓦科夫](https://github.com/ClickHouse/ClickHouse/pull/3375) + +#### 错误修复: {#bug-fixes-10} + +- 修正了一个问题 `Dictionary` 表 `range_hashed` 字典 此错误发生在版本18.12.17中。 [\#1702](https://github.com/ClickHouse/ClickHouse/pull/1702) +- 修正了加载时的错误 `range_hashed` 字典(消息 `Unsupported type Nullable (...)`). 此错误发生在版本18.12.17中。 [\#3362](https://github.com/ClickHouse/ClickHouse/pull/3362) +- 在固定的错误 `pointInPolygon` 函数由于不准确的计算的多边形与大量的顶点位于彼此靠近的积累。 [\#3331](https://github.com/ClickHouse/ClickHouse/pull/3331) [\#3341](https://github.com/ClickHouse/ClickHouse/pull/3341) +- 如果在合并数据部分之后,结果部分的校验和与另一个副本中相同合并的结果不同,则删除合并的结果并从另一个副本下载数据部分(这是正确的行为)。 但是在下载数据部分之后,由于该部分已经存在的错误(因为合并后数据部分被删除了一些延迟),因此无法将其添加到工作集中。 这导致周期性尝试下载相同的数据。 [\#3194](https://github.com/ClickHouse/ClickHouse/pull/3194) +- 修正了查询总内存消耗的不正确计算(由于计算不正确, `max_memory_usage_for_all_queries` 设置工作不正确, `MemoryTracking` 度量值不正确)。 此错误发生在版本18.12.13中。 [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3344) +- 修正的功能 `CREATE TABLE ... ON CLUSTER ... AS SELECT ...` 此错误发生在版本18.12.13中。 [\#3247](https://github.com/ClickHouse/ClickHouse/pull/3247) +- 修正了数据结构的不必要的准备 `JOIN`如果发起查询的服务器上 `JOIN` 仅在远程服务器上执行。 [\#3340](https://github.com/ClickHouse/ClickHouse/pull/3340) +- 在固定的错误 `Kafka` 引擎:开始读取数据时异常后的死锁,并在完成时锁定 [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3215). +- 为 `Kafka` 表,可选 `schema` 参数未被传递(的架构 `Cap'n'Proto` 格式)。 [Vojtech Splichal](https://github.com/ClickHouse/ClickHouse/pull/3150) +- 如果ZooKeeper服务器的整体服务器接受连接,但随后立即关闭它,而不是响应握手,ClickHouse选择连接另一台服务器。 以前,这会产生错误 `Cannot read all data. Bytes read: 0. Bytes expected: 4.` 服务器无法启动。 [8218cf3a](https://github.com/ClickHouse/ClickHouse/commit/8218cf3a5f39a43401953769d6d12a0bb8d29da9) +- 如果ZooKeeper服务器的整体包含DNS查询返回错误的服务器,则忽略这些服务器。 [17b8e209](https://github.com/ClickHouse/ClickHouse/commit/17b8e209221061325ad7ba0539f03c6e65f87f29) +- 固定类型之间的转换 `Date` 和 `DateTime` 当在插入数据 `VALUES` 格式(如果 `input_format_values_interpret_expressions = 1`). 以前,转换是在Unix Epoch时间中的天数和Unix时间戳的数值之间进行的,这会导致意外的结果。 [\#3229](https://github.com/ClickHouse/ClickHouse/pull/3229) +- 修正类型之间的转换 `Decimal` 和整数。 [\#3211](https://github.com/ClickHouse/ClickHouse/pull/3211) +- 在固定的错误 `enable_optimize_predicate_expression` 设置。 [张冬](https://github.com/ClickHouse/ClickHouse/pull/3231) +- 如果使用非默认的CSV分隔符,则修复了CSV格式的浮点数解析错误,例如 `;` [\#3155](https://github.com/ClickHouse/ClickHouse/pull/3155) +- 修正了 `arrayCumSumNonNegative` 函数(它不累加负值,如果累加器小于零)。 [Aleksey Studnev](https://github.com/ClickHouse/ClickHouse/pull/3163) +- 固定如何 `Merge` 表工作的顶部 `Distributed` 使用时的表 `PREWHERE`. 
[\#3165](https://github.com/ClickHouse/ClickHouse/pull/3165) +- 在错误修复 `ALTER UPDATE` 查询。 +- 在固定的错误 `odbc` 表功能,出现在版本18.12。 [\#3197](https://github.com/ClickHouse/ClickHouse/pull/3197) +- 修正了聚合函数的操作 `StateArray` 组合子 [\#3188](https://github.com/ClickHouse/ClickHouse/pull/3188) +- 修正了划分时崩溃 `Decimal` 值为零。 [69dd6609](https://github.com/ClickHouse/ClickHouse/commit/69dd6609193beb4e7acd3e6ad216eca0ccfb8179) +- 使用固定输出类型的操作 `Decimal` 和整数参数。 [\#3224](https://github.com/ClickHouse/ClickHouse/pull/3224) +- 修正了在段错误 `GROUP BY` 上 `Decimal128`. [3359ba06](https://github.com/ClickHouse/ClickHouse/commit/3359ba06c39fcd05bfdb87d6c64154819621e13a) +- 该 `log_query_threads` 设置(关于查询执行的每个线程的日志记录信息)现在生效,只有当 `log_queries` 选项(有关查询的日志记录信息)设置为1。 由于 `log_query_threads` 默认情况下,即使禁用了查询日志记录,也会先前记录有关线程的信息。 [\#3241](https://github.com/ClickHouse/ClickHouse/pull/3241) +- 修正了分位数聚合函数的分布式操作中的错误(错误消息 `Not found column quantile...`). [292a8855](https://github.com/ClickHouse/ClickHouse/commit/292a885533b8e3b41ce8993867069d14cbd5a664) +- 修复了同时在18.12.17版服务器和旧服务器的集群上工作时的兼容性问题。 对于具有固定和非固定长度的GROUP BY键的分布式查询,如果要聚合大量数据,则返回的数据并不总是完全聚合(两个不同的行包含相同的聚合键)。 [\#3254](https://github.com/ClickHouse/ClickHouse/pull/3254) +- 固定处理替换 `clickhouse-performance-test`,如果查询只包含测试中声明的替换的一部分。 [\#3263](https://github.com/ClickHouse/ClickHouse/pull/3263) +- 修复了使用时的错误 `FINAL` 与 `PREWHERE`. [\#3298](https://github.com/ClickHouse/ClickHouse/pull/3298) +- 修复了使用时的错误 `PREWHERE` 在过程中添加的列 `ALTER`. [\#3298](https://github.com/ClickHouse/ClickHouse/pull/3298) +- 增加了一个检查没有 `arrayJoin` 为 `DEFAULT` 和 `MATERIALIZED` 表达式。 前情提要, `arrayJoin` 插入数据时导致错误。 [\#3337](https://github.com/ClickHouse/ClickHouse/pull/3337) +- 增加了一个检查没有 `arrayJoin` 在一个 `PREWHERE` 条款 以前,这导致了类似的消息 `Size ... doesn't match` 或 `Unknown compression method` 执行查询时。 [\#3357](https://github.com/ClickHouse/ClickHouse/pull/3357) +- 修复了优化后可能发生的极少数情况下的段错误,并将相等性评估与相应的IN表达式链接起来。 [刘一民-字节舞](https://github.com/ClickHouse/ClickHouse/pull/3339) +- 小幅更正 `clickhouse-benchmark`:以前,客户端信息没有发送到服务器;现在关闭时更准确地计算执行的查询数量,并限制迭代次数。 [\#3351](https://github.com/ClickHouse/ClickHouse/pull/3351) [\#3352](https://github.com/ClickHouse/ClickHouse/pull/3352) + +#### 向后不兼容的更改: {#backward-incompatible-changes-1} + +- 删除了 `allow_experimental_decimal_type` 选项。 该 `Decimal` 数据类型可供默认使用。 [\#3329](https://github.com/ClickHouse/ClickHouse/pull/3329) + +## ClickHouse释放18.12 {#clickhouse-release-18-12} + +### 碌莽禄,拢,010-68520682\ {#clickhouse-release-18-12-17-2018-09-16} + +#### 新功能: {#new-features-2} + +- `invalidate_query` (指定查询来检查是否需要更新外部字典的能力)实现了 `clickhouse` 资料来源。 [\#3126](https://github.com/ClickHouse/ClickHouse/pull/3126) +- 增加了使用的能力 `UInt*`, `Int*`,和 `DateTime` 数据类型(与 `Date` 类型)作为 `range_hashed` 定义范围边界的外部字典键。 现在 `NULL` 可用于指定开放范围。 [瓦西里\*内姆科夫](https://github.com/ClickHouse/ClickHouse/pull/3123) +- 该 `Decimal` 类型现在支持 `var*` 和 `stddev*` 聚合函数。 [\#3129](https://github.com/ClickHouse/ClickHouse/pull/3129) +- 该 `Decimal` 类型现在支持数学函数 (`exp`, `sin` 等等。) [\#3129](https://github.com/ClickHouse/ClickHouse/pull/3129) +- 该 `system.part_log` 表现在有 `partition_id` 列。 [\#3089](https://github.com/ClickHouse/ClickHouse/pull/3089) + +#### 错误修复: {#bug-fixes-11} + +- `Merge` 现在正常工作 `Distributed` 桌子 [张冬](https://github.com/ClickHouse/ClickHouse/pull/3159) +- 修复了不兼容(不必要的依赖 `glibc` 版本),这使得它不可能运行ClickHouse的 `Ubuntu Precise` 和旧版本。 在版本18.12.13中出现了不兼容。 [\#3130](https://github.com/ClickHouse/ClickHouse/pull/3130) +- 在固定的错误 `enable_optimize_predicate_expression` 设置。 [张冬](https://github.com/ClickHouse/ClickHouse/pull/3107) +- 
修复了在早于18.12.13的版本上使用副本集群并同时在具有较新版本的服务器上创建表的新副本时出现的向后兼容性的一个小问题(如消息中所示 `Can not clone replica, because the ... updated to new ClickHouse version`,这是合乎逻辑的,但不应该发生)。 [\#3122](https://github.com/ClickHouse/ClickHouse/pull/3122) + +#### 向后不兼容的更改: {#backward-incompatible-changes-2} + +- 该 `enable_optimize_predicate_expression` 默认情况下启用选项(这是相当乐观的)。 如果发生与搜索列名相关的查询分析错误,请设置 `enable_optimize_predicate_expression` 为0。 [张冬](https://github.com/ClickHouse/ClickHouse/pull/3107) + +### 碌莽禄,拢,0755-88888888 {#clickhouse-release-18-12-14-2018-09-13} + +#### 新功能: {#new-features-3} + +- 增加了对 `ALTER UPDATE` 查询。 [\#3035](https://github.com/ClickHouse/ClickHouse/pull/3035) +- 添加了 `allow_ddl` 选项,它限制用户对DDL查询的访问。 [\#3104](https://github.com/ClickHouse/ClickHouse/pull/3104) +- 添加了 `min_merge_bytes_to_use_direct_io` 备选案文 `MergeTree` 引擎允许您为合并的总大小设置阈值(当超过阈值时,将使用O\_DIRECT处理数据部分文件)。 [\#3117](https://github.com/ClickHouse/ClickHouse/pull/3117) +- 该 `system.merges` 系统表现在包含 `partition_id` 列。 [\#3099](https://github.com/ClickHouse/ClickHouse/pull/3099) + +#### 改进 {#improvements-3} + +- 如果数据部分在变异期间保持不变,则副本不会下载该数据部分。 [\#3103](https://github.com/ClickHouse/ClickHouse/pull/3103) +- 使用时,自动完成可用于设置名称 `clickhouse-client`. [\#3106](https://github.com/ClickHouse/ClickHouse/pull/3106) + +#### 错误修复: {#bug-fixes-12} + +- 添加了一个检查是元素的数组的大小 `Nested` 插入时的类型字段。 [\#3118](https://github.com/ClickHouse/ClickHouse/pull/3118) +- 修正了一个错误更新外部字典与 `ODBC` 来源和 `hashed` 存储。 此错误发生在版本18.12.13中。 +- 修复了使用以下命令从查询创建临时表时出现的崩溃 `IN` 条件。 [张冬](https://github.com/ClickHouse/ClickHouse/pull/3098) +- 修复了聚合函数中可能具有的数组的错误 `NULL` 元素。 [张冬](https://github.com/ClickHouse/ClickHouse/pull/3097) + +### 碌莽禄,拢,010-68520682\ {#clickhouse-release-18-12-13-2018-09-10} + +#### 新功能: {#new-features-4} + +- 添加了 `DECIMAL(digits, scale)` 数据类型 (`Decimal32(scale)`, `Decimal64(scale)`, `Decimal128(scale)`). 要启用它,请使用以下设置 `allow_experimental_decimal_type`. [\#2846](https://github.com/ClickHouse/ClickHouse/pull/2846) [\#2970](https://github.com/ClickHouse/ClickHouse/pull/2970) [\#3008](https://github.com/ClickHouse/ClickHouse/pull/3008) [\#3047](https://github.com/ClickHouse/ClickHouse/pull/3047) +- 新 `WITH ROLLUP` 修饰符 `GROUP BY` (替代语法: `GROUP BY ROLLUP(...)`). [\#2948](https://github.com/ClickHouse/ClickHouse/pull/2948) +- 在具有JOIN的查询中,星形字符将扩展为符合SQL标准的所有表中的列列表。 您可以通过设置恢复旧行为 `asterisk_left_columns_only` 在用户配置级别上为1。 [张冬](https://github.com/ClickHouse/ClickHouse/pull/2787) +- 增加了对连接表函数的支持。 [张冬](https://github.com/ClickHouse/ClickHouse/pull/2907) +- 在clickhouse-client中按Tab键进行自动完成。 [谢尔盖\*谢尔宾](https://github.com/ClickHouse/ClickHouse/pull/2447) +- Clickhouse-client中的Ctrl+C清除输入的查询。 [\#2877](https://github.com/ClickHouse/ClickHouse/pull/2877) +- 添加了 `join_default_strictness` 设置(值: `"`, `'any'`, `'all'`). 这允许您不指定 `ANY` 或 `ALL` 为 `JOIN`. 
[\#2982](https://github.com/ClickHouse/ClickHouse/pull/2982) +- 与查询处理相关的服务器日志的每一行都显示了查询ID。 [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482) +- 现在,您可以在clickhouse-client中获取查询执行日志(使用 `send_logs_level` 设置)。 通过分布式查询处理,日志从所有服务器级联。 [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482) +- 该 `system.query_log` 和 `system.processes` (`SHOW PROCESSLIST`)表现在有关所有更改的设置信息,当你运行一个查询(的嵌套结构 `Settings` 数据)。 添加了 `log_query_settings` 设置。 [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482) +- 该 `system.query_log` 和 `system.processes` 表现在显示有关参与查询执行的线程数的信息(请参阅 `thread_numbers` 列)。 [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482) +- 已添加 `ProfileEvents` 用于度量通过网络读取和写入磁盘以及读取和写入磁盘所花费的时间、网络错误的数量以及在网络带宽受限时所花费的等待时间。 [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482) +- 已添加 `ProfileEvents`包含来自rusage的系统指标的计数器(您可以使用它们获取有关用户空间和内核、页面错误和上下文切换的CPU使用率的信息),以及taskstats指标(使用它们获取有关I/O等待时间、CPU等待时间以及读取和记录的数据量的信息,无论是否包含页面缓存)。 [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482) +- 该 `ProfileEvents` 计数器应用于全局和每个查询,以及每个查询执行线程,它允许您按查询详细分析资源消耗情况。 [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482) +- 添加了 `system.query_thread_log` 表,其中包含有关每个查询执行线程的信息。 添加了 `log_query_threads` 设置。 [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482) +- 该 `system.metrics` 和 `system.events` 表现在有内置文档。 [\#3016](https://github.com/ClickHouse/ClickHouse/pull/3016) +- 添加了 `arrayEnumerateDense` 功能。 [阿莫斯鸟](https://github.com/ClickHouse/ClickHouse/pull/2975) +- 添加了 `arrayCumSumNonNegative` 和 `arrayDifference` 功能。 [Aleksey Studnev](https://github.com/ClickHouse/ClickHouse/pull/2942) +- 添加了 `retention` 聚合函数。 [李尚迪](https://github.com/ClickHouse/ClickHouse/pull/2887) +- 现在,您可以使用plus运算符添加(合并)聚合函数的状态,并将聚合函数的状态乘以非负常数。 [\#3062](https://github.com/ClickHouse/ClickHouse/pull/3062) [\#3034](https://github.com/ClickHouse/ClickHouse/pull/3034) +- MergeTree系列中的表现在具有虚拟列 `_partition_id`. [\#3089](https://github.com/ClickHouse/ClickHouse/pull/3089) + +#### 实验特点: {#experimental-features-1} + +- 添加了 `LowCardinality(T)` 数据类型。 此数据类型自动创建值的本地字典,并允许数据处理而无需解压字典。 [\#2830](https://github.com/ClickHouse/ClickHouse/pull/2830) +- 添加了JIT编译函数的缓存和编译前使用次数的计数器。 要JIT编译表达式,请启用 `compile_expressions` 设置。 [\#2990](https://github.com/ClickHouse/ClickHouse/pull/2990) [\#3077](https://github.com/ClickHouse/ClickHouse/pull/3077) + +#### 改进: {#improvements-4} + +- 修复了放弃副本时复制日志无限积累的问题。 为延迟较长的副本添加了有效的恢复模式。 +- 改进的性能 `GROUP BY` 当其中一个是string,其他是固定长度时,具有多个聚合字段。 +- 使用时提高性能 `PREWHERE` 并与表达式的隐式转移 `PREWHERE`. +- 改进文本格式的解析性能 (`CSV`, `TSV`). 
[阿莫斯鸟](https://github.com/ClickHouse/ClickHouse/pull/2977) [\#2980](https://github.com/ClickHouse/ClickHouse/pull/2980) +- 改进了读取二进制格式字符串和数组的性能。 [阿莫斯鸟](https://github.com/ClickHouse/ClickHouse/pull/2955) +- 提高性能和减少内存消耗的查询 `system.tables` 和 `system.columns` 当单个服务器上有非常大量的表时。 [\#2953](https://github.com/ClickHouse/ClickHouse/pull/2953) +- 修复了大量查询导致错误的情况下的性能问题( `_dl_addr` 功能是可见的 `perf top`,但服务器没有使用太多的CPU)。 [\#2938](https://github.com/ClickHouse/ClickHouse/pull/2938) +- 条件被转换到视图中(当 `enable_optimize_predicate_expression` 被启用)。 [张冬](https://github.com/ClickHouse/ClickHouse/pull/2907) +- 改进的功能 `UUID` 数据类型。 [\#3074](https://github.com/ClickHouse/ClickHouse/pull/3074) [\#2985](https://github.com/ClickHouse/ClickHouse/pull/2985) +- 该 `UUID` -Alchemist字典支持数据类型。 [\#2822](https://github.com/ClickHouse/ClickHouse/pull/2822) +- 该 `visitParamExtractRaw` 函数与嵌套结构正常工作。 [张冬](https://github.com/ClickHouse/ClickHouse/pull/2974) +- 当 `input_format_skip_unknown_fields` 启用设置,在对象字段 `JSONEachRow` 格式被正确跳过。 [BlahGeek](https://github.com/ClickHouse/ClickHouse/pull/2958) +- 对于一个 `CASE` 表达式与条件,你现在可以省略 `ELSE`,这相当于 `ELSE NULL`. [\#2920](https://github.com/ClickHouse/ClickHouse/pull/2920) +- 现在可以在使用ZooKeeper时配置操作超时。 [urykhy](https://github.com/ClickHouse/ClickHouse/pull/2971) +- 您可以指定偏移量 `LIMIT n, m` 作为 `LIMIT n OFFSET m`. [\#2840](https://github.com/ClickHouse/ClickHouse/pull/2840) +- 您可以使用 `SELECT TOP n` 语法作为替代 `LIMIT`. [\#2840](https://github.com/ClickHouse/ClickHouse/pull/2840) +- 增加了队列的大小写入系统表,因此 `SystemLog parameter queue is full` 错误不经常发生。 +- 该 `windowFunnel` aggregate函数现在支持满足多个条件的事件。 [阿莫斯鸟](https://github.com/ClickHouse/ClickHouse/pull/2801) +- 重复的列可以用于 `USING` 条款 `JOIN`. [\#3006](https://github.com/ClickHouse/ClickHouse/pull/3006) +- `Pretty` 格式现在对列对齐宽度有限制。 使用 `output_format_pretty_max_column_pad_width` 设置。 如果一个值较宽,它仍将完整显示,但表中的其他单元格不会太宽。 [\#3003](https://github.com/ClickHouse/ClickHouse/pull/3003) +- 该 `odbc` 表函数现在允许您指定数据库/模式名称。 [阿莫斯鸟](https://github.com/ClickHouse/ClickHouse/pull/2885) +- 增加了使用在指定的用户名的能力 `clickhouse-client` 配置文件。 [弗拉基米尔\*科兹宾](https://github.com/ClickHouse/ClickHouse/pull/2909) +- 该 `ZooKeeperExceptions` 计数器已被分成三个计数器: `ZooKeeperUserExceptions`, `ZooKeeperHardwareExceptions`,和 `ZooKeeperOtherExceptions`. +- `ALTER DELETE` 查询适用于实例化视图。 +- 在定期运行清理线程时添加了随机化 `ReplicatedMergeTree` 表,以避免周期性负载尖峰时有一个非常大的数量 `ReplicatedMergeTree` 桌子 +- 支持 `ATTACH TABLE ... ON CLUSTER` 查询。 [\#3025](https://github.com/ClickHouse/ClickHouse/pull/3025) + +#### 错误修复: {#bug-fixes-13} + +- 修正了一个问题 `Dictionary` 表(抛出 `Size of offsets doesn't match size of column` 或 `Unknown compression method` 例外)。 此错误出现在版本18.10.3中。 [\#2913](https://github.com/ClickHouse/ClickHouse/issues/2913) +- 修复了合并时的错误 `CollapsingMergeTree` 如果其中一个数据部分为空(这些部分在合并或合并期间形成 `ALTER DELETE` 如果所有数据被删除),和 `vertical` 算法被用于合并。 [\#3049](https://github.com/ClickHouse/ClickHouse/pull/3049) +- 在固定的竞争条件 `DROP` 或 `TRUNCATE` 为 `Memory` 表与同时 `SELECT`,这可能导致服务器崩溃。 此错误出现在版本1.1.54388中。 [\#3038](https://github.com/ClickHouse/ClickHouse/pull/3038) +- 修正了插入时数据丢失的可能性 `Replicated` 表如果 `Session is expired` 错误返回(数据丢失可以通过检测 `ReplicatedDataLoss` 公制)。 此错误发生在版本1.1.54378。 [\#2939](https://github.com/ClickHouse/ClickHouse/pull/2939) [\#2949](https://github.com/ClickHouse/ClickHouse/pull/2949) [\#2964](https://github.com/ClickHouse/ClickHouse/pull/2964) +- 在修复段错误 `JOIN ... ON`. [\#3000](https://github.com/ClickHouse/ClickHouse/pull/3000) +- 修正了错误搜索列名时 `WHERE` 表达式完全由限定列名组成,例如 `WHERE table.column`. 
[\#2994](https://github.com/ClickHouse/ClickHouse/pull/2994) +- 修正了 “Not found column” 如果从远程服务器请求由IN表达式和子查询组成的单个列,则在执行分布式查询时发生错误。 [\#3087](https://github.com/ClickHouse/ClickHouse/pull/3087) +- 修正了 `Block structure mismatch in UNION stream: different number of columns` 如果其中一个分片是本地的,而另一个分片不是,则发生分布式查询的错误,并优化移动到 `PREWHERE` 被触发。 [\#2226](https://github.com/ClickHouse/ClickHouse/pull/2226) [\#3037](https://github.com/ClickHouse/ClickHouse/pull/3037) [\#3055](https://github.com/ClickHouse/ClickHouse/pull/3055) [\#3065](https://github.com/ClickHouse/ClickHouse/pull/3065) [\#3073](https://github.com/ClickHouse/ClickHouse/pull/3073) [\#3090](https://github.com/ClickHouse/ClickHouse/pull/3090) [\#3093](https://github.com/ClickHouse/ClickHouse/pull/3093) +- 修正了 `pointInPolygon` 非凸多边形的某些情况下的函数。 [\#2910](https://github.com/ClickHouse/ClickHouse/pull/2910) +- 修正了比较时不正确的结果 `nan` 与整数。 [\#3024](https://github.com/ClickHouse/ClickHouse/pull/3024) +- 修正了一个错误 `zlib-ng` 在极少数情况下可能导致segfault的库。 [\#2854](https://github.com/ClickHouse/ClickHouse/pull/2854) +- 修复了插入到表中时的内存泄漏 `AggregateFunction` 列,如果聚合函数的状态不简单(分别分配内存),并且如果单个插入请求导致多个小块。 [\#3084](https://github.com/ClickHouse/ClickHouse/pull/3084) +- 修复了创建和删除相同的竞争条件 `Buffer` 或 `MergeTree` 同时表。 +- 修复了比较由某些非平凡类型(如元组)组成的元组时出现段错误的可能性。 [\#2989](https://github.com/ClickHouse/ClickHouse/pull/2989) +- 修正了运行某些时段错误的可能性 `ON CLUSTER` 查询。 [张冬](https://github.com/ClickHouse/ClickHouse/pull/2960) +- 修正了一个错误 `arrayDistinct` 功能 `Nullable` 数组元素。 [\#2845](https://github.com/ClickHouse/ClickHouse/pull/2845) [\#2937](https://github.com/ClickHouse/ClickHouse/pull/2937) +- 该 `enable_optimize_predicate_expression` 选项现在正确支持的情况下 `SELECT *`. [张冬](https://github.com/ClickHouse/ClickHouse/pull/2929) +- 修复了重新初始化ZooKeeper会话时的段错误。 [\#2917](https://github.com/ClickHouse/ClickHouse/pull/2917) +- 与ZooKeeper工作时固定的潜在阻塞。 +- 修正了不正确的代码添加嵌套的数据结构中 `SummingMergeTree`. +- 在为聚合函数的状态分配内存时,会正确考虑对齐,这使得在实现聚合函数的状态时可以使用需要对齐的操作。 [晨兴-xc](https://github.com/ClickHouse/ClickHouse/pull/2808) + +#### 安全修复: {#security-fix} + +- 安全使用ODBC数据源。 与ODBC驱动程序的交互使用单独的 `clickhouse-odbc-bridge` 过程。 第三方ODBC驱动程序中的错误不再导致服务器稳定性问题或漏洞。 [\#2828](https://github.com/ClickHouse/ClickHouse/pull/2828) [\#2879](https://github.com/ClickHouse/ClickHouse/pull/2879) [\#2886](https://github.com/ClickHouse/ClickHouse/pull/2886) [\#2893](https://github.com/ClickHouse/ClickHouse/pull/2893) [\#2921](https://github.com/ClickHouse/ClickHouse/pull/2921) +- 修正了在文件路径的不正确的验证 `catBoostPool` 表功能。 [\#2894](https://github.com/ClickHouse/ClickHouse/pull/2894) +- 系统表的内容 (`tables`, `databases`, `parts`, `columns`, `parts_columns`, `merges`, `mutations`, `replicas`,和 `replication_queue`)根据用户对数据库的配置访问权限进行过滤 (`allow_databases`). [张冬](https://github.com/ClickHouse/ClickHouse/pull/2856) + +#### 向后不兼容的更改: {#backward-incompatible-changes-3} + +- 在具有JOIN的查询中,星形字符将扩展为符合SQL标准的所有表中的列列表。 您可以通过设置恢复旧行为 `asterisk_left_columns_only` 在用户配置级别上为1。 + +#### 构建更改: {#build-changes-2} + +- 大多数集成测试现在可以通过commit运行。 +- 代码样式检查也可以通过提交运行。 +- 该 `memcpy` 在CentOS7/Fedora上构建时,正确选择实现。 [Etienne Champetier](https://github.com/ClickHouse/ClickHouse/pull/2912) +- 当使用clang来构建时,来自一些警告 `-Weverything` 已添加,除了常规 `-Wall-Wextra -Werror`. 
[\#2957](https://github.com/ClickHouse/ClickHouse/pull/2957) +- 调试生成使用 `jemalloc` 调试选项。 +- 用于与ZooKeeper交互的库接口被声明为抽象。 [\#2950](https://github.com/ClickHouse/ClickHouse/pull/2950) + +## ClickHouse释放18.10 {#clickhouse-release-18-10} + +### 碌莽禄,拢,010-68520682\ {#clickhouse-release-18-10-3-2018-08-13} + +#### 新功能: {#new-features-5} + +- HTTPS可用于复制。 [\#2760](https://github.com/ClickHouse/ClickHouse/pull/2760) +- 新增功能 `murmurHash2_64`, `murmurHash3_32`, `murmurHash3_64`,和 `murmurHash3_128` 除了现有的 `murmurHash2_32`. [\#2791](https://github.com/ClickHouse/ClickHouse/pull/2791) +- 支持ClickHouse ODBC驱动程序中的可空类型 (`ODBCDriver2` 输出格式)。 [\#2834](https://github.com/ClickHouse/ClickHouse/pull/2834) +- 支持 `UUID` 在关键列。 + +#### 改进: {#improvements-5} + +- 当群集从配置文件中删除时,可以在不重新启动服务器的情况下删除群集。 [\#2777](https://github.com/ClickHouse/ClickHouse/pull/2777) +- 从配置文件中删除外部字典时,可以在不重新启动服务器的情况下删除它们。 [\#2779](https://github.com/ClickHouse/ClickHouse/pull/2779) +- 已添加 `SETTINGS` 支持 `Kafka` 表引擎。 [Alexander Marshalov](https://github.com/ClickHouse/ClickHouse/pull/2781) +- 改进的 `UUID` 数据类型(尚未完成)。 [\#2618](https://github.com/ClickHouse/ClickHouse/pull/2618) +- 支持合并后的空部件 `SummingMergeTree`, `CollapsingMergeTree` 和 `VersionedCollapsingMergeTree` 引擎 [\#2815](https://github.com/ClickHouse/ClickHouse/pull/2815) +- 已完成突变的旧记录将被删除 (`ALTER DELETE`). [\#2784](https://github.com/ClickHouse/ClickHouse/pull/2784) +- 添加了 `system.merge_tree_settings` 桌子 [基里尔\*什瓦科夫](https://github.com/ClickHouse/ClickHouse/pull/2841) +- 该 `system.tables` 表现在具有依赖列: `dependencies_database` 和 `dependencies_table`. [张冬](https://github.com/ClickHouse/ClickHouse/pull/2851) +- 添加了 `max_partition_size_to_drop` 配置选项。 [\#2782](https://github.com/ClickHouse/ClickHouse/pull/2782) +- 添加了 `output_format_json_escape_forward_slashes` 选项。 [Alexander Bocharov](https://github.com/ClickHouse/ClickHouse/pull/2812) +- 添加了 `max_fetch_partition_retries_count` 设置。 [\#2831](https://github.com/ClickHouse/ClickHouse/pull/2831) +- 添加了 `prefer_localhost_replica` 用于禁用本地副本的首选项以及在不进程间交互的情况下转到本地副本的设置。 [\#2832](https://github.com/ClickHouse/ClickHouse/pull/2832) +- 该 `quantileExact` 聚合函数返回 `nan` 在聚合在一个空的情况下 `Float32` 或 `Float64` 预备 [李尚迪](https://github.com/ClickHouse/ClickHouse/pull/2855) + +#### 错误修复: {#bug-fixes-14} + +- 删除了ODBC的连接字符串参数的不必要的转义,这使得无法建立连接。 此错误发生在版本18.6.0中。 +- 修正了处理逻辑 `REPLACE PARTITION` 复制队列中的命令。 如果有两个 `REPLACE` 对于同一个分区的命令,不正确的逻辑可能会导致其中一个保留在复制队列中而无法执行。 [\#2814](https://github.com/ClickHouse/ClickHouse/pull/2814) +- 修正了一个合并错误,当所有的数据部分都是空的(从合并或从形成的部分 `ALTER DELETE` 如果所有数据都被删除)。 此错误出现在18.1.0版本。 [\#2930](https://github.com/ClickHouse/ClickHouse/pull/2930) +- 修复了并发错误 `Set` 或 `Join`. [阿莫斯鸟](https://github.com/ClickHouse/ClickHouse/pull/2823) +- 修正了 `Block structure mismatch in UNION stream: different number of columns` 发生的错误 `UNION ALL` 子查询内的查询,如果一个 `SELECT` 查询包含重复的列名。 [张冬](https://github.com/ClickHouse/ClickHouse/pull/2094) +- 修复了连接到MySQL服务器时发生异常时的内存泄漏。 +- 在查询错误的情况下修复了不正确的clickhouse客户端响应代码。 +- 修正了包含DISTINCT的实例化视图的不正确行为。 [\#2795](https://github.com/ClickHouse/ClickHouse/issues/2795) + +#### 向后不兼容的更改 {#backward-incompatible-changes-4} + +- 删除了对分布式表的检查表查询的支持。 + +#### 构建更改: {#build-changes-3} + +- 分配器已被替换: `jemalloc` 现在用来代替 `tcmalloc`. 
在某些情况下,这增加了速度高达20%。 但是,有些查询已经减慢了20%。 在某些情况下,内存消耗减少了大约10%,稳定性得到了提高。 由于竞争激烈的负载,用户空间和系统中的CPU使用率略有增加。 [\#2773](https://github.com/ClickHouse/ClickHouse/pull/2773) +- 从子模块使用libressl。 [\#1983](https://github.com/ClickHouse/ClickHouse/pull/1983) [\#2807](https://github.com/ClickHouse/ClickHouse/pull/2807) +- 从子模块使用unixodbc。 [\#2789](https://github.com/ClickHouse/ClickHouse/pull/2789) +- 从子模块中使用mariadb-connector-c。 [\#2785](https://github.com/ClickHouse/ClickHouse/pull/2785) +- 将功能性测试文件添加到存储库中,这些文件取决于测试数据的可用性(暂时不包含测试数据本身)。 + +## ClickHouse释放18.6 {#clickhouse-release-18-6} + +### 碌莽禄,拢,010-68520682\戮卤篓拢,010-68520682\ {#clickhouse-release-18-6-0-2018-08-02} + +#### 新功能: {#new-features-6} + +- 增加了对ON表达式的支持,以便在语法上加入: + `JOIN ON Expr([table.]column ...) = Expr([table.]column, ...) [AND Expr([table.]column, ...) = Expr([table.]column, ...) ...]` + 表达式必须是由AND运算符连接的等式链。 等式的每一侧都可以是其中一个表的列上的任意表达式。 支持使用完全限定的列名 (`table.name`, `database.table.name`, `table_alias.name`, `subquery_alias.name`)对于正确的表。 [\#2742](https://github.com/ClickHouse/ClickHouse/pull/2742) +- 可以启用HTTPS进行复制。 [\#2760](https://github.com/ClickHouse/ClickHouse/pull/2760) + +#### 改进: {#improvements-6} + +- 服务器将其版本的补丁组件传递给客户端。 有关修补程序版本组件的数据位于 `system.processes` 和 `query_log`. [\#2646](https://github.com/ClickHouse/ClickHouse/pull/2646) + +## ClickHouse释放18.5 {#clickhouse-release-18-5} + +### 碌莽禄,拢,010-68520682\戮卤篓拢,010-68520682\ {#clickhouse-release-18-5-1-2018-07-31} + +#### 新功能: {#new-features-7} + +- 添加了哈希函数 `murmurHash2_32` [\#2756](https://github.com/ClickHouse/ClickHouse/pull/2756). + +#### 改进: {#improvements-7} + +- 现在你可以使用 `from_env` [\#2741](https://github.com/ClickHouse/ClickHouse/pull/2741) 从环境变量设置配置文件中的值的属性。 +- 增加了不区分大小写的版本 `coalesce`, `ifNull`,和 `nullIf functions` [\#2752](https://github.com/ClickHouse/ClickHouse/pull/2752). + +#### 错误修复: {#bug-fixes-15} + +- 修复了启动副本时可能出现的错误 [\#2759](https://github.com/ClickHouse/ClickHouse/pull/2759). + +## ClickHouse释放18.4 {#clickhouse-release-18-4} + +### 碌莽禄,拢,010-68520682\ {#clickhouse-release-18-4-0-2018-07-28} + +#### 新功能: {#new-features-8} + +- 添加系统表: `formats`, `data_type_families`, `aggregate_function_combinators`, `table_functions`, `table_engines`, `collations` [\#2721](https://github.com/ClickHouse/ClickHouse/pull/2721). +- 增加了使用表函数代替表作为参数的能力 `remote` 或 `cluster table function` [\#2708](https://github.com/ClickHouse/ClickHouse/pull/2708). +- 支持 `HTTP Basic` 复制协议中的身份验证 [\#2727](https://github.com/ClickHouse/ClickHouse/pull/2727). +- 该 `has` 函数现在允许搜索数组中的数值 `Enum` 值 [Maxim Khrisanfov](https://github.com/ClickHouse/ClickHouse/pull/2699). +- 支持添加任意消息分隔符从读取时 `Kafka` [阿莫斯鸟](https://github.com/ClickHouse/ClickHouse/pull/2701). + +#### 改进: {#improvements-8} + +- 该 `ALTER TABLE t DELETE WHERE` 查询不会重写未受WHERE条件影响的数据部分 [\#2694](https://github.com/ClickHouse/ClickHouse/pull/2694). +- 该 `use_minimalistic_checksums_in_zookeeper` 备选案文 `ReplicatedMergeTree` 默认情况下启用表。 此设置在版本1.1.54378,2018-04-16中添加。 不能再安装超过1.1.54378的版本。 +- 支持运行 `KILL` 和 `OPTIMIZE` 指定的查询 `ON CLUSTER` [张冬](https://github.com/ClickHouse/ClickHouse/pull/2689). + +#### 错误修复: {#bug-fixes-16} + +- 修正了错误 `Column ... is not under an aggregate function and not in GROUP BY` 用于具有IN表达式的聚合。 此错误出现在18.1.0版本。 ([bbdd780b](https://github.com/ClickHouse/ClickHouse/commit/bbdd780be0be06a0f336775941cdd536878dd2c2)) +- 修正了一个错误 `windowFunnel aggregate function` [张冬](https://github.com/ClickHouse/ClickHouse/pull/2735). 
+- 修正了一个错误 `anyHeavy` 聚合函数 ([a2101df2](https://github.com/ClickHouse/ClickHouse/commit/a2101df25a6a0fba99aa71f8793d762af2b801ee)) +- 使用时固定服务器崩溃 `countArray()` 聚合函数。 + +#### 向后不兼容的更改: {#backward-incompatible-changes-5} + +- 参数 `Kafka` 发动机从改变 `Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format[, kafka_schema, kafka_num_consumers])` 到 `Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format[, kafka_row_delimiter, kafka_schema, kafka_num_consumers])`. 如果你的表使用 `kafka_schema` 或 `kafka_num_consumers` 参数,你必须手动编辑元数据文件 `path/metadata/database/table.sql` 并添加 `kafka_row_delimiter` 参数 `''` 价值。 + +## ClickHouse释放18.1 {#clickhouse-release-18-1} + +### 碌莽禄,拢,010-68520682\ {#clickhouse-release-18-1-0-2018-07-23} + +#### 新功能: {#new-features-9} + +- 支持 `ALTER TABLE t DELETE WHERE` 非复制MergeTree表的查询 ([\#2634](https://github.com/ClickHouse/ClickHouse/pull/2634)). +- 支持任意类型的 `uniq*` 聚合函数族 ([\#2010](https://github.com/ClickHouse/ClickHouse/issues/2010)). +- 支持比较运算符中的任意类型 ([\#2026](https://github.com/ClickHouse/ClickHouse/issues/2026)). +- 该 `users.xml` 文件允许设置子网掩码的格式 `10.0.0.1/255.255.255.0`. 这对于在中间使用零的IPv6网络使用掩码是必要的 ([\#2637](https://github.com/ClickHouse/ClickHouse/pull/2637)). +- 添加了 `arrayDistinct` 功能 ([\#2670](https://github.com/ClickHouse/ClickHouse/pull/2670)). +- SummingMergeTree引擎现在可以使用AggregateFunction类型列 ([康斯坦丁\*潘](https://github.com/ClickHouse/ClickHouse/pull/2566)). + +#### 改进: {#improvements-9} + +- 更改了发布版本的编号方案。 现在第一部分包含发布年份(公元,莫斯科时区,减去2000),第二部分包含主要更改的数量(大多数版本的增加),第三部分是补丁版本。 除非在更新日志中另有说明,否则版本仍然向后兼容。 +- 更快地将浮点数转换为字符串 ([阿莫斯鸟](https://github.com/ClickHouse/ClickHouse/pull/2664)). +- 如果在插入过程中由于解析错误而跳过某些行(这可能与 `input_allow_errors_num` 和 `input_allow_errors_ratio` 启用设置),跳过的行数现在写入服务器日志 ([列奥纳多\*切奇](https://github.com/ClickHouse/ClickHouse/pull/2669)). + +#### 错误修复: {#bug-fixes-17} + +- 修复了临时表的截断命令 ([阿莫斯鸟](https://github.com/ClickHouse/ClickHouse/pull/2624)). +- 修复了读取响应时出现网络错误时ZooKeeper客户端库中罕见的死锁 ([c315200](https://github.com/ClickHouse/ClickHouse/commit/c315200e64b87e44bdf740707fc857d1fdf7e947)). +- 修复了转换为可空类型期间的错误 ([\#1322](https://github.com/ClickHouse/ClickHouse/issues/1322)). +- 修正了不正确的结果 `maxIntersection()` 函数时间间隔的边界重合 ([Michael Furmur](https://github.com/ClickHouse/ClickHouse/pull/2657)). +- 修复了函数参数中OR表达式链的不正确转换 ([晨兴-xc](https://github.com/ClickHouse/ClickHouse/pull/2663)). +- 修复了包含 `IN (subquery)` 另一个子查询中的表达式 ([\#2571](https://github.com/ClickHouse/ClickHouse/issues/2571)). +- 修复了分布式查询中使用不同版本的服务器之间的不兼容性 `CAST` 不是大写字母的函数 ([fe8c4d6](https://github.com/ClickHouse/ClickHouse/commit/fe8c4d64e434cacd4ceef34faa9005129f2190a5)). +- 添加了对外部数据库管理系统查询的缺少标识符引用 ([\#2635](https://github.com/ClickHouse/ClickHouse/issues/2635)). + +#### 向后不兼容的更改: {#backward-incompatible-changes-6} + +- 将包含数字零的字符串转换为DateTime不起作用。 示例: `SELECT toDateTime('0')`. 这也是原因 `DateTime DEFAULT '0'` 在表中不起作用,以及 `0` 在字典里 解决方案:替换 `0` 与 `0000-00-00 00:00:00`. + +## ClickHouse释放1.1 {#clickhouse-release-1-1} + +### 碌莽禄,拢,010-68520682\戮卤篓拢,010-68520682\ {#clickhouse-release-1-1-54394-2018-07-12} + +#### 新功能: {#new-features-10} + +- 添加了 `histogram` 聚合函数 ([米哈伊尔\*苏林](https://github.com/ClickHouse/ClickHouse/pull/2521)). +- 现在 `OPTIMIZE TABLE ... FINAL` 可以在不指定分区的情况下使用 `ReplicatedMergeTree` ([阿莫斯鸟](https://github.com/ClickHouse/ClickHouse/pull/2600)). + +#### 错误修复: {#bug-fixes-18} + +- 修复了在发送和下载复制数据时读取和写入套接字超时非常小的问题(一秒钟),这使得在网络或磁盘上存在负载时无法下载更大的部分(导致周期性尝试下载部分)。 此错误发生在版本1.1.54388。 +- 修复了在ZooKeeper中使用chroot时在表中插入重复数据块的问题。 +- 该 `has` 函数现在可以正常工作用于具有可为空元素的数组 ([\#2115](https://github.com/ClickHouse/ClickHouse/issues/2115)). 
+- The `system.tables` table now works correctly when used in distributed queries. The `metadata_modification_time` and `engine_full` columns are now non-virtual. Fixed an error that occurred if only these columns were queried from the table.
+- Fixed how an empty `TinyLog` table works after inserting an empty data block ([\#2563](https://github.com/ClickHouse/ClickHouse/issues/2563)).
+- The `system.zookeeper` table works if the value of the node in ZooKeeper is NULL.
+
+### ClickHouse release 1.1.54390, 2018-07-06 {#clickhouse-release-1-1-54390-2018-07-06}
+
+#### New features: {#new-features-11}
+
+- Queries can be sent in `multipart/form-data` format (in the `query` field), which is useful if external data is also sent for query processing ([Olga Hvostikova](https://github.com/ClickHouse/ClickHouse/pull/2490)).
+- Added the ability to enable or disable processing of single or double quotes when reading data in CSV format. You can configure this with the `format_csv_allow_single_quotes` and `format_csv_allow_double_quotes` settings ([Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2574)).
+- Now `OPTIMIZE TABLE ... FINAL` can be used without specifying the partition for non-replicated variants of `MergeTree` ([Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2599)).
+
+#### Improvements: {#improvements-10}
+
+- Improved performance, reduced memory consumption, and correct tracking of memory consumption with use of the IN operator when a table index could be used ([\#2584](https://github.com/ClickHouse/ClickHouse/pull/2584)).
+- Removed redundant checking of checksums when adding a data part. This is important when there are a large number of replicas, because in these cases the total number of checks was equal to N^2.
+- Added support for `Array(Tuple(...))` arguments for the `arrayEnumerateUniq` function ([\#2573](https://github.com/ClickHouse/ClickHouse/pull/2573)).
+- Added `Nullable` support for the `runningDifference` function ([\#2594](https://github.com/ClickHouse/ClickHouse/pull/2594)).
+- Improved query analysis performance when there is a very large number of expressions ([\#2572](https://github.com/ClickHouse/ClickHouse/pull/2572)).
+- Faster selection of data parts to merge in `ReplicatedMergeTree` tables. Faster recovery of the ZooKeeper session ([\#2597](https://github.com/ClickHouse/ClickHouse/pull/2597)).
+- The `format_version.txt` file for `MergeTree` tables is re-created if it is missing, which makes sense if ClickHouse is launched after copying the directory structure without files ([Ciprian Hacman](https://github.com/ClickHouse/ClickHouse/pull/2593)).
+
+#### Bug fixes: {#bug-fixes-19}
+
+- Fixed a bug when working with ZooKeeper that could make it impossible to recover the session and readonly states of tables before restarting the server.
+- Fixed a bug when working with ZooKeeper that could result in old nodes not being deleted if the session is interrupted.
+- Fixed an error in the `quantileTDigest` function for Float arguments (this bug was introduced in version 1.1.54388) ([Mikhail Surin](https://github.com/ClickHouse/ClickHouse/pull/2553)).
+- Fixed a bug in the index for MergeTree tables if the primary key column is located inside a function for converting types between signed and unsigned integers of the same size ([\#2603](https://github.com/ClickHouse/ClickHouse/pull/2603)).
+- Fixed segfault if `macros` are used but they aren't in the config file ([\#2570](https://github.com/ClickHouse/ClickHouse/pull/2570)).
+- Fixed switching to the default database when reconnecting the client ([\#2583](https://github.com/ClickHouse/ClickHouse/pull/2583)).
+- Fixed a bug that occurred when the `use_index_for_in_with_subqueries` setting was disabled.
+
+#### Security fix: {#security-fix-1}
+
+- Sending files is no longer possible when connected to MySQL (`LOAD DATA LOCAL INFILE`).
+
+### ClickHouse release 1.1.54388, 2018-06-28 {#clickhouse-release-1-1-54388-2018-06-28}
+
+#### New features: {#new-features-12}
+
+- Support for the `ALTER TABLE t DELETE WHERE` query for replicated tables. Added the `system.mutations` table to track the progress of this type of queries.
+- Support for the `ALTER TABLE t [REPLACE|ATTACH] PARTITION` query for \*MergeTree tables.
+- Support for the `TRUNCATE TABLE` query ([Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2260))
+- Several new `SYSTEM` queries for replicated tables (`RESTART REPLICAS`, `SYNC REPLICA`, `[STOP|START] [MERGES|FETCHES|SENDS REPLICATED|REPLICATION QUEUES]`).
+- Added the ability to write to a table with the MySQL engine and the corresponding table function ([sundy-li](https://github.com/ClickHouse/ClickHouse/pull/2294)).
+- Added the `url()` table function and the `URL` table engine ([Alexander Sapin](https://github.com/ClickHouse/ClickHouse/pull/2501)).
+- Added the `windowFunnel` aggregate function ([sundy-li](https://github.com/ClickHouse/ClickHouse/pull/2352)).
+- New `startsWith` and `endsWith` functions for strings ([Vadim Plakhtinsky](https://github.com/ClickHouse/ClickHouse/pull/2429)).
+- The `numbers()` table function now allows you to specify the offset ([Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2535)).
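+  A small illustration of the extended form (returns the five numbers starting at 10):
+
+  ```sql
+  SELECT number FROM numbers(10, 5) -- 10, 11, 12, 13, 14
+  ```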
+- The password to `clickhouse-client` can be entered interactively.
+- Server logs can now be sent to syslog ([Alexander Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/2459)).
+- Support for logging in dictionaries with a shared library source ([Alexander Sapin](https://github.com/ClickHouse/ClickHouse/pull/2472)).
+- Support for custom CSV delimiters ([Ivan Zhukov](https://github.com/ClickHouse/ClickHouse/pull/2263))
+- Added the `date_time_input_format` setting. If you switch this setting to `'best_effort'`, DateTime values will be read in a wide range of formats.
+- Added the `clickhouse-obfuscator` utility for data obfuscation. Usage example: publishing data used in performance tests.
+
+#### Experimental features: {#experimental-features-2}
+
+- Added the ability to calculate `and` arguments only where they are needed ([Anastasiya Tsarkova](https://github.com/ClickHouse/ClickHouse/pull/2272))
+- JIT compilation to native code is now available for some expressions ([pyos](https://github.com/ClickHouse/ClickHouse/pull/2277)).
+
+#### Bug fixes: {#bug-fixes-20}
+
+- Duplicates no longer appear for a query with `DISTINCT` and `ORDER BY`.
+- Queries with `ARRAY JOIN` and `arrayFilter` no longer return an incorrect result.
+- Fixed an error when reading an array column from a Nested structure ([\#2066](https://github.com/ClickHouse/ClickHouse/issues/2066)).
+- Fixed an error when analyzing queries with a HAVING clause like `HAVING tuple IN (...)`.
+- Fixed an error when analyzing queries with recursive aliases.
+- Fixed an error when reading from ReplacingMergeTree with a condition in PREWHERE that filters all rows ([\#2525](https://github.com/ClickHouse/ClickHouse/issues/2525)).
+- User profile settings were not applied when using sessions in the HTTP interface.
+- Fixed how settings are applied from the command line parameters in clickhouse-local.
+- The ZooKeeper client library now uses the session timeout received from the server.
+- Fixed a bug in the ZooKeeper client library when the client waited for the server response longer than the timeout.
+- Fixed pruning of parts for queries with conditions on partition key columns ([\#2342](https://github.com/ClickHouse/ClickHouse/issues/2342)).
+- Merges are now possible after `CLEAR COLUMN IN PARTITION` ([\#2315](https://github.com/ClickHouse/ClickHouse/issues/2315)).
+- Type mapping in the ODBC table function has been fixed ([sundy-li](https://github.com/ClickHouse/ClickHouse/pull/2268)).
+- Type comparisons have been fixed for `DateTime` with and without a time zone ([Alexander Bocharov](https://github.com/ClickHouse/ClickHouse/pull/2400)).
+- Fixed syntactic parsing and formatting of the `CAST` operator.
+- Fixed insertion into a materialized view for the Distributed table engine ([Babacar Diassé](https://github.com/ClickHouse/ClickHouse/pull/2411)).
+- Fixed a race condition when writing data from the `Kafka` engine to materialized views ([Yangkuan Liu](https://github.com/ClickHouse/ClickHouse/pull/2448)).
+- Fixed SSRF in the remote() table function.
+- Fixed exit behavior of `clickhouse-client` in multiline mode ([\#2510](https://github.com/ClickHouse/ClickHouse/issues/2510)).
+
+#### Improvements: {#improvements-11}
+
+- Background tasks in replicated tables are now performed in a thread pool instead of in separate threads ([Silviu Caragea](https://github.com/ClickHouse/ClickHouse/pull/1722)).
+- Improved LZ4 compression performance.
+- Faster analysis for queries with a large number of JOINs and subqueries.
+- The DNS cache is now updated automatically when there are too many network errors.
+- Table inserts no longer occur if the insert into one of the materialized views is not possible because it has too many parts.
+- Corrected the discrepancy in the event counters `Query`, `SelectQuery`, and `InsertQuery`.
+- Expressions like `tuple IN (SELECT tuple)` are allowed if the tuple types match.
+- A server with replicated tables can start even if you haven't configured ZooKeeper.
+- When calculating the number of available CPU cores, limits on cgroups are now taken into account ([Atri Sharma](https://github.com/ClickHouse/ClickHouse/pull/2325)).
+- Added chown for config directories in the systemd config file ([Mikhail Shiryaev](https://github.com/ClickHouse/ClickHouse/pull/2421)).
+
+#### Build changes: {#build-changes-4}
+
+- The gcc8 compiler can be used for builds.
+- Added the ability to build llvm from a submodule.
+- The version of the librdkafka library has been updated to v0.11.4.
+- Added the ability to use the system libcpuid library. The library version has been updated to 0.4.0.
+- Fixed the build using the vectorclass library ([Babacar Diassé](https://github.com/ClickHouse/ClickHouse/pull/2274)).
+- Cmake now generates files for ninja by default (like when using `-G Ninja`).
+- Added the ability to use the libtinfo library instead of libtermcap ([Georgy Kondratiev](https://github.com/ClickHouse/ClickHouse/pull/2519)).
+- Fixed a header file conflict in Fedora Rawhide ([\#2520](https://github.com/ClickHouse/ClickHouse/issues/2520)).
+
+#### Backward incompatible changes: {#backward-incompatible-changes-7}
+
+- Removed escaping in `Vertical` and `Pretty*` formats and deleted the `VerticalRaw` format.
+- If servers with version 1.1.54388 (or newer) and servers with an older version are used simultaneously in a distributed query, and the query has the `cast(x, 'Type')` expression without the `AS` keyword and doesn't have the word `cast` in uppercase, an exception will be thrown with a message like `Not found column cast(0, 'UInt8') in block`. Solution: update the server on the entire cluster.
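+  For illustration, spellings that are unaffected by this cross-version issue (uppercase `CAST`, or the `AS` form):
+
+  ```sql
+  SELECT CAST(0, 'UInt8'); -- uppercase function form
+  SELECT CAST(0 AS UInt8)  -- AS form
+  ```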
+### ClickHouse release 1.1.54385, 2018-06-01 {#clickhouse-release-1-1-54385-2018-06-01}
+
+#### Bug fixes: {#bug-fixes-21}
+
+- Fixed an error that in some cases caused ZooKeeper operations to block.
+
+### ClickHouse release 1.1.54383, 2018-05-22 {#clickhouse-release-1-1-54383-2018-05-22}
+
+#### Bug fixes: {#bug-fixes-22}
+
+- Fixed a slowdown of the replication queue if a table has many replicas.
+
+### ClickHouse release 1.1.54381, 2018-05-14 {#clickhouse-release-1-1-54381-2018-05-14}
+
+#### Bug fixes: {#bug-fixes-23}
+
+- Fixed a node leak in ZooKeeper when ClickHouse loses the connection to the ZooKeeper server.
+
+### ClickHouse release 1.1.54380, 2018-04-21 {#clickhouse-release-1-1-54380-2018-04-21}
+
+#### New features: {#new-features-13}
+
+- Added the table function `file(path, format, structure)`. An example reading bytes from `/dev/urandom`:
+
+  ```bash
+  ln -s /dev/urandom /var/lib/clickhouse/user_files/random
+  clickhouse-client -q "SELECT * FROM file('random', 'RowBinary', 'd UInt8') LIMIT 10"
+  ```
+
+#### Improvements: {#improvements-12}
+
+- Subqueries can be wrapped in `()` brackets to enhance query readability. For example: `(SELECT 1) UNION ALL (SELECT 1)`.
+- Simple `SELECT` queries from the `system.processes` table are not included in the `max_concurrent_queries` limit.
+
+#### Bug fixes: {#bug-fixes-24}
+
+- Fixed incorrect behavior of the `IN` operator when selecting from a `MATERIALIZED VIEW`.
+- Fixed incorrect filtering by the partition index in expressions like `partition_key_column IN (...)`.
+- Fixed the inability to execute `OPTIMIZE` queries on non-leader replicas if a `RENAME` was performed on the table.
+- Fixed the authorization error when executing `OPTIMIZE` or `ALTER` queries on a non-leader replica.
+- Fixed freezing of `KILL QUERY`.
+- Fixed an error in the ZooKeeper client library which led to loss of watches, freezing of the distributed DDL queue, and slowdowns in the replication queue if a non-empty `chroot` prefix is used in the ZooKeeper configuration.
+
+#### Backward incompatible changes: {#backward-incompatible-changes-8}
+
+- Removed support for expressions like `(a, b) IN (SELECT (a, b))` (you can use the equivalent expression `(a, b) IN (SELECT a, b)`). In previous releases, these expressions led to undetermined `WHERE` filtering or caused errors.
+
+### ClickHouse release 1.1.54378, 2018-04-16 {#clickhouse-release-1-1-54378-2018-04-16}
+
+#### New features: {#new-features-14}
+
+- The logging level can be changed without restarting the server.
+- Added the `SHOW CREATE DATABASE` query.
+- The `query_id` can be passed to `clickhouse-client` (elBroom).
+- New setting: `max_network_bandwidth_for_all_users`.
+- Added support for `ALTER TABLE ... PARTITION ...` for `MATERIALIZED VIEW`.
+- Added information about the size of data parts in uncompressed form in the system tables.
+- Server-to-server encryption support for distributed tables (`<secure>1</secure>` in the replica config in `<remote_servers>`).
+- Configuration at the table level for the `ReplicatedMergeTree` family in order to minimize the amount of data stored in ZooKeeper: `use_minimalistic_checksums_in_zookeeper = 1`
+- Configuration of the `clickhouse-client` prompt. By default, server names are now output to the prompt. The server's display name can be changed. It's also sent in the `X-ClickHouse-Display-Name` HTTP header (Kirill Shvakov).
+- Multiple comma-separated `topics` can be specified for the `Kafka` engine (Tobias Adamson)
+- When a query is stopped by `KILL QUERY` or `replace_running_query`, the client receives the `Query was canceled` exception instead of an incomplete result.
+
+#### Improvements: {#improvements-13}
+
+- `ALTER TABLE ... DROP/DETACH PARTITION` queries are run at the front of the replication queue.
+- `SELECT ... FINAL` and `OPTIMIZE ... FINAL` can be used even when the table has a single data part.
+- A `query_log` table is recreated on the fly if it was deleted manually (Kirill Shvakov).
+- The `lengthUTF8` function runs faster (zhang2014).
+- Improved performance of synchronous inserts in `Distributed` tables (`insert_distributed_sync = 1`) when there is a very large number of shards.
+- The server accepts the `send_timeout` and `receive_timeout` settings from the client and applies them when connecting to the client (they are applied in reverse order: the server socket's `send_timeout` is set to the `receive_timeout` value received from the client, and vice versa).
+- More robust crash recovery for asynchronous inserts into `Distributed` tables.
+- The return type of the `countEqual` function changed from `UInt32` to `UInt64` (谢磊).
+
+#### Bug fixes: {#bug-fixes-25}
+
+- Fixed an error with `IN` when the left side of the expression is `Nullable`.
+- Correct results are now returned when using tuples with `IN` when some of the tuple components are in the table index.
+- The `max_execution_time` limit now works correctly with distributed queries.
+- Fixed errors when calculating the size of composite columns in the `system.columns` table.
+- Fixed an error when creating a temporary table `CREATE TEMPORARY TABLE IF NOT EXISTS.`
+- Fixed errors in `StorageKafka` (\#\#2075)
+- Fixed server crashes from invalid arguments of certain aggregate functions.
+- Fixed the error that prevented a `DETACH DATABASE` query from stopping background tasks for `ReplicatedMergeTree` tables.
+- The `Too many parts` state is less likely to happen when inserting into aggregating materialized views (\#\#2084).
+- Corrected recursive handling of substitutions in the config if a substitution must be followed by another substitution on the same level.
+- Corrected the syntax in the metadata file when creating a `VIEW` that uses a query with `UNION ALL`.
+- `SummingMergeTree` now works correctly for summation of nested data structures with a composite key.
+- Fixed the possibility of a race condition when choosing the leader for `ReplicatedMergeTree` tables.
+
+#### Build changes: {#build-changes-5}
+
+- The build supports `ninja` instead of `make` and uses `ninja` by default for building releases.
+- Renamed packages: `clickhouse-server-base` to `clickhouse-common-static`; `clickhouse-server-common` to `clickhouse-server`; `clickhouse-common-dbg` to `clickhouse-common-static-dbg`. To install, use `clickhouse-server clickhouse-client`. Packages with the old names will still load in the repositories for backward compatibility.
+
+#### Backward incompatible changes: {#backward-incompatible-changes-9}
+
+- Removed the special interpretation of an IN expression if an array is specified on the left side. Previously, the expression `arr IN (set)` was interpreted as “at least one `arr` element belongs to the `set`”. To get the same behavior in the new version, write `arrayExists(x -> x IN (set), arr)`.
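+  An illustration of the equivalent rewrite (hypothetical values):
+
+  ```sql
+  SELECT arrayExists(x -> x IN (1, 2), [0, 1, 5]) AS found -- returns 1
+  ```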
+- Disabled the incorrect use of the socket option `SO_REUSEPORT`, which was incorrectly enabled by default in the Poco library. Note that on Linux there is no longer any reason to simultaneously specify the addresses `::` and `0.0.0.0` for listen – use just `::`, which allows listening to connections both over IPv4 and IPv6 (with the default kernel config settings). You can also revert to the behavior from previous versions by specifying `1` in the config.
+
+### ClickHouse release 1.1.54370, 2018-03-16 {#clickhouse-release-1-1-54370-2018-03-16}
+
+#### New features: {#new-features-15}
+
+- Added the `system.macros` table and auto updating of macros when the config file is changed.
+- Added the `SYSTEM RELOAD CONFIG` query.
+- Added the `maxIntersections(left_col, right_col)` aggregate function, which returns the maximum number of simultaneously intersecting intervals `[left; right]`. The `maxIntersectionsPosition(left, right)` function returns the beginning of the “maximum” interval. ([Michael Furmur](https://github.com/ClickHouse/ClickHouse/pull/2012)).
+
+#### Improvements: {#improvements-14}
+
+- When inserting data in a `Replicated` table, fewer requests are made to `ZooKeeper` (and most of the user-level errors have disappeared from the `ZooKeeper` log).
+- Added the ability to create aliases for data sets. Example: `WITH (1, 2, 3) AS set SELECT number IN set FROM system.numbers LIMIT 10`.
+
+#### Bug fixes: {#bug-fixes-26}
+
+- Fixed the `Illegal PREWHERE` error when reading from Merge tables over `Distributed` tables.
+- Added fixes that allow you to start clickhouse-server in IPv4-only Docker containers.
+- Fixed a race condition when reading from system `system.parts_columns tables.`
+- Removed double buffering during a synchronous insert to a `Distributed` table, which could have caused the connection to time out.
+- Fixed a bug that caused excessively long waits for an unavailable replica before beginning a `SELECT` query.
+- Fixed incorrect dates in the `system.parts` table.
+- Fixed a bug that made it impossible to insert data in a `Replicated` table if `chroot` was non-empty in the configuration of the `ZooKeeper` cluster.
+- Fixed the vertical merging algorithm for an empty `ORDER BY` table.
+- Restored the ability to use dictionaries in queries to remote tables, even if these dictionaries are not present on the requestor server. This functionality was lost in release 1.1.54362.
+- Restored the behavior for queries like `SELECT * FROM remote('server2', default.table) WHERE col IN (SELECT col2 FROM default.table)` when the right side of the `IN` should use a remote `default.table` instead of a local one. This behavior was broken in version 1.1.54358.
+- Removed extraneous error-level logging of `Not found column ... in block`.
+
+### ClickHouse release 1.1.54362, 2018-03-11 {#clickhouse-release-1-1-54362-2018-03-11}
+
+#### New features: {#new-features-16}
+
+- Aggregation without `GROUP BY` for an empty set (such as `SELECT count(*) FROM table WHERE 0`) now returns a result with one row with null values for aggregate functions, in compliance with the SQL standard. To restore the old behavior (return an empty result), set `empty_result_for_aggregation_by_empty_set` to 1.
+- Added type conversion for `UNION ALL`. Different alias names are allowed in `SELECT` positions in `UNION ALL`, in compliance with the SQL standard.
+- Arbitrary expressions are supported in `LIMIT BY` clauses. Previously, it was only possible to use columns resulting from `SELECT`.
+- An index of `MergeTree` tables is used when `IN` is applied to a tuple of expressions from the columns of the primary key. Example: `WHERE (UserID, EventDate) IN ((123, '2000-01-01'), ...)` (Anastasiya Tsarkova)
+- Added the `clickhouse-copier` tool for copying between clusters and resharding data (beta).
+- Added consistent hashing functions: `yandexConsistentHash`, `jumpConsistentHash`, `sumburConsistentHash`. They can be used as a sharding key in order to reduce the amount of network traffic during subsequent reshardings.
+- Added the functions: `arrayAny`, `arrayAll`, `hasAny`, `hasAll`, `arrayIntersect`, `arrayResize`.
+- Added the `arrayCumSum` function (Javi Santana).
+- Added the `parseDateTimeBestEffort`, `parseDateTimeBestEffortOrZero`, and `parseDateTimeBestEffortOrNull` functions to read a DateTime from a string containing text in a wide variety of possible formats.
+- Data can be partially reloaded from external dictionaries during updating (load just the records in which the value of the specified field is greater than in the previous download) (Arsen Hakobyan).
+- Added the `cluster` table function. Example: `cluster(cluster_name, db, table)`. The `remote` table function can accept the cluster name as the first argument, if it is specified as an identifier.
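+  A hedged illustration, assuming a cluster named `test_cluster` is defined in the server config:
+
+  ```sql
+  SELECT hostName(), count() FROM cluster(test_cluster, system, one) GROUP BY hostName()
+  ```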
+- The `remote` and `cluster` table functions can be used in `INSERT` queries.
+- Added the `create_table_query` and `engine_full` virtual columns to the `system.tables` table. The `metadata_modification_time` column is virtual.
+- Added the `data_path` and `metadata_path` columns to the `system.tables` and `system.databases` tables, and added the `path` column to the `system.parts` and `system.parts_columns` tables.
+- Added additional information about merges in the `system.part_log` table.
+- An arbitrary partitioning key can be used for the `system.query_log` table (Kirill Shvakov).
+- The `SHOW TABLES` query now also shows temporary tables. Added temporary tables and the `is_temporary` column to `system.tables` (zhang2014).
+- Added the `DROP TEMPORARY TABLE` and `EXISTS TEMPORARY TABLE` queries (zhang2014).
+- Support for `SHOW CREATE TABLE` for temporary tables (zhang2014).
+- Added the `system_profile` configuration parameter for the settings used by internal processes.
+- Support for loading `object_id` as an attribute in `MongoDB` dictionaries (Pavel Litvinenko).
+- Reading `null` as the default value when loading data for an external dictionary with the `MongoDB` source (Pavel Litvinenko).
+- Reading `DateTime` values in the `Values` format from a Unix timestamp without single quotes.
+- Failover is supported in the `remote` table function for cases when some of the replicas are missing the requested table.
+- Configuration settings can be overridden in the command line when you run `clickhouse-server`. Example: `clickhouse-server -- --logger.level=information`.
+- Implemented the `empty` function for a `FixedString` argument: the function returns 1 if the string consists entirely of null bytes (zhang2014).
+- Added the `listen_try` configuration parameter for listening to at least one of the listen addresses without quitting, if some of the addresses can't be listened to (useful for systems with disabled support for IPv4 or IPv6).
+- Added the `VersionedCollapsingMergeTree` table engine.
+- Support for rows and arbitrary numeric types for the `library` dictionary source.
+- `MergeTree` tables can be used without a primary key (you need to specify `ORDER BY tuple()`).
+- A `Nullable` type can be `CAST` to a non-`Nullable` type if the argument is not `NULL`.
+- `RENAME TABLE` can be performed for `VIEW`.
+- Added the `throwIf` function.
+- Added the `odbc_default_field_size` option, which allows you to extend the maximum size of the value loaded from an ODBC source (by default, it is 1024).
+- The `system.processes` table and `SHOW PROCESSLIST` now have the `is_cancelled` and `peak_memory_usage` columns.
+
+#### Improvements: {#improvements-15}
+
+- Limits and quotas on the result are no longer applied to intermediate data for `INSERT SELECT` queries or for `SELECT` subqueries.
+- Fewer false triggers of `force_restore_data` when checking the status of `Replicated` tables when the server starts.
+- Added the `allow_distributed_ddl` option.
+- Nondeterministic functions are not allowed in expressions for `MergeTree` table keys.
+- Files with substitutions from `config.d` directories are loaded in alphabetical order.
+- Improved performance of the `arrayElement` function in the case of a constant multidimensional array with an empty array as one of the elements. Example: `[[1], []][x]`.
+- The server starts faster now when using configuration files with very large substitutions (for instance, very large lists of IP networks).
+- When running a query, table valued functions run once. Previously, the `remote` and `mysql` table valued functions performed the same query twice to retrieve the table structure from a remote server.
+- The `MkDocs` documentation generator is used.
+- When you try to delete a table column that `DEFAULT`/`MATERIALIZED` expressions of other columns depend on, an exception is thrown (zhang2014).
+- Added the ability to parse an empty line in text formats as the number 0 for `Float` data types. This feature was previously available but was lost in release 1.1.54342.
+- `Enum` values can be used in `min`, `max`, `sum` and some other functions. In these cases, the corresponding numeric values are used. This feature was previously available but was lost in release 1.1.54337.
+- Added `max_expanded_ast_elements` to restrict the size of the AST after recursively expanding aliases.
+
+#### Bug fixes: {#bug-fixes-27}
+
+- Fixed cases when unnecessary columns were removed from subqueries in error, or not removed from subqueries containing `UNION ALL`.
+- Fixed a bug in merges for `ReplacingMergeTree` tables.
+- Fixed synchronous insertions in `Distributed` tables (`insert_distributed_sync = 1`).
+- Fixed a segfault for certain uses of `FULL` and `RIGHT JOIN` with duplicate columns in subqueries.
+- Fixed a segfault for certain uses of `replace_running_query` and `KILL QUERY`.
+- Fixed the order of the `source` and `last_exception` columns in the `system.dictionaries` table.
+- Fixed a bug when a `DROP DATABASE` query did not delete the file with metadata.
+- Fixed the `DROP DATABASE` query for `Dictionary` databases.
+- Fixed the low precision of the `uniqHLL12` and `uniqCombined` functions for cardinalities greater than 100 million items (Alex Bocharov).
+- Fixed the calculation of implicit default values when necessary to simultaneously calculate default explicit expressions in `INSERT` queries (zhang2014).
+- Fixed a rare case when a query to a `MergeTree` table couldn't finish (chenxing-xc).
+- Fixed a crash that occurred when running a `CHECK` query for `Distributed` tables if all shards are local (chenxing.xc).
+- Fixed a slight performance regression with functions that use regular expressions.
+- Fixed a performance regression when creating multidimensional arrays from complex expressions.
+- Fixed a bug that could cause an extra `FORMAT` section to appear in an `.sql` file with metadata.
+- Fixed a bug that caused the `max_table_size_to_drop` limit to apply when trying to delete a `MATERIALIZED VIEW` looking at an explicitly specified table.
+- Fixed incompatibility with old clients (old clients were sometimes sent data with the `DateTime('timezone')` type, which they do not understand).
+- Fixed a bug when reading `Nested` column elements of structures that were added using `ALTER` but that are empty for the old partitions, when the conditions for these columns moved to `PREWHERE`.
+- Fixed a bug when filtering tables by the virtual `_table` column in queries to `Merge` tables.
+- Fixed a bug when using `ALIAS` columns in `Distributed` tables.
+- Fixed a bug that made dynamic compilation impossible for queries with aggregate functions from the `quantile` family.
+- Fixed a race condition in the query execution pipeline that occurred in very rare cases when using `Merge` tables with a large number of tables, and when using `GLOBAL` subqueries.
+- Fixed a crash when passing arrays of different sizes to an `arrayReduce` function when using aggregate functions from multiple arguments.
+- Prohibited the use of queries with `UNION ALL` in a `MATERIALIZED VIEW`.
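+  A hedged illustration of a statement that is now rejected (hypothetical view name):
+
+  ```sql
+  CREATE MATERIALIZED VIEW bad_mv ENGINE = Memory AS
+  SELECT 1 AS x UNION ALL SELECT 2 -- now throws an exception instead of being accepted
+  ```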
+- Fixed an error during initialization of the `part_log` system table when the server starts (by default, `part_log` is disabled).
+
+#### Backward incompatible changes: {#backward-incompatible-changes-10}
+
+- Removed the `distributed_ddl_allow_replicated_alter` option. This behavior is enabled by default.
+- Removed the `strict_insert_defaults` setting. If you were using this functionality, write to `clickhouse-feedback@yandex-team.com`.
+- Removed the `UnsortedMergeTree` engine.
+
+### ClickHouse release 1.1.54343, 2018-02-05 {#clickhouse-release-1-1-54343-2018-02-05}
+
+- Added macros support for defining cluster names in distributed DDL queries and constructors of Distributed tables: `CREATE TABLE distr ON CLUSTER '{cluster}' (...) ENGINE = Distributed('{cluster}', 'db', 'table')`.
+- Now queries like `SELECT ... FROM table WHERE expr IN (subquery)` are processed using the `table` index.
+- Improved processing of duplicates when inserting into Replicated tables, so they no longer slow down execution of the replication queue.
+
+### ClickHouse release 1.1.54342, 2018-01-22 {#clickhouse-release-1-1-54342-2018-01-22}
+
+This release contains bug fixes for the previous release 1.1.54337:
+
+- Fixed a regression in 1.1.54337: if the default user has readonly access, then the server refuses to start up with the message `Cannot create database in readonly mode`.
+- Fixed a regression in 1.1.54337: on systems with systemd, logs are always written to syslog regardless of the configuration; the watchdog script still uses init.d.
+- Fixed a regression in 1.1.54337: wrong default configuration in the Docker image.
+- Fixed nondeterministic behavior of GraphiteMergeTree (you can see it in log messages `Data after merge is not byte-identical to the data on another replicas`).
+- Fixed a bug that may lead to inconsistent merges after an OPTIMIZE query to Replicated tables (you may see it in log messages `Part ... intersects the previous part`).
+- Buffer tables now work correctly when MATERIALIZED columns are present in the destination table (by zhang2014).
+- Fixed a bug in the implementation of NULL.
+
+### ClickHouse release 1.1.54337, 2018-01-18 {#clickhouse-release-1-1-54337-2018-01-18}
+
+#### New features: {#new-features-17}
+
+- Added support for storage of multidimensional arrays and tuples (the `Tuple` data type) in tables.
+- Support for table functions in `DESCRIBE` and `INSERT` queries. Added support for subqueries in `DESCRIBE`. Examples: `DESC TABLE remote('host', default.hits)`; `DESC TABLE (SELECT 1)`; `INSERT INTO TABLE FUNCTION remote('host', default.hits)`. Support for `INSERT INTO TABLE` in addition to `INSERT INTO`.
+- Improved support for time zones. The `DateTime` data type can be annotated with the timezone that is used for parsing and formatting in text formats. Example: `DateTime('Europe/Moscow')`. When timezones are specified in functions for `DateTime` arguments, the return type will track the timezone, and the value will be displayed as expected.
+- Added the functions `toTimeZone`, `timeDiff`, `toQuarter`, `toRelativeQuarterNum`. The `toRelativeHour`/`Minute`/`Second` functions can take a value of type `Date` as an argument. The `now` function name is case-sensitive.
+- Added the `toStartOfFifteenMinutes` function (Kirill Shvakov).
+- Added the `clickhouse format` tool for formatting queries.
+- Added the `format_schema_path` configuration parameter (Marek Vavruša). It is used for specifying a schema in `Cap'n Proto` format. Schema files can be located only in the specified directory.
+- Added support for config substitutions (`incl` and `conf.d`) for configuration of external dictionaries and models (Pavel Yakunin).
+- Added a column with documentation for the `system.settings` table (Kirill Shvakov).
+- Added the `system.parts_columns` table with information about column sizes in each data part of `MergeTree` tables.
+- Added the `system.models` table with information about loaded `CatBoost` machine learning models.
+- Added the `mysql` and `odbc` table functions and corresponding `MySQL` and `ODBC` table engines for accessing remote databases. This functionality is in the beta stage.
+- Added the possibility to pass an argument of type `AggregateFunction` for the `groupArray` aggregate function (so you can create an array of states of some aggregate function).
+- Removed restrictions on various combinations of aggregate function combinators. For example, you can use the `avgForEachIf` as well as `avgIfForEach` aggregate functions, which have different behaviors.
+- The `-ForEach` aggregate function combinator is extended for the case of aggregate functions of multiple arguments.
+- Added support for aggregate functions of `Nullable` arguments even for cases when the function returns a non-`Nullable` result (added with the contribution of Silviu Caragea). Examples: `groupArray`, `groupUniqArray`, `topK`.
+- Added the `max_client_network_bandwidth` setting for `clickhouse-client` (Kirill Shvakov).
+- Users with the `readonly = 2` setting are allowed to work with TEMPORARY tables (CREATE, DROP, INSERT…) (Kirill Shvakov).
+- Added support for using multiple consumers with the `Kafka` engine. Extended configuration options for `Kafka` (Marek Vavruša).
+- Added the `intExp3` and `intExp4` functions.
+- Added the `sumKahan` aggregate function.
+- Added the to\*Number\*OrNull functions, where \*Number\* is a numeric type.
+- Added support for the `WITH` clause for an `INSERT SELECT` query (author: zhang2014).
+- Added settings: `http_connection_timeout`, `http_send_timeout`, `http_receive_timeout`. In particular, these settings are used for downloading data parts for replication. Changing these settings allows for faster failover if the network is overloaded.
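+  For illustration, these are ordinary user-level settings, so they can be raised per session (the values here are arbitrary):
+
+  ```sql
+  SET http_connection_timeout = 2, http_send_timeout = 300, http_receive_timeout = 300
+  ```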
+- Added support for `ALTER` for tables of type `Null` (Anastasiya Tsarkova)
+- The `reinterpretAsString` function is extended for all data types that are stored contiguously in memory.
+- Added the `--silent` option for the `clickhouse-local` tool. It suppresses printing query execution info in stderr.
+- Added support for reading values of type `Date` from text in a format where the month and/or day of the month is specified using a single digit instead of two digits (Amos Bird).
+
+#### Performance optimizations: {#performance-optimizations}
+
+- Improved performance of the aggregate functions `min`, `max`, `any`, `anyLast`, `anyHeavy`, `argMin`, `argMax` from string arguments.
+- Improved performance of the functions `isInfinite`, `isFinite`, `isNaN`, `roundToExp2`.
+- Improved performance of parsing and formatting `Date` and `DateTime` type values in text format.
+- Improved performance and precision of parsing floating point numbers.
+- Lowered memory usage for `JOIN` in the case when the left and right parts have columns with identical names that are not contained in `USING`.
+- Improved performance of the aggregate functions `varSamp`, `varPop`, `stddevSamp`, `stddevPop`, `covarSamp`, `covarPop`, `corr` by reducing computational stability. The old functions are available under the names `varSampStable`, `varPopStable`, `stddevSampStable`, `stddevPopStable`, `covarSampStable`, `covarPopStable`, `corrStable`.
+
+#### Bug fixes: {#bug-fixes-28}
+
+- Fixed data deduplication after running a `DROP` or `DETACH PARTITION` query. In the previous version, dropping a partition and inserting the same data again was not working because inserted blocks were considered duplicates.
+- Fixed a bug that could lead to incorrect interpretation of the `WHERE` clause for `CREATE MATERIALIZED VIEW` queries with `POPULATE`.
+- Fixed a bug in using the `root_path` parameter in the `zookeeper_servers` configuration.
+- Fixed unexpected results of passing a `Date` argument to `toStartOfDay`.
+- Fixed the `addMonths` and `subtractMonths` functions and the arithmetic for `INTERVAL n MONTH` in cases when the result has the previous year.
+- Added missing support for the `UUID` data type for `DISTINCT`, `JOIN`, and `uniq` aggregate functions and external dictionaries (Evgeniy Ivanov). Support for `UUID` is still incomplete.
+- Fixed `SummingMergeTree` behavior in cases when the rows summed to zero.
+- Various fixes for the `Kafka` engine (Marek Vavruša).
+- Fixed incorrect behavior of the `Join` table engine (Amos Bird).
+- Fixed incorrect allocator behavior under FreeBSD and OS X.
+- The `extractAll` function now supports empty matches.
+- Fixed an error that blocked usage of `libressl` instead of `openssl`.
+- Fixed the `CREATE TABLE AS SELECT` query from temporary tables.
+- Fixed non-atomicity of updating the replication queue. This could lead to replicas being out of sync until the server restarts.
+- Fixed possible overflow in `gcd`, `lcm` and `modulo` (the `%` operator) (Maks Skorokhod).
+- `-preprocessed` files are now created after changing `umask` (`umask` can be changed in the config).
+- Fixed a bug in the background check of parts (`MergeTreePartChecker`) when using a custom partition key.
+- Fixed parsing of tuples (values of the `Tuple` data type) in text formats.
+- Improved error messages about incompatible types passed to `multiIf`, `array` and some other functions.
+- Redesigned support for `Nullable` types. Fixed bugs that may lead to a server crash. Fixed almost all other bugs related to `NULL` support: incorrect type conversions in INSERT SELECT, insufficient support for Nullable in HAVING and PREWHERE, the `join_use_nulls` mode, Nullable types as arguments of the `OR` operator, etc.
+- Fixed various bugs related to the internal semantics of data types. Examples: unnecessary summing of `Enum` type fields in `SummingMergeTree`; alignment of `Enum` types in `Pretty` formats, etc.
+- Stricter checks for allowed combinations of composite columns.
+- Fixed the overflow when specifying a very large parameter for the `FixedString` data type.
+- Fixed a bug in the `topK` aggregate function in a generic case.
+- Added the missing check for equality of array sizes in arguments of n-ary variants of aggregate functions with an `-Array` combinator.
+- Fixed a bug in `--pager` for `clickhouse-client` (author: ks1322).
+- Fixed the precision of the `exp10` function.
+- Fixed the behavior of the `visitParamExtract` function for better compliance with documentation.
+- Fixed the crash when incorrect data types are specified.
+- Fixed the behavior of `DISTINCT` in the case when all columns are constants.
+- Fixed query formatting in the case of using the `tupleElement` function with a complex constant expression as the tuple element index.
+- Fixed a bug in `Dictionary` tables for `range_hashed` dictionaries.
+- Fixed a bug that led to excessive rows in the result of `FULL` and `RIGHT JOIN` (Amos Bird).
+- Fixed a server crash when creating and removing temporary files in `config.d` directories during config reload.
+- Fixed the `SYSTEM DROP DNS CACHE` query: the cache was flushed but addresses of cluster nodes were not updated.
+- Fixed the behavior of `MATERIALIZED VIEW` after executing `DETACH TABLE` for the table under the view (Marek Vavruša).
+
+#### Build improvements: {#build-improvements-4}
+
+- The `pbuilder` tool is used for builds. The build process is almost completely independent of the build host environment.
+- A single build is used for different OS versions. Packages and binaries have been made compatible with a wide range of Linux systems.
+- Added the `clickhouse-test` package. It can be used to run functional tests.
+- The source tarball can now be published to the repository. It can be used to reproduce the build without using GitHub.
+- Added limited integration with Travis CI. Due to limits on build time in Travis, only the debug build is tested and a limited subset of tests are run.
+- Added support for `Cap'n'Proto` in the default build.
+- Changed the format of documentation sources from `Restricted Text` to `Markdown`.
+- Added support for `systemd` (Vladimir Smirnov). It is disabled by default due to incompatibility with some OS images and can be enabled manually.
+- For dynamic code generation, `clang` and `lld` are embedded into the `clickhouse` binary. They can also be invoked as `clickhouse clang` and `clickhouse lld`.
+- Removed usage of GNU extensions from the code. Enabled the `-Wextra` option. When building with `clang` the default is `libc++` instead of `libstdc++`.
+- Extracted the `clickhouse_parsers` and `clickhouse_common_io` libraries to speed up builds of various tools.
+
+#### Backward incompatible changes: {#backward-incompatible-changes-11}
+
+- The format for marks in `Log` type tables that contain `Nullable` columns was changed in a backward incompatible way. If you have these tables, you should convert them to the `TinyLog` type before starting up the new server version. To do this, replace `ENGINE = Log` with `ENGINE = TinyLog` in the corresponding `.sql` file in the `metadata` directory. If your table doesn't have `Nullable` columns or if the type of your table is not `Log`, then you don't need to do anything.
+- Removed the `experimental_allow_extended_storage_definition_syntax` setting. Now this feature is enabled by default.
+- The `runningIncome` function was renamed to `runningDifferenceStartingWithFirstvalue` to avoid confusion.
+- Removed the `FROM ARRAY JOIN arr` syntax when ARRAY JOIN is specified directly after FROM with no table (Amos Bird).
+- Removed the `BlockTabSeparated` format that was used solely for demonstration purposes.
+- Changed the state format for the aggregate functions `varSamp`, `varPop`, `stddevSamp`, `stddevPop`, `covarSamp`, `covarPop`, `corr`. If you have stored states of these aggregate functions in tables (using the `AggregateFunction` data type or materialized views with corresponding states), please write to clickhouse-feedback@yandex-team.com.
+- In previous server versions there was an undocumented feature: if an aggregate function depends on parameters, you could still specify it without parameters in the AggregateFunction data type. Example: `AggregateFunction(quantiles, UInt64)` instead of `AggregateFunction(quantiles(0.5, 0.9), UInt64)`. This feature was lost. Although it was undocumented, we plan to support it again in future releases.
+- Enum data types cannot be used in min/max aggregate functions. This ability will be returned in the next release.
+
+#### Please note when upgrading: {#please-note-when-upgrading}
+
+- When doing a rolling update on a cluster, at the point when some of the replicas are running the old version of ClickHouse and some are running the new version, replication is temporarily stopped and the message `unknown parameter 'shard'` appears in the log. Replication will continue after all replicas of the cluster are updated.
+- If different versions of ClickHouse are running on the cluster servers, it is possible that distributed queries using the following functions will have incorrect results: `varSamp`, `varPop`, `stddevSamp`, `stddevPop`, `covarSamp`, `covarPop`, `corr`. You should update all cluster nodes.
+
+## [Changelog for 2017](https://github.com/ClickHouse/ClickHouse/blob/master/docs/en/changelog/2017.md) {#changelog-for-2017}
diff --git a/docs/zh/whats_new/changelog/2019.md b/docs/zh/whats_new/changelog/2019.md
new file mode 100644
index 00000000000..f776141b14a
--- /dev/null
+++ b/docs/zh/whats_new/changelog/2019.md
@@ -0,0 +1,2074 @@
+---
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_priority: 77
+toc_title: '2019'
+---
+
+## ClickHouse release v19.17 {#clickhouse-release-v19-17}
+
+### ClickHouse release v19.17.6.36, 2019-12-27 {#clickhouse-release-v19-17-6-36-2019-12-27}
+
+#### Bug Fix {#bug-fix}
+
+- Fixed potential buffer overflow in decompress. A malicious user can pass fabricated compressed data that could cause a read after the buffer. This issue was found by Eldar Zaitov from the Yandex information security team. [\#8404](https://github.com/ClickHouse/ClickHouse/pull/8404) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a possible server crash (`std::terminate`) when the server cannot send or write data in the JSON or XML format with values of the String data type (that require UTF-8 validation), or when compressing result data with the Brotli algorithm, or in some other rare cases. [\#8384](https://github.com/ClickHouse/ClickHouse/pull/8384) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed dictionaries with the source from a clickhouse `VIEW`; now reading such dictionaries doesn't cause the error `There is no query`. [\#8351](https://github.com/ClickHouse/ClickHouse/pull/8351) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed checking whether a client host is allowed by the host\_regexp specified in users.xml. [\#8241](https://github.com/ClickHouse/ClickHouse/pull/8241), [\#8342](https://github.com/ClickHouse/ClickHouse/pull/8342) ([Vitaly Baranov](https://github.com/vitlibar))
+- `RENAME TABLE` for a distributed table now renames the folder containing inserted data before sending to shards. This fixes an issue with successive renames `tableA->tableB`, `tableC->tableA`. [\#8306](https://github.com/ClickHouse/ClickHouse/pull/8306) ([tavplubix](https://github.com/tavplubix))
+- `range_hashed` external dictionaries created by DDL queries now allow ranges of arbitrary numeric types. [\#8275](https://github.com/ClickHouse/ClickHouse/pull/8275) ([alesapin](https://github.com/alesapin))
+- Fixed the `INSERT INTO table SELECT ... FROM mysql(...)` table function. [\#8234](https://github.com/ClickHouse/ClickHouse/pull/8234) ([tavplubix](https://github.com/tavplubix))
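+  A hedged sketch of the fixed pattern (hypothetical table and connection parameters):
+
+  ```sql
+  INSERT INTO local_table
+  SELECT * FROM mysql('mysql-host:3306', 'db', 'table', 'user', 'password')
+  ```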
+- Fixed a segfault in `INSERT INTO TABLE FUNCTION file()` while inserting into a file that doesn't exist. Now in this case the file will be created and then the insert will be processed. [\#8177](https://github.com/ClickHouse/ClickHouse/pull/8177) ([Olga Khvostikova](https://github.com/stavrolia))
+- Fixed a bitmapAnd error when intersecting an aggregated bitmap and a scalar bitmap. [\#8082](https://github.com/ClickHouse/ClickHouse/pull/8082) ([Yue Huang](https://github.com/moon03432))
+- Fixed a segfault when an `EXISTS` query is used without the `TABLE` or `DICTIONARY` qualifier, just like `EXISTS t`. [\#8213](https://github.com/ClickHouse/ClickHouse/pull/8213) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed the return type of the functions `rand` and `randConstant` in the case of a Nullable argument. Now the functions always return `UInt32` and never `Nullable(UInt32)`. [\#8204](https://github.com/ClickHouse/ClickHouse/pull/8204) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed `DROP DICTIONARY IF EXISTS db.dict`; now it doesn't throw an exception if `db` doesn't exist. [\#8185](https://github.com/ClickHouse/ClickHouse/pull/8185) ([Vitaly Baranov](https://github.com/vitlibar))
+- If a table wasn't completely dropped because of a server crash, the server will try to restore and load it. [\#8176](https://github.com/ClickHouse/ClickHouse/pull/8176) ([tavplubix](https://github.com/tavplubix))
+- Fixed a trivial count query for a distributed table if there are more than two shard local tables. [\#8164](https://github.com/ClickHouse/ClickHouse/pull/8164) ([nicelulu](https://github.com/nicelulu))
+- Fixed a bug that led to a data race in DB::BlockStreamProfileInfo::calculateRowsBeforeLimit() [\#8143](https://github.com/ClickHouse/ClickHouse/pull/8143) ([Alexander Kazakov](https://github.com/Akazz))
+- Fixed `ALTER table MOVE part` executing immediately after merging the specified part, which could cause moving a part which the specified part merged into. Now it correctly moves the specified part. [\#8104](https://github.com/ClickHouse/ClickHouse/pull/8104) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Expressions for dictionaries can now be specified as strings. This is useful for calculation of attributes while extracting data from non-ClickHouse sources, because it allows using non-ClickHouse syntax for those expressions. [\#8098](https://github.com/ClickHouse/ClickHouse/pull/8098) ([alesapin](https://github.com/alesapin))
+- Fixed a very rare race in `clickhouse-copier` because of an overflow in ZXid. [\#8088](https://github.com/ClickHouse/ClickHouse/pull/8088) ([dingxiangfei2009](https://github.com/dingxiangfei2009))
+- Fixed a bug when after a failed query (due to “Too many simultaneous queries”, for example) it would not read the external tables info, and the
+  next request would interpret this info as the beginning of the next query, causing an error like `Unknown packet from client`. [\#8084](https://github.com/ClickHouse/ClickHouse/pull/8084) ([Azat Khuzhin](https://github.com/azat))
+- Avoid a null dereference after “Unknown packet X from server” [\#8071](https://github.com/ClickHouse/ClickHouse/pull/8071) ([Azat Khuzhin](https://github.com/azat))
+- Restored support for all ICU locales, added the ability to apply collations for constant expressions, and added the language name to the system.collations table. [\#8051](https://github.com/ClickHouse/ClickHouse/pull/8051) ([alesapin](https://github.com/alesapin))
+- The number of streams for reading from `StorageFile` and `StorageHDFS` is now limited, to avoid exceeding the memory limit. [\#7981](https://github.com/ClickHouse/ClickHouse/pull/7981) ([alesapin](https://github.com/alesapin))
+- Fixed the `CHECK TABLE` query for `*MergeTree` tables without a key. [\#7979](https://github.com/ClickHouse/ClickHouse/pull/7979) ([alesapin](https://github.com/alesapin))
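+  A hedged illustration (hypothetical table; a MergeTree without a key is declared with `ORDER BY tuple()`):
+
+  ```sql
+  CREATE TABLE t_no_key (x UInt8) ENGINE = MergeTree ORDER BY tuple();
+  CHECK TABLE t_no_key
+  ```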
+- Removed the mutation number from a part name in case there were no mutations. This removal improved compatibility with older versions. [\#8250](https://github.com/ClickHouse/ClickHouse/pull/8250) ([alesapin](https://github.com/alesapin))
+- Fixed a bug where mutations are skipped for some attached parts due to their data\_version being larger than the table mutation version. [\#7812](https://github.com/ClickHouse/ClickHouse/pull/7812) ([Zhichang Yu](https://github.com/yuzhichang))
+- Allow starting the server with redundant copies of parts after moving them to another device. [\#7810](https://github.com/ClickHouse/ClickHouse/pull/7810) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fixed the error “Sizes of columns doesn’t match” that might appear when using aggregate function columns. [\#7790](https://github.com/ClickHouse/ClickHouse/pull/7790) ([Boris Granveaud](https://github.com/bgranvea))
+- Now an exception will be thrown in case of using WITH TIES alongside LIMIT BY. And now it's possible to use TOP with LIMIT BY. [\#7637](https://github.com/ClickHouse/ClickHouse/pull/7637) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Fixed dictionary reload if it has an `invalidate_query`, which stopped updates after previous update attempts ended with some exception. [\#8029](https://github.com/ClickHouse/ClickHouse/pull/8029) ([alesapin](https://github.com/alesapin))
+
+### ClickHouse release v19.17.4.11, 2019-11-22 {#clickhouse-release-v19-17-4-11-2019-11-22}
+
+#### Backward Incompatible Change {#backward-incompatible-change}
+
+- Using a column instead of AST to store scalar subquery results for better performance. The `enable_scalar_subquery_optimization` setting was added in 19.17 and it was enabled by default. It leads to errors like [this](https://github.com/ClickHouse/ClickHouse/issues/7851) during an upgrade to 19.17.2 or 19.17.3 from previous versions. This setting was disabled by default in 19.17.4, to make it possible to upgrade from 19.16 and older versions without errors. [\#7392](https://github.com/ClickHouse/ClickHouse/pull/7392) ([Amos Bird](https://github.com/amosbird))
+
+#### New Feature {#new-feature}
+
+- Added the ability to create dictionaries with DDL queries. [\#7360](https://github.com/ClickHouse/ClickHouse/pull/7360) ([alesapin](https://github.com/alesapin))
+- Made the `bloom_filter` type of index support `LowCardinality` and `Nullable` [\#7363](https://github.com/ClickHouse/ClickHouse/issues/7363) [\#7561](https://github.com/ClickHouse/ClickHouse/pull/7561) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Added the function `isValidJSON` to check that the passed string is valid json. [\#5910](https://github.com/ClickHouse/ClickHouse/issues/5910) [\#7293](https://github.com/ClickHouse/ClickHouse/pull/7293) ([Vdimir](https://github.com/Vdimir))
+- Implemented the `arrayCompact` function [\#7328](https://github.com/ClickHouse/ClickHouse/pull/7328) ([Memo](https://github.com/Joeywzr))
+- Created the function `hex` for Decimal numbers. It works like `hex(reinterpretAsString())`, but doesn't delete the last zero bytes. [\#7355](https://github.com/ClickHouse/ClickHouse/pull/7355) ([Mikhail Korotov](https://github.com/millb))
+- Added the `arrayFill` and `arrayReverseFill` functions, which replace elements by other elements in front/behind of them in the array. [\#7380](https://github.com/ClickHouse/ClickHouse/pull/7380) ([hcz](https://github.com/hczhcz))
+- Added `CRC32IEEE()`/`CRC64()` support [\#7480](https://github.com/ClickHouse/ClickHouse/pull/7480) ([Azat Khuzhin](https://github.com/azat))
+- Implemented the `char` function similar to the one in [mysql](https://dev.mysql.com/doc/refman/8.0/en/string-functions.html#function_char) [\#7486](https://github.com/ClickHouse/ClickHouse/pull/7486) ([sundyli](https://github.com/sundy-li))
+- Added the `bitmapTransform` function. It transforms an array of values in a bitmap to another array of values, the result of which is a new bitmap [\#7598](https://github.com/ClickHouse/ClickHouse/pull/7598) ([Zhichang Yu](https://github.com/yuzhichang))
+- Implemented the `javaHashUTF16LE()` function [\#7651](https://github.com/ClickHouse/ClickHouse/pull/7651) ([achimbab](https://github.com/achimbab))
+- Added the `_shard_num` virtual column for the Distributed engine [\#7624](https://github.com/ClickHouse/ClickHouse/pull/7624) ([Azat Khuzhin](https://github.com/azat))
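+  A hedged illustration (hypothetical Distributed table `dist_hits`):
+
+  ```sql
+  SELECT _shard_num, count() FROM dist_hits GROUP BY _shard_num
+  ```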
+#### Experimental Feature {#experimental-feature}
+
+- Support for processors (the new query execution pipeline) in `MergeTree`. [\#7181](https://github.com/ClickHouse/ClickHouse/pull/7181) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+
+#### Bug Fix {#bug-fix-1}
+
+- Fixed incorrect float parsing in `Values` [\#7817](https://github.com/ClickHouse/ClickHouse/issues/7817) [\#7870](https://github.com/ClickHouse/ClickHouse/pull/7870) ([tavplubix](https://github.com/tavplubix))
+- Fixed a rare deadlock which can happen when trace\_log is enabled. [\#7838](https://github.com/ClickHouse/ClickHouse/pull/7838) ([filimonov](https://github.com/filimonov))
+- Prevent message duplication when producing to a Kafka table that has any MVs selecting from it [\#7265](https://github.com/ClickHouse/ClickHouse/pull/7265) ([Ivan](https://github.com/abyss7))
+- Support for `Array(LowCardinality(Nullable(String)))` in `IN`. Resolves [\#7364](https://github.com/ClickHouse/ClickHouse/issues/7364) [\#7366](https://github.com/ClickHouse/ClickHouse/pull/7366) ([achimbab](https://github.com/achimbab))
+- Added handling of `SQL_TINYINT` and `SQL_BIGINT`, and fixed handling of the `SQL_FLOAT` data source types in the ODBC bridge. [\#7491](https://github.com/ClickHouse/ClickHouse/pull/7491) ([Denis Glazachev](https://github.com/traceon))
+- Fixed aggregation (`avg` and quantiles) over empty decimal columns [\#7431](https://github.com/ClickHouse/ClickHouse/pull/7431) ([Andrey Konyaev](https://github.com/akonyaev90))
+- Fixed `INSERT` into Distributed with `MATERIALIZED` columns [\#7377](https://github.com/ClickHouse/ClickHouse/pull/7377) ([Azat Khuzhin](https://github.com/azat))
+- Made `MOVE PARTITION` work if some parts of the partition are already on the destination disk or volume [\#7434](https://github.com/ClickHouse/ClickHouse/pull/7434) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fixed a bug with hardlinks failing to be created during mutations in `ReplicatedMergeTree` in multi-disk configurations. [\#7558](https://github.com/ClickHouse/ClickHouse/pull/7558) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fixed a bug with mutations on a MergeTree when the whole part remains unchanged and the best space is being found on another disk [\#7602](https://github.com/ClickHouse/ClickHouse/pull/7602) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fixed a bug with `keep_free_space_ratio` not being read from the disks configuration [\#7645](https://github.com/ClickHouse/ClickHouse/pull/7645) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fixed a bug with tables containing only `Tuple` columns or columns with complex paths. Fixes [7541](https://github.com/ClickHouse/ClickHouse/issues/7541). [\#7545](https://github.com/ClickHouse/ClickHouse/pull/7545) ([alesapin](https://github.com/alesapin))
+- Do not account memory for the Buffer engine in the max\_memory\_usage limit [\#7552](https://github.com/ClickHouse/ClickHouse/pull/7552) ([Azat Khuzhin](https://github.com/azat))
+- Fixed final mark usage in `MergeTree` tables ordered by `tuple()`. In rare cases it could lead to a `Can't adjust last granule` error while selecting. [\#7639](https://github.com/ClickHouse/ClickHouse/pull/7639) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed a bug in mutations that have a predicate with actions that require context (for example functions for json), which may lead to crashes or strange exceptions. [\#7664](https://github.com/ClickHouse/ClickHouse/pull/7664) ([alesapin](https://github.com/alesapin))
+- Fixed a mismatch of escaped database and table names in the `data/` and `shadow/` directories [\#7575](https://github.com/ClickHouse/ClickHouse/pull/7575) ([Alexander Burmak](https://github.com/Alex-Burmak))
+- Support duplicated keys in RIGHT\|FULL JOINs, e.g. `ON t.x = u.x AND t.x = u.y`. Fixed a crash in this case. [\#7586](https://github.com/ClickHouse/ClickHouse/pull/7586) ([Artem Zuikov](https://github.com/4ertus2))
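+  A hedged illustration of the now-supported condition (hypothetical tables `t` and `u`):
+
+  ```sql
+  SELECT * FROM t FULL JOIN u ON t.x = u.x AND t.x = u.y
+  ```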
+- Fixed `Not found column in block` when joining on an expression with a RIGHT or FULL JOIN. [\#7641](https://github.com/ClickHouse/ClickHouse/pull/7641) ([Artem Zuikov](https://github.com/4ertus2))
+- One more attempt to fix the infinite loop in the `PrettySpace` format [\#7591](https://github.com/ClickHouse/ClickHouse/pull/7591) ([Olga Khvostikova](https://github.com/stavrolia))
+- Fixed a bug in the `concat` function when all arguments are `FixedString` of the same size. [\#7635](https://github.com/ClickHouse/ClickHouse/pull/7635) ([alesapin](https://github.com/alesapin))
+- Fixed an exception in case of using 1 argument while defining S3, URL and HDFS storages. [\#7618](https://github.com/ClickHouse/ClickHouse/pull/7618) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fixed the scope of InterpreterSelectQuery for views with a query [\#7601](https://github.com/ClickHouse/ClickHouse/pull/7601) ([Azat Khuzhin](https://github.com/azat))
+
+#### Improvement {#improvement}
+
+- `Nullable` columns are recognized and NULL values are handled correctly by the ODBC bridge [\#7402](https://github.com/ClickHouse/ClickHouse/pull/7402) ([Vasily Nemkov](https://github.com/Enmk))
+- Write the current batch for distributed sends atomically [\#7600](https://github.com/ClickHouse/ClickHouse/pull/7600) ([Azat Khuzhin](https://github.com/azat))
+- Throw an exception if we cannot detect the table for a column name in a query. [\#7358](https://github.com/ClickHouse/ClickHouse/pull/7358) ([Artem Zuikov](https://github.com/4ertus2))
+- Added the `merge_max_block_size` setting to `MergeTreeSettings` [\#7412](https://github.com/ClickHouse/ClickHouse/pull/7412) ([Artem Zuikov](https://github.com/4ertus2))
+- Queries with `HAVING` and without `GROUP BY` assume group by constant. So, `SELECT 1 HAVING 1` now returns a result. [\#7496](https://github.com/ClickHouse/ClickHouse/pull/7496) ([Amos Bird](https://github.com/amosbird))
+- Support parsing `(X,)` as a tuple similar to python. [\#7501](https://github.com/ClickHouse/ClickHouse/pull/7501), [\#7562](https://github.com/ClickHouse/ClickHouse/pull/7562) ([Amos Bird](https://github.com/amosbird))
+- Made the `range` function behave almost like a pythonic one. [\#7518](https://github.com/ClickHouse/ClickHouse/pull/7518) ([sundyli](https://github.com/sundy-li))
+- Added the `constraints` column to the `system.settings` table [\#7553](https://github.com/ClickHouse/ClickHouse/pull/7553) ([Vitaly Baranov](https://github.com/vitlibar))
+- Better Null format for the TCP handler, so that it's possible to use `select ignore() from table format Null` for perf measurement via clickhouse-client [\#7606](https://github.com/ClickHouse/ClickHouse/pull/7606) ([Amos Bird](https://github.com/amosbird))
+- Queries like `CREATE TABLE ... AS (SELECT (1, 2))` are parsed correctly [\#7542](https://github.com/ClickHouse/ClickHouse/pull/7542) ([hcz](https://github.com/hczhcz))
+
+#### Performance Improvement {#performance-improvement}
+
+- Improved the performance of aggregation over short string keys. [\#6243](https://github.com/ClickHouse/ClickHouse/pull/6243) ([Alexander Kuzmenkov](https://github.com/akuzm), [Amos Bird](https://github.com/amosbird))
+- Run another pass of syntax/expression analysis to get potential optimizations after constant predicates are folded. [\#7497](https://github.com/ClickHouse/ClickHouse/pull/7497) ([Amos Bird](https://github.com/amosbird))
+- Use storage meta info to evaluate trivial `SELECT count() FROM table;` [\#7510](https://github.com/ClickHouse/ClickHouse/pull/7510) ([Amos Bird](https://github.com/amosbird), [alexey-milovidov](https://github.com/alexey-milovidov))
+- Vectorize processing of `arrayReduce` similar to the aggregator's `addBatch`. [\#7608](https://github.com/ClickHouse/ClickHouse/pull/7608) ([Amos Bird](https://github.com/amosbird))
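+  A small illustration of `arrayReduce`, which this change speeds up:
+
+  ```sql
+  SELECT arrayReduce('sum', [1, 2, 3]) AS s -- returns 6
+  ```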
+- Minor improvements in the performance of `Kafka` consumption [\#7475](https://github.com/ClickHouse/ClickHouse/pull/7475) ([Ivan](https://github.com/abyss7))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement}
+
+- Added support for cross-compiling to the CPU architecture AARCH64. Refactored the packager script. [\#7370](https://github.com/ClickHouse/ClickHouse/pull/7370) [\#7539](https://github.com/ClickHouse/ClickHouse/pull/7539) ([Ivan](https://github.com/abyss7))
+- Unpack darwin-x86\_64 and linux-aarch64 toolchains into a mounted Docker volume when building packages [\#7534](https://github.com/ClickHouse/ClickHouse/pull/7534) ([Ivan](https://github.com/abyss7))
+- Updated the Docker image for the binary packager [\#7474](https://github.com/ClickHouse/ClickHouse/pull/7474) ([Ivan](https://github.com/abyss7))
+- Fixed compile errors on MacOS Catalina [\#7585](https://github.com/ClickHouse/ClickHouse/pull/7585) ([Ernest Poletaev](https://github.com/ernestp))
+- Some refactoring in the query analysis logic: split a complex class into several simple ones. [\#7454](https://github.com/ClickHouse/ClickHouse/pull/7454) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed the build without submodules [\#7295](https://github.com/ClickHouse/ClickHouse/pull/7295) ([proller](https://github.com/proller))
+- Better `add_globs` in CMake files [\#7418](https://github.com/ClickHouse/ClickHouse/pull/7418) ([Amos Bird](https://github.com/amosbird))
+- Removed hardcoded paths in the `unwind` target [\#7460](https://github.com/ClickHouse/ClickHouse/pull/7460) ([Konstantin Podshumok](https://github.com/podshumok))
+- Allow using the mysql format without ssl [\#7524](https://github.com/ClickHouse/ClickHouse/pull/7524) ([proller](https://github.com/proller))
+
+#### Other {#other}
+
+- Added an ANTLR4 grammar for the ClickHouse SQL dialect [\#7595](https://github.com/ClickHouse/ClickHouse/issues/7595) [\#7596](https://github.com/ClickHouse/ClickHouse/pull/7596) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+## ClickHouse release v19.16 {#clickhouse-release-v19-16}
+
+#### ClickHouse release v19.16.14.65, 2020-03-25 {#clickhouse-release-v19-16-14-65-2020-03-25}
+
+- Fixed a bug in batched calculations of ternary logical OPs on multiple arguments (more than 10). [\#8718](https://github.com/ClickHouse/ClickHouse/pull/8718) ([Alexander Kazakov](https://github.com/Akazz)) This bugfix was backported to version 19.16 by a special request from Altinity.
+
+#### ClickHouse release v19.16.14.65, 2020-03-05 {#clickhouse-release-v19-16-14-65-2020-03-05}
+
+- Fixed distributed subqueries incompatibility with older CH versions. Fixes [\#7851](https://github.com/ClickHouse/ClickHouse/issues/7851)
+  [(tabplubix)](https://github.com/tavplubix)
+- When executing a `CREATE` query, fold constant expressions in storage engine arguments. Replace an empty database name with the current database. Fixes [\#6508](https://github.com/ClickHouse/ClickHouse/issues/6508), [\#3492](https://github.com/ClickHouse/ClickHouse/issues/3492). Also fixed the check for local addresses in `ClickHouseDictionarySource`.
+  [\#9262](https://github.com/ClickHouse/ClickHouse/pull/9262) [(tabplubix)](https://github.com/tavplubix)
+- Now background merges in the `*MergeTree` table engines family preserve the storage policy volume order more accurately.
+  [\#8549](https://github.com/ClickHouse/ClickHouse/pull/8549) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Prevent losing data in `Kafka` in rare cases when an exception happens after reading the suffix but before commit. Fixes [\#9378](https://github.com/ClickHouse/ClickHouse/issues/9378). Related: [\#7175](https://github.com/ClickHouse/ClickHouse/issues/7175)
+  [\#9507](https://github.com/ClickHouse/ClickHouse/pull/9507) [(filimonov)](https://github.com/filimonov)
+- Fixed a bug leading to server termination when trying to use / drop a `Kafka` table created with wrong parameters. Fixes [\#9494](https://github.com/ClickHouse/ClickHouse/issues/9494). Incorporates [\#9507](https://github.com/ClickHouse/ClickHouse/issues/9507).
+  [\#9513](https://github.com/ClickHouse/ClickHouse/pull/9513) [(filimonov)](https://github.com/filimonov)
+- Allow using `MaterializedView` with subqueries above `Kafka` tables.
+  [\#8197](https://github.com/ClickHouse/ClickHouse/pull/8197) ([filimonov](https://github.com/filimonov))
+
+#### New Feature {#new-feature-1}
+
+- Added the `deduplicate_blocks_in_dependent_materialized_views` option to control the behaviour of idempotent inserts into tables with materialized views. This new feature was added to the bugfix release by a special request from Altinity.
+  [\#9070](https://github.com/ClickHouse/ClickHouse/pull/9070) [(urykhy)](https://github.com/urykhy)
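+  For illustration, as a user-level setting it can be enabled per session (a hedged sketch):
+
+  ```sql
+  SET deduplicate_blocks_in_dependent_materialized_views = 1
+  ```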
+### ClickHouse release v19.16.2.2, 2019-10-30 {#clickhouse-release-v19-16-2-2-2019-10-30}
+
+#### Backward Incompatible Change {#backward-incompatible-change-1}
+
+- Added missing arity validation for count/countIf.
+  [\#7095](https://github.com/ClickHouse/ClickHouse/issues/7095)
+  [\#7298](https://github.com/ClickHouse/ClickHouse/pull/7298) ([Vdimir](https://github.com/Vdimir))
+- Removed the legacy `asterisk_left_columns_only` setting (it was disabled by default).
+  [\#7335](https://github.com/ClickHouse/ClickHouse/pull/7335) ([Artem Zuikov](https://github.com/4ertus2))
+- Format strings for the Template data format are now specified in files.
+  [\#7118](https://github.com/ClickHouse/ClickHouse/pull/7118)
+  ([tavplubix](https://github.com/tavplubix))
+
+#### New Feature {#new-feature-2}
+
+- Introduced uniqCombined64() to calculate cardinality greater than UINT\_MAX.
+  [\#7213](https://github.com/ClickHouse/ClickHouse/pull/7213),
+  [\#7222](https://github.com/ClickHouse/ClickHouse/pull/7222) ([Azat Khuzhin](https://github.com/azat))
+- Support Bloom filter indexes on Array columns.
+  [\#6984](https://github.com/ClickHouse/ClickHouse/pull/6984)
+  ([achimbab](https://github.com/achimbab))
+- Added the function `getMacro(name)` that returns a String with the value of the corresponding `<macros>`
+  from the server configuration. [\#7240](https://github.com/ClickHouse/ClickHouse/pull/7240)
+  ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Set two configuration options for a dictionary based on an HTTP source: `credentials` and
+  `http-headers`. [\#7092](https://github.com/ClickHouse/ClickHouse/pull/7092) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Added a new ProfileEvent `Merge` that counts the number of launched background merges.
+  [\#7093](https://github.com/ClickHouse/ClickHouse/pull/7093) ([Mikhail Korotov](https://github.com/millb))
+- Added the fullHostName function that returns a fully qualified domain name.
+  [\#7263](https://github.com/ClickHouse/ClickHouse/issues/7263)
+  [\#7291](https://github.com/ClickHouse/ClickHouse/pull/7291) ([sundyli](https://github.com/sundy-li))
+- Added the functions `arraySplit` and `arrayReverseSplit` which split an array by “cut off”
+  conditions. They are useful in time sequence handling.
+  [\#7294](https://github.com/ClickHouse/ClickHouse/pull/7294) ([hcz](https://github.com/hczhcz))
+- Added new functions that return an array of all matched indices in the multiMatch family of functions.
+  [\#7299](https://github.com/ClickHouse/ClickHouse/pull/7299) ([Danila Kutenin](https://github.com/danlark1))
+- Added a new database engine `Lazy` that is optimized for storing a large number of small log
+  tables. [\#7171](https://github.com/ClickHouse/ClickHouse/pull/7171) ([Nikita Vasilev](https://github.com/nikvas0))
+- Added the aggregate functions groupBitmapAnd, -Or, -Xor for bitmap columns. [\#7109](https://github.com/ClickHouse/ClickHouse/pull/7109) ([Zhichang Yu](https://github.com/yuzhichang))
+- Added the aggregate function combinators -OrNull and -OrDefault, which return null
+  or default values when there is nothing to aggregate.
+  [\#7331](https://github.com/ClickHouse/ClickHouse/pull/7331)
+  ([hcz](https://github.com/hczhcz))
+- Introduced the CustomSeparated data format that supports custom escaping and
+  delimiter rules. [\#7118](https://github.com/ClickHouse/ClickHouse/pull/7118)
+  ([tavplubix](https://github.com/tavplubix))
+- Support Redis as a source of external dictionaries. [\#4361](https://github.com/ClickHouse/ClickHouse/pull/4361) [\#6962](https://github.com/ClickHouse/ClickHouse/pull/6962) ([comunodi](https://github.com/comunodi), [Anton Popov](https://github.com/CurtizJ))
+
+#### Bug Fix {#bug-fix-2}
+
+- Fixed an incorrect query result if it has `WHERE IN (SELECT ...)` sections and `optimize_read_in_order` is
+  used. [\#7371](https://github.com/ClickHouse/ClickHouse/pull/7371) ([Anton Popov](https://github.com/CurtizJ))
+- Disabled the MariaDB authentication plugin, which depends on files outside of the project.
+  [\#7140](https://github.com/ClickHouse/ClickHouse/pull/7140) ([Yuriy Baranov](https://github.com/yurriy))
+- Fixed the exception `Cannot convert column ... because it is constant but values of constants are different in source and result` which could rarely happen when the functions `now()`, `today()`,
+  `yesterday()`, `randConstant()` are used.
+  [\#7156](https://github.com/ClickHouse/ClickHouse/pull/7156) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed using the HTTP keep-alive timeout instead of the TCP keep-alive timeout.
+  [\#7351](https://github.com/ClickHouse/ClickHouse/pull/7351) ([Vasily Nemkov](https://github.com/Enmk))
+- Fixed a segmentation fault in groupBitmapOr (issue [\#7109](https://github.com/ClickHouse/ClickHouse/issues/7109)).
+  [\#7289](https://github.com/ClickHouse/ClickHouse/pull/7289) ([Zhichang Yu](https://github.com/yuzhichang))
+- For materialized views, the commit for Kafka is called after all data was written.
+  [\#7175](https://github.com/ClickHouse/ClickHouse/pull/7175) ([Ivan](https://github.com/abyss7))
+- Fixed the wrong `duration_ms` value in the `system.part_log` table. It was ten times off.
+  [\#7172](https://github.com/ClickHouse/ClickHouse/pull/7172) ([Vladimir Chebotarev](https://github.com/excitoon))
+- A quick fix to resolve a crash in the LIVE VIEW table and re-enable all LIVE VIEW tests.
+  [\#7201](https://github.com/ClickHouse/ClickHouse/pull/7201)
+  ([vzakaznikov](https://github.com/vzakaznikov))
+- Serialize NULL values correctly in min/max indexes of MergeTree parts.
+  [\#7234](https://github.com/ClickHouse/ClickHouse/pull/7234) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Don't put virtual columns into the .sql metadata when the table is created as `CREATE TABLE AS`.
+  [\#7183](https://github.com/ClickHouse/ClickHouse/pull/7183) ([Ivan](https://github.com/abyss7))
+- Fixed a segmentation fault in the `ATTACH PART` query.
+  [\#7185](https://github.com/ClickHouse/ClickHouse/pull/7185)
+  ([alesapin](https://github.com/alesapin))
+- Fixed the wrong result for some queries given by the optimization of empty IN subqueries and empty
+  INNER/RIGHT JOIN. [\#7284](https://github.com/ClickHouse/ClickHouse/pull/7284) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed an AddressSanitizer error in the LIVE VIEW getHeader() method.
+  [\#7271](https://github.com/ClickHouse/ClickHouse/pull/7271)
+  ([vzakaznikov](https://github.com/vzakaznikov))
+
+#### Improvement {#improvement-1}
+
+- Added a message in case queue\_wait\_max\_ms waiting takes place.
+  [\#7390](https://github.com/ClickHouse/ClickHouse/pull/7390) ([Azat Khuzhin](https://github.com/azat))
+- Made the setting `s3_min_upload_part_size` table-level.
+  [\#7059](https://github.com/ClickHouse/ClickHouse/pull/7059) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Check TTL in StorageFactory. [\#7304](https://github.com/ClickHouse/ClickHouse/pull/7304)
+  ([sundyli](https://github.com/sundy-li))
+- Squash left-hand blocks in partial merge join (optimization).
+  [\#7122](https://github.com/ClickHouse/ClickHouse/pull/7122) ([Artem Zuikov](https://github.com/4ertus2))
+- Do not allow non-deterministic functions in mutations of Replicated table engines, because this
+  can introduce inconsistencies between replicas.
+  [\#7247](https://github.com/ClickHouse/ClickHouse/pull/7247) ([Alexander Kazakov](https://github.com/Akazz))
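+  A hedged illustration of a mutation that is now rejected on `Replicated*` engines (hypothetical table):
+
+  ```sql
+  ALTER TABLE replicated_table DELETE WHERE rand() % 2 = 0 -- non-deterministic, now disallowed
+  ```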
+- Disable the memory tracker while converting exception stack traces to strings. It can prevent the loss
+  of error messages of type `Memory limit exceeded` on the server, which caused the `Attempt to read after eof` exception on the client. [\#7264](https://github.com/ClickHouse/ClickHouse/pull/7264)
+  ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Miscellaneous format improvements. Resolves
+  [\#6033](https://github.com/ClickHouse/ClickHouse/issues/6033),
+  [\#2633](https://github.com/ClickHouse/ClickHouse/issues/2633),
+  [\#6611](https://github.com/ClickHouse/ClickHouse/issues/6611),
+  [\#6742](https://github.com/ClickHouse/ClickHouse/issues/6742)
+  [\#7215](https://github.com/ClickHouse/ClickHouse/pull/7215)
+  ([tavplubix](https://github.com/tavplubix))
+- ClickHouse ignores values on the right side of the IN operator that are not convertible to the left
+  side type. Make it work properly for compound types – Array and Tuple.
+  [\#7283](https://github.com/ClickHouse/ClickHouse/pull/7283) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Support missing inequalities for ASOF JOIN. It's possible to join the less-or-equal variant and strict
+  greater and less variants for the ASOF column in the ON syntax.
+  [\#7282](https://github.com/ClickHouse/ClickHouse/pull/7282) ([Artem Zuikov](https://github.com/4ertus2))
+- Optimize partial merge join. [\#7070](https://github.com/ClickHouse/ClickHouse/pull/7070)
+  ([Artem Zuikov](https://github.com/4ertus2))
+- Do not use more than 98K of memory in the uniqCombined function.
+  [\#7236](https://github.com/ClickHouse/ClickHouse/pull/7236),
+  [\#7270](https://github.com/ClickHouse/ClickHouse/pull/7270) ([Azat Khuzhin](https://github.com/azat))
+- Flush parts of the right-hand joining table on disk in PartialMergeJoin (if there is not enough
+  memory). Load data back when needed. [\#7186](https://github.com/ClickHouse/ClickHouse/pull/7186)
+  ([Artem Zuikov](https://github.com/4ertus2))
+
+#### Performance Improvement {#performance-improvement-1}
+
+- Speed up joinGet with const arguments by avoiding data duplication.
+  [\#7359](https://github.com/ClickHouse/ClickHouse/pull/7359) ([Amos Bird](https://github.com/amosbird))
+- Return early if the subquery is empty.
+  [\#7007](https://github.com/ClickHouse/ClickHouse/pull/7007) ([nicelulu](https://github.com/nicelulu))
+- Optimize parsing of SQL expressions in Values.
+  [\#6781](https://github.com/ClickHouse/ClickHouse/pull/6781)
+  ([tavplubix](https://github.com/tavplubix))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-1}
+
+- Disable some contribs for cross-compilation to Mac OS.
+  [\#7101](https://github.com/ClickHouse/ClickHouse/pull/7101) ([Ivan](https://github.com/abyss7))
+- Added the missing linking with PocoXML for clickhouse\_common\_io.
+  [\#7200](https://github.com/ClickHouse/ClickHouse/pull/7200) ([Azat Khuzhin](https://github.com/azat))
+- Accept multiple test filter arguments in clickhouse-test.
+  [\#7226](https://github.com/ClickHouse/ClickHouse/pull/7226) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Enabled musl and jemalloc for ARM. [\#7300](https://github.com/ClickHouse/ClickHouse/pull/7300)
+  ([Amos Bird](https://github.com/amosbird))
+- Added the `--client-option` parameter to `clickhouse-test` to pass additional parameters to the client.
+  [\#7277](https://github.com/ClickHouse/ClickHouse/pull/7277) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Preserve the existing configs on rpm package upgrade.
+  [\#7103](https://github.com/ClickHouse/ClickHouse/pull/7103)
+  ([filimonov](https://github.com/filimonov))
+- Fixed errors detected by PVS. [\#7153](https://github.com/ClickHouse/ClickHouse/pull/7153) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed the build for Darwin. [\#7149](https://github.com/ClickHouse/ClickHouse/pull/7149)
+  ([Ivan](https://github.com/abyss7))
+- glibc 2.29 compatibility. [\#7142](https://github.com/ClickHouse/ClickHouse/pull/7142) ([Amos Bird](https://github.com/amosbird))
+- Make sure dh\_clean does not touch potential source files.
+  [\#7205](https://github.com/ClickHouse/ClickHouse/pull/7205) ([Amos Bird](https://github.com/amosbird))
+- Attempt to avoid conflicts when updating from an altinity rpm - it has a config file packaged separately
+  in clickhouse-server-common. [\#7073](https://github.com/ClickHouse/ClickHouse/pull/7073)
+  ([filimonov](https://github.com/filimonov))
+- Optimized some header files for faster rebuilds.
+  [\#7212](https://github.com/ClickHouse/ClickHouse/pull/7212),
+  [\#7231](https://github.com/ClickHouse/ClickHouse/pull/7231) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Added performance tests for Date and DateTime. [\#7332](https://github.com/ClickHouse/ClickHouse/pull/7332) ([Vasily Nemkov](https://github.com/Enmk))
+- Fixed some tests that contained non-deterministic mutations.
+  [\#7132](https://github.com/ClickHouse/ClickHouse/pull/7132) ([Alexander Kazakov](https://github.com/Akazz))
+- Added a build with MemorySanitizer to CI. [\#7066](https://github.com/ClickHouse/ClickHouse/pull/7066)
+  ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Avoid the use of uninitialized values in MetricsTransmitter.
+  [\#7158](https://github.com/ClickHouse/ClickHouse/pull/7158) ([Azat Khuzhin](https://github.com/azat))
+- Fixed some issues in Fields found by MemorySanitizer.
+  [\#7135](https://github.com/ClickHouse/ClickHouse/pull/7135),
+  [\#7179](https://github.com/ClickHouse/ClickHouse/pull/7179) ([Alexander Kuzmenkov](https://github.com/akuzm)), [\#7376](https://github.com/ClickHouse/ClickHouse/pull/7376)
+  ([Amos Bird](https://github.com/amosbird))
+- Fixed undefined behavior in murmurhash32. [\#7388](https://github.com/ClickHouse/ClickHouse/pull/7388) ([Amos Bird](https://github.com/amosbird))
+- Fixed undefined behavior in StoragesInfoStream. [\#7384](https://github.com/ClickHouse/ClickHouse/pull/7384)
+  ([tavplubix](https://github.com/tavplubix))
+- Fixed constant expression folding for external database engines (MySQL, ODBC, JDBC). In previous
+  versions it wasn't working for multiple constant expressions and was not working at all for Date,
+  DateTime and UUID. This fixes [\#7245](https://github.com/ClickHouse/ClickHouse/issues/7245)
+  [\#7252](https://github.com/ClickHouse/ClickHouse/pull/7252)
+  ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a ThreadSanitizer data race error in LIVE VIEW when accessing the no\_users\_thread variable.
+  [\#7353](https://github.com/ClickHouse/ClickHouse/pull/7353)
+  ([vzakaznikov](https://github.com/vzakaznikov))
+- Got rid of malloc symbols in libcommon
+  [\#7134](https://github.com/ClickHouse/ClickHouse/pull/7134),
+  [\#7065](https://github.com/ClickHouse/ClickHouse/pull/7065) ([Amos Bird](https://github.com/amosbird))
+- Added the global flag ENABLE\_LIBRARY for disabling all libraries.
+  [\#7063](https://github.com/ClickHouse/ClickHouse/pull/7063)
+  ([proller](https://github.com/proller))
+
+#### Code cleanup {#code-cleanup}
+
+- Generalized the configuration repository to prepare for DDL for Dictionaries. [\#7155](https://github.com/ClickHouse/ClickHouse/pull/7155)
+  ([alesapin](https://github.com/alesapin))
+- Parser for dictionaries DDL without any semantics.
+  [\#7209](https://github.com/ClickHouse/ClickHouse/pull/7209)
+  ([alesapin](https://github.com/alesapin))
+- Split ParserCreateQuery into different smaller parsers.
+  [\#7253](https://github.com/ClickHouse/ClickHouse/pull/7253)
+  ([alesapin](https://github.com/alesapin))
+- Small refactoring and renaming near external dictionaries.
+  [\#7111](https://github.com/ClickHouse/ClickHouse/pull/7111)
+  ([alesapin](https://github.com/alesapin))
+- Refactored some code to prepare for role-based access control. [\#7235](https://github.com/ClickHouse/ClickHouse/pull/7235) ([Vitaly Baranov](https://github.com/vitlibar))
+- Some improvements in the DatabaseOrdinary code.
+  [\#7086](https://github.com/ClickHouse/ClickHouse/pull/7086) ([Nikita Vasilev](https://github.com/nikvas0))
+- Do not use iterators in the find() and emplace() methods of hash tables.
+  [\#7026](https://github.com/ClickHouse/ClickHouse/pull/7026) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Fixed getMultipleValuesFromConfig in case when the parameter root is not empty. [\#7374](https://github.com/ClickHouse/ClickHouse/pull/7374)
+  ([Mikhail Korotov](https://github.com/millb))
+- Removed some copy-paste (TemporaryFile and TemporaryFileStream)
+  [\#7166](https://github.com/ClickHouse/ClickHouse/pull/7166) ([Artem Zuikov](https://github.com/4ertus2))
+- Improved code readability a little bit (`MergeTreeData::getActiveContainingPart`).
+  [\#7361](https://github.com/ClickHouse/ClickHouse/pull/7361) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Wait for all scheduled jobs which use local objects if `ThreadPool::schedule(...)` throws
+  an exception. Renamed `ThreadPool::schedule(...)` to `ThreadPool::scheduleOrThrowOnError(...)` and
+  fixed the comments to make it obvious that it may throw.
+  [\#7350](https://github.com/ClickHouse/ClickHouse/pull/7350)
+  ([tavplubix](https://github.com/tavplubix))
+
+## ClickHouse release 19.15 {#clickhouse-release-19-15}
+
+### ClickHouse release 19.15.4.10, 2019-10-31 {#clickhouse-release-19-15-4-10-2019-10-31}
+
+#### Bug Fix {#bug-fix-3}
+
+- Added handling of SQL\_TINYINT and SQL\_BIGINT, and fixed handling of the SQL\_FLOAT data source types in the ODBC bridge.
+  [\#7491](https://github.com/ClickHouse/ClickHouse/pull/7491) ([Denis Glazachev](https://github.com/traceon))
+- Allowed having some parts on the destination disk or volume in MOVE PARTITION.
+  [\#7434](https://github.com/ClickHouse/ClickHouse/pull/7434) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fixed NULL values in nullable columns through the ODBC bridge.
+  [\#7402](https://github.com/ClickHouse/ClickHouse/pull/7402) ([Vasily Nemkov](https://github.com/Enmk))
+- Fixed INSERT into a Distributed non-local node with MATERIALIZED columns.
+  [\#7377](https://github.com/ClickHouse/ClickHouse/pull/7377) ([Azat Khuzhin](https://github.com/azat))
+- Fixed the getMultipleValuesFromConfig function.
+  [\#7374](https://github.com/ClickHouse/ClickHouse/pull/7374) ([Mikhail Korotov](https://github.com/millb))
+- Fixed using the HTTP keep-alive timeout instead of the TCP keep-alive timeout.
+  [\#7351](https://github.com/ClickHouse/ClickHouse/pull/7351) ([Vasily Nemkov](https://github.com/Enmk))
+- Wait for all jobs to finish on exception (fixes rare segfaults).
+  [\#7350](https://github.com/ClickHouse/ClickHouse/pull/7350) ([tavplubix](https://github.com/tavplubix))
+- Don't push to MVs when inserting into a Kafka table.
+  [\#7265](https://github.com/ClickHouse/ClickHouse/pull/7265) ([Ivan](https://github.com/abyss7))
+- Disable the memory tracker for the exception stack.
+  [\#7264](https://github.com/ClickHouse/ClickHouse/pull/7264) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed bad code in transforming queries for external databases.
+  [\#7252](https://github.com/ClickHouse/ClickHouse/pull/7252) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Avoid the use of uninitialized values in MetricsTransmitter.
+  [\#7158](https://github.com/ClickHouse/ClickHouse/pull/7158) ([Azat Khuzhin](https://github.com/azat))
+- Added an example config with macros for tests ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse release 19.15.3.6, 2019-10-09 {#clickhouse-release-19-15-3-6-2019-10-09}
+
+#### Bug Fix {#bug-fix-4}
+
+- Fixed bad\_variant in a hashed dictionary.
+  ([alesapin](https://github.com/alesapin))
+- Fixed a bug with a segmentation fault in the ATTACH PART query.
+  ([alesapin](https://github.com/alesapin))
+- Fixed time calculation in `MergeTreeData`.
+  ([Vladimir Chebotarev](https://github.com/excitoon))
+- Commit to Kafka explicitly after the writing is finalized.
+  [\#7175](https://github.com/ClickHouse/ClickHouse/pull/7175) ([Ivan](https://github.com/abyss7))
+- Serialize NULL values correctly in min/max indexes of MergeTree parts.
+  [\#7234](https://github.com/ClickHouse/ClickHouse/pull/7234) ([Alexander Kuzmenkov](https://github.com/akuzm))
+
+### ClickHouse release 19.15.2.2, 2019-10-01 {#clickhouse-release-19-15-2-2-2019-10-01}
+
+#### New Feature {#new-feature-3}
+
+- Tiered storage: support using multiple storage volumes for tables with the MergeTree engine. It's possible to store fresh data on SSD and automatically move old data to HDD. ([example](https://clickhouse.github.io/clickhouse-presentations/meetup30/new_features/#12)). [\#4918](https://github.com/ClickHouse/ClickHouse/pull/4918) ([Igr](https://github.com/ObjatieGroba)) [\#6489](https://github.com/ClickHouse/ClickHouse/pull/6489) ([alesapin](https://github.com/alesapin))
+- Added the table function `input` for reading incoming data in `INSERT SELECT` queries. [\#5450](https://github.com/ClickHouse/ClickHouse/pull/5450) ([palasonic1](https://github.com/palasonic1)) [\#6832](https://github.com/ClickHouse/ClickHouse/pull/6832) ([Anton Popov](https://github.com/CurtizJ))
+- Added a `sparse_hashed` dictionary layout that is functionally equivalent to the `hashed` layout but more memory-efficient. It uses about half as much memory at the cost of slower value retrieval. [\#6894](https://github.com/ClickHouse/ClickHouse/pull/6894) ([Azat Khuzhin](https://github.com/azat))
+- Implemented the ability to define a list of users that may access dictionaries. Only the currently connected database is used. [\#6907](https://github.com/ClickHouse/ClickHouse/pull/6907) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Added the `LIMIT` option to the `SHOW` query. [\#6944](https://github.com/ClickHouse/ClickHouse/pull/6944) ([Philipp Malkovsky](https://github.com/malkfilipp))
+- Added the `bitmapSubsetLimit(bitmap, range_start, limit)` function that returns a subset of the smallest `limit` values in the set that are not smaller than `range_start`; see the combined example after the Improvements list below. [\#6957](https://github.com/ClickHouse/ClickHouse/pull/6957) ([Zhichang Yu](https://github.com/yuzhichang))
+- Added the `bitmapMin` and `bitmapMax` functions. [\#6970](https://github.com/ClickHouse/ClickHouse/pull/6970) ([Zhichang Yu](https://github.com/yuzhichang))
+- Added the function `repeat`, related to [issue-6648](https://github.com/ClickHouse/ClickHouse/issues/6648). [\#6999](https://github.com/ClickHouse/ClickHouse/pull/6999) ([flynn](https://github.com/ucasFL))
+
+#### Experimental feature {#experimental-feature-1}
+
+- Implemented a merge-join variant (in memory) that does not change the current pipeline. The result is partially sorted by the merge key. Set `partial_merge_join = 1` to use this feature. The merge join is still in development. [\#6940](https://github.com/ClickHouse/ClickHouse/pull/6940) ([Artem Zuikov](https://github.com/4ertus2))
+- Added the `S3` engine and table function. It is still in development (no authentication support yet). [\#5596](https://github.com/ClickHouse/ClickHouse/pull/5596) ([Vladimir Chebotarev](https://github.com/excitoon))
+
+#### Improvement {#improvement-2}
+
+- Every message read from Kafka is inserted atomically. This resolves almost all known issues with the Kafka engine. [\#6950](https://github.com/ClickHouse/ClickHouse/pull/6950) ([Ivan](https://github.com/abyss7))
+- Improvements to failover of distributed queries. Recovery time is shorter, it is now configurable, and it can be seen in `system.clusters`. [\#6399](https://github.com/ClickHouse/ClickHouse/pull/6399) ([Vasily Nemkov](https://github.com/Enmk))
+- Support numeric values of enums directly in the `IN` clause. \#6766 [\#6941](https://github.com/ClickHouse/ClickHouse/pull/6941) ([dimarub2000](https://github.com/dimarub2000))
+- Support (optional, disabled by default) redirects for URL storage. [\#6914](https://github.com/ClickHouse/ClickHouse/pull/6914) ([maqroll](https://github.com/maqroll))
+- Added an informational message when a client with an older version connects to the server. [\#6893](https://github.com/ClickHouse/ClickHouse/pull/6893) ([Philipp Malkovsky](https://github.com/malkfilipp))
+- Removed the maximum backoff sleep-time limit for sending data in Distributed tables. [\#6895](https://github.com/ClickHouse/ClickHouse/pull/6895) ([Azat Khuzhin](https://github.com/azat))
+- Added the ability to send profile events (counters) with cumulative values to Graphite. It can be enabled under `` in the server `config.xml`. [\#6969](https://github.com/ClickHouse/ClickHouse/pull/6969) ([Azat Khuzhin](https://github.com/azat))
+- Added automatic conversion of type `T` to `LowCardinality(T)` while inserting data over HTTP in the Native format into a column of type `LowCardinality(T)`. [\#6891](https://github.com/ClickHouse/ClickHouse/pull/6891) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Added the ability to use the function `hex` for `Float32` and `Float64` without using `reinterpretAsString`. [\#7024](https://github.com/ClickHouse/ClickHouse/pull/7024) ([Mikhail Korotov](https://github.com/millb))
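+
+A combined illustration of the bitmap functions from the feature list above (a sketch; assumes a 19.15+ server):
+
+``` sql
+SELECT
+    bitmapMin(b) AS mn,                                  -- 1
+    bitmapMax(b) AS mx,                                  -- 5
+    bitmapToArray(bitmapSubsetLimit(b, 2, 3)) AS subset  -- [2, 3, 4]
+FROM (SELECT bitmapBuild([1, 2, 3, 4, 5]) AS b);
+```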
+#### Build/testing/packaging improvement {#buildtestingpackaging-improvement-2}
+
+- Added gdb-index to the clickhouse binary with debug info. It speeds up the startup time of `gdb`. [\#6947](https://github.com/ClickHouse/ClickHouse/pull/6947) ([alesapin](https://github.com/alesapin))
+- Sped up deb packaging with a patched dpkg-deb that uses `pigz`. [\#6960](https://github.com/ClickHouse/ClickHouse/pull/6960) ([alesapin](https://github.com/alesapin))
+- Set `enable_fuzzing = 1` to enable libfuzzer instrumentation of all project code. [\#7042](https://github.com/ClickHouse/ClickHouse/pull/7042) ([kyprizel](https://github.com/kyprizel))
+- Added a split-build smoke test in CI. [\#7061](https://github.com/ClickHouse/ClickHouse/pull/7061) ([alesapin](https://github.com/alesapin))
+- Added a build with MemorySanitizer to CI. [\#7066](https://github.com/ClickHouse/ClickHouse/pull/7066) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Replaced `libsparsehash` with `sparsehash-c11`. [\#6965](https://github.com/ClickHouse/ClickHouse/pull/6965) ([Azat Khuzhin](https://github.com/azat))
+
+#### Bug fix {#bug-fix-5}
+
+- Fixed a performance degradation of index analysis on complex keys on large tables. This fixes #6924. [\#7075](https://github.com/ClickHouse/ClickHouse/pull/7075) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a logical error causing segfaults when selecting from an empty Kafka topic. [\#6909](https://github.com/ClickHouse/ClickHouse/pull/6909) ([Ivan](https://github.com/abyss7))
+- Fixed too-early closing of MySQL connections in `MySQLBlockInputStream.cpp`. [\#6882](https://github.com/ClickHouse/ClickHouse/pull/6882) ([Clément Rodriguez](https://github.com/clemrodriguez))
+- Returned support for very old Linux kernels (fixes [\#6841](https://github.com/ClickHouse/ClickHouse/issues/6841)). [\#6853](https://github.com/ClickHouse/ClickHouse/pull/6853) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed possible data loss in `insert select` queries in case of an empty block in the input stream. \#6834 \#6862 [\#6911](https://github.com/ClickHouse/ClickHouse/pull/6911) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed the function `АrrayEnumerateUniqRanked` with empty arrays in the arguments. [\#6928](https://github.com/ClickHouse/ClickHouse/pull/6928) ([proller](https://github.com/proller))
+- Fixed complex queries with array joins and global subqueries. [\#6934](https://github.com/ClickHouse/ClickHouse/pull/6934) ([Ivan](https://github.com/abyss7))
+- Fixed the `Unknown identifier` error in ORDER BY and GROUP BY with multiple JOINs. [\#7022](https://github.com/ClickHouse/ClickHouse/pull/7022) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed an `MSan` warning while executing functions with `LowCardinality` arguments. [\#7062](https://github.com/ClickHouse/ClickHouse/pull/7062) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+
+#### Backward incompatible change {#backward-incompatible-change-2}
+
+- Changed the serialization format of bitmap\* aggregate function states to improve performance. Serialized states of bitmap\* from previous versions cannot be read. [\#6908](https://github.com/ClickHouse/ClickHouse/pull/6908) ([Zhichang Yu](https://github.com/yuzhichang))
+
+## ClickHouse release 19.14 {#clickhouse-release-19-14}
+
+### ClickHouse release 19.14.7.15, 2019-10-02 {#clickhouse-release-19-14-7-15-2019-10-02}
+
+#### Bug fix {#bug-fix-6}
+
+- This release also contains all bug fixes from 19.11.12.69.
+- Fixed compatibility for distributed queries between 19.14 and earlier versions. This fixes [\#7068](https://github.com/ClickHouse/ClickHouse/issues/7068). [\#7069](https://github.com/ClickHouse/ClickHouse/pull/7069) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse release 19.14.6.12, 2019-09-19 {#clickhouse-release-19-14-6-12-2019-09-19}
+
+#### Bug fix {#bug-fix-7}
+
+- Fixed the function `АrrayEnumerateUniqRanked` with empty arrays in the arguments. [\#6928](https://github.com/ClickHouse/ClickHouse/pull/6928) ([proller](https://github.com/proller))
+- Fixed subquery names in queries with `ARRAY JOIN` and `GLOBAL IN subquery` with aliases. Use the subquery alias for the external table name if it is specified. [\#6934](https://github.com/ClickHouse/ClickHouse/pull/6934) ([Ivan](https://github.com/abyss7))
+
+#### Build/testing/packaging improvement {#buildtestingpackaging-improvement-3}
+
+- Fixed the [flapping](https://clickhouse-test-reports.s3.yandex.net/6944/aab95fd5175a513413c7395a73a82044bdafb906/functional_stateless_tests_(debug).html) test `00715_fetch_merged_or_mutated_part_zookeeper` by rewriting it as a shell script, because it needs to wait for mutations to apply. [\#6977](https://github.com/ClickHouse/ClickHouse/pull/6977) ([Alexander Kazakov](https://github.com/Akazz))
+- Fixed UBSan and MemSan failures in the function `groupUniqArray` with an empty-array argument. It was caused by placing an empty `PaddedPODArray` into a hash table zero cell, because the constructor for the zero-cell value was not called. [\#6937](https://github.com/ClickHouse/ClickHouse/pull/6937) ([Amos Bird](https://github.com/amosbird))
+
+### ClickHouse release 19.14.3.3, 2019-09-10 {#clickhouse-release-19-14-3-3-2019-09-10}
+
+#### New feature {#new-feature-4}
+
+- `WITH FILL` modifier for `ORDER BY`; see the sketch after this list. (continuation of [\#5069](https://github.com/ClickHouse/ClickHouse/issues/5069)) [\#6610](https://github.com/ClickHouse/ClickHouse/pull/6610) ([Anton Popov](https://github.com/CurtizJ))
+- `WITH TIES` modifier for `LIMIT`. (continuation of [\#5069](https://github.com/ClickHouse/ClickHouse/issues/5069)) [\#6610](https://github.com/ClickHouse/ClickHouse/pull/6610) ([Anton Popov](https://github.com/CurtizJ))
+- Parse an unquoted `NULL` literal as NULL (if the setting `format_csv_unquoted_null_literal_as_null=1`). Initialize null fields with default values if the data type of the field is not nullable (if the setting `input_format_null_as_default=1`). [\#5990](https://github.com/ClickHouse/ClickHouse/issues/5990) [\#6055](https://github.com/ClickHouse/ClickHouse/pull/6055) ([tavplubix](https://github.com/tavplubix))
+- Support wildcards in paths of the table functions `file` and `hdfs`. If the path contains wildcards, the table will be read-only. Usage examples: `select * from hdfs('hdfs://hdfs1:9000/some_dir/another_dir/*/file{0..9}{0..9}')` and `select * from file('some_dir/{some_file,another_file,yet_another}.tsv', 'TSV', 'value UInt32')`. [\#6092](https://github.com/ClickHouse/ClickHouse/pull/6092) ([Olga Khvostikova](https://github.com/stavrolia))
+- New `system.metric_log` table, which stores the values of `system.events` and `system.metrics` at a specified time interval. [\#6363](https://github.com/ClickHouse/ClickHouse/issues/6363) [\#6467](https://github.com/ClickHouse/ClickHouse/pull/6467) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) [\#6530](https://github.com/ClickHouse/ClickHouse/pull/6530) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Allow writing ClickHouse text logs to the `system.text_log` table. [\#6037](https://github.com/ClickHouse/ClickHouse/issues/6037) [\#6103](https://github.com/ClickHouse/ClickHouse/pull/6103) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) [\#6164](https://github.com/ClickHouse/ClickHouse/pull/6164) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Show private symbols in stack traces (done by parsing the symbol tables of ELF files). Added information about the file and line number in stack traces if debug info is present. Sped up symbol-name lookup using indexed symbols present in the program. Added new SQL functions for introspection: `demangle` and `addressToLine`. Renamed the function `symbolizeAddress` to `addressToSymbol` for consistency. The function `addressToSymbol` returns a mangled name for performance reasons, and you have to apply `demangle`. Added the setting `allow_introspection_functions`, which is turned off by default. [\#6201](https://github.com/ClickHouse/ClickHouse/pull/6201) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Table function `values` (the name is case-insensitive). It allows reading from a `VALUES` list, as proposed in [\#5984](https://github.com/ClickHouse/ClickHouse/issues/5984). Example: `SELECT * FROM VALUES('a UInt64, s String', (1, 'one'), (2, 'two'), (3, 'three'))`. [\#6217](https://github.com/ClickHouse/ClickHouse/issues/6217). [\#6209](https://github.com/ClickHouse/ClickHouse/pull/6209) ([dimarub2000](https://github.com/dimarub2000))
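+
+A minimal sketch of the `WITH FILL` modifier announced above (made-up data; a 19.14+ server is assumed):
+
+``` sql
+-- Rows for the skipped dates (Sep 2, 3, 5, 6) are generated with default values:
+SELECT toDate('2019-09-01') + number * 3 AS d, 1 AS v
+FROM numbers(3)
+ORDER BY d WITH FILL STEP 1;
+```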
+- Added the ability to alter storage settings. Syntax: `ALTER TABLE MODIFY SETTING = `. [\#6366](https://github.com/ClickHouse/ClickHouse/pull/6366) [\#6669](https://github.com/ClickHouse/ClickHouse/pull/6669) [\#6685](https://github.com/ClickHouse/ClickHouse/pull/6685) ([alesapin](https://github.com/alesapin))
+- Support for removing detached parts. Syntax: `ALTER TABLE DROP DETACHED PART ''`. [\#6158](https://github.com/ClickHouse/ClickHouse/pull/6158) ([tavplubix](https://github.com/tavplubix))
+- Table constraints. Allow adding constraints to a table definition that will be checked at insert. [\#5273](https://github.com/ClickHouse/ClickHouse/pull/5273) ([Gleb Novikov](https://github.com/NanoBjorn)) [\#6652](https://github.com/ClickHouse/ClickHouse/pull/6652) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Support for cascading materialized views. [\#6324](https://github.com/ClickHouse/ClickHouse/pull/6324) ([Amos Bird](https://github.com/amosbird))
+- Turn on the query profiler by default to sample every query execution thread once a second. [\#6283](https://github.com/ClickHouse/ClickHouse/pull/6283) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Input format `ORC`. [\#6454](https://github.com/ClickHouse/ClickHouse/pull/6454) [\#6703](https://github.com/ClickHouse/ClickHouse/pull/6703) ([akonyaev90](https://github.com/akonyaev90))
+- Added two new functions: `sigmoid` and `tanh` (useful for machine-learning applications). [\#6254](https://github.com/ClickHouse/ClickHouse/pull/6254) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Functions `hasToken(haystack, token)` and `hasTokenCaseInsensitive(haystack, token)` to check if a given token is in the haystack. A token is a maximal-length substring between two non-alphanumeric ASCII characters (or the boundaries of the haystack). The token must be a constant string. Supported by the tokenbf\_v1 index specialization. [\#6596](https://github.com/ClickHouse/ClickHouse/pull/6596), [\#6662](https://github.com/ClickHouse/ClickHouse/pull/6662) ([Vasily Nemkov](https://github.com/Enmk))
+- New function `neighbor(value, offset[, default_value])`. Allows reaching the previous/next value within a column in a block of data. [\#5925](https://github.com/ClickHouse/ClickHouse/pull/5925) ([Alex Krash](https://github.com/alex-krash)) [6685365ab8c5b74f9650492c88a012596eb1b0c6](https://github.com/ClickHouse/ClickHouse/commit/6685365ab8c5b74f9650492c88a012596eb1b0c6) [341e2e4587a18065c2da1ca888c73389f48ce36c](https://github.com/ClickHouse/ClickHouse/commit/341e2e4587a18065c2da1ca888c73389f48ce36c) [Alexey Milovidov](https://github.com/alexey-milovidov)
+- Created a function `currentUser()` that returns the login of the authorized user. Added the alias `user()` for compatibility with MySQL. [\#6470](https://github.com/ClickHouse/ClickHouse/pull/6470) ([Alex Krash](https://github.com/alex-krash))
+- New aggregate functions `quantilesExactInclusive` and `quantilesExactExclusive`, as proposed in [\#5885](https://github.com/ClickHouse/ClickHouse/issues/5885). [\#6477](https://github.com/ClickHouse/ClickHouse/pull/6477) ([dimarub2000](https://github.com/dimarub2000))
+- Function `bitmapRange(bitmap, range_begin, range_end)` that returns a new set with the specified range (not including `range_end`); see the combined sketch below. [\#6314](https://github.com/ClickHouse/ClickHouse/pull/6314) ([Zhichang Yu](https://github.com/yuzhichang))
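+
+A combined sketch of a few of the functions introduced above (`hasToken`, `neighbor`, `bitmapRange`); the values are made up:
+
+``` sql
+SELECT
+    hasToken('Hello, world', 'world')                               AS has,  -- 1
+    neighbor(number, -1, 999)                                       AS prev, -- previous value in the block
+    bitmapToArray(bitmapRange(bitmapBuild([1, 2, 3, 4, 5]), 2, 5))  AS rng   -- [2, 3, 4]
+FROM numbers(3);
+```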
+- Function `geohashesInBox(longitude_min, latitude_min, longitude_max, latitude_max, precision)` that creates an array of geohash-encoded strings of the given precision that cover the provided area. [\#6127](https://github.com/ClickHouse/ClickHouse/pull/6127) ([Vasily Nemkov](https://github.com/Enmk))
+- Implemented support for INSERT queries into `Kafka` tables. [\#6012](https://github.com/ClickHouse/ClickHouse/pull/6012) ([Ivan](https://github.com/abyss7))
+- Added support for the `_partition` and `_timestamp` virtual columns in the Kafka engine; see the sketch after this list. [\#6400](https://github.com/ClickHouse/ClickHouse/pull/6400) ([Ivan](https://github.com/abyss7))
+- Possibility to remove sensitive data from `query_log`, server logs, and the process list with regexp-based rules. [\#5710](https://github.com/ClickHouse/ClickHouse/pull/5710) ([filimonov](https://github.com/filimonov))
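+
+A sketch of the new Kafka capabilities from the list above: INSERT support and the `_partition`/`_timestamp` virtual columns. The broker, topic, and table are hypothetical names:
+
+``` sql
+CREATE TABLE queue (k UInt64, v String)
+ENGINE = Kafka
+SETTINGS kafka_broker_list = 'kafka:9092', kafka_topic_list = 'events',
+         kafka_group_name = 'ch', kafka_format = 'JSONEachRow';
+
+INSERT INTO queue VALUES (1, 'one');             -- produces into the topic
+SELECT k, v, _partition, _timestamp FROM queue;  -- virtual columns on read
+```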
+
+#### Experimental feature {#experimental-feature-2}
+
+- Input and output data format `Template`. It allows specifying a custom format string for input and output. [\#4354](https://github.com/ClickHouse/ClickHouse/issues/4354) [\#6727](https://github.com/ClickHouse/ClickHouse/pull/6727) ([tavplubix](https://github.com/tavplubix))
+- Implementation of `LIVE VIEW` tables that were originally proposed in [\#2898](https://github.com/ClickHouse/ClickHouse/pull/2898), prepared in [\#3925](https://github.com/ClickHouse/ClickHouse/issues/3925), and then updated in [\#5541](https://github.com/ClickHouse/ClickHouse/issues/5541). See [\#5541](https://github.com/ClickHouse/ClickHouse/issues/5541) for a detailed description. [\#5541](https://github.com/ClickHouse/ClickHouse/issues/5541) ([vzakaznikov](https://github.com/vzakaznikov)) [\#6425](https://github.com/ClickHouse/ClickHouse/pull/6425) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) [\#6656](https://github.com/ClickHouse/ClickHouse/pull/6656) ([vzakaznikov](https://github.com/vzakaznikov)) Note that the `LIVE VIEW` feature may be removed in the next versions.
+
+#### Bug fix {#bug-fix-8}
+
+- This release also contains all bug fixes from 19.13 and 19.11.
+- Fixed a segmentation fault when the table has skip indices and vertical merge happens. [\#6723](https://github.com/ClickHouse/ClickHouse/pull/6723) ([alesapin](https://github.com/alesapin))
+- Fixed per-column TTL with non-trivial column defaults. Previously, in the case of a forced TTL merge with an `OPTIMIZE ... FINAL` query, expired values were replaced by type defaults instead of the user-specified column defaults. [\#6796](https://github.com/ClickHouse/ClickHouse/pull/6796) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed the Kafka message-duplication problem on normal server restart. [\#6597](https://github.com/ClickHouse/ClickHouse/pull/6597) ([Ivan](https://github.com/abyss7))
+- Fixed an infinite loop when reading Kafka messages. Do not pause/resume the consumer on subscription at all; otherwise it may get paused indefinitely in some scenarios. [\#6354](https://github.com/ClickHouse/ClickHouse/pull/6354) ([Ivan](https://github.com/abyss7))
+- Fixed the `Key expression contains comparison between inconvertible types` exception in the `bitmapContains` function. [\#6136](https://github.com/ClickHouse/ClickHouse/issues/6136) [\#6146](https://github.com/ClickHouse/ClickHouse/issues/6146) [\#6156](https://github.com/ClickHouse/ClickHouse/pull/6156) ([dimarub2000](https://github.com/dimarub2000))
+- Fixed a segfault with `optimize_skip_unused_shards` enabled and a missing sharding key. [\#6384](https://github.com/ClickHouse/ClickHouse/pull/6384) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed bad code in mutations that may lead to memory corruption. Fixed a segfault with reads at the address `0x14c0` that could happen due to concurrent `DROP TABLE` and `SELECT` from `system.parts` or `system.parts_columns`. Fixed a race condition in preparation of mutation queries. Fixed a deadlock caused by `OPTIMIZE` of replicated tables and concurrent modification operations like ALTERs. [\#6514](https://github.com/ClickHouse/ClickHouse/pull/6514) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Removed extra verbose logging in the MySQL interface. [\#6389](https://github.com/ClickHouse/ClickHouse/pull/6389) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Returned the ability to parse boolean settings from 'true' and 'false' in configuration files. [\#6278](https://github.com/ClickHouse/ClickHouse/pull/6278) ([alesapin](https://github.com/alesapin))
+- Fixed a crash in the `quantile` and `median` functions over `Nullable(Decimal128)`. [\#6378](https://github.com/ClickHouse/ClickHouse/pull/6378) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed possible incomplete results returned by `SELECT` queries with a `WHERE` condition on the primary key that contains a conversion to a float type. It was caused by an incorrect monotonicity check for the `toFloat` functions. [\#6248](https://github.com/ClickHouse/ClickHouse/issues/6248) [\#6374](https://github.com/ClickHouse/ClickHouse/pull/6374) ([dimarub2000](https://github.com/dimarub2000))
+- Check the `max_expanded_ast_elements` setting for mutations. Clear mutations after `TRUNCATE TABLE`. [\#6205](https://github.com/ClickHouse/ClickHouse/pull/6205) ([Winter Zhang](https://github.com/zhang2014))
+- Fixed JOIN results for key columns when used with `join_use_nulls`. Attach NULLs instead of column defaults. [\#6249](https://github.com/ClickHouse/ClickHouse/pull/6249) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed skip indices with vertical merges and ALTER. Fixed the `Bad size of marks file` exception. [\#6594](https://github.com/ClickHouse/ClickHouse/issues/6594) [\#6713](https://github.com/ClickHouse/ClickHouse/pull/6713) ([alesapin](https://github.com/alesapin))
+- Fixed a rare crash in `ALTER MODIFY COLUMN` and vertical merge when one of the merged/altered parts is empty (0 rows). [\#6746](https://github.com/ClickHouse/ClickHouse/issues/6746) [\#6780](https://github.com/ClickHouse/ClickHouse/pull/6780) ([alesapin](https://github.com/alesapin))
+- Fixed a bug in conversion of `LowCardinality` types in `AggregateFunctionFactory`. This fixes [\#6257](https://github.com/ClickHouse/ClickHouse/issues/6257). [\#6281](https://github.com/ClickHouse/ClickHouse/pull/6281) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed wrong behavior and possible segfaults in the `topK` and `topKWeighted` aggregate functions. [\#6404](https://github.com/ClickHouse/ClickHouse/pull/6404) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed unsafe code around the `getIdentifier` function. [\#6401](https://github.com/ClickHouse/ClickHouse/issues/6401) [\#6409](https://github.com/ClickHouse/ClickHouse/pull/6409) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a bug in the MySQL wire protocol (used while connecting to ClickHouse from MySQL clients) caused by a heap buffer overflow in `PacketPayloadWriteBuffer`. [\#6212](https://github.com/ClickHouse/ClickHouse/pull/6212) ([Yuriy Baranov](https://github.com/yurriy))
+- Fixed a memory leak in the `bitmapSubsetInRange` function. [\#6819](https://github.com/ClickHouse/ClickHouse/pull/6819) ([Zhichang Yu](https://github.com/yuzhichang))
+- Fixed a rare bug when a mutation is executed after a granularity change. [\#6816](https://github.com/ClickHouse/ClickHouse/pull/6816) ([alesapin](https://github.com/alesapin))
+- Allow protobuf messages with all fields by default. [\#6132](https://github.com/ClickHouse/ClickHouse/pull/6132) ([Vitaly Baranov](https://github.com/vitlibar))
+- Resolved a bug in the `nullIf` function when we send a `NULL` argument as the second argument. [\#6446](https://github.com/ClickHouse/ClickHouse/pull/6446) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Fixed a rare bug with wrong memory allocation/deallocation in complex-key cache dictionaries with string fields that led to infinite memory consumption (looks like a memory leak). The bug reproduced when the string size was a power of two starting from eight (8, 16, 32, etc.). [\#6447](https://github.com/ClickHouse/ClickHouse/pull/6447) ([alesapin](https://github.com/alesapin))
+- Fixed Gorilla encoding on small sequences, which caused the exception `Cannot write after end of buffer`. [\#6398](https://github.com/ClickHouse/ClickHouse/issues/6398) [\#6444](https://github.com/ClickHouse/ClickHouse/pull/6444) ([Vasily Nemkov](https://github.com/Enmk))
+- Allow using not-nullable types in JOINs with `join_use_nulls` enabled. [\#6705](https://github.com/ClickHouse/ClickHouse/pull/6705) ([Artem Zuikov](https://github.com/4ertus2))
+- Disabled `Poco::AbstractConfiguration` substitutions in queries in `clickhouse-client`. [\#6706](https://github.com/ClickHouse/ClickHouse/pull/6706) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Avoid a deadlock in `REPLACE PARTITION`. [\#6677](https://github.com/ClickHouse/ClickHouse/pull/6677) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Using `arrayReduce` for constant arguments could lead to a segfault. [\#6242](https://github.com/ClickHouse/ClickHouse/issues/6242) [\#6326](https://github.com/ClickHouse/ClickHouse/pull/6326) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed inconsistent parts that can appear if a replica was restored after `DROP PARTITION`. [\#6522](https://github.com/ClickHouse/ClickHouse/issues/6522) [\#6523](https://github.com/ClickHouse/ClickHouse/pull/6523) ([tavplubix](https://github.com/tavplubix))
+- Fixed a hang in the `JSONExtractRaw` function. [\#6195](https://github.com/ClickHouse/ClickHouse/issues/6195) [\#6198](https://github.com/ClickHouse/ClickHouse/pull/6198) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a bug with incorrect skip-index serialization and aggregation with adaptive granularity. [\#6594](https://github.com/ClickHouse/ClickHouse/issues/6594). [\#6748](https://github.com/ClickHouse/ClickHouse/pull/6748) ([alesapin](https://github.com/alesapin))
+- Fixed the `WITH ROLLUP` and `WITH CUBE` modifiers of `GROUP BY` with two-level aggregation. [\#6225](https://github.com/ClickHouse/ClickHouse/pull/6225) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed a bug with writing secondary index marks with adaptive granularity. [\#6126](https://github.com/ClickHouse/ClickHouse/pull/6126) ([alesapin](https://github.com/alesapin))
+- Fixed the initialization order on server startup. Since `StorageMergeTree::background_task_handle` is initialized in `startup()`, `MergeTreeBlockOutputStream::write()` may try to use it before initialization. Just check if it is initialized. [\#6080](https://github.com/ClickHouse/ClickHouse/pull/6080) ([Ivan](https://github.com/abyss7))
+- Clear the data buffer from a previous read operation that completed with an error. [\#6026](https://github.com/ClickHouse/ClickHouse/pull/6026) ([Nikolay](https://github.com/bopohaa))
+- Fixed a bug with enabling adaptive granularity when creating a new replica for a Replicated\*MergeTree table. [\#6394](https://github.com/ClickHouse/ClickHouse/issues/6394) [\#6452](https://github.com/ClickHouse/ClickHouse/pull/6452) ([alesapin](https://github.com/alesapin))
+- Fixed a possible crash during server startup in case an exception happened in `libunwind` during an exception accessing an uninitialized `ThreadStatus` structure. [\#6456](https://github.com/ClickHouse/ClickHouse/pull/6456) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Fixed a crash in the `yandexConsistentHash` function. Found by fuzz testing. [\#6304](https://github.com/ClickHouse/ClickHouse/issues/6304) [\#6305](https://github.com/ClickHouse/ClickHouse/pull/6305) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed the possibility of hanging queries when the server is overloaded and the global thread pool becomes nearly full. This has a higher chance of happening on clusters with a large number of shards (hundreds), because distributed queries allocate a thread per connection to each shard. For example, the issue may reproduce if a cluster of 330 shards is processing 30 concurrent distributed queries. This issue affects all versions starting from 19.2. [\#6301](https://github.com/ClickHouse/ClickHouse/pull/6301) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed the logic of the `arrayEnumerateUniqRanked` function. [\#6423](https://github.com/ClickHouse/ClickHouse/pull/6423) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a segfault when decoding the symbol table. [\#6603](https://github.com/ClickHouse/ClickHouse/pull/6603) ([Amos Bird](https://github.com/amosbird))
+- Fixed an irrelevant exception in the cast of `LowCardinality(Nullable)` to a not-Nullable column in case it doesn't contain NULLs (e.g. in a query like `SELECT CAST(CAST('Hello' AS LowCardinality(Nullable(String))) AS String)`). [\#6094](https://github.com/ClickHouse/ClickHouse/issues/6094) [\#6119](https://github.com/ClickHouse/ClickHouse/pull/6119) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Removed extra quoting of descriptions in the `system.settings` table. [\#6696](https://github.com/ClickHouse/ClickHouse/issues/6696) [\#6699](https://github.com/ClickHouse/ClickHouse/pull/6699) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Avoid a possible deadlock in `TRUNCATE` of a replicated table. [\#6695](https://github.com/ClickHouse/ClickHouse/pull/6695) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed reading in order of the sorting key. [\#6189](https://github.com/ClickHouse/ClickHouse/pull/6189) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed `ALTER TABLE ... UPDATE` queries for tables with `enable_mixed_granularity_parts=1`. [\#6543](https://github.com/ClickHouse/ClickHouse/pull/6543) ([alesapin](https://github.com/alesapin))
+- Fixed a bug opened by [\#4405](https://github.com/ClickHouse/ClickHouse/pull/4405) (since 19.4.0). Reproduced in queries to Distributed tables over MergeTree tables when we don't query any columns (`SELECT 1`). [\#6236](https://github.com/ClickHouse/ClickHouse/pull/6236) ([alesapin](https://github.com/alesapin))
+- Fixed overflow in integer division of a signed type by an unsigned type. The behavior was exactly as in the C or C++ languages (integer promotion rules), which may be surprising. Please note that overflow is still possible when dividing a large signed number by a large unsigned number or vice versa (but that case is less usual). The issue existed in all server versions. [\#6214](https://github.com/ClickHouse/ClickHouse/issues/6214) [\#6233](https://github.com/ClickHouse/ClickHouse/pull/6233) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Limit the maximum sleep time for throttling when `max_execution_speed` or `max_execution_speed_bytes` is set. Fixed false errors like `Estimated query execution time (inf seconds) is too long`. [\#5547](https://github.com/ClickHouse/ClickHouse/issues/5547) [\#6232](https://github.com/ClickHouse/ClickHouse/pull/6232) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed issues with using `MATERIALIZED` columns and aliases in `MaterializedView`. [\#448](https://github.com/ClickHouse/ClickHouse/issues/448) [\#3484](https://github.com/ClickHouse/ClickHouse/issues/3484) [\#3450](https://github.com/ClickHouse/ClickHouse/issues/3450) [\#2878](https://github.com/ClickHouse/ClickHouse/issues/2878) [\#2285](https://github.com/ClickHouse/ClickHouse/issues/2285) [\#3796](https://github.com/ClickHouse/ClickHouse/pull/3796) ([Amos Bird](https://github.com/amosbird)) [\#6316](https://github.com/ClickHouse/ClickHouse/pull/6316) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed the behavior of `FormatFactory` for input streams that are not implemented as processors. [\#6495](https://github.com/ClickHouse/ClickHouse/pull/6495) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed a typo. [\#6631](https://github.com/ClickHouse/ClickHouse/pull/6631) ([Alex Ryndin](https://github.com/alexryndin))
+- Typo in an error message (is -\> are). [\#6839](https://github.com/ClickHouse/ClickHouse/pull/6839) ([Denis Zhuravlev](https://github.com/den-crane))
+- Fixed an error while parsing a column list from a string if the type contained a comma (this issue was relevant for the `File`, `URL`, `HDFS` storages) [\#6217](https://github.com/ClickHouse/ClickHouse/issues/6217). [\#6209](https://github.com/ClickHouse/ClickHouse/pull/6209) ([dimarub2000](https://github.com/dimarub2000))
+
+#### Security fix {#security-fix}
+
+- This release also contains all bug security fixes from 19.13 and 19.11.
+- Fixed the possibility of a crafted query crashing the server due to stack overflow in the SQL parser. Fixed the possibility of stack overflow in Merge and Distributed tables, materialized views, and conditions for row-level security that involve subqueries. [\#6433](https://github.com/ClickHouse/ClickHouse/pull/6433) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Improvement {#improvement-3}
+
+- Correct implementation of ternary logic for `AND/OR`. [\#6048](https://github.com/ClickHouse/ClickHouse/pull/6048) ([Alexander Kazakov](https://github.com/Akazz))
+- Now values and rows with expired TTL will be removed after an `OPTIMIZE ... FINAL` query from old parts without TTL infos or with outdated TTL infos, e.g. after an `ALTER ... MODIFY TTL` query. Added the queries `SYSTEM STOP/START TTL MERGES` to disallow/allow assigning merges with TTL and filtering expired values in all merges; see the sketch below. [\#6274](https://github.com/ClickHouse/ClickHouse/pull/6274) ([Anton Popov](https://github.com/CurtizJ))
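+
+A minimal sketch of the TTL workflow described in the item above (the table is hypothetical; assumes a 19.14+ server):
+
+``` sql
+ALTER TABLE events MODIFY TTL EventDate + INTERVAL 1 MONTH;
+SYSTEM STOP TTL MERGES;       -- temporarily keep expired rows
+SYSTEM START TTL MERGES;
+OPTIMIZE TABLE events FINAL;  -- force a merge that filters out expired rows
+```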
+- The location of the ClickHouse history file for the client can be changed using the `CLICKHOUSE_HISTORY_FILE` env variable. [\#6840](https://github.com/ClickHouse/ClickHouse/pull/6840) ([filimonov](https://github.com/filimonov))
+- Removed the `dry_run` flag from `InterpreterSelectQuery`. … [\#6375](https://github.com/ClickHouse/ClickHouse/pull/6375) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Support `ASOF JOIN` with an `ON` clause; see the sketch after this list. [\#6211](https://github.com/ClickHouse/ClickHouse/pull/6211) ([Artem Zuikov](https://github.com/4ertus2))
+- Better support of skip indexes for mutations and replication. Support for the `MATERIALIZE/CLEAR INDEX ... IN PARTITION` query. `UPDATE x = x` recalculates all indexes that use the column `x`. [\#5053](https://github.com/ClickHouse/ClickHouse/pull/5053) ([Nikita Vasilev](https://github.com/nikvas0))
+- Allow `ATTACH` of live views (for example, at server startup) regardless of the `allow_experimental_live_view` setting. [\#6754](https://github.com/ClickHouse/ClickHouse/pull/6754) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- For stack traces gathered by the query profiler, do not include stack frames generated by the query profiler itself. [\#6250](https://github.com/ClickHouse/ClickHouse/pull/6250) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Now the table functions `values`, `file`, `url`, and `hdfs` support ALIAS columns. [\#6255](https://github.com/ClickHouse/ClickHouse/pull/6255) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Throw an exception if a `config.d` file doesn't have the corresponding root element as the config file. [\#6123](https://github.com/ClickHouse/ClickHouse/pull/6123) ([dimarub2000](https://github.com/dimarub2000))
+- Print extra info in the exception message for `no space left on device`. [\#6182](https://github.com/ClickHouse/ClickHouse/issues/6182), [\#6252](https://github.com/ClickHouse/ClickHouse/issues/6252) [\#6352](https://github.com/ClickHouse/ClickHouse/pull/6352) ([tavplubix](https://github.com/tavplubix))
+- When determining the shards of a `Distributed` table to be covered by a read query (for `optimize_skip_unused_shards` = 1), ClickHouse now checks conditions from both the `prewhere` and `where` clauses of the select statement. [\#6521](https://github.com/ClickHouse/ClickHouse/pull/6521) ([Alexander Kazakov](https://github.com/Akazz))
+- Enabled `SIMDJSON` for machines without AVX2 but with the SSE4.2 and PCLMUL instruction sets. [\#6285](https://github.com/ClickHouse/ClickHouse/issues/6285) [\#6320](https://github.com/ClickHouse/ClickHouse/pull/6320) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- ClickHouse can work on filesystems without `O_DIRECT` support (such as ZFS and BtrFS) without extra tuning. [\#4449](https://github.com/ClickHouse/ClickHouse/issues/4449) [\#6730](https://github.com/ClickHouse/ClickHouse/pull/6730) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Support push-down predicates for the final subquery. [\#6120](https://github.com/ClickHouse/ClickHouse/pull/6120) ([TCeason](https://github.com/TCeason)) [\#6162](https://github.com/ClickHouse/ClickHouse/pull/6162) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Better `JOIN ON` key extraction. [\#6131](https://github.com/ClickHouse/ClickHouse/pull/6131) ([Artem Zuikov](https://github.com/4ertus2))
+- Updated `SIMDJSON`. [\#6285](https://github.com/ClickHouse/ClickHouse/issues/6285). [\#6306](https://github.com/ClickHouse/ClickHouse/pull/6306) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Optimized the selection of the smallest column for `SELECT count()` queries. [\#6344](https://github.com/ClickHouse/ClickHouse/pull/6344) ([Amos Bird](https://github.com/amosbird))
+- Added a `strict` parameter in `windowFunnel()`. When `strict` is set, `windowFunnel()` applies the conditions only to unique values. [\#6548](https://github.com/ClickHouse/ClickHouse/pull/6548) ([achimbab](https://github.com/achimbab))
+- Safer interface of `mysqlxx::Pool`. [\#6150](https://github.com/ClickHouse/ClickHouse/pull/6150) ([avasiliev](https://github.com/avasiliev))
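+
+A sketch of `ASOF JOIN` with an `ON` clause, as mentioned in the list above: it matches each left-side row with the closest earlier right-side row. The tables and columns are made-up names:
+
+``` sql
+SELECT t.sym, t.ts, q.bid
+FROM trades AS t
+ASOF JOIN quotes AS q ON t.sym = q.sym AND t.ts >= q.ts;
+```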
+- Line size when executing with the `--help` option now corresponds to the terminal size. [\#6590](https://github.com/ClickHouse/ClickHouse/pull/6590) ([dimarub2000](https://github.com/dimarub2000))
+- Disabled the "read in order" optimization for aggregation without keys. [\#6599](https://github.com/ClickHouse/ClickHouse/pull/6599) ([Anton Popov](https://github.com/CurtizJ))
+- HTTP status codes for the `INCORRECT_DATA` and `TYPE_MISMATCH` error codes were changed from the default `500 Internal Server Error` to `400 Bad Request`. [\#6271](https://github.com/ClickHouse/ClickHouse/pull/6271) ([Alexander Rodin](https://github.com/a-rodin))
+- Moved the Join object from `ExpressionAction` into `AnalyzedJoin`. `ExpressionAnalyzer` and `ExpressionAction` do not know about the `Join` class anymore. Its logic is hidden behind the `AnalyzedJoin` iface. [\#6801](https://github.com/ClickHouse/ClickHouse/pull/6801) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed a possible deadlock of distributed queries when one of the shards is localhost but the query is sent over a network connection. [\#6759](https://github.com/ClickHouse/ClickHouse/pull/6759) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Changed the semantics of multiple-table `RENAME` to avoid possible deadlocks. [\#6757](https://github.com/ClickHouse/ClickHouse/issues/6757). [\#6756](https://github.com/ClickHouse/ClickHouse/pull/6756) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Rewrote the MySQL compatibility server to prevent loading the full packet payload in memory. Decreased memory consumption for each connection to approximately `2 * DBMS_DEFAULT_BUFFER_SIZE` (read/write buffers). [\#5811](https://github.com/ClickHouse/ClickHouse/pull/5811) ([Yuriy Baranov](https://github.com/yurriy))
+- Moved AST alias-interpreting logic out of the parser, which doesn't have to know anything about query semantics. [\#6108](https://github.com/ClickHouse/ClickHouse/pull/6108) ([Artem Zuikov](https://github.com/4ertus2))
+- Slightly safer parsing of `NamesAndTypesList`. [\#6408](https://github.com/ClickHouse/ClickHouse/issues/6408). [\#6410](https://github.com/ClickHouse/ClickHouse/pull/6410) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `clickhouse-copier`: allow using `where_condition` from the config with `partition_key` aliases in queries for checking partition existence (earlier it was used only in reading-data queries). [\#6577](https://github.com/ClickHouse/ClickHouse/pull/6577) ([proller](https://github.com/proller))
+- Added an optional message argument in `throwIf`; see the sketch after this list. ([\#5772](https://github.com/ClickHouse/ClickHouse/issues/5772)) [\#6329](https://github.com/ClickHouse/ClickHouse/pull/6329) ([Vdimir](https://github.com/Vdimir))
+- Server exceptions that occur while sending insertion data are now also being processed in the client. [\#5891](https://github.com/ClickHouse/ClickHouse/issues/5891) [\#6711](https://github.com/ClickHouse/ClickHouse/pull/6711) ([dimarub2000](https://github.com/dimarub2000))
+- Added the metric `DistributedFilesToInsert`, which shows the total number of files in the filesystem that are selected to be sent to remote servers by Distributed tables. The number is summed across all shards. [\#6600](https://github.com/ClickHouse/ClickHouse/pull/6600) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Moved most of the JOIN preparation logic from `ExpressionAction/ExpressionAnalyzer` to `AnalyzedJoin`. [\#6785](https://github.com/ClickHouse/ClickHouse/pull/6785) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed a TSan [warning](https://clickhouse-test-reports.s3.yandex.net/6399/c1c1d1daa98e199e620766f1bd06a5921050a00d/functional_stateful_tests_(thread).html) 'lock-order-inversion'. [\#6740](https://github.com/ClickHouse/ClickHouse/pull/6740) ([Vasily Nemkov](https://github.com/Enmk))
+- Better information messages about the lack of Linux capabilities. Log fatal errors with "fatal" level, which makes them easier to find in `system.text_log`. [\#6441](https://github.com/ClickHouse/ClickHouse/pull/6441) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- When dumping temporary data to disk to limit memory usage during `GROUP BY`, `ORDER BY` is enabled, the free disk space was not checked. The fix adds a new setting `min_free_disk_space`: when the free disk space is smaller than the threshold, the query will stop and throw `ErrorCodes::NOT_ENOUGH_SPACE`. [\#6678](https://github.com/ClickHouse/ClickHouse/pull/6678) ([Weiqing Xu](https://github.com/weiqxu)) [\#6691](https://github.com/ClickHouse/ClickHouse/pull/6691) ([alexey-milovidov](https://github.com/alexey-milovidov))
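+
+The optional message argument of `throwIf` mentioned above, in a one-liner (values are made up):
+
+``` sql
+SELECT throwIf(number = 3, 'number 3 is not allowed here') FROM numbers(5);
+-- the query fails with the custom message once row 3 is reached
+```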
+- Removed the recursive rwlock by thread. It made no sense, because threads are reused between queries. A `SELECT` query may acquire a lock in one thread, hold a lock from another thread and exit from the first thread. At the same time, the first thread can be reused by a `DROP` query. This could lead to false "Attempt to acquire exclusive lock recursively" messages. [\#6771](https://github.com/ClickHouse/ClickHouse/pull/6771) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Split `ExpressionAnalyzer.appendJoin()`. Prepared a place in `ExpressionAnalyzer` for `MergeJoin`. [\#6524](https://github.com/ClickHouse/ClickHouse/pull/6524) ([Artem Zuikov](https://github.com/4ertus2))
+- Added the `mysql_native_password` authentication plugin to the MySQL compatibility server. [\#6194](https://github.com/ClickHouse/ClickHouse/pull/6194) ([Yuriy Baranov](https://github.com/yurriy))
+- Fewer `clock_gettime` calls; fixed ABI compatibility between debug/release in `Allocator` (an insignificant issue). [\#6197](https://github.com/ClickHouse/ClickHouse/pull/6197) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Moved `collectUsedColumns` from `ExpressionAnalyzer` to `SyntaxAnalyzer`. `SyntaxAnalyzer` now produces `required_source_columns` itself. [\#6416](https://github.com/ClickHouse/ClickHouse/pull/6416) ([Artem Zuikov](https://github.com/4ertus2))
+- Added the setting `joined_subquery_requires_alias` to require aliases for subselects and table functions in `FROM` when more than one table is present (i.e. queries with JOINs). [\#6733](https://github.com/ClickHouse/ClickHouse/pull/6733) ([Artem Zuikov](https://github.com/4ertus2))
+- Extracted `GetAggregatesVisitor` from the `ExpressionAnalyzer` class. [\#6458](https://github.com/ClickHouse/ClickHouse/pull/6458) ([Artem Zuikov](https://github.com/4ertus2))
+- `system.query_log`: changed the data type of the `type` column to `Enum`. [\#6265](https://github.com/ClickHouse/ClickHouse/pull/6265) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Static linking of the `sha256_password` authentication plugin. [\#6512](https://github.com/ClickHouse/ClickHouse/pull/6512) ([Yuriy Baranov](https://github.com/yurriy))
+- Avoid an extra dependency for the setting `compile` to work. In previous versions, the user may get errors like `cannot open crti.o`, `unable to find library -lc`, etc. [\#6309](https://github.com/ClickHouse/ClickHouse/pull/6309) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- More validation of input that may come from a malicious replica. [\#6303](https://github.com/ClickHouse/ClickHouse/pull/6303) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Now the `clickhouse-obfuscator` file is available in the `clickhouse-client` package. In previous versions it was available as `clickhouse obfuscator` (with whitespace). [\#5816](https://github.com/ClickHouse/ClickHouse/issues/5816) [\#6609](https://github.com/ClickHouse/ClickHouse/pull/6609) ([dimarub2000](https://github.com/dimarub2000))
+- Fixed a deadlock when we have at least two queries reading at least two tables in different orders and another query performing a DDL operation on one of the tables. Fixed another very rare deadlock. [\#6764](https://github.com/ClickHouse/ClickHouse/pull/6764) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added the `os_thread_ids` column to `system.processes` and `system.query_log` for better debugging possibilities; see the sketch after this list. [\#6763](https://github.com/ClickHouse/ClickHouse/pull/6763) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- A workaround for PHP mysqlnd extension bugs that occur when `sha256_password` is used as the default authentication plugin (described in [\#6031](https://github.com/ClickHouse/ClickHouse/issues/6031)). [\#6113](https://github.com/ClickHouse/ClickHouse/pull/6113) ([Yuriy Baranov](https://github.com/yurriy))
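+
+A quick use of the new `os_thread_ids` column mentioned above, e.g. to match a heavy query against `top -H` output:
+
+``` sql
+SELECT query_id, os_thread_ids FROM system.processes;
+```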
+- Removed unneeded places with a change to empty columns. [\#6693](https://github.com/ClickHouse/ClickHouse/pull/6693) ([Artem Zuikov](https://github.com/4ertus2))
+- Set the default value of `queue_max_wait_ms` to zero, because the current value (five seconds) makes no sense. There are rare circumstances when this setting has any use. Added the settings `replace_running_query_max_wait_ms`, `kafka_max_wait_ms` and `connection_pool_max_wait_ms` for disambiguation. [\#6692](https://github.com/ClickHouse/ClickHouse/pull/6692) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Extracted `SelectQueryExpressionAnalyzer` from `ExpressionAnalyzer`. Keep the latter for non-select queries. [\#6499](https://github.com/ClickHouse/ClickHouse/pull/6499) ([Artem Zuikov](https://github.com/4ertus2))
+- Removed duplicated input and output formats. [\#6239](https://github.com/ClickHouse/ClickHouse/pull/6239) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Allow the user to override the `poll_interval` and `idle_connection_timeout` settings on connection. [\#6230](https://github.com/ClickHouse/ClickHouse/pull/6230) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `MergeTree` now has an additional option `ttl_only_drop_parts` (disabled by default) to avoid partial pruning of parts, so that they are dropped completely when all the rows in a part have expired. [\#6191](https://github.com/ClickHouse/ClickHouse/pull/6191) ([Sergi Vladykin](https://github.com/svladykin))
+- Type checks for set index functions. Throw an exception if the function got a wrong type. This fixes a fuzz test with UBSan. [\#6511](https://github.com/ClickHouse/ClickHouse/pull/6511) ([Nikita Vasilev](https://github.com/nikvas0))
+
+#### Performance improvement {#performance-improvement-2}
+
+- Optimize queries with an `ORDER BY expressions` clause, where `expressions` have a coinciding prefix with the sorting key of a `MergeTree` table. This optimization is controlled by the `optimize_read_in_order` setting; see the sketch after this list. [\#6054](https://github.com/ClickHouse/ClickHouse/pull/6054) [\#6629](https://github.com/ClickHouse/ClickHouse/pull/6629) ([Anton Popov](https://github.com/CurtizJ))
+- Allow using multiple threads during parts loading and removal. [\#6372](https://github.com/ClickHouse/ClickHouse/issues/6372) [\#6074](https://github.com/ClickHouse/ClickHouse/issues/6074) [\#6438](https://github.com/ClickHouse/ClickHouse/pull/6438) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Implemented a batch variant of updating aggregate function states. It may lead to performance benefits. [\#6435](https://github.com/ClickHouse/ClickHouse/pull/6435) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Using the `FastOps` library for the functions `exp`, `log`, `sigmoid`, `tanh`. FastOps is a fast vector math library from Mikhail Parakhin (Yandex CTO). Improved performance of the `exp` and `log` functions more than 6 times. The functions `exp` and `log` from `Float32` arguments will return `Float32` (in previous versions they always returned `Float64`). Now `exp(nan)` may return `inf`. The result of the `exp` and `log` functions may not be the machine-representable number nearest to the true answer. [\#6254](https://github.com/ClickHouse/ClickHouse/pull/6254) ([alexey-milovidov](https://github.com/alexey-milovidov)) Using the Danila Kutenin variant to make fastops work. [\#6317](https://github.com/ClickHouse/ClickHouse/pull/6317) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Disabled the consecutive-key optimization for `UInt8/16`. [\#6298](https://github.com/ClickHouse/ClickHouse/pull/6298) [\#6701](https://github.com/ClickHouse/ClickHouse/pull/6701) ([akuzm](https://github.com/akuzm))
+- Improved performance of the `simdjson` library by getting rid of dynamic allocation in `ParsedJson::Iterator`. [\#6479](https://github.com/ClickHouse/ClickHouse/pull/6479) ([Vitaly Baranov](https://github.com/vitlibar))
+- Pre-fault pages when allocating memory with `mmap()`. [\#6667](https://github.com/ClickHouse/ClickHouse/pull/6667) ([akuzm](https://github.com/akuzm))
+- Fixed a performance bug in `Decimal` comparison. [\#6380](https://github.com/ClickHouse/ClickHouse/pull/6380) ([Artem Zuikov](https://github.com/4ertus2))
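+
+A sketch of the `optimize_read_in_order` optimization from the list above; `hits` is a hypothetical MergeTree table with sorting key `(CounterID, EventDate)`:
+
+``` sql
+SET optimize_read_in_order = 1;
+-- ORDER BY shares a prefix with the sorting key, so data is read in order
+-- instead of being fully sorted:
+SELECT CounterID, EventDate FROM hits ORDER BY CounterID LIMIT 10;
+```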
+
+#### Build/testing/packaging improvement {#buildtestingpackaging-improvement-4}
+
+- Removed the Compiler (runtime template instantiation) because we've won over its performance. [\#6646](https://github.com/ClickHouse/ClickHouse/pull/6646) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added a performance test to show the degradation of performance in gcc-9 in a more isolated way. [\#6302](https://github.com/ClickHouse/ClickHouse/pull/6302) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added the table function `numbers_mt`, which is a multi-threaded version of `numbers` (see the sketch after this list). Updated performance tests with hash functions. [\#6554](https://github.com/ClickHouse/ClickHouse/pull/6554) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Comparison mode in `clickhouse-benchmark`. [\#6220](https://github.com/ClickHouse/ClickHouse/issues/6220) [\#6343](https://github.com/ClickHouse/ClickHouse/pull/6343) ([dimarub2000](https://github.com/dimarub2000))
+- Best effort for printing stack traces. Also added `SIGPROF` as a debugging signal to print the stack trace of a running thread. [\#6529](https://github.com/ClickHouse/ClickHouse/pull/6529) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Every function in its own file, part 10. [\#6321](https://github.com/ClickHouse/ClickHouse/pull/6321) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Removed the doubled constant `TABLE_IS_READ_ONLY`. [\#6566](https://github.com/ClickHouse/ClickHouse/pull/6566) ([filimonov](https://github.com/filimonov))
+- Formatting changes for the `StringHashMap` PR [\#5417](https://github.com/ClickHouse/ClickHouse/issues/5417). [\#6700](https://github.com/ClickHouse/ClickHouse/pull/6700) ([akuzm](https://github.com/akuzm))
+- Better subquery for join creation in `ExpressionAnalyzer`. [\#6824](https://github.com/ClickHouse/ClickHouse/pull/6824) ([Artem Zuikov](https://github.com/4ertus2))
+- Removed a redundant condition (found by PVS Studio). [\#6775](https://github.com/ClickHouse/ClickHouse/pull/6775) ([akuzm](https://github.com/akuzm))
+- Separated the hash-table interface for `ReverseIndex`. [\#6672](https://github.com/ClickHouse/ClickHouse/pull/6672) ([akuzm](https://github.com/akuzm))
+- Refactoring of settings. [\#6689](https://github.com/ClickHouse/ClickHouse/pull/6689) ([alesapin](https://github.com/alesapin))
+- Added comments for `set` index functions. [\#6319](https://github.com/ClickHouse/ClickHouse/pull/6319) ([Nikita Vasilev](https://github.com/nikvas0))
+- Increased the OOM score in debug builds on Linux. [\#6152](https://github.com/ClickHouse/ClickHouse/pull/6152) ([akuzm](https://github.com/akuzm))
+- HDFS HA now works in debug builds. [\#6650](https://github.com/ClickHouse/ClickHouse/pull/6650) ([Weiqing Xu](https://github.com/weiqxu))
+- Added a test for `transform_query_for_external_database`. [\#6388](https://github.com/ClickHouse/ClickHouse/pull/6388) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added a test for multiple materialized views for a Kafka table. [\#6509](https://github.com/ClickHouse/ClickHouse/pull/6509) ([Ivan](https://github.com/abyss7))
+- Made a better build scheme. [\#6500](https://github.com/ClickHouse/ClickHouse/pull/6500) ([Ivan](https://github.com/abyss7))
+- Fixed the `test_external_dictionaries` integration test in case it is executed under a non-root user. [\#6507](https://github.com/ClickHouse/ClickHouse/pull/6507) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- The bug reproduces when the total size of written packets exceeds `DBMS_DEFAULT_BUFFER_SIZE`. [\#6204](https://github.com/ClickHouse/ClickHouse/pull/6204) ([Yuriy Baranov](https://github.com/yurriy))
+- Added a test for the `RENAME` table race condition. [\#6752](https://github.com/ClickHouse/ClickHouse/pull/6752) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Avoid a data race on Settings in `KILL QUERY`. [\#6753](https://github.com/ClickHouse/ClickHouse/pull/6753) ([alexey-milovidov](https://github.com/alexey-milovidov))
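+
+The `numbers_mt` table function mentioned above is handy for quick multi-threaded benchmarks:
+
+``` sql
+SELECT count() FROM numbers_mt(1000000000); -- multi-threaded counterpart of numbers()
+```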
+- Added an integration test for error handling by cache dictionaries. [\#6755](https://github.com/ClickHouse/ClickHouse/pull/6755) ([Vitaly Baranov](https://github.com/vitlibar))
+- Disabled parsing of ELF object files on Mac OS, because it makes no sense. [\#6578](https://github.com/ClickHouse/ClickHouse/pull/6578) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Attempt to make the changelog generator better. [\#6327](https://github.com/ClickHouse/ClickHouse/pull/6327) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added the `-Wshadow` switch to GCC. [\#6325](https://github.com/ClickHouse/ClickHouse/pull/6325) ([kreuzerkrieg](https://github.com/kreuzerkrieg))
+- Removed obsolete code for `mimalloc` support. [\#6715](https://github.com/ClickHouse/ClickHouse/pull/6715) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `zlib-ng` determines x86 capabilities and saves this info to global variables. This is done in the deflateInit call, which may be made by different threads simultaneously. To avoid multi-threaded writes, do it at library startup. [\#6141](https://github.com/ClickHouse/ClickHouse/pull/6141) ([akuzm](https://github.com/akuzm))
+- A regression test for a bug in JOIN which was fixed in [\#5192](https://github.com/ClickHouse/ClickHouse/issues/5192). [\#6147](https://github.com/ClickHouse/ClickHouse/pull/6147) ([Bakhtiyor Ruziev](https://github.com/theruziev))
+- Fixed an MSan report. [\#6144](https://github.com/ClickHouse/ClickHouse/pull/6144) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a flapping TTL test. [\#6782](https://github.com/ClickHouse/ClickHouse/pull/6782) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed a false data race on the `MergeTreeDataPart::is_frozen` field. [\#6583](https://github.com/ClickHouse/ClickHouse/pull/6583) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed timeouts in a fuzz test. In previous versions, it managed to find a false hangup in the query `SELECT * FROM numbers_mt(gccMurmurHash(''))`. [\#6582](https://github.com/ClickHouse/ClickHouse/pull/6582) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added debug checks to `static_cast` of columns. [\#6581](https://github.com/ClickHouse/ClickHouse/pull/6581) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Support for Oracle Linux in the official RPM packages. [\#6356](https://github.com/ClickHouse/ClickHouse/issues/6356) [\#6585](https://github.com/ClickHouse/ClickHouse/pull/6585) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Changed JSON perftests from `once` to `loop` type. [\#6536](https://github.com/ClickHouse/ClickHouse/pull/6536) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- `odbc-bridge.cpp` defines `main()`, so it should not be included in `clickhouse-lib`. [\#6538](https://github.com/ClickHouse/ClickHouse/pull/6538) ([Orivej Desh](https://github.com/orivej))
+- Test for a crash in `FULL|RIGHT JOIN` with NULLs in the right table's keys. [\#6362](https://github.com/ClickHouse/ClickHouse/pull/6362) ([Artem Zuikov](https://github.com/4ertus2))
+- Added a test for the limit on expansion of aliases, just in case. [\#6442](https://github.com/ClickHouse/ClickHouse/pull/6442) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Switched from `boost::filesystem` to `std::filesystem` where appropriate. [\#6253](https://github.com/ClickHouse/ClickHouse/pull/6253) [\#6385](https://github.com/ClickHouse/ClickHouse/pull/6385) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added RPM packages to the website. [\#6251](https://github.com/ClickHouse/ClickHouse/pull/6251) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added a test for the fixed `Unknown identifier` exception in the `IN` clause. [\#6708](https://github.com/ClickHouse/ClickHouse/pull/6708) ([Artem Zuikov](https://github.com/4ertus2))
+- Simplified `shared_ptr_helper`, because people were facing difficulties understanding it. [\#6675](https://github.com/ClickHouse/ClickHouse/pull/6675) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added performance tests for the fixed Gorilla and DoubleDelta codecs. [\#6179](https://github.com/ClickHouse/ClickHouse/pull/6179) ([Vasily Nemkov](https://github.com/Enmk))
+- Split the integration test `test_dictionaries` into four separate tests. [\#6776](https://github.com/ClickHouse/ClickHouse/pull/6776) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fixed a PVS-Studio warning in `PipelineExecutor`. [\#6777](https://github.com/ClickHouse/ClickHouse/pull/6777) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Allow using the `library` dictionary source with ASan. [\#6482](https://github.com/ClickHouse/ClickHouse/pull/6482) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added an option to generate a changelog from a list of PRs. [\#6350](https://github.com/ClickHouse/ClickHouse/pull/6350) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Lock the `TinyLog` storage when reading. [\#6226](https://github.com/ClickHouse/ClickHouse/pull/6226) ([akuzm](https://github.com/akuzm))
+- Check for broken symlinks in CI. [\#6634](https://github.com/ClickHouse/ClickHouse/pull/6634) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Increased the timeout for the "stack overflow" test, because it may take a long time in a debug build. [\#6637](https://github.com/ClickHouse/ClickHouse/pull/6637) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added a check for double whitespaces. [\#6643](https://github.com/ClickHouse/ClickHouse/pull/6643) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed `new/delete` memory tracking when built with sanitizers. The tracking is not clear. It only prevents memory-limit exceptions in tests. [\#6450](https://github.com/ClickHouse/ClickHouse/pull/6450) ([Artem Zuikov](https://github.com/4ertus2))
+- Enabled the check of undefined symbols while linking. [\#6453](https://github.com/ClickHouse/ClickHouse/pull/6453) ([Ivan](https://github.com/abyss7))
+- Avoid rebuilding `hyperscan` every day. [\#6307](https://github.com/ClickHouse/ClickHouse/pull/6307) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a UBSan report in `ProtobufWriter`. [\#6163](https://github.com/ClickHouse/ClickHouse/pull/6163) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Don't allow using the query profiler with sanitizers, because it is not compatible. [\#6769](https://github.com/ClickHouse/ClickHouse/pull/6769) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added a test for reloading a dictionary after failure by timer. [\#6114](https://github.com/ClickHouse/ClickHouse/pull/6114) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fixed an inconsistency in the `PipelineExecutor::prepareProcessor` argument type. [\#6494](https://github.com/ClickHouse/ClickHouse/pull/6494) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Added a test with bad URIs. [\#6493](https://github.com/ClickHouse/ClickHouse/pull/6493) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added more checks to the `CAST` function. This should get more information about the segmentation fault in fuzz tests. [\#6346](https://github.com/ClickHouse/ClickHouse/pull/6346) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Added `gcc-9` support to the `docker/builder` container that builds the image locally. [\#6333](https://github.com/ClickHouse/ClickHouse/pull/6333) ([Gleb Novikov](https://github.com/NanoBjorn))
+- A test for primary keys with `LowCardinality(String)`. [\#5044](https://github.com/ClickHouse/ClickHouse/issues/5044) [\#6219](https://github.com/ClickHouse/ClickHouse/pull/6219) ([dimarub2000](https://github.com/dimarub2000))
+- Fixed tests affected by slow stack-trace printing. [\#6315](https://github.com/ClickHouse/ClickHouse/pull/6315) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added a test case for the crash in `groupUniqArray` fixed in [\#6029](https://github.com/ClickHouse/ClickHouse/pull/6029). [\#4402](https://github.com/ClickHouse/ClickHouse/issues/4402) [\#6129](https://github.com/ClickHouse/ClickHouse/pull/6129) ([akuzm](https://github.com/akuzm))
+- Fixed index mutation tests. [\#6645](https://github.com/ClickHouse/ClickHouse/pull/6645) ([Nikita Vasilev](https://github.com/nikvas0))
+- In performance tests, do not read the query log for queries we didn't run. [\#6427](https://github.com/ClickHouse/ClickHouse/pull/6427) ([akuzm](https://github.com/akuzm))
+- A materialized view now can be created with any low-cardinality type, regardless of the setting about suspicious low-cardinality types. [\#6428](https://github.com/ClickHouse/ClickHouse/pull/6428) ([Olga Khvostikova](https://github.com/stavrolia))
+- Updated tests for the `send_logs_level` setting. [\#6207](https://github.com/ClickHouse/ClickHouse/pull/6207) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed the build under gcc-8.2. [\#6196](https://github.com/ClickHouse/ClickHouse/pull/6196) ([Max Akhmedov](https://github.com/zlobober))
+- Fixed the build with internal libc++. [\#6724](https://github.com/ClickHouse/ClickHouse/pull/6724) ([Ivan](https://github.com/abyss7))
+- Fixed the shared build with the `rdkafka` library. [\#6101](https://github.com/ClickHouse/ClickHouse/pull/6101) ([Ivan](https://github.com/abyss7))
+- Fixes for the Mac OS build (incomplete). [\#6390](https://github.com/ClickHouse/ClickHouse/pull/6390) ([alexey-milovidov](https://github.com/alexey-milovidov)) [\#6429](https://github.com/ClickHouse/ClickHouse/pull/6429) ([alex-zaitsev](https://github.com/alex-zaitsev))
+- Fixed the "splitted" build. [\#6618](https://github.com/ClickHouse/ClickHouse/pull/6618) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Other build fixes: [\#6186](https://github.com/ClickHouse/ClickHouse/pull/6186) ([Amos Bird](https://github.com/amosbird)) [\#6486](https://github.com/ClickHouse/ClickHouse/pull/6486) [\#6348](https://github.com/ClickHouse/ClickHouse/pull/6348) ([vxider](https://github.com/Vxider)) [\#6744](https://github.com/ClickHouse/ClickHouse/pull/6744) ([Ivan](https://github.com/abyss7)) [\#6016](https://github.com/ClickHouse/ClickHouse/pull/6016) [\#6421](https://github.com/ClickHouse/ClickHouse/pull/6421) [\#6491](https://github.com/ClickHouse/ClickHouse/pull/6491) ([proller](https://github.com/proller))
+
+#### Backward incompatible change {#backward-incompatible-change-3}
+
+- Removed the rarely used table function `catBoostPool` and the storage `CatBoostPool`.
+  If you have used this table function, please write an email to `clickhouse-feedback@yandex-team.com`. Note that the CatBoost integration remains and will be supported. [\#6279](https://github.com/ClickHouse/ClickHouse/pull/6279) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Disabled `ANY RIGHT JOIN` and `ANY FULL JOIN` by default. Set the `any_join_distinct_right_table_keys` setting to enable them. [\#5126](https://github.com/ClickHouse/ClickHouse/issues/5126) [\#6351](https://github.com/ClickHouse/ClickHouse/pull/6351) ([Artem Zuikov](https://github.com/4ertus2))
+
+## ClickHouse release 19.13 {#clickhouse-release-19-13}
+
+### ClickHouse release 19.13.6.51, 2019-10-02 {#clickhouse-release-19-13-6-51-2019-10-02}
+
+#### Bug fix {#bug-fix-9}
+
+- This release also contains all bug fixes from 19.11.12.69.
+
+### ClickHouse release 19.13.5.44, 2019-09-20 {#clickhouse-release-19-13-5-44-2019-09-20}
+
+#### Bug fix {#bug-fix-10}
+
+- This release also contains all bug fixes from 19.14.6.12.
+- Fixed a possible inconsistent state of the table while executing a `DROP` query for a replicated table when ZooKeeper is not accessible. [\#6045](https://github.com/ClickHouse/ClickHouse/issues/6045) [\#6413](https://github.com/ClickHouse/ClickHouse/pull/6413) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Fixed a data race in StorageMerge. [\#6717](https://github.com/ClickHouse/ClickHouse/pull/6717) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a bug introduced in the query profiler that led to endless recv from a socket. [\#6386](https://github.com/ClickHouse/ClickHouse/pull/6386) ([alesapin](https://github.com/alesapin))
+- Fixed excessive CPU usage while executing the `JSONExtractRaw` function over a boolean value. [\#6208](https://github.com/ClickHouse/ClickHouse/pull/6208) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fixed a regression while pushing to a materialized view. [\#6415](https://github.com/ClickHouse/ClickHouse/pull/6415) ([Ivan](https://github.com/abyss7))
+- The table function `url` had a vulnerability which allowed an attacker to inject arbitrary HTTP headers in the request. This issue was found by [Nikita Tikhomirov](https://github.com/NSTikhomirov). [\#6466](https://github.com/ClickHouse/ClickHouse/pull/6466) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a useless `AST` check in the Set index. [\#6510](https://github.com/ClickHouse/ClickHouse/issues/6510) [\#6651](https://github.com/ClickHouse/ClickHouse/pull/6651) ([Nikita Vasilev](https://github.com/nikvas0))
+- Fixed parsing of `AggregateFunction` values embedded in queries. [\#6575](https://github.com/ClickHouse/ClickHouse/issues/6575) [\#6773](https://github.com/ClickHouse/ClickHouse/pull/6773) ([Zhichang Yu](https://github.com/yuzhichang))
+- Fixed wrong behavior of the `trim` function family. [\#6647](https://github.com/ClickHouse/ClickHouse/pull/6647) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse release 19.13.4.32, 2019-09-10 {#clickhouse-release-19-13-4-32-2019-09-10}
+
+#### Bug fix {#bug-fix-11}
+
+- This release also contains all bug security fixes from 19.11.9.52 and 19.11.10.54.
+- Fixed a data race in the `system.parts` table and `ALTER` queries. [\#6245](https://github.com/ClickHouse/ClickHouse/issues/6245) [\#6513](https://github.com/ClickHouse/ClickHouse/pull/6513) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a mismatched header in streams that happened when reading from an empty Distributed table with sample and prewhere. [\#6167](https://github.com/ClickHouse/ClickHouse/issues/6167) ([fancyqlx](https://github.com/fancyqlx)) [\#6823](https://github.com/ClickHouse/ClickHouse/pull/6823) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed a crash when using `IN` clauses with a subquery with a tuple. [\#6125](https://github.com/ClickHouse/ClickHouse/issues/6125) [\#6550](https://github.com/ClickHouse/ClickHouse/pull/6550) ([tavplubix](https://github.com/tavplubix))
+- Fixed the case with the same column names in the `GLOBAL JOIN ON` clause. [\#6181](https://github.com/ClickHouse/ClickHouse/pull/6181) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed a crash when casting types to `Decimal` that do not support it. Throw an exception instead. [\#6297](https://github.com/ClickHouse/ClickHouse/pull/6297) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed a crash in the `extractAll()` function. [\#6644](https://github.com/ClickHouse/ClickHouse/pull/6644) ([Artem Zuikov](https://github.com/4ertus2))
+- Query transformation for the `MySQL`, `ODBC`, `JDBC` table functions now works properly for `SELECT WHERE` queries with multiple `AND` expressions. [\#6381](https://github.com/ClickHouse/ClickHouse/issues/6381) [\#6676](https://github.com/ClickHouse/ClickHouse/pull/6676) ([dimarub2000](https://github.com/dimarub2000))
+- Added previous declaration checks for the MySQL 8 integration. [\#6569](https://github.com/ClickHouse/ClickHouse/pull/6569) ([Rafael David Tinoco](https://github.com/rafaeldtinoco))
+
+#### Security fix {#security-fix-1}
+
+- Fixed two vulnerabilities in codecs in the decompression phase (a malicious user could fabricate compressed data that leads to buffer overflow in decompression). [\#6670](https://github.com/ClickHouse/ClickHouse/pull/6670) ([Artem Zuikov](https://github.com/4ertus2))
+
+### ClickHouse release 19.13.3.26, 2019-08-22 {#clickhouse-release-19-13-3-26-2019-08-22}
+
+#### Bug fix {#bug-fix-12}
+
+- Fixed `ALTER TABLE ... UPDATE` queries for tables with `enable_mixed_granularity_parts=1`. [\#6543](https://github.com/ClickHouse/ClickHouse/pull/6543) ([alesapin](https://github.com/alesapin))
+- Fixed subqueries with a tuple when using the IN clause. [\#6125](https://github.com/ClickHouse/ClickHouse/issues/6125) [\#6550](https://github.com/ClickHouse/ClickHouse/pull/6550) ([tavplubix](https://github.com/tavplubix))
+- Fixed an issue where a stale replica that becomes alive may still have data parts of a removed partition. [\#6522](https://github.com/ClickHouse/ClickHouse/issues/6522) [\#6523](https://github.com/ClickHouse/ClickHouse/pull/6523) ([tavplubix](https://github.com/tavplubix))
+- Fixed an issue with parsing CSV. [\#6426](https://github.com/ClickHouse/ClickHouse/issues/6426) [\#6559](https://github.com/ClickHouse/ClickHouse/pull/6559) ([tavplubix](https://github.com/tavplubix))
+- Fixed a data race in the system.parts table and ALTER queries. This fixes [\#6245](https://github.com/ClickHouse/ClickHouse/issues/6245). [\#6513](https://github.com/ClickHouse/ClickHouse/pull/6513) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed bad code in mutations that may lead to memory corruption. Fixed a segfault with reads at the address `0x14c0` that could happen due to concurrent `DROP TABLE` and `SELECT` from `system.parts` or `system.parts_columns`. Fixed a race condition in preparation of mutation queries. Fixed a deadlock caused by `OPTIMIZE` of replicated tables and concurrent modification operations like ALTERs. [\#6514](https://github.com/ClickHouse/ClickHouse/pull/6514) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed possible data loss after an `ALTER DELETE` query on a table with a skip index. [\#6224](https://github.com/ClickHouse/ClickHouse/issues/6224) [\#6282](https://github.com/ClickHouse/ClickHouse/pull/6282) ([Nikita Vasilev](https://github.com/nikvas0))
+
+#### Security fix {#security-fix-2}
+
+- If an attacker has write access to ZooKeeper and is able to run a custom server available from the network where ClickHouse runs, they can create a custom-built malicious server that will act as a ClickHouse replica and register it in ZooKeeper. When another replica fetches a data part from the malicious replica, it can force clickhouse-server to write to an arbitrary path on the filesystem. Found by Eldar Zaitov, of the Yandex information security team. [\#6247](https://github.com/ClickHouse/ClickHouse/pull/6247) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse release 19.13.2.19, 2019-08-14 {#clickhouse-release-19-13-2-19-2019-08-14}
+
+#### New feature {#new-feature-5}
+
+- A sampling profiler on the query level (see the sketch below). [Example](https://gist.github.com/alexey-milovidov/92758583dd41c24c360fdb8d6a4da194). [\#4247](https://github.com/ClickHouse/ClickHouse/issues/4247) ([laplab](https://github.com/laplab)) [\#6124](https://github.com/ClickHouse/ClickHouse/pull/6124) ([alexey-milovidov](https://github.com/alexey-milovidov)) [\#6250](https://github.com/ClickHouse/ClickHouse/pull/6250) [\#6283](https://github.com/ClickHouse/ClickHouse/pull/6283) [\#6386](https://github.com/ClickHouse/ClickHouse/pull/6386)
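+
+A sketch of driving the query-level sampling profiler; the `query_profiler_real_time_period_ns` setting and the `system.trace_log` table are assumed to be the ones this feature introduces:
+
+``` sql
+SET query_profiler_real_time_period_ns = 10000000;  -- sample every 10 ms
+SELECT count() FROM numbers(100000000);
+
+-- Collected stack traces land in system.trace_log:
+SELECT query_id, trace FROM system.trace_log ORDER BY event_time DESC LIMIT 1;
+```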
+- Allow specifying a list of columns with a `COLUMNS('regexp')` expression that works like a more sophisticated variant of the `*` asterisk; see the sketch below. [\#5951](https://github.com/ClickHouse/ClickHouse/pull/5951) ([mfridental](https://github.com/mfridental)), ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `CREATE TABLE AS table_function()` is now possible. [\#6057](https://github.com/ClickHouse/ClickHouse/pull/6057) ([dimarub2000](https://github.com/dimarub2000))
+- The Adam optimizer for stochastic gradient descent is used by default in the `stochasticLinearRegression()` and `stochasticLogisticRegression()` aggregate functions, because it shows good quality with almost no tuning. [\#6000](https://github.com/ClickHouse/ClickHouse/pull/6000) ([Quid37](https://github.com/Quid37))
+- Added functions for working with the custom week number. [\#5212](https://github.com/ClickHouse/ClickHouse/pull/5212) ([Andy Yang](https://github.com/andyyzh))
+- `RENAME` queries now work with all storages. [\#5953](https://github.com/ClickHouse/ClickHouse/pull/5953) ([Ivan](https://github.com/abyss7))
+- Now the client receives logs of any desired level from the server by setting `send_logs_level`, regardless of the log level specified in the server settings. [\#5964](https://github.com/ClickHouse/ClickHouse/pull/5964) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+
+#### Backward incompatible change {#backward-incompatible-change-4}
+
+- The setting `input_format_defaults_for_omitted_fields` is enabled by default. Inserts into Distributed tables need this setting to be the same across the cluster (you need to set it before a rolling update). It enables calculation of complex default expressions for omitted fields in the `JSONEachRow` and `CSV*` formats. It should be the expected behavior, but it may lead to a negligible performance difference. [\#6043](https://github.com/ClickHouse/ClickHouse/pull/6043) ([Artem Zuikov](https://github.com/4ertus2)), [\#5625](https://github.com/ClickHouse/ClickHouse/pull/5625) ([akuzm](https://github.com/akuzm))
+
+#### Experimental features {#experimental-features}
+
+- A new query-processing pipeline. Use the `experimental_use_processors=1` option to enable it. Use at your own risk. [\#4914](https://github.com/ClickHouse/ClickHouse/pull/4914) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+
+#### Bug fix {#bug-fix-13}
+
+- The Kafka integration has been fixed in this version.
+- Fixed `DoubleDelta` encoding of `Int64` for large `DoubleDelta` values; improved `DoubleDelta` encoding of random data for `Int32`. [\#5998](https://github.com/ClickHouse/ClickHouse/pull/5998) ([Vasily Nemkov](https://github.com/Enmk))
+- Fixed overestimation of `max_rows_to_read` if the setting `merge_tree_uniform_read_distribution` is set to 0. [\#6019](https://github.com/ClickHouse/ClickHouse/pull/6019) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Improvement {#improvement-4}
+
+- Throws an exception if a `config.d` file doesn't have the corresponding root element as the config file. [\#6123](https://github.com/ClickHouse/ClickHouse/pull/6123) ([dimarub2000](https://github.com/dimarub2000))
+
+#### Performance improvement {#performance-improvement-3}
+
+- Optimized `count()`. Now it uses the smallest column (if possible). [\#6028](https://github.com/ClickHouse/ClickHouse/pull/6028) ([Amos Bird](https://github.com/amosbird))
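+
+The `COLUMNS('regexp')` matcher from the feature list above, in a one-liner:
+
+``` sql
+-- Select every column of system.query_log whose name starts with "event":
+SELECT COLUMNS('^event') FROM system.query_log LIMIT 1;
+```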
+
+#### Backward Incompatible Change {#backward-incompatible-change-4}
+
+- The setting `input_format_defaults_for_omitted_fields` is enabled by default. Inserts into Distributed tables need this setting to be the same across the cluster (you have to set it before a rolling update). It enables the calculation of complex default expressions for omitted fields in `JSONEachRow` and `CSV*` formats. It should be the expected behavior, but it may lead to a negligible performance difference. [\#6043](https://github.com/ClickHouse/ClickHouse/pull/6043) ([Artem Zuikov](https://github.com/4ertus2)), [\#5625](https://github.com/ClickHouse/ClickHouse/pull/5625) ([akuzm](https://github.com/akuzm))
+
+#### Experimental Features {#experimental-features}
+
+- New query processing pipeline. Use the `experimental_use_processors=1` option to enable it. Use at your own risk. [\#4914](https://github.com/ClickHouse/ClickHouse/pull/4914) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+
+#### Bug Fix {#bug-fix-13}
+
+- Kafka integration has been fixed in this version.
+- Fixed `DoubleDelta` encoding of `Int64` for large `DoubleDelta` values; improved `DoubleDelta` encoding of random data for `Int32`. [\#5998](https://github.com/ClickHouse/ClickHouse/pull/5998) ([Vasily Nemkov](https://github.com/Enmk))
+- Fixed overestimation of `max_rows_to_read` if the setting `merge_tree_uniform_read_distribution` is set to 0. [\#6019](https://github.com/ClickHouse/ClickHouse/pull/6019) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Improvement {#improvement-4}
+
+- Throw an exception if a `config.d` file doesn't have the corresponding root element as the config file. [\#6123](https://github.com/ClickHouse/ClickHouse/pull/6123) ([dimarub2000](https://github.com/dimarub2000))
+
+#### Performance Improvement {#performance-improvement-3}
+
+- Optimized `count()`. Now it uses the smallest column (if possible). [\#6028](https://github.com/ClickHouse/ClickHouse/pull/6028) ([Amos Bird](https://github.com/amosbird))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-5}
+
+- Report memory usage in performance tests. [\#5899](https://github.com/ClickHouse/ClickHouse/pull/5899) ([akuzm](https://github.com/akuzm))
+- Fixed the build with external `libcxx`. [\#6010](https://github.com/ClickHouse/ClickHouse/pull/6010) ([Ivan](https://github.com/abyss7))
+- Fixed the shared build with the `rdkafka` library. [\#6101](https://github.com/ClickHouse/ClickHouse/pull/6101) ([Ivan](https://github.com/abyss7))
+
+## ClickHouse release 19.11 {#clickhouse-release-19-11}
+
+### ClickHouse release 19.11.13.74, 2019-11-01 {#clickhouse-release-19-11-13-74-2019-11-01}
+
+#### Bug Fix {#bug-fix-14}
+
+- Fixed a rare crash in `ALTER MODIFY COLUMN` and vertical merge when one of the merged/altered parts is empty (0 rows). [\#6780](https://github.com/ClickHouse/ClickHouse/pull/6780) ([alesapin](https://github.com/alesapin))
+- Manual update of `SIMDJSON`. This fixes possible flooding of stderr files with bogus JSON diagnostic messages. [\#7548](https://github.com/ClickHouse/ClickHouse/pull/7548) ([Alexander Kazakov](https://github.com/Akazz))
+- Fixed a bug with the `mrk` file extension for mutations ([alesapin](https://github.com/alesapin))
+
+### ClickHouse release 19.11.12.69, 2019-10-02 {#clickhouse-release-19-11-12-69-2019-10-02}
+
+#### Bug Fix {#bug-fix-15}
+
+- Fixed performance degradation of index analysis on complex keys on large tables. This fixes [\#6924](https://github.com/ClickHouse/ClickHouse/issues/6924). [\#7075](https://github.com/ClickHouse/ClickHouse/pull/7075) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Avoid a rare SIGSEGV while sending data in tables with the Distributed engine (`Failed to send batch: file with index XXXXX is absent`). [\#7032](https://github.com/ClickHouse/ClickHouse/pull/7032) ([Azat Khuzhin](https://github.com/azat))
+- Fixed `Unknown identifier` with multiple joins. This fixes [\#5254](https://github.com/ClickHouse/ClickHouse/issues/5254). [\#7022](https://github.com/ClickHouse/ClickHouse/pull/7022) ([Artem Zuikov](https://github.com/4ertus2))
+
+### ClickHouse release 19.11.11.57, 2019-09-13 {#clickhouse-release-19-11-11-57-2019-09-13}
+
+- Fixed a logical error causing segfaults when selecting from an empty Kafka topic. [\#6902](https://github.com/ClickHouse/ClickHouse/issues/6902) [\#6909](https://github.com/ClickHouse/ClickHouse/pull/6909) ([Ivan](https://github.com/abyss7))
+- Fixed the function `АrrayEnumerateUniqRanked` with empty arrays in parameters. [\#6928](https://github.com/ClickHouse/ClickHouse/pull/6928) ([proller](https://github.com/proller))
+
+### ClickHouse release 19.11.10.54, 2019-09-10 {#clickhouse-release-19-11-10-54-2019-09-10}
+
+#### Bug Fix {#bug-fix-16}
+
+- Store offsets for Kafka messages manually to be able to commit them all at once for all partitions. Fixes potential duplication in the “one consumer - many partitions” scenario. [\#6872](https://github.com/ClickHouse/ClickHouse/pull/6872) ([Ivan](https://github.com/abyss7))
+
+### ClickHouse release 19.11.9.52, 2019-09-6 {#clickhouse-release-19-11-9-52-2019-09-6}
+
+- Improved error handling in cache dictionaries. [\#6737](https://github.com/ClickHouse/ClickHouse/pull/6737) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fixed a bug in the function `arrayEnumerateUniqRanked`. [\#6779](https://github.com/ClickHouse/ClickHouse/pull/6779) ([proller](https://github.com/proller))
+- Fixed the `JSONExtract` function while extracting a `Tuple` from JSON. [\#6718](https://github.com/ClickHouse/ClickHouse/pull/6718) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fixed possible data loss after an `ALTER DELETE` query on a table with a skipping index. [\#6224](https://github.com/ClickHouse/ClickHouse/issues/6224) [\#6282](https://github.com/ClickHouse/ClickHouse/pull/6282) ([Nikita Vasilev](https://github.com/nikvas0))
+- Fixed a performance test. [\#6392](https://github.com/ClickHouse/ClickHouse/pull/6392) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Parquet: fixed reading boolean columns. [\#6579](https://github.com/ClickHouse/ClickHouse/pull/6579) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed wrong behaviour of the `nullIf` function for constant arguments. [\#6518](https://github.com/ClickHouse/ClickHouse/pull/6518) ([Guillaume Tassery](https://github.com/YiuRULE)) [\#6580](https://github.com/ClickHouse/ClickHouse/pull/6580) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a Kafka message duplication problem on normal server restart. [\#6597](https://github.com/ClickHouse/ClickHouse/pull/6597) ([Ivan](https://github.com/abyss7))
+- Fixed an issue when a long `ALTER UPDATE` or `ALTER DELETE` may prevent regular merges from running. Prevent mutations from executing if there are not enough free threads available. [\#6502](https://github.com/ClickHouse/ClickHouse/issues/6502) [\#6617](https://github.com/ClickHouse/ClickHouse/pull/6617) ([tavplubix](https://github.com/tavplubix))
+- Fixed an error with processing “timezone” in the server configuration file. [\#6709](https://github.com/ClickHouse/ClickHouse/pull/6709) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed Kafka tests. [\#6805](https://github.com/ClickHouse/ClickHouse/pull/6805) ([Ivan](https://github.com/abyss7))
+
+#### Security Fix {#security-fix-3}
+
+- If the attacker has write access to ZooKeeper and is able to run a custom server available from the network where ClickHouse runs, it can create a custom-built malicious server that acts as a ClickHouse replica and register it in ZooKeeper. When another replica fetches a data part from the malicious replica, it can force clickhouse-server to write to an arbitrary path on the filesystem. Found by Eldar Zaitov, information security team at Yandex. [\#6247](https://github.com/ClickHouse/ClickHouse/pull/6247) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse release 19.11.8.46, 2019-08-22 {#clickhouse-release-19-11-8-46-2019-08-22}
+
+#### Bug Fix {#bug-fix-17}
+
+- Fixed `ALTER TABLE ... UPDATE` queries for tables with `enable_mixed_granularity_parts=1`. [\#6543](https://github.com/ClickHouse/ClickHouse/pull/6543) ([alesapin](https://github.com/alesapin))
+- Fixed subqueries with tuples when using the IN clause. [\#6125](https://github.com/ClickHouse/ClickHouse/issues/6125) [\#6550](https://github.com/ClickHouse/ClickHouse/pull/6550) ([tavplubix](https://github.com/tavplubix))
+- Fixed the issue that a stale replica that becomes alive may still keep data parts of a dropped partition. [\#6522](https://github.com/ClickHouse/ClickHouse/issues/6522) [\#6523](https://github.com/ClickHouse/ClickHouse/pull/6523) ([tavplubix](https://github.com/tavplubix))
+- Fixed an issue with parsing CSV. [\#6426](https://github.com/ClickHouse/ClickHouse/issues/6426) [\#6559](https://github.com/ClickHouse/ClickHouse/pull/6559) ([tavplubix](https://github.com/tavplubix))
+- Fixed a data race in the system.parts table and ALTER queries. This fixes [\#6245](https://github.com/ClickHouse/ClickHouse/issues/6245). [\#6513](https://github.com/ClickHouse/ClickHouse/pull/6513) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed wrong code in mutations that could lead to memory corruption. Fixed a segfault with reads of address `0x14c0` that could happen due to concurrent `DROP TABLE` and `SELECT` from `system.parts` or `system.parts_columns`. Fixed a race condition in the preparation of mutation queries. Fixed a deadlock caused by `OPTIMIZE` of replicated tables and concurrent modification operations like ALTERs. [\#6514](https://github.com/ClickHouse/ClickHouse/pull/6514) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse release 19.11.7.40, 2019-08-14 {#clickhouse-release-19-11-7-40-2019-08-14}
+
+#### Bug Fix {#bug-fix-18}
+
+- Kafka integration has been fixed in this version.
+- Fixed a segfault when using `arrayReduce` with constant arguments. [\#6326](https://github.com/ClickHouse/ClickHouse/pull/6326) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed `toFloat()` monotonicity. [\#6374](https://github.com/ClickHouse/ClickHouse/pull/6374) ([dimarub2000](https://github.com/dimarub2000))
+- Fixed a segfault with enabled `optimize_skip_unused_shards` and a missing sharding key. [\#6384](https://github.com/ClickHouse/ClickHouse/pull/6384) ([CurtizJ](https://github.com/CurtizJ))
+- Fixed the logic of the `arrayEnumerateUniqRanked` function. [\#6423](https://github.com/ClickHouse/ClickHouse/pull/6423) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Removed extra verbose logging from the MySQL handler. [\#6389](https://github.com/ClickHouse/ClickHouse/pull/6389) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed wrong behavior and possible segfaults in the `topK` and `topKWeighted` aggregate functions. [\#6404](https://github.com/ClickHouse/ClickHouse/pull/6404) ([CurtizJ](https://github.com/CurtizJ))
+- Do not expose virtual columns in the `system.columns` table. This is required for backward compatibility. [\#6406](https://github.com/ClickHouse/ClickHouse/pull/6406) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a bug with memory allocation for string fields in complex-key cache dictionaries. [\#6447](https://github.com/ClickHouse/ClickHouse/pull/6447) ([alesapin](https://github.com/alesapin))
+- Fixed a bug with enabling adaptive granularity when creating a new replica of a `Replicated*MergeTree` table. [\#6452](https://github.com/ClickHouse/ClickHouse/pull/6452) ([alesapin](https://github.com/alesapin))
+- Fixed an infinite loop when reading Kafka messages. [\#6354](https://github.com/ClickHouse/ClickHouse/pull/6354) ([abyss7](https://github.com/abyss7))
+- Fixed the possibility of a fabricated query causing a server crash due to stack overflow in the SQL parser, and the possibility of stack overflow in `Merge` and `Distributed` tables. [\#6433](https://github.com/ClickHouse/ClickHouse/pull/6433) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a Gorilla encoding error on small sequences. [\#6444](https://github.com/ClickHouse/ClickHouse/pull/6444) ([Enmk](https://github.com/Enmk))
+
+#### Improvement {#improvement-5}
+
+- Allow the user to override the `poll_interval` and `idle_connection_timeout` settings on connection. [\#6230](https://github.com/ClickHouse/ClickHouse/pull/6230) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse release 19.11.5.28, 2019-08-05 {#clickhouse-release-19-11-5-28-2019-08-05}
+
+#### Bug Fix {#bug-fix-19}
+
+- Fixed the possibility of hanging queries when the server is overloaded. [\#6301](https://github.com/ClickHouse/ClickHouse/pull/6301) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed an FPE in the yandexConsistentHash function. This fixes [\#6304](https://github.com/ClickHouse/ClickHouse/issues/6304). [\#6126](https://github.com/ClickHouse/ClickHouse/pull/6126) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a bug in the conversion of `LowCardinality` types in `AggregateFunctionFactory`. This fixes [\#6257](https://github.com/ClickHouse/ClickHouse/issues/6257). [\#6281](https://github.com/ClickHouse/ClickHouse/pull/6281) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed parsing of `bool` settings from `true` and `false` strings in configuration files. [\#6278](https://github.com/ClickHouse/ClickHouse/pull/6278) ([alesapin](https://github.com/alesapin))
+- Fixed a rare bug with incompatible stream headers in queries to a `Distributed` table over a `MergeTree` table when part of the `WHERE` moves to `PREWHERE`. [\#6236](https://github.com/ClickHouse/ClickHouse/pull/6236) ([alesapin](https://github.com/alesapin))
+- Fixed overflow in integer division of a signed type by an unsigned type. This fixes [\#6214](https://github.com/ClickHouse/ClickHouse/issues/6214). [\#6233](https://github.com/ClickHouse/ClickHouse/pull/6233) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Backward Incompatible Change {#backward-incompatible-change-5}
+
+- `Kafka` is still broken.
+
+### ClickHouse release 19.11.4.24, 2019-08-01 {#clickhouse-release-19-11-4-24-2019-08-01}
+
+#### Bug Fix {#bug-fix-20}
+
+- Fixed a bug with writing secondary index marks with adaptive granularity. [\#6126](https://github.com/ClickHouse/ClickHouse/pull/6126) ([alesapin](https://github.com/alesapin))
+- Fixed the `WITH ROLLUP` and `WITH CUBE` modifiers of `GROUP BY` with two-level aggregation (see the sketch after this list). [\#6225](https://github.com/ClickHouse/ClickHouse/pull/6225) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed a hang in the `JSONExtractRaw` function. Fixed [\#6195](https://github.com/ClickHouse/ClickHouse/issues/6195) [\#6198](https://github.com/ClickHouse/ClickHouse/pull/6198) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a segfault in ExternalLoader::reloadOutdated(). [\#6082](https://github.com/ClickHouse/ClickHouse/pull/6082) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fixed the case when the server may close listening sockets but not shut down, and continue serving remaining queries. You may end up with two running clickhouse-server processes. Sometimes the server may return a `bad_function_call` error for remaining queries. [\#6231](https://github.com/ClickHouse/ClickHouse/pull/6231) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a useless and incorrect condition on the update field for the initial loading of external dictionaries via ODBC, MySQL, ClickHouse and HTTP. This fixes [\#6069](https://github.com/ClickHouse/ClickHouse/issues/6069). [\#6083](https://github.com/ClickHouse/ClickHouse/pull/6083) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed an irrelevant exception in the cast of `LowCardinality(Nullable)` to not-Nullable column in case if it doesn't contain Nulls (e.g. in query like `SELECT CAST(CAST('Hello' AS LowCardinality(Nullable(String))) AS String)`. [\#6094](https://github.com/ClickHouse/ClickHouse/issues/6094) [\#6119](https://github.com/ClickHouse/ClickHouse/pull/6119) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed a non-deterministic result of the “uniq” aggregate function in extremely rare cases. The bug was present in all ClickHouse versions. [\#6058](https://github.com/ClickHouse/ClickHouse/pull/6058) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a segfault when we set a slightly too high CIDR in the function `IPv6CIDRToRange`. [\#6068](https://github.com/ClickHouse/ClickHouse/pull/6068) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Fixed a small memory leak when the server throws many exceptions from many different contexts. [\#6144](https://github.com/ClickHouse/ClickHouse/pull/6144) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed the situation when a consumer got paused before subscription and was not resumed afterwards. [\#6075](https://github.com/ClickHouse/ClickHouse/pull/6075) ([Ivan](https://github.com/abyss7)) Note that Kafka is broken in this version.
+- Clear the Kafka data buffer from a previous read operation that completed with an error. [\#6026](https://github.com/ClickHouse/ClickHouse/pull/6026) ([Nikolay](https://github.com/bopohaa)) Note that Kafka is broken in this version.
+- Since `StorageMergeTree::background_task_handle` is initialized in `startup()`, `MergeTreeBlockOutputStream::write()` may try to use it before initialization. Just check whether it is initialized. [\#6080](https://github.com/ClickHouse/ClickHouse/pull/6080) ([Ivan](https://github.com/abyss7))
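+
+A small sketch of the `GROUP BY` modifiers covered by the fix above; the table and column names are hypothetical:
+
+```sql
+SELECT region, city, sum(sales) AS total
+FROM sales_by_city
+GROUP BY region, city
+    WITH ROLLUP;  -- adds per-region subtotal rows and a grand-total row
+```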
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-6}
+
+- Added official `rpm` packages. [\#5740](https://github.com/ClickHouse/ClickHouse/pull/5740) ([proller](https://github.com/proller)) ([alesapin](https://github.com/alesapin))
+- Added an ability to build `.rpm` and `.tgz` packages with the `packager` script. [\#5769](https://github.com/ClickHouse/ClickHouse/pull/5769) ([alesapin](https://github.com/alesapin))
+- Fixes for the “Arcadia” build system. [\#6223](https://github.com/ClickHouse/ClickHouse/pull/6223) ([proller](https://github.com/proller))
+
+#### Backward Incompatible Change {#backward-incompatible-change-6}
+
+- `Kafka` is broken in this version.
+
+### ClickHouse release 19.11.3.11, 2019-07-18 {#clickhouse-release-19-11-3-11-2019-07-18}
+
+#### New Feature {#new-feature-6}
+
+- Added support for prepared statements. [\#5331](https://github.com/ClickHouse/ClickHouse/pull/5331/) ([Alexander](https://github.com/sanych73)) [\#5630](https://github.com/ClickHouse/ClickHouse/pull/5630) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `DoubleDelta` and `Gorilla` column codecs. [\#5600](https://github.com/ClickHouse/ClickHouse/pull/5600) ([Vasily Nemkov](https://github.com/Enmk))
+- Added the `os_thread_priority` setting that allows controlling the “nice” value of query processing threads, used by the OS to adjust dynamic scheduling priority. It requires the `CAP_SYS_NICE` capability to work. This implements [\#5858](https://github.com/ClickHouse/ClickHouse/issues/5858) [\#5909](https://github.com/ClickHouse/ClickHouse/pull/5909) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Implemented the `_topic`, `_offset`, `_key` columns for the Kafka engine. [\#5382](https://github.com/ClickHouse/ClickHouse/pull/5382) ([Ivan](https://github.com/abyss7)) Note that Kafka is broken in this version.
+- Added the aggregate function combinator `-Resample`. [\#5590](https://github.com/ClickHouse/ClickHouse/pull/5590) ([hcz](https://github.com/hczhcz))
+- Aggregate functions `groupArrayMovingSum(win_size)(x)` and `groupArrayMovingAvg(win_size)(x)`, which calculate a moving sum/average with or without a window-size limitation (see the sketch after this list). [\#5595](https://github.com/ClickHouse/ClickHouse/pull/5595) ([inv2004](https://github.com/inv2004))
+- Added the synonym `arrayFlatten` \<-\> `flatten`. [\#5764](https://github.com/ClickHouse/ClickHouse/pull/5764) ([hcz](https://github.com/hczhcz))
+- Integrated the H3 function `geoToH3` from Uber. [\#4724](https://github.com/ClickHouse/ClickHouse/pull/4724) ([Remen Ivan](https://github.com/BHYCHIK)) [\#5805](https://github.com/ClickHouse/ClickHouse/pull/5805) ([alexey-milovidov](https://github.com/alexey-milovidov))
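+
+A quick sketch of the new moving-window aggregates mentioned above (window size 3), using inline data:
+
+```sql
+SELECT
+    groupArrayMovingSum(3)(v) AS moving_sum,  -- [1, 3, 6, 9, 12]
+    groupArrayMovingAvg(3)(v) AS moving_avg
+FROM (SELECT arrayJoin([1, 2, 3, 4, 5]) AS v);
+```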
+
+#### Bug Fix {#bug-fix-21}
+
+- Implemented a DNS cache with asynchronous update. A separate thread resolves all hosts and updates the DNS cache with a period (setting `dns_cache_update_period`). It should help when the IPs of hosts change frequently. [\#5857](https://github.com/ClickHouse/ClickHouse/pull/5857) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed a segfault in the `Delta` codec which affects columns with values less than 32 bits in size. The bug led to random memory corruption. [\#5786](https://github.com/ClickHouse/ClickHouse/pull/5786) ([alesapin](https://github.com/alesapin))
+- Fixed a segfault in TTL merge with non-physical columns in a block. [\#5819](https://github.com/ClickHouse/ClickHouse/pull/5819) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed a rare bug in checking a part with a `LowCardinality` column. Previously `checkDataPart` always failed for a part with a `LowCardinality` column. [\#5832](https://github.com/ClickHouse/ClickHouse/pull/5832) ([alesapin](https://github.com/alesapin))
+- Avoid hanging connections when the server thread pool is full. It is important for connections from the `remote` table function or connections to a shard without replicas when there is a long connection timeout. This fixes [\#5878](https://github.com/ClickHouse/ClickHouse/issues/5878) [\#5881](https://github.com/ClickHouse/ClickHouse/pull/5881) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Support for constant arguments to the `evalMLModel` function. This fixes [\#5817](https://github.com/ClickHouse/ClickHouse/issues/5817) [\#5820](https://github.com/ClickHouse/ClickHouse/pull/5820) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed the issue when ClickHouse determines the default time zone as `UCT` instead of `UTC`. This fixes [\#5804](https://github.com/ClickHouse/ClickHouse/issues/5804). [\#5828](https://github.com/ClickHouse/ClickHouse/pull/5828) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a buffer underflow in `visitParamExtractRaw`. This fixes [\#5901](https://github.com/ClickHouse/ClickHouse/issues/5901) [\#5902](https://github.com/ClickHouse/ClickHouse/pull/5902) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Now distributed `DROP/ALTER/TRUNCATE/OPTIMIZE ON CLUSTER` queries are executed directly on the leader replica. [\#5757](https://github.com/ClickHouse/ClickHouse/pull/5757) ([alesapin](https://github.com/alesapin))
+- Fixed `coalesce` for `ColumnConst` with `ColumnNullable` + related changes. [\#5755](https://github.com/ClickHouse/ClickHouse/pull/5755) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed `ReadBufferFromKafkaConsumer` so that it keeps reading new messages after `commit()` even if it was stalled before. [\#5852](https://github.com/ClickHouse/ClickHouse/pull/5852) ([Ivan](https://github.com/abyss7))
+- Fixed `FULL` and `RIGHT` JOIN results when joining on `Nullable` keys in the right table. [\#5859](https://github.com/ClickHouse/ClickHouse/pull/5859) ([Artem Zuikov](https://github.com/4ertus2))
+- Possible fix for infinite sleeping of low-priority queries. [\#5842](https://github.com/ClickHouse/ClickHouse/pull/5842) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a race condition which causes some queries to possibly not appear in query\_log after a `SYSTEM FLUSH LOGS` query. [\#5456](https://github.com/ClickHouse/ClickHouse/issues/5456) [\#5685](https://github.com/ClickHouse/ClickHouse/pull/5685) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed a `heap-use-after-free` warning in ClusterCopier caused by a watch that tried to use an already removed copier object. [\#5871](https://github.com/ClickHouse/ClickHouse/pull/5871) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed a wrong `StringRef` pointer returned by some implementations of `IColumn::deserializeAndInsertFromArena`. This bug affected only unit tests. [\#5973](https://github.com/ClickHouse/ClickHouse/pull/5973) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Prevent source and intermediate array join columns from masking same-name columns. [\#5941](https://github.com/ClickHouse/ClickHouse/pull/5941) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed INSERT and SELECT queries to the MySQL engine with MySQL-style identifier quoting. [\#5704](https://github.com/ClickHouse/ClickHouse/pull/5704) ([Winter Zhang](https://github.com/zhang2014))
+- Now the `CHECK TABLE` query can work with the MergeTree engine family. It returns a check status and a message, if any, for each part (or file, in the case of simpler engines). Also, fixed a bug in the fetch of a broken part. [\#5865](https://github.com/ClickHouse/ClickHouse/pull/5865) ([alesapin](https://github.com/alesapin))
+- Fixed the SPLIT\_SHARED\_LIBRARIES runtime. [\#5793](https://github.com/ClickHouse/ClickHouse/pull/5793) ([Danila Kutenin](https://github.com/danlark1))
+- Fixed time zone initialization when `/etc/localtime` is a relative symlink like `../usr/share/zoneinfo/Europe/Moscow`. [\#5922](https://github.com/ClickHouse/ClickHouse/pull/5922) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- clickhouse-copier: fixed a use-after-free on shutdown. [\#5752](https://github.com/ClickHouse/ClickHouse/pull/5752) ([proller](https://github.com/proller))
+- Updated `simdjson`. Fixed the issue when some invalid JSONs with zero bytes successfully parse. [\#5938](https://github.com/ClickHouse/ClickHouse/pull/5938) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed the shutdown of SystemLogs. [\#5802](https://github.com/ClickHouse/ClickHouse/pull/5802) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed hanging when a condition in invalidate\_query depends on a dictionary. [\#6011](https://github.com/ClickHouse/ClickHouse/pull/6011) ([Vitaly Baranov](https://github.com/vitlibar))
+
+#### Improvement {#improvement-6}
+
+- Allow unresolvable addresses in the cluster configuration. They are considered unavailable and tried to resolve at every connection attempt. This is especially useful for Kubernetes. This fixes [\#5714](https://github.com/ClickHouse/ClickHouse/issues/5714) [\#5924](https://github.com/ClickHouse/ClickHouse/pull/5924) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Close idle TCP connections (with a one-hour timeout by default). This is especially important for large clusters with multiple distributed tables on every server, because every server can possibly keep a connection pool to every other server, and after peak query concurrency, connections will stall. This fixes [\#5879](https://github.com/ClickHouse/ClickHouse/issues/5879) [\#5880](https://github.com/ClickHouse/ClickHouse/pull/5880) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Better quality of the `topK` function. Changed the SavingSpace set behavior to remove the last element if the new element has a bigger weight. [\#5833](https://github.com/ClickHouse/ClickHouse/issues/5833) [\#5850](https://github.com/ClickHouse/ClickHouse/pull/5850) ([Guillaume Tassery](https://github.com/YiuRULE))
+- URL functions that work with domains now work for incomplete URLs without a scheme. [\#5725](https://github.com/ClickHouse/ClickHouse/pull/5725) ([alesapin](https://github.com/alesapin))
+- Checksums added to the `system.parts_columns` table. [\#5874](https://github.com/ClickHouse/ClickHouse/pull/5874) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Added the `Enum` data type as a synonym for `Enum8` or `Enum16`. [\#5886](https://github.com/ClickHouse/ClickHouse/pull/5886) ([dimarub2000](https://github.com/dimarub2000))
+- Full bit-transpose variant for the `T64` codec. Could lead to better compression with `zstd`. [\#5742](https://github.com/ClickHouse/ClickHouse/pull/5742) ([Artem Zuikov](https://github.com/4ertus2))
+- A condition on the `startsWith` function can now use the primary key. This fixes [\#5310](https://github.com/ClickHouse/ClickHouse/issues/5310) and [\#5882](https://github.com/ClickHouse/ClickHouse/issues/5882) [\#5919](https://github.com/ClickHouse/ClickHouse/pull/5919) ([dimarub2000](https://github.com/dimarub2000))
+- Allow using `clickhouse-copier` with cross-replication cluster topology by permitting an empty database name. [\#5745](https://github.com/ClickHouse/ClickHouse/pull/5745) ([nvartolomei](https://github.com/nvartolomei))
+- Use `UTC` as the default timezone on a system without `tzdata` (e.g. bare Docker container). Before this patch, the error message `Could not determine local time zone` was printed and the server or client refused to start. [\#5827](https://github.com/ClickHouse/ClickHouse/pull/5827) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Returned support for floating-point arguments in the function `quantileTiming` for backward compatibility. [\#5911](https://github.com/ClickHouse/ClickHouse/pull/5911) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Show which table is missing a column in error messages. [\#5768](https://github.com/ClickHouse/ClickHouse/pull/5768) ([Ivan](https://github.com/abyss7))
+- Disallow running queries with the same query\_id by different users. [\#5430](https://github.com/ClickHouse/ClickHouse/pull/5430) ([proller](https://github.com/proller))
+- More robust code for sending metrics to Graphite. It works even during long multiple `RENAME TABLE` operations. [\#5875](https://github.com/ClickHouse/ClickHouse/pull/5875) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- More informative error messages are displayed when the ThreadPool cannot schedule a task for execution. This fixes [\#5305](https://github.com/ClickHouse/ClickHouse/issues/5305) [\#5801](https://github.com/ClickHouse/ClickHouse/pull/5801) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Inverted ngramSearch to be more intuitive. [\#5807](https://github.com/ClickHouse/ClickHouse/pull/5807) ([Danila Kutenin](https://github.com/danlark1))
+- Added user parsing in the HDFS engine builder. [\#5946](https://github.com/ClickHouse/ClickHouse/pull/5946) ([akonyaev90](https://github.com/akonyaev90))
+- Updated the default value of the `max_ast_elements` parameter. [\#5933](https://github.com/ClickHouse/ClickHouse/pull/5933) ([Artem Konovalov](https://github.com/izebit))
+- Added a notion of obsolete settings. The obsolete setting `allow_experimental_low_cardinality_type` can be used with no effect. [0f15c01c6802f7ce1a1494c12c846be8c98944cd](https://github.com/ClickHouse/ClickHouse/commit/0f15c01c6802f7ce1a1494c12c846be8c98944cd) [Alexey Milovidov](https://github.com/alexey-milovidov)
+
+#### Performance Improvement {#performance-improvement-4}
+
+- Increased the number of streams to SELECT from a Merge table for a more uniform distribution of threads. Added the setting `max_streams_multiplier_for_merge_tables`. This fixes [\#5797](https://github.com/ClickHouse/ClickHouse/issues/5797) [\#5915](https://github.com/ClickHouse/ClickHouse/pull/5915) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-7}
+
+- Added a backward compatibility test for client-server interaction with different versions of ClickHouse. [\#5868](https://github.com/ClickHouse/ClickHouse/pull/5868) ([alesapin](https://github.com/alesapin))
+- Test coverage information in every commit and pull request. [\#5896](https://github.com/ClickHouse/ClickHouse/pull/5896) ([alesapin](https://github.com/alesapin))
+- Cooperate with the address sanitizer to support our custom allocators (`Arena` and `ArenaWithFreeLists`) for better debugging of “use-after-free” errors. [\#5728](https://github.com/ClickHouse/ClickHouse/pull/5728) ([akuzm](https://github.com/akuzm))
+- Switched to the [LLVM libunwind implementation](https://github.com/llvm-mirror/libunwind) for C++ exception handling and stack trace printing. [\#4828](https://github.com/ClickHouse/ClickHouse/pull/4828) ([Nikita Lapkov](https://github.com/laplab))
+- Added two more warnings from -Weverything. [\#5923](https://github.com/ClickHouse/ClickHouse/pull/5923) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Allow building ClickHouse with Memory Sanitizer. [\#3949](https://github.com/ClickHouse/ClickHouse/pull/3949) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a ubsan report about the `bitTest` function in fuzz tests. [\#5943](https://github.com/ClickHouse/ClickHouse/pull/5943) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Docker: added the possibility to init a ClickHouse instance that requires authentication. [\#5727](https://github.com/ClickHouse/ClickHouse/pull/5727) ([Korviakov Andrey](https://github.com/shurshun))
+- Updated librdkafka to version 1.1.0. [\#5872](https://github.com/ClickHouse/ClickHouse/pull/5872) ([Ivan](https://github.com/abyss7))
+- Added a global timeout for integration tests and disabled some of them in the test code. [\#5741](https://github.com/ClickHouse/ClickHouse/pull/5741) ([alesapin](https://github.com/alesapin))
+- Fixed some ThreadSanitizer failures. [\#5854](https://github.com/ClickHouse/ClickHouse/pull/5854) ([akuzm](https://github.com/akuzm))
+- The `--no-undefined` option forces the linker to check all external names for existence while linking. It's very useful to track real dependencies between libraries in the split build mode. [\#5855](https://github.com/ClickHouse/ClickHouse/pull/5855) ([Ivan](https://github.com/abyss7))
+- Added a performance test for [\#5797](https://github.com/ClickHouse/ClickHouse/issues/5797) [\#5914](https://github.com/ClickHouse/ClickHouse/pull/5914) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed compatibility with gcc-7. [\#5840](https://github.com/ClickHouse/ClickHouse/pull/5840) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added support for gcc-9. This fixes [\#5717](https://github.com/ClickHouse/ClickHouse/issues/5717) [\#5774](https://github.com/ClickHouse/ClickHouse/pull/5774) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed an error when libunwind could be linked incorrectly. [\#5948](https://github.com/ClickHouse/ClickHouse/pull/5948) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a few warnings found by PVS-Studio. [\#5921](https://github.com/ClickHouse/ClickHouse/pull/5921) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added initial support for the `clang-tidy` static analyzer. [\#5806](https://github.com/ClickHouse/ClickHouse/pull/5806) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Converted the BSD/Linux endian macros (‘be64toh’ and ‘htobe64’) to the Mac OS X equivalents. [\#5785](https://github.com/ClickHouse/ClickHouse/pull/5785) ([Fu Chen](https://github.com/fredchenbj))
+- Improved the integration tests guide. [\#5796](https://github.com/ClickHouse/ClickHouse/pull/5796) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fixed the build at macOS + gcc9. [\#5822](https://github.com/ClickHouse/ClickHouse/pull/5822) ([filimonov](https://github.com/filimonov))
+- Fixed a hard-to-spot typo: aggreAGte -\> aggregate. [\#5753](https://github.com/ClickHouse/ClickHouse/pull/5753) ([akuzm](https://github.com/akuzm))
+- Fixed the FreeBSD build. [\#5760](https://github.com/ClickHouse/ClickHouse/pull/5760) ([proller](https://github.com/proller))
+- Added a link to the experimental YouTube channel to the website. [\#5845](https://github.com/ClickHouse/ClickHouse/pull/5845) ([Ivan Blinkov](https://github.com/blinkov))
+- CMake: added an option for coverage flags: WITH\_COVERAGE. [\#5776](https://github.com/ClickHouse/ClickHouse/pull/5776) ([proller](https://github.com/proller))
+- Fixed the initial size of some inline PODArrays. [\#5787](https://github.com/ClickHouse/ClickHouse/pull/5787) ([akuzm](https://github.com/akuzm))
+- clickhouse-server.postinst: fixed OS detection for CentOS 6. [\#5788](https://github.com/ClickHouse/ClickHouse/pull/5788) ([proller](https://github.com/proller))
+- Added Arch Linux package generation. [\#5719](https://github.com/ClickHouse/ClickHouse/pull/5719) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Split Common/config.h by libs (dbms). [\#5715](https://github.com/ClickHouse/ClickHouse/pull/5715) ([proller](https://github.com/proller))
+- Fixes for the “Arcadia” build platform. [\#5795](https://github.com/ClickHouse/ClickHouse/pull/5795) ([proller](https://github.com/proller))
+- Fixes for unconventional builds (gcc9, no submodules). [\#5792](https://github.com/ClickHouse/ClickHouse/pull/5792) ([proller](https://github.com/proller))
+- Require an explicit type in unalignedStore because it was proven to be bug-prone. [\#5791](https://github.com/ClickHouse/ClickHouse/pull/5791) ([akuzm](https://github.com/akuzm))
+- Fixed the macOS build. [\#5830](https://github.com/ClickHouse/ClickHouse/pull/5830) ([filimonov](https://github.com/filimonov))
+- Performance test concerning the new JIT feature with a bigger dataset, as requested here [\#5263](https://github.com/ClickHouse/ClickHouse/issues/5263) [\#5887](https://github.com/ClickHouse/ClickHouse/pull/5887) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Run stateful tests in the stress test. [12693e568722f11e19859742f56428455501fd2a](https://github.com/ClickHouse/ClickHouse/commit/12693e568722f11e19859742f56428455501fd2a) ([alesapin](https://github.com/alesapin))
+
+#### Backward Incompatible Change {#backward-incompatible-change-7}
+
+- `Kafka` is broken in this version.
+- Enable `adaptive_index_granularity` = 10MB by default for new `MergeTree` tables. If you created new MergeTree tables on version 19.11+, downgrading to versions prior to 19.6 will be impossible. [\#5628](https://github.com/ClickHouse/ClickHouse/pull/5628) ([alesapin](https://github.com/alesapin))
+- Removed obsolete undocumented embedded dictionaries that were used by Yandex.Metrica. The functions `OSIn`, `SEIn`, `OSToRoot`, `SEToRoot`, `OSHierarchy`, `SEHierarchy` are no longer available. If you are using these functions, write an email to clickhouse-feedback@yandex-team.com. Note: at the last moment we decided to keep these functions for a while. [\#5780](https://github.com/ClickHouse/ClickHouse/pull/5780) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+## ClickHouse release 19.10 {#clickhouse-release-19-10}
+
+### ClickHouse release 19.10.1.5, 2019-07-12 {#clickhouse-release-19-10-1-5-2019-07-12}
+
+#### New Feature {#new-feature-7}
+- Added a new column codec: `T64`. Made for (U)IntX/EnumX/Data(Time)/DecimalX columns. It should be good for columns with constant or small-range values. The codec itself allows enlarging or shrinking the data type without re-compression. [\#5557](https://github.com/ClickHouse/ClickHouse/pull/5557) ([Artem Zuikov](https://github.com/4ertus2))
+- Added the database engine `MySQL` that allows viewing all the tables in a remote MySQL server. [\#5599](https://github.com/ClickHouse/ClickHouse/pull/5599) ([Winter Zhang](https://github.com/zhang2014))
+- `bitmapContains` implementation. It's 2x faster than `bitmapHasAny` if the second bitmap contains one element (see the sketch after this list). [\#5535](https://github.com/ClickHouse/ClickHouse/pull/5535) ([Zhichang Yu](https://github.com/yuzhichang))
+- Support for the `crc32` function (with behaviour exactly as in MySQL or PHP). Do not use it if you need a hash function. [\#5661](https://github.com/ClickHouse/ClickHouse/pull/5661) ([Remen Ivan](https://github.com/BHYCHIK))
+- Implemented `SYSTEM START/STOP DISTRIBUTED SENDS` queries to control asynchronous inserts into `Distributed` tables. [\#4935](https://github.com/ClickHouse/ClickHouse/pull/4935) ([Winter Zhang](https://github.com/zhang2014))
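+
+A short sketch of two of the additions above; the `dist_table` name in the SYSTEM queries is hypothetical:
+
+```sql
+SELECT
+    bitmapContains(bitmapBuild([1, 2, 3]), toUInt32(2)) AS has_two,  -- 1
+    crc32('ClickHouse') AS checksum;                                 -- same value as MySQL's CRC32()
+
+SYSTEM STOP DISTRIBUTED SENDS dist_table;   -- pause asynchronous inserts
+SYSTEM START DISTRIBUTED SENDS dist_table;  -- resume them
+```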
+
+#### Bug Fix {#bug-fix-22}
+
+- Ignore query execution limits and max parts size for merge limits while executing mutations. [\#5659](https://github.com/ClickHouse/ClickHouse/pull/5659) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed a bug which may lead to deduplication of normal blocks (extremely rare) and insertion of duplicate blocks (more often). [\#5549](https://github.com/ClickHouse/ClickHouse/pull/5549) ([alesapin](https://github.com/alesapin))
+- Fixed the function `arrayEnumerateUniqRanked` for arguments with empty arrays. [\#5559](https://github.com/ClickHouse/ClickHouse/pull/5559) ([proller](https://github.com/proller))
+- Don't subscribe to Kafka topics without the intent to poll any messages. [\#5698](https://github.com/ClickHouse/ClickHouse/pull/5698) ([Ivan](https://github.com/abyss7))
+- Made the setting `join_use_nulls` have no effect for types that cannot be inside Nullable. [\#5700](https://github.com/ClickHouse/ClickHouse/pull/5700) ([Olga Khvostikova](https://github.com/stavrolia))
+- Fixed `Incorrect size of index granularity` errors. [\#5720](https://github.com/ClickHouse/ClickHouse/pull/5720) ([coraxster](https://github.com/coraxster))
+- Fixed a Float-to-Decimal conversion overflow. [\#5607](https://github.com/ClickHouse/ClickHouse/pull/5607) ([coraxster](https://github.com/coraxster))
+- Flush the buffer when the `WriteBufferFromHDFS` destructor is called. This fixes writing into `HDFS`. [\#5684](https://github.com/ClickHouse/ClickHouse/pull/5684) ([Xindong Peng](https://github.com/eejoin))
+
+#### Improvement {#improvement-7}
+
+- Treat empty cells in `CSV` as default values when the setting `input_format_defaults_for_omitted_fields` is enabled. [\#5625](https://github.com/ClickHouse/ClickHouse/pull/5625) ([akuzm](https://github.com/akuzm))
+- Non-blocking loading of external dictionaries. [\#5567](https://github.com/ClickHouse/ClickHouse/pull/5567) ([Vitaly Baranov](https://github.com/vitlibar))
+- Network timeouts can be dynamically changed for already established connections according to the settings. [\#4558](https://github.com/ClickHouse/ClickHouse/pull/4558) ([Konstantin Podshumok](https://github.com/podshumok))
+- Using “public\_suffix\_list” for the functions `firstSignificantSubdomain`, `cutToFirstSignificantSubdomain`. It uses a perfect hash table generated by `gperf` with a list generated from the file https://publicsuffix.org/list/public\_suffix\_list.dat (for example, now we recognize the domain `ac.uk` as non-significant). [\#5030](https://github.com/ClickHouse/ClickHouse/pull/5030) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Adopted the `IPv6` data type in system tables; unified client info columns in `system.processes` and `system.query_log`. [\#5640](https://github.com/ClickHouse/ClickHouse/pull/5640) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Using sessions for connections with the MySQL compatibility protocol. \#5476 [\#5646](https://github.com/ClickHouse/ClickHouse/pull/5646) ([Yuriy Baranov](https://github.com/yurriy))
+- Support more `ALTER` queries `ON CLUSTER`. [\#5593](https://github.com/ClickHouse/ClickHouse/pull/5593) [\#5613](https://github.com/ClickHouse/ClickHouse/pull/5613) ([sundyli](https://github.com/sundy-li))
+- Support a `<logger>` section in the `clickhouse-local` config file. [\#5540](https://github.com/ClickHouse/ClickHouse/pull/5540) ([proller](https://github.com/proller))
+- Allow running queries with the `remote` table function in `clickhouse-local`. [\#5627](https://github.com/ClickHouse/ClickHouse/pull/5627) ([proller](https://github.com/proller))
+
+#### Performance Improvement {#performance-improvement-5}
+
+- Added the possibility to write the final mark at the end of MergeTree columns. It allows avoiding useless reads for keys that are out of the table data range. It is enabled only if adaptive index granularity is in use. [\#5624](https://github.com/ClickHouse/ClickHouse/pull/5624) ([alesapin](https://github.com/alesapin))
+- Improved performance of MergeTree tables on very slow filesystems by reducing the number of `stat` syscalls. [\#5648](https://github.com/ClickHouse/ClickHouse/pull/5648) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a performance degradation in reading from MergeTree tables that was introduced in version 19.6. Fixes \#5631. [\#5633](https://github.com/ClickHouse/ClickHouse/pull/5633) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-8}
+
+- Implemented `TestKeeper` as an implementation of the ZooKeeper interface used for testing. [\#5643](https://github.com/ClickHouse/ClickHouse/pull/5643) ([alexey-milovidov](https://github.com/alexey-milovidov)) ([levushkin aleksej](https://github.com/alexey-milovidov))
+- From now on, `.sql` tests can be run isolated by the server, in parallel, with a random database. This allows running them faster, adding new tests with custom server configurations, and being sure that different tests don't affect each other. [\#5554](https://github.com/ClickHouse/ClickHouse/pull/5554) ([Ivan](https://github.com/abyss7))
+- Removed `` and `` from performance tests. [\#5672](https://github.com/ClickHouse/ClickHouse/pull/5672) ([Olga Khvostikova](https://github.com/stavrolia))
+- Fixed the “select\_format” performance test for `Pretty` formats. [\#5642](https://github.com/ClickHouse/ClickHouse/pull/5642) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+## ClickHouse release 19.9 {#clickhouse-release-19-9}
+
+### ClickHouse release 19.9.3.31, 2019-07-05 {#clickhouse-release-19-9-3-31-2019-07-05}
+
+#### Bug Fix {#bug-fix-23}
+
+- Fixed a segfault in the Delta codec which affects columns with values less than 32 bits in size. The bug led to random memory corruption. [\#5786](https://github.com/ClickHouse/ClickHouse/pull/5786) ([alesapin](https://github.com/alesapin))
+- Fixed a rare bug in checking parts with a LowCardinality column. [\#5832](https://github.com/ClickHouse/ClickHouse/pull/5832) ([alesapin](https://github.com/alesapin))
+- Fixed a segfault in TTL merge with non-physical columns in a block. [\#5819](https://github.com/ClickHouse/ClickHouse/pull/5819) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed potential infinite sleeping of low-priority queries. [\#5842](https://github.com/ClickHouse/ClickHouse/pull/5842) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed how ClickHouse determines the default time zone as UCT instead of UTC. [\#5828](https://github.com/ClickHouse/ClickHouse/pull/5828) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a bug with executing distributed DROP/ALTER/TRUNCATE/OPTIMIZE ON CLUSTER queries on a follower replica before the leader replica. Now they will be executed directly on the leader replica. [\#5757](https://github.com/ClickHouse/ClickHouse/pull/5757) ([alesapin](https://github.com/alesapin))
+- Fixed a race condition which causes some queries to possibly not appear in query\_log instantly after a SYSTEM FLUSH LOGS query. [\#5685](https://github.com/ClickHouse/ClickHouse/pull/5685) ([Anton Popov](https://github.com/CurtizJ))
+- Added missing support for constant arguments to the `evalMLModel` function. [\#5820](https://github.com/ClickHouse/ClickHouse/pull/5820) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse release 19.9.2.4, 2019-06-24 {#clickhouse-release-19-9-2-4-2019-06-24}
+
+#### New Feature {#new-feature-8}
+
+- Print information about frozen parts in the `system.parts` table. [\#5471](https://github.com/ClickHouse/ClickHouse/pull/5471) ([proller](https://github.com/proller))
+- Ask for the client password on clickhouse-client start on a tty if it is not set in the arguments. [\#5092](https://github.com/ClickHouse/ClickHouse/pull/5092) ([proller](https://github.com/proller))
+- Implemented the `dictGet` and `dictGetOrDefault` functions for Decimal types (see the sketch after this list). [\#5394](https://github.com/ClickHouse/ClickHouse/pull/5394) ([Artem Zuikov](https://github.com/4ertus2))
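+
+A hypothetical sketch of the Decimal-typed dictionary functions above, assuming a dictionary named `prices` with a `Decimal64(2)` attribute `price` is configured:
+
+```sql
+SELECT
+    dictGet('prices', 'price', toUInt64(1)) AS price,                          -- Decimal result
+    dictGetOrDefault('prices', 'price', toUInt64(999), toDecimal64(0, 2)) AS price_or_zero;
+```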
+
+#### Improvement {#improvement-8}
+
+- Debian init: add a service stop timeout. [\#5522](https://github.com/ClickHouse/ClickHouse/pull/5522) ([proller](https://github.com/proller))
+- Added a setting, forbidden by default, for creating tables with suspicious types. [\#5448](https://github.com/ClickHouse/ClickHouse/pull/5448) ([Olga Khvostikova](https://github.com/stavrolia))
+- Regression functions return model weights when not used as state in the `evalMLMethod` function. [\#5411](https://github.com/ClickHouse/ClickHouse/pull/5411) ([Quid37](https://github.com/Quid37))
+- Renamed and improved regression methods. [\#5492](https://github.com/ClickHouse/ClickHouse/pull/5492) ([Quid37](https://github.com/Quid37))
+- Clearer interfaces of string searchers. [\#5586](https://github.com/ClickHouse/ClickHouse/pull/5586) ([Danila Kutenin](https://github.com/danlark1))
+
+#### Bug Fix {#bug-fix-24}
+
+- Fixed potential data loss in Kafka. [\#5445](https://github.com/ClickHouse/ClickHouse/pull/5445) ([Ivan](https://github.com/abyss7))
+- Fixed a potential infinite loop in the `PrettySpace` format when called with zero columns. [\#5560](https://github.com/ClickHouse/ClickHouse/pull/5560) ([Olga Khvostikova](https://github.com/stavrolia))
+- Fixed a UInt32 overflow bug in linear models. Allow evaluating ML models for non-constant model arguments. [\#5516](https://github.com/ClickHouse/ClickHouse/pull/5516) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- `ALTER TABLE ... DROP INDEX IF EXISTS ...` should not raise an exception if the provided index does not exist. [\#5524](https://github.com/ClickHouse/ClickHouse/pull/5524) ([Gleb Novikov](https://github.com/NanoBjorn))
+- Fixed a segfault with `bitmapHasAny` in scalar subqueries. [\#5528](https://github.com/ClickHouse/ClickHouse/pull/5528) ([Zhichang Yu](https://github.com/yuzhichang))
+- Fixed an error when the replication connection pool doesn't retry resolving the host, even when the DNS cache was dropped. [\#5534](https://github.com/ClickHouse/ClickHouse/pull/5534) ([alesapin](https://github.com/alesapin))
+- Fixed `ALTER ... MODIFY TTL` on ReplicatedMergeTree. [\#5539](https://github.com/ClickHouse/ClickHouse/pull/5539) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed INSERT into a Distributed table with a MATERIALIZED column. [\#5429](https://github.com/ClickHouse/ClickHouse/pull/5429) ([Azat Khuzhin](https://github.com/azat))
+- Fixed a bad alloc when truncating Join storage. [\#5437](https://github.com/ClickHouse/ClickHouse/pull/5437) ([TCeason](https://github.com/TCeason))
+- In recent versions of the tzdata package, some files are symlinks now. The current mechanism for detecting the default timezone got broken and gave wrong names for some timezones. Now at least we force the timezone name to the contents of TZ if provided. [\#5443](https://github.com/ClickHouse/ClickHouse/pull/5443) ([Ivan](https://github.com/abyss7))
+- Fixed some extremely rare cases with the MultiVolnitsky searcher when the constant needles in sum are at least 16KB long. The algorithm missed or overwrote the previous results, which can lead to an incorrect result of `multiSearchAny`. [\#5588](https://github.com/ClickHouse/ClickHouse/pull/5588) ([Danila Kutenin](https://github.com/danlark1))
+- Fixed the issue when settings for ExternalData requests couldn't use ClickHouse settings. Also, for now, the settings `date_time_input_format` and `low_cardinality_allow_in_native_format` cannot be used because of the ambiguity of names (in external data they can be interpreted as a table format, and in the query they can be a setting). [\#5455](https://github.com/ClickHouse/ClickHouse/pull/5455) ([Danila Kutenin](https://github.com/danlark1))
+- Fixed a bug when parts were removed only from FS without dropping them from ZooKeeper. [\#5520](https://github.com/ClickHouse/ClickHouse/pull/5520) ([alesapin](https://github.com/alesapin))
+- Removed debug logging from the MySQL protocol. [\#5478](https://github.com/ClickHouse/ClickHouse/pull/5478) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Skip ZNONODE during DDL query processing. [\#5489](https://github.com/ClickHouse/ClickHouse/pull/5489) ([Azat Khuzhin](https://github.com/azat))
+- Fixed mixed `UNION ALL` result column types. There were cases with inconsistent data and column types of resulting columns. [\#5503](https://github.com/ClickHouse/ClickHouse/pull/5503) ([Artem Zuikov](https://github.com/4ertus2))
+- Throw an exception on wrong integers in `dictGetT` functions instead of crashing. [\#5446](https://github.com/ClickHouse/ClickHouse/pull/5446) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed wrong element\_count and load\_factor for hashed dictionaries in the `system.dictionaries` table. [\#5440](https://github.com/ClickHouse/ClickHouse/pull/5440) ([Azat Khuzhin](https://github.com/azat))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-9}
+
+- Fixed the build without `Brotli` HTTP compression support (`ENABLE_BROTLI=OFF` cmake variable). [\#5521](https://github.com/ClickHouse/ClickHouse/pull/5521) ([Anton Yuzhaninov](https://github.com/citrin))
+- Include roaring.h as roaring/roaring.h. [\#5523](https://github.com/ClickHouse/ClickHouse/pull/5523) ([Orivej Desh](https://github.com/orivej))
+- Fixed gcc9 warnings in hyperscan (the \#line directive is evil!). [\#5546](https://github.com/ClickHouse/ClickHouse/pull/5546) ([Danila Kutenin](https://github.com/danlark1))
+- Fixed all warnings when compiling with gcc-9. Fixed some contrib issues. Fixed a gcc9 ICE and submitted it to bugzilla. [\#5498](https://github.com/ClickHouse/ClickHouse/pull/5498) ([Danila Kutenin](https://github.com/danlark1))
+- Fixed linking with lld. [\#5477](https://github.com/ClickHouse/ClickHouse/pull/5477) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Removed unused specializations in dictionaries. [\#5452](https://github.com/ClickHouse/ClickHouse/pull/5452) ([Artem Zuikov](https://github.com/4ertus2))
+- Improved performance tests for formatting and parsing tables for different types of files. [\#5497](https://github.com/ClickHouse/ClickHouse/pull/5497) ([Olga Khvostikova](https://github.com/stavrolia))
+- Fixes for parallel test runs. [\#5506](https://github.com/ClickHouse/ClickHouse/pull/5506) ([proller](https://github.com/proller))
+- Docker: use configs from clickhouse-test. [\#5531](https://github.com/ClickHouse/ClickHouse/pull/5531) ([proller](https://github.com/proller))
+- Fixed compilation for FreeBSD. [\#5447](https://github.com/ClickHouse/ClickHouse/pull/5447) ([proller](https://github.com/proller))
+- Upgraded boost to 1.70. [\#5570](https://github.com/ClickHouse/ClickHouse/pull/5570) ([proller](https://github.com/proller))
+- Fixed building clickhouse as a submodule. [\#5574](https://github.com/ClickHouse/ClickHouse/pull/5574) ([proller](https://github.com/proller))
+- Improved JSONExtract performance tests. [\#5444](https://github.com/ClickHouse/ClickHouse/pull/5444) ([Vitaly Baranov](https://github.com/vitlibar))
+
+## ClickHouse release 19.8 {#clickhouse-release-19-8}
+
+### ClickHouse release 19.8.3.8, 2019-06-11 {#clickhouse-release-19-8-3-8-2019-06-11}
+
+#### New Features {#new-features}
+
+- Added functions to work with JSON. [\#4686](https://github.com/ClickHouse/ClickHouse/pull/4686) ([hcz](https://github.com/hczhcz)) [\#5124](https://github.com/ClickHouse/ClickHouse/pull/5124). ([Vitaly Baranov](https://github.com/vitlibar))
+- Added a function basename, with behaviour similar to the basename function that exists in a lot of languages (`os.path.basename` in Python, `basename` in PHP, etc…).
Works with both a UNIX-like path and a Windows path. [\#5136](https://github.com/ClickHouse/ClickHouse/pull/5136) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Added `LIMIT n, m BY` or `LIMIT m OFFSET n BY` syntax to set an offset of n for the LIMIT BY clause (see the sketch after this list). [\#5138](https://github.com/ClickHouse/ClickHouse/pull/5138) ([Anton Popov](https://github.com/CurtizJ))
+- Added a new data type `SimpleAggregateFunction`, which allows having columns with light aggregation in an `AggregatingMergeTree`. It can only be used with simple functions like `any`, `anyLast`, `sum`, `min`, `max`. [\#4629](https://github.com/ClickHouse/ClickHouse/pull/4629) ([Boris Granveaud](https://github.com/bgranvea))
+- Added support for non-constant arguments in the function `ngramDistance`. [\#5198](https://github.com/ClickHouse/ClickHouse/pull/5198) ([Danila Kutenin](https://github.com/danlark1))
+- Added functions `skewPop`, `skewSamp`, `kurtPop` and `kurtSamp` to compute sequence skewness, sample skewness, kurtosis and sample kurtosis, respectively. [\#5200](https://github.com/ClickHouse/ClickHouse/pull/5200) ([hcz](https://github.com/hczhcz))
+- Support the rename operation for `MaterializeView` storage. [\#5209](https://github.com/ClickHouse/ClickHouse/pull/5209) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Added a server which allows connecting to ClickHouse with a MySQL client. [\#4715](https://github.com/ClickHouse/ClickHouse/pull/4715) ([Yuriy Baranov](https://github.com/yurriy))
+- Added `toDecimal*OrZero` and `toDecimal*OrNull` functions. [\#5291](https://github.com/ClickHouse/ClickHouse/pull/5291) ([Artem Zuikov](https://github.com/4ertus2))
+- Support Decimal types in the functions: `quantile`, `quantiles`, `median`, `quantileExactWeighted`, `quantilesExactWeighted`, medianExactWeighted. [\#5304](https://github.com/ClickHouse/ClickHouse/pull/5304) ([Artem Zuikov](https://github.com/4ertus2))
+- Added the `toValidUTF8` function, which replaces all invalid UTF-8 characters by replacement character � (U+FFFD). [\#5322](https://github.com/ClickHouse/ClickHouse/pull/5322) ([Danila Kutenin](https://github.com/danlark1))
+- Added the `format` function, which formats a constant pattern (a simplified Python format pattern) with the strings listed in the arguments (also in the sketch below). [\#5330](https://github.com/ClickHouse/ClickHouse/pull/5330) ([Danila Kutenin](https://github.com/danlark1))
+- Added the `system.detached_parts` table containing information about detached parts of `MergeTree` tables. [\#5353](https://github.com/ClickHouse/ClickHouse/pull/5353) ([akuzm](https://github.com/akuzm))
+- Added the `ngramSearch` function to calculate the non-symmetric difference between needle and haystack. [\#5418](https://github.com/ClickHouse/ClickHouse/pull/5418)[\#5422](https://github.com/ClickHouse/ClickHouse/pull/5422) ([Danila Kutenin](https://github.com/danlark1))
+- Implementation of basic machine learning methods (stochastic linear regression and logistic regression) using the aggregate functions interface. There are different strategies for updating model weights (simple gradient descent, momentum method, Nesterov method). Also supports mini-batches of custom size. [\#4943](https://github.com/ClickHouse/ClickHouse/pull/4943) ([Quid37](https://github.com/Quid37))
+- Implementation of the `geohashEncode` and `geohashDecode` functions. [\#5003](https://github.com/ClickHouse/ClickHouse/pull/5003) ([Vasily Nemkov](https://github.com/Enmk))
+- Added the aggregate function `timeSeriesGroupSum`, which can aggregate different time series whose sample timestamps are not aligned. It uses linear interpolation between two sample timestamps and then sums the time series together. Added the aggregate function `timeSeriesGroupRateSum`, which calculates the rate of time series and then sums the rates together. [\#4542](https://github.com/ClickHouse/ClickHouse/pull/4542) ([Yangkuan Liu](https://github.com/LiuYangkuan))
+- Added the functions `IPv4CIDRtoIPv4Range` and `IPv6CIDRtoIPv6Range` to calculate the lower and higher bounds for an IP in a subnet using CIDR. [\#5095](https://github.com/ClickHouse/ClickHouse/pull/5095) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Added an X-ClickHouse-Summary header when we send a query using HTTP with the setting `send_progress_in_http_headers` enabled. Returns the usual information of X-ClickHouse-Progress, with additional information like how many rows and bytes were inserted in the query. [\#5116](https://github.com/ClickHouse/ClickHouse/pull/5116) ([Guillaume Tassery](https://github.com/YiuRULE))
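+
+A small sketch of two of the 19.8 additions above, using inline data:
+
+```sql
+-- LIMIT ... OFFSET ... BY: per group g, skip the first row and return one row.
+SELECT number % 3 AS g, number
+FROM numbers(9)
+ORDER BY g, number
+LIMIT 1 OFFSET 1 BY g;
+
+-- format(): substitute arguments into a constant pattern.
+SELECT format('{}, {}!', 'Hello', 'world');  -- 'Hello, world!'
+```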
+
+#### Improvements {#improvements}
+
+- Added the `max_parts_in_total` setting for the MergeTree family of tables (default: 100 000) that prevents unsafe specification of the partition key \#5166. [\#5171](https://github.com/ClickHouse/ClickHouse/pull/5171) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `clickhouse-obfuscator`: derive the seed for individual columns by combining the initial seed with the column name, not the column position. This is intended to transform datasets with multiple related tables, so that tables remain JOINable after transformation. [\#5178](https://github.com/ClickHouse/ClickHouse/pull/5178) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added the functions `JSONExtractRaw`, `JSONExtractKeyAndValues`. Renamed the function `jsonExtract` to `JSONExtract`. When something goes wrong, these functions return the corresponding values, not `NULL`. Modified the function `JSONExtract`; now it gets the return type from its last parameter and doesn't inject nullables. Implemented a fallback to RapidJSON in case AVX2 instructions are not available. The simdjson library was updated to a new version. [\#5235](https://github.com/ClickHouse/ClickHouse/pull/5235) ([Vitaly Baranov](https://github.com/vitlibar))
+- Now the `if` and `multiIf` functions don't rely on the condition's `Nullable`, but rely on the branches for SQL compatibility. [\#5238](https://github.com/ClickHouse/ClickHouse/pull/5238) ([Jian Wu](https://github.com/janplus))
+- The `In` predicate now generates a `Null` result from `Null` input, like the `Equal` function. [\#5152](https://github.com/ClickHouse/ClickHouse/pull/5152) ([Jian Wu](https://github.com/janplus))
+- Check the time limit every (flush\_interval / poll\_timeout) number of rows from Kafka. This allows breaking the reading from the Kafka consumer more frequently and checking the time limits for the top-level streams. [\#5249](https://github.com/ClickHouse/ClickHouse/pull/5249) ([Ivan](https://github.com/abyss7))
+- Link rdkafka with bundled SASL. It should allow using SASL SCRAM authentication. [\#5253](https://github.com/ClickHouse/ClickHouse/pull/5253) ([Ivan](https://github.com/abyss7))
+- Batched version of RowRefList for ALL JOINS. [\#5267](https://github.com/ClickHouse/ClickHouse/pull/5267) ([Artem Zuikov](https://github.com/4ertus2))
+- clickhouse-server: more informative listen error messages. [\#5268](https://github.com/ClickHouse/ClickHouse/pull/5268) ([proller](https://github.com/proller))
+- Support dictionaries in clickhouse-copier for functions in ``. [\#5270](https://github.com/ClickHouse/ClickHouse/pull/5270) ([proller](https://github.com/proller))
+- Added a new setting `kafka_commit_every_batch` to regulate Kafka committing policy.
+ It allows setting the commit mode: after every batch of messages is handled, or after the whole block is written to the storage. It's a trade-off between losing some messages or reading them twice in some extreme situations. [\#5308](https://github.com/ClickHouse/ClickHouse/pull/5308) ([Ivan](https://github.com/abyss7))
+- Made `windowFunnel` support other unsigned integer types. [\#5320](https://github.com/ClickHouse/ClickHouse/pull/5320) ([sundyli](https://github.com/sundy-li))
+- Allow shadowing the virtual column `_table` in the Merge engine. [\#5325](https://github.com/ClickHouse/ClickHouse/pull/5325) ([Ivan](https://github.com/abyss7))
+- Made the `sequenceMatch` aggregate function support other unsigned integer types. [\#5339](https://github.com/ClickHouse/ClickHouse/pull/5339) ([sundyli](https://github.com/sundy-li))
+- Better error messages if a checksum mismatch is most likely caused by hardware failures. [\#5355](https://github.com/ClickHouse/ClickHouse/pull/5355) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Check that underlying tables support sampling for `StorageMerge`. [\#5366](https://github.com/ClickHouse/ClickHouse/pull/5366) ([Ivan](https://github.com/abyss7))
+- Сlose MySQL connections after their usage in external dictionaries. It is related to issue \#893. [\#5395](https://github.com/ClickHouse/ClickHouse/pull/5395) ([Clément Rodriguez](https://github.com/clemrodriguez))
+- Improvements of the MySQL wire protocol. Changed the name of the format to MySQLWire. Using RAII for calling RSA\_free. Disabling SSL if the context cannot be created. [\#5419](https://github.com/ClickHouse/ClickHouse/pull/5419) ([Yuriy Baranov](https://github.com/yurriy))
+- clickhouse-client: allow running with an inaccessible history file (read-only, no disk space, file is a directory, …). [\#5431](https://github.com/ClickHouse/ClickHouse/pull/5431) ([proller](https://github.com/proller))
+- Respect query settings in asynchronous INSERTs into Distributed tables. [\#4936](https://github.com/ClickHouse/ClickHouse/pull/4936) ([TCeason](https://github.com/TCeason))
+- Renamed the functions `leastSqr` to `simpleLinearRegression`, `LinearRegression` to `linearRegression`, `LogisticRegression` to `logisticRegression`. [\#5391](https://github.com/ClickHouse/ClickHouse/pull/5391) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+
+#### Performance Improvements {#performance-improvements}
+
+- Parallelized processing of parts of non-replicated MergeTree tables in the ALTER MODIFY query. [\#4639](https://github.com/ClickHouse/ClickHouse/pull/4639) ([Ivan Kush](https://github.com/IvanKush))
+- Optimizations in regular expression extraction. [\#5193](https://github.com/ClickHouse/ClickHouse/pull/5193) [\#5191](https://github.com/ClickHouse/ClickHouse/pull/5191) ([Danila Kutenin](https://github.com/danlark1))
+- Do not add right join key columns to the join result if they're used only in the join-on section. [\#5260](https://github.com/ClickHouse/ClickHouse/pull/5260) ([Artem Zuikov](https://github.com/4ertus2))
+- Freeze the Kafka buffer after the first empty response. It avoids multiple invocations of `ReadBuffer::next()` for an empty result in some row-parsing streams. [\#5283](https://github.com/ClickHouse/ClickHouse/pull/5283) ([Ivan](https://github.com/abyss7))
+- `concat` function optimization for multiple arguments. [\#5357](https://github.com/ClickHouse/ClickHouse/pull/5357) ([Danila Kutenin](https://github.com/danlark1))
+- Query optimisation. Allow pushing down IN statements while rewriting comma/cross joins into inner ones (see the sketch after this list). [\#5396](https://github.com/ClickHouse/ClickHouse/pull/5396) ([Artem Zuikov](https://github.com/4ertus2))
+- Upgraded our LZ4 implementation with the reference one to get faster decompression. [\#5070](https://github.com/ClickHouse/ClickHouse/pull/5070) ([Danila Kutenin](https://github.com/danlark1))
+- Implemented MSD radix sort (based on kxsort), and partial sorting. [\#5129](https://github.com/ClickHouse/ClickHouse/pull/5129) ([Evgenii Pravda](https://github.com/kvinty))
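+
+A minimal sketch of the comma/cross-join rewrite mentioned above; `t1` and `t2` are hypothetical tables:
+
+```sql
+-- The comma join below is rewritten to an INNER JOIN internally,
+-- which lets the IN condition be pushed down to the scan of t1.
+SELECT a.id
+FROM t1 AS a, t2 AS b
+WHERE a.id = b.id AND a.id IN (1, 2, 3);
+```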
+
+#### Bug Fixes {#bug-fixes}
+
+- Fixed pushing required columns with JOIN. [\#5192](https://github.com/ClickHouse/ClickHouse/pull/5192) ([Winter Zhang](https://github.com/zhang2014))
+- Fixed a bug when ClickHouse is run by systemd: the command `sudo service clickhouse-server forcerestart` was not working as expected. [\#5204](https://github.com/ClickHouse/ClickHouse/pull/5204) ([proller](https://github.com/proller))
+- Fixed HTTP error codes in DataPartsExchange (the interserver HTTP server on port 9009 always returned code 200, even on errors). [\#5216](https://github.com/ClickHouse/ClickHouse/pull/5216) ([proller](https://github.com/proller))
+- Fixed SimpleAggregateFunction for Strings longer than MAX\_SMALL\_STRING\_SIZE. [\#5311](https://github.com/ClickHouse/ClickHouse/pull/5311) ([Azat Khuzhin](https://github.com/azat))
+- Fixed an error in the `Decimal` to `Nullable(Decimal)` conversion. Support other Decimal-to-Decimal conversions (including different scales). [\#5350](https://github.com/ClickHouse/ClickHouse/pull/5350) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed FPU clobbering in the simdjson library that led to wrong calculation of the `uniqHLL` and `uniqCombined` aggregate functions and math functions such as `log`. [\#5354](https://github.com/ClickHouse/ClickHouse/pull/5354) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed handling of mixed const/non-const cases in JSON functions. [\#5435](https://github.com/ClickHouse/ClickHouse/pull/5435) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fixed the `retention` function. Now all conditions that are satisfied in a row of data are added to the data state. [\#5119](https://github.com/ClickHouse/ClickHouse/pull/5119) ([小路](https://github.com/nicelulu))
+- Fixed the result type of `quantileExact` with Decimals. [\#5304](https://github.com/ClickHouse/ClickHouse/pull/5304) ([Artem Zuikov](https://github.com/4ertus2))
+
+#### Documentation {#documentation}
+
+- Translated the documentation for `CollapsingMergeTree` to Chinese. [\#5168](https://github.com/ClickHouse/ClickHouse/pull/5168) ([张风啸](https://github.com/AlexZFX))
+- Translated some documentation about table engines to Chinese.
+ [\#5134](https://github.com/ClickHouse/ClickHouse/pull/5134)
+ [\#5328](https://github.com/ClickHouse/ClickHouse/pull/5328)
+ ([never lee](https://github.com/neverlee))
+
+#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements}
+
+- Fixed some sanitizer reports that show probable use-after-free. [\#5139](https://github.com/ClickHouse/ClickHouse/pull/5139) [\#5143](https://github.com/ClickHouse/ClickHouse/pull/5143) [\#5393](https://github.com/ClickHouse/ClickHouse/pull/5393) ([Ivan](https://github.com/abyss7))
+- Moved performance tests out of separate directories for convenience. [\#5158](https://github.com/ClickHouse/ClickHouse/pull/5158) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed incorrect performance tests. [\#5255](https://github.com/ClickHouse/ClickHouse/pull/5255) ([alesapin](https://github.com/alesapin))
+- Added a tool to calculate checksums caused by bit flips to debug hardware issues. [\#5334](https://github.com/ClickHouse/ClickHouse/pull/5334) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Made the runner script more usable. [\#5340](https://github.com/ClickHouse/ClickHouse/pull/5340)[\#5360](https://github.com/ClickHouse/ClickHouse/pull/5360) ([filimonov](https://github.com/filimonov))
+- Added a small instruction on how to write performance tests. [\#5408](https://github.com/ClickHouse/ClickHouse/pull/5408) ([alesapin](https://github.com/alesapin))
+- Added the ability to make substitutions in create, fill and drop queries in performance tests. [\#5367](https://github.com/ClickHouse/ClickHouse/pull/5367) ([Olga Khvostikova](https://github.com/stavrolia))
+
+## ClickHouse release 19.7 {#clickhouse-release-19-7}
+
+### ClickHouse release 19.7.5.29, 2019-07-05 {#clickhouse-release-19-7-5-29-2019-07-05}
+
+#### Bug Fix {#bug-fix-25}
+
+- Fixed a performance regression in some queries with JOIN. [\#5192](https://github.com/ClickHouse/ClickHouse/pull/5192) ([Winter Zhang](https://github.com/zhang2014))
+
+### ClickHouse release 19.7.5.27, 2019-06-09 {#clickhouse-release-19-7-5-27-2019-06-09}
+
+#### New features {#new-features-1}
+
+- Added the bitmap-related functions `bitmapHasAny` and `bitmapHasAll`, analogous to the `hasAny` and `hasAll` functions for arrays. [\#5279](https://github.com/ClickHouse/ClickHouse/pull/5279) ([Sergi Vladykin](https://github.com/svladykin))
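+
+A short sketch of the new bitmap predicates above:
+
+```sql
+SELECT
+    bitmapHasAny(bitmapBuild([1, 2, 3]), bitmapBuild([3, 4])) AS any_common,  -- 1
+    bitmapHasAll(bitmapBuild([1, 2, 3]), bitmapBuild([1, 2])) AS has_all;     -- 1
+```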
+
+#### Bug Fixes {#bug-fixes-1}
+
+- Fixed a segfault on a `minmax` index with Null values. [\#5246](https://github.com/ClickHouse/ClickHouse/pull/5246) ([Nikita Vasilev](https://github.com/nikvas0))
+- Mark all input columns in LIMIT BY as required output. It fixes the ‘Not found column’ error in some distributed queries. [\#5407](https://github.com/ClickHouse/ClickHouse/pull/5407) ([Constantin S. Pan](https://github.com/kvap))
+- Fixed the “Column ‘0’ already exists” error in `SELECT .. PREWHERE` on a column with DEFAULT. [\#5397](https://github.com/ClickHouse/ClickHouse/pull/5397) ([proller](https://github.com/proller))
+- Fixed the `ALTER MODIFY TTL` query on `ReplicatedMergeTree`. [\#5539](https://github.com/ClickHouse/ClickHouse/pull/5539/commits) ([Anton Popov](https://github.com/CurtizJ))
+- Don't crash the server when Kafka consumers have failed to start. [\#5285](https://github.com/ClickHouse/ClickHouse/pull/5285) ([Ivan](https://github.com/abyss7))
+- Fixed bitmap functions producing wrong results. [\#5359](https://github.com/ClickHouse/ClickHouse/pull/5359) ([andyyzh](https://github.com/andyyzh))
+- Fixed element\_count for hashed dictionaries (do not include duplicates). [\#5440](https://github.com/ClickHouse/ClickHouse/pull/5440) ([Azat Khuzhin](https://github.com/azat))
+- Use the contents of the environment variable TZ as the name for the timezone. It helps to correctly detect the default timezone in some cases. [\#5443](https://github.com/ClickHouse/ClickHouse/pull/5443) ([Ivan](https://github.com/abyss7))
+- Do not try to convert integers in `dictGetT` functions, because it doesn't work correctly. Throw an exception instead. [\#5446](https://github.com/ClickHouse/ClickHouse/pull/5446) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed settings in ExternalData HTTP requests. [\#5455](https://github.com/ClickHouse/ClickHouse/pull/5455) ([Danila Kutenin](https://github.com/danlark1))
+- Fixed a bug when parts were removed only from FS without dropping them from ZooKeeper. [\#5520](https://github.com/ClickHouse/ClickHouse/pull/5520) ([alesapin](https://github.com/alesapin))
+- Fixed a segmentation fault in the `bitmapHasAny` function. [\#5528](https://github.com/ClickHouse/ClickHouse/pull/5528) ([Zhichang Yu](https://github.com/yuzhichang))
+- Fixed an error when the replication connection pool doesn't retry resolving the host, even when the DNS cache was dropped. [\#5534](https://github.com/ClickHouse/ClickHouse/pull/5534) ([alesapin](https://github.com/alesapin))
+- Fixed the `DROP INDEX IF EXISTS` query. Now the `ALTER TABLE ... DROP INDEX IF EXISTS ...` query doesn't raise an exception if the provided index does not exist. [\#5524](https://github.com/ClickHouse/ClickHouse/pull/5524) ([Gleb Novikov](https://github.com/NanoBjorn))
+- Fixed union all supertype columns. There were cases with inconsistent data and column types of resulting columns. [\#5503](https://github.com/ClickHouse/ClickHouse/pull/5503) ([Artem Zuikov](https://github.com/4ertus2))
+- Skip ZNONODE during DDL query processing. Previously, if another node removed the znode in the task queue, the one that
+ didn't process it but had already gotten the list of children would terminate the DDLWorker thread. [\#5489](https://github.com/ClickHouse/ClickHouse/pull/5489) ([Azat Khuzhin](https://github.com/azat))
+- Fixed INSERT into a Distributed() table with a MATERIALIZED column. [\#5429](https://github.com/ClickHouse/ClickHouse/pull/5429) ([Azat Khuzhin](https://github.com/azat))
+
+### ClickHouse release 19.7.3.9, 2019-05-30 {#clickhouse-release-19-7-3-9-2019-05-30}
+
+#### New Features {#new-features-2}
+
+- Allow limiting the range of a setting that can be specified by the user.
+ These constraints can be set up in the user settings profile.
+ [\#4931](https://github.com/ClickHouse/ClickHouse/pull/4931) ([Vitaly
+ Baranov](https://github.com/vitlibar))
+- Added a second version of the function `groupUniqArray` with an optional
+ `max_size` parameter that limits the size of the resulting array. The
+ behavior is similar to the `groupArray(max_size)(x)` function (see the sketch after this list).
+ [\#5026](https://github.com/ClickHouse/ClickHouse/pull/5026) ([Guillaume
+ Tassery](https://github.com/YiuRULE))
+- For the TSVWithNames/CSVWithNames input file formats, the column order can now be
+ determined from the file header. This is controlled by the
+ `input_format_with_names_use_header` parameter.
+ [\#5081](https://github.com/ClickHouse/ClickHouse/pull/5081)
+ ([Alexander](https://github.com/Akazz))
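+
+A quick sketch of the capped `groupUniqArray` above:
+
+```sql
+-- Keep at most 2 distinct values per group instead of all of them.
+SELECT groupUniqArray(2)(number % 5) FROM numbers(100);
+```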
+
+#### Bug Fixes {#bug-fixes-2}
+
+- Crash with uncompressed\_cache + JOIN during merge (\#5197).
+ [\#5133](https://github.com/ClickHouse/ClickHouse/pull/5133) ([Danila
+ Kutenin](https://github.com/danlark1))
+- Segmentation fault on a clickhouse-client query to system tables. \#5066
+ [\#5127](https://github.com/ClickHouse/ClickHouse/pull/5127)
+ ([Ivan](https://github.com/abyss7))
+- Data loss on heavy load via KafkaEngine (\#4736).
+ [\#5080](https://github.com/ClickHouse/ClickHouse/pull/5080)
+ ([Ivan](https://github.com/abyss7))
+- Fixed a very rare data race condition that could happen when executing a query with UNION involving at least two SELECTs from system.columns, system.tables, system.parts, system.parts\_tables or tables of the Merge family, while concurrently performing ALTER of columns of the related tables. [\#5189](https://github.com/ClickHouse/ClickHouse/pull/5189) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Performance Improvements {#performance-improvements-1}
+
+- Use radix sort for sorting by a single numeric column in `ORDER BY` without
+ `LIMIT`. [\#5106](https://github.com/ClickHouse/ClickHouse/pull/5106),
+ [\#4439](https://github.com/ClickHouse/ClickHouse/pull/4439)
+ ([Evgenii Pravda](https://github.com/kvinty),
+ [alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Documentation {#documentation-1}
+
+- Translated the documentation for some table engines to Chinese.
+ [\#5107](https://github.com/ClickHouse/ClickHouse/pull/5107),
+ [\#5094](https://github.com/ClickHouse/ClickHouse/pull/5094),
+ [\#5087](https://github.com/ClickHouse/ClickHouse/pull/5087)
+ ([张风啸](https://github.com/AlexZFX)),
+ [\#5068](https://github.com/ClickHouse/ClickHouse/pull/5068) ([never
+ lee](https://github.com/neverlee))
+
+#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-1}
+
+- Print UTF-8 characters properly in `clickhouse-test`.
+ [\#5084](https://github.com/ClickHouse/ClickHouse/pull/5084)
+ ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added a command-line parameter for clickhouse-client to always load suggestion
+ data. [\#5102](https://github.com/ClickHouse/ClickHouse/pull/5102)
+ ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Resolved some of the PVS-Studio warnings.
+ [\#5082](https://github.com/ClickHouse/ClickHouse/pull/5082)
+ ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Updated LZ4. [\#5040](https://github.com/ClickHouse/ClickHouse/pull/5040) ([Danila
+ Kutenin](https://github.com/danlark1))
+- Added gperf to the build requirements for the upcoming pull request \#5030.
+ [\#5110](https://github.com/ClickHouse/ClickHouse/pull/5110)
+ ([proller](https://github.com/proller))
+
+## ClickHouse release 19.6 {#clickhouse-release-19-6}
+
+### ClickHouse release 19.6.3.18, 2019-06-13 {#clickhouse-release-19-6-3-18-2019-06-13}
+
+#### Bug Fixes {#bug-fixes-3}
+
+- Fixed IN condition pushdown for queries from the table functions `mysql` and `odbc` and the corresponding table engines. This fixes \#3540 and \#2384. [\#5313](https://github.com/ClickHouse/ClickHouse/pull/5313) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a deadlock in ZooKeeper. [\#5297](https://github.com/ClickHouse/ClickHouse/pull/5297) ([github1youlc](https://github.com/github1youlc))
+- Allow quoted decimals in CSV. [\#5284](https://github.com/ClickHouse/ClickHouse/pull/5284) ([Artem Zuikov](https://github.com/4ertus2))
+- Disallow conversion from float Inf/NaN into Decimals (throw an exception). [\#5282](https://github.com/ClickHouse/ClickHouse/pull/5282) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed a data race in the rename query. [\#5247](https://github.com/ClickHouse/ClickHouse/pull/5247) ([Winter Zhang](https://github.com/zhang2014))
+- Temporarily disabled LFAlloc. Usage of LFAlloc might lead to a lot of MAP\_FAILED when allocating the UncompressedCache and, as a result, to crashes of queries on highly loaded servers. [cfdba93](https://github.com/ClickHouse/ClickHouse/commit/cfdba938ce22f16efeec504f7f90206a515b1280) ([Danila Kutenin](https://github.com/danlark1))
+
+### ClickHouse release 19.6.2.11, 2019-05-13 {#clickhouse-release-19-6-2-11-2019-05-13}
+
+#### New Features {#new-features-3}
+
+- TTL expressions for columns and tables (see the sketch after this list). [\#4212](https://github.com/ClickHouse/ClickHouse/pull/4212) ([Anton Popov](https://github.com/CurtizJ))
+- Added support for `brotli` compression of HTTP responses (Accept-Encoding: br). [\#4388](https://github.com/ClickHouse/ClickHouse/pull/4388) ([Mikhail](https://github.com/fandyushin))
+- Added a new function `isValidUTF8` for checking whether a set of bytes is correctly utf-8 encoded. [\#4934](https://github.com/ClickHouse/ClickHouse/pull/4934) ([Danila Kutenin](https://github.com/danlark1))
+- Added a new load balancing policy `first_or_random` which sends queries to the first specified host and, if it's inaccessible, sends queries to random hosts of the shard. Useful for cross-replication topology setups. [\#5012](https://github.com/ClickHouse/ClickHouse/pull/5012) ([nvartolomei](https://github.com/nvartolomei))
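+
+A minimal sketch of the column and table TTLs introduced above; the table name is hypothetical:
+
+```sql
+CREATE TABLE events
+(
+    d Date,
+    payload String TTL d + INTERVAL 1 WEEK   -- column TTL: values are reset after a week
+)
+ENGINE = MergeTree
+ORDER BY d
+TTL d + INTERVAL 1 MONTH;                     -- table TTL: rows are deleted after a month
+```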
+#### Improvements {#improvements-1}
+
+- Added support for non-constant and negative size and length arguments for the function `substringUTF8`. [\#4989](https://github.com/ClickHouse/ClickHouse/pull/4989) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Disable push-down to the right table in LEFT JOIN, to the left table in RIGHT JOIN, and to both tables in FULL JOIN. This fixes wrong JOIN results in some cases. [\#4846](https://github.com/ClickHouse/ClickHouse/pull/4846) ([Ivan](https://github.com/abyss7))
+- `clickhouse-copier`: auto-upload the task configuration from the `--task-file` option. [\#4876](https://github.com/ClickHouse/ClickHouse/pull/4876) ([proller](https://github.com/proller))
+- Added a typo handler for the storage factory and the table functions factory. [\#4891](https://github.com/ClickHouse/ClickHouse/pull/4891) ([Danila Kutenin](https://github.com/danlark1))
+- Support asterisks and qualified asterisks for multiple JOINs without subqueries. [\#4898](https://github.com/ClickHouse/ClickHouse/pull/4898) ([Artem Zuikov](https://github.com/4ertus2))
+- Make the missing-column error message more user friendly. [\#4915](https://github.com/ClickHouse/ClickHouse/pull/4915) ([Artem Zuikov](https://github.com/4ertus2))
+
+#### Performance Improvements {#performance-improvements-2}
+
+- Significant speedup of ASOF JOIN. [\#4924](https://github.com/ClickHouse/ClickHouse/pull/4924) ([Martijn Bakker](https://github.com/Gladdy))
+
+#### Backward Incompatible Changes {#backward-incompatible-changes}
+
+- The HTTP header `Query-Id` was renamed to `X-ClickHouse-Query-Id` for consistency. [\#4972](https://github.com/ClickHouse/ClickHouse/pull/4972) ([Mikhail](https://github.com/fandyushin))
+
+#### Bug Fixes {#bug-fixes-4}
+
+- Fixed a potential null pointer dereference in `clickhouse-copier`. [\#4900](https://github.com/ClickHouse/ClickHouse/pull/4900) ([proller](https://github.com/proller))
+- Fixed an error in queries with JOIN + ARRAY JOIN. [\#4938](https://github.com/ClickHouse/ClickHouse/pull/4938) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed hanging on server startup when a dictionary depends on another dictionary via a database with ENGINE = Dictionary. [\#4962](https://github.com/ClickHouse/ClickHouse/pull/4962) ([Vitaly Baranov](https://github.com/vitlibar))
+- Partially fix distributed_product_mode = local. It's possible to allow columns of local tables in where/having/order by/… via table aliases. Throw an exception if the table does not have an alias. It is not yet possible to access the columns without table aliases. [\#4986](https://github.com/ClickHouse/ClickHouse/pull/4986) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix a potentially wrong result for `SELECT DISTINCT` with `JOIN`. [\#5001](https://github.com/ClickHouse/ClickHouse/pull/5001) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed a very rare data race condition that could happen when executing a query with UNION ALL involving at least two SELECTs from system.columns, system.tables, system.parts, system.parts_tables or tables of the Merge family, while performing ALTER of columns of the related tables concurrently. [\#5189](https://github.com/ClickHouse/ClickHouse/pull/5189) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-2}
+
+- Fixed test failures when running clickhouse-server on a different host. [\#4713](https://github.com/ClickHouse/ClickHouse/pull/4713) ([Vasily Nemkov](https://github.com/Enmk))
+- clickhouse-test: disable color control sequences in non-tty environments. [\#4937](https://github.com/ClickHouse/ClickHouse/pull/4937) ([alesapin](https://github.com/alesapin))
+- clickhouse-test: allow using any test database (remove the `test.` qualification where possible). [\#5008](https://github.com/ClickHouse/ClickHouse/pull/5008) ([proller](https://github.com/proller))
+- Fix ubsan errors. [\#5037](https://github.com/ClickHouse/ClickHouse/pull/5037) ([Vitaly Baranov](https://github.com/vitlibar))
+- Yandex LFAlloc was added to ClickHouse to allocate MarkCache and UncompressedCache data in different ways, to catch segfaults more reliably. [\#4995](https://github.com/ClickHouse/ClickHouse/pull/4995) ([Danila Kutenin](https://github.com/danlark1))
+- A Python util to help with backports and changelogs. [\#4949](https://github.com/ClickHouse/ClickHouse/pull/4949) ([Ivan](https://github.com/abyss7))
+
+## ClickHouse Release 19.5 {#clickhouse-release-19-5}
+
+### ClickHouse Release 19.5.4.22, 2019-05-13 {#clickhouse-release-19-5-4-22-2019-05-13}
+
+#### Bug Fixes {#bug-fixes-5}
+
+- Fixed a possible crash in bitmap\* functions. [\#5220](https://github.com/ClickHouse/ClickHouse/pull/5220) [\#5228](https://github.com/ClickHouse/ClickHouse/pull/5228) ([Andy Yang](https://github.com/andyyzh))
+- Fixed a very rare data race condition that could happen when executing a query with UNION ALL involving at least two SELECTs from system.columns, system.tables, system.parts, system.parts_tables or tables of the Merge family, while performing ALTER of columns of the related tables concurrently. [\#5189](https://github.com/ClickHouse/ClickHouse/pull/5189) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed the error `Set for IN is not created yet in case of using single LowCardinality column in the left part of IN`. This error happened if a LowCardinality column was part of the primary key. \#5031 [\#5154](https://github.com/ClickHouse/ClickHouse/pull/5154) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Modification of the retention function: if a row satisfies both the first and the N-th condition, only the first satisfied condition was added to the data state. Now all conditions that a row satisfies are added to the data state. [\#5119](https://github.com/ClickHouse/ClickHouse/pull/5119) ([小路](https://github.com/nicelulu))
+
+### ClickHouse Release 19.5.3.8, 2019-04-18 {#clickhouse-release-19-5-3-8-2019-04-18}
+
+#### Bug Fixes {#bug-fixes-6}
+
+- Fixed the type of the setting `max_partitions_per_insert_block` from boolean to UInt64. [\#5028](https://github.com/ClickHouse/ClickHouse/pull/5028) ([Mohammad Hossein Sekhavat](https://github.com/mhsekhavat))
+
+### ClickHouse Release 19.5.2.6, 2019-04-15 {#clickhouse-release-19-5-2-6-2019-04-15}
+
+#### New Features {#new-features-4}
+
+- [Hyperscan](https://github.com/intel/hyperscan) multiple regular expression matching was added (functions `multiMatchAny`, `multiMatchAnyIndex`, `multiFuzzyMatchAny`, `multiFuzzyMatchAnyIndex`). [\#4780](https://github.com/ClickHouse/ClickHouse/pull/4780), [\#4841](https://github.com/ClickHouse/ClickHouse/pull/4841) ([Danila Kutenin](https://github.com/danlark1))
+- The `multiSearchFirstPosition` function was added. [\#4780](https://github.com/ClickHouse/ClickHouse/pull/4780) ([Danila Kutenin](https://github.com/danlark1))
+- Implement a predefined per-row expression filter for tables. [\#4792](https://github.com/ClickHouse/ClickHouse/pull/4792) ([Ivan](https://github.com/abyss7))
+- A new type of data skipping index based on Bloom filters (can be used for the `equal`, `in` and `like` functions). [\#4499](https://github.com/ClickHouse/ClickHouse/pull/4499) ([Nikita Vasilev](https://github.com/nikvas0))
+- Added `ASOF JOIN`, which allows running queries that join to the most recent value known; see the sketch after this list. [\#4774](https://github.com/ClickHouse/ClickHouse/pull/4774) [\#4867](https://github.com/ClickHouse/ClickHouse/pull/4867) [\#4863](https://github.com/ClickHouse/ClickHouse/pull/4863) [\#4875](https://github.com/ClickHouse/ClickHouse/pull/4875) ([Martijn Bakker](https://github.com/Gladdy), [Artem Zuikov](https://github.com/4ertus2))
+- Rewrite multiple `COMMA JOIN`s to `CROSS JOIN`, then rewrite them to `INNER JOIN` if possible. [\#4661](https://github.com/ClickHouse/ClickHouse/pull/4661) ([Artem Zuikov](https://github.com/4ertus2))
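+
+A hedged sketch of `ASOF JOIN` (the tables and columns are hypothetical). With the `USING` form, the last listed column is matched by the closest left-hand value that is less than or equal to it:
+
+```sql
+-- For every trade, attach the most recent quote at or before the trade time.
+SELECT t.symbol, t.time, t.price, q.bid
+FROM trades AS t
+ASOF JOIN quotes AS q USING (symbol, time);
+```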
+#### Improvements {#improvement-9}
+
+- `topK` and `topKWeighted` now support a custom `loadFactor` (fixes issue [\#4252](https://github.com/ClickHouse/ClickHouse/issues/4252)). [\#4634](https://github.com/ClickHouse/ClickHouse/pull/4634) ([Kirill Danshin](https://github.com/kirillDanshin))
+- Allow using `parallel_replicas_count > 1` even for tables without sampling (the setting is simply ignored for them). In previous versions it led to an exception. [\#4637](https://github.com/ClickHouse/ClickHouse/pull/4637) ([Alexey Elymanov](https://github.com/digitalist))
+- Support `CREATE OR REPLACE VIEW`. Allows creating a view or setting a new definition in a single statement; see the sketch after this list. [\#4654](https://github.com/ClickHouse/ClickHouse/pull/4654) ([Boris Granveaud](https://github.com/bgranvea))
+- The `Buffer` table engine now supports `PREWHERE`. [\#4671](https://github.com/ClickHouse/ClickHouse/pull/4671) ([Yangkuan Liu](https://github.com/LiuYangkuan))
+- Added the ability to start a replicated table without metadata in ZooKeeper in `readonly` mode. [\#4691](https://github.com/ClickHouse/ClickHouse/pull/4691) ([alesapin](https://github.com/alesapin))
+- Fixed flicker of the progress bar in clickhouse-client. The issue was most noticeable when using `FORMAT Null` with streaming queries. [\#4811](https://github.com/ClickHouse/ClickHouse/pull/4811) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Allow disabling functions based on the `hyperscan` library on a per-user basis, to limit potentially excessive and uncontrolled resource usage. [\#4816](https://github.com/ClickHouse/ClickHouse/pull/4816) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add version number logging in all errors. [\#4824](https://github.com/ClickHouse/ClickHouse/pull/4824) ([proller](https://github.com/proller))
+- Added a restriction to the `multiMatch` functions that requires the string size to fit into `unsigned int`. Also added a limit on the number of arguments of the `multiSearch` functions. [\#4834](https://github.com/ClickHouse/ClickHouse/pull/4834) ([Danila Kutenin](https://github.com/danlark1))
+- Improved usage of scratch space and error handling in Hyperscan. [\#4866](https://github.com/ClickHouse/ClickHouse/pull/4866) ([Danila Kutenin](https://github.com/danlark1))
+- Fill `system.graphite_retentions` from the table config of `*GraphiteMergeTree` engine tables. [\#4584](https://github.com/ClickHouse/ClickHouse/pull/4584) ([Mikhail f. Shiryaev](https://github.com/Felixoid))
+- Rename the `trigramDistance` function to `ngramDistance` and add more functions with `CaseInsensitive` and `UTF`. [\#4602](https://github.com/ClickHouse/ClickHouse/pull/4602) ([Danila Kutenin](https://github.com/danlark1))
+- Improved data skipping indices calculation. [\#4640](https://github.com/ClickHouse/ClickHouse/pull/4640) ([Nikita Vasilev](https://github.com/nikvas0))
+- Keep ordinary, `DEFAULT`, `MATERIALIZED` and `ALIAS` columns in a single list (fixes issue [\#2867](https://github.com/ClickHouse/ClickHouse/issues/2867)). [\#4707](https://github.com/ClickHouse/ClickHouse/pull/4707) ([Alex Zatelepin](https://github.com/ztlpn))
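+
+A minimal sketch (the view and table names are hypothetical):
+
+```sql
+-- Atomically swap in a new definition; works whether or not the view already exists.
+CREATE OR REPLACE VIEW user_totals AS
+SELECT user_id, sum(amount) AS total
+FROM payments
+GROUP BY user_id;
+```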
+#### Bug Fixes {#bug-fix-26}
+
+- Avoid `std::terminate` in case of memory allocation failure. Now the `std::bad_alloc` exception is thrown as expected. [\#4665](https://github.com/ClickHouse/ClickHouse/pull/4665) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixes capnproto reading from a buffer. Sometimes files were not loaded successfully over HTTP. [\#4674](https://github.com/ClickHouse/ClickHouse/pull/4674) ([Vladislav](https://github.com/smirnov-vs))
+- Fix the error `Unknown log entry type: 0` after an `OPTIMIZE TABLE FINAL` query. [\#4683](https://github.com/ClickHouse/ClickHouse/pull/4683) ([Amos Bird](https://github.com/amosbird))
+- Wrong arguments to the `hasAny` or `hasAll` functions could lead to a segfault. [\#4698](https://github.com/ClickHouse/ClickHouse/pull/4698) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- A deadlock could happen while executing a `DROP DATABASE dictionary` query. [\#4701](https://github.com/ClickHouse/ClickHouse/pull/4701) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix undefined behavior in the `median` and `quantile` functions. [\#4702](https://github.com/ClickHouse/ClickHouse/pull/4702) ([hcz](https://github.com/hczhcz))
+- Fix compression level detection when `network_compression_method` is in lowercase. Broken in v19.1. [\#4706](https://github.com/ClickHouse/ClickHouse/pull/4706) ([proller](https://github.com/proller))
+- Fixed ignorance of the `UTC` setting (fixes issue [\#4658](https://github.com/ClickHouse/ClickHouse/issues/4658)). [\#4718](https://github.com/ClickHouse/ClickHouse/pull/4718) ([proller](https://github.com/proller))
+- Fix the behavior of the `histogram` function with `Distributed` tables. [\#4741](https://github.com/ClickHouse/ClickHouse/pull/4741) ([olegkv](https://github.com/olegkv))
+- Fixed the TSan report `destroy of a locked mutex`. [\#4742](https://github.com/ClickHouse/ClickHouse/pull/4742) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a TSan report about shutdown due to a race condition in system logs usage. Fixed potential use-after-free on shutdown when part\_log is enabled. [\#4758](https://github.com/ClickHouse/ClickHouse/pull/4758) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix rechecking of parts in `ReplicatedMergeTreeAlterThread` in case of an error. [\#4772](https://github.com/ClickHouse/ClickHouse/pull/4772) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Arithmetic operations on intermediate aggregate function states did not work for constant arguments (such as subquery results). [\#4776](https://github.com/ClickHouse/ClickHouse/pull/4776) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Always backquote column names in metadata. Otherwise it was impossible to create a table with a column named `index` (the server would not restart due to a malformed `ATTACH` query in metadata). [\#4782](https://github.com/ClickHouse/ClickHouse/pull/4782) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix a crash in `ALTER ... MODIFY ORDER BY` on a `Distributed` table. [\#4790](https://github.com/ClickHouse/ClickHouse/pull/4790) ([TCeason](https://github.com/TCeason))
+- Fix a segfault in `JOIN ON` with `enable_optimize_predicate_expression` enabled. [\#4794](https://github.com/ClickHouse/ClickHouse/pull/4794) ([Winter Zhang](https://github.com/zhang2014))
+- Fix a bug that added an extraneous row after consuming a protobuf message from Kafka. [\#4808](https://github.com/ClickHouse/ClickHouse/pull/4808) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fix a crash of `JOIN` on not-nullable vs nullable columns. Fix `NULLs` in right keys in `ANY JOIN` + `join_use_nulls`. [\#4815](https://github.com/ClickHouse/ClickHouse/pull/4815) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix a segmentation fault in `clickhouse-copier`. [\#4835](https://github.com/ClickHouse/ClickHouse/pull/4835) ([proller](https://github.com/proller))
+- Fixed a race condition in `SELECT` from `system.tables` if the table is renamed or altered concurrently. [\#4836](https://github.com/ClickHouse/ClickHouse/pull/4836) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a data race when fetching a data part that is already obsolete. [\#4839](https://github.com/ClickHouse/ClickHouse/pull/4839) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a rare data race that could happen during `RENAME` of a MergeTree-family table. [\#4844](https://github.com/ClickHouse/ClickHouse/pull/4844) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a segmentation fault in the `arrayIntersect` function. A segmentation fault could happen if the function was called with mixed constant and ordinary arguments. [\#4847](https://github.com/ClickHouse/ClickHouse/pull/4847) ([Lixiang Qian](https://github.com/fancyqlx))
+- Fixed reading from an `Array(LowCardinality)` column in the rare case when the column contained a long sequence of empty arrays. [\#4850](https://github.com/ClickHouse/ClickHouse/pull/4850) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix a crash in `FULL/RIGHT JOIN` when joining nullable vs not-nullable. [\#4855](https://github.com/ClickHouse/ClickHouse/pull/4855) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix the `No message received` exception while fetching parts between replicas. [\#4856](https://github.com/ClickHouse/ClickHouse/pull/4856) ([alesapin](https://github.com/alesapin))
+- Fixed a bug of the `arrayIntersect` function leading to a wrong result in the case of several repeated values in a single array. [\#4871](https://github.com/ClickHouse/ClickHouse/pull/4871) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix a race condition during concurrent `ALTER COLUMN` queries that could lead to a server crash (fixes issue [\#3421](https://github.com/ClickHouse/ClickHouse/issues/3421)). [\#4592](https://github.com/ClickHouse/ClickHouse/pull/4592) ([Alex Zatelepin](https://github.com/ztlpn))
+- Fix an incorrect result in `FULL/RIGHT JOIN` with a const column. [\#4723](https://github.com/ClickHouse/ClickHouse/pull/4723) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix duplicates in `GLOBAL JOIN` with an asterisk. [\#4705](https://github.com/ClickHouse/ClickHouse/pull/4705) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix parameter deduction in `ALTER MODIFY` of a column with `CODEC` when the column type is not specified. [\#4883](https://github.com/ClickHouse/ClickHouse/pull/4883) ([alesapin](https://github.com/alesapin))
+- The `cutQueryStringAndFragment()` and `queryStringAndFragment()` functions now work correctly when the `URL` contains a fragment and no query. [\#4894](https://github.com/ClickHouse/ClickHouse/pull/4894) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fix a rare bug when setting `min_bytes_to_use_direct_io` to a value greater than zero, which occurred when a thread had to seek backward in a column file. [\#4897](https://github.com/ClickHouse/ClickHouse/pull/4897) ([alesapin](https://github.com/alesapin))
+- Fix wrong argument types for aggregate functions with `LowCardinality` arguments (fixes issue [\#4919](https://github.com/ClickHouse/ClickHouse/issues/4919)). [\#4922](https://github.com/ClickHouse/ClickHouse/pull/4922) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix wrong name qualification in `GLOBAL JOIN`. [\#4969](https://github.com/ClickHouse/ClickHouse/pull/4969) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix the `toISOWeek` function result for the year 1970. [\#4988](https://github.com/ClickHouse/ClickHouse/pull/4988) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix duplication of `DROP`, `TRUNCATE` and `OPTIMIZE` queries when executed `ON CLUSTER` for the `ReplicatedMergeTree*` table family. [\#4991](https://github.com/ClickHouse/ClickHouse/pull/4991) ([alesapin](https://github.com/alesapin))
+
+#### Backward Incompatible Changes {#backward-incompatible-change-8}
+
+- Renamed the setting `insert_sample_with_metadata` to `input_format_defaults_for_omitted_fields`. [\#4771](https://github.com/ClickHouse/ClickHouse/pull/4771) ([Artem Zuikov](https://github.com/4ertus2))
+- Added the setting `max_partitions_per_insert_block` (with value 100 by default). If an inserted block contains a larger number of partitions, an exception is thrown. Set it to 0 if you want to remove the limit (not recommended); see the sketch after this list. [\#4845](https://github.com/ClickHouse/ClickHouse/pull/4845) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- The multi-search functions were renamed (`multiPosition` to `multiSearchAllPositions`, `multiSearch` to `multiSearchAny`, `firstMatch` to `multiSearchFirstIndex`). [\#4780](https://github.com/ClickHouse/ClickHouse/pull/4780) ([Danila Kutenin](https://github.com/danlark1))
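+
+A hedged sketch of adjusting the limit for a one-off backfill (the table names are hypothetical):
+
+```sql
+-- Raise the per-block partition limit for this session only; 0 would disable the check.
+SET max_partitions_per_insert_block = 500;
+INSERT INTO events SELECT * FROM events_staging;
+```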
+#### Performance Improvements {#performance-improvement-6}
+
+- Optimize the Volnitsky searcher by inlining, giving about a 5–10% search improvement for queries with many needles or many similar bigrams. [\#4862](https://github.com/ClickHouse/ClickHouse/pull/4862) ([Danila Kutenin](https://github.com/danlark1))
+- Fix a performance issue when `use_uncompressed_cache` is set greater than zero, which appeared when all read data was contained in the cache. [\#4913](https://github.com/ClickHouse/ClickHouse/pull/4913) ([alesapin](https://github.com/alesapin))
+
+#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvement-10}
+
+- Hardened debug build: more granular memory mappings and ASLR; added memory protection for the mark cache and the index. This allows finding more memory-stomping bugs in case ASan and MSan cannot do it. [\#4632](https://github.com/ClickHouse/ClickHouse/pull/4632) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add support for the cmake variables `ENABLE_PROTOBUF`, `ENABLE_PARQUET` and `ENABLE_BROTLI`, which allow enabling/disabling the above features (the same as we can do for librdkafka, mysql, etc.). [\#4669](https://github.com/ClickHouse/ClickHouse/pull/4669) ([Silviu Caragea](https://github.com/silviucpp))
+- Added the ability to print the process list and stack traces of all threads if some queries hang after a test run. [\#4675](https://github.com/ClickHouse/ClickHouse/pull/4675) ([alesapin](https://github.com/alesapin))
+- Added retries on a `Connection loss` error in `clickhouse-test`. [\#4682](https://github.com/ClickHouse/ClickHouse/pull/4682) ([alesapin](https://github.com/alesapin))
+- Added a FreeBSD build with vagrant and a build with thread sanitizer to the packager script. [\#4712](https://github.com/ClickHouse/ClickHouse/pull/4712) [\#4748](https://github.com/ClickHouse/ClickHouse/pull/4748) ([alesapin](https://github.com/alesapin))
+- Now the user is asked for the password of the user `'default'` during installation. [\#4725](https://github.com/ClickHouse/ClickHouse/pull/4725) ([proller](https://github.com/proller))
+- Suppress warnings in the `rdkafka` library. [\#4740](https://github.com/ClickHouse/ClickHouse/pull/4740) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Allow building without SSL. [\#4750](https://github.com/ClickHouse/ClickHouse/pull/4750) ([proller](https://github.com/proller))
+- Added a way to launch the clickhouse-server image from a custom user. [\#4753](https://github.com/ClickHouse/ClickHouse/pull/4753) ([Mikhail f. Shiryaev](https://github.com/Felixoid))
+- Upgrade contrib boost to 1.69. [\#4793](https://github.com/ClickHouse/ClickHouse/pull/4793) ([proller](https://github.com/proller))
+- Disable usage of `mremap` when compiled with thread sanitizer. Surprisingly enough, TSan does not intercept `mremap` (though it does intercept `mmap` and `munmap`), which leads to false positives. Fixed a TSan report in stateful tests. [\#4859](https://github.com/ClickHouse/ClickHouse/pull/4859) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add test checks for using format schemas via the HTTP interface. [\#4864](https://github.com/ClickHouse/ClickHouse/pull/4864) ([Vitaly Baranov](https://github.com/vitlibar))
+
+## ClickHouse Release 19.4 {#clickhouse-release-19-4}
+
+### ClickHouse Release 19.4.4.33, 2019-04-17 {#clickhouse-release-19-4-4-33-2019-04-17}
+
+#### Bug Fixes {#bug-fixes-7}
+
+- Avoid `std::terminate` in case of memory allocation failure. Now the `std::bad_alloc` exception is thrown as expected. [\#4665](https://github.com/ClickHouse/ClickHouse/pull/4665) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixes capnproto reading from a buffer. Sometimes files were not loaded successfully over HTTP. [\#4674](https://github.com/ClickHouse/ClickHouse/pull/4674) ([Vladislav](https://github.com/smirnov-vs))
+- Fix the error `Unknown log entry type: 0` after an `OPTIMIZE TABLE FINAL` query. [\#4683](https://github.com/ClickHouse/ClickHouse/pull/4683) ([Amos Bird](https://github.com/amosbird))
+- Wrong arguments to the `hasAny` or `hasAll` functions could lead to a segfault. [\#4698](https://github.com/ClickHouse/ClickHouse/pull/4698) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- A deadlock could happen while executing a `DROP DATABASE dictionary` query. [\#4701](https://github.com/ClickHouse/ClickHouse/pull/4701) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix undefined behavior in the `median` and `quantile` functions. [\#4702](https://github.com/ClickHouse/ClickHouse/pull/4702) ([hcz](https://github.com/hczhcz))
+- Fix compression level detection when `network_compression_method` is in lowercase. Broken in v19.1. [\#4706](https://github.com/ClickHouse/ClickHouse/pull/4706) ([proller](https://github.com/proller))
+- Fixed ignorance of the `UTC` setting (fixes issue [\#4658](https://github.com/ClickHouse/ClickHouse/issues/4658)). [\#4718](https://github.com/ClickHouse/ClickHouse/pull/4718) ([proller](https://github.com/proller))
+- Fix the behavior of the `histogram` function with `Distributed` tables. [\#4741](https://github.com/ClickHouse/ClickHouse/pull/4741) ([olegkv](https://github.com/olegkv))
+- Fixed the TSan report `destroy of a locked mutex`. [\#4742](https://github.com/ClickHouse/ClickHouse/pull/4742) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a TSan report about shutdown due to a race condition in system logs usage. Fixed potential use-after-free on shutdown when part\_log is enabled. [\#4758](https://github.com/ClickHouse/ClickHouse/pull/4758) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix rechecking of parts in `ReplicatedMergeTreeAlterThread` in case of an error. [\#4772](https://github.com/ClickHouse/ClickHouse/pull/4772) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Arithmetic operations on intermediate aggregate function states did not work for constant arguments (such as subquery results). [\#4776](https://github.com/ClickHouse/ClickHouse/pull/4776) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Always backquote column names in metadata. Otherwise it was impossible to create a table with a column named `index` (the server would not restart due to a malformed `ATTACH` query in metadata). [\#4782](https://github.com/ClickHouse/ClickHouse/pull/4782) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix a crash in `ALTER ... MODIFY ORDER BY` on a `Distributed` table. [\#4790](https://github.com/ClickHouse/ClickHouse/pull/4790) ([TCeason](https://github.com/TCeason))
+- Fix a segfault in `JOIN ON` with `enable_optimize_predicate_expression` enabled. [\#4794](https://github.com/ClickHouse/ClickHouse/pull/4794) ([Winter Zhang](https://github.com/zhang2014))
+- Fix a bug that added an extraneous row after consuming a protobuf message from Kafka. [\#4808](https://github.com/ClickHouse/ClickHouse/pull/4808) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fix a segmentation fault in `clickhouse-copier`. [\#4835](https://github.com/ClickHouse/ClickHouse/pull/4835) ([proller](https://github.com/proller))
+- Fixed a race condition in `SELECT` from `system.tables` if the table is renamed or altered concurrently. [\#4836](https://github.com/ClickHouse/ClickHouse/pull/4836) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a data race when fetching a data part that is already obsolete. [\#4839](https://github.com/ClickHouse/ClickHouse/pull/4839) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a rare data race that could happen during `RENAME` of a MergeTree-family table. [\#4844](https://github.com/ClickHouse/ClickHouse/pull/4844) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a segmentation fault in the `arrayIntersect` function. A segmentation fault could happen if the function was called with mixed constant and ordinary arguments. [\#4847](https://github.com/ClickHouse/ClickHouse/pull/4847) ([Lixiang Qian](https://github.com/fancyqlx))
+- Fixed reading from an `Array(LowCardinality)` column in the rare case when the column contained a long sequence of empty arrays. [\#4850](https://github.com/ClickHouse/ClickHouse/pull/4850) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix the `No message received` exception while fetching parts between replicas. [\#4856](https://github.com/ClickHouse/ClickHouse/pull/4856) ([alesapin](https://github.com/alesapin))
+- Fixed a bug of the `arrayIntersect` function leading to a wrong result in the case of several repeated values in a single array. [\#4871](https://github.com/ClickHouse/ClickHouse/pull/4871) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix a race condition during concurrent `ALTER COLUMN` queries that could lead to a server crash (fixes issue [\#3421](https://github.com/ClickHouse/ClickHouse/issues/3421)). [\#4592](https://github.com/ClickHouse/ClickHouse/pull/4592) ([Alex Zatelepin](https://github.com/ztlpn))
+- Fix parameter deduction in `ALTER MODIFY` of a column with `CODEC` when the column type is not specified. [\#4883](https://github.com/ClickHouse/ClickHouse/pull/4883) ([alesapin](https://github.com/alesapin))
+- The `cutQueryStringAndFragment()` and `queryStringAndFragment()` functions now work correctly when the `URL` contains a fragment and no query. [\#4894](https://github.com/ClickHouse/ClickHouse/pull/4894) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fix a rare bug when setting `min_bytes_to_use_direct_io` to a value greater than zero, which occurred when a thread had to seek backward in a column file. [\#4897](https://github.com/ClickHouse/ClickHouse/pull/4897) ([alesapin](https://github.com/alesapin))
+- Fix wrong argument types for aggregate functions with `LowCardinality` arguments (fixes issue [\#4919](https://github.com/ClickHouse/ClickHouse/issues/4919)). [\#4922](https://github.com/ClickHouse/ClickHouse/pull/4922) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix the `toISOWeek` function result for the year 1970. [\#4988](https://github.com/ClickHouse/ClickHouse/pull/4988) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix duplication of `DROP`, `TRUNCATE` and `OPTIMIZE` queries when executed `ON CLUSTER` for the `ReplicatedMergeTree*` table family. [\#4991](https://github.com/ClickHouse/ClickHouse/pull/4991) ([alesapin](https://github.com/alesapin))
+
+#### Improvements {#improvements-2}
+
+- Keep ordinary, `DEFAULT`, `MATERIALIZED` and `ALIAS` columns in a single list (fixes issue [\#2867](https://github.com/ClickHouse/ClickHouse/issues/2867)). [\#4707](https://github.com/ClickHouse/ClickHouse/pull/4707) ([Alex Zatelepin](https://github.com/ztlpn))
+
+### ClickHouse Release 19.4.3.11, 2019-04-02 {#clickhouse-release-19-4-3-11-2019-04-02}
+
+#### Bug Fixes {#bug-fixes-8}
+
+- Fix a crash in `FULL/RIGHT JOIN` when joining nullable vs not-nullable. [\#4855](https://github.com/ClickHouse/ClickHouse/pull/4855) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix a segmentation fault in `clickhouse-copier`. [\#4835](https://github.com/ClickHouse/ClickHouse/pull/4835) ([proller](https://github.com/proller))
+
+#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvement-11}
+
+- Added a way to launch the clickhouse-server image from a custom user. [\#4753](https://github.com/ClickHouse/ClickHouse/pull/4753) ([Mikhail f. Shiryaev](https://github.com/Felixoid))
+### ClickHouse Release 19.4.2.7, 2019-03-30 {#clickhouse-release-19-4-2-7-2019-03-30}
+
+#### Bug Fixes {#bug-fixes-9}
+
+- Fixed reading from an `Array(LowCardinality)` column in the rare case when the column contained a long sequence of empty arrays. [\#4850](https://github.com/ClickHouse/ClickHouse/pull/4850) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+
+### ClickHouse Release 19.4.1.3, 2019-03-19 {#clickhouse-release-19-4-1-3-2019-03-19}
+
+#### Bug Fixes {#bug-fixes-10}
+
+- Fixed remote queries that contain both `LIMIT BY` and `LIMIT`. Previously, if `LIMIT BY` and `LIMIT` were used for a remote query, `LIMIT` could happen before `LIMIT BY`, which led to a result that was filtered too much. [\#4708](https://github.com/ClickHouse/ClickHouse/pull/4708) ([Konstantin Pan](https://github.com/kvap))
+
+### ClickHouse Release 19.4.0.49, 2019-03-09 {#clickhouse-release-19-4-0-49-2019-03-09}
+
+#### New Features {#new-features-5}
+
+- Added full support for the `Protobuf` format (input and output, nested data structures). [\#4174](https://github.com/ClickHouse/ClickHouse/pull/4174) [\#4493](https://github.com/ClickHouse/ClickHouse/pull/4493) ([Vitaly Baranov](https://github.com/vitlibar))
+- Added bitmap functions with Roaring Bitmaps. [\#4207](https://github.com/ClickHouse/ClickHouse/pull/4207) ([Andy Yang](https://github.com/andyyzh)) [\#4568](https://github.com/ClickHouse/ClickHouse/pull/4568) ([Vitaly Baranov](https://github.com/vitlibar))
+- Parquet format support. [\#4448](https://github.com/ClickHouse/ClickHouse/pull/4448) ([proller](https://github.com/proller))
+- N-gram distance was added for fuzzy string comparison. It is similar to q-gram metrics in the R language. [\#4466](https://github.com/ClickHouse/ClickHouse/pull/4466) ([Danila Kutenin](https://github.com/danlark1))
+- Combine rules for graphite rollup from dedicated aggregation and retention patterns. [\#4426](https://github.com/ClickHouse/ClickHouse/pull/4426) ([Mikhail f. Shiryaev](https://github.com/Felixoid))
+- Added `max_execution_speed` and `max_execution_speed_bytes` to limit resource usage. Added the `min_execution_speed_bytes` setting to complement `min_execution_speed`. [\#4430](https://github.com/ClickHouse/ClickHouse/pull/4430) ([Winter Zhang](https://github.com/zhang2014))
+- Implemented the function `flatten`; see the sketch after this list. [\#4555](https://github.com/ClickHouse/ClickHouse/pull/4555) [\#4409](https://github.com/ClickHouse/ClickHouse/pull/4409) ([alexey-milovidov](https://github.com/alexey-milovidov), [kzon](https://github.com/kzon))
+- Added the functions `arrayEnumerateDenseRanked` and `arrayEnumerateUniqRanked` (like `arrayEnumerateUniq`, but they allow fine-tuning the array depth to look inside multidimensional arrays). [\#4475](https://github.com/ClickHouse/ClickHouse/pull/4475) ([proller](https://github.com/proller)) [\#4601](https://github.com/ClickHouse/ClickHouse/pull/4601) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Multiple JOINS with some restrictions: no asterisks, no complex aliases in ON/WHERE/GROUP BY/… [\#4462](https://github.com/ClickHouse/ClickHouse/pull/4462) ([Artem Zuikov](https://github.com/4ertus2))
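+
+A minimal sketch of `flatten`, which converts an array of arrays into a flat array:
+
+```sql
+SELECT flatten([[1, 2, 3], [4, 5]]) AS flat;
+-- Result: [1, 2, 3, 4, 5]
+```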
+#### Bug Fixes {#bug-fixes-11}
+
+- This release also contains all bug fixes from 19.3 and 19.1.
+- Fixed a bug in data skipping indices: the order of granules after INSERT was incorrect. [\#4407](https://github.com/ClickHouse/ClickHouse/pull/4407) ([Nikita Vasilev](https://github.com/nikvas0))
+- Fixed the `set` index for `Nullable` and `LowCardinality` columns. Before this fix, a `set` index with a `Nullable` or `LowCardinality` column led to the error `Data type must be deserialized with multiple streams` while selecting. [\#4594](https://github.com/ClickHouse/ClickHouse/pull/4594) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Correctly set update\_time on a full `executable` dictionary update. [\#4551](https://github.com/ClickHouse/ClickHouse/pull/4551) ([Tema Novikov](https://github.com/temoon))
+- Fix the broken progress bar in 19.3. [\#4627](https://github.com/ClickHouse/ClickHouse/pull/4627) ([filimonov](https://github.com/filimonov))
+- Fixed inconsistent values of MemoryTracker when a memory region was shrunk, in certain cases. [\#4619](https://github.com/ClickHouse/ClickHouse/pull/4619) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed undefined behavior in ThreadPool. [\#4612](https://github.com/ClickHouse/ClickHouse/pull/4612) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a very rare crash with the message `mutex lock failed: Invalid argument` that could happen when a MergeTree table was dropped concurrently with a SELECT. [\#4608](https://github.com/ClickHouse/ClickHouse/pull/4608) ([Alex Zatelepin](https://github.com/ztlpn))
+- ODBC driver compatibility with the `LowCardinality` data type. [\#4381](https://github.com/ClickHouse/ClickHouse/pull/4381) ([proller](https://github.com/proller))
+- FreeBSD: a fix for the `AIOcontextPool: Found io_event with unknown id 0` error. [\#4438](https://github.com/ClickHouse/ClickHouse/pull/4438) ([urgordeadbeef](https://github.com/urgordeadbeef))
+- The `system.part_log` table is now created regardless of the configuration. [\#4483](https://github.com/ClickHouse/ClickHouse/pull/4483) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix undefined behavior in the `dictIsIn` function for cache dictionaries. [\#4515](https://github.com/ClickHouse/ClickHouse/pull/4515) ([alesapin](https://github.com/alesapin))
+- Fixed a deadlock when a SELECT query locks the same table multiple times (e.g. from different threads or when executing multiple subqueries) and there is a concurrent DDL query. [\#4535](https://github.com/ClickHouse/ClickHouse/pull/4535) ([Alex Zatelepin](https://github.com/ztlpn))
+- Disable compile\_expressions by default until we get our own `llvm` contrib and can test it with `clang` and `asan`. [\#4579](https://github.com/ClickHouse/ClickHouse/pull/4579) ([alesapin](https://github.com/alesapin))
+- Prevent `std::terminate` when `invalidate_query` for a `clickhouse` external dictionary source has returned a wrong resultset (empty, or more than one row, or more than one column). Fixed the issue of `invalidate_query` being performed every five seconds regardless of the `lifetime`. [\#4583](https://github.com/ClickHouse/ClickHouse/pull/4583) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Avoid a deadlock when `invalidate_query` for a dictionary with a `clickhouse` source involves the `system.dictionaries` table or a `Dictionaries` database (a rare case). [\#4599](https://github.com/ClickHouse/ClickHouse/pull/4599) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed CROSS JOIN with an empty WHERE. [\#4598](https://github.com/ClickHouse/ClickHouse/pull/4598) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed a segfault in the function “replicate” when a constant argument is passed. [\#4603](https://github.com/ClickHouse/ClickHouse/pull/4603) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix lambda functions with the predicate optimizer. [\#4408](https://github.com/ClickHouse/ClickHouse/pull/4408) ([Winter Zhang](https://github.com/zhang2014))
+- Multiple fixes for multiple JOINs. [\#4595](https://github.com/ClickHouse/ClickHouse/pull/4595) ([Artem Zuikov](https://github.com/4ertus2))
+
+#### Improvements {#improvements-3}
+
+- Support aliases in the `JOIN ON` section for right table columns. [\#4412](https://github.com/ClickHouse/ClickHouse/pull/4412) ([Artem Zuikov](https://github.com/4ertus2))
+- The result of multiple JOINs needs correct result names to be used in subselects. Replace flat aliases with source names in the result. [\#4474](https://github.com/ClickHouse/ClickHouse/pull/4474) ([Artem Zuikov](https://github.com/4ertus2))
+- Improve the push-down logic for joined statements. [\#4387](https://github.com/ClickHouse/ClickHouse/pull/4387) ([Ivan](https://github.com/abyss7))
+
+#### Performance Improvements {#performance-improvements-3}
+
+- Improved heuristics of the “move to PREWHERE” optimization. [\#4405](https://github.com/ClickHouse/ClickHouse/pull/4405) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Use proper lookup tables that use HashTable's API for 8-bit and 16-bit keys. [\#4536](https://github.com/ClickHouse/ClickHouse/pull/4536) ([Amos Bird](https://github.com/amosbird))
+- Improved performance of string comparison. [\#4564](https://github.com/ClickHouse/ClickHouse/pull/4564) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Cleanup the distributed DDL queue in a separate thread so that it does not slow down the main loop that processes distributed DDL tasks. [\#4502](https://github.com/ClickHouse/ClickHouse/pull/4502) ([Alex Zatelepin](https://github.com/ztlpn))
+- When `min_bytes_to_use_direct_io` is set to 1, not every file was opened in O\_DIRECT mode, because the data size to read was sometimes underestimated by the size of one compressed block. [\#4526](https://github.com/ClickHouse/ClickHouse/pull/4526) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvement-12}
+
+- Added support for clang-9. [\#4604](https://github.com/ClickHouse/ClickHouse/pull/4604) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix wrong `__asm__` instructions (again). [\#4621](https://github.com/ClickHouse/ClickHouse/pull/4621) ([Konstantin Podshumok](https://github.com/podshumok))
+- Added the ability to specify settings for `clickhouse-performance-test` from the command line. [\#4437](https://github.com/ClickHouse/ClickHouse/pull/4437) ([alesapin](https://github.com/alesapin))
+- Added dictionary tests to integration tests. [\#4477](https://github.com/ClickHouse/ClickHouse/pull/4477) ([alesapin](https://github.com/alesapin))
+- Added queries from the benchmark on the website to automated performance tests. [\#4496](https://github.com/ClickHouse/ClickHouse/pull/4496) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `xxhash.h` does not exist in external lz4, because it is an implementation detail and its symbols are namespaced with the `XXH_NAMESPACE` macro. When lz4 is external, xxHash has to be external too, and the dependents have to link to it. [\#4495](https://github.com/ClickHouse/ClickHouse/pull/4495) ([Orivej Desh](https://github.com/orivej))
+- Fixed a case when the `quantileTiming` aggregate function could be called with a negative or floating-point argument (this fixes a fuzz test with the undefined behavior sanitizer). [\#4506](https://github.com/ClickHouse/ClickHouse/pull/4506) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Spelling error correction. [\#4531](https://github.com/ClickHouse/ClickHouse/pull/4531) ([sdk2](https://github.com/sdk2))
+- Fix compilation on Mac. [\#4371](https://github.com/ClickHouse/ClickHouse/pull/4371) ([Vitaly Baranov](https://github.com/vitlibar))
+- Build fixes for FreeBSD and various unusual build configurations. [\#4444](https://github.com/ClickHouse/ClickHouse/pull/4444) ([proller](https://github.com/proller))
+
+## ClickHouse Release 19.3 {#clickhouse-release-19-3}
+
+### ClickHouse Release 19.3.9.1, 2019-04-02 {#clickhouse-release-19-3-9-1-2019-04-02}
+
+#### Bug Fixes {#bug-fixes-12}
+
+- Fix a crash in `FULL/RIGHT JOIN` when joining nullable vs not-nullable. [\#4855](https://github.com/ClickHouse/ClickHouse/pull/4855) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix a segmentation fault in `clickhouse-copier`. [\#4835](https://github.com/ClickHouse/ClickHouse/pull/4835) ([proller](https://github.com/proller))
+- Fixed reading from an `Array(LowCardinality)` column in the rare case when the column contained a long sequence of empty arrays. [\#4850](https://github.com/ClickHouse/ClickHouse/pull/4850) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+
+#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvement-13}
+
+- Added a way to launch the clickhouse-server image from a custom user. [\#4753](https://github.com/ClickHouse/ClickHouse/pull/4753) ([Mikhail f. Shiryaev](https://github.com/Felixoid))
+### ClickHouse Release 19.3.7, 2019-03-12 {#clickhouse-release-19-3-7-2019-03-12}
+
+#### Bug Fixes {#bug-fixes-13}
+
+- Fixed a bug in \#3920. The bug manifested itself as random cache corruption (messages `Unknown codec family code`, `Cannot seek through file`) and segfaults. It first appeared in version 19.1 and is present in versions up to 19.1.10 and 19.3.6. [\#4623](https://github.com/ClickHouse/ClickHouse/pull/4623) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse Release 19.3.6, 2019-03-02 {#clickhouse-release-19-3-6-2019-03-02}
+
+#### Bug Fixes {#bug-fixes-14}
+
+- When there are more than 1000 threads in a thread pool, `std::terminate` could happen on thread exit. [Azat Khuzhin](https://github.com/azat) [\#4485](https://github.com/ClickHouse/ClickHouse/pull/4485) [\#4505](https://github.com/ClickHouse/ClickHouse/pull/4505) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Now it's possible to create `ReplicatedMergeTree*` tables with comments on columns without defaults, and tables with column codecs without comments and defaults. Also fixed the comparison of codecs. [\#4523](https://github.com/ClickHouse/ClickHouse/pull/4523) ([alesapin](https://github.com/alesapin))
+- Fixed a crash on JOIN with arrays or tuples. [\#4552](https://github.com/ClickHouse/ClickHouse/pull/4552) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed a crash in clickhouse-copier with the message `ThreadStatus not created`. [\#4540](https://github.com/ClickHouse/ClickHouse/pull/4540) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed a hangup on server shutdown if distributed DDLs were used. [\#4472](https://github.com/ClickHouse/ClickHouse/pull/4472) ([Alex Zatelepin](https://github.com/ztlpn))
+- Incorrect column numbers were printed in error messages about text format parsing for columns with numbers greater than 10. [\#4484](https://github.com/ClickHouse/ClickHouse/pull/4484) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-3}
+
+- Fixed the build with AVX enabled. [\#4527](https://github.com/ClickHouse/ClickHouse/pull/4527) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Enable extended accounting and IO accounting based on a known-good kernel version instead of the kernel it is compiled under. [\#4541](https://github.com/ClickHouse/ClickHouse/pull/4541) ([Nicolae Vartolomei](https://github.com/nvartolomei))
+- Allow skipping the setting of core\_dump.size\_limit, with a warning instead of a throw if setting the limit fails. [\#4473](https://github.com/ClickHouse/ClickHouse/pull/4473) ([proller](https://github.com/proller))
+- Removed the `inline` tags of `void readBinary(...)` in `Field.cpp`. Also merged redundant `namespace DB` blocks. [\#4530](https://github.com/ClickHouse/ClickHouse/pull/4530) ([hcz](https://github.com/hczhcz))
+
+### ClickHouse Release 19.3.5, 2019-02-21 {#clickhouse-release-19-3-5-2019-02-21}
+
+#### Bug Fixes {#bug-fixes-15}
+
+- Fixed a bug with large HTTP insert query processing. [\#4454](https://github.com/ClickHouse/ClickHouse/pull/4454) ([alesapin](https://github.com/alesapin))
+- Fixed backward incompatibility with old versions due to a wrong implementation of the `send_logs_level` setting. [\#4445](https://github.com/ClickHouse/ClickHouse/pull/4445) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed backward incompatibility of the table function `remote` introduced with column comments. [\#4446](https://github.com/ClickHouse/ClickHouse/pull/4446) ([alexey-milovidov](https://github.com/alexey-milovidov))
+### ClickHouse Release 19.3.4, 2019-02-16 {#clickhouse-release-19-3-4-2019-02-16}
+
+#### Improvements {#improvements-4}
+
+- The table index size is not accounted for memory limits when doing an `ATTACH TABLE` query. Avoided the possibility that a table cannot be attached after being detached. [\#4396](https://github.com/ClickHouse/ClickHouse/pull/4396) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Slightly raised the limit on the max string and array size received from ZooKeeper. It allows continuing to work with an increased size of `CLIENT_JVMFLAGS=-Djute.maxbuffer=...` on ZooKeeper. [\#4398](https://github.com/ClickHouse/ClickHouse/pull/4398) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Allow repairing an abandoned replica even if it already has a huge number of nodes in its queue. [\#4399](https://github.com/ClickHouse/ClickHouse/pull/4399) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add one required argument to the `SET` index (the maximum number of stored rows); see the sketch after this list. [\#4386](https://github.com/ClickHouse/ClickHouse/pull/4386) ([Nikita Vasilev](https://github.com/nikvas0))
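+
+A hedged sketch of the now-required argument (the table, column and limits are hypothetical; at that time data skipping indices also had to be enabled with the experimental setting shown):
+
+```sql
+SET allow_experimental_data_skipping_indices = 1;
+CREATE TABLE t_skip
+(
+    id UInt64,
+    status String,
+    INDEX status_idx status TYPE set(100) GRANULARITY 4 -- store at most 100 distinct values per index granule
+)
+ENGINE = MergeTree
+ORDER BY id;
+```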
+#### Bug Fixes {#bug-fixes-16}
+
+- Fixed the `WITH ROLLUP` result for GROUP BY a single `LowCardinality` key. [\#4384](https://github.com/ClickHouse/ClickHouse/pull/4384) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed a bug in the set index (dropping a granule if it contains more than `max_rows` rows). [\#4386](https://github.com/ClickHouse/ClickHouse/pull/4386) ([Nikita Vasilev](https://github.com/nikvas0))
+- A lot of FreeBSD build fixes. [\#4397](https://github.com/ClickHouse/ClickHouse/pull/4397) ([proller](https://github.com/proller))
+- Fixed alias substitution in queries with a subquery containing the same alias (issue [\#4110](https://github.com/ClickHouse/ClickHouse/issues/4110)). [\#4351](https://github.com/ClickHouse/ClickHouse/pull/4351) ([Artem Zuikov](https://github.com/4ertus2))
+
+#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-4}
+
+- Added the ability to run `clickhouse-server` for stateless tests in a docker image. [\#4347](https://github.com/ClickHouse/ClickHouse/pull/4347) ([Vasily Nemkov](https://github.com/Enmk))
+
+### ClickHouse Release 19.3.3, 2019-02-13 {#clickhouse-release-19-3-3-2019-02-13}
+
+#### New Features {#new-features-6}
+
+- Added the `KILL MUTATION` statement that allows removing mutations that are for some reason stuck. Added the `latest_failed_part`, `latest_fail_time`, `latest_fail_reason` fields to the `system.mutations` table for easier troubleshooting. [\#4287](https://github.com/ClickHouse/ClickHouse/pull/4287) ([Alex Zatelepin](https://github.com/ztlpn))
+- Added the aggregate function `entropy`, which computes Shannon entropy. [\#4238](https://github.com/ClickHouse/ClickHouse/pull/4238) ([Quid37](https://github.com/Quid37))
+- Added the ability to send queries `INSERT INTO tbl VALUES (....` to the server without splitting on `query` and `data` parts. [\#4301](https://github.com/ClickHouse/ClickHouse/pull/4301) ([alesapin](https://github.com/alesapin))
+- A generic implementation of the `arrayWithConstant` function was added. [\#4322](https://github.com/ClickHouse/ClickHouse/pull/4322) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Implemented the `NOT BETWEEN` comparison operator. [\#4228](https://github.com/ClickHouse/ClickHouse/pull/4228) ([Dmitry Naumov](https://github.com/nezed))
+- Implement `sumMapFiltered` in order to be able to limit the number of keys for which values will be summed by `sumMap`; see the sketch after this list. [\#4129](https://github.com/ClickHouse/ClickHouse/pull/4129) ([Léo Ercolanelli](https://github.com/ercolanelli-leo))
+- Added support for the `Nullable` type in the `mysql` table function. [\#4198](https://github.com/ClickHouse/ClickHouse/pull/4198) ([Emmanuel Donin de Rosière](https://github.com/edonin))
+- Support for arbitrary constant expressions in the `LIMIT` clause. [\#4246](https://github.com/ClickHouse/ClickHouse/pull/4246) ([k3box](https://github.com/k3box))
+- Added the `topKWeighted` aggregate function that takes an additional argument with an (unsigned integer) weight. [\#4245](https://github.com/ClickHouse/ClickHouse/pull/4245) ([Andrew Golman](https://github.com/andrewgolman))
+- `StorageJoin` now supports the `join_any_take_last_row` setting that allows overwriting existing values for the same key. [\#3973](https://github.com/ClickHouse/ClickHouse/pull/3973) ([Amos Bird](https://github.com/amosbird))
+- Added the function `toStartOfInterval`. [\#4304](https://github.com/ClickHouse/ClickHouse/pull/4304) ([Vitaly Baranov](https://github.com/vitlibar))
+- Added the `RowBinaryWithNamesAndTypes` format. [\#4200](https://github.com/ClickHouse/ClickHouse/pull/4200) ([Oleg V. Kozlyuk](https://github.com/DarkWanderer))
+- Added the `IPv4` and `IPv6` data types. More effective implementations of `IPv*` functions. [\#3669](https://github.com/ClickHouse/ClickHouse/pull/3669) ([Vasily Nemkov](https://github.com/Enmk))
+- Added the function `toStartOfTenMinutes()`. [\#4298](https://github.com/ClickHouse/ClickHouse/pull/4298) ([Vitaly Baranov](https://github.com/vitlibar))
+- Added the `Protobuf` output format. [\#4005](https://github.com/ClickHouse/ClickHouse/pull/4005) [\#4158](https://github.com/ClickHouse/ClickHouse/pull/4158) ([Vitaly Baranov](https://github.com/vitlibar))
+- Added brotli support for the HTTP interface for data import (INSERTs). [\#4235](https://github.com/ClickHouse/ClickHouse/pull/4235) ([Mikhail](https://github.com/fandyushin))
+- Added hints while a user makes a typo in a function name or types in the command line client. [\#4239](https://github.com/ClickHouse/ClickHouse/pull/4239) ([Danila Kutenin](https://github.com/danlark1))
+- Added `Query-Id` to the server's HTTP response headers. [\#4231](https://github.com/ClickHouse/ClickHouse/pull/4231) ([Mikhail](https://github.com/fandyushin))
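+
+A minimal sketch of `sumMapFiltered` (the data is hypothetical): only the listed keys contribute to the sums.
+
+```sql
+SELECT sumMapFiltered([1, 3])(keys, values)
+FROM (SELECT [1, 2, 3] AS keys, [10, 20, 30] AS values);
+-- Result: ([1, 3], [10, 30])
+```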
+#### Experimental Features {#experimental-features-2}
+
+- Added the `minmax` and `set` data skipping indices for the MergeTree table engine family. [\#4143](https://github.com/ClickHouse/ClickHouse/pull/4143) ([Nikita Vasilev](https://github.com/nikvas0))
+- Added conversion of `CROSS JOIN` to `INNER JOIN` if possible. [\#4221](https://github.com/ClickHouse/ClickHouse/pull/4221) [\#4266](https://github.com/ClickHouse/ClickHouse/pull/4266) ([Artem Zuikov](https://github.com/4ertus2))
+
+#### Bug Fixes {#bug-fixes-17}
+
+- Fixed `Not found column` for duplicate columns in the `JOIN ON` section. [\#4279](https://github.com/ClickHouse/ClickHouse/pull/4279) ([Artem Zuikov](https://github.com/4ertus2))
+- Made the `START REPLICATED SENDS` command start replicated sends. [\#4229](https://github.com/ClickHouse/ClickHouse/pull/4229) ([Nicolae Vartolomei](https://github.com/nvartolomei))
+- Fixed aggregate function execution with `Array(LowCardinality)` arguments. [\#4055](https://github.com/ClickHouse/ClickHouse/pull/4055) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Fixed wrong behaviour when doing an `INSERT ... SELECT ... FROM file(...)` query where the file has `CSVWithNames` or `TSVWithNames` format and the first data row is missing. [\#4297](https://github.com/ClickHouse/ClickHouse/pull/4297) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a crash on dictionary reload if the dictionary is not available. This bug appeared in 19.1.6. [\#4188](https://github.com/ClickHouse/ClickHouse/pull/4188) ([proller](https://github.com/proller))
+- Fixed `ALL JOIN` with duplicates in the right table. [\#4184](https://github.com/ClickHouse/ClickHouse/pull/4184) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed a segmentation fault with `use_uncompressed_cache=1` and an exception with a wrong uncompressed size. This bug appeared in 19.1.6. [\#4186](https://github.com/ClickHouse/ClickHouse/pull/4186) ([alesapin](https://github.com/alesapin))
+- Fixed a `compile_expressions` bug with comparison of big (more than int16) dates. [\#4341](https://github.com/ClickHouse/ClickHouse/pull/4341) ([alesapin](https://github.com/alesapin))
+- Fixed an infinite loop when selecting from the table function `numbers(0)`. [\#4280](https://github.com/ClickHouse/ClickHouse/pull/4280) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Temporarily disable predicate optimization for `ORDER BY`. [\#3890](https://github.com/ClickHouse/ClickHouse/pull/3890) ([Winter Zhang](https://github.com/zhang2014))
+- Fixed the `Illegal instruction` error when using base64 functions on old CPUs. This error has been reproduced only when ClickHouse was compiled with gcc-8. [\#4275](https://github.com/ClickHouse/ClickHouse/pull/4275) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed the `No message received` error when interacting with the PostgreSQL ODBC driver through a TLS connection. Also fixes a segfault when using the MySQL ODBC driver. [\#4170](https://github.com/ClickHouse/ClickHouse/pull/4170) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed an incorrect result when `Date` and `DateTime` arguments are used in branches of the conditional operator (function `if`). Added a generic case for the function `if`. [\#4243](https://github.com/ClickHouse/ClickHouse/pull/4243) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- ClickHouse dictionaries now load within the `clickhouse` process. [\#4166](https://github.com/ClickHouse/ClickHouse/pull/4166) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a deadlock when a `SELECT` from a table with a `File` engine was retried after a `No such file or directory` error. [\#4161](https://github.com/ClickHouse/ClickHouse/pull/4161) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a race condition when selecting from `system.tables`, which could give a `table doesn't exist` error. [\#4313](https://github.com/ClickHouse/ClickHouse/pull/4313) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `clickhouse-client` could segfault on exit while loading data for command line suggestions if run in interactive mode. [\#4317](https://github.com/ClickHouse/ClickHouse/pull/4317) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a bug where executing mutations containing the `IN` operator produced incorrect results. [\#4099](https://github.com/ClickHouse/ClickHouse/pull/4099) ([Alex Zatelepin](https://github.com/ztlpn))
+- Fixed an error: if there is a database with a `Dictionary` engine, all dictionaries are force-loaded at server startup, and a dictionary with a ClickHouse source from localhost could fail to load. [\#4255](https://github.com/ClickHouse/ClickHouse/pull/4255) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed an error when system logs were tried to be created again at server shutdown. [\#4254](https://github.com/ClickHouse/ClickHouse/pull/4254) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Correctly return the right type and properly handle locks in the `joinGet` function. [\#4153](https://github.com/ClickHouse/ClickHouse/pull/4153) ([Amos Bird](https://github.com/amosbird))
+- Added the `sumMapWithOverflow` function. [\#4151](https://github.com/ClickHouse/ClickHouse/pull/4151) ([Léo Ercolanelli](https://github.com/ercolanelli-leo))
+- Fixed a segfault with `allow_experimental_multiple_joins_emulation`. [52de2c](https://github.com/ClickHouse/ClickHouse/commit/52de2cd927f7b5257dd67e175f0a5560a48840d0) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed a bug with incorrect `Date` and `DateTime` comparison. [\#4237](https://github.com/ClickHouse/ClickHouse/pull/4237) ([valexey](https://github.com/valexey))
+- Fixed a fuzz test under the undefined behavior sanitizer: added parameter type checks for the `quantile*Weighted` family of functions. [\#4145](https://github.com/ClickHouse/ClickHouse/pull/4145) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a rare race condition when removing old data parts, which could fail with a `File not found` error. [\#4378](https://github.com/ClickHouse/ClickHouse/pull/4378) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix the install package with a missing /etc/clickhouse-server/config.xml. [\#4343](https://github.com/ClickHouse/ClickHouse/pull/4343) ([proller](https://github.com/proller))
+
+#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-5}
+
+- Debian package: correct the /etc/clickhouse-server/preprocessed link according to the config. [\#4205](https://github.com/ClickHouse/ClickHouse/pull/4205) ([proller](https://github.com/proller))
+- Various build fixes for FreeBSD. [\#4225](https://github.com/ClickHouse/ClickHouse/pull/4225) ([proller](https://github.com/proller))
+- Added the ability to create, fill and drop tables in perftest. [\#4220](https://github.com/ClickHouse/ClickHouse/pull/4220) ([alesapin](https://github.com/alesapin))
+- Added a script to check for duplicate includes. [\#4326](https://github.com/ClickHouse/ClickHouse/pull/4326) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added the ability to run queries by index in performance tests. [\#4264](https://github.com/ClickHouse/ClickHouse/pull/4264) ([alesapin](https://github.com/alesapin))
+- A package with debug symbols is suggested to be installed. [\#4274](https://github.com/ClickHouse/ClickHouse/pull/4274) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Refactoring of performance tests. Better logging and signals handling. [\#4171](https://github.com/ClickHouse/ClickHouse/pull/4171) ([alesapin](https://github.com/alesapin))
+- Added docs for the anonymized Yandex.Metrika datasets. [\#4164](https://github.com/ClickHouse/ClickHouse/pull/4164) ([alesapin](https://github.com/alesapin))
+- Added a tool for converting an old month-partitioned part to the custom-partitioned format. [\#4195](https://github.com/ClickHouse/ClickHouse/pull/4195) ([Alex Zatelepin](https://github.com/ztlpn))
+- Added docs about two datasets in s3. [\#4144](https://github.com/ClickHouse/ClickHouse/pull/4144) ([alesapin](https://github.com/alesapin))
+- Added a script which creates a changelog from pull request descriptions. [\#4169](https://github.com/ClickHouse/ClickHouse/pull/4169) [\#4173](https://github.com/ClickHouse/ClickHouse/pull/4173) ([KochetovNicolai](https://github.com/KochetovNicolai)) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Added a puppet module for ClickHouse. [\#4182](https://github.com/ClickHouse/ClickHouse/pull/4182) ([Maxim Fedotov](https://github.com/MaxFedotov))
+- Added docs for a group of undocumented functions. [\#4168](https://github.com/ClickHouse/ClickHouse/pull/4168) ([Winter Zhang](https://github.com/zhang2014))
+- ARM build fixes. [\#4210](https://github.com/ClickHouse/ClickHouse/pull/4210) [\#4306](https://github.com/ClickHouse/ClickHouse/pull/4306) [\#4291](https://github.com/ClickHouse/ClickHouse/pull/4291) ([proller](https://github.com/proller)) ([proller](https://github.com/proller))
+- Dictionary tests now are able to run from `ctest`. [\#4189](https://github.com/ClickHouse/ClickHouse/pull/4189) ([proller](https://github.com/proller))
+- Now `/etc/ssl` is used as the default directory with SSL certificates. [\#4167](https://github.com/ClickHouse/ClickHouse/pull/4167) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added checking of SSE and AVX instructions at start. [\#4234](https://github.com/ClickHouse/ClickHouse/pull/4234) ([Igr](https://github.com/igron99))
+- The init script will wait for the server until it starts. [\#4281](https://github.com/ClickHouse/ClickHouse/pull/4281) ([proller](https://github.com/proller))
+
+#### Backward Incompatible Changes {#backward-incompatible-changes-1}
+
+- Removed the `allow_experimental_low_cardinality_type` setting. `LowCardinality` data types are production ready. [\#4323](https://github.com/ClickHouse/ClickHouse/pull/4323) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Reduce the mark cache size and the uncompressed cache size according to the available amount of memory. [\#4240](https://github.com/ClickHouse/ClickHouse/pull/4240) ([Lopatin Konstantin](https://github.com/k-lopatin))
+- Added the keyword `INDEX` in the `CREATE TABLE` query. A column with the name `index` has to be quoted with backticks or double quotes: `` `index` `` (see the sketch after this list). [\#4143](https://github.com/ClickHouse/ClickHouse/pull/4143) ([Nikita Vasilev](https://github.com/nikvas0))
+- `sumMap` now promotes the result type instead of overflowing. The old `sumMap` behavior can be obtained by using the `sumMapWithOverflow` function. [\#4151](https://github.com/ClickHouse/ClickHouse/pull/4151) ([Léo Ercolanelli](https://github.com/ercolanelli-leo))
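+
+A minimal sketch of the quoting now required for a column named `index` (the table is hypothetical):
+
+```sql
+CREATE TABLE t_quoted
+(
+    `index` UInt32, -- INDEX is a keyword now, so the column name must be backquoted
+    value String
+)
+ENGINE = MergeTree
+ORDER BY `index`;
+```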
+#### Performance Improvements {#performance-improvements-4}
+
+- `std::sort` was replaced by `pdqsort` for sorts without `LIMIT`. [\#4236](https://github.com/ClickHouse/ClickHouse/pull/4236) ([Evgenii Pravda](https://github.com/kvinty))
+- Now the server reuses threads from the global thread pool. This affects performance in some corner cases. [\#4150](https://github.com/ClickHouse/ClickHouse/pull/4150) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Improvements {#improvements-5}
+
+- Implemented AIO support for FreeBSD. [\#4305](https://github.com/ClickHouse/ClickHouse/pull/4305) ([urgordeadbeef](https://github.com/urgordeadbeef))
+- `SELECT * FROM a JOIN b USING a, b` now returns the `a` and `b` columns only from the left table. [\#4141](https://github.com/ClickHouse/ClickHouse/pull/4141) ([Artem Zuikov](https://github.com/4ertus2))
+- Allow the `-C` option of the client to work the same as the `-c` option. [\#4232](https://github.com/ClickHouse/ClickHouse/pull/4232) ([syominsergey](https://github.com/syominsergey))
+- Now the `--password` option used without a value requires the password to be entered from stdin. [\#4230](https://github.com/ClickHouse/ClickHouse/pull/4230) ([BSD\_Conqueror](https://github.com/bsd-conqueror))
+- Added highlighting of unescaped metacharacters in string literals that contain `LIKE` expressions or regexps. [\#4327](https://github.com/ClickHouse/ClickHouse/pull/4327) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added cancelling of HTTP read-only queries if the client socket goes away. [\#4213](https://github.com/ClickHouse/ClickHouse/pull/4213) ([Nicolae Vartolomei](https://github.com/nvartolomei))
+- Now the server reports progress to keep client connections alive. [\#4215](https://github.com/ClickHouse/ClickHouse/pull/4215) ([Ivan](https://github.com/abyss7))
+- Slightly better message with the reason for an OPTIMIZE query when the `optimize_throw_if_noop` setting is enabled. [\#4294](https://github.com/ClickHouse/ClickHouse/pull/4294) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added support of the `--version` option for the clickhouse server. [\#4251](https://github.com/ClickHouse/ClickHouse/pull/4251) ([Lopatin Konstantin](https://github.com/k-lopatin))
+- Added the `--help/-h` option to `clickhouse-server`. [\#4233](https://github.com/ClickHouse/ClickHouse/pull/4233) ([Yuriy Baranov](https://github.com/yurriy))
+- Added support for scalar subqueries with an aggregate function state result. [\#4348](https://github.com/ClickHouse/ClickHouse/pull/4348) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Improved the server shutdown time and the ALTER waiting time. [\#4372](https://github.com/ClickHouse/ClickHouse/pull/4372) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added info about the replicated\_can\_become\_leader setting to system.replicas, and added logging if the replica won't try to become the leader. [\#4379](https://github.com/ClickHouse/ClickHouse/pull/4379) ([Alex Zatelepin](https://github.com/ztlpn))
+
+## ClickHouse Release 19.1 {#clickhouse-release-19-1}
+
+### ClickHouse Release 19.1.14, 2019-03-14 {#clickhouse-release-19-1-14-2019-03-14}
+
+- Fixed the error `Column ... queried more than once` that could happen if the setting `asterisk_left_columns_only` is set to 1 in case of using `GLOBAL JOIN` with `SELECT *` (a rare case). The issue does not exist in 19.3 and newer. [6bac7d8d](https://github.com/ClickHouse/ClickHouse/pull/4692/commits/6bac7d8d11a9b0d6de0b32b53c47eb2f6f8e7062) ([Artem Zuikov](https://github.com/4ertus2))
+
+### ClickHouse Release 19.1.13, 2019-03-12 {#clickhouse-release-19-1-13-2019-03-12}
+
+This release contains exactly the same set of patches as 19.3.7.
+
+### ClickHouse Release 19.1.10, 2019-03-03 {#clickhouse-release-19-1-10-2019-03-03}
+
+This release contains exactly the same set of patches as 19.3.6.
+
+## ClickHouse Release 19.1 {#clickhouse-release-19-1-1}
+
+### ClickHouse Release 19.1.9, 2019-02-21 {#clickhouse-release-19-1-9-2019-02-21}
+
+#### Bug Fixes {#bug-fixes-18}
+
+- Fixed backward incompatibility with old versions due to a wrong implementation of the `send_logs_level` setting. [\#4445](https://github.com/ClickHouse/ClickHouse/pull/4445) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed backward incompatibility of the table function `remote` introduced with column comments. [\#4446](https://github.com/ClickHouse/ClickHouse/pull/4446) ([alexey-milovidov](https://github.com/alexey-milovidov))
+### ClickHouse Release 19.1.8, 2019-02-16 {#clickhouse-release-19-1-8-2019-02-16}
+
+#### Bug Fixes {#bug-fixes-19}
+
+- Fix the install package with a missing /etc/clickhouse-server/config.xml. [\#4343](https://github.com/ClickHouse/ClickHouse/pull/4343) ([proller](https://github.com/proller))
+
+## ClickHouse Release 19.1 {#clickhouse-release-19-1-2}
+
+### ClickHouse Release 19.1.7, 2019-02-15 {#clickhouse-release-19-1-7-2019-02-15}
+
+#### Bug Fixes {#bug-fixes-20}
+
+- Correctly return the right type and properly handle locks in the `joinGet` function. [\#4153](https://github.com/ClickHouse/ClickHouse/pull/4153) ([Amos Bird](https://github.com/amosbird))
+- Fixed an error when system logs were tried to be created again at server shutdown. [\#4254](https://github.com/ClickHouse/ClickHouse/pull/4254) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed an error: if there is a database with a `Dictionary` engine, all dictionaries are force-loaded at server startup, and a dictionary with a ClickHouse source from localhost could fail to load. [\#4255](https://github.com/ClickHouse/ClickHouse/pull/4255) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a bug where executing mutations containing the `IN` operator produced incorrect results. [\#4099](https://github.com/ClickHouse/ClickHouse/pull/4099) ([Alex Zatelepin](https://github.com/ztlpn))
+- `clickhouse-client` could segfault on exit while loading data for command line suggestions if run in interactive mode. [\#4317](https://github.com/ClickHouse/ClickHouse/pull/4317) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a race condition when selecting from `system.tables`, which could give a `table doesn't exist` error. [\#4313](https://github.com/ClickHouse/ClickHouse/pull/4313) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a deadlock when a `SELECT` from a table with a `File` engine was retried after a `No such file or directory` error. [\#4161](https://github.com/ClickHouse/ClickHouse/pull/4161) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed an issue: local ClickHouse dictionaries were loaded via TCP, but they should load within the process. [\#4166](https://github.com/ClickHouse/ClickHouse/pull/4166) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed the `No message received` error when interacting with the PostgreSQL ODBC driver through a TLS connection. Also fixes a segfault when using the MySQL ODBC driver. [\#4170](https://github.com/ClickHouse/ClickHouse/pull/4170) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Temporarily disable predicate optimization for `ORDER BY`. [\#3890](https://github.com/ClickHouse/ClickHouse/pull/3890) ([Winter Zhang](https://github.com/zhang2014))
+- Fixed an infinite loop when selecting from the table function `numbers(0)`. [\#4280](https://github.com/ClickHouse/ClickHouse/pull/4280) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a `compile_expressions` bug with comparison of big (more than int16) dates. [\#4341](https://github.com/ClickHouse/ClickHouse/pull/4341) ([alesapin](https://github.com/alesapin))
+- Fixed a segmentation fault with `uncompressed_cache=1` and an exception with a wrong uncompressed size. [\#4186](https://github.com/ClickHouse/ClickHouse/pull/4186) ([alesapin](https://github.com/alesapin))
+- Fixed `ALL JOIN` with duplicates in the right table. [\#4184](https://github.com/ClickHouse/ClickHouse/pull/4184) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed wrong behaviour when doing an `INSERT ... SELECT ... FROM file(...)` query where the file has `CSVWithNames` or `TSVWithNames` format and the first data row is missing. [\#4297](https://github.com/ClickHouse/ClickHouse/pull/4297) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed aggregate function execution with `Array(LowCardinality)` arguments. [\#4055](https://github.com/ClickHouse/ClickHouse/pull/4055) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Debian package: correct the /etc/clickhouse-server/preprocessed link according to the config. [\#4205](https://github.com/ClickHouse/ClickHouse/pull/4205) ([proller](https://github.com/proller))
+- Fixed a fuzz test under the undefined behavior sanitizer: added parameter type checks for the `quantile*Weighted` family of functions. [\#4145](https://github.com/ClickHouse/ClickHouse/pull/4145) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Made the `START REPLICATED SENDS` command start replicated sends. [\#4229](https://github.com/ClickHouse/ClickHouse/pull/4229) ([Nicolae Vartolomei](https://github.com/nvartolomei))
+- Fixed `Not found column` for duplicate columns in the JOIN ON section. [\#4279](https://github.com/ClickHouse/ClickHouse/pull/4279) ([Artem Zuikov](https://github.com/4ertus2))
+- Now `/etc/ssl` is used as the default directory with SSL certificates. [\#4167](https://github.com/ClickHouse/ClickHouse/pull/4167) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a crash on dictionary reload if the dictionary is not available. [\#4188](https://github.com/ClickHouse/ClickHouse/pull/4188) ([proller](https://github.com/proller))
+- Fixed a bug with incorrect `Date` and `DateTime` comparison. [\#4237](https://github.com/ClickHouse/ClickHouse/pull/4237) ([valexey](https://github.com/valexey))
+- Fixed an incorrect result when `Date` and `DateTime` arguments are used in branches of the conditional operator (function `if`). Added a generic case for the function `if`. [\#4243](https://github.com/ClickHouse/ClickHouse/pull/4243) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse Release 19.1.6, 2019-01-24 {#clickhouse-release-19-1-6-2019-01-24}
+
+#### New Features {#new-features-7}
+
+- Custom per-column compression codecs for tables; see the sketch after this list. [\#3899](https://github.com/ClickHouse/ClickHouse/pull/3899) [\#4111](https://github.com/ClickHouse/ClickHouse/pull/4111) ([alesapin](https://github.com/alesapin), [Winter Zhang](https://github.com/zhang2014), [Anatoly](https://github.com/Sindbag))
+- Added the compression codec `Delta`. [\#4052](https://github.com/ClickHouse/ClickHouse/pull/4052) ([alesapin](https://github.com/alesapin))
+- Allow `ALTER`ing compression codecs. [\#4054](https://github.com/ClickHouse/ClickHouse/pull/4054) ([alesapin](https://github.com/alesapin))
+- Added the functions `left`, `right`, `trim`, `ltrim`, `rtrim`, `timestampadd`, `timestampsub` for SQL standard compatibility. [\#3826](https://github.com/ClickHouse/ClickHouse/pull/3826) ([Ivan Blinkov](https://github.com/blinkov))
+- Support for writing to `HDFS` tables and the `hdfs` table function. [\#4084](https://github.com/ClickHouse/ClickHouse/pull/4084) ([alesapin](https://github.com/alesapin))
+- Added functions to search for multiple constant strings in a big haystack: `multiPosition`, `multiSearch`, `firstMatch`, also with `-UTF8`, `-CaseInsensitive` and `-CaseInsensitiveUTF8` variants. [\#4053](https://github.com/ClickHouse/ClickHouse/pull/4053) ([Danila Kutenin](https://github.com/danlark1))
+- Pruning of unused shards if the `SELECT` query filters by the sharding key (setting `optimize_skip_unused_shards`). [\#3851](https://github.com/ClickHouse/ClickHouse/pull/3851) ([Gleb Kanterov](https://github.com/kanterov), [Ivan](https://github.com/abyss7))
+- Allow the `Kafka` engine to ignore some number of parsing errors per block. [\#4094](https://github.com/ClickHouse/ClickHouse/pull/4094) ([Ivan](https://github.com/abyss7))
+- Added support for `CatBoost` multiclass model evaluation. The function `modelEvaluate` returns a tuple with raw per-class predictions for multiclass models. `libcatboostmodel.so` should be built with [\#607](https://github.com/catboost/catboost/pull/607). [\#3959](https://github.com/ClickHouse/ClickHouse/pull/3959) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Added the functions `filesystemAvailable`, `filesystemFree`, `filesystemCapacity`. [\#4097](https://github.com/ClickHouse/ClickHouse/pull/4097) ([Boris Granveaud](https://github.com/bgranvea))
+- Added the hashing functions `xxHash64` and `xxHash32`. [\#3905](https://github.com/ClickHouse/ClickHouse/pull/3905) ([filimonov](https://github.com/filimonov))
+- Added the `gccMurmurHash` hashing function (GCC-flavoured Murmur hash), which uses the same hash seed as [gcc](https://github.com/gcc-mirror/gcc/blob/41d6b10e96a1de98e90a7c0378437c3255814b16/libstdc%2B%2B-v3/include/bits/functional_hash.h#L191). [\#4000](https://github.com/ClickHouse/ClickHouse/pull/4000) ([sundyli](https://github.com/sundy-li))
+- Added the hashing functions `javaHash`, `hiveHash`. [\#3811](https://github.com/ClickHouse/ClickHouse/pull/3811) ([shangshujie365](https://github.com/shangshujie365))
+- Added the table function `remoteSecure`. The function works like `remote`, but uses a secure connection. [\#4088](https://github.com/ClickHouse/ClickHouse/pull/4088) ([proller](https://github.com/proller))
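+
+A hedged sketch of per-column codecs (the table, columns and codec choices are hypothetical):
+
+```sql
+CREATE TABLE t_codecs
+(
+    ts DateTime CODEC(Delta, LZ4), -- delta-encode timestamps, then compress with LZ4
+    value Float64 CODEC(ZSTD(3))   -- ZSTD with an explicit compression level
+)
+ENGINE = MergeTree
+ORDER BY ts;
+```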
#### Experimental Features {#experimental-features-3}

- Added emulation of multiple JOINs (the `allow_experimental_multiple_joins_emulation` setting). [\#3946](https://github.com/ClickHouse/ClickHouse/pull/3946) ([Artem Zuikov](https://github.com/4ertus2))

#### Bug Fixes {#bug-fixes-21}

- Made `compiled_expression_cache_size` limited by default to lower memory consumption. [\#4041](https://github.com/ClickHouse/ClickHouse/pull/4041) ([alesapin](https://github.com/alesapin))
- Fixed a bug that led to hangups in threads that perform ALTERs of replicated tables and in the thread that updates configuration from ZooKeeper. [\#2947](https://github.com/ClickHouse/ClickHouse/issues/2947) [\#3891](https://github.com/ClickHouse/ClickHouse/issues/3891) [\#3934](https://github.com/ClickHouse/ClickHouse/pull/3934) ([Alex Zatelepin](https://github.com/ztlpn))
- Fixed a race condition when executing a distributed ALTER task. The race condition led to more than one replica trying to execute the task and all replicas except one failing with a ZooKeeper error. [\#3904](https://github.com/ClickHouse/ClickHouse/pull/3904) ([Alex Zatelepin](https://github.com/ztlpn))
- Fixed a bug when `from_zk` config elements were not refreshed after a request to ZooKeeper timed out. [\#2947](https://github.com/ClickHouse/ClickHouse/issues/2947) [\#3947](https://github.com/ClickHouse/ClickHouse/pull/3947) ([Alex Zatelepin](https://github.com/ztlpn))
- Fixed a bug with the wrong prefix for IPv4 subnet masks. [\#3945](https://github.com/ClickHouse/ClickHouse/pull/3945) ([alesapin](https://github.com/alesapin))
- Fixed crash (`std::terminate`) in rare cases when a new thread cannot be created due to exhausted resources. [\#3956](https://github.com/ClickHouse/ClickHouse/pull/3956) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Fixed a bug in the `remote` table function execution when wrong restrictions were used for `getStructureOfRemoteTable`. [\#4009](https://github.com/ClickHouse/ClickHouse/pull/4009) ([alesapin](https://github.com/alesapin))
- Fixed a leak of netlink sockets. They were placed in a pool where they were never deleted, and new sockets were created at the start of a new thread when all current sockets were in use. [\#4017](https://github.com/ClickHouse/ClickHouse/pull/4017) ([Alex Zatelepin](https://github.com/ztlpn))
- Fixed a bug with closing the `/proc/self/fd` directory earlier than all fds were read from `/proc` after forking the `odbc-bridge` subprocess. [\#4120](https://github.com/ClickHouse/ClickHouse/pull/4120) ([alesapin](https://github.com/alesapin))
- Fixed String to UInt monotonic conversion in case of usage of String in primary key. [\#3870](https://github.com/ClickHouse/ClickHouse/pull/3870) ([Winter Zhang](https://github.com/zhang2014))
- Fixed an error in the calculation of integer conversion function monotonicity. [\#3921](https://github.com/ClickHouse/ClickHouse/pull/3921) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Fixed segfault in `arrayEnumerateUniq`, `arrayEnumerateDense` functions in case of some invalid arguments. [\#3909](https://github.com/ClickHouse/ClickHouse/pull/3909) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Fixed UB in StorageMerge. [\#3910](https://github.com/ClickHouse/ClickHouse/pull/3910) ([Amos Bird](https://github.com/amosbird))
- Fixed segfault in functions `addDays`, `subtractDays`. [\#3913](https://github.com/ClickHouse/ClickHouse/pull/3913) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Fixed an error: functions `round`, `floor`, `trunc`, `ceil` could return bogus results when executed on an integer argument with a large negative scale. [\#3914](https://github.com/ClickHouse/ClickHouse/pull/3914) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Fixed a bug induced by ‘kill query sync’ which led to a core dump. [\#3916](https://github.com/ClickHouse/ClickHouse/pull/3916) ([muVulDeePecker](https://github.com/fancyqlx))
- Fixed a bug with long delay after an empty replication queue. [\#3928](https://github.com/ClickHouse/ClickHouse/pull/3928) [\#3932](https://github.com/ClickHouse/ClickHouse/pull/3932) ([alesapin](https://github.com/alesapin))
- Fixed excessive memory usage in case of inserting into a table with a `LowCardinality` primary key. [\#3955](https://github.com/ClickHouse/ClickHouse/pull/3955) ([KochetovNicolai](https://github.com/KochetovNicolai))
- Fixed `LowCardinality` serialization for the `Native` format in case of empty arrays. [\#3907](https://github.com/ClickHouse/ClickHouse/issues/3907) [\#4011](https://github.com/ClickHouse/ClickHouse/pull/4011) ([KochetovNicolai](https://github.com/KochetovNicolai))
- Fixed incorrect result while using distinct by a single LowCardinality numeric column. [\#3895](https://github.com/ClickHouse/ClickHouse/issues/3895) [\#4012](https://github.com/ClickHouse/ClickHouse/pull/4012) ([KochetovNicolai](https://github.com/KochetovNicolai))
- Fixed specialized aggregation with a LowCardinality key (in case when the `compile` setting is enabled). [\#3886](https://github.com/ClickHouse/ClickHouse/pull/3886) ([KochetovNicolai](https://github.com/KochetovNicolai))
- Fixed user and password forwarding for replicated table queries. [\#3957](https://github.com/ClickHouse/ClickHouse/pull/3957) ([alesapin](https://github.com/alesapin)) ([小路](https://github.com/nicelulu))
- Fixed a very rare race condition that can happen when listing tables in a Dictionary database while reloading dictionaries. [\#3970](https://github.com/ClickHouse/ClickHouse/pull/3970) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Fixed incorrect result when used with ROLLUP or CUBE. [\#3756](https://github.com/ClickHouse/ClickHouse/issues/3756) [\#3837](https://github.com/ClickHouse/ClickHouse/pull/3837) ([reflection](https://github.com/reflection))
- Fixed column aliases for queries with `JOIN ON` syntax and distributed tables. [\#3980](https://github.com/ClickHouse/ClickHouse/pull/3980) ([Winter Zhang](https://github.com/zhang2014))
- Fixed an error in the internal implementation of `quantileTDigest` (found by Artem Vakhrushev). This error never happens in ClickHouse itself and is relevant only for those who use the ClickHouse codebase as a library directly. [\#3935](https://github.com/ClickHouse/ClickHouse/pull/3935) ([Alexey Milovidov](https://github.com/alexey-milovidov))

#### Improvements {#improvements-6}

- Support for `IF NOT EXISTS` in `ALTER TABLE ADD COLUMN` statements along with `IF EXISTS` in `DROP/MODIFY/CLEAR/COMMENT COLUMN`. [\#3900](https://github.com/ClickHouse/ClickHouse/pull/3900) ([Boris Granveaud](https://github.com/bgranvea))
- Function `parseDateTimeBestEffort`: support for formats `DD.MM.YYYY`, `DD.MM.YY`, `DD-MM-YYYY`, `DD-Mon-YYYY`, `DD/Month/YYYY` and similar. [\#3922](https://github.com/ClickHouse/ClickHouse/pull/3922) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- `CapnProtoInputStream` now supports jagged structures. [\#4063](https://github.com/ClickHouse/ClickHouse/pull/4063) ([Odin Hultgren Van Der Horst](https://github.com/Miniwoffer))
- Usability improvement: added a check that the server process is started from the data directory's owner. Do not allow starting the server from root if the data belongs to a non-root user. [\#3785](https://github.com/ClickHouse/ClickHouse/pull/3785) ([sergey-v-galtsev](https://github.com/sergey-v-galtsev))
- Better logic of checking required columns during analysis of queries with JOINs. [\#3930](https://github.com/ClickHouse/ClickHouse/pull/3930) ([Artem Zuikov](https://github.com/4ertus2))
- Decreased the number of connections in case of a large number of Distributed tables on a single server. [\#3726](https://github.com/ClickHouse/ClickHouse/pull/3726) ([Winter Zhang](https://github.com/zhang2014))
- Supported totals row for a `WITH TOTALS` query via the ODBC driver. [\#3836](https://github.com/ClickHouse/ClickHouse/pull/3836) ([Maksim Koritckiy](https://github.com/nightweb))
- Allowed to use `Enum`s as integers inside the `if` function. [\#3875](https://github.com/ClickHouse/ClickHouse/pull/3875) ([Ivan](https://github.com/abyss7))
- Added the `low_cardinality_allow_in_native_format` setting. If disabled, do not use the `LowCadrinality` type in the `Native` format. [\#3879](https://github.com/ClickHouse/ClickHouse/pull/3879) ([KochetovNicolai](https://github.com/KochetovNicolai))
- Removed some redundant objects from the compiled expressions cache to lower memory usage. [\#4042](https://github.com/ClickHouse/ClickHouse/pull/4042) ([alesapin](https://github.com/alesapin))
- Added a check that `SET send_logs_level = 'value'` queries accept an appropriate value. [\#3873](https://github.com/ClickHouse/ClickHouse/pull/3873) ([Maxim Sabyanin](https://github.com/s-mx))
- Fixed data type checks in type conversion functions. [\#3896](https://github.com/ClickHouse/ClickHouse/pull/3896) ([Winter Zhang](https://github.com/zhang2014))

#### Performance Improvements {#performance-improvements-5}

- Added the MergeTree setting `use_minimalistic_part_header_in_zookeeper` (a usage sketch follows this list). If enabled, replicated tables store compact part metadata in a single part znode. This can dramatically reduce the ZooKeeper snapshot size (especially if the tables have a lot of columns). Note that after enabling this setting you will not be able to downgrade to a version that does not support it. [\#3960](https://github.com/ClickHouse/ClickHouse/pull/3960) ([Alex Zatelepin](https://github.com/ztlpn))
- Added a DFA-based implementation for functions `sequenceMatch` and `sequenceCount` in case the pattern does not contain time. [\#4004](https://github.com/ClickHouse/ClickHouse/pull/4004) ([Léo Ercolanelli](https://github.com/ercolanelli-leo))
- Performance improvement of integer number serialization. [\#3968](https://github.com/ClickHouse/ClickHouse/pull/3968) ([Amos Bird](https://github.com/amosbird))
- Zero left padding of PODArray so that the -1 element is always valid and zeroed. It is used for branchless calculation of offsets. [\#3920](https://github.com/ClickHouse/ClickHouse/pull/3920) ([Amos Bird](https://github.com/amosbird))
- Reverted the `jemalloc` version which led to performance degradation. [\#4018](https://github.com/ClickHouse/ClickHouse/pull/4018) ([Alexey Milovidov](https://github.com/alexey-milovidov))
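The `use_minimalistic_part_header_in_zookeeper` entry above is a per-table MergeTree setting. A minimal sketch of enabling it at table creation time (the table name and schema are invented for illustration; note the warning above about downgrades):

```sql
-- Enabling compact part headers for a replicated table.
CREATE TABLE hits_replicated
(
    dt Date,
    user_id UInt64
)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/hits', '{replica}')
ORDER BY (dt, user_id)
SETTINGS use_minimalistic_part_header_in_zookeeper = 1;
```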
#### Backward Incompatible Changes {#backward-incompatible-changes-2}

- Removed the undocumented feature `ALTER MODIFY PRIMARY KEY` because it was superseded by the `ALTER MODIFY ORDER BY` command. [\#3887](https://github.com/ClickHouse/ClickHouse/pull/3887) ([Alex Zatelepin](https://github.com/ztlpn))
- Removed the function `shardByHash`. [\#3833](https://github.com/ClickHouse/ClickHouse/pull/3833) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Forbid using scalar subqueries with a result of type `AggregateFunction`. [\#3865](https://github.com/ClickHouse/ClickHouse/pull/3865) ([Ivan](https://github.com/abyss7))

#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-6}

- Added support for PowerPC (`ppc64le`) builds. [\#4132](https://github.com/ClickHouse/ClickHouse/pull/4132) ([Danila Kutenin](https://github.com/danlark1))
- Stateful functional tests are run on a publicly available dataset. [\#3969](https://github.com/ClickHouse/ClickHouse/pull/3969) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Fixed an error when the server could not start with a `bash: /usr/bin/clickhouse-extract-from-config: Operation not permitted` message inside Docker or systemd-nspawn. [\#4136](https://github.com/ClickHouse/ClickHouse/pull/4136) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Updated the `rdkafka` library to v1.0.0-RC5. Used cppkafka instead of the raw C interface. [\#4025](https://github.com/ClickHouse/ClickHouse/pull/4025) ([Ivan](https://github.com/abyss7))
- Updated the `mariadb-client` library. Fixed one of the issues found by UBSan. [\#3924](https://github.com/ClickHouse/ClickHouse/pull/3924) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Some fixes for UBSan builds. [\#3926](https://github.com/ClickHouse/ClickHouse/pull/3926) [\#3021](https://github.com/ClickHouse/ClickHouse/pull/3021) [\#3948](https://github.com/ClickHouse/ClickHouse/pull/3948) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Added per-commit runs of tests with the UBSan build.
- Added per-commit runs of the PVS-Studio static analyzer.
- Fixed bugs found by PVS-Studio. [\#4013](https://github.com/ClickHouse/ClickHouse/pull/4013) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Fixed glibc compatibility issues. [\#4100](https://github.com/ClickHouse/ClickHouse/pull/4100) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Moved Docker images to 18.10 and added a compatibility file for glibc \>= 2.28. [\#3965](https://github.com/ClickHouse/ClickHouse/pull/3965) ([alesapin](https://github.com/alesapin))
- Added an env variable for the case when the user does not want to chown directories in the server Docker image. [\#3967](https://github.com/ClickHouse/ClickHouse/pull/3967) ([alesapin](https://github.com/alesapin))
- Enabled most of the warnings from `-Weverything` in clang. Enabled `-Wpedantic`. [\#3986](https://github.com/ClickHouse/ClickHouse/pull/3986) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Added a few more warnings that are available only in clang 8. [\#3993](https://github.com/ClickHouse/ClickHouse/pull/3993) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Link to `libLLVM` rather than to individual LLVM libs when using shared linking. [\#3989](https://github.com/ClickHouse/ClickHouse/pull/3989) ([Orivej Desh](https://github.com/orivej))
- Added sanitizer variables for test images. [\#4072](https://github.com/ClickHouse/ClickHouse/pull/4072) ([alesapin](https://github.com/alesapin))
- The `clickhouse-server` debian package will recommend the `libcap2-bin` package to use the `setcap` tool for setting capabilities. This is optional. [\#4093](https://github.com/ClickHouse/ClickHouse/pull/4093) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Improved compilation time, fixed includes. [\#3898](https://github.com/ClickHouse/ClickHouse/pull/3898) ([proller](https://github.com/proller))
- Added performance tests for hash functions. [\#3918](https://github.com/ClickHouse/ClickHouse/pull/3918) ([filimonov](https://github.com/filimonov))
- Fixed cyclical library dependencies. [\#3958](https://github.com/ClickHouse/ClickHouse/pull/3958) ([proller](https://github.com/proller))
- Improved compilation with low available memory. [\#4030](https://github.com/ClickHouse/ClickHouse/pull/4030) ([proller](https://github.com/proller))
- Added a test script to reproduce performance degradation in `jemalloc`. [\#4036](https://github.com/ClickHouse/ClickHouse/pull/4036) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Fixed misspellings in comments and string literals under `dbms`. [\#4122](https://github.com/ClickHouse/ClickHouse/pull/4122) ([maiha](https://github.com/maiha))
- Fixed typos in comments. [\#4089](https://github.com/ClickHouse/ClickHouse/pull/4089) ([Evgenii Pravda](https://github.com/kvinty))

## [Changelog for 2018](https://github.com/ClickHouse/ClickHouse/blob/master/docs/en/changelog/2018.md) {#changelog-for-2018}
diff --git a/docs/zh/whats_new/changelog/index.md b/docs/zh/whats_new/changelog/index.md
new file mode 100644
index 00000000000..90bb7abe0b0
--- /dev/null
+++ b/docs/zh/whats_new/changelog/index.md
@@ -0,0 +1,665 @@
+---
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+---

## ClickHouse Release v20.3 {#clickhouse-release-v20-3}

### ClickHouse Release v20.3.4.10, 2020-03-20 {#clickhouse-release-v20-3-4-10-2020-03-20}

#### Bug Fix {#bug-fix}

- This release also contains all bug fixes from 20.1.8.41
- Fix missing `rows_before_limit_at_least` for queries over http (with the processors pipeline). This fixes [\#9730](https://github.com/ClickHouse/ClickHouse/issues/9730). [\#9757](https://github.com/ClickHouse/ClickHouse/pull/9757) ([Nikolai Kochetov](https://github.com/KochetovNicolai))

### ClickHouse Release v20.3.3.6, 2020-03-17 {#clickhouse-release-v20-3-3-6-2020-03-17}

#### Bug Fix {#bug-fix-1}

- This release also contains all bug fixes from 20.1.7.38
- Fix a bug in replication that did not allow replication to work if the user had executed mutations on a previous version. This fixes [\#9645](https://github.com/ClickHouse/ClickHouse/issues/9645). [\#9652](https://github.com/ClickHouse/ClickHouse/pull/9652) ([alesapin](https://github.com/alesapin)). It makes version 20.3 backward compatible again.
- Add the setting `use_compact_format_in_distributed_parts_names` which allows writing files for `INSERT` queries into `Distributed` tables in a more compact format. This fixes [\#9647](https://github.com/ClickHouse/ClickHouse/issues/9647). [\#9653](https://github.com/ClickHouse/ClickHouse/pull/9653) ([alesapin](https://github.com/alesapin)). It makes version 20.3 backward compatible again.

### ClickHouse Release v20.3.2.1, 2020-03-12 {#clickhouse-release-v20-3-2-1-2020-03-12}

#### Backward Incompatible Change {#backward-incompatible-change}

- Fixed the `file name too long` issue when sending data for `Distributed` tables with a large number of replicas. Fixed the issue of replica credentials being exposed in the server log. The format of the directory name on disk was changed to `[shard{shard_index}[_replica{replica_index}]]`. [\#8911](https://github.com/ClickHouse/ClickHouse/pull/8911) ([Mikhail Korotov](https://github.com/millb)) After you upgrade to the new version, you will not be able to downgrade without manual intervention, because old server versions do not recognize the new directory format. If you want to downgrade, you have to manually rename the corresponding directories to the old format. This change is relevant only if you have used asynchronous `INSERT`s to `Distributed` tables. In version 20.3.3 we will introduce a setting that will allow you to enable the new format gradually.
- Changed the format of replication log entries for mutation commands. You have to wait for old mutations to process before installing the new version.
- Implemented a simple memory profiler that dumps stacktraces to `system.trace_log` every N bytes over a soft allocation limit [\#8765](https://github.com/ClickHouse/ClickHouse/pull/8765) ([Ivan](https://github.com/abyss7)) [\#9472](https://github.com/ClickHouse/ClickHouse/pull/9472) ([Alexey Milovidov](https://github.com/alexey-milovidov)) The column `timer_type` of `system.trace_log` was renamed to `trace_type`. This will require changes in third-party performance analysis and flamegraph processing tools.
- Use the OS thread id everywhere instead of an internal thread number. This fixes [\#7477](https://github.com/ClickHouse/ClickHouse/issues/7477). Old `clickhouse-client` cannot receive logs sent from the server when the setting `send_logs_level` is enabled, because the names and types of the structured log messages were changed. On the other hand, different server versions can send logs of different types to each other. When you don't use the `send_logs_level` setting, you should not care. [\#8954](https://github.com/ClickHouse/ClickHouse/pull/8954) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Removed the `indexHint` function [\#9542](https://github.com/ClickHouse/ClickHouse/pull/9542) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Removed the `findClusterIndex`, `findClusterValue` functions. This fixes [\#8641](https://github.com/ClickHouse/ClickHouse/issues/8641). If you were using these functions, send an email to `clickhouse-feedback@yandex-team.com` [\#9543](https://github.com/ClickHouse/ClickHouse/pull/9543) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Now it's not allowed to create columns or add columns with a `SELECT` subquery as the default expression. [\#9481](https://github.com/ClickHouse/ClickHouse/pull/9481) ([alesapin](https://github.com/alesapin))
- Require aliases for subqueries in JOIN. [\#9274](https://github.com/ClickHouse/ClickHouse/pull/9274) ([Artem Zuikov](https://github.com/4ertus2))
- Improved the `ALTER MODIFY/ADD` query logic. Now you cannot `ADD` a column without a type, `MODIFY` of a default expression doesn't change the type of the column, and `MODIFY` of the type doesn't lose the default expression value. Fixes [\#8669](https://github.com/ClickHouse/ClickHouse/issues/8669). [\#9227](https://github.com/ClickHouse/ClickHouse/pull/9227) ([alesapin](https://github.com/alesapin))
- Require a server restart to apply changes in logging configuration. This is a temporary workaround to avoid the bug where the server logs to a deleted log file (see [\#8696](https://github.com/ClickHouse/ClickHouse/issues/8696)). [\#8707](https://github.com/ClickHouse/ClickHouse/pull/8707) ([Alexander Kuzmenkov](https://github.com/akuzm))
- The setting `experimental_use_processors` is enabled by default. This setting enables usage of the new query pipeline. This is an internal refactoring and we expect no visible changes. If you see any issues, set it back to zero. [\#8768](https://github.com/ClickHouse/ClickHouse/pull/8768) ([Alexey Milovidov](https://github.com/alexey-milovidov))
#### New Feature {#new-feature}

- Added `Avro` and `AvroConfluent` input/output formats [\#8571](https://github.com/ClickHouse/ClickHouse/pull/8571) ([Andrew Onyshchuk](https://github.com/oandrew)) [\#8957](https://github.com/ClickHouse/ClickHouse/pull/8957) ([Andrew Onyshchuk](https://github.com/oandrew)) [\#8717](https://github.com/ClickHouse/ClickHouse/pull/8717) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Multi-threaded and non-blocking updates of expired keys in `cache` dictionaries (with optional permission to read the old ones). [\#8303](https://github.com/ClickHouse/ClickHouse/pull/8303) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
- Added the query `ALTER ... MATERIALIZE TTL`. It runs a mutation that forces removal of expired data by TTL and recalculates the meta-information about TTL in all parts. [\#8775](https://github.com/ClickHouse/ClickHouse/pull/8775) ([Anton Popov](https://github.com/CurtizJ))
- Switch from HashJoin to MergeJoin (on disk) if needed [\#9082](https://github.com/ClickHouse/ClickHouse/pull/9082) ([Artem Zuikov](https://github.com/4ertus2))
- Added the `MOVE PARTITION` command for `ALTER TABLE` [\#4729](https://github.com/ClickHouse/ClickHouse/issues/4729) [\#6168](https://github.com/ClickHouse/ClickHouse/pull/6168) ([Guillaume Tassery](https://github.com/YiuRULE))
- Reloading the storage configuration from the configuration file on the fly. [\#8594](https://github.com/ClickHouse/ClickHouse/pull/8594) ([Vladimir Chebotarev](https://github.com/excitoon))
- Allowed changing `storage_policy` to a not less rich one. [\#8107](https://github.com/ClickHouse/ClickHouse/pull/8107) ([Vladimir Chebotarev](https://github.com/excitoon))
- Added support for globs/wildcards for S3 storage and table function. [\#8851](https://github.com/ClickHouse/ClickHouse/pull/8851) ([Vladimir Chebotarev](https://github.com/excitoon))
- Implemented `bitAnd`, `bitOr`, `bitXor`, `bitNot` for the `FixedString(N)` data type. [\#9091](https://github.com/ClickHouse/ClickHouse/pull/9091) ([Guillaume Tassery](https://github.com/YiuRULE))
- Added the function `bitCount`. This fixes [\#8702](https://github.com/ClickHouse/ClickHouse/issues/8702). [\#8708](https://github.com/ClickHouse/ClickHouse/pull/8708) ([Alexey Milovidov](https://github.com/alexey-milovidov)) [\#8749](https://github.com/ClickHouse/ClickHouse/pull/8749) ([ikopylov](https://github.com/ikopylov))
- Added the `generateRandom` table function to generate random rows with a given schema. Allows populating an arbitrary test table with data (a small sketch follows this list). [\#8994](https://github.com/ClickHouse/ClickHouse/pull/8994) ([Ilya Yatsishin](https://github.com/qoega))
- `JSONEachRowFormat`: support the special case when objects are enclosed in a top-level array. [\#8860](https://github.com/ClickHouse/ClickHouse/pull/8860) ([Pavel Kruglov](https://github.com/Avogar))
- Now it's possible to create a column with a `DEFAULT` expression which depends on a column with a default `ALIAS` expression. [\#9489](https://github.com/ClickHouse/ClickHouse/pull/9489) ([alesapin](https://github.com/alesapin))
- Allow specifying a `--limit` larger than the source data size in `clickhouse-obfuscator`. The data will repeat itself with a different random seed. [\#9155](https://github.com/ClickHouse/ClickHouse/pull/9155) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Added the `groupArraySample` function (similar to `groupArray`) with a reservoir sampling algorithm. [\#8286](https://github.com/ClickHouse/ClickHouse/pull/8286) ([Amos Bird](https://github.com/amosbird))
- Now you can monitor the size of the update queue in `cache`/`complex_key_cache` dictionaries via system metrics. [\#9413](https://github.com/ClickHouse/ClickHouse/pull/9413) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
- Allow using CRLF as the line separator in the CSV output format with the setting `output_format_csv_crlf_end_of_line` set to 1 [\#8934](https://github.com/ClickHouse/ClickHouse/pull/8934) [\#8935](https://github.com/ClickHouse/ClickHouse/pull/8935) [\#8963](https://github.com/ClickHouse/ClickHouse/pull/8963) ([Mikhail Korotov](https://github.com/millb))
- Implemented more functions of the [H3](https://github.com/uber/h3) API: `h3GetBaseCell`, `h3HexAreaM2`, `h3IndexesAreNeighbors`, `h3ToChildren`, `h3ToString` and `stringToH3` [\#8938](https://github.com/ClickHouse/ClickHouse/pull/8938) ([Nico Mandery](https://github.com/nmandery))
- Introduced a new setting: `max_parser_depth` to control maximum stack size and allow large complex queries. This fixes [\#6681](https://github.com/ClickHouse/ClickHouse/issues/6681) and [\#7668](https://github.com/ClickHouse/ClickHouse/issues/7668). [\#8647](https://github.com/ClickHouse/ClickHouse/pull/8647) ([Maxim Smirnov](https://github.com/qMBQx8GH))
- Added the setting `force_optimize_skip_unused_shards` to throw if skipping of unused shards is not possible [\#8805](https://github.com/ClickHouse/ClickHouse/pull/8805) ([Azat Khuzhin](https://github.com/azat))
- Allow configuring multiple disks/volumes for storing data for send in the `Distributed` engine [\#8756](https://github.com/ClickHouse/ClickHouse/pull/8756) ([Azat Khuzhin](https://github.com/azat))
- Support a storage policy (`<tmp_policy>`) for storing temporary data. [\#8750](https://github.com/ClickHouse/ClickHouse/pull/8750) ([Azat Khuzhin](https://github.com/azat))
- Added the `X-ClickHouse-Exception-Code` HTTP header that is set if an exception was thrown before sending data. This implements [\#4971](https://github.com/ClickHouse/ClickHouse/issues/4971). [\#8786](https://github.com/ClickHouse/ClickHouse/pull/8786) ([Mikhail Korotov](https://github.com/millb))
- Added the function `ifNotFinite`. It is just syntactic sugar: `ifNotFinite(x, y) = isFinite(x) ? x : y` (a small example follows this list). [\#8710](https://github.com/ClickHouse/ClickHouse/pull/8710) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Added the `last_successful_update_time` column to the `system.dictionaries` table [\#9394](https://github.com/ClickHouse/ClickHouse/pull/9394) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
- Added the `blockSerializedSize` function (size on disk without compression) [\#8952](https://github.com/ClickHouse/ClickHouse/pull/8952) ([Azat Khuzhin](https://github.com/azat))
- Added the function `moduloOrZero` [\#9358](https://github.com/ClickHouse/ClickHouse/pull/9358) ([hcz](https://github.com/hczhcz))
- Added the system tables `system.zeros` and `system.zeros_mt` as well as the table functions `zeros()` and `zeros_mt()`. The tables (and table functions) contain a single column with the name `zero` and type `UInt8`. This column contains zeros. It is needed for test purposes as the fastest method to generate many rows. This fixes [\#6604](https://github.com/ClickHouse/ClickHouse/issues/6604) [\#9593](https://github.com/ClickHouse/ClickHouse/pull/9593) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
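To make the `generateRandom` and `ifNotFinite` entries above concrete, here is a small hedged sketch (the optional `generateRandom` arguments shown — random seed and max string length — follow the documented order, but the values and schema are invented for illustration):

```sql
-- Populate three random rows matching an ad-hoc schema.
SELECT * FROM generateRandom('id UInt32, name String', 1, 10) LIMIT 3;

-- ifNotFinite is the syntactic sugar described above:
-- 1 / 0 is +inf for Float64 division, so this returns -1.
SELECT ifNotFinite(1 / 0, -1);
```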
#### Experimental Feature {#experimental-feature}

- Added a new compact format of parts in `MergeTree`-family tables in which all columns are stored in one file. It helps to increase the performance of small and frequent inserts. The old format (one file per column) is now called wide. The data storing format is controlled by the settings `min_bytes_for_wide_part` and `min_rows_for_wide_part`. [\#8290](https://github.com/ClickHouse/ClickHouse/pull/8290) ([Anton Popov](https://github.com/CurtizJ))
- Support for S3 storage for `Log`, `TinyLog` and `StripeLog` tables. [\#8862](https://github.com/ClickHouse/ClickHouse/pull/8862) ([Pavel Kovalenko](https://github.com/Jokser))

#### Bug Fix {#bug-fix-2}

- Fixed inconsistent whitespaces in log messages. [\#9322](https://github.com/ClickHouse/ClickHouse/pull/9322) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Fixed a bug in which arrays of unnamed tuples were flattened as Nested structures on table creation. [\#8866](https://github.com/ClickHouse/ClickHouse/pull/8866) ([achulkov2](https://github.com/achulkov2))
- Fixed the issue when a “Too many open files” error could happen if there are too many files matching a glob pattern in a `File` table or `file` table function. Now files are opened lazily. This fixes [\#8857](https://github.com/ClickHouse/ClickHouse/issues/8857) [\#8861](https://github.com/ClickHouse/ClickHouse/pull/8861) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- DROP TEMPORARY TABLE now drops only the temporary table. [\#8907](https://github.com/ClickHouse/ClickHouse/pull/8907) ([Vitaly Baranov](https://github.com/vitlibar))
- Remove an outdated partition when we shut down the server or DETACH/ATTACH a table. [\#8602](https://github.com/ClickHouse/ClickHouse/pull/8602) ([Guillaume Tassery](https://github.com/YiuRULE))
- For the default disk, calculate the free space from the `data` subdirectory. Fixed the issue when the amount of free space was calculated incorrectly if the `data` directory is mounted to a separate device (a rare case). This fixes [\#7441](https://github.com/ClickHouse/ClickHouse/issues/7441) [\#9257](https://github.com/ClickHouse/ClickHouse/pull/9257) ([Mikhail Korotov](https://github.com/millb))
- Allow a comma (cross) join with IN() inside. [\#9251](https://github.com/ClickHouse/ClickHouse/pull/9251) ([Artem Zuikov](https://github.com/4ertus2))
- Allow rewriting CROSS to INNER JOIN if there's a \[NOT\] LIKE operator in the WHERE section. [\#9229](https://github.com/ClickHouse/ClickHouse/pull/9229) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed possible incorrect results after `GROUP BY` with the enabled setting `distributed_aggregation_memory_efficient`. Fixes [\#9134](https://github.com/ClickHouse/ClickHouse/issues/9134). [\#9289](https://github.com/ClickHouse/ClickHouse/pull/9289) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Found keys were counted as missed in the metrics of cache dictionaries. [\#9411](https://github.com/ClickHouse/ClickHouse/pull/9411) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
- Fixed the replication protocol incompatibility introduced in [\#8598](https://github.com/ClickHouse/ClickHouse/issues/8598). [\#9412](https://github.com/ClickHouse/ClickHouse/pull/9412) ([alesapin](https://github.com/alesapin))
- Fixed a race condition on `queue_task_handle` at the startup of `ReplicatedMergeTree` tables. [\#9552](https://github.com/ClickHouse/ClickHouse/pull/9552) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- The token `NOT` didn't work in `SHOW TABLES NOT LIKE` queries [\#8727](https://github.com/ClickHouse/ClickHouse/issues/8727) [\#8940](https://github.com/ClickHouse/ClickHouse/pull/8940) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Added a range check to the function `h3EdgeLengthM`. Without this check, buffer overflow is possible. [\#8945](https://github.com/ClickHouse/ClickHouse/pull/8945) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Fixed a bug in the batched calculation of ternary logical operations on multiple arguments (more than 10). [\#8718](https://github.com/ClickHouse/ClickHouse/pull/8718) ([Alexander Kazakov](https://github.com/Akazz))
- Fixed an error in PREWHERE optimization, which could lead to segfaults or an `Inconsistent number of columns got from MergeTreeRangeReader` exception. [\#9024](https://github.com/ClickHouse/ClickHouse/pull/9024) ([Anton Popov](https://github.com/CurtizJ))
- Fixed an unexpected `Timeout exceeded while reading from socket` exception, which randomly happened on secure connections before the timeout was actually exceeded and when the query profiler is enabled. Also added the `connect_timeout_with_failover_secure_ms` setting (default 100 ms), which is similar to `connect_timeout_with_failover_ms` but is used for secure connections (because an SSL handshake is slower than an ordinary TCP connection) [\#9026](https://github.com/ClickHouse/ClickHouse/pull/9026) ([tavplubix](https://github.com/tavplubix))
- Fixed a bug with mutation finalization, when a mutation could hang in a state with `parts_to_do=0` and `is_done=0`. [\#9022](https://github.com/ClickHouse/ClickHouse/pull/9022) ([alesapin](https://github.com/alesapin))
- Use the new ANY JOIN logic with the `partial_merge_join` setting. It's possible to make `ANY|ALL|SEMI LEFT` and `ALL INNER` joins with `partial_merge_join=1` now. [\#8932](https://github.com/ClickHouse/ClickHouse/pull/8932) ([Artem Zuikov](https://github.com/4ertus2))
- A shard now clamps the settings obtained from the initiator to the shard's constraints instead of throwing an exception. This fix allows sending queries to a shard with other constraints. [\#9447](https://github.com/ClickHouse/ClickHouse/pull/9447) ([Vitaly Baranov](https://github.com/vitlibar))
- Fixed a memory management problem in `MergeTreeReadPool`. [\#8791](https://github.com/ClickHouse/ClickHouse/pull/8791) ([Vladimir Chebotarev](https://github.com/excitoon))
- Fixed the `toDecimal*OrNull()` functions family when called with a string `e`. Fixes [\#8312](https://github.com/ClickHouse/ClickHouse/issues/8312) [\#8764](https://github.com/ClickHouse/ClickHouse/pull/8764) ([Artem Zuikov](https://github.com/4ertus2))
- Make sure that `FORMAT Null` sends no data to the client. [\#8767](https://github.com/ClickHouse/ClickHouse/pull/8767) ([Alexander Kuzmenkov](https://github.com/akuzm))
- Fixed a bug where the timestamp in `LiveViewBlockInputStream` was not updated. `LIVE VIEW` is an experimental feature. [\#8644](https://github.com/ClickHouse/ClickHouse/pull/8644) ([vxider](https://github.com/Vxider)) [\#8625](https://github.com/ClickHouse/ClickHouse/pull/8625) ([vxider](https://github.com/Vxider))
- Fixed the wrong behaviour of `ALTER MODIFY TTL` that did not allow deleting old TTL expressions. [\#8422](https://github.com/ClickHouse/ClickHouse/pull/8422) ([Vladimir Chebotarev](https://github.com/excitoon))
- Fixed a UBSan report in MergeTreeIndexSet. This fixes [\#9250](https://github.com/ClickHouse/ClickHouse/issues/9250) [\#9365](https://github.com/ClickHouse/ClickHouse/pull/9365) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Fixed the behaviour of the `match` and `extract` functions when the haystack has zero bytes. The behaviour was wrong when the haystack was constant. This fixes [\#9160](https://github.com/ClickHouse/ClickHouse/issues/9160) [\#9163](https://github.com/ClickHouse/ClickHouse/pull/9163) ([Alexey Milovidov](https://github.com/alexey-milovidov)) [\#9345](https://github.com/ClickHouse/ClickHouse/pull/9345) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Avoid throwing from a destructor in the Apache Avro third-party library. [\#9066](https://github.com/ClickHouse/ClickHouse/pull/9066) ([Andrew Onyshchuk](https://github.com/oandrew))
- Don't commit a batch polled from `Kafka` partially, as it can lead to holes in the data. [\#8876](https://github.com/ClickHouse/ClickHouse/pull/8876) ([filimonov](https://github.com/filimonov))
- Fixed `joinGet` with nullable return types. https://github.com/ClickHouse/ClickHouse/issues/8919 [\#9014](https://github.com/ClickHouse/ClickHouse/pull/9014) ([Amos Bird](https://github.com/amosbird))
- Fixed data incompatibility when compressed with the `T64` codec [\#9016](https://github.com/ClickHouse/ClickHouse/pull/9016) ([Artem Zuikov](https://github.com/4ertus2)) Fixed data type ids in the `T64` compression codec that led to wrong (de)compression in affected versions. [\#9033](https://github.com/ClickHouse/ClickHouse/pull/9033) ([Artem Zuikov](https://github.com/4ertus2))
- Added the setting `enable_early_constant_folding` and disabled it in some cases that led to errors. [\#9010](https://github.com/ClickHouse/ClickHouse/pull/9010) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed the pushdown predicate optimizer with VIEW and enabled the test [\#9011](https://github.com/ClickHouse/ClickHouse/pull/9011) ([Winter Zhang](https://github.com/zhang2014))
- Fixed a segfault in `Merge` tables that can happen when reading from `File` storages [\#9387](https://github.com/ClickHouse/ClickHouse/pull/9387) ([tavplubix](https://github.com/tavplubix))
- Added a check for the storage policy in `ATTACH PARTITION FROM`, `REPLACE PARTITION`, `MOVE TO TABLE`. Otherwise it could make the data of a part inaccessible after a restart and prevent ClickHouse from starting. [\#9383](https://github.com/ClickHouse/ClickHouse/pull/9383) ([Vladimir Chebotarev](https://github.com/excitoon))
- Fixed alters if there is a TTL set for the table. [\#8800](https://github.com/ClickHouse/ClickHouse/pull/8800) ([Anton Popov](https://github.com/CurtizJ))
- Fixed a race condition that can happen when `SYSTEM RELOAD ALL DICTIONARIES` is executed while some dictionary is being modified/added/removed. [\#8801](https://github.com/ClickHouse/ClickHouse/pull/8801) ([Vitaly Baranov](https://github.com/vitlibar))
- In previous versions the `Memory` database engine used an empty data path, so tables were created in the `path` directory (e.g. `/var/lib/clickhouse/`), not in the data directory of the database (e.g. `/var/lib/clickhouse/db_name`). [\#8753](https://github.com/ClickHouse/ClickHouse/pull/8753) ([tavplubix](https://github.com/tavplubix))
- Fixed wrong log messages about a missing default disk or policy. [\#9530](https://github.com/ClickHouse/ClickHouse/pull/9530) ([Vladimir Chebotarev](https://github.com/excitoon))
- Fixed not(has()) for the bloom\_filter index of array types. [\#9407](https://github.com/ClickHouse/ClickHouse/pull/9407) ([achimbab](https://github.com/achimbab))
- Allow the first column(s) in a table with the `Log` engine to be an alias [\#9231](https://github.com/ClickHouse/ClickHouse/pull/9231) ([Ivan](https://github.com/abyss7))
- Fixed the order of ranges while reading from a `MergeTree` table in one thread. It could lead to exceptions from `MergeTreeRangeReader` or wrong query results. [\#9050](https://github.com/ClickHouse/ClickHouse/pull/9050) ([Anton Popov](https://github.com/CurtizJ))
- Make `reinterpretAsFixedString` return `FixedString` instead of `String`. [\#9052](https://github.com/ClickHouse/ClickHouse/pull/9052) ([Andrew Onyshchuk](https://github.com/oandrew))
- Avoid extremely rare cases when the user can get a wrong error message (`Success` instead of a detailed error description). [\#9457](https://github.com/ClickHouse/ClickHouse/pull/9457) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Do not crash when using the `Template` format with an empty row template. [\#8785](https://github.com/ClickHouse/ClickHouse/pull/8785) ([Alexander Kuzmenkov](https://github.com/akuzm))
- Metadata files for system tables could be created in the wrong place [\#8653](https://github.com/ClickHouse/ClickHouse/pull/8653) ([tavplubix](https://github.com/tavplubix)) Fixes [\#8581](https://github.com/ClickHouse/ClickHouse/issues/8581).
- Fixed a data race on exception\_ptr in cache dictionaries [\#8303](https://github.com/ClickHouse/ClickHouse/issues/8303). [\#9379](https://github.com/ClickHouse/ClickHouse/pull/9379) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
- Do not throw an exception for the query `ATTACH TABLE IF NOT EXISTS`. Previously it was thrown if the table already exists, despite the `IF NOT EXISTS` clause. [\#8967](https://github.com/ClickHouse/ClickHouse/pull/8967) ([Anton Popov](https://github.com/CurtizJ))
- Fixed a missing closing paren in an exception message. [\#8811](https://github.com/ClickHouse/ClickHouse/pull/8811) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Avoid the message `Possible deadlock avoided` at startup of clickhouse-client in interactive mode. [\#9455](https://github.com/ClickHouse/ClickHouse/pull/9455) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Fixed the issue when the padding at the end of a base64-encoded value can be malformed. Updated the base64 library. This fixes [\#9491](https://github.com/ClickHouse/ClickHouse/issues/9491), closes [\#9492](https://github.com/ClickHouse/ClickHouse/issues/9492) [\#9500](https://github.com/ClickHouse/ClickHouse/pull/9500) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Prevent losing data in `Kafka` in rare cases when an exception happens after reading the suffix but before committing. Fixes [\#9378](https://github.com/ClickHouse/ClickHouse/issues/9378) [\#9507](https://github.com/ClickHouse/ClickHouse/pull/9507) ([filimonov](https://github.com/filimonov))
- Fixed an exception in `DROP TABLE IF EXISTS` [\#8663](https://github.com/ClickHouse/ClickHouse/pull/8663) ([Nikita Vasilev](https://github.com/nikvas0))
- Fixed a crash when a user tries `ALTER MODIFY SETTING` for the old-formatted `MergeTree` table engines family. [\#9435](https://github.com/ClickHouse/ClickHouse/pull/9435) ([alesapin](https://github.com/alesapin))
- Support UInt64 numbers that don't fit in Int64 in JSON-related functions. Updated SIMDJSON to master. This fixes [\#9209](https://github.com/ClickHouse/ClickHouse/issues/9209) [\#9344](https://github.com/ClickHouse/ClickHouse/pull/9344) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Fixed the execution of inverted predicates when a non-strictly monotonic functional index is used. [\#9223](https://github.com/ClickHouse/ClickHouse/pull/9223) ([Alexander Kazakov](https://github.com/Akazz))
- Don't try to fold `IN` constants in `GROUP BY` [\#8868](https://github.com/ClickHouse/ClickHouse/pull/8868) ([Amos Bird](https://github.com/amosbird))
- Fixed a bug in `ALTER DELETE` mutations which led to index corruption. This fixes [\#9019](https://github.com/ClickHouse/ClickHouse/issues/9019) and [\#8982](https://github.com/ClickHouse/ClickHouse/issues/8982). Additionally fixed extremely rare race conditions in `ReplicatedMergeTree` `ALTER` queries. [\#9048](https://github.com/ClickHouse/ClickHouse/pull/9048) ([alesapin](https://github.com/alesapin))
- When the setting `compile_expressions` is enabled, you could get `unexpected column` in `LLVMExecutableFunction` when we use a `Nullable` type [\#8910](https://github.com/ClickHouse/ClickHouse/pull/8910) ([Guillaume Tassery](https://github.com/YiuRULE))
- Multiple fixes for the `Kafka` engine: 1) fixed duplicates that were appearing during consumer group rebalance. 2) Fixed rare ‘holes’ that appeared when data were polled from several partitions with one poll and committed partially (now we always process/commit the whole polled block of messages). 3) Fixed flushes by block size (before that, only flushing by timeout was working properly). 4) Better subscription procedure (with assignment feedback). 5) Made tests work faster (with default intervals and timeouts). Because data was not flushed by block size before (as it should be according to the documentation), this PR may lead to some performance degradation with default settings (due to more frequent and smaller flushes which are less optimal). If you encounter a performance issue after this change, please increase `kafka_max_block_size` in the table to a bigger value (for example `CREATE TABLE ...Engine=Kafka ... SETTINGS ... kafka_max_block_size=524288`). Fixes [\#7259](https://github.com/ClickHouse/ClickHouse/issues/7259) [\#8917](https://github.com/ClickHouse/ClickHouse/pull/8917) ([filimonov](https://github.com/filimonov))
- Fixed a `Parameter out of bound` exception in some queries after PREWHERE optimizations. [\#8914](https://github.com/ClickHouse/ClickHouse/pull/8914) ([Baudouin Giard](https://github.com/bgiard))
- Fixed the case of mixed constness of arguments of the function `arrayZip`. [\#8705](https://github.com/ClickHouse/ClickHouse/pull/8705) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- When executing a `CREATE` query, fold constant expressions in storage engine arguments. Replace an empty database name with the current database. Fixes [\#6508](https://github.com/ClickHouse/ClickHouse/issues/6508), [\#3492](https://github.com/ClickHouse/ClickHouse/issues/3492) [\#9262](https://github.com/ClickHouse/ClickHouse/pull/9262) ([tavplubix](https://github.com/tavplubix))
- Now it's not possible to create or add columns with simple cyclic aliases like `a DEFAULT b, b DEFAULT a`. [\#9603](https://github.com/ClickHouse/ClickHouse/pull/9603) ([alesapin](https://github.com/alesapin))
- Fixed a bug with a double move which may corrupt the original part. This is relevant if you use `ALTER TABLE MOVE` [\#8680](https://github.com/ClickHouse/ClickHouse/pull/8680) ([Vladimir Chebotarev](https://github.com/excitoon))
- Allow the `interval` identifier to parse correctly without backticks. Fixed the issue when a query could not be executed even if the `interval` identifier is enclosed in backticks or double quotes. This fixes [\#9124](https://github.com/ClickHouse/ClickHouse/issues/9124). [\#9142](https://github.com/ClickHouse/ClickHouse/pull/9142) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Fixed the fuzz test and the incorrect behaviour of the `bitTestAll`/`bitTestAny` functions. [\#9143](https://github.com/ClickHouse/ClickHouse/pull/9143) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Fixed a possible crash/wrong number of rows in `LIMIT n WITH TIES` when there are a lot of rows equal to the n-th row (an illustrative example follows this list). [\#9464](https://github.com/ClickHouse/ClickHouse/pull/9464) ([tavplubix](https://github.com/tavplubix))
- Fixed mutations with parts written with enabled `insert_quorum`. [\#9463](https://github.com/ClickHouse/ClickHouse/pull/9463) ([alesapin](https://github.com/alesapin))
- Fixed a data race at the destruction of `Poco::HTTPServer`. It could happen when the server is started and immediately shut down. [\#9468](https://github.com/ClickHouse/ClickHouse/pull/9468) ([Anton Popov](https://github.com/CurtizJ))
- Fixed a bug in which a misleading error message was shown when running `SHOW CREATE TABLE a_table_that_does_not_exist`. [\#8899](https://github.com/ClickHouse/ClickHouse/pull/8899) ([achulkov2](https://github.com/achulkov2))
- Fixed a `Parameters are out of bound` exception in some rare cases when we have a constant in the `SELECT` clause and an `ORDER BY` plus a `LIMIT` clause. [\#8892](https://github.com/ClickHouse/ClickHouse/pull/8892) ([Guillaume Tassery](https://github.com/YiuRULE))
- Fixed mutation finalization, when an already done mutation could have the status `is_done=0`. [\#9217](https://github.com/ClickHouse/ClickHouse/pull/9217) ([alesapin](https://github.com/alesapin))
- Prevent executing `ALTER ADD INDEX` for MergeTree tables with the old syntax, because it doesn't work. [\#8822](https://github.com/ClickHouse/ClickHouse/pull/8822) ([Mikhail Korotov](https://github.com/millb))
- During server startup, do not access tables which `LIVE VIEW` depends on, so the server will be able to start. Also remove `LIVE VIEW` dependencies when detaching a `LIVE VIEW`. `LIVE VIEW` is an experimental feature. [\#8824](https://github.com/ClickHouse/ClickHouse/pull/8824) ([tavplubix](https://github.com/tavplubix))
- Fixed a possible segfault in `MergeTreeRangeReader` while executing `PREWHERE`. [\#9106](https://github.com/ClickHouse/ClickHouse/pull/9106) ([Anton Popov](https://github.com/CurtizJ))
- Fixed possibly mismatched checksums with column TTLs. [\#9451](https://github.com/ClickHouse/ClickHouse/pull/9451) ([Anton Popov](https://github.com/CurtizJ))
- Fixed a bug when parts were not being moved in background by TTL rules in case there is only one volume. [\#8672](https://github.com/ClickHouse/ClickHouse/pull/8672) ([Vladimir Chebotarev](https://github.com/excitoon))
- Fixed the issue `Method createColumn() is not implemented for data type Set`. This fixes [\#7799](https://github.com/ClickHouse/ClickHouse/issues/7799). [\#8674](https://github.com/ClickHouse/ClickHouse/pull/8674) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Now we will try to finalize mutations more frequently. [\#9427](https://github.com/ClickHouse/ClickHouse/pull/9427) ([alesapin](https://github.com/alesapin))
- Fixed `intDiv` by a minus one constant [\#9351](https://github.com/ClickHouse/ClickHouse/pull/9351) ([hcz](https://github.com/hczhcz))
- Fixed a possible race condition in `BlockIO`. [\#9356](https://github.com/ClickHouse/ClickHouse/pull/9356) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fixed a bug leading to server termination when trying to use/drop a `Kafka` table created with wrong parameters. [\#9513](https://github.com/ClickHouse/ClickHouse/pull/9513) ([filimonov](https://github.com/filimonov))
- Added a workaround for the case when the OS returns a wrong result for the `timer_create` function. [\#8837](https://github.com/ClickHouse/ClickHouse/pull/8837) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Fixed an error in the usage of the `min_marks_for_seek` parameter. Fixed the error message when there is no sharding key in a Distributed table and we try to skip unused shards. [\#8908](https://github.com/ClickHouse/ClickHouse/pull/8908) ([Azat Khuzhin](https://github.com/azat))
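For the `LIMIT n WITH TIES` fix above, a tiny illustration of the semantics involved (the data is invented, not from the changelog):

```sql
-- WITH TIES keeps the extra rows that compare equal to the last row
-- of the LIMIT — exactly the "many rows equal to the n-th row" case.
SELECT n
FROM (SELECT arrayJoin([1, 1, 2, 2, 2, 3]) AS n)
ORDER BY n
LIMIT 3 WITH TIES;
-- Returns 1, 1, 2, 2, 2: the two extra rows tie with the 3rd row's value.
```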
#### Improvement {#improvement}

- Implemented `ALTER MODIFY/DROP` queries on top of mutations for the `ReplicatedMergeTree*` engines family. Now `ALTERS` block only at the metadata update stage, and don't block after that. [\#8701](https://github.com/ClickHouse/ClickHouse/pull/8701) ([alesapin](https://github.com/alesapin))
- Added the ability to rewrite CROSS to INNER JOINs with the `WHERE` section containing unqualified names. [\#9512](https://github.com/ClickHouse/ClickHouse/pull/9512) ([Artem Zuikov](https://github.com/4ertus2))
- Made `SHOW TABLES` and `SHOW DATABASES` queries support `WHERE` expressions and `FROM`/`IN` (an example follows this list) [\#9076](https://github.com/ClickHouse/ClickHouse/pull/9076) ([sundyli](https://github.com/sundy-li))
- Added the setting `deduplicate_blocks_in_dependent_materialized_views`. [\#9070](https://github.com/ClickHouse/ClickHouse/pull/9070) ([urykhy](https://github.com/urykhy))
- After recent changes, the MySQL client started to print binary strings in hex, thereby making them unreadable ([\#9032](https://github.com/ClickHouse/ClickHouse/issues/9032)). The workaround in ClickHouse is to mark string columns as UTF-8, which is not always, but usually, the case. [\#9079](https://github.com/ClickHouse/ClickHouse/pull/9079) ([Yuriy Baranov](https://github.com/yurriy))
- Added support for String and FixedString keys for `sumMap` [\#8903](https://github.com/ClickHouse/ClickHouse/pull/8903) ([Baudouin Giard](https://github.com/bgiard))
- Support string keys in SummingMergeTree maps [\#8933](https://github.com/ClickHouse/ClickHouse/pull/8933) ([Baudouin Giard](https://github.com/bgiard))
- Signal termination of a thread to the thread pool even if the thread has thrown an exception [\#8736](https://github.com/ClickHouse/ClickHouse/pull/8736) ([Ding Xiang Fei](https://github.com/dingxiangfei2009))
- Allow setting `query_id` in `clickhouse-benchmark` [\#9416](https://github.com/ClickHouse/ClickHouse/pull/9416) ([Anton Popov](https://github.com/CurtizJ))
- Don't allow strange expressions in `ALTER TABLE ... PARTITION partition` queries. This addresses [\#7192](https://github.com/ClickHouse/ClickHouse/issues/7192) [\#8835](https://github.com/ClickHouse/ClickHouse/pull/8835) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- The table `system.table_engines` now provides information about feature support (like `supports_ttl` or `supports_sort_order`). [\#8830](https://github.com/ClickHouse/ClickHouse/pull/8830) ([Max Akhmedov](https://github.com/zlobober))
- Enabled `system.metric_log` by default. It will contain rows with values of ProfileEvents and CurrentMetrics collected with the “collect\_interval\_milliseconds” interval (one second by default). The table is very small (usually on the order of megabytes) and collecting this data by default is reasonable. [\#9225](https://github.com/ClickHouse/ClickHouse/pull/9225) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Initialize query profiler for all threads in a group, e.g. it allows to fully profile insert-queries. Fixes [\#6964](https://github.com/ClickHouse/ClickHouse/issues/6964) [\#8874](https://github.com/ClickHouse/ClickHouse/pull/8874) ([Ivan](https://github.com/abyss7))
- Now a temporary `LIVE VIEW` is created by `CREATE LIVE VIEW name WITH TIMEOUT [42] ...` instead of `CREATE TEMPORARY LIVE VIEW ...`, because the previous syntax was not consistent with `CREATE TEMPORARY TABLE ...` [\#9131](https://github.com/ClickHouse/ClickHouse/pull/9131) ([tavplubix](https://github.com/tavplubix))
- Added the text\_log.level configuration parameter to limit entries that go to the `system.text_log` table [\#8809](https://github.com/ClickHouse/ClickHouse/pull/8809) ([Azat Khuzhin](https://github.com/azat))
- Allow putting downloaded parts onto disks/volumes according to TTL rules [\#8598](https://github.com/ClickHouse/ClickHouse/pull/8598) ([Vladimir Chebotarev](https://github.com/excitoon))
- For external MySQL dictionaries, allow mutualizing the MySQL connection pool to “share” them among dictionaries. This option significantly reduces the number of connections to MySQL servers. [\#9409](https://github.com/ClickHouse/ClickHouse/pull/9409) ([Clément Rodriguez](https://github.com/clemrodriguez))
- Show the nearest query execution time for quantiles in `clickhouse-benchmark` output instead of interpolated values. It's better to show values that correspond to the execution time of some queries. [\#8712](https://github.com/ClickHouse/ClickHouse/pull/8712) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Possibility to add a key and timestamp for the message when inserting data into Kafka. Fixes [\#7198](https://github.com/ClickHouse/ClickHouse/issues/7198) [\#8969](https://github.com/ClickHouse/ClickHouse/pull/8969) ([filimonov](https://github.com/filimonov))
- If the server is run from a terminal, highlight the thread number, query id and log priority by colors. This is for improved readability of correlated log messages for developers. [\#8961](https://github.com/ClickHouse/ClickHouse/pull/8961) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Better exception messages while loading tables for `Ordinary` databases. [\#9527](https://github.com/ClickHouse/ClickHouse/pull/9527) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Implemented `arraySlice` for arrays with aggregate function states. This fixes [\#9388](https://github.com/ClickHouse/ClickHouse/issues/9388) [\#9391](https://github.com/ClickHouse/ClickHouse/pull/9391) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Allow constant functions and constant arrays to be used on the right side of the IN operator. [\#8813](https://github.com/ClickHouse/ClickHouse/pull/8813) ([Anton Popov](https://github.com/CurtizJ))
- If a ZooKeeper exception has happened while fetching data for system.replicas, display it in a separate column. This implements [\#9137](https://github.com/ClickHouse/ClickHouse/issues/9137) [\#9138](https://github.com/ClickHouse/ClickHouse/pull/9138) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Atomically remove MergeTree data parts on destroy. [\#8402](https://github.com/ClickHouse/ClickHouse/pull/8402) ([Vladimir Chebotarev](https://github.com/excitoon))
- Support row-level security for Distributed tables. [\#8926](https://github.com/ClickHouse/ClickHouse/pull/8926) ([Ivan](https://github.com/abyss7))
- Now we recognize suffixes (like KB, KiB…) in settings values. [\#8072](https://github.com/ClickHouse/ClickHouse/pull/8072) ([Mikhail Korotov](https://github.com/millb))
- Prevent out of memory while constructing the result of a large JOIN. [\#8637](https://github.com/ClickHouse/ClickHouse/pull/8637) ([Artem Zuikov](https://github.com/4ertus2))
- Added names of clusters to suggestions in interactive mode in `clickhouse-client`. [\#8709](https://github.com/ClickHouse/ClickHouse/pull/8709) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Initialize query profiler for all threads in a group, e.g. it allows to fully profile insert-queries [\#8820](https://github.com/ClickHouse/ClickHouse/pull/8820) ([Ivan](https://github.com/abyss7))
- Added the column `exception_code` in the `system.query_log` table. [\#8770](https://github.com/ClickHouse/ClickHouse/pull/8770) ([Mikhail Korotov](https://github.com/millb))
- Enabled the MySQL compatibility server on port `9004` in the default server configuration file. Fixed the password generation command in the example in the configuration. [\#8771](https://github.com/ClickHouse/ClickHouse/pull/8771) ([Yuriy Baranov](https://github.com/yurriy))
- Prevent abort on shutdown if the filesystem is read-only. This fixes [\#9094](https://github.com/ClickHouse/ClickHouse/issues/9094) [\#9100](https://github.com/ClickHouse/ClickHouse/pull/9100) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Better exception message when length is required in an HTTP POST query. [\#9453](https://github.com/ClickHouse/ClickHouse/pull/9453) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Added `_path` and `_file` virtual columns to the `HDFS` and `File` engines and the `hdfs` and `file` table functions [\#8489](https://github.com/ClickHouse/ClickHouse/pull/8489) ([Olga Khvostikova](https://github.com/stavrolia))
- Fixed the error `Cannot find column` while inserting into a `MATERIALIZED VIEW` in the case when a new column was added to the view's internal table. [\#8766](https://github.com/ClickHouse/ClickHouse/pull/8766) [\#8788](https://github.com/ClickHouse/ClickHouse/pull/8788) ([vzakaznikov](https://github.com/vzakaznikov)) [\#8788](https://github.com/ClickHouse/ClickHouse/issues/8788) [\#8806](https://github.com/ClickHouse/ClickHouse/pull/8806) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) [\#8803](https://github.com/ClickHouse/ClickHouse/pull/8803) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fixed progress over the native client-server protocol by sending progress after the final update (like logs). This may be relevant only to some third-party tools that use the native protocol. [\#9495](https://github.com/ClickHouse/ClickHouse/pull/9495) ([Azat Khuzhin](https://github.com/azat))
- Added a system metric tracking the number of client connections using the MySQL protocol ([\#9013](https://github.com/ClickHouse/ClickHouse/issues/9013)). [\#9015](https://github.com/ClickHouse/ClickHouse/pull/9015) ([Eugene Klimov](https://github.com/Slach))
- From now on, HTTP responses will have the `X-ClickHouse-Timezone` header set to the same timezone value that `SELECT timezone()` would report. [\#9493](https://github.com/ClickHouse/ClickHouse/pull/9493) ([Denis Glazachev](https://github.com/traceon))
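As a quick illustration of the extended `SHOW` syntax mentioned in the list above (the database and filter expressions are chosen for illustration):

```sql
-- Filter the table list of a specific database with an arbitrary WHERE expression.
SHOW TABLES FROM system WHERE name LIKE '%log%';

-- The same WHERE support applies to SHOW DATABASES.
SHOW DATABASES WHERE name != 'system';
```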
#### Performance Improvement {#performance-improvement}

- Improved performance of analysing the index with IN [\#9261](https://github.com/ClickHouse/ClickHouse/pull/9261) ([Anton Popov](https://github.com/CurtizJ))
- Simpler and more efficient code in logical functions + code cleanups. A follow-up to [\#8718](https://github.com/ClickHouse/ClickHouse/issues/8718) [\#8728](https://github.com/ClickHouse/ClickHouse/pull/8728) ([Alexander Kazakov](https://github.com/Akazz))
- Overall performance improvement (in the range of 5%..200% for affected queries) by ensuring even stricter aliasing with C++20 features. [\#9304](https://github.com/ClickHouse/ClickHouse/pull/9304) ([Amos Bird](https://github.com/amosbird))
- Stricter aliasing for inner loops of comparison functions. [\#9327](https://github.com/ClickHouse/ClickHouse/pull/9327) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Stricter aliasing for inner loops of arithmetic functions. [\#9325](https://github.com/ClickHouse/ClickHouse/pull/9325) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- An approximately 3 times faster implementation of ColumnVector::replicate(), via which ColumnConst::convertToFullColumn() is implemented. Also useful in tests when materializing constants. [\#9293](https://github.com/ClickHouse/ClickHouse/pull/9293) ([Alexander Kazakov](https://github.com/Akazz))
- Another minor performance improvement to `ColumnVector::replicate()` (this speeds up the `materialize` function and higher-order functions), an even further improvement over [\#9293](https://github.com/ClickHouse/ClickHouse/issues/9293) [\#9442](https://github.com/ClickHouse/ClickHouse/pull/9442) ([Alexander Kazakov](https://github.com/Akazz))
- Improved performance of the `stochasticLinearRegression` aggregate function. This patch was contributed by Intel. [\#8652](https://github.com/ClickHouse/ClickHouse/pull/8652) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Improved performance of the `reinterpretAsFixedString` function. [\#9342](https://github.com/ClickHouse/ClickHouse/pull/9342) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Do not send blocks to the client for `Null` format in the processors pipeline. [\#8797](https://github.com/ClickHouse/ClickHouse/pull/8797) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) [\#8767](https://github.com/ClickHouse/ClickHouse/pull/8767) ([Alexander Kuzmenkov](https://github.com/akuzm))

#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement}

- Exception handling now works correctly on Windows Subsystem for Linux. See https://github.com/ClickHouse-Extras/libunwind/pull/3 This fixes [\#6480](https://github.com/ClickHouse/ClickHouse/issues/6480) [\#9564](https://github.com/ClickHouse/ClickHouse/pull/9564) ([sobolevsv](https://github.com/sobolevsv))
- Replaced `readline` with `replxx` for interactive line editing in `clickhouse-client` [\#8416](https://github.com/ClickHouse/ClickHouse/pull/8416) ([Ivan](https://github.com/abyss7))
- Better build time and fewer template instantiations in FunctionsComparison. [\#9324](https://github.com/ClickHouse/ClickHouse/pull/9324) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Added integration with `clang-tidy` in CI. See also [\#6044](https://github.com/ClickHouse/ClickHouse/issues/6044) [\#9566](https://github.com/ClickHouse/ClickHouse/pull/9566) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Now we link ClickHouse in CI using `lld` even for `gcc`. [\#9049](https://github.com/ClickHouse/ClickHouse/pull/9049) ([alesapin](https://github.com/alesapin))
- Allow randomizing thread scheduling and inserting glitches when the `THREAD_FUZZER_*` environment variables are set. This helps testing. [\#9459](https://github.com/ClickHouse/ClickHouse/pull/9459) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Enable secure sockets in stateless tests [\#9288](https://github.com/ClickHouse/ClickHouse/pull/9288) ([tavplubix](https://github.com/tavplubix))
- Make SPLIT\_SHARED\_LIBRARIES=OFF more robust [\#9156](https://github.com/ClickHouse/ClickHouse/pull/9156) ([Azat Khuzhin](https://github.com/azat))
- Made the “performance\_introspection\_and\_logging” test reliable against the server getting stuck randomly. This may happen in a CI environment. See also [\#9515](https://github.com/ClickHouse/ClickHouse/issues/9515) [\#9528](https://github.com/ClickHouse/ClickHouse/pull/9528) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Validate XML in style check. [\#9550](https://github.com/ClickHouse/ClickHouse/pull/9550) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Fixed a race condition in test `00738_lock_for_inner_table`. This test relied on sleep. [\#9555](https://github.com/ClickHouse/ClickHouse/pull/9555) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Removed performance tests of type `once`. This is needed to run all performance tests in statistical comparison mode (more reliable). [\#9557](https://github.com/ClickHouse/ClickHouse/pull/9557) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Added performance tests for arithmetic functions. [\#9326](https://github.com/ClickHouse/ClickHouse/pull/9326) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Added performance tests for the `sumMap` and `sumMapWithOverflow` aggregate functions. A follow-up to [\#8933](https://github.com/ClickHouse/ClickHouse/issues/8933) [\#8947](https://github.com/ClickHouse/ClickHouse/pull/8947) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Ensured the style of error codes by style check. [\#9370](https://github.com/ClickHouse/ClickHouse/pull/9370) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Added a script for tests history. [\#8796](https://github.com/ClickHouse/ClickHouse/pull/8796) ([alesapin](https://github.com/alesapin))
- Added the GCC warning `-Wsuggest-override` to find and fix all places where the `override` keyword must be used. [\#8760](https://github.com/ClickHouse/ClickHouse/pull/8760) ([kreuzerkrieg](https://github.com/kreuzerkrieg))
- Ignore weak symbols under Mac OS X because they must be defined [\#9538](https://github.com/ClickHouse/ClickHouse/pull/9538) ([deleted user](https://github.com/ghost))
- Normalized the running time of some queries in performance tests. This was done in preparation for running all performance tests in comparison mode. [\#9565](https://github.com/ClickHouse/ClickHouse/pull/9565) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Fixed some tests to support pytest with query tests [\#9062](https://github.com/ClickHouse/ClickHouse/pull/9062) ([Ivan](https://github.com/abyss7))
- Enabled SSL in the build with MSan, so the server will not fail at startup when running stateless tests [\#9531](https://github.com/ClickHouse/ClickHouse/pull/9531) ([tavplubix](https://github.com/tavplubix))
- Fixed database substitution in test results [\#9384](https://github.com/ClickHouse/ClickHouse/pull/9384) ([Ilya Yatsishin](https://github.com/qoega))
- Build fixes for miscellaneous platforms [\#9381](https://github.com/ClickHouse/ClickHouse/pull/9381) ([proller](https://github.com/proller)) [\#8755](https://github.com/ClickHouse/ClickHouse/pull/8755) ([proller](https://github.com/proller)) [\#8631](https://github.com/ClickHouse/ClickHouse/pull/8631) ([proller](https://github.com/proller))
- Added a disks section to the stateless-with-coverage test docker image [\#9213](https://github.com/ClickHouse/ClickHouse/pull/9213) ([Pavel Kovalenko](https://github.com/Jokser))
- Got rid of in-source-tree files when building with GRPC [\#9588](https://github.com/ClickHouse/ClickHouse/pull/9588) ([Amos Bird](https://github.com/amosbird))
- Slightly faster build time by removing SessionCleaner from Context. Made the code of SessionCleaner simpler. [\#9232](https://github.com/ClickHouse/ClickHouse/pull/9232) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Updated the checking for hung queries in the clickhouse-test script [\#8858](https://github.com/ClickHouse/ClickHouse/pull/8858) ([Alexander Kazakov](https://github.com/Akazz))
- Removed some useless files from the repository. [\#8843](https://github.com/ClickHouse/ClickHouse/pull/8843) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Changed the type of math perftests from `once` to `loop`. [\#8783](https://github.com/ClickHouse/ClickHouse/pull/8783) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Added a docker image which allows building an interactive code browser HTML report for our codebase. [\#8781](https://github.com/ClickHouse/ClickHouse/pull/8781) ([alesapin](https://github.com/alesapin)) See the [Woboq code browser](https://clickhouse-test-reports.s3.yandex.net/codebrowser/html_report///ClickHouse/dbms/index.html)
- Suppressed some test failures under MSan. [\#8780](https://github.com/ClickHouse/ClickHouse/pull/8780) ([Alexander Kuzmenkov](https://github.com/akuzm))
- Sped up the “exception while insert” test. This test often timed out in the debug-with-coverage build. [\#8711](https://github.com/ClickHouse/ClickHouse/pull/8711) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Updated `libcxx` and `libcxxabi` to master. In preparation for [\#9304](https://github.com/ClickHouse/ClickHouse/issues/9304) [\#9308](https://github.com/ClickHouse/ClickHouse/pull/9308) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Fixed the flaky test `00910_zookeeper_test_alter_compression_codecs`. [\#9525](https://github.com/ClickHouse/ClickHouse/pull/9525) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Cleaned up duplicated linker flags. Made sure the linker won't look up an unexpected symbol. [\#9433](https://github.com/ClickHouse/ClickHouse/pull/9433) ([Amos Bird](https://github.com/amosbird))
- Added the `clickhouse-odbc` driver to the test images. This allows testing the interaction of ClickHouse with ClickHouse via its own ODBC driver. [\#9348](https://github.com/ClickHouse/ClickHouse/pull/9348) ([filimonov](https://github.com/filimonov))
- Fixed several bugs in unit tests. [\#9047](https://github.com/ClickHouse/ClickHouse/pull/9047) ([alesapin](https://github.com/alesapin))
- Enabled the `-Wmissing-include-dirs` GCC warning to eliminate all non-existing includes — mostly a result of CMake scripting errors [\#8704](https://github.com/ClickHouse/ClickHouse/pull/8704) ([kreuzerkrieg](https://github.com/kreuzerkrieg))
- Describe the reasons if the query profiler cannot work. This is intended for [\#9049](https://github.com/ClickHouse/ClickHouse/issues/9049) [\#9144](https://github.com/ClickHouse/ClickHouse/pull/9144) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Updated OpenSSL to upstream master. Fixed the issue when TLS connections may fail with the messages `OpenSSL SSL_read: error:14094438:SSL routines:ssl3_read_bytes:tlsv1 alert internal error` and `SSL Exception: error:2400006E:random number generator::error retrieving entropy`. The issue was present in version 20.1. [\#8956](https://github.com/ClickHouse/ClickHouse/pull/8956) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Updated the Dockerfile for the server [\#8893](https://github.com/ClickHouse/ClickHouse/pull/8893) ([Ilya Mazaev](https://github.com/ne-ray))
- Minor fixes in the build-gcc-from-sources script [\#8774](https://github.com/ClickHouse/ClickHouse/pull/8774) ([Michael Nacharov](https://github.com/mnach))
- Replaced `numbers` with `zeros` in perftests where the `number` column is not used. This will lead to cleaner test results. [\#9600](https://github.com/ClickHouse/ClickHouse/pull/9600) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fixed a stack overflow issue when using initializer\_list in Column constructors. [\#9367](https://github.com/ClickHouse/ClickHouse/pull/9367) ([deleted user](https://github.com/ghost))
- Upgraded librdkafka to v1.3.0. Enabled the bundled `rdkafka` and `gsasl` libraries on Mac OS X. [\#9000](https://github.com/ClickHouse/ClickHouse/pull/9000) ([Andrew Onyshchuk](https://github.com/oandrew))
- Build fix on GCC 9.2.0 [\#9306](https://github.com/ClickHouse/ClickHouse/pull/9306) ([vxider](https://github.com/Vxider))

## ClickHouse Release v20.1 {#clickhouse-release-v20-1}

### ClickHouse Release v20.1.8.41, 2020-03-20 {#clickhouse-release-v20-1-8-41-2020-03-20}

#### Bug Fix {#bug-fix-3}

- Fixed a possible permanent `Cannot schedule a task` error (due to an unhandled exception in `ParallelAggregatingBlockInputStream::Handler::onFinish/onFinishThread`). This fixes [\#6833](https://github.com/ClickHouse/ClickHouse/issues/6833). [\#9154](https://github.com/ClickHouse/ClickHouse/pull/9154) ([Azat Khuzhin](https://github.com/azat))
- Fixed excessive memory consumption in `ALTER` queries (mutations). This fixes [\#9533](https://github.com/ClickHouse/ClickHouse/issues/9533) and [\#9670](https://github.com/ClickHouse/ClickHouse/issues/9670). [\#9754](https://github.com/ClickHouse/ClickHouse/pull/9754) ([alesapin](https://github.com/alesapin))
- Fixed a bug with backquoting in DDL of external dictionaries. This fixes [\#9619](https://github.com/ClickHouse/ClickHouse/issues/9619). [\#9734](https://github.com/ClickHouse/ClickHouse/pull/9734) ([alesapin](https://github.com/alesapin))

### ClickHouse Release v20.1.7.38, 2020-03-18 {#clickhouse-release-v20-1-7-38-2020-03-18}

#### Bug Fix {#bug-fix-4}

- Fixed incorrect internal function names for `sumKahan` and `sumWithOverflow`. It led to an exception when these functions were used in remote queries. [\#9636](https://github.com/ClickHouse/ClickHouse/pull/9636) ([Azat Khuzhin](https://github.com/azat)). This issue was present in all ClickHouse releases.
- Allow `ALTER ON CLUSTER` for `Distributed` tables with internal replication. This fixes [\#3268](https://github.com/ClickHouse/ClickHouse/issues/3268). [\#9617](https://github.com/ClickHouse/ClickHouse/pull/9617) ([shinoi2](https://github.com/shinoi2)). This issue was present in all ClickHouse releases.
- Fixed possible exceptions `Size of filter doesn't match size of column` and `Invalid number of rows in Chunk` in `MergeTreeRangeReader`. They could appear while executing `PREWHERE` in some cases. Fixes [\#9132](https://github.com/ClickHouse/ClickHouse/issues/9132). [\#9612](https://github.com/ClickHouse/ClickHouse/pull/9612) ([Anton Popov](https://github.com/CurtizJ))
- Fixed the issue: the timezone was not preserved if you write a simple arithmetic expression like `time + 1` (in contrast to an expression like `time + INTERVAL 1 SECOND`). This fixes [\#5743](https://github.com/ClickHouse/ClickHouse/issues/5743). [\#9323](https://github.com/ClickHouse/ClickHouse/pull/9323) ([Alexey Milovidov](https://github.com/alexey-milovidov)). This issue was present in all ClickHouse releases.
- Now it's not possible to create or add columns with simple cyclic aliases like `a DEFAULT b, b DEFAULT a`. [\#9603](https://github.com/ClickHouse/ClickHouse/pull/9603) ([alesapin](https://github.com/alesapin))
- Fixed the issue when the padding at the end of a base64-encoded value can be malformed. Updated the base64 library. This fixes [\#9491](https://github.com/ClickHouse/ClickHouse/issues/9491), closes [\#9492](https://github.com/ClickHouse/ClickHouse/issues/9492) [\#9500](https://github.com/ClickHouse/ClickHouse/pull/9500) ([Alexey Milovidov](https://github.com/alexey-milovidov))
- Fixed a data race at the destruction of `Poco::HTTPServer`. It could happen when the server is started and immediately shut down. [\#9468](https://github.com/ClickHouse/ClickHouse/pull/9468) ([Anton Popov](https://github.com/CurtizJ))
- Fixed a possible crash/wrong number of rows in `LIMIT n WITH TIES` when there are a lot of rows equal to the n-th row. [\#9464](https://github.com/ClickHouse/ClickHouse/pull/9464) ([tavplubix](https://github.com/tavplubix))
- Fixed possibly mismatched checksums with column TTLs. [\#9451](https://github.com/ClickHouse/ClickHouse/pull/9451) ([Anton Popov](https://github.com/CurtizJ))
- Fixed a crash when a user tries `ALTER MODIFY SETTING` for the old-formatted `MergeTree` table engines family. [\#9435](https://github.com/ClickHouse/ClickHouse/pull/9435) ([alesapin](https://github.com/alesapin))
- Now we will try to finalize mutations more frequently. [\#9427](https://github.com/ClickHouse/ClickHouse/pull/9427) ([alesapin](https://github.com/alesapin))
- Fixed the replication protocol incompatibility introduced in [\#8598](https://github.com/ClickHouse/ClickHouse/issues/8598). [\#9412](https://github.com/ClickHouse/ClickHouse/pull/9412) ([alesapin](https://github.com/alesapin))
+- Fix not(has()) for the bloom\_filter index of array types. [\#9407](https://github.com/ClickHouse/ClickHouse/pull/9407) ([achimbab](https://github.com/achimbab))
+- Fixed the behaviour of the `match` and `extract` functions when the haystack has zero bytes. The behaviour was wrong when the haystack was constant. This fixes [\#9160](https://github.com/ClickHouse/ClickHouse/issues/9160) [\#9163](https://github.com/ClickHouse/ClickHouse/pull/9163) ([alexey-milovidov](https://github.com/alexey-milovidov)) [\#9345](https://github.com/ClickHouse/ClickHouse/pull/9345) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-1}
+
+- Exception handling now works correctly on the Windows Subsystem for Linux. See https://github.com/ClickHouse-Extras/libunwind/pull/3 This fixes [\#6480](https://github.com/ClickHouse/ClickHouse/issues/6480) [\#9564](https://github.com/ClickHouse/ClickHouse/pull/9564) ([sobolevsv](https://github.com/sobolevsv))
+
+### ClickHouse Release v20.1.6.30, 2020-03-05 {#clickhouse-release-v20-1-6-30-2020-03-05}
+
+#### Bug Fix {#bug-fix-5}
+
+- Fix data incompatibility when compressed with the `T64` codec.
+    [\#9039](https://github.com/ClickHouse/ClickHouse/pull/9039) [(abyss7)](https://github.com/abyss7)
+- Fix the order of ranges while reading from a MergeTree table in one thread. Fixes [\#8964](https://github.com/ClickHouse/ClickHouse/issues/8964).
+    [\#9050](https://github.com/ClickHouse/ClickHouse/pull/9050) [(CurtizJ)](https://github.com/CurtizJ)
+- Fix a possible segfault in `MergeTreeRangeReader` while executing `PREWHERE`. Fixes [\#9064](https://github.com/ClickHouse/ClickHouse/issues/9064).
+    [\#9106](https://github.com/ClickHouse/ClickHouse/pull/9106) [(CurtizJ)](https://github.com/CurtizJ)
+- Fix `reinterpretAsFixedString` to return `FixedString` instead of `String`.
+    [\#9052](https://github.com/ClickHouse/ClickHouse/pull/9052) [(oandrew)](https://github.com/oandrew)
+- Fix `joinGet` with nullable return types. Fixes [\#8919](https://github.com/ClickHouse/ClickHouse/issues/8919)
+    [\#9014](https://github.com/ClickHouse/ClickHouse/pull/9014) [(amosbird)](https://github.com/amosbird)
+- Fix the fuzz test and incorrect behaviour of the bitTestAll/bitTestAny functions.
+    [\#9143](https://github.com/ClickHouse/ClickHouse/pull/9143) [(alexey-milovidov)](https://github.com/alexey-milovidov)
+- Fix the behaviour of the match and extract functions when the haystack has zero bytes. The behaviour was wrong when the haystack was constant. Fixes [\#9160](https://github.com/ClickHouse/ClickHouse/issues/9160)
+    [\#9163](https://github.com/ClickHouse/ClickHouse/pull/9163) [(alexey-milovidov)](https://github.com/alexey-milovidov)
+- Fixed the execution of inverted predicates when a non-strictly monotonic functional index is used. Fixes [\#9034](https://github.com/ClickHouse/ClickHouse/issues/9034)
+    [\#9223](https://github.com/ClickHouse/ClickHouse/pull/9223) [(Akazz)](https://github.com/Akazz)
+- Allow rewriting `CROSS` to `INNER JOIN` if there is a `[NOT] LIKE` operator in the `WHERE` section. Fixes [\#9191](https://github.com/ClickHouse/ClickHouse/issues/9191)
+    [\#9229](https://github.com/ClickHouse/ClickHouse/pull/9229) [(4ertus2)](https://github.com/4ertus2)
+- Allow the first column(s) in a table with the Log engine to be an alias.
+    [\#9231](https://github.com/ClickHouse/ClickHouse/pull/9231) [(abyss7)](https://github.com/abyss7)
+- Allow a comma join with `IN()` inside. Fixes [\#7314](https://github.com/ClickHouse/ClickHouse/issues/7314).
+    [\#9251](https://github.com/ClickHouse/ClickHouse/pull/9251) [(4ertus2)](https://github.com/4ertus2)
+- Improve the `ALTER MODIFY/ADD` queries logic. Now you cannot `ADD` a column without a type, `MODIFY` of a default expression doesn't change the type of the column, and `MODIFY` of the type doesn't lose the default expression value. Fixes [\#8669](https://github.com/ClickHouse/ClickHouse/issues/8669).
+    [\#9227](https://github.com/ClickHouse/ClickHouse/pull/9227) [(alesapin)](https://github.com/alesapin)
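+    A minimal sketch of the new behaviour (the table and column names are illustrative):
+
+    ``` sql
+    CREATE TABLE t (x UInt8 DEFAULT 42) ENGINE = MergeTree ORDER BY tuple();
+    ALTER TABLE t MODIFY COLUMN x DEFAULT 43;  -- changes only the default, the type stays UInt8
+    ALTER TABLE t MODIFY COLUMN x UInt16;      -- changes only the type, DEFAULT 43 is kept
+    ```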
+- Fix mutation finalization, when an already done mutation could have the status is\_done=0.
+    [\#9217](https://github.com/ClickHouse/ClickHouse/pull/9217) [(alesapin)](https://github.com/alesapin)
+- Support the “Processors” pipeline for system.numbers and system.numbers\_mt. This also fixes the bug when `max_execution_time` is not respected.
+    [\#7796](https://github.com/ClickHouse/ClickHouse/pull/7796) [(KochetovNicolai)](https://github.com/KochetovNicolai)
+- Fix wrong counting of the `DictCacheKeysRequestedFound` metric.
+    [\#9411](https://github.com/ClickHouse/ClickHouse/pull/9411) [(nikitamikhaylov)](https://github.com/nikitamikhaylov)
+- Added a check for the storage policy in `ATTACH PARTITION FROM`, `REPLACE PARTITION`, `MOVE TO TABLE`, which otherwise could make data of a part inaccessible after restart and prevent ClickHouse from starting.
+    [\#9383](https://github.com/ClickHouse/ClickHouse/pull/9383) [(excitoon)](https://github.com/excitoon)
+- Fixed a UBSan report in `MergeTreeIndexSet`. This fixes [\#9250](https://github.com/ClickHouse/ClickHouse/issues/9250)
+    [\#9365](https://github.com/ClickHouse/ClickHouse/pull/9365) [(alexey-milovidov)](https://github.com/alexey-milovidov)
+- Fix a possible data race in BlockIO.
+    [\#9356](https://github.com/ClickHouse/ClickHouse/pull/9356) [(KochetovNicolai)](https://github.com/KochetovNicolai)
+- Support for `UInt64` numbers that don't fit in Int64 in JSON-related functions. Update `SIMDJSON` to master. This fixes [\#9209](https://github.com/ClickHouse/ClickHouse/issues/9209)
+    [\#9344](https://github.com/ClickHouse/ClickHouse/pull/9344) [(alexey-milovidov)](https://github.com/alexey-milovidov)
+- Fix the issue when the amount of free space was calculated incorrectly if the data directory is mounted on a separate device. For the default disk, calculate the free space from the data subdirectory. This fixes [\#7441](https://github.com/ClickHouse/ClickHouse/issues/7441)
+    [\#9257](https://github.com/ClickHouse/ClickHouse/pull/9257) [(millb)](https://github.com/millb)
+- Fix the issue when TLS connections may fail with the message `OpenSSL SSL_read: error:14094438:SSL routines:ssl3_read_bytes:tlsv1 alert internal error and SSL Exception: error:2400006E:random number generator::error retrieving entropy.` Update OpenSSL to upstream master.
+    [\#8956](https://github.com/ClickHouse/ClickHouse/pull/8956) [(alexey-milovidov)](https://github.com/alexey-milovidov)
+- When executing a `CREATE` query, fold constant expressions in storage engine arguments. Replace an empty database name with the current database. Fixes [\#6508](https://github.com/ClickHouse/ClickHouse/issues/6508), [\#3492](https://github.com/ClickHouse/ClickHouse/issues/3492). Also fix the check for the local address in ClickHouseDictionarySource.
+    [\#9262](https://github.com/ClickHouse/ClickHouse/pull/9262) [(tavplubix)](https://github.com/tavplubix)
+- Fix a segfault in `StorageMerge`, which could happen when reading from StorageFile.
+    [\#9387](https://github.com/ClickHouse/ClickHouse/pull/9387) [(tavplubix)](https://github.com/tavplubix)
+- Prevent losing data in `Kafka` in rare cases when an exception happens after reading the suffix but before the commit. Fixes [\#9378](https://github.com/ClickHouse/ClickHouse/issues/9378). Related: [\#7175](https://github.com/ClickHouse/ClickHouse/issues/7175)
+    [\#9507](https://github.com/ClickHouse/ClickHouse/pull/9507) [(filimonov)](https://github.com/filimonov)
+- Fix a bug leading to server termination when trying to use/drop a `Kafka` table created with wrong parameters. Fixes [\#9494](https://github.com/ClickHouse/ClickHouse/issues/9494). Incorporates [\#9507](https://github.com/ClickHouse/ClickHouse/issues/9507).
+    [\#9513](https://github.com/ClickHouse/ClickHouse/pull/9513) [(filimonov)](https://github.com/filimonov)
+
+#### New Feature {#new-feature-1}
+
+- Add the `deduplicate_blocks_in_dependent_materialized_views` option to control the behaviour of idempotent inserts into tables with materialized views. This new feature was added to the bugfix release by a special request from Altinity.
+    [\#9070](https://github.com/ClickHouse/ClickHouse/pull/9070) [(urykhy)](https://github.com/urykhy)
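+    For example (a sketch; `source_table` is an illustrative source table with dependent materialized views):
+
+    ``` sql
+    SET deduplicate_blocks_in_dependent_materialized_views = 1;
+    -- a retried INSERT of the same block is now deduplicated
+    -- in the dependent materialized views as well
+    INSERT INTO source_table VALUES (1, 'a');
+    ```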
+### ClickHouse Release v20.1.2.4, 2020-01-22 {#clickhouse-release-v20-1-2-4-2020-01-22}
+
+#### Backward Incompatible Change {#backward-incompatible-change-1}
+
+- Made the setting `merge_tree_uniform_read_distribution` obsolete. The server still recognizes this setting but it has no effect. [\#8308](https://github.com/ClickHouse/ClickHouse/pull/8308) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Changed the return type of the function `greatCircleDistance` to `Float32` because now the result of the calculation is `Float32`. [\#7993](https://github.com/ClickHouse/ClickHouse/pull/7993) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Now it's expected that query parameters are represented in “escaped” format. For example, to pass a string containing a tab between `a` and `b` you have to write `a\tb` (or `a\` followed by a literal tab and `b`) and, respectively, `a%5Ctb` or `a%5C%09b` in a URL. This is needed to add the possibility to pass NULL as `\N`. This fixes [\#7488](https://github.com/ClickHouse/ClickHouse/issues/7488). [\#8517](https://github.com/ClickHouse/ClickHouse/pull/8517) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Enable the `use_minimalistic_part_header_in_zookeeper` setting for `ReplicatedMergeTree` by default. This will significantly reduce the amount of data stored in ZooKeeper. This setting is supported since version 19.1 and we have already been using it in production in multiple services without any issues for more than half a year. Disable this setting if you have a chance to downgrade to versions older than 19.1. [\#6850](https://github.com/ClickHouse/ClickHouse/pull/6850) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Data skipping indices are production ready and enabled by default. The settings `allow_experimental_data_skipping_indices`, `allow_experimental_cross_to_join_conversion` and `allow_experimental_multiple_joins_emulation` are now obsolete and do nothing. [\#7974](https://github.com/ClickHouse/ClickHouse/pull/7974) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add new `ANY JOIN` logic for `StorageJoin` consistent with the `JOIN` operation. To upgrade without changes in behaviour you need to add `SETTINGS any_join_distinct_right_table_keys = 1` to Engine Join tables metadata or recreate these tables after the upgrade. [\#8400](https://github.com/ClickHouse/ClickHouse/pull/8400) ([Artem Zuikov](https://github.com/4ertus2))
+- Require the server to be restarted to apply changes in the logging configuration. This is a temporary workaround to avoid the bug where the server logs to a deleted log file (see [\#8696](https://github.com/ClickHouse/ClickHouse/issues/8696)). [\#8707](https://github.com/ClickHouse/ClickHouse/pull/8707) ([Alexander Kuzmenkov](https://github.com/akuzm))
+
+#### New Feature {#new-feature-2}
+
+- Added information about part paths to `system.merges`. [\#8043](https://github.com/ClickHouse/ClickHouse/pull/8043) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Add the ability to execute `SYSTEM RELOAD DICTIONARY` queries in `ON CLUSTER` mode. [\#8288](https://github.com/ClickHouse/ClickHouse/pull/8288) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Add the ability to execute `CREATE DICTIONARY` queries in `ON CLUSTER` mode. [\#8163](https://github.com/ClickHouse/ClickHouse/pull/8163) ([alesapin](https://github.com/alesapin))
+- Now a user's profile in `users.xml` can inherit multiple profiles. [\#8343](https://github.com/ClickHouse/ClickHouse/pull/8343) ([Mikhail f. Shiryaev](https://github.com/Felixoid))
+- Added the `system.stack_trace` table that allows to look at the stack traces of all server threads. This is useful for developers to introspect the server state. This fixes [\#7576](https://github.com/ClickHouse/ClickHouse/issues/7576). [\#8344](https://github.com/ClickHouse/ClickHouse/pull/8344) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add the `DateTime64` data type with configurable sub-second precision. [\#7170](https://github.com/ClickHouse/ClickHouse/pull/7170) ([Vasily Nemkov](https://github.com/Enmk))
+- Add the table function `clusterAllReplicas` which allows to query all the nodes in the cluster. [\#8493](https://github.com/ClickHouse/ClickHouse/pull/8493) ([kiran sunkari](https://github.com/kiransunkari))
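+    For example (assuming a cluster named `my_cluster` is defined in the server configuration):
+
+    ``` sql
+    SELECT hostName() FROM clusterAllReplicas('my_cluster', system.one);
+    -- unlike cluster(), this returns one row per replica rather than one per shard
+    ```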
+- Add the aggregate function `categoricalInformationValue` which calculates the information value of a discrete feature. [\#8117](https://github.com/ClickHouse/ClickHouse/pull/8117) ([hcz](https://github.com/hczhcz))
+- Speed up the parsing of data files in `CSV`, `TSV` and `JSONEachRow` formats by doing it in parallel. [\#7780](https://github.com/ClickHouse/ClickHouse/pull/7780) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Add the function `bankerRound` which performs banker's rounding. [\#8112](https://github.com/ClickHouse/ClickHouse/pull/8112) ([hcz](https://github.com/hczhcz))
+- Support more languages in the embedded dictionary for region names: ‘ru’, ‘en’, ‘ua’, ‘uk’, ‘by’, ‘kz’, ‘tr’, ‘de’, ‘uz’, ‘lv’, ‘lt’, ‘et’, ‘pt’, ‘he’, ‘vi’. [\#8189](https://github.com/ClickHouse/ClickHouse/pull/8189) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Improved consistency of `ANY JOIN` logic. Now `t1 ANY LEFT JOIN t2` equals `t2 ANY RIGHT JOIN t1`. [\#7665](https://github.com/ClickHouse/ClickHouse/pull/7665) ([Artem Zuikov](https://github.com/4ertus2))
+- Add the setting `any_join_distinct_right_table_keys` which enables the old behaviour for `ANY INNER JOIN`. [\#7665](https://github.com/ClickHouse/ClickHouse/pull/7665) ([Artem Zuikov](https://github.com/4ertus2))
+- Add new `SEMI` and `ANTI JOIN`. The old `ANY INNER JOIN` behaviour is now available as `SEMI LEFT JOIN`. [\#7665](https://github.com/ClickHouse/ClickHouse/pull/7665) ([Artem Zuikov](https://github.com/4ertus2))
+- Added `Distributed` format for the `File` engine and the `file` table function which allows to read from `.bin` files generated by asynchronous inserts into `Distributed` tables. [\#8535](https://github.com/ClickHouse/ClickHouse/pull/8535) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Add an optional reset-column argument for `runningAccumulate` which allows to reset aggregation results for each new key value. [\#8326](https://github.com/ClickHouse/ClickHouse/pull/8326) ([Sergey Kononenko](https://github.com/kononencheg))
+- Add the ability to use ClickHouse as a Prometheus endpoint. [\#7900](https://github.com/ClickHouse/ClickHouse/pull/7900) ([vdimir](https://github.com/Vdimir))
+- Add a section `remote_url_allow_hosts` in `config.xml` which restricts the allowed hosts for remote table engines and the table functions `URL`, `S3`, `HDFS`. [\#7154](https://github.com/ClickHouse/ClickHouse/pull/7154) ([Mikhail Korotov](https://github.com/millb))
+- Add the function `greatCircleAngle` which calculates the distance on a sphere in degrees. [\#8105](https://github.com/ClickHouse/ClickHouse/pull/8105) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Changed the Earth radius to be consistent with the H3 library. [\#8105](https://github.com/ClickHouse/ClickHouse/pull/8105) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added `JSONCompactEachRow` and `JSONCompactEachRowWithNamesAndTypes` formats for input and output. [\#7841](https://github.com/ClickHouse/ClickHouse/pull/7841) ([Mikhail Korotov](https://github.com/millb))
+- Added a feature for file-related table engines and table functions (`File`, `S3`, `URL`, `HDFS`) which allows to read and write `gzip` files based on an additional engine parameter or the file extension. [\#7840](https://github.com/ClickHouse/ClickHouse/pull/7840) ([Andrei Bodrov](https://github.com/apbodrov))
+- Added the `randomASCII(length)` function, generating a string with a random set of [ASCII](https://en.wikipedia.org/wiki/ASCII#Printable_characters) printable characters. [\#8401](https://github.com/ClickHouse/ClickHouse/pull/8401) ([BayoNet](https://github.com/BayoNet))
+- Added the function `JSONExtractArrayRaw` which returns an array of unparsed json array elements from a `JSON` string. [\#8081](https://github.com/ClickHouse/ClickHouse/pull/8081) ([Oleg Matrokhin](https://github.com/errx))
+- Add the `arrayZip` function which allows to combine multiple arrays of equal lengths into one array of tuples. [\#8149](https://github.com/ClickHouse/ClickHouse/pull/8149) ([Winter Zhang](https://github.com/zhang2014))
+- Add the ability to move data between disks according to configured `TTL` expressions for the `*MergeTree` table engine family. [\#8140](https://github.com/ClickHouse/ClickHouse/pull/8140) ([Vladimir Chebotarev](https://github.com/excitoon))
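+    A sketch of such a table definition (the disk name `slow_disk`, the volume name `archive` and the policy name `tiered` are illustrative and must exist in the server's storage configuration):
+
+    ``` sql
+    CREATE TABLE hits (d Date, x UInt64)
+    ENGINE = MergeTree
+    ORDER BY x
+    TTL d + INTERVAL 1 MONTH TO DISK 'slow_disk',
+        d + INTERVAL 1 YEAR TO VOLUME 'archive'
+    SETTINGS storage_policy = 'tiered';
+    ```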
+- Added the new aggregate function `avgWeighted` which allows to calculate the weighted average. [\#7898](https://github.com/ClickHouse/ClickHouse/pull/7898) ([Andrei Bodrov](https://github.com/apbodrov))
+- Now parallel parsing is enabled by default for the `TSV`, `TSKV`, `CSV` and `JSONEachRow` formats. [\#7894](https://github.com/ClickHouse/ClickHouse/pull/7894) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Add several geo functions from the `H3` library: `h3GetResolution`, `h3EdgeAngle`, `h3EdgeLength`, `h3IsValid` and `h3kRing`. [\#8034](https://github.com/ClickHouse/ClickHouse/pull/8034) ([Konstantin Malanchev](https://github.com/hombit))
+- Added support for brotli (`br`) compression in file-related storages and table functions. This fixes [\#8156](https://github.com/ClickHouse/ClickHouse/issues/8156). [\#8526](https://github.com/ClickHouse/ClickHouse/pull/8526) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add `groupBit*` functions for the `SimpleAggregationFunction` type. [\#8485](https://github.com/ClickHouse/ClickHouse/pull/8485) ([Guillaume Tassery](https://github.com/YiuRULE))
+
+#### Bug Fix {#bug-fix-6}
+
+- Fix rename of tables with the `Distributed` engine. Fixes issue [\#7868](https://github.com/ClickHouse/ClickHouse/issues/7868). [\#8306](https://github.com/ClickHouse/ClickHouse/pull/8306) ([tavplubix](https://github.com/tavplubix))
+- Now dictionaries support `EXPRESSION` for attributes in an arbitrary string in non-ClickHouse SQL dialect. [\#8098](https://github.com/ClickHouse/ClickHouse/pull/8098) ([alesapin](https://github.com/alesapin))
+- Fix broken `INSERT SELECT FROM mysql(...)` queries. This fixes [\#8070](https://github.com/ClickHouse/ClickHouse/issues/8070) and [\#7960](https://github.com/ClickHouse/ClickHouse/issues/7960). [\#8234](https://github.com/ClickHouse/ClickHouse/pull/8234) ([tavplubix](https://github.com/tavplubix))
+- Fix the error “Mismatch column sizes” when inserting a default `Tuple` from `JSONEachRow`. This fixes [\#5653](https://github.com/ClickHouse/ClickHouse/issues/5653). [\#8606](https://github.com/ClickHouse/ClickHouse/pull/8606) ([tavplubix](https://github.com/tavplubix))
+- Now an exception will be thrown in case of using `WITH TIES` alongside `LIMIT BY`. Also added the ability to use `TOP` with `LIMIT BY`. This fixes [\#7472](https://github.com/ClickHouse/ClickHouse/issues/7472). [\#7637](https://github.com/ClickHouse/ClickHouse/pull/7637) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Fix an unintended dependency on a fresh glibc version in the `clickhouse-odbc-bridge` binary. [\#8046](https://github.com/ClickHouse/ClickHouse/pull/8046) ([Amos Bird](https://github.com/amosbird))
+- Fix a bug in the check function of the `*MergeTree` engine family. Now it doesn't fail in the case when we have an equal amount of rows in the last granule and the last mark (non-final). [\#8047](https://github.com/ClickHouse/ClickHouse/pull/8047) ([alesapin](https://github.com/alesapin))
+- Fix insertion into `Enum*` columns after an `ALTER` query, when the underlying numeric type is equal to the table-specified type. This fixes [\#7836](https://github.com/ClickHouse/ClickHouse/issues/7836). [\#7908](https://github.com/ClickHouse/ClickHouse/pull/7908) ([Anton Popov](https://github.com/CurtizJ))
+- Allowed a non-constant negative “size” argument for the function `substring`. It was not allowed by mistake. This fixes [\#4832](https://github.com/ClickHouse/ClickHouse/issues/4832). [\#7703](https://github.com/ClickHouse/ClickHouse/pull/7703) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix a parsing bug when the wrong number of arguments is passed to the `(O|J)DBC` table engine. [\#7709](https://github.com/ClickHouse/ClickHouse/pull/7709) ([alesapin](https://github.com/alesapin))
+- Use the command name of the running clickhouse process when sending logs to syslog. In previous versions, an empty string was used instead of the command name. [\#8460](https://github.com/ClickHouse/ClickHouse/pull/8460) ([Michael Nacharov](https://github.com/mnach))
+- Fix the check of allowed hosts for `localhost`. This PR fixes the solution provided in [\#8241](https://github.com/ClickHouse/ClickHouse/pull/8241). [\#8342](https://github.com/ClickHouse/ClickHouse/pull/8342) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fix a rare crash in the `argMin` and `argMax` functions for long string arguments, when the result is used in the `runningAccumulate` function. This fixes [\#8325](https://github.com/ClickHouse/ClickHouse/issues/8325) [\#8341](https://github.com/ClickHouse/ClickHouse/pull/8341) ([恐龙](https://github.com/769344359))
+- Fix memory overcommit for tables with the `Buffer` engine. [\#8345](https://github.com/ClickHouse/ClickHouse/pull/8345) ([Azat Khuzhin](https://github.com/azat))
+- Fixed a potential bug in functions that can take `NULL` as one of the arguments and return non-NULL. [\#8196](https://github.com/ClickHouse/ClickHouse/pull/8196) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Better metrics calculations in the thread pool for background processes of the `MergeTree` table engines. [\#8194](https://github.com/ClickHouse/ClickHouse/pull/8194) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fix the function `IN` inside a `WHERE` statement when a row-level table filter is present. Fixes [\#6687](https://github.com/ClickHouse/ClickHouse/issues/6687) [\#8357](https://github.com/ClickHouse/ClickHouse/pull/8357) ([Ivan](https://github.com/abyss7))
+- Now an exception is thrown if an integral value is not parsed completely for settings values. [\#7678](https://github.com/ClickHouse/ClickHouse/pull/7678) ([Mikhail Korotov](https://github.com/millb))
+- Fix an exception when an aggregate function is used in a query to a distributed table with more than two local shards. [\#8164](https://github.com/ClickHouse/ClickHouse/pull/8164) ([小路](https://github.com/nicelulu))
+- Now the bloom filter can handle zero-length arrays and doesn't perform redundant calculations. [\#8242](https://github.com/ClickHouse/ClickHouse/pull/8242) ([achimbab](https://github.com/achimbab))
+- Fixed checking if a client host is allowed by matching the client host to `host_regexp` specified in `users.xml`. [\#8241](https://github.com/ClickHouse/ClickHouse/pull/8241) ([Vitaly Baranov](https://github.com/vitlibar))
+- Relax the ambiguous column check that led to false positives in multiple `JOIN ON` sections. [\#8385](https://github.com/ClickHouse/ClickHouse/pull/8385) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed a possible server crash (`std::terminate`) when the server cannot send or write data in `JSON` or `XML` format with values of the `String` data type (that require `UTF-8` validation) or when compressing result data with the Brotli algorithm or in some other rare cases. This fixes [\#7603](https://github.com/ClickHouse/ClickHouse/issues/7603) [\#8384](https://github.com/ClickHouse/ClickHouse/pull/8384) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix a race condition in `StorageDistributedDirectoryMonitor` found by CI. This fixes [\#8364](https://github.com/ClickHouse/ClickHouse/issues/8364). [\#8383](https://github.com/ClickHouse/ClickHouse/pull/8383) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Now background merges in the `*MergeTree` table engine family preserve the storage policy volume order more accurately. [\#8549](https://github.com/ClickHouse/ClickHouse/pull/8549) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Now the table engine `Kafka` works properly with the `Native` format. This fixes [\#6731](https://github.com/ClickHouse/ClickHouse/issues/6731) [\#7337](https://github.com/ClickHouse/ClickHouse/issues/7337) [\#8003](https://github.com/ClickHouse/ClickHouse/issues/8003). [\#8016](https://github.com/ClickHouse/ClickHouse/pull/8016) ([filimonov](https://github.com/filimonov))
+- Fixed formats with headers (like `CSVWithNames`) which were throwing an exception about EOF for the table engine `Kafka`. [\#8016](https://github.com/ClickHouse/ClickHouse/pull/8016) ([filimonov](https://github.com/filimonov))
+- Fixed a bug with making a set from a subquery in the right part of an `IN` section. This fixes [\#5767](https://github.com/ClickHouse/ClickHouse/issues/5767) and [\#2542](https://github.com/ClickHouse/ClickHouse/issues/2542). [\#7755](https://github.com/ClickHouse/ClickHouse/pull/7755) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Fix a possible crash while reading from storage `File`. [\#7756](https://github.com/ClickHouse/ClickHouse/pull/7756) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed reading of files in `Parquet` format containing columns of type `list`. [\#8334](https://github.com/ClickHouse/ClickHouse/pull/8334) ([maxulan](https://github.com/maxulan))
+- Fix the error `Not found column` for distributed queries with a `PREWHERE` condition dependent on the sampling key if `max_parallel_replicas > 1`. [\#7913](https://github.com/ClickHouse/ClickHouse/pull/7913) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix the error `Not found column` if a query used `PREWHERE` dependent on a table's alias and the result set was empty because of the primary key condition. [\#7911](https://github.com/ClickHouse/ClickHouse/pull/7911) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed the return type for the functions `rand` and `randConstant` in case of a `Nullable` argument. Now the functions always return `UInt32` and never `Nullable(UInt32)`. [\#8204](https://github.com/ClickHouse/ClickHouse/pull/8204) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Disabled predicate push-down for `WITH FILL` expressions. This fixes [\#7784](https://github.com/ClickHouse/ClickHouse/issues/7784). [\#7789](https://github.com/ClickHouse/ClickHouse/pull/7789) ([Winter Zhang](https://github.com/zhang2014))
+- Fixed an incorrect `count()` result for `SummingMergeTree` when the `FINAL` section is used. [\#3280](https://github.com/ClickHouse/ClickHouse/issues/3280) [\#7786](https://github.com/ClickHouse/ClickHouse/pull/7786) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Fix a possible incorrect result for constant functions from remote servers. It happened for queries with functions like `version()`, `uptime()`, etc. which return different constant values for different servers. This fixes [\#7666](https://github.com/ClickHouse/ClickHouse/issues/7666). [\#7689](https://github.com/ClickHouse/ClickHouse/pull/7689) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix a complicated bug in the push-down predicate optimization which led to wrong results. This fixes a lot of issues in push-down predicate optimization. [\#8503](https://github.com/ClickHouse/ClickHouse/pull/8503) ([Winter Zhang](https://github.com/zhang2014))
+- Fix a crash in the `CREATE TABLE .. AS dictionary` query. [\#8508](https://github.com/ClickHouse/ClickHouse/pull/8508) ([Azat Khuzhin](https://github.com/azat))
+- Several improvements to the ClickHouse grammar in the `.g4` file. [\#8294](https://github.com/ClickHouse/ClickHouse/pull/8294) ([太阳里](https://github.com/taiyang-li))
+- Fix a bug that led to crashes in `JOIN`s with tables with engine `Join`. This fixes [\#7556](https://github.com/ClickHouse/ClickHouse/issues/7556) [\#8254](https://github.com/ClickHouse/ClickHouse/issues/8254) [\#7915](https://github.com/ClickHouse/ClickHouse/issues/7915) [\#8100](https://github.com/ClickHouse/ClickHouse/issues/8100). [\#8298](https://github.com/ClickHouse/ClickHouse/pull/8298) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix redundant dictionary reloads on `CREATE DATABASE`. [\#7916](https://github.com/ClickHouse/ClickHouse/pull/7916) ([Azat Khuzhin](https://github.com/azat))
+- Limit the maximum number of streams for reading from `StorageFile` and `StorageHDFS`. Fixes https://github.com/ClickHouse/ClickHouse/issues/7650. [\#7981](https://github.com/ClickHouse/ClickHouse/pull/7981) ([alesapin](https://github.com/alesapin))
+- Fix a bug in the `ALTER ... MODIFY ... CODEC` query, when the user specifies both a default expression and a codec. Fixes [8593](https://github.com/ClickHouse/ClickHouse/issues/8593). [\#8614](https://github.com/ClickHouse/ClickHouse/pull/8614) ([alesapin](https://github.com/alesapin))
+- Fix an error in the background merge of columns with `SimpleAggregateFunction(LowCardinality)` type. [\#8613](https://github.com/ClickHouse/ClickHouse/pull/8613) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed the type check in the function `toDateTime64`. [\#8375](https://github.com/ClickHouse/ClickHouse/pull/8375) ([Vasily Nemkov](https://github.com/Enmk))
+- Now the server doesn't crash on `LEFT` or `FULL JOIN` with the Join engine and unsupported `join_use_nulls` settings. [\#8479](https://github.com/ClickHouse/ClickHouse/pull/8479) ([Artem Zuikov](https://github.com/4ertus2))
+- Now the `DROP DICTIONARY IF EXISTS db.dict` query doesn't throw an exception if `db` doesn't exist. [\#8185](https://github.com/ClickHouse/ClickHouse/pull/8185) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fix possible crashes in table functions (`file`, `mysql`, `remote`) caused by the usage of a reference to a removed `IStorage` object. Fix incorrect parsing of columns specified at insertion into a table function. [\#7762](https://github.com/ClickHouse/ClickHouse/pull/7762) ([tavplubix](https://github.com/tavplubix))
+- Ensure the network is up before starting `clickhouse-server`. This fixes [\#7507](https://github.com/ClickHouse/ClickHouse/issues/7507). [\#8570](https://github.com/ClickHouse/ClickHouse/pull/8570) ([Zhichang Yu](https://github.com/yuzhichang))
+- Fix timeout handling for secure connections, so queries don't hang indefinitely. This fixes [\#8126](https://github.com/ClickHouse/ClickHouse/issues/8126). [\#8128](https://github.com/ClickHouse/ClickHouse/pull/8128) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix `clickhouse-copier`'s redundant contention between concurrent workers. [\#7816](https://github.com/ClickHouse/ClickHouse/pull/7816) ([Ding Xiang Fei](https://github.com/dingxiangfei2009))
+- Now mutations don't skip attached parts, even if their mutation version is larger than the current mutation version. [\#7812](https://github.com/ClickHouse/ClickHouse/pull/7812) ([Zhichang Yu](https://github.com/yuzhichang)) [\#8250](https://github.com/ClickHouse/ClickHouse/pull/8250) ([alesapin](https://github.com/alesapin))
+- Ignore redundant copies of `*MergeTree` data parts after a move to another disk and a server restart. [\#7810](https://github.com/ClickHouse/ClickHouse/pull/7810) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fix a crash in `FULL JOIN` with `LowCardinality` in the `JOIN` key. [\#8252](https://github.com/ClickHouse/ClickHouse/pull/8252) ([Artem Zuikov](https://github.com/4ertus2))
+- Forbid using a column name more than once in an insert query like `INSERT INTO tbl (x, y, x)`. This fixes [\#5465](https://github.com/ClickHouse/ClickHouse/issues/5465), [\#7681](https://github.com/ClickHouse/ClickHouse/issues/7681). [\#7685](https://github.com/ClickHouse/ClickHouse/pull/7685) ([alesapin](https://github.com/alesapin))
+- Added a fallback for detecting the number of physical CPU cores for unknown CPUs (using the number of logical CPU cores). This fixes [\#5239](https://github.com/ClickHouse/ClickHouse/issues/5239). [\#7726](https://github.com/ClickHouse/ClickHouse/pull/7726) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix the `There's no column` error for materialized and alias columns. [\#8210](https://github.com/ClickHouse/ClickHouse/pull/8210) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed a severe crash when the `EXISTS` query was used without a `TABLE` or `DICTIONARY` qualifier, just like `EXISTS t`. This fixes [\#8172](https://github.com/ClickHouse/ClickHouse/issues/8172). This bug was introduced in version 19.17. [\#8213](https://github.com/ClickHouse/ClickHouse/pull/8213) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix a rare bug with the error `"Sizes of columns doesn't match"` that might appear when using a `SimpleAggregateFunction` column. [\#7790](https://github.com/ClickHouse/ClickHouse/pull/7790) ([Boris Granveaud](https://github.com/bgranvea))
+- Fix a bug where users with an empty `allow_databases` got access to all databases (the same for `allow_dictionaries`). [\#7793](https://github.com/ClickHouse/ClickHouse/pull/7793) ([DeifyTheGod](https://github.com/DeifyTheGod))
+- Fix a client crash when the server has already disconnected from the client. [\#8071](https://github.com/ClickHouse/ClickHouse/pull/8071) ([Azat Khuzhin](https://github.com/azat))
+- Fix `ORDER BY` behaviour in case of sorting by a primary key prefix and a non-primary-key suffix. [\#7759](https://github.com/ClickHouse/ClickHouse/pull/7759) ([Anton Popov](https://github.com/CurtizJ))
+- Check if a qualified column is present in the table. This fixes [\#6836](https://github.com/ClickHouse/ClickHouse/issues/6836). [\#7758](https://github.com/ClickHouse/ClickHouse/pull/7758) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed the behavior of `ALTER MOVE` run immediately after a merge finishes, which moved a superpart of the specified one. Fixes [\#8103](https://github.com/ClickHouse/ClickHouse/issues/8103). [\#8104](https://github.com/ClickHouse/ClickHouse/pull/8104) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fix a possible server crash while using `UNION` with a different number of columns. Fixes [\#7279](https://github.com/ClickHouse/ClickHouse/issues/7279). [\#7929](https://github.com/ClickHouse/ClickHouse/pull/7929) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix the size of the result substring for the function `substr` with negative size. [\#8589](https://github.com/ClickHouse/ClickHouse/pull/8589) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Now the server does not execute part mutations in `MergeTree` if there are not enough free threads in the background pool. [\#8588](https://github.com/ClickHouse/ClickHouse/pull/8588) ([tavplubix](https://github.com/tavplubix))
+- Fix a minor typo in formatting the `UNION ALL` AST. [\#7999](https://github.com/ClickHouse/ClickHouse/pull/7999) ([litao91](https://github.com/litao91))
+- Fixed incorrect bloom filter results for negative numbers. This fixes [\#8317](https://github.com/ClickHouse/ClickHouse/issues/8317). [\#8566](https://github.com/ClickHouse/ClickHouse/pull/8566) ([Winter Zhang](https://github.com/zhang2014))
+- Fixed a potential buffer overflow in decompression. A malicious user could pass fabricated compressed data that would cause a read after the buffer. This issue was found by Eldar Zaitov from the Yandex information security team. [\#8404](https://github.com/ClickHouse/ClickHouse/pull/8404) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix an incorrect result because of integer overflow in `arrayIntersect`. [\#7777](https://github.com/ClickHouse/ClickHouse/pull/7777) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Now the `OPTIMIZE TABLE` query will not wait for offline replicas to perform the operation. [\#8314](https://github.com/ClickHouse/ClickHouse/pull/8314) ([javi santana](https://github.com/javisantana))
+- Fixed the `ALTER TTL` parser for `Replicated*MergeTree` tables. [\#8318](https://github.com/ClickHouse/ClickHouse/pull/8318) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fix the communication between server and client, so the server reads temporary table info after a query failure. [\#8084](https://github.com/ClickHouse/ClickHouse/pull/8084) ([Azat Khuzhin](https://github.com/azat))
+- Fix a `bitmapAnd` function error when intersecting an aggregated bitmap and a scalar bitmap. [\#8082](https://github.com/ClickHouse/ClickHouse/pull/8082) ([黄月](https://github.com/moon03432))
+- Refine the definition of `ZXid` according to the ZooKeeper Programmer's Guide, which fixes a bug in `clickhouse-cluster-copier`. [\#8088](https://github.com/ClickHouse/ClickHouse/pull/8088) ([Ding Xiang Fei](https://github.com/dingxiangfei2009))
+- The `odbc` table function now respects the `external_table_functions_use_nulls` setting. [\#7506](https://github.com/ClickHouse/ClickHouse/pull/7506) ([Vasily Nemkov](https://github.com/Enmk))
+- Fixed a bug that led to a rare data race. [\#8143](https://github.com/ClickHouse/ClickHouse/pull/8143) ([Alexander Kazakov](https://github.com/Akazz))
+- Now `SYSTEM RELOAD DICTIONARY` reloads a dictionary completely, ignoring `update_field`. This fixes [\#7440](https://github.com/ClickHouse/ClickHouse/issues/7440). [\#8037](https://github.com/ClickHouse/ClickHouse/pull/8037) ([Vitaly Baranov](https://github.com/vitlibar))
+- Add the ability to check if a dictionary exists in a create query. [\#8032](https://github.com/ClickHouse/ClickHouse/pull/8032) ([alesapin](https://github.com/alesapin))
+- Fix `Float*` parsing in the `Values` format. This fixes [\#7817](https://github.com/ClickHouse/ClickHouse/issues/7817). [\#7870](https://github.com/ClickHouse/ClickHouse/pull/7870) ([tavplubix](https://github.com/tavplubix))
+- Fix a crash when we cannot reserve space in some background operations of the `*MergeTree` table engine family. [\#7873](https://github.com/ClickHouse/ClickHouse/pull/7873) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fix a crash of the merge operation when the table contains a `SimpleAggregateFunction(LowCardinality)` column. This fixes [\#8515](https://github.com/ClickHouse/ClickHouse/issues/8515). [\#8522](https://github.com/ClickHouse/ClickHouse/pull/8522) ([Azat Khuzhin](https://github.com/azat))
+- Restore support of all ICU locales and add the ability to apply collations for constant expressions. Also add the language name to the `system.collations` table. [\#8051](https://github.com/ClickHouse/ClickHouse/pull/8051) ([alesapin](https://github.com/alesapin))
+- Fix a bug where external dictionaries with zero minimal lifetime (`LIFETIME(MIN 0 MAX N)`, `LIFETIME(N)`) don't update in the background. [\#7983](https://github.com/ClickHouse/ClickHouse/pull/7983) ([alesapin](https://github.com/alesapin))
+- Fix a crash when an external dictionary with a ClickHouse source has a subquery in its query. [\#8351](https://github.com/ClickHouse/ClickHouse/pull/8351) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix incorrect parsing of the file extension in tables with engine `URL`. This fixes [\#8157](https://github.com/ClickHouse/ClickHouse/issues/8157). [\#8419](https://github.com/ClickHouse/ClickHouse/pull/8419) ([Andrei Bodrov](https://github.com/apbodrov))
+- Fix the `CHECK TABLE` query for `*MergeTree` tables without a key. Fixes [\#7543](https://github.com/ClickHouse/ClickHouse/issues/7543). [\#7979](https://github.com/ClickHouse/ClickHouse/pull/7979) ([alesapin](https://github.com/alesapin))
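+    A minimal sketch of the previously failing scenario (the table name is illustrative):
+
+    ``` sql
+    CREATE TABLE mt_without_pk (x UInt64) ENGINE = MergeTree ORDER BY tuple();
+    INSERT INTO mt_without_pk VALUES (1), (2);
+    CHECK TABLE mt_without_pk;  -- now succeeds instead of failing
+    ```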
+- Fixed the conversion of `Float64` to the MySQL type. [\#8079](https://github.com/ClickHouse/ClickHouse/pull/8079) ([Yuriy Baranov](https://github.com/yurriy))
+- Now if a table was not completely dropped because of a server crash, the server will try to restore and load it. [\#8176](https://github.com/ClickHouse/ClickHouse/pull/8176) ([tavplubix](https://github.com/tavplubix))
+- Fixed a crash in the table function `file` while inserting into a file that doesn't exist. Now in this case the file is created and the insert is then processed. [\#8177](https://github.com/ClickHouse/ClickHouse/pull/8177) ([Olga Khvostikova](https://github.com/stavrolia))
+- Fix a rare deadlock which can happen when `trace_log` is enabled. [\#7838](https://github.com/ClickHouse/ClickHouse/pull/7838) ([filimonov](https://github.com/filimonov))
+- Add the ability to work with different types besides `Date` in `RangeHashed` external dictionaries created from DDL queries. Fixes [7899](https://github.com/ClickHouse/ClickHouse/issues/7899). [\#8275](https://github.com/ClickHouse/ClickHouse/pull/8275) ([alesapin](https://github.com/alesapin))
+- Fix a crash when `now64()` is called with the result of another function. [\#8270](https://github.com/ClickHouse/ClickHouse/pull/8270) ([Vasily Nemkov](https://github.com/Enmk))
+- Fixed a bug with detecting the client IP for connections through the mysql wire protocol. [\#7743](https://github.com/ClickHouse/ClickHouse/pull/7743) ([Dmitry Muzyka](https://github.com/dmitriy-myz))
+- Fix empty array handling in the `arraySplit` function. This fixes [\#7708](https://github.com/ClickHouse/ClickHouse/issues/7708). [\#7747](https://github.com/ClickHouse/ClickHouse/pull/7747) ([hcz](https://github.com/hczhcz))
+- Fixed the issue that the `pid-file` of another running `clickhouse-server` could be deleted. [\#8487](https://github.com/ClickHouse/ClickHouse/pull/8487) ([Weiqing Xu](https://github.com/weiqxu))
+- Fix dictionary reload if the dictionary has `invalidate_query`, which stopped updates after some exception on previous update attempts. [\#8029](https://github.com/ClickHouse/ClickHouse/pull/8029) ([alesapin](https://github.com/alesapin))
+- Fixed an error in the function `arrayReduce` that may lead to a “double free”, and an error in the aggregate function combinator `Resample` that may lead to a memory leak. Added the aggregate function `aggThrow`. This function can be used for testing purposes. [\#8446](https://github.com/ClickHouse/ClickHouse/pull/8446) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Improvement {#improvement-1}
+
+- Improved logging when working with the `S3` table engine. [\#8251](https://github.com/ClickHouse/ClickHouse/pull/8251) ([Grigory Pervakov](https://github.com/GrigoryPervakov))
+- Print a help message when no arguments are passed when calling `clickhouse-local`. This fixes [\#5335](https://github.com/ClickHouse/ClickHouse/issues/5335). [\#8230](https://github.com/ClickHouse/ClickHouse/pull/8230) ([Andrey Nagorny](https://github.com/Melancholic))
+- Add the setting `mutations_sync` which allows waiting for `ALTER UPDATE/DELETE` queries synchronously. [\#8237](https://github.com/ClickHouse/ClickHouse/pull/8237) ([alesapin](https://github.com/alesapin))
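+    For example (the table name is illustrative):
+
+    ``` sql
+    SET mutations_sync = 2;  -- 1 waits on the current server, 2 waits for all replicas
+    ALTER TABLE hits DELETE WHERE event_date < '2019-01-01';  -- now returns only after the mutation finishes
+    ```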
+- Allow setting a relative `user_files_path` in `config.xml` (in a way similar to `format_schema_path`). [\#7632](https://github.com/ClickHouse/ClickHouse/pull/7632) ([hcz](https://github.com/hczhcz))
+- Add an exception for illegal types for conversion functions with the `-OrZero` postfix. [\#7880](https://github.com/ClickHouse/ClickHouse/pull/7880) ([Andrey Konyaev](https://github.com/akonyaev90))
+- Simplify the format of the header of data sent to a shard in a distributed query. [\#8044](https://github.com/ClickHouse/ClickHouse/pull/8044) ([Vitaly Baranov](https://github.com/vitlibar))
+- `Live View` table engine refactoring. [\#8519](https://github.com/ClickHouse/ClickHouse/pull/8519) ([vzakaznikov](https://github.com/vzakaznikov))
+- Add additional checks for external dictionaries created from DDL queries. [\#8127](https://github.com/ClickHouse/ClickHouse/pull/8127) ([alesapin](https://github.com/alesapin))
+- Fix the error `Column ... already exists` while using `FINAL` and `SAMPLE` together, e.g. `select count() from table final sample 1/2`. Fixes [\#5186](https://github.com/ClickHouse/ClickHouse/issues/5186). [\#7907](https://github.com/ClickHouse/ClickHouse/pull/7907) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Now the first argument of the `joinGet` function can be a table identifier. [\#7707](https://github.com/ClickHouse/ClickHouse/pull/7707) ([Amos Bird](https://github.com/amosbird))
+- Allow using `MaterializedView` with subqueries above `Kafka` tables. [\#8197](https://github.com/ClickHouse/ClickHouse/pull/8197) ([filimonov](https://github.com/filimonov))
+- Now background moves between disks run in a separate thread pool. [\#7670](https://github.com/ClickHouse/ClickHouse/pull/7670) ([Vladimir Chebotarev](https://github.com/excitoon))
+- `SYSTEM RELOAD DICTIONARY` now executes synchronously. [\#8240](https://github.com/ClickHouse/ClickHouse/pull/8240) ([Vitaly Baranov](https://github.com/vitlibar))
+- Stack traces now display physical addresses (offsets in the object file) instead of virtual memory addresses (where the object file was loaded). That allows the use of `addr2line` when the binary is position-independent and ASLR is active. This fixes [\#8360](https://github.com/ClickHouse/ClickHouse/issues/8360). [\#8387](https://github.com/ClickHouse/ClickHouse/pull/8387) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Support a new syntax for row-level security filters, defined per table in the users configuration. Fixes [\#5779](https://github.com/ClickHouse/ClickHouse/issues/5779). [\#8381](https://github.com/ClickHouse/ClickHouse/pull/8381) ([Ivan](https://github.com/abyss7))
+- Now the `cityHash` function can work with `Decimal` and `UUID` types. Fixes [\#5184](https://github.com/ClickHouse/ClickHouse/issues/5184). [\#7693](https://github.com/ClickHouse/ClickHouse/pull/7693) ([Mikhail Korotov](https://github.com/millb))
+- Removed the fixed index granularity (it was 1024) from system logs because it's obsolete after the implementation of adaptive granularity. [\#7698](https://github.com/ClickHouse/ClickHouse/pull/7698) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Enabled the MySQL compatibility server when ClickHouse is compiled without SSL. [\#7852](https://github.com/ClickHouse/ClickHouse/pull/7852) ([Yuriy Baranov](https://github.com/yurriy))
+- Now the server checksums distributed batches, which gives more verbose errors in case of corrupted data in a batch. [\#7914](https://github.com/ClickHouse/ClickHouse/pull/7914) ([Azat Khuzhin](https://github.com/azat))
+- Support `DROP DATABASE`, `DETACH TABLE`, `DROP TABLE` and `ATTACH TABLE` for the `MySQL` database engine. [\#8202](https://github.com/ClickHouse/ClickHouse/pull/8202) ([Winter Zhang](https://github.com/zhang2014))
+- Add authentication in the S3 table function and table engine. [\#7623](https://github.com/ClickHouse/ClickHouse/pull/7623) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Added a check for extra parts of `MergeTree` on different disks, in order not to miss data parts on undefined disks. [\#8118](https://github.com/ClickHouse/ClickHouse/pull/8118) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Enable SSL support for the Mac client and server. [\#8297](https://github.com/ClickHouse/ClickHouse/pull/8297) ([Ivan](https://github.com/abyss7))
+- Now ClickHouse can work as a MySQL federated server (see https://dev.mysql.com/doc/refman/5.7/en/federated-create-server.html). [\#7717](https://github.com/ClickHouse/ClickHouse/pull/7717) ([Maxim Fedotov](https://github.com/MaxFedotov))
+- `clickhouse-client` now only enables `bracketed-paste` when multiquery is on and multiline is off. This fixes [\#7757](https://github.com/ClickHouse/ClickHouse/issues/7757). [\#7761](https://github.com/ClickHouse/ClickHouse/pull/7761) ([Amos Bird](https://github.com/amosbird))
+- Support `Array(Decimal)` in the `if` function. [\#7721](https://github.com/ClickHouse/ClickHouse/pull/7721) ([Artem Zuikov](https://github.com/4ertus2))
+- Support Decimals in the `arrayDifference`, `arrayCumSum` and `arrayCumSumNegative` functions. [\#7724](https://github.com/ClickHouse/ClickHouse/pull/7724) ([Artem Zuikov](https://github.com/4ertus2))
+- Added a `lifetime` column to the `system.dictionaries` table. [\#6820](https://github.com/ClickHouse/ClickHouse/issues/6820) [\#7727](https://github.com/ClickHouse/ClickHouse/pull/7727) ([kekekekule](https://github.com/kekekekule))
+- Improved the check for existing parts on different disks for the `*MergeTree` table engines. Addresses [\#7660](https://github.com/ClickHouse/ClickHouse/issues/7660). [\#8440](https://github.com/ClickHouse/ClickHouse/pull/8440) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Integration with the `AWS SDK` for `S3` interactions, which allows to use all S3 features out of the box. [\#8011](https://github.com/ClickHouse/ClickHouse/pull/8011) ([Pavel Kovalenko](https://github.com/Jokser))
+- Added support for subqueries in `Live View` tables. [\#7792](https://github.com/ClickHouse/ClickHouse/pull/7792) ([vzakaznikov](https://github.com/vzakaznikov))
+- The check for using `Date` or `DateTime` columns from `TTL` expressions was removed. [\#7920](https://github.com/ClickHouse/ClickHouse/pull/7920) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Information about the disk was added to the `system.detached_parts` table. [\#7833](https://github.com/ClickHouse/ClickHouse/pull/7833) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Now the settings `max_(table|partition)_size_to_drop` can be changed without a restart. [\#7779](https://github.com/ClickHouse/ClickHouse/pull/7779) ([Grigory Pervakov](https://github.com/GrigoryPervakov))
+- Slightly better usability of error messages. Ask the user not to remove the lines below `Stack trace:`. [\#7897](https://github.com/ClickHouse/ClickHouse/pull/7897) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Better reading of messages from the `Kafka` engine in various formats after [\#7935](https://github.com/ClickHouse/ClickHouse/issues/7935). [\#8035](https://github.com/ClickHouse/ClickHouse/pull/8035) ([Ivan](https://github.com/abyss7))
+- Better compatibility with MySQL clients which don't support the `sha2_password` auth plugin. [\#8036](https://github.com/ClickHouse/ClickHouse/pull/8036) ([Yuriy Baranov](https://github.com/yurriy))
+- Support more column types in the MySQL compatibility server. [\#7975](https://github.com/ClickHouse/ClickHouse/pull/7975) ([Yuriy Baranov](https://github.com/yurriy))
+- Implement the `ORDER BY` optimization for `Merge`, `Buffer` and `Materialized View` storages with underlying `MergeTree` tables. [\#8130](https://github.com/ClickHouse/ClickHouse/pull/8130) ([Anton Popov](https://github.com/CurtizJ))
+- Now we always use the POSIX implementation of `getrandom` for better compatibility with old kernels (< 3.17). [\#7940](https://github.com/ClickHouse/ClickHouse/pull/7940) ([Amos Bird](https://github.com/amosbird))
+- Better check for a valid destination in a move TTL rule. [\#8410](https://github.com/ClickHouse/ClickHouse/pull/8410) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Better checks for broken insert batches for the `Distributed` table engine. [\#7933](https://github.com/ClickHouse/ClickHouse/pull/7933) ([Azat Khuzhin](https://github.com/azat))
+- Add a column with an array of the names of parts which mutations must process in the future to the `system.mutations` table. [\#8179](https://github.com/ClickHouse/ClickHouse/pull/8179) ([alesapin](https://github.com/alesapin))
+- Parallel merge sort optimization for processors. [\#8552](https://github.com/ClickHouse/ClickHouse/pull/8552) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- The setting `mark_cache_min_lifetime` is now obsolete and does nothing. In previous versions, the mark cache could grow in memory larger than `mark_cache_size` to accommodate data within `mark_cache_min_lifetime` seconds. That led to confusion and higher memory usage than expected, which is especially bad on memory-constrained systems. If you see performance degradation after installing this release, you should increase `mark_cache_size`. [\#8484](https://github.com/ClickHouse/ClickHouse/pull/8484) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Preparation to use `tid` everywhere. This is needed for [\#7477](https://github.com/ClickHouse/ClickHouse/issues/7477). [\#8276](https://github.com/ClickHouse/ClickHouse/pull/8276) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Performance Improvement {#performance-improvement-1}
+
+- Performance optimizations in the processors pipeline. [\#7988](https://github.com/ClickHouse/ClickHouse/pull/7988) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Non-blocking updates of expired keys in cache dictionaries (with permission to read the old ones). [\#8303](https://github.com/ClickHouse/ClickHouse/pull/8303) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Compile ClickHouse without `-fno-omit-frame-pointer` globally to spare one more register. [\#8097](https://github.com/ClickHouse/ClickHouse/pull/8097) ([Amos Bird](https://github.com/amosbird))
+- Speed up the `greatCircleDistance` function and add performance tests for it. [\#7307](https://github.com/ClickHouse/ClickHouse/pull/7307) ([Olga Khvostikova](https://github.com/stavrolia))
+- Improved performance of the function `roundDown`. [\#8465](https://github.com/ClickHouse/ClickHouse/pull/8465) ([alexey-milovidov](https://github.com/alexey-milovidov))
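+    As a reminder of what the function computes:
+
+    ``` sql
+    SELECT roundDown(3.7, [0, 1, 2, 5, 10]);  -- rounds down to the nearest array element: returns 2
+    ```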
+- Improved performance of `max`, `min`, `argMin`, `argMax` for the `DateTime64` data type. [\#8199](https://github.com/ClickHouse/ClickHouse/pull/8199) ([Vasily Nemkov](https://github.com/Enmk))
+- Improved performance of sorting without a limit or with a big limit and external sorting. [\#8545](https://github.com/ClickHouse/ClickHouse/pull/8545) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Improved performance of formatting floating point numbers by up to 6 times. [\#8542](https://github.com/ClickHouse/ClickHouse/pull/8542) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Improved performance of the `modulo` function. [\#7750](https://github.com/ClickHouse/ClickHouse/pull/7750) ([Amos Bird](https://github.com/amosbird))
+- Optimized `ORDER BY` and merging with a single column key. [\#8335](https://github.com/ClickHouse/ClickHouse/pull/8335) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Better implementation for `arrayReduce`, the `-Array` and the `-State` combinators. [\#7710](https://github.com/ClickHouse/ClickHouse/pull/7710) ([Amos Bird](https://github.com/amosbird))
+- Now `PREWHERE` should be optimized to be at least as efficient as `WHERE`. [\#7769](https://github.com/ClickHouse/ClickHouse/pull/7769) ([Amos Bird](https://github.com/amosbird))
+- Improve the way `round` and `roundBankers` handle negative numbers. [\#8229](https://github.com/ClickHouse/ClickHouse/pull/8229) ([hcz](https://github.com/hczhcz))
+- Improved decoding performance of the `DoubleDelta` and `Gorilla` codecs by roughly 30-40%. This fixes [\#7082](https://github.com/ClickHouse/ClickHouse/issues/7082). [\#8019](https://github.com/ClickHouse/ClickHouse/pull/8019) ([Vasily Nemkov](https://github.com/Enmk))
+- Improved performance of `base64`-related functions. [\#8444](https://github.com/ClickHouse/ClickHouse/pull/8444) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added a function `geoDistance`. It is similar to `greatCircleDistance` but uses an approximation to the WGS-84 ellipsoid model. The performance of both functions is nearly the same. [\#8086](https://github.com/ClickHouse/ClickHouse/pull/8086) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Faster `min` and `max` aggregation functions for the `Decimal` data type. [\#8144](https://github.com/ClickHouse/ClickHouse/pull/8144) ([Artem Zuikov](https://github.com/4ertus2))
+- Vectorize processing of `arrayReduce`. [\#7608](https://github.com/ClickHouse/ClickHouse/pull/7608) ([Amos Bird](https://github.com/amosbird))
+- `if` chains are now optimized as `multiIf`. [\#8355](https://github.com/ClickHouse/ClickHouse/pull/8355) ([kamalov-ruslan](https://github.com/kamalov-ruslan))
+- Fix the performance regression of the `Kafka` table engine introduced in 19.15. This fixes [\#7261](https://github.com/ClickHouse/ClickHouse/issues/7261). [\#7935](https://github.com/ClickHouse/ClickHouse/pull/7935) ([filimonov](https://github.com/filimonov))
+- Removed “pie” code generation that `gcc` from Debian packages occasionally brings by default. [\#8483](https://github.com/ClickHouse/ClickHouse/pull/8483) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Parallel parsing of data formats. [\#6553](https://github.com/ClickHouse/ClickHouse/pull/6553) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Enable the optimized parser of `Values` with expressions by default (`input_format_values_deduce_templates_of_expressions=1`). [\#8231](https://github.com/ClickHouse/ClickHouse/pull/8231) ([tavplubix](https://github.com/tavplubix))
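+    With this setting the structure of the expressions in a `VALUES` list is deduced once and reused for the remaining rows, for example (the table `t` is illustrative):
+
+    ``` sql
+    SET input_format_values_deduce_templates_of_expressions = 1;  -- now the default
+    INSERT INTO t VALUES (lower('Hello')), (lower('World')), (lower('Again'));
+    ```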
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-2}
+
+- Build fixes for `ARM` and in minimal mode. [\#8304](https://github.com/ClickHouse/ClickHouse/pull/8304) ([proller](https://github.com/proller))
+- Add a coverage file flush for `clickhouse-server` when std::atexit is not called. Also slightly improved logging in stateless tests with coverage. [\#8267](https://github.com/ClickHouse/ClickHouse/pull/8267) ([alesapin](https://github.com/alesapin))
+- Update the LLVM library in contrib. Avoid using LLVM from OS packages. [\#8258](https://github.com/ClickHouse/ClickHouse/pull/8258) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Make the bundled `curl` build fully quiet. [\#8232](https://github.com/ClickHouse/ClickHouse/pull/8232) [\#8203](https://github.com/ClickHouse/ClickHouse/pull/8203) ([Pavel Kovalenko](https://github.com/Jokser))
+- Fix some `MemorySanitizer` warnings. [\#8235](https://github.com/ClickHouse/ClickHouse/pull/8235) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Use the `add_warning` and `no_warning` macros in `CMakeLists.txt`. [\#8604](https://github.com/ClickHouse/ClickHouse/pull/8604) ([Ivan](https://github.com/abyss7))
+- Add support of Minio S3-compatible objects (https://min.io/) for better integration tests. [\#7863](https://github.com/ClickHouse/ClickHouse/pull/7863) [\#7875](https://github.com/ClickHouse/ClickHouse/pull/7875) ([Pavel Kovalenko](https://github.com/Jokser))
+- Imported `libc` headers to contrib. It allows to make builds more consistent across various systems (only for `x86_64-linux-gnu`). [\#5773](https://github.com/ClickHouse/ClickHouse/pull/5773) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Remove `-fPIC` from some libraries. [\#8464](https://github.com/ClickHouse/ClickHouse/pull/8464) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Clean `CMakeLists.txt` for curl. See https://github.com/ClickHouse/ClickHouse/pull/8011\#issuecomment-569478910 [\#8459](https://github.com/ClickHouse/ClickHouse/pull/8459) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Silence warnings in the `CapNProto` library. [\#8220](https://github.com/ClickHouse/ClickHouse/pull/8220) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add performance tests for short-string-optimized hash tables. [\#7679](https://github.com/ClickHouse/ClickHouse/pull/7679) ([Amos Bird](https://github.com/amosbird))
+- Now ClickHouse will build on `AArch64` even if `MADV_FREE` is not available. This fixes [\#8027](https://github.com/ClickHouse/ClickHouse/issues/8027). [\#8243](https://github.com/ClickHouse/ClickHouse/pull/8243) ([Amos Bird](https://github.com/amosbird))
+- Update `zlib-ng` to fix memory sanitizer problems. [\#7182](https://github.com/ClickHouse/ClickHouse/pull/7182) [\#8206](https://github.com/ClickHouse/ClickHouse/pull/8206) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Enable the internal MySQL library on non-Linux systems, because usage of OS packages is very fragile and usually doesn't work at all. This fixes [\#5765](https://github.com/ClickHouse/ClickHouse/issues/5765). [\#8426](https://github.com/ClickHouse/ClickHouse/pull/8426) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed the build on some systems after enabling `libc++`. This supersedes [\#8374](https://github.com/ClickHouse/ClickHouse/issues/8374). [\#8380](https://github.com/ClickHouse/ClickHouse/pull/8380) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Make `Field` methods more type-safe to find more errors. [\#7386](https://github.com/ClickHouse/ClickHouse/pull/7386) [\#8209](https://github.com/ClickHouse/ClickHouse/pull/8209) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Added missing files to the `libc-headers` submodule. [\#8507](https://github.com/ClickHouse/ClickHouse/pull/8507) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix wrong `JSON` quoting in performance test output. [\#8497](https://github.com/ClickHouse/ClickHouse/pull/8497) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Now stack traces are displayed for `std::exception` and `Poco::Exception`. In previous versions they were available only for `DB::Exception`. This improves diagnostics. [\#8501](https://github.com/ClickHouse/ClickHouse/pull/8501) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Port `clock_gettime` and `clock_nanosleep` for fresh glibc versions. [\#8054](https://github.com/ClickHouse/ClickHouse/pull/8054) ([Amos Bird](https://github.com/amosbird))
+- Enable `part_log` in the example config for developers. [\#8609](https://github.com/ClickHouse/ClickHouse/pull/8609) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix the async nature of reload in `01036_no_superfluous_dict_reload_on_create_database*`. [\#8111](https://github.com/ClickHouse/ClickHouse/pull/8111) ([Azat Khuzhin](https://github.com/azat))
+- Fixed codec performance tests. [\#8615](https://github.com/ClickHouse/ClickHouse/pull/8615) ([Vasily Nemkov](https://github.com/Enmk))
+- Add install scripts for the `.tgz` build and documentation for them. [\#8612](https://github.com/ClickHouse/ClickHouse/pull/8612) [\#8591](https://github.com/ClickHouse/ClickHouse/pull/8591) ([alesapin](https://github.com/alesapin))
+- Removed the old `ZSTD` test (it was created in 2016 to reproduce a bug that the pre-1.0 version of ZSTD had). This fixes [\#8618](https://github.com/ClickHouse/ClickHouse/issues/8618). [\#8619](https://github.com/ClickHouse/ClickHouse/pull/8619) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed the build on Mac OS Catalina. [\#8600](https://github.com/ClickHouse/ClickHouse/pull/8600) ([meo](https://github.com/meob))
+- Increased the number of rows in codec performance tests to make the results noticeable. [\#8574](https://github.com/ClickHouse/ClickHouse/pull/8574) ([Vasily Nemkov](https://github.com/Enmk))
+- In debug builds, treat `LOGICAL_ERROR` exceptions as assertion failures, so that they are easier to notice. [\#8475](https://github.com/ClickHouse/ClickHouse/pull/8475) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Make the formats-related performance test more deterministic. [\#8477](https://github.com/ClickHouse/ClickHouse/pull/8477) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Update `lz4` to fix a MemorySanitizer failure. [\#8181](https://github.com/ClickHouse/ClickHouse/pull/8181) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Suppress a known MemorySanitizer false positive in exception handling. [\#8182](https://github.com/ClickHouse/ClickHouse/pull/8182) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Update `gcc` and `g++` to version 9 in `build/docker/build.sh`. [\#7766](https://github.com/ClickHouse/ClickHouse/pull/7766) ([TLightSky](https://github.com/tlightsky))
+- Add a performance test case to test that `PREWHERE` is worse than `WHERE`. [\#7768](https://github.com/ClickHouse/ClickHouse/pull/7768) ([Amos Bird](https://github.com/amosbird))
+- Progress towards fixing one flaky test. [\#8621](https://github.com/ClickHouse/ClickHouse/pull/8621) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Avoid MemorySanitizer reports for data from `libunwind`. [\#8539](https://github.com/ClickHouse/ClickHouse/pull/8539) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Updated `libc++` to the latest version. [\#8324](https://github.com/ClickHouse/ClickHouse/pull/8324) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Build the ICU library from sources. This fixes [\#6460](https://github.com/ClickHouse/ClickHouse/issues/6460). [\#8219](https://github.com/ClickHouse/ClickHouse/pull/8219) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Switched from `libressl` to `openssl`. ClickHouse should support TLS 1.3 and SNI after this change. This fixes [\#8171](https://github.com/ClickHouse/ClickHouse/issues/8171). [\#8218](https://github.com/ClickHouse/ClickHouse/pull/8218) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a UBSan report when using `chacha20_poly1305` from SSL (happens on connect to https://yandex.ru/). [\#8214](https://github.com/ClickHouse/ClickHouse/pull/8214) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix the mode of the default password file for `.deb` linux distros. [\#8075](https://github.com/ClickHouse/ClickHouse/pull/8075) ([proller](https://github.com/proller))
+- Improved the expression for getting the `clickhouse-server` PID in `clickhouse-test`. [\#8063](https://github.com/ClickHouse/ClickHouse/pull/8063) ([Alexander Kazakov](https://github.com/Akazz))
+- Updated contrib/googletest to v1.10.0. [\#8587](https://github.com/ClickHouse/ClickHouse/pull/8587) ([Alexander Burmak](https://github.com/Alex-Burmak))
+- Fixed a ThreadSanitizer report in the `base64` library. Also updated this library to the latest version, but it doesn't matter. This fixes [\#8397](https://github.com/ClickHouse/ClickHouse/issues/8397). [\#8403](https://github.com/ClickHouse/ClickHouse/pull/8403) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix `00600_replace_running_query` for processors. [\#8272](https://github.com/ClickHouse/ClickHouse/pull/8272) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Remove support for `tcmalloc` to make `CMakeLists.txt` simpler. [\#8310](https://github.com/ClickHouse/ClickHouse/pull/8310) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Release gcc builds now use `libc++` instead of `libstdc++`. Recently `libc++` was used only with clang. This will improve the consistency of build configurations and portability. [\#8311](https://github.com/ClickHouse/ClickHouse/pull/8311) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Enable the ICU library for builds with MemorySanitizer. [\#8222](https://github.com/ClickHouse/ClickHouse/pull/8222) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Suppress warnings from the `CapNProto` library. [\#8224](https://github.com/ClickHouse/ClickHouse/pull/8224) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Removed special cases of code for `tcmalloc`, because it's no longer supported. [\#8225](https://github.com/ClickHouse/ClickHouse/pull/8225) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- In the CI coverage task, kill the server gracefully to allow it to save the coverage report. This fixes the incomplete coverage reports we've been seeing lately. [\#8142](https://github.com/ClickHouse/ClickHouse/pull/8142) ([alesapin](https://github.com/alesapin))
+- Performance tests for all codecs against `Float64` and `UInt64` values. [\#8349](https://github.com/ClickHouse/ClickHouse/pull/8349) ([Vasily Nemkov](https://github.com/Enmk))
+- `termcap` is very much deprecated and leads to various problems (e.g. a missing “up” cap and echoing `^J` instead of multi line). Favor `terminfo` or the bundled `ncurses`. [\#7737](https://github.com/ClickHouse/ClickHouse/pull/7737) ([Amos Bird](https://github.com/amosbird))
+- Fix the `test_storage_s3` integration test. [\#7734](https://github.com/ClickHouse/ClickHouse/pull/7734) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Support `StorageFile(<format>, null)` to insert a block into a file of the given format without actually writing to disk. This is required for performance tests. [\#8455](https://github.com/ClickHouse/ClickHouse/pull/8455) ([Amos Bird](https://github.com/amosbird))
+- Add the argument `--print-time` to functional tests, which prints the execution time per test. [\#8001](https://github.com/ClickHouse/ClickHouse/pull/8001) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Added asserts to `KeyCondition` while evaluating RPN. This will fix a warning from gcc-9. [\#8279](https://github.com/ClickHouse/ClickHouse/pull/8279) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Dump cmake options in CI builds. [\#8273](https://github.com/ClickHouse/ClickHouse/pull/8273) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Don't generate debug info for some fat libraries. [\#8271](https://github.com/ClickHouse/ClickHouse/pull/8271) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Make `log_to_console.xml` always log to stderr, regardless of whether it is interactive or not. [\#8395](https://github.com/ClickHouse/ClickHouse/pull/8395) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Removed some unused features from the `clickhouse-performance-test` tool. [\#8555](https://github.com/ClickHouse/ClickHouse/pull/8555) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Now we will also search for `lld-X` with the corresponding `clang-X` version. [\#8092](https://github.com/ClickHouse/ClickHouse/pull/8092) ([alesapin](https://github.com/alesapin))
+- Parquet build improvement. [\#8421](https://github.com/ClickHouse/ClickHouse/pull/8421) ([maxulan](https://github.com/maxulan))
+- More GCC warnings. [\#8221](https://github.com/ClickHouse/ClickHouse/pull/8221) ([kreuzerkrieg](https://github.com/kreuzerkrieg))
+- The package for Arch Linux now allows to run the ClickHouse server, and not only the client. [\#8534](https://github.com/ClickHouse/ClickHouse/pull/8534) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fix tests with processors. Tiny performance fixes. [\#7672](https://github.com/ClickHouse/ClickHouse/pull/7672) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Update contrib/protobuf. [\#8256](https://github.com/ClickHouse/ClickHouse/pull/8256) ([Matwey V. Kornilov](https://github.com/matwey))
+- In preparation of switching to c++20 as a new year celebration. “May the C++ force be with ClickHouse.” [\#8447](https://github.com/ClickHouse/ClickHouse/pull/8447) ([Amos Bird](https://github.com/amosbird))
+
+#### Experimental Feature {#experimental-feature-1}
+
+- Added the experimental setting `min_bytes_to_use_mmap_io`. It allows reading big files without copying data from the kernel to userspace. The setting is disabled by default. The recommended threshold is about 64 MB, because mmap/munmap is slow. [\#8520](https://github.com/ClickHouse/ClickHouse/pull/8520) ([alexey-milovidov](https://github.com/alexey-milovidov))
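+    For example, to try it with the threshold recommended above:
+
+    ``` sql
+    SET min_bytes_to_use_mmap_io = 67108864;  -- 64 MB; 0 (the default) disables mmap IO
+    ```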
+- Reworked quotas as a part of the access control system. Added a new table `system.quotas`, new functions `currentQuota`, `currentQuotaKey`, and new SQL syntax `CREATE QUOTA`, `ALTER QUOTA`, `DROP QUOTA`, `SHOW QUOTA`. [\#7257](https://github.com/ClickHouse/ClickHouse/pull/7257) ([Vitaly Baranov](https://github.com/vitlibar))
+- Allow skipping unknown settings with warnings instead of throwing exceptions. [\#7653](https://github.com/ClickHouse/ClickHouse/pull/7653) ([Vitaly Baranov](https://github.com/vitlibar))
+- Reworked row policies as a part of the access control system. Added a new table `system.row_policies`, a new function `currentRowPolicies()`, and new SQL syntax `CREATE POLICY`, `ALTER POLICY`, `DROP POLICY`, `SHOW CREATE POLICY`, `SHOW POLICIES`. [\#7808](https://github.com/ClickHouse/ClickHouse/pull/7808) ([Vitaly Baranov](https://github.com/vitlibar))
+
+#### Security Fix {#security-fix}
+
+- Fixed the possibility of reading the directory structure in tables with the `File` table engine. This fixes [\#8536](https://github.com/ClickHouse/ClickHouse/issues/8536). [\#8537](https://github.com/ClickHouse/ClickHouse/pull/8537) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+## [Changelog for 2019](https://github.com/ClickHouse/ClickHouse/blob/master/docs/en/changelog/2019.md) {#changelog-for-2019}
diff --git a/docs/zh/whats_new/index.md b/docs/zh/whats_new/index.md
new file mode 100644
index 00000000000..75a13a72bac
--- /dev/null
+++ b/docs/zh/whats_new/index.md
@@ -0,0 +1,8 @@
+---
+machine_translated: true
+machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
+toc_folder_title: "\u65B0\u589E\u5185\u5BB9"
+toc_priority: 72
+---
+
+
diff --git a/docs/zh/roadmap.md b/docs/zh/whats_new/roadmap.md
similarity index 99%
rename from docs/zh/roadmap.md
rename to docs/zh/whats_new/roadmap.md
index 49532c046f5..47e09bc3c78 100644
--- a/docs/zh/roadmap.md
+++ b/docs/zh/whats_new/roadmap.md
@@ -1,3 +1,4 @@
+
 # Roadmap {#gui-hua}
 
 ## Q1 2020 {#q1-2020}
diff --git a/docs/zh/security_changelog.md b/docs/zh/whats_new/security_changelog.md
similarity index 99%
rename from docs/zh/security_changelog.md
rename to docs/zh/whats_new/security_changelog.md
index e35d6a7c632..6315398371f 100644
--- a/docs/zh/security_changelog.md
+++ b/docs/zh/whats_new/security_changelog.md
@@ -1,3 +1,4 @@
+
 ## Fixed in ClickHouse Release 18.12.13, 2018-09-10 {#xiu-fu-yu-clickhouse-release-18-12-13-2018-09-10}
 
 ### CVE-2018-14672 {#cve-2018-14672}

From b06801a2c098ca734d51566c6d7748b44d8142d1 Mon Sep 17 00:00:00 2001
From: Sergei Shtykov
Date: Wed, 8 Apr 2020 18:09:40 +0300
Subject: [PATCH 205/484] CLICKHOUSEDOCS-475: Links fixed.

---
 docs/en/operations/system_tables.md | 44 ++++++++++++++---------------
 1 file changed, 22 insertions(+), 22 deletions(-)

diff --git a/docs/en/operations/system_tables.md b/docs/en/operations/system_tables.md
index b8971108eba..60d13b939fb 100644
--- a/docs/en/operations/system_tables.md
+++ b/docs/en/operations/system_tables.md
@@ -149,36 +149,36 @@ Contains information about detached parts of [MergeTree](../engines/table_engine
 
 ## system.dictionaries {#system_tables-dictionaries}
 
-Contains information about [external dictionaries](../query_language/dicts/external_dicts.md).
+Contains information about [external dictionaries](../sql_reference/dictionaries/external_dictionaries/external_dicts.md).
 
 Columns:
 
- `database` ([String](../sql_reference/data_types/string.md)) — Database name where the dictionary is located. Only for dictionaries created by DDL query, for others is always an empty string.
- `name` ([String](../sql_reference/data_types/string.md)) — [Dictionary name](../sql_reference/dictionaries/external_dictionaries/external_dicts_dict.md).
- `status` ([Enum8](../sql_reference/data_types/enum.md)) — Dictionary status. Possible values:
    - `NOT_LOADED` — Dictionary was not loaded because it was not used.
    - `LOADED` — Dictionary loaded successfully.
    - `FAILED` — Unable to load the dictionary as a result of an error.
    - `LOADING` — Dictionary is loading now.
- - `LOADED_AND_RELOADING` — Dictionary is loaded successfully, and is being reloaded right now (frequent reasons: [SYSTEM RELOAD DICTIONARY](../query_language/system.md#query_language-system-reload-dictionary) query, timeout, dictionary config has changed). + - `LOADED_AND_RELOADING` — Dictionary is loaded successfully, and is being reloaded right now (frequent reasons: [SYSTEM RELOAD DICTIONARY](../sql_reference/statements/system.md#query_language-system-reload-dictionary) query, timeout, dictionary config has changed). - `FAILED_AND_RELOADING` — Could not load the dictionary as a result of an error and is loading now. -- `origin` ([String](../data_types/string.md)) — Path to the configuration file that describes the dictionary. -- `type` ([String](../data_types/string.md)) — Type of a dictionary allocation. [Storing Dictionaries in Memory](../query_language/dicts/external_dicts_dict_layout.md). -- `key` — [Key type](../query_language/dicts/external_dicts_dict_structure.md#ext_dict_structure-key): Numeric Key ([UInt64](../data_types/int_uint.md#uint-ranges)) or Сomposite key ([String](../data_types/string.md)) — form "(type 1, type 2, ..., type n)". -- `attribute.names` ([Array](../data_types/array.md)([String](../data_types/string.md))) — Array of [attribute names](../query_language/dicts/external_dicts_dict_structure.md#ext_dict_structure-attributes) provided by the dictionary. -- `attribute.types` ([Array](../data_types/array.md)([String](../data_types/string.md))) — Corresponding array of [attribute types](../query_language/dicts/external_dicts_dict_structure.md#ext_dict_structure-attributes) that are provided by the dictionary. -- `bytes_allocated` ([UInt64](../data_types/int_uint.md#uint-ranges)) — Amount of RAM allocated for the dictionary. -- `query_count` ([UInt64](../data_types/int_uint.md#uint-ranges)) — Number of queries since the dictionary was loaded or since the last successful reboot. -- `hit_rate` ([Float64](../data_types/float.md)) — For cache dictionaries, the percentage of uses for which the value was in the cache. -- `element_count` ([UInt64](../data_types/int_uint.md#uint-ranges)) — Number of items stored in the dictionary. -- `load_factor` ([Float64](../data_types/float.md)) — Percentage filled in the dictionary (for a hashed dictionary, the percentage filled in the hash table). -- `source` ([String](../data_types/string.md)) — Text describing the [data source](../query_language/dicts/external_dicts_dict_sources.md) for the dictionary. -- `lifetime_min` ([UInt64](../data_types/int_uint.md#uint-ranges)) — Minimum [lifetime](../query_language/dicts/external_dicts_dict_lifetime.md) of the dictionary in memory, after which ClickHouse tries to reload the dictionary (if `invalidate_query` is set, then only if it has changed). Set in seconds. -- `lifetime_max` ([UInt64](../data_types/int_uint.md#uint-ranges)) — Maximum [lifetime](../query_language/dicts/external_dicts_dict_lifetime.md) of the dictionary in memory, after which ClickHouse tries to reload the dictionary (if `invalidate_query` is set, then only if it has changed). Set in seconds. -- `loading_start_time` ([DateTime](../data_types/datetime.md)) — Start time for loading the dictionary. -- `last_successful_update_time` ([DateTime](../data_types/datetime.md)) — End time for loading or updating the dictionary. Helps to monitor some troubles with external sources and investigate causes. -- `loading_duration` ([Float32](../data_types/float.md)) — Duration of a dictionary loading. 
-- `last_exception` ([String](../data_types/string.md)) — Text of the error that occurs when creating or reloading the dictionary if the dictionary couldn't be created.
+- `origin` ([String](../sql_reference/data_types/string.md)) — Path to the configuration file that describes the dictionary.
+- `type` ([String](../sql_reference/data_types/string.md)) — Type of a dictionary allocation. [Storing Dictionaries in Memory](../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_layout.md).
+- `key` — [Key type](../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md#ext_dict_structure-key): Numeric Key ([UInt64](../sql_reference/data_types/int_uint.md#uint-ranges)) or Composite key ([String](../sql_reference/data_types/string.md)) — form "(type 1, type 2, ..., type n)".
+- `attribute.names` ([Array](../sql_reference/data_types/array.md)([String](../sql_reference/data_types/string.md))) — Array of [attribute names](../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md#ext_dict_structure-attributes) provided by the dictionary.
+- `attribute.types` ([Array](../sql_reference/data_types/array.md)([String](../sql_reference/data_types/string.md))) — Corresponding array of [attribute types](../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md#ext_dict_structure-attributes) that are provided by the dictionary.
+- `bytes_allocated` ([UInt64](../sql_reference/data_types/int_uint.md#uint-ranges)) — Amount of RAM allocated for the dictionary.
+- `query_count` ([UInt64](../sql_reference/data_types/int_uint.md#uint-ranges)) — Number of queries since the dictionary was loaded or since the last successful reboot.
+- `hit_rate` ([Float64](../sql_reference/data_types/float.md)) — For cache dictionaries, the percentage of uses for which the value was in the cache.
+- `element_count` ([UInt64](../sql_reference/data_types/int_uint.md#uint-ranges)) — Number of items stored in the dictionary.
+- `load_factor` ([Float64](../sql_reference/data_types/float.md)) — Percentage filled in the dictionary (for a hashed dictionary, the percentage filled in the hash table).
+- `source` ([String](../sql_reference/data_types/string.md)) — Text describing the [data source](../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md) for the dictionary.
+- `lifetime_min` ([UInt64](../sql_reference/data_types/int_uint.md#uint-ranges)) — Minimum [lifetime](../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_lifetime.md) of the dictionary in memory, after which ClickHouse tries to reload the dictionary (if `invalidate_query` is set, then only if it has changed). Set in seconds.
+- `lifetime_max` ([UInt64](../sql_reference/data_types/int_uint.md#uint-ranges)) — Maximum [lifetime](../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_lifetime.md) of the dictionary in memory, after which ClickHouse tries to reload the dictionary (if `invalidate_query` is set, then only if it has changed). Set in seconds.
+- `loading_start_time` ([DateTime](../sql_reference/data_types/datetime.md)) — Start time for loading the dictionary.
+- `last_successful_update_time` ([DateTime](../sql_reference/data_types/datetime.md)) — End time for loading or updating the dictionary. Helps to monitor some troubles with external sources and investigate causes.
+- `loading_duration` ([Float32](../sql_reference/data_types/float.md)) — Duration of a dictionary loading.
+- `last_exception` ([String](../sql_reference/data_types/string.md)) — Text of the error that occurs when creating or reloading the dictionary if the dictionary couldn't be created. **Example** From c8c4dc8104a0b9c0e59457885b0dfc6c5a09994f Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Wed, 8 Apr 2020 19:20:52 +0300 Subject: [PATCH 206/484] fix 'ALTER MODIFY COLUMN' with compact parts --- src/Storages/MergeTree/IMergeTreeReader.cpp | 24 ++++++++++--- src/Storages/MergeTree/IMergeTreeReader.h | 11 ++++-- .../MergeTree/MergeTreeReaderCompact.cpp | 20 ++--------- .../MergeTree/MergeTreeReaderWide.cpp | 36 ++----------------- 4 files changed, 34 insertions(+), 57 deletions(-) diff --git a/src/Storages/MergeTree/IMergeTreeReader.cpp b/src/Storages/MergeTree/IMergeTreeReader.cpp index 4978aeaaa58..8243983d837 100644 --- a/src/Storages/MergeTree/IMergeTreeReader.cpp +++ b/src/Storages/MergeTree/IMergeTreeReader.cpp @@ -32,6 +32,8 @@ IMergeTreeReader::IMergeTreeReader(const MergeTreeData::DataPartPtr & data_part_ , all_mark_ranges(all_mark_ranges_) , alter_conversions(storage.getAlterConversionsForPart(data_part)) { + for (const NameAndTypePair & column_from_part : data_part->getColumns()) + columns_from_part[column_from_part.name] = column_from_part.type; } IMergeTreeReader::~IMergeTreeReader() = default; @@ -183,6 +185,23 @@ void IMergeTreeReader::evaluateMissingDefaults(Block additional_columns, Columns } } +NameAndTypePair IMergeTreeReader::getColumnFromPart(const NameAndTypePair & required_column) const +{ + auto it = columns_from_part.find(required_column.name); + if (it != columns_from_part.end()) + return {it->first, it->second}; + + if (alter_conversions.isColumnRenamed(required_column.name)) + { + String old_name = alter_conversions.getColumnOldName(required_column.name); + it = columns_from_part.find(old_name); + if (it != columns_from_part.end()) + return {it->first, it->second}; + } + + return required_column; +} + void IMergeTreeReader::performRequiredConversions(Columns & res_columns) { try @@ -209,10 +228,7 @@ void IMergeTreeReader::performRequiredConversions(Columns & res_columns) if (res_columns[pos] == nullptr) continue; - if (columns_from_part.count(name_and_type->name)) - copy_block.insert({res_columns[pos], columns_from_part[name_and_type->name], name_and_type->name}); - else - copy_block.insert({res_columns[pos], name_and_type->type, name_and_type->name}); + copy_block.insert({res_columns[pos], getColumnFromPart(*name_and_type).type, name_and_type->name}); } DB::performRequiredConversions(copy_block, columns, storage.global_context); diff --git a/src/Storages/MergeTree/IMergeTreeReader.h b/src/Storages/MergeTree/IMergeTreeReader.h index 622e11dae8b..02d8f67f9d0 100644 --- a/src/Storages/MergeTree/IMergeTreeReader.h +++ b/src/Storages/MergeTree/IMergeTreeReader.h @@ -4,7 +4,6 @@ #include #include - namespace DB { @@ -59,6 +58,9 @@ public: MergeTreeData::DataPartPtr data_part; protected: + /// Returns actual column type in part, which can differ from table metadata. + NameAndTypePair getColumnFromPart(const NameAndTypePair & required_column) const; + /// avg_value_size_hints are used to reduce the number of reallocations when creating columns of variable size. ValueSizeMap avg_value_size_hints; /// Stores states for IDataType::deserializeBinaryBulk @@ -67,8 +69,6 @@ protected: /// Columns that are read. 
NamesAndTypesList columns; - std::unordered_map columns_from_part; - UncompressedCache * uncompressed_cache; MarkCache * mark_cache; @@ -78,8 +78,13 @@ protected: MarkRanges all_mark_ranges; friend class MergeTreeRangeReader::DelayedStream; + +private: /// Alter conversions, which must be applied on fly if required MergeTreeData::AlterConversions alter_conversions; + + /// Actual data type of columns in part + std::unordered_map columns_from_part; }; } diff --git a/src/Storages/MergeTree/MergeTreeReaderCompact.cpp b/src/Storages/MergeTree/MergeTreeReaderCompact.cpp index e4f7275f4a5..a895149e12e 100644 --- a/src/Storages/MergeTree/MergeTreeReaderCompact.cpp +++ b/src/Storages/MergeTree/MergeTreeReaderCompact.cpp @@ -78,15 +78,9 @@ MergeTreeReaderCompact::MergeTreeReaderCompact( auto name_and_type = columns.begin(); for (size_t i = 0; i < columns_num; ++i, ++name_and_type) { - const auto & [name, type] = *name_and_type; + const auto & [name, type] = getColumnFromPart(*name_and_type); auto position = data_part->getColumnPosition(name); - if (!position && alter_conversions.isColumnRenamed(name)) - { - String old_name = alter_conversions.getColumnOldName(name); - position = data_part->getColumnPosition(old_name); - } - if (!position && typeid_cast(type.get())) { /// If array of Nested column is missing in part, @@ -118,7 +112,7 @@ size_t MergeTreeReaderCompact::readRows(size_t from_mark, bool continue_reading, bool append = res_columns[i] != nullptr; if (!append) - res_columns[i] = column_it->type->createColumn(); + res_columns[i] = getColumnFromPart(*column_it).type->createColumn(); mutable_columns[i] = res_columns[i]->assumeMutable(); } @@ -132,15 +126,7 @@ size_t MergeTreeReaderCompact::readRows(size_t from_mark, bool continue_reading, if (!res_columns[pos]) continue; - auto [name, type] = *name_and_type; - - if (alter_conversions.isColumnRenamed(name)) - { - String old_name = alter_conversions.getColumnOldName(name); - if (!data_part->getColumnPosition(name) && data_part->getColumnPosition(old_name)) - name = old_name; - } - + auto [name, type] = getColumnFromPart(*name_and_type); auto & column = mutable_columns[pos]; try diff --git a/src/Storages/MergeTree/MergeTreeReaderWide.cpp b/src/Storages/MergeTree/MergeTreeReaderWide.cpp index ad676b4db03..1a03acb5758 100644 --- a/src/Storages/MergeTree/MergeTreeReaderWide.cpp +++ b/src/Storages/MergeTree/MergeTreeReaderWide.cpp @@ -41,28 +41,10 @@ MergeTreeReaderWide::MergeTreeReaderWide( { try { - for (const NameAndTypePair & column_from_part : data_part->getColumns()) - columns_from_part[column_from_part.name] = column_from_part.type; - for (const NameAndTypePair & column : columns) { - if (columns_from_part.count(column.name)) - { - addStreams(column.name, *columns_from_part[column.name], profile_callback_, clock_type_); - } - else - { - if (alter_conversions.isColumnRenamed(column.name)) - { - String old_name = alter_conversions.getColumnOldName(column.name); - if (columns_from_part.count(old_name)) - addStreams(old_name, *columns_from_part[old_name], profile_callback_, clock_type_); - } - else - { - addStreams(column.name, *column.type, profile_callback_, clock_type_); - } - } + auto column_from_part = getColumnFromPart(column); + addStreams(column_from_part.name, *column_from_part.type, profile_callback_, clock_type_); } } catch (...) 
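The refactor above routes every per-column lookup through `getColumnFromPart`, which first consults the part's own column list and, failing that, the rename map in `alter_conversions`. A minimal SQL sketch of the user-visible situation this handles (table and column names are invented for illustration; they do not appear in this patch):

```sql
DROP TABLE IF EXISTS rename_demo;

CREATE TABLE rename_demo (d Date, v UInt32)
ENGINE = MergeTree ORDER BY d;

-- This part is written with the column stored as 'v'.
INSERT INTO rename_demo VALUES ('2020-01-01', 1);

-- Table metadata now says 'value', but the existing part still contains 'v'.
ALTER TABLE rename_demo RENAME COLUMN v TO value;

-- The reader resolves 'value' back to the part's 'v' on the fly
-- instead of treating it as a missing column.
SELECT value FROM rename_demo;

DROP TABLE rename_demo;
```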
@@ -93,19 +75,7 @@ size_t MergeTreeReaderWide::readRows(size_t from_mark, bool continue_reading, si auto name_and_type = columns.begin(); for (size_t pos = 0; pos < num_columns; ++pos, ++name_and_type) { - String name = name_and_type->name; - if (alter_conversions.isColumnRenamed(name)) - { - String original_name = alter_conversions.getColumnOldName(name); - if (!columns_from_part.count(name) && columns_from_part.count(original_name)) - name = original_name; - } - - DataTypePtr type; - if (columns_from_part.count(name)) - type = columns_from_part[name]; - else - type = name_and_type->type; + auto [name, type] = getColumnFromPart(*name_and_type); /// The column is already present in the block so we will append the values to the end. bool append = res_columns[pos] != nullptr; From 1e3ec9113a77d44772336050b6fe8fd7d41b43c0 Mon Sep 17 00:00:00 2001 From: Ivan Lezhankin Date: Wed, 8 Apr 2020 19:29:08 +0300 Subject: [PATCH 207/484] Fix compiler possible values --- docker/packager/packager | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/packager/packager b/docker/packager/packager index 360a358c6e5..10b4c7e901c 100755 --- a/docker/packager/packager +++ b/docker/packager/packager @@ -214,7 +214,7 @@ if __name__ == "__main__": parser.add_argument("--clickhouse-repo-path", default="../../") parser.add_argument("--output-dir", required=True) parser.add_argument("--build-type", choices=("debug", ""), default="") - parser.add_argument("--compiler", choices=("clang-8", "clang-8-darwin", "clang-8-aarch64", "gcc-8", "gcc-9", "clang-9"), default="gcc-8") + parser.add_argument("--compiler", choices=("clang-8", "clang-8-darwin", "clang-9-aarch64", "clang-9-freebsd", "gcc-8", "gcc-9", "clang-9"), default="gcc-8") parser.add_argument("--sanitizer", choices=("address", "thread", "memory", "undefined", ""), default="") parser.add_argument("--unbundled", action="store_true") parser.add_argument("--split-binary", action="store_true") From ae2a05d4143fa23586f684f129e58178b5b91160 Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Wed, 8 Apr 2020 19:33:57 +0300 Subject: [PATCH 208/484] add test with 'ALTER MODIFY' and compact parts --- .../01114_alter_modify_compact_parts.reference | 1 + .../0_stateless/01114_alter_modify_compact_parts.sql | 12 ++++++++++++ 2 files changed, 13 insertions(+) create mode 100644 tests/queries/0_stateless/01114_alter_modify_compact_parts.reference create mode 100644 tests/queries/0_stateless/01114_alter_modify_compact_parts.sql diff --git a/tests/queries/0_stateless/01114_alter_modify_compact_parts.reference b/tests/queries/0_stateless/01114_alter_modify_compact_parts.reference new file mode 100644 index 00000000000..4ec38dfb475 --- /dev/null +++ b/tests/queries/0_stateless/01114_alter_modify_compact_parts.reference @@ -0,0 +1 @@ +999000 diff --git a/tests/queries/0_stateless/01114_alter_modify_compact_parts.sql b/tests/queries/0_stateless/01114_alter_modify_compact_parts.sql new file mode 100644 index 00000000000..a5aa12548e7 --- /dev/null +++ b/tests/queries/0_stateless/01114_alter_modify_compact_parts.sql @@ -0,0 +1,12 @@ +DROP TABLE IF EXISTS mt_compact; + +CREATE TABLE mt_compact (d Date, id UInt32, s String) + ENGINE = MergeTree ORDER BY id PARTITION BY d + SETTINGS min_bytes_for_wide_part = 10000000, index_granularity = 128; + +INSERT INTO mt_compact SELECT toDate('2020-01-05'), number, toString(number) FROM numbers(1000); +INSERT INTO mt_compact SELECT toDate('2020-01-06'), number, toString(number) FROM numbers(1000); +ALTER TABLE mt_compact MODIFY COLUMN s 
UInt64; +SELECT sum(s) from mt_compact; + +DROP TABLE IF EXISTS mt_compact; From 9ed708b9027d47b7d0bb8326cdf54dce36afebd7 Mon Sep 17 00:00:00 2001 From: zhang2014 Date: Tue, 7 Apr 2020 13:14:49 +0800 Subject: [PATCH 209/484] ISSUES-10056 add some check and support identifier argument for MySQL Database Engine --- src/Databases/DatabaseFactory.cpp | 34 ++++++++++++++++--- .../test_mysql_database_engine/test.py | 7 +++- 2 files changed, 35 insertions(+), 6 deletions(-) diff --git a/src/Databases/DatabaseFactory.cpp b/src/Databases/DatabaseFactory.cpp index 40e5682565d..b6300ab3482 100644 --- a/src/Databases/DatabaseFactory.cpp +++ b/src/Databases/DatabaseFactory.cpp @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include @@ -49,6 +50,28 @@ DatabasePtr DatabaseFactory::get( } } +template +static inline ValueType getLiteralValue(const ASTPtr & ast, const String & engine_name) +{ + if (!ast || !ast->as()) + throw Exception("Database engine " + engine_name + " requested literal argument.", ErrorCodes::BAD_ARGUMENTS); + + return ast->as()->value.safeGet(); +} + +[[maybe_unused]] static inline String getIdentifierOrStringLiteral(const ASTPtr & ast, const String & engine_name) +{ + if (ast) + { + if (const auto & literal = ast->as()) + return literal->value.safeGet(); + else if (const auto & identifier = ast->as()) + return identifier->name; + } + + throw Exception("Database engine " + engine_name + " requested literal or identifier argument.", ErrorCodes::BAD_ARGUMENTS); +} + DatabasePtr DatabaseFactory::getImpl( const String & database_name, const String & metadata_path, const ASTStorage * engine_define, Context & context) { @@ -79,11 +102,12 @@ DatabasePtr DatabaseFactory::getImpl( throw Exception("MySQL Database require mysql_hostname, mysql_database_name, mysql_username, mysql_password arguments.", ErrorCodes::BAD_ARGUMENTS); + const auto & arguments = engine->arguments->children; - const auto & host_name_and_port = arguments[0]->as()->value.safeGet(); - const auto & database_name_in_mysql = arguments[1]->as()->value.safeGet(); - const auto & mysql_user_name = arguments[2]->as()->value.safeGet(); - const auto & mysql_user_password = arguments[3]->as()->value.safeGet(); + const auto & host_name_and_port = getLiteralValue(arguments[0], "MySQL"); + const auto & database_name_in_mysql = getIdentifierOrStringLiteral(arguments[1], "MySQL"); + const auto & mysql_user_name = getLiteralValue(arguments[2], "MySQL"); + const auto & mysql_user_password = getLiteralValue(arguments[3], "MySQL"); try { @@ -114,7 +138,7 @@ DatabasePtr DatabaseFactory::getImpl( const auto & arguments = engine->arguments->children; - const auto cache_expiration_time_seconds = arguments[0]->as()->value.safeGet(); + const auto cache_expiration_time_seconds = getLiteralValue(arguments[0], "Lazy"); return std::make_shared(database_name, metadata_path, cache_expiration_time_seconds, context); } diff --git a/tests/integration/test_mysql_database_engine/test.py b/tests/integration/test_mysql_database_engine/test.py index 86e0b9df5fd..42663e46752 100644 --- a/tests/integration/test_mysql_database_engine/test.py +++ b/tests/integration/test_mysql_database_engine/test.py @@ -92,7 +92,7 @@ def test_clickhouse_dml_for_mysql_database(started_cluster): with contextlib.closing(MySQLNodeInstance('root', 'clickhouse', '127.0.0.1', port=3308)) as mysql_node: mysql_node.query("CREATE DATABASE test_database DEFAULT CHARACTER SET 'utf8'") mysql_node.query('CREATE TABLE `test_database`.`test_table` ( `i``d` int(11) NOT NULL, 
PRIMARY KEY (`i``d`)) ENGINE=InnoDB;') - clickhouse_node.query("CREATE DATABASE test_database ENGINE = MySQL('mysql1:3306', 'test_database', 'root', 'clickhouse')") + clickhouse_node.query("CREATE DATABASE test_database ENGINE = MySQL('mysql1:3306', test_database, 'root', 'clickhouse')") assert clickhouse_node.query("SELECT count() FROM `test_database`.`test_table`").rstrip() == '0' clickhouse_node.query("INSERT INTO `test_database`.`test_table`(`i\`d`) select number from numbers(10000)") @@ -120,3 +120,8 @@ def test_clickhouse_join_for_mysql_database(started_cluster): "LEFT JOIN default.t1_remote_mysql AS s_ref " "ON (s_ref.opco = s.opco AND s_ref.service = s.service)") == '' mysql_node.query("DROP DATABASE test") + +def test_bad_arguments_for_mysql_database_engine(started_cluster): + assert clickhouse_node.query( + "CREATE TABLE default.t1_remote_mysql AS mysql('mysql1:3306', 'test', 't1_mysql_local', root, 'clickhouse')").find( + 'Database engine MySQL requested literal argument.') != -1 From aa0fcf40886f06cd66711071d734198019895348 Mon Sep 17 00:00:00 2001 From: zhang2014 Date: Tue, 7 Apr 2020 20:25:01 +0800 Subject: [PATCH 210/484] ISSUES-10056 update docs --- docs/en/engines/database_engines/mysql.md | 4 ++-- docs/ru/database_engines/mysql.md | 4 +--- docs/zh/engines/database_engines/mysql.md | 4 +--- 3 files changed, 4 insertions(+), 8 deletions(-) diff --git a/docs/en/engines/database_engines/mysql.md b/docs/en/engines/database_engines/mysql.md index 678c174e1fb..467a3aa032d 100644 --- a/docs/en/engines/database_engines/mysql.md +++ b/docs/en/engines/database_engines/mysql.md @@ -3,7 +3,7 @@ toc_priority: 30 toc_title: MySQL --- -# Mysql {#mysql} +# MySQL {#mysql} Allows to connect to databases on a remote MySQL server and perform `INSERT` and `SELECT` queries to exchange data between ClickHouse and MySQL. 
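The hunk below updates the documented signature accordingly. As a usage sketch (the server address and credentials mirror the integration test earlier in this series; any reachable MySQL server works):

```sql
-- Both spellings are accepted once identifier arguments are supported:
-- the MySQL database name may be a string literal or a bare identifier.
CREATE DATABASE mysql_db_a
ENGINE = MySQL('mysql1:3306', 'test_database', 'root', 'clickhouse');

CREATE DATABASE mysql_db_b
ENGINE = MySQL('mysql1:3306', test_database, 'root', 'clickhouse');

-- The host, user and password arguments must remain literals; passing an
-- identifier there still raises
--   "Database engine MySQL requested literal argument."
```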
@@ -19,7 +19,7 @@ You cannot perform the following queries: ``` sql CREATE DATABASE [IF NOT EXISTS] db_name [ON CLUSTER cluster] -ENGINE = MySQL('host:port', 'database', 'user', 'password') +ENGINE = MySQL('host:port', ['database' | database], 'user', 'password') ``` **Engine Parameters** diff --git a/docs/ru/database_engines/mysql.md b/docs/ru/database_engines/mysql.md index 420ca370297..45547407be6 100644 --- a/docs/ru/database_engines/mysql.md +++ b/docs/ru/database_engines/mysql.md @@ -6,8 +6,6 @@ Не поддерживаемые виды запросов: -- `ATTACH`/`DETACH` -- `DROP` - `RENAME` - `CREATE TABLE` - `ALTER` @@ -16,7 +14,7 @@ ``` sql CREATE DATABASE [IF NOT EXISTS] db_name [ON CLUSTER cluster] -ENGINE = MySQL('host:port', 'database', 'user', 'password') +ENGINE = MySQL('host:port', ['database' | database], 'user', 'password') ``` **Параметры движка** diff --git a/docs/zh/engines/database_engines/mysql.md b/docs/zh/engines/database_engines/mysql.md index 78844154bce..80ff82ec2d3 100644 --- a/docs/zh/engines/database_engines/mysql.md +++ b/docs/zh/engines/database_engines/mysql.md @@ -7,8 +7,6 @@ MySQL引擎用于将远程的MySQL服务器中的表映射到ClickHouse中,并 但您无法对其执行以下操作: -- `ATTACH`/`DETACH` -- `DROP` - `RENAME` - `CREATE TABLE` - `ALTER` @@ -17,7 +15,7 @@ MySQL引擎用于将远程的MySQL服务器中的表映射到ClickHouse中,并 ``` sql CREATE DATABASE [IF NOT EXISTS] db_name [ON CLUSTER cluster] -ENGINE = MySQL('host:port', 'database', 'user', 'password') +ENGINE = MySQL('host:port', ['database' | database], 'user', 'password') ``` **MySQL数据库引擎参数** From 9eb96b87db4ae5b1fba90640aa65205ad1fc8379 Mon Sep 17 00:00:00 2001 From: zhang2014 Date: Wed, 8 Apr 2020 13:41:11 +0800 Subject: [PATCH 211/484] ISSUES-10056 reused evaluateConstantExpressionOrIdentifierAsLiteral --- src/Databases/DatabaseFactory.cpp | 30 +++++++------------ .../test_mysql_database_engine/test.py | 18 +++++++---- 2 files changed, 22 insertions(+), 26 deletions(-) diff --git a/src/Databases/DatabaseFactory.cpp b/src/Databases/DatabaseFactory.cpp index b6300ab3482..f1cea04dc29 100644 --- a/src/Databases/DatabaseFactory.cpp +++ b/src/Databases/DatabaseFactory.cpp @@ -16,6 +16,7 @@ #if USE_MYSQL #include +#include #endif @@ -51,7 +52,7 @@ DatabasePtr DatabaseFactory::get( } template -static inline ValueType getLiteralValue(const ASTPtr & ast, const String & engine_name) +static inline ValueType safeGetLiteralValue(const ASTPtr &ast, const String &engine_name) { if (!ast || !ast->as()) throw Exception("Database engine " + engine_name + " requested literal argument.", ErrorCodes::BAD_ARGUMENTS); @@ -59,19 +60,6 @@ static inline ValueType getLiteralValue(const ASTPtr & ast, const String & engin return ast->as()->value.safeGet(); } -[[maybe_unused]] static inline String getIdentifierOrStringLiteral(const ASTPtr & ast, const String & engine_name) -{ - if (ast) - { - if (const auto & literal = ast->as()) - return literal->value.safeGet(); - else if (const auto & identifier = ast->as()) - return identifier->name; - } - - throw Exception("Database engine " + engine_name + " requested literal or identifier argument.", ErrorCodes::BAD_ARGUMENTS); -} - DatabasePtr DatabaseFactory::getImpl( const String & database_name, const String & metadata_path, const ASTStorage * engine_define, Context & context) { @@ -103,11 +91,13 @@ DatabasePtr DatabaseFactory::getImpl( ErrorCodes::BAD_ARGUMENTS); - const auto & arguments = engine->arguments->children; - const auto & host_name_and_port = getLiteralValue(arguments[0], "MySQL"); - const auto & database_name_in_mysql = getIdentifierOrStringLiteral(arguments[1], 
"MySQL"); - const auto & mysql_user_name = getLiteralValue(arguments[2], "MySQL"); - const auto & mysql_user_password = getLiteralValue(arguments[3], "MySQL"); + ASTs & arguments = engine->arguments->children; + arguments[1] = evaluateConstantExpressionOrIdentifierAsLiteral(arguments[1], context); + + const auto & host_name_and_port = safeGetLiteralValue(arguments[0], "MySQL"); + const auto & database_name_in_mysql = safeGetLiteralValue(arguments[1], "MySQL"); + const auto & mysql_user_name = safeGetLiteralValue(arguments[2], "MySQL"); + const auto & mysql_user_password = safeGetLiteralValue(arguments[3], "MySQL"); try { @@ -138,7 +128,7 @@ DatabasePtr DatabaseFactory::getImpl( const auto & arguments = engine->arguments->children; - const auto cache_expiration_time_seconds = getLiteralValue(arguments[0], "Lazy"); + const auto cache_expiration_time_seconds = safeGetLiteralValue(arguments[0], "Lazy"); return std::make_shared(database_name, metadata_path, cache_expiration_time_seconds, context); } diff --git a/tests/integration/test_mysql_database_engine/test.py b/tests/integration/test_mysql_database_engine/test.py index 42663e46752..2791cc7b382 100644 --- a/tests/integration/test_mysql_database_engine/test.py +++ b/tests/integration/test_mysql_database_engine/test.py @@ -5,6 +5,7 @@ import pymysql.cursors import pytest from helpers.cluster import ClickHouseCluster +from helpers.client import QueryRuntimeException cluster = ClickHouseCluster(__file__) clickhouse_node = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml'], with_mysql=True) @@ -116,12 +117,17 @@ def test_clickhouse_join_for_mysql_database(started_cluster): clickhouse_node.query("CREATE TABLE default.t1_remote_mysql AS mysql('mysql1:3306','test','t1_mysql_local','root','clickhouse')") clickhouse_node.query("CREATE TABLE default.t2_remote_mysql AS mysql('mysql1:3306','test','t2_mysql_local','root','clickhouse')") assert clickhouse_node.query("SELECT s.pays " - "FROM default.t1_remote_mysql AS s " - "LEFT JOIN default.t1_remote_mysql AS s_ref " - "ON (s_ref.opco = s.opco AND s_ref.service = s.service)") == '' + "FROM default.t1_remote_mysql AS s " + "LEFT JOIN default.t1_remote_mysql AS s_ref " + "ON (s_ref.opco = s.opco AND s_ref.service = s.service)") == '' mysql_node.query("DROP DATABASE test") + def test_bad_arguments_for_mysql_database_engine(started_cluster): - assert clickhouse_node.query( - "CREATE TABLE default.t1_remote_mysql AS mysql('mysql1:3306', 'test', 't1_mysql_local', root, 'clickhouse')").find( - 'Database engine MySQL requested literal argument.') != -1 + with contextlib.closing(MySQLNodeInstance('root', 'clickhouse', '127.0.0.1', port=3308)) as mysql_node: + with pytest.raises(QueryRuntimeException) as exception: + mysql_node.query("CREATE DATABASE IF NOT EXISTS test_bad_arguments DEFAULT CHARACTER SET 'utf8'") + clickhouse_node.query("CREATE DATABASE test_database ENGINE = MySQL('mysql1:3306', test_bad_arguments, root, 'clickhouse')") + + assert 'Database engine MySQL requested literal argument.' in str(exception.value) + mysql_node.query("DROP DATABASE test_bad_arguments") From 3b4682d1791716510847a7ffba6e1a6179687250 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Wed, 8 Apr 2020 20:46:48 +0300 Subject: [PATCH 212/484] Updated 01232_preparing_sets_race_condition. 
--- .../01232_preparing_sets_race_condition.reference | 9 --------- .../0_stateless/01232_preparing_sets_race_condition.sh | 2 +- 2 files changed, 1 insertion(+), 10 deletions(-) diff --git a/tests/queries/0_stateless/01232_preparing_sets_race_condition.reference b/tests/queries/0_stateless/01232_preparing_sets_race_condition.reference index 2fc36ed5c97..45a4fb75db8 100644 --- a/tests/queries/0_stateless/01232_preparing_sets_race_condition.reference +++ b/tests/queries/0_stateless/01232_preparing_sets_race_condition.reference @@ -1,10 +1 @@ 8 -8 -8 -8 -8 -8 -8 -8 -8 -8 diff --git a/tests/queries/0_stateless/01232_preparing_sets_race_condition.sh b/tests/queries/0_stateless/01232_preparing_sets_race_condition.sh index 5f7b76c0e99..25a8cdb12ea 100755 --- a/tests/queries/0_stateless/01232_preparing_sets_race_condition.sh +++ b/tests/queries/0_stateless/01232_preparing_sets_race_condition.sh @@ -18,7 +18,7 @@ echo " insert into tableB select number, number % 100000, addDays(toDate('2020-01-01'), number % 90) from numbers(50000000); " | $CLICKHOUSE_CLIENT -n -for i in {1..10}; do echo " +for i in {1..1}; do echo " SELECT tableName FROM ( From 824255f603abbf1363c6bfe2e9a078011f9131d8 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Wed, 8 Apr 2020 11:18:35 +0300 Subject: [PATCH 213/484] Update tests for multiline SHOW CREATE The following has been used for this: git grep '^CREATE' tests/queries/**.reference | cut -d: -f1 | sort -u | xargs -n1 -i sh -c 'show-create-rewrite.py < {} | sponge {}' show-create-rewrite.py is available here: https://gist.github.com/azat/916b98b5ddf9573f7dc9a4dce33b59b5 And for 00998_constraints_all_tables test FORMAT TSVRaw I simply drop. --- .../00061_merge_tree_alter.reference | 20 ++++---- ...cated_merge_tree_alter_zookeeper.reference | 48 +++++++++---------- ...00564_temporary_table_management.reference | 2 +- .../00599_create_view_with_subquery.reference | 2 +- .../00604_show_create_database.reference | 2 +- .../queries/0_stateless/00642_cast.reference | 9 +++- .../00643_cast_zookeeper.reference | 9 +++- .../00725_comment_columns.reference | 12 ++--- .../00725_ipv4_ipv6_domains.reference | 4 +- ...51_default_databasename_for_view.reference | 27 ++++++++++- .../00753_comment_columns_zookeeper.reference | 4 +- ...4_alter_modify_column_partitions.reference | 4 +- .../00754_alter_modify_order_by.reference | 2 +- ...fy_order_by_replicated_zookeeper.reference | 4 +- ...4_test_custom_compression_codecs.reference | 4 +- ...m_compression_codes_log_storages.reference | 8 ++-- .../0_stateless/00836_indices_alter.reference | 10 ++-- ...dices_alter_replicated_zookeeper.reference | 24 +++++----- ...om_compression_codecs_replicated.reference | 2 +- .../00916_create_or_replace_view.reference | 4 +- .../0_stateless/00933_alter_ttl.reference | 2 +- .../00933_ttl_replicated_zookeeper.reference | 2 +- .../0_stateless/00933_ttl_simple.reference | 8 ++-- .../00980_merge_alter_settings.reference | 10 ++-- ...keeper_merge_tree_alter_settings.reference | 12 ++--- .../00998_constraints_all_tables.reference | 4 +- .../00998_constraints_all_tables.sql | 4 +- .../01018_ddl_dictionaries_create.reference | 2 +- ...age_odbc_parsing_exception_check.reference | 2 +- .../01055_compact_parts_1.reference | 4 +- .../01069_database_memory.reference | 4 +- .../01070_alter_with_ttl.reference | 4 +- .../01079_alter_default_zookeeper.reference | 16 +++---- .../01079_bad_alters_zookeeper.reference | 4 +- ..._expressions_in_engine_arguments.reference | 14 +++--- ...tionary_layout_without_arguments.reference 
| 2 +- ...13_alter_rename_column_zookeeper.reference | 4 +- .../01213_alter_rename_nested.reference | 6 +-- ...er_rename_with_default_zookeeper.reference | 10 ++-- .../01213_alter_table_rename_nested.reference | 4 +- 40 files changed, 179 insertions(+), 140 deletions(-) diff --git a/tests/queries/0_stateless/00061_merge_tree_alter.reference b/tests/queries/0_stateless/00061_merge_tree_alter.reference index 571affd7231..b609bc257f1 100644 --- a/tests/queries/0_stateless/00061_merge_tree_alter.reference +++ b/tests/queries/0_stateless/00061_merge_tree_alter.reference @@ -1,14 +1,14 @@ d Date k UInt64 i32 Int32 -CREATE TABLE default.alter_00061 (`d` Date, `k` UInt64, `i32` Int32) ENGINE = MergeTree(d, k, 8192) +CREATE TABLE default.alter_00061\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32\n)\nENGINE = MergeTree(d, k, 8192) 2015-01-01 10 42 d Date k UInt64 i32 Int32 n.ui8 Array(UInt8) n.s Array(String) -CREATE TABLE default.alter_00061 (`d` Date, `k` UInt64, `i32` Int32, `n.ui8` Array(UInt8), `n.s` Array(String)) ENGINE = MergeTree(d, k, 8192) +CREATE TABLE default.alter_00061\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `n.ui8` Array(UInt8), \n `n.s` Array(String)\n)\nENGINE = MergeTree(d, k, 8192) 2015-01-01 8 40 [1,2,3] ['12','13','14'] 2015-01-01 10 42 [] [] d Date @@ -17,7 +17,7 @@ i32 Int32 n.ui8 Array(UInt8) n.s Array(String) n.d Array(Date) -CREATE TABLE default.alter_00061 (`d` Date, `k` UInt64, `i32` Int32, `n.ui8` Array(UInt8), `n.s` Array(String), `n.d` Array(Date)) ENGINE = MergeTree(d, k, 8192) +CREATE TABLE default.alter_00061\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `n.ui8` Array(UInt8), \n `n.s` Array(String), \n `n.d` Array(Date)\n)\nENGINE = MergeTree(d, k, 8192) 2015-01-01 7 39 [10,20,30] ['120','130','140'] ['2000-01-01','2000-01-01','2000-01-03'] 2015-01-01 8 40 [1,2,3] ['12','13','14'] ['0000-00-00','0000-00-00','0000-00-00'] 2015-01-01 10 42 [] [] [] @@ -28,7 +28,7 @@ n.ui8 Array(UInt8) n.s Array(String) n.d Array(Date) s String DEFAULT \'0\' -CREATE TABLE default.alter_00061 (`d` Date, `k` UInt64, `i32` Int32, `n.ui8` Array(UInt8), `n.s` Array(String), `n.d` Array(Date), `s` String DEFAULT \'0\') ENGINE = MergeTree(d, k, 8192) +CREATE TABLE default.alter_00061\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `n.ui8` Array(UInt8), \n `n.s` Array(String), \n `n.d` Array(Date), \n `s` String DEFAULT \'0\'\n)\nENGINE = MergeTree(d, k, 8192) 2015-01-01 6 38 [10,20,30] ['asd','qwe','qwe'] ['2000-01-01','2000-01-01','2000-01-03'] 100500 2015-01-01 7 39 [10,20,30] ['120','130','140'] ['2000-01-01','2000-01-01','2000-01-03'] 0 2015-01-01 8 40 [1,2,3] ['12','13','14'] ['0000-00-00','0000-00-00','0000-00-00'] 0 @@ -39,7 +39,7 @@ i32 Int32 n.ui8 Array(UInt8) n.s Array(String) s Int64 DEFAULT \'0\' -CREATE TABLE default.alter_00061 (`d` Date, `k` UInt64, `i32` Int32, `n.ui8` Array(UInt8), `n.s` Array(String), `s` Int64 DEFAULT \'0\') ENGINE = MergeTree(d, k, 8192) +CREATE TABLE default.alter_00061\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `n.ui8` Array(UInt8), \n `n.s` Array(String), \n `s` Int64 DEFAULT \'0\'\n)\nENGINE = MergeTree(d, k, 8192) 2015-01-01 6 38 [10,20,30] ['asd','qwe','qwe'] 100500 2015-01-01 7 39 [10,20,30] ['120','130','140'] 0 2015-01-01 8 40 [1,2,3] ['12','13','14'] 0 @@ -51,7 +51,7 @@ n.ui8 Array(UInt8) n.s Array(String) s UInt32 DEFAULT \'0\' n.d Array(Date) -CREATE TABLE default.alter_00061 (`d` Date, `k` UInt64, `i32` Int32, `n.ui8` Array(UInt8), `n.s` Array(String), `s` UInt32 DEFAULT \'0\', `n.d` Array(Date)) ENGINE = MergeTree(d, k, 
8192) +CREATE TABLE default.alter_00061\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `n.ui8` Array(UInt8), \n `n.s` Array(String), \n `s` UInt32 DEFAULT \'0\', \n `n.d` Array(Date)\n)\nENGINE = MergeTree(d, k, 8192) 2015-01-01 6 38 [10,20,30] ['asd','qwe','qwe'] 100500 ['0000-00-00','0000-00-00','0000-00-00'] 2015-01-01 7 39 [10,20,30] ['120','130','140'] 0 ['0000-00-00','0000-00-00','0000-00-00'] 2015-01-01 8 40 [1,2,3] ['12','13','14'] 0 ['0000-00-00','0000-00-00','0000-00-00'] @@ -65,7 +65,7 @@ k UInt64 i32 Int32 n.s Array(String) s UInt32 DEFAULT \'0\' -CREATE TABLE default.alter_00061 (`d` Date, `k` UInt64, `i32` Int32, `n.s` Array(String), `s` UInt32 DEFAULT \'0\') ENGINE = MergeTree(d, k, 8192) +CREATE TABLE default.alter_00061\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `n.s` Array(String), \n `s` UInt32 DEFAULT \'0\'\n)\nENGINE = MergeTree(d, k, 8192) 2015-01-01 6 38 ['asd','qwe','qwe'] 100500 2015-01-01 7 39 ['120','130','140'] 0 2015-01-01 8 40 ['12','13','14'] 0 @@ -74,7 +74,7 @@ d Date k UInt64 i32 Int32 s UInt32 DEFAULT \'0\' -CREATE TABLE default.alter_00061 (`d` Date, `k` UInt64, `i32` Int32, `s` UInt32 DEFAULT \'0\') ENGINE = MergeTree(d, k, 8192) +CREATE TABLE default.alter_00061\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `s` UInt32 DEFAULT \'0\'\n)\nENGINE = MergeTree(d, k, 8192) 2015-01-01 6 38 100500 2015-01-01 7 39 0 2015-01-01 8 40 0 @@ -85,7 +85,7 @@ i32 Int32 s UInt32 DEFAULT \'0\' n.s Array(String) n.d Array(Date) -CREATE TABLE default.alter_00061 (`d` Date, `k` UInt64, `i32` Int32, `s` UInt32 DEFAULT \'0\', `n.s` Array(String), `n.d` Array(Date)) ENGINE = MergeTree(d, k, 8192) +CREATE TABLE default.alter_00061\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `s` UInt32 DEFAULT \'0\', \n `n.s` Array(String), \n `n.d` Array(Date)\n)\nENGINE = MergeTree(d, k, 8192) 2015-01-01 6 38 100500 [] [] 2015-01-01 7 39 0 [] [] 2015-01-01 8 40 0 [] [] @@ -94,7 +94,7 @@ d Date k UInt64 i32 Int32 s UInt32 DEFAULT \'0\' -CREATE TABLE default.alter_00061 (`d` Date, `k` UInt64, `i32` Int32, `s` UInt32 DEFAULT \'0\') ENGINE = MergeTree(d, k, 8192) +CREATE TABLE default.alter_00061\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `s` UInt32 DEFAULT \'0\'\n)\nENGINE = MergeTree(d, k, 8192) 2015-01-01 6 38 100500 2015-01-01 7 39 0 2015-01-01 8 40 0 diff --git a/tests/queries/0_stateless/00062_replicated_merge_tree_alter_zookeeper.reference b/tests/queries/0_stateless/00062_replicated_merge_tree_alter_zookeeper.reference index 6f2eb080286..fa5e65d2d60 100644 --- a/tests/queries/0_stateless/00062_replicated_merge_tree_alter_zookeeper.reference +++ b/tests/queries/0_stateless/00062_replicated_merge_tree_alter_zookeeper.reference @@ -1,22 +1,22 @@ d Date k UInt64 i32 Int32 -CREATE TABLE test.replicated_alter1 (`d` Date, `k` UInt64, `i32` Int32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) +CREATE TABLE test.replicated_alter1\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) d Date k UInt64 i32 Int32 -CREATE TABLE test.replicated_alter2 (`d` Date, `k` UInt64, `i32` Int32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) +CREATE TABLE test.replicated_alter2\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) 2015-01-01 10 42 d Date k UInt64 i32 Int32 dt DateTime -CREATE TABLE test.replicated_alter1 (`d` Date, `k` UInt64, `i32` Int32, `dt` 
DateTime) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) +CREATE TABLE test.replicated_alter1\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) d Date k UInt64 i32 Int32 dt DateTime -CREATE TABLE test.replicated_alter2 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) +CREATE TABLE test.replicated_alter2\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) 2015-01-01 9 41 1992-01-01 08:00:00 2015-01-01 10 42 0000-00-00 00:00:00 d Date @@ -25,14 +25,14 @@ i32 Int32 dt DateTime n.ui8 Array(UInt8) n.s Array(String) -CREATE TABLE test.replicated_alter1 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `n.ui8` Array(UInt8), `n.s` Array(String)) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) +CREATE TABLE test.replicated_alter1\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime, \n `n.ui8` Array(UInt8), \n `n.s` Array(String)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) d Date k UInt64 i32 Int32 dt DateTime n.ui8 Array(UInt8) n.s Array(String) -CREATE TABLE test.replicated_alter2 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `n.ui8` Array(UInt8), `n.s` Array(String)) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) +CREATE TABLE test.replicated_alter2\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime, \n `n.ui8` Array(UInt8), \n `n.s` Array(String)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) 2015-01-01 8 40 2012-12-12 12:12:12 [1,2,3] ['12','13','14'] 2015-01-01 9 41 1992-01-01 08:00:00 [] [] 2015-01-01 10 42 0000-00-00 00:00:00 [] [] @@ -43,7 +43,7 @@ dt DateTime n.ui8 Array(UInt8) n.s Array(String) n.d Array(Date) -CREATE TABLE test.replicated_alter1 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), `n.d` Array(Date)) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) +CREATE TABLE test.replicated_alter1\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime, \n `n.ui8` Array(UInt8), \n `n.s` Array(String), \n `n.d` Array(Date)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) d Date k UInt64 i32 Int32 @@ -51,7 +51,7 @@ dt DateTime n.ui8 Array(UInt8) n.s Array(String) n.d Array(Date) -CREATE TABLE test.replicated_alter2 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), `n.d` Array(Date)) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) +CREATE TABLE test.replicated_alter2\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime, \n `n.ui8` Array(UInt8), \n `n.s` Array(String), \n `n.d` Array(Date)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) 2015-01-01 7 39 2014-07-14 13:26:50 [10,20,30] ['120','130','140'] ['2000-01-01','2000-01-01','2000-01-03'] 2015-01-01 8 40 2012-12-12 12:12:12 [1,2,3] ['12','13','14'] ['0000-00-00','0000-00-00','0000-00-00'] 2015-01-01 9 41 1992-01-01 08:00:00 [] [] [] @@ -64,7 +64,7 @@ n.ui8 Array(UInt8) n.s Array(String) n.d Array(Date) s String DEFAULT \'0\' -CREATE TABLE test.replicated_alter1 (`d` Date, `k` UInt64, 
`i32` Int32, `dt` DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), `n.d` Array(Date), `s` String DEFAULT \'0\') ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) +CREATE TABLE test.replicated_alter1\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime, \n `n.ui8` Array(UInt8), \n `n.s` Array(String), \n `n.d` Array(Date), \n `s` String DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) d Date k UInt64 i32 Int32 @@ -73,7 +73,7 @@ n.ui8 Array(UInt8) n.s Array(String) n.d Array(Date) s String DEFAULT \'0\' -CREATE TABLE test.replicated_alter2 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), `n.d` Array(Date), `s` String DEFAULT \'0\') ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) +CREATE TABLE test.replicated_alter2\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime, \n `n.ui8` Array(UInt8), \n `n.s` Array(String), \n `n.d` Array(Date), \n `s` String DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) 2015-01-01 6 38 2014-07-15 13:26:50 [10,20,30] ['asd','qwe','qwe'] ['2000-01-01','2000-01-01','2000-01-03'] 100500 2015-01-01 7 39 2014-07-14 13:26:50 [10,20,30] ['120','130','140'] ['2000-01-01','2000-01-01','2000-01-03'] 0 2015-01-01 8 40 2012-12-12 12:12:12 [1,2,3] ['12','13','14'] ['0000-00-00','0000-00-00','0000-00-00'] 0 @@ -86,7 +86,7 @@ dt DateTime n.ui8 Array(UInt8) n.s Array(String) s Int64 DEFAULT \'0\' -CREATE TABLE test.replicated_alter1 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), `s` Int64 DEFAULT \'0\') ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) +CREATE TABLE test.replicated_alter1\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime, \n `n.ui8` Array(UInt8), \n `n.s` Array(String), \n `s` Int64 DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) d Date k UInt64 i32 Int32 @@ -94,7 +94,7 @@ dt DateTime n.ui8 Array(UInt8) n.s Array(String) s Int64 DEFAULT \'0\' -CREATE TABLE test.replicated_alter2 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), `s` Int64 DEFAULT \'0\') ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) +CREATE TABLE test.replicated_alter2\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime, \n `n.ui8` Array(UInt8), \n `n.s` Array(String), \n `s` Int64 DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) 2015-01-01 6 38 2014-07-15 13:26:50 [10,20,30] ['asd','qwe','qwe'] 100500 2015-01-01 7 39 2014-07-14 13:26:50 [10,20,30] ['120','130','140'] 0 2015-01-01 8 40 2012-12-12 12:12:12 [1,2,3] ['12','13','14'] 0 @@ -108,7 +108,7 @@ n.ui8 Array(UInt8) n.s Array(String) s UInt32 DEFAULT \'0\' n.d Array(Date) -CREATE TABLE test.replicated_alter1 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), `s` UInt32 DEFAULT \'0\', `n.d` Array(Date)) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) +CREATE TABLE test.replicated_alter1\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime, \n `n.ui8` Array(UInt8), \n `n.s` Array(String), \n `s` UInt32 DEFAULT \'0\', \n `n.d` Array(Date)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) d 
Date k UInt64 i32 Int32 @@ -117,7 +117,7 @@ n.ui8 Array(UInt8) n.s Array(String) s UInt32 DEFAULT \'0\' n.d Array(Date) -CREATE TABLE test.replicated_alter2 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), `s` UInt32 DEFAULT \'0\', `n.d` Array(Date)) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) +CREATE TABLE test.replicated_alter2\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime, \n `n.ui8` Array(UInt8), \n `n.s` Array(String), \n `s` UInt32 DEFAULT \'0\', \n `n.d` Array(Date)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) 2015-01-01 6 38 2014-07-15 13:26:50 [10,20,30] ['asd','qwe','qwe'] 100500 ['0000-00-00','0000-00-00','0000-00-00'] 2015-01-01 7 39 2014-07-14 13:26:50 [10,20,30] ['120','130','140'] 0 ['0000-00-00','0000-00-00','0000-00-00'] 2015-01-01 8 40 2012-12-12 12:12:12 [1,2,3] ['12','13','14'] 0 ['0000-00-00','0000-00-00','0000-00-00'] @@ -129,14 +129,14 @@ i32 Int32 dt DateTime n.s Array(String) s UInt32 DEFAULT \'0\' -CREATE TABLE test.replicated_alter1 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `n.s` Array(String), `s` UInt32 DEFAULT \'0\') ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) +CREATE TABLE test.replicated_alter1\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime, \n `n.s` Array(String), \n `s` UInt32 DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) d Date k UInt64 i32 Int32 dt DateTime n.s Array(String) s UInt32 DEFAULT \'0\' -CREATE TABLE test.replicated_alter2 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `n.s` Array(String), `s` UInt32 DEFAULT \'0\') ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) +CREATE TABLE test.replicated_alter2\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime, \n `n.s` Array(String), \n `s` UInt32 DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) 2015-01-01 6 38 2014-07-15 13:26:50 ['asd','qwe','qwe'] 100500 2015-01-01 7 39 2014-07-14 13:26:50 ['120','130','140'] 0 2015-01-01 8 40 2012-12-12 12:12:12 ['12','13','14'] 0 @@ -147,13 +147,13 @@ k UInt64 i32 Int32 dt DateTime s UInt32 DEFAULT \'0\' -CREATE TABLE test.replicated_alter1 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `s` UInt32 DEFAULT \'0\') ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) +CREATE TABLE test.replicated_alter1\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime, \n `s` UInt32 DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) d Date k UInt64 i32 Int32 dt DateTime s UInt32 DEFAULT \'0\' -CREATE TABLE test.replicated_alter2 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `s` UInt32 DEFAULT \'0\') ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) +CREATE TABLE test.replicated_alter2\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime, \n `s` UInt32 DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) 2015-01-01 6 38 2014-07-15 13:26:50 100500 2015-01-01 7 39 2014-07-14 13:26:50 0 2015-01-01 8 40 2012-12-12 12:12:12 0 @@ -166,7 +166,7 @@ dt DateTime s UInt32 DEFAULT \'0\' n.s Array(String) n.d Array(Date) -CREATE TABLE test.replicated_alter1 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `s` UInt32 DEFAULT \'0\', `n.s` 
Array(String), `n.d` Array(Date)) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
+CREATE TABLE test.replicated_alter1\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime, \n `s` UInt32 DEFAULT \'0\', \n `n.s` Array(String), \n `n.d` Array(Date)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
 d Date
 k UInt64
 i32 Int32
@@ -174,7 +174,7 @@ dt DateTime
 s UInt32 DEFAULT \'0\'
 n.s Array(String)
 n.d Array(Date)
-CREATE TABLE test.replicated_alter2 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `s` UInt32 DEFAULT \'0\', `n.s` Array(String), `n.d` Array(Date)) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
+CREATE TABLE test.replicated_alter2\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime, \n `s` UInt32 DEFAULT \'0\', \n `n.s` Array(String), \n `n.d` Array(Date)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
 2015-01-01 6 38 2014-07-15 13:26:50 100500 [] []
 2015-01-01 7 39 2014-07-14 13:26:50 0 [] []
 2015-01-01 8 40 2012-12-12 12:12:12 0 [] []
@@ -185,13 +185,13 @@ k UInt64
 i32 Int32
 dt DateTime
 s UInt32 DEFAULT \'0\'
-CREATE TABLE test.replicated_alter1 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `s` UInt32 DEFAULT \'0\') ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
+CREATE TABLE test.replicated_alter1\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime, \n `s` UInt32 DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
 d Date
 k UInt64
 i32 Int32
 dt DateTime
 s UInt32 DEFAULT \'0\'
-CREATE TABLE test.replicated_alter2 (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `s` UInt32 DEFAULT \'0\') ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
+CREATE TABLE test.replicated_alter2\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime, \n `s` UInt32 DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
 2015-01-01 6 38 2014-07-15 13:26:50 100500
 2015-01-01 7 39 2014-07-14 13:26:50 0
 2015-01-01 8 40 2012-12-12 12:12:12 0
@@ -202,13 +202,13 @@ k UInt64
 i32 Int32
 dt Date
 s DateTime DEFAULT \'0000-00-00 00:00:00\'
-CREATE TABLE test.replicated_alter1 (`d` Date, `k` UInt64, `i32` Int32, `dt` Date, `s` DateTime DEFAULT \'0000-00-00 00:00:00\') ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
+CREATE TABLE test.replicated_alter1\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` Date, \n `s` DateTime DEFAULT \'0000-00-00 00:00:00\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
 d Date
 k UInt64
 i32 Int32
 dt Date
 s DateTime DEFAULT \'0000-00-00 00:00:00\'
-CREATE TABLE test.replicated_alter2 (`d` Date, `k` UInt64, `i32` Int32, `dt` Date, `s` DateTime DEFAULT \'0000-00-00 00:00:00\') ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
+CREATE TABLE test.replicated_alter2\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` Date, \n `s` DateTime DEFAULT \'0000-00-00 00:00:00\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
 2015-01-01 6 38 2014-07-15 1970-01-02 06:55:00
 2015-01-01 7 39 2014-07-14 0000-00-00 00:00:00
 2015-01-01 8 40 2012-12-12 0000-00-00 00:00:00
diff --git a/tests/queries/0_stateless/00564_temporary_table_management.reference b/tests/queries/0_stateless/00564_temporary_table_management.reference
index edd17b9ea39..4cfb4230223 100644
--- a/tests/queries/0_stateless/00564_temporary_table_management.reference
+++ b/tests/queries/0_stateless/00564_temporary_table_management.reference
@@ -1,4 +1,4 @@
 1
-CREATE TEMPORARY TABLE temp_tab (`number` UInt64) ENGINE = Memory
+CREATE TEMPORARY TABLE temp_tab\n(\n `number` UInt64\n)\nENGINE = Memory
 temp_tab
 0
diff --git a/tests/queries/0_stateless/00599_create_view_with_subquery.reference b/tests/queries/0_stateless/00599_create_view_with_subquery.reference
index 13e0f35b075..d83d2837a18 100644
--- a/tests/queries/0_stateless/00599_create_view_with_subquery.reference
+++ b/tests/queries/0_stateless/00599_create_view_with_subquery.reference
@@ -1 +1 @@
-CREATE VIEW default.test_view_00599 (`id` UInt64) AS SELECT * FROM default.test_00599 WHERE id = (SELECT 1)
+CREATE VIEW default.test_view_00599\n(\n `id` UInt64\n) AS\nSELECT *\nFROM default.test_00599\nWHERE id = \n(\n SELECT 1\n)
diff --git a/tests/queries/0_stateless/00604_show_create_database.reference b/tests/queries/0_stateless/00604_show_create_database.reference
index 1fe93a5e393..a9ad6abea25 100644
--- a/tests/queries/0_stateless/00604_show_create_database.reference
+++ b/tests/queries/0_stateless/00604_show_create_database.reference
@@ -1 +1 @@
-CREATE DATABASE test_00604 ENGINE = Ordinary
+CREATE DATABASE test_00604\nENGINE = Ordinary
diff --git a/tests/queries/0_stateless/00642_cast.reference b/tests/queries/0_stateless/00642_cast.reference
index f75503efffe..907861c1784 100644
--- a/tests/queries/0_stateless/00642_cast.reference
+++ b/tests/queries/0_stateless/00642_cast.reference
@@ -7,7 +7,14 @@ hello
 hello
 hello
 1970-01-01 00:00:01
-CREATE TABLE default.cast (`x` UInt8, `e` Enum8('hello' = 1, 'world' = 2) DEFAULT CAST(x, 'Enum8(\'hello\' = 1, \'world\' = 2)')) ENGINE = MergeTree ORDER BY e SETTINGS index_granularity = 8192
+CREATE TABLE default.cast
+(
+ `x` UInt8,
+ `e` Enum8('hello' = 1, 'world' = 2) DEFAULT CAST(x, 'Enum8(\'hello\' = 1, \'world\' = 2)')
+)
+ENGINE = MergeTree
+ORDER BY e
+SETTINGS index_granularity = 8192
 x UInt8
 e Enum8(\'hello\' = 1, \'world\' = 2) DEFAULT CAST(x, \'Enum8(\\\'hello\\\' = 1, \\\'world\\\' = 2)\')
 1 hello
diff --git a/tests/queries/0_stateless/00643_cast_zookeeper.reference b/tests/queries/0_stateless/00643_cast_zookeeper.reference
index 86a8b164844..b79eb07aee3 100644
--- a/tests/queries/0_stateless/00643_cast_zookeeper.reference
+++ b/tests/queries/0_stateless/00643_cast_zookeeper.reference
@@ -1,4 +1,11 @@
-CREATE TABLE test.cast1 (`x` UInt8, `e` Enum8('hello' = 1, 'world' = 2) DEFAULT CAST(x, 'Enum8(\'hello\' = 1, \'world\' = 2)')) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_cast', 'r1') ORDER BY e SETTINGS index_granularity = 8192
+CREATE TABLE test.cast1
+(
+ `x` UInt8,
+ `e` Enum8('hello' = 1, 'world' = 2) DEFAULT CAST(x, 'Enum8(\'hello\' = 1, \'world\' = 2)')
+)
+ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_cast', 'r1')
+ORDER BY e
+SETTINGS index_granularity = 8192
 x UInt8
 e Enum8(\'hello\' = 1, \'world\' = 2) DEFAULT CAST(x, \'Enum8(\\\'hello\\\' = 1, \\\'world\\\' = 2)\')
 1 hello
diff --git a/tests/queries/0_stateless/00725_comment_columns.reference b/tests/queries/0_stateless/00725_comment_columns.reference
index 7204496753c..86794581daf 100644
--- a/tests/queries/0_stateless/00725_comment_columns.reference
+++ b/tests/queries/0_stateless/00725_comment_columns.reference
@@ -1,4 +1,4 @@
-CREATE TABLE default.check_query_comment_column (`first_column` UInt8 DEFAULT 1 COMMENT \'comment 1\', `second_column` UInt8 MATERIALIZED first_column COMMENT \'comment 2\', `third_column` UInt8 ALIAS second_column COMMENT \'comment 3\', `fourth_column` UInt8 COMMENT \'comment 4\', `fifth_column` UInt8) ENGINE = TinyLog
+CREATE TABLE default.check_query_comment_column\n(\n `first_column` UInt8 DEFAULT 1 COMMENT \'comment 1\', \n `second_column` UInt8 MATERIALIZED first_column COMMENT \'comment 2\', \n `third_column` UInt8 ALIAS second_column COMMENT \'comment 3\', \n `fourth_column` UInt8 COMMENT \'comment 4\', \n `fifth_column` UInt8\n)\nENGINE = TinyLog
 first_column UInt8 DEFAULT 1 comment 1
 second_column UInt8 MATERIALIZED first_column comment 2
 third_column UInt8 ALIAS second_column comment 3
@@ -11,7 +11,7 @@ fifth_column UInt8
 │ check_query_comment_column │ fourth_column │ comment 4 │
 │ check_query_comment_column │ fifth_column │ │
 └────────────────────────────┴───────────────┴───────────┘
-CREATE TABLE default.check_query_comment_column (`first_column` UInt8 DEFAULT 1 COMMENT \'comment 1_1\', `second_column` UInt8 MATERIALIZED first_column COMMENT \'comment 2_1\', `third_column` UInt8 ALIAS second_column COMMENT \'comment 3_1\', `fourth_column` UInt8 COMMENT \'comment 4_1\', `fifth_column` UInt8 COMMENT \'comment 5_1\') ENGINE = TinyLog
+CREATE TABLE default.check_query_comment_column\n(\n `first_column` UInt8 DEFAULT 1 COMMENT \'comment 1_1\', \n `second_column` UInt8 MATERIALIZED first_column COMMENT \'comment 2_1\', \n `third_column` UInt8 ALIAS second_column COMMENT \'comment 3_1\', \n `fourth_column` UInt8 COMMENT \'comment 4_1\', \n `fifth_column` UInt8 COMMENT \'comment 5_1\'\n)\nENGINE = TinyLog
 ┌─table──────────────────────┬─name──────────┬─comment─────┐
 │ check_query_comment_column │ first_column │ comment 1_2 │
 │ check_query_comment_column │ second_column │ comment 2_2 │
@@ -19,8 +19,8 @@ CREATE TABLE default.check_query_comment_column (`first_column` UInt8 DEFAULT 1
 │ check_query_comment_column │ fourth_column │ comment 4_2 │
 │ check_query_comment_column │ fifth_column │ comment 5_2 │
 └────────────────────────────┴───────────────┴─────────────┘
-CREATE TABLE default.check_query_comment_column (`first_column` UInt8 DEFAULT 1 COMMENT \'comment 1_2\', `second_column` UInt8 MATERIALIZED first_column COMMENT \'comment 2_2\', `third_column` UInt8 ALIAS second_column COMMENT \'comment 3_2\', `fourth_column` UInt8 COMMENT \'comment 4_2\', `fifth_column` UInt8 COMMENT \'comment 5_2\') ENGINE = TinyLog
-CREATE TABLE default.check_query_comment_column (`first_column` UInt8 COMMENT \'comment 1\', `second_column` UInt8 COMMENT \'comment 2\', `third_column` UInt8 COMMENT \'comment 3\') ENGINE = MergeTree() PARTITION BY second_column ORDER BY first_column SAMPLE BY first_column SETTINGS index_granularity = 8192
+CREATE TABLE default.check_query_comment_column\n(\n `first_column` UInt8 DEFAULT 1 COMMENT \'comment 1_2\', \n `second_column` UInt8 MATERIALIZED first_column COMMENT \'comment 2_2\', \n `third_column` UInt8 ALIAS second_column COMMENT \'comment 3_2\', \n `fourth_column` UInt8 COMMENT \'comment 4_2\', \n `fifth_column` UInt8 COMMENT \'comment 5_2\'\n)\nENGINE = TinyLog
+CREATE TABLE default.check_query_comment_column\n(\n `first_column` UInt8 COMMENT \'comment 1\', \n `second_column` UInt8 COMMENT \'comment 2\', \n `third_column` UInt8 COMMENT \'comment 3\'\n)\nENGINE = MergeTree()\nPARTITION BY second_column\nORDER BY first_column\nSAMPLE BY first_column\nSETTINGS index_granularity = 8192
 first_column UInt8 comment 1
 second_column UInt8 comment 2
 third_column UInt8 comment 3
@@ -29,8 +29,8 @@ third_column UInt8 comment 3
 │ check_query_comment_column │ second_column │ comment 2 │
 │ check_query_comment_column │ third_column │ comment 3 │
 └────────────────────────────┴───────────────┴───────────┘
-CREATE TABLE default.check_query_comment_column (`first_column` UInt8 COMMENT \'comment 1_2\', `second_column` UInt8 COMMENT \'comment 2_2\', `third_column` UInt8 COMMENT \'comment 3_2\') ENGINE = MergeTree() PARTITION BY second_column ORDER BY first_column SAMPLE BY first_column SETTINGS index_granularity = 8192
-CREATE TABLE default.check_query_comment_column (`first_column` UInt8 COMMENT \'comment 1_3\', `second_column` UInt8 COMMENT \'comment 2_3\', `third_column` UInt8 COMMENT \'comment 3_3\') ENGINE = MergeTree() PARTITION BY second_column ORDER BY first_column SAMPLE BY first_column SETTINGS index_granularity = 8192
+CREATE TABLE default.check_query_comment_column\n(\n `first_column` UInt8 COMMENT \'comment 1_2\', \n `second_column` UInt8 COMMENT \'comment 2_2\', \n `third_column` UInt8 COMMENT \'comment 3_2\'\n)\nENGINE = MergeTree()\nPARTITION BY second_column\nORDER BY first_column\nSAMPLE BY first_column\nSETTINGS index_granularity = 8192
+CREATE TABLE default.check_query_comment_column\n(\n `first_column` UInt8 COMMENT \'comment 1_3\', \n `second_column` UInt8 COMMENT \'comment 2_3\', \n `third_column` UInt8 COMMENT \'comment 3_3\'\n)\nENGINE = MergeTree()\nPARTITION BY second_column\nORDER BY first_column\nSAMPLE BY first_column\nSETTINGS index_granularity = 8192
 ┌─table──────────────────────┬─name──────────┬─comment─────┐
 │ check_query_comment_column │ first_column │ comment 1_3 │
 │ check_query_comment_column │ second_column │ comment 2_3 │
diff --git a/tests/queries/0_stateless/00725_ipv4_ipv6_domains.reference b/tests/queries/0_stateless/00725_ipv4_ipv6_domains.reference
index a31b4bd7308..28051d15f65 100644
--- a/tests/queries/0_stateless/00725_ipv4_ipv6_domains.reference
+++ b/tests/queries/0_stateless/00725_ipv4_ipv6_domains.reference
@@ -1,4 +1,4 @@
-CREATE TABLE default.ipv4_test (`ipv4_` IPv4) ENGINE = Memory
+CREATE TABLE default.ipv4_test\n(\n `ipv4_` IPv4\n)\nENGINE = Memory
 0.0.0.0 00
 8.8.8.8 08080808
 127.0.0.1 7F000001
@@ -10,7 +10,7 @@ CREATE TABLE default.ipv4_test (`ipv4_` IPv4) ENGINE = Memory
 > 127.0.0.1 255.255.255.255
 = 127.0.0.1 127.0.0.1
 euqality of IPv4-mapped IPv6 value and IPv4 promoted to IPv6 with function: 1
-CREATE TABLE default.ipv6_test (`ipv6_` IPv6) ENGINE = Memory
+CREATE TABLE default.ipv6_test\n(\n `ipv6_` IPv6\n)\nENGINE = Memory
 :: 00000000000000000000000000000000
 :: 00000000000000000000000000000000
 ::ffff:8.8.8.8 00000000000000000000FFFF08080808
diff --git a/tests/queries/0_stateless/00751_default_databasename_for_view.reference b/tests/queries/0_stateless/00751_default_databasename_for_view.reference
index 2873fcbee3b..5ba1861e3ef 100644
--- a/tests/queries/0_stateless/00751_default_databasename_for_view.reference
+++ b/tests/queries/0_stateless/00751_default_databasename_for_view.reference
@@ -1,4 +1,29 @@
-CREATE MATERIALIZED VIEW test_00751.t_mv_00751 (`date` Date, `platform` Enum8('a' = 0, 'b' = 1), `app` Enum8('a' = 0, 'b' = 1)) ENGINE = MergeTree ORDER BY date SETTINGS index_granularity = 8192 AS SELECT date, platform, app FROM test_00751.t_00751 WHERE (app = (SELECT min(app) FROM test_00751.u_00751)) AND (platform = (SELECT (SELECT min(platform) FROM test_00751.v_00751)))
+CREATE MATERIALIZED VIEW test_00751.t_mv_00751
+(
+ `date` Date,
+ `platform` Enum8('a' = 0, 'b' = 1),
+ `app` Enum8('a' = 0, 'b' = 1)
+)
+ENGINE = MergeTree
+ORDER BY date
+SETTINGS index_granularity = 8192 AS
+SELECT
+ date,
+ platform,
+ app
+FROM test_00751.t_00751
+WHERE (app =
+(
+ SELECT min(app)
+ FROM test_00751.u_00751
+)) AND (platform =
+(
+ SELECT
+ (
+ SELECT min(platform)
+ FROM test_00751.v_00751
+ )
+))
 2000-01-01 a a
 2000-01-02 b b
 2000-01-03 a a
diff --git a/tests/queries/0_stateless/00753_comment_columns_zookeeper.reference b/tests/queries/0_stateless/00753_comment_columns_zookeeper.reference
index 8b1eeea8203..b5021d00f56 100644
--- a/tests/queries/0_stateless/00753_comment_columns_zookeeper.reference
+++ b/tests/queries/0_stateless/00753_comment_columns_zookeeper.reference
@@ -1,6 +1,6 @@
-CREATE TABLE test.check_comments (`column_name1` UInt8 DEFAULT 1 COMMENT \'comment\', `column_name2` UInt8 COMMENT \'non default comment\') ENGINE = ReplicatedMergeTree(\'clickhouse/tables/test_comments\', \'r1\') ORDER BY column_name1 SETTINGS index_granularity = 8192
+CREATE TABLE test.check_comments\n(\n `column_name1` UInt8 DEFAULT 1 COMMENT \'comment\', \n `column_name2` UInt8 COMMENT \'non default comment\'\n)\nENGINE = ReplicatedMergeTree(\'clickhouse/tables/test_comments\', \'r1\')\nORDER BY column_name1\nSETTINGS index_granularity = 8192
 column_name1 UInt8 DEFAULT 1 comment
 column_name2 UInt8 non default comment
-CREATE TABLE test.check_comments (`column_name1` UInt8 DEFAULT 1 COMMENT \'another comment\', `column_name2` UInt8 COMMENT \'non default comment\') ENGINE = ReplicatedMergeTree(\'clickhouse/tables/test_comments\', \'r1\') ORDER BY column_name1 SETTINGS index_granularity = 8192
+CREATE TABLE test.check_comments\n(\n `column_name1` UInt8 DEFAULT 1 COMMENT \'another comment\', \n `column_name2` UInt8 COMMENT \'non default comment\'\n)\nENGINE = ReplicatedMergeTree(\'clickhouse/tables/test_comments\', \'r1\')\nORDER BY column_name1\nSETTINGS index_granularity = 8192
 column_name1 UInt8 DEFAULT 1 another comment
 column_name2 UInt8 non default comment
diff --git a/tests/queries/0_stateless/00754_alter_modify_column_partitions.reference b/tests/queries/0_stateless/00754_alter_modify_column_partitions.reference
index 93f15318634..a1493508b61 100644
--- a/tests/queries/0_stateless/00754_alter_modify_column_partitions.reference
+++ b/tests/queries/0_stateless/00754_alter_modify_column_partitions.reference
@@ -1,5 +1,5 @@
 *** Check SHOW CREATE TABLE ***
-CREATE TABLE default.alter_column (`x` UInt32, `y` Int32) ENGINE = MergeTree PARTITION BY x ORDER BY x SETTINGS index_granularity = 8192
+CREATE TABLE default.alter_column\n(\n `x` UInt32, \n `y` Int32\n)\nENGINE = MergeTree\nPARTITION BY x\nORDER BY x\nSETTINGS index_granularity = 8192
 *** Check parts ***
 0 0
 10 -10
@@ -52,7 +52,7 @@ CREATE TABLE default.alter_column (`x` UInt32, `y` Int32) ENGINE = MergeTree PAR
 8 -8
 9 -9
 *** Check SHOW CREATE TABLE after ALTER MODIFY ***
-CREATE TABLE default.alter_column (`x` UInt32, `y` Int64) ENGINE = MergeTree PARTITION BY x ORDER BY x SETTINGS index_granularity = 8192
+CREATE TABLE default.alter_column\n(\n `x` UInt32, \n `y` Int64\n)\nENGINE = MergeTree\nPARTITION BY x\nORDER BY x\nSETTINGS index_granularity = 8192
 *** Check parts after ALTER MODIFY ***
 0 0
 10 -10
diff --git a/tests/queries/0_stateless/00754_alter_modify_order_by.reference b/tests/queries/0_stateless/00754_alter_modify_order_by.reference
index 1bcdae884f8..f0dc413a186 100644
--- a/tests/queries/0_stateless/00754_alter_modify_order_by.reference
+++ b/tests/queries/0_stateless/00754_alter_modify_order_by.reference
@@ -9,4 +9,4 @@
 1 2 1 30
 1 2 4 90
 *** Check SHOW CREATE TABLE ***
-CREATE TABLE default.summing (`x` UInt32, `y` UInt32, `z` UInt32, `val` UInt32) ENGINE = SummingMergeTree PRIMARY KEY (x, y) ORDER BY (x, y, -z) SETTINGS index_granularity = 8192
+CREATE TABLE default.summing\n(\n `x` UInt32, \n `y` UInt32, \n `z` UInt32, \n `val` UInt32\n)\nENGINE = SummingMergeTree\nPRIMARY KEY (x, y)\nORDER BY (x, y, -z)\nSETTINGS index_granularity = 8192
diff --git a/tests/queries/0_stateless/00754_alter_modify_order_by_replicated_zookeeper.reference b/tests/queries/0_stateless/00754_alter_modify_order_by_replicated_zookeeper.reference
index ebe30941f3f..938a90a27b4 100644
--- a/tests/queries/0_stateless/00754_alter_modify_order_by_replicated_zookeeper.reference
+++ b/tests/queries/0_stateless/00754_alter_modify_order_by_replicated_zookeeper.reference
@@ -9,6 +9,6 @@
 1 2 1 30
 1 2 4 90
 *** Check SHOW CREATE TABLE ***
-CREATE TABLE test.summing_r2 (`x` UInt32, `y` UInt32, `z` UInt32, `val` UInt32) ENGINE = ReplicatedSummingMergeTree(\'/clickhouse/tables/test/summing\', \'r2\') PRIMARY KEY (x, y) ORDER BY (x, y, -z) SETTINGS index_granularity = 8192
+CREATE TABLE test.summing_r2\n(\n `x` UInt32, \n `y` UInt32, \n `z` UInt32, \n `val` UInt32\n)\nENGINE = ReplicatedSummingMergeTree(\'/clickhouse/tables/test/summing\', \'r2\')\nPRIMARY KEY (x, y)\nORDER BY (x, y, -z)\nSETTINGS index_granularity = 8192
 *** Check SHOW CREATE TABLE after offline ALTER ***
-CREATE TABLE test.summing_r2 (`x` UInt32, `y` UInt32, `z` UInt32, `t` UInt32, `val` UInt32) ENGINE = ReplicatedSummingMergeTree(\'/clickhouse/tables/test/summing\', \'r2\') PRIMARY KEY (x, y) ORDER BY (x, y, t * t) SETTINGS index_granularity = 8192
+CREATE TABLE test.summing_r2\n(\n `x` UInt32, \n `y` UInt32, \n `z` UInt32, \n `t` UInt32, \n `val` UInt32\n)\nENGINE = ReplicatedSummingMergeTree(\'/clickhouse/tables/test/summing\', \'r2\')\nPRIMARY KEY (x, y)\nORDER BY (x, y, t * t)\nSETTINGS index_granularity = 8192
diff --git a/tests/queries/0_stateless/00804_test_custom_compression_codecs.reference b/tests/queries/0_stateless/00804_test_custom_compression_codecs.reference
index 6da97ff6091..f778c4f5d90 100644
--- a/tests/queries/0_stateless/00804_test_custom_compression_codecs.reference
+++ b/tests/queries/0_stateless/00804_test_custom_compression_codecs.reference
@@ -9,10 +9,10 @@
 10003
 274972506.6
 9175437371954010821
-CREATE TABLE default.compression_codec_multiple_more_types (`id` Decimal(38, 13) CODEC(ZSTD(1), LZ4, ZSTD(1), ZSTD(1), Delta(2), Delta(4), Delta(1), LZ4HC(0)), `data` FixedString(12) CODEC(ZSTD(1), ZSTD(1), Delta(1), Delta(1), Delta(1), NONE, NONE, NONE, LZ4HC(0)), `ddd.age` Array(UInt8) CODEC(LZ4, LZ4HC(0), NONE, NONE, NONE, ZSTD(1), Delta(8)), `ddd.Name` Array(String) CODEC(LZ4, LZ4HC(0), NONE, NONE, NONE, ZSTD(1), Delta(8))) ENGINE = MergeTree() ORDER BY tuple() SETTINGS index_granularity = 8192
+CREATE TABLE default.compression_codec_multiple_more_types\n(\n `id` Decimal(38, 13) CODEC(ZSTD(1), LZ4, ZSTD(1), ZSTD(1), Delta(2), Delta(4), Delta(1), LZ4HC(0)), \n `data` FixedString(12) CODEC(ZSTD(1), ZSTD(1), Delta(1), Delta(1), Delta(1), NONE, NONE, NONE, LZ4HC(0)), \n `ddd.age` Array(UInt8) CODEC(LZ4, LZ4HC(0), NONE, NONE, NONE, ZSTD(1), Delta(8)), \n `ddd.Name` Array(String) CODEC(LZ4, LZ4HC(0), NONE, NONE, NONE, ZSTD(1), Delta(8))\n)\nENGINE = MergeTree()\nORDER BY tuple()\nSETTINGS index_granularity = 8192
 1.5555555555555 hello world! [77] ['John']
 7.1000000000000 xxxxxxxxxxxx [127] ['Henry']
 !
 222
 !ZSTD
-CREATE TABLE default.test_default_delta (`id` UInt64 CODEC(Delta(8)), `data` String CODEC(Delta(1)), `somedate` Date CODEC(Delta(2)), `somenum` Float64 CODEC(Delta(8)), `somestr` FixedString(3) CODEC(Delta(1)), `othernum` Int64 CODEC(Delta(8)), `yetothernum` Float32 CODEC(Delta(4)), `ddd.age` Array(UInt8) CODEC(Delta(1)), `ddd.Name` Array(String) CODEC(Delta(1)), `ddd.OName` Array(String) CODEC(Delta(1)), `ddd.BName` Array(String) CODEC(Delta(1))) ENGINE = MergeTree() ORDER BY tuple() SETTINGS index_granularity = 8192
+CREATE TABLE default.test_default_delta\n(\n `id` UInt64 CODEC(Delta(8)), \n `data` String CODEC(Delta(1)), \n `somedate` Date CODEC(Delta(2)), \n `somenum` Float64 CODEC(Delta(8)), \n `somestr` FixedString(3) CODEC(Delta(1)), \n `othernum` Int64 CODEC(Delta(8)), \n `yetothernum` Float32 CODEC(Delta(4)), \n `ddd.age` Array(UInt8) CODEC(Delta(1)), \n `ddd.Name` Array(String) CODEC(Delta(1)), \n `ddd.OName` Array(String) CODEC(Delta(1)), \n `ddd.BName` Array(String) CODEC(Delta(1))\n)\nENGINE = MergeTree()\nORDER BY tuple()\nSETTINGS index_granularity = 8192
diff --git a/tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.reference b/tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.reference
index 322b207bf7d..b33535364e5 100644
--- a/tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.reference
+++ b/tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.reference
@@ -1,9 +1,9 @@
-CREATE TABLE default.compression_codec_log (`id` UInt64 CODEC(LZ4), `data` String CODEC(ZSTD(1)), `ddd` Date CODEC(NONE), `somenum` Float64 CODEC(ZSTD(2)), `somestr` FixedString(3) CODEC(LZ4HC(7)), `othernum` Int64 CODEC(Delta(8))) ENGINE = Log()
+CREATE TABLE default.compression_codec_log\n(\n `id` UInt64 CODEC(LZ4), \n `data` String CODEC(ZSTD(1)), \n `ddd` Date CODEC(NONE), \n `somenum` Float64 CODEC(ZSTD(2)), \n `somestr` FixedString(3) CODEC(LZ4HC(7)), \n `othernum` Int64 CODEC(Delta(8))\n)\nENGINE = Log()
 1 hello 2018-12-14 1.1 aaa 5
 2 world 2018-12-15 2.2 bbb 6
 3 ! 2018-12-16 3.3 ccc 7
 2
-CREATE TABLE default.compression_codec_multiple_log (`id` UInt64 CODEC(LZ4, ZSTD(1), NONE, LZ4HC(0), Delta(4)), `data` String CODEC(ZSTD(2), NONE, Delta(2), LZ4HC(0), LZ4, LZ4, Delta(8)), `ddd` Date CODEC(NONE, NONE, NONE, Delta(1), LZ4, ZSTD(1), LZ4HC(0), LZ4HC(0)), `somenum` Float64 CODEC(Delta(4), LZ4, LZ4, ZSTD(2), LZ4HC(5), ZSTD(3), ZSTD(1))) ENGINE = Log()
+CREATE TABLE default.compression_codec_multiple_log\n(\n `id` UInt64 CODEC(LZ4, ZSTD(1), NONE, LZ4HC(0), Delta(4)), \n `data` String CODEC(ZSTD(2), NONE, Delta(2), LZ4HC(0), LZ4, LZ4, Delta(8)), \n `ddd` Date CODEC(NONE, NONE, NONE, Delta(1), LZ4, ZSTD(1), LZ4HC(0), LZ4HC(0)), \n `somenum` Float64 CODEC(Delta(4), LZ4, LZ4, ZSTD(2), LZ4HC(5), ZSTD(3), ZSTD(1))\n)\nENGINE = Log()
 1 world 2018-10-05 1.1
 2 hello 2018-10-01 2.2
 3 buy 2018-10-11 3.3
@@ -11,12 +11,12 @@ CREATE TABLE default.compression_codec_multiple_log (`id` UInt64 CODEC(LZ4, ZSTD
 10003
 274972506.6
 9175437371954010821
-CREATE TABLE default.compression_codec_tiny_log (`id` UInt64 CODEC(LZ4), `data` String CODEC(ZSTD(1)), `ddd` Date CODEC(NONE), `somenum` Float64 CODEC(ZSTD(2)), `somestr` FixedString(3) CODEC(LZ4HC(7)), `othernum` Int64 CODEC(Delta(8))) ENGINE = TinyLog()
+CREATE TABLE default.compression_codec_tiny_log\n(\n `id` UInt64 CODEC(LZ4), \n `data` String CODEC(ZSTD(1)), \n `ddd` Date CODEC(NONE), \n `somenum` Float64 CODEC(ZSTD(2)), \n `somestr` FixedString(3) CODEC(LZ4HC(7)), \n `othernum` Int64 CODEC(Delta(8))\n)\nENGINE = TinyLog()
 1 hello 2018-12-14 1.1 aaa 5
 2 world 2018-12-15 2.2 bbb 6
 3 ! 2018-12-16 3.3 ccc 7
 2
-CREATE TABLE default.compression_codec_multiple_tiny_log (`id` UInt64 CODEC(LZ4, ZSTD(1), NONE, LZ4HC(0), Delta(4)), `data` String CODEC(ZSTD(2), NONE, Delta(2), LZ4HC(0), LZ4, LZ4, Delta(8)), `ddd` Date CODEC(NONE, NONE, NONE, Delta(1), LZ4, ZSTD(1), LZ4HC(0), LZ4HC(0)), `somenum` Float64 CODEC(Delta(4), LZ4, LZ4, ZSTD(2), LZ4HC(5), ZSTD(3), ZSTD(1))) ENGINE = TinyLog()
+CREATE TABLE default.compression_codec_multiple_tiny_log\n(\n `id` UInt64 CODEC(LZ4, ZSTD(1), NONE, LZ4HC(0), Delta(4)), \n `data` String CODEC(ZSTD(2), NONE, Delta(2), LZ4HC(0), LZ4, LZ4, Delta(8)), \n `ddd` Date CODEC(NONE, NONE, NONE, Delta(1), LZ4, ZSTD(1), LZ4HC(0), LZ4HC(0)), \n `somenum` Float64 CODEC(Delta(4), LZ4, LZ4, ZSTD(2), LZ4HC(5), ZSTD(3), ZSTD(1))\n)\nENGINE = TinyLog()
 1 world 2018-10-05 1.1
 2 hello 2018-10-01 2.2
 3 buy 2018-10-11 3.3
diff --git a/tests/queries/0_stateless/00836_indices_alter.reference b/tests/queries/0_stateless/00836_indices_alter.reference
index e30c17eb673..6efa25f47b7 100644
--- a/tests/queries/0_stateless/00836_indices_alter.reference
+++ b/tests/queries/0_stateless/00836_indices_alter.reference
@@ -1,4 +1,4 @@
-CREATE TABLE default.minmax_idx (`u64` UInt64, `i32` Int32, INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10, INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10, INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10) ENGINE = MergeTree() ORDER BY u64 SETTINGS index_granularity = 8192
+CREATE TABLE default.minmax_idx\n(\n `u64` UInt64, \n `i32` Int32, \n INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10, \n INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10, \n INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10\n)\nENGINE = MergeTree()\nORDER BY u64\nSETTINGS index_granularity = 8192
 1 2
 1 2
 1 2
@@ -6,15 +6,15 @@ CREATE TABLE default.minmax_idx (`u64` UInt64, `i32` Int32, INDEX idx1 u64 * i3
 1 2
 1 2
 1 2
-CREATE TABLE default.minmax_idx (`u64` UInt64, `i32` Int32, INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10, INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10) ENGINE = MergeTree() ORDER BY u64 SETTINGS index_granularity = 8192
+CREATE TABLE default.minmax_idx\n(\n `u64` UInt64, \n `i32` Int32, \n INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10, \n INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10\n)\nENGINE = MergeTree()\nORDER BY u64\nSETTINGS index_granularity = 8192
 1 2
 1 2
 1 2
 1 2
 1 2
 1 2
-CREATE TABLE default.minmax_idx (`u64` UInt64, `i32` Int32) ENGINE = MergeTree() ORDER BY u64 SETTINGS index_granularity = 8192
-CREATE TABLE default.minmax_idx (`u64` UInt64, `i32` Int32, INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10) ENGINE = MergeTree() ORDER BY u64 SETTINGS index_granularity = 8192
+CREATE TABLE default.minmax_idx\n(\n `u64` UInt64, \n `i32` Int32\n)\nENGINE = MergeTree()\nORDER BY u64\nSETTINGS index_granularity = 8192
+CREATE TABLE default.minmax_idx\n(\n `u64` UInt64, \n `i32` Int32, \n INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10\n)\nENGINE = MergeTree()\nORDER BY u64\nSETTINGS index_granularity = 8192
 1 2
 1 2
 1 2
@@ -23,6 +23,6 @@ CREATE TABLE default.minmax_idx (`u64` UInt64, `i32` Int32, INDEX idx1 u64 * i3
 1 2
 1 2
 1 2
-CREATE TABLE default.minmax_idx2 (`u64` UInt64, `i32` Int32) ENGINE = MergeTree() ORDER BY u64 SETTINGS index_granularity = 8192
+CREATE TABLE default.minmax_idx2\n(\n `u64` UInt64, \n `i32` Int32\n)\nENGINE = MergeTree()\nORDER BY u64\nSETTINGS index_granularity = 8192
 1 2
 1 2
diff --git a/tests/queries/0_stateless/00836_indices_alter_replicated_zookeeper.reference b/tests/queries/0_stateless/00836_indices_alter_replicated_zookeeper.reference
index b2c2b41f460..ec9de160fcc 100644
--- a/tests/queries/0_stateless/00836_indices_alter_replicated_zookeeper.reference
+++ b/tests/queries/0_stateless/00836_indices_alter_replicated_zookeeper.reference
@@ -1,5 +1,5 @@
-CREATE TABLE test.minmax_idx (`u64` UInt64, `i32` Int32, INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10, INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10, INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r1\') ORDER BY u64 SETTINGS index_granularity = 8192
-CREATE TABLE test.minmax_idx_r (`u64` UInt64, `i32` Int32, INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10, INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10, INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r2\') ORDER BY u64 SETTINGS index_granularity = 8192
+CREATE TABLE test.minmax_idx\n(\n `u64` UInt64, \n `i32` Int32, \n INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10, \n INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10, \n INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r1\')\nORDER BY u64\nSETTINGS index_granularity = 8192
+CREATE TABLE test.minmax_idx_r\n(\n `u64` UInt64, \n `i32` Int32, \n INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10, \n INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10, \n INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r2\')\nORDER BY u64\nSETTINGS index_granularity = 8192
 1 2
 1 2
 1 2
@@ -14,8 +14,8 @@ CREATE TABLE test.minmax_idx_r (`u64` UInt64, `i32` Int32, INDEX idx1 u64 * i32
 3 2
 19 9
 65 75
-CREATE TABLE test.minmax_idx (`u64` UInt64, `i32` Int32, INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10, INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r1\') ORDER BY u64 SETTINGS index_granularity = 8192
-CREATE TABLE test.minmax_idx_r (`u64` UInt64, `i32` Int32, INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10, INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r2\') ORDER BY u64 SETTINGS index_granularity = 8192
+CREATE TABLE test.minmax_idx\n(\n `u64` UInt64, \n `i32` Int32, \n INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10, \n INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r1\')\nORDER BY u64\nSETTINGS index_granularity = 8192
+CREATE TABLE test.minmax_idx_r\n(\n `u64` UInt64, \n `i32` Int32, \n INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10, \n INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r2\')\nORDER BY u64\nSETTINGS index_granularity = 8192
 1 2
 1 4
 1 5
@@ -28,10 +28,10 @@ CREATE TABLE test.minmax_idx_r (`u64` UInt64, `i32` Int32, INDEX idx3 u64 - i32
 3 2
 19 9
 65 75
-CREATE TABLE test.minmax_idx (`u64` UInt64, `i32` Int32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r1\') ORDER BY u64 SETTINGS index_granularity = 8192
-CREATE TABLE test.minmax_idx_r (`u64` UInt64, `i32` Int32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r2\') ORDER BY u64 SETTINGS index_granularity = 8192
-CREATE TABLE test.minmax_idx (`u64` UInt64, `i32` Int32, INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r1\') ORDER BY u64 SETTINGS index_granularity = 8192
-CREATE TABLE test.minmax_idx_r (`u64` UInt64, `i32` Int32, INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r2\') ORDER BY u64 SETTINGS index_granularity = 8192
+CREATE TABLE test.minmax_idx\n(\n `u64` UInt64, \n `i32` Int32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r1\')\nORDER BY u64\nSETTINGS index_granularity = 8192
+CREATE TABLE test.minmax_idx_r\n(\n `u64` UInt64, \n `i32` Int32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r2\')\nORDER BY u64\nSETTINGS index_granularity = 8192
+CREATE TABLE test.minmax_idx\n(\n `u64` UInt64, \n `i32` Int32, \n INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r1\')\nORDER BY u64\nSETTINGS index_granularity = 8192
+CREATE TABLE test.minmax_idx_r\n(\n `u64` UInt64, \n `i32` Int32, \n INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r2\')\nORDER BY u64\nSETTINGS index_granularity = 8192
 1 2
 1 4
 1 5
@@ -44,14 +44,14 @@ CREATE TABLE test.minmax_idx_r (`u64` UInt64, `i32` Int32, INDEX idx1 u64 * i32
 3 2
 19 9
 65 75
-CREATE TABLE test.minmax_idx2 (`u64` UInt64, `i32` Int32, INDEX idx1 u64 + i32 TYPE minmax GRANULARITY 10, INDEX idx2 u64 * i32 TYPE minmax GRANULARITY 10) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter2\', \'r1\') ORDER BY u64 SETTINGS index_granularity = 8192
-CREATE TABLE test.minmax_idx2_r (`u64` UInt64, `i32` Int32, INDEX idx1 u64 + i32 TYPE minmax GRANULARITY 10, INDEX idx2 u64 * i32 TYPE minmax GRANULARITY 10) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter2\', \'r2\') ORDER BY u64 SETTINGS index_granularity = 8192
+CREATE TABLE test.minmax_idx2\n(\n `u64` UInt64, \n `i32` Int32, \n INDEX idx1 u64 + i32 TYPE minmax GRANULARITY 10, \n INDEX idx2 u64 * i32 TYPE minmax GRANULARITY 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter2\', \'r1\')\nORDER BY u64\nSETTINGS index_granularity = 8192
+CREATE TABLE test.minmax_idx2_r\n(\n `u64` UInt64, \n `i32` Int32, \n INDEX idx1 u64 + i32 TYPE minmax GRANULARITY 10, \n INDEX idx2 u64 * i32 TYPE minmax GRANULARITY 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter2\', \'r2\')\nORDER BY u64\nSETTINGS index_granularity = 8192
 1 2
 1 3
 1 2
 1 3
-CREATE TABLE test.minmax_idx2 (`u64` UInt64, `i32` Int32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter2\', \'r1\') ORDER BY u64 SETTINGS index_granularity = 8192
-CREATE TABLE test.minmax_idx2_r (`u64` UInt64, `i32` Int32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter2\', \'r2\') ORDER BY u64 SETTINGS index_granularity = 8192
+CREATE TABLE test.minmax_idx2\n(\n `u64` UInt64, \n `i32` Int32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter2\', \'r1\')\nORDER BY u64\nSETTINGS index_granularity = 8192
+CREATE TABLE test.minmax_idx2_r\n(\n `u64` UInt64, \n `i32` Int32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter2\', \'r2\')\nORDER BY u64\nSETTINGS index_granularity = 8192
 1 2
 1 3
 1 2
diff --git a/tests/queries/0_stateless/00910_zookeeper_custom_compression_codecs_replicated.reference b/tests/queries/0_stateless/00910_zookeeper_custom_compression_codecs_replicated.reference
index 29bda49a8e5..ee481c88d89 100644
--- a/tests/queries/0_stateless/00910_zookeeper_custom_compression_codecs_replicated.reference
+++ b/tests/queries/0_stateless/00910_zookeeper_custom_compression_codecs_replicated.reference
@@ -20,7 +20,7 @@
 274972506.6
 9175437371954010821
 9175437371954010821
-CREATE TABLE test.compression_codec_multiple_more_types_replicated (`id` Decimal(38, 13) CODEC(ZSTD(1), LZ4, ZSTD(1), ZSTD(1), Delta(2), Delta(4), Delta(1), LZ4HC(0)), `data` FixedString(12) CODEC(ZSTD(1), ZSTD(1), Delta(1), Delta(1), Delta(1), NONE, NONE, NONE, LZ4HC(0)), `ddd.age` Array(UInt8) CODEC(LZ4, LZ4HC(0), NONE, NONE, NONE, ZSTD(1), Delta(8)), `ddd.Name` Array(String) CODEC(LZ4, LZ4HC(0), NONE, NONE, NONE, ZSTD(1), Delta(8))) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/compression_codec_multiple_more_types_replicated\', \'1\') ORDER BY tuple() SETTINGS index_granularity = 8192
+CREATE TABLE test.compression_codec_multiple_more_types_replicated\n(\n `id` Decimal(38, 13) CODEC(ZSTD(1), LZ4, ZSTD(1), ZSTD(1), Delta(2), Delta(4), Delta(1), LZ4HC(0)), \n `data` FixedString(12) CODEC(ZSTD(1), ZSTD(1), Delta(1), Delta(1), Delta(1), NONE, NONE, NONE, LZ4HC(0)), \n `ddd.age` Array(UInt8) CODEC(LZ4, LZ4HC(0), NONE, NONE, NONE, ZSTD(1), Delta(8)), \n `ddd.Name` Array(String) CODEC(LZ4, LZ4HC(0), NONE, NONE, NONE, ZSTD(1), Delta(8))\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/compression_codec_multiple_more_types_replicated\', \'1\')\nORDER BY tuple()\nSETTINGS index_granularity = 8192
 1.5555555555555 hello world! [77] ['John']
 7.1000000000000 xxxxxxxxxxxx [127] ['Henry']
 !
diff --git a/tests/queries/0_stateless/00916_create_or_replace_view.reference b/tests/queries/0_stateless/00916_create_or_replace_view.reference
index 30d14bf1e41..50323e47556 100644
--- a/tests/queries/0_stateless/00916_create_or_replace_view.reference
+++ b/tests/queries/0_stateless/00916_create_or_replace_view.reference
@@ -1,2 +1,2 @@
-CREATE VIEW default.t (`number` UInt64) AS SELECT number FROM system.numbers
-CREATE VIEW default.t (`next_number` UInt64) AS SELECT number + 1 AS next_number FROM system.numbers
+CREATE VIEW default.t\n(\n `number` UInt64\n) AS\nSELECT number\nFROM system.numbers
+CREATE VIEW default.t\n(\n `next_number` UInt64\n) AS\nSELECT number + 1 AS next_number\nFROM system.numbers
diff --git a/tests/queries/0_stateless/00933_alter_ttl.reference b/tests/queries/0_stateless/00933_alter_ttl.reference
index 44ba49026a7..9b5cec0f773 100644
--- a/tests/queries/0_stateless/00933_alter_ttl.reference
+++ b/tests/queries/0_stateless/00933_alter_ttl.reference
@@ -1,4 +1,4 @@
-CREATE TABLE default.ttl (`d` Date, `a` Int32) ENGINE = MergeTree PARTITION BY toDayOfMonth(d) ORDER BY a TTL d + toIntervalDay(1) SETTINGS index_granularity = 8192
+CREATE TABLE default.ttl\n(\n `d` Date, \n `a` Int32\n)\nENGINE = MergeTree\nPARTITION BY toDayOfMonth(d)\nORDER BY a\nTTL d + toIntervalDay(1)\nSETTINGS index_granularity = 8192
 2100-10-10 3
 2100-10-10 4
 d Date
diff --git a/tests/queries/0_stateless/00933_ttl_replicated_zookeeper.reference b/tests/queries/0_stateless/00933_ttl_replicated_zookeeper.reference
index 986bc6b4a24..629fbf2a4a3 100644
--- a/tests/queries/0_stateless/00933_ttl_replicated_zookeeper.reference
+++ b/tests/queries/0_stateless/00933_ttl_replicated_zookeeper.reference
@@ -1,3 +1,3 @@
 200
 400
-CREATE TABLE test.ttl_repl2 (`d` Date, `x` UInt32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/ttl_repl\', \'2\') PARTITION BY toDayOfMonth(d) ORDER BY x TTL d + toIntervalDay(1) SETTINGS index_granularity = 8192
+CREATE TABLE test.ttl_repl2\n(\n `d` Date, \n `x` UInt32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/ttl_repl\', \'2\')\nPARTITION BY toDayOfMonth(d)\nORDER BY x\nTTL d + toIntervalDay(1)\nSETTINGS index_granularity = 8192
diff --git a/tests/queries/0_stateless/00933_ttl_simple.reference b/tests/queries/0_stateless/00933_ttl_simple.reference
index e8b0c699aec..102639947a3 100644
--- a/tests/queries/0_stateless/00933_ttl_simple.reference
+++ b/tests/queries/0_stateless/00933_ttl_simple.reference
@@ -6,11 +6,11 @@
 2000-10-10 00:00:00 0
 2100-10-10 00:00:00 3
 2100-10-10 2
-CREATE TABLE default.ttl_00933_1 (`b` Int32, `a` Int32 TTL now() - 1000) ENGINE = MergeTree PARTITION BY tuple() ORDER BY tuple() SETTINGS index_granularity = 8192
+CREATE TABLE default.ttl_00933_1\n(\n `b` Int32, \n `a` Int32 TTL now() - 1000\n)\nENGINE = MergeTree\nPARTITION BY tuple()\nORDER BY tuple()\nSETTINGS index_granularity = 8192
 1 0
-CREATE TABLE default.ttl_00933_1 (`b` Int32, `a` Int32 TTL now() + 1000) ENGINE = MergeTree PARTITION BY tuple() ORDER BY tuple() SETTINGS index_granularity = 8192
+CREATE TABLE default.ttl_00933_1\n(\n `b` Int32, \n `a` Int32 TTL now() + 1000\n)\nENGINE = MergeTree\nPARTITION BY tuple()\nORDER BY tuple()\nSETTINGS index_granularity = 8192
 1 1
-CREATE TABLE default.ttl_00933_1 (`b` Int32, `a` Int32 TTL today() - 1) ENGINE = MergeTree PARTITION BY tuple() ORDER BY tuple() SETTINGS index_granularity = 8192
+CREATE TABLE default.ttl_00933_1\n(\n `b` Int32, \n `a` Int32 TTL today() - 1\n)\nENGINE = MergeTree\nPARTITION BY tuple()\nORDER BY tuple()\nSETTINGS index_granularity = 8192
 1 0
-CREATE TABLE default.ttl_00933_1 (`b` Int32, `a` Int32 TTL today() + 1) ENGINE = MergeTree PARTITION BY tuple() ORDER BY tuple() SETTINGS index_granularity = 8192
+CREATE TABLE default.ttl_00933_1\n(\n `b` Int32, \n `a` Int32 TTL today() + 1\n)\nENGINE = MergeTree\nPARTITION BY tuple()\nORDER BY tuple()\nSETTINGS index_granularity = 8192
 1 1
diff --git a/tests/queries/0_stateless/00980_merge_alter_settings.reference b/tests/queries/0_stateless/00980_merge_alter_settings.reference
index ee3818d25dc..340cf29ce89 100644
--- a/tests/queries/0_stateless/00980_merge_alter_settings.reference
+++ b/tests/queries/0_stateless/00980_merge_alter_settings.reference
@@ -1,6 +1,6 @@
-CREATE TABLE default.table_for_alter (`id` UInt64, `Data` String) ENGINE = MergeTree() ORDER BY id SETTINGS index_granularity = 4096
-CREATE TABLE default.table_for_alter (`id` UInt64, `Data` String) ENGINE = MergeTree() ORDER BY id SETTINGS index_granularity = 4096, parts_to_throw_insert = 1, parts_to_delay_insert = 1
-CREATE TABLE default.table_for_alter (`id` UInt64, `Data` String) ENGINE = MergeTree() ORDER BY id SETTINGS index_granularity = 4096, parts_to_throw_insert = 100, parts_to_delay_insert = 100
+CREATE TABLE default.table_for_alter\n(\n `id` UInt64, \n `Data` String\n)\nENGINE = MergeTree()\nORDER BY id\nSETTINGS index_granularity = 4096
+CREATE TABLE default.table_for_alter\n(\n `id` UInt64, \n `Data` String\n)\nENGINE = MergeTree()\nORDER BY id\nSETTINGS index_granularity = 4096, parts_to_throw_insert = 1, parts_to_delay_insert = 1
+CREATE TABLE default.table_for_alter\n(\n `id` UInt64, \n `Data` String\n)\nENGINE = MergeTree()\nORDER BY id\nSETTINGS index_granularity = 4096, parts_to_throw_insert = 100, parts_to_delay_insert = 100
 2
-CREATE TABLE default.table_for_alter (`id` UInt64, `Data` String) ENGINE = MergeTree() ORDER BY id SETTINGS index_granularity = 4096, parts_to_throw_insert = 100, parts_to_delay_insert = 100, check_delay_period = 30
-CREATE TABLE default.table_for_alter (`id` UInt64, `Data` String, `Data2` UInt64) ENGINE = MergeTree() ORDER BY id SETTINGS index_granularity = 4096, parts_to_throw_insert = 100, parts_to_delay_insert = 100, check_delay_period = 15
+CREATE TABLE default.table_for_alter\n(\n `id` UInt64, \n `Data` String\n)\nENGINE = MergeTree()\nORDER BY id\nSETTINGS index_granularity = 4096, parts_to_throw_insert = 100, parts_to_delay_insert = 100, check_delay_period = 30
+CREATE TABLE default.table_for_alter\n(\n `id` UInt64, \n `Data` String, \n `Data2` UInt64\n)\nENGINE = MergeTree()\nORDER BY id\nSETTINGS index_granularity = 4096, parts_to_throw_insert = 100, parts_to_delay_insert = 100, check_delay_period = 15
diff --git a/tests/queries/0_stateless/00980_zookeeper_merge_tree_alter_settings.reference b/tests/queries/0_stateless/00980_zookeeper_merge_tree_alter_settings.reference
index 159102e1ca7..ab006ea6931 100644
--- a/tests/queries/0_stateless/00980_zookeeper_merge_tree_alter_settings.reference
+++ b/tests/queries/0_stateless/00980_zookeeper_merge_tree_alter_settings.reference
@@ -1,12 +1,12 @@
-CREATE TABLE default.replicated_table_for_alter1 (`id` UInt64, `Data` String) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/replicated_table_for_alter\', \'1\') ORDER BY id SETTINGS index_granularity = 8192
-CREATE TABLE default.replicated_table_for_alter1 (`id` UInt64, `Data` String) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/replicated_table_for_alter\', \'1\') ORDER BY id SETTINGS index_granularity = 8192
+CREATE TABLE default.replicated_table_for_alter1\n(\n `id` UInt64, \n `Data` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/replicated_table_for_alter\', \'1\')\nORDER BY id\nSETTINGS index_granularity = 8192
+CREATE TABLE default.replicated_table_for_alter1\n(\n `id` UInt64, \n `Data` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/replicated_table_for_alter\', \'1\')\nORDER BY id\nSETTINGS index_granularity = 8192
 4
 4
 4
 4
 6
 6
-CREATE TABLE default.replicated_table_for_alter1 (`id` UInt64, `Data` String) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/replicated_table_for_alter\', \'1\') ORDER BY id SETTINGS index_granularity = 8192, use_minimalistic_part_header_in_zookeeper = 1
-CREATE TABLE default.replicated_table_for_alter2 (`id` UInt64, `Data` String) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/replicated_table_for_alter\', \'2\') ORDER BY id SETTINGS index_granularity = 8192, parts_to_throw_insert = 1, parts_to_delay_insert = 1
-CREATE TABLE default.replicated_table_for_alter1 (`id` UInt64, `Data` String, `Data2` UInt64) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/replicated_table_for_alter\', \'1\') ORDER BY id SETTINGS index_granularity = 8192, use_minimalistic_part_header_in_zookeeper = 1, check_delay_period = 15
-CREATE TABLE default.replicated_table_for_alter2 (`id` UInt64, `Data` String, `Data2` UInt64) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/replicated_table_for_alter\', \'2\') ORDER BY id SETTINGS index_granularity = 8192, parts_to_throw_insert = 1, parts_to_delay_insert = 1
+CREATE TABLE default.replicated_table_for_alter1\n(\n `id` UInt64, \n `Data` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/replicated_table_for_alter\', \'1\')\nORDER BY id\nSETTINGS index_granularity = 8192, use_minimalistic_part_header_in_zookeeper = 1
+CREATE TABLE default.replicated_table_for_alter2\n(\n `id` UInt64, \n `Data` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/replicated_table_for_alter\', \'2\')\nORDER BY id\nSETTINGS index_granularity = 8192, parts_to_throw_insert = 1, parts_to_delay_insert = 1
+CREATE TABLE default.replicated_table_for_alter1\n(\n `id` UInt64, \n `Data` String, \n `Data2` UInt64\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/replicated_table_for_alter\', \'1\')\nORDER BY id\nSETTINGS index_granularity = 8192, use_minimalistic_part_header_in_zookeeper = 1, check_delay_period = 15
+CREATE TABLE default.replicated_table_for_alter2\n(\n `id` UInt64, \n `Data` String, \n `Data2` UInt64\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/replicated_table_for_alter\', \'2\')\nORDER BY id\nSETTINGS index_granularity = 8192, parts_to_throw_insert = 1, parts_to_delay_insert = 1
diff --git a/tests/queries/0_stateless/00998_constraints_all_tables.reference b/tests/queries/0_stateless/00998_constraints_all_tables.reference
index 730df555af3..3de251daa71 100644
--- a/tests/queries/0_stateless/00998_constraints_all_tables.reference
+++ b/tests/queries/0_stateless/00998_constraints_all_tables.reference
@@ -10,5 +10,5 @@
 0
 0
 3
-CREATE TABLE default.constrained (`URL` String, CONSTRAINT is_yandex CHECK domainWithoutWWW(URL) = 'yandex.ru', CONSTRAINT is_utf8 CHECK isValidUTF8(URL)) ENGINE = Log
-CREATE TABLE default.constrained2 (`URL` String, CONSTRAINT is_yandex CHECK domainWithoutWWW(URL) = 'yandex.ru', CONSTRAINT is_utf8 CHECK isValidUTF8(URL)) ENGINE = Log
+CREATE TABLE default.constrained\n(\n `URL` String, \n CONSTRAINT is_yandex CHECK domainWithoutWWW(URL) = \'yandex.ru\', \n CONSTRAINT is_utf8 CHECK isValidUTF8(URL)\n)\nENGINE = Log
+CREATE TABLE default.constrained2\n(\n `URL` String, \n CONSTRAINT is_yandex CHECK domainWithoutWWW(URL) = \'yandex.ru\', \n CONSTRAINT is_utf8 CHECK isValidUTF8(URL)\n)\nENGINE = Log
diff --git a/tests/queries/0_stateless/00998_constraints_all_tables.sql b/tests/queries/0_stateless/00998_constraints_all_tables.sql
index 66b93fca97b..e47b7eaf83c 100644
--- a/tests/queries/0_stateless/00998_constraints_all_tables.sql
+++ b/tests/queries/0_stateless/00998_constraints_all_tables.sql
@@ -45,8 +45,8 @@ DROP TABLE constrained;
 DROP TABLE IF EXISTS constrained2;
 CREATE TABLE constrained (URL String, CONSTRAINT is_yandex CHECK domainWithoutWWW(URL) = 'yandex.ru', CONSTRAINT is_utf8 CHECK isValidUTF8(URL)) ENGINE = Log;
 CREATE TABLE constrained2 AS constrained;
-SHOW CREATE TABLE constrained FORMAT TSVRaw;
-SHOW CREATE TABLE constrained2 FORMAT TSVRaw;
+SHOW CREATE TABLE constrained;
+SHOW CREATE TABLE constrained2;
 INSERT INTO constrained VALUES ('https://www.yandex.ru/?q=upyachka'), ('Hello'), ('test'); -- { serverError 469 }
 INSERT INTO constrained2 VALUES ('https://www.yandex.ru/?q=upyachka'), ('Hello'), ('test'); -- { serverError 469 }
 DROP TABLE constrained;
diff --git a/tests/queries/0_stateless/01018_ddl_dictionaries_create.reference b/tests/queries/0_stateless/01018_ddl_dictionaries_create.reference
index 327c02a4b8a..ad16e8ae7f2 100644
--- a/tests/queries/0_stateless/01018_ddl_dictionaries_create.reference
+++ b/tests/queries/0_stateless/01018_ddl_dictionaries_create.reference
@@ -1,5 +1,5 @@
 =DICTIONARY in Ordinary DB
-CREATE DICTIONARY ordinary_db.dict1 (`key_column` UInt64 DEFAULT 0, `second_column` UInt8 DEFAULT 1, `third_column` String DEFAULT \'qqq\') PRIMARY KEY key_column SOURCE(CLICKHOUSE(HOST \'localhost\' PORT 9000 USER \'default\' TABLE \'table_for_dict\' PASSWORD \'\' DB \'database_for_dict\')) LIFETIME(MIN 1 MAX 10) LAYOUT(FLAT())
+CREATE DICTIONARY ordinary_db.dict1\n(\n `key_column` UInt64 DEFAULT 0, \n `second_column` UInt8 DEFAULT 1, \n `third_column` String DEFAULT \'qqq\'\n)\nPRIMARY KEY key_column\nSOURCE(CLICKHOUSE(HOST \'localhost\' PORT 9000 USER \'default\' TABLE \'table_for_dict\' PASSWORD \'\' DB \'database_for_dict\'))\nLIFETIME(MIN 1 MAX 10)\nLAYOUT(FLAT())
 dict1
 1
 ordinary_db dict1
diff --git a/tests/queries/0_stateless/01033_storage_odbc_parsing_exception_check.reference b/tests/queries/0_stateless/01033_storage_odbc_parsing_exception_check.reference
index bba4944f4a8..c2d7d849fae 100644
--- a/tests/queries/0_stateless/01033_storage_odbc_parsing_exception_check.reference
+++ b/tests/queries/0_stateless/01033_storage_odbc_parsing_exception_check.reference
@@ -1 +1 @@
-CREATE TABLE default.BannerDict (`BannerID` UInt64, `CompaignID` UInt64) ENGINE = ODBC(\'DSN=pgconn;Database=postgres\', \'somedb\', \'bannerdict\')
+CREATE TABLE default.BannerDict\n(\n `BannerID` UInt64, \n `CompaignID` UInt64\n)\nENGINE = ODBC(\'DSN=pgconn;Database=postgres\', \'somedb\', \'bannerdict\')
diff --git a/tests/queries/0_stateless/01055_compact_parts_1.reference b/tests/queries/0_stateless/01055_compact_parts_1.reference
index 7c9dd4a0ef9..b99f336d3b0 100644
--- a/tests/queries/0_stateless/01055_compact_parts_1.reference
+++ b/tests/queries/0_stateless/01055_compact_parts_1.reference
@@ -1,2 +1,2 @@
-CREATE TABLE default.mt_compact (`a` Int32, `s` String) ENGINE = MergeTree PARTITION BY a ORDER BY a SETTINGS index_granularity_bytes = 0, index_granularity = 8192
-CREATE TABLE default.mt_compact (`a` Int32, `s` String) ENGINE = MergeTree PARTITION BY a ORDER BY a SETTINGS index_granularity_bytes = 0, min_rows_for_wide_part = 0, index_granularity = 8192, parts_to_delay_insert = 300
+CREATE TABLE default.mt_compact\n(\n `a` Int32, \n `s` String\n)\nENGINE = MergeTree\nPARTITION BY a\nORDER BY a\nSETTINGS index_granularity_bytes = 0, index_granularity = 8192
+CREATE TABLE default.mt_compact\n(\n `a` Int32, \n `s` String\n)\nENGINE = MergeTree\nPARTITION BY a\nORDER BY a\nSETTINGS index_granularity_bytes = 0, min_rows_for_wide_part = 0, index_granularity = 8192, parts_to_delay_insert = 300
diff --git a/tests/queries/0_stateless/01069_database_memory.reference b/tests/queries/0_stateless/01069_database_memory.reference
index 393c85070b9..e7486d57276 100644
--- a/tests/queries/0_stateless/01069_database_memory.reference
+++ b/tests/queries/0_stateless/01069_database_memory.reference
@@ -1,8 +1,8 @@
-CREATE DATABASE memory_01069 ENGINE = Memory()
+CREATE DATABASE memory_01069\nENGINE = Memory()
 1
 2
 3
 4
 3
 4
-CREATE TABLE memory_01069.file (`n` UInt8) ENGINE = File(\'CSV\')
+CREATE TABLE memory_01069.file\n(\n `n` UInt8\n)\nENGINE = File(\'CSV\')
diff --git a/tests/queries/0_stateless/01070_alter_with_ttl.reference b/tests/queries/0_stateless/01070_alter_with_ttl.reference
index 8b2bd9d1389..de7833472a1 100644
--- a/tests/queries/0_stateless/01070_alter_with_ttl.reference
+++ b/tests/queries/0_stateless/01070_alter_with_ttl.reference
@@ -1,2 +1,2 @@
-CREATE TABLE default.alter_ttl (`i` Int32, `s` String TTL toDate(\'2020-01-01\')) ENGINE = MergeTree ORDER BY i TTL toDate(\'2020-05-05\') SETTINGS index_granularity = 8192
-CREATE TABLE default.alter_ttl (`d` Date, `s` String TTL d + toIntervalDay(1)) ENGINE = MergeTree ORDER BY d TTL d + toIntervalMonth(1) SETTINGS index_granularity = 8192
+CREATE TABLE default.alter_ttl\n(\n `i` Int32, \n `s` String TTL toDate(\'2020-01-01\')\n)\nENGINE = MergeTree\nORDER BY i\nTTL toDate(\'2020-05-05\')\nSETTINGS index_granularity = 8192
+CREATE TABLE default.alter_ttl\n(\n `d` Date, \n `s` String TTL d + toIntervalDay(1)\n)\nENGINE = MergeTree\nORDER BY d\nTTL d + toIntervalMonth(1)\nSETTINGS index_granularity = 8192
diff --git a/tests/queries/0_stateless/01079_alter_default_zookeeper.reference b/tests/queries/0_stateless/01079_alter_default_zookeeper.reference
index 35ba20aff3e..62d26bc9b4b 100644
--- a/tests/queries/0_stateless/01079_alter_default_zookeeper.reference
+++ b/tests/queries/0_stateless/01079_alter_default_zookeeper.reference
@@ -1,11 +1,11 @@
-CREATE TABLE default.alter_default (`date` Date, `key` UInt64, `value` String DEFAULT \'10\') ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/alter_default\', \'1\') ORDER BY key SETTINGS index_granularity = 8192
+CREATE TABLE default.alter_default\n(\n `date` Date, \n `key` UInt64, \n `value` String DEFAULT \'10\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/alter_default\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192
 1000
-CREATE TABLE default.alter_default (`date` Date, `key` UInt64, `value` UInt64 DEFAULT \'10\') ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/alter_default\', \'1\') ORDER BY key SETTINGS index_granularity = 8192
-CREATE TABLE default.alter_default (`date` Date, `key` UInt64, `value` UInt64 DEFAULT 10) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/alter_default\', \'1\') ORDER BY key SETTINGS index_granularity = 8192
+CREATE TABLE default.alter_default\n(\n `date` Date, \n `key` UInt64, \n `value` UInt64 DEFAULT \'10\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/alter_default\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192
+CREATE TABLE default.alter_default\n(\n `date` Date, \n `key` UInt64, \n `value` UInt64 DEFAULT 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/alter_default\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192
 1000
-CREATE TABLE default.alter_default (`date` Date, `key` UInt64, `value` UInt64 DEFAULT 100) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/alter_default\', \'1\') ORDER BY key SETTINGS index_granularity = 8192
-CREATE TABLE default.alter_default (`date` Date, `key` UInt64, `value` UInt16 DEFAULT 100) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/alter_default\', \'1\') ORDER BY key SETTINGS index_granularity = 8192
+CREATE TABLE default.alter_default\n(\n `date` Date, \n `key` UInt64, \n `value` UInt64 DEFAULT 100\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/alter_default\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192
+CREATE TABLE default.alter_default\n(\n `date` Date, \n `key` UInt64, \n `value` UInt16 DEFAULT 100\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/alter_default\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192
 10000
-CREATE TABLE default.alter_default (`date` Date, `key` UInt64, `value` UInt8 DEFAULT 10) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/alter_default\', \'1\') ORDER BY key SETTINGS index_granularity = 8192
-CREATE TABLE default.alter_default (`date` Date, `key` UInt64, `value` UInt8 DEFAULT 10, `better_column` UInt8 DEFAULT \'1\') ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/alter_default\', \'1\') ORDER BY key SETTINGS index_granularity = 8192
-CREATE TABLE default.alter_default (`date` Date, `key` UInt64, `value` UInt8 DEFAULT 10, `better_column` UInt8 DEFAULT \'1\', `other_date` String DEFAULT 1) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/alter_default\', \'1\') ORDER BY key SETTINGS index_granularity = 8192
+CREATE TABLE default.alter_default\n(\n `date` Date, \n `key` UInt64, \n `value` UInt8 DEFAULT 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/alter_default\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192
+CREATE TABLE default.alter_default\n(\n `date` Date, \n `key` UInt64, \n `value` UInt8 DEFAULT 10, \n `better_column` UInt8 DEFAULT \'1\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/alter_default\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192
+CREATE TABLE default.alter_default\n(\n `date` Date, \n `key` UInt64, \n `value` UInt8 DEFAULT 10, \n `better_column` UInt8 DEFAULT \'1\', \n `other_date` String DEFAULT 1\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/alter_default\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192
diff --git a/tests/queries/0_stateless/01079_bad_alters_zookeeper.reference b/tests/queries/0_stateless/01079_bad_alters_zookeeper.reference
index 198f79cf9a4..ea3fbec34a8 100644
--- a/tests/queries/0_stateless/01079_bad_alters_zookeeper.reference
+++ b/tests/queries/0_stateless/01079_bad_alters_zookeeper.reference
@@ -1,6 +1,6 @@
 Wrong column name.
-CREATE TABLE default.table_for_bad_alters (`key` UInt64, `value1` UInt8, `value2` String) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/table_for_bad_alters\', \'1\') ORDER BY key SETTINGS index_granularity = 8192 -CREATE TABLE default.table_for_bad_alters (`key` UInt64, `value1` UInt8, `value2` UInt32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/table_for_bad_alters\', \'1\') ORDER BY key SETTINGS index_granularity = 8192 +CREATE TABLE default.table_for_bad_alters\n(\n `key` UInt64, \n `value1` UInt8, \n `value2` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/table_for_bad_alters\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192 +CREATE TABLE default.table_for_bad_alters\n(\n `key` UInt64, \n `value1` UInt8, \n `value2` UInt32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/table_for_bad_alters\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192 syntax error at begin of string. 7 Hello diff --git a/tests/queries/0_stateless/01083_expressions_in_engine_arguments.reference b/tests/queries/0_stateless/01083_expressions_in_engine_arguments.reference index 5b376a0654f..2007eda0f07 100644 --- a/tests/queries/0_stateless/01083_expressions_in_engine_arguments.reference +++ b/tests/queries/0_stateless/01083_expressions_in_engine_arguments.reference @@ -1,11 +1,11 @@ -CREATE TABLE test_01083.file (`n` Int8) ENGINE = File(\'TSVWithNamesAndTypes\') -CREATE TABLE test_01083.buffer (`n` Int8) ENGINE = Buffer(\'test_01083\', \'file\', 16, 10, 200, 10000, 1000000, 10000000, 1000000000) -CREATE TABLE test_01083.merge (`n` Int8) ENGINE = Merge(\'test_01083\', \'distributed\') +CREATE TABLE test_01083.file\n(\n `n` Int8\n)\nENGINE = File(\'TSVWithNamesAndTypes\') +CREATE TABLE test_01083.buffer\n(\n `n` Int8\n)\nENGINE = Buffer(\'test_01083\', \'file\', 16, 10, 200, 10000, 1000000, 10000000, 1000000000) +CREATE TABLE test_01083.merge\n(\n `n` Int8\n)\nENGINE = Merge(\'test_01083\', \'distributed\') CREATE TABLE test_01083.merge_tf AS merge(\'test_01083\', \'.*\') -CREATE TABLE test_01083.distributed (`n` Int8) ENGINE = Distributed(\'test_shard_localhost\', \'test_01083\', \'file\') +CREATE TABLE test_01083.distributed\n(\n `n` Int8\n)\nENGINE = Distributed(\'test_shard_localhost\', \'test_01083\', \'file\') CREATE TABLE test_01083.distributed_tf AS cluster(\'test_shard_localhost\', \'test_01083\', \'buffer\') -CREATE TABLE test_01083.url (`n` UInt64, `col` String) ENGINE = URL(\'https://localhost:8443/?query=select+n,+_table+from+test_01083.merge+format+CSV\', \'CSV\') +CREATE TABLE test_01083.url\n(\n `n` UInt64, \n `col` String\n)\nENGINE = URL(\'https://localhost:8443/?query=select+n,+_table+from+test_01083.merge+format+CSV\', \'CSV\') CREATE TABLE test_01083.rich_syntax AS remote(\'localhos{x|y|t}\', cluster(\'test_shard_localhost\', remote(\'127.0.0.{1..4}\', \'test_01083\', \'view\'))) -CREATE VIEW test_01083.view (`n` Int64) AS SELECT toInt64(n) AS n FROM (SELECT toString(n) AS n FROM test_01083.merge WHERE _table != \'qwerty\' ORDER BY _table ASC) UNION ALL SELECT * FROM test_01083.file -CREATE DICTIONARY test_01083.dict (`n` UInt64, `col` String DEFAULT \'42\') PRIMARY KEY n SOURCE(CLICKHOUSE(HOST \'localhost\' PORT 9440 SECURE 1 USER \'default\' TABLE \'url\' DB \'test_01083\')) LIFETIME(MIN 0 MAX 1) LAYOUT(CACHE(SIZE_IN_CELLS 1)) +CREATE VIEW test_01083.view\n(\n `n` Int64\n) AS\nSELECT toInt64(n) AS n\nFROM \n(\n SELECT toString(n) AS n\n FROM test_01083.merge\n WHERE _table != \'qwerty\'\n ORDER BY _table ASC\n)\nUNION ALL\nSELECT *\nFROM 
test_01083.file +CREATE DICTIONARY test_01083.dict\n(\n `n` UInt64, \n `col` String DEFAULT \'42\'\n)\nPRIMARY KEY n\nSOURCE(CLICKHOUSE(HOST \'localhost\' PORT 9440 SECURE 1 USER \'default\' TABLE \'url\' DB \'test_01083\'))\nLIFETIME(MIN 0 MAX 1)\nLAYOUT(CACHE(SIZE_IN_CELLS 1)) 16 diff --git a/tests/queries/0_stateless/01110_dictionary_layout_without_arguments.reference b/tests/queries/0_stateless/01110_dictionary_layout_without_arguments.reference index a0518e78891..852abeea187 100644 --- a/tests/queries/0_stateless/01110_dictionary_layout_without_arguments.reference +++ b/tests/queries/0_stateless/01110_dictionary_layout_without_arguments.reference @@ -1,3 +1,3 @@ World -CREATE DICTIONARY db_for_dict.dict_with_hashed_layout (`key1` UInt64, `value` String) PRIMARY KEY key1 SOURCE(CLICKHOUSE(HOST \'localhost\' PORT 9000 USER \'default\' TABLE \'table_for_dict\' DB \'db_for_dict\')) LIFETIME(MIN 1 MAX 10) LAYOUT(HASHED) +CREATE DICTIONARY db_for_dict.dict_with_hashed_layout\n(\n `key1` UInt64, \n `value` String\n)\nPRIMARY KEY key1\nSOURCE(CLICKHOUSE(HOST \'localhost\' PORT 9000 USER \'default\' TABLE \'table_for_dict\' DB \'db_for_dict\'))\nLIFETIME(MIN 1 MAX 10)\nLAYOUT(HASHED) Hello diff --git a/tests/queries/0_stateless/01213_alter_rename_column_zookeeper.reference b/tests/queries/0_stateless/01213_alter_rename_column_zookeeper.reference index a2c0e0d7d77..e2d6007c57f 100644 --- a/tests/queries/0_stateless/01213_alter_rename_column_zookeeper.reference +++ b/tests/queries/0_stateless/01213_alter_rename_column_zookeeper.reference @@ -1,6 +1,6 @@ 1 -CREATE TABLE default.table_for_rename_replicated (`date` Date, `key` UInt64, `value1` String, `value2` String, `value3` String) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/table_for_rename_replicated\', \'1\') PARTITION BY date ORDER BY key SETTINGS index_granularity = 8192 -CREATE TABLE default.table_for_rename_replicated (`date` Date, `key` UInt64, `renamed_value1` String, `value2` String, `value3` String) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/table_for_rename_replicated\', \'1\') PARTITION BY date ORDER BY key SETTINGS index_granularity = 8192 +CREATE TABLE default.table_for_rename_replicated\n(\n `date` Date, \n `key` UInt64, \n `value1` String, \n `value2` String, \n `value3` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/table_for_rename_replicated\', \'1\')\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 +CREATE TABLE default.table_for_rename_replicated\n(\n `date` Date, \n `key` UInt64, \n `renamed_value1` String, \n `value2` String, \n `value3` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/table_for_rename_replicated\', \'1\')\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 1 date key renamed_value1 value2 value3 2019-10-02 1 1 1 1 diff --git a/tests/queries/0_stateless/01213_alter_rename_nested.reference b/tests/queries/0_stateless/01213_alter_rename_nested.reference index 8b7aaaa3d5a..2641df46aeb 100644 --- a/tests/queries/0_stateless/01213_alter_rename_nested.reference +++ b/tests/queries/0_stateless/01213_alter_rename_nested.reference @@ -1,10 +1,10 @@ [8,9,10] ['a','b','c'] -CREATE TABLE default.table_for_rename_nested (`date` Date, `key` UInt64, `n.x` Array(UInt32), `n.y` Array(String), `value1` Array(Array(LowCardinality(String)))) ENGINE = MergeTree() PARTITION BY date ORDER BY key SETTINGS index_granularity = 8192 -CREATE TABLE default.table_for_rename_nested (`date` Date, `key` UInt64, `n.renamed_x` Array(UInt32), `n.renamed_y` 
Array(String), `value1` Array(Array(LowCardinality(String)))) ENGINE = MergeTree() PARTITION BY date ORDER BY key SETTINGS index_granularity = 8192 +CREATE TABLE default.table_for_rename_nested\n(\n `date` Date, \n `key` UInt64, \n `n.x` Array(UInt32), \n `n.y` Array(String), \n `value1` Array(Array(LowCardinality(String)))\n)\nENGINE = MergeTree()\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 +CREATE TABLE default.table_for_rename_nested\n(\n `date` Date, \n `key` UInt64, \n `n.renamed_x` Array(UInt32), \n `n.renamed_y` Array(String), \n `value1` Array(Array(LowCardinality(String)))\n)\nENGINE = MergeTree()\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 7 [8,9,10] 7 ['a','b','c'] [['7']] -CREATE TABLE default.table_for_rename_nested (`date` Date, `key` UInt64, `n.renamed_x` Array(UInt32), `n.renamed_y` Array(String), `renamed_value1` Array(Array(LowCardinality(String)))) ENGINE = MergeTree() PARTITION BY date ORDER BY key SETTINGS index_granularity = 8192 +CREATE TABLE default.table_for_rename_nested\n(\n `date` Date, \n `key` UInt64, \n `n.renamed_x` Array(UInt32), \n `n.renamed_y` Array(String), \n `renamed_value1` Array(Array(LowCardinality(String)))\n)\nENGINE = MergeTree()\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 date key n.renamed_x n.renamed_y renamed_value1 2019-10-01 7 [8,9,10] ['a','b','c'] [['7']] diff --git a/tests/queries/0_stateless/01213_alter_rename_with_default_zookeeper.reference b/tests/queries/0_stateless/01213_alter_rename_with_default_zookeeper.reference index 06f136d6dbc..251e664b522 100644 --- a/tests/queries/0_stateless/01213_alter_rename_with_default_zookeeper.reference +++ b/tests/queries/0_stateless/01213_alter_rename_with_default_zookeeper.reference @@ -1,17 +1,17 @@ date key value1 value2 2019-10-02 1 1 Hello 1 -CREATE TABLE default.table_rename_with_default (`date` Date, `key` UInt64, `value1` String, `value2` String DEFAULT concat(\'Hello \', value1), `value3` String ALIAS concat(\'Word \', value1)) ENGINE = MergeTree() PARTITION BY date ORDER BY key SETTINGS index_granularity = 8192 +CREATE TABLE default.table_rename_with_default\n(\n `date` Date, \n `key` UInt64, \n `value1` String, \n `value2` String DEFAULT concat(\'Hello \', value1), \n `value3` String ALIAS concat(\'Word \', value1)\n)\nENGINE = MergeTree()\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 date key renamed_value1 value2 2019-10-02 1 1 Hello 1 -CREATE TABLE default.table_rename_with_default (`date` Date, `key` UInt64, `renamed_value1` String, `value2` String DEFAULT concat(\'Hello \', renamed_value1), `value3` String ALIAS concat(\'Word \', renamed_value1)) ENGINE = MergeTree() PARTITION BY date ORDER BY key SETTINGS index_granularity = 8192 +CREATE TABLE default.table_rename_with_default\n(\n `date` Date, \n `key` UInt64, \n `renamed_value1` String, \n `value2` String DEFAULT concat(\'Hello \', renamed_value1), \n `value3` String ALIAS concat(\'Word \', renamed_value1)\n)\nENGINE = MergeTree()\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 Hello 1 Word 1 date1 date2 value1 value2 2019-10-02 2018-10-02 1 1 -CREATE TABLE default.table_rename_with_ttl (`date1` Date, `date2` Date, `value1` String, `value2` String TTL date1 + toIntervalMonth(10000)) ENGINE = ReplicatedMergeTree(\'/clickhouse/test/table_rename_with_ttl\', \'1\') ORDER BY tuple() TTL date2 + toIntervalMonth(10000) SETTINGS index_granularity = 8192 +CREATE TABLE default.table_rename_with_ttl\n(\n `date1` Date, \n 
`date2` Date, \n `value1` String, \n `value2` String TTL date1 + toIntervalMonth(10000)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/test/table_rename_with_ttl\', \'1\')\nORDER BY tuple()\nTTL date2 + toIntervalMonth(10000)\nSETTINGS index_granularity = 8192 renamed_date1 date2 value1 value2 2019-10-02 2018-10-02 1 1 -CREATE TABLE default.table_rename_with_ttl (`renamed_date1` Date, `date2` Date, `value1` String, `value2` String TTL renamed_date1 + toIntervalMonth(10000)) ENGINE = ReplicatedMergeTree(\'/clickhouse/test/table_rename_with_ttl\', \'1\') ORDER BY tuple() TTL date2 + toIntervalMonth(10000) SETTINGS index_granularity = 8192 +CREATE TABLE default.table_rename_with_ttl\n(\n `renamed_date1` Date, \n `date2` Date, \n `value1` String, \n `value2` String TTL renamed_date1 + toIntervalMonth(10000)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/test/table_rename_with_ttl\', \'1\')\nORDER BY tuple()\nTTL date2 + toIntervalMonth(10000)\nSETTINGS index_granularity = 8192 renamed_date1 renamed_date2 value1 value2 2019-10-02 2018-10-02 1 1 -CREATE TABLE default.table_rename_with_ttl (`renamed_date1` Date, `renamed_date2` Date, `value1` String, `value2` String TTL renamed_date1 + toIntervalMonth(10000)) ENGINE = ReplicatedMergeTree(\'/clickhouse/test/table_rename_with_ttl\', \'1\') ORDER BY tuple() TTL renamed_date2 + toIntervalMonth(10000) SETTINGS index_granularity = 8192 +CREATE TABLE default.table_rename_with_ttl\n(\n `renamed_date1` Date, \n `renamed_date2` Date, \n `value1` String, \n `value2` String TTL renamed_date1 + toIntervalMonth(10000)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/test/table_rename_with_ttl\', \'1\')\nORDER BY tuple()\nTTL renamed_date2 + toIntervalMonth(10000)\nSETTINGS index_granularity = 8192 diff --git a/tests/queries/0_stateless/01213_alter_table_rename_nested.reference b/tests/queries/0_stateless/01213_alter_table_rename_nested.reference index 51647dc2e7b..8e6d93dbcce 100644 --- a/tests/queries/0_stateless/01213_alter_table_rename_nested.reference +++ b/tests/queries/0_stateless/01213_alter_table_rename_nested.reference @@ -1,6 +1,6 @@ [8,9,10] ['a','b','c'] -CREATE TABLE default.table_for_rename_nested (`date` Date, `key` UInt64, `n.x` Array(UInt32), `n.y` Array(String), `value1` String) ENGINE = MergeTree() PARTITION BY date ORDER BY key SETTINGS index_granularity = 8192 -CREATE TABLE default.table_for_rename_nested (`date` Date, `key` UInt64, `n.renamed_x` Array(UInt32), `n.renamed_y` Array(String), `value1` String) ENGINE = MergeTree() PARTITION BY date ORDER BY key SETTINGS index_granularity = 8192 +CREATE TABLE default.table_for_rename_nested\n(\n `date` Date, \n `key` UInt64, \n `n.x` Array(UInt32), \n `n.y` Array(String), \n `value1` String\n)\nENGINE = MergeTree()\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 +CREATE TABLE default.table_for_rename_nested\n(\n `date` Date, \n `key` UInt64, \n `n.renamed_x` Array(UInt32), \n `n.renamed_y` Array(String), \n `value1` String\n)\nENGINE = MergeTree()\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 7 [8,9,10] 7 ['a','b','c'] From 458c7f516deb834bcb6775cdec732b404e793ce0 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Wed, 8 Apr 2020 15:40:04 +0300 Subject: [PATCH 214/484] Fix extremes for Processors. 
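For context: with the `extremes` setting enabled, ClickHouse appends an extra block with the per-column minimum and maximum of the result, and the processors pipeline must carry that block on a dedicated extremes output port alongside the existing totals port. A minimal SQL illustration of the behavior this patch wires through (the exact rendering of the extremes block depends on the output format):

set extremes = 1;
select number from numbers(3);
-- main result rows: 0, 1, 2
-- appended extremes block: min = 0, max = 2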
--- .../ClusterProxy/SelectStreamFactory.cpp | 23 ++- src/Interpreters/InterpreterSelectQuery.cpp | 4 +- .../TreeExecutorBlockInputStream.cpp | 35 +++- .../Executors/TreeExecutorBlockInputStream.h | 5 +- .../Formats/Impl/PrettyBlockOutputFormat.cpp | 4 +- src/Processors/NullSink.h | 11 ++ src/Processors/Pipe.cpp | 19 +- src/Processors/Pipe.h | 6 +- src/Processors/QueryPipeline.cpp | 162 ++++++++++++------ src/Processors/QueryPipeline.h | 2 +- .../Sources/SourceFromInputStream.cpp | 53 ++++-- .../Sources/SourceFromInputStream.h | 10 +- 12 files changed, 243 insertions(+), 91 deletions(-) diff --git a/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp b/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp index 39bbb1eb667..45e2fc9dc4b 100644 --- a/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp +++ b/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp @@ -82,7 +82,16 @@ Pipe createLocalStream(const ASTPtr & query_ast, const Block & header, const Con /// This flag means that pipeline must be tree-shaped, /// so we can't enable processors for InterpreterSelectQuery here. auto stream = interpreter.execute().in; - Pipe pipe(std::make_shared(std::move(stream))); + auto source = std::make_shared(std::move(stream)); + + bool add_totals_and_extremes_port = processed_stage == QueryProcessingStage::Complete; + if (add_totals_and_extremes_port) + { + source->addTotalsPort(); + source->addExtremesPort(); + } + + Pipe pipe(std::move(source)); pipe.addSimpleTransform(std::make_shared( pipe.getHeader(), header, ConvertingTransform::MatchColumnsMode::Name, context)); @@ -130,7 +139,7 @@ void SelectStreamFactory::createForShard( Pipes & res) { bool force_add_agg_info = processed_stage == QueryProcessingStage::WithMergeableState; - bool add_totals_port = processed_stage == QueryProcessingStage::Complete; + bool add_totals_and_extremes_port = processed_stage == QueryProcessingStage::Complete; auto modified_query_ast = query_ast->clone(); if (has_virtual_shard_num_column) @@ -153,8 +162,11 @@ void SelectStreamFactory::createForShard( auto source = std::make_shared(std::move(stream), force_add_agg_info); - if (add_totals_port) + if (add_totals_and_extremes_port) + { source->addTotalsPort(); + source->addExtremesPort(); + } res.emplace_back(std::move(source)); }; @@ -303,8 +315,11 @@ void SelectStreamFactory::createForShard( auto lazy_stream = std::make_shared("LazyShardWithLocalReplica", header, lazily_create_stream); auto source = std::make_shared(std::move(lazy_stream), force_add_agg_info); - if (add_totals_port) + if (add_totals_and_extremes_port) + { source->addTotalsPort(); + source->addExtremesPort(); + } res.emplace_back(std::move(source)); } diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index c58b0eab71b..63007f070db 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ b/src/Interpreters/InterpreterSelectQuery.cpp @@ -87,7 +87,6 @@ #include #include #include -#include #include #include #include @@ -2541,8 +2540,7 @@ void InterpreterSelectQuery::executeExtremes(QueryPipeline & pipeline) if (!context->getSettingsRef().extremes) return; - auto transform = std::make_shared(pipeline.getHeader()); - pipeline.addExtremesTransform(std::move(transform)); + pipeline.addExtremesTransform(); } diff --git a/src/Processors/Executors/TreeExecutorBlockInputStream.cpp b/src/Processors/Executors/TreeExecutorBlockInputStream.cpp index ee5b254ccf9..84fd97f4781 100644 --- a/src/Processors/Executors/TreeExecutorBlockInputStream.cpp +++ 
b/src/Processors/Executors/TreeExecutorBlockInputStream.cpp @@ -30,7 +30,10 @@ static void checkProcessorHasSingleOutput(IProcessor * processor) /// Check tree invariants (described in TreeExecutor.h). /// Collect sources with progress. -static void validateTree(const Processors & processors, IProcessor * root, IProcessor * totals_root, std::vector & sources) +static void validateTree( + const Processors & processors, + IProcessor * root, IProcessor * totals_root, IProcessor * extremes_root, + std::vector & sources) { std::unordered_map index; @@ -49,6 +52,8 @@ static void validateTree(const Processors & processors, IProcessor * root, IProc stack.push(root); if (totals_root) stack.push(totals_root); + if (extremes_root) + stack.push(extremes_root); while (!stack.empty()) { @@ -104,11 +109,15 @@ void TreeExecutorBlockInputStream::init() root = &output_port.getProcessor(); IProcessor * totals_root = nullptr; + IProcessor * extremes_root = nullptr; if (totals_port) totals_root = &totals_port->getProcessor(); - validateTree(processors, root, totals_root, sources_with_progress); + if (extremes_port) + extremes_root = &extremes_port->getProcessor(); + + validateTree(processors, root, totals_root, extremes_root, sources_with_progress); input_port = std::make_unique(getHeader(), root); connect(output_port, *input_port); @@ -121,15 +130,24 @@ void TreeExecutorBlockInputStream::init() input_totals_port->setNeeded(); } + if (extremes_port) + { + input_extremes_port = std::make_unique(extremes_port->getHeader(), root); + connect(*extremes_port, *input_extremes_port); + input_extremes_port->setNeeded(); + } + initRowsBeforeLimit(); } -void TreeExecutorBlockInputStream::execute(bool on_totals) +void TreeExecutorBlockInputStream::execute(bool on_totals, bool on_extremes) { std::stack stack; if (on_totals) stack.push(&totals_port->getProcessor()); + else if (on_extremes) + stack.push(&extremes_port->getProcessor()); else stack.push(root); @@ -283,11 +301,18 @@ Block TreeExecutorBlockInputStream::readImpl() { if (totals_port && !input_totals_port->isFinished()) { - execute(true); + execute(true, false); if (input_totals_port->hasData()) totals = getHeader().cloneWithColumns(input_totals_port->pull().detachColumns()); } + if (extremes_port && !input_extremes_port->isFinished()) + { + execute(false, true); + if (input_extremes_port->hasData()) + extremes = getHeader().cloneWithColumns(input_extremes_port->pull().detachColumns()); + } + if (rows_before_limit_at_least && rows_before_limit_at_least->hasAppliedLimit()) info.setRowsBeforeLimit(rows_before_limit_at_least->get()); @@ -311,7 +336,7 @@ Block TreeExecutorBlockInputStream::readImpl() return block; } - execute(false); + execute(false, false); } } diff --git a/src/Processors/Executors/TreeExecutorBlockInputStream.h b/src/Processors/Executors/TreeExecutorBlockInputStream.h index 24cab387eb8..dfe8e66ed09 100644 --- a/src/Processors/Executors/TreeExecutorBlockInputStream.h +++ b/src/Processors/Executors/TreeExecutorBlockInputStream.h @@ -31,6 +31,7 @@ public: interpreter_context.emplace_back(context); totals_port = pipe.getTotalsPort(); + extremes_port = pipe.getExtremesPort(); processors = std::move(pipe).detachProcessors(); init(); } @@ -52,10 +53,12 @@ protected: private: OutputPort & output_port; OutputPort * totals_port = nullptr; + OutputPort * extremes_port = nullptr; Processors processors; IProcessor * root = nullptr; std::unique_ptr input_port; std::unique_ptr input_totals_port; + std::unique_ptr input_extremes_port; RowsBeforeLimitCounterPtr 
rows_before_limit_at_least; /// Remember sources that support progress. @@ -65,7 +68,7 @@ private: void init(); /// Execute tree step-by-step until root returns next chunk or execution is finished. - void execute(bool on_totals); + void execute(bool on_totals, bool on_extremes); void initRowsBeforeLimit(); diff --git a/src/Processors/Formats/Impl/PrettyBlockOutputFormat.cpp b/src/Processors/Formats/Impl/PrettyBlockOutputFormat.cpp index a816cdd5318..ae86a01a52a 100644 --- a/src/Processors/Formats/Impl/PrettyBlockOutputFormat.cpp +++ b/src/Processors/Formats/Impl/PrettyBlockOutputFormat.cpp @@ -225,7 +225,7 @@ void PrettyBlockOutputFormat::consumeTotals(Chunk chunk) { total_rows = 0; writeSuffixIfNot(); - writeCString("\nExtremes:\n", out); + writeCString("\nTotals:\n", out); write(chunk, PortKind::Totals); } @@ -233,7 +233,7 @@ void PrettyBlockOutputFormat::consumeExtremes(Chunk chunk) { total_rows = 0; writeSuffixIfNot(); - writeCString("\nTotals:\n", out); + writeCString("\nExtremes:\n", out); write(chunk, PortKind::Extremes); } diff --git a/src/Processors/NullSink.h b/src/Processors/NullSink.h index e4968daee29..b3c3bc1ac60 100644 --- a/src/Processors/NullSink.h +++ b/src/Processors/NullSink.h @@ -1,5 +1,6 @@ #pragma once #include +#include namespace DB { @@ -19,4 +20,14 @@ public: InputPort & getPort() { return inputs.front(); } }; +class EmptySink : public ISink +{ +public: + explicit EmptySink(Block header) : ISink(std::move(header)) {} + String getName() const override { return "EmptySink"; } + +protected: + void consume(Chunk) override {} +}; + } diff --git a/src/Processors/Pipe.cpp b/src/Processors/Pipe.cpp index 4461d714264..d9b21dbc854 100644 --- a/src/Processors/Pipe.cpp +++ b/src/Processors/Pipe.cpp @@ -1,5 +1,6 @@ #include #include +#include namespace DB { @@ -48,7 +49,7 @@ static void checkSource(const IProcessor & source) throw Exception("Source for pipe should have single output, but it doesn't have any", ErrorCodes::LOGICAL_ERROR); - if (source.getOutputs().size() > 2) + if (source.getOutputs().size() > 1) throw Exception("Source for pipe should have single or two outputs, but " + source.getName() + " has " + toString(source.getOutputs().size()) + " outputs.", ErrorCodes::LOGICAL_ERROR); } @@ -56,18 +57,22 @@ static void checkSource(const IProcessor & source) Pipe::Pipe(ProcessorPtr source) { - checkSource(*source); - output_port = &source->getOutputs().front(); + if (auto * source_from_input_stream = typeid_cast(source.get())) + { + totals = source_from_input_stream->getTotalsPort(); + extremes = source_from_input_stream->getExtremesPort(); + } + else if (source->getOutputs().size() != 1) + checkSource(*source); - if (source->getOutputs().size() > 1) - totals = &source->getOutputs().back(); + output_port = &source->getOutputs().front(); processors.emplace_back(std::move(source)); max_parallel_streams = 1; } -Pipe::Pipe(Processors processors_, OutputPort * output_port_, OutputPort * totals_) - : processors(std::move(processors_)), output_port(output_port_), totals(totals_) +Pipe::Pipe(Processors processors_, OutputPort * output_port_, OutputPort * totals_, OutputPort * extremes_) + : processors(std::move(processors_)), output_port(output_port_), totals(totals_), extremes(extremes_) { } diff --git a/src/Processors/Pipe.h b/src/Processors/Pipe.h index 60715d986af..42bbd4e06d0 100644 --- a/src/Processors/Pipe.h +++ b/src/Processors/Pipe.h @@ -47,8 +47,11 @@ public: void enableQuota(); + /// Totals and extremes port. 
void setTotalsPort(OutputPort * totals_) { totals = totals_; } + void setExtremesPort(OutputPort * extremes_) { extremes = extremes_; } OutputPort * getTotalsPort() const { return totals; } + OutputPort * getExtremesPort() const { return extremes; } size_t maxParallelStreams() const { return max_parallel_streams; } @@ -67,6 +70,7 @@ private: Processors processors; OutputPort * output_port = nullptr; OutputPort * totals = nullptr; + OutputPort * extremes = nullptr; /// It is the max number of processors which can be executed in parallel for each step. See QueryPipeline::Streams. size_t max_parallel_streams = 0; @@ -84,7 +88,7 @@ private: /// and therefore we can skip those checks. /// Note that Pipe represents a tree if it was created using public interface. But this constructor can't assert it. /// So, it's possible that TreeExecutorBlockInputStream could be unable to convert such Pipe to IBlockInputStream. - explicit Pipe(Processors processors_, OutputPort * output_port, OutputPort * totals); + explicit Pipe(Processors processors_, OutputPort * output_port, OutputPort * totals, OutputPort * extremes); friend class QueryPipeline; }; diff --git a/src/Processors/QueryPipeline.cpp b/src/Processors/QueryPipeline.cpp index ee6938a48a6..d20086e726f 100644 --- a/src/Processors/QueryPipeline.cpp +++ b/src/Processors/QueryPipeline.cpp @@ -60,6 +60,58 @@ void QueryPipeline::init(Pipe pipe) init(std::move(pipes)); } +static OutputPort * uniteExtremes(const std::vector & ports, const Block & header, Processors & processors) +{ + /// Here we calculate extremes for extremes in case we unite several pipelines. + /// Example: select number from numbers(2) union all select number from numbers(3) + + /// ->> Resize -> Extremes --(output port)----> Null + /// --(extremes port)--> ... + + auto resize = std::make_shared(header, ports.size(), 1); + auto extremes = std::make_shared(header); + auto sink = std::make_shared(header); + + auto * extremes_port = &extremes->getExtremesPort(); + + auto in = resize->getInputs().begin(); + for (auto & port : ports) + connect(*port, *(in++)); + + connect(resize->getOutputs().front(), extremes->getInputPort()); + connect(extremes->getOutputPort(), sink->getPort()); + + processors.emplace_back(std::move(resize)); + processors.emplace_back(std::move(extremes)); + processors.emplace_back(std::move(sink)); + + return extremes_port; +} + +static OutputPort * uniteTotals(const std::vector & ports, const Block & header, Processors & processors) +{ + /// Calculate totals for several streams. + /// Take totals from the first source which has any, skip the others. 
+ + /// ->> Concat -> Limit + + auto concat = std::make_shared(header, ports.size()); + auto limit = std::make_shared(header, 1, 0); + + auto * totals_port = &limit->getOutputPort(); + + auto in = concat->getInputs().begin(); + for (auto & port : ports) + connect(*port, *(in++)); + + connect(concat->getOutputs().front(), limit->getInputPort()); + + processors.emplace_back(std::move(concat)); + processors.emplace_back(std::move(limit)); + + return totals_port; +} + void QueryPipeline::init(Pipes pipes) { if (initialized()) @@ -82,6 +134,7 @@ void QueryPipeline::init(Pipes pipes) } std::vector totals; + std::vector extremes; for (auto & pipe : pipes) { @@ -98,6 +151,12 @@ void QueryPipeline::init(Pipes pipes) totals.emplace_back(totals_port); } + if (auto * extremes_port_ = pipe.getExtremesPort()) + { + assertBlocksHaveEqualStructure(current_header, extremes_port_->getHeader(), "QueryPipeline"); + extremes.emplace_back(extremes_port_); + } + streams.addStream(&pipe.getPort(), pipe.maxParallelStreams()); auto cur_processors = std::move(pipe).detachProcessors(); processors.insert(processors.end(), cur_processors.begin(), cur_processors.end()); @@ -108,15 +167,15 @@ void QueryPipeline::init(Pipes pipes) if (totals.size() == 1) totals_having_port = totals.back(); else - { - auto resize = std::make_shared(current_header, totals.size(), 1); - auto in = resize->getInputs().begin(); - for (auto & total : totals) - connect(*total, *(in++)); + totals_having_port = uniteTotals(totals, current_header, processors); + } - totals_having_port = &resize->getOutputs().front(); - processors.emplace_back(std::move(resize)); - } + if (!extremes.empty()) + { + if (extremes.size() == 1) + extremes_port = extremes.back(); + else + extremes_port = uniteExtremes(extremes, current_header, processors); } } @@ -356,29 +415,31 @@ void QueryPipeline::dropTotalsIfHas() } } -void QueryPipeline::addExtremesTransform(ProcessorPtr transform) +void QueryPipeline::addExtremesTransform() { checkInitialized(); - if (!typeid_cast(transform.get())) - throw Exception("ExtremesTransform expected for QueryPipeline::addExtremesTransform.", - ErrorCodes::LOGICAL_ERROR); - if (extremes_port) throw Exception("Extremes transform was already added to pipeline.", ErrorCodes::LOGICAL_ERROR); - if (getNumStreams() != 1) - throw Exception("Cant't add Extremes transform because pipeline is expected to have single stream, " - "but it has " + toString(getNumStreams()) + " streams.", ErrorCodes::LOGICAL_ERROR); + std::vector extremes; + extremes.reserve(streams.size()); - connect(*streams.front(), transform->getInputs().front()); + for (auto & stream : streams) + { + auto transform = std::make_shared(current_header); + connect(*stream, transform->getInputPort()); - auto & outputs = transform->getOutputs(); + stream = &transform->getOutputPort(); + extremes.push_back(&transform->getExtremesPort()); - streams.assign({ &outputs.front() }); - extremes_port = &outputs.back(); - current_header = outputs.front().getHeader(); - processors.emplace_back(std::move(transform)); + processors.emplace_back(std::move(transform)); + } + + if (extremes.size() == 1) + extremes_port = extremes.front(); + else + extremes_port = uniteExtremes(extremes, current_header, processors); } void QueryPipeline::addCreatingSetsTransform(ProcessorPtr transform) @@ -455,6 +516,13 @@ void QueryPipeline::unitePipelines( }); std::vector extremes; + std::vector totals; + + if (extremes_port) + extremes.push_back(extremes_port); + + if (totals_having_port) + 
totals.push_back(totals_having_port); for (auto & pipeline : pipelines) { @@ -479,17 +547,12 @@ void QueryPipeline::unitePipelines( /// Take totals only from first port. if (pipeline.totals_having_port) { - if (!totals_having_port) - { - auto converting = std::make_shared( - pipeline.current_header, common_header, ConvertingTransform::MatchColumnsMode::Position, context); + auto converting = std::make_shared( + pipeline.current_header, common_header, ConvertingTransform::MatchColumnsMode::Position, context); - connect(*pipeline.totals_having_port, converting->getInputPort()); - totals_having_port = &converting->getOutputPort(); - processors.push_back(std::move(converting)); - } - else - pipeline.dropTotalsIfHas(); + connect(*pipeline.totals_having_port, converting->getInputPort()); + totals.push_back(&converting->getOutputPort()); + processors.push_back(std::move(converting)); } processors.insert(processors.end(), pipeline.processors.begin(), pipeline.processors.end()); @@ -504,28 +567,18 @@ void QueryPipeline::unitePipelines( if (!extremes.empty()) { - size_t num_inputs = extremes.size() + (extremes_port ? 1u : 0u); - - if (num_inputs == 1) - extremes_port = extremes.front(); + if (extremes.size() == 1) + extremes_port = extremes.back(); else - { - /// Add extra processor for extremes. - auto resize = std::make_shared(current_header, num_inputs, 1); - auto input = resize->getInputs().begin(); + extremes_port = uniteExtremes(extremes, current_header, processors); + } - if (extremes_port) - connect(*extremes_port, *(input++)); - - for (auto & output : extremes) - connect(*output, *(input++)); - - auto transform = std::make_shared(current_header); - extremes_port = &transform->getOutputPort(); - - connect(resize->getOutputs().front(), transform->getInputPort()); - processors.emplace_back(std::move(transform)); - } + if (!totals.empty()) + { + if (totals.size() == 1) + totals_having_port = totals.back(); + else + totals_having_port = uniteTotals(totals, current_header, processors); } } @@ -644,7 +697,7 @@ void QueryPipeline::initRowsBeforeLimit() Pipe QueryPipeline::getPipe() && { resize(1); - Pipe pipe(std::move(processors), streams.at(0), totals_having_port); + Pipe pipe(std::move(processors), streams.at(0), totals_having_port, extremes_port); pipe.max_parallel_streams = streams.maxParallelStreams(); for (auto & lock : table_locks) @@ -659,6 +712,9 @@ Pipe QueryPipeline::getPipe() && if (totals_having_port) pipe.setTotalsPort(totals_having_port); + if (extremes_port) + pipe.setExtremesPort(extremes_port); + return pipe; } diff --git a/src/Processors/QueryPipeline.h b/src/Processors/QueryPipeline.h index 9ce12e75b91..e8ba80cf65b 100644 --- a/src/Processors/QueryPipeline.h +++ b/src/Processors/QueryPipeline.h @@ -99,7 +99,7 @@ public: void addSimpleTransform(const ProcessorGetterWithStreamKind & getter); void addPipe(Processors pipe); void addTotalsHavingTransform(ProcessorPtr transform); - void addExtremesTransform(ProcessorPtr transform); + void addExtremesTransform(); void addCreatingSetsTransform(ProcessorPtr transform); void setOutput(ProcessorPtr output); diff --git a/src/Processors/Sources/SourceFromInputStream.cpp b/src/Processors/Sources/SourceFromInputStream.cpp index 6f2a7eeb28a..e7ca28f72b9 100644 --- a/src/Processors/Sources/SourceFromInputStream.cpp +++ b/src/Processors/Sources/SourceFromInputStream.cpp @@ -28,11 +28,20 @@ void SourceFromInputStream::init() void SourceFromInputStream::addTotalsPort() { - if (has_totals_port) + if (totals_port) throw Exception("Totals port was 
already added for SourceFromInputStream.", ErrorCodes::LOGICAL_ERROR); outputs.emplace_back(outputs.front().getHeader(), this); - has_totals_port = true; + totals_port = &outputs.back(); +} + +void SourceFromInputStream::addExtremesPort() +{ + if (extremes_port) + throw Exception("Extremes port was already added for SourceFromInputStream.", ErrorCodes::LOGICAL_ERROR); + + outputs.emplace_back(outputs.front().getHeader(), this); + extremes_port = &outputs.back(); } IProcessor::Status SourceFromInputStream::prepare() @@ -47,23 +56,32 @@ IProcessor::Status SourceFromInputStream::prepare() if (!is_stream_finished && !isCancelled()) return Status::Ready; - if (has_totals_port) + if (totals_port && !totals_port->isFinished()) { - auto & totals_out = outputs.back(); - - if (totals_out.isFinished()) - return Status::Finished; - if (has_totals) { - if (!totals_out.canPush()) + if (!totals_port->canPush()) return Status::PortFull; - totals_out.push(std::move(totals)); + totals_port->push(std::move(totals)); has_totals = false; } - totals_out.finish(); + totals_port->finish(); + } + + if (extremes_port && !extremes_port->isFinished()) + { + if (has_extremes) + { + if (!extremes_port->canPush()) + return Status::PortFull; + + extremes_port->push(std::move(extremes)); + has_extremes = false; + } + + extremes_port->finish(); } } @@ -138,13 +156,22 @@ Chunk SourceFromInputStream::generate() if (auto totals_block = stream->getTotals()) { - if (totals_block.rows() == 1) /// Sometimes we can get empty totals. Skip it. + if (totals_block.rows() > 0) /// Sometimes we can get empty totals. Skip it. { - totals.setColumns(totals_block.getColumns(), 1); + totals.setColumns(totals_block.getColumns(), totals_block.rows()); has_totals = true; } } + if (auto extremes_block = stream->getExtremes()) + { + if (extremes_block.rows() > 0) /// Sometimes we can get empty extremes. Skip it. + { + extremes.setColumns(extremes_block.getColumns(), extremes_block.rows()); + has_extremes = true; + } + } + is_stream_finished = true; return {}; } diff --git a/src/Processors/Sources/SourceFromInputStream.h b/src/Processors/Sources/SourceFromInputStream.h index 0fc92164059..b547e6a6d1f 100644 --- a/src/Processors/Sources/SourceFromInputStream.h +++ b/src/Processors/Sources/SourceFromInputStream.h @@ -23,6 +23,10 @@ public: BlockInputStreamPtr & getStream() { return stream; } void addTotalsPort(); + void addExtremesPort(); + + OutputPort * getTotalsPort() const { return totals_port; } + OutputPort * getExtremesPort() const { return extremes_port; } void setRowsBeforeLimitCounter(RowsBeforeLimitCounterPtr counter) { rows_before_limit.swap(counter); } @@ -44,9 +48,13 @@ private: RowsBeforeLimitCounterPtr rows_before_limit; Chunk totals; - bool has_totals_port = false; + OutputPort * totals_port = nullptr; bool has_totals = false; + Chunk extremes; + OutputPort * extremes_port = nullptr; + bool has_extremes = false; + bool is_generating_finished = false; bool is_stream_finished = false; bool is_stream_started = false; From b9f73a9f772935f27c5de2134daeb56677eb6913 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Wed, 8 Apr 2020 21:48:46 +0300 Subject: [PATCH 215/484] Added test. 
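Before the test, a condensed sketch of the call pattern that the previous patch enables; the names follow the patch itself, but error handling and the ConvertingTransform step from createLocalStream are omitted, so treat this as an illustration rather than the exact production code:

auto source = std::make_shared<SourceFromInputStream>(std::move(stream));
source->addTotalsPort();    /// extra output port carrying the totals block
source->addExtremesPort();  /// extra output port carrying the extremes block

Pipe pipe(std::move(source));
/// Pipe::Pipe() now recognizes SourceFromInputStream and picks up the extra
/// outputs via getTotalsPort()/getExtremesPort() instead of assuming that any
/// second output port must be totals.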
--- .../0_stateless/01232_extremes.reference | 110 ++++++++++++++++++ tests/queries/0_stateless/01232_extremes.sql | 51 ++++++++ 2 files changed, 161 insertions(+) create mode 100644 tests/queries/0_stateless/01232_extremes.reference create mode 100644 tests/queries/0_stateless/01232_extremes.sql diff --git a/tests/queries/0_stateless/01232_extremes.reference b/tests/queries/0_stateless/01232_extremes.reference new file mode 100644 index 00000000000..d5b66dcbd4b --- /dev/null +++ b/tests/queries/0_stateless/01232_extremes.reference @@ -0,0 +1,110 @@ +0 +1 + +0 +1 +- + +- +- +0 +1 +0 +1 + +0 +1 +- + +- +- +0 +1 +0 +1 + +0 +1 +- + +- +- +0 +1 +0 +1 + +0 +1 +- + +- +- +0 +1 + +0 +1 +------ + +------ +------ +0 +0 +0 +1 +1 +2 + +0 +2 +- + +- +- +0 +0 +0 +1 +1 +2 + +0 +2 +- + +- +- +0 +0 +0 +1 +1 +2 + +0 +2 +------ + +------ +------ +0 +0 +1 +1 +2 + +0 +2 +- + +- +- +0 +0 +1 +1 +2 + +0 +2 diff --git a/tests/queries/0_stateless/01232_extremes.sql b/tests/queries/0_stateless/01232_extremes.sql new file mode 100644 index 00000000000..80bf628d669 --- /dev/null +++ b/tests/queries/0_stateless/01232_extremes.sql @@ -0,0 +1,51 @@ +set send_logs_level = 'error'; +set extremes = 1; +-- set experimental_use_processors=0; + +select * from remote('127.0.0.1', numbers(2)); +select '-'; +select * from remote('127.0.0.{1,1}', numbers(2)); +select '-'; +select * from remote('127.0.0.{1,2}', numbers(2)); +select '-'; +select * from remote('127.0.0.{2,2}', numbers(2)); +select '-'; +select * from remote('127.0.0.2', numbers(2)); +select '------'; + +select * from (select * from numbers(2) union all select * from numbers(3) union all select * from numbers(1)) order by number; +select '-'; +select * from (select * from numbers(1) union all select * from numbers(2) union all select * from numbers(3)) order by number; +select '-'; +select * from (select * from numbers(3) union all select * from numbers(1) union all select * from numbers(2)) order by number; +select '------'; + +create database if not exists shard_0; +create database if not exists shard_1; + +drop table if exists shard_0.num_01232; +drop table if exists shard_0.num2_01232; +drop table if exists shard_1.num_01232; +drop table if exists shard_1.num2_01232; + +create table shard_0.num_01232 (number UInt64) engine = MergeTree order by number; +create table shard_1.num_01232 (number UInt64) engine = MergeTree order by number; +insert into shard_0.num_01232 select number from numbers(2); +insert into shard_1.num_01232 select number from numbers(3); +create table distr (number UInt64) engine = Distributed(test_cluster_two_shards_different_databases, '', num_01232); + +create table shard_0.num2_01232 (number UInt64) engine = MergeTree order by number; +create table shard_1.num2_01232 (number UInt64) engine = MergeTree order by number; +insert into shard_0.num2_01232 select number from numbers(3); +insert into shard_1.num2_01232 select number from numbers(2); +create table distr2 (number UInt64) engine = Distributed(test_cluster_two_shards_different_databases, '', num2_01232); + +select * from distr order by number; +select '-'; +select * from distr2 order by number; + +drop table if exists shard_0.num_01232; +drop table if exists shard_0.num2_01232; +drop table if exists shard_1.num_01232; +drop table if exists shard_1.num2_01232; + From c389fee7e785340c9861bcfd3f943dc85dfa84a6 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Wed, 8 Apr 2020 22:48:01 +0300 Subject: [PATCH 216/484] Update StorageReplicatedMergeTree.cpp --- 
src/Storages/StorageReplicatedMergeTree.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 8ce65aca3e0..1af86f7d5f1 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -246,7 +246,7 @@ StorageReplicatedMergeTree::StorageReplicatedMergeTree( createTableIfNotExists(); - /// We have to check granularity on other replicas. It it's fixed we + /// We have to check granularity on other replicas. If it's fixed we /// must create our new replica with fixed granularity and store this /// information in /replica/metadata. other_replicas_fixed_granularity = checkFixedGranualrityInZookeeper(); From 5e336ba0630396baff2daae7635c78cd0be37180 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 8 Apr 2020 23:33:40 +0300 Subject: [PATCH 217/484] Added another test #10077 --- .../0_stateless/01114_mysql_database_engine_segfault.reference | 0 .../queries/0_stateless/01114_mysql_database_engine_segfault.sql | 1 + 2 files changed, 1 insertion(+) create mode 100644 tests/queries/0_stateless/01114_mysql_database_engine_segfault.reference create mode 100644 tests/queries/0_stateless/01114_mysql_database_engine_segfault.sql diff --git a/tests/queries/0_stateless/01114_mysql_database_engine_segfault.reference b/tests/queries/0_stateless/01114_mysql_database_engine_segfault.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/01114_mysql_database_engine_segfault.sql b/tests/queries/0_stateless/01114_mysql_database_engine_segfault.sql new file mode 100644 index 00000000000..371df4f8dee --- /dev/null +++ b/tests/queries/0_stateless/01114_mysql_database_engine_segfault.sql @@ -0,0 +1 @@ +CREATE DATABASE conv_main ENGINE = MySQL('127.0.0.1:3456', conv_main, 'metrika', 'password'); -- { serverError 1000 } From b6f4287d2f7aad81771ee23416d2741404a494c5 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 8 Apr 2020 23:42:12 +0300 Subject: [PATCH 218/484] Remove vagrant build --- docker/packager/README.md | 8 ++-- docker/packager/freebsd/Vagrantfile | 4 -- docker/packager/packager | 57 +---------------------------- 3 files changed, 6 insertions(+), 63 deletions(-) delete mode 100644 docker/packager/freebsd/Vagrantfile diff --git a/docker/packager/README.md b/docker/packager/README.md index e02a45fdaea..5d9751a0fbd 100644 --- a/docker/packager/README.md +++ b/docker/packager/README.md @@ -3,10 +3,10 @@ compilers and build settings. 
Correctly configured Docker daemon is single dependency. Usage: -Build deb package with `gcc-8` in `debug` mode: +Build deb package with `gcc-9` in `debug` mode: ``` $ mkdir deb/test_output -$ ./packager --output-dir deb/test_output/ --package-type deb --compiler=gcc-8 --build-type=debug +$ ./packager --output-dir deb/test_output/ --package-type deb --compiler=gcc-9 --build-type=debug $ ls -l deb/test_output -rw-r--r-- 1 root root 3730 clickhouse-client_18.14.2+debug_all.deb -rw-r--r-- 1 root root 84221888 clickhouse-common-static_18.14.2+debug_amd64.deb @@ -18,11 +18,11 @@ $ ls -l deb/test_output ``` -Build ClickHouse binary with `clang-6.0` and `address` sanitizer in `relwithdebuginfo` +Build ClickHouse binary with `clang-9.0` and `address` sanitizer in `relwithdebuginfo` mode: ``` $ mkdir $HOME/some_clickhouse -$ ./packager --output-dir=$HOME/some_clickhouse --package-type binary --compiler=clang-6.0 --sanitizer=address +$ ./packager --output-dir=$HOME/some_clickhouse --package-type binary --compiler=clang-9.0 --sanitizer=address $ ls -l $HOME/some_clickhouse -rwxr-xr-x 1 root root 787061952 clickhouse lrwxrwxrwx 1 root root 10 clickhouse-benchmark -> clickhouse diff --git a/docker/packager/freebsd/Vagrantfile b/docker/packager/freebsd/Vagrantfile deleted file mode 100644 index 765f46d5604..00000000000 --- a/docker/packager/freebsd/Vagrantfile +++ /dev/null @@ -1,4 +0,0 @@ -Vagrant.configure("2") do |config| - config.vm.box = "robot-clickhouse/clickhouse-freebsd" - config.vm.synced_folder ".", "/vagrant", disabled: true -end diff --git a/docker/packager/packager b/docker/packager/packager index 10b4c7e901c..71380b92fac 100755 --- a/docker/packager/packager +++ b/docker/packager/packager @@ -11,48 +11,8 @@ SCRIPT_PATH = os.path.realpath(__file__) IMAGE_MAP = { "deb": "yandex/clickhouse-deb-builder", "binary": "yandex/clickhouse-binary-builder", - "freebsd": os.path.join(os.path.dirname(SCRIPT_PATH), "freebsd"), } -class Vagrant(object): - def __init__(self, path_to_vagrant_file): - self.prefix = "VAGRANT_CWD=" + path_to_vagrant_file - - def __enter__(self): - subprocess.check_call("{} vagrant up".format(self.prefix), shell=True) - self.ssh_path = "/tmp/vagrant-ssh" - subprocess.check_call("{} vagrant ssh-config > {}".format(self.prefix, self.ssh_path), shell=True) - return self - - def copy_to_image(self, local_path, remote_path): - cmd = "scp -F {ssh} -r {lpath} default:{rpath}".format(ssh=self.ssh_path, lpath=local_path, rpath=remote_path) - logging.info("Copying to image %s", cmd) - subprocess.check_call( - cmd, - shell=True - ) - - def copy_from_image(self, remote_path, local_path): - cmd = "scp -F {ssh} -r default:{rpath} {lpath}".format(ssh=self.ssh_path, rpath=remote_path, lpath=local_path) - logging.info("Copying from image %s", cmd) - subprocess.check_call( - cmd, - shell=True - ) - - def execute_cmd(self, cmd): - cmd = '{} vagrant ssh -c "{}"'.format(self.prefix, cmd) - logging.info("Executin cmd %s", cmd) - subprocess.check_call( - cmd, - shell=True - ) - - def __exit__(self, exc_type, exc_val, exc_tb): - logging.info("Destroying image") - subprocess.check_call("{} vagrant destroy --force".format(self.prefix), shell=True) - - def check_image_exists_locally(image_name): try: output = subprocess.check_output("docker images -q {} 2> /dev/null".format(image_name), shell=True) @@ -94,15 +54,6 @@ def run_docker_image_with_env(image_name, output, env_variables, ch_root, ccache subprocess.check_call(cmd, shell=True) -def run_vagrant_box_with_env(image_path, output_dir, ch_root): - with 
Vagrant(image_path) as vagrant: - logging.info("Copying folder to vagrant machine") - vagrant.copy_to_image(ch_root, "~/ClickHouse") - logging.info("Running build") - vagrant.execute_cmd("cd ~/ClickHouse && cmake . && ninja") - logging.info("Copying binary back") - vagrant.copy_from_image("~/ClickHouse/programs/clickhouse", output_dir) - def parse_env_variables(build_type, compiler, sanitizer, package_type, image_type, cache, distcc_hosts, unbundled, split_binary, clang_tidy, version, author, official, alien_pkgs, with_coverage): CLANG_PREFIX = "clang" DARWIN_SUFFIX = "-darwin" @@ -210,7 +161,7 @@ if __name__ == "__main__": logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s') parser = argparse.ArgumentParser(description="ClickHouse building script using prebuilt Docker image") # 'performance' creates a combined .tgz with server and configs to be used for performance test. - parser.add_argument("--package-type", choices=['deb', 'binary', 'performance', 'freebsd'], required=True) + parser.add_argument("--package-type", choices=['deb', 'binary', 'performance'], required=True) parser.add_argument("--clickhouse-repo-path", default="../../") parser.add_argument("--output-dir", required=True) parser.add_argument("--build-type", choices=("debug", ""), default="") @@ -252,9 +203,5 @@ if __name__ == "__main__": args.build_type, args.compiler, args.sanitizer, args.package_type, image_type, args.cache, args.distcc_hosts, args.unbundled, args.split_binary, args.clang_tidy, args.version, args.author, args.official, args.alien_pkgs, args.with_coverage) - if image_type != "freebsd": - run_docker_image_with_env(image_name, args.output_dir, env_prepared, ch_root, args.ccache_dir) - else: - logging.info("Running freebsd build, arguments will be ignored") - run_vagrant_box_with_env(image_name, args.output_dir, ch_root) + run_docker_image_with_env(image_name, args.output_dir, env_prepared, ch_root, args.ccache_dir) logging.info("Output placed into {}".format(args.output_dir)) From 6d85207bfbb4ad8b1a6bdfffb3633dcc46ced64e Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Wed, 8 Apr 2020 23:07:29 +0300 Subject: [PATCH 219/484] Convert blocks if structure does not match on INSERT into Distributed() Follow-up for: #10105 --- .../DistributedBlockOutputStream.cpp | 35 +++++++++++++------ ...into_distributed_different_types.reference | 1 + ...nsert_into_distributed_different_types.sql | 2 +- 3 files changed, 27 insertions(+), 11 deletions(-) diff --git a/src/Storages/Distributed/DistributedBlockOutputStream.cpp b/src/Storages/Distributed/DistributedBlockOutputStream.cpp index af17a026927..80b7d4c019e 100644 --- a/src/Storages/Distributed/DistributedBlockOutputStream.cpp +++ b/src/Storages/Distributed/DistributedBlockOutputStream.cpp @@ -12,6 +12,8 @@ #include #include #include +#include +#include #include #include #include @@ -59,6 +61,26 @@ namespace ErrorCodes extern const int CANNOT_LINK; } +static void writeBlockConvert(const Context & context, const BlockOutputStreamPtr & out, const Block & block, const size_t repeats) +{ + if (!blocksHaveEqualStructure(out->getHeader(), block)) + { + ConvertingBlockInputStream convert(context, + std::make_shared(block), + out->getHeader(), + ConvertingBlockInputStream::MatchColumnsMode::Name); + auto adopted_block = convert.read(); + + for (size_t i = 0; i < repeats; ++i) + out->write(adopted_block); + } + else + { + for (size_t i = 0; i < repeats; ++i) + out->write(block); + } +} + DistributedBlockOutputStream::DistributedBlockOutputStream( const Context & 
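In user-visible terms, condensed from the 00967_insert_into_distributed_different_types test updated below: an INSERT through a Distributed table whose underlying table declares a compatible but non-identical column type now converts the block by column name instead of throwing:

CREATE TABLE underlying_00967 (key Nullable(UInt64)) Engine = TinyLog();
CREATE TABLE dist_00967 (key UInt64) Engine = Distributed('test_shard_localhost', currentDatabase(), underlying_00967);
INSERT INTO dist_00967 SELECT toUInt64(number) FROM system.numbers LIMIT 1; -- failed with serverError 171 before this patch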
context_, StorageDistributed & storage_, const ASTPtr & query_ast_, const ClusterPtr & cluster_, @@ -306,14 +328,12 @@ ThreadPool::Job DistributedBlockOutputStream::runWritingJob(DistributedBlockOutp InterpreterInsertQuery interp(query_ast, *job.local_context); auto block_io = interp.execute(); - assertBlocksHaveEqualStructure(block_io.out->getHeader(), shard_block, "flushing shard block for " + storage.getStorageID().getNameForLogs()); + job.stream = block_io.out; job.stream->writePrefix(); } - size_t num_repetitions = shard_info.getLocalNodeCount(); - for (size_t i = 0; i < num_repetitions; ++i) - job.stream->write(shard_block); + writeBlockConvert(context, job.stream, shard_block, shard_info.getLocalNodeCount()); } job.blocks_written += 1; @@ -547,13 +567,8 @@ void DistributedBlockOutputStream::writeToLocal(const Block & block, const size_ auto block_io = interp.execute(); - assertBlocksHaveEqualStructure(block_io.out->getHeader(), block, "flushing " + storage.getStorageID().getNameForLogs()); - block_io.out->writePrefix(); - - for (size_t i = 0; i < repeats; ++i) - block_io.out->write(block); - + writeBlockConvert(context, block_io.out, block, repeats); block_io.out->writeSuffix(); } diff --git a/tests/queries/0_stateless/00967_insert_into_distributed_different_types.reference b/tests/queries/0_stateless/00967_insert_into_distributed_different_types.reference index e69de29bb2d..573541ac970 100644 --- a/tests/queries/0_stateless/00967_insert_into_distributed_different_types.reference +++ b/tests/queries/0_stateless/00967_insert_into_distributed_different_types.reference @@ -0,0 +1 @@ +0 diff --git a/tests/queries/0_stateless/00967_insert_into_distributed_different_types.sql b/tests/queries/0_stateless/00967_insert_into_distributed_different_types.sql index 6b23c72981a..33f16eb241c 100644 --- a/tests/queries/0_stateless/00967_insert_into_distributed_different_types.sql +++ b/tests/queries/0_stateless/00967_insert_into_distributed_different_types.sql @@ -4,6 +4,6 @@ DROP TABLE IF EXISTS underlying_00967; CREATE TABLE dist_00967 (key UInt64) Engine=Distributed('test_shard_localhost', currentDatabase(), underlying_00967); -- fails for TinyLog()/MergeTree()/... but not for Memory() CREATE TABLE underlying_00967 (key Nullable(UInt64)) Engine=TinyLog(); -INSERT INTO dist_00967 SELECT toUInt64(number) FROM system.numbers LIMIT 1; -- { serverError 171; } +INSERT INTO dist_00967 SELECT toUInt64(number) FROM system.numbers LIMIT 1; SELECT * FROM dist_00967; From f48fdda6787d44ec259b3e897729dcaf7788ca7f Mon Sep 17 00:00:00 2001 From: BohuTANG <172204+BohuTANG@users.noreply.github.com> Date: Thu, 9 Apr 2020 05:52:19 +0800 Subject: [PATCH 220/484] Enhanced compatibility with native mysql-connector-java(JDBC) (#10021) * Skip the `/* comments */ SELECT @@variables ...` from mysql-connector-java setup for MySQL Handler #9336 mysql-connector setup query: /* mysql-connector-java-5.1.38 ( Revision: ${revinfo.commit} ) */SELECT @@session.auto_increment_increment AS auto_increment_increment, @@character_set_client AS character_set_client, @@character_set_connection AS character_set_connection, @@character_set_results AS character_set_results, @@character_set_server AS character_set_server, @@init_connect AS init_connect, @@interactive_timeout AS interactive_timeout... 
ClickHouse side Error: {} executeQuery: Code: 62, e.displayText() = DB::Exception: Syntax error: failed at position 74: @@session.auto_increment_increment AS auto_increment_increment, @@character_set_client AS character_set_client, @@character_set_connection AS character_set_conn. Expected one of: CAST, NULL... Client side Exception: java.sql.SQLException: Syntax error: failed at position 74: @@session.auto_increment_increment AS auto_increment_increment, @@character_set_client AS character_set_client, @@character_set_connection AS character_set_conn. Expected one of: CAST... * add replacement of 'SHOW VARIABLES' for mysql-connector-java-5.1.34 #9336 * Add java client(JDBC) integration test to test_mysql_protocol * shift out java tests from dbms * Update MySQLHandler.cpp * Update MySQLHandler.cpp * test_mysql_protocol: add Test.java exit code 1 on exception Co-authored-by: alexey-milovidov --- programs/server/MySQLHandler.cpp | 33 ++++++-- .../clients/java/0.reference | 15 ++++ .../clients/java/Dockerfile | 18 +++++ .../clients/java/Test.java | 76 +++++++++++++++++++ .../clients/java/docker_compose.yml | 8 ++ tests/integration/test_mysql_protocol/test.py | 22 ++++++ 6 files changed, 165 insertions(+), 7 deletions(-) create mode 100644 tests/integration/test_mysql_protocol/clients/java/0.reference create mode 100644 tests/integration/test_mysql_protocol/clients/java/Dockerfile create mode 100644 tests/integration/test_mysql_protocol/clients/java/Test.java create mode 100644 tests/integration/test_mysql_protocol/clients/java/docker_compose.yml diff --git a/programs/server/MySQLHandler.cpp b/programs/server/MySQLHandler.cpp index 3e1432dbfce..b72aa8104d3 100644 --- a/programs/server/MySQLHandler.cpp +++ b/programs/server/MySQLHandler.cpp @@ -16,6 +16,7 @@ #include #include #include +#include #if USE_POCO_NETSSL #include @@ -268,7 +269,8 @@ void MySQLHandler::comPing() packet_sender->sendPacket(OK_Packet(0x0, client_capability_flags, 0, 0, 0), true); } -static bool isFederatedServerSetupCommand(const String & query); +static bool isFederatedServerSetupSetCommand(const String & query); +static bool isFederatedServerSetupSelectVarCommand(const String & query); void MySQLHandler::comQuery(ReadBuffer & payload) { @@ -276,7 +278,7 @@ void MySQLHandler::comQuery(ReadBuffer & payload) // This is a workaround in order to support adding ClickHouse to MySQL using federated server. // As Clickhouse doesn't support these statements, we just send OK packet in response. - if (isFederatedServerSetupCommand(query)) + if (isFederatedServerSetupSetCommand(query)) { packet_sender->sendPacket(OK_Packet(0x00, client_capability_flags, 0, 0, 0), true); } @@ -288,10 +290,11 @@ void MySQLHandler::comQuery(ReadBuffer & payload) // Translate query from MySQL to ClickHouse. // This is a temporary workaround until ClickHouse supports the syntax "@@var_name". - if (query == "select @@version_comment limit 1") // MariaDB client starts session with that query + if (isFederatedServerSetupSelectVarCommand(query)) { should_replace = true; } + // This is a workaround in order to support adding ClickHouse to MySQL using federated server. 
if (0 == strncasecmp("SHOW TABLE STATUS LIKE", query.c_str(), 22)) { @@ -358,11 +361,27 @@ void MySQLHandlerSSL::finishHandshakeSSL(size_t packet_size, char * buf, size_t #endif -static bool isFederatedServerSetupCommand(const String & query) +static bool isFederatedServerSetupSetCommand(const String & query) { - return 0 == strncasecmp("SET NAMES", query.c_str(), 9) || 0 == strncasecmp("SET character_set_results", query.c_str(), 25) - || 0 == strncasecmp("SET FOREIGN_KEY_CHECKS", query.c_str(), 22) || 0 == strncasecmp("SET AUTOCOMMIT", query.c_str(), 14) - || 0 == strncasecmp("SET SESSION TRANSACTION ISOLATION LEVEL", query.c_str(), 39); + static const std::regex expr{ + "(^(SET NAMES(.*)))" + "|(^(SET character_set_results(.*)))" + "|(^(SET FOREIGN_KEY_CHECKS(.*)))" + "|(^(SET AUTOCOMMIT(.*)))" + "|(^(SET sql_mode(.*)))" + "|(^(SET SESSION TRANSACTION ISOLATION LEVEL(.*)))" + , std::regex::icase}; + return 1 == std::regex_match(query, expr); +} + +static bool isFederatedServerSetupSelectVarCommand(const String & query) +{ + static const std::regex expr{ + "|(^(SELECT @@(.*)))" + "|(^((/\\*(.*)\\*/)([ \t]*)(SELECT([ \t]*)@@(.*))))" + "|(^((/\\*(.*)\\*/)([ \t]*)(SHOW VARIABLES(.*))))" + , std::regex::icase}; + return 1 == std::regex_match(query, expr); } const String MySQLHandler::show_table_status_replacement_query("SELECT" diff --git a/tests/integration/test_mysql_protocol/clients/java/0.reference b/tests/integration/test_mysql_protocol/clients/java/0.reference new file mode 100644 index 00000000000..bcf9e3dde94 --- /dev/null +++ b/tests/integration/test_mysql_protocol/clients/java/0.reference @@ -0,0 +1,15 @@ +33jdbc +44ck +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 +11 +12 diff --git a/tests/integration/test_mysql_protocol/clients/java/Dockerfile b/tests/integration/test_mysql_protocol/clients/java/Dockerfile new file mode 100644 index 00000000000..96713a68e66 --- /dev/null +++ b/tests/integration/test_mysql_protocol/clients/java/Dockerfile @@ -0,0 +1,18 @@ +FROM ubuntu:18.04 + +RUN apt-get update && \ + apt-get install -y software-properties-common build-essential openjdk-8-jdk libmysql-java curl + +RUN rm -rf \ + /var/lib/apt/lists/* \ + /var/cache/debconf \ + /tmp/* \ +RUN apt-get clean + +ARG ver=5.1.46 +RUN curl -L -o /mysql-connector-java-${ver}.jar https://repo1.maven.org/maven2/mysql/mysql-connector-java/${ver}/mysql-connector-java-${ver}.jar +ENV CLASSPATH=$CLASSPATH:/mysql-connector-java-${ver}.jar + +WORKDIR /jdbc +COPY Test.java Test.java +RUN javac Test.java diff --git a/tests/integration/test_mysql_protocol/clients/java/Test.java b/tests/integration/test_mysql_protocol/clients/java/Test.java new file mode 100644 index 00000000000..50ce824f67c --- /dev/null +++ b/tests/integration/test_mysql_protocol/clients/java/Test.java @@ -0,0 +1,76 @@ +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; + +class JavaConnectorTest { + private static final String CREATE_TABLE_SQL = "CREATE TABLE IF NOT EXISTS default.test1 (age Int32, name String) Engine = Memory"; + private static final String INSERT_SQL = "INSERT INTO default.test1 VALUES(33, 'jdbc'),(44, 'ck')"; + private static final String SELECT_SQL = "SELECT * FROM default.test1"; + private static final String SELECT_NUMBER_SQL = "SELECT * FROM system.numbers LIMIT 13"; + private static final String DROP_TABLE_SQL = "DROP TABLE default.test1"; + + public static void main(String[] args) { + int i = 0; + String host = "127.0.0.1"; + String port = 
"9004"; + String user = "default"; + String password = ""; + String database = "default"; + while (i < args.length) { + switch (args[i]) { + case "--host": + host = args[++i]; + break; + case "--port": + port = args[++i]; + break; + case "--user": + user = args[++i]; + break; + case "--password": + password = args[++i]; + break; + case "--database": + database = args[++i]; + break; + default: + i++; + break; + } + } + + String jdbcUrl = String.format("jdbc:mysql://%s:%s/%s?maxAllowedPacket=67108864&useSSL=false", host, port, database); + + Connection conn = null; + Statement stmt = null; + try { + conn = DriverManager.getConnection(jdbcUrl, user, password); + stmt = conn.createStatement(); + stmt.executeUpdate(CREATE_TABLE_SQL); + stmt.executeUpdate(INSERT_SQL); + + ResultSet rs = stmt.executeQuery(SELECT_SQL); + while (rs.next()) { + System.out.print(rs.getString("age")); + System.out.print(rs.getString("name")); + System.out.println(); + } + + stmt.executeUpdate(DROP_TABLE_SQL); + + rs = stmt.executeQuery(SELECT_NUMBER_SQL); + while (rs.next()) { + System.out.print(rs.getString(1)); + System.out.println(); + } + + stmt.close(); + conn.close(); + } catch (SQLException e) { + e.printStackTrace(); + System.exit(1); + } + } +} diff --git a/tests/integration/test_mysql_protocol/clients/java/docker_compose.yml b/tests/integration/test_mysql_protocol/clients/java/docker_compose.yml new file mode 100644 index 00000000000..dbe404232a0 --- /dev/null +++ b/tests/integration/test_mysql_protocol/clients/java/docker_compose.yml @@ -0,0 +1,8 @@ +version: '2.2' +services: + java1: + build: + context: ./ + network: host + # to keep container running + command: sleep infinity diff --git a/tests/integration/test_mysql_protocol/test.py b/tests/integration/test_mysql_protocol/test.py index 7987076c29a..b5ee3cecec9 100644 --- a/tests/integration/test_mysql_protocol/test.py +++ b/tests/integration/test_mysql_protocol/test.py @@ -79,6 +79,13 @@ def nodejs_container(): yield docker.from_env().containers.get(cluster.project_name + '_mysqljs1_1') +@pytest.fixture(scope='module') +def java_container(): + docker_compose = os.path.join(SCRIPT_DIR, 'clients', 'java', 'docker_compose.yml') + subprocess.check_call(['docker-compose', '-p', cluster.project_name, '-f', docker_compose, 'up', '--no-recreate', '-d', '--build']) + yield docker.from_env().containers.get(cluster.project_name + '_java1_1') + + def test_mysql_client(mysql_client, server_address): # type: (Container, str) -> None code, (stdout, stderr) = mysql_client.exec_run(''' @@ -266,6 +273,21 @@ def test_mysqljs_client(server_address, nodejs_container): assert code == 1 +def test_java_client(server_address, java_container): + # type: (str, Container) -> None + with open(os.path.join(SCRIPT_DIR, 'clients', 'java', '0.reference')) as fp: + reference = fp.read() + + code, (stdout, stderr) = java_container.exec_run('java JavaConnectorTest --host {host} --port {port} --user user_with_empty_password --database ' + 'abc'.format(host=server_address, port=server_port), demux=True) + assert code == 1 + + code, (stdout, stderr) = java_container.exec_run('java JavaConnectorTest --host {host} --port {port} --user user_with_empty_password --database ' + 'default'.format(host=server_address, port=server_port), demux=True) + assert code == 0 + assert stdout == reference + + def test_types(server_address): client = pymysql.connections.Connection(host=server_address, user='default', password='123', database='default', port=server_port) From 
b666f60af87e2bbc86990cacbcbc23c760cce3f7 Mon Sep 17 00:00:00 2001 From: Konstantin Lebedev Date: Mon, 6 Apr 2020 22:27:57 +0300 Subject: [PATCH 221/484] Optional secured communication between ClickHouse and Zookeeper --- programs/server/config.xml | 2 +- src/Common/ZooKeeper/CMakeLists.txt | 4 ++ src/Common/ZooKeeper/ZooKeeper.cpp | 29 ++++++---- src/Common/ZooKeeper/ZooKeeper.h | 4 ++ src/Common/ZooKeeper/ZooKeeperImpl.cpp | 53 ++++++++++++++----- src/Common/ZooKeeper/ZooKeeperImpl.h | 13 +++-- .../tests/zkutil_test_commands_new_lib.cpp | 25 ++++++--- src/Common/ZooKeeper/tests/zookeeper_impl.cpp | 2 +- .../configs/config.xml | 2 +- tests/server-test.xml | 2 +- 10 files changed, 97 insertions(+), 39 deletions(-) diff --git a/programs/server/config.xml b/programs/server/config.xml index f55ab02d903..fb2f9be6e24 100644 --- a/programs/server/config.xml +++ b/programs/server/config.xml @@ -37,7 +37,7 @@ true - + true true sslv2,sslv3 diff --git a/src/Common/ZooKeeper/CMakeLists.txt b/src/Common/ZooKeeper/CMakeLists.txt index aa6efcd3ca1..4dbf999419e 100644 --- a/src/Common/ZooKeeper/CMakeLists.txt +++ b/src/Common/ZooKeeper/CMakeLists.txt @@ -7,6 +7,10 @@ add_library(clickhouse_common_zookeeper ${clickhouse_common_zookeeper_headers} $ target_link_libraries (clickhouse_common_zookeeper PUBLIC clickhouse_common_io common PRIVATE string_utils PUBLIC ${Poco_Util_LIBRARY}) target_include_directories(clickhouse_common_zookeeper PUBLIC ${DBMS_INCLUDE_DIR}) +if (USE_POCO_NETSSL) + target_link_libraries (clickhouse_common_zookeeper PRIVATE ${Poco_NetSSL_LIBRARY} ${Poco_Crypto_LIBRARY}) +endif() + if (ENABLE_TESTS) add_subdirectory (tests) endif () diff --git a/src/Common/ZooKeeper/ZooKeeper.cpp b/src/Common/ZooKeeper/ZooKeeper.cpp index 99c3f115021..f2442f3f5c5 100644 --- a/src/Common/ZooKeeper/ZooKeeper.cpp +++ b/src/Common/ZooKeeper/ZooKeeper.cpp @@ -59,30 +59,36 @@ void ZooKeeper::init(const std::string & implementation_, const std::string & ho if (implementation == "zookeeper") { if (hosts.empty()) - throw KeeperException("No addresses passed to ZooKeeper constructor.", Coordination::ZBADARGUMENTS); + throw KeeperException("No hosts passed to ZooKeeper constructor.", Coordination::ZBADARGUMENTS); - std::vector addresses_strings; - splitInto<','>(addresses_strings, hosts); - Coordination::ZooKeeper::Addresses addresses; - addresses.reserve(addresses_strings.size()); + std::vector hosts_strings; + splitInto<','>(hosts_strings, hosts); + Coordination::ZooKeeper::Nodes nodes; + nodes.reserve(hosts_strings.size()); - for (const auto & address_string : addresses_strings) + for (auto & host_string : hosts_strings) { try { - addresses.emplace_back(address_string); + bool secure = bool(startsWith(host_string, "secure://")); + + if (secure) { + host_string.erase(0, strlen("secure://")); + } + + nodes.emplace_back(Coordination::ZooKeeper::Node{Poco::Net::SocketAddress{host_string}, secure}); } catch (const Poco::Net::DNSException & e) { - LOG_ERROR(log, "Cannot use ZooKeeper address " << address_string << ", reason: " << e.displayText()); + LOG_ERROR(log, "Cannot use ZooKeeper host " << host_string << ", reason: " << e.displayText()); } } - if (addresses.empty()) - throw KeeperException("Cannot use any of provided ZooKeeper addresses", Coordination::ZBADARGUMENTS); + if (nodes.empty()) + throw KeeperException("Cannot use any of provided ZooKeeper nodes", Coordination::ZBADARGUMENTS); impl = std::make_unique( - addresses, + nodes, chroot, identity_.empty() ? 
"" : "digest", identity_, @@ -130,6 +136,7 @@ struct ZooKeeperArgs if (startsWith(key, "node")) { hosts_strings.push_back( + (config.getBool(config_name + "." + key + ".secure", false) ? "secure://" : "") + config.getString(config_name + "." + key + ".host") + ":" + config.getString(config_name + "." + key + ".port", "2181") ); diff --git a/src/Common/ZooKeeper/ZooKeeper.h b/src/Common/ZooKeeper/ZooKeeper.h index 2d4d449b1a6..db166314a07 100644 --- a/src/Common/ZooKeeper/ZooKeeper.h +++ b/src/Common/ZooKeeper/ZooKeeper.h @@ -63,10 +63,14 @@ public: example1 2181 + + 1 example2 2181 + + 1 30000 10000 diff --git a/src/Common/ZooKeeper/ZooKeeperImpl.cpp b/src/Common/ZooKeeper/ZooKeeperImpl.cpp index b8700a93e35..2fba10b20e9 100644 --- a/src/Common/ZooKeeper/ZooKeeperImpl.cpp +++ b/src/Common/ZooKeeper/ZooKeeperImpl.cpp @@ -11,6 +11,11 @@ #include #include +#include +#if USE_POCO_NETSSL +#include +#endif + #include @@ -44,6 +49,13 @@ namespace CurrentMetrics extern const Metric ZooKeeperWatch; } +namespace DB +{ + namespace ErrorCodes + { + extern const int SUPPORT_IS_DISABLED; + } +} /** ZooKeeper wire protocol. @@ -817,7 +829,7 @@ ZooKeeper::~ZooKeeper() ZooKeeper::ZooKeeper( - const Addresses & addresses, + const Nodes & nodes, const String & root_path_, const String & auth_scheme, const String & auth_data, @@ -851,7 +863,7 @@ ZooKeeper::ZooKeeper( default_acls.emplace_back(std::move(acl)); } - connect(addresses, connection_timeout); + connect(nodes, connection_timeout); if (!auth_scheme.empty()) sendAuth(auth_scheme, auth_data); @@ -864,11 +876,11 @@ ZooKeeper::ZooKeeper( void ZooKeeper::connect( - const Addresses & addresses, + const Nodes & nodes, Poco::Timespan connection_timeout) { - if (addresses.empty()) - throw Exception("No addresses passed to ZooKeeper constructor", ZBADARGUMENTS); + if (nodes.empty()) + throw Exception("No nodes passed to ZooKeeper constructor", ZBADARGUMENTS); static constexpr size_t num_tries = 3; bool connected = false; @@ -876,12 +888,25 @@ void ZooKeeper::connect( WriteBufferFromOwnString fail_reasons; for (size_t try_no = 0; try_no < num_tries; ++try_no) { - for (const auto & address : addresses) + for (const auto & node : nodes) { try { - socket = Poco::Net::StreamSocket(); /// Reset the state of previous attempt. - socket.connect(address, connection_timeout); + /// Reset the state of previous attempt. + if (node.secure) + { +#if USE_POCO_NETSSL + socket = Poco::Net::SecureStreamSocket(); +#else + throw Exception{"Communication with ZooKeeper over SSL is disabled because poco library was built without NetSSL support.", ErrorCodes::SUPPORT_IS_DISABLED}; +#endif + } + else + { + socket = Poco::Net::StreamSocket(); + } + + socket.connect(node.address, connection_timeout); socket.setReceiveTimeout(operation_timeout); socket.setSendTimeout(operation_timeout); @@ -915,7 +940,7 @@ void ZooKeeper::connect( } catch (...) { - fail_reasons << "\n" << getCurrentExceptionMessage(false) << ", " << address.toString(); + fail_reasons << "\n" << getCurrentExceptionMessage(false) << ", " << node.address.toString(); } } @@ -926,15 +951,19 @@ void ZooKeeper::connect( if (!connected) { WriteBufferFromOwnString message; - message << "All connection tries failed while connecting to ZooKeeper. Addresses: "; + message << "All connection tries failed while connecting to ZooKeeper. 
nodes: "; bool first = true; - for (const auto & address : addresses) + for (const auto & node : nodes) { if (first) first = false; else message << ", "; - message << address.toString(); + + if (node.secure) + message << "secure://"; + + message << node.address.toString(); } message << fail_reasons.str() << "\n"; diff --git a/src/Common/ZooKeeper/ZooKeeperImpl.h b/src/Common/ZooKeeper/ZooKeeperImpl.h index 88e949dbd45..069df723d43 100644 --- a/src/Common/ZooKeeper/ZooKeeperImpl.h +++ b/src/Common/ZooKeeper/ZooKeeperImpl.h @@ -93,17 +93,22 @@ struct ZooKeeperRequest; class ZooKeeper : public IKeeper { public: - using Addresses = std::vector; + struct Node { + Poco::Net::SocketAddress address; + bool secure; + }; + + using Nodes = std::vector; using XID = int32_t; using OpNum = int32_t; - /** Connection to addresses is performed in order. If you want, shuffle them manually. + /** Connection to nodes is performed in order. If you want, shuffle them manually. * Operation timeout couldn't be greater than session timeout. * Operation timeout applies independently for network read, network write, waiting for events and synchronization. */ ZooKeeper( - const Addresses & addresses, + const Nodes & nodes, const String & root_path, const String & auth_scheme, const String & auth_data, @@ -213,7 +218,7 @@ private: ThreadFromGlobalPool receive_thread; void connect( - const Addresses & addresses, + const Nodes & node, Poco::Timespan connection_timeout); void sendHandshake(); diff --git a/src/Common/ZooKeeper/tests/zkutil_test_commands_new_lib.cpp b/src/Common/ZooKeeper/tests/zkutil_test_commands_new_lib.cpp index aa348163adf..0bca8e0f561 100644 --- a/src/Common/ZooKeeper/tests/zkutil_test_commands_new_lib.cpp +++ b/src/Common/ZooKeeper/tests/zkutil_test_commands_new_lib.cpp @@ -1,6 +1,7 @@ #include #include #include +#include #include #include #include @@ -23,15 +24,23 @@ try Poco::Logger::root().setChannel(channel); Poco::Logger::root().setLevel("trace"); - std::string addresses_arg = argv[1]; - std::vector addresses_strings; - splitInto<','>(addresses_strings, addresses_arg); - ZooKeeper::Addresses addresses; - addresses.reserve(addresses_strings.size()); - for (const auto & address_string : addresses_strings) - addresses.emplace_back(address_string); + std::string hosts_arg = argv[1]; + std::vector hosts_strings; + splitInto<','>(hosts_strings, hosts_arg); + ZooKeeper::Nodes nodes; + nodes.reserve(hosts_strings.size()); + for (auto & host_string : hosts_strings) { + bool secure = bool(startsWith(host_string, "secure://")); - ZooKeeper zk(addresses, {}, {}, {}, {5, 0}, {0, 50000}, {0, 50000}); + if (secure) { + host_string.erase(0, strlen("secure://")); + } + + nodes.emplace_back(ZooKeeper::Node{Poco::Net::SocketAddress{host_string},secure}); + } + + + ZooKeeper zk(nodes, {}, {}, {}, {5, 0}, {0, 50000}, {0, 50000}); Poco::Event event(true); diff --git a/src/Common/ZooKeeper/tests/zookeeper_impl.cpp b/src/Common/ZooKeeper/tests/zookeeper_impl.cpp index da609a7bc72..74ba63514f2 100644 --- a/src/Common/ZooKeeper/tests/zookeeper_impl.cpp +++ b/src/Common/ZooKeeper/tests/zookeeper_impl.cpp @@ -5,7 +5,7 @@ int main() try { - Coordination::ZooKeeper zookeeper({Poco::Net::SocketAddress{"localhost:2181"}}, "", "", "", {30, 0}, {0, 50000}, {0, 50000}); + Coordination::ZooKeeper zookeeper({Coordination::ZooKeeper::Node{Poco::Net::SocketAddress{"localhost:2181"}, false}}, "", "", "", {30, 0}, {0, 50000}, {0, 50000}); zookeeper.create("/test", "hello", false, false, {}, [](const Coordination::CreateResponse & 
response) { diff --git a/tests/integration/test_config_corresponding_root/configs/config.xml b/tests/integration/test_config_corresponding_root/configs/config.xml index 154ebf6c35e..4e130afa84d 100644 --- a/tests/integration/test_config_corresponding_root/configs/config.xml +++ b/tests/integration/test_config_corresponding_root/configs/config.xml @@ -37,7 +37,7 @@ true - + true true sslv2,sslv3 diff --git a/tests/server-test.xml b/tests/server-test.xml index c2356ec1ba0..7f792479065 100644 --- a/tests/server-test.xml +++ b/tests/server-test.xml @@ -31,7 +31,7 @@ true - + true true sslv2,sslv3 From 540e9f7d0df12d772ca203cb36ed26bddf194f9a Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 9 Apr 2020 01:58:15 +0300 Subject: [PATCH 222/484] Fix logged number of inserted rows into ReplicatedMergeTree --- .../MergeTree/ReplicatedMergeTreeBlockOutputStream.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.cpp index fda0a8eb5a8..72255081e6b 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.cpp @@ -147,11 +147,11 @@ void ReplicatedMergeTreeBlockOutputStream::write(const Block & block) /// That is, do not insert the same data to the same partition twice. block_id = part->info.partition_id + "_" + toString(hash_value.words[0]) + "_" + toString(hash_value.words[1]); - LOG_DEBUG(log, "Wrote block with ID '" << block_id << "', " << block.rows() << " rows"); + LOG_DEBUG(log, "Wrote block with ID '" << block_id << "', " << current_block.block.rows() << " rows"); } else { - LOG_DEBUG(log, "Wrote block with " << block.rows() << " rows"); + LOG_DEBUG(log, "Wrote block with " << current_block.block.rows() << " rows"); } try From e544edd72643a804fe9fe75e723f1eadc3a6bae9 Mon Sep 17 00:00:00 2001 From: BohuTANG Date: Thu, 9 Apr 2020 09:43:02 +0800 Subject: [PATCH 223/484] Fix random scramble using seperator character issue during MySQL handshakes --- src/Core/MySQLProtocol.h | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/src/Core/MySQLProtocol.h b/src/Core/MySQLProtocol.h index 1fae57517c1..5adcf836c18 100644 --- a/src/Core/MySQLProtocol.h +++ b/src/Core/MySQLProtocol.h @@ -914,8 +914,16 @@ public: scramble.resize(SCRAMBLE_LENGTH + 1, 0); Poco::RandomInputStream generator; - for (size_t i = 0; i < SCRAMBLE_LENGTH; i++) + /** Generate a random string using ASCII characters but avoid seperator character, + * produce pseudo random numbers between with about 7 bit worth of entropty between 1-127. 
+ * https://github.com/mysql/mysql-server/blob/8.0/mysys/crypt_genhash_impl.cc#L427 + */ + for (size_t i = 0; i < SCRAMBLE_LENGTH; i++){ generator >> scramble[i]; + scramble[i] &= 0x7f; + if (scramble[i] == '\0' || scramble[i] == '$') + scramble[i] = scramble[i] + 1; + } } String getName() override @@ -993,8 +1001,12 @@ public: scramble.resize(SCRAMBLE_LENGTH + 1, 0); Poco::RandomInputStream generator; - for (size_t i = 0; i < SCRAMBLE_LENGTH; i++) + for (size_t i = 0; i < SCRAMBLE_LENGTH; i++) { generator >> scramble[i]; + scramble[i] &= 0x7f; + if (scramble[i] == '\0' || scramble[i] == '$') + scramble[i] = scramble[i] + 1; + } } String getName() override From 5314b277aff685629bfa8ce50a62578cc49c0771 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 9 Apr 2020 05:50:37 +0300 Subject: [PATCH 224/484] Fixed test --- .../0_stateless/01114_mysql_database_engine_segfault.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/01114_mysql_database_engine_segfault.sql b/tests/queries/0_stateless/01114_mysql_database_engine_segfault.sql index 371df4f8dee..af88c5af53a 100644 --- a/tests/queries/0_stateless/01114_mysql_database_engine_segfault.sql +++ b/tests/queries/0_stateless/01114_mysql_database_engine_segfault.sql @@ -1 +1 @@ -CREATE DATABASE conv_main ENGINE = MySQL('127.0.0.1:3456', conv_main, 'metrika', 'password'); -- { serverError 1000 } +CREATE DATABASE conv_main ENGINE = MySQL('127.0.0.1:3456', conv_main, 'metrika', 'password'); -- { serverError 501 } From 17256e0f1e02111da6df9902d7c20be231cda8d9 Mon Sep 17 00:00:00 2001 From: BohuTANG Date: Thu, 9 Apr 2020 10:53:40 +0800 Subject: [PATCH 225/484] add java client integation tests --- tests/integration/test_mysql_protocol/test.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/tests/integration/test_mysql_protocol/test.py b/tests/integration/test_mysql_protocol/test.py index b5ee3cecec9..f75a168d5db 100644 --- a/tests/integration/test_mysql_protocol/test.py +++ b/tests/integration/test_mysql_protocol/test.py @@ -278,15 +278,29 @@ def test_java_client(server_address, java_container): with open(os.path.join(SCRIPT_DIR, 'clients', 'java', '0.reference')) as fp: reference = fp.read() + # database not exists exception. code, (stdout, stderr) = java_container.exec_run('java JavaConnectorTest --host {host} --port {port} --user user_with_empty_password --database ' 'abc'.format(host=server_address, port=server_port), demux=True) assert code == 1 + # empty password passed. code, (stdout, stderr) = java_container.exec_run('java JavaConnectorTest --host {host} --port {port} --user user_with_empty_password --database ' 'default'.format(host=server_address, port=server_port), demux=True) assert code == 0 assert stdout == reference + # non-empty password passed. + code, (stdout, stderr) = java_container.exec_run('java JavaConnectorTest --host {host} --port {port} --user default --password 123 --database ' + 'default'.format(host=server_address, port=server_port), demux=True) + assert code == 0 + assert stdout == reference + + # double-sha1 password passed. 
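+    # (Editor's note: user_with_double_sha1 is defined in this test's user configs;
+    # "double SHA1" is the mysql_native_password scheme, where the server stores
+    # SHA1(SHA1(password)) and checks that the client's reply XORs back to
+    # SHA1(password) under the 20-byte scramble generated during the handshake.)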
+ code, (stdout, stderr) = java_container.exec_run('java JavaConnectorTest --host {host} --port {port} --user user_with_double_sha1 --password abacaba --database ' + 'default'.format(host=server_address, port=server_port), demux=True) + assert code == 0 + assert stdout == reference + def test_types(server_address): client = pymysql.connections.Connection(host=server_address, user='default', password='123', database='default', port=server_port) From eca178fd8eda9c273e49d1bb994684b1bf557b5f Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 9 Apr 2020 06:03:49 +0300 Subject: [PATCH 226/484] Added results from Jack Gao --- website/benchmark_hardware.html | 52 +++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) diff --git a/website/benchmark_hardware.html b/website/benchmark_hardware.html index ab75e7ca063..64eb576cc66 100644 --- a/website/benchmark_hardware.html +++ b/website/benchmark_hardware.html @@ -2427,6 +2427,57 @@ var results = [0.011, 0.007, 0.007] ] }, + + { + "system": "AMD EPYC 7702, 256 cores, 512 GiB, NVMe SSD, version 19.16", + "time": "2020-04-09 00:00:00", + "result": + [ +[0.103, 0.038, 0.043], +[0.072, 0.042, 0.044], +[0.118, 0.051, 0.057], +[0.222, 0.054, 0.051], +[0.339, 0.193, 0.215], +[0.376, 0.189, 0.175], +[0.114, 0.040, 0.052], +[0.085, 0.055, 0.049], +[0.354, 0.180, 0.168], +[0.372, 0.172, 0.161], +[0.276, 0.105, 0.100], +[0.259, 0.110, 0.115], +[0.399, 0.222, 0.207], +[0.586, 0.261, 0.262], +[0.394, 0.251, 0.228], +[0.350, 0.194, 0.189], +[0.705, 0.468, 0.462], +[0.653, 0.368, 0.381], +[1.285, 0.826, 0.922], +[0.223, 0.032, 0.036], +[1.690, 0.186, 0.178], +[1.916, 0.231, 0.189], +[3.551, 0.602, 0.595], +[3.198, 0.607, 0.478], +[0.530, 0.143, 0.138], +[0.311, 0.079, 0.090], +[0.554, 0.137, 0.134], +[1.775, 0.305, 0.293], +[1.480, 0.257, 0.276], +[0.864, 0.838, 0.795], +[0.529, 0.183, 0.177], +[1.051, 0.226, 0.230], +[1.719, 1.074, 1.075], +[2.134, 0.856, 0.873], +[2.123, 0.829, 0.846], +[0.380, 0.285, 0.280], +[0.193, 0.187, 0.183], +[0.080, 0.080, 0.080], +[0.077, 0.066, 0.068], +[0.432, 0.405, 0.444], +[0.050, 0.038, 0.037], +[0.032, 0.028, 0.025], +[0.010, 0.010, 0.008] + ] + }, ]; @@ -2862,6 +2913,7 @@ Results for Pinebook Pro are from Aleksey R. @kITerE.
    Results for AMD Ryzen are from Alexey Milovidov. Firefox was running in background.
    Results for Azure E32s are from Piotr Maśko.
    Results for MacBook Pro are from Denis Glazachev. MacOS Catalina Version 10.15.4 (19E266). For "drop caches", the "Free Up RAM" in CleanMyMac is used.
+Results for AMD EPYC 7702 are from Peng Gao of sina.com.<br/>
    Xeon Gold 6230 server is using 4 x SAMSUNG datacenter class SSD in RAID-10.
    Results for Yandex Managed ClickHouse for "cold cache" are biased and should not be compared, because cache was not flushed for every next query.
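Editor's note, an illustrative sketch rather than part of any patch: PATCH 223 above
introduced the scramble masking for the MySQL handshake, and PATCH 227/228 below only
fix the "seperator" typo and brace placement. Extracted as a standalone function (the
name generateScramble and the length parameter are the editor's; SCRAMBLE_LENGTH is 20
for mysql_native_password):

#include <string>
#include <Poco/RandomStream.h>

std::string generateScramble(size_t scramble_length = 20)
{
    std::string scramble;
    scramble.resize(scramble_length + 1, 0);  /// extra trailing NUL byte, as in the patch
    Poco::RandomInputStream generator;        /// Poco stream of random bytes
    for (size_t i = 0; i < scramble_length; ++i)
    {
        generator >> scramble[i];
        scramble[i] &= 0x7f;                  /// clamp each byte to ASCII 0..127, about 7 bits of entropy
        if (scramble[i] == '\0' || scramble[i] == '$')
            scramble[i] = scramble[i] + 1;    /// '\0' would truncate the string; '$' is MySQL's hash separator
    }
    return scramble;
}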
    From ccf5cb2a668499ad0fd9c275a4e63aeb02cd6d1c Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Thu, 9 Apr 2020 06:24:09 +0300 Subject: [PATCH 227/484] Update MySQLProtocol.h --- src/Core/MySQLProtocol.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Core/MySQLProtocol.h b/src/Core/MySQLProtocol.h index 5adcf836c18..e73e1fddd3a 100644 --- a/src/Core/MySQLProtocol.h +++ b/src/Core/MySQLProtocol.h @@ -914,7 +914,7 @@ public: scramble.resize(SCRAMBLE_LENGTH + 1, 0); Poco::RandomInputStream generator; - /** Generate a random string using ASCII characters but avoid seperator character, + /** Generate a random string using ASCII characters but avoid separator character, * produce pseudo random numbers between with about 7 bit worth of entropty between 1-127. * https://github.com/mysql/mysql-server/blob/8.0/mysys/crypt_genhash_impl.cc#L427 */ From cb6c860d898c7e7b1c99b8f98921d51ff5146dd9 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Thu, 9 Apr 2020 06:25:20 +0300 Subject: [PATCH 228/484] Update MySQLProtocol.h --- src/Core/MySQLProtocol.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/Core/MySQLProtocol.h b/src/Core/MySQLProtocol.h index e73e1fddd3a..5255c6f263e 100644 --- a/src/Core/MySQLProtocol.h +++ b/src/Core/MySQLProtocol.h @@ -918,7 +918,8 @@ public: * produce pseudo random numbers between with about 7 bit worth of entropty between 1-127. * https://github.com/mysql/mysql-server/blob/8.0/mysys/crypt_genhash_impl.cc#L427 */ - for (size_t i = 0; i < SCRAMBLE_LENGTH; i++){ + for (size_t i = 0; i < SCRAMBLE_LENGTH; ++i) + { generator >> scramble[i]; scramble[i] &= 0x7f; if (scramble[i] == '\0' || scramble[i] == '$') @@ -1001,7 +1002,8 @@ public: scramble.resize(SCRAMBLE_LENGTH + 1, 0); Poco::RandomInputStream generator; - for (size_t i = 0; i < SCRAMBLE_LENGTH; i++) { + for (size_t i = 0; i < SCRAMBLE_LENGTH; ++i) + { generator >> scramble[i]; scramble[i] &= 0x7f; if (scramble[i] == '\0' || scramble[i] == '$') From a24471233fce463ffd70bff561f7842d4d0b7bd9 Mon Sep 17 00:00:00 2001 From: Ivan Blinkov Date: Thu, 9 Apr 2020 08:04:12 +0300 Subject: [PATCH 229/484] Update README.md --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 84af1e30a6b..e9ae2c2d2f4 100644 --- a/README.md +++ b/README.md @@ -16,5 +16,6 @@ ClickHouse is an open-source column-oriented database management system that all ## Upcoming Events * [ClickHouse in Avito (online in Russian)](https://avitotech.timepad.ru/event/1290051/) on April 9, 2020. +* [ClickHouse Monitoring Round Table (online in English)](https://www.eventbrite.com/e/clickhouse-april-virtual-meetup-tickets-102272923066) on April 15, 2020. * [ClickHouse Workshop in Novosibirsk](https://2020.codefest.ru/lecture/1628) on TBD date. * [Yandex C++ Open-Source Sprints in Moscow](https://events.yandex.ru/events/otkrytyj-kod-v-yandek-28-03-2020) on TBD date. From ae6267070eb6b63d510a78e2558b7a3402a592da Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Thu, 9 Apr 2020 09:28:13 +0300 Subject: [PATCH 230/484] Fix style. 
--- src/Processors/QueryPipeline.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Processors/QueryPipeline.cpp b/src/Processors/QueryPipeline.cpp index d20086e726f..a13547568d1 100644 --- a/src/Processors/QueryPipeline.cpp +++ b/src/Processors/QueryPipeline.cpp @@ -151,10 +151,10 @@ void QueryPipeline::init(Pipes pipes) totals.emplace_back(totals_port); } - if (auto * extremes_port_ = pipe.getExtremesPort()) + if (auto * port = pipe.getExtremesPort()) { - assertBlocksHaveEqualStructure(current_header, extremes_port_->getHeader(), "QueryPipeline"); - extremes.emplace_back(extremes_port_); + assertBlocksHaveEqualStructure(current_header, port->getHeader(), "QueryPipeline"); + extremes.emplace_back(port); } streams.addStream(&pipe.getPort(), pipe.maxParallelStreams()); From e28e5b24e967ac594240d9bd8e0584214b4fcda5 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Thu, 9 Apr 2020 09:29:38 +0300 Subject: [PATCH 231/484] Update test. --- tests/queries/0_stateless/01232_extremes.sql | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/queries/0_stateless/01232_extremes.sql b/tests/queries/0_stateless/01232_extremes.sql index 80bf628d669..9379dc1cd38 100644 --- a/tests/queries/0_stateless/01232_extremes.sql +++ b/tests/queries/0_stateless/01232_extremes.sql @@ -27,6 +27,8 @@ drop table if exists shard_0.num_01232; drop table if exists shard_0.num2_01232; drop table if exists shard_1.num_01232; drop table if exists shard_1.num2_01232; +drop table if exists distr; +drop table if exists distr2; create table shard_0.num_01232 (number UInt64) engine = MergeTree order by number; create table shard_1.num_01232 (number UInt64) engine = MergeTree order by number; @@ -48,4 +50,6 @@ drop table if exists shard_0.num_01232; drop table if exists shard_0.num2_01232; drop table if exists shard_1.num_01232; drop table if exists shard_1.num2_01232; +drop table if exists distr; +drop table if exists distr2; From 36a5b57ac4eaeac7f2356d6811acd1b0d1892523 Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Wed, 8 Apr 2020 02:57:14 +0300 Subject: [PATCH 232/484] Use "CREATE SETTINGS PROFILE name SETTINGS INHERIT parent" instead of "CREATE SETTINGS PROFILE name SETTINGS PROFILE parent". 
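Editor's sketch, not a patch excerpt: the backward-compatible piece of this change is a
parser flag. INHERIT is recognized as an alias of PROFILE only while parsing a settings
profile, so plain SETTINGS clauses elsewhere (CREATE USER, CREATE ROLE) are unaffected.
The helper name parseParentProfileKeyword is invented; the real change inlines this
check in ParserSettingsProfileElement::parseImpl:

#include <Parsers/CommonParsers.h>  /// DB::ParserKeyword
#include <Parsers/IParser.h>        /// DB::IParser::Pos, DB::Expected

static bool parseParentProfileKeyword(DB::IParser::Pos & pos, DB::Expected & expected, bool enable_inherit_keyword)
{
    /// The old syntax keeps working; the new keyword is gated by the flag so it
    /// only takes effect inside CREATE/ALTER SETTINGS PROFILE.
    return DB::ParserKeyword{"PROFILE"}.ignore(pos, expected)
        || (enable_inherit_keyword && DB::ParserKeyword{"INHERIT"}.ignore(pos, expected));
}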
--- ...InterpreterShowCreateAccessEntityQuery.cpp | 3 +++ src/Parsers/ASTCreateSettingsProfileQuery.h | 2 ++ src/Parsers/ASTSettingsProfileElement.cpp | 10 +++++++++- src/Parsers/ASTSettingsProfileElement.h | 3 +++ src/Parsers/IParser.h | 2 +- .../ParserCreateSettingsProfileQuery.cpp | 2 +- .../ParserCreateSettingsProfileQuery.h | 4 ++-- src/Parsers/ParserSettingsProfileElement.cpp | 19 +++++++++++++++---- src/Parsers/ParserSettingsProfileElement.h | 8 +++++++- .../test_disk_access_storage/test.py | 4 ++-- .../integration/test_settings_profile/test.py | 11 +++++++++++ 11 files changed, 56 insertions(+), 12 deletions(-) diff --git a/src/Interpreters/InterpreterShowCreateAccessEntityQuery.cpp b/src/Interpreters/InterpreterShowCreateAccessEntityQuery.cpp index d2f435106a8..e579ade11ca 100644 --- a/src/Interpreters/InterpreterShowCreateAccessEntityQuery.cpp +++ b/src/Interpreters/InterpreterShowCreateAccessEntityQuery.cpp @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -101,6 +102,8 @@ namespace query->settings = profile.elements.toAST(); else query->settings = profile.elements.toASTWithNames(*manager); + if (query->settings) + query->settings->setUseInheritKeyword(true); } if (!profile.to_roles.empty()) diff --git a/src/Parsers/ASTCreateSettingsProfileQuery.h b/src/Parsers/ASTCreateSettingsProfileQuery.h index cc133397db4..eabe1ba441b 100644 --- a/src/Parsers/ASTCreateSettingsProfileQuery.h +++ b/src/Parsers/ASTCreateSettingsProfileQuery.h @@ -12,10 +12,12 @@ class ASTExtendedRoleSet; /** CREATE SETTINGS PROFILE [IF NOT EXISTS | OR REPLACE] name * [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...] + * [TO {role [,...] | ALL | ALL EXCEPT role [,...]}] * * ALTER SETTINGS PROFILE [IF EXISTS] name * [RENAME TO new_name] * [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...] + * [TO {role [,...] | ALL | ALL EXCEPT role [,...]}] */ class ASTCreateSettingsProfileQuery : public IAST, public ASTQueryWithOnCluster { diff --git a/src/Parsers/ASTSettingsProfileElement.cpp b/src/Parsers/ASTSettingsProfileElement.cpp index b3f4032d14c..24f1aa60813 100644 --- a/src/Parsers/ASTSettingsProfileElement.cpp +++ b/src/Parsers/ASTSettingsProfileElement.cpp @@ -25,7 +25,8 @@ void ASTSettingsProfileElement::formatImpl(const FormatSettings & settings, Form { if (!parent_profile.empty()) { - settings.ostr << (settings.hilite ? IAST::hilite_keyword : "") << "PROFILE " << (settings.hilite ? IAST::hilite_none : ""); + settings.ostr << (settings.hilite ? IAST::hilite_keyword : "") << (use_inherit_keyword ? "INHERIT" : "PROFILE") << " " + << (settings.hilite ? IAST::hilite_none : ""); formatProfileNameOrID(parent_profile, id_mode, settings); return; } @@ -85,4 +86,11 @@ void ASTSettingsProfileElements::formatImpl(const FormatSettings & settings, For } } + +void ASTSettingsProfileElements::setUseInheritKeyword(bool use_inherit_keyword_) +{ + for (auto & element : elements) + element->use_inherit_keyword = use_inherit_keyword_; +} + } diff --git a/src/Parsers/ASTSettingsProfileElement.h b/src/Parsers/ASTSettingsProfileElement.h index 0470b51cf85..ee1ee28c383 100644 --- a/src/Parsers/ASTSettingsProfileElement.h +++ b/src/Parsers/ASTSettingsProfileElement.h @@ -19,6 +19,7 @@ public: Field max_value; std::optional readonly; bool id_mode = false; /// If true then `parent_profile` keeps UUID, not a name. 
+ bool use_inherit_keyword = false; /// If true then this element is a part of ASTCreateSettingsProfileQuery. bool empty() const { return parent_profile.empty() && name.empty(); } @@ -41,5 +42,7 @@ public: String getID(char) const override { return "SettingsProfileElements"; } ASTPtr clone() const override { return std::make_shared(*this); } void formatImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const override; + + void setUseInheritKeyword(bool use_inherit_keyword_); }; } diff --git a/src/Parsers/IParser.h b/src/Parsers/IParser.h index 925140bd25e..5bfbf1ed476 100644 --- a/src/Parsers/IParser.h +++ b/src/Parsers/IParser.h @@ -126,7 +126,7 @@ public: return parse(pos, node, expected); } - virtual ~IParser() {} + virtual ~IParser() = default; }; using ParserPtr = std::unique_ptr; diff --git a/src/Parsers/ParserCreateSettingsProfileQuery.cpp b/src/Parsers/ParserCreateSettingsProfileQuery.cpp index 4d3ed2f6e63..5b33fed2fa0 100644 --- a/src/Parsers/ParserCreateSettingsProfileQuery.cpp +++ b/src/Parsers/ParserCreateSettingsProfileQuery.cpp @@ -33,7 +33,7 @@ namespace return false; ASTPtr new_settings_ast; - if (!ParserSettingsProfileElements{}.useIDMode(id_mode).parse(pos, new_settings_ast, expected)) + if (!ParserSettingsProfileElements{}.useIDMode(id_mode).enableInheritKeyword(true).parse(pos, new_settings_ast, expected)) return false; if (!settings) diff --git a/src/Parsers/ParserCreateSettingsProfileQuery.h b/src/Parsers/ParserCreateSettingsProfileQuery.h index 6797fc884fa..073a8ca75ae 100644 --- a/src/Parsers/ParserCreateSettingsProfileQuery.h +++ b/src/Parsers/ParserCreateSettingsProfileQuery.h @@ -7,11 +7,11 @@ namespace DB { /** Parses queries like * CREATE SETTINGS PROFILE [IF NOT EXISTS | OR REPLACE] name - * [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...] + * [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | INHERIT 'profile_name'] [,...] * * ALTER SETTINGS PROFILE [IF EXISTS] name * [RENAME TO new_name] - * [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...] + * [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | INHERIT 'profile_name'] [,...] 
*/ class ParserCreateSettingsProfileQuery : public IParserBase { diff --git a/src/Parsers/ParserSettingsProfileElement.cpp b/src/Parsers/ParserSettingsProfileElement.cpp index 06fa58fde4e..31bc339f544 100644 --- a/src/Parsers/ParserSettingsProfileElement.cpp +++ b/src/Parsers/ParserSettingsProfileElement.cpp @@ -108,7 +108,8 @@ bool ParserSettingsProfileElement::parseImpl(Pos & pos, ASTPtr & node, Expected Field max_value; std::optional readonly; - if (ParserKeyword{"PROFILE"}.ignore(pos, expected)) + if (ParserKeyword{"PROFILE"}.ignore(pos, expected) || + (enable_inherit_keyword && ParserKeyword{"INHERIT"}.ignore(pos, expected))) { if (!parseProfileNameOrID(pos, expected, id_mode, parent_profile)) return false; @@ -120,9 +121,15 @@ bool ParserSettingsProfileElement::parseImpl(Pos & pos, ASTPtr & node, Expected return false; name = getIdentifierName(name_ast); + bool has_value_or_constraint = false; while (parseValue(pos, expected, value) || parseMinMaxValue(pos, expected, min_value, max_value) || parseReadonlyOrWritableKeyword(pos, expected, readonly)) - ; + { + has_value_or_constraint = true; + } + + if (!has_value_or_constraint) + return false; } auto result = std::make_shared(); @@ -133,6 +140,7 @@ bool ParserSettingsProfileElement::parseImpl(Pos & pos, ASTPtr & node, Expected result->max_value = std::move(max_value); result->readonly = readonly; result->id_mode = id_mode; + result->use_inherit_keyword = enable_inherit_keyword; node = result; return true; } @@ -142,12 +150,15 @@ bool ParserSettingsProfileElements::parseImpl(Pos & pos, ASTPtr & node, Expected { std::vector> elements; - if (!ParserKeyword{"NONE"}.ignore(pos, expected)) + if (ParserKeyword{"NONE"}.ignore(pos, expected)) + { + } + else { do { ASTPtr ast; - if (!ParserSettingsProfileElement{}.useIDMode(id_mode).parse(pos, ast, expected)) + if (!ParserSettingsProfileElement{}.useIDMode(id_mode).enableInheritKeyword(enable_inherit_keyword).parse(pos, ast, expected)) return false; auto element = typeid_cast>(ast); elements.push_back(std::move(element)); diff --git a/src/Parsers/ParserSettingsProfileElement.h b/src/Parsers/ParserSettingsProfileElement.h index ec8e1abb5b5..309c797e645 100644 --- a/src/Parsers/ParserSettingsProfileElement.h +++ b/src/Parsers/ParserSettingsProfileElement.h @@ -12,6 +12,7 @@ class ParserSettingsProfileElement : public IParserBase { public: ParserSettingsProfileElement & useIDMode(bool enable_) { id_mode = enable_; return *this; } + ParserSettingsProfileElement & enableInheritKeyword(bool enable_) { enable_inherit_keyword = enable_; return *this; } protected: const char * getName() const override { return "SettingsProfileElement"; } @@ -19,6 +20,7 @@ protected: private: bool id_mode = false; + bool enable_inherit_keyword = false; }; @@ -26,6 +28,7 @@ class ParserSettingsProfileElements : public IParserBase { public: ParserSettingsProfileElements & useIDMode(bool enable_) { id_mode = enable_; return *this; } + ParserSettingsProfileElements & enableInheritKeyword(bool enable_) { enable_inherit_keyword = enable_; return *this; } protected: const char * getName() const override { return "SettingsProfileElements"; } @@ -33,4 +36,7 @@ protected: private: bool id_mode = false; -};} + bool enable_inherit_keyword = false; +}; + +} diff --git a/tests/integration/test_disk_access_storage/test.py b/tests/integration/test_disk_access_storage/test.py index 1f6577b9dd1..019c1073205 100644 --- a/tests/integration/test_disk_access_storage/test.py +++ b/tests/integration/test_disk_access_storage/test.py @@ -47,7 
+47,7 @@ def test_create(): assert instance.query("SHOW CREATE ROLE rx") == "CREATE ROLE rx SETTINGS PROFILE s1\n" assert instance.query("SHOW GRANTS FOR rx") == "" assert instance.query("SHOW CREATE SETTINGS PROFILE s1") == "CREATE SETTINGS PROFILE s1 SETTINGS max_memory_usage = 123456789 MIN 100000000 MAX 200000000\n" - assert instance.query("SHOW CREATE SETTINGS PROFILE s2") == "CREATE SETTINGS PROFILE s2 SETTINGS PROFILE s1 TO u2\n" + assert instance.query("SHOW CREATE SETTINGS PROFILE s2") == "CREATE SETTINGS PROFILE s2 SETTINGS INHERIT s1 TO u2\n" check() instance.restart_clickhouse() # Check persistency @@ -77,7 +77,7 @@ def test_alter(): assert instance.query("SHOW GRANTS FOR rx") == "GRANT SELECT ON mydb.* TO rx WITH GRANT OPTION\n" assert instance.query("SHOW GRANTS FOR ry") == "GRANT rx TO ry WITH ADMIN OPTION\n" assert instance.query("SHOW CREATE SETTINGS PROFILE s1") == "CREATE SETTINGS PROFILE s1 SETTINGS max_memory_usage = 987654321 READONLY\n" - assert instance.query("SHOW CREATE SETTINGS PROFILE s2") == "CREATE SETTINGS PROFILE s2 SETTINGS PROFILE s1 TO u2\n" + assert instance.query("SHOW CREATE SETTINGS PROFILE s2") == "CREATE SETTINGS PROFILE s2 SETTINGS INHERIT s1 TO u2\n" check() instance.restart_clickhouse() # Check persistency diff --git a/tests/integration/test_settings_profile/test.py b/tests/integration/test_settings_profile/test.py index 6866c6b3901..7ad3041b81e 100644 --- a/tests/integration/test_settings_profile/test.py +++ b/tests/integration/test_settings_profile/test.py @@ -31,22 +31,26 @@ def reset_after_test(): def test_settings_profile(): # Set settings and constraints via CREATE SETTINGS PROFILE ... TO user instance.query("CREATE SETTINGS PROFILE xyz SETTINGS max_memory_usage = 100000001 MIN 90000000 MAX 110000000 TO robin") + assert instance.query("SHOW CREATE SETTINGS PROFILE xyz") == "CREATE SETTINGS PROFILE xyz SETTINGS max_memory_usage = 100000001 MIN 90000000 MAX 110000000 TO robin\n" assert instance.query("SELECT value FROM system.settings WHERE name = 'max_memory_usage'", user="robin") == "100000001\n" assert "Setting max_memory_usage shouldn't be less than 90000000" in instance.query_and_get_error("SET max_memory_usage = 80000000", user="robin") assert "Setting max_memory_usage shouldn't be greater than 110000000" in instance.query_and_get_error("SET max_memory_usage = 120000000", user="robin") instance.query("ALTER SETTINGS PROFILE xyz TO NONE") + assert instance.query("SHOW CREATE SETTINGS PROFILE xyz") == "CREATE SETTINGS PROFILE xyz SETTINGS max_memory_usage = 100000001 MIN 90000000 MAX 110000000\n" assert instance.query("SELECT value FROM system.settings WHERE name = 'max_memory_usage'", user="robin") == "10000000000\n" instance.query("SET max_memory_usage = 80000000", user="robin") instance.query("SET max_memory_usage = 120000000", user="robin") # Set settings and constraints via CREATE USER ... 
SETTINGS PROFILE instance.query("ALTER USER robin SETTINGS PROFILE xyz") + assert instance.query("SHOW CREATE USER robin") == "CREATE USER robin SETTINGS PROFILE xyz\n" assert instance.query("SELECT value FROM system.settings WHERE name = 'max_memory_usage'", user="robin") == "100000001\n" assert "Setting max_memory_usage shouldn't be less than 90000000" in instance.query_and_get_error("SET max_memory_usage = 80000000", user="robin") assert "Setting max_memory_usage shouldn't be greater than 110000000" in instance.query_and_get_error("SET max_memory_usage = 120000000", user="robin") instance.query("ALTER USER robin SETTINGS NONE") + assert instance.query("SHOW CREATE USER robin") == "CREATE USER robin\n" assert instance.query("SELECT value FROM system.settings WHERE name = 'max_memory_usage'", user="robin") == "10000000000\n" instance.query("SET max_memory_usage = 80000000", user="robin") instance.query("SET max_memory_usage = 120000000", user="robin") @@ -57,6 +61,8 @@ def test_settings_profile_from_granted_role(): instance.query("CREATE SETTINGS PROFILE xyz SETTINGS max_memory_usage = 100000001 MIN 90000000 MAX 110000000") instance.query("CREATE ROLE worker SETTINGS PROFILE xyz") instance.query("GRANT worker TO robin") + assert instance.query("SHOW CREATE SETTINGS PROFILE xyz") == "CREATE SETTINGS PROFILE xyz SETTINGS max_memory_usage = 100000001 MIN 90000000 MAX 110000000\n" + assert instance.query("SHOW CREATE ROLE worker") == "CREATE ROLE worker SETTINGS PROFILE xyz\n" assert instance.query("SELECT value FROM system.settings WHERE name = 'max_memory_usage'", user="robin") == "100000001\n" assert "Setting max_memory_usage shouldn't be less than 90000000" in instance.query_and_get_error("SET max_memory_usage = 80000000", user="robin") assert "Setting max_memory_usage shouldn't be greater than 110000000" in instance.query_and_get_error("SET max_memory_usage = 120000000", user="robin") @@ -68,17 +74,20 @@ def test_settings_profile_from_granted_role(): instance.query("ALTER ROLE worker SETTINGS NONE") instance.query("GRANT worker TO robin") + assert instance.query("SHOW CREATE ROLE worker") == "CREATE ROLE worker\n" assert instance.query("SELECT value FROM system.settings WHERE name = 'max_memory_usage'", user="robin") == "10000000000\n" instance.query("SET max_memory_usage = 80000000", user="robin") instance.query("SET max_memory_usage = 120000000", user="robin") # Set settings and constraints via CREATE SETTINGS PROFILE ... 
TO granted role instance.query("ALTER SETTINGS PROFILE xyz TO worker") + assert instance.query("SHOW CREATE SETTINGS PROFILE xyz") == "CREATE SETTINGS PROFILE xyz SETTINGS max_memory_usage = 100000001 MIN 90000000 MAX 110000000 TO worker\n" assert instance.query("SELECT value FROM system.settings WHERE name = 'max_memory_usage'", user="robin") == "100000001\n" assert "Setting max_memory_usage shouldn't be less than 90000000" in instance.query_and_get_error("SET max_memory_usage = 80000000", user="robin") assert "Setting max_memory_usage shouldn't be greater than 110000000" in instance.query_and_get_error("SET max_memory_usage = 120000000", user="robin") instance.query("ALTER SETTINGS PROFILE xyz TO NONE") + assert instance.query("SHOW CREATE SETTINGS PROFILE xyz") == "CREATE SETTINGS PROFILE xyz SETTINGS max_memory_usage = 100000001 MIN 90000000 MAX 110000000\n" assert instance.query("SELECT value FROM system.settings WHERE name = 'max_memory_usage'", user="robin") == "10000000000\n" instance.query("SET max_memory_usage = 80000000", user="robin") instance.query("SET max_memory_usage = 120000000", user="robin") @@ -87,6 +96,8 @@ def test_settings_profile_from_granted_role(): def test_inheritance_of_settings_profile(): instance.query("CREATE SETTINGS PROFILE xyz SETTINGS max_memory_usage = 100000002 READONLY") instance.query("CREATE SETTINGS PROFILE alpha SETTINGS PROFILE xyz TO robin") + assert instance.query("SHOW CREATE SETTINGS PROFILE xyz") == "CREATE SETTINGS PROFILE xyz SETTINGS max_memory_usage = 100000002 READONLY\n" + assert instance.query("SHOW CREATE SETTINGS PROFILE alpha") == "CREATE SETTINGS PROFILE alpha SETTINGS INHERIT xyz TO robin\n" assert instance.query("SELECT value FROM system.settings WHERE name = 'max_memory_usage'", user="robin") == "100000002\n" assert "Setting max_memory_usage should not be changed" in instance.query_and_get_error("SET max_memory_usage = 80000000", user="robin") From c97d12a19c96f3857864c6f00a75d0c0ede2c341 Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Wed, 8 Apr 2020 03:50:27 +0300 Subject: [PATCH 233/484] Enable access management by default for all integration tests. 
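Editor's note: the XML bodies of the users.d files in the diff below were lost to tag
stripping; only stray "-"/"+" columns and a bare "1" remain. Reconstructed from context,
the per-test access_management.xml override that this patch deletes and re-centralizes
as tests/integration/helpers/0_common_instance_users.xml is:

<yandex>
    <users>
        <default>
            <access_management>1</access_management>
        </default>
    </users>
</yandex>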
--- .../0_common_instance_users.xml} | 0 tests/integration/helpers/cluster.py | 1 + .../test_allowed_client_hosts/configs/users.xml | 13 ------------- .../configs/users.d/access_management.xml | 7 ------- tests/integration/test_authentication/test.py | 2 +- .../configs/users.d/access_management.xml | 7 ------- .../test_disk_access_storage/test.py | 2 +- .../configs/users.d/access_management.xml | 7 ------- tests/integration/test_grant_and_revoke/test.py | 2 +- ...access_management.xml => assign_myquota.xml} | 2 +- .../configs/users.d/drop_default_quota.xml | 5 +++++ tests/integration/test_quota/configs/users.xml | 17 ----------------- .../configs/users.d/access_management.xml | 7 ------- .../configs/{config.d => }/remote_servers.xml | 0 .../configs/users.d/access_management.xml | 7 ------- .../test.py | 6 +++--- .../configs/users.d/access_management.xml | 7 ------- tests/integration/test_settings_profile/test.py | 2 +- 18 files changed, 14 insertions(+), 80 deletions(-) rename tests/integration/{test_access_control_on_cluster/configs/users.d/access_management.xml => helpers/0_common_instance_users.xml} (100%) delete mode 100644 tests/integration/test_allowed_client_hosts/configs/users.xml delete mode 100644 tests/integration/test_authentication/configs/users.d/access_management.xml delete mode 100644 tests/integration/test_disk_access_storage/configs/users.d/access_management.xml delete mode 100644 tests/integration/test_grant_and_revoke/configs/users.d/access_management.xml rename tests/integration/test_quota/configs/users.d/{access_management.xml => assign_myquota.xml} (60%) create mode 100644 tests/integration/test_quota/configs/users.d/drop_default_quota.xml delete mode 100644 tests/integration/test_quota/configs/users.xml delete mode 100644 tests/integration/test_row_policy/configs/users.d/access_management.xml rename tests/integration/test_settings_constraints_distributed/configs/{config.d => }/remote_servers.xml (100%) delete mode 100644 tests/integration/test_settings_constraints_distributed/configs/users.d/access_management.xml delete mode 100644 tests/integration/test_settings_profile/configs/users.d/access_management.xml diff --git a/tests/integration/test_access_control_on_cluster/configs/users.d/access_management.xml b/tests/integration/helpers/0_common_instance_users.xml similarity index 100% rename from tests/integration/test_access_control_on_cluster/configs/users.d/access_management.xml rename to tests/integration/helpers/0_common_instance_users.xml diff --git a/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py index 5dc93cb338a..69f8206b2c1 100644 --- a/tests/integration/helpers/cluster.py +++ b/tests/integration/helpers/cluster.py @@ -923,6 +923,7 @@ class ClickHouseInstance: # The file is named with 0_ prefix to be processed before other configuration overloads. 
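        # (Editor's note: users.d gets the same 0_ treatment just below, so every
        # test instance now receives 0_common_instance_users.xml and, with it,
        # access_management=1 for the default user.)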
shutil.copy(p.join(HELPERS_DIR, '0_common_instance_config.xml'), self.config_d_dir) + shutil.copy(p.join(HELPERS_DIR, '0_common_instance_users.xml'), users_d_dir) # Generate and write macros file macros = self.macros.copy() diff --git a/tests/integration/test_allowed_client_hosts/configs/users.xml b/tests/integration/test_allowed_client_hosts/configs/users.xml deleted file mode 100644 index 3142ec5355a..00000000000 --- a/tests/integration/test_allowed_client_hosts/configs/users.xml +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - - - - default - - - - diff --git a/tests/integration/test_authentication/configs/users.d/access_management.xml b/tests/integration/test_authentication/configs/users.d/access_management.xml deleted file mode 100644 index 7e799cb7b10..00000000000 --- a/tests/integration/test_authentication/configs/users.d/access_management.xml +++ /dev/null @@ -1,7 +0,0 @@ - - - - 1 - - - diff --git a/tests/integration/test_authentication/test.py b/tests/integration/test_authentication/test.py index b7ffd1ed35b..483b59813e5 100644 --- a/tests/integration/test_authentication/test.py +++ b/tests/integration/test_authentication/test.py @@ -2,7 +2,7 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -instance = cluster.add_instance('instance', config_dir="configs") +instance = cluster.add_instance('instance') @pytest.fixture(scope="module", autouse=True) diff --git a/tests/integration/test_disk_access_storage/configs/users.d/access_management.xml b/tests/integration/test_disk_access_storage/configs/users.d/access_management.xml deleted file mode 100644 index 7e799cb7b10..00000000000 --- a/tests/integration/test_disk_access_storage/configs/users.d/access_management.xml +++ /dev/null @@ -1,7 +0,0 @@ - - - - 1 - - - diff --git a/tests/integration/test_disk_access_storage/test.py b/tests/integration/test_disk_access_storage/test.py index 019c1073205..0db0e21afef 100644 --- a/tests/integration/test_disk_access_storage/test.py +++ b/tests/integration/test_disk_access_storage/test.py @@ -2,7 +2,7 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -instance = cluster.add_instance('instance', config_dir='configs', stay_alive=True) +instance = cluster.add_instance('instance', stay_alive=True) @pytest.fixture(scope="module", autouse=True) diff --git a/tests/integration/test_grant_and_revoke/configs/users.d/access_management.xml b/tests/integration/test_grant_and_revoke/configs/users.d/access_management.xml deleted file mode 100644 index 7e799cb7b10..00000000000 --- a/tests/integration/test_grant_and_revoke/configs/users.d/access_management.xml +++ /dev/null @@ -1,7 +0,0 @@ - - - - 1 - - - diff --git a/tests/integration/test_grant_and_revoke/test.py b/tests/integration/test_grant_and_revoke/test.py index 25e0e9882de..6f4b0be5325 100644 --- a/tests/integration/test_grant_and_revoke/test.py +++ b/tests/integration/test_grant_and_revoke/test.py @@ -3,7 +3,7 @@ from helpers.cluster import ClickHouseCluster import re cluster = ClickHouseCluster(__file__) -instance = cluster.add_instance('instance', config_dir="configs") +instance = cluster.add_instance('instance') @pytest.fixture(scope="module", autouse=True) diff --git a/tests/integration/test_quota/configs/users.d/access_management.xml b/tests/integration/test_quota/configs/users.d/assign_myquota.xml similarity index 60% rename from tests/integration/test_quota/configs/users.d/access_management.xml rename to 
tests/integration/test_quota/configs/users.d/assign_myquota.xml index 7e799cb7b10..8b98ade8aeb 100644 --- a/tests/integration/test_quota/configs/users.d/access_management.xml +++ b/tests/integration/test_quota/configs/users.d/assign_myquota.xml @@ -1,7 +1,7 @@ - 1 + myQuota diff --git a/tests/integration/test_quota/configs/users.d/drop_default_quota.xml b/tests/integration/test_quota/configs/users.d/drop_default_quota.xml new file mode 100644 index 00000000000..5f53ecf5f49 --- /dev/null +++ b/tests/integration/test_quota/configs/users.d/drop_default_quota.xml @@ -0,0 +1,5 @@ + + + + + diff --git a/tests/integration/test_quota/configs/users.xml b/tests/integration/test_quota/configs/users.xml deleted file mode 100644 index 4412345a731..00000000000 --- a/tests/integration/test_quota/configs/users.xml +++ /dev/null @@ -1,17 +0,0 @@ - - - - - - - - - - - ::/0 - - default - myQuota - - - diff --git a/tests/integration/test_row_policy/configs/users.d/access_management.xml b/tests/integration/test_row_policy/configs/users.d/access_management.xml deleted file mode 100644 index 7e799cb7b10..00000000000 --- a/tests/integration/test_row_policy/configs/users.d/access_management.xml +++ /dev/null @@ -1,7 +0,0 @@ - - - - 1 - - - diff --git a/tests/integration/test_settings_constraints_distributed/configs/config.d/remote_servers.xml b/tests/integration/test_settings_constraints_distributed/configs/remote_servers.xml similarity index 100% rename from tests/integration/test_settings_constraints_distributed/configs/config.d/remote_servers.xml rename to tests/integration/test_settings_constraints_distributed/configs/remote_servers.xml diff --git a/tests/integration/test_settings_constraints_distributed/configs/users.d/access_management.xml b/tests/integration/test_settings_constraints_distributed/configs/users.d/access_management.xml deleted file mode 100644 index 7e799cb7b10..00000000000 --- a/tests/integration/test_settings_constraints_distributed/configs/users.d/access_management.xml +++ /dev/null @@ -1,7 +0,0 @@ - - - - 1 - - - diff --git a/tests/integration/test_settings_constraints_distributed/test.py b/tests/integration/test_settings_constraints_distributed/test.py index a58c037a2fc..51999902e7d 100644 --- a/tests/integration/test_settings_constraints_distributed/test.py +++ b/tests/integration/test_settings_constraints_distributed/test.py @@ -8,9 +8,9 @@ from helpers.test_tools import assert_eq_with_retry cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', config_dir="configs") -node2 = cluster.add_instance('node2', config_dir="configs") -distributed = cluster.add_instance('distributed', config_dir="configs") +node1 = cluster.add_instance('node1') +node2 = cluster.add_instance('node2') +distributed = cluster.add_instance('distributed', main_configs=["configs/remote_servers.xml"]) @pytest.fixture(scope="module") diff --git a/tests/integration/test_settings_profile/configs/users.d/access_management.xml b/tests/integration/test_settings_profile/configs/users.d/access_management.xml deleted file mode 100644 index 7e799cb7b10..00000000000 --- a/tests/integration/test_settings_profile/configs/users.d/access_management.xml +++ /dev/null @@ -1,7 +0,0 @@ - - - - 1 - - - diff --git a/tests/integration/test_settings_profile/test.py b/tests/integration/test_settings_profile/test.py index 7ad3041b81e..8b9d023d56f 100644 --- a/tests/integration/test_settings_profile/test.py +++ b/tests/integration/test_settings_profile/test.py @@ -2,7 +2,7 @@ import pytest from helpers.cluster import 
ClickHouseCluster cluster = ClickHouseCluster(__file__) -instance = cluster.add_instance('instance', config_dir="configs") +instance = cluster.add_instance('instance') @pytest.fixture(scope="module", autouse=True) From 23ac1ee87c87ae20152bf3593284203f01bacfdc Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Wed, 8 Apr 2020 04:35:15 +0300 Subject: [PATCH 234/484] readonly user now can execute SHOW CREATE for access entities. --- src/Access/ContextAccess.cpp | 3 ++- .../__init__.py | 0 .../configs/users.d/extra_users.xml | 13 ++++++++++ .../test_enabling_access_management/test.py | 24 +++++++++++++++++++ 4 files changed, 39 insertions(+), 1 deletion(-) create mode 100644 tests/integration/test_enabling_access_management/__init__.py create mode 100644 tests/integration/test_enabling_access_management/configs/users.d/extra_users.xml create mode 100644 tests/integration/test_enabling_access_management/test.py diff --git a/src/Access/ContextAccess.cpp b/src/Access/ContextAccess.cpp index 14775f7a4de..cf788a0a63e 100644 --- a/src/Access/ContextAccess.cpp +++ b/src/Access/ContextAccess.cpp @@ -408,9 +408,10 @@ boost::shared_ptr ContextAccess::calculateResultAccess(bool static const AccessFlags dictionary_ddl = AccessType::CREATE_DICTIONARY | AccessType::DROP_DICTIONARY; static const AccessFlags table_and_dictionary_ddl = table_ddl | dictionary_ddl; static const AccessFlags write_table_access = AccessType::INSERT | AccessType::OPTIMIZE; + static const AccessFlags write_dcl_access = AccessType::ACCESS_MANAGEMENT - AccessType::SHOW_ACCESS; if (readonly_) - merged_access->revoke(write_table_access | table_and_dictionary_ddl | AccessType::SYSTEM | AccessType::KILL_QUERY | AccessType::ACCESS_MANAGEMENT); + merged_access->revoke(write_table_access | table_and_dictionary_ddl | write_dcl_access | AccessType::SYSTEM | AccessType::KILL_QUERY); if (readonly_ == 1) { diff --git a/tests/integration/test_enabling_access_management/__init__.py b/tests/integration/test_enabling_access_management/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_enabling_access_management/configs/users.d/extra_users.xml b/tests/integration/test_enabling_access_management/configs/users.d/extra_users.xml new file mode 100644 index 00000000000..7d87a29a915 --- /dev/null +++ b/tests/integration/test_enabling_access_management/configs/users.d/extra_users.xml @@ -0,0 +1,13 @@ + + + + + readonly + 1 + + + + default + + + diff --git a/tests/integration/test_enabling_access_management/test.py b/tests/integration/test_enabling_access_management/test.py new file mode 100644 index 00000000000..abb8cd6c07a --- /dev/null +++ b/tests/integration/test_enabling_access_management/test.py @@ -0,0 +1,24 @@ +import pytest +from helpers.cluster import ClickHouseCluster + +cluster = ClickHouseCluster(__file__) +instance = cluster.add_instance('instance', config_dir="configs") + +@pytest.fixture(scope="module", autouse=True) +def started_cluster(): + try: + cluster.start() + yield cluster + + finally: + cluster.shutdown() + + +def test_enabling_access_management(): + instance.query("CREATE USER Alex", user='default') + assert instance.query("SHOW CREATE USER Alex", user='default') == "CREATE USER Alex\n" + assert instance.query("SHOW CREATE USER Alex", user='readonly') == "CREATE USER Alex\n" + assert "Not enough privileges" in instance.query_and_get_error("SHOW CREATE USER Alex", user='xyz') + + assert "Cannot execute query in readonly mode" in instance.query_and_get_error("CREATE USER Robin", 
user='readonly') + assert "Not enough privileges" in instance.query_and_get_error("CREATE USER Robin", user='xyz') From d548c7e381e412c8f7d4e2d733bf92765699ac0a Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Wed, 8 Apr 2020 06:09:40 +0300 Subject: [PATCH 235/484] Simplify DCL for creating quotas. --- .../InterpreterCreateQuotaQuery.cpp | 4 +- ...InterpreterShowCreateAccessEntityQuery.cpp | 2 +- src/Parsers/ASTCreateQuotaQuery.cpp | 25 +++++---- src/Parsers/ASTCreateQuotaQuery.h | 11 ++-- src/Parsers/ParserCreateQuotaQuery.cpp | 53 ++++++++----------- src/Parsers/ParserCreateQuotaQuery.h | 9 ++-- .../test_disk_access_storage/test.py | 4 +- tests/integration/test_quota/test.py | 22 ++++---- .../0_stateless/01033_quota_dcl.reference | 2 +- 9 files changed, 61 insertions(+), 71 deletions(-) diff --git a/src/Interpreters/InterpreterCreateQuotaQuery.cpp b/src/Interpreters/InterpreterCreateQuotaQuery.cpp index 13e772965ff..80987993c96 100644 --- a/src/Interpreters/InterpreterCreateQuotaQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuotaQuery.cpp @@ -34,7 +34,7 @@ void updateQuotaFromQueryImpl(Quota & quota, const ASTCreateQuotaQuery & query, auto duration = query_limits.duration; auto it = boost::range::find_if(quota_all_limits, [&](const Quota::Limits & x) { return x.duration == duration; }); - if (query_limits.unset_tracking) + if (query_limits.drop) { if (it != quota_all_limits.end()) quota_all_limits.erase(it); @@ -59,6 +59,8 @@ void updateQuotaFromQueryImpl(Quota & quota, const ASTCreateQuotaQuery & query, { if (query_limits.max[resource_type]) quota_limits.max[resource_type] = *query_limits.max[resource_type]; + else + quota_limits.max[resource_type] = Quota::UNLIMITED; } } diff --git a/src/Interpreters/InterpreterShowCreateAccessEntityQuery.cpp b/src/Interpreters/InterpreterShowCreateAccessEntityQuery.cpp index e579ade11ca..4c2dcc19a88 100644 --- a/src/Interpreters/InterpreterShowCreateAccessEntityQuery.cpp +++ b/src/Interpreters/InterpreterShowCreateAccessEntityQuery.cpp @@ -136,7 +136,7 @@ namespace create_query_limits.duration = limits.duration; create_query_limits.randomize_interval = limits.randomize_interval; for (auto resource_type : ext::range(Quota::MAX_RESOURCE_TYPE)) - if (limits.max[resource_type]) + if (limits.max[resource_type] != Quota::UNLIMITED) create_query_limits.max[resource_type] = limits.max[resource_type]; query->all_limits.push_back(create_query_limits); } diff --git a/src/Parsers/ASTCreateQuotaQuery.cpp b/src/Parsers/ASTCreateQuotaQuery.cpp index 8fa0dbb0d31..cd064756fb6 100644 --- a/src/Parsers/ASTCreateQuotaQuery.cpp +++ b/src/Parsers/ASTCreateQuotaQuery.cpp @@ -28,16 +28,17 @@ namespace } - void formatLimit(ResourceType resource_type, ResourceAmount max, const IAST::FormatSettings & settings) + void formatLimit(ResourceType resource_type, ResourceAmount max, bool first, const IAST::FormatSettings & settings) { - settings.ostr << (settings.hilite ? IAST::hilite_keyword : "") << " MAX " << Quota::resourceTypeToKeyword(resource_type) - << (settings.hilite ? IAST::hilite_none : ""); + if (first) + settings.ostr << (settings.hilite ? IAST::hilite_keyword : "") << " MAX" << (settings.hilite ? IAST::hilite_none : ""); + else + settings.ostr << ","; - settings.ostr << (settings.hilite ? IAST::hilite_operator : "") << " = " << (settings.hilite ? IAST::hilite_none : ""); + settings.ostr << " " << (settings.hilite ? IAST::hilite_keyword : "") << Quota::resourceTypeToKeyword(resource_type) + << (settings.hilite ? 
IAST::hilite_none : "") << " "; - if (max == Quota::UNLIMITED) - settings.ostr << (settings.hilite ? IAST::hilite_keyword : "") << "ANY" << (settings.hilite ? IAST::hilite_none : ""); - else if (resource_type == Quota::EXECUTION_TIME) + if (resource_type == Quota::EXECUTION_TIME) settings.ostr << Quota::executionTimeToSeconds(max); else settings.ostr << max; @@ -59,9 +60,9 @@ namespace << interval_kind.toKeyword() << (settings.hilite ? IAST::hilite_none : ""); - if (limits.unset_tracking) + if (limits.drop) { - settings.ostr << (settings.hilite ? IAST::hilite_keyword : "") << " UNSET TRACKING" << (settings.hilite ? IAST::hilite_none : ""); + settings.ostr << (settings.hilite ? IAST::hilite_keyword : "") << " NO LIMITS" << (settings.hilite ? IAST::hilite_none : ""); } else { @@ -70,14 +71,12 @@ namespace { if (limits.max[resource_type]) { - if (limit_found) - settings.ostr << ","; + formatLimit(resource_type, *limits.max[resource_type], !limit_found, settings); limit_found = true; - formatLimit(resource_type, *limits.max[resource_type], settings); } } if (!limit_found) - settings.ostr << (settings.hilite ? IAST::hilite_keyword : "") << " TRACKING" << (settings.hilite ? IAST::hilite_none : ""); + settings.ostr << (settings.hilite ? IAST::hilite_keyword : "") << " TRACKING ONLY" << (settings.hilite ? IAST::hilite_none : ""); } } diff --git a/src/Parsers/ASTCreateQuotaQuery.h b/src/Parsers/ASTCreateQuotaQuery.h index 09ceaea9825..70f8cba6de0 100644 --- a/src/Parsers/ASTCreateQuotaQuery.h +++ b/src/Parsers/ASTCreateQuotaQuery.h @@ -13,17 +13,16 @@ class ASTExtendedRoleSet; /** CREATE QUOTA [IF NOT EXISTS | OR REPLACE] name * [KEYED BY {'none' | 'user name' | 'ip address' | 'client key' | 'client key or user name' | 'client key or ip address'}] * [FOR [RANDOMIZED] INTERVAL number {SECOND | MINUTE | HOUR | DAY} - * {[SET] MAX {{QUERIES | ERRORS | RESULT ROWS | RESULT BYTES | READ ROWS | READ BYTES | EXECUTION TIME} = {number | ANY} } [,...] | - * [SET] TRACKING} [,...]] + * {MAX {{QUERIES | ERRORS | RESULT ROWS | RESULT BYTES | READ ROWS | READ BYTES | EXECUTION TIME} = number} [,...] | + * NO LIMITS | TRACKING ONLY} [,...]] * [TO {role [,...] | ALL | ALL EXCEPT role [,...]}] * * ALTER QUOTA [IF EXISTS] name * [RENAME TO new_name] * [KEYED BY {'none' | 'user name' | 'ip address' | 'client key' | 'client key or user name' | 'client key or ip address'}] * [FOR [RANDOMIZED] INTERVAL number {SECOND | MINUTE | HOUR | DAY} - * {[SET] MAX {{QUERIES | ERRORS | RESULT ROWS | RESULT BYTES | READ ROWS | READ BYTES | EXECUTION TIME} = {number | ANY} } [,...] | - * [SET] TRACKING | - * UNSET TRACKING} [,...]] + * {MAX {{QUERIES | ERRORS | RESULT ROWS | RESULT BYTES | READ ROWS | READ BYTES | EXECUTION TIME} = number} [,...] | + * NO LIMITS | TRACKING ONLY} [,...]] * [TO {role [,...] 
| ALL | ALL EXCEPT role [,...]}] */ class ASTCreateQuotaQuery : public IAST, public ASTQueryWithOnCluster @@ -48,7 +47,7 @@ public: struct Limits { std::optional max[MAX_RESOURCE_TYPE]; - bool unset_tracking = false; + bool drop = false; std::chrono::seconds duration = std::chrono::seconds::zero(); bool randomize_interval = false; }; diff --git a/src/Parsers/ParserCreateQuotaQuery.cpp b/src/Parsers/ParserCreateQuotaQuery.cpp index 66e72ee4968..8bbd2127922 100644 --- a/src/Parsers/ParserCreateQuotaQuery.cpp +++ b/src/Parsers/ParserCreateQuotaQuery.cpp @@ -63,12 +63,22 @@ namespace }); } - bool parseLimit(IParserBase::Pos & pos, Expected & expected, ResourceType & resource_type, ResourceAmount & max) + bool parseLimit(IParserBase::Pos & pos, Expected & expected, bool first, ResourceType & resource_type, ResourceAmount & max) { return IParserBase::wrapParseImpl(pos, [&] { - if (!ParserKeyword{"MAX"}.ignore(pos, expected)) - return false; + if (first) + { + if (!ParserKeyword{"MAX"}.ignore(pos, expected)) + return false; + } + else + { + if (!ParserToken{TokenType::Comma}.ignore(pos, expected)) + return false; + + ParserKeyword{"MAX"}.ignore(pos, expected); + } bool resource_type_set = false; for (auto rt : ext::range_with_static_cast(Quota::MAX_RESOURCE_TYPE)) @@ -83,9 +93,6 @@ namespace if (!resource_type_set) return false; - if (!ParserToken{TokenType::Equals}.ignore(pos, expected)) - return false; - ASTPtr max_ast; if (ParserNumber{}.parse(pos, max_ast, expected)) { @@ -95,10 +102,6 @@ namespace else max = applyVisitor(FieldVisitorConvertToNumber(), max_field); } - else if (ParserKeyword{"ANY"}.ignore(pos, expected)) - { - max = Quota::UNLIMITED; - } else return false; @@ -106,18 +109,7 @@ namespace }); } - bool parseCommaAndLimit(IParserBase::Pos & pos, Expected & expected, ResourceType & resource_type, ResourceAmount & max) - { - return IParserBase::wrapParseImpl(pos, [&] - { - if (!ParserToken{TokenType::Comma}.ignore(pos, expected)) - return false; - - return parseLimit(pos, expected, resource_type, max); - }); - } - - bool parseLimits(IParserBase::Pos & pos, Expected & expected, bool alter, ASTCreateQuotaQuery::Limits & limits) + bool parseLimits(IParserBase::Pos & pos, Expected & expected, ASTCreateQuotaQuery::Limits & limits) { return IParserBase::wrapParseImpl(pos, [&] { @@ -142,23 +134,22 @@ namespace new_limits.duration = std::chrono::seconds(static_cast(num_intervals * interval_kind.toAvgSeconds())); - if (alter && ParserKeyword{"UNSET TRACKING"}.ignore(pos, expected)) + if (ParserKeyword{"NO LIMITS"}.ignore(pos, expected)) { - new_limits.unset_tracking = true; + new_limits.drop = true; } - else if (ParserKeyword{"SET TRACKING"}.ignore(pos, expected) || ParserKeyword{"TRACKING"}.ignore(pos, expected)) + else if (ParserKeyword{"TRACKING ONLY"}.ignore(pos, expected)) { } else { - ParserKeyword{"SET"}.ignore(pos, expected); ResourceType resource_type; ResourceAmount max; - if (!parseLimit(pos, expected, resource_type, max)) + if (!parseLimit(pos, expected, true, resource_type, max)) return false; new_limits.max[resource_type] = max; - while (parseCommaAndLimit(pos, expected, resource_type, max)) + while (parseLimit(pos, expected, false, resource_type, max)) new_limits.max[resource_type] = max; } @@ -167,7 +158,7 @@ namespace }); } - bool parseAllLimits(IParserBase::Pos & pos, Expected & expected, bool alter, std::vector & all_limits) + bool parseAllLimits(IParserBase::Pos & pos, Expected & expected, std::vector & all_limits) { return IParserBase::wrapParseImpl(pos, [&] { @@ -175,7 
+166,7 @@ namespace do { ASTCreateQuotaQuery::Limits limits; - if (!parseLimits(pos, expected, alter, limits)) + if (!parseLimits(pos, expected, limits)) { all_limits.resize(old_size); return false; @@ -257,7 +248,7 @@ bool ParserCreateQuotaQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expe if (!key_type && parseKeyType(pos, expected, key_type)) continue; - if (parseAllLimits(pos, expected, alter, all_limits)) + if (parseAllLimits(pos, expected, all_limits)) continue; break; diff --git a/src/Parsers/ParserCreateQuotaQuery.h b/src/Parsers/ParserCreateQuotaQuery.h index 18e6ef6f9f7..786c8292b15 100644 --- a/src/Parsers/ParserCreateQuotaQuery.h +++ b/src/Parsers/ParserCreateQuotaQuery.h @@ -9,17 +9,16 @@ namespace DB * CREATE QUOTA [IF NOT EXISTS | OR REPLACE] name * [KEYED BY {'none' | 'user name' | 'ip address' | 'client key' | 'client key or user name' | 'client key or ip address'}] * [FOR [RANDOMIZED] INTERVAL number {SECOND | MINUTE | HOUR | DAY} - * {[SET] MAX {{QUERIES | ERRORS | RESULT ROWS | RESULT BYTES | READ ROWS | READ BYTES | EXECUTION TIME} = {number | ANY} } [,...] | - * [SET] TRACKING} [,...]] + * {MAX {{QUERIES | ERRORS | RESULT ROWS | RESULT BYTES | READ ROWS | READ BYTES | EXECUTION TIME} = number} [,...] | + * NO LIMITS | TRACKING ONLY} [,...]] * [TO {role [,...] | ALL | ALL EXCEPT role [,...]}] * * ALTER QUOTA [IF EXISTS] name * [RENAME TO new_name] * [KEYED BY {'none' | 'user name' | 'ip address' | 'client key' | 'client key or user name' | 'client key or ip address'}] * [FOR [RANDOMIZED] INTERVAL number {SECOND | MINUTE | HOUR | DAY} - * {[SET] MAX {{QUERIES | ERRORS | RESULT ROWS | RESULT BYTES | READ ROWS | READ BYTES | EXECUTION TIME} = {number | ANY} } [,...] | - * [SET] TRACKING | - * UNSET TRACKING} [,...]] + * {MAX {{QUERIES | ERRORS | RESULT ROWS | RESULT BYTES | READ ROWS | READ BYTES | EXECUTION TIME} = number} } [,...] | + * NO LIMITS | TRACKING ONLY} [,...]] * [TO {role [,...] 
| ALL | ALL EXCEPT role [,...]}] */ class ParserCreateQuotaQuery : public IParserBase diff --git a/tests/integration/test_disk_access_storage/test.py b/tests/integration/test_disk_access_storage/test.py index 0db0e21afef..babceee7c76 100644 --- a/tests/integration/test_disk_access_storage/test.py +++ b/tests/integration/test_disk_access_storage/test.py @@ -22,7 +22,7 @@ def create_entities(): instance.query("CREATE USER u2 IDENTIFIED BY 'qwerty' HOST LOCAL DEFAULT ROLE rx") instance.query("CREATE SETTINGS PROFILE s2 SETTINGS PROFILE s1 TO u2") instance.query("CREATE ROW POLICY p ON mydb.mytable FOR SELECT USING a<1000 TO u1, u2") - instance.query("CREATE QUOTA q FOR INTERVAL 1 HOUR SET MAX QUERIES = 100 TO ALL EXCEPT rx") + instance.query("CREATE QUOTA q FOR INTERVAL 1 HOUR MAX QUERIES 100 TO ALL EXCEPT rx") @pytest.fixture(autouse=True) @@ -41,7 +41,7 @@ def test_create(): assert instance.query("SHOW CREATE USER u1") == "CREATE USER u1 SETTINGS PROFILE s1\n" assert instance.query("SHOW CREATE USER u2") == "CREATE USER u2 HOST LOCAL DEFAULT ROLE rx\n" assert instance.query("SHOW CREATE ROW POLICY p ON mydb.mytable") == "CREATE ROW POLICY p ON mydb.mytable FOR SELECT USING a < 1000 TO u1, u2\n" - assert instance.query("SHOW CREATE QUOTA q") == "CREATE QUOTA q KEYED BY \\'none\\' FOR INTERVAL 1 HOUR MAX QUERIES = 100 TO ALL EXCEPT rx\n" + assert instance.query("SHOW CREATE QUOTA q") == "CREATE QUOTA q KEYED BY \\'none\\' FOR INTERVAL 1 HOUR MAX QUERIES 100 TO ALL EXCEPT rx\n" assert instance.query("SHOW GRANTS FOR u1") == "" assert instance.query("SHOW GRANTS FOR u2") == "GRANT rx TO u2\n" assert instance.query("SHOW CREATE ROLE rx") == "CREATE ROLE rx SETTINGS PROFILE s1\n" diff --git a/tests/integration/test_quota/test.py b/tests/integration/test_quota/test.py index 85d2ded16c1..ae68a34a03e 100644 --- a/tests/integration/test_quota/test.py +++ b/tests/integration/test_quota/test.py @@ -180,7 +180,7 @@ def test_reload_users_xml_by_timer(): def test_dcl_introspection(): assert instance.query("SHOW QUOTAS") == "myQuota\n" - assert instance.query("SHOW CREATE QUOTA myQuota") == "CREATE QUOTA myQuota KEYED BY \\'user name\\' FOR INTERVAL 1 YEAR MAX QUERIES = 1000, MAX READ ROWS = 1000 TO default\n" + assert instance.query("SHOW CREATE QUOTA myQuota") == "CREATE QUOTA myQuota KEYED BY \\'user name\\' FOR INTERVAL 1 YEAR MAX QUERIES 1000, READ ROWS 1000 TO default\n" expected_usage = "myQuota key=\\\\'default\\\\' interval=\[.*\] queries=0/1000 errors=0 result_rows=0 result_bytes=0 read_rows=0/1000 read_bytes=0 execution_time=0" assert re.match(expected_usage, instance.query("SHOW QUOTA USAGE")) assert re.match(expected_usage, instance.query("SHOW QUOTA USAGE CURRENT")) @@ -193,7 +193,7 @@ def test_dcl_introspection(): # Add interval. 
copy_quota_xml('two_intervals.xml') assert instance.query("SHOW QUOTAS") == "myQuota\n" - assert instance.query("SHOW CREATE QUOTA myQuota") == "CREATE QUOTA myQuota KEYED BY \\'user name\\' FOR INTERVAL 1 YEAR MAX QUERIES = 1000, MAX READ ROWS = 1000, FOR RANDOMIZED INTERVAL 2 YEAR MAX RESULT BYTES = 30000, MAX READ BYTES = 20000, MAX EXECUTION TIME = 120 TO default\n" + assert instance.query("SHOW CREATE QUOTA myQuota") == "CREATE QUOTA myQuota KEYED BY \\'user name\\' FOR INTERVAL 1 YEAR MAX QUERIES 1000, READ ROWS 1000, FOR RANDOMIZED INTERVAL 2 YEAR MAX RESULT BYTES 30000, READ BYTES 20000, EXECUTION TIME 120 TO default\n" expected_usage = "myQuota key=\\\\'default\\\\' interval=\[.*\] queries=1/1000 errors=0 result_rows=50 result_bytes=200 read_rows=50/1000 read_bytes=200 execution_time=.*\n"\ "myQuota key=\\\\'default\\\\' interval=\[.*\] queries=0 errors=0 result_rows=0 result_bytes=0/30000 read_rows=0 read_bytes=0/20000 execution_time=0/120" assert re.match(expected_usage, instance.query("SHOW QUOTA USAGE")) @@ -201,8 +201,8 @@ def test_dcl_introspection(): # Drop interval, add quota. copy_quota_xml('two_quotas.xml') assert instance.query("SHOW QUOTAS") == "myQuota\nmyQuota2\n" - assert instance.query("SHOW CREATE QUOTA myQuota") == "CREATE QUOTA myQuota KEYED BY \\'user name\\' FOR INTERVAL 1 YEAR MAX QUERIES = 1000, MAX READ ROWS = 1000 TO default\n" - assert instance.query("SHOW CREATE QUOTA myQuota2") == "CREATE QUOTA myQuota2 KEYED BY \\'client key or user name\\' FOR RANDOMIZED INTERVAL 1 HOUR MAX RESULT ROWS = 4000, MAX RESULT BYTES = 400000, MAX READ ROWS = 4000, MAX READ BYTES = 400000, MAX EXECUTION TIME = 60, FOR INTERVAL 1 MONTH MAX EXECUTION TIME = 1800\n" + assert instance.query("SHOW CREATE QUOTA myQuota") == "CREATE QUOTA myQuota KEYED BY \\'user name\\' FOR INTERVAL 1 YEAR MAX QUERIES 1000, READ ROWS 1000 TO default\n" + assert instance.query("SHOW CREATE QUOTA myQuota2") == "CREATE QUOTA myQuota2 KEYED BY \\'client key or user name\\' FOR RANDOMIZED INTERVAL 1 HOUR MAX RESULT ROWS 4000, RESULT BYTES 400000, READ ROWS 4000, READ BYTES 400000, EXECUTION TIME 60, FOR INTERVAL 1 MONTH MAX EXECUTION TIME 1800\n" expected_usage = "myQuota key=\\\\'default\\\\' interval=\[.*\] queries=1/1000 errors=0 result_rows=50 result_bytes=200 read_rows=50/1000 read_bytes=200 execution_time=.*" assert re.match(expected_usage, instance.query("SHOW QUOTA USAGE")) @@ -212,9 +212,9 @@ def test_dcl_management(): assert instance.query("SHOW QUOTAS") == "" assert instance.query("SHOW QUOTA USAGE") == "" - instance.query("CREATE QUOTA qA FOR INTERVAL 15 MONTH SET MAX QUERIES = 123 TO CURRENT_USER") + instance.query("CREATE QUOTA qA FOR INTERVAL 15 MONTH MAX QUERIES 123 TO CURRENT_USER") assert instance.query("SHOW QUOTAS") == "qA\n" - assert instance.query("SHOW CREATE QUOTA qA") == "CREATE QUOTA qA KEYED BY \\'none\\' FOR INTERVAL 5 QUARTER MAX QUERIES = 123 TO default\n" + assert instance.query("SHOW CREATE QUOTA qA") == "CREATE QUOTA qA KEYED BY \\'none\\' FOR INTERVAL 5 QUARTER MAX QUERIES 123 TO default\n" expected_usage = "qA key=\\\\'\\\\' interval=\[.*\] queries=0/123 errors=0 result_rows=0 result_bytes=0 read_rows=0 read_bytes=0 execution_time=.*" assert re.match(expected_usage, instance.query("SHOW QUOTA USAGE")) @@ -222,14 +222,14 @@ def test_dcl_management(): expected_usage = "qA key=\\\\'\\\\' interval=\[.*\] queries=1/123 errors=0 result_rows=50 result_bytes=200 read_rows=50 read_bytes=200 execution_time=.*" assert re.match(expected_usage, instance.query("SHOW QUOTA USAGE")) 
- instance.query("ALTER QUOTA qA FOR INTERVAL 15 MONTH MAX QUERIES = 321, MAX ERRORS = 10, FOR INTERVAL 0.5 HOUR MAX EXECUTION TIME = 0.5") - assert instance.query("SHOW CREATE QUOTA qA") == "CREATE QUOTA qA KEYED BY \\'none\\' FOR INTERVAL 30 MINUTE MAX EXECUTION TIME = 0.5, FOR INTERVAL 5 QUARTER MAX QUERIES = 321, MAX ERRORS = 10 TO default\n" + instance.query("ALTER QUOTA qA FOR INTERVAL 15 MONTH MAX QUERIES 321, MAX ERRORS 10, FOR INTERVAL 0.5 HOUR MAX EXECUTION TIME 0.5") + assert instance.query("SHOW CREATE QUOTA qA") == "CREATE QUOTA qA KEYED BY \\'none\\' FOR INTERVAL 30 MINUTE MAX EXECUTION TIME 0.5, FOR INTERVAL 5 QUARTER MAX QUERIES 321, ERRORS 10 TO default\n" expected_usage = "qA key=\\\\'\\\\' interval=\[.*\] queries=0 errors=0 result_rows=0 result_bytes=0 read_rows=0 read_bytes=0 execution_time=.*/0.5\n"\ "qA key=\\\\'\\\\' interval=\[.*\] queries=1/321 errors=0/10 result_rows=50 result_bytes=200 read_rows=50 read_bytes=200 execution_time=.*" assert re.match(expected_usage, instance.query("SHOW QUOTA USAGE")) - instance.query("ALTER QUOTA qA FOR INTERVAL 15 MONTH UNSET TRACKING, FOR RANDOMIZED INTERVAL 16 MONTH SET TRACKING, FOR INTERVAL 1800 SECOND UNSET TRACKING") - assert instance.query("SHOW CREATE QUOTA qA") == "CREATE QUOTA qA KEYED BY \\'none\\' FOR RANDOMIZED INTERVAL 16 MONTH TRACKING TO default\n" + instance.query("ALTER QUOTA qA FOR INTERVAL 15 MONTH NO LIMITS, FOR RANDOMIZED INTERVAL 16 MONTH TRACKING ONLY, FOR INTERVAL 1800 SECOND NO LIMITS") + assert instance.query("SHOW CREATE QUOTA qA") == "CREATE QUOTA qA KEYED BY \\'none\\' FOR RANDOMIZED INTERVAL 16 MONTH TRACKING ONLY TO default\n" expected_usage = "qA key=\\\\'\\\\' interval=\[.*\] queries=0 errors=0 result_rows=0 result_bytes=0 read_rows=0 read_bytes=0 execution_time=.*" assert re.match(expected_usage, instance.query("SHOW QUOTA USAGE")) @@ -238,7 +238,7 @@ def test_dcl_management(): assert re.match(expected_usage, instance.query("SHOW QUOTA USAGE")) instance.query("ALTER QUOTA qA RENAME TO qB") - assert instance.query("SHOW CREATE QUOTA qB") == "CREATE QUOTA qB KEYED BY \\'none\\' FOR RANDOMIZED INTERVAL 16 MONTH TRACKING TO default\n" + assert instance.query("SHOW CREATE QUOTA qB") == "CREATE QUOTA qB KEYED BY \\'none\\' FOR RANDOMIZED INTERVAL 16 MONTH TRACKING ONLY TO default\n" expected_usage = "qB key=\\\\'\\\\' interval=\[.*\] queries=1 errors=0 result_rows=50 result_bytes=200 read_rows=50 read_bytes=200 execution_time=.*" assert re.match(expected_usage, instance.query("SHOW QUOTA USAGE")) diff --git a/tests/queries/0_stateless/01033_quota_dcl.reference b/tests/queries/0_stateless/01033_quota_dcl.reference index 7f92f992dd5..7bd2d2923d2 100644 --- a/tests/queries/0_stateless/01033_quota_dcl.reference +++ b/tests/queries/0_stateless/01033_quota_dcl.reference @@ -1,2 +1,2 @@ default -CREATE QUOTA default KEYED BY \'user name\' FOR INTERVAL 1 HOUR TRACKING TO default, readonly +CREATE QUOTA default KEYED BY \'user name\' FOR INTERVAL 1 HOUR TRACKING ONLY TO default, readonly From d992e408d8432ba86289fe712096e1ac484086c3 Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Wed, 8 Apr 2020 21:01:42 +0300 Subject: [PATCH 236/484] Disable creating row policies for insert, update, delete because those filters are not supported. 
---
 src/Parsers/ParserCreateRowPolicyQuery.cpp | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/src/Parsers/ParserCreateRowPolicyQuery.cpp b/src/Parsers/ParserCreateRowPolicyQuery.cpp
index 8bfe54b87b2..75c21cd930a 100644
--- a/src/Parsers/ParserCreateRowPolicyQuery.cpp
+++ b/src/Parsers/ParserCreateRowPolicyQuery.cpp
@@ -83,14 +83,13 @@ namespace
         static constexpr char delete_op[] = "DELETE";
         std::vector ops;
 
-        bool keyword_for = false;
         if (ParserKeyword{"FOR"}.ignore(pos, expected))
         {
-            keyword_for = true;
             do
             {
                 if (ParserKeyword{"SELECT"}.ignore(pos, expected))
                     ops.push_back(select_op);
+#if 0 /// INSERT, UPDATE, DELETE are not supported yet
                 else if (ParserKeyword{"INSERT"}.ignore(pos, expected))
                     ops.push_back(insert_op);
                 else if (ParserKeyword{"UPDATE"}.ignore(pos, expected))
@@ -100,6 +99,7 @@ namespace
                 else if (ParserKeyword{"ALL"}.ignore(pos, expected))
                 {
                 }
+#endif
                 else
                     return false;
             }
@@ -109,9 +109,11 @@ namespace
         if (ops.empty())
         {
             ops.push_back(select_op);
+#if 0 /// INSERT, UPDATE, DELETE are not supported yet
             ops.push_back(insert_op);
             ops.push_back(update_op);
             ops.push_back(delete_op);
+#endif
         }
 
         std::optional filter;
@@ -123,14 +125,15 @@ namespace
             if (!parseConditionalExpression(pos, expected, filter))
                 return false;
         }
+#if 0 /// INSERT, UPDATE, DELETE are not supported yet
         if (ParserKeyword{"WITH CHECK"}.ignore(pos, expected))
         {
             keyword_with_check = true;
             if (!parseConditionalExpression(pos, expected, check))
                 return false;
         }
-
-        if (!keyword_for && !keyword_using && !keyword_with_check)
+#endif
+        if (!keyword_using && !keyword_with_check)
             return false;
 
         if (filter && !check && !alter)

From 4d93577791414f62b586895644cacacd8e861ad8 Mon Sep 17 00:00:00 2001
From: Vitaly Baranov
Date: Thu, 9 Apr 2020 00:10:00 +0300
Subject: [PATCH 237/484] PREWHERE can now be used by users without row filtering.
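A sketch of the behavior this enables, using the table and user names from the integration test below (the exact error text for filtered users is omitted here):

```sql
-- For a user whose row policies apply a real filter, PREWHERE is still rejected.
-- For user 'another', whose policy condition folds to a constant true, the
-- condition is now dropped entirely, so PREWHERE works again:
SELECT * FROM mydb.filtered_table1 PREWHERE 1;  -- run as user 'another': returns all rows
```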
--- src/Access/EnabledRowPolicies.cpp | 28 +++--- src/Access/RowPolicyCache.cpp | 90 ++----------------- src/Parsers/makeASTForLogicalFunction.cpp | 103 ++++++++++++++++++++++ src/Parsers/makeASTForLogicalFunction.h | 19 ++++ tests/integration/test_row_policy/test.py | 3 + 5 files changed, 146 insertions(+), 97 deletions(-) create mode 100644 src/Parsers/makeASTForLogicalFunction.cpp create mode 100644 src/Parsers/makeASTForLogicalFunction.h diff --git a/src/Access/EnabledRowPolicies.cpp b/src/Access/EnabledRowPolicies.cpp index a525fb65606..56c73aaf40d 100644 --- a/src/Access/EnabledRowPolicies.cpp +++ b/src/Access/EnabledRowPolicies.cpp @@ -1,7 +1,5 @@ #include -#include -#include -#include +#include #include #include @@ -35,19 +33,17 @@ ASTPtr EnabledRowPolicies::getCondition(const String & database, const String & ASTPtr EnabledRowPolicies::getCondition(const String & database, const String & table_name, ConditionType type, const ASTPtr & extra_condition) const { - ASTPtr main_condition = getCondition(database, table_name, type); - if (!main_condition) - return extra_condition; - if (!extra_condition) - return main_condition; - auto function = std::make_shared(); - auto exp_list = std::make_shared(); - function->name = "and"; - function->arguments = exp_list; - function->children.push_back(exp_list); - exp_list->children.push_back(main_condition); - exp_list->children.push_back(extra_condition); - return function; + ASTPtr condition = getCondition(database, table_name, type); + if (condition && extra_condition) + condition = makeASTForLogicalAnd({condition, extra_condition}); + else if (!condition) + condition = extra_condition; + + bool value; + if (tryGetLiteralBool(condition.get(), value) && value) + condition = nullptr; /// The condition is always true, no need to check it. + + return condition; } diff --git a/src/Access/RowPolicyCache.cpp b/src/Access/RowPolicyCache.cpp index 9509923adbf..44f2cd160d4 100644 --- a/src/Access/RowPolicyCache.cpp +++ b/src/Access/RowPolicyCache.cpp @@ -1,97 +1,19 @@ #include #include #include -#include -#include #include #include +#include #include #include #include #include -#include -#include namespace DB { namespace { - bool tryGetLiteralBool(const IAST & ast, bool & value) - { - try - { - if (const ASTLiteral * literal = ast.as()) - { - value = !literal->value.isNull() && applyVisitor(FieldVisitorConvertToNumber(), literal->value); - return true; - } - return false; - } - catch (...) 
- { - return false; - } - } - - ASTPtr applyFunctionAND(ASTs arguments) - { - bool const_arguments = true; - boost::range::remove_erase_if(arguments, [&](const ASTPtr & argument) -> bool - { - bool b; - if (!tryGetLiteralBool(*argument, b)) - return false; - const_arguments &= b; - return true; - }); - - if (!const_arguments) - return std::make_shared(Field{UInt8(0)}); - if (arguments.empty()) - return std::make_shared(Field{UInt8(1)}); - if (arguments.size() == 1) - return arguments[0]; - - auto function = std::make_shared(); - auto exp_list = std::make_shared(); - function->name = "and"; - function->arguments = exp_list; - function->children.push_back(exp_list); - exp_list->children = std::move(arguments); - return function; - } - - - ASTPtr applyFunctionOR(ASTs arguments) - { - bool const_arguments = false; - boost::range::remove_erase_if(arguments, [&](const ASTPtr & argument) -> bool - { - bool b; - if (!tryGetLiteralBool(*argument, b)) - return false; - const_arguments |= b; - return true; - }); - - if (const_arguments) - return std::make_shared(Field{UInt8(1)}); - if (arguments.empty()) - return std::make_shared(Field{UInt8(0)}); - if (arguments.size() == 1) - return arguments[0]; - - auto function = std::make_shared(); - auto exp_list = std::make_shared(); - function->name = "or"; - function->arguments = exp_list; - function->children.push_back(exp_list); - exp_list->children = std::move(arguments); - return function; - } - - using ConditionType = RowPolicy::ConditionType; constexpr size_t MAX_CONDITION_TYPE = RowPolicy::MAX_CONDITION_TYPE; @@ -111,10 +33,16 @@ namespace ASTPtr getResult() && { /// Process permissive conditions. - restrictions.push_back(applyFunctionOR(std::move(permissions))); + restrictions.push_back(makeASTForLogicalOr(std::move(permissions))); /// Process restrictive conditions. - return applyFunctionAND(std::move(restrictions)); + auto condition = makeASTForLogicalAnd(std::move(restrictions)); + + bool value; + if (tryGetLiteralBool(condition.get(), value) && value) + condition = nullptr; /// The condition is always true, no need to check it. 
+ + return condition; } private: diff --git a/src/Parsers/makeASTForLogicalFunction.cpp b/src/Parsers/makeASTForLogicalFunction.cpp new file mode 100644 index 00000000000..eaae38740aa --- /dev/null +++ b/src/Parsers/makeASTForLogicalFunction.cpp @@ -0,0 +1,103 @@ +#include +#include +#include +#include +#include + + +namespace DB +{ +ASTPtr makeASTForLogicalNot(ASTPtr argument) +{ + bool b; + if (tryGetLiteralBool(argument.get(), b)) + return std::make_shared(Field{UInt8(!b)}); + + auto function = std::make_shared(); + auto exp_list = std::make_shared(); + function->name = "not"; + function->arguments = exp_list; + function->children.push_back(exp_list); + exp_list->children.push_back(argument); + return function; +} + + +ASTPtr makeASTForLogicalAnd(ASTs && arguments) +{ + bool partial_result = true; + boost::range::remove_erase_if(arguments, [&](const ASTPtr & argument) -> bool + { + bool b; + if (!tryGetLiteralBool(argument.get(), b)) + return false; + partial_result &= b; + return true; + }); + + if (!partial_result) + return std::make_shared(Field{UInt8(0)}); + if (arguments.empty()) + return std::make_shared(Field{UInt8(1)}); + if (arguments.size() == 1) + return arguments[0]; + + auto function = std::make_shared(); + auto exp_list = std::make_shared(); + function->name = "and"; + function->arguments = exp_list; + function->children.push_back(exp_list); + exp_list->children = std::move(arguments); + return function; +} + + +ASTPtr makeASTForLogicalOr(ASTs && arguments) +{ + bool partial_result = false; + boost::range::remove_erase_if(arguments, [&](const ASTPtr & argument) -> bool + { + bool b; + if (!tryGetLiteralBool(argument.get(), b)) + return false; + partial_result |= b; + return true; + }); + + if (partial_result) + return std::make_shared(Field{UInt8(1)}); + if (arguments.empty()) + return std::make_shared(Field{UInt8(0)}); + if (arguments.size() == 1) + return arguments[0]; + + auto function = std::make_shared(); + auto exp_list = std::make_shared(); + function->name = "or"; + function->arguments = exp_list; + function->children.push_back(exp_list); + exp_list->children = std::move(arguments); + return function; +} + + +bool tryGetLiteralBool(const IAST * ast, bool & value) +{ + if (!ast) + return false; + + try + { + if (const ASTLiteral * literal = ast->as()) + { + value = !literal->value.isNull() && applyVisitor(FieldVisitorConvertToNumber(), literal->value); + return true; + } + return false; + } + catch (...) + { + return false; + } +} +} diff --git a/src/Parsers/makeASTForLogicalFunction.h b/src/Parsers/makeASTForLogicalFunction.h new file mode 100644 index 00000000000..5c1096cab6e --- /dev/null +++ b/src/Parsers/makeASTForLogicalFunction.h @@ -0,0 +1,19 @@ +#pragma once + +#include + + +namespace DB +{ +/// Makes an AST calculating NOT argument. +ASTPtr makeASTForLogicalNot(ASTPtr argument); + +/// Makes an AST calculating argument1 AND argument2 AND ... AND argumentN. +ASTPtr makeASTForLogicalAnd(ASTs && arguments); + +/// Makes an AST calculating argument1 OR argument2 OR ... OR argumentN. +ASTPtr makeASTForLogicalOr(ASTs && arguments); + +/// Tries to extract a literal bool from AST. 
+bool tryGetLiteralBool(const IAST * ast, bool & value); +} diff --git a/tests/integration/test_row_policy/test.py b/tests/integration/test_row_policy/test.py index 7087e6aafae..3a5b7340528 100644 --- a/tests/integration/test_row_policy/test.py +++ b/tests/integration/test_row_policy/test.py @@ -113,6 +113,9 @@ def test_prewhere_not_supported(): assert expected_error in instance.query_and_get_error("SELECT * FROM mydb.filtered_table2 PREWHERE 1") assert expected_error in instance.query_and_get_error("SELECT * FROM mydb.filtered_table3 PREWHERE 1") + # However PREWHERE should still work for user without filtering. + assert instance.query("SELECT * FROM mydb.filtered_table1 PREWHERE 1", user="another") == "0\t0\n0\t1\n1\t0\n1\t1\n" + def test_single_table_name(): copy_policy_xml('tag_with_table_name.xml') From f0d3547b8f19ac0747849e28cfeb6b14b1f67896 Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Thu, 9 Apr 2020 02:01:41 +0300 Subject: [PATCH 238/484] Parser can parse "ON CLUSTER" in other places of SQL commands too. --- src/Parsers/ParserCreateQuotaQuery.cpp | 22 +++++++++++------ src/Parsers/ParserCreateRoleQuery.cpp | 20 ++++++++++------ src/Parsers/ParserCreateRowPolicyQuery.cpp | 22 +++++++++++------ .../ParserCreateSettingsProfileQuery.cpp | 23 ++++++++++++------ src/Parsers/ParserCreateUserQuery.cpp | 19 +++++++++------ src/Parsers/ParserGrantQuery.cpp | 24 +++++++++++++++---- 6 files changed, 90 insertions(+), 40 deletions(-) diff --git a/src/Parsers/ParserCreateQuotaQuery.cpp b/src/Parsers/ParserCreateQuotaQuery.cpp index 8bbd2127922..6007d6206ec 100644 --- a/src/Parsers/ParserCreateQuotaQuery.cpp +++ b/src/Parsers/ParserCreateQuotaQuery.cpp @@ -190,6 +190,14 @@ namespace return true; }); } + + bool parseOnCluster(IParserBase::Pos & pos, Expected & expected, String & cluster) + { + return IParserBase::wrapParseImpl(pos, [&] + { + return ParserKeyword{"ON"}.ignore(pos, expected) && ASTQueryWithOnCluster::parse(pos, cluster, expected); + }); + } } @@ -229,16 +237,10 @@ bool ParserCreateQuotaQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expe if (!parseIdentifierOrStringLiteral(pos, expected, name)) return false; - String cluster; - if (ParserKeyword{"ON"}.ignore(pos, expected)) - { - if (!ASTQueryWithOnCluster::parse(pos, cluster, expected)) - return false; - } - String new_name; std::optional key_type; std::vector all_limits; + String cluster; while (true) { @@ -251,12 +253,18 @@ bool ParserCreateQuotaQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expe if (parseAllLimits(pos, expected, all_limits)) continue; + if (cluster.empty() && parseOnCluster(pos, expected, cluster)) + continue; + break; } std::shared_ptr roles; parseToRoles(pos, expected, attach_mode, roles); + if (cluster.empty()) + parseOnCluster(pos, expected, cluster); + auto query = std::make_shared(); node = query; diff --git a/src/Parsers/ParserCreateRoleQuery.cpp b/src/Parsers/ParserCreateRoleQuery.cpp index 05143108480..2a6f2dd2c90 100644 --- a/src/Parsers/ParserCreateRoleQuery.cpp +++ b/src/Parsers/ParserCreateRoleQuery.cpp @@ -41,6 +41,14 @@ namespace return true; }); } + + bool parseOnCluster(IParserBase::Pos & pos, Expected & expected, String & cluster) + { + return IParserBase::wrapParseImpl(pos, [&] + { + return ParserKeyword{"ON"}.ignore(pos, expected) && ASTQueryWithOnCluster::parse(pos, cluster, expected); + }); + } } @@ -80,15 +88,10 @@ bool ParserCreateRoleQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec if (!parseRoleName(pos, expected, name)) return false; - String cluster; - 
if (ParserKeyword{"ON"}.ignore(pos, expected)) - { - if (!ASTQueryWithOnCluster::parse(pos, cluster, expected)) - return false; - } - String new_name; std::shared_ptr settings; + String cluster; + while (true) { if (alter && parseRenameTo(pos, expected, new_name)) @@ -97,6 +100,9 @@ bool ParserCreateRoleQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec if (parseSettings(pos, expected, attach_mode, settings)) continue; + if (cluster.empty() && parseOnCluster(pos, expected, cluster)) + continue; + break; } diff --git a/src/Parsers/ParserCreateRowPolicyQuery.cpp b/src/Parsers/ParserCreateRowPolicyQuery.cpp index 75c21cd930a..b6840f0ed6a 100644 --- a/src/Parsers/ParserCreateRowPolicyQuery.cpp +++ b/src/Parsers/ParserCreateRowPolicyQuery.cpp @@ -203,6 +203,14 @@ namespace return true; }); } + + bool parseOnCluster(IParserBase::Pos & pos, Expected & expected, String & cluster) + { + return IParserBase::wrapParseImpl(pos, [&] + { + return ParserKeyword{"ON"}.ignore(pos, expected) && ASTQueryWithOnCluster::parse(pos, cluster, expected); + }); + } } @@ -246,16 +254,10 @@ bool ParserCreateRowPolicyQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & || !parseDatabaseAndTableName(pos, expected, database, table_name)) return false; - String cluster; - if (ParserKeyword{"ON"}.ignore(pos, expected)) - { - if (!ASTQueryWithOnCluster::parse(pos, cluster, expected)) - return false; - } - String new_policy_name; std::optional is_restrictive; std::vector> conditions; + String cluster; while (true) { @@ -268,12 +270,18 @@ bool ParserCreateRowPolicyQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & if (parseMultipleConditions(pos, expected, alter, conditions)) continue; + if (cluster.empty() && parseOnCluster(pos, expected, cluster)) + continue; + break; } std::shared_ptr roles; parseToRoles(pos, expected, attach_mode, roles); + if (cluster.empty()) + parseOnCluster(pos, expected, cluster); + auto query = std::make_shared(); node = query; diff --git a/src/Parsers/ParserCreateSettingsProfileQuery.cpp b/src/Parsers/ParserCreateSettingsProfileQuery.cpp index 5b33fed2fa0..83d0f0c1d91 100644 --- a/src/Parsers/ParserCreateSettingsProfileQuery.cpp +++ b/src/Parsers/ParserCreateSettingsProfileQuery.cpp @@ -57,6 +57,14 @@ namespace return true; }); } + + bool parseOnCluster(IParserBase::Pos & pos, Expected & expected, String & cluster) + { + return IParserBase::wrapParseImpl(pos, [&] + { + return ParserKeyword{"ON"}.ignore(pos, expected) && ASTQueryWithOnCluster::parse(pos, cluster, expected); + }); + } } @@ -96,15 +104,10 @@ bool ParserCreateSettingsProfileQuery::parseImpl(Pos & pos, ASTPtr & node, Expec if (!parseIdentifierOrStringLiteral(pos, expected, name)) return false; - String cluster; - if (ParserKeyword{"ON"}.ignore(pos, expected)) - { - if (!ASTQueryWithOnCluster::parse(pos, cluster, expected)) - return false; - } - String new_name; std::shared_ptr settings; + String cluster; + while (true) { if (alter && parseRenameTo(pos, expected, new_name)) @@ -113,12 +116,18 @@ bool ParserCreateSettingsProfileQuery::parseImpl(Pos & pos, ASTPtr & node, Expec if (parseSettings(pos, expected, attach_mode, settings)) continue; + if (cluster.empty() && parseOnCluster(pos, expected, cluster)) + continue; + break; } std::shared_ptr to_roles; parseToRoles(pos, expected, attach_mode, to_roles); + if (cluster.empty()) + parseOnCluster(pos, expected, cluster); + auto query = std::make_shared(); node = query; diff --git a/src/Parsers/ParserCreateUserQuery.cpp b/src/Parsers/ParserCreateUserQuery.cpp index 
3968c26d42e..28483cc76ec 100644 --- a/src/Parsers/ParserCreateUserQuery.cpp +++ b/src/Parsers/ParserCreateUserQuery.cpp @@ -250,6 +250,14 @@ namespace return true; }); } + + bool parseOnCluster(IParserBase::Pos & pos, Expected & expected, String & cluster) + { + return IParserBase::wrapParseImpl(pos, [&] + { + return ParserKeyword{"ON"}.ignore(pos, expected) && ASTQueryWithOnCluster::parse(pos, cluster, expected); + }); + } } @@ -290,13 +298,6 @@ bool ParserCreateUserQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec if (!parseUserName(pos, expected, name, host_pattern)) return false; - String cluster; - if (ParserKeyword{"ON"}.ignore(pos, expected)) - { - if (!ASTQueryWithOnCluster::parse(pos, cluster, expected)) - return false; - } - String new_name; std::optional new_host_pattern; std::optional authentication; @@ -305,6 +306,7 @@ bool ParserCreateUserQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec std::optional remove_hosts; std::shared_ptr default_roles; std::shared_ptr settings; + String cluster; while (true) { @@ -320,6 +322,9 @@ bool ParserCreateUserQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec if (!default_roles && parseDefaultRoles(pos, expected, attach_mode, default_roles)) continue; + if (cluster.empty() && parseOnCluster(pos, expected, cluster)) + continue; + if (alter) { if (new_name.empty() && parseRenameTo(pos, expected, new_name, new_host_pattern)) diff --git a/src/Parsers/ParserGrantQuery.cpp b/src/Parsers/ParserGrantQuery.cpp index f6eecbe5dba..64dde8f6524 100644 --- a/src/Parsers/ParserGrantQuery.cpp +++ b/src/Parsers/ParserGrantQuery.cpp @@ -237,6 +237,14 @@ namespace return true; }); } + + bool parseOnCluster(IParserBase::Pos & pos, Expected & expected, String & cluster) + { + return IParserBase::wrapParseImpl(pos, [&] + { + return ParserKeyword{"ON"}.ignore(pos, expected) && ASTQueryWithOnCluster::parse(pos, cluster, expected); + }); + } } @@ -260,11 +268,8 @@ bool ParserGrantQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) return false; String cluster; - if (ParserKeyword{"ON"}.ignore(pos, expected)) - { - if (!ASTQueryWithOnCluster::parse(pos, cluster, expected)) - return false; - } + if (cluster.empty()) + parseOnCluster(pos, expected, cluster); bool grant_option = false; bool admin_option = false; @@ -281,10 +286,16 @@ bool ParserGrantQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) if (!parseAccessRightsElements(pos, expected, elements) && !parseRoles(pos, expected, attach, roles)) return false; + if (cluster.empty()) + parseOnCluster(pos, expected, cluster); + std::shared_ptr to_roles; if (!parseToRoles(pos, expected, kind, to_roles)) return false; + if (cluster.empty()) + parseOnCluster(pos, expected, cluster); + if (kind == Kind::GRANT) { if (ParserKeyword{"WITH GRANT OPTION"}.ignore(pos, expected)) @@ -293,6 +304,9 @@ bool ParserGrantQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) admin_option = true; } + if (cluster.empty()) + parseOnCluster(pos, expected, cluster); + if (grant_option && roles) throw Exception("GRANT OPTION should be specified for access types", ErrorCodes::SYNTAX_ERROR); if (admin_option && !elements.empty()) From ed2562b3f468ecab6b0bf49ea95ec9487a6524f2 Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Thu, 9 Apr 2020 02:53:41 +0300 Subject: [PATCH 239/484] Add new words to client's suggest. 
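For context, a few hedged examples of statements whose keywords the suggester now covers (all names are hypothetical; the statements follow the DCL syntax introduced by earlier commits in this series):

```sql
CREATE USER alice IDENTIFIED BY 'secret' HOST IP '127.0.0.1';
GRANT SELECT ON mydb.* TO alice WITH GRANT OPTION;
CREATE QUOTA q1 FOR RANDOMIZED INTERVAL 1 HOUR TRACKING ONLY TO alice;
```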
--- programs/client/Suggest.cpp | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/programs/client/Suggest.cpp b/programs/client/Suggest.cpp index f7141449f54..8fffbec4fab 100644 --- a/programs/client/Suggest.cpp +++ b/programs/client/Suggest.cpp @@ -67,16 +67,19 @@ void Suggest::load(const ConnectionParameters & connection_parameters, size_t su Suggest::Suggest() { /// Keywords may be not up to date with ClickHouse parser. - words = {"CREATE", "DATABASE", "IF", "NOT", "EXISTS", "TEMPORARY", "TABLE", "ON", "CLUSTER", "DEFAULT", - "MATERIALIZED", "ALIAS", "ENGINE", "AS", "VIEW", "POPULATE", "SETTINGS", "ATTACH", "DETACH", "DROP", - "RENAME", "TO", "ALTER", "ADD", "MODIFY", "CLEAR", "COLUMN", "AFTER", "COPY", "PROJECT", - "PRIMARY", "KEY", "CHECK", "PARTITION", "PART", "FREEZE", "FETCH", "FROM", "SHOW", "INTO", - "OUTFILE", "FORMAT", "TABLES", "DATABASES", "LIKE", "PROCESSLIST", "CASE", "WHEN", "THEN", "ELSE", - "END", "DESCRIBE", "DESC", "USE", "SET", "OPTIMIZE", "FINAL", "DEDUPLICATE", "INSERT", "VALUES", - "SELECT", "DISTINCT", "SAMPLE", "ARRAY", "JOIN", "GLOBAL", "LOCAL", "ANY", "ALL", "INNER", - "LEFT", "RIGHT", "FULL", "OUTER", "CROSS", "USING", "PREWHERE", "WHERE", "GROUP", "BY", - "WITH", "TOTALS", "HAVING", "ORDER", "COLLATE", "LIMIT", "UNION", "AND", "OR", "ASC", - "IN", "KILL", "QUERY", "SYNC", "ASYNC", "TEST", "BETWEEN", "TRUNCATE"}; + words = {"CREATE", "DATABASE", "IF", "NOT", "EXISTS", "TEMPORARY", "TABLE", "ON", "CLUSTER", "DEFAULT", + "MATERIALIZED", "ALIAS", "ENGINE", "AS", "VIEW", "POPULATE", "SETTINGS", "ATTACH", "DETACH", "DROP", + "RENAME", "TO", "ALTER", "ADD", "MODIFY", "CLEAR", "COLUMN", "AFTER", "COPY", "PROJECT", + "PRIMARY", "KEY", "CHECK", "PARTITION", "PART", "FREEZE", "FETCH", "FROM", "SHOW", "INTO", + "OUTFILE", "FORMAT", "TABLES", "DATABASES", "LIKE", "PROCESSLIST", "CASE", "WHEN", "THEN", "ELSE", + "END", "DESCRIBE", "DESC", "USE", "SET", "OPTIMIZE", "FINAL", "DEDUPLICATE", "INSERT", "VALUES", + "SELECT", "DISTINCT", "SAMPLE", "ARRAY", "JOIN", "GLOBAL", "LOCAL", "ANY", "ALL", "INNER", + "LEFT", "RIGHT", "FULL", "OUTER", "CROSS", "USING", "PREWHERE", "WHERE", "GROUP", "BY", + "WITH", "TOTALS", "HAVING", "ORDER", "COLLATE", "LIMIT", "UNION", "AND", "OR", "ASC", + "IN", "KILL", "QUERY", "SYNC", "ASYNC", "TEST", "BETWEEN", "TRUNCATE", "USER", "ROLE", + "PROFILE", "QUOTA", "POLICY", "ROW", "GRANT", "REVOKE", "OPTION", "ADMIN", "EXCEPT", "REPLACE", + "IDENTIFIED", "HOST", "NAME", "READONLY", "WRITABLE", "PERMISSIVE", "FOR", "RESTRICTIVE", "FOR", "RANDOMIZED", + "INTERVAL", "LIMITS", "ONLY", "TRACKING", "IP", "REGEXP"}; } void Suggest::loadImpl(Connection & connection, const ConnectionTimeouts & timeouts, size_t suggestion_limit) From 12336a9ece3d9b6d2073c6d6168a976bfec65b88 Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Thu, 9 Apr 2020 02:57:45 +0300 Subject: [PATCH 240/484] Use "CREATE USER HOST REGEXP" instead of "CREATE USER HOST NAME REGEXP". 
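A before/after sketch of the simplified syntax, based on the 01075_allowed_client_hosts test below (the user name here is hypothetical):

```sql
-- Old form: ALTER USER u42 HOST NAME REGEXP '.*\.anothersite\.com';
ALTER USER u42 HOST REGEXP '.*\.anothersite\.com';
-- Several regexps can still be listed:
ALTER USER u42 HOST REGEXP '.*\.anothersite\.com', '.*\.anothersite\.org';
```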
--- src/Parsers/ASTCreateUserQuery.cpp | 2 +- src/Parsers/ASTCreateUserQuery.h | 4 ++-- src/Parsers/ParserCreateUserQuery.cpp | 2 +- src/Parsers/ParserCreateUserQuery.h | 4 ++-- .../0_stateless/01075_allowed_client_hosts.reference | 8 ++++---- tests/queries/0_stateless/01075_allowed_client_hosts.sql | 8 ++++---- 6 files changed, 14 insertions(+), 14 deletions(-) diff --git a/src/Parsers/ASTCreateUserQuery.cpp b/src/Parsers/ASTCreateUserQuery.cpp index d901ed8f5a1..c8e2a76dfa2 100644 --- a/src/Parsers/ASTCreateUserQuery.cpp +++ b/src/Parsers/ASTCreateUserQuery.cpp @@ -109,7 +109,7 @@ namespace { if (std::exchange(need_comma, true)) settings.ostr << ", "; - settings.ostr << (settings.hilite ? IAST::hilite_keyword : "") << "NAME REGEXP " << (settings.hilite ? IAST::hilite_none : ""); + settings.ostr << (settings.hilite ? IAST::hilite_keyword : "") << "REGEXP " << (settings.hilite ? IAST::hilite_none : ""); bool need_comma2 = false; for (const auto & host_regexp : name_regexps) { diff --git a/src/Parsers/ASTCreateUserQuery.h b/src/Parsers/ASTCreateUserQuery.h index 5a5cc0d9550..54dc51d783b 100644 --- a/src/Parsers/ASTCreateUserQuery.h +++ b/src/Parsers/ASTCreateUserQuery.h @@ -13,14 +13,14 @@ class ASTSettingsProfileElements; /** CREATE USER [IF NOT EXISTS | OR REPLACE] name * [IDENTIFIED [WITH {NO_PASSWORD|PLAINTEXT_PASSWORD|SHA256_PASSWORD|SHA256_HASH|DOUBLE_SHA1_PASSWORD|DOUBLE_SHA1_HASH}] BY {'password'|'hash'}] - * [HOST {LOCAL | NAME 'name' | NAME REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE] + * [HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE] * [DEFAULT ROLE role [,...]] * [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...] * * ALTER USER [IF EXISTS] name * [RENAME TO new_name] * [IDENTIFIED [WITH {PLAINTEXT_PASSWORD|SHA256_PASSWORD|DOUBLE_SHA1_PASSWORD}] BY {'password'|'hash'}] - * [[ADD|DROP] HOST {LOCAL | NAME 'name' | NAME REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE] + * [[ADD|DROP] HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE] * [DEFAULT ROLE role [,...] | ALL | ALL EXCEPT role [,...] ] * [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...] */ diff --git a/src/Parsers/ParserCreateUserQuery.cpp b/src/Parsers/ParserCreateUserQuery.cpp index 28483cc76ec..76a06a0282f 100644 --- a/src/Parsers/ParserCreateUserQuery.cpp +++ b/src/Parsers/ParserCreateUserQuery.cpp @@ -166,7 +166,7 @@ namespace { new_hosts.addLocalHost(); } - else if (ParserKeyword{"NAME REGEXP"}.ignore(pos, expected)) + else if (ParserKeyword{"REGEXP"}.ignore(pos, expected)) { ASTPtr ast; if (!ParserList{std::make_unique(), std::make_unique(TokenType::Comma), false}.parse(pos, ast, expected)) diff --git a/src/Parsers/ParserCreateUserQuery.h b/src/Parsers/ParserCreateUserQuery.h index 4b2af34c003..d609894a7ec 100644 --- a/src/Parsers/ParserCreateUserQuery.h +++ b/src/Parsers/ParserCreateUserQuery.h @@ -8,13 +8,13 @@ namespace DB /** Parses queries like * CREATE USER [IF NOT EXISTS | OR REPLACE] name * [IDENTIFIED [WITH {NO_PASSWORD|PLAINTEXT_PASSWORD|SHA256_PASSWORD|SHA256_HASH|DOUBLE_SHA1_PASSWORD|DOUBLE_SHA1_HASH}] BY {'password'|'hash'}] - * [HOST {LOCAL | NAME 'name' | NAME REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] 
| ANY | NONE] + * [HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE] * [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...] * * ALTER USER [IF EXISTS] name * [RENAME TO new_name] * [IDENTIFIED [WITH {PLAINTEXT_PASSWORD|SHA256_PASSWORD|DOUBLE_SHA1_PASSWORD}] BY {'password'|'hash'}] - * [[ADD|DROP] HOST {LOCAL | NAME 'name' | NAME REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE] + * [[ADD|DROP] HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE] * [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...] */ class ParserCreateUserQuery : public IParserBase diff --git a/tests/queries/0_stateless/01075_allowed_client_hosts.reference b/tests/queries/0_stateless/01075_allowed_client_hosts.reference index 0082653059c..73f54c6027a 100644 --- a/tests/queries/0_stateless/01075_allowed_client_hosts.reference +++ b/tests/queries/0_stateless/01075_allowed_client_hosts.reference @@ -8,10 +8,10 @@ CREATE USER test_user_01075 HOST LOCAL, IP \'2001:db8:11a3:9d7:1f34:8a2e:7a0:765 CREATE USER test_user_01075 HOST LOCAL CREATE USER test_user_01075 HOST NONE CREATE USER test_user_01075 HOST LIKE \'@.somesite.com\' -CREATE USER test_user_01075 HOST NAME REGEXP \'.*.anothersite.com\' -CREATE USER test_user_01075 HOST NAME REGEXP \'.*.anothersite.com\', \'.*.anothersite.org\' -CREATE USER test_user_01075 HOST NAME REGEXP \'.*.anothersite2.com\', \'.*.anothersite2.org\' -CREATE USER test_user_01075 HOST NAME REGEXP \'.*.anothersite3.com\', \'.*.anothersite3.org\' +CREATE USER test_user_01075 HOST REGEXP \'.*.anothersite.com\' +CREATE USER test_user_01075 HOST REGEXP \'.*.anothersite.com\', \'.*.anothersite.org\' +CREATE USER test_user_01075 HOST REGEXP \'.*.anothersite2.com\', \'.*.anothersite2.org\' +CREATE USER test_user_01075 HOST REGEXP \'.*.anothersite3.com\', \'.*.anothersite3.org\' CREATE USER `test_user_01075_x@localhost` HOST LOCAL CREATE USER test_user_01075_x CREATE USER `test_user_01075_x@192.168.23.15` HOST LIKE \'192.168.23.15\' diff --git a/tests/queries/0_stateless/01075_allowed_client_hosts.sql b/tests/queries/0_stateless/01075_allowed_client_hosts.sql index e0b1c0f9905..2960a93f0f2 100644 --- a/tests/queries/0_stateless/01075_allowed_client_hosts.sql +++ b/tests/queries/0_stateless/01075_allowed_client_hosts.sql @@ -30,16 +30,16 @@ SHOW CREATE USER test_user_01075; ALTER USER test_user_01075 HOST LIKE '@.somesite.com'; SHOW CREATE USER test_user_01075; -ALTER USER test_user_01075 HOST NAME REGEXP '.*\.anothersite\.com'; +ALTER USER test_user_01075 HOST REGEXP '.*\.anothersite\.com'; SHOW CREATE USER test_user_01075; -ALTER USER test_user_01075 HOST NAME REGEXP '.*\.anothersite\.com', '.*\.anothersite\.org'; +ALTER USER test_user_01075 HOST REGEXP '.*\.anothersite\.com', '.*\.anothersite\.org'; SHOW CREATE USER test_user_01075; -ALTER USER test_user_01075 HOST NAME REGEXP '.*\.anothersite2\.com', NAME REGEXP '.*\.anothersite2\.org'; +ALTER USER test_user_01075 HOST REGEXP '.*\.anothersite2\.com', REGEXP '.*\.anothersite2\.org'; SHOW CREATE USER test_user_01075; -ALTER USER test_user_01075 HOST NAME REGEXP '.*\.anothersite3\.com' HOST NAME REGEXP '.*\.anothersite3\.org'; +ALTER USER test_user_01075 HOST REGEXP '.*\.anothersite3\.com' HOST REGEXP '.*\.anothersite3\.org'; SHOW CREATE USER test_user_01075; DROP USER test_user_01075; From 
3149f75430161b8feee13b6759ada45d63f151c9 Mon Sep 17 00:00:00 2001 From: "philip.han" Date: Thu, 9 Apr 2020 17:27:55 +0900 Subject: [PATCH 241/484] Replace a reference for Context with a copied bool value. --- src/Interpreters/ActionsVisitor.cpp | 4 ++-- src/Interpreters/ExpressionAnalyzer.cpp | 2 +- src/Interpreters/Set.cpp | 7 ++++--- src/Interpreters/Set.h | 8 ++++---- src/Storages/StorageSet.cpp | 4 ++-- 5 files changed, 13 insertions(+), 12 deletions(-) diff --git a/src/Interpreters/ActionsVisitor.cpp b/src/Interpreters/ActionsVisitor.cpp index f7d64d54f27..38656c47765 100644 --- a/src/Interpreters/ActionsVisitor.cpp +++ b/src/Interpreters/ActionsVisitor.cpp @@ -186,7 +186,7 @@ SetPtr makeExplicitSet( else throw_unsupported_type(right_arg_type); - SetPtr set = std::make_shared(size_limits, create_ordered_set, context); + SetPtr set = std::make_shared(size_limits, create_ordered_set, context.getSettingsRef().transform_null_in); set->setHeader(block); set->insertFromBlock(block); @@ -654,7 +654,7 @@ SetPtr ActionsMatcher::makeSet(const ASTFunction & node, Data & data, bool no_su return subquery_for_set.set; } - SetPtr set = std::make_shared(data.set_size_limit, false, data.context); + SetPtr set = std::make_shared(data.set_size_limit, false, data.context.getSettingsRef().transform_null_in); /** The following happens for GLOBAL INs: * - in the addExternalStorage function, the IN (SELECT ...) subquery is replaced with IN _data1, diff --git a/src/Interpreters/ExpressionAnalyzer.cpp b/src/Interpreters/ExpressionAnalyzer.cpp index d0b44b91af7..ee3ba3c8b98 100644 --- a/src/Interpreters/ExpressionAnalyzer.cpp +++ b/src/Interpreters/ExpressionAnalyzer.cpp @@ -291,7 +291,7 @@ void SelectQueryExpressionAnalyzer::tryMakeSetForIndexFromSubquery(const ASTPtr auto interpreter_subquery = interpretSubquery(subquery_or_table_name, context, {}, query_options); BlockIO res = interpreter_subquery->execute(); - SetPtr set = std::make_shared(settings.size_limits_for_set, true, context); + SetPtr set = std::make_shared(settings.size_limits_for_set, true, context.getSettingsRef().transform_null_in); set->setHeader(res.in->getHeader()); res.in->readPrefix(); diff --git a/src/Interpreters/Set.cpp b/src/Interpreters/Set.cpp index e63eff37047..0504f9d9e6d 100644 --- a/src/Interpreters/Set.cpp +++ b/src/Interpreters/Set.cpp @@ -140,7 +140,7 @@ void Set::setHeader(const Block & header) /// We will insert to the Set only keys, where all components are not NULL. ConstNullMapPtr null_map{}; - ColumnPtr null_map_holder = extractNestedColumnsAndNullMap(key_columns, null_map, context.getSettingsRef().transform_null_in); + ColumnPtr null_map_holder = extractNestedColumnsAndNullMap(key_columns, null_map, transform_null_in); if (fill_set_elements) { @@ -230,7 +230,7 @@ static Field extractValueFromNode(const ASTPtr & node, const IDataType & type, c throw Exception("Incorrect element of set. Must be literal or constant expression.", ErrorCodes::INCORRECT_ELEMENT_OF_SET); } -void Set::createFromAST(const DataTypes & types, ASTPtr node) +void Set::createFromAST(const DataTypes & types, ASTPtr node, const Context & context) { /// Will form a block with values from the set. @@ -350,7 +350,8 @@ ColumnPtr Set::execute(const Block & block, bool negative) const /// We will check existence in Set only for keys, where all components are not NULL. 
ConstNullMapPtr null_map{}; - ColumnPtr null_map_holder = extractNestedColumnsAndNullMap(key_columns, null_map, context.getSettingsRef().transform_null_in); + + ColumnPtr null_map_holder = extractNestedColumnsAndNullMap(key_columns, null_map, transform_null_in); executeOrdinary(key_columns, vec_res, negative, null_map); diff --git a/src/Interpreters/Set.h b/src/Interpreters/Set.h index da20ffc41b6..90ff6c48dec 100644 --- a/src/Interpreters/Set.h +++ b/src/Interpreters/Set.h @@ -30,9 +30,9 @@ public: /// (that is useful only for checking that some value is in the set and may not store the original values), /// store all set elements in explicit form. /// This is needed for subsequent use for index. - Set(const SizeLimits & limits_, bool fill_set_elements_, const Context & context_) + Set(const SizeLimits & limits_, bool fill_set_elements_, bool transform_null_in_) : log(&Logger::get("Set")), - limits(limits_), fill_set_elements(fill_set_elements_), context(context_) + limits(limits_), fill_set_elements(fill_set_elements_), transform_null_in(transform_null_in_) { } @@ -45,7 +45,7 @@ public: * 'types' - types of what are on the left hand side of IN. * 'node' - list of values: 1, 2, 3 or list of tuples: (1, 2), (3, 4), (5, 6). */ - void createFromAST(const DataTypes & types, ASTPtr node); + void createFromAST(const DataTypes & types, ASTPtr node, const Context & context); /** Create a Set from stream. * Call setHeader, then call insertFromBlock for each block. @@ -113,7 +113,7 @@ private: /// Do we need to additionally store all elements of the set in explicit form for subsequent use for index. bool fill_set_elements; - const Context & context; + bool transform_null_in; bool has_null = false; diff --git a/src/Storages/StorageSet.cpp b/src/Storages/StorageSet.cpp index 45e1f81b487..7d2a7ee128f 100644 --- a/src/Storages/StorageSet.cpp +++ b/src/Storages/StorageSet.cpp @@ -112,7 +112,7 @@ StorageSet::StorageSet( const ConstraintsDescription & constraints_, const Context & context_) : StorageSetOrJoinBase{relative_path_, table_id_, columns_, constraints_, context_}, - set(std::make_shared(SizeLimits(), false, context_)) + set(std::make_shared(SizeLimits(), false, context_.getSettingsRef().transform_null_in)) { Block header = getSampleBlock(); header = header.sortColumns(); @@ -137,7 +137,7 @@ void StorageSet::truncate(const ASTPtr &, const Context & context, TableStructur header = header.sortColumns(); increment = 0; - set = std::make_shared(SizeLimits(), false, context); + set = std::make_shared(SizeLimits(), false, context.getSettingsRef().transform_null_in); set->setHeader(header); } From 4847914edad4a82caaba40092753fabc152a4c73 Mon Sep 17 00:00:00 2001 From: Sergei Shtykov Date: Thu, 9 Apr 2020 13:15:54 +0300 Subject: [PATCH 242/484] CLICKHOUSEDOCS-475: Fixes. --- docs/en/operations/system_tables.md | 2 +- docs/ru/operations/system_tables.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/en/operations/system_tables.md b/docs/en/operations/system_tables.md index 60d13b939fb..8905ca14569 100644 --- a/docs/en/operations/system_tables.md +++ b/docs/en/operations/system_tables.md @@ -153,7 +153,7 @@ Contains information about [external dictionaries](../sql_reference/dictionaries Columns: -- `database` ([String](../sql_reference/data_types/string.md)) — Database name where the dictionary is located. Only for dictionaries created by DDL query, for others is always an empty string. 
+- `database` ([String](../sql_reference/data_types/string.md)) — Name of the database containing the dictionary created by DDL query. Empty string for other dictionaries. - `name` ([String](../sql_reference/data_types/string.md)) — [Dictionary name](../sql_reference/dictionaries/external_dictionaries/external_dicts_dict.md). - `status` ([Enum8](../sql_reference/data_types/enum.md)) — Dictionary status. Possible values: - `NOT_LOADED` — Dictionary was not loaded because it was not used. diff --git a/docs/ru/operations/system_tables.md b/docs/ru/operations/system_tables.md index a70d7c97d0b..fac1e63264b 100644 --- a/docs/ru/operations/system_tables.md +++ b/docs/ru/operations/system_tables.md @@ -141,7 +141,7 @@ SELECT * FROM system.contributors WHERE name='Olga Khvostikova' Столбцы: -- `database` ([String](../data_types/string.md)) — Имя базы данных, в которой находится словарь. Только для словарей, созданных с помощью DDL-запроса, для остальных — всегда пустая строка. +- `database` ([String](../data_types/string.md)) — Имя базы данных, в которой находится словарь, созданный с помощью DDL-запроса. Пустая строка для других словарей. - `name` ([String](../data_types/string.md)) — [Имя словаря](../query_language/dicts/external_dicts_dict.md). - `status` ([Enum8](../data_types/enum.md)) — Статус словаря. Возможные значения: - `NOT_LOADED` — Словарь не загружен, потому что не использовался. From 9f5a40e7004f8aaa233fea7e64c38cd2d948450b Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Thu, 9 Apr 2020 13:35:51 +0300 Subject: [PATCH 243/484] Added comment. --- src/Processors/NullSink.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/Processors/NullSink.h b/src/Processors/NullSink.h index b3c3bc1ac60..5d304a0d68e 100644 --- a/src/Processors/NullSink.h +++ b/src/Processors/NullSink.h @@ -5,6 +5,7 @@ namespace DB { +/// Sink which closes input port and reads nothing. class NullSink : public IProcessor { public: @@ -20,6 +21,7 @@ public: InputPort & getPort() { return inputs.front(); } }; +/// Sink which reads everything and do nothing with it. class EmptySink : public ISink { public: From 7cc0c99669bbec93fa7d57ed034199ba7db6c19c Mon Sep 17 00:00:00 2001 From: Mikhail Filimonov Date: Thu, 9 Apr 2020 15:16:56 +0200 Subject: [PATCH 244/484] clickhouse-docker-util --- utils/clickhouse-docker | 57 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 57 insertions(+) create mode 100755 utils/clickhouse-docker diff --git a/utils/clickhouse-docker b/utils/clickhouse-docker new file mode 100755 index 00000000000..6f2d1197c0a --- /dev/null +++ b/utils/clickhouse-docker @@ -0,0 +1,57 @@ +#!/bin/bash + +if [ $# -lt 1 ] +then +cat << HELP + +clickhouse-docker -- open clickhouse-client of desired version in docker container (automatically removed after you exit bash shell). 
+
+EXAMPLE:
+  - start latest version:
+        clickhouse-docker latest
+
+  - start version 20.1:
+        clickhouse-docker 20.1
+
+  - list available versions:
+        clickhouse-docker list
+HELP
+exit
+fi
+
+param="$1"
+
+if [ "${param}" = "list" ]
+then
+    # https://stackoverflow.com/a/39454426/1555175
+    wget -q https://registry.hub.docker.com/v1/repositories/yandex/clickhouse-server/tags -O - | sed -e 's/[][]//g' -e 's/"//g' -e 's/ //g' | tr '}' '\n' | awk -F: '{print $3}'
+else
+    docker pull yandex/clickhouse-server:${param}
+    tmp_dir=$(mktemp -d -t ci-XXXXXXXXXX) # older versions require the /nonexistent folder to exist to run clickhouse-client
+    chmod 777 ${tmp_dir}
+    set -e
+    containerid=`docker run -v${tmp_dir}:/nonexistent -d yandex/clickhouse-server:${param}`
+    set +e
+    while :
+    do
+        # the init-file trick starts clickhouse-client inside a bash shell (handy if you need to exit to bash, check something, and get back to clickhouse-client)
+        docker exec -it ${containerid} bash -c 'bash --init-file <(echo "clickhouse client -m")'
+
+        printf "\n\nYou exited the session. What next?\n"
+        echo "    [Q]uit and remove container."
+        echo "    [R]estart clickhouse and run clickhouse-client in shell again."
+        echo "You can also hit Ctrl+C to exit and keep the container running."
+
+        while :
+        do
+            read -p "Quit or restart [Q/R]?" choice
+            case "$choice" in
+                q|Q|exit ) break 2;;
+                r|R|restart ) echo "Restarting container ..."; docker restart ${containerid} > /dev/null; break 1;;
+                * ) echo "I don't understand. Please type Q or R" ;;
+            esac
+        done
+    done
+    docker rm -f ${containerid} > /dev/null
+    rm -rf ${tmp_dir}
+fi

From 23e757bcb79e11756e72134fdf037052a0b295b5 Mon Sep 17 00:00:00 2001
From: filimonov <1549571+filimonov@users.noreply.github.com>
Date: Thu, 9 Apr 2020 17:10:29 +0200
Subject: [PATCH 245/484] Fix link to prev changelog

---
 CHANGELOG.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index a0ea7f62b6c..d5301de8a23 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -641,4 +641,4 @@
 #### Security Fix
 * Fixed the possibility of reading directories structure in tables with `File` table engine. This fixes [#8536](https://github.com/ClickHouse/ClickHouse/issues/8536). [#8537](https://github.com/ClickHouse/ClickHouse/pull/8537) ([alexey-milovidov](https://github.com/alexey-milovidov))

-## [Changelog for 2019](https://github.com/ClickHouse/ClickHouse/blob/master/docs/en/changelog/2019.md)
+## [Changelog for 2019](https://github.com/ClickHouse/ClickHouse/blob/master/docs/en/whats_new/changelog/2019.md)

From 25eae6abe035bda78634f17d15c9f05348d97aad Mon Sep 17 00:00:00 2001
From: Ivan Blinkov
Date: Thu, 9 Apr 2020 18:51:36 +0300
Subject: [PATCH 246/484] Update README.md

---
 README.md | 1 -
 1 file changed, 1 deletion(-)

diff --git a/README.md b/README.md
index e9ae2c2d2f4..3db5e08d2a9 100644
--- a/README.md
+++ b/README.md
@@ -15,7 +15,6 @@ ClickHouse is an open-source column-oriented database management system that all

 ## Upcoming Events

-* [ClickHouse in Avito (online in Russian)](https://avitotech.timepad.ru/event/1290051/) on April 9, 2020.
 * [ClickHouse Monitoring Round Table (online in English)](https://www.eventbrite.com/e/clickhouse-april-virtual-meetup-tickets-102272923066) on April 15, 2020.
 * [ClickHouse Workshop in Novosibirsk](https://2020.codefest.ru/lecture/1628) on TBD date.
 * [Yandex C++ Open-Source Sprints in Moscow](https://events.yandex.ru/events/otkrytyj-kod-v-yandek-28-03-2020) on TBD date.
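A quick usage sketch for the clickhouse-docker helper added in [PATCH 244/484] above; the tag 20.1 is only an illustration, any tag printed by the list subcommand works the same way:

    # list the server tags published on Docker Hub
    utils/clickhouse-docker list

    # pull the 20.1 image, start a throwaway container and open clickhouse-client in it
    utils/clickhouse-docker 20.1

On exiting the client, the script offers [Q]uit (the container is removed) or [R]estart; hitting Ctrl+C instead leaves the container running.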
From 20d935566fc83d82a9ad1d47b00ece69b7efd01d Mon Sep 17 00:00:00 2001
From: Vitaly Baranov
Date: Thu, 9 Apr 2020 19:12:55 +0300
Subject: [PATCH 247/484] Enable access management in stateless tests #3.

---
 tests/users.d/access_management.xml | 7 +++++++
 1 file changed, 7 insertions(+)
 create mode 100644 tests/users.d/access_management.xml

diff --git a/tests/users.d/access_management.xml b/tests/users.d/access_management.xml
new file mode 100644
index 00000000000..7e799cb7b10
--- /dev/null
+++ b/tests/users.d/access_management.xml
@@ -0,0 +1,7 @@
+<yandex>
+    <users>
+        <default>
+            <access_management>1</access_management>
+        </default>
+    </users>
+</yandex>

From a4e2fd24b05a0dc1e3fd661b02daed720c8eae48 Mon Sep 17 00:00:00 2001
From: Ivan Blinkov
Date: Thu, 9 Apr 2020 21:06:07 +0300
Subject: [PATCH 248/484] Get rid of toc_ru.yml (#10152)

---
 ...sampling_query_profiler_example_result.txt | 4 -
 docs/ru/changelog/2017.md | 265 ---
 docs/ru/changelog/2018.md | 1060 ---------
 docs/ru/changelog/2019.md | 2071 ----------------
 docs/ru/changelog/index.md | 1 -
 docs/ru/commercial/cloud.md | 24 +-
 docs/ru/commercial/index.md | 7 +
 docs/ru/development/architecture.md | 205 +-
 docs/ru/development/build.md | 79 +-
 docs/ru/development/build_cross_arm.md | 21 +-
 docs/ru/development/build_cross_osx.md | 25 +-
 docs/ru/development/build_osx.md | 37 +-
 docs/ru/development/index.md | 7 +-
 docs/ru/development/tests.md | 239 +-
 .../{ => engines}/database_engines/index.md | 2 +-
 docs/ru/{ => engines}/database_engines/lazy.md | 0
 .../{ => engines}/database_engines/mysql.md | 30 +-
 docs/ru/engines/index.md | 6 +
 .../table_engines/index.md | 56 +-
 .../table_engines/integrations}/hdfs.md | 6 +-
 .../table_engines/integrations/index.md | 5 +
 .../table_engines/integrations}/jdbc.md | 4 +-
 .../table_engines/integrations}/kafka.md | 4 +-
 .../table_engines/integrations}/mysql.md | 8 +-
 .../table_engines/integrations}/odbc.md | 10 +-
 .../engines/table_engines/log_family/index.md | 5 +
 .../table_engines/log_family}/log.md | 0
 .../table_engines/log_family}/log_family.md | 2 +-
 .../table_engines/log_family}/stripelog.md | 2 +-
 .../table_engines/log_family}/tinylog.md | 0
 .../mergetree_family}/aggregatingmergetree.md | 4 +-
 .../mergetree_family}/collapsingmergetree.md | 2 +-
 .../custom_partitioning_key.md | 10 +-
 .../mergetree_family}/graphitemergetree.md | 4 +-
 .../table_engines/mergetree_family/index.md | 5 +
 .../mergetree_family}/mergetree.md | 71 +-
 .../mergetree_family}/replacingmergetree.md | 2 +-
 .../mergetree_family}/replication.md | 8 +-
 .../mergetree_family}/summingmergetree.md | 8 +-
 .../versionedcollapsingmergetree.md | 2 +-
 .../table_engines/special}/buffer.md | 0
 .../table_engines/special}/dictionary.md | 4 +-
 .../table_engines/special}/distributed.md | 10 +-
 .../table_engines/special}/external_data.md | 0
 .../table_engines/special}/file.md | 8 +-
 .../engines/table_engines/special/generate.md | 59 +
 .../ru/engines/table_engines/special/index.md | 5 +
 .../table_engines/special}/join.md | 22 +-
 .../table_engines/special/materializedview.md | 5 +
 .../table_engines/special}/memory.md | 0
 .../table_engines/special}/merge.md | 2 +-
 .../table_engines/special}/null.md | 0
 .../table_engines/special}/set.md | 0
 .../table_engines/special}/url.md | 4 +-
 .../table_engines/special}/view.md | 0
 docs/ru/faq/general.md | 6 +-
 docs/ru/faq/index.md | 6 +
 docs/ru/getting_started/tutorial.md | 145 +-
 docs/ru/guides/apply_catboost_model.md | 6 +-
 docs/ru/interfaces/cli.md | 2 +-
 docs/ru/interfaces/formats.md | 92 +-
 docs/ru/interfaces/http.md | 2 +-
 docs/ru/interfaces/mysql.md | 2 +-
 docs/ru/interfaces/third-party/index.md | 5 +
.../ru/interfaces/third-party/integrations.md | 4 +- docs/ru/introduction/adopters.md | 147 +- docs/ru/introduction/distinctive_features.md | 2 +- docs/ru/introduction/index.md | 6 + docs/ru/operations/access_rights.md | 2 +- docs/ru/operations/backup.md | 8 +- docs/ru/operations/configuration_files.md | 2 +- docs/ru/operations/index.md | 2 +- docs/ru/operations/monitoring.md | 4 +- .../optimizing_performance/index.md | 5 + .../sampling_query_profiler.md | 62 + .../performance/sampling_query_profiler.md | 61 - ...sampling_query_profiler_example_result.txt | 4 - docs/ru/operations/performance_test.md | 33 +- docs/ru/operations/requirements.md | 4 +- .../index.md | 2 +- .../settings.md | 100 +- .../operations/settings/query_complexity.md | 12 +- docs/ru/operations/settings/settings.md | 88 +- docs/ru/operations/settings/settings_users.md | 2 +- docs/ru/operations/system_tables.md | 170 +- docs/ru/operations/table_engines/generate.md | 58 - .../table_engines/materializedview.md | 5 - docs/ru/operations/troubleshooting.md | 2 +- .../utilities/clickhouse-benchmark.md | 154 ++ .../{utils => utilities}/clickhouse-copier.md | 0 .../{utils => utilities}/clickhouse-local.md | 2 +- .../operations/{utils => utilities}/index.md | 0 .../operations/utils/clickhouse-benchmark.md | 153 -- docs/ru/query_language/index.md | 9 - docs/ru/roadmap.md | 16 - .../aggregate_functions}/combinators.md | 6 +- .../aggregate_functions}/index.md | 0 .../parametric_functions.md | 16 +- .../aggregate_functions}/reference.md | 84 +- .../data_types}/aggregatefunction.md | 4 +- .../{ => sql_reference}/data_types/array.md | 2 +- .../{ => sql_reference}/data_types/boolean.md | 0 .../ru/{ => sql_reference}/data_types/date.md | 0 .../data_types/datetime.md | 20 +- .../data_types/datetime64.md | 12 +- .../{ => sql_reference}/data_types/decimal.md | 0 .../sql_reference/data_types/domains/index.md | 5 + .../data_types/domains/ipv4.md | 0 .../data_types/domains/ipv6.md | 0 .../data_types/domains/overview.md | 0 .../ru/{ => sql_reference}/data_types/enum.md | 2 +- .../data_types/fixedstring.md | 2 +- .../{ => sql_reference}/data_types/float.md | 2 +- .../{ => sql_reference}/data_types/index.md | 0 .../data_types/int_uint.md | 0 .../nested_data_structures/index.md | 0 .../nested_data_structures/nested.md | 0 .../data_types/nullable.md | 2 +- .../special_data_types/expression.md | 0 .../data_types/special_data_types/index.md | 0 .../data_types/special_data_types/interval.md | 8 +- .../data_types/special_data_types/nothing.md | 4 +- .../data_types/special_data_types/set.md | 0 .../{ => sql_reference}/data_types/string.md | 0 .../{ => sql_reference}/data_types/tuple.md | 4 +- .../ru/{ => sql_reference}/data_types/uuid.md | 6 +- .../external_dictionaries}/external_dicts.md | 12 +- .../external_dicts_dict.md | 2 +- .../external_dicts_dict_hierarchical.md | 2 +- .../external_dicts_dict_layout.md | 2 +- .../external_dicts_dict_lifetime.md | 0 .../external_dicts_dict_sources.md | 14 +- .../external_dicts_dict_structure.md | 6 +- .../external_dictionaries/index.md | 5 + .../dictionaries}/index.md | 4 +- .../dictionaries}/internal_dicts.md | 0 .../functions/arithmetic_functions.md | 0 .../functions/array_functions.md | 10 +- .../functions/array_join.md | 0 .../functions/bit_functions.md | 2 +- .../functions/bitmap_functions.md | 6 +- .../functions/comparison_functions.md | 0 .../functions/conditional_functions.md | 2 +- .../functions/date_time_functions.md | 4 +- .../functions/encoding_functions.md | 4 +- .../functions/ext_dict_functions.md | 24 
+- .../functions/functions_for_nulls.md | 2 +- .../functions/geo.md | 34 +- .../functions/hash_functions.md | 40 +- .../functions/higher_order_functions.md | 0 .../functions/in_functions.md | 2 +- .../functions/index.md | 0 .../functions/introspection.md | 12 +- .../functions/ip_address_functions.md | 4 +- .../functions/json_functions.md | 0 .../functions/logical_functions.md | 0 .../functions/machine_learning_functions.md | 4 +- .../functions/math_functions.md | 0 .../functions/other_functions.md | 28 +- .../functions/random_functions.md | 0 .../functions/rounding_functions.md | 4 +- .../functions/splitting_merging_functions.md | 0 .../functions/string_functions.md | 16 +- .../functions/string_replace_functions.md | 0 .../functions/string_search_functions.md | 0 .../functions/type_conversion_functions.md | 28 +- .../functions/url_functions.md | 4 +- .../functions/uuid_functions.md | 6 +- .../functions/ym_dict_functions.md | 4 +- docs/ru/sql_reference/index.md | 9 + .../operators.md | 16 +- .../statements}/alter.md | 32 +- .../statements}/create.md | 26 +- docs/ru/sql_reference/statements/index.md | 5 + .../statements}/insert_into.md | 8 +- .../statements}/misc.md | 26 +- .../statements}/select.md | 70 +- .../statements}/show.md | 4 +- .../statements}/system.md | 4 +- .../syntax.md | 10 +- .../table_functions/file.md | 2 +- .../table_functions/generate.md | 0 .../table_functions/hdfs.md | 0 .../table_functions/index.md | 20 +- .../table_functions/input.md | 0 .../table_functions/jdbc.md | 0 .../table_functions/merge.md | 0 .../table_functions/mysql.md | 4 +- .../table_functions/numbers.md | 0 .../table_functions/odbc.md | 4 +- .../table_functions/remote.md | 0 .../table_functions/url.md | 0 docs/ru/whats_new/changelog/2017.md | 266 +++ docs/ru/whats_new/changelog/2018.md | 1061 +++++++++ docs/ru/whats_new/changelog/2019.md | 2072 +++++++++++++++++ docs/ru/whats_new/changelog/index.md | 650 ++++++ docs/ru/whats_new/index.md | 6 + docs/ru/whats_new/roadmap.md | 17 + docs/ru/{ => whats_new}/security_changelog.md | 0 docs/toc_ru.yml | 253 -- docs/tools/convert_toc.py | 2 +- 201 files changed, 5657 insertions(+), 5181 deletions(-) delete mode 100644 docs/ru/changelog/2017.md delete mode 100644 docs/ru/changelog/2018.md delete mode 100644 docs/ru/changelog/2019.md delete mode 120000 docs/ru/changelog/index.md create mode 100644 docs/ru/commercial/index.md rename docs/ru/{ => engines}/database_engines/index.md (78%) rename docs/ru/{ => engines}/database_engines/lazy.md (100%) rename docs/ru/{ => engines}/database_engines/mysql.md (68%) create mode 100644 docs/ru/engines/index.md rename docs/ru/{operations => engines}/table_engines/index.md (64%) rename docs/ru/{operations/table_engines => engines/table_engines/integrations}/hdfs.md (94%) create mode 100644 docs/ru/engines/table_engines/integrations/index.md rename docs/ru/{operations/table_engines => engines/table_engines/integrations}/jdbc.md (95%) rename docs/ru/{operations/table_engines => engines/table_engines/integrations}/kafka.md (95%) rename docs/ru/{operations/table_engines => engines/table_engines/integrations}/mysql.md (89%) rename docs/ru/{operations/table_engines => engines/table_engines/integrations}/odbc.md (90%) create mode 100644 docs/ru/engines/table_engines/log_family/index.md rename docs/ru/{operations/table_engines => engines/table_engines/log_family}/log.md (100%) rename docs/ru/{operations/table_engines => engines/table_engines/log_family}/log_family.md (97%) rename docs/ru/{operations/table_engines => 
engines/table_engines/log_family}/stripelog.md (98%) rename docs/ru/{operations/table_engines => engines/table_engines/log_family}/tinylog.md (100%) rename docs/ru/{operations/table_engines => engines/table_engines/mergetree_family}/aggregatingmergetree.md (95%) rename docs/ru/{operations/table_engines => engines/table_engines/mergetree_family}/collapsingmergetree.md (99%) rename docs/ru/{operations/table_engines => engines/table_engines/mergetree_family}/custom_partitioning_key.md (91%) rename docs/ru/{operations/table_engines => engines/table_engines/mergetree_family}/graphitemergetree.md (93%) create mode 100644 docs/ru/engines/table_engines/mergetree_family/index.md rename docs/ru/{operations/table_engines => engines/table_engines/mergetree_family}/mergetree.md (87%) rename docs/ru/{operations/table_engines => engines/table_engines/mergetree_family}/replacingmergetree.md (96%) rename docs/ru/{operations/table_engines => engines/table_engines/mergetree_family}/replication.md (97%) rename docs/ru/{operations/table_engines => engines/table_engines/mergetree_family}/summingmergetree.md (91%) rename docs/ru/{operations/table_engines => engines/table_engines/mergetree_family}/versionedcollapsingmergetree.md (99%) rename docs/ru/{operations/table_engines => engines/table_engines/special}/buffer.md (100%) rename docs/ru/{operations/table_engines => engines/table_engines/special}/dictionary.md (94%) rename docs/ru/{operations/table_engines => engines/table_engines/special}/distributed.md (92%) rename docs/ru/{operations/table_engines => engines/table_engines/special}/external_data.md (100%) rename docs/ru/{operations/table_engines => engines/table_engines/special}/file.md (81%) create mode 100644 docs/ru/engines/table_engines/special/generate.md create mode 100644 docs/ru/engines/table_engines/special/index.md rename docs/ru/{operations/table_engines => engines/table_engines/special}/join.md (70%) create mode 100644 docs/ru/engines/table_engines/special/materializedview.md rename docs/ru/{operations/table_engines => engines/table_engines/special}/memory.md (100%) rename docs/ru/{operations/table_engines => engines/table_engines/special}/merge.md (98%) rename docs/ru/{operations/table_engines => engines/table_engines/special}/null.md (100%) rename docs/ru/{operations/table_engines => engines/table_engines/special}/set.md (100%) rename docs/ru/{operations/table_engines => engines/table_engines/special}/url.md (95%) rename docs/ru/{operations/table_engines => engines/table_engines/special}/view.md (100%) create mode 100644 docs/ru/faq/index.md create mode 100644 docs/ru/interfaces/third-party/index.md create mode 100644 docs/ru/introduction/index.md create mode 100644 docs/ru/operations/optimizing_performance/index.md create mode 100644 docs/ru/operations/optimizing_performance/sampling_query_profiler.md delete mode 100644 docs/ru/operations/performance/sampling_query_profiler.md rename docs/ru/operations/{server_settings => server_configuration_parameters}/index.md (93%) rename docs/ru/operations/{server_settings => server_configuration_parameters}/settings.md (84%) delete mode 100644 docs/ru/operations/table_engines/generate.md delete mode 100644 docs/ru/operations/table_engines/materializedview.md create mode 100644 docs/ru/operations/utilities/clickhouse-benchmark.md rename docs/ru/operations/{utils => utilities}/clickhouse-copier.md (100%) rename docs/ru/operations/{utils => utilities}/clickhouse-local.md (97%) rename docs/ru/operations/{utils => utilities}/index.md (100%) delete mode 100644 
docs/ru/operations/utils/clickhouse-benchmark.md delete mode 100644 docs/ru/query_language/index.md delete mode 100644 docs/ru/roadmap.md rename docs/ru/{query_language/agg_functions => sql_reference/aggregate_functions}/combinators.md (96%) rename docs/ru/{query_language/agg_functions => sql_reference/aggregate_functions}/index.md (100%) rename docs/ru/{query_language/agg_functions => sql_reference/aggregate_functions}/parametric_functions.md (94%) rename docs/ru/{query_language/agg_functions => sql_reference/aggregate_functions}/reference.md (91%) rename docs/ru/{data_types/nested_data_structures => sql_reference/data_types}/aggregatefunction.md (87%) rename docs/ru/{ => sql_reference}/data_types/array.md (92%) rename docs/ru/{ => sql_reference}/data_types/boolean.md (100%) rename docs/ru/{ => sql_reference}/data_types/date.md (100%) rename docs/ru/{ => sql_reference}/data_types/datetime.md (86%) rename docs/ru/{ => sql_reference}/data_types/datetime64.md (92%) rename docs/ru/{ => sql_reference}/data_types/decimal.md (100%) create mode 100644 docs/ru/sql_reference/data_types/domains/index.md rename docs/ru/{ => sql_reference}/data_types/domains/ipv4.md (100%) rename docs/ru/{ => sql_reference}/data_types/domains/ipv6.md (100%) rename docs/ru/{ => sql_reference}/data_types/domains/overview.md (100%) rename docs/ru/{ => sql_reference}/data_types/enum.md (99%) rename docs/ru/{ => sql_reference}/data_types/fixedstring.md (89%) rename docs/ru/{ => sql_reference}/data_types/float.md (97%) rename docs/ru/{ => sql_reference}/data_types/index.md (100%) rename docs/ru/{ => sql_reference}/data_types/int_uint.md (100%) rename docs/ru/{ => sql_reference}/data_types/nested_data_structures/index.md (100%) rename docs/ru/{ => sql_reference}/data_types/nested_data_structures/nested.md (100%) rename docs/ru/{ => sql_reference}/data_types/nullable.md (83%) rename docs/ru/{ => sql_reference}/data_types/special_data_types/expression.md (100%) rename docs/ru/{ => sql_reference}/data_types/special_data_types/index.md (100%) rename docs/ru/{ => sql_reference}/data_types/special_data_types/interval.md (84%) rename docs/ru/{ => sql_reference}/data_types/special_data_types/nothing.md (63%) rename docs/ru/{ => sql_reference}/data_types/special_data_types/set.md (100%) rename docs/ru/{ => sql_reference}/data_types/string.md (100%) rename docs/ru/{ => sql_reference}/data_types/tuple.md (87%) rename docs/ru/{ => sql_reference}/data_types/uuid.md (82%) rename docs/ru/{query_language/dicts => sql_reference/dictionaries/external_dictionaries}/external_dicts.md (79%) rename docs/ru/{query_language/dicts => sql_reference/dictionaries/external_dictionaries}/external_dicts_dict.md (91%) rename docs/ru/{query_language/dicts => sql_reference/dictionaries/external_dictionaries}/external_dicts_dict_hierarchical.md (90%) rename docs/ru/{query_language/dicts => sql_reference/dictionaries/external_dictionaries}/external_dicts_dict_layout.md (99%) rename docs/ru/{query_language/dicts => sql_reference/dictionaries/external_dictionaries}/external_dicts_dict_lifetime.md (100%) rename docs/ru/{query_language/dicts => sql_reference/dictionaries/external_dictionaries}/external_dicts_dict_sources.md (96%) rename docs/ru/{query_language/dicts => sql_reference/dictionaries/external_dictionaries}/external_dicts_dict_structure.md (90%) create mode 100644 docs/ru/sql_reference/dictionaries/external_dictionaries/index.md rename docs/ru/{query_language/dicts => sql_reference/dictionaries}/index.md (82%) rename docs/ru/{query_language/dicts => 
sql_reference/dictionaries}/internal_dicts.md (100%) rename docs/ru/{query_language => sql_reference}/functions/arithmetic_functions.md (100%) rename docs/ru/{query_language => sql_reference}/functions/array_functions.md (97%) rename docs/ru/{query_language => sql_reference}/functions/array_join.md (100%) rename docs/ru/{query_language => sql_reference}/functions/bit_functions.md (94%) rename docs/ru/{query_language => sql_reference}/functions/bitmap_functions.md (97%) rename docs/ru/{query_language => sql_reference}/functions/comparison_functions.md (100%) rename docs/ru/{query_language => sql_reference}/functions/conditional_functions.md (98%) rename docs/ru/{query_language => sql_reference}/functions/date_time_functions.md (97%) rename docs/ru/{query_language => sql_reference}/functions/encoding_functions.md (91%) rename docs/ru/{query_language => sql_reference}/functions/ext_dict_functions.md (85%) rename docs/ru/{query_language => sql_reference}/functions/functions_for_nulls.md (97%) rename docs/ru/{query_language => sql_reference}/functions/geo.md (86%) rename docs/ru/{query_language => sql_reference}/functions/hash_functions.md (88%) rename docs/ru/{query_language => sql_reference}/functions/higher_order_functions.md (100%) rename docs/ru/{query_language => sql_reference}/functions/in_functions.md (93%) rename docs/ru/{query_language => sql_reference}/functions/index.md (100%) rename docs/ru/{query_language => sql_reference}/functions/introspection.md (94%) rename docs/ru/{query_language => sql_reference}/functions/ip_address_functions.md (95%) rename docs/ru/{query_language => sql_reference}/functions/json_functions.md (100%) rename docs/ru/{query_language => sql_reference}/functions/logical_functions.md (100%) rename docs/ru/{query_language => sql_reference}/functions/machine_learning_functions.md (51%) rename docs/ru/{query_language => sql_reference}/functions/math_functions.md (100%) rename docs/ru/{query_language => sql_reference}/functions/other_functions.md (97%) rename docs/ru/{query_language => sql_reference}/functions/random_functions.md (100%) rename docs/ru/{query_language => sql_reference}/functions/rounding_functions.md (98%) rename docs/ru/{query_language => sql_reference}/functions/splitting_merging_functions.md (100%) rename docs/ru/{query_language => sql_reference}/functions/string_functions.md (97%) rename docs/ru/{query_language => sql_reference}/functions/string_replace_functions.md (100%) rename docs/ru/{query_language => sql_reference}/functions/string_search_functions.md (100%) rename docs/ru/{query_language => sql_reference}/functions/type_conversion_functions.md (90%) rename docs/ru/{query_language => sql_reference}/functions/url_functions.md (98%) rename docs/ru/{query_language => sql_reference}/functions/uuid_functions.md (92%) rename docs/ru/{query_language => sql_reference}/functions/ym_dict_functions.md (98%) create mode 100644 docs/ru/sql_reference/index.md rename docs/ru/{query_language => sql_reference}/operators.md (89%) rename docs/ru/{query_language => sql_reference/statements}/alter.md (93%) rename docs/ru/{query_language => sql_reference/statements}/create.md (94%) create mode 100644 docs/ru/sql_reference/statements/index.md rename docs/ru/{query_language => sql_reference/statements}/insert_into.md (88%) rename docs/ru/{query_language => sql_reference/statements}/misc.md (87%) rename docs/ru/{query_language => sql_reference/statements}/select.md (91%) rename docs/ru/{query_language => sql_reference/statements}/show.md (90%) rename 
docs/ru/{query_language => sql_reference/statements}/system.md (80%) rename docs/ru/{query_language => sql_reference}/syntax.md (97%) rename docs/ru/{query_language => sql_reference}/table_functions/file.md (92%) rename docs/ru/{query_language => sql_reference}/table_functions/generate.md (100%) rename docs/ru/{query_language => sql_reference}/table_functions/hdfs.md (100%) rename docs/ru/{query_language => sql_reference}/table_functions/index.md (62%) rename docs/ru/{query_language => sql_reference}/table_functions/input.md (100%) rename docs/ru/{query_language => sql_reference}/table_functions/jdbc.md (100%) rename docs/ru/{query_language => sql_reference}/table_functions/merge.md (100%) rename docs/ru/{query_language => sql_reference}/table_functions/mysql.md (93%) rename docs/ru/{query_language => sql_reference}/table_functions/numbers.md (100%) rename docs/ru/{query_language => sql_reference}/table_functions/odbc.md (95%) rename docs/ru/{query_language => sql_reference}/table_functions/remote.md (100%) rename docs/ru/{query_language => sql_reference}/table_functions/url.md (100%) create mode 100644 docs/ru/whats_new/changelog/2017.md create mode 100644 docs/ru/whats_new/changelog/2018.md create mode 100644 docs/ru/whats_new/changelog/2019.md create mode 100644 docs/ru/whats_new/changelog/index.md create mode 100644 docs/ru/whats_new/index.md create mode 100644 docs/ru/whats_new/roadmap.md rename docs/ru/{ => whats_new}/security_changelog.md (100%) delete mode 100644 docs/toc_ru.yml diff --git a/docs/fa/operations/performance/sampling_query_profiler_example_result.txt b/docs/fa/operations/performance/sampling_query_profiler_example_result.txt index a5f6d71ca95..56c2fdf9c65 100644 --- a/docs/fa/operations/performance/sampling_query_profiler_example_result.txt +++ b/docs/fa/operations/performance/sampling_query_profiler_example_result.txt @@ -1,7 +1,3 @@ ---- -en_copy: true ---- - Row 1: ────── count(): 6344 diff --git a/docs/ru/changelog/2017.md b/docs/ru/changelog/2017.md deleted file mode 100644 index 95156754100..00000000000 --- a/docs/ru/changelog/2017.md +++ /dev/null @@ -1,265 +0,0 @@ ---- -en_copy: true ---- - -### ClickHouse release 1.1.54327, 2017-12-21 {#clickhouse-release-1-1-54327-2017-12-21} - -This release contains bug fixes for the previous release 1.1.54318: - -- Fixed bug with possible race condition in replication that could lead to data loss. This issue affects versions 1.1.54310 and 1.1.54318. If you use one of these versions with Replicated tables, the update is strongly recommended. This issue shows in logs in Warning messages like `Part ... from own log doesn't exist.` The issue is relevant even if you don’t see these messages in logs. - -### ClickHouse release 1.1.54318, 2017-11-30 {#clickhouse-release-1-1-54318-2017-11-30} - -This release contains bug fixes for the previous release 1.1.54310: - -- Fixed incorrect row deletions during merges in the SummingMergeTree engine -- Fixed a memory leak in unreplicated MergeTree engines -- Fixed performance degradation with frequent inserts in MergeTree engines -- Fixed an issue that was causing the replication queue to stop running -- Fixed rotation and archiving of server logs - -### ClickHouse release 1.1.54310, 2017-11-01 {#clickhouse-release-1-1-54310-2017-11-01} - -#### New features: {#new-features} - -- Custom partitioning key for the MergeTree family of table engines. -- [Kafka](https://clickhouse.yandex/docs/en/operations/table_engines/kafka/) table engine. 
-- Added support for loading [CatBoost](https://catboost.yandex/) models and applying them to data stored in ClickHouse. -- Added support for time zones with non-integer offsets from UTC. -- Added support for arithmetic operations with time intervals. -- The range of values for the Date and DateTime types is extended to the year 2105. -- Added the `CREATE MATERIALIZED VIEW x TO y` query (specifies an existing table for storing the data of a materialized view). -- Added the `ATTACH TABLE` query without arguments. -- The processing logic for Nested columns with names ending in -Map in a SummingMergeTree table was extracted to the sumMap aggregate function. You can now specify such columns explicitly. -- Max size of the IP trie dictionary is increased to 128M entries. -- Added the getSizeOfEnumType function. -- Added the sumWithOverflow aggregate function. -- Added support for the Cap’n Proto input format. -- You can now customize compression level when using the zstd algorithm. - -#### Backward incompatible changes: {#backward-incompatible-changes} - -- Creation of temporary tables with an engine other than Memory is not allowed. -- Explicit creation of tables with the View or MaterializedView engine is not allowed. -- During table creation, a new check verifies that the sampling key expression is included in the primary key. - -#### Bug fixes: {#bug-fixes} - -- Fixed hangups when synchronously inserting into a Distributed table. -- Fixed nonatomic adding and removing of parts in Replicated tables. -- Data inserted into a materialized view is not subjected to unnecessary deduplication. -- Executing a query to a Distributed table for which the local replica is lagging and remote replicas are unavailable does not result in an error anymore. -- Users don’t need access permissions to the `default` database to create temporary tables anymore. -- Fixed crashing when specifying the Array type without arguments. -- Fixed hangups when the disk volume containing server logs is full. -- Fixed an overflow in the toRelativeWeekNum function for the first week of the Unix epoch. - -#### Build improvements: {#build-improvements} - -- Several third-party libraries (notably Poco) were updated and converted to git submodules. - -### ClickHouse release 1.1.54304, 2017-10-19 {#clickhouse-release-1-1-54304-2017-10-19} - -#### New features: {#new-features-1} - -- TLS support in the native protocol (to enable, set `tcp_ssl_port` in `config.xml` ). - -#### Bug fixes: {#bug-fixes-1} - -- `ALTER` for replicated tables now tries to start running as soon as possible. -- Fixed crashing when reading data with the setting `preferred_block_size_bytes=0.` -- Fixed crashes of `clickhouse-client` when pressing `Page Down` -- Correct interpretation of certain complex queries with `GLOBAL IN` and `UNION ALL` -- `FREEZE PARTITION` always works atomically now. -- Empty POST requests now return a response with code 411. -- Fixed interpretation errors for expressions like `CAST(1 AS Nullable(UInt8)).` -- Fixed an error when reading `Array(Nullable(String))` columns from `MergeTree` tables. -- Fixed crashing when parsing queries like `SELECT dummy AS dummy, dummy AS b` -- Users are updated correctly with invalid `users.xml` -- Correct handling when an executable dictionary returns a non-zero response code. - -### ClickHouse release 1.1.54292, 2017-09-20 {#clickhouse-release-1-1-54292-2017-09-20} - -#### New features: {#new-features-2} - -- Added the `pointInPolygon` function for working with coordinates on a coordinate plane. 
-- Added the `sumMap` aggregate function for calculating the sum of arrays, similar to `SummingMergeTree`. -- Added the `trunc` function. Improved performance of the rounding functions (`round`, `floor`, `ceil`, `roundToExp2`) and corrected the logic of how they work. Changed the logic of the `roundToExp2` function for fractions and negative numbers. -- The ClickHouse executable file is now less dependent on the libc version. The same ClickHouse executable file can run on a wide variety of Linux systems. There is still a dependency when using compiled queries (with the setting `compile = 1` , which is not used by default). -- Reduced the time needed for dynamic compilation of queries. - -#### Bug fixes: {#bug-fixes-2} - -- Fixed an error that sometimes produced `part ... intersects previous part` messages and weakened replica consistency. -- Fixed an error that caused the server to lock up if ZooKeeper was unavailable during shutdown. -- Removed excessive logging when restoring replicas. -- Fixed an error in the UNION ALL implementation. -- Fixed an error in the concat function that occurred if the first column in a block has the Array type. -- Progress is now displayed correctly in the system.merges table. - -### ClickHouse release 1.1.54289, 2017-09-13 {#clickhouse-release-1-1-54289-2017-09-13} - -#### New features: {#new-features-3} - -- `SYSTEM` queries for server administration: `SYSTEM RELOAD DICTIONARY`, `SYSTEM RELOAD DICTIONARIES`, `SYSTEM DROP DNS CACHE`, `SYSTEM SHUTDOWN`, `SYSTEM KILL`. -- Added functions for working with arrays: `concat`, `arraySlice`, `arrayPushBack`, `arrayPushFront`, `arrayPopBack`, `arrayPopFront`. -- Added `root` and `identity` parameters for the ZooKeeper configuration. This allows you to isolate individual users on the same ZooKeeper cluster. -- Added aggregate functions `groupBitAnd`, `groupBitOr`, and `groupBitXor` (for compatibility, they are also available under the names `BIT_AND`, `BIT_OR`, and `BIT_XOR`). -- External dictionaries can be loaded from MySQL by specifying a socket in the filesystem. -- External dictionaries can be loaded from MySQL over SSL (`ssl_cert`, `ssl_key`, `ssl_ca` parameters). -- Added the `max_network_bandwidth_for_user` setting to restrict the overall bandwidth use for queries per user. -- Support for `DROP TABLE` for temporary tables. -- Support for reading `DateTime` values in Unix timestamp format from the `CSV` and `JSONEachRow` formats. -- Lagging replicas in distributed queries are now excluded by default (the default threshold is 5 minutes). -- FIFO locking is used during ALTER: an ALTER query isn’t blocked indefinitely for continuously running queries. -- Option to set `umask` in the config file. -- Improved performance for queries with `DISTINCT` . - -#### Bug fixes: {#bug-fixes-3} - -- Improved the process for deleting old nodes in ZooKeeper. Previously, old nodes sometimes didn’t get deleted if there were very frequent inserts, which caused the server to be slow to shut down, among other things. -- Fixed randomization when choosing hosts for the connection to ZooKeeper. -- Fixed the exclusion of lagging replicas in distributed queries if the replica is localhost. -- Fixed an error where a data part in a `ReplicatedMergeTree` table could be broken after running `ALTER MODIFY` on an element in a `Nested` structure. -- Fixed an error that could cause SELECT queries to “hang”. -- Improvements to distributed DDL queries. -- Fixed the query `CREATE TABLE ... AS `. -- Resolved the deadlock in the `ALTER ... 
CLEAR COLUMN IN PARTITION` query for `Buffer` tables. -- Fixed the invalid default value for `Enum` s (0 instead of the minimum) when using the `JSONEachRow` and `TSKV` formats. -- Resolved the appearance of zombie processes when using a dictionary with an `executable` source. -- Fixed segfault for the HEAD query. - -#### Improved workflow for developing and assembling ClickHouse: {#improved-workflow-for-developing-and-assembling-clickhouse} - -- You can use `pbuilder` to build ClickHouse. -- You can use `libc++` instead of `libstdc++` for builds on Linux. -- Added instructions for using static code analysis tools: `Coverage`, `clang-tidy`, `cppcheck`. - -#### Please note when upgrading: {#please-note-when-upgrading} - -- There is now a higher default value for the MergeTree setting `max_bytes_to_merge_at_max_space_in_pool` (the maximum total size of data parts to merge, in bytes): it has increased from 100 GiB to 150 GiB. This might result in large merges running after the server upgrade, which could cause an increased load on the disk subsystem. If the free space available on the server is less than twice the total amount of the merges that are running, this will cause all other merges to stop running, including merges of small data parts. As a result, INSERT queries will fail with the message “Merges are processing significantly slower than inserts.” Use the `SELECT * FROM system.merges` query to monitor the situation. You can also check the `DiskSpaceReservedForMerge` metric in the `system.metrics` table, or in Graphite. You don’t need to do anything to fix this, since the issue will resolve itself once the large merges finish. If you find this unacceptable, you can restore the previous value for the `max_bytes_to_merge_at_max_space_in_pool` setting. To do this, go to the section in config.xml, set ``` ``107374182400 ``` and restart the server. - -### ClickHouse release 1.1.54284, 2017-08-29 {#clickhouse-release-1-1-54284-2017-08-29} - -- This is a bugfix release for the previous 1.1.54282 release. It fixes leaks in the parts directory in ZooKeeper. - -### ClickHouse release 1.1.54282, 2017-08-23 {#clickhouse-release-1-1-54282-2017-08-23} - -This release contains bug fixes for the previous release 1.1.54276: - -- Fixed `DB::Exception: Assertion violation: !_path.empty()` when inserting into a Distributed table. -- Fixed parsing when inserting in RowBinary format if input data starts with’;’. -- Errors during runtime compilation of certain aggregate functions (e.g. `groupArray()`). - -### Clickhouse Release 1.1.54276, 2017-08-16 {#clickhouse-release-1-1-54276-2017-08-16} - -#### New features: {#new-features-4} - -- Added an optional WITH section for a SELECT query. Example query: `WITH 1+1 AS a SELECT a, a*a` -- INSERT can be performed synchronously in a Distributed table: OK is returned only after all the data is saved on all the shards. This is activated by the setting insert\_distributed\_sync=1. -- Added the UUID data type for working with 16-byte identifiers. -- Added aliases of CHAR, FLOAT and other types for compatibility with the Tableau. -- Added the functions toYYYYMM, toYYYYMMDD, and toYYYYMMDDhhmmss for converting time into numbers. -- You can use IP addresses (together with the hostname) to identify servers for clustered DDL queries. -- Added support for non-constant arguments and negative offsets in the function `substring(str, pos, len).` -- Added the max\_size parameter for the `groupArray(max_size)(column)` aggregate function, and optimized its performance. 
- -#### Main changes: {#main-changes} - -- Security improvements: all server files are created with 0640 permissions (can be changed via config parameter). -- Improved error messages for queries with invalid syntax. -- Significantly reduced memory consumption and improved performance when merging large sections of MergeTree data. -- Significantly increased the performance of data merges for the ReplacingMergeTree engine. -- Improved performance for asynchronous inserts from a Distributed table by combining multiple source inserts. To enable this functionality, use the setting distributed\_directory\_monitor\_batch\_inserts=1. - -#### Backward incompatible changes: {#backward-incompatible-changes-1} - -- Changed the binary format of aggregate states of `groupArray(array_column)` functions for arrays. - -#### Complete list of changes: {#complete-list-of-changes} - -- Added the `output_format_json_quote_denormals` setting, which enables outputting nan and inf values in JSON format. -- Optimized stream allocation when reading from a Distributed table. -- Settings can be configured in readonly mode if the value doesn’t change. -- Added the ability to retrieve non-integer granules of the MergeTree engine in order to meet restrictions on the block size specified in the preferred\_block\_size\_bytes setting. The purpose is to reduce the consumption of RAM and increase cache locality when processing queries from tables with large columns. -- Efficient use of indexes that contain expressions like `toStartOfHour(x)` for conditions like `toStartOfHour(x) op сonstexpr.` -- Added new settings for MergeTree engines (the merge\_tree section in config.xml): - - replicated\_deduplication\_window\_seconds sets the number of seconds allowed for deduplicating inserts in Replicated tables. - - cleanup\_delay\_period sets how often to start cleanup to remove outdated data. - - replicated\_can\_become\_leader can prevent a replica from becoming the leader (and assigning merges). -- Accelerated cleanup to remove outdated data from ZooKeeper. -- Multiple improvements and fixes for clustered DDL queries. Of particular interest is the new setting distributed\_ddl\_task\_timeout, which limits the time to wait for a response from the servers in the cluster. If a ddl request has not been performed on all hosts, a response will contain a timeout error and a request will be executed in an async mode. -- Improved display of stack traces in the server logs. -- Added the “none” value for the compression method. -- You can use multiple dictionaries\_config sections in config.xml. -- It is possible to connect to MySQL through a socket in the file system. -- The system.parts table has a new column with information about the size of marks, in bytes. - -#### Bug fixes: {#bug-fixes-4} - -- Distributed tables using a Merge table now work correctly for a SELECT query with a condition on the `_table` field. -- Fixed a rare race condition in ReplicatedMergeTree when checking data parts. -- Fixed possible freezing on “leader election” when starting a server. -- The max\_replica\_delay\_for\_distributed\_queries setting was ignored when using a local replica of the data source. This has been fixed. -- Fixed incorrect behavior of `ALTER TABLE CLEAR COLUMN IN PARTITION` when attempting to clean a non-existing column. -- Fixed an exception in the multiIf function when using empty arrays or strings. -- Fixed excessive memory allocations when deserializing Native format. -- Fixed incorrect auto-update of Trie dictionaries. 
-- Fixed an exception when running queries with a GROUP BY clause from a Merge table when using SAMPLE. -- Fixed a crash of GROUP BY when using distributed\_aggregation\_memory\_efficient=1. -- Now you can specify the database.table in the right side of IN and JOIN. -- Too many threads were used for parallel aggregation. This has been fixed. -- Fixed how the “if” function works with FixedString arguments. -- SELECT worked incorrectly from a Distributed table for shards with a weight of 0. This has been fixed. -- Running `CREATE VIEW IF EXISTS no longer causes crashes.` -- Fixed incorrect behavior when input\_format\_skip\_unknown\_fields=1 is set and there are negative numbers. -- Fixed an infinite loop in the `dictGetHierarchy()` function if there is some invalid data in the dictionary. -- Fixed `Syntax error: unexpected (...)` errors when running distributed queries with subqueries in an IN or JOIN clause and Merge tables. -- Fixed an incorrect interpretation of a SELECT query from Dictionary tables. -- Fixed the “Cannot mremap” error when using arrays in IN and JOIN clauses with more than 2 billion elements. -- Fixed the failover for dictionaries with MySQL as the source. - -#### Improved workflow for developing and assembling ClickHouse: {#improved-workflow-for-developing-and-assembling-clickhouse-1} - -- Builds can be assembled in Arcadia. -- You can use gcc 7 to compile ClickHouse. -- Parallel builds using ccache+distcc are faster now. - -### ClickHouse release 1.1.54245, 2017-07-04 {#clickhouse-release-1-1-54245-2017-07-04} - -#### New features: {#new-features-5} - -- Distributed DDL (for example, `CREATE TABLE ON CLUSTER`) -- The replicated query `ALTER TABLE CLEAR COLUMN IN PARTITION.` -- The engine for Dictionary tables (access to dictionary data in the form of a table). -- Dictionary database engine (this type of database automatically has Dictionary tables available for all the connected external dictionaries). -- You can check for updates to the dictionary by sending a request to the source. -- Qualified column names -- Quoting identifiers using double quotation marks. -- Sessions in the HTTP interface. -- The OPTIMIZE query for a Replicated table can can run not only on the leader. - -#### Backward incompatible changes: {#backward-incompatible-changes-2} - -- Removed SET GLOBAL. - -#### Minor changes: {#minor-changes} - -- Now after an alert is triggered, the log prints the full stack trace. -- Relaxed the verification of the number of damaged/extra data parts at startup (there were too many false positives). - -#### Bug fixes: {#bug-fixes-5} - -- Fixed a bad connection “sticking” when inserting into a Distributed table. -- GLOBAL IN now works for a query from a Merge table that looks at a Distributed table. -- The incorrect number of cores was detected on a Google Compute Engine virtual machine. This has been fixed. -- Changes in how an executable source of cached external dictionaries works. -- Fixed the comparison of strings containing null characters. -- Fixed the comparison of Float32 primary key fields with constants. -- Previously, an incorrect estimate of the size of a field could lead to overly large allocations. -- Fixed a crash when querying a Nullable column added to a table using ALTER. -- Fixed a crash when sorting by a Nullable column, if the number of rows is less than LIMIT. -- Fixed an ORDER BY subquery consisting of only constant values. -- Previously, a Replicated table could remain in the invalid state after a failed DROP TABLE. 
-- Aliases for scalar subqueries with empty results are no longer lost. -- Now a query that used compilation does not fail with an error if the .so file gets damaged. diff --git a/docs/ru/changelog/2018.md b/docs/ru/changelog/2018.md deleted file mode 100644 index 49bef18cbf3..00000000000 --- a/docs/ru/changelog/2018.md +++ /dev/null @@ -1,1060 +0,0 @@ ---- -en_copy: true ---- - -## ClickHouse release 18.16 {#clickhouse-release-18-16} - -### ClickHouse release 18.16.1, 2018-12-21 {#clickhouse-release-18-16-1-2018-12-21} - -#### Bug fixes: {#bug-fixes} - -- Fixed an error that led to problems with updating dictionaries with the ODBC source. [\#3825](https://github.com/ClickHouse/ClickHouse/issues/3825), [\#3829](https://github.com/ClickHouse/ClickHouse/issues/3829) -- JIT compilation of aggregate functions now works with LowCardinality columns. [\#3838](https://github.com/ClickHouse/ClickHouse/issues/3838) - -#### Improvements: {#improvements} - -- Added the `low_cardinality_allow_in_native_format` setting (enabled by default). When disabled, LowCardinality columns will be converted to ordinary columns for SELECT queries and ordinary columns will be expected for INSERT queries. [\#3879](https://github.com/ClickHouse/ClickHouse/pull/3879) - -#### Build improvements: {#build-improvements} - -- Fixes for builds on macOS and ARM. - -### ClickHouse release 18.16.0, 2018-12-14 {#clickhouse-release-18-16-0-2018-12-14} - -#### New features: {#new-features} - -- `DEFAULT` expressions are evaluated for missing fields when loading data in semi-structured input formats (`JSONEachRow`, `TSKV`). The feature is enabled with the `insert_sample_with_metadata` setting. [\#3555](https://github.com/ClickHouse/ClickHouse/pull/3555) -- The `ALTER TABLE` query now has the `MODIFY ORDER BY` action for changing the sorting key when adding or removing a table column. This is useful for tables in the `MergeTree` family that perform additional tasks when merging based on this sorting key, such as `SummingMergeTree`, `AggregatingMergeTree`, and so on. [\#3581](https://github.com/ClickHouse/ClickHouse/pull/3581) [\#3755](https://github.com/ClickHouse/ClickHouse/pull/3755) -- For tables in the `MergeTree` family, now you can specify a different sorting key (`ORDER BY`) and index (`PRIMARY KEY`). The sorting key can be longer than the index. [\#3581](https://github.com/ClickHouse/ClickHouse/pull/3581) -- Added the `hdfs` table function and the `HDFS` table engine for importing and exporting data to HDFS. [chenxing-xc](https://github.com/ClickHouse/ClickHouse/pull/3617) -- Added functions for working with base64: `base64Encode`, `base64Decode`, `tryBase64Decode`. [Alexander Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/3350) -- Now you can use a parameter to configure the precision of the `uniqCombined` aggregate function (select the number of HyperLogLog cells). [\#3406](https://github.com/ClickHouse/ClickHouse/pull/3406) -- Added the `system.contributors` table that contains the names of everyone who made commits in ClickHouse. [\#3452](https://github.com/ClickHouse/ClickHouse/pull/3452) -- Added the ability to omit the partition for the `ALTER TABLE ... FREEZE` query in order to back up all partitions at once. [\#3514](https://github.com/ClickHouse/ClickHouse/pull/3514) -- Added `dictGet` and `dictGetOrDefault` functions that don’t require specifying the type of return value. The type is determined automatically from the dictionary description. 
[Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3564) -- Now you can specify comments for a column in the table description and change it using `ALTER`. [\#3377](https://github.com/ClickHouse/ClickHouse/pull/3377) -- Reading is supported for `Join` type tables with simple keys. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3728) -- Now you can specify the options `join_use_nulls`, `max_rows_in_join`, `max_bytes_in_join`, and `join_overflow_mode` when creating a `Join` type table. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3728) -- Added the `joinGet` function that allows you to use a `Join` type table like a dictionary. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3728) -- Added the `partition_key`, `sorting_key`, `primary_key`, and `sampling_key` columns to the `system.tables` table in order to provide information about table keys. [\#3609](https://github.com/ClickHouse/ClickHouse/pull/3609) -- Added the `is_in_partition_key`, `is_in_sorting_key`, `is_in_primary_key`, and `is_in_sampling_key` columns to the `system.columns` table. [\#3609](https://github.com/ClickHouse/ClickHouse/pull/3609) -- Added the `min_time` and `max_time` columns to the `system.parts` table. These columns are populated when the partitioning key is an expression consisting of `DateTime` columns. [Emmanuel Donin de Rosière](https://github.com/ClickHouse/ClickHouse/pull/3800) - -#### Bug fixes: {#bug-fixes-1} - -- Fixes and performance improvements for the `LowCardinality` data type. `GROUP BY` using `LowCardinality(Nullable(...))`. Getting the values of `extremes`. Processing high-order functions. `LEFT ARRAY JOIN`. Distributed `GROUP BY`. Functions that return `Array`. Execution of `ORDER BY`. Writing to `Distributed` tables (nicelulu). Backward compatibility for `INSERT` queries from old clients that implement the `Native` protocol. Support for `LowCardinality` for `JOIN`. Improved performance when working in a single stream. [\#3823](https://github.com/ClickHouse/ClickHouse/pull/3823) [\#3803](https://github.com/ClickHouse/ClickHouse/pull/3803) [\#3799](https://github.com/ClickHouse/ClickHouse/pull/3799) [\#3769](https://github.com/ClickHouse/ClickHouse/pull/3769) [\#3744](https://github.com/ClickHouse/ClickHouse/pull/3744) [\#3681](https://github.com/ClickHouse/ClickHouse/pull/3681) [\#3651](https://github.com/ClickHouse/ClickHouse/pull/3651) [\#3649](https://github.com/ClickHouse/ClickHouse/pull/3649) [\#3641](https://github.com/ClickHouse/ClickHouse/pull/3641) [\#3632](https://github.com/ClickHouse/ClickHouse/pull/3632) [\#3568](https://github.com/ClickHouse/ClickHouse/pull/3568) [\#3523](https://github.com/ClickHouse/ClickHouse/pull/3523) [\#3518](https://github.com/ClickHouse/ClickHouse/pull/3518) -- Fixed how the `select_sequential_consistency` option works. Previously, when this setting was enabled, an incomplete result was sometimes returned after beginning to write to a new partition. [\#2863](https://github.com/ClickHouse/ClickHouse/pull/2863) -- Databases are correctly specified when executing DDL `ON CLUSTER` queries and `ALTER UPDATE/DELETE`. [\#3772](https://github.com/ClickHouse/ClickHouse/pull/3772) [\#3460](https://github.com/ClickHouse/ClickHouse/pull/3460) -- Databases are correctly specified for subqueries inside a VIEW. [\#3521](https://github.com/ClickHouse/ClickHouse/pull/3521) -- Fixed a bug in `PREWHERE` with `FINAL` for `VersionedCollapsingMergeTree`. 
[7167bfd7](https://github.com/ClickHouse/ClickHouse/commit/7167bfd7b365538f7a91c4307ad77e552ab4e8c1) -- Now you can use `KILL QUERY` to cancel queries that have not started yet because they are waiting for the table to be locked. [\#3517](https://github.com/ClickHouse/ClickHouse/pull/3517) -- Corrected date and time calculations if the clocks were moved back at midnight (this happens in Iran, and happened in Moscow from 1981 to 1983). Previously, this led to the time being reset a day earlier than necessary, and also caused incorrect formatting of the date and time in text format. [\#3819](https://github.com/ClickHouse/ClickHouse/pull/3819) -- Fixed bugs in some cases of `VIEW` and subqueries that omit the database. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3521) -- Fixed a race condition when simultaneously reading from a `MATERIALIZED VIEW` and deleting a `MATERIALIZED VIEW` due to not locking the internal `MATERIALIZED VIEW`. [\#3404](https://github.com/ClickHouse/ClickHouse/pull/3404) [\#3694](https://github.com/ClickHouse/ClickHouse/pull/3694) -- Fixed the error `Lock handler cannot be nullptr.` [\#3689](https://github.com/ClickHouse/ClickHouse/pull/3689) -- Fixed query processing when the `compile_expressions` option is enabled (it’s enabled by default). Nondeterministic constant expressions like the `now` function are no longer unfolded. [\#3457](https://github.com/ClickHouse/ClickHouse/pull/3457) -- Fixed a crash when specifying a non-constant scale argument in `toDecimal32/64/128` functions. -- Fixed an error when trying to insert an array with `NULL` elements in the `Values` format into a column of type `Array` without `Nullable` (if `input_format_values_interpret_expressions` = 1). [\#3487](https://github.com/ClickHouse/ClickHouse/pull/3487) [\#3503](https://github.com/ClickHouse/ClickHouse/pull/3503) -- Fixed continuous error logging in `DDLWorker` if ZooKeeper is not available. [8f50c620](https://github.com/ClickHouse/ClickHouse/commit/8f50c620334988b28018213ec0092fe6423847e2) -- Fixed the return type for `quantile*` functions from `Date` and `DateTime` types of arguments. [\#3580](https://github.com/ClickHouse/ClickHouse/pull/3580) -- Fixed the `WITH` clause if it specifies a simple alias without expressions. [\#3570](https://github.com/ClickHouse/ClickHouse/pull/3570) -- Fixed processing of queries with named sub-queries and qualified column names when `enable_optimize_predicate_expression` is enabled. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3588) -- Fixed the error `Attempt to attach to nullptr thread group` when working with materialized views. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3623) -- Fixed a crash when passing certain incorrect arguments to the `arrayReverse` function. [73e3a7b6](https://github.com/ClickHouse/ClickHouse/commit/73e3a7b662161d6005e7727d8a711b930386b871) -- Fixed the buffer overflow in the `extractURLParameter` function. Improved performance. Added correct processing of strings containing zero bytes. [141e9799](https://github.com/ClickHouse/ClickHouse/commit/141e9799e49201d84ea8e951d1bed4fb6d3dacb5) -- Fixed buffer overflow in the `lowerUTF8` and `upperUTF8` functions. Removed the ability to execute these functions over `FixedString` type arguments. [\#3662](https://github.com/ClickHouse/ClickHouse/pull/3662) -- Fixed a rare race condition when deleting `MergeTree` tables. 
[\#3680](https://github.com/ClickHouse/ClickHouse/pull/3680) -- Fixed a race condition when reading from `Buffer` tables and simultaneously performing `ALTER` or `DROP` on the target tables. [\#3719](https://github.com/ClickHouse/ClickHouse/pull/3719) -- Fixed a segfault if the `max_temporary_non_const_columns` limit was exceeded. [\#3788](https://github.com/ClickHouse/ClickHouse/pull/3788) - -#### Improvements: {#improvements-1} - -- The server does not write the processed configuration files to the `/etc/clickhouse-server/` directory. Instead, it saves them in the `preprocessed_configs` directory inside `path`. This means that the `/etc/clickhouse-server/` directory doesn’t have write access for the `clickhouse` user, which improves security. [\#2443](https://github.com/ClickHouse/ClickHouse/pull/2443) -- The `min_merge_bytes_to_use_direct_io` option is set to 10 GiB by default. A merge that forms large parts of tables from the MergeTree family will be performed in `O_DIRECT` mode, which prevents excessive page cache eviction. [\#3504](https://github.com/ClickHouse/ClickHouse/pull/3504) -- Accelerated server start when there is a very large number of tables. [\#3398](https://github.com/ClickHouse/ClickHouse/pull/3398) -- Added a connection pool and HTTP `Keep-Alive` for connections between replicas. [\#3594](https://github.com/ClickHouse/ClickHouse/pull/3594) -- If the query syntax is invalid, the `400 Bad Request` code is returned in the `HTTP` interface (500 was returned previously). [31bc680a](https://github.com/ClickHouse/ClickHouse/commit/31bc680ac5f4bb1d0360a8ba4696fa84bb47d6ab) -- The `join_default_strictness` option is set to `ALL` by default for compatibility. [120e2cbe](https://github.com/ClickHouse/ClickHouse/commit/120e2cbe2ff4fbad626c28042d9b28781c805afe) -- Removed logging to `stderr` from the `re2` library for invalid or complex regular expressions. [\#3723](https://github.com/ClickHouse/ClickHouse/pull/3723) -- Added for the `Kafka` table engine: checks for subscriptions before beginning to read from Kafka; the kafka\_max\_block\_size setting for the table. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3396) -- The `cityHash64`, `farmHash64`, `metroHash64`, `sipHash64`, `halfMD5`, `murmurHash2_32`, `murmurHash2_64`, `murmurHash3_32`, and `murmurHash3_64` functions now work for any number of arguments and for arguments in the form of tuples. [\#3451](https://github.com/ClickHouse/ClickHouse/pull/3451) [\#3519](https://github.com/ClickHouse/ClickHouse/pull/3519) -- The `arrayReverse` function now works with any types of arrays. [73e3a7b6](https://github.com/ClickHouse/ClickHouse/commit/73e3a7b662161d6005e7727d8a711b930386b871) -- Added an optional parameter: the slot size for the `timeSlots` function. [Kirill Shvakov](https://github.com/ClickHouse/ClickHouse/pull/3724) -- For `FULL` and `RIGHT JOIN`, the `max_block_size` setting is used for a stream of non-joined data from the right table. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3699) -- Added the `--secure` command line parameter in `clickhouse-benchmark` and `clickhouse-performance-test` to enable TLS. [\#3688](https://github.com/ClickHouse/ClickHouse/pull/3688) [\#3690](https://github.com/ClickHouse/ClickHouse/pull/3690) -- Type conversion when the structure of a `Buffer` type table does not match the structure of the destination table. 
- Type conversion is performed when the structure of a `Buffer` type table does not match the structure of the destination table. [Vitaly Baranov](https://github.com/ClickHouse/ClickHouse/pull/3603)
- Added the `tcp_keep_alive_timeout` option to enable keep-alive packets after inactivity for the specified time interval. [\#3441](https://github.com/ClickHouse/ClickHouse/pull/3441)
- Removed unnecessary quoting of values for the partition key in the `system.parts` table if it consists of a single column. [\#3652](https://github.com/ClickHouse/ClickHouse/pull/3652)
- The modulo function works for `Date` and `DateTime` data types. [\#3385](https://github.com/ClickHouse/ClickHouse/pull/3385)
- Added synonyms for the `POWER`, `LN`, `LCASE`, `UCASE`, `REPLACE`, `LOCATE`, `SUBSTR`, and `MID` functions. [\#3774](https://github.com/ClickHouse/ClickHouse/pull/3774) [\#3763](https://github.com/ClickHouse/ClickHouse/pull/3763) Some function names are case-insensitive for compatibility with the SQL standard. Added the syntactic sugar `SUBSTRING(expr FROM start FOR length)` for compatibility with SQL (see the example after this list). [\#3804](https://github.com/ClickHouse/ClickHouse/pull/3804)
- Added the ability to `mlock` memory pages corresponding to `clickhouse-server` executable code to prevent it from being forced out of memory. This feature is disabled by default. [\#3553](https://github.com/ClickHouse/ClickHouse/pull/3553)
- Improved performance when reading from `O_DIRECT` (with the `min_bytes_to_use_direct_io` option enabled). [\#3405](https://github.com/ClickHouse/ClickHouse/pull/3405)
- Improved performance of the `dictGet...OrDefault` function for a constant key argument and a non-constant default argument. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3563)
- The `firstSignificantSubdomain` function now processes the domains `gov`, `mil`, and `edu`. [Igor Hatarist](https://github.com/ClickHouse/ClickHouse/pull/3601) Improved performance. [\#3628](https://github.com/ClickHouse/ClickHouse/pull/3628)
- Ability to specify custom environment variables for starting `clickhouse-server` using the `SYS-V init.d` script by defining `CLICKHOUSE_PROGRAM_ENV` in `/etc/default/clickhouse`. [Pavlo Bashynskyi](https://github.com/ClickHouse/ClickHouse/pull/3612)
- Correct return code for the clickhouse-server init script. [\#3516](https://github.com/ClickHouse/ClickHouse/pull/3516)
- The `system.metrics` table now has the `VersionInteger` metric, and `system.build_options` has the added line `VERSION_INTEGER`, which contains the numeric form of the ClickHouse version, such as `18016000`. [\#3644](https://github.com/ClickHouse/ClickHouse/pull/3644)
- Removed the ability to compare the `Date` type with a number to avoid potential errors like `date = 2018-12-17`, where quotes around the date are omitted by mistake. [\#3687](https://github.com/ClickHouse/ClickHouse/pull/3687)
- Fixed the behavior of stateful functions like `rowNumberInAllBlocks`. They previously output a result that was one number larger due to starting during query analysis. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3729)
- If the `force_restore_data` file can’t be deleted, an error message is displayed. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3794)
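As a rough, runnable sketch of the new SQL-compatibility sugar (constant arguments only, so no table is assumed; the result values in the comments are for orientation):

```sql
SELECT
    SUBSTRING('ClickHouse' FROM 6 FOR 5) AS s1,  -- SQL-standard form: 'House'
    substring('ClickHouse', 6, 5) AS s2,         -- the classic equivalent
    LCASE('ClickHouse') AS lc,                   -- synonym for lower(): 'clickhouse'
    POWER(2, 10) AS p                            -- synonym for pow(): 1024
```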
#### Build improvements: {#build-improvements-1}

- Updated the `jemalloc` library, which fixes a potential memory leak. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3557)
- Profiling with `jemalloc` is enabled by default in debug builds. [2cc82f5c](https://github.com/ClickHouse/ClickHouse/commit/2cc82f5cbe266421cd4c1165286c2c47e5ffcb15)
- Added the ability to run integration tests when only `Docker` is installed on the system. [\#3650](https://github.com/ClickHouse/ClickHouse/pull/3650)
- Added a fuzz test for expressions in SELECT queries. [\#3442](https://github.com/ClickHouse/ClickHouse/pull/3442)
- Added a stress test for commits, which performs functional tests in parallel and in random order to detect more race conditions. [\#3438](https://github.com/ClickHouse/ClickHouse/pull/3438)
- Improved the method for starting clickhouse-server in a Docker image. [Elghazal Ahmed](https://github.com/ClickHouse/ClickHouse/pull/3663)
- For a Docker image, added support for initializing databases using files in the `/docker-entrypoint-initdb.d` directory. [Konstantin Lebedev](https://github.com/ClickHouse/ClickHouse/pull/3695)
- Fixes for builds on ARM. [\#3709](https://github.com/ClickHouse/ClickHouse/pull/3709)

#### Backward incompatible changes: {#backward-incompatible-changes}

- Removed the ability to compare the `Date` type with a number. Instead of `toDate('2018-12-18') = 17883`, you must use explicit type conversion `= toDate(17883)`. [\#3687](https://github.com/ClickHouse/ClickHouse/pull/3687)
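A minimal before/after sketch; the table `t` and its `Date` column `d` are hypothetical:

```sql
-- Previously accepted, now rejected:
-- SELECT count() FROM t WHERE d = 17883;

-- An explicit conversion is required (17883 days after 1970-01-01 is 2018-12-18):
SELECT count() FROM t WHERE d = toDate(17883)
```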
## ClickHouse release 18.14 {#clickhouse-release-18-14}

### ClickHouse release 18.14.19, 2018-12-19 {#clickhouse-release-18-14-19-2018-12-19}

#### Bug fixes: {#bug-fixes-2}

- Fixed an error that led to problems with updating dictionaries with the ODBC source. [\#3825](https://github.com/ClickHouse/ClickHouse/issues/3825), [\#3829](https://github.com/ClickHouse/ClickHouse/issues/3829)
- Databases are correctly specified when executing DDL `ON CLUSTER` queries. [\#3460](https://github.com/ClickHouse/ClickHouse/pull/3460)
- Fixed a segfault if the `max_temporary_non_const_columns` limit was exceeded. [\#3788](https://github.com/ClickHouse/ClickHouse/pull/3788)

#### Build improvements: {#build-improvements-2}

- Fixes for builds on ARM.

### ClickHouse release 18.14.18, 2018-12-04 {#clickhouse-release-18-14-18-2018-12-04}

#### Bug fixes: {#bug-fixes-3}

- Fixed an error in the `dictGet...` function for dictionaries of type `range` if one of the arguments is constant and the other is not. [\#3751](https://github.com/ClickHouse/ClickHouse/pull/3751)
- Fixed an error that caused the message `netlink: '...': attribute type 1 has an invalid length` to be printed in the Linux kernel log. It happened only on sufficiently recent versions of the Linux kernel. [\#3749](https://github.com/ClickHouse/ClickHouse/pull/3749)
- Fixed a segfault in the `empty` function for an argument of `FixedString` type. [Daniel, Dao Quang Minh](https://github.com/ClickHouse/ClickHouse/pull/3703)
- Fixed excessive memory allocation when using a large value of the `max_query_size` setting (a memory chunk of `max_query_size` bytes was preallocated at once). [\#3720](https://github.com/ClickHouse/ClickHouse/pull/3720)

#### Build changes: {#build-changes}

- Fixed the build with LLVM/Clang libraries of version 7 from the OS packages (these libraries are used for runtime query compilation). [\#3582](https://github.com/ClickHouse/ClickHouse/pull/3582)

### ClickHouse release 18.14.17, 2018-11-30 {#clickhouse-release-18-14-17-2018-11-30}

#### Bug fixes: {#bug-fixes-4}

- Fixed cases when the ODBC bridge process did not terminate with the main server process. [\#3642](https://github.com/ClickHouse/ClickHouse/pull/3642)
- Fixed synchronous insertion into the `Distributed` table with a column list that differs from the column list of the remote table. [\#3673](https://github.com/ClickHouse/ClickHouse/pull/3673)
- Fixed a rare race condition that could lead to a crash when dropping a MergeTree table. [\#3643](https://github.com/ClickHouse/ClickHouse/pull/3643)
- Fixed a query deadlock in the case when query thread creation fails with the `Resource temporarily unavailable` error. [\#3643](https://github.com/ClickHouse/ClickHouse/pull/3643)
- Fixed parsing of the `ENGINE` clause when the `CREATE AS table` syntax was used and the `ENGINE` clause was specified before the `AS table` (the error resulted in ignoring the specified engine). [\#3692](https://github.com/ClickHouse/ClickHouse/pull/3692)

### ClickHouse release 18.14.15, 2018-11-21 {#clickhouse-release-18-14-15-2018-11-21}

#### Bug fixes: {#bug-fixes-5}

- The size of a memory chunk was overestimated while deserializing a column of type `Array(String)`, which led to “Memory limit exceeded” errors. The issue appeared in version 18.12.13. [\#3589](https://github.com/ClickHouse/ClickHouse/issues/3589)

### ClickHouse release 18.14.14, 2018-11-20 {#clickhouse-release-18-14-14-2018-11-20}

#### Bug fixes: {#bug-fixes-6}

- Fixed `ON CLUSTER` queries when the cluster is configured as secure (the `<secure>` flag). [\#3599](https://github.com/ClickHouse/ClickHouse/pull/3599)

#### Build changes: {#build-changes-1}

- Fixed build problems (llvm-7 from the system, macOS). [\#3582](https://github.com/ClickHouse/ClickHouse/pull/3582)

### ClickHouse release 18.14.13, 2018-11-08 {#clickhouse-release-18-14-13-2018-11-08}

#### Bug fixes: {#bug-fixes-7}

- Fixed the `Block structure mismatch in MergingSorted stream` error. [\#3162](https://github.com/ClickHouse/ClickHouse/issues/3162)
- Fixed `ON CLUSTER` queries in the case when secure connections were turned on in the cluster config (the `<secure>` flag). [\#3465](https://github.com/ClickHouse/ClickHouse/pull/3465)
- Fixed an error in queries that used `SAMPLE`, `PREWHERE` and alias columns. [\#3543](https://github.com/ClickHouse/ClickHouse/pull/3543)
- Fixed a rare `unknown compression method` error when the `min_bytes_to_use_direct_io` setting was enabled. [\#3544](https://github.com/ClickHouse/ClickHouse/pull/3544)

#### Performance improvements: {#performance-improvements}

- Fixed a performance regression of queries with `GROUP BY` of columns of UInt16 or Date type when executing on AMD EPYC processors. [Igor Lapko](https://github.com/ClickHouse/ClickHouse/pull/3512)
- Fixed a performance regression of queries that process long strings. [\#3530](https://github.com/ClickHouse/ClickHouse/pull/3530)

#### Build improvements: {#build-improvements-3}

- Improvements for simplifying the Arcadia build. [\#3475](https://github.com/ClickHouse/ClickHouse/pull/3475), [\#3535](https://github.com/ClickHouse/ClickHouse/pull/3535)

### ClickHouse release 18.14.12, 2018-11-02 {#clickhouse-release-18-14-12-2018-11-02}

#### Bug fixes: {#bug-fixes-8}

- Fixed a crash on joining two unnamed subqueries. [\#3505](https://github.com/ClickHouse/ClickHouse/pull/3505)
- Fixed generating incorrect queries (with an empty `WHERE` clause) when querying external databases. [hotid](https://github.com/ClickHouse/ClickHouse/pull/3477)
- Fixed using an incorrect timeout value in ODBC dictionaries. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3511)

### ClickHouse release 18.14.11, 2018-10-29 {#clickhouse-release-18-14-11-2018-10-29}

#### Bug fixes: {#bug-fixes-9}

- Fixed the error `Block structure mismatch in UNION stream: different number of columns` in LIMIT queries. [\#2156](https://github.com/ClickHouse/ClickHouse/issues/2156)
- Fixed errors when merging data in tables containing arrays inside Nested structures. [\#3397](https://github.com/ClickHouse/ClickHouse/pull/3397)
- Fixed incorrect query results if the `merge_tree_uniform_read_distribution` setting is disabled (it is enabled by default). [\#3429](https://github.com/ClickHouse/ClickHouse/pull/3429)
- Fixed an error on inserts to a Distributed table in Native format. [\#3411](https://github.com/ClickHouse/ClickHouse/issues/3411)

### ClickHouse release 18.14.10, 2018-10-23 {#clickhouse-release-18-14-10-2018-10-23}

- The `compile_expressions` setting (JIT compilation of expressions) is disabled by default. [\#3410](https://github.com/ClickHouse/ClickHouse/pull/3410)
- The `enable_optimize_predicate_expression` setting is disabled by default.

### ClickHouse release 18.14.9, 2018-10-16 {#clickhouse-release-18-14-9-2018-10-16}

#### New features: {#new-features-1}

- The `WITH CUBE` modifier for `GROUP BY` (the alternative syntax `GROUP BY CUBE(...)` is also available; see the example after this list). [\#3172](https://github.com/ClickHouse/ClickHouse/pull/3172)
- Added the `formatDateTime` function. [Alexandr Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/2770)
- Added the `JDBC` table engine and `jdbc` table function (requires installing clickhouse-jdbc-bridge). [Alexandr Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/3210)
- Added functions for working with the ISO week number: `toISOWeek`, `toISOYear`, `toStartOfISOYear`, and `toDayOfYear`. [\#3146](https://github.com/ClickHouse/ClickHouse/pull/3146)
- Now you can use `Nullable` columns for `MySQL` and `ODBC` tables. [\#3362](https://github.com/ClickHouse/ClickHouse/pull/3362)
- Nested data structures can be read as nested objects in `JSONEachRow` format. Added the `input_format_import_nested_json` setting. [Veloman Yunkan](https://github.com/ClickHouse/ClickHouse/pull/3144)
- Parallel processing is available for many `MATERIALIZED VIEW`s when inserting data. See the `parallel_view_processing` setting. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3208)
- Added the `SYSTEM FLUSH LOGS` query (forced log flushes to system tables such as `query_log`). [\#3321](https://github.com/ClickHouse/ClickHouse/pull/3321)
- Now you can use pre-defined `database` and `table` macros when declaring `Replicated` tables. [\#3251](https://github.com/ClickHouse/ClickHouse/pull/3251)
- Added the ability to read `Decimal` type values in engineering notation (indicating powers of ten). [\#3153](https://github.com/ClickHouse/ClickHouse/pull/3153)
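A short sketch of the modifier; the `sales` table and its columns are hypothetical:

```sql
-- Produces subtotals for all combinations of the grouping keys, plus the grand total:
SELECT region, product, sum(amount) AS total
FROM sales
GROUP BY region, product WITH CUBE

-- Equivalent alternative syntax:
-- GROUP BY CUBE(region, product)
```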
#### Experimental features: {#experimental-features}

- Optimization of the GROUP BY clause for `LowCardinality` data types. [\#3138](https://github.com/ClickHouse/ClickHouse/pull/3138)
- Optimized calculation of expressions for `LowCardinality` data types. [\#3200](https://github.com/ClickHouse/ClickHouse/pull/3200)

#### Improvements: {#improvements-2}

- Significantly reduced memory consumption for queries with `ORDER BY` and `LIMIT`. See the `max_bytes_before_remerge_sort` setting. [\#3205](https://github.com/ClickHouse/ClickHouse/pull/3205)
- In the absence of a `JOIN` type (`LEFT`, `INNER`, …), `INNER JOIN` is assumed. [\#3147](https://github.com/ClickHouse/ClickHouse/pull/3147)
- Qualified asterisks work correctly in queries with `JOIN`. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3202)
- The `ODBC` table engine correctly chooses the method for quoting identifiers in the SQL dialect of a remote database. [Alexandr Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/3210)
- The `compile_expressions` setting (JIT compilation of expressions) is enabled by default.
- Fixed behavior for simultaneous DROP DATABASE/TABLE IF EXISTS and CREATE DATABASE/TABLE IF NOT EXISTS. Previously, a `CREATE DATABASE ... IF NOT EXISTS` query could return the error message “File … already exists”, and the `CREATE TABLE ... IF NOT EXISTS` and `DROP TABLE IF EXISTS` queries could return `Table ... is creating or attaching right now`. [\#3101](https://github.com/ClickHouse/ClickHouse/pull/3101)
- LIKE and IN expressions with a constant right half are passed to the remote server when querying from MySQL or ODBC tables. [\#3182](https://github.com/ClickHouse/ClickHouse/pull/3182)
- Comparisons with constant expressions in a WHERE clause are passed to the remote server when querying from MySQL and ODBC tables. Previously, only comparisons with constants were passed. [\#3182](https://github.com/ClickHouse/ClickHouse/pull/3182)
- Correct calculation of row width in the terminal for `Pretty` formats, including strings with hieroglyphs. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3257)
- `ON CLUSTER` can be specified for `ALTER UPDATE` queries.
- Improved performance for reading data in `JSONEachRow` format. [\#3332](https://github.com/ClickHouse/ClickHouse/pull/3332)
- Added synonyms for the `LENGTH` and `CHARACTER_LENGTH` functions for compatibility. The `CONCAT` function is no longer case-sensitive. [\#3306](https://github.com/ClickHouse/ClickHouse/pull/3306)
- Added the `TIMESTAMP` synonym for the `DateTime` type. [\#3390](https://github.com/ClickHouse/ClickHouse/pull/3390)
- There is always space reserved for `query_id` in the server logs, even if the log line is not related to a query. This makes it easier to parse server text logs with third-party tools.
- Memory consumption by a query is logged when it exceeds the next level of an integer number of gigabytes. [\#3205](https://github.com/ClickHouse/ClickHouse/pull/3205)
- Added a compatibility mode for the case when the client library that uses the Native protocol sends fewer columns by mistake than the server expects for the INSERT query. This scenario was possible when using the clickhouse-cpp library. Previously, this scenario caused the server to crash. [\#3171](https://github.com/ClickHouse/ClickHouse/pull/3171)
- In a user-defined WHERE expression in `clickhouse-copier`, you can now use a `partition_key` alias (for additional filtering by source table partition). This is useful if the partitioning scheme changes during copying, but only changes slightly. [\#3166](https://github.com/ClickHouse/ClickHouse/pull/3166)
- The workflow of the `Kafka` engine has been moved to a background thread pool in order to automatically reduce the speed of data reading at high loads. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3215)
- Support for reading `Tuple` and `Nested` values of structures like `struct` in the `Cap'n Proto` format. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3216)
- The list of top-level domains for the `firstSignificantSubdomain` function now includes the domain `biz`. [decaseal](https://github.com/ClickHouse/ClickHouse/pull/3219)
- In the configuration of external dictionaries, `null_value` is interpreted as the value of the default data type. [\#3330](https://github.com/ClickHouse/ClickHouse/pull/3330)
- Support for the `intDiv` and `intDivOrZero` functions for `Decimal`. [b48402e8](https://github.com/ClickHouse/ClickHouse/commit/b48402e8712e2b9b151e0eef8193811d433a1264)
- Support for the `Date`, `DateTime`, `UUID`, and `Decimal` types as a key for the `sumMap` aggregate function (see the example after this list). [\#3281](https://github.com/ClickHouse/ClickHouse/pull/3281)
- Support for the `Decimal` data type in external dictionaries. [\#3324](https://github.com/ClickHouse/ClickHouse/pull/3324)
- Support for the `Decimal` data type in `SummingMergeTree` tables. [\#3348](https://github.com/ClickHouse/ClickHouse/pull/3348)
- Added specializations for `UUID` in `if`. [\#3366](https://github.com/ClickHouse/ClickHouse/pull/3366)
- Reduced the number of `open` and `close` system calls when reading from a `MergeTree` table. [\#3283](https://github.com/ClickHouse/ClickHouse/pull/3283)
- A `TRUNCATE TABLE` query can be executed on any replica (the query is passed to the leader replica). [Kirill Shvakov](https://github.com/ClickHouse/ClickHouse/pull/3375)
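A self-contained sketch of `sumMap` over `Date` keys (the data is inlined, so no table is assumed):

```sql
-- sumMap sums the values arrays element-wise, grouped by matching keys;
-- Date keys are now accepted.
SELECT sumMap(dates, visits) AS per_day
FROM
(
    SELECT [toDate('2018-10-01'), toDate('2018-10-02')] AS dates, [10, 20] AS visits
    UNION ALL
    SELECT [toDate('2018-10-02')] AS dates, [5] AS visits
)
-- Expected result: (['2018-10-01', '2018-10-02'], [10, 25])
```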
#### Bug fixes: {#bug-fixes-10}

- Fixed an issue with `Dictionary` tables for `range_hashed` dictionaries. This error occurred in version 18.12.17. [\#1702](https://github.com/ClickHouse/ClickHouse/pull/1702)
- Fixed an error when loading `range_hashed` dictionaries (the message `Unsupported type Nullable (...)`). This error occurred in version 18.12.17. [\#3362](https://github.com/ClickHouse/ClickHouse/pull/3362)
- Fixed errors in the `pointInPolygon` function due to the accumulation of inaccurate calculations for polygons with a large number of vertices located close to each other. [\#3331](https://github.com/ClickHouse/ClickHouse/pull/3331) [\#3341](https://github.com/ClickHouse/ClickHouse/pull/3341)
- If, after merging data parts, the checksum for the resulting part differs from the result of the same merge in another replica, the result of the merge is deleted and the data part is downloaded from the other replica (this is the correct behavior). But after downloading the data part, it couldn’t be added to the working set because of an error that the part already exists (because the data part was deleted with some delay after the merge). This led to cyclical attempts to download the same data. [\#3194](https://github.com/ClickHouse/ClickHouse/pull/3194)
- Fixed incorrect calculation of total memory consumption by queries (because of incorrect calculation, the `max_memory_usage_for_all_queries` setting worked incorrectly and the `MemoryTracking` metric had an incorrect value). This error occurred in version 18.12.13. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3344)
- Fixed the functionality of `CREATE TABLE ... ON CLUSTER ... AS SELECT ...`. This error occurred in version 18.12.13. [\#3247](https://github.com/ClickHouse/ClickHouse/pull/3247)
- Fixed unnecessary preparation of data structures for `JOIN`s on the server that initiates the query if the `JOIN` is only performed on remote servers. [\#3340](https://github.com/ClickHouse/ClickHouse/pull/3340)
- Fixed bugs in the `Kafka` engine: deadlocks after exceptions when starting to read data, and locks upon completion. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3215)
- For `Kafka` tables, the optional `schema` parameter was not passed (the schema of the `Cap'n Proto` format). [Vojtech Splichal](https://github.com/ClickHouse/ClickHouse/pull/3150)
- If the ensemble of ZooKeeper servers has servers that accept the connection but then immediately close it instead of responding to the handshake, ClickHouse tries to connect to another server. Previously, this produced the error `Cannot read all data. Bytes read: 0. Bytes expected: 4.` and the server couldn’t start. [8218cf3a](https://github.com/ClickHouse/ClickHouse/commit/8218cf3a5f39a43401953769d6d12a0bb8d29da9)
- If the ensemble of ZooKeeper servers contains servers for which the DNS query returns an error, these servers are ignored. [17b8e209](https://github.com/ClickHouse/ClickHouse/commit/17b8e209221061325ad7ba0539f03c6e65f87f29)
- Fixed type conversion between `Date` and `DateTime` when inserting data in the `VALUES` format (if `input_format_values_interpret_expressions = 1`). Previously, the conversion was performed between the numerical value of the number of days in Unix Epoch time and the Unix timestamp, which led to unexpected results. [\#3229](https://github.com/ClickHouse/ClickHouse/pull/3229)
- Corrected type conversion between `Decimal` and integer numbers. [\#3211](https://github.com/ClickHouse/ClickHouse/pull/3211)
- Fixed errors in the `enable_optimize_predicate_expression` setting. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3231)
- Fixed a parsing error in CSV format with floating-point numbers if a non-default CSV separator is used, such as `;`. [\#3155](https://github.com/ClickHouse/ClickHouse/pull/3155)
- Fixed the `arrayCumSumNonNegative` function (it does not accumulate negative values if the accumulator is less than zero). [Aleksey Studnev](https://github.com/ClickHouse/ClickHouse/pull/3163)
- Fixed how `Merge` tables work on top of `Distributed` tables when using `PREWHERE`. [\#3165](https://github.com/ClickHouse/ClickHouse/pull/3165)
- Bug fixes in the `ALTER UPDATE` query.
- Fixed bugs in the `odbc` table function that appeared in version 18.12. [\#3197](https://github.com/ClickHouse/ClickHouse/pull/3197)
- Fixed the operation of aggregate functions with `StateArray` combinators. [\#3188](https://github.com/ClickHouse/ClickHouse/pull/3188)
- Fixed a crash when dividing a `Decimal` value by zero. [69dd6609](https://github.com/ClickHouse/ClickHouse/commit/69dd6609193beb4e7acd3e6ad216eca0ccfb8179)
- Fixed output of types for operations using `Decimal` and integer arguments. [\#3224](https://github.com/ClickHouse/ClickHouse/pull/3224)
- Fixed the segfault during `GROUP BY` on `Decimal128`. [3359ba06](https://github.com/ClickHouse/ClickHouse/commit/3359ba06c39fcd05bfdb87d6c64154819621e13a)
- The `log_query_threads` setting (logging information about each thread of query execution) now takes effect only if the `log_queries` option (logging information about queries) is set to 1. Since the `log_query_threads` option is enabled by default, information about threads was previously logged even if query logging was disabled. [\#3241](https://github.com/ClickHouse/ClickHouse/pull/3241)
- Fixed an error in the distributed operation of the quantiles aggregate function (the error message `Not found column quantile...`). [292a8855](https://github.com/ClickHouse/ClickHouse/commit/292a885533b8e3b41ce8993867069d14cbd5a664)
- Fixed the compatibility problem when working on a cluster of version 18.12.17 servers and older servers at the same time. For distributed queries with GROUP BY keys of both fixed and non-fixed length, if there was a large amount of data to aggregate, the returned data was not always fully aggregated (two different rows contained the same aggregation keys). [\#3254](https://github.com/ClickHouse/ClickHouse/pull/3254)
- Fixed handling of substitutions in `clickhouse-performance-test` if the query contains only part of the substitutions declared in the test. [\#3263](https://github.com/ClickHouse/ClickHouse/pull/3263)
- Fixed an error when using `FINAL` with `PREWHERE`. [\#3298](https://github.com/ClickHouse/ClickHouse/pull/3298)
- Fixed an error when using `PREWHERE` over columns that were added during `ALTER`. [\#3298](https://github.com/ClickHouse/ClickHouse/pull/3298)
- Added a check for the absence of `arrayJoin` for `DEFAULT` and `MATERIALIZED` expressions. Previously, `arrayJoin` led to an error when inserting data. [\#3337](https://github.com/ClickHouse/ClickHouse/pull/3337)
- Added a check for the absence of `arrayJoin` in a `PREWHERE` clause. Previously, this led to messages like `Size ... doesn't match` or `Unknown compression method` when executing queries. [\#3357](https://github.com/ClickHouse/ClickHouse/pull/3357)
- Fixed a segfault that could occur in rare cases after an optimization that replaced AND chains from equality evaluations with the corresponding IN expression. [liuyimin-bytedance](https://github.com/ClickHouse/ClickHouse/pull/3339)
- Minor corrections to `clickhouse-benchmark`: previously, client information was not sent to the server; now the number of queries executed is calculated more accurately when shutting down and for limiting the number of iterations. [\#3351](https://github.com/ClickHouse/ClickHouse/pull/3351) [\#3352](https://github.com/ClickHouse/ClickHouse/pull/3352)

#### Backward incompatible changes: {#backward-incompatible-changes-1}

- Removed the `allow_experimental_decimal_type` option. The `Decimal` data type is available for use by default. [\#3329](https://github.com/ClickHouse/ClickHouse/pull/3329)

## ClickHouse release 18.12 {#clickhouse-release-18-12}

### ClickHouse release 18.12.17, 2018-09-16 {#clickhouse-release-18-12-17-2018-09-16}

#### New features: {#new-features-2}

- `invalidate_query` (the ability to specify a query to check whether an external dictionary needs to be updated) is implemented for the `clickhouse` source. [\#3126](https://github.com/ClickHouse/ClickHouse/pull/3126)
- Added the ability to use `UInt*`, `Int*`, and `DateTime` data types (along with the `Date` type) as a `range_hashed` external dictionary key that defines the boundaries of ranges. Now `NULL` can be used to designate an open range. [Vasily Nemkov](https://github.com/ClickHouse/ClickHouse/pull/3123)
- The `Decimal` type now supports `var*` and `stddev*` aggregate functions (see the example after this list). [\#3129](https://github.com/ClickHouse/ClickHouse/pull/3129)
- The `Decimal` type now supports mathematical functions (`exp`, `sin`, and so on). [\#3129](https://github.com/ClickHouse/ClickHouse/pull/3129)
- The `system.part_log` table now has the `partition_id` column. [\#3089](https://github.com/ClickHouse/ClickHouse/pull/3089)
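A small sketch of both additions, using `system.numbers` so nothing else is assumed:

```sql
-- var*/stddev* aggregate functions over a Decimal column:
SELECT varSamp(d) AS v, stddevPop(d) AS s
FROM (SELECT toDecimal32(number, 2) AS d FROM system.numbers LIMIT 10);

-- Mathematical functions accept Decimal arguments:
SELECT exp(toDecimal32(1, 4)) AS e;
```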
#### Bug fixes: {#bug-fixes-11}

- `Merge` now works correctly on `Distributed` tables. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3159)
- Fixed incompatibility (an unnecessary dependency on the `glibc` version) that made it impossible to run ClickHouse on `Ubuntu Precise` and older versions. The incompatibility arose in version 18.12.13. [\#3130](https://github.com/ClickHouse/ClickHouse/pull/3130)
- Fixed errors in the `enable_optimize_predicate_expression` setting. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3107)
- Fixed a minor issue with backwards compatibility that appeared when working with a cluster of replicas on versions earlier than 18.12.13 and simultaneously creating a new replica of a table on a server with a newer version (shown in the message `Can not clone replica, because the ... updated to new ClickHouse version`, which is logical, but shouldn’t happen). [\#3122](https://github.com/ClickHouse/ClickHouse/pull/3122)

#### Backward incompatible changes: {#backward-incompatible-changes-2}

- The `enable_optimize_predicate_expression` option is enabled by default (which is rather optimistic). If query analysis errors occur that are related to searching for the column names, set `enable_optimize_predicate_expression` to 0. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3107)

### ClickHouse release 18.12.14, 2018-09-13 {#clickhouse-release-18-12-14-2018-09-13}

#### New features: {#new-features-3}

- Added support for `ALTER UPDATE` queries (see the example after this list). [\#3035](https://github.com/ClickHouse/ClickHouse/pull/3035)
- Added the `allow_ddl` option, which restricts the user’s access to DDL queries. [\#3104](https://github.com/ClickHouse/ClickHouse/pull/3104)
- Added the `min_merge_bytes_to_use_direct_io` option for `MergeTree` engines, which allows you to set a threshold for the total size of the merge (when above the threshold, data part files will be handled using O\_DIRECT). [\#3117](https://github.com/ClickHouse/ClickHouse/pull/3117)
- The `system.merges` system table now contains the `partition_id` column. [\#3099](https://github.com/ClickHouse/ClickHouse/pull/3099)
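A minimal sketch of the new query; the `hits` table and its columns are hypothetical. Like `ALTER ... DELETE`, the update runs as an asynchronous mutation (the `system.mutations` table, mentioned later in this changelog, tracks such mutations):

```sql
ALTER TABLE hits UPDATE visits = visits + 1 WHERE browser = 'Firefox'
```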
#### Improvements: {#improvements-3}

- If a data part remains unchanged during a mutation, it isn’t downloaded by replicas. [\#3103](https://github.com/ClickHouse/ClickHouse/pull/3103)
- Autocomplete is available for names of settings when working with `clickhouse-client`. [\#3106](https://github.com/ClickHouse/ClickHouse/pull/3106)

#### Bug fixes: {#bug-fixes-12}

- Added a check for the sizes of arrays that are elements of `Nested` type fields when inserting. [\#3118](https://github.com/ClickHouse/ClickHouse/pull/3118)
- Fixed an error updating external dictionaries with the `ODBC` source and `hashed` storage. This error occurred in version 18.12.13.
- Fixed a crash when creating a temporary table from a query with an `IN` condition. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3098)
- Fixed an error in aggregate functions for arrays that can have `NULL` elements. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3097)

### ClickHouse release 18.12.13, 2018-09-10 {#clickhouse-release-18-12-13-2018-09-10}

#### New features: {#new-features-4}

- Added the `DECIMAL(digits, scale)` data type (`Decimal32(scale)`, `Decimal64(scale)`, `Decimal128(scale)`). To enable it, use the setting `allow_experimental_decimal_type` (see the example after this list). [\#2846](https://github.com/ClickHouse/ClickHouse/pull/2846) [\#2970](https://github.com/ClickHouse/ClickHouse/pull/2970) [\#3008](https://github.com/ClickHouse/ClickHouse/pull/3008) [\#3047](https://github.com/ClickHouse/ClickHouse/pull/3047)
- New `WITH ROLLUP` modifier for `GROUP BY` (alternative syntax: `GROUP BY ROLLUP(...)`). [\#2948](https://github.com/ClickHouse/ClickHouse/pull/2948)
- In queries with JOIN, the star character expands to a list of columns in all tables, in compliance with the SQL standard. You can restore the old behavior by setting `asterisk_left_columns_only` to 1 on the user configuration level. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2787)
- Added support for JOIN with table functions. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2907)
- Autocomplete by pressing Tab in clickhouse-client. [Sergey Shcherbin](https://github.com/ClickHouse/ClickHouse/pull/2447)
- Ctrl+C in clickhouse-client clears a query that was entered. [\#2877](https://github.com/ClickHouse/ClickHouse/pull/2877)
- Added the `join_default_strictness` setting (values: `''`, `'any'`, `'all'`). This allows you to not specify `ANY` or `ALL` for `JOIN`. [\#2982](https://github.com/ClickHouse/ClickHouse/pull/2982)
- Each line of the server log related to query processing shows the query ID. [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482)
- Now you can get query execution logs in clickhouse-client (use the `send_logs_level` setting). With distributed query processing, logs are cascaded from all the servers. [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482)
- The `system.query_log` and `system.processes` (`SHOW PROCESSLIST`) tables now have information about all changed settings when you run a query (the nested structure of the `Settings` data). Added the `log_query_settings` setting. [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482)
- The `system.query_log` and `system.processes` tables now show information about the number of threads that are participating in query execution (see the `thread_numbers` column). [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482)
- Added `ProfileEvents` counters that measure the time spent on reading and writing over the network and reading and writing to disk, the number of network errors, and the time spent waiting when network bandwidth is limited. [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482)
- Added `ProfileEvents` counters that contain the system metrics from rusage (you can use them to get information about CPU usage in userspace and the kernel, page faults, and context switches), as well as taskstats metrics (use these to obtain information about I/O wait time, CPU wait time, and the amount of data read and recorded, both with and without page cache). [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482)
- The `ProfileEvents` counters are applied globally and for each query, as well as for each query execution thread, which allows you to profile resource consumption by query in detail. [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482)
- Added the `system.query_thread_log` table, which contains information about each query execution thread. Added the `log_query_threads` setting. [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482)
- The `system.metrics` and `system.events` tables now have built-in documentation. [\#3016](https://github.com/ClickHouse/ClickHouse/pull/3016)
- Added the `arrayEnumerateDense` function. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2975)
- Added the `arrayCumSumNonNegative` and `arrayDifference` functions. [Aleksey Studnev](https://github.com/ClickHouse/ClickHouse/pull/2942)
- Added the `retention` aggregate function. [Sundy Li](https://github.com/ClickHouse/ClickHouse/pull/2887)
- Now you can add (merge) states of aggregate functions by using the plus operator, and multiply the states of aggregate functions by a nonnegative constant. [\#3062](https://github.com/ClickHouse/ClickHouse/pull/3062) [\#3034](https://github.com/ClickHouse/ClickHouse/pull/3034)
- Tables in the MergeTree family now have the virtual column `_partition_id`. [\#3089](https://github.com/ClickHouse/ClickHouse/pull/3089)
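A short sketch of declaring and using the new type; the `payments` table is hypothetical. Note that the `allow_experimental_decimal_type` setting is removed in 18.14.9 (see above), where `Decimal` is available by default:

```sql
SET allow_experimental_decimal_type = 1;  -- required in 18.12.13

CREATE TABLE payments
(
    amount Decimal64(4),  -- up to 18 digits, 4 of them after the decimal point
    fee    Decimal32(2)
) ENGINE = Memory;

INSERT INTO payments VALUES (100.5000, 0.25);
SELECT amount + fee FROM payments;
```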
#### Experimental features: {#experimental-features-1}

- Added the `LowCardinality(T)` data type. This data type automatically creates a local dictionary of values and allows data processing without unpacking the dictionary. [\#2830](https://github.com/ClickHouse/ClickHouse/pull/2830)
- Added a cache of JIT-compiled functions and a counter for the number of uses before compiling. To JIT compile expressions, enable the `compile_expressions` setting. [\#2990](https://github.com/ClickHouse/ClickHouse/pull/2990) [\#3077](https://github.com/ClickHouse/ClickHouse/pull/3077)

#### Improvements: {#improvements-4}

- Fixed the problem with unlimited accumulation of the replication log when there are abandoned replicas. Added an effective recovery mode for replicas with a long lag.
- Improved performance of `GROUP BY` with multiple aggregation fields when one of them is a string and the others are fixed length.
- Improved performance when using `PREWHERE` and with implicit transfer of expressions in `PREWHERE`.
- Improved parsing performance for text formats (`CSV`, `TSV`). [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2977) [\#2980](https://github.com/ClickHouse/ClickHouse/pull/2980)
- Improved performance of reading strings and arrays in binary formats. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2955)
- Increased performance and reduced memory consumption for queries to `system.tables` and `system.columns` when there is a very large number of tables on a single server. [\#2953](https://github.com/ClickHouse/ClickHouse/pull/2953)
- Fixed a performance problem in the case of a large stream of queries that result in an error (the `_dl_addr` function is visible in `perf top`, but the server isn’t using much CPU). [\#2938](https://github.com/ClickHouse/ClickHouse/pull/2938)
- Conditions are cast into the View (when `enable_optimize_predicate_expression` is enabled). [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2907)
- Improvements to the functionality for the `UUID` data type. [\#3074](https://github.com/ClickHouse/ClickHouse/pull/3074) [\#2985](https://github.com/ClickHouse/ClickHouse/pull/2985)
- The `UUID` data type is supported in external dictionaries. [\#2822](https://github.com/ClickHouse/ClickHouse/pull/2822)
- The `visitParamExtractRaw` function works correctly with nested structures. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2974)
- When the `input_format_skip_unknown_fields` setting is enabled, object fields in `JSONEachRow` format are skipped correctly. [BlahGeek](https://github.com/ClickHouse/ClickHouse/pull/2958)
- For a `CASE` expression with conditions, you can now omit `ELSE`, which is equivalent to `ELSE NULL`. [\#2920](https://github.com/ClickHouse/ClickHouse/pull/2920)
- The operation timeout can now be configured when working with ZooKeeper. [urykhy](https://github.com/ClickHouse/ClickHouse/pull/2971)
- You can specify an offset for `LIMIT n, m` as `LIMIT n OFFSET m` (see the example after this list). [\#2840](https://github.com/ClickHouse/ClickHouse/pull/2840)
- You can use the `SELECT TOP n` syntax as an alternative for `LIMIT`. [\#2840](https://github.com/ClickHouse/ClickHouse/pull/2840)
- Increased the size of the queue to write to system tables, so the `SystemLog parameter queue is full` error doesn’t happen as often.
- The `windowFunnel` aggregate function now supports events that meet multiple conditions. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2801)
- Duplicate columns can be used in a `USING` clause for `JOIN`. [\#3006](https://github.com/ClickHouse/ClickHouse/pull/3006)
- `Pretty` formats now have a limit on column alignment by width. Use the `output_format_pretty_max_column_pad_width` setting. If a value is wider, it will still be displayed in its entirety, but the other cells in the table will not be too wide. [\#3003](https://github.com/ClickHouse/ClickHouse/pull/3003)
- The `odbc` table function now allows you to specify the database/schema name. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2885)
- Added the ability to use a username specified in the `clickhouse-client` config file. [Vladimir Kozbin](https://github.com/ClickHouse/ClickHouse/pull/2909)
- The `ZooKeeperExceptions` counter has been split into three counters: `ZooKeeperUserExceptions`, `ZooKeeperHardwareExceptions`, and `ZooKeeperOtherExceptions`.
- `ALTER DELETE` queries work for materialized views.
- Added randomization when running the cleanup thread periodically for `ReplicatedMergeTree` tables in order to avoid periodic load spikes when there are a very large number of `ReplicatedMergeTree` tables.
- Support for `ATTACH TABLE ... ON CLUSTER` queries. [\#3025](https://github.com/ClickHouse/ClickHouse/pull/3025)
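A quick sketch of the equivalent pagination forms; the `hits` table is hypothetical:

```sql
SELECT * FROM hits ORDER BY id LIMIT 10, 5;        -- classic form: skip 10 rows, return 5
SELECT * FROM hits ORDER BY id LIMIT 5 OFFSET 10;  -- the same result, SQL-style spelling
SELECT TOP 5 * FROM hits ORDER BY id;              -- alternative for LIMIT 5 (no offset)
```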
#### Bug fixes: {#bug-fixes-13}

- Fixed an issue with `Dictionary` tables (the `Size of offsets doesn't match size of column` or `Unknown compression method` exception was thrown). This bug appeared in version 18.10.3. [\#2913](https://github.com/ClickHouse/ClickHouse/issues/2913)
- Fixed a bug when merging `CollapsingMergeTree` tables if one of the data parts is empty (these parts are formed during a merge or `ALTER DELETE` if all data was deleted), and the `vertical` algorithm was used for the merge. [\#3049](https://github.com/ClickHouse/ClickHouse/pull/3049)
- Fixed a race condition during `DROP` or `TRUNCATE` for `Memory` tables with a simultaneous `SELECT`, which could lead to server crashes. This bug appeared in version 1.1.54388. [\#3038](https://github.com/ClickHouse/ClickHouse/pull/3038)
- Fixed the possibility of data loss when inserting into `Replicated` tables if the `Session is expired` error is returned (data loss can be detected by the `ReplicatedDataLoss` metric). This error occurred in version 1.1.54378. [\#2939](https://github.com/ClickHouse/ClickHouse/pull/2939) [\#2949](https://github.com/ClickHouse/ClickHouse/pull/2949) [\#2964](https://github.com/ClickHouse/ClickHouse/pull/2964)
- Fixed a segfault during `JOIN ... ON`. [\#3000](https://github.com/ClickHouse/ClickHouse/pull/3000)
- Fixed the error searching column names when the `WHERE` expression consists entirely of a qualified column name, such as `WHERE table.column`. [\#2994](https://github.com/ClickHouse/ClickHouse/pull/2994)
- Fixed the “Not found column” error that occurred when executing distributed queries if a single column consisting of an IN expression with a subquery is requested from a remote server. [\#3087](https://github.com/ClickHouse/ClickHouse/pull/3087)
- Fixed the `Block structure mismatch in UNION stream: different number of columns` error that occurred for distributed queries if one of the shards is local and the other is not, and optimization of the move to `PREWHERE` is triggered. [\#2226](https://github.com/ClickHouse/ClickHouse/pull/2226) [\#3037](https://github.com/ClickHouse/ClickHouse/pull/3037) [\#3055](https://github.com/ClickHouse/ClickHouse/pull/3055) [\#3065](https://github.com/ClickHouse/ClickHouse/pull/3065) [\#3073](https://github.com/ClickHouse/ClickHouse/pull/3073) [\#3090](https://github.com/ClickHouse/ClickHouse/pull/3090) [\#3093](https://github.com/ClickHouse/ClickHouse/pull/3093)
- Fixed the `pointInPolygon` function for certain cases of non-convex polygons. [\#2910](https://github.com/ClickHouse/ClickHouse/pull/2910)
- Fixed the incorrect result when comparing `nan` with integers. [\#3024](https://github.com/ClickHouse/ClickHouse/pull/3024)
- Fixed an error in the `zlib-ng` library that could lead to a segfault in rare cases. [\#2854](https://github.com/ClickHouse/ClickHouse/pull/2854)
- Fixed a memory leak when inserting into a table with `AggregateFunction` columns, if the state of the aggregate function is not simple (allocates memory separately), and if a single insertion request results in multiple small blocks. [\#3084](https://github.com/ClickHouse/ClickHouse/pull/3084)
- Fixed a race condition when creating and deleting the same `Buffer` or `MergeTree` table simultaneously.
- Fixed the possibility of a segfault when comparing tuples made up of certain non-trivial types, such as nested tuples. [\#2989](https://github.com/ClickHouse/ClickHouse/pull/2989)
- Fixed the possibility of a segfault when running certain `ON CLUSTER` queries. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2960)
- Fixed an error in the `arrayDistinct` function for `Nullable` array elements. [\#2845](https://github.com/ClickHouse/ClickHouse/pull/2845) [\#2937](https://github.com/ClickHouse/ClickHouse/pull/2937)
- The `enable_optimize_predicate_expression` option now correctly supports cases with `SELECT *`. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2929)
- Fixed the segfault when re-initializing the ZooKeeper session. [\#2917](https://github.com/ClickHouse/ClickHouse/pull/2917)
- Fixed potential blocking when working with ZooKeeper.
- Fixed incorrect code for adding nested data structures in a `SummingMergeTree`.
- When allocating memory for states of aggregate functions, alignment is correctly taken into account, which makes it possible to use operations that require alignment when implementing states of aggregate functions. [chenxing-xc](https://github.com/ClickHouse/ClickHouse/pull/2808)

#### Security fix: {#security-fix}
- Safe use of ODBC data sources. Interaction with ODBC drivers uses a separate `clickhouse-odbc-bridge` process. Errors in third-party ODBC drivers no longer cause problems with server stability or vulnerabilities. [\#2828](https://github.com/ClickHouse/ClickHouse/pull/2828) [\#2879](https://github.com/ClickHouse/ClickHouse/pull/2879) [\#2886](https://github.com/ClickHouse/ClickHouse/pull/2886) [\#2893](https://github.com/ClickHouse/ClickHouse/pull/2893) [\#2921](https://github.com/ClickHouse/ClickHouse/pull/2921)
- Fixed incorrect validation of the file path in the `catBoostPool` table function. [\#2894](https://github.com/ClickHouse/ClickHouse/pull/2894)
- The contents of system tables (`tables`, `databases`, `parts`, `columns`, `parts_columns`, `merges`, `mutations`, `replicas`, and `replication_queue`) are filtered according to the user’s configured access to databases (`allow_databases`). [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2856)

#### Backward incompatible changes: {#backward-incompatible-changes-3}

- In queries with JOIN, the star character expands to a list of columns in all tables, in compliance with the SQL standard. You can restore the old behavior by setting `asterisk_left_columns_only` to 1 on the user configuration level.

#### Build changes: {#build-changes-2}

- Most integration tests can now be run by commit.
- Code style checks can also be run by commit.
- The `memcpy` implementation is chosen correctly when building on CentOS7/Fedora. [Etienne Champetier](https://github.com/ClickHouse/ClickHouse/pull/2912)
- When using clang to build, some warnings from `-Weverything` have been added, in addition to the regular `-Wall -Wextra -Werror`. [\#2957](https://github.com/ClickHouse/ClickHouse/pull/2957)
- Debugging the build uses the `jemalloc` debug option.
- The interface of the library for interacting with ZooKeeper is declared abstract. [\#2950](https://github.com/ClickHouse/ClickHouse/pull/2950)

## ClickHouse release 18.10 {#clickhouse-release-18-10}

### ClickHouse release 18.10.3, 2018-08-13 {#clickhouse-release-18-10-3-2018-08-13}

#### New features: {#new-features-5}

- HTTPS can be used for replication. [\#2760](https://github.com/ClickHouse/ClickHouse/pull/2760)
- Added the functions `murmurHash2_64`, `murmurHash3_32`, `murmurHash3_64`, and `murmurHash3_128` in addition to the existing `murmurHash2_32` (see the example after this list). [\#2791](https://github.com/ClickHouse/ClickHouse/pull/2791)
- Support for Nullable types in the ClickHouse ODBC driver (`ODBCDriver2` output format). [\#2834](https://github.com/ClickHouse/ClickHouse/pull/2834)
- Support for `UUID` in the key columns.
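A runnable sketch of the new functions (constant arguments, so no table is assumed; wrapping the 128-bit result in `hex` is just a display choice, since `murmurHash3_128` returns a `FixedString(16)`):

```sql
SELECT
    murmurHash2_32('ClickHouse') AS h2_32,        -- previously available
    murmurHash2_64('ClickHouse') AS h2_64,        -- new in this release
    murmurHash3_64('ClickHouse') AS h3_64,
    hex(murmurHash3_128('ClickHouse')) AS h3_128  -- 128-bit hash, shown as hex
```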
#### Improvements: {#improvements-5}

- Clusters can be removed without restarting the server when they are deleted from the config files. [\#2777](https://github.com/ClickHouse/ClickHouse/pull/2777)
- External dictionaries can be removed without restarting the server when they are removed from config files. [\#2779](https://github.com/ClickHouse/ClickHouse/pull/2779)
- Added `SETTINGS` support for the `Kafka` table engine. [Alexander Marshalov](https://github.com/ClickHouse/ClickHouse/pull/2781)
- Improvements for the `UUID` data type (not yet complete). [\#2618](https://github.com/ClickHouse/ClickHouse/pull/2618)
- Support for empty parts after merges in the `SummingMergeTree`, `CollapsingMergeTree` and `VersionedCollapsingMergeTree` engines. [\#2815](https://github.com/ClickHouse/ClickHouse/pull/2815)
- Old records of completed mutations are deleted (`ALTER DELETE`). [\#2784](https://github.com/ClickHouse/ClickHouse/pull/2784)
- Added the `system.merge_tree_settings` table. [Kirill Shvakov](https://github.com/ClickHouse/ClickHouse/pull/2841)
- The `system.tables` table now has dependency columns: `dependencies_database` and `dependencies_table`. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2851)
- Added the `max_partition_size_to_drop` config option. [\#2782](https://github.com/ClickHouse/ClickHouse/pull/2782)
- Added the `output_format_json_escape_forward_slashes` option. [Alexander Bocharov](https://github.com/ClickHouse/ClickHouse/pull/2812)
- Added the `max_fetch_partition_retries_count` setting. [\#2831](https://github.com/ClickHouse/ClickHouse/pull/2831)
- Added the `prefer_localhost_replica` setting for disabling the preference for a local replica and going to a local replica without inter-process interaction. [\#2832](https://github.com/ClickHouse/ClickHouse/pull/2832)
- The `quantileExact` aggregate function returns `nan` in the case of aggregation on an empty `Float32` or `Float64` set. [Sundy Li](https://github.com/ClickHouse/ClickHouse/pull/2855)

#### Bug fixes: {#bug-fixes-14}

- Removed unnecessary escaping of the connection string parameters for ODBC, which made it impossible to establish a connection. This error occurred in version 18.6.0.
- Fixed the logic for processing `REPLACE PARTITION` commands in the replication queue. If there are two `REPLACE` commands for the same partition, the incorrect logic could cause one of them to remain in the replication queue and not be executed. [\#2814](https://github.com/ClickHouse/ClickHouse/pull/2814)
- Fixed a merge bug when all data parts were empty (parts that were formed from a merge or from `ALTER DELETE` if all data was deleted). This bug appeared in version 18.1.0. [\#2930](https://github.com/ClickHouse/ClickHouse/pull/2930)
- Fixed an error with concurrent access to `Set` or `Join` tables. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2823)
- Fixed the `Block structure mismatch in UNION stream: different number of columns` error that occurred for `UNION ALL` queries inside a subquery if one of the `SELECT` queries contains duplicate column names. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2094)
- Fixed a memory leak if an exception occurred when connecting to a MySQL server.
- Fixed an incorrect clickhouse-client response code in case of a query error.
- Fixed incorrect behavior of materialized views containing DISTINCT. [\#2795](https://github.com/ClickHouse/ClickHouse/issues/2795)

#### Backward incompatible changes: {#backward-incompatible-changes-4}

- Removed support for CHECK TABLE queries for Distributed tables.

#### Build changes: {#build-changes-3}

- The allocator has been replaced: `jemalloc` is now used instead of `tcmalloc`. In some scenarios, this increases speed by up to 20%. However, some queries have slowed by up to 20%. Memory consumption has been reduced by approximately 10% in some scenarios, with improved stability. Under highly concurrent loads, CPU usage in userspace and in the kernel shows just a slight increase. [\#2773](https://github.com/ClickHouse/ClickHouse/pull/2773)
- Use of libressl from a submodule. [\#1983](https://github.com/ClickHouse/ClickHouse/pull/1983) [\#2807](https://github.com/ClickHouse/ClickHouse/pull/2807)
- Use of unixodbc from a submodule. [\#2789](https://github.com/ClickHouse/ClickHouse/pull/2789)
- Use of mariadb-connector-c from a submodule. [\#2785](https://github.com/ClickHouse/ClickHouse/pull/2785)
- Added functional test files to the repository that depend on the availability of test data (for the time being, without the test data itself).

## ClickHouse release 18.6 {#clickhouse-release-18-6}

### ClickHouse release 18.6.0, 2018-08-02 {#clickhouse-release-18-6-0-2018-08-02}

#### New features: {#new-features-6}

- Added support for ON expressions for the JOIN ON syntax (see the example after this list):
  `JOIN ON Expr([table.]column ...) = Expr([table.]column, ...) [AND Expr([table.]column, ...) = Expr([table.]column, ...) ...]`
  The expression must be a chain of equalities joined by the AND operator. Each side of the equality can be an arbitrary expression over the columns of one of the tables. The use of fully qualified column names is supported (`table.name`, `database.table.name`, `table_alias.name`, `subquery_alias.name`) for the right table. [\#2742](https://github.com/ClickHouse/ClickHouse/pull/2742)
- HTTPS can be enabled for replication. [\#2760](https://github.com/ClickHouse/ClickHouse/pull/2760)
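A rough sketch of the newly supported form; `t1`, `t2`, and their columns are hypothetical (strictness is written out explicitly, since the `join_default_strictness` setting appears only in a later release):

```sql
SELECT t1.id, t2.name
FROM t1
ALL INNER JOIN t2 ON t1.id = t2.id AND lower(t1.code) = t2.code
-- Each side of an equality may be an arbitrary expression over one table's columns.
```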
#### Improvements: {#improvements-6}

- The server passes the patch component of its version to the client. Data about the patch version component is in `system.processes` and `query_log`. [\#2646](https://github.com/ClickHouse/ClickHouse/pull/2646)

## ClickHouse release 18.5 {#clickhouse-release-18-5}

### ClickHouse release 18.5.1, 2018-07-31 {#clickhouse-release-18-5-1-2018-07-31}

#### New features: {#new-features-7}

- Added the hash function `murmurHash2_32`. [\#2756](https://github.com/ClickHouse/ClickHouse/pull/2756)

#### Improvements: {#improvements-7}

- Now you can use the `from_env` attribute to set values in config files from environment variables. [\#2741](https://github.com/ClickHouse/ClickHouse/pull/2741)
- Added case-insensitive versions of the `coalesce`, `ifNull`, and `nullIf` functions. [\#2752](https://github.com/ClickHouse/ClickHouse/pull/2752)

#### Bug fixes: {#bug-fixes-15}

- Fixed a possible bug when starting a replica. [\#2759](https://github.com/ClickHouse/ClickHouse/pull/2759)

## ClickHouse release 18.4 {#clickhouse-release-18-4}

### ClickHouse release 18.4.0, 2018-07-28 {#clickhouse-release-18-4-0-2018-07-28}

#### New features: {#new-features-8}

- Added system tables: `formats`, `data_type_families`, `aggregate_function_combinators`, `table_functions`, `table_engines`, `collations`. [\#2721](https://github.com/ClickHouse/ClickHouse/pull/2721)
- Added the ability to use a table function instead of a table as an argument of a `remote` or `cluster` table function. [\#2708](https://github.com/ClickHouse/ClickHouse/pull/2708)
- Support for `HTTP Basic` authentication in the replication protocol. [\#2727](https://github.com/ClickHouse/ClickHouse/pull/2727)
- The `has` function now allows searching for a numeric value in an array of `Enum` values. [Maxim Khrisanfov](https://github.com/ClickHouse/ClickHouse/pull/2699)
- Support for adding arbitrary message separators when reading from `Kafka`. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2701)

#### Improvements: {#improvements-8}

- The `ALTER TABLE t DELETE WHERE` query does not rewrite data parts that were not affected by the WHERE condition. [\#2694](https://github.com/ClickHouse/ClickHouse/pull/2694)
- The `use_minimalistic_checksums_in_zookeeper` option for `ReplicatedMergeTree` tables is enabled by default. This setting was added in version 1.1.54378, 2018-04-16. Versions that are older than 1.1.54378 can no longer be installed.
- Support for running `KILL` and `OPTIMIZE` queries that specify `ON CLUSTER`. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2689)

#### Bug fixes: {#bug-fixes-16}

- Fixed the error `Column ... is not under an aggregate function and not in GROUP BY` for aggregation with an IN expression. This bug appeared in version 18.1.0. ([bbdd780b](https://github.com/ClickHouse/ClickHouse/commit/bbdd780be0be06a0f336775941cdd536878dd2c2))
- Fixed a bug in the `windowFunnel` aggregate function. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2735)
- Fixed a bug in the `anyHeavy` aggregate function. ([a2101df2](https://github.com/ClickHouse/ClickHouse/commit/a2101df25a6a0fba99aa71f8793d762af2b801ee))
- Fixed a server crash when using the `countArray()` aggregate function.

#### Backward incompatible changes: {#backward-incompatible-changes-5}

- Parameters for the `Kafka` engine were changed from `Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format[, kafka_schema, kafka_num_consumers])` to `Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format[, kafka_row_delimiter, kafka_schema, kafka_num_consumers])`. If your tables use the `kafka_schema` or `kafka_num_consumers` parameters, you have to manually edit the metadata files `path/metadata/database/table.sql` and add the `kafka_row_delimiter` parameter with the `''` value.
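A sketch of the new parameter order; the broker, topic, group, and schema values are hypothetical placeholders:

```sql
CREATE TABLE queue
(
    payload String
) ENGINE = Kafka('localhost:9092', 'topic1', 'group1', 'CapnProto',
                 '',                     -- kafka_row_delimiter now precedes the schema; '' if unused
                 'schema.capnp:Message', -- kafka_schema
                 2)                      -- kafka_num_consumers
```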
-- Fixed a rare deadlock in the ZooKeeper client library that occurred when there was a network error while reading the response ([c315200](https://github.com/ClickHouse/ClickHouse/commit/c315200e64b87e44bdf740707fc857d1fdf7e947)). -- Fixed an error during a CAST to Nullable types ([\#1322](https://github.com/ClickHouse/ClickHouse/issues/1322)). -- Fixed the incorrect result of the `maxIntersection()` function when the boundaries of intervals coincided ([Michael Furmur](https://github.com/ClickHouse/ClickHouse/pull/2657)). -- Fixed incorrect transformation of the OR expression chain in a function argument ([chenxing-xc](https://github.com/ClickHouse/ClickHouse/pull/2663)). -- Fixed performance degradation for queries containing `IN (subquery)` expressions inside another subquery ([\#2571](https://github.com/ClickHouse/ClickHouse/issues/2571)). -- Fixed incompatibility between servers with different versions in distributed queries that use a `CAST` function that isn’t in uppercase letters ([fe8c4d6](https://github.com/ClickHouse/ClickHouse/commit/fe8c4d64e434cacd4ceef34faa9005129f2190a5)). -- Added missing quoting of identifiers for queries to an external DBMS ([\#2635](https://github.com/ClickHouse/ClickHouse/issues/2635)). - -#### Backward incompatible changes: {#backward-incompatible-changes-6} - -- Converting a string containing the number zero to DateTime does not work. Example: `SELECT toDateTime('0')`. This is also the reason that `DateTime DEFAULT '0'` does not work in tables, as well as `0` in dictionaries. Solution: replace `0` with `0000-00-00 00:00:00`. - -## ClickHouse release 1.1 {#clickhouse-release-1-1} - -### ClickHouse release 1.1.54394, 2018-07-12 {#clickhouse-release-1-1-54394-2018-07-12} - -#### New features: {#new-features-10} - -- Added the `histogram` aggregate function ([Mikhail Surin](https://github.com/ClickHouse/ClickHouse/pull/2521)). -- Now `OPTIMIZE TABLE ... FINAL` can be used without specifying partitions for `ReplicatedMergeTree` ([Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2600)). - -#### Bug fixes: {#bug-fixes-18} - -- Fixed a problem with a very small timeout for sockets (one second) for reading and writing when sending and downloading replicated data, which made it impossible to download larger parts if there is a load on the network or disk (it resulted in cyclical attempts to download parts). This error occurred in version 1.1.54388. -- Fixed issues when using chroot in ZooKeeper if you inserted duplicate data blocks in the table. -- The `has` function now works correctly for an array with Nullable elements ([\#2115](https://github.com/ClickHouse/ClickHouse/issues/2115)). -- The `system.tables` table now works correctly when used in distributed queries. The `metadata_modification_time` and `engine_full` columns are now non-virtual. Fixed an error that occurred if only these columns were queried from the table. -- Fixed how an empty `TinyLog` table works after inserting an empty data block ([\#2563](https://github.com/ClickHouse/ClickHouse/issues/2563)). -- The `system.zookeeper` table works if the value of the node in ZooKeeper is NULL. - -### ClickHouse release 1.1.54390, 2018-07-06 {#clickhouse-release-1-1-54390-2018-07-06} - -#### New features: {#new-features-11} - -- Queries can be sent in `multipart/form-data` format (in the `query` field), which is useful if external data is also sent for query processing ([Olga Hvostikova](https://github.com/ClickHouse/ClickHouse/pull/2490)). 
-- Added the ability to enable or disable processing single or double quotes when reading data in CSV format. You can configure this in the `format_csv_allow_single_quotes` and `format_csv_allow_double_quotes` settings ([Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2574)). -- Now `OPTIMIZE TABLE ... FINAL` can be used without specifying the partition for non-replicated variants of `MergeTree` ([Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2599)). - -#### Improvements: {#improvements-10} - -- Improved performance, reduced memory consumption, and corrected memory consumption tracking when the `IN` operator can use a table index ([\#2584](https://github.com/ClickHouse/ClickHouse/pull/2584)). -- Removed redundant checking of checksums when adding a data part. This is important when there are a large number of replicas, because in these cases the total number of checks was equal to N^2. -- Added support for `Array(Tuple(...))` arguments for the `arrayEnumerateUniq` function ([\#2573](https://github.com/ClickHouse/ClickHouse/pull/2573)). -- Added `Nullable` support for the `runningDifference` function ([\#2594](https://github.com/ClickHouse/ClickHouse/pull/2594)). -- Improved query analysis performance when there is a very large number of expressions ([\#2572](https://github.com/ClickHouse/ClickHouse/pull/2572)). -- Faster selection of data parts for merging in `ReplicatedMergeTree` tables. Faster recovery of the ZooKeeper session ([\#2597](https://github.com/ClickHouse/ClickHouse/pull/2597)). -- The `format_version.txt` file for `MergeTree` tables is re-created if it is missing, which makes sense if ClickHouse is launched after copying the directory structure without files ([Ciprian Hacman](https://github.com/ClickHouse/ClickHouse/pull/2593)). - -#### Bug fixes: {#bug-fixes-19} - -- Fixed a bug when working with ZooKeeper that could make it impossible to recover the session and readonly states of tables before restarting the server. -- Fixed a bug when working with ZooKeeper that could result in old nodes not being deleted if the session is interrupted. -- Fixed an error in the `quantileTDigest` function for Float arguments (this bug was introduced in version 1.1.54388) ([Mikhail Surin](https://github.com/ClickHouse/ClickHouse/pull/2553)). -- Fixed a bug in the index for MergeTree tables if the primary key column is located inside the function for converting types between signed and unsigned integers of the same size ([\#2603](https://github.com/ClickHouse/ClickHouse/pull/2603)). -- Fixed segfault if `macros` are used but they aren’t in the config file ([\#2570](https://github.com/ClickHouse/ClickHouse/pull/2570)). -- Fixed switching to the default database when reconnecting the client ([\#2583](https://github.com/ClickHouse/ClickHouse/pull/2583)). -- Fixed a bug that occurred when the `use_index_for_in_with_subqueries` setting was disabled. - -#### Security fix: {#security-fix-1} - -- Sending files is no longer possible when connected to MySQL (`LOAD DATA LOCAL INFILE`). - -### ClickHouse release 1.1.54388, 2018-06-28 {#clickhouse-release-1-1-54388-2018-06-28} - -#### New features: {#new-features-12} - -- Support for the `ALTER TABLE t DELETE WHERE` query for replicated tables. Added the `system.mutations` table to track the progress of queries of this type. -- Support for the `ALTER TABLE t [REPLACE|ATTACH] PARTITION` query for \*MergeTree tables.
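An illustrative sketch of the `[REPLACE|ATTACH] PARTITION` queries added above, assuming hypothetical `MergeTree` tables `visits` and `visits_backup` partitioned by month:

```sql
-- Copy partition 201807 from visits_backup into visits, replacing its current contents.
ALTER TABLE visits REPLACE PARTITION 201807 FROM visits_backup;

-- Re-attach a partition that was previously detached from visits.
ALTER TABLE visits ATTACH PARTITION 201806;
```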
-- Support for the `TRUNCATE TABLE` query ([Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2260)) -- Several new `SYSTEM` queries for replicated tables (`RESTART REPLICAS`, `SYNC REPLICA`, `[STOP|START] [MERGES|FETCHES|SENDS REPLICATED|REPLICATION QUEUES]`). -- Added the ability to write to a table with the MySQL engine and the corresponding table function ([sundy-li](https://github.com/ClickHouse/ClickHouse/pull/2294)). -- Added the `url()` table function and the `URL` table engine ([Alexander Sapin](https://github.com/ClickHouse/ClickHouse/pull/2501)). -- Added the `windowFunnel` aggregate function ([sundy-li](https://github.com/ClickHouse/ClickHouse/pull/2352)). -- New `startsWith` and `endsWith` functions for strings ([Vadim Plakhtinsky](https://github.com/ClickHouse/ClickHouse/pull/2429)). -- The `numbers()` table function now allows you to specify the offset ([Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2535)). -- The password to `clickhouse-client` can be entered interactively. -- Server logs can now be sent to syslog ([Alexander Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/2459)). -- Support for logging in dictionaries with a shared library source ([Alexander Sapin](https://github.com/ClickHouse/ClickHouse/pull/2472)). -- Support for custom CSV delimiters ([Ivan Zhukov](https://github.com/ClickHouse/ClickHouse/pull/2263)) -- Added the `date_time_input_format` setting. If you switch this setting to `'best_effort'`, DateTime values will be read in a wide range of formats. -- Added the `clickhouse-obfuscator` utility for data obfuscation. Usage example: publishing data used in performance tests. - -#### Experimental features: {#experimental-features-2} - -- Added the ability to calculate `and` arguments only where they are needed ([Anastasia Tsarkova](https://github.com/ClickHouse/ClickHouse/pull/2272)) -- JIT compilation to native code is now available for some expressions ([pyos](https://github.com/ClickHouse/ClickHouse/pull/2277)). - -#### Bug fixes: {#bug-fixes-20} - -- Duplicates no longer appear for a query with `DISTINCT` and `ORDER BY`. -- Queries with `ARRAY JOIN` and `arrayFilter` no longer return an incorrect result. -- Fixed an error when reading an array column from a Nested structure ([\#2066](https://github.com/ClickHouse/ClickHouse/issues/2066)). -- Fixed an error when analyzing queries with a HAVING clause like `HAVING tuple IN (...)`. -- Fixed an error when analyzing queries with recursive aliases. -- Fixed an error when reading from ReplacingMergeTree with a condition in PREWHERE that filters all rows ([\#2525](https://github.com/ClickHouse/ClickHouse/issues/2525)). -- User profile settings were not applied when using sessions in the HTTP interface. -- Fixed how settings are applied from the command line parameters in clickhouse-local. -- The ZooKeeper client library now uses the session timeout received from the server. -- Fixed a bug in the ZooKeeper client library when the client waited for the server response longer than the timeout. -- Fixed pruning of parts for queries with conditions on partition key columns ([\#2342](https://github.com/ClickHouse/ClickHouse/issues/2342)). -- Merges are now possible after `CLEAR COLUMN IN PARTITION` ([\#2315](https://github.com/ClickHouse/ClickHouse/issues/2315)). -- Type mapping in the ODBC table function has been fixed ([sundy-li](https://github.com/ClickHouse/ClickHouse/pull/2268)). 
-- Type comparisons have been fixed for `DateTime` with and without the time zone ([Alexander Bocharov](https://github.com/ClickHouse/ClickHouse/pull/2400)). -- Fixed syntactic parsing and formatting of the `CAST` operator. -- Fixed insertion into a materialized view for the Distributed table engine ([Babacar Diassé](https://github.com/ClickHouse/ClickHouse/pull/2411)). -- Fixed a race condition when writing data from the `Kafka` engine to materialized views ([Yangkuan Liu](https://github.com/ClickHouse/ClickHouse/pull/2448)). -- Fixed SSRF in the remote() table function. -- Fixed exit behavior of `clickhouse-client` in multiline mode ([\#2510](https://github.com/ClickHouse/ClickHouse/issues/2510)). - -#### Improvements: {#improvements-11} - -- Background tasks in replicated tables are now performed in a thread pool instead of in separate threads ([Silviu Caragea](https://github.com/ClickHouse/ClickHouse/pull/1722)). -- Improved LZ4 compression performance. -- Faster analysis for queries with a large number of JOINs and subqueries. -- The DNS cache is now updated automatically when there are too many network errors. -- Table inserts no longer occur if the insert into one of the materialized views is not possible because it has too many parts. -- Corrected the discrepancy in the event counters `Query`, `SelectQuery`, and `InsertQuery`. -- Expressions like `tuple IN (SELECT tuple)` are allowed if the tuple types match (see the example below). -- A server with replicated tables can start even if you haven’t configured ZooKeeper. -- When calculating the number of available CPU cores, limits on cgroups are now taken into account ([Atri Sharma](https://github.com/ClickHouse/ClickHouse/pull/2325)). -- Added chown for config directories in the systemd config file ([Mikhail Shiryaev](https://github.com/ClickHouse/ClickHouse/pull/2421)). - -#### Build changes: {#build-changes-4} - -- The gcc8 compiler can be used for builds. -- Added the ability to build llvm from submodule. -- The version of the librdkafka library has been updated to v0.11.4. -- Added the ability to use the system libcpuid library. The library version has been updated to 0.4.0. -- Fixed the build using the vectorclass library ([Babacar Diassé](https://github.com/ClickHouse/ClickHouse/pull/2274)). -- CMake now generates files for ninja by default (like when using `-G Ninja`). -- Added the ability to use the libtinfo library instead of libtermcap ([Georgy Kondratiev](https://github.com/ClickHouse/ClickHouse/pull/2519)). -- Fixed a header file conflict in Fedora Rawhide ([\#2520](https://github.com/ClickHouse/ClickHouse/issues/2520)). - -#### Backward incompatible changes: {#backward-incompatible-changes-7} - -- Removed escaping in `Vertical` and `Pretty*` formats and deleted the `VerticalRaw` format. -- If servers with version 1.1.54388 (or newer) and servers with an older version are used simultaneously in a distributed query and the query has the `cast(x, 'Type')` expression without the `AS` keyword and doesn’t have the word `cast` in uppercase, an exception will be thrown with a message like `Not found column cast(0, 'UInt8') in block`. Solution: Update the server on the entire cluster. - -### ClickHouse release 1.1.54385, 2018-06-01 {#clickhouse-release-1-1-54385-2018-06-01} - -#### Bug fixes: {#bug-fixes-21} - -- Fixed an error that in some cases caused ZooKeeper operations to block.
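A minimal sketch of the `tuple IN (SELECT tuple)` form referenced in the 1.1.54388 improvements above; `hits` and `visits` are hypothetical tables whose `(CounterID, EventDate)` columns have matching types:

```sql
-- The left side is a tuple of columns; the subquery returns tuples of the same types.
SELECT count()
FROM hits
WHERE (CounterID, EventDate) IN (SELECT CounterID, EventDate FROM visits);
```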
- -### ClickHouse release 1.1.54383, 2018-05-22 {#clickhouse-release-1-1-54383-2018-05-22} - -#### Bug fixes: {#bug-fixes-22} - -- Fixed a slowdown of the replication queue if a table has many replicas. - -### ClickHouse release 1.1.54381, 2018-05-14 {#clickhouse-release-1-1-54381-2018-05-14} - -#### Bug fixes: {#bug-fixes-23} - -- Fixed a leak of nodes in ZooKeeper when ClickHouse loses the connection to the ZooKeeper server. - -### ClickHouse release 1.1.54380, 2018-04-21 {#clickhouse-release-1-1-54380-2018-04-21} - -#### New features: {#new-features-13} - -- Added the table function `file(path, format, structure)`. An example reading bytes from `/dev/urandom`: first run `ln -s /dev/urandom /var/lib/clickhouse/user_files/random`, then `clickhouse-client -q "SELECT * FROM file('random', 'RowBinary', 'd UInt8') LIMIT 10"`. - -#### Improvements: {#improvements-12} - -- Subqueries can be wrapped in `()` brackets to enhance query readability. For example: `(SELECT 1) UNION ALL (SELECT 1)`. -- Simple `SELECT` queries from the `system.processes` table are not included in the `max_concurrent_queries` limit. - -#### Bug fixes: {#bug-fixes-24} - -- Fixed incorrect behavior of the `IN` operator when selecting from a `MATERIALIZED VIEW`. -- Fixed incorrect filtering by partition index in expressions like `partition_key_column IN (...)`. -- Fixed the inability to execute an `OPTIMIZE` query on a non-leader replica if `RENAME` was performed on the table. -- Fixed the authorization error when executing `OPTIMIZE` or `ALTER` queries on a non-leader replica. -- Fixed freezing of `KILL QUERY`. -- Fixed an error in the ZooKeeper client library which led to loss of watches, freezing of the distributed DDL queue, and slowdowns in the replication queue if a non-empty `chroot` prefix is used in the ZooKeeper configuration. - -#### Backward incompatible changes: {#backward-incompatible-changes-8} - -- Removed support for expressions like `(a, b) IN (SELECT (a, b))` (you can use the equivalent expression `(a, b) IN (SELECT a, b)`). In previous releases, these expressions led to undetermined `WHERE` filtering or caused errors. - -### ClickHouse release 1.1.54378, 2018-04-16 {#clickhouse-release-1-1-54378-2018-04-16} - -#### New features: {#new-features-14} - -- Logging level can be changed without restarting the server. -- Added the `SHOW CREATE DATABASE` query. -- The `query_id` can be passed to `clickhouse-client` (elBroom). -- New setting: `max_network_bandwidth_for_all_users`. -- Added support for `ALTER TABLE ... PARTITION ...` for `MATERIALIZED VIEW`. -- Added information about the size of data parts in uncompressed form in the system table. -- Server-to-server encryption support for distributed tables (`<secure>1</secure>` in the replica config in `<remote_servers>`). -- Table-level configuration for the `ReplicatedMergeTree` family in order to minimize the amount of data stored in ZooKeeper: `use_minimalistic_checksums_in_zookeeper = 1`. -- Configuration of the `clickhouse-client` prompt. By default, server names are now output to the prompt. The server’s display name can be changed. It’s also sent in the `X-ClickHouse-Display-Name` HTTP header (Kirill Shvakov). -- Multiple comma-separated `topics` can be specified for the `Kafka` engine (Tobias Adamson). -- When a query is stopped by `KILL QUERY` or `replace_running_query`, the client receives the `Query was canceled` exception instead of an incomplete result. - -#### Improvements: {#improvements-13} - -- `ALTER TABLE ... DROP/DETACH PARTITION` queries are run at the front of the replication queue. -- `SELECT ...
FINAL` and `OPTIMIZE ... FINAL` can be used even when the table has a single data part (see the sketch at the end of this release’s notes). -- A `query_log` table is recreated on the fly if it was deleted manually (Kirill Shvakov). -- The `lengthUTF8` function runs faster (zhang2014). -- Improved performance of synchronous inserts in `Distributed` tables (`insert_distributed_sync = 1`) when there is a very large number of shards. -- The server accepts the `send_timeout` and `receive_timeout` settings from the client and applies them when connecting to the client (they are applied in reverse order: the server socket’s `send_timeout` is set to the `receive_timeout` value received from the client, and vice versa). -- More robust crash recovery for asynchronous insertion into `Distributed` tables. -- The return type of the `countEqual` function changed from `UInt32` to `UInt64` (谢磊). - -#### Bug fixes: {#bug-fixes-25} - -- Fixed an error with `IN` when the left side of the expression is `Nullable`. -- Correct results are now returned when using tuples with `IN` when some of the tuple components are in the table index. -- The `max_execution_time` limit now works correctly with distributed queries. -- Fixed errors when calculating the size of composite columns in the `system.columns` table. -- Fixed an error when creating a temporary table with `CREATE TEMPORARY TABLE IF NOT EXISTS`. -- Fixed errors in `StorageKafka` (\#2075) -- Fixed server crashes from invalid arguments of certain aggregate functions. -- Fixed the error that prevented the `DETACH DATABASE` query from stopping background tasks for `ReplicatedMergeTree` tables. -- The `Too many parts` state is less likely to happen when inserting into aggregated materialized views (\#2084). -- Corrected recursive handling of substitutions in the config if a substitution must be followed by another substitution on the same level. -- Corrected the syntax in the metadata file when creating a `VIEW` that uses a query with `UNION ALL`. -- `SummingMergeTree` now works correctly for summation of nested data structures with a composite key. -- Fixed the possibility of a race condition when choosing the leader for `ReplicatedMergeTree` tables. - -#### Build changes: {#build-changes-5} - -- The build supports `ninja` instead of `make` and uses `ninja` by default for building releases. -- Renamed packages: `clickhouse-server-base` to `clickhouse-common-static`; `clickhouse-server-common` to `clickhouse-server`; `clickhouse-common-dbg` to `clickhouse-common-static-dbg`. To install, use `clickhouse-server clickhouse-client`. Packages with the old names will still load in the repositories for backward compatibility. - -#### Backward incompatible changes: {#backward-incompatible-changes-9} - -- Removed the special interpretation of an IN expression if an array is specified on the left side. Previously, the expression `arr IN (set)` was interpreted as “at least one `arr` element belongs to the `set`”. To get the same behavior in the new version, write `arrayExists(x -> x IN (set), arr)`. -- Disabled the incorrect use of the socket option `SO_REUSEPORT`, which was incorrectly enabled by default in the Poco library. Note that on Linux there is no longer any reason to simultaneously specify the addresses `::` and `0.0.0.0` for listen – use just `::`, which allows listening for connections over both IPv4 and IPv6 (with the default kernel config settings). You can also revert to the behavior from previous versions by specifying `<listen_reuse_port>1</listen_reuse_port>` in the config.
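The sketch referenced from the improvements list above, assuming a hypothetical `ReplacingMergeTree` table named `visits_dedup`; both statements now work even when the table has only one data part:

```sql
-- Merge parts (collapsing duplicate rows) even if only a single part exists.
OPTIMIZE TABLE visits_dedup FINAL;

-- Read fully merged (deduplicated) data without waiting for a background merge.
SELECT * FROM visits_dedup FINAL LIMIT 10;
```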
- -### ClickHouse release 1.1.54370, 2018-03-16 {#clickhouse-release-1-1-54370-2018-03-16} - -#### New features: {#new-features-15} - -- Added the `system.macros` table and auto updating of macros when the config file is changed. -- Added the `SYSTEM RELOAD CONFIG` query. -- Added the `maxIntersections(left_col, right_col)` aggregate function, which returns the maximum number of simultaneously intersecting intervals `[left; right]`. The `maxIntersectionsPosition(left, right)` function returns the beginning of the “maximum” interval. ([Michael Furmur](https://github.com/ClickHouse/ClickHouse/pull/2012)). - -#### Improvements: {#improvements-14} - -- When inserting data in a `Replicated` table, fewer requests are made to `ZooKeeper` (and most of the user-level errors have disappeared from the `ZooKeeper` log). -- Added the ability to create aliases for data sets. Example: `WITH (1, 2, 3) AS set SELECT number IN set FROM system.numbers LIMIT 10`. - -#### Bug fixes: {#bug-fixes-26} - -- Fixed the `Illegal PREWHERE` error when reading from Merge tables over `Distributed` tables. -- Added fixes that allow you to start clickhouse-server in IPv4-only Docker containers. -- Fixed a race condition when reading from the `system.parts_columns` table. -- Removed double buffering during a synchronous insert to a `Distributed` table, which could have caused the connection to time out. -- Fixed a bug that caused excessively long waits for an unavailable replica before beginning a `SELECT` query. -- Fixed incorrect dates in the `system.parts` table. -- Fixed a bug that made it impossible to insert data in a `Replicated` table if `chroot` was non-empty in the configuration of the `ZooKeeper` cluster. -- Fixed the vertical merging algorithm for an empty `ORDER BY` table. -- Restored the ability to use dictionaries in queries to remote tables, even if these dictionaries are not present on the requestor server. This functionality was lost in release 1.1.54362. -- Restored the behavior for queries like `SELECT * FROM remote('server2', default.table) WHERE col IN (SELECT col2 FROM default.table)` when the right side of the `IN` should use a remote `default.table` instead of a local one. This behavior was broken in version 1.1.54358. -- Removed extraneous error-level logging of `Not found column ... in block`. - -### Clickhouse Release 1.1.54362, 2018-03-11 {#clickhouse-release-1-1-54362-2018-03-11} - -#### New features: {#new-features-16} - -- Aggregation without `GROUP BY` for an empty set (such as `SELECT count(*) FROM table WHERE 0`) now returns a result with one row with null values for aggregate functions, in compliance with the SQL standard. To restore the old behavior (return an empty result), set `empty_result_for_aggregation_by_empty_set` to 1. -- Added type conversion for `UNION ALL`. Different alias names are allowed in `SELECT` positions in `UNION ALL`, in compliance with the SQL standard. -- Arbitrary expressions are supported in `LIMIT BY` clauses. Previously, it was only possible to use columns resulting from `SELECT`. -- An index of `MergeTree` tables is used when `IN` is applied to a tuple of expressions from the columns of the primary key. Example: `WHERE (UserID, EventDate) IN ((123, '2000-01-01'), ...)` (Anastasiya Tsarkova). -- Added the `clickhouse-copier` tool for copying between clusters and resharding data (beta). -- Added consistent hashing functions: `yandexConsistentHash`, `jumpConsistentHash`, `sumburConsistentHash`.
They can be used as a sharding key in order to reduce the amount of network traffic during subsequent reshardings. -- Added functions: `arrayAny`, `arrayAll`, `hasAny`, `hasAll`, `arrayIntersect`, `arrayResize`. -- Added the `arrayCumSum` function (Javi Santana). -- Added the `parseDateTimeBestEffort`, `parseDateTimeBestEffortOrZero`, and `parseDateTimeBestEffortOrNull` functions to read the DateTime from a string containing text in a wide variety of possible formats. -- Data can be partially reloaded from external dictionaries during updating (load just the records in which the value of the specified field is greater than in the previous download) (Arsen Hakobyan). -- Added the `cluster` table function. Example: `cluster(cluster_name, db, table)`. The `remote` table function can accept the cluster name as the first argument, if it is specified as an identifier. A usage sketch follows this list. -- The `remote` and `cluster` table functions can be used in `INSERT` queries. -- Added the `create_table_query` and `engine_full` virtual columns to the `system.tables` table. The `metadata_modification_time` column is virtual. -- Added the `data_path` and `metadata_path` columns to the `system.tables` and `system.databases` tables, and added the `path` column to the `system.parts` and `system.parts_columns` tables. -- Added additional information about merges in the `system.part_log` table. -- An arbitrary partitioning key can be used for the `system.query_log` table (Kirill Shvakov). -- The `SHOW TABLES` query now also shows temporary tables. Added temporary tables and the `is_temporary` column to `system.tables` (zhang2014). -- Added `DROP TEMPORARY TABLE` and `EXISTS TEMPORARY TABLE` queries (zhang2014). -- Support for `SHOW CREATE TABLE` for temporary tables (zhang2014). -- Added the `system_profile` configuration parameter for the settings used by internal processes. -- Support for loading `object_id` as an attribute in `MongoDB` dictionaries (Pavel Litvinenko). -- Reading `null` as the default value when loading data for an external dictionary with the `MongoDB` source (Pavel Litvinenko). -- Reading `DateTime` values in the `Values` format from a Unix timestamp without single quotes. -- Failover is supported in `remote` table functions for cases when some of the replicas are missing the requested table. -- Configuration settings can be overridden in the command line when you run `clickhouse-server`. Example: `clickhouse-server -- --logger.level=information`. -- Implemented the `empty` function for a `FixedString` argument: the function returns 1 if the string consists entirely of null bytes (zhang2014). -- Added the `listen_try` configuration parameter for listening to at least one of the listen addresses without quitting, if some of the addresses can’t be listened to (useful for systems with disabled support for IPv4 or IPv6). -- Added the `VersionedCollapsingMergeTree` table engine. -- Support for rows and arbitrary numeric types for the `library` dictionary source. -- `MergeTree` tables can be used without a primary key (you need to specify `ORDER BY tuple()`). -- A `Nullable` type can be `CAST` to a non-`Nullable` type if the argument is not `NULL`. -- `RENAME TABLE` can be performed for `VIEW`. -- Added the `throwIf` function. -- Added the `odbc_default_field_size` option, which allows you to extend the maximum size of the value loaded from an ODBC source (by default, it is 1024). -- The `system.processes` table and `SHOW PROCESSLIST` now have the `is_cancelled` and `peak_memory_usage` columns.
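The usage sketch referenced from the `cluster` item above; `my_cluster` is a hypothetical cluster name from the server configuration, and `default.hits` a hypothetical table present on its shards:

```sql
-- Query the same table on every shard of the cluster and see which host answered.
SELECT hostName() AS host, count()
FROM cluster(my_cluster, default, hits)
GROUP BY host;
```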
- -#### Improvements: {#improvements-15} - -- Limits and quotas on the result are no longer applied to intermediate data for `INSERT SELECT` queries or for `SELECT` subqueries. -- Fewer false triggers of `force_restore_data` when checking the status of `Replicated` tables when the server starts. -- Added the `allow_distributed_ddl` option. -- Nondeterministic functions are not allowed in expressions for `MergeTree` table keys. -- Files with substitutions from `config.d` directories are loaded in alphabetical order. -- Improved performance of the `arrayElement` function in the case of a constant multidimensional array with an empty array as one of the elements. Example: `[[1], []][x]`. -- The server starts faster now when using configuration files with very large substitutions (for instance, very large lists of IP networks). -- When running a query, table valued functions run once. Previously, `remote` and `mysql` table valued functions performed the same query twice to retrieve the table structure from a remote server. -- The `MkDocs` documentation generator is used. -- When you try to delete a table column that `DEFAULT`/`MATERIALIZED` expressions of other columns depend on, an exception is thrown (zhang2014). -- Added the ability to parse an empty line in text formats as the number 0 for `Float` data types. This feature was previously available but was lost in release 1.1.54342. -- `Enum` values can be used in `min`, `max`, `sum` and some other functions. In these cases, it uses the corresponding numeric values. This feature was previously available but was lost in the release 1.1.54337. -- Added `max_expanded_ast_elements` to restrict the size of the AST after recursively expanding aliases. - -#### Bug fixes: {#bug-fixes-27} - -- Fixed cases when unnecessary columns were removed from subqueries in error, or not removed from subqueries containing `UNION ALL`. -- Fixed a bug in merges for `ReplacingMergeTree` tables. -- Fixed synchronous insertions in `Distributed` tables (`insert_distributed_sync = 1`). -- Fixed segfault for certain uses of `FULL` and `RIGHT JOIN` with duplicate columns in subqueries. -- Fixed segfault for certain uses of `replace_running_query` and `KILL QUERY`. -- Fixed the order of the `source` and `last_exception` columns in the `system.dictionaries` table. -- Fixed a bug when the `DROP DATABASE` query did not delete the file with metadata. -- Fixed the `DROP DATABASE` query for `Dictionary` databases. -- Fixed the low precision of `uniqHLL12` and `uniqCombined` functions for cardinalities greater than 100 million items (Alex Bocharov). -- Fixed the calculation of implicit default values when necessary to simultaneously calculate default explicit expressions in `INSERT` queries (zhang2014). -- Fixed a rare case when a query to a `MergeTree` table couldn’t finish (chenxing-xc). -- Fixed a crash that occurred when running a `CHECK` query for `Distributed` tables if all shards are local (chenxing.xc). -- Fixed a slight performance regression with functions that use regular expressions. -- Fixed a performance regression when creating multidimensional arrays from complex expressions. -- Fixed a bug that could cause an extra `FORMAT` section to appear in an `.sql` file with metadata. -- Fixed a bug that caused the `max_table_size_to_drop` limit to apply when trying to delete a `MATERIALIZED VIEW` looking at an explicitly specified table. 
-- Fixed incompatibility with old clients (old clients were sometimes sent data with the `DateTime('timezone')` type, which they do not understand). -- Fixed a bug when reading `Nested` column elements of structures that were added using `ALTER` but that are empty for the old partitions, when the conditions for these columns moved to `PREWHERE`. -- Fixed a bug when filtering tables by virtual `_table` columns in queries to `Merge` tables. -- Fixed a bug when using `ALIAS` columns in `Distributed` tables. -- Fixed a bug that made dynamic compilation impossible for queries with aggregate functions from the `quantile` family. -- Fixed a race condition in the query execution pipeline that occurred in very rare cases when using `Merge` tables with a large number of tables, and when using `GLOBAL` subqueries. -- Fixed a crash when passing arrays of different sizes to an `arrayReduce` function when using aggregate functions from multiple arguments. -- Prohibited the use of queries with `UNION ALL` in a `MATERIALIZED VIEW`. -- Fixed an error during initialization of the `part_log` system table when the server starts (by default, `part_log` is disabled). - -#### Backward incompatible changes: {#backward-incompatible-changes-10} - -- Removed the `distributed_ddl_allow_replicated_alter` option. This behavior is enabled by default. -- Removed the `strict_insert_defaults` setting. If you were using this functionality, write to `clickhouse-feedback@yandex-team.com`. -- Removed the `UnsortedMergeTree` engine. - -### Clickhouse Release 1.1.54343, 2018-02-05 {#clickhouse-release-1-1-54343-2018-02-05} - -- Added macros support for defining cluster names in distributed DDL queries and constructors of Distributed tables: `CREATE TABLE distr ON CLUSTER '{cluster}' (...) ENGINE = Distributed('{cluster}', 'db', 'table')`. -- Now queries like `SELECT ... FROM table WHERE expr IN (subquery)` are processed using the `table` index. -- Improved processing of duplicates when inserting to Replicated tables, so they no longer slow down execution of the replication queue. - -### Clickhouse Release 1.1.54342, 2018-01-22 {#clickhouse-release-1-1-54342-2018-01-22} - -This release contains bug fixes for the previous release 1.1.54337: - -- Fixed a regression in 1.1.54337: if the default user has readonly access, then the server refuses to start up with the message `Cannot create database in readonly mode`. -- Fixed a regression in 1.1.54337: on systems with systemd, logs are always written to syslog regardless of the configuration; the watchdog script still uses init.d. -- Fixed a regression in 1.1.54337: wrong default configuration in the Docker image. -- Fixed nondeterministic behavior of GraphiteMergeTree (you can see it in log messages `Data after merge is not byte-identical to the data on another replicas`). -- Fixed a bug that may lead to inconsistent merges after OPTIMIZE query to Replicated tables (you may see it in log messages `Part ... intersects the previous part`). -- Buffer tables now work correctly when MATERIALIZED columns are present in the destination table (by zhang2014). -- Fixed a bug in implementation of NULL. - -### Clickhouse Release 1.1.54337, 2018-01-18 {#clickhouse-release-1-1-54337-2018-01-18} - -#### New features: {#new-features-17} - -- Added support for storage of multi-dimensional arrays and tuples (`Tuple` data type) in tables. -- Support for table functions for `DESCRIBE` and `INSERT` queries. Added support for subqueries in `DESCRIBE`. 
Examples: `DESC TABLE remote('host', default.hits)`; `DESC TABLE (SELECT 1)`; `INSERT INTO TABLE FUNCTION remote('host', default.hits)`. Support for `INSERT INTO TABLE` in addition to `INSERT INTO`. -- Improved support for time zones. The `DateTime` data type can be annotated with the timezone that is used for parsing and formatting in text formats. Example: `DateTime('Europe/Moscow')`. When timezones are specified in functions for `DateTime` arguments, the return type will track the timezone, and the value will be displayed as expected. -- Added the functions `toTimeZone`, `timeDiff`, `toQuarter`, `toRelativeQuarterNum`. The `toRelativeHour`/`Minute`/`Second` functions can take a value of type `Date` as an argument. The `now` function name is case-sensitive. -- Added the `toStartOfFifteenMinutes` function (Kirill Shvakov). -- Added the `clickhouse format` tool for formatting queries. -- Added the `format_schema_path` configuration parameter (Marek Vavruşa). It is used for specifying a schema in `Cap'n Proto` format. Schema files can be located only in the specified directory. -- Added support for config substitutions (`incl` and `conf.d`) for configuration of external dictionaries and models (Pavel Yakunin). -- Added a column with documentation for the `system.settings` table (Kirill Shvakov). -- Added the `system.parts_columns` table with information about column sizes in each data part of `MergeTree` tables. -- Added the `system.models` table with information about loaded `CatBoost` machine learning models. -- Added the `mysql` and `odbc` table functions and corresponding `MySQL` and `ODBC` table engines for accessing remote databases. This functionality is in the beta stage. -- Added the possibility to pass an argument of type `AggregateFunction` for the `groupArray` aggregate function (so you can create an array of states of some aggregate function). -- Removed restrictions on various combinations of aggregate function combinators. For example, you can use `avgForEachIf` as well as `avgIfForEach` aggregate functions, which have different behaviors. -- The `-ForEach` aggregate function combinator is extended for the case of aggregate functions of multiple arguments. -- Added support for aggregate functions of `Nullable` arguments even for cases when the function returns a non-`Nullable` result (added with the contribution of Silviu Caragea). Example: `groupArray`, `groupUniqArray`, `topK`. -- Added the `max_client_network_bandwidth` setting for `clickhouse-client` (Kirill Shvakov). -- Users with the `readonly = 2` setting are allowed to work with TEMPORARY tables (CREATE, DROP, INSERT…) (Kirill Shvakov). -- Added support for using multiple consumers with the `Kafka` engine. Extended configuration options for `Kafka` (Marek Vavruša). -- Added the `intExp3` and `intExp4` functions. -- Added the `sumKahan` aggregate function. -- Added the `to*Number*OrNull` functions (such as `toUInt32OrNull`), where `*Number*` is a numeric type. -- Added support for `WITH` clauses for an `INSERT SELECT` query (author: zhang2014). -- Added settings: `http_connection_timeout`, `http_send_timeout`, `http_receive_timeout`. In particular, these settings are used for downloading data parts for replication. Changing these settings allows for faster failover if the network is overloaded. -- Added support for `ALTER` for tables of type `Null` (Anastasiya Tsarkova). -- The `reinterpretAsString` function is extended for all data types that are stored contiguously in memory. -- Added the `--silent` option for the `clickhouse-local` tool.
It suppresses printing query execution info in stderr. -- Added support for reading values of type `Date` from text in a format where the month and/or day of the month is specified using a single digit instead of two digits (Amos Bird). - -#### Performance optimizations: {#performance-optimizations} - -- Improved performance of aggregate functions `min`, `max`, `any`, `anyLast`, `anyHeavy`, `argMin`, `argMax` from string arguments. -- Improved performance of the functions `isInfinite`, `isFinite`, `isNaN`, `roundToExp2`. -- Improved performance of parsing and formatting `Date` and `DateTime` type values in text format. -- Improved performance and precision of parsing floating point numbers. -- Lowered memory usage for `JOIN` in the case when the left and right parts have columns with identical names that are not contained in `USING`. -- Improved performance of aggregate functions `varSamp`, `varPop`, `stddevSamp`, `stddevPop`, `covarSamp`, `covarPop`, `corr` by reducing computational stability. The old functions are available under the names `varSampStable`, `varPopStable`, `stddevSampStable`, `stddevPopStable`, `covarSampStable`, `covarPopStable`, `corrStable`. - -#### Bug fixes: {#bug-fixes-28} - -- Fixed data deduplication after running a `DROP` or `DETACH PARTITION` query. In the previous version, dropping a partition and inserting the same data again was not working because inserted blocks were considered duplicates. -- Fixed a bug that could lead to incorrect interpretation of the `WHERE` clause for `CREATE MATERIALIZED VIEW` queries with `POPULATE`. -- Fixed a bug in using the `root_path` parameter in the `zookeeper_servers` configuration. -- Fixed unexpected results of passing the `Date` argument to `toStartOfDay`. -- Fixed the `addMonths` and `subtractMonths` functions and the arithmetic for `INTERVAL n MONTH` in cases when the result has the previous year. -- Added missing support for the `UUID` data type for `DISTINCT`, `JOIN`, and `uniq` aggregate functions and external dictionaries (Evgeniy Ivanov). Support for `UUID` is still incomplete. -- Fixed `SummingMergeTree` behavior in cases when the rows summed to zero. -- Various fixes for the `Kafka` engine (Marek Vavruša). -- Fixed incorrect behavior of the `Join` table engine (Amos Bird). -- Fixed incorrect allocator behavior under FreeBSD and OS X. -- The `extractAll` function now supports empty matches. -- Fixed an error that blocked usage of `libressl` instead of `openssl`. -- Fixed the `CREATE TABLE AS SELECT` query from temporary tables. -- Fixed non-atomicity of updating the replication queue. This could lead to replicas being out of sync until the server restarts. -- Fixed possible overflow in `gcd`, `lcm` and `modulo` (`%` operator) (Maks Skorokhod). -- `-preprocessed` files are now created after changing `umask` (`umask` can be changed in the config). -- Fixed a bug in the background check of parts (`MergeTreePartChecker`) when using a custom partition key. -- Fixed parsing of tuples (values of the `Tuple` data type) in text formats. -- Improved error messages about incompatible types passed to `multiIf`, `array` and some other functions. -- Redesigned support for `Nullable` types. Fixed bugs that may lead to a server crash. Fixed almost all other bugs related to `NULL` support: incorrect type conversions in INSERT SELECT, insufficient support for Nullable in HAVING and PREWHERE, `join_use_nulls` mode, Nullable types as arguments of `OR` operator, etc.
-- Fixed various bugs related to internal semantics of data types. Examples: unnecessary summing of `Enum` type fields in `SummingMergeTree`; alignment of `Enum` types in `Pretty` formats, etc. -- Stricter checks for allowed combinations of composite columns. -- Fixed the overflow when specifying a very large parameter for the `FixedString` data type. -- Fixed a bug in the `topK` aggregate function in a generic case. -- Added the missing check for equality of array sizes in arguments of n-ary variants of aggregate functions with an `-Array` combinator. -- Fixed a bug in `--pager` for `clickhouse-client` (author: ks1322). -- Fixed the precision of the `exp10` function. -- Fixed the behavior of the `visitParamExtract` function for better compliance with documentation. -- Fixed the crash when incorrect data types are specified. -- Fixed the behavior of `DISTINCT` in the case when all columns are constants. -- Fixed query formatting in the case of using the `tupleElement` function with a complex constant expression as the tuple element index. -- Fixed a bug in `Dictionary` tables for `range_hashed` dictionaries. -- Fixed a bug that leads to excessive rows in the result of `FULL` and `RIGHT JOIN` (Amos Bird). -- Fixed a server crash when creating and removing temporary files in `config.d` directories during config reload. -- Fixed the `SYSTEM DROP DNS CACHE` query: the cache was flushed but addresses of cluster nodes were not updated. -- Fixed the behavior of `MATERIALIZED VIEW` after executing `DETACH TABLE` for the table under the view (Marek Vavruša). - -#### Build improvements: {#build-improvements-4} - -- The `pbuilder` tool is used for builds. The build process is almost completely independent of the build host environment. -- A single build is used for different OS versions. Packages and binaries have been made compatible with a wide range of Linux systems. -- Added the `clickhouse-test` package. It can be used to run functional tests. -- The source tarball can now be published to the repository. It can be used to reproduce the build without using GitHub. -- Added limited integration with Travis CI. Due to limits on build time in Travis, only the debug build is tested and a limited subset of tests is run. -- Added support for `Cap'n Proto` in the default build. -- Changed the format of documentation sources from `reStructuredText` to `Markdown`. -- Added support for `systemd` (Vladimir Smirnov). It is disabled by default due to incompatibility with some OS images and can be enabled manually. -- For dynamic code generation, `clang` and `lld` are embedded into the `clickhouse` binary. They can also be invoked as `clickhouse clang` and `clickhouse lld`. -- Removed usage of GNU extensions from the code. Enabled the `-Wextra` option. When building with `clang` the default is `libc++` instead of `libstdc++`. -- Extracted `clickhouse_parsers` and `clickhouse_common_io` libraries to speed up builds of various tools. - -#### Backward incompatible changes: {#backward-incompatible-changes-11} - -- The format for marks in `Log` type tables that contain `Nullable` columns was changed in a backward incompatible way. If you have these tables, you should convert them to the `TinyLog` type before starting up the new server version. To do this, replace `ENGINE = Log` with `ENGINE = TinyLog` in the corresponding `.sql` file in the `metadata` directory. If your table doesn’t have `Nullable` columns or if the type of your table is not `Log`, then you don’t need to do anything.
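A before/after sketch of the metadata edit described above, for a hypothetical table `t` defined in `metadata/<database>/t.sql`; only the engine clause changes:

```sql
-- Before: a Log table with a Nullable column.
ATTACH TABLE t
(
    `x` Nullable(UInt32)
)
ENGINE = Log

-- After: the same definition converted for the new server version.
ATTACH TABLE t
(
    `x` Nullable(UInt32)
)
ENGINE = TinyLog
```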
-- Removed the `experimental_allow_extended_storage_definition_syntax` setting. Now this feature is enabled by default. -- The `runningIncome` function was renamed to `runningDifferenceStartingWithFirstValue` to avoid confusion. -- Removed the `FROM ARRAY JOIN arr` syntax when ARRAY JOIN is specified directly after FROM with no table (Amos Bird). -- Removed the `BlockTabSeparated` format that was used solely for demonstration purposes. -- Changed the state format for aggregate functions `varSamp`, `varPop`, `stddevSamp`, `stddevPop`, `covarSamp`, `covarPop`, `corr`. If you have stored states of these aggregate functions in tables (using the `AggregateFunction` data type or materialized views with corresponding states), please write to clickhouse-feedback@yandex-team.com. -- In previous server versions there was an undocumented feature: if an aggregate function depends on parameters, you can still specify it without parameters in the AggregateFunction data type. Example: `AggregateFunction(quantiles, UInt64)` instead of `AggregateFunction(quantiles(0.5, 0.9), UInt64)`. This feature was lost. Although it was undocumented, we plan to support it again in future releases. -- Enum data types cannot be used in min/max aggregate functions. This ability will be returned in the next release. - -#### Please note when upgrading: {#please-note-when-upgrading} - -- When doing a rolling update on a cluster, at the point when some of the replicas are running the old version of ClickHouse and some are running the new version, replication is temporarily stopped and the message `unknown parameter 'shard'` appears in the log. Replication will continue after all replicas of the cluster are updated. -- If different versions of ClickHouse are running on the cluster servers, it is possible that distributed queries using the following functions will have incorrect results: `varSamp`, `varPop`, `stddevSamp`, `stddevPop`, `covarSamp`, `covarPop`, `corr`. You should update all cluster nodes. - -## [Changelog for 2017](https://github.com/ClickHouse/ClickHouse/blob/master/docs/en/changelog/2017.md) diff --git a/docs/ru/changelog/2019.md b/docs/ru/changelog/2019.md deleted file mode 100644 index 01a0756af14..00000000000 --- a/docs/ru/changelog/2019.md +++ /dev/null @@ -1,2071 +0,0 @@ ---- -en_copy: true --- - -## ClickHouse release v19.17 {#clickhouse-release-v19-17} - -### ClickHouse release v19.17.6.36, 2019-12-27 {#clickhouse-release-v19-17-6-36-2019-12-27} - -#### Bug Fix {#bug-fix} - -- Fixed potential buffer overflow in decompress. A malicious user can pass fabricated compressed data that could cause a read past the buffer. This issue was found by Eldar Zaitov from the Yandex information security team. [\#8404](https://github.com/ClickHouse/ClickHouse/pull/8404) ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Fixed possible server crash (`std::terminate`) when the server cannot send or write data in JSON or XML format with values of String data type (that require UTF-8 validation) or when compressing result data with the Brotli algorithm or in some other rare cases. [\#8384](https://github.com/ClickHouse/ClickHouse/pull/8384) ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Fixed dictionaries with a source from a ClickHouse `VIEW`; now reading such dictionaries doesn’t cause the error `There is no query`. [\#8351](https://github.com/ClickHouse/ClickHouse/pull/8351) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -- Fixed checking if a client host is allowed by host\_regexp specified in users.xml.
[\#8241](https://github.com/ClickHouse/ClickHouse/pull/8241), [\#8342](https://github.com/ClickHouse/ClickHouse/pull/8342) ([Vitaly Baranov](https://github.com/vitlibar)) -- `RENAME TABLE` for a distributed table now renames the folder containing inserted data before sending to shards. This fixes an issue with successive renames `tableA->tableB`, `tableC->tableA`. [\#8306](https://github.com/ClickHouse/ClickHouse/pull/8306) ([tavplubix](https://github.com/tavplubix)) -- `range_hashed` external dictionaries created by DDL queries now allow ranges of arbitrary numeric types. [\#8275](https://github.com/ClickHouse/ClickHouse/pull/8275) ([alesapin](https://github.com/alesapin)) -- Fixed `INSERT INTO table SELECT ... FROM mysql(...)` table function. [\#8234](https://github.com/ClickHouse/ClickHouse/pull/8234) ([tavplubix](https://github.com/tavplubix)) -- Fixed segfault in `INSERT INTO TABLE FUNCTION file()` while inserting into a file which doesn’t exist. Now in this case the file is created and then the insert is processed. [\#8177](https://github.com/ClickHouse/ClickHouse/pull/8177) ([Olga Khvostikova](https://github.com/stavrolia)) -- Fixed bitmapAnd error when intersecting an aggregated bitmap and a scalar bitmap. [\#8082](https://github.com/ClickHouse/ClickHouse/pull/8082) ([Yue Huang](https://github.com/moon03432)) -- Fixed segfault when `EXISTS` query was used without `TABLE` or `DICTIONARY` qualifier, just like `EXISTS t`. [\#8213](https://github.com/ClickHouse/ClickHouse/pull/8213) ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Fixed return type for functions `rand` and `randConstant` in case of nullable argument. Now functions always return `UInt32` and never `Nullable(UInt32)`. [\#8204](https://github.com/ClickHouse/ClickHouse/pull/8204) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -- Fixed `DROP DICTIONARY IF EXISTS db.dict`; now it doesn’t throw an exception if `db` doesn’t exist. [\#8185](https://github.com/ClickHouse/ClickHouse/pull/8185) ([Vitaly Baranov](https://github.com/vitlibar)) -- If a table wasn’t completely dropped because of a server crash, the server will try to restore and load it [\#8176](https://github.com/ClickHouse/ClickHouse/pull/8176) ([tavplubix](https://github.com/tavplubix)) -- Fixed a trivial count query for a distributed table if there are more than two shard-local tables. [\#8164](https://github.com/ClickHouse/ClickHouse/pull/8164) ([小路](https://github.com/nicelulu)) -- Fixed a bug that led to a data race in DB::BlockStreamProfileInfo::calculateRowsBeforeLimit() [\#8143](https://github.com/ClickHouse/ClickHouse/pull/8143) ([Alexander Kazakov](https://github.com/Akazz)) -- Fixed `ALTER table MOVE part` executed immediately after merging the specified part, which could cause moving a part which the specified part merged into. Now it correctly moves the specified part. [\#8104](https://github.com/ClickHouse/ClickHouse/pull/8104) ([Vladimir Chebotarev](https://github.com/excitoon)) -- Expressions for dictionaries can be specified as strings now. This is useful for calculation of attributes while extracting data from non-ClickHouse sources because it allows using non-ClickHouse syntax for those expressions. [\#8098](https://github.com/ClickHouse/ClickHouse/pull/8098) ([alesapin](https://github.com/alesapin)) -- Fixed a very rare race in `clickhouse-copier` because of an overflow in ZXid.
[\#8088](https://github.com/ClickHouse/ClickHouse/pull/8088) ([Ding Xiang Fei](https://github.com/dingxiangfei2009)) -- Fixed a bug where, after a query failed (due to “Too many simultaneous queries” for example), it would not read external tables info, and the next request would interpret this info as the beginning of the next query, causing an error like `Unknown packet from client`. [\#8084](https://github.com/ClickHouse/ClickHouse/pull/8084) ([Azat Khuzhin](https://github.com/azat)) -- Avoid null dereference after “Unknown packet X from server” [\#8071](https://github.com/ClickHouse/ClickHouse/pull/8071) ([Azat Khuzhin](https://github.com/azat)) -- Restore support of all ICU locales, add the ability to apply collations for constant expressions and add the language name to the system.collations table. [\#8051](https://github.com/ClickHouse/ClickHouse/pull/8051) ([alesapin](https://github.com/alesapin)) -- The number of streams for reading from `StorageFile` and `StorageHDFS` is now limited, to avoid exceeding the memory limit. [\#7981](https://github.com/ClickHouse/ClickHouse/pull/7981) ([alesapin](https://github.com/alesapin)) -- Fixed `CHECK TABLE` query for `*MergeTree` tables without key. [\#7979](https://github.com/ClickHouse/ClickHouse/pull/7979) ([alesapin](https://github.com/alesapin)) -- Removed the mutation number from a part name in case there were no mutations. This removal improved compatibility with older versions. [\#8250](https://github.com/ClickHouse/ClickHouse/pull/8250) ([alesapin](https://github.com/alesapin)) -- Fixed the bug that mutations are skipped for some attached parts because their data\_version is larger than the table mutation version. [\#7812](https://github.com/ClickHouse/ClickHouse/pull/7812) ([Zhichang Yu](https://github.com/yuzhichang)) -- Allow starting the server with redundant copies of parts after moving them to another device. [\#7810](https://github.com/ClickHouse/ClickHouse/pull/7810) ([Vladimir Chebotarev](https://github.com/excitoon)) -- Fixed the error “Sizes of columns doesn’t match” that might appear when using aggregate function columns. [\#7790](https://github.com/ClickHouse/ClickHouse/pull/7790) ([Boris Granveaud](https://github.com/bgranvea)) -- Now an exception will be thrown in case of using WITH TIES alongside LIMIT BY. And now it’s possible to use TOP with LIMIT BY. [\#7637](https://github.com/ClickHouse/ClickHouse/pull/7637) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) -- Fix dictionary reload if it has `invalidate_query`, which stopped updates after an exception on previous update attempts. [\#8029](https://github.com/ClickHouse/ClickHouse/pull/8029) ([alesapin](https://github.com/alesapin)) - -### ClickHouse release v19.17.4.11, 2019-11-22 {#clickhouse-release-v19-17-4-11-2019-11-22} - -#### Backward Incompatible Change {#backward-incompatible-change} - -- Using column instead of AST to store scalar subquery results for better performance. Setting `enable_scalar_subquery_optimization` was added in 19.17 and it was enabled by default. It leads to errors like [this](https://github.com/ClickHouse/ClickHouse/issues/7851) during upgrade to 19.17.2 or 19.17.3 from previous versions. This setting was disabled by default in 19.17.4, to make it possible to upgrade from 19.16 and older versions without errors. [\#7392](https://github.com/ClickHouse/ClickHouse/pull/7392) ([Amos Bird](https://github.com/amosbird)) - -#### New Feature {#new-feature} - -- Add the ability to create dictionaries with DDL queries.
[\#7360](https://github.com/ClickHouse/ClickHouse/pull/7360) ([alesapin](https://github.com/alesapin)) -- Make the `bloom_filter` index type support `LowCardinality` and `Nullable` [\#7363](https://github.com/ClickHouse/ClickHouse/issues/7363) [\#7561](https://github.com/ClickHouse/ClickHouse/pull/7561) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -- Add function `isValidJSON` to check that the passed string is valid JSON. [\#5910](https://github.com/ClickHouse/ClickHouse/issues/5910) [\#7293](https://github.com/ClickHouse/ClickHouse/pull/7293) ([Vdimir](https://github.com/Vdimir)) -- Implement `arrayCompact` function [\#7328](https://github.com/ClickHouse/ClickHouse/pull/7328) ([Memo](https://github.com/Joeywzr)) -- Created function `hex` for Decimal numbers. It works like `hex(reinterpretAsString())`, but doesn’t delete trailing zero bytes. [\#7355](https://github.com/ClickHouse/ClickHouse/pull/7355) ([Mikhail Korotov](https://github.com/millb)) -- Add `arrayFill` and `arrayReverseFill` functions, which replace elements by other elements in front/back of them in the array. [\#7380](https://github.com/ClickHouse/ClickHouse/pull/7380) ([hcz](https://github.com/hczhcz)) -- Add `CRC32IEEE()`/`CRC64()` support [\#7480](https://github.com/ClickHouse/ClickHouse/pull/7480) ([Azat Khuzhin](https://github.com/azat)) -- Implement the `char` function similar to the one in [mysql](https://dev.mysql.com/doc/refman/8.0/en/string-functions.html#function_char) [\#7486](https://github.com/ClickHouse/ClickHouse/pull/7486) ([sundyli](https://github.com/sundy-li)) -- Add `bitmapTransform` function. It transforms an array of values in a bitmap to another array of values; the result is a new bitmap [\#7598](https://github.com/ClickHouse/ClickHouse/pull/7598) ([Zhichang Yu](https://github.com/yuzhichang)) -- Implemented `javaHashUTF16LE()` function [\#7651](https://github.com/ClickHouse/ClickHouse/pull/7651) ([achimbab](https://github.com/achimbab)) -- Add `_shard_num` virtual column for the Distributed engine [\#7624](https://github.com/ClickHouse/ClickHouse/pull/7624) ([Azat Khuzhin](https://github.com/azat)) - -#### Experimental Feature {#experimental-feature} - -- Support for processors (new query execution pipeline) in `MergeTree`. [\#7181](https://github.com/ClickHouse/ClickHouse/pull/7181) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) - -#### Bug Fix {#bug-fix-1} - -- Fix incorrect float parsing in `Values` [\#7817](https://github.com/ClickHouse/ClickHouse/issues/7817) [\#7870](https://github.com/ClickHouse/ClickHouse/pull/7870) ([tavplubix](https://github.com/tavplubix)) -- Fix rare deadlock which can happen when trace\_log is enabled. [\#7838](https://github.com/ClickHouse/ClickHouse/pull/7838) ([filimonov](https://github.com/filimonov)) -- Prevent message duplication when a Kafka table being produced to has MVs selecting from it [\#7265](https://github.com/ClickHouse/ClickHouse/pull/7265) ([Ivan](https://github.com/abyss7)) -- Support for `Array(LowCardinality(Nullable(String)))` in `IN`. Resolves [\#7364](https://github.com/ClickHouse/ClickHouse/issues/7364) [\#7366](https://github.com/ClickHouse/ClickHouse/pull/7366) ([achimbab](https://github.com/achimbab)) -- Add handling of `SQL_TINYINT` and `SQL_BIGINT`, and fix handling of `SQL_FLOAT` data source types in ODBC Bridge.
-- Fix aggregation (`avg` and quantiles) over empty decimal columns. [\#7431](https://github.com/ClickHouse/ClickHouse/pull/7431) ([Andrey Konyaev](https://github.com/akonyaev90))
-- Fix `INSERT` into Distributed with `MATERIALIZED` columns. [\#7377](https://github.com/ClickHouse/ClickHouse/pull/7377) ([Azat Khuzhin](https://github.com/azat))
-- Make `MOVE PARTITION` work if some parts of the partition are already on the destination disk or volume. [\#7434](https://github.com/ClickHouse/ClickHouse/pull/7434) ([Vladimir Chebotarev](https://github.com/excitoon))
-- Fixed a bug with hardlinks failing to be created during mutations in `ReplicatedMergeTree` in multi-disk configurations. [\#7558](https://github.com/ClickHouse/ClickHouse/pull/7558) ([Vladimir Chebotarev](https://github.com/excitoon))
-- Fixed a bug with a mutation on a MergeTree when the whole part remains unchanged and the best space is being found on another disk. [\#7602](https://github.com/ClickHouse/ClickHouse/pull/7602) ([Vladimir Chebotarev](https://github.com/excitoon))
-- Fixed a bug with `keep_free_space_ratio` not being read from disks configuration. [\#7645](https://github.com/ClickHouse/ClickHouse/pull/7645) ([Vladimir Chebotarev](https://github.com/excitoon))
-- Fix bug with tables that contain only `Tuple` columns or columns with complex paths. Fixes [7541](https://github.com/ClickHouse/ClickHouse/issues/7541). [\#7545](https://github.com/ClickHouse/ClickHouse/pull/7545) ([alesapin](https://github.com/alesapin))
-- Do not account memory for the Buffer engine in the max\_memory\_usage limit. [\#7552](https://github.com/ClickHouse/ClickHouse/pull/7552) ([Azat Khuzhin](https://github.com/azat))
-- Fix final mark usage in `MergeTree` tables ordered by `tuple()`. In rare cases it could lead to a `Can't adjust last granule` error during select. [\#7639](https://github.com/ClickHouse/ClickHouse/pull/7639) ([Anton Popov](https://github.com/CurtizJ))
-- Fix bug in mutations that have a predicate with actions that require context (for example, functions for JSON), which may lead to crashes or strange exceptions. [\#7664](https://github.com/ClickHouse/ClickHouse/pull/7664) ([alesapin](https://github.com/alesapin))
-- Fix mismatch of database and table names escaping in `data/` and `shadow/` directories. [\#7575](https://github.com/ClickHouse/ClickHouse/pull/7575) ([Alexander Burmak](https://github.com/Alex-Burmak))
-- Support duplicated keys in RIGHT\|FULL JOINs, e.g. `ON t.x = u.x AND t.x = u.y`. Fix crash in this case. [\#7586](https://github.com/ClickHouse/ClickHouse/pull/7586) ([Artem Zuikov](https://github.com/4ertus2))
-- Fix `Not found column in block` when joining on an expression with RIGHT or FULL JOIN. [\#7641](https://github.com/ClickHouse/ClickHouse/pull/7641) ([Artem Zuikov](https://github.com/4ertus2))
-- One more attempt to fix the infinite loop in `PrettySpace` format. [\#7591](https://github.com/ClickHouse/ClickHouse/pull/7591) ([Olga Khvostikova](https://github.com/stavrolia))
-- Fix bug in `concat` function when all arguments were `FixedString` of the same size. [\#7635](https://github.com/ClickHouse/ClickHouse/pull/7635) ([alesapin](https://github.com/alesapin))
-- Fixed exception in case of using 1 argument while defining S3, URL and HDFS storages. [\#7618](https://github.com/ClickHouse/ClickHouse/pull/7618) ([Vladimir Chebotarev](https://github.com/excitoon))
-- Fix scope of the InterpreterSelectQuery for views with query. [\#7601](https://github.com/ClickHouse/ClickHouse/pull/7601) ([Azat Khuzhin](https://github.com/azat))
-
-#### Improvement {#improvement}
-
-- `Nullable` columns are recognized and NULL-values are handled correctly by ODBC-bridge. [\#7402](https://github.com/ClickHouse/ClickHouse/pull/7402) ([Vasily Nemkov](https://github.com/Enmk))
-- Write current batch for distributed send atomically. [\#7600](https://github.com/ClickHouse/ClickHouse/pull/7600) ([Azat Khuzhin](https://github.com/azat))
-- Throw an exception if we cannot detect the table for a column name in a query. [\#7358](https://github.com/ClickHouse/ClickHouse/pull/7358) ([Artem Zuikov](https://github.com/4ertus2))
-- Add `merge_max_block_size` setting to `MergeTreeSettings`. [\#7412](https://github.com/ClickHouse/ClickHouse/pull/7412) ([Artem Zuikov](https://github.com/4ertus2))
-- Queries with `HAVING` and without `GROUP BY` assume grouping by a constant. So, `SELECT 1 HAVING 1` now returns a result. [\#7496](https://github.com/ClickHouse/ClickHouse/pull/7496) ([Amos Bird](https://github.com/amosbird))
-- Support parsing `(X,)` as a tuple, similar to Python. [\#7501](https://github.com/ClickHouse/ClickHouse/pull/7501), [\#7562](https://github.com/ClickHouse/ClickHouse/pull/7562) ([Amos Bird](https://github.com/amosbird))
-- Make the `range` function behave almost like the Python one (a sketch of these parsing and `range` changes follows after this list). [\#7518](https://github.com/ClickHouse/ClickHouse/pull/7518) ([sundyli](https://github.com/sundy-li))
-- Add `constraints` columns to table `system.settings`. [\#7553](https://github.com/ClickHouse/ClickHouse/pull/7553) ([Vitaly Baranov](https://github.com/vitlibar))
-- Better Null format for the TCP handler, so that it’s possible to use `select ignore() from table format Null` for performance measurements via clickhouse-client. [\#7606](https://github.com/ClickHouse/ClickHouse/pull/7606) ([Amos Bird](https://github.com/amosbird))
-- Queries like `CREATE TABLE ... AS (SELECT (1, 2))` are parsed correctly. [\#7542](https://github.com/ClickHouse/ClickHouse/pull/7542) ([hcz](https://github.com/hczhcz))
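A minimal sketch of the Python-like parsing and `range` behavior from the list above; the results in the comments are what the descriptions imply:

```sql
SELECT 1 HAVING 1;            -- returns a row: HAVING without GROUP BY assumes grouping by a constant
SELECT (1,) AS t;             -- a one-element tuple, as in Python
SELECT range(5) AS r1,        -- [0, 1, 2, 3, 4]
       range(2, 8) AS r2,     -- [2, 3, 4, 5, 6, 7]
       range(0, 10, 3) AS r3; -- [0, 3, 6, 9]
```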
-
-#### Performance Improvement {#performance-improvement}
-
-- The performance of aggregation over short string keys is improved. [\#6243](https://github.com/ClickHouse/ClickHouse/pull/6243) ([Alexander Kuzmenkov](https://github.com/akuzm), [Amos Bird](https://github.com/amosbird))
-- Run another pass of syntax/expression analysis to get potential optimizations after constant predicates are folded. [\#7497](https://github.com/ClickHouse/ClickHouse/pull/7497) ([Amos Bird](https://github.com/amosbird))
-- Use storage meta info to evaluate trivial `SELECT count() FROM table;`. [\#7510](https://github.com/ClickHouse/ClickHouse/pull/7510) ([Amos Bird](https://github.com/amosbird), [alexey-milovidov](https://github.com/alexey-milovidov))
-- Vectorize processing `arrayReduce` similar to Aggregator `addBatch`. [\#7608](https://github.com/ClickHouse/ClickHouse/pull/7608) ([Amos Bird](https://github.com/amosbird))
-- Minor improvements in performance of `Kafka` consumption. [\#7475](https://github.com/ClickHouse/ClickHouse/pull/7475) ([Ivan](https://github.com/abyss7))
-
-#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement}
-
-- Add support for cross-compiling to the CPU architecture AARCH64. Refactor the packager script. [\#7370](https://github.com/ClickHouse/ClickHouse/pull/7370) [\#7539](https://github.com/ClickHouse/ClickHouse/pull/7539) ([Ivan](https://github.com/abyss7))
-- Unpack darwin-x86\_64 and linux-aarch64 toolchains into a mounted Docker volume when building packages. [\#7534](https://github.com/ClickHouse/ClickHouse/pull/7534) ([Ivan](https://github.com/abyss7))
-- Update Docker Image for Binary Packager. [\#7474](https://github.com/ClickHouse/ClickHouse/pull/7474) ([Ivan](https://github.com/abyss7))
-- Fixed compile errors on MacOS Catalina. [\#7585](https://github.com/ClickHouse/ClickHouse/pull/7585) ([Ernest Poletaev](https://github.com/ernestp))
-- Some refactoring in query analysis logic: split a complex class into several simple ones. [\#7454](https://github.com/ClickHouse/ClickHouse/pull/7454) ([Artem Zuikov](https://github.com/4ertus2))
-- Fix build without submodules. [\#7295](https://github.com/ClickHouse/ClickHouse/pull/7295) ([proller](https://github.com/proller))
-- Better `add_globs` in CMake files. [\#7418](https://github.com/ClickHouse/ClickHouse/pull/7418) ([Amos Bird](https://github.com/amosbird))
-- Remove hardcoded paths in `unwind` target. [\#7460](https://github.com/ClickHouse/ClickHouse/pull/7460) ([Konstantin Podshumok](https://github.com/podshumok))
-- Allow using MySQL format without SSL. [\#7524](https://github.com/ClickHouse/ClickHouse/pull/7524) ([proller](https://github.com/proller))
-
-#### Other {#other}
-
-- Added ANTLR4 grammar for the ClickHouse SQL dialect. [\#7595](https://github.com/ClickHouse/ClickHouse/issues/7595) [\#7596](https://github.com/ClickHouse/ClickHouse/pull/7596) ([alexey-milovidov](https://github.com/alexey-milovidov))
-
-## ClickHouse release v19.16 {#clickhouse-release-v19-16}
-
-#### ClickHouse release v19.16.14.65, 2020-03-25
-
-* Fixed a bug in batched calculations of ternary logical operations on multiple arguments (more than 10). [#8718](https://github.com/ClickHouse/ClickHouse/pull/8718) ([Alexander Kazakov](https://github.com/Akazz)) This bugfix was backported to version 19.16 by a special request from Altinity.
-
-#### ClickHouse release v19.16.14.65, 2020-03-05 {#clickhouse-release-v19-16-14-65-2020-03-05}
-
-- Fix distributed subqueries incompatibility with older CH versions. Fixes [\#7851](https://github.com/ClickHouse/ClickHouse/issues/7851) [(tavplubix)](https://github.com/tavplubix)
-- When executing a `CREATE` query, fold constant expressions in storage engine arguments. Replace an empty database name with the current database. Fixes [\#6508](https://github.com/ClickHouse/ClickHouse/issues/6508), [\#3492](https://github.com/ClickHouse/ClickHouse/issues/3492). Also fix the check for a local address in `ClickHouseDictionarySource`. [\#9262](https://github.com/ClickHouse/ClickHouse/pull/9262) [(tavplubix)](https://github.com/tavplubix)
-- Now background merges in the `*MergeTree` table engines family preserve the storage policy volume order more accurately. [\#8549](https://github.com/ClickHouse/ClickHouse/pull/8549) ([Vladimir Chebotarev](https://github.com/excitoon))
-- Prevent losing data in `Kafka` in rare cases when an exception happens after reading the suffix but before commit. Fixes [\#9378](https://github.com/ClickHouse/ClickHouse/issues/9378). Related: [\#7175](https://github.com/ClickHouse/ClickHouse/issues/7175) [\#9507](https://github.com/ClickHouse/ClickHouse/pull/9507) [(filimonov)](https://github.com/filimonov)
-- Fix bug leading to server termination when trying to use / drop a `Kafka` table created with wrong parameters. Fixes [\#9494](https://github.com/ClickHouse/ClickHouse/issues/9494). Incorporates [\#9507](https://github.com/ClickHouse/ClickHouse/issues/9507). [\#9513](https://github.com/ClickHouse/ClickHouse/pull/9513) [(filimonov)](https://github.com/filimonov)
-- Allow using `MaterializedView` with subqueries above `Kafka` tables. [\#8197](https://github.com/ClickHouse/ClickHouse/pull/8197) ([filimonov](https://github.com/filimonov))
-
-#### New Feature {#new-feature-1}
-
-- Add `deduplicate_blocks_in_dependent_materialized_views` option to control the behaviour of idempotent inserts into tables with materialized views. This new feature was added to the bugfix release by a special request from Altinity. [\#9070](https://github.com/ClickHouse/ClickHouse/pull/9070) [(urykhy)](https://github.com/urykhy)
-
-### ClickHouse release v19.16.2.2, 2019-10-30 {#clickhouse-release-v19-16-2-2-2019-10-30}
-
-#### Backward Incompatible Change {#backward-incompatible-change-1}
-
-- Add missing arity validation for count/countIf. [\#7095](https://github.com/ClickHouse/ClickHouse/issues/7095) [\#7298](https://github.com/ClickHouse/ClickHouse/pull/7298) ([Vdimir](https://github.com/Vdimir))
-- Remove legacy `asterisk_left_columns_only` setting (it was disabled by default). [\#7335](https://github.com/ClickHouse/ClickHouse/pull/7335) ([Artem Zuikov](https://github.com/4ertus2))
-- Format strings for the Template data format are now specified in files. [\#7118](https://github.com/ClickHouse/ClickHouse/pull/7118) ([tavplubix](https://github.com/tavplubix))
-
-#### New Feature {#new-feature-2}
-
-- Introduce uniqCombined64() to calculate cardinality greater than UINT\_MAX. [\#7213](https://github.com/ClickHouse/ClickHouse/pull/7213), [\#7222](https://github.com/ClickHouse/ClickHouse/pull/7222) ([Azat Khuzhin](https://github.com/azat))
-- Support Bloom filter indexes on Array columns. [\#6984](https://github.com/ClickHouse/ClickHouse/pull/6984) ([achimbab](https://github.com/achimbab))
-- Add a function `getMacro(name)` that returns a String with the value of the corresponding `<macros>` substitution from the server configuration. [\#7240](https://github.com/ClickHouse/ClickHouse/pull/7240) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Set two configuration options for a dictionary based on an HTTP source: `credentials` and `http-headers`. [\#7092](https://github.com/ClickHouse/ClickHouse/pull/7092) ([Guillaume Tassery](https://github.com/YiuRULE))
-- Add a new ProfileEvent `Merge` that counts the number of launched background merges. [\#7093](https://github.com/ClickHouse/ClickHouse/pull/7093) ([Mikhail Korotov](https://github.com/millb))
-- Add fullHostName function that returns a fully qualified domain name. [\#7263](https://github.com/ClickHouse/ClickHouse/issues/7263) [\#7291](https://github.com/ClickHouse/ClickHouse/pull/7291) ([sundyli](https://github.com/sundy-li))
-- Add functions `arraySplit` and `arrayReverseSplit`, which split an array by “cut off” conditions. They are useful in time sequence handling. [\#7294](https://github.com/ClickHouse/ClickHouse/pull/7294) ([hcz](https://github.com/hczhcz))
-- Add new functions that return the Array of all matched indices in the multiMatch family of functions. [\#7299](https://github.com/ClickHouse/ClickHouse/pull/7299) ([Danila Kutenin](https://github.com/danlark1))
-- Add a new database engine `Lazy` that is optimized for storing a large number of small -Log tables.
[\#7171](https://github.com/ClickHouse/ClickHouse/pull/7171) ([Nikita - Vasilev](https://github.com/nikvas0)) -- Add aggregate functions groupBitmapAnd, -Or, -Xor for bitmap columns. [\#7109](https://github.com/ClickHouse/ClickHouse/pull/7109) ([Zhichang - Yu](https://github.com/yuzhichang)) -- Add aggregate function combinators -OrNull and -OrDefault, which return null - or default values when there is nothing to aggregate. - [\#7331](https://github.com/ClickHouse/ClickHouse/pull/7331) - ([hcz](https://github.com/hczhcz)) -- Introduce CustomSeparated data format that supports custom escaping and - delimiter rules. [\#7118](https://github.com/ClickHouse/ClickHouse/pull/7118) - ([tavplubix](https://github.com/tavplubix)) -- Support Redis as source of external dictionary. [\#4361](https://github.com/ClickHouse/ClickHouse/pull/4361) [\#6962](https://github.com/ClickHouse/ClickHouse/pull/6962) ([comunodi](https://github.com/comunodi), [Anton - Popov](https://github.com/CurtizJ)) - -#### Bug Fix {#bug-fix-2} - -- Fix wrong query result if it has `WHERE IN (SELECT ...)` section and `optimize_read_in_order` is - used. [\#7371](https://github.com/ClickHouse/ClickHouse/pull/7371) ([Anton - Popov](https://github.com/CurtizJ)) -- Disabled MariaDB authentication plugin, which depends on files outside of project. - [\#7140](https://github.com/ClickHouse/ClickHouse/pull/7140) ([Yuriy - Baranov](https://github.com/yurriy)) -- Fix exception `Cannot convert column ... because it is constant but values of constants are different in source and result` which could rarely happen when functions `now()`, `today()`, - `yesterday()`, `randConstant()` are used. - [\#7156](https://github.com/ClickHouse/ClickHouse/pull/7156) ([Nikolai - Kochetov](https://github.com/KochetovNicolai)) -- Fixed issue of using HTTP keep alive timeout instead of TCP keep alive timeout. - [\#7351](https://github.com/ClickHouse/ClickHouse/pull/7351) ([Vasily - Nemkov](https://github.com/Enmk)) -- Fixed a segmentation fault in groupBitmapOr (issue [\#7109](https://github.com/ClickHouse/ClickHouse/issues/7109)). - [\#7289](https://github.com/ClickHouse/ClickHouse/pull/7289) ([Zhichang - Yu](https://github.com/yuzhichang)) -- For materialized views the commit for Kafka is called after all data were written. - [\#7175](https://github.com/ClickHouse/ClickHouse/pull/7175) ([Ivan](https://github.com/abyss7)) -- Fixed wrong `duration_ms` value in `system.part_log` table. It was ten times off. - [\#7172](https://github.com/ClickHouse/ClickHouse/pull/7172) ([Vladimir - Chebotarev](https://github.com/excitoon)) -- A quick fix to resolve crash in LIVE VIEW table and re-enabling all LIVE VIEW tests. - [\#7201](https://github.com/ClickHouse/ClickHouse/pull/7201) - ([vzakaznikov](https://github.com/vzakaznikov)) -- Serialize NULL values correctly in min/max indexes of MergeTree parts. - [\#7234](https://github.com/ClickHouse/ClickHouse/pull/7234) ([Alexander - Kuzmenkov](https://github.com/akuzm)) -- Don’t put virtual columns to .sql metadata when table is created as `CREATE TABLE AS`. - [\#7183](https://github.com/ClickHouse/ClickHouse/pull/7183) ([Ivan](https://github.com/abyss7)) -- Fix segmentation fault in `ATTACH PART` query. - [\#7185](https://github.com/ClickHouse/ClickHouse/pull/7185) - ([alesapin](https://github.com/alesapin)) -- Fix wrong result for some queries given by the optimization of empty IN subqueries and empty - INNER/RIGHT JOIN. 
[\#7284](https://github.com/ClickHouse/ClickHouse/pull/7284) ([Nikolai - Kochetov](https://github.com/KochetovNicolai)) -- Fixing AddressSanitizer error in the LIVE VIEW getHeader() method. - [\#7271](https://github.com/ClickHouse/ClickHouse/pull/7271) - ([vzakaznikov](https://github.com/vzakaznikov)) - -#### Improvement {#improvement-1} - -- Add a message in case of queue\_wait\_max\_ms wait takes place. - [\#7390](https://github.com/ClickHouse/ClickHouse/pull/7390) ([Azat - Khuzhin](https://github.com/azat)) -- Made setting `s3_min_upload_part_size` table-level. - [\#7059](https://github.com/ClickHouse/ClickHouse/pull/7059) ([Vladimir - Chebotarev](https://github.com/excitoon)) -- Check TTL in StorageFactory. [\#7304](https://github.com/ClickHouse/ClickHouse/pull/7304) - ([sundyli](https://github.com/sundy-li)) -- Squash left-hand blocks in partial merge join (optimization). - [\#7122](https://github.com/ClickHouse/ClickHouse/pull/7122) ([Artem - Zuikov](https://github.com/4ertus2)) -- Do not allow non-deterministic functions in mutations of Replicated table engines, because this - can introduce inconsistencies between replicas. - [\#7247](https://github.com/ClickHouse/ClickHouse/pull/7247) ([Alexander - Kazakov](https://github.com/Akazz)) -- Disable memory tracker while converting exception stack trace to string. It can prevent the loss - of error messages of type `Memory limit exceeded` on server, which caused the `Attempt to read after eof` exception on client. [\#7264](https://github.com/ClickHouse/ClickHouse/pull/7264) - ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -- Miscellaneous format improvements. Resolves - [\#6033](https://github.com/ClickHouse/ClickHouse/issues/6033), - [\#2633](https://github.com/ClickHouse/ClickHouse/issues/2633), - [\#6611](https://github.com/ClickHouse/ClickHouse/issues/6611), - [\#6742](https://github.com/ClickHouse/ClickHouse/issues/6742) - [\#7215](https://github.com/ClickHouse/ClickHouse/pull/7215) - ([tavplubix](https://github.com/tavplubix)) -- ClickHouse ignores values on the right side of IN operator that are not convertible to the left - side type. Make it work properly for compound types – Array and Tuple. - [\#7283](https://github.com/ClickHouse/ClickHouse/pull/7283) ([Alexander - Kuzmenkov](https://github.com/akuzm)) -- Support missing inequalities for ASOF JOIN. It’s possible to join less-or-equal variant and strict - greater and less variants for ASOF column in ON syntax. - [\#7282](https://github.com/ClickHouse/ClickHouse/pull/7282) ([Artem - Zuikov](https://github.com/4ertus2)) -- Optimize partial merge join. [\#7070](https://github.com/ClickHouse/ClickHouse/pull/7070) - ([Artem Zuikov](https://github.com/4ertus2)) -- Do not use more than 98K of memory in uniqCombined functions. - [\#7236](https://github.com/ClickHouse/ClickHouse/pull/7236), - [\#7270](https://github.com/ClickHouse/ClickHouse/pull/7270) ([Azat - Khuzhin](https://github.com/azat)) -- Flush parts of right-hand joining table on disk in PartialMergeJoin (if there is not enough - memory). Load data back when needed. [\#7186](https://github.com/ClickHouse/ClickHouse/pull/7186) - ([Artem Zuikov](https://github.com/4ertus2)) - -#### Performance Improvement {#performance-improvement-1} - -- Speed up joinGet with const arguments by avoiding data duplication. - [\#7359](https://github.com/ClickHouse/ClickHouse/pull/7359) ([Amos - Bird](https://github.com/amosbird)) -- Return early if the subquery is empty. 
- [\#7007](https://github.com/ClickHouse/ClickHouse/pull/7007) ([小路](https://github.com/nicelulu)) -- Optimize parsing of SQL expression in Values. - [\#6781](https://github.com/ClickHouse/ClickHouse/pull/6781) - ([tavplubix](https://github.com/tavplubix)) - -#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-1} - -- Disable some contribs for cross-compilation to Mac OS. - [\#7101](https://github.com/ClickHouse/ClickHouse/pull/7101) ([Ivan](https://github.com/abyss7)) -- Add missing linking with PocoXML for clickhouse\_common\_io. - [\#7200](https://github.com/ClickHouse/ClickHouse/pull/7200) ([Azat - Khuzhin](https://github.com/azat)) -- Accept multiple test filter arguments in clickhouse-test. - [\#7226](https://github.com/ClickHouse/ClickHouse/pull/7226) ([Alexander - Kuzmenkov](https://github.com/akuzm)) -- Enable musl and jemalloc for ARM. [\#7300](https://github.com/ClickHouse/ClickHouse/pull/7300) - ([Amos Bird](https://github.com/amosbird)) -- Added `--client-option` parameter to `clickhouse-test` to pass additional parameters to client. - [\#7277](https://github.com/ClickHouse/ClickHouse/pull/7277) ([Nikolai - Kochetov](https://github.com/KochetovNicolai)) -- Preserve existing configs on rpm package upgrade. - [\#7103](https://github.com/ClickHouse/ClickHouse/pull/7103) - ([filimonov](https://github.com/filimonov)) -- Fix errors detected by PVS. [\#7153](https://github.com/ClickHouse/ClickHouse/pull/7153) ([Artem - Zuikov](https://github.com/4ertus2)) -- Fix build for Darwin. [\#7149](https://github.com/ClickHouse/ClickHouse/pull/7149) - ([Ivan](https://github.com/abyss7)) -- glibc 2.29 compatibility. [\#7142](https://github.com/ClickHouse/ClickHouse/pull/7142) ([Amos - Bird](https://github.com/amosbird)) -- Make sure dh\_clean does not touch potential source files. - [\#7205](https://github.com/ClickHouse/ClickHouse/pull/7205) ([Amos - Bird](https://github.com/amosbird)) -- Attempt to avoid conflict when updating from altinity rpm - it has config file packaged separately - in clickhouse-server-common. [\#7073](https://github.com/ClickHouse/ClickHouse/pull/7073) - ([filimonov](https://github.com/filimonov)) -- Optimize some header files for faster rebuilds. - [\#7212](https://github.com/ClickHouse/ClickHouse/pull/7212), - [\#7231](https://github.com/ClickHouse/ClickHouse/pull/7231) ([Alexander - Kuzmenkov](https://github.com/akuzm)) -- Add performance tests for Date and DateTime. [\#7332](https://github.com/ClickHouse/ClickHouse/pull/7332) ([Vasily - Nemkov](https://github.com/Enmk)) -- Fix some tests that contained non-deterministic mutations. - [\#7132](https://github.com/ClickHouse/ClickHouse/pull/7132) ([Alexander - Kazakov](https://github.com/Akazz)) -- Add build with MemorySanitizer to CI. [\#7066](https://github.com/ClickHouse/ClickHouse/pull/7066) - ([Alexander Kuzmenkov](https://github.com/akuzm)) -- Avoid use of uninitialized values in MetricsTransmitter. - [\#7158](https://github.com/ClickHouse/ClickHouse/pull/7158) ([Azat - Khuzhin](https://github.com/azat)) -- Fix some issues in Fields found by MemorySanitizer. - [\#7135](https://github.com/ClickHouse/ClickHouse/pull/7135), - [\#7179](https://github.com/ClickHouse/ClickHouse/pull/7179) ([Alexander - Kuzmenkov](https://github.com/akuzm)), [\#7376](https://github.com/ClickHouse/ClickHouse/pull/7376) - ([Amos Bird](https://github.com/amosbird)) -- Fix undefined behavior in murmurhash32. 
[\#7388](https://github.com/ClickHouse/ClickHouse/pull/7388) ([Amos - Bird](https://github.com/amosbird)) -- Fix undefined behavior in StoragesInfoStream. [\#7384](https://github.com/ClickHouse/ClickHouse/pull/7384) - ([tavplubix](https://github.com/tavplubix)) -- Fixed constant expressions folding for external database engines (MySQL, ODBC, JDBC). In previous - versions it wasn’t working for multiple constant expressions and was not working at all for Date, - DateTime and UUID. This fixes [\#7245](https://github.com/ClickHouse/ClickHouse/issues/7245) - [\#7252](https://github.com/ClickHouse/ClickHouse/pull/7252) - ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Fixing ThreadSanitizer data race error in the LIVE VIEW when accessing no\_users\_thread variable. - [\#7353](https://github.com/ClickHouse/ClickHouse/pull/7353) - ([vzakaznikov](https://github.com/vzakaznikov)) -- Get rid of malloc symbols in libcommon - [\#7134](https://github.com/ClickHouse/ClickHouse/pull/7134), - [\#7065](https://github.com/ClickHouse/ClickHouse/pull/7065) ([Amos - Bird](https://github.com/amosbird)) -- Add global flag ENABLE\_LIBRARIES for disabling all libraries. - [\#7063](https://github.com/ClickHouse/ClickHouse/pull/7063) - ([proller](https://github.com/proller)) - -#### Code cleanup {#code-cleanup} - -- Generalize configuration repository to prepare for DDL for Dictionaries. [\#7155](https://github.com/ClickHouse/ClickHouse/pull/7155) - ([alesapin](https://github.com/alesapin)) -- Parser for dictionaries DDL without any semantic. - [\#7209](https://github.com/ClickHouse/ClickHouse/pull/7209) - ([alesapin](https://github.com/alesapin)) -- Split ParserCreateQuery into different smaller parsers. - [\#7253](https://github.com/ClickHouse/ClickHouse/pull/7253) - ([alesapin](https://github.com/alesapin)) -- Small refactoring and renaming near external dictionaries. - [\#7111](https://github.com/ClickHouse/ClickHouse/pull/7111) - ([alesapin](https://github.com/alesapin)) -- Refactor some code to prepare for role-based access control. [\#7235](https://github.com/ClickHouse/ClickHouse/pull/7235) ([Vitaly - Baranov](https://github.com/vitlibar)) -- Some improvements in DatabaseOrdinary code. - [\#7086](https://github.com/ClickHouse/ClickHouse/pull/7086) ([Nikita - Vasilev](https://github.com/nikvas0)) -- Do not use iterators in find() and emplace() methods of hash tables. - [\#7026](https://github.com/ClickHouse/ClickHouse/pull/7026) ([Alexander - Kuzmenkov](https://github.com/akuzm)) -- Fix getMultipleValuesFromConfig in case when parameter root is not empty. [\#7374](https://github.com/ClickHouse/ClickHouse/pull/7374) - ([Mikhail Korotov](https://github.com/millb)) -- Remove some copy-paste (TemporaryFile and TemporaryFileStream) - [\#7166](https://github.com/ClickHouse/ClickHouse/pull/7166) ([Artem - Zuikov](https://github.com/4ertus2)) -- Improved code readability a little bit (`MergeTreeData::getActiveContainingPart`). - [\#7361](https://github.com/ClickHouse/ClickHouse/pull/7361) ([Vladimir - Chebotarev](https://github.com/excitoon)) -- Wait for all scheduled jobs, which are using local objects, if `ThreadPool::schedule(...)` throws - an exception. Rename `ThreadPool::schedule(...)` to `ThreadPool::scheduleOrThrowOnError(...)` and - fix comments to make obvious that it may throw. 
- [\#7350](https://github.com/ClickHouse/ClickHouse/pull/7350) - ([tavplubix](https://github.com/tavplubix)) - -## ClickHouse release 19.15 {#clickhouse-release-19-15} - -### ClickHouse release 19.15.4.10, 2019-10-31 {#clickhouse-release-19-15-4-10-2019-10-31} - -#### Bug Fix {#bug-fix-3} - -- Added handling of SQL\_TINYINT and SQL\_BIGINT, and fix handling of SQL\_FLOAT data source types in ODBC Bridge. - [\#7491](https://github.com/ClickHouse/ClickHouse/pull/7491) ([Denis Glazachev](https://github.com/traceon)) -- Allowed to have some parts on destination disk or volume in MOVE PARTITION. - [\#7434](https://github.com/ClickHouse/ClickHouse/pull/7434) ([Vladimir Chebotarev](https://github.com/excitoon)) -- Fixed NULL-values in nullable columns through ODBC-bridge. - [\#7402](https://github.com/ClickHouse/ClickHouse/pull/7402) ([Vasily Nemkov](https://github.com/Enmk)) -- Fixed INSERT into Distributed non local node with MATERIALIZED columns. - [\#7377](https://github.com/ClickHouse/ClickHouse/pull/7377) ([Azat Khuzhin](https://github.com/azat)) -- Fixed function getMultipleValuesFromConfig. - [\#7374](https://github.com/ClickHouse/ClickHouse/pull/7374) ([Mikhail Korotov](https://github.com/millb)) -- Fixed issue of using HTTP keep alive timeout instead of TCP keep alive timeout. - [\#7351](https://github.com/ClickHouse/ClickHouse/pull/7351) ([Vasily Nemkov](https://github.com/Enmk)) -- Wait for all jobs to finish on exception (fixes rare segfaults). - [\#7350](https://github.com/ClickHouse/ClickHouse/pull/7350) ([tavplubix](https://github.com/tavplubix)) -- Don’t push to MVs when inserting into Kafka table. - [\#7265](https://github.com/ClickHouse/ClickHouse/pull/7265) ([Ivan](https://github.com/abyss7)) -- Disable memory tracker for exception stack. - [\#7264](https://github.com/ClickHouse/ClickHouse/pull/7264) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -- Fixed bad code in transforming query for external database. - [\#7252](https://github.com/ClickHouse/ClickHouse/pull/7252) ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Avoid use of uninitialized values in MetricsTransmitter. - [\#7158](https://github.com/ClickHouse/ClickHouse/pull/7158) ([Azat Khuzhin](https://github.com/azat)) -- Added example config with macros for tests ([alexey-milovidov](https://github.com/alexey-milovidov)) - -### ClickHouse release 19.15.3.6, 2019-10-09 {#clickhouse-release-19-15-3-6-2019-10-09} - -#### Bug Fix {#bug-fix-4} - -- Fixed bad\_variant in hashed dictionary. - ([alesapin](https://github.com/alesapin)) -- Fixed up bug with segmentation fault in ATTACH PART query. - ([alesapin](https://github.com/alesapin)) -- Fixed time calculation in `MergeTreeData`. - ([Vladimir Chebotarev](https://github.com/excitoon)) -- Commit to Kafka explicitly after the writing is finalized. - [\#7175](https://github.com/ClickHouse/ClickHouse/pull/7175) ([Ivan](https://github.com/abyss7)) -- Serialize NULL values correctly in min/max indexes of MergeTree parts. - [\#7234](https://github.com/ClickHouse/ClickHouse/pull/7234) ([Alexander Kuzmenkov](https://github.com/akuzm)) - -### ClickHouse release 19.15.2.2, 2019-10-01 {#clickhouse-release-19-15-2-2-2019-10-01} - -#### New Feature {#new-feature-3} - -- Tiered storage: support to use multiple storage volumes for tables with MergeTree engine. It’s possible to store fresh data on SSD and automatically move old data to HDD. ([example](https://clickhouse.github.io/clickhouse-presentations/meetup30/new_features/#12)). 
[\#4918](https://github.com/ClickHouse/ClickHouse/pull/4918) ([Igr](https://github.com/ObjatieGroba)) [\#6489](https://github.com/ClickHouse/ClickHouse/pull/6489) ([alesapin](https://github.com/alesapin))
-- Add table function `input` for reading incoming data in an `INSERT SELECT` query. [\#5450](https://github.com/ClickHouse/ClickHouse/pull/5450) ([palasonic1](https://github.com/palasonic1)) [\#6832](https://github.com/ClickHouse/ClickHouse/pull/6832) ([Anton Popov](https://github.com/CurtizJ))
-- Add a `sparse_hashed` dictionary layout that is functionally equivalent to the `hashed` layout, but is more memory efficient. It uses about half as much memory at the cost of slower value retrieval. [\#6894](https://github.com/ClickHouse/ClickHouse/pull/6894) ([Azat Khuzhin](https://github.com/azat))
-- Implement the ability to define a list of users with access to dictionaries. Only the currently connected database is used. [\#6907](https://github.com/ClickHouse/ClickHouse/pull/6907) ([Guillaume Tassery](https://github.com/YiuRULE))
-- Add `LIMIT` option to `SHOW` query. [\#6944](https://github.com/ClickHouse/ClickHouse/pull/6944) ([Philipp Malkovsky](https://github.com/malkfilipp))
-- Add `bitmapSubsetLimit(bitmap, range_start, limit)` function that returns a subset of the smallest `limit` values in the set that are no smaller than `range_start` (a sketch follows after this list). [\#6957](https://github.com/ClickHouse/ClickHouse/pull/6957) ([Zhichang Yu](https://github.com/yuzhichang))
-- Add `bitmapMin` and `bitmapMax` functions. [\#6970](https://github.com/ClickHouse/ClickHouse/pull/6970) ([Zhichang Yu](https://github.com/yuzhichang))
-- Add function `repeat` related to [issue-6648](https://github.com/ClickHouse/ClickHouse/issues/6648). [\#6999](https://github.com/ClickHouse/ClickHouse/pull/6999) ([flynn](https://github.com/ucasFL))
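A minimal sketch of the new bitmap helpers from the list above; the values in the comments are what the descriptions imply:

```sql
WITH bitmapBuild([1, 5, 7, 9, 20]) AS bm
SELECT
    bitmapToArray(bitmapSubsetLimit(bm, 6, 2)) AS subset, -- [7, 9]: at most 2 values that are >= 6
    bitmapMin(bm) AS min_value,                           -- 1
    bitmapMax(bm) AS max_value;                           -- 20
```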
-
-#### Experimental Feature {#experimental-feature-1}
-
-- Implement an (in memory) Merge Join variant that does not change the current pipeline. The result is partially sorted by the merge key. Set `partial_merge_join = 1` to use this feature. The Merge Join is still in development. [\#6940](https://github.com/ClickHouse/ClickHouse/pull/6940) ([Artem Zuikov](https://github.com/4ertus2))
-- Add `S3` engine and table function. It is still in development (no authentication support yet). [\#5596](https://github.com/ClickHouse/ClickHouse/pull/5596) ([Vladimir Chebotarev](https://github.com/excitoon))
-
-#### Improvement {#improvement-2}
-
-- Every message read from Kafka is inserted atomically. This resolves almost all known issues with the Kafka engine. [\#6950](https://github.com/ClickHouse/ClickHouse/pull/6950) ([Ivan](https://github.com/abyss7))
-- Improvements for failover of Distributed queries. Recovery time is shortened; it is also now configurable and can be seen in `system.clusters`. [\#6399](https://github.com/ClickHouse/ClickHouse/pull/6399) ([Vasily Nemkov](https://github.com/Enmk))
-- Support numeric values for Enums directly in the `IN` section. \#6766 [\#6941](https://github.com/ClickHouse/ClickHouse/pull/6941) ([dimarub2000](https://github.com/dimarub2000))
-- Support (optional, disabled by default) redirects on URL storage. [\#6914](https://github.com/ClickHouse/ClickHouse/pull/6914) ([maqroll](https://github.com/maqroll))
-- Add an information message when a client with an older version connects to a server. [\#6893](https://github.com/ClickHouse/ClickHouse/pull/6893) ([Philipp Malkovsky](https://github.com/malkfilipp))
-- Remove the maximum backoff sleep time limit for sending data in Distributed tables. [\#6895](https://github.com/ClickHouse/ClickHouse/pull/6895) ([Azat Khuzhin](https://github.com/azat))
-- Add the ability to send profile events (counters) with cumulative values to graphite. It can be enabled under `<events_cumulative>` in server `config.xml`. [\#6969](https://github.com/ClickHouse/ClickHouse/pull/6969) ([Azat Khuzhin](https://github.com/azat))
-- Automatically cast type `T` to `LowCardinality(T)` while inserting data in a column of type `LowCardinality(T)` in Native format via HTTP. [\#6891](https://github.com/ClickHouse/ClickHouse/pull/6891) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
-- Add the ability to use function `hex` without `reinterpretAsString` for `Float32`, `Float64` (see the sketch below). [\#7024](https://github.com/ClickHouse/ClickHouse/pull/7024) ([Mikhail Korotov](https://github.com/millb))
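Illustrating the `hex`-for-floats improvement above: the output is the hex of the value’s in-memory (little-endian IEEE 754) bytes.

```sql
SELECT
    hex(toFloat32(1)) AS f32, -- '0000803F' (0x3F800000 stored little-endian)
    hex(toFloat64(1)) AS f64; -- '000000000000F03F'
```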
-
-#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-2}
-
-- Add gdb-index to the clickhouse binary with debug info. It will speed up the startup time of `gdb`. [\#6947](https://github.com/ClickHouse/ClickHouse/pull/6947) ([alesapin](https://github.com/alesapin))
-- Speed up deb packaging with a patched dpkg-deb which uses `pigz`. [\#6960](https://github.com/ClickHouse/ClickHouse/pull/6960) ([alesapin](https://github.com/alesapin))
-- Set `enable_fuzzing = 1` to enable libfuzzer instrumentation of all the project code. [\#7042](https://github.com/ClickHouse/ClickHouse/pull/7042) ([kyprizel](https://github.com/kyprizel))
-- Add split build smoke test in CI. [\#7061](https://github.com/ClickHouse/ClickHouse/pull/7061) ([alesapin](https://github.com/alesapin))
-- Add build with MemorySanitizer to CI. [\#7066](https://github.com/ClickHouse/ClickHouse/pull/7066) ([Alexander Kuzmenkov](https://github.com/akuzm))
-- Replace `libsparsehash` with `sparsehash-c11`. [\#6965](https://github.com/ClickHouse/ClickHouse/pull/6965) ([Azat Khuzhin](https://github.com/azat))
-
-#### Bug Fix {#bug-fix-5}
-
-- Fixed performance degradation of index analysis on complex keys on large tables. This fixes \#6924. [\#7075](https://github.com/ClickHouse/ClickHouse/pull/7075) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Fix a logical error causing segfaults when selecting from an empty Kafka topic. [\#6909](https://github.com/ClickHouse/ClickHouse/pull/6909) ([Ivan](https://github.com/abyss7))
-- Fix too early MySQL connection close in `MySQLBlockInputStream.cpp`. [\#6882](https://github.com/ClickHouse/ClickHouse/pull/6882) ([Clément Rodriguez](https://github.com/clemrodriguez))
-- Returned support for very old Linux kernels (fix [\#6841](https://github.com/ClickHouse/ClickHouse/issues/6841)). [\#6853](https://github.com/ClickHouse/ClickHouse/pull/6853) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Fix possible data loss in `insert select` query in case of an empty block in the input stream. \#6834 \#6862 [\#6911](https://github.com/ClickHouse/ClickHouse/pull/6911) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
-- Fix for function `arrayEnumerateUniqRanked` with empty arrays in params. [\#6928](https://github.com/ClickHouse/ClickHouse/pull/6928) ([proller](https://github.com/proller))
-- Fix complex queries with array joins and global subqueries. [\#6934](https://github.com/ClickHouse/ClickHouse/pull/6934) ([Ivan](https://github.com/abyss7))
-- Fix `Unknown identifier` error in ORDER BY and GROUP BY with multiple JOINs. [\#7022](https://github.com/ClickHouse/ClickHouse/pull/7022) ([Artem Zuikov](https://github.com/4ertus2))
-- Fixed `MSan` warning while executing a function with a `LowCardinality` argument. [\#7062](https://github.com/ClickHouse/ClickHouse/pull/7062) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
-
-#### Backward Incompatible Change {#backward-incompatible-change-2}
-
-- Changed the serialization format of bitmap\* aggregate function states to improve performance. Serialized states of bitmap\* from previous versions cannot be read. [\#6908](https://github.com/ClickHouse/ClickHouse/pull/6908) ([Zhichang Yu](https://github.com/yuzhichang))
-
-## ClickHouse release 19.14 {#clickhouse-release-19-14}
-
-### ClickHouse release 19.14.7.15, 2019-10-02 {#clickhouse-release-19-14-7-15-2019-10-02}
-
-#### Bug Fix {#bug-fix-6}
-
-- This release also contains all bug fixes from 19.11.12.69.
-- Fixed compatibility for distributed queries between 19.14 and earlier versions. This fixes [\#7068](https://github.com/ClickHouse/ClickHouse/issues/7068). [\#7069](https://github.com/ClickHouse/ClickHouse/pull/7069) ([alexey-milovidov](https://github.com/alexey-milovidov))
-
-### ClickHouse release 19.14.6.12, 2019-09-19 {#clickhouse-release-19-14-6-12-2019-09-19}
-
-#### Bug Fix {#bug-fix-7}
-
-- Fix for function `arrayEnumerateUniqRanked` with empty arrays in params. [\#6928](https://github.com/ClickHouse/ClickHouse/pull/6928) ([proller](https://github.com/proller))
-- Fixed subquery name in queries with `ARRAY JOIN` and `GLOBAL IN subquery` with alias. Use the subquery alias for the external table name if it is specified. [\#6934](https://github.com/ClickHouse/ClickHouse/pull/6934) ([Ivan](https://github.com/abyss7))
-
-#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-3}
-
-- Fix [flapping](https://clickhouse-test-reports.s3.yandex.net/6944/aab95fd5175a513413c7395a73a82044bdafb906/functional_stateless_tests_(debug).html) test `00715_fetch_merged_or_mutated_part_zookeeper` by rewriting it to a shell script, because it needs to wait for mutations to apply. [\#6977](https://github.com/ClickHouse/ClickHouse/pull/6977) ([Alexander Kazakov](https://github.com/Akazz))
-- Fixed UBSan and MemSan failure in function `groupUniqArray` with an empty array argument. It was caused by placing an empty `PaddedPODArray` into the hash table zero cell, because the constructor for the zero cell value was not called. [\#6937](https://github.com/ClickHouse/ClickHouse/pull/6937) ([Amos Bird](https://github.com/amosbird))
-
-### ClickHouse release 19.14.3.3, 2019-09-10 {#clickhouse-release-19-14-3-3-2019-09-10}
-
-#### New Feature {#new-feature-4}
-
-- `WITH FILL` modifier for `ORDER BY` (continuation of [\#5069](https://github.com/ClickHouse/ClickHouse/issues/5069); a sketch of both modifiers follows after this list). [\#6610](https://github.com/ClickHouse/ClickHouse/pull/6610) ([Anton Popov](https://github.com/CurtizJ))
-- `WITH TIES` modifier for `LIMIT` (continuation of [\#5069](https://github.com/ClickHouse/ClickHouse/issues/5069)). [\#6610](https://github.com/ClickHouse/ClickHouse/pull/6610) ([Anton Popov](https://github.com/CurtizJ))
-- Parse an unquoted `NULL` literal as NULL (if setting `format_csv_unquoted_null_literal_as_null=1`). Initialize null fields with default values if the data type of the field is not nullable (if setting `input_format_null_as_default=1`). [\#5990](https://github.com/ClickHouse/ClickHouse/issues/5990) [\#6055](https://github.com/ClickHouse/ClickHouse/pull/6055) ([tavplubix](https://github.com/tavplubix))
-- Support for wildcards in paths of table functions `file` and `hdfs`. If the path contains wildcards, the table will be readonly. Example of usage: `select * from hdfs('hdfs://hdfs1:9000/some_dir/another_dir/*/file{0..9}{0..9}')` and `select * from file('some_dir/{some_file,another_file,yet_another}.tsv', 'TSV', 'value UInt32')`. [\#6092](https://github.com/ClickHouse/ClickHouse/pull/6092) ([Olga Khvostikova](https://github.com/stavrolia))
-- New `system.metric_log` table which stores values of `system.events` and `system.metrics` with a specified time interval. [\#6363](https://github.com/ClickHouse/ClickHouse/issues/6363) [\#6467](https://github.com/ClickHouse/ClickHouse/pull/6467) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) [\#6530](https://github.com/ClickHouse/ClickHouse/pull/6530) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Allow writing ClickHouse text logs to the `system.text_log` table. [\#6037](https://github.com/ClickHouse/ClickHouse/issues/6037) [\#6103](https://github.com/ClickHouse/ClickHouse/pull/6103) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) [\#6164](https://github.com/ClickHouse/ClickHouse/pull/6164) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Show private symbols in stack traces (this is done via parsing symbol tables of ELF files). Added information about file and line number in stack traces if debug info is present. Sped up symbol name lookup by indexing symbols present in the program. Added new SQL functions for introspection: `demangle` and `addressToLine`. Renamed function `symbolizeAddress` to `addressToSymbol` for consistency. Function `addressToSymbol` will return a mangled name for performance reasons and you have to apply `demangle`. Added setting `allow_introspection_functions` which is turned off by default. [\#6201](https://github.com/ClickHouse/ClickHouse/pull/6201) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Table function `values` (the name is case-insensitive). It allows reading from a `VALUES` list as proposed in [\#5984](https://github.com/ClickHouse/ClickHouse/issues/5984). Example: `SELECT * FROM VALUES('a UInt64, s String', (1, 'one'), (2, 'two'), (3, 'three'))`. [\#6217](https://github.com/ClickHouse/ClickHouse/issues/6217). [\#6209](https://github.com/ClickHouse/ClickHouse/pull/6209) ([dimarub2000](https://github.com/dimarub2000))
-- Added an ability to alter storage settings. Syntax: `ALTER TABLE <table> MODIFY SETTING <setting> = <value>`. [\#6366](https://github.com/ClickHouse/ClickHouse/pull/6366) [\#6669](https://github.com/ClickHouse/ClickHouse/pull/6669) [\#6685](https://github.com/ClickHouse/ClickHouse/pull/6685) ([alesapin](https://github.com/alesapin))
-- Support for removing detached parts. Syntax: `ALTER TABLE <table_name> DROP DETACHED PART '<part_id>'`. [\#6158](https://github.com/ClickHouse/ClickHouse/pull/6158) ([tavplubix](https://github.com/tavplubix))
-- Table constraints. Allows adding a constraint to a table definition which will be checked at insert (a sketch follows after this list). [\#5273](https://github.com/ClickHouse/ClickHouse/pull/5273) ([Gleb Novikov](https://github.com/NanoBjorn)) [\#6652](https://github.com/ClickHouse/ClickHouse/pull/6652) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Support for cascaded materialized views. [\#6324](https://github.com/ClickHouse/ClickHouse/pull/6324) ([Amos Bird](https://github.com/amosbird))
-- Turn on query profiler by default to sample every query execution thread once a second. [\#6283](https://github.com/ClickHouse/ClickHouse/pull/6283) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Input format `ORC`. [\#6454](https://github.com/ClickHouse/ClickHouse/pull/6454) [\#6703](https://github.com/ClickHouse/ClickHouse/pull/6703) ([akonyaev90](https://github.com/akonyaev90))
-- Added two new functions: `sigmoid` and `tanh` (that are useful for machine learning applications). [\#6254](https://github.com/ClickHouse/ClickHouse/pull/6254) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Functions `hasToken(haystack, token)`, `hasTokenCaseInsensitive(haystack, token)` to check if a given token is in the haystack. A token is a maximal-length substring between two non-alphanumeric ASCII characters (or the boundaries of the haystack). The token must be a constant string. Supported by the tokenbf\_v1 index specialization. [\#6596](https://github.com/ClickHouse/ClickHouse/pull/6596), [\#6662](https://github.com/ClickHouse/ClickHouse/pull/6662) ([Vasily Nemkov](https://github.com/Enmk))
-- New function `neighbor(value, offset[, default_value])`. Allows reaching the previous/next value within a column in a block of data. [\#5925](https://github.com/ClickHouse/ClickHouse/pull/5925) ([Alex Krash](https://github.com/alex-krash)) [6685365ab8c5b74f9650492c88a012596eb1b0c6](https://github.com/ClickHouse/ClickHouse/commit/6685365ab8c5b74f9650492c88a012596eb1b0c6) [341e2e4587a18065c2da1ca888c73389f48ce36c](https://github.com/ClickHouse/ClickHouse/commit/341e2e4587a18065c2da1ca888c73389f48ce36c) [Alexey Milovidov](https://github.com/alexey-milovidov)
-- Created a function `currentUser()`, returning the login of the authorized user. Added alias `user()` for compatibility with MySQL. [\#6470](https://github.com/ClickHouse/ClickHouse/pull/6470) ([Alex Krash](https://github.com/alex-krash))
-- New aggregate functions `quantilesExactInclusive` and `quantilesExactExclusive` which were proposed in [\#5885](https://github.com/ClickHouse/ClickHouse/issues/5885). [\#6477](https://github.com/ClickHouse/ClickHouse/pull/6477) ([dimarub2000](https://github.com/dimarub2000))
-- Function `bitmapRange(bitmap, range_begin, range_end)` which returns a new set with the specified range (not including `range_end`). [\#6314](https://github.com/ClickHouse/ClickHouse/pull/6314) ([Zhichang Yu](https://github.com/yuzhichang))
-- Function `geohashesInBox(longitude_min, latitude_min, longitude_max, latitude_max, precision)` which creates an array of precision-long strings of geohash boxes covering the provided area. [\#6127](https://github.com/ClickHouse/ClickHouse/pull/6127) ([Vasily Nemkov](https://github.com/Enmk))
-- Implement support for INSERT query with `Kafka` tables. [\#6012](https://github.com/ClickHouse/ClickHouse/pull/6012) ([Ivan](https://github.com/abyss7))
-- Added support for `_partition` and `_timestamp` virtual columns to the Kafka engine. [\#6400](https://github.com/ClickHouse/ClickHouse/pull/6400) ([Ivan](https://github.com/abyss7))
-- Possibility to remove sensitive data from `query_log`, server logs, process list with regexp-based rules. [\#5710](https://github.com/ClickHouse/ClickHouse/pull/5710) ([filimonov](https://github.com/filimonov))
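Two of the features above, sketched for illustration. First, the new `ORDER BY ... WITH FILL` and `LIMIT ... WITH TIES` modifiers; the row counts in the comments are what the descriptions imply:

```sql
-- WITH FILL inserts the missing points 2 and 4 between the existing rows:
SELECT arrayJoin([1, 3, 5]) AS n ORDER BY n WITH FILL;

-- WITH TIES also returns the rows that tie with the last row on the ORDER BY key,
-- so this yields 6 rows (all k = 0 and k = 1) instead of 4:
SELECT number % 3 AS k FROM numbers(9) ORDER BY k LIMIT 4 WITH TIES;
```

Second, a minimal sketch of table constraints; the table and constraint names are hypothetical:

```sql
CREATE TABLE t
(
    x UInt64,
    CONSTRAINT x_positive CHECK x > 0
)
ENGINE = MergeTree
ORDER BY x;

INSERT INTO t VALUES (1); -- accepted
INSERT INTO t VALUES (0); -- rejected: the constraint is checked at insert
```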
-
-#### Experimental Feature {#experimental-feature-2}
-
-- Input and output data format `Template`. It allows specifying a custom format string for input and output. [\#4354](https://github.com/ClickHouse/ClickHouse/issues/4354) [\#6727](https://github.com/ClickHouse/ClickHouse/pull/6727) ([tavplubix](https://github.com/tavplubix))
-- Implementation of `LIVE VIEW` tables that were originally proposed in [\#2898](https://github.com/ClickHouse/ClickHouse/pull/2898), prepared in [\#3925](https://github.com/ClickHouse/ClickHouse/issues/3925), and then updated in [\#5541](https://github.com/ClickHouse/ClickHouse/issues/5541). See [\#5541](https://github.com/ClickHouse/ClickHouse/issues/5541) for a detailed description. [\#5541](https://github.com/ClickHouse/ClickHouse/issues/5541) ([vzakaznikov](https://github.com/vzakaznikov)) [\#6425](https://github.com/ClickHouse/ClickHouse/pull/6425) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) [\#6656](https://github.com/ClickHouse/ClickHouse/pull/6656) ([vzakaznikov](https://github.com/vzakaznikov)) Note that the `LIVE VIEW` feature may be removed in future versions.
-
-#### Bug Fix {#bug-fix-8}
-
-- This release also contains all bug fixes from 19.13 and 19.11.
-- Fix segmentation fault when the table has skip indices and a vertical merge happens. [\#6723](https://github.com/ClickHouse/ClickHouse/pull/6723) ([alesapin](https://github.com/alesapin))
-- Fix per-column TTL with non-trivial column defaults. Previously, in case of a force TTL merge with an `OPTIMIZE ... FINAL` query, expired values were replaced by type defaults instead of user-specified column defaults. [\#6796](https://github.com/ClickHouse/ClickHouse/pull/6796) ([Anton Popov](https://github.com/CurtizJ))
-- Fix Kafka messages duplication problem on normal server restart. [\#6597](https://github.com/ClickHouse/ClickHouse/pull/6597) ([Ivan](https://github.com/abyss7))
-- Fixed infinite loop when reading Kafka messages. Do not pause/resume the consumer on subscription at all; otherwise it may get paused indefinitely in some scenarios. [\#6354](https://github.com/ClickHouse/ClickHouse/pull/6354) ([Ivan](https://github.com/abyss7))
-- Fix `Key expression contains comparison between inconvertible types` exception in the `bitmapContains` function. [\#6136](https://github.com/ClickHouse/ClickHouse/issues/6136) [\#6146](https://github.com/ClickHouse/ClickHouse/issues/6146) [\#6156](https://github.com/ClickHouse/ClickHouse/pull/6156) ([dimarub2000](https://github.com/dimarub2000))
-- Fix segfault with enabled `optimize_skip_unused_shards` and a missing sharding key. [\#6384](https://github.com/ClickHouse/ClickHouse/pull/6384) ([Anton Popov](https://github.com/CurtizJ))
-- Fixed wrong code in mutations that may lead to memory corruption. Fixed segfault with read of address `0x14c0` that may happen due to concurrent `DROP TABLE` and `SELECT` from `system.parts` or `system.parts_columns`. Fixed race condition in preparation of mutation queries. Fixed deadlock caused by `OPTIMIZE` of Replicated tables and concurrent modification operations like ALTERs. [\#6514](https://github.com/ClickHouse/ClickHouse/pull/6514) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Removed extra verbose logging in the MySQL interface. [\#6389](https://github.com/ClickHouse/ClickHouse/pull/6389) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Return the ability to parse boolean settings from ‘true’ and ‘false’ in the configuration file. [\#6278](https://github.com/ClickHouse/ClickHouse/pull/6278) ([alesapin](https://github.com/alesapin))
-- Fix crash in `quantile` and `median` functions over `Nullable(Decimal128)`. [\#6378](https://github.com/ClickHouse/ClickHouse/pull/6378) ([Artem Zuikov](https://github.com/4ertus2))
-- Fixed possible incomplete result returned by a `SELECT` query with a `WHERE` condition on the primary key that contained a conversion to Float type. It was caused by incorrect checking of monotonicity in the `toFloat` function. [\#6248](https://github.com/ClickHouse/ClickHouse/issues/6248) [\#6374](https://github.com/ClickHouse/ClickHouse/pull/6374) ([dimarub2000](https://github.com/dimarub2000))
-- Check the `max_expanded_ast_elements` setting for mutations. Clear mutations after `TRUNCATE TABLE`. [\#6205](https://github.com/ClickHouse/ClickHouse/pull/6205) ([Winter Zhang](https://github.com/zhang2014))
-- Fix JOIN results for key columns when used with `join_use_nulls`. Attach Nulls instead of column defaults. [\#6249](https://github.com/ClickHouse/ClickHouse/pull/6249) ([Artem Zuikov](https://github.com/4ertus2))
-- Fix for skip indices with vertical merge and alter. Fix for the `Bad size of marks file` exception. [\#6594](https://github.com/ClickHouse/ClickHouse/issues/6594) [\#6713](https://github.com/ClickHouse/ClickHouse/pull/6713) ([alesapin](https://github.com/alesapin))
-- Fix rare crash in `ALTER MODIFY COLUMN` and vertical merge when one of the merged/altered parts is empty (0 rows). [\#6746](https://github.com/ClickHouse/ClickHouse/issues/6746) [\#6780](https://github.com/ClickHouse/ClickHouse/pull/6780) ([alesapin](https://github.com/alesapin))
-- Fixed bug in conversion of `LowCardinality` types in `AggregateFunctionFactory`. This fixes [\#6257](https://github.com/ClickHouse/ClickHouse/issues/6257). [\#6281](https://github.com/ClickHouse/ClickHouse/pull/6281) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
-- Fix wrong behavior and possible segfaults in `topK` and `topKWeighted` aggregate functions. [\#6404](https://github.com/ClickHouse/ClickHouse/pull/6404) ([Anton Popov](https://github.com/CurtizJ))
-- Fixed unsafe code around the `getIdentifier` function. [\#6401](https://github.com/ClickHouse/ClickHouse/issues/6401) [\#6409](https://github.com/ClickHouse/ClickHouse/pull/6409) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Fixed bug in the MySQL wire protocol (used while connecting to ClickHouse from a MySQL client). Caused by a heap buffer overflow in `PacketPayloadWriteBuffer`. [\#6212](https://github.com/ClickHouse/ClickHouse/pull/6212) ([Yuriy Baranov](https://github.com/yurriy))
-- Fixed memory leak in the `bitmapSubsetInRange` function. [\#6819](https://github.com/ClickHouse/ClickHouse/pull/6819) ([Zhichang Yu](https://github.com/yuzhichang))
-- Fix rare bug when a mutation is executed after a granularity change. [\#6816](https://github.com/ClickHouse/ClickHouse/pull/6816) ([alesapin](https://github.com/alesapin))
-- Allow protobuf messages with all fields by default. [\#6132](https://github.com/ClickHouse/ClickHouse/pull/6132) ([Vitaly Baranov](https://github.com/vitlibar))
-- Resolve a bug with the `nullIf` function when we send a `NULL` as the second argument. [\#6446](https://github.com/ClickHouse/ClickHouse/pull/6446) ([Guillaume Tassery](https://github.com/YiuRULE))
-- Fix rare bug with wrong memory allocation/deallocation in complex key cache dictionaries with string fields, which leads to infinite memory consumption (looks like a memory leak). The bug reproduces when the string size is a power of two, starting from eight (8, 16, 32, etc.).
[\#6447](https://github.com/ClickHouse/ClickHouse/pull/6447) ([alesapin](https://github.com/alesapin)) -- Fixed Gorilla encoding on small sequences which caused exception `Cannot write after end of buffer`. [\#6398](https://github.com/ClickHouse/ClickHouse/issues/6398) [\#6444](https://github.com/ClickHouse/ClickHouse/pull/6444) ([Vasily Nemkov](https://github.com/Enmk)) -- Allow to use not nullable types in JOINs with `join_use_nulls` enabled. [\#6705](https://github.com/ClickHouse/ClickHouse/pull/6705) ([Artem Zuikov](https://github.com/4ertus2)) -- Disable `Poco::AbstractConfiguration` substitutions in query in `clickhouse-client`. [\#6706](https://github.com/ClickHouse/ClickHouse/pull/6706) ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Avoid deadlock in `REPLACE PARTITION`. [\#6677](https://github.com/ClickHouse/ClickHouse/pull/6677) ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Using `arrayReduce` for constant arguments may lead to segfault. [\#6242](https://github.com/ClickHouse/ClickHouse/issues/6242) [\#6326](https://github.com/ClickHouse/ClickHouse/pull/6326) ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Fix inconsistent parts which can appear if replica was restored after `DROP PARTITION`. [\#6522](https://github.com/ClickHouse/ClickHouse/issues/6522) [\#6523](https://github.com/ClickHouse/ClickHouse/pull/6523) ([tavplubix](https://github.com/tavplubix)) -- Fixed hang in `JSONExtractRaw` function. [\#6195](https://github.com/ClickHouse/ClickHouse/issues/6195) [\#6198](https://github.com/ClickHouse/ClickHouse/pull/6198) ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Fix bug with incorrect skip indices serialization and aggregation with adaptive granularity. [\#6594](https://github.com/ClickHouse/ClickHouse/issues/6594). [\#6748](https://github.com/ClickHouse/ClickHouse/pull/6748) ([alesapin](https://github.com/alesapin)) -- Fix `WITH ROLLUP` and `WITH CUBE` modifiers of `GROUP BY` with two-level aggregation. [\#6225](https://github.com/ClickHouse/ClickHouse/pull/6225) ([Anton Popov](https://github.com/CurtizJ)) -- Fix bug with writing secondary indices marks with adaptive granularity. [\#6126](https://github.com/ClickHouse/ClickHouse/pull/6126) ([alesapin](https://github.com/alesapin)) -- Fix initialization order while server startup. Since `StorageMergeTree::background_task_handle` is initialized in `startup()` the `MergeTreeBlockOutputStream::write()` may try to use it before initialization. Just check if it is initialized. [\#6080](https://github.com/ClickHouse/ClickHouse/pull/6080) ([Ivan](https://github.com/abyss7)) -- Clearing the data buffer from the previous read operation that was completed with an error. [\#6026](https://github.com/ClickHouse/ClickHouse/pull/6026) ([Nikolay](https://github.com/bopohaa)) -- Fix bug with enabling adaptive granularity when creating a new replica for Replicated\*MergeTree table. [\#6394](https://github.com/ClickHouse/ClickHouse/issues/6394) [\#6452](https://github.com/ClickHouse/ClickHouse/pull/6452) ([alesapin](https://github.com/alesapin)) -- Fixed possible crash during server startup in case of exception happened in `libunwind` during exception at access to uninitialized `ThreadStatus` structure. [\#6456](https://github.com/ClickHouse/ClickHouse/pull/6456) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) -- Fix crash in `yandexConsistentHash` function. Found by fuzz test. 
[\#6304](https://github.com/ClickHouse/ClickHouse/issues/6304) [\#6305](https://github.com/ClickHouse/ClickHouse/pull/6305) ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Fixed the possibility of hanging queries when server is overloaded and global thread pool becomes near full. This have higher chance to happen on clusters with large number of shards (hundreds), because distributed queries allocate a thread per connection to each shard. For example, this issue may reproduce if a cluster of 330 shards is processing 30 concurrent distributed queries. This issue affects all versions starting from 19.2. [\#6301](https://github.com/ClickHouse/ClickHouse/pull/6301) ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Fixed logic of `arrayEnumerateUniqRanked` function. [\#6423](https://github.com/ClickHouse/ClickHouse/pull/6423) ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Fix segfault when decoding symbol table. [\#6603](https://github.com/ClickHouse/ClickHouse/pull/6603) ([Amos Bird](https://github.com/amosbird)) -- Fixed irrelevant exception in cast of `LowCardinality(Nullable)` to not-Nullable column in case if it doesn’t contain Nulls (e.g. in query like `SELECT CAST(CAST('Hello' AS LowCardinality(Nullable(String))) AS String)`. [\#6094](https://github.com/ClickHouse/ClickHouse/issues/6094) [\#6119](https://github.com/ClickHouse/ClickHouse/pull/6119) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -- Removed extra quoting of description in `system.settings` table. [\#6696](https://github.com/ClickHouse/ClickHouse/issues/6696) [\#6699](https://github.com/ClickHouse/ClickHouse/pull/6699) ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Avoid possible deadlock in `TRUNCATE` of Replicated table. [\#6695](https://github.com/ClickHouse/ClickHouse/pull/6695) ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Fix reading in order of sorting key. [\#6189](https://github.com/ClickHouse/ClickHouse/pull/6189) ([Anton Popov](https://github.com/CurtizJ)) -- Fix `ALTER TABLE ... UPDATE` query for tables with `enable_mixed_granularity_parts=1`. [\#6543](https://github.com/ClickHouse/ClickHouse/pull/6543) ([alesapin](https://github.com/alesapin)) -- Fix bug opened by [\#4405](https://github.com/ClickHouse/ClickHouse/pull/4405) (since 19.4.0). Reproduces in queries to Distributed tables over MergeTree tables when we doesn’t query any columns (`SELECT 1`). [\#6236](https://github.com/ClickHouse/ClickHouse/pull/6236) ([alesapin](https://github.com/alesapin)) -- Fixed overflow in integer division of signed type to unsigned type. The behaviour was exactly as in C or C++ language (integer promotion rules) that may be surprising. Please note that the overflow is still possible when dividing large signed number to large unsigned number or vice-versa (but that case is less usual). The issue existed in all server versions. [\#6214](https://github.com/ClickHouse/ClickHouse/issues/6214) [\#6233](https://github.com/ClickHouse/ClickHouse/pull/6233) ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Limit maximum sleep time for throttling when `max_execution_speed` or `max_execution_speed_bytes` is set. Fixed false errors like `Estimated query execution time (inf seconds) is too long`. 
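
For illustration of the `nullIf` fix noted in the list above, a minimal query of the affected shape; the expected behaviour described in the comments is our reading of the function's semantics, not text from the PR:

```sql
-- nullIf(x, y) returns NULL if x = y, and x otherwise.
-- A NULL second argument never compares equal, so the first
-- argument should come back unchanged instead of misbehaving.
SELECT nullIf(1, NULL);
```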

#### Security Fix {#security-fix}

- This release also contains all security bug fixes from 19.13 and 19.11.
- Fixed the possibility of a fabricated query causing a server crash due to stack overflow in the SQL parser. Fixed the possibility of stack overflow in Merge and Distributed tables, materialized views and conditions for row-level security that involve subqueries. [\#6433](https://github.com/ClickHouse/ClickHouse/pull/6433) ([alexey-milovidov](https://github.com/alexey-milovidov))

#### Improvement {#improvement-3}

- Correct implementation of ternary logic for `AND/OR`. [\#6048](https://github.com/ClickHouse/ClickHouse/pull/6048) ([Alexander Kazakov](https://github.com/Akazz))
- Now values and rows with expired TTL will be removed after an `OPTIMIZE ... FINAL` query from old parts without TTL infos or with outdated TTL infos, e.g. after an `ALTER ... MODIFY TTL` query. Added queries `SYSTEM STOP/START TTL MERGES` to disallow/allow assigning merges with TTL and filtering expired values in all merges. [\#6274](https://github.com/ClickHouse/ClickHouse/pull/6274) ([Anton Popov](https://github.com/CurtizJ))
- Possibility to change the location of the ClickHouse history file for the client using the `CLICKHOUSE_HISTORY_FILE` environment variable. [\#6840](https://github.com/ClickHouse/ClickHouse/pull/6840) ([filimonov](https://github.com/filimonov))
- Remove the `dry_run` flag from `InterpreterSelectQuery`. … [\#6375](https://github.com/ClickHouse/ClickHouse/pull/6375) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Support `ASOF JOIN` with an `ON` section. [\#6211](https://github.com/ClickHouse/ClickHouse/pull/6211) ([Artem Zuikov](https://github.com/4ertus2))
- Better support of skip indexes for mutations and replication. Support for the `MATERIALIZE/CLEAR INDEX ... IN PARTITION` query. `UPDATE x = x` recalculates all indices that use column `x`. [\#5053](https://github.com/ClickHouse/ClickHouse/pull/5053) ([Nikita Vasilev](https://github.com/nikvas0))
- Allow to `ATTACH` live views (for example, at server startup) regardless of the `allow_experimental_live_view` setting. [\#6754](https://github.com/ClickHouse/ClickHouse/pull/6754) ([alexey-milovidov](https://github.com/alexey-milovidov))
- For stack traces gathered by the query profiler, do not include stack frames generated by the query profiler itself. [\#6250](https://github.com/ClickHouse/ClickHouse/pull/6250) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Now the table functions `values`, `file`, `url`, `hdfs` support ALIAS columns. [\#6255](https://github.com/ClickHouse/ClickHouse/pull/6255) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Throw an exception if a `config.d` file doesn’t have the same root element as the config file. [\#6123](https://github.com/ClickHouse/ClickHouse/pull/6123) ([dimarub2000](https://github.com/dimarub2000))
- Print extra info in the exception message for `no space left on device`. [\#6182](https://github.com/ClickHouse/ClickHouse/issues/6182), [\#6252](https://github.com/ClickHouse/ClickHouse/issues/6252) [\#6352](https://github.com/ClickHouse/ClickHouse/pull/6352) ([tavplubix](https://github.com/tavplubix))
- When determining shards of a `Distributed` table to be covered by a read query (for `optimize_skip_unused_shards` = 1), ClickHouse now checks conditions from both the `prewhere` and `where` clauses of the select statement. [\#6521](https://github.com/ClickHouse/ClickHouse/pull/6521) ([Alexander Kazakov](https://github.com/Akazz))
- Enabled `SIMDJSON` for machines without AVX2 but with the SSE 4.2 and PCLMUL instruction set. [\#6285](https://github.com/ClickHouse/ClickHouse/issues/6285) [\#6320](https://github.com/ClickHouse/ClickHouse/pull/6320) ([alexey-milovidov](https://github.com/alexey-milovidov))
- ClickHouse can work on filesystems without `O_DIRECT` support (such as ZFS and BtrFS) without additional tuning. [\#4449](https://github.com/ClickHouse/ClickHouse/issues/4449) [\#6730](https://github.com/ClickHouse/ClickHouse/pull/6730) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Support predicate push-down for the final subquery. [\#6120](https://github.com/ClickHouse/ClickHouse/pull/6120) ([TCeason](https://github.com/TCeason)) [\#6162](https://github.com/ClickHouse/ClickHouse/pull/6162) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Better `JOIN ON` keys extraction. [\#6131](https://github.com/ClickHouse/ClickHouse/pull/6131) ([Artem Zuikov](https://github.com/4ertus2))
- Updated `SIMDJSON`. [\#6285](https://github.com/ClickHouse/ClickHouse/issues/6285). [\#6306](https://github.com/ClickHouse/ClickHouse/pull/6306) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Optimize selection of the smallest column for `SELECT count()` queries. [\#6344](https://github.com/ClickHouse/ClickHouse/pull/6344) ([Amos Bird](https://github.com/amosbird))
- Added a `strict` parameter in `windowFunnel()`. When `strict` is set, `windowFunnel()` applies conditions only to unique values. [\#6548](https://github.com/ClickHouse/ClickHouse/pull/6548) ([achimbab](https://github.com/achimbab))
- Safer interface of `mysqlxx::Pool`. [\#6150](https://github.com/ClickHouse/ClickHouse/pull/6150) ([avasiliev](https://github.com/avasiliev))
- The line width of options listed when executing with the `--help` option now corresponds to the terminal size. [\#6590](https://github.com/ClickHouse/ClickHouse/pull/6590) ([dimarub2000](https://github.com/dimarub2000))
- Disable the “read in order” optimization for aggregation without keys. [\#6599](https://github.com/ClickHouse/ClickHouse/pull/6599) ([Anton Popov](https://github.com/CurtizJ))
- The HTTP status code for `INCORRECT_DATA` and `TYPE_MISMATCH` error codes was changed from the default `500 Internal Server Error` to `400 Bad Request`. [\#6271](https://github.com/ClickHouse/ClickHouse/pull/6271) ([Alexander Rodin](https://github.com/a-rodin))
- Move the Join object from `ExpressionAction` into `AnalyzedJoin`. `ExpressionAnalyzer` and `ExpressionAction` do not know about the `Join` class anymore. Its logic is hidden behind the `AnalyzedJoin` interface. [\#6801](https://github.com/ClickHouse/ClickHouse/pull/6801) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed a possible deadlock of distributed queries when one of the shards is localhost but the query is sent via a network connection. [\#6759](https://github.com/ClickHouse/ClickHouse/pull/6759) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Changed the semantics of multiple-table `RENAME` to avoid possible deadlocks. [\#6757](https://github.com/ClickHouse/ClickHouse/issues/6757). [\#6756](https://github.com/ClickHouse/ClickHouse/pull/6756) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Rewrote the MySQL compatibility server to prevent loading the full packet payload into memory. Decreased memory consumption for each connection to approximately `2 * DBMS_DEFAULT_BUFFER_SIZE` (read/write buffers). [\#5811](https://github.com/ClickHouse/ClickHouse/pull/5811) ([Yuriy Baranov](https://github.com/yurriy))
- Move AST alias interpreting logic out of the parser, which doesn’t have to know anything about query semantics. [\#6108](https://github.com/ClickHouse/ClickHouse/pull/6108) ([Artem Zuikov](https://github.com/4ertus2))
- Slightly safer parsing of `NamesAndTypesList`. [\#6408](https://github.com/ClickHouse/ClickHouse/issues/6408). [\#6410](https://github.com/ClickHouse/ClickHouse/pull/6410) ([alexey-milovidov](https://github.com/alexey-milovidov))
- `clickhouse-copier`: Allow using `where_condition` from the config with the `partition_key` alias in queries for checking partition existence (earlier it was used only in queries reading data). [\#6577](https://github.com/ClickHouse/ClickHouse/pull/6577) ([proller](https://github.com/proller))
- Added an optional message argument in `throwIf`. ([\#5772](https://github.com/ClickHouse/ClickHouse/issues/5772)) [\#6329](https://github.com/ClickHouse/ClickHouse/pull/6329) ([Vdimir](https://github.com/Vdimir))
- A server exception raised while sending insertion data is now processed in the client as well. [\#5891](https://github.com/ClickHouse/ClickHouse/issues/5891) [\#6711](https://github.com/ClickHouse/ClickHouse/pull/6711) ([dimarub2000](https://github.com/dimarub2000))
- Added a metric `DistributedFilesToInsert` that shows the total number of files in the filesystem that are selected to be sent to remote servers by Distributed tables. The number is summed across all shards. [\#6600](https://github.com/ClickHouse/ClickHouse/pull/6600) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Move most of the JOIN preparation logic from `ExpressionAction/ExpressionAnalyzer` to `AnalyzedJoin`. [\#6785](https://github.com/ClickHouse/ClickHouse/pull/6785) ([Artem Zuikov](https://github.com/4ertus2))
- Fix TSan [warning](https://clickhouse-test-reports.s3.yandex.net/6399/c1c1d1daa98e199e620766f1bd06a5921050a00d/functional_stateful_tests_(thread).html) ‘lock-order-inversion’. [\#6740](https://github.com/ClickHouse/ClickHouse/pull/6740) ([Vasily Nemkov](https://github.com/Enmk))
- Better information messages about the lack of Linux capabilities. Fatal errors are now logged with the “fatal” level, which makes them easier to find in `system.text_log`. [\#6441](https://github.com/ClickHouse/ClickHouse/pull/6441) ([alexey-milovidov](https://github.com/alexey-milovidov))
- When dumping temporary data to disk to restrict memory usage during `GROUP BY` or `ORDER BY` was enabled, the free disk space was not checked. The fix adds a new setting `min_free_disk_space`: when the free disk space is smaller than this threshold, the query stops and throws `ErrorCodes::NOT_ENOUGH_SPACE`. [\#6678](https://github.com/ClickHouse/ClickHouse/pull/6678) ([Weiqing Xu](https://github.com/weiqxu)) [\#6691](https://github.com/ClickHouse/ClickHouse/pull/6691) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Removed the per-thread recursive rwlock. It made no sense, because threads are reused between queries. A `SELECT` query may acquire a lock in one thread, hold a lock from another thread and exit from the first thread. At the same time, the first thread can be reused by a `DROP` query. This led to false “Attempt to acquire exclusive lock recursively” messages. [\#6771](https://github.com/ClickHouse/ClickHouse/pull/6771) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Split `ExpressionAnalyzer.appendJoin()`. Prepare a place in `ExpressionAnalyzer` for `MergeJoin`. [\#6524](https://github.com/ClickHouse/ClickHouse/pull/6524) ([Artem Zuikov](https://github.com/4ertus2))
- Added the `mysql_native_password` authentication plugin to the MySQL compatibility server. [\#6194](https://github.com/ClickHouse/ClickHouse/pull/6194) ([Yuriy Baranov](https://github.com/yurriy))
- Fewer `clock_gettime` calls; fixed ABI compatibility between debug/release in `Allocator` (insignificant issue). [\#6197](https://github.com/ClickHouse/ClickHouse/pull/6197) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Move `collectUsedColumns` from `ExpressionAnalyzer` to `SyntaxAnalyzer`. `SyntaxAnalyzer` now builds `required_source_columns` itself. [\#6416](https://github.com/ClickHouse/ClickHouse/pull/6416) ([Artem Zuikov](https://github.com/4ertus2))
- Added the setting `joined_subquery_requires_alias` to require aliases for subselects and table functions in `FROM` when more than one table is present (i.e. queries with JOINs). [\#6733](https://github.com/ClickHouse/ClickHouse/pull/6733) ([Artem Zuikov](https://github.com/4ertus2))
- Extract the `GetAggregatesVisitor` class from `ExpressionAnalyzer`. [\#6458](https://github.com/ClickHouse/ClickHouse/pull/6458) ([Artem Zuikov](https://github.com/4ertus2))
- `system.query_log`: change the data type of the `type` column to `Enum`. [\#6265](https://github.com/ClickHouse/ClickHouse/pull/6265) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
- Static linking of the `sha256_password` authentication plugin. [\#6512](https://github.com/ClickHouse/ClickHouse/pull/6512) ([Yuriy Baranov](https://github.com/yurriy))
- Avoid an extra dependency for the setting `compile` to work. In previous versions, the user could get errors like `cannot open crti.o`, `unable to find library -lc` etc. [\#6309](https://github.com/ClickHouse/ClickHouse/pull/6309) ([alexey-milovidov](https://github.com/alexey-milovidov))
- More validation of input that may come from a malicious replica. [\#6303](https://github.com/ClickHouse/ClickHouse/pull/6303) ([alexey-milovidov](https://github.com/alexey-milovidov))
- The `clickhouse-obfuscator` binary is now available in the `clickhouse-client` package. In previous versions it was available as `clickhouse obfuscator` (with whitespace). [\#5816](https://github.com/ClickHouse/ClickHouse/issues/5816) [\#6609](https://github.com/ClickHouse/ClickHouse/pull/6609) ([dimarub2000](https://github.com/dimarub2000))
- Fixed a deadlock when at least two queries read at least two tables in a different order and another query performs a DDL operation on one of those tables. Fixed another very rare deadlock. [\#6764](https://github.com/ClickHouse/ClickHouse/pull/6764) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added the `os_thread_ids` column to `system.processes` and `system.query_log` for better debugging possibilities. [\#6763](https://github.com/ClickHouse/ClickHouse/pull/6763) ([alexey-milovidov](https://github.com/alexey-milovidov))
- A workaround for PHP mysqlnd extension bugs which occur when `sha256_password` is used as the default authentication plugin (described in [\#6031](https://github.com/ClickHouse/ClickHouse/issues/6031)). [\#6113](https://github.com/ClickHouse/ClickHouse/pull/6113) ([Yuriy Baranov](https://github.com/yurriy))
- Remove an unneeded place with changed nullability columns. [\#6693](https://github.com/ClickHouse/ClickHouse/pull/6693) ([Artem Zuikov](https://github.com/4ertus2))
- Set the default value of `queue_max_wait_ms` to zero, because the current value (five seconds) makes no sense. There are only rare circumstances in which this setting is of any use. Added the settings `replace_running_query_max_wait_ms`, `kafka_max_wait_ms` and `connection_pool_max_wait_ms` for disambiguation. [\#6692](https://github.com/ClickHouse/ClickHouse/pull/6692) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Extract `SelectQueryExpressionAnalyzer` from `ExpressionAnalyzer`. Keep the latter for non-select queries. [\#6499](https://github.com/ClickHouse/ClickHouse/pull/6499) ([Artem Zuikov](https://github.com/4ertus2))
- Removed duplicated input and output formats. [\#6239](https://github.com/ClickHouse/ClickHouse/pull/6239) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Allow the user to override the `poll_interval` and `idle_connection_timeout` settings on connection. [\#6230](https://github.com/ClickHouse/ClickHouse/pull/6230) ([alexey-milovidov](https://github.com/alexey-milovidov))
- `MergeTree` now has an additional option `ttl_only_drop_parts` (disabled by default) to avoid partial pruning of parts, so that parts are dropped completely when all the rows in them are expired (see the sketch after this list). [\#6191](https://github.com/ClickHouse/ClickHouse/pull/6191) ([Sergi Vladykin](https://github.com/svladykin))
- Type checks for set index functions. Throw an exception if a function got a wrong type. This fixes the fuzz test with UBSan. [\#6511](https://github.com/ClickHouse/ClickHouse/pull/6511) ([Nikita Vasilev](https://github.com/nikvas0))
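
To make the `ttl_only_drop_parts` option from the list above concrete, a minimal sketch of a table definition; the table and column names are assumptions:

```sql
-- With ttl_only_drop_parts = 1, expired rows are not pruned from a part
-- individually; the whole part is dropped once all of its rows are expired.
CREATE TABLE events
(
    `ts` DateTime,
    `payload` String
)
ENGINE = MergeTree
ORDER BY ts
TTL ts + INTERVAL 30 DAY
SETTINGS ttl_only_drop_parts = 1;
```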

#### Performance Improvement {#performance-improvement-2}

- Optimize queries with an `ORDER BY expressions` clause, where `expressions` have a prefix coinciding with the sorting key in `MergeTree` tables. This optimization is controlled by the `optimize_read_in_order` setting (see the example after this list). [\#6054](https://github.com/ClickHouse/ClickHouse/pull/6054) [\#6629](https://github.com/ClickHouse/ClickHouse/pull/6629) ([Anton Popov](https://github.com/CurtizJ))
- Allow using multiple threads during parts loading and removal. [\#6372](https://github.com/ClickHouse/ClickHouse/issues/6372) [\#6074](https://github.com/ClickHouse/ClickHouse/issues/6074) [\#6438](https://github.com/ClickHouse/ClickHouse/pull/6438) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Implemented a batch variant of updating aggregate function states. It may lead to performance benefits. [\#6435](https://github.com/ClickHouse/ClickHouse/pull/6435) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Using the `FastOps` library for the functions `exp`, `log`, `sigmoid`, `tanh`. FastOps is a fast vector math library from Michael Parakhin (Yandex CTO). Improved performance of the `exp` and `log` functions more than 6 times. The functions `exp` and `log` with a `Float32` argument will return `Float32` (in previous versions they always returned `Float64`). Now `exp(nan)` may return `inf`. The result of the `exp` and `log` functions may not be the nearest machine-representable number to the true answer. [\#6254](https://github.com/ClickHouse/ClickHouse/pull/6254) ([alexey-milovidov](https://github.com/alexey-milovidov)) Used Danila Kutenin’s variant to make FastOps work [\#6317](https://github.com/ClickHouse/ClickHouse/pull/6317) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Disable consecutive key optimization for `UInt8/16`. [\#6298](https://github.com/ClickHouse/ClickHouse/pull/6298) [\#6701](https://github.com/ClickHouse/ClickHouse/pull/6701) ([akuzm](https://github.com/akuzm))
- Improved performance of the `simdjson` library by getting rid of dynamic allocation in `ParsedJson::Iterator`. [\#6479](https://github.com/ClickHouse/ClickHouse/pull/6479) ([Vitaly Baranov](https://github.com/vitlibar))
- Pre-fault pages when allocating memory with `mmap()`. [\#6667](https://github.com/ClickHouse/ClickHouse/pull/6667) ([akuzm](https://github.com/akuzm))
- Fix a performance bug in `Decimal` comparison. [\#6380](https://github.com/ClickHouse/ClickHouse/pull/6380) ([Artem Zuikov](https://github.com/4ertus2))
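
A sketch of the kind of query the `optimize_read_in_order` setting above targets; the table definition is an assumption:

```sql
CREATE TABLE hits
(
    `d` Date,
    `user_id` UInt64
)
ENGINE = MergeTree
ORDER BY (d, user_id);

-- The ORDER BY clause shares the prefix (d) with the table's sorting key,
-- so with optimize_read_in_order enabled the rows can be read in the
-- existing on-disk order instead of being fully re-sorted.
SELECT * FROM hits ORDER BY d LIMIT 10;
```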

#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-4}

- Remove Compiler (runtime template instantiation) because we have won over its performance. [\#6646](https://github.com/ClickHouse/ClickHouse/pull/6646) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added a performance test to show the degradation of performance in gcc-9 in a more isolated way. [\#6302](https://github.com/ClickHouse/ClickHouse/pull/6302) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added the table function `numbers_mt`, which is a multithreaded version of `numbers` (see the usage sketch after this list). Updated performance tests with hash functions. [\#6554](https://github.com/ClickHouse/ClickHouse/pull/6554) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Comparison mode in `clickhouse-benchmark`. [\#6220](https://github.com/ClickHouse/ClickHouse/issues/6220) [\#6343](https://github.com/ClickHouse/ClickHouse/pull/6343) ([dimarub2000](https://github.com/dimarub2000))
- Best effort for printing stack traces. Also added `SIGPROF` as a debugging signal to print the stack trace of a running thread. [\#6529](https://github.com/ClickHouse/ClickHouse/pull/6529) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Every function in its own file, part 10. [\#6321](https://github.com/ClickHouse/ClickHouse/pull/6321) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Remove the duplicated const `TABLE_IS_READ_ONLY`. [\#6566](https://github.com/ClickHouse/ClickHouse/pull/6566) ([filimonov](https://github.com/filimonov))
- Formatting changes for the `StringHashMap` PR [\#5417](https://github.com/ClickHouse/ClickHouse/issues/5417). [\#6700](https://github.com/ClickHouse/ClickHouse/pull/6700) ([akuzm](https://github.com/akuzm))
- Better subquery creation for joins in `ExpressionAnalyzer`. [\#6824](https://github.com/ClickHouse/ClickHouse/pull/6824) ([Artem Zuikov](https://github.com/4ertus2))
- Remove a redundant condition (found by PVS Studio). [\#6775](https://github.com/ClickHouse/ClickHouse/pull/6775) ([akuzm](https://github.com/akuzm))
- Separate the hash table interface for `ReverseIndex`. [\#6672](https://github.com/ClickHouse/ClickHouse/pull/6672) ([akuzm](https://github.com/akuzm))
- Refactoring of settings. [\#6689](https://github.com/ClickHouse/ClickHouse/pull/6689) ([alesapin](https://github.com/alesapin))
- Add comments for `set` index functions. [\#6319](https://github.com/ClickHouse/ClickHouse/pull/6319) ([Nikita Vasilev](https://github.com/nikvas0))
- Increase the OOM score in the debug version on Linux. [\#6152](https://github.com/ClickHouse/ClickHouse/pull/6152) ([akuzm](https://github.com/akuzm))
- HDFS HA now works in debug builds. [\#6650](https://github.com/ClickHouse/ClickHouse/pull/6650) ([Weiqing Xu](https://github.com/weiqxu))
- Added a test for `transform_query_for_external_database`. [\#6388](https://github.com/ClickHouse/ClickHouse/pull/6388) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Add a test for multiple materialized views for a Kafka table. [\#6509](https://github.com/ClickHouse/ClickHouse/pull/6509) ([Ivan](https://github.com/abyss7))
- Make a better build scheme. [\#6500](https://github.com/ClickHouse/ClickHouse/pull/6500) ([Ivan](https://github.com/abyss7))
- Fixed the `test_external_dictionaries` integration test in case it was executed under a non-root user. [\#6507](https://github.com/ClickHouse/ClickHouse/pull/6507) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Added a test for the bug that reproduces when the total size of written packets exceeds `DBMS_DEFAULT_BUFFER_SIZE`. [\#6204](https://github.com/ClickHouse/ClickHouse/pull/6204) ([Yuriy Baranov](https://github.com/yurriy))
- Added a test for the `RENAME` table race condition. [\#6752](https://github.com/ClickHouse/ClickHouse/pull/6752) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Avoid a data race on Settings in `KILL QUERY`. [\#6753](https://github.com/ClickHouse/ClickHouse/pull/6753) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Add an integration test for handling errors by a cache dictionary. [\#6755](https://github.com/ClickHouse/ClickHouse/pull/6755) ([Vitaly Baranov](https://github.com/vitlibar))
- Disable parsing of ELF object files on Mac OS, because it makes no sense. [\#6578](https://github.com/ClickHouse/ClickHouse/pull/6578) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Attempt to make the changelog generator better. [\#6327](https://github.com/ClickHouse/ClickHouse/pull/6327) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added the `-Wshadow` switch for GCC. [\#6325](https://github.com/ClickHouse/ClickHouse/pull/6325) ([kreuzerkrieg](https://github.com/kreuzerkrieg))
- Removed obsolete code for `mimalloc` support. [\#6715](https://github.com/ClickHouse/ClickHouse/pull/6715) ([alexey-milovidov](https://github.com/alexey-milovidov))
- `zlib-ng` determines x86 capabilities and saves this info to global variables. This is done in the `deflateInit` call, which may be made by different threads simultaneously. To avoid multithreaded writes, do it on library startup. [\#6141](https://github.com/ClickHouse/ClickHouse/pull/6141) ([akuzm](https://github.com/akuzm))
- Regression test for a bug in JOIN which was fixed in [\#5192](https://github.com/ClickHouse/ClickHouse/issues/5192). [\#6147](https://github.com/ClickHouse/ClickHouse/pull/6147) ([Bakhtiyor Ruziev](https://github.com/theruziev))
- Fixed MSan report. [\#6144](https://github.com/ClickHouse/ClickHouse/pull/6144) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix the flapping TTL test. [\#6782](https://github.com/ClickHouse/ClickHouse/pull/6782) ([Anton Popov](https://github.com/CurtizJ))
- Fixed a false data race in the `MergeTreeDataPart::is_frozen` field. [\#6583](https://github.com/ClickHouse/ClickHouse/pull/6583) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed timeouts in the fuzz test. In the previous version, it managed to find a false hangup in the query `SELECT * FROM numbers_mt(gccMurmurHash(''))`. [\#6582](https://github.com/ClickHouse/ClickHouse/pull/6582) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added debug checks to `static_cast` of columns. [\#6581](https://github.com/ClickHouse/ClickHouse/pull/6581) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Support for Oracle Linux in official RPM packages. [\#6356](https://github.com/ClickHouse/ClickHouse/issues/6356) [\#6585](https://github.com/ClickHouse/ClickHouse/pull/6585) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Changed JSON perftests from `once` to `loop` type. [\#6536](https://github.com/ClickHouse/ClickHouse/pull/6536) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- `odbc-bridge.cpp` defines `main()`, so it should not be included in `clickhouse-lib`. [\#6538](https://github.com/ClickHouse/ClickHouse/pull/6538) ([Orivej Desh](https://github.com/orivej))
- Test for the crash in `FULL|RIGHT JOIN` with NULLs in the right table’s keys. [\#6362](https://github.com/ClickHouse/ClickHouse/pull/6362) ([Artem Zuikov](https://github.com/4ertus2))
- Added a test for the limit on expansion of aliases, just in case. [\#6442](https://github.com/ClickHouse/ClickHouse/pull/6442) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Switched from `boost::filesystem` to `std::filesystem` where appropriate. [\#6253](https://github.com/ClickHouse/ClickHouse/pull/6253) [\#6385](https://github.com/ClickHouse/ClickHouse/pull/6385) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added RPM packages to the website. [\#6251](https://github.com/ClickHouse/ClickHouse/pull/6251) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Add a test for the fixed `Unknown identifier` exception in the `IN` section. [\#6708](https://github.com/ClickHouse/ClickHouse/pull/6708) ([Artem Zuikov](https://github.com/4ertus2))
- Simplify `shared_ptr_helper` because people faced difficulties understanding it. [\#6675](https://github.com/ClickHouse/ClickHouse/pull/6675) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added performance tests for the fixed Gorilla and DoubleDelta codecs. [\#6179](https://github.com/ClickHouse/ClickHouse/pull/6179) ([Vasily Nemkov](https://github.com/Enmk))
- Split the integration test `test_dictionaries` into 4 separate tests. [\#6776](https://github.com/ClickHouse/ClickHouse/pull/6776) ([Vitaly Baranov](https://github.com/vitlibar))
- Fix a PVS-Studio warning in `PipelineExecutor`. [\#6777](https://github.com/ClickHouse/ClickHouse/pull/6777) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Allow using the `library` dictionary source with ASan. [\#6482](https://github.com/ClickHouse/ClickHouse/pull/6482) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added an option to generate the changelog from a list of PRs. [\#6350](https://github.com/ClickHouse/ClickHouse/pull/6350) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Lock the `TinyLog` storage when reading. [\#6226](https://github.com/ClickHouse/ClickHouse/pull/6226) ([akuzm](https://github.com/akuzm))
- Check for broken symlinks in CI. [\#6634](https://github.com/ClickHouse/ClickHouse/pull/6634) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Increase the timeout for the “stack overflow” test because it may take a long time in a debug build. [\#6637](https://github.com/ClickHouse/ClickHouse/pull/6637) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added a check for double whitespaces. [\#6643](https://github.com/ClickHouse/ClickHouse/pull/6643) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix `new/delete` memory tracking when building with sanitizers. The tracking is not exact; it only prevents memory limit exceptions in tests. [\#6450](https://github.com/ClickHouse/ClickHouse/pull/6450) ([Artem Zuikov](https://github.com/4ertus2))
- Re-enable the check of undefined symbols while linking. [\#6453](https://github.com/ClickHouse/ClickHouse/pull/6453) ([Ivan](https://github.com/abyss7))
- Avoid rebuilding `hyperscan` every day. [\#6307](https://github.com/ClickHouse/ClickHouse/pull/6307) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a UBSan report in `ProtobufWriter`. [\#6163](https://github.com/ClickHouse/ClickHouse/pull/6163) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Don’t allow using the query profiler with sanitizers because it is not compatible. [\#6769](https://github.com/ClickHouse/ClickHouse/pull/6769) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Add a test for reloading a dictionary by timer after a failure. [\#6114](https://github.com/ClickHouse/ClickHouse/pull/6114) ([Vitaly Baranov](https://github.com/vitlibar))
- Fix an inconsistency in the `PipelineExecutor::prepareProcessor` argument type. [\#6494](https://github.com/ClickHouse/ClickHouse/pull/6494) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Added a test for bad URIs. [\#6493](https://github.com/ClickHouse/ClickHouse/pull/6493) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added more checks to the `CAST` function. This should provide more information about the segmentation fault in the fuzz test. [\#6346](https://github.com/ClickHouse/ClickHouse/pull/6346) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Added `gcc-9` support to the `docker/builder` container that builds the image locally. [\#6333](https://github.com/ClickHouse/ClickHouse/pull/6333) ([Gleb Novikov](https://github.com/NanoBjorn))
- Test for primary key with `LowCardinality(String)`. [\#5044](https://github.com/ClickHouse/ClickHouse/issues/5044) [\#6219](https://github.com/ClickHouse/ClickHouse/pull/6219) ([dimarub2000](https://github.com/dimarub2000))
- Fixed tests affected by slow stack trace printing. [\#6315](https://github.com/ClickHouse/ClickHouse/pull/6315) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Add a test case for the crash in `groupUniqArray` fixed in [\#6029](https://github.com/ClickHouse/ClickHouse/pull/6029). [\#4402](https://github.com/ClickHouse/ClickHouse/issues/4402) [\#6129](https://github.com/ClickHouse/ClickHouse/pull/6129) ([akuzm](https://github.com/akuzm))
- Fixed index mutation tests. [\#6645](https://github.com/ClickHouse/ClickHouse/pull/6645) ([Nikita Vasilev](https://github.com/nikvas0))
- In performance tests, do not read the query log for queries we didn’t run. [\#6427](https://github.com/ClickHouse/ClickHouse/pull/6427) ([akuzm](https://github.com/akuzm))
- A materialized view can now be created with any LowCardinality types regardless of the setting about suspicious LowCardinality types. [\#6428](https://github.com/ClickHouse/ClickHouse/pull/6428) ([Olga Khvostikova](https://github.com/stavrolia))
- Updated tests for the `send_logs_level` setting. [\#6207](https://github.com/ClickHouse/ClickHouse/pull/6207) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fix build under gcc-8.2. [\#6196](https://github.com/ClickHouse/ClickHouse/pull/6196) ([Max Akhmedov](https://github.com/zlobober))
- Fix build with internal libc++. [\#6724](https://github.com/ClickHouse/ClickHouse/pull/6724) ([Ivan](https://github.com/abyss7))
- Fix shared build with the `rdkafka` library. [\#6101](https://github.com/ClickHouse/ClickHouse/pull/6101) ([Ivan](https://github.com/abyss7))
- Fixes for the Mac OS build (incomplete). [\#6390](https://github.com/ClickHouse/ClickHouse/pull/6390) ([alexey-milovidov](https://github.com/alexey-milovidov)) [\#6429](https://github.com/ClickHouse/ClickHouse/pull/6429) ([alex-zaitsev](https://github.com/alex-zaitsev))
- Fix the “splitted” build. [\#6618](https://github.com/ClickHouse/ClickHouse/pull/6618) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Other build fixes: [\#6186](https://github.com/ClickHouse/ClickHouse/pull/6186) ([Amos Bird](https://github.com/amosbird)) [\#6486](https://github.com/ClickHouse/ClickHouse/pull/6486) [\#6348](https://github.com/ClickHouse/ClickHouse/pull/6348) ([vxider](https://github.com/Vxider)) [\#6744](https://github.com/ClickHouse/ClickHouse/pull/6744) ([Ivan](https://github.com/abyss7)) [\#6016](https://github.com/ClickHouse/ClickHouse/pull/6016) [\#6421](https://github.com/ClickHouse/ClickHouse/pull/6421) [\#6491](https://github.com/ClickHouse/ClickHouse/pull/6491) ([proller](https://github.com/proller))
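
A quick usage sketch for the `numbers_mt` table function mentioned in the list above:

```sql
-- numbers_mt(N) is a multithreaded counterpart of numbers(N): it produces
-- the same single UInt64 column `number`, but generates it in parallel.
SELECT count() FROM numbers_mt(1000000000);
```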

#### Backward Incompatible Change {#backward-incompatible-change-3}

- Removed the rarely used table function `catBoostPool` and the storage `CatBoostPool`. If you have used this table function, please write an email to `clickhouse-feedback@yandex-team.com`. Note that CatBoost integration remains and will be supported. [\#6279](https://github.com/ClickHouse/ClickHouse/pull/6279) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Disable `ANY RIGHT JOIN` and `ANY FULL JOIN` by default. Set the `any_join_distinct_right_table_keys` setting to enable them (see the sketch after this list). [\#5126](https://github.com/ClickHouse/ClickHouse/issues/5126) [\#6351](https://github.com/ClickHouse/ClickHouse/pull/6351) ([Artem Zuikov](https://github.com/4ertus2))
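
A minimal sketch of restoring the old behaviour via the setting named above; `t1`, `t2` and `k` are assumptions:

```sql
-- ANY RIGHT JOIN and ANY FULL JOIN are disabled by default in this release.
SET any_join_distinct_right_table_keys = 1;

SELECT * FROM t1 ANY FULL JOIN t2 USING (k);
```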

## ClickHouse release 19.13 {#clickhouse-release-19-13}

### ClickHouse release 19.13.6.51, 2019-10-02 {#clickhouse-release-19-13-6-51-2019-10-02}

#### Bug Fix {#bug-fix-9}

- This release also contains all bug fixes from 19.11.12.69.

### ClickHouse release 19.13.5.44, 2019-09-20 {#clickhouse-release-19-13-5-44-2019-09-20}

#### Bug Fix {#bug-fix-10}

- This release also contains all bug fixes from 19.14.6.12.
- Fixed a possible inconsistent state of a table while executing a `DROP` query for a replicated table while ZooKeeper is not accessible. [\#6045](https://github.com/ClickHouse/ClickHouse/issues/6045) [\#6413](https://github.com/ClickHouse/ClickHouse/pull/6413) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
- Fix for a data race in StorageMerge. [\#6717](https://github.com/ClickHouse/ClickHouse/pull/6717) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix a bug introduced in the query profiler which led to an endless `recv` from a socket. [\#6386](https://github.com/ClickHouse/ClickHouse/pull/6386) ([alesapin](https://github.com/alesapin))
- Fix excessive CPU usage while executing the `JSONExtractRaw` function over a boolean value. [\#6208](https://github.com/ClickHouse/ClickHouse/pull/6208) ([Vitaly Baranov](https://github.com/vitlibar))
- Fixed a regression while pushing to a materialized view. [\#6415](https://github.com/ClickHouse/ClickHouse/pull/6415) ([Ivan](https://github.com/abyss7))
- The table function `url` had a vulnerability that allowed an attacker to inject arbitrary HTTP headers into the request. This issue was found by [Nikita Tikhomirov](https://github.com/NSTikhomirov). [\#6466](https://github.com/ClickHouse/ClickHouse/pull/6466) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix a useless `AST` check in the Set index. [\#6510](https://github.com/ClickHouse/ClickHouse/issues/6510) [\#6651](https://github.com/ClickHouse/ClickHouse/pull/6651) ([Nikita Vasilev](https://github.com/nikvas0))
- Fixed parsing of `AggregateFunction` values embedded in a query. [\#6575](https://github.com/ClickHouse/ClickHouse/issues/6575) [\#6773](https://github.com/ClickHouse/ClickHouse/pull/6773) ([Zhichang Yu](https://github.com/yuzhichang))
- Fixed wrong behaviour of the `trim` family of functions. [\#6647](https://github.com/ClickHouse/ClickHouse/pull/6647) ([alexey-milovidov](https://github.com/alexey-milovidov))

### ClickHouse release 19.13.4.32, 2019-09-10 {#clickhouse-release-19-13-4-32-2019-09-10}

#### Bug Fix {#bug-fix-11}

- This release also contains all security bug fixes from 19.11.9.52 and 19.11.10.54.
- Fixed a data race in the `system.parts` table and `ALTER` query. [\#6245](https://github.com/ClickHouse/ClickHouse/issues/6245) [\#6513](https://github.com/ClickHouse/ClickHouse/pull/6513) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed mismatched headers in streams that happened when reading from an empty distributed table with sample and prewhere. [\#6167](https://github.com/ClickHouse/ClickHouse/issues/6167) ([Lixiang Qian](https://github.com/fancyqlx)) [\#6823](https://github.com/ClickHouse/ClickHouse/pull/6823) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fixed a crash when using an `IN` clause with a subquery with a tuple. [\#6125](https://github.com/ClickHouse/ClickHouse/issues/6125) [\#6550](https://github.com/ClickHouse/ClickHouse/pull/6550) ([tavplubix](https://github.com/tavplubix))
- Fix the case with same column names in the `GLOBAL JOIN ON` section. [\#6181](https://github.com/ClickHouse/ClickHouse/pull/6181) ([Artem Zuikov](https://github.com/4ertus2))
- Fix a crash when casting types to `Decimal` that do not support it. Throw an exception instead. [\#6297](https://github.com/ClickHouse/ClickHouse/pull/6297) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed a crash in the `extractAll()` function. [\#6644](https://github.com/ClickHouse/ClickHouse/pull/6644) ([Artem Zuikov](https://github.com/4ertus2))
- Query transformation for the `MySQL`, `ODBC`, `JDBC` table functions now works properly for `SELECT WHERE` queries with multiple `AND` expressions. [\#6381](https://github.com/ClickHouse/ClickHouse/issues/6381) [\#6676](https://github.com/ClickHouse/ClickHouse/pull/6676) ([dimarub2000](https://github.com/dimarub2000))
- Added previous declaration checks for MySQL 8 integration. [\#6569](https://github.com/ClickHouse/ClickHouse/pull/6569) ([Rafael David Tinoco](https://github.com/rafaeldtinoco))

#### Security Fix {#security-fix-1}

- Fix two vulnerabilities in codecs in the decompression phase (a malicious user can fabricate compressed data that will lead to a buffer overflow in decompression). [\#6670](https://github.com/ClickHouse/ClickHouse/pull/6670) ([Artem Zuikov](https://github.com/4ertus2))

### ClickHouse release 19.13.3.26, 2019-08-22 {#clickhouse-release-19-13-3-26-2019-08-22}

#### Bug Fix {#bug-fix-12}

- Fix `ALTER TABLE ... UPDATE` query for tables with `enable_mixed_granularity_parts=1`. [\#6543](https://github.com/ClickHouse/ClickHouse/pull/6543) ([alesapin](https://github.com/alesapin))
- Fix NPE when using an IN clause with a subquery with a tuple. [\#6125](https://github.com/ClickHouse/ClickHouse/issues/6125) [\#6550](https://github.com/ClickHouse/ClickHouse/pull/6550) ([tavplubix](https://github.com/tavplubix))
- Fixed an issue where, if a stale replica became alive, it could still have data parts that were removed by DROP PARTITION. [\#6522](https://github.com/ClickHouse/ClickHouse/issues/6522) [\#6523](https://github.com/ClickHouse/ClickHouse/pull/6523) ([tavplubix](https://github.com/tavplubix))
- Fixed an issue with parsing CSV. [\#6426](https://github.com/ClickHouse/ClickHouse/issues/6426) [\#6559](https://github.com/ClickHouse/ClickHouse/pull/6559) ([tavplubix](https://github.com/tavplubix))
- Fixed a data race in the system.parts table and ALTER query. This fixes [\#6245](https://github.com/ClickHouse/ClickHouse/issues/6245). [\#6513](https://github.com/ClickHouse/ClickHouse/pull/6513) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed wrong code in mutations that may lead to memory corruption. Fixed a segfault with read of address `0x14c0` that may happen due to concurrent `DROP TABLE` and `SELECT` from `system.parts` or `system.parts_columns`. Fixed a race condition in the preparation of mutation queries. Fixed a deadlock caused by `OPTIMIZE` of Replicated tables and concurrent modification operations like ALTERs. [\#6514](https://github.com/ClickHouse/ClickHouse/pull/6514) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed possible data loss after an `ALTER DELETE` query on a table with a skipping index. [\#6224](https://github.com/ClickHouse/ClickHouse/issues/6224) [\#6282](https://github.com/ClickHouse/ClickHouse/pull/6282) ([Nikita Vasilev](https://github.com/nikvas0))

#### Security Fix {#security-fix-2}

- If an attacker has write access to ZooKeeper and is able to run a custom server available from the network where ClickHouse runs, they can create a custom-built malicious server that will act as a ClickHouse replica and register it in ZooKeeper. When another replica fetches a data part from the malicious replica, it can force clickhouse-server to write to an arbitrary path on the filesystem. Found by Eldar Zaitov, information security team at Yandex. [\#6247](https://github.com/ClickHouse/ClickHouse/pull/6247) ([alexey-milovidov](https://github.com/alexey-milovidov))

### ClickHouse release 19.13.2.19, 2019-08-14 {#clickhouse-release-19-13-2-19-2019-08-14}

#### New Feature {#new-feature-5}

- Sampling profiler on the query level. [Example](https://gist.github.com/alexey-milovidov/92758583dd41c24c360fdb8d6a4da194). [\#4247](https://github.com/ClickHouse/ClickHouse/issues/4247) ([laplab](https://github.com/laplab)) [\#6124](https://github.com/ClickHouse/ClickHouse/pull/6124) ([alexey-milovidov](https://github.com/alexey-milovidov)) [\#6250](https://github.com/ClickHouse/ClickHouse/pull/6250) [\#6283](https://github.com/ClickHouse/ClickHouse/pull/6283) [\#6386](https://github.com/ClickHouse/ClickHouse/pull/6386)
- Allow specifying a list of columns with the `COLUMNS('regexp')` expression that works like a more sophisticated variant of the `*` asterisk (see the example after this list). [\#5951](https://github.com/ClickHouse/ClickHouse/pull/5951) ([mfridental](https://github.com/mfridental)), ([alexey-milovidov](https://github.com/alexey-milovidov))
- `CREATE TABLE AS table_function()` is now possible. [\#6057](https://github.com/ClickHouse/ClickHouse/pull/6057) ([dimarub2000](https://github.com/dimarub2000))
- The Adam optimizer for stochastic gradient descent is used by default in the `stochasticLinearRegression()` and `stochasticLogisticRegression()` aggregate functions, because it shows good quality almost without any tuning. [\#6000](https://github.com/ClickHouse/ClickHouse/pull/6000) ([Quid37](https://github.com/Quid37))
- Added functions for working with custom week numbers. [\#5212](https://github.com/ClickHouse/ClickHouse/pull/5212) ([Andy Yang](https://github.com/andyyzh))
- `RENAME` queries now work with all storages. [\#5953](https://github.com/ClickHouse/ClickHouse/pull/5953) ([Ivan](https://github.com/abyss7))
- The client now receives logs from the server at any desired level by setting `send_logs_level`, regardless of the log level specified in the server settings. [\#5964](https://github.com/ClickHouse/ClickHouse/pull/5964) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
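
A sketch of the `COLUMNS('regexp')` expression from the feature list above; the table and its column names are assumptions:

```sql
-- Selects every column whose name matches the regular expression,
-- here all columns whose names start with 'a'.
SELECT COLUMNS('^a') FROM t;
```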

#### Backward Incompatible Change {#backward-incompatible-change-4}

- The setting `input_format_defaults_for_omitted_fields` is enabled by default. Inserts into Distributed tables need this setting to be the same across the cluster (you need to set it before the rolling update). It enables the calculation of complex default expressions for omitted fields in `JSONEachRow` and `CSV*` formats. It should be the expected behavior but may lead to a negligible performance difference. [\#6043](https://github.com/ClickHouse/ClickHouse/pull/6043) ([Artem Zuikov](https://github.com/4ertus2)), [\#5625](https://github.com/ClickHouse/ClickHouse/pull/5625) ([akuzm](https://github.com/akuzm))

#### Experimental features {#experimental-features}

- New query processing pipeline. Use the `experimental_use_processors=1` option to enable it. Use it at your own risk. [\#4914](https://github.com/ClickHouse/ClickHouse/pull/4914) ([Nikolai Kochetov](https://github.com/KochetovNicolai))

#### Bug Fix {#bug-fix-13}

- Kafka integration has been fixed in this version.
- Fixed `DoubleDelta` encoding of `Int64` for large `DoubleDelta` values, improved `DoubleDelta` encoding for random data for `Int32`. [\#5998](https://github.com/ClickHouse/ClickHouse/pull/5998) ([Vasily Nemkov](https://github.com/Enmk))
- Fixed overestimation of `max_rows_to_read` if the setting `merge_tree_uniform_read_distribution` is set to 0. [\#6019](https://github.com/ClickHouse/ClickHouse/pull/6019) ([alexey-milovidov](https://github.com/alexey-milovidov))

#### Improvement {#improvement-4}

- Throws an exception if a `config.d` file doesn’t have the same root element as the config file. [\#6123](https://github.com/ClickHouse/ClickHouse/pull/6123) ([dimarub2000](https://github.com/dimarub2000))

#### Performance Improvement {#performance-improvement-3}

- Optimize `count()`. Now it uses the smallest column (if possible). [\#6028](https://github.com/ClickHouse/ClickHouse/pull/6028) ([Amos Bird](https://github.com/amosbird))

#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-5}

- Report memory usage in performance tests. [\#5899](https://github.com/ClickHouse/ClickHouse/pull/5899) ([akuzm](https://github.com/akuzm))
- Fix build with external `libcxx`. [\#6010](https://github.com/ClickHouse/ClickHouse/pull/6010) ([Ivan](https://github.com/abyss7))
- Fix shared build with the `rdkafka` library. [\#6101](https://github.com/ClickHouse/ClickHouse/pull/6101) ([Ivan](https://github.com/abyss7))

## ClickHouse release 19.11 {#clickhouse-release-19-11}

### ClickHouse release 19.11.13.74, 2019-11-01 {#clickhouse-release-19-11-13-74-2019-11-01}

#### Bug Fix {#bug-fix-14}

- Fixed a rare crash in `ALTER MODIFY COLUMN` and vertical merge when one of the merged/altered parts is empty (0 rows). [\#6780](https://github.com/ClickHouse/ClickHouse/pull/6780) ([alesapin](https://github.com/alesapin))
- Manual update of `SIMDJSON`. This fixes possible flooding of stderr files with bogus JSON diagnostic messages. [\#7548](https://github.com/ClickHouse/ClickHouse/pull/7548) ([Alexander Kazakov](https://github.com/Akazz))
- Fixed a bug with the `mrk` file extension for mutations ([alesapin](https://github.com/alesapin))

### ClickHouse release 19.11.12.69, 2019-10-02 {#clickhouse-release-19-11-12-69-2019-10-02}

#### Bug Fix {#bug-fix-15}

- Fixed performance degradation of index analysis on complex keys on large tables. This fixes [\#6924](https://github.com/ClickHouse/ClickHouse/issues/6924). [\#7075](https://github.com/ClickHouse/ClickHouse/pull/7075) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Avoid a rare SIGSEGV while sending data in tables with the Distributed engine (`Failed to send batch: file with index XXXXX is absent`). [\#7032](https://github.com/ClickHouse/ClickHouse/pull/7032) ([Azat Khuzhin](https://github.com/azat))
- Fix `Unknown identifier` with multiple joins. This fixes [\#5254](https://github.com/ClickHouse/ClickHouse/issues/5254). [\#7022](https://github.com/ClickHouse/ClickHouse/pull/7022) ([Artem Zuikov](https://github.com/4ertus2))

### ClickHouse release 19.11.11.57, 2019-09-13 {#clickhouse-release-19-11-11-57-2019-09-13}

- Fix a logical error causing segfaults when selecting from an empty Kafka topic. [\#6902](https://github.com/ClickHouse/ClickHouse/issues/6902) [\#6909](https://github.com/ClickHouse/ClickHouse/pull/6909) ([Ivan](https://github.com/abyss7))
- Fix for the function `arrayEnumerateUniqRanked` with empty arrays in params. [\#6928](https://github.com/ClickHouse/ClickHouse/pull/6928) ([proller](https://github.com/proller))

### ClickHouse release 19.11.10.54, 2019-09-10 {#clickhouse-release-19-11-10-54-2019-09-10}

#### Bug Fix {#bug-fix-16}

- Store offsets for Kafka messages manually to be able to commit them all at once for all partitions. Fixes potential duplication in the “one consumer - many partitions” scenario. [\#6872](https://github.com/ClickHouse/ClickHouse/pull/6872) ([Ivan](https://github.com/abyss7))

### ClickHouse release 19.11.9.52, 2019-09-6 {#clickhouse-release-19-11-9-52-2019-09-6}

- Improve error handling in cache dictionaries. [\#6737](https://github.com/ClickHouse/ClickHouse/pull/6737) ([Vitaly Baranov](https://github.com/vitlibar))
- Fixed a bug in the function `arrayEnumerateUniqRanked`. [\#6779](https://github.com/ClickHouse/ClickHouse/pull/6779) ([proller](https://github.com/proller))
- Fix the `JSONExtract` function while extracting a `Tuple` from JSON (see the example after this list). [\#6718](https://github.com/ClickHouse/ClickHouse/pull/6718) ([Vitaly Baranov](https://github.com/vitlibar))
- Fixed possible data loss after an `ALTER DELETE` query on a table with a skipping index. [\#6224](https://github.com/ClickHouse/ClickHouse/issues/6224) [\#6282](https://github.com/ClickHouse/ClickHouse/pull/6282) ([Nikita Vasilev](https://github.com/nikvas0))
- Fixed performance test. [\#6392](https://github.com/ClickHouse/ClickHouse/pull/6392) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Parquet: Fix reading boolean columns. [\#6579](https://github.com/ClickHouse/ClickHouse/pull/6579) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed wrong behaviour of the `nullIf` function for constant arguments. [\#6518](https://github.com/ClickHouse/ClickHouse/pull/6518) ([Guillaume Tassery](https://github.com/YiuRULE)) [\#6580](https://github.com/ClickHouse/ClickHouse/pull/6580) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix the Kafka message duplication problem on normal server restart. [\#6597](https://github.com/ClickHouse/ClickHouse/pull/6597) ([Ivan](https://github.com/abyss7))
- Fixed an issue where a long `ALTER UPDATE` or `ALTER DELETE` could prevent regular merges from running. Prevent mutations from executing if there are not enough free threads available. [\#6502](https://github.com/ClickHouse/ClickHouse/issues/6502) [\#6617](https://github.com/ClickHouse/ClickHouse/pull/6617) ([tavplubix](https://github.com/tavplubix))
- Fixed an error with processing “timezone” in the server configuration file. [\#6709](https://github.com/ClickHouse/ClickHouse/pull/6709) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix Kafka tests. [\#6805](https://github.com/ClickHouse/ClickHouse/pull/6805) ([Ivan](https://github.com/abyss7))
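
To illustrate the `JSONExtract`-with-`Tuple` fix from the list above, a minimal query; the JSON literal and tuple type are assumptions based on the function's documented behaviour:

```sql
-- Extract a whole JSON object into a named Tuple in one call.
SELECT JSONExtract('{"a": 1, "b": "hello"}', 'Tuple(a Int64, b String)');
```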
[\#6247](https://github.com/ClickHouse/ClickHouse/pull/6247) ([alexey-milovidov](https://github.com/alexey-milovidov)) - -### ClickHouse release 19.11.8.46, 2019-08-22 {#clickhouse-release-19-11-8-46-2019-08-22} - -#### Bug Fix {#bug-fix-17} - -- Fix `ALTER TABLE ... UPDATE` query for tables with `enable_mixed_granularity_parts=1`. [\#6543](https://github.com/ClickHouse/ClickHouse/pull/6543) ([alesapin](https://github.com/alesapin)) -- Fix NPE when using IN clause with a subquery with a tuple. [\#6125](https://github.com/ClickHouse/ClickHouse/issues/6125) [\#6550](https://github.com/ClickHouse/ClickHouse/pull/6550) ([tavplubix](https://github.com/tavplubix)) -- Fixed an issue that if a stale replica becomes alive, it may still have data parts that were removed by DROP PARTITION. [\#6522](https://github.com/ClickHouse/ClickHouse/issues/6522) [\#6523](https://github.com/ClickHouse/ClickHouse/pull/6523) ([tavplubix](https://github.com/tavplubix)) -- Fixed issue with parsing CSV [\#6426](https://github.com/ClickHouse/ClickHouse/issues/6426) [\#6559](https://github.com/ClickHouse/ClickHouse/pull/6559) ([tavplubix](https://github.com/tavplubix)) -- Fixed data race in system.parts table and ALTER query. This fixes [\#6245](https://github.com/ClickHouse/ClickHouse/issues/6245). [\#6513](https://github.com/ClickHouse/ClickHouse/pull/6513) ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Fixed wrong code in mutations that may lead to memory corruption. Fixed segfault with read of address `0x14c0` that may happed due to concurrent `DROP TABLE` and `SELECT` from `system.parts` or `system.parts_columns`. Fixed race condition in preparation of mutation queries. Fixed deadlock caused by `OPTIMIZE` of Replicated tables and concurrent modification operations like ALTERs. [\#6514](https://github.com/ClickHouse/ClickHouse/pull/6514) ([alexey-milovidov](https://github.com/alexey-milovidov)) - -### ClickHouse release 19.11.7.40, 2019-08-14 {#clickhouse-release-19-11-7-40-2019-08-14} - -#### Bug fix {#bug-fix-18} - -- Kafka integration has been fixed in this version. -- Fix segfault when using `arrayReduce` for constant arguments. [\#6326](https://github.com/ClickHouse/ClickHouse/pull/6326) ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Fixed `toFloat()` monotonicity. [\#6374](https://github.com/ClickHouse/ClickHouse/pull/6374) ([dimarub2000](https://github.com/dimarub2000)) -- Fix segfault with enabled `optimize_skip_unused_shards` and missing sharding key. [\#6384](https://github.com/ClickHouse/ClickHouse/pull/6384) ([CurtizJ](https://github.com/CurtizJ)) -- Fixed logic of `arrayEnumerateUniqRanked` function. [\#6423](https://github.com/ClickHouse/ClickHouse/pull/6423) ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Removed extra verbose logging from MySQL handler. [\#6389](https://github.com/ClickHouse/ClickHouse/pull/6389) ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Fix wrong behavior and possible segfaults in `topK` and `topKWeighted` aggregated functions. [\#6404](https://github.com/ClickHouse/ClickHouse/pull/6404) ([CurtizJ](https://github.com/CurtizJ)) -- Do not expose virtual columns in `system.columns` table. This is required for backward compatibility. [\#6406](https://github.com/ClickHouse/ClickHouse/pull/6406) ([alexey-milovidov](https://github.com/alexey-milovidov)) -- Fix bug with memory allocation for string fields in complex key cache dictionary. 
- Fix bug with enabling adaptive granularity when creating a new replica for a `Replicated*MergeTree` table. [\#6452](https://github.com/ClickHouse/ClickHouse/pull/6452) ([alesapin](https://github.com/alesapin))
- Fix infinite loop when reading Kafka messages. [\#6354](https://github.com/ClickHouse/ClickHouse/pull/6354) ([abyss7](https://github.com/abyss7))
- Fixed the possibility of a fabricated query causing a server crash due to stack overflow in the SQL parser, and the possibility of stack overflow in `Merge` and `Distributed` tables. [\#6433](https://github.com/ClickHouse/ClickHouse/pull/6433) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed Gorilla encoding error on small sequences. [\#6444](https://github.com/ClickHouse/ClickHouse/pull/6444) ([Enmk](https://github.com/Enmk))

#### Improvement {#improvement-5}

- Allow user to override `poll_interval` and `idle_connection_timeout` settings on connection. [\#6230](https://github.com/ClickHouse/ClickHouse/pull/6230) ([alexey-milovidov](https://github.com/alexey-milovidov))

### ClickHouse release 19.11.5.28, 2019-08-05 {#clickhouse-release-19-11-5-28-2019-08-05}

#### Bug fix {#bug-fix-19}

- Fixed the possibility of hanging queries when server is overloaded. [\#6301](https://github.com/ClickHouse/ClickHouse/pull/6301) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix FPE in yandexConsistentHash function. This fixes [\#6304](https://github.com/ClickHouse/ClickHouse/issues/6304). [\#6126](https://github.com/ClickHouse/ClickHouse/pull/6126) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed bug in conversion of `LowCardinality` types in `AggregateFunctionFactory`. This fixes [\#6257](https://github.com/ClickHouse/ClickHouse/issues/6257). [\#6281](https://github.com/ClickHouse/ClickHouse/pull/6281) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fix parsing of `bool` settings from `true` and `false` strings in configuration files. [\#6278](https://github.com/ClickHouse/ClickHouse/pull/6278) ([alesapin](https://github.com/alesapin))
- Fix rare bug with incompatible stream headers in queries to `Distributed` table over `MergeTree` table when part of `WHERE` moves to `PREWHERE`. [\#6236](https://github.com/ClickHouse/ClickHouse/pull/6236) ([alesapin](https://github.com/alesapin))
- Fixed overflow in integer division of signed type to unsigned type. This fixes [\#6214](https://github.com/ClickHouse/ClickHouse/issues/6214). [\#6233](https://github.com/ClickHouse/ClickHouse/pull/6233) ([alexey-milovidov](https://github.com/alexey-milovidov))

#### Backward Incompatible Change {#backward-incompatible-change-5}

- `Kafka` is still broken.

### ClickHouse release 19.11.4.24, 2019-08-01 {#clickhouse-release-19-11-4-24-2019-08-01}

#### Bug Fix {#bug-fix-20}

- Fix bug with writing secondary indices marks with adaptive granularity. [\#6126](https://github.com/ClickHouse/ClickHouse/pull/6126) ([alesapin](https://github.com/alesapin))
- Fix `WITH ROLLUP` and `WITH CUBE` modifiers of `GROUP BY` with two-level aggregation. [\#6225](https://github.com/ClickHouse/ClickHouse/pull/6225) ([Anton Popov](https://github.com/CurtizJ))
- Fixed hang in `JSONExtractRaw` function; a brief example follows. Fixed [\#6195](https://github.com/ClickHouse/ClickHouse/issues/6195) [\#6198](https://github.com/ClickHouse/ClickHouse/pull/6198) ([alexey-milovidov](https://github.com/alexey-milovidov))
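For reference, a minimal sketch of the function whose hang was fixed above (the JSON literal is illustrative):

```sql
-- JSONExtractRaw returns the raw, unparsed JSON fragment under the given key
SELECT JSONExtractRaw('{"a": {"b": 1}}', 'a');
-- '{"b": 1}'
```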
- Fix segfault in ExternalLoader::reloadOutdated(). [\#6082](https://github.com/ClickHouse/ClickHouse/pull/6082) ([Vitaly Baranov](https://github.com/vitlibar))
- Fixed the case when the server may close listening sockets but not shut down, and continue serving remaining queries. You may end up with two running clickhouse-server processes. Sometimes, the server may return an error `bad_function_call` for remaining queries. [\#6231](https://github.com/ClickHouse/ClickHouse/pull/6231) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed useless and incorrect condition on update field for initial loading of external dictionaries via ODBC, MySQL, ClickHouse and HTTP. This fixes [\#6069](https://github.com/ClickHouse/ClickHouse/issues/6069) [\#6083](https://github.com/ClickHouse/ClickHouse/pull/6083) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed irrelevant exception in cast of `LowCardinality(Nullable)` to a not-Nullable column in case it doesn’t contain Nulls (e.g. in a query like `SELECT CAST(CAST('Hello' AS LowCardinality(Nullable(String))) AS String)`). [\#6094](https://github.com/ClickHouse/ClickHouse/issues/6094) [\#6119](https://github.com/ClickHouse/ClickHouse/pull/6119) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fix non-deterministic result of “uniq” aggregate function in extremely rare cases. The bug was present in all ClickHouse versions. [\#6058](https://github.com/ClickHouse/ClickHouse/pull/6058) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed segfault when setting a slightly too high CIDR in the function `IPv6CIDRToRange` (see the sketch at the end of this section). [\#6068](https://github.com/ClickHouse/ClickHouse/pull/6068) ([Guillaume Tassery](https://github.com/YiuRULE))
- Fixed small memory leak when server throws many exceptions from many different contexts. [\#6144](https://github.com/ClickHouse/ClickHouse/pull/6144) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix the situation when a consumer got paused before subscription and was not resumed afterwards. [\#6075](https://github.com/ClickHouse/ClickHouse/pull/6075) ([Ivan](https://github.com/abyss7)) Note that Kafka is broken in this version.
- Clear the Kafka data buffer from a previous read operation that was completed with an error. [\#6026](https://github.com/ClickHouse/ClickHouse/pull/6026) ([Nikolay](https://github.com/bopohaa)) Note that Kafka is broken in this version.
- Since `StorageMergeTree::background_task_handle` is initialized in `startup()`, `MergeTreeBlockOutputStream::write()` may try to use it before initialization. Just check if it is initialized. [\#6080](https://github.com/ClickHouse/ClickHouse/pull/6080) ([Ivan](https://github.com/abyss7))

#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-6}

- Added official `rpm` packages. [\#5740](https://github.com/ClickHouse/ClickHouse/pull/5740) ([proller](https://github.com/proller)) ([alesapin](https://github.com/alesapin))
- Add an ability to build `.rpm` and `.tgz` packages with `packager` script. [\#5769](https://github.com/ClickHouse/ClickHouse/pull/5769) ([alesapin](https://github.com/alesapin))
- Fixes for “Arcadia” build system. [\#6223](https://github.com/ClickHouse/ClickHouse/pull/6223) ([proller](https://github.com/proller))
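A sketch of the `IPv6CIDRToRange` case referenced above; the fixed crash involved CIDR values above 128, while a normal call looks like this (the address is illustrative):

```sql
-- Returns the (lower, upper) IPv6 bounds of the subnet for the given CIDR
SELECT IPv6CIDRToRange(toIPv6('2001:0db8::8a2e:0370:7334'), 32);
```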
#### Backward Incompatible Change {#backward-incompatible-change-6}

- `Kafka` is broken in this version.

### ClickHouse release 19.11.3.11, 2019-07-18 {#clickhouse-release-19-11-3-11-2019-07-18}

#### New Feature {#new-feature-6}

- Added support for prepared statements. [\#5331](https://github.com/ClickHouse/ClickHouse/pull/5331/) ([Alexander](https://github.com/sanych73)) [\#5630](https://github.com/ClickHouse/ClickHouse/pull/5630) ([alexey-milovidov](https://github.com/alexey-milovidov))
- `DoubleDelta` and `Gorilla` column codecs. [\#5600](https://github.com/ClickHouse/ClickHouse/pull/5600) ([Vasily Nemkov](https://github.com/Enmk))
- Added `os_thread_priority` setting that allows to control the “nice” value of query processing threads that is used by OS to adjust dynamic scheduling priority. It requires `CAP_SYS_NICE` capabilities to work. This implements [\#5858](https://github.com/ClickHouse/ClickHouse/issues/5858) [\#5909](https://github.com/ClickHouse/ClickHouse/pull/5909) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Implement `_topic`, `_offset`, `_key` columns for Kafka engine. [\#5382](https://github.com/ClickHouse/ClickHouse/pull/5382) ([Ivan](https://github.com/abyss7)) Note that Kafka is broken in this version.
- Add aggregate function combinator `-Resample`. [\#5590](https://github.com/ClickHouse/ClickHouse/pull/5590) ([hcz](https://github.com/hczhcz))
- Aggregate functions `groupArrayMovingSum(win_size)(x)` and `groupArrayMovingAvg(win_size)(x)`, which calculate moving sum/avg with or without window-size limitation (see the sketch below). [\#5595](https://github.com/ClickHouse/ClickHouse/pull/5595) ([inv2004](https://github.com/inv2004))
- Add synonym `arrayFlatten` \<-\> `flatten`. [\#5764](https://github.com/ClickHouse/ClickHouse/pull/5764) ([hcz](https://github.com/hczhcz))
- Integrate H3 function `geoToH3` from Uber. [\#4724](https://github.com/ClickHouse/ClickHouse/pull/4724) ([Remen Ivan](https://github.com/BHYCHIK)) [\#5805](https://github.com/ClickHouse/ClickHouse/pull/5805) ([alexey-milovidov](https://github.com/alexey-milovidov))

#### Bug Fix {#bug-fix-21}

- Implement DNS cache with asynchronous update. A separate thread resolves all hosts and updates the DNS cache periodically (setting `dns_cache_update_period`). It should help when IP addresses of hosts change frequently. [\#5857](https://github.com/ClickHouse/ClickHouse/pull/5857) ([Anton Popov](https://github.com/CurtizJ))
- Fix segfault in `Delta` codec which affects columns with values smaller than 32 bits. The bug led to random memory corruption. [\#5786](https://github.com/ClickHouse/ClickHouse/pull/5786) ([alesapin](https://github.com/alesapin))
- Fix segfault in TTL merge with non-physical columns in block. [\#5819](https://github.com/ClickHouse/ClickHouse/pull/5819) ([Anton Popov](https://github.com/CurtizJ))
- Fix rare bug in checking of part with `LowCardinality` column. Previously `checkDataPart` always failed for a part with a `LowCardinality` column. [\#5832](https://github.com/ClickHouse/ClickHouse/pull/5832) ([alesapin](https://github.com/alesapin))
- Avoid hanging connections when the server thread pool is full. It is important for connections from the `remote` table function or connections to a shard without replicas when there is a long connection timeout. This fixes [\#5878](https://github.com/ClickHouse/ClickHouse/issues/5878) [\#5881](https://github.com/ClickHouse/ClickHouse/pull/5881) ([alexey-milovidov](https://github.com/alexey-milovidov))
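A short sketch of the moving-window aggregates introduced in the New Feature list above (data is illustrative):

```sql
-- Moving sum with a window of 3 over the values 0..4
SELECT groupArrayMovingSum(3)(number) FROM numbers(5);
-- [0, 1, 3, 6, 9]
```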
- Support for constant arguments to `evalMLModel` function. This fixes [\#5817](https://github.com/ClickHouse/ClickHouse/issues/5817) [\#5820](https://github.com/ClickHouse/ClickHouse/pull/5820) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed the issue when ClickHouse determines the default time zone as `UCT` instead of `UTC`. This fixes [\#5804](https://github.com/ClickHouse/ClickHouse/issues/5804). [\#5828](https://github.com/ClickHouse/ClickHouse/pull/5828) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed buffer underflow in `visitParamExtractRaw`. This fixes [\#5901](https://github.com/ClickHouse/ClickHouse/issues/5901) [\#5902](https://github.com/ClickHouse/ClickHouse/pull/5902) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Now distributed `DROP/ALTER/TRUNCATE/OPTIMIZE ON CLUSTER` queries will be executed directly on the leader replica. [\#5757](https://github.com/ClickHouse/ClickHouse/pull/5757) ([alesapin](https://github.com/alesapin))
- Fix `coalesce` for `ColumnConst` with `ColumnNullable` + related changes. [\#5755](https://github.com/ClickHouse/ClickHouse/pull/5755) ([Artem Zuikov](https://github.com/4ertus2))
- Fix the `ReadBufferFromKafkaConsumer` so that it keeps reading new messages after `commit()` even if it was stalled before. [\#5852](https://github.com/ClickHouse/ClickHouse/pull/5852) ([Ivan](https://github.com/abyss7))
- Fix `FULL` and `RIGHT` JOIN results when joining on `Nullable` keys in the right table. [\#5859](https://github.com/ClickHouse/ClickHouse/pull/5859) ([Artem Zuikov](https://github.com/4ertus2))
- Possible fix of infinite sleeping of low-priority queries. [\#5842](https://github.com/ClickHouse/ClickHouse/pull/5842) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix race condition which caused some queries not to appear in query\_log after a `SYSTEM FLUSH LOGS` query. [\#5456](https://github.com/ClickHouse/ClickHouse/issues/5456) [\#5685](https://github.com/ClickHouse/ClickHouse/pull/5685) ([Anton Popov](https://github.com/CurtizJ))
- Fixed `heap-use-after-free` ASan warning in ClusterCopier caused by a watch that tried to use an already removed copier object. [\#5871](https://github.com/ClickHouse/ClickHouse/pull/5871) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fixed wrong `StringRef` pointer returned by some implementations of `IColumn::deserializeAndInsertFromArena`. This bug affected only unit-tests. [\#5973](https://github.com/ClickHouse/ClickHouse/pull/5973) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Prevent source and intermediate array join columns from masking same-name columns. [\#5941](https://github.com/ClickHouse/ClickHouse/pull/5941) ([Artem Zuikov](https://github.com/4ertus2))
- Fix insert and select query to MySQL engine with MySQL-style identifier quoting. [\#5704](https://github.com/ClickHouse/ClickHouse/pull/5704) ([Winter Zhang](https://github.com/zhang2014))
- Now the `CHECK TABLE` query can work with the MergeTree engine family. It returns check status and a message, if any, for each part (or file in case of simpler engines); a brief example follows. Also, fix bug in fetch of a broken part. [\#5865](https://github.com/ClickHouse/ClickHouse/pull/5865) ([alesapin](https://github.com/alesapin))
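A quick illustration of the `CHECK TABLE` support described above (the table name is hypothetical):

```sql
-- For MergeTree family tables, reports a check status and an optional message per part
CHECK TABLE my_merge_tree_table;
```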
- Fix SPLIT\_SHARED\_LIBRARIES runtime. [\#5793](https://github.com/ClickHouse/ClickHouse/pull/5793) ([Danila Kutenin](https://github.com/danlark1))
- Fixed time zone initialization when `/etc/localtime` is a relative symlink like `../usr/share/zoneinfo/Europe/Moscow`. [\#5922](https://github.com/ClickHouse/ClickHouse/pull/5922) ([alexey-milovidov](https://github.com/alexey-milovidov))
- clickhouse-copier: Fix use-after-free on shutdown. [\#5752](https://github.com/ClickHouse/ClickHouse/pull/5752) ([proller](https://github.com/proller))
- Updated `simdjson`. Fixed the issue that some invalid JSONs with zero bytes parsed successfully. [\#5938](https://github.com/ClickHouse/ClickHouse/pull/5938) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix shutdown of SystemLogs. [\#5802](https://github.com/ClickHouse/ClickHouse/pull/5802) ([Anton Popov](https://github.com/CurtizJ))
- Fix hanging when the condition in invalidate\_query depends on a dictionary. [\#6011](https://github.com/ClickHouse/ClickHouse/pull/6011) ([Vitaly Baranov](https://github.com/vitlibar))

#### Improvement {#improvement-6}

- Allow unresolvable addresses in cluster configuration. They will be considered unavailable, and resolution will be retried at every connection attempt. This is especially useful for Kubernetes. This fixes [\#5714](https://github.com/ClickHouse/ClickHouse/issues/5714) [\#5924](https://github.com/ClickHouse/ClickHouse/pull/5924) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Close idle TCP connections (with a one hour timeout by default). This is especially important for large clusters with multiple distributed tables on every server, because every server can possibly keep a connection pool to every other server, and after peak query concurrency, connections will stall. This fixes [\#5879](https://github.com/ClickHouse/ClickHouse/issues/5879) [\#5880](https://github.com/ClickHouse/ClickHouse/pull/5880) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Better quality of `topK` function. Changed the SpaceSaving set behavior to remove the last element if the new element has a bigger weight. [\#5833](https://github.com/ClickHouse/ClickHouse/issues/5833) [\#5850](https://github.com/ClickHouse/ClickHouse/pull/5850) ([Guillaume Tassery](https://github.com/YiuRULE))
- URL functions that work with domains now can work for incomplete URLs without a scheme. [\#5725](https://github.com/ClickHouse/ClickHouse/pull/5725) ([alesapin](https://github.com/alesapin))
- Checksums added to the `system.parts_columns` table. [\#5874](https://github.com/ClickHouse/ClickHouse/pull/5874) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
- Added `Enum` data type as a synonym for `Enum8` or `Enum16`. [\#5886](https://github.com/ClickHouse/ClickHouse/pull/5886) ([dimarub2000](https://github.com/dimarub2000))
- Full bit transpose variant for `T64` codec. Could lead to better compression with `zstd`. [\#5742](https://github.com/ClickHouse/ClickHouse/pull/5742) ([Artem Zuikov](https://github.com/4ertus2))
- A condition on the `startsWith` function can now use the primary key, as sketched below. This fixes [\#5310](https://github.com/ClickHouse/ClickHouse/issues/5310) and [\#5882](https://github.com/ClickHouse/ClickHouse/issues/5882) [\#5919](https://github.com/ClickHouse/ClickHouse/pull/5919) ([dimarub2000](https://github.com/dimarub2000))
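A minimal sketch of the `startsWith` improvement, assuming a hypothetical table whose primary key starts with the string column `url`:

```sql
-- With ORDER BY (url, ...) in the table definition,
-- this filter can now be served by the primary key index
SELECT count() FROM hits WHERE startsWith(url, 'https://');
```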
- Allow to use `clickhouse-copier` with cross-replication cluster topology by permitting an empty database name. [\#5745](https://github.com/ClickHouse/ClickHouse/pull/5745) ([nvartolomei](https://github.com/nvartolomei))
- Use `UTC` as the default timezone on a system without `tzdata` (e.g. a bare Docker container). Before this patch, the error message `Could not determine local time zone` was printed, and the server or client refused to start. [\#5827](https://github.com/ClickHouse/ClickHouse/pull/5827) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Returned support for a floating point argument in function `quantileTiming` for backward compatibility. [\#5911](https://github.com/ClickHouse/ClickHouse/pull/5911) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Show which table is missing a column in error messages. [\#5768](https://github.com/ClickHouse/ClickHouse/pull/5768) ([Ivan](https://github.com/abyss7))
- Disallow running queries with the same query\_id by various users. [\#5430](https://github.com/ClickHouse/ClickHouse/pull/5430) ([proller](https://github.com/proller))
- More robust code for sending metrics to Graphite. It will work even during a long multiple-table `RENAME TABLE` operation. [\#5875](https://github.com/ClickHouse/ClickHouse/pull/5875) ([alexey-milovidov](https://github.com/alexey-milovidov))
- More informative error messages will be displayed when ThreadPool cannot schedule a task for execution. This fixes [\#5305](https://github.com/ClickHouse/ClickHouse/issues/5305) [\#5801](https://github.com/ClickHouse/ClickHouse/pull/5801) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Inverted ngramSearch to be more intuitive. [\#5807](https://github.com/ClickHouse/ClickHouse/pull/5807) ([Danila Kutenin](https://github.com/danlark1))
- Add user parsing in HDFS engine builder. [\#5946](https://github.com/ClickHouse/ClickHouse/pull/5946) ([akonyaev90](https://github.com/akonyaev90))
- Update default value of the `max_ast_elements` parameter. [\#5933](https://github.com/ClickHouse/ClickHouse/pull/5933) ([Artem Konovalov](https://github.com/izebit))
- Added a notion of obsolete settings. The obsolete setting `allow_experimental_low_cardinality_type` can be used with no effect. [0f15c01c6802f7ce1a1494c12c846be8c98944cd](https://github.com/ClickHouse/ClickHouse/commit/0f15c01c6802f7ce1a1494c12c846be8c98944cd) [Alexey Milovidov](https://github.com/alexey-milovidov)

#### Performance Improvement {#performance-improvement-4}

- Increase the number of streams to SELECT from a Merge table for a more uniform distribution of threads. Added setting `max_streams_multiplier_for_merge_tables`. This fixes [\#5797](https://github.com/ClickHouse/ClickHouse/issues/5797) [\#5915](https://github.com/ClickHouse/ClickHouse/pull/5915) ([alexey-milovidov](https://github.com/alexey-milovidov))

#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-7}

- Add a backward compatibility test for client-server interaction with different versions of ClickHouse. [\#5868](https://github.com/ClickHouse/ClickHouse/pull/5868) ([alesapin](https://github.com/alesapin))
- Test coverage information in every commit and pull request. [\#5896](https://github.com/ClickHouse/ClickHouse/pull/5896) ([alesapin](https://github.com/alesapin))
- Cooperate with address sanitizer to support our custom allocators (`Arena` and `ArenaWithFreeLists`) for better debugging of “use-after-free” errors. [\#5728](https://github.com/ClickHouse/ClickHouse/pull/5728) ([akuzm](https://github.com/akuzm))
- Switch to [LLVM libunwind implementation](https://github.com/llvm-mirror/libunwind) for C++ exception handling and for stack trace printing. [\#4828](https://github.com/ClickHouse/ClickHouse/pull/4828) ([Nikita Lapkov](https://github.com/laplab))
- Add two more warnings from -Weverything. [\#5923](https://github.com/ClickHouse/ClickHouse/pull/5923) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Allow to build ClickHouse with Memory Sanitizer. [\#3949](https://github.com/ClickHouse/ClickHouse/pull/3949) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed ubsan report about `bitTest` function in fuzz test. [\#5943](https://github.com/ClickHouse/ClickHouse/pull/5943) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Docker: added possibility to init a ClickHouse instance which requires authentication. [\#5727](https://github.com/ClickHouse/ClickHouse/pull/5727) ([Korviakov Andrey](https://github.com/shurshun))
- Update librdkafka to version 1.1.0. [\#5872](https://github.com/ClickHouse/ClickHouse/pull/5872) ([Ivan](https://github.com/abyss7))
- Add global timeout for integration tests and disable some of them in the test code. [\#5741](https://github.com/ClickHouse/ClickHouse/pull/5741) ([alesapin](https://github.com/alesapin))
- Fix some ThreadSanitizer failures. [\#5854](https://github.com/ClickHouse/ClickHouse/pull/5854) ([akuzm](https://github.com/akuzm))
- The `--no-undefined` option forces the linker to check all external names for existence while linking. It’s very useful to track real dependencies between libraries in the split build mode. [\#5855](https://github.com/ClickHouse/ClickHouse/pull/5855) ([Ivan](https://github.com/abyss7))
- Added performance test for [\#5797](https://github.com/ClickHouse/ClickHouse/issues/5797) [\#5914](https://github.com/ClickHouse/ClickHouse/pull/5914) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed compatibility with gcc-7. [\#5840](https://github.com/ClickHouse/ClickHouse/pull/5840) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added support for gcc-9. This fixes [\#5717](https://github.com/ClickHouse/ClickHouse/issues/5717) [\#5774](https://github.com/ClickHouse/ClickHouse/pull/5774) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed error when libunwind could be linked incorrectly. [\#5948](https://github.com/ClickHouse/ClickHouse/pull/5948) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a few warnings found by PVS-Studio. [\#5921](https://github.com/ClickHouse/ClickHouse/pull/5921) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added initial support for `clang-tidy` static analyzer. [\#5806](https://github.com/ClickHouse/ClickHouse/pull/5806) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Convert BSD/Linux endian macros (`be64toh` and `htobe64`) to the Mac OS X equivalents. [\#5785](https://github.com/ClickHouse/ClickHouse/pull/5785) ([Fu Chen](https://github.com/fredchenbj))
- Improved integration tests guide. [\#5796](https://github.com/ClickHouse/ClickHouse/pull/5796) ([Vladimir Chebotarev](https://github.com/excitoon))
- Fix build on macOS + gcc9. [\#5822](https://github.com/ClickHouse/ClickHouse/pull/5822) ([filimonov](https://github.com/filimonov))
- Fix a hard-to-spot typo: aggreAGte -\> aggregate. [\#5753](https://github.com/ClickHouse/ClickHouse/pull/5753) ([akuzm](https://github.com/akuzm))
- Fix FreeBSD build. [\#5760](https://github.com/ClickHouse/ClickHouse/pull/5760) ([proller](https://github.com/proller))
- Add link to experimental YouTube channel to website. [\#5845](https://github.com/ClickHouse/ClickHouse/pull/5845) ([Ivan Blinkov](https://github.com/blinkov))
- CMake: add option for coverage flags: WITH\_COVERAGE. [\#5776](https://github.com/ClickHouse/ClickHouse/pull/5776) ([proller](https://github.com/proller))
- Fix initial size of some inline PODArrays. [\#5787](https://github.com/ClickHouse/ClickHouse/pull/5787) ([akuzm](https://github.com/akuzm))
- clickhouse-server.postinst: fix OS detection for CentOS 6. [\#5788](https://github.com/ClickHouse/ClickHouse/pull/5788) ([proller](https://github.com/proller))
- Added Arch Linux package generation. [\#5719](https://github.com/ClickHouse/ClickHouse/pull/5719) ([Vladimir Chebotarev](https://github.com/excitoon))
- Split Common/config.h by libs (dbms). [\#5715](https://github.com/ClickHouse/ClickHouse/pull/5715) ([proller](https://github.com/proller))
- Fixes for “Arcadia” build platform. [\#5795](https://github.com/ClickHouse/ClickHouse/pull/5795) ([proller](https://github.com/proller))
- Fixes for unconventional build (gcc9, no submodules). [\#5792](https://github.com/ClickHouse/ClickHouse/pull/5792) ([proller](https://github.com/proller))
- Require explicit type in unalignedStore because it was proven to be bug-prone. [\#5791](https://github.com/ClickHouse/ClickHouse/pull/5791) ([akuzm](https://github.com/akuzm))
- Fix macOS build. [\#5830](https://github.com/ClickHouse/ClickHouse/pull/5830) ([filimonov](https://github.com/filimonov))
- Performance test concerning the new JIT feature with a bigger dataset, as requested here [\#5263](https://github.com/ClickHouse/ClickHouse/issues/5263) [\#5887](https://github.com/ClickHouse/ClickHouse/pull/5887) ([Guillaume Tassery](https://github.com/YiuRULE))
- Run stateful tests in stress test. [12693e568722f11e19859742f56428455501fd2a](https://github.com/ClickHouse/ClickHouse/commit/12693e568722f11e19859742f56428455501fd2a) ([alesapin](https://github.com/alesapin))

#### Backward Incompatible Change {#backward-incompatible-change-7}

- `Kafka` is broken in this version.
- Enable `adaptive_index_granularity` = 10MB by default for new `MergeTree` tables. If you created new MergeTree tables on version 19.11+, a downgrade to versions prior to 19.6 will be impossible. [\#5628](https://github.com/ClickHouse/ClickHouse/pull/5628) ([alesapin](https://github.com/alesapin))
- Removed obsolete undocumented embedded dictionaries that were used by Yandex.Metrica. The functions `OSIn`, `SEIn`, `OSToRoot`, `SEToRoot`, `OSHierarchy`, `SEHierarchy` are no longer available. If you are using these functions, write an email to clickhouse-feedback@yandex-team.com. Note: at the last moment we decided to keep these functions for a while. [\#5780](https://github.com/ClickHouse/ClickHouse/pull/5780) ([alexey-milovidov](https://github.com/alexey-milovidov))
## ClickHouse release 19.10 {#clickhouse-release-19-10}

### ClickHouse release 19.10.1.5, 2019-07-12 {#clickhouse-release-19-10-1-5-2019-07-12}

#### New Feature {#new-feature-7}

- Add new column codec: `T64`. Made for (U)IntX/EnumX/Date(Time)/DecimalX columns. It should be good for columns with constant or small-range values. The codec itself allows enlarging or shrinking the data type without re-compression (see the sketch below). [\#5557](https://github.com/ClickHouse/ClickHouse/pull/5557) ([Artem Zuikov](https://github.com/4ertus2))
- Add database engine `MySQL` that allows to view all the tables in a remote MySQL server. [\#5599](https://github.com/ClickHouse/ClickHouse/pull/5599) ([Winter Zhang](https://github.com/zhang2014))
- `bitmapContains` implementation. It’s 2x faster than `bitmapHasAny` if the second bitmap contains one element. [\#5535](https://github.com/ClickHouse/ClickHouse/pull/5535) ([Zhichang Yu](https://github.com/yuzhichang))
- Support for `crc32` function (with behaviour exactly as in MySQL or PHP). Do not use it if you need a hash function. [\#5661](https://github.com/ClickHouse/ClickHouse/pull/5661) ([Remen Ivan](https://github.com/BHYCHIK))
- Implemented `SYSTEM START/STOP DISTRIBUTED SENDS` queries to control asynchronous inserts into `Distributed` tables. [\#4935](https://github.com/ClickHouse/ClickHouse/pull/4935) ([Winter Zhang](https://github.com/zhang2014))

#### Bug Fix {#bug-fix-22}

- Ignore query execution limits and max parts size for merge limits while executing mutations. [\#5659](https://github.com/ClickHouse/ClickHouse/pull/5659) ([Anton Popov](https://github.com/CurtizJ))
- Fix bug which may lead to deduplication of normal blocks (extremely rare) and insertion of duplicate blocks (more often). [\#5549](https://github.com/ClickHouse/ClickHouse/pull/5549) ([alesapin](https://github.com/alesapin))
- Fix function `arrayEnumerateUniqRanked` for arguments with empty arrays. [\#5559](https://github.com/ClickHouse/ClickHouse/pull/5559) ([proller](https://github.com/proller))
- Don’t subscribe to Kafka topics without intent to poll any messages. [\#5698](https://github.com/ClickHouse/ClickHouse/pull/5698) ([Ivan](https://github.com/abyss7))
- Make setting `join_use_nulls` have no effect for types that cannot be inside Nullable. [\#5700](https://github.com/ClickHouse/ClickHouse/pull/5700) ([Olga Khvostikova](https://github.com/stavrolia))
- Fixed `Incorrect size of index granularity` errors. [\#5720](https://github.com/ClickHouse/ClickHouse/pull/5720) ([coraxster](https://github.com/coraxster))
- Fix overflow in Float-to-Decimal conversion. [\#5607](https://github.com/ClickHouse/ClickHouse/pull/5607) ([coraxster](https://github.com/coraxster))
- Flush buffer when `WriteBufferFromHDFS`’s destructor is called. This fixes writing into `HDFS`. [\#5684](https://github.com/ClickHouse/ClickHouse/pull/5684) ([Xindong Peng](https://github.com/eejoin))

#### Improvement {#improvement-7}

- Treat empty cells in `CSV` as default values when the setting `input_format_defaults_for_omitted_fields` is enabled. [\#5625](https://github.com/ClickHouse/ClickHouse/pull/5625) ([akuzm](https://github.com/akuzm))
- Non-blocking loading of external dictionaries. [\#5567](https://github.com/ClickHouse/ClickHouse/pull/5567) ([Vitaly Baranov](https://github.com/vitlibar))
- Network timeouts can be dynamically changed for already established connections according to the settings. [\#4558](https://github.com/ClickHouse/ClickHouse/pull/4558) ([Konstantin Podshumok](https://github.com/podshumok))
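A sketch of the `T64` codec usage mentioned in the New Feature list above (the table and column names are hypothetical):

```sql
-- T64 can be chained with a general-purpose codec such as LZ4
CREATE TABLE codec_example
(
    x UInt32 CODEC(T64, LZ4)
)
ENGINE = MergeTree
ORDER BY x;
```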
- Using “public\_suffix\_list” for functions `firstSignificantSubdomain`, `cutToFirstSignificantSubdomain`. It’s using a perfect hash table generated by `gperf` with a list generated from the file: https://publicsuffix.org/list/public\_suffix\_list.dat (for example, now we recognize the domain `ac.uk` as non-significant). [\#5030](https://github.com/ClickHouse/ClickHouse/pull/5030) ([Guillaume Tassery](https://github.com/YiuRULE))
- Adopted `IPv6` data type in system tables; unified client info columns in `system.processes` and `system.query_log`. [\#5640](https://github.com/ClickHouse/ClickHouse/pull/5640) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Using sessions for connections with MySQL compatibility protocol. \#5476 [\#5646](https://github.com/ClickHouse/ClickHouse/pull/5646) ([Yuriy Baranov](https://github.com/yurriy))
- Support more `ALTER` queries `ON CLUSTER`. [\#5593](https://github.com/ClickHouse/ClickHouse/pull/5593) [\#5613](https://github.com/ClickHouse/ClickHouse/pull/5613) ([sundyli](https://github.com/sundy-li))
- Support `` section in `clickhouse-local` config file. [\#5540](https://github.com/ClickHouse/ClickHouse/pull/5540) ([proller](https://github.com/proller))
- Allow running queries with the `remote` table function in `clickhouse-local`. [\#5627](https://github.com/ClickHouse/ClickHouse/pull/5627) ([proller](https://github.com/proller))

#### Performance Improvement {#performance-improvement-5}

- Add the possibility to write the final mark at the end of MergeTree columns. It allows avoiding useless reads for keys that are out of the table data range. It is enabled only if adaptive index granularity is in use. [\#5624](https://github.com/ClickHouse/ClickHouse/pull/5624) ([alesapin](https://github.com/alesapin))
- Improved performance of MergeTree tables on very slow filesystems by reducing the number of `stat` syscalls. [\#5648](https://github.com/ClickHouse/ClickHouse/pull/5648) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed performance degradation in reading from MergeTree tables that was introduced in version 19.6. Fixes \#5631. [\#5633](https://github.com/ClickHouse/ClickHouse/pull/5633) ([alexey-milovidov](https://github.com/alexey-milovidov))

#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-8}

- Implemented `TestKeeper` as an implementation of the ZooKeeper interface used for testing. [\#5643](https://github.com/ClickHouse/ClickHouse/pull/5643) ([alexey-milovidov](https://github.com/alexey-milovidov)) ([levushkin aleksej](https://github.com/alexey-milovidov))
- From now on `.sql` tests can be run isolated by server, in parallel, with a random database. This allows running them faster, adding new tests with custom server configurations, and being sure that different tests don’t affect each other. [\#5554](https://github.com/ClickHouse/ClickHouse/pull/5554) ([Ivan](https://github.com/abyss7))
- Remove `` and `` from performance tests. [\#5672](https://github.com/ClickHouse/ClickHouse/pull/5672) ([Olga Khvostikova](https://github.com/stavrolia))
- Fixed “select\_format” performance test for `Pretty` formats. [\#5642](https://github.com/ClickHouse/ClickHouse/pull/5642) ([alexey-milovidov](https://github.com/alexey-milovidov))

## ClickHouse release 19.9 {#clickhouse-release-19-9}

### ClickHouse release 19.9.3.31, 2019-07-05 {#clickhouse-release-19-9-3-31-2019-07-05}

#### Bug Fix {#bug-fix-23}

- Fix segfault in Delta codec which affects columns with values smaller than 32 bits. The bug led to random memory corruption. [\#5786](https://github.com/ClickHouse/ClickHouse/pull/5786) ([alesapin](https://github.com/alesapin))
- Fix rare bug in checking of part with LowCardinality column. [\#5832](https://github.com/ClickHouse/ClickHouse/pull/5832) ([alesapin](https://github.com/alesapin))
- Fix segfault in TTL merge with non-physical columns in block. [\#5819](https://github.com/ClickHouse/ClickHouse/pull/5819) ([Anton Popov](https://github.com/CurtizJ))
- Fix potential infinite sleeping of low-priority queries. [\#5842](https://github.com/ClickHouse/ClickHouse/pull/5842) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix the issue when ClickHouse determined the default time zone as UCT instead of UTC. [\#5828](https://github.com/ClickHouse/ClickHouse/pull/5828) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix a bug where distributed DROP/ALTER/TRUNCATE/OPTIMIZE ON CLUSTER queries were executed on a follower replica before the leader replica. Now they will be executed directly on the leader replica. [\#5757](https://github.com/ClickHouse/ClickHouse/pull/5757) ([alesapin](https://github.com/alesapin))
- Fix race condition which caused some queries not to appear in query\_log instantly after a SYSTEM FLUSH LOGS query. [\#5685](https://github.com/ClickHouse/ClickHouse/pull/5685) ([Anton Popov](https://github.com/CurtizJ))
- Added missing support for constant arguments to the `evalMLModel` function. [\#5820](https://github.com/ClickHouse/ClickHouse/pull/5820) ([alexey-milovidov](https://github.com/alexey-milovidov))

### ClickHouse release 19.9.2.4, 2019-06-24 {#clickhouse-release-19-9-2-4-2019-06-24}

#### New Feature {#new-feature-8}

- Print information about frozen parts in `system.parts` table. [\#5471](https://github.com/ClickHouse/ClickHouse/pull/5471) ([proller](https://github.com/proller))
- Ask for the client password on clickhouse-client start on a TTY if it is not set in arguments. [\#5092](https://github.com/ClickHouse/ClickHouse/pull/5092) ([proller](https://github.com/proller))
- Implement `dictGet` and `dictGetOrDefault` functions for Decimal types (see the sketch below). [\#5394](https://github.com/ClickHouse/ClickHouse/pull/5394) ([Artem Zuikov](https://github.com/4ertus2))

#### Improvement {#improvement-8}

- Debian init: Add service stop timeout. [\#5522](https://github.com/ClickHouse/ClickHouse/pull/5522) ([proller](https://github.com/proller))
- Add a setting, forbidden by default, to create tables with suspicious types for LowCardinality. [\#5448](https://github.com/ClickHouse/ClickHouse/pull/5448) ([Olga Khvostikova](https://github.com/stavrolia))
- Regression functions return model weights when not used as State in function `evalMLMethod`. [\#5411](https://github.com/ClickHouse/ClickHouse/pull/5411) ([Quid37](https://github.com/Quid37))
- Rename and improve regression methods. [\#5492](https://github.com/ClickHouse/ClickHouse/pull/5492) ([Quid37](https://github.com/Quid37))
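A minimal sketch of the new Decimal support in dictionary functions (the dictionary and attribute names are hypothetical):

```sql
-- Fetch a Decimal attribute, falling back to a default when the key is missing
SELECT dictGetOrDefault('price_dict', 'price', toUInt64(42), toDecimal64(0, 4));
```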
- Clearer interfaces of string searchers. [\#5586](https://github.com/ClickHouse/ClickHouse/pull/5586) ([Danila Kutenin](https://github.com/danlark1))

#### Bug Fix {#bug-fix-24}

- Fix potential data loss in Kafka. [\#5445](https://github.com/ClickHouse/ClickHouse/pull/5445) ([Ivan](https://github.com/abyss7))
- Fix potential infinite loop in `PrettySpace` format when called with zero columns. [\#5560](https://github.com/ClickHouse/ClickHouse/pull/5560) ([Olga Khvostikova](https://github.com/stavrolia))
- Fixed UInt32 overflow bug in linear models. Allow evaluating an ML model for a non-constant model argument. [\#5516](https://github.com/ClickHouse/ClickHouse/pull/5516) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- `ALTER TABLE ... DROP INDEX IF EXISTS ...` should not raise an exception if the provided index does not exist. [\#5524](https://github.com/ClickHouse/ClickHouse/pull/5524) ([Gleb Novikov](https://github.com/NanoBjorn))
- Fix segfault with `bitmapHasAny` in scalar subquery. [\#5528](https://github.com/ClickHouse/ClickHouse/pull/5528) ([Zhichang Yu](https://github.com/yuzhichang))
- Fixed error when the replication connection pool didn’t retry to resolve the host, even when the DNS cache was dropped. [\#5534](https://github.com/ClickHouse/ClickHouse/pull/5534) ([alesapin](https://github.com/alesapin))
- Fixed `ALTER ... MODIFY TTL` on ReplicatedMergeTree. [\#5539](https://github.com/ClickHouse/ClickHouse/pull/5539) ([Anton Popov](https://github.com/CurtizJ))
- Fix INSERT into Distributed table with MATERIALIZED column. [\#5429](https://github.com/ClickHouse/ClickHouse/pull/5429) ([Azat Khuzhin](https://github.com/azat))
- Fix bad alloc when truncating Join storage. [\#5437](https://github.com/ClickHouse/ClickHouse/pull/5437) ([TCeason](https://github.com/TCeason))
- In recent versions of the tzdata package, some files are now symlinks. The current mechanism for detecting the default timezone got broken and gave wrong names for some timezones. Now at least we force the timezone name to the contents of TZ if provided. [\#5443](https://github.com/ClickHouse/ClickHouse/pull/5443) ([Ivan](https://github.com/abyss7))
- Fix some extremely rare cases with the MultiVolnitsky searcher when the constant needles in sum are at least 16KB long. The algorithm missed or overwrote the previous results, which could lead to an incorrect result of `multiSearchAny`. [\#5588](https://github.com/ClickHouse/ClickHouse/pull/5588) ([Danila Kutenin](https://github.com/danlark1))
- Fix the issue when settings for ExternalData requests couldn’t use ClickHouse settings. Also, for now, settings `date_time_input_format` and `low_cardinality_allow_in_native_format` cannot be used because of the ambiguity of names (in external data it can be interpreted as table format and in the query it can be a setting). [\#5455](https://github.com/ClickHouse/ClickHouse/pull/5455) ([Danila Kutenin](https://github.com/danlark1))
- Fix bug when parts were removed only from FS without dropping them from Zookeeper. [\#5520](https://github.com/ClickHouse/ClickHouse/pull/5520) ([alesapin](https://github.com/alesapin))
- Remove debug logging from MySQL protocol. [\#5478](https://github.com/ClickHouse/ClickHouse/pull/5478) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Skip ZNONODE during DDL query processing. [\#5489](https://github.com/ClickHouse/ClickHouse/pull/5489) ([Azat Khuzhin](https://github.com/azat))
- Fix mixing of `UNION ALL` result column types. There were cases with inconsistent data and column types of resulting columns. [\#5503](https://github.com/ClickHouse/ClickHouse/pull/5503) ([Artem Zuikov](https://github.com/4ertus2))
- Throw an exception on wrong integers in `dictGetT` functions instead of crashing. [\#5446](https://github.com/ClickHouse/ClickHouse/pull/5446) ([Artem Zuikov](https://github.com/4ertus2))
- Fix wrong element\_count and load\_factor for hashed dictionary in `system.dictionaries` table. [\#5440](https://github.com/ClickHouse/ClickHouse/pull/5440) ([Azat Khuzhin](https://github.com/azat))

#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-9}

- Fixed build without `Brotli` HTTP compression support (`ENABLE_BROTLI=OFF` cmake variable). [\#5521](https://github.com/ClickHouse/ClickHouse/pull/5521) ([Anton Yuzhaninov](https://github.com/citrin))
- Include roaring.h as roaring/roaring.h. [\#5523](https://github.com/ClickHouse/ClickHouse/pull/5523) ([Orivej Desh](https://github.com/orivej))
- Fix gcc9 warnings in hyperscan (\#line directive is evil!). [\#5546](https://github.com/ClickHouse/ClickHouse/pull/5546) ([Danila Kutenin](https://github.com/danlark1))
- Fix all warnings when compiling with gcc-9. Fix some contrib issues. Fix gcc9 ICE and submit it to bugzilla. [\#5498](https://github.com/ClickHouse/ClickHouse/pull/5498) ([Danila Kutenin](https://github.com/danlark1))
- Fixed linking with lld. [\#5477](https://github.com/ClickHouse/ClickHouse/pull/5477) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Remove unused specializations in dictionaries. [\#5452](https://github.com/ClickHouse/ClickHouse/pull/5452) ([Artem Zuikov](https://github.com/4ertus2))
- Improve performance tests for formatting and parsing tables for different types of files. [\#5497](https://github.com/ClickHouse/ClickHouse/pull/5497) ([Olga Khvostikova](https://github.com/stavrolia))
- Fixes for parallel test run. [\#5506](https://github.com/ClickHouse/ClickHouse/pull/5506) ([proller](https://github.com/proller))
- Docker: use configs from clickhouse-test. [\#5531](https://github.com/ClickHouse/ClickHouse/pull/5531) ([proller](https://github.com/proller))
- Fix compile for FreeBSD. [\#5447](https://github.com/ClickHouse/ClickHouse/pull/5447) ([proller](https://github.com/proller))
- Upgrade boost to 1.70. [\#5570](https://github.com/ClickHouse/ClickHouse/pull/5570) ([proller](https://github.com/proller))
- Fix building ClickHouse as a submodule. [\#5574](https://github.com/ClickHouse/ClickHouse/pull/5574) ([proller](https://github.com/proller))
- Improve JSONExtract performance tests. [\#5444](https://github.com/ClickHouse/ClickHouse/pull/5444) ([Vitaly Baranov](https://github.com/vitlibar))

## ClickHouse release 19.8 {#clickhouse-release-19-8}

### ClickHouse release 19.8.3.8, 2019-06-11 {#clickhouse-release-19-8-3-8-2019-06-11}

#### New Features {#new-features}

- Added functions to work with JSON (see the example below). [\#4686](https://github.com/ClickHouse/ClickHouse/pull/4686) ([hcz](https://github.com/hczhcz)) [\#5124](https://github.com/ClickHouse/ClickHouse/pull/5124). ([Vitaly Baranov](https://github.com/vitlibar))
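A brief illustration of the new JSON functions (the JSON literal is illustrative):

```sql
SELECT
    JSONHas('{"a": 1, "b": "text"}', 'b'),           -- 1
    JSONExtractString('{"a": 1, "b": "text"}', 'b'), -- 'text'
    JSONExtractInt('{"a": 1, "b": "text"}', 'a');    -- 1
```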
- Add a function `basename`, with behaviour similar to the basename function that exists in a lot of languages (`os.path.basename` in Python, `basename` in PHP, etc…). Works with both UNIX-like paths and Windows paths. [\#5136](https://github.com/ClickHouse/ClickHouse/pull/5136) ([Guillaume Tassery](https://github.com/YiuRULE))
- Added `LIMIT n, m BY` or `LIMIT m OFFSET n BY` syntax to set an offset of n for the LIMIT BY clause. [\#5138](https://github.com/ClickHouse/ClickHouse/pull/5138) ([Anton Popov](https://github.com/CurtizJ))
- Added new data type `SimpleAggregateFunction`, which allows to have columns with light aggregation in an `AggregatingMergeTree`. This can only be used with simple functions like `any`, `anyLast`, `sum`, `min`, `max`. [\#4629](https://github.com/ClickHouse/ClickHouse/pull/4629) ([Boris Granveaud](https://github.com/bgranvea))
- Added support for non-constant arguments in function `ngramDistance`. [\#5198](https://github.com/ClickHouse/ClickHouse/pull/5198) ([Danila Kutenin](https://github.com/danlark1))
- Added functions `skewPop`, `skewSamp`, `kurtPop` and `kurtSamp` to compute sequence skewness, sample skewness, kurtosis and sample kurtosis respectively. [\#5200](https://github.com/ClickHouse/ClickHouse/pull/5200) ([hcz](https://github.com/hczhcz))
- Support rename operation for `MaterializeView` storage. [\#5209](https://github.com/ClickHouse/ClickHouse/pull/5209) ([Guillaume Tassery](https://github.com/YiuRULE))
- Added server which allows connecting to ClickHouse using a MySQL client. [\#4715](https://github.com/ClickHouse/ClickHouse/pull/4715) ([Yuriy Baranov](https://github.com/yurriy))
- Add `toDecimal*OrZero` and `toDecimal*OrNull` functions. [\#5291](https://github.com/ClickHouse/ClickHouse/pull/5291) ([Artem Zuikov](https://github.com/4ertus2))
- Support Decimal types in functions: `quantile`, `quantiles`, `median`, `quantileExactWeighted`, `quantilesExactWeighted`, `medianExactWeighted`. [\#5304](https://github.com/ClickHouse/ClickHouse/pull/5304) ([Artem Zuikov](https://github.com/4ertus2))
- Added `toValidUTF8` function, which replaces all invalid UTF-8 characters by the replacement character � (U+FFFD). [\#5322](https://github.com/ClickHouse/ClickHouse/pull/5322) ([Danila Kutenin](https://github.com/danlark1))
- Added `format` function. Formats a constant pattern (simplified Python format pattern) with the strings listed in the arguments. [\#5330](https://github.com/ClickHouse/ClickHouse/pull/5330) ([Danila Kutenin](https://github.com/danlark1))
- Added `system.detached_parts` table containing information about detached parts of `MergeTree` tables. [\#5353](https://github.com/ClickHouse/ClickHouse/pull/5353) ([akuzm](https://github.com/akuzm))
- Added `ngramSearch` function to calculate the non-symmetric difference between needle and haystack. [\#5418](https://github.com/ClickHouse/ClickHouse/pull/5418)[\#5422](https://github.com/ClickHouse/ClickHouse/pull/5422) ([Danila Kutenin](https://github.com/danlark1))
- Implementation of basic machine learning methods (stochastic linear regression and logistic regression) using the aggregate functions interface. Has different strategies for updating model weights (simple gradient descent, momentum method, Nesterov method). Also supports mini-batches of custom size; a short sketch follows. [\#4943](https://github.com/ClickHouse/ClickHouse/pull/4943) ([Quid37](https://github.com/Quid37))
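A minimal sketch of the machine learning interface described above, assuming a hypothetical training table `train_data` with columns `target`, `param1`, `param2` and a test table `test_data`:

```sql
-- Train: store the aggregate function state of a stochastic linear regression
-- (parameters: learning rate, l2 regularization, mini-batch size, update method)
CREATE TABLE trained_model ENGINE = Memory AS
SELECT stochasticLinearRegressionState(0.1, 0.0, 5, 'SGD')(target, param1, param2) AS state
FROM train_data;

-- Predict: apply the stored state to new feature values
WITH (SELECT state FROM trained_model) AS model
SELECT evalMLMethod(model, param1, param2) FROM test_data;
```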
- Implementation of `geohashEncode` and `geohashDecode` functions. [\#5003](https://github.com/ClickHouse/ClickHouse/pull/5003) ([Vasily Nemkov](https://github.com/Enmk))
- Added aggregate function `timeSeriesGroupSum`, which can aggregate different time series whose sample timestamps are not aligned. It will use linear interpolation between two sample timestamps and then sum the time series together. Added aggregate function `timeSeriesGroupRateSum`, which calculates the rate of time series and then sums the rates together. [\#4542](https://github.com/ClickHouse/ClickHouse/pull/4542) ([Yangkuan Liu](https://github.com/LiuYangkuan))
- Added functions `IPv4CIDRtoIPv4Range` and `IPv6CIDRtoIPv6Range` to calculate the lower and higher bounds for an IP in the subnet using a CIDR. [\#5095](https://github.com/ClickHouse/ClickHouse/pull/5095) ([Guillaume Tassery](https://github.com/YiuRULE))
- Add an X-ClickHouse-Summary header when we send a query using HTTP with the setting `send_progress_in_http_headers` enabled. Return the usual information of X-ClickHouse-Progress, with additional information like how many rows and bytes were inserted in the query. [\#5116](https://github.com/ClickHouse/ClickHouse/pull/5116) ([Guillaume Tassery](https://github.com/YiuRULE))

#### Improvements {#improvements}

- Added `max_parts_in_total` setting for the MergeTree family of tables (default: 100 000) that prevents unsafe specification of partition key \#5166. [\#5171](https://github.com/ClickHouse/ClickHouse/pull/5171) ([alexey-milovidov](https://github.com/alexey-milovidov))
- `clickhouse-obfuscator`: derive the seed for individual columns by combining the initial seed with the column name, not the column position. This is intended to transform datasets with multiple related tables, so that tables will remain JOINable after transformation. [\#5178](https://github.com/ClickHouse/ClickHouse/pull/5178) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added functions `JSONExtractRaw`, `JSONExtractKeyAndValues`. Renamed functions `jsonExtract` to `JSONExtract`. When something goes wrong these functions return the corresponding values, not `NULL`. Modified function `JSONExtract`, now it gets the return type from its last parameter and doesn’t inject nullables. Implemented fallback to RapidJSON in case AVX2 instructions are not available. Simdjson library updated to a new version. [\#5235](https://github.com/ClickHouse/ClickHouse/pull/5235) ([Vitaly Baranov](https://github.com/vitlibar))
- Now `if` and `multiIf` functions don’t rely on the condition’s `Nullable`, but rely on the branches for SQL compatibility. [\#5238](https://github.com/ClickHouse/ClickHouse/pull/5238) ([Jian Wu](https://github.com/janplus))
- `In` predicate now generates a `Null` result from `Null` input like the `Equal` function. [\#5152](https://github.com/ClickHouse/ClickHouse/pull/5152) ([Jian Wu](https://github.com/janplus))
- Check the time limit every (flush\_interval / poll\_timeout) number of rows from Kafka. This allows breaking the reading from the Kafka consumer more frequently and checking the time limits for the top-level streams. [\#5249](https://github.com/ClickHouse/ClickHouse/pull/5249) ([Ivan](https://github.com/abyss7))
- Link rdkafka with bundled SASL. It should allow using SASL SCRAM authentication. [\#5253](https://github.com/ClickHouse/ClickHouse/pull/5253) ([Ivan](https://github.com/abyss7))
- Batched version of RowRefList for ALL JOINS. [\#5267](https://github.com/ClickHouse/ClickHouse/pull/5267) ([Artem Zuikov](https://github.com/4ertus2))
- clickhouse-server: more informative listen error messages. [\#5268](https://github.com/ClickHouse/ClickHouse/pull/5268) ([proller](https://github.com/proller))
- Support dictionaries in clickhouse-copier for functions in ``. [\#5270](https://github.com/ClickHouse/ClickHouse/pull/5270) ([proller](https://github.com/proller))
- Add new setting `kafka_commit_every_batch` to regulate Kafka committing policy. It allows setting the commit mode: after every batch of messages is handled, or after the whole block is written to the storage. It’s a trade-off between losing some messages or reading them twice in some extreme situations. [\#5308](https://github.com/ClickHouse/ClickHouse/pull/5308) ([Ivan](https://github.com/abyss7))
- Make `windowFunnel` support other unsigned integer types. [\#5320](https://github.com/ClickHouse/ClickHouse/pull/5320) ([sundyli](https://github.com/sundy-li))
- Allow to shadow virtual column `_table` in Merge engine. [\#5325](https://github.com/ClickHouse/ClickHouse/pull/5325) ([Ivan](https://github.com/abyss7))
- Make `sequenceMatch` aggregate functions support other unsigned integer types. [\#5339](https://github.com/ClickHouse/ClickHouse/pull/5339) ([sundyli](https://github.com/sundy-li))
- Better error messages if a checksum mismatch is most likely caused by hardware failures. [\#5355](https://github.com/ClickHouse/ClickHouse/pull/5355) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Check that underlying tables support sampling for `StorageMerge`. [\#5366](https://github.com/ClickHouse/ClickHouse/pull/5366) ([Ivan](https://github.com/abyss7))
- Close MySQL connections after their usage in external dictionaries. It is related to issue \#893. [\#5395](https://github.com/ClickHouse/ClickHouse/pull/5395) ([Clément Rodriguez](https://github.com/clemrodriguez))
- Improvements of MySQL Wire Protocol. Changed name of format to MySQLWire. Using RAII for calling RSA\_free. Disabling SSL if context cannot be created. [\#5419](https://github.com/ClickHouse/ClickHouse/pull/5419) ([Yuriy Baranov](https://github.com/yurriy))
- clickhouse-client: allow running with an inaccessible history file (read-only, no disk space, file is a directory, …). [\#5431](https://github.com/ClickHouse/ClickHouse/pull/5431) ([proller](https://github.com/proller))
- Respect query settings in asynchronous INSERTs into Distributed tables. [\#4936](https://github.com/ClickHouse/ClickHouse/pull/4936) ([TCeason](https://github.com/TCeason))
- Renamed functions `leastSqr` to `simpleLinearRegression`, `LinearRegression` to `linearRegression`, `LogisticRegression` to `logisticRegression`. [\#5391](https://github.com/ClickHouse/ClickHouse/pull/5391) ([Nikolai Kochetov](https://github.com/KochetovNicolai))

#### Performance Improvements {#performance-improvements}

- Parallelize processing of parts of non-replicated MergeTree tables in the ALTER MODIFY query. [\#4639](https://github.com/ClickHouse/ClickHouse/pull/4639) ([Ivan Kush](https://github.com/IvanKush))
- Optimizations in regular expressions extraction. [\#5193](https://github.com/ClickHouse/ClickHouse/pull/5193) [\#5191](https://github.com/ClickHouse/ClickHouse/pull/5191) ([Danila Kutenin](https://github.com/danlark1))
- Do not add the right join key column to the join result if it’s used only in the JOIN ON section. [\#5260](https://github.com/ClickHouse/ClickHouse/pull/5260) ([Artem Zuikov](https://github.com/4ertus2))
- Freeze the Kafka buffer after the first empty response. It avoids multiple invocations of `ReadBuffer::next()` for an empty result in some row-parsing streams. [\#5283](https://github.com/ClickHouse/ClickHouse/pull/5283) ([Ivan](https://github.com/abyss7))
- `concat` function optimization for multiple arguments. [\#5357](https://github.com/ClickHouse/ClickHouse/pull/5357) ([Danila Kutenin](https://github.com/danlark1))
- Query optimisation. Allow pushing down an IN statement while rewriting a comma/cross join into an inner one. [\#5396](https://github.com/ClickHouse/ClickHouse/pull/5396) ([Artem Zuikov](https://github.com/4ertus2))
- Upgrade our LZ4 implementation with the reference one to have faster decompression. [\#5070](https://github.com/ClickHouse/ClickHouse/pull/5070) ([Danila Kutenin](https://github.com/danlark1))
- Implemented MSD radix sort (based on kxsort), and partial sorting. [\#5129](https://github.com/ClickHouse/ClickHouse/pull/5129) ([Evgenii Pravda](https://github.com/kvinty))

#### Bug Fixes {#bug-fixes}

- Fix push require columns with join. [\#5192](https://github.com/ClickHouse/ClickHouse/pull/5192) ([Winter Zhang](https://github.com/zhang2014))
- Fixed a bug where, when ClickHouse is run by systemd, the command `sudo service clickhouse-server forcerestart` was not working as expected. [\#5204](https://github.com/ClickHouse/ClickHouse/pull/5204) ([proller](https://github.com/proller))
- Fix http error codes in DataPartsExchange (the interserver http server on port 9009 always returned code 200, even on errors). [\#5216](https://github.com/ClickHouse/ClickHouse/pull/5216) ([proller](https://github.com/proller))
- Fix SimpleAggregateFunction for String longer than MAX\_SMALL\_STRING\_SIZE. [\#5311](https://github.com/ClickHouse/ClickHouse/pull/5311) ([Azat Khuzhin](https://github.com/azat))
- Fix error for `Decimal` to `Nullable(Decimal)` conversion in IN. Support other Decimal to Decimal conversions (including different scales). [\#5350](https://github.com/ClickHouse/ClickHouse/pull/5350) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed FPU clobbering in the simdjson library that led to wrong calculation of the `uniqHLL` and `uniqCombined` aggregate functions and math functions such as `log`. [\#5354](https://github.com/ClickHouse/ClickHouse/pull/5354) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed handling mixed const/nonconst cases in JSON functions. [\#5435](https://github.com/ClickHouse/ClickHouse/pull/5435) ([Vitaly Baranov](https://github.com/vitlibar))
- Fix `retention` function. Now all conditions that are satisfied in a row of data are added to the data state. [\#5119](https://github.com/ClickHouse/ClickHouse/pull/5119) ([小路](https://github.com/nicelulu))
- Fix result type for `quantileExact` with Decimals. [\#5304](https://github.com/ClickHouse/ClickHouse/pull/5304) ([Artem Zuikov](https://github.com/4ertus2))

#### Documentation {#documentation}

- Translate documentation for `CollapsingMergeTree` to Chinese. [\#5168](https://github.com/ClickHouse/ClickHouse/pull/5168) ([张风啸](https://github.com/AlexZFX))
- Translate some documentation about table engines to Chinese. [\#5134](https://github.com/ClickHouse/ClickHouse/pull/5134) [\#5328](https://github.com/ClickHouse/ClickHouse/pull/5328) ([never lee](https://github.com/neverlee))

#### Documentation {#documentation}

- Translate documentation for `CollapsingMergeTree` to Chinese. [\#5168](https://github.com/ClickHouse/ClickHouse/pull/5168) ([张风啸](https://github.com/AlexZFX))
- Translate some documentation about table engines to Chinese. [\#5134](https://github.com/ClickHouse/ClickHouse/pull/5134) [\#5328](https://github.com/ClickHouse/ClickHouse/pull/5328) ([never lee](https://github.com/neverlee))

#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements}

- Fix some sanitizer reports that show probable use-after-free. [\#5139](https://github.com/ClickHouse/ClickHouse/pull/5139) [\#5143](https://github.com/ClickHouse/ClickHouse/pull/5143) [\#5393](https://github.com/ClickHouse/ClickHouse/pull/5393) ([Ivan](https://github.com/abyss7))
- Move performance tests out of separate directories for convenience. [\#5158](https://github.com/ClickHouse/ClickHouse/pull/5158) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix incorrect performance tests. [\#5255](https://github.com/ClickHouse/ClickHouse/pull/5255) ([alesapin](https://github.com/alesapin))
- Added a tool to calculate checksums caused by bit flips to debug hardware issues. [\#5334](https://github.com/ClickHouse/ClickHouse/pull/5334) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Make the runner script more usable. [\#5340](https://github.com/ClickHouse/ClickHouse/pull/5340) [\#5360](https://github.com/ClickHouse/ClickHouse/pull/5360) ([filimonov](https://github.com/filimonov))
- Add a small instruction on how to write performance tests. [\#5408](https://github.com/ClickHouse/ClickHouse/pull/5408) ([alesapin](https://github.com/alesapin))
- Add the ability to make substitutions in create, fill and drop queries in performance tests. [\#5367](https://github.com/ClickHouse/ClickHouse/pull/5367) ([Olga Khvostikova](https://github.com/stavrolia))

## ClickHouse release 19.7 {#clickhouse-release-19-7}

### ClickHouse release 19.7.5.29, 2019-07-05 {#clickhouse-release-19-7-5-29-2019-07-05}

#### Bug Fix {#bug-fix-25}

- Fix performance regression in some queries with JOIN. [\#5192](https://github.com/ClickHouse/ClickHouse/pull/5192) ([Winter Zhang](https://github.com/zhang2014))

### ClickHouse release 19.7.5.27, 2019-06-09 {#clickhouse-release-19-7-5-27-2019-06-09}

#### New features {#new-features-1}

- Added bitmap-related functions `bitmapHasAny` and `bitmapHasAll`, analogous to the `hasAny` and `hasAll` functions for arrays (see the sketch below). [\#5279](https://github.com/ClickHouse/ClickHouse/pull/5279) ([Sergi Vladykin](https://github.com/svladykin))
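
A quick sketch of the new bitmap predicates: like `hasAny`/`hasAll` for arrays, they test overlap and containment of two bitmaps built with `bitmapBuild`:

```sql
-- bitmapHasAny: do the two bitmaps share at least one element?
SELECT bitmapHasAny(bitmapBuild([1, 2, 3]), bitmapBuild([3, 4, 5]));  -- 1

-- bitmapHasAll: does the first bitmap contain every element of the second?
SELECT bitmapHasAll(bitmapBuild([1, 2, 3]), bitmapBuild([2, 3]));     -- 1
```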

#### Bug Fixes {#bug-fixes-1}

- Fix segfault on `minmax` INDEX with Null value. [\#5246](https://github.com/ClickHouse/ClickHouse/pull/5246) ([Nikita Vasilev](https://github.com/nikvas0))
- Mark all input columns in LIMIT BY as required output. It fixes the ‘Not found column’ error in some distributed queries. [\#5407](https://github.com/ClickHouse/ClickHouse/pull/5407) ([Constantin S. Pan](https://github.com/kvap))
- Fix the “Column ‘0’ already exists” error in `SELECT .. PREWHERE` on a column with DEFAULT. [\#5397](https://github.com/ClickHouse/ClickHouse/pull/5397) ([proller](https://github.com/proller))
- Fix the `ALTER MODIFY TTL` query on `ReplicatedMergeTree`. [\#5539](https://github.com/ClickHouse/ClickHouse/pull/5539/commits) ([Anton Popov](https://github.com/CurtizJ))
- Don’t crash the server when Kafka consumers have failed to start. [\#5285](https://github.com/ClickHouse/ClickHouse/pull/5285) ([Ivan](https://github.com/abyss7))
- Fixed bitmap functions producing a wrong result. [\#5359](https://github.com/ClickHouse/ClickHouse/pull/5359) ([Andy Yang](https://github.com/andyyzh))
- Fix element\_count for hashed dictionaries (do not include duplicates). [\#5440](https://github.com/ClickHouse/ClickHouse/pull/5440) ([Azat Khuzhin](https://github.com/azat))
- Use the contents of the environment variable TZ as the name for the timezone. It helps to correctly detect the default timezone in some cases. [\#5443](https://github.com/ClickHouse/ClickHouse/pull/5443) ([Ivan](https://github.com/abyss7))
- Do not try to convert integers in `dictGetT` functions, because it doesn’t work correctly. Throw an exception instead. [\#5446](https://github.com/ClickHouse/ClickHouse/pull/5446) ([Artem Zuikov](https://github.com/4ertus2))
- Fix settings in the ExternalData HTTP request. [\#5455](https://github.com/ClickHouse/ClickHouse/pull/5455) ([Danila Kutenin](https://github.com/danlark1))
- Fix a bug where parts were removed only from the filesystem without dropping them from ZooKeeper. [\#5520](https://github.com/ClickHouse/ClickHouse/pull/5520) ([alesapin](https://github.com/alesapin))
- Fix segmentation fault in the `bitmapHasAny` function. [\#5528](https://github.com/ClickHouse/ClickHouse/pull/5528) ([Zhichang Yu](https://github.com/yuzhichang))
- Fixed an error where the replication connection pool didn’t retry resolving the host, even when the DNS cache was dropped. [\#5534](https://github.com/ClickHouse/ClickHouse/pull/5534) ([alesapin](https://github.com/alesapin))
- Fixed the `DROP INDEX IF EXISTS` query. Now the `ALTER TABLE ... DROP INDEX IF EXISTS ...` query doesn’t raise an exception if the provided index does not exist. [\#5524](https://github.com/ClickHouse/ClickHouse/pull/5524) ([Gleb Novikov](https://github.com/NanoBjorn))
- Fix the UNION ALL supertype column. There were cases with inconsistent data and column types of resulting columns. [\#5503](https://github.com/ClickHouse/ClickHouse/pull/5503) ([Artem Zuikov](https://github.com/4ertus2))
- Skip ZNONODE during DDL query processing. Before, if another node removed the znode in the task queue, the one that did not process it but had already gotten the list of children would terminate the DDLWorker thread. [\#5489](https://github.com/ClickHouse/ClickHouse/pull/5489) ([Azat Khuzhin](https://github.com/azat))
- Fix INSERT into a Distributed() table with a MATERIALIZED column. [\#5429](https://github.com/ClickHouse/ClickHouse/pull/5429) ([Azat Khuzhin](https://github.com/azat))

### ClickHouse release 19.7.3.9, 2019-05-30 {#clickhouse-release-19-7-3-9-2019-05-30}

#### New Features {#new-features-2}

- Allow limiting the range of a setting that can be specified by the user. These constraints can be set up in the user settings profile. [\#4931](https://github.com/ClickHouse/ClickHouse/pull/4931) ([Vitaly Baranov](https://github.com/vitlibar))
- Add a second version of the function `groupUniqArray` with an optional `max_size` parameter that limits the size of the resulting array. This behavior is similar to the `groupArray(max_size)(x)` function (see the sketch after this list). [\#5026](https://github.com/ClickHouse/ClickHouse/pull/5026) ([Guillaume Tassery](https://github.com/YiuRULE))
- For TSVWithNames/CSVWithNames input file formats, the column order can now be determined from the file header. This is controlled by the `input_format_with_names_use_header` parameter. [\#5081](https://github.com/ClickHouse/ClickHouse/pull/5081) ([Alexander](https://github.com/Akazz))
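
A sketch of the `groupUniqArray(max_size)` variant described above; the `page_views` table is hypothetical:

```sql
-- Collect at most 3 distinct pages per user, analogous to groupArray(max_size)(x).
SELECT
    user_id,
    groupUniqArray(3)(page) AS sample_pages
FROM page_views
GROUP BY user_id;
```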

#### Bug Fixes {#bug-fixes-2}

- Crash with uncompressed\_cache + JOIN during merge (\#5197). [\#5133](https://github.com/ClickHouse/ClickHouse/pull/5133) ([Danila Kutenin](https://github.com/danlark1))
- Segmentation fault on a clickhouse-client query to system tables. \#5066 [\#5127](https://github.com/ClickHouse/ClickHouse/pull/5127) ([Ivan](https://github.com/abyss7))
- Data loss on heavy load via KafkaEngine (\#4736). [\#5080](https://github.com/ClickHouse/ClickHouse/pull/5080) ([Ivan](https://github.com/abyss7))
- Fixed a very rare data race condition that could happen when executing a query with UNION ALL involving at least two SELECTs from system.columns, system.tables, system.parts, system.parts\_tables or tables of the Merge family, while performing ALTER of columns of the related tables concurrently. [\#5189](https://github.com/ClickHouse/ClickHouse/pull/5189) ([alexey-milovidov](https://github.com/alexey-milovidov))

#### Performance Improvements {#performance-improvements-1}

- Use radix sort for sorting by a single numeric column in `ORDER BY` without `LIMIT`. [\#5106](https://github.com/ClickHouse/ClickHouse/pull/5106), [\#4439](https://github.com/ClickHouse/ClickHouse/pull/4439) ([Evgenii Pravda](https://github.com/kvinty), [alexey-milovidov](https://github.com/alexey-milovidov))

#### Documentation {#documentation-1}

- Translate documentation for some table engines to Chinese. [\#5107](https://github.com/ClickHouse/ClickHouse/pull/5107), [\#5094](https://github.com/ClickHouse/ClickHouse/pull/5094), [\#5087](https://github.com/ClickHouse/ClickHouse/pull/5087) ([张风啸](https://github.com/AlexZFX)), [\#5068](https://github.com/ClickHouse/ClickHouse/pull/5068) ([never lee](https://github.com/neverlee))

#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-1}

- Print UTF-8 characters properly in `clickhouse-test`. [\#5084](https://github.com/ClickHouse/ClickHouse/pull/5084) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Add a command line parameter for clickhouse-client to always load suggestion data. [\#5102](https://github.com/ClickHouse/ClickHouse/pull/5102) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Resolve some of the PVS-Studio warnings. [\#5082](https://github.com/ClickHouse/ClickHouse/pull/5082) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Update LZ4. [\#5040](https://github.com/ClickHouse/ClickHouse/pull/5040) ([Danila Kutenin](https://github.com/danlark1))
- Add gperf to build requirements for upcoming pull request \#5030. [\#5110](https://github.com/ClickHouse/ClickHouse/pull/5110) ([proller](https://github.com/proller))

## ClickHouse release 19.6 {#clickhouse-release-19-6}

### ClickHouse release 19.6.3.18, 2019-06-13 {#clickhouse-release-19-6-3-18-2019-06-13}

#### Bug Fixes {#bug-fixes-3}

- Fixed IN condition pushdown for queries from the table functions `mysql` and `odbc` and the corresponding table engines. This fixes \#3540 and \#2384. [\#5313](https://github.com/ClickHouse/ClickHouse/pull/5313) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix deadlock in ZooKeeper. [\#5297](https://github.com/ClickHouse/ClickHouse/pull/5297) ([github1youlc](https://github.com/github1youlc))
- Allow quoted decimals in CSV. [\#5284](https://github.com/ClickHouse/ClickHouse/pull/5284) ([Artem Zuikov](https://github.com/4ertus2))
- Disallow conversion from float Inf/NaN into Decimals (throw an exception; see the sketch after this list). [\#5282](https://github.com/ClickHouse/ClickHouse/pull/5282) ([Artem Zuikov](https://github.com/4ertus2))
- Fix data race in the rename query. [\#5247](https://github.com/ClickHouse/ClickHouse/pull/5247) ([Winter Zhang](https://github.com/zhang2014))
- Temporarily disable LFAlloc. Usage of LFAlloc might lead to a lot of MAP\_FAILED in allocating UncompressedCache and, as a result, to crashes of queries on highly loaded servers. [cfdba93](https://github.com/ClickHouse/ClickHouse/commit/cfdba938ce22f16efeec504f7f90206a515b1280) ([Danila Kutenin](https://github.com/danlark1))
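
A minimal sketch of the Decimal conversion change above: converting non-finite floats to `Decimal` now throws instead of producing a meaningless value:

```sql
SELECT toDecimal32(nan, 2);  -- now throws an exception
SELECT toDecimal32(inf, 2);  -- now throws an exception

SELECT toDecimal32(1.5, 2);  -- finite values convert as before
```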

### ClickHouse release 19.6.2.11, 2019-05-13 {#clickhouse-release-19-6-2-11-2019-05-13}

#### New Features {#new-features-3}

- TTL expressions for columns and tables (a sketch appears after the Improvements list below). [\#4212](https://github.com/ClickHouse/ClickHouse/pull/4212) ([Anton Popov](https://github.com/CurtizJ))
- Added support for `brotli` compression of HTTP responses (Accept-Encoding: br). [\#4388](https://github.com/ClickHouse/ClickHouse/pull/4388) ([Mikhail](https://github.com/fandyushin))
- Added new function `isValidUTF8` for checking whether a set of bytes is correctly UTF-8 encoded. [\#4934](https://github.com/ClickHouse/ClickHouse/pull/4934) ([Danila Kutenin](https://github.com/danlark1))
- Add new load balancing policy `first_or_random` which sends queries to the first specified host and, if it’s inaccessible, sends queries to random hosts of the shard. Useful for cross-replication topology setups. [\#5012](https://github.com/ClickHouse/ClickHouse/pull/5012) ([nvartolomei](https://github.com/nvartolomei))

#### Experimental Features {#experimental-features-1}

- Add setting `index_granularity_bytes` (adaptive index granularity) for the MergeTree\* table family. [\#4826](https://github.com/ClickHouse/ClickHouse/pull/4826) ([alesapin](https://github.com/alesapin))

#### Improvements {#improvements-1}

- Added support for non-constant and negative size and length arguments for the function `substringUTF8`. [\#4989](https://github.com/ClickHouse/ClickHouse/pull/4989) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Disable push-down to the right table in a left join, to the left table in a right join, and to both tables in a full join. This fixes wrong JOIN results in some cases. [\#4846](https://github.com/ClickHouse/ClickHouse/pull/4846) ([Ivan](https://github.com/abyss7))
- `clickhouse-copier`: auto-upload the task configuration from the `--task-file` option. [\#4876](https://github.com/ClickHouse/ClickHouse/pull/4876) ([proller](https://github.com/proller))
- Added a typo handler for the storage factory and table functions factory. [\#4891](https://github.com/ClickHouse/ClickHouse/pull/4891) ([Danila Kutenin](https://github.com/danlark1))
- Support asterisks and qualified asterisks for multiple joins without subqueries. [\#4898](https://github.com/ClickHouse/ClickHouse/pull/4898) ([Artem Zuikov](https://github.com/4ertus2))
- Make the missing-column error message more user-friendly. [\#4915](https://github.com/ClickHouse/ClickHouse/pull/4915) ([Artem Zuikov](https://github.com/4ertus2))
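
A sketch of the column and table TTL expressions introduced in this release’s New Features; the table and column names are hypothetical:

```sql
CREATE TABLE events
(
    d Date,
    user_id UInt64,
    payload String TTL d + INTERVAL 7 DAY  -- column values are cleared after a week
)
ENGINE = MergeTree
ORDER BY (d, user_id)
TTL d + INTERVAL 30 DAY;                   -- whole rows expire after a month
```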

#### Performance Improvements {#performance-improvements-2}

- Significant speedup of ASOF JOIN. [\#4924](https://github.com/ClickHouse/ClickHouse/pull/4924) ([Martijn Bakker](https://github.com/Gladdy))

#### Backward Incompatible Changes {#backward-incompatible-changes}

- The HTTP header `Query-Id` was renamed to `X-ClickHouse-Query-Id` for consistency. [\#4972](https://github.com/ClickHouse/ClickHouse/pull/4972) ([Mikhail](https://github.com/fandyushin))

#### Bug Fixes {#bug-fixes-4}

- Fixed a potential null pointer dereference in `clickhouse-copier`. [\#4900](https://github.com/ClickHouse/ClickHouse/pull/4900) ([proller](https://github.com/proller))
- Fixed an error on queries with JOIN + ARRAY JOIN. [\#4938](https://github.com/ClickHouse/ClickHouse/pull/4938) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed hanging on start of the server when a dictionary depends on another dictionary via a database with engine=Dictionary. [\#4962](https://github.com/ClickHouse/ClickHouse/pull/4962) ([Vitaly Baranov](https://github.com/vitlibar))
- Partially fix distributed\_product\_mode = local. It’s now possible to allow columns of local tables in where/having/order by/… via table aliases. An exception is thrown if the table does not have an alias. It’s not yet possible to access the columns without table aliases. [\#4986](https://github.com/ClickHouse/ClickHouse/pull/4986) ([Artem Zuikov](https://github.com/4ertus2))
- Fix a potentially wrong result for `SELECT DISTINCT` with `JOIN`. [\#5001](https://github.com/ClickHouse/ClickHouse/pull/5001) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed a very rare data race condition that could happen when executing a query with UNION ALL involving at least two SELECTs from system.columns, system.tables, system.parts, system.parts\_tables or tables of the Merge family, while performing ALTER of columns of the related tables concurrently. [\#5189](https://github.com/ClickHouse/ClickHouse/pull/5189) ([alexey-milovidov](https://github.com/alexey-milovidov))

#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-2}

- Fixed test failures when running clickhouse-server on a different host. [\#4713](https://github.com/ClickHouse/ClickHouse/pull/4713) ([Vasily Nemkov](https://github.com/Enmk))
- clickhouse-test: disable color control sequences in a non-TTY environment. [\#4937](https://github.com/ClickHouse/ClickHouse/pull/4937) ([alesapin](https://github.com/alesapin))
- clickhouse-test: allow using any test database (remove the `test.` qualification where possible). [\#5008](https://github.com/ClickHouse/ClickHouse/pull/5008) ([proller](https://github.com/proller))
- Fix UBSan errors. [\#5037](https://github.com/ClickHouse/ClickHouse/pull/5037) ([Vitaly Baranov](https://github.com/vitlibar))
- Yandex LFAlloc was added to ClickHouse to allocate MarkCache and UncompressedCache data in different ways to catch segfaults more reliably. [\#4995](https://github.com/ClickHouse/ClickHouse/pull/4995) ([Danila Kutenin](https://github.com/danlark1))
- A Python utility to help with backports and changelogs. [\#4949](https://github.com/ClickHouse/ClickHouse/pull/4949) ([Ivan](https://github.com/abyss7))

## ClickHouse release 19.5 {#clickhouse-release-19-5}

### ClickHouse release 19.5.4.22, 2019-05-13 {#clickhouse-release-19-5-4-22-2019-05-13}

#### Bug fixes {#bug-fixes-5}

- Fixed a possible crash in bitmap\* functions. [\#5220](https://github.com/ClickHouse/ClickHouse/pull/5220) [\#5228](https://github.com/ClickHouse/ClickHouse/pull/5228) ([Andy Yang](https://github.com/andyyzh))
- Fixed a very rare data race condition that could happen when executing a query with UNION ALL involving at least two SELECTs from system.columns, system.tables, system.parts, system.parts\_tables or tables of the Merge family, while performing ALTER of columns of the related tables concurrently. [\#5189](https://github.com/ClickHouse/ClickHouse/pull/5189) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed the error `Set for IN is not created yet in case of using single LowCardinality column in the left part of IN`. This error happened if a LowCardinality column was part of the primary key. \#5031 [\#5154](https://github.com/ClickHouse/ClickHouse/pull/5154) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Modification of the `retention` function: previously, if a row satisfied both the first and the Nth condition, only the first satisfied condition was added to the data state. Now all conditions that are satisfied in a row of data are added to the data state. [\#5119](https://github.com/ClickHouse/ClickHouse/pull/5119) ([小路](https://github.com/nicelulu))

### ClickHouse release 19.5.3.8, 2019-04-18 {#clickhouse-release-19-5-3-8-2019-04-18}

#### Bug fixes {#bug-fixes-6}

- Fixed the type of the setting `max_partitions_per_insert_block` from boolean to UInt64. [\#5028](https://github.com/ClickHouse/ClickHouse/pull/5028) ([Mohammad Hossein Sekhavat](https://github.com/mhsekhavat))

### ClickHouse release 19.5.2.6, 2019-04-15 {#clickhouse-release-19-5-2-6-2019-04-15}

#### New Features {#new-features-4}

- [Hyperscan](https://github.com/intel/hyperscan) multiple regular expression matching was added (functions `multiMatchAny`, `multiMatchAnyIndex`, `multiFuzzyMatchAny`, `multiFuzzyMatchAnyIndex`). [\#4780](https://github.com/ClickHouse/ClickHouse/pull/4780), [\#4841](https://github.com/ClickHouse/ClickHouse/pull/4841) ([Danila Kutenin](https://github.com/danlark1))
- The `multiSearchFirstPosition` function was added. [\#4780](https://github.com/ClickHouse/ClickHouse/pull/4780) ([Danila Kutenin](https://github.com/danlark1))
- Implement the predefined expression filter per row for tables. [\#4792](https://github.com/ClickHouse/ClickHouse/pull/4792) ([Ivan](https://github.com/abyss7))
- A new type of data skipping indices based on bloom filters (can be used for the `equal`, `in` and `like` functions). [\#4499](https://github.com/ClickHouse/ClickHouse/pull/4499) ([Nikita Vasilev](https://github.com/nikvas0))
- Added `ASOF JOIN` which allows running queries that join to the most recent value known (see the sketch after this list). [\#4774](https://github.com/ClickHouse/ClickHouse/pull/4774) [\#4867](https://github.com/ClickHouse/ClickHouse/pull/4867) [\#4863](https://github.com/ClickHouse/ClickHouse/pull/4863) [\#4875](https://github.com/ClickHouse/ClickHouse/pull/4875) ([Martijn Bakker](https://github.com/Gladdy), [Artem Zuikov](https://github.com/4ertus2))
- Rewrite multiple `COMMA JOIN` to `CROSS JOIN`. Then rewrite them to `INNER JOIN` if possible. [\#4661](https://github.com/ClickHouse/ClickHouse/pull/4661) ([Artem Zuikov](https://github.com/4ertus2))
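
A sketch of `ASOF JOIN`; the `trades` and `quotes` tables are hypothetical, and this assumes the `USING` form, where the last listed column is the one matched approximately (the most recent `ts` less than or equal to the left side’s `ts`):

```sql
-- Match each trade with the latest quote at or before the trade's timestamp.
SELECT t.symbol, t.price, q.bid
FROM trades AS t
ASOF JOIN quotes AS q USING (symbol, ts);
```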

#### Improvement {#improvement-9}

- `topK` and `topKWeighted` now support a custom `loadFactor` (fixes issue [\#4252](https://github.com/ClickHouse/ClickHouse/issues/4252)). [\#4634](https://github.com/ClickHouse/ClickHouse/pull/4634) ([Kirill Danshin](https://github.com/kirillDanshin))
- Allow using `parallel_replicas_count > 1` even for tables without sampling (the setting is simply ignored for them). In previous versions it led to an exception. [\#4637](https://github.com/ClickHouse/ClickHouse/pull/4637) ([Alexey Elymanov](https://github.com/digitalist))
- Support for `CREATE OR REPLACE VIEW`. Allow creating a view or setting a new definition in a single statement (see the sketch after this list). [\#4654](https://github.com/ClickHouse/ClickHouse/pull/4654) ([Boris Granveaud](https://github.com/bgranvea))
- The `Buffer` table engine now supports `PREWHERE`. [\#4671](https://github.com/ClickHouse/ClickHouse/pull/4671) ([Yangkuan Liu](https://github.com/LiuYangkuan))
- Add the ability to start a replicated table without metadata in ZooKeeper, in `readonly` mode. [\#4691](https://github.com/ClickHouse/ClickHouse/pull/4691) ([alesapin](https://github.com/alesapin))
- Fixed flicker of the progress bar in clickhouse-client. The issue was most noticeable when using `FORMAT Null` with streaming queries. [\#4811](https://github.com/ClickHouse/ClickHouse/pull/4811) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Allow disabling functions with the `hyperscan` library on a per-user basis to limit potentially excessive and uncontrolled resource usage. [\#4816](https://github.com/ClickHouse/ClickHouse/pull/4816) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Add version number logging in all errors. [\#4824](https://github.com/ClickHouse/ClickHouse/pull/4824) ([proller](https://github.com/proller))
- Added a restriction to the `multiMatch` functions which requires the string size to fit into `unsigned int`. Also added a limit on the number of arguments to the `multiSearch` functions. [\#4834](https://github.com/ClickHouse/ClickHouse/pull/4834) ([Danila Kutenin](https://github.com/danlark1))
- Improved usage of scratch space and error handling in Hyperscan. [\#4866](https://github.com/ClickHouse/ClickHouse/pull/4866) ([Danila Kutenin](https://github.com/danlark1))
- Fill `system.graphite_detentions` from a table config of `*GraphiteMergeTree` engine tables. [\#4584](https://github.com/ClickHouse/ClickHouse/pull/4584) ([Mikhail f. Shiryaev](https://github.com/Felixoid))
- Rename the `trigramDistance` function to `ngramDistance` and add more functions with `CaseInsensitive` and `UTF`. [\#4602](https://github.com/ClickHouse/ClickHouse/pull/4602) ([Danila Kutenin](https://github.com/danlark1))
- Improved data skipping indices calculation. [\#4640](https://github.com/ClickHouse/ClickHouse/pull/4640) ([Nikita Vasilev](https://github.com/nikvas0))
- Keep ordinary, `DEFAULT`, `MATERIALIZED` and `ALIAS` columns in a single list (fixes issue [\#2867](https://github.com/ClickHouse/ClickHouse/issues/2867)). [\#4707](https://github.com/ClickHouse/ClickHouse/pull/4707) ([Alex Zatelepin](https://github.com/ztlpn))
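
A sketch of `CREATE OR REPLACE VIEW`; the view and table names are hypothetical:

```sql
CREATE VIEW top_users AS
    SELECT user_id, count() AS c FROM events GROUP BY user_id ORDER BY c DESC LIMIT 10;

-- Swap in a new definition in a single statement, without DROP + CREATE:
CREATE OR REPLACE VIEW top_users AS
    SELECT user_id, count() AS c FROM events GROUP BY user_id ORDER BY c DESC LIMIT 100;
```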

#### Bug Fix {#bug-fix-26}

- Avoid `std::terminate` in case of memory allocation failure. Now the `std::bad_alloc` exception is thrown as expected. [\#4665](https://github.com/ClickHouse/ClickHouse/pull/4665) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixes capnproto reading from buffer. Sometimes files weren’t loaded successfully over HTTP. [\#4674](https://github.com/ClickHouse/ClickHouse/pull/4674) ([Vladislav](https://github.com/smirnov-vs))
- Fix the error `Unknown log entry type: 0` after an `OPTIMIZE TABLE FINAL` query. [\#4683](https://github.com/ClickHouse/ClickHouse/pull/4683) ([Amos Bird](https://github.com/amosbird))
- Wrong arguments to the `hasAny` or `hasAll` functions could lead to a segfault. [\#4698](https://github.com/ClickHouse/ClickHouse/pull/4698) ([alexey-milovidov](https://github.com/alexey-milovidov))
- A deadlock could happen while executing a `DROP DATABASE dictionary` query. [\#4701](https://github.com/ClickHouse/ClickHouse/pull/4701) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix undefined behavior in the `median` and `quantile` functions. [\#4702](https://github.com/ClickHouse/ClickHouse/pull/4702) ([hcz](https://github.com/hczhcz))
- Fix compression level detection when `network_compression_method` is in lowercase. Broken in v19.1. [\#4706](https://github.com/ClickHouse/ClickHouse/pull/4706) ([proller](https://github.com/proller))
- Fixed ignoring of the `UTC` setting (fixes issue [\#4658](https://github.com/ClickHouse/ClickHouse/issues/4658)). [\#4718](https://github.com/ClickHouse/ClickHouse/pull/4718) ([proller](https://github.com/proller))
- Fix `histogram` function behaviour with `Distributed` tables. [\#4741](https://github.com/ClickHouse/ClickHouse/pull/4741) ([olegkv](https://github.com/olegkv))
- Fixed TSan report `destroy of a locked mutex`. [\#4742](https://github.com/ClickHouse/ClickHouse/pull/4742) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a TSan report on shutdown due to a race condition in system logs usage. Fixed potential use-after-free on shutdown when part\_log is enabled. [\#4758](https://github.com/ClickHouse/ClickHouse/pull/4758) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix rechecking parts in `ReplicatedMergeTreeAlterThread` in case of error. [\#4772](https://github.com/ClickHouse/ClickHouse/pull/4772) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Arithmetic operations on intermediate aggregate function states were not working for constant arguments (such as subquery results). [\#4776](https://github.com/ClickHouse/ClickHouse/pull/4776) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Always backquote column names in metadata. Otherwise it’s impossible to create a table with a column named `index` (the server won’t restart due to a malformed `ATTACH` query in metadata). [\#4782](https://github.com/ClickHouse/ClickHouse/pull/4782) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix crash in `ALTER ... MODIFY ORDER BY` on a `Distributed` table. [\#4790](https://github.com/ClickHouse/ClickHouse/pull/4790) ([TCeason](https://github.com/TCeason))
- Fix segfault in `JOIN ON` with `enable_optimize_predicate_expression` enabled. [\#4794](https://github.com/ClickHouse/ClickHouse/pull/4794) ([Winter Zhang](https://github.com/zhang2014))
- Fix a bug with adding an extraneous row after consuming a protobuf message from Kafka. [\#4808](https://github.com/ClickHouse/ClickHouse/pull/4808) ([Vitaly Baranov](https://github.com/vitlibar))
- Fix crash of `JOIN` on a not-nullable vs nullable column. Fix `NULLs` in right keys in `ANY JOIN` + `join_use_nulls`. [\#4815](https://github.com/ClickHouse/ClickHouse/pull/4815) ([Artem Zuikov](https://github.com/4ertus2))
- Fix segmentation fault in `clickhouse-copier`. [\#4835](https://github.com/ClickHouse/ClickHouse/pull/4835) ([proller](https://github.com/proller))
- Fixed a race condition in `SELECT` from `system.tables` if the table is renamed or altered concurrently. [\#4836](https://github.com/ClickHouse/ClickHouse/pull/4836) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a data race when fetching a data part that is already obsolete. [\#4839](https://github.com/ClickHouse/ClickHouse/pull/4839) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a rare data race that could happen during `RENAME` of a MergeTree-family table. [\#4844](https://github.com/ClickHouse/ClickHouse/pull/4844) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed segmentation fault in the function `arrayIntersect`. It could happen if the function was called with mixed constant and ordinary arguments. [\#4847](https://github.com/ClickHouse/ClickHouse/pull/4847) ([Lixiang Qian](https://github.com/fancyqlx))
- Fixed reading from an `Array(LowCardinality)` column in the rare case when the column contained a long sequence of empty arrays. [\#4850](https://github.com/ClickHouse/ClickHouse/pull/4850) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fix crash in `FULL/RIGHT JOIN` when joining on nullable vs not nullable. [\#4855](https://github.com/ClickHouse/ClickHouse/pull/4855) ([Artem Zuikov](https://github.com/4ertus2))
- Fix `No message received` exception while fetching parts between replicas. [\#4856](https://github.com/ClickHouse/ClickHouse/pull/4856) ([alesapin](https://github.com/alesapin))
- Fixed a wrong `arrayIntersect` function result in case of several repeated values in a single array. [\#4871](https://github.com/ClickHouse/ClickHouse/pull/4871) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fix a race condition during concurrent `ALTER COLUMN` queries that could lead to a server crash (fixes issue [\#3421](https://github.com/ClickHouse/ClickHouse/issues/3421)). [\#4592](https://github.com/ClickHouse/ClickHouse/pull/4592) ([Alex Zatelepin](https://github.com/ztlpn))
- Fix incorrect result in `FULL/RIGHT JOIN` with a const column. [\#4723](https://github.com/ClickHouse/ClickHouse/pull/4723) ([Artem Zuikov](https://github.com/4ertus2))
- Fix duplicates in `GLOBAL JOIN` with asterisk. [\#4705](https://github.com/ClickHouse/ClickHouse/pull/4705) ([Artem Zuikov](https://github.com/4ertus2))
- Fix parameter deduction in `ALTER MODIFY` of column `CODEC` when the column type is not specified. [\#4883](https://github.com/ClickHouse/ClickHouse/pull/4883) ([alesapin](https://github.com/alesapin))
- The functions `cutQueryStringAndFragment()` and `queryStringAndFragment()` now work correctly when the `URL` contains a fragment and no query. [\#4894](https://github.com/ClickHouse/ClickHouse/pull/4894) ([Vitaly Baranov](https://github.com/vitlibar))
- Fix a rare bug when the setting `min_bytes_to_use_direct_io` is greater than zero, which occurs when a thread has to seek backward in a column file. [\#4897](https://github.com/ClickHouse/ClickHouse/pull/4897) ([alesapin](https://github.com/alesapin))
- Fix wrong argument types for aggregate functions with `LowCardinality` arguments (fixes issue [\#4919](https://github.com/ClickHouse/ClickHouse/issues/4919)). [\#4922](https://github.com/ClickHouse/ClickHouse/pull/4922) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fix wrong name qualification in `GLOBAL JOIN`. [\#4969](https://github.com/ClickHouse/ClickHouse/pull/4969) ([Artem Zuikov](https://github.com/4ertus2))
- Fix the `toISOWeek` function result for year 1970. [\#4988](https://github.com/ClickHouse/ClickHouse/pull/4988) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix `DROP`, `TRUNCATE` and `OPTIMIZE` query duplication when executed `ON CLUSTER` for the `ReplicatedMergeTree*` table family. [\#4991](https://github.com/ClickHouse/ClickHouse/pull/4991) ([alesapin](https://github.com/alesapin))

#### Backward Incompatible Change {#backward-incompatible-change-8}

- Renamed the setting `insert_sample_with_metadata` to `input_format_defaults_for_omitted_fields`. [\#4771](https://github.com/ClickHouse/ClickHouse/pull/4771) ([Artem Zuikov](https://github.com/4ertus2))
- Added the setting `max_partitions_per_insert_block` (with value 100 by default). If an inserted block contains a larger number of partitions, an exception is thrown. Set it to 0 if you want to remove the limit (not recommended; see the sketch after this list). [\#4845](https://github.com/ClickHouse/ClickHouse/pull/4845) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Multi-search functions were renamed (`multiPosition` to `multiSearchAllPositions`, `multiSearch` to `multiSearchAny`, `firstMatch` to `multiSearchFirstIndex`). [\#4780](https://github.com/ClickHouse/ClickHouse/pull/4780) ([Danila Kutenin](https://github.com/danlark1))
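
A minimal sketch of the new `max_partitions_per_insert_block` limit; the table is hypothetical:

```sql
CREATE TABLE t (x UInt32) ENGINE = MergeTree PARTITION BY x ORDER BY x;

-- With the default limit of 100, this INSERT would create 1000 partitions
-- in a single block and now throws an exception:
INSERT INTO t SELECT number FROM numbers(1000);

-- Per-session escape hatch (not recommended): 0 disables the check.
SET max_partitions_per_insert_block = 0;
```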

#### Performance Improvement {#performance-improvement-6}

- Optimize the Volnitsky searcher by inlining, giving about a 5-10% search improvement for queries with many needles or many similar bigrams. [\#4862](https://github.com/ClickHouse/ClickHouse/pull/4862) ([Danila Kutenin](https://github.com/danlark1))
- Fix a performance issue when the setting `use_uncompressed_cache` is greater than zero, which appeared when all read data was contained in the cache. [\#4913](https://github.com/ClickHouse/ClickHouse/pull/4913) ([alesapin](https://github.com/alesapin))

#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-10}

- Hardening of the debug build: more granular memory mappings and ASLR; add memory protection for the mark cache and the index. This allows finding more memory-stomping bugs in cases when ASan and MSan cannot do it. [\#4632](https://github.com/ClickHouse/ClickHouse/pull/4632) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Add support for the cmake variables `ENABLE_PROTOBUF`, `ENABLE_PARQUET` and `ENABLE_BROTLI` which allow enabling/disabling the above features (same as we can do for librdkafka, mysql, etc). [\#4669](https://github.com/ClickHouse/ClickHouse/pull/4669) ([Silviu Caragea](https://github.com/silviucpp))
- Add the ability to print the process list and stack traces of all threads if some queries hang after a test run. [\#4675](https://github.com/ClickHouse/ClickHouse/pull/4675) ([alesapin](https://github.com/alesapin))
- Add retries on the `Connection loss` error in `clickhouse-test`. [\#4682](https://github.com/ClickHouse/ClickHouse/pull/4682) ([alesapin](https://github.com/alesapin))
- Add a FreeBSD build with Vagrant, and a build with Thread Sanitizer, to the packager script. [\#4712](https://github.com/ClickHouse/ClickHouse/pull/4712) [\#4748](https://github.com/ClickHouse/ClickHouse/pull/4748) ([alesapin](https://github.com/alesapin))
- Now the user is asked for a password for the `'default'` user during installation. [\#4725](https://github.com/ClickHouse/ClickHouse/pull/4725) ([proller](https://github.com/proller))
- Suppress a warning in the `rdkafka` library. [\#4740](https://github.com/ClickHouse/ClickHouse/pull/4740) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Allow building without SSL. [\#4750](https://github.com/ClickHouse/ClickHouse/pull/4750) ([proller](https://github.com/proller))
- Add a way to launch the clickhouse-server image from a custom user. [\#4753](https://github.com/ClickHouse/ClickHouse/pull/4753) ([Mikhail f. Shiryaev](https://github.com/Felixoid))
- Upgrade contrib boost to 1.69. [\#4793](https://github.com/ClickHouse/ClickHouse/pull/4793) ([proller](https://github.com/proller))
- Disable usage of `mremap` when compiled with Thread Sanitizer. Surprisingly enough, TSan does not intercept `mremap` (though it does intercept `mmap` and `munmap`), which leads to false positives. Fixed the TSan report in stateful tests. [\#4859](https://github.com/ClickHouse/ClickHouse/pull/4859) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Add a test checking the use of a format schema via the HTTP interface. [\#4864](https://github.com/ClickHouse/ClickHouse/pull/4864) ([Vitaly Baranov](https://github.com/vitlibar))

## ClickHouse release 19.4 {#clickhouse-release-19-4}

### ClickHouse release 19.4.4.33, 2019-04-17 {#clickhouse-release-19-4-4-33-2019-04-17}

#### Bug Fixes {#bug-fixes-7}

- Avoid `std::terminate` in case of memory allocation failure. Now the `std::bad_alloc` exception is thrown as expected. [\#4665](https://github.com/ClickHouse/ClickHouse/pull/4665) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixes capnproto reading from buffer. Sometimes files weren’t loaded successfully over HTTP. [\#4674](https://github.com/ClickHouse/ClickHouse/pull/4674) ([Vladislav](https://github.com/smirnov-vs))
- Fix the error `Unknown log entry type: 0` after an `OPTIMIZE TABLE FINAL` query. [\#4683](https://github.com/ClickHouse/ClickHouse/pull/4683) ([Amos Bird](https://github.com/amosbird))
- Wrong arguments to the `hasAny` or `hasAll` functions could lead to a segfault. [\#4698](https://github.com/ClickHouse/ClickHouse/pull/4698) ([alexey-milovidov](https://github.com/alexey-milovidov))
- A deadlock could happen while executing a `DROP DATABASE dictionary` query. [\#4701](https://github.com/ClickHouse/ClickHouse/pull/4701) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix undefined behavior in the `median` and `quantile` functions. [\#4702](https://github.com/ClickHouse/ClickHouse/pull/4702) ([hcz](https://github.com/hczhcz))
- Fix compression level detection when `network_compression_method` is in lowercase. Broken in v19.1. [\#4706](https://github.com/ClickHouse/ClickHouse/pull/4706) ([proller](https://github.com/proller))
- Fixed ignoring of the `UTC` setting (fixes issue [\#4658](https://github.com/ClickHouse/ClickHouse/issues/4658)). [\#4718](https://github.com/ClickHouse/ClickHouse/pull/4718) ([proller](https://github.com/proller))
- Fix `histogram` function behaviour with `Distributed` tables. [\#4741](https://github.com/ClickHouse/ClickHouse/pull/4741) ([olegkv](https://github.com/olegkv))
- Fixed TSan report `destroy of a locked mutex`. [\#4742](https://github.com/ClickHouse/ClickHouse/pull/4742) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a TSan report on shutdown due to a race condition in system logs usage. Fixed potential use-after-free on shutdown when part\_log is enabled. [\#4758](https://github.com/ClickHouse/ClickHouse/pull/4758) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix rechecking parts in `ReplicatedMergeTreeAlterThread` in case of error. [\#4772](https://github.com/ClickHouse/ClickHouse/pull/4772) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Arithmetic operations on intermediate aggregate function states were not working for constant arguments (such as subquery results). [\#4776](https://github.com/ClickHouse/ClickHouse/pull/4776) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Always backquote column names in metadata. Otherwise it’s impossible to create a table with a column named `index` (the server won’t restart due to a malformed `ATTACH` query in metadata). [\#4782](https://github.com/ClickHouse/ClickHouse/pull/4782) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix crash in `ALTER ... MODIFY ORDER BY` on a `Distributed` table. [\#4790](https://github.com/ClickHouse/ClickHouse/pull/4790) ([TCeason](https://github.com/TCeason))
- Fix segfault in `JOIN ON` with `enable_optimize_predicate_expression` enabled. [\#4794](https://github.com/ClickHouse/ClickHouse/pull/4794) ([Winter Zhang](https://github.com/zhang2014))
- Fix a bug with adding an extraneous row after consuming a protobuf message from Kafka. [\#4808](https://github.com/ClickHouse/ClickHouse/pull/4808) ([Vitaly Baranov](https://github.com/vitlibar))
- Fix segmentation fault in `clickhouse-copier`. [\#4835](https://github.com/ClickHouse/ClickHouse/pull/4835) ([proller](https://github.com/proller))
- Fixed a race condition in `SELECT` from `system.tables` if the table is renamed or altered concurrently. [\#4836](https://github.com/ClickHouse/ClickHouse/pull/4836) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a data race when fetching a data part that is already obsolete. [\#4839](https://github.com/ClickHouse/ClickHouse/pull/4839) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a rare data race that could happen during `RENAME` of a MergeTree-family table. [\#4844](https://github.com/ClickHouse/ClickHouse/pull/4844) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed segmentation fault in the function `arrayIntersect`. It could happen if the function was called with mixed constant and ordinary arguments. [\#4847](https://github.com/ClickHouse/ClickHouse/pull/4847) ([Lixiang Qian](https://github.com/fancyqlx))
- Fixed reading from an `Array(LowCardinality)` column in the rare case when the column contained a long sequence of empty arrays. [\#4850](https://github.com/ClickHouse/ClickHouse/pull/4850) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fix `No message received` exception while fetching parts between replicas. [\#4856](https://github.com/ClickHouse/ClickHouse/pull/4856) ([alesapin](https://github.com/alesapin))
- Fixed a wrong `arrayIntersect` function result in case of several repeated values in a single array. [\#4871](https://github.com/ClickHouse/ClickHouse/pull/4871) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fix a race condition during concurrent `ALTER COLUMN` queries that could lead to a server crash (fixes issue [\#3421](https://github.com/ClickHouse/ClickHouse/issues/3421)). [\#4592](https://github.com/ClickHouse/ClickHouse/pull/4592) ([Alex Zatelepin](https://github.com/ztlpn))
- Fix parameter deduction in `ALTER MODIFY` of column `CODEC` when the column type is not specified. [\#4883](https://github.com/ClickHouse/ClickHouse/pull/4883) ([alesapin](https://github.com/alesapin))
- The functions `cutQueryStringAndFragment()` and `queryStringAndFragment()` now work correctly when the `URL` contains a fragment and no query. [\#4894](https://github.com/ClickHouse/ClickHouse/pull/4894) ([Vitaly Baranov](https://github.com/vitlibar))
- Fix a rare bug when the setting `min_bytes_to_use_direct_io` is greater than zero, which occurs when a thread has to seek backward in a column file. [\#4897](https://github.com/ClickHouse/ClickHouse/pull/4897) ([alesapin](https://github.com/alesapin))
- Fix wrong argument types for aggregate functions with `LowCardinality` arguments (fixes issue [\#4919](https://github.com/ClickHouse/ClickHouse/issues/4919)). [\#4922](https://github.com/ClickHouse/ClickHouse/pull/4922) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fix the `toISOWeek` function result for year 1970. [\#4988](https://github.com/ClickHouse/ClickHouse/pull/4988) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix `DROP`, `TRUNCATE` and `OPTIMIZE` query duplication when executed `ON CLUSTER` for the `ReplicatedMergeTree*` table family. [\#4991](https://github.com/ClickHouse/ClickHouse/pull/4991) ([alesapin](https://github.com/alesapin))

#### Improvements {#improvements-2}

- Keep ordinary, `DEFAULT`, `MATERIALIZED` and `ALIAS` columns in a single list (fixes issue [\#2867](https://github.com/ClickHouse/ClickHouse/issues/2867)). [\#4707](https://github.com/ClickHouse/ClickHouse/pull/4707) ([Alex Zatelepin](https://github.com/ztlpn))

### ClickHouse release 19.4.3.11, 2019-04-02 {#clickhouse-release-19-4-3-11-2019-04-02}

#### Bug Fixes {#bug-fixes-8}

- Fix crash in `FULL/RIGHT JOIN` when joining on nullable vs not nullable. [\#4855](https://github.com/ClickHouse/ClickHouse/pull/4855) ([Artem Zuikov](https://github.com/4ertus2))
- Fix segmentation fault in `clickhouse-copier`. [\#4835](https://github.com/ClickHouse/ClickHouse/pull/4835) ([proller](https://github.com/proller))

#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-11}

- Add a way to launch the clickhouse-server image from a custom user. [\#4753](https://github.com/ClickHouse/ClickHouse/pull/4753) ([Mikhail f. Shiryaev](https://github.com/Felixoid))

### ClickHouse release 19.4.2.7, 2019-03-30 {#clickhouse-release-19-4-2-7-2019-03-30}

#### Bug Fixes {#bug-fixes-9}

- Fixed reading from an `Array(LowCardinality)` column in the rare case when the column contained a long sequence of empty arrays. [\#4850](https://github.com/ClickHouse/ClickHouse/pull/4850) ([Nikolai Kochetov](https://github.com/KochetovNicolai))

### ClickHouse release 19.4.1.3, 2019-03-19 {#clickhouse-release-19-4-1-3-2019-03-19}

#### Bug Fixes {#bug-fixes-10}

- Fixed remote queries which contain both `LIMIT BY` and `LIMIT`. Previously, if `LIMIT BY` and `LIMIT` were used in a remote query, `LIMIT` could happen before `LIMIT BY`, which led to an over-filtered result (see the sketch below). [\#4708](https://github.com/ClickHouse/ClickHouse/pull/4708) ([Constantin S. Pan](https://github.com/kvap))
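
A sketch of the `LIMIT BY` + `LIMIT` interaction that the fix above restores for remote queries; the table is hypothetical. `LIMIT 3 BY domain` keeps the top 3 rows per domain, and only then does `LIMIT 10` cap the total:

```sql
SELECT domain, hits
FROM hits_distributed
ORDER BY hits DESC
LIMIT 3 BY domain
LIMIT 10;
```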

### ClickHouse release 19.4.0.49, 2019-03-09 {#clickhouse-release-19-4-0-49-2019-03-09}

#### New Features {#new-features-5}

- Added full support for the `Protobuf` format (input and output, nested data structures). [\#4174](https://github.com/ClickHouse/ClickHouse/pull/4174) [\#4493](https://github.com/ClickHouse/ClickHouse/pull/4493) ([Vitaly Baranov](https://github.com/vitlibar))
- Added bitmap functions with Roaring Bitmaps. [\#4207](https://github.com/ClickHouse/ClickHouse/pull/4207) ([Andy Yang](https://github.com/andyyzh)) [\#4568](https://github.com/ClickHouse/ClickHouse/pull/4568) ([Vitaly Baranov](https://github.com/vitlibar))
- Parquet format support. [\#4448](https://github.com/ClickHouse/ClickHouse/pull/4448) ([proller](https://github.com/proller))
- N-gram distance was added for fuzzy string comparison. It is similar to q-gram metrics in the R language (see the sketch after this list). [\#4466](https://github.com/ClickHouse/ClickHouse/pull/4466) ([Danila Kutenin](https://github.com/danlark1))
- Combine rules for graphite rollup from dedicated aggregation and retention patterns. [\#4426](https://github.com/ClickHouse/ClickHouse/pull/4426) ([Mikhail f. Shiryaev](https://github.com/Felixoid))
- Added `max_execution_speed` and `max_execution_speed_bytes` to limit resource usage. Added the `min_execution_speed_bytes` setting to complement `min_execution_speed`. [\#4430](https://github.com/ClickHouse/ClickHouse/pull/4430) ([Winter Zhang](https://github.com/zhang2014))
- Implemented the function `flatten`. [\#4555](https://github.com/ClickHouse/ClickHouse/pull/4555) [\#4409](https://github.com/ClickHouse/ClickHouse/pull/4409) ([alexey-milovidov](https://github.com/alexey-milovidov), [kzon](https://github.com/kzon))
- Added functions `arrayEnumerateDenseRanked` and `arrayEnumerateUniqRanked` (it’s like `arrayEnumerateUniq` but allows fine-tuning the array depth to look inside multidimensional arrays). [\#4475](https://github.com/ClickHouse/ClickHouse/pull/4475) ([proller](https://github.com/proller)) [\#4601](https://github.com/ClickHouse/ClickHouse/pull/4601) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Multiple JOINs with some restrictions: no asterisks, no complex aliases in ON/WHERE/GROUP BY/… [\#4462](https://github.com/ClickHouse/ClickHouse/pull/4462) ([Artem Zuikov](https://github.com/4ertus2))
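
A quick sketch of two of the new functions above, `ngramDistance` and `flatten`:

```sql
-- Fuzzy string comparison; the result is in [0, 1], smaller means more similar:
SELECT ngramDistance('ClickHouse', 'ClickHose');

-- flatten converts an array of arrays into a flat array:
SELECT flatten([[1, 2], [3], [4, 5]]);  -- [1, 2, 3, 4, 5]
```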

#### Bug Fixes {#bug-fixes-11}

- This release also contains all bug fixes from 19.3 and 19.1.
- Fixed a bug in data skipping indices: the order of granules after INSERT was incorrect. [\#4407](https://github.com/ClickHouse/ClickHouse/pull/4407) ([Nikita Vasilev](https://github.com/nikvas0))
- Fixed the `set` index for `Nullable` and `LowCardinality` columns. Before this, a `set` index with a `Nullable` or `LowCardinality` column led to the error `Data type must be deserialized with multiple streams` while selecting. [\#4594](https://github.com/ClickHouse/ClickHouse/pull/4594) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Correctly set update\_time on a full `executable` dictionary update. [\#4551](https://github.com/ClickHouse/ClickHouse/pull/4551) ([Tema Novikov](https://github.com/temoon))
- Fix the broken progress bar in 19.3. [\#4627](https://github.com/ClickHouse/ClickHouse/pull/4627) ([filimonov](https://github.com/filimonov))
- Fixed inconsistent values of MemoryTracker when the memory region was shrunk, in certain cases. [\#4619](https://github.com/ClickHouse/ClickHouse/pull/4619) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed undefined behaviour in ThreadPool. [\#4612](https://github.com/ClickHouse/ClickHouse/pull/4612) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a very rare crash with the message `mutex lock failed: Invalid argument` that could happen when a MergeTree table was dropped concurrently with a SELECT. [\#4608](https://github.com/ClickHouse/ClickHouse/pull/4608) ([Alex Zatelepin](https://github.com/ztlpn))
- ODBC driver compatibility with the `LowCardinality` data type. [\#4381](https://github.com/ClickHouse/ClickHouse/pull/4381) ([proller](https://github.com/proller))
- FreeBSD: fix for the `AIOcontextPool: Found io_event with unknown id 0` error. [\#4438](https://github.com/ClickHouse/ClickHouse/pull/4438) ([urgordeadbeef](https://github.com/urgordeadbeef))
- The `system.part_log` table was created regardless of configuration. [\#4483](https://github.com/ClickHouse/ClickHouse/pull/4483) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix undefined behaviour in the `dictIsIn` function for cache dictionaries. [\#4515](https://github.com/ClickHouse/ClickHouse/pull/4515) ([alesapin](https://github.com/alesapin))
- Fixed a deadlock when a SELECT query locks the same table multiple times (e.g. from different threads or when executing multiple subqueries) and there is a concurrent DDL query. [\#4535](https://github.com/ClickHouse/ClickHouse/pull/4535) ([Alex Zatelepin](https://github.com/ztlpn))
- Disable compile\_expressions by default until we get our own `llvm` contrib and can test it with `clang` and `asan`. [\#4579](https://github.com/ClickHouse/ClickHouse/pull/4579) ([alesapin](https://github.com/alesapin))
- Prevent `std::terminate` when the `invalidate_query` for a `clickhouse` external dictionary source has returned a wrong resultset (empty, or more than one row, or more than one column). Fixed an issue where the `invalidate_query` was performed every five seconds regardless of the `lifetime`. [\#4583](https://github.com/ClickHouse/ClickHouse/pull/4583) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Avoid a deadlock when the `invalidate_query` for a dictionary with a `clickhouse` source involved the `system.dictionaries` table or the `Dictionaries` database (rare case). [\#4599](https://github.com/ClickHouse/ClickHouse/pull/4599) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixes for CROSS JOIN with empty WHERE. [\#4598](https://github.com/ClickHouse/ClickHouse/pull/4598) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed segfault in the function “replicate” when a constant argument is passed. [\#4603](https://github.com/ClickHouse/ClickHouse/pull/4603) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix lambda functions with the predicate optimizer. [\#4408](https://github.com/ClickHouse/ClickHouse/pull/4408) ([Winter Zhang](https://github.com/zhang2014))
- Multiple fixes for multiple JOINs. [\#4595](https://github.com/ClickHouse/ClickHouse/pull/4595) ([Artem Zuikov](https://github.com/4ertus2))

#### Improvements {#improvements-3}

- Support aliases in the JOIN ON section for columns of the right table. [\#4412](https://github.com/ClickHouse/ClickHouse/pull/4412) ([Artem Zuikov](https://github.com/4ertus2))
- Results of multiple JOINs need correct result names to be used in subselects. Replace flat aliases with source names in the result. [\#4474](https://github.com/ClickHouse/ClickHouse/pull/4474) ([Artem Zuikov](https://github.com/4ertus2))
- Improve push-down logic for joined statements. [\#4387](https://github.com/ClickHouse/ClickHouse/pull/4387) ([Ivan](https://github.com/abyss7))

#### Performance Improvements {#performance-improvements-3}

- Improved heuristics of the “move to PREWHERE” optimization. [\#4405](https://github.com/ClickHouse/ClickHouse/pull/4405) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Use proper lookup tables that use HashTable’s API for 8-bit and 16-bit keys. [\#4536](https://github.com/ClickHouse/ClickHouse/pull/4536) ([Amos Bird](https://github.com/amosbird))
- Improved performance of string comparison. [\#4564](https://github.com/ClickHouse/ClickHouse/pull/4564) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Clean up the distributed DDL queue in a separate thread so that it doesn’t slow down the main loop that processes distributed DDL tasks. [\#4502](https://github.com/ClickHouse/ClickHouse/pull/4502) ([Alex Zatelepin](https://github.com/ztlpn))
- When `min_bytes_to_use_direct_io` is set to 1, not every file was opened with O\_DIRECT mode because the data size to read was sometimes underestimated by the size of one compressed block. [\#4526](https://github.com/ClickHouse/ClickHouse/pull/4526) ([alexey-milovidov](https://github.com/alexey-milovidov))

#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-12}

- Added support for clang-9. [\#4604](https://github.com/ClickHouse/ClickHouse/pull/4604) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix wrong `__asm__` instructions (again). [\#4621](https://github.com/ClickHouse/ClickHouse/pull/4621) ([Konstantin Podshumok](https://github.com/podshumok))
- Add the ability to specify settings for `clickhouse-performance-test` from the command line. [\#4437](https://github.com/ClickHouse/ClickHouse/pull/4437) ([alesapin](https://github.com/alesapin))
- Add dictionary tests to integration tests. [\#4477](https://github.com/ClickHouse/ClickHouse/pull/4477) ([alesapin](https://github.com/alesapin))
- Added queries from the benchmark on the website to automated performance tests. [\#4496](https://github.com/ClickHouse/ClickHouse/pull/4496) ([alexey-milovidov](https://github.com/alexey-milovidov))
- `xxhash.h` does not exist in external lz4 because it is an implementation detail and its symbols are namespaced with the `XXH_NAMESPACE` macro. When lz4 is external, xxHash has to be external too, and the dependents have to link to it. [\#4495](https://github.com/ClickHouse/ClickHouse/pull/4495) ([Orivej Desh](https://github.com/orivej))
- Fixed a case when the `quantileTiming` aggregate function can be called with a negative or floating-point argument (this fixes a fuzz test with the undefined behaviour sanitizer). [\#4506](https://github.com/ClickHouse/ClickHouse/pull/4506) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Spelling error correction. [\#4531](https://github.com/ClickHouse/ClickHouse/pull/4531) ([sdk2](https://github.com/sdk2))
- Fix compilation on Mac. [\#4371](https://github.com/ClickHouse/ClickHouse/pull/4371) ([Vitaly Baranov](https://github.com/vitlibar))
- Build fixes for FreeBSD and various unusual build configurations. [\#4444](https://github.com/ClickHouse/ClickHouse/pull/4444) ([proller](https://github.com/proller))

## ClickHouse release 19.3 {#clickhouse-release-19-3}

### ClickHouse release 19.3.9.1, 2019-04-02 {#clickhouse-release-19-3-9-1-2019-04-02}

#### Bug Fixes {#bug-fixes-12}

- Fix crash in `FULL/RIGHT JOIN` when joining on nullable vs not nullable. [\#4855](https://github.com/ClickHouse/ClickHouse/pull/4855) ([Artem Zuikov](https://github.com/4ertus2))
- Fix segmentation fault in `clickhouse-copier`. [\#4835](https://github.com/ClickHouse/ClickHouse/pull/4835) ([proller](https://github.com/proller))
- Fixed reading from an `Array(LowCardinality)` column in the rare case when the column contained a long sequence of empty arrays. [\#4850](https://github.com/ClickHouse/ClickHouse/pull/4850) ([Nikolai Kochetov](https://github.com/KochetovNicolai))

#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-13}

- Add a way to launch the clickhouse-server image from a custom user. [\#4753](https://github.com/ClickHouse/ClickHouse/pull/4753) ([Mikhail f. Shiryaev](https://github.com/Felixoid))

### ClickHouse release 19.3.7, 2019-03-12 {#clickhouse-release-19-3-7-2019-03-12}

#### Bug fixes {#bug-fixes-13}

- Fixed an error in \#3920. This error manifests itself as random cache corruption (messages `Unknown codec family code`, `Cannot seek through file`) and segfaults. This bug first appeared in version 19.1 and is present in versions up to 19.1.10 and 19.3.6. [\#4623](https://github.com/ClickHouse/ClickHouse/pull/4623) ([alexey-milovidov](https://github.com/alexey-milovidov))

### ClickHouse release 19.3.6, 2019-03-02 {#clickhouse-release-19-3-6-2019-03-02}

#### Bug fixes {#bug-fixes-14}

- When there are more than 1000 threads in a thread pool, `std::terminate` may happen on thread exit. [Azat Khuzhin](https://github.com/azat) [\#4485](https://github.com/ClickHouse/ClickHouse/pull/4485) [\#4505](https://github.com/ClickHouse/ClickHouse/pull/4505) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Now it’s possible to create `ReplicatedMergeTree*` tables with comments on columns without defaults, and tables with column codecs without comments and defaults. Also fix the comparison of codecs. [\#4523](https://github.com/ClickHouse/ClickHouse/pull/4523) ([alesapin](https://github.com/alesapin))
- Fixed crash on JOIN with an array or tuple. [\#4552](https://github.com/ClickHouse/ClickHouse/pull/4552) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed crash in clickhouse-copier with the message `ThreadStatus not created`. [\#4540](https://github.com/ClickHouse/ClickHouse/pull/4540) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed a hang on server shutdown if distributed DDLs were used. [\#4472](https://github.com/ClickHouse/ClickHouse/pull/4472) ([Alex Zatelepin](https://github.com/ztlpn))
- Incorrect column numbers were printed in the error message about text-format parsing for columns with numbers greater than 10. [\#4484](https://github.com/ClickHouse/ClickHouse/pull/4484) ([alexey-milovidov](https://github.com/alexey-milovidov))

#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-3}

- Fixed the build with AVX enabled. [\#4527](https://github.com/ClickHouse/ClickHouse/pull/4527) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Enable extended accounting and IO accounting based on a known-good version instead of the kernel under which it is compiled. [\#4541](https://github.com/ClickHouse/ClickHouse/pull/4541) ([nvartolomei](https://github.com/nvartolomei))
- Allow skipping the setting of core\_dump.size\_limit, with a warning instead of a throw if setting the limit fails. [\#4473](https://github.com/ClickHouse/ClickHouse/pull/4473) ([proller](https://github.com/proller))
- Removed the `inline` tags of `void readBinary(...)` in `Field.cpp`. Also merged redundant `namespace DB` blocks. [\#4530](https://github.com/ClickHouse/ClickHouse/pull/4530) ([hcz](https://github.com/hczhcz))

### ClickHouse release 19.3.5, 2019-02-21 {#clickhouse-release-19-3-5-2019-02-21}

#### Bug fixes {#bug-fixes-15}

- Fixed a bug with large HTTP INSERT query processing. [\#4454](https://github.com/ClickHouse/ClickHouse/pull/4454) ([alesapin](https://github.com/alesapin))
- Fixed a bug in the processing of large HTTP insert queries. [\#4454](https://github.com/ClickHouse/ClickHouse/pull/4454) ([alesapin](https://github.com/alesapin))
- Fixed backward incompatibility with old versions due to an incorrect implementation of the `send_logs_level` setting. [\#4445](https://github.com/ClickHouse/ClickHouse/pull/4445) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed backward incompatibility of the table function `remote` introduced with column comments. [\#4446](https://github.com/ClickHouse/ClickHouse/pull/4446) ([alexey-milovidov](https://github.com/alexey-milovidov))

### ClickHouse release 19.3.4, 2019-02-16 {#clickhouse-release-19-3-4-2019-02-16}

#### Improvements {#improvements-4}

- Table index size is no longer counted against memory limits when doing an `ATTACH TABLE` query. This avoids the possibility that a table cannot be attached after being detached. [\#4396](https://github.com/ClickHouse/ClickHouse/pull/4396) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Slightly raised the limit on the maximum string and array size received from ZooKeeper. This allows continuing to work with an increased `CLIENT_JVMFLAGS=-Djute.maxbuffer=...` on ZooKeeper. [\#4398](https://github.com/ClickHouse/ClickHouse/pull/4398) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Allow repairing an abandoned replica even if it already has a huge number of nodes in its queue. [\#4399](https://github.com/ClickHouse/ClickHouse/pull/4399) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added one required argument to the `SET` index (the maximum number of stored rows). [\#4386](https://github.com/ClickHouse/ClickHouse/pull/4386) ([Nikita Vasilev](https://github.com/nikvas0))

#### Bug Fixes {#bug-fixes-16}

- Fixed the `WITH ROLLUP` result for grouping by a single `LowCardinality` key. [\#4384](https://github.com/ClickHouse/ClickHouse/pull/4384) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fixed a bug in the set index (dropping a granule if it contains more than `max_rows` rows). [\#4386](https://github.com/ClickHouse/ClickHouse/pull/4386) ([Nikita Vasilev](https://github.com/nikvas0))
- A lot of FreeBSD build fixes. [\#4397](https://github.com/ClickHouse/ClickHouse/pull/4397) ([proller](https://github.com/proller))
- Fixed alias substitution in queries with a subquery containing the same alias (issue [\#4110](https://github.com/ClickHouse/ClickHouse/issues/4110)). [\#4351](https://github.com/ClickHouse/ClickHouse/pull/4351) ([Artem Zuikov](https://github.com/4ertus2))

#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-4}

- Added the ability to run `clickhouse-server` for stateless tests in a Docker image. [\#4347](https://github.com/ClickHouse/ClickHouse/pull/4347) ([Vasily Nemkov](https://github.com/Enmk))

### ClickHouse release 19.3.3, 2019-02-13 {#clickhouse-release-19-3-3-2019-02-13}

#### New Features {#new-features-6}

- Added the `KILL MUTATION` statement that allows removing mutations that are stuck for some reason. Added the `latest_failed_part`, `latest_fail_time`, `latest_fail_reason` fields to the `system.mutations` table for easier troubleshooting. [\#4287](https://github.com/ClickHouse/ClickHouse/pull/4287) ([Alex Zatelepin](https://github.com/ztlpn))
- Added the aggregate function `entropy`, which computes Shannon entropy. [\#4238](https://github.com/ClickHouse/ClickHouse/pull/4238) ([Quid37](https://github.com/Quid37))
- Added the ability to send queries `INSERT INTO tbl VALUES (....` to the server without splitting them into `query` and `data` parts. [\#4301](https://github.com/ClickHouse/ClickHouse/pull/4301) ([alesapin](https://github.com/alesapin))
- A generic implementation of the `arrayWithConstant` function was added. [\#4322](https://github.com/ClickHouse/ClickHouse/pull/4322) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Implemented the `NOT BETWEEN` comparison operator. [\#4228](https://github.com/ClickHouse/ClickHouse/pull/4228) ([Dmitry Naumov](https://github.com/nezed))
- Implemented `sumMapFiltered` in order to be able to limit the number of keys for which values will be summed by `sumMap`. [\#4129](https://github.com/ClickHouse/ClickHouse/pull/4129) ([Léo Ercolanelli](https://github.com/ercolanelli-leo))
- Added support for `Nullable` types in the `mysql` table function. [\#4198](https://github.com/ClickHouse/ClickHouse/pull/4198) ([Emmanuel Donin de Rosière](https://github.com/edonin))
- Support for arbitrary constant expressions in the `LIMIT` clause. [\#4246](https://github.com/ClickHouse/ClickHouse/pull/4246) ([k3box](https://github.com/k3box))
- Added the `topKWeighted` aggregate function that takes an additional argument with an (unsigned integer) weight. [\#4245](https://github.com/ClickHouse/ClickHouse/pull/4245) ([Andrew Golman](https://github.com/andrewgolman))
- `StorageJoin` now supports the `join_any_take_last_row` setting that allows overwriting existing values of the same key. [\#3973](https://github.com/ClickHouse/ClickHouse/pull/3973) ([Amos Bird](https://github.com/amosbird))
- Added the function `toStartOfInterval`. [\#4304](https://github.com/ClickHouse/ClickHouse/pull/4304) ([Vitaly Baranov](https://github.com/vitlibar))
- Added the `RowBinaryWithNamesAndTypes` format. [\#4200](https://github.com/ClickHouse/ClickHouse/pull/4200) ([Oleg V. Kozlyuk](https://github.com/DarkWanderer))
- Added the `IPv4` and `IPv6` data types. More efficient implementations of `IPv*` functions. [\#3669](https://github.com/ClickHouse/ClickHouse/pull/3669) ([Vasily Nemkov](https://github.com/Enmk))
- Added the function `toStartOfTenMinutes()`. [\#4298](https://github.com/ClickHouse/ClickHouse/pull/4298) ([Vitaly Baranov](https://github.com/vitlibar))
- Added the `Protobuf` output format. [\#4005](https://github.com/ClickHouse/ClickHouse/pull/4005) [\#4158](https://github.com/ClickHouse/ClickHouse/pull/4158) ([Vitaly Baranov](https://github.com/vitlibar))
- Added Brotli support to the HTTP interface for data import (INSERTs). [\#4235](https://github.com/ClickHouse/ClickHouse/pull/4235) ([Mikhail](https://github.com/fandyushin))
- Added hints when a user makes a typo in a function name or data type in the command-line client. [\#4239](https://github.com/ClickHouse/ClickHouse/pull/4239) ([Danila Kutenin](https://github.com/danlark1))
- Added `Query-Id` to the server’s HTTP response header. [\#4231](https://github.com/ClickHouse/ClickHouse/pull/4231) ([Mikhail](https://github.com/fandyushin))

#### Experimental features {#experimental-features-2}

- Added `minmax` and `set` data skipping indices for the MergeTree family of table engines. [\#4143](https://github.com/ClickHouse/ClickHouse/pull/4143) ([Nikita Vasilev](https://github.com/nikvas0))
- Added conversion of `CROSS JOIN` to `INNER JOIN` if possible. [\#4221](https://github.com/ClickHouse/ClickHouse/pull/4221) [\#4266](https://github.com/ClickHouse/ClickHouse/pull/4266) ([Artem Zuikov](https://github.com/4ertus2))

#### Bug Fixes {#bug-fixes-17}
- Fixed `Not found column` for duplicate columns in the `JOIN ON` section. [\#4279](https://github.com/ClickHouse/ClickHouse/pull/4279) ([Artem Zuikov](https://github.com/4ertus2))
- Made the `START REPLICATED SENDS` command actually start replicated sends. [\#4229](https://github.com/ClickHouse/ClickHouse/pull/4229) ([nvartolomei](https://github.com/nvartolomei))
- Fixed execution of aggregate functions with `Array(LowCardinality)` arguments. [\#4055](https://github.com/ClickHouse/ClickHouse/pull/4055) ([KochetovNicolai](https://github.com/KochetovNicolai))
- Fixed wrong behaviour of `INSERT ... SELECT ... FROM file(...)` queries when the file has the `CSVWithNames` or `TSVWithNames` format and the first data row is missing. [\#4297](https://github.com/ClickHouse/ClickHouse/pull/4297) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a crash on dictionary reload if the dictionary is not available. This bug appeared in 19.1.6. [\#4188](https://github.com/ClickHouse/ClickHouse/pull/4188) ([proller](https://github.com/proller))
- Fixed `ALL JOIN` with duplicates in the right table. [\#4184](https://github.com/ClickHouse/ClickHouse/pull/4184) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed a segmentation fault with `use_uncompressed_cache=1` and an exception with a wrong uncompressed size. This bug appeared in 19.1.6. [\#4186](https://github.com/ClickHouse/ClickHouse/pull/4186) ([alesapin](https://github.com/alesapin))
- Fixed a `compile_expressions` bug with comparison of big dates (beyond the int16 range). [\#4341](https://github.com/ClickHouse/ClickHouse/pull/4341) ([alesapin](https://github.com/alesapin))
- Fixed an infinite loop when selecting from the table function `numbers(0)`. [\#4280](https://github.com/ClickHouse/ClickHouse/pull/4280) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Temporarily disabled predicate optimization for `ORDER BY`. [\#3890](https://github.com/ClickHouse/ClickHouse/pull/3890) ([Winter Zhang](https://github.com/zhang2014))
- Fixed an `Illegal instruction` error when using base64 functions on old CPUs. This error has been reproduced only when ClickHouse was compiled with gcc-8. [\#4275](https://github.com/ClickHouse/ClickHouse/pull/4275) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a `No message received` error when interacting with the PostgreSQL ODBC driver through a TLS connection. Also fixed a segfault when using the MySQL ODBC driver. [\#4170](https://github.com/ClickHouse/ClickHouse/pull/4170) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed an incorrect result when `Date` and `DateTime` arguments are used in branches of the conditional operator (function `if`). Added a generic case for function `if`. [\#4243](https://github.com/ClickHouse/ClickHouse/pull/4243) ([alexey-milovidov](https://github.com/alexey-milovidov))
- ClickHouse dictionaries now load within the `clickhouse` process. [\#4166](https://github.com/ClickHouse/ClickHouse/pull/4166) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a deadlock when a `SELECT` from a table with the `File` engine was retried after a `No such file or directory` error. [\#4161](https://github.com/ClickHouse/ClickHouse/pull/4161) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a race condition where selecting from `system.tables` could give a `table doesn't exist` error. [\#4313](https://github.com/ClickHouse/ClickHouse/pull/4313) ([alexey-milovidov](https://github.com/alexey-milovidov))
- `clickhouse-client` could segfault on exit while loading data for command-line suggestions if run in interactive mode. [\#4317](https://github.com/ClickHouse/ClickHouse/pull/4317) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a bug when the execution of mutations containing `IN` operators was producing incorrect results. [\#4099](https://github.com/ClickHouse/ClickHouse/pull/4099) ([Alex Zatelepin](https://github.com/ztlpn))
- Fixed an error: if there is a database with the `Dictionary` engine, all dictionaries are forced to load at server startup, and a dictionary with a ClickHouse source on localhost could not load. [\#4255](https://github.com/ClickHouse/ClickHouse/pull/4255) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed an error when system logs were created again at server shutdown. [\#4254](https://github.com/ClickHouse/ClickHouse/pull/4254) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Correctly return the right type and properly handle locks in the `joinGet` function. [\#4153](https://github.com/ClickHouse/ClickHouse/pull/4153) ([Amos Bird](https://github.com/amosbird))
- Added the `sumMapWithOverflow` function. [\#4151](https://github.com/ClickHouse/ClickHouse/pull/4151) ([Léo Ercolanelli](https://github.com/ercolanelli-leo))
- Fixed a segfault with `allow_experimental_multiple_joins_emulation`. [52de2c](https://github.com/ClickHouse/ClickHouse/commit/52de2cd927f7b5257dd67e175f0a5560a48840d0) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed a bug with incorrect `Date` and `DateTime` comparison. [\#4237](https://github.com/ClickHouse/ClickHouse/pull/4237) ([valexey](https://github.com/valexey))
- Fixed a fuzz test under the undefined behavior sanitizer: added a parameter type check for the `quantile*Weighted` family of functions. [\#4145](https://github.com/ClickHouse/ClickHouse/pull/4145) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a rare race condition where removal of old data parts could fail with a `File not found` error. [\#4378](https://github.com/ClickHouse/ClickHouse/pull/4378) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed package installation with a missing /etc/clickhouse-server/config.xml. [\#4343](https://github.com/ClickHouse/ClickHouse/pull/4343) ([proller](https://github.com/proller))

#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-5}

- Debian package: correct the /etc/clickhouse-server/preprocessed link according to the config. [\#4205](https://github.com/ClickHouse/ClickHouse/pull/4205) ([proller](https://github.com/proller))
- Various build fixes for FreeBSD. [\#4225](https://github.com/ClickHouse/ClickHouse/pull/4225) ([proller](https://github.com/proller))
- Added the ability to create, fill and drop tables in perftest. [\#4220](https://github.com/ClickHouse/ClickHouse/pull/4220) ([alesapin](https://github.com/alesapin))
- Added a script to check for duplicate includes. [\#4326](https://github.com/ClickHouse/ClickHouse/pull/4326) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added the ability to run queries by index in performance tests. [\#4264](https://github.com/ClickHouse/ClickHouse/pull/4264) ([alesapin](https://github.com/alesapin))
- The package with debug symbols is now suggested for installation. [\#4274](https://github.com/ClickHouse/ClickHouse/pull/4274) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Refactoring of performance-test. Better logging and signal handling. [\#4171](https://github.com/ClickHouse/ClickHouse/pull/4171) ([alesapin](https://github.com/alesapin))
- Added docs for the anonymized Yandex.Metrika datasets. [\#4164](https://github.com/ClickHouse/ClickHouse/pull/4164) ([alesapin](https://github.com/alesapin))
- Added a tool for converting an old month-partitioned part to the custom-partitioned format. [\#4195](https://github.com/ClickHouse/ClickHouse/pull/4195) ([Alex Zatelepin](https://github.com/ztlpn))
- Added docs about two datasets in S3. [\#4144](https://github.com/ClickHouse/ClickHouse/pull/4144) ([alesapin](https://github.com/alesapin))
- Added a script that creates a changelog from pull request descriptions. [\#4169](https://github.com/ClickHouse/ClickHouse/pull/4169) [\#4173](https://github.com/ClickHouse/ClickHouse/pull/4173) ([KochetovNicolai](https://github.com/KochetovNicolai))
- Added a Puppet module for ClickHouse. [\#4182](https://github.com/ClickHouse/ClickHouse/pull/4182) ([Maxim Fedotov](https://github.com/MaxFedotov))
- Added docs for a group of undocumented functions. [\#4168](https://github.com/ClickHouse/ClickHouse/pull/4168) ([Winter Zhang](https://github.com/zhang2014))
- ARM build fixes. [\#4210](https://github.com/ClickHouse/ClickHouse/pull/4210) [\#4306](https://github.com/ClickHouse/ClickHouse/pull/4306) [\#4291](https://github.com/ClickHouse/ClickHouse/pull/4291) ([proller](https://github.com/proller))
- Dictionary tests can now run from `ctest`. [\#4189](https://github.com/ClickHouse/ClickHouse/pull/4189) ([proller](https://github.com/proller))
- Now `/etc/ssl` is used as the default directory for SSL certificates. [\#4167](https://github.com/ClickHouse/ClickHouse/pull/4167) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added a check of SSE and AVX instruction support at startup. [\#4234](https://github.com/ClickHouse/ClickHouse/pull/4234) ([Igr](https://github.com/igron99))
- The init script now waits for the server to start. [\#4281](https://github.com/ClickHouse/ClickHouse/pull/4281) ([proller](https://github.com/proller))

#### Backward Incompatible Changes {#backward-incompatible-changes-1}

- Removed the `allow_experimental_low_cardinality_type` setting. `LowCardinality` data types are production ready. [\#4323](https://github.com/ClickHouse/ClickHouse/pull/4323) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Reduced the mark cache size and the uncompressed cache size according to the available memory amount. [\#4240](https://github.com/ClickHouse/ClickHouse/pull/4240) ([Lopatin Konstantin](https://github.com/k-lopatin))
- Added the keyword `INDEX` in the `CREATE TABLE` query. A column with the name `index` must be quoted with backticks or double quotes: `` `index` ``. [\#4143](https://github.com/ClickHouse/ClickHouse/pull/4143) ([Nikita Vasilev](https://github.com/nikvas0))
- `sumMap` now promotes the result type instead of overflowing. The old `sumMap` behavior can be obtained by using the `sumMapWithOverflow` function. [\#4151](https://github.com/ClickHouse/ClickHouse/pull/4151) ([Léo Ercolanelli](https://github.com/ercolanelli-leo))

#### Performance Improvements {#performance-improvements-4}

- `std::sort` was replaced by `pdqsort` for queries without `LIMIT`. [\#4236](https://github.com/ClickHouse/ClickHouse/pull/4236) ([Evgenii Pravda](https://github.com/kvinty))
- The server now reuses threads from the global thread pool. This affects performance in some corner cases. [\#4150](https://github.com/ClickHouse/ClickHouse/pull/4150) ([alexey-milovidov](https://github.com/alexey-milovidov))

#### Improvements {#improvements-5}

- Implemented AIO support for FreeBSD. [\#4305](https://github.com/ClickHouse/ClickHouse/pull/4305) ([urgordeadbeef](https://github.com/urgordeadbeef))
- `SELECT * FROM a JOIN b USING a, b` now returns the `a` and `b` columns only from the left table. [\#4141](https://github.com/ClickHouse/ClickHouse/pull/4141) ([Artem Zuikov](https://github.com/4ertus2))
- Allowed the `-C` option of the client to work the same as the `-c` option. [\#4232](https://github.com/ClickHouse/ClickHouse/pull/4232) ([syominsergey](https://github.com/syominsergey))
- The `--password` option used without a value now requires the password from stdin. [\#4230](https://github.com/ClickHouse/ClickHouse/pull/4230) ([BSD\_Conqueror](https://github.com/bsd-conqueror))
- Added highlighting of unescaped metacharacters in string literals that contain `LIKE` expressions or regexps. [\#4327](https://github.com/ClickHouse/ClickHouse/pull/4327) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added cancellation of HTTP read-only queries if the client socket goes away. [\#4213](https://github.com/ClickHouse/ClickHouse/pull/4213) ([nvartolomei](https://github.com/nvartolomei))
- The server now reports progress to keep client connections alive. [\#4215](https://github.com/ClickHouse/ClickHouse/pull/4215) ([Ivan](https://github.com/abyss7))
- Slightly better message with the reason for an OPTIMIZE query when the `optimize_throw_if_noop` setting is enabled. [\#4294](https://github.com/ClickHouse/ClickHouse/pull/4294) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added support for the `--version` option for the ClickHouse server. [\#4251](https://github.com/ClickHouse/ClickHouse/pull/4251) ([Lopatin Konstantin](https://github.com/k-lopatin))
- Added the `--help/-h` option to `clickhouse-server`. [\#4233](https://github.com/ClickHouse/ClickHouse/pull/4233) ([Yuriy Baranov](https://github.com/yurriy))
- Added support for scalar subqueries with an aggregate function state result. [\#4348](https://github.com/ClickHouse/ClickHouse/pull/4348) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Improved server shutdown time and the waiting time of ALTERs. [\#4372](https://github.com/ClickHouse/ClickHouse/pull/4372) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added info about the replicated\_can\_become\_leader setting to system.replicas and added logging if the replica won’t try to become the leader. [\#4379](https://github.com/ClickHouse/ClickHouse/pull/4379) ([Alex Zatelepin](https://github.com/ztlpn))

## ClickHouse release 19.1 {#clickhouse-release-19-1}

### ClickHouse release 19.1.14, 2019-03-14 {#clickhouse-release-19-1-14-2019-03-14}

- Fixed the error `Column ... queried more than once` that could happen if the setting `asterisk_left_columns_only` is set to 1 in case of using `GLOBAL JOIN` with `SELECT *` (a rare case). The issue does not exist in 19.3 and newer. [6bac7d8d](https://github.com/ClickHouse/ClickHouse/pull/4692/commits/6bac7d8d11a9b0d6de0b32b53c47eb2f6f8e7062) ([Artem Zuikov](https://github.com/4ertus2))

### ClickHouse release 19.1.13, 2019-03-12 {#clickhouse-release-19-1-13-2019-03-12}

This release contains exactly the same set of patches as 19.3.7.

### ClickHouse release 19.1.10, 2019-03-03 {#clickhouse-release-19-1-10-2019-03-03}

This release contains exactly the same set of patches as 19.3.6.
## ClickHouse release 19.1 {#clickhouse-release-19-1-1}

### ClickHouse release 19.1.9, 2019-02-21 {#clickhouse-release-19-1-9-2019-02-21}

#### Bug fixes {#bug-fixes-18}

- Fixed backward incompatibility with old versions due to an incorrect implementation of the `send_logs_level` setting. [\#4445](https://github.com/ClickHouse/ClickHouse/pull/4445) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed backward incompatibility of the table function `remote` introduced with column comments. [\#4446](https://github.com/ClickHouse/ClickHouse/pull/4446) ([alexey-milovidov](https://github.com/alexey-milovidov))

### ClickHouse release 19.1.8, 2019-02-16 {#clickhouse-release-19-1-8-2019-02-16}

#### Bug Fixes {#bug-fixes-19}

- Fixed package installation with a missing /etc/clickhouse-server/config.xml. [\#4343](https://github.com/ClickHouse/ClickHouse/pull/4343) ([proller](https://github.com/proller))

## ClickHouse release 19.1 {#clickhouse-release-19-1-2}

### ClickHouse release 19.1.7, 2019-02-15 {#clickhouse-release-19-1-7-2019-02-15}

#### Bug Fixes {#bug-fixes-20}

- Correctly return the right type and properly handle locks in the `joinGet` function. [\#4153](https://github.com/ClickHouse/ClickHouse/pull/4153) ([Amos Bird](https://github.com/amosbird))
- Fixed an error when system logs were created again at server shutdown. [\#4254](https://github.com/ClickHouse/ClickHouse/pull/4254) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed an error: if there is a database with the `Dictionary` engine, all dictionaries are forced to load at server startup, and a dictionary with a ClickHouse source on localhost could not load. [\#4255](https://github.com/ClickHouse/ClickHouse/pull/4255) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a bug when the execution of mutations containing `IN` operators was producing incorrect results. [\#4099](https://github.com/ClickHouse/ClickHouse/pull/4099) ([Alex Zatelepin](https://github.com/ztlpn))
- `clickhouse-client` could segfault on exit while loading data for command-line suggestions if run in interactive mode. [\#4317](https://github.com/ClickHouse/ClickHouse/pull/4317) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a race condition where selecting from `system.tables` could give a `table doesn't exist` error. [\#4313](https://github.com/ClickHouse/ClickHouse/pull/4313) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a deadlock when a `SELECT` from a table with the `File` engine was retried after a `No such file or directory` error. [\#4161](https://github.com/ClickHouse/ClickHouse/pull/4161) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed an issue: local ClickHouse dictionaries were loaded via TCP, but they should load within the process. [\#4166](https://github.com/ClickHouse/ClickHouse/pull/4166) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a `No message received` error when interacting with the PostgreSQL ODBC driver through a TLS connection. Also fixed a segfault when using the MySQL ODBC driver. [\#4170](https://github.com/ClickHouse/ClickHouse/pull/4170) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Temporarily disabled predicate optimization for `ORDER BY`. [\#3890](https://github.com/ClickHouse/ClickHouse/pull/3890) ([Winter Zhang](https://github.com/zhang2014))
- Fixed an infinite loop when selecting from the table function `numbers(0)`. [\#4280](https://github.com/ClickHouse/ClickHouse/pull/4280) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a `compile_expressions` bug with comparison of big dates (beyond the int16 range). [\#4341](https://github.com/ClickHouse/ClickHouse/pull/4341) ([alesapin](https://github.com/alesapin))
- Fixed a segmentation fault with `uncompressed_cache=1` and an exception with a wrong uncompressed size. [\#4186](https://github.com/ClickHouse/ClickHouse/pull/4186) ([alesapin](https://github.com/alesapin))
- Fixed `ALL JOIN` with duplicates in the right table. [\#4184](https://github.com/ClickHouse/ClickHouse/pull/4184) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed wrong behaviour of `INSERT ... SELECT ... FROM file(...)` queries when the file has the `CSVWithNames` or `TSVWithNames` format and the first data row is missing. [\#4297](https://github.com/ClickHouse/ClickHouse/pull/4297) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed execution of aggregate functions with `Array(LowCardinality)` arguments. [\#4055](https://github.com/ClickHouse/ClickHouse/pull/4055) ([KochetovNicolai](https://github.com/KochetovNicolai))
- Debian package: correct the /etc/clickhouse-server/preprocessed link according to the config. [\#4205](https://github.com/ClickHouse/ClickHouse/pull/4205) ([proller](https://github.com/proller))
- Fixed a fuzz test under the undefined behavior sanitizer: added a parameter type check for the `quantile*Weighted` family of functions. [\#4145](https://github.com/ClickHouse/ClickHouse/pull/4145) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Made the `START REPLICATED SENDS` command actually start replicated sends. [\#4229](https://github.com/ClickHouse/ClickHouse/pull/4229) ([nvartolomei](https://github.com/nvartolomei))
- Fixed `Not found column` for duplicate columns in the JOIN ON section. [\#4279](https://github.com/ClickHouse/ClickHouse/pull/4279) ([Artem Zuikov](https://github.com/4ertus2))
- Now `/etc/ssl` is used as the default directory for SSL certificates. [\#4167](https://github.com/ClickHouse/ClickHouse/pull/4167) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a crash on dictionary reload if the dictionary is not available. [\#4188](https://github.com/ClickHouse/ClickHouse/pull/4188) ([proller](https://github.com/proller))
- Fixed a bug with incorrect `Date` and `DateTime` comparison. [\#4237](https://github.com/ClickHouse/ClickHouse/pull/4237) ([valexey](https://github.com/valexey))
- Fixed an incorrect result when `Date` and `DateTime` arguments are used in branches of the conditional operator (function `if`). Added a generic case for function `if`. [\#4243](https://github.com/ClickHouse/ClickHouse/pull/4243) ([alexey-milovidov](https://github.com/alexey-milovidov))

### ClickHouse release 19.1.6, 2019-01-24 {#clickhouse-release-19-1-6-2019-01-24}

#### New Features {#new-features-7}

- Custom per-column compression codecs for tables. [\#3899](https://github.com/ClickHouse/ClickHouse/pull/3899) [\#4111](https://github.com/ClickHouse/ClickHouse/pull/4111) ([alesapin](https://github.com/alesapin), [Winter Zhang](https://github.com/zhang2014), [Anatoly](https://github.com/Sindbag))
- Added the compression codec `Delta`. [\#4052](https://github.com/ClickHouse/ClickHouse/pull/4052) ([alesapin](https://github.com/alesapin))
- Allowed altering compression codecs with `ALTER`. [\#4054](https://github.com/ClickHouse/ClickHouse/pull/4054) ([alesapin](https://github.com/alesapin))
- Added the functions `left`, `right`, `trim`, `ltrim`, `rtrim`, `timestampadd`, `timestampsub` for SQL standard compatibility. [\#3826](https://github.com/ClickHouse/ClickHouse/pull/3826) ([Ivan Blinkov](https://github.com/blinkov))
- Support for writing to `HDFS` tables and the `hdfs` table function. [\#4084](https://github.com/ClickHouse/ClickHouse/pull/4084) ([alesapin](https://github.com/alesapin))
- Added functions to search for multiple constant strings in a big haystack: `multiPosition`, `multiSearch`, `firstMatch`, also with `-UTF8`, `-CaseInsensitive`, and `-CaseInsensitiveUTF8` variants. [\#4053](https://github.com/ClickHouse/ClickHouse/pull/4053) ([Danila Kutenin](https://github.com/danlark1))
- Pruning of unused shards if the `SELECT` query filters by the sharding key (setting `optimize_skip_unused_shards`). [\#3851](https://github.com/ClickHouse/ClickHouse/pull/3851) ([Gleb Kanterov](https://github.com/kanterov), [Ivan](https://github.com/abyss7))
- Allowed the `Kafka` engine to ignore some number of parsing errors per block. [\#4094](https://github.com/ClickHouse/ClickHouse/pull/4094) ([Ivan](https://github.com/abyss7))
- Added support for evaluation of `CatBoost` multiclass models. The function `modelEvaluate` returns a tuple with per-class raw predictions for multiclass models. `libcatboostmodel.so` should be built with [\#607](https://github.com/catboost/catboost/pull/607). [\#3959](https://github.com/ClickHouse/ClickHouse/pull/3959) ([KochetovNicolai](https://github.com/KochetovNicolai))
- Added the functions `filesystemAvailable`, `filesystemFree`, `filesystemCapacity`. [\#4097](https://github.com/ClickHouse/ClickHouse/pull/4097) ([Boris Granveaud](https://github.com/bgranvea))
- Added the hashing functions `xxHash64` and `xxHash32`. [\#3905](https://github.com/ClickHouse/ClickHouse/pull/3905) ([filimonov](https://github.com/filimonov))
- Added the `gccMurmurHash` hashing function (GCC-flavoured Murmur hash) which uses the same hash seed as [gcc](https://github.com/gcc-mirror/gcc/blob/41d6b10e96a1de98e90a7c0378437c3255814b16/libstdc%2B%2B-v3/include/bits/functional_hash.h#L191). [\#4000](https://github.com/ClickHouse/ClickHouse/pull/4000) ([sundyli](https://github.com/sundy-li))
- Added the hashing functions `javaHash`, `hiveHash`. [\#3811](https://github.com/ClickHouse/ClickHouse/pull/3811) ([shangshujie365](https://github.com/shangshujie365))
- Added the table function `remoteSecure`. It works like `remote` but uses a secure connection. [\#4088](https://github.com/ClickHouse/ClickHouse/pull/4088) ([proller](https://github.com/proller))

#### Experimental features {#experimental-features-3}

- Added multiple JOINs emulation (the `allow_experimental_multiple_joins_emulation` setting). [\#3946](https://github.com/ClickHouse/ClickHouse/pull/3946) ([Artem Zuikov](https://github.com/4ertus2))

#### Bug Fixes {#bug-fixes-21}

- Made the `compiled_expression_cache_size` setting limited by default to lower memory consumption. [\#4041](https://github.com/ClickHouse/ClickHouse/pull/4041) ([alesapin](https://github.com/alesapin))
- Fixed a bug that led to hangups in threads that perform ALTERs of Replicated tables and in the thread that updates configuration from ZooKeeper. [\#2947](https://github.com/ClickHouse/ClickHouse/issues/2947) [\#3891](https://github.com/ClickHouse/ClickHouse/issues/3891) [\#3934](https://github.com/ClickHouse/ClickHouse/pull/3934) ([Alex Zatelepin](https://github.com/ztlpn))
- Fixed a race condition when executing a distributed ALTER task. The race condition led to more than one replica trying to execute the task and all replicas except one failing with a ZooKeeper error. [\#3904](https://github.com/ClickHouse/ClickHouse/pull/3904) ([Alex Zatelepin](https://github.com/ztlpn))
- Fixed a bug where `from_zk` config elements weren’t refreshed after a request to ZooKeeper timed out. [\#2947](https://github.com/ClickHouse/ClickHouse/issues/2947) [\#3947](https://github.com/ClickHouse/ClickHouse/pull/3947) ([Alex Zatelepin](https://github.com/ztlpn))
- Fixed a bug with a wrong prefix for IPv4 subnet masks. [\#3945](https://github.com/ClickHouse/ClickHouse/pull/3945) ([alesapin](https://github.com/alesapin))
- Fixed a crash (`std::terminate`) in rare cases when a new thread cannot be created due to exhausted resources. [\#3956](https://github.com/ClickHouse/ClickHouse/pull/3956) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a bug in `remote` table function execution when wrong restrictions were used in `getStructureOfRemoteTable`. [\#4009](https://github.com/ClickHouse/ClickHouse/pull/4009) ([alesapin](https://github.com/alesapin))
- Fixed a leak of netlink sockets. They were placed in a pool where they were never deleted, and new sockets were created at the start of a new thread when all current sockets were in use. [\#4017](https://github.com/ClickHouse/ClickHouse/pull/4017) ([Alex Zatelepin](https://github.com/ztlpn))
- Fixed a bug where the `/proc/self/fd` directory was closed before all fds had been read from `/proc` after forking the `odbc-bridge` subprocess. [\#4120](https://github.com/ClickHouse/ClickHouse/pull/4120) ([alesapin](https://github.com/alesapin))
- Fixed monotonic String-to-UInt conversion in case of String usage in the primary key. [\#3870](https://github.com/ClickHouse/ClickHouse/pull/3870) ([Winter Zhang](https://github.com/zhang2014))
- Fixed an error in the calculation of integer conversion function monotonicity. [\#3921](https://github.com/ClickHouse/ClickHouse/pull/3921) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a segfault in the `arrayEnumerateUniq`, `arrayEnumerateDense` functions in case of some invalid arguments. [\#3909](https://github.com/ClickHouse/ClickHouse/pull/3909) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed UB in StorageMerge. [\#3910](https://github.com/ClickHouse/ClickHouse/pull/3910) ([Amos Bird](https://github.com/amosbird))
- Fixed a segfault in the functions `addDays`, `subtractDays`. [\#3913](https://github.com/ClickHouse/ClickHouse/pull/3913) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed an error: the functions `round`, `floor`, `trunc`, `ceil` could return a bogus result when executed on an integer argument with a large negative scale. [\#3914](https://github.com/ClickHouse/ClickHouse/pull/3914) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a bug induced by ‘kill query sync’ which led to a core dump. [\#3916](https://github.com/ClickHouse/ClickHouse/pull/3916) ([muVulDeePecker](https://github.com/fancyqlx))
- Fixed a bug with a long delay after an empty replication queue. [\#3928](https://github.com/ClickHouse/ClickHouse/pull/3928) [\#3932](https://github.com/ClickHouse/ClickHouse/pull/3932) ([alesapin](https://github.com/alesapin))
- Fixed excessive memory usage in case of inserting into a table with a `LowCardinality` primary key. [\#3955](https://github.com/ClickHouse/ClickHouse/pull/3955) ([KochetovNicolai](https://github.com/KochetovNicolai))
- Fixed `LowCardinality` serialization for the `Native` format in case of empty arrays. [\#3907](https://github.com/ClickHouse/ClickHouse/issues/3907) [\#4011](https://github.com/ClickHouse/ClickHouse/pull/4011) ([KochetovNicolai](https://github.com/KochetovNicolai))
- Fixed an incorrect result while using DISTINCT on a single `LowCardinality` numeric column. [\#3895](https://github.com/ClickHouse/ClickHouse/issues/3895) [\#4012](https://github.com/ClickHouse/ClickHouse/pull/4012) ([KochetovNicolai](https://github.com/KochetovNicolai))
- Fixed specialized aggregation with a `LowCardinality` key (in case the `compile` setting is enabled). [\#3886](https://github.com/ClickHouse/ClickHouse/pull/3886) ([KochetovNicolai](https://github.com/KochetovNicolai))
- Fixed user and password forwarding for replicated table queries. [\#3957](https://github.com/ClickHouse/ClickHouse/pull/3957) ([alesapin](https://github.com/alesapin)) ([小路](https://github.com/nicelulu))
- Fixed a very rare race condition that could happen when listing tables in a Dictionary database while reloading dictionaries. [\#3970](https://github.com/ClickHouse/ClickHouse/pull/3970) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed an incorrect result when HAVING was used with ROLLUP or CUBE. [\#3756](https://github.com/ClickHouse/ClickHouse/issues/3756) [\#3837](https://github.com/ClickHouse/ClickHouse/pull/3837) ([Sam Chou](https://github.com/reflection))
- Fixed column aliases for queries with `JOIN ON` syntax and distributed tables. [\#3980](https://github.com/ClickHouse/ClickHouse/pull/3980) ([Winter Zhang](https://github.com/zhang2014))
- Fixed an error in the internal implementation of `quantileTDigest` (found by Artem Vakhrushev). This error never happens in ClickHouse and was relevant only for those who use the ClickHouse codebase as a library directly. [\#3935](https://github.com/ClickHouse/ClickHouse/pull/3935) ([alexey-milovidov](https://github.com/alexey-milovidov))

#### Improvements {#improvements-6}

- Support for `IF NOT EXISTS` in `ALTER TABLE ADD COLUMN` statements, along with `IF EXISTS` in `DROP/MODIFY/CLEAR/COMMENT COLUMN`. [\#3900](https://github.com/ClickHouse/ClickHouse/pull/3900) ([Boris Granveaud](https://github.com/bgranvea))
- Function `parseDateTimeBestEffort`: support for the formats `DD.MM.YYYY`, `DD.MM.YY`, `DD-MM-YYYY`, `DD-Mon-YYYY`, `DD/Month/YYYY` and similar. [\#3922](https://github.com/ClickHouse/ClickHouse/pull/3922) ([alexey-milovidov](https://github.com/alexey-milovidov))
- `CapnProtoInputStream` now supports jagged structures. [\#4063](https://github.com/ClickHouse/ClickHouse/pull/4063) ([Odin Hultgren Van Der Horst](https://github.com/Miniwoffer))
- Usability improvement: added a check that the server process is started by the data directory’s owner. Do not allow starting the server from root if the data belongs to a non-root user. [\#3785](https://github.com/ClickHouse/ClickHouse/pull/3785) ([sergey-v-galtsev](https://github.com/sergey-v-galtsev))
- Better logic of checking required columns during the analysis of queries with JOINs. [\#3930](https://github.com/ClickHouse/ClickHouse/pull/3930) ([Artem Zuikov](https://github.com/4ertus2))
- Decreased the number of connections in case of a large number of Distributed tables on a single server. [\#3726](https://github.com/ClickHouse/ClickHouse/pull/3726) ([Winter Zhang](https://github.com/zhang2014))
- Supported the totals row for `WITH TOTALS` queries via the ODBC driver. [\#3836](https://github.com/ClickHouse/ClickHouse/pull/3836) ([Maksim Koritckiy](https://github.com/nightweb))
- Allowed using `Enum`s as integers inside the `if` function. [\#3875](https://github.com/ClickHouse/ClickHouse/pull/3875) ([Ivan](https://github.com/abyss7))
- Added the `low_cardinality_allow_in_native_format` setting. If disabled, do not use the `LowCardinality` type in the `Native` format. [\#3879](https://github.com/ClickHouse/ClickHouse/pull/3879) ([KochetovNicolai](https://github.com/KochetovNicolai))
- Removed some redundant objects from the compiled expressions cache to lower memory usage. [\#4042](https://github.com/ClickHouse/ClickHouse/pull/4042) ([alesapin](https://github.com/alesapin))
- Added a check that the `SET send_logs_level = 'value'` query accepts an appropriate value. [\#3873](https://github.com/ClickHouse/ClickHouse/pull/3873) ([Sabyanin Maxim](https://github.com/s-mx))
- Fixed the data type check in type conversion functions. [\#3896](https://github.com/ClickHouse/ClickHouse/pull/3896) ([Winter Zhang](https://github.com/zhang2014))

#### Performance Improvements {#performance-improvements-5}

- Added a MergeTree setting `use_minimalistic_part_header_in_zookeeper`. If enabled, Replicated tables will store compact part metadata in a single part znode. This can dramatically reduce ZooKeeper snapshot size (especially if the tables have a lot of columns). Note that after enabling this setting you will not be able to downgrade to a version that doesn’t support it. [\#3960](https://github.com/ClickHouse/ClickHouse/pull/3960) ([Alex Zatelepin](https://github.com/ztlpn))
- Added a DFA-based implementation for the `sequenceMatch` and `sequenceCount` functions when the pattern doesn’t contain time. [\#4004](https://github.com/ClickHouse/ClickHouse/pull/4004) ([Léo Ercolanelli](https://github.com/ercolanelli-leo))
- Performance improvement for integer number serialization. [\#3968](https://github.com/ClickHouse/ClickHouse/pull/3968) ([Amos Bird](https://github.com/amosbird))
- Zero left padding of PODArray so that the -1 element is always valid and zeroed. It’s used for branchless calculation of offsets (see the sketch after this list). [\#3920](https://github.com/ClickHouse/ClickHouse/pull/3920) ([Amos Bird](https://github.com/amosbird))
- Reverted the `jemalloc` version which led to performance degradation. [\#4018](https://github.com/ClickHouse/ClickHouse/pull/4018) ([alexey-milovidov](https://github.com/alexey-milovidov))
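The padding trick in the PODArray entry above is easiest to see in code. Below is a minimal standalone sketch of the idea only, with invented names (the real optimization lives inside `PODArray` itself and does not copy the offsets): keeping one zero element to the left of the offsets makes "element -1" always valid, so the start and size of row `i` need no `if (i == 0)` branch.

``` cpp
#include <cstddef>
#include <cstdint>
#include <vector>

// Arrays are stored as concatenated elements plus an offsets array,
// where offsets[i] is the end position of the i-th row. With a zero pad
// to the left, "offsets[i - 1]" is valid even for i == 0.
struct PaddedOffsets
{
    std::vector<uint64_t> storage;  // storage[0] is the zero pad

    explicit PaddedOffsets(const std::vector<uint64_t> & offsets)
    {
        storage.reserve(offsets.size() + 1);
        storage.push_back(0);  // the pad: "element -1" is always 0
        storage.insert(storage.end(), offsets.begin(), offsets.end());
    }

    uint64_t start(size_t i) const { return storage[i]; }                  // branchless
    uint64_t size(size_t i) const { return storage[i + 1] - storage[i]; }  // branchless
};
```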
#### Backward Incompatible Changes {#backward-incompatible-changes-2}

- Removed the undocumented feature `ALTER MODIFY PRIMARY KEY` because it was superseded by the `ALTER MODIFY ORDER BY` command. [\#3887](https://github.com/ClickHouse/ClickHouse/pull/3887) ([Alex Zatelepin](https://github.com/ztlpn))
- Removed the function `shardByHash`. [\#3833](https://github.com/ClickHouse/ClickHouse/pull/3833) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Forbid using scalar subqueries with a result of type `AggregateFunction`. [\#3865](https://github.com/ClickHouse/ClickHouse/pull/3865) ([Ivan](https://github.com/abyss7))

#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-6}

- Added support for the PowerPC (`ppc64le`) build. [\#4132](https://github.com/ClickHouse/ClickHouse/pull/4132) ([Danila Kutenin](https://github.com/danlark1))
- Stateful functional tests are run on a publicly available dataset. [\#3969](https://github.com/ClickHouse/ClickHouse/pull/3969) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed an error when the server cannot start with the `bash: /usr/bin/clickhouse-extract-from-config: Operation not permitted` message within Docker or systemd-nspawn. [\#4136](https://github.com/ClickHouse/ClickHouse/pull/4136) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Updated the `rdkafka` library to v1.0.0-RC5. Used cppkafka instead of the raw C interface. [\#4025](https://github.com/ClickHouse/ClickHouse/pull/4025) ([Ivan](https://github.com/abyss7))
- Updated the `mariadb-client` library. Fixed one of the issues found by UBSan. [\#3924](https://github.com/ClickHouse/ClickHouse/pull/3924) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Some fixes for UBSan builds. [\#3926](https://github.com/ClickHouse/ClickHouse/pull/3926) [\#3021](https://github.com/ClickHouse/ClickHouse/pull/3021) [\#3948](https://github.com/ClickHouse/ClickHouse/pull/3948) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added per-commit runs of tests with the UBSan build.
- Added per-commit runs of the PVS-Studio static analyzer.
- Fixed bugs found by PVS-Studio. [\#4013](https://github.com/ClickHouse/ClickHouse/pull/4013) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed glibc compatibility issues. [\#4100](https://github.com/ClickHouse/ClickHouse/pull/4100) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Moved Docker images to 18.10 and added a compatibility file for glibc \>= 2.28. [\#3965](https://github.com/ClickHouse/ClickHouse/pull/3965) ([alesapin](https://github.com/alesapin))
- Added an env variable for users who don’t want to chown directories in the server Docker image. [\#3967](https://github.com/ClickHouse/ClickHouse/pull/3967) ([alesapin](https://github.com/alesapin))
- Enabled most of the warnings from `-Weverything` in clang. Enabled `-Wpedantic`. [\#3986](https://github.com/ClickHouse/ClickHouse/pull/3986) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added a few more warnings that are available only in clang 8. [\#3993](https://github.com/ClickHouse/ClickHouse/pull/3993) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Link to `libLLVM` rather than to individual LLVM libs when using shared linking. [\#3989](https://github.com/ClickHouse/ClickHouse/pull/3989) ([Orivej Desh](https://github.com/orivej))
- Added sanitizer variables for test images. [\#4072](https://github.com/ClickHouse/ClickHouse/pull/4072) ([alesapin](https://github.com/alesapin))
- The `clickhouse-server` Debian package now recommends the `libcap2-bin` package so that the `setcap` tool can be used for setting capabilities. This is optional. [\#4093](https://github.com/ClickHouse/ClickHouse/pull/4093) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Improved compilation time, fixed includes. [\#3898](https://github.com/ClickHouse/ClickHouse/pull/3898) ([proller](https://github.com/proller))
- Added performance tests for hash functions. [\#3918](https://github.com/ClickHouse/ClickHouse/pull/3918) ([filimonov](https://github.com/filimonov))
- Fixed cyclic library dependencies. [\#3958](https://github.com/ClickHouse/ClickHouse/pull/3958) ([proller](https://github.com/proller))
- Improved compilation with low available memory. [\#4030](https://github.com/ClickHouse/ClickHouse/pull/4030) ([proller](https://github.com/proller))
- Added a test script to reproduce performance degradation in `jemalloc`. [\#4036](https://github.com/ClickHouse/ClickHouse/pull/4036) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed misspellings in comments and string literals under `dbms`. [\#4122](https://github.com/ClickHouse/ClickHouse/pull/4122) ([maiha](https://github.com/maiha))
- Fixed typos in comments. [\#4089](https://github.com/ClickHouse/ClickHouse/pull/4089) ([Evgenii Pravda](https://github.com/kvinty))

## [Changelog for 2018](https://github.com/ClickHouse/ClickHouse/blob/master/docs/en/changelog/2018.md)

diff --git a/docs/ru/changelog/index.md b/docs/ru/changelog/index.md
deleted file mode 120000
index 79b747aee1b..00000000000
--- a/docs/ru/changelog/index.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../CHANGELOG.md
\ No newline at end of file
diff --git a/docs/ru/commercial/cloud.md b/docs/ru/commercial/cloud.md
index f096bdb92cf..9716f4f1cd2 100644
--- a/docs/ru/commercial/cloud.md
+++ b/docs/ru/commercial/cloud.md
@@ -1,20 +1,16 @@
----
-en_copy: true
----
+# Поставщики облачных услуг ClickHouse {#clickhouse-cloud-service-providers}

-# ClickHouse Cloud Service Providers {#clickhouse-cloud-service-providers}
-
-!!! info "Info"
-    If you have launched a public cloud with managed ClickHouse service, feel free to [open a pull-request](https://github.com/ClickHouse/ClickHouse/edit/master/docs/en/commercial/cloud.md) adding it to the following list.
+!!! info "Инфо"
+    Если вы запустили публичный облачный сервис с управляемым ClickHouse, не стесняйтесь [открыть pull request](https://github.com/ClickHouse/ClickHouse/edit/master/docs/en/commercial/cloud.md) с добавлением его в последующий список.
## Yandex Cloud {#yandex-cloud}

-[Yandex Managed Service for ClickHouse](https://cloud.yandex.com/services/managed-clickhouse?utm_source=referrals&utm_medium=clickhouseofficialsite&utm_campaign=link3) provides the following key features:
+[Yandex Managed Service for ClickHouse](https://cloud.yandex.ru/services/managed-clickhouse?utm_source=referrals&utm_medium=clickhouseofficialsite&utm_campaign=link3) предоставляет следующие ключевые возможности:

-- Fully managed ZooKeeper service for [ClickHouse replication](../operations/table_engines/replication.md)
-- Multiple storage type choices
-- Replicas in different availability zones
-- Encryption and isolation
-- Automated maintenance
+- Полностью управляемый сервис ZooKeeper для [репликации ClickHouse](../engines/table_engines/mergetree_family/replication.md)
+- Выбор типа хранилища
+- Реплики в разных зонах доступности
+- Шифрование и изоляция
+- Автоматизированное техническое обслуживание

-{## [Original article](https://clickhouse.tech/docs/en/commercial/cloud/) ##}
+{## [Оригинальная статья](https://clickhouse.tech/docs/ru/commercial/cloud/) ##}
diff --git a/docs/ru/commercial/index.md b/docs/ru/commercial/index.md
new file mode 100644
index 00000000000..f9065c7cd50
--- /dev/null
+++ b/docs/ru/commercial/index.md
@@ -0,0 +1,7 @@
+---
+toc_folder_title: Commercial
+toc_priority: 70
+toc_title: Commercial
+---
+
+
diff --git a/docs/ru/development/architecture.md b/docs/ru/development/architecture.md
index dc92d425d37..f5f57179ece 100644
--- a/docs/ru/development/architecture.md
+++ b/docs/ru/development/architecture.md
@@ -1,200 +1,201 @@
 ---
-en_copy: true
+machine_translated: true
+machine_translated_rev: 1cd5f0028d917696daf71ac1c9ee849c99c1d5c8
 ---

-# Overview of ClickHouse Architecture {#overview-of-clickhouse-architecture}
+# Обзор архитектуры ClickHouse {#overview-of-clickhouse-architecture}

-ClickHouse is a true column-oriented DBMS. Data is stored by columns and during the execution of arrays (vectors or chunks of columns). Whenever possible, operations are dispatched on arrays, rather than on individual values. It is called “vectorized query execution,” and it helps lower the cost of actual data processing.
+ClickHouse-это настоящая СУБД, ориентированная на столбцы. Данные хранятся столбцами и во время выполнения массивов (векторов или кусков столбцов). Когда это возможно, операции отправляются на массивы, а не на отдельные значения. Это называется «vectorized query execution,» и это помогает снизить стоимость фактической обработки данных.

-> This idea is nothing new. It dates back to the `APL` programming language and its descendants: `A +`, `J`, `K`, and `Q`. Array programming is used in scientific data processing. Neither is this idea something new in relational databases: for example, it is used in the `Vectorwise` system.
+> В этой идее нет ничего нового. Она восходит к тому времени, когда `APL` язык программирования и его потомки: `A +`, `J`, `K`, и `Q`. Массивное программирование используется в научной обработке данных. Эта идея также не является чем-то новым в реляционных базах данных: например, она используется в `Vectorwise` система.

-There are two different approaches for speeding up query processing: vectorized query execution and runtime code generation. The latter removes all indirection and dynamic dispatch. Neither of these approaches is strictly better than the other. Runtime code generation can be better when it fuses many operations, thus fully utilizing CPU execution units and the pipeline. Vectorized query execution can be less practical because it involves temporary vectors that must be written to the cache and read back. If the temporary data does not fit in the L2 cache, this becomes an issue. But vectorized query execution more easily utilizes the SIMD capabilities of the CPU. A [research paper](http://15721.courses.cs.cmu.edu/spring2016/papers/p5-sompolski.pdf) written by our friends shows that it is better to combine both approaches. ClickHouse uses vectorized query execution and has limited initial support for runtime code generation.
+Существует два различных подхода для ускорения обработки запросов: векторизованное выполнение запросов и генерация кода во время выполнения. Последнее устраняет все косвенные действия и динамическую диспетчеризацию. Ни один из этих подходов не является строго лучшим, чем другой. Генерация кода во время выполнения может быть лучше, когда он объединяет множество операций, таким образом полностью используя исполнительные блоки процессора и конвейер. Векторизованное выполнение запроса может быть менее практичным, поскольку оно включает временные векторы, которые должны быть записаны в кэш и считаны обратно. Если временные данные не помещаются в кэш L2, это становится проблемой. Но векторизованное выполнение запросов более легко использует возможности SIMD центрального процессора. Один [научная статья](http://15721.courses.cs.cmu.edu/spring2016/papers/p5-sompolski.pdf) написанное нашими друзьями показывает, что лучше сочетать оба подхода. ClickHouse использует векторизованное выполнение запросов и имеет ограниченную начальную поддержку для генерации кода во время выполнения.
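To make the contrast above concrete, here is a minimal standalone sketch of per-value versus per-chunk processing (invented names, not ClickHouse code); the second loop is the shape that lets a compiler auto-vectorize with SIMD:

``` cpp
#include <cstddef>
#include <cstdint>
#include <vector>

// Per-value execution: in an interpreter, each value would also pay for
// dynamic dispatch before reaching this point.
int64_t add_scalar(int64_t a, int64_t b)
{
    return a + b;
}

// Vectorized execution: dispatch happens once per chunk, and the tight
// loop over contiguous arrays is easy for the compiler to turn into SIMD.
void add_chunks(const std::vector<int64_t> & a,
                const std::vector<int64_t> & b,
                std::vector<int64_t> & out)
{
    out.resize(a.size());
    for (size_t i = 0; i < a.size(); ++i)
        out[i] = a[i] + b[i];
}
```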
-## Columns {#columns}
+## Столбцы {#columns}

-`IColumn` interface is used to represent columns in memory (actually, chunks of columns). This interface provides helper methods for the implementation of various relational operators. Almost all operations are immutable: they do not modify the original column, but create a new modified one. For example, the `IColumn :: filter` method accepts a filter byte mask. It is used for the `WHERE` and `HAVING` relational operators. Additional examples: the `IColumn :: permute` method to support `ORDER BY`, the `IColumn :: cut` method to support `LIMIT`.
+`IColumn` интерфейс используется для представления столбцов в памяти (собственно, кусков столбцов). Этот интерфейс предоставляет вспомогательные методы для реализации различных реляционных операторов. Почти все операции неизменяемы: они не изменяют исходный столбец, а создают новый измененный. Например, в `IColumn :: filter` метод принимает маску байта фильтра. Он используется для `WHERE` и `HAVING` реляционный оператор. Дополнительные примеры: `IColumn :: permute` способ поддержки `ORDER BY`, этот `IColumn :: cut` способ поддержки `LIMIT`.

-Various `IColumn` implementations (`ColumnUInt8`, `ColumnString`, and so on) are responsible for the memory layout of columns. The memory layout is usually a contiguous array. For the integer type of columns, it is just one contiguous array, like `std :: vector`. For `String` and `Array` columns, it is two vectors: one for all array elements, placed contiguously, and a second one for offsets to the beginning of each array. There is also `ColumnConst` that stores just one value in memory, but looks like a column.
+Различный `IColumn` реализации (`ColumnUInt8`, `ColumnString`, и так далее) отвечают за расположение столбцов в памяти. Расположение памяти обычно представляет собой непрерывный массив. Для целочисленного типа столбцов это всего лишь один непрерывный массив, например `std :: vector`. Для `String` и `Array` столбцы, это два вектора: один для всех элементов массива, расположенных последовательно, и второй для смещений к началу каждого массива. Существует также `ColumnConst` это сохраняет только одно значение в памяти, но выглядит как столбец.
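The two key points of this section, contiguous storage and immutable operations that return a new column, can be summarized in a small illustrative sketch. The names below (`ISimpleColumn`, `SimpleColumnUInt64`) are invented for the example and are far simpler than the real `IColumn`:

``` cpp
#include <cstddef>
#include <cstdint>
#include <memory>
#include <vector>

// A simplified sketch of a column interface in the spirit of IColumn.
struct ISimpleColumn
{
    virtual ~ISimpleColumn() = default;
    virtual size_t size() const = 0;
    // Immutable operation: returns a new column with rows where mask != 0.
    virtual std::unique_ptr<ISimpleColumn> filter(const std::vector<uint8_t> & mask) const = 0;
};

struct SimpleColumnUInt64 : ISimpleColumn
{
    std::vector<uint64_t> data;  // one contiguous array, like std::vector

    size_t size() const override { return data.size(); }

    std::unique_ptr<ISimpleColumn> filter(const std::vector<uint8_t> & mask) const override
    {
        auto res = std::make_unique<SimpleColumnUInt64>();
        for (size_t i = 0; i < data.size(); ++i)
            if (mask[i])
                res->data.push_back(data[i]);  // the original column is untouched
        return res;
    }
};
```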
-## Field {#field}
+## Поле {#field}

-Nevertheless, it is possible to work with individual values as well. To represent an individual value, the `Field` is used. `Field` is just a discriminated union of `UInt64`, `Int64`, `Float64`, `String` and `Array`. `IColumn` has the `operator[]` method to get the n-th value as a `Field` and the `insert` method to append a `Field` to the end of a column. These methods are not very efficient, because they require dealing with temporary `Field` objects representing an individual value. There are more efficient methods, such as `insertFrom`, `insertRangeFrom`, and so on.
+Тем не менее, можно работать и с индивидуальными ценностями. Чтобы представить индивидуальную ценность, то `Field` предназначенный. `Field` это просто дискриминированный Союз `UInt64`, `Int64`, `Float64`, `String` и `Array`. `IColumn` имеет `operator[]` метод получения n-го значения в виде a `Field` и `insert` способ, чтобы добавить `Field` до самого конца колонны. Эти методы не очень эффективны, потому что они требуют решения временных проблем `Field` объекты, представляющие индивидуальную ценность. Существуют и более эффективные методы, такие как `insertFrom`, `insertRangeFrom` и так далее.

-`Field` doesn’t have enough information about a specific data type for a table. For example, `UInt8`, `UInt16`, `UInt32`, and `UInt64` are all represented as `UInt64` in a `Field`.
+`Field` у него нет достаточной информации о конкретном типе данных для таблицы. Например, `UInt8`, `UInt16`, `UInt32`, и `UInt64` все они представлены в виде `UInt64` в `Field`.
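A rough analogue of such a discriminated union can be sketched with `std::variant` (illustrative only; the actual `Field` is a hand-written implementation with different trade-offs):

``` cpp
#include <cstddef>
#include <cstdint>
#include <string>
#include <variant>
#include <vector>

struct SimpleField;
// std::vector may be instantiated with an incomplete element type (C++17),
// which makes the recursive Array alternative possible.
using SimpleArray = std::vector<SimpleField>;

// A sketch of a discriminated union over the types mentioned above.
struct SimpleField
{
    std::variant<uint64_t, int64_t, double, std::string, SimpleArray> value;
};

// Getting the n-th value of a column as a field is convenient but slow:
// it materializes a temporary object for every value. UInt8/16/32/64
// column values would all widen to the single uint64_t alternative.
SimpleField nth_value(const std::vector<uint32_t> & column_data, size_t n)
{
    return SimpleField{uint64_t{column_data[n]}};
}
```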
Он реализуется путем приведения функций к определенному виду `IColumn` тип и дело с внутренним представлением непосредственно. Например, `ColumnUInt64` имеет `getData` метод, который возвращает ссылку на внутренний массив, а затем отдельная процедура считывает или заполняет этот массив непосредственно. У нас есть «leaky abstractions» чтобы обеспечить эффективную специализацию различных процедур. -## Data Types {#data_types} +## Тип данных {#data_types} -`IDataType` is responsible for serialization and deserialization: for reading and writing chunks of columns or individual values in binary or text form. `IDataType` directly corresponds to data types in tables. For example, there are `DataTypeUInt32`, `DataTypeDateTime`, `DataTypeString` and so on. +`IDataType` отвечает за сериализацию и десериализацию: чтение и запись фрагментов столбцов или отдельных значений в двоичной или текстовой форме. `IDataType` непосредственно соответствует типам данных в таблицах. Например, существуют `DataTypeUInt32`, `DataTypeDateTime`, `DataTypeString` и так далее. -`IDataType` and `IColumn` are only loosely related to each other. Different data types can be represented in memory by the same `IColumn` implementations. For example, `DataTypeUInt32` and `DataTypeDateTime` are both represented by `ColumnUInt32` or `ColumnConstUInt32`. In addition, the same data type can be represented by different `IColumn` implementations. For example, `DataTypeUInt8` can be represented by `ColumnUInt8` or `ColumnConstUInt8`. +`IDataType` и `IColumn` они лишь слабо связаны друг с другом. Различные типы данных могут быть представлены в памяти одним и тем же именем `IColumn` реализации. Например, `DataTypeUInt32` и `DataTypeDateTime` оба они представлены следующим образом `ColumnUInt32` или `ColumnConstUInt32`. Кроме того, один и тот же тип данных может быть представлен разными `IColumn` реализации. Например, `DataTypeUInt8` может быть представлен следующим образом `ColumnUInt8` или `ColumnConstUInt8`. -`IDataType` only stores metadata. For instance, `DataTypeUInt8` doesn’t store anything at all (except vptr) and `DataTypeFixedString` stores just `N` (the size of fixed-size strings). +`IDataType` хранит только метаданные. Например, `DataTypeUInt8` не хранит вообще ничего (кроме vptr) и `DataTypeFixedString` магазины просто `N` (размер строк фиксированного размера). -`IDataType` has helper methods for various data formats. Examples are methods to serialize a value with possible quoting, to serialize a value for JSON, and to serialize a value as part of the XML format. There is no direct correspondence to data formats. For example, the different data formats `Pretty` and `TabSeparated` can use the same `serializeTextEscaped` helper method from the `IDataType` interface. +`IDataType` имеет вспомогательные методы для различных форматов данных. Примерами являются методы сериализации значения с возможным цитированием, сериализации значения для JSON и сериализации значения в формате XML. Прямого соответствия форматам данных не существует. Например, различные форматы данных `Pretty` и `TabSeparated` можно использовать то же самое `serializeTextEscaped` вспомогательный метод от `IDataType` интерфейс. -## Block {#block} +## Блок {#block} -A `Block` is a container that represents a subset (chunk) of a table in memory. It is just a set of triples: `(IColumn, IDataType, column name)`. During query execution, data is processed by `Block`s. 
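A `Block` can be pictured roughly like this (a sketch assuming simplified pointer types; the real class also supports lookup by name and position, insertion, and cloning):

``` cpp
#include <memory>
#include <string>
#include <vector>

struct IColumn;   // chunk of column data (declarations only, for the sketch)
struct IDataType; // metadata describing how to work with that data

// One element of a Block: the (data, type, name) triple described above.
struct ColumnWithTypeAndName
{
    std::shared_ptr<IColumn> column;
    std::shared_ptr<IDataType> type;
    std::string name;
};

// A Block is essentially an ordered set of such triples.
struct Block
{
    std::vector<ColumnWithTypeAndName> columns;
};
```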
If we have a `Block`, we have data (in the `IColumn` object), we have information about its type (in `IDataType`) that tells us how to deal with that column, and we have the column name. It could be either the original column name from the table or some artificial name assigned for getting temporary results of calculations. +A `Block` это контейнер, представляющий подмножество (фрагмент) таблицы в памяти. Это всего лишь набор троек: `(IColumn, IDataType, column name)`. Во время выполнения запроса данные обрабатываются с помощью `Block`s. Если у нас есть `Block`, у нас есть данные (в `IColumn` объект), у нас есть информация о его типе (в `IDataType`) это говорит нам, как обращаться с этим столбцом, и у нас есть имя столбца. Это может быть либо исходное имя столбца из таблицы, либо какое-то искусственное имя, назначенное для получения временных результатов вычислений. -When we calculate some function over columns in a block, we add another column with its result to the block, and we don’t touch columns for arguments of the function because operations are immutable. Later, unneeded columns can be removed from the block, but not modified. It is convenient for the elimination of common subexpressions. +Когда мы вычисляем некоторую функцию по столбцам в блоке, мы добавляем другой столбец с его результатом в блок, и мы не касаемся столбцов для аргументов функции, потому что операции неизменяемы. Позже ненужные столбцы могут быть удалены из блока, но не изменены. Это удобно для исключения общих подвыражений. -Blocks are created for every processed chunk of data. Note that for the same type of calculation, the column names and types remain the same for different blocks, and only column data changes. It is better to split block data from the block header because small block sizes have a high overhead of temporary strings for copying shared\_ptrs and column names. +Блоки создаются для каждого обработанного фрагмента данных. Обратите внимание, что для одного и того же типа вычисления имена столбцов и типы остаются одинаковыми для разных блоков, и изменяются только данные столбцов. Лучше разделить данные блока из заголовка блока, потому что небольшие размеры блока имеют высокую нагрузку временных строк для копирования shared\_ptrs и имен столбцов. -## Block Streams {#block-streams} +## Блокировать Потоки {#block-streams} -Block streams are for processing data. We use streams of blocks to read data from somewhere, perform data transformations, or write data to somewhere. `IBlockInputStream` has the `read` method to fetch the next block while available. `IBlockOutputStream` has the `write` method to push the block somewhere. +Блочные потоки предназначены для обработки данных. Мы используем потоки блоков для чтения данных откуда-то, выполнения преобразований данных или записи данных куда-то. `IBlockInputStream` имеет `read` метод для извлечения следующего блока, пока он доступен. `IBlockOutputStream` имеет `write` метод, чтобы подтолкнуть блок куда-то. -Streams are responsible for: +Потоки отвечают за: -1. Reading or writing to a table. The table just returns a stream for reading or writing blocks. -2. Implementing data formats. For example, if you want to output data to a terminal in `Pretty` format, you create a block output stream where you push blocks, and it formats them. -3. Performing data transformations. Let’s say you have `IBlockInputStream` and want to create a filtered stream. You create `FilterBlockInputStream` and initialize it with your stream. 
Then when you pull a block from `FilterBlockInputStream`, it pulls a block from your stream, filters it, and returns the filtered block to you. Query execution pipelines are represented this way.
+1. Чтение из таблицы или запись в таблицу. Таблица просто возвращает поток для чтения или записи блоков.
+2. Реализация форматов данных. Например, если вы хотите вывести данные на терминал в формате `Pretty`, вы создаете блочный поток вывода, в который помещаете блоки, а он их форматирует.
+3. Выполнение преобразований данных. Допустим, у вас есть `IBlockInputStream` и вы хотите создать отфильтрованный поток. Вы создаете `FilterBlockInputStream` и инициализируете его своим потоком. Затем, когда вы извлекаете блок из `FilterBlockInputStream`, он извлекает блок из вашего потока, фильтрует его и возвращает вам отфильтрованный блок. Именно так представляются конвейеры выполнения запросов (см. набросок ниже).

-There are more sophisticated transformations. For example, when you pull from `AggregatingBlockInputStream`, it reads all data from its source, aggregates it, and then returns a stream of aggregated data for you. Another example: `UnionBlockInputStream` accepts many input sources in the constructor and also a number of threads. It launches multiple threads and reads from multiple sources in parallel.
+Существуют и более сложные преобразования. Например, когда вы извлекаете данные из `AggregatingBlockInputStream`, он считывает все данные из своего источника, агрегирует их, а затем возвращает вам поток агрегированных данных. Другой пример: `UnionBlockInputStream` принимает в конструкторе множество источников ввода, а также число потоков. Он запускает несколько потоков и читает из нескольких источников параллельно.

-> Block streams use the “pull” approach to control flow: when you pull a block from the first stream, it consequently pulls the required blocks from nested streams, and the entire execution pipeline will work. Neither “pull” nor “push” is the best solution, because control flow is implicit, and that limits the implementation of various features like simultaneous execution of multiple queries (merging many pipelines together). This limitation could be overcome with coroutines or just running extra threads that wait for each other. We may have more possibilities if we make control flow explicit: if we locate the logic for passing data from one calculation unit to another outside of those calculation units. Read this [article](http://journal.stuffwithstuff.com/2013/01/13/iteration-inside-and-out/) for more thoughts.
+> Блочные потоки используют «pull»-подход к управлению потоком выполнения: когда вы вытягиваете блок из первого потока, он, в свою очередь, вытягивает необходимые блоки из вложенных потоков, и таким образом работает весь конвейер выполнения. Ни «pull», ни «push» не являются лучшим решением, потому что поток управления неявен, а это ограничивает реализацию различных возможностей, таких как одновременное выполнение нескольких запросов (объединение многих конвейеров вместе). Это ограничение можно преодолеть с помощью сопрограмм или просто запуская дополнительные потоки, которые ждут друг друга. Возможностей может быть больше, если сделать поток управления явным: если вынести логику передачи данных из одного вычислительного блока в другой за пределы самих этих блоков. Прочитайте эту [статью](http://journal.stuffwithstuff.com/2013/01/13/iteration-inside-and-out/), чтобы узнать больше.

-We should note that the query execution pipeline creates temporary data at each step.
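Returning to the pull model from item 3 above, a toy sketch (hypothetical simplified types, not the real stream classes) might look like this:

``` cpp
#include <memory>
#include <optional>
#include <utility>
#include <vector>

using Block = std::vector<int>; // toy stand-in for a real Block

struct IBlockInputStream
{
    virtual ~IBlockInputStream() = default;
    // Returns the next block, or an empty optional when the stream is exhausted.
    virtual std::optional<Block> read() = 0;
};

// Pulls blocks from a nested stream, filters them, returns the filtered block.
struct FilterBlockInputStream : IBlockInputStream
{
    std::shared_ptr<IBlockInputStream> input;

    explicit FilterBlockInputStream(std::shared_ptr<IBlockInputStream> input_)
        : input(std::move(input_)) {}

    std::optional<Block> read() override
    {
        auto block = input->read(); // pulling here drives the nested stream
        if (!block)
            return std::nullopt;
        Block filtered;
        for (int value : *block)
            if (value > 0) // some predicate from the WHERE clause
                filtered.push_back(value);
        return filtered;
    }
};
```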
We try to keep block size small enough so that temporary data fits in the CPU cache. With that assumption, writing and reading temporary data is almost free in comparison with other calculations. We could consider an alternative, which is to fuse many operations in the pipeline together. It could make the pipeline as short as possible and remove much of the temporary data, which could be an advantage, but it also has drawbacks. For example, a split pipeline makes it easy to implement caching intermediate data, stealing intermediate data from similar queries running at the same time, and merging pipelines for similar queries. +Следует отметить, что конвейер выполнения запроса создает временные данные на каждом шаге. Мы стараемся держать размер блока достаточно маленьким, чтобы временные данные помещались в кэш процессора. При таком допущении запись и чтение временных данных практически бесплатны по сравнению с другими расчетами. Мы могли бы рассмотреть альтернативу, которая заключается в том, чтобы объединить многие операции в трубопроводе вместе. Это может сделать конвейер как можно короче и удалить большую часть временных данных, что может быть преимуществом, но у него также есть недостатки. Например, разделенный конвейер позволяет легко реализовать кэширование промежуточных данных, кражу промежуточных данных из аналогичных запросов, выполняемых одновременно, и объединение конвейеров для аналогичных запросов. -## Formats {#formats} +## Форматы {#formats} -Data formats are implemented with block streams. There are “presentational” formats only suitable for the output of data to the client, such as `Pretty` format, which provides only `IBlockOutputStream`. And there are input/output formats, such as `TabSeparated` or `JSONEachRow`. +Форматы данных реализуются с помощью блочных потоков. Есть «presentational» форматы, пригодные только для вывода данных клиенту, такие как `Pretty` формат, который предоставляет только `IBlockOutputStream`. И есть форматы ввода/вывода, такие как `TabSeparated` или `JSONEachRow`. -There are also row streams: `IRowInputStream` and `IRowOutputStream`. They allow you to pull/push data by individual rows, not by blocks. And they are only needed to simplify the implementation of row-oriented formats. The wrappers `BlockInputStreamFromRowInputStream` and `BlockOutputStreamFromRowOutputStream` allow you to convert row-oriented streams to regular block-oriented streams. +Существуют также потоки подряд : `IRowInputStream` и `IRowOutputStream`. Они позволяют вытягивать / выталкивать данные отдельными строками, а не блоками. И они нужны только для упрощения реализации ориентированных на строки форматов. Обертка `BlockInputStreamFromRowInputStream` и `BlockOutputStreamFromRowOutputStream` позволяет конвертировать потоки, ориентированные на строки, в обычные потоки, ориентированные на блоки. ## I/O {#io} -For byte-oriented input/output, there are `ReadBuffer` and `WriteBuffer` abstract classes. They are used instead of C++ `iostream`s. Don’t worry: every mature C++ project is using something other than `iostream`s for good reasons. +Для байт-ориентированных входов / выходов существуют `ReadBuffer` и `WriteBuffer` абстрактный класс. Они используются вместо C++ `iostream`s. Не волнуйтесь: каждый зрелый проект C++ использует что-то другое, чем `iostream`s по уважительным причинам. -`ReadBuffer` and `WriteBuffer` are just a contiguous buffer and a cursor pointing to the position in that buffer. Implementations may own or not own the memory for the buffer. 
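That shape, a contiguous buffer plus a cursor with a rarely called virtual hook to refill it, can be sketched as follows (an illustrative simplification; `ReadBufferSketch` is a hypothetical name):

``` cpp
#include <cstddef>

// Simplified sketch of a ReadBuffer-like class: a contiguous buffer and a
// cursor; the only virtual call refills the buffer when it is exhausted.
class ReadBufferSketch
{
public:
    ReadBufferSketch(char * buffer, size_t size)
        : begin(buffer), pos(buffer), end(buffer + size) {}
    virtual ~ReadBufferSketch() = default;

    // Hot path: no virtual calls while bytes remain in the buffer.
    bool read(char & c)
    {
        if (pos == end && !next())
            return false;
        c = *pos++;
        return true;
    }

protected:
    // Rarely called: derived classes refill [begin, end) with the next data
    // and reset pos; return false on end of stream.
    virtual bool next() = 0;

    char * begin;
    char * pos;
    char * end;
};
```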
There is a virtual method to fill the buffer with the following data (for `ReadBuffer`) or to flush the buffer somewhere (for `WriteBuffer`). The virtual methods are rarely called.
+`ReadBuffer` и `WriteBuffer` представляют собой просто непрерывный буфер и курсор, указывающий на позицию в этом буфере. Реализации могут владеть или не владеть памятью буфера. Существует виртуальный метод для заполнения буфера следующими данными (для `ReadBuffer`) или для сброса буфера куда-либо (для `WriteBuffer`). Виртуальные методы вызываются редко.

-Implementations of `ReadBuffer`/`WriteBuffer` are used for working with files and file descriptors and network sockets, for implementing compression (`CompressedWriteBuffer` is initialized with another WriteBuffer and performs compression before writing data to it), and for other purposes – the names `ConcatReadBuffer`, `LimitReadBuffer`, and `HashingWriteBuffer` speak for themselves.
+Реализации `ReadBuffer`/`WriteBuffer` используются для работы с файлами, файловыми дескрипторами и сетевыми сокетами, для реализации сжатия (`CompressedWriteBuffer` инициализируется другим WriteBuffer и выполняет сжатие перед записью данных в него) и для других целей: имена `ConcatReadBuffer`, `LimitReadBuffer` и `HashingWriteBuffer` говорят сами за себя.

-Read/WriteBuffers only deal with bytes. There are functions from `ReadHelpers` and `WriteHelpers` header files to help with formatting input/output. For example, there are helpers to write a number in decimal format.
+Буферы чтения/записи имеют дело только с байтами. В заголовочных файлах `ReadHelpers` и `WriteHelpers` есть функции, помогающие с форматированием ввода/вывода. Например, есть помощники для записи числа в десятичном формате.

-Let’s look at what happens when you want to write a result set in `JSON` format to stdout. You have a result set ready to be fetched from `IBlockInputStream`. You create `WriteBufferFromFileDescriptor(STDOUT_FILENO)` to write bytes to stdout. You create `JSONRowOutputStream`, initialized with that `WriteBuffer`, to write rows in `JSON` to stdout. You create `BlockOutputStreamFromRowOutputStream` on top of it, to represent it as `IBlockOutputStream`. Then you call `copyData` to transfer data from `IBlockInputStream` to `IBlockOutputStream`, and everything works. Internally, `JSONRowOutputStream` will write various JSON delimiters and call the `IDataType::serializeTextJSON` method with a reference to `IColumn` and the row number as arguments. Consequently, `IDataType::serializeTextJSON` will call a method from `WriteHelpers.h`: for example, `writeText` for numeric types and `writeJSONString` for `DataTypeString`.
+Давайте посмотрим, что происходит, когда вы хотите записать результирующий набор в формате `JSON` в stdout. У вас есть результирующий набор, готовый к извлечению из `IBlockInputStream`. Вы создаете `WriteBufferFromFileDescriptor(STDOUT_FILENO)`, чтобы записывать байты в stdout. Вы создаете `JSONRowOutputStream`, инициализированный этим `WriteBuffer`, чтобы записывать строки в формате `JSON` в stdout. Поверх него вы создаете `BlockOutputStreamFromRowOutputStream`, чтобы представить его как `IBlockOutputStream`. Затем вы вызываете `copyData` для передачи данных из `IBlockInputStream` в `IBlockOutputStream`, и все работает. Внутри `JSONRowOutputStream` записывает различные разделители JSON и вызывает метод `IDataType::serializeTextJSON` со ссылкой на `IColumn` и номером строки в качестве аргументов.
Следовательно, `IDataType::serializeTextJSON` вызывает метод из `WriteHelpers.h`: например, `writeText` для числовых типов и `writeJSONString` для `DataTypeString`.

-## Tables {#tables}
+## Таблицы {#tables}

-The `IStorage` interface represents tables. Different implementations of that interface are different table engines. Examples are `StorageMergeTree`, `StorageMemory`, and so on. Instances of these classes are just tables.
+Интерфейс `IStorage` представляет таблицы. Различные реализации этого интерфейса являются различными движками таблиц. Примеры: `StorageMergeTree`, `StorageMemory` и так далее. Экземпляры этих классов являются просто таблицами.

-The key `IStorage` methods are `read` and `write`. There are also `alter`, `rename`, `drop`, and so on. The `read` method accepts the following arguments: the set of columns to read from a table, the `AST` query to consider, and the desired number of streams to return. It returns one or multiple `IBlockInputStream` objects and information about the stage of data processing that was completed inside a table engine during query execution.
+Ключевыми методами `IStorage` являются `read` и `write`. Есть также `alter`, `rename`, `drop` и так далее. Метод `read` принимает следующие аргументы: набор столбцов для чтения из таблицы, запрос `AST`, который следует учитывать, и желаемое количество возвращаемых потоков. Он возвращает один или несколько объектов `IBlockInputStream` и информацию о стадии обработки данных, которая была завершена внутри движка таблицы во время выполнения запроса.

-In most cases, the read method is only responsible for reading the specified columns from a table, not for any further data processing. All further data processing is done by the query interpreter and is outside the responsibility of `IStorage`.
+В большинстве случаев метод `read` отвечает только за чтение указанных столбцов из таблицы, а не за дальнейшую обработку данных. Вся дальнейшая обработка данных выполняется интерпретатором запросов и не входит в зону ответственности `IStorage`.

-But there are notable exceptions:
+Но есть и заметные исключения:

-- The AST query is passed to the `read` method, and the table engine can use it to derive index usage and to read fewer data from a table.
-- Sometimes the table engine can process data itself to a specific stage. For example, `StorageDistributed` can send a query to remote servers, ask them to process data to a stage where data from different remote servers can be merged, and return that preprocessed data. The query interpreter then finishes processing the data.
+- Запрос AST передается в метод `read`, и движок таблицы может использовать его, чтобы задействовать индекс и прочитать из таблицы меньше данных.
+- Иногда движок таблицы может сам обрабатывать данные до определенного этапа. Например, `StorageDistributed` может отправить запрос на удаленные серверы, попросить их обработать данные до этапа, на котором данные с разных удаленных серверов могут быть объединены, и вернуть эти предварительно обработанные данные. Затем интерпретатор запросов завершает обработку данных.

-The table’s `read` method can return multiple `IBlockInputStream` objects to allow parallel data processing. These multiple block input streams can read from a table in parallel.
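In rough outline, the `read` entry point could be sketched like this (simplified signatures under assumption; the real interface also takes settings and a query context, and reports the processing stage):

``` cpp
#include <memory>
#include <string>
#include <vector>

struct IBlockInputStream; // stream of blocks, as in the sections above
struct ASTPtr;            // stand-in for the parsed query (declaration only)

// Hypothetical, pared-down IStorage for illustration.
struct IStorageSketch
{
    virtual ~IStorageSketch() = default;

    virtual std::vector<std::shared_ptr<IBlockInputStream>> read(
        const std::vector<std::string> & column_names, // columns to read
        const ASTPtr & query,       // AST the engine may use to derive index usage
        size_t max_streams) = 0;    // desired parallelism
};
```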
Then you can wrap these streams with various transformations (such as expression evaluation or filtering) that can be calculated independently and create a `UnionBlockInputStream` on top of them, to read from multiple streams in parallel. +Стол `read` метод может возвращать несколько значений `IBlockInputStream` объекты, позволяющие осуществлять параллельную обработку данных. Эти несколько блочных входных потоков могут считываться из таблицы параллельно. Затем вы можете обернуть эти потоки с помощью различных преобразований (таких как вычисление выражений или фильтрация), которые могут быть вычислены независимо, и создать `UnionBlockInputStream` поверх них, чтобы читать из нескольких потоков параллельно. -There are also `TableFunction`s. These are functions that return a temporary `IStorage` object to use in the `FROM` clause of a query. +Есть и другие варианты `TableFunction`s. Это функции, которые возвращают временное значение `IStorage` объект для использования в `FROM` предложение запроса. -To get a quick idea of how to implement your table engine, look at something simple, like `StorageMemory` or `StorageTinyLog`. +Чтобы получить быстрое представление о том, как реализовать свой движок таблиц, посмотрите на что-то простое, например `StorageMemory` или `StorageTinyLog`. -> As the result of the `read` method, `IStorage` returns `QueryProcessingStage` – information about what parts of the query were already calculated inside storage. +> В результате этого `read` метод, `IStorage` возвращается `QueryProcessingStage` – information about what parts of the query were already calculated inside storage. -## Parsers {#parsers} +## Синтаксический анализатор {#parsers} -A hand-written recursive descent parser parses a query. For example, `ParserSelectQuery` just recursively calls the underlying parsers for various parts of the query. Parsers create an `AST`. The `AST` is represented by nodes, which are instances of `IAST`. +Написанный от руки рекурсивный парсер спуска анализирует запрос. Например, `ParserSelectQuery` просто рекурсивно вызывает базовые Парсеры для различных частей запроса. Парсеры создают `AST`. То `AST` представлен узлами, которые являются экземплярами `IAST`. -> Parser generators are not used for historical reasons. +> Генераторы парсеров не используются по историческим причинам. -## Interpreters {#interpreters} +## Переводчики {#interpreters} -Interpreters are responsible for creating the query execution pipeline from an `AST`. There are simple interpreters, such as `InterpreterExistsQuery` and `InterpreterDropQuery`, or the more sophisticated `InterpreterSelectQuery`. The query execution pipeline is a combination of block input or output streams. For example, the result of interpreting the `SELECT` query is the `IBlockInputStream` to read the result set from; the result of the INSERT query is the `IBlockOutputStream` to write data for insertion to, and the result of interpreting the `INSERT SELECT` query is the `IBlockInputStream` that returns an empty result set on the first read, but that copies data from `SELECT` to `INSERT` at the same time. +Интерпретаторы отвечают за создание конвейера выполнения запроса из `AST`. Есть простые переводчики, такие как `InterpreterExistsQuery` и `InterpreterDropQuery` или более изощренные `InterpreterSelectQuery`. Конвейер выполнения запроса представляет собой комбинацию блочных входных и выходных потоков. 
Например, результат интерпретации `SELECT` запросов `IBlockInputStream` для чтения результирующего набора из; результат запроса INSERT - это `IBlockOutputStream` чтобы записать данные для вставки в, и результат интерпретации `INSERT SELECT` запросов `IBlockInputStream` это возвращает пустой результирующий набор при первом чтении, но копирует данные из него `SELECT` к `INSERT` в то же время. -`InterpreterSelectQuery` uses `ExpressionAnalyzer` and `ExpressionActions` machinery for query analysis and transformations. This is where most rule-based query optimizations are done. `ExpressionAnalyzer` is quite messy and should be rewritten: various query transformations and optimizations should be extracted to separate classes to allow modular transformations or query. +`InterpreterSelectQuery` использует `ExpressionAnalyzer` и `ExpressionActions` машины для анализа запросов и преобразований. Именно здесь выполняется большинство оптимизаций запросов на основе правил. `ExpressionAnalyzer` это довольно грязно и должно быть переписано: различные преобразования запросов и оптимизации должны быть извлечены в отдельные классы, чтобы позволить модульные преобразования или запрос. -## Functions {#functions} +## Функции {#functions} -There are ordinary functions and aggregate functions. For aggregate functions, see the next section. +Существуют обычные функции и агрегатные функции. Агрегатные функции см. В следующем разделе. -Ordinary functions don’t change the number of rows – they work as if they are processing each row independently. In fact, functions are not called for individual rows, but for `Block`’s of data to implement vectorized query execution. +Ordinary functions don't change the number of rows – they work as if they are processing each row independently. In fact, functions are not called for individual rows, but for `Block`'s данных для реализации векторизованного выполнения запросов. -There are some miscellaneous functions, like [blockSize](../query_language/functions/other_functions.md#function-blocksize), [rowNumberInBlock](../query_language/functions/other_functions.md#function-rownumberinblock), and [runningAccumulate](../query_language/functions/other_functions.md#function-runningaccumulate), that exploit block processing and violate the independence of rows. +Есть некоторые другие функции, такие как [размер блока](../sql_reference/functions/other_functions.md#function-blocksize), [роунумберинблок](../sql_reference/functions/other_functions.md#function-rownumberinblock), и [runningAccumulate](../sql_reference/functions/other_functions.md#function-runningaccumulate), которые эксплуатируют обработку блоков и нарушают независимость строк. -ClickHouse has strong typing, so there’s no implicit type conversion. If a function doesn’t support a specific combination of types, it throws an exception. But functions can work (be overloaded) for many different combinations of types. For example, the `plus` function (to implement the `+` operator) works for any combination of numeric types: `UInt8` + `Float32`, `UInt16` + `Int8`, and so on. Also, some variadic functions can accept any number of arguments, such as the `concat` function. +ClickHouse имеет сильную типизацию, поэтому нет никакого неявного преобразования типов. Если функция не поддерживает определенную комбинацию типов, она создает исключение. Но функции могут работать (перегружаться) для многих различных комбинаций типов. 
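For instance, a vectorized addition over whole columns, one call per block rather than per row, can be sketched as follows (a simplified illustration, not the actual `plus` implementation):

``` cpp
#include <cstdint>
#include <vector>

// Vectorized addition: called once per Block, processing a whole column run
// in a tight loop the compiler can auto-vectorize with SIMD.
std::vector<uint64_t> plusColumns(const std::vector<uint64_t> & a,
                                  const std::vector<uint64_t> & b)
{
    std::vector<uint64_t> result(a.size());
    for (size_t i = 0; i < a.size(); ++i)
        result[i] = a[i] + b[i];
    return result;
}
```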
Например, в `plus` функция (для реализации `+` оператор) работает для любой комбинации числовых типов: `UInt8` + `Float32`, `UInt16` + `Int8` и так далее. Кроме того, некоторые вариадические функции могут принимать любое количество аргументов, например `concat` функция. -Implementing a function may be slightly inconvenient because a function explicitly dispatches supported data types and supported `IColumns`. For example, the `plus` function has code generated by instantiation of a C++ template for each combination of numeric types, and constant or non-constant left and right arguments. +Реализация функции может быть немного неудобной, поскольку функция явно отправляет поддерживаемые типы данных и поддерживается `IColumns`. Например, в `plus` функция имеет код, генерируемый экземпляром шаблона C++ для каждой комбинации числовых типов, а также постоянные или непостоянные левые и правые аргументы. -It is an excellent place to implement runtime code generation to avoid template code bloat. Also, it makes it possible to add fused functions like fused multiply-add or to make multiple comparisons in one loop iteration. +Это отличное место для реализации генерации кода во время выполнения, чтобы избежать раздувания кода шаблона. Кроме того, он позволяет добавлять слитые функции, такие как fused multiply-add или выполнять несколько сравнений в одной итерации цикла. -Due to vectorized query execution, functions are not short-circuited. For example, if you write `WHERE f(x) AND g(y)`, both sides are calculated, even for rows, when `f(x)` is zero (except when `f(x)` is a zero constant expression). But if the selectivity of the `f(x)` condition is high, and calculation of `f(x)` is much cheaper than `g(y)`, it’s better to implement multi-pass calculation. It would first calculate `f(x)`, then filter columns by the result, and then calculate `g(y)` only for smaller, filtered chunks of data. +Из-за векторизованного выполнения запроса функции не закорачиваются. Например, если вы пишете `WHERE f(x) AND g(y)`, обе стороны вычисляются, даже для строк, когда `f(x)` равно нулю (за исключением тех случаев, когда `f(x)` является нулевым постоянным выражением). Но если избирательность самого `f(x)` состояние является высоким, и расчет `f(x)` это гораздо дешевле, чем `g(y)`, лучше всего реализовать многоходовой расчет. Это будет первый расчет `f(x)`, затем отфильтруйте столбцы по результату, а затем вычислите `g(y)` только для небольших отфильтрованных фрагментов данных. -## Aggregate Functions {#aggregate-functions} +## Статистическая функция {#aggregate-functions} -Aggregate functions are stateful functions. They accumulate passed values into some state and allow you to get results from that state. They are managed with the `IAggregateFunction` interface. States can be rather simple (the state for `AggregateFunctionCount` is just a single `UInt64` value) or quite complex (the state of `AggregateFunctionUniqCombined` is a combination of a linear array, a hash table, and a `HyperLogLog` probabilistic data structure). +Агрегатные функции - это функции, определяющие состояние. Они накапливают переданные значения в некотором состоянии и позволяют получать результаты из этого состояния. Они управляются с помощью `IAggregateFunction` интерфейс. 
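The shape of that contract can be sketched as follows (a hypothetical simplification using the simplest possible state, a single counter as in `AggregateFunctionCount`):

``` cpp
#include <cstdint>

// Sketch of the add/merge contract behind IAggregateFunction.
struct CountState
{
    uint64_t count = 0;
};

struct AggregateFunctionCountSketch
{
    // Consume one row into the state.
    void add(CountState & state) const { ++state.count; }

    // Combine two partial states, e.g. from different threads or servers.
    void merge(CountState & lhs, const CountState & rhs) const
    {
        lhs.count += rhs.count;
    }

    // Produce the final result from the accumulated state.
    uint64_t result(const CountState & state) const { return state.count; }
};
```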
Состояния могут быть довольно простыми (состояние `AggregateFunctionCount` представляет собой всего лишь одно значение `UInt64`) или довольно сложными (состояние `AggregateFunctionUniqCombined` представляет собой комбинацию линейного массива, хэш-таблицы и вероятностной структуры данных `HyperLogLog`).

-States are allocated in `Arena` (a memory pool) to deal with multiple states while executing a high-cardinality `GROUP BY` query. States can have a non-trivial constructor and destructor: for example, complicated aggregation states can allocate additional memory themselves. It requires some attention to creating and destroying states and properly passing their ownership and destruction order.
+Состояния выделяются в `Arena` (пуле памяти), чтобы работать с множеством состояний при выполнении запроса `GROUP BY` высокой кардинальности. Состояния могут иметь нетривиальный конструктор и деструктор: например, сложные агрегатные состояния могут сами выделять дополнительную память. Это требует внимания к созданию и уничтожению состояний, а также к правильной передаче владения ими и порядку уничтожения.

-Aggregation states can be serialized and deserialized to pass over the network during distributed query execution or to write them on the disk where there is not enough RAM. They can even be stored in a table with the `DataTypeAggregateFunction` to allow incremental aggregation of data.
+Агрегатные состояния могут быть сериализованы и десериализованы для передачи по сети во время выполнения распределенного запроса или для записи на диск, когда не хватает оперативной памяти. Они даже могут храниться в таблице с `DataTypeAggregateFunction`, чтобы обеспечить инкрементное агрегирование данных.

-> The serialized data format for aggregate function states is not versioned right now. It is ok if aggregate states are only stored temporarily. But we have the `AggregatingMergeTree` table engine for incremental aggregation, and people are already using it in production. It is the reason why backward compatibility is required when changing the serialized format for any aggregate function in the future.
+> Сериализованный формат данных для состояний агрегатных функций сейчас не версионируется. Это нормально, если агрегатные состояния хранятся только временно. Но у нас есть движок таблиц `AggregatingMergeTree` для инкрементного агрегирования, и люди уже используют его в продакшене. Именно поэтому при изменении сериализованного формата любой агрегатной функции в будущем потребуется обратная совместимость.

-## Server {#server}
+## Сервер {#server}

-The server implements several different interfaces:
+Сервер реализует несколько различных интерфейсов:

-- An HTTP interface for any foreign clients.
-- A TCP interface for the native ClickHouse client and for cross-server communication during distributed query execution.
-- An interface for transferring data for replication.
+- HTTP-интерфейс для любых сторонних клиентов.
+- TCP-интерфейс для собственного клиента ClickHouse и для межсерверного взаимодействия при выполнении распределенных запросов.
+- Интерфейс для передачи данных при репликации.

-Internally, it is just a primitive multithreaded server without coroutines or fibers. Since the server is not designed to process a high rate of simple queries but to process a relatively low rate of complex queries, each of them can process a vast amount of data for analytics.
+Внутри это просто примитивный многопоточный сервер без сопрограмм и файберов.
Поскольку сервер предназначен не для обработки высокой скорости простых запросов, а для обработки относительно низкой скорости сложных запросов, каждый из них может обрабатывать огромное количество данных для аналитики. -The server initializes the `Context` class with the necessary environment for query execution: the list of available databases, users and access rights, settings, clusters, the process list, the query log, and so on. Interpreters use this environment. +Сервер инициализирует программу `Context` класс с необходимой средой для выполнения запроса: список доступных баз данных, пользователей и прав доступа, настройки, кластеры, список процессов, журнал запросов и так далее. Переводчики используют эту среду. -We maintain full backward and forward compatibility for the server TCP protocol: old clients can talk to new servers, and new clients can talk to old servers. But we don’t want to maintain it eternally, and we are removing support for old versions after about one year. +Мы поддерживаем полную обратную и прямую совместимость для протокола TCP сервера: старые клиенты могут разговаривать с новыми серверами, а новые клиенты-со старыми серверами. Но мы не хотим поддерживать его вечно, и мы удаляем поддержку старых версий примерно через год. -!!! note "Note" - For most external applications, we recommend using the HTTP interface because it is simple and easy to use. The TCP protocol is more tightly linked to internal data structures: it uses an internal format for passing blocks of data, and it uses custom framing for compressed data. We haven’t released a C library for that protocol because it requires linking most of the ClickHouse codebase, which is not practical. +!!! note "Примечание" + Для большинства внешних приложений мы рекомендуем использовать интерфейс HTTP, поскольку он прост и удобен в использовании. Протокол TCP более тесно связан с внутренними структурами данных: он использует внутренний формат для передачи блоков данных, а также использует пользовательское обрамление для сжатых данных. Мы не выпустили библиотеку C для этого протокола, потому что она требует связывания большей части кодовой базы ClickHouse, что нецелесообразно. -## Distributed Query Execution {#distributed-query-execution} +## Выполнение Распределенных Запросов {#distributed-query-execution} -Servers in a cluster setup are mostly independent. You can create a `Distributed` table on one or all servers in a cluster. The `Distributed` table does not store data itself – it only provides a “view” to all local tables on multiple nodes of a cluster. When you SELECT from a `Distributed` table, it rewrites that query, chooses remote nodes according to load balancing settings, and sends the query to them. The `Distributed` table requests remote servers to process a query just up to a stage where intermediate results from different servers can be merged. Then it receives the intermediate results and merges them. The distributed table tries to distribute as much work as possible to remote servers and does not send much intermediate data over the network. +Серверы в кластерной установке в основном независимы. Вы можете создать `Distributed` таблица на одном или всех серверах кластера. То `Distributed` table does not store data itself – it only provides a «view» ко всем локальным таблицам на нескольких узлах кластера. Когда вы выберите из `Distributed` таблица, он переписывает этот запрос, выбирает удаленные узлы в соответствии с настройками балансировки нагрузки и отправляет запрос к ним. 
То `Distributed` таблица запрашивает удаленные серверы для обработки запроса только до стадии, когда промежуточные результаты с разных серверов могут быть объединены. Затем он получает промежуточные результаты и сливает их. Распределенная таблица пытается распределить как можно больше работы на удаленные серверы и не отправляет много промежуточных данных по сети. -Things become more complicated when you have subqueries in IN or JOIN clauses, and each of them uses a `Distributed` table. We have different strategies for the execution of these queries. +Все становится сложнее, когда у вас есть подзапросы в предложениях IN или JOIN, и каждый из них использует a `Distributed` стол. У нас есть разные стратегии выполнения этих запросов. -There is no global query plan for distributed query execution. Each node has its local query plan for its part of the job. We only have simple one-pass distributed query execution: we send queries for remote nodes and then merge the results. But this is not feasible for complicated queries with high cardinality GROUP BYs or with a large amount of temporary data for JOIN. In such cases, we need to “reshuffle” data between servers, which requires additional coordination. ClickHouse does not support that kind of query execution, and we need to work on it. +Глобального плана запросов для выполнения распределенных запросов не существует. Каждый узел имеет свой локальный план запроса для своей части задания. У нас есть только простое однопроходное распределенное выполнение запросов: мы отправляем запросы на удаленные узлы, а затем объединяем результаты. Но это неосуществимо для сложных запросов с высокой мощностью группы BYs или с большим количеством временных данных для соединения. В таких случаях нам необходимо: «reshuffle» данные между серверами, что требует дополнительной координации. ClickHouse не поддерживает такого рода выполнение запросов, и мы должны работать над этим. -## Merge Tree {#merge-tree} +## Дерево Слияния {#merge-tree} -`MergeTree` is a family of storage engines that supports indexing by primary key. The primary key can be an arbitrary tuple of columns or expressions. Data in a `MergeTree` table is stored in “parts”. Each part stores data in the primary key order, so data is ordered lexicographically by the primary key tuple. All the table columns are stored in separate `column.bin` files in these parts. The files consist of compressed blocks. Each block is usually from 64 KB to 1 MB of uncompressed data, depending on the average value size. The blocks consist of column values placed contiguously one after the other. Column values are in the same order for each column (the primary key defines the order), so when you iterate by many columns, you get values for the corresponding rows. +`MergeTree` это семейство механизмов хранения данных, поддерживающих индексацию по первичному ключу. Первичный ключ может быть произвольным кортежем столбцов или выражений. Данные в a `MergeTree` таблица хранится в «parts». Каждая часть хранит данные в порядке первичного ключа, поэтому данные лексикографически упорядочиваются кортежем первичного ключа. Все столбцы таблицы хранятся отдельно `column.bin` файлы в этих краях. Файлы состоят из сжатых блоков. Каждый блок обычно содержит от 64 КБ до 1 МБ несжатых данных, в зависимости от среднего размера значения. Блоки состоят из значений столбцов, расположенных последовательно друг за другом. 
Значения столбцов находятся в одном и том же порядке для каждого столбца (первичный ключ определяет порядок), поэтому при итерации по многим столбцам вы получаете значения для соответствующих строк. -The primary key itself is “sparse”. It doesn’t address every single row, but only some ranges of data. A separate `primary.idx` file has the value of the primary key for each N-th row, where N is called `index_granularity` (usually, N = 8192). Also, for each column, we have `column.mrk` files with “marks,” which are offsets to each N-th row in the data file. Each mark is a pair: the offset in the file to the beginning of the compressed block, and the offset in the decompressed block to the beginning of data. Usually, compressed blocks are aligned by marks, and the offset in the decompressed block is zero. Data for `primary.idx` always resides in memory, and data for `column.mrk` files is cached. +Сам первичный ключ является «sparse». Он адресует не каждую отдельную строку, а только некоторые диапазоны данных. Разделение `primary.idx` файл имеет значение первичного ключа для каждой N-й строки, где N называется `index_granularity` (обычно N = 8192). Кроме того, для каждой колонки у нас есть `column.mrk` файлы с «marks,» которые являются смещениями для каждой N-й строки в файле данных. Каждая метка представляет собой пару: смещение в файле к началу сжатого блока и смещение в распакованном блоке к началу данных. Обычно сжатые блоки выравниваются по меткам, а смещение в распакованном блоке равно нулю. Данные для `primary.idx` всегда находится в памяти, а данные для `column.mrk` файлы кэшируются. -When we are going to read something from a part in `MergeTree`, we look at `primary.idx` data and locate ranges that could contain requested data, then look at `column.mrk` data and calculate offsets for where to start reading those ranges. Because of sparseness, excess data may be read. ClickHouse is not suitable for a high load of simple point queries, because the entire range with `index_granularity` rows must be read for each key, and the entire compressed block must be decompressed for each column. We made the index sparse because we must be able to maintain trillions of rows per single server without noticeable memory consumption for the index. Also, because the primary key is sparse, it is not unique: it cannot check the existence of the key in the table at INSERT time. You could have many rows with the same key in a table. +Когда мы собираемся прочитать что-то из части в `MergeTree`, мы смотрим на `primary.idx` данные и найдите диапазоны, которые могут содержать запрошенные данные, а затем посмотрите на `column.mrk` данные и рассчитать смещения для того, чтобы начать чтение этих диапазонов. Из-за разреженности могут быть прочитаны избыточные данные. ClickHouse не подходит для высокой загрузки простых точечных запросов, так как весь диапазон с `index_granularity` строки должны быть прочитаны для каждого ключа, и весь сжатый блок должен быть распакован для каждого столбца. Мы сделали индекс разреженным, потому что мы должны быть в состоянии поддерживать триллионы строк на одном сервере без заметного потребления памяти для индекса. Кроме того, поскольку первичный ключ разрежен, он не является уникальным: он не может проверить существование ключа в таблице во время вставки. В таблице может быть много строк с одним и тем же ключом. -When you `INSERT` a bunch of data into `MergeTree`, that bunch is sorted by primary key order and forms a new part. 
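The effect of the sparse index described above can be sketched like this (a toy model assuming a single integer key; real parts store key tuples and then consult the `column.mrk` marks):

``` cpp
#include <algorithm>
#include <cstdint>
#include <utility>
#include <vector>

// primary.idx sketch: the key of every index_granularity-th row, ascending.
// Returns a half-open range of row numbers that may contain `key`; everything
// inside still has to be read and filtered exactly, which is why excess data
// may be read. The caller should clamp the upper bound to the part's row count.
std::pair<uint64_t, uint64_t> candidateRows(
    const std::vector<uint64_t> & sparse_keys, uint64_t index_granularity, uint64_t key)
{
    auto lower = std::lower_bound(sparse_keys.begin(), sparse_keys.end(), key);
    auto upper = std::upper_bound(sparse_keys.begin(), sparse_keys.end(), key);

    // Matching rows may start in the granule before the first mark >= key.
    uint64_t first_mark = (lower == sparse_keys.begin()) ? 0 : (lower - sparse_keys.begin() - 1);
    uint64_t last_mark = upper - sparse_keys.begin();
    return {first_mark * index_granularity, last_mark * index_granularity};
}
```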
There are background threads that periodically select some parts and merge them into a single sorted part to keep the number of parts relatively low. That’s why it is called `MergeTree`. Of course, merging leads to “write amplification”. All parts are immutable: they are only created and deleted, but not modified. When SELECT is executed, it holds a snapshot of the table (a set of parts). After merging, we also keep old parts for some time to make a recovery after failure easier, so if we see that some merged part is probably broken, we can replace it with its source parts.
+Когда вы вставляете (`INSERT`) пачку данных в `MergeTree`, эта пачка сортируется в порядке первичного ключа и образует новую часть. Существуют фоновые потоки, которые периодически выбирают некоторые части и объединяют их в одну отсортированную часть, чтобы количество частей оставалось относительно небольшим. Именно поэтому движок называется `MergeTree`. Конечно, слияние приводит к «write amplification» (усилению записи). Все части неизменяемы: они только создаются и удаляются, но не изменяются. Когда выполняется SELECT, он удерживает снимок таблицы (набор частей). После слияния мы также некоторое время сохраняем старые части, чтобы облегчить восстановление после сбоя: если мы видим, что какая-то объединенная часть, вероятно, повреждена, мы можем заменить ее исходными частями.

-`MergeTree` is not an LSM tree because it doesn’t contain “memtable” and “log”: inserted data is written directly to the filesystem. This makes it suitable only to INSERT data in batches, not by individual row and not very frequently – about once per second is ok, but a thousand times a second is not. We did it this way for simplicity’s sake, and because we are already inserting data in batches in our applications.
+`MergeTree` не является LSM-деревом, потому что в нем нет «memtable» и «log»: вставляемые данные записываются прямо в файловую систему. Поэтому он подходит только для вставки данных пачками, а не отдельными строками и не слишком часто: примерно раз в секунду допустимо, а тысячу раз в секунду нет. Мы сделали так ради простоты и потому, что в наших приложениях мы и так вставляем данные пачками.

-> MergeTree tables can only have one (primary) index: there aren’t any secondary indices. It would be nice to allow multiple physical representations under one logical table, for example, to store data in more than one physical order or even to allow representations with pre-aggregated data along with original data.
+> Таблицы MergeTree могут иметь только один (первичный) индекс: вторичных индексов нет. Было бы неплохо разрешить несколько физических представлений одной логической таблицы, например, хранить данные в более чем одном физическом порядке или даже разрешить представления с предварительно агрегированными данными наряду с исходными данными.

-There are MergeTree engines that are doing additional work during background merges. Examples are `CollapsingMergeTree` and `AggregatingMergeTree`. This could be treated as special support for updates.
+Есть движки MergeTree, которые выполняют дополнительную работу во время фоновых слияний. Примеры: `CollapsingMergeTree` и `AggregatingMergeTree`. Это можно рассматривать как специальную поддержку обновлений.
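As one example of such merge-time work, the collapsing idea can be sketched as a toy model (assuming a `(key, sign)` row layout with sign = ±1; this is an illustration only, not the engine's actual merge code, which collapses rows within the sorting-key order):

``` cpp
#include <cstdint>
#include <map>
#include <vector>

struct Row
{
    uint64_t key;
    int8_t sign; // +1 inserts a state, -1 cancels a previously inserted one
};

// Toy collapse during a background merge: rows whose signs cancel disappear.
std::vector<Row> collapse(const std::vector<Row> & rows)
{
    std::map<uint64_t, int64_t> balance;
    for (const auto & row : rows)
        balance[row.key] += row.sign;

    std::vector<Row> result;
    for (const auto & [key, total] : balance)
        for (int64_t i = 0; i < total; ++i) // keep only the uncancelled rows
            result.push_back({key, 1});
    return result;
}
```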
Имейте в виду, что это не настоящие обновления, поскольку пользователи обычно не имеют никакого контроля над временем выполнения фоновых слияний, а данные в `MergeTree` таблица почти всегда хранится в нескольких частях, а не в полностью объединенном виде. -## Replication {#replication} +## Копирование {#replication} -Replication in ClickHouse can be configured on a per-table basis. You could have some replicated and some non-replicated tables on the same server. You could also have tables replicated in different ways, such as one table with two-factor replication and another with three-factor. +Репликация в ClickHouse может быть настроена на основе каждой таблицы. Вы можете иметь некоторые реплицированные и некоторые нереплицированные таблицы на одном сервере. Вы также можете иметь таблицы, реплицируемые различными способами,например, одна таблица с двухфакторной репликацией, а другая-с трехфакторной. -Replication is implemented in the `ReplicatedMergeTree` storage engine. The path in `ZooKeeper` is specified as a parameter for the storage engine. All tables with the same path in `ZooKeeper` become replicas of each other: they synchronize their data and maintain consistency. Replicas can be added and removed dynamically simply by creating or dropping a table. +Репликация осуществляется в виде `ReplicatedMergeTree` подсистема хранилища. Путь в `ZooKeeper` указывается в качестве параметра для механизма хранения данных. Все таблицы с одинаковым путем внутри `ZooKeeper` становятся репликами друг друга: они синхронизируют свои данные и поддерживают согласованность. Реплики можно добавлять и удалять динамически, просто создавая или удаляя таблицу. -Replication uses an asynchronous multi-master scheme. You can insert data into any replica that has a session with `ZooKeeper`, and data is replicated to all other replicas asynchronously. Because ClickHouse doesn’t support UPDATEs, replication is conflict-free. As there is no quorum acknowledgment of inserts, just-inserted data might be lost if one node fails. +Репликация использует асинхронную многомастерную схему. Вы можете вставить данные в любую реплику, которая имеет сеанс с `ZooKeeper`, и данные реплицируются во все остальные реплики асинхронно. Поскольку ClickHouse не поддерживает обновления, репликация является бесконфликтной. Поскольку нет подтверждения кворума вставок, только что вставленные данные могут быть потеряны, если один узел выйдет из строя. -Metadata for replication is stored in ZooKeeper. There is a replication log that lists what actions to do. Actions are: get part; merge parts; drop a partition, and so on. Each replica copies the replication log to its queue and then executes the actions from the queue. For example, on insertion, the “get the part” action is created in the log, and every replica downloads that part. Merges are coordinated between replicas to get byte-identical results. All parts are merged in the same way on all replicas. It is achieved by electing one replica as the leader, and that replica initiates merges and writes “merge parts” actions to the log. +Метаданные для репликации хранятся в ZooKeeper. Существует журнал репликации, в котором перечислены необходимые действия. Действия таковы: получить часть; объединить части; удалить раздел и так далее. Каждая реплика копирует журнал репликации в свою очередь, а затем выполняет действия из этой очереди. Например, при вставке «get the part» действие создается в журнале, и каждая реплика загружает эту часть. 
Слияния координируются между репликами для получения идентичных байтам результатов. Все части объединяются одинаково на всех репликах. Это достигается путем выбора одной реплики в качестве лидера, и эта реплика инициирует слияние и запись «merge parts» действия по ведению журнала. -Replication is physical: only compressed parts are transferred between nodes, not queries. Merges are processed on each replica independently in most cases to lower the network costs by avoiding network amplification. Large merged parts are sent over the network only in cases of significant replication lag. +Репликация является физической: между узлами передаются только сжатые части, а не запросы. Слияния обрабатываются на каждой реплике независимо в большинстве случаев, чтобы снизить затраты на сеть, избегая усиления сети. Большие объединенные части передаются по сети только в случаях значительного запаздывания репликации. -Besides, each replica stores its state in ZooKeeper as the set of parts and its checksums. When the state on the local filesystem diverges from the reference state in ZooKeeper, the replica restores its consistency by downloading missing and broken parts from other replicas. When there is some unexpected or broken data in the local filesystem, ClickHouse does not remove it, but moves it to a separate directory and forgets it. +Кроме того, каждая реплика хранит свое состояние в ZooKeeper как набор деталей и их контрольные суммы. Когда состояние локальной файловой системы отличается от эталонного состояния в ZooKeeper, реплика восстанавливает свою согласованность, загружая недостающие и сломанные части из других реплик. Когда в локальной файловой системе появляются неожиданные или неработающие данные, ClickHouse не удаляет их, а перемещает в отдельный каталог и забывает. -!!! note "Note" - The ClickHouse cluster consists of independent shards, and each shard consists of replicas. The cluster is **not elastic**, so after adding a new shard, data is not rebalanced between shards automatically. Instead, the cluster load is supposed to be adjusted to be uneven. This implementation gives you more control, and it is ok for relatively small clusters, such as tens of nodes. But for clusters with hundreds of nodes that we are using in production, this approach becomes a significant drawback. We should implement a table engine that spans across the cluster with dynamically replicated regions that could be split and balanced between clusters automatically. +!!! note "Примечание" + Кластер ClickHouse состоит из независимых сегментов, и каждый сегмент состоит из реплик. Кластер таков **неупругий**, поэтому после добавления нового осколка данные не будут автоматически перебалансированы между осколками. Вместо этого предполагается, что нагрузка на кластер будет регулироваться неравномерно. Эта реализация дает вам больше контроля, и это нормально для относительно небольших кластеров, таких как десятки узлов. Но для кластеров с сотнями узлов, которые мы используем в производстве, этот подход становится существенным недостатком. Мы должны реализовать механизм таблиц, который охватывает весь кластер с динамически реплицируемыми областями, которые могут быть разделены и сбалансированы между кластерами автоматически. 
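The replication log entries mentioned above can be pictured roughly as follows (a hypothetical sketch of the shape of the data kept in ZooKeeper, not the actual schema):

``` cpp
#include <string>
#include <vector>

// Sketch of a replication log entry: each replica copies such entries into
// its own queue and executes them in order.
struct LogEntrySketch
{
    enum class Type { GET_PART, MERGE_PARTS, DROP_PARTITION };

    Type type;
    std::string new_part_name;             // part to fetch or produce
    std::vector<std::string> source_parts; // inputs for MERGE_PARTS
};
```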
-{## [Original article](https://clickhouse.tech/docs/en/development/architecture/) ##} +{## [Оригинальная статья](https://clickhouse.tech/docs/en/development/architecture/) ##} diff --git a/docs/ru/development/build.md b/docs/ru/development/build.md index 32042a4128e..3e0c3763be6 100644 --- a/docs/ru/development/build.md +++ b/docs/ru/development/build.md @@ -1,26 +1,27 @@ --- -en_copy: true +machine_translated: true +machine_translated_rev: 1cd5f0028d917696daf71ac1c9ee849c99c1d5c8 --- -# How to Build ClickHouse for Development {#how-to-build-clickhouse-for-development} +# Как построить ClickHouse для развития {#how-to-build-clickhouse-for-development} -The following tutorial is based on the Ubuntu Linux system. -With appropriate changes, it should also work on any other Linux distribution. -Supported platforms: x86\_64 and AArch64. Support for Power9 is experimental. +Следующий учебник основан на системе Ubuntu Linux. +С соответствующими изменениями он также должен работать на любом другом дистрибутиве Linux. +Поддерживаемые платформы: x86\_64 и AArch64. Поддержка Power9 является экспериментальной. -## Install Git, CMake, Python and Ninja {#install-git-cmake-python-and-ninja} +## Установите Git, CMake, Python и Ninja {#install-git-cmake-python-and-ninja} ``` bash $ sudo apt-get install git cmake python ninja-build ``` -Or cmake3 instead of cmake on older systems. +Или cmake3 вместо cmake на старых системах. -## Install GCC 9 {#install-gcc-9} +## Установка GCC 9 {#install-gcc-9} -There are several ways to do this. +Есть несколько способов сделать это. -### Install from a PPA Package {#install-from-a-ppa-package} +### Установка из PPA пакет {#install-from-a-ppa-package} ``` bash $ sudo apt-get install software-properties-common @@ -29,30 +30,30 @@ $ sudo apt-get update $ sudo apt-get install gcc-9 g++-9 ``` -### Install from Sources {#install-from-sources} +### Установка из источников {#install-from-sources} -Look at [utils/ci/build-gcc-from-sources.sh](https://github.com/ClickHouse/ClickHouse/blob/master/utils/ci/build-gcc-from-sources.sh) +Смотреть на [utils/ci/build-gcc-from-sources.sh](https://github.com/ClickHouse/ClickHouse/blob/master/utils/ci/build-gcc-from-sources.sh) -## Use GCC 9 for Builds {#use-gcc-9-for-builds} +## Использовать GCC для сборки 9 {#use-gcc-9-for-builds} ``` bash $ export CC=gcc-9 $ export CXX=g++-9 ``` -## Checkout ClickHouse Sources {#checkout-clickhouse-sources} +## Проверка Источников ClickHouse {#checkout-clickhouse-sources} ``` bash $ git clone --recursive git@github.com:ClickHouse/ClickHouse.git ``` -or +или ``` bash $ git clone --recursive https://github.com/ClickHouse/ClickHouse.git ``` -## Build ClickHouse {#build-clickhouse} +## Построить ClickHouse {#build-clickhouse} ``` bash $ cd ClickHouse @@ -63,23 +64,23 @@ $ ninja $ cd .. ``` -To create an executable, run `ninja clickhouse`. -This will create the `programs/clickhouse` executable, which can be used with `client` or `server` arguments. +Чтобы создать исполняемый файл, выполните команду `ninja clickhouse`. +Это позволит создать `programs/clickhouse` исполняемый файл, который может быть использован с `client` или `server` аргументы. 
-# How to Build ClickHouse on Any Linux {#how-to-build-clickhouse-on-any-linux}
+# Как собрать ClickHouse на любом Linux {#how-to-build-clickhouse-on-any-linux}

-The build requires the following components:
+Для сборки требуются следующие компоненты:

-- Git (is used only to checkout the sources, it’s not needed for the build)
-- CMake 3.10 or newer
-- Ninja (recommended) or Make
-- C++ compiler: gcc 9 or clang 8 or newer
-- Linker: lld or gold (the classic GNU ld won’t work)
-- Python (is only used inside LLVM build and it is optional)
+- Git (используется только для получения исходников; для самой сборки он не нужен)
+- CMake 3.10 или новее
+- Ninja (рекомендуется) или Make
+- Компилятор C++: gcc 9 или clang 8 или новее
+- Компоновщик: lld или gold (классический GNU ld не подойдет)
+- Python (используется только внутри сборки LLVM и является необязательным)

-If all the components are installed, you may build in the same way as the steps above.
+Если все компоненты установлены, можно выполнить сборку так же, как описано в шагах выше.

-Example for Ubuntu Eoan:
+Пример для Ubuntu Eoan:

    sudo apt update
    sudo apt install git cmake ninja-build g++ python
    git clone --recursive https://github.com/ClickHouse/ClickHouse.git
@@ -88,7 +89,7 @@ Example for Ubuntu Eoan:
    cmake ../ClickHouse
    ninja

-Example for OpenSUSE Tumbleweed:
+Пример для OpenSUSE Tumbleweed:

    sudo zypper install git cmake ninja gcc-c++ python lld
    git clone --recursive https://github.com/ClickHouse/ClickHouse.git
@@ -96,7 +97,7 @@ Example for OpenSUSE Tumbleweed:
    cmake ../ClickHouse
    ninja

-Example for Fedora Rawhide:
+Пример для Fedora Rawhide:

    sudo yum update
    yum --nogpg install git cmake make gcc-c++ python2
@@ -105,34 +106,34 @@ Example for Fedora Rawhide:
    cmake ../ClickHouse
    make -j $(nproc)

-# You Don’t Have to Build ClickHouse {#you-dont-have-to-build-clickhouse}
+# Собирать ClickHouse необязательно {#you-dont-have-to-build-clickhouse}

-ClickHouse is available in pre-built binaries and packages. Binaries are portable and can be run on any Linux flavour.
+ClickHouse доступен в виде готовых двоичных файлов и пакетов. Двоичные файлы переносимы и запускаются на любом дистрибутиве Linux.

-They are built for stable, prestable and testing releases as long as for every commit to master and for every pull request.
+Они собираются для стабильных, предстабильных (prestable) и тестовых релизов, а также для каждого коммита в master и каждого pull request.

-To find the freshest build from `master`, go to [commits page](https://github.com/ClickHouse/ClickHouse/commits/master), click on the first green checkmark or red cross near commit, and click to the “Details” link right after “ClickHouse Build Check”.
+Чтобы найти самую свежую сборку из `master`, откройте [страницу коммитов](https://github.com/ClickHouse/ClickHouse/commits/master), нажмите на первую зеленую галочку или красный крестик рядом с коммитом и перейдите по ссылке «Details» сразу после «ClickHouse Build Check».
-# How to Build ClickHouse Debian Package {#how-to-build-clickhouse-debian-package} +# Как создать пакет ClickHouse Debian {#how-to-build-clickhouse-debian-package} -## Install Git and Pbuilder {#install-git-and-pbuilder} +## Установите Git и Pbuilder {#install-git-and-pbuilder} ``` bash $ sudo apt-get update $ sudo apt-get install git python pbuilder debhelper lsb-release fakeroot sudo debian-archive-keyring debian-keyring ``` -## Checkout ClickHouse Sources {#checkout-clickhouse-sources-1} +## Проверка Источников ClickHouse {#checkout-clickhouse-sources-1} ``` bash $ git clone --recursive --branch master https://github.com/ClickHouse/ClickHouse.git $ cd ClickHouse ``` -## Run Release Script {#run-release-script} +## Запустить Сценарий Выпуска {#run-release-script} ``` bash $ ./release ``` -[Original article](https://clickhouse.tech/docs/en/development/build/) +[Оригинальная статья](https://clickhouse.tech/docs/en/development/build/) diff --git a/docs/ru/development/build_cross_arm.md b/docs/ru/development/build_cross_arm.md index 0936a3133b2..27e2d73c759 100644 --- a/docs/ru/development/build_cross_arm.md +++ b/docs/ru/development/build_cross_arm.md @@ -1,17 +1,18 @@ --- -en_copy: true +machine_translated: true +machine_translated_rev: 1cd5f0028d917696daf71ac1c9ee849c99c1d5c8 --- -# How to Build ClickHouse on Linux for AARCH64 (ARM64) architecture {#how-to-build-clickhouse-on-linux-for-aarch64-arm64-architecture} +# Как построить ClickHouse на Linux для архитектуры AArch64 (ARM64) {#how-to-build-clickhouse-on-linux-for-aarch64-arm64-architecture} -This is for the case when you have Linux machine and want to use it to build `clickhouse` binary that will run on another Linux machine with AARCH64 CPU architecture. This is intended for continuous integration checks that run on Linux servers. +Это для случая, когда у вас есть Linux-машина и вы хотите использовать ее для сборки `clickhouse` двоичный файл, который будет работать на другой машине Linux с архитектурой процессора AARCH64. Это предназначено для непрерывной проверки интеграции, которая выполняется на серверах Linux. -The cross-build for AARCH64 is based on the [Build instructions](build.md), follow them first. +Кросс-сборка для AARCH64 основана на следующих принципах: [Инструкции по сборке](build.md)- сначала следуйте за ними. -# Install Clang-8 {#install-clang-8} +# Установка Clang-8 {#install-clang-8} -Follow the instructions from https://apt.llvm.org/ for your Ubuntu or Debian setup. -For example, in Ubuntu Bionic you can use the following commands: +Следуйте инструкциям от https://apt.llvm.org/ для вашей установки Ubuntu или Debian. +Например, в Ubuntu Bionic вы можете использовать следующие команды: ``` bash echo "deb [trusted=yes] http://apt.llvm.org/bionic/ llvm-toolchain-bionic-8 main" | sudo tee /etc/apt/sources.list.d/llvm.list @@ -19,7 +20,7 @@ sudo apt-get update sudo apt-get install clang-8 ``` -# Install Cross-Compilation Toolset {#install-cross-compilation-toolset} +# Установка Набора Инструментов Перекрестной Компиляции {#install-cross-compilation-toolset} ``` bash cd ClickHouse @@ -28,7 +29,7 @@ wget 'https://developer.arm.com/-/media/Files/downloads/gnu-a/8.3-2019.03/binrel tar xJf gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz -C build-aarch64/cmake/toolchain/linux-aarch64 --strip-components=1 ``` -# Build ClickHouse {#build-clickhouse} +# Построить ClickHouse {#build-clickhouse} ``` bash cd ClickHouse @@ -37,4 +38,4 @@ CC=clang-8 CXX=clang++-8 cmake . 
-Bbuild-arm64 -DCMAKE_TOOLCHAIN_FILE=cmake/linu ninja -C build-arm64 ``` -The resulting binary will run only on Linux with the AARCH64 CPU architecture. +Полученный двоичный файл будет работать только в Linux с архитектурой процессора AARCH64. diff --git a/docs/ru/development/build_cross_osx.md b/docs/ru/development/build_cross_osx.md index a708dc4d4f3..04d505f1a83 100644 --- a/docs/ru/development/build_cross_osx.md +++ b/docs/ru/development/build_cross_osx.md @@ -1,26 +1,27 @@ --- -en_copy: true +machine_translated: true +machine_translated_rev: 1cd5f0028d917696daf71ac1c9ee849c99c1d5c8 --- -# How to Build ClickHouse on Linux for Mac OS X {#how-to-build-clickhouse-on-linux-for-mac-os-x} +# Как построить ClickHouse на Linux для Mac OS X {#how-to-build-clickhouse-on-linux-for-mac-os-x} -This is for the case when you have Linux machine and want to use it to build `clickhouse` binary that will run on OS X. This is intended for continuous integration checks that run on Linux servers. If you want to build ClickHouse directly on Mac OS X, then proceed with [another instruction](build_osx.md). +Это для случая, когда у вас есть Linux-машина и вы хотите использовать ее для сборки `clickhouse` двоичный файл, который будет работать на OS X. Это предназначено для непрерывной проверки интеграции, которая выполняется на серверах Linux. Если вы хотите построить ClickHouse непосредственно на Mac OS X, то продолжайте [еще одна инструкция](build_osx.md). -The cross-build for Mac OS X is based on the [Build instructions](build.md), follow them first. +Кросс-сборка для Mac OS X основана на следующих принципах: [Инструкции по сборке](build.md)- сначала следуйте за ними. -# Install Clang-8 {#install-clang-8} +# Установка Clang-8 {#install-clang-8} -Follow the instructions from https://apt.llvm.org/ for your Ubuntu or Debian setup. -For example the commands for Bionic are like: +Следуйте инструкциям от https://apt.llvm.org/ для вашей установки Ubuntu или Debian. +Например команды для Bionic выглядят так: ``` bash sudo echo "deb [trusted=yes] http://apt.llvm.org/bionic/ llvm-toolchain-bionic-8 main" >> /etc/apt/sources.list sudo apt-get install clang-8 ``` -# Install Cross-Compilation Toolset {#install-cross-compilation-toolset} +# Установка Набора Инструментов Перекрестной Компиляции {#install-cross-compilation-toolset} -Let’s remember the path where we install `cctools` as ${CCTOOLS} +Давайте вспомним путь, по которому мы устанавливаем `cctools` как ${CCTOOLS} ``` bash mkdir ${CCTOOLS} @@ -37,7 +38,7 @@ cd cctools-port/cctools make install ``` -Also, we need to download macOS X SDK into the working tree. +Кроме того, нам нужно загрузить MacOS X SDK в рабочее дерево. ``` bash cd ClickHouse @@ -46,7 +47,7 @@ mkdir -p build-darwin/cmake/toolchain/darwin-x86_64 tar xJf MacOSX10.14.sdk.tar.xz -C build-darwin/cmake/toolchain/darwin-x86_64 --strip-components=1 ``` -# Build ClickHouse {#build-clickhouse} +# Построить ClickHouse {#build-clickhouse} ``` bash cd ClickHouse @@ -58,4 +59,4 @@ CC=clang-8 CXX=clang++-8 cmake . -Bbuild-osx -DCMAKE_TOOLCHAIN_FILE=cmake/darwin ninja -C build-osx ``` -The resulting binary will have a Mach-O executable format and can’t be run on Linux. +Полученный двоичный файл будет иметь исполняемый формат Mach-O и не может быть запущен в Linux. 
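+Быстро проверить формат полученного файла можно стандартной утилитой `file` (набросок; путь к бинарнику предположителен и соответствует каталогу сборки `build-osx` выше):
+
+``` bash
+file build-osx/programs/clickhouse
+# ожидается что-то вроде: Mach-O 64-bit executable x86_64
+```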
diff --git a/docs/ru/development/build_osx.md b/docs/ru/development/build_osx.md
index 6b1839aaf7f..b218304d9d1 100644
--- a/docs/ru/development/build_osx.md
+++ b/docs/ru/development/build_osx.md
@@ -1,30 +1,31 @@
 ---
-en_copy: true
+machine_translated: true
+machine_translated_rev: 1cd5f0028d917696daf71ac1c9ee849c99c1d5c8
 ---

-# How to Build ClickHouse on Mac OS X {#how-to-build-clickhouse-on-mac-os-x}
+# Как собрать ClickHouse на Mac OS X {#how-to-build-clickhouse-on-mac-os-x}

-Build should work on Mac OS X 10.15 (Catalina)
+Сборка должна работать на Mac OS X 10.15 (Catalina)

-## Install Homebrew {#install-homebrew}
+## Установите Homebrew {#install-homebrew}

``` bash
$ /usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
```

-## Install Required Compilers, Tools, and Libraries {#install-required-compilers-tools-and-libraries}
+## Установите необходимые компиляторы, инструменты и библиотеки {#install-required-compilers-tools-and-libraries}

``` bash
$ brew install cmake ninja libtool gettext
```

-## Checkout ClickHouse Sources {#checkout-clickhouse-sources}
+## Получение исходников ClickHouse {#checkout-clickhouse-sources}

``` bash
$ git clone --recursive git@github.com:ClickHouse/ClickHouse.git
```

-or
+или

``` bash
$ git clone --recursive https://github.com/ClickHouse/ClickHouse.git
@@ -32,7 +33,7 @@ $ git clone --recursive https://github.com/ClickHouse/ClickHouse.git
$ cd ClickHouse
```

-## Build ClickHouse {#build-clickhouse}
+## Сборка ClickHouse {#build-clickhouse}

``` bash
$ mkdir build
@@ -42,16 +43,16 @@ $ ninja
$ cd ..
```

-## Caveats {#caveats}
+## Предостережения {#caveats}

-If you intend to run clickhouse-server, make sure to increase the system’s maxfiles variable.
+Если вы собираетесь запускать clickhouse-server, не забудьте увеличить системную переменную maxfiles.

-!!! info "Note"
-    You’ll need to use sudo.
+!!! info "Примечание"
+    Вам понадобится sudo.

-To do so, create the following file:
+Для этого создайте следующий файл:

-/Library/LaunchDaemons/limit.maxfiles.plist:
+/Library/LaunchDaemons/limit.maxfiles.plist:

``` xml

```

-Execute the following command:
+Выполните следующую команду:

``` bash
$ sudo chown root:wheel /Library/LaunchDaemons/limit.maxfiles.plist
```

-Reboot.
+Перезагрузитесь.

-To check if it’s working, you can use `ulimit -n` command.
+Чтобы проверить, что это работает, используйте команду `ulimit -n`.
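+Проверить лимиты до и после перезагрузки можно стандартными командами macOS:
+
+``` bash
+launchctl limit maxfiles   # лимиты, заданные через launchd
+ulimit -n                  # лимит открытых файлов в текущей сессии
+```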
-[Original article](https://clickhouse.tech/docs/en/development/build_osx/)
+[Оригинальная статья](https://clickhouse.tech/docs/en/development/build_osx/)
diff --git a/docs/ru/development/index.md b/docs/ru/development/index.md
index 727e89ca891..8bf31ed0d3f 100644
--- a/docs/ru/development/index.md
+++ b/docs/ru/development/index.md
@@ -1,7 +1,8 @@
 ---
-en_copy: true
+machine_translated: true
+machine_translated_rev: 1cd5f0028d917696daf71ac1c9ee849c99c1d5c8
 ---

-# ClickHouse Development {#clickhouse-development}
+# Разработка ClickHouse {#clickhouse-development}

-[Original article](https://clickhouse.tech/docs/en/development/)
+[Оригинальная статья](https://clickhouse.tech/docs/en/development/)
diff --git a/docs/ru/development/tests.md b/docs/ru/development/tests.md
index c703d6cd5b3..630ceecf2b2 100644
--- a/docs/ru/development/tests.md
+++ b/docs/ru/development/tests.md
@@ -1,87 +1,88 @@
 ---
-en_copy: true
+machine_translated: true
+machine_translated_rev: 1cd5f0028d917696daf71ac1c9ee849c99c1d5c8
 ---

-# ClickHouse Testing {#clickhouse-testing}
+# Тестирование ClickHouse {#clickhouse-testing}

-## Functional Tests {#functional-tests}
+## Функциональные тесты {#functional-tests}

-Functional tests are the most simple and convenient to use. Most of ClickHouse features can be tested with functional tests and they are mandatory to use for every change in ClickHouse code that can be tested that way.
+Функциональные тесты наиболее просты и удобны в использовании. Большинство возможностей ClickHouse можно проверить функциональными тестами, и они обязательны для каждого изменения кода ClickHouse, которое можно протестировать таким способом.

-Each functional test sends one or multiple queries to the running ClickHouse server and compares the result with reference.
+Каждый функциональный тест отправляет один или несколько запросов на запущенный сервер ClickHouse и сравнивает результат с эталонным.

-Tests are located in `queries` directory. There are two subdirectories: `stateless` and `stateful`. Stateless tests run queries without any preloaded test data - they often create small synthetic datasets on the fly, within the test itself. Stateful tests require preloaded test data from Yandex.Metrica and not available to general public. We tend to use only `stateless` tests and avoid adding new `stateful` tests.
+Тесты расположены в каталоге `queries`. В нем два подкаталога: `stateless` и `stateful`. Stateless-тесты выполняют запросы без предварительно загруженных тестовых данных — они часто создают небольшие синтетические наборы данных на лету, прямо в тесте. Stateful-тесты требуют предварительно загруженных тестовых данных Яндекс.Метрики, недоступных широкой публике. Мы стараемся использовать только `stateless`-тесты и избегаем добавления новых `stateful`-тестов.

-Each test can be one of two types: `.sql` and `.sh`. `.sql` test is the simple SQL script that is piped to `clickhouse-client --multiquery --testmode`. `.sh` test is a script that is run by itself.
+Каждый тест относится к одному из двух типов: `.sql` и `.sh`. `.sql`-тест — это простой SQL-скрипт, который подается на вход `clickhouse-client --multiquery --testmode`. `.sh`-тест — это скрипт, который запускается сам по себе.

-To run all tests, use `testskhouse-test` tool. Look `--help` for the list of possible options. You can simply run all tests or run subset of tests filtered by substring in test name: `./clickhouse-test substring`.
+Чтобы выполнить все тесты, используйте инструмент `clickhouse-test`. Список возможных опций смотрите в `--help`. Можно просто запустить все тесты или запустить подмножество тестов, отфильтрованных по подстроке в имени теста: `./clickhouse-test substring`.

-The most simple way to invoke functional tests is to copy `clickhouse-client` to `/usr/bin/`, run `clickhouse-server` and then run `./clickhouse-test` from its own directory.
+Самый простой способ запустить функциональные тесты — скопировать `clickhouse-client` в `/usr/bin/`, запустить `clickhouse-server`, а затем запустить `./clickhouse-test` из его собственного каталога.

-To add new test, create a `.sql` or `.sh` file in `queries/0_stateless` directory, check it manually and then generate `.reference` file in the following way: `clickhouse-client -n --testmode < 00000_test.sql > 00000_test.reference` or `./00000_test.sh > ./00000_test.reference`.
+Чтобы добавить новый тест, создайте файл `.sql` или `.sh` в каталоге `queries/0_stateless`, проверьте его вручную, а затем сгенерируйте файл `.reference` следующим образом: `clickhouse-client -n --testmode < 00000_test.sql > 00000_test.reference` или `./00000_test.sh > ./00000_test.reference`.

-Tests should use (create, drop, etc) only tables in `test` database that is assumed to be created beforehand; also tests can use temporary tables.
+Тесты должны использовать (создавать, удалять и т. д.) только таблицы в базе данных `test`, которая считается созданной заранее; также тесты могут использовать временные таблицы.

-If you want to use distributed queries in functional tests, you can leverage `remote` table function with `127.0.0.{1..2}` addresses for the server to query itself; or you can use predefined test clusters in server configuration file like `test_shard_localhost`.
+Если вы хотите использовать распределенные запросы в функциональных тестах, воспользуйтесь табличной функцией `remote` с адресами `127.0.0.{1..2}`, чтобы сервер отправлял запросы сам себе; либо используйте предопределенные тестовые кластеры из конфигурационного файла сервера, например `test_shard_localhost`.

-Some tests are marked with `zookeeper`, `shard` or `long` in their names.
-`zookeeper` is for tests that are using ZooKeeper. `shard` is for tests that
-requires server to listen `127.0.0.*`; `distributed` or `global` have the same
-meaning. `long` is for tests that run slightly longer that one second. You can
-disable these groups of tests using `--no-zookeeper`, `--no-shard` and
-`--no-long` options, respectively.
+Некоторые тесты имеют пометки `zookeeper`, `shard` или `long` в названии.
+`zookeeper` — для тестов, использующих ZooKeeper. `shard` — для тестов, которым
+требуется сервер, слушающий `127.0.0.*`; пометки `distributed` и `global` означают
+то же самое. `long` — для тестов, выполняющихся чуть дольше одной секунды. Эти
+группы тестов можно отключить опциями `--no-zookeeper`, `--no-shard` и
+`--no-long` соответственно.

-## Known bugs {#known-bugs}
+## Известные ошибки {#known-bugs}

-If we know some bugs that can be easily reproduced by functional tests, we place prepared functional tests in `queries/bugs` directory. These tests will be moved to `teststests_stateless` when bugs are fixed.
+Если мы знаем ошибки, которые легко воспроизводятся функциональными тестами, мы помещаем подготовленные функциональные тесты в каталог `queries/bugs`.
Эти тесты будут перенесены в `queries/0_stateless`, когда ошибки будут исправлены.

-## Integration Tests {#integration-tests}
+## Интеграционные тесты {#integration-tests}

-Integration tests allow to test ClickHouse in clustered configuration and ClickHouse interaction with other servers like MySQL, Postgres, MongoDB. They are useful to emulate network splits, packet drops, etc. These tests are run under Docker and create multiple containers with various software.
+Интеграционные тесты позволяют проверять ClickHouse в кластерной конфигурации, а также взаимодействие ClickHouse с другими серверами, такими как MySQL, Postgres, MongoDB. Они полезны для эмуляции разделения сети, потери пакетов и т. д. Эти тесты выполняются под Docker и создают несколько контейнеров с различным программным обеспечением.

-See `testsgration/README.md` on how to run these tests.
+О том, как запускать эти тесты, см. `tests/integration/README.md`.

-Note that integration of ClickHouse with third-party drivers is not tested. Also we currently don’t have integration tests with our JDBC and ODBC drivers.
+Обратите внимание, что интеграция ClickHouse со сторонними драйверами не тестируется. Кроме того, у нас пока нет интеграционных тестов с нашими драйверами JDBC и ODBC.

-## Unit Tests {#unit-tests}
+## Модульные тесты {#unit-tests}

-Unit tests are useful when you want to test not the ClickHouse as a whole, but a single isolated library or class. You can enable or disable build of tests with `ENABLE_TESTS` CMake option. Unit tests (and other test programs) are located in `tests` subdirectories across the code. To run unit tests, type `ninja test`. Some tests use `gtest`, but some are just programs that return non-zero exit code on test failure.
+Модульные тесты полезны, когда нужно протестировать не ClickHouse целиком, а отдельную изолированную библиотеку или класс. Сборку тестов можно включить или выключить опцией CMake `ENABLE_TESTS`. Модульные тесты (и другие тестовые программы) расположены в подкаталогах `tests` по всему коду. Чтобы запустить модульные тесты, выполните `ninja test`. Некоторые тесты используют `gtest`, а некоторые — просто программы, возвращающие ненулевой код выхода при сбое теста.

-It’s not necessarily to have unit tests if the code is already covered by functional tests (and functional tests are usually much more simple to use).
+Не обязательно иметь модульные тесты, если код уже покрыт функциональными тестами (а функциональные тесты обычно гораздо проще в использовании).

-## Performance Tests {#performance-tests}
+## Тесты производительности {#performance-tests}

-Performance tests allow to measure and compare performance of some isolated part of ClickHouse on synthetic queries. Tests are located at `tests/performance`. Each test is represented by `.xml` file with description of test case. Tests are run with `clickhouse performance-test` tool (that is embedded in `clickhouse` binary). See `--help` for invocation.
+Тесты производительности позволяют измерять и сравнивать производительность некоторой изолированной части ClickHouse на синтетических запросах. Тесты расположены в `tests/performance`. Каждый тест представлен файлом `.xml` с описанием тестового случая. Тесты запускаются инструментом `clickhouse performance-test` (встроенным в двоичный файл `clickhouse`). Способ вызова смотрите в `--help`.
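+Например, посмотреть доступные тесты и способ их запуска можно так (набросок; точный синтаксис аргументов см. в `--help`):
+
+``` bash
+ls tests/performance/*.xml          # список описаний тестовых случаев
+clickhouse performance-test --help  # опции и способ вызова
+```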
-Each test run one or miltiple queries (possibly with combinations of parameters) in a loop with some conditions for stop (like “maximum execution speed is not changing in three seconds”) and measure some metrics about query performance (like “maximum execution speed”). Some tests can contain preconditions on preloaded test dataset. +Каждый тест запускает один или несколько запросов (возможно, с комбинациями параметров) в цикле с некоторыми условиями остановки (например «maximum execution speed is not changing in three seconds») и измерьте некоторые показатели производительности запросов (например, «maximum execution speed»). Некоторые тесты могут содержать предварительные условия для предварительно загруженного тестового набора данных. -If you want to improve performance of ClickHouse in some scenario, and if improvements can be observed on simple queries, it is highly recommended to write a performance test. It always makes sense to use `perf top` or other perf tools during your tests. +Если вы хотите улучшить производительность ClickHouse в каком-то сценарии, и если улучшения могут наблюдаться в простых запросах, настоятельно рекомендуется написать тест производительности. Это всегда имеет смысл использовать `perf top` или другие инструменты perf во время ваших тестов. -## Test Tools And Scripts {#test-tools-and-scripts} +## Инструменты И Сценарии Тестирования {#test-tools-and-scripts} -Some programs in `tests` directory are not prepared tests, but are test tools. For example, for `Lexer` there is a tool `dbms/Parsers/tests/lexer` that just do tokenization of stdin and writes colorized result to stdout. You can use these kind of tools as a code examples and for exploration and manual testing. +Некоторые программы в `tests` каталог-это не подготовленные тесты, а инструменты тестирования. Например, для `Lexer` есть такой инструмент `dbms/Parsers/tests/lexer` это просто делает токенизацию stdin и записывает раскрашенный результат в stdout. Вы можете использовать эти инструменты в качестве примеров кода, а также для исследования и ручного тестирования. -You can also place pair of files `.sh` and `.reference` along with the tool to run it on some predefined input - then script result can be compared to `.reference` file. These kind of tests are not automated. +Вы также можете разместить пару файлов `.sh` и `.reference` вместе с инструментом нужно запустить его на каком - то заранее заданном входе- тогда результат скрипта можно сравнить с `.reference` файл. Такого рода тесты не автоматизированы. -## Miscellanous Tests {#miscellanous-tests} +## Различные Тесты {#miscellanous-tests} -There are tests for external dictionaries located at `tests/external_dictionaries` and for machine learned models in `tests/external_models`. These tests are not updated and must be transferred to integration tests. +Существуют тесты для внешних словарей, расположенных по адресу `tests/external_dictionaries` и для машинно-обученных моделей в `tests/external_models`. Эти тесты не обновляются и должны быть перенесены в интеграционные тесты. -There is separate test for quorum inserts. This test run ClickHouse cluster on separate servers and emulate various failure cases: network split, packet drop (between ClickHouse nodes, between ClickHouse and ZooKeeper, between ClickHouse server and client, etc.), `kill -9`, `kill -STOP` and `kill -CONT` , like [Jepsen](https://aphyr.com/tags/Jepsen). Then the test checks that all acknowledged inserts was written and all rejected inserts was not. 
+Существует отдельный тест для кворумной вставки. Этот тест запускает кластер ClickHouse на отдельных серверах и эмулирует различные случаи сбоя: разделение сети, потерю пакетов (между узлами ClickHouse, между ClickHouse и ZooKeeper, между сервером ClickHouse и клиентом и т. д.), `kill -9`, `kill -STOP` и `kill -CONT` — как [Jepsen](https://aphyr.com/tags/Jepsen). Затем тест проверяет, что все подтвержденные вставки были записаны, а все отклоненные — нет.

-Quorum test was written by separate team before ClickHouse was open-sourced. This team no longer work with ClickHouse. Test was accidentially written in Java. For these reasons, quorum test must be rewritten and moved to integration tests.
+Тест кворума был написан отдельной командой еще до того, как исходный код ClickHouse был открыт. Эта команда больше не работает с ClickHouse. Тест по стечению обстоятельств был написан на Java. По этим причинам тест кворума должен быть переписан и перенесен в интеграционные тесты.

-## Manual Testing {#manual-testing}
+## Ручное тестирование {#manual-testing}

-When you develop a new feature, it is reasonable to also test it manually. You can do it with the following steps:
+Когда вы разрабатываете новую функциональность, разумно также проверить ее вручную. Это можно сделать в несколько шагов:

-Build ClickHouse. Run ClickHouse from the terminal: change directory to `programs/clickhouse-server` and run it with `./clickhouse-server`. It will use configuration (`config.xml`, `users.xml` and files within `config.d` and `users.d` directories) from the current directory by default. To connect to ClickHouse server, run `programs/clickhouse-client/clickhouse-client`.
+Соберите ClickHouse. Запустите ClickHouse из терминала: перейдите в каталог `programs/clickhouse-server` и запустите его командой `./clickhouse-server`. По умолчанию он использует конфигурацию (`config.xml`, `users.xml` и файлы в каталогах `config.d` и `users.d`) из текущего каталога. Чтобы подключиться к серверу ClickHouse, запустите `programs/clickhouse-client/clickhouse-client`.

-Note that all clickhouse tools (server, client, etc) are just symlinks to a single binary named `clickhouse`. You can find this binary at `programs/clickhouse`. All tools can also be invoked as `clickhouse tool` instead of `clickhouse-tool`.
+Обратите внимание, что все инструменты clickhouse (сервер, клиент и т. д.) — это просто символические ссылки на один двоичный файл с именем `clickhouse`. Этот файл находится в `programs/clickhouse`. Все инструменты также можно вызывать как `clickhouse tool` вместо `clickhouse-tool`.

-Alternatively you can install ClickHouse package: either stable release from Yandex repository or you can build package for yourself with `./release` in ClickHouse sources root. Then start the server with `sudo service clickhouse-server start` (or stop to stop the server). Look for logs at `/etc/clickhouse-server/clickhouse-server.log`.
+В качестве альтернативы можно установить пакет ClickHouse: либо стабильный релиз из репозитория Яндекса, либо собрать пакет самостоятельно командой `./release` в корне исходников ClickHouse. Затем запустите сервер командой `sudo service clickhouse-server start` (или `stop`, чтобы остановить сервер). Журналы ищите в `/etc/clickhouse-server/clickhouse-server.log`.
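+Все шаги вместе могут выглядеть примерно так (набросок; пути взяты из абзацев выше):
+
+``` bash
+cd programs/clickhouse-server
+./clickhouse-server &    # конфигурация берется из текущего каталога
+../clickhouse-client/clickhouse-client --query "SELECT version()"
+```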
-When ClickHouse is already installed on your system, you can build a new `clickhouse` binary and replace the existing binary:
+Когда ClickHouse уже установлен в системе, можно собрать новый двоичный файл `clickhouse` и заменить им существующий:

``` bash
$ sudo service clickhouse-server stop
@@ -89,161 +90,161 @@ $ sudo cp ./clickhouse /usr/bin/
$ sudo service clickhouse-server start
```

-Also you can stop system clickhouse-server and run your own with the same configuration but with logging to terminal:
+Также можно остановить системный clickhouse-server и запустить свой собственный с той же конфигурацией, но с логированием в терминал:

``` bash
$ sudo service clickhouse-server stop
$ sudo -u clickhouse /usr/bin/clickhouse server --config-file /etc/clickhouse-server/config.xml
```

-Example with gdb:
+Пример с gdb:

``` bash
$ sudo -u clickhouse gdb --args /usr/bin/clickhouse server --config-file /etc/clickhouse-server/config.xml
```

-If the system clickhouse-server is already running and you don’t want to stop it, you can change port numbers in your `config.xml` (or override them in a file in `config.d` directory), provide appropriate data path, and run it.
+Если системный clickhouse-server уже запущен и вы не хотите его останавливать, можно изменить номера портов в `config.xml` (или переопределить их в файле в каталоге `config.d`), указать подходящий путь к данным и запустить его.

-`clickhouse` binary has almost no dependencies and works across wide range of Linux distributions. To quick and dirty test your changes on a server, you can simply `scp` your fresh built `clickhouse` binary to your server and then run it as in examples above.
+Двоичный файл `clickhouse` почти не имеет зависимостей и работает на широком спектре дистрибутивов Linux. Чтобы быстро проверить изменения на сервере, достаточно скопировать свежесобранный двоичный файл `clickhouse` на сервер по `scp` и запустить его, как в примерах выше.

-## Testing Environment {#testing-environment}
+## Тестовая среда {#testing-environment}

-Before publishing release as stable we deploy it on testing environment. Testing environment is a cluster that process 1/39 part of [Yandex.Metrica](https://metrica.yandex.com/) data. We share our testing environment with Yandex.Metrica team. ClickHouse is upgraded without downtime on top of existing data. We look at first that data is processed successfully without lagging from realtime, the replication continue to work and there is no issues visible to Yandex.Metrica team. First check can be done in the following way:
+Перед публикацией релиза как стабильного мы развертываем его в тестовой среде. Тестовая среда — это кластер, обрабатывающий 1/39 часть данных [Яндекс.Метрики](https://metrica.yandex.com/). Мы делим тестовую среду с командой Яндекс.Метрики. ClickHouse обновляется без простоя поверх существующих данных. Сначала мы смотрим, что данные обрабатываются успешно и без отставания от реального времени, репликация продолжает работать и нет проблем, заметных команде Яндекс.Метрики. Первую проверку можно выполнить так:

``` sql
SELECT hostName() AS h, any(version()), any(uptime()), max(UTCEventTime), count() FROM remote('example01-01-{1..3}t', merge, hits) WHERE EventDate >= today() - 2 GROUP BY h ORDER BY h;
```

-In some cases we also deploy to testing environment of our friend teams in Yandex: Market, Cloud, etc.
Also we have some hardware servers that are used for development purposes.
+В некоторых случаях мы также развертываем релиз в тестовых средах дружественных нам команд Яндекса: Маркета, Облака и т. д. Кроме того, у нас есть аппаратные серверы, используемые для разработки.

-## Load Testing {#load-testing}
+## Нагрузочное тестирование {#load-testing}

-After deploying to testing environment we run load testing with queries from production cluster. This is done manually.
+После развертывания в тестовой среде мы запускаем нагрузочное тестирование запросами из производственного кластера. Это делается вручную.

-Make sure you have enabled `query_log` on your production cluster.
+Убедитесь, что на производственном кластере включен `query_log`.

-Collect query log for a day or more:
+Соберите журнал запросов за день или больше:

``` bash
$ clickhouse-client --query="SELECT DISTINCT query FROM system.query_log WHERE event_date = today() AND query LIKE '%ym:%' AND query NOT LIKE '%system.query_log%' AND type = 2 AND is_initial_query" > queries.tsv
```

-This is a way complicated example. `type = 2` will filter queries that are executed successfully. `query LIKE '%ym:%'` is to select relevant queries from Yandex.Metrica. `is_initial_query` is to select only queries that are initiated by client, not by ClickHouse itself (as parts of distributed query processing).
+Это довольно сложный пример. `type = 2` отфильтрует запросы, которые выполнились успешно. `query LIKE '%ym:%'` выбирает релевантные запросы Яндекс.Метрики. `is_initial_query` выбирает только запросы, инициированные клиентом, а не самим ClickHouse (в рамках распределенной обработки запроса).

-`scp` this log to your testing cluster and run it as following:
+Скопируйте этот журнал на тестовый кластер по `scp` и запустите его следующим образом:

``` bash
$ clickhouse benchmark --concurrency 16 < queries.tsv
```

-(probably you also want to specify a `--user`)
+(вероятно, вы также захотите указать `--user`)

-Then leave it for a night or weekend and go take a rest.
+Затем оставьте его на ночь или на выходные и идите отдыхать.

-You should check that `clickhouse-server` doesn’t crash, memory footprint is bounded and performance not degrading over time.
+Проверьте, что `clickhouse-server` не падает, потребление памяти ограничено, а производительность не деградирует со временем.

-Precise query execution timings are not recorded and not compared due to high variability of queries and environment.
+Точные тайминги выполнения запросов не записываются и не сравниваются из-за высокой вариативности запросов и окружения.

-## Build Tests {#build-tests}
+## Тесты сборки {#build-tests}

-Build tests allow to check that build is not broken on various alternative configurations and on some foreign systems. Tests are located at `ci` directory. They run build from source inside Docker, Vagrant, and sometimes with `qemu-user-static` inside Docker. These tests are under development and test runs are not automated.
+Тесты сборки позволяют проверить, что сборка не ломается в различных альтернативных конфигурациях и на некоторых сторонних системах. Тесты расположены в каталоге `ci`. Они запускают сборку из исходного кода внутри Docker, Vagrant, а иногда с `qemu-user-static` внутри Docker. Эти тесты находятся в разработке, и их запуски не автоматизированы.

-Motivation:
+Мотивация:

-Normally we release and run all tests on a single variant of ClickHouse build.
But there are alternative build variants that are not thoroughly tested. Examples:
+Обычно мы выпускаем релизы и запускаем все тесты на одном варианте сборки ClickHouse. Но есть и альтернативные варианты сборки, которые не проверяются так тщательно. Примеры:

-- build on FreeBSD;
-- build on Debian with libraries from system packages;
-- build with shared linking of libraries;
-- build on AArch64 platform;
-- build on PowerPc platform.
+- сборка на FreeBSD;
+- сборка на Debian с библиотеками из системных пакетов;
+- сборка с динамической (shared) линковкой библиотек;
+- сборка на платформе AArch64;
+- сборка на платформе PowerPC.

-For example, build with system packages is bad practice, because we cannot guarantee what exact version of packages a system will have. But this is really needed by Debian maintainers. For this reason we at least have to support this variant of build. Another example: shared linking is a common source of trouble, but it is needed for some enthusiasts.
+Например, сборка с системными пакетами — плохая практика, потому что мы не можем гарантировать, какая именно версия пакетов окажется в системе. Но это действительно нужно сопровождающим Debian. По этой причине мы как минимум должны поддерживать такой вариант сборки. Другой пример: динамическая линковка — частый источник проблем, но она нужна некоторым энтузиастам.

-Though we cannot run all tests on all variant of builds, we want to check at least that various build variants are not broken. For this purpose we use build tests.
+Хотя мы не можем выполнить все тесты на всех вариантах сборки, мы хотим проверить хотя бы то, что различные варианты сборки не сломаны. Для этого мы используем тесты сборки.

-## Testing For Protocol Compatibility {#testing-for-protocol-compatibility}
+## Тестирование совместимости протокола {#testing-for-protocol-compatibility}

-When we extend ClickHouse network protocol, we test manually that old clickhouse-client works with new clickhouse-server and new clickhouse-client works with old clickhouse-server (simply by running binaries from corresponding packages).
+Когда мы расширяем сетевой протокол ClickHouse, мы вручную проверяем, что старый clickhouse-client работает с новым clickhouse-server, а новый clickhouse-client — со старым clickhouse-server (просто запуская двоичные файлы из соответствующих пакетов).

-## Help From The Compiler {#help-from-the-compiler}
+## Помощь от компилятора {#help-from-the-compiler}

-Main ClickHouse code (that is located in `dbms` directory) is built with `-Wall -Wextra -Werror` and with some additional enabled warnings. Although these options are not enabled for third-party libraries.
+Основной код ClickHouse (находящийся в каталоге `dbms`) собирается с `-Wall -Wextra -Werror` и некоторыми дополнительными включенными предупреждениями. Для сторонних библиотек эти опции не включены.

-Clang has even more useful warnings - you can look for them with `-Weverything` and pick something to default build.
+У Clang есть еще более полезные предупреждения — их можно поискать с помощью `-Weverything` и выбрать что-то для сборки по умолчанию.

-For production builds, gcc is used (it still generates slightly more efficient code than clang). For development, clang is usually more convenient to use. You can build on your own machine with debug mode (to save battery of your laptop), but please note that compiler is able to generate more warnings with `-O3` due to better control flow and inter-procedure analysis.
When building with clang, `libc++` is used instead of `libstdc++` and when building with debug mode, debug version of `libc++` is used that allows to catch more errors at runtime.
+Для производственных сборок используется gcc (он все еще генерирует немного более эффективный код, чем clang). Для разработки обычно удобнее clang. Вы можете собирать на собственной машине в отладочном режиме (чтобы экономить батарею ноутбука), но учтите, что с `-O3` компилятор способен выдавать больше предупреждений благодаря лучшему анализу потока управления и межпроцедурному анализу. При сборке с clang вместо `libstdc++` используется `libc++`, а при сборке в отладочном режиме используется отладочная версия `libc++`, позволяющая отлавливать больше ошибок во время выполнения.

-## Sanitizers {#sanitizers}
+## Санитайзеры {#sanitizers}

-**Address sanitizer**.
-We run functional and integration tests under ASan on per-commit basis.
+**Address sanitizer**.
+Мы запускаем функциональные и интеграционные тесты под ASan на каждый коммит.

-**Valgrind (Memcheck)**.
-We run functional tests under Valgrind overnight. It takes multiple hours. Currently there is one known false positive in `re2` library, see [this article](https://research.swtch.com/sparse).
+**Valgrind (Memcheck)**.
+Мы запускаем функциональные тесты под Valgrind по ночам. Это занимает несколько часов. Сейчас известно одно ложное срабатывание в библиотеке `re2`, см. [эту статью](https://research.swtch.com/sparse).

-**Undefined behaviour sanitizer.**
-We run functional and integration tests under ASan on per-commit basis.
+**Undefined behaviour sanitizer.**
+Мы запускаем функциональные и интеграционные тесты под ASan на каждый коммит.

-**Thread sanitizer**.
-We run functional tests under TSan on per-commit basis. We still don’t run integration tests under TSan on per-commit basis.
+**Thread sanitizer**.
+Мы запускаем функциональные тесты под TSan на каждый коммит. Интеграционные тесты под TSan на каждый коммит мы пока не запускаем.

-**Memory sanitizer**.
-Currently we still don’t use MSan.
+**Memory sanitizer**.
+Пока мы не используем MSan.

-**Debug allocator.**
-Debug version of `jemalloc` is used for debug build.
+**Отладочный аллокатор.**
+Для отладочной сборки используется отладочная версия `jemalloc`.

-## Fuzzing {#fuzzing}
+## Фаззинг {#fuzzing}

-We use simple fuzz test to generate random SQL queries and to check that the server doesn’t die. Fuzz testing is performed with Address sanitizer. You can find it in `00746_sql_fuzzy.pl`. This test should be run continuously (overnight and longer).
+Мы используем простой fuzz-тест, который генерирует случайные SQL-запросы и проверяет, что сервер от них не умирает. Фаззинг выполняется под Address sanitizer. Вы можете найти его в `00746_sql_fuzzy.pl`. Этот тест следует запускать непрерывно (ночью и дольше).

-As of December 2018, we still don’t use isolated fuzz testing of library code.
+По состоянию на декабрь 2018 года мы все еще не используем изолированный фаззинг библиотечного кода.

-## Security Audit {#security-audit}
+## Аудит безопасности {#security-audit}

-People from Yandex Cloud department do some basic overview of ClickHouse capabilities from the security standpoint.
+Сотрудники отдела Яндекс.Облака делают базовый обзор возможностей ClickHouse с точки зрения безопасности.

-## Static Analyzers {#static-analyzers}
+## Статические анализаторы {#static-analyzers}

-We run `PVS-Studio` on per-commit basis. We have evaluated `clang-tidy`, `Coverity`, `cppcheck`, `PVS-Studio`, `tscancode`. You will find instructions for usage in `tests/instructions/` directory. Also you can read [the article in russian](https://habr.com/company/yandex/blog/342018/).
+Мы запускаем `PVS-Studio` на каждый коммит. Мы оценили `clang-tidy`, `Coverity`, `cppcheck`, `PVS-Studio`, `tscancode`. Инструкции по использованию вы найдете в каталоге `tests/instructions/`. Также можно прочитать [статью на русском языке](https://habr.com/company/yandex/blog/342018/).

-If you use `CLion` as an IDE, you can leverage some `clang-tidy` checks out of the box.
+Если вы используете `CLion` в качестве IDE, часть проверок `clang-tidy` доступна из коробки.

-## Hardening {#hardening}
+## Усиление защиты {#hardening}

-`FORTIFY_SOURCE` is used by default. It is almost useless, but still makes sense in rare cases and we don’t disable it.
+`FORTIFY_SOURCE` используется по умолчанию. Это почти бесполезно, но в редких случаях все же имеет смысл, и мы это не отключаем.

-## Code Style {#code-style}
+## Стиль кода {#code-style}

-Code style rules are described [here](https://clickhouse.tech/docs/en/development/style/).
+Правила стиля кода описаны [здесь](https://clickhouse.tech/docs/en/development/style/).

-To check for some common style violations, you can use `utils/check-style` script.
+Чтобы проверить распространенные нарушения стиля, можно использовать скрипт `utils/check-style`.

-To force proper style of your code, you can use `clang-format`. File `.clang-format` is located at the sources root. It mostly corresponding with our actual code style. But it’s not recommended to apply `clang-format` to existing files because it makes formatting worse. You can use `clang-format-diff` tool that you can find in clang source repository.
+Чтобы принудительно привести код к правильному стилю, можно использовать `clang-format`. Файл `.clang-format` находится в корне исходников. Он в основном соответствует нашему фактическому стилю кода. Но применять `clang-format` к существующим файлам не рекомендуется, потому что это ухудшает форматирование. Можно использовать инструмент `clang-format-diff` из репозитория исходного кода clang.

-Alternatively you can try `uncrustify` tool to reformat your code. Configuration is in `uncrustify.cfg` in the sources root. It is less tested than `clang-format`.
+В качестве альтернативы можно попробовать инструмент `uncrustify` для переформатирования кода. Конфигурация находится в `uncrustify.cfg` в корне исходников. Он проверен хуже, чем `clang-format`.

-`CLion` has its own code formatter that has to be tuned for our code style.
+`CLion` имеет собственный форматировщик кода, который нужно настроить под наш стиль кода.

-## Metrica B2B Tests {#metrica-b2b-tests}
+## B2B-тесты Метрики {#metrica-b2b-tests}

-Each ClickHouse release is tested with Yandex Metrica and AppMetrica engines. Testing and stable versions of ClickHouse are deployed on VMs and run with a small copy of Metrica engine that is processing fixed sample of input data. Then results of two instances of Metrica engine are compared together.
+Каждый релиз ClickHouse тестируется с движками Яндекс.Метрики и AppMetrica.
Тестовые и стабильные версии ClickHouse развертываются на виртуальных машинах и запускаются с небольшой копией движка Metrica engine, который обрабатывает фиксированную выборку входных данных. Затем результаты двух экземпляров двигателя Metrica сравниваются вместе. -These tests are automated by separate team. Due to high number of moving parts, tests are fail most of the time by completely unrelated reasons, that are very difficult to figure out. Most likely these tests have negative value for us. Nevertheless these tests was proved to be useful in about one or two times out of hundreds. +Эти тесты автоматизированы отдельной командой. Из-за большого количества движущихся частей тесты чаще всего проваливаются по совершенно несвязанным причинам, которые очень трудно выяснить. Скорее всего, эти тесты имеют для нас отрицательное значение. Тем не менее эти тесты оказались полезными примерно в одном или двух случаях из сотен. -## Test Coverage {#test-coverage} +## Тестовое покрытие {#test-coverage} -As of July 2018 we don’t track test coverage. +По состоянию на июль 2018 года мы не отслеживаем покрытие тестов. -## Test Automation {#test-automation} +## Автоматизация тестирования {#test-automation} -We run tests with Yandex internal CI and job automation system named “Sandbox”. +Мы проводим тесты с помощью внутренней CI Яндекса и системы автоматизации заданий под названием «Sandbox». -Build jobs and tests are run in Sandbox on per commit basis. Resulting packages and test results are published in GitHub and can be downloaded by direct links. Artifacts are stored eternally. When you send a pull request on GitHub, we tag it as “can be tested” and our CI system will build ClickHouse packages (release, debug, with address sanitizer, etc) for you. +Задания сборки и тесты выполняются в песочнице на основе каждой фиксации. Полученные пакеты и результаты тестирования публикуются на GitHub и могут быть загружены по прямым ссылкам. Артефакты хранятся вечно. Когда вы отправляете запрос на вытягивание на GitHub, мы помечаем его как «can be tested» и наша система CI построит пакеты ClickHouse (release, debug, with address sanitizer и т. д.) Для вас. -We don’t use Travis CI due to the limit on time and computational power. -We don’t use Jenkins. It was used before and now we are happy we are not using Jenkins. +Мы не используем Travis CI из-за ограничения по времени и вычислительной мощности. +Мы не используем Дженкинса. Он был использован раньше, и теперь мы счастливы, что не используем Дженкинса. -[Original article](https://clickhouse.tech/docs/en/development/tests/) -velopment/tests/) +[Оригинальная статья](https://clickhouse.tech/docs/en/development/tests/) +разработка / испытания/) diff --git a/docs/ru/database_engines/index.md b/docs/ru/engines/database_engines/index.md similarity index 78% rename from docs/ru/database_engines/index.md rename to docs/ru/engines/database_engines/index.md index 982324e0408..75086eaf678 100644 --- a/docs/ru/database_engines/index.md +++ b/docs/ru/engines/database_engines/index.md @@ -2,7 +2,7 @@ Движки баз данных обеспечивают работу с таблицами. -По умолчанию ClickHouse использует собственный движок баз данных, который поддерживает конфигурируемые [движки таблиц](../operations/table_engines/index.md) и [диалект SQL](../query_language/syntax.md). +По умолчанию ClickHouse использует собственный движок баз данных, который поддерживает конфигурируемые [движки таблиц](../../engines/database_engines/index.md) и [диалект SQL](../../engines/database_engines/index.md). 
Также можно использовать следующие движки баз данных: diff --git a/docs/ru/database_engines/lazy.md b/docs/ru/engines/database_engines/lazy.md similarity index 100% rename from docs/ru/database_engines/lazy.md rename to docs/ru/engines/database_engines/lazy.md diff --git a/docs/ru/database_engines/mysql.md b/docs/ru/engines/database_engines/mysql.md similarity index 68% rename from docs/ru/database_engines/mysql.md rename to docs/ru/engines/database_engines/mysql.md index 45547407be6..1dbcb67e8f1 100644 --- a/docs/ru/database_engines/mysql.md +++ b/docs/ru/engines/database_engines/mysql.md @@ -28,23 +28,23 @@ ENGINE = MySQL('host:port', ['database' | database], 'user', 'password') | MySQL | ClickHouse | |----------------------------------|---------------------------------------------| -| UNSIGNED TINYINT | [UInt8](../data_types/int_uint.md) | -| TINYINT | [Int8](../data_types/int_uint.md) | -| UNSIGNED SMALLINT | [UInt16](../data_types/int_uint.md) | -| SMALLINT | [Int16](../data_types/int_uint.md) | -| UNSIGNED INT, UNSIGNED MEDIUMINT | [UInt32](../data_types/int_uint.md) | -| INT, MEDIUMINT | [Int32](../data_types/int_uint.md) | -| UNSIGNED BIGINT | [UInt64](../data_types/int_uint.md) | -| BIGINT | [Int64](../data_types/int_uint.md) | -| FLOAT | [Float32](../data_types/float.md) | -| DOUBLE | [Float64](../data_types/float.md) | -| DATE | [Date](../data_types/date.md) | -| DATETIME, TIMESTAMP | [DateTime](../data_types/datetime.md) | -| BINARY | [FixedString](../data_types/fixedstring.md) | +| UNSIGNED TINYINT | [UInt8](../../engines/database_engines/mysql.md) | +| TINYINT | [Int8](../../engines/database_engines/mysql.md) | +| UNSIGNED SMALLINT | [UInt16](../../engines/database_engines/mysql.md) | +| SMALLINT | [Int16](../../engines/database_engines/mysql.md) | +| UNSIGNED INT, UNSIGNED MEDIUMINT | [UInt32](../../engines/database_engines/mysql.md) | +| INT, MEDIUMINT | [Int32](../../engines/database_engines/mysql.md) | +| UNSIGNED BIGINT | [UInt64](../../engines/database_engines/mysql.md) | +| BIGINT | [Int64](../../engines/database_engines/mysql.md) | +| FLOAT | [Float32](../../engines/database_engines/mysql.md) | +| DOUBLE | [Float64](../../engines/database_engines/mysql.md) | +| DATE | [Date](../../engines/database_engines/mysql.md) | +| DATETIME, TIMESTAMP | [DateTime](../../engines/database_engines/mysql.md) | +| BINARY | [FixedString](../../engines/database_engines/mysql.md) | -Все прочие типы данных преобразуются в [String](../data_types/string.md). +Все прочие типы данных преобразуются в [String](../../engines/database_engines/mysql.md). -[Nullable](../data_types/nullable.md) поддержан. +[Nullable](../../engines/database_engines/mysql.md) поддержан. ## Примеры использования {#primery-ispolzovaniia} diff --git a/docs/ru/engines/index.md b/docs/ru/engines/index.md new file mode 100644 index 00000000000..c4b0b299858 --- /dev/null +++ b/docs/ru/engines/index.md @@ -0,0 +1,6 @@ +--- +toc_folder_title: Engines +toc_priority: 25 +--- + + diff --git a/docs/ru/operations/table_engines/index.md b/docs/ru/engines/table_engines/index.md similarity index 64% rename from docs/ru/operations/table_engines/index.md rename to docs/ru/engines/table_engines/index.md index 775164ccb52..fdf9cd50bf6 100644 --- a/docs/ru/operations/table_engines/index.md +++ b/docs/ru/engines/table_engines/index.md @@ -13,27 +13,27 @@ ### MergeTree {#mergetree} -Наиболее универсальные и функциональные движки таблиц для задач с высокой загрузкой. 
Общим свойством этих движков является быстрая вставка данных с последующей фоновой обработкой данных. Движки `*MergeTree` поддерживают репликацию данных (в [Replicated\*](replication.md) версиях движков), партиционирование, и другие возможности не поддержанные для других движков. +Наиболее универсальные и функциональные движки таблиц для задач с высокой загрузкой. Общим свойством этих движков является быстрая вставка данных с последующей фоновой обработкой данных. Движки `*MergeTree` поддерживают репликацию данных (в [Replicated\*](mergetree_family/replication.md) версиях движков), партиционирование, и другие возможности не поддержанные для других движков. Движки семейства: -- [MergeTree](mergetree.md) -- [ReplacingMergeTree](replacingmergetree.md) -- [SummingMergeTree](summingmergetree.md) -- [AggregatingMergeTree](aggregatingmergetree.md) -- [CollapsingMergeTree](collapsingmergetree.md) -- [VersionedCollapsingMergeTree](versionedcollapsingmergetree.md) -- [GraphiteMergeTree](graphitemergetree.md) +- [MergeTree](mergetree_family/mergetree.md) +- [ReplacingMergeTree](mergetree_family/replacingmergetree.md) +- [SummingMergeTree](mergetree_family/summingmergetree.md) +- [AggregatingMergeTree](mergetree_family/aggregatingmergetree.md) +- [CollapsingMergeTree](mergetree_family/collapsingmergetree.md) +- [VersionedCollapsingMergeTree](mergetree_family/versionedcollapsingmergetree.md) +- [GraphiteMergeTree](mergetree_family/graphitemergetree.md) ### Log {#log} -Простые [движки](log_family.md) с минимальной функциональностью. Они наиболее эффективны, когда вам нужно быстро записать много небольших таблиц (до примерно 1 миллиона строк) и прочитать их позже целиком. +Простые [движки](log_family/index.md) с минимальной функциональностью. Они наиболее эффективны, когда вам нужно быстро записать много небольших таблиц (до примерно 1 миллиона строк) и прочитать их позже целиком. 
Движки семейства: -- [TinyLog](tinylog.md) -- [StripeLog](stripelog.md) -- [Log](log.md) +- [TinyLog](log_family/tinylog.md) +- [StripeLog](log_family/stripelog.md) +- [Log](log_family/log.md) ### Движки для интеграции {#dvizhki-dlia-integratsii} @@ -41,27 +41,27 @@ Движки семейства: -- [Kafka](kafka.md) -- [MySQL](mysql.md) -- [ODBC](odbc.md) -- [JDBC](jdbc.md) +- [Kafka](integrations/kafka.md) +- [MySQL](integrations/mysql.md) +- [ODBC](integrations/odbc.md) +- [JDBC](integrations/jdbc.md) ### Специальные движки {#spetsialnye-dvizhki} Движки семейства: -- [Distributed](distributed.md) -- [MaterializedView](materializedview.md) -- [Dictionary](dictionary.md) -- [Merge](merge.md) -- [File](file.md) -- [Null](null.md) -- [Set](set.md) -- [Join](join.md) -- [URL](url.md) -- [View](view.md) -- [Memory](memory.md) -- [Buffer](buffer.md) +- [Distributed](special/distributed.md) +- [MaterializedView](special/materializedview.md) +- [Dictionary](special/dictionary.md) +- [Merge](special/merge.md) +- [File](special/file.md) +- [Null](special/null.md) +- [Set](special/set.md) +- [Join](special/join.md) +- [URL](special/url.md) +- [View](special/view.md) +- [Memory](special/memory.md) +- [Buffer](special/buffer.md) ## Виртуальные столбцы {#table_engines-virtual-columns} diff --git a/docs/ru/operations/table_engines/hdfs.md b/docs/ru/engines/table_engines/integrations/hdfs.md similarity index 94% rename from docs/ru/operations/table_engines/hdfs.md rename to docs/ru/engines/table_engines/integrations/hdfs.md index 4f892b1e492..26b97a99f77 100644 --- a/docs/ru/operations/table_engines/hdfs.md +++ b/docs/ru/engines/table_engines/integrations/hdfs.md @@ -1,6 +1,6 @@ # HDFS {#table_engines-hdfs} -Управляет данными в HDFS. Данный движок похож на движки [File](file.md) и [URL](url.md). +Управляет данными в HDFS. Данный движок похож на движки [File](../special/file.md) и [URL](../special/url.md). ## Использование движка {#ispolzovanie-dvizhka} @@ -9,7 +9,7 @@ ENGINE = HDFS(URI, format) ``` В параметр `URI` нужно передавать полный URI файла в HDFS. -Параметр `format` должен быть таким, который ClickHouse может использовать и в запросах `INSERT`, и в запросах `SELECT`. Полный список поддерживаемых форматов смотрите в разделе [Форматы](../../interfaces/formats.md#formats). +Параметр `format` должен быть таким, который ClickHouse может использовать и в запросах `INSERT`, и в запросах `SELECT`. Полный список поддерживаемых форматов смотрите в разделе [Форматы](../../../interfaces/formats.md#formats). Часть URI с путем файла может содержать шаблоны. В этом случае таблица может использоваться только для чтения. **Пример:** @@ -56,7 +56,7 @@ SELECT * FROM hdfs_engine_table LIMIT 2 - `{some_string,another_string,yet_another_one}` — Заменяет любую из строк `'some_string', 'another_string', 'yet_another_one'`. - `{N..M}` — Заменяет любое число в интервале от `N` до `M` включительно (может содержать ведущие нули). -Конструкция с `{}` аналогична табличной функции [remote](../../query_language/table_functions/remote.md). +Конструкция с `{}` аналогична табличной функции [remote](../../../engines/table_engines/integrations/hdfs.md). 
**Example**

diff --git a/docs/ru/engines/table_engines/integrations/index.md b/docs/ru/engines/table_engines/integrations/index.md
new file mode 100644
index 00000000000..716d00cdd98
--- /dev/null
+++ b/docs/ru/engines/table_engines/integrations/index.md
@@ -0,0 +1,5 @@
+---
+toc_folder_title: Integrations
+toc_priority: 30
+---
+
diff --git a/docs/ru/operations/table_engines/jdbc.md b/docs/ru/engines/table_engines/integrations/jdbc.md
similarity index 95%
rename from docs/ru/operations/table_engines/jdbc.md
rename to docs/ru/engines/table_engines/integrations/jdbc.md
index d9a66244849..ae461a539be 100644
--- a/docs/ru/operations/table_engines/jdbc.md
+++ b/docs/ru/engines/table_engines/integrations/jdbc.md
@@ -4,7 +4,7 @@

To implement a JDBC connection, ClickHouse uses the separate program [clickhouse-jdbc-bridge](https://github.com/alex-krash/clickhouse-jdbc-bridge), which must run as a daemon.

-The engine supports the [Nullable](../../data_types/nullable.md) data type.
+The engine supports the [Nullable](../../../engines/table_engines/integrations/jdbc.md) data type.

## Creating a table {#sozdanie-tablitsy}

@@ -82,6 +82,6 @@ FROM jdbc_table

## See also {#smotrite-takzhe}

-- [JDBC table function](../../query_language/table_functions/jdbc.md).
+- [JDBC table function](../../../engines/table_engines/integrations/jdbc.md).

[Original article](https://clickhouse.tech/docs/ru/operations/table_engines/jdbc/)

diff --git a/docs/ru/operations/table_engines/kafka.md b/docs/ru/engines/table_engines/integrations/kafka.md
similarity index 95%
rename from docs/ru/operations/table_engines/kafka.md
rename to docs/ru/engines/table_engines/integrations/kafka.md
index 960eecf49d0..c43a7b5d37d 100644
--- a/docs/ru/operations/table_engines/kafka.md
+++ b/docs/ru/engines/table_engines/integrations/kafka.md
@@ -33,7 +33,7 @@ SETTINGS

- `kafka_broker_list` - a comma-separated list of brokers (`localhost:9092`).
- `kafka_topic_list` - a list of the required Kafka topics.
- `kafka_group_name` - the Kafka consumer group. Read offsets are tracked separately for each group. If you do not want messages to be duplicated across the cluster, use the same group name everywhere.
-- `kafka_format` - the message format. Format names must be the same as those usable in the `FORMAT` clause, for example, `JSONEachRow`. For details, see the [Formats](../../interfaces/formats.md) section.
+- `kafka_format` - the message format. Format names must be the same as those usable in the `FORMAT` clause, for example, `JSONEachRow`. For details, see the [Formats](../../../interfaces/formats.md) section.

Optional parameters:

@@ -123,7 +123,7 @@ Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format
SELECT level, sum(total) FROM daily GROUP BY level;
```

-To improve performance, received messages are grouped into blocks of size [max\_insert\_block\_size](../settings/settings.md#settings-max_insert_block_size). If a block could not be formed within [stream\_flush\_interval\_ms](../settings/settings.md) milliseconds, the data is flushed to the table regardless of how full the block is.
+To improve performance, received messages are grouped into blocks of size [max\_insert\_block\_size](../../../operations/settings/settings.md#settings-max_insert_block_size). If a block could not be formed within [stream\_flush\_interval\_ms](../../../operations/settings/settings.md) milliseconds, the data is flushed to the table regardless of how full the block is.
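The hunk above references the `daily` aggregation from the Kafka example; a condensed sketch of that pipeline, with hypothetical broker, topic, and table names (a consumer table, a storage table, and a materialized view that moves the data between them):

``` sql
-- Consumer table: reads JSONEachRow messages from a Kafka topic.
CREATE TABLE queue (timestamp DateTime, level String, message String)
ENGINE = Kafka SETTINGS kafka_broker_list = 'localhost:9092',
                        kafka_topic_list = 'readings',
                        kafka_group_name = 'readings_group',
                        kafka_format = 'JSONEachRow';

-- Persistent storage for the aggregated rows.
CREATE TABLE daily (day Date, level String, total UInt64)
ENGINE = SummingMergeTree() ORDER BY (day, level);

-- The materialized view consumes from the queue in the background.
CREATE MATERIALIZED VIEW consumer TO daily AS
SELECT toDate(timestamp) AS day, level, count() AS total
FROM queue GROUP BY day, level;
```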
To stop receiving topic data or to change the transformation logic, detach the materialized view:

diff --git a/docs/ru/operations/table_engines/mysql.md b/docs/ru/engines/table_engines/integrations/mysql.md
similarity index 89%
rename from docs/ru/operations/table_engines/mysql.md
rename to docs/ru/engines/table_engines/integrations/mysql.md
index 09ca9077c2c..7260b182c6d 100644
--- a/docs/ru/operations/table_engines/mysql.md
+++ b/docs/ru/engines/table_engines/integrations/mysql.md
@@ -13,12 +13,12 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
) ENGINE = MySQL('host:port', 'database', 'table', 'user', 'password'[, replace_query, 'on_duplicate_clause']);
```

-See the detailed description of the [CREATE TABLE](../../query_language/create.md#create-table-query) query.
+See the detailed description of the [CREATE TABLE](../../../engines/table_engines/integrations/mysql.md#create-table-query) query.

The table structure may differ from the structure of the source MySQL table:

- Column names must be the same as in the source MySQL table, but you may use only some of these columns, in any order.
-- Column types may differ from the types in the source MySQL table. ClickHouse tries to [cast](../../query_language/functions/type_conversion_functions.md#type_conversion_function-cast) values to the ClickHouse data types.
+- Column types may differ from the types in the source MySQL table. ClickHouse tries to [cast](../../../engines/table_engines/integrations/mysql.md#type_conversion_function-cast) values to the ClickHouse data types.

**Engine parameters**

@@ -92,7 +92,7 @@ SELECT * FROM mysql_table

## See also {#smotrite-takzhe}

-- [The 'mysql' table function](../../query_language/table_functions/mysql.md)
-- [Using MySQL as a source for an external dictionary](../../query_language/dicts/external_dicts_dict_sources.md#dicts-external_dicts_dict_sources-mysql)
+- [The 'mysql' table function](../../../engines/table_engines/integrations/mysql.md)
+- [Using MySQL as a source for an external dictionary](../../../engines/table_engines/integrations/mysql.md#dicts-external_dicts_dict_sources-mysql)

[Original article](https://clickhouse.tech/docs/ru/operations/table_engines/mysql/)

diff --git a/docs/ru/operations/table_engines/odbc.md b/docs/ru/engines/table_engines/integrations/odbc.md
similarity index 90%
rename from docs/ru/operations/table_engines/odbc.md
rename to docs/ru/engines/table_engines/integrations/odbc.md
index b3dde77890c..6124a928315 100644
--- a/docs/ru/operations/table_engines/odbc.md
+++ b/docs/ru/engines/table_engines/integrations/odbc.md
@@ -4,7 +4,7 @@

To make ODBC usage safe, ClickHouse uses a separate program, `clickhouse-odbc-bridge`. If the ODBC driver were loaded directly from `clickhouse-server`, driver problems could crash the ClickHouse server. ClickHouse starts `clickhouse-odbc-bridge` automatically as needed. The program is installed from the same package as `clickhouse-server`.

-The engine supports the [Nullable](../../data_types/nullable.md) data type.
+The engine supports the [Nullable](../../../engines/table_engines/integrations/odbc.md) data type.
## Creating a table {#sozdanie-tablitsy}

@@ -18,12 +18,12 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
ENGINE = ODBC(connection_settings, external_database, external_table)
```

-See the detailed description of the [CREATE TABLE](../../query_language/create.md#create-table-query) query.
+See the detailed description of the [CREATE TABLE](../../../engines/table_engines/integrations/odbc.md#create-table-query) query.

The table structure may differ from the structure of the source table in the remote DBMS:

- Column names must be the same as in the source table, but you may use only some of these columns, in any order.
-- Column types may differ from the types of the corresponding columns in the source table. ClickHouse tries to [cast](../../query_language/functions/type_conversion_functions.md#type_conversion_function-cast) values to the ClickHouse data types.
+- Column types may differ from the types of the corresponding columns in the source table. ClickHouse tries to [cast](../../../engines/table_engines/integrations/odbc.md#type_conversion_function-cast) values to the ClickHouse data types.

**Engine parameters**

@@ -119,7 +119,7 @@ SELECT * FROM odbc_t

## See also {#smotrite-takzhe}

-- [ODBC external dictionaries](../../query_language/dicts/external_dicts_dict_sources.md#dicts-external_dicts_dict_sources-odbc)
-- [odbc table function](../../query_language/table_functions/odbc.md)
+- [ODBC external dictionaries](../../../engines/table_engines/integrations/odbc.md#dicts-external_dicts_dict_sources-odbc)
+- [odbc table function](../../../engines/table_engines/integrations/odbc.md)

[Original article](https://clickhouse.tech/docs/ru/operations/table_engines/odbc/)

diff --git a/docs/ru/engines/table_engines/log_family/index.md b/docs/ru/engines/table_engines/log_family/index.md
new file mode 100644
index 00000000000..a64371200a6
--- /dev/null
+++ b/docs/ru/engines/table_engines/log_family/index.md
@@ -0,0 +1,5 @@
+---
+toc_folder_title: Log Family
+toc_priority: 29
+---
+
diff --git a/docs/ru/operations/table_engines/log.md b/docs/ru/engines/table_engines/log_family/log.md
similarity index 100%
rename from docs/ru/operations/table_engines/log.md
rename to docs/ru/engines/table_engines/log_family/log.md
diff --git a/docs/ru/operations/table_engines/log_family.md b/docs/ru/engines/table_engines/log_family/log_family.md
similarity index 97%
rename from docs/ru/operations/table_engines/log_family.md
rename to docs/ru/engines/table_engines/log_family/log_family.md
index 597d331981c..f132c2e8d33 100644
--- a/docs/ru/operations/table_engines/log_family.md
+++ b/docs/ru/engines/table_engines/log_family/log_family.md
@@ -20,7 +20,7 @@

During `INSERT` queries the table is locked, and other read and write queries wait for the table to be unlocked. If there are no data-writing queries, any number of concurrent read queries can run.

-- Do not support [mutation](../../query_language/alter.md#alter-mutations) operations.
+- Do not support [mutation](../../../engines/table_engines/log_family/log_family.md#alter-mutations) operations.

- Do not support indexes.
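The limitations just listed shape the typical usage pattern: write a small table in a few batches, then read it back whole. A minimal sketch (the table and data are hypothetical, not part of the moved file):

``` sql
CREATE TABLE tiny_events (id UInt32, message String)
ENGINE = TinyLog;

INSERT INTO tiny_events VALUES (1, 'first'), (2, 'second');

-- Reads scan the whole table: there are no indexes to narrow the scan.
SELECT * FROM tiny_events;
```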
diff --git a/docs/ru/operations/table_engines/stripelog.md b/docs/ru/engines/table_engines/log_family/stripelog.md
similarity index 98%
rename from docs/ru/operations/table_engines/stripelog.md
rename to docs/ru/engines/table_engines/log_family/stripelog.md
index 3f69e1bdd73..4eb4d4620de 100644
--- a/docs/ru/operations/table_engines/stripelog.md
+++ b/docs/ru/engines/table_engines/log_family/stripelog.md
@@ -15,7 +15,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
) ENGINE = StripeLog
```

-See the detailed description of the [CREATE TABLE](../../query_language/create.md#create-table-query) query.
+See the detailed description of the [CREATE TABLE](../../../engines/table_engines/log_family/stripelog.md#create-table-query) query.

## Writing the data {#table_engines-stripelog-writing-the-data}

diff --git a/docs/ru/operations/table_engines/tinylog.md b/docs/ru/engines/table_engines/log_family/tinylog.md
similarity index 100%
rename from docs/ru/operations/table_engines/tinylog.md
rename to docs/ru/engines/table_engines/log_family/tinylog.md
diff --git a/docs/ru/operations/table_engines/aggregatingmergetree.md b/docs/ru/engines/table_engines/mergetree_family/aggregatingmergetree.md
similarity index 95%
rename from docs/ru/operations/table_engines/aggregatingmergetree.md
rename to docs/ru/engines/table_engines/mergetree_family/aggregatingmergetree.md
index 64ae3aa037c..8fdf063f569 100644
--- a/docs/ru/operations/table_engines/aggregatingmergetree.md
+++ b/docs/ru/engines/table_engines/mergetree_family/aggregatingmergetree.md
@@ -4,7 +4,7 @@

`AggregatingMergeTree` tables can be used for incremental data aggregation, including for aggregating materialized views.

-The engine processes all columns of the [AggregateFunction](../../data_types/nested_data_structures/aggregatefunction.md) type.
+The engine processes all columns of the [AggregateFunction](../../../engines/table_engines/mergetree_family/aggregatingmergetree.md) type.

Using `AggregatingMergeTree` is justified only when it reduces the number of rows by orders of magnitude.

@@ -23,7 +23,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
[SETTINGS name=value, ...]
```

-For a description of the query parameters, see the [query description](../../query_language/create.md).
+For a description of the query parameters, see the [query description](../../../engines/table_engines/mergetree_family/aggregatingmergetree.md).

**Query clauses**

diff --git a/docs/ru/operations/table_engines/collapsingmergetree.md b/docs/ru/engines/table_engines/mergetree_family/collapsingmergetree.md
similarity index 99%
rename from docs/ru/operations/table_engines/collapsingmergetree.md
rename to docs/ru/engines/table_engines/mergetree_family/collapsingmergetree.md
index 38d4d475e07..5179ac06fa5 100644
--- a/docs/ru/operations/table_engines/collapsingmergetree.md
+++ b/docs/ru/engines/table_engines/mergetree_family/collapsingmergetree.md
@@ -21,7 +21,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
[SETTINGS name=value, ...]
```

-For details on `CREATE TABLE`, see the [query description](../../query_language/create.md).
+For details on `CREATE TABLE`, see the [query description](../../../engines/table_engines/mergetree_family/collapsingmergetree.md).
**CollapsingMergeTree parameters**

diff --git a/docs/ru/operations/table_engines/custom_partitioning_key.md b/docs/ru/engines/table_engines/mergetree_family/custom_partitioning_key.md
similarity index 91%
rename from docs/ru/operations/table_engines/custom_partitioning_key.md
rename to docs/ru/engines/table_engines/mergetree_family/custom_partitioning_key.md
index c2b846ef3c1..039ab1ba0cf 100644
--- a/docs/ru/operations/table_engines/custom_partitioning_key.md
+++ b/docs/ru/engines/table_engines/mergetree_family/custom_partitioning_key.md
@@ -1,6 +1,6 @@
# Custom partitioning key {#proizvolnyi-kliuch-partitsionirovaniia}

-Data partitioning is available for tables of the [MergeTree](mergetree.md) family (including [replicated tables](replication.md)). [MaterializedView](materializedview.md) tables created on top of MergeTree tables also support partitioning.
+Data partitioning is available for tables of the [MergeTree](mergetree.md) family (including [replicated tables](replication.md)). [MaterializedView](../special/materializedview.md) tables created on top of MergeTree tables also support partitioning.

A partition is a set of table records combined by some criterion. For example, a partition can be by month, by day, or by event type. Data for different partitions is stored separately, which makes it possible to optimize work with the data: query processing then uses only the necessary subset of all the data. For example, when retrieving data for a specific month, ClickHouse reads only that month's data.

@@ -33,7 +33,7 @@ ORDER BY (CounterID, StartDate, intHash32(UserID));

!!! info "Info"
    Overly granular partitioning is not recommended, that is, partitioning by a column with too wide a spread of values (more than about a thousand partitions). It leads to a large accumulation of files and file descriptors in the system, which can significantly degrade the performance of `SELECT` queries.

-To get the set of parts and partitions of a table, you can use the [system.parts](../system_tables.md#system_tables-parts) system table. As an example, consider the table `visits`, partitioned by month. Let us run a `SELECT` on the `system.parts` table:
+To get the set of parts and partitions of a table, you can use the [system.parts](../../../engines/table_engines/mergetree_family/custom_partitioning_key.md#system_tables-parts) system table. As an example, consider the table `visits`, partitioned by month. Let us run a `SELECT` on the `system.parts` table:

``` sql
SELECT
@@ -74,7 +74,7 @@ WHERE table = 'visits'

As seen in the example above, the table contains several separate parts for the same partition (for example, the parts `201901_1_3_1` and `201901_1_9_2` belong to partition `201901`). This means those parts have not yet been merged and are stored separately in the file system. After the automatic data merge is performed (approximately 10 minutes after the data is inserted), the source parts are merged into one larger part and marked inactive.

-You can start an unscheduled merge of data with the [OPTIMIZE](../../query_language/misc.md#misc_operations-optimize) query.
Example:
+You can start an unscheduled merge of data with the [OPTIMIZE](../../../engines/table_engines/mergetree_family/custom_partitioning_key.md#misc_operations-optimize) query. Example:

``` sql
OPTIMIZE TABLE visits PARTITION 201902;
```

@@ -115,12 +115,12 @@ drwxr-xr-x 2 clickhouse clickhouse 4096 Feb 1 16:48 detached

The `detached` directory contains parts detached from the table with the [DETACH](#alter_detach-partition) query. Corrupted parts are also moved to this directory instead of being deleted from the server.

-The server does not use the parts from the `detached` directory. You can add, delete, or modify data in the detached directory at any time; the server will not know about it until you run the [ATTACH](../../query_language/alter.md#alter_attach-partition) query.
+The server does not use the parts from the `detached` directory. You can add, delete, or modify data in the detached directory at any time; the server will not know about it until you run the [ATTACH](../../../engines/table_engines/mergetree_family/custom_partitioning_key.md#alter_attach-partition) query.

Note that on a running server you cannot manually change the set of parts on the file system, because the server will not know about it. For non-replicated tables you can do this with the server stopped, but it is not recommended. For replicated tables, the set of parts must not be changed in any case.

-ClickHouse lets you perform various manipulations with parts: delete them, copy them from one table to another, or create backups of them. See the details in the section [Manipulations with partitions and parts](../../query_language/alter.md#alter_manipulations-with-partitions).
+ClickHouse lets you perform various manipulations with parts: delete them, copy them from one table to another, or create backups of them. See the details in the section [Manipulations with partitions and parts](../../../engines/table_engines/mergetree_family/custom_partitioning_key.md#alter_manipulations-with-partitions).

[Original article:](https://clickhouse.tech/docs/ru/operations/table_engines/custom_partitioning_key/)

diff --git a/docs/ru/operations/table_engines/graphitemergetree.md b/docs/ru/engines/table_engines/mergetree_family/graphitemergetree.md
similarity index 93%
rename from docs/ru/operations/table_engines/graphitemergetree.md
rename to docs/ru/engines/table_engines/mergetree_family/graphitemergetree.md
index c128da7ac02..305300fc9a5 100644
--- a/docs/ru/operations/table_engines/graphitemergetree.md
+++ b/docs/ru/engines/table_engines/mergetree_family/graphitemergetree.md
@@ -23,7 +23,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
[SETTINGS name=value, ...]
```

-See the description of the [CREATE TABLE](../../query_language/create.md#create-table-query) query.
+See the description of the [CREATE TABLE](../../../engines/table_engines/mergetree_family/graphitemergetree.md#create-table-query) query.

The table must have columns for the following data:

@@ -74,7 +74,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]

## Rollup configuration {#rollup-configuration}

-The data thinning settings are defined by the [graphite\_rollup](../server_settings/settings.md#server_settings-graphite_rollup) parameter in the server configuration. The parameter name can be anything. You can create several configurations and use them for different tables.
+The data thinning settings are defined by the [graphite\_rollup](../../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-graphite_rollup) parameter in the server configuration. The parameter name can be anything. You can create several configurations and use them for different tables.

The structure of the rollup configuration:

diff --git a/docs/ru/engines/table_engines/mergetree_family/index.md b/docs/ru/engines/table_engines/mergetree_family/index.md
new file mode 100644
index 00000000000..6a23ef23a8e
--- /dev/null
+++ b/docs/ru/engines/table_engines/mergetree_family/index.md
@@ -0,0 +1,5 @@
+---
+toc_folder_title: MergeTree Family
+toc_priority: 28
+---
+
diff --git a/docs/ru/operations/table_engines/mergetree.md b/docs/ru/engines/table_engines/mergetree_family/mergetree.md
similarity index 87%
rename from docs/ru/operations/table_engines/mergetree.md
rename to docs/ru/engines/table_engines/mergetree_family/mergetree.md
index f8bbc983b74..e0ab59c8a05 100644
--- a/docs/ru/operations/table_engines/mergetree.md
+++ b/docs/ru/engines/table_engines/mergetree_family/mergetree.md
@@ -23,7 +23,7 @@

If necessary, you can define a data sampling method for the table.

!!! info "Info"
-    The [Merge](merge.md) engine does not belong to the `*MergeTree` family.
+    The [Merge](../special/merge.md) engine does not belong to the `*MergeTree` family.

## Creating a table {#table_engine-mergetree-creating-a-table}

@@ -44,7 +44,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
[SETTINGS name=value, ...]
```

-For a description of the parameters, see the [CREATE query description](../../query_language/create.md).
+For a description of the parameters, see the [CREATE query description](../../../engines/table_engines/mergetree_family/mergetree.md).

!!! note "Note"
    `INDEX` is an experimental feature; see [Data skipping indexes](#table_engine-mergetree-data_skipping-indexes).

@@ -55,7 +55,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]

- `PARTITION BY` - the [partitioning key](custom_partitioning_key.md).

-    For partitioning by month, use the `toYYYYMM(date_column)` expression, where `date_column` is a column with a date of type [Date](../../data_types/date.md). In this case the partition names have the `"YYYYMM"` format.
+    For partitioning by month, use the `toYYYYMM(date_column)` expression, where `date_column` is a column with a date of type [Date](../../../engines/table_engines/mergetree_family/mergetree.md). In this case the partition names have the `"YYYYMM"` format.

- `ORDER BY` - the sorting key.

@@ -84,7 +84,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]

- `index_granularity` - the maximum number of data rows between index marks. Default: 8192. See [Data storage](#mergetree-data-storage).
- `index_granularity_bytes` - the maximum size of a data granule in bytes. Default: 10Mb. To limit granule size by row count only, set the value to 0 (not recommended). See [Data storage](#mergetree-data-storage).
- `enable_mixed_granularity_parts` - enables or disables switching to limiting granule size with the `index_granularity_bytes` setting. Before version 19.11, granule size was limited only by the `index_granularity` setting. The `index_granularity_bytes` setting improves ClickHouse performance when selecting data from tables with large rows (tens or hundreds of megabytes). If you have tables with large rows, you can enable this setting to improve the efficiency of `SELECT` queries.
- - `use_minimalistic_part_header_in_zookeeper` - how data part headers are stored in ZooKeeper. If `use_minimalistic_part_header_in_zookeeper = 1`, ZooKeeper stores less data. For details, see the [setting description](../server_settings/settings.md#server-settings-use_minimalistic_part_header_in_zookeeper) in the "Server configuration parameters" section.
+ - `use_minimalistic_part_header_in_zookeeper` - how data part headers are stored in ZooKeeper. If `use_minimalistic_part_header_in_zookeeper = 1`, ZooKeeper stores less data. For details, see the [setting description](../../../operations/server_configuration_parameters/settings.md#server-settings-use_minimalistic_part_header_in_zookeeper) in the "Server configuration parameters" section.
- `min_merge_bytes_to_use_direct_io` - the minimum data volume for a merge operation that is required for using direct (unbuffered) disk I/O. When merging data parts, ClickHouse calculates the total storage volume of all the data to be merged. If the total storage volume of all the data to be read exceeds `min_bytes_to_use_direct_io` bytes, ClickHouse reads the data from the disk using the `O_DIRECT` flag. If `min_merge_bytes_to_use_direct_io = 0`, direct I/O is disabled. Default value: `10 * 1024 * 1024 * 1024` bytes.
- `merge_with_ttl_timeout` - the minimum time in seconds before repeating a merge with TTL. Default: 86400 (1 day).

@@ -100,7 +100,7 @@ ENGINE MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDa

In the example we set partitioning by month.

-We also set an expression for sampling as a hash of the visitor ID. This lets you pseudo-randomly shuffle the data in the table for each `CounterID` and `EventDate`. If you specify a [SAMPLE](../../query_language/select.md#select-sample-clause) clause when selecting data, ClickHouse returns a uniform pseudo-random sample of the data for a subset of visitors.
+We also set an expression for sampling as a hash of the visitor ID. This lets you pseudo-randomly shuffle the data in the table for each `CounterID` and `EventDate`. If you specify a [SAMPLE](../../../engines/table_engines/mergetree_family/mergetree.md#select-sample-clause) clause when selecting data, ClickHouse returns a uniform pseudo-random sample of the data for a subset of visitors.

`index_granularity` could have been omitted, since 8192 is the default value.

@@ -122,9 +122,9 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]

**MergeTree() parameters**

-- `date-column` - the name of a column of type [Date](../../data_types/date.md). ClickHouse automatically creates partitions by month based on this column. The partition names have the `"YYYYMM"` format.
- `sampling_expression` - an expression for sampling.
-- `(primary, key)` - the primary key. Type: [Tuple()](../../data_types/tuple.md)
+- `date-column` - the name of a column of type [Date](../../../engines/table_engines/mergetree_family/mergetree.md). ClickHouse automatically creates partitions by month based on this column. The partition names have the `"YYYYMM"` format.
- `sampling_expression` - an expression for sampling.
+- `(primary, key)` - the primary key. Type: [Tuple()](../../../engines/table_engines/mergetree_family/mergetree.md)
- `index_granularity` - the index granularity: the number of data rows between index "marks". The value 8192 is suitable for most tasks.
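For orientation, a sketch of the deprecated positional syntax with the four parameters in the order just listed (date column, sampling expression, primary key tuple, index granularity); the column set here is hypothetical:

``` sql
CREATE TABLE hits_deprecated (EventDate Date, CounterID UInt32, UserID UInt64)
ENGINE = MergeTree(EventDate, intHash32(UserID), (CounterID, EventDate, intHash32(UserID)), 8192);
```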
**Example**

@@ -214,7 +214,7 @@ ClickHouse does not require a unique primary

In this scenario it makes sense to keep only a few columns in the primary key that provide efficient filtering by the index, and to add the remaining dimension columns to the sorting key expression.

-[ALTER of the sorting key](../../query_language/alter.md) is a lightweight operation, because when a new column is added to the table and to the sorting key at the same time, the existing data parts do not need to be changed (they remain sorted by the new sorting key expression as well).
+[ALTER of the sorting key](../../../engines/table_engines/mergetree_family/mergetree.md) is a lightweight operation, because when a new column is added to the table and to the sorting key at the same time, the existing data parts do not need to be changed (they remain sorted by the new sorting key expression as well).

### Using indexes and partitions in queries {#ispolzovanie-indeksov-i-partitsii-v-zaprosakh}

@@ -246,7 +246,7 @@
SELECT count() FROM table WHERE CounterID = 34 OR URL LIKE '%upyachka%'
```

-To check whether ClickHouse can use the index when running a query, use the settings [force\_index\_by\_date](../settings/settings.md#settings-force_index_by_date) and [force\_primary\_key](../settings/settings.md#settings-force_primary_key).
+To check whether ClickHouse can use the index when running a query, use the settings [force\_index\_by\_date](../../../operations/settings/settings.md#settings-force_index_by_date) and [force\_primary\_key](../../../operations/settings/settings.md#settings-force_primary_key).

A monthly partitioning key ensures that only data blocks containing dates in the required range are read. A data block may contain data for many dates (up to an entire month). Within a block, data is sorted by the primary key, which might not contain the date as the first column. Because of this, a query with a condition only on the date, without a prefix of the primary key, will read more data than needed for a single date.

@@ -304,7 +304,7 @@ SELECT count() FROM table WHERE u64 * i32 == 10 AND u64 * length(s) >= 1234

Supported data types: `Int*`, `UInt*`, `Float*`, `Enum`, `Date`, `DateTime`, `String`, `FixedString`.

-    The following functions can use the filter: [equals](../../query_language/functions/comparison_functions.md), [notEquals](../../query_language/functions/comparison_functions.md), [in](../../query_language/functions/in_functions.md), [notIn](../../query_language/functions/in_functions.md).
+    The following functions can use the filter: [equals](../../../engines/table_engines/mergetree_family/mergetree.md), [notEquals](../../../engines/table_engines/mergetree_family/mergetree.md), [in](../../../engines/table_engines/mergetree_family/mergetree.md), [notIn](../../../engines/table_engines/mergetree_family/mergetree.md).
**Examples**

@@ -321,21 +321,21 @@ INDEX b (u64 * length(str), i32 + f64 * 100, date, str) TYPE set(100) GRANULARIT

| Function (operator) / Index | primary key | minmax | ngrambf\_v1 | tokenbf\_v1 | bloom\_filter |
|----------------------------------------------------------------------------------------------------------|-------------|--------|-------------|-------------|---------------|
-| [equals (=, ==)](../../query_language/functions/comparison_functions.md#function-equals) | ✔ | ✔ | ✔ | ✔ | ✔ |
-| [notEquals(!=, \<\>)](../../query_language/functions/comparison_functions.md#function-notequals) | ✔ | ✔ | ✔ | ✔ | ✔ |
-| [like](../../query_language/functions/string_search_functions.md#function-like) | ✔ | ✔ | ✔ | ✗ | ✗ |
-| [notLike](../../query_language/functions/string_search_functions.md#function-notlike) | ✔ | ✔ | ✔ | ✔ | ✗ |
-| [startsWith](../../query_language/functions/string_functions.md#startswith) | ✔ | ✔ | ✔ | ✔ | ✗ |
-| [endsWith](../../query_language/functions/string_functions.md#endswith) | ✗ | ✗ | ✔ | ✔ | ✗ |
-| [multiSearchAny](../../query_language/functions/string_search_functions.md#function-multisearchany) | ✗ | ✗ | ✔ | ✔ | ✗ |
-| [in](../../query_language/functions/in_functions.md#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ |
-| [notIn](../../query_language/functions/in_functions.md#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ |
-| [less (\<)](../../query_language/functions/comparison_functions.md#function-less) | ✔ | ✔ | ✗ | ✗ | ✗ |
-| [greater (\>)](../../query_language/functions/comparison_functions.md#function-greater) | ✔ | ✔ | ✗ | ✗ | ✗ |
-| [lessOrEquals (\<=)](../../query_language/functions/comparison_functions.md#function-lessorequals) | ✔ | ✔ | ✗ | ✗ | ✗ |
-| [greaterOrEquals (\>=)](../../query_language/functions/comparison_functions.md#function-greaterorequals) | ✔ | ✔ | ✗ | ✗ | ✗ |
-| [empty](../../query_language/functions/array_functions.md#function-empty) | ✔ | ✔ | ✗ | ✗ | ✗ |
-| [notEmpty](../../query_language/functions/array_functions.md#function-notempty) | ✔ | ✔ | ✗ | ✗ | ✗ |
+| [equals (=, ==)](../../../engines/table_engines/mergetree_family/mergetree.md#function-equals) | ✔ | ✔ | ✔ | ✔ | ✔ |
+| [notEquals(!=, \<\>)](../../../engines/table_engines/mergetree_family/mergetree.md#function-notequals) | ✔ | ✔ | ✔ | ✔ | ✔ |
+| [like](../../../engines/table_engines/mergetree_family/mergetree.md#function-like) | ✔ | ✔ | ✔ | ✗ | ✗ |
+| [notLike](../../../engines/table_engines/mergetree_family/mergetree.md#function-notlike) | ✔ | ✔ | ✔ | ✔ | ✗ |
+| [startsWith](../../../engines/table_engines/mergetree_family/mergetree.md#startswith) | ✔ | ✔ | ✔ | ✔ | ✗ |
+| [endsWith](../../../engines/table_engines/mergetree_family/mergetree.md#endswith) | ✗ | ✗ | ✔ | ✔ | ✗ |
+| [multiSearchAny](../../../engines/table_engines/mergetree_family/mergetree.md#function-multisearchany) | ✗ | ✗ | ✔ | ✔ | ✗ |
+| [in](../../../engines/table_engines/mergetree_family/mergetree.md#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ |
+| [notIn](../../../engines/table_engines/mergetree_family/mergetree.md#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ |
+| [less (\<)](../../../engines/table_engines/mergetree_family/mergetree.md#function-less) | ✔ | ✔ | ✗ | ✗ | ✗ |
+| [greater (\>)](../../../engines/table_engines/mergetree_family/mergetree.md#function-greater) | ✔ | ✔ | ✗ | ✗ | ✗ |
+| [lessOrEquals (\<=)](../../../engines/table_engines/mergetree_family/mergetree.md#function-lessorequals) | ✔ | ✔ | ✗ | ✗ | ✗ |
+| [greaterOrEquals (\>=)](../../../engines/table_engines/mergetree_family/mergetree.md#function-greaterorequals) | ✔ | ✔ | ✗ | ✗ | ✗ |
+| [empty](../../../engines/table_engines/mergetree_family/mergetree.md#function-empty) | ✔ | ✔ | ✗ | ✗ | ✗ |
+| [notEmpty](../../../engines/table_engines/mergetree_family/mergetree.md#function-notempty) | ✔ | ✔ | ✗ | ✗ | ✗ |
| hasToken | ✗ | ✗ | ✗ | ✔ | ✗ |

Functions with a constant argument smaller than the ngram size cannot use the `ngrambf_v1` index for query optimization.

@@ -367,7 +367,7 @@

The `TTL` clause can be set both for the whole table and for each individual column. Table-level `TTL` rules can also specify target disks or volumes for moving data parts to in the background.

-Expressions must evaluate to the [Date](../../data_types/date.md) or [DateTime](../../data_types/datetime.md) type.
+Expressions must evaluate to the [Date](../../../engines/table_engines/mergetree_family/mergetree.md) or [DateTime](../../../engines/table_engines/mergetree_family/mergetree.md) type.

To set the lifetime of a column, for example:

``` sql
TTL time_column
TTL time_column + interval
```

-To define `interval`, use the [time interval](../../query_language/operators.md#operators-datetime) operators.
+To define `interval`, use the [time interval](../../../engines/table_engines/mergetree_family/mergetree.md#operators-datetime) operators.

``` sql
TTL date_time + INTERVAL 1 MONTH
```

@@ -465,7 +465,7 @@ ALTER TABLE example_table

When ClickHouse detects that some data is expired, it performs unscheduled merges. To control the frequency of such merges, you can set the [merge\_with\_ttl\_timeout](#mergetree_setting-merge_with_ttl_timeout) setting. If its value is too low, many unscheduled merges have to be performed, and they may start consuming a significant share of server resources.

-If you run a `SELECT` query between merges, you may get expired data. To avoid this, run the [OPTIMIZE](../../query_language/misc.md#misc_operations-optimize) query before the `SELECT`.
+If you run a `SELECT` query between merges, you may get expired data. To avoid this, run the [OPTIMIZE](../../../engines/table_engines/mergetree_family/mergetree.md#misc_operations-optimize) query before the `SELECT`.

## Storing table data on multiple block devices {#table_engine-mergetree-multiple-volumes}

@@ -473,16 +473,16 @@ ALTER TABLE example_table

Table engines of the `MergeTree` family can store data on multiple block devices. This can be useful, for example, for implicitly splitting the data of a single table into "hot" and "cold" parts. The most recent portion is small and queried regularly, while the large tail of historical data is queried rarely. If several disks are available, the "hot" part of the data can be placed on fast disks (for example, NVMe SSDs or memory), and the "cold" part on slower disks (for example, HDDs).

-The minimum movable unit for `MergeTree` is a data part. The data of one part can reside only on one disk.
Parts can be moved between disks in the background, according to user settings, as well as with the [ALTER](../../../engines/table_engines/mergetree_family/mergetree.md#alter_move-partition) queries.

### Terms {#terminy}

- Disk - a block device mounted in the file system.
-- Default disk - the disk that contains the path specified in the [path](../server_settings/settings.md#server_settings-path) server configuration setting.
+- Default disk - the disk that contains the path specified in the [path](../../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-path) server configuration setting.
- Volume - an ordered set of equivalent disks (similar to [JBOD](https://ru.wikipedia.org/wiki/JBOD)).
- Storage policy (StoragePolicy) - a set of volumes with rules for moving data between them.

-All the described entities are given names at creation time; the names can be found in the [system.storage\_policies](../system_tables.md#system_tables-storage_policies) and [system.disks](../system_tables.md#system_tables-disks) system tables. The storage policy name can be specified in the `storage_policy` setting of `MergeTree` family table engines.
+All the described entities are given names at creation time; the names can be found in the [system.storage\_policies](../../../engines/table_engines/mergetree_family/mergetree.md#system_tables-storage_policies) and [system.disks](../../../engines/table_engines/mergetree_family/mergetree.md#system_tables-disks) system tables. The storage policy name can be specified in the `storage_policy` setting of `MergeTree` family table engines.

### Configuration {#table_engine-mergetree-multiple-volumes-configure}

@@ -616,9 +616,9 @@ SETTINGS storage_policy = 'moving_from_ssd_to_hdd'

In `MergeTree` tables, data gets to disk in several ways:

- As a result of an insert (an `INSERT` query).
-- During background merges and [mutations](../../query_language/alter.md#alter-mutations).
+- During background merges and [mutations](../../../engines/table_engines/mergetree_family/mergetree.md#alter-mutations).
- When downloading data from another replica.
-- As a result of partition freezing with [ALTER TABLE … FREEZE PARTITION](../../query_language/alter.md#alter_freeze-partition).
+- As a result of partition freezing with [ALTER TABLE … FREEZE PARTITION](../../../engines/table_engines/mergetree_family/mergetree.md#alter_freeze-partition).

In all these cases except mutations and partition freezing, a volume and disk for the part being written are selected according to the specified storage configuration:

@@ -627,9 +627,8 @@ SETTINGS storage_policy = 'moving_from_ssd_to_hdd'

Mutations and partition freeze queries use [hard links](https://ru.wikipedia.org/wiki/%D0%96%D1%91%D1%81%D1%82%D0%BA%D0%B0%D1%8F_%D1%81%D1%81%D1%8B%D0%BB%D0%BA%D0%B0) in their implementation. Hard links between different disks are not supported, so in such operations the resulting parts are placed on the same disks as the source ones.

-In the background, parts are moved between volumes based on occupied-space information (the `move_factor` setting), in the order the volumes are listed in the configuration. Data is never moved from the last volume or to the first volume. Background moves can be monitored with the [system.part\_log](../system_tables.md#system_tables-part-log) system table (field `type = MOVE_PART`) and the [system.parts](../system_tables.md#system_tables-parts) table (fields `path` and `disk`). Detailed information about moves is also available in the server logs.
-
-With the [ALTER TABLE … MOVE PART\|PARTITION … TO VOLUME\|DISK …](../../query_language/alter.md#alter_move-partition) query, the user can forcibly move a part or a partition from one volume to another. All the restrictions that apply to background operations are taken into account. The query initiates the move on its own, without waiting for background operations. The user receives an error message if there is not enough free space or if any of the restrictions are violated.
+In the background, parts are moved between volumes based on occupied-space information (the `move_factor` setting), in the order the volumes are listed in the configuration. Data is never moved from the last volume or to the first volume. Background moves can be monitored with the [system.part\_log](../../../engines/table_engines/mergetree_family/mergetree.md#system_tables-part-log) system table (field `type = MOVE_PART`) and the [system.parts](../../../engines/table_engines/mergetree_family/mergetree.md#system_tables-parts) table (fields `path` and `disk`). Detailed information about moves is also available in the server logs.
+With the [ALTER TABLE … MOVE PART\|PARTITION … TO VOLUME\|DISK …](../../../engines/table_engines/mergetree_family/mergetree.md#alter_move-partition) query, the user can forcibly move a part or a partition from one volume to another. All the restrictions that apply to background operations are taken into account. The query initiates the move on its own, without waiting for background operations. The user receives an error message if there is not enough free space or if any of the restrictions are violated.

Data moves do not interact with data replication, so different storage policies can be specified for the same table on different replicas.

diff --git a/docs/ru/operations/table_engines/replacingmergetree.md b/docs/ru/engines/table_engines/mergetree_family/replacingmergetree.md
similarity index 96%
rename from docs/ru/operations/table_engines/replacingmergetree.md
rename to docs/ru/engines/table_engines/mergetree_family/replacingmergetree.md
index b403e485741..40a1eb1a9c6 100644
--- a/docs/ru/operations/table_engines/replacingmergetree.md
+++ b/docs/ru/engines/table_engines/mergetree_family/replacingmergetree.md
@@ -21,7 +21,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
[SETTINGS name=value, ...]
```

-For a description of the query parameters, see the [query description](../../query_language/create.md).
+For a description of the query parameters, see the [query description](../../../engines/table_engines/mergetree_family/replacingmergetree.md).

**ReplacingMergeTree parameters**

diff --git a/docs/ru/operations/table_engines/replication.md b/docs/ru/engines/table_engines/mergetree_family/replication.md
similarity index 97%
rename from docs/ru/operations/table_engines/replication.md
rename to docs/ru/engines/table_engines/mergetree_family/replication.md
index 67f8d5f0161..14a50a2b94b 100644
--- a/docs/ru/operations/table_engines/replication.md
+++ b/docs/ru/engines/table_engines/mergetree_family/replication.md
@@ -14,7 +14,7 @@

Replication does not depend on sharding. Replication works independently on each shard.

-The compressed data of `INSERT` and `ALTER` queries is replicated (for details, see the description of the [ALTER](../../query_language/alter.md#query_language_queries_alter) query).
+The compressed data of `INSERT` and `ALTER` queries is replicated (for details, see the description of the [ALTER](../../../engines/table_engines/mergetree_family/replication.md#query_language_queries_alter) query).
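For context, replication is enabled per table by using a `Replicated*` engine with a ZooKeeper path and a replica name. A hedged sketch with placeholder values (the path layout and the `{replica}` macro are common conventions, not something this patch prescribes):

``` sql
-- '{replica}' is typically expanded from per-server macros in the config.
CREATE TABLE hits_replicated (EventDate Date, CounterID UInt32)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/01/hits_replicated', '{replica}')
ORDER BY (CounterID, EventDate);
```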
`CREATE`, `DROP`, `ATTACH`, `DETACH`, and `RENAME` queries are executed on a single server and are not replicated:

@@ -24,7 +24,7 @@

ClickHouse stores meta information about replicas in [Apache ZooKeeper](https://zookeeper.apache.org). Use ZooKeeper version 3.4.5 or newer.

-To use replication, set the parameters in the [zookeeper](../server_settings/settings.md#server-settings_zookeeper) section of the server configuration.
+To use replication, set the parameters in the [zookeeper](../../../operations/server_configuration_parameters/settings.md#server-settings_zookeeper) section of the server configuration.

!!! attention "Attention"
    Do not neglect the security settings. ClickHouse supports the `digest` [ACL scheme](https://zookeeper.apache.org/doc/current/zookeeperProgrammers.html#sc_ZooKeeperAccessControl) of the ZooKeeper security subsystem.

@@ -52,7 +52,7 @@ ClickHouse stores meta information about replicas in [Apa

If ZooKeeper is not configured in the configuration file, you cannot create replicated tables, and already existing replicated tables are available in read-only mode.

-ZooKeeper is not used for `SELECT` queries, i.e. replication does not affect the performance of `SELECT`, and queries run as fast as for non-replicated tables. For queries to distributed replicated tables, ClickHouse behavior is controlled by the settings [max\_replica\_delay\_for\_distributed\_queries](../settings/settings.md#settings-max_replica_delay_for_distributed_queries) and [fallback\_to\_stale\_replicas\_for\_distributed\_queries](../settings/settings.md).
+ZooKeeper is not used for `SELECT` queries, i.e. replication does not affect the performance of `SELECT`, and queries run as fast as for non-replicated tables. For queries to distributed replicated tables, ClickHouse behavior is controlled by the settings [max\_replica\_delay\_for\_distributed\_queries](../../../operations/settings/settings.md#settings-max_replica_delay_for_distributed_queries) and [fallback\_to\_stale\_replicas\_for\_distributed\_queries](../../../operations/settings/settings.md).

For each `INSERT` query, about a dozen entries are written to ZooKeeper within several transactions. (To be more precise, this applies to each inserted block of data; an INSERT query contains one block, or one block per `max_insert_block_size = 1048576` rows.) This slightly increases `INSERT` latency compared to non-replicated tables. But if you follow the usual recommendation of inserting data in batches of no more than one `INSERT` per second, it causes no problems. An entire ClickHouse cluster that uses a single ZooKeeper cluster for coordination can sustain a combined total of several hundred `INSERT`s per second. The data insertion throughput (rows per second) is as high as for non-replicated tables.

@@ -64,7 +64,7 @@

Each block of data is written atomically. An INSERT query is split into blocks of up to `max_insert_block_size = 1048576` rows. In other words, if an `INSERT` query has fewer than 1048576 rows, it is performed atomically.

-Data blocks are deduplicated. If the same data block (blocks of the same size containing the same rows in the same order) is written multiple times, the block is written only once. This is done so that, in the event of a network failure where the client application cannot tell whether the data was written to the database, the `INSERT` query can simply be repeated.
It does not matter which replica the INSERTs with identical data are sent to; the `INSERT` query is idempotent. The deduplication parameters are controlled by the [merge\_tree](../server_settings/settings.md#server_settings-merge_tree) server settings
+Data blocks are deduplicated. If the same data block (blocks of the same size containing the same rows in the same order) is written multiple times, the block is written only once. This is done so that, in the event of a network failure where the client application cannot tell whether the data was written to the database, the `INSERT` query can simply be repeated. It does not matter which replica the INSERTs with identical data are sent to; the `INSERT` query is idempotent. The deduplication parameters are controlled by the [merge\_tree](../../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-merge_tree) server settings

During replication, only the original inserted data is transferred over the network. Further data transformations (merges) are coordinated and performed on all replicas in the same way. This minimizes network usage, which means replication works well when replicas are located in different data centers. (Note that duplicating data in different data centers is, in essence, the main purpose of replication.)

diff --git a/docs/ru/operations/table_engines/summingmergetree.md b/docs/ru/engines/table_engines/mergetree_family/summingmergetree.md
similarity index 91%
rename from docs/ru/operations/table_engines/summingmergetree.md
rename to docs/ru/engines/table_engines/mergetree_family/summingmergetree.md
index 6b1a41384c0..b69f58c3dac 100644
--- a/docs/ru/operations/table_engines/summingmergetree.md
+++ b/docs/ru/engines/table_engines/mergetree_family/summingmergetree.md
@@ -19,7 +19,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
[SETTINGS name=value, ...]
```

-For a description of the query parameters, see the [query description](../../query_language/create.md).
+For a description of the query parameters, see the [query description](../../../engines/table_engines/mergetree_family/summingmergetree.md).

**SummingMergeTree parameters**

@@ -91,7 +91,7 @@ SELECT key, sum(value) FROM summtt GROUP BY key

When data is inserted into the table, it is saved as is. ClickHouse periodically merges the inserted data parts, and it is at this point that rows with the same primary key are summed and replaced with a single row for each resulting data part.

-ClickHouse can merge data parts in such a way that not all rows with the same primary key end up in the same final part, i.e. the summation may be incomplete. Therefore, when selecting data (`SELECT`), you must use the [sum()](../../query_language/agg_functions/reference.md#agg_function-sum) aggregate function and a `GROUP BY` clause, as described in the example above.
+ClickHouse can merge data parts in such a way that not all rows with the same primary key end up in the same final part, i.e. the summation may be incomplete. Therefore, when selecting data (`SELECT`), you must use the [sum()](../../../engines/table_engines/mergetree_family/summingmergetree.md#agg_function-sum) aggregate function and a `GROUP BY` clause, as described in the example above.
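To make the sum()/GROUP BY requirement above concrete, a minimal sketch along the lines of the `summtt` table the hunk references:

``` sql
CREATE TABLE summtt (key UInt32, value UInt32)
ENGINE = SummingMergeTree() ORDER BY key;

INSERT INTO summtt VALUES (1, 1), (1, 2), (2, 1);

-- Aggregate explicitly: merges may not yet have combined all rows
-- that share the same key.
SELECT key, sum(value) FROM summtt GROUP BY key;
```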
### General summation rules {#obshchie-pravila-summirovaniia}

@@ -105,7 +105,7 @@

### Summation in AggregateFunction columns {#summirovanie-v-stolbtsakh-aggregatefunction}

-For columns of the [AggregateFunction](../../data_types/nested_data_structures/aggregatefunction.md#data_type-aggregatefunction) type, ClickHouse performs aggregation according to the given function, reproducing the behavior of the [AggregatingMergeTree](aggregatingmergetree.md) engine.
+For columns of the [AggregateFunction](../../../engines/table_engines/mergetree_family/summingmergetree.md#data_type-aggregatefunction) type, ClickHouse performs aggregation according to the given function, reproducing the behavior of the [AggregatingMergeTree](aggregatingmergetree.md) engine.

### Nested structures {#vlozhennye-struktury}

@@ -127,7 +127,7 @@

[(1, 100), (2, 150)] + [(1, -100)] -> [(2, 150)]
```

-When querying data, use the [sumMap(key, value)](../../query_language/agg_functions/reference.md) function for aggregating `Map`.
+When querying data, use the [sumMap(key, value)](../../../engines/table_engines/mergetree_family/summingmergetree.md) function for aggregating `Map`.

For a nested data structure, you do not need to specify its columns in the tuple of columns to sum.

diff --git a/docs/ru/operations/table_engines/versionedcollapsingmergetree.md b/docs/ru/engines/table_engines/mergetree_family/versionedcollapsingmergetree.md
similarity index 99%
rename from docs/ru/operations/table_engines/versionedcollapsingmergetree.md
rename to docs/ru/engines/table_engines/mergetree_family/versionedcollapsingmergetree.md
index a93b56f3c52..90647edd0eb 100644
--- a/docs/ru/operations/table_engines/versionedcollapsingmergetree.md
+++ b/docs/ru/engines/table_engines/mergetree_family/versionedcollapsingmergetree.md
@@ -24,7 +24,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
[SETTINGS name=value, ...]
```

-For details on `CREATE TABLE`, see the [query description](../../query_language/create.md).
+For details on `CREATE TABLE`, see the [query description](../../../engines/table_engines/mergetree_family/versionedcollapsingmergetree.md).

**Engine parameters**

diff --git a/docs/ru/operations/table_engines/buffer.md b/docs/ru/engines/table_engines/special/buffer.md
similarity index 100%
rename from docs/ru/operations/table_engines/buffer.md
rename to docs/ru/engines/table_engines/special/buffer.md
diff --git a/docs/ru/operations/table_engines/dictionary.md b/docs/ru/engines/table_engines/special/dictionary.md
similarity index 94%
rename from docs/ru/operations/table_engines/dictionary.md
rename to docs/ru/engines/table_engines/special/dictionary.md
index 9f764a65ffa..fac22b5d2f2 100644
--- a/docs/ru/operations/table_engines/dictionary.md
+++ b/docs/ru/engines/table_engines/special/dictionary.md
@@ -1,6 +1,6 @@
# Dictionary {#dictionary}

-The `Dictionary` engine displays [dictionary](../../query_language/dicts/external_dicts.md) data as a ClickHouse table.
+The `Dictionary` engine displays [dictionary](../../../engines/table_engines/special/dictionary.md) data as a ClickHouse table.

As an example, consider a dictionary `products` with the following configuration:

@@ -57,7 +57,7 @@ WHERE name = 'products'
└──────────┴──────┴────────┴─────────────────┴─────────────────┴─────────────────┴───────────────┴─────────────────┘
```

-In this form, the dictionary data can be retrieved with the [dictGet\*](../../query_language/functions/ext_dict_functions.md#ext_dict_functions) functions.
+
This representation is inconvenient when we need to get the data in raw form or perform a `JOIN` operation. For those cases you can use the `Dictionary` engine, which displays the dictionary data as a table.

diff --git a/docs/ru/operations/table_engines/distributed.md b/docs/ru/engines/table_engines/special/distributed.md
similarity index 92%
rename from docs/ru/operations/table_engines/distributed.md
rename to docs/ru/engines/table_engines/special/distributed.md
index 73547e76692..9fc7b470f1d 100644
--- a/docs/ru/operations/table_engines/distributed.md
+++ b/docs/ru/engines/table_engines/special/distributed.md
@@ -61,12 +61,12 @@ logs - the cluster name in the configuration file

For each server, the parameters `host`, `port` and, optionally, `user`, `password`, `secure`, `compression` are specified:

- `host` - the address of the remote server. A domain name or an IPv4 or IPv6 address can be specified. If a domain is specified, a DNS request is made at server startup, and the result is remembered for as long as the server runs. If the DNS request fails, the server does not start. If you change the DNS record, restart the server.
- `port` - the TCP port for inter-server communication (tcp\_port in the config, usually 9000). Not to be confused with http\_port.
-- `user` - the user name for connecting to the remote server. Default: default. This user must have access to connect to the specified server. Access is configured in the users.xml file; for details, see the [Access rights](../../operations/access_rights.md) section.
+- `user` - the user name for connecting to the remote server. Default: default. This user must have access to connect to the specified server. Access is configured in the users.xml file; for details, see the [Access rights](../../../operations/access_rights.md) section.
- `password` - the password for connecting to the remote server, in plain text. Default: empty string.
- `secure` - use an encrypted SSL connection, usually together with `port` = 9440. The server must listen on port 9440 with correct certificate settings.
- `compression` - use data compression. Default: true.

-When replicas are specified, one of the available replicas is selected for each shard when reading. You can configure the load balancing algorithm (that is, the preference for which replica to go to); see the [load\_balancing](../settings/settings.md#settings-load_balancing) setting.
+When replicas are specified, one of the available replicas is selected for each shard when reading. You can configure the load balancing algorithm (that is, the preference for which replica to go to); see the [load\_balancing](../../../operations/settings/settings.md#settings-load_balancing) setting.

If the connection to a server is not established, a connection attempt is made with a short timeout. If the attempt fails, the next replica is selected, and so on for all replicas. If the connection attempts fail for all replicas, they are repeated in a round-robin fashion several times. This works in favor of fault tolerance, although it does not provide complete fault tolerance: a remote server may accept the connection but not work, or work poorly.

@@ -78,7 +78,7 @@ The Distributed engine lets you work with a cluster as if it were a local server.
При этом, кластер является неэластичным: вы должны прописать его конфигурацию в конфигурационный файл сервера (лучше всех серверов кластера). -Как видно, движок Distributed требует прописывания кластера в конфигурационный файл; кластера из конфигурационного файла обновляются налету, без перезапуска сервера. Если вам необходимо каждый раз отправлять запрос на неизвестный набор шардов и реплик, вы можете не создавать Distributed таблицу, а воспользоваться табличной функцией remote. Смотрите раздел [Табличные функции](../../query_language/table_functions/index.md). +Как видно, движок Distributed требует прописывания кластера в конфигурационный файл; кластера из конфигурационного файла обновляются налету, без перезапуска сервера. Если вам необходимо каждый раз отправлять запрос на неизвестный набор шардов и реплик, вы можете не создавать Distributed таблицу, а воспользоваться табличной функцией remote. Смотрите раздел [Табличные функции](../../../engines/table_engines/special/distributed.md). Есть два способа записывать данные на кластер: @@ -107,10 +107,10 @@ logs - имя кластера в конфигурационном файле с - используются запросы, требующие соединение данных (IN, JOIN) по определённому ключу - тогда если данные шардированы по этому ключу, то можно использовать локальные IN, JOIN вместо GLOBAL IN, GLOBAL JOIN, что кардинально более эффективно. - используется большое количество серверов (сотни и больше) и большое количество маленьких запросов (запросы отдельных клиентов - сайтов, рекламодателей, партнёров) - тогда, для того, чтобы маленькие запросы не затрагивали весь кластер, имеет смысл располагать данные одного клиента на одном шарде, или (вариант, который используется в Яндекс.Метрике) сделать двухуровневое шардирование: разбить весь кластер на «слои», где слой может состоять из нескольких шардов; данные для одного клиента располагаются на одном слое, но в один слой можно по мере необходимости добавлять шарды, в рамках которых данные распределены произвольным образом; создаются распределённые таблицы на каждый слой и одна общая распределённая таблица для глобальных запросов. -Запись данных осуществляется полностью асинхронно. При вставке в таблицу, блок данных сначала записывается в файловую систему. Затем, в фоновом режиме отправляются на удалённые серверы при первой возможности. Период отправки регулируется настройками [distributed\_directory\_monitor\_sleep\_time\_ms](../settings/settings.md#distributed_directory_monitor_sleep_time_ms) и [distributed\_directory\_monitor\_max\_sleep\_time\_ms](../settings/settings.md#distributed_directory_monitor_max_sleep_time_ms). Движок таблиц `Distributed` отправляет каждый файл со вставленными данными отдельно, но можно включить пакетную отправку данных настройкой [distributed\_directory\_monitor\_batch\_inserts](../settings/settings.md#distributed_directory_monitor_batch_inserts). Эта настройка улучшает производительность кластера за счет более оптимального использования ресурсов сервера-отправителя и сети. Необходимо проверять, что данные отправлены успешно, для этого проверьте список файлов (данных, ожидающих отправки) в каталоге таблицы `/var/lib/clickhouse/data/database/table/`. +Запись данных осуществляется полностью асинхронно. При вставке в таблицу, блок данных сначала записывается в файловую систему. Затем, в фоновом режиме отправляются на удалённые серверы при первой возможности. 
Период отправки регулируется настройками [distributed\_directory\_monitor\_sleep\_time\_ms](../../../operations/settings/settings.md#distributed_directory_monitor_sleep_time_ms) и [distributed\_directory\_monitor\_max\_sleep\_time\_ms](../../../operations/settings/settings.md#distributed_directory_monitor_max_sleep_time_ms). Движок таблиц `Distributed` отправляет каждый файл со вставленными данными отдельно, но можно включить пакетную отправку данных настройкой [distributed\_directory\_monitor\_batch\_inserts](../../../operations/settings/settings.md#distributed_directory_monitor_batch_inserts). Эта настройка улучшает производительность кластера за счет более оптимального использования ресурсов сервера-отправителя и сети. Необходимо проверять, что данные отправлены успешно, для этого проверьте список файлов (данных, ожидающих отправки) в каталоге таблицы `/var/lib/clickhouse/data/database/table/`. Если после INSERT-а в Distributed таблицу, сервер перестал существовать или был грубо перезапущен (например, в следствие аппаратного сбоя), то записанные данные могут быть потеряны. Если в директории таблицы обнаружен повреждённый кусок данных, то он переносится в поддиректорию broken и больше не используется. -При выставлении опции max\_parallel\_replicas выполнение запроса распараллеливается по всем репликам внутри одного шарда. Подробнее смотрите раздел [max\_parallel\_replicas](../settings/settings.md#settings-max_parallel_replicas). +При выставлении опции max\_parallel\_replicas выполнение запроса распараллеливается по всем репликам внутри одного шарда. Подробнее смотрите раздел [max\_parallel\_replicas](../../../operations/settings/settings.md#settings-max_parallel_replicas). [Оригинальная статья](https://clickhouse.tech/docs/ru/operations/table_engines/distributed/) diff --git a/docs/ru/operations/table_engines/external_data.md b/docs/ru/engines/table_engines/special/external_data.md similarity index 100% rename from docs/ru/operations/table_engines/external_data.md rename to docs/ru/engines/table_engines/special/external_data.md diff --git a/docs/ru/operations/table_engines/file.md b/docs/ru/engines/table_engines/special/file.md similarity index 81% rename from docs/ru/operations/table_engines/file.md rename to docs/ru/engines/table_engines/special/file.md index 2d248c22081..138c2e47b89 100644 --- a/docs/ru/operations/table_engines/file.md +++ b/docs/ru/engines/table_engines/special/file.md @@ -14,13 +14,13 @@ File(Format) ``` -`Format` должен быть таким, который ClickHouse может использовать и в запросах `INSERT` и в запросах `SELECT`. Полный список поддерживаемых форматов смотрите в разделе [Форматы](../../interfaces/formats.md#formats). +`Format` должен быть таким, который ClickHouse может использовать и в запросах `INSERT` и в запросах `SELECT`. Полный список поддерживаемых форматов смотрите в разделе [Форматы](../../../interfaces/formats.md#formats). -Сервер ClickHouse не позволяет указать путь к файлу, с которым будет работать `File`. Используется путь к хранилищу, определенный параметром [path](../server_settings/settings.md) в конфигурации сервера. +Сервер ClickHouse не позволяет указать путь к файлу, с которым будет работать `File`. Используется путь к хранилищу, определенный параметром [path](../../../operations/server_configuration_parameters/settings.md) в конфигурации сервера. При создании таблицы с помощью `File(Format)` сервер ClickHouse создает в хранилище каталог с именем таблицы, а после добавления в таблицу данных помещает туда файл `data.Format`. 
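Например, минимальный набросок (имя таблицы условное, формат TabSeparated взят для иллюстрации):

``` sql
CREATE TABLE file_engine_table (name String, value UInt32) ENGINE = File(TabSeparated)
```

После такого запроса сервер создаст в хранилище каталог `file_engine_table`, а вставленные данные попадут в файл `data.TabSeparated`.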
-Можно вручную создать в хранилище каталог таблицы, поместить туда файл, затем на сервере ClickHouse добавить ([ATTACH](../../query_language/misc.md)) информацию о таблице, соответствующей имени каталога и прочитать из файла данные.
+Можно вручную создать в хранилище каталог таблицы, поместить туда файл, затем на сервере ClickHouse добавить ([ATTACH](../../../engines/table_engines/special/file.md)) информацию о таблице, соответствующей имени каталога, и прочитать из файла данные.
 
 !!! warning "Warning"
     Будьте аккуратны с этой функциональностью, поскольку сервер ClickHouse не отслеживает внешние изменения данных. Если в файл будет производиться запись одновременно со стороны сервера ClickHouse и с внешней стороны, то результат непредсказуем.
 
@@ -58,7 +58,7 @@ SELECT * FROM file_engine_table
 
 ## Использование движка в clickhouse-local {#ispolzovanie-dvizhka-v-clickhouse-local}
 
-В [clickhouse-local](../utils/clickhouse-local.md) движок в качестве параметра принимает не только формат, но и путь к файлу. В том числе можно указать стандартные потоки ввода/вывода цифровым или буквенным обозначением `0` или `stdin`, `1` или `stdout`.
+В [clickhouse-local](../../../engines/table_engines/special/file.md) движок в качестве параметра принимает не только формат, но и путь к файлу. В том числе можно указать стандартные потоки ввода/вывода цифровым или буквенным обозначением `0` или `stdin`, `1` или `stdout`.
 
 **Пример:**
 
diff --git a/docs/ru/engines/table_engines/special/generate.md b/docs/ru/engines/table_engines/special/generate.md
new file mode 100644
index 00000000000..87004bfe5b1
--- /dev/null
+++ b/docs/ru/engines/table_engines/special/generate.md
@@ -0,0 +1,59 @@
+---
+machine_translated: true
+machine_translated_rev: 1cd5f0028d917696daf71ac1c9ee849c99c1d5c8
+---
+
+# GenerateRandom {#table_engines-generate}
+
+Движок GenerateRandom генерирует случайные данные для заданной схемы таблицы.
+
+Примеры использования:
+
+- Заполнение большой таблицы воспроизводимыми данными в тестах.
+- Генерация случайных входных данных для фаззинг-тестов.
+
+## Использование в сервере ClickHouse {#usage-in-clickhouse-server}
+
+``` sql
+ENGINE = GenerateRandom(random_seed, max_string_length, max_array_length)
+```
+
+Параметры `max_array_length` и `max_string_length` задают максимальную длину всех
+столбцов-массивов и строк соответственно в генерируемых данных.
+
+Движок GenerateRandom поддерживает только запросы `SELECT`.
+
+Он поддерживает все [типы данных](../../../engines/table_engines/special/generate.md), которые могут храниться в таблице, за исключением `LowCardinality` и `AggregateFunction`.
+
+**Пример:**
+
+**1.** Создайте таблицу `generate_engine_table`:
+
+``` sql
+CREATE TABLE generate_engine_table (name String, value UInt32) ENGINE = GenerateRandom(1, 5, 3)
+```
+
+**2.** Запросите данные:
+
+``` sql
+SELECT * FROM generate_engine_table LIMIT 3
+```
+
+``` text
+┌─name─┬──────value─┐
+│ c4xJ │ 1412771199 │
+│ r    │ 1791099446 │
+│ 7#$  │  124312908 │
+└──────┴────────────┘
+```
+
+## Детали реализации {#details-of-implementation}
+
+- Не поддерживается:
+    - `ALTER`
+    - `SELECT ... SAMPLE`
+    - `INSERT`
+    - Индексы
+    - Репликация
+
+[Оригинальная статья](https://clickhouse.tech/docs/en/operations/table_engines/generate/)
diff --git a/docs/ru/engines/table_engines/special/index.md b/docs/ru/engines/table_engines/special/index.md
new file mode 100644
index 00000000000..22cebf295c1
--- /dev/null
+++ b/docs/ru/engines/table_engines/special/index.md
@@ -0,0 +1,5 @@
+---
+toc_folder_title: Special
+toc_priority: 31
+---
+
diff --git a/docs/ru/operations/table_engines/join.md b/docs/ru/engines/table_engines/special/join.md
similarity index 70%
rename from docs/ru/operations/table_engines/join.md
rename to docs/ru/engines/table_engines/special/join.md
index a9c06d05ebf..0ca53f34acb 100644
--- a/docs/ru/operations/table_engines/join.md
+++ b/docs/ru/engines/table_engines/special/join.md
@@ -1,6 +1,6 @@
 # Join {#join}
 
-Подготовленная структура данных для использования в операциях [JOIN](../../query_language/select.md#select-join).
+Подготовленная структура данных для использования в операциях [JOIN](../../../engines/table_engines/special/join.md#select-join).
 
 ## Создание таблицы {#creating-a-table}
 
@@ -12,12 +12,12 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
 ) ENGINE = Join(join_strictness, join_type, k1[, k2, ...])
 ```
 
-Смотрите подробное описание запроса [CREATE TABLE](../../query_language/create.md#create-table-query).
+Смотрите подробное описание запроса [CREATE TABLE](../../../engines/table_engines/special/join.md#create-table-query).
 
 **Параметры движка**
 
-- `join_strictness` – [строгость JOIN](../../query_language/select.md#select-join-strictness).
-- `join_type` – [тип JOIN](../../query_language/select.md#select-join-types).
+- `join_strictness` – [строгость JOIN](../../../engines/table_engines/special/join.md#select-join-strictness).
+- `join_type` – [тип JOIN](../../../engines/table_engines/special/join.md#select-join-types).
 - `k1[, k2, ...]` – ключевые столбцы секции `USING`, с которыми выполняется операция `JOIN`.
 
 Вводите параметры `join_strictness` и `join_type` без кавычек, например, `Join(ANY, LEFT, col1)`. Они должны быть такими же, как и в той операции `JOIN`, в которой таблица будет использоваться. Если параметры не совпадают, ClickHouse не генерирует исключение и может возвращать неверные данные.
@@ -79,21 +79,21 @@ SELECT joinGet('id_val_join', 'val', toUInt32(1))
 Из таблиц нельзя выбрать данные с помощью запроса `SELECT`. Вместо этого, используйте один из следующих методов:
 
 - Используйте таблицу как правую в секции `JOIN`.
-- Используйте функцию [joinGet](../../query_language/functions/other_functions.md#joinget), которая позволяет извлекать данные из таблицы таким же образом как из словаря.
+- Используйте функцию [joinGet](../../../engines/table_engines/special/join.md#joinget), которая позволяет извлекать данные из таблицы таким же образом, как из словаря.
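Для наглядности ниже минимальный набросок использования (имена и значения условные; строгость и тип соединения в движке должны совпадать с операцией `JOIN`):

``` sql
CREATE TABLE id_val_join (`id` UInt32, `val` UInt8) ENGINE = Join(ANY, LEFT, id);

INSERT INTO id_val_join VALUES (1, 21), (2, 22), (3, 23);

-- В операции JOIN используются те же ANY LEFT, что и в определении движка
SELECT * FROM (SELECT toUInt32(number) AS id FROM numbers(4)) AS t
ANY LEFT JOIN id_val_join USING (id);
```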
 ### Ограничения и настройки {#join-limitations-and-settings}
 
 При создании таблицы применяются следующие параметры:
 
-- [join\_use\_nulls](../settings/settings.md#join_use_nulls)
-- [max\_rows\_in\_join](../settings/query_complexity.md#settings-max_rows_in_join)
-- [max\_bytes\_in\_join](../settings/query_complexity.md#settings-max_bytes_in_join)
-- [join\_overflow\_mode](../settings/query_complexity.md#settings-join_overflow_mode)
-- [join\_any\_take\_last\_row](../settings/settings.md#settings-join_any_take_last_row)
+- [join\_use\_nulls](../../../operations/settings/settings.md#join_use_nulls)
+- [max\_rows\_in\_join](../../../operations/settings/query_complexity.md#settings-max_rows_in_join)
+- [max\_bytes\_in\_join](../../../operations/settings/query_complexity.md#settings-max_bytes_in_join)
+- [join\_overflow\_mode](../../../operations/settings/query_complexity.md#settings-join_overflow_mode)
+- [join\_any\_take\_last\_row](../../../operations/settings/settings.md#settings-join_any_take_last_row)
 
 Таблицы с движком `Join` нельзя использовать в операциях `GLOBAL JOIN`.
 
-Движок `Join` позволяет использовать параметр [join\_use\_nulls](../settings/settings.md#join_use_nulls) в запросе `CREATE TABLE`, который также можно использовать в запросе [SELECT](../../query_language/select.md). Если у вас разные настройки `join_use_nulls`, вы можете получить сообщение об ошибке при объединении таблиц. Это зависит от типа соединения. Когда вы используете функцию [joinGet](../../query_language/functions/other_functions.md#joinget), вам необходимо использовать один и тот же параметр `join_use_nulls` в запросах `CRATE TABLE` и `SELECT`.
+Движок `Join` позволяет использовать параметр [join\_use\_nulls](../../../operations/settings/settings.md#join_use_nulls) в запросе `CREATE TABLE`, который также можно использовать в запросе [SELECT](../../../engines/table_engines/special/join.md). Если у вас разные настройки `join_use_nulls`, вы можете получить сообщение об ошибке при объединении таблиц. Это зависит от типа соединения. Когда вы используете функцию [joinGet](../../../engines/table_engines/special/join.md#joinget), вам необходимо использовать один и тот же параметр `join_use_nulls` в запросах `CREATE TABLE` и `SELECT`.
 
 ## Хранение данных {#khranenie-dannykh}
 
diff --git a/docs/ru/engines/table_engines/special/materializedview.md b/docs/ru/engines/table_engines/special/materializedview.md
new file mode 100644
index 00000000000..2adcdb8df70
--- /dev/null
+++ b/docs/ru/engines/table_engines/special/materializedview.md
@@ -0,0 +1,5 @@
+# MaterializedView {#materializedview}
+
+Используется для реализации материализованных представлений (подробнее см. запрос [CREATE TABLE](../../../engines/table_engines/special/materializedview.md)). Для хранения данных использует другой движок, который был указан при создании представления. При чтении из таблицы просто использует этот движок.
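Для иллюстрации минимальный набросок (имена `visits_mv` и `visits_src`, а также движок хранения условные):

``` sql
CREATE MATERIALIZED VIEW visits_mv
ENGINE = SummingMergeTree()
ORDER BY StartDate
AS SELECT StartDate, count() AS visits
FROM visits_src
GROUP BY StartDate
```

При чтении из `visits_mv` используется указанный при создании движок SummingMergeTree.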
+ +[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/table_engines/materializedview/) diff --git a/docs/ru/operations/table_engines/memory.md b/docs/ru/engines/table_engines/special/memory.md similarity index 100% rename from docs/ru/operations/table_engines/memory.md rename to docs/ru/engines/table_engines/special/memory.md diff --git a/docs/ru/operations/table_engines/merge.md b/docs/ru/engines/table_engines/special/merge.md similarity index 98% rename from docs/ru/operations/table_engines/merge.md rename to docs/ru/engines/table_engines/special/merge.md index 4f11cf77352..65dd8dc7a2c 100644 --- a/docs/ru/operations/table_engines/merge.md +++ b/docs/ru/engines/table_engines/special/merge.md @@ -52,7 +52,7 @@ FROM WatchLog ## Виртуальные столбцы {#virtualnye-stolbtsy} -- `_table` — содержит имя таблицы, из которой данные были прочитаны. Тип — [String](../../data_types/string.md). +- `_table` — содержит имя таблицы, из которой данные были прочитаны. Тип — [String](../../../engines/table_engines/special/merge.md). В секции `WHERE/PREWHERE` можно установить константное условие на столбец `_table` (например, `WHERE _table='xyz'`). В этом случае операции чтения выполняются только для тех таблиц, для которых выполняется условие на значение `_table`, таким образом, столбец `_table` работает как индекс. diff --git a/docs/ru/operations/table_engines/null.md b/docs/ru/engines/table_engines/special/null.md similarity index 100% rename from docs/ru/operations/table_engines/null.md rename to docs/ru/engines/table_engines/special/null.md diff --git a/docs/ru/operations/table_engines/set.md b/docs/ru/engines/table_engines/special/set.md similarity index 100% rename from docs/ru/operations/table_engines/set.md rename to docs/ru/engines/table_engines/special/set.md diff --git a/docs/ru/operations/table_engines/url.md b/docs/ru/engines/table_engines/special/url.md similarity index 95% rename from docs/ru/operations/table_engines/url.md rename to docs/ru/engines/table_engines/special/url.md index 6f9d22e887d..5f4696286d7 100644 --- a/docs/ru/operations/table_engines/url.md +++ b/docs/ru/engines/table_engines/special/url.md @@ -7,7 +7,7 @@ `Format` должен быть таким, который ClickHouse может использовать в запросах `SELECT` и, если есть необходимость, `INSERT`. Полный список поддерживаемых форматов смотрите в -разделе [Форматы](../../interfaces/formats.md#formats). +разделе [Форматы](../../../interfaces/formats.md#formats). `URL` должен соответствовать структуре Uniform Resource Locator. По указанному URL должен находится сервер работающий по протоколу HTTP или HTTPS. При этом не должно требоваться никаких @@ -17,7 +17,7 @@ соответственно. Для обработки `POST`-запросов удаленный сервер должен поддерживать [Chunked transfer encoding](https://ru.wikipedia.org/wiki/Chunked_transfer_encoding). -Максимальное количество переходов по редиректам при выполнении HTTP-запроса методом GET можно ограничить с помощью настройки [max\_http\_get\_redirects](../settings/settings.md#setting-max_http_get_redirects). +Максимальное количество переходов по редиректам при выполнении HTTP-запроса методом GET можно ограничить с помощью настройки [max\_http\_get\_redirects](../../../operations/settings/settings.md#setting-max_http_get_redirects). 
 **Пример:**
 
diff --git a/docs/ru/operations/table_engines/view.md b/docs/ru/engines/table_engines/special/view.md
similarity index 100%
rename from docs/ru/operations/table_engines/view.md
rename to docs/ru/engines/table_engines/special/view.md
diff --git a/docs/ru/faq/general.md b/docs/ru/faq/general.md
index e5a5e0c00fa..5bfe8ea8f2d 100644
--- a/docs/ru/faq/general.md
+++ b/docs/ru/faq/general.md
@@ -25,7 +25,7 @@ NLS_LANG=RUSSIAN_RUSSIA.UTF8
 
 ### Секция INTO OUTFILE {#sektsiia-into-outfile}
 
-Добавьте секцию [INTO OUTFILE](../query_language/select/#into-outfile-clause) к своему запросу.
+Добавьте секцию [INTO OUTFILE](../sql_reference/statements/select.md#into-outfile-clause) к своему запросу.
 
 Например:
 
@@ -33,7 +33,7 @@ NLS_LANG=RUSSIAN_RUSSIA.UTF8
 SELECT * FROM table INTO OUTFILE 'file'
 ```
 
-По умолчанию, для выдачи данных ClickHouse использует формат [TabSeparated](../interfaces/formats.md#tabseparated). Чтобы выбрать [формат данных](../interfaces/formats.md), используйте [секцию FORMAT](../query_language/select/#format-clause).
+По умолчанию, для выдачи данных ClickHouse использует формат [TabSeparated](../interfaces/formats.md#tabseparated). Чтобы выбрать [формат данных](../interfaces/formats.md), используйте [секцию FORMAT](../sql_reference/statements/select.md#format-clause).
 
 Например:
 
@@ -43,7 +43,7 @@ SELECT * FROM table INTO OUTFILE 'file' FORMAT CSV
 
 ### Таблица с движком File {#tablitsa-s-dvizhkom-file}
 
-Смотрите [File](../operations/table_engines/file.md).
+Смотрите [File](../engines/table_engines/special/file.md).
 
 ### Перенаправление в командной строке {#perenapravlenie-v-komandoi-stroke}
 
diff --git a/docs/ru/faq/index.md b/docs/ru/faq/index.md
new file mode 100644
index 00000000000..2ee9d51e83b
--- /dev/null
+++ b/docs/ru/faq/index.md
@@ -0,0 +1,6 @@
+---
+toc_folder_title: F.A.Q.
+toc_priority: 76
+---
+
+
diff --git a/docs/ru/getting_started/tutorial.md b/docs/ru/getting_started/tutorial.md
index 4f23dbe756d..4a31f4b23a2 100644
--- a/docs/ru/getting_started/tutorial.md
+++ b/docs/ru/getting_started/tutorial.md
@@ -1,18 +1,19 @@
 ---
-en_copy: true
+machine_translated: true
+machine_translated_rev: 1cd5f0028d917696daf71ac1c9ee849c99c1d5c8
 ---
 
-# ClickHouse Tutorial {#clickhouse-tutorial}
+# Учебник по ClickHouse {#clickhouse-tutorial}
 
-## What to Expect from This Tutorial? {#what-to-expect-from-this-tutorial}
+## Чего ожидать от этого учебника? {#what-to-expect-from-this-tutorial}
 
-By going through this tutorial, you’ll learn how to set up a simple ClickHouse cluster. It’ll be small, but fault-tolerant and scalable. Then we will use one of the example datasets to fill it with data and execute some demo queries.
+Пройдя этот учебник, вы узнаете, как настроить простой кластер ClickHouse. Он будет небольшим, но отказоустойчивым и масштабируемым. Затем мы заполним его данными одного из тестовых наборов и выполним несколько демонстрационных запросов.
 
-## Single Node Setup {#single-node-setup}
+## Настройка одного узла {#single-node-setup}
 
-To postpone the complexities of a distributed environment, we’ll start with deploying ClickHouse on a single server or virtual machine. ClickHouse is usually installed from [deb](index.md#install-from-deb-packages) or [rpm](index.md#from-rpm-packages) packages, but there are [alternatives](index.md#from-docker-image) for the operating systems that do no support them.
+Чтобы избежать сложностей распределенной среды, мы начнем с развертывания ClickHouse на одном сервере или виртуальной машине. 
ClickHouse обычно устанавливается из [deb](install.md#install-from-deb-packages)- или [rpm](install.md#from-rpm-packages)-пакетов, но есть и [альтернативы](install.md#from-docker-image) для операционных систем, которые их не поддерживают.
 
-For example, you have chosen `deb` packages and executed:
+Например, вы выбрали пакеты `deb` и выполнили:
 
 ``` bash
 sudo apt-get install dirmngr
@@ -24,48 +25,48 @@ sudo apt-get update
 sudo apt-get install -y clickhouse-server clickhouse-client
 ```
 
-What do we have in the packages that got installed:
+Что содержится в установленных пакетах:
 
-- `clickhouse-client` package contains [clickhouse-client](../interfaces/cli.md) application, interactive ClickHouse console client.
-- `clickhouse-common` package contains a ClickHouse executable file.
-- `clickhouse-server` package contains configuration files to run ClickHouse as a server.
+- Пакет `clickhouse-client` содержит приложение [clickhouse-client](../interfaces/cli.md), интерактивный консольный клиент ClickHouse.
+- Пакет `clickhouse-common` содержит исполняемый файл ClickHouse.
+- Пакет `clickhouse-server` содержит файлы конфигурации для запуска ClickHouse в качестве сервера.
 
-Server config files are located in `/etc/clickhouse-server/`. Before going further, please notice the `<path>` element in `config.xml`. Path determines the location for data storage, so it should be located on volume with large disk capacity; the default value is `/var/lib/clickhouse/`. If you want to adjust the configuration, it’s not handy to directly edit `config.xml` file, considering it might get rewritten on future package updates. The recommended way to override the config elements is to create [files in config.d directory](../operations/configuration_files.md) which serve as “patches” to config.xml.
+Файлы конфигурации сервера находятся в `/etc/clickhouse-server/`. Прежде чем идти дальше, обратите внимание на элемент `<path>` в `config.xml`. Он определяет место хранения данных, поэтому каталог должен располагаться на томе с большой ёмкостью диска; значение по умолчанию равно `/var/lib/clickhouse/`. Редактировать `config.xml` напрямую неудобно: при будущих обновлениях пакета файл может быть перезаписан. Рекомендуемый способ переопределения элементов конфигурации заключается в создании [файлов в каталоге config.d](../operations/configuration_files.md), которые служат «патчами» к config.xml.
 
-As you might have noticed, `clickhouse-server` is not launched automatically after package installation. It won’t be automatically restarted after updates, either. The way you start the server depends on your init system, usually, it is:
+Как вы могли заметить, `clickhouse-server` не запускается автоматически после установки пакета и не перезапускается автоматически после обновлений. Способ запуска сервера зависит от вашей init-системы; как правило, это:
 
 ``` bash
 sudo service clickhouse-server start
 ```
 
-or
+или
 
 ``` bash
 sudo /etc/init.d/clickhouse-server start
 ```
 
-The default location for server logs is `/var/log/clickhouse-server/`. The server is ready to handle client connections once it logs the `Ready for connections` message.
+По умолчанию журналы сервера находятся в `/var/log/clickhouse-server/`. Сервер готов обрабатывать подключения клиентов, как только запишет в журнал сообщение `Ready for connections`.
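Проверить, что сервер действительно поднялся, можно, например, так (набросок; имя сервиса и путь к журналу взяты по умолчанию):

``` bash
sudo service clickhouse-server status
grep 'Ready for connections' /var/log/clickhouse-server/clickhouse-server.log
```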
-Once the `clickhouse-server` is up and running, we can use `clickhouse-client` to connect to the server and run some test queries like `SELECT "Hello, world!";`.
+Когда `clickhouse-server` запущен и работает, можно использовать `clickhouse-client` для подключения к серверу и выполнения тестовых запросов вроде `SELECT "Hello, world!";`.
 
-Quick tips for clickhouse-client
-Interactive mode:
+Быстрые советы по clickhouse-client
+Интерактивный режим:
 
 ``` bash
 clickhouse-client
 clickhouse-client --host=... --port=... --user=... --password=...
 ```
 
-Enable multiline queries:
+Включение многострочных запросов:
 
 ``` bash
 clickhouse-client -m
 clickhouse-client --multiline
 ```
 
-Run queries in batch-mode:
+Запуск запросов в пакетном режиме:
 
 ``` bash
 clickhouse-client --query='SELECT 1'
@@ -73,7 +74,7 @@ echo 'SELECT 1' | clickhouse-client
 clickhouse-client <<< 'SELECT 1'
 ```
 
-Insert data from a file in specified format:
+Вставка данных из файла в заданном формате:
 
 ``` bash
 clickhouse-client --query='INSERT INTO table VALUES' < data.txt
@@ -82,39 +83,39 @@ clickhouse-client --query='INSERT INTO table FORMAT TabSeparated' < data.tsv
 
-## Import Sample Dataset {#import-sample-dataset}
+## Импорт тестового набора данных {#import-sample-dataset}
 
-Now it’s time to fill our ClickHouse server with some sample data. In this tutorial, we’ll use the anonymized data of Yandex.Metrica, the first service that runs ClickHouse in production way before it became open-source (more on that in [history section](../introduction/history.md)). There are [multiple ways to import Yandex.Metrica dataset](example_datasets/metrica.md), and for the sake of the tutorial, we’ll go with the most realistic one.
+Пришло время заполнить наш сервер ClickHouse тестовыми данными. В этом учебнике мы используем анонимизированные данные Яндекс.Метрики, первого сервиса, работавшего на ClickHouse в продакшене ещё до того, как ClickHouse стал open-source (подробнее об этом в [разделе истории](../introduction/history.md)). Есть [несколько способов импортировать набор данных Яндекс.Метрики](example_datasets/metrica.md); в учебнике мы выберем самый реалистичный из них.
 
-### Download and Extract Table Data {#download-and-extract-table-data}
+### Загрузка и извлечение данных таблиц {#download-and-extract-table-data}
 
 ``` bash
 curl https://clickhouse-datasets.s3.yandex.net/hits/tsv/hits_v1.tsv.xz | unxz --threads=`nproc` > hits_v1.tsv
 curl https://clickhouse-datasets.s3.yandex.net/visits/tsv/visits_v1.tsv.xz | unxz --threads=`nproc` > visits_v1.tsv
 ```
 
-The extracted files are about 10GB in size.
+Извлечённые файлы занимают около 10 ГБ.
 
-### Create Tables {#create-tables}
+### Создание таблиц {#create-tables}
 
-As in most databases management systems, ClickHouse logically groups tables into “databases”. There’s a `default` database, but we’ll create a new one named `tutorial`:
+Как и в большинстве СУБД, ClickHouse логически группирует таблицы в «базы данных». Существует база данных `default`, но мы создадим новую с именем `tutorial`:
 
 ``` bash
 clickhouse-client --query "CREATE DATABASE IF NOT EXISTS tutorial"
 ```
 
-Syntax for creating tables is way more complicated compared to databases (see [reference](../query_language/create.md). In general `CREATE TABLE` statement has to specify three key things:
+Синтаксис создания таблиц заметно сложнее, чем у баз данных (см. [справку](../sql_reference/statements/create.md)). В общем случае запрос `CREATE TABLE` должен указывать три ключевые вещи:
 
-1. Name of table to create.
-2. Table schema, i.e. list of columns and their [data types](../data_types/index.md).
-3. [Table engine](../operations/table_engines/index.md) and it’s settings, which determines all the details on how queries to this table will be physically executed.
+1. Имя создаваемой таблицы.
+2. Схему таблицы, то есть список столбцов и их [типов данных](../sql_reference/data_types/index.md).
+3. [Движок таблицы](../engines/table_engines/index.md) и его настройки, которые определяют все детали того, как запросы к этой таблице будут физически выполняться.
 
-Yandex.Metrica is a web analytics service, and sample dataset doesn’t cover its full functionality, so there are only two tables to create:
+Яндекс.Метрика - это сервис веб-аналитики, и тестовый набор данных не покрывает всю его функциональность, поэтому создать нужно только две таблицы:
 
-- `hits` is a table with each action done by all users on all websites covered by the service.
-- `visits` is a table that contains pre-built sessions instead of individual actions. 
+- `hits` - таблица с каждым действием, выполненным всеми пользователями на всех сайтах, которые охватывает сервис.
+- `visits` - таблица, которая содержит предварительно собранные сессии вместо отдельных действий.
 
-Let’s see and execute the real create table queries for these tables:
+Давайте посмотрим и выполним реальные запросы `CREATE TABLE` для этих таблиц:
 
 ``` sql
 CREATE TABLE tutorial.hits_v1
@@ -457,22 +458,22 @@ SAMPLE BY intHash32(UserID)
 SETTINGS index_granularity = 8192
 ```
 
-You can execute those queries using the interactive mode of `clickhouse-client` (just launch it in a terminal without specifying a query in advance) or try some [alternative interface](../interfaces/index.md) if you want.
+Вы можете выполнить эти запросы в интерактивном режиме `clickhouse-client` (просто запустите его в терминале, не указывая запрос заранее) или, при желании, воспользоваться каким-либо [альтернативным интерфейсом](../interfaces/index.md).
 
-As we can see, `hits_v1` uses the [basic MergeTree engine](../operations/table_engines/mergetree.md), while the `visits_v1` uses the [Collapsing](../operations/table_engines/collapsingmergetree.md) variant.
+Как видно, `hits_v1` использует [базовый движок MergeTree](../engines/table_engines/mergetree_family/mergetree.md), а `visits_v1` использует вариант [CollapsingMergeTree](../engines/table_engines/mergetree_family/collapsingmergetree.md).
 
-### Import Data {#import-data}
+### Импорт данных {#import-data}
 
-Data import to ClickHouse is done via [INSERT INTO](../query_language/insert_into.md) query like in many other SQL databases. However, data is usually provided in one of the [supported serialization formats](../interfaces/formats.md) instead of `VALUES` clause (which is also supported).
+Импорт данных в ClickHouse выполняется запросом [INSERT INTO](../sql_reference/statements/insert_into.md), как и во многих других SQL-базах. Однако данные обычно передаются в одном из [поддерживаемых форматов сериализации](../interfaces/formats.md) вместо секции `VALUES` (которая тоже поддерживается).
 
-The files we downloaded earlier are in tab-separated format, so here’s how to import them via console client:
+Загруженные ранее файлы имеют формат с разделением табуляцией, поэтому импортировать их через консольный клиент можно так:
 
 ``` bash
 clickhouse-client --query "INSERT INTO tutorial.hits_v1 FORMAT TSV" --max_insert_block_size=100000 < hits_v1.tsv
 clickhouse-client --query "INSERT INTO tutorial.visits_v1 FORMAT TSV" --max_insert_block_size=100000 < visits_v1.tsv
 ```
 
-ClickHouse has a lot of [settings to tune](../operations/settings/index.md) and one way to specify them in console client is via arguments, as we can see with `--max_insert_block_size`. The easiest way to figure out what settings are available, what do they mean and what the defaults are is to query the `system.settings` table:
+У ClickHouse множество [настроек](../operations/settings/index.md), и один из способов указать их в консольном клиенте - через аргументы, как на примере `--max_insert_block_size`. Самый простой способ выяснить, какие настройки доступны, что они означают и каковы значения по умолчанию, - запросить таблицу `system.settings`:
 
 ``` sql
 SELECT name, value, changed, description
@@ -483,23 +484,23 @@ FORMAT TSV
 
 max_insert_block_size    1048576    0    "The maximum block size for insertion, if we control the creation of blocks for insertion."
 ```
 
-Optionally you can [OPTIMIZE](../query_language/misc/#misc_operations-optimize) the tables after import. Tables that are configured with an engine from MergeTree-family always do merges of data parts in the background to optimize data storage (or at least check if it makes sense). These queries force the table engine to do storage optimization right now instead of some time later:
+После импорта можно при желании выполнить [OPTIMIZE](../sql_reference/misc/#misc_operations-optimize) для таблиц. Таблицы с движком из семейства MergeTree всегда выполняют слияние кусков данных в фоновом режиме для оптимизации хранения (или, по крайней мере, проверяют, имеет ли это смысл). Эти запросы заставляют движок таблицы выполнить оптимизацию хранилища прямо сейчас, а не спустя некоторое время:
 
 ``` bash
 clickhouse-client --query "OPTIMIZE TABLE tutorial.hits_v1 FINAL"
 clickhouse-client --query "OPTIMIZE TABLE tutorial.visits_v1 FINAL"
 ```
 
-These queries start an I/O and CPU intensive operation, so if the table consistently receives new data, it’s better to leave it alone and let merges run in the background.
+Эти запросы запускают операцию, интенсивно использующую ввод-вывод и процессор, поэтому, если таблица постоянно получает новые данные, лучше оставить её в покое и позволить слияниям работать в фоновом режиме.
 
-Now we can check if the table import was successful:
+Теперь можно проверить, что импорт таблиц прошёл успешно:
 
 ``` bash
 clickhouse-client --query "SELECT COUNT(*) FROM tutorial.hits_v1"
 clickhouse-client --query "SELECT COUNT(*) FROM tutorial.visits_v1"
 ```
 
-## Example Queries {#example-queries}
+## Примеры запросов {#example-queries}
 
 ``` sql
 SELECT
@@ -521,18 +522,18 @@ FROM tutorial.visits_v1
 WHERE (CounterID = 912887) AND (toYYYYMM(StartDate) = 201403) AND (domain(StartURL) = 'yandex.ru')
 ```
 
-## Cluster Deployment {#cluster-deployment}
+## Развёртывание кластера {#cluster-deployment}
 
-ClickHouse cluster is a homogenous cluster. Steps to set up:
+Кластер ClickHouse - это однородный кластер. Шаги настройки:
 
-1. Install ClickHouse server on all machines of the cluster
-2. Set up cluster configs in configuration files
-3. Create local tables on each instance
-4. Create a [Distributed table](../operations/table_engines/distributed.md)
+1. Установите сервер ClickHouse на всех машинах кластера
+2. Настройте конфигурацию кластера в конфигурационных файлах
+3. Создайте локальные таблицы на каждом экземпляре
+4. Создайте [распределённую таблицу](../engines/table_engines/special/distributed.md)
 
-[Distributed table](../operations/table_engines/distributed.md) is actually a kind of “view” to local tables of ClickHouse cluster. SELECT query from a distributed table executes using resources of all cluster’s shards. You may specify configs for multiple clusters and create multiple distributed tables providing views to different clusters.
+[Распределённая таблица](../engines/table_engines/special/distributed.md) - это, по сути, своего рода «представление» локальных таблиц кластера ClickHouse. Запрос SELECT из распределённой таблицы выполняется с использованием ресурсов всех шардов кластера. Вы можете указать конфигурации для нескольких кластеров и создать несколько распределённых таблиц, предоставляющих представления для разных кластеров.
-Example config for a cluster with three shards, one replica each:
+Пример конфигурации для кластера с тремя шардами, по одной реплике в каждом:
 
 ``` xml
 
 ```
 
-For further demonstration, let’s create a new local table with the same `CREATE TABLE` query that we used for `hits_v1`, but different table name:
+Для дальнейшей демонстрации давайте создадим новую локальную таблицу тем же запросом `CREATE TABLE`, который мы использовали для `hits_v1`, но с другим именем таблицы:
 
 ``` sql
 CREATE TABLE tutorial.hits_local (...) ENGINE = MergeTree() ...
 ```
 
-Creating a distributed table providing a view into local tables of the cluster:
+Создание распределённой таблицы, предоставляющей представление локальных таблиц кластера:
 
 ``` sql
 CREATE TABLE tutorial.hits_all AS tutorial.hits_local
 ENGINE = Distributed(perftest_3shards_1replicas, tutorial, hits_local, rand());
 ```
 
-A common practice is to create similar Distributed tables on all machines of the cluster. It allows running distributed queries on any machine of the cluster. Also there’s an alternative option to create temporary distributed table for a given SELECT query using [remote](../query_language/table_functions/remote.md) table function.
+Распространённая практика - создавать одинаковые распределённые таблицы на всех машинах кластера. Это позволяет выполнять распределённые запросы на любой машине кластера. Кроме того, есть альтернативный вариант: создать временную распределённую таблицу для конкретного запроса SELECT с помощью табличной функции [remote](../sql_reference/table_functions/remote.md).
 
-Let’s run [INSERT SELECT](../query_language/insert_into.md) into the Distributed table to spread the table to multiple servers.
+Давайте выполним [INSERT SELECT](../sql_reference/statements/insert_into.md) в распределённую таблицу, чтобы распределить данные по нескольким серверам.
 
 ``` sql
 INSERT INTO tutorial.hits_all SELECT * FROM tutorial.hits_v1;
 ```
 
-!!! warning "Notice"
-    This approach is not suitable for the sharding of large tables. There’s a separate tool [clickhouse-copier](../operations/utils/clickhouse-copier.md) that can re-shard arbitrary large tables.
+!!! warning "Внимание"
+    Такой подход не годится для шардирования больших таблиц. Есть отдельный инструмент [clickhouse-copier](../operations/utilities/clickhouse-copier.md), который умеет перешардировать произвольно большие таблицы.
 
-As you could expect, computationally heavy queries run N times faster if they utilize 3 servers instead of one.
+Как и следовало ожидать, вычислительно тяжёлые запросы выполняются в N раз быстрее, если используют 3 сервера вместо одного.
 
-In this case, we have used a cluster with 3 shards, and each contains a single replica.
+В этом случае мы использовали кластер с 3 шардами, каждый из которых содержит одну реплику.
 
-To provide resilience in a production environment, we recommend that each shard should contain 2-3 replicas spread between multiple availability zones or datacenters (or at least racks). Note that ClickHouse supports an unlimited number of replicas.
+Для обеспечения отказоустойчивости в продакшене рекомендуется, чтобы каждый шард содержал 2-3 реплики, распределённые между несколькими зонами доступности или дата-центрами (или хотя бы стойками). Обратите внимание, что ClickHouse поддерживает неограниченное количество реплик.
-Example config for a cluster of one shard containing three replicas:
+Пример конфигурации для кластера из одного шарда, содержащего три реплики:
 
 ``` xml
 
 ```
 
-To enable native replication [ZooKeeper](http://zookeeper.apache.org/) is required. ClickHouse takes care of data consistency on all replicas and runs restore procedure after failure automatically. It’s recommended to deploy the ZooKeeper cluster on separate servers (where no other processes including ClickHouse are running).
+Для включения встроенной репликации требуется [ZooKeeper](http://zookeeper.apache.org/). ClickHouse заботится о согласованности данных на всех репликах и автоматически запускает процедуру восстановления после сбоя. Кластер ZooKeeper рекомендуется разворачивать на отдельных серверах (на которых не запущены никакие другие процессы, включая ClickHouse).
 
-!!! note "Note"
-    ZooKeeper is not a strict requirement: in some simple cases, you can duplicate the data by writing it into all the replicas from your application code. This approach is **not** recommended, in this case, ClickHouse won’t be able to guarantee data consistency on all replicas. Thus it becomes the responsibility of your application.
+!!! note "Примечание"
+    ZooKeeper не является строгим требованием: в некоторых простых случаях вы можете дублировать данные, записывая их во все реплики из кода вашего приложения. Такой подход **не** рекомендуется: в этом случае ClickHouse не сможет гарантировать согласованность данных на всех репликах, и она становится ответственностью вашего приложения.
 
-ZooKeeper locations are specified in the configuration file:
+Адреса ZooKeeper указываются в конфигурационном файле:
 
 ``` xml
 
 ```
 
-Also, we need to set macros for identifying each shard and replica which are used on table creation:
+Кроме того, нужно задать макросы для идентификации каждого шарда и реплики; они используются при создании таблиц:
 
 ``` xml
 
 ```
 
-If there are no replicas at the moment on replicated table creation, a new first replica is instantiated. If there are already live replicas, the new replica clones data from existing ones. You have an option to create all replicated tables first, and then insert data to it. Another option is to create some replicas and add the others after or during data insertion.
+Если на момент создания реплицируемой таблицы реплик ещё нет, создаётся новая первая реплика. Если живые реплики уже существуют, новая реплика клонирует данные из существующих. Вы можете сначала создать все реплицируемые таблицы, а затем вставить в них данные. Другой вариант: создать часть реплик, а остальные добавить после или во время вставки данных.
 
 ``` sql
 CREATE TABLE tutorial.hits_replica (...)
 ENGINE = ReplicatedMergeTree(
 ...
 ```
 
-Here we use [ReplicatedMergeTree](../operations/table_engines/replication.md) table engine. In parameters we specify ZooKeeper path containing shard and replica identifiers.
+Здесь мы используем движок таблиц [ReplicatedMergeTree](../engines/table_engines/mergetree_family/replication.md). В параметрах мы указываем путь в ZooKeeper, содержащий идентификаторы шарда и реплики.
``` sql INSERT INTO tutorial.hits_replica SELECT * FROM tutorial.hits_local; ``` -Replication operates in multi-master mode. Data can be loaded into any replica, and the system then syncs it with other instances automatically. Replication is asynchronous so at a given moment, not all replicas may contain recently inserted data. At least one replica should be up to allow data ingestion. Others will sync up data and repair consistency once they will become active again. Note that this approach allows for the low possibility of a loss of recently inserted data. +Репликация работает в режиме мульти-мастер. Данные могут быть загружены в любую реплику, а затем система автоматически синхронизирует их с другими экземплярами. Репликация является асинхронной, поэтому в данный момент не все реплики могут содержать недавно вставленные данные. По крайней мере, одна реплика должна быть готова, чтобы обеспечить прием данных. Другие будут синхронизировать данные и восстанавливать согласованность, как только они снова станут активными. Обратите внимание, что этот подход допускает низкую вероятность потери недавно вставленных данных. -[Original article](https://clickhouse.tech/docs/en/getting_started/tutorial/) +[Оригинальная статья](https://clickhouse.tech/docs/en/getting_started/tutorial/) diff --git a/docs/ru/guides/apply_catboost_model.md b/docs/ru/guides/apply_catboost_model.md index f6c9799a171..2f94753b7f2 100644 --- a/docs/ru/guides/apply_catboost_model.md +++ b/docs/ru/guides/apply_catboost_model.md @@ -178,7 +178,7 @@ LIMIT 10 ``` !!! note "Примечание" - Функция [modelEvaluate](../query_language/functions/other_functions.md#function-modelevaluate) возвращает кортежи (tuple) с исходными прогнозами по классам для моделей с несколькими классами. + Функция [modelEvaluate](../sql_reference/functions/other_functions.md#function-modelevaluate) возвращает кортежи (tuple) с исходными прогнозами по классам для моделей с несколькими классами. Спрогнозируйте вероятность: @@ -201,7 +201,7 @@ LIMIT 10 ``` !!! note "Примечание" - Подробнее про функцию [exp()](../query_language/functions/math_functions.md). + Подробнее про функцию [exp()](../sql_reference/functions/math_functions.md). Посчитайте логистическую функцию потерь (LogLoss) на всей выборке: @@ -227,4 +227,4 @@ FROM ``` !!! note "Примечание" - Подробнее про функции [avg()](../query_language/agg_functions/reference.md#agg_function-avg), [log()](../query_language/functions/math_functions.md). + Подробнее про функции [avg()](../sql_reference/aggregate_functions/reference.md#agg_function-avg), [log()](../sql_reference/functions/math_functions.md). diff --git a/docs/ru/interfaces/cli.md b/docs/ru/interfaces/cli.md index 749e93c34ad..b76e96cc1dc 100644 --- a/docs/ru/interfaces/cli.md +++ b/docs/ru/interfaces/cli.md @@ -88,7 +88,7 @@ clickhouse-client --param_parName="[1, 2]" -q "SELECT * FROM table WHERE a = {p ``` - `name` — идентификатор подстановки. В консольном клиенте его следует использовать как часть имени параметра `--param_ = value`. -- `data type` — [тип данных](../data_types/index.md) значения. Например, структура данных `(integer, ('string', integer))` может иметь тип данных `Tuple(UInt8, Tuple(String, UInt8))` ([целочисленный](../data_types/int_uint.md) тип может быть и другим). +- `data type` — [тип данных](../sql_reference/data_types/index.md) значения. 
Например, структура данных `(integer, ('string', integer))` может иметь тип данных `Tuple(UInt8, Tuple(String, UInt8))` ([целочисленный](../sql_reference/data_types/int_uint.md) тип может быть и другим). #### Пример {#primer} diff --git a/docs/ru/interfaces/formats.md b/docs/ru/interfaces/formats.md index 8d5a38a3a63..27cab90bdd4 100644 --- a/docs/ru/interfaces/formats.md +++ b/docs/ru/interfaces/formats.md @@ -99,9 +99,9 @@ world Массивы форматируются в виде списка значений через запятую в квадратных скобках. Элементы массива - числа форматируются как обычно, а даты, даты-с-временем и строки - в одинарных кавычках с такими же правилами экранирования, как указано выше. -[NULL](../query_language/syntax.md) форматируется как `\N`. +[NULL](../sql_reference/syntax.md) форматируется как `\N`. -Каждый элемент структуры типа [Nested](../data_types/nested_data_structures/nested.md) представляется как отдельный массив. +Каждый элемент структуры типа [Nested](../sql_reference/data_types/nested_data_structures/nested.md) представляется как отдельный массив. Например: @@ -302,7 +302,7 @@ SearchPhrase=дизайн штор count()=1064 SearchPhrase=баку count()=1000 ``` -[NULL](../query_language/syntax.md) форматируется как `\N`. +[NULL](../sql_reference/syntax.md) форматируется как `\N`. ``` sql SELECT * FROM t_null FORMAT TSKV @@ -432,7 +432,7 @@ JSON совместим с JavaScript. Для этого, дополнитель Этот формат подходит только для вывода результата выполнения запроса, но не для парсинга (приёма данных для вставки в таблицу). -ClickHouse поддерживает [NULL](../query_language/syntax.md), который при выводе JSON будет отображен как `null`. +ClickHouse поддерживает [NULL](../sql_reference/syntax.md), который при выводе JSON будет отображен как `null`. Смотрите также формат [JSONEachRow](#jsoneachrow) . @@ -507,7 +507,7 @@ ClickHouse игнорирует пробелы между элементами **Обработка пропущенных значений** -ClickHouse заменяет опущенные значения значениями по умолчанию для соответствующих [data types](../data_types/index.md). +ClickHouse заменяет опущенные значения значениями по умолчанию для соответствующих [data types](../sql_reference/data_types/index.md). Если указано `DEFAULT expr`, то ClickHouse использует различные правила подстановки в зависимости от настройки [input\_format\_defaults\_for\_omitted\_fields](../operations/settings/settings.md#session_settings-input_format_defaults_for_omitted_fields). @@ -552,7 +552,7 @@ CREATE TABLE IF NOT EXISTS example_table ### Использование вложенных структур {#jsoneachrow-nested} -Если у вас есть таблица со столбцами типа [Nested](../data_types/nested_data_structures/nested.md), то в неё можно вставить данные из JSON-документа с такой же структурой. Функциональность включается настройкой [input\_format\_import\_nested\_json](../operations/settings/settings.md#settings-input_format_import_nested_json). +Если у вас есть таблица со столбцами типа [Nested](../sql_reference/data_types/nested_data_structures/nested.md), то в неё можно вставить данные из JSON-документа с такой же структурой. Функциональность включается настройкой [input\_format\_import\_nested\_json](../operations/settings/settings.md#settings-input_format_import_nested_json). Например, рассмотрим следующую таблицу: @@ -626,7 +626,7 @@ SELECT * FROM json_each_row_nested Рисуется полная сетка таблицы и, таким образом, каждая строчка занимает две строки в терминале. Каждый блок результата выводится в виде отдельной таблицы. 
Это нужно, чтобы можно было выводить блоки без буферизации результата (буферизация потребовалась бы, чтобы заранее вычислить видимую ширину всех значений.) -[NULL](../query_language/syntax.md) выводится как `ᴺᵁᴸᴸ`. +[NULL](../sql_reference/syntax.md) выводится как `ᴺᵁᴸᴸ`. ``` sql SELECT * FROM t_null @@ -728,7 +728,7 @@ FixedString представлены просто как последовате Array представлены как длина в формате varint (unsigned [LEB128](https://en.wikipedia.org/wiki/LEB128)), а затем элементы массива, подряд. -Для поддержки [NULL](../query_language/syntax.md#null-literal) перед каждым значением типа [Nullable](../data_types/nullable.md) следует байт содержащий 1 или 0. Если байт 1, то значение равно NULL, и этот байт интерпретируется как отдельное значение (т.е. после него следует значение следующего поля). Если байт 0, то после байта следует значение поля (не равно NULL). +Для поддержки [NULL](../sql_reference/syntax.md#null-literal) перед каждым значением типа [Nullable](../sql_reference/data_types/nullable.md) следует байт содержащий 1 или 0. Если байт 1, то значение равно NULL, и этот байт интерпретируется как отдельное значение (т.е. после него следует значение следующего поля). Если байт 0, то после байта следует значение поля (не равно NULL). ## RowBinaryWithNamesAndTypes {#rowbinarywithnamesandtypes} @@ -740,7 +740,7 @@ Array представлены как длина в формате varint (unsig ## Values {#data-format-values} -Выводит каждую строку в скобках. Строки разделены запятыми. После последней строки запятой нет. Значения внутри скобок также разделены запятыми. Числа выводятся в десятичном виде без кавычек. Массивы выводятся в квадратных скобках. Строки, даты, даты-с-временем выводятся в кавычках. Правила экранирования и особенности парсинга аналогичны формату [TabSeparated](#tabseparated). При форматировании, лишние пробелы не ставятся, а при парсинге - допустимы и пропускаются (за исключением пробелов внутри значений типа массив, которые недопустимы). [NULL](../query_language/syntax.md) представляется как `NULL`. +Выводит каждую строку в скобках. Строки разделены запятыми. После последней строки запятой нет. Значения внутри скобок также разделены запятыми. Числа выводятся в десятичном виде без кавычек. Массивы выводятся в квадратных скобках. Строки, даты, даты-с-временем выводятся в кавычках. Правила экранирования и особенности парсинга аналогичны формату [TabSeparated](#tabseparated). При форматировании, лишние пробелы не ставятся, а при парсинге - допустимы и пропускаются (за исключением пробелов внутри значений типа массив, которые недопустимы). [NULL](../sql_reference/syntax.md) представляется как `NULL`. Минимальный набор символов, которых вам необходимо экранировать при передаче в Values формате: одинарная кавычка и обратный слеш. @@ -750,7 +750,7 @@ Array представлены как длина в формате varint (unsig Выводит каждое значение на отдельной строке, с указанием имени столбца. Формат удобно использовать для вывода одной-нескольких строк, если каждая строка состоит из большого количества столбцов. -[NULL](../query_language/syntax.md) выводится как `ᴺᵁᴸᴸ`. +[NULL](../sql_reference/syntax.md) выводится как `ᴺᵁᴸᴸ`. Пример: @@ -928,7 +928,7 @@ message MessageType { ``` ClickHouse попытается найти столбец с именем `x.y.z` (или `x_y_z`, или `X.y_Z` и т.п.). -Вложенные сообщения удобно использовать в качестве соответствия для [вложенной структуры данных](../data_types/nested_data_structures/nested.md). 
+Вложенные сообщения удобно использовать в качестве соответствия для [вложенной структуры данных](../sql_reference/data_types/nested_data_structures/nested.md). Значения по умолчанию, определённые в схеме `proto2`, например, @@ -940,7 +940,7 @@ message MessageType { } ``` -не применяются; вместо них используются определенные в таблице [значения по умолчанию](../query_language/create.md#create-default-values). +не применяются; вместо них используются определенные в таблице [значения по умолчанию](../sql_reference/statements/create.md#create-default-values). ClickHouse пишет и читает сообщения `Protocol Buffers` в формате `length-delimited`. Это означает, что перед каждым сообщением пишется его длина в формате [varint](https://developers.google.com/protocol-buffers/docs/encoding#varints). См. также [как читать и записывать сообщения Protocol Buffers в формате length-delimited в различных языках программирования](https://cwiki.apache.org/confluence/display/GEODE/Delimiting+Protobuf+Messages). @@ -951,25 +951,25 @@ ClickHouse пишет и читает сообщения `Protocol Buffers` в ### Соответствие типов данных {#sootvetstvie-tipov-dannykh} -Таблица ниже содержит поддерживаемые типы данных и их соответствие [типам данных](../data_types/index.md) ClickHouse для запросов `INSERT` и `SELECT`. +Таблица ниже содержит поддерживаемые типы данных и их соответствие [типам данных](../sql_reference/data_types/index.md) ClickHouse для запросов `INSERT` и `SELECT`. | Тип данных Parquet (`INSERT`) | Тип данных ClickHouse | Тип данных Parquet (`SELECT`) | |-------------------------------|---------------------------------------------|-------------------------------| -| `UINT8`, `BOOL` | [UInt8](../data_types/int_uint.md) | `UINT8` | -| `INT8` | [Int8](../data_types/int_uint.md) | `INT8` | -| `UINT16` | [UInt16](../data_types/int_uint.md) | `UINT16` | -| `INT16` | [Int16](../data_types/int_uint.md) | `INT16` | -| `UINT32` | [UInt32](../data_types/int_uint.md) | `UINT32` | -| `INT32` | [Int32](../data_types/int_uint.md) | `INT32` | -| `UINT64` | [UInt64](../data_types/int_uint.md) | `UINT64` | -| `INT64` | [Int64](../data_types/int_uint.md) | `INT64` | -| `FLOAT`, `HALF_FLOAT` | [Float32](../data_types/float.md) | `FLOAT` | -| `DOUBLE` | [Float64](../data_types/float.md) | `DOUBLE` | -| `DATE32` | [Date](../data_types/date.md) | `UINT16` | -| `DATE64`, `TIMESTAMP` | [DateTime](../data_types/datetime.md) | `UINT32` | -| `STRING`, `BINARY` | [String](../data_types/string.md) | `STRING` | -| — | [FixedString](../data_types/fixedstring.md) | `STRING` | -| `DECIMAL` | [Decimal](../data_types/decimal.md) | `DECIMAL` | +| `UINT8`, `BOOL` | [UInt8](../sql_reference/data_types/int_uint.md) | `UINT8` | +| `INT8` | [Int8](../sql_reference/data_types/int_uint.md) | `INT8` | +| `UINT16` | [UInt16](../sql_reference/data_types/int_uint.md) | `UINT16` | +| `INT16` | [Int16](../sql_reference/data_types/int_uint.md) | `INT16` | +| `UINT32` | [UInt32](../sql_reference/data_types/int_uint.md) | `UINT32` | +| `INT32` | [Int32](../sql_reference/data_types/int_uint.md) | `INT32` | +| `UINT64` | [UInt64](../sql_reference/data_types/int_uint.md) | `UINT64` | +| `INT64` | [Int64](../sql_reference/data_types/int_uint.md) | `INT64` | +| `FLOAT`, `HALF_FLOAT` | [Float32](../sql_reference/data_types/float.md) | `FLOAT` | +| `DOUBLE` | [Float64](../sql_reference/data_types/float.md) | `DOUBLE` | +| `DATE32` | [Date](../sql_reference/data_types/date.md) | `UINT16` | +| `DATE64`, `TIMESTAMP` | [DateTime](../sql_reference/data_types/datetime.md) | 
`UINT32` |
+| `STRING`, `BINARY` | [String](../sql_reference/data_types/string.md) | `STRING` |
+| — | [FixedString](../sql_reference/data_types/fixedstring.md) | `STRING` |
+| `DECIMAL` | [Decimal](../sql_reference/data_types/decimal.md) | `DECIMAL` |
 
 ClickHouse поддерживает настраиваемую точность для формата `Decimal`. При обработке запроса `INSERT`, ClickHouse обрабатывает тип данных Parquet `DECIMAL` как `Decimal128`.
 
@@ -991,7 +991,7 @@ $ cat {filename} | clickhouse-client --query="INSERT INTO {some_table} FORMAT Pa
 $ clickhouse-client --query="SELECT * FROM {some_table} FORMAT Parquet" > {some_file.pq}
 ```
 
-Для обмена данными с экосистемой Hadoop можно использовать движки таблиц [HDFS](../operations/table_engines/hdfs.md).
+Для обмена данными с экосистемой Hadoop можно использовать движки таблиц [HDFS](../engines/table_engines/integrations/hdfs.md).
 
 ## ORC {#data-format-orc}
 
@@ -999,24 +999,24 @@ $ clickhouse-client --query="SELECT * FROM {some_table} FORMAT Parquet" > {some_
 
 ### Соответствие типов данных {#sootvetstvie-tipov-dannykh-1}
 
-Таблица показывает поддержанные типы данных и их соответствие [типам данных](../data_types/index.md) ClickHouse для запросов `INSERT`.
+Таблица показывает поддерживаемые типы данных и их соответствие [типам данных](../sql_reference/data_types/index.md) ClickHouse для запросов `INSERT`.
 
 | Тип данных ORC (`INSERT`) | Тип данных ClickHouse |
 |---------------------------|---------------------------------------|
-| `UINT8`, `BOOL` | [UInt8](../data_types/int_uint.md) |
-| `INT8` | [Int8](../data_types/int_uint.md) |
-| `UINT16` | [UInt16](../data_types/int_uint.md) |
-| `INT16` | [Int16](../data_types/int_uint.md) |
-| `UINT32` | [UInt32](../data_types/int_uint.md) |
-| `INT32` | [Int32](../data_types/int_uint.md) |
-| `UINT64` | [UInt64](../data_types/int_uint.md) |
-| `INT64` | [Int64](../data_types/int_uint.md) |
-| `FLOAT`, `HALF_FLOAT` | [Float32](../data_types/float.md) |
-| `DOUBLE` | [Float64](../data_types/float.md) |
-| `DATE32` | [Date](../data_types/date.md) |
-| `DATE64`, `TIMESTAMP` | [DateTime](../data_types/datetime.md) |
-| `STRING`, `BINARY` | [String](../data_types/string.md) |
-| `DECIMAL` | [Decimal](../data_types/decimal.md) |
+| `UINT8`, `BOOL` | [UInt8](../sql_reference/data_types/int_uint.md) |
+| `INT8` | [Int8](../sql_reference/data_types/int_uint.md) |
+| `UINT16` | [UInt16](../sql_reference/data_types/int_uint.md) |
+| `INT16` | [Int16](../sql_reference/data_types/int_uint.md) |
+| `UINT32` | [UInt32](../sql_reference/data_types/int_uint.md) |
+| `INT32` | [Int32](../sql_reference/data_types/int_uint.md) |
+| `UINT64` | [UInt64](../sql_reference/data_types/int_uint.md) |
+| `INT64` | [Int64](../sql_reference/data_types/int_uint.md) |
+| `FLOAT`, `HALF_FLOAT` | [Float32](../sql_reference/data_types/float.md) |
+| `DOUBLE` | [Float64](../sql_reference/data_types/float.md) |
+| `DATE32` | [Date](../sql_reference/data_types/date.md) |
+| `DATE64`, `TIMESTAMP` | [DateTime](../sql_reference/data_types/datetime.md) |
+| `STRING`, `BINARY` | [String](../sql_reference/data_types/string.md) |
+| `DECIMAL` | [Decimal](../sql_reference/data_types/decimal.md) |
 
 ClickHouse поддерживает настраиваемую точность для формата `Decimal`. При обработке запроса `INSERT`, ClickHouse обрабатывает тип данных ORC `DECIMAL` как `Decimal128`.
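Например, таблица-приёмник для таких данных может объявлять столбец `Decimal128` явно (минимальный набросок; имена таблицы и столбцов условные):

``` sql
CREATE TABLE orc_payments
(
    `id` UInt64,
    -- значения ORC DECIMAL будут прочитаны как Decimal128 с указанным масштабом
    `amount` Decimal128(4)
)
ENGINE = MergeTree
ORDER BY id
```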
@@ -1032,7 +1032,7 @@ ClickHouse поддерживает настраиваемую точность
 
 $ cat filename.orc | clickhouse-client --query="INSERT INTO some_table FORMAT ORC"
 ```
 
-Для обмена данных с Hadoop можно использовать [движок таблиц HDFS](../operations/table_engines/hdfs.md).
+Для обмена данными с Hadoop можно использовать [движок таблиц HDFS](../engines/table_engines/integrations/hdfs.md).
 
 ## Схема формата {#formatschema}
 
@@ -1045,6 +1045,6 @@ $ cat filename.orc | clickhouse-client --query="INSERT INTO some_table FORMAT OR
 относительно текущей директории на клиенте. Если клиент используется в [batch режиме](../interfaces/cli.md#cli_usage), то в записи схемы допускается только относительный путь, из соображений безопасности.
 Если для ввода/вывода данных используется [HTTP-интерфейс](../interfaces/http.md), то файл со схемой должен располагаться на сервере в каталоге,
-указанном в параметре [format\_schema\_path](../operations/server_settings/settings.md#server_settings-format_schema_path) конфигурации сервера.
+указанном в параметре [format\_schema\_path](../operations/server_configuration_parameters/settings.md#server_configuration_parameters-format_schema_path) конфигурации сервера.
 
 [Оригинальная статья](https://clickhouse.tech/docs/ru/interfaces/formats/)
diff --git a/docs/ru/interfaces/http.md b/docs/ru/interfaces/http.md
index 9a89a25bec7..add57183824 100644
--- a/docs/ru/interfaces/http.md
+++ b/docs/ru/interfaces/http.md
@@ -3,7 +3,7 @@
 HTTP интерфейс позволяет использовать ClickHouse на любой платформе, из любого языка программирования. У нас он используется для работы из Java и Perl, а также из shell-скриптов. В других отделах, HTTP интерфейс используется из Perl, Python и Go. HTTP интерфейс более ограничен по сравнению с родным интерфейсом, но является более совместимым.
 
 По умолчанию, clickhouse-server слушает HTTP на порту 8123 (это можно изменить в конфиге).
-Если запросить GET / без параметров, то вернётся строка заданная с помощью настройки [http\_server\_default\_response](../operations/server_settings/settings.md#server_settings-http_server_default_response). Значение по умолчанию «Ok.» (с переводом строки на конце).
+Если запросить GET / без параметров, то вернётся строка, заданная с помощью настройки [http\_server\_default\_response](../operations/server_configuration_parameters/settings.md#server_configuration_parameters-http_server_default_response). Значение по умолчанию «Ok.» (с переводом строки на конце).
 
 ``` bash
 $ curl 'http://localhost:8123/'
diff --git a/docs/ru/interfaces/mysql.md b/docs/ru/interfaces/mysql.md
index 146947342cd..d550b430c69 100644
--- a/docs/ru/interfaces/mysql.md
+++ b/docs/ru/interfaces/mysql.md
@@ -1,6 +1,6 @@
 # MySQL-интерфейс {#mysql-interface}
 
-ClickHouse поддерживает взаимодействие по протоколу MySQL. Данная функция включается настройкой [mysql\_port](../operations/server_settings/settings.md#server_settings-mysql_port) в конфигурационном файле:
+ClickHouse поддерживает взаимодействие по протоколу MySQL.
Данная функция включается настройкой [mysql\_port](../operations/server_configuration_parameters/settings.md#server_configuration_parameters-mysql_port) в конфигурационном файле:
 
 ``` xml
 9004
diff --git a/docs/ru/interfaces/third-party/index.md b/docs/ru/interfaces/third-party/index.md
new file mode 100644
index 00000000000..16d315d059c
--- /dev/null
+++ b/docs/ru/interfaces/third-party/index.md
@@ -0,0 +1,5 @@
+---
+toc_folder_title: Third-Party
+toc_priority: 24
+---
+
diff --git a/docs/ru/interfaces/third-party/integrations.md b/docs/ru/interfaces/third-party/integrations.md
index b835dc949bf..c05ff4d062f 100644
--- a/docs/ru/interfaces/third-party/integrations.md
+++ b/docs/ru/interfaces/third-party/integrations.md
@@ -35,7 +35,7 @@
     - [graphouse](https://github.com/yandex/graphouse)
     - [carbon-clickhouse](https://github.com/lomik/carbon-clickhouse)
+    - [graphite-clickhouse](https://github.com/lomik/graphite-clickhouse)
-    - [graphite-ch-optimizer](https://github.com/innogames/graphite-ch-optimizer) - оптимизирует партиции таблиц [\*GraphiteMergeTree](../../operations/table_engines/graphitemergetree.md#graphitemergetree) согласно правилам в [конфигурации rollup](../../operations/table_engines/graphitemergetree.md#rollup-configuration)
+    - [graphite-ch-optimizer](https://github.com/innogames/graphite-ch-optimizer) - оптимизирует партиции таблиц [\*GraphiteMergeTree](../../engines/table_engines/mergetree_family/graphitemergetree.md#graphitemergetree) согласно правилам в [конфигурации rollup](../../engines/table_engines/mergetree_family/graphitemergetree.md#rollup-configuration)
 - [Grafana](https://grafana.com/)
     - [clickhouse-grafana](https://github.com/Vertamedia/clickhouse-grafana)
 - [Prometheus](https://prometheus.io/)
@@ -72,7 +72,7 @@
     - [RClickhouse](https://github.com/IMSMWU/RClickhouse) (использует [clickhouse-cpp](https://github.com/artpaul/clickhouse-cpp))
 - Java
     - [Hadoop](http://hadoop.apache.org)
-        - [clickhouse-hdfs-loader](https://github.com/jaykelin/clickhouse-hdfs-loader) (использует [JDBC](../../query_language/table_functions/jdbc.md))
+        - [clickhouse-hdfs-loader](https://github.com/jaykelin/clickhouse-hdfs-loader) (использует [JDBC](../../sql_reference/table_functions/jdbc.md))
 - Scala
    - [Akka](https://akka.io)
        - [clickhouse-scala-client](https://github.com/crobox/clickhouse-scala-client)
diff --git a/docs/ru/introduction/adopters.md b/docs/ru/introduction/adopters.md
index ef841b2fa05..1b7d56b19d1 100644
--- a/docs/ru/introduction/adopters.md
+++ b/docs/ru/introduction/adopters.md
@@ -1,79 +1,80 @@
 ---
-en_copy: true
+machine_translated: true
+machine_translated_rev: 1cd5f0028d917696daf71ac1c9ee849c99c1d5c8
 ---
 
-# ClickHouse Adopters {#clickhouse-adopters}
+# Компании, использующие ClickHouse {#clickhouse-adopters}
 
-!!! warning "Disclaimer"
-    The following list of companies using ClickHouse and their success stories is assembled from public sources, thus might differ from current reality. We’d appreciate it if you share the story of adopting ClickHouse in your company and [add it to the list](https://github.com/ClickHouse/ClickHouse/edit/master/docs/en/introduction/adopters.md), but please make sure you won’t have any NDA issues by doing so. Providing updates with publications from other companies is also useful.
+!!! warning "Оговорка"
+    Следующий список компаний, использующих ClickHouse, и их истории успеха собраны из открытых источников, поэтому они могут отличаться от текущей реальности.
Мы были бы очень признательны, если бы вы поделились историей принятия ClickHouse в свою компанию и [добавьте его в список](https://github.com/ClickHouse/ClickHouse/edit/master/docs/en/introduction/adopters.md), но, пожалуйста, убедитесь, что у вас не будет никаких проблем с NDA, сделав это. Предоставление обновлений с публикациями от других компаний также полезно. -| Company | Industry | Usecase | Cluster Size | (Un)Compressed Data Size\* | Reference | -|-----------------------------------------------------------------------------|---------------------------------|-----------------------|------------------------------------------------------------|------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| [2gis](https://2gis.ru) | Maps | Monitoring | — | — | [Talk in Russian, July 2019](https://youtu.be/58sPkXfq6nw) | -| [Aloha Browser](https://alohabrowser.com/) | Mobile App | Browser backend | — | — | [Slides in Russian, May 2019](https://github.com/yandex/clickhouse-presentations/blob/master/meetup22/aloha.pdf) | -| [Amadeus](https://amadeus.com/) | Travel | Analytics | — | — | [Press Release, April 2018](https://www.altinity.com/blog/2018/4/5/amadeus-technologies-launches-investment-and-insights-tool-based-on-machine-learning-and-strategy-algorithms) | -| [Appsflyer](https://www.appsflyer.com) | Mobile analytics | Main product | — | — | [Talk in Russian, July 2019](https://www.youtube.com/watch?v=M3wbRlcpBbY) | -| [ArenaData](https://arenadata.tech/) | Data Platform | Main product | — | — | [Slides in Russian, December 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup38/indexes.pdf) | -| [Badoo](https://badoo.com) | Dating | Timeseries | — | — | [Slides in Russian, December 2019](https://presentations.clickhouse.tech/meetup38/forecast.pdf) | -| [Benocs](https://www.benocs.com/) | Network Telemetry and Analytics | Main Product | — | — | [Slides in English, October 2017](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup9/lpm.pdf) | -| [Bloomberg](https://www.bloomberg.com/) | Finance, Media | Monitoring | 102 servers | — | [Slides, May 2018](https://www.slideshare.net/Altinity/http-analytics-for-6m-requests-per-second-using-clickhouse-by-alexander-bocharov) | -| [Bloxy](https://bloxy.info) | Blockchain | Analytics | — | — | [Slides in Russian, August 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/4_bloxy.pptx) | -| `Dataliance/UltraPower` | Telecom | Analytics | — | — | [Slides in Chinese, January 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup12/telecom.pdf) | -| [CARTO](https://carto.com/) | Business Intelligence | Geo analytics | — | — | [Geospatial processing with Clickhouse](https://carto.com/blog/geospatial-processing-with-clickhouse/) | -| [CERN](http://public.web.cern.ch/public/) | Research | Experiment | — | — | [Press release, April 2012](https://www.yandex.com/company/press_center/press_releases/2012/2012-04-10/) | -| [Cisco](http://cisco.com/) | Networking | Traffic analysis | — | — | [Lightning talk, October 2019](https://youtu.be/-hI1vDR2oPY?t=5057) | -| [Citadel Securities](https://www.citadelsecurities.com/) | Finance | — | — | — | [Contribution, March 2019](https://github.com/ClickHouse/ClickHouse/pull/4774) | -| 
[Citymobil](https://city-mobil.ru) | Taxi | Analytics | — | — | [Blog Post in Russian, March 2020](https://habr.com/en/company/citymobil/blog/490660/) | -| [ContentSquare](https://contentsquare.com) | Web analytics | Main product | — | — | [Blog post in French, November 2018](http://souslecapot.net/2018/11/21/patrick-chatain-vp-engineering-chez-contentsquare-penser-davantage-amelioration-continue-que-revolution-constante/) | -| [Cloudflare](https://cloudflare.com) | CDN | Traffic analysis | 36 servers | — | [Blog post, May 2017](https://blog.cloudflare.com/how-cloudflare-analyzes-1m-dns-queries-per-second/), [Blog post, March 2018](https://blog.cloudflare.com/http-analytics-for-6m-requests-per-second-using-clickhouse/) | -| [Corunet](https://coru.net/) | Analytics | Main product | — | — | [Slides in English, April 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup21/predictive_models.pdf) | -| [CraiditX 氪信](https://creditx.com) | Finance AI | Analysis | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/udf.pptx) | -| [Criteo/Storetail](https://www.criteo.com/) | Retail | Main product | — | — | [Slides in English, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup18/3_storetail.pptx) | -| [Deutsche Bank](https://db.com) | Finance | BI Analytics | — | — | [Slides in English, October 2019](https://bigdatadays.ru/wp-content/uploads/2019/10/D2-H3-3_Yakunin-Goihburg.pdf) | -| [Diva-e](https://www.diva-e.com) | Digital consulting | Main Product | — | — | [Slides in English, September 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup29/ClickHouse-MeetUp-Unusual-Applications-sd-2019-09-17.pdf) | -| [Exness](https://www.exness.com) | Trading | Metrics, Logging | — | — | [Talk in Russian, May 2019](https://youtu.be/_rpU-TvSfZ8?t=3215) | -| [Geniee](https://geniee.co.jp) | Ad network | Main product | — | — | [Blog post in Japanese, July 2017](https://tech.geniee.co.jp/entry/2017/07/20/160100) | -| [HUYA](https://www.huya.com/) | Video Streaming | Analytics | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/7.%20ClickHouse万亿数据分析实践%20李本旺(sundy-li)%20虎牙.pdf) | -| [Idealista](https://www.idealista.com) | Real Estate | Analytics | — | — | [Blog Post in English, April 2019](https://clickhouse.yandex/blog/en/clickhouse-meetup-in-madrid-on-april-2-2019) | -| [Infovista](https://www.infovista.com/) | Networks | Analytics | — | — | [Slides in English, October 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup30/infovista.pdf) | -| [InnoGames](https://www.innogames.com) | Games | Metrics, Logging | — | — | [Slides in Russian, September 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/graphite_and_clickHouse.pdf) | -| [Integros](https://integros.com) | Platform for video services | Analytics | — | — | [Slides in Russian, May 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup22/strategies.pdf) | -| [Kodiak Data](https://www.kodiakdata.com/) | Clouds | Main product | — | — | [Slides in Engish, April 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup13/kodiak_data.pdf) | -| [Kontur](https://kontur.ru) | Software Development | Metrics | — | — | [Talk in Russian, November 2018](https://www.youtube.com/watch?v=U4u4Bd0FtrY) | -| [LifeStreet](https://lifestreet.com/) | Ad 
network | Main product | 75 servers (3 replicas) | 5.27 PiB | [Blog post in Russian, February 2017](https://habr.com/en/post/322620/) | -| [Mail.ru Cloud Solutions](https://mcs.mail.ru/) | Cloud services | Main product | — | — | [Running ClickHouse Instance, in Russian](https://mcs.mail.ru/help/db-create/clickhouse#) | -| [MessageBird](https://www.messagebird.com) | Telecommunications | Statistics | — | — | [Slides in English, November 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup20/messagebird.pdf) | -| [MGID](https://www.mgid.com/) | Ad network | Web-analytics | — | — | [Our experience in implementing analytical DBMS ClickHouse, in Russian](http://gs-studio.com/news-about-it/32777----clickhouse---c) | -| [OneAPM](https://www.oneapm.com/) | Monitorings and Data Analysis | Main product | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/8.%20clickhouse在OneAPM的应用%20杜龙.pdf) | -| [Pragma Innovation](http://www.pragma-innovation.fr/) | Telemetry and Big Data Analysis | Main product | — | — | [Slides in English, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup18/4_pragma_innovation.pdf) | -| [QINGCLOUD](https://www.qingcloud.com/) | Cloud services | Main product | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/4.%20Cloud%20%2B%20TSDB%20for%20ClickHouse%20张健%20QingCloud.pdf) | -| [Qrator](https://qrator.net) | DDoS protection | Main product | — | — | [Blog Post, March 2019](https://blog.qrator.net/en/clickhouse-ddos-mitigation_37/) | -| [Beijing PERCENT Information Technology Co., Ltd.](https://www.percent.cn/) | Analytics | Main Product | — | — | [Slides in Chinese, June 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup24/4.%20ClickHouse万亿数据双中心的设计与实践%20.pdf) | -| [Rambler](https://rambler.ru) | Internet services | Analytics | — | — | [Talk in Russian, April 2018](https://medium.com/@ramblertop/разработка-api-clickhouse-для-рамблер-топ-100-f4c7e56f3141) | -| [Tencent](https://www.tencent.com) | Messaging | Logging | — | — | [Talk in Chinese, November 2019](https://youtu.be/T-iVQRuw-QY?t=5050) | -| [Traffic Stars](https://trafficstars.com/) | AD network | — | — | — | [Slides in Russian, May 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup15/lightning/ninja.pdf) | -| [S7 Airlines](https://www.s7.ru) | Airlines | Metrics, Logging | — | — | [Talk in Russian, March 2019](https://www.youtube.com/watch?v=nwG68klRpPg&t=15s) | -| [SEMrush](https://www.semrush.com/) | Marketing | Main product | — | — | [Slides in Russian, August 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/5_semrush.pdf) | -| [scireum GmbH](https://www.scireum.de/) | e-Commerce | Main product | — | — | [Talk in German, February 2020](https://www.youtube.com/watch?v=7QWAn5RbyR4) | -| [Sentry](https://sentry.io/) | Software developer | Backend for product | — | — | [Blog Post in English, May 2019](https://blog.sentry.io/2019/05/16/introducing-snuba-sentrys-new-search-infrastructure) | -| [SGK](http://www.sgk.gov.tr/wps/portal/sgk/tr) | Goverment Social Security | Analytics | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup35/ClickHouse%20Meetup-Ramazan%20POLAT.pdf) | -| [seo.do](https://seo.do/) | Analytics | Main product | — | — | [Slides in English, November 
2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup35/CH%20Presentation-%20Metehan%20Çetinkaya.pdf) | -| [Sina](http://english.sina.com/index.html) | News | — | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/6.%20ClickHouse最佳实践%20高鹏_新浪.pdf) | -| [SMI2](https://smi2.ru/) | News | Analytics | — | — | [Blog Post in Russian, November 2017](https://habr.com/ru/company/smi2/blog/314558/) | -| [Splunk](https://www.splunk.com/) | Business Analytics | Main product | — | — | [Slides in English, January 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup12/splunk.pdf) | -| [Spotify](https://www.spotify.com) | Music | Experimentation | — | — | [Slides, July 2018](https://www.slideshare.net/glebus/using-clickhouse-for-experimentation-104247173) | -| [Tencent](https://www.tencent.com) | Big Data | Data processing | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/5.%20ClickHouse大数据集群应用_李俊飞腾讯网媒事业部.pdf) | -| [Uber](https://www.uber.com) | Taxi | Logging | — | — | [Slides, February 2020](https://presentations.clickhouse.tech/meetup40/uber.pdf) | -| [VKontakte](https://vk.com) | Social Network | Statistics, Logging | — | — | [Slides in Russian, August 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/3_vk.pdf) | -| [Wisebits](https://wisebits.com/) | IT Solutions | Analytics | — | — | [Slides in Russian, May 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup22/strategies.pdf) | -| [Xiaoxin Tech.](https://www.xiaoheiban.cn/) | Education | Common purpose | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/sync-clickhouse-with-mysql-mongodb.pptx) | -| [Ximalaya](https://www.ximalaya.com/) | Audio sharing | OLAP | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/ximalaya.pdf) | -| [Yandex Cloud](https://cloud.yandex.ru/services/managed-clickhouse) | Public Cloud | Main product | — | — | [Talk in Russian, December 2019](https://www.youtube.com/watch?v=pgnak9e_E0o) | -| [Yandex DataLens](https://cloud.yandex.ru/services/datalens) | Business Intelligence | Main product | — | — | [Slides in Russian, December 2019](https://presentations.clickhouse.tech/meetup38/datalens.pdf) | -| [Yandex Market](https://market.yandex.ru/) | e-Commerce | Metrics, Logging | — | — | [Talk in Russian, January 2019](https://youtu.be/_l1qP0DyBcA?t=478) | -| [Yandex Metrica](https://metrica.yandex.com) | Web analytics | Main product | 360 servers in one cluster, 1862 servers in one department | 66.41 PiB / 5.68 PiB | [Slides, February 2020](https://presentations.clickhouse.tech/meetup40/introduction/#13) | -| [ЦВТ](https://htc-cs.ru/) | Software Development | Metrics, Logging | — | — | [Blog Post, March 2019, in Russian](https://vc.ru/dev/62715-kak-my-stroili-monitoring-na-prometheus-clickhouse-i-elk) | -| [МКБ](https://mkb.ru/) | Bank | Web-system monitoring | — | — | [Slides in Russian, September 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/mkb.pdf) | -| [金数据](https://jinshuju.net) | BI Analytics | Main product | — | — | [Slides in Chinese, October 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup24/3.%20金数据数据架构调整方案Public.pdf) | +| Компания | Промышленность | Usecase | Размер 
кластера | (Un)Сжатый Размер Данных\* | Ссылка | +|---------------------------------------------------------------------------------|----------------------------------------|-----------------------------|------------------------------------------------------------|------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| [2ГИС](https://2gis.ru) | Карты | Мониторинг | — | — | [Говорить по-русски, июль 2019](https://youtu.be/58sPkXfq6nw) | +| [Браузер Aloha](https://alohabrowser.com/) | Мобильное приложение | Серверная часть браузера | — | — | [Слайды на русском языке, май 2019 года](https://github.com/yandex/clickhouse-presentations/blob/master/meetup22/aloha.pdf) | +| [Компания Amadeus](https://amadeus.com/) | Путешествовать | Аналитика | — | — | [Пресс-Релиз, Апрель 2018 Года](https://www.altinity.com/blog/2018/4/5/amadeus-technologies-launches-investment-and-insights-tool-based-on-machine-learning-and-strategy-algorithms) | +| [Компания](https://www.appsflyer.com) | Мобильная аналитика | Главный продукт | — | — | [Говорить по-русски, июль 2019](https://www.youtube.com/watch?v=M3wbRlcpBbY) | +| [ArenaData](https://arenadata.tech/) | Платформа данных | Главный продукт | — | — | [Слайды на русском языке, декабрь 2019 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup38/indexes.pdf) | +| [На Badoo](https://badoo.com) | Знакомства | Таймсерии | — | — | [Слайды на русском языке, декабрь 2019 года](https://presentations.clickhouse.tech/meetup38/forecast.pdf) | +| [Бенокс](https://www.benocs.com/) | Сетевая телеметрия и аналитика | Главный продукт | — | — | [Слайды на английском языке, октябрь 2017 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup9/lpm.pdf) | +| [Блумберг](https://www.bloomberg.com/) | Финансы, СМИ | Мониторинг | 102 сервера | — | [Слайды, Май 2018 Года](https://www.slideshare.net/Altinity/http-analytics-for-6m-requests-per-second-using-clickhouse-by-alexander-bocharov) | +| [Блокси](https://bloxy.info) | Блокчейн | Аналитика | — | — | [Слайды на русском языке, август 2018 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/4_bloxy.pptx) | +| `Dataliance/UltraPower` | Телекоммуникационный | Аналитика | — | — | [Слайды на китайском языке, январь 2018 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup12/telecom.pdf) | +| [CARTO](https://carto.com/) | Бизнес-разведка | Гео аналитика | — | — | [Геопространственная обработка с помощью Clickhouse](https://carto.com/blog/geospatial-processing-with-clickhouse/) | +| [CERN](http://public.web.cern.ch/public/) | Исследование | Эксперимент | — | — | [Пресс-релиз, апрель 2012 года](https://www.yandex.com/company/press_center/press_releases/2012/2012-04-10/) | +| [Компании Cisco](http://cisco.com/) | Сетевой | Анализ трафика | — | — | [Молниеносный разговор, октябрь 2019 года](https://youtu.be/-hI1vDR2oPY?t=5057) | +| [Ценные Бумаги Цитадели](https://www.citadelsecurities.com/) | Финансы | — | — | — | [Взнос, Март 2019 Года](https://github.com/ClickHouse/ClickHouse/pull/4774) | +| [Ситимобил](https://city-mobil.ru) | Такси | Аналитика | — | — | [Запись в блоге на русском языке, март 2020 года](https://habr.com/en/company/citymobil/blog/490660/) | +| 
[ContentSquare](https://contentsquare.com) | Веб-аналитика | Главный продукт | — | — | [Запись в блоге на французском языке, ноябрь 2018 года](http://souslecapot.net/2018/11/21/patrick-chatain-vp-engineering-chez-contentsquare-penser-davantage-amelioration-continue-que-revolution-constante/) | +| [Cloudflare](https://cloudflare.com) | CDN | Анализ трафика | 36 серверов | — | [Сообщение в блоге, май 2017 года](https://blog.cloudflare.com/how-cloudflare-analyzes-1m-dns-queries-per-second/), [Сообщение в блоге, март 2018 года](https://blog.cloudflare.com/http-analytics-for-6m-requests-per-second-using-clickhouse/) | +| [Корунет](https://coru.net/) | Аналитика | Главный продукт | — | — | [Слайды на английском языке, апрель 2019 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup21/predictive_models.pdf) | +| [CraiditX 氪信](https://creditx.com) | Финансовый ИИ | Анализ | — | — | [Слайды на английском языке, ноябрь 2019 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/udf.pptx) | +| [Criteo / Storetail](https://www.criteo.com/) | Розничная торговля | Главный продукт | — | — | [Слайды на английском языке, октябрь 2018 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup18/3_storetail.pptx) | +| [Дойче банк](https://db.com) | Финансы | Би аналитика | — | — | [Слайды на английском языке, октябрь 2019 года](https://bigdatadays.ru/wp-content/uploads/2019/10/D2-H3-3_Yakunin-Goihburg.pdf) | +| [Дива-е](https://www.diva-e.com) | Цифровой Консалтинг | Главный продукт | — | — | [Слайды на английском языке, сентябрь 2019 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup29/ClickHouse-MeetUp-Unusual-Applications-sd-2019-09-17.pdf) | +| [Компания Exness](https://www.exness.com) | Торговый | Метрики, Ведение Журнала | — | — | [Разговор на русском языке, май 2019 года](https://youtu.be/_rpU-TvSfZ8?t=3215) | +| [Джинн](https://geniee.co.jp) | Рекламная сеть | Главный продукт | — | — | [Запись в блоге на японском языке, июль 2017 года](https://tech.geniee.co.jp/entry/2017/07/20/160100) | +| [HUYA](https://www.huya.com/) | Потоковое видео | Аналитика | — | — | [Слайды на китайском языке, октябрь 2018 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/7.%20ClickHouse万亿数据分析实践%20李本旺(sundy-li)%20虎牙.pdf) | +| [Идеалиста](https://www.idealista.com) | Недвижимость | Аналитика | — | — | [Сообщение в блоге на английском языке, апрель 2019 года](https://clickhouse.yandex/blog/en/clickhouse-meetup-in-madrid-on-april-2-2019) | +| [Infovista](https://www.infovista.com/) | Сети | Аналитика | — | — | [Слайды на английском языке, октябрь 2019 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup30/infovista.pdf) | +| [Компания innogames](https://www.innogames.com) | Игры | Метрики, Ведение Журнала | — | — | [Слайды на русском языке, сентябрь 2019 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/graphite_and_clickHouse.pdf) | +| [Интегрос](https://integros.com) | Платформа для видеосервисов | Аналитика | — | — | [Слайды на русском языке, май 2019 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup22/strategies.pdf) | +| [Данные По Кадьяку](https://www.kodiakdata.com/) | Облака | Главный продукт | — | — | [Слайды на английском языке, апрель 2018 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup13/kodiak_data.pdf) | +| [Контур](https://kontur.ru) | Разработка программного 
обеспечения | Метрика | — | — | [Говорить по-русски, ноябрь 2018](https://www.youtube.com/watch?v=U4u4Bd0FtrY) | +| [LifeStreet](https://lifestreet.com/) | Рекламная сеть | Главный продукт | 75 серверов (3 реплики) | 5.27 ПИБ | [Запись в блоге на русском языке, февраль 2017 года](https://habr.com/en/post/322620/) | +| [Mail.ru Облачные Решения](https://mcs.mail.ru/) | Облачные сервисы | Главный продукт | — | — | [Запуск экземпляра ClickHouse на русском языке](https://mcs.mail.ru/help/db-create/clickhouse#) | +| [MessageBird](https://www.messagebird.com) | Электросвязь | Статистика | — | — | [Слайды на английском языке, ноябрь 2018 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup20/messagebird.pdf) | +| [MGID](https://www.mgid.com/) | Рекламная сеть | Веб-аналитика | — | — | [Наш опыт внедрения аналитической СУБД ClickHouse на русском языке](http://gs-studio.com/news-about-it/32777----clickhouse---c) | +| [OneAPM](https://www.oneapm.com/) | Мониторинг и анализ данных | Главный продукт | — | — | [Слайды на китайском языке, октябрь 2018 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/8.%20clickhouse在OneAPM的应用%20杜龙.pdf) | +| [ПРАГМА Инноваций](http://www.pragma-innovation.fr/) | Телеметрия и анализ Больших Данных | Главный продукт | — | — | [Слайды на английском языке, октябрь 2018 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup18/4_pragma_innovation.pdf) | +| [QINGCLOUD](https://www.qingcloud.com/) | Облачные сервисы | Главный продукт | — | — | [Слайды на китайском языке, октябрь 2018 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/4.%20Cloud%20%2B%20TSDB%20for%20ClickHouse%20张健%20QingCloud.pdf) | +| [Qrator](https://qrator.net) | Защита от DDoS-атак | Главный продукт | — | — | [Сообщение В Блоге, Март 2019 Года](https://blog.qrator.net/en/clickhouse-ddos-mitigation_37/) | +| [Beijing PERCENT Information Technology Co., Лимитед.](https://www.percent.cn/) | Аналитика | Главный продукт | — | — | [Слайды на китайском языке, июнь 2019 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup24/4.%20ClickHouse万亿数据双中心的设计与实践%20.pdf) | +| [Бродяга](https://rambler.ru) | Интернет услуги | Аналитика | — | — | [Говорить по-русски, апрель 2018](https://medium.com/@ramblertop/разработка-api-clickhouse-для-рамблер-топ-100-f4c7e56f3141) | +| [Tencent](https://www.tencent.com) | Обмен сообщениями | Регистрация | — | — | [Говорить по-китайски, ноябрь 2019](https://youtu.be/T-iVQRuw-QY?t=5050) | +| [Движения Звезд](https://trafficstars.com/) | Рекламная сеть | — | — | — | [Слайды на русском языке, май 2018 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup15/lightning/ninja.pdf) | +| [S7 Airlines](https://www.s7.ru) | Авиакомпании | Метрики, Ведение Журнала | — | — | [Разговор на русском языке, март 2019 года](https://www.youtube.com/watch?v=nwG68klRpPg&t=15s) | +| [Общий](https://www.semrush.com/) | Маркетинг | Главный продукт | — | — | [Слайды на русском языке, август 2018 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/5_semrush.pdf) | +| [scireum ГмбХ](https://www.scireum.de/) | электронная коммерция | Главный продукт | — | — | [Говорить по-немецки, февраль 2020](https://www.youtube.com/watch?v=7QWAn5RbyR4) | +| [Караул](https://sentry.io/) | Разработчик | Бэкэнд для продукта | — | — | [Сообщение в блоге на английском языке, май 2019 
года](https://blog.sentry.io/2019/05/16/introducing-snuba-sentrys-new-search-infrastructure) | +| [SGK](http://www.sgk.gov.tr/wps/portal/sgk/tr) | Государственное Социальное Обеспечение | Аналитика | — | — | [Слайды на английском языке, ноябрь 2019 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup35/ClickHouse%20Meetup-Ramazan%20POLAT.pdf) | +| [СЕО.делать](https://seo.do/) | Аналитика | Главный продукт | — | — | [Слайды на английском языке, ноябрь 2019 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup35/CH%20Presentation-%20Metehan%20Çetinkaya.pdf) | +| [Зина](http://english.sina.com/index.html) | Новости | — | — | — | [Слайды на китайском языке, октябрь 2018 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/6.%20ClickHouse最佳实践%20高鹏_新浪.pdf) | +| [SMI2](https://smi2.ru/) | Новости | Аналитика | — | — | [Запись в блоге на русском языке, ноябрь 2017 года](https://habr.com/ru/company/smi2/blog/314558/) | +| [Чмок](https://www.splunk.com/) | Бизнес-аналитика | Главный продукт | — | — | [Слайды на английском языке, январь 2018 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup12/splunk.pdf) | +| [Спотифай](https://www.spotify.com) | Музыка | Экспериментирование | — | — | [Слайды, Июль 2018 Года](https://www.slideshare.net/glebus/using-clickhouse-for-experimentation-104247173) | +| [Tencent](https://www.tencent.com) | Большие данные | Обработка данных | — | — | [Слайды на китайском языке, октябрь 2018 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/5.%20ClickHouse大数据集群应用_李俊飞腾讯网媒事业部.pdf) | +| [Убер](https://www.uber.com) | Такси | Регистрация | — | — | [Слайды, Февраль 2020 Года](https://presentations.clickhouse.tech/meetup40/uber.pdf) | +| [ВКонтакте](https://vk.com) | Социальная сеть | Статистика, Ведение Журнала | — | — | [Слайды на русском языке, август 2018 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/3_vk.pdf) | +| [Мудрецы](https://wisebits.com/) | IT-решение | Аналитика | — | — | [Слайды на русском языке, май 2019 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup22/strategies.pdf) | +| [Технология Сяосин.](https://www.xiaoheiban.cn/) | Образование | Общая цель | — | — | [Слайды на английском языке, ноябрь 2019 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/sync-clickhouse-with-mysql-mongodb.pptx) | +| [Сималайя](https://www.ximalaya.com/) | Общий доступ к аудио | OLAP | — | — | [Слайды на английском языке, ноябрь 2019 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/ximalaya.pdf) | +| [Облако Яндекса](https://cloud.yandex.ru/services/managed-clickhouse) | Публичное Облако | Главный продукт | — | — | [Разговор на русском языке, декабрь 2019 года](https://www.youtube.com/watch?v=pgnak9e_E0o) | +| [DataLens Яндекс ](https://cloud.yandex.ru/services/datalens) | Бизнес-разведка | Главный продукт | — | — | [Слайды на русском языке, декабрь 2019 года](https://presentations.clickhouse.tech/meetup38/datalens.pdf) | +| [Яндекс Маркет](https://market.yandex.ru/) | электронная коммерция | Метрики, Ведение Журнала | — | — | [Разговор на русском языке, январь 2019 года](https://youtu.be/_l1qP0DyBcA?t=478) | +| [Яндекс Метрика](https://metrica.yandex.com) | Веб-аналитика | Главный продукт | 360 серверов в одном кластере, 1862 сервера в одном отделе | 66.41 ПИБ / 5.68 ПИБ | [Слайды, Февраль 2020 
Года](https://presentations.clickhouse.tech/meetup40/introduction/#13) | +| [ЦВТ](https://htc-cs.ru/) | Разработка программного обеспечения | Метрики, Ведение Журнала | — | — | [Сообщение в блоге, март 2019 года, на русском языке](https://vc.ru/dev/62715-kak-my-stroili-monitoring-na-prometheus-clickhouse-i-elk) | +| [МКБ](https://mkb.ru/) | Банк | Мониторинг веб-систем | — | — | [Слайды на русском языке, сентябрь 2019 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/mkb.pdf) | +| [金数据](https://jinshuju.net) | Би аналитика | Главный продукт | — | — | [Слайды на китайском языке, октябрь 2019 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup24/3.%20金数据数据架构调整方案Public.pdf) | -[Original article](https://clickhouse.tech/docs/en/introduction/adopters/) +[Оригинальная статья](https://clickhouse.tech/docs/en/introduction/adopters/) diff --git a/docs/ru/introduction/distinctive_features.md b/docs/ru/introduction/distinctive_features.md index 093053a3b89..0cc40e4e162 100644 --- a/docs/ru/introduction/distinctive_features.md +++ b/docs/ru/introduction/distinctive_features.md @@ -59,6 +59,6 @@ ClickHouse предоставляет различные способы разм Используется асинхронная multimaster репликация. После записи на любую доступную реплику, данные распространяются на все остальные реплики в фоне. Система поддерживает полную идентичность данных на разных репликах. Восстановление после большинства сбоев осуществляется автоматически, а в сложных случаях — полуавтоматически. При необходимости, можно [включить кворумную запись](../operations/settings/settings.md) данных. -Подробнее смотрите раздел [Репликация данных](../operations/table_engines/replication.md). +Подробнее смотрите раздел [Репликация данных](../engines/table_engines/mergetree_family/replication.md). [Оригинальная статья](https://clickhouse.tech/docs/ru/introduction/distinctive_features/) diff --git a/docs/ru/introduction/index.md b/docs/ru/introduction/index.md new file mode 100644 index 00000000000..ba80f9c2640 --- /dev/null +++ b/docs/ru/introduction/index.md @@ -0,0 +1,6 @@ +--- +toc_folder_title: Introduction +toc_priority: 1 +--- + + diff --git a/docs/ru/operations/access_rights.md b/docs/ru/operations/access_rights.md index d4cd7793bf1..18c2a25377a 100644 --- a/docs/ru/operations/access_rights.md +++ b/docs/ru/operations/access_rights.md @@ -61,7 +61,7 @@ Здесь видно объявление двух пользователей - `default` и `web`. Пользователя `web` мы добавили самостоятельно. -Пользователь `default` выбирается в случаях, когда имя пользователя не передаётся. Также пользователь `default` может использоваться при распределённой обработке запроса - если в конфигурации кластера для сервера не указаны `user` и `password`. (см. раздел о движке [Distributed](../operations/table_engines/distributed.md)). +Пользователь `default` выбирается в случаях, когда имя пользователя не передаётся. Также пользователь `default` может использоваться при распределённой обработке запроса - если в конфигурации кластера для сервера не указаны `user` и `password`. (см. раздел о движке [Distributed](../engines/table_engines/special/distributed.md)). Пользователь, который используется для обмена информацией между серверами, объединенными в кластер, не должен иметь существенных ограничений или квот - иначе распределённые запросы сломаются. 
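Для иллюстрации, минимальный набросок того, как работает описанная схема (имя кластера `test_cluster` условное): запросы к распределённой таблице будут выполняться на шардах от имени пользователя, указанного для этого кластера в конфигурации (по умолчанию `default`).

``` sql
CREATE TABLE hits_local
(
    `dt` Date,
    `user_id` UInt64
)
ENGINE = MergeTree
ORDER BY user_id;

-- Запросы к hits_all уходят на шарды кластера test_cluster
-- от имени пользователя из конфигурации этого кластера
CREATE TABLE hits_all AS hits_local
ENGINE = Distributed(test_cluster, default, hits_local);
```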
diff --git a/docs/ru/operations/backup.md b/docs/ru/operations/backup.md index 4888f2b418e..601eaa4d6d9 100644 --- a/docs/ru/operations/backup.md +++ b/docs/ru/operations/backup.md @@ -1,6 +1,6 @@ # Резервное копирование данных {#rezervnoe-kopirovanie-dannykh} -[Репликация](table_engines/replication.md) обеспечивает защиту от аппаратных сбоев, но не защищает от человеческих ошибок: случайного удаления данных, удаления не той таблицы, которую надо было, или таблицы на не том кластере, а также программных ошибок, которые приводят к неправильной обработке данных или их повреждению. Во многих случаях подобные ошибки влияют на все реплики. ClickHouse имеет встроенные средства защиты для предотвращения некоторых типов ошибок — например, по умолчанию [не получится удалить таблицы \*MergeTree, содержащие более 50 Гб данных, одной командой](https://github.com/ClickHouse/ClickHouse/blob/v18.14.18-stable/programs/server/config.xml#L322-L330). Однако эти средства защиты не охватывают все возможные случаи и могут быть обойдены. +[Репликация](../engines/table_engines/mergetree_family/replication.md) обеспечивает защиту от аппаратных сбоев, но не защищает от человеческих ошибок: случайного удаления данных, удаления не той таблицы, которую надо было, или таблицы на не том кластере, а также программных ошибок, которые приводят к неправильной обработке данных или их повреждению. Во многих случаях подобные ошибки влияют на все реплики. ClickHouse имеет встроенные средства защиты для предотвращения некоторых типов ошибок — например, по умолчанию [не получится удалить таблицы \*MergeTree, содержащие более 50 Гб данных, одной командой](https://github.com/ClickHouse/ClickHouse/blob/v18.14.18-stable/programs/server/config.xml#L322-L330). Однако эти средства защиты не охватывают все возможные случаи и могут быть обойдены. Для того чтобы эффективно уменьшить возможные человеческие ошибки, следует тщательно подготовить стратегию резервного копирования и восстановления данных **заранее**. @@ -15,11 +15,11 @@ ## Снимки файловой системы {#snimki-failovoi-sistemy} -Некоторые локальные файловые системы позволяют делать снимки (например, [ZFS](https://en.wikipedia.org/wiki/ZFS)), но они могут быть не лучшим выбором для обслуживания живых запросов. Возможным решением является создание дополнительных реплик с такой файловой системой и исключение их из [Distributed](table_engines/distributed.md) таблиц, используемых для запросов `SELECT`. Снимки на таких репликах будут недоступны для запросов, изменяющих данные. В качестве бонуса, эти реплики могут иметь особые конфигурации оборудования с большим количеством дисков, подключенных к серверу, что будет экономически эффективным. +Некоторые локальные файловые системы позволяют делать снимки (например, [ZFS](https://en.wikipedia.org/wiki/ZFS)), но они могут быть не лучшим выбором для обслуживания живых запросов. Возможным решением является создание дополнительных реплик с такой файловой системой и исключение их из [Distributed](../engines/table_engines/special/distributed.md) таблиц, используемых для запросов `SELECT`. Снимки на таких репликах будут недоступны для запросов, изменяющих данные. В качестве бонуса, эти реплики могут иметь особые конфигурации оборудования с большим количеством дисков, подключенных к серверу, что будет экономически эффективным. ## clickhouse-copier {#clickhouse-copier} -[clickhouse-copier](utils/clickhouse-copier.md) — это универсальный инструмент, который изначально был создан для перешардирования таблиц с петабайтами данных. 
Его также можно использовать для резервного копирования и восстановления, поскольку он надёжно копирует данные между таблицами и кластерами ClickHouse. +[clickhouse-copier](utilities/clickhouse-copier.md) — это универсальный инструмент, который изначально был создан для перешардирования таблиц с петабайтами данных. Его также можно использовать для резервного копирования и восстановления, поскольку он надёжно копирует данные между таблицами и кластерами ClickHouse. Для небольших объёмов данных можно применять `INSERT INTO ... SELECT ...` в удалённые таблицы. @@ -27,7 +27,7 @@ ClickHouse позволяет использовать запрос `ALTER TABLE ... FREEZE PARTITION ...` для создания локальной копии партиций таблицы. Это реализуется с помощью жестких ссылок (hardlinks) на каталог `/var/lib/clickhouse/shadow/`, поэтому такая копия обычно не занимает дополнительное место на диске для старых данных. Созданные копии файлов не обрабатываются сервером ClickHouse, поэтому вы можете просто оставить их там: у вас будет простая резервная копия, которая не требует дополнительной внешней системы, однако при аппаратных проблемах вы можете утратить и актуальные данные и сохраненную копию. По этой причине, лучше удаленно скопировать их в другое место, а затем удалить локальную копию. Распределенные файловые системы и хранилища объектов по-прежнему являются хорошими вариантами для этого, однако можно использовать и обычные присоединенные файловые серверы с достаточно большой ёмкостью (в этом случае передача будет происходить через сетевую файловую систему или, возможно, [rsync](https://en.wikipedia.org/wiki/Rsync)). -Дополнительные сведения о запросах, связанных с манипуляциями партициями, см. в разделе [ALTER](../query_language/alter.md#alter_manipulations-with-partitions). +Дополнительные сведения о запросах, связанных с манипуляциями партициями, см. в разделе [ALTER](../sql_reference/statements/alter.md#alter_manipulations-with-partitions). Для автоматизации этого подхода доступен инструмент от сторонних разработчиков: [clickhouse-backup](https://github.com/AlexAkulov/clickhouse-backup). diff --git a/docs/ru/operations/configuration_files.md b/docs/ru/operations/configuration_files.md index 0bcae78a128..19f7ea9d5db 100644 --- a/docs/ru/operations/configuration_files.md +++ b/docs/ru/operations/configuration_files.md @@ -12,7 +12,7 @@ Если указано `remove` - удалить элемент. -Также в конфиге могут быть указаны «подстановки». Если у элемента присутствует атрибут `incl`, то в качестве значения будет использована соответствующая подстановка из файла. По умолчанию, путь к файлу с подстановками - `/etc/metrika.xml`. Он может быть изменён в конфигурации сервера в элементе [include\_from](server_settings/settings.md#server_settings-include_from). Значения подстановок указываются в элементах `/yandex/имя_подстановки` этого файла. Если подстановка, заданная в `incl` отсутствует, то в лог попадает соответствующая запись. Чтобы ClickHouse не писал в лог об отсутствии подстановки, необходимо указать атрибут `optional="true"` (например, настройка [macros](server_settings/settings.md)). +Также в конфиге могут быть указаны «подстановки». Если у элемента присутствует атрибут `incl`, то в качестве значения будет использована соответствующая подстановка из файла. По умолчанию, путь к файлу с подстановками - `/etc/metrika.xml`. Он может быть изменён в конфигурации сервера в элементе [include\_from](server_configuration_parameters/settings.md#server_configuration_parameters-include_from). 
Значения подстановок указываются в элементах `/yandex/имя_подстановки` этого файла. Если подстановка, заданная в `incl`, отсутствует, то в лог попадает соответствующая запись. Чтобы ClickHouse не писал в лог об отсутствии подстановки, необходимо указать атрибут `optional="true"` (например, настройка [macros](server_configuration_parameters/settings.md)).
 
 Подстановки могут также выполняться из ZooKeeper. Для этого укажите у элемента атрибут `from_zk = "/path/to/node"`. Значение элемента заменится на содержимое узла `/path/to/node` в ZooKeeper. В ZooKeeper-узел также можно положить целое XML-поддерево, оно будет целиком вставлено в исходный элемент.
diff --git a/docs/ru/operations/index.md b/docs/ru/operations/index.md
index 2e85f1bf816..3df5dbb1f7e 100644
--- a/docs/ru/operations/index.md
+++ b/docs/ru/operations/index.md
@@ -12,7 +12,7 @@
 - [Конфигурационные файлы](configuration_files.md)
 - [Квоты](quotas.md)
 - [Системные таблицы](system_tables.md)
-    - [Конфигурационные параметры сервера](server_settings/index.md)
+    - [Конфигурационные параметры сервера](server_configuration_parameters/index.md)
 - [Тестирование серверов с помощью ClickHouse](performance_test.md)
 - [Настройки](settings/index.md)
 - [Утилиты](utils/index.md)
diff --git a/docs/ru/operations/monitoring.md b/docs/ru/operations/monitoring.md
index 8681261bf6f..469d712376b 100644
--- a/docs/ru/operations/monitoring.md
+++ b/docs/ru/operations/monitoring.md
@@ -21,7 +21,7 @@ ClickHouse не отслеживает состояние аппаратных
 
 Сервер ClickHouse имеет встроенные инструменты мониторинга.
 
-Для отслеживания событий на сервере используйте логи. Подробнее смотрите в разделе конфигурационного файла [logger](server_settings/settings.md#server_settings-logger).
+Для отслеживания событий на сервере используйте логи. Подробнее смотрите в разделе конфигурационного файла [logger](server_configuration_parameters/settings.md#server_configuration_parameters-logger).
 
 ClickHouse собирает:
 
@@ -30,7 +30,7 @@ ClickHouse собирает:
 
 Метрики находятся в таблицах [system.metrics](system_tables.md#system_tables-metrics), [system.events](system_tables.md#system_tables-events) и [system.asynchronous\_metrics](system_tables.md#system_tables-asynchronous_metrics). Пример запроса к этим таблицам приведён ниже.
 
-Можно настроить экспорт метрик из ClickHouse в [Graphite](https://github.com/graphite-project). Смотрите секцию [graphite](server_settings/settings.md#server_settings-graphite) конфигурационного файла ClickHouse. Перед настройкой экспорта метрик необходимо настроить Graphite, как указано в [официальном руководстве](https://graphite.readthedocs.io/en/latest/install.html).
+Можно настроить экспорт метрик из ClickHouse в [Graphite](https://github.com/graphite-project). Смотрите секцию [graphite](server_configuration_parameters/settings.md#server_configuration_parameters-graphite) конфигурационного файла ClickHouse. Перед настройкой экспорта метрик необходимо настроить Graphite, как указано в [официальном руководстве](https://graphite.readthedocs.io/en/latest/install.html).
 
 Также можно отслеживать доступность сервера через HTTP API. Отправьте `HTTP GET` к ресурсу `/ping`. Если сервер доступен, он отвечает `200 OK`.
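Например, текущие значения метрик можно получить обычным запросом к системной таблице (набросок; фильтр по имени метрики условный):

``` sql
SELECT metric, value
FROM system.metrics
WHERE metric LIKE '%Connection%'
```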
diff --git a/docs/ru/operations/optimizing_performance/index.md b/docs/ru/operations/optimizing_performance/index.md
new file mode 100644
index 00000000000..4f4cbb66d24
--- /dev/null
+++ b/docs/ru/operations/optimizing_performance/index.md
@@ -0,0 +1,5 @@
+---
+toc_folder_title: Optimizing Performance
+toc_priority: 52
+---
+
diff --git a/docs/ru/operations/optimizing_performance/sampling_query_profiler.md b/docs/ru/operations/optimizing_performance/sampling_query_profiler.md
new file mode 100644
index 00000000000..d2cc9738749
--- /dev/null
+++ b/docs/ru/operations/optimizing_performance/sampling_query_profiler.md
@@ -0,0 +1,62 @@
+---
+machine_translated: true
+machine_translated_rev: 1cd5f0028d917696daf71ac1c9ee849c99c1d5c8
+---
+
+# Сэмплирующий профилировщик запросов {#sampling-query-profiler}
+
+ClickHouse запускает сэмплирующий профилировщик, который позволяет анализировать выполнение запросов. С помощью профилировщика можно найти участки исходного кода, которые выполняются чаще всего во время запроса. Можно отслеживать как процессорное время, так и астрономическое время (wall-clock), включая время простоя.
+
+Чтобы использовать профилировщик:
+
+- Настройте раздел [trace\_log](../server_configuration_parameters/settings.md#server_configuration_parameters-trace_log) конфигурации сервера.
+
+    Этот раздел настраивает системную таблицу [trace\_log](../../operations/optimizing_performance/sampling_query_profiler.md#system_tables-trace_log), содержащую результаты работы профилировщика. Она настроена по умолчанию. Помните, что данные в этой таблице действительны только для работающего сервера: после перезапуска сервера ClickHouse не очищает таблицу, и все сохранённые адреса виртуальной памяти могут стать недействительными.
+
+- Настройте параметры [query\_profiler\_cpu\_time\_period\_ns](../settings/settings.md#query_profiler_cpu_time_period_ns) или [query\_profiler\_real\_time\_period\_ns](../settings/settings.md#query_profiler_real_time_period_ns). Обе настройки можно использовать одновременно.
+
+    Эти параметры позволяют настроить таймеры профилировщика. Поскольку это настройки сеанса, можно задать различную частоту сэмплирования для всего сервера, для отдельных пользователей или профилей пользователей, для интерактивного сеанса и для каждого отдельного запроса.
+
+Частота сэмплирования по умолчанию составляет одну выборку в секунду, при этом включены как процессорный, так и реальный таймеры. Эта частота позволяет собрать достаточно информации о кластере ClickHouse и не влияет на производительность сервера. Если нужно профилировать каждый отдельный запрос, попробуйте использовать более высокую частоту сэмплирования.
+
+Чтобы проанализировать системную таблицу `trace_log`:
+
+- Установите пакет `clickhouse-common-static-dbg`. См. [Установка из пакетов DEB](../../getting_started/install.md#install-from-deb-packages).
+
+- Включите функции интроспекции настройкой [allow\_introspection\_functions](../settings/settings.md#settings-allow_introspection_functions).
+
+    По соображениям безопасности функции интроспекции по умолчанию отключены.
+
+- Используйте [функции интроспекции](../../operations/optimizing_performance/sampling_query_profiler.md) `addressToLine`, `addressToSymbol` и `demangle`, чтобы получить имена функций и их позиции в коде ClickHouse.
Чтобы получить профиль какого-либо запроса, необходимо агрегировать данные из таблицы `trace_log`. Агрегировать данные можно по отдельным функциям или по целым трассировкам стека.
+
+Если нужно визуализировать информацию из `trace_log`, попробуйте [flamegraph](../../interfaces/third-party/gui/#clickhouse-flamegraph) и [speedscope](https://github.com/laplab/clickhouse-speedscope).
+
+## Пример {#example}
+
+В этом примере мы:
+
+- Фильтруем данные `trace_log` по идентификатору запроса и текущей дате.
+
+- Агрегируем по трассировке стека.
+
+- С помощью функций интроспекции получаем отчёт, содержащий:
+
+    - имена символов и соответствующие им функции исходного кода;
+    - расположение исходного кода этих функций.
+
+
+
+``` sql
+SELECT
+    count(),
+    arrayStringConcat(arrayMap(x -> concat(demangle(addressToSymbol(x)), '\n    ', addressToLine(x)), trace), '\n') AS sym
+FROM system.trace_log
+WHERE (query_id = 'ebca3574-ad0a-400a-9cbc-dca382f5998c') AND (event_date = today())
+GROUP BY trace
+ORDER BY count() DESC
+LIMIT 10
+```
+
+``` text
+{% include "operations/performance/sampling_query_profiler_example_result.txt" %}
+```
diff --git a/docs/ru/operations/performance/sampling_query_profiler.md b/docs/ru/operations/performance/sampling_query_profiler.md
deleted file mode 100644
index 25368fcd883..00000000000
--- a/docs/ru/operations/performance/sampling_query_profiler.md
+++ /dev/null
@@ -1,61 +0,0 @@
----
-en_copy: true
----
-
-# Sampling Query Profiler {#sampling-query-profiler}
-
-ClickHouse runs sampling profiler that allows analyzing query execution. Using profiler you can find source code routines that used the most frequently during query execution. You can trace CPU time and wall-clock time spent including idle time.
-
-To use profiler:
-
-- Setup the [trace\_log](../server_settings/settings.md#server_settings-trace_log) section of the server configuration.
-
-    This section configures the [trace\_log](../system_tables.md#system_tables-trace_log) system table containing the results of the profiler functioning. It is configured by default. Remember that data in this table is valid only for a running server. After the server restart, ClickHouse doesn’t clean up the table and all the stored virtual memory address may become invalid.
-
-- Setup the [query\_profiler\_cpu\_time\_period\_ns](../settings/settings.md#query_profiler_cpu_time_period_ns) or [query\_profiler\_real\_time\_period\_ns](../settings/settings.md#query_profiler_real_time_period_ns) settings. Both settings can be used simultaneously.
-
-    These settings allow you to configure profiler timers. As these are the session settings, you can get different sampling frequency for the whole server, individual users or user profiles, for your interactive session, and for each individual query.
-
-The default sampling frequency is one sample per second and both CPU and real timers are enabled. This frequency allows collecting enough information about ClickHouse cluster. At the same time, working with this frequency, profiler doesn’t affect ClickHouse server’s performance. If you need to profile each individual query try to use higher sampling frequency.
-
-To analyze the `trace_log` system table:
-
-- Install the `clickhouse-common-static-dbg` package. See [Install from DEB Packages](../../getting_started/install.md#install-from-deb-packages).
-
-- Allow introspection functions by the [allow\_introspection\_functions](../settings/settings.md#settings-allow_introspection_functions) setting.
- - For security reasons, introspection functions are disabled by default. - -- Use the `addressToLine`, `addressToSymbol` and `demangle` [introspection functions](../../query_language/functions/introspection.md) to get function names and their positions in ClickHouse code. To get a profile for some query, you need to aggregate data from the `trace_log` table. You can aggregate data by individual functions or by the whole stack traces. - -If you need to visualize `trace_log` info, try [flamegraph](../../interfaces/third-party/gui/#clickhouse-flamegraph) and [speedscope](https://github.com/laplab/clickhouse-speedscope). - -## Example {#example} - -In this example we: - -- Filtering `trace_log` data by a query identifier and the current date. - -- Aggregating by stack trace. - -- Using introspection functions, we will get a report of: - - - Names of symbols and corresponding source code functions. - - Source code locations of these functions. - - - -``` sql -SELECT - count(), - arrayStringConcat(arrayMap(x -> concat(demangle(addressToSymbol(x)), '\n ', addressToLine(x)), trace), '\n') AS sym -FROM system.trace_log -WHERE (query_id = 'ebca3574-ad0a-400a-9cbc-dca382f5998c') AND (event_date = today()) -GROUP BY trace -ORDER BY count() DESC -LIMIT 10 -``` - -``` text -{% include "operations/performance/sampling_query_profiler_example_result.txt" %} -``` diff --git a/docs/ru/operations/performance/sampling_query_profiler_example_result.txt b/docs/ru/operations/performance/sampling_query_profiler_example_result.txt index a5f6d71ca95..56c2fdf9c65 100644 --- a/docs/ru/operations/performance/sampling_query_profiler_example_result.txt +++ b/docs/ru/operations/performance/sampling_query_profiler_example_result.txt @@ -1,7 +1,3 @@ ---- -en_copy: true ---- - Row 1: ────── count(): 6344 diff --git a/docs/ru/operations/performance_test.md b/docs/ru/operations/performance_test.md index ae4c5752703..391bcddd412 100644 --- a/docs/ru/operations/performance_test.md +++ b/docs/ru/operations/performance_test.md @@ -1,18 +1,19 @@ --- -en_copy: true +machine_translated: true +machine_translated_rev: 1cd5f0028d917696daf71ac1c9ee849c99c1d5c8 --- -# How To Test Your Hardware With ClickHouse {#how-to-test-your-hardware-with-clickhouse} +# Как протестировать ваше оборудование с помощью ClickHouse {#how-to-test-your-hardware-with-clickhouse} -With this instruction you can run basic ClickHouse performance test on any server without installation of ClickHouse packages. +С помощью этой инструкции вы можете запустить базовый тест производительности ClickHouse на любом сервере без установки пакетов ClickHouse. -1. Go to “commits” page: https://github.com/ClickHouse/ClickHouse/commits/master +1. Перейдите на страницу «commits»: https://github.com/ClickHouse/ClickHouse/commits/master -2. Click on the first green check mark or red cross with green “ClickHouse Build Check” and click on the “Details” link near “ClickHouse Build Check”. +2. Нажмите на первую зеленую галочку или красный крестик с зеленой пометкой «ClickHouse Build Check» и перейдите по ссылке «Details» рядом с «ClickHouse Build Check». -3. Copy the link to “clickhouse” binary for amd64 or aarch64. +3. Скопируйте ссылку на бинарный файл «clickhouse» для amd64 или aarch64. -4. ssh to the server and download it with wget: +4. Зайдите на сервер по ssh и скачайте файл с помощью wget: @@ -23,7 +24,7 @@ With this instruction you can run basic ClickHouse performance test on any serve # Then do: chmod a+x clickhouse -1. Download configs: +1. 
Скачайте конфигурационные файлы: @@ -33,7 +34,7 @@ With this instruction you can run basic ClickHouse performance test on any serve wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.d/path.xml -O config.d/path.xml wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.d/log_to_console.xml -O config.d/log_to_console.xml -1. Download benchmark files: +1. Скачайте файлы бенчмарка: @@ -41,7 +42,7 @@ With this instruction you can run basic ClickHouse performance test on any serve chmod a+x benchmark-new.sh wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/clickhouse/queries.sql -1. Download test data according to the [Yandex.Metrica dataset](../getting_started/example_datasets/metrica.md) instruction (“hits” table containing 100 million rows). +1. Загрузите тестовые данные в соответствии с инструкцией [Яндекс.Метрика](../getting_started/example_datasets/metrica.md) (таблица «hits», содержащая 100 миллионов строк). @@ -49,31 +50,31 @@ With this instruction you can run basic ClickHouse performance test on any serve tar xvf hits_100m_obfuscated_v1.tar.xz -C . mv hits_100m_obfuscated_v1/* . -1. Run the server: +1. Запустите сервер: ./clickhouse server -1. Check the data: ssh to the server in another terminal +1. Проверьте данные: зайдите по ssh на сервер в другом терминале ./clickhouse client --query "SELECT count() FROM hits_100m_obfuscated" 100000000 -1. Edit the benchmark-new.sh, change “clickhouse-client” to “./clickhouse client” and add “–max\_memory\_usage 100000000000” parameter. +1. Отредактируйте benchmark-new.sh: замените «clickhouse-client» на «./clickhouse client» и добавьте параметр «–max\_memory\_usage 100000000000». mcedit benchmark-new.sh -1. Run the benchmark: +1. Запустите бенчмарк: ./benchmark-new.sh hits_100m_obfuscated -1. Send the numbers and the info about your hardware configuration to clickhouse-feedback@yandex-team.com +1. Отправьте результаты и информацию о конфигурации вашего оборудования по адресу clickhouse-feedback@yandex-team.com -All the results are published here: https://clickhouse.tech/benchmark\_hardware.html +Все результаты опубликованы здесь: https://clickhouse.tech/benchmark\_hardware.html diff --git a/docs/ru/operations/requirements.md b/docs/ru/operations/requirements.md index 9fafe59343f..e6bc2f15e43 100644 --- a/docs/ru/operations/requirements.md +++ b/docs/ru/operations/requirements.md @@ -17,9 +17,9 @@ ClickHouse реализует параллельную обработку дан - Сложности запросов. - Объёма данных, обрабатываемых в запросах. -Для расчета объёма RAM необходимо оценить размер промежуточных данных для операций [GROUP BY](../query_language/select.md#select-group-by-clause), [DISTINCT](../query_language/select.md#select-distinct), [JOIN](../query_language/select.md#select-join) а также других операций, которыми вы пользуетесь. +Для расчета объёма RAM необходимо оценить размер промежуточных данных для операций [GROUP BY](../sql_reference/statements/select.md#select-group-by-clause), [DISTINCT](../sql_reference/statements/select.md#select-distinct), [JOIN](../sql_reference/statements/select.md#select-join) а также других операций, которыми вы пользуетесь. -ClickHouse может использовать внешнюю память для промежуточных данных. 
Подробнее смотрите в разделе [GROUP BY во внешней памяти](../sql_reference/statements/select.md#select-group-by-in-external-memory). ## Файл подкачки {#fail-podkachki} diff --git a/docs/ru/operations/server_settings/index.md b/docs/ru/operations/server_configuration_parameters/index.md similarity index 93% rename from docs/ru/operations/server_settings/index.md rename to docs/ru/operations/server_configuration_parameters/index.md index dae21c2b475..91deb2973a7 100644 --- a/docs/ru/operations/server_settings/index.md +++ b/docs/ru/operations/server_configuration_parameters/index.md @@ -8,4 +8,4 @@ Перед изучением настроек ознакомьтесь с разделом [Конфигурационные файлы](../configuration_files.md#configuration_files), обратите внимание на использование подстановок (атрибуты `incl` и `optional`). -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/server_settings/) +[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/server_configuration_parameters/) diff --git a/docs/ru/operations/server_settings/settings.md b/docs/ru/operations/server_configuration_parameters/settings.md similarity index 84% rename from docs/ru/operations/server_settings/settings.md rename to docs/ru/operations/server_configuration_parameters/settings.md index a062f13c400..16f00a82016 100644 --- a/docs/ru/operations/server_settings/settings.md +++ b/docs/ru/operations/server_configuration_parameters/settings.md @@ -58,7 +58,7 @@ ClickHouse проверит условия `min_part_size` и `min_part_size_rat База данных по умолчанию. -Перечень баз данных можно получить запросом [SHOW DATABASES](../../query_language/show.md#show-databases). +Перечень баз данных можно получить запросом [SHOW DATABASES](../../operations/server_configuration_parameters/settings.md#show-databases). **Пример** @@ -87,7 +87,7 @@ ClickHouse проверит условия `min_part_size` и `min_part_size_rat - Указывается абсолютным или относительно конфигурационного файла сервера. - Может содержать wildcard-ы \* и ?. -Смотрите также «[Внешние словари](../../query_language/dicts/external_dicts.md)». +Смотрите также «[Внешние словари](../../operations/server_configuration_parameters/settings.md)». **Пример** @@ -111,7 +111,7 @@ ClickHouse проверит условия `min_part_size` и `min_part_size_rat true ``` -## format\_schema\_path {#server_settings-format_schema_path} +## format\_schema\_path {#server_configuration_parameters-format_schema_path} Путь к каталогу со схемами для входных данных. Например со схемами для формата [CapnProto](../../interfaces/formats.md#capnproto). @@ -122,7 +122,7 @@ ClickHouse проверит условия `min_part_size` и `min_part_size_rat format_schemas/ ``` -## graphite {#server_settings-graphite} +## graphite {#server_configuration_parameters-graphite} Отправка данных в [Graphite](https://github.com/graphite-project). @@ -133,10 +133,10 @@ ClickHouse проверит условия `min_part_size` и `min_part_size_rat - interval – Период отправки в секундах. - timeout – Таймаут отправки данных в секундах. - root\_path – Префикс для ключей. -- metrics – Отправка данных из таблицы [system.metrics](../system_tables.md#system_tables-metrics). -- events – Отправка дельты данных, накопленной за промежуток времени из таблицы [system.events](../system_tables.md#system_tables-events). -- events\_cumulative – Отправка суммарных данных из таблицы [system.events](../system_tables.md#system_tables-events). -- asynchronous\_metrics – Отправка данных из таблицы [system.asynchronous\_metrics](../system_tables.md#system_tables-asynchronous_metrics). 
+- metrics – Отправка данных из таблицы [system.metrics](../../operations/server_configuration_parameters/settings.md#system_tables-metrics). +- events – Отправка дельты данных, накопленной за промежуток времени из таблицы [system.events](../../operations/server_configuration_parameters/settings.md#system_tables-events). +- events\_cumulative – Отправка суммарных данных из таблицы [system.events](../../operations/server_configuration_parameters/settings.md#system_tables-events). +- asynchronous\_metrics – Отправка данных из таблицы [system.asynchronous\_metrics](../../operations/server_configuration_parameters/settings.md#system_tables-asynchronous_metrics). Можно определить несколько секций ``, например, для передачи различных данных с различной частотой. @@ -156,11 +156,11 @@ ClickHouse проверит условия `min_part_size` и `min_part_size_rat ``` -## graphite\_rollup {#server_settings-graphite-rollup} +## graphite\_rollup {#server_configuration_parameters-graphite-rollup} Настройка прореживания данных для Graphite. -Подробнее читайте в разделе [GraphiteMergeTree](../table_engines/graphitemergetree.md). +Подробнее читайте в разделе [GraphiteMergeTree](../../operations/server_configuration_parameters/settings.md). **Пример** @@ -188,7 +188,7 @@ ClickHouse проверит условия `min_part_size` и `min_part_size_rat Порт для обращений к серверу по протоколу HTTP(s). -Если указан `https_port`, то требуется конфигурирование [openSSL](#server_settings-openssl). +Если указан `https_port`, то требуется конфигурирование [openSSL](#server_configuration_parameters-openssl). Если указан `http_port`, то настройка openSSL игнорируется, даже если она задана. @@ -198,7 +198,7 @@ ClickHouse проверит условия `min_part_size` и `min_part_size_rat 0000 ``` -## http\_server\_default\_response {#server_settings-http_server_default_response} +## http\_server\_default\_response {#server_configuration_parameters-http_server_default_response} Страница, показываемая по умолчанию, при обращении к HTTP(s) серверу ClickHouse. Значение по умолчанию «Ok.» (с переводом строки на конце). @@ -213,7 +213,7 @@ ClickHouse проверит условия `min_part_size` и `min_part_size_rat ``` -## include\_from {#server_settings-include_from} +## include\_from {#server_configuration_parameters-include_from} Путь к файлу с подстановками. @@ -251,7 +251,7 @@ ClickHouse проверит условия `min_part_size` и `min_part_size_rat ## interserver\_http\_credentials {#server-settings-interserver-http-credentials} -Имя пользователя и пароль, использующиеся для аутентификации при [репликации](../table_engines/replication.md) движками Replicated\*. Это имя пользователя и пароль используются только для взаимодействия между репликами кластера и никак не связаны с аутентификацией клиентов ClickHouse. Сервер проверяет совпадение имени и пароля для соединяющихся с ним реплик, а также использует это же имя и пароль для соединения с другими репликами. Соответственно, эти имя и пароль должны быть прописаны одинаковыми для всех реплик кластера. +Имя пользователя и пароль, использующиеся для аутентификации при [репликации](../../operations/server_configuration_parameters/settings.md) движками Replicated\*. Это имя пользователя и пароль используются только для взаимодействия между репликами кластера и никак не связаны с аутентификацией клиентов ClickHouse. Сервер проверяет совпадение имени и пароля для соединяющихся с ним реплик, а также использует это же имя и пароль для соединения с другими репликами. 
Соответственно, эти имя и пароль должны быть прописаны одинаковыми для всех реплик кластера. По умолчанию аутентификация не используется. Раздел содержит следующие параметры: @@ -278,7 +278,7 @@ ClickHouse проверит условия `min_part_size` и `min_part_size_rat 3 ``` -## listen\_host {#server_settings-listen_host} +## listen\_host {#server_configuration_parameters-listen_host} Ограничение по хостам, с которых может прийти запрос. Если необходимо, чтобы сервер отвечал всем, то надо указать `::`. @@ -289,7 +289,7 @@ ClickHouse проверит условия `min_part_size` и `min_part_size_rat 127.0.0.1 ``` -## logger {#server_settings-logger} +## logger {#server_configuration_parameters-logger} Настройки логирования. @@ -342,7 +342,7 @@ ClickHouse проверит условия `min_part_size` и `min_part_size_rat Можно не указывать, если реплицируемых таблицы не используются. -Подробнее смотрите в разделе «[Создание реплицируемых таблиц](../../operations/table_engines/replication.md)». +Подробнее смотрите в разделе «[Создание реплицируемых таблиц](../../operations/server_configuration_parameters/settings.md)». **Пример** @@ -352,7 +352,7 @@ ClickHouse проверит условия `min_part_size` и `min_part_size_rat ## mark\_cache\_size {#server-mark-cache-size} -Приблизительный размер (в байтах) кэша засечек, используемых движками таблиц семейства [MergeTree](../table_engines/mergetree.md). +Приблизительный размер (в байтах) кэша засечек, используемых движками таблиц семейства [MergeTree](../../operations/server_configuration_parameters/settings.md). Кэш общий для сервера, память выделяется по мере необходимости. @@ -400,7 +400,7 @@ ClickHouse проверит условия `min_part_size` и `min_part_size_rat Ограничение на удаление таблиц. -Если размер таблицы семейства [MergeTree](../table_engines/mergetree.md) превышает `max_table_size_to_drop` (в байтах), то ее нельзя удалить запросом DROP. +Если размер таблицы семейства [MergeTree](../../operations/server_configuration_parameters/settings.md) превышает `max_table_size_to_drop` (в байтах), то ее нельзя удалить запросом DROP. Если таблицу все же необходимо удалить, не перезапуская при этом сервер ClickHouse, то необходимо создать файл `/flags/force_drop_table` и выполнить запрос DROP. @@ -414,9 +414,9 @@ ClickHouse проверит условия `min_part_size` и `min_part_size_rat 0 ``` -## merge\_tree {#server_settings-merge_tree} +## merge\_tree {#server_configuration_parameters-merge_tree} -Тонкая настройка таблиц семейства [MergeTree](../table_engines/mergetree.md). +Тонкая настройка таблиц семейства [MergeTree](../../operations/server_configuration_parameters/settings.md). Подробнее смотрите в заголовочном файле MergeTreeSettings.h. @@ -428,7 +428,7 @@ ClickHouse проверит условия `min_part_size` и `min_part_size_rat ``` -## openSSL {#server_settings-openssl} +## openSSL {#server_configuration_parameters-openssl} Настройки клиента/сервера SSL. @@ -487,17 +487,17 @@ ClickHouse проверит условия `min_part_size` и `min_part_size_rat ``` -## part\_log {#server_settings-part-log} +## part\_log {#server_configuration_parameters-part-log} -Логирование событий, связанных с данными типа [MergeTree](../table_engines/mergetree.md). Например, события добавления или мержа данных. Лог можно использовать для симуляции алгоритмов слияния, чтобы сравнивать их характеристики. Также, можно визуализировать процесс слияния. +Логирование событий, связанных с данными типа [MergeTree](../../operations/server_configuration_parameters/settings.md). Например, события добавления или мержа данных. 
Лог можно использовать для симуляции алгоритмов слияния, чтобы сравнивать их характеристики. Также, можно визуализировать процесс слияния. -Запросы логируются не в отдельный файл, а в таблицу [system.part\_log](../system_tables.md#system_tables-part-log). Вы можете изменить название этой таблицы в параметре `table` (см. ниже). +Запросы логируются не в отдельный файл, а в таблицу [system.part\_log](../../operations/server_configuration_parameters/settings.md#system_tables-part-log). Вы можете изменить название этой таблицы в параметре `table` (см. ниже). При настройке логирования используются следующие параметры: - `database` — имя базы данных; - `table` — имя таблицы; -- `partition_by` — устанавливает [произвольный ключ партиционирования](../../operations/table_engines/custom_partitioning_key.md); +- `partition_by` — устанавливает [произвольный ключ партиционирования](../../operations/server_configuration_parameters/settings.md); - `flush_interval_milliseconds` — период сброса данных из буфера в памяти в таблицу. **Пример** @@ -511,7 +511,7 @@ ClickHouse проверит условия `min_part_size` и `min_part_size_rat ``` -## path {#server_settings-path} +## path {#server_configuration_parameters-path} Путь к каталогу с данными. @@ -524,17 +524,17 @@ ClickHouse проверит условия `min_part_size` и `min_part_size_rat /var/lib/clickhouse/ ``` -## query\_log {#server_settings-query-log} +## query\_log {#server_configuration_parameters-query-log} Настройка логирования запросов, принятых с настройкой [log\_queries=1](../settings/settings.md). -Запросы логируются не в отдельный файл, а в системную таблицу [system.query\_log](../system_tables.md#system_tables-query-log). Вы можете изменить название этой таблицы в параметре `table` (см. ниже). +Запросы логируются не в отдельный файл, а в системную таблицу [system.query\_log](../../operations/server_configuration_parameters/settings.md#system_tables-query-log). Вы можете изменить название этой таблицы в параметре `table` (см. ниже). При настройке логирования используются следующие параметры: - `database` — имя базы данных; - `table` — имя таблицы, куда будет записываться лог; -- `partition_by` — [произвольный ключ партиционирования](../../operations/table_engines/custom_partitioning_key.md) для таблицы с логами; +- `partition_by` — [произвольный ключ партиционирования](../../operations/server_configuration_parameters/settings.md) для таблицы с логами; - `flush_interval_milliseconds` — период сброса данных из буфера в памяти в таблицу. Если таблица не существует, то ClickHouse создаст её. Если структура журнала запросов изменилась при обновлении сервера ClickHouse, то таблица со старой структурой переименовывается, а новая таблица создается автоматически. @@ -550,17 +550,17 @@ ClickHouse проверит условия `min_part_size` и `min_part_size_rat ``` -## query\_thread\_log {#server_settings-query-thread-log} +## query\_thread\_log {#server_configuration_parameters-query-thread-log} Настройка логирования потоков выполнения запросов, принятых с настройкой [log\_query\_threads=1](../settings/settings.md#settings-log-query-threads). -Запросы логируются не в отдельный файл, а в системную таблицу [system.query\_thread\_log](../system_tables.md#system_tables-query-thread-log). Вы можете изменить название этой таблицы в параметре `table` (см. ниже). +Запросы логируются не в отдельный файл, а в системную таблицу [system.query\_thread\_log](../../operations/server_configuration_parameters/settings.md#system_tables-query-thread-log). 
Вы можете изменить название этой таблицы в параметре `table` (см. ниже). При настройке логирования используются следующие параметры: - `database` — имя базы данных; - `table` — имя таблицы, куда будет записываться лог; -- `partition_by` — [произвольный ключ партиционирования](../../operations/table_engines/custom_partitioning_key.md) для таблицы с логами; +- `partition_by` — [произвольный ключ партиционирования](../../operations/server_configuration_parameters/settings.md) для таблицы с логами; - `flush_interval_milliseconds` — период сброса данных из буфера в памяти в таблицу. Если таблица не существует, то ClickHouse создаст её. Если структура журнала запросов изменилась при обновлении сервера ClickHouse, то таблица со старой структурой переименовывается, а новая таблица создается автоматически. @@ -576,15 +576,15 @@ ClickHouse проверит условия `min_part_size` и `min_part_size_rat ``` -## trace\_log {#server_settings-trace_log} +## trace\_log {#server_configuration_parameters-trace_log} -Settings for the [trace\_log](../system_tables.md#system_tables-trace_log) system table operation. +Settings for the [trace\_log](../../operations/server_configuration_parameters/settings.md#system_tables-trace_log) system table operation. Parameters: - `database` — Database for storing a table. - `table` — Table name. -- `partition_by` — [Custom partitioning key](../../operations/table_engines/custom_partitioning_key.md) for a system table. +- `partition_by` — [Custom partitioning key](../../operations/server_configuration_parameters/settings.md) for a system table. - `flush_interval_milliseconds` — Interval for flushing data from the buffer in memory to the table. The default server configuration file `config.xml` contains the following settings section: @@ -600,7 +600,7 @@ The default server configuration file `config.xml` contains the following settin ## remote\_servers {#server-settings-remote-servers} -Конфигурация кластеров, которые использует движок таблиц [Distributed](../../operations/table_engines/distributed.md) и табличная функция `cluster`. +Конфигурация кластеров, которые использует движок таблиц [Distributed](../../operations/server_configuration_parameters/settings.md) и табличная функция `cluster`. **Пример** @@ -614,7 +614,7 @@ The default server configuration file `config.xml` contains the following settin - [skip\_unavailable\_shards](../settings/settings.md#settings-skip_unavailable_shards) -## timezone {#server_settings-timezone} +## timezone {#server_configuration_parameters-timezone} Временная зона сервера. @@ -628,7 +628,7 @@ The default server configuration file `config.xml` contains the following settin Europe/Moscow ``` -## tcp\_port {#server_settings-tcp_port} +## tcp\_port {#server_configuration_parameters-tcp_port} Порт для взаимодействия с клиентами по протоколу TCP. @@ -638,9 +638,9 @@ The default server configuration file `config.xml` contains the following settin 9000 ``` -## tcp\_port\_secure {#server_settings-tcp_port-secure} +## tcp\_port\_secure {#server_configuration_parameters-tcp_port-secure} -TCP порт для защищённого обмена данными с клиентами. Используйте с настройкой [OpenSSL](#server_settings-openssl). +TCP порт для защищённого обмена данными с клиентами. Используйте с настройкой [OpenSSL](#server_configuration_parameters-openssl). 
**Возможные значения** @@ -652,7 +652,7 @@ TCP порт для защищённого обмена данными с кли 9440 ``` -## mysql\_port {#server_settings-mysql_port} +## mysql\_port {#server_configuration_parameters-mysql_port} Порт для взаимодействия с клиентами по протоколу MySQL. @@ -677,7 +677,7 @@ TCP порт для защищённого обмена данными с кли ## uncompressed\_cache\_size {#server-settings-uncompressed_cache_size} -Размер кеша (в байтах) для несжатых данных, используемых движками таблиц семейства [MergeTree](../table_engines/mergetree.md). +Размер кеша (в байтах) для несжатых данных, используемых движками таблиц семейства [MergeTree](../../operations/server_configuration_parameters/settings.md). Кеш единый для сервера. Память выделяется по требованию. Кеш используется в том случае, если включена опция [use\_uncompressed\_cache](../settings/settings.md). @@ -689,9 +689,9 @@ TCP порт для защищённого обмена данными с кли 8589934592 ``` -## user\_files\_path {#server_settings-user_files_path} +## user\_files\_path {#server_configuration_parameters-user_files_path} -Каталог с пользовательскими файлами. Используется в табличной функции [file()](../../query_language/table_functions/file.md). +Каталог с пользовательскими файлами. Используется в табличной функции [file()](../../operations/server_configuration_parameters/settings.md). **Пример** @@ -763,7 +763,7 @@ ClickHouse использует ZooKeeper для хранения метадан **Смотрите также** -- [Репликация](../../operations/table_engines/replication.md) +- [Репликация](../../operations/server_configuration_parameters/settings.md) - [ZooKeeper Programmer’s Guide](http://zookeeper.apache.org/doc/current/zookeeperProgrammers.html) ## use\_minimalistic\_part\_header\_in\_zookeeper {#server-settings-use_minimalistic_part_header_in_zookeeper} @@ -772,20 +772,20 @@ ClickHouse использует ZooKeeper для хранения метадан Параметр применяется только к семейству таблиц `MergeTree`. Его можно установить: -- Глобально в разделе [merge\_tree](#server_settings-merge_tree) файла `config.xml`. +- Глобально в разделе [merge\_tree](#server_configuration_parameters-merge_tree) файла `config.xml`. ClickHouse использует этот параметр для всех таблиц на сервере. Вы можете изменить настройку в любое время. Существующие таблицы изменяют свое поведение при изменении параметра. - Для каждой отдельной таблицы. - При создании таблицы укажите соответствующую [настройку движка](../table_engines/mergetree.md#table_engine-mergetree-creating-a-table). Поведение существующей таблицы с установленным параметром не изменяется даже при изменении глобального параметра. + При создании таблицы укажите соответствующую [настройку движка](../../operations/server_configuration_parameters/settings.md#table_engine-mergetree-creating-a-table). Поведение существующей таблицы с установленным параметром не изменяется даже при изменении глобального параметра. **Возможные значения** - 0 — функциональность выключена. - 1 — функциональность включена. -Если `use_minimalistic_part_header_in_zookeeper = 1`, то [реплицированные](../table_engines/replication.md) таблицы хранят заголовки кусков данных в компактном виде, используя только одну `znode`. Если таблица содержит много столбцов, этот метод хранения значительно уменьшает объём данных, хранящихся в Zookeeper. +Если `use_minimalistic_part_header_in_zookeeper = 1`, то [реплицированные](../../operations/server_configuration_parameters/settings.md) таблицы хранят заголовки кусков данных в компактном виде, используя только одну `znode`. 
Если таблица содержит много столбцов, этот метод хранения значительно уменьшает объём данных, хранящихся в Zookeeper. !!! attention "Внимание" После того как вы установили `use_minimalistic_part_header_in_zookeeper = 1`, невозможно откатить ClickHouse до версии, которая не поддерживает этот параметр. Будьте осторожны при обновлении ClickHouse на серверах в кластере. Не обновляйте все серверы сразу. Безопаснее проверять новые версии ClickHouse в тестовой среде или только на некоторых серверах кластера. @@ -808,4 +808,4 @@ ClickHouse использует ZooKeeper для хранения метадан **Значение по умолчанию**: 15. -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/server_settings/settings/) +[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/server_configuration_parameters/settings/) diff --git a/docs/ru/operations/settings/query_complexity.md b/docs/ru/operations/settings/query_complexity.md index 94791d79420..5ad28eed0a8 100644 --- a/docs/ru/operations/settings/query_complexity.md +++ b/docs/ru/operations/settings/query_complexity.md @@ -76,11 +76,11 @@ ## max\_bytes\_before\_external\_group\_by {#settings-max_bytes_before_external_group_by} -Включает или отключает выполнение секций `GROUP BY` во внешней памяти. Смотрите [GROUP BY во внешней памяти](../../query_language/select.md#select-group-by-in-external-memory). +Включает или отключает выполнение секций `GROUP BY` во внешней памяти. Смотрите [GROUP BY во внешней памяти](../../sql_reference/statements/select.md#select-group-by-in-external-memory). Возможные значения: -- Максимальный объём RAM (в байтах), который может использовать отдельная операция [GROUP BY](../../query_language/select.md#select-group-by-clause). +- Максимальный объём RAM (в байтах), который может использовать отдельная операция [GROUP BY](../../sql_reference/statements/select.md#select-group-by-clause). - 0 — `GROUP BY` во внешней памяти отключен. Значение по умолчанию — 0. @@ -228,7 +228,7 @@ FORMAT Null; Ограничивает количество строк в хэш-таблице, используемой при соединении таблиц. -Параметр применяется к операциям [SELECT… JOIN](../../query_language/select.md#select-join) и к движку таблиц [Join](../table_engines/join.md). +Параметр применяется к операциям [SELECT… JOIN](../../sql_reference/statements/select.md#select-join) и к движку таблиц [Join](../../engines/table_engines/special/join.md). Если запрос содержит несколько `JOIN`, то ClickHouse проверяет значение настройки для каждого промежуточного результата. @@ -245,7 +245,7 @@ FORMAT Null; Ограничивает размер (в байтах) хэш-таблицы, используемой при объединении таблиц. -Параметр применяется к операциям [SELECT… JOIN](../../query_language/select.md#select-join) и к движку таблиц [Join](../table_engines/join.md). +Параметр применяется к операциям [SELECT… JOIN](../../sql_reference/statements/select.md#select-join) и к движку таблиц [Join](../../engines/table_engines/special/join.md). Если запрос содержит несколько `JOIN`, то ClickHouse проверяет значение настройки для каждого промежуточного результата. 
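Небольшой набросок для иллюстрации (данные сгенерированы табличной функцией `numbers`, лимит условный): ограничение задаётся на уровне сессии, и при его превышении запрос по умолчанию завершается исключением.

``` sql
-- Ограничиваем хэш-таблицу соединения одним миллионом строк.
SET max_rows_in_join = 1000000;

-- Правая часть соединения укладывается в лимит, поэтому запрос выполнится.
SELECT count()
FROM numbers(10) AS a
ANY LEFT JOIN (SELECT number FROM numbers(10)) AS b
ON a.number = b.number
```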
@@ -274,8 +274,8 @@ FORMAT Null; **Смотрите также** -- [Секция JOIN](../../query_language/select.md#select-join) -- [Движок таблиц Join](../table_engines/join.md) +- [Секция JOIN](../../sql_reference/statements/select.md#select-join) +- [Движок таблиц Join](../../engines/table_engines/special/join.md) ## max\_partitions\_per\_insert\_block {#max-partitions-per-insert-block} diff --git a/docs/ru/operations/settings/settings.md b/docs/ru/operations/settings/settings.md index 42b504086d0..94169b212a1 100644 --- a/docs/ru/operations/settings/settings.md +++ b/docs/ru/operations/settings/settings.md @@ -2,7 +2,7 @@ ## distributed\_product\_mode {#distributed-product-mode} -Изменяет поведение [распределенных подзапросов](../../query_language/select.md). +Изменяет поведение [распределенных подзапросов](../../sql_reference/statements/select.md). ClickHouse применяет настройку в тех случаях, когда запрос содержит произведение распределённых таблиц, т.е. когда запрос к распределенной таблице содержит не-GLOBAL подзапрос к также распределенной таблице. @@ -11,7 +11,7 @@ ClickHouse применяет настройку в тех случаях, ко - Только подзапросы для IN, JOIN. - Только если в секции FROM используется распределённая таблица, содержащая более одного шарда. - Если подзапрос касается распределенной таблицы, содержащей более одного шарда. -- Не используется в случае табличной функции [remote](../../query_language/table_functions/remote.md). +- Не используется в случае табличной функции [remote](../../sql_reference/table_functions/remote.md). Возможные значения: @@ -46,7 +46,7 @@ ClickHouse применяет настройку в тех случаях, ко ## fallback\_to\_stale\_replicas\_for\_distributed\_queries {#settings-fallback_to_stale_replicas_for_distributed_queries} -Форсирует запрос в устаревшую реплику в случае, если актуальные данные недоступны. См. [Репликация](../../operations/table_engines/replication.md). +Форсирует запрос в устаревшую реплику в случае, если актуальные данные недоступны. См. [Репликация](../../engines/table_engines/mergetree_family/replication.md). Из устаревших реплик таблицы ClickHouse выбирает наиболее актуальную. @@ -60,7 +60,7 @@ ClickHouse применяет настройку в тех случаях, ко Работает с таблицами семейства MergeTree. -При `force_index_by_date=1` ClickHouse проверяет, есть ли в запросе условие на ключ даты, которое может использоваться для отсечения диапазонов данных. Если подходящего условия нет - кидается исключение. При этом не проверяется, действительно ли условие уменьшает объём данных для чтения. Например, условие `Date != '2000-01-01'` подходит даже в том случае, когда соответствует всем данным в таблице (т.е. для выполнения запроса требуется full scan). Подробнее про диапазоны данных в таблицах MergeTree читайте в разделе [MergeTree](../table_engines/mergetree.md). +При `force_index_by_date=1` ClickHouse проверяет, есть ли в запросе условие на ключ даты, которое может использоваться для отсечения диапазонов данных. Если подходящего условия нет - кидается исключение. При этом не проверяется, действительно ли условие уменьшает объём данных для чтения. Например, условие `Date != '2000-01-01'` подходит даже в том случае, когда соответствует всем данным в таблице (т.е. для выполнения запроса требуется full scan). Подробнее про диапазоны данных в таблицах MergeTree читайте в разделе [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md). 
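Небольшой пример для иллюстрации (таблица `hits` с ключом даты `Date` — условная):

``` sql
SET force_index_by_date = 1;

-- Запрос без условия на ключ даты завершится исключением.
SELECT count() FROM hits;

-- Запрос с условием на ключ даты выполнится,
-- даже если условие не сокращает объём чтения.
SELECT count() FROM hits WHERE Date != '2000-01-01'
```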
## force\_primary\_key {#settings-force-primary-key} @@ -68,7 +68,7 @@ ClickHouse применяет настройку в тех случаях, ко Работает с таблицами семейства MergeTree. -При `force_primary_key=1` ClickHouse проверяет, есть ли в запросе условие на первичный ключ, которое может использоваться для отсечения диапазонов данных. Если подходящего условия нет - кидается исключение. При этом не проверяется, действительно ли условие уменьшает объём данных для чтения. Подробнее про диапазоны данных в таблицах MergeTree читайте в разделе [MergeTree](../table_engines/mergetree.md). +При `force_primary_key=1` ClickHouse проверяет, есть ли в запросе условие на первичный ключ, которое может использоваться для отсечения диапазонов данных. Если подходящего условия нет - кидается исключение. При этом не проверяется, действительно ли условие уменьшает объём данных для чтения. Подробнее про диапазоны данных в таблицах MergeTree читайте в разделе [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md). ## format\_schema {#format-schema} @@ -129,7 +129,7 @@ ClickHouse применяет настройку в тех случаях, ко ## max\_http\_get\_redirects {#setting-max_http_get_redirects} -Ограничивает максимальное количество переходов по редиректам в таблицах с движком [URL](../table_engines/url.md) при выполнении HTTP запросов методом GET. Настройка применяется для обоих типов таблиц: созданных запросом [CREATE TABLE](../../query_language/create/#create-table-query) и с помощью табличной функции [url](../../query_language/table_functions/url.md). +Ограничивает максимальное количество переходов по редиректам в таблицах с движком [URL](../../engines/table_engines/special/url.md) при выполнении HTTP запросов методом GET. Настройка применяется для обоих типов таблиц: созданных запросом [CREATE TABLE](../../sql_reference/create/#create-table-query) и с помощью табличной функции [url](../../sql_reference/table_functions/url.md). Возможные значения: @@ -165,7 +165,7 @@ ClickHouse применяет настройку в тех случаях, ко ## input\_format\_values\_interpret\_expressions {#settings-input_format_values_interpret_expressions} -Включает или отключает парсер SQL, если потоковый парсер не может проанализировать данные. Этот параметр используется только для формата [Values](../../interfaces/formats.md#data-format-values) при вставке данных. Дополнительные сведения о парсерах читайте в разделе [Синтаксис](../../query_language/syntax.md). +Включает или отключает парсер SQL, если потоковый парсер не может проанализировать данные. Этот параметр используется только для формата [Values](../../interfaces/formats.md#data-format-values) при вставке данных. Дополнительные сведения о парсерах читайте в разделе [Синтаксис](../../sql_reference/syntax.md). Возможные значения: @@ -181,7 +181,7 @@ ClickHouse применяет настройку в тех случаях, ко Пример использования: -Вставим значение типа [DateTime](../../data_types/datetime.md) при разных значения настройки. +Вставим значение типа [DateTime](../../sql_reference/data_types/datetime.md) при разных значениях настройки. ``` sql SET input_format_values_interpret_expressions = 0; @@ -298,7 +298,7 @@ Ok. Выбор парсера для текстового представления дат и времени при обработке входного формата. -Настройка не применяется к [функциям для работы с датой и временем](../../query_language/functions/date_time_functions.md). +Настройка не применяется к [функциям для работы с датой и временем](../../sql_reference/functions/date_time_functions.md). Возможные значения: @@ -314,12 +314,12 @@ Ok. См. 
также: -- [Тип данных DateTime.](../../data_types/datetime.md) -- [Функции для работы с датой и временем.](../../query_language/functions/date_time_functions.md) +- [Тип данных DateTime.](../../sql_reference/data_types/datetime.md) +- [Функции для работы с датой и временем.](../../sql_reference/functions/date_time_functions.md) ## join\_default\_strictness {#settings-join_default_strictness} -Устанавливает строгость по умолчанию для [JOIN](../../query_language/select.md#select-join). +Устанавливает строгость по умолчанию для [JOIN](../../sql_reference/statements/select.md#select-join). Возможные значения @@ -334,7 +334,7 @@ Ok. Изменяет поведение операций, выполняемых со строгостью `ANY`. !!! warning "Внимание" - Настройка применяется только для операций `JOIN`, выполняемых над таблицами с движком [Join](../table_engines/join.md). + Настройка применяется только для операций `JOIN`, выполняемых над таблицами с движком [Join](../../engines/table_engines/special/join.md). Возможные значения: @@ -345,18 +345,18 @@ Ok. См. также: -- [Секция JOIN](../../query_language/select.md#select-join) -- [Движок таблиц Join](../table_engines/join.md) +- [Секция JOIN](../../sql_reference/statements/select.md#select-join) +- [Движок таблиц Join](../../engines/table_engines/special/join.md) - [join\_default\_strictness](#settings-join_default_strictness) ## join\_use\_nulls {#join_use_nulls} -Устанавливает тип поведения [JOIN](../../query_language/select.md). При объединении таблиц могут появиться пустые ячейки. ClickHouse заполняет их по-разному в зависимости от настроек. +Устанавливает тип поведения [JOIN](../../sql_reference/statements/select.md). При объединении таблиц могут появиться пустые ячейки. ClickHouse заполняет их по-разному в зависимости от настроек. Возможные значения - 0 — пустые ячейки заполняются значением по умолчанию соответствующего типа поля. -- 1 — `JOIN` ведёт себя как в стандартном SQL. Тип соответствующего поля преобразуется в [Nullable](../../data_types/nullable.md#data_type-nullable), а пустые ячейки заполняются значениями [NULL](../../query_language/syntax.md). +- 1 — `JOIN` ведёт себя как в стандартном SQL. Тип соответствующего поля преобразуется в [Nullable](../../sql_reference/data_types/nullable.md#data_type-nullable), а пустые ячейки заполняются значениями [NULL](../../sql_reference/syntax.md). Значение по умолчанию: 0. @@ -376,7 +376,7 @@ Ok. ## merge\_tree\_uniform\_read\_distribution {#setting-merge-tree-uniform-read-distribution} -При чтении из таблиц [MergeTree](../table_engines/mergetree.md) ClickHouse использует несколько потоков. Этот параметр включает/выключает равномерное распределение заданий по рабочим потокам. Алгоритм равномерного распределения стремится сделать время выполнения всех потоков примерно равным для одного запроса `SELECT`. +При чтении из таблиц [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md) ClickHouse использует несколько потоков. Этот параметр включает/выключает равномерное распределение заданий по рабочим потокам. Алгоритм равномерного распределения стремится сделать время выполнения всех потоков примерно равным для одного запроса `SELECT`. Возможные значения: @@ -387,7 +387,7 @@ Ok. ## merge\_tree\_min\_rows\_for\_concurrent\_read {#setting-merge-tree-min-rows-for-concurrent-read} -Если количество строк, считываемых из файла таблицы [MergeTree](../table_engines/mergetree.md) превышает `merge_tree_min_rows_for_concurrent_read`, то ClickHouse пытается выполнить одновременное чтение из этого файла в несколько потоков. 
+Если количество строк, считываемых из файла таблицы [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md) превышает `merge_tree_min_rows_for_concurrent_read`, то ClickHouse пытается выполнить одновременное чтение из этого файла в несколько потоков. Возможные значения: @@ -397,7 +397,7 @@ Ok. ## merge\_tree\_min\_bytes\_for\_concurrent\_read {#setting-merge-tree-min-bytes-for-concurrent-read} -Если число байтов, которое должно быть прочитано из одного файла таблицы с движком [MergeTree](../table_engines/mergetree.md), превышает значение `merge_tree_min_bytes_for_concurrent_read`, то ClickHouse выполняет одновременное чтение в несколько потоков из этого файла. +Если число байтов, которое должно быть прочитано из одного файла таблицы с движком [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md), превышает значение `merge_tree_min_bytes_for_concurrent_read`, то ClickHouse выполняет одновременное чтение в несколько потоков из этого файла. Возможное значение: @@ -439,7 +439,7 @@ Ok. Если требуется прочитать более, чем `merge_tree_max_rows_to_use_cache` строк в одном запросе, ClickHouse не используют кэш несжатых блоков. -Кэш несжатых блоков хранит данные, извлечённые при выполнении запросов. ClickHouse использует этот кэш для ускорения ответов на повторяющиеся небольшие запросы. Настройка защищает кэш от замусоривания запросами, для выполнения которых необходимо извлечь большое количество данных. Настройка сервера [uncompressed\_cache\_size](../server_settings/settings.md#server-settings-uncompressed_cache_size) определяет размер кэша несжатых блоков. +Кэш несжатых блоков хранит данные, извлечённые при выполнении запросов. ClickHouse использует этот кэш для ускорения ответов на повторяющиеся небольшие запросы. Настройка защищает кэш от замусоривания запросами, для выполнения которых необходимо извлечь большое количество данных. Настройка сервера [uncompressed\_cache\_size](../server_configuration_parameters/settings.md#server-settings-uncompressed_cache_size) определяет размер кэша несжатых блоков. Возможные значения: @@ -451,7 +451,7 @@ Ok. Если требуется прочитать более, чем `merge_tree_max_bytes_to_use_cache` байтов в одном запросе, ClickHouse не используют кэш несжатых блоков. -Кэш несжатых блоков хранит данные, извлечённые при выполнении запросов. ClickHouse использует кэш для ускорения ответов на повторяющиеся небольшие запросы. Настройка защищает кэш от переполнения. Настройка сервера [uncompressed\_cache\_size](../server_settings/settings.md#server-settings-uncompressed_cache_size) определяет размер кэша несжатых блоков. +Кэш несжатых блоков хранит данные, извлечённые при выполнении запросов. ClickHouse использует кэш для ускорения ответов на повторяющиеся небольшие запросы. Настройка защищает кэш от переполнения. Настройка сервера [uncompressed\_cache\_size](../server_configuration_parameters/settings.md#server-settings-uncompressed_cache_size) определяет размер кэша несжатых блоков. Возможное значение: @@ -476,7 +476,7 @@ ClickHouse использует этот параметр при чтении д Установка логирования запроса. -Запросы, переданные в ClickHouse с этой установкой, логируются согласно правилам конфигурационного параметра сервера [query\_log](../../operations/server_settings/settings.md#server_settings-query-log). +Запросы, переданные в ClickHouse с этой установкой, логируются согласно правилам конфигурационного параметра сервера [query\_log](../../operations/server_configuration_parameters/settings.md#server_configuration_parameters-query-log). 
Пример: @@ -488,7 +488,7 @@ log_queries=1 Установка логирования информации о потоках выполнения запроса. -Лог информации о потоках выполнения запросов, переданных в ClickHouse с этой установкой, записывается согласно правилам конфигурационного параметра сервера [query\_thread\_log](../server_settings/settings.md#server_settings-query-thread-log). +Лог информации о потоках выполнения запросов, переданных в ClickHouse с этой установкой, записывается согласно правилам конфигурационного параметра сервера [query\_thread\_log](../server_configuration_parameters/settings.md#server_configuration_parameters-query-thread-log). Пример: @@ -510,7 +510,7 @@ log_query_threads=1 ## max\_replica\_delay\_for\_distributed\_queries {#settings-max_replica_delay_for_distributed_queries} -Отключает отстающие реплики при распределенных запросах. См. [Репликация](../../operations/table_engines/replication.md). +Отключает отстающие реплики при распределенных запросах. См. [Репликация](../../engines/table_engines/mergetree_family/replication.md). Устанавливает время в секундах. Если отставание реплики больше установленного значения, то реплика не используется. @@ -555,7 +555,7 @@ log_query_threads=1 ## min\_compress\_block\_size {#min-compress-block-size} -Для таблиц типа [MergeTree](../table_engines/mergetree.md). В целях уменьшения задержек при обработке запросов, блок сжимается при записи следующей засечки, если его размер не меньше min\_compress\_block\_size. По умолчанию - 65 536. +Для таблиц типа [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md). В целях уменьшения задержек при обработке запросов, блок сжимается при записи следующей засечки, если его размер не меньше min\_compress\_block\_size. По умолчанию - 65 536. Реальный размер блока, если несжатых данных меньше max\_compress\_block\_size, будет не меньше этого значения и не меньше объёма данных на одну засечку. @@ -634,7 +634,7 @@ log_query_threads=1 Использовать ли кэш разжатых блоков. Принимает 0 или 1. По умолчанию - 0 (выключено). -Использование кэша несжатых блоков (только для таблиц семейства MergeTree) может существенно сократить задержку и увеличить пропускную способность при работе с большим количеством коротких запросов. Включите эту настройку для пользователей, от которых идут частые короткие запросы. Также обратите внимание на конфигурационный параметр [uncompressed\_cache\_size](../server_settings/settings.md#server-settings-uncompressed_cache_size) (настраивается только в конфигурационном файле) – размер кэша разжатых блоков. По умолчанию - 8 GiB. Кэш разжатых блоков заполняется по мере надобности, а наиболее невостребованные данные автоматически удаляются. +Использование кэша несжатых блоков (только для таблиц семейства MergeTree) может существенно сократить задержку и увеличить пропускную способность при работе с большим количеством коротких запросов. Включите эту настройку для пользователей, от которых идут частые короткие запросы. Также обратите внимание на конфигурационный параметр [uncompressed\_cache\_size](../server_configuration_parameters/settings.md#server-settings-uncompressed_cache_size) (настраивается только в конфигурационном файле) – размер кэша разжатых блоков. По умолчанию - 8 GiB. Кэш разжатых блоков заполняется по мере надобности, а наиболее невостребованные данные автоматически удаляются. Для запросов, читающих хоть немного приличный объём данных (миллион строк и больше), кэш разжатых блоков автоматически выключается, чтобы оставить место для действительно мелких запросов. 
Поэтому, можно держать настройку `use_uncompressed_cache` всегда выставленной в 1. @@ -850,7 +850,7 @@ ClickHouse генерирует исключение Значение по умолчанию: 1. -По умолчанию блоки, вставляемые в реплицируемые таблицы оператором `INSERT`, дедуплицируются (см. [Репликация данных](../table_engines/replication.md)). +По умолчанию блоки, вставляемые в реплицируемые таблицы оператором `INSERT`, дедуплицируются (см. [Репликация данных](../../engines/table_engines/mergetree_family/replication.md)). ## deduplicate\_blocks\_in\_dependent\_materialized\_views {#settings-deduplicate-blocks-in-dependent-materialized-views} @@ -869,15 +869,15 @@ ClickHouse генерирует исключение ## count\_distinct\_implementation {#settings-count_distinct_implementation} -Задаёт, какая из функций `uniq*` используется при выполнении конструкции [COUNT(DISTINCT …)](../../query_language/agg_functions/reference.md#agg_function-count). +Задаёт, какая из функций `uniq*` используется при выполнении конструкции [COUNT(DISTINCT …)](../../sql_reference/aggregate_functions/reference.md#agg_function-count). Возможные значения: -- [uniq](../../query_language/agg_functions/reference.md#agg_function-uniq) -- [uniqCombined](../../query_language/agg_functions/reference.md#agg_function-uniqcombined) -- [uniqCombined64](../../query_language/agg_functions/reference.md#agg_function-uniqcombined64) -- [uniqHLL12](../../query_language/agg_functions/reference.md#agg_function-uniqhll12) -- [uniqExact](../../query_language/agg_functions/reference.md#agg_function-uniqexact) +- [uniq](../../sql_reference/aggregate_functions/reference.md#agg_function-uniq) +- [uniqCombined](../../sql_reference/aggregate_functions/reference.md#agg_function-uniqcombined) +- [uniqCombined64](../../sql_reference/aggregate_functions/reference.md#agg_function-uniqcombined64) +- [uniqHLL12](../../sql_reference/aggregate_functions/reference.md#agg_function-uniqhll12) +- [uniqExact](../../sql_reference/aggregate_functions/reference.md#agg_function-uniqexact) Значение по умолчанию: `uniqExact`. @@ -957,7 +957,7 @@ ClickHouse генерирует исключение ## optimize\_throw\_if\_noop {#setting-optimize_throw_if_noop} -Включает или отключает генерирование исключения в в случаях, когда запрос [OPTIMIZE](../../query_language/misc.md#misc_operations-optimize) не выполняет мёрж. +Включает или отключает генерирование исключения в случаях, когда запрос [OPTIMIZE](../../sql_reference/statements/misc.md#misc_operations-optimize) не выполняет мёрж. По умолчанию, `OPTIMIZE` завершается успешно и в тех случаях, когда он ничего не сделал. Настройка позволяет отделить подобные случаи и включает генерирование исключения с поясняющим сообщением. @@ -970,7 +970,7 @@ ClickHouse генерирует исключение ## distributed\_directory\_monitor\_sleep\_time\_ms {#distributed_directory_monitor_sleep_time_ms} -Основной интервал отправки данных движком таблиц [Distributed](../table_engines/distributed.md). Фактический интервал растёт экспоненциально при возникновении ошибок. +Основной интервал отправки данных движком таблиц [Distributed](../../engines/table_engines/special/distributed.md). Фактический интервал растёт экспоненциально при возникновении ошибок. Возможные значения: @@ -980,7 +980,7 @@ ClickHouse генерирует исключение ## distributed\_directory\_monitor\_max\_sleep\_time\_ms {#distributed_directory_monitor_max_sleep_time_ms} -Максимальный интервал отправки данных движком таблиц [Distributed](../table_engines/distributed.md). 
Ограничивает экпоненциальный рост интервала, установленого настройкой [distributed\_directory\_monitor\_sleep\_time\_ms](#distributed_directory_monitor_sleep_time_ms). +Максимальный интервал отправки данных движком таблиц [Distributed](../../engines/table_engines/special/distributed.md). Ограничивает экспоненциальный рост интервала, установленного настройкой [distributed\_directory\_monitor\_sleep\_time\_ms](#distributed_directory_monitor_sleep_time_ms). Возможные значения: @@ -992,7 +992,7 @@ ClickHouse генерирует исключение Включает/выключает пакетную отправку вставленных данных. -Если пакетная отправка включена, то движок таблиц [Distributed](../table_engines/distributed.md) вместо того, чтобы отправлять каждый файл со вставленными данными по отдельности, старается отправить их все за одну операцию. Пакетная отправка улучшает производительность кластера за счет более оптимального использования ресурсов сервера и сети. +Если пакетная отправка включена, то движок таблиц [Distributed](../../engines/table_engines/special/distributed.md) вместо того, чтобы отправлять каждый файл со вставленными данными по отдельности, старается отправить их все за одну операцию. Пакетная отправка улучшает производительность кластера за счет более оптимального использования ресурсов сервера и сети. Возможные значения: @@ -1018,7 +1018,7 @@ ClickHouse генерирует исключение ## query\_profiler\_real\_time\_period\_ns {#query_profiler_real_time_period_ns} -Sets the period for a real clock timer of the [query profiler](../../operations/performance/sampling_query_profiler.md). Real clock timer counts wall-clock time. +Sets the period for a real clock timer of the [query profiler](../../operations/optimizing_performance/sampling_query_profiler.md). Real clock timer counts wall-clock time. Possible values: @@ -1031,7 +1031,7 @@ Possible values: - 0 for turning off the timer. -Type: [UInt64](../../data_types/int_uint.md). +Type: [UInt64](../../sql_reference/data_types/int_uint.md). Default value: 1000000000 nanoseconds (once a second). @@ -1041,7 +1041,7 @@ See also: ## query\_profiler\_cpu\_time\_period\_ns {#query_profiler_cpu_time_period_ns} -Sets the period for a CPU clock timer of the [query profiler](../../operations/performance/sampling_query_profiler.md). This timer counts only CPU time. +Sets the period for a CPU clock timer of the [query profiler](../../operations/optimizing_performance/sampling_query_profiler.md). This timer counts only CPU time. Possible values: @@ -1054,7 +1054,7 @@ Possible values: - 0 for turning off the timer. -Type: [UInt64](../../data_types/int_uint.md). +Type: [UInt64](../../sql_reference/data_types/int_uint.md). Default value: 1000000000 nanoseconds. @@ -1064,7 +1064,7 @@ See also: ## allow\_introspection\_functions {#settings-allow_introspection_functions} -Enables of disables [introspections functions](../../query_language/functions/introspection.md) for query profiling. +Enables or disables [introspection functions](../../sql_reference/functions/introspection.md) for query profiling. Possible values: @@ -1075,7 +1075,7 @@ Default value: 0. 
**See Also** -- [Sampling Query Profiler](../performance/sampling_query_profiler.md) +- [Sampling Query Profiler](../optimizing_performance/sampling_query_profiler.md) - System table [trace\_log](../../operations/system_tables.md#system_tables-trace_log) [Оригинальная статья](https://clickhouse.tech/docs/ru/operations/settings/settings/) diff --git a/docs/ru/operations/settings/settings_users.md b/docs/ru/operations/settings/settings_users.md index b010358e547..1719f21a031 100644 --- a/docs/ru/operations/settings/settings_users.md +++ b/docs/ru/operations/settings/settings_users.md @@ -139,6 +139,6 @@ ``` -Элемент `filter` содержать любое выражение, возвращающее значение типа [UInt8](../../data_types/int_uint.md). Обычно он содержит сравнения и логические операторы. Строки `database_name.table1`, для которых фильтр возвращает 0 не выдаются пользователю. Фильтрация несовместима с операциями `PREWHERE` и отключает оптимизацию `WHERE→PREWHERE`. +Элемент `filter` может содержать любое выражение, возвращающее значение типа [UInt8](../../sql_reference/data_types/int_uint.md). Обычно он содержит сравнения и логические операторы. Строки `database_name.table1`, для которых фильтр возвращает 0, не выдаются пользователю. Фильтрация несовместима с операциями `PREWHERE` и отключает оптимизацию `WHERE→PREWHERE`. [Оригинальная статья](https://clickhouse.tech/docs/ru/operations/settings/settings_users/) diff --git a/docs/ru/operations/system_tables.md b/docs/ru/operations/system_tables.md index fac1e63264b..dfc15e6281a 100644 --- a/docs/ru/operations/system_tables.md +++ b/docs/ru/operations/system_tables.md @@ -12,8 +12,8 @@ Столбцы: -- `metric` ([String](../data_types/string.md)) — название метрики. -- `value` ([Float64](../data_types/float.md)) — значение метрики. +- `metric` ([String](../sql_reference/data_types/string.md)) — название метрики. +- `value` ([Float64](../sql_reference/data_types/float.md)) — значение метрики. **Пример** @@ -63,7 +63,7 @@ user String — имя пользователя, которого использ Содержит информацию о столбцах всех таблиц. -С помощью этой таблицы можно получить информацию аналогично запросу [DESCRIBE TABLE](../query_language/misc.md#misc-describe-table), но для многих таблиц сразу. +С помощью этой таблицы можно получить информацию аналогично запросу [DESCRIBE TABLE](../sql_reference/statements/misc.md#misc-describe-table), но для многих таблиц сразу. Таблица `system.columns` содержит столбцы (тип столбца указан в скобках): @@ -131,41 +131,41 @@ SELECT * FROM system.contributors WHERE name='Olga Khvostikova' ## system.detached\_parts {#system_tables-detached_parts} -Содержит информацию об отсоединённых кусках таблиц семейства [MergeTree](table_engines/mergetree.md). Столбец `reason` содержит причину, по которой кусок был отсоединён. Для кусов, отсоединённых пользователем, `reason` содержит пустую строку. -Такие куски могут быть присоединены с помощью [ALTER TABLE ATTACH PARTITION\|PART](../query_language/query_language/alter/#alter_attach-partition). Остальные столбцы описаны в [system.parts](#system_tables-parts). -Если имя куска некорректно, значения некоторых столбцов могут быть `NULL`. Такие куски могут быть удалены с помощью [ALTER TABLE DROP DETACHED PART](../query_language/query_language/alter/#alter_drop-detached). +Содержит информацию об отсоединённых кусках таблиц семейства [MergeTree](../engines/table_engines/mergetree_family/mergetree.md). Столбец `reason` содержит причину, по которой кусок был отсоединён. Для кусков, отсоединённых пользователем, `reason` содержит пустую строку. 
+Такие куски могут быть присоединены с помощью [ALTER TABLE ATTACH PARTITION\|PART](../sql_reference/alter/#alter_attach-partition). Остальные столбцы описаны в [system.parts](#system_tables-parts). +Если имя куска некорректно, значения некоторых столбцов могут быть `NULL`. Такие куски могут быть удалены с помощью [ALTER TABLE DROP DETACHED PART](../sql_reference/alter/#alter_drop-detached). ## system.dictionaries {#system_tables-dictionaries} -Содержит информацию о [внешних словарях](../query_language/dicts/external_dicts.md). +Содержит информацию о [внешних словарях](../sql_reference/dictionaries/external_dictionaries/external_dicts.md). Столбцы: -- `database` ([String](../data_types/string.md)) — Имя базы данных, в которой находится словарь, созданный с помощью DDL-запроса. Пустая строка для других словарей. -- `name` ([String](../data_types/string.md)) — [Имя словаря](../query_language/dicts/external_dicts_dict.md). -- `status` ([Enum8](../data_types/enum.md)) — Статус словаря. Возможные значения: +- `database` ([String](../sql_reference/data_types/string.md)) — Имя базы данных, в которой находится словарь, созданный с помощью DDL-запроса. Пустая строка для других словарей. +- `name` ([String](../sql_reference/data_types/string.md)) — [Имя словаря](../sql_reference/dictionaries/external_dictionaries/external_dicts_dict.md). +- `status` ([Enum8](../sql_reference/data_types/enum.md)) — Статус словаря. Возможные значения: - `NOT_LOADED` — Словарь не загружен, потому что не использовался. - `LOADED` — Словарь загружен успешно. - `FAILED` — Словарь не загружен в результате ошибки. - `LOADING` — Словарь в процессе загрузки. - - `LOADED_AND_RELOADING` — Словарь загружен успешно, сейчас перезагружается (частые причины: запрос [SYSTEM RELOAD DICTIONARY](../query_language/system.md#query_language-system-reload-dictionary), таймаут, изменение настроек словаря). + - `LOADED_AND_RELOADING` — Словарь загружен успешно, сейчас перезагружается (частые причины: запрос [SYSTEM RELOAD DICTIONARY](../sql_reference/statements/system.md#query_language-system-reload-dictionary), таймаут, изменение настроек словаря). - `FAILED_AND_RELOADING` — Словарь не загружен в результате ошибки, сейчас перезагружается. -- `origin` ([String](../data_types/string.md)) — Путь к конфигурационному файлу, описывающему словарь. -- `type` ([String](../data_types/string.md)) — Тип размещения словаря. [Хранение словарей в памяти](../query_language/dicts/external_dicts_dict_layout.md). -- `key` — [Тип ключа](../query_language/dicts/external_dicts_dict_structure.md#ext_dict_structure-key): Числовой ключ ([UInt64](../data_types/int_uint.md#uint-ranges)) или Составной ключ ([String](../data_types/string.md)) — строка вида "(тип 1, тип 2, ..., тип n)". -- `attribute.names` ([Array](../data_types/array.md)([String](../data_types/string.md))) — Массив [имен атрибутов](../query_language/dicts/external_dicts_dict_structure.md#ext_dict_structure-attributes), предоставляемых справочником. -- `attribute.types` ([Array](../data_types/array.md)([String](../data_types/string.md))) — Соответствующий массив [типов атрибутов](../query_language/dicts/external_dicts_dict_structure.md#ext_dict_structure-attributes), предоставляемых справочником. -- `bytes_allocated` ([UInt64](../data_types/int_uint.md#uint-ranges)) — Объем оперативной памяти, используемый словарем. -- `query_count` ([UInt64](../data_types/int_uint.md#uint-ranges)) — Количество запросов с момента загрузки словаря или с момента последней успешной перезагрузки. 
-- `hit_rate` ([Float64](../data_types/float.md)) — Для cache-словарей — процент закэшированных значений. -- `element_count` ([UInt64](../data_types/int_uint.md#uint-ranges)) — Количество элементов, хранящихся в словаре. -- `load_factor` ([Float64](../data_types/float.md)) — Процент заполнения словаря (для хэшированного словаря — процент заполнения хэш-таблицы). -- `source` ([String](../data_types/string.md)) — Текст, описывающий [источник данных](../query_language/dicts/external_dicts_dict_sources.md) для словаря. -- `lifetime_min` ([UInt64](../data_types/int_uint.md#uint-ranges)) — Минимальное [время обновления](../query_language/dicts/external_dicts_dict_lifetime.md) словаря в памяти, по истечении которого Clickhouse попытается перезагрузить словарь (если задано `invalidate_query`, то только если он изменился). Задается в секундах. -- `lifetime_max` ([UInt64](../data_types/int_uint.md#uint-ranges)) — Максимальное [время обновления](../query_language/dicts/external_dicts_dict_lifetime.md) словаря в памяти, по истечении которого Clickhouse попытается перезагрузить словарь (если задано `invalidate_query`, то только если он изменился). Задается в секундах. -- `loading_start_time` ([DateTime](../data_types/datetime.md)) — Время начала загрузки словаря. -- `loading_duration` ([Float32](../data_types/float.md)) — Время, затраченное на загрузку словаря. -- `last_exception` ([String](../data_types/string.md)) — Текст ошибки, возникающей при создании или перезагрузке словаря, если словарь не удалось создать. +- `origin` ([String](../sql_reference/data_types/string.md)) — Путь к конфигурационному файлу, описывающему словарь. +- `type` ([String](../sql_reference/data_types/string.md)) — Тип размещения словаря. [Хранение словарей в памяти](../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_layout.md). +- `key` — [Тип ключа](../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md#ext_dict_structure-key): Числовой ключ ([UInt64](../sql_reference/data_types/int_uint.md#uint-ranges)) или Составной ключ ([String](../sql_reference/data_types/string.md)) — строка вида "(тип 1, тип 2, ..., тип n)". +- `attribute.names` ([Array](../sql_reference/data_types/array.md)([String](../sql_reference/data_types/string.md))) — Массив [имен атрибутов](../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md#ext_dict_structure-attributes), предоставляемых справочником. +- `attribute.types` ([Array](../sql_reference/data_types/array.md)([String](../sql_reference/data_types/string.md))) — Соответствующий массив [типов атрибутов](../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md#ext_dict_structure-attributes), предоставляемых справочником. +- `bytes_allocated` ([UInt64](../sql_reference/data_types/int_uint.md#uint-ranges)) — Объем оперативной памяти, используемый словарем. +- `query_count` ([UInt64](../sql_reference/data_types/int_uint.md#uint-ranges)) — Количество запросов с момента загрузки словаря или с момента последней успешной перезагрузки. +- `hit_rate` ([Float64](../sql_reference/data_types/float.md)) — Для cache-словарей — процент закэшированных значений. +- `element_count` ([UInt64](../sql_reference/data_types/int_uint.md#uint-ranges)) — Количество элементов, хранящихся в словаре. +- `load_factor` ([Float64](../sql_reference/data_types/float.md)) — Процент заполнения словаря (для хэшированного словаря — процент заполнения хэш-таблицы). 
+- `source` ([String](../sql_reference/data_types/string.md)) — Текст, описывающий [источник данных](../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md) для словаря. +- `lifetime_min` ([UInt64](../sql_reference/data_types/int_uint.md#uint-ranges)) — Минимальное [время обновления](../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_lifetime.md) словаря в памяти, по истечении которого Clickhouse попытается перезагрузить словарь (если задано `invalidate_query`, то только если он изменился). Задается в секундах. +- `lifetime_max` ([UInt64](../sql_reference/data_types/int_uint.md#uint-ranges)) — Максимальное [время обновления](../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_lifetime.md) словаря в памяти, по истечении которого Clickhouse попытается перезагрузить словарь (если задано `invalidate_query`, то только если он изменился). Задается в секундах. +- `loading_start_time` ([DateTime](../sql_reference/data_types/datetime.md)) — Время начала загрузки словаря. +- `loading_duration` ([Float32](../sql_reference/data_types/float.md)) — Время, затраченное на загрузку словаря. +- `last_exception` ([String](../sql_reference/data_types/string.md)) — Текст ошибки, возникающей при создании или перезагрузке словаря, если словарь не удалось создать. **Пример** @@ -202,9 +202,9 @@ SELECT * FROM system.dictionaries Столбцы: -- `event` ([String](../data_types/string.md)) — имя события. -- `value` ([UInt64](../data_types/int_uint.md)) — количество произошедших событий. -- `description` ([String](../data_types/string.md)) — описание события. +- `event` ([String](../sql_reference/data_types/string.md)) — имя события. +- `value` ([UInt64](../sql_reference/data_types/int_uint.md)) — количество произошедших событий. +- `description` ([String](../sql_reference/data_types/string.md)) — описание события. **Пример** @@ -240,7 +240,7 @@ SELECT * FROM system.events LIMIT 5 ## system.graphite\_retentions {#system-graphite-retentions} -Содержит информацию о том, какие параметры [graphite\_rollup](server_settings/settings.md#server_settings-graphite_rollup) используются в таблицах с движками [\*GraphiteMergeTree](table_engines/graphitemergetree.md). +Содержит информацию о том, какие параметры [graphite\_rollup](server_configuration_parameters/settings.md#server_configuration_parameters-graphite_rollup) используются в таблицах с движками [\*GraphiteMergeTree](../engines/table_engines/mergetree_family/graphitemergetree.md). Столбцы: @@ -280,9 +280,9 @@ SELECT * FROM system.events LIMIT 5 Столбцы: -- `metric` ([String](../data_types/string.md)) — название метрики. -- `value` ([Int64](../data_types/int_uint.md)) — значение метрики. -- `description` ([String](../data_types/string.md)) — описание метрики. +- `metric` ([String](../sql_reference/data_types/string.md)) — название метрики. +- `value` ([Int64](../sql_reference/data_types/int_uint.md)) — значение метрики. +- `description` ([String](../sql_reference/data_types/string.md)) — описание метрики. Список поддержанных метрик смотрите в файле [src/Common/CurrentMetrics.cpp](https://github.com/ClickHouse/ClickHouse/blob/master/src/Common/CurrentMetrics.cpp). @@ -389,13 +389,13 @@ CurrentMetric_ReplicatedChecks: 0 ## system.parts {#system_tables-parts} -Содержит информацию о кусках данных таблиц семейства [MergeTree](table_engines/mergetree.md). +Содержит информацию о кусках данных таблиц семейства [MergeTree](../engines/table_engines/mergetree_family/mergetree.md). 
Каждая строка описывает один кусок данных. Столбцы: -- `partition` (`String`) – Имя партиции. Что такое партиция можно узнать из описания запроса [ALTER](../query_language/alter.md#query_language_queries_alter). +- `partition` (`String`) – Имя партиции. Что такое партиция можно узнать из описания запроса [ALTER](../sql_reference/statements/alter.md#sql_reference_queries_alter). Форматы: @@ -446,7 +446,7 @@ CurrentMetric_ReplicatedChecks: 0 - `primary_key_bytes_in_memory_allocated` (`UInt64`) – объём памяти (в байтах) выделенный для размещения первичных ключей. -- `is_frozen` (`UInt8`) – Признак, показывающий существование бэкапа партиции. 1, бэкап есть. 0, бэкапа нет. Смотрите раздел [FREEZE PARTITION](../query_language/alter.md#alter_freeze-partition). +- `is_frozen` (`UInt8`) – Признак, показывающий существование бэкапа партиции. 1, бэкап есть. 0, бэкапа нет. Смотрите раздел [FREEZE PARTITION](../sql_reference/statements/alter.md#alter_freeze-partition). - `database` (`String`) – имя базы данных. @@ -458,11 +458,11 @@ CurrentMetric_ReplicatedChecks: 0 - `disk` (`String`) – имя диска, на котором находится кусок данных. -- `hash_of_all_files` (`String`) – значение [sipHash128](../query_language/functions/hash_functions.md#hash_functions-siphash128) для сжатых файлов. +- `hash_of_all_files` (`String`) – значение [sipHash128](../sql_reference/functions/hash_functions.md#hash_functions-siphash128) для сжатых файлов. -- `hash_of_uncompressed_files` (`String`) – значение [sipHash128](../query_language/functions/hash_functions.md#hash_functions-siphash128) несжатых файлов (файлы с засечками, первичным ключом и пр.) +- `hash_of_uncompressed_files` (`String`) – значение [sipHash128](../sql_reference/functions/hash_functions.md#hash_functions-siphash128) несжатых файлов (файлы с засечками, первичным ключом и пр.) -- `uncompressed_hash_of_compressed_files` (`String`) – значение [sipHash128](../query_language/functions/hash_functions.md#hash_functions-siphash128) данных в сжатых файлах как если бы они были разжатыми. +- `uncompressed_hash_of_compressed_files` (`String`) – значение [sipHash128](../sql_reference/functions/hash_functions.md#hash_functions-siphash128) данных в сжатых файлах как если бы они были разжатыми. - `bytes` (`UInt64`) – алиас для `bytes_on_disk`. @@ -470,9 +470,9 @@ CurrentMetric_ReplicatedChecks: 0 ## system.part\_log {#system_tables-part-log} -Системная таблица `system.part_log` создается только в том случае, если задана серверная настройка [part\_log](server_settings/settings.md#server_settings-part-log). +Системная таблица `system.part_log` создается только в том случае, если задана серверная настройка [part\_log](server_configuration_parameters/settings.md#server_configuration_parameters-part-log). -Содержит информацию о всех событиях, произошедших с [кусками данных](table_engines/custom_partitioning_key.md) таблиц семейства [MergeTree](table_engines/mergetree.md) (например, события добавления, удаления или слияния данных). +Содержит информацию о всех событиях, произошедших с [кусками данных](../engines/table_engines/mergetree_family/custom_partitioning_key.md) таблиц семейства [MergeTree](../engines/table_engines/mergetree_family/mergetree.md) (например, события добавления, удаления или слияния данных). Столбцы: @@ -480,7 +480,7 @@ CurrentMetric_ReplicatedChecks: 0 - `NEW_PART` — вставка нового куска. - `MERGE_PARTS` — слияние кусков. - `DOWNLOAD_PART` — загрузка с реплики. 
- - `REMOVE_PART` — удаление или отсоединение из таблицы с помощью [DETACH PARTITION](../query_language/alter.md#alter_detach-partition).
+ - `REMOVE_PART` — удаление или отсоединение из таблицы с помощью [DETACH PARTITION](../sql_reference/statements/alter.md#alter_detach-partition).
 - `MUTATE_PART` — изменение куска.
 - `MOVE_PART` — перемещение куска между дисками.
- `event_date` (Date) — дата события.

@@ -524,7 +524,7 @@

!!! note "Внимание"
    Таблица не содержит входных данных для запросов `INSERT`.

-ClickHouse создаёт таблицу только в том случае, когда установлен конфигурационный параметр сервера [query\_log](server_settings/settings.md#server_settings-query-log). Параметр задаёт правила ведения лога, такие как интервал логирования или имя таблицы, в которую будут логгироваться запросы.
+ClickHouse создаёт таблицу только в том случае, когда установлен конфигурационный параметр сервера [query\_log](server_configuration_parameters/settings.md#server_configuration_parameters-query-log). Параметр задаёт правила ведения лога, такие как интервал логирования или имя таблицы, в которую будут логироваться запросы.

Чтобы включить логирование, задайте значение параметра [log\_queries](settings/settings.md#settings-log-queries) равным 1. Подробности смотрите в разделе [Настройки](settings/settings.md).

@@ -594,14 +594,14 @@ ClickHouse создаёт таблицу только в том случае, к
2. Если во время обработки запроса произошла ошибка, создаются два события с типами 1 и 4.
3. Если ошибка произошла до запуска запроса, создается одно событие с типом 3.

-По умолчанию, строки добавляются в таблицу логирования с интервалом в 7,5 секунд. Можно задать интервал в конфигурационном параметре сервера [query\_log](server_settings/settings.md#server_settings-query-log) (смотрите параметр `flush_interval_milliseconds`). Чтобы принудительно записать логи из буффера памяти в таблицу, используйте запрос `SYSTEM FLUSH LOGS`.
+По умолчанию, строки добавляются в таблицу логирования с интервалом в 7,5 секунд. Можно задать интервал в конфигурационном параметре сервера [query\_log](server_configuration_parameters/settings.md#server_configuration_parameters-query-log) (смотрите параметр `flush_interval_milliseconds`). Чтобы принудительно записать логи из буфера памяти в таблицу, используйте запрос `SYSTEM FLUSH LOGS`.

Если таблицу удалить вручную, она пересоздастся автоматически «на лету». При этом все логи на момент удаления таблицы будут удалены.

!!! note "Примечание"
    Срок хранения логов не ограничен. Логи не удаляются из таблицы автоматически. Вам необходимо самостоятельно организовать удаление устаревших логов.

-Можно указать произвольный ключ партиционирования для таблицы `system.query_log` в конфигурации [query\_log](server_settings/settings.md#server_settings-query-log) (параметр `partition_by`).
+Можно указать произвольный ключ партиционирования для таблицы `system.query_log` в конфигурации [query\_log](server_configuration_parameters/settings.md#server_configuration_parameters-query-log) (параметр `partition_by`).

## system.query\_log {#system_tables-query_log}

Contains information about execution of queries. For each query, you can see processing start time, duration of processing, error messages and other information.

!!! note "Note"
    The table doesn’t contain input data for `INSERT` queries.

-ClickHouse creates this table only if the [query\_log](server_settings/settings.md#server_settings-query-log) server parameter is specified. This parameter sets the logging rules, such as the logging interval or the name of the table the queries will be logged in.
+ClickHouse creates this table only if the [query\_log](server_configuration_parameters/settings.md#server_configuration_parameters-query-log) server parameter is specified. This parameter sets the logging rules, such as the logging interval or the name of the table the queries will be logged in.

To enable query logging, set the [log\_queries](settings/settings.md#settings-log-queries) parameter to 1. For details, see the [Settings](settings/settings.md) section.

@@ -680,19 +680,19 @@ Each query creates one or two rows in the `query_log` table, depending on the st
2. If an error occurred during query processing, two events with types 1 and 4 are created.
3. If an error occurred before launching the query, a single event with type 3 is created.

-By default, logs are added to the table at intervals of 7.5 seconds. You can set this interval in the [query\_log](server_settings/settings.md#server_settings-query-log) server setting (see the `flush_interval_milliseconds` parameter). To flush the logs forcibly from the memory buffer into the table, use the `SYSTEM FLUSH LOGS` query.
+By default, logs are added to the table at intervals of 7.5 seconds. You can set this interval in the [query\_log](server_configuration_parameters/settings.md#server_configuration_parameters-query-log) server setting (see the `flush_interval_milliseconds` parameter). To flush the logs forcibly from the memory buffer into the table, use the `SYSTEM FLUSH LOGS` query.

When the table is deleted manually, it will be automatically created on the fly. Note that all the previous logs will be deleted.

!!! note "Note"
    The storage period for logs is unlimited. Logs aren’t automatically deleted from the table. You need to organize the removal of outdated logs yourself.

-You can specify an arbitrary partitioning key for the `system.query_log` table in the [query\_log](server_settings/settings.md#server_settings-query-log) server setting (see the `partition_by` parameter).
+You can specify an arbitrary partitioning key for the `system.query_log` table in the [query\_log](server_configuration_parameters/settings.md#server_configuration_parameters-query-log) server setting (see the `partition_by` parameter).

## system.query\_thread\_log {#system_tables-query-thread-log}

Содержит информацию о каждом потоке выполняемых запросов.

-ClickHouse создаёт таблицу только в том случае, когда установлен конфигурационный параметр сервера [query\_thread\_log](server_settings/settings.md#server_settings-query-thread-log). Параметр задаёт правила ведения лога, такие как интервал логирования или имя таблицы, в которую будут логгироваться запросы.
+ClickHouse создаёт таблицу только в том случае, когда установлен конфигурационный параметр сервера [query\_thread\_log](server_configuration_parameters/settings.md#server_configuration_parameters-query-thread-log). Параметр задаёт правила ведения лога, такие как интервал логирования или имя таблицы, в которую будут логироваться запросы.

Чтобы включить логирование, задайте значение параметра [log\_query\_threads](settings/settings.md#settings-log-query-threads) равным 1. Подробности смотрите в разделе [Настройки](settings/settings.md).

@@ -743,43 +743,43 @@
- `ProfileEvents.Names` (Array(String)) — Счетчики для изменения различных метрик для данного потока. Описание метрик можно получить из таблицы [system.events](#system_tables-events).
- `ProfileEvents.Values` (Array(UInt64)) — метрики для данного потока, перечисленные в столбце `ProfileEvents.Names`.

-По умолчанию, строки добавляются в таблицу логирования с интервалом в 7,5 секунд. Можно задать интервал в конфигурационном параметре сервера [query\_thread\_log](server_settings/settings.md#server_settings-query-thread-log) (смотрите параметр `flush_interval_milliseconds`). Чтобы принудительно записать логи из буффера памяти в таблицу, используйте запрос `SYSTEM FLUSH LOGS`.
+По умолчанию, строки добавляются в таблицу логирования с интервалом в 7,5 секунд. Можно задать интервал в конфигурационном параметре сервера [query\_thread\_log](server_configuration_parameters/settings.md#server_configuration_parameters-query-thread-log) (смотрите параметр `flush_interval_milliseconds`). Чтобы принудительно записать логи из буфера памяти в таблицу, используйте запрос `SYSTEM FLUSH LOGS`.

Если таблицу удалить вручную, она пересоздастся автоматически «на лету». При этом все логи на момент удаления таблицы будут удалены.

!!! note "Примечание"
    Срок хранения логов не ограничен. Логи не удаляются из таблицы автоматически. Вам необходимо самостоятельно организовать удаление устаревших логов.

-Можно указать произвольный ключ партиционирования для таблицы `system.query_log` в конфигурации [query\_thread\_log](server_settings/settings.md#server_settings-query-thread-log) (параметр `partition_by`).
+Можно указать произвольный ключ партиционирования для таблицы `system.query_thread_log` в конфигурации [query\_thread\_log](server_configuration_parameters/settings.md#server_configuration_parameters-query-thread-log) (параметр `partition_by`).

## system.trace\_log {#system_tables-trace_log}

Contains stack traces collected by the sampling query profiler.

-ClickHouse creates this table when the [trace\_log](server_settings/settings.md#server_settings-trace_log) server configuration section is set. Also the [query\_profiler\_real\_time\_period\_ns](settings/settings.md#query_profiler_real_time_period_ns) and [query\_profiler\_cpu\_time\_period\_ns](settings/settings.md#query_profiler_cpu_time_period_ns) settings should be set.
+ClickHouse creates this table when the [trace\_log](server_configuration_parameters/settings.md#server_configuration_parameters-trace_log) server configuration section is set. Also the [query\_profiler\_real\_time\_period\_ns](settings/settings.md#query_profiler_real_time_period_ns) and [query\_profiler\_cpu\_time\_period\_ns](settings/settings.md#query_profiler_cpu_time_period_ns) settings should be set.

To analyze logs, use the `addressToLine`, `addressToSymbol` and `demangle` introspection functions.

Columns:

-- `event_date`([Date](../data_types/date.md)) — Date of sampling moment.
+- `event_date`([Date](../sql_reference/data_types/date.md)) — Date of sampling moment.

-- `event_time`([DateTime](../data_types/datetime.md)) — Timestamp of sampling moment.
+- `event_time`([DateTime](../sql_reference/data_types/datetime.md)) — Timestamp of sampling moment.

-- `revision`([UInt32](../data_types/int_uint.md)) — ClickHouse server build revision.
+- `revision`([UInt32](../sql_reference/data_types/int_uint.md)) — ClickHouse server build revision.

    When connecting to the server by `clickhouse-client`, you see a string similar to `Connected to ClickHouse server version 19.18.1 revision 54429.`. This field contains the `revision`, but not the `version` of a server.
-- `timer_type`([Enum8](../data_types/enum.md)) — Timer type: +- `timer_type`([Enum8](../sql_reference/data_types/enum.md)) — Timer type: - `Real` represents wall-clock time. - `CPU` represents CPU time. -- `thread_number`([UInt32](../data_types/int_uint.md)) — Thread identifier. +- `thread_number`([UInt32](../sql_reference/data_types/int_uint.md)) — Thread identifier. -- `query_id`([String](../data_types/string.md)) — Query identifier that can be used to get details about a query that was running from the [query\_log](#system_tables-query_log) system table. +- `query_id`([String](../sql_reference/data_types/string.md)) — Query identifier that can be used to get details about a query that was running from the [query\_log](#system_tables-query_log) system table. -- `trace`([Array(UInt64)](../data_types/array.md)) — Stack trace at the moment of sampling. Each element is a virtual memory address inside ClickHouse server process. +- `trace`([Array(UInt64)](../sql_reference/data_types/array.md)) — Stack trace at the moment of sampling. Each element is a virtual memory address inside ClickHouse server process. **Example** @@ -927,13 +927,13 @@ WHERE Столбцы: -- `name` ([String](../data_types/string.md)) — имя настройки. -- `value` ([String](../data_types/string.md)) — значение настройки. -- `changed` ([UInt8](../data_types/int_uint.md#uint-ranges)) — показывает, изменена ли настройка по отношению к значению по умолчанию. -- `description` ([String](../data_types/string.md)) — краткое описание настройки. -- `min` ([Nullable](../data_types/nullable.md)([String](../data_types/string.md))) — минимальное значение настройки, если задано [ограничение](settings/constraints_on_settings.md#constraints-on-settings). Если нет, то поле содержит [NULL](../query_language/syntax.md#null-literal). -- `max` ([Nullable](../data_types/nullable.md)([String](../data_types/string.md))) — максимальное значение настройки, если задано [ограничение](settings/constraints_on_settings.md#constraints-on-settings). Если нет, то поле содержит [NULL](../query_language/syntax.md#null-literal). -- `readonly` ([UInt8](../data_types/int_uint.md#uint-ranges)) — Показывает, может ли пользователь изменять настройку: +- `name` ([String](../sql_reference/data_types/string.md)) — имя настройки. +- `value` ([String](../sql_reference/data_types/string.md)) — значение настройки. +- `changed` ([UInt8](../sql_reference/data_types/int_uint.md#uint-ranges)) — показывает, изменена ли настройка по отношению к значению по умолчанию. +- `description` ([String](../sql_reference/data_types/string.md)) — краткое описание настройки. +- `min` ([Nullable](../sql_reference/data_types/nullable.md)([String](../sql_reference/data_types/string.md))) — минимальное значение настройки, если задано [ограничение](settings/constraints_on_settings.md#constraints-on-settings). Если нет, то поле содержит [NULL](../sql_reference/syntax.md#null-literal). +- `max` ([Nullable](../sql_reference/data_types/nullable.md)([String](../sql_reference/data_types/string.md))) — максимальное значение настройки, если задано [ограничение](settings/constraints_on_settings.md#constraints-on-settings). Если нет, то поле содержит [NULL](../sql_reference/syntax.md#null-literal). +- `readonly` ([UInt8](../sql_reference/data_types/int_uint.md#uint-ranges)) — Показывает, может ли пользователь изменять настройку: - `0` — Текущий пользователь может изменять настройку. - `1` — Текущий пользователь не может изменять настройку. 
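The `system.settings` columns above are easiest to understand with a live query. A minimal sketch (hedged: it assumes a server recent enough to expose the `min`, `max` and `readonly` columns documented above):

``` sql
-- Settings changed from their defaults, with constraint bounds
-- (min/max stay NULL unless a constraint is configured) and the read-only flag.
SELECT name, value, min, max, readonly
FROM system.settings
WHERE changed
LIMIT 5
```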
@@ -1013,9 +1013,9 @@ WHERE name in ('Kafka', 'MergeTree', 'ReplicatedCollapsingMergeTree') **Смотрите также** -- [Секции движка](table_engines/mergetree/#mergetree-query-clauses) семейства MergeTree -- [Настройки](table_engines/kafka.md#table_engine-kafka-creating-a-table) Kafka -- [Настройки](table_engines/join/#join-limitations-and-settings) Join +- [Секции движка](../engines/table_engines/mergetree_family/mergetree.md#mergetree-query-clauses) семейства MergeTree +- [Настройки](../engines/table_engines/integrations/kafka.md#table_engine-kafka-creating-a-table) Kafka +- [Настройки](../engines/table_engines/special/join.md#join-limitations-and-settings) Join ## system.tables {#system-tables} @@ -1031,7 +1031,7 @@ WHERE name in ('Kafka', 'MergeTree', 'ReplicatedCollapsingMergeTree') - `metadata_path` (String) — путь к табличным метаданным в файловой системе. - `metadata_modification_time` (DateTime) — время последней модификации табличных метаданных. - `dependencies_database` (Array(String)) — зависимости базы данных. -- `dependencies_table` (Array(String)) — табличные зависимости (таблицы [MaterializedView](table_engines/materializedview.md), созданные на базе текущей таблицы). +- `dependencies_table` (Array(String)) — табличные зависимости (таблицы [MaterializedView](../engines/table_engines/special/materializedview.md), созданные на базе текущей таблицы). - `create_table_query` (String) — запрос, которым создавалась таблица. - `engine_full` (String) — параметры табличного движка. - `partition_key` (String) — ключ партиционирования таблицы. @@ -1114,7 +1114,7 @@ path: /clickhouse/tables/01-08/visits/replicas ## system.mutations {#system_tables-mutations} -Таблица содержит информацию о ходе выполнения [мутаций](../query_language/alter.md#alter-mutations) MergeTree-таблиц. Каждой команде мутации соответствует одна строка. В таблице есть следующие столбцы: +Таблица содержит информацию о ходе выполнения [мутаций](../sql_reference/statements/alter.md#alter-mutations) MergeTree-таблиц. Каждой команде мутации соответствует одна строка. В таблице есть следующие столбцы: **database**, **table** - имя БД и таблицы, к которой была применена мутация. @@ -1140,28 +1140,28 @@ path: /clickhouse/tables/01-08/visits/replicas ## system.disks {#system_tables-disks} -Cодержит информацию о дисках, заданных в [конфигурации сервера](table_engines/mergetree.md#table_engine-mergetree-multiple-volumes_configure). +Cодержит информацию о дисках, заданных в [конфигурации сервера](../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-multiple-volumes_configure). Столбцы: -- `name` ([String](../data_types/string.md)) — имя диска в конфигурации сервера. -- `path` ([String](../data_types/string.md)) — путь к точке монтирования в файловой системе. -- `free_space` ([UInt64](../data_types/int_uint.md)) — свободное место на диске в байтах. -- `total_space` ([UInt64](../data_types/int_uint.md)) — объём диска в байтах. -- `keep_free_space` ([UInt64](../data_types/int_uint.md)) — место, которое должно остаться свободным на диске в байтах. Задаётся значением параметра `keep_free_space_bytes` конфигурации дисков. +- `name` ([String](../sql_reference/data_types/string.md)) — имя диска в конфигурации сервера. +- `path` ([String](../sql_reference/data_types/string.md)) — путь к точке монтирования в файловой системе. +- `free_space` ([UInt64](../sql_reference/data_types/int_uint.md)) — свободное место на диске в байтах. 
+- `total_space` ([UInt64](../sql_reference/data_types/int_uint.md)) — объём диска в байтах.
+- `keep_free_space` ([UInt64](../sql_reference/data_types/int_uint.md)) — место, которое должно остаться свободным на диске в байтах. Задаётся значением параметра `keep_free_space_bytes` конфигурации дисков.

## system.storage\_policies {#system_tables-storage_policies}

-Содержит информацию о политиках хранения и томах, заданных в [конфигурации сервера](table_engines/mergetree.md#table_engine-mergetree-multiple-volumes_configure).
+Содержит информацию о политиках хранения и томах, заданных в [конфигурации сервера](../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-multiple-volumes_configure).

Столбцы:

-- `policy_name` ([String](../data_types/string.md)) — имя политики хранения.
-- `volume_name` ([String](../data_types/string.md)) — имя тома, который содержится в политике хранения.
-- `volume_priority` ([UInt64](../data_types/int_uint.md)) — порядковый номер тома согласно конфигурации.
-- `disks` ([Array(String)](../data_types/array.md)) — имена дисков, содержащихся в политике хранения.
-- `max_data_part_size` ([UInt64](../data_types/int_uint.md)) — максимальный размер куска данных, который может храниться на дисках тома (0 — без ограничений).
-- `move_factor` ([Float64](../data_types/float.md))\` — доля свободного места, при превышении которой данные начинают перемещаться на следующий том.
+- `policy_name` ([String](../sql_reference/data_types/string.md)) — имя политики хранения.
+- `volume_name` ([String](../sql_reference/data_types/string.md)) — имя тома, который содержится в политике хранения.
+- `volume_priority` ([UInt64](../sql_reference/data_types/int_uint.md)) — порядковый номер тома согласно конфигурации.
+- `disks` ([Array(String)](../sql_reference/data_types/array.md)) — имена дисков, содержащихся в политике хранения.
+- `max_data_part_size` ([UInt64](../sql_reference/data_types/int_uint.md)) — максимальный размер куска данных, который может храниться на дисках тома (0 — без ограничений).
+- `move_factor` ([Float64](../sql_reference/data_types/float.md)) — доля свободного места, при превышении которой данные начинают перемещаться на следующий том.

Если политика хранения содержит несколько томов, то каждому тому соответствует отдельная запись в таблице.

diff --git a/docs/ru/operations/table_engines/generate.md b/docs/ru/operations/table_engines/generate.md
deleted file mode 100644
index 051369d2e1c..00000000000
--- a/docs/ru/operations/table_engines/generate.md
+++ /dev/null
@@ -1,58 +0,0 @@
----
-en_copy: true
----
-
-# GenerateRandom {#table_engines-generate}
-
-The GenerateRandom table engine produces random data for given table schema.
-
-Usage examples:
-
-- Use in test to populate reproducible large table.
-- Generate random input for fuzzing tests.
-
-## Usage in ClickHouse Server {#usage-in-clickhouse-server}
-
-``` sql
-ENGINE = GenerateRandom(random_seed, max_string_length, max_array_length)
-```
-
-The `max_array_length` and `max_string_length` parameters specify maximum length of all
-array columns and strings correspondingly in generated data.

-Generate table engine supports only `SELECT` queries.

-It supports all [DataTypes](../../data_types/index.md) that can be stored in a table except `LowCardinality` and `AggregateFunction`.
-
-**Example:**
-
-**1.** Set up the `generate_engine_table` table:
-
-``` sql
-CREATE TABLE generate_engine_table (name String, value UInt32) ENGINE = GenerateRandom(1, 5, 3)
-```
-
-**2.** Query the data:
-
-``` sql
-SELECT * FROM generate_engine_table LIMIT 3
-```
-
-``` text
-┌─name─┬──────value─┐
-│ c4xJ │ 1412771199 │
-│ r    │ 1791099446 │
-│ 7#$  │  124312908 │
-└──────┴────────────┘
-```
-
-## Details of Implementation {#details-of-implementation}
-
-- Not supported:
-    - `ALTER`
-    - `SELECT ... SAMPLE`
-    - `INSERT`
-    - Indices
-    - Replication
-
-[Original article](https://clickhouse.tech/docs/en/operations/table_engines/generate/)
diff --git a/docs/ru/operations/table_engines/materializedview.md b/docs/ru/operations/table_engines/materializedview.md
deleted file mode 100644
index 6bcc528782a..00000000000
--- a/docs/ru/operations/table_engines/materializedview.md
+++ /dev/null
@@ -1,5 +0,0 @@
-# MaterializedView {#materializedview}
-
-Используется для реализации материализованных представлений (подробнее см. запрос [CREATE TABLE](../../query_language/create.md)). Для хранения данных, использует другой движок, который был указан при создании представления. При чтении из таблицы, просто использует этот движок.
-
-[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/table_engines/materializedview/)
diff --git a/docs/ru/operations/troubleshooting.md b/docs/ru/operations/troubleshooting.md
index 4c9fbf49bd6..7c4be02456c 100644
--- a/docs/ru/operations/troubleshooting.md
+++ b/docs/ru/operations/troubleshooting.md
@@ -98,7 +98,7 @@ $ sudo -u clickhouse /usr/bin/clickhouse-server --config-file /etc/clickhouse-se

- Параметры endpoint.

-    Проверьте настройки [listen_host](server_settings/settings.md#server_settings-listen_host) и [tcp_port](server_settings/settings.md#server_settings-tcp_port).
+    Проверьте настройки [listen_host](server_configuration_parameters/settings.md#server_configuration_parameters-listen_host) и [tcp_port](server_configuration_parameters/settings.md#server_configuration_parameters-tcp_port).

    По умолчанию, сервер ClickHouse принимает только локальные подключения.

diff --git a/docs/ru/operations/utilities/clickhouse-benchmark.md b/docs/ru/operations/utilities/clickhouse-benchmark.md
new file mode 100644
index 00000000000..5467a58676e
--- /dev/null
+++ b/docs/ru/operations/utilities/clickhouse-benchmark.md
@@ -0,0 +1,154 @@
+---
+machine_translated: true
+machine_translated_rev: 1cd5f0028d917696daf71ac1c9ee849c99c1d5c8
+---
+
+# clickhouse-benchmark {#clickhouse-benchmark}
+
+Подключается к серверу ClickHouse и повторно отправляет указанные запросы.
+
+Синтаксис:
+
+``` bash
+$ echo "single query" | clickhouse-benchmark [keys]
+```
+
+или
+
+``` bash
+$ clickhouse-benchmark [keys] <<< "single query"
+```
+
+Если вы хотите отправить набор запросов, создайте текстовый файл и поместите каждый запрос на отдельной строке этого файла. Например:
+
+``` sql
+SELECT * FROM system.numbers LIMIT 10000000
+SELECT 1
+```
+
+Затем передайте этот файл на стандартный вход `clickhouse-benchmark`:
+
+``` bash
+clickhouse-benchmark [keys] < queries_file
+```
+
+## Ключи {#clickhouse-benchmark-keys}
+
+- `-c N`, `--concurrency=N` — количество запросов, которые `clickhouse-benchmark` посылает одновременно. Значение по умолчанию: 1.
+- `-d N`, `--delay=N` — интервал в секундах между промежуточными отчетами (чтобы отключить отчеты, установите 0). Значение по умолчанию: 1.
+- `-h WORD`, `--host=WORD` — хост сервера. Значение по умолчанию: `localhost`. Для [режима сравнения](#clickhouse-benchmark-comparison-mode) можно использовать несколько ключей `-h`.
+- `-p N`, `--port=N` — порт сервера. Значение по умолчанию: 9000. Для [режима сравнения](#clickhouse-benchmark-comparison-mode) можно использовать несколько ключей `-p`.
+- `-i N`, `--iterations=N` — общее количество запросов. Значение по умолчанию: 0.
+- `-r`, `--randomize` — случайный порядок выполнения запросов, если задано больше одного запроса.
+- `-s`, `--secure` — использование TLS-соединения.
+- `-t N`, `--timelimit=N` — ограничение по времени в секундах. `clickhouse-benchmark` прекращает отправку запросов по достижении указанного срока. Значение по умолчанию: 0 (ограничение по времени отключено).
+- `--confidence=N` — уровень доверия для T-критерия. Возможные значения: 0 (80%), 1 (90%), 2 (95%), 3 (98%), 4 (99%), 5 (99.5%). Значение по умолчанию: 5. В [режиме сравнения](#clickhouse-benchmark-comparison-mode) `clickhouse-benchmark` выполняет [независимый двухвыборочный t-критерий Стьюдента](https://en.wikipedia.org/wiki/Student%27s_t-test#Independent_two-sample_t-test), чтобы определить, различаются ли два распределения при выбранном уровне доверия.
+- `--cumulative` — вывод накопленных данных вместо данных за интервал.
+- `--database=DATABASE_NAME` — имя базы данных ClickHouse. Значение по умолчанию: `default`.
+- `--json=FILEPATH` — вывод в формате JSON. Когда ключ задан, `clickhouse-benchmark` выводит отчет в указанный JSON-файл.
+- `--user=USERNAME` — имя пользователя ClickHouse. Значение по умолчанию: `default`.
+- `--password=PSWD` — пароль пользователя ClickHouse. Значение по умолчанию: пустая строка.
+- `--stacktrace` — вывод трассировок стека. Когда ключ задан, `clickhouse-benchmark` выводит трассировки стека исключений.
+- `--stage=WORD` — стадия обработки запроса на сервере. ClickHouse останавливает обработку запроса и возвращает ответ `clickhouse-benchmark` на указанной стадии. Возможные значения: `complete`, `fetch_columns`, `with_mergeable_state`. Значение по умолчанию: `complete`.
+- `--help` — показывает справку.
+
+Если вы хотите применить к запросам какие-либо [настройки](../../operations/settings/index.md), передайте их в виде ключа `--<имя настройки>=SETTING_VALUE`. Например, `--max_memory_usage=1048576`.
+
+## Вывод {#clickhouse-benchmark-output}
+
+По умолчанию `clickhouse-benchmark` выводит отчет за каждый интервал `--delay`.
+
+Пример отчета:
+
+``` text
+Queries executed: 10.
+
+localhost:9000, queries 10, QPS: 6.772, RPS: 67904487.440, MiB/s: 518.070, result RPS: 67721584.984, result MiB/s: 516.675.
+
+0.000%      0.145 sec.
+10.000%     0.146 sec.
+20.000%     0.146 sec.
+30.000%     0.146 sec.
+40.000%     0.147 sec.
+50.000%     0.148 sec.
+60.000%     0.148 sec.
+70.000%     0.148 sec.
+80.000%     0.149 sec.
+90.000%     0.150 sec.
+95.000%     0.150 sec.
+99.000%     0.150 sec.
+99.900%     0.150 sec.
+99.990%     0.150 sec.
+```
+
+В отчете вы можете найти:
+
+- Количество запросов в поле `Queries executed:`.
+
+- Строку состояния, содержащую (по порядку):
+
+    - Конечную точку сервера ClickHouse.
+    - Количество обработанных запросов.
+    - QPS: сколько запросов сервер выполняет в секунду в течение периода, заданного аргументом `--delay`.
+    - RPS: сколько строк сервер читает в секунду в течение периода, заданного аргументом `--delay`.
+    - MiB/s: сколько мебибайт сервер читает в секунду в течение периода, заданного аргументом `--delay`.
+    - result RPS: сколько строк сервер помещает в результат запроса в секунду в течение периода, заданного аргументом `--delay`.
+    - result MiB/s: сколько мебибайт сервер помещает в результат запроса в секунду в течение периода, заданного аргументом `--delay`.
+
+- Процентили времени выполнения запросов.
+
+## Режим сравнения {#clickhouse-benchmark-comparison-mode}
+
+`clickhouse-benchmark` может сравнивать производительность двух запущенных серверов ClickHouse.
+
+Чтобы использовать режим сравнения, укажите конечные точки обоих серверов двумя парами ключей `--host`, `--port`. Ключи сопоставляются по позиции в списке аргументов: первый `--host` сопоставляется с первым `--port` и так далее. `clickhouse-benchmark` устанавливает соединения с обоими серверами, а затем отправляет запросы. Каждый запрос адресуется случайно выбранному серверу. Результаты отображаются для каждого сервера отдельно.
+
+## Пример {#clickhouse-benchmark-example}
+
+``` bash
+$ echo "SELECT * FROM system.numbers LIMIT 10000000 OFFSET 10000000" | clickhouse-benchmark -i 10
+```
+
+``` text
+Loaded 1 queries.
+
+Queries executed: 6.
+
+localhost:9000, queries 6, QPS: 6.153, RPS: 123398340.957, MiB/s: 941.455, result RPS: 61532982.200, result MiB/s: 469.459.
+
+0.000%      0.159 sec.
+10.000%     0.159 sec.
+20.000%     0.159 sec.
+30.000%     0.160 sec.
+40.000%     0.160 sec.
+50.000%     0.162 sec.
+60.000%     0.164 sec.
+70.000%     0.165 sec.
+80.000%     0.166 sec.
+90.000%     0.166 sec.
+95.000%     0.167 sec.
+99.000%     0.167 sec.
+99.900%     0.167 sec.
+99.990%     0.167 sec.
+
+
+
+Queries executed: 10.
+
+localhost:9000, queries 10, QPS: 6.082, RPS: 121959604.568, MiB/s: 930.478, result RPS: 60815551.642, result MiB/s: 463.986.
+
+0.000%      0.159 sec.
+10.000%     0.159 sec.
+20.000%     0.160 sec.
+30.000%     0.163 sec.
+40.000%     0.164 sec.
+50.000%     0.165 sec.
+60.000%     0.166 sec.
+70.000%     0.166 sec.
+80.000%     0.167 sec.
+90.000%     0.167 sec.
+95.000%     0.170 sec.
+99.000%     0.172 sec.
+99.900%     0.172 sec.
+99.990%     0.172 sec.
+```
diff --git a/docs/ru/operations/utils/clickhouse-copier.md b/docs/ru/operations/utilities/clickhouse-copier.md
similarity index 100%
rename from docs/ru/operations/utils/clickhouse-copier.md
rename to docs/ru/operations/utilities/clickhouse-copier.md
diff --git a/docs/ru/operations/utils/clickhouse-local.md b/docs/ru/operations/utilities/clickhouse-local.md
similarity index 97%
rename from docs/ru/operations/utils/clickhouse-local.md
rename to docs/ru/operations/utilities/clickhouse-local.md
index bcc34deea4e..7dfa9587686 100644
--- a/docs/ru/operations/utils/clickhouse-local.md
+++ b/docs/ru/operations/utilities/clickhouse-local.md
@@ -1,6 +1,6 @@
 # clickhouse-local {#clickhouse-local}

-Принимает на вход данные, которые можно представить в табличном виде и выполняет над ними операции, заданные на [языке запросов](../../query_language/index.md) ClickHouse.
+Принимает на вход данные, которые можно представить в табличном виде, и выполняет над ними операции, заданные на [языке запросов](../../sql_reference/index.md) ClickHouse.

`clickhouse-local` использует движок сервера ClickHouse, т.е. поддерживает все форматы данных и движки таблиц, с которыми работает ClickHouse, при этом для выполнения операций не требуется запущенный сервер.
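The `clickhouse-local` paragraph above is easier to follow with one concrete invocation. A minimal sketch, assuming a POSIX shell; `--structure`, `--input-format` and `--query` are the utility's standard flags, and `table` is the default name it gives to data read from stdin:

``` bash
# Aggregate CSV rows piped through stdin, with no running ClickHouse server.
echo -e "1,10\n1,20\n2,5" | clickhouse-local \
    --structure "id UInt32, value UInt32" \
    --input-format CSV \
    --query "SELECT id, sum(value) FROM table GROUP BY id ORDER BY id"
```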
diff --git a/docs/ru/operations/utils/index.md b/docs/ru/operations/utilities/index.md similarity index 100% rename from docs/ru/operations/utils/index.md rename to docs/ru/operations/utilities/index.md diff --git a/docs/ru/operations/utils/clickhouse-benchmark.md b/docs/ru/operations/utils/clickhouse-benchmark.md deleted file mode 100644 index 1d8ac3dec46..00000000000 --- a/docs/ru/operations/utils/clickhouse-benchmark.md +++ /dev/null @@ -1,153 +0,0 @@ ---- -en_copy: true ---- - -# clickhouse-benchmark {#clickhouse-benchmark} - -Connects to a ClickHouse server and repeatedly sends specified queries. - -Syntax: - -``` bash -$ echo "single query" | clickhouse-benchmark [keys] -``` - -or - -``` bash -$ clickhouse-benchmark [keys] <<< "single query" -``` - -If you want to send a set of queries, create a text file and place each query on the individual string in this file. For example: - -``` sql -SELECT * FROM system.numbers LIMIT 10000000 -SELECT 1 -``` - -Then pass this file to a standard input of `clickhouse-benchmark`. - -``` bash -clickhouse-benchmark [keys] < queries_file -``` - -## Keys {#clickhouse-benchmark-keys} - -- `-c N`, `--concurrency=N` — Number of queries that `clickhouse-benchmark` sends simultaneously. Default value: 1. -- `-d N`, `--delay=N` — Interval in seconds between intermediate reports (set 0 to disable reports). Default value: 1. -- `-h WORD`, `--host=WORD` — Server host. Default value: `localhost`. For the [comparison mode](#clickhouse-benchmark-comparison-mode) you can use multiple `-h` keys. -- `-p N`, `--port=N` — Server port. Default value: 9000. For the [comparison mode](#clickhouse-benchmark-comparison-mode) you can use multiple `-p` keys. -- `-i N`, `--iterations=N` — Total number of queries. Default value: 0. -- `-r`, `--randomize` — Random order of queries execution if there is more then one input query. -- `-s`, `--secure` — Using TLS connection. -- `-t N`, `--timelimit=N` — Time limit in seconds. `clickhouse-benchmark` stops sending queries when the specified time limit is reached. Default value: 0 (time limit disabled). -- `--confidence=N` — Level of confidence for T-test. Possible values: 0 (80%), 1 (90%), 2 (95%), 3 (98%), 4 (99%), 5 (99.5%). Default value: 5. In the [comparison mode](#clickhouse-benchmark-comparison-mode) `clickhouse-benchmark` performs the [Independent two-sample Student’s t-test](https://en.wikipedia.org/wiki/Student%27s_t-test#Independent_two-sample_t-test) test to determine whether the two distributions aren’t different with the selected level of confidence. -- `--cumulative` — Printing cumulative data instead of data per interval. -- `--database=DATABASE_NAME` — ClickHouse database name. Default value: `default`. -- `--json=FILEPATH` — JSON output. When the key is set, `clickhouse-benchmark` outputs a report to the specified JSON-file. -- `--user=USERNAME` — ClickHouse user name. Default value: `default`. -- `--password=PSWD` — ClickHouse user password. Default value: empty string. -- `--stacktrace` — Stack traces output. When the key is set, `clickhouse-bencmark` outputs stack traces of exceptions. -- `--stage=WORD` — Query processing stage at server. ClickHouse stops query processing and returns answer to `clickhouse-benchmark` at the specified stage. Possible values: `complete`, `fetch_columns`, `with_mergeable_state`. Default value: `complete`. -- `--help` — Shows the help message. - -If you want to apply some [settings](../../operations/settings/index.md) for queries, pass them as a key `--= SETTING_VALUE`. 
For example, `--max_memory_usage=1048576`. - -## Output {#clickhouse-benchmark-output} - -By default, `clickhouse-benchmark` reports for each `--delay` interval. - -Example of the report: - -``` text -Queries executed: 10. - -localhost:9000, queries 10, QPS: 6.772, RPS: 67904487.440, MiB/s: 518.070, result RPS: 67721584.984, result MiB/s: 516.675. - -0.000% 0.145 sec. -10.000% 0.146 sec. -20.000% 0.146 sec. -30.000% 0.146 sec. -40.000% 0.147 sec. -50.000% 0.148 sec. -60.000% 0.148 sec. -70.000% 0.148 sec. -80.000% 0.149 sec. -90.000% 0.150 sec. -95.000% 0.150 sec. -99.000% 0.150 sec. -99.900% 0.150 sec. -99.990% 0.150 sec. -``` - -In the report you can find: - -- Number of queries in the `Queries executed:` field. - -- Status string containing (in order): - - - Endpoint of ClickHouse server. - - Number of processed queries. - - QPS: QPS: How many queries server performed per second during a period specified in the `--delay` argument. - - RPS: How many rows server read per second during a period specified in the `--delay` argument. - - MiB/s: How many mebibytes server read per second during a period specified in the `--delay` argument. - - result RPS: How many rows placed by server to the result of a query per second during a period specified in the `--delay` argument. - - result MiB/s. How many mebibytes placed by server to the result of a query per second during a period specified in the `--delay` argument. - -- Percentiles of queries execution time. - -## Comparison mode {#clickhouse-benchmark-comparison-mode} - -`clickhouse-benchmark` can compare performances for two running ClickHouse servers. - -To use the comparison mode, specify endpoints of both servers by two pairs of `--host`, `--port` keys. Keys matched together by position in arguments list, the first `--host` is matched with the first `--port` and so on. `clickhouse-benchmark` establishes connections to both servers, then sends queries. Each query addressed to a randomly selected server. The results are shown for each server separately. - -## Example {#clickhouse-benchmark-example} - -``` bash -$ echo "SELECT * FROM system.numbers LIMIT 10000000 OFFSET 10000000" | clickhouse-benchmark -i 10 -``` - -``` text -Loaded 1 queries. - -Queries executed: 6. - -localhost:9000, queries 6, QPS: 6.153, RPS: 123398340.957, MiB/s: 941.455, result RPS: 61532982.200, result MiB/s: 469.459. - -0.000% 0.159 sec. -10.000% 0.159 sec. -20.000% 0.159 sec. -30.000% 0.160 sec. -40.000% 0.160 sec. -50.000% 0.162 sec. -60.000% 0.164 sec. -70.000% 0.165 sec. -80.000% 0.166 sec. -90.000% 0.166 sec. -95.000% 0.167 sec. -99.000% 0.167 sec. -99.900% 0.167 sec. -99.990% 0.167 sec. - - - -Queries executed: 10. - -localhost:9000, queries 10, QPS: 6.082, RPS: 121959604.568, MiB/s: 930.478, result RPS: 60815551.642, result MiB/s: 463.986. - -0.000% 0.159 sec. -10.000% 0.159 sec. -20.000% 0.160 sec. -30.000% 0.163 sec. -40.000% 0.164 sec. -50.000% 0.165 sec. -60.000% 0.166 sec. -70.000% 0.166 sec. -80.000% 0.167 sec. -90.000% 0.167 sec. -95.000% 0.170 sec. -99.000% 0.172 sec. -99.900% 0.172 sec. -99.990% 0.172 sec. 
-``` diff --git a/docs/ru/query_language/index.md b/docs/ru/query_language/index.md deleted file mode 100644 index d57d8971a7d..00000000000 --- a/docs/ru/query_language/index.md +++ /dev/null @@ -1,9 +0,0 @@ -# Справка по SQL {#spravka-po-sql} - -- [SELECT](select.md) -- [INSERT INTO](insert_into.md) -- [CREATE](create.md) -- [ALTER](alter.md#query_language_queries_alter) -- [Прочие виды запросов](misc.md) - -[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/) diff --git a/docs/ru/roadmap.md b/docs/ru/roadmap.md deleted file mode 100644 index 0d17b67a3a9..00000000000 --- a/docs/ru/roadmap.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -en_copy: true ---- - -# Roadmap {#roadmap} - -## Q1 2020 {#q1-2020} - -- Role-based access control - -## Q2 2020 {#q2-2020} - -- Integration with external authentication services -- Resource pools for more precise distribution of cluster capacity between users - -{## [Original article](https://clickhouse.tech/docs/en/roadmap/) ##} diff --git a/docs/ru/query_language/agg_functions/combinators.md b/docs/ru/sql_reference/aggregate_functions/combinators.md similarity index 96% rename from docs/ru/query_language/agg_functions/combinators.md rename to docs/ru/sql_reference/aggregate_functions/combinators.md index bc0616ef5de..e4295f0d76e 100644 --- a/docs/ru/query_language/agg_functions/combinators.md +++ b/docs/ru/sql_reference/aggregate_functions/combinators.md @@ -27,9 +27,9 @@ Для работы с промежуточными состояниями предназначены: -- Движок таблиц [AggregatingMergeTree](../../operations/table_engines/aggregatingmergetree.md). -- Функция [finalizeAggregation](../functions/other_functions.md#function-finalizeaggregation). -- Функция [runningAccumulate](../functions/other_functions.md#function-runningaccumulate). +- Движок таблиц [AggregatingMergeTree](../../engines/table_engines/mergetree_family/aggregatingmergetree.md). +- Функция [finalizeAggregation](../../sql_reference/aggregate_functions/combinators.md#function-finalizeaggregation). +- Функция [runningAccumulate](../../sql_reference/aggregate_functions/combinators.md#function-runningaccumulate). - Комбинатор [-Merge](#aggregate_functions_combinators_merge). - Комбинатор [-MergeState](#aggregate_functions_combinators_mergestate). diff --git a/docs/ru/query_language/agg_functions/index.md b/docs/ru/sql_reference/aggregate_functions/index.md similarity index 100% rename from docs/ru/query_language/agg_functions/index.md rename to docs/ru/sql_reference/aggregate_functions/index.md diff --git a/docs/ru/query_language/agg_functions/parametric_functions.md b/docs/ru/sql_reference/aggregate_functions/parametric_functions.md similarity index 94% rename from docs/ru/query_language/agg_functions/parametric_functions.md rename to docs/ru/sql_reference/aggregate_functions/parametric_functions.md index 22fc7eb2934..70430d21eae 100644 --- a/docs/ru/query_language/agg_functions/parametric_functions.md +++ b/docs/ru/sql_reference/aggregate_functions/parametric_functions.md @@ -17,7 +17,7 @@ **Возвращаемые значения** -- [Массив](../../data_types/array.md) [кортежей](../../data_types/tuple.md) следующего вида: +- [Массив](../../sql_reference/data_types/array.md) [кортежей](../../sql_reference/data_types/tuple.md) следующего вида: ``` [(lower_1, upper_1, height_1), ... 
(lower_N, upper_N, height_N)] @@ -44,7 +44,7 @@ FROM ( └─────────────────────────────────────────────────────────────────────────┘ ``` -С помощью функции [bar](../functions/other_functions.md#function-bar) можно визуализировать гистограмму, например: +С помощью функции [bar](../../sql_reference/aggregate_functions/parametric_functions.md#function-bar) можно визуализировать гистограмму, например: ``` sql WITH histogram(5)(rand() % 100) AS hist @@ -86,7 +86,7 @@ sequenceMatch(pattern)(timestamp, cond1, cond2, ...) - `pattern` — строка с шаблоном. Смотрите [Синтаксис шаблонов](#sequence-function-pattern-syntax). -- `timestamp` — столбец, содержащий метки времени. Типичный тип данных столбца — `Date` или `DateTime`. Также можно использовать любой из поддержанных типов данных [UInt](../../data_types/int_uint.md). +- `timestamp` — столбец, содержащий метки времени. Типичный тип данных столбца — `Date` или `DateTime`. Также можно использовать любой из поддержанных типов данных [UInt](../../sql_reference/aggregate_functions/parametric_functions.md). - `cond1`, `cond2` — условия, описывающие цепочку событий. Тип данных — `UInt8`. Можно использовать до 32 условий. Функция учитывает только те события, которые указаны в условиях. Функция пропускает данные из последовательности, если они не описаны ни в одном из условий. @@ -173,7 +173,7 @@ sequenceCount(pattern)(timestamp, cond1, cond2, ...) - `pattern` — строка с шаблоном. Смотрите [Синтаксис шаблонов](#sequence-function-pattern-syntax). -- `timestamp` — столбец, содержащий метки времени. Типичный тип данных столбца — `Date` или `DateTime`. Также можно использовать любой из поддержанных типов данных [UInt](../../data_types/int_uint.md). +- `timestamp` — столбец, содержащий метки времени. Типичный тип данных столбца — `Date` или `DateTime`. Также можно использовать любой из поддержанных типов данных [UInt](../../sql_reference/aggregate_functions/parametric_functions.md). - `cond1`, `cond2` — условия, описывающие цепочку событий. Тип данных — `UInt8`. Можно использовать до 32 условий. Функция учитывает только те события, которые указаны в условиях. Функция пропускает данные из последовательности, если они не описаны ни в одном из условий. @@ -234,10 +234,10 @@ windowFunnel(window, [mode])(timestamp, cond1, cond2, ..., condN) **Параметры** -- `window` — ширина скользящего окна по времени в секундах. [UInt](../../data_types/int_uint.md). +- `window` — ширина скользящего окна по времени в секундах. [UInt](../../sql_reference/aggregate_functions/parametric_functions.md). - `mode` - необязательный параметр. Если установлено значение `'strict'`, то функция `windowFunnel()` применяет условия только для уникальных значений. -- `timestamp` — имя столбца, содержащего временные отметки. [Date](../../data_types/date.md), [DateTime](../../data_types/datetime.md#data_type-datetime) и другие параметры с типом `Integer`. В случае хранения меток времени в столбцах с типом `UInt64`, максимально допустимое значение соответствует ограничению для типа `Int64`, т.е. равно `2^63-1`. -- `cond` — условия или данные, описывающие цепочку событий. [UInt8](../../data_types/int_uint.md). +- `timestamp` — имя столбца, содержащего временные отметки. [Date](../../sql_reference/aggregate_functions/parametric_functions.md), [DateTime](../../sql_reference/aggregate_functions/parametric_functions.md#data_type-datetime) и другие параметры с типом `Integer`. 
В случае хранения меток времени в столбцах с типом `UInt64`, максимально допустимое значение соответствует ограничению для типа `Int64`, т.е. равно `2^63-1`. +- `cond` — условия или данные, описывающие цепочку событий. [UInt8](../../sql_reference/aggregate_functions/parametric_functions.md). **Возвращаемое значение** @@ -299,7 +299,7 @@ ORDER BY level ASC Аналитическая функция, которая показывает, насколько выдерживаются те или иные условия, например, удержание динамики/уровня [посещаемости сайта](https://yandex.ru/support/partner2/statistics/metrika-visitors-statistics.html?lang=ru). -Функция принимает набор (от 1 до 32) логических условий, как в [WHERE](../select.md#select-where), и применяет их к заданному набору данных. +Функция принимает набор (от 1 до 32) логических условий, как в [WHERE](../../sql_reference/statements/select.md#select-where), и применяет их к заданному набору данных. Условия, кроме первого, применяются попарно: результат второго будет истинным, если истинно первое и второе, третьего - если истинно первое и третье и т. д. diff --git a/docs/ru/query_language/agg_functions/reference.md b/docs/ru/sql_reference/aggregate_functions/reference.md similarity index 91% rename from docs/ru/query_language/agg_functions/reference.md rename to docs/ru/sql_reference/aggregate_functions/reference.md index 3a6c3679c54..4e82b8775db 100644 --- a/docs/ru/query_language/agg_functions/reference.md +++ b/docs/ru/sql_reference/aggregate_functions/reference.md @@ -19,9 +19,9 @@ ClickHouse поддерживает следующие виды синтакси **Возвращаемое значение** - Если функция вызывается без параметров, она вычисляет количество строк. -- Если передаётся [выражение](../syntax.md#syntax-expressions) , то функция вычисляет количество раз, когда выражение возвращает не NULL. Если выражение возвращает значение типа [Nullable](../../data_types/nullable.md), то результат `count` не становится `Nullable`. Функция возвращает 0, если выражение возвращает `NULL` для всех строк. +- Если передаётся [выражение](../syntax.md#syntax-expressions) , то функция вычисляет количество раз, когда выражение возвращает не NULL. Если выражение возвращает значение типа [Nullable](../../sql_reference/data_types/nullable.md), то результат `count` не становится `Nullable`. Функция возвращает 0, если выражение возвращает `NULL` для всех строк. -В обоих случаях тип возвращаемого значения [UInt64](../../data_types/int_uint.md). +В обоих случаях тип возвращаемого значения [UInt64](../../sql_reference/data_types/int_uint.md). **Подробности** @@ -240,7 +240,7 @@ binary decimal ## groupBitmap {#groupbitmap} -Bitmap или агрегатные вычисления для столбца с типом данных `UInt*`, возвращают кардинальность в виде значения типа UInt64, если добавить суффикс -State, то возвращают [объект bitmap](../functions/bitmap_functions.md). +Bitmap или агрегатные вычисления для столбца с типом данных `UInt*`, возвращают кардинальность в виде значения типа UInt64, если добавить суффикс -State, то возвращают [объект bitmap](../../sql_reference/aggregate_functions/reference.md). ``` sql groupBitmap(expr) @@ -376,7 +376,7 @@ skewPop(expr) **Возвращаемое значение** -Коэффициент асимметрии заданного распределения. Тип — [Float64](../../data_types/float.md) +Коэффициент асимметрии заданного распределения. Тип — [Float64](../../sql_reference/aggregate_functions/reference.md) **Пример** @@ -400,7 +400,7 @@ skewSamp(expr) **Возвращаемое значение** -Коэффициент асимметрии заданного распределения. Тип — [Float64](../../data_types/float.md). 
Если `n <= 1` (`n` — размер выборки), тогда функция возвращает `nan`. +Коэффициент асимметрии заданного распределения. Тип — [Float64](../../sql_reference/aggregate_functions/reference.md). Если `n <= 1` (`n` — размер выборки), тогда функция возвращает `nan`. **Пример** @@ -422,7 +422,7 @@ kurtPop(expr) **Возвращаемое значение** -Коэффициент эксцесса заданного распределения. Тип — [Float64](../../data_types/float.md) +Коэффициент эксцесса заданного распределения. Тип — [Float64](../../sql_reference/aggregate_functions/reference.md) **Пример** @@ -446,7 +446,7 @@ kurtSamp(expr) **Возвращаемое значение** -Коэффициент эксцесса заданного распределения. Тип — [Float64](../../data_types/float.md). Если `n <= 1` (`n` — размер выборки), тогда функция возвращает `nan`. +Коэффициент эксцесса заданного распределения. Тип — [Float64](../../sql_reference/aggregate_functions/reference.md). Если `n <= 1` (`n` — размер выборки), тогда функция возвращает `nan`. **Пример** @@ -536,8 +536,8 @@ avgWeighted(x, weight) **Параметры** -- `x` — Значения. [Целые числа](../../data_types/int_uint.md) или [числа с плавающей запятой](../../data_types/float.md). -- `weight` — Веса отдельных значений. [Целые числа](../../data_types/int_uint.md) или [числа с плавающей запятой](../../data_types/float.md). +- `x` — Значения. [Целые числа](../../sql_reference/aggregate_functions/reference.md) или [числа с плавающей запятой](../../sql_reference/aggregate_functions/reference.md). +- `weight` — Веса отдельных значений. [Целые числа](../../sql_reference/aggregate_functions/reference.md) или [числа с плавающей запятой](../../sql_reference/aggregate_functions/reference.md). Типы параметров должны совпадать. @@ -546,7 +546,7 @@ avgWeighted(x, weight) - Среднее арифметическое взвешенное. - `NaN`, если все веса равны 0. -Тип: [Float64](../../data_types/float.md) +Тип: [Float64](../../sql_reference/aggregate_functions/reference.md) **Пример** @@ -580,7 +580,7 @@ uniq(x[, ...]) **Возвращаемое значение** -- Значение с типом данных [UInt64](../../data_types/int_uint.md). +- Значение с типом данных [UInt64](../../sql_reference/aggregate_functions/reference.md). **Детали реализации** @@ -621,7 +621,7 @@ uniqCombined(HLL_precision)(x[, ...]) **Возвращаемое значение** -- Число типа [UInt64](../../data_types/int_uint.md). +- Число типа [UInt64](../../sql_reference/aggregate_functions/reference.md). **Детали реализации** @@ -669,7 +669,7 @@ uniqHLL12(x[, ...]) **Возвращаемое значение** -- Значение хэша с типом данных [UInt64](../../data_types/int_uint.md). +- Значение хэша с типом данных [UInt64](../../sql_reference/aggregate_functions/reference.md). **Детали реализации** @@ -905,7 +905,7 @@ quantile(level)(expr) **Параметры** - `level` — Уровень квантили. Опционально. Константное значение с плавающей запятой от 0 до 1. Мы рекомендуем использовать значение `level` из диапазона `[0.01, 0.99]`. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)). -- `expr` — Выражение над значениями столбца, которое возвращает данные [числовых типов](../../data_types/index.md#data_types) или типов [Date](../../data_types/date.md), [DateTime](../../data_types/datetime.md). +- `expr` — Выражение над значениями столбца, которое возвращает данные [числовых типов](../../sql_reference/aggregate_functions/reference.md#data_types) или типов [Date](../../sql_reference/aggregate_functions/reference.md), [DateTime](../../sql_reference/aggregate_functions/reference.md). 
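For orientation before the return-value description below, a minimal `quantile` usage sketch; the table `requests` and column `response_time_ms` are hypothetical and not part of the original page.

``` sql
-- Hypothetical table: requests(response_time_ms UInt32).
-- quantile() is approximate: it uses reservoir sampling (reservoir size up to 8192).
SELECT quantile(0.9)(response_time_ms) AS p90
FROM requests;
```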
**Возвращаемое значение** @@ -913,9 +913,9 @@ quantile(level)(expr) Тип: -- [Float64](../../data_types/float.md) для входных данных числового типа. -- [Date](../../data_types/date.md), если входные значения имеют тип `Date`. -- [DateTime](../../data_types/datetime.md), если входные значения имеют тип `DateTime`. +- [Float64](../../sql_reference/aggregate_functions/reference.md) для входных данных числового типа. +- [Date](../../sql_reference/aggregate_functions/reference.md), если входные значения имеют тип `Date`. +- [DateTime](../../sql_reference/aggregate_functions/reference.md), если входные значения имеют тип `DateTime`. **Пример** @@ -968,7 +968,7 @@ quantileDeterministic(level)(expr, determinator) **Параметры** - `level` — Уровень квантили. Опционально. Константное значение с плавающей запятой от 0 до 1. Мы рекомендуем использовать значение `level` из диапазона `[0.01, 0.99]`. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)). -- `expr` — Выражение над значениями столбца, которое возвращает данные [числовых типов](../../data_types/index.md#data_types) или типов [Date](../../data_types/date.md), [DateTime](../../data_types/datetime.md). +- `expr` — Выражение над значениями столбца, которое возвращает данные [числовых типов](../../sql_reference/aggregate_functions/reference.md#data_types) или типов [Date](../../sql_reference/aggregate_functions/reference.md), [DateTime](../../sql_reference/aggregate_functions/reference.md). - `determinator` — Число, хэш которого используется при сэмплировании в алгоритме reservoir sampling, чтобы сделать результат детерминированным. В качестве детерминатора можно использовать любое определённое положительное число, например, идентификатор пользователя или события. Если одно и то же значение детерминатора попадается в выборке слишком часто, то функция выдаёт некорректный результат. **Возвращаемое значение** @@ -977,9 +977,9 @@ quantileDeterministic(level)(expr, determinator) Тип: -- [Float64](../../data_types/float.md) для входных данных числового типа. -- [Date](../../data_types/date.md) если входные значения имеют тип `Date`. -- [DateTime](../../data_types/datetime.md) если входные значения имеют тип `DateTime`. +- [Float64](../../sql_reference/aggregate_functions/reference.md) для входных данных числового типа. +- [Date](../../sql_reference/aggregate_functions/reference.md) если входные значения имеют тип `Date`. +- [DateTime](../../sql_reference/aggregate_functions/reference.md) если входные значения имеют тип `DateTime`. **Пример** @@ -1032,7 +1032,7 @@ quantileExact(level)(expr) **Параметры** - `level` — Уровень квантили. Опционально. Константное значение с плавающей запятой от 0 до 1. Мы рекомендуем использовать значение `level` из диапазона `[0.01, 0.99]`. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)). -- `expr` — Выражение над значениями столбца, которое возвращает данные [числовых типов](../../data_types/index.md#data_types) или типов [Date](../../data_types/date.md), [DateTime](../../data_types/datetime.md). +- `expr` — Выражение над значениями столбца, которое возвращает данные [числовых типов](../../sql_reference/aggregate_functions/reference.md#data_types) или типов [Date](../../sql_reference/aggregate_functions/reference.md), [DateTime](../../sql_reference/aggregate_functions/reference.md). 
**Возвращаемое значение** @@ -1040,9 +1040,9 @@ quantileExact(level)(expr) Тип: -- [Float64](../../data_types/float.md) для входных данных числового типа. -- [Date](../../data_types/date.md) если входные значения имеют тип `Date`. -- [DateTime](../../data_types/datetime.md) если входные значения имеют тип `DateTime`. +- [Float64](../../sql_reference/aggregate_functions/reference.md) для входных данных числового типа. +- [Date](../../sql_reference/aggregate_functions/reference.md) если входные значения имеют тип `Date`. +- [DateTime](../../sql_reference/aggregate_functions/reference.md) если входные значения имеют тип `DateTime`. **Пример** @@ -1084,7 +1084,7 @@ quantileExactWeighted(level)(expr, weight) **Параметры** - `level` — Уровень квантили. Опционально. Константное значение с плавающей запятой от 0 до 1. Мы рекомендуем использовать значение `level` из диапазона `[0.01, 0.99]`. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)). -- `expr` — Выражение над значениями столбца, которое возвращает данные [числовых типов](../../data_types/index.md#data_types) или типов [Date](../../data_types/date.md), [DateTime](../../data_types/datetime.md). +- `expr` — Выражение над значениями столбца, которое возвращает данные [числовых типов](../../sql_reference/aggregate_functions/reference.md#data_types) или типов [Date](../../sql_reference/aggregate_functions/reference.md), [DateTime](../../sql_reference/aggregate_functions/reference.md). - `weight` — Столбец с весам элементов последовательности. Вес — это количество повторений элемента в последовательности. **Возвращаемое значение** @@ -1093,9 +1093,9 @@ quantileExactWeighted(level)(expr, weight) Тип: -- [Float64](../../data_types/float.md) для входных данных числового типа. -- [Date](../../data_types/date.md) если входные значения имеют тип `Date`. -- [DateTime](../../data_types/datetime.md) если входные значения имеют тип `DateTime`. +- [Float64](../../sql_reference/aggregate_functions/reference.md) для входных данных числового типа. +- [Date](../../sql_reference/aggregate_functions/reference.md) если входные значения имеют тип `Date`. +- [DateTime](../../sql_reference/aggregate_functions/reference.md) если входные значения имеют тип `DateTime`. **Пример** @@ -1149,7 +1149,7 @@ quantileTiming(level)(expr) - `level` — Уровень квантили. Опционально. Константное значение с плавающей запятой от 0 до 1. Мы рекомендуем использовать значение `level` из диапазона `[0.01, 0.99]`. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)). -- `expr` — [Выражение](../syntax.md#syntax-expressions) над значения столбца, которые возвращают данные типа [Float\*](../../data_types/float.md). +- `expr` — [Выражение](../syntax.md#syntax-expressions) над значения столбца, которые возвращают данные типа [Float\*](../../sql_reference/aggregate_functions/reference.md). - Если в функцию передать отрицательные значения, то её поведение не определено. - Если значение больше, чем 30 000 (например, время загрузки страницы превышает 30 секунд), то оно приравнивается к 30 000. @@ -1173,7 +1173,7 @@ quantileTiming(level)(expr) Тип: `Float32`. !!! note "Примечания" - Если в функцию `quantileTimingIf` не передать значений, то вернётся [NaN](../../data_types/float.md#data_type-float-nan-inf). Это необходимо для отделения подобных случаев от случаев, когда результат 0. 
Подробности про сортировку `NaN` cмотрите в разделе [Секция ORDER BY](../select.md#select-order-by). + Если в функцию `quantileTimingIf` не передать значений, то вернётся [NaN](../../sql_reference/aggregate_functions/reference.md#data_type-float-nan-inf). Это необходимо для отделения подобных случаев от случаев, когда результат 0. Подробности про сортировку `NaN` cмотрите в разделе [Секция ORDER BY](../../sql_reference/statements/select.md#select-order-by). **Пример** @@ -1232,7 +1232,7 @@ quantileTimingWeighted(level)(expr, weight) - `level` — Уровень квантили. Опционально. Константное значение с плавающей запятой от 0 до 1. Мы рекомендуем использовать значение `level` из диапазона `[0.01, 0.99]`. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)). -- `expr` — [Выражение](../syntax.md#syntax-expressions) над значения столбца, которые возвращают данные типа [Float\*](../../data_types/float.md). +- `expr` — [Выражение](../syntax.md#syntax-expressions) над значения столбца, которые возвращают данные типа [Float\*](../../sql_reference/aggregate_functions/reference.md). - Если в функцию передать отрицательные значения, то её поведение не определено. - Если значение больше, чем 30 000 (например, время загрузки страницы превышает 30 секунд), то оно приравнивается к 30 000. @@ -1258,7 +1258,7 @@ quantileTimingWeighted(level)(expr, weight) Тип: `Float32`. !!! note "Примечания" - Если в функцию `quantileTimingIf` не передать значений, то вернётся [NaN](../../data_types/float.md#data_type-float-nan-inf). Это необходимо для отделения подобных случаев от случаев, когда результат 0. Подробности про сортировку `NaN` cмотрите в разделе [Секция ORDER BY](../select.md#select-order-by). + Если в функцию `quantileTimingIf` не передать значений, то вернётся [NaN](../../sql_reference/aggregate_functions/reference.md#data_type-float-nan-inf). Это необходимо для отделения подобных случаев от случаев, когда результат 0. Подробности про сортировку `NaN` cмотрите в разделе [Секция ORDER BY](../../sql_reference/statements/select.md#select-order-by). **Пример** @@ -1315,7 +1315,7 @@ quantileTDigest(level)(expr) **Параметры** - `level` — Уровень квантили. Опционально. Константное значение с плавающей запятой от 0 до 1. Мы рекомендуем использовать значение `level` из диапазона `[0.01, 0.99]`. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)). -- `expr` — Выражение над значениями столбца, которое возвращает данные [числовых типов](../../data_types/index.md#data_types) или типов [Date](../../data_types/date.md), [DateTime](../../data_types/datetime.md). +- `expr` — Выражение над значениями столбца, которое возвращает данные [числовых типов](../../sql_reference/aggregate_functions/reference.md#data_types) или типов [Date](../../sql_reference/aggregate_functions/reference.md), [DateTime](../../sql_reference/aggregate_functions/reference.md). **Возвращаемое значение** @@ -1323,9 +1323,9 @@ quantileTDigest(level)(expr) Тип: -- [Float64](../../data_types/float.md) для входных данных числового типа. -- [Date](../../data_types/date.md) если входные значения имеют тип `Date`. -- [DateTime](../../data_types/datetime.md) если входные значения имеют тип `DateTime`. +- [Float64](../../sql_reference/aggregate_functions/reference.md) для входных данных числового типа. +- [Date](../../sql_reference/aggregate_functions/reference.md) если входные значения имеют тип `Date`. 
+- [DateTime](../../sql_reference/aggregate_functions/reference.md) если входные значения имеют тип `DateTime`. **Пример** @@ -1369,7 +1369,7 @@ quantileTDigestWeighted(level)(expr, weight) **Параметры** - `level` — Уровень квантили. Опционально. Константное значение с плавающей запятой от 0 до 1. Мы рекомендуем использовать значение `level` из диапазона `[0.01, 0.99]`. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)). -- `expr` — Выражение над значениями столбца, которое возвращает данные [числовых типов](../../data_types/index.md#data_types) или типов [Date](../../data_types/date.md), [DateTime](../../data_types/datetime.md). +- `expr` — Выражение над значениями столбца, которое возвращает данные [числовых типов](../../sql_reference/aggregate_functions/reference.md#data_types) или типов [Date](../../sql_reference/aggregate_functions/reference.md), [DateTime](../../sql_reference/aggregate_functions/reference.md). - `weight` — Столбец с весам элементов последовательности. Вес — это количество повторений элемента в последовательности. **Возвращаемое значение** @@ -1378,9 +1378,9 @@ quantileTDigestWeighted(level)(expr, weight) Тип: -- [Float64](../../data_types/float.md) для входных данных числового типа. -- [Date](../../data_types/date.md) если входные значения имеют тип `Date`. -- [DateTime](../../data_types/datetime.md) если входные значения имеют тип `DateTime`. +- [Float64](../../sql_reference/aggregate_functions/reference.md) для входных данных числового типа. +- [Date](../../sql_reference/aggregate_functions/reference.md) если входные значения имеют тип `Date`. +- [DateTime](../../sql_reference/aggregate_functions/reference.md) если входные значения имеют тип `DateTime`. **Пример** @@ -1522,7 +1522,7 @@ topKWeighted(N)(x, weight) **Аргументы** - `x` – значение. -- `weight` — вес. [UInt8](../../data_types/int_uint.md). +- `weight` — вес. [UInt8](../../sql_reference/aggregate_functions/reference.md). **Возвращаемое значение** diff --git a/docs/ru/data_types/nested_data_structures/aggregatefunction.md b/docs/ru/sql_reference/data_types/aggregatefunction.md similarity index 87% rename from docs/ru/data_types/nested_data_structures/aggregatefunction.md rename to docs/ru/sql_reference/data_types/aggregatefunction.md index 641d8aa8386..d7ee1211845 100644 --- a/docs/ru/data_types/nested_data_structures/aggregatefunction.md +++ b/docs/ru/sql_reference/data_types/aggregatefunction.md @@ -23,7 +23,7 @@ CREATE TABLE t ) ENGINE = ... ``` -[uniq](../../query_language/agg_functions/reference.md#agg_function-uniq), anyIf ([any](../../query_language/agg_functions/reference.md#agg_function-any)+[If](../../query_language/agg_functions/combinators.md#agg-functions-combinator-if)) и [quantiles](../../query_language/agg_functions/reference.md) — агрегатные функции, поддержанные в ClickHouse. +[uniq](../../sql_reference/data_types/aggregatefunction.md#agg_function-uniq), anyIf ([any](../../sql_reference/data_types/aggregatefunction.md#agg_function-any)+[If](../../sql_reference/data_types/aggregatefunction.md#agg-functions-combinator-if)) и [quantiles](../../sql_reference/data_types/aggregatefunction.md) — агрегатные функции, поддержанные в ClickHouse. 
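To make the `-State`/`-Merge` round trip concrete, a hedged sketch follows; the `visits` and `visits_agg` names are hypothetical, while `uniqState`/`uniqMerge` are the documented combinator forms.

``` sql
-- Hypothetical source table: visits(user_id UInt64, event_time DateTime).
-- uniqState() writes an intermediate aggregation state;
-- uniqMerge() finalizes the stored states on read.
CREATE TABLE visits_agg
(
    `hour` DateTime,
    `visitors` AggregateFunction(uniq, UInt64)
)
ENGINE = AggregatingMergeTree()
ORDER BY hour;

INSERT INTO visits_agg
SELECT toStartOfHour(event_time), uniqState(user_id)
FROM visits
GROUP BY toStartOfHour(event_time);

SELECT hour, uniqMerge(visitors) AS unique_visitors
FROM visits_agg
GROUP BY hour;
```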
## Особенности использования {#osobennosti-ispolzovaniia} @@ -58,6 +58,6 @@ SELECT uniqMerge(state) FROM (SELECT uniqState(UserID) AS state FROM table GROUP ## Пример использования {#primer-ispolzovaniia} -Смотрите в описании движка [AggregatingMergeTree](../../operations/table_engines/aggregatingmergetree.md). +Смотрите в описании движка [AggregatingMergeTree](../../sql_reference/data_types/aggregatefunction.md). [Оригинальная статья](https://clickhouse.tech/docs/ru/data_types/nested_data_structures/aggregatefunction/) diff --git a/docs/ru/data_types/array.md b/docs/ru/sql_reference/data_types/array.md similarity index 92% rename from docs/ru/data_types/array.md rename to docs/ru/sql_reference/data_types/array.md index 0fa13d54cae..acc2e4940d0 100644 --- a/docs/ru/data_types/array.md +++ b/docs/ru/sql_reference/data_types/array.md @@ -42,7 +42,7 @@ SELECT [1, 2] AS x, toTypeName(x) ## Особенности работы с типами данных {#osobennosti-raboty-s-tipami-dannykh} -При создании массива «на лету» ClickHouse автоматически определяет тип аргументов как наиболее узкий тип данных, в котором можно хранить все перечисленные аргументы. Если среди аргументов есть [NULL](../query_language/syntax.md#null-literal) или аргумент типа [Nullable](nullable.md#data_type-nullable), то тип элементов массива — [Nullable](nullable.md). +При создании массива «на лету» ClickHouse автоматически определяет тип аргументов как наиболее узкий тип данных, в котором можно хранить все перечисленные аргументы. Если среди аргументов есть [NULL](../../sql_reference/data_types/array.md#null-literal) или аргумент типа [Nullable](nullable.md#data_type-nullable), то тип элементов массива — [Nullable](nullable.md). Если ClickHouse не смог подобрать тип данных, то он сгенерирует исключение. Это произойдёт, например, при попытке создать массив одновременно со строками и числами `SELECT array(1, 'a')`. diff --git a/docs/ru/data_types/boolean.md b/docs/ru/sql_reference/data_types/boolean.md similarity index 100% rename from docs/ru/data_types/boolean.md rename to docs/ru/sql_reference/data_types/boolean.md diff --git a/docs/ru/data_types/date.md b/docs/ru/sql_reference/data_types/date.md similarity index 100% rename from docs/ru/data_types/date.md rename to docs/ru/sql_reference/data_types/date.md diff --git a/docs/ru/data_types/datetime.md b/docs/ru/sql_reference/data_types/datetime.md similarity index 86% rename from docs/ru/data_types/datetime.md rename to docs/ru/sql_reference/data_types/datetime.md index 957ffe717a3..e52ca549907 100644 --- a/docs/ru/data_types/datetime.md +++ b/docs/ru/sql_reference/data_types/datetime.md @@ -18,13 +18,13 @@ DateTime([timezone]) Список поддерживаемых временных зон можно найти в [IANA Time Zone Database](https://www.iana.org/time-zones). Пакет `tzdata`, содержащий [базу данных часовых поясов IANA](https://www.iana.org/time-zones), должен быть установлен в системе. Используйте команду `timedatectl list-timezones` для получения списка часовых поясов, известных локальной системе. -Часовой пояс для столбца типа `DateTime` можно в явном виде установить при создании таблицы. Если часовой пояс не установлен, то ClickHouse использует значение параметра [timezone](../operations/server_settings/settings.md#server_settings-timezone), установленное в конфигурации сервера или в настройках операционной системы на момент запуска сервера. +Часовой пояс для столбца типа `DateTime` можно в явном виде установить при создании таблицы. 
Если часовой пояс не установлен, то ClickHouse использует значение параметра [timezone](../../sql_reference/data_types/datetime.md#server_configuration_parameters-timezone), установленное в конфигурации сервера или в настройках операционной системы на момент запуска сервера. -Консольный клиент ClickHouse по умолчанию использует часовой пояс сервера, если для значения `DateTime` часовой пояс не был задан в явном виде при инициализации типа данных. Чтобы использовать часовой пояс клиента, запустите [clickhouse-client](../interfaces/cli.md) с параметром `--use_client_time_zone`. +Консольный клиент ClickHouse по умолчанию использует часовой пояс сервера, если для значения `DateTime` часовой пояс не был задан в явном виде при инициализации типа данных. Чтобы использовать часовой пояс клиента, запустите [clickhouse-client](../../interfaces/cli.md) с параметром `--use_client_time_zone`. -ClickHouse отображает значения типа `DateTime` в формате `YYYY-MM-DD hh:mm:ss`. Отображение можно поменять с помощью функции [formatDateTime](../query_language/functions/date_time_functions.md#formatdatetime). +ClickHouse отображает значения типа `DateTime` в формате `YYYY-MM-DD hh:mm:ss`. Отображение можно поменять с помощью функции [formatDateTime](../../sql_reference/data_types/datetime.md#formatdatetime). -При вставке данных в ClickHouse, можно использовать различные форматы даты и времени в зависимости от значения настройки [date\_time\_input\_format](../operations/settings/settings.md#settings-date_time_input_format). +При вставке данных в ClickHouse, можно использовать различные форматы даты и времени в зависимости от значения настройки [date\_time\_input\_format](../../operations/settings/settings.md#settings-date_time_input_format). ## Примеры {#primery} @@ -111,12 +111,12 @@ FROM dt ## See Also {#see-also} -- [Функции преобразования типов](../query_language/functions/type_conversion_functions.md) -- [Функции для работы с датой и временем](../query_language/functions/date_time_functions.md) -- [Функции для работы с массивами](../query_language/functions/array_functions.md) -- [Настройка `date_time_input_format`](../operations/settings/settings.md#settings-date_time_input_format) -- [Конфигурационный параметр сервера `timezone`](../operations/server_settings/settings.md#server_settings-timezone) -- [Операторы для работы с датой и временем](../query_language/operators.md#operators-datetime) +- [Функции преобразования типов](../../sql_reference/data_types/datetime.md) +- [Функции для работы с датой и временем](../../sql_reference/data_types/datetime.md) +- [Функции для работы с массивами](../../sql_reference/data_types/datetime.md) +- [Настройка `date_time_input_format`](../../operations/settings/settings.md#settings-date_time_input_format) +- [Конфигурационный параметр сервера `timezone`](../../sql_reference/data_types/datetime.md#server_configuration_parameters-timezone) +- [Операторы для работы с датой и временем](../../sql_reference/data_types/datetime.md#operators-datetime) - [Тип данных `Date`](date.md) [Оригинальная статья](https://clickhouse.tech/docs/ru/data_types/datetime/) diff --git a/docs/ru/data_types/datetime64.md b/docs/ru/sql_reference/data_types/datetime64.md similarity index 92% rename from docs/ru/data_types/datetime64.md rename to docs/ru/sql_reference/data_types/datetime64.md index 8e3277dd52f..9e126885058 100644 --- a/docs/ru/data_types/datetime64.md +++ b/docs/ru/sql_reference/data_types/datetime64.md @@ -87,11 +87,11 @@ FROM dt ## See Also {#see-also} -- [Функции преобразования 
типов](../query_language/functions/type_conversion_functions.md) -- [Функции для работы с датой и временем](../query_language/functions/date_time_functions.md) -- [Функции для работы с массивами](../query_language/functions/array_functions.md) -- [Настройка `date_time_input_format`](../operations/settings/settings.md#settings-date_time_input_format) -- [Конфигурационный параметр сервера `timezone`](../operations/server_settings/settings.md#server_settings-timezone) -- [Операторы для работы с датой и временем](../query_language/operators.md#operators-datetime) +- [Функции преобразования типов](../../sql_reference/data_types/datetime64.md) +- [Функции для работы с датой и временем](../../sql_reference/data_types/datetime64.md) +- [Функции для работы с массивами](../../sql_reference/data_types/datetime64.md) +- [Настройка `date_time_input_format`](../../operations/settings/settings.md#settings-date_time_input_format) +- [Конфигурационный параметр сервера `timezone`](../../sql_reference/data_types/datetime64.md#server_configuration_parameters-timezone) +- [Операторы для работы с датой и временем](../../sql_reference/data_types/datetime64.md#operators-datetime) - [Тип данных `Date`](date.md) - [Тип данных `DateTime`](datetime.md) diff --git a/docs/ru/data_types/decimal.md b/docs/ru/sql_reference/data_types/decimal.md similarity index 100% rename from docs/ru/data_types/decimal.md rename to docs/ru/sql_reference/data_types/decimal.md diff --git a/docs/ru/sql_reference/data_types/domains/index.md b/docs/ru/sql_reference/data_types/domains/index.md new file mode 100644 index 00000000000..d4496cf8d5b --- /dev/null +++ b/docs/ru/sql_reference/data_types/domains/index.md @@ -0,0 +1,5 @@ +--- +toc_folder_title: Domains +toc_priority: 56 +--- + diff --git a/docs/ru/data_types/domains/ipv4.md b/docs/ru/sql_reference/data_types/domains/ipv4.md similarity index 100% rename from docs/ru/data_types/domains/ipv4.md rename to docs/ru/sql_reference/data_types/domains/ipv4.md diff --git a/docs/ru/data_types/domains/ipv6.md b/docs/ru/sql_reference/data_types/domains/ipv6.md similarity index 100% rename from docs/ru/data_types/domains/ipv6.md rename to docs/ru/sql_reference/data_types/domains/ipv6.md diff --git a/docs/ru/data_types/domains/overview.md b/docs/ru/sql_reference/data_types/domains/overview.md similarity index 100% rename from docs/ru/data_types/domains/overview.md rename to docs/ru/sql_reference/data_types/domains/overview.md diff --git a/docs/ru/data_types/enum.md b/docs/ru/sql_reference/data_types/enum.md similarity index 99% rename from docs/ru/data_types/enum.md rename to docs/ru/sql_reference/data_types/enum.md index 2ee7c77028b..58f2a4b188e 100644 --- a/docs/ru/data_types/enum.md +++ b/docs/ru/sql_reference/data_types/enum.md @@ -86,7 +86,7 @@ SELECT toTypeName(CAST('a', 'Enum(\'a\' = 1, \'b\' = 2)')) Для каждого из значений прописывается число в диапазоне `-128 .. 127` для `Enum8` или в диапазоне `-32768 .. 32767` для `Enum16`. Все строки должны быть разными, числа - тоже. Разрешена пустая строка. При указании такого типа (в определении таблицы), числа могут идти не подряд и в произвольном порядке. При этом, порядок не имеет значения. -Ни строка, ни цифровое значение в `Enum` не могут быть [NULL](../query_language/syntax.md). +Ни строка, ни цифровое значение в `Enum` не могут быть [NULL](../../sql_reference/syntax.md#null-literal). `Enum` может быть передан в тип [Nullable](nullable.md). 
Таким образом, если создать таблицу запросом diff --git a/docs/ru/data_types/fixedstring.md b/docs/ru/sql_reference/data_types/fixedstring.md similarity index 89% rename from docs/ru/data_types/fixedstring.md rename to docs/ru/sql_reference/data_types/fixedstring.md index ba91fcde9b3..7e2fdd5d525 100644 --- a/docs/ru/data_types/fixedstring.md +++ b/docs/ru/sql_reference/data_types/fixedstring.md @@ -51,6 +51,6 @@ WHERE a = 'b\0' Это поведение отличается от поведения MySQL для типа `CHAR`, где строки дополняются пробелами, а пробелы перед выводом вырезаются. -Обратите внимание, что длина значения `FixedString(N)` постоянна. Функция [length](../query_language/functions/array_functions.md#array_functions-length) возвращает `N` даже если значение `FixedString(N)` заполнено только нулевыми байтами, однако функция [empty](../query_language/functions/string_functions.md#empty) в этом же случае возвращает `1`. +Обратите внимание, что длина значения `FixedString(N)` постоянна. Функция [length](../../sql_reference/data_types/fixedstring.md#array_functions-length) возвращает `N` даже если значение `FixedString(N)` заполнено только нулевыми байтами, однако функция [empty](../../sql_reference/data_types/fixedstring.md#empty) в этом же случае возвращает `1`. [Оригинальная статья](https://clickhouse.tech/docs/ru/data_types/fixedstring/) diff --git a/docs/ru/data_types/float.md b/docs/ru/sql_reference/data_types/float.md similarity index 97% rename from docs/ru/data_types/float.md rename to docs/ru/sql_reference/data_types/float.md index 20eab345573..91d4b655e2a 100644 --- a/docs/ru/data_types/float.md +++ b/docs/ru/sql_reference/data_types/float.md @@ -75,6 +75,6 @@ SELECT 0 / 0 └──────────────┘ ``` - Смотрите правила сортировки `NaN` в разделе [Секция ORDER BY](../query_language/select.md). + Смотрите правила сортировки `NaN` в разделе [Секция ORDER BY](../sql_reference/data_types/float.md). 
[Оригинальная статья](https://clickhouse.tech/docs/ru/data_types/float/) diff --git a/docs/ru/data_types/index.md b/docs/ru/sql_reference/data_types/index.md similarity index 100% rename from docs/ru/data_types/index.md rename to docs/ru/sql_reference/data_types/index.md diff --git a/docs/ru/data_types/int_uint.md b/docs/ru/sql_reference/data_types/int_uint.md similarity index 100% rename from docs/ru/data_types/int_uint.md rename to docs/ru/sql_reference/data_types/int_uint.md diff --git a/docs/ru/data_types/nested_data_structures/index.md b/docs/ru/sql_reference/data_types/nested_data_structures/index.md similarity index 100% rename from docs/ru/data_types/nested_data_structures/index.md rename to docs/ru/sql_reference/data_types/nested_data_structures/index.md diff --git a/docs/ru/data_types/nested_data_structures/nested.md b/docs/ru/sql_reference/data_types/nested_data_structures/nested.md similarity index 100% rename from docs/ru/data_types/nested_data_structures/nested.md rename to docs/ru/sql_reference/data_types/nested_data_structures/nested.md diff --git a/docs/ru/data_types/nullable.md b/docs/ru/sql_reference/data_types/nullable.md similarity index 83% rename from docs/ru/data_types/nullable.md rename to docs/ru/sql_reference/data_types/nullable.md index 1ee6777254b..dfbd0fb0512 100644 --- a/docs/ru/data_types/nullable.md +++ b/docs/ru/sql_reference/data_types/nullable.md @@ -1,6 +1,6 @@ # Nullable(TypeName) {#data_type-nullable} -Позволяет работать как со значением типа `TypeName` так и с отсутствием этого значения ([NULL](../query_language/syntax.md)) в одной и той же переменной, в том числе хранить `NULL` в таблицах вместе со значения типа `TypeName`. Например, в столбце типа `Nullable(Int8)` можно хранить значения типа `Int8`, а в тех строках, где значения нет, будет храниться `NULL`. +Позволяет работать как со значением типа `TypeName` так и с отсутствием этого значения ([NULL](../../sql_reference/data_types/nullable.md)) в одной и той же переменной, в том числе хранить `NULL` в таблицах вместе со значения типа `TypeName`. Например, в столбце типа `Nullable(Int8)` можно хранить значения типа `Int8`, а в тех строках, где значения нет, будет храниться `NULL`. В качестве `TypeName` нельзя использовать составные типы данных [Array](array.md#data_type-array) и [Tuple](tuple.md). Составные типы данных могут содержать значения типа `Nullable`, например `Array(Nullable(Int8))`. 
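A minimal illustration of storing and reading `NULL` through `Nullable`; the table name `t_null` is illustrative.

``` sql
-- A Nullable(Int8) column can hold either an Int8 value or NULL.
CREATE TABLE t_null (`x` Int8, `y` Nullable(Int8)) ENGINE = TinyLog;

INSERT INTO t_null VALUES (1, NULL), (2, 3);

-- Arithmetic with NULL yields NULL: the first row returns NULL, the second 5.
SELECT x + y FROM t_null;
```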
diff --git a/docs/ru/data_types/special_data_types/expression.md b/docs/ru/sql_reference/data_types/special_data_types/expression.md similarity index 100% rename from docs/ru/data_types/special_data_types/expression.md rename to docs/ru/sql_reference/data_types/special_data_types/expression.md diff --git a/docs/ru/data_types/special_data_types/index.md b/docs/ru/sql_reference/data_types/special_data_types/index.md similarity index 100% rename from docs/ru/data_types/special_data_types/index.md rename to docs/ru/sql_reference/data_types/special_data_types/index.md diff --git a/docs/ru/data_types/special_data_types/interval.md b/docs/ru/sql_reference/data_types/special_data_types/interval.md similarity index 84% rename from docs/ru/data_types/special_data_types/interval.md rename to docs/ru/sql_reference/data_types/special_data_types/interval.md index 22912bdbca1..1721b8631ad 100644 --- a/docs/ru/data_types/special_data_types/interval.md +++ b/docs/ru/sql_reference/data_types/special_data_types/interval.md @@ -1,6 +1,6 @@ # Interval {#data-type-interval} -Семейство типов данных, представляющих интервалы дат и времени. Оператор [INTERVAL](../../query_language/operators.md#operator-interval) возвращает значения этих типов. +Семейство типов данных, представляющих интервалы дат и времени. Оператор [INTERVAL](../../../sql_reference/data_types/special_data_types/interval.md#operator-interval) возвращает значения этих типов. !!! warning "Внимание" Нельзя использовать типы данных `Interval` для хранения данных в таблице. @@ -35,7 +35,7 @@ SELECT toTypeName(INTERVAL 4 DAY) ## Использование {#data-type-interval-usage-remarks} -Значения типов `Interval` можно использовать в арифметических операциях со значениями типов [Date](../../data_types/date.md) и [DateTime](../../data_types/datetime.md). Например, можно добавить 4 дня к текущей дате: +Значения типов `Interval` можно использовать в арифметических операциях со значениями типов [Date](../../../sql_reference/data_types/special_data_types/interval.md) и [DateTime](../../../sql_reference/data_types/special_data_types/interval.md). Например, можно добавить 4 дня к текущей дате: ``` sql SELECT now() as current_date_time, current_date_time + INTERVAL 4 DAY @@ -74,5 +74,5 @@ Code: 43. DB::Exception: Received from localhost:9000. DB::Exception: Wrong argu ## Смотрите также {#smotrite-takzhe} -- Оператор[INTERVAL](../../query_language/operators.md#operator-interval) -- Функция приведения типа [toInterval](../../query_language/functions/type_conversion_functions.md#function-tointerval) +- Оператор[INTERVAL](../../../sql_reference/data_types/special_data_types/interval.md#operator-interval) +- Функция приведения типа [toInterval](../../../sql_reference/data_types/special_data_types/interval.md#function-tointerval) diff --git a/docs/ru/data_types/special_data_types/nothing.md b/docs/ru/sql_reference/data_types/special_data_types/nothing.md similarity index 63% rename from docs/ru/data_types/special_data_types/nothing.md rename to docs/ru/sql_reference/data_types/special_data_types/nothing.md index ab4b96cc519..c23de847077 100644 --- a/docs/ru/data_types/special_data_types/nothing.md +++ b/docs/ru/sql_reference/data_types/special_data_types/nothing.md @@ -1,8 +1,8 @@ # Nothing {#nothing} -Этот тип данных предназначен только для того, чтобы представлять [NULL](../../query_language/syntax.md), т.е. отсутствие значения. +Этот тип данных предназначен только для того, чтобы представлять [NULL](../../../sql_reference/data_types/special_data_types/nothing.md), т.е. 
отсутствие значения. -Невозможно создать значение типа `Nothing`, поэтому он используется там, где значение не подразумевается. Например, `NULL` записывается как `Nullable(Nothing)` ([Nullable](../../data_types/nullable.md) — это тип данных, позволяющий хранить `NULL` в таблицах). Также тип `Nothing` используется для обозначения пустых массивов: +Невозможно создать значение типа `Nothing`, поэтому он используется там, где значение не подразумевается. Например, `NULL` записывается как `Nullable(Nothing)` ([Nullable](../../../sql_reference/data_types/special_data_types/nothing.md) — это тип данных, позволяющий хранить `NULL` в таблицах). Также тип `Nothing` используется для обозначения пустых массивов: ``` sql SELECT toTypeName(Array()) diff --git a/docs/ru/data_types/special_data_types/set.md b/docs/ru/sql_reference/data_types/special_data_types/set.md similarity index 100% rename from docs/ru/data_types/special_data_types/set.md rename to docs/ru/sql_reference/data_types/special_data_types/set.md diff --git a/docs/ru/data_types/string.md b/docs/ru/sql_reference/data_types/string.md similarity index 100% rename from docs/ru/data_types/string.md rename to docs/ru/sql_reference/data_types/string.md diff --git a/docs/ru/data_types/tuple.md b/docs/ru/sql_reference/data_types/tuple.md similarity index 87% rename from docs/ru/data_types/tuple.md rename to docs/ru/sql_reference/data_types/tuple.md index 17732d04953..e8f5f335278 100644 --- a/docs/ru/data_types/tuple.md +++ b/docs/ru/sql_reference/data_types/tuple.md @@ -2,7 +2,7 @@ Кортеж из элементов любого [типа](index.md#data_types). Элементы кортежа могут быть одного или разных типов. -Кортежи используются для временной группировки столбцов. Столбцы могут группироваться при использовании выражения IN в запросе, а также для указания нескольких формальных параметров лямбда-функций. Подробнее смотрите разделы [Операторы IN](../query_language/select.md), [Функции высшего порядка](../query_language/functions/higher_order_functions.md#higher_order_functions). +Кортежи используются для временной группировки столбцов. Столбцы могут группироваться при использовании выражения IN в запросе, а также для указания нескольких формальных параметров лямбда-функций. Подробнее смотрите разделы [Операторы IN](../../sql_reference/data_types/tuple.md), [Функции высшего порядка](../../sql_reference/data_types/tuple.md#higher_order_functions). Кортежи могут быть результатом запроса. В этом случае, в текстовых форматах кроме JSON, значения выводятся в круглых скобках через запятую. В форматах JSON, кортежи выводятся в виде массивов (в квадратных скобках). @@ -28,7 +28,7 @@ SELECT tuple(1,'a') AS x, toTypeName(x) ## Особенности работы с типами данных {#osobennosti-raboty-s-tipami-dannykh} -При создании кортежа «на лету» ClickHouse автоматически определяет тип каждого аргументов как минимальный из типов, который может сохранить значение аргумента. Если аргумент — [NULL](../query_language/syntax.md#null-literal), то тип элемента кортежа — [Nullable](nullable.md). +При создании кортежа «на лету» ClickHouse автоматически определяет тип каждого аргументов как минимальный из типов, который может сохранить значение аргумента. Если аргумент — [NULL](../../sql_reference/data_types/tuple.md#null-literal), то тип элемента кортежа — [Nullable](nullable.md). 
Пример автоматического определения типа данных: diff --git a/docs/ru/data_types/uuid.md b/docs/ru/sql_reference/data_types/uuid.md similarity index 82% rename from docs/ru/data_types/uuid.md rename to docs/ru/sql_reference/data_types/uuid.md index 24f43cc5d24..d62ec22eecb 100644 --- a/docs/ru/data_types/uuid.md +++ b/docs/ru/sql_reference/data_types/uuid.md @@ -16,7 +16,7 @@ ## Как сгенерировать UUID {#kak-sgenerirovat-uuid} -Для генерации UUID-значений предназначена функция [generateUUIDv4](../query_language/functions/uuid_functions.md). +Для генерации UUID-значений предназначена функция [generateUUIDv4](../../sql_reference/data_types/uuid.md). ## Примеры использования {#primery-ispolzovaniia} @@ -65,8 +65,8 @@ SELECT * FROM t_uuid ## Ограничения {#ogranicheniia} -Тип данных UUID можно использовать только с функциями, которые поддерживаются типом данных [String](string.md) (например, [min](../query_language/agg_functions/reference.md#agg_function-min), [max](../query_language/agg_functions/reference.md#agg_function-max), и [count](../query_language/agg_functions/reference.md#agg_function-count)). +Тип данных UUID можно использовать только с функциями, которые поддерживаются типом данных [String](string.md) (например, [min](../../sql_reference/data_types/uuid.md#agg_function-min), [max](../../sql_reference/data_types/uuid.md#agg_function-max), и [count](../../sql_reference/data_types/uuid.md#agg_function-count)). -Тип данных UUID не поддерживается арифметическими операциями (например, [abs](../query_language/functions/arithmetic_functions.md#arithm_func-abs)) или агрегатными функциями, такими как [sum](../query_language/agg_functions/reference.md#agg_function-sum) и [avg](../query_language/agg_functions/reference.md#agg_function-avg). +Тип данных UUID не поддерживается арифметическими операциями (например, [abs](../../sql_reference/data_types/uuid.md#arithm_func-abs)) или агрегатными функциями, такими как [sum](../../sql_reference/data_types/uuid.md#agg_function-sum) и [avg](../../sql_reference/data_types/uuid.md#agg_function-avg). [Original article](https://clickhouse.tech/docs/en/data_types/uuid/) diff --git a/docs/ru/query_language/dicts/external_dicts.md b/docs/ru/sql_reference/dictionaries/external_dictionaries/external_dicts.md similarity index 79% rename from docs/ru/query_language/dicts/external_dicts.md rename to docs/ru/sql_reference/dictionaries/external_dictionaries/external_dicts.md index 586e443e3a9..4929e571758 100644 --- a/docs/ru/query_language/dicts/external_dicts.md +++ b/docs/ru/sql_reference/dictionaries/external_dictionaries/external_dicts.md @@ -5,11 +5,11 @@ ClickHouse: - Полностью или частично хранит словари в оперативной памяти. - Периодически обновляет их и динамически подгружает отсутствующие значения. -- Позволяет создавать внешние словари с помощью xml-файлов или [DDL-запросов](../create.md#create-dictionary-query). +- Позволяет создавать внешние словари с помощью xml-файлов или [DDL-запросов](../../../sql_reference/statements/create.md#create-dictionary-query). -Конфигурация внешних словарей может находится в одном или нескольких xml-файлах. Путь к конфигурации указывается в параметре [dictionaries\_config](../../operations/server_settings/settings.md). +Конфигурация внешних словарей может находится в одном или нескольких xml-файлах. Путь к конфигурации указывается в параметре [dictionaries\_config](../../../sql_reference/dictionaries/external_dictionaries/external_dicts.md). 
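Alongside the xml form, a hedged sketch of the DDL alternative mentioned above; the dictionary name, source table, and connection settings are hypothetical.

``` sql
-- Hypothetical dictionary over a local table default.countries(id UInt64, name String):
CREATE DICTIONARY country_dict
(
    id UInt64,
    name String DEFAULT ''
)
PRIMARY KEY id
SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' PASSWORD '' DB 'default' TABLE 'countries'))
LAYOUT(FLAT())
LIFETIME(MIN 300 MAX 600);

-- Values are then resolved through dictGet:
SELECT dictGet('country_dict', 'name', toUInt64(42)) AS country_name;
```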
-Словари могут загружаться при старте сервера или при первом использовании, в зависимости от настройки [dictionaries\_lazy\_load](../../operations/server_settings/settings.md). +Словари могут загружаться при старте сервера или при первом использовании, в зависимости от настройки [dictionaries\_lazy\_load](../../../sql_reference/dictionaries/external_dictionaries/external_dicts.md). Конфигурационный файл словарей имеет вид: @@ -35,10 +35,10 @@ ClickHouse: В одном файле можно [сконфигурировать](external_dicts_dict.md) произвольное количество словарей. -Если вы создаёте внешние словари [DDL-запросами](../create.md#create-dictionary-query), то не задавайте конфигурацию словаря в конфигурации сервера. +Если вы создаёте внешние словари [DDL-запросами](../../../sql_reference/statements/create.md#create-dictionary-query), то не задавайте конфигурацию словаря в конфигурации сервера. !!! attention "Внимание" - Можно преобразовывать значения по небольшому словарю, описав его в запросе `SELECT` (см. функцию [transform](../functions/other_functions.md)). Эта функциональность не связана с внешними словарями. + Можно преобразовывать значения по небольшому словарю, описав его в запросе `SELECT` (см. функцию [transform](../../../sql_reference/dictionaries/external_dictionaries/external_dicts.md)). Эта функциональность не связана с внешними словарями. ## Смотрите также {#ext-dicts-see-also} @@ -47,6 +47,6 @@ ClickHouse: - [Обновление словарей](external_dicts_dict_lifetime.md) - [Источники внешних словарей](external_dicts_dict_sources.md) - [Ключ и поля словаря](external_dicts_dict_structure.md) -- [Функции для работы с внешними словарями](../functions/ext_dict_functions.md#ext_dict_functions) +- [Функции для работы с внешними словарями](../../../sql_reference/dictionaries/external_dictionaries/external_dicts.md#ext_dict_functions) [Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/dicts/external_dicts/) diff --git a/docs/ru/query_language/dicts/external_dicts_dict.md b/docs/ru/sql_reference/dictionaries/external_dictionaries/external_dicts_dict.md similarity index 91% rename from docs/ru/query_language/dicts/external_dicts_dict.md rename to docs/ru/sql_reference/dictionaries/external_dictionaries/external_dicts_dict.md index dcb8c6652b0..2e3068882bf 100644 --- a/docs/ru/query_language/dicts/external_dicts_dict.md +++ b/docs/ru/sql_reference/dictionaries/external_dictionaries/external_dicts_dict.md @@ -24,7 +24,7 @@ XML-конфигурация словаря имеет следующую стр ``` -Соответствующий [DDL-запрос](../create.md#create-dictionary-query) имеет следующий вид: +Соответствующий [DDL-запрос](../../../sql_reference/statements/create.md#create-dictionary-query) имеет следующий вид: ``` sql CREATE DICTIONARY dict_name diff --git a/docs/ru/query_language/dicts/external_dicts_dict_hierarchical.md b/docs/ru/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_hierarchical.md similarity index 90% rename from docs/ru/query_language/dicts/external_dicts_dict_hierarchical.md rename to docs/ru/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_hierarchical.md index 335b882a97c..ef9b79c0444 100644 --- a/docs/ru/query_language/dicts/external_dicts_dict_hierarchical.md +++ b/docs/ru/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_hierarchical.md @@ -32,7 +32,7 @@ ClickHouse поддерживает иерархические словари с ClickHouse поддерживает свойство [hierarchical](external_dicts_dict_structure.md#hierarchical-dict-attr) для атрибутов [внешнего словаря](index.md). 
Это свойство позволяет конфигурировать словари, подобные описанному выше. -С помощью функции [dictGetHierarchy](../functions/ext_dict_functions.md#dictgethierarchy) можно получить цепочку предков элемента. +С помощью функции [dictGetHierarchy](../../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_hierarchical.md#dictgethierarchy) можно получить цепочку предков элемента. Структура словаря для нашего примера может выглядеть следующим образом: diff --git a/docs/ru/query_language/dicts/external_dicts_dict_layout.md b/docs/ru/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_layout.md similarity index 99% rename from docs/ru/query_language/dicts/external_dicts_dict_layout.md rename to docs/ru/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_layout.md index 458593e82aa..898fe45b15a 100644 --- a/docs/ru/query_language/dicts/external_dicts_dict_layout.md +++ b/docs/ru/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_layout.md @@ -34,7 +34,7 @@ ``` -Соответствущий [DDL-запрос](../create.md#create-dictionary-query): +Соответствущий [DDL-запрос](../../../sql_reference/statements/create.md#create-dictionary-query): ``` sql CREATE DICTIONARY (...) diff --git a/docs/ru/query_language/dicts/external_dicts_dict_lifetime.md b/docs/ru/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_lifetime.md similarity index 100% rename from docs/ru/query_language/dicts/external_dicts_dict_lifetime.md rename to docs/ru/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_lifetime.md diff --git a/docs/ru/query_language/dicts/external_dicts_dict_sources.md b/docs/ru/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md similarity index 96% rename from docs/ru/query_language/dicts/external_dicts_dict_sources.md rename to docs/ru/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md index 8b9961ee7fc..32115e703f4 100644 --- a/docs/ru/query_language/dicts/external_dicts_dict_sources.md +++ b/docs/ru/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md @@ -19,7 +19,7 @@ ``` -Аналогичный [DDL-запрос](../create.md#create-dictionary-query): +Аналогичный [DDL-запрос](../../../sql_reference/statements/create.md#create-dictionary-query): ``` sql CREATE DICTIONARY dict_name (...) @@ -64,7 +64,7 @@ SOURCE(FILE(path '/opt/dictionaries/os.tsv' format 'TabSeparated')) Поля настройки: - `path` — Абсолютный путь к файлу. -- `format` — Формат файла. Поддерживаются все форматы, описанные в разделе «[Форматы](../../interfaces/formats.md#formats)». +- `format` — Формат файла. Поддерживаются все форматы, описанные в разделе «[Форматы](../../../interfaces/formats.md#formats)». ## Исполняемый файл {#dicts-external_dicts_dict_sources-executable} @@ -90,7 +90,7 @@ SOURCE(EXECUTABLE(command 'cat /opt/dictionaries/os.tsv' format 'TabSeparated')) Поля настройки: - `command` — Абсолютный путь к исполняемому файлу или имя файла (если каталог программы прописан в `PATH`). -- `format` — Формат файла. Поддерживаются все форматы, описанные в разделе «[Форматы](../../interfaces/formats.md#formats)». +- `format` — Формат файла. Поддерживаются все форматы, описанные в разделе «[Форматы](../../../interfaces/formats.md#formats)». 
## HTTP(s) {#dicts-external_dicts_dict_sources-http} @@ -128,12 +128,12 @@ SOURCE(HTTP( )) ``` -Чтобы ClickHouse смог обратиться к HTTPS-ресурсу, необходимо [настроить openSSL](../../operations/server_settings/settings.md) в конфигурации сервера. +Чтобы ClickHouse смог обратиться к HTTPS-ресурсу, необходимо [настроить openSSL](../../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md) в конфигурации сервера. Поля настройки: - `url` — URL источника. -- `format` — Формат файла. Поддерживаются все форматы, описанные в разделе «[Форматы](../../interfaces/formats.md#formats)». +- `format` — Формат файла. Поддерживаются все форматы, описанные в разделе «[Форматы](../../../interfaces/formats.md#formats)». ## ODBC {#dicts-external_dicts_dict_sources-odbc} @@ -172,7 +172,7 @@ SOURCE(ODBC( ClickHouse получает от ODBC-драйвера информацию о квотировании и квотирует настройки в запросах к драйверу, поэтому имя таблицы нужно указывать в соответствии с регистром имени таблицы в базе данных. -Если у вас есть проблемы с кодировками при использовании Oracle, ознакомьтесь с соответствующим разделом [FAQ](../../faq/general.md#oracle-odbc-encodings). +Если у вас есть проблемы с кодировками при использовании Oracle, ознакомьтесь с соответствующим разделом [FAQ](../../../faq/general.md#oracle-odbc-encodings). ### Выявленная уязвимость в функционировании ODBC словарей {#vyiavlennaia-uiazvimost-v-funktsionirovanii-odbc-slovarei} @@ -509,7 +509,7 @@ SOURCE(CLICKHOUSE( Поля настройки: -- `host` — хост ClickHouse. Если host локальный, то запрос выполняется без сетевого взаимодействия. Чтобы повысить отказоустойчивость решения, можно создать таблицу типа [Distributed](../../operations/table_engines/distributed.md) и прописать её в дальнейших настройках. +- `host` — хост ClickHouse. Если host локальный, то запрос выполняется без сетевого взаимодействия. Чтобы повысить отказоустойчивость решения, можно создать таблицу типа [Distributed](../../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md) и прописать её в дальнейших настройках. - `port` — порт сервера ClickHouse. - `user` — имя пользователя ClickHouse. - `password` — пароль пользователя ClickHouse. diff --git a/docs/ru/query_language/dicts/external_dicts_dict_structure.md b/docs/ru/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md similarity index 90% rename from docs/ru/query_language/dicts/external_dicts_dict_structure.md rename to docs/ru/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md index 7fa762c063a..f83b7657b54 100644 --- a/docs/ru/query_language/dicts/external_dicts_dict_structure.md +++ b/docs/ru/sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md @@ -154,14 +154,14 @@ CREATE DICTIONARY somename ( | Тег | Описание | Обязательный | |------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------| | `name` | Имя столбца. | Да | -| `type` | Тип данных ClickHouse.
    ClickHouse пытается привести значение из словаря к заданному типу данных. Например, в случае MySQL, в таблице-источнике поле может быть `TEXT`, `VARCHAR`, `BLOB`, но загружено может быть как `String`. [Nullable](../../data_types/nullable.md) не поддерживается. | Да | +| `type` | Тип данных ClickHouse.
    ClickHouse пытается привести значение из словаря к заданному типу данных. Например, в случае MySQL, в таблице-источнике поле может быть `TEXT`, `VARCHAR`, `BLOB`, но загружено может быть как `String`. [Nullable](../../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md) не поддерживается. | Да | | `null_value` | Значение по умолчанию для несуществующего элемента.
    В примере это пустая строка. Нельзя указать значение `NULL`. | Да | -| `expression` | [Выражение](../syntax.md#syntax-expressions), которое ClickHouse выполняет со значением.
    Выражением может быть имя столбца в удаленной SQL базе. Таким образом, вы можете использовать его для создания псевдонима удаленного столбца.

    Значение по умолчанию: нет выражения. | Нет | +| `expression` | [Выражение](../../syntax.md#syntax-expressions), которое ClickHouse выполняет со значением.
    Выражением может быть имя столбца в удаленной SQL базе. Таким образом, вы можете использовать его для создания псевдонима удаленного столбца.

    Значение по умолчанию: нет выражения. | Нет | | `hierarchical` | Если `true`, то атрибут содержит ключ предка для текущего элемента. Смотрите [Иерархические словари](external_dicts_dict_hierarchical.md).

    Значение по умолчанию: `false`. | Нет | | `is_object_id` | Признак того, что запрос выполняется к документу MongoDB по `ObjectID`.

    Значение по умолчанию: `false`. | Нет | ## Смотрите также {#smotrite-takzhe} -- [Функции для работы с внешними словарями](../functions/ext_dict_functions.md). +- [Функции для работы с внешними словарями](../../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md). [Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/dicts/external_dicts_dict_structure/) diff --git a/docs/ru/sql_reference/dictionaries/external_dictionaries/index.md b/docs/ru/sql_reference/dictionaries/external_dictionaries/index.md new file mode 100644 index 00000000000..9af8b4f2f12 --- /dev/null +++ b/docs/ru/sql_reference/dictionaries/external_dictionaries/index.md @@ -0,0 +1,5 @@ +--- +toc_folder_title: External Dictionaries +toc_priority: 37 +--- + diff --git a/docs/ru/query_language/dicts/index.md b/docs/ru/sql_reference/dictionaries/index.md similarity index 82% rename from docs/ru/query_language/dicts/index.md rename to docs/ru/sql_reference/dictionaries/index.md index e53b7f394d8..e876b92c9bf 100644 --- a/docs/ru/query_language/dicts/index.md +++ b/docs/ru/sql_reference/dictionaries/index.md @@ -8,7 +8,7 @@ ClickHouse поддерживает специальные функции для ClickHouse поддерживает: -- [Встроенные словари](internal_dicts.md#internal_dicts) со специфическим [набором функций](../functions/ym_dict_functions.md). -- [Подключаемые (внешние) словари](external_dicts.md) с [набором функций](../functions/ext_dict_functions.md). +- [Встроенные словари](internal_dicts.md#internal_dicts) со специфическим [набором функций](../../sql_reference/dictionaries/external_dictionaries/index.md). +- [Подключаемые (внешние) словари](external_dictionaries/external_dicts.md) с [набором функций](../../sql_reference/dictionaries/external_dictionaries/index.md). [Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/dicts/) diff --git a/docs/ru/query_language/dicts/internal_dicts.md b/docs/ru/sql_reference/dictionaries/internal_dicts.md similarity index 100% rename from docs/ru/query_language/dicts/internal_dicts.md rename to docs/ru/sql_reference/dictionaries/internal_dicts.md diff --git a/docs/ru/query_language/functions/arithmetic_functions.md b/docs/ru/sql_reference/functions/arithmetic_functions.md similarity index 100% rename from docs/ru/query_language/functions/arithmetic_functions.md rename to docs/ru/sql_reference/functions/arithmetic_functions.md diff --git a/docs/ru/query_language/functions/array_functions.md b/docs/ru/sql_reference/functions/array_functions.md similarity index 97% rename from docs/ru/query_language/functions/array_functions.md rename to docs/ru/sql_reference/functions/array_functions.md index ce757921bf5..36865f9aa79 100644 --- a/docs/ru/query_language/functions/array_functions.md +++ b/docs/ru/sql_reference/functions/array_functions.md @@ -55,7 +55,7 @@ arrayConcat(arrays) **Параметры** -- `arrays` – произвольное количество элементов типа [Array](../../data_types/array.md) +- `arrays` – произвольное количество элементов типа [Array](../../sql_reference/functions/array_functions.md) **Пример** @@ -359,7 +359,7 @@ arrayPushBack(array, single_value) **Параметры** - `array` - Массив. -- `single_value` - Одиночное значение. В массив с числам можно добавить только числа, в массив со строками только строки. При добавлении чисел ClickHouse автоматически приводит тип `single_value` к типу данных массива. Подробнее о типах данных в ClickHouse читайте в разделе «[Типы данных](../../data_types/index.md#data_types)». Может быть равно `NULL`. 
Функция добавит элемент `NULL` в массив, а тип элементов массива преобразует в `Nullable`. +- `single_value` - Одиночное значение. В массив с числам можно добавить только числа, в массив со строками только строки. При добавлении чисел ClickHouse автоматически приводит тип `single_value` к типу данных массива. Подробнее о типах данных в ClickHouse читайте в разделе «[Типы данных](../../sql_reference/functions/array_functions.md#data_types)». Может быть равно `NULL`. Функция добавит элемент `NULL` в массив, а тип элементов массива преобразует в `Nullable`. **Пример** @@ -384,7 +384,7 @@ arrayPushFront(array, single_value) **Параметры** - `array` - Массив. -- `single_value` - Одиночное значение. В массив с числам можно добавить только числа, в массив со строками только строки. При добавлении чисел ClickHouse автоматически приводит тип `single_value` к типу данных массива. Подробнее о типах данных в ClickHouse читайте в разделе «[Типы данных](../../data_types/index.md#data_types)». Может быть равно `NULL`. Функция добавит элемент `NULL` в массив, а тип элементов массива преобразует в `Nullable`. +- `single_value` - Одиночное значение. В массив с числам можно добавить только числа, в массив со строками только строки. При добавлении чисел ClickHouse автоматически приводит тип `single_value` к типу данных массива. Подробнее о типах данных в ClickHouse читайте в разделе «[Типы данных](../../sql_reference/functions/array_functions.md#data_types)». Может быть равно `NULL`. Функция добавит элемент `NULL` в массив, а тип элементов массива преобразует в `Nullable`. **Пример** @@ -881,7 +881,7 @@ flatten(array_of_arrays) **Параметры** -- `array_of_arrays` — [Массивов](../../data_types/array.md) массивов. Например, `[[1,2,3], [4,5]]`. +- `array_of_arrays` — [Массивов](../../sql_reference/functions/array_functions.md) массивов. Например, `[[1,2,3], [4,5]]`. **Примеры** @@ -907,7 +907,7 @@ arrayCompact(arr) **Параметры** -`arr` — [Массив](../../data_types/array.md) для обхода. +`arr` — [Массив](../../sql_reference/functions/array_functions.md) для обхода. **Возвращаемое значение** diff --git a/docs/ru/query_language/functions/array_join.md b/docs/ru/sql_reference/functions/array_join.md similarity index 100% rename from docs/ru/query_language/functions/array_join.md rename to docs/ru/sql_reference/functions/array_join.md diff --git a/docs/ru/query_language/functions/bit_functions.md b/docs/ru/sql_reference/functions/bit_functions.md similarity index 94% rename from docs/ru/query_language/functions/bit_functions.md rename to docs/ru/sql_reference/functions/bit_functions.md index 53efa9db06b..09eb9d17bc2 100644 --- a/docs/ru/query_language/functions/bit_functions.md +++ b/docs/ru/sql_reference/functions/bit_functions.md @@ -207,7 +207,7 @@ bitCount(x) **Параметры** -- `x` — [Целое число](../../data_types/int_uint.md) или [число с плавающей запятой](../../data_types/float.md). Функция использует представление числа в памяти, что позволяет поддержать числа с плавающей запятой. +- `x` — [Целое число](../../sql_reference/functions/bit_functions.md) или [число с плавающей запятой](../../sql_reference/functions/bit_functions.md). Функция использует представление числа в памяти, что позволяет поддержать числа с плавающей запятой. 
**Возвращаемое значение** diff --git a/docs/ru/query_language/functions/bitmap_functions.md b/docs/ru/sql_reference/functions/bitmap_functions.md similarity index 97% rename from docs/ru/query_language/functions/bitmap_functions.md rename to docs/ru/sql_reference/functions/bitmap_functions.md index 90adee46b15..0a6288498be 100644 --- a/docs/ru/query_language/functions/bitmap_functions.md +++ b/docs/ru/sql_reference/functions/bitmap_functions.md @@ -61,8 +61,8 @@ bitmapSubsetLimit(bitmap, range_start, cardinality_limit) **Параметры** - `bitmap` – Битмап. [Bitmap object](#bitmap_functions-bitmapbuild). -- `range_start` – Начальная точка подмножества. [UInt32](../../data_types/int_uint.md). -- `cardinality_limit` – Верхний предел подмножества. [UInt32](../../data_types/int_uint.md). +- `range_start` – Начальная точка подмножества. [UInt32](../../sql_reference/functions/bitmap_functions.md). +- `cardinality_limit` – Верхний предел подмножества. [UInt32](../../sql_reference/functions/bitmap_functions.md). **Возвращаемое значение** @@ -97,7 +97,7 @@ bitmapContains(haystack, needle) **Параметры** - `haystack` – [объект Bitmap](#bitmap_functions-bitmapbuild), в котором функция ищет значение. -- `needle` – значение, которое функция ищет. Тип — [UInt32](../../data_types/int_uint.md). +- `needle` – значение, которое функция ищет. Тип — [UInt32](../../sql_reference/functions/bitmap_functions.md). **Возвращаемые значения** diff --git a/docs/ru/query_language/functions/comparison_functions.md b/docs/ru/sql_reference/functions/comparison_functions.md similarity index 100% rename from docs/ru/query_language/functions/comparison_functions.md rename to docs/ru/sql_reference/functions/comparison_functions.md diff --git a/docs/ru/query_language/functions/conditional_functions.md b/docs/ru/sql_reference/functions/conditional_functions.md similarity index 98% rename from docs/ru/query_language/functions/conditional_functions.md rename to docs/ru/sql_reference/functions/conditional_functions.md index f2c97330d20..f3c57aac38b 100644 --- a/docs/ru/query_language/functions/conditional_functions.md +++ b/docs/ru/sql_reference/functions/conditional_functions.md @@ -14,7 +14,7 @@ SELECT if(cond, then, else) **Параметры** -- `cond` – Условие, которое может быть равно 0 или нет. Может быть [UInt8](../../data_types/int_uint.md) или `NULL`. +- `cond` – Условие, которое может быть равно 0 или нет. Может быть [UInt8](../../sql_reference/functions/conditional_functions.md) или `NULL`. - `then` - Возвращается результат выражения, если условие `cond` истинно. - `else` - Возвращается результат выражения, если условие `cond` ложно. diff --git a/docs/ru/query_language/functions/date_time_functions.md b/docs/ru/sql_reference/functions/date_time_functions.md similarity index 97% rename from docs/ru/query_language/functions/date_time_functions.md rename to docs/ru/sql_reference/functions/date_time_functions.md index 51a097fb31b..9f2f5b8afd2 100644 --- a/docs/ru/query_language/functions/date_time_functions.md +++ b/docs/ru/sql_reference/functions/date_time_functions.md @@ -215,9 +215,9 @@ dateDiff('unit', startdate, enddate, [timezone]) |quarter | |year | -- `startdate` — Первая дата. [Date](../../data_types/date.md) или [DateTime](../../data_types/datetime.md). +- `startdate` — Первая дата. [Date](../../sql_reference/functions/date_time_functions.md) или [DateTime](../../sql_reference/functions/date_time_functions.md). -- `enddate` — Вторая дата. [Date](../../data_types/date.md) или [DateTime](../../data_types/datetime.md). 
+- `enddate` — Вторая дата. [Date](../../sql_reference/functions/date_time_functions.md) или [DateTime](../../sql_reference/functions/date_time_functions.md). - `timezone` — Опциональный параметр. Если определен, применяется к обоим значениям: `startdate` и `enddate`. Если не определен, используются часовые пояса `startdate` и `enddate`. Если часовые пояса не совпадают, вернется неожидаемый результат. diff --git a/docs/ru/query_language/functions/encoding_functions.md b/docs/ru/sql_reference/functions/encoding_functions.md similarity index 91% rename from docs/ru/query_language/functions/encoding_functions.md rename to docs/ru/sql_reference/functions/encoding_functions.md index e6fbeb133c5..b4c4716d9c7 100644 --- a/docs/ru/query_language/functions/encoding_functions.md +++ b/docs/ru/sql_reference/functions/encoding_functions.md @@ -12,7 +12,7 @@ char(number_1, [number_2, ..., number_n]); **Параметры** -- `number_1, number_2, ..., number_n` — Числовые аргументы, которые интерпретируются как целые числа. Типы: [Int](../../data_types/int_uint.md), [Float](../../data_types/float.md). +- `number_1, number_2, ..., number_n` — Числовые аргументы, которые интерпретируются как целые числа. Типы: [Int](../../sql_reference/functions/encoding_functions.md), [Float](../../sql_reference/functions/encoding_functions.md). **Возвращаемое значение** @@ -104,7 +104,7 @@ Values of floating point and Decimal types are encoded as their representation i **Parameters** -- `arg` — A value to convert to hexadecimal. Types: [String](../../data_types/string.md), [UInt](../../data_types/int_uint.md), [Float](../../data_types/float.md), [Decimal](../../data_types/decimal.md), [Date](../../data_types/date.md) or [DateTime](../../data_types/datetime.md). +- `arg` — A value to convert to hexadecimal. Types: [String](../../sql_reference/functions/encoding_functions.md), [UInt](../../sql_reference/functions/encoding_functions.md), [Float](../../sql_reference/functions/encoding_functions.md), [Decimal](../../sql_reference/functions/encoding_functions.md), [Date](../../sql_reference/functions/encoding_functions.md) or [DateTime](../../sql_reference/functions/encoding_functions.md). **Returned value** diff --git a/docs/ru/query_language/functions/ext_dict_functions.md b/docs/ru/sql_reference/functions/ext_dict_functions.md similarity index 85% rename from docs/ru/query_language/functions/ext_dict_functions.md rename to docs/ru/sql_reference/functions/ext_dict_functions.md index dd7aff463f5..1a1d383e4bb 100644 --- a/docs/ru/query_language/functions/ext_dict_functions.md +++ b/docs/ru/sql_reference/functions/ext_dict_functions.md @@ -1,6 +1,6 @@ # Функции для работы с внешними словарями {#ext_dict_functions} -Информацию о подключении и настройке внешних словарей смотрите в разделе [Внешние словари](../dicts/external_dicts.md). +Информацию о подключении и настройке внешних словарей смотрите в разделе [Внешние словари](../../sql_reference/functions/ext_dict_functions.md). ## dictGet {#dictget} @@ -15,12 +15,12 @@ dictGetOrDefault('dict_name', 'attr_name', id_expr, default_value_expr) - `dict_name` — имя словаря. [Строковый литерал](../syntax.md#syntax-string-literal). - `attr_name` — имя столбца словаря. [Строковый литерал](../syntax.md#syntax-string-literal). -- `id_expr` — значение ключа словаря. [Выражение](../syntax.md#syntax-expressions), возвращающее значение типа [UInt64](../../data_types/int_uint.md) или [Tuple](../../data_types/tuple.md) в зависимости от конфигурации словаря. +- `id_expr` — значение ключа словаря. 
[Выражение](../syntax.md#syntax-expressions), возвращающее значение типа [UInt64](../../sql_reference/functions/ext_dict_functions.md) или [Tuple](../../sql_reference/functions/ext_dict_functions.md) в зависимости от конфигурации словаря. - `default_value_expr` — значение, возвращаемое в том случае, когда словарь не содержит строки с заданным ключом `id_expr`. [Выражение](../syntax.md#syntax-expressions) возвращающее значение с типом данных, сконфигурированным для атрибута `attr_name`. **Возвращаемое значение** -- Значение атрибута, соответствующее ключу `id_expr`, если ClickHouse смог привести это значение к [заданному типу данных](../dicts/external_dicts_dict_structure.md#ext_dict_structure-attributes). +- Значение атрибута, соответствующее ключу `id_expr`, если ClickHouse смог привести это значение к [заданному типу данных](../../sql_reference/functions/ext_dict_functions.md#ext_dict_structure-attributes). - Если ключа, соответствующего `id_expr` в словаре нет, то: @@ -90,7 +90,7 @@ LIMIT 3 **Смотрите также** -- [Внешние словари](../dicts/external_dicts.md) +- [Внешние словари](../../sql_reference/functions/ext_dict_functions.md) ## dictHas {#dicthas} @@ -103,7 +103,7 @@ dictHas('dict_name', id) **Параметры** - `dict_name` — имя словаря. [Строковый литерал](../syntax.md#syntax-string-literal). -- `id_expr` — значение ключа словаря. [Выражение](../syntax.md#syntax-expressions), возвращающее значение типа [UInt64](../../data_types/int_uint.md). +- `id_expr` — значение ключа словаря. [Выражение](../syntax.md#syntax-expressions), возвращающее значение типа [UInt64](../../sql_reference/functions/ext_dict_functions.md). **Возвращаемое значение** @@ -114,7 +114,7 @@ dictHas('dict_name', id) ## dictGetHierarchy {#dictgethierarchy} -Создаёт массив, содержащий цепочку предков для заданного ключа в [иерархическом словаре](../dicts/external_dicts_dict_hierarchical.md). +Создаёт массив, содержащий цепочку предков для заданного ключа в [иерархическом словаре](../dictionaries/external_dictionaries/external_dicts_dict_hierarchical.md). **Синтаксис** @@ -125,13 +125,13 @@ dictGetHierarchy('dict_name', key) **Параметры** - `dict_name` — имя словаря. [Строковый литерал](../syntax.md#syntax-string-literal). -- `key` — значение ключа. [Выражение](../syntax.md#syntax-expressions), возвращающее значение типа [UInt64](../../data_types/int_uint.md). +- `key` — значение ключа. [Выражение](../syntax.md#syntax-expressions), возвращающее значение типа [UInt64](../../sql_reference/functions/ext_dict_functions.md). **Возвращаемое значение** - Цепочка предков заданного ключа. -Type: [Array(UInt64)](../../data_types/array.md). +Type: [Array(UInt64)](../../sql_reference/functions/ext_dict_functions.md). ## dictIsIn {#dictisin} @@ -142,8 +142,8 @@ Type: [Array(UInt64)](../../data_types/array.md). **Параметры** - `dict_name` — имя словаря. [Строковый литерал](../syntax.md#syntax-string-literal). -- `child_id_expr` — ключ для проверки. [Выражение](../syntax.md#syntax-expressions), возвращающее значение типа [UInt64](../../data_types/int_uint.md). -- `ancestor_id_expr` — предполагаемый предок ключа `child_id_expr`. [Выражение](../syntax.md#syntax-expressions), возвращающее значение типа [UInt64](../../data_types/int_uint.md). +- `child_id_expr` — ключ для проверки. [Выражение](../syntax.md#syntax-expressions), возвращающее значение типа [UInt64](../../sql_reference/functions/ext_dict_functions.md). +- `ancestor_id_expr` — предполагаемый предок ключа `child_id_expr`. 
[Выражение](../syntax.md#syntax-expressions), возвращающее значение типа [UInt64](../../sql_reference/functions/ext_dict_functions.md). **Возвращаемое значение** @@ -179,12 +179,12 @@ dictGet[Type]OrDefault('dict_name', 'attr_name', id_expr, default_value_expr) - `dict_name` — имя словаря. [Строковый литерал](../syntax.md#syntax-string-literal). - `attr_name` — имя столбца словаря. [Строковый литерал](../syntax.md#syntax-string-literal). -- `id_expr` — значение ключа словаря. [Выражение](../syntax.md#syntax-expressions), возвращающее значение типа [UInt64](../../data_types/int_uint.md). +- `id_expr` — значение ключа словаря. [Выражение](../syntax.md#syntax-expressions), возвращающее значение типа [UInt64](../../sql_reference/functions/ext_dict_functions.md). - `default_value_expr` — значение, возвращаемое в том случае, когда словарь не содержит строки с заданным ключом `id_expr`. [Выражение](../syntax.md#syntax-expressions) возвращающее значение с типом данных, сконфигурированным для атрибута `attr_name`. **Возвращаемое значение** -- Если ClickHouse успешно обработал атрибут в соответствии с [заданным типом данных](../dicts/external_dicts_dict_structure.md#ext_dict_structure-attributes), то функции возвращают значение атрибута, соответствующее ключу `id_expr`. +- Если ClickHouse успешно обработал атрибут в соответствии с [заданным типом данных](../../sql_reference/functions/ext_dict_functions.md#ext_dict_structure-attributes), то функции возвращают значение атрибута, соответствующее ключу `id_expr`. - Если запрошенного `id_expr` нет в словаре, то: diff --git a/docs/ru/query_language/functions/functions_for_nulls.md b/docs/ru/sql_reference/functions/functions_for_nulls.md similarity index 97% rename from docs/ru/query_language/functions/functions_for_nulls.md rename to docs/ru/sql_reference/functions/functions_for_nulls.md index 1782a5a0cf2..0d07cbeb62d 100644 --- a/docs/ru/query_language/functions/functions_for_nulls.md +++ b/docs/ru/sql_reference/functions/functions_for_nulls.md @@ -204,7 +204,7 @@ SELECT nullIf(1, 2) ## assumeNotNull {#assumenotnull} -Приводит значение типа [Nullable](../../data_types/nullable.md) к не `Nullable`, если значение не `NULL`. +Приводит значение типа [Nullable](../../sql_reference/functions/functions_for_nulls.md) к не `Nullable`, если значение не `NULL`. ``` sql assumeNotNull(x) diff --git a/docs/ru/query_language/functions/geo.md b/docs/ru/sql_reference/functions/geo.md similarity index 86% rename from docs/ru/query_language/functions/geo.md rename to docs/ru/sql_reference/functions/geo.md index 07e2e4c1da6..db51ac05166 100644 --- a/docs/ru/query_language/functions/geo.md +++ b/docs/ru/sql_reference/functions/geo.md @@ -107,8 +107,8 @@ pointInPolygon((x, y), [(a, b), (c, d) ...], ...) **Входные значения** -- `(x, y)` — координаты точки на плоскости. Тип данных — [Tuple](../../data_types/tuple.md) — кортеж из двух чисел. -- `[(a, b), (c, d) ...]` — вершины многоугольника. Тип данных — [Array](../../data_types/array.md). Каждая вершина представлена парой координат `(a, b)`. Вершины следует указывать в порядке обхода по или против часовой стрелки. Минимальное количество вершин — 3. Многоугольник должен быть константным. +- `(x, y)` — координаты точки на плоскости. Тип данных — [Tuple](../../sql_reference/functions/geo.md) — кортеж из двух чисел. +- `[(a, b), (c, d) ...]` — вершины многоугольника. Тип данных — [Array](../../sql_reference/functions/geo.md). Каждая вершина представлена парой координат `(a, b)`. 
Вершины следует указывать в порядке обхода по или против часовой стрелки. Минимальное количество вершин — 3. Многоугольник должен быть константным. - функция поддерживает также многоугольники с дырками (вырезанными кусками). Для этого случая, добавьте многоугольники, описывающие вырезанные куски, дополнительными аргументами функции. Функция не поддерживает не односвязные многоугольники. **Возвращаемые значения** @@ -196,14 +196,14 @@ h3IsValid(h3index) **Входные значения** -- `h3index` — идентификатор шестиугольника. Тип данных — [UInt64](../../data_types/int_uint.md). +- `h3index` — идентификатор шестиугольника. Тип данных — [UInt64](../../sql_reference/functions/geo.md). **Возвращаемые значения** - 0 — число не является H3-индексом - 1 — число является H3-индексом -Тип — [UInt8](../../data_types/int_uint.md). +Тип — [UInt8](../../sql_reference/functions/geo.md). **Пример** @@ -227,14 +227,14 @@ h3GetResolution(h3index) **Входные значения** -- `h3index` — идентификатор шестиугольника. Тип данных — [UInt64](../../data_types/int_uint.md). +- `h3index` — идентификатор шестиугольника. Тип данных — [UInt64](../../sql_reference/functions/geo.md). **Возвращаемые значения** - Разрешение сетки, от 0 до 15. - Для несуществующего идентификатора может быть возвращено произвольное значение, используйте [h3IsValid](#h3isvalid) для проверки идентификаторов -Тип — [UInt8](../../data_types/int_uint.md). +Тип — [UInt8](../../sql_reference/functions/geo.md). **Пример** @@ -258,11 +258,11 @@ h3EdgeAngle(resolution) **Входные значения** -- `resolution` — требуемое разрешение индекса. Тип данных — [UInt8](../../data_types/int_uint.md). Диапазон возможных значений — `[0, 15]`. +- `resolution` — требуемое разрешение индекса. Тип данных — [UInt8](../../sql_reference/functions/geo.md). Диапазон возможных значений — `[0, 15]`. **Возвращаемые значения** -Средняя длина стороны многоугольника H3 в градусах, тип — [Float64](../../data_types/float.md). +Средняя длина стороны многоугольника H3 в градусах, тип — [Float64](../../sql_reference/functions/geo.md). **Пример** @@ -286,11 +286,11 @@ h3EdgeLengthM(resolution) **Входные значения** -- `resolution` — требуемое разрешение индекса. Тип данных — [UInt8](../../data_types/int_uint.md). Диапазон возможных значений — `[0, 15]`. +- `resolution` — требуемое разрешение индекса. Тип данных — [UInt8](../../sql_reference/functions/geo.md). Диапазон возможных значений — `[0, 15]`. **Возвращаемые значения** -Средняя длина стороны многоугольника H3 в метрах, тип — [Float64](../../data_types/float.md). +Средняя длина стороны многоугольника H3 в метрах, тип — [Float64](../../sql_reference/functions/geo.md). **Пример** @@ -320,16 +320,16 @@ geoToH3(lon, lat, resolution) **Параметры** -- `lon` — географическая долгота. Тип данных — [Float64](../../data_types/float.md). -- `lat` — географическая широта. Тип данных — [Float64](../../data_types/float.md). -- `resolution` — требуемое разрешение индекса. Тип данных — [UInt8](../../data_types/int_uint.md). Диапазон возможных значений — `[0, 15]`. +- `lon` — географическая долгота. Тип данных — [Float64](../../sql_reference/functions/geo.md). +- `lat` — географическая широта. Тип данных — [Float64](../../sql_reference/functions/geo.md). +- `resolution` — требуемое разрешение индекса. Тип данных — [UInt8](../../sql_reference/functions/geo.md). Диапазон возможных значений — `[0, 15]`. **Возвращаемые значения** - Порядковый номер шестиугольника. - 0 в случае ошибки. -Тип — [UInt64](../../data_types/int_uint.md). 
+Тип — [UInt64](../../sql_reference/functions/geo.md). **Пример** @@ -357,12 +357,12 @@ h3kRing(h3index, k) **Входные значения** -- `h3index` — идентификатор шестиугольника. Тип данных — [UInt64](../../data_types/int_uint.md). -- `k` — радиус. Тип данных — [целое число](../../data_types/int_uint.md) +- `h3index` — идентификатор шестиугольника. Тип данных — [UInt64](../../sql_reference/functions/geo.md). +- `k` — радиус. Тип данных — [целое число](../../sql_reference/functions/geo.md) **Возвращаемые значения** -[Массив](../../data_types/array.md) из H3-индексов типа [UInt64](../../data_types/int_uint.md). +[Массив](../../sql_reference/functions/geo.md) из H3-индексов типа [UInt64](../../sql_reference/functions/geo.md). **Пример** diff --git a/docs/ru/query_language/functions/hash_functions.md b/docs/ru/sql_reference/functions/hash_functions.md similarity index 88% rename from docs/ru/query_language/functions/hash_functions.md rename to docs/ru/sql_reference/functions/hash_functions.md index 32e701cbd23..62b6566f63f 100644 --- a/docs/ru/query_language/functions/hash_functions.md +++ b/docs/ru/sql_reference/functions/hash_functions.md @@ -4,7 +4,7 @@ ## halfMD5 {#hash-functions-halfmd5} -[Интерпретирует](../../query_language/functions/type_conversion_functions.md#type_conversion_functions-reinterpretAsString) все входные параметры как строки и вычисляет хэш [MD5](https://ru.wikipedia.org/wiki/MD5) для каждой из них. Затем объединяет хэши, берет первые 8 байт хэша результирующей строки и интерпретирует их как значение типа `UInt64` с big-endian порядком байтов. +[Интерпретирует](../../sql_reference/functions/hash_functions.md#type_conversion_functions-reinterpretAsString) все входные параметры как строки и вычисляет хэш [MD5](https://ru.wikipedia.org/wiki/MD5) для каждой из них. Затем объединяет хэши, берет первые 8 байт хэша результирующей строки и интерпретирует их как значение типа `UInt64` с big-endian порядком байтов. ``` sql halfMD5(par1, ...) @@ -15,11 +15,11 @@ halfMD5(par1, ...) **Параметры** -Функция принимает переменное число входных параметров. Параметры могут быть любого [поддерживаемого типа данных](../../data_types/index.md). +Функция принимает переменное число входных параметров. Параметры могут быть любого [поддерживаемого типа данных](../../sql_reference/functions/hash_functions.md). **Возвращаемое значение** -Значение хэша с типом данных [UInt64](../../data_types/int_uint.md). +Значение хэша с типом данных [UInt64](../../sql_reference/functions/hash_functions.md). **Пример** @@ -49,7 +49,7 @@ sipHash64(par1,...) Это криптографическая хэш-функция. Она работает по крайней мере в три раза быстрее, чем функция [MD5](#hash_functions-md5). -Функция [интерпретирует](../../query_language/functions/type_conversion_functions.md#type_conversion_functions-reinterpretAsString) все входные параметры как строки и вычисляет хэш MD5 для каждой из них. Затем комбинирует хэши по следующему алгоритму. +Функция [интерпретирует](../../sql_reference/functions/hash_functions.md#type_conversion_functions-reinterpretAsString) все входные параметры как строки и вычисляет хэш MD5 для каждой из них. Затем комбинирует хэши по следующему алгоритму. 1. После хэширования всех входных параметров функция получает массив хэшей. 2. Функция принимает первый и второй элементы и вычисляет хэш для массива из них. @@ -58,11 +58,11 @@ sipHash64(par1,...) **Параметры** -Функция принимает переменное число входных параметров. Параметры могут быть любого [поддерживаемого типа данных](../../data_types/index.md). 
+Функция принимает переменное число входных параметров. Параметры могут быть любого [поддерживаемого типа данных](../../sql_reference/functions/hash_functions.md). **Возвращаемое значение** -Значение хэша с типом данных [UInt64](../../data_types/int_uint.md). +Значение хэша с типом данных [UInt64](../../sql_reference/functions/hash_functions.md). **Пример** @@ -94,11 +94,11 @@ cityHash64(par1,...) **Параметры** -Функция принимает переменное число входных параметров. Параметры могут быть любого [поддерживаемого типа данных](../../data_types/index.md). +Функция принимает переменное число входных параметров. Параметры могут быть любого [поддерживаемого типа данных](../../sql_reference/functions/hash_functions.md). **Возвращаемое значение** -Значение хэша с типом данных [UInt64](../../data_types/int_uint.md). +Значение хэша с типом данных [UInt64](../../sql_reference/functions/hash_functions.md). **Примеры** @@ -160,11 +160,11 @@ farmHash64(par1, ...) **Параметры** -Функция принимает переменное число входных параметров. Параметры могут быть любого [поддерживаемого типа данных](../../data_types/index.md). +Функция принимает переменное число входных параметров. Параметры могут быть любого [поддерживаемого типа данных](../../sql_reference/functions/hash_functions.md). **Возвращаемое значение** -Значение хэша с типом данных [UInt64](../../data_types/int_uint.md). +Значение хэша с типом данных [UInt64](../../sql_reference/functions/hash_functions.md). **Пример** @@ -288,11 +288,11 @@ metroHash64(par1, ...) **Параметры** -Функция принимает переменное число входных параметров. Параметры могут быть любого [поддерживаемого типа данных](../../data_types/index.md). +Функция принимает переменное число входных параметров. Параметры могут быть любого [поддерживаемого типа данных](../../sql_reference/functions/hash_functions.md). **Возвращаемое значение** -Значение хэша с типом данных [UInt64](../../data_types/int_uint.md). +Значение хэша с типом данных [UInt64](../../sql_reference/functions/hash_functions.md). **Пример** @@ -323,12 +323,12 @@ murmurHash2_64(par1, ...) **Параметры** -Обе функции принимают переменное число входных параметров. Параметры могут быть любого [поддерживаемого типа данных](../../data_types/index.md). +Обе функции принимают переменное число входных параметров. Параметры могут быть любого [поддерживаемого типа данных](../../sql_reference/functions/hash_functions.md). **Возвращаемое значение** -- Функция `murmurHash2_32` возвращает значение типа [UInt32](../../data_types/int_uint.md). -- Функция `murmurHash2_64` возвращает значение типа [UInt64](../../data_types/int_uint.md). +- Функция `murmurHash2_32` возвращает значение типа [UInt32](../../sql_reference/functions/hash_functions.md). +- Функция `murmurHash2_64` возвращает значение типа [UInt64](../../sql_reference/functions/hash_functions.md). **Пример** @@ -353,12 +353,12 @@ murmurHash3_64(par1, ...) **Параметры** -Обе функции принимают переменное число входных параметров. Параметры могут быть любого [поддерживаемого типа данных](../../data_types/index.md). +Обе функции принимают переменное число входных параметров. Параметры могут быть любого [поддерживаемого типа данных](../../sql_reference/functions/hash_functions.md). **Возвращаемое значение** -- Функция `murmurHash3_32` возвращает значение типа [UInt32](../../data_types/int_uint.md). -- Функция `murmurHash3_64` возвращает значение типа [UInt64](../../data_types/int_uint.md). 
+- Функция `murmurHash3_32` возвращает значение типа [UInt32](../../sql_reference/functions/hash_functions.md). +- Функция `murmurHash3_64` возвращает значение типа [UInt64](../../sql_reference/functions/hash_functions.md). **Пример** @@ -382,11 +382,11 @@ murmurHash3_128( expr ) **Параметры** -- `expr` — [выражение](../syntax.md#syntax-expressions) возвращающее значение типа[String](../../data_types/string.md). +- `expr` — [выражение](../syntax.md#syntax-expressions) возвращающее значение типа[String](../../sql_reference/functions/hash_functions.md). **Возвращаемое значение** -Хэш-значение типа [FixedString(16)](../../data_types/fixedstring.md). +Хэш-значение типа [FixedString(16)](../../sql_reference/functions/hash_functions.md). **Пример** diff --git a/docs/ru/query_language/functions/higher_order_functions.md b/docs/ru/sql_reference/functions/higher_order_functions.md similarity index 100% rename from docs/ru/query_language/functions/higher_order_functions.md rename to docs/ru/sql_reference/functions/higher_order_functions.md diff --git a/docs/ru/query_language/functions/in_functions.md b/docs/ru/sql_reference/functions/in_functions.md similarity index 93% rename from docs/ru/query_language/functions/in_functions.md rename to docs/ru/sql_reference/functions/in_functions.md index 70c8c1bb6c3..a5cdb1dc217 100644 --- a/docs/ru/query_language/functions/in_functions.md +++ b/docs/ru/sql_reference/functions/in_functions.md @@ -2,7 +2,7 @@ ## in, notIn, globalIn, globalNotIn {#in-functions} -Смотрите раздел [Операторы IN](../select.md#select-in-operators). +Смотрите раздел [Операторы IN](../statements/select.md#select-in-operators). ## tuple(x, y, …), оператор (x, y, …) {#tuplex-y-operator-x-y} diff --git a/docs/ru/query_language/functions/index.md b/docs/ru/sql_reference/functions/index.md similarity index 100% rename from docs/ru/query_language/functions/index.md rename to docs/ru/sql_reference/functions/index.md diff --git a/docs/ru/query_language/functions/introspection.md b/docs/ru/sql_reference/functions/introspection.md similarity index 94% rename from docs/ru/query_language/functions/introspection.md rename to docs/ru/sql_reference/functions/introspection.md index 50b4cbb44bf..41ffb114e09 100644 --- a/docs/ru/query_language/functions/introspection.md +++ b/docs/ru/sql_reference/functions/introspection.md @@ -29,7 +29,7 @@ addressToLine(address_of_binary_instruction) **Параметры** -- `address_of_binary_instruction` ([Тип UInt64](../../data_types/int_uint.md))- Адрес инструкции в запущенном процессе. +- `address_of_binary_instruction` ([Тип UInt64](../../sql_reference/functions/introspection.md))- Адрес инструкции в запущенном процессе. **Возвращаемое значение** @@ -41,7 +41,7 @@ addressToLine(address_of_binary_instruction) - Пустая строка, если адрес не является допустимым. -Тип: [String](../../data_types/string.md). +Тип: [String](../../sql_reference/functions/introspection.md). **Пример** @@ -120,14 +120,14 @@ addressToSymbol(address_of_binary_instruction) **Параметры** -- `address_of_binary_instruction` ([Тип uint64](../../data_types/int_uint.md)) — Адрес инструкции в запущенном процессе. +- `address_of_binary_instruction` ([Тип uint64](../../sql_reference/functions/introspection.md)) — Адрес инструкции в запущенном процессе. **Возвращаемое значение** - Символ из объектных файлов ClickHouse. - Пустая строка, если адрес не является допустимым. -Тип: [String](../../data_types/string.md). +Тип: [String](../../sql_reference/functions/introspection.md). 
**Пример** @@ -217,14 +217,14 @@ demangle(symbol) **Параметры** -- `symbol` ([Строка](../../data_types/string.md)) - Символ из объектного файла. +- `symbol` ([Строка](../../sql_reference/functions/introspection.md)) - Символ из объектного файла. **Возвращаемое значение** - Имя функции C++. - Пустая строка, если символ не является допустимым. -Тип: [Строка](../../data_types/string.md). +Тип: [Строка](../../sql_reference/functions/introspection.md). **Пример** diff --git a/docs/ru/query_language/functions/ip_address_functions.md b/docs/ru/sql_reference/functions/ip_address_functions.md similarity index 95% rename from docs/ru/query_language/functions/ip_address_functions.md rename to docs/ru/sql_reference/functions/ip_address_functions.md index 87c1da4114b..57485f6a812 100644 --- a/docs/ru/query_language/functions/ip_address_functions.md +++ b/docs/ru/sql_reference/functions/ip_address_functions.md @@ -176,7 +176,7 @@ SELECT IPv6CIDRToRange(toIPv6('2001:0db8:0000:85a3:0000:0000:ac1f:8001'), 32) ## toIPv4(string) {#toipv4string} -Псевдоним функции `IPv4StringToNum()` которая принимает строку с адресом IPv4 и возвращает значение типа [IPv4](../../data_types/domains/ipv4.md), которое равно значению, возвращаемому функцией `IPv4StringToNum()`. +Псевдоним функции `IPv4StringToNum()` которая принимает строку с адресом IPv4 и возвращает значение типа [IPv4](../../sql_reference/functions/ip_address_functions.md), которое равно значению, возвращаемому функцией `IPv4StringToNum()`. ``` sql WITH @@ -208,7 +208,7 @@ SELECT ## toIPv6(string) {#toipv6string} -Псевдоним функции `IPv6StringToNum()` которая принимает строку с адресом IPv6 и возвращает значение типа [IPv6](../../data_types/domains/ipv6.md), которое равно значению, возвращаемому функцией `IPv6StringToNum()`. +Псевдоним функции `IPv6StringToNum()` которая принимает строку с адресом IPv6 и возвращает значение типа [IPv6](../../sql_reference/functions/ip_address_functions.md), которое равно значению, возвращаемому функцией `IPv6StringToNum()`. ``` sql WITH diff --git a/docs/ru/query_language/functions/json_functions.md b/docs/ru/sql_reference/functions/json_functions.md similarity index 100% rename from docs/ru/query_language/functions/json_functions.md rename to docs/ru/sql_reference/functions/json_functions.md diff --git a/docs/ru/query_language/functions/logical_functions.md b/docs/ru/sql_reference/functions/logical_functions.md similarity index 100% rename from docs/ru/query_language/functions/logical_functions.md rename to docs/ru/sql_reference/functions/logical_functions.md diff --git a/docs/ru/query_language/functions/machine_learning_functions.md b/docs/ru/sql_reference/functions/machine_learning_functions.md similarity index 51% rename from docs/ru/query_language/functions/machine_learning_functions.md rename to docs/ru/sql_reference/functions/machine_learning_functions.md index 8a51d1dd4c6..c5dd27d96af 100644 --- a/docs/ru/query_language/functions/machine_learning_functions.md +++ b/docs/ru/sql_reference/functions/machine_learning_functions.md @@ -6,8 +6,8 @@ ### Stochastic Linear Regression {#stochastic-linear-regression} -Агрегатная функция [stochasticLinearRegression](../agg_functions/reference.md#agg_functions-stochasticlinearregression) реализует стохастический градиентный спуск, использую линейную модель и функцию потерь MSE. 
+Агрегатная функция [stochasticLinearRegression](../../sql_reference/functions/machine_learning_functions.md#agg_functions-stochasticlinearregression) реализует стохастический градиентный спуск, использую линейную модель и функцию потерь MSE. ### Stochastic Logistic Regression {#stochastic-logistic-regression} -Агрегатная функция [stochasticLogisticRegression](../agg_functions/reference.md#agg_functions-stochasticlogisticregression) реализует стохастический градиентный спуск для задачи бинарной классификации. +Агрегатная функция [stochasticLogisticRegression](../../sql_reference/functions/machine_learning_functions.md#agg_functions-stochasticlogisticregression) реализует стохастический градиентный спуск для задачи бинарной классификации. diff --git a/docs/ru/query_language/functions/math_functions.md b/docs/ru/sql_reference/functions/math_functions.md similarity index 100% rename from docs/ru/query_language/functions/math_functions.md rename to docs/ru/sql_reference/functions/math_functions.md diff --git a/docs/ru/query_language/functions/other_functions.md b/docs/ru/sql_reference/functions/other_functions.md similarity index 97% rename from docs/ru/query_language/functions/other_functions.md rename to docs/ru/sql_reference/functions/other_functions.md index e4f7440b7cd..a0568cb262e 100644 --- a/docs/ru/query_language/functions/other_functions.md +++ b/docs/ru/sql_reference/functions/other_functions.md @@ -48,7 +48,7 @@ basename( expr ) **Параметры** -- `expr` — Выражение, возвращающее значение типа [String](../../data_types/string.md). В результирующем значении все бэкслэши должны быть экранированы. +- `expr` — Выражение, возвращающее значение типа [String](../../sql_reference/functions/other_functions.md). В результирующем значении все бэкслэши должны быть экранированы. **Возвращаемое значение** @@ -186,8 +186,8 @@ SELECT currentUser(); **Параметры** -- `x` — Значение, которое нужно проверить на бесконечность. Тип: [Float\*](../../data_types/float.md). -- `y` — Запасное значение. Тип: [Float\*](../../data_types/float.md). +- `x` — Значение, которое нужно проверить на бесконечность. Тип: [Float\*](../../sql_reference/functions/other_functions.md). +- `y` — Запасное значение. Тип: [Float\*](../../sql_reference/functions/other_functions.md). **Возвращаемые значения** @@ -420,7 +420,7 @@ neighbor(column, offset[, default_value]) **Параметры** - `column` — Имя столбца или скалярное выражение. -- `offset` - Смещение от текущей строки `column`. [Int64](../../data_types/int_uint.md). +- `offset` - Смещение от текущей строки `column`. [Int64](../../sql_reference/functions/other_functions.md). - `default_value` - Опциональный параметр. Значение, которое будет возвращено, если смещение выходит за пределы блока данных. **Возвращаемое значение** @@ -603,7 +603,7 @@ WHERE diff != 1 ## getSizeOfEnumType {#getsizeofenumtype} -Возвращает количество полей в [Enum](../../data_types/enum.md). +Возвращает количество полей в [Enum](../../sql_reference/functions/other_functions.md). ``` sql getSizeOfEnumType(value) @@ -716,7 +716,7 @@ defaultValueOfArgumentType(expression) - `0` для чисел; - Пустая строка для строк; -- `ᴺᵁᴸᴸ` для [Nullable](../../data_types/nullable.md). +- `ᴺᵁᴸᴸ` для [Nullable](../../sql_reference/functions/other_functions.md). **Пример** @@ -791,7 +791,7 @@ filesystemAvailable() - Объём доступного для записи данных места в байтах. -Тип: [UInt64](../../data_types/int_uint.md). +Тип: [UInt64](../../sql_reference/functions/other_functions.md). 
**Пример** @@ -823,7 +823,7 @@ filesystemFree() - Объем свободного места в байтах. -Тип: [UInt64](../../data_types/int_uint.md). +Тип: [UInt64](../../sql_reference/functions/other_functions.md). **Пример** @@ -843,7 +843,7 @@ SELECT formatReadableSize(filesystemFree()) AS "Free space", toTypeName(filesyst ## filesystemCapacity {#filesystemcapacity} -Возвращает информацию о ёмкости файловой системы в байтах. Для оценки должен быть настроен [путь](../../operations/server_settings/settings.md#server_settings-path) к каталогу с данными. +Возвращает информацию о ёмкости файловой системы в байтах. Для оценки должен быть настроен [путь](../../sql_reference/functions/other_functions.md#server_configuration_parameters-path) к каталогу с данными. **Синтаксис** @@ -855,7 +855,7 @@ filesystemCapacity() - Информация о ёмкости файловой системы в байтах. -Тип: [UInt64](../../data_types/int_uint.md). +Тип: [UInt64](../../sql_reference/functions/other_functions.md). **Пример** @@ -883,9 +883,9 @@ SELECT formatReadableSize(filesystemCapacity()) AS "Capacity", toTypeName(filesy ## joinGet {#joinget} -Функция позволяет извлекать данные из таблицы таким же образом как из [словаря](../../query_language/dicts/index.md). +Функция позволяет извлекать данные из таблицы таким же образом как из [словаря](../../sql_reference/functions/other_functions.md). -Получает данные из таблиц [Join](../../operations/table_engines/join.md#creating-a-table) по ключу. +Получает данные из таблиц [Join](../../sql_reference/functions/other_functions.md#creating-a-table) по ключу. Поддерживаются только таблицы, созданные с `ENGINE = Join(ANY, LEFT, )`. @@ -907,7 +907,7 @@ joinGet(join_storage_table_name, `value_column`, join_keys) Если значения не существует в исходной таблице, вернется `0` или `null` в соответствии с настройками [join\_use\_nulls](../../operations/settings/settings.md#join_use_nulls). -Подробнее о настройке `join_use_nulls` в [операциях Join](../../operations/table_engines/join.md). +Подробнее о настройке `join_use_nulls` в [операциях Join](../../sql_reference/functions/other_functions.md). **Пример** @@ -1010,7 +1010,7 @@ randomPrintableASCII(length) - Строка со случайным набором печатных символов [ASCII](https://en.wikipedia.org/wiki/ASCII#Printable_characters). -Тип: [String](../../data_types/string.md) +Тип: [String](../../sql_reference/functions/other_functions.md) **Пример** diff --git a/docs/ru/query_language/functions/random_functions.md b/docs/ru/sql_reference/functions/random_functions.md similarity index 100% rename from docs/ru/query_language/functions/random_functions.md rename to docs/ru/sql_reference/functions/random_functions.md diff --git a/docs/ru/query_language/functions/rounding_functions.md b/docs/ru/sql_reference/functions/rounding_functions.md similarity index 98% rename from docs/ru/query_language/functions/rounding_functions.md rename to docs/ru/sql_reference/functions/rounding_functions.md index 9c5f73815b5..9e7947c109d 100644 --- a/docs/ru/query_language/functions/rounding_functions.md +++ b/docs/ru/sql_reference/functions/rounding_functions.md @@ -30,7 +30,7 @@ round(expression [, decimal_places]) **Параметры:** -- `expression` — Число для округления. Может быть любым [выражением](../syntax.md#syntax-expressions), возвращающим числовой [тип данных](../../data_types/index.md#data_types). +- `expression` — Число для округления. Может быть любым [выражением](../syntax.md#syntax-expressions), возвращающим числовой [тип данных](../../sql_reference/functions/rounding_functions.md#data_types). 
- `decimal-places` — Целое значение. - Если `decimal-places > 0`, то функция округляет значение справа от запятой. - Если `decimal-places < 0` то функция округляет значение слева от запятой. @@ -109,7 +109,7 @@ roundBankers(expression [, decimal_places]) **Параметры** -- `expression` — Число для округления. Может быть любым [выражением](../syntax.md#syntax-expressions), возвращающим числовой [тип данных](../../data_types/index.md#data_types). +- `expression` — Число для округления. Может быть любым [выражением](../syntax.md#syntax-expressions), возвращающим числовой [тип данных](../../sql_reference/functions/rounding_functions.md#data_types). - `decimal-places` — Десятичный разряд. Целое число. - `decimal-places > 0` — Функция округляет значение выражения до ближайшего чётного числа на соответствующей позиции справа от запятой. Например, `roundBankers(3.55, 1) = 3.6`. - `decimal-places < 0` — Функция округляет значение выражения до ближайшего чётного числа на соответствующей позиции слева от запятой. Например, `roundBankers(24.55, -1) = 20`. diff --git a/docs/ru/query_language/functions/splitting_merging_functions.md b/docs/ru/sql_reference/functions/splitting_merging_functions.md similarity index 100% rename from docs/ru/query_language/functions/splitting_merging_functions.md rename to docs/ru/sql_reference/functions/splitting_merging_functions.md diff --git a/docs/ru/query_language/functions/string_functions.md b/docs/ru/sql_reference/functions/string_functions.md similarity index 97% rename from docs/ru/query_language/functions/string_functions.md rename to docs/ru/sql_reference/functions/string_functions.md index ef2793d2c18..5c51ad7c73c 100644 --- a/docs/ru/query_language/functions/string_functions.md +++ b/docs/ru/sql_reference/functions/string_functions.md @@ -70,7 +70,7 @@ toValidUTF8( input_string ) Параметры: -- input\_string — произвольный набор байтов, представленный как объект типа [String](../../data_types/string.md). +- input\_string — произвольный набор байтов, представленный как объект типа [String](../../sql_reference/functions/string_functions.md). Возвращаемое значение: Корректная строка UTF-8. @@ -98,8 +98,8 @@ repeat(s, n) **Параметры** -- `s` — Строка для повторения. [String](../../data_types/string.md). -- `n` — Количество повторов. [UInt](../../data_types/int_uint.md). +- `s` — Строка для повторения. [String](../../sql_reference/functions/string_functions.md). +- `n` — Количество повторов. [UInt](../../sql_reference/functions/string_functions.md). **Возвращаемое значение** @@ -322,8 +322,8 @@ trim([[LEADING|TRAILING|BOTH] trim_character FROM] input_string) **Параметры** -- `trim_character` — один или несколько символов, подлежащие удалению. [String](../../data_types/string.md). -- `input_string` — строка для обрезки. [String](../../data_types/string.md). +- `trim_character` — один или несколько символов, подлежащие удалению. [String](../../sql_reference/functions/string_functions.md). +- `input_string` — строка для обрезки. [String](../../sql_reference/functions/string_functions.md). **Возвращаемое значение** @@ -361,7 +361,7 @@ trimLeft(input_string) **Параметры** -- `input_string` — строка для обрезки. [String](../../data_types/string.md). +- `input_string` — строка для обрезки. [String](../../sql_reference/functions/string_functions.md). **Возвращаемое значение** @@ -399,7 +399,7 @@ trimRight(input_string) **Параметры** -- `input_string` — строка для обрезки. [String](../../data_types/string.md). +- `input_string` — строка для обрезки. 
[String](../../sql_reference/functions/string_functions.md). **Возвращаемое значение** @@ -437,7 +437,7 @@ trimBoth(input_string) **Параметры** -- `input_string` — строка для обрезки. [String](../../data_types/string.md). +- `input_string` — строка для обрезки. [String](../../sql_reference/functions/string_functions.md). **Возвращаемое значение** diff --git a/docs/ru/query_language/functions/string_replace_functions.md b/docs/ru/sql_reference/functions/string_replace_functions.md similarity index 100% rename from docs/ru/query_language/functions/string_replace_functions.md rename to docs/ru/sql_reference/functions/string_replace_functions.md diff --git a/docs/ru/query_language/functions/string_search_functions.md b/docs/ru/sql_reference/functions/string_search_functions.md similarity index 100% rename from docs/ru/query_language/functions/string_search_functions.md rename to docs/ru/sql_reference/functions/string_search_functions.md diff --git a/docs/ru/query_language/functions/type_conversion_functions.md b/docs/ru/sql_reference/functions/type_conversion_functions.md similarity index 90% rename from docs/ru/query_language/functions/type_conversion_functions.md rename to docs/ru/sql_reference/functions/type_conversion_functions.md index 00582cd61cb..be4de7e7c89 100644 --- a/docs/ru/query_language/functions/type_conversion_functions.md +++ b/docs/ru/sql_reference/functions/type_conversion_functions.md @@ -8,7 +8,7 @@ ## toInt(8\|16\|32\|64) {#toint8163264} -Преобразует входное значение к типу [Int](../../data_types/int_uint.md). Семейство функций включает: +Преобразует входное значение к типу [Int](../../sql_reference/functions/type_conversion_functions.md). Семейство функций включает: - `toInt8(expr)` — возвращает значение типа `Int8`. - `toInt16(expr)` — возвращает значение типа `Int16`. @@ -25,7 +25,7 @@ Функции используют [округление к нулю](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), т.е. обрезают дробную часть числа. -Поведение функций для аргументов [NaN и Inf](../../data_types/float.md#data_type-float-nan-inf) не определено. При использовании функций помните о возможных проблемах при [преобразовании чисел](#numeric-conversion-issues). +Поведение функций для аргументов [NaN и Inf](../../sql_reference/functions/type_conversion_functions.md#data_type-float-nan-inf) не определено. При использовании функций помните о возможных проблемах при [преобразовании чисел](#numeric-conversion-issues). **Пример** @@ -73,7 +73,7 @@ select toInt64OrNull('123123'), toInt8OrNull('123qwe123') ## toUInt(8\|16\|32\|64) {#touint8163264} -Преобраует входное значение к типу [UInt](../../data_types/int_uint.md). Семейство функций включает: +Преобраует входное значение к типу [UInt](../../sql_reference/functions/type_conversion_functions.md). Семейство функций включает: - `toUInt8(expr)` — возвращает значение типа `UInt8`. - `toUInt16(expr)` — возвращает значение типа `UInt16`. @@ -90,7 +90,7 @@ select toInt64OrNull('123123'), toInt8OrNull('123qwe123') Функции используют [округление к нулю](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), т.е. обрезают дробную часть числа. -Поведение функций для аргументов [NaN и Inf](../../data_types/float.md#data_type-float-nan-inf) не определено. Если передать строку, содержащую отрицательное число, например `'-32'`, ClickHouse генерирует исключение. При использовании функций помните о возможных проблемах при [преобразовании чисел](#numeric-conversion-issues). 
+Поведение функций для аргументов [NaN и Inf](../../sql_reference/functions/type_conversion_functions.md#data_type-float-nan-inf) не определено. Если передать строку, содержащую отрицательное число, например `'-32'`, ClickHouse генерирует исключение. При использовании функций помните о возможных проблемах при [преобразовании чисел](#numeric-conversion-issues). **Пример** @@ -128,7 +128,7 @@ SELECT toUInt64(nan), toUInt32(-32), toUInt16('16'), toUInt8(8.8) ## toDecimal(32\|64\|128) {#todecimal3264128} -Преобразует `value` к типу данных [Decimal](../../data_types/decimal.md) с точностью `S`. `value` может быть числом или строкой. Параметр `S` (scale) задаёт число десятичных знаков. +Преобразует `value` к типу данных [Decimal](../../sql_reference/functions/type_conversion_functions.md) с точностью `S`. `value` может быть числом или строкой. Параметр `S` (scale) задаёт число десятичных знаков. - `toDecimal32(value, S)` - `toDecimal64(value, S)` @@ -136,7 +136,7 @@ SELECT toUInt64(nan), toUInt32(-32), toUInt16('16'), toUInt8(8.8) ## toDecimal(32\|64\|128)OrNull {#todecimal3264128ornull} -Преобразует входную строку в значение с типом данных [Nullable (Decimal (P, S))](../../data_types/decimal.md). Семейство функций включает в себя: +Преобразует входную строку в значение с типом данных [Nullable (Decimal (P, S))](../../sql_reference/functions/type_conversion_functions.md). Семейство функций включает в себя: - `toDecimal32OrNull(expr, S)` — Возвращает значение типа `Nullable(Decimal32(S))`. - `toDecimal64OrNull(expr, S)` — Возвращает значение типа `Nullable(Decimal64(S))`. @@ -146,7 +146,7 @@ SELECT toUInt64(nan), toUInt32(-32), toUInt16('16'), toUInt8(8.8) **Параметры** -- `expr` — [выражение](../syntax.md#syntax-expressions), возвращающее значение типа [String](../../data_types/string.md). ClickHouse ожидает текстовое представление десятичного числа. Например, `'1.111'`. +- `expr` — [выражение](../syntax.md#syntax-expressions), возвращающее значение типа [String](../../sql_reference/functions/type_conversion_functions.md). ClickHouse ожидает текстовое представление десятичного числа. Например, `'1.111'`. - `S` — количество десятичных знаков в результирующем значении. **Возвращаемое значение** @@ -180,7 +180,7 @@ SELECT toDecimal32OrNull(toString(-1.111), 2) AS val, toTypeName(val) ## toDecimal(32\|64\|128)OrZero {#todecimal3264128orzero} -Преобразует тип входного значения в [Decimal (P, S)](../../data_types/decimal.md). Семейство функций включает в себя: +Преобразует тип входного значения в [Decimal (P, S)](../../sql_reference/functions/type_conversion_functions.md). Семейство функций включает в себя: - `toDecimal32OrZero( expr, S)` — возвращает значение типа `Decimal32(S)`. - `toDecimal64OrZero( expr, S)` — возвращает значение типа `Decimal64(S)`. @@ -190,7 +190,7 @@ SELECT toDecimal32OrNull(toString(-1.111), 2) AS val, toTypeName(val) **Параметры** -- `expr` — [выражение](../syntax.md#syntax-expressions), возвращающее значение типа [String](../../data_types/string.md). ClickHouse ожидает текстовое представление десятичного числа. Например, `'1.111'`. +- `expr` — [выражение](../syntax.md#syntax-expressions), возвращающее значение типа [String](../../sql_reference/functions/type_conversion_functions.md). ClickHouse ожидает текстовое представление десятичного числа. Например, `'1.111'`. - `S` — количество десятичных знаков в результирующем значении. **Возвращаемое значение** @@ -332,7 +332,7 @@ SELECT Преобразование в FixedString(N) работает только для аргументов типа String или FixedString(N). 
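+Небольшой набросок, иллюстрирующий это ограничение (предполагается выполнение в clickhouse-client; строка и длина `FixedString` выбраны условно):
+
+``` sql
+-- Строка короче N дополняется нулевыми байтами до длины N.
+SELECT
+    CAST('abc' AS FixedString(5)) AS fs,
+    toTypeName(CAST('abc' AS FixedString(5))) AS type
+```
+
+`toTypeName` здесь лишь показывает, что результат действительно имеет тип `FixedString(5)`.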
-Поддержано преобразование к типу [Nullable](../../data_types/nullable.md) и обратно. Пример: +Поддержано преобразование к типу [Nullable](../../sql_reference/functions/type_conversion_functions.md) и обратно. Пример: ``` sql SELECT toTypeName(x) FROM t_null @@ -358,7 +358,7 @@ SELECT toTypeName(CAST(x, 'Nullable(UInt16)')) FROM t_null ## toInterval(Year\|Quarter\|Month\|Week\|Day\|Hour\|Minute\|Second) {#function-tointerval} -Приводит аргумент из числового типа данных к типу данных [IntervalType](../../data_types/special_data_types/interval.md). +Приводит аргумент из числового типа данных к типу данных [IntervalType](../../sql_reference/functions/type_conversion_functions.md). **Синтаксис** @@ -401,7 +401,7 @@ SELECT ## parseDateTimeBestEffort {#parsedatetimebesteffort} -Преобразует дату и время в [строковом](../../data_types/string.md) представлении к типу данных [DateTime](../../data_types/datetime.md#data_type-datetime). +Преобразует дату и время в [строковом](../../sql_reference/functions/type_conversion_functions.md) представлении к типу данных [DateTime](../../sql_reference/functions/type_conversion_functions.md#data_type-datetime). Функция распознаёт форматы [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601), [RFC 1123 - 5.2.14 RFC-822 Date and Time Specification](https://tools.ietf.org/html/rfc1123#page-55), формат даты времени ClickHouse's а также некоторые другие форматы. @@ -413,8 +413,8 @@ parseDateTimeBestEffort(time_string[, time_zone]); **Параметры** -- `time_string` — строка, содержащая дату и время для преобразования. [String](../../data_types/string.md). -- `time_zone` — часовой пояс. Функция анализирует `time_string` в соответствии с заданным часовым поясом. [String](../../data_types/string.md). +- `time_string` — строка, содержащая дату и время для преобразования. [String](../../sql_reference/functions/type_conversion_functions.md). +- `time_zone` — часовой пояс. Функция анализирует `time_string` в соответствии с заданным часовым поясом. [String](../../sql_reference/functions/type_conversion_functions.md). **Поддерживаемые нестандартные форматы** diff --git a/docs/ru/query_language/functions/url_functions.md b/docs/ru/sql_reference/functions/url_functions.md similarity index 98% rename from docs/ru/query_language/functions/url_functions.md rename to docs/ru/sql_reference/functions/url_functions.md index 6dc62b9c193..f024cfdcfd9 100644 --- a/docs/ru/query_language/functions/url_functions.md +++ b/docs/ru/sql_reference/functions/url_functions.md @@ -20,7 +20,7 @@ domain(url) **Параметры** -- `url` — URL. Тип — [String](../../data_types/string.md). +- `url` — URL. Тип — [String](../../sql_reference/functions/url_functions.md). URL может быть указан со схемой или без неё. Примеры: @@ -71,7 +71,7 @@ topLevelDomain(url) **Параметры** -- `url` — URL. Тип — [String](../../data_types/string.md). +- `url` — URL. Тип — [String](../../sql_reference/functions/url_functions.md). URL может быть указан со схемой или без неё. Примеры: diff --git a/docs/ru/query_language/functions/uuid_functions.md b/docs/ru/sql_reference/functions/uuid_functions.md similarity index 92% rename from docs/ru/query_language/functions/uuid_functions.md rename to docs/ru/sql_reference/functions/uuid_functions.md index 1f52ba27e31..960e2b10e80 100644 --- a/docs/ru/query_language/functions/uuid_functions.md +++ b/docs/ru/sql_reference/functions/uuid_functions.md @@ -10,7 +10,7 @@ generateUUIDv4() **Возвращаемое значение** -Значение типа [UUID](../../data_types/uuid.md). 
+Значение типа [UUID](../../sql_reference/data_types/uuid.md).

**Пример использования**

@@ -56,7 +56,7 @@ SELECT toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba0') AS uuid

## UUIDStringToNum {#uuidstringtonum}

-Принимает строку, содержащую 36 символов в формате `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`, и возвращает в виде набора байт в [FixedString(16)](../../data_types/fixedstring.md).
+Принимает строку, содержащую 36 символов в формате `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`, и возвращает в виде набора байт в [FixedString(16)](../../sql_reference/data_types/fixedstring.md).

``` sql
UUIDStringToNum(String)

@@ -82,7 +82,7 @@ SELECT

## UUIDNumToString {#uuidnumtostring}

-Принимает значение типа [FixedString(16)](../../data_types/fixedstring.md). Возвращает строку из 36 символов в текстовом виде.
+Принимает значение типа [FixedString(16)](../../sql_reference/data_types/fixedstring.md). Возвращает строку из 36 символов в текстовом виде.

``` sql
UUIDNumToString(FixedString(16))

diff --git a/docs/ru/query_language/functions/ym_dict_functions.md b/docs/ru/sql_reference/functions/ym_dict_functions.md
similarity index 98%
rename from docs/ru/query_language/functions/ym_dict_functions.md
rename to docs/ru/sql_reference/functions/ym_dict_functions.md
index d5e11658a4f..5d7aece88b9 100644
--- a/docs/ru/query_language/functions/ym_dict_functions.md
+++ b/docs/ru/sql_reference/functions/ym_dict_functions.md
@@ -113,8 +113,8 @@ regionToTopContinent(id[, geobase]);

**Параметры**

-- `id` — Идентификатор региона из геобазы Яндекса. [UInt32](../../data_types/int_uint.md).
-- `geobase` — Ключ словаря. Смотрите [Множественные геобазы](#multiple-geobases). [String](../../data_types/string.md). Опциональный параметр.
+- `id` — Идентификатор региона из геобазы Яндекса. [UInt32](../../sql_reference/data_types/int_uint.md).
+- `geobase` — Ключ словаря. Смотрите [Множественные геобазы](#multiple-geobases). [String](../../sql_reference/data_types/string.md). Опциональный параметр.

**Возвращаемое значение**

diff --git a/docs/ru/sql_reference/index.md b/docs/ru/sql_reference/index.md
new file mode 100644
index 00000000000..ca35c14a547
--- /dev/null
+++ b/docs/ru/sql_reference/index.md
@@ -0,0 +1,9 @@
+# Справка по SQL {#spravka-po-sql}
+
+- [SELECT](statements/select.md)
+- [INSERT INTO](statements/insert_into.md)
+- [CREATE](statements/create.md)
+- [ALTER](statements/alter.md)
+- [Прочие виды запросов](statements/misc.md)
+
+[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/)
diff --git a/docs/ru/query_language/operators.md b/docs/ru/sql_reference/operators.md
similarity index 89%
rename from docs/ru/query_language/operators.md
rename to docs/ru/sql_reference/operators.md
index 670990b0967..2cff1ad6aed 100644
--- a/docs/ru/query_language/operators.md
+++ b/docs/ru/sql_reference/operators.md
@@ -55,7 +55,7 @@

## Операторы для работы с множествами {#operatory-dlia-raboty-s-mnozhestvami}

-*Смотрите раздел [Операторы IN](select.md#select-in-operators).*
+*Смотрите раздел [Операторы IN](../sql_reference/statements/select.md#select-in-operators).*

`a IN ...` - функция `in(a, b)`

@@ -86,7 +86,7 @@ EXTRACT(part FROM date);

Эти значения могут быть указаны также в нижнем регистре (`day`, `month`).

-В параметре `date` указывается исходная дата. Поддерживаются типы [Date](../data_types/date.md) и [DateTime](../data_types/datetime.md).
+В параметре `date` указывается исходная дата.
Поддерживаются типы [Date](../sql_reference/data_types/date.md) и [DateTime](../sql_reference/data_types/datetime.md).

Примеры:

@@ -133,7 +133,7 @@ FROM test.Orders;

### INTERVAL {#operator-interval}

-Создаёт значение типа [Interval](../data_types/special_data_types/interval.md) которое должно использоваться в арифметических операциях со значениями типов [Date](../data_types/date.md) и [DateTime](../data_types/datetime.md).
+Создаёт значение типа [Interval](../sql_reference/data_types/special_data_types/interval.md), которое должно использоваться в арифметических операциях со значениями типов [Date](../sql_reference/data_types/date.md) и [DateTime](../sql_reference/data_types/datetime.md).

Типы интервалов:
- `SECOND`

@@ -162,8 +162,8 @@ SELECT now() AS current_date_time, current_date_time + INTERVAL 4 DAY + INTERVAL

**Смотрите также**

-- Тип данных [Interval](../data_types/special_data_types/interval.md)
-- Функции преобразования типов [toInterval](functions/type_conversion_functions.md#function-tointerval)
+- Тип данных [Interval](../sql_reference/data_types/special_data_types/interval.md)
+- Функции преобразования типов [toInterval](../sql_reference/functions/type_conversion_functions.md#function-tointerval)

## Оператор логического отрицания {#operator-logicheskogo-otritsaniia}

@@ -183,7 +183,7 @@ SELECT now() AS current_date_time, current_date_time + INTERVAL 4 DAY + INTERVAL

Примечание:

-Условный оператор сначала вычисляет значения b и c, затем проверяет выполнение условия a, и только после этого возвращает соответствующее значение. Если в качестве b или с выступает функция [arrayJoin()](functions/array_join.md#functions_arrayjoin), то размножение каждой строки произойдет вне зависимости от условия а.
+Условный оператор сначала вычисляет значения b и c, затем проверяет выполнение условия a, и только после этого возвращает соответствующее значение. Если в качестве b или с выступает функция [arrayJoin()](../sql_reference/functions/array_join.md#functions_arrayjoin), то размножение каждой строки произойдет вне зависимости от условия а.

## Условное выражение {#operator_case}

@@ -232,7 +232,7 @@ ClickHouse поддерживает операторы `IS NULL` и `IS NOT NULL

### IS NULL {#operator-is-null}

-- Для значений типа [Nullable](../data_types/nullable.md) оператор `IS NULL` возвращает:
+- Для значений типа [Nullable](../sql_reference/data_types/nullable.md) оператор `IS NULL` возвращает:
    - `1`, если значение — `NULL`.
    - `0` в обратном случае.
- Для прочих значений оператор `IS NULL` всегда возвращает `0`.

@@ -251,7 +251,7 @@ SELECT x+100 FROM t_null WHERE y IS NULL

### IS NOT NULL {#is-not-null}

-- Для значений типа [Nullable](../data_types/nullable.md) оператор `IS NOT NULL` возвращает:
+- Для значений типа [Nullable](../sql_reference/data_types/nullable.md) оператор `IS NOT NULL` возвращает:
    - `0`, если значение — `NULL`.
    - `1`, в обратном случае.
- Для прочих значений оператор `IS NOT NULL` всегда возвращает `1`.

diff --git a/docs/ru/query_language/alter.md b/docs/ru/sql_reference/statements/alter.md
similarity index 93%
rename from docs/ru/query_language/alter.md
rename to docs/ru/sql_reference/statements/alter.md
index 401d7e3bcbc..bc06fe074d2 100644
--- a/docs/ru/query_language/alter.md
+++ b/docs/ru/sql_reference/statements/alter.md
@@ -33,7 +33,7 @@ ADD COLUMN [IF NOT EXISTS] name [type] [default_expr] [codec] [AFTER name_after]

Если указано `IF NOT EXISTS`, запрос не будет возвращать ошибку, если столбец уже существует. Если указано `AFTER name_after` (имя другого столбца), то столбец добавляется (в список столбцов таблицы) после указанного. Иначе, столбец добавляется в конец таблицы.
Обратите внимание, ClickHouse не позволяет добавлять столбцы в начало таблицы. Для цепочки действий, `name_after` может быть именем столбца, который добавляется в одном из предыдущих действий. -Добавление столбца всего лишь меняет структуру таблицы, и не производит никаких действий с данными - соответствующие данные не появляются на диске после ALTER-а. При чтении из таблицы, если для какого-либо столбца отсутствуют данные, то он заполняется значениями по умолчанию (выполняя выражение по умолчанию, если такое есть, или нулями, пустыми строками). Также, столбец появляется на диске при слиянии кусков данных (см. [MergeTree](../operations/table_engines/mergetree.md)). +Добавление столбца всего лишь меняет структуру таблицы, и не производит никаких действий с данными - соответствующие данные не появляются на диске после ALTER-а. При чтении из таблицы, если для какого-либо столбца отсутствуют данные, то он заполняется значениями по умолчанию (выполняя выражение по умолчанию, если такое есть, или нулями, пустыми строками). Также, столбец появляется на диске при слиянии кусков данных (см. [MergeTree](../../sql_reference/statements/alter.md)). Такая схема позволяет добиться мгновенной работы запроса `ALTER` и отсутствия необходимости увеличивать объём старых данных. @@ -107,11 +107,11 @@ MODIFY COLUMN [IF EXISTS] name [type] [default_expr] [TTL] - TTL - Примеры изменения TTL столбца смотрите в разделе [TTL столбца](../operations/table_engines/mergetree.md#mergetree-column-ttl). + Примеры изменения TTL столбца смотрите в разделе [TTL столбца](../../sql_reference/statements/alter.md#mergetree-column-ttl). Если указано `IF EXISTS`, запрос не возвращает ошибку, если столбца не существует. -При изменении типа, значения преобразуются так, как если бы к ним была применена функция [toType](functions/type_conversion_functions.md). Если изменяется только выражение для умолчания, запрос не делает никакой сложной работы и выполняется мгновенно. +При изменении типа, значения преобразуются так, как если бы к ним была применена функция [toType](../../sql_reference/statements/alter.md). Если изменяется только выражение для умолчания, запрос не делает никакой сложной работы и выполняется мгновенно. Пример запроса: @@ -139,11 +139,11 @@ ALTER TABLE visits MODIFY COLUMN browser Array(String) Отсутствует возможность удалять столбцы, входящие в первичный ключ или ключ для сэмплирования (в общем, входящие в выражение `ENGINE`). Изменение типа у столбцов, входящих в первичный ключ возможно только в том случае, если это изменение не приводит к изменению данных (например, разрешено добавление значения в Enum или изменение типа с `DateTime` на `UInt32`). -Если возможностей запроса `ALTER` не хватает для нужного изменения таблицы, вы можете создать новую таблицу, скопировать туда данные с помощью запроса [INSERT SELECT](insert_into.md#insert_query_insert-select), затем поменять таблицы местами с помощью запроса [RENAME](misc.md#misc_operations-rename), и удалить старую таблицу. В качестве альтернативы для запроса `INSERT SELECT`, можно использовать инструмент [clickhouse-copier](../operations/utils/clickhouse-copier.md). +Если возможностей запроса `ALTER` не хватает для нужного изменения таблицы, вы можете создать новую таблицу, скопировать туда данные с помощью запроса [INSERT SELECT](insert_into.md#insert_query_insert-select), затем поменять таблицы местами с помощью запроса [RENAME](misc.md#misc_operations-rename), и удалить старую таблицу. 
В качестве альтернативы для запроса `INSERT SELECT`, можно использовать инструмент [clickhouse-copier](../../sql_reference/statements/alter.md). Запрос `ALTER` блокирует все чтения и записи для таблицы. То есть, если на момент запроса `ALTER`, выполнялся долгий `SELECT`, то запрос `ALTER` сначала дождётся его выполнения. И в это время, все новые запросы к той же таблице, будут ждать, пока завершится этот `ALTER`. -Для таблиц, которые не хранят данные самостоятельно (типа [Merge](../operations/table_engines/merge.md) и [Distributed](../operations/table_engines/distributed.md)), `ALTER` всего лишь меняет структуру таблицы, но не меняет структуру подчинённых таблиц. Для примера, при ALTER-е таблицы типа `Distributed`, вам также потребуется выполнить запрос `ALTER` для таблиц на всех удалённых серверах. +Для таблиц, которые не хранят данные самостоятельно (типа [Merge](../../sql_reference/statements/alter.md) и [Distributed](../../sql_reference/statements/alter.md)), `ALTER` всего лишь меняет структуру таблицы, но не меняет структуру подчинённых таблиц. Для примера, при ALTER-е таблицы типа `Distributed`, вам также потребуется выполнить запрос `ALTER` для таблиц на всех удалённых серверах. ### Манипуляции с ключевыми выражениями таблиц {#manipuliatsii-s-kliuchevymi-vyrazheniiami-tablits} @@ -153,8 +153,8 @@ ALTER TABLE visits MODIFY COLUMN browser Array(String) MODIFY ORDER BY new_expression ``` -Работает только для таблиц семейства [`MergeTree`](../operations/table_engines/mergetree.md) (в том числе [реплицированных](../operations/table_engines/replication.md)). После выполнения запроса -[ключ сортировки](../operations/table_engines/mergetree.md) таблицы +Работает только для таблиц семейства [`MergeTree`](../../sql_reference/statements/alter.md) (в том числе [реплицированных](../../sql_reference/statements/alter.md)). После выполнения запроса +[ключ сортировки](../../sql_reference/statements/alter.md) таблицы заменяется на `new_expression` (выражение или кортеж выражений). Первичный ключ при этом остаётся прежним. Операция затрагивает только метаданные. Чтобы сохранить свойство упорядоченности кусков данных по ключу @@ -197,7 +197,7 @@ ALTER TABLE [db].name DROP CONSTRAINT constraint_name; ### Манипуляции с партициями и кусками {#alter_manipulations-with-partitions} -Для работы с [партициями](../operations/table_engines/custom_partitioning_key.md) доступны следующие операции: +Для работы с [партициями](../../sql_reference/statements/alter.md) доступны следующие операции: - [DETACH PARTITION](#alter_detach-partition) – перенести партицию в директорию `detached`; - [DROP PARTITION](#alter_drop-partition) – удалить партицию; @@ -229,7 +229,7 @@ ALTER TABLE visits DETACH PARTITION 201901 После того как запрос будет выполнен, вы сможете производить любые операции с данными в директории `detached`. Например, можно удалить их из файловой системы. -Запрос реплицируется — данные будут перенесены в директорию `detached` и забыты на всех репликах. Обратите внимание, запрос может быть отправлен только на реплику-лидер. Чтобы узнать, является ли реплика лидером, выполните запрос `SELECT` к системной таблице [system.replicas](../operations/system_tables.md#system_tables-replicas). Либо можно выполнить запрос `DETACH` на всех репликах — тогда на всех репликах, кроме реплики-лидера, запрос вернет ошибку. +Запрос реплицируется — данные будут перенесены в директорию `detached` и забыты на всех репликах. Обратите внимание, запрос может быть отправлен только на реплику-лидер. 
Чтобы узнать, является ли реплика лидером, выполните запрос `SELECT` к системной таблице [system.replicas](../../operations/system_tables.md#system_tables-replicas). Либо можно выполнить запрос `DETACH` на всех репликах — тогда на всех репликах, кроме реплики-лидера, запрос вернет ошибку. #### DROP PARTITION {#alter_drop-partition} @@ -355,7 +355,7 @@ ALTER TABLE table_name FREEZE [PARTITION partition_expr] - `N` — инкрементальный номер резервной копии. !!! note "Примечание" - При использовании [нескольких дисков для хранения данных таблицы](../operations/table_engines/mergetree.md#table_engine-mergetree-multiple-volumes) директория `shadow/N` появляется на каждом из дисков, на которых были куски, попавшие под выражение `PARTITION`. + При использовании [нескольких дисков для хранения данных таблицы](../../sql_reference/statements/alter.md#table_engine-mergetree-multiple-volumes) директория `shadow/N` появляется на каждом из дисков, на которых были куски, попавшие под выражение `PARTITION`. Структура директорий внутри резервной копии такая же, как внутри `/var/lib/clickhouse/`. Запрос выполнит ‘chmod’ для всех файлов, запрещая запись в них. @@ -373,7 +373,7 @@ ALTER TABLE table_name FREEZE [PARTITION partition_expr] Восстановление данных из резервной копии не требует остановки сервера. -Подробнее о резервном копировании и восстановлении данных читайте в разделе [Резервное копирование данных](../operations/backup.md). +Подробнее о резервном копировании и восстановлении данных читайте в разделе [Резервное копирование данных](../../operations/backup.md). #### FETCH PARTITION {#alter_fetch-partition} @@ -406,7 +406,7 @@ ALTER TABLE users ATTACH PARTITION 201902; #### MOVE PARTITION\|PART {#alter_move-partition} -Перемещает партицию или кусок данных на другой том или диск для таблиц с движком `MergeTree`. Смотрите [Хранение данных таблицы на нескольких блочных устройствах](../operations/table_engines/mergetree.md#table_engine-mergetree-multiple-volumes). +Перемещает партицию или кусок данных на другой том или диск для таблиц с движком `MergeTree`. Смотрите [Хранение данных таблицы на нескольких блочных устройствах](../../sql_reference/statements/alter.md#table_engine-mergetree-multiple-volumes). ``` sql ALTER TABLE table_name MOVE PARTITION|PART partition_expr TO DISK|VOLUME 'disk_name' @@ -429,10 +429,10 @@ ALTER TABLE hits MOVE PARTITION '2019-09-01' TO DISK 'fast_ssd' Чтобы задать нужную партицию в запросах `ALTER ... PARTITION`, можно использовать: -- Имя партиции. Посмотреть имя партиции можно в столбце `partition` системной таблицы [system.parts](../operations/system_tables.md#system_tables-parts). Например, `ALTER TABLE visits DETACH PARTITION 201901`. +- Имя партиции. Посмотреть имя партиции можно в столбце `partition` системной таблицы [system.parts](../../operations/system_tables.md#system_tables-parts). Например, `ALTER TABLE visits DETACH PARTITION 201901`. - Произвольное выражение из столбцов исходной таблицы. Также поддерживаются константы и константные выражения. Например, `ALTER TABLE visits DETACH PARTITION toYYYYMM(toDate('2019-01-25'))`. - Строковый идентификатор партиции. Идентификатор партиции используется для именования кусков партиции на файловой системе и в ZooKeeper. В запросах `ALTER` идентификатор партиции нужно указывать в секции `PARTITION ID`, в одинарных кавычках. Например, `ALTER TABLE visits DETACH PARTITION ID '201901'`. 
-- Для запросов [ATTACH PART](#alter_attach-partition) и [DROP DETACHED PART](#alter_drop-detached): чтобы задать имя куска партиции, используйте строковой литерал со значением из столбца `name` системной таблицы [system.detached\_parts](../operations/system_tables.md#system_tables-detached_parts). Например, `ALTER TABLE visits ATTACH PART '201901_1_1_0'`. +- Для запросов [ATTACH PART](#alter_attach-partition) и [DROP DETACHED PART](#alter_drop-detached): чтобы задать имя куска партиции, используйте строковой литерал со значением из столбца `name` системной таблицы [system.detached\_parts](../../operations/system_tables.md#system_tables-detached_parts). Например, `ALTER TABLE visits ATTACH PART '201901_1_1_0'`. Использование кавычек в имени партиций зависит от типа данных столбца, по которому задано партиционирование. Например, для столбца с типом `String` имя партиции необходимо указывать в кавычках (одинарных). Для типов `Date` и `Int*` кавычки указывать не нужно. @@ -448,7 +448,7 @@ OPTIMIZE TABLE table_not_partitioned PARTITION tuple() FINAL; ### Манипуляции с TTL таблицы {#manipuliatsii-s-ttl-tablitsy} -Вы можете изменить [TTL для таблицы](../operations/table_engines/mergetree.md#mergetree-table-ttl) запросом следующего вида: +Вы можете изменить [TTL для таблицы](../../sql_reference/statements/alter.md#mergetree-table-ttl) запросом следующего вида: ``` sql ALTER TABLE table-name MODIFY TTL ttl-expression @@ -493,7 +493,7 @@ ALTER TABLE [db.]table MATERIALIZE INDEX name IN PARTITION partition_name Мутации линейно упорядочены между собой и накладываются на каждый кусок в порядке добавления. Мутации также упорядочены со вставками - гарантируется, что данные, вставленные в таблицу до начала выполнения запроса мутации, будут изменены, а данные, вставленные после окончания запроса мутации, изменены не будут. При этом мутации никак не блокируют вставки. -Запрос завершается немедленно после добавления информации о мутации (для реплицированных таблиц - в ZooKeeper, для нереплицированных - на файловую систему). Сама мутация выполняется асинхронно, используя настройки системного профиля. Следить за ходом её выполнения можно по таблице [`system.mutations`](../operations/system_tables.md#system_tables-mutations). Добавленные мутации будут выполняться до конца даже в случае перезапуска серверов ClickHouse. Откатить мутацию после её добавления нельзя, но если мутация по какой-то причине не может выполниться до конца, её можно остановить с помощью запроса [`KILL MUTATION`](misc.md#kill-mutation). +Запрос завершается немедленно после добавления информации о мутации (для реплицированных таблиц - в ZooKeeper, для нереплицированных - на файловую систему). Сама мутация выполняется асинхронно, используя настройки системного профиля. Следить за ходом её выполнения можно по таблице [`system.mutations`](../../operations/system_tables.md#system_tables-mutations). Добавленные мутации будут выполняться до конца даже в случае перезапуска серверов ClickHouse. Откатить мутацию после её добавления нельзя, но если мутация по какой-то причине не может выполниться до конца, её можно остановить с помощью запроса [`KILL MUTATION`](misc.md#kill-mutation). Записи о последних выполненных мутациях удаляются не сразу (количество сохраняемых мутаций определяется параметром движка таблиц `finished_mutations_to_keep`). Более старые записи удаляются. 
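A hedged sketch of the mutation workflow described above (the `visits` table and its `Duration` column are hypothetical; `system.mutations` is the table named in the text):

``` sql
ALTER TABLE visits DELETE WHERE Duration = 0;  -- registers an asynchronous mutation

SELECT mutation_id, command, is_done           -- monitor its progress
FROM system.mutations
WHERE table = 'visits';
```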
diff --git a/docs/ru/query_language/create.md b/docs/ru/sql_reference/statements/create.md
similarity index 94%
rename from docs/ru/query_language/create.md
rename to docs/ru/sql_reference/statements/create.md
index dfaae11a359..7a5e3c9dc08 100644
--- a/docs/ru/query_language/create.md
+++ b/docs/ru/sql_reference/statements/create.md
@@ -20,11 +20,11 @@ CREATE DATABASE [IF NOT EXISTS] db_name [ON CLUSTER cluster] [ENGINE = engine(..

- `ENGINE`

-    - [MySQL](../database_engines/mysql.md)
+    - [MySQL](../../engines/database_engines/mysql.md)

        Позволяет получать данные с удаленного сервера MySQL.

-    По умолчанию ClickHouse использует собственный [движок баз данных](../database_engines/index.md).
+    По умолчанию ClickHouse использует собственный [движок баз данных](../../engines/database_engines/index.md).

## CREATE TABLE {#create-table-query}

@@ -65,7 +65,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name ENGINE = engine AS SELECT ...

Во всех случаях, если указано `IF NOT EXISTS`, то запрос не будет возвращать ошибку, если таблица уже существует. В этом случае, запрос будет ничего не делать.

-После секции `ENGINE` в запросе могут использоваться и другие секции в зависимости от движка. Подробную документацию по созданию таблиц смотрите в описаниях [движков таблиц](../operations/table_engines/index.md#table_engines).
+После секции `ENGINE` в запросе могут использоваться и другие секции в зависимости от движка. Подробную документацию по созданию таблиц смотрите в описаниях [движков таблиц](../../engines/table_engines/index.md#table_engines).

### Значения по умолчанию {#create-default-values}

@@ -123,11 +123,11 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]

### Выражение для TTL {#vyrazhenie-dlia-ttl}

-Определяет время хранения значений. Может быть указано только для таблиц семейства MergeTree. Подробнее смотрите в [TTL для столбцов и таблиц](../operations/table_engines/mergetree.md#table_engine-mergetree-ttl).
+Определяет время хранения значений. Может быть указано только для таблиц семейства MergeTree. Подробнее смотрите в [TTL для столбцов и таблиц](../../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-ttl).

### Кодеки сжатия столбцов {#codecs}

-По умолчанию, ClickHouse применяет к столбцу метод сжатия, определённый в [конфигурации сервера](../operations/server_settings/settings.md#compression). Кроме этого, можно задать метод сжатия для каждого отдельного столбца в запросе `CREATE TABLE`.
+По умолчанию, ClickHouse применяет к столбцу метод сжатия, определённый в [конфигурации сервера](../../operations/server_configuration_parameters/settings.md#compression). Кроме этого, можно задать метод сжатия для каждого отдельного столбца в запросе `CREATE TABLE`.

``` sql
CREATE TABLE codec_example
(
@@ -149,10 +149,10 @@

ENGINE = <Engine>

Сжатие поддерживается для следующих движков таблиц:

-- [MergeTree family](../operations/table_engines/mergetree.md)
-- [Log family](../operations/table_engines/log_family.md)
-- [Set](../operations/table_engines/set.md)
-- [Join](../operations/table_engines/join.md)
+- [MergeTree family](../../engines/table_engines/mergetree_family/mergetree.md)
+- [Log family](../../engines/table_engines/log_family/index.md)
+- [Set](../../engines/table_engines/special/set.md)
+- [Join](../../engines/table_engines/special/join.md)

ClickHouse поддерживает кодеки общего назначения и специализированные кодеки.
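To make the codec section above concrete, a minimal sketch (the table and column names are invented; `DoubleDelta` and `Gorilla` are examples of specialized codecs, `LZ4` of a general-purpose one):

``` sql
CREATE TABLE codec_sketch
(
    ts DateTime CODEC(DoubleDelta, LZ4),  -- specialized codec chained with a general-purpose one
    value Float64 CODEC(Gorilla)          -- specialized codec for slowly changing floats
)
ENGINE = MergeTree()
ORDER BY ts;
```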
@@ -213,7 +213,7 @@ CREATE TEMPORARY TABLE [IF NOT EXISTS] table_name В большинстве случаев, временные таблицы создаются не вручную, а при использовании внешних данных для запроса, или при распределённом `(GLOBAL) IN`. Подробнее см. соответствующие разделы -Вместо временных можно использовать обычные таблицы с [ENGINE = Memory](../operations/table_engines/memory.md). +Вместо временных можно использовать обычные таблицы с [ENGINE = Memory](../../sql_reference/statements/create.md). ## Распределенные DDL запросы (секция ON CLUSTER) {#raspredelennye-ddl-zaprosy-sektsiia-on-cluster} @@ -289,12 +289,12 @@ LAYOUT(LAYOUT_NAME([param_name param_value])) LIFETIME([MIN val1] MAX val2) ``` -Создаёт [внешний словарь](dicts/external_dicts.md) с заданной [структурой](dicts/external_dicts_dict_structure.md), [источником](dicts/external_dicts_dict_sources.md), [способом размещения в памяти](dicts/external_dicts_dict_layout.md) и [периодом обновления](dicts/external_dicts_dict_lifetime.md). +Создаёт [внешний словарь](../../sql_reference/statements/create.md) с заданной [структурой](../../sql_reference/statements/create.md), [источником](../../sql_reference/statements/create.md), [способом размещения в памяти](../../sql_reference/statements/create.md) и [периодом обновления](../../sql_reference/statements/create.md). Структура внешнего словаря состоит из атрибутов. Атрибуты словаря задаются как столбцы таблицы. Единственным обязательным свойством атрибута является его тип, все остальные свойства могут иметь значения по умолчанию. -В зависимости от [способа размещения словаря в памяти](dicts/external_dicts_dict_layout.md), ключами словаря могут быть один и более атрибутов. +В зависимости от [способа размещения словаря в памяти](../../sql_reference/statements/create.md), ключами словаря могут быть один и более атрибутов. -Смотрите [Внешние словари](dicts/external_dicts.md). +Смотрите [Внешние словари](../../sql_reference/statements/create.md). [Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/create/) diff --git a/docs/ru/sql_reference/statements/index.md b/docs/ru/sql_reference/statements/index.md new file mode 100644 index 00000000000..1adf93a153e --- /dev/null +++ b/docs/ru/sql_reference/statements/index.md @@ -0,0 +1,5 @@ +--- +toc_folder_title: Statements +toc_priority: 31 +--- + diff --git a/docs/ru/query_language/insert_into.md b/docs/ru/sql_reference/statements/insert_into.md similarity index 88% rename from docs/ru/query_language/insert_into.md rename to docs/ru/sql_reference/statements/insert_into.md index 7a9540e059c..ed07d6d3b1c 100644 --- a/docs/ru/query_language/insert_into.md +++ b/docs/ru/sql_reference/statements/insert_into.md @@ -13,9 +13,9 @@ INSERT INTO [db.]table [(c1, c2, c3)] VALUES (v11, v12, v13), (v21, v22, v23), . - Значения, вычисляемые из `DEFAULT` выражений, указанных в определении таблицы. - Нули и пустые строки, если `DEFAULT` не определены. -Если [strict\_insert\_defaults=1](../operations/settings/settings.md), то столбцы, для которых не определены `DEFAULT`, необходимо перечислить в запросе. +Если [strict\_insert\_defaults=1](../../operations/settings/settings.md), то столбцы, для которых не определены `DEFAULT`, необходимо перечислить в запросе. -В INSERT можно передавать данные любого [формата](../interfaces/formats.md#formats), который поддерживает ClickHouse. Для этого формат необходимо указать в запросе в явном виде: +В INSERT можно передавать данные любого [формата](../../interfaces/formats.md#formats), который поддерживает ClickHouse. 
Для этого формат необходимо указать в запросе в явном виде: ``` sql INSERT INTO [db.]table [(c1, c2, c3)] FORMAT format_name data_set @@ -37,7 +37,7 @@ INSERT INTO t FORMAT TabSeparated 22 Qwerty ``` -С помощью консольного клиента или HTTP интерфейса можно вставлять данные отдельно от запроса. Как это сделать, читайте в разделе «[Интерфейсы](../interfaces/index.md#interfaces)». +С помощью консольного клиента или HTTP интерфейса можно вставлять данные отдельно от запроса. Как это сделать, читайте в разделе «[Интерфейсы](../../interfaces/index.md#interfaces)». ### Ограничения (constraints) {#ogranicheniia-constraints} @@ -56,7 +56,7 @@ INSERT INTO [db.]table [(c1, c2, c3)] SELECT ... Не поддерживаются другие запросы на модификацию части данных: `UPDATE`, `DELETE`, `REPLACE`, `MERGE`, `UPSERT`, `INSERT UPDATE`. Вы можете удалять старые данные с помощью запроса `ALTER TABLE ... DROP PARTITION`. -Для табличной функции [input()](table_functions/input.md) после секции `SELECT` должна следовать +Для табличной функции [input()](../table_functions/input.md) после секции `SELECT` должна следовать секция `FORMAT`. ### Замечания о производительности {#zamechaniia-o-proizvoditelnosti} diff --git a/docs/ru/query_language/misc.md b/docs/ru/sql_reference/statements/misc.md similarity index 87% rename from docs/ru/query_language/misc.md rename to docs/ru/sql_reference/statements/misc.md index 476c57d34a5..d8278534d46 100644 --- a/docs/ru/query_language/misc.md +++ b/docs/ru/sql_reference/statements/misc.md @@ -25,17 +25,17 @@ CHECK TABLE [db.]name Запрос `CHECK TABLE` сравнивает текущие размеры файлов (в которых хранятся данные из колонок) с ожидаемыми значениями. Если значения не совпадают, данные в таблице считаются поврежденными. Искажение возможно, например, из-за сбоя при записи данных. -Ответ содержит колонку `result`, содержащую одну строку с типом [Boolean](../data_types/boolean.md). Допустимые значения: +Ответ содержит колонку `result`, содержащую одну строку с типом [Boolean](../../sql_reference/data_types/boolean.md). Допустимые значения: - 0 - данные в таблице повреждены; - 1 - данные не повреждены. Запрос `CHECK TABLE` поддерживает следующие движки таблиц: -- [Log](../operations/table_engines/log.md) -- [TinyLog](../operations/table_engines/tinylog.md) -- [StripeLog](../operations/table_engines/stripelog.md) -- [Семейство MergeTree](../operations/table_engines/mergetree.md) +- [Log](../../engines/table_engines/log_family/log.md) +- [TinyLog](../../engines/table_engines/log_family/tinylog.md) +- [StripeLog](../../engines/table_engines/log_family/stripelog.md) +- [Семейство MergeTree](../../engines/table_engines/mergetree_family/index.md) При попытке выполнить запрос с таблицами с другими табличными движками, ClickHouse генерирует исключение. @@ -48,7 +48,7 @@ CHECK TABLE [db.]name В этом случае можно скопировать оставшиеся неповрежденные данные в другую таблицу. Для этого: 1. Создайте новую таблицу с такой же структурой, как у поврежденной таблицы. Для этого выполните запрос `CREATE TABLE AS `. -2. Установите значение параметра [max\_threads](../operations/settings/settings.md#settings-max_threads) в 1. Это нужно для того, чтобы выполнить следующий запрос в одном потоке. Установить значение параметра можно через запрос: `SET max_threads = 1`. +2. Установите значение параметра [max\_threads](../../operations/settings/settings.md#settings-max_threads) в 1. Это нужно для того, чтобы выполнить следующий запрос в одном потоке. Установить значение параметра можно через запрос: `SET max_threads = 1`. 
3. Выполните запрос `INSERT INTO SELECT * FROM `. В результате неповрежденные данные будут скопированы в другую таблицу. Обратите внимание, будут скопированы только те данные, которые следуют до поврежденного участка. 4. Перезапустите `clickhouse-client`, чтобы вернуть предыдущее значение параметра `max_threads`. @@ -153,7 +153,7 @@ KILL MUTATION [ON CLUSTER cluster] [FORMAT format] ``` -Пытается остановить выполняющиеся в данные момент [мутации](alter.md#alter-mutations). Мутации для остановки выбираются из таблицы [`system.mutations`](../operations/system_tables.md#system_tables-mutations) с помощью условия, указанного в секции `WHERE` запроса `KILL`. +Пытается остановить выполняющиеся в данные момент [мутации](alter.md#alter-mutations). Мутации для остановки выбираются из таблицы [`system.mutations`](../../operations/system_tables.md#system_tables-mutations) с помощью условия, указанного в секции `WHERE` запроса `KILL`. Тестовый вариант запроса (`TEST`) только проверяет права пользователя и выводит список запросов для остановки. @@ -177,11 +177,11 @@ KILL MUTATION WHERE database = 'default' AND table = 'table' AND mutation_id = ' OPTIMIZE TABLE [db.]name [ON CLUSTER cluster] [PARTITION partition | PARTITION ID 'partition_id'] [FINAL] [DEDUPLICATE] ``` -Запрос пытается запустить внеплановый мёрж кусков данных для таблиц семейства [MergeTree](../operations/table_engines/mergetree.md). Другие движки таблиц не поддерживаются. +Запрос пытается запустить внеплановый мёрж кусков данных для таблиц семейства [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md). Другие движки таблиц не поддерживаются. -Если `OPTIMIZE` применяется к таблицам семейства [ReplicatedMergeTree](../operations/table_engines/replication.md), ClickHouse создаёт задачу на мёрж и ожидает её исполнения на всех узлах (если активирована настройка `replication_alter_partitions_sync`). +Если `OPTIMIZE` применяется к таблицам семейства [ReplicatedMergeTree](../../engines/table_engines/mergetree_family/replication.md), ClickHouse создаёт задачу на мёрж и ожидает её исполнения на всех узлах (если активирована настройка `replication_alter_partitions_sync`). -- Если `OPTIMIZE` не выполняет мёрж по любой причине, ClickHouse не оповещает об этом клиента. Чтобы включить оповещения, используйте настройку [optimize\_throw\_if\_noop](../operations/settings/settings.md#setting-optimize_throw_if_noop). +- Если `OPTIMIZE` не выполняет мёрж по любой причине, ClickHouse не оповещает об этом клиента. Чтобы включить оповещения, используйте настройку [optimize\_throw\_if\_noop](../../operations/settings/settings.md#setting-optimize_throw_if_noop). - Если указать `PARTITION`, то оптимизация выполняется только для указанной партиции. [Как задавать имя партиции в запросах](alter.md#alter-how-to-specify-part-expr). - Если указать `FINAL`, то оптимизация выполняется даже в том случае, если все данные уже лежат в одном куске. - Если указать `DEDUPLICATE`, то произойдет схлопывание полностью одинаковых строк (сравниваются значения во всех колонках), имеет смысл только для движка MergeTree. @@ -205,7 +205,7 @@ RENAME TABLE [db11.]name11 TO [db12.]name12, [db21.]name21 TO [db22.]name22, ... SET param = value ``` -Устанавливает значение `value` для [настройки](../operations/settings/index.md) `param` в текущей сессии. [Конфигурационные параметры сервера](../operations/server_settings/index.md) нельзя изменить подобным образом. +Устанавливает значение `value` для [настройки](../../operations/settings/index.md) `param` в текущей сессии. 
[Конфигурационные параметры сервера](../../operations/server_configuration_parameters/settings.md) нельзя изменить подобным образом. Можно одним запросом установить все настройки из заданного профиля настроек. @@ -213,7 +213,7 @@ SET param = value SET profile = 'profile-name-from-the-settings-file' ``` -Подробности смотрите в разделе [Настройки](../operations/settings/settings.md). +Подробности смотрите в разделе [Настройки](../../operations/settings/settings.md). ## TRUNCATE {#truncate} @@ -223,7 +223,7 @@ TRUNCATE TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster] Удаляет все данные из таблицы. Если условие `IF EXISTS` не указано, запрос вернет ошибку, если таблицы не существует. -Запрос `TRUNCATE` не поддерживается для следующих движков: [View](../operations/table_engines/view.md), [File](../operations/table_engines/file.md), [URL](../operations/table_engines/url.md) и [Null](../operations/table_engines/null.md). +Запрос `TRUNCATE` не поддерживается для следующих движков: [View](../../engines/table_engines/special/view.md), [File](../../engines/table_engines/special/file.md), [URL](../../engines/table_engines/special/url.md) и [Null](../../engines/table_engines/special/null.md). ## USE {#use} diff --git a/docs/ru/query_language/select.md b/docs/ru/sql_reference/statements/select.md similarity index 91% rename from docs/ru/query_language/select.md rename to docs/ru/sql_reference/statements/select.md index 759a65f155a..bd9dc21e2aa 100644 --- a/docs/ru/query_language/select.md +++ b/docs/ru/sql_reference/statements/select.md @@ -107,7 +107,7 @@ Cекция `FROM` определяет источник данных: - Таблица - Подзапрос -- [Табличная функция](table_functions/index.md) +- [Табличная функция](../../sql_reference/statements/select.md) Также могут присутствовать `ARRAY JOIN` и обычный `JOIN` (смотрите ниже). @@ -117,17 +117,17 @@ Cекция `FROM` определяет источник данных: Для выполнения запроса, из соответствующей таблицы, вынимаются все столбцы, перечисленные в запросе. Из подзапросов выкидываются столбцы, не нужные для внешнего запроса. Если в запросе не перечислено ни одного столбца (например, `SELECT count() FROM t`), то из таблицы всё равно вынимается один какой-нибудь столбец (предпочитается самый маленький), для того, чтобы можно было посчитать количество строк. -Модификатор `FINAL` может быть использован в запросе `SELECT` из таблиц семейства [MergeTree](../operations/table_engines/mergetree.md). При указании `FINAL`, данные будут выбираться полностью «домерженными». Стоит учитывать, что использование `FINAL` приводит к чтению также столбцов, относящихся к первичному ключу. Также, запрос будет выполняться в один поток, и при выполнении запроса будет выполняться слияние данных. Это приводит к тому, что при использовании `FINAL`, запрос выполняется медленнее. В большинстве случаев, следует избегать использования `FINAL`. +Модификатор `FINAL` может быть использован в запросе `SELECT` из таблиц семейства [MergeTree](../../engines/table_engines/mergetree_family/index.md). При указании `FINAL`, данные будут выбираться полностью «домерженными». Стоит учитывать, что использование `FINAL` приводит к чтению также столбцов, относящихся к первичному ключу. Также, запрос будет выполняться в один поток, и при выполнении запроса будет выполняться слияние данных. Это приводит к тому, что при использовании `FINAL`, запрос выполняется медленнее. В большинстве случаев, следует избегать использования `FINAL`. 
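A minimal sketch of the `FINAL` modifier discussed above (assuming a hypothetical `events` table with a ReplacingMergeTree engine):

``` sql
SELECT key, value
FROM events FINAL   -- rows are fully merged before being returned, so the query is slower
WHERE key = 42;
```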
Модификатор `FINAL` может быть использован для всех таблиц семейства `MergeTree`, которые производят преобразования данных в процессе фоновых слияний (кроме GraphiteMergeTree).

#### FINAL Modifier {#select-from-final}

Применим при выборке данных из таблиц с движками таблиц семейства [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md), кроме `GraphiteMergeTree`. Если в запросе используется `FINAL`, то ClickHouse полностью мёржит данные перед выдачей результата, таким образом выполняя все преобразования данных, которые производятся движком таблиц при мёржах.

Также поддержан для движков:

- [Replicated](../../engines/table_engines/mergetree_family/replication.md)-версий `MergeTree`.
- [View](../../engines/table_engines/special/view.md), [Buffer](../../engines/table_engines/special/buffer.md), [Distributed](../../engines/table_engines/special/distributed.md), и [MaterializedView](../../engines/table_engines/special/materializedview.md), которые работают поверх других движков, если они созданы для таблиц с движками семейства `MergeTree`.

Запросы, использующие `FINAL` исполняются медленнее аналогичных запросов без `FINAL`, поскольку:

@@ -153,9 +153,9 @@ Cекция `FROM` определяет источник данных:

- Сэмплирование работает детерминированно. При многократном выполнении одного и того же запроса `SELECT .. SAMPLE`, результат всегда будет одинаковым.
- Сэмплирование поддерживает консистентность для разных таблиц. Имеется в виду, что для таблиц с одним и тем же ключом сэмплирования, подмножество данных в выборках будет одинаковым (выборки при этом должны быть сформированы для одинаковой доли данных). Например, выборка по идентификаторам посетителей выберет из разных таблиц строки с одинаковым подмножеством всех возможных идентификаторов. Это свойство позволяет использовать выборки в подзапросах в секции [IN](#select-in-operators), а также объединять выборки с помощью [JOIN](#select-join).
- Сэмплирование позволяет читать меньше данных с диска. Обратите внимание, для этого необходимо корректно указать ключ сэмплирования. Подробнее см. в разделе [Создание таблицы MergeTree](../../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-creating-a-table).

Сэмплирование поддерживается только таблицами семейства [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-creating-a-table) и только в том случае, если для таблиц был указан ключ сэмплирования (выражение, на основе которого должна производиться выборка). Подробнее см.
в разделе [Создание таблиц MergeTree](../operations/table_engines/mergetree.md#table_engine-mergetree-creating-a-table). +Сэмплирование поддерживается только таблицами семейства [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-creating-a-table) и только в том случае, если для таблиц был указан ключ сэмплирования (выражение, на основе которого должна производиться выборка). Подробнее см. в разделе [Создание таблиц MergeTree](../../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-creating-a-table). Выражение `SAMPLE` в запросе можно задать следующими способами: @@ -198,7 +198,7 @@ ORDER BY PageViews DESC LIMIT 1000 При выполнении `SAMPLE n` коэффициент сэмплирования заранее неизвестен (то есть нет информации о том, относительно какого количества данных будет сформирована выборка). Чтобы узнать коэффициент сэмплирования, используйте столбец `_sample_factor`. -Виртуальный столбец `_sample_factor` автоматически создается в тех таблицах, для которых задано выражение `SAMPLE BY` (подробнее см. в разделе [Создание таблицы MergeTree](../operations/table_engines/mergetree.md#table_engine-mergetree-creating-a-table)). В столбце содержится коэффициент сэмплирования для таблицы – он рассчитывается динамически по мере добавления данных в таблицу. Ниже приведены примеры использования столбца `_sample_factor`. +Виртуальный столбец `_sample_factor` автоматически создается в тех таблицах, для которых задано выражение `SAMPLE BY` (подробнее см. в разделе [Создание таблицы MergeTree](../../engines/table_engines/mergetree_family/mergetree.md#table_engine-mergetree-creating-a-table)). В столбце содержится коэффициент сэмплирования для таблицы – он рассчитывается динамически по мере добавления данных в таблицу. Ниже приведены примеры использования столбца `_sample_factor`. Предположим, у нас есть таблица, в которой ведется статистика посещений сайта. Пример ниже показывает, как рассчитать суммарное число просмотров: @@ -252,7 +252,7 @@ SAMPLE 1/10 OFFSET 1/2 ### Секция ARRAY JOIN {#select-array-join-clause} -Позволяет выполнить `JOIN` с массивом или вложенной структурой данных. Смысл похож на функцию [arrayJoin](functions/array_join.md#functions_arrayjoin), но функциональность более широкая. +Позволяет выполнить `JOIN` с массивом или вложенной структурой данных. Смысл похож на функцию [arrayJoin](../../sql_reference/statements/select.md#functions_arrayjoin), но функциональность более широкая. ``` sql SELECT @@ -271,7 +271,7 @@ FROM - `ARRAY JOIN` — в этом случае результат `JOIN` не будет содержать пустые массивы; - `LEFT ARRAY JOIN` — пустые массивы попадут в результат выполнения `JOIN`. В качестве значения для пустых массивов устанавливается значение по умолчанию. Обычно это 0, пустая строка или NULL, в зависимости от типа элементов массива. -Рассмотрим примеры использования `ARRAY JOIN` и `LEFT ARRAY JOIN`. Для начала создадим таблицу, содержащую столбец с типом [Array](../data_types/array.md), и добавим в него значение: +Рассмотрим примеры использования `ARRAY JOIN` и `LEFT ARRAY JOIN`. 
Для начала создадим таблицу, содержащую столбец с типом [Array](../../sql_reference/statements/select.md), и добавим в него значение: ``` sql CREATE TABLE arrays_test @@ -389,7 +389,7 @@ ARRAY JOIN arr AS a, arrayEnumerate(arr) AS num, arrayMap(x -> x + 1, arr) AS ma └───────┴─────────┴───┴─────┴────────┘ ``` -В примере ниже используется функция [arrayEnumerate](functions/array_functions.md#array_functions-arrayenumerate): +В примере ниже используется функция [arrayEnumerate](../../sql_reference/statements/select.md#array_functions-arrayenumerate): ``` sql SELECT s, arr, a, num, arrayEnumerate(arr) @@ -409,7 +409,7 @@ ARRAY JOIN arr AS a, arrayEnumerate(arr) AS num; #### ARRAY JOIN с вложенными структурами данных {#array-join-s-vlozhennymi-strukturami-dannykh} -`ARRAY JOIN` также работает с [вложенными структурами данных](../data_types/nested_data_structures/nested.md). Пример: +`ARRAY JOIN` также работает с [вложенными структурами данных](../../sql_reference/statements/select.md). Пример: ``` sql CREATE TABLE nested_test @@ -502,7 +502,7 @@ ARRAY JOIN nest AS n; └───────┴─────┴─────┴─────────┴────────────┘ ``` -Пример использования функции [arrayEnumerate](functions/array_functions.md#array_functions-arrayenumerate): +Пример использования функции [arrayEnumerate](../../sql_reference/statements/select.md#array_functions-arrayenumerate): ``` sql SELECT s, `n.x`, `n.y`, `nest.x`, `nest.y`, num @@ -534,7 +534,7 @@ FROM (ON )|(USING ) ... ``` -Вместо `` и `` можно указать имена таблиц. Это эквивалентно подзапросу `SELECT * FROM table`, за исключением особого случая таблицы с движком [Join](../operations/table_engines/join.md) – массива, подготовленного для присоединения. +Вместо `` и `` можно указать имена таблиц. Это эквивалентно подзапросу `SELECT * FROM table`, за исключением особого случая таблицы с движком [Join](../../sql_reference/statements/select.md) – массива, подготовленного для присоединения. #### Поддерживаемые типы `JOIN` {#select-join-types} @@ -618,9 +618,9 @@ USING (equi_column1, ... equi_columnN, asof_column) `ASOF JOIN` принимает метку времени пользовательского события из `table_1` и находит такое событие в `table_2` метка времени которого наиболее близка к метке времени события из `table_1` в соответствии с условием на ближайшее совпадение. При этом столбец `user_id` используется для объединения по равенству, а столбец `ev_time` для объединения по ближайшему совпадению. В нашем примере `event_1_1` может быть объединено с `event_2_1`, `event_1_2` может быть объединено с `event_2_3`, а `event_2_2` не объединяется. !!! note "Примечание" - `ASOF JOIN` не поддержан для движка таблиц [Join](../operations/table_engines/join.md). + `ASOF JOIN` не поддержан для движка таблиц [Join](../../sql_reference/statements/select.md). -Чтобы задать значение строгости по умолчанию, используйте сессионный параметр [join\_default\_strictness](../operations/settings/settings.md#settings-join_default_strictness). +Чтобы задать значение строгости по умолчанию, используйте сессионный параметр [join\_default\_strictness](../../operations/settings/settings.md#settings-join_default_strictness). #### GLOBAL JOIN {#global-join} @@ -684,27 +684,27 @@ LIMIT 10 «Правая» таблица (результат подзапроса) располагается в оперативной памяти. Если её не хватает, вы не сможете выполнить `JOIN`. -Каждый раз для выполнения запроса с одинаковым `JOIN`, подзапрос выполняется заново — результат не кэшируется. 
Это можно избежать, используя специальный движок таблиц [Join](../operations/table_engines/join.md), представляющий собой подготовленное множество для соединения, которое всегда находится в оперативке.
+Каждый раз для выполнения запроса с одинаковым `JOIN`, подзапрос выполняется заново — результат не кэшируется. Этого можно избежать, используя специальный движок таблиц [Join](../../engines/table_engines/special/join.md), представляющий собой подготовленное множество для соединения, которое всегда находится в оперативке.

В некоторых случаях более эффективно использовать `IN` вместо `JOIN`.

Среди разных типов `JOIN`, наиболее эффективен `ANY LEFT JOIN`, следующий по эффективности `ANY INNER JOIN`. Наименее эффективны `ALL LEFT JOIN` и `ALL INNER JOIN`.

Если `JOIN` необходим для соединения с таблицами измерений (dimension tables - сравнительно небольшие таблицы, которые содержат свойства измерений - например, имена для рекламных кампаний), то использование `JOIN` может быть не очень удобным из-за громоздкости синтаксиса, а также из-за того, что правая таблица читается заново при каждом запросе. Специально для таких случаев существует функциональность «Внешние словари», которую следует использовать вместо `JOIN`. Дополнительные сведения смотрите в разделе [Внешние словари](../../sql_reference/dictionaries/external_dictionaries/external_dicts.md).

**Ограничения по памяти**

ClickHouse использует алгоритм [hash join](https://en.wikipedia.org/wiki/Hash_join). ClickHouse принимает `<right_subquery>` и создает для него хэш-таблицу в RAM. Чтобы ограничить потребление памяти операцией `JOIN`, используйте следующие параметры:

- [max\_rows\_in\_join](../../operations/settings/query_complexity.md#settings-max_rows_in_join) — ограничивает количество строк в хэш-таблице.
- [max\_bytes\_in\_join](../../operations/settings/query_complexity.md#settings-max_bytes_in_join) — ограничивает размер хэш-таблицы.

По достижении любого из этих ограничений, ClickHouse действует в соответствии с настройкой [join\_overflow\_mode](../../operations/settings/query_complexity.md#settings-join_overflow_mode).

#### Обработка пустых ячеек и NULL {#obrabotka-pustykh-iacheek-i-null}

При слиянии таблиц могут появляться пустые ячейки. То, каким образом ClickHouse заполняет эти ячейки, определяется настройкой [join\_use\_nulls](../../operations/settings/settings.md#join_use_nulls).
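A hedged sketch of the `join_use_nulls` behaviour described above (the table and column names are invented):

``` sql
SET join_use_nulls = 1;

SELECT a.id, b.val       -- unmatched rows yield NULL in b.val instead of a type default
FROM table_a AS a
LEFT JOIN table_b AS b ON a.id = b.id;
```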
-Если ключами `JOIN` выступают поля типа [Nullable](../data_types/nullable.md), то строки, где хотя бы один из ключей имеет значение [NULL](syntax.md#null-literal), не соединяются. +Если ключами `JOIN` выступают поля типа [Nullable](../../sql_reference/statements/select.md), то строки, где хотя бы один из ключей имеет значение [NULL](../syntax.md#null-literal), не соединяются. #### Ограничения синтаксиса {#ogranicheniia-sintaksisa} @@ -723,9 +723,9 @@ ClickHouse использует алгоритм [hash join](https://en.wikipedi Результат выражения должен иметь тип `UInt8`. -ClickHouse использует в выражении индексы, если это позволяет [движок таблицы](../operations/table_engines/index.md). +ClickHouse использует в выражении индексы, если это позволяет [движок таблицы](../../sql_reference/statements/select.md). -Если в секции необходимо проверить [NULL](syntax.md#null-literal), то используйте операторы [IS NULL](operators.md#operator-is-null) и [IS NOT NULL](operators.md#is-not-null), а также соответствующие функции `isNull` и `isNotNull`. В противном случае выражение будет считаться всегда не выполненным. +Если в секции необходимо проверить [NULL](../syntax.md#null-literal), то используйте операторы [IS NULL](../operators.md#operator-is-null) и [IS NOT NULL](../operators.md#is-not-null), а также соответствующие функции `isNull` и `isNotNull`. В противном случае выражение будет считаться всегда не выполненным. Пример проверки на `NULL`: @@ -796,7 +796,7 @@ GROUP BY вычисляет для каждого встретившегося #### Обработка NULL {#obrabotka-null} -При группировке, ClickHouse рассматривает [NULL](syntax.md) как значение, причём `NULL=NULL`. +При группировке, ClickHouse рассматривает [NULL](../syntax.md) как значение, причём `NULL=NULL`. Рассмотрим, что это значит на примере. @@ -854,11 +854,11 @@ GROUP BY вычисляет для каждого встретившегося #### GROUP BY во внешней памяти {#select-group-by-in-external-memory} Можно включить сброс временных данных на диск, чтобы ограничить потребление оперативной памяти при выполнении `GROUP BY`. -Настройка [max\_bytes\_before\_external\_group\_by](../operations/settings/settings.md#settings-max_bytes_before_external_group_by) определяет пороговое значение потребления RAM, по достижении которого временные данные `GROUP BY` сбрасываются в файловую систему. Если равно 0 (по умолчанию) - значит выключено. +Настройка [max\_bytes\_before\_external\_group\_by](../../operations/settings/settings.md#settings-max_bytes_before_external_group_by) определяет пороговое значение потребления RAM, по достижении которого временные данные `GROUP BY` сбрасываются в файловую систему. Если равно 0 (по умолчанию) - значит выключено. При использовании `max_bytes_before_external_group_by`, рекомендуем выставить `max_memory_usage` приблизительно в два раза больше. Это следует сделать, потому что агрегация выполняется в две стадии: чтение и формирование промежуточных данных (1) и слияние промежуточных данных (2). Сброс данных на файловую систему может производиться только на стадии 1. Если сброса временных данных не было, то на стадии 2 может потребляться до такого же объёма памяти, как на стадии 1. -Например, если [max\_memory\_usage](../operations/settings/settings.md#settings_max_memory_usage) было выставлено в 10000000000, и вы хотите использовать внешнюю агрегацию, то имеет смысл выставить `max_bytes_before_external_group_by` в 10000000000, а max\_memory\_usage в 20000000000. 
При срабатывании внешней агрегации (если был хотя бы один сброс временных данных в файловую систему) максимальное потребление оперативки будет лишь чуть-чуть больше `max_bytes_before_external_group_by`. +Например, если [max\_memory\_usage](../../operations/settings/settings.md#settings_max_memory_usage) было выставлено в 10000000000, и вы хотите использовать внешнюю агрегацию, то имеет смысл выставить `max_bytes_before_external_group_by` в 10000000000, а max\_memory\_usage в 20000000000. При срабатывании внешней агрегации (если был хотя бы один сброс временных данных в файловую систему) максимальное потребление оперативки будет лишь чуть-чуть больше `max_bytes_before_external_group_by`. При распределённой обработке запроса внешняя агрегация производится на удалённых серверах. Для того чтобы на сервере-инициаторе запроса использовалось немного оперативки, нужно выставить настройку `distributed_aggregation_memory_efficient` в 1. @@ -870,7 +870,7 @@ GROUP BY вычисляет для каждого встретившегося ### Секция LIMIT BY {#sektsiia-limit-by} -Запрос с секцией `LIMIT n BY expressions` выбирает первые `n` строк для каждого отличного значения `expressions`. Ключ `LIMIT BY` может содержать любое количество [выражений](syntax.md#syntax-expressions). +Запрос с секцией `LIMIT n BY expressions` выбирает первые `n` строк для каждого отличного значения `expressions`. Ключ `LIMIT BY` может содержать любое количество [выражений](../syntax.md#syntax-expressions). ClickHouse поддерживает следующий синтаксис: @@ -936,7 +936,7 @@ LIMIT 100 Запрос выберет топ 5 рефереров для каждой пары `domain, device_type`, но не более 100 строк (`LIMIT n BY + LIMIT`). -`LIMIT n BY` работает с [NULL](syntax.md) как если бы это было конкретное значение. Т.е. в результате запроса пользователь получит все комбинации полей, указанных в `BY`. +`LIMIT n BY` работает с [NULL](../syntax.md) как если бы это было конкретное значение. Т.е. в результате запроса пользователь получит все комбинации полей, указанных в `BY`. ### Секция HAVING {#sektsiia-having} @@ -1007,7 +1007,7 @@ WHERE и HAVING отличаются тем, что WHERE выполняется ### Секция SELECT {#select-select} -[Выражения](syntax.md#syntax-expressions) указанные в секции `SELECT` анализируются после завершения всех вычислений из секций, описанных выше. Вернее, анализируются выражения, стоящие над агрегатными функциями, если есть агрегатные функции. +[Выражения](../syntax.md#syntax-expressions) указанные в секции `SELECT` анализируются после завершения всех вычислений из секций, описанных выше. Вернее, анализируются выражения, стоящие над агрегатными функциями, если есть агрегатные функции. Сами агрегатные функции и то, что под ними, вычисляются при агрегации (`GROUP BY`). Эти выражения работают так, как будто применяются к отдельным строкам результата. Если в результат необходимо включить все столбцы, используйте символ звёздочка (`*`). Например, `SELECT * FROM ...`. @@ -1080,7 +1080,7 @@ Code: 42. DB::Exception: Received from localhost:9000. DB::Exception: Number of `DISTINCT` не поддерживается, если в `SELECT` присутствует хотя бы один столбец типа массив. -`DISTINCT` работает с [NULL](syntax.md) как если бы `NULL` был конкретным значением, причём `NULL=NULL`. Т.е. в результате `DISTINCT` разные комбинации с `NULL` встретятся только по одному разу. +`DISTINCT` работает с [NULL](../syntax.md) как если бы `NULL` был конкретным значением, причём `NULL=NULL`. Т.е. в результате `DISTINCT` разные комбинации с `NULL` встретятся только по одному разу. 
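For example, with a `Nullable` column such as `y` in the `t_null` table used earlier in these documents (a sketch; the table is assumed to exist):

``` sql
SELECT DISTINCT y FROM t_null;  -- NULL is treated as a value and appears exactly once
```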
ClickHouse поддерживает использование в одном запросе секций `DISTINCT` и `ORDER BY` для разных столбцов. Секция `DISTINCT` исполняется перед секцией `ORDER BY`. @@ -1237,7 +1237,7 @@ ORDER BY EventDate ASC #### Обработка NULL {#obrabotka-null-1} -При обработке запроса оператор IN будет считать, что результат операции с [NULL](syntax.md) всегда равен `0`, независимо от того, находится `NULL` в правой или левой части оператора. Значения `NULL` не входят ни в какое множество, не соответствуют друг другу и не могут сравниваться. +При обработке запроса оператор IN будет считать, что результат операции с [NULL](../syntax.md) всегда равен `0`, независимо от того, находится `NULL` в правой или левой части оператора. Значения `NULL` не входят ни в какое множество, не соответствуют друг другу и не могут сравниваться. Рассмотрим для примера таблицу `t_null`: @@ -1275,7 +1275,7 @@ FROM t_null Существует два варианта IN-ов с подзапросами (аналогично для JOIN-ов): обычный `IN` / `JOIN` и `GLOBAL IN` / `GLOBAL JOIN`. Они отличаются способом выполнения при распределённой обработке запроса. !!! attention "Attention" - Помните, что алгоритмы, описанные ниже, могут работать иначе в зависимости от [настройки](../operations/settings/settings.md) `distributed_product_mode`. + Помните, что алгоритмы, описанные ниже, могут работать иначе в зависимости от [настройки](../../operations/settings/settings.md) `distributed_product_mode`. При использовании обычного IN-а, запрос отправляется на удалённые серверы, и на каждом из них выполняются подзапросы в секциях `IN` / `JOIN`. @@ -1375,7 +1375,7 @@ SELECT uniq(UserID) FROM local_table WHERE CounterID = 101500 AND UserID GLOBAL Вы можете получить в дополнение к результату также минимальные и максимальные значения по столбцам результата. Для этого выставите настройку **extremes** в 1. Минимумы и максимумы считаются для числовых типов, дат, дат-с-временем. Для остальных столбцов будут выведены значения по умолчанию. -Вычисляются дополнительные две строчки - минимумы и максимумы, соответственно. Эти две дополнительные строки выводятся в [форматах](../interfaces/formats.md) `JSON*`, `TabSeparated*`, и `Pretty*` отдельно от остальных строчек. В остальных форматах они не выводится. +Вычисляются дополнительные две строчки - минимумы и максимумы, соответственно. Эти две дополнительные строки выводятся в [форматах](../../interfaces/formats.md) `JSON*`, `TabSeparated*`, и `Pretty*` отдельно от остальных строчек. В остальных форматах они не выводится. Во форматах `JSON*`, экстремальные значения выводятся отдельным полем ‘extremes’. В форматах `TabSeparated*`, строка выводится после основного результата и после ‘totals’ если есть. Перед ней (после остальных данных) вставляется пустая строка. В форматах `Pretty*`, строка выводится отдельной таблицей после основного результата и после `totals` если есть. diff --git a/docs/ru/query_language/show.md b/docs/ru/sql_reference/statements/show.md similarity index 90% rename from docs/ru/query_language/show.md rename to docs/ru/sql_reference/statements/show.md index 03f99378ccc..545301d6166 100644 --- a/docs/ru/query_language/show.md +++ b/docs/ru/sql_reference/statements/show.md @@ -23,7 +23,7 @@ SHOW DATABASES [INTO OUTFILE filename] [FORMAT format] SHOW PROCESSLIST [INTO OUTFILE filename] [FORMAT format] ``` -Выводит содержимое таблицы [system.processes](../operations/system_tables.md#system_tables-processes), которая содержит список запросов, выполняющихся в данный момент времени, кроме самих запросов `SHOW PROCESSLIST`. 
+Выводит содержимое таблицы [system.processes](../../operations/system_tables.md#system_tables-processes), которая содержит список запросов, выполняющихся в данный момент времени, кроме самих запросов `SHOW PROCESSLIST`.
 
 Запрос `SELECT * FROM system.processes` возвращает данные обо всех текущих запросах.
 
@@ -66,7 +66,7 @@ SHOW TABLES FROM system LIKE '%co%' LIMIT 2
 
 ## SHOW DICTIONARIES {#show-dictionaries}
 
-Выводит список [внешних словарей](dicts/external_dicts.md).
+Выводит список [внешних словарей](../../sql_reference/dictionaries/external_dictionaries/external_dicts.md).
 
 ``` sql
 SHOW DICTIONARIES [FROM <db>] [LIKE '<pattern>'] [LIMIT <N>] [INTO OUTFILE <filename>] [FORMAT <format>]
 ```
diff --git a/docs/ru/query_language/system.md b/docs/ru/sql_reference/statements/system.md
similarity index 80%
rename from docs/ru/query_language/system.md
rename to docs/ru/sql_reference/statements/system.md
index 12909c12ce2..2a4acd15e7f 100644
--- a/docs/ru/query_language/system.md
+++ b/docs/ru/sql_reference/statements/system.md
@@ -17,7 +17,7 @@
 ## RELOAD DICTIONARIES {#query_language-system-reload-dictionaries}
 
 Перегружает все словари, которые были успешно загружены до этого.
-По умолчанию включена ленивая загрузка [dictionaries\_lazy\_load](../operations/server_settings/settings.md#dictionaries-lazy-load), поэтому словари не загружаются автоматически при старте, а только при первом обращении через dictGet или SELECT к ENGINE=Dictionary. После этого такие словари (LOADED) будут перегружаться командой `system reload dictionaries`.
+По умолчанию включена ленивая загрузка [dictionaries\_lazy\_load](../../operations/server_configuration_parameters/settings.md#dictionaries-lazy-load), поэтому словари не загружаются автоматически при старте, а только при первом обращении через dictGet или SELECT к ENGINE=Dictionary. После этого такие словари (LOADED) будут перегружаться командой `system reload dictionaries`.
 Всегда возвращает `Ok.`, вне зависимости от результата обновления словарей.
 
 ## RELOAD DICTIONARY dictionary\_name {#query_language-system-reload-dictionary}
@@ -58,7 +58,7 @@ SELECT name, status FROM system.dictionaries;
 
 ## Управление распределёнными таблицами {#query-language-system-distributed}
 
-ClickHouse может оперировать [распределёнными](../operations/table_engines/distributed.md) таблицами. Когда пользователь вставляет данные в эти таблицы, ClickHouse сначала формирует очередь из данных, которые должны быть отправлены на узлы кластера, а затем асинхронно отправляет подготовленные данные. Вы можете управлять очередью с помощью запросов [STOP DISTRIBUTED SENDS](#query_language-system-stop-distributed-sends), [START DISTRIBUTED SENDS](#query_language-system-start-distributed-sends) и [FLUSH DISTRIBUTED](#query_language-system-flush-distributed). Также есть возможность синхронно вставлять распределенные данные с помощью настройки `insert_distributed_sync`.
+ClickHouse может оперировать [распределёнными](../../engines/table_engines/special/distributed.md) таблицами. Когда пользователь вставляет данные в эти таблицы, ClickHouse сначала формирует очередь из данных, которые должны быть отправлены на узлы кластера, а затем асинхронно отправляет подготовленные данные. Вы можете управлять очередью с помощью запросов [STOP DISTRIBUTED SENDS](#query_language-system-stop-distributed-sends), [START DISTRIBUTED SENDS](#query_language-system-start-distributed-sends) и [FLUSH DISTRIBUTED](#query_language-system-flush-distributed). Также есть возможность синхронно вставлять распределенные данные с помощью настройки `insert_distributed_sync`.
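+
+Типичная последовательность при обслуживании кластера может выглядеть так (имя `db.distributed_table` здесь условное; сами запросы подробно описаны в следующих разделах):
+
+``` sql
+-- остановить фоновую отправку накопленных блоков
+SYSTEM STOP DISTRIBUTED SENDS db.distributed_table;
+-- ... работы на узлах кластера ...
+-- снова включить фоновую отправку
+SYSTEM START DISTRIBUTED SENDS db.distributed_table;
+-- синхронно дослать всё, что накопилось в очереди
+SYSTEM FLUSH DISTRIBUTED db.distributed_table;
+```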
 ### STOP DISTRIBUTED SENDS {#query_language-system-stop-distributed-sends}
 
diff --git a/docs/ru/query_language/syntax.md b/docs/ru/sql_reference/syntax.md
similarity index 97%
rename from docs/ru/query_language/syntax.md
rename to docs/ru/sql_reference/syntax.md
index 0219de90685..b9576e48a59 100644
--- a/docs/ru/query_language/syntax.md
+++ b/docs/ru/sql_reference/syntax.md
@@ -68,13 +68,13 @@ INSERT INTO t VALUES (1, 'Hello, world'), (2, 'abc'), (3, 'def')
 
 - Иначе — ошибка.
 
 Соответствующее значение будет иметь тип минимального размера, который вмещает значение.
-Например, 1 парсится как `UInt8`, а 256 как `UInt16`. Подробнее о типах данных читайте в разделе [Типы данных](../data_types/index.md).
+Например, 1 парсится как `UInt8`, а 256 как `UInt16`. Подробнее о типах данных читайте в разделе [Типы данных](data_types/index.md).
 
 Примеры: `1`, `18446744073709551615`, `0xDEADBEEF`, `01`, `0.1`, `1e100`, `-1e-100`, `inf`, `nan`.
 
 ### Строковые {#syntax-string-literal}
 
-Поддерживаются только строковые литералы в одинарных кавычках. Символы внутри могут быть экранированы с помощью обратного слеша. Следующие escape-последовательности имеют соответствующее специальное значение: `\b`, `\f`, `\r`, `\n`, `\t`, `\0`, `\a`, `\v`, `\xHH`. Во всех остальных случаях, последовательности вида `\c`, где `c` — любой символ, преобразуется в `c` . Таким образом, могут быть использованы последовательности `\'` и `\\`. Значение будет иметь тип [String](../data_types/string.md).
+Поддерживаются только строковые литералы в одинарных кавычках. Символы внутри могут быть экранированы с помощью обратного слеша. Следующие escape-последовательности имеют соответствующее специальное значение: `\b`, `\f`, `\r`, `\n`, `\t`, `\0`, `\a`, `\v`, `\xHH`. Во всех остальных случаях последовательности вида `\c`, где `c` — любой символ, преобразуются в `c`. Таким образом, могут быть использованы последовательности `\'` и `\\`. Значение будет иметь тип [String](data_types/string.md).
 
 Минимальный набор символов, которых вам необходимо экранировать в строковых литералах: `'` и `\`. Одинарная кавычка может быть экранирована одинарной кавычкой, литералы `'It\'s'` и `'It''s'` эквивалентны.
 
@@ -83,13 +83,13 @@ INSERT INTO t VALUES (1, 'Hello, world'), (2, 'abc'), (3, 'def')
 
 Поддерживаются конструкции для массивов: `[1, 2, 3]` и кортежей: `(1, 'Hello, world!', 2)`. На самом деле, это вовсе не литералы, а выражение с оператором создания массива и оператором создания кортежа, соответственно. Массив должен состоять хотя бы из одного элемента, а кортеж - хотя бы из двух.
-Кортежи носят служебное значение для использования в секции `IN` запроса `SELECT`. Кортежи могут быть получены как результат запроса, но они не могут быть сохранены в базе данных (за исключением таблицы [Memory](../operations/table_engines/memory.md).)
+Кортежи носят служебное значение для использования в секции `IN` запроса `SELECT`. Кортежи могут быть получены как результат запроса, но они не могут быть сохранены в базе данных (за исключением таблицы [Memory](../engines/table_engines/special/memory.md).)
 
 ### NULL {#null-literal}
 
 Обозначает, что значение отсутствует.
 
-Чтобы в поле таблицы можно было хранить `NULL`, оно должно быть типа [Nullable](../data_types/nullable.md).
+Чтобы в поле таблицы можно было хранить `NULL`, оно должно быть типа [Nullable](data_types/nullable.md).
 
 В зависимости от формата данных (входных или выходных) `NULL` может иметь различное представление. Подробнее смотрите в документации для [форматов данных](../interfaces/formats.md#formats).
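+
+Небольшая проверка «нулёвости» значения (запрос условный; см. также функции `isNull` и `isNotNull`):
+
+``` sql
+SELECT isNull(NULL), NULL IS NULL
+```
+
+Оба выражения возвращают `1`.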
@@ -123,7 +123,7 @@ expr AS alias Например, `SELECT table_name_alias.column_name FROM table_name table_name_alias`. - В функции [CAST](functions/type_conversion_functions.md#type_conversion_function-cast), ключевое слово `AS` имеет другое значение. Смотрите описание функции. + В функции [CAST](sql_reference/syntax.md#type_conversion_function-cast), ключевое слово `AS` имеет другое значение. Смотрите описание функции. - `expr` — любое выражение, которое поддерживает ClickHouse. diff --git a/docs/ru/query_language/table_functions/file.md b/docs/ru/sql_reference/table_functions/file.md similarity index 92% rename from docs/ru/query_language/table_functions/file.md rename to docs/ru/sql_reference/table_functions/file.md index d415b20858b..e0da3ddc15f 100644 --- a/docs/ru/query_language/table_functions/file.md +++ b/docs/ru/sql_reference/table_functions/file.md @@ -8,7 +8,7 @@ file(path, format, structure) **Входные параметры** -- `path` — относительный путь до файла от [user\_files\_path](../../operations/server_settings/settings.md#server_settings-user_files_path). Путь к файлу поддерживает следующие шаблоны в режиме доступа только для чтения `*`, `?`, `{abc,def}` и `{N..M}`, где `N`, `M` — числа, \``'abc', 'def'` — строки. +- `path` — относительный путь до файла от [user\_files\_path](../../sql_reference/table_functions/file.md#server_configuration_parameters-user_files_path). Путь к файлу поддерживает следующие шаблоны в режиме доступа только для чтения `*`, `?`, `{abc,def}` и `{N..M}`, где `N`, `M` — числа, \``'abc', 'def'` — строки. - `format` — [формат](../../interfaces/formats.md#formats) файла. - `structure` — структура таблицы. Формат `'colunmn1_name column1_ype, column2_name column2_type, ...'`. diff --git a/docs/ru/query_language/table_functions/generate.md b/docs/ru/sql_reference/table_functions/generate.md similarity index 100% rename from docs/ru/query_language/table_functions/generate.md rename to docs/ru/sql_reference/table_functions/generate.md diff --git a/docs/ru/query_language/table_functions/hdfs.md b/docs/ru/sql_reference/table_functions/hdfs.md similarity index 100% rename from docs/ru/query_language/table_functions/hdfs.md rename to docs/ru/sql_reference/table_functions/hdfs.md diff --git a/docs/ru/query_language/table_functions/index.md b/docs/ru/sql_reference/table_functions/index.md similarity index 62% rename from docs/ru/query_language/table_functions/index.md rename to docs/ru/sql_reference/table_functions/index.md index 446aa554243..f0e465c5cb0 100644 --- a/docs/ru/query_language/table_functions/index.md +++ b/docs/ru/sql_reference/table_functions/index.md @@ -4,11 +4,11 @@ Табличные функции можно использовать в: -- Секции [FROM](../select.md#select-from) запроса `SELECT`. +- Секции [FROM](../statements/select.md#select-from) запроса `SELECT`. Это способ создания временной таблицы, которая доступна только в текущем запросе. -- Запросе [CREATE TABLE AS \](../create.md#create-table-query). +- Запросе [CREATE TABLE AS \](../statements/create.md#create-table-query). Это один из методов создания таблицы. @@ -17,14 +17,14 @@ | Функция | Описание | |-----------------------|----------------------------------------------------------------------------------------------------------------------------------------| -| [file](file.md) | Создаёт таблицу с движком [File](../../operations/table_engines/file.md). | -| [merge](merge.md) | Создаёт таблицу с движком [Merge](../../operations/table_engines/merge.md). 
|
+| [file](file.md) | Создаёт таблицу с движком [File](../../engines/table_engines/special/file.md). |
+| [merge](merge.md) | Создаёт таблицу с движком [Merge](../../engines/table_engines/special/merge.md). |
 | [numbers](numbers.md) | Создаёт таблицу с единственным столбцом, заполненным целыми числами. |
-| [remote](remote.md) | Предоставляет доступ к удалённым серверам, не создавая таблицу с движком [Distributed](../../operations/table_engines/distributed.md). |
-| [url](url.md) | Создаёт таблицу с движком [Url](../../operations/table_engines/url.md). |
-| [mysql](mysql.md) | Создаёт таблицу с движком [MySQL](../../operations/table_engines/mysql.md). |
-| [jdbc](jdbc.md) | Создаёт таблицу с дижком [JDBC](../../operations/table_engines/jdbc.md). |
-| [odbc](odbc.md) | Создаёт таблицу с движком [ODBC](../../operations/table_engines/odbc.md). |
-| [hdfs](hdfs.md) | Создаёт таблицу с движком [HDFS](../../operations/table_engines/hdfs.md). |
+| [remote](remote.md) | Предоставляет доступ к удалённым серверам, не создавая таблицу с движком [Distributed](../../engines/table_engines/special/distributed.md). |
+| [url](url.md) | Создаёт таблицу с движком [Url](../../engines/table_engines/special/url.md). |
+| [mysql](mysql.md) | Создаёт таблицу с движком [MySQL](../../engines/table_engines/integrations/mysql.md). |
+| [jdbc](jdbc.md) | Создаёт таблицу с движком [JDBC](../../engines/table_engines/integrations/jdbc.md). |
+| [odbc](odbc.md) | Создаёт таблицу с движком [ODBC](../../engines/table_engines/integrations/odbc.md). |
+| [hdfs](hdfs.md) | Создаёт таблицу с движком [HDFS](../../engines/table_engines/integrations/hdfs.md). |
 
 [Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/table_functions/)
diff --git a/docs/ru/query_language/table_functions/input.md b/docs/ru/sql_reference/table_functions/input.md
similarity index 100%
rename from docs/ru/query_language/table_functions/input.md
rename to docs/ru/sql_reference/table_functions/input.md
diff --git a/docs/ru/query_language/table_functions/jdbc.md b/docs/ru/sql_reference/table_functions/jdbc.md
similarity index 100%
rename from docs/ru/query_language/table_functions/jdbc.md
rename to docs/ru/sql_reference/table_functions/jdbc.md
diff --git a/docs/ru/query_language/table_functions/merge.md b/docs/ru/sql_reference/table_functions/merge.md
similarity index 100%
rename from docs/ru/query_language/table_functions/merge.md
rename to docs/ru/sql_reference/table_functions/merge.md
diff --git a/docs/ru/query_language/table_functions/mysql.md b/docs/ru/sql_reference/table_functions/mysql.md
similarity index 93%
rename from docs/ru/query_language/table_functions/mysql.md
rename to docs/ru/sql_reference/table_functions/mysql.md
index 228b0bbf38e..a26f89ced2f 100644
--- a/docs/ru/query_language/table_functions/mysql.md
+++ b/docs/ru/sql_reference/table_functions/mysql.md
@@ -73,7 +73,7 @@ SELECT * FROM mysql('localhost:3306', 'test', 'test', 'bayonet', '123')
 
 ## Смотрите также {#smotrite-takzhe}
 
-- [Движок таблиц ‘MySQL’](../../operations/table_engines/mysql.md)
-- [Использование MySQL как источника данных для внешнего словаря](../../query_language/dicts/external_dicts_dict_sources.md#dicts-external_dicts_dict_sources-mysql)
+- [Движок таблиц ‘MySQL’](../../engines/table_engines/integrations/mysql.md)
+- [Использование MySQL как источника данных для внешнего словаря](../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md#dicts-external_dicts_dict_sources-mysql)
 
 [Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/table_functions/mysql/)
diff --git a/docs/ru/query_language/table_functions/numbers.md b/docs/ru/sql_reference/table_functions/numbers.md
similarity index 100%
rename from docs/ru/query_language/table_functions/numbers.md
rename to docs/ru/sql_reference/table_functions/numbers.md
diff --git a/docs/ru/query_language/table_functions/odbc.md b/docs/ru/sql_reference/table_functions/odbc.md
similarity index 95%
rename from docs/ru/query_language/table_functions/odbc.md
rename to docs/ru/sql_reference/table_functions/odbc.md
index bff2c23cf47..0d277b2b26d 100644
--- a/docs/ru/query_language/table_functions/odbc.md
+++ b/docs/ru/sql_reference/table_functions/odbc.md
@@ -95,7 +95,7 @@ SELECT * FROM odbc('DSN=mysqlconn', 'test', 'test')
 
 ## Смотрите также {#smotrite-takzhe}
 
-- [Внешние словари ODBC](../../query_language/dicts/external_dicts_dict_sources.md#dicts-external_dicts_dict_sources-odbc)
-- [Движок таблиц ODBC](../../operations/table_engines/odbc.md).
+- [Внешние словари ODBC](../../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md#dicts-external_dicts_dict_sources-odbc)
+- [Движок таблиц ODBC](../../engines/table_engines/integrations/odbc.md).
 
 [Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/table_functions/jdbc/)
diff --git a/docs/ru/query_language/table_functions/remote.md b/docs/ru/sql_reference/table_functions/remote.md
similarity index 100%
rename from docs/ru/query_language/table_functions/remote.md
rename to docs/ru/sql_reference/table_functions/remote.md
diff --git a/docs/ru/query_language/table_functions/url.md b/docs/ru/sql_reference/table_functions/url.md
similarity index 100%
rename from docs/ru/query_language/table_functions/url.md
rename to docs/ru/sql_reference/table_functions/url.md
diff --git a/docs/ru/whats_new/changelog/2017.md b/docs/ru/whats_new/changelog/2017.md
new file mode 100644
index 00000000000..1c820453901
--- /dev/null
+++ b/docs/ru/whats_new/changelog/2017.md
@@ -0,0 +1,266 @@
+---
+machine_translated: true
+machine_translated_rev: 1cd5f0028d917696daf71ac1c9ee849c99c1d5c8
+---
+
+### ClickHouse релиз 1.1.54327, 2017-12-21 {#clickhouse-release-1-1-54327-2017-12-21}
+
+Этот выпуск содержит исправления ошибок для предыдущей версии 1.1.54318:
+
+- Исправлена ошибка с возможным состоянием гонки в репликации, которая могла привести к потере данных. Эта проблема затрагивает версии 1.1.54310 и 1.1.54318. Если вы используете одну из этих версий с реплицированными таблицами, настоятельно рекомендуется обновиться. Эта проблема проявляется в журналах предупреждениями вида `Part ... from own log doesn't exist.` Она актуальна, даже если вы не видите этих сообщений в журналах.
+
+### ClickHouse релиз 1.1.54318, 2017-11-30 {#clickhouse-release-1-1-54318-2017-11-30}
+
+Этот выпуск содержит исправления ошибок для предыдущей версии 1.1.54310:
+
+- Исправлено некорректное удаление строк при слияниях в движке SummingMergeTree
+- Исправлена утечка памяти в нереплицированных движках MergeTree
+- Исправлено снижение производительности при частых вставках в движки MergeTree
+- Исправлена ошибка, из-за которой очередь репликации останавливалась
+- Исправлены ротация и архивация журналов сервера
+
+### ClickHouse релиз 1.1.54310, 2017-11-01 {#clickhouse-release-1-1-54310-2017-11-01}
+
+#### Новые средства: {#new-features}
+
+- Пользовательский ключ секционирования для семейства движков таблиц MergeTree.
+- Движок таблиц [Kafka](https://clickhouse.yandex/docs/en/operations/table_engines/kafka/).
+- Добавлена поддержка загрузки моделей [CatBoost](https://catboost.yandex/) и их применения к данным, хранящимся в ClickHouse.
+- Добавлена поддержка часовых поясов с нецелочисленными смещениями от UTC.
+- Добавлена поддержка арифметических операций с временными интервалами.
+- Диапазон значений для типов Date и DateTime расширен до 2105 года.
+- Добавлен запрос `CREATE MATERIALIZED VIEW x TO y` (указывающий существующую таблицу для хранения данных материализованного представления).
+- Добавлен запрос `ATTACH TABLE` без аргументов.
+- Логика обработки вложенных столбцов с именами, оканчивающимися на -Map, в таблице SummingMergeTree выделена в агрегатную функцию sumMap. Теперь такие столбцы можно указывать явно.
+- Максимальный размер словаря IP trie увеличен до 128 миллионов записей.
+- Добавлена функция getSizeOfEnumType.
+- Добавлена агрегатная функция sumWithOverflow.
+- Добавлена поддержка формата ввода Cap'n Proto.
+- Теперь можно настроить уровень сжатия при использовании алгоритма zstd.
+
+#### Назад несовместимые изменения: {#backward-incompatible-changes}
+
+- Создание временных таблиц с движком, отличным от Memory, не допускается.
+- Явное создание таблиц с движком View или MaterializedView не допускается.
+- При создании таблицы новая проверка контролирует, что выражение ключа сэмплирования включено в первичный ключ.
+
+#### Устранение ошибок: {#bug-fixes}
+
+- Исправлены зависания при синхронной вставке в распределённую таблицу.
+- Исправлено неатомарное добавление и удаление кусков данных в реплицируемых таблицах.
+- Данные, вставляемые в материализованное представление, не подвергаются лишней дедупликации.
+- Выполнение запроса к распределённой таблице, у которой локальная реплика отстаёт, а удалённые реплики недоступны, больше не приводит к ошибке.
+- Пользователям больше не нужны права доступа к базе данных `default` для создания временных таблиц.
+- Исправлено падение при указании типа Array без аргументов.
+- Исправлены зависания, когда дисковый том с журналами сервера заполнен.
+- Исправлено переполнение в функции toRelativeWeekNum для первой недели эпохи Unix.
+
+#### Улучшения сборки: {#build-improvements}
+
+- Несколько сторонних библиотек (в частности, Poco) обновлены и преобразованы в подмодули git.
+
+### ClickHouse релиз 1.1.54304, 2017-10-19 {#clickhouse-release-1-1-54304-2017-10-19}
+
+#### Новые средства: {#new-features-1}
+
+- Поддержка TLS в нативном протоколе (чтобы включить, установите `tcp_ssl_port` в `config.xml`).
+
+#### Устранение ошибок: {#bug-fixes-1}
+
+- `ALTER` для реплицированных таблиц теперь начинает выполняться как можно скорее.
+- Исправлено падение при чтении данных с настройкой `preferred_block_size_bytes=0`.
+- Исправлены падения `clickhouse-client` при нажатии клавиши `Page Down`.
+- Корректная интерпретация некоторых сложных запросов с `GLOBAL IN` и `UNION ALL`.
+- `FREEZE PARTITION` теперь всегда работает атомарно.
+- Пустые POST-запросы теперь возвращают ответ с кодом 411.
+- Исправлены ошибки интерпретации выражений вида `CAST(1 AS Nullable(UInt8))`.
+- Исправлена ошибка при чтении столбцов `Array(Nullable(String))` из таблиц `MergeTree`.
+- Исправлено падение при разборе запросов вида `SELECT dummy AS dummy, dummy AS b`.
+- Пользователи корректно обновляются при некорректном `users.xml`.
+- Корректная обработка случая, когда исполняемый (executable) источник словаря возвращает ненулевой код возврата.
+
+### ClickHouse релиз 1.1.54292, 2017-09-20 {#clickhouse-release-1-1-54292-2017-09-20}
+
+#### Новые средства: {#new-features-2}
+
+- Добавлена функция `pointInPolygon` для работы с координатами на плоскости.
+- Добавлена агрегатная функция `sumMap`, вычисляющая сумму массивов аналогично `SummingMergeTree`.
+- Добавлена функция `trunc`. Улучшена производительность функций округления (`round`, `floor`, `ceil`, `roundToExp2`) и скорректирована логика их работы. Изменена логика работы функции `roundToExp2` для дробных и отрицательных чисел.
+- Исполняемый файл ClickHouse теперь меньше зависит от версии libc. Один и тот же исполняемый файл ClickHouse может работать на самых разных системах Linux. Остаётся зависимость при использовании скомпилированных запросов (настройка `compile = 1`, по умолчанию выключена).
+- Сократилось время, необходимое для динамической компиляции запросов.
+
+#### Устранение ошибок: {#bug-fixes-2}
+
+- Исправлена ошибка, которая иногда приводила к сообщениям `part ... intersects previous part` и к нарушению согласованности реплик.
+- Исправлена ошибка, из-за которой сервер блокировался, если ZooKeeper был недоступен во время завершения работы.
+- Удалено избыточное ведение журнала при восстановлении реплик.
+- Исправлена ошибка в реализации UNION ALL.
+- Исправлена ошибка в функции concat, возникавшая, если первый столбец блока имеет тип Array.
+- Прогресс теперь корректно отображается в таблице system.merges.
+
+### ClickHouse релиз 1.1.54289, 2017-09-13 {#clickhouse-release-1-1-54289-2017-09-13}
+
+#### Новые средства: {#new-features-3}
+
+- `SYSTEM` запросы для администрирования сервера: `SYSTEM RELOAD DICTIONARY`, `SYSTEM RELOAD DICTIONARIES`, `SYSTEM DROP DNS CACHE`, `SYSTEM SHUTDOWN`, `SYSTEM KILL`.
+- Добавлены функции для работы с массивами: `concat`, `arraySlice`, `arrayPushBack`, `arrayPushFront`, `arrayPopBack`, `arrayPopFront`.
+- Добавлены параметры `root` и `identity` в конфигурацию ZooKeeper. Это позволяет изолировать отдельных пользователей на одном кластере ZooKeeper.
+- Добавлены агрегатные функции `groupBitAnd`, `groupBitOr` и `groupBitXor` (для совместимости они также доступны под именами `BIT_AND`, `BIT_OR` и `BIT_XOR`).
+- Внешние словари можно загружать из MySQL, указав сокет в файловой системе.
+- Внешние словари могут быть загружены из MySQL по протоколу SSL (параметры `ssl_cert`, `ssl_key`, `ssl_ca`).
+- Добавлена настройка `max_network_bandwidth_for_user`, ограничивающая суммарное использование полосы пропускания запросами одного пользователя.
+- Поддержка `DROP TABLE` для временных таблиц.
+- Поддержка чтения значений `DateTime` в формате Unix timestamp из форматов `CSV` и `JSONEachRow`.
+- Запаздывающие реплики в распределённых запросах теперь исключаются по умолчанию (порог по умолчанию: 5 минут).
+- Во время ALTER используется блокировка FIFO: запрос ALTER не блокируется бесконечно при непрерывном потоке других запросов.
+- Возможность установки `umask` в конфигурационном файле.
+- Улучшена производительность запросов с `DISTINCT`.
+
+#### Устранение ошибок: {#bug-fixes-3}
+
+- Улучшен процесс удаления старых узлов в ZooKeeper. Раньше старые узлы иногда не удалялись при очень частых вставках, что, помимо прочего, приводило к медленному завершению работы сервера.
+- Исправлена рандомизация при выборе хостов для подключения к ZooKeeper.
+- Исправлено исключение запаздывающих реплик в распределённых запросах, если реплика является localhost.
+- Исправлена ошибка, из-за которой кусок данных в таблице `ReplicatedMergeTree` мог оказаться повреждённым после выполнения `ALTER MODIFY` над элементом структуры `Nested`.
+- Исправлена ошибка, которая могла приводить к «зависанию» запросов SELECT.
+- Улучшения в распределённых DDL-запросах.
+- Исправлен запрос `CREATE TABLE ... AS `.
+- Устранена взаимоблокировка при выполнении запроса `ALTER ... CLEAR COLUMN IN PARTITION` для таблиц `Buffer`.
+- Исправлено некорректное значение по умолчанию для `Enum` (0 вместо минимального) при использовании форматов `JSONEachRow` и `TSKV`.
+- Устранено появление зомби-процессов при использовании словаря с источником `executable`.
+- Исправлен segfault при обработке запроса.
+
+#### Улучшен рабочий процесс разработки и сборки ClickHouse: {#improved-workflow-for-developing-and-assembling-clickhouse}
+
+- Вы можете использовать `pbuilder` для сборки ClickHouse.
+- Вы можете использовать `libc++` вместо `libstdc++` для сборок на Linux.
+- Добавлены инструкции по использованию инструментов статического анализа кода: `Coverage`, `clang-tidy`, `cppcheck`.
+
+#### Пожалуйста, обратите внимание при обновлении: {#please-note-when-upgrading}
+
+- Увеличено значение по умолчанию параметра MergeTree `max_bytes_to_merge_at_max_space_in_pool` (максимальный суммарный размер кусков данных для слияния, в байтах): оно выросло со 100 до 150 гигабайт. После обновления сервера это может вызвать большие слияния и, как следствие, повышенную нагрузку на дисковую подсистему. Если свободного места на сервере меньше, чем удвоенный суммарный объём выполняемых слияний, все остальные слияния, включая слияния небольших кусков данных, остановятся. В результате запросы INSERT будут завершаться ошибкой с сообщением «Merges are processing significantly slower than inserts.» Используйте запрос `SELECT * FROM system.merges` для мониторинга ситуации. Также можно следить за метрикой `DiskSpaceReservedForMerge` в таблице `system.metrics` или в Graphite. Делать ничего не требуется: проблема разрешится сама, как только большие слияния завершатся. Если такое поведение неприемлемо, верните предыдущее значение `max_bytes_to_merge_at_max_space_in_pool`: задайте 107374182400 в соответствующей секции config.xml и перезапустите сервер.
+
+### ClickHouse релиз 1.1.54284, 2017-08-29 {#clickhouse-release-1-1-54284-2017-08-29}
+
+- Это корректирующий выпуск для предыдущей версии 1.1.54282. Он исправляет утечки в каталоге кусков (parts) в ZooKeeper.
+
+### ClickHouse релиз 1.1.54282, 2017-08-23 {#clickhouse-release-1-1-54282-2017-08-23}
+
+Этот выпуск содержит исправления ошибок для предыдущей версии 1.1.54276:
+
+- Исправлено `DB::Exception: Assertion violation: !_path.empty()` при вставке в распределённую таблицу.
+- Исправлен разбор входных данных при вставке в формате RowBinary, если данные начинаются с ';'.
+- Исправлены ошибки компиляции некоторых агрегатных функций во время выполнения (например, `groupArray()`).
+
+### ClickHouse релиз 1.1.54276, 2017-08-16 {#clickhouse-release-1-1-54276-2017-08-16}
+
+#### Новые средства: {#new-features-4}
+
+- Добавлена секция WITH для запроса SELECT.
Пример запроса: `WITH 1+1 AS a SELECT a, a*a` +- Вставка может быть выполнена синхронно в распределенной таблице: OK возвращается только после того, как все данные сохранены на всех осколках. Это активируется установкой insert\_distributed\_sync=1. +- Добавлен тип данных UUID для работы с 16-байтовыми идентификаторами. +- Добавлены псевдонимы CHAR, FLOAT и других типов для совместимости с таблицей. +- Добавлены функции toYYYYMM, toYYYYMMDD и toYYYYMMDDhhmmss для преобразования времени в числа. +- Вы можете использовать IP-адреса (вместе с именем хоста) для идентификации серверов для кластеризованных запросов DDL. +- Добавлена поддержка непостоянных аргументов и отрицательных смещений в функции `substring(str, pos, len).` +- Добавлен параметр max\_size для `groupArray(max_size)(column)` агрегатная функция и оптимизированная ее производительность. + +#### Основное изменение: {#main-changes} + +- Улучшения безопасности: все файлы сервера создаются с разрешениями 0640 (могут быть изменены с помощью параметр config). +- Улучшены сообщения об ошибках для запросов с неверным синтаксисом. +- Значительно сокращается потребление памяти и повышается производительность при слиянии больших разделов данных MergeTree. +- Значительно повысилась производительность слияний данных для заменяющего движка Mergetree. +- Улучшена производительность асинхронных вставок из распределенной таблицы за счет объединения нескольких исходных вставок. Чтобы включить эту функцию, используйте параметр distributed\_directory\_monitor\_batch\_inserts=1. + +#### Назад несовместимые изменения: {#backward-incompatible-changes-1} + +- Изменен двоичный формат агрегатных состояний `groupArray(array_column)` функции для массивов. + +#### Полный список изменений: {#complete-list-of-changes} + +- Добавил тот `output_format_json_quote_denormals` настройка, которая позволяет выводить значения nan и inf в формате JSON. +- Оптимизировано распределение потока при чтении из распределенной таблицы. +- Настройки можно настроить в режиме только для чтения, если значение не изменяется. +- Добавлена возможность извлечения нецелочисленных гранул движка MergeTree для выполнения ограничений на размер блока, указанных в параметре preferred\_block\_size\_bytes. Цель состоит в том, чтобы уменьшить потребление оперативной памяти и увеличить локальность кэша при обработке запросов из таблиц с большими столбцами. +- Эффективное использование индексов, содержащих такие выражения, как `toStartOfHour(x)` для таких условий, как `toStartOfHour(x) op сonstexpr.` +- Добавлены новые настройки для движков MergeTree (раздел merge\_tree в config.XML): + - replicated\_deduplication\_window\_seconds задает количество секунд, разрешенных для дедуплицирующих вставок в реплицируемые таблицы. + - cleanup\_delay\_period устанавливает, как часто нужно запустить программу очистки, чтобы удалить устаревшие данные. + - replicated\_can\_become\_leader может препятствовать тому, чтобы реплика становилась лидером (и назначала слияния). +- Ускоренная очистка для удаления устаревших данных из ZooKeeper. +- Множество улучшений и исправлений для кластеризованных DDL-запросов. Особый интерес представляет новая настройка distributed\_ddl\_task\_timeout, которая ограничивает время ожидания ответа от серверов в кластере. Если запрос ddl не был выполнен на всех хостах, ответ будет содержать ошибку таймаута, и запрос будет выполнен в асинхронном режиме. +- Улучшено отображение трассировок стека в журналах сервера. +- Добавил тот «none» значение для метода сжатия. 
+- Вы можете использовать несколько разделов dictionaries\_config в config.xml.
+- Можно подключиться к MySQL через сокет в файловой системе.
+- В таблице system.parts появился новый столбец с информацией о размере засечек (marks) в байтах.
+
+#### Устранение ошибок: {#bug-fixes-4}
+
+- Distributed-таблицы поверх Merge-таблиц теперь корректно работают для запросов SELECT с условием на поле `_table`.
+- Исправлено редкое состояние гонки в ReplicatedMergeTree при проверке кусков данных.
+- Исправлено возможное зависание на «leader election» при запуске сервера.
+- Настройка max\_replica\_delay\_for\_distributed\_queries игнорировалась, если источником данных была локальная реплика. Это исправлено.
+- Исправлено некорректное поведение `ALTER TABLE CLEAR COLUMN IN PARTITION` при попытке очистить несуществующий столбец.
+- Исправлено исключение в функции multiIf при использовании пустых массивов или строк.
+- Исправлено чрезмерное выделение памяти при десериализации формата Native.
+- Исправлено некорректное автоматическое обновление словарей Trie.
+- Исправлено исключение при выполнении запросов с GROUP BY из Merge-таблицы при использовании SAMPLE.
+- Исправлено падение GROUP BY при включённой настройке distributed\_aggregation\_memory\_efficient=1.
+- Теперь можно указывать имя вида база\_данных.таблица в правой части IN и JOIN.
+- Для параллельной агрегации использовалось слишком много потоков. Это исправлено.
+- Исправлена работа функции «if» с аргументами FixedString.
+- SELECT из Distributed-таблицы некорректно работал для шардов с весом 0. Это исправлено.
+- Выполнение `CREATE VIEW IF EXISTS` больше не приводит к падениям.
+- Исправлено некорректное поведение при установке input\_format\_skip\_unknown\_fields=1 и наличии отрицательных чисел.
+- Исправлен бесконечный цикл в функции `dictGetHierarchy()` при некорректных данных в словаре.
+- Исправлены ошибки `Syntax error: unexpected (...)` при выполнении распределённых запросов с подзапросами в секции IN или JOIN, а также с Merge-таблицами.
+- Исправлена неправильная интерпретация запроса SELECT из таблиц с движком Dictionary.
+- Исправлена ошибка «Cannot mremap» при использовании в секциях IN и JOIN массивов более чем с 2 миллиардами элементов.
+- Исправлена отработка отказа (failover) для словарей с MySQL в качестве источника.
+
+#### Улучшен рабочий процесс разработки и сборки ClickHouse: {#improved-workflow-for-developing-and-assembling-clickhouse-1}
+
+- Сборка может выполняться в Аркадии.
+- Вы можете использовать gcc 7 для компиляции ClickHouse.
+- Параллельные сборки с использованием ccache+distcc теперь работают быстрее.
+
+### ClickHouse релиз 1.1.54245, 2017-07-04 {#clickhouse-release-1-1-54245-2017-07-04}
+
+#### Новые средства: {#new-features-5}
+
+- Распределённый DDL (например, `CREATE TABLE ON CLUSTER`).
+- Реплицируемый запрос `ALTER TABLE CLEAR COLUMN IN PARTITION`.
+- Движок таблиц Dictionary (доступ к данным словаря в виде таблицы).
+- Движок баз данных Dictionary (такая база данных автоматически содержит таблицы для всех подключённых внешних словарей).
+- Возможность проверить наличие обновлений словаря, отправив запрос источнику.
+- Квалифицированные имена столбцов.
+- Экранирование идентификаторов двойными кавычками.
+- Сессии в HTTP-интерфейсе.
+- Запрос OPTIMIZE для реплицируемой таблицы может выполняться не только на лидере.
+ +#### Назад несовместимые изменения: {#backward-incompatible-changes-2} + +- Удалить набор глобальных. + +#### Несущественные изменения: {#minor-changes} + +- Теперь после срабатывания предупреждения журнал печатает полную трассировку стека. +- Ослаблена проверка количества поврежденных / лишних частей данных при запуске (было слишком много ложных срабатываний). + +#### Устранение ошибок: {#bug-fixes-5} + +- Исправлена плохая связь «sticking» при вставке в распределенную таблицу. +- GLOBAL IN теперь работает для запроса из таблицы слияния, которая смотрит на распределенную таблицу. +- Неверное количество ядер было обнаружено на виртуальной машине Google Compute Engine. Это было исправлено. +- Изменения в том, как работает исполняемый источник кэшированных внешних словарей. +- Исправлено сравнение строк, содержащих нулевые символы. +- Исправлено сравнение полей первичного ключа Float32 с константами. +- Ранее неверная оценка размера поля могла привести к чрезмерно большим распределениям. +- Исправлена ошибка, при отправке запроса столбец допускает значения NULL в таблицу с помощью инструкции Alter. +- Исправлена ошибка при сортировке по нулевому столбцу, если количество строк меньше предельного. +- Исправлен порядок по подзапросу, состоящему только из постоянных значений. +- Ранее реплицированная таблица могла оставаться в недопустимом состоянии после неудачного удаления таблицы. +- Псевдонимы для скалярных подзапросов с пустыми результатами больше не теряются. +- Теперь запрос, который использовал компиляцию, не завершается ошибкой, если файл .so поврежден. diff --git a/docs/ru/whats_new/changelog/2018.md b/docs/ru/whats_new/changelog/2018.md new file mode 100644 index 00000000000..5de3ba68437 --- /dev/null +++ b/docs/ru/whats_new/changelog/2018.md @@ -0,0 +1,1061 @@ +--- +machine_translated: true +machine_translated_rev: 1cd5f0028d917696daf71ac1c9ee849c99c1d5c8 +--- + +## ClickHouse релиз 18.16 {#clickhouse-release-18-16} + +### ClickHouse релиз 18.16.1, 2018-12-21 {#clickhouse-release-18-16-1-2018-12-21} + +#### Устранение ошибок: {#bug-fixes} + +- Исправлена ошибка, которая приводила к проблемам с обновлением словарей с источником ODBC. [\#3825](https://github.com/ClickHouse/ClickHouse/issues/3825), [\#3829](https://github.com/ClickHouse/ClickHouse/issues/3829) +- JIT-компиляция агрегатных функций теперь работает с колонками LowCardinality. [\#3838](https://github.com/ClickHouse/ClickHouse/issues/3838) + +#### Улучшения: {#improvements} + +- Добавил тот `low_cardinality_allow_in_native_format` настройка (включена по умолчанию). Если этот параметр отключен, столбцы с низким коэффициентом полезности будут преобразованы в обычные столбцы для запросов SELECT, а обычные столбцы будут ожидаться для запросов INSERT. [\#3879](https://github.com/ClickHouse/ClickHouse/pull/3879) + +#### Улучшения сборки: {#build-improvements} + +- Исправления для сборок на macOS и ARM. + +### ClickHouse релиз 18.16.0, 2018-12-14 {#clickhouse-release-18-16-0-2018-12-14} + +#### Новые средства: {#new-features} + +- `DEFAULT` выражения вычисляются для пропущенных полей при загрузке данных в полуструктурированные входные форматы (`JSONEachRow`, `TSKV`). Эта функция включена с помощью `insert_sample_with_metadata` установка. [\#3555](https://github.com/ClickHouse/ClickHouse/pull/3555) +- То `ALTER TABLE` запрос теперь имеет следующее значение `MODIFY ORDER BY` действие для изменения ключа сортировки при добавлении или удалении столбца таблицы. 
Это полезно для таблиц в `MergeTree` семейство, выполняющее дополнительные задачи при слиянии на основе этого ключа сортировки, например `SummingMergeTree`, `AggregatingMergeTree` и так далее. [\#3581](https://github.com/ClickHouse/ClickHouse/pull/3581) [\#3755](https://github.com/ClickHouse/ClickHouse/pull/3755) +- Для столиков в центре города `MergeTree` семья, теперь вы можете указать другой ключ сортировки (`ORDER BY`) и индекс (`PRIMARY KEY`). Ключ сортировки может быть длиннее индекса. [\#3581](https://github.com/ClickHouse/ClickHouse/pull/3581) +- Добавил тот `hdfs` функция таблицы и `HDFS` механизм таблиц для импорта и экспорта данных в HDFS. [chenxing-xc](https://github.com/ClickHouse/ClickHouse/pull/3617) +- Добавлены функции для работы с base64: `base64Encode`, `base64Decode`, `tryBase64Decode`. [Александр Крашенинников](https://github.com/ClickHouse/ClickHouse/pull/3350) +- Теперь вы можете использовать параметр для настройки точности `uniqCombined` агрегатная функция (выбор количества ячеек Гиперлога). [\#3406](https://github.com/ClickHouse/ClickHouse/pull/3406) +- Добавил тот `system.contributors` таблица, содержащая имена всех, кто совершил коммиты в ClickHouse. [\#3452](https://github.com/ClickHouse/ClickHouse/pull/3452) +- Добавлена возможность опустить Раздел для `ALTER TABLE ... FREEZE` запрос для резервного копирования всех разделов сразу. [\#3514](https://github.com/ClickHouse/ClickHouse/pull/3514) +- Добавлен `dictGet` и `dictGetOrDefault` функции, которые не требуют указания типа возвращаемого значения. Тип определяется автоматически из описания словаря. [Амос Птица](https://github.com/ClickHouse/ClickHouse/pull/3564) +- Теперь вы можете указать комментарии для столбца в описании таблицы и изменить его с помощью `ALTER`. [\#3377](https://github.com/ClickHouse/ClickHouse/pull/3377) +- Чтение поддерживается для `Join` введите таблицы с простыми ключами. [Амос Птица](https://github.com/ClickHouse/ClickHouse/pull/3728) +- Теперь вы можете указать следующие параметры `join_use_nulls`, `max_rows_in_join`, `max_bytes_in_join`, и `join_overflow_mode` при создании `Join` типизированная таблица. [Амос Птица](https://github.com/ClickHouse/ClickHouse/pull/3728) +- Добавил тот `joinGet` функция, которая позволяет вам использовать a `Join` введите таблицу, как словарь. [Амос Птица](https://github.com/ClickHouse/ClickHouse/pull/3728) +- Добавил тот `partition_key`, `sorting_key`, `primary_key`, и `sampling_key` колонны в сторону `system.tables` таблица для того, чтобы предоставить информацию о ключах таблицы. [\#3609](https://github.com/ClickHouse/ClickHouse/pull/3609) +- Добавил тот `is_in_partition_key`, `is_in_sorting_key`, `is_in_primary_key`, и `is_in_sampling_key` колонны в сторону `system.columns` стол. [\#3609](https://github.com/ClickHouse/ClickHouse/pull/3609) +- Добавил тот `min_time` и `max_time` колонны в сторону `system.parts` стол. Эти столбцы заполняются, когда ключ секционирования является выражением, состоящим из `DateTime` столбцы. [Emmanuel Donin de Rosière](https://github.com/ClickHouse/ClickHouse/pull/3800) + +#### Устранение ошибок: {#bug-fixes-1} + +- Исправления и улучшения производительности для `LowCardinality` тип данных. `GROUP BY` с помощью `LowCardinality(Nullable(...))`. Получение значений `extremes`. Обработка функций высокого порядка. `LEFT ARRAY JOIN`. Распределенный `GROUP BY`. Функции, которые возвращают `Array`. Исполнение приказа `ORDER BY`. Написание в адрес `Distributed` таблицы (nicelulu). 
Обратная совместимость для `INSERT` запросы от старых клиентов, которые реализуют `Native` протокол. Поддержка `LowCardinality` для `JOIN`. Улучшена производительность при работе в одном потоке. [\#3823](https://github.com/ClickHouse/ClickHouse/pull/3823) [\#3803](https://github.com/ClickHouse/ClickHouse/pull/3803) [\#3799](https://github.com/ClickHouse/ClickHouse/pull/3799) [\#3769](https://github.com/ClickHouse/ClickHouse/pull/3769) [\#3744](https://github.com/ClickHouse/ClickHouse/pull/3744) [\#3681](https://github.com/ClickHouse/ClickHouse/pull/3681) [\#3651](https://github.com/ClickHouse/ClickHouse/pull/3651) [\#3649](https://github.com/ClickHouse/ClickHouse/pull/3649) [\#3641](https://github.com/ClickHouse/ClickHouse/pull/3641) [\#3632](https://github.com/ClickHouse/ClickHouse/pull/3632) [\#3568](https://github.com/ClickHouse/ClickHouse/pull/3568) [\#3523](https://github.com/ClickHouse/ClickHouse/pull/3523) [\#3518](https://github.com/ClickHouse/ClickHouse/pull/3518) +- Исправлено как то `select_sequential_consistency` вариант работает. Ранее, когда этот параметр был включен, неполный результат иногда возвращался после начала записи в новый раздел. [\#2863](https://github.com/ClickHouse/ClickHouse/pull/2863) +- Базы данных правильно задаются при выполнении DDL `ON CLUSTER` запросы и `ALTER UPDATE/DELETE`. [\#3772](https://github.com/ClickHouse/ClickHouse/pull/3772) [\#3460](https://github.com/ClickHouse/ClickHouse/pull/3460) +- Базы данных правильно задаются для вложенных запросов внутри представления. [\#3521](https://github.com/ClickHouse/ClickHouse/pull/3521) +- Исправлена ошибка в работе `PREWHERE` с `FINAL` для `VersionedCollapsingMergeTree`. [7167bfd7](https://github.com/ClickHouse/ClickHouse/commit/7167bfd7b365538f7a91c4307ad77e552ab4e8c1) +- Теперь вы можете использовать `KILL QUERY` чтобы отменить запросы, которые еще не начались, потому что они ждут блокировки таблицы. [\#3517](https://github.com/ClickHouse/ClickHouse/pull/3517) +- Исправлены расчеты даты и времени, если часы были перенесены назад в полночь (это происходит в Иране, а произошло в Москве с 1981 по 1983 год). Ранее это приводило к тому, что время сбрасывалось на день раньше необходимого, а также вызывало неправильное форматирование даты и времени в текстовом формате. [\#3819](https://github.com/ClickHouse/ClickHouse/pull/3819) +- Исправлены ошибки в некоторых случаях `VIEW` и подзапросы, которые опускают базу данных. [Зимний Чжан](https://github.com/ClickHouse/ClickHouse/pull/3521) +- Исправлено состояние гонки при одновременном чтении из `MATERIALIZED VIEW` и удаление `MATERIALIZED VIEW` из-за того, что внутренняя дверь не запирается `MATERIALIZED VIEW`. [\#3404](https://github.com/ClickHouse/ClickHouse/pull/3404) [\#3694](https://github.com/ClickHouse/ClickHouse/pull/3694) +- Исправлена ошибка `Lock handler cannot be nullptr.` [\#3689](https://github.com/ClickHouse/ClickHouse/pull/3689) +- Исправлена обработка запросов, когда `compile_expressions` опция включена (она включена по умолчанию). Недетерминированные постоянные выражения, такие как `now` функции больше не разворачиваются. [\#3457](https://github.com/ClickHouse/ClickHouse/pull/3457) +- Исправлена ошибка при указании непостоянного аргумента масштаба в `toDecimal32/64/128` функции. +- Исправлена ошибка при попытке вставить массив с помощью `NULL` элементы в системе `Values` форматирование в столбец типа `Array` без `Nullable` (если `input_format_values_interpret_expressions` = 1). 
+ [\#3487](https://github.com/ClickHouse/ClickHouse/pull/3487) [\#3503](https://github.com/ClickHouse/ClickHouse/pull/3503)
+- Исправлено непрерывное логирование ошибок в `DDLWorker`, если ZooKeeper недоступен. [8f50c620](https://github.com/ClickHouse/ClickHouse/commit/8f50c620334988b28018213ec0092fe6423847e2)
+- Исправлен тип возвращаемого значения функций `quantile*` для аргументов типов `Date` и `DateTime`. [\#3580](https://github.com/ClickHouse/ClickHouse/pull/3580)
+- Исправлена секция `WITH`, если она задаёт простой псевдоним без выражения. [\#3570](https://github.com/ClickHouse/ClickHouse/pull/3570)
+- Исправлена обработка запросов с именованными подзапросами и квалифицированными именами столбцов, когда включена настройка `enable_optimize_predicate_expression`. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3588)
+- Исправлена ошибка `Attempt to attach to nullptr thread group` при работе с материализованными представлениями. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3623)
+- Исправлено падение при передаче некоторых некорректных аргументов в функцию `arrayReverse`. [73e3a7b6](https://github.com/ClickHouse/ClickHouse/commit/73e3a7b662161d6005e7727d8a711b930386b871)
+- Исправлено переполнение буфера в функции `extractURLParameter`. Повышена производительность. Добавлена корректная обработка строк, содержащих нулевые байты. [141e9799](https://github.com/ClickHouse/ClickHouse/commit/141e9799e49201d84ea8e951d1bed4fb6d3dacb5)
+- Исправлено переполнение буфера в функциях `lowerUTF8` и `upperUTF8`. Удалена возможность выполнения этих функций над аргументами типа `FixedString`. [\#3662](https://github.com/ClickHouse/ClickHouse/pull/3662)
+- Исправлено редкое состояние гонки при удалении таблиц `MergeTree`. [\#3680](https://github.com/ClickHouse/ClickHouse/pull/3680)
+- Исправлено состояние гонки при чтении из таблиц `Buffer` с одновременным выполнением `ALTER` или `DROP` над целевыми таблицами. [\#3719](https://github.com/ClickHouse/ClickHouse/pull/3719)
+- Исправлен сегфолт при превышении лимита `max_temporary_non_const_columns`. [\#3788](https://github.com/ClickHouse/ClickHouse/pull/3788)
+
+#### Улучшения: {#improvements-1}
+
+- Сервер не записывает обработанные конфигурационные файлы в каталог `/etc/clickhouse-server/`. Вместо этого они сохраняются в каталоге `preprocessed_configs` внутри `path`. Это означает, что каталогу `/etc/clickhouse-server/` не нужен доступ на запись для пользователя `clickhouse`, что повышает безопасность. [\#2443](https://github.com/ClickHouse/ClickHouse/pull/2443)
+- Настройка `min_merge_bytes_to_use_direct_io` по умолчанию установлена в 10 гигабайт. Слияния, образующие крупные куски таблиц семейства MergeTree, будут выполняться в режиме `O_DIRECT`, что предотвращает чрезмерное вытеснение кэша страниц. [\#3504](https://github.com/ClickHouse/ClickHouse/pull/3504)
+- Ускорен запуск сервера при очень большом количестве таблиц. [\#3398](https://github.com/ClickHouse/ClickHouse/pull/3398)
+- Добавлены пул соединений и HTTP `Keep-Alive` для соединений между репликами. [\#3594](https://github.com/ClickHouse/ClickHouse/pull/3594)
+- Если синтаксис запроса неверен, через `HTTP`-интерфейс возвращается код `400 Bad Request` (ранее возвращался код 500). [31bc680a](https://github.com/ClickHouse/ClickHouse/commit/31bc680ac5f4bb1d0360a8ba4696fa84bb47d6ab)
+- Настройка `join_default_strictness` по умолчанию установлена в значение `ALL` для обеспечения совместимости.
[120e2cbe](https://github.com/ClickHouse/ClickHouse/commit/120e2cbe2ff4fbad626c28042d9b28781c805afe) +- Удалено ведение журнала в `stderr` из `re2` библиотека для недопустимых или сложных регулярных выражений. [\#3723](https://github.com/ClickHouse/ClickHouse/pull/3723) +- Добавлено в `Kafka` механизм таблиц: проверяет наличие подписок перед началом чтения из Kafka; параметр kafka\_max\_block\_size для таблицы. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3396) +- То `cityHash64`, `farmHash64`, `metroHash64`, `sipHash64`, `halfMD5`, `murmurHash2_32`, `murmurHash2_64`, `murmurHash3_32`, и `murmurHash3_64` функции теперь работают для любого количества аргументов и для аргументов в виде кортежей. [\#3451](https://github.com/ClickHouse/ClickHouse/pull/3451) [\#3519](https://github.com/ClickHouse/ClickHouse/pull/3519) +- То `arrayReverse` функция теперь работает с любыми типами массивов. [73e3a7b6](https://github.com/ClickHouse/ClickHouse/commit/73e3a7b662161d6005e7727d8a711b930386b871) +- Добавлен необязательный параметр: размер слота для `timeSlots` функция. [Кирилл Шваков](https://github.com/ClickHouse/ClickHouse/pull/3724) +- Для `FULL` и `RIGHT JOIN`, этот `max_block_size` настройка используется для потока несвязанных данных из правой таблицы. [Амос Птица](https://github.com/ClickHouse/ClickHouse/pull/3699) +- Добавил тот `--secure` параметр командной строки в `clickhouse-benchmark` и `clickhouse-performance-test` чтобы включить TLS. [\#3688](https://github.com/ClickHouse/ClickHouse/pull/3688) [\#3690](https://github.com/ClickHouse/ClickHouse/pull/3690) +- Тип преобразования, когда структура a `Buffer` таблица типов не соответствует структуре целевой таблицы. [Виталий Баранов](https://github.com/ClickHouse/ClickHouse/pull/3603) +- Добавил тот `tcp_keep_alive_timeout` опция для включения пакетов keep-alive после бездействия в течение заданного интервала времени. [\#3441](https://github.com/ClickHouse/ClickHouse/pull/3441) +- Удалены ненужные кавычки значений для ключа раздела В разделе `system.parts` таблица, если она состоит из одного столбца. [\#3652](https://github.com/ClickHouse/ClickHouse/pull/3652) +- Функция по модулю работает для `Date` и `DateTime` тип данных. [\#3385](https://github.com/ClickHouse/ClickHouse/pull/3385) +- Добавлены синонимы для этого `POWER`, `LN`, `LCASE`, `UCASE`, `REPLACE`, `LOCATE`, `SUBSTR`, и `MID` функции. [\#3774](https://github.com/ClickHouse/ClickHouse/pull/3774) [\#3763](https://github.com/ClickHouse/ClickHouse/pull/3763) Некоторые имена функций не зависят от регистра для обеспечения совместимости со стандартом SQL. Добавлен синтаксический сахар `SUBSTRING(expr FROM start FOR length)` для совместимости с SQL. [\#3804](https://github.com/ClickHouse/ClickHouse/pull/3804) +- Добавлена возможность `mlock` страницы памяти, соответствующие `clickhouse-server` исполняемый код, чтобы предотвратить его вытеснение из памяти. По умолчанию эта функция отключена. [\#3553](https://github.com/ClickHouse/ClickHouse/pull/3553) +- Улучшенная производительность при чтении с `O_DIRECT` (с помощью `min_bytes_to_use_direct_io` опция включена). [\#3405](https://github.com/ClickHouse/ClickHouse/pull/3405) +- Улучшенная производительность системы `dictGet...OrDefault` функция для постоянного ключевого аргумента и непостоянного аргумента по умолчанию. [Амос Птица](https://github.com/ClickHouse/ClickHouse/pull/3563) +- То `firstSignificantSubdomain` функция теперь обрабатывает Домены `gov`, `mil`, и `edu`. 
+ [Igor Hatarist](https://github.com/ClickHouse/ClickHouse/pull/3601) Повышена производительность. [\#3628](https://github.com/ClickHouse/ClickHouse/pull/3628)
+- Возможность задать пользовательские переменные окружения для запуска `clickhouse-server` через скрипт `SYS-V init.d`, определив `CLICKHOUSE_PROGRAM_ENV` в `/etc/default/clickhouse`.
+ [Павел Башинский](https://github.com/ClickHouse/ClickHouse/pull/3612)
+- Корректный код возврата для init-скрипта clickhouse-server. [\#3516](https://github.com/ClickHouse/ClickHouse/pull/3516)
+- Таблица `system.metrics` теперь содержит метрику `VersionInteger`, а в `system.build_options` добавлена строка `VERSION_INTEGER`, содержащая числовую форму версии ClickHouse, например `18016000`. [\#3644](https://github.com/ClickHouse/ClickHouse/pull/3644)
+- Удалена возможность сравнения типа `Date` с числом, чтобы избежать потенциальных ошибок вида `date = 2018-12-17`, где кавычки вокруг даты по ошибке опущены. [\#3687](https://github.com/ClickHouse/ClickHouse/pull/3687)
+- Исправлено поведение функций с состоянием, таких как `rowNumberInAllBlocks`. Ранее их результат был на единицу больше из-за вызова во время анализа запроса. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3729)
+- Если файл `force_restore_data` не удаётся удалить, выводится сообщение об ошибке. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3794)
+
+#### Улучшения сборки: {#build-improvements-1}
+
+- Обновлена библиотека `jemalloc`, что исправляет потенциальную утечку памяти. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3557)
+- Профилирование с помощью `jemalloc` включено по умолчанию для отладочных сборок. [2cc82f5c](https://github.com/ClickHouse/ClickHouse/commit/2cc82f5cbe266421cd4c1165286c2c47e5ffcb15)
+- Добавлена возможность запускать интеграционные тесты, только когда в системе установлен `Docker`. [\#3650](https://github.com/ClickHouse/ClickHouse/pull/3650)
+- Добавлен fuzz-тест выражений в запросах SELECT. [\#3442](https://github.com/ClickHouse/ClickHouse/pull/3442)
+- Добавлен стресс-тест для коммитов, который выполняет функциональные тесты параллельно и в случайном порядке, чтобы обнаружить больше состояний гонки. [\#3438](https://github.com/ClickHouse/ClickHouse/pull/3438)
+- Улучшен способ запуска clickhouse-server в образе Docker. [Эльгазал Ахмед](https://github.com/ClickHouse/ClickHouse/pull/3663)
+- В образ Docker добавлена поддержка инициализации базы данных файлами из каталога `/docker-entrypoint-initdb.d`. [Константин Лебедев](https://github.com/ClickHouse/ClickHouse/pull/3695)
+- Исправлена сборка под ARM. [\#3709](https://github.com/ClickHouse/ClickHouse/pull/3709)
+
+#### Назад несовместимые изменения: {#backward-incompatible-changes}
+
+- Удалена возможность сравнения типа `Date` с числом. Вместо `toDate('2018-12-18') = 17883` следует использовать явное преобразование типа `= toDate(17883)` [\#3687](https://github.com/ClickHouse/ClickHouse/pull/3687)
+
+## ClickHouse релиз 18.14 {#clickhouse-release-18-14}
+
+### ClickHouse релиз 18.14.19, 2018-12-19 {#clickhouse-release-18-14-19-2018-12-19}
+
+#### Устранение ошибок: {#bug-fixes-2}
+
+- Исправлена ошибка, которая приводила к проблемам с обновлением словарей с источником ODBC. [\#3825](https://github.com/ClickHouse/ClickHouse/issues/3825), [\#3829](https://github.com/ClickHouse/ClickHouse/issues/3829)
+- Базы данных корректно определяются при выполнении DDL-запросов `ON CLUSTER`.
+
+## ClickHouse release 18.14 {#clickhouse-release-18-14}
+
+### ClickHouse release 18.14.19, 2018-12-19 {#clickhouse-release-18-14-19-2018-12-19}
+
+#### Bug fixes: {#bug-fixes-2}
+
+- Fixed an error that led to problems with updating dictionaries with the ODBC source. [\#3825](https://github.com/ClickHouse/ClickHouse/issues/3825), [\#3829](https://github.com/ClickHouse/ClickHouse/issues/3829)
+- Databases are correctly specified when executing DDL `ON CLUSTER` queries. [\#3460](https://github.com/ClickHouse/ClickHouse/pull/3460)
+- Fixed a segfault if the `max_temporary_non_const_columns` limit was exceeded. [\#3788](https://github.com/ClickHouse/ClickHouse/pull/3788)
+
+#### Build improvements: {#build-improvements-2}
+
+- Fixes for builds on ARM.
+
+### ClickHouse release 18.14.18, 2018-12-04 {#clickhouse-release-18-14-18-2018-12-04}
+
+#### Bug fixes: {#bug-fixes-3}
+
+- Fixed an error in the `dictGet...` function for dictionaries of type `range` if one of the arguments is constant and the other is not. [\#3751](https://github.com/ClickHouse/ClickHouse/pull/3751)
+- Fixed an error that caused the messages `netlink: '...': attribute type 1 has an invalid length` to be printed in the Linux kernel log; this happened only on sufficiently fresh versions of the Linux kernel. [\#3749](https://github.com/ClickHouse/ClickHouse/pull/3749)
+- Fixed a segfault in the `empty` function for arguments of `FixedString` type. [Daniel Dao Quang Minh](https://github.com/ClickHouse/ClickHouse/pull/3703)
+- Fixed excessive memory allocation when using a large value of the `max_query_size` setting (a memory chunk of `max_query_size` bytes was preallocated at once). [\#3720](https://github.com/ClickHouse/ClickHouse/pull/3720)
+
+#### Build changes: {#build-changes}
+
+- Fixed the build with LLVM/Clang libraries of version 7 from OS packages (these libraries are used for runtime query compilation). [\#3582](https://github.com/ClickHouse/ClickHouse/pull/3582)
+
+### ClickHouse release 18.14.17, 2018-11-30 {#clickhouse-release-18-14-17-2018-11-30}
+
+#### Bug fixes: {#bug-fixes-4}
+
+- Fixed cases when the ODBC bridge process did not terminate with the main server process. [\#3642](https://github.com/ClickHouse/ClickHouse/pull/3642)
+- Fixed synchronous insertion into a `Distributed` table with a column list that differs from the column list of the remote table. [\#3673](https://github.com/ClickHouse/ClickHouse/pull/3673)
+- Fixed a rare race condition that could lead to a crash when dropping a MergeTree table. [\#3643](https://github.com/ClickHouse/ClickHouse/pull/3643)
+- Fixed a query deadlock in the case when query thread creation fails with the `Resource temporarily unavailable` error. [\#3643](https://github.com/ClickHouse/ClickHouse/pull/3643)
+- Fixed parsing of the `ENGINE` clause when the `CREATE AS table` syntax was used and the `ENGINE` clause was specified before the `AS table` (the error resulted in ignoring the specified engine). [\#3692](https://github.com/ClickHouse/ClickHouse/pull/3692)
+
+### ClickHouse release 18.14.15, 2018-11-21 {#clickhouse-release-18-14-15-2018-11-21}
+
+#### Bug fixes: {#bug-fixes-5}
+
+- The size of a memory chunk was overestimated while deserializing a column of type `Array(String)`, which led to `Memory limit exceeded` errors. The issue appeared in version 18.12.13. [\#3589](https://github.com/ClickHouse/ClickHouse/issues/3589)
+
+### ClickHouse release 18.14.14, 2018-11-20 {#clickhouse-release-18-14-14-2018-11-20}
+
+#### Bug fixes: {#bug-fixes-6}
+
+- Fixed `ON CLUSTER` queries when the cluster is configured as secure (the `<secure>` flag). [\#3599](https://github.com/ClickHouse/ClickHouse/pull/3599)
+
+#### Build changes: {#build-changes-1}
+
+- Fixed build problems (llvm-7 from the system, macos). [\#3582](https://github.com/ClickHouse/ClickHouse/pull/3582)
+
+### ClickHouse release 18.14.13, 2018-11-08 {#clickhouse-release-18-14-13-2018-11-08}
+
+#### Bug fixes: {#bug-fixes-7}
+
+- Fixed the `Block structure mismatch in MergingSorted stream` error. [\#3162](https://github.com/ClickHouse/ClickHouse/issues/3162)
+- Fixed `ON CLUSTER` queries in case secure connections were turned on in the cluster configuration (the `<secure>` flag). [\#3465](https://github.com/ClickHouse/ClickHouse/pull/3465)
+- Fixed an error in queries that used `SAMPLE`, `PREWHERE`, and alias columns. [\#3543](https://github.com/ClickHouse/ClickHouse/pull/3543)
+- Fixed a rare `unknown compression method` error when the `min_bytes_to_use_direct_io` setting was enabled. [\#3544](https://github.com/ClickHouse/ClickHouse/pull/3544)
+
+#### Performance improvements: {#performance-improvements}
+
+- Fixed a performance regression of queries with `GROUP BY` on columns of UInt16 or Date type when executing on AMD EPYC processors. [Igor Lapko](https://github.com/ClickHouse/ClickHouse/pull/3512)
+- Fixed a performance regression of queries that process long strings. [\#3530](https://github.com/ClickHouse/ClickHouse/pull/3530)
+
+#### Build improvements: {#build-improvements-3}
+
+- Improvements for simplifying the Arcadia build. [\#3475](https://github.com/ClickHouse/ClickHouse/pull/3475), [\#3535](https://github.com/ClickHouse/ClickHouse/pull/3535)
+
+### ClickHouse release 18.14.12, 2018-11-02 {#clickhouse-release-18-14-12-2018-11-02}
+
+#### Bug fixes: {#bug-fixes-8}
+
+- Fixed a crash when joining two unnamed subqueries. [\#3505](https://github.com/ClickHouse/ClickHouse/pull/3505)
+- Fixed generation of incorrect queries (with an empty `WHERE` clause) when querying external databases. [hotid](https://github.com/ClickHouse/ClickHouse/pull/3477)
+- Fixed use of an incorrect timeout value in ODBC dictionaries. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3511)
+
+### ClickHouse release 18.14.11, 2018-10-29 {#clickhouse-release-18-14-11-2018-10-29}
+
+#### Bug fixes: {#bug-fixes-9}
+
+- Fixed the `Block structure mismatch in UNION stream: different number of columns` error in LIMIT queries. [\#2156](https://github.com/ClickHouse/ClickHouse/issues/2156)
+- Fixed errors when merging data in tables containing arrays inside Nested structures. [\#3397](https://github.com/ClickHouse/ClickHouse/pull/3397)
+- Fixed incorrect query results if the `merge_tree_uniform_read_distribution` setting is disabled (it is enabled by default). [\#3429](https://github.com/ClickHouse/ClickHouse/pull/3429)
+- Fixed an error on inserts to a Distributed table in Native format. [\#3411](https://github.com/ClickHouse/ClickHouse/issues/3411)
+
+### ClickHouse release 18.14.10, 2018-10-23 {#clickhouse-release-18-14-10-2018-10-23}
+
+- The `compile_expressions` setting (JIT compilation of expressions) is disabled by default. [\#3410](https://github.com/ClickHouse/ClickHouse/pull/3410)
+- The `enable_optimize_predicate_expression` setting is disabled by default.
+
+### ClickHouse release 18.14.9, 2018-10-16 {#clickhouse-release-18-14-9-2018-10-16}
+
+#### New features: {#new-features-1}
+
+- The `WITH CUBE` modifier for `GROUP BY` (the alternative syntax `GROUP BY CUBE(...)` is also available; see the example after this list). [\#3172](https://github.com/ClickHouse/ClickHouse/pull/3172)
+- Added the `formatDateTime` function. [Alexander Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/2770)
+- Added the `JDBC` table engine and the `jdbc` table function (requires installing clickhouse-jdbc-bridge). [Alexander Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/3210)
+- Added functions for working with the ISO week number: `toISOWeek`, `toISOYear`, `toStartOfISOYear`, and `toDayOfYear`. [\#3146](https://github.com/ClickHouse/ClickHouse/pull/3146)
+- Now you can use `Nullable` columns for `MySQL` and `ODBC` tables. [\#3362](https://github.com/ClickHouse/ClickHouse/pull/3362)
+- Nested data structures can be read as nested objects in `JSONEachRow` format. Added the `input_format_import_nested_json` setting. [Veloman Yunkan](https://github.com/ClickHouse/ClickHouse/pull/3144)
+- Parallel processing is available for many `MATERIALIZED VIEW`s when inserting data. See the `parallel_view_processing` setting. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3208)
+- Added the `SYSTEM FLUSH LOGS` query (forced log flushes to system tables such as `query_log`). [\#3321](https://github.com/ClickHouse/ClickHouse/pull/3321)
+- Now you can use predefined `database` and `table` macros when declaring `Replicated` tables. [\#3251](https://github.com/ClickHouse/ClickHouse/pull/3251)
+- Added the ability to read `Decimal` type values in engineering notation (indicating powers of ten). [\#3153](https://github.com/ClickHouse/ClickHouse/pull/3153)
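+
+For illustration, a minimal sketch of `WITH CUBE` (not part of the original entry; `numbers` is the built-in table function):
+
+```sql
+SELECT number % 2 AS parity, number % 3 AS remainder, count() AS c
+FROM numbers(12)
+GROUP BY parity, remainder WITH CUBE
+ORDER BY parity, remainder
+```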
+
+#### Experimental features: {#experimental-features}
+
+- Optimization of the GROUP BY clause for `LowCardinality` data types. [\#3138](https://github.com/ClickHouse/ClickHouse/pull/3138)
+- Optimized calculation of expressions for `LowCardinality` data types. [\#3200](https://github.com/ClickHouse/ClickHouse/pull/3200)
+
+#### Improvements: {#improvements-2}
+
+- Significantly reduced memory consumption for queries with `ORDER BY` and `LIMIT`. See the `max_bytes_before_remerge_sort` setting. [\#3205](https://github.com/ClickHouse/ClickHouse/pull/3205)
+- In the absence of a `JOIN` kind (`LEFT`, `INNER`, …), `INNER JOIN` is assumed. [\#3147](https://github.com/ClickHouse/ClickHouse/pull/3147)
+- Qualified asterisks work correctly in queries with `JOIN`. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3202)
+- The `ODBC` table engine correctly chooses the method for quoting identifiers in the SQL dialect of the remote database. [Alexander Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/3210)
+- The `compile_expressions` setting (JIT compilation of expressions) is enabled by default.
+- Fixed behavior for simultaneous DROP DATABASE/TABLE IF EXISTS and CREATE DATABASE/TABLE IF NOT EXISTS. Previously, a `CREATE DATABASE ... IF NOT EXISTS` query could return the error message `File ... already exists`, and the `CREATE TABLE ... IF NOT EXISTS` and `DROP TABLE IF EXISTS` queries could return `Table ... is creating or attaching right now`. [\#3101](https://github.com/ClickHouse/ClickHouse/pull/3101)
+- LIKE and IN expressions with a constant right half are passed to the remote server when querying from MySQL or ODBC tables. [\#3182](https://github.com/ClickHouse/ClickHouse/pull/3182)
+- Comparisons with constant expressions in a WHERE clause are passed to the remote server when querying from MySQL and ODBC tables. Previously, only comparisons with constants were passed. [\#3182](https://github.com/ClickHouse/ClickHouse/pull/3182)
+- Correct calculation of row width in the terminal for `Pretty` formats, including strings with hieroglyphs. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3257).
+- `ON CLUSTER` can be specified for `ALTER UPDATE` queries.
+- Improved performance for reading data in `JSONEachRow` format. [\#3332](https://github.com/ClickHouse/ClickHouse/pull/3332)
+- Added synonyms for the `LENGTH` and `CHARACTER_LENGTH` functions for compatibility. The `CONCAT` function is no longer case-sensitive. [\#3306](https://github.com/ClickHouse/ClickHouse/pull/3306)
+- Added the `TIMESTAMP` synonym for the `DateTime` type. [\#3390](https://github.com/ClickHouse/ClickHouse/pull/3390)
+- There is always space reserved for `query_id` in the server logs, even if the log line is not related to a query. This makes it easier to parse server text logs with third-party tools.
+- Memory consumption by a query is logged when it exceeds the next level of an integer number of gigabytes. [\#3205](https://github.com/ClickHouse/ClickHouse/pull/3205)
+- Added a compatibility mode for the case when the client library that uses the Native protocol sends fewer columns by mistake than the server expects for the INSERT query. This scenario was possible when using the clickhouse-cpp library. Previously, this scenario caused the server to crash. [\#3171](https://github.com/ClickHouse/ClickHouse/pull/3171)
+- In a user-defined WHERE expression in `clickhouse-copier`, you can now use a `partition_key` alias (for additional filtering by the source table partition). This is useful if the partitioning scheme changes during copying, but only changes slightly. [\#3166](https://github.com/ClickHouse/ClickHouse/pull/3166)
+- The workflow of the `Kafka` engine has been moved to a background thread pool in order to automatically reduce the speed of data reading at high loads. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3215).
+- Support for reading `Tuple` and `Nested` values of structures like `struct` in the `Cap'n'Proto` format. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3216)
+- The list of top-level domains for the `firstSignificantSubdomain` function now includes the domain `biz`. [decaseal](https://github.com/ClickHouse/ClickHouse/pull/3219)
+- In the configuration of external dictionaries, `null_value` is interpreted as the value of the default data type. [\#3330](https://github.com/ClickHouse/ClickHouse/pull/3330)
+- Support for the `intDiv` and `intDivOrZero` functions for `Decimal`. [b48402e8](https://github.com/ClickHouse/ClickHouse/commit/b48402e8712e2b9b151e0eef8193811d433a1264)
+- Support for the `Date`, `DateTime`, `UUID`, and `Decimal` types as a key for the `sumMap` aggregate function (see the example after this list). [\#3281](https://github.com/ClickHouse/ClickHouse/pull/3281)
+- Support for the `Decimal` data type in external dictionaries. [\#3324](https://github.com/ClickHouse/ClickHouse/pull/3324)
+- Support for the `Decimal` data type in `SummingMergeTree` tables. [\#3348](https://github.com/ClickHouse/ClickHouse/pull/3348)
+- Added specializations for `UUID` in `if`. [\#3366](https://github.com/ClickHouse/ClickHouse/pull/3366)
+- Reduced the number of `open` and `close` system calls when reading from a `MergeTree` table. [\#3283](https://github.com/ClickHouse/ClickHouse/pull/3283)
+- A `TRUNCATE TABLE` query can be executed on any replica (the query is passed to the leader replica). [Kirill Shvakov](https://github.com/ClickHouse/ClickHouse/pull/3375)
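+
+A small sketch of `sumMap` with `Date` keys (not from the original changelog; the values are illustrative):
+
+```sql
+SELECT sumMap(days, hits)
+FROM
+(
+    SELECT [toDate('2018-10-01'), toDate('2018-10-02')] AS days, [10, 20] AS hits
+    UNION ALL
+    SELECT [toDate('2018-10-02')], [5]
+)
+-- (['2018-10-01', '2018-10-02'], [10, 25])
+```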
+
+#### Bug fixes: {#bug-fixes-10}
+
+- Fixed an issue with `Dictionary` tables for `range_hashed` dictionaries. This error occurred in version 18.12.17. [\#1702](https://github.com/ClickHouse/ClickHouse/pull/1702)
+- Fixed an error when loading `range_hashed` dictionaries (the message `Unsupported type Nullable (...)`). This error occurred in version 18.12.17. [\#3362](https://github.com/ClickHouse/ClickHouse/pull/3362)
+- Fixed errors in the `pointInPolygon` function due to the accumulation of inaccurate calculations for polygons with a large number of vertices located close to each other. [\#3331](https://github.com/ClickHouse/ClickHouse/pull/3331) [\#3341](https://github.com/ClickHouse/ClickHouse/pull/3341)
+- If after merging data parts the checksum for the resulting part differs from the result of the same merge in another replica, the result of the merge is deleted and the data part is downloaded from the other replica (this is the correct behavior). But after downloading the data part, it couldn't be added to the working set because of an error that the part already exists (because the data part was deleted with some delay after the merge). This led to cyclical attempts to download the same data. [\#3194](https://github.com/ClickHouse/ClickHouse/pull/3194)
+- Fixed incorrect calculation of total memory consumption by queries (because of the incorrect calculation, the `max_memory_usage_for_all_queries` setting worked incorrectly and the `MemoryTracking` metric had an incorrect value). This error occurred in version 18.12.13. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3344)
+- Fixed the functionality of `CREATE TABLE ... ON CLUSTER ... AS SELECT ...`. This error occurred in version 18.12.13. [\#3247](https://github.com/ClickHouse/ClickHouse/pull/3247)
+- Fixed unnecessary preparation of data structures for `JOIN`s on the server that initiates the query if the `JOIN` is only performed on remote servers. [\#3340](https://github.com/ClickHouse/ClickHouse/pull/3340)
+- Fixed bugs in the `Kafka` engine: deadlocks after exceptions when starting to read data, and locks upon completion. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3215).
+- For `Kafka` tables, the optional `schema` parameter was not passed (the schema of the `Cap'n'Proto` format). [Vojtech Splichal](https://github.com/ClickHouse/ClickHouse/pull/3150)
+- If the ensemble of ZooKeeper servers has servers that accept the connection but then immediately close it instead of responding to the handshake, ClickHouse chooses to connect another server. Previously, this produced the error `Cannot read all data. Bytes read: 0. Bytes expected: 4.` and the server couldn't start. [8218cf3a](https://github.com/ClickHouse/ClickHouse/commit/8218cf3a5f39a43401953769d6d12a0bb8d29da9)
+- If the ensemble of ZooKeeper servers contains servers for which the DNS query returns an error, these servers are ignored. [17b8e209](https://github.com/ClickHouse/ClickHouse/commit/17b8e209221061325ad7ba0539f03c6e65f87f29)
+- Fixed type conversion between `Date` and `DateTime` when inserting data in the `VALUES` format (if `input_format_values_interpret_expressions = 1`). Previously, the conversion was performed between the numeric value of the number of days in Unix Epoch time and the Unix timestamp, which led to unexpected results. [\#3229](https://github.com/ClickHouse/ClickHouse/pull/3229)
+- Fixed type conversion between `Decimal` and integer numbers. [\#3211](https://github.com/ClickHouse/ClickHouse/pull/3211)
+- Fixed errors in the `enable_optimize_predicate_expression` setting. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3231)
+- Fixed a parsing error in CSV format with floating-point numbers if a non-default CSV separator is used, such as `;`. [\#3155](https://github.com/ClickHouse/ClickHouse/pull/3155)
+- Fixed the `arrayCumSumNonNegative` function (it does not accumulate negative values if the accumulator is less than zero). [Aleksey Studnev](https://github.com/ClickHouse/ClickHouse/pull/3163)
+- Fixed how `Merge` tables work on top of `Distributed` tables when using `PREWHERE`. [\#3165](https://github.com/ClickHouse/ClickHouse/pull/3165)
+- Fixed bugs in the `ALTER UPDATE` query.
+- Fixed bugs in the `odbc` table function that appeared in version 18.12. [\#3197](https://github.com/ClickHouse/ClickHouse/pull/3197)
+- Fixed the operation of aggregate functions with `StateArray` combinators. [\#3188](https://github.com/ClickHouse/ClickHouse/pull/3188)
+- Fixed a crash when dividing a `Decimal` value by zero. [69dd6609](https://github.com/ClickHouse/ClickHouse/commit/69dd6609193beb4e7acd3e6ad216eca0ccfb8179)
+- Fixed output of types for operations using `Decimal` and integer arguments. [\#3224](https://github.com/ClickHouse/ClickHouse/pull/3224)
+- Fixed a segfault in `GROUP BY` on `Decimal128`. [3359ba06](https://github.com/ClickHouse/ClickHouse/commit/3359ba06c39fcd05bfdb87d6c64154819621e13a)
+- The `log_query_threads` setting (logging information about each thread of query execution) now takes effect only if the `log_queries` option (logging information about queries) is set to 1. Since the `log_query_threads` option is enabled by default, information about threads was previously logged even if query logging was disabled. [\#3241](https://github.com/ClickHouse/ClickHouse/pull/3241)
+- Fixed an error in the distributed operation of the quantiles aggregate function (the error message `Not found column quantile...`). [292a8855](https://github.com/ClickHouse/ClickHouse/commit/292a885533b8e3b41ce8993867069d14cbd5a664)
+- Fixed the compatibility problem when working on a cluster of version 18.12.17 servers and older servers at the same time. For distributed queries with GROUP BY keys of both fixed and non-fixed length, if there was a large amount of data to aggregate, the returned data was not always fully aggregated (two different rows contained the same aggregation keys). [\#3254](https://github.com/ClickHouse/ClickHouse/pull/3254)
+- Fixed handling of substitutions in `clickhouse-performance-test` if the query contains only part of the substitutions declared in the test. [\#3263](https://github.com/ClickHouse/ClickHouse/pull/3263)
+- Fixed an error when using `FINAL` with `PREWHERE`. [\#3298](https://github.com/ClickHouse/ClickHouse/pull/3298)
+- Fixed an error when using `PREWHERE` over columns that were added during `ALTER`. [\#3298](https://github.com/ClickHouse/ClickHouse/pull/3298)
+- Added a check for the absence of `arrayJoin` in `DEFAULT` and `MATERIALIZED` expressions. Previously, `arrayJoin` led to an error when inserting data. [\#3337](https://github.com/ClickHouse/ClickHouse/pull/3337)
+- Added a check for the absence of `arrayJoin` in a `PREWHERE` clause. Previously, this led to messages like `Size ... doesn't match` or `Unknown compression method` when executing queries. [\#3357](https://github.com/ClickHouse/ClickHouse/pull/3357)
+- Fixed a segfault that could occur in rare cases after an optimization that replaced AND chains of equality evaluations with the corresponding IN expression. [liuyimin-bytedance](https://github.com/ClickHouse/ClickHouse/pull/3339)
+- Minor corrections to `clickhouse-benchmark`: previously, client information was not sent to the server; now the number of executed queries is calculated more accurately when shutting down and for limiting the number of iterations. [\#3351](https://github.com/ClickHouse/ClickHouse/pull/3351) [\#3352](https://github.com/ClickHouse/ClickHouse/pull/3352)
+
+#### Backward incompatible changes: {#backward-incompatible-changes-1}
+
+- Removed the `allow_experimental_decimal_type` option. The `Decimal` data type is available for default use. [\#3329](https://github.com/ClickHouse/ClickHouse/pull/3329)
+
+## ClickHouse release 18.12 {#clickhouse-release-18-12}
+
+### ClickHouse release 18.12.17, 2018-09-16 {#clickhouse-release-18-12-17-2018-09-16}
+
+#### New features: {#new-features-2}
+
+- `invalidate_query` (the ability to specify a query to check whether an external dictionary needs to be updated) is implemented for the `clickhouse` source. [\#3126](https://github.com/ClickHouse/ClickHouse/pull/3126)
+- Added the ability to use `UInt*`, `Int*`, and `DateTime` data types (along with the `Date` type) as a `range_hashed` external dictionary key that defines the boundaries of the ranges. Now `NULL` can be used to designate an open range. [Vasily Nemkov](https://github.com/ClickHouse/ClickHouse/pull/3123)
+- The `Decimal` type now supports `var*` and `stddev*` aggregate functions. [\#3129](https://github.com/ClickHouse/ClickHouse/pull/3129)
+- The `Decimal` type now supports mathematical functions (`exp`, `sin` and so on); see the sketch after this list. [\#3129](https://github.com/ClickHouse/ClickHouse/pull/3129)
+- The `system.part_log` table now has the `partition_id` column. [\#3089](https://github.com/ClickHouse/ClickHouse/pull/3089)
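+
+A minimal sketch of `Decimal` in the newly supported aggregate and math functions (not from the original changelog):
+
+```sql
+SELECT
+    stddevPop(toDecimal64(number, 2)) AS sigma, -- standard deviation over Decimal values
+    exp(toDecimal32(1, 4)) AS e                 -- ~2.718
+FROM numbers(10)
+```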
+
+#### Bug fixes: {#bug-fixes-11}
+
+- `Merge` now works correctly on `Distributed` tables. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3159)
+- Fixed an incompatibility (an unnecessary dependency on the `glibc` version) that made it impossible to run ClickHouse on `Ubuntu Precise` and older versions. The incompatibility arose in version 18.12.13. [\#3130](https://github.com/ClickHouse/ClickHouse/pull/3130)
+- Fixed errors in the `enable_optimize_predicate_expression` setting. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3107)
+- Fixed a minor issue with backward compatibility that appeared when working with a cluster of replicas on versions earlier than 18.12.13 and simultaneously creating a new replica of a table on a server with a newer version (shown in the message `Can not clone replica, because the ... updated to new ClickHouse version`, which is logical, but shouldn't happen). [\#3122](https://github.com/ClickHouse/ClickHouse/pull/3122)
+
+#### Backward incompatible changes: {#backward-incompatible-changes-2}
+
+- The `enable_optimize_predicate_expression` option is enabled by default (which is rather optimistic). If query analysis errors occur that are related to searching for column names, set `enable_optimize_predicate_expression` to 0 (see the example below). [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3107)
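+
+A sketch of the suggested workaround (not from the original changelog):
+
+```sql
+SET enable_optimize_predicate_expression = 0; -- revert to the pre-18.12.17 behavior for the session
+```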
+
+### ClickHouse release 18.12.14, 2018-09-13 {#clickhouse-release-18-12-14-2018-09-13}
+
+#### New features: {#new-features-3}
+
+- Added support for `ALTER UPDATE` queries. [\#3035](https://github.com/ClickHouse/ClickHouse/pull/3035)
+- Added the `allow_ddl` option, which restricts the user's access to DDL queries. [\#3104](https://github.com/ClickHouse/ClickHouse/pull/3104)
+- Added the `min_merge_bytes_to_use_direct_io` option for `MergeTree` engines, which allows you to set a threshold for the total size of a merge (when above the threshold, data part files will be handled using O\_DIRECT). [\#3117](https://github.com/ClickHouse/ClickHouse/pull/3117)
+- The `system.merges` system table now contains the `partition_id` column. [\#3099](https://github.com/ClickHouse/ClickHouse/pull/3099)
+
+#### Improvements {#improvements-3}
+
+- If a data part remains unchanged during a mutation, it isn't downloaded by replicas. [\#3103](https://github.com/ClickHouse/ClickHouse/pull/3103)
+- Autocomplete is available for names of settings when working with `clickhouse-client`. [\#3106](https://github.com/ClickHouse/ClickHouse/pull/3106)
+
+#### Bug fixes: {#bug-fixes-12}
+
+- Added a check for the sizes of arrays that are elements of `Nested` type fields when inserting. [\#3118](https://github.com/ClickHouse/ClickHouse/pull/3118)
+- Fixed an error updating external dictionaries with the `ODBC` source and `hashed` storage. This error occurred in version 18.12.13.
+- Fixed a crash when creating a temporary table from a query with an `IN` condition. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3098)
+- Fixed an error in aggregate functions for arrays that can have `NULL` elements. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3097)
+
+### ClickHouse release 18.12.13, 2018-09-10 {#clickhouse-release-18-12-13-2018-09-10}
+
+#### New features: {#new-features-4}
+
+- Added the `DECIMAL(digits, scale)` data type (`Decimal32(scale)`, `Decimal64(scale)`, `Decimal128(scale)`). To enable it, use the setting `allow_experimental_decimal_type`. [\#2846](https://github.com/ClickHouse/ClickHouse/pull/2846) [\#2970](https://github.com/ClickHouse/ClickHouse/pull/2970) [\#3008](https://github.com/ClickHouse/ClickHouse/pull/3008) [\#3047](https://github.com/ClickHouse/ClickHouse/pull/3047)
+- New `WITH ROLLUP` modifier for `GROUP BY` (alternative syntax: `GROUP BY ROLLUP(...)`; see the example after this list). [\#2948](https://github.com/ClickHouse/ClickHouse/pull/2948)
+- In queries with JOIN, the star character expands to a list of columns in all tables, in compliance with the SQL standard. You can restore the old behavior by setting `asterisk_left_columns_only` to 1 on the user configuration level. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2787)
+- Added support for JOIN with table functions. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2907)
+- Autocomplete by pressing Tab in clickhouse-client. [Sergey Shcherbin](https://github.com/ClickHouse/ClickHouse/pull/2447)
+- Ctrl+C in clickhouse-client clears a query that was entered. [\#2877](https://github.com/ClickHouse/ClickHouse/pull/2877)
+- Added the `join_default_strictness` setting (values: `"`, `'any'`, `'all'`). This allows you to not specify `ANY` or `ALL` for `JOIN`. [\#2982](https://github.com/ClickHouse/ClickHouse/pull/2982)
+- Each line of the server log related to query processing shows the query ID. [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482)
+- Now you can get query execution logs in clickhouse-client (use the `send_logs_level` setting). With distributed query processing, logs are cascaded from all the servers. [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482)
+- The `system.query_log` and `system.processes` (`SHOW PROCESSLIST`) tables now have information about all changed settings when you run a query (the nested structure of the `Settings` data). Added the `log_query_settings` setting. [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482)
+- The `system.query_log` and `system.processes` tables now show information about the number of threads that are participating in query execution (see the `thread_numbers` column). [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482)
+- Added `ProfileEvents` counters that measure the time spent on reading and writing over the network and reading and writing to disk, the number of network errors, and the time spent waiting when network bandwidth is limited. [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482)
+- Added `ProfileEvents` counters that contain the system metrics from rusage (you can use them to get information about CPU usage in userspace and the kernel, page faults, and context switches), as well as taskstats metrics (use these to obtain information about I/O wait time, CPU wait time, and the amount of data read and recorded, both with and without page cache). [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482)
+- The `ProfileEvents` counters are applied globally and for each query, as well as for each query execution thread, which allows you to profile resource consumption by query in detail. [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482)
+- Added the `system.query_thread_log` table, which contains information about each query execution thread. Added the `log_query_threads` setting. [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482)
+- The `system.metrics` and `system.events` tables now have built-in documentation. [\#3016](https://github.com/ClickHouse/ClickHouse/pull/3016)
+- Added the `arrayEnumerateDense` function. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2975)
+- Added the `arrayCumSumNonNegative` and `arrayDifference` functions. [Aleksey Studnev](https://github.com/ClickHouse/ClickHouse/pull/2942)
+- Added the `retention` aggregate function. [Sundy Li](https://github.com/ClickHouse/ClickHouse/pull/2887)
+- Now you can add (merge) states of aggregate functions by using the plus operator, and multiply states of aggregate functions by a nonnegative constant. [\#3062](https://github.com/ClickHouse/ClickHouse/pull/3062) [\#3034](https://github.com/ClickHouse/ClickHouse/pull/3034)
+- Tables in the MergeTree family now have the virtual column `_partition_id`. [\#3089](https://github.com/ClickHouse/ClickHouse/pull/3089)
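+
+For illustration, a minimal sketch of `WITH ROLLUP` (not from the original changelog; an extra row aggregates all groups):
+
+```sql
+SELECT number % 3 AS bucket, count() AS c
+FROM numbers(9)
+GROUP BY bucket WITH ROLLUP
+ORDER BY bucket
+```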
+
+#### Experimental features: {#experimental-features-1}
+
+- Added the `LowCardinality(T)` data type. This data type automatically creates a local dictionary of values and allows data processing without unpacking the dictionary (see the sketch after this list). [\#2830](https://github.com/ClickHouse/ClickHouse/pull/2830)
+- Added a cache of JIT-compiled functions and a counter for the number of uses before compiling. To JIT-compile expressions, enable the `compile_expressions` setting. [\#2990](https://github.com/ClickHouse/ClickHouse/pull/2990) [\#3077](https://github.com/ClickHouse/ClickHouse/pull/3077)
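+
+A sketch of how the experimental type might be tried out; the `allow_experimental_low_cardinality_type` setting name is an assumption, not taken from the original entry:
+
+```sql
+SET allow_experimental_low_cardinality_type = 1; -- assumed gating setting for the experimental type
+CREATE TABLE lc_demo (s LowCardinality(String)) ENGINE = Memory;
+INSERT INTO lc_demo VALUES ('alpha'), ('alpha'), ('beta');
+SELECT s, count() FROM lc_demo GROUP BY s;
+```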
+
+#### Improvements: {#improvements-4}
+
+- Fixed the problem with unlimited accumulation of the replication log when there are abandoned replicas. Added an effective recovery mode for replicas with a long lag.
+- Improved performance of `GROUP BY` with multiple aggregation fields when one of them is a string and the others are fixed length.
+- Improved performance when using `PREWHERE` and with implicit transfer of expressions in `PREWHERE`.
+- Improved parsing performance for text formats (`CSV`, `TSV`). [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2977) [\#2980](https://github.com/ClickHouse/ClickHouse/pull/2980)
+- Improved performance of reading strings and arrays in binary formats. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2955)
+- Increased performance and reduced memory consumption for queries to `system.tables` and `system.columns` when there is a very large number of tables on a single server. [\#2953](https://github.com/ClickHouse/ClickHouse/pull/2953)
+- Fixed a performance problem in the case of a large stream of queries that result in an error (the `_dl_addr` function is visible in `perf top`, but the server isn't using much CPU). [\#2938](https://github.com/ClickHouse/ClickHouse/pull/2938)
+- Conditions are pushed down into the View (when `enable_optimize_predicate_expression` is enabled). [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2907)
+- Improvements to the functionality for the `UUID` data type. [\#3074](https://github.com/ClickHouse/ClickHouse/pull/3074) [\#2985](https://github.com/ClickHouse/ClickHouse/pull/2985)
+- The `UUID` data type is supported in external dictionaries. [\#2822](https://github.com/ClickHouse/ClickHouse/pull/2822)
+- The `visitParamExtractRaw` function works correctly with nested structures. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2974)
+- When the `input_format_skip_unknown_fields` setting is enabled, object fields in `JSONEachRow` format are skipped correctly. [BlahGeek](https://github.com/ClickHouse/ClickHouse/pull/2958)
+- For a `CASE` expression with conditions, you can now omit `ELSE`, which is equivalent to `ELSE NULL`. [\#2920](https://github.com/ClickHouse/ClickHouse/pull/2920)
+- The operation timeout can now be configured when working with ZooKeeper. [urykhy](https://github.com/ClickHouse/ClickHouse/pull/2971)
+- You can specify an offset for `LIMIT n, m` as `LIMIT n OFFSET m` (see the example after this list). [\#2840](https://github.com/ClickHouse/ClickHouse/pull/2840)
+- You can use the `SELECT TOP n` syntax as an alternative for `LIMIT`. [\#2840](https://github.com/ClickHouse/ClickHouse/pull/2840)
+- Increased the size of the queue to write to system tables, so the `SystemLog parameter queue is full` error doesn't happen as often.
+- The `windowFunnel` aggregate function now supports events that meet multiple conditions. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2801)
+- Duplicate columns can be used in a `USING` clause for `JOIN`. [\#3006](https://github.com/ClickHouse/ClickHouse/pull/3006)
+- `Pretty` formats now have a limit on column alignment by width. Use the `output_format_pretty_max_column_pad_width` setting. If a value is wider, it will still be displayed in its entirety, but the other cells in the table will not be too wide. [\#3003](https://github.com/ClickHouse/ClickHouse/pull/3003)
+- The `odbc` table function now allows you to specify the database/schema name. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2885)
+- Added the ability to use a username specified in the `clickhouse-client` config file. [Vladimir Kozbin](https://github.com/ClickHouse/ClickHouse/pull/2909)
+- The `ZooKeeperExceptions` counter has been split into three counters: `ZooKeeperUserExceptions`, `ZooKeeperHardwareExceptions`, and `ZooKeeperOtherExceptions`.
+- `ALTER DELETE` queries work for materialized views.
+- Added randomization when running the cleanup thread periodically for `ReplicatedMergeTree` tables in order to avoid periodic load spikes when there is a very large number of `ReplicatedMergeTree` tables.
+- Support for `ATTACH TABLE ... ON CLUSTER` queries. [\#3025](https://github.com/ClickHouse/ClickHouse/pull/3025)
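+
+A sketch of the two equivalent paging forms (not from the original changelog):
+
+```sql
+SELECT number FROM numbers(100) ORDER BY number LIMIT 10 OFFSET 20; -- same as LIMIT 20, 10
+SELECT TOP 10 number FROM numbers(100) ORDER BY number;             -- same as LIMIT 10
+```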
+
+#### Bug fixes: {#bug-fixes-13}
+
+- Fixed an issue with `Dictionary` tables (throws the `Size of offsets doesn't match size of column` or `Unknown compression method` exception). This bug appeared in version 18.10.3. [\#2913](https://github.com/ClickHouse/ClickHouse/issues/2913)
+- Fixed a bug when merging `CollapsingMergeTree` tables if one of the data parts is empty (these parts are formed during a merge or `ALTER DELETE` if all data was deleted), and the `vertical` algorithm was used for the merge. [\#3049](https://github.com/ClickHouse/ClickHouse/pull/3049)
+- Fixed a race condition during `DROP` or `TRUNCATE` for `Memory` tables with a simultaneous `SELECT`, which could lead to server crashes. This bug appeared in version 1.1.54388. [\#3038](https://github.com/ClickHouse/ClickHouse/pull/3038)
+- Fixed the possibility of data loss when inserting into `Replicated` tables if the `Session is expired` error is returned (data loss can be detected by the `ReplicatedDataLoss` metric). This error occurred in version 1.1.54378. [\#2939](https://github.com/ClickHouse/ClickHouse/pull/2939) [\#2949](https://github.com/ClickHouse/ClickHouse/pull/2949) [\#2964](https://github.com/ClickHouse/ClickHouse/pull/2964)
+- Fixed a segfault during `JOIN ... ON`. [\#3000](https://github.com/ClickHouse/ClickHouse/pull/3000)
+- Fixed an error searching column names when the `WHERE` expression consists entirely of a qualified column name, such as `WHERE table.column`. [\#2994](https://github.com/ClickHouse/ClickHouse/pull/2994)
+- Fixed the `Not found column` error that occurred when executing distributed queries if a single column consisting of an IN expression with a subquery is requested from a remote server. [\#3087](https://github.com/ClickHouse/ClickHouse/pull/3087)
+- Fixed the `Block structure mismatch in UNION stream: different number of columns` error that occurred for distributed queries if one of the shards is local and the other is not, and the optimization of the move to `PREWHERE` is triggered. [\#2226](https://github.com/ClickHouse/ClickHouse/pull/2226) [\#3037](https://github.com/ClickHouse/ClickHouse/pull/3037) [\#3055](https://github.com/ClickHouse/ClickHouse/pull/3055) [\#3065](https://github.com/ClickHouse/ClickHouse/pull/3065) [\#3073](https://github.com/ClickHouse/ClickHouse/pull/3073) [\#3090](https://github.com/ClickHouse/ClickHouse/pull/3090) [\#3093](https://github.com/ClickHouse/ClickHouse/pull/3093)
+- Fixed the `pointInPolygon` function for certain cases of non-convex polygons. [\#2910](https://github.com/ClickHouse/ClickHouse/pull/2910)
+- Fixed the incorrect result when comparing `nan` with integers. [\#3024](https://github.com/ClickHouse/ClickHouse/pull/3024)
+- Fixed an error in the `zlib-ng` library that could lead to a segfault in rare cases. [\#2854](https://github.com/ClickHouse/ClickHouse/pull/2854)
+- Fixed a memory leak when inserting into a table with `AggregateFunction` columns, if the state of the aggregate function is not simple (allocates memory separately), and if a single insertion request results in multiple small blocks. [\#3084](https://github.com/ClickHouse/ClickHouse/pull/3084)
+- Fixed a race condition when creating and deleting the same `Buffer` or `MergeTree` table simultaneously.
+- Fixed the possibility of a segfault when comparing tuples made up of certain non-trivial types, such as tuples. [\#2989](https://github.com/ClickHouse/ClickHouse/pull/2989)
+- Fixed the possibility of a segfault when running certain `ON CLUSTER` queries. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2960)
+- Fixed an error in the `arrayDistinct` function for `Nullable` array elements. [\#2845](https://github.com/ClickHouse/ClickHouse/pull/2845) [\#2937](https://github.com/ClickHouse/ClickHouse/pull/2937)
+- The `enable_optimize_predicate_expression` option now correctly supports cases with `SELECT *`. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2929)
+- Fixed a segfault when re-initializing the ZooKeeper session. [\#2917](https://github.com/ClickHouse/ClickHouse/pull/2917)
+- Fixed potential blocking when working with ZooKeeper.
+- Fixed incorrect code for adding nested data structures in a `SummingMergeTree`.
+- When allocating memory for states of aggregate functions, alignment is correctly taken into account, which makes it possible to use operations that require alignment when implementing states of aggregate functions. [chenxing-xc](https://github.com/ClickHouse/ClickHouse/pull/2808)
+
+#### Security fix: {#security-fix}
+
+- Safe use of ODBC data sources. Interaction with ODBC drivers uses a separate `clickhouse-odbc-bridge` process. Errors in third-party ODBC drivers no longer cause problems with server stability or vulnerabilities. [\#2828](https://github.com/ClickHouse/ClickHouse/pull/2828) [\#2879](https://github.com/ClickHouse/ClickHouse/pull/2879) [\#2886](https://github.com/ClickHouse/ClickHouse/pull/2886) [\#2893](https://github.com/ClickHouse/ClickHouse/pull/2893) [\#2921](https://github.com/ClickHouse/ClickHouse/pull/2921)
+- Fixed incorrect validation of the file path in the `catBoostPool` table function. [\#2894](https://github.com/ClickHouse/ClickHouse/pull/2894)
+- The contents of system tables (`tables`, `databases`, `parts`, `columns`, `parts_columns`, `merges`, `mutations`, `replicas`, and `replication_queue`) are filtered according to the user's configured access to databases (`allow_databases`). [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2856)
+
+#### Backward incompatible changes: {#backward-incompatible-changes-3}
+
+- In queries with JOIN, the star character expands to a list of columns in all tables, in compliance with the SQL standard. You can restore the old behavior by setting `asterisk_left_columns_only` to 1 on the user configuration level.
+
+#### Build changes: {#build-changes-2}
+
+- Most integration tests can now be run by commit.
+- Code style checks can also be run by commit.
+- The `memcpy` implementation is chosen correctly when building on CentOS7/Fedora. [Etienne Champetier](https://github.com/ClickHouse/ClickHouse/pull/2912)
+- When using clang for the build, some warnings from `-Weverything` have been added, in addition to the regular `-Wall -Wextra -Werror`. [\#2957](https://github.com/ClickHouse/ClickHouse/pull/2957)
+- Debug builds use the `jemalloc` debug option.
+- The interface of the library for interacting with ZooKeeper is declared abstract. [\#2950](https://github.com/ClickHouse/ClickHouse/pull/2950)
+
+## ClickHouse release 18.10 {#clickhouse-release-18-10}
+
+### ClickHouse release 18.10.3, 2018-08-13 {#clickhouse-release-18-10-3-2018-08-13}
+
+#### New features: {#new-features-5}
+
+- HTTPS can be used for replication. [\#2760](https://github.com/ClickHouse/ClickHouse/pull/2760)
+- Added the functions `murmurHash2_64`, `murmurHash3_32`, `murmurHash3_64`, and `murmurHash3_128` in addition to the existing `murmurHash2_32` (see the example after this list). [\#2791](https://github.com/ClickHouse/ClickHouse/pull/2791)
+- Support for Nullable types in the ClickHouse ODBC driver (the `ODBCDriver2` output format). [\#2834](https://github.com/ClickHouse/ClickHouse/pull/2834)
+- Support for `UUID` in the key columns.
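+
+For illustration, a minimal sketch of the hash family (not from the original changelog; `murmurHash3_128` returns a FixedString(16), shown here via `hex`):
+
+```sql
+SELECT
+    murmurHash2_32('ClickHouse'),
+    murmurHash3_64('ClickHouse'),
+    hex(murmurHash3_128('ClickHouse'))
+```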
+
+#### Improvements: {#improvements-5}
+
+- Clusters can be removed without restarting the server when they are deleted from the config files. [\#2777](https://github.com/ClickHouse/ClickHouse/pull/2777)
+- External dictionaries can be removed without restarting the server when they are removed from the config files. [\#2779](https://github.com/ClickHouse/ClickHouse/pull/2779)
+- Added `SETTINGS` support for the `Kafka` table engine. [Alexander Marshalov](https://github.com/ClickHouse/ClickHouse/pull/2781)
+- Improvements for the `UUID` data type (not yet complete). [\#2618](https://github.com/ClickHouse/ClickHouse/pull/2618)
+- Support for empty parts after merges in the `SummingMergeTree`, `CollapsingMergeTree` and `VersionedCollapsingMergeTree` engines. [\#2815](https://github.com/ClickHouse/ClickHouse/pull/2815)
+- Old records of completed mutations are deleted (`ALTER DELETE`). [\#2784](https://github.com/ClickHouse/ClickHouse/pull/2784)
+- Added the `system.merge_tree_settings` table. [Kirill Shvakov](https://github.com/ClickHouse/ClickHouse/pull/2841)
+- The `system.tables` table now has dependency columns: `dependencies_database` and `dependencies_table`. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2851)
+- Added the `max_partition_size_to_drop` config option. [\#2782](https://github.com/ClickHouse/ClickHouse/pull/2782)
+- Added the `output_format_json_escape_forward_slashes` option. [Alexander Bocharov](https://github.com/ClickHouse/ClickHouse/pull/2812)
+- Added the `max_fetch_partition_retries_count` setting. [\#2831](https://github.com/ClickHouse/ClickHouse/pull/2831)
+- Added the `prefer_localhost_replica` setting for disabling the preference for a local replica and going to a local replica without inter-process interaction. [\#2832](https://github.com/ClickHouse/ClickHouse/pull/2832)
+- The `quantileExact` aggregate function returns `nan` in the case of aggregation on an empty `Float32` or `Float64` set. [Sundy Li](https://github.com/ClickHouse/ClickHouse/pull/2855)
+
+#### Bug fixes: {#bug-fixes-14}
+
+- Removed unnecessary escaping of the connection string parameters for ODBC, which made it impossible to establish a connection. This error occurred in version 18.6.0.
+- Fixed the logic for processing `REPLACE PARTITION` commands in the replication queue. If there are two `REPLACE` commands for the same partition, the incorrect logic could cause one of them to remain in the replication queue and not be executed. [\#2814](https://github.com/ClickHouse/ClickHouse/pull/2814)
+- Fixed a merge bug when all data parts were empty (parts that were formed from a merge or from `ALTER DELETE` if all data was deleted). This bug appeared in version 18.1.0. [\#2930](https://github.com/ClickHouse/ClickHouse/pull/2930)
+- Fixed an error for concurrent use of `Set` or `Join`. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2823)
+- Fixed the `Block structure mismatch in UNION stream: different number of columns` error that occurred for `UNION ALL` queries inside a subquery if one of the `SELECT` queries contains duplicate column names. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2094)
+- Fixed a memory leak if an exception occurred when connecting to a MySQL server.
+- Fixed the incorrect clickhouse-client response code in case of a query error.
+- Fixed incorrect behavior of materialized views containing DISTINCT. [\#2795](https://github.com/ClickHouse/ClickHouse/issues/2795)
+
+#### Backward incompatible changes {#backward-incompatible-changes-4}
+
+- Removed support for CHECK TABLE queries for Distributed tables.
+
+#### Build changes: {#build-changes-3}
+
+- The allocator has been replaced: `jemalloc` is now used instead of `tcmalloc`. In some scenarios, this increases speed up to 20%. However, there are queries that have slowed by up to 20%. Memory consumption has been reduced by approximately 10% in some scenarios, with improved stability. With highly competitive loads, CPU usage in userspace and in system shows just a slight increase. [\#2773](https://github.com/ClickHouse/ClickHouse/pull/2773)
+- Use of libressl from a submodule. [\#1983](https://github.com/ClickHouse/ClickHouse/pull/1983) [\#2807](https://github.com/ClickHouse/ClickHouse/pull/2807)
+- Use of unixodbc from a submodule. [\#2789](https://github.com/ClickHouse/ClickHouse/pull/2789)
+- Use of mariadb-connector-c from a submodule. [\#2785](https://github.com/ClickHouse/ClickHouse/pull/2785)
+- Added functional test files to the repository that depend on the availability of test data (for the time being, without the test data itself).
+
+## ClickHouse release 18.6 {#clickhouse-release-18-6}
+
+### ClickHouse release 18.6.0, 2018-08-02 {#clickhouse-release-18-6-0-2018-08-02}
+
+#### New features: {#new-features-6}
+
+- Added support for ON expressions for the JOIN ON syntax (see the sketch after this list):
+  `JOIN ON Expr([table.]column ...) = Expr([table.]column, ...) [AND Expr([table.]column, ...) = Expr([table.]column, ...) ...]`
+  The expression must be a chain of equalities joined by the AND operator. Each side of the equality can be an arbitrary expression over the columns of one of the tables. The use of fully qualified column names is supported (`table.name`, `database.table.name`, `table_alias.name`, `subquery_alias.name`) for the right table. [\#2742](https://github.com/ClickHouse/ClickHouse/pull/2742)
+- HTTPS can be enabled for replication. [\#2760](https://github.com/ClickHouse/ClickHouse/pull/2760)
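+
+A sketch of the new `JOIN ON` form (not from the original entry; the subqueries and aliases here are hypothetical):
+
+```sql
+SELECT t1.x, t2.y
+FROM (SELECT 1 AS x, 'a' AS k) AS t1
+JOIN (SELECT 'A' AS k, 2 AS y) AS t2
+    ON t1.x = t2.y - 1 AND t1.k = lower(t2.k) -- each side is an expression over one table's columns
+```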
+
+#### Improvements: {#improvements-6}
+
+- The server passes the patch component of its version to the client. Data about the patch version component is in `system.processes` and `query_log`. [\#2646](https://github.com/ClickHouse/ClickHouse/pull/2646)
+
+## ClickHouse release 18.5 {#clickhouse-release-18-5}
+
+### ClickHouse release 18.5.1, 2018-07-31 {#clickhouse-release-18-5-1-2018-07-31}
+
+#### New features: {#new-features-7}
+
+- Added the hash function `murmurHash2_32`. [\#2756](https://github.com/ClickHouse/ClickHouse/pull/2756).
+
+#### Improvements: {#improvements-7}
+
+- Now you can use the `from_env` [\#2741](https://github.com/ClickHouse/ClickHouse/pull/2741) attribute to set values in config files from environment variables.
+- Added case-insensitive versions of the `coalesce`, `ifNull`, and `nullIf` functions. [\#2752](https://github.com/ClickHouse/ClickHouse/pull/2752).
+
+#### Bug fixes: {#bug-fixes-15}
+
+- Fixed a possible bug when starting a replica. [\#2759](https://github.com/ClickHouse/ClickHouse/pull/2759).
+
+## ClickHouse release 18.4 {#clickhouse-release-18-4}
+
+### ClickHouse release 18.4.0, 2018-07-28 {#clickhouse-release-18-4-0-2018-07-28}
+
+#### New features: {#new-features-8}
+
+- Added system tables: `formats`, `data_type_families`, `aggregate_function_combinators`, `table_functions`, `table_engines`, `collations` (see the sketch after this list). [\#2721](https://github.com/ClickHouse/ClickHouse/pull/2721).
+- Added the ability to use a table function instead of a table as an argument of a `remote` or `cluster` table function. [\#2708](https://github.com/ClickHouse/ClickHouse/pull/2708).
+- Support for `HTTP Basic` authentication in the replication protocol. [\#2727](https://github.com/ClickHouse/ClickHouse/pull/2727).
+- The `has` function now allows searching for a numeric value in an array of `Enum` values. [Maxim Khrisanfov](https://github.com/ClickHouse/ClickHouse/pull/2699).
+- Support for adding arbitrary message separators when reading from `Kafka`. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2701).
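+
+A sketch of introspecting the new system tables (not from the original entry; the `is_input`/`is_output` column names in `system.formats` are assumptions):
+
+```sql
+SELECT name FROM system.table_engines LIMIT 5;
+SELECT name FROM system.formats WHERE is_input AND is_output LIMIT 5; -- formats usable for both reading and writing
+```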
+
+#### Improvements: {#improvements-8}
+
+- The `ALTER TABLE t DELETE WHERE` query does not rewrite data parts that were not affected by the WHERE condition. [\#2694](https://github.com/ClickHouse/ClickHouse/pull/2694).
+- The `use_minimalistic_checksums_in_zookeeper` option for `ReplicatedMergeTree` tables is enabled by default. This setting was added in version 1.1.54378, 2018-04-16. Versions that are older than 1.1.54378 can no longer be installed.
+- Support for running `KILL` and `OPTIMIZE` queries that specify `ON CLUSTER`. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2689).
+
+#### Bug fixes: {#bug-fixes-16}
+
+- Fixed the error `Column ... is not under an aggregate function and not in GROUP BY` for aggregation with an expression. This bug appeared in version 18.1.0. ([bbdd780b](https://github.com/ClickHouse/ClickHouse/commit/bbdd780be0be06a0f336775941cdd536878dd2c2))
+- Fixed a bug in the `windowFunnel` aggregate function. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2735).
+- Fixed a bug in the `anyHeavy` aggregate function. ([a2101df2](https://github.com/ClickHouse/ClickHouse/commit/a2101df25a6a0fba99aa71f8793d762af2b801ee))
+- Fixed a server crash when using the `countArray()` aggregate function.
+
+#### Backward incompatible changes: {#backward-incompatible-changes-5}
+
+- Parameters for the `Kafka` engine were changed from `Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format[, kafka_schema, kafka_num_consumers])` to `Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format[, kafka_row_delimiter, kafka_schema, kafka_num_consumers])`. If your tables use the `kafka_schema` or `kafka_num_consumers` parameters, you have to manually edit the metadata files `path/metadata/database/table.sql` and add the `kafka_row_delimiter` parameter with the `''` value.
+
+## ClickHouse release 18.1 {#clickhouse-release-18-1}
+
+### ClickHouse release 18.1.0, 2018-07-23 {#clickhouse-release-18-1-0-2018-07-23}
+
+#### New features: {#new-features-9}
+
+- Support for the `ALTER TABLE t DELETE WHERE` query for non-replicated MergeTree tables ([\#2634](https://github.com/ClickHouse/ClickHouse/pull/2634)).
+- Support for arbitrary types for the `uniq*` family of aggregate functions ([\#2010](https://github.com/ClickHouse/ClickHouse/issues/2010)).
+- Support for arbitrary types in comparison operators ([\#2026](https://github.com/ClickHouse/ClickHouse/issues/2026)).
+- The `users.xml` file allows setting a subnet mask in the format `10.0.0.1/255.255.255.0`. This is necessary for using masks for IPv6 networks with zeros in the middle ([\#2637](https://github.com/ClickHouse/ClickHouse/pull/2637)).
+- Added the `arrayDistinct` function (see the example after this list) ([\#2670](https://github.com/ClickHouse/ClickHouse/pull/2670)).
+- The SummingMergeTree engine can now work with AggregateFunction type columns ([Constantin S. Pan](https://github.com/ClickHouse/ClickHouse/pull/2566)).
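+
+A minimal example of `arrayDistinct` (not from the original entry):
+
+```sql
+SELECT arrayDistinct([1, 2, 2, 3, 1]) -- [1, 2, 3]
+```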
+
+#### Improvements: {#improvements-9}
+
+- Changed the numbering scheme for release versions. Now the first part contains the year of the release (A.D., Moscow time zone, minus 2000), the second part contains the number for major changes (increases for most releases), and the third part is the patch version. Releases are still backward compatible, unless otherwise stated in the changelog.
+- Faster conversions of floating-point numbers to a string ([Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2664)).
+- If some rows were skipped during an insert due to parsing errors (this is possible with the `input_allow_errors_num` and `input_allow_errors_ratio` settings enabled), the number of skipped rows is now written to the server log ([Leonardo Cecchi](https://github.com/ClickHouse/ClickHouse/pull/2669)).
+
+#### Bug fixes: {#bug-fixes-17}
+
+- Fixed the TRUNCATE command for temporary tables ([Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2624)).
+- Fixed a rare deadlock in the ZooKeeper client library that occurred when there was a network error while reading the response ([c315200](https://github.com/ClickHouse/ClickHouse/commit/c315200e64b87e44bdf740707fc857d1fdf7e947)).
+- Fixed an error during a CAST to Nullable types ([\#1322](https://github.com/ClickHouse/ClickHouse/issues/1322)).
+- Fixed the incorrect result of the `maxIntersection()` function when the boundaries of intervals coincided ([Michael Furmur](https://github.com/ClickHouse/ClickHouse/pull/2657)).
+- Fixed incorrect transformation of the OR expression chain in a function argument ([chenxing-xc](https://github.com/ClickHouse/ClickHouse/pull/2663)).
+- Fixed performance degradation for queries containing `IN (subquery)` expressions inside another subquery ([\#2571](https://github.com/ClickHouse/ClickHouse/issues/2571)).
+- Fixed incompatibility between servers with different versions in distributed queries that use a `CAST` function that isn't in uppercase letters ([fe8c4d6](https://github.com/ClickHouse/ClickHouse/commit/fe8c4d64e434cacd4ceef34faa9005129f2190a5)).
+- Added missing quoting of identifiers for queries to an external DBMS ([\#2635](https://github.com/ClickHouse/ClickHouse/issues/2635)).
+
+#### Backward incompatible changes: {#backward-incompatible-changes-6}
+
+- Converting a string containing the number zero to DateTime does not work. Example: `SELECT toDateTime('0')`. This is also the reason that `DateTime DEFAULT '0'` does not work in tables, as well as `0` in dictionaries. Solution: replace `0` with `0000-00-00 00:00:00`.
+
+## ClickHouse release 1.1 {#clickhouse-release-1-1}
+
+### ClickHouse release 1.1.54394, 2018-07-12 {#clickhouse-release-1-1-54394-2018-07-12}
+
+#### New features: {#new-features-10}
+
+- Added the `histogram` aggregate function (see the sketch after this list) ([Mikhail Surin](https://github.com/ClickHouse/ClickHouse/pull/2521)).
+- Now `OPTIMIZE TABLE ... FINAL` can be used without specifying partitions for `ReplicatedMergeTree` ([Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2600)).
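+
+A sketch of the new aggregate function (not from the original entry); the parameter is the number of bins, and the result is an array of (lower, upper, height) tuples:
+
+```sql
+SELECT histogram(5)(number) FROM numbers(100) -- 5 adaptive bins over the values 0..99
+```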
+- Fixed how an empty `TinyLog` table works after inserting an empty data block ([\#2563](https://github.com/ClickHouse/ClickHouse/issues/2563)).
+- The `system.zookeeper` table works if the value of the node in ZooKeeper is NULL.
+
+### ClickHouse release 1.1.54390, 2018-07-06 {#clickhouse-release-1-1-54390-2018-07-06}
+
+#### New features: {#new-features-11}
+
+- Queries can be sent in `multipart/form-data` format (in the `query` field), which is useful if external data is also sent for query processing ([Olga Khvostikova](https://github.com/ClickHouse/ClickHouse/pull/2490)).
+- Added the ability to enable or disable processing of single or double quotation marks when reading data in CSV format. You can configure this in the `format_csv_allow_single_quotes` and `format_csv_allow_double_quotes` settings ([Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2574)).
+- Now `OPTIMIZE TABLE ... FINAL` can be used without specifying the partition for non-replicated variants of `MergeTree` ([Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2599)).
+
+#### Improvements: {#improvements-10}
+
+- Improved performance, reduced memory consumption, and correct tracking of memory consumption with use of the IN operator when a table index could be used ([\#2584](https://github.com/ClickHouse/ClickHouse/pull/2584)).
+- Removed redundant checking of checksums when adding a data part. This is important when there is a large number of replicas, because in these cases the total number of checks was equal to N^2.
+- Added support for `Array(Tuple(...))` arguments for the `arrayEnumerateUniq` function ([\#2573](https://github.com/ClickHouse/ClickHouse/pull/2573)).
+- Added `Nullable` support for the `runningDifference` function ([\#2594](https://github.com/ClickHouse/ClickHouse/pull/2594)).
+- Improved query analysis performance when there is a very large number of expressions ([\#2572](https://github.com/ClickHouse/ClickHouse/pull/2572)).
+- Faster selection of data parts to merge in `ReplicatedMergeTree` tables. Faster recovery of the ZooKeeper session ([\#2597](https://github.com/ClickHouse/ClickHouse/pull/2597)).
+- The `format_version.txt` file for `MergeTree` tables is re-created if it is missing, which makes sense if ClickHouse is launched after copying the directory structure without files ([Ciprian Hacman](https://github.com/ClickHouse/ClickHouse/pull/2593)).
+
+#### Bug fixes: {#bug-fixes-19}
+
+- Fixed a bug when working with ZooKeeper that could make it impossible to recover the session and readonly states of tables before restarting the server.
+- Fixed a bug when working with ZooKeeper that could result in old nodes not being deleted if the session is interrupted.
+- Fixed an error in the `quantileTDigest` function for Float arguments (this bug was introduced in version 1.1.54388) ([Mikhail Surin](https://github.com/ClickHouse/ClickHouse/pull/2553)).
+- Fixed a bug in the index for MergeTree tables if the primary key column is located inside a function for converting types between signed and unsigned integers of the same size ([\#2603](https://github.com/ClickHouse/ClickHouse/pull/2603)).
+- Fixed a segfault if `macros` are used but they aren't in the config file ([\#2570](https://github.com/ClickHouse/ClickHouse/pull/2570)).
+- Fixed switching to the default database when reconnecting the client ([\#2583](https://github.com/ClickHouse/ClickHouse/pull/2583)).
+- Fixed a bug that occurred when the `use_index_for_in_with_subqueries` setting was disabled.
+
+#### Security fix: {#security-fix-1}
+
+- Sending files is no longer possible when connected to MySQL (`LOAD DATA LOCAL INFILE`).
+
+### ClickHouse release 1.1.54388, 2018-06-28 {#clickhouse-release-1-1-54388-2018-06-28}
+
+#### New features: {#new-features-12}
+
+- Support for the `ALTER TABLE t DELETE WHERE` query for replicated tables. Added the `system.mutations` table to track the progress of this type of queries.
+- Support for the `ALTER TABLE t [REPLACE|ATTACH] PARTITION` query for \*MergeTree tables.
+- Support for the `TRUNCATE TABLE` query ([Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2260))
+- Several new `SYSTEM` queries for replicated tables (`RESTART REPLICAS`, `SYNC REPLICA`, `[STOP|START] [MERGES|FETCHES|SENDS REPLICATED|REPLICATION QUEUES]`).
+- Added the ability to write to a table with the MySQL engine and the corresponding table function ([sundy-li](https://github.com/ClickHouse/ClickHouse/pull/2294)).
+- Added the `url()` table function and the `URL` table engine ([Alexander Sapin](https://github.com/ClickHouse/ClickHouse/pull/2501)).
+- Added the `windowFunnel` aggregate function ([sundy-li](https://github.com/ClickHouse/ClickHouse/pull/2352)).
+- New `startsWith` and `endsWith` functions for strings ([Vadim Plakhtinsky](https://github.com/ClickHouse/ClickHouse/pull/2429)).
+- The `numbers()` table function now allows you to specify the offset ([Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2535)).
+- The password to `clickhouse-client` can be entered interactively.
+- Server logs can now be sent to syslog ([Alexander Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/2459)).
+- Support for logging in dictionaries with a shared library source ([Alexander Sapin](https://github.com/ClickHouse/ClickHouse/pull/2472)).
+- Support for custom CSV delimiters ([Ivan Zhukov](https://github.com/ClickHouse/ClickHouse/pull/2263))
+- Added the `date_time_input_format` setting. If you switch this setting to `'best_effort'`, DateTime values will be read in a wide range of formats.
+- Added the `clickhouse-obfuscator` utility for data obfuscation. Usage example: publishing data used in performance tests.
+
+#### Experimental features: {#experimental-features-2}
+
+- Added the ability to calculate `and` arguments only where they are needed ([Anastasia Tsarkova](https://github.com/ClickHouse/ClickHouse/pull/2272))
+- JIT compilation to native code is now available for some expressions ([pyos](https://github.com/ClickHouse/ClickHouse/pull/2277)).
+
+#### Bug fixes: {#bug-fixes-20}
+
+- Duplicates no longer appear for a query with `DISTINCT` and `ORDER BY`.
+- Queries with `ARRAY JOIN` and `arrayFilter` no longer return an incorrect result.
+- Fixed an error when reading an array column from a Nested structure ([\#2066](https://github.com/ClickHouse/ClickHouse/issues/2066)).
+- Fixed an error when parsing queries with a HAVING clause like `HAVING tuple IN (...)`.
+- Fixed an error when parsing queries with recursive aliases.
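+
+  As an illustration of the `HAVING tuple IN (...)` parser fix above, queries of this shape now parse correctly (the table and values here are illustrative only):
+
+  ``` sql
+  SELECT number % 3 AS k, count() AS c
+  FROM numbers(100)
+  GROUP BY k
+  HAVING (k, c) IN ((0, 34), (1, 33));
+  ```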
+- Fixed an error when reading from ReplacingMergeTree with a condition in PREWHERE that filters all rows ([\#2525](https://github.com/ClickHouse/ClickHouse/issues/2525)).
+- User profile settings were not applied when using sessions in the HTTP interface.
+- Fixed how settings are applied from the command line parameters in clickhouse-local.
+- The ZooKeeper client library now uses the session timeout received from the server.
+- Fixed a bug in the ZooKeeper client library when the client waited for the server response longer than the timeout.
+- Fixed pruning of parts for queries with conditions on partition key columns ([\#2342](https://github.com/ClickHouse/ClickHouse/issues/2342)).
+- Merges are now possible after `CLEAR COLUMN IN PARTITION` ([\#2315](https://github.com/ClickHouse/ClickHouse/issues/2315)).
+- Fixed type mapping in the ODBC table function ([sundy-li](https://github.com/ClickHouse/ClickHouse/pull/2268)).
+- Fixed type comparisons for `DateTime` with and without a time zone ([Alexander Bocharov](https://github.com/ClickHouse/ClickHouse/pull/2400)).
+- Fixed syntactic parsing and formatting of the `CAST` operator.
+- Fixed insertion into a materialized view for the Distributed table engine ([Babacar Diassé](https://github.com/ClickHouse/ClickHouse/pull/2411)).
+- Fixed a race condition when writing data from the `Kafka` engine to materialized views ([Yangkuan Liu](https://github.com/ClickHouse/ClickHouse/pull/2448)).
+- Fixed SSRF in the remote() table function.
+- Fixed exit behavior of `clickhouse-client` in multiline mode ([\#2510](https://github.com/ClickHouse/ClickHouse/issues/2510)).
+
+#### Improvements: {#improvements-11}
+
+- Background tasks in replicated tables are now performed in a thread pool instead of in separate threads ([Silviu Caragea](https://github.com/ClickHouse/ClickHouse/pull/1722)).
+- Improved LZ4 compression performance.
+- Faster analysis of queries with a large number of JOINs and subqueries.
+- The DNS cache is now updated automatically when there are too many network errors.
+- Table inserts no longer occur if the insert into one of the materialized views is not possible because it has too many parts.
+- Corrected the discrepancy in the event counters `Query`, `SelectQuery`, and `InsertQuery`.
+- Expressions like `tuple IN (SELECT tuple)` are allowed if the tuple types match.
+- A server with replicated tables can start even if you haven't configured ZooKeeper.
+- When calculating the number of available CPU cores, limits on cgroups are now taken into account ([Atri Sharma](https://github.com/ClickHouse/ClickHouse/pull/2325)).
+- Added chown for config directories in the systemd config file ([Mikhail Shiryaev](https://github.com/ClickHouse/ClickHouse/pull/2421)).
+
+#### Build changes: {#build-changes-4}
+
+- The gcc8 compiler can be used for builds.
+- Added the ability to build llvm from a submodule.
+- The version of the librdkafka library has been updated to v0.11.4.
+- Added the ability to use the system libcpuid library. The library version has been updated to 0.4.0.
+- Fixed the build using the vectorclass library ([Babacar Diassé](https://github.com/ClickHouse/ClickHouse/pull/2274)).
+- CMake now generates files for ninja by default (like when using `-G Ninja`).
+- Added the ability to use the libtinfo library instead of libtermcap ([Georgy Kondratiev](https://github.com/ClickHouse/ClickHouse/pull/2519)).
+- Fixed a header file conflict in Fedora Rawhide ([\#2520](https://github.com/ClickHouse/ClickHouse/issues/2520)).
+
+#### Backward incompatible changes: {#backward-incompatible-changes-7}
+
+- Removed escaping in `Vertical` and `Pretty*` formats and deleted the `VerticalRaw` format.
+- If servers with version 1.1.54388 (or newer) and servers with an older version are used simultaneously in a distributed query, and the query has the `cast(x, 'Type')` expression without the `AS` keyword and doesn't have the word `cast` in uppercase, an exception will be thrown with a message like `Not found column cast(0, 'UInt8') in block`. Solution: update the server on the entire cluster.
+
+### ClickHouse release 1.1.54385, 2018-06-01 {#clickhouse-release-1-1-54385-2018-06-01}
+
+#### Bug fixes: {#bug-fixes-21}
+
+- Fixed an error that in some cases caused ZooKeeper operations to block.
+
+### ClickHouse release 1.1.54383, 2018-05-22 {#clickhouse-release-1-1-54383-2018-05-22}
+
+#### Bug fixes: {#bug-fixes-22}
+
+- Fixed a slowdown of the replication queue if a table has many replicas.
+
+### ClickHouse release 1.1.54381, 2018-05-14 {#clickhouse-release-1-1-54381-2018-05-14}
+
+#### Bug fixes: {#bug-fixes-23}
+
+- Fixed a node leak in ZooKeeper when ClickHouse loses the connection to the ZooKeeper server.
+
+### ClickHouse release 1.1.54380, 2018-04-21 {#clickhouse-release-1-1-54380-2018-04-21}
+
+#### New features: {#new-features-13}
+
+- Added the table function `file(path, format, structure)`. An example reading bytes from `/dev/urandom`: ``` ln -s /dev/urandom /var/lib/clickhouse/user_files/random``clickhouse-client -q "SELECT * FROM file('random', 'RowBinary', 'd UInt8') LIMIT 10" ```.
+
+#### Improvements: {#improvements-12}
+
+- Subqueries can be wrapped in `()` brackets to enhance query readability. For example: `(SELECT 1) UNION ALL (SELECT 1)`.
+- Simple `SELECT` queries from the `system.processes` table are not included in the `max_concurrent_queries` limit.
+
+#### Bug fixes: {#bug-fixes-24}
+
+- Fixed incorrect behavior of the `IN` operator when selecting from a `MATERIALIZED VIEW`.
+- Fixed incorrect filtering by partition index in expressions like `partition_key_column IN (...)`.
+- Fixed inability to execute an `OPTIMIZE` query on a non-leader replica if `RENAME` was performed on the table.
+- Fixed the authorization error when executing `OPTIMIZE` or `ALTER` queries on a non-leader replica.
+- Fixed freezing of `KILL QUERY`.
+- Fixed an error in the ZooKeeper client library which led to loss of watches, freezing of the distributed DDL queue, and slowdowns in the replication queue if a non-empty `chroot` prefix is used in the ZooKeeper configuration.
+
+#### Backward incompatible changes: {#backward-incompatible-changes-8}
+
+- Removed support for expressions like `(a, b) IN (SELECT (a, b))` (you can use the equivalent expression `(a, b) IN (SELECT a, b)`). In previous releases, these expressions led to undetermined `WHERE` filtering or caused errors.
+
+### ClickHouse release 1.1.54378, 2018-04-16 {#clickhouse-release-1-1-54378-2018-04-16}
+
+#### New features: {#new-features-14}
+
+- The logging level can be changed without restarting the server.
+- Added the `SHOW CREATE DATABASE` query.
+- The `query_id` can be passed to `clickhouse-client` (elBroom).
+- New setting: `max_network_bandwidth_for_all_users`.
+- Added support for `ALTER TABLE ... PARTITION ...` for `MATERIALIZED VIEW`.
+- Added information about the size of data parts in uncompressed form in the system table.
+- Server-to-server encryption support for distributed tables (`<secure>1</secure>` in the replica config in `<remote_servers>`).
+- Table-level configuration for the `ReplicatedMergeTree` family in order to minimize the amount of data stored in ZooKeeper: `use_minimalistic_checksums_in_zookeeper = 1`
+- Configuration of the `clickhouse-client` prompt. By default, server names are now output to the prompt. The server's display name can be changed. It's also sent in the `X-ClickHouse-Display-Name` HTTP header (Kirill Shvakov).
+- Multiple comma-separated `topics` can be specified for the `Kafka` engine (Tobias Adamson)
+- When a query is stopped by `KILL QUERY` or `replace_running_query`, the client receives the `Query was canceled` exception instead of an incomplete result.
+
+#### Improvements: {#improvements-13}
+
+- `ALTER TABLE ... DROP/DETACH PARTITION` queries are run at the front of the replication queue.
+- `SELECT ... FINAL` and `OPTIMIZE ... FINAL` can be used even when the table has a single data part.
+- A `query_log` table is recreated on the fly if it was deleted manually (Kirill Shvakov).
+- The `lengthUTF8` function runs faster (zhang2014).
+- Improved performance of synchronous inserts in `Distributed` tables (`insert_distributed_sync = 1`) when there is a very large number of shards.
+- The server accepts the `send_timeout` and `receive_timeout` settings from the client and applies them when connecting to the client (they are applied in reverse order: the server socket's `send_timeout` is set to the `receive_timeout` value received from the client, and vice versa).
+- More robust crash recovery for asynchronous inserts into `Distributed` tables.
+- The return type of the `countEqual` function changed from `UInt32` to `UInt64` (谢磊).
+
+#### Bug fixes: {#bug-fixes-25}
+
+- Fixed an error with `IN` when the left side of the expression is `Nullable`.
+- Correct results are now returned when using tuples with `IN` when some of the tuple components are in the table index.
+- The `max_execution_time` limit now works correctly with distributed queries.
+- Fixed errors when calculating the size of composite columns in the `system.columns` table.
+- Fixed an error when creating a temporary table with `CREATE TEMPORARY TABLE IF NOT EXISTS`.
+- Fixed errors in `StorageKafka` (\#\#2075)
+- Fixed server crashes from invalid arguments of certain aggregate functions.
+- Fixed the error that prevented the `DETACH DATABASE` query from stopping background tasks for `ReplicatedMergeTree` tables.
+- The `Too many parts` state is less likely to happen when inserting into aggregating materialized views (\#\#2084).
+- Corrected recursive handling of substitutions in the config if a substitution must be followed by another substitution on the same level.
+- Corrected the syntax in the metadata file when creating a `VIEW` that uses a query with `UNION ALL`.
+- `SummingMergeTree` now works correctly for summation of nested data structures with a composite key.
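+
+  A minimal sketch of the case this fixes (all names are hypothetical; in SummingMergeTree, a Nested structure whose name ends in `Map` is summed, with its leading numeric columns acting as a composite key):
+
+  ``` sql
+  CREATE TABLE t_summing
+  (
+      d Date,
+      k UInt32,
+      StatsMap Nested(key1 UInt32, key2 UInt32, value UInt64)
+  )
+  ENGINE = SummingMergeTree
+  ORDER BY (d, k);
+  ```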
+- Fixed a possible race condition during the election of the leader for `ReplicatedMergeTree` tables.
+
+#### Build changes: {#build-changes-5}
+
+- The build supports `ninja` instead of `make` and uses `ninja` by default for building releases.
+- Renamed packages: `clickhouse-server-base` to `clickhouse-common-static`; `clickhouse-server-common` to `clickhouse-server`; `clickhouse-common-dbg` to `clickhouse-common-static-dbg`. To install, use `clickhouse-server clickhouse-client`. Packages with the old names will still be loaded into the repositories for backward compatibility.
+
+#### Backward incompatible changes: {#backward-incompatible-changes-9}
+
+- Removed the special interpretation of an IN expression if an array is specified on the left side. Previously, the expression `arr IN (set)` was interpreted as «at least one `arr` element belongs to the `set`». To get the same behavior in the new version, write `arrayExists(x -> x IN (set), arr)`.
+- Disabled the incorrect use of the socket option `SO_REUSEPORT`, which was incorrectly enabled by default in the Poco library. Note that on Linux there is no longer any reason to simultaneously specify the addresses `::` and `0.0.0.0` for listen – use just `::`, which allows listening to the connection both over IPv4 and IPv6 (with the default kernel config settings). You can also revert to the behavior from previous versions by specifying `<listen_reuse_port>1</listen_reuse_port>` in the config.
+
+### ClickHouse release 1.1.54370, 2018-03-16 {#clickhouse-release-1-1-54370-2018-03-16}
+
+#### New features: {#new-features-15}
+
+- Added the `system.macros` table and auto updating of macros when the config file is changed.
+- Added the `SYSTEM RELOAD CONFIG` query.
+- Added the `maxIntersections(left_col, right_col)` aggregate function, which returns the maximum number of simultaneously intersecting intervals `[left; right]`. The `maxIntersectionsPosition(left, right)` function returns the beginning of the «maximum» interval. ([Michael Furmur](https://github.com/ClickHouse/ClickHouse/pull/2012)).
+
+#### Improvements: {#improvements-14}
+
+- When inserting data into a `Replicated` table, fewer requests are made to `ZooKeeper` (and most of the user-level errors have disappeared from the `ZooKeeper` log).
+- Added the ability to create aliases for data sets. Example: `WITH (1, 2, 3) AS set SELECT number IN set FROM system.numbers LIMIT 10`.
+
+#### Bug fixes: {#bug-fixes-26}
+
+- Fixed the `Illegal PREWHERE` error when reading from Merge tables over `Distributed` tables.
+- Added fixes that allow you to start clickhouse-server in IPv4-only Docker containers.
+- Fixed a race condition when reading from the `system.parts_columns` table.
+- Removed double buffering during a synchronous insert into a `Distributed` table, which could have caused the connection to time out.
+- Fixed a bug that caused excessively long waits for an unavailable replica before beginning a `SELECT` query.
+- Fixed incorrect dates in the `system.parts` table.
+- Fixed a bug that made it impossible to insert data into a `Replicated` table if `chroot` was non-empty in the configuration of the `ZooKeeper` cluster.
+- Fixed the vertical merging algorithm for an empty `ORDER BY` table.
+- Restored the ability to use dictionaries in queries to remote tables, even if these dictionaries are not present on the requestor server. This functionality was lost in release 1.1.54362.
+- Restored the behavior for queries like `SELECT * FROM remote('server2', default.table) WHERE col IN (SELECT col2 FROM default.table)` when the right side of `IN` should use a remote `default.table` instead of a local one. This behavior was broken in version 1.1.54358.
+- Removed extraneous error-level logging of `Not found column ... in block`.
+
+### ClickHouse release 1.1.54362, 2018-03-11 {#clickhouse-release-1-1-54362-2018-03-11}
+
+#### New features: {#new-features-16}
+
+- Aggregation without `GROUP BY` for an empty set (such as `SELECT count(*) FROM table WHERE 0`) now returns a result with one row with null values for aggregate functions, in compliance with the SQL standard. To restore the old behavior (return an empty result), set `empty_result_for_aggregation_by_empty_set` to 1.
+- Added type conversion for `UNION ALL`. Different alias names are allowed in `SELECT` positions in `UNION ALL`, in compliance with the SQL standard.
+- Arbitrary expressions are supported in `LIMIT BY` clauses. Previously, it was only possible to use columns resulting from `SELECT`.
+- An index of `MergeTree` tables is used when `IN` is applied to a tuple of expressions from the columns of the primary key. Example: `WHERE (UserID, EventDate) IN ((123, '2000-01-01'), ...)` (Anastasia Tsarkova).
+- Added the `clickhouse-copier` tool for copying between clusters and resharding data (beta version).
+- Added consistent hashing functions: `yandexConsistentHash`, `jumpConsistentHash`, `sumburConsistentHash`. They can be used as a sharding key in order to reduce the amount of network traffic during subsequent reshardings.
+- Added functions: `arrayAny`, `arrayAll`, `hasAny`, `hasAll`, `arrayIntersect`, `arrayResize`.
+- Added the `arrayCumSum` function (Javi Santana).
+- Added the `parseDateTimeBestEffort`, `parseDateTimeBestEffortOrZero`, and `parseDateTimeBestEffortOrNull` functions to read DateTime from a string containing text in a wide variety of possible formats.
+- Data can be partially reloaded from external dictionaries during updating (only the records where the value of the specified field is greater than in the previous download are loaded) (Arsen Hakobyan).
+- Added the `cluster` table function. Example: `cluster(cluster_name, db, table)`. The `remote` table function can accept the cluster name as the first argument, if it is specified as an identifier.
+- The `remote` and `cluster` table functions can be used in `INSERT` queries.
+- Added the `create_table_query` and `engine_full` virtual columns to the `system.tables` table. The `metadata_modification_time` column is virtual.
+- Added the `data_path` and `metadata_path` columns to the `system.tables` and `system.databases` tables, and added the `path` column to the `system.parts` and `system.parts_columns` tables.
+- Added additional information about merges in the `system.part_log` table.
+- An arbitrary partitioning key can be used for the `system.query_log` table (Kirill Shvakov).
+- The `SHOW TABLES` query now also shows temporary tables. Added temporary tables and the `is_temporary` column to `system.tables` (zhang2014).
+- Added the `DROP TEMPORARY TABLE` and `EXISTS TEMPORARY TABLE` queries (zhang2014).
+- Support for `SHOW CREATE TABLE` for temporary tables (zhang2014).
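+
+  Taken together with the temporary-table queries above, a session can now do, for example (the table name is illustrative):
+
+  ``` sql
+  CREATE TEMPORARY TABLE tmp_ids (id UInt64);
+  SHOW CREATE TABLE tmp_ids;
+  EXISTS TEMPORARY TABLE tmp_ids;
+  DROP TEMPORARY TABLE tmp_ids;
+  ```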
+- Added the `system_profile` configuration parameter for the settings used by internal processes.
+- Support for loading `object_id` as an attribute in `MongoDB` dictionaries (Pavel Litvinenko).
+- Reading `null` as the default value when loading data for an external dictionary with the `MongoDB` source (Pavel Litvinenko).
+- Reading `DateTime` values in the `Values` format from a Unix timestamp without single quotes.
+- Failover is supported in `remote` table functions for cases when some of the replicas are missing the requested table.
+- Configuration settings can be overridden in the command line when you run `clickhouse-server`. Example: `clickhouse-server -- --logger.level=information`.
+- Implemented the `empty` function for a `FixedString` argument: the function returns 1 if the string consists entirely of null bytes (zhang2014).
+- Added the `listen_try` configuration parameter for listening to at least one of the listen addresses without quitting, if some of the addresses can't be listened to (useful for systems with disabled support for IPv4 or IPv6).
+- Added the `VersionedCollapsingMergeTree` table engine.
+- Support for strings and arbitrary numeric types for the `library` dictionary source.
+- `MergeTree` tables can be used without a primary key (you need to specify `ORDER BY tuple()`).
+- A `Nullable` type can be `CAST` to a non-`Nullable` type if the argument is not `NULL`.
+- `RENAME TABLE` can be performed for `VIEW`.
+- Added the `throwIf` function.
+- Added the `odbc_default_field_size` option, which allows you to extend the maximum size of the value loaded from an ODBC source (by default, it is 1024).
+- The `system.processes` table and `SHOW PROCESSLIST` now have the `is_cancelled` and `peak_memory_usage` columns.
+
+#### Improvements: {#improvements-15}
+
+- Limits and quotas on the result are no longer applied to intermediate data for `INSERT SELECT` queries or for `SELECT` subqueries.
+- Fewer false triggers of `force_restore_data` when checking the status of `Replicated` tables when the server starts.
+- Added the `allow_distributed_ddl` option.
+- Nondeterministic functions are not allowed in expressions for `MergeTree` table keys.
+- Files with substitutions from the `config.d` directories are loaded in alphabetical order.
+- Improved performance of the `arrayElement` function in the case of a constant multidimensional array with an empty array as one of the elements. Example: `[[1], []][x]`.
+- The server starts faster now when using configuration files with very large substitutions (for instance, very large lists of IP networks).
+- When running a query, table valued functions run once. Previously, the `remote` and `mysql` table valued functions performed the same query twice to retrieve the table structure from a remote server.
+- The `MkDocs` documentation generator is used.
+- When you try to delete a table column that `DEFAULT`/`MATERIALIZED` expressions of other columns depend on, an exception is thrown (zhang2014).
+- Added the ability to parse an empty line in text formats as the number 0 for `Float` data types. This feature was previously available but was lost in release 1.1.54342.
+- `Enum` values can be used in `min`, `max`, `sum` and some other functions. In these cases, the corresponding numeric values are used. This feature was previously available but was lost in release 1.1.54337.
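+
+  For example (hypothetical table; the functions operate on the underlying numeric values 1 and 2):
+
+  ``` sql
+  CREATE TEMPORARY TABLE events (e Enum8('view' = 1, 'click' = 2));
+  INSERT INTO events VALUES ('view'), ('click'), ('click');
+  SELECT min(e), max(e), sum(e) FROM events;
+  ```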
+- Added `max_expanded_ast_elements` to restrict the size of the AST after recursively expanding aliases.
+
+#### Bug fixes: {#bug-fixes-27}
+
+- Fixed cases when unnecessary columns were removed from subqueries by mistake, or were not removed from subqueries containing `UNION ALL`.
+- Fixed a bug in merges for `ReplacingMergeTree` tables.
+- Fixed synchronous insertions in `Distributed` tables (`insert_distributed_sync = 1`).
+- Fixed a segfault for certain uses of `FULL` and `RIGHT JOIN` with duplicate columns in subqueries.
+- Fixed a segfault for certain uses of `replace_running_query` and `KILL QUERY`.
+- Fixed the order of the `source` and `last_exception` columns in the `system.dictionaries` table.
+- Fixed a bug where the `DROP DATABASE` query did not delete the file with metadata.
+- Fixed the `DROP DATABASE` query for `Dictionary` databases.
+- Fixed the low precision of the `uniqHLL12` and `uniqCombined` functions for cardinalities greater than 100 million items (Alex Bocharov).
+- Fixed the calculation of implicit default values when necessary to simultaneously calculate explicit default expressions in `INSERT` queries (zhang2014).
+- Fixed a rare case when a query to a `MergeTree` table couldn't finish (chenxing-xc).
+- Fixed a crash that occurred when running a `CHECK` query for `Distributed` tables if all shards are local (chenxing.xc).
+- Fixed a slight performance regression with functions that use regular expressions.
+- Fixed a performance regression when creating multidimensional arrays from complex expressions.
+- Fixed a bug that could cause an extra `FORMAT` section to appear in an `.sql` file with metadata.
+- Fixed a bug that caused the `max_table_size_to_drop` limit to apply when trying to delete a `MATERIALIZED VIEW` looking at an explicitly specified table.
+- Fixed incompatibility with old clients (old clients were sometimes sent data with the `DateTime('timezone')` type, which they do not understand).
+- Fixed a bug when reading `Nested` column elements of structures that were added using `ALTER` but that are empty for the old partitions, when the conditions for these columns moved to `PREWHERE`.
+- Fixed a bug when filtering tables by the virtual `_table` column in queries to `Merge` tables.
+- Fixed a bug when using `ALIAS` columns in `Distributed` tables.
+- Fixed a bug that made dynamic compilation impossible for queries with aggregate functions from the `quantile` family.
+- Fixed a race condition in the query execution pipeline that occurred in very rare cases when using `Merge` tables with a large number of tables, and when using `GLOBAL` subqueries.
+- Fixed a crash when passing arrays of different sizes to the `arrayReduce` function when using aggregate functions from multiple arguments.
+- Prohibited the use of queries with `UNION ALL` in a `MATERIALIZED VIEW`.
+- Fixed an error during initialization of the `part_log` system table when the server starts (by default, `part_log` is disabled).
+
+#### Backward incompatible changes: {#backward-incompatible-changes-10}
+
+- Removed the `distributed_ddl_allow_replicated_alter` option. This behavior is enabled by default.
+- Removed the `strict_insert_defaults` setting. If you were using this functionality, write to `clickhouse-feedback@yandex-team.com`.
+- Removed the `UnsortedMergeTree` engine.
+
+### ClickHouse release 1.1.54343, 2018-02-05 {#clickhouse-release-1-1-54343-2018-02-05}
+
+- Added macros support for defining cluster names in distributed DDL queries and constructors of Distributed tables: `CREATE TABLE distr ON CLUSTER '{cluster}' (...) ENGINE = Distributed('{cluster}', 'db', 'table')`.
+- Now queries like `SELECT ... FROM table WHERE expr IN (subquery)` are processed using the `table` index.
+- Improved processing of duplicates when inserting into Replicated tables, so they no longer slow down execution of the replication queue.
+
+### ClickHouse release 1.1.54342, 2018-01-22 {#clickhouse-release-1-1-54342-2018-01-22}
+
+This release contains bug fixes for the previous release 1.1.54337:
+
+- Fixed a regression in 1.1.54337: if the default user has readonly access, then the server refuses to start up with the message `Cannot create database in readonly mode`.
+- Fixed a regression in 1.1.54337: on systems with systemd, logs are always written to syslog regardless of the configuration; the watchdog script still uses init.d.
+- Fixed a regression in 1.1.54337: wrong default configuration in the Docker image.
+- Fixed nondeterministic behavior of GraphiteMergeTree (you can see it in the log messages `Data after merge is not byte-identical to the data on another replicas`).
+- Fixed a bug that could lead to inconsistent merges after an OPTIMIZE query to Replicated tables (you may see it in the log messages `Part ... intersects the previous part`).
+- Buffer tables now work correctly when MATERIALIZED columns are present in the destination table (by zhang2014).
+- Fixed a bug in the implementation of NULL.
+
+### ClickHouse release 1.1.54337, 2018-01-18 {#clickhouse-release-1-1-54337-2018-01-18}
+
+#### New features: {#new-features-17}
+
+- Added support for storage of multidimensional arrays and tuples (the `Tuple` data type) in tables.
+- Support for table functions for `DESCRIBE` and `INSERT` queries. Added support for subqueries in `DESCRIBE`. Examples: `DESC TABLE remote('host', default.hits)`; `DESC TABLE (SELECT 1)`; `INSERT INTO TABLE FUNCTION remote('host', default.hits)`. Support for `INSERT INTO TABLE` in addition to `INSERT INTO`.
+- Improved support for time zones. The `DateTime` data type can be annotated with the time zone that is used for parsing and formatting in text formats. Example: `DateTime('Europe/Moscow')`. When time zones are specified in functions for `DateTime` arguments, the return type will track the time zone, and the value will be displayed as expected.
+- Added the functions `toTimeZone`, `timeDiff`, `toQuarter`, `toRelativeQuarterNum`. The `toRelativeHour`/`Minute`/`Second` functions can take a value of type `Date` as an argument. The `now` function name is case-sensitive.
+- Added the `toStartOfFifteenMinutes` function (Kirill Shvakov).
+- Added the `clickhouse format` tool for formatting queries.
+- Added the `format_schema_path` configuration parameter (Marek Vavruša). It is used for specifying a schema in `Cap'n Proto` format. Schema files can be located only in the specified directory.
+- Added support for config substitutions (`incl` and `conf.d`) for configuration of external dictionaries and models (Pavel Yakunin).
+- Added a column with documentation for the `system.settings` table (Kirill Shvakov).
+- Added the `system.parts_columns` table with information about column sizes in each data part of `MergeTree` tables.
+- Added the `system.models` table with information about loaded `CatBoost` machine learning models.
+- Added the `mysql` and `odbc` table functions and the corresponding `MySQL` and `ODBC` table engines for accessing remote databases. This functionality is in the beta stage.
+- Added the possibility to pass an argument of type `AggregateFunction` to the `groupArray` aggregate function (so you can create an array of states of some aggregate function).
+- Removed restrictions on various combinations of aggregate function combinators. For example, you can use both the `avgForEachIf` and `avgIfForEach` aggregate functions, which have different behaviors.
+- The `-ForEach` aggregate function combinator is extended for the case of aggregate functions of multiple arguments.
+- Added support for aggregate functions of `Nullable` arguments even for cases when the function returns a non-`Nullable` result (added with the contribution of Silviu Caragea). Examples: `groupArray`, `groupUniqArray`, `topK`.
+- Added `max_client_network_bandwidth` for `clickhouse-client` (Kirill Shvakov).
+- Users with the `readonly = 2` setting are allowed to work with TEMPORARY tables (CREATE, DROP, INSERT…) (Kirill Shvakov).
+- Added support for using multiple consumers with the `Kafka` engine. Extended configuration options for `Kafka` (Marek Vavruša).
+- Added the `intExp3` and `intExp4` functions.
+- Added the `sumKahan` aggregate function.
+- Added the to\*Number\*OrNull functions, where \*Number\* is a numeric type.
+- Added support for the `WITH` clause for an `INSERT SELECT` query (author: zhang2014).
+- Added settings: `http_connection_timeout`, `http_send_timeout`, `http_receive_timeout`. In particular, these settings are used for downloading data parts for replication. Changing these settings allows for faster failover if the network is overloaded.
+- Added support for `ALTER` for tables of type `Null` (Anastasia Tsarkova).
+- The `reinterpretAsString` function is extended for all data types that are stored contiguously in memory.
+- Added the `--silent` option for the `clickhouse-local` tool. It suppresses printing query execution info in stderr.
+- Added support for reading values of type `Date` from text in a format where the month and/or day of the month is specified using a single digit instead of two digits (Amos Bird).
+
+#### Performance optimizations: {#performance-optimizations}
+
+- Improved performance of the aggregate functions `min`, `max`, `any`, `anyLast`, `anyHeavy`, `argMin`, `argMax` for string arguments.
+- Improved performance of the functions `isInfinite`, `isFinite`, `isNaN`, `roundToExp2`.
+- Improved performance of parsing and formatting `Date` and `DateTime` type values in text format.
+- Improved performance and precision of parsing floating point numbers.
+- Lowered memory usage for `JOIN` in the case when the left and right parts have columns with identical names that are not contained in `USING`.
+- Improved performance of the aggregate functions `varSamp`, `varPop`, `stddevSamp`, `stddevPop`, `covarSamp`, `covarPop`, `corr` by reducing computational stability. The old functions are available under the names `varSampStable`, `varPopStable`, `stddevSampStable`, `stddevPopStable`, `covarSampStable`, `covarPopStable`, `corrStable`.
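+
+  For example, to keep the numerically stable behavior, switch to the `*Stable` variants (illustrative query):
+
+  ``` sql
+  SELECT varSamp(number) AS fast, varSampStable(number) AS stable
+  FROM numbers(1000);
+  ```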
+
+#### Bug fixes: {#bug-fixes-28}
+
+- Fixed data deduplication after running a `DROP` or `DETACH PARTITION` query. In the previous version, dropping a partition and inserting the same data again did not work because the inserted blocks were considered duplicates.
+- Fixed a bug that could lead to incorrect interpretation of the `WHERE` clause for `CREATE MATERIALIZED VIEW` queries with `POPULATE`.
+- Fixed a bug in using the `root_path` parameter in the `zookeeper_servers` configuration.
+- Fixed unexpected results of passing a `Date` argument to `toStartOfDay`.
+- Fixed the `addMonths` and `subtractMonths` functions and the arithmetic for `INTERVAL n MONTH` in cases when the result has the previous year.
+- Added missing support for the `UUID` data type for `DISTINCT`, `JOIN`, and `uniq` aggregate functions and external dictionaries (Evgeniy Ivanov). Support for `UUID` is still incomplete.
+- Fixed `SummingMergeTree` behavior in cases when the rows summed to zero.
+- Various fixes for the `Kafka` engine (Marek Vavruša).
+- Fixed incorrect behavior of the `Join` table engine (Amos Bird).
+- Fixed incorrect allocator behavior under FreeBSD and OS X.
+- The `extractAll` function now supports empty matches.
+- Fixed an error that blocked usage of `libressl` instead of `openssl`.
+- Fixed the `CREATE TABLE AS SELECT` query from temporary tables.
+- Fixed non-atomicity of updating the replication queue. This could lead to replicas being out of sync until the server restarts.
+- Fixed possible overflow in `gcd`, `lcm` and `modulo` (the `%` operator) (Maks Skorokhod).
+- `-preprocessed` files are now created after changing `umask` (`umask` can be changed in the config).
+- Fixed a bug in the background check of parts (`MergeTreePartChecker`) when using a custom partition key.
+- Fixed parsing of tuples (values of the `Tuple` data type) in text formats.
+- Improved error messages about incompatible types passed to `multiIf`, `array`, and some other functions.
+- Redesigned support for `Nullable` types. Fixed bugs that could lead to a server crash. Fixed almost all other bugs related to `NULL` support: incorrect type conversions in INSERT SELECT, insufficient support for Nullable in HAVING and PREWHERE, the `join_use_nulls` mode, Nullable types as arguments of the `OR` operator, etc.
+- Fixed various bugs related to the internal semantics of data types. Examples: unnecessary summing of `Enum` type fields in `SummingMergeTree`; alignment of `Enum` types in `Pretty` formats, etc.
+- Stricter checks for allowed combinations of composite columns.
+- Fixed the overflow when specifying a very large parameter for the `FixedString` data type.
+- Fixed a bug in the `topK` aggregate function in a generic case.
+- Added the missing check for equality of array sizes in arguments of n-ary variants of aggregate functions with the `-Array` combinator.
+- Fixed a bug in `--pager` for `clickhouse-client` (author: ks1322).
+- Fixed the precision of the `exp10` function.
+- Fixed the behavior of the `visitParamExtract` function for better compliance with documentation.
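+
+  For reference, the documented behavior looks like this (illustrative values):
+
+  ``` sql
+  SELECT visitParamExtractString('{"abc": "hello"}', 'abc'),
+         visitParamExtractUInt('{"count": 42}', 'count');
+  ```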
+- Fixed an error when incorrect data types are specified.
+- Fixed the behavior of `DISTINCT` in the case when all columns are constants.
+- Fixed query formatting in the case of using the `tupleElement` function with a complex constant expression as the tuple element index.
+- Fixed a bug in `Dictionary` tables for `range_hashed` dictionaries.
+- Fixed a bug that led to excessive rows in the result of `FULL` and `RIGHT JOIN` (Amos Bird).
+- Fixed a server crash when creating and removing temporary files in the `config.d` directories during config reload.
+- Fixed the `SYSTEM DROP DNS CACHE` query: the cache was flushed but the addresses of cluster nodes were not updated.
+- Fixed the behavior of `MATERIALIZED VIEW` after executing `DETACH TABLE` for the table under the view (Marek Vavruša).
+
+#### Build improvements: {#build-improvements-4}
+
+- The `pbuilder` tool is used for builds. The build process is almost completely independent of the build host environment.
+- A single build is used for different OS versions. Packages and binaries have been made compatible with a wide range of Linux systems.
+- Added the `clickhouse-test` package. It can be used to run functional tests.
+- The source tarball can now be published to the repository. It can be used to reproduce the build without using GitHub.
+- Added limited integration with Travis CI. Due to limits on build time in Travis, only the debug build is tested and a limited subset of tests are run.
+- Added support for `Cap'n'Proto` in the default build.
+- Changed the format of documentation sources from `Restricted Text` to `Markdown`.
+- Added support for `systemd` (Vladimir Smirnov). It is disabled by default due to incompatibility with some OS images and can be enabled manually.
+- For dynamic code generation, `clang` and `lld` are embedded into the `clickhouse` binary. They can also be invoked as `clickhouse clang` and `clickhouse lld`.
+- Removed usage of GNU extensions from the code. Enabled the `-Wextra` option. When building with `clang`, the default is `libc++` instead of `libstdc++`.
+- Extracted the `clickhouse_parsers` and `clickhouse_common_io` libraries to speed up builds of various tools.
+
+#### Backward incompatible changes: {#backward-incompatible-changes-11}
+
+- The format for marks in `Log` type tables that contain `Nullable` columns was changed in a backward incompatible way. If you have these tables, you should convert them to the `TinyLog` type before starting up the new server version. To do this, replace `ENGINE = Log` with `ENGINE = TinyLog` in the corresponding `.sql` file in the `metadata` directory. If your table doesn't have `Nullable` columns or if the type of your table is not `Log`, then you don't need to do anything.
+- Removed the `experimental_allow_extended_storage_definition_syntax` setting. Now this feature is enabled by default.
+- The `runningIncome` function was renamed to `runningDifferenceStartingWithFirstValue` to avoid confusion.
+- Removed the `FROM ARRAY JOIN arr` syntax when ARRAY JOIN is specified directly after FROM with no table (Amos Bird).
+- Removed the `BlockTabSeparated` format that was used solely for demonstration purposes.
+- Changed the state format for the aggregate functions `varSamp`, `varPop`, `stddevSamp`, `stddevPop`, `covarSamp`, `covarPop`, `corr`. If you have stored states of these aggregate functions in tables (using the `AggregateFunction` data type or materialized views with corresponding states), please write to clickhouse-feedback@yandex-team.com.
+- In previous server versions there was an undocumented feature: if an aggregate function depends on parameters, you could still specify it without parameters in the AggregateFunction data type. Example: `AggregateFunction(quantiles, UInt64)` instead of `AggregateFunction(quantiles(0.5, 0.9), UInt64)`. This feature was lost. Although it was undocumented, we plan to support it again in future releases.
+- Enum data types cannot be used in min/max aggregate functions. This ability will be returned in the next release.
+
+#### Please note when upgrading: {#please-note-when-upgrading}
+
+- When doing a rolling update on a cluster, at the point when some of the replicas are running the old version of ClickHouse and some are running the new version, replication is temporarily stopped and the message `unknown parameter 'shard'` appears in the log. Replication will continue after all replicas of the cluster are updated.
+- If different versions of ClickHouse are running on the cluster servers, it is possible that distributed queries using the following functions will have incorrect results: `varSamp`, `varPop`, `stddevSamp`, `stddevPop`, `covarSamp`, `covarPop`, `corr`. You should update all cluster nodes.
+
+## [Changelog for 2017](https://github.com/ClickHouse/ClickHouse/blob/master/docs/en/changelog/2017.md) {#changelog-for-2017}
diff --git a/docs/ru/whats_new/changelog/2019.md b/docs/ru/whats_new/changelog/2019.md
new file mode 100644
index 00000000000..ea5bffd74c9
--- /dev/null
+++ b/docs/ru/whats_new/changelog/2019.md
@@ -0,0 +1,2072 @@
+---
+machine_translated: true
+machine_translated_rev: 1cd5f0028d917696daf71ac1c9ee849c99c1d5c8
+---
+
+## ClickHouse release v19.17 {#clickhouse-release-v19-17}
+
+### ClickHouse release v19.17.6.36, 2019-12-27 {#clickhouse-release-v19-17-6-36-2019-12-27}
+
+#### Bug Fix {#bug-fix}
+
+- Fixed potential buffer overflow in decompress. A malicious user could pass fabricated compressed data that could cause a read after the buffer. This issue was found by Eldar Zaitov of the Yandex information security team. [\#8404](https://github.com/ClickHouse/ClickHouse/pull/8404) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed possible server crash (`std::terminate`) when the server cannot send or write data in JSON or XML format with values of the String data type (that require UTF-8 validation), or when compressing result data with the Brotli algorithm, or in some other rare cases. [\#8384](https://github.com/ClickHouse/ClickHouse/pull/8384) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed dictionaries with a source from a ClickHouse `VIEW`; now reading such dictionaries doesn't cause the error `There is no query`. [\#8351](https://github.com/ClickHouse/ClickHouse/pull/8351) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed the check of whether a client host is allowed by the host\_regexp specified in users.xml. [\#8241](https://github.com/ClickHouse/ClickHouse/pull/8241), [\#8342](https://github.com/ClickHouse/ClickHouse/pull/8342) ([Vitaly Baranov](https://github.com/vitlibar))
+- `RENAME TABLE` for a distributed table now renames the folder containing inserted data before sending it to the shards. This fixes an issue with successive renames `tableA->tableB`, `tableC->tableA`. [\#8306](https://github.com/ClickHouse/ClickHouse/pull/8306) ([tavplubix](https://github.com/tavplubix))
+- `range_hashed` external dictionaries created by DDL queries now allow ranges of arbitrary numeric types. [\#8275](https://github.com/ClickHouse/ClickHouse/pull/8275) ([alesapin](https://github.com/alesapin))
+- Fixed the `INSERT INTO table SELECT ... FROM mysql(...)` table function. [\#8234](https://github.com/ClickHouse/ClickHouse/pull/8234) ([tavplubix](https://github.com/tavplubix))
+- Fixed a segfault in `INSERT INTO TABLE FUNCTION file()` while inserting into a file that doesn't exist. Now in this case the file will be created and the insert will then be processed. [\#8177](https://github.com/ClickHouse/ClickHouse/pull/8177) ([Olga Khvostikova](https://github.com/stavrolia))
+- Fixed a bitmapAnd error when intersecting an aggregated bitmap and a scalar bitmap. [\#8082](https://github.com/ClickHouse/ClickHouse/pull/8082) ([Yue Huang](https://github.com/moon03432))
+- Fixed a segfault when the `EXISTS` query was used without a `TABLE` or `DICTIONARY` qualifier, just like `EXISTS t`. [\#8213](https://github.com/ClickHouse/ClickHouse/pull/8213) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed the return type for the functions `rand` and `randConstant` in case of a Nullable argument. Now the functions always return `UInt32` and never `Nullable(UInt32)`. [\#8204](https://github.com/ClickHouse/ClickHouse/pull/8204) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed `DROP DICTIONARY IF EXISTS db.dict`; now it doesn't throw an exception if `db` simply doesn't exist. [\#8185](https://github.com/ClickHouse/ClickHouse/pull/8185) ([Vitaly Baranov](https://github.com/vitlibar))
+- If a table wasn't completely dropped because of a server crash, the server will try to restore and load it [\#8176](https://github.com/ClickHouse/ClickHouse/pull/8176) ([tavplubix](https://github.com/tavplubix))
+- Fixed a trivial count query for a distributed table if there are more than two shard local tables. [\#8164](https://github.com/ClickHouse/ClickHouse/pull/8164) ([小路](https://github.com/nicelulu))
+- Fixed a bug that led to a data race in DB::BlockStreamProfileInfo::calculateRowsBeforeLimit() [\#8143](https://github.com/ClickHouse/ClickHouse/pull/8143) ([Alexander Kazakov](https://github.com/Akazz))
+- Fixed `ALTER table MOVE part` executed immediately after merging the specified part, which could cause moving of a part which the specified part was merged into. Now it correctly moves the specified part. [\#8104](https://github.com/ClickHouse/ClickHouse/pull/8104) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Expressions for dictionaries can now be specified as strings. This is useful for calculation of attributes while extracting data from non-ClickHouse sources, because it allows using non-ClickHouse syntax for those expressions. [\#8098](https://github.com/ClickHouse/ClickHouse/pull/8098) ([alesapin](https://github.com/alesapin))
+- Fixed a very rare race in `clickhouse-copier` because of an overflow in ZXid. [\#8088](https://github.com/ClickHouse/ClickHouse/pull/8088) ([Ding Xiang Fei](https://github.com/dingxiangfei2009))
+- Fixed a bug when after a failed query (due to "Too many simultaneous queries", for example) it would not read the external tables info, and the
+  next query would interpret this info as the beginning of the next query, causing an error like `Unknown packet from client`. [\#8084](https://github.com/ClickHouse/ClickHouse/pull/8084) ([Azat Khuzhin](https://github.com/azat))
+- Avoid null dereference after "Unknown packet X from server" [\#8071](https://github.com/ClickHouse/ClickHouse/pull/8071) ([Azat Khuzhin](https://github.com/azat))
+- Restored support of all ICU locales, added the ability to apply collations for constant expressions, and added the language name to the system.collations table. [\#8051](https://github.com/ClickHouse/ClickHouse/pull/8051) ([alesapin](https://github.com/alesapin))
+- The number of streams for reading from `StorageFile` and `StorageHDFS` is now limited, to avoid exceeding the memory limit. [\#7981](https://github.com/ClickHouse/ClickHouse/pull/7981) ([alesapin](https://github.com/alesapin))
+- Fixed the `CHECK TABLE` query for `*MergeTree` tables without a key. [\#7979](https://github.com/ClickHouse/ClickHouse/pull/7979) ([alesapin](https://github.com/alesapin))
+- Removed the mutation number from a part name in case there were no mutations. This removal improved compatibility with older versions. [\#8250](https://github.com/ClickHouse/ClickHouse/pull/8250) ([alesapin](https://github.com/alesapin))
+- Fixed a bug where mutations were skipped for some attached parts because of their data\_version being larger than the table mutation version. [\#7812](https://github.com/ClickHouse/ClickHouse/pull/7812) ([Zhichang Yu](https://github.com/yuzhichang))
+- Allow starting the server with redundant copies of parts after moving them to another device. [\#7810](https://github.com/ClickHouse/ClickHouse/pull/7810) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fixed the error "Sizes of columns doesn't match" that might appear when using aggregate function columns. [\#7790](https://github.com/ClickHouse/ClickHouse/pull/7790) ([Boris Granveaud](https://github.com/bgranvea))
+- Now an exception will be thrown in case of using WITH TIES alongside LIMIT BY. And now it's possible to use TOP with LIMIT BY. [\#7637](https://github.com/ClickHouse/ClickHouse/pull/7637) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Fixed dictionary reload if it has an `invalidate_query`, which stopped updates and some exception on previous update tries. [\#8029](https://github.com/ClickHouse/ClickHouse/pull/8029) ([alesapin](https://github.com/alesapin))
+
+### ClickHouse release v19.17.4.11, 2019-11-22 {#clickhouse-release-v19-17-4-11-2019-11-22}
+
+#### Backward Incompatible Change {#backward-incompatible-change}
+
+- Using a column instead of AST to store scalar subquery results for better performance. The setting `enable_scalar_subquery_optimization` was added in 19.17 and it was enabled by default. This leads to errors like [this](https://github.com/ClickHouse/ClickHouse/issues/7851) during an upgrade to 19.17.2 or 19.17.3 from previous versions. This setting was disabled by default in 19.17.4 to make it possible to upgrade from 19.16 and older versions without errors. [\#7392](https://github.com/ClickHouse/ClickHouse/pull/7392) ([Amos Bird](https://github.com/amosbird))
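+
+  For reference, a scalar subquery of the kind this optimization caches looks like this (illustrative query only):
+
+  ``` sql
+  SELECT (SELECT max(number) FROM numbers(10)) AS scalar;
+  ```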
+
+#### New Feature {#new-feature}
+
+- Added the ability to create dictionaries with DDL queries. [\#7360](https://github.com/ClickHouse/ClickHouse/pull/7360) ([alesapin](https://github.com/alesapin))
+- Made the `bloom_filter` type of index support `LowCardinality` and `Nullable` [\#7363](https://github.com/ClickHouse/ClickHouse/issues/7363) [\#7561](https://github.com/ClickHouse/ClickHouse/pull/7561) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Added the function `isValidJSON` to check that the passed string is valid JSON. [\#5910](https://github.com/ClickHouse/ClickHouse/issues/5910) [\#7293](https://github.com/ClickHouse/ClickHouse/pull/7293) ([Vdimir](https://github.com/Vdimir))
+- Implemented the `arrayCompact` function [\#7328](https://github.com/ClickHouse/ClickHouse/pull/7328) ([Memo](https://github.com/Joeywzr))
+- Created the function `hex` for Decimal numbers. It works like `hex(reinterpretAsString())`, but doesn't delete the last zero bytes. [\#7355](https://github.com/ClickHouse/ClickHouse/pull/7355) ([Mikhail Korotov](https://github.com/millb))
+- Added the `arrayFill` and `arrayReverseFill` functions, which replace elements by other elements in front/behind of them in the array. [\#7380](https://github.com/ClickHouse/ClickHouse/pull/7380) ([hcz](https://github.com/hczhcz))
+- Added `CRC32IEEE()`/`CRC64()` support [\#7480](https://github.com/ClickHouse/ClickHouse/pull/7480) ([Azat Khuzhin](https://github.com/azat))
+- Implemented the `char` function similar to the one in [MySQL](https://dev.mysql.com/doc/refman/8.0/en/string-functions.html#function_char) [\#7486](https://github.com/ClickHouse/ClickHouse/pull/7486) ([sundy-li](https://github.com/sundy-li))
+- Added the `bitmapTransform` function. It transforms an array of values in a bitmap to another array of values, the result of which is a new bitmap [\#7598](https://github.com/ClickHouse/ClickHouse/pull/7598) ([Zhichang Yu](https://github.com/yuzhichang))
+- Implemented the `javaHashUTF16LE()` function [\#7651](https://github.com/ClickHouse/ClickHouse/pull/7651) ([achimbab](https://github.com/achimbab))
+- Added the `_shard_num` virtual column for the Distributed engine [\#7624](https://github.com/ClickHouse/ClickHouse/pull/7624) ([Azat Khuzhin](https://github.com/azat))
+
+#### Experimental Feature {#experimental-feature}
+
+- Support for processors (the new query execution pipeline) in `MergeTree`. [\#7181](https://github.com/ClickHouse/ClickHouse/pull/7181) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+
+#### Bug Fix {#bug-fix-1}
+
+- Fixed incorrect float parsing in `Values` [\#7817](https://github.com/ClickHouse/ClickHouse/issues/7817) [\#7870](https://github.com/ClickHouse/ClickHouse/pull/7870) ([tavplubix](https://github.com/tavplubix))
+- Fixed a rare deadlock that can happen when trace\_log is enabled. [\#7838](https://github.com/ClickHouse/ClickHouse/pull/7838) ([filimonov](https://github.com/filimonov))
+- Prevent message duplication when producing into a Kafka table that has any MVs selecting from it [\#7265](https://github.com/ClickHouse/ClickHouse/pull/7265) ([Ivan](https://github.com/abyss7))
+- Support for `Array(LowCardinality(Nullable(String)))` in `IN`. Resolves [\#7364](https://github.com/ClickHouse/ClickHouse/issues/7364) [\#7366](https://github.com/ClickHouse/ClickHouse/pull/7366) ([achimbab](https://github.com/achimbab))
+
+#### Experimental Feature {#experimental-feature}
+
+- Support for processors (the new query execution pipeline) in `MergeTree`. [\#7181](https://github.com/ClickHouse/ClickHouse/pull/7181) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+
+#### Bug Fix {#bug-fix-1}
+
+- Fix incorrect float parsing in `Values`. [\#7817](https://github.com/ClickHouse/ClickHouse/issues/7817) [\#7870](https://github.com/ClickHouse/ClickHouse/pull/7870) ([tavplubix](https://github.com/tavplubix))
+- Fix a rare deadlock which can happen when trace\_log is enabled. [\#7838](https://github.com/ClickHouse/ClickHouse/pull/7838) ([filimonov](https://github.com/filimonov))
+- Prevent message duplication when producing a Kafka table that has any MVs selecting from it. [\#7265](https://github.com/ClickHouse/ClickHouse/pull/7265) ([Ivan](https://github.com/abyss7))
+- Support for `Array(LowCardinality(Nullable(String)))` in `IN`. Resolves [\#7364](https://github.com/ClickHouse/ClickHouse/issues/7364) [\#7366](https://github.com/ClickHouse/ClickHouse/pull/7366) ([achimbab](https://github.com/achimbab))
+- Add handling of `SQL_TINYINT` and `SQL_BIGINT`, and fix handling of `SQL_FLOAT` data source types in the ODBC Bridge. [\#7491](https://github.com/ClickHouse/ClickHouse/pull/7491) ([Denis Glazachev](https://github.com/traceon))
+- Fix aggregation (`avg` and quantiles) over empty decimal columns. [\#7431](https://github.com/ClickHouse/ClickHouse/pull/7431) ([Andrey Konyaev](https://github.com/akonyaev90))
+- Fix `INSERT` into Distributed with `MATERIALIZED` columns. [\#7377](https://github.com/ClickHouse/ClickHouse/pull/7377) ([Azat Khuzhin](https://github.com/azat))
+- Make `MOVE PARTITION` work if some parts of the partition are already on the destination disk or volume. [\#7434](https://github.com/ClickHouse/ClickHouse/pull/7434) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fixed a bug where hardlinks failed to be created during mutations in `ReplicatedMergeTree` in multi-disk configurations. [\#7558](https://github.com/ClickHouse/ClickHouse/pull/7558) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fixed a bug with a mutation on a MergeTree when the whole part remains unchanged and the best space is being found on another disk. [\#7602](https://github.com/ClickHouse/ClickHouse/pull/7602) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fixed a bug with `keep_free_space_ratio` not being read from the disks configuration. [\#7645](https://github.com/ClickHouse/ClickHouse/pull/7645) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fix a bug with tables that contain only `Tuple` columns or columns with complex paths. Fixes [7541](https://github.com/ClickHouse/ClickHouse/issues/7541). [\#7545](https://github.com/ClickHouse/ClickHouse/pull/7545) ([alesapin](https://github.com/alesapin))
+- Do not account memory of the Buffer engine in the max\_memory\_usage limit. [\#7552](https://github.com/ClickHouse/ClickHouse/pull/7552) ([Azat Khuzhin](https://github.com/azat))
+- Fixed final mark usage in `MergeTree` tables ordered by `tuple()`. In rare cases it could lead to a `Can't adjust last granule` error while selecting. [\#7639](https://github.com/ClickHouse/ClickHouse/pull/7639) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed a bug in mutations that have a predicate with actions that require context (for example, functions for JSON), which may lead to crashes or strange exceptions. [\#7664](https://github.com/ClickHouse/ClickHouse/pull/7664) ([alesapin](https://github.com/alesapin))
+- Fixed a mismatch of database and table name escaping in the `data/` and `shadow/` directories. [\#7575](https://github.com/ClickHouse/ClickHouse/pull/7575) ([Alexander Burmak](https://github.com/Alex-Burmak))
+- Support duplicated keys in RIGHT\|FULL JOINs, e.g. `ON t.x = u.x AND t.x = u.y`. Fix a crash in this case (see the sketch after this list). [\#7586](https://github.com/ClickHouse/ClickHouse/pull/7586) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix `Not found column in block` when joining on an expression with a RIGHT or FULL JOIN. [\#7641](https://github.com/ClickHouse/ClickHouse/pull/7641) ([Artem Zuikov](https://github.com/4ertus2))
+- One more attempt to fix the infinite loop in the `PrettySpace` format. [\#7591](https://github.com/ClickHouse/ClickHouse/pull/7591) ([Olga Khvostikova](https://github.com/stavrolia))
+- Fix a bug in the `concat` function when all arguments were `FixedString` of the same size. [\#7635](https://github.com/ClickHouse/ClickHouse/pull/7635) ([alesapin](https://github.com/alesapin))
+- Fixed an exception in case of using 1 argument while defining S3, URL and HDFS storages. [\#7618](https://github.com/ClickHouse/ClickHouse/pull/7618) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fix the scope of InterpreterSelectQuery for views with a query. [\#7601](https://github.com/ClickHouse/ClickHouse/pull/7601) ([Azat Khuzhin](https://github.com/azat))
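+
+A hedged sketch of the duplicated-key join shape mentioned above (table and column names are illustrative; before the fix such queries could crash):
+
+```sql
+-- t.x participates in two equality conditions at once
+SELECT * FROM t RIGHT JOIN u ON t.x = u.x AND t.x = u.y;
+```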
+
+#### Improvement {#improvement}
+
+- `Nullable` columns are recognized and NULL values are handled correctly by the ODBC bridge. [\#7402](https://github.com/ClickHouse/ClickHouse/pull/7402) ([Vasily Nemkov](https://github.com/Enmk))
+- Write the current batch for distributed sends atomically. [\#7600](https://github.com/ClickHouse/ClickHouse/pull/7600) ([Azat Khuzhin](https://github.com/azat))
+- Throw an exception if we cannot detect the table for a column name in a query. [\#7358](https://github.com/ClickHouse/ClickHouse/pull/7358) ([Artem Zuikov](https://github.com/4ertus2))
+- Add the `merge_max_block_size` setting to `MergeTreeSettings`. [\#7412](https://github.com/ClickHouse/ClickHouse/pull/7412) ([Artem Zuikov](https://github.com/4ertus2))
+- Queries with `HAVING` and without `GROUP BY` assume grouping by constant. So, `SELECT 1 HAVING 1` now returns a result (see the sketch after this list). [\#7496](https://github.com/ClickHouse/ClickHouse/pull/7496) ([Amos Bird](https://github.com/amosbird))
+- Support parsing `(X,)` as a tuple, similar to Python. [\#7501](https://github.com/ClickHouse/ClickHouse/pull/7501), [\#7562](https://github.com/ClickHouse/ClickHouse/pull/7562) ([Amos Bird](https://github.com/amosbird))
+- Make the `range` function behave almost like the Python one. [\#7518](https://github.com/ClickHouse/ClickHouse/pull/7518) ([sundy-li](https://github.com/sundy-li))
+- Add `constraints` columns to the `system.settings` table. [\#7553](https://github.com/ClickHouse/ClickHouse/pull/7553) ([Vitaly Baranov](https://github.com/vitlibar))
+- Better Null format for the TCP handler, so that it's possible to use `select ignore() from table format Null` for performance measurement via clickhouse-client. [\#7606](https://github.com/ClickHouse/ClickHouse/pull/7606) ([Amos Bird](https://github.com/amosbird))
+- Queries like `CREATE TABLE ... AS (SELECT (1, 2))` are parsed correctly. [\#7542](https://github.com/ClickHouse/ClickHouse/pull/7542) ([hcz](https://github.com/hczhcz))
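+
+Two tiny hedged sketches of the parser-level improvements above (expected behavior follows the entries; not verified against every version):
+
+```sql
+SELECT 1 HAVING 1;  -- now returns one row: HAVING without GROUP BY implies grouping by constant
+SELECT (1,);        -- now parsed as a one-element tuple, Python-style
+```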
+
+#### Performance Improvement {#performance-improvement}
+
+- Improve the performance of aggregation over short string keys. [\#6243](https://github.com/ClickHouse/ClickHouse/pull/6243) ([Alexander Kuzmenkov](https://github.com/akuzm), [Amos Bird](https://github.com/amosbird))
+- Run another pass of syntax/expression analysis to get potential optimizations after constant predicates are folded. [\#7497](https://github.com/ClickHouse/ClickHouse/pull/7497) ([Amos Bird](https://github.com/amosbird))
+- Use storage meta info to evaluate trivial `SELECT count() FROM table;` (see the sketch after this list). [\#7510](https://github.com/ClickHouse/ClickHouse/pull/7510) ([Amos Bird](https://github.com/amosbird), [alexey-milovidov](https://github.com/alexey-milovidov))
+- Vectorize processing of `arrayReduce` similarly to the aggregator's `addBatch`. [\#7608](https://github.com/ClickHouse/ClickHouse/pull/7608) ([Amos Bird](https://github.com/amosbird))
+- Minor improvements in the performance of `Kafka` consumption. [\#7475](https://github.com/ClickHouse/ClickHouse/pull/7475) ([Ivan](https://github.com/abyss7))
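+
+A hedged sketch of the trivial-count optimization above (the table name is illustrative):
+
+```sql
+-- with the optimization, this is answered from part-level metadata
+-- instead of reading any column data
+SELECT count() FROM hits;
+```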
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement}
+
+- Add support for cross-compiling to the AARCH64 CPU architecture. Refactor the packager script. [\#7370](https://github.com/ClickHouse/ClickHouse/pull/7370) [\#7539](https://github.com/ClickHouse/ClickHouse/pull/7539) ([Ivan](https://github.com/abyss7))
+- Unpack the darwin-x86\_64 and linux-aarch64 toolchains into the mounted Docker volume when building packages. [\#7534](https://github.com/ClickHouse/ClickHouse/pull/7534) ([Ivan](https://github.com/abyss7))
+- Update the Docker image for the binary packager. [\#7474](https://github.com/ClickHouse/ClickHouse/pull/7474) ([Ivan](https://github.com/abyss7))
+- Fixed compile errors on macOS Catalina. [\#7585](https://github.com/ClickHouse/ClickHouse/pull/7585) ([Ernest Poletaev](https://github.com/ernestp))
+- Some refactoring in the query analysis logic: split a complex class into several simple ones. [\#7454](https://github.com/ClickHouse/ClickHouse/pull/7454) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix the build without submodules. [\#7295](https://github.com/ClickHouse/ClickHouse/pull/7295) ([proller](https://github.com/proller))
+- Better `add_globs` in CMake files. [\#7418](https://github.com/ClickHouse/ClickHouse/pull/7418) ([Amos Bird](https://github.com/amosbird))
+- Remove hardcoded paths in the `unwind` target. [\#7460](https://github.com/ClickHouse/ClickHouse/pull/7460) ([Konstantin Podshumok](https://github.com/podshumok))
+- Allow using the MySQL format without SSL. [\#7524](https://github.com/ClickHouse/ClickHouse/pull/7524) ([proller](https://github.com/proller))
+
+#### Other {#other}
+
+- Added an ANTLR4 grammar for the ClickHouse SQL dialect. [\#7595](https://github.com/ClickHouse/ClickHouse/issues/7595) [\#7596](https://github.com/ClickHouse/ClickHouse/pull/7596) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+## ClickHouse release v19.16 {#clickhouse-release-v19-16}
+
+#### ClickHouse release v19.16.14.65, 2020-03-25 {#clickhouse-release-v19-16-14-65-2020-03-25}
+
+- Fixed a bug in batched calculations of ternary logical operations on multiple arguments (more than 10). [\#8718](https://github.com/ClickHouse/ClickHouse/pull/8718) ([Alexander Kazakov](https://github.com/Akazz)) This bugfix was backported to version 19.16 by a special request from Altinity.
+
+#### ClickHouse release v19.16.14.65, 2020-03-05 {#clickhouse-release-v19-16-14-65-2020-03-05}
+
+- Fixed incompatibility of distributed subqueries with older CH versions. Fixes [\#7851](https://github.com/ClickHouse/ClickHouse/issues/7851) [(tavplubix)](https://github.com/tavplubix)
+- When executing a `CREATE` query, fold constant expressions in storage engine arguments. Replace an empty database name with the current database. Fixes [\#6508](https://github.com/ClickHouse/ClickHouse/issues/6508), [\#3492](https://github.com/ClickHouse/ClickHouse/issues/3492). Also fix the check for a local address in `ClickHouseDictionarySource`. [\#9262](https://github.com/ClickHouse/ClickHouse/pull/9262) [(tavplubix)](https://github.com/tavplubix)
+- Now background merges in the `*MergeTree` table engines family preserve the storage policy volume order more accurately. [\#8549](https://github.com/ClickHouse/ClickHouse/pull/8549) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Prevent data loss in `Kafka` in the rare case when an exception happens after reading the suffix but before commit. Fixes [\#9378](https://github.com/ClickHouse/ClickHouse/issues/9378). Related: [\#7175](https://github.com/ClickHouse/ClickHouse/issues/7175) [\#9507](https://github.com/ClickHouse/ClickHouse/pull/9507) [(filimonov)](https://github.com/filimonov)
+- Fixed a bug leading to server termination when trying to use/drop a `Kafka` table created with wrong parameters. Fixes [\#9494](https://github.com/ClickHouse/ClickHouse/issues/9494). Incorporates [\#9507](https://github.com/ClickHouse/ClickHouse/issues/9507). [\#9513](https://github.com/ClickHouse/ClickHouse/pull/9513) [(filimonov)](https://github.com/filimonov)
+- Allow using `MaterializedView` with subqueries above `Kafka` tables. [\#8197](https://github.com/ClickHouse/ClickHouse/pull/8197) ([filimonov](https://github.com/filimonov))
+
+#### New Feature {#new-feature-1}
+
+- Add the `deduplicate_blocks_in_dependent_materialized_views` option to control the behaviour of idempotent inserts into tables with materialized views. This new feature was added to the bugfix release by a special request from Altinity. [\#9070](https://github.com/ClickHouse/ClickHouse/pull/9070) [(urykhy)](https://github.com/urykhy)
+
+### ClickHouse release v19.16.2.2, 2019-10-30 {#clickhouse-release-v19-16-2-2-2019-10-30}
+
+#### Backward Incompatible Change {#backward-incompatible-change-1}
+
+- Add missing arity validation for count/countIf. [\#7095](https://github.com/ClickHouse/ClickHouse/issues/7095) [\#7298](https://github.com/ClickHouse/ClickHouse/pull/7298) ([Vdimir](https://github.com/Vdimir))
+- Remove the legacy `asterisk_left_columns_only` setting (it was disabled by default). [\#7335](https://github.com/ClickHouse/ClickHouse/pull/7335) ([Artem Zuikov](https://github.com/4ertus2))
+- Format strings for the Template data format are now specified in files. [\#7118](https://github.com/ClickHouse/ClickHouse/pull/7118) ([tavplubix](https://github.com/tavplubix))
+
+#### New Feature {#new-feature-2}
+
+- Introduce uniqCombined64() to calculate cardinality greater than UINT\_MAX. [\#7213](https://github.com/ClickHouse/ClickHouse/pull/7213), [\#7222](https://github.com/ClickHouse/ClickHouse/pull/7222) ([Azat Khuzhin](https://github.com/azat))
+- Support Bloom filter indexes on Array columns. [\#6984](https://github.com/ClickHouse/ClickHouse/pull/6984) ([achimbab](https://github.com/achimbab))
+- Add the function `getMacro(name)` that returns a String with the value of the corresponding `<macros>` entry from the server configuration (see the sketch after this list). [\#7240](https://github.com/ClickHouse/ClickHouse/pull/7240) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Set two configuration options for a dictionary based on an HTTP source: `credentials` and `http-headers`. [\#7092](https://github.com/ClickHouse/ClickHouse/pull/7092) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Add a new ProfileEvent `Merge` that counts the number of launched background merges. [\#7093](https://github.com/ClickHouse/ClickHouse/pull/7093) ([Mikhail Korotov](https://github.com/millb))
+- Add the fullHostName function that returns a fully qualified domain name. [\#7263](https://github.com/ClickHouse/ClickHouse/issues/7263) [\#7291](https://github.com/ClickHouse/ClickHouse/pull/7291) ([sundy-li](https://github.com/sundy-li))
+- Add the functions `arraySplit` and `arrayReverseSplit` which split an array by "cut off" conditions. They are handy in time sequence handling. [\#7294](https://github.com/ClickHouse/ClickHouse/pull/7294) ([hcz](https://github.com/hczhcz))
+- Add new functions that return an array of all matched indices in the multiMatch family of functions. [\#7299](https://github.com/ClickHouse/ClickHouse/pull/7299) ([Danila Kutenin](https://github.com/danlark1))
+- Add a new database engine `Lazy` that is optimized for storing a large number of small log tables. [\#7171](https://github.com/ClickHouse/ClickHouse/pull/7171) ([Nikita Vasilev](https://github.com/nikvas0))
+- Add the aggregate functions groupBitmapAnd, -Or, -Xor for bitmap columns. [\#7109](https://github.com/ClickHouse/ClickHouse/pull/7109) ([Zhichang Yu](https://github.com/yuzhichang))
+- Add the aggregate function combinators -OrNull and -OrDefault, which return null or default values when there is nothing to aggregate. [\#7331](https://github.com/ClickHouse/ClickHouse/pull/7331) ([hcz](https://github.com/hczhcz))
+- Introduce the CustomSeparated data format that supports custom escaping and delimiter rules. [\#7118](https://github.com/ClickHouse/ClickHouse/pull/7118) ([tavplubix](https://github.com/tavplubix))
+- Support Redis as a source of external dictionaries. [\#4361](https://github.com/ClickHouse/ClickHouse/pull/4361) [\#6962](https://github.com/ClickHouse/ClickHouse/pull/6962) ([comunodi](https://github.com/comunodi), [Anton Popov](https://github.com/CurtizJ))
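+
+Two hedged sketches of the new functions above (the macro name and value are made-up config assumptions; the `arraySplit` result follows the entry's description):
+
+```sql
+-- assuming the server config contains <macros><replica>r1</replica></macros>
+SELECT getMacro('replica');  -- expected: 'r1'
+-- the array is cut in front of every element whose flag is 1;
+-- the first group always starts at the first element
+SELECT arraySplit((x, y) -> y, [1, 2, 3, 4, 5], [1, 0, 0, 1, 0]);  -- expected: [[1,2,3],[4,5]]
+```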
+
+#### Bug Fix {#bug-fix-2}
+
+- Fix a wrong query result if it has a `WHERE IN (SELECT ...)` section and `optimize_read_in_order` is used (see the sketch after this list). [\#7371](https://github.com/ClickHouse/ClickHouse/pull/7371) ([Anton Popov](https://github.com/CurtizJ))
+- Disabled the MariaDB authentication plugin, which depends on files outside of the project. [\#7140](https://github.com/ClickHouse/ClickHouse/pull/7140) ([Yuriy Baranov](https://github.com/yurriy))
+- Fix the exception `Cannot convert column ... because it is constant but values of constants are different in source and result` which could rarely happen when the functions `now()`, `today()`, `yesterday()`, `randConstant()` are used. [\#7156](https://github.com/ClickHouse/ClickHouse/pull/7156) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed the issue of using an HTTP keep alive timeout instead of a TCP keep alive timeout. [\#7351](https://github.com/ClickHouse/ClickHouse/pull/7351) ([Vasily Nemkov](https://github.com/Enmk))
+- Fixed a segmentation fault in groupBitmapOr (issue [\#7109](https://github.com/ClickHouse/ClickHouse/issues/7109)). [\#7289](https://github.com/ClickHouse/ClickHouse/pull/7289) ([Zhichang Yu](https://github.com/yuzhichang))
+- For materialized views the commit for Kafka is called after all data was written. [\#7175](https://github.com/ClickHouse/ClickHouse/pull/7175) ([Ivan](https://github.com/abyss7))
+- Fixed a wrong `duration_ms` value in the `system.part_log` table. It was off by a factor of ten. [\#7172](https://github.com/ClickHouse/ClickHouse/pull/7172) ([Vladimir Chebotarev](https://github.com/excitoon))
+- A quick fix to resolve the crash in the LIVE VIEW table and re-enable all LIVE VIEW tests. [\#7201](https://github.com/ClickHouse/ClickHouse/pull/7201) ([vzakaznikov](https://github.com/vzakaznikov))
+- Serialize NULL values correctly in min/max indexes of MergeTree parts. [\#7234](https://github.com/ClickHouse/ClickHouse/pull/7234) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Don't put virtual columns into the .sql metadata when a table is created as `CREATE TABLE AS`. [\#7183](https://github.com/ClickHouse/ClickHouse/pull/7183) ([Ivan](https://github.com/abyss7))
+- Fixed a segmentation fault in the `ATTACH PART` query. [\#7185](https://github.com/ClickHouse/ClickHouse/pull/7185) ([alesapin](https://github.com/alesapin))
+- Fix a wrong result for some queries given by the optimization of empty IN subqueries and empty INNER/RIGHT JOIN. [\#7284](https://github.com/ClickHouse/ClickHouse/pull/7284) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed an error in the LIVE VIEW getHeader() method. [\#7271](https://github.com/ClickHouse/ClickHouse/pull/7271) ([vzakaznikov](https://github.com/vzakaznikov))
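+
+A hedged sketch of the query shape affected by the `optimize_read_in_order` fix above (table and column names are illustrative):
+
+```sql
+SET optimize_read_in_order = 1;
+-- before the fix, combining an IN-subquery filter with reading in order
+-- could produce wrong results
+SELECT * FROM events WHERE user_id IN (SELECT user_id FROM vip_users) ORDER BY event_time;
+```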
+
+#### Improvement {#improvement-1}
+
+- Add a message in case a queue\_wait\_max\_ms wait takes place. [\#7390](https://github.com/ClickHouse/ClickHouse/pull/7390) ([Azat Khuzhin](https://github.com/azat))
+- Made the `s3_min_upload_part_size` setting table-level. [\#7059](https://github.com/ClickHouse/ClickHouse/pull/7059) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Check TTL in StorageFactory. [\#7304](https://github.com/ClickHouse/ClickHouse/pull/7304) ([sundy-li](https://github.com/sundy-li))
+- Squash left-hand blocks in partial merge join (optimization). [\#7122](https://github.com/ClickHouse/ClickHouse/pull/7122) ([Artem Zuikov](https://github.com/4ertus2))
+- Do not allow non-deterministic functions in mutations of replicated table engines, because they can introduce inconsistencies between replicas (see the sketch after this list). [\#7247](https://github.com/ClickHouse/ClickHouse/pull/7247) ([Alexander Kazakov](https://github.com/Akazz))
+- Disable the memory tracker while converting an exception stack trace to a string. It can prevent the loss of error messages of type `Memory limit exceeded` on the server, which caused the `Attempt to read after eof` exception on the client. [\#7264](https://github.com/ClickHouse/ClickHouse/pull/7264) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Miscellaneous format improvements. Resolves [\#6033](https://github.com/ClickHouse/ClickHouse/issues/6033), [\#2633](https://github.com/ClickHouse/ClickHouse/issues/2633), [\#6611](https://github.com/ClickHouse/ClickHouse/issues/6611), [\#6742](https://github.com/ClickHouse/ClickHouse/issues/6742) [\#7215](https://github.com/ClickHouse/ClickHouse/pull/7215) ([tavplubix](https://github.com/tavplubix))
+- ClickHouse ignores values on the right side of the IN operator that are not convertible to the left side type. Make it work properly for compound types – Array and Tuple. [\#7283](https://github.com/ClickHouse/ClickHouse/pull/7283) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Support missing inequalities for ASOF JOIN. It's possible to join the less-or-equal variant and the strict greater and less variants for the ASOF column in ON syntax. [\#7282](https://github.com/ClickHouse/ClickHouse/pull/7282) ([Artem Zuikov](https://github.com/4ertus2))
+- Optimize partial merge join. [\#7070](https://github.com/ClickHouse/ClickHouse/pull/7070) ([Artem Zuikov](https://github.com/4ertus2))
+- Do not use more than 98K of memory in the uniqCombined function. [\#7236](https://github.com/ClickHouse/ClickHouse/pull/7236), [\#7270](https://github.com/ClickHouse/ClickHouse/pull/7270) ([Azat Khuzhin](https://github.com/azat))
+- Flush parts of the right-hand joining table on disk in PartialMergeJoin (if there is not enough memory). Load the data back when needed. [\#7186](https://github.com/ClickHouse/ClickHouse/pull/7186) ([Artem Zuikov](https://github.com/4ertus2))
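+
+A hedged sketch of the kind of mutation that is now rejected on replicated engines (table and column names are illustrative):
+
+```sql
+-- rand() is non-deterministic: each replica would compute different values,
+-- so this mutation is now refused on Replicated*MergeTree tables
+ALTER TABLE replicated_table UPDATE value = rand() WHERE 1;
+```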
+
+#### Performance Improvement {#performance-improvement-1}
+
+- Speed up joinGet with const arguments by avoiding data duplication (see the sketch after this list). [\#7359](https://github.com/ClickHouse/ClickHouse/pull/7359) ([Amos Bird](https://github.com/amosbird))
+- Return early if the subquery is empty. [\#7007](https://github.com/ClickHouse/ClickHouse/pull/7007) ([小路](https://github.com/nicelulu))
+- Optimize the parsing of SQL expressions in Values. [\#6781](https://github.com/ClickHouse/ClickHouse/pull/6781) ([tavplubix](https://github.com/tavplubix))
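+
+A hedged sketch of a `joinGet` call with constant arguments, the case sped up above (the Join-engine table is a made-up example):
+
+```sql
+CREATE TABLE rates (cur String, rate Float64) ENGINE = Join(ANY, LEFT, cur);
+INSERT INTO rates VALUES ('USD', 1.0), ('EUR', 1.1);
+SELECT joinGet('rates', 'rate', 'EUR');  -- all three arguments are constants
+```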
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-1}
+
+- Disable some contribs for cross-compilation to Mac OS. [\#7101](https://github.com/ClickHouse/ClickHouse/pull/7101) ([Ivan](https://github.com/abyss7))
+- Add a missing linkage with PocoXML for clickhouse\_common\_io. [\#7200](https://github.com/ClickHouse/ClickHouse/pull/7200) ([Azat Khuzhin](https://github.com/azat))
+- Accept multiple test filter arguments in clickhouse-test. [\#7226](https://github.com/ClickHouse/ClickHouse/pull/7226) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Enable musl and jemalloc for ARM. [\#7300](https://github.com/ClickHouse/ClickHouse/pull/7300) ([Amos Bird](https://github.com/amosbird))
+- Added the `--client-option` parameter to `clickhouse-test` to pass additional options to the client. [\#7277](https://github.com/ClickHouse/ClickHouse/pull/7277) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Preserve existing configs on rpm package upgrade. [\#7103](https://github.com/ClickHouse/ClickHouse/pull/7103) ([filimonov](https://github.com/filimonov))
+- Fix errors detected by PVS. [\#7153](https://github.com/ClickHouse/ClickHouse/pull/7153) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix the build for Darwin. [\#7149](https://github.com/ClickHouse/ClickHouse/pull/7149) ([Ivan](https://github.com/abyss7))
+- glibc 2.29 compatibility. [\#7142](https://github.com/ClickHouse/ClickHouse/pull/7142) ([Amos Bird](https://github.com/amosbird))
+- Make sure dh\_clean does not touch potential source files. [\#7205](https://github.com/ClickHouse/ClickHouse/pull/7205) ([Amos Bird](https://github.com/amosbird))
+- Attempt to avoid a conflict when updating from an altinity rpm - it has its config file packaged separately in clickhouse-server-common. [\#7073](https://github.com/ClickHouse/ClickHouse/pull/7073) ([filimonov](https://github.com/filimonov))
+- Optimize some header files for faster rebuilds. [\#7212](https://github.com/ClickHouse/ClickHouse/pull/7212), [\#7231](https://github.com/ClickHouse/ClickHouse/pull/7231) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Add performance tests for Date and DateTime. [\#7332](https://github.com/ClickHouse/ClickHouse/pull/7332) ([Vasily Nemkov](https://github.com/Enmk))
+- Fix some tests that contained non-deterministic mutations. [\#7132](https://github.com/ClickHouse/ClickHouse/pull/7132) ([Alexander Kazakov](https://github.com/Akazz))
+- Add a build with MemorySanitizer to CI. [\#7066](https://github.com/ClickHouse/ClickHouse/pull/7066) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Avoid the use of uninitialized values in MetricsTransmitter. [\#7158](https://github.com/ClickHouse/ClickHouse/pull/7158) ([Azat Khuzhin](https://github.com/azat))
+- Fix some issues in Fields found by MemorySanitizer. [\#7135](https://github.com/ClickHouse/ClickHouse/pull/7135), [\#7179](https://github.com/ClickHouse/ClickHouse/pull/7179) ([Alexander Kuzmenkov](https://github.com/akuzm)), [\#7376](https://github.com/ClickHouse/ClickHouse/pull/7376) ([Amos Bird](https://github.com/amosbird))
+- Fix undefined behavior in murmurhash32. [\#7388](https://github.com/ClickHouse/ClickHouse/pull/7388) ([Amos Bird](https://github.com/amosbird))
+- Fix undefined behavior in StoragesInfoStream. [\#7384](https://github.com/ClickHouse/ClickHouse/pull/7384) ([tavplubix](https://github.com/tavplubix))
+- Fixed constant expression folding for external database engines (MySQL, ODBC, JDBC). In previous versions it wasn't working for multiple constant expressions and was not working at all for Date, DateTime and UUID. This fixes [\#7245](https://github.com/ClickHouse/ClickHouse/issues/7245) [\#7252](https://github.com/ClickHouse/ClickHouse/pull/7252) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix a ThreadSanitizer data race error in LIVE VIEW when accessing the no\_users\_thread variable. [\#7353](https://github.com/ClickHouse/ClickHouse/pull/7353) ([vzakaznikov](https://github.com/vzakaznikov))
+- Get rid of malloc symbols in libcommon. [\#7134](https://github.com/ClickHouse/ClickHouse/pull/7134), [\#7065](https://github.com/ClickHouse/ClickHouse/pull/7065) ([Amos Bird](https://github.com/amosbird))
+- Add a global flag ENABLE\_LIBRARIES for disabling all libraries. [\#7063](https://github.com/ClickHouse/ClickHouse/pull/7063) ([proller](https://github.com/proller))
+
+#### Code cleanup {#code-cleanup}
+
+- Generalize the configuration repository to prepare for DDL for Dictionaries. [\#7155](https://github.com/ClickHouse/ClickHouse/pull/7155) ([alesapin](https://github.com/alesapin))
+- Parser for dictionaries DDL without any semantics. [\#7209](https://github.com/ClickHouse/ClickHouse/pull/7209) ([alesapin](https://github.com/alesapin))
+- Split ParserCreateQuery into different smaller parsers. [\#7253](https://github.com/ClickHouse/ClickHouse/pull/7253) ([alesapin](https://github.com/alesapin))
+- Small refactoring and renaming near external dictionaries. [\#7111](https://github.com/ClickHouse/ClickHouse/pull/7111) ([alesapin](https://github.com/alesapin))
+- Refactor some code to prepare for role-based access control. [\#7235](https://github.com/ClickHouse/ClickHouse/pull/7235) ([Vitaly Baranov](https://github.com/vitlibar))
+- Some improvements in the DatabaseOrdinary code. [\#7086](https://github.com/ClickHouse/ClickHouse/pull/7086) ([Nikita Vasilev](https://github.com/nikvas0))
+- Do not use iterators in the find() and emplace() methods of hash tables. [\#7026](https://github.com/ClickHouse/ClickHouse/pull/7026) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Fix getMultipleValuesFromConfig in case when the parameter root is not empty. [\#7374](https://github.com/ClickHouse/ClickHouse/pull/7374) ([Mikhail Korotov](https://github.com/millb))
+- Remove some copy-paste (TemporaryFile and TemporaryFileStream). [\#7166](https://github.com/ClickHouse/ClickHouse/pull/7166) ([Artem Zuikov](https://github.com/4ertus2))
+- Improved code readability a little bit (`MergeTreeData::getActiveContainingPart`). [\#7361](https://github.com/ClickHouse/ClickHouse/pull/7361) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Wait for all scheduled jobs that use local objects if `ThreadPool::schedule(...)` throws an exception. Rename `ThreadPool::schedule(...)` to `ThreadPool::scheduleOrThrowOnError(...)` and fix the comments to make it obvious that it may throw. [\#7350](https://github.com/ClickHouse/ClickHouse/pull/7350) ([tavplubix](https://github.com/tavplubix))
+
+## ClickHouse release 19.15 {#clickhouse-release-19-15}
+
+### ClickHouse release 19.15.4.10, 2019-10-31 {#clickhouse-release-19-15-4-10-2019-10-31}
+
+#### Bug Fix {#bug-fix-3}
+
+- Added handling of SQL\_TINYINT and SQL\_BIGINT, and fixed handling of SQL\_FLOAT data source types in the ODBC Bridge. [\#7491](https://github.com/ClickHouse/ClickHouse/pull/7491) ([Denis Glazachev](https://github.com/traceon))
+- Allowed to have some parts on the destination disk or volume in MOVE PARTITION. [\#7434](https://github.com/ClickHouse/ClickHouse/pull/7434) ([Vladimir Chebotarev](https://github.com/excitoon))
+- Fixed NULL values in nullable columns through the ODBC bridge. [\#7402](https://github.com/ClickHouse/ClickHouse/pull/7402) ([Vasily Nemkov](https://github.com/Enmk))
+- Fixed INSERT into a Distributed non-local node with MATERIALIZED columns. [\#7377](https://github.com/ClickHouse/ClickHouse/pull/7377) ([Azat Khuzhin](https://github.com/azat))
+- Fixed the getMultipleValuesFromConfig function. [\#7374](https://github.com/ClickHouse/ClickHouse/pull/7374) ([Mikhail Korotov](https://github.com/millb))
+- Fixed the issue of using an HTTP keep alive timeout instead of a TCP keep alive timeout. [\#7351](https://github.com/ClickHouse/ClickHouse/pull/7351) ([Vasily Nemkov](https://github.com/Enmk))
+- Wait for all jobs to finish on exception (fixes rare segfaults). [\#7350](https://github.com/ClickHouse/ClickHouse/pull/7350) ([tavplubix](https://github.com/tavplubix))
+- Don't push to MVs when inserting into a Kafka table. [\#7265](https://github.com/ClickHouse/ClickHouse/pull/7265) ([Ivan](https://github.com/abyss7))
+- Disable the memory tracker for the exception stack. [\#7264](https://github.com/ClickHouse/ClickHouse/pull/7264) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed bad code in transforming a query for an external database. [\#7252](https://github.com/ClickHouse/ClickHouse/pull/7252) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Avoid the use of uninitialized values in MetricsTransmitter. [\#7158](https://github.com/ClickHouse/ClickHouse/pull/7158) ([Azat Khuzhin](https://github.com/azat))
+- Added an example config with macros for tests ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse release 19.15.3.6, 2019-10-09 {#clickhouse-release-19-15-3-6-2019-10-09}
+
+#### Bug Fix {#bug-fix-4}
+
+- Fixed bad\_variant in a hashed dictionary. ([alesapin](https://github.com/alesapin))
+- Fixed a bug with a segmentation fault in the ATTACH PART query. ([alesapin](https://github.com/alesapin))
+- Fixed time calculation in `MergeTreeData`. ([Vladimir Chebotarev](https://github.com/excitoon))
+- Commit to Kafka explicitly after the writing is finalized. [\#7175](https://github.com/ClickHouse/ClickHouse/pull/7175) ([Ivan](https://github.com/abyss7))
+- Serialize NULL values correctly in min/max indexes of MergeTree parts. [\#7234](https://github.com/ClickHouse/ClickHouse/pull/7234) ([Alexander Kuzmenkov](https://github.com/akuzm))
+
+### ClickHouse release 19.15.2.2, 2019-10-01 {#clickhouse-release-19-15-2-2-2019-10-01}
+
+#### New Feature {#new-feature-3}
+
+- Tiered storage: support using multiple storage volumes for tables with the MergeTree engine. It's possible to store fresh data on SSD and automatically move old data to HDD ([example](https://clickhouse.github.io/clickhouse-presentations/meetup30/new_features/#12)). [\#4918](https://github.com/ClickHouse/ClickHouse/pull/4918) ([Igr](https://github.com/ObjatieGroba)) [\#6489](https://github.com/ClickHouse/ClickHouse/pull/6489) ([alesapin](https://github.com/alesapin))
+- Add the table function `input` for reading incoming data in an `INSERT SELECT` query (see the sketch after this list). [\#5450](https://github.com/ClickHouse/ClickHouse/pull/5450) ([palasonic1](https://github.com/palasonic1)) [\#6832](https://github.com/ClickHouse/ClickHouse/pull/6832) ([Anton Popov](https://github.com/CurtizJ))
+- Add a `sparse_hashed` dictionary layout that is functionally equivalent to the `hashed` layout, but is more memory efficient. It uses about half as much memory at the cost of slower value retrieval. [\#6894](https://github.com/ClickHouse/ClickHouse/pull/6894) ([Azat Khuzhin](https://github.com/azat))
+- Implement the ability to define a list of users for access to dictionaries. Only the currently connected database is used. [\#6907](https://github.com/ClickHouse/ClickHouse/pull/6907) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Add the `LIMIT` option to the `SHOW` query. [\#6944](https://github.com/ClickHouse/ClickHouse/pull/6944) ([Philipp Malkovsky](https://github.com/malkfilipp))
+- Add the `bitmapSubsetLimit(bitmap, range_start, limit)` function that returns a subset of the smallest `limit` values in the set that are not smaller than `range_start`. [\#6957](https://github.com/ClickHouse/ClickHouse/pull/6957) ([Zhichang Yu](https://github.com/yuzhichang))
+- Add the `bitmapMin` and `bitmapMax` functions. [\#6970](https://github.com/ClickHouse/ClickHouse/pull/6970) ([Zhichang Yu](https://github.com/yuzhichang))
+- Add the function `repeat`, related to [issue-6648](https://github.com/ClickHouse/ClickHouse/issues/6648). [\#6999](https://github.com/ClickHouse/ClickHouse/pull/6999) ([flynn](https://github.com/ucasFL))
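+
+A hedged sketch of the new `input` table function (table and column names are illustrative; the data itself is sent together with the query, e.g. piped into clickhouse-client):
+
+```sql
+-- transform incoming CSV rows on the fly while inserting into `target`
+INSERT INTO target SELECT lower(name), value * 2
+FROM input('name String, value Int32') FORMAT CSV
+```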
+
+#### Experimental Feature {#experimental-feature-1}
+
+- Implement (in memory) a merge join variant that does not change the current pipeline. The result is partially sorted by the merge key. Set `partial_merge_join = 1` to use this feature. The Merge Join is still in development. [\#6940](https://github.com/ClickHouse/ClickHouse/pull/6940) ([Artem Zuikov](https://github.com/4ertus2))
+- Add the `S3` engine and table function. It is still in development (no authentication support yet). [\#5596](https://github.com/ClickHouse/ClickHouse/pull/5596) ([Vladimir Chebotarev](https://github.com/excitoon))
+
+#### Improvement {#improvement-2}
+
+- Every message read from Kafka is inserted atomically. This resolves almost all known issues with the Kafka engine. [\#6950](https://github.com/ClickHouse/ClickHouse/pull/6950) ([Ivan](https://github.com/abyss7))
+- Improvements for failover of distributed queries. Shorten the recovery time; it is also now configurable and can be seen in `system.clusters`. [\#6399](https://github.com/ClickHouse/ClickHouse/pull/6399) ([Vasily Nemkov](https://github.com/Enmk))
+- Support numeric values for Enums directly in the `IN` section. \#6766 [\#6941](https://github.com/ClickHouse/ClickHouse/pull/6941) ([dimarub2000](https://github.com/dimarub2000))
+- Support (optional, disabled by default) redirects on URL storage. [\#6914](https://github.com/ClickHouse/ClickHouse/pull/6914) ([maqroll](https://github.com/maqroll))
+- Add an information message when a client with an older version connects to a server. [\#6893](https://github.com/ClickHouse/ClickHouse/pull/6893) ([Philipp Malkovsky](https://github.com/malkfilipp))
+- Remove the maximum backoff sleep time limit for sending data in Distributed tables. [\#6895](https://github.com/ClickHouse/ClickHouse/pull/6895) ([Azat Khuzhin](https://github.com/azat))
+- Add the ability to send profile events (counters) with cumulative values to graphite. It can be enabled under `<graphite>` in the server `config.xml`. [\#6969](https://github.com/ClickHouse/ClickHouse/pull/6969) ([Azat Khuzhin](https://github.com/azat))
+- Add automatic casting of type `T` to `LowCardinality(T)` while inserting data in a column of type `LowCardinality(T)` in Native format via HTTP. [\#6891](https://github.com/ClickHouse/ClickHouse/pull/6891) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Add the ability to use the function `hex` without `reinterpretAsString` for `Float32`, `Float64` (see the sketch after this list). [\#7024](https://github.com/ClickHouse/ClickHouse/pull/7024) ([Mikhail Korotov](https://github.com/millb))
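+
+A hedged sketch of `hex` applied directly to floats (the expected strings assume the values are dumped as their little-endian in-memory bytes):
+
+```sql
+SELECT hex(toFloat32(1.5));  -- expected: '0000C03F'
+SELECT hex(toFloat64(1.5));  -- expected: '000000000000F83F'
+```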
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-2}
+
+- Add a gdb-index to the clickhouse binary with debug info. It will speed up the startup time of `gdb`. [\#6947](https://github.com/ClickHouse/ClickHouse/pull/6947) ([alesapin](https://github.com/alesapin))
+- Speed up deb packaging with a patched dpkg-deb which uses `pigz`. [\#6960](https://github.com/ClickHouse/ClickHouse/pull/6960) ([alesapin](https://github.com/alesapin))
+- Set `enable_fuzzing = 1` to enable libfuzzer instrumentation of all the project code. [\#7042](https://github.com/ClickHouse/ClickHouse/pull/7042) ([kyprizel](https://github.com/kyprizel))
+- Add a split build test in CI. [\#7061](https://github.com/ClickHouse/ClickHouse/pull/7061) ([alesapin](https://github.com/alesapin))
+- Add a build with MemorySanitizer to CI. [\#7066](https://github.com/ClickHouse/ClickHouse/pull/7066) ([Alexander Kuzmenkov](https://github.com/akuzm))
+- Replace `libsparsehash` with `sparsehash-c11`. [\#6965](https://github.com/ClickHouse/ClickHouse/pull/6965) ([Azat Khuzhin](https://github.com/azat))
+
+#### Bug Fix {#bug-fix-5}
+
+- Fixed a performance degradation of index analysis on complex keys on big tables. This fixes \#6924. [\#7075](https://github.com/ClickHouse/ClickHouse/pull/7075) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix a logical error causing segfaults when selecting from a Kafka empty topic. [\#6909](https://github.com/ClickHouse/ClickHouse/pull/6909) ([Ivan](https://github.com/abyss7))
+- Fixed a too early MySQL connection close in `MySQLBlockInputStream.cpp`. [\#6882](https://github.com/ClickHouse/ClickHouse/pull/6882) ([Clément Rodriguez](https://github.com/clemrodriguez))
+- Returned support for very old Linux kernels (fixes [\#6841](https://github.com/ClickHouse/ClickHouse/issues/6841)). [\#6853](https://github.com/ClickHouse/ClickHouse/pull/6853) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix possible data loss in an `insert select` query in case of an empty block in the input stream. \#6834 \#6862 [\#6911](https://github.com/ClickHouse/ClickHouse/pull/6911) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix the function `arrayEnumerateUniqRanked` with empty arrays in params. [\#6928](https://github.com/ClickHouse/ClickHouse/pull/6928) ([proller](https://github.com/proller))
+- Fix complex queries with array joins and global subqueries. [\#6934](https://github.com/ClickHouse/ClickHouse/pull/6934) ([Ivan](https://github.com/abyss7))
+- Fix the `Unknown identifier` error in ORDER BY and GROUP BY with multiple JOINs. [\#7022](https://github.com/ClickHouse/ClickHouse/pull/7022) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed an `MSan` warning while executing functions with a `LowCardinality` argument. [\#7062](https://github.com/ClickHouse/ClickHouse/pull/7062) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+
+#### Backward Incompatible Change {#backward-incompatible-change-2}
+
+- Changed the serialization format of bitmap\* aggregate function states to improve performance. Serialized states of bitmap\* from previous versions cannot be read. [\#6908](https://github.com/ClickHouse/ClickHouse/pull/6908) ([Zhichang Yu](https://github.com/yuzhichang))
+
+## ClickHouse release 19.14 {#clickhouse-release-19-14}
+
+### ClickHouse release 19.14.7.15, 2019-10-02 {#clickhouse-release-19-14-7-15-2019-10-02}
+
+#### Bug Fix {#bug-fix-6}
+
+- This release also contains all bug fixes from 19.11.12.69.
+- Fixed compatibility for distributed queries between 19.14 and earlier versions. This fixes [\#7068](https://github.com/ClickHouse/ClickHouse/issues/7068). [\#7069](https://github.com/ClickHouse/ClickHouse/pull/7069) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse release 19.14.6.12, 2019-09-19 {#clickhouse-release-19-14-6-12-2019-09-19}
+
+#### Bug Fix {#bug-fix-7}
+
+- Fix for the function `arrayEnumerateUniqRanked` with empty arrays in params. [\#6928](https://github.com/ClickHouse/ClickHouse/pull/6928) ([proller](https://github.com/proller))
+- Fixed a subquery name in queries with `ARRAY JOIN` and `GLOBAL IN subquery` with an alias. Use the subquery alias for the external table name if it is specified. [\#6934](https://github.com/ClickHouse/ClickHouse/pull/6934) ([Ivan](https://github.com/abyss7))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-3}
+
+- Fix the [flapping](https://clickhouse-test-reports.s3.yandex.net/6944/aab95fd5175a513413c7395a73a82044bdafb906/functional_stateless_tests_(debug).html) test `00715_fetch_merged_or_mutated_part_zookeeper` by rewriting it to a shell script, because it needs to wait for mutations to apply. [\#6977](https://github.com/ClickHouse/ClickHouse/pull/6977) ([Alexander Kazakov](https://github.com/Akazz))
+- Fixed UBSan and MemSan failures in the function `groupUniqArray` with an empty array argument. It was caused by placing an empty `PaddedPODArray` into a hash table zero cell because the constructor for the zero cell value was not called. [\#6937](https://github.com/ClickHouse/ClickHouse/pull/6937) ([Amos Bird](https://github.com/amosbird))
+
+### ClickHouse release 19.14.3.3, 2019-09-10 {#clickhouse-release-19-14-3-3-2019-09-10}
+
+#### New Feature {#new-feature-4}
+
+- `WITH FILL` modifier for `ORDER BY` (continuation of [\#5069](https://github.com/ClickHouse/ClickHouse/issues/5069); see the sketch after this list). [\#6610](https://github.com/ClickHouse/ClickHouse/pull/6610) ([Anton Popov](https://github.com/CurtizJ))
+- `WITH TIES` modifier for `LIMIT` (continuation of [\#5069](https://github.com/ClickHouse/ClickHouse/issues/5069)). [\#6610](https://github.com/ClickHouse/ClickHouse/pull/6610) ([Anton Popov](https://github.com/CurtizJ))
+- Parse an unquoted `NULL` literal as NULL (if the setting `format_csv_unquoted_null_literal_as_null=1`). Initialize null fields with default values if the data type of the field is not nullable (if the setting `input_format_null_as_default=1`). [\#5990](https://github.com/ClickHouse/ClickHouse/issues/5990) [\#6055](https://github.com/ClickHouse/ClickHouse/pull/6055) ([tavplubix](https://github.com/tavplubix))
+- Support for wildcards in paths of the table functions `file` and `hdfs`. If the path contains wildcards, the table will be read-only. Example of usage: `select * from hdfs('hdfs://hdfs1:9000/some_dir/another_dir/*/file{0..9}{0..9}')` and `select * from file('some_dir/{some_file,another_file,yet_another}.tsv', 'TSV', 'value UInt32')`. [\#6092](https://github.com/ClickHouse/ClickHouse/pull/6092) ([Olga Khvostikova](https://github.com/stavrolia))
+- New `system.metric_log` table, which stores values of `system.events` and `system.metrics` with a specified time interval. [\#6363](https://github.com/ClickHouse/ClickHouse/issues/6363) [\#6467](https://github.com/ClickHouse/ClickHouse/pull/6467) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) [\#6530](https://github.com/ClickHouse/ClickHouse/pull/6530) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Allow writing ClickHouse text logs to the `system.text_log` table. [\#6037](https://github.com/ClickHouse/ClickHouse/issues/6037) [\#6103](https://github.com/ClickHouse/ClickHouse/pull/6103) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) [\#6164](https://github.com/ClickHouse/ClickHouse/pull/6164) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Show private symbols in stack traces (this is done via parsing symbol tables of ELF files). Added information about the file and line number in stack traces if debug info is present. Speed up symbol name lookup with indexing symbols present in the program. Added new SQL functions for introspection: `demangle` and `addressToLine`. Renamed the function `symbolizeAddress` to `addressToSymbol` for consistency. The function `addressToSymbol` returns a mangled name for performance reasons, so you have to apply `demangle`. Added the setting `allow_introspection_functions`, which is turned off by default. [\#6201](https://github.com/ClickHouse/ClickHouse/pull/6201) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Table function `values` (the name is case-insensitive). It allows reading from a `VALUES` list proposed in [\#5984](https://github.com/ClickHouse/ClickHouse/issues/5984). Example: `SELECT * FROM VALUES('a UInt64, s String', (1, 'one'), (2, 'two'), (3, 'three'))`. [\#6217](https://github.com/ClickHouse/ClickHouse/issues/6217). [\#6209](https://github.com/ClickHouse/ClickHouse/pull/6209) ([dimarub2000](https://github.com/dimarub2000))
+- Added the ability to alter storage settings. Syntax: `ALTER TABLE <table> MODIFY SETTING <setting_name> = <value>` (see the sketch after this list). [\#6366](https://github.com/ClickHouse/ClickHouse/pull/6366) [\#6669](https://github.com/ClickHouse/ClickHouse/pull/6669) [\#6685](https://github.com/ClickHouse/ClickHouse/pull/6685) ([alesapin](https://github.com/alesapin))
+- Support for removing detached parts. Syntax: `ALTER TABLE <table_name> DROP DETACHED PART '<part_id>'`. [\#6158](https://github.com/ClickHouse/ClickHouse/pull/6158) ([tavplubix](https://github.com/tavplubix))
+- Table constraints. Allows adding a constraint to a table definition which will be checked at insert. [\#5273](https://github.com/ClickHouse/ClickHouse/pull/5273) ([Gleb Novikov](https://github.com/NanoBjorn)) [\#6652](https://github.com/ClickHouse/ClickHouse/pull/6652) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Support for cascaded materialized views. [\#6324](https://github.com/ClickHouse/ClickHouse/pull/6324) ([Amos Bird](https://github.com/amosbird))
+- Turn on the query profiler by default to sample every query execution thread once a second. [\#6283](https://github.com/ClickHouse/ClickHouse/pull/6283) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Input format `ORC`. [\#6454](https://github.com/ClickHouse/ClickHouse/pull/6454) [\#6703](https://github.com/ClickHouse/ClickHouse/pull/6703) ([akonyaev90](https://github.com/akonyaev90))
+- Added two new functions: `sigmoid` and `tanh` (useful for machine learning applications). [\#6254](https://github.com/ClickHouse/ClickHouse/pull/6254) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Functions `hasToken(haystack, token)`, `hasTokenCaseInsensitive(haystack, token)` to check if a given token is in the haystack. A token is a maximal-length substring between two non-alphanumeric ASCII characters (or the boundaries of the haystack). A token must be a constant string. Supported by the tokenbf\_v1 index specialization. [\#6596](https://github.com/ClickHouse/ClickHouse/pull/6596), [\#6662](https://github.com/ClickHouse/ClickHouse/pull/6662) ([Vasily Nemkov](https://github.com/Enmk))
+- New function `neighbor(value, offset[, default_value])`. Allows reaching the prev/next value within a column in a block of data. [\#5925](https://github.com/ClickHouse/ClickHouse/pull/5925) ([Alex Krash](https://github.com/alex-krash)) [6685365ab8c5b74f9650492c88a012596eb1b0c6](https://github.com/ClickHouse/ClickHouse/commit/6685365ab8c5b74f9650492c88a012596eb1b0c6) [341e2e4587a18065c2da1ca888c73389f48ce36c](https://github.com/ClickHouse/ClickHouse/commit/341e2e4587a18065c2da1ca888c73389f48ce36c) [Alexey Milovidov](https://github.com/alexey-milovidov)
+- Created the function `currentUser()`, returning the login of the authorized user. Added the alias `user()` for compatibility with MySQL. [\#6470](https://github.com/ClickHouse/ClickHouse/pull/6470) ([Alex Krash](https://github.com/alex-krash))
+- New aggregate functions `quantilesExactInclusive` and `quantilesExactExclusive` which were proposed in [\#5885](https://github.com/ClickHouse/ClickHouse/issues/5885). [\#6477](https://github.com/ClickHouse/ClickHouse/pull/6477) ([dimarub2000](https://github.com/dimarub2000))
+- Function `bitmapRange(bitmap, range_begin, range_end)` which returns a new set with the specified range (not including `range_end`). [\#6314](https://github.com/ClickHouse/ClickHouse/pull/6314) ([Zhichang Yu](https://github.com/yuzhichang))
+- Function `geohashesInBox(longitude_min, latitude_min, longitude_max, latitude_max, precision)` which creates an array of precision-long strings of geohash boxes covering the provided area. [\#6127](https://github.com/ClickHouse/ClickHouse/pull/6127) ([Vasily Nemkov](https://github.com/Enmk))
+- Implement support for an INSERT query with `Kafka` tables. [\#6012](https://github.com/ClickHouse/ClickHouse/pull/6012) ([Ivan](https://github.com/abyss7))
+- Added support for the `_partition` and `_timestamp` virtual columns for the Kafka engine. [\#6400](https://github.com/ClickHouse/ClickHouse/pull/6400) ([Ivan](https://github.com/abyss7))
+- Possibility to remove sensitive data from `query_log`, server logs, and the process list with regexp-based rules. [\#5710](https://github.com/ClickHouse/ClickHouse/pull/5710) ([filimonov](https://github.com/filimonov))
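+
+A few hedged sketches of the new features above (the table name and the altered setting are illustrative; expected outputs follow the entries' descriptions):
+
+```sql
+-- alter a storage setting in place:
+ALTER TABLE hits MODIFY SETTING merge_with_ttl_timeout = 3600;
+-- neighbor() reaches the next value within the current data block,
+-- returning the type default (0) past its end:
+SELECT number, neighbor(number, 1) FROM numbers(4);  -- pairs (0,1) (1,2) (2,3) (3,0)
+-- WITH FILL inserts the missing values of the ORDER BY expression:
+SELECT n FROM (SELECT number * 2 AS n FROM numbers(4)) ORDER BY n WITH FILL;  -- 0..6
+```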
+
+#### Experimental Feature {#experimental-feature-2}
+
+- Input and output data format `Template`. It allows specifying a custom format string for input and output. [\#4354](https://github.com/ClickHouse/ClickHouse/issues/4354) [\#6727](https://github.com/ClickHouse/ClickHouse/pull/6727) ([tavplubix](https://github.com/tavplubix))
+- Implementation of `LIVE VIEW` tables that were originally proposed in [\#2898](https://github.com/ClickHouse/ClickHouse/pull/2898), prepared in [\#3925](https://github.com/ClickHouse/ClickHouse/issues/3925), and then updated in [\#5541](https://github.com/ClickHouse/ClickHouse/issues/5541). See [\#5541](https://github.com/ClickHouse/ClickHouse/issues/5541) for a detailed description. [\#5541](https://github.com/ClickHouse/ClickHouse/issues/5541) ([vzakaznikov](https://github.com/vzakaznikov)) [\#6425](https://github.com/ClickHouse/ClickHouse/pull/6425) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) [\#6656](https://github.com/ClickHouse/ClickHouse/pull/6656) ([vzakaznikov](https://github.com/vzakaznikov)) Note that the `LIVE VIEW` feature may be removed in the next versions.
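+
+A hedged sketch of the experimental `LIVE VIEW` usage (the table name is illustrative; being experimental, the feature may require explicit enabling in other versions):
+
+```sql
+CREATE LIVE VIEW lv AS SELECT count() FROM hits;
+WATCH lv;  -- streams the updated result as the underlying data changes
+```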
+
+#### Bug Fix {#bug-fix-8}
+
+- This release also contains all bug fixes from 19.13 and 19.11.
+- Fix a segmentation fault when the table has skip indices and vertical merge happens. [\#6723](https://github.com/ClickHouse/ClickHouse/pull/6723) ([alesapin](https://github.com/alesapin))
+- Fix per-column TTL with non-trivial column defaults. Previously, in case of a forced TTL merge with an `OPTIMIZE ... FINAL` query, expired values were replaced by type defaults instead of user-specified column defaults. [\#6796](https://github.com/ClickHouse/ClickHouse/pull/6796) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed the Kafka messages duplication problem on normal server restart. [\#6597](https://github.com/ClickHouse/ClickHouse/pull/6597) ([Ivan](https://github.com/abyss7))
+- Fixed an infinite loop when reading Kafka messages. Do not pause/resume the consumer on subscription at all - otherwise it may get paused indefinitely in some scenarios. [\#6354](https://github.com/ClickHouse/ClickHouse/pull/6354) ([Ivan](https://github.com/abyss7))
+- Fix the `Key expression contains comparison between inconvertible types` exception in the `bitmapContains` function. [\#6136](https://github.com/ClickHouse/ClickHouse/issues/6136) [\#6146](https://github.com/ClickHouse/ClickHouse/issues/6146) [\#6156](https://github.com/ClickHouse/ClickHouse/pull/6156) ([dimarub2000](https://github.com/dimarub2000))
+- Fixed a segfault with `optimize_skip_unused_shards` enabled and a missing sharding key. [\#6384](https://github.com/ClickHouse/ClickHouse/pull/6384) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed wrong code in mutations that may lead to memory corruption. Fixed a segfault with reads at the address `0x14c0` that may have happened due to concurrent `DROP TABLE` and `SELECT` from `system.parts` or `system.parts_columns`. Fixed a race condition in the preparation of mutation queries. Fixed a deadlock caused by `OPTIMIZE` of Replicated tables and concurrent modification operations like ALTERs. [\#6514](https://github.com/ClickHouse/ClickHouse/pull/6514) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Removed extra verbose logging in the MySQL interface. [\#6389](https://github.com/ClickHouse/ClickHouse/pull/6389) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Return the ability to parse boolean settings from ‘true’ and ‘false’ in the configuration file. [\#6278](https://github.com/ClickHouse/ClickHouse/pull/6278) ([alesapin](https://github.com/alesapin))
+- Fix a crash in the `quantile` and `median` functions over `Nullable(Decimal128)`. [\#6378](https://github.com/ClickHouse/ClickHouse/pull/6378) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed a possible incomplete result returned by a `SELECT` query with a `WHERE` condition on the primary key that contained a conversion to a Float type. It was caused by incorrect checking of monotonicity in the `toFloat` function. [\#6248](https://github.com/ClickHouse/ClickHouse/issues/6248) [\#6374](https://github.com/ClickHouse/ClickHouse/pull/6374) ([dimarub2000](https://github.com/dimarub2000))
+- Check the `max_expanded_ast_elements` setting for mutations. Clear mutations after `TRUNCATE TABLE`. [\#6205](https://github.com/ClickHouse/ClickHouse/pull/6205) ([Winter Zhang](https://github.com/zhang2014))
+- Fix JOIN results for key columns when used with `join_use_nulls`. Attach Nulls instead of column defaults. [\#6249](https://github.com/ClickHouse/ClickHouse/pull/6249) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed a skip-indices bug with vertical merge and alter. Fix for the `Bad size of marks file` exception. [\#6594](https://github.com/ClickHouse/ClickHouse/issues/6594) [\#6713](https://github.com/ClickHouse/ClickHouse/pull/6713) ([alesapin](https://github.com/alesapin))
+- Fixed a rare crash in `ALTER MODIFY COLUMN` and vertical merge when one of the merged/altered parts is empty (0 rows). [\#6746](https://github.com/ClickHouse/ClickHouse/issues/6746) [\#6780](https://github.com/ClickHouse/ClickHouse/pull/6780) ([alesapin](https://github.com/alesapin))
+- Fixed a bug in the conversion of `LowCardinality` types in `AggregateFunctionFactory`. This fixes [\#6257](https://github.com/ClickHouse/ClickHouse/issues/6257). [\#6281](https://github.com/ClickHouse/ClickHouse/pull/6281) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix wrong behavior and possible segfaults in the `topK` and `topKWeighted` aggregate functions. [\#6404](https://github.com/ClickHouse/ClickHouse/pull/6404) ([Anton Popov](https://github.com/CurtizJ))
+
+#### Bug Fix {#bug-fix-8}
+
+- This release also contains all bug fixes from 19.13 and 19.11.
+- Fixed segmentation fault when the table has skip indices and vertical merge happens. [\#6723](https://github.com/ClickHouse/ClickHouse/pull/6723) ([alesapin](https://github.com/alesapin))
+- Fixed per-column TTL with non-trivial column defaults. Previously, in case of a forced TTL merge with an `OPTIMIZE ... FINAL` query, expired values were replaced by type defaults instead of the user-specified column defaults. [\#6796](https://github.com/ClickHouse/ClickHouse/pull/6796) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed Kafka messages duplication on normal server restart. [\#6597](https://github.com/ClickHouse/ClickHouse/pull/6597) ([Ivan](https://github.com/abyss7))
+- Fixed an infinite loop when reading Kafka messages. Do not pause/resume the consumer on subscription at all, otherwise it may get paused indefinitely in some scenarios. [\#6354](https://github.com/ClickHouse/ClickHouse/pull/6354) ([Ivan](https://github.com/abyss7))
+- Fixed the `Key expression contains comparison between inconvertible types` exception in the `bitmapContains` function. [\#6136](https://github.com/ClickHouse/ClickHouse/issues/6136) [\#6146](https://github.com/ClickHouse/ClickHouse/issues/6146) [\#6156](https://github.com/ClickHouse/ClickHouse/pull/6156) ([dimarub2000](https://github.com/dimarub2000))
+- Fixed segfault with enabled `optimize_skip_unused_shards` and a missing sharding key. [\#6384](https://github.com/ClickHouse/ClickHouse/pull/6384) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed wrong code in mutations that may lead to memory corruption. Fixed segfault with read of address `0x14c0` that may happen due to concurrent `DROP TABLE` and `SELECT` from `system.parts` or `system.parts_columns`. Fixed race condition in preparation of mutation queries. Fixed deadlock caused by `OPTIMIZE` of Replicated tables and concurrent modification operations like ALTERs. [\#6514](https://github.com/ClickHouse/ClickHouse/pull/6514) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Removed extra verbose logging in the MySQL interface. [\#6389](https://github.com/ClickHouse/ClickHouse/pull/6389) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Returned the ability to parse boolean settings from 'true' and 'false' in the configuration file. [\#6278](https://github.com/ClickHouse/ClickHouse/pull/6278) ([alesapin](https://github.com/alesapin))
+- Fixed crash in the `quantile` and `median` functions over `Nullable(Decimal128)`. [\#6378](https://github.com/ClickHouse/ClickHouse/pull/6378) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed possible incomplete result returned by a `SELECT` query with a `WHERE` condition on the primary key that contained a conversion to a Float type. It was caused by an incorrect check of monotonicity in the `toFloat` function. [\#6248](https://github.com/ClickHouse/ClickHouse/issues/6248) [\#6374](https://github.com/ClickHouse/ClickHouse/pull/6374) ([dimarub2000](https://github.com/dimarub2000))
+- Check the `max_expanded_ast_elements` setting for mutations. Clear mutations after `TRUNCATE TABLE`. [\#6205](https://github.com/ClickHouse/ClickHouse/pull/6205) ([Winter Zhang](https://github.com/zhang2014))
+- Fixed JOIN results for key columns when used with `join_use_nulls`: attach Nulls instead of column defaults. [\#6249](https://github.com/ClickHouse/ClickHouse/pull/6249) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed skip indices with vertical merge and alter. Fix for the `Bad size of marks file` exception. [\#6594](https://github.com/ClickHouse/ClickHouse/issues/6594) [\#6713](https://github.com/ClickHouse/ClickHouse/pull/6713) ([alesapin](https://github.com/alesapin))
+- Fixed a rare bug in `ALTER MODIFY COLUMN` and vertical merge when one of the merged/altered parts is empty (0 rows). [\#6746](https://github.com/ClickHouse/ClickHouse/issues/6746) [\#6780](https://github.com/ClickHouse/ClickHouse/pull/6780) ([alesapin](https://github.com/alesapin))
+- Fixed a bug in conversion of `LowCardinality` types in `AggregateFunctionFactory`. This fixes [\#6257](https://github.com/ClickHouse/ClickHouse/issues/6257). [\#6281](https://github.com/ClickHouse/ClickHouse/pull/6281) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed wrong behavior and possible segfaults in the `topK` and `topKWeighted` aggregate functions. [\#6404](https://github.com/ClickHouse/ClickHouse/pull/6404) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed unsafe code around the `getIdentifier` function. [\#6401](https://github.com/ClickHouse/ClickHouse/issues/6401) [\#6409](https://github.com/ClickHouse/ClickHouse/pull/6409) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a bug in the MySQL wire protocol (used when connecting to ClickHouse from a MySQL client) caused by a heap buffer overflow in `PacketPayloadWriteBuffer`. [\#6212](https://github.com/ClickHouse/ClickHouse/pull/6212) ([Yuriy Baranov](https://github.com/yurriy))
+- Fixed a memory leak in the `bitmapSubsetInRange` function. [\#6819](https://github.com/ClickHouse/ClickHouse/pull/6819) ([Zhichang Yu](https://github.com/yuzhichang))
+- Fixed a rare bug when a mutation was executed after a granularity change. [\#6816](https://github.com/ClickHouse/ClickHouse/pull/6816) ([alesapin](https://github.com/alesapin))
+- Allow protobuf messages with all default fields. [\#6132](https://github.com/ClickHouse/ClickHouse/pull/6132) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fixed a bug in the `nullIf` function when we send a `NULL` argument as the second argument. [\#6446](https://github.com/ClickHouse/ClickHouse/pull/6446) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Fixed a rare bug with wrong memory allocation/deallocation in complex key cache dictionaries with string fields, which led to infinite memory consumption (looked like a memory leak). The bug reproduced when the string size was a power of two starting from eight (8, 16, 32, etc.). [\#6447](https://github.com/ClickHouse/ClickHouse/pull/6447) ([alesapin](https://github.com/alesapin))
+- Fixed Gorilla encoding on small sequences, which caused the exception `Cannot write after end of buffer`. [\#6398](https://github.com/ClickHouse/ClickHouse/issues/6398) [\#6444](https://github.com/ClickHouse/ClickHouse/pull/6444) ([Vasily Nemkov](https://github.com/Enmk))
+- Allow to use non-nullable types in JOINs with `join_use_nulls` enabled. [\#6705](https://github.com/ClickHouse/ClickHouse/pull/6705) ([Artem Zuikov](https://github.com/4ertus2))
+- Disable `Poco::AbstractConfiguration` substitutions in the query in `clickhouse-client`. [\#6706](https://github.com/ClickHouse/ClickHouse/pull/6706) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Avoid deadlock in `REPLACE PARTITION`. [\#6677](https://github.com/ClickHouse/ClickHouse/pull/6677) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Using `arrayReduce` for constant arguments may lead to segfault. [\#6242](https://github.com/ClickHouse/ClickHouse/issues/6242) [\#6326](https://github.com/ClickHouse/ClickHouse/pull/6326) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed inconsistent parts that can appear if a replica was restored after a `DROP PARTITION`. [\#6522](https://github.com/ClickHouse/ClickHouse/issues/6522) [\#6523](https://github.com/ClickHouse/ClickHouse/pull/6523) ([tavplubix](https://github.com/tavplubix))
+- Fixed hang of the `JSONExtractRaw` function. [\#6195](https://github.com/ClickHouse/ClickHouse/issues/6195) [\#6198](https://github.com/ClickHouse/ClickHouse/pull/6198) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a bug with wrong skip-index serialization and aggregation with adaptive granularity. [\#6594](https://github.com/ClickHouse/ClickHouse/issues/6594). [\#6748](https://github.com/ClickHouse/ClickHouse/pull/6748) ([alesapin](https://github.com/alesapin))
+- Fixed the `WITH ROLLUP` and `WITH CUBE` modifiers of `GROUP BY` with two-level aggregation. [\#6225](https://github.com/ClickHouse/ClickHouse/pull/6225) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed a bug with writing secondary index marks with adaptive granularity. [\#6126](https://github.com/ClickHouse/ClickHouse/pull/6126) ([alesapin](https://github.com/alesapin))
+- Fixed the initialization order at server startup. Since `StorageMergeTree::background_task_handle` is initialized in `startup()`, `MergeTreeBlockOutputStream::write()` may try to use it before initialization. Just check if it is initialized. [\#6080](https://github.com/ClickHouse/ClickHouse/pull/6080) ([Ivan](https://github.com/abyss7))
+- Clear the data buffer from a previous read operation that completed with an error. [\#6026](https://github.com/ClickHouse/ClickHouse/pull/6026) ([Nikolay](https://github.com/bopohaa))
+- Fixed a bug with enabling adaptive granularity when creating a new replica for a Replicated\*MergeTree table. [\#6394](https://github.com/ClickHouse/ClickHouse/issues/6394) [\#6452](https://github.com/ClickHouse/ClickHouse/pull/6452) ([alesapin](https://github.com/alesapin))
+- Fixed a possible crash during server startup in case an exception happened in `libunwind` during an exception raised on access to the uninitialized `ThreadStatus` structure. [\#6456](https://github.com/ClickHouse/ClickHouse/pull/6456) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Fixed crash in the `yandexConsistentHash` function. Found by fuzz test. [\#6304](https://github.com/ClickHouse/ClickHouse/issues/6304) [\#6305](https://github.com/ClickHouse/ClickHouse/pull/6305) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed the possibility of hanging queries when the server is overloaded and the global thread pool becomes nearly full. This has higher chances to happen on clusters with a large number of shards (hundreds), because distributed queries allocate a thread per connection to each shard. For example, this issue may happen if a cluster of 330 shards processes 30 concurrent distributed queries. This issue affects all versions starting from 19.2. [\#6301](https://github.com/ClickHouse/ClickHouse/pull/6301) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed the logic of the `arrayEnumerateUniqRanked` function. [\#6423](https://github.com/ClickHouse/ClickHouse/pull/6423) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed segfault when decoding the symbol table. [\#6603](https://github.com/ClickHouse/ClickHouse/pull/6603) ([Amos Bird](https://github.com/amosbird))
+- Fixed an irrelevant exception in the cast of `LowCardinality(Nullable)` to a not-Nullable column in case it doesn't contain Nulls (e.g. in a query like `SELECT CAST(CAST('Hello' AS LowCardinality(Nullable(String))) AS String)`. [\#6094](https://github.com/ClickHouse/ClickHouse/issues/6094) [\#6119](https://github.com/ClickHouse/ClickHouse/pull/6119) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Removed extra quoting of descriptions in the `system.settings` table. [\#6696](https://github.com/ClickHouse/ClickHouse/issues/6696) [\#6699](https://github.com/ClickHouse/ClickHouse/pull/6699) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Avoid possible deadlock in `TRUNCATE` of a Replicated table. [\#6695](https://github.com/ClickHouse/ClickHouse/pull/6695) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed reading in order of the sorting key. [\#6189](https://github.com/ClickHouse/ClickHouse/pull/6189) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed the `ALTER TABLE ... UPDATE` query for tables with `enable_mixed_granularity_parts=1`. [\#6543](https://github.com/ClickHouse/ClickHouse/pull/6543) ([alesapin](https://github.com/alesapin))
+- Fixed a bug opened by [\#4405](https://github.com/ClickHouse/ClickHouse/pull/4405) (since 19.4.0). Reproduces in queries to Distributed tables over MergeTree tables when we don't query any columns (`SELECT 1`). [\#6236](https://github.com/ClickHouse/ClickHouse/pull/6236) ([alesapin](https://github.com/alesapin))
+- Fixed overflow in integer division of a signed type by an unsigned type. The behavior was exactly as in the C or C++ languages (integer promotion rules), which may be surprising. Please note that overflow is still possible when dividing a big signed number by a big unsigned number or vice versa (but that case is less usual). The issue existed in all server versions. [\#6214](https://github.com/ClickHouse/ClickHouse/issues/6214) [\#6233](https://github.com/ClickHouse/ClickHouse/pull/6233) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Limit the maximum sleep time for throttling when `max_execution_speed` or `max_execution_speed_bytes` is set. Fixed false errors like `Estimated query execution time (inf seconds) is too long`. [\#5547](https://github.com/ClickHouse/ClickHouse/issues/5547) [\#6232](https://github.com/ClickHouse/ClickHouse/pull/6232) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed issues with using `MATERIALIZED` columns and aliases in `MaterializedView`. [\#448](https://github.com/ClickHouse/ClickHouse/issues/448) [\#3484](https://github.com/ClickHouse/ClickHouse/issues/3484) [\#3450](https://github.com/ClickHouse/ClickHouse/issues/3450) [\#2878](https://github.com/ClickHouse/ClickHouse/issues/2878) [\#2285](https://github.com/ClickHouse/ClickHouse/issues/2285) [\#3796](https://github.com/ClickHouse/ClickHouse/pull/3796) ([Amos Bird](https://github.com/amosbird)) [\#6316](https://github.com/ClickHouse/ClickHouse/pull/6316) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed `FormatFactory` behavior for input streams that are not implemented as a processor. [\#6495](https://github.com/ClickHouse/ClickHouse/pull/6495) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed a typo. [\#6631](https://github.com/ClickHouse/ClickHouse/pull/6631) ([Alexey Ryndin](https://github.com/alexryndin))
+- Typo in an error message (is -\> are). [\#6839](https://github.com/ClickHouse/ClickHouse/pull/6839) ([Denis Zhuravlev](https://github.com/den-crane))
+- Fixed an error while parsing a columns list from a string if the type contained a comma (this issue was relevant for the `File`, `URL`, `HDFS` storages) [\#6217](https://github.com/ClickHouse/ClickHouse/issues/6217). [\#6209](https://github.com/ClickHouse/ClickHouse/pull/6209) ([dimarub2000](https://github.com/dimarub2000))
+
+#### Security Fix {#security-fix}
+
+- This release also contains all security fixes from 19.13 and 19.11.
+- Fixed the possibility of a fabricated query causing a server crash due to stack overflow in the SQL parser. Fixed the possibility of stack overflow in Merge and Distributed tables, materialized views, and conditions for row-level security that involve subqueries. [\#6433](https://github.com/ClickHouse/ClickHouse/pull/6433) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Improvement {#improvement-3}
+
+- Correct implementation of ternary logic for `AND/OR`. [\#6048](https://github.com/ClickHouse/ClickHouse/pull/6048) ([Alexander Kazakov](https://github.com/Akazz))
+- Now values and rows with expired TTL will be removed after an `OPTIMIZE ... FINAL` query from old parts without TTL infos or with outdated TTL infos, e.g. after an `ALTER ... MODIFY TTL` query. Added the queries `SYSTEM STOP/START TTL MERGES` to disallow/allow assigning merges with TTL and filtering expired values in all merges (see the TTL sketch after this list). [\#6274](https://github.com/ClickHouse/ClickHouse/pull/6274) ([Anton Popov](https://github.com/CurtizJ))
+- Possibility to change the location of the ClickHouse history file for the client using the `CLICKHOUSE_HISTORY_FILE` env variable. [\#6840](https://github.com/ClickHouse/ClickHouse/pull/6840) ([filimonov](https://github.com/filimonov))
+- Remove the `dry_run` flag from `InterpreterSelectQuery`. … [\#6375](https://github.com/ClickHouse/ClickHouse/pull/6375) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Support `ASOF JOIN` with the `ON` section. [\#6211](https://github.com/ClickHouse/ClickHouse/pull/6211) ([Artem Zuikov](https://github.com/4ertus2))
+- Better support of skip indexes for mutations and replication. Support for the `MATERIALIZE/CLEAR INDEX ... IN PARTITION` query. `UPDATE x = x` recalculates all indices that use column `x`. [\#5053](https://github.com/ClickHouse/ClickHouse/pull/5053) ([Nikita Vasilev](https://github.com/nikvas0))
+- Allow to `ATTACH` live views (for example, at server startup) regardless of the `allow_experimental_live_view` setting. [\#6754](https://github.com/ClickHouse/ClickHouse/pull/6754) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- For stack traces gathered by the query profiler, do not include stack frames generated by the query profiler itself. [\#6250](https://github.com/ClickHouse/ClickHouse/pull/6250) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Now the table functions `values`, `file`, `url`, `hdfs` have support for ALIAS columns. [\#6255](https://github.com/ClickHouse/ClickHouse/pull/6255) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Throw an exception if a `config.d` file doesn't have the corresponding root element as the config file. [\#6123](https://github.com/ClickHouse/ClickHouse/pull/6123) ([dimarub2000](https://github.com/dimarub2000))
+- Print extra info in the exception message for `no space left on device`. [\#6182](https://github.com/ClickHouse/ClickHouse/issues/6182), [\#6252](https://github.com/ClickHouse/ClickHouse/issues/6252) [\#6352](https://github.com/ClickHouse/ClickHouse/pull/6352) ([tavplubix](https://github.com/tavplubix))
+- When determining shards of a `Distributed` table to be covered by a read query (for `optimize_skip_unused_shards` = 1), ClickHouse now checks conditions from both the `prewhere` and `where` clauses of the select statement. [\#6521](https://github.com/ClickHouse/ClickHouse/pull/6521) ([Alexander Kazakov](https://github.com/Akazz))
+- Enabled `SIMDJSON` for machines without AVX2 but with the SSE 4.2 and PCLMUL instruction set. [\#6285](https://github.com/ClickHouse/ClickHouse/issues/6285) [\#6320](https://github.com/ClickHouse/ClickHouse/pull/6320) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- ClickHouse can work on filesystems without `O_DIRECT` support (such as ZFS and BtrFS) without additional tuning. [\#4449](https://github.com/ClickHouse/ClickHouse/issues/4449) [\#6730](https://github.com/ClickHouse/ClickHouse/pull/6730) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Support push-down predicate for the final subquery. [\#6120](https://github.com/ClickHouse/ClickHouse/pull/6120) ([TCeason](https://github.com/TCeason)) [\#6162](https://github.com/ClickHouse/ClickHouse/pull/6162) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Better `JOIN ON` keys extraction. [\#6131](https://github.com/ClickHouse/ClickHouse/pull/6131) ([Artem Zuikov](https://github.com/4ertus2))
+- Upgraded `SIMDJSON`. [\#6285](https://github.com/ClickHouse/ClickHouse/issues/6285). [\#6306](https://github.com/ClickHouse/ClickHouse/pull/6306) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Optimize the selection of the smallest column for a `SELECT count()` query. [\#6344](https://github.com/ClickHouse/ClickHouse/pull/6344) ([Amos Bird](https://github.com/amosbird))
+- Added a `strict` parameter in `windowFunnel()`. When `strict` is set, `windowFunnel()` applies conditions only for unique values. [\#6548](https://github.com/ClickHouse/ClickHouse/pull/6548) ([achimbab](https://github.com/achimbab))
+- Safer interface of `mysqlxx::Pool`. [\#6150](https://github.com/ClickHouse/ClickHouse/pull/6150) ([avasiliev](https://github.com/avasiliev))
+- The options line size when executing with the `--help` option now corresponds with the terminal size. [\#6590](https://github.com/ClickHouse/ClickHouse/pull/6590) ([dimarub2000](https://github.com/dimarub2000))
+- Disable the "read in order" optimization for aggregation without keys. [\#6599](https://github.com/ClickHouse/ClickHouse/pull/6599) ([Anton Popov](https://github.com/CurtizJ))
+- The HTTP status code for the `INCORRECT_DATA` and `TYPE_MISMATCH` error codes was changed from the default `500 Internal Server Error` to `400 Bad Request`. [\#6271](https://github.com/ClickHouse/ClickHouse/pull/6271) ([Alexander Rodin](https://github.com/a-rodin))
+- Move the Join object from `ExpressionAction` into `AnalyzedJoin`. `ExpressionAnalyzer` and `ExpressionAction` do not know about the `Join` class anymore. Its logic is hidden behind the `AnalyzedJoin` iface. [\#6801](https://github.com/ClickHouse/ClickHouse/pull/6801) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed a possible deadlock of distributed queries when one of the shards is localhost but the query is sent via a network connection. [\#6759](https://github.com/ClickHouse/ClickHouse/pull/6759) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Changed the semantics of multiple-table `RENAME` to avoid possible deadlocks. [\#6757](https://github.com/ClickHouse/ClickHouse/issues/6757). [\#6756](https://github.com/ClickHouse/ClickHouse/pull/6756) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Rewrote the MySQL compatibility server to prevent loading the full packet payload in memory. Decreased memory consumption for each connection to approximately `2 * DBMS_DEFAULT_BUFFER_SIZE` (read/write buffers). [\#5811](https://github.com/ClickHouse/ClickHouse/pull/5811) ([Yuriy Baranov](https://github.com/yurriy))
+- Move the AST alias interpreting logic out of the parser, which doesn't have to know anything about query semantics. [\#6108](https://github.com/ClickHouse/ClickHouse/pull/6108) ([Artem Zuikov](https://github.com/4ertus2))
+- Slightly safer parsing of `NamesAndTypesList`. [\#6408](https://github.com/ClickHouse/ClickHouse/issues/6408). [\#6410](https://github.com/ClickHouse/ClickHouse/pull/6410) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `clickhouse-copier`: allow using `where_condition` from the config with a `partition_key` alias in the query for checking partition existence (earlier it was used only in data-reading queries). [\#6577](https://github.com/ClickHouse/ClickHouse/pull/6577) ([proller](https://github.com/proller))
+- Added an optional message argument in `throwIf`. ([\#5772](https://github.com/ClickHouse/ClickHouse/issues/5772)) [\#6329](https://github.com/ClickHouse/ClickHouse/pull/6329) ([Vdimir](https://github.com/Vdimir))
+- A server exception that occurred while sending insertion data is now being processed in the client as well. [\#5891](https://github.com/ClickHouse/ClickHouse/issues/5891) [\#6711](https://github.com/ClickHouse/ClickHouse/pull/6711) ([dimarub2000](https://github.com/dimarub2000))
+- Added the metric `DistributedFilesToInsert` that shows the total number of files in the filesystem that are selected to be sent to remote servers by Distributed tables. The number is summed across all shards. [\#6600](https://github.com/ClickHouse/ClickHouse/pull/6600) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Move most of the JOINs prepare logic from `ExpressionAction/ExpressionAnalyzer` to `AnalyzedJoin`. [\#6785](https://github.com/ClickHouse/ClickHouse/pull/6785) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix the TSan [warning](https://clickhouse-test-reports.s3.yandex.net/6399/c1c1d1daa98e199e620766f1bd06a5921050a00d/functional_stateful_tests_(thread).html) 'lock-order-inversion'. [\#6740](https://github.com/ClickHouse/ClickHouse/pull/6740) ([Vasily Nemkov](https://github.com/Enmk))
+- Better information messages about the lack of Linux capabilities. Logging fatal errors with the "fatal" level, which will make them easier to find in `system.text_log`. [\#6441](https://github.com/ClickHouse/ClickHouse/pull/6441) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- When dumping temporary data to disk to restrict memory usage during `GROUP BY`, `ORDER BY` is enabled, the free disk space was not checked. The fix adds a new setting `min_free_disk_space`: when the free disk space is smaller than the threshold, the query will stop and throw `ErrorCodes::NOT_ENOUGH_SPACE`. [\#6678](https://github.com/ClickHouse/ClickHouse/pull/6678) ([Weiqing Xu](https://github.com/weiqxu)) [\#6691](https://github.com/ClickHouse/ClickHouse/pull/6691) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Removed recursive rwlock by thread. It makes no sense, because threads are reused between queries. A `SELECT` query may acquire a lock in one thread, hold a lock from another thread and exit from the first thread. At the same time, the first thread can be reused by a `DROP` query. This would lead to false "Attempt to acquire exclusive lock recursively" messages. [\#6771](https://github.com/ClickHouse/ClickHouse/pull/6771) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Split `ExpressionAnalyzer.appendJoin()`. Prepare a place in `ExpressionAnalyzer` for `MergeJoin`. [\#6524](https://github.com/ClickHouse/ClickHouse/pull/6524) ([Artem Zuikov](https://github.com/4ertus2))
+- Added the `mysql_native_password` authentication plugin to the MySQL compatibility server. [\#6194](https://github.com/ClickHouse/ClickHouse/pull/6194) ([Yuriy Baranov](https://github.com/yurriy))
+- Fewer `clock_gettime` calls; fixed ABI compatibility between debug/release in `Allocator` (an insignificant issue). [\#6197](https://github.com/ClickHouse/ClickHouse/pull/6197) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Move `collectUsedColumns` from `ExpressionAnalyzer` to `SyntaxAnalyzer`. `SyntaxAnalyzer` now makes `required_source_columns` itself. [\#6416](https://github.com/ClickHouse/ClickHouse/pull/6416) ([Artem Zuikov](https://github.com/4ertus2))
+- Added the setting `joined_subquery_requires_alias` to require aliases for subselects and table functions in `FROM` when more than one table is present (i.e. queries with JOINs). [\#6733](https://github.com/ClickHouse/ClickHouse/pull/6733) ([Artem Zuikov](https://github.com/4ertus2))
+- Extract the `GetAggregatesVisitor` class from `ExpressionAnalyzer`. [\#6458](https://github.com/ClickHouse/ClickHouse/pull/6458) ([Artem Zuikov](https://github.com/4ertus2))
+- `system.query_log`: change the data type of the `type` column to `Enum`. [\#6265](https://github.com/ClickHouse/ClickHouse/pull/6265) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Static linking of the `sha256_password` authentication plugin. [\#6512](https://github.com/ClickHouse/ClickHouse/pull/6512) ([Yuriy Baranov](https://github.com/yurriy))
+- Avoid an extra dependency for the `compile` setting to work. In previous versions, the user may get errors like `cannot open crti.o`, `unable to find library -lc`, etc. [\#6309](https://github.com/ClickHouse/ClickHouse/pull/6309) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Additional validation of input data that may come from a malicious replica. [\#6303](https://github.com/ClickHouse/ClickHouse/pull/6303) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Now the `clickhouse-obfuscator` binary is available in the `clickhouse-client` package. In previous versions it was available as `clickhouse obfuscator` (with a whitespace). [\#5816](https://github.com/ClickHouse/ClickHouse/issues/5816) [\#6609](https://github.com/ClickHouse/ClickHouse/pull/6609) ([dimarub2000](https://github.com/dimarub2000))
+- Fixed a deadlock when we have at least two queries that read at least two tables in a different order and another query that performs a DDL operation on one of the tables. Fixed another very rare deadlock. [\#6764](https://github.com/ClickHouse/ClickHouse/pull/6764) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added the `os_thread_ids` column to `system.processes` and `system.query_log` for better debugging possibilities. [\#6763](https://github.com/ClickHouse/ClickHouse/pull/6763) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- A workaround for PHP mysqlnd extension bugs which occur when `sha256_password` is used as the default authentication plugin (described in [\#6031](https://github.com/ClickHouse/ClickHouse/issues/6031)). [\#6113](https://github.com/ClickHouse/ClickHouse/pull/6113) ([Yuriy Baranov](https://github.com/yurriy))
+- Remove unneeded space with changed nullability columns. [\#6693](https://github.com/ClickHouse/ClickHouse/pull/6693) ([Artem Zuikov](https://github.com/4ertus2))
+- Set the default value of `queue_max_wait_ms` to zero, because the current value (five seconds) makes no sense. There are rare circumstances when this setting has any use. Added the settings `replace_running_query_max_wait_ms`, `kafka_max_wait_ms` and `connection_pool_max_wait_ms` for disambiguation. [\#6692](https://github.com/ClickHouse/ClickHouse/pull/6692) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Extract `SelectQueryExpressionAnalyzer` from `ExpressionAnalyzer`. Keep the last one for non-select queries. [\#6499](https://github.com/ClickHouse/ClickHouse/pull/6499) ([Artem Zuikov](https://github.com/4ertus2))
+- Removed duplication of input and output formats. [\#6239](https://github.com/ClickHouse/ClickHouse/pull/6239) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Allow the user to override the `poll_interval` and `idle_connection_timeout` settings on connection. [\#6230](https://github.com/ClickHouse/ClickHouse/pull/6230) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `MergeTree` now has an additional option `ttl_only_drop_parts` (disabled by default) to avoid partial pruning of parts, so that they are dropped completely when all rows in a part are expired (see the TTL sketch below). [\#6191](https://github.com/ClickHouse/ClickHouse/pull/6191) ([Sergi Vladykin](https://github.com/svladykin))
+- Type checks for set index functions. Throw an exception if a function got a wrong type. This fixes the fuzz test. [\#6511](https://github.com/ClickHouse/ClickHouse/pull/6511) ([Nikita Vasilev](https://github.com/nikvas0))
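+
+A sketch of the TTL-related knobs from this list; `events` and its columns are hypothetical names:
+
+``` sql
+CREATE TABLE events
+(
+    d Date,
+    x UInt64
+)
+ENGINE = MergeTree
+ORDER BY x
+TTL d + INTERVAL 1 MONTH
+SETTINGS ttl_only_drop_parts = 1; -- drop whole expired parts instead of trimming rows
+
+SYSTEM STOP TTL MERGES;  -- disallow assigning merges with TTL
+SYSTEM START TTL MERGES; -- allow them again
+
+OPTIMIZE TABLE events FINAL; -- force filtering of expired values right now
+```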
+
+#### Performance Improvement {#performance-improvement-2}
+
+- Optimize queries with an `ORDER BY expressions` clause, where `expressions` have a coinciding prefix with the sorting key in `MergeTree` tables. This optimization is controlled by the `optimize_read_in_order` setting (see the example after this list). [\#6054](https://github.com/ClickHouse/ClickHouse/pull/6054) [\#6629](https://github.com/ClickHouse/ClickHouse/pull/6629) ([Anton Popov](https://github.com/CurtizJ))
+- Allow to use multiple threads during parts loading and removal. [\#6372](https://github.com/ClickHouse/ClickHouse/issues/6372) [\#6074](https://github.com/ClickHouse/ClickHouse/issues/6074) [\#6438](https://github.com/ClickHouse/ClickHouse/pull/6438) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Implemented a batch variant of updating aggregate function states. It may lead to performance benefits. [\#6435](https://github.com/ClickHouse/ClickHouse/pull/6435) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Using the `FastOps` library for the functions `exp`, `log`, `sigmoid`, `tanh`. FastOps is a fast vector math library from Mikhail Parakhin (Yandex CTO). Improved performance of the `exp` and `log` functions more than 6 times. The functions `exp` and `log` from a `Float32` argument will return `Float32` (in previous versions they always returned `Float64`). Now `exp(nan)` may return `inf`. The result of the `exp` and `log` functions may be not the nearest machine-representable number to the true answer. [\#6254](https://github.com/ClickHouse/ClickHouse/pull/6254) ([alexey-milovidov](https://github.com/alexey-milovidov)) Using Danila Kutenin's variant to make fastops working. [\#6317](https://github.com/ClickHouse/ClickHouse/pull/6317) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Disable consecutive key optimization for `UInt8/16`. [\#6298](https://github.com/ClickHouse/ClickHouse/pull/6298) [\#6701](https://github.com/ClickHouse/ClickHouse/pull/6701) ([akuzm](https://github.com/akuzm))
+- Improved performance of the `simdjson` library by getting rid of dynamic allocation in `ParsedJson::Iterator`. [\#6479](https://github.com/ClickHouse/ClickHouse/pull/6479) ([Vitaly Baranov](https://github.com/vitlibar))
+- Pre-fault pages when allocating memory with `mmap()`. [\#6667](https://github.com/ClickHouse/ClickHouse/pull/6667) ([akuzm](https://github.com/akuzm))
+- Fixed a performance bug in `Decimal` comparison. [\#6380](https://github.com/ClickHouse/ClickHouse/pull/6380) ([Artem Zuikov](https://github.com/4ertus2))
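+
+A minimal example of the read-in-order optimization, assuming a `hits` table whose sorting key starts with `(CounterID, EventDate)` (the table and column names are illustrative):
+
+``` sql
+SELECT CounterID, EventDate
+FROM hits
+ORDER BY CounterID, EventDate
+LIMIT 10
+SETTINGS optimize_read_in_order = 1; -- read in primary key order, avoiding a full sort
+```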
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-4}
+
+- Remove the Compiler (runtime template instantiation) because we've won over its performance. [\#6646](https://github.com/ClickHouse/ClickHouse/pull/6646) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added a performance test to show the degradation of performance in gcc-9 in a more isolated way. [\#6302](https://github.com/ClickHouse/ClickHouse/pull/6302) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added the table function `numbers_mt`, which is a multithreaded version of `numbers`. Updated performance tests with hash functions (see the sketch after this list). [\#6554](https://github.com/ClickHouse/ClickHouse/pull/6554) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Comparison mode in `clickhouse-benchmark`. [\#6220](https://github.com/ClickHouse/ClickHouse/issues/6220) [\#6343](https://github.com/ClickHouse/ClickHouse/pull/6343) ([dimarub2000](https://github.com/dimarub2000))
+- Best effort for printing stack traces. Also added `SIGPROF` as a debugging signal to print the stack trace of a running thread. [\#6529](https://github.com/ClickHouse/ClickHouse/pull/6529) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Every function in its own file, part 10. [\#6321](https://github.com/ClickHouse/ClickHouse/pull/6321) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Remove the doubled const `TABLE_IS_READ_ONLY`. [\#6566](https://github.com/ClickHouse/ClickHouse/pull/6566) ([filimonov](https://github.com/filimonov))
+- Formatting changes for the `StringHashMap` PR [\#5417](https://github.com/ClickHouse/ClickHouse/issues/5417). [\#6700](https://github.com/ClickHouse/ClickHouse/pull/6700) ([akuzm](https://github.com/akuzm))
+- Better subquery for join creation in `ExpressionAnalyzer`. [\#6824](https://github.com/ClickHouse/ClickHouse/pull/6824) ([Artem Zuikov](https://github.com/4ertus2))
+- Remove a redundant condition (found by PVS-Studio). [\#6775](https://github.com/ClickHouse/ClickHouse/pull/6775) ([akuzm](https://github.com/akuzm))
+- Separate the hash table interface for `ReverseIndex`. [\#6672](https://github.com/ClickHouse/ClickHouse/pull/6672) ([akuzm](https://github.com/akuzm))
+- Refactoring of settings. [\#6689](https://github.com/ClickHouse/ClickHouse/pull/6689) ([alesapin](https://github.com/alesapin))
+- Added comments for `set` index functions. [\#6319](https://github.com/ClickHouse/ClickHouse/pull/6319) ([Nikita Vasilev](https://github.com/nikvas0))
+- Increase the OOM score in the debug version on Linux. [\#6152](https://github.com/ClickHouse/ClickHouse/pull/6152) ([akuzm](https://github.com/akuzm))
+- HDFS HA now works in debug build. [\#6650](https://github.com/ClickHouse/ClickHouse/pull/6650) ([Weiqing Xu](https://github.com/weiqxu))
+- Added a test to `transform_query_for_external_database`. [\#6388](https://github.com/ClickHouse/ClickHouse/pull/6388) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added a test for multiple materialized views for a Kafka table. [\#6509](https://github.com/ClickHouse/ClickHouse/pull/6509) ([Ivan](https://github.com/abyss7))
+- Make a better build scheme. [\#6500](https://github.com/ClickHouse/ClickHouse/pull/6500) ([Ivan](https://github.com/abyss7))
+- Fixed the `test_external_dictionaries` integration test in case it is executed under a non-root user. [\#6507](https://github.com/ClickHouse/ClickHouse/pull/6507) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- The bug reproduces when the total size of written packets exceeds `DBMS_DEFAULT_BUFFER_SIZE`. [\#6204](https://github.com/ClickHouse/ClickHouse/pull/6204) ([Yuriy Baranov](https://github.com/yurriy))
+- Added a test for a `RENAME` table race condition. [\#6752](https://github.com/ClickHouse/ClickHouse/pull/6752) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Avoid a data race on settings in `KILL QUERY`. [\#6753](https://github.com/ClickHouse/ClickHouse/pull/6753) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added an integration test for handling errors by a cache dictionary. [\#6755](https://github.com/ClickHouse/ClickHouse/pull/6755) ([Vitaly Baranov](https://github.com/vitlibar))
+- Disable parsing of ELF object files on Mac OS, because it makes no sense. [\#6578](https://github.com/ClickHouse/ClickHouse/pull/6578) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Attempt to make the changelog generator better. [\#6327](https://github.com/ClickHouse/ClickHouse/pull/6327) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Adding the `-Wshadow` flag for GCC. [\#6325](https://github.com/ClickHouse/ClickHouse/pull/6325) ([kreuzerkrieg](https://github.com/kreuzerkrieg))
+- Removed obsolete code for `mimalloc` support. [\#6715](https://github.com/ClickHouse/ClickHouse/pull/6715) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `zlib-ng` determines x86 capabilities and saves this info to global variables. This is done in the deflateInit call, which may be made by different threads simultaneously. To avoid multithreaded writes, do it on library startup. [\#6141](https://github.com/ClickHouse/ClickHouse/pull/6141) ([akuzm](https://github.com/akuzm))
+- Regression test for a bug in join which was fixed in [\#5192](https://github.com/ClickHouse/ClickHouse/issues/5192). [\#6147](https://github.com/ClickHouse/ClickHouse/pull/6147) ([Bakhtiyor Ruziev](https://github.com/theruziev))
+- Fixed an MSan report. [\#6144](https://github.com/ClickHouse/ClickHouse/pull/6144) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix the flapping TTL test. [\#6782](https://github.com/ClickHouse/ClickHouse/pull/6782) ([Anton Popov](https://github.com/CurtizJ))
+- Fixed a false data race in the `MergeTreeDataPart::is_frozen` field. [\#6583](https://github.com/ClickHouse/ClickHouse/pull/6583) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed timeouts in the fuzz test. In the previous version, it managed to find a false hangup in the query `SELECT * FROM numbers_mt(gccMurmurHash(''))`. [\#6582](https://github.com/ClickHouse/ClickHouse/pull/6582) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added debug checks to `static_cast` of columns. [\#6581](https://github.com/ClickHouse/ClickHouse/pull/6581) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Support for Oracle Linux in the official RPM packages. [\#6356](https://github.com/ClickHouse/ClickHouse/issues/6356) [\#6585](https://github.com/ClickHouse/ClickHouse/pull/6585) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Changed JSON perftests from the `once` to the `loop` type. [\#6536](https://github.com/ClickHouse/ClickHouse/pull/6536) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- `odbc-bridge.cpp` defines `main()`, so it should not be included in `clickhouse-lib`. [\#6538](https://github.com/ClickHouse/ClickHouse/pull/6538) ([Orivej Desh](https://github.com/orivej))
+- Test for a crash in `FULL|RIGHT JOIN` with nulls in the right table's keys. [\#6362](https://github.com/ClickHouse/ClickHouse/pull/6362) ([Artem Zuikov](https://github.com/4ertus2))
+- Added a test for the limit on expansion of aliases, just in case. [\#6442](https://github.com/ClickHouse/ClickHouse/pull/6442) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Switched from `boost::filesystem` to `std::filesystem` where appropriate. [\#6253](https://github.com/ClickHouse/ClickHouse/pull/6253) [\#6385](https://github.com/ClickHouse/ClickHouse/pull/6385) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added RPM packages to the website. [\#6251](https://github.com/ClickHouse/ClickHouse/pull/6251) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added a test for the fixed `Unknown identifier` exception in the `IN` section. [\#6708](https://github.com/ClickHouse/ClickHouse/pull/6708) ([Artem Zuikov](https://github.com/4ertus2))
+- Simplify `shared_ptr_helper` because people are facing difficulties understanding it. [\#6675](https://github.com/ClickHouse/ClickHouse/pull/6675) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added performance tests for the fixed Gorilla and DoubleDelta codecs. [\#6179](https://github.com/ClickHouse/ClickHouse/pull/6179) ([Vasily Nemkov](https://github.com/Enmk))
+- Split the integration test `test_dictionaries` into 4 separate tests. [\#6776](https://github.com/ClickHouse/ClickHouse/pull/6776) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fixed a PVS-Studio warning in `PipelineExecutor`. [\#6777](https://github.com/ClickHouse/ClickHouse/pull/6777) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Allow using the `library` dictionary source with ASan. [\#6482](https://github.com/ClickHouse/ClickHouse/pull/6482) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added the ability to generate a changelog from a list of PRs. [\#6350](https://github.com/ClickHouse/ClickHouse/pull/6350) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Lock the `TinyLog` storage when reading. [\#6226](https://github.com/ClickHouse/ClickHouse/pull/6226) ([akuzm](https://github.com/akuzm))
+- Check for broken links in CI. [\#6634](https://github.com/ClickHouse/ClickHouse/pull/6634) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Increase the timeout for the "stack overflow" test because it may take a long time in a debug build. [\#6637](https://github.com/ClickHouse/ClickHouse/pull/6637) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added a check for double whitespaces. [\#6643](https://github.com/ClickHouse/ClickHouse/pull/6643) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix `new/delete` memory tracking when built with sanitizers. The tracking is not clear. It only prevents memory limit exceptions in tests. [\#6450](https://github.com/ClickHouse/ClickHouse/pull/6450) ([Artem Zuikov](https://github.com/4ertus2))
+- Enable back the check of undefined symbols while linking. [\#6453](https://github.com/ClickHouse/ClickHouse/pull/6453) ([Ivan](https://github.com/abyss7))
+- Avoid rebuilding `hyperscan` every day. [\#6307](https://github.com/ClickHouse/ClickHouse/pull/6307) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a UBSan report in `ProtobufWriter`. [\#6163](https://github.com/ClickHouse/ClickHouse/pull/6163) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Don't allow using the query profiler with sanitizers because it is not compatible. [\#6769](https://github.com/ClickHouse/ClickHouse/pull/6769) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added a test for reloading a dictionary after a failure by timer. [\#6114](https://github.com/ClickHouse/ClickHouse/pull/6114) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fixed an inconsistency in the `PipelineExecutor::prepareProcessor` argument type. [\#6494](https://github.com/ClickHouse/ClickHouse/pull/6494) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Added a test for bad URIs. [\#6493](https://github.com/ClickHouse/ClickHouse/pull/6493) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added more checks to the `CAST` function. This should get more information about the segmentation fault in the fuzzy test. [\#6346](https://github.com/ClickHouse/ClickHouse/pull/6346) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Added `gcc-9` support to the `docker/builder` container that builds the image locally. [\#6333](https://github.com/ClickHouse/ClickHouse/pull/6333) ([Gleb Novikov](https://github.com/NanoBjorn))
+- Test for a primary key with `LowCardinality(String)`. [\#5044](https://github.com/ClickHouse/ClickHouse/issues/5044) [\#6219](https://github.com/ClickHouse/ClickHouse/pull/6219) ([dimarub2000](https://github.com/dimarub2000))
+- Fixed tests affected by slow stack-trace printing. [\#6315](https://github.com/ClickHouse/ClickHouse/pull/6315) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added a test case for the crash in `groupUniqArray` fixed in [\#6029](https://github.com/ClickHouse/ClickHouse/pull/6029). [\#4402](https://github.com/ClickHouse/ClickHouse/issues/4402) [\#6129](https://github.com/ClickHouse/ClickHouse/pull/6129) ([akuzm](https://github.com/akuzm))
+- Fixed indices mutations tests. [\#6645](https://github.com/ClickHouse/ClickHouse/pull/6645) ([Nikita Vasilev](https://github.com/nikvas0))
+- In the performance test, do not read the query log for queries we didn't run. [\#6427](https://github.com/ClickHouse/ClickHouse/pull/6427) ([akuzm](https://github.com/akuzm))
+- A materialized view now can be created with any low-cardinality types regardless of the setting about suspicious low-cardinality types. [\#6428](https://github.com/ClickHouse/ClickHouse/pull/6428) ([Olga Khvostikova](https://github.com/stavrolia))
+- Updated tests for the `send_logs_level` setting. [\#6207](https://github.com/ClickHouse/ClickHouse/pull/6207) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed build under gcc-8.2. [\#6196](https://github.com/ClickHouse/ClickHouse/pull/6196) ([Max Akhmedov](https://github.com/zlobober))
+- Fixed build with internal libc++. [\#6724](https://github.com/ClickHouse/ClickHouse/pull/6724) ([Ivan](https://github.com/abyss7))
+- Fixed shared build with the `rdkafka` library. [\#6101](https://github.com/ClickHouse/ClickHouse/pull/6101) ([Ivan](https://github.com/abyss7))
+- Fixes for Mac OS build (incomplete). [\#6390](https://github.com/ClickHouse/ClickHouse/pull/6390) ([alexey-milovidov](https://github.com/alexey-milovidov)) [\#6429](https://github.com/ClickHouse/ClickHouse/pull/6429) ([Alex Zaitsev](https://github.com/alex-zaitsev))
+- Fix "splitted" build. [\#6618](https://github.com/ClickHouse/ClickHouse/pull/6618) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Other build fixes: [\#6186](https://github.com/ClickHouse/ClickHouse/pull/6186) ([Amos Bird](https://github.com/amosbird)) [\#6486](https://github.com/ClickHouse/ClickHouse/pull/6486) [\#6348](https://github.com/ClickHouse/ClickHouse/pull/6348) ([vxider](https://github.com/Vxider)) [\#6744](https://github.com/ClickHouse/ClickHouse/pull/6744) ([Ivan](https://github.com/abyss7)) [\#6016](https://github.com/ClickHouse/ClickHouse/pull/6016) [\#6421](https://github.com/ClickHouse/ClickHouse/pull/6421) [\#6491](https://github.com/ClickHouse/ClickHouse/pull/6491) ([proller](https://github.com/proller))
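+
+A quick probe of the multithreaded `numbers_mt` table function mentioned above; the hash function choice is arbitrary:
+
+``` sql
+-- Should scale with the number of threads, unlike plain numbers().
+SELECT count() FROM numbers_mt(100000000) WHERE NOT ignore(cityHash64(number));
+```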
+
+#### Backward Incompatible Change {#backward-incompatible-change-3}
+
+- Removed the rarely used table function `catBoostPool` and the storage `CatBoostPool`. If you have used this table function, please write an email to `clickhouse-feedback@yandex-team.com`. Note that CatBoost integration remains and will be supported. [\#6279](https://github.com/ClickHouse/ClickHouse/pull/6279) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Disable `ANY RIGHT JOIN` and `ANY FULL JOIN` by default. Set the `any_join_distinct_right_table_keys` setting to enable them (see the sketch below). [\#5126](https://github.com/ClickHouse/ClickHouse/issues/5126) [\#6351](https://github.com/ClickHouse/ClickHouse/pull/6351) ([Artem Zuikov](https://github.com/4ertus2))
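+
+A sketch of re-enabling the old behavior; `t1`, `t2` and `k` are placeholder names:
+
+``` sql
+SET any_join_distinct_right_table_keys = 1; -- opt back in to the legacy semantics
+SELECT * FROM t1 ANY RIGHT JOIN t2 USING (k);
+```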
+
+## ClickHouse release 19.13 {#clickhouse-release-19-13}
+
+### ClickHouse release 19.13.6.51, 2019-10-02 {#clickhouse-release-19-13-6-51-2019-10-02}
+
+#### Bug Fix {#bug-fix-9}
+
+- This release also contains all bug fixes from 19.11.12.69.
+
+### ClickHouse release 19.13.5.44, 2019-09-20 {#clickhouse-release-19-13-5-44-2019-09-20}
+
+#### Bug Fix {#bug-fix-10}
+
+- This release also contains all bug fixes from 19.14.6.12.
+- Fixed a possible inconsistent state of the table while executing a `DROP` query for a replicated table while zookeeper is not accessible. [\#6045](https://github.com/ClickHouse/ClickHouse/issues/6045) [\#6413](https://github.com/ClickHouse/ClickHouse/pull/6413) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+- Fix for a data race in StorageMerge. [\#6717](https://github.com/ClickHouse/ClickHouse/pull/6717) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a bug introduced in the query profiler which leads to endless recv from a socket. [\#6386](https://github.com/ClickHouse/ClickHouse/pull/6386) ([alesapin](https://github.com/alesapin))
+- Fixed excessive CPU usage while executing the `JSONExtractRaw` function over a boolean value. [\#6208](https://github.com/ClickHouse/ClickHouse/pull/6208) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fixed a regression while pushing to a materialized view. [\#6415](https://github.com/ClickHouse/ClickHouse/pull/6415) ([Ivan](https://github.com/abyss7))
+- The table function `url` had a vulnerability that allowed an attacker to inject arbitrary HTTP headers into the request. This issue was found by [Nikita Tikhomirov](https://github.com/NSTikhomirov). [\#6466](https://github.com/ClickHouse/ClickHouse/pull/6466) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix a useless `AST` check in the Set index. [\#6510](https://github.com/ClickHouse/ClickHouse/issues/6510) [\#6651](https://github.com/ClickHouse/ClickHouse/pull/6651) ([Nikita Vasilev](https://github.com/nikvas0))
+- Fixed parsing of `AggregateFunction` values embedded in a query. [\#6575](https://github.com/ClickHouse/ClickHouse/issues/6575) [\#6773](https://github.com/ClickHouse/ClickHouse/pull/6773) ([Zhichang Yu](https://github.com/yuzhichang))
+- Fixed wrong behaviour of the `trim` functions family. [\#6647](https://github.com/ClickHouse/ClickHouse/pull/6647) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse release 19.13.4.32, 2019-09-10 {#clickhouse-release-19-13-4-32-2019-09-10}
+
+#### Bug Fix {#bug-fix-11}
+
+- This release also contains all bug security fixes from 19.11.9.52 and 19.11.10.54.
+- Fixed a data race in the `system.parts` table and the `ALTER` query. [\#6245](https://github.com/ClickHouse/ClickHouse/issues/6245) [\#6513](https://github.com/ClickHouse/ClickHouse/pull/6513) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a mismatched header in streams that happened in case of reading from an empty distributed table with sample and prewhere. [\#6167](https://github.com/ClickHouse/ClickHouse/issues/6167) ([Lixiang Qian](https://github.com/fancyqlx)) [\#6823](https://github.com/ClickHouse/ClickHouse/pull/6823) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed a crash when using the `IN` clause with a subquery with a tuple. [\#6125](https://github.com/ClickHouse/ClickHouse/issues/6125) [\#6550](https://github.com/ClickHouse/ClickHouse/pull/6550) ([tavplubix](https://github.com/tavplubix))
+- Fix the case with the same column names in the `GLOBAL JOIN ON` section. [\#6181](https://github.com/ClickHouse/ClickHouse/pull/6181) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed a crash when casting types to `Decimal` that do not support it. Throw an exception instead. [\#6297](https://github.com/ClickHouse/ClickHouse/pull/6297) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed a crash in the `extractAll()` function. [\#6644](https://github.com/ClickHouse/ClickHouse/pull/6644) ([Artem Zuikov](https://github.com/4ertus2))
+- Query transformation for the `MySQL`, `ODBC`, `JDBC` table functions now works properly for `SELECT WHERE` queries with multiple `AND` expressions. [\#6381](https://github.com/ClickHouse/ClickHouse/issues/6381) [\#6676](https://github.com/ClickHouse/ClickHouse/pull/6676) ([dimarub2000](https://github.com/dimarub2000))
+- Added previous declaration checks for MySQL 8 integration. [\#6569](https://github.com/ClickHouse/ClickHouse/pull/6569) ([Rafael David Tinoco](https://github.com/rafaeldtinoco))
+
+#### Security Fix {#security-fix-1}
+
+- Fixed two vulnerabilities in codecs in the decompression phase (a malicious user can fabricate compressed data that will lead to a buffer overflow in decompression). [\#6670](https://github.com/ClickHouse/ClickHouse/pull/6670) ([Artem Zuikov](https://github.com/4ertus2))
+
+### ClickHouse release 19.13.3.26, 2019-08-22 {#clickhouse-release-19-13-3-26-2019-08-22}
+
+#### Bug Fix {#bug-fix-12}
+
+- Fixed the `ALTER TABLE ... UPDATE` query for tables with `enable_mixed_granularity_parts=1`. [\#6543](https://github.com/ClickHouse/ClickHouse/pull/6543) ([alesapin](https://github.com/alesapin))
+- Fixed an NPE when using the IN clause with a subquery with a tuple. [\#6125](https://github.com/ClickHouse/ClickHouse/issues/6125) [\#6550](https://github.com/ClickHouse/ClickHouse/pull/6550) ([tavplubix](https://github.com/tavplubix))
+- Fixed an issue that if a stale replica becomes alive, it may still have data parts that were removed by DROP PARTITION. [\#6522](https://github.com/ClickHouse/ClickHouse/issues/6522) [\#6523](https://github.com/ClickHouse/ClickHouse/pull/6523) ([tavplubix](https://github.com/tavplubix))
+- Fixed an issue with parsing CSV. [\#6426](https://github.com/ClickHouse/ClickHouse/issues/6426) [\#6559](https://github.com/ClickHouse/ClickHouse/pull/6559) ([tavplubix](https://github.com/tavplubix))
+- Fixed a data race in the `system.parts` table and the ALTER query. This fixes [\#6245](https://github.com/ClickHouse/ClickHouse/issues/6245). [\#6513](https://github.com/ClickHouse/ClickHouse/pull/6513) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed wrong code in mutations that may lead to memory corruption. Fixed segfault with read of address `0x14c0` that may happen due to concurrent `DROP TABLE` and `SELECT` from `system.parts` or `system.parts_columns`. Fixed race condition in preparation of mutation queries. Fixed deadlock caused by `OPTIMIZE` of Replicated tables and concurrent modification operations like ALTERs. [\#6514](https://github.com/ClickHouse/ClickHouse/pull/6514) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed possible data loss after an `ALTER DELETE` query on a table with a skipping index. [\#6224](https://github.com/ClickHouse/ClickHouse/issues/6224) [\#6282](https://github.com/ClickHouse/ClickHouse/pull/6282) ([Nikita Vasilev](https://github.com/nikvas0))
+
+#### Security Fix {#security-fix-2}
+
+- If the attacker has write access to ZooKeeper and is able to run a custom server available from the network where ClickHouse runs, it can create a custom-built malicious server that will act as a ClickHouse replica and register it in ZooKeeper. When another replica fetches a data part from the malicious replica, it can force clickhouse-server to write to an arbitrary path on the filesystem. Found by Eldar Zaitov, information security team at Yandex. [\#6247](https://github.com/ClickHouse/ClickHouse/pull/6247) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse release 19.13.2.19, 2019-08-14 {#clickhouse-release-19-13-2-19-2019-08-14}
+
+#### New Feature {#new-feature-5}
+
+- Sampling profiler on query level. [Example](https://gist.github.com/alexey-milovidov/92758583dd41c24c360fdb8d6a4da194). [\#4247](https://github.com/ClickHouse/ClickHouse/issues/4247) ([laplab](https://github.com/laplab)) [\#6124](https://github.com/ClickHouse/ClickHouse/pull/6124) ([alexey-milovidov](https://github.com/alexey-milovidov)) [\#6250](https://github.com/ClickHouse/ClickHouse/pull/6250) [\#6283](https://github.com/ClickHouse/ClickHouse/pull/6283) [\#6386](https://github.com/ClickHouse/ClickHouse/pull/6386)
+- Allow to specify a list of columns with the `COLUMNS('regexp')` expression that works like a more sophisticated variant of the `*` asterisk (see the sketch after this list). [\#5951](https://github.com/ClickHouse/ClickHouse/pull/5951) ([mfridental](https://github.com/mfridental)), ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `CREATE TABLE AS table_function()` is now possible (also shown after this list). [\#6057](https://github.com/ClickHouse/ClickHouse/pull/6057) ([dimarub2000](https://github.com/dimarub2000))
+- The Adam optimizer for stochastic gradient descent is used by default in the `stochasticLinearRegression()` and `stochasticLogisticRegression()` aggregate functions, because it shows good quality without almost any tuning. [\#6000](https://github.com/ClickHouse/ClickHouse/pull/6000) ([Quid37](https://github.com/Quid37))
+- Added functions for working with the custom week number. [\#5212](https://github.com/ClickHouse/ClickHouse/pull/5212) ([Andy Yang](https://github.com/andyyzh))
+- `RENAME` queries now work with all storages. [\#5953](https://github.com/ClickHouse/ClickHouse/pull/5953) ([Ivan](https://github.com/abyss7))
+- Now the client receives logs from the server with any desired level by setting `send_logs_level`, regardless of the log level specified in the server settings. [\#5964](https://github.com/ClickHouse/ClickHouse/pull/5964) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
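+
+Sketches for the two features above; `mirror` is a hypothetical table name and `system.numbers` is used only for illustration:
+
+``` sql
+-- COLUMNS('regexp') selects every column whose name matches the regexp.
+SELECT COLUMNS('umb') FROM system.numbers LIMIT 3;
+
+-- A table backed by a table function.
+CREATE TABLE mirror AS remote('127.0.0.1', system.one);
+```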
+ +#### Backward Incompatible Change {#backward-incompatible-change-4} + +- The setting `input_format_defaults_for_omitted_fields` is enabled by default. Inserts into Distributed tables need this setting to be the same across the cluster (you need to set it before rolling out the update). It enables calculation of complex default expressions for omitted fields in `JSONEachRow` and `CSV*` formats. It should be the expected behavior but may lead to a negligible performance difference. [\#6043](https://github.com/ClickHouse/ClickHouse/pull/6043) ([Artem Zuikov](https://github.com/4ertus2)), [\#5625](https://github.com/ClickHouse/ClickHouse/pull/5625) ([akuzm](https://github.com/akuzm)) + +#### Experimental Features {#experimental-features} + +- New query processing pipeline. Use the `experimental_use_processors=1` option to enable it. Use for your own trouble. [\#4914](https://github.com/ClickHouse/ClickHouse/pull/4914) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) + +#### Bug Fix {#bug-fix-13} + +- Kafka integration has been fixed in this version. +- Fixed `DoubleDelta` encoding of `Int64` for large `DoubleDelta` values, improved `DoubleDelta` encoding of random data for `Int32`. [\#5998](https://github.com/ClickHouse/ClickHouse/pull/5998) ([Vasily Nemkov](https://github.com/Enmk)) +- Fixed overestimation of `max_rows_to_read` if the setting `merge_tree_uniform_read_distribution` is set to 0. [\#6019](https://github.com/ClickHouse/ClickHouse/pull/6019) ([alexey-milovidov](https://github.com/alexey-milovidov)) + +#### Improvement {#improvement-4} + +- Throws an exception if a `config.d` file doesn't have the corresponding root element as the config file [\#6123](https://github.com/ClickHouse/ClickHouse/pull/6123) ([dimarub2000](https://github.com/dimarub2000)) + +#### Performance Improvement {#performance-improvement-3} + +- Optimize `count()`. Now it uses the smallest column (if possible). [\#6028](https://github.com/ClickHouse/ClickHouse/pull/6028) ([Amos Bird](https://github.com/amosbird)) + +#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-5} + +- Report memory usage in performance tests. [\#5899](https://github.com/ClickHouse/ClickHouse/pull/5899) ([akuzm](https://github.com/akuzm)) +- Fix the build with external `libcxx` [\#6010](https://github.com/ClickHouse/ClickHouse/pull/6010) ([Ivan](https://github.com/abyss7)) +- Fix the shared build with the `rdkafka` library [\#6101](https://github.com/ClickHouse/ClickHouse/pull/6101) ([Ivan](https://github.com/abyss7)) + +## ClickHouse release 19.11 {#clickhouse-release-19-11} + +### ClickHouse release 19.11.13.74, 2019-11-01 {#clickhouse-release-19-11-13-74-2019-11-01} + +#### Bug Fix {#bug-fix-14} + +- Fixed a rare crash in `ALTER MODIFY COLUMN` and vertical merge when one of the merged/altered parts is empty (0 rows). [\#6780](https://github.com/ClickHouse/ClickHouse/pull/6780) ([alesapin](https://github.com/alesapin)) +- Manual update of `SIMDJSON`. This fixes possible flooding of stderr files with bogus json diagnostic messages. [\#7548](https://github.com/ClickHouse/ClickHouse/pull/7548) ([Alexander Kazakov](https://github.com/Akazz)) +- Fixed a bug with the `mrk` file extension for mutations ([alesapin](https://github.com/alesapin)) + +### ClickHouse release 19.11.12.69, 2019-10-02 {#clickhouse-release-19-11-12-69-2019-10-02} + +#### Bug Fix {#bug-fix-15} + +- Fixed performance degradation of index analysis on complex keys on large tables. This fixes [\#6924](https://github.com/ClickHouse/ClickHouse/issues/6924). [\#7075](https://github.com/ClickHouse/ClickHouse/pull/7075) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Avoid rare SIGSEGV while sending data in tables with the Distributed engine (`Failed to send batch: file with index XXXXX is absent`). [\#7032](https://github.com/ClickHouse/ClickHouse/pull/7032) ([Azat Khuzhin](https://github.com/azat)) +- Fix `Unknown identifier` with multiple joins. This fixes [\#5254](https://github.com/ClickHouse/ClickHouse/issues/5254). [\#7022](https://github.com/ClickHouse/ClickHouse/pull/7022) ([Artem Zuikov](https://github.com/4ertus2)) + +### ClickHouse release 19.11.11.57, 2019-09-13 {#clickhouse-release-19-11-11-57-2019-09-13} + +- Fixed a logical error causing segfaults when selecting from a Kafka empty topic. [\#6902](https://github.com/ClickHouse/ClickHouse/issues/6902) [\#6909](https://github.com/ClickHouse/ClickHouse/pull/6909) ([Ivan](https://github.com/abyss7)) +- Fix for the function `arrayEnumerateUniqRanked` with empty arrays in params. [\#6928](https://github.com/ClickHouse/ClickHouse/pull/6928) ([proller](https://github.com/proller)) + +### ClickHouse release 19.11.10.54, 2019-09-10 {#clickhouse-release-19-11-10-54-2019-09-10} + +#### Bug Fix {#bug-fix-16} + +- Store offsets for Kafka messages manually to be able to commit them all at once for all partitions. Fixes potential duplication in the «one consumer - many partitions» scenario. [\#6872](https://github.com/ClickHouse/ClickHouse/pull/6872) ([Ivan](https://github.com/abyss7)) + +### ClickHouse release 19.11.9.52, 2019-09-6 {#clickhouse-release-19-11-9-52-2019-09-6} + +- Improved error handling in cache dictionaries.
[\#6737](https://github.com/ClickHouse/ClickHouse/pull/6737) ([Vitaly Baranov](https://github.com/vitlibar)) +- Fixed an error in the function `arrayEnumerateUniqRanked`. [\#6779](https://github.com/ClickHouse/ClickHouse/pull/6779) ([proller](https://github.com/proller)) +- Fix the `JSONExtract` function while extracting a `Tuple` from JSON. [\#6718](https://github.com/ClickHouse/ClickHouse/pull/6718) ([Vitaly Baranov](https://github.com/vitlibar)) +- Fixed possible data loss after an `ALTER DELETE` query on a table with a skipping index. [\#6224](https://github.com/ClickHouse/ClickHouse/issues/6224) [\#6282](https://github.com/ClickHouse/ClickHouse/pull/6282) ([Nikita Vasilev](https://github.com/nikvas0)) +- Fixed performance test. [\#6392](https://github.com/ClickHouse/ClickHouse/pull/6392) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Parquet: fix reading boolean columns. [\#6579](https://github.com/ClickHouse/ClickHouse/pull/6579) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixed wrong behaviour of the `nullIf` function for constant arguments. [\#6518](https://github.com/ClickHouse/ClickHouse/pull/6518) ([Guillaume Tassery](https://github.com/YiuRULE)) [\#6580](https://github.com/ClickHouse/ClickHouse/pull/6580) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixed the Kafka messages duplication problem on normal server restart. [\#6597](https://github.com/ClickHouse/ClickHouse/pull/6597) ([Ivan](https://github.com/abyss7)) +- Fixed an issue when a long `ALTER UPDATE` or `ALTER DELETE` may prevent regular merges from running. Prevent mutations from executing if there are not enough free threads available. [\#6502](https://github.com/ClickHouse/ClickHouse/issues/6502) [\#6617](https://github.com/ClickHouse/ClickHouse/pull/6617) ([tavplubix](https://github.com/tavplubix)) +- Fixed an error with processing «timezone» in the server configuration file. [\#6709](https://github.com/ClickHouse/ClickHouse/pull/6709) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fix Kafka tests. [\#6805](https://github.com/ClickHouse/ClickHouse/pull/6805) ([Ivan](https://github.com/abyss7)) + +#### Security Fix {#security-fix-3} + +- If an attacker has write access to ZooKeeper and is able to run a custom server available from the network where ClickHouse runs, they can create a custom-built malicious server that will act as a ClickHouse replica and register it in ZooKeeper. When another replica fetches a data part from the malicious replica, it can force clickhouse-server to write to an arbitrary path on the filesystem. Found by Eldar Zaitov, information security team at Yandex. [\#6247](https://github.com/ClickHouse/ClickHouse/pull/6247) ([alexey-milovidov](https://github.com/alexey-milovidov)) + +### ClickHouse release 19.11.8.46, 2019-08-22 {#clickhouse-release-19-11-8-46-2019-08-22} + +#### Bug Fix {#bug-fix-17} + +- Fix the `ALTER TABLE ... UPDATE` query for tables with `enable_mixed_granularity_parts=1`. [\#6543](https://github.com/ClickHouse/ClickHouse/pull/6543) ([alesapin](https://github.com/alesapin)) +- Fix NPE when using the IN clause with a subquery with a tuple.
[\#6125](https://github.com/ClickHouse/ClickHouse/issues/6125) [\#6550](https://github.com/ClickHouse/ClickHouse/pull/6550) ([tavplubix](https://github.com/tavplubix)) +- Fixed the issue that if a stale replica becomes alive, it may still have data parts that were removed by DROP PARTITION. [\#6522](https://github.com/ClickHouse/ClickHouse/issues/6522) [\#6523](https://github.com/ClickHouse/ClickHouse/pull/6523) ([tavplubix](https://github.com/tavplubix)) +- Fixed an issue with parsing CSV [\#6426](https://github.com/ClickHouse/ClickHouse/issues/6426) [\#6559](https://github.com/ClickHouse/ClickHouse/pull/6559) ([tavplubix](https://github.com/tavplubix)) +- Fixed a data race in the `system.parts` table and ALTER query. This fixes [\#6245](https://github.com/ClickHouse/ClickHouse/issues/6245). [\#6513](https://github.com/ClickHouse/ClickHouse/pull/6513) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixed wrong code in mutations that may lead to memory corruption. Fixed a segfault with read of address `0x14c0` that may happen due to concurrent `DROP TABLE` and `SELECT` from `system.parts` or `system.parts_columns`. Fixed a race condition in preparation of mutation queries. Fixed a deadlock caused by `OPTIMIZE` of Replicated tables and concurrent modification operations like ALTERs. [\#6514](https://github.com/ClickHouse/ClickHouse/pull/6514) ([alexey-milovidov](https://github.com/alexey-milovidov)) + +### ClickHouse release 19.11.7.40, 2019-08-14 {#clickhouse-release-19-11-7-40-2019-08-14} + +#### Bug Fix {#bug-fix-18} + +- Kafka integration has been fixed in this version. +- Fixed a segfault when using `arrayReduce` for constant arguments. [\#6326](https://github.com/ClickHouse/ClickHouse/pull/6326) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixed `toFloat()` monotonicity. [\#6374](https://github.com/ClickHouse/ClickHouse/pull/6374) ([dimarub2000](https://github.com/dimarub2000)) +- Fixed a segfault with enabled `optimize_skip_unused_shards` and a missing sharding key. [\#6384](https://github.com/ClickHouse/ClickHouse/pull/6384) ([CurtizJ](https://github.com/CurtizJ)) +- Fixed the logic of the `arrayEnumerateUniqRanked` function. [\#6423](https://github.com/ClickHouse/ClickHouse/pull/6423) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Removed extra verbose logging from the MySQL handler. [\#6389](https://github.com/ClickHouse/ClickHouse/pull/6389) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fix wrong behavior and possible segfaults in the `topK` and `topKWeighted` aggregate functions. [\#6404](https://github.com/ClickHouse/ClickHouse/pull/6404) ([CurtizJ](https://github.com/CurtizJ)) +- Do not expose virtual columns in the `system.columns` table. This is required for backward compatibility. [\#6406](https://github.com/ClickHouse/ClickHouse/pull/6406) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixed a bug with memory allocation for string fields in complex key cache dictionaries. [\#6447](https://github.com/ClickHouse/ClickHouse/pull/6447) ([alesapin](https://github.com/alesapin)) +- Fixed a bug with enabling adaptive granularity when creating a new replica for a `Replicated*MergeTree` table.
[\#6452](https://github.com/ClickHouse/ClickHouse/pull/6452) ([alesapin](https://github.com/alesapin)) +- Fix infinite loop when reading Kafka messages. [\#6354](https://github.com/ClickHouse/ClickHouse/pull/6354) ([abyss7](https://github.com/abyss7)) +- Fixed the possibility of a fabricated query causing a server crash due to stack overflow in the SQL parser, and the possibility of stack overflow in `Merge` and `Distributed` tables [\#6433](https://github.com/ClickHouse/ClickHouse/pull/6433) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixed a Gorilla encoding error on small sequences. [\#6444](https://github.com/ClickHouse/ClickHouse/pull/6444) ([Enmk](https://github.com/Enmk)) + +#### Improvement {#improvement-5} + +- Allow the user to override the `poll_interval` and `idle_connection_timeout` settings on connection. [\#6230](https://github.com/ClickHouse/ClickHouse/pull/6230) ([alexey-milovidov](https://github.com/alexey-milovidov)) + +### ClickHouse release 19.11.5.28, 2019-08-05 {#clickhouse-release-19-11-5-28-2019-08-05} + +#### Bug Fix {#bug-fix-19} + +- Fixed the possibility of hanging queries when the server is overloaded. [\#6301](https://github.com/ClickHouse/ClickHouse/pull/6301) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fix FPE in the yandexConsistentHash function. This fixes [\#6304](https://github.com/ClickHouse/ClickHouse/issues/6304). [\#6126](https://github.com/ClickHouse/ClickHouse/pull/6126) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixed a bug in conversion of `LowCardinality` types in `AggregateFunctionFactory`. This fixes [\#6257](https://github.com/ClickHouse/ClickHouse/issues/6257). [\#6281](https://github.com/ClickHouse/ClickHouse/pull/6281) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +- Fix parsing of `bool` settings from `true` and `false` strings in configuration files. [\#6278](https://github.com/ClickHouse/ClickHouse/pull/6278) ([alesapin](https://github.com/alesapin)) +- Fixed a rare bug with incompatible stream headers in queries to a `Distributed` table over a `MergeTree` table when part of `WHERE` moves to `PREWHERE`. [\#6236](https://github.com/ClickHouse/ClickHouse/pull/6236) ([alesapin](https://github.com/alesapin)) +- Fixed overflow in integer division of a signed type by an unsigned one. This fixes [\#6214](https://github.com/ClickHouse/ClickHouse/issues/6214). [\#6233](https://github.com/ClickHouse/ClickHouse/pull/6233) ([alexey-milovidov](https://github.com/alexey-milovidov)) + +#### Backward Incompatible Change {#backward-incompatible-change-5} + +- `Kafka` is still broken. + +### ClickHouse release 19.11.4.24, 2019-08-01 {#clickhouse-release-19-11-4-24-2019-08-01} + +#### Bug Fix {#bug-fix-20} + +- Fixed a bug with writing secondary index marks with adaptive granularity. [\#6126](https://github.com/ClickHouse/ClickHouse/pull/6126) ([alesapin](https://github.com/alesapin)) +- Fix the `WITH ROLLUP` and `WITH CUBE` modifiers of `GROUP BY` with two-level aggregation. [\#6225](https://github.com/ClickHouse/ClickHouse/pull/6225) ([Anton Popov](https://github.com/CurtizJ)) +- Fixed a hang in the `JSONExtractRaw` function. Fixed [\#6195](https://github.com/ClickHouse/ClickHouse/issues/6195) [\#6198](https://github.com/ClickHouse/ClickHouse/pull/6198) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fix segfault in ExternalLoader::reloadOutdated().
[\#6082](https://github.com/ClickHouse/ClickHouse/pull/6082) ([Vitaly Baranov](https://github.com/vitlibar)) +- Fixed the case when the server may close listening sockets but not shut down, and continue serving remaining queries. You may end up with two running clickhouse-server processes. Sometimes the server may return a `bad_function_call` error for remaining queries. [\#6231](https://github.com/ClickHouse/ClickHouse/pull/6231) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixed a useless and incorrect condition on the update field for initial loading of external dictionaries via ODBC, MySQL, ClickHouse and HTTP. This fixes [\#6069](https://github.com/ClickHouse/ClickHouse/issues/6069) [\#6083](https://github.com/ClickHouse/ClickHouse/pull/6083) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixed an irrelevant exception in cast of `LowCardinality(Nullable)` to not-Nullable column in case if it doesn't contain Nulls (e.g. in a query like `SELECT CAST(CAST('Hello' AS LowCardinality(Nullable(String))) AS String)`. [\#6094](https://github.com/ClickHouse/ClickHouse/issues/6094) [\#6119](https://github.com/ClickHouse/ClickHouse/pull/6119) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +- Fix non-deterministic result of the «uniq» aggregate function in extremely rare cases. The bug was present in all ClickHouse versions. [\#6058](https://github.com/ClickHouse/ClickHouse/pull/6058) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixed a segfault when we set a little bit too high CIDR on the function `IPv6CIDRToRange`. [\#6068](https://github.com/ClickHouse/ClickHouse/pull/6068) ([Guillaume Tassery](https://github.com/YiuRULE)) +- Fixed a small memory leak when the server throws many exceptions from many different contexts. [\#6144](https://github.com/ClickHouse/ClickHouse/pull/6144) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fix the situation when the consumer got paused before subscription and was not resumed afterwards. [\#6075](https://github.com/ClickHouse/ClickHouse/pull/6075) ([Ivan](https://github.com/abyss7)) Note that Kafka is broken in this version. +- Clear the Kafka data buffer from a previous read operation that completed with an error [\#6026](https://github.com/ClickHouse/ClickHouse/pull/6026) ([Nikolay](https://github.com/bopohaa)) Note that Kafka is broken in this version. +- Since `StorageMergeTree::background_task_handle` is initialized in `startup()`, `MergeTreeBlockOutputStream::write()` may try to use it before initialization. Just check if it is initialized. [\#6080](https://github.com/ClickHouse/ClickHouse/pull/6080) ([Ivan](https://github.com/abyss7)) + +#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-6} + +- Added official `rpm` packages. [\#5740](https://github.com/ClickHouse/ClickHouse/pull/5740) ([proller](https://github.com/proller)) ([alesapin](https://github.com/alesapin)) +- Add the ability to build `.rpm` and `.tgz` packages with the `packager` script. [\#5769](https://github.com/ClickHouse/ClickHouse/pull/5769) ([alesapin](https://github.com/alesapin)) +- Fixes for the «Arcadia» build system. [\#6223](https://github.com/ClickHouse/ClickHouse/pull/6223) ([proller](https://github.com/proller)) + +#### Backward Incompatible Change {#backward-incompatible-change-6} + +- `Kafka` is broken in this version.
+ +### ClickHouse release 19.11.3.11, 2019-07-18 {#clickhouse-release-19-11-3-11-2019-07-18} + +#### New Feature {#new-feature-6} + +- Added support for prepared statements. [\#5331](https://github.com/ClickHouse/ClickHouse/pull/5331/) ([Alexander](https://github.com/sanych73)) [\#5630](https://github.com/ClickHouse/ClickHouse/pull/5630) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- `DoubleDelta` and `Gorilla` column codecs [\#5600](https://github.com/ClickHouse/ClickHouse/pull/5600) ([Vasily Nemkov](https://github.com/Enmk)) +- Added the `os_thread_priority` setting that allows to control the «nice» value of query processing threads that is used by the OS to adjust dynamic scheduling priority. It requires `CAP_SYS_NICE` capabilities to work. This implements [\#5858](https://github.com/ClickHouse/ClickHouse/issues/5858) [\#5909](https://github.com/ClickHouse/ClickHouse/pull/5909) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Implement `_topic`, `_offset`, `_key` columns for the Kafka engine [\#5382](https://github.com/ClickHouse/ClickHouse/pull/5382) ([Ivan](https://github.com/abyss7)) Note that Kafka is broken in this version. +- Add the aggregate function combinator `-Resample` [\#5590](https://github.com/ClickHouse/ClickHouse/pull/5590) ([hcz](https://github.com/hczhcz)) +- Aggregate functions `groupArrayMovingSum(win_size)(x)` and `groupArrayMovingAvg(win_size)(x)`, which calculate the moving sum/avg with or without window-size limitation (see the sketch after this list). [\#5595](https://github.com/ClickHouse/ClickHouse/pull/5595) ([inv2004](https://github.com/inv2004)) +- Add the synonym `arrayFlatten` \<-\> `flatten` [\#5764](https://github.com/ClickHouse/ClickHouse/pull/5764) ([hcz](https://github.com/hczhcz)) +- Integrated the H3 function `geoToH3` from Uber. [\#4724](https://github.com/ClickHouse/ClickHouse/pull/4724) ([Ivan Remen](https://github.com/BHYCHIK)) [\#5805](https://github.com/ClickHouse/ClickHouse/pull/5805) ([alexey-milovidov](https://github.com/alexey-milovidov))
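+A small, self-contained sketch of the new moving-window aggregates from the list above:
+
+```sql
+-- Moving sum with a window of 3 over the values 0..4.
+-- The result is [0, 1, 3, 6, 9]: each element sums at most
+-- the last 3 values seen so far.
+SELECT groupArrayMovingSum(3)(value)
+FROM (SELECT number AS value FROM numbers(5));
+```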
+ +#### Bug Fix {#bug-fix-21} + +- Implement a DNS cache with asynchronous update. A separate thread resolves all hosts and updates the DNS cache with a period (setting `dns_cache_update_period`). It should help when the IP of hosts changes frequently. [\#5857](https://github.com/ClickHouse/ClickHouse/pull/5857) ([Anton Popov](https://github.com/CurtizJ)) +- Fixed a segfault in the `Delta` codec which affects columns with values less than 32 bits in size. The bug led to random memory corruption. [\#5786](https://github.com/ClickHouse/ClickHouse/pull/5786) ([alesapin](https://github.com/alesapin)) +- Fixed a segfault in TTL merge with non-physical columns in the block. [\#5819](https://github.com/ClickHouse/ClickHouse/pull/5819) ([Anton Popov](https://github.com/CurtizJ)) +- Fixed a rare bug in checking of a part with a `LowCardinality` column. Previously `checkDataPart` always failed for a part with a `LowCardinality` column. [\#5832](https://github.com/ClickHouse/ClickHouse/pull/5832) ([alesapin](https://github.com/alesapin)) +- Avoid hanging connections when the server thread pool is full. It is important for connections from the `remote` table function or connections to a shard without replicas when there is a long connection timeout. This fixes [\#5878](https://github.com/ClickHouse/ClickHouse/issues/5878) [\#5881](https://github.com/ClickHouse/ClickHouse/pull/5881) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Support constant arguments to the `evalMLModel` function. This fixes [\#5817](https://github.com/ClickHouse/ClickHouse/issues/5817) [\#5820](https://github.com/ClickHouse/ClickHouse/pull/5820) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixed the issue when ClickHouse determines the default time zone as `UCT` instead of `UTC`. This fixes [\#5804](https://github.com/ClickHouse/ClickHouse/issues/5804). [\#5828](https://github.com/ClickHouse/ClickHouse/pull/5828) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixed buffer underflow in `visitParamExtractRaw`. This fixes [\#5901](https://github.com/ClickHouse/ClickHouse/issues/5901) [\#5902](https://github.com/ClickHouse/ClickHouse/pull/5902) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Now distributed `DROP/ALTER/TRUNCATE/OPTIMIZE ON CLUSTER` queries will be executed directly on the leader replica. [\#5757](https://github.com/ClickHouse/ClickHouse/pull/5757) ([alesapin](https://github.com/alesapin)) +- Fix `coalesce` for `ColumnConst` with `ColumnNullable` + related changes. [\#5755](https://github.com/ClickHouse/ClickHouse/pull/5755) ([Artem Zuikov](https://github.com/4ertus2)) +- Fix the `ReadBufferFromKafkaConsumer` so that it keeps reading new messages after `commit()` even if it was stalled before [\#5852](https://github.com/ClickHouse/ClickHouse/pull/5852) ([Ivan](https://github.com/abyss7)) +- Fix `FULL` and `RIGHT` JOIN results when joining on `Nullable` keys in the right table. [\#5859](https://github.com/ClickHouse/ClickHouse/pull/5859) ([Artem Zuikov](https://github.com/4ertus2)) +- Possible fix of infinite sleeping of low-priority queries. [\#5842](https://github.com/ClickHouse/ClickHouse/pull/5842) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixed a race condition which causes some queries to possibly not appear in query\_log after a `SYSTEM FLUSH LOGS` query. [\#5456](https://github.com/ClickHouse/ClickHouse/issues/5456) [\#5685](https://github.com/ClickHouse/ClickHouse/pull/5685) ([Anton Popov](https://github.com/CurtizJ)) +- Fixed a `heap-use-after-free` ASan warning in ClusterCopier caused by a watch that tried to use an already removed copier object. [\#5871](https://github.com/ClickHouse/ClickHouse/pull/5871) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +- Fixed a wrong `StringRef` pointer returned by some implementations of `IColumn::deserializeAndInsertFromArena`. This bug affected only unit tests. [\#5973](https://github.com/ClickHouse/ClickHouse/pull/5973) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +- Prevent source and intermediate array join columns from masking same-name columns. [\#5941](https://github.com/ClickHouse/ClickHouse/pull/5941) ([Artem Zuikov](https://github.com/4ertus2)) +- Fixed insert and select queries to the MySQL engine with MySQL-style identifier quoting. [\#5704](https://github.com/ClickHouse/ClickHouse/pull/5704) ([Winter Zhang](https://github.com/zhang2014)) +- Now the `CHECK TABLE` query can work with the MergeTree engine family.
It returns the check status and a message, if any, for each part (or file in the case of simpler engines). Also, fixed a bug in the fetch of a broken part. [\#5865](https://github.com/ClickHouse/ClickHouse/pull/5865) ([alesapin](https://github.com/alesapin)) +- Fixed the SPLIT\_SHARED\_LIBRARIES runtime [\#5793](https://github.com/ClickHouse/ClickHouse/pull/5793) ([Danila Kutenin](https://github.com/danlark1)) +- Fixed time zone initialization when `/etc/localtime` is a relative symlink like `../usr/share/zoneinfo/Europe/Moscow` [\#5922](https://github.com/ClickHouse/ClickHouse/pull/5922) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- clickhouse-copier: fixed use-after-free on shutdown [\#5752](https://github.com/ClickHouse/ClickHouse/pull/5752) ([proller](https://github.com/proller)) +- Updated `simdjson`. Fixed the issue that some invalid JSONs with zero bytes successfully parse. [\#5938](https://github.com/ClickHouse/ClickHouse/pull/5938) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixed shutdown of SystemLogs [\#5802](https://github.com/ClickHouse/ClickHouse/pull/5802) ([Anton Popov](https://github.com/CurtizJ)) +- Fix hanging when a condition in invalidate\_query depends on a dictionary. [\#6011](https://github.com/ClickHouse/ClickHouse/pull/6011) ([Vitaly Baranov](https://github.com/vitlibar)) + +#### Improvement {#improvement-6} + +- Allow unresolvable addresses in cluster configuration. They will be considered unavailable and tried to resolve at every connection attempt. This is especially useful for Kubernetes. This fixes [\#5714](https://github.com/ClickHouse/ClickHouse/issues/5714) [\#5924](https://github.com/ClickHouse/ClickHouse/pull/5924) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Close idle TCP connections (with a one hour timeout by default). This is especially important for large clusters with multiple distributed tables on every server, because every server can possibly keep a connection pool to every other server, and after peak query concurrency, connections will stall. This fixes [\#5879](https://github.com/ClickHouse/ClickHouse/issues/5879) [\#5880](https://github.com/ClickHouse/ClickHouse/pull/5880) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Better quality of the `topK` function. Changed the SavingSpace set behavior to remove the last element if the new element has a bigger weight. [\#5833](https://github.com/ClickHouse/ClickHouse/issues/5833) [\#5850](https://github.com/ClickHouse/ClickHouse/pull/5850) ([Guillaume Tassery](https://github.com/YiuRULE)) +- URL functions that work with domains now can work for incomplete URLs without a scheme [\#5725](https://github.com/ClickHouse/ClickHouse/pull/5725) ([alesapin](https://github.com/alesapin)) +- Checksums added to the `system.parts_columns` table. [\#5874](https://github.com/ClickHouse/ClickHouse/pull/5874) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) +- Added the `Enum` data type as a synonym for `Enum8` or `Enum16`. [\#5886](https://github.com/ClickHouse/ClickHouse/pull/5886) ([dimarub2000](https://github.com/dimarub2000)) +- Full bit transpose variant for the `T64` codec. Could lead to better compression with `zstd`.
[\#5742](https://github.com/ClickHouse/ClickHouse/pull/5742) ([Artem Zuikov](https://github.com/4ertus2)) +- A condition on the `startsWith` function now can use the primary key. This fixes [\#5310](https://github.com/ClickHouse/ClickHouse/issues/5310) and [\#5882](https://github.com/ClickHouse/ClickHouse/issues/5882) [\#5919](https://github.com/ClickHouse/ClickHouse/pull/5919) ([dimarub2000](https://github.com/dimarub2000)) +- Allow to use `clickhouse-copier` with a cross-replication cluster topology by permitting an empty database name. [\#5745](https://github.com/ClickHouse/ClickHouse/pull/5745) ([nvartolomei](https://github.com/nvartolomei)) +- Use `UTC` as the default timezone on a system without `tzdata` (e.g. bare Docker container). Before this patch, the error message `Could not determine local time zone` was printed and the server or client refused to start. [\#5827](https://github.com/ClickHouse/ClickHouse/pull/5827) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Returned back support for the floating point argument in the function `quantileTiming` for backward compatibility. [\#5911](https://github.com/ClickHouse/ClickHouse/pull/5911) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Show which table is missing a column in error messages. [\#5768](https://github.com/ClickHouse/ClickHouse/pull/5768) ([Ivan](https://github.com/abyss7)) +- Disallow running a query with the same query\_id by various users [\#5430](https://github.com/ClickHouse/ClickHouse/pull/5430) ([proller](https://github.com/proller)) +- More robust code for sending metrics to Graphite. It will work even during long multiple `RENAME TABLE` operations. [\#5875](https://github.com/ClickHouse/ClickHouse/pull/5875) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- More informative error messages will be displayed when ThreadPool cannot schedule a task for execution. This fixes [\#5305](https://github.com/ClickHouse/ClickHouse/issues/5305) [\#5801](https://github.com/ClickHouse/ClickHouse/pull/5801) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Inverting ngramSearch to be more intuitive [\#5807](https://github.com/ClickHouse/ClickHouse/pull/5807) ([Danila Kutenin](https://github.com/danlark1)) +- Add user parsing in the HDFS engine builder [\#5946](https://github.com/ClickHouse/ClickHouse/pull/5946) ([akonyaev90](https://github.com/akonyaev90)) +- Update the default value of the `max_ast_elements` parameter [\#5933](https://github.com/ClickHouse/ClickHouse/pull/5933) ([Artem Konovalov](https://github.com/izebit)) +- Added a notion of obsolete settings. The obsolete setting `allow_experimental_low_cardinality_type` can be used with no effect. [0f15c01c6802f7ce1a1494c12c846be8c98944cd](https://github.com/ClickHouse/ClickHouse/commit/0f15c01c6802f7ce1a1494c12c846be8c98944cd) [Alexey Milovidov](https://github.com/alexey-milovidov) + +#### Performance Improvement {#performance-improvement-4} + +- Increase the number of streams to SELECT from a Merge table for a more uniform distribution of threads. Added the setting `max_streams_multiplier_for_merge_tables` (see the sketch below). This fixes [\#5797](https://github.com/ClickHouse/ClickHouse/issues/5797) [\#5915](https://github.com/ClickHouse/ClickHouse/pull/5915) ([alexey-milovidov](https://github.com/alexey-milovidov))
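+A hedged sketch of applying the new setting per query; `merge_table` is a hypothetical `Merge`-engine table:
+
+```sql
+-- Give SELECTs over a Merge table more parallel streams than the default.
+SET max_streams_multiplier_for_merge_tables = 4;
+SELECT count() FROM merge_table;
+```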
+ +#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-7} + +- Add a backward compatibility test for client-server interaction with different versions of clickhouse. [\#5868](https://github.com/ClickHouse/ClickHouse/pull/5868) ([alesapin](https://github.com/alesapin)) +- Test coverage information in every commit and pull request. [\#5896](https://github.com/ClickHouse/ClickHouse/pull/5896) ([alesapin](https://github.com/alesapin)) +- Cooperate with the address sanitizer to support our custom allocators (`Arena` and `ArenaWithFreeLists`) for better debugging of «use-after-free» errors. [\#5728](https://github.com/ClickHouse/ClickHouse/pull/5728) ([akuzm](https://github.com/akuzm)) +- Switch to the [LLVM libunwind implementation](https://github.com/llvm-mirror/libunwind) for C++ exception handling and for stack trace printing [\#4828](https://github.com/ClickHouse/ClickHouse/pull/4828) ([Nikita Lapkov](https://github.com/laplab)) +- Add two more warnings from -Weverything [\#5923](https://github.com/ClickHouse/ClickHouse/pull/5923) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Allow to build ClickHouse with Memory Sanitizer. [\#3949](https://github.com/ClickHouse/ClickHouse/pull/3949) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixed a sanitizer report about the `bitTest` function in the fuzz test. [\#5943](https://github.com/ClickHouse/ClickHouse/pull/5943) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Setup: added the ability to initialize a ClickHouse instance which requires authentication. [\#5727](https://github.com/ClickHouse/ClickHouse/pull/5727) ([Korviakov Andrey](https://github.com/shurshun)) +- Update librdkafka to version 1.1.0 [\#5872](https://github.com/ClickHouse/ClickHouse/pull/5872) ([Ivan](https://github.com/abyss7)) +- Add a global timeout for integration tests and disable some of them in the test code. [\#5741](https://github.com/ClickHouse/ClickHouse/pull/5741) ([alesapin](https://github.com/alesapin)) +- Fix some ThreadSanitizer failures. [\#5854](https://github.com/ClickHouse/ClickHouse/pull/5854) ([akuzm](https://github.com/akuzm)) +- The `--no-undefined` option forces the linker to check all external names for existence while linking. It's very useful to track real dependencies between libraries in the split build mode. [\#5855](https://github.com/ClickHouse/ClickHouse/pull/5855) ([Ivan](https://github.com/abyss7)) +- Added a performance test for [\#5797](https://github.com/ClickHouse/ClickHouse/issues/5797) [\#5914](https://github.com/ClickHouse/ClickHouse/pull/5914) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixed compatibility with gcc-7. [\#5840](https://github.com/ClickHouse/ClickHouse/pull/5840) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Added support for gcc-9. This fixes [\#5717](https://github.com/ClickHouse/ClickHouse/issues/5717) [\#5774](https://github.com/ClickHouse/ClickHouse/pull/5774) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixed an error when libunwind can be linked incorrectly.
[\#5948](https://github.com/ClickHouse/ClickHouse/pull/5948) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixed a few warnings found by PVS-Studio. [\#5921](https://github.com/ClickHouse/ClickHouse/pull/5921) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Added initial support for the `clang-tidy` static analyzer. [\#5806](https://github.com/ClickHouse/ClickHouse/pull/5806) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Convert BSD/Linux endian macros (‘be64toh’ and ‘htobe64’) to the Mac OS X equivalents [\#5785](https://github.com/ClickHouse/ClickHouse/pull/5785) ([Fu Chen](https://github.com/fredchenbj)) +- Improved the integration tests guide. [\#5796](https://github.com/ClickHouse/ClickHouse/pull/5796) ([Vladimir Chebotarev](https://github.com/excitoon)) +- Fixing build at macosx + gcc9 [\#5822](https://github.com/ClickHouse/ClickHouse/pull/5822) ([filimonov](https://github.com/filimonov)) +- Fix a hard-to-spot typo: aggreAGte -\> aggregate. [\#5753](https://github.com/ClickHouse/ClickHouse/pull/5753) ([akuzm](https://github.com/akuzm)) +- Fixed the freebsd build [\#5760](https://github.com/ClickHouse/ClickHouse/pull/5760) ([proller](https://github.com/proller)) +- Add a link to the experimental YouTube channel to the website [\#5845](https://github.com/ClickHouse/ClickHouse/pull/5845) ([Ivan Blinkov](https://github.com/blinkov)) +- CMake: add an option for coverage flags: WITH\_COVERAGE [\#5776](https://github.com/ClickHouse/ClickHouse/pull/5776) ([proller](https://github.com/proller)) +- Fix initial size of some inline PODArrays. [\#5787](https://github.com/ClickHouse/ClickHouse/pull/5787) ([akuzm](https://github.com/akuzm)) +- clickhouse-server.postinst: fixed OS detection for centos 6 [\#5788](https://github.com/ClickHouse/ClickHouse/pull/5788) ([proller](https://github.com/proller)) +- Added Arch linux package generation. [\#5719](https://github.com/ClickHouse/ClickHouse/pull/5719) ([Vladimir Chebotarev](https://github.com/excitoon)) +- Split Common/config.h by libs (dbms) [\#5715](https://github.com/ClickHouse/ClickHouse/pull/5715) ([proller](https://github.com/proller)) +- Fixes for the «Arcadia» build platform [\#5795](https://github.com/ClickHouse/ClickHouse/pull/5795) ([proller](https://github.com/proller)) +- Fixes for unconventional builds (gcc9, no submodules) [\#5792](https://github.com/ClickHouse/ClickHouse/pull/5792) ([proller](https://github.com/proller)) +- Require an explicit type in unalignedStore because it was proven to be bug-prone [\#5791](https://github.com/ClickHouse/ClickHouse/pull/5791) ([akuzm](https://github.com/akuzm)) +- Fixed the MacOS build [\#5830](https://github.com/ClickHouse/ClickHouse/pull/5830) ([filimonov](https://github.com/filimonov)) +- Performance test concerning the new JIT feature with a bigger dataset, as requested here [\#5263](https://github.com/ClickHouse/ClickHouse/issues/5263) [\#5887](https://github.com/ClickHouse/ClickHouse/pull/5887) ([Guillaume Tassery](https://github.com/YiuRULE)) +- Run stateful tests in the stress test [12693e568722f11e19859742f56428455501fd2a](https://github.com/ClickHouse/ClickHouse/commit/12693e568722f11e19859742f56428455501fd2a) ([alesapin](https://github.com/alesapin)) + +#### Backward Incompatible Change {#backward-incompatible-change-7} + +- `Kafka` is broken in this version.
+- Enable `adaptive_index_granularity` = 10MB by default for new `MergeTree` tables. If you created new MergeTree tables on version 19.11+, a downgrade to versions prior to 19.6 will be impossible. [\#5628](https://github.com/ClickHouse/ClickHouse/pull/5628) ([alesapin](https://github.com/alesapin)) +- Removed obsolete undocumented embedded dictionaries that were used by Yandex.Metrica. The functions `OSIn`, `SEIn`, `OSToRoot`, `SEToRoot`, `OSHierarchy`, `SEHierarchy` are no longer available. If you are using these functions, write an email to clickhouse-feedback@yandex-team.com. Note: at the last moment we decided to keep these functions for a while. [\#5780](https://github.com/ClickHouse/ClickHouse/pull/5780) ([alexey-milovidov](https://github.com/alexey-milovidov)) + +## ClickHouse release 19.10 {#clickhouse-release-19-10} + +### ClickHouse release 19.10.1.5, 2019-07-12 {#clickhouse-release-19-10-1-5-2019-07-12} + +#### New Feature {#new-feature-7} + +- Add a new column codec: `T64`. Made for (U)IntX/EnumX/Date(Time)/DecimalX columns. It should be good for columns with constant or small range values. The codec itself allows to enlarge or shrink the data type without re-compression. [\#5557](https://github.com/ClickHouse/ClickHouse/pull/5557) ([Artem Zuikov](https://github.com/4ertus2)) +- Add the database engine `MySQL` that allows to view all the tables on a remote MySQL server [\#5599](https://github.com/ClickHouse/ClickHouse/pull/5599) ([Winter Zhang](https://github.com/zhang2014)) +- `bitmapContains` implementation. It's 2x faster than `bitmapHasAny` if the second bitmap contains one element. [\#5535](https://github.com/ClickHouse/ClickHouse/pull/5535) ([Zhichang Yu](https://github.com/yuzhichang)) +- Support for the `crc32` function (with behaviour exactly the same as in MySQL or PHP). Do not use it if you need a hash function. [\#5661](https://github.com/ClickHouse/ClickHouse/pull/5661) ([Ivan Remen](https://github.com/BHYCHIK)) +- Implemented `SYSTEM START/STOP DISTRIBUTED SENDS` queries to control asynchronous inserts into `Distributed` tables (see the sketch after this list). [\#4935](https://github.com/ClickHouse/ClickHouse/pull/4935) ([Winter Zhang](https://github.com/zhang2014))
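+A brief sketch of the new `SYSTEM` queries for `Distributed` tables; `db.dist` is a hypothetical Distributed table name:
+
+```sql
+-- Pause background sending of asynchronously inserted blocks to the shards...
+SYSTEM STOP DISTRIBUTED SENDS db.dist;
+-- ...and resume it later.
+SYSTEM START DISTRIBUTED SENDS db.dist;
+```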
+ +#### Bug Fix {#bug-fix-22} + +- Ignore query execution limits and max parts size for merge limits while executing mutations. [\#5659](https://github.com/ClickHouse/ClickHouse/pull/5659) ([Anton Popov](https://github.com/CurtizJ)) +- Fixed a bug which may lead to deduplication of normal blocks (extremely rare) and insertion of duplicate blocks (more often). [\#5549](https://github.com/ClickHouse/ClickHouse/pull/5549) ([alesapin](https://github.com/alesapin)) +- Fix of the function `arrayEnumerateUniqRanked` for arguments with empty arrays [\#5559](https://github.com/ClickHouse/ClickHouse/pull/5559) ([proller](https://github.com/proller)) +- Don't subscribe to Kafka topics without the intent to poll any messages. [\#5698](https://github.com/ClickHouse/ClickHouse/pull/5698) ([Ivan](https://github.com/abyss7)) +- Make the setting `join_use_nulls` have no effect for types that cannot be inside Nullable [\#5700](https://github.com/ClickHouse/ClickHouse/pull/5700) ([Olga Khvostikova](https://github.com/stavrolia)) +- Fixed `Incorrect size of index granularity` errors [\#5720](https://github.com/ClickHouse/ClickHouse/pull/5720) ([coraxster](https://github.com/coraxster)) +- Fix Float-to-Decimal convert overflow [\#5607](https://github.com/ClickHouse/ClickHouse/pull/5607) ([coraxster](https://github.com/coraxster)) +- Flush the buffer when the `WriteBufferFromHDFS` destructor is called. This fixes writing into `HDFS`. [\#5684](https://github.com/ClickHouse/ClickHouse/pull/5684) ([Xindong Peng](https://github.com/eejoin)) + +#### Improvement {#improvement-7} + +- Treat empty cells in `CSV` as default values when the setting `input_format_defaults_for_omitted_fields` is enabled. [\#5625](https://github.com/ClickHouse/ClickHouse/pull/5625) ([akuzm](https://github.com/akuzm)) +- Non-blocking loading of external dictionaries. [\#5567](https://github.com/ClickHouse/ClickHouse/pull/5567) ([Vitaly Baranov](https://github.com/vitlibar)) +- Network timeouts can be dynamically changed for already established connections according to the settings. [\#4558](https://github.com/ClickHouse/ClickHouse/pull/4558) ([Konstantin Podshumok](https://github.com/podshumok)) +- Using «public\_suffix\_list» for the functions `firstSignificantSubdomain`, `cutToFirstSignificantSubdomain`. It uses a perfect hash table generated by `gperf` with a list generated from the file: https://publicsuffix.org/list/public\_suffix\_list.dat (for example, now we recognize the domain `ac.uk` as non-significant). [\#5030](https://github.com/ClickHouse/ClickHouse/pull/5030) ([Guillaume Tassery](https://github.com/YiuRULE)) +- Adopted the `IPv6` data type in system tables; unified client info columns in `system.processes` and `system.query_log` [\#5640](https://github.com/ClickHouse/ClickHouse/pull/5640) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Using sessions for connections with the MySQL compatibility protocol. \#5476 [\#5646](https://github.com/ClickHouse/ClickHouse/pull/5646) ([Yuriy Baranov](https://github.com/yurriy)) +- Support more `ALTER` queries `ON CLUSTER`. [\#5593](https://github.com/ClickHouse/ClickHouse/pull/5593) [\#5613](https://github.com/ClickHouse/ClickHouse/pull/5613) ([sundy-li](https://github.com/sundy-li)) +- Support the `` section in the `clickhouse-local` config file. [\#5540](https://github.com/ClickHouse/ClickHouse/pull/5540) ([proller](https://github.com/proller)) +- Allow running a query with the `remote` table function in `clickhouse-local` (see the sketch after this list) [\#5627](https://github.com/ClickHouse/ClickHouse/pull/5627) ([proller](https://github.com/proller))
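+A sketch of a query one might run under `clickhouse-local` with the `remote` table function; the host, database, table and credentials are placeholders:
+
+```sql
+-- Executed inside clickhouse-local: pull data from a remote ClickHouse
+-- server without a full client/server setup on the local machine.
+SELECT count()
+FROM remote('example-host:9000', 'default', 'events', 'user', 'password');
+```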
+ +#### Performance Improvement {#performance-improvement-5} + +- Add the ability to write a final mark at the end of MergeTree columns. It allows to avoid useless reads for keys that are out of the table data range. It is enabled only if adaptive index granularity is in use. [\#5624](https://github.com/ClickHouse/ClickHouse/pull/5624) ([alesapin](https://github.com/alesapin)) +- Improved performance of MergeTree tables on very slow filesystems by reducing the number of `stat` syscalls. [\#5648](https://github.com/ClickHouse/ClickHouse/pull/5648) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixed performance degradation in reading from MergeTree tables that was introduced in version 19.6. Fixes \#5631. [\#5633](https://github.com/ClickHouse/ClickHouse/pull/5633) ([alexey-milovidov](https://github.com/alexey-milovidov)) + +#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-8} + +- Implemented `TestKeeper` as an implementation of the ZooKeeper interface used for testing [\#5643](https://github.com/ClickHouse/ClickHouse/pull/5643) ([alexey-milovidov](https://github.com/alexey-milovidov)) ([levushkin aleksej](https://github.com/alexey-milovidov)) +- From now on, `.sql` tests can be run isolated by server, in parallel, with a random database. This allows to run them faster, add new tests with custom server configurations, and be sure that different tests don't affect each other. [\#5554](https://github.com/ClickHouse/ClickHouse/pull/5554) ([Ivan](https://github.com/abyss7)) +- Remove `` and `` from performance tests [\#5672](https://github.com/ClickHouse/ClickHouse/pull/5672) ([Olga Khvostikova](https://github.com/stavrolia)) +- Fixed the «select\_format» performance test for `Pretty` formats [\#5642](https://github.com/ClickHouse/ClickHouse/pull/5642) ([alexey-milovidov](https://github.com/alexey-milovidov)) + +## ClickHouse release 19.9 {#clickhouse-release-19-9} + +### ClickHouse release 19.9.3.31, 2019-07-05 {#clickhouse-release-19-9-3-31-2019-07-05} + +#### Bug Fix {#bug-fix-23} + +- Fixed a segfault in the Delta codec which affects columns with values less than 32 bits in size. The bug led to random memory corruption. [\#5786](https://github.com/ClickHouse/ClickHouse/pull/5786) ([alesapin](https://github.com/alesapin)) +- Fixed a rare bug in checking a part with a LowCardinality column. [\#5832](https://github.com/ClickHouse/ClickHouse/pull/5832) ([alesapin](https://github.com/alesapin)) +- Fixed a segfault in TTL merge with non-physical columns in the block. [\#5819](https://github.com/ClickHouse/ClickHouse/pull/5819) ([Anton Popov](https://github.com/CurtizJ)) +- Fix potential infinite sleeping of low-priority queries. [\#5842](https://github.com/ClickHouse/ClickHouse/pull/5842) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fix how ClickHouse determines the default time zone as UCT instead of UTC. [\#5828](https://github.com/ClickHouse/ClickHouse/pull/5828) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixed a bug with executing distributed DROP/ALTER/TRUNCATE/OPTIMIZE ON CLUSTER queries on a follower replica before the leader replica. Now they will be executed directly on the leader replica. [\#5757](https://github.com/ClickHouse/ClickHouse/pull/5757) ([alesapin](https://github.com/alesapin)) +- Fixed a race condition which causes some queries to possibly not appear in query\_log instantly after a SYSTEM FLUSH LOGS query.
[\#5685](https://github.com/ClickHouse/ClickHouse/pull/5685) ([Anton Popov](https://github.com/CurtizJ)) +- Added missing support of constant arguments to the `evalMLModel` function. [\#5820](https://github.com/ClickHouse/ClickHouse/pull/5820) ([alexey-milovidov](https://github.com/alexey-milovidov)) + +### ClickHouse release 19.9.2.4, 2019-06-24 {#clickhouse-release-19-9-2-4-2019-06-24} + +#### New Feature {#new-feature-8} + +- Print information about frozen parts in the `system.parts` table. [\#5471](https://github.com/ClickHouse/ClickHouse/pull/5471) ([proller](https://github.com/proller)) +- Ask for the client password on clickhouse-client start on a tty if it is not set in the arguments [\#5092](https://github.com/ClickHouse/ClickHouse/pull/5092) ([proller](https://github.com/proller)) +- Implement the `dictGet` and `dictGetOrDefault` functions for Decimal types. [\#5394](https://github.com/ClickHouse/ClickHouse/pull/5394) ([Artem Zuikov](https://github.com/4ertus2)) + +#### Improvement {#improvement-8} + +- Debian init: add a service wait [\#5522](https://github.com/ClickHouse/ClickHouse/pull/5522) ([proller](https://github.com/proller)) +- Add a setting, forbidden by default, to create a table with suspicious types for LowCardinality [\#5448](https://github.com/ClickHouse/ClickHouse/pull/5448) ([Olga Khvostikova](https://github.com/stavrolia)) +- Regression functions return model weights when not used as State in the function `evalMLMethod`. [\#5411](https://github.com/ClickHouse/ClickHouse/pull/5411) ([Quid37](https://github.com/Quid37)) +- Rename and improve regression methods. [\#5492](https://github.com/ClickHouse/ClickHouse/pull/5492) ([Quid37](https://github.com/Quid37)) +- Clearer interfaces of string searchers. [\#5586](https://github.com/ClickHouse/ClickHouse/pull/5586) ([Danila Kutenin](https://github.com/danlark1)) + +#### Bug Fix {#bug-fix-24} + +- Fix potential data loss in Kafka [\#5445](https://github.com/ClickHouse/ClickHouse/pull/5445) ([Ivan](https://github.com/abyss7)) +- Fix a potential infinite loop in the `PrettySpace` format when called with zero columns [\#5560](https://github.com/ClickHouse/ClickHouse/pull/5560) ([Olga Khvostikova](https://github.com/stavrolia)) +- Fixed a UInt32 overflow bug in linear models. Allow the eval ML model for a non-constant model argument. [\#5516](https://github.com/ClickHouse/ClickHouse/pull/5516) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +- `ALTER TABLE ... DROP INDEX IF EXISTS ...` should not raise an exception if the provided index does not exist [\#5524](https://github.com/ClickHouse/ClickHouse/pull/5524) ([Gleb Novikov](https://github.com/NanoBjorn)) +- Fixed a segfault with `bitmapHasAny` in a scalar subquery [\#5528](https://github.com/ClickHouse/ClickHouse/pull/5528) ([Zhichang Yu](https://github.com/yuzhichang)) +- Fixed an error when the replication connection pool doesn't retry to resolve the host, even when the DNS cache was dropped. [\#5534](https://github.com/ClickHouse/ClickHouse/pull/5534) ([alesapin](https://github.com/alesapin)) +- Fixed `ALTER ... MODIFY TTL` on ReplicatedMergeTree.
[\#5539](https://github.com/ClickHouse/ClickHouse/pull/5539) ([Anton Popov](https://github.com/CurtizJ)) +- Fixed INSERT into a Distributed table with a MATERIALIZED column [\#5429](https://github.com/ClickHouse/ClickHouse/pull/5429) ([Azat Khuzhin](https://github.com/azat)) +- Fix bad alloc when truncating Join storage [\#5437](https://github.com/ClickHouse/ClickHouse/pull/5437) ([TCeason](https://github.com/TCeason)) +- In recent versions of the package tzdata some of the files are symlinks now. The current mechanism for detecting the default timezone gets broken and gives wrong names for some timezones. Now at least we force the timezone name to the contents of TZ if provided. [\#5443](https://github.com/ClickHouse/ClickHouse/pull/5443) ([Ivan](https://github.com/abyss7)) +- Fix some extremely rare cases with the MultiVolnitsky searcher when the constant needles in sum are at least 16KB long. The algorithm missed or overwrote the previous results, which can lead to an incorrect result of `multiSearchAny`. [\#5588](https://github.com/ClickHouse/ClickHouse/pull/5588) ([Danila Kutenin](https://github.com/danlark1)) +- Fixed the issue when settings for ExternalData requests couldn't use ClickHouse settings. Also, for now, the settings `date_time_input_format` and `low_cardinality_allow_in_native_format` cannot be used because of the ambiguity of names (in external data it can be interpreted as a table format, and in the query it can be a setting). [\#5455](https://github.com/ClickHouse/ClickHouse/pull/5455) ([Danila Kutenin](https://github.com/danlark1)) +- Fixed a bug when parts were removed only from FS without dropping them from Zookeeper. [\#5520](https://github.com/ClickHouse/ClickHouse/pull/5520) ([alesapin](https://github.com/alesapin)) +- Remove debug logging from the MySQL protocol [\#5478](https://github.com/ClickHouse/ClickHouse/pull/5478) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Skip ZNONODE during DDL query processing [\#5489](https://github.com/ClickHouse/ClickHouse/pull/5489) ([Azat Khuzhin](https://github.com/azat)) +- Fix mixing `UNION ALL` result column types. There were cases with inconsistent data and column types of resulting columns. [\#5503](https://github.com/ClickHouse/ClickHouse/pull/5503) ([Artem Zuikov](https://github.com/4ertus2)) +- Throw an exception on wrong integers in `dictGetT` functions instead of a crash. [\#5446](https://github.com/ClickHouse/ClickHouse/pull/5446) ([Artem Zuikov](https://github.com/4ertus2)) +- Fixed wrong element\_count and load\_factor for hashed dictionaries in the `system.dictionaries` table. [\#5440](https://github.com/ClickHouse/ClickHouse/pull/5440) ([Azat Khuzhin](https://github.com/azat)) + +#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-9} + +- Fixed the build without `Brotli` HTTP compression support (`ENABLE_BROTLI=OFF` cmake variable). [\#5521](https://github.com/ClickHouse/ClickHouse/pull/5521) ([Anton Yuzhaninov](https://github.com/citrin)) +- Include roaring.h as roaring/roaring.h [\#5523](https://github.com/ClickHouse/ClickHouse/pull/5523) ([Orivej Desh](https://github.com/orivej)) +- Fix gcc9 warnings in hyperscan (the \#line directive is evil!) [\#5546](https://github.com/ClickHouse/ClickHouse/pull/5546) ([Danila Kutenin](https://github.com/danlark1)) +- Fix all warnings when compiling with gcc-9.
Fixed some contrib issues. Fixed the gcc9 ICE and submitted it to bugzilla. [\#5498](https://github.com/ClickHouse/ClickHouse/pull/5498) ([Danila Kutenin](https://github.com/danlark1)) +- Fixed linking with lld [\#5477](https://github.com/ClickHouse/ClickHouse/pull/5477) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Remove unused specializations in dictionaries [\#5452](https://github.com/ClickHouse/ClickHouse/pull/5452) ([Artem Zuikov](https://github.com/4ertus2)) +- Improved performance tests for formatting and parsing tables for different types of files [\#5497](https://github.com/ClickHouse/ClickHouse/pull/5497) ([Olga Khvostikova](https://github.com/stavrolia)) +- Fixes for parallel test runs [\#5506](https://github.com/ClickHouse/ClickHouse/pull/5506) ([proller](https://github.com/proller)) +- Docker: use configs from clickhouse-test [\#5531](https://github.com/ClickHouse/ClickHouse/pull/5531) ([proller](https://github.com/proller)) +- Fixed compilation for FreeBSD [\#5447](https://github.com/ClickHouse/ClickHouse/pull/5447) ([proller](https://github.com/proller)) +- Upgrade boost to 1.70 [\#5570](https://github.com/ClickHouse/ClickHouse/pull/5570) ([proller](https://github.com/proller)) +- Fixed building clickhouse as a submodule [\#5574](https://github.com/ClickHouse/ClickHouse/pull/5574) ([proller](https://github.com/proller)) +- Improve JSONExtract performance tests [\#5444](https://github.com/ClickHouse/ClickHouse/pull/5444) ([Vitaly Baranov](https://github.com/vitlibar)) + +## ClickHouse release 19.8 {#clickhouse-release-19-8} + +### ClickHouse release 19.8.3.8, 2019-06-11 {#clickhouse-release-19-8-3-8-2019-06-11} + +#### New Features {#new-features} + +- Added functions for working with JSON [\#4686](https://github.com/ClickHouse/ClickHouse/pull/4686) ([hcz](https://github.com/hczhcz)) [\#5124](https://github.com/ClickHouse/ClickHouse/pull/5124). ([Vitaly Baranov](https://github.com/vitlibar)) +- Add the function basename, with behaviour similar to the basename function that exists in a lot of languages (`os.path.basename` in Python, `basename` in PHP, etc…). Works with both a UNIX-like path and a Windows path. [\#5136](https://github.com/ClickHouse/ClickHouse/pull/5136) ([Guillaume Tassery](https://github.com/YiuRULE)) +- Added `LIMIT n, m BY` or `LIMIT m OFFSET n BY` syntax to set an offset of n for the LIMIT BY clause (see the example after this list). [\#5138](https://github.com/ClickHouse/ClickHouse/pull/5138) ([Anton Popov](https://github.com/CurtizJ)) +- Added the new data type `SimpleAggregateFunction`, which allows to have columns with light aggregation in an `AggregatingMergeTree`. It can only be used with simple functions like `any`, `anyLast`, `sum`, `min`, `max`. [\#4629](https://github.com/ClickHouse/ClickHouse/pull/4629) ([Boris Granveaud](https://github.com/bgranvea)) +- Added support for non-constant arguments in the function `ngramDistance` [\#5198](https://github.com/ClickHouse/ClickHouse/pull/5198) ([Danila Kutenin](https://github.com/danlark1)) +- Added the functions `skewPop`, `skewSamp`, `kurtPop` and `kurtSamp` to compute sequence skewness, sample skewness, kurtosis and sample kurtosis respectively. [\#5200](https://github.com/ClickHouse/ClickHouse/pull/5200) ([hcz](https://github.com/hczhcz)) +- Support the rename operation for `MaterializeView` storage.
[\#5209](https://github.com/ClickHouse/ClickHouse/pull/5209) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Added a server that allows connecting to ClickHouse using a MySQL client. [\#4715](https://github.com/ClickHouse/ClickHouse/pull/4715) ([Yuriy Baranov](https://github.com/yurriy))
+- Added `toDecimal*OrZero` and `toDecimal*OrNull` functions. [\#5291](https://github.com/ClickHouse/ClickHouse/pull/5291) ([Artem Zuikov](https://github.com/4ertus2))
+- Support Decimal types in the functions: `quantile`, `quantiles`, `median`, `quantileExactWeighted`, `quantilesExactWeighted`, `medianExactWeighted`. [\#5304](https://github.com/ClickHouse/ClickHouse/pull/5304) ([Artem Zuikov](https://github.com/4ertus2))
+- Added the `toValidUTF8` function, which replaces all invalid UTF-8 characters by the replacement character � (U+FFFD). [\#5322](https://github.com/ClickHouse/ClickHouse/pull/5322) ([Danila Kutenin](https://github.com/danlark1))
+- Added the `format` function. It formats a constant pattern (a simplified Python format pattern) with the strings listed in the arguments (see the example after this list). [\#5330](https://github.com/ClickHouse/ClickHouse/pull/5330) ([Danila Kutenin](https://github.com/danlark1))
+- Added the `system.detached_parts` table containing information about detached parts of `MergeTree` tables. [\#5353](https://github.com/ClickHouse/ClickHouse/pull/5353) ([akuzm](https://github.com/akuzm))
+- Added the `ngramSearch` function to calculate the non-symmetric difference between needle and haystack. [\#5418](https://github.com/ClickHouse/ClickHouse/pull/5418)[\#5422](https://github.com/ClickHouse/ClickHouse/pull/5422) ([Danila Kutenin](https://github.com/danlark1))
+- Implementation of basic machine learning methods (stochastic linear regression and logistic regression) using the aggregate functions interface. Has different strategies for updating model weights (simple gradient descent, momentum method, Nesterov method). Also supports mini-batches of custom size. [\#4943](https://github.com/ClickHouse/ClickHouse/pull/4943) ([Quid37](https://github.com/Quid37))
+- Implementation of the `geohashEncode` and `geohashDecode` functions. [\#5003](https://github.com/ClickHouse/ClickHouse/pull/5003) ([Vasily Nemkov](https://github.com/Enmk))
+- Added the aggregate function `timeSeriesGroupSum`, which can aggregate different time series whose sample timestamps are not aligned. It uses linear interpolation between two sample timestamps and then sums the time series together. Added the aggregate function `timeSeriesGroupRateSum`, which calculates the rate of time series and then sums the rates together. [\#4542](https://github.com/ClickHouse/ClickHouse/pull/4542) ([Yangkuan Liu](https://github.com/LiuYangkuan))
+- Added the functions `IPv4CIDRtoIPv4Range` and `IPv6CIDRtoIPv6Range` to calculate the lower and upper bounds for an IP in a subnet using CIDR. [\#5095](https://github.com/ClickHouse/ClickHouse/pull/5095) ([Guillaume Tassery](https://github.com/YiuRULE))
+- Added the X-ClickHouse-Summary header when we send a query over HTTP with the setting `send_progress_in_http_headers` enabled. It returns the usual information of X-ClickHouse-Progress, with additional information like how many rows and bytes were inserted by the query. [\#5116](https://github.com/ClickHouse/ClickHouse/pull/5116) ([Guillaume Tassery](https://github.com/YiuRULE))
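+A minimal sketch of two of the new features above, the `format` function and the `LIMIT n, m BY` syntax (the `page_stats` table and its columns are made up for illustration):
+
+``` sql
+-- format(): indexed placeholders, like a simplified Python str.format
+SELECT format('{1} {0} {1}', 'World', 'Hello'); -- 'Hello World Hello'
+
+-- LIMIT n, m BY: per-group offset and limit, here skipping the top page and keeping the next two per domain
+SELECT domain, page, hits
+FROM page_stats
+ORDER BY domain, hits DESC
+LIMIT 1, 2 BY domain;
+```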
+#### Improvements {#improvements}
+
+- Added the `max_parts_in_total` setting for the MergeTree family of tables (default: 100 000) that prevents unsafe specification of the partition key \#5166. [\#5171](https://github.com/ClickHouse/ClickHouse/pull/5171) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `clickhouse-obfuscator`: derive the seed for individual columns by combining the initial seed with the column name, not the column position. This is intended to transform datasets with multiple related tables, so that tables remain JOINable after the transformation. [\#5178](https://github.com/ClickHouse/ClickHouse/pull/5178) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added the functions `JSONExtractRaw`, `JSONExtractKeyAndValues`. Renamed the `jsonExtract` functions to `JSONExtract`. When something goes wrong, these functions return the corresponding values, not `NULL`. Modified the function `JSONExtract`; now it gets the return type from its last parameter and doesn't inject nullables. Implemented a fallback to RapidJSON in case AVX2 instructions are not available. The simdjson library was updated to a new version. [\#5235](https://github.com/ClickHouse/ClickHouse/pull/5235) ([Vitaly Baranov](https://github.com/vitlibar))
+- Now the `if` and `multiIf` functions don't rely on the condition's `Nullable`, but rely on the branches, for SQL compatibility. [\#5238](https://github.com/ClickHouse/ClickHouse/pull/5238) ([Jian Wu](https://github.com/janplus))
+- The `In` predicate now generates a `Null` result from `Null` input, like the `Equal` function. [\#5152](https://github.com/ClickHouse/ClickHouse/pull/5152) ([Jian Wu](https://github.com/janplus))
+- Check the time limit for every (flush\_interval / poll\_timeout) number of rows from Kafka. This allows to break the reading from the Kafka consumer more frequently and to check the time limits for the top-level streams [\#5249](https://github.com/ClickHouse/ClickHouse/pull/5249) ([Ivan](https://github.com/abyss7))
+- Link rdkafka with bundled SASL. This should allow to use SASL SCRAM authentication [\#5253](https://github.com/ClickHouse/ClickHouse/pull/5253) ([Ivan](https://github.com/abyss7))
+- Batched version of RowRefList for ALL JOINS. [\#5267](https://github.com/ClickHouse/ClickHouse/pull/5267) ([Artem Zuikov](https://github.com/4ertus2))
+- clickhouse-server: more informative listen error messages. [\#5268](https://github.com/ClickHouse/ClickHouse/pull/5268) ([proller](https://github.com/proller))
+- Support dictionaries in clickhouse-copier for functions in `<sharding_key>` [\#5270](https://github.com/ClickHouse/ClickHouse/pull/5270) ([proller](https://github.com/proller))
+- Added a new setting `kafka_commit_every_batch` to regulate the Kafka committing policy.
+  It allows to set the commit mode: after every batch of messages is handled, or after the whole block is written to the storage. It's a trade-off between losing some messages or reading them twice in some extreme situations (see the sketch after this list). [\#5308](https://github.com/ClickHouse/ClickHouse/pull/5308) ([Ivan](https://github.com/abyss7))
+- Make `windowFunnel` support other unsigned integer types. [\#5320](https://github.com/ClickHouse/ClickHouse/pull/5320) ([sundy-li](https://github.com/sundy-li))
+- Allow to shadow the virtual column `_table` in the Merge engine. [\#5325](https://github.com/ClickHouse/ClickHouse/pull/5325) ([Ivan](https://github.com/abyss7))
+- Make `sequenceMatch` aggregate functions support other unsigned integer types [\#5339](https://github.com/ClickHouse/ClickHouse/pull/5339) ([sundy-li](https://github.com/sundy-li))
+- Better error messages if a checksum mismatch is most likely caused by hardware failures. [\#5355](https://github.com/ClickHouse/ClickHouse/pull/5355) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Check that underlying tables support sampling for `StorageMerge` [\#5366](https://github.com/ClickHouse/ClickHouse/pull/5366) ([Ivan](https://github.com/abyss7))
+- Close MySQL connections after their usage in external dictionaries. It is related to issue \#893. [\#5395](https://github.com/ClickHouse/ClickHouse/pull/5395) ([Clément Rodriguez](https://github.com/clemrodriguez))
+- Improvements of the MySQL Wire Protocol. Changed the name of the format to MySQLWire. Using RAII for calling RSA\_free. Disabling SSL if the context cannot be created. [\#5419](https://github.com/ClickHouse/ClickHouse/pull/5419) ([Yuriy Baranov](https://github.com/yurriy))
+- clickhouse-client: allow to run with an inaccessible history file (read-only, no disk space, file is a directory, …). [\#5431](https://github.com/ClickHouse/ClickHouse/pull/5431) ([proller](https://github.com/proller))
+- Respect query settings in asynchronous INSERTs into Distributed tables. [\#4936](https://github.com/ClickHouse/ClickHouse/pull/4936) ([TCeason](https://github.com/TCeason))
+- Renamed the functions `leastSqr` to `simpleLinearRegression`, `LinearRegression` to `linearRegression`, `LogisticRegression` to `logisticRegression`. [\#5391](https://github.com/ClickHouse/ClickHouse/pull/5391) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
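+A minimal sketch of a Kafka table using the new committing-policy setting (broker, topic and schema are made up; the other settings are the usual Kafka engine parameters):
+
+``` sql
+CREATE TABLE kafka_events
+(
+    message String
+)
+ENGINE = Kafka
+SETTINGS kafka_broker_list = 'kafka:9092',
+         kafka_topic_list = 'events',
+         kafka_group_name = 'clickhouse-events',
+         kafka_format = 'JSONEachRow',
+         kafka_commit_every_batch = 1; -- commit after each handled batch instead of after the whole block
+```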
+#### Performance Improvements {#performance-improvements}
+
+- Parallelize processing of parts of non-replicated MergeTree tables in the ALTER MODIFY query. [\#4639](https://github.com/ClickHouse/ClickHouse/pull/4639) ([Ivan Kush](https://github.com/IvanKush))
+- Optimizations in regular expression extraction. [\#5193](https://github.com/ClickHouse/ClickHouse/pull/5193) [\#5191](https://github.com/ClickHouse/ClickHouse/pull/5191) ([Danila Kutenin](https://github.com/danlark1))
+- Do not add the right join key column to the join result if it's used only in the JOIN ON section. [\#5260](https://github.com/ClickHouse/ClickHouse/pull/5260) ([Artem Zuikov](https://github.com/4ertus2))
+- Freeze the Kafka buffer after the first empty response. This avoids multiple invocations of `ReadBuffer::next()` for an empty result in some row-parsing streams. [\#5283](https://github.com/ClickHouse/ClickHouse/pull/5283) ([Ivan](https://github.com/abyss7))
+- `concat` function optimization for multiple arguments. [\#5357](https://github.com/ClickHouse/ClickHouse/pull/5357) ([Danila Kutenin](https://github.com/danlark1))
+- Query optimisation. Allow push down of IN statements while rewriting comma/cross joins into inner ones. [\#5396](https://github.com/ClickHouse/ClickHouse/pull/5396) ([Artem Zuikov](https://github.com/4ertus2))
+- Upgrade our LZ4 implementation with the reference one to have faster decompression.
[\#5070](https://github.com/ClickHouse/ClickHouse/pull/5070) ([Danila Kutenin](https://github.com/danlark1))
+- Implemented MSD radix sort (based on kxsort), and partial sorting. [\#5129](https://github.com/ClickHouse/ClickHouse/pull/5129) ([Evgenii Pravda](https://github.com/kvinty))
+
+#### Bug Fixes {#bug-fixes}
+
+- Fix push require columns with join [\#5192](https://github.com/ClickHouse/ClickHouse/pull/5192) ([Winter Zhang](https://github.com/zhang2014))
+- Fixed a bug where, when ClickHouse is run by systemd, the command `sudo service clickhouse-server forcerestart` did not work as expected. [\#5204](https://github.com/ClickHouse/ClickHouse/pull/5204) ([proller](https://github.com/proller))
+- Fix HTTP error codes in DataPartsExchange (the interserver HTTP server on port 9009 always returned code 200, even on errors). [\#5216](https://github.com/ClickHouse/ClickHouse/pull/5216) ([proller](https://github.com/proller))
+- Fix SimpleAggregateFunction for Strings longer than MAX\_SMALL\_STRING\_SIZE [\#5311](https://github.com/ClickHouse/ClickHouse/pull/5311) ([Azat Khuzhin](https://github.com/azat))
+- Fix an error for `Decimal` to `Nullable(Decimal)` conversion in IN. Support other Decimal to Decimal conversions (including different scales). [\#5350](https://github.com/ClickHouse/ClickHouse/pull/5350) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed FPU clobbering in the simdjson library that led to wrong calculation of the `uniqHLL` and `uniqCombined` aggregate functions and math functions such as `log`. [\#5354](https://github.com/ClickHouse/ClickHouse/pull/5354) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed handling of mixed const/nonconst cases in JSON functions. [\#5435](https://github.com/ClickHouse/ClickHouse/pull/5435) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fix the `retention` function. Now all conditions that are satisfied in a row of data are added to the data state. [\#5119](https://github.com/ClickHouse/ClickHouse/pull/5119) ([小路](https://github.com/nicelulu))
+- Fix the result type for `quantileExact` with Decimals. [\#5304](https://github.com/ClickHouse/ClickHouse/pull/5304) ([Artem Zuikov](https://github.com/4ertus2))
+
+#### Documentation {#documentation}
+
+- Translate the documentation for `CollapsingMergeTree` to Chinese. [\#5168](https://github.com/ClickHouse/ClickHouse/pull/5168) ([张风啸](https://github.com/AlexZFX))
+- Translate some documentation about table engines to Chinese.
+  [\#5134](https://github.com/ClickHouse/ClickHouse/pull/5134)
+  [\#5328](https://github.com/ClickHouse/ClickHouse/pull/5328)
+  ([never lee](https://github.com/neverlee))
+
+#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements}
+
+- Fix some sanitizer reports that show probable use-after-free. [\#5139](https://github.com/ClickHouse/ClickHouse/pull/5139) [\#5143](https://github.com/ClickHouse/ClickHouse/pull/5143) [\#5393](https://github.com/ClickHouse/ClickHouse/pull/5393) ([Ivan](https://github.com/abyss7))
+- Move performance tests out of separate directories for convenience. [\#5158](https://github.com/ClickHouse/ClickHouse/pull/5158) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix incorrect performance tests.
[\#5255](https://github.com/ClickHouse/ClickHouse/pull/5255) ([alesapin](https://github.com/alesapin))
+- Added a tool to calculate checksums caused by bit flips to debug hardware issues. [\#5334](https://github.com/ClickHouse/ClickHouse/pull/5334) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Make the runner script more usable. [\#5340](https://github.com/ClickHouse/ClickHouse/pull/5340)[\#5360](https://github.com/ClickHouse/ClickHouse/pull/5360) ([filimonov](https://github.com/filimonov))
+- Add a small instruction on how to write performance tests. [\#5408](https://github.com/ClickHouse/ClickHouse/pull/5408) ([alesapin](https://github.com/alesapin))
+- Add the ability to make substitutions in create, fill and drop queries in performance tests [\#5367](https://github.com/ClickHouse/ClickHouse/pull/5367) ([Olga Khvostikova](https://github.com/stavrolia))
+
+## ClickHouse release 19.7 {#clickhouse-release-19-7}
+
+### ClickHouse release 19.7.5.29, 2019-07-05 {#clickhouse-release-19-7-5-29-2019-07-05}
+
+#### Bug Fix {#bug-fix-25}
+
+- Fix a performance regression in some queries with JOIN. [\#5192](https://github.com/ClickHouse/ClickHouse/pull/5192) ([Winter Zhang](https://github.com/zhang2014))
+
+### ClickHouse release 19.7.5.27, 2019-06-09 {#clickhouse-release-19-7-5-27-2019-06-09}
+
+#### New features {#new-features-1}
+
+- Added the bitmap related functions `bitmapHasAny` and `bitmapHasAll`, analogous to the `hasAny` and `hasAll` functions for arrays (see the example below). [\#5279](https://github.com/ClickHouse/ClickHouse/pull/5279) ([Sergey Vladykin](https://github.com/svladykin))
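+A quick sketch of the difference between the two functions, under the stated analogy to `hasAny`/`hasAll`:
+
+``` sql
+SELECT
+    bitmapHasAny(bitmapBuild([1, 2, 3]), bitmapBuild([3, 4, 5])) AS has_any, -- 1: both contain 3
+    bitmapHasAll(bitmapBuild([1, 2, 3]), bitmapBuild([3, 4, 5])) AS has_all; -- 0: 4 and 5 are missing from the first
+```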
+#### Bug Fixes {#bug-fixes-1}
+
+- Fixed a segfault on a `minmax` INDEX with a Null value. [\#5246](https://github.com/ClickHouse/ClickHouse/pull/5246) ([Nikita Vasilev](https://github.com/nikvas0))
+- Mark all input columns in LIMIT BY as required output. This fixes the ‘Not found column’ error in some distributed queries. [\#5407](https://github.com/ClickHouse/ClickHouse/pull/5407) ([Constantin S. Pan](https://github.com/kvap))
+- Fix the «Column ‘0’ already exists» error in `SELECT .. PREWHERE` on a column with DEFAULT [\#5397](https://github.com/ClickHouse/ClickHouse/pull/5397) ([proller](https://github.com/proller))
+- Fix the `ALTER MODIFY TTL` query on `ReplicatedMergeTree`. [\#5539](https://github.com/ClickHouse/ClickHouse/pull/5539/commits) ([Anton Popov](https://github.com/CurtizJ))
+- Don't crash the server when Kafka consumers have failed to start. [\#5285](https://github.com/ClickHouse/ClickHouse/pull/5285) ([Ivan](https://github.com/abyss7))
+- Fixed bitmap functions producing a wrong result. [\#5359](https://github.com/ClickHouse/ClickHouse/pull/5359) ([Andy Yang](https://github.com/andyyzh))
+- Fix element\_count for hashed dictionaries (do not include duplicates) [\#5440](https://github.com/ClickHouse/ClickHouse/pull/5440) ([Azat Khuzhin](https://github.com/azat))
+- Use the contents of the TZ environment variable as the name for the timezone. In some cases it helps to correctly detect the default timezone. [\#5443](https://github.com/ClickHouse/ClickHouse/pull/5443) ([Ivan](https://github.com/abyss7))
+- Do not try to convert integers in `dictGetT` functions, because it doesn't work correctly. Throw an exception instead. [\#5446](https://github.com/ClickHouse/ClickHouse/pull/5446) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix settings in an ExternalData HTTP request. [\#5455](https://github.com/ClickHouse/ClickHouse/pull/5455) ([Danila
+  Kutenin](https://github.com/danlark1))
+- Fixed a bug when parts were removed only from FS without dropping them from Zookeeper. [\#5520](https://github.com/ClickHouse/ClickHouse/pull/5520) ([alesapin](https://github.com/alesapin))
+- Fixed a segmentation fault in the `bitmapHasAny` function. [\#5528](https://github.com/ClickHouse/ClickHouse/pull/5528) ([Zhichang Yu](https://github.com/yuzhichang))
+- Fixed an error when the replication connection pool doesn't retry to resolve the host, even when the DNS cache was dropped. [\#5534](https://github.com/ClickHouse/ClickHouse/pull/5534) ([alesapin](https://github.com/alesapin))
+- Fixed the `DROP INDEX IF EXISTS` query. Now the `ALTER TABLE ... DROP INDEX IF EXISTS ...` query doesn't raise an exception if the provided index does not exist. [\#5524](https://github.com/ClickHouse/ClickHouse/pull/5524) ([Gleb Novikov](https://github.com/NanoBjorn))
+- Fix UNION ALL supertype columns. There were cases with inconsistent data and column types of result columns. [\#5503](https://github.com/ClickHouse/ClickHouse/pull/5503) ([Artem Zuikov](https://github.com/4ertus2))
+- Skip ZNONODE during DDL query processing. Before, if another node removed the znode in the task queue, the one that
+  didn't process it, but already got the list of children, would terminate the DDLWorker thread. [\#5489](https://github.com/ClickHouse/ClickHouse/pull/5489) ([Azat Khuzhin](https://github.com/azat))
+- Fix INSERT into a Distributed() table with a MATERIALIZED column. [\#5429](https://github.com/ClickHouse/ClickHouse/pull/5429) ([Azat Khuzhin](https://github.com/azat))
+
+### ClickHouse release 19.7.3.9, 2019-05-30 {#clickhouse-release-19-7-3-9-2019-05-30}
+
+#### New Features {#new-features-2}
+
+- Allow to limit the range of a setting that can be specified by the user.
+  These constraints can be set up in the user's settings profile.
+  [\#4931](https://github.com/ClickHouse/ClickHouse/pull/4931) ([Vitaly
+  Baranov](https://github.com/vitlibar))
+- Add a second version of the function `groupUniqArray` with an optional
+  `max_size` parameter that limits the size of the resulting array. This
+  behaviour is similar to the `groupArray(max_size)(x)` function (see the example after this list).
+  [\#5026](https://github.com/ClickHouse/ClickHouse/pull/5026) ([Guillaume
+  Tassery](https://github.com/YiuRULE))
+- For the TSVWithNames/CSVWithNames input file formats, the column order can now be
+  determined from the file header. This is controlled by the
+  `input_format_with_names_use_header` parameter.
+  [\#5081](https://github.com/ClickHouse/ClickHouse/pull/5081)
+  ([Alexander](https://github.com/Akazz))
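+A minimal illustration of the bounded variant (the data source is just the `numbers` table function):
+
+``` sql
+-- Keeps at most 3 distinct values per group
+SELECT groupUniqArray(3)(number % 10) FROM numbers(100);
+```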
+#### Bug Fixes {#bug-fixes-2}
+
+- Crash with uncompressed\_cache + JOIN during merge (\#5197)
+  [\#5133](https://github.com/ClickHouse/ClickHouse/pull/5133) ([Danila
+  Kutenin](https://github.com/danlark1))
+- Segmentation fault on a clickhouse-client query to system tables. \#5066
+  [\#5127](https://github.com/ClickHouse/ClickHouse/pull/5127)
+  ([Ivan](https://github.com/abyss7))
+- Data loss on heavy load via KafkaEngine (\#4736)
+  [\#5080](https://github.com/ClickHouse/ClickHouse/pull/5080)
+  ([Ivan](https://github.com/abyss7))
+- Fixed a very rare data race condition that could happen when executing a query with UNION ALL involving at least two SELECTs from system.columns, system.tables, system.parts, system.parts\_tables or tables of the Merge family and performing ALTER of columns of the related tables concurrently. [\#5189](https://github.com/ClickHouse/ClickHouse/pull/5189) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Performance Improvements {#performance-improvements-1}
+
+- Use radix sort for sorting by a single numeric column in `ORDER BY` without
+  `LIMIT`. [\#5106](https://github.com/ClickHouse/ClickHouse/pull/5106),
+  [\#4439](https://github.com/ClickHouse/ClickHouse/pull/4439)
+  ([Evgenii Pravda](https://github.com/kvinty),
+  [alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Documentation {#documentation-1}
+
+- Translate documentation for some table engines to Chinese.
+  [\#5107](https://github.com/ClickHouse/ClickHouse/pull/5107),
+  [\#5094](https://github.com/ClickHouse/ClickHouse/pull/5094),
+  [\#5087](https://github.com/ClickHouse/ClickHouse/pull/5087)
+  ([张风啸](https://github.com/AlexZFX)),
+  [\#5068](https://github.com/ClickHouse/ClickHouse/pull/5068) ([never
+  lee](https://github.com/neverlee))
+
+#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-1}
+
+- Print UTF-8 characters properly in `clickhouse-test`.
+  [\#5084](https://github.com/ClickHouse/ClickHouse/pull/5084)
+  ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add a command line parameter for clickhouse-client to always load suggestion
+  data. [\#5102](https://github.com/ClickHouse/ClickHouse/pull/5102)
+  ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Resolve some of the PVS-Studio warnings.
+  [\#5082](https://github.com/ClickHouse/ClickHouse/pull/5082)
+  ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Update LZ4 [\#5040](https://github.com/ClickHouse/ClickHouse/pull/5040) ([Danila
+  Kutenin](https://github.com/danlark1))
+- Add gperf to the build requirements for the upcoming pull request \#5030.
+  [\#5110](https://github.com/ClickHouse/ClickHouse/pull/5110)
+  ([proller](https://github.com/proller))
+
+## ClickHouse release 19.6 {#clickhouse-release-19-6}
+
+### ClickHouse release 19.6.3.18, 2019-06-13 {#clickhouse-release-19-6-3-18-2019-06-13}
+
+#### Bug Fixes {#bug-fixes-3}
+
+- Fixed IN condition pushdown for queries from the table functions `mysql` and `odbc` and the corresponding table engines. This fixes \#3540 and \#2384. [\#5313](https://github.com/ClickHouse/ClickHouse/pull/5313) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a deadlock in Zookeeper. [\#5297](https://github.com/ClickHouse/ClickHouse/pull/5297) ([github1youlc](https://github.com/github1youlc))
+- Allow quoted decimals in CSV. [\#5284](https://github.com/ClickHouse/ClickHouse/pull/5284) ([Artem Zuikov](https://github.com/4ertus2))
+- Disallow conversion from float Inf/NaN into Decimals (throw an exception).
[\#5282](https://github.com/ClickHouse/ClickHouse/pull/5282) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed a data race in the rename query. [\#5247](https://github.com/ClickHouse/ClickHouse/pull/5247) ([Winter Zhang](https://github.com/zhang2014))
+- Temporarily disable LFAlloc. Usage of LFAlloc might lead to a lot of MAP\_FAILED in allocating the uncompressed cache and, as a result, to crashes of queries on highly loaded servers. [cfdba93](https://github.com/ClickHouse/ClickHouse/commit/cfdba938ce22f16efeec504f7f90206a515b1280)([Danila Kutenin](https://github.com/danlark1))
+
+### ClickHouse release 19.6.2.11, 2019-05-13 {#clickhouse-release-19-6-2-11-2019-05-13}
+
+#### New Features {#new-features-3}
+
+- TTL expressions for columns and tables (see the sketch after this list). [\#4212](https://github.com/ClickHouse/ClickHouse/pull/4212) ([Anton Popov](https://github.com/CurtizJ))
+- Added support for `brotli` compression of HTTP responses (Accept-Encoding: br) [\#4388](https://github.com/ClickHouse/ClickHouse/pull/4388) ([Mikhail](https://github.com/fandyushin))
+- Added the new function `isValidUTF8` for checking whether a set of bytes is correctly utf-8 encoded. [\#4934](https://github.com/ClickHouse/ClickHouse/pull/4934) ([Danila Kutenin](https://github.com/danlark1))
+- Added the new load balancing policy `first_or_random`, which sends queries to the first specified host and, if it is inaccessible, sends queries to random hosts of the shard. Useful for cross-replication topology setups. [\#5012](https://github.com/ClickHouse/ClickHouse/pull/5012) ([nvartolomei](https://github.com/nvartolomei))
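+A minimal sketch of the new TTL expressions (the table and its columns are made up; note that a TTL column must not be part of the key):
+
+``` sql
+CREATE TABLE events
+(
+    d Date,
+    user_id UInt64,
+    raw_payload String TTL d + INTERVAL 7 DAY  -- column TTL: the value is cleared after a week
+)
+ENGINE = MergeTree
+ORDER BY (d, user_id)
+TTL d + INTERVAL 90 DAY;  -- table TTL: whole rows expire after 90 days
+```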
+#### Experimental Features {#experimental-features-1}
+
+- Added the setting `index_granularity_bytes` (adaptive index granularity) for the MergeTree\* family of tables. [\#4826](https://github.com/ClickHouse/ClickHouse/pull/4826) ([alesapin](https://github.com/alesapin))
+
+#### Improvements {#improvements-1}
+
+- Added support of non-constant and negative size and length arguments for the function `substringUTF8`. [\#4989](https://github.com/ClickHouse/ClickHouse/pull/4989) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Disable push-down to the right table in left join, the left table in right join, and both tables in full join. This fixes wrong JOIN results in some cases. [\#4846](https://github.com/ClickHouse/ClickHouse/pull/4846) ([Ivan](https://github.com/abyss7))
+- `clickhouse-copier`: auto upload the task configuration from the `--task-file` option [\#4876](https://github.com/ClickHouse/ClickHouse/pull/4876) ([proller](https://github.com/proller))
+- Added a typos handler for the storage factory and the table functions factory. [\#4891](https://github.com/ClickHouse/ClickHouse/pull/4891) ([Danila Kutenin](https://github.com/danlark1))
+- Support asterisks and qualified asterisks for multiple joins without subqueries [\#4898](https://github.com/ClickHouse/ClickHouse/pull/4898) ([Artem Zuikov](https://github.com/4ertus2))
+- Make the missing column error message more user friendly. [\#4915](https://github.com/ClickHouse/ClickHouse/pull/4915) ([Artem Zuikov](https://github.com/4ertus2))
+
+#### Performance Improvements {#performance-improvements-2}
+
+- Significant speedup of ASOF JOIN [\#4924](https://github.com/ClickHouse/ClickHouse/pull/4924) ([Martijn Bakker](https://github.com/Gladdy))
+
+#### Backward Incompatible Changes {#backward-incompatible-changes}
+
+- The HTTP header `Query-Id` was renamed to `X-ClickHouse-Query-Id` for consistency. [\#4972](https://github.com/ClickHouse/ClickHouse/pull/4972) ([Mikhail](https://github.com/fandyushin))
+
+#### Bug Fixes {#bug-fixes-4}
+
+- Fixed a potential null pointer dereference in `clickhouse-copier`. [\#4900](https://github.com/ClickHouse/ClickHouse/pull/4900) ([proller](https://github.com/proller))
+- Fixed an error in queries with JOIN + ARRAY JOIN [\#4938](https://github.com/ClickHouse/ClickHouse/pull/4938) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed hanging on the start of the server when a dictionary depends on another dictionary via a database with engine=Dictionary. [\#4962](https://github.com/ClickHouse/ClickHouse/pull/4962) ([Vitaly Baranov](https://github.com/vitlibar))
+- Partially fix distributed\_product\_mode = local. It's possible to allow columns of local tables in where/having/order by/… via table aliases. Throw exception if table does not have alias. There's not possible to access to the columns without table aliases yet. [\#4986](https://github.com/ClickHouse/ClickHouse/pull/4986) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix a potentially wrong result for `SELECT DISTINCT` with `JOIN` [\#5001](https://github.com/ClickHouse/ClickHouse/pull/5001) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed a very rare data race condition that could happen when executing a query with UNION ALL involving at least two SELECTs from system.columns, system.tables, system.parts, system.parts\_tables or tables of the Merge family and performing ALTER of columns of the related tables concurrently. [\#5189](https://github.com/ClickHouse/ClickHouse/pull/5189) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-2}
+
+- Fixed test failures when running clickhouse-server on a different host [\#4713](https://github.com/ClickHouse/ClickHouse/pull/4713) ([Vasily Nemkov](https://github.com/Enmk))
+- clickhouse-test: disable color control sequences in non-tty environments. [\#4937](https://github.com/ClickHouse/ClickHouse/pull/4937) ([alesapin](https://github.com/alesapin))
+- clickhouse-test: allow using any test database (remove the `test.` qualification where possible) [\#5008](https://github.com/ClickHouse/ClickHouse/pull/5008) ([proller](https://github.com/proller))
+- Fix ubsan errors [\#5037](https://github.com/ClickHouse/ClickHouse/pull/5037) ([Vitaly Baranov](https://github.com/vitlibar))
+- Yandex LFAlloc was added to ClickHouse to allocate MarkCache and UncompressedCache data in different ways to catch segfaults more reliably [\#4995](https://github.com/ClickHouse/ClickHouse/pull/4995) ([Danila Kutenin](https://github.com/danlark1))
+- A Python util to help with backports and changelogs.
[\#4949](https://github.com/ClickHouse/ClickHouse/pull/4949) ([Ivan](https://github.com/abyss7))
+
+## ClickHouse release 19.5 {#clickhouse-release-19-5}
+
+### ClickHouse release 19.5.4.22, 2019-05-13 {#clickhouse-release-19-5-4-22-2019-05-13}
+
+#### Bug Fixes {#bug-fixes-5}
+
+- Fixed a possible crash in bitmap\* functions [\#5220](https://github.com/ClickHouse/ClickHouse/pull/5220) [\#5228](https://github.com/ClickHouse/ClickHouse/pull/5228) ([Andy Yang](https://github.com/andyyzh))
+- Fixed a very rare data race condition that could happen when executing a query with UNION ALL involving at least two SELECTs from system.columns, system.tables, system.parts, system.parts\_tables or tables of the Merge family and performing ALTER of columns of the related tables concurrently. [\#5189](https://github.com/ClickHouse/ClickHouse/pull/5189) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed the error `Set for IN is not created yet in case of using single LowCardinality column in the left part of IN`. This error happened if a LowCardinality column was part of the primary key. \#5031 [\#5154](https://github.com/ClickHouse/ClickHouse/pull/5154) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Modification of the retention function: if a row satisfied both the first and the N-th condition, only the first satisfied condition was added to the data state. Now all conditions that are satisfied in a row of data are added to the data state. [\#5119](https://github.com/ClickHouse/ClickHouse/pull/5119) ([小路](https://github.com/nicelulu))
+
+### ClickHouse release 19.5.3.8, 2019-04-18 {#clickhouse-release-19-5-3-8-2019-04-18}
+
+#### Bug Fixes {#bug-fixes-6}
+
+- Fixed the type of the setting `max_partitions_per_insert_block` from boolean to UInt64. [\#5028](https://github.com/ClickHouse/ClickHouse/pull/5028) ([Mohammad Hossein Sekhavat](https://github.com/mhsekhavat))
+
+### ClickHouse release 19.5.2.6, 2019-04-15 {#clickhouse-release-19-5-2-6-2019-04-15}
+
+#### New Features {#new-features-4}
+
+- [Hyperscan](https://github.com/intel/hyperscan) multiple regular expression matching was added (functions `multiMatchAny`, `multiMatchAnyIndex`, `multiFuzzyMatchAny`, `multiFuzzyMatchAnyIndex`). [\#4780](https://github.com/ClickHouse/ClickHouse/pull/4780), [\#4841](https://github.com/ClickHouse/ClickHouse/pull/4841) ([Danila Kutenin](https://github.com/danlark1))
+- The `multiSearchFirstPosition` function was added. [\#4780](https://github.com/ClickHouse/ClickHouse/pull/4780) ([Danila Kutenin](https://github.com/danlark1))
+- Implement a predefined expression filter per row for tables. [\#4792](https://github.com/ClickHouse/ClickHouse/pull/4792) ([Ivan](https://github.com/abyss7))
+- A new type of data skipping indices based on bloom filters (can be used for the `equal`, `in` and `like` functions). [\#4499](https://github.com/ClickHouse/ClickHouse/pull/4499) ([Nikita Vasilev](https://github.com/nikvas0))
+- Added `ASOF JOIN`, which allows to run queries that join to the most recent value known (see the sketch after this list). [\#4774](https://github.com/ClickHouse/ClickHouse/pull/4774) [\#4867](https://github.com/ClickHouse/ClickHouse/pull/4867) [\#4863](https://github.com/ClickHouse/ClickHouse/pull/4863) [\#4875](https://github.com/ClickHouse/ClickHouse/pull/4875) ([Martijn Bakker](https://github.com/Gladdy), [Artem Zuikov](https://github.com/4ertus2))
+- Rewrite multiple `COMMA JOIN` to `CROSS JOIN`. Then rewrite them to `INNER JOIN` if possible. [\#4661](https://github.com/ClickHouse/ClickHouse/pull/4661) ([Artem Zuikov](https://github.com/4ertus2))
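+A minimal sketch of `ASOF JOIN` (the `trades` and `quotes` tables are made up; the last column in USING is the inequality column, so each trade is matched with the latest quote at or before its timestamp):
+
+``` sql
+SELECT t.symbol, t.ts, t.price, q.bid
+FROM trades AS t
+ASOF JOIN quotes AS q USING (symbol, ts);
+```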
+#### Improvement {#improvement-9}
+
+- `topK` and `topKWeighted` now support a custom `loadFactor` (fixes issue [\#4252](https://github.com/ClickHouse/ClickHouse/issues/4252)). [\#4634](https://github.com/ClickHouse/ClickHouse/pull/4634) ([Kirill Danshin](https://github.com/kirillDanshin))
+- Allow to use `parallel_replicas_count > 1` even for tables without sampling (the setting is simply ignored for them). In previous versions it led to an exception. [\#4637](https://github.com/ClickHouse/ClickHouse/pull/4637) ([Alexey Elymanov](https://github.com/digitalist))
+- Support `CREATE OR REPLACE VIEW`. Allows to create a view or set a new definition in a single statement. [\#4654](https://github.com/ClickHouse/ClickHouse/pull/4654) ([Boris Granveaud](https://github.com/bgranvea))
+- The `Buffer` table engine now supports `PREWHERE`. [\#4671](https://github.com/ClickHouse/ClickHouse/pull/4671) ([Yangkuan Liu](https://github.com/LiuYangkuan))
+- Add the ability to start a replicated table without metadata in zookeeper in `readonly` mode. [\#4691](https://github.com/ClickHouse/ClickHouse/pull/4691) ([alesapin](https://github.com/alesapin))
+- Fixed flicker of the progress bar in clickhouse-client. The issue was most noticeable when using `FORMAT Null` with streaming queries. [\#4811](https://github.com/ClickHouse/ClickHouse/pull/4811) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Allow to disable functions with the `hyperscan` library on a per-user basis to limit potentially excessive and uncontrolled resource usage. [\#4816](https://github.com/ClickHouse/ClickHouse/pull/4816) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add version number logging in all errors. [\#4824](https://github.com/ClickHouse/ClickHouse/pull/4824) ([proller](https://github.com/proller))
+- Added a restriction to the `multiMatch` functions which requires the string size to fit into `unsigned int`. Also added a number-of-arguments limit to the `multiSearch` functions. [\#4834](https://github.com/ClickHouse/ClickHouse/pull/4834) ([Danila Kutenin](https://github.com/danlark1))
+- Improved usage of scratch space and error handling in Hyperscan. [\#4866](https://github.com/ClickHouse/ClickHouse/pull/4866) ([Danila Kutenin](https://github.com/danlark1))
+- Fill `system.graphite_detentions` from a table config of `*GraphiteMergeTree` engine tables. [\#4584](https://github.com/ClickHouse/ClickHouse/pull/4584) ([Mikhail f. Shiryaev](https://github.com/Felixoid))
+- Rename the `trigramDistance` function to `ngramDistance` and add more functions with `CaseInsensitive` and `UTF`. [\#4602](https://github.com/ClickHouse/ClickHouse/pull/4602) ([Danila Kutenin](https://github.com/danlark1))
+- Improved data skipping indices calculation.
[\#4640](https://github.com/ClickHouse/ClickHouse/pull/4640) ([Nikita Vasilev](https://github.com/nikvas0))
+- Keep ordinary, `DEFAULT`, `MATERIALIZED` and `ALIAS` columns in a single list (fixes issue [\#2867](https://github.com/ClickHouse/ClickHouse/issues/2867)). [\#4707](https://github.com/ClickHouse/ClickHouse/pull/4707) ([Alex Zatelepin](https://github.com/ztlpn))
+
+#### Bug Fix {#bug-fix-26}
+
+- Avoid `std::terminate` in case of memory allocation failure. Now the `std::bad_alloc` exception is thrown as expected. [\#4665](https://github.com/ClickHouse/ClickHouse/pull/4665) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed capnproto reading from a buffer. Sometimes files weren't loaded successfully over HTTP. [\#4674](https://github.com/ClickHouse/ClickHouse/pull/4674) ([Vladislav](https://github.com/smirnov-vs))
+- Fix the error `Unknown log entry type: 0` after an `OPTIMIZE TABLE FINAL` query. [\#4683](https://github.com/ClickHouse/ClickHouse/pull/4683) ([Amos Bird](https://github.com/amosbird))
+- Wrong arguments to the `hasAny` or `hasAll` functions could lead to a segfault. [\#4698](https://github.com/ClickHouse/ClickHouse/pull/4698) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- A deadlock could happen while executing a `DROP DATABASE dictionary` query. [\#4701](https://github.com/ClickHouse/ClickHouse/pull/4701) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix undefined behavior in the `median` and `quantile` functions. [\#4702](https://github.com/ClickHouse/ClickHouse/pull/4702) ([hcz](https://github.com/hczhcz))
+- Fix compression level detection when `network_compression_method` is in lowercase. Broken in v19.1. [\#4706](https://github.com/ClickHouse/ClickHouse/pull/4706) ([proller](https://github.com/proller))
+- Fixed ignoring of the `UTC` timezone setting (fixes issue [\#4658](https://github.com/ClickHouse/ClickHouse/issues/4658)). [\#4718](https://github.com/ClickHouse/ClickHouse/pull/4718) ([proller](https://github.com/proller))
+- Fix `histogram` function behaviour with `Distributed` tables. [\#4741](https://github.com/ClickHouse/ClickHouse/pull/4741) ([olegkv](https://github.com/olegkv))
+- Fixed a TSan report `destroy of a locked mutex`. [\#4742](https://github.com/ClickHouse/ClickHouse/pull/4742) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a TSan report on shutdown due to a race condition in system logs usage. Fixed potential use-after-free on shutdown when part\_log is enabled. [\#4758](https://github.com/ClickHouse/ClickHouse/pull/4758) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix rechecking parts in `ReplicatedMergeTreeAlterThread` in case of error. [\#4772](https://github.com/ClickHouse/ClickHouse/pull/4772) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Arithmetic operations on intermediate aggregate function states were not working for constant arguments (such as subquery results). [\#4776](https://github.com/ClickHouse/ClickHouse/pull/4776) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Always backquote column names in metadata. Otherwise it's impossible to create a table with a column named `index` (the server won't restart due to a malformed `ATTACH` query in metadata).
[\#4782](https://github.com/ClickHouse/ClickHouse/pull/4782) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix a crash in `ALTER ... MODIFY ORDER BY` on a `Distributed` table. [\#4790](https://github.com/ClickHouse/ClickHouse/pull/4790) ([TCeason](https://github.com/TCeason))
+- Fixed a segfault in `JOIN ON` with enabled `enable_optimize_predicate_expression`. [\#4794](https://github.com/ClickHouse/ClickHouse/pull/4794) ([Winter Zhang](https://github.com/zhang2014))
+- Fixed a bug with adding an extraneous row after consuming a protobuf message from Kafka. [\#4808](https://github.com/ClickHouse/ClickHouse/pull/4808) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fix a crash of `JOIN` on a not-nullable vs nullable column. Fix `NULLs` in right keys in `ANY JOIN` + `join_use_nulls`. [\#4815](https://github.com/ClickHouse/ClickHouse/pull/4815) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed a segmentation fault in `clickhouse-copier`. [\#4835](https://github.com/ClickHouse/ClickHouse/pull/4835) ([proller](https://github.com/proller))
+- Fixed a race condition in `SELECT` from `system.tables` if the table is renamed or altered concurrently. [\#4836](https://github.com/ClickHouse/ClickHouse/pull/4836) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a data race when fetching a data part that is already obsolete. [\#4839](https://github.com/ClickHouse/ClickHouse/pull/4839) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a rare data race that can happen during `RENAME` of a MergeTree family table. [\#4844](https://github.com/ClickHouse/ClickHouse/pull/4844) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a segmentation fault in the function `arrayIntersect`. A segmentation fault could happen if the function was called with mixed constant and ordinary arguments. [\#4847](https://github.com/ClickHouse/ClickHouse/pull/4847) ([Lixiang Qian](https://github.com/fancyqlx))
+- Fixed reading from an `Array(LowCardinality)` column in the rare case when the column contained a long sequence of empty arrays. [\#4850](https://github.com/ClickHouse/ClickHouse/pull/4850) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix a crash in `FULL/RIGHT JOIN` when joining on nullable vs not nullable. [\#4855](https://github.com/ClickHouse/ClickHouse/pull/4855) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix the `No message received` exception while fetching parts between replicas. [\#4856](https://github.com/ClickHouse/ClickHouse/pull/4856) ([alesapin](https://github.com/alesapin))
+- Fixed the `arrayIntersect` function returning a wrong result in case of several repeated values in a single array. [\#4871](https://github.com/ClickHouse/ClickHouse/pull/4871) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix a race condition during concurrent `ALTER COLUMN` queries that could lead to a server crash (fixes issue [\#3421](https://github.com/ClickHouse/ClickHouse/issues/3421)). [\#4592](https://github.com/ClickHouse/ClickHouse/pull/4592) ([Alex Zatelepin](https://github.com/ztlpn))
+- Fix an incorrect result in `FULL/RIGHT JOIN` with a const column. [\#4723](https://github.com/ClickHouse/ClickHouse/pull/4723) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix duplicates in `GLOBAL JOIN` with asterisk.
[\#4705](https://github.com/ClickHouse/ClickHouse/pull/4705) ([Artem Zuikov](https://github.com/4ertus2))
+- Fix parameter deduction in `ALTER MODIFY` of a column `CODEC` when the column type is not specified. [\#4883](https://github.com/ClickHouse/ClickHouse/pull/4883) ([alesapin](https://github.com/alesapin))
+- The functions `cutQueryStringAndFragment()` and `queryStringAndFragment()` now work correctly when the `URL` contains a fragment and no query. [\#4894](https://github.com/ClickHouse/ClickHouse/pull/4894) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fixed a rare bug when the setting `min_bytes_to_use_direct_io` is greater than zero, which occurs when a thread has to seek backward in a column file. [\#4897](https://github.com/ClickHouse/ClickHouse/pull/4897) ([alesapin](https://github.com/alesapin))
+- Fix wrong argument types for aggregate functions with `LowCardinality` arguments (fixes issue [\#4919](https://github.com/ClickHouse/ClickHouse/issues/4919)). [\#4922](https://github.com/ClickHouse/ClickHouse/pull/4922) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix wrong name qualification in `GLOBAL JOIN`. [\#4969](https://github.com/ClickHouse/ClickHouse/pull/4969) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed the function `toISOWeek` result for the year 1970. [\#4988](https://github.com/ClickHouse/ClickHouse/pull/4988) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix `DROP`, `TRUNCATE` and `OPTIMIZE` query duplication when executed `ON CLUSTER` for the `ReplicatedMergeTree*` family of tables. [\#4991](https://github.com/ClickHouse/ClickHouse/pull/4991) ([alesapin](https://github.com/alesapin))
+
+#### Backward Incompatible Changes {#backward-incompatible-change-8}
+
+- Rename the setting `insert_sample_with_metadata` to `input_format_defaults_for_omitted_fields`. [\#4771](https://github.com/ClickHouse/ClickHouse/pull/4771) ([Artem Zuikov](https://github.com/4ertus2))
+- Added the setting `max_partitions_per_insert_block` (with value 100 by default). If an inserted block contains a larger number of partitions, an exception is thrown. Set it to 0 if you want to remove the limit (not recommended). [\#4845](https://github.com/ClickHouse/ClickHouse/pull/4845) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Multi-search functions were renamed (`multiPosition` to `multiSearchAllPositions`, `multiSearch` to `multiSearchAny`, `firstMatch` to `multiSearchFirstIndex`). [\#4780](https://github.com/ClickHouse/ClickHouse/pull/4780) ([Danila Kutenin](https://github.com/danlark1))
+
+#### Performance Improvement {#performance-improvement-6}
+
+- Optimize the Volnitsky searcher by inlining, giving about a 5-10% search improvement for queries with many needles or many similar bigrams. [\#4862](https://github.com/ClickHouse/ClickHouse/pull/4862) ([Danila Kutenin](https://github.com/danlark1))
+- Fixed a performance issue when the setting `use_uncompressed_cache` is greater than zero, which appeared when all read data was contained in the cache. [\#4913](https://github.com/ClickHouse/ClickHouse/pull/4913) ([alesapin](https://github.com/alesapin))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-10}
+
+- Hardening debug build: more granular memory mappings and ASLR; add memory protection for mark cache and index.
This allows to find more memory stomping bugs in case when ASan and MSan cannot do it. [\#4632](https://github.com/ClickHouse/ClickHouse/pull/4632) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add support for the cmake variables `ENABLE_PROTOBUF`, `ENABLE_PARQUET` and `ENABLE_BROTLI`, which allow to enable/disable the above features (same as we can do for librdkafka, MySQL, etc). [\#4669](https://github.com/ClickHouse/ClickHouse/pull/4669) ([Silviu Caragea](https://github.com/silviucpp))
+- Add the ability to print the process list and stacktraces of all threads if some queries are hung after a test run. [\#4675](https://github.com/ClickHouse/ClickHouse/pull/4675) ([alesapin](https://github.com/alesapin))
+- Add retries on the `Connection loss` error in `clickhouse-test`. [\#4682](https://github.com/ClickHouse/ClickHouse/pull/4682) ([alesapin](https://github.com/alesapin))
+- Add a FreeBSD build with Vagrant and a build with thread sanitizer to the packager script. [\#4712](https://github.com/ClickHouse/ClickHouse/pull/4712) [\#4748](https://github.com/ClickHouse/ClickHouse/pull/4748) ([alesapin](https://github.com/alesapin))
+- Now the user is asked for a password for the user `'default'` during installation. [\#4725](https://github.com/ClickHouse/ClickHouse/pull/4725) ([proller](https://github.com/proller))
+- Suppress a warning in the `rdkafka` library. [\#4740](https://github.com/ClickHouse/ClickHouse/pull/4740) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Allow building without ssl. [\#4750](https://github.com/ClickHouse/ClickHouse/pull/4750) ([proller](https://github.com/proller))
+- Add a way to launch the clickhouse-server image as a custom user. [\#4753](https://github.com/ClickHouse/ClickHouse/pull/4753) ([Mikhail f. Shiryaev](https://github.com/Felixoid))
+- Upgrade contrib boost to 1.69. [\#4793](https://github.com/ClickHouse/ClickHouse/pull/4793) ([proller](https://github.com/proller))
+- Disable usage of `mremap` when compiled with Thread Sanitizer. Surprisingly enough, TSan does not intercept `mremap` (though it does intercept `mmap`, `munmap`), which leads to false positives. Fixed a TSan report in stateful tests. [\#4859](https://github.com/ClickHouse/ClickHouse/pull/4859) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add test checking of using a format schema via the HTTP interface. [\#4864](https://github.com/ClickHouse/ClickHouse/pull/4864) ([Vitaly Baranov](https://github.com/vitlibar))
+
+## ClickHouse release 19.4 {#clickhouse-release-19-4}
+
+### ClickHouse release 19.4.4.33, 2019-04-17 {#clickhouse-release-19-4-4-33-2019-04-17}
+
+#### Bug Fixes {#bug-fixes-7}
+
+- Avoid `std::terminate` in case of memory allocation failure. Now the `std::bad_alloc` exception is thrown as expected. [\#4665](https://github.com/ClickHouse/ClickHouse/pull/4665) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed capnproto reading from a buffer. Sometimes files weren't loaded successfully over HTTP. [\#4674](https://github.com/ClickHouse/ClickHouse/pull/4674) ([Vladislav](https://github.com/smirnov-vs))
+- Fix the error `Unknown log entry type: 0` after an `OPTIMIZE TABLE FINAL` query.
[\#4683](https://github.com/ClickHouse/ClickHouse/pull/4683) ([Amos Bird](https://github.com/amosbird))
+- Wrong arguments to the `hasAny` or `hasAll` functions could lead to a segfault. [\#4698](https://github.com/ClickHouse/ClickHouse/pull/4698) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- A deadlock could happen while executing a `DROP DATABASE dictionary` query. [\#4701](https://github.com/ClickHouse/ClickHouse/pull/4701) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix undefined behavior in the `median` and `quantile` functions. [\#4702](https://github.com/ClickHouse/ClickHouse/pull/4702) ([hcz](https://github.com/hczhcz))
+- Fix compression level detection when `network_compression_method` is in lowercase. Broken in v19.1. [\#4706](https://github.com/ClickHouse/ClickHouse/pull/4706) ([proller](https://github.com/proller))
+- Fixed ignoring of the `UTC` timezone setting (fixes issue [\#4658](https://github.com/ClickHouse/ClickHouse/issues/4658)). [\#4718](https://github.com/ClickHouse/ClickHouse/pull/4718) ([proller](https://github.com/proller))
+- Fix `histogram` function behaviour with `Distributed` tables. [\#4741](https://github.com/ClickHouse/ClickHouse/pull/4741) ([olegkv](https://github.com/olegkv))
+- Fixed a TSan report `destroy of a locked mutex`. [\#4742](https://github.com/ClickHouse/ClickHouse/pull/4742) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a TSan report on shutdown due to a race condition in system logs usage. Fixed potential use-after-free on shutdown when part\_log is enabled. [\#4758](https://github.com/ClickHouse/ClickHouse/pull/4758) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix rechecking parts in `ReplicatedMergeTreeAlterThread` in case of error. [\#4772](https://github.com/ClickHouse/ClickHouse/pull/4772) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Arithmetic operations on intermediate aggregate function states were not working for constant arguments (such as subquery results). [\#4776](https://github.com/ClickHouse/ClickHouse/pull/4776) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Always backquote column names in metadata. Otherwise it's impossible to create a table with a column named `index` (the server won't restart due to a malformed `ATTACH` query in metadata). [\#4782](https://github.com/ClickHouse/ClickHouse/pull/4782) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix a crash in `ALTER ... MODIFY ORDER BY` on a `Distributed` table. [\#4790](https://github.com/ClickHouse/ClickHouse/pull/4790) ([TCeason](https://github.com/TCeason))
+- Fixed a segfault in `JOIN ON` with enabled `enable_optimize_predicate_expression`. [\#4794](https://github.com/ClickHouse/ClickHouse/pull/4794) ([Winter Zhang](https://github.com/zhang2014))
+- Fixed a bug with adding an extraneous row after consuming a protobuf message from Kafka. [\#4808](https://github.com/ClickHouse/ClickHouse/pull/4808) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fixed a segmentation fault in `clickhouse-copier`. [\#4835](https://github.com/ClickHouse/ClickHouse/pull/4835) ([proller](https://github.com/proller))
+- Fixed a race condition in `SELECT` from `system.tables` if the table is renamed or altered concurrently.
[\#4836](https://github.com/ClickHouse/ClickHouse/pull/4836) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a data race when fetching a data part that is already obsolete. [\#4839](https://github.com/ClickHouse/ClickHouse/pull/4839) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a rare data race that can happen during `RENAME` of a MergeTree family table. [\#4844](https://github.com/ClickHouse/ClickHouse/pull/4844) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a segmentation fault in the function `arrayIntersect`. A segmentation fault could happen if the function was called with mixed constant and ordinary arguments. [\#4847](https://github.com/ClickHouse/ClickHouse/pull/4847) ([Lixiang Qian](https://github.com/fancyqlx))
+- Fixed reading from an `Array(LowCardinality)` column in the rare case when the column contained a long sequence of empty arrays. [\#4850](https://github.com/ClickHouse/ClickHouse/pull/4850) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix the `No message received` exception while fetching parts between replicas. [\#4856](https://github.com/ClickHouse/ClickHouse/pull/4856) ([alesapin](https://github.com/alesapin))
+- Fixed the `arrayIntersect` function returning a wrong result in case of several repeated values in a single array. [\#4871](https://github.com/ClickHouse/ClickHouse/pull/4871) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fix a race condition during concurrent `ALTER COLUMN` queries that could lead to a server crash (fixes issue [\#3421](https://github.com/ClickHouse/ClickHouse/issues/3421)). [\#4592](https://github.com/ClickHouse/ClickHouse/pull/4592) ([Alex Zatelepin](https://github.com/ztlpn))
+- Fix parameter deduction in `ALTER MODIFY` of a column `CODEC` when the column type is not specified. [\#4883](https://github.com/ClickHouse/ClickHouse/pull/4883) ([alesapin](https://github.com/alesapin))
+- The functions `cutQueryStringAndFragment()` and `queryStringAndFragment()` now work correctly when the `URL` contains a fragment and no query. [\#4894](https://github.com/ClickHouse/ClickHouse/pull/4894) ([Vitaly Baranov](https://github.com/vitlibar))
+- Fixed a rare bug when the setting `min_bytes_to_use_direct_io` is greater than zero, which occurs when a thread has to seek backward in a column file. [\#4897](https://github.com/ClickHouse/ClickHouse/pull/4897) ([alesapin](https://github.com/alesapin))
+- Fix wrong argument types for aggregate functions with `LowCardinality` arguments (fixes issue [\#4919](https://github.com/ClickHouse/ClickHouse/issues/4919)). [\#4922](https://github.com/ClickHouse/ClickHouse/pull/4922) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed the function `toISOWeek` result for the year 1970. [\#4988](https://github.com/ClickHouse/ClickHouse/pull/4988) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix `DROP`, `TRUNCATE` and `OPTIMIZE` query duplication when executed `ON CLUSTER` for the `ReplicatedMergeTree*` family of tables. [\#4991](https://github.com/ClickHouse/ClickHouse/pull/4991) ([alesapin](https://github.com/alesapin))
+
+#### Improvements {#improvements-2}
+
+- Keep ordinary, `DEFAULT`, `MATERIALIZED` and `ALIAS` columns in a single list (fixes issue [\#2867](https://github.com/ClickHouse/ClickHouse/issues/2867)).
+
+### ClickHouse release 19.4.3.11, 2019-04-02 {#clickhouse-release-19-4-3-11-2019-04-02}
+
+#### Bug Fixes {#bug-fixes-8}
+
+- Fix crash in `FULL/RIGHT JOIN` when joining on nullable vs not nullable. [\#4855](https://github.com/ClickHouse/ClickHouse/pull/4855) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed segmentation fault in `clickhouse-copier`. [\#4835](https://github.com/ClickHouse/ClickHouse/pull/4835) ([proller](https://github.com/proller))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-11}
+
+- Add a way to launch the clickhouse-server image from a custom user. [\#4753](https://github.com/ClickHouse/ClickHouse/pull/4753) ([Mikhail f. Shiryaev](https://github.com/Felixoid))
+
+### ClickHouse release 19.4.2.7, 2019-03-30 {#clickhouse-release-19-4-2-7-2019-03-30}
+
+#### Bug Fixes {#bug-fixes-9}
+
+- Fixed reading from `Array(LowCardinality)` column in a rare case when the column contains a long sequence of empty arrays. [\#4850](https://github.com/ClickHouse/ClickHouse/pull/4850) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+
+### ClickHouse release 19.4.1.3, 2019-03-19 {#clickhouse-release-19-4-1-3-2019-03-19}
+
+#### Bug Fixes {#bug-fixes-10}
+
+- Fixed remote queries which contain both `LIMIT BY` and `LIMIT`. Previously, if `LIMIT BY` and `LIMIT` were used for a remote query, `LIMIT` could happen before `LIMIT BY`, which led to a too heavily filtered result. [\#4708](https://github.com/ClickHouse/ClickHouse/pull/4708) ([Constantin S. Pan](https://github.com/kvap))
+
+### ClickHouse release 19.4.0.49, 2019-03-09 {#clickhouse-release-19-4-0-49-2019-03-09}
+
+#### New Features {#new-features-5}
+
+- Added full support for the `Protobuf` format (input and output, nested data structures). [\#4174](https://github.com/ClickHouse/ClickHouse/pull/4174) [\#4493](https://github.com/ClickHouse/ClickHouse/pull/4493) ([Vitaly Baranov](https://github.com/vitlibar))
+- Added bitmap functions with Roaring Bitmaps. [\#4207](https://github.com/ClickHouse/ClickHouse/pull/4207) ([Andy Yang](https://github.com/andyyzh)) [\#4568](https://github.com/ClickHouse/ClickHouse/pull/4568) ([Vitaly Baranov](https://github.com/vitlibar))
+- Parquet format support. [\#4448](https://github.com/ClickHouse/ClickHouse/pull/4448) ([proller](https://github.com/proller))
+- N-gram distance was added for fuzzy string comparison. It is similar to q-gram metrics in the R language. [\#4466](https://github.com/ClickHouse/ClickHouse/pull/4466) ([Danila Kutenin](https://github.com/danlark1))
+- Combine rules for graphite rollup from dedicated aggregation and retention patterns. [\#4426](https://github.com/ClickHouse/ClickHouse/pull/4426) ([Mikhail f. Shiryaev](https://github.com/Felixoid))
+- Added `max_execution_speed` and `max_execution_speed_bytes` to limit resource usage. Added the `min_execution_speed_bytes` setting to complement `min_execution_speed`. [\#4430](https://github.com/ClickHouse/ClickHouse/pull/4430) ([Winter Zhang](https://github.com/zhang2014))
+- Implemented function `flatten` (see the example below). [\#4555](https://github.com/ClickHouse/ClickHouse/pull/4555) [\#4409](https://github.com/ClickHouse/ClickHouse/pull/4409) ([alexey-milovidov](https://github.com/alexey-milovidov), [kzon](https://github.com/kzon))
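+
+A minimal sketch of `flatten` — it converts an array of arrays into a flat array:
+
+```sql
+SELECT flatten([[[1, 2], [3]], [[4, 5]]]) AS flat  -- [1, 2, 3, 4, 5]
+```
+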
+- Added functions `arrayEnumerateDenseRanked` and `arrayEnumerateUniqRanked` (like `arrayEnumerateUniq`, but they allow fine tuning of the array depth to look inside multidimensional arrays). [\#4475](https://github.com/ClickHouse/ClickHouse/pull/4475) ([proller](https://github.com/proller)) [\#4601](https://github.com/ClickHouse/ClickHouse/pull/4601) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Multiple JOINS with some restrictions: no asterisks, no complex aliases in ON/WHERE/GROUP BY/… [\#4462](https://github.com/ClickHouse/ClickHouse/pull/4462) ([Artem Zuikov](https://github.com/4ertus2))
+
+#### Bug Fixes {#bug-fixes-11}
+
+- This release also contains all bug fixes from 19.3 and 19.1.
+- Fixed bug in data skipping indices: order of granules after INSERT was incorrect. [\#4407](https://github.com/ClickHouse/ClickHouse/pull/4407) ([Nikita Vasilev](https://github.com/nikvas0))
+- Fixed `set` index for `Nullable` and `LowCardinality` columns. Before it, a `set` index with a `Nullable` or `LowCardinality` column led to the error `Data type must be deserialized with multiple streams` while selecting. [\#4594](https://github.com/ClickHouse/ClickHouse/pull/4594) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Correctly set update\_time on full `executable` dictionary update. [\#4551](https://github.com/ClickHouse/ClickHouse/pull/4551) ([Tema Novikov](https://github.com/temoon))
+- Fix broken progress bar in 19.3. [\#4627](https://github.com/ClickHouse/ClickHouse/pull/4627) ([filimonov](https://github.com/filimonov))
+- Fixed inconsistent values of MemoryTracker when a memory region was shrunk, in certain cases. [\#4619](https://github.com/ClickHouse/ClickHouse/pull/4619) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed undefined behaviour in ThreadPool. [\#4612](https://github.com/ClickHouse/ClickHouse/pull/4612) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a very rare crash with the message `mutex lock failed: Invalid argument` that could happen when a MergeTree table was dropped concurrently with a SELECT. [\#4608](https://github.com/ClickHouse/ClickHouse/pull/4608) ([Alex Zatelepin](https://github.com/ztlpn))
+- ODBC driver compatibility with the `LowCardinality` data type. [\#4381](https://github.com/ClickHouse/ClickHouse/pull/4381) ([proller](https://github.com/proller))
+- FreeBSD: fixup for the `AIOcontextPool: Found io_event with unknown id 0` error. [\#4438](https://github.com/ClickHouse/ClickHouse/pull/4438) ([urgordeadbeef](https://github.com/urgordeadbeef))
+- The `system.part_log` table was created regardless of configuration. [\#4483](https://github.com/ClickHouse/ClickHouse/pull/4483) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix undefined behaviour in the `dictIsIn` function for cache dictionaries. [\#4515](https://github.com/ClickHouse/ClickHouse/pull/4515) ([alesapin](https://github.com/alesapin))
+- Fixed a deadlock when a SELECT query locks the same table multiple times (e.g. from different threads or when executing multiple subqueries) and there is a concurrent DDL query (see the illustration below). [\#4535](https://github.com/ClickHouse/ClickHouse/pull/4535) ([Alex Zatelepin](https://github.com/ztlpn))
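+
+For context on the deadlock fix above, the problematic shape was a query that reads the same table more than once while a concurrent DDL query runs; the table and column names below are hypothetical:
+
+```sql
+-- Reads `events` twice (outer scan plus subquery), so the table lock is
+-- taken twice; a concurrent ALTER/DROP used to be able to deadlock with it.
+SELECT count()
+FROM events
+WHERE user_id IN (SELECT user_id FROM events WHERE is_vip = 1)
+```
+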
+- Disable compile\_expressions by default until we get our own `llvm` contrib and can test it with `clang` and `asan`. [\#4579](https://github.com/ClickHouse/ClickHouse/pull/4579) ([alesapin](https://github.com/alesapin))
+- Prevent `std::terminate` when `invalidate_query` for a `clickhouse` external dictionary source has returned a wrong resultset (empty, or more than one row, or more than one column). Fixed an issue when `invalidate_query` was performed every five seconds regardless of the `lifetime`. [\#4583](https://github.com/ClickHouse/ClickHouse/pull/4583) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Avoid deadlock when `invalidate_query` for a dictionary with `clickhouse` source was involving the `system.dictionaries` table or the `Dictionaries` database (rare case). [\#4599](https://github.com/ClickHouse/ClickHouse/pull/4599) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed CROSS JOIN with empty WHERE. [\#4598](https://github.com/ClickHouse/ClickHouse/pull/4598) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed segfault in function «replicate» when a constant argument is passed. [\#4603](https://github.com/ClickHouse/ClickHouse/pull/4603) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix lambda function with predicate optimizer. [\#4408](https://github.com/ClickHouse/ClickHouse/pull/4408) ([Winter Zhang](https://github.com/zhang2014))
+- Multiple JOINs: multiple fixes. [\#4595](https://github.com/ClickHouse/ClickHouse/pull/4595) ([Artem Zuikov](https://github.com/4ertus2))
+
+#### Improvements {#improvements-3}
+
+- Support aliases in the JOIN ON section for right table columns. [\#4412](https://github.com/ClickHouse/ClickHouse/pull/4412) ([Artem Zuikov](https://github.com/4ertus2))
+- The result of multiple JOINs needs correct result names to be used in subselects. Replace flat aliases with source names in the result. [\#4474](https://github.com/ClickHouse/ClickHouse/pull/4474) ([Artem Zuikov](https://github.com/4ertus2))
+- Improve push-down logic for joined statements. [\#4387](https://github.com/ClickHouse/ClickHouse/pull/4387) ([Ivan](https://github.com/abyss7))
+
+#### Performance Improvements {#performance-improvements-3}
+
+- Improved heuristics of the «move to PREWHERE» optimization. [\#4405](https://github.com/ClickHouse/ClickHouse/pull/4405) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Use proper lookup tables that use the HashTable API for 8-bit and 16-bit keys. [\#4536](https://github.com/ClickHouse/ClickHouse/pull/4536) ([Amos Bird](https://github.com/amosbird))
+- Improved performance of string comparison. [\#4564](https://github.com/ClickHouse/ClickHouse/pull/4564) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Cleanup the distributed DDL queue in a separate thread so that it doesn't slow down the main loop that processes distributed DDL tasks. [\#4502](https://github.com/ClickHouse/ClickHouse/pull/4502) ([Alex Zatelepin](https://github.com/ztlpn))
+- When `min_bytes_to_use_direct_io` is set to 1, not every file was opened in O\_DIRECT mode because the data size to read was sometimes underestimated by the size of one compressed block. [\#4526](https://github.com/ClickHouse/ClickHouse/pull/4526) ([alexey-milovidov](https://github.com/alexey-milovidov))
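+
+For reference, `min_bytes_to_use_direct_io` is a query-level setting; a minimal sketch of forcing O\_DIRECT reads for all sufficiently large scans (the threshold value and table name are illustrative):
+
+```sql
+SET min_bytes_to_use_direct_io = 1;  -- use O_DIRECT whenever the estimated read size is at least 1 byte
+SELECT count() FROM big_table;
+```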
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-12}
+
+- Added support for clang-9. [\#4604](https://github.com/ClickHouse/ClickHouse/pull/4604) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix wrong `__asm__` instructions (again). [\#4621](https://github.com/ClickHouse/ClickHouse/pull/4621) ([Konstantin Podshumok](https://github.com/podshumok))
+- Add ability to specify settings for `clickhouse-performance-test` from the command line. [\#4437](https://github.com/ClickHouse/ClickHouse/pull/4437) ([alesapin](https://github.com/alesapin))
+- Add dictionaries tests to integration tests. [\#4477](https://github.com/ClickHouse/ClickHouse/pull/4477) ([alesapin](https://github.com/alesapin))
+- Added queries from the benchmark on the website to automated performance tests. [\#4496](https://github.com/ClickHouse/ClickHouse/pull/4496) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `xxhash.h` does not exist in external lz4 because it is an implementation detail and its symbols are namespaced with the `XXH_NAMESPACE` macro. When lz4 is external, xxHash has to be external too, and the dependents have to link to it. [\#4495](https://github.com/ClickHouse/ClickHouse/pull/4495) ([Orivej Desh](https://github.com/orivej))
+- Fixed a case when the `quantileTiming` aggregate function can be called with a negative or floating point argument (this fixes a fuzz test with undefined behaviour sanitizer). [\#4506](https://github.com/ClickHouse/ClickHouse/pull/4506) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Spelling error correction. [\#4531](https://github.com/ClickHouse/ClickHouse/pull/4531) ([sdk2](https://github.com/sdk2))
+- Fix compilation on Mac. [\#4371](https://github.com/ClickHouse/ClickHouse/pull/4371) ([Vitaly Baranov](https://github.com/vitlibar))
+- Build fixes for FreeBSD and various unusual build configurations. [\#4444](https://github.com/ClickHouse/ClickHouse/pull/4444) ([proller](https://github.com/proller))
+
+## ClickHouse release 19.3 {#clickhouse-release-19-3}
+
+### ClickHouse release 19.3.9.1, 2019-04-02 {#clickhouse-release-19-3-9-1-2019-04-02}
+
+#### Bug Fixes {#bug-fixes-12}
+
+- Fix crash in `FULL/RIGHT JOIN` when joining on nullable vs not nullable. [\#4855](https://github.com/ClickHouse/ClickHouse/pull/4855) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed segmentation fault in `clickhouse-copier`. [\#4835](https://github.com/ClickHouse/ClickHouse/pull/4835) ([proller](https://github.com/proller))
+- Fixed reading from `Array(LowCardinality)` column in a rare case when the column contains a long sequence of empty arrays. [\#4850](https://github.com/ClickHouse/ClickHouse/pull/4850) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+
+#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-13}
+
+- Add a way to launch the clickhouse-server image from a custom user. [\#4753](https://github.com/ClickHouse/ClickHouse/pull/4753) ([Mikhail f. Shiryaev](https://github.com/Felixoid))
+
+### ClickHouse release 19.3.7, 2019-03-12 {#clickhouse-release-19-3-7-2019-03-12}
+
+#### Bug Fixes {#bug-fixes-13}
+
+- Fixed bug in \#3920. This bug manifests itself as random cache corruption (messages `Unknown codec family code`, `Cannot seek through file`) and segfaults. It first appeared in version 19.1 and is present in versions up to 19.1.10 and 19.3.6. [\#4623](https://github.com/ClickHouse/ClickHouse/pull/4623) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse release 19.3.6, 2019-03-02 {#clickhouse-release-19-3-6-2019-03-02}
+
+#### Bug Fixes {#bug-fixes-14}
+
+- When there are more than 1000 threads in a thread pool, `std::terminate` may happen on thread exit. [Azat Khuzhin](https://github.com/azat) [\#4485](https://github.com/ClickHouse/ClickHouse/pull/4485) [\#4505](https://github.com/ClickHouse/ClickHouse/pull/4505) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Now it's possible to create `ReplicatedMergeTree*` tables with comments on columns without defaults, and tables with column codecs without comments and defaults. Also fixed comparison of codecs. [\#4523](https://github.com/ClickHouse/ClickHouse/pull/4523) ([alesapin](https://github.com/alesapin))
+- Fixed crash on JOIN with array or tuple. [\#4552](https://github.com/ClickHouse/ClickHouse/pull/4552) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed crash in clickhouse-copier with the message `ThreadStatus not created`. [\#4540](https://github.com/ClickHouse/ClickHouse/pull/4540) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed hangup on server shutdown if distributed DDLs were used. [\#4472](https://github.com/ClickHouse/ClickHouse/pull/4472) ([Alex Zatelepin](https://github.com/ztlpn))
+- Incorrect column numbers were printed in the error message about text format parsing for columns with numbers greater than 10. [\#4484](https://github.com/ClickHouse/ClickHouse/pull/4484) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-3}
+
+- Fixed build with AVX enabled. [\#4527](https://github.com/ClickHouse/ClickHouse/pull/4527) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Enable extended accounting and IO accounting based on a well-known version instead of the kernel under which it is compiled. [\#4541](https://github.com/ClickHouse/ClickHouse/pull/4541) ([nvartolomei](https://github.com/nvartolomei))
+- Allow skipping the setting of core\_dump.size\_limit, with a warning instead of a throw if setting the limit fails. [\#4473](https://github.com/ClickHouse/ClickHouse/pull/4473) ([proller](https://github.com/proller))
+- Removed the `inline` tags of `void readBinary(...)` in `Field.cpp`. Also merged redundant `namespace DB` blocks. [\#4530](https://github.com/ClickHouse/ClickHouse/pull/4530) ([hcz](https://github.com/hczhcz))
+
+### ClickHouse release 19.3.5, 2019-02-21 {#clickhouse-release-19-3-5-2019-02-21}
+
+#### Bug Fixes {#bug-fixes-15}
+
+- Fixed bug with large http insert queries processing. [\#4454](https://github.com/ClickHouse/ClickHouse/pull/4454) ([alesapin](https://github.com/alesapin))
+- Fixed backward incompatibility with old versions due to a wrong implementation of the `send_logs_level` setting. [\#4445](https://github.com/ClickHouse/ClickHouse/pull/4445) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed backward incompatibility of the table function `remote` introduced with column comments. [\#4446](https://github.com/ClickHouse/ClickHouse/pull/4446) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse release 19.3.4, 2019-02-16 {#clickhouse-release-19-3-4-2019-02-16}
+
+#### Improvements {#improvements-4}
+
+- Table index size is not accounted for memory limits when doing the `ATTACH TABLE` query. Avoided the possibility that a table cannot be attached after being detached. [\#4396](https://github.com/ClickHouse/ClickHouse/pull/4396) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Slightly raised the limit on the max string and array size received from ZooKeeper. It allows to continue working with an increased size of `CLIENT_JVMFLAGS=-Djute.maxbuffer=...` on ZooKeeper. [\#4398](https://github.com/ClickHouse/ClickHouse/pull/4398) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Allow repairing an abandoned replica even if it already has a huge number of nodes in its queue. [\#4399](https://github.com/ClickHouse/ClickHouse/pull/4399) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Add one required argument to the `SET` index (max stored rows number). [\#4386](https://github.com/ClickHouse/ClickHouse/pull/4386) ([Nikita Vasilev](https://github.com/nikvas0))
+
+#### Bug Fixes {#bug-fixes-16}
+
+- Fixed `WITH ROLLUP` result for group by a single `LowCardinality` key. [\#4384](https://github.com/ClickHouse/ClickHouse/pull/4384) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Fixed bug in the set index (dropping a granule if it contains more than `max_rows` rows). [\#4386](https://github.com/ClickHouse/ClickHouse/pull/4386) ([Nikita Vasilev](https://github.com/nikvas0))
+- A lot of FreeBSD build fixes. [\#4397](https://github.com/ClickHouse/ClickHouse/pull/4397) ([proller](https://github.com/proller))
+- Fixed aliases substitution in queries with a subquery containing the same alias (issue [\#4110](https://github.com/ClickHouse/ClickHouse/issues/4110)). [\#4351](https://github.com/ClickHouse/ClickHouse/pull/4351) ([Artem Zuikov](https://github.com/4ertus2))
+
+#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-4}
+
+- Add ability to run `clickhouse-server` for stateless tests in a docker image. [\#4347](https://github.com/ClickHouse/ClickHouse/pull/4347) ([Vasily Nemkov](https://github.com/Enmk))
+
+### ClickHouse release 19.3.3, 2019-02-13 {#clickhouse-release-19-3-3-2019-02-13}
+
+#### New Features {#new-features-6}
+
+- Added the `KILL MUTATION` statement that allows removing mutations that are for some reason stuck. Added `latest_failed_part`, `latest_fail_time`, `latest_fail_reason` fields to the `system.mutations` table for easier troubleshooting. [\#4287](https://github.com/ClickHouse/ClickHouse/pull/4287) ([Alex Zatelepin](https://github.com/ztlpn))
+- Added the aggregate function `entropy`, which computes Shannon entropy. [\#4238](https://github.com/ClickHouse/ClickHouse/pull/4238) ([Quid37](https://github.com/Quid37))
+- Added ability to send queries `INSERT INTO tbl VALUES (....` to the server without splitting on `query` and `data` parts. [\#4301](https://github.com/ClickHouse/ClickHouse/pull/4301) ([alesapin](https://github.com/alesapin))
+- A generic implementation of the `arrayWithConstant` function was added (see the sketch below). [\#4322](https://github.com/ClickHouse/ClickHouse/pull/4322) ([alexey-milovidov](https://github.com/alexey-milovidov))
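+
+A minimal sketch of `arrayWithConstant(length, value)`:
+
+```sql
+SELECT arrayWithConstant(3, 1) AS arr  -- [1, 1, 1]
+```
+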
+- Implemented the `NOT BETWEEN` comparison operator. [\#4228](https://github.com/ClickHouse/ClickHouse/pull/4228) ([Dmitry Naumov](https://github.com/nezed))
+- Implement `sumMapFiltered` in order to be able to limit the number of keys for which values will be summed by `sumMap`. [\#4129](https://github.com/ClickHouse/ClickHouse/pull/4129) ([Léo Ercolanelli](https://github.com/ercolanelli-leo))
+- Added support of `Nullable` types in the `mysql` table function. [\#4198](https://github.com/ClickHouse/ClickHouse/pull/4198) ([Emmanuel Donin de Rosière](https://github.com/edonin))
+- Support for arbitrary constant expressions in the `LIMIT` clause. [\#4246](https://github.com/ClickHouse/ClickHouse/pull/4246) ([k3box](https://github.com/k3box))
+- Added the `topKWeighted` aggregate function that takes an additional argument with an (unsigned integer) weight. [\#4245](https://github.com/ClickHouse/ClickHouse/pull/4245) ([Andrew Golman](https://github.com/andrewgolman))
+- `StorageJoin` now supports the `join_any_take_last_row` setting that allows overwriting existing values of the same key. [\#3973](https://github.com/ClickHouse/ClickHouse/pull/3973) ([Amos Bird](https://github.com/amosbird))
+- Added function `toStartOfInterval`. [\#4304](https://github.com/ClickHouse/ClickHouse/pull/4304) ([Vitaly Baranov](https://github.com/vitlibar))
+- Added `RowBinaryWithNamesAndTypes` format. [\#4200](https://github.com/ClickHouse/ClickHouse/pull/4200) ([Oleg V. Kozlyuk](https://github.com/DarkWanderer))
+- Added `IPv4` and `IPv6` data types. More effective implementations of `IPv*` functions. [\#3669](https://github.com/ClickHouse/ClickHouse/pull/3669) ([Vasily Nemkov](https://github.com/Enmk))
+- Added function `toStartOfTenMinutes()`. [\#4298](https://github.com/ClickHouse/ClickHouse/pull/4298) ([Vitaly Baranov](https://github.com/vitlibar))
+- Added `Protobuf` output format. [\#4005](https://github.com/ClickHouse/ClickHouse/pull/4005) [\#4158](https://github.com/ClickHouse/ClickHouse/pull/4158) ([Vitaly Baranov](https://github.com/vitlibar))
+- Added brotli support in the HTTP interface for data import (INSERTs). [\#4235](https://github.com/ClickHouse/ClickHouse/pull/4235) ([Mikhail](https://github.com/fandyushin))
+- Added hints when the user makes a typo in a function name or in the command line client. [\#4239](https://github.com/ClickHouse/ClickHouse/pull/4239) ([Danila Kutenin](https://github.com/danlark1))
+- Added `Query-Id` to the server's HTTP response header. [\#4231](https://github.com/ClickHouse/ClickHouse/pull/4231) ([Mikhail](https://github.com/fandyushin))
+
+#### Experimental Features {#experimental-features-2}
+
+- Added `minmax` and `set` data skipping indices for the MergeTree table engines family. [\#4143](https://github.com/ClickHouse/ClickHouse/pull/4143) ([Nikita Vasilev](https://github.com/nikvas0))
+- Added conversion of `CROSS JOIN` to `INNER JOIN` if possible. [\#4221](https://github.com/ClickHouse/ClickHouse/pull/4221) [\#4266](https://github.com/ClickHouse/ClickHouse/pull/4266) ([Artem Zuikov](https://github.com/4ertus2))
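+
+To illustrate two of the 19.3.3 additions above — `NOT BETWEEN` and `sumMapFiltered` — a hedged sketch with inline, illustrative data:
+
+```sql
+SELECT 5 NOT BETWEEN 1 AND 3;  -- 1 (true)
+
+-- Sum values per key, but only for the listed keys:
+SELECT sumMapFiltered([1, 2])(keys, vals)
+FROM (SELECT [1, 2, 3] AS keys, [10, 20, 30] AS vals);
+-- ([1, 2], [10, 20])
+```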
+
+#### Bug Fixes {#bug-fixes-17}
+
+- Fixed `Not found column` for duplicate columns in the `JOIN ON` section. [\#4279](https://github.com/ClickHouse/ClickHouse/pull/4279) ([Artem Zuikov](https://github.com/4ertus2))
+- Make the `START REPLICATED SENDS` command start replicated sends. [\#4229](https://github.com/ClickHouse/ClickHouse/pull/4229) ([nvartolomei](https://github.com/nvartolomei))
+- Fixed aggregate functions execution with `Array(LowCardinality)` arguments. [\#4055](https://github.com/ClickHouse/ClickHouse/pull/4055) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Fixed wrong behaviour when doing an `INSERT ... SELECT ... FROM file(...)` query and the file has `CSVWithNames` or `TSVWithNames` format and the first data row is missing. [\#4297](https://github.com/ClickHouse/ClickHouse/pull/4297) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed crash on dictionary reload if the dictionary is not available. This bug appeared in 19.1.6. [\#4188](https://github.com/ClickHouse/ClickHouse/pull/4188) ([proller](https://github.com/proller))
+- Fixed `ALL JOIN` with duplicates in the right table. [\#4184](https://github.com/ClickHouse/ClickHouse/pull/4184) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed segmentation fault with `use_uncompressed_cache=1` and exception with wrong uncompressed size. This bug appeared in 19.1.6. [\#4186](https://github.com/ClickHouse/ClickHouse/pull/4186) ([alesapin](https://github.com/alesapin))
+- Fixed `compile_expressions` bug with comparison of big (more than int16) dates. [\#4341](https://github.com/ClickHouse/ClickHouse/pull/4341) ([alesapin](https://github.com/alesapin))
+- Fixed infinite loop when selecting from table function `numbers(0)`. [\#4280](https://github.com/ClickHouse/ClickHouse/pull/4280) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Temporarily disable predicate optimization for `ORDER BY`. [\#3890](https://github.com/ClickHouse/ClickHouse/pull/3890) ([Winter Zhang](https://github.com/zhang2014))
+- Fixed `Illegal instruction` error when using base64 functions on old CPUs. This error has been reproduced only when ClickHouse was compiled with gcc-8. [\#4275](https://github.com/ClickHouse/ClickHouse/pull/4275) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed `No message received` error when interacting with the PostgreSQL ODBC driver through a TLS connection. Also fixes segfault when using the MySQL ODBC driver. [\#4170](https://github.com/ClickHouse/ClickHouse/pull/4170) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed incorrect result when `Date` and `DateTime` arguments are used in branches of the conditional operator (function `if`). Added a generic case for function `if`. [\#4243](https://github.com/ClickHouse/ClickHouse/pull/4243) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- ClickHouse dictionaries now load within the `clickhouse` process. [\#4166](https://github.com/ClickHouse/ClickHouse/pull/4166) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed deadlock when a `SELECT` from a table with `File` engine was retried after a `No such file or directory` error. [\#4161](https://github.com/ClickHouse/ClickHouse/pull/4161) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed race condition when selecting from `system.tables` may give a `table doesn't exist` error. [\#4313](https://github.com/ClickHouse/ClickHouse/pull/4313) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `clickhouse-client` can segfault on exit while loading data for command line suggestions if it was run in interactive mode. [\#4317](https://github.com/ClickHouse/ClickHouse/pull/4317) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a bug when the execution of mutations containing `IN` operators was producing incorrect results. [\#4099](https://github.com/ClickHouse/ClickHouse/pull/4099) ([Alex Zatelepin](https://github.com/ztlpn))
+- Fixed an error: if there is a database with the `Dictionary` engine, all dictionaries are forced to load at server startup, and if there is a dictionary with a ClickHouse source from localhost, that dictionary cannot load. [\#4255](https://github.com/ClickHouse/ClickHouse/pull/4255) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed an error when system logs were tried to be created again at server shutdown. [\#4254](https://github.com/ClickHouse/ClickHouse/pull/4254) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Correctly return the right type and properly handle locks in the `joinGet` function. [\#4153](https://github.com/ClickHouse/ClickHouse/pull/4153) ([Amos Bird](https://github.com/amosbird))
+- Added the `sumMapWithOverflow` function. [\#4151](https://github.com/ClickHouse/ClickHouse/pull/4151) ([Léo Ercolanelli](https://github.com/ercolanelli-leo))
+- Fixed segfault with `allow_experimental_multiple_joins_emulation`. [52de2c](https://github.com/ClickHouse/ClickHouse/commit/52de2cd927f7b5257dd67e175f0a5560a48840d0) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed bug with incorrect `Date` and `DateTime` comparison. [\#4237](https://github.com/ClickHouse/ClickHouse/pull/4237) ([valexey](https://github.com/valexey))
+- Fixed fuzz test under undefined behaviour sanitizer: added parameter type check for the `quantile*Weighted` family of functions. [\#4145](https://github.com/ClickHouse/ClickHouse/pull/4145) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a rare race condition when removing old data parts could fail with a `File not found` error. [\#4378](https://github.com/ClickHouse/ClickHouse/pull/4378) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix install package with missing /etc/clickhouse-server/config.xml. [\#4343](https://github.com/ClickHouse/ClickHouse/pull/4343) ([proller](https://github.com/proller))
+
+#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-5}
+
+- Debian package: correct the /etc/clickhouse-server/preprocessed link according to config. [\#4205](https://github.com/ClickHouse/ClickHouse/pull/4205) ([proller](https://github.com/proller))
+- Various build fixes for FreeBSD. [\#4225](https://github.com/ClickHouse/ClickHouse/pull/4225) ([proller](https://github.com/proller))
+- Added ability to create, fill and drop tables in perftest. [\#4220](https://github.com/ClickHouse/ClickHouse/pull/4220) ([alesapin](https://github.com/alesapin))
+- Added a script to check for duplicate includes. [\#4326](https://github.com/ClickHouse/ClickHouse/pull/4326) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added ability to run queries by index in performance test. [\#4264](https://github.com/ClickHouse/ClickHouse/pull/4264) ([alesapin](https://github.com/alesapin))
+- Package with debug symbols is suggested to be installed. [\#4274](https://github.com/ClickHouse/ClickHouse/pull/4274) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Refactoring of performance-test. Better logging and signals handling. [\#4171](https://github.com/ClickHouse/ClickHouse/pull/4171) ([alesapin](https://github.com/alesapin))
+- Added docs to the anonymized Yandex.Metrica datasets. [\#4164](https://github.com/ClickHouse/ClickHouse/pull/4164) ([alesapin](https://github.com/alesapin))
+- Added tool for converting an old month-partitioned part to the custom-partitioned format. [\#4195](https://github.com/ClickHouse/ClickHouse/pull/4195) ([Alex Zatelepin](https://github.com/ztlpn))
+- Added docs about two datasets in s3. [\#4144](https://github.com/ClickHouse/ClickHouse/pull/4144) ([alesapin](https://github.com/alesapin))
+- Added script which creates a changelog from pull request descriptions. [\#4169](https://github.com/ClickHouse/ClickHouse/pull/4169) [\#4173](https://github.com/ClickHouse/ClickHouse/pull/4173) ([KochetovNicolai](https://github.com/KochetovNicolai)) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Added puppet module for ClickHouse. [\#4182](https://github.com/ClickHouse/ClickHouse/pull/4182) ([Maxim Fedotov](https://github.com/MaxFedotov))
+- Added docs for a group of undocumented functions. [\#4168](https://github.com/ClickHouse/ClickHouse/pull/4168) ([Winter Zhang](https://github.com/zhang2014))
+- ARM build fixes. [\#4210](https://github.com/ClickHouse/ClickHouse/pull/4210) [\#4306](https://github.com/ClickHouse/ClickHouse/pull/4306) [\#4291](https://github.com/ClickHouse/ClickHouse/pull/4291) ([proller](https://github.com/proller)) ([proller](https://github.com/proller))
+- Dictionary tests are now able to run from `ctest`. [\#4189](https://github.com/ClickHouse/ClickHouse/pull/4189) ([proller](https://github.com/proller))
+- Now `/etc/ssl` is used as the default directory with SSL certificates. [\#4167](https://github.com/ClickHouse/ClickHouse/pull/4167) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added checking of SSE and AVX instructions at start. [\#4234](https://github.com/ClickHouse/ClickHouse/pull/4234) ([Igr](https://github.com/igron99))
+- Init script will wait for the server until start. [\#4281](https://github.com/ClickHouse/ClickHouse/pull/4281) ([proller](https://github.com/proller))
+
+#### Backward Incompatible Changes {#backward-incompatible-changes-1}
+
+- Removed the `allow_experimental_low_cardinality_type` setting. `LowCardinality` data types are production ready. [\#4323](https://github.com/ClickHouse/ClickHouse/pull/4323) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Reduce mark cache size and uncompressed cache size according to the available memory amount. [\#4240](https://github.com/ClickHouse/ClickHouse/pull/4240) ([Konstantin Lopatin](https://github.com/k-lopatin))
+- Added the keyword `INDEX` in the `CREATE TABLE` query. A column with the name `index` has to be quoted with backticks or double quotes: `` `index` `` (see the example below). [\#4143](https://github.com/ClickHouse/ClickHouse/pull/4143) ([Nikita Vasilev](https://github.com/nikvas0))
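+
+A sketch of the quoting now required by the new `INDEX` keyword (the table layout is illustrative):
+
+```sql
+CREATE TABLE t
+(
+    `index` UInt32,  -- must be backquoted, since INDEX is now a keyword
+    value String
+) ENGINE = MergeTree
+ORDER BY `index`
+```
+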
+- `sumMap` now promotes the result type instead of overflowing. The old `sumMap` behaviour can be obtained by using the `sumMapWithOverflow` function. [\#4151](https://github.com/ClickHouse/ClickHouse/pull/4151) ([Léo Ercolanelli](https://github.com/ercolanelli-leo))
+
+#### Performance Improvements {#performance-improvements-4}
+
+- `std::sort` replaced by `pdqsort` for queries without `LIMIT`. [\#4236](https://github.com/ClickHouse/ClickHouse/pull/4236) ([Evgenii Pravda](https://github.com/kvinty))
+- Now the server reuses threads from the global thread pool. This affects performance in some corner cases. [\#4150](https://github.com/ClickHouse/ClickHouse/pull/4150) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Improvements {#improvements-5}
+
+- Implemented AIO support for FreeBSD. [\#4305](https://github.com/ClickHouse/ClickHouse/pull/4305) ([urgordeadbeef](https://github.com/urgordeadbeef))
+- `SELECT * FROM a JOIN b USING a, b` now returns the `a` and `b` columns only from the left table (see the sketch after this list). [\#4141](https://github.com/ClickHouse/ClickHouse/pull/4141) ([Artem Zuikov](https://github.com/4ertus2))
+- Allow the `-C` option of the client to work the same as the `-c` option. [\#4232](https://github.com/ClickHouse/ClickHouse/pull/4232) ([syominsergey](https://github.com/syominsergey))
+- Now the `--password` option used without a value requires the password from stdin. [\#4230](https://github.com/ClickHouse/ClickHouse/pull/4230) ([BSD\_Conqueror](https://github.com/bsd-conqueror))
+- Added highlighting of unescaped metacharacters in string literals that contain `LIKE` expressions or regexps. [\#4327](https://github.com/ClickHouse/ClickHouse/pull/4327) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added cancelling of HTTP read-only queries if the client socket goes away. [\#4213](https://github.com/ClickHouse/ClickHouse/pull/4213) ([nvartolomei](https://github.com/nvartolomei))
+- Now the server reports progress to keep client connections alive. [\#4215](https://github.com/ClickHouse/ClickHouse/pull/4215) ([Ivan](https://github.com/abyss7))
+- Slightly better message with a reason for the OPTIMIZE query with the `optimize_throw_if_noop` setting enabled. [\#4294](https://github.com/ClickHouse/ClickHouse/pull/4294) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added support of the `--version` option for the clickhouse server. [\#4251](https://github.com/ClickHouse/ClickHouse/pull/4251) ([Konstantin Lopatin](https://github.com/k-lopatin))
+- Added the `--help/-h` option to `clickhouse-server`. [\#4233](https://github.com/ClickHouse/ClickHouse/pull/4233) ([Yuriy Baranov](https://github.com/yurriy))
+- Added support for scalar subqueries with an aggregate function state result. [\#4348](https://github.com/ClickHouse/ClickHouse/pull/4348) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+- Improved server shutdown time and ALTERs waiting time. [\#4372](https://github.com/ClickHouse/ClickHouse/pull/4372) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Added info about the replicated\_can\_become\_leader setting to system.replicas and added logging if the replica won't try to become leader. [\#4379](https://github.com/ClickHouse/ClickHouse/pull/4379) ([Alex Zatelepin](https://github.com/ztlpn))
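+
+A sketch of the `USING` change noted in the list above — the join key column now appears once, taken from the left table (tables are hypothetical, each with columns `key` and `val`):
+
+```sql
+SELECT *            -- the `key` column is returned once, from the left table
+FROM ta
+JOIN tb USING (key)
+```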
+
+## ClickHouse release 19.1 {#clickhouse-release-19-1}
+
+### ClickHouse release 19.1.14, 2019-03-14 {#clickhouse-release-19-1-14-2019-03-14}
+
+- Fixed error `Column ... queried more than once` that may happen if the setting `asterisk_left_columns_only` is set to 1 in case of using `GLOBAL JOIN` with `SELECT *` (rare case). The issue does not exist in 19.3 and newer. [6bac7d8d](https://github.com/ClickHouse/ClickHouse/pull/4692/commits/6bac7d8d11a9b0d6de0b32b53c47eb2f6f8e7062) ([Artem Zuikov](https://github.com/4ertus2))
+
+### ClickHouse release 19.1.13, 2019-03-12 {#clickhouse-release-19-1-13-2019-03-12}
+
+This release contains exactly the same set of patches as 19.3.7.
+
+### ClickHouse release 19.1.10, 2019-03-03 {#clickhouse-release-19-1-10-2019-03-03}
+
+This release contains exactly the same set of patches as 19.3.6.
+
+## ClickHouse release 19.1 {#clickhouse-release-19-1-1}
+
+### ClickHouse release 19.1.9, 2019-02-21 {#clickhouse-release-19-1-9-2019-02-21}
+
+#### Bug Fixes {#bug-fixes-18}
+
+- Fixed backward incompatibility with old versions due to a wrong implementation of the `send_logs_level` setting. [\#4445](https://github.com/ClickHouse/ClickHouse/pull/4445) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed backward incompatibility of the table function `remote` introduced with column comments. [\#4446](https://github.com/ClickHouse/ClickHouse/pull/4446) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse release 19.1.8, 2019-02-16 {#clickhouse-release-19-1-8-2019-02-16}
+
+#### Bug Fixes {#bug-fixes-19}
+
+- Fix install package with missing /etc/clickhouse-server/config.xml. [\#4343](https://github.com/ClickHouse/ClickHouse/pull/4343) ([proller](https://github.com/proller))
+
+## ClickHouse release 19.1 {#clickhouse-release-19-1-2}
+
+### ClickHouse release 19.1.7, 2019-02-15 {#clickhouse-release-19-1-7-2019-02-15}
+
+#### Bug Fixes {#bug-fixes-20}
+
+- Correctly return the right type and properly handle locks in the `joinGet` function. [\#4153](https://github.com/ClickHouse/ClickHouse/pull/4153) ([Amos Bird](https://github.com/amosbird))
+- Fixed an error when system logs were tried to be created again at server shutdown. [\#4254](https://github.com/ClickHouse/ClickHouse/pull/4254) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed an error: if there is a database with the `Dictionary` engine, all dictionaries are forced to load at server startup, and if there is a dictionary with a ClickHouse source from localhost, that dictionary cannot load. [\#4255](https://github.com/ClickHouse/ClickHouse/pull/4255) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a bug when the execution of mutations containing `IN` operators was producing incorrect results. [\#4099](https://github.com/ClickHouse/ClickHouse/pull/4099) ([Alex Zatelepin](https://github.com/ztlpn))
+- `clickhouse-client` can segfault on exit while loading data for command line suggestions if it was run in interactive mode. [\#4317](https://github.com/ClickHouse/ClickHouse/pull/4317) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed race condition when selecting from `system.tables` may give a `table doesn't exist` error. [\#4313](https://github.com/ClickHouse/ClickHouse/pull/4313) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed deadlock when a `SELECT` from a table with `File` engine was retried after a `No such file or directory` error. [\#4161](https://github.com/ClickHouse/ClickHouse/pull/4161) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed an issue: local ClickHouse dictionaries were loaded via TCP, but should load within the process. [\#4166](https://github.com/ClickHouse/ClickHouse/pull/4166) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed `No message received` error when interacting with the PostgreSQL ODBC driver through a TLS connection. Also fixes segfault when using the MySQL ODBC driver. [\#4170](https://github.com/ClickHouse/ClickHouse/pull/4170) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Temporarily disable predicate optimization for `ORDER BY`. [\#3890](https://github.com/ClickHouse/ClickHouse/pull/3890) ([Winter Zhang](https://github.com/zhang2014))
+- Fixed infinite loop when selecting from table function `numbers(0)`. [\#4280](https://github.com/ClickHouse/ClickHouse/pull/4280) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed `compile_expressions` bug with comparison of big (more than int16) dates. [\#4341](https://github.com/ClickHouse/ClickHouse/pull/4341) ([alesapin](https://github.com/alesapin))
+- Fixed segmentation fault with `uncompressed_cache=1` and exception with wrong uncompressed size. [\#4186](https://github.com/ClickHouse/ClickHouse/pull/4186) ([alesapin](https://github.com/alesapin))
+- Fixed `ALL JOIN` with duplicates in the right table. [\#4184](https://github.com/ClickHouse/ClickHouse/pull/4184) ([Artem Zuikov](https://github.com/4ertus2))
+- Fixed wrong behaviour when doing an `INSERT ... SELECT ... FROM file(...)` query and the file has `CSVWithNames` or `TSVWithNames` format and the first data row is missing. [\#4297](https://github.com/ClickHouse/ClickHouse/pull/4297) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed aggregate functions execution with `Array(LowCardinality)` arguments. [\#4055](https://github.com/ClickHouse/ClickHouse/pull/4055) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Debian package: correct the /etc/clickhouse-server/preprocessed link according to config. [\#4205](https://github.com/ClickHouse/ClickHouse/pull/4205) ([proller](https://github.com/proller))
+- Fixed fuzz test under undefined behaviour sanitizer: added parameter type check for the `quantile*Weighted` family of functions. [\#4145](https://github.com/ClickHouse/ClickHouse/pull/4145) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Make the `START REPLICATED SENDS` command start replicated sends. [\#4229](https://github.com/ClickHouse/ClickHouse/pull/4229) ([nvartolomei](https://github.com/nvartolomei))
+- Fixed `Not found column` for duplicate columns in the JOIN ON section. [\#4279](https://github.com/ClickHouse/ClickHouse/pull/4279) ([Artem Zuikov](https://github.com/4ertus2))
+- Now `/etc/ssl` is used as the default directory with SSL certificates. [\#4167](https://github.com/ClickHouse/ClickHouse/pull/4167) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed crash on dictionary reload if the dictionary is not available. [\#4188](https://github.com/ClickHouse/ClickHouse/pull/4188) ([proller](https://github.com/proller))
+- Fixed bug with incorrect `Date` and `DateTime` comparison. [\#4237](https://github.com/ClickHouse/ClickHouse/pull/4237) ([valexey](https://github.com/valexey))
+- Fixed incorrect result when `Date` and `DateTime` arguments are used in branches of the conditional operator (function `if`). Added a generic case for function `if`. [\#4243](https://github.com/ClickHouse/ClickHouse/pull/4243) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+### ClickHouse release 19.1.6, 2019-01-24 {#clickhouse-release-19-1-6-2019-01-24}
+
+#### New Features {#new-features-7}
+
+- Custom per-column compression codecs for tables. [\#3899](https://github.com/ClickHouse/ClickHouse/pull/3899) [\#4111](https://github.com/ClickHouse/ClickHouse/pull/4111) ([alesapin](https://github.com/alesapin), [Winter Zhang](https://github.com/zhang2014), [Anatoly](https://github.com/Sindbag))
+- Added the compression codec `Delta`. [\#4052](https://github.com/ClickHouse/ClickHouse/pull/4052) ([alesapin](https://github.com/alesapin))
+- Allow to `ALTER` compression codecs. [\#4054](https://github.com/ClickHouse/ClickHouse/pull/4054) ([alesapin](https://github.com/alesapin))
+- Added functions `left`, `right`, `trim`, `ltrim`, `rtrim`, `timestampadd`, `timestampsub` for SQL standard compatibility. [\#3826](https://github.com/ClickHouse/ClickHouse/pull/3826) ([Ivan Blinkov](https://github.com/blinkov))
+- Support for writing to an `HDFS` table or the `hdfs` table function. [\#4084](https://github.com/ClickHouse/ClickHouse/pull/4084) ([alesapin](https://github.com/alesapin))
+- Added functions to search for multiple constant strings in a big haystack: `multiPosition`, `multiSearch`, `firstMatch`, also with `-UTF8`, `-CaseInsensitive`, and `-CaseInsensitiveUTF8` variants. [\#4053](https://github.com/ClickHouse/ClickHouse/pull/4053) ([Danila Kutenin](https://github.com/danlark1))
+- Pruning of unused shards if a `SELECT` query filters by the sharding key (setting `optimize_skip_unused_shards`). [\#3851](https://github.com/ClickHouse/ClickHouse/pull/3851) ([Gleb Kanterov](https://github.com/kanterov), [Ivan](https://github.com/abyss7))
+- Allow the `Kafka` engine to ignore some number of parsing errors per block. [\#4094](https://github.com/ClickHouse/ClickHouse/pull/4094) ([Ivan](https://github.com/abyss7))
+- Added support for `CatBoost` multiclass model evaluation. The function `modelEvaluate` returns a tuple with raw per-class predictions for multiclass models. `libcatboostmodel.so` should be built with [\#607](https://github.com/catboost/catboost/pull/607). [\#3959](https://github.com/ClickHouse/ClickHouse/pull/3959) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Added functions `filesystemAvailable`, `filesystemFree`, `filesystemCapacity`. [\#4097](https://github.com/ClickHouse/ClickHouse/pull/4097) ([Boris Granveaud](https://github.com/bgranvea))
+- Added hashing functions `xxHash64` and `xxHash32`. [\#3905](https://github.com/ClickHouse/ClickHouse/pull/3905) ([filimonov](https://github.com/filimonov))
+- Added the `gccMurmurHash` hashing function (GCC flavoured Murmur hash) which uses the same hash seed as [gcc](https://github.com/gcc-mirror/gcc/blob/41d6b10e96a1de98e90a7c0378437c3255814b16/libstdc%2B%2B-v3/include/bits/functional_hash.h#L191). [\#4000](https://github.com/ClickHouse/ClickHouse/pull/4000) ([sundyli](https://github.com/sundy-li))
+- Added hashing functions `javaHash`, `hiveHash`. [\#3811](https://github.com/ClickHouse/ClickHouse/pull/3811) ([shangshujie365](https://github.com/shangshujie365))
+- Added the table function `remoteSecure`. The function works like `remote`, but uses a secure connection. [\#4088](https://github.com/ClickHouse/ClickHouse/pull/4088) ([proller](https://github.com/proller))
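+
+A quick sketch of the new hashing functions listed above (concrete hash values are omitted deliberately, as they are implementation-defined):
+
+```sql
+SELECT
+    xxHash32('ClickHouse'),
+    xxHash64('ClickHouse'),
+    javaHash('ClickHouse'),
+    hiveHash('ClickHouse')
+```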
+
+#### Experimental Features {#experimental-features-3}
+
+- Added multiple JOINs emulation (the `allow_experimental_multiple_joins_emulation` setting). [\#3946](https://github.com/ClickHouse/ClickHouse/pull/3946) ([Artem Zuikov](https://github.com/4ertus2))
+
+#### Bug Fixes {#bug-fixes-21}
+
+- Made the `compiled_expression_cache_size` setting limited by default to lower memory consumption. [\#4041](https://github.com/ClickHouse/ClickHouse/pull/4041) ([alesapin](https://github.com/alesapin))
+- Fixed a bug that led to hangups in threads that perform ALTERs of Replicated tables and in the thread that updates configuration from ZooKeeper. [\#2947](https://github.com/ClickHouse/ClickHouse/issues/2947) [\#3891](https://github.com/ClickHouse/ClickHouse/issues/3891) [\#3934](https://github.com/ClickHouse/ClickHouse/pull/3934) ([Alex Zatelepin](https://github.com/ztlpn))
+- Fixed a race condition when executing a distributed ALTER task. The race condition led to more than one replica trying to execute the task and all replicas except one failing with a ZooKeeper error. [\#3904](https://github.com/ClickHouse/ClickHouse/pull/3904) ([Alex Zatelepin](https://github.com/ztlpn))
+- Fixed a bug when `from_zk` config elements weren't refreshed after a request to ZooKeeper timed out. [\#2947](https://github.com/ClickHouse/ClickHouse/issues/2947) [\#3947](https://github.com/ClickHouse/ClickHouse/pull/3947) ([Alex Zatelepin](https://github.com/ztlpn))
+- Fixed a bug with a wrong prefix for IPv4 subnet masks. [\#3945](https://github.com/ClickHouse/ClickHouse/pull/3945) ([alesapin](https://github.com/alesapin))
+- Fixed a crash (`std::terminate`) in rare cases when a new thread cannot be created due to exhausted resources. [\#3956](https://github.com/ClickHouse/ClickHouse/pull/3956) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a bug in `remote` table function execution when wrong restrictions were used in `getStructureOfRemoteTable`. [\#4009](https://github.com/ClickHouse/ClickHouse/pull/4009) ([alesapin](https://github.com/alesapin))
+- Fixed a leak of netlink sockets. They were placed in a pool where they were never deleted, and new sockets were created at the start of a new thread when all current sockets were in use. [\#4017](https://github.com/ClickHouse/ClickHouse/pull/4017) ([Alex Zatelepin](https://github.com/ztlpn))
+- Fixed a bug with closing the `/proc/self/fd` directory earlier than all fds were read from `/proc` after forking the `odbc-bridge` subprocess. [\#4120](https://github.com/ClickHouse/ClickHouse/pull/4120) ([alesapin](https://github.com/alesapin))
+- Fixed String to UInt monotonic conversion in case of usage of String in the primary key. [\#3870](https://github.com/ClickHouse/ClickHouse/pull/3870) ([Winter Zhang](https://github.com/zhang2014))
+- Fixed an error in the calculation of integer conversion function monotonicity. [\#3921](https://github.com/ClickHouse/ClickHouse/pull/3921) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed segfault in `arrayEnumerateUniq`, `arrayEnumerateDense` functions in case of some invalid arguments. [\#3909](https://github.com/ClickHouse/ClickHouse/pull/3909) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fix UB in StorageMerge. [\#3910](https://github.com/ClickHouse/ClickHouse/pull/3910) ([Amos Bird](https://github.com/amosbird))
+- Fixed segfault in functions `addDays`, `subtractDays`. [\#3913](https://github.com/ClickHouse/ClickHouse/pull/3913) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed an error: functions `round`, `floor`, `trunc`, `ceil` may return a bogus result when executed on an integer argument and a large negative scale. [\#3914](https://github.com/ClickHouse/ClickHouse/pull/3914) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed a bug induced by ‘kill query sync’ which leads to a core dump. [\#3916](https://github.com/ClickHouse/ClickHouse/pull/3916) ([muVulDeePecker](https://github.com/fancyqlx))
+- Fixed a bug with a long delay after an empty replication queue. [\#3928](https://github.com/ClickHouse/ClickHouse/pull/3928) [\#3932](https://github.com/ClickHouse/ClickHouse/pull/3932) ([alesapin](https://github.com/alesapin))
+- Fixed excessive memory usage in case of inserting into a table with a `LowCardinality` primary key. [\#3955](https://github.com/ClickHouse/ClickHouse/pull/3955) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Fixed `LowCardinality` serialization for the `Native` format in case of empty arrays. [\#3907](https://github.com/ClickHouse/ClickHouse/issues/3907) [\#4011](https://github.com/ClickHouse/ClickHouse/pull/4011) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Fixed incorrect result while using distinct by a single LowCardinality numeric column. [\#3895](https://github.com/ClickHouse/ClickHouse/issues/3895) [\#4012](https://github.com/ClickHouse/ClickHouse/pull/4012) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Fixed specialized aggregation with a LowCardinality key (in case when the `compile` setting is enabled). [\#3886](https://github.com/ClickHouse/ClickHouse/pull/3886) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Fixed user and password forwarding for replicated tables queries. [\#3957](https://github.com/ClickHouse/ClickHouse/pull/3957) ([alesapin](https://github.com/alesapin)) ([小路](https://github.com/nicelulu))
+- Fixed a very rare race condition that can happen when listing tables in a Dictionary database while reloading dictionaries. [\#3970](https://github.com/ClickHouse/ClickHouse/pull/3970) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- Fixed incorrect result when ROLLUP or CUBE was used. [\#3756](https://github.com/ClickHouse/ClickHouse/issues/3756) [\#3837](https://github.com/ClickHouse/ClickHouse/pull/3837) ([Sam Chou](https://github.com/reflection))
+- Fixed column aliases for queries with `JOIN ON` syntax and distributed tables. [\#3980](https://github.com/ClickHouse/ClickHouse/pull/3980) ([Winter Zhang](https://github.com/zhang2014))
+- Fixed an error in the internal implementation of `quantileTDigest` (found by Artem Vakhrushev). This error never happens in ClickHouse and was relevant only for those who use the ClickHouse codebase as a library directly. [\#3935](https://github.com/ClickHouse/ClickHouse/pull/3935) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Improvements {#improvements-6}
+
+- Support for `IF NOT EXISTS` in `ALTER TABLE ADD COLUMN` statements, along with `IF EXISTS` in `DROP/MODIFY/CLEAR/COMMENT COLUMN`. [\#3900](https://github.com/ClickHouse/ClickHouse/pull/3900) ([Boris Granveaud](https://github.com/bgranvea))
+- Function `parseDateTimeBestEffort`: support for formats `DD.MM.YYYY`, `DD.MM.YY`, `DD-MM-YYYY`, `DD-Mon-YYYY`, `DD/Month/YYYY` and similar (see the sketch after this list). [\#3922](https://github.com/ClickHouse/ClickHouse/pull/3922) ([alexey-milovidov](https://github.com/alexey-milovidov))
+- `CapnProtoInputStream` now supports jagged structures. [\#4063](https://github.com/ClickHouse/ClickHouse/pull/4063) ([Odin Hultgren Van Der Horst](https://github.com/Miniwoffer))
+- Usability improvement: added a check that the server process is started from the data directory owner. Do not allow starting the server from root if the data belongs to a non-root user. [\#3785](https://github.com/ClickHouse/ClickHouse/pull/3785) ([sergey-v-galtsev](https://github.com/sergey-v-galtsev))
+- Better logic of checking required columns during analysis of queries with JOINs. [\#3930](https://github.com/ClickHouse/ClickHouse/pull/3930) ([Artem Zuikov](https://github.com/4ertus2))
+- Decreased the number of connections in case of a large number of Distributed tables on a single server. [\#3726](https://github.com/ClickHouse/ClickHouse/pull/3726) ([Winter Zhang](https://github.com/zhang2014))
+- Supported totals row for a `WITH TOTALS` query for the ODBC driver. [\#3836](https://github.com/ClickHouse/ClickHouse/pull/3836) ([Maxim Koritskiy](https://github.com/nightweb))
+- Allowed using `Enum`s as integers inside the if function. [\#3875](https://github.com/ClickHouse/ClickHouse/pull/3875) ([Ivan](https://github.com/abyss7))
+- Added the `low_cardinality_allow_in_native_format` setting. If disabled, do not use the `LowCardinality` type in the `Native` format. [\#3879](https://github.com/ClickHouse/ClickHouse/pull/3879) ([KochetovNicolai](https://github.com/KochetovNicolai))
+- Removed some redundant objects from the compiled expressions cache to lower memory usage. [\#4042](https://github.com/ClickHouse/ClickHouse/pull/4042) ([alesapin](https://github.com/alesapin))
+- Added a check that the `SET send_logs_level = 'value'` query accepts an appropriate value. [\#3873](https://github.com/ClickHouse/ClickHouse/pull/3873) ([Sabyanin Maxim](https://github.com/s-mx))
+- Fixed data type check in type conversion functions. [\#3896](https://github.com/ClickHouse/ClickHouse/pull/3896) ([Winter Zhang](https://github.com/zhang2014))
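+
+A sketch of the `parseDateTimeBestEffort` formats mentioned in the list above (inputs are illustrative and all denote the same day):
+
+```sql
+SELECT
+    parseDateTimeBestEffort('23.10.2018'),
+    parseDateTimeBestEffort('23-10-2018'),
+    parseDateTimeBestEffort('23-Oct-2018')
+```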
+- Added a DFA-based implementation for functions `sequenceMatch` and `sequenceCount` in case the pattern does not contain time. [\#4004](https://github.com/ClickHouse/ClickHouse/pull/4004) ([Léo Ercolanelli](https://github.com/ercolanelli-leo)) +- Performance improvement for integer number serialization. [\#3968](https://github.com/ClickHouse/ClickHouse/pull/3968) ([Amos Bird](https://github.com/amosbird)) +- Zero left padding of PODArray so that the -1 element is always valid and zeroed. It is used for branchless calculation of offsets. [\#3920](https://github.com/ClickHouse/ClickHouse/pull/3920) ([Amos Bird](https://github.com/amosbird)) +- Reverted the `jemalloc` version which led to performance degradation. [\#4018](https://github.com/ClickHouse/ClickHouse/pull/4018) ([alexey-milovidov](https://github.com/alexey-milovidov)) + +#### Backward Incompatible Changes {#backward-incompatible-changes-2} + +- Removed the undocumented feature `ALTER MODIFY PRIMARY KEY` because it was superseded by the `ALTER MODIFY ORDER BY` command. [\#3887](https://github.com/ClickHouse/ClickHouse/pull/3887) ([Alexey Zatelepin](https://github.com/ztlpn)) +- Removed function `shardByHash`. [\#3833](https://github.com/ClickHouse/ClickHouse/pull/3833) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Forbid using scalar subqueries with a result of type `AggregateFunction`. [\#3865](https://github.com/ClickHouse/ClickHouse/pull/3865) ([Ivan](https://github.com/abyss7)) + +#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-6} + +- Added support for PowerPC (`ppc64le`) builds. [\#4132](https://github.com/ClickHouse/ClickHouse/pull/4132) ([Danila Kutenin](https://github.com/danlark1)) +- Stateful functional tests are run on a publicly available dataset. [\#3969](https://github.com/ClickHouse/ClickHouse/pull/3969) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixed an error when the server cannot start with the `bash: /usr/bin/clickhouse-extract-from-config: Operation not permitted` message inside Docker or systemd-nspawn. [\#4136](https://github.com/ClickHouse/ClickHouse/pull/4136) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Updated the `rdkafka` library to v1.0.0-RC5. Used cppkafka instead of the raw C interface. [\#4025](https://github.com/ClickHouse/ClickHouse/pull/4025) ([Ivan](https://github.com/abyss7)) +- Updated the `mariadb-client` library. Fixed one of the issues found by UBSan. [\#3924](https://github.com/ClickHouse/ClickHouse/pull/3924) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Some fixes for UBSan builds. [\#3926](https://github.com/ClickHouse/ClickHouse/pull/3926) [\#3021](https://github.com/ClickHouse/ClickHouse/pull/3021) [\#3948](https://github.com/ClickHouse/ClickHouse/pull/3948) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Added per-commit runs of tests with UBSan build. +- Added per-commit runs of the PVS-Studio static analyzer. +- Fixed bugs found by PVS-Studio. [\#4013](https://github.com/ClickHouse/ClickHouse/pull/4013) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixed glibc compatibility issues.
[\#4100](https://github.com/ClickHouse/ClickHouse/pull/4100) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Moved Docker images to 18.10 and added a compatibility file for glibc \>= 2.28 [\#3965](https://github.com/ClickHouse/ClickHouse/pull/3965) ([alesapin](https://github.com/alesapin)) +- Added an env variable for users who do not want to chown directories in the server Docker image. [\#3967](https://github.com/ClickHouse/ClickHouse/pull/3967) ([alesapin](https://github.com/alesapin)) +- Enabled most of the warnings from `-Weverything` in clang. Enabled `-Wpedantic`. [\#3986](https://github.com/ClickHouse/ClickHouse/pull/3986) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Added a few more warnings that are available only in clang 8. [\#3993](https://github.com/ClickHouse/ClickHouse/pull/3993) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Link to `libLLVM` rather than to individual LLVM libs when using shared linking. [\#3989](https://github.com/ClickHouse/ClickHouse/pull/3989) ([Orivej Desh](https://github.com/orivej)) +- Added sanitizer variables for test images. [\#4072](https://github.com/ClickHouse/ClickHouse/pull/4072) ([alesapin](https://github.com/alesapin)) +- The `clickhouse-server` debian package will recommend the `libcap2-bin` package for using the `setcap` tool to set capabilities. This is optional. [\#4093](https://github.com/ClickHouse/ClickHouse/pull/4093) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Improved compilation time, fixed includes. [\#3898](https://github.com/ClickHouse/ClickHouse/pull/3898) ([proller](https://github.com/proller)) +- Added performance tests for hash functions. [\#3918](https://github.com/ClickHouse/ClickHouse/pull/3918) ([filimonov](https://github.com/filimonov)) +- Fixed cyclical library dependencies. [\#3958](https://github.com/ClickHouse/ClickHouse/pull/3958) ([proller](https://github.com/proller)) +- Improved compilation with low available memory. [\#4030](https://github.com/ClickHouse/ClickHouse/pull/4030) ([proller](https://github.com/proller)) +- Added a test script to reproduce performance degradation in `jemalloc`. [\#4036](https://github.com/ClickHouse/ClickHouse/pull/4036) ([alexey-milovidov](https://github.com/alexey-milovidov)) +- Fixed misspellings in comments and string literals under `dbms`. [\#4122](https://github.com/ClickHouse/ClickHouse/pull/4122) ([maiha](https://github.com/maiha)) +- Fixed typos in comments. [\#4089](https://github.com/ClickHouse/ClickHouse/pull/4089) ([Evgenii Pravda](https://github.com/kvinty)) + +## [Changelog for 2018](https://github.com/ClickHouse/ClickHouse/blob/master/docs/en/changelog/2018.md) {#changelog-for-2018} diff --git a/docs/ru/whats_new/changelog/index.md b/docs/ru/whats_new/changelog/index.md new file mode 100644 index 00000000000..bcfe62cbd0b --- /dev/null +++ b/docs/ru/whats_new/changelog/index.md @@ -0,0 +1,650 @@ +--- +toc_folder_title: Changelog +toc_priority: 74 +toc_title: '2020' +--- + +## ClickHouse release v20.3 + +### ClickHouse release v20.3.4.10, 2020-03-20 + +#### Bug Fix +* This release also contains all bug fixes from 20.1.8.41 +* Fix missing `rows_before_limit_at_least` for queries over http (with processors pipeline). This fixes [#9730](https://github.com/ClickHouse/ClickHouse/issues/9730).
[#9757](https://github.com/ClickHouse/ClickHouse/pull/9757) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) + + +### ClickHouse release v20.3.3.6, 2020-03-17 + +#### Bug Fix +* This release also contains all bug fixes from 20.1.7.38 +* Fix a bug in replication that doesn't allow replication to work if the user has executed mutations on the previous version. This fixes [#9645](https://github.com/ClickHouse/ClickHouse/issues/9645). [#9652](https://github.com/ClickHouse/ClickHouse/pull/9652) ([alesapin](https://github.com/alesapin)). It makes version 20.3 backward compatible again. +* Add setting `use_compact_format_in_distributed_parts_names` which allows writing files for `INSERT` queries into a `Distributed` table in a more compact format. This fixes [#9647](https://github.com/ClickHouse/ClickHouse/issues/9647). [#9653](https://github.com/ClickHouse/ClickHouse/pull/9653) ([alesapin](https://github.com/alesapin)). It makes version 20.3 backward compatible again. + +### ClickHouse release v20.3.2.1, 2020-03-12 + +#### Backward Incompatible Change + +* Fixed the issue `file name too long` when sending data for `Distributed` tables for a large number of replicas. Fixed the issue that replica credentials were exposed in the server log. The format of directory name on disk was changed to `[shard{shard_index}[_replica{replica_index}]]`. [#8911](https://github.com/ClickHouse/ClickHouse/pull/8911) ([Mikhail Korotov](https://github.com/millb)) After you upgrade to the new version, you will not be able to downgrade without manual intervention, because the old server version does not recognize the new directory format. If you want to downgrade, you have to manually rename the corresponding directories to the old format. This change is relevant only if you have used asynchronous `INSERT`s to `Distributed` tables. In version 20.3.3 we will introduce a setting that will allow you to enable the new format gradually. +* Changed the format of replication log entries for mutation commands. You have to wait for old mutations to process before installing the new version. +* Implement simple memory profiler that dumps stacktraces to `system.trace_log` every N bytes over the soft allocation limit [#8765](https://github.com/ClickHouse/ClickHouse/pull/8765) ([Ivan](https://github.com/abyss7)) [#9472](https://github.com/ClickHouse/ClickHouse/pull/9472) ([alexey-milovidov](https://github.com/alexey-milovidov)) The column of `system.trace_log` was renamed from `timer_type` to `trace_type`. This will require changes in third-party performance analysis and flamegraph processing tools. +* Use OS thread id everywhere instead of internal thread number. This fixes [#7477](https://github.com/ClickHouse/ClickHouse/issues/7477). Old `clickhouse-client` cannot receive logs that are sent from the server when the setting `send_logs_level` is enabled, because the names and types of the structured log messages were changed. On the other hand, different server versions can send logs with different types to each other. When you don't use the `send_logs_level` setting, you should not care. [#8954](https://github.com/ClickHouse/ClickHouse/pull/8954) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Remove `indexHint` function [#9542](https://github.com/ClickHouse/ClickHouse/pull/9542) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Remove `findClusterIndex`, `findClusterValue` functions. This fixes [#8641](https://github.com/ClickHouse/ClickHouse/issues/8641).
If you were using these functions, send an email to `clickhouse-feedback@yandex-team.com` [#9543](https://github.com/ClickHouse/ClickHouse/pull/9543) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Now it's not allowed to create columns or add columns with a `SELECT` subquery as default expression. [#9481](https://github.com/ClickHouse/ClickHouse/pull/9481) ([alesapin](https://github.com/alesapin)) +* Require aliases for subqueries in JOIN. [#9274](https://github.com/ClickHouse/ClickHouse/pull/9274) ([Artem Zuikov](https://github.com/4ertus2)) +* Improved `ALTER MODIFY/ADD` queries logic. Now you cannot `ADD` a column without a type, `MODIFY` default expression doesn't change the type of a column and `MODIFY` type doesn't lose the default expression value. Fixes [#8669](https://github.com/ClickHouse/ClickHouse/issues/8669). [#9227](https://github.com/ClickHouse/ClickHouse/pull/9227) ([alesapin](https://github.com/alesapin)) +* Require the server to be restarted to apply the changes in logging configuration. This is a temporary workaround to avoid the bug where the server logs to a deleted log file (see [#8696](https://github.com/ClickHouse/ClickHouse/issues/8696)). [#8707](https://github.com/ClickHouse/ClickHouse/pull/8707) ([Alexander Kuzmenkov](https://github.com/akuzm)) +* The setting `experimental_use_processors` is enabled by default. This setting enables usage of the new query pipeline. This is internal refactoring and we expect no visible changes. If you see any issues, set it back to zero. [#8768](https://github.com/ClickHouse/ClickHouse/pull/8768) ([alexey-milovidov](https://github.com/alexey-milovidov)) + +#### New Feature +* Add `Avro` and `AvroConfluent` input/output formats [#8571](https://github.com/ClickHouse/ClickHouse/pull/8571) ([Andrew Onyshchuk](https://github.com/oandrew)) [#8957](https://github.com/ClickHouse/ClickHouse/pull/8957) ([Andrew Onyshchuk](https://github.com/oandrew)) [#8717](https://github.com/ClickHouse/ClickHouse/pull/8717) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Multi-threaded and non-blocking updates of expired keys in `cache` dictionaries (with optional permission to read old ones). [#8303](https://github.com/ClickHouse/ClickHouse/pull/8303) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) +* Add query `ALTER ... MATERIALIZE TTL`. It runs a mutation that forces removal of expired data by TTL and recalculates meta-information about TTL in all parts. [#8775](https://github.com/ClickHouse/ClickHouse/pull/8775) ([Anton Popov](https://github.com/CurtizJ)) +* Switch from HashJoin to MergeJoin (on disk) if needed [#9082](https://github.com/ClickHouse/ClickHouse/pull/9082) ([Artem Zuikov](https://github.com/4ertus2)) +* Added `MOVE PARTITION` command for `ALTER TABLE` [#4729](https://github.com/ClickHouse/ClickHouse/issues/4729) [#6168](https://github.com/ClickHouse/ClickHouse/pull/6168) ([Guillaume Tassery](https://github.com/YiuRULE)) +* Reloading storage configuration from configuration file on the fly. [#8594](https://github.com/ClickHouse/ClickHouse/pull/8594) ([Vladimir Chebotarev](https://github.com/excitoon)) +* Allowed to change `storage_policy` to a no less rich one. [#8107](https://github.com/ClickHouse/ClickHouse/pull/8107) ([Vladimir Chebotarev](https://github.com/excitoon)) +* Added support for globs/wildcards for S3 storage and table function. [#8851](https://github.com/ClickHouse/ClickHouse/pull/8851) ([Vladimir Chebotarev](https://github.com/excitoon))
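+
+For illustration, a minimal sketch of the glob support (the bucket URL and column schema are hypothetical): patterns such as `*`, `?`, `{a,b}` and `{N..M}` can be used in the path argument of the `s3` table function:
+
+```sql
+SELECT count()
+FROM s3('https://my-bucket.s3.amazonaws.com/data/part-{1..10}.csv', 'CSV', 'id UInt64, value String');
+```
+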
+* Implement `bitAnd`, `bitOr`, `bitXor`, `bitNot` for `FixedString(N)` datatype. [#9091](https://github.com/ClickHouse/ClickHouse/pull/9091) ([Guillaume Tassery](https://github.com/YiuRULE)) +* Added function `bitCount`. This fixes [#8702](https://github.com/ClickHouse/ClickHouse/issues/8702). [#8708](https://github.com/ClickHouse/ClickHouse/pull/8708) ([alexey-milovidov](https://github.com/alexey-milovidov)) [#8749](https://github.com/ClickHouse/ClickHouse/pull/8749) ([ikopylov](https://github.com/ikopylov)) +* Add `generateRandom` table function to generate random rows with given schema. Allows populating an arbitrary test table with data (see the sketch after this group of entries). [#8994](https://github.com/ClickHouse/ClickHouse/pull/8994) ([Ilya Yatsishin](https://github.com/qoega)) +* `JSONEachRowFormat`: support special case when objects are enclosed in a top-level array. [#8860](https://github.com/ClickHouse/ClickHouse/pull/8860) ([Kruglov Pavel](https://github.com/Avogar)) +* Now it's possible to create a column with `DEFAULT` expression which depends on a column with default `ALIAS` expression. [#9489](https://github.com/ClickHouse/ClickHouse/pull/9489) ([alesapin](https://github.com/alesapin)) +* Allow to specify `--limit` more than the source data size in `clickhouse-obfuscator`. The data will repeat itself with different random seed. [#9155](https://github.com/ClickHouse/ClickHouse/pull/9155) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Added `groupArraySample` function (similar to `groupArray`) with reservoir sampling algorithm. [#8286](https://github.com/ClickHouse/ClickHouse/pull/8286) ([Amos Bird](https://github.com/amosbird)) +* Now you can monitor the size of update queue in `cache`/`complex_key_cache` dictionaries via system metrics. [#9413](https://github.com/ClickHouse/ClickHouse/pull/9413) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) +* Allow to use CRLF as a line separator in CSV output format when the setting `output_format_csv_crlf_end_of_line` is set to 1 [#8934](https://github.com/ClickHouse/ClickHouse/pull/8934) [#8935](https://github.com/ClickHouse/ClickHouse/pull/8935) [#8963](https://github.com/ClickHouse/ClickHouse/pull/8963) ([Mikhail Korotov](https://github.com/millb)) +* Implement more functions of the [H3](https://github.com/uber/h3) API: `h3GetBaseCell`, `h3HexAreaM2`, `h3IndexesAreNeighbors`, `h3ToChildren`, `h3ToString` and `stringToH3` [#8938](https://github.com/ClickHouse/ClickHouse/pull/8938) ([Nico Mandery](https://github.com/nmandery)) +* New setting introduced: `max_parser_depth` to control maximum stack size and allow large complex queries. This fixes [#6681](https://github.com/ClickHouse/ClickHouse/issues/6681) and [#7668](https://github.com/ClickHouse/ClickHouse/issues/7668). [#8647](https://github.com/ClickHouse/ClickHouse/pull/8647) ([Maxim Smirnov](https://github.com/qMBQx8GH)) +* Add a setting `force_optimize_skip_unused_shards` to throw if skipping of unused shards is not possible [#8805](https://github.com/ClickHouse/ClickHouse/pull/8805) ([Azat Khuzhin](https://github.com/azat)) +* Allow to configure multiple disks/volumes for storing data to send in the `Distributed` engine [#8756](https://github.com/ClickHouse/ClickHouse/pull/8756) ([Azat Khuzhin](https://github.com/azat)) +* Support storage policy (``) for storing temporary data. [#8750](https://github.com/ClickHouse/ClickHouse/pull/8750) ([Azat Khuzhin](https://github.com/azat))
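+
+As mentioned in the `generateRandom` entry above, a minimal sketch of its usage (the table `test_table` is hypothetical; the trailing arguments are assumed to be random seed, max string length and max array length):
+
+```sql
+SELECT * FROM generateRandom('id UInt64, name String, value Float64', 1, 10, 2) LIMIT 5;
+-- populating a hypothetical test table with random rows:
+INSERT INTO test_table SELECT * FROM generateRandom('id UInt64, name String, value Float64') LIMIT 1000;
+```
+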
+* Added `X-ClickHouse-Exception-Code` HTTP header that is set if an exception was thrown before sending data. This implements [#4971](https://github.com/ClickHouse/ClickHouse/issues/4971). [#8786](https://github.com/ClickHouse/ClickHouse/pull/8786) ([Mikhail Korotov](https://github.com/millb)) +* Added function `ifNotFinite`. It is just a syntactic sugar: `ifNotFinite(x, y) = isFinite(x) ? x : y` (see the example after this list). [#8710](https://github.com/ClickHouse/ClickHouse/pull/8710) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Added `last_successful_update_time` column in `system.dictionaries` table [#9394](https://github.com/ClickHouse/ClickHouse/pull/9394) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) +* Add `blockSerializedSize` function (size on disk without compression) [#8952](https://github.com/ClickHouse/ClickHouse/pull/8952) ([Azat Khuzhin](https://github.com/azat)) +* Add function `moduloOrZero` [#9358](https://github.com/ClickHouse/ClickHouse/pull/9358) ([hcz](https://github.com/hczhcz)) +* Added system tables `system.zeros` and `system.zeros_mt` as well as table functions `zeros()` and `zeros_mt()`. Tables (and table functions) contain a single column with name `zero` and type `UInt8`. This column contains zeros. It is needed for test purposes as the fastest method to generate many rows. This fixes [#6604](https://github.com/ClickHouse/ClickHouse/issues/6604) [#9593](https://github.com/ClickHouse/ClickHouse/pull/9593) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
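+
+To illustrate two of the entries above (`ifNotFinite` and the `zeros` table function), a minimal sketch; the constants are arbitrary:
+
+```sql
+SELECT ifNotFinite(1 / 0, -1);        -- 1 / 0 is +inf for Float64, so this returns -1
+SELECT count() FROM zeros(1000000);   -- one million rows of a single UInt8 column named `zero`
+```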
+ +#### Experimental Feature +* Add new compact format of parts in `MergeTree`-family tables in which all columns are stored in one file. It helps to increase performance of small and frequent inserts. The old format (one file per column) is now called wide. Data storing format is controlled by settings `min_bytes_for_wide_part` and `min_rows_for_wide_part`. [#8290](https://github.com/ClickHouse/ClickHouse/pull/8290) ([Anton Popov](https://github.com/CurtizJ)) +* Support for S3 storage for `Log`, `TinyLog` and `StripeLog` tables. [#8862](https://github.com/ClickHouse/ClickHouse/pull/8862) ([Pavel Kovalenko](https://github.com/Jokser)) + +#### Bug Fix +* Fixed inconsistent whitespaces in log messages. [#9322](https://github.com/ClickHouse/ClickHouse/pull/9322) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Fix bug in which arrays of unnamed tuples were flattened as Nested structures on table creation. [#8866](https://github.com/ClickHouse/ClickHouse/pull/8866) ([achulkov2](https://github.com/achulkov2)) +* Fixed the issue when "Too many open files" error may happen if there are too many files matching glob pattern in `File` table or `file` table function. Now files are opened lazily. This fixes [#8857](https://github.com/ClickHouse/ClickHouse/issues/8857) [#8861](https://github.com/ClickHouse/ClickHouse/pull/8861) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* DROP TEMPORARY TABLE now drops only the temporary table. [#8907](https://github.com/ClickHouse/ClickHouse/pull/8907) ([Vitaly Baranov](https://github.com/vitlibar)) +* Remove outdated partition when we shutdown the server or DETACH/ATTACH a table. [#8602](https://github.com/ClickHouse/ClickHouse/pull/8602) ([Guillaume Tassery](https://github.com/YiuRULE)) +* Now the default disk calculates the free space from the `data` subdirectory. Fixed the issue when the amount of free space is not calculated correctly if the `data` directory is mounted to a separate device (rare case). This fixes [#7441](https://github.com/ClickHouse/ClickHouse/issues/7441) [#9257](https://github.com/ClickHouse/ClickHouse/pull/9257) ([Mikhail Korotov](https://github.com/millb)) +* Allow comma (cross) join with IN () inside. [#9251](https://github.com/ClickHouse/ClickHouse/pull/9251) ([Artem Zuikov](https://github.com/4ertus2)) +* Allow to rewrite CROSS to INNER JOIN if there's [NOT] LIKE operator in WHERE section. [#9229](https://github.com/ClickHouse/ClickHouse/pull/9229) ([Artem Zuikov](https://github.com/4ertus2)) +* Fix possible incorrect result after `GROUP BY` with enabled setting `distributed_aggregation_memory_efficient`. Fixes [#9134](https://github.com/ClickHouse/ClickHouse/issues/9134). [#9289](https://github.com/ClickHouse/ClickHouse/pull/9289) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +* Found keys were counted as missed in metrics of cache dictionaries. [#9411](https://github.com/ClickHouse/ClickHouse/pull/9411) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) +* Fix replication protocol incompatibility introduced in [#8598](https://github.com/ClickHouse/ClickHouse/issues/8598). [#9412](https://github.com/ClickHouse/ClickHouse/pull/9412) ([alesapin](https://github.com/alesapin)) +* Fixed race condition on `queue_task_handle` at the startup of `ReplicatedMergeTree` tables. [#9552](https://github.com/ClickHouse/ClickHouse/pull/9552) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* The token `NOT` didn't work in `SHOW TABLES NOT LIKE` query [#8727](https://github.com/ClickHouse/ClickHouse/issues/8727) [#8940](https://github.com/ClickHouse/ClickHouse/pull/8940) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Added range check to function `h3EdgeLengthM`. Without this check, buffer overflow is possible. [#8945](https://github.com/ClickHouse/ClickHouse/pull/8945) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Fixed up a bug in batched calculations of ternary logical OPs on multiple arguments (more than 10). [#8718](https://github.com/ClickHouse/ClickHouse/pull/8718) ([Alexander Kazakov](https://github.com/Akazz)) +* Fix error of PREWHERE optimization, which could lead to segfaults or `Inconsistent number of columns got from MergeTreeRangeReader` exception. [#9024](https://github.com/ClickHouse/ClickHouse/pull/9024) ([Anton Popov](https://github.com/CurtizJ)) +* Fix unexpected `Timeout exceeded while reading from socket` exception, which randomly happens on secure connection before the timeout is actually exceeded and when query profiler is enabled. Also add the `connect_timeout_with_failover_secure_ms` setting (default 100ms), which is similar to `connect_timeout_with_failover_ms`, but is used for secure connections (because SSL handshake is slower than an ordinary TCP connection) [#9026](https://github.com/ClickHouse/ClickHouse/pull/9026) ([tavplubix](https://github.com/tavplubix)) +* Fix bug with mutations finalization, when mutation may hang in state with `parts_to_do=0` and `is_done=0`. [#9022](https://github.com/ClickHouse/ClickHouse/pull/9022) ([alesapin](https://github.com/alesapin)) +* Use new ANY JOIN logic with `partial_merge_join` setting. It's possible to make `ANY|ALL|SEMI LEFT` and `ALL INNER` joins with `partial_merge_join=1` now. [#8932](https://github.com/ClickHouse/ClickHouse/pull/8932) ([Artem Zuikov](https://github.com/4ertus2))
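+
+For illustration, a minimal sketch of the new logic (the tables `t1`/`t2` are hypothetical):
+
+```sql
+SET partial_merge_join = 1;
+SELECT t1.id, t2.value FROM t1 ANY LEFT JOIN t2 USING (id);
+```
+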
+* Shard now clamps the settings got from the initiator to the shard's constraints instead of throwing an exception. This fix allows to send queries to a shard with different constraints. [#9447](https://github.com/ClickHouse/ClickHouse/pull/9447) ([Vitaly Baranov](https://github.com/vitlibar)) +* Fixed memory management problem in `MergeTreeReadPool`. [#8791](https://github.com/ClickHouse/ClickHouse/pull/8791) ([Vladimir Chebotarev](https://github.com/excitoon)) +* Fix `toDecimal*OrNull()` functions family when called with string `e`. Fixes [#8312](https://github.com/ClickHouse/ClickHouse/issues/8312) [#8764](https://github.com/ClickHouse/ClickHouse/pull/8764) ([Artem Zuikov](https://github.com/4ertus2)) +* Make sure that `FORMAT Null` sends no data to the client. [#8767](https://github.com/ClickHouse/ClickHouse/pull/8767) ([Alexander Kuzmenkov](https://github.com/akuzm)) +* Fix bug that timestamp in `LiveViewBlockInputStream` is not updated. `LIVE VIEW` is an experimental feature. [#8644](https://github.com/ClickHouse/ClickHouse/pull/8644) ([vxider](https://github.com/Vxider)) [#8625](https://github.com/ClickHouse/ClickHouse/pull/8625) ([vxider](https://github.com/Vxider)) +* Fixed `ALTER MODIFY TTL` wrong behavior which did not allow deleting old TTL expressions. [#8422](https://github.com/ClickHouse/ClickHouse/pull/8422) ([Vladimir Chebotarev](https://github.com/excitoon)) +* Fixed UBSan report in MergeTreeIndexSet. This fixes [#9250](https://github.com/ClickHouse/ClickHouse/issues/9250) [#9365](https://github.com/ClickHouse/ClickHouse/pull/9365) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Fixed the behaviour of `match` and `extract` functions when haystack has zero bytes. The behaviour was wrong when haystack was constant. This fixes [#9160](https://github.com/ClickHouse/ClickHouse/issues/9160) [#9163](https://github.com/ClickHouse/ClickHouse/pull/9163) ([alexey-milovidov](https://github.com/alexey-milovidov)) [#9345](https://github.com/ClickHouse/ClickHouse/pull/9345) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Avoid throwing from destructor in Apache Avro 3rd-party library. [#9066](https://github.com/ClickHouse/ClickHouse/pull/9066) ([Andrew Onyshchuk](https://github.com/oandrew)) +* Don't commit a batch polled from `Kafka` partially as it can lead to holes in data. [#8876](https://github.com/ClickHouse/ClickHouse/pull/8876) ([filimonov](https://github.com/filimonov)) +* Fix `joinGet` with nullable return types. https://github.com/ClickHouse/ClickHouse/issues/8919 [#9014](https://github.com/ClickHouse/ClickHouse/pull/9014) ([Amos Bird](https://github.com/amosbird)) +* Fix data incompatibility when compressed with `T64` codec. [#9016](https://github.com/ClickHouse/ClickHouse/pull/9016) ([Artem Zuikov](https://github.com/4ertus2)) Fix data type ids in `T64` compression codec that led to wrong (de)compression in affected versions. [#9033](https://github.com/ClickHouse/ClickHouse/pull/9033) ([Artem Zuikov](https://github.com/4ertus2)) +* Add setting `enable_early_constant_folding` and disable it in some cases that lead to errors.
[#9010](https://github.com/ClickHouse/ClickHouse/pull/9010) ([Artem Zuikov](https://github.com/4ertus2)) +* Fix pushdown predicate optimizer with VIEW and enable the test [#9011](https://github.com/ClickHouse/ClickHouse/pull/9011) ([Winter Zhang](https://github.com/zhang2014)) +* Fix segfault in `Merge` tables that can happen when reading from `File` storages [#9387](https://github.com/ClickHouse/ClickHouse/pull/9387) ([tavplubix](https://github.com/tavplubix)) +* Added a check for storage policy in `ATTACH PARTITION FROM`, `REPLACE PARTITION`, `MOVE TO TABLE`. Otherwise it could make data of the part inaccessible after restart and prevent ClickHouse from starting. [#9383](https://github.com/ClickHouse/ClickHouse/pull/9383) ([Vladimir Chebotarev](https://github.com/excitoon)) +* Fix alters if there is TTL set for the table. [#8800](https://github.com/ClickHouse/ClickHouse/pull/8800) ([Anton Popov](https://github.com/CurtizJ)) +* Fix race condition that can happen when `SYSTEM RELOAD ALL DICTIONARIES` is executed while some dictionary is being modified/added/removed. [#8801](https://github.com/ClickHouse/ClickHouse/pull/8801) ([Vitaly Baranov](https://github.com/vitlibar)) +* In previous versions the `Memory` database engine used an empty data path, so tables were created in the `path` directory (e.g. `/var/lib/clickhouse/`), not in the data directory of the database (e.g. `/var/lib/clickhouse/db_name`). [#8753](https://github.com/ClickHouse/ClickHouse/pull/8753) ([tavplubix](https://github.com/tavplubix)) +* Fixed wrong log messages about missing default disk or policy. [#9530](https://github.com/ClickHouse/ClickHouse/pull/9530) ([Vladimir Chebotarev](https://github.com/excitoon)) +* Fix not(has()) for the bloom_filter index of array types. [#9407](https://github.com/ClickHouse/ClickHouse/pull/9407) ([achimbab](https://github.com/achimbab)) +* Allow the first column(s) in a table with `Log` engine to be an alias [#9231](https://github.com/ClickHouse/ClickHouse/pull/9231) ([Ivan](https://github.com/abyss7)) +* Fix order of ranges while reading from `MergeTree` table in one thread. It could lead to exceptions from `MergeTreeRangeReader` or wrong query results. [#9050](https://github.com/ClickHouse/ClickHouse/pull/9050) ([Anton Popov](https://github.com/CurtizJ)) +* Make `reinterpretAsFixedString` return `FixedString` instead of `String`. [#9052](https://github.com/ClickHouse/ClickHouse/pull/9052) ([Andrew Onyshchuk](https://github.com/oandrew)) +* Avoid extremely rare cases when the user can get a wrong error message (`Success` instead of detailed error description). [#9457](https://github.com/ClickHouse/ClickHouse/pull/9457) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Do not crash when using `Template` format with empty row template. [#8785](https://github.com/ClickHouse/ClickHouse/pull/8785) ([Alexander Kuzmenkov](https://github.com/akuzm)) +* Metadata files for system tables could be created in a wrong place [#8653](https://github.com/ClickHouse/ClickHouse/pull/8653) ([tavplubix](https://github.com/tavplubix)) Fixes [#8581](https://github.com/ClickHouse/ClickHouse/issues/8581). +* Fix data race on exception_ptr in cache dictionary [#8303](https://github.com/ClickHouse/ClickHouse/issues/8303). [#9379](https://github.com/ClickHouse/ClickHouse/pull/9379) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) +* Do not throw an exception for query `ATTACH TABLE IF NOT EXISTS`. Previously it was thrown if the table already exists, despite the `IF NOT EXISTS` clause.
[#8967](https://github.com/ClickHouse/ClickHouse/pull/8967) ([Anton Popov](https://github.com/CurtizJ)) +* Fixed missing closing paren in exception message. [#8811](https://github.com/ClickHouse/ClickHouse/pull/8811) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Avoid message `Possible deadlock avoided` at the startup of clickhouse-client in interactive mode. [#9455](https://github.com/ClickHouse/ClickHouse/pull/9455) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Fixed the issue when padding at the end of base64 encoded value can be malformed. Update base64 library. This fixes [#9491](https://github.com/ClickHouse/ClickHouse/issues/9491), closes [#9492](https://github.com/ClickHouse/ClickHouse/issues/9492) [#9500](https://github.com/ClickHouse/ClickHouse/pull/9500) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Prevent losing data in `Kafka` in rare cases when an exception happens after reading the suffix but before the commit. Fixes [#9378](https://github.com/ClickHouse/ClickHouse/issues/9378) [#9507](https://github.com/ClickHouse/ClickHouse/pull/9507) ([filimonov](https://github.com/filimonov)) +* Fixed exception in `DROP TABLE IF EXISTS` [#8663](https://github.com/ClickHouse/ClickHouse/pull/8663) ([Nikita Vasilev](https://github.com/nikvas0)) +* Fix crash when a user tries to `ALTER MODIFY SETTING` for old-formatted `MergeTree` table engines family. [#9435](https://github.com/ClickHouse/ClickHouse/pull/9435) ([alesapin](https://github.com/alesapin)) +* Support for UInt64 numbers that don't fit in Int64 in JSON-related functions. Update SIMDJSON to master. This fixes [#9209](https://github.com/ClickHouse/ClickHouse/issues/9209) [#9344](https://github.com/ClickHouse/ClickHouse/pull/9344) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Fixed execution of inversed predicates when a non-strictly monotonic functional index is used. [#9223](https://github.com/ClickHouse/ClickHouse/pull/9223) ([Alexander Kazakov](https://github.com/Akazz)) +* Don't try to fold `IN` constant in `GROUP BY` [#8868](https://github.com/ClickHouse/ClickHouse/pull/8868) ([Amos Bird](https://github.com/amosbird)) +* Fix bug in `ALTER DELETE` mutations which leads to index corruption. This fixes [#9019](https://github.com/ClickHouse/ClickHouse/issues/9019) and [#8982](https://github.com/ClickHouse/ClickHouse/issues/8982). Additionally fix extremely rare race conditions in `ReplicatedMergeTree` `ALTER` queries. [#9048](https://github.com/ClickHouse/ClickHouse/pull/9048) ([alesapin](https://github.com/alesapin)) +* When the setting `compile_expressions` is enabled, you can get `unexpected column` in `LLVMExecutableFunction` when we use `Nullable` type [#8910](https://github.com/ClickHouse/ClickHouse/pull/8910) ([Guillaume Tassery](https://github.com/YiuRULE)) +* Multiple fixes for `Kafka` engine: 1) fix duplicates that were appearing during consumer group rebalance. 2) Fix rare 'holes' that appeared when data were polled from several partitions with one poll and committed partially (now we always process / commit the whole polled block of messages). 3) Fix flushes by block size (before that only flushing by timeout was working properly). 4) better subscription procedure (with assignment feedback). 5) Make tests work faster (with default intervals and timeouts).
Due to the fact that data was not flushed by block size before (as it should be according to the documentation), this PR may lead to some performance degradation with default settings (due to more frequent and smaller flushes, which are less optimal). If you encounter a performance issue after this change, please increase `kafka_max_block_size` in the table to a bigger value (for example `CREATE TABLE ...Engine=Kafka ... SETTINGS ... kafka_max_block_size=524288`). Fixes [#7259](https://github.com/ClickHouse/ClickHouse/issues/7259) [#8917](https://github.com/ClickHouse/ClickHouse/pull/8917) ([filimonov](https://github.com/filimonov)) +* Fix `Parameter out of bound` exception in some queries after PREWHERE optimizations. [#8914](https://github.com/ClickHouse/ClickHouse/pull/8914) ([Baudouin Giard](https://github.com/bgiard)) +* Fixed the case of mixed-constness of arguments of function `arrayZip`. [#8705](https://github.com/ClickHouse/ClickHouse/pull/8705) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* When executing `CREATE` query, fold constant expressions in storage engine arguments. Replace empty database name with current database. Fixes [#6508](https://github.com/ClickHouse/ClickHouse/issues/6508), [#3492](https://github.com/ClickHouse/ClickHouse/issues/3492) [#9262](https://github.com/ClickHouse/ClickHouse/pull/9262) ([tavplubix](https://github.com/tavplubix)) +* Now it's not possible to create or add columns with simple cyclic aliases like `a DEFAULT b, b DEFAULT a`. [#9603](https://github.com/ClickHouse/ClickHouse/pull/9603) ([alesapin](https://github.com/alesapin)) +* Fixed a bug with double move which may corrupt the original part. This is relevant if you use `ALTER TABLE MOVE` [#8680](https://github.com/ClickHouse/ClickHouse/pull/8680) ([Vladimir Chebotarev](https://github.com/excitoon)) +* Allow `interval` identifier to correctly parse without backticks. Fixed issue when a query cannot be executed even if the `interval` identifier is enclosed in backticks or double quotes. This fixes [#9124](https://github.com/ClickHouse/ClickHouse/issues/9124). [#9142](https://github.com/ClickHouse/ClickHouse/pull/9142) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Fixed fuzz test and incorrect behaviour of `bitTestAll`/`bitTestAny` functions. [#9143](https://github.com/ClickHouse/ClickHouse/pull/9143) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Fix possible crash/wrong number of rows in `LIMIT n WITH TIES` when there are a lot of rows equal to the n'th row. [#9464](https://github.com/ClickHouse/ClickHouse/pull/9464) ([tavplubix](https://github.com/tavplubix)) +* Fix mutations with parts written with enabled `insert_quorum`. [#9463](https://github.com/ClickHouse/ClickHouse/pull/9463) ([alesapin](https://github.com/alesapin)) +* Fix data race at destruction of `Poco::HTTPServer`. It could happen when server is started and immediately shut down. [#9468](https://github.com/ClickHouse/ClickHouse/pull/9468) ([Anton Popov](https://github.com/CurtizJ)) +* Fix bug in which a misleading error message was shown when running `SHOW CREATE TABLE a_table_that_does_not_exist`. [#8899](https://github.com/ClickHouse/ClickHouse/pull/8899) ([achulkov2](https://github.com/achulkov2)) +* Fixed `Parameters are out of bound` exception in some rare cases when we have a constant in the `SELECT` clause and an `ORDER BY` and a `LIMIT` clause.
[#8892](https://github.com/ClickHouse/ClickHouse/pull/8892) ([Guillaume Tassery](https://github.com/YiuRULE)) +* Fix mutations finalization, when an already done mutation can have status `is_done=0`. [#9217](https://github.com/ClickHouse/ClickHouse/pull/9217) ([alesapin](https://github.com/alesapin)) +* Prevent executing `ALTER ADD INDEX` for MergeTree tables with old syntax, because it doesn't work. [#8822](https://github.com/ClickHouse/ClickHouse/pull/8822) ([Mikhail Korotov](https://github.com/millb)) +* During server startup do not access the table which `LIVE VIEW` depends on, so the server will be able to start. Also remove `LIVE VIEW` dependencies when detaching `LIVE VIEW`. `LIVE VIEW` is an experimental feature. [#8824](https://github.com/ClickHouse/ClickHouse/pull/8824) ([tavplubix](https://github.com/tavplubix)) +* Fix possible segfault in `MergeTreeRangeReader`, while executing `PREWHERE`. [#9106](https://github.com/ClickHouse/ClickHouse/pull/9106) ([Anton Popov](https://github.com/CurtizJ)) +* Fix possible mismatched checksums with column TTLs. [#9451](https://github.com/ClickHouse/ClickHouse/pull/9451) ([Anton Popov](https://github.com/CurtizJ)) +* Fixed a bug when parts were not being moved in background by TTL rules in case there is only one volume. [#8672](https://github.com/ClickHouse/ClickHouse/pull/8672) ([Vladimir Chebotarev](https://github.com/excitoon)) +* Fixed the issue `Method createColumn() is not implemented for data type Set`. This fixes [#7799](https://github.com/ClickHouse/ClickHouse/issues/7799). [#8674](https://github.com/ClickHouse/ClickHouse/pull/8674) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Now we will try to finalize mutations more frequently. [#9427](https://github.com/ClickHouse/ClickHouse/pull/9427) ([alesapin](https://github.com/alesapin)) +* Fix `intDiv` by minus one constant [#9351](https://github.com/ClickHouse/ClickHouse/pull/9351) ([hcz](https://github.com/hczhcz)) +* Fix possible race condition in `BlockIO`. [#9356](https://github.com/ClickHouse/ClickHouse/pull/9356) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +* Fix bug leading to server termination when trying to use / drop `Kafka` table created with wrong parameters. [#9513](https://github.com/ClickHouse/ClickHouse/pull/9513) ([filimonov](https://github.com/filimonov)) +* Added workaround if OS returns wrong result for `timer_create` function. [#8837](https://github.com/ClickHouse/ClickHouse/pull/8837) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Fixed error in usage of `min_marks_for_seek` parameter. Fixed the error message when there is no sharding key in Distributed table and we try to skip unused shards. [#8908](https://github.com/ClickHouse/ClickHouse/pull/8908) ([Azat Khuzhin](https://github.com/azat)) + +#### Improvement +* Implement `ALTER MODIFY/DROP` queries on top of mutations for `ReplicatedMergeTree*` engines family. Now `ALTER` blocks only at the metadata update stage, and doesn't block after that. [#8701](https://github.com/ClickHouse/ClickHouse/pull/8701) ([alesapin](https://github.com/alesapin)) +* Add ability to rewrite CROSS to INNER JOINs with `WHERE` section containing unqualified names (see the sketch below). [#9512](https://github.com/ClickHouse/ClickHouse/pull/9512) ([Artem Zuikov](https://github.com/4ertus2))
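+
+For illustration, a hypothetical query pair showing the rewrite (the tables `t1`/`t2` and their columns are made up; `a` resolves to `t1` and `b` to `t2`): with the rewrite available, the first query can be executed as the second:
+
+```sql
+SELECT * FROM t1 CROSS JOIN t2 WHERE a = b;
+SELECT * FROM t1 INNER JOIN t2 ON t1.a = t2.b;
+```
+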
+* Make `SHOW TABLES` and `SHOW DATABASES` queries support the `WHERE` expressions and `FROM`/`IN` (see the example after this group of entries) [#9076](https://github.com/ClickHouse/ClickHouse/pull/9076) ([sundyli](https://github.com/sundy-li)) +* Added a setting `deduplicate_blocks_in_dependent_materialized_views`. [#9070](https://github.com/ClickHouse/ClickHouse/pull/9070) ([urykhy](https://github.com/urykhy)) +* After recent changes MySQL client started to print binary strings in hex thereby making them not readable ([#9032](https://github.com/ClickHouse/ClickHouse/issues/9032)). The workaround in ClickHouse is to mark string columns as UTF-8, which is not always, but usually the case. [#9079](https://github.com/ClickHouse/ClickHouse/pull/9079) ([Yuriy Baranov](https://github.com/yurriy)) +* Add support of String and FixedString keys for `sumMap` [#8903](https://github.com/ClickHouse/ClickHouse/pull/8903) ([Baudouin Giard](https://github.com/bgiard)) +* Support string keys in SummingMergeTree maps [#8933](https://github.com/ClickHouse/ClickHouse/pull/8933) ([Baudouin Giard](https://github.com/bgiard)) +* Signal termination of thread to the thread pool even if the thread has thrown an exception [#8736](https://github.com/ClickHouse/ClickHouse/pull/8736) ([Ding Xiang Fei](https://github.com/dingxiangfei2009)) +* Allow to set `query_id` in `clickhouse-benchmark` [#9416](https://github.com/ClickHouse/ClickHouse/pull/9416) ([Anton Popov](https://github.com/CurtizJ)) +* Don't allow strange expressions in `ALTER TABLE ... PARTITION partition` query. This addresses [#7192](https://github.com/ClickHouse/ClickHouse/issues/7192) [#8835](https://github.com/ClickHouse/ClickHouse/pull/8835) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* The table `system.table_engines` now provides information about feature support (like `supports_ttl` or `supports_sort_order`). [#8830](https://github.com/ClickHouse/ClickHouse/pull/8830) ([Max Akhmedov](https://github.com/zlobober)) +* Enable `system.metric_log` by default. It will contain rows with values of ProfileEvents, CurrentMetrics collected with "collect_interval_milliseconds" interval (one second by default). The table is very small (usually in order of megabytes) and collecting this data by default is reasonable. [#9225](https://github.com/ClickHouse/ClickHouse/pull/9225) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Initialize query profiler for all threads in a group, e.g. it allows to fully profile insert-queries. Fixes [#6964](https://github.com/ClickHouse/ClickHouse/issues/6964) [#8874](https://github.com/ClickHouse/ClickHouse/pull/8874) ([Ivan](https://github.com/abyss7))
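+
+As mentioned in the `SHOW TABLES` entry above, a minimal sketch (assuming the filter column is `name`; the pattern is arbitrary):
+
+```sql
+SHOW TABLES FROM system WHERE name LIKE '%log%';
+```
+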
+* Now temporary `LIVE VIEW` is created by `CREATE LIVE VIEW name WITH TIMEOUT [42] ...` instead of `CREATE TEMPORARY LIVE VIEW ...`, because the previous syntax was not consistent with `CREATE TEMPORARY TABLE ...` [#9131](https://github.com/ClickHouse/ClickHouse/pull/9131) ([tavplubix](https://github.com/tavplubix)) +* Add text_log.level configuration parameter to limit entries that go to the `system.text_log` table [#8809](https://github.com/ClickHouse/ClickHouse/pull/8809) ([Azat Khuzhin](https://github.com/azat)) +* Allow to put downloaded parts to disks/volumes according to TTL rules [#8598](https://github.com/ClickHouse/ClickHouse/pull/8598) ([Vladimir Chebotarev](https://github.com/excitoon)) +* For external MySQL dictionaries, allow to mutualize MySQL connection pool to "share" them among dictionaries. This option significantly reduces the number of connections to MySQL servers. [#9409](https://github.com/ClickHouse/ClickHouse/pull/9409) ([Clément Rodriguez](https://github.com/clemrodriguez)) +* Show nearest query execution time for quantiles in `clickhouse-benchmark` output instead of interpolated values. It's better to show values that correspond to the execution time of some queries. [#8712](https://github.com/ClickHouse/ClickHouse/pull/8712) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Possibility to add key & timestamp for the message when inserting data to Kafka. Fixes [#7198](https://github.com/ClickHouse/ClickHouse/issues/7198) [#8969](https://github.com/ClickHouse/ClickHouse/pull/8969) ([filimonov](https://github.com/filimonov)) +* If server is run from terminal, highlight thread number, query id and log priority by colors. This is for improved readability of correlated log messages for developers. [#8961](https://github.com/ClickHouse/ClickHouse/pull/8961) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Better exception message while loading tables for `Ordinary` database. [#9527](https://github.com/ClickHouse/ClickHouse/pull/9527) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Implement `arraySlice` for arrays with aggregate function states. This fixes [#9388](https://github.com/ClickHouse/ClickHouse/issues/9388) [#9391](https://github.com/ClickHouse/ClickHouse/pull/9391) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Allow constant functions and constant arrays to be used on the right side of the IN operator. [#8813](https://github.com/ClickHouse/ClickHouse/pull/8813) ([Anton Popov](https://github.com/CurtizJ)) +* If a zookeeper exception happened while fetching data for system.replicas, display it in a separate column. This implements [#9137](https://github.com/ClickHouse/ClickHouse/issues/9137) [#9138](https://github.com/ClickHouse/ClickHouse/pull/9138) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Atomically remove MergeTree data parts on destroy. [#8402](https://github.com/ClickHouse/ClickHouse/pull/8402) ([Vladimir Chebotarev](https://github.com/excitoon)) +* Support row-level security for Distributed tables. [#8926](https://github.com/ClickHouse/ClickHouse/pull/8926) ([Ivan](https://github.com/abyss7)) +* Now we recognize suffixes (like KB, KiB...) in settings values.
[#8072](https://github.com/ClickHouse/ClickHouse/pull/8072) ([Mikhail Korotov](https://github.com/millb)) +* Prevent out of memory while constructing result of a large JOIN. [#8637](https://github.com/ClickHouse/ClickHouse/pull/8637) ([Artem Zuikov](https://github.com/4ertus2)) +* Added names of clusters to suggestions in interactive mode in `clickhouse-client`. [#8709](https://github.com/ClickHouse/ClickHouse/pull/8709) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Initialize query profiler for all threads in a group, e.g. it allows to fully profile insert-queries [#8820](https://github.com/ClickHouse/ClickHouse/pull/8820) ([Ivan](https://github.com/abyss7)) +* Added column `exception_code` in `system.query_log` table. [#8770](https://github.com/ClickHouse/ClickHouse/pull/8770) ([Mikhail Korotov](https://github.com/millb)) +* Enabled MySQL compatibility server on port `9004` in the default server configuration file. Fixed password generation command in the example in configuration. [#8771](https://github.com/ClickHouse/ClickHouse/pull/8771) ([Yuriy Baranov](https://github.com/yurriy)) +* Prevent abort on shutdown if the filesystem is readonly. This fixes [#9094](https://github.com/ClickHouse/ClickHouse/issues/9094) [#9100](https://github.com/ClickHouse/ClickHouse/pull/9100) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Better exception message when length is required in HTTP POST query. [#9453](https://github.com/ClickHouse/ClickHouse/pull/9453) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Add `_path` and `_file` virtual columns to `HDFS` and `File` engines and `hdfs` and `file` table functions [#8489](https://github.com/ClickHouse/ClickHouse/pull/8489) ([Olga Khvostikova](https://github.com/stavrolia)) +* Fix error `Cannot find column` while inserting into `MATERIALIZED VIEW` in case a new column was added to the view's internal table. [#8766](https://github.com/ClickHouse/ClickHouse/pull/8766) [#8788](https://github.com/ClickHouse/ClickHouse/pull/8788) ([vzakaznikov](https://github.com/vzakaznikov)) [#8788](https://github.com/ClickHouse/ClickHouse/issues/8788) [#8806](https://github.com/ClickHouse/ClickHouse/pull/8806) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) [#8803](https://github.com/ClickHouse/ClickHouse/pull/8803) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +* Fix progress over native client-server protocol, by sending progress after the final update (like logs). This may be relevant only to some third-party tools that are using native protocol. [#9495](https://github.com/ClickHouse/ClickHouse/pull/9495) ([Azat Khuzhin](https://github.com/azat)) +* Add a system metric tracking the number of client connections using MySQL protocol ([#9013](https://github.com/ClickHouse/ClickHouse/issues/9013)). [#9015](https://github.com/ClickHouse/ClickHouse/pull/9015) ([Eugene Klimov](https://github.com/Slach)) +* From now on, HTTP responses will have `X-ClickHouse-Timezone` header set to the same timezone value that `SELECT timezone()` would report. [#9493](https://github.com/ClickHouse/ClickHouse/pull/9493) ([Denis Glazachev](https://github.com/traceon)) + +#### Performance Improvement +* Improve performance of analysing index with IN [#9261](https://github.com/ClickHouse/ClickHouse/pull/9261) ([Anton Popov](https://github.com/CurtizJ)) +* Simpler and more efficient code in Logical Functions + code cleanups.
A followup to [#8718](https://github.com/ClickHouse/ClickHouse/issues/8718) [#8728](https://github.com/ClickHouse/ClickHouse/pull/8728) ([Alexander Kazakov](https://github.com/Akazz)) +* Overall performance improvement (in range of 5%..200% for affected queries) by ensuring even more strict aliasing with C++20 features. [#9304](https://github.com/ClickHouse/ClickHouse/pull/9304) ([Amos Bird](https://github.com/amosbird)) +* More strict aliasing for inner loops of comparison functions. [#9327](https://github.com/ClickHouse/ClickHouse/pull/9327) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* More strict aliasing for inner loops of arithmetic functions. [#9325](https://github.com/ClickHouse/ClickHouse/pull/9325) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* A ~3 times faster implementation for ColumnVector::replicate(), via which ColumnConst::convertToFullColumn() is implemented. Also will be useful in tests when materializing constants. [#9293](https://github.com/ClickHouse/ClickHouse/pull/9293) ([Alexander Kazakov](https://github.com/Akazz)) +* Another minor performance improvement to `ColumnVector::replicate()` (this speeds up the `materialize` function and higher order functions); an even further improvement to [#9293](https://github.com/ClickHouse/ClickHouse/issues/9293) [#9442](https://github.com/ClickHouse/ClickHouse/pull/9442) ([Alexander Kazakov](https://github.com/Akazz)) +* Improved performance of `stochasticLinearRegression` aggregate function. This patch was contributed by Intel. [#8652](https://github.com/ClickHouse/ClickHouse/pull/8652) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Improve performance of `reinterpretAsFixedString` function. [#9342](https://github.com/ClickHouse/ClickHouse/pull/9342) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Do not send blocks to client for `Null` format in processors pipeline. [#8797](https://github.com/ClickHouse/ClickHouse/pull/8797) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) [#8767](https://github.com/ClickHouse/ClickHouse/pull/8767) ([Alexander Kuzmenkov](https://github.com/akuzm)) + +#### Build/Testing/Packaging Improvement +* Exception handling now works correctly on Windows Subsystem for Linux. See https://github.com/ClickHouse-Extras/libunwind/pull/3 This fixes [#6480](https://github.com/ClickHouse/ClickHouse/issues/6480) [#9564](https://github.com/ClickHouse/ClickHouse/pull/9564) ([sobolevsv](https://github.com/sobolevsv)) +* Replace `readline` with `replxx` for interactive line editing in `clickhouse-client` [#8416](https://github.com/ClickHouse/ClickHouse/pull/8416) ([Ivan](https://github.com/abyss7)) +* Better build time and fewer template instantiations in FunctionsComparison. [#9324](https://github.com/ClickHouse/ClickHouse/pull/9324) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Added integration with `clang-tidy` in CI. See also [#6044](https://github.com/ClickHouse/ClickHouse/issues/6044) [#9566](https://github.com/ClickHouse/ClickHouse/pull/9566) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Now we link ClickHouse in CI using `lld` even for `gcc`. [#9049](https://github.com/ClickHouse/ClickHouse/pull/9049) ([alesapin](https://github.com/alesapin)) +* Allow to randomize thread scheduling and insert glitches when `THREAD_FUZZER_*` environment variables are set. This helps testing.
[#9459](https://github.com/ClickHouse/ClickHouse/pull/9459) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Enable secure sockets in stateless tests [#9288](https://github.com/ClickHouse/ClickHouse/pull/9288) ([tavplubix](https://github.com/tavplubix)) +* Make SPLIT_SHARED_LIBRARIES=OFF more robust [#9156](https://github.com/ClickHouse/ClickHouse/pull/9156) ([Azat Khuzhin](https://github.com/azat)) +* Make the "performance_introspection_and_logging" test robust when the server gets stuck at random. This may happen in CI environment. See also [#9515](https://github.com/ClickHouse/ClickHouse/issues/9515) [#9528](https://github.com/ClickHouse/ClickHouse/pull/9528) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Validate XML in style check. [#9550](https://github.com/ClickHouse/ClickHouse/pull/9550) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Fixed race condition in test `00738_lock_for_inner_table`. This test relied on sleep. [#9555](https://github.com/ClickHouse/ClickHouse/pull/9555) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Remove performance tests of type `once`. This is needed to run all performance tests in statistical comparison mode (more reliable). [#9557](https://github.com/ClickHouse/ClickHouse/pull/9557) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Added performance test for arithmetic functions. [#9326](https://github.com/ClickHouse/ClickHouse/pull/9326) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Added performance test for `sumMap` and `sumMapWithOverflow` aggregate functions. Follow-up for [#8933](https://github.com/ClickHouse/ClickHouse/issues/8933) [#8947](https://github.com/ClickHouse/ClickHouse/pull/8947) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Ensure style of ErrorCodes by style check. [#9370](https://github.com/ClickHouse/ClickHouse/pull/9370) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Add script for tests history. [#8796](https://github.com/ClickHouse/ClickHouse/pull/8796) ([alesapin](https://github.com/alesapin)) +* Add GCC warning `-Wsuggest-override` to locate and fix all places where the `override` keyword must be used. [#8760](https://github.com/ClickHouse/ClickHouse/pull/8760) ([kreuzerkrieg](https://github.com/kreuzerkrieg)) +* Ignore weak symbol under Mac OS X because it must be defined [#9538](https://github.com/ClickHouse/ClickHouse/pull/9538) ([Deleted user](https://github.com/ghost)) +* Normalize running time of some queries in performance tests. This is done in preparation to run all the performance tests in comparison mode.
[#9565](https://github.com/ClickHouse/ClickHouse/pull/9565) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Fix some tests to support pytest with query tests [#9062](https://github.com/ClickHouse/ClickHouse/pull/9062) ([Ivan](https://github.com/abyss7)) +* Enable SSL in build with MSan, so the server will not fail at startup when running stateless tests [#9531](https://github.com/ClickHouse/ClickHouse/pull/9531) ([tavplubix](https://github.com/tavplubix)) +* Fix database substitution in test results [#9384](https://github.com/ClickHouse/ClickHouse/pull/9384) ([Ilya Yatsishin](https://github.com/qoega)) +* Build fixes for miscellaneous platforms [#9381](https://github.com/ClickHouse/ClickHouse/pull/9381) ([proller](https://github.com/proller)) [#8755](https://github.com/ClickHouse/ClickHouse/pull/8755) ([proller](https://github.com/proller)) [#8631](https://github.com/ClickHouse/ClickHouse/pull/8631) ([proller](https://github.com/proller)) +* Added disks section to stateless-with-coverage test docker image [#9213](https://github.com/ClickHouse/ClickHouse/pull/9213) ([Pavel Kovalenko](https://github.com/Jokser)) +* Get rid of in-source-tree files when building with GRPC [#9588](https://github.com/ClickHouse/ClickHouse/pull/9588) ([Amos Bird](https://github.com/amosbird)) +* Slightly faster build time by removing SessionCleaner from Context. Make the code of SessionCleaner simpler. [#9232](https://github.com/ClickHouse/ClickHouse/pull/9232) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Updated checking for hung queries in clickhouse-test script [#8858](https://github.com/ClickHouse/ClickHouse/pull/8858) ([Alexander Kazakov](https://github.com/Akazz)) +* Removed some useless files from the repository. [#8843](https://github.com/ClickHouse/ClickHouse/pull/8843) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Changed type of math perftests from `once` to `loop`. [#8783](https://github.com/ClickHouse/ClickHouse/pull/8783) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) +* Add docker image which allows to build interactive code browser HTML report for our codebase. [#8781](https://github.com/ClickHouse/ClickHouse/pull/8781) ([alesapin](https://github.com/alesapin)) See [Woboq Code Browser](https://clickhouse-test-reports.s3.yandex.net/codebrowser/html_report///ClickHouse/dbms/src/index.html) +* Suppress some test failures under MSan. [#8780](https://github.com/ClickHouse/ClickHouse/pull/8780) ([Alexander Kuzmenkov](https://github.com/akuzm)) +* Speedup "exception while insert" test. This test often times out in debug-with-coverage build. [#8711](https://github.com/ClickHouse/ClickHouse/pull/8711) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Updated `libcxx` and `libcxxabi` to master. In preparation to [#9304](https://github.com/ClickHouse/ClickHouse/issues/9304) [#9308](https://github.com/ClickHouse/ClickHouse/pull/9308) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Fix flaky test `00910_zookeeper_test_alter_compression_codecs`. [#9525](https://github.com/ClickHouse/ClickHouse/pull/9525) ([alexey-milovidov](https://github.com/alexey-milovidov)) +* Clean up duplicated linker flags. Make sure the linker won't look up an unexpected symbol. [#9433](https://github.com/ClickHouse/ClickHouse/pull/9433) ([Amos Bird](https://github.com/amosbird)) +* Add `clickhouse-odbc` driver into test images. This allows to test interaction of ClickHouse with ClickHouse via its own ODBC driver.
+* Fix several bugs in unit tests. [#9047](https://github.com/ClickHouse/ClickHouse/pull/9047) ([alesapin](https://github.com/alesapin))
+* Enable `-Wmissing-include-dirs` GCC warning to eliminate all non-existing includes - mostly as a result of CMake scripting errors [#8704](https://github.com/ClickHouse/ClickHouse/pull/8704) ([kreuzerkrieg](https://github.com/kreuzerkrieg))
+* Describe reasons if query profiler cannot work. This is intended for [#9049](https://github.com/ClickHouse/ClickHouse/issues/9049) [#9144](https://github.com/ClickHouse/ClickHouse/pull/9144) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Update OpenSSL to upstream master. Fixed the issue when TLS connections may fail with the message `OpenSSL SSL_read: error:14094438:SSL routines:ssl3_read_bytes:tlsv1 alert internal error` and `SSL Exception: error:2400006E:random number generator::error retrieving entropy`. The issue was present in version 20.1. [#8956](https://github.com/ClickHouse/ClickHouse/pull/8956) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Update Dockerfile for server [#8893](https://github.com/ClickHouse/ClickHouse/pull/8893) ([Ilya Mazaev](https://github.com/ne-ray))
+* Minor fixes in build-gcc-from-sources script [#8774](https://github.com/ClickHouse/ClickHouse/pull/8774) ([Michael Nacharov](https://github.com/mnach))
+* Replace `numbers` with `zeros` in perftests where the `number` column is not used. This will lead to cleaner test results. [#9600](https://github.com/ClickHouse/ClickHouse/pull/9600) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+* Fix stack overflow issue when using initializer_list in Column constructors. [#9367](https://github.com/ClickHouse/ClickHouse/pull/9367) ([Deleted user](https://github.com/ghost))
+* Upgrade librdkafka to v1.3.0. Enable bundled `rdkafka` and `gsasl` libraries on Mac OS X. [#9000](https://github.com/ClickHouse/ClickHouse/pull/9000) ([Andrew Onyshchuk](https://github.com/oandrew))
+* Build fix on GCC 9.2.0 [#9306](https://github.com/ClickHouse/ClickHouse/pull/9306) ([vxider](https://github.com/Vxider))
+
+
+## ClickHouse release v20.1
+
+### ClickHouse release v20.1.8.41, 2020-03-20
+
+#### Bug Fix
+* Fix possible permanent `Cannot schedule a task` error (due to unhandled exception in `ParallelAggregatingBlockInputStream::Handler::onFinish/onFinishThread`). This fixes [#6833](https://github.com/ClickHouse/ClickHouse/issues/6833). [#9154](https://github.com/ClickHouse/ClickHouse/pull/9154) ([Azat Khuzhin](https://github.com/azat))
+* Fix excessive memory consumption in `ALTER` queries (mutations). This fixes [#9533](https://github.com/ClickHouse/ClickHouse/issues/9533) and [#9670](https://github.com/ClickHouse/ClickHouse/issues/9670). [#9754](https://github.com/ClickHouse/ClickHouse/pull/9754) ([alesapin](https://github.com/alesapin))
+* Fix bug in backquoting in external dictionaries DDL. This fixes [#9619](https://github.com/ClickHouse/ClickHouse/issues/9619). [#9734](https://github.com/ClickHouse/ClickHouse/pull/9734) ([alesapin](https://github.com/alesapin))
+
+### ClickHouse release v20.1.7.38, 2020-03-18
+
+#### Bug Fix
+* Fixed incorrect internal function names for `sumKahan` and `sumWithOverflow`. It led to an exception when using these functions in remote queries. [#9636](https://github.com/ClickHouse/ClickHouse/pull/9636) ([Azat Khuzhin](https://github.com/azat)). This issue was in all ClickHouse releases.
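+A minimal sketch of the kind of remote query that used to throw; it relies only on the built-in `system.one` table, so everything here is illustrative rather than taken from the fix itself:
+```sql
+-- Aggregation through the remote() table function previously failed for
+-- sumKahan/sumWithOverflow because their internal names were registered incorrectly.
+SELECT sumWithOverflow(dummy) FROM remote('127.0.0.1', system.one);
+SELECT sumKahan(toFloat64(dummy)) FROM remote('127.0.0.1', system.one);
+```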
+* Allow `ALTER ON CLUSTER` of `Distributed` tables with internal replication. This fixes [#3268](https://github.com/ClickHouse/ClickHouse/issues/3268). [#9617](https://github.com/ClickHouse/ClickHouse/pull/9617) ([shinoi2](https://github.com/shinoi2)). This issue was in all ClickHouse releases.
+* Fix possible exceptions `Size of filter doesn't match size of column` and `Invalid number of rows in Chunk` in `MergeTreeRangeReader`. They could appear while executing `PREWHERE` in some cases. Fixes [#9132](https://github.com/ClickHouse/ClickHouse/issues/9132). [#9612](https://github.com/ClickHouse/ClickHouse/pull/9612) ([Anton Popov](https://github.com/CurtizJ))
+* Fixed the issue: timezone was not preserved if you write a simple arithmetic expression like `time + 1` (in contrast to an expression like `time + INTERVAL 1 SECOND`). This fixes [#5743](https://github.com/ClickHouse/ClickHouse/issues/5743). [#9323](https://github.com/ClickHouse/ClickHouse/pull/9323) ([alexey-milovidov](https://github.com/alexey-milovidov)). This issue was in all ClickHouse releases.
+* Now it's not possible to create or add columns with simple cyclic aliases like `a DEFAULT b, b DEFAULT a`. [#9603](https://github.com/ClickHouse/ClickHouse/pull/9603) ([alesapin](https://github.com/alesapin))
+* Fixed the issue when padding at the end of a base64-encoded value can be malformed. Update base64 library. This fixes [#9491](https://github.com/ClickHouse/ClickHouse/issues/9491), closes [#9492](https://github.com/ClickHouse/ClickHouse/issues/9492) [#9500](https://github.com/ClickHouse/ClickHouse/pull/9500) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Fix data race at destruction of `Poco::HTTPServer`. It could happen when the server is started and immediately shut down. [#9468](https://github.com/ClickHouse/ClickHouse/pull/9468) ([Anton Popov](https://github.com/CurtizJ))
+* Fix possible crash/wrong number of rows in `LIMIT n WITH TIES` when there are a lot of rows equal to the n-th row. [#9464](https://github.com/ClickHouse/ClickHouse/pull/9464) ([tavplubix](https://github.com/tavplubix))
+* Fix possible mismatched checksums with column TTLs. [#9451](https://github.com/ClickHouse/ClickHouse/pull/9451) ([Anton Popov](https://github.com/CurtizJ))
+* Fix crash when a user tries to `ALTER MODIFY SETTING` for old-formatted `MergeTree` table engines family. [#9435](https://github.com/ClickHouse/ClickHouse/pull/9435) ([alesapin](https://github.com/alesapin))
+* Now we will try to finalize mutations more frequently. [#9427](https://github.com/ClickHouse/ClickHouse/pull/9427) ([alesapin](https://github.com/alesapin))
+* Fix replication protocol incompatibility introduced in [#8598](https://github.com/ClickHouse/ClickHouse/issues/8598). [#9412](https://github.com/ClickHouse/ClickHouse/pull/9412) ([alesapin](https://github.com/alesapin))
+* Fix `not(has())` for the bloom_filter index of array types. [#9407](https://github.com/ClickHouse/ClickHouse/pull/9407) ([achimbab](https://github.com/achimbab))
+* Fixed the behaviour of `match` and `extract` functions when haystack has zero bytes. The behaviour was wrong when haystack was constant. This fixes [#9160](https://github.com/ClickHouse/ClickHouse/issues/9160) [#9163](https://github.com/ClickHouse/ClickHouse/pull/9163) ([alexey-milovidov](https://github.com/alexey-milovidov)) [#9345](https://github.com/ClickHouse/ClickHouse/pull/9345) ([alexey-milovidov](https://github.com/alexey-milovidov))
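+A minimal sketch of the affected constant-haystack case (the zero byte is written with the `\0` escape, which ClickHouse string literals support):
+```sql
+-- The haystack 'a\0b' contains a zero byte; with a constant haystack,
+-- matching used to stop at '\0' and return a wrong result.
+SELECT match('a\0b', 'b'), extract('a\0b', 'b.*');
+```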
+
+#### Build/Testing/Packaging Improvement
+
+* Exception handling now works correctly on Windows Subsystem for Linux. See https://github.com/ClickHouse-Extras/libunwind/pull/3 This fixes [#6480](https://github.com/ClickHouse/ClickHouse/issues/6480) [#9564](https://github.com/ClickHouse/ClickHouse/pull/9564) ([sobolevsv](https://github.com/sobolevsv))
+
+
+### ClickHouse release v20.1.6.30, 2020-03-05
+
+#### Bug Fix
+
+* Fix data incompatibility when compressed with `T64` codec.
+[#9039](https://github.com/ClickHouse/ClickHouse/pull/9039) [(abyss7)](https://github.com/abyss7)
+* Fix order of ranges while reading from MergeTree table in one thread. Fixes [#8964](https://github.com/ClickHouse/ClickHouse/issues/8964).
+[#9050](https://github.com/ClickHouse/ClickHouse/pull/9050) [(CurtizJ)](https://github.com/CurtizJ)
+* Fix possible segfault in `MergeTreeRangeReader`, while executing `PREWHERE`. Fixes [#9064](https://github.com/ClickHouse/ClickHouse/issues/9064).
+[#9106](https://github.com/ClickHouse/ClickHouse/pull/9106) [(CurtizJ)](https://github.com/CurtizJ)
+* Fix `reinterpretAsFixedString` to return `FixedString` instead of `String`.
+[#9052](https://github.com/ClickHouse/ClickHouse/pull/9052) [(oandrew)](https://github.com/oandrew)
+* Fix `joinGet` with nullable return types. Fixes [#8919](https://github.com/ClickHouse/ClickHouse/issues/8919)
+[#9014](https://github.com/ClickHouse/ClickHouse/pull/9014) [(amosbird)](https://github.com/amosbird)
+* Fix fuzz test and incorrect behaviour of bitTestAll/bitTestAny functions.
+[#9143](https://github.com/ClickHouse/ClickHouse/pull/9143) [(alexey-milovidov)](https://github.com/alexey-milovidov)
+* Fix the behaviour of match and extract functions when haystack has zero bytes. The behaviour was wrong when haystack was constant. Fixes [#9160](https://github.com/ClickHouse/ClickHouse/issues/9160)
+[#9163](https://github.com/ClickHouse/ClickHouse/pull/9163) [(alexey-milovidov)](https://github.com/alexey-milovidov)
+* Fixed execution of inverted predicates when a non-strictly monotonic functional index is used. Fixes [#9034](https://github.com/ClickHouse/ClickHouse/issues/9034)
+[#9223](https://github.com/ClickHouse/ClickHouse/pull/9223) [(Akazz)](https://github.com/Akazz)
+* Allow to rewrite `CROSS` to `INNER JOIN` if there's `[NOT] LIKE` operator in `WHERE` section. Fixes [#9191](https://github.com/ClickHouse/ClickHouse/issues/9191)
+[#9229](https://github.com/ClickHouse/ClickHouse/pull/9229) [(4ertus2)](https://github.com/4ertus2)
+* Allow the first column(s) in a table with Log engine to be an alias.
+[#9231](https://github.com/ClickHouse/ClickHouse/pull/9231) [(abyss7)](https://github.com/abyss7)
+* Allow comma join with `IN()` inside. Fixes [#7314](https://github.com/ClickHouse/ClickHouse/issues/7314).
+[#9251](https://github.com/ClickHouse/ClickHouse/pull/9251) [(4ertus2)](https://github.com/4ertus2)
+* Improve `ALTER MODIFY/ADD` queries logic. Now you cannot `ADD` a column without a type, `MODIFY` default expression doesn't change the type of the column and `MODIFY` type doesn't lose the default expression value. Fixes [#8669](https://github.com/ClickHouse/ClickHouse/issues/8669).
+[#9227](https://github.com/ClickHouse/ClickHouse/pull/9227) [(alesapin)](https://github.com/alesapin)
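+A short sketch of what the stricter `ALTER` logic means in practice (hypothetical table `t`):
+```sql
+CREATE TABLE t (a Int32, b Int32 DEFAULT 1) ENGINE = MergeTree ORDER BY a;
+ALTER TABLE t ADD COLUMN c DEFAULT 0;       -- now rejected: an explicit type is required
+ALTER TABLE t ADD COLUMN c Int32 DEFAULT 0; -- OK
+ALTER TABLE t MODIFY COLUMN b DEFAULT 2;    -- changes only the default, the type stays Int32
+ALTER TABLE t MODIFY COLUMN b Int64;        -- changes only the type, DEFAULT 2 is kept
+```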
+* Fix mutations finalization, when an already finished mutation can have status `is_done=0`.
+[#9217](https://github.com/ClickHouse/ClickHouse/pull/9217) [(alesapin)](https://github.com/alesapin)
+* Support "Processors" pipeline for system.numbers and system.numbers_mt. This also fixes the bug when `max_execution_time` is not respected.
+[#7796](https://github.com/ClickHouse/ClickHouse/pull/7796) [(KochetovNicolai)](https://github.com/KochetovNicolai)
+* Fix wrong counting of `DictCacheKeysRequestedFound` metric.
+[#9411](https://github.com/ClickHouse/ClickHouse/pull/9411) [(nikitamikhaylov)](https://github.com/nikitamikhaylov)
+* Added a check for storage policy in `ATTACH PARTITION FROM`, `REPLACE PARTITION`, `MOVE TO TABLE` which otherwise could make data of part inaccessible after restart and prevent ClickHouse from starting.
+[#9383](https://github.com/ClickHouse/ClickHouse/pull/9383) [(excitoon)](https://github.com/excitoon)
+* Fixed UBSan report in `MergeTreeIndexSet`. This fixes [#9250](https://github.com/ClickHouse/ClickHouse/issues/9250)
+[#9365](https://github.com/ClickHouse/ClickHouse/pull/9365) [(alexey-milovidov)](https://github.com/alexey-milovidov)
+* Fix possible data race in BlockIO.
+[#9356](https://github.com/ClickHouse/ClickHouse/pull/9356) [(KochetovNicolai)](https://github.com/KochetovNicolai)
+* Support for `UInt64` numbers that don't fit in Int64 in JSON-related functions. Update `SIMDJSON` to master. This fixes [#9209](https://github.com/ClickHouse/ClickHouse/issues/9209)
+[#9344](https://github.com/ClickHouse/ClickHouse/pull/9344) [(alexey-milovidov)](https://github.com/alexey-milovidov)
+* Fix the issue when the amount of free space is not calculated correctly if the data directory is mounted to a separate device. For the default disk, calculate the free space from the data subdirectory. This fixes [#7441](https://github.com/ClickHouse/ClickHouse/issues/7441)
+[#9257](https://github.com/ClickHouse/ClickHouse/pull/9257) [(millb)](https://github.com/millb)
+* Fix the issue when TLS connections may fail with the message `OpenSSL SSL_read: error:14094438:SSL routines:ssl3_read_bytes:tlsv1 alert internal error and SSL Exception: error:2400006E:random number generator::error retrieving entropy.` Update OpenSSL to upstream master.
+[#8956](https://github.com/ClickHouse/ClickHouse/pull/8956) [(alexey-milovidov)](https://github.com/alexey-milovidov)
+* When executing `CREATE` query, fold constant expressions in storage engine arguments. Replace empty database name with current database. Fixes [#6508](https://github.com/ClickHouse/ClickHouse/issues/6508), [#3492](https://github.com/ClickHouse/ClickHouse/issues/3492). Also fix check for local address in ClickHouseDictionarySource.
+[#9262](https://github.com/ClickHouse/ClickHouse/pull/9262) [(tavplubix)](https://github.com/tavplubix)
+* Fix segfault in `StorageMerge`, which can happen when reading from StorageFile.
+[#9387](https://github.com/ClickHouse/ClickHouse/pull/9387) [(tavplubix)](https://github.com/tavplubix)
+* Prevent losing data in `Kafka` in rare cases when an exception happens after reading suffix but before commit. Fixes [#9378](https://github.com/ClickHouse/ClickHouse/issues/9378). Related: [#7175](https://github.com/ClickHouse/ClickHouse/issues/7175)
+[#9507](https://github.com/ClickHouse/ClickHouse/pull/9507) [(filimonov)](https://github.com/filimonov)
+* Fix bug leading to server termination when trying to use/drop `Kafka` table created with wrong parameters. Fixes [#9494](https://github.com/ClickHouse/ClickHouse/issues/9494). Incorporates [#9507](https://github.com/ClickHouse/ClickHouse/issues/9507).
+[#9513](https://github.com/ClickHouse/ClickHouse/pull/9513) [(filimonov)](https://github.com/filimonov)
+
+#### New Feature
+* Add `deduplicate_blocks_in_dependent_materialized_views` option to control the behaviour of idempotent inserts into tables with materialized views. This new feature was added to the bugfix release by a special request from Altinity.
+[#9070](https://github.com/ClickHouse/ClickHouse/pull/9070) [(urykhy)](https://github.com/urykhy)
+
+### ClickHouse release v20.1.2.4, 2020-01-22
+
+#### Backward Incompatible Change
+* Make the setting `merge_tree_uniform_read_distribution` obsolete. The server still recognizes this setting but it has no effect. [#8308](https://github.com/ClickHouse/ClickHouse/pull/8308) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Changed return type of the function `greatCircleDistance` to `Float32` because now the result of calculation is `Float32`. [#7993](https://github.com/ClickHouse/ClickHouse/pull/7993) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Now it's expected that query parameters are represented in "escaped" format. For example, to pass the string `a<tab>b` (with a literal tab character) you have to write `a\tb` or `a\<tab>b` and respectively, `a%5Ctb` or `a%5C%09b` in URL. This is needed to add the possibility to pass NULL as `\N`. This fixes [#7488](https://github.com/ClickHouse/ClickHouse/issues/7488). [#8517](https://github.com/ClickHouse/ClickHouse/pull/8517) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Enable `use_minimalistic_part_header_in_zookeeper` setting for `ReplicatedMergeTree` by default. This will significantly reduce the amount of data stored in ZooKeeper. This setting is supported since version 19.1 and we already use it in production in multiple services without any issues for more than half a year. Disable this setting if you have a chance to downgrade to versions older than 19.1. [#6850](https://github.com/ClickHouse/ClickHouse/pull/6850) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Data skipping indices are production ready and enabled by default. The settings `allow_experimental_data_skipping_indices`, `allow_experimental_cross_to_join_conversion` and `allow_experimental_multiple_joins_emulation` are now obsolete and do nothing. [#7974](https://github.com/ClickHouse/ClickHouse/pull/7974) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Add new `ANY JOIN` logic for `StorageJoin` consistent with `JOIN` operation. To upgrade without changes in behaviour you need to add `SETTINGS any_join_distinct_right_table_keys = 1` to Engine Join tables metadata or recreate these tables after upgrade. [#8400](https://github.com/ClickHouse/ClickHouse/pull/8400) ([Artem Zuikov](https://github.com/4ertus2))
+* Require server to be restarted to apply the changes in logging configuration. This is a temporary workaround to avoid the bug where the server logs to a deleted log file (see [#8696](https://github.com/ClickHouse/ClickHouse/issues/8696)). [#8707](https://github.com/ClickHouse/ClickHouse/pull/8707) ([Alexander Kuzmenkov](https://github.com/akuzm))
+
+#### New Feature
+* Added information about part paths to `system.merges`. [#8043](https://github.com/ClickHouse/ClickHouse/pull/8043) ([Vladimir Chebotarev](https://github.com/excitoon))
+* Add ability to execute `SYSTEM RELOAD DICTIONARY` query in `ON CLUSTER` mode. [#8288](https://github.com/ClickHouse/ClickHouse/pull/8288) ([Guillaume Tassery](https://github.com/YiuRULE))
+* Add ability to execute `CREATE DICTIONARY` queries in `ON CLUSTER` mode. [#8163](https://github.com/ClickHouse/ClickHouse/pull/8163) ([alesapin](https://github.com/alesapin))
+* Now user's profile in `users.xml` can inherit multiple profiles. [#8343](https://github.com/ClickHouse/ClickHouse/pull/8343) ([Mikhail f. Shiryaev](https://github.com/Felixoid))
+* Added `system.stack_trace` table that allows to look at stack traces of all server threads. This is useful for developers to introspect server state. This fixes [#7576](https://github.com/ClickHouse/ClickHouse/issues/7576). [#8344](https://github.com/ClickHouse/ClickHouse/pull/8344) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Add `DateTime64` data type with configurable sub-second precision. [#7170](https://github.com/ClickHouse/ClickHouse/pull/7170) ([Vasily Nemkov](https://github.com/Enmk))
+* Add table function `clusterAllReplicas` which allows to query all the nodes in the cluster. [#8493](https://github.com/ClickHouse/ClickHouse/pull/8493) ([kiran sunkari](https://github.com/kiransunkari))
+* Add aggregate function `categoricalInformationValue` which calculates the information value of a discrete feature. [#8117](https://github.com/ClickHouse/ClickHouse/pull/8117) ([hcz](https://github.com/hczhcz))
+* Speed up parsing of data files in `CSV`, `TSV` and `JSONEachRow` format by doing it in parallel. [#7780](https://github.com/ClickHouse/ClickHouse/pull/7780) ([Alexander Kuzmenkov](https://github.com/akuzm))
+* Add function `bankerRound` which performs banker's rounding. [#8112](https://github.com/ClickHouse/ClickHouse/pull/8112) ([hcz](https://github.com/hczhcz))
+* Support more languages in embedded dictionary for region names: 'ru', 'en', 'ua', 'uk', 'by', 'kz', 'tr', 'de', 'uz', 'lv', 'lt', 'et', 'pt', 'he', 'vi'. [#8189](https://github.com/ClickHouse/ClickHouse/pull/8189) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Improvements in consistency of `ANY JOIN` logic. Now `t1 ANY LEFT JOIN t2` equals `t2 ANY RIGHT JOIN t1`. [#7665](https://github.com/ClickHouse/ClickHouse/pull/7665) ([Artem Zuikov](https://github.com/4ertus2))
+* Add setting `any_join_distinct_right_table_keys` which enables old behaviour for `ANY INNER JOIN`. [#7665](https://github.com/ClickHouse/ClickHouse/pull/7665) ([Artem Zuikov](https://github.com/4ertus2))
+* Add new `SEMI` and `ANTI JOIN`. Old `ANY INNER JOIN` behaviour now available as `SEMI LEFT JOIN`. [#7665](https://github.com/ClickHouse/ClickHouse/pull/7665) ([Artem Zuikov](https://github.com/4ertus2))
+* Added `Distributed` format for `File` engine and `file` table function which allows to read from `.bin` files generated by asynchronous inserts into `Distributed` table. [#8535](https://github.com/ClickHouse/ClickHouse/pull/8535) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+* Add optional reset column argument for `runningAccumulate` which allows to reset aggregation results for each new key value. [#8326](https://github.com/ClickHouse/ClickHouse/pull/8326) ([Sergey Kononenko](https://github.com/kononencheg))
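+A minimal sketch of the new optional second argument, where the grouping key doubles as the reset column (self-contained, built on `numbers()`):
+```sql
+-- The running sum restarts every time the value of grp changes.
+SELECT grp, runningAccumulate(state, grp) AS acc
+FROM
+(
+    SELECT intDiv(number, 3) AS grp, sumState(number) AS state
+    FROM numbers(9)
+    GROUP BY number
+    ORDER BY number
+);
+```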
+* Add ability to use ClickHouse as Prometheus endpoint. [#7900](https://github.com/ClickHouse/ClickHouse/pull/7900) ([vdimir](https://github.com/Vdimir))
+* Add section `<remote_url_allow_hosts>` in `config.xml` which restricts allowed hosts for remote table engines and table functions `URL`, `S3`, `HDFS`. [#7154](https://github.com/ClickHouse/ClickHouse/pull/7154) ([Mikhail Korotov](https://github.com/millb))
+* Added function `greatCircleAngle` which calculates the distance on a sphere in degrees. [#8105](https://github.com/ClickHouse/ClickHouse/pull/8105) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Changed Earth radius to be consistent with H3 library. [#8105](https://github.com/ClickHouse/ClickHouse/pull/8105) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Added `JSONCompactEachRow` and `JSONCompactEachRowWithNamesAndTypes` formats for input and output. [#7841](https://github.com/ClickHouse/ClickHouse/pull/7841) ([Mikhail Korotov](https://github.com/millb))
+* Added feature for file-related table engines and table functions (`File`, `S3`, `URL`, `HDFS`) which allows to read and write `gzip` files based on additional engine parameter or file extension. [#7840](https://github.com/ClickHouse/ClickHouse/pull/7840) ([Andrey Bodrov](https://github.com/apbodrov))
+* Added the `randomASCII(length)` function, generating a string with a random set of [ASCII](https://en.wikipedia.org/wiki/ASCII#Printable_characters) printable characters. [#8401](https://github.com/ClickHouse/ClickHouse/pull/8401) ([BayoNet](https://github.com/BayoNet))
+* Added function `JSONExtractArrayRaw` which returns an array of unparsed JSON array elements from a `JSON` string. [#8081](https://github.com/ClickHouse/ClickHouse/pull/8081) ([Oleg Matrokhin](https://github.com/errx))
+* Add `arrayZip` function which allows to combine multiple arrays of equal length into one array of tuples. [#8149](https://github.com/ClickHouse/ClickHouse/pull/8149) ([Winter Zhang](https://github.com/zhang2014))
+* Add ability to move data between disks according to configured `TTL`-expressions for `*MergeTree` table engines family. [#8140](https://github.com/ClickHouse/ClickHouse/pull/8140) ([Vladimir Chebotarev](https://github.com/excitoon))
+* Added new aggregate function `avgWeighted` which allows to calculate weighted average. [#7898](https://github.com/ClickHouse/ClickHouse/pull/7898) ([Andrey Bodrov](https://github.com/apbodrov))
+* Now parallel parsing is enabled by default for `TSV`, `TSKV`, `CSV` and `JSONEachRow` formats. [#7894](https://github.com/ClickHouse/ClickHouse/pull/7894) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+* Add several geo functions from `H3` library: `h3GetResolution`, `h3EdgeAngle`, `h3EdgeLength`, `h3IsValid` and `h3kRing`. [#8034](https://github.com/ClickHouse/ClickHouse/pull/8034) ([Konstantin Malanchev](https://github.com/hombit))
+* Added support for brotli (`br`) compression in file-related storages and table functions. This fixes [#8156](https://github.com/ClickHouse/ClickHouse/issues/8156). [#8526](https://github.com/ClickHouse/ClickHouse/pull/8526) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Add `groupBit*` functions for the `SimpleAggregateFunction` type. [#8485](https://github.com/ClickHouse/ClickHouse/pull/8485) ([Guillaume Tassery](https://github.com/YiuRULE))
+
+#### Bug Fix
+* Fix rename of tables with `Distributed` engine. Fixes issue [#7868](https://github.com/ClickHouse/ClickHouse/issues/7868). [#8306](https://github.com/ClickHouse/ClickHouse/pull/8306) ([tavplubix](https://github.com/tavplubix))
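+A minimal sketch of the operation that is fixed (hypothetical database and table names):
+```sql
+-- Renaming a Distributed table used to misbehave; this now works as expected.
+RENAME TABLE default.hits_dist TO default.hits_dist_old;
+```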
+* Now dictionaries support `EXPRESSION` for attributes as an arbitrary string in a non-ClickHouse SQL dialect. [#8098](https://github.com/ClickHouse/ClickHouse/pull/8098) ([alesapin](https://github.com/alesapin))
+* Fix broken `INSERT SELECT FROM mysql(...)` query. This fixes [#8070](https://github.com/ClickHouse/ClickHouse/issues/8070) and [#7960](https://github.com/ClickHouse/ClickHouse/issues/7960). [#8234](https://github.com/ClickHouse/ClickHouse/pull/8234) ([tavplubix](https://github.com/tavplubix))
+* Fix error "Mismatch column sizes" when inserting default `Tuple` from `JSONEachRow`. This fixes [#5653](https://github.com/ClickHouse/ClickHouse/issues/5653). [#8606](https://github.com/ClickHouse/ClickHouse/pull/8606) ([tavplubix](https://github.com/tavplubix))
+* Now an exception will be thrown in case of using `WITH TIES` alongside `LIMIT BY`. Also add ability to use `TOP` with `LIMIT BY`. This fixes [#7472](https://github.com/ClickHouse/ClickHouse/issues/7472). [#7637](https://github.com/ClickHouse/ClickHouse/pull/7637) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+* Fix unintended dependency on a fresh glibc version in `clickhouse-odbc-bridge` binary. [#8046](https://github.com/ClickHouse/ClickHouse/pull/8046) ([Amos Bird](https://github.com/amosbird))
+* Fix bug in check function of `*MergeTree` engines family. Now it doesn't fail in case when we have an equal number of rows in the last granule and the last mark (non-final). [#8047](https://github.com/ClickHouse/ClickHouse/pull/8047) ([alesapin](https://github.com/alesapin))
+* Fix insert into `Enum*` columns after `ALTER` query, when the underlying numeric type is equal to the type specified in the table. This fixes [#7836](https://github.com/ClickHouse/ClickHouse/issues/7836). [#7908](https://github.com/ClickHouse/ClickHouse/pull/7908) ([Anton Popov](https://github.com/CurtizJ))
+* Allowed non-constant negative "size" argument for function `substring`. It was not allowed by mistake. This fixes [#4832](https://github.com/ClickHouse/ClickHouse/issues/4832). [#7703](https://github.com/ClickHouse/ClickHouse/pull/7703) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Fix parsing bug when a wrong number of arguments is passed to `(O|J)DBC` table engine. [#7709](https://github.com/ClickHouse/ClickHouse/pull/7709) ([alesapin](https://github.com/alesapin))
+* Use the command name of the running clickhouse process when sending logs to syslog. In previous versions, an empty string was used instead of the command name. [#8460](https://github.com/ClickHouse/ClickHouse/pull/8460) ([Michael Nacharov](https://github.com/mnach))
+* Fix check of allowed hosts for `localhost`. This PR fixes the solution provided in [#8241](https://github.com/ClickHouse/ClickHouse/pull/8241). [#8342](https://github.com/ClickHouse/ClickHouse/pull/8342) ([Vitaly Baranov](https://github.com/vitlibar))
+* Fix rare crash in `argMin` and `argMax` functions for long string arguments, when the result is used in `runningAccumulate` function. This fixes [#8325](https://github.com/ClickHouse/ClickHouse/issues/8325) [#8341](https://github.com/ClickHouse/ClickHouse/pull/8341) ([dinosaur](https://github.com/769344359))
+* Fix memory overcommit for tables with `Buffer` engine. [#8345](https://github.com/ClickHouse/ClickHouse/pull/8345) ([Azat Khuzhin](https://github.com/azat))
+* Fixed potential bug in functions that can take `NULL` as one of the arguments and return non-NULL. [#8196](https://github.com/ClickHouse/ClickHouse/pull/8196) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Better metrics calculations in thread pool for background processes for `MergeTree` table engines. [#8194](https://github.com/ClickHouse/ClickHouse/pull/8194) ([Vladimir Chebotarev](https://github.com/excitoon))
+* Fix function `IN` inside `WHERE` statement when row-level table filter is present. Fixes [#6687](https://github.com/ClickHouse/ClickHouse/issues/6687) [#8357](https://github.com/ClickHouse/ClickHouse/pull/8357) ([Ivan](https://github.com/abyss7))
+* Now an exception is thrown if the integral value is not parsed completely for settings values. [#7678](https://github.com/ClickHouse/ClickHouse/pull/7678) ([Mikhail Korotov](https://github.com/millb))
+* Fix exception when an aggregate function is used in a query to a distributed table with more than two local shards. [#8164](https://github.com/ClickHouse/ClickHouse/pull/8164) ([小路](https://github.com/nicelulu))
+* Now bloom filter can handle zero-length arrays and doesn't perform redundant calculations. [#8242](https://github.com/ClickHouse/ClickHouse/pull/8242) ([achimbab](https://github.com/achimbab))
+* Fixed checking if a client host is allowed by matching the client host to `host_regexp` specified in `users.xml`. [#8241](https://github.com/ClickHouse/ClickHouse/pull/8241) ([Vitaly Baranov](https://github.com/vitlibar))
+* Relax ambiguous column check that leads to false positives in multiple `JOIN ON` sections. [#8385](https://github.com/ClickHouse/ClickHouse/pull/8385) ([Artem Zuikov](https://github.com/4ertus2))
+* Fixed possible server crash (`std::terminate`) when the server cannot send or write data in `JSON` or `XML` format with values of `String` data type (that require `UTF-8` validation) or when compressing result data with Brotli algorithm or in some other rare cases. This fixes [#7603](https://github.com/ClickHouse/ClickHouse/issues/7603) [#8384](https://github.com/ClickHouse/ClickHouse/pull/8384) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Fix race condition in `StorageDistributedDirectoryMonitor` found by CI. This fixes [#8364](https://github.com/ClickHouse/ClickHouse/issues/8364). [#8383](https://github.com/ClickHouse/ClickHouse/pull/8383) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+* Now background merges in `*MergeTree` table engines family preserve storage policy volume order more accurately. [#8549](https://github.com/ClickHouse/ClickHouse/pull/8549) ([Vladimir Chebotarev](https://github.com/excitoon))
+* Now table engine `Kafka` works properly with `Native` format. This fixes [#6731](https://github.com/ClickHouse/ClickHouse/issues/6731) [#7337](https://github.com/ClickHouse/ClickHouse/issues/7337) [#8003](https://github.com/ClickHouse/ClickHouse/issues/8003). [#8016](https://github.com/ClickHouse/ClickHouse/pull/8016) ([filimonov](https://github.com/filimonov))
+* Fixed formats with headers (like `CSVWithNames`) which were throwing an exception about EOF for table engine `Kafka`. [#8016](https://github.com/ClickHouse/ClickHouse/pull/8016) ([filimonov](https://github.com/filimonov))
+* Fixed a bug with making a set from a subquery in the right part of `IN` section. This fixes [#5767](https://github.com/ClickHouse/ClickHouse/issues/5767) and [#2542](https://github.com/ClickHouse/ClickHouse/issues/2542). [#7755](https://github.com/ClickHouse/ClickHouse/pull/7755) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+* Fix possible crash while reading from storage `File`. [#7756](https://github.com/ClickHouse/ClickHouse/pull/7756) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+* Fixed reading of the files in `Parquet` format containing columns of type `list`. [#8334](https://github.com/ClickHouse/ClickHouse/pull/8334) ([maxulan](https://github.com/maxulan))
+* Fix error `Not found column` for distributed queries with `PREWHERE` condition dependent on sampling key if `max_parallel_replicas > 1`. [#7913](https://github.com/ClickHouse/ClickHouse/pull/7913) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+* Fix error `Not found column` if query used `PREWHERE` dependent on table's alias and the result set was empty because of primary key condition. [#7911](https://github.com/ClickHouse/ClickHouse/pull/7911) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+* Fixed return type for functions `rand` and `randConstant` in case of `Nullable` argument. Now functions always return `UInt32` and never `Nullable(UInt32)`. [#8204](https://github.com/ClickHouse/ClickHouse/pull/8204) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+* Disabled predicate push-down for `WITH FILL` expression. This fixes [#7784](https://github.com/ClickHouse/ClickHouse/issues/7784). [#7789](https://github.com/ClickHouse/ClickHouse/pull/7789) ([Winter Zhang](https://github.com/zhang2014))
+* Fixed incorrect `count()` result for `SummingMergeTree` when `FINAL` section is used. [#3280](https://github.com/ClickHouse/ClickHouse/issues/3280) [#7786](https://github.com/ClickHouse/ClickHouse/pull/7786) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+* Fix possible incorrect result for constant functions from remote servers. It happened for queries with functions like `version()`, `uptime()`, etc. which return different constant values for different servers. This fixes [#7666](https://github.com/ClickHouse/ClickHouse/issues/7666). [#7689](https://github.com/ClickHouse/ClickHouse/pull/7689) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+* Fix complicated bug in push-down predicate optimization which leads to wrong results. This fixes a lot of issues on push-down predicate optimization. [#8503](https://github.com/ClickHouse/ClickHouse/pull/8503) ([Winter Zhang](https://github.com/zhang2014))
+* Fix crash in `CREATE TABLE .. AS dictionary` query. [#8508](https://github.com/ClickHouse/ClickHouse/pull/8508) ([Azat Khuzhin](https://github.com/azat))
+* Several improvements to the ClickHouse grammar in `.g4` file. [#8294](https://github.com/ClickHouse/ClickHouse/pull/8294) ([taiyang-li](https://github.com/taiyang-li))
+* Fix bug that leads to crashes in `JOIN`s with tables with engine `Join`. This fixes [#7556](https://github.com/ClickHouse/ClickHouse/issues/7556) [#8254](https://github.com/ClickHouse/ClickHouse/issues/8254) [#7915](https://github.com/ClickHouse/ClickHouse/issues/7915) [#8100](https://github.com/ClickHouse/ClickHouse/issues/8100). [#8298](https://github.com/ClickHouse/ClickHouse/pull/8298) ([Artem Zuikov](https://github.com/4ertus2))
+* Fix redundant dictionaries reload on `CREATE DATABASE`. [#7916](https://github.com/ClickHouse/ClickHouse/pull/7916) ([Azat Khuzhin](https://github.com/azat))
+* Limit maximum number of streams for read from `StorageFile` and `StorageHDFS`. Fixes https://github.com/ClickHouse/ClickHouse/issues/7650. [#7981](https://github.com/ClickHouse/ClickHouse/pull/7981) ([alesapin](https://github.com/alesapin))
+* Fix bug in `ALTER ... MODIFY ... CODEC` query, when the user specifies both default expression and codec. Fixes [#8593](https://github.com/ClickHouse/ClickHouse/issues/8593). [#8614](https://github.com/ClickHouse/ClickHouse/pull/8614) ([alesapin](https://github.com/alesapin))
+* Fix error in background merge of columns with `SimpleAggregateFunction(LowCardinality)` type. [#8613](https://github.com/ClickHouse/ClickHouse/pull/8613) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+* Fixed type check in function `toDateTime64`. [#8375](https://github.com/ClickHouse/ClickHouse/pull/8375) ([Vasily Nemkov](https://github.com/Enmk))
+* Now the server does not crash on `LEFT` or `FULL JOIN` with the `Join` engine and unsupported `join_use_nulls` settings. [#8479](https://github.com/ClickHouse/ClickHouse/pull/8479) ([Artem Zuikov](https://github.com/4ertus2))
+* Now `DROP DICTIONARY IF EXISTS db.dict` query doesn't throw an exception if `db` doesn't exist. [#8185](https://github.com/ClickHouse/ClickHouse/pull/8185) ([Vitaly Baranov](https://github.com/vitlibar))
+* Fix possible crashes in table functions (`file`, `mysql`, `remote`) caused by usage of a reference to a removed `IStorage` object. Fix incorrect parsing of columns specified at insertion into table function. [#7762](https://github.com/ClickHouse/ClickHouse/pull/7762) ([tavplubix](https://github.com/tavplubix))
+* Ensure network is up before starting `clickhouse-server`. This fixes [#7507](https://github.com/ClickHouse/ClickHouse/issues/7507). [#8570](https://github.com/ClickHouse/ClickHouse/pull/8570) ([Zhichang Yu](https://github.com/yuzhichang))
+* Fix timeouts handling for secure connections, so queries don't hang indefinitely. This fixes [#8126](https://github.com/ClickHouse/ClickHouse/issues/8126). [#8128](https://github.com/ClickHouse/ClickHouse/pull/8128) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Fix `clickhouse-copier`'s redundant contention between concurrent workers. [#7816](https://github.com/ClickHouse/ClickHouse/pull/7816) ([Ding Xiang Fei](https://github.com/dingxiangfei2009))
+* Now mutations don't skip attached parts, even if their mutation version is larger than the current mutation version. [#7812](https://github.com/ClickHouse/ClickHouse/pull/7812) ([Zhichang Yu](https://github.com/yuzhichang)) [#8250](https://github.com/ClickHouse/ClickHouse/pull/8250) ([alesapin](https://github.com/alesapin))
+* Ignore redundant copies of `*MergeTree` data parts after move to another disk and server restart. [#7810](https://github.com/ClickHouse/ClickHouse/pull/7810) ([Vladimir Chebotarev](https://github.com/excitoon))
+* Fix crash in `FULL JOIN` with `LowCardinality` in `JOIN` key. [#8252](https://github.com/ClickHouse/ClickHouse/pull/8252) ([Artem Zuikov](https://github.com/4ertus2))
+* Forbid using a column name more than once in an insert query like `INSERT INTO tbl (x, y, x)`. This fixes [#5465](https://github.com/ClickHouse/ClickHouse/issues/5465), [#7681](https://github.com/ClickHouse/ClickHouse/issues/7681). [#7685](https://github.com/ClickHouse/ClickHouse/pull/7685) ([alesapin](https://github.com/alesapin))
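+A minimal sketch of the statement shape that is now rejected (hypothetical table `tbl` with columns `x` and `y`):
+```sql
+INSERT INTO tbl (x, y, x) VALUES (1, 2, 3); -- now fails: column x is listed twice
+INSERT INTO tbl (x, y) VALUES (1, 2);       -- OK
+```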
+* Added fallback for detecting the number of physical CPU cores for unknown CPUs (using the number of logical CPU cores). This fixes [#5239](https://github.com/ClickHouse/ClickHouse/issues/5239). [#7726](https://github.com/ClickHouse/ClickHouse/pull/7726) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Fix `There's no column` error for materialized and alias columns. [#8210](https://github.com/ClickHouse/ClickHouse/pull/8210) ([Artem Zuikov](https://github.com/4ertus2))
+* Fixed server crash when `EXISTS` query was used without `TABLE` or `DICTIONARY` qualifier. Just like `EXISTS t`. This fixes [#8172](https://github.com/ClickHouse/ClickHouse/issues/8172). This bug was introduced in version 19.17. [#8213](https://github.com/ClickHouse/ClickHouse/pull/8213) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Fix rare bug with error `"Sizes of columns doesn't match"` that might appear when using `SimpleAggregateFunction` column. [#7790](https://github.com/ClickHouse/ClickHouse/pull/7790) ([Boris Granveaud](https://github.com/bgranvea))
+* Fix bug where user with empty `allow_databases` got access to all databases (and same for `allow_dictionaries`). [#7793](https://github.com/ClickHouse/ClickHouse/pull/7793) ([DeifyTheGod](https://github.com/DeifyTheGod))
+* Fix client crash when the server has already disconnected from the client. [#8071](https://github.com/ClickHouse/ClickHouse/pull/8071) ([Azat Khuzhin](https://github.com/azat))
+* Fix `ORDER BY` behaviour in case of sorting by primary key prefix and non-primary-key suffix. [#7759](https://github.com/ClickHouse/ClickHouse/pull/7759) ([Anton Popov](https://github.com/CurtizJ))
+* Check if a qualified column is present in the table. This fixes [#6836](https://github.com/ClickHouse/ClickHouse/issues/6836). [#7758](https://github.com/ClickHouse/ClickHouse/pull/7758) ([Artem Zuikov](https://github.com/4ertus2))
+* Fixed the behavior when `ALTER MOVE` ran immediately after a merge finished and moved the superpart of the specified part. Fixes [#8103](https://github.com/ClickHouse/ClickHouse/issues/8103). [#8104](https://github.com/ClickHouse/ClickHouse/pull/8104) ([Vladimir Chebotarev](https://github.com/excitoon))
+* Fix possible server crash while using `UNION` with different number of columns. Fixes [#7279](https://github.com/ClickHouse/ClickHouse/issues/7279). [#7929](https://github.com/ClickHouse/ClickHouse/pull/7929) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+* Fix size of result substring for function `substr` with negative size. [#8589](https://github.com/ClickHouse/ClickHouse/pull/8589) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+* Now the server does not execute part mutation in `MergeTree` if there are not enough free threads in the background pool. [#8588](https://github.com/ClickHouse/ClickHouse/pull/8588) ([tavplubix](https://github.com/tavplubix))
+* Fix a minor typo in formatting `UNION ALL` AST. [#7999](https://github.com/ClickHouse/ClickHouse/pull/7999) ([litao91](https://github.com/litao91))
+* Fixed incorrect bloom filter results for negative numbers. This fixes [#8317](https://github.com/ClickHouse/ClickHouse/issues/8317). [#8566](https://github.com/ClickHouse/ClickHouse/pull/8566) ([Winter Zhang](https://github.com/zhang2014))
+* Fixed potential buffer overflow in decompress. A malicious user can pass fabricated compressed data that will cause a read past the end of the buffer. This issue was found by Eldar Zaitov from Yandex information security team. [#8404](https://github.com/ClickHouse/ClickHouse/pull/8404) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Fix incorrect result because of integer overflow in `arrayIntersect`. [#7777](https://github.com/ClickHouse/ClickHouse/pull/7777) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+* Now `OPTIMIZE TABLE` query will not wait for offline replicas to perform the operation. [#8314](https://github.com/ClickHouse/ClickHouse/pull/8314) ([javi santana](https://github.com/javisantana))
+* Fixed `ALTER TTL` parser for `Replicated*MergeTree` tables. [#8318](https://github.com/ClickHouse/ClickHouse/pull/8318) ([Vladimir Chebotarev](https://github.com/excitoon))
+* Fix communication between server and client, so the server reads temporary table info after query failure. [#8084](https://github.com/ClickHouse/ClickHouse/pull/8084) ([Azat Khuzhin](https://github.com/azat))
+* Fix `bitmapAnd` function error when intersecting an aggregated bitmap and a scalar bitmap. [#8082](https://github.com/ClickHouse/ClickHouse/pull/8082) ([Yue Huang](https://github.com/moon03432))
+* Refine the definition of `ZXid` according to the ZooKeeper Programmer's Guide, which fixes a bug in `clickhouse-cluster-copier`. [#8088](https://github.com/ClickHouse/ClickHouse/pull/8088) ([Ding Xiang Fei](https://github.com/dingxiangfei2009))
+* `odbc` table function now respects `external_table_functions_use_nulls` setting. [#7506](https://github.com/ClickHouse/ClickHouse/pull/7506) ([Vasily Nemkov](https://github.com/Enmk))
+* Fixed bug that led to a rare data race. [#8143](https://github.com/ClickHouse/ClickHouse/pull/8143) ([Alexander Kazakov](https://github.com/Akazz))
+* Now `SYSTEM RELOAD DICTIONARY` reloads a dictionary completely, ignoring `update_field`. This fixes [#7440](https://github.com/ClickHouse/ClickHouse/issues/7440). [#8037](https://github.com/ClickHouse/ClickHouse/pull/8037) ([Vitaly Baranov](https://github.com/vitlibar))
+* Add ability to check if a dictionary exists in a create query. [#8032](https://github.com/ClickHouse/ClickHouse/pull/8032) ([alesapin](https://github.com/alesapin))
+* Fix `Float*` parsing in `Values` format. This fixes [#7817](https://github.com/ClickHouse/ClickHouse/issues/7817). [#7870](https://github.com/ClickHouse/ClickHouse/pull/7870) ([tavplubix](https://github.com/tavplubix))
+* Fix crash when we cannot reserve space in some background operations of `*MergeTree` table engines family. [#7873](https://github.com/ClickHouse/ClickHouse/pull/7873) ([Vladimir Chebotarev](https://github.com/excitoon))
+* Fix crash of merge operation when table contains `SimpleAggregateFunction(LowCardinality)` column. This fixes [#8515](https://github.com/ClickHouse/ClickHouse/issues/8515). [#8522](https://github.com/ClickHouse/ClickHouse/pull/8522) ([Azat Khuzhin](https://github.com/azat))
+* Restore support of all ICU locales and add the ability to apply collations for constant expressions. Also add language name to `system.collations` table. [#8051](https://github.com/ClickHouse/ClickHouse/pull/8051) ([alesapin](https://github.com/alesapin))
+* Fix bug when external dictionaries with zero minimal lifetime (`LIFETIME(MIN 0 MAX N)`, `LIFETIME(N)`) don't update in background. [#7983](https://github.com/ClickHouse/ClickHouse/pull/7983) ([alesapin](https://github.com/alesapin))
+* Fix crash when an external dictionary with ClickHouse source has a subquery in the query. [#8351](https://github.com/ClickHouse/ClickHouse/pull/8351) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+* Fix incorrect parsing of file extension in table with engine `URL`. This fixes [#8157](https://github.com/ClickHouse/ClickHouse/issues/8157). [#8419](https://github.com/ClickHouse/ClickHouse/pull/8419) ([Andrey Bodrov](https://github.com/apbodrov))
+* Fix `CHECK TABLE` query for `*MergeTree` tables without key. Fixes [#7543](https://github.com/ClickHouse/ClickHouse/issues/7543). [#7979](https://github.com/ClickHouse/ClickHouse/pull/7979) ([alesapin](https://github.com/alesapin))
+* Fixed conversion of `Float64` to MySQL type. [#8079](https://github.com/ClickHouse/ClickHouse/pull/8079) ([Yuriy Baranov](https://github.com/yurriy))
+* Now if a table was not completely dropped because of a server crash, the server will try to restore and load it. [#8176](https://github.com/ClickHouse/ClickHouse/pull/8176) ([tavplubix](https://github.com/tavplubix))
+* Fixed crash in table function `file` while inserting into a file that doesn't exist. Now in this case the file would be created and then the insert would be processed. [#8177](https://github.com/ClickHouse/ClickHouse/pull/8177) ([Olga Khvostikova](https://github.com/stavrolia))
+* Fix rare deadlock which can happen when `trace_log` is enabled. [#7838](https://github.com/ClickHouse/ClickHouse/pull/7838) ([filimonov](https://github.com/filimonov))
+* Add ability to work with different types besides `Date` in `RangeHashed` external dictionary created from DDL query. Fixes [#7899](https://github.com/ClickHouse/ClickHouse/issues/7899). [#8275](https://github.com/ClickHouse/ClickHouse/pull/8275) ([alesapin](https://github.com/alesapin))
+* Fix crash when `now64()` is called with the result of another function. [#8270](https://github.com/ClickHouse/ClickHouse/pull/8270) ([Vasily Nemkov](https://github.com/Enmk))
+* Fixed bug with detecting client IP for connections through the MySQL wire protocol. [#7743](https://github.com/ClickHouse/ClickHouse/pull/7743) ([Dmitry Muzyka](https://github.com/dmitriy-myz))
+* Fix empty array handling in `arraySplit` function. This fixes [#7708](https://github.com/ClickHouse/ClickHouse/issues/7708). [#7747](https://github.com/ClickHouse/ClickHouse/pull/7747) ([hcz](https://github.com/hczhcz))
+* Fixed the issue when `pid-file` of another running `clickhouse-server` may be deleted. [#8487](https://github.com/ClickHouse/ClickHouse/pull/8487) ([Weiqing Xu](https://github.com/weiqxu))
+* Fix dictionary reload if it has `invalidate_query`, which stopped updates and some exception on previous update tries. [#8029](https://github.com/ClickHouse/ClickHouse/pull/8029) ([alesapin](https://github.com/alesapin))
+* Fixed error in function `arrayReduce` that may lead to "double free" and error in aggregate function combinator `Resample` that may lead to a memory leak. Added aggregate function `aggThrow`. This function can be used for testing purposes. [#8446](https://github.com/ClickHouse/ClickHouse/pull/8446) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Improvement
+* Improved logging when working with `S3` table engine. [#8251](https://github.com/ClickHouse/ClickHouse/pull/8251) ([Grigory Pervakov](https://github.com/GrigoryPervakov))
+* Printed help message when no arguments are passed when calling `clickhouse-local`. This fixes [#5335](https://github.com/ClickHouse/ClickHouse/issues/5335). [#8230](https://github.com/ClickHouse/ClickHouse/pull/8230) ([Andrey Nagorny](https://github.com/Melancholic))
+* Add setting `mutations_sync` which allows waiting for `ALTER UPDATE/DELETE` queries synchronously. [#8237](https://github.com/ClickHouse/ClickHouse/pull/8237) ([alesapin](https://github.com/alesapin))
+* Allow to set up relative `user_files_path` in `config.xml` (in the way similar to `format_schema_path`). [#7632](https://github.com/ClickHouse/ClickHouse/pull/7632) ([hcz](https://github.com/hczhcz))
+* Add exception for illegal types for conversion functions with `-OrZero` postfix. [#7880](https://github.com/ClickHouse/ClickHouse/pull/7880) ([Andrey Konyaev](https://github.com/akonyaev90))
+* Simplify format of the header of data sent to a shard in a distributed query. [#8044](https://github.com/ClickHouse/ClickHouse/pull/8044) ([Vitaly Baranov](https://github.com/vitlibar))
+* `Live View` table engine refactoring. [#8519](https://github.com/ClickHouse/ClickHouse/pull/8519) ([vzakaznikov](https://github.com/vzakaznikov))
+* Add additional checks for external dictionaries created from DDL-queries. [#8127](https://github.com/ClickHouse/ClickHouse/pull/8127) ([alesapin](https://github.com/alesapin))
+* Fix error `Column ... already exists` while using `FINAL` and `SAMPLE` together, e.g. `select count() from table final sample 1/2`. Fixes [#5186](https://github.com/ClickHouse/ClickHouse/issues/5186). [#7907](https://github.com/ClickHouse/ClickHouse/pull/7907) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+* Now the first argument of the `joinGet` function can be a table identifier. [#7707](https://github.com/ClickHouse/ClickHouse/pull/7707) ([Amos Bird](https://github.com/amosbird))
+* Allow using `MaterializedView` with subqueries above `Kafka` tables. [#8197](https://github.com/ClickHouse/ClickHouse/pull/8197) ([filimonov](https://github.com/filimonov))
+* Now background moves between disks run in a separate thread pool. [#7670](https://github.com/ClickHouse/ClickHouse/pull/7670) ([Vladimir Chebotarev](https://github.com/excitoon))
+* `SYSTEM RELOAD DICTIONARY` now executes synchronously. [#8240](https://github.com/ClickHouse/ClickHouse/pull/8240) ([Vitaly Baranov](https://github.com/vitlibar))
+* Stack traces now display physical addresses (offsets in object file) instead of virtual memory addresses (where the object file was loaded). That allows the use of `addr2line` when binary is position independent and ASLR is active. This fixes [#8360](https://github.com/ClickHouse/ClickHouse/issues/8360). [#8387](https://github.com/ClickHouse/ClickHouse/pull/8387) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Support new syntax for row-level security filters: `<table name='table_name'>…</table>`
. Fixes [#5779](https://github.com/ClickHouse/ClickHouse/issues/5779). [#8381](https://github.com/ClickHouse/ClickHouse/pull/8381) ([Ivan](https://github.com/abyss7))
+* Now `cityHash` function can work with `Decimal` and `UUID` types. Fixes [#5184](https://github.com/ClickHouse/ClickHouse/issues/5184). [#7693](https://github.com/ClickHouse/ClickHouse/pull/7693) ([Mikhail Korotov](https://github.com/millb))
+* Removed fixed index granularity (it was 1024) from system logs because it's obsolete after implementation of adaptive granularity. [#7698](https://github.com/ClickHouse/ClickHouse/pull/7698) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Enabled MySQL compatibility server when ClickHouse is compiled without SSL. [#7852](https://github.com/ClickHouse/ClickHouse/pull/7852) ([Yuriy Baranov](https://github.com/yurriy))
+* Now the server checksums distributed batches, which gives more verbose errors in case of corrupted data in a batch. [#7914](https://github.com/ClickHouse/ClickHouse/pull/7914) ([Azat Khuzhin](https://github.com/azat))
+* Support `DROP DATABASE`, `DETACH TABLE`, `DROP TABLE` and `ATTACH TABLE` for `MySQL` database engine. [#8202](https://github.com/ClickHouse/ClickHouse/pull/8202) ([Winter Zhang](https://github.com/zhang2014))
+* Add authentication in S3 table function and table engine. [#7623](https://github.com/ClickHouse/ClickHouse/pull/7623) ([Vladimir Chebotarev](https://github.com/excitoon))
+* Added check for extra parts of `MergeTree` at different disks, in order not to miss data parts at undefined disks. [#8118](https://github.com/ClickHouse/ClickHouse/pull/8118) ([Vladimir Chebotarev](https://github.com/excitoon))
+* Enable SSL support for Mac client and server. [#8297](https://github.com/ClickHouse/ClickHouse/pull/8297) ([Ivan](https://github.com/abyss7))
+* Now ClickHouse can work as MySQL federated server (see https://dev.mysql.com/doc/refman/5.7/en/federated-create-server.html). [#7717](https://github.com/ClickHouse/ClickHouse/pull/7717) ([Maxim Fedotov](https://github.com/MaxFedotov))
+* `clickhouse-client` now only enables `bracketed-paste` when multiquery is on and multiline is off. This fixes [#7757](https://github.com/ClickHouse/ClickHouse/issues/7757). [#7761](https://github.com/ClickHouse/ClickHouse/pull/7761) ([Amos Bird](https://github.com/amosbird))
+* Support `Array(Decimal)` in `if` function. [#7721](https://github.com/ClickHouse/ClickHouse/pull/7721) ([Artem Zuikov](https://github.com/4ertus2))
+* Support Decimals in `arrayDifference`, `arrayCumSum` and `arrayCumSumNegative` functions. [#7724](https://github.com/ClickHouse/ClickHouse/pull/7724) ([Artem Zuikov](https://github.com/4ertus2))
+* Added `lifetime` column to `system.dictionaries` table. [#6820](https://github.com/ClickHouse/ClickHouse/issues/6820) [#7727](https://github.com/ClickHouse/ClickHouse/pull/7727) ([kekekekule](https://github.com/kekekekule))
+* Improved check for existing parts on different disks for `*MergeTree` table engines. Addresses [#7660](https://github.com/ClickHouse/ClickHouse/issues/7660). [#8440](https://github.com/ClickHouse/ClickHouse/pull/8440) ([Vladimir Chebotarev](https://github.com/excitoon))
+* Integration with `AWS SDK` for `S3` interactions which allows to use all S3 features out of the box. [#8011](https://github.com/ClickHouse/ClickHouse/pull/8011) ([Pavel Kovalenko](https://github.com/Jokser))
+* Added support for subqueries in `Live View` tables. [#7792](https://github.com/ClickHouse/ClickHouse/pull/7792) ([vzakaznikov](https://github.com/vzakaznikov))
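+A minimal sketch (Live View was experimental at the time, hence the setting; the `events` table and column `x` are hypothetical):
+```sql
+SET allow_experimental_live_view = 1;
+CREATE LIVE VIEW lv AS
+    SELECT sum(x) FROM (SELECT x FROM events WHERE x > 0);
+```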
+* The check for using `Date` or `DateTime` columns from `TTL` expressions was removed. [#7920](https://github.com/ClickHouse/ClickHouse/pull/7920) ([Vladimir Chebotarev](https://github.com/excitoon))
+* Information about disk was added to `system.detached_parts` table. [#7833](https://github.com/ClickHouse/ClickHouse/pull/7833) ([Vladimir Chebotarev](https://github.com/excitoon))
+* Now settings `max_(table|partition)_size_to_drop` can be changed without a restart. [#7779](https://github.com/ClickHouse/ClickHouse/pull/7779) ([Grigory Pervakov](https://github.com/GrigoryPervakov))
+* Slightly better usability of error messages. Ask user not to remove the lines below `Stack trace:`. [#7897](https://github.com/ClickHouse/ClickHouse/pull/7897) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Better reading messages from `Kafka` engine in various formats after [#7935](https://github.com/ClickHouse/ClickHouse/issues/7935). [#8035](https://github.com/ClickHouse/ClickHouse/pull/8035) ([Ivan](https://github.com/abyss7))
+* Better compatibility with MySQL clients which don't support `sha2_password` auth plugin. [#8036](https://github.com/ClickHouse/ClickHouse/pull/8036) ([Yuriy Baranov](https://github.com/yurriy))
+* Support more column types in MySQL compatibility server. [#7975](https://github.com/ClickHouse/ClickHouse/pull/7975) ([Yuriy Baranov](https://github.com/yurriy))
+* Implement `ORDER BY` optimization for `Merge`, `Buffer` and `MaterializedView` storages with underlying `MergeTree` tables. [#8130](https://github.com/ClickHouse/ClickHouse/pull/8130) ([Anton Popov](https://github.com/CurtizJ))
+* Now we always use POSIX implementation of `getrandom` to have better compatibility with old kernels (< 3.17). [#7940](https://github.com/ClickHouse/ClickHouse/pull/7940) ([Amos Bird](https://github.com/amosbird))
+* Better check for valid destination in a move TTL rule. [#8410](https://github.com/ClickHouse/ClickHouse/pull/8410) ([Vladimir Chebotarev](https://github.com/excitoon))
+* Better checks for broken insert batches for `Distributed` table engine. [#7933](https://github.com/ClickHouse/ClickHouse/pull/7933) ([Azat Khuzhin](https://github.com/azat))
+* Add a column with an array of part names which mutations must process in the future to `system.mutations` table. [#8179](https://github.com/ClickHouse/ClickHouse/pull/8179) ([alesapin](https://github.com/alesapin))
+* Parallel merge sort optimization for processors. [#8552](https://github.com/ClickHouse/ClickHouse/pull/8552) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+* The setting `mark_cache_min_lifetime` is now obsolete and does nothing. In previous versions, the mark cache could grow in memory larger than `mark_cache_size` to accommodate data within `mark_cache_min_lifetime` seconds. That was leading to confusion and higher memory usage than expected, which is especially bad on memory-constrained systems. If you see performance degradation after installing this release, you should increase `mark_cache_size`. [#8484](https://github.com/ClickHouse/ClickHouse/pull/8484) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Preparation to use `tid` everywhere. This is needed for [#7477](https://github.com/ClickHouse/ClickHouse/issues/7477). [#8276](https://github.com/ClickHouse/ClickHouse/pull/8276) ([alexey-milovidov](https://github.com/alexey-milovidov))
+
+#### Performance Improvement
+* Performance optimizations in the processors pipeline. [#7988](https://github.com/ClickHouse/ClickHouse/pull/7988) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+* Non-blocking updates of expired keys in cache dictionaries (with the ability to read the old ones). [#8303](https://github.com/ClickHouse/ClickHouse/pull/8303) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+* Compile ClickHouse without `-fno-omit-frame-pointer` globally to spare one more register. [#8097](https://github.com/ClickHouse/ClickHouse/pull/8097) ([Amos Bird](https://github.com/amosbird))
+* Speed up the `greatCircleDistance` function and add performance tests for it. [#7307](https://github.com/ClickHouse/ClickHouse/pull/7307) ([Olga Khvostikova](https://github.com/stavrolia))
+* Improved performance of the function `roundDown`. [#8465](https://github.com/ClickHouse/ClickHouse/pull/8465) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Improved performance of `max`, `min`, `argMin`, `argMax` for the `DateTime64` data type. [#8199](https://github.com/ClickHouse/ClickHouse/pull/8199) ([Vasily Nemkov](https://github.com/Enmk))
+* Improved performance of sorting without a limit or with a big limit and external sorting. [#8545](https://github.com/ClickHouse/ClickHouse/pull/8545) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Improved performance of formatting floating point numbers by up to 6 times. [#8542](https://github.com/ClickHouse/ClickHouse/pull/8542) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Improved performance of the `modulo` function. [#7750](https://github.com/ClickHouse/ClickHouse/pull/7750) ([Amos Bird](https://github.com/amosbird))
+* Optimized `ORDER BY` and merging with a single column key. [#8335](https://github.com/ClickHouse/ClickHouse/pull/8335) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Better implementation for `arrayReduce`, `-Array` and `-State` combinators. [#7710](https://github.com/ClickHouse/ClickHouse/pull/7710) ([Amos Bird](https://github.com/amosbird))
+* Now `PREWHERE` should be optimized to be at least as efficient as `WHERE`. [#7769](https://github.com/ClickHouse/ClickHouse/pull/7769) ([Amos Bird](https://github.com/amosbird))
+* Improved the way `round` and `roundBankers` handle negative numbers. [#8229](https://github.com/ClickHouse/ClickHouse/pull/8229) ([hcz](https://github.com/hczhcz))
+* Improved decoding performance of the `DoubleDelta` and `Gorilla` codecs by roughly 30-40%. This fixes [#7082](https://github.com/ClickHouse/ClickHouse/issues/7082). [#8019](https://github.com/ClickHouse/ClickHouse/pull/8019) ([Vasily Nemkov](https://github.com/Enmk))
+* Improved performance of `base64`-related functions. [#8444](https://github.com/ClickHouse/ClickHouse/pull/8444) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Added a function `geoDistance`. It is similar to `greatCircleDistance` but uses an approximation to the WGS-84 ellipsoid model. The performance of both functions is nearly the same (see the example below). [#8086](https://github.com/ClickHouse/ClickHouse/pull/8086) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Faster `min` and `max` aggregation functions for the `Decimal` data type. [#8144](https://github.com/ClickHouse/ClickHouse/pull/8144) ([Artem Zuikov](https://github.com/4ertus2))
+* Vectorized processing of `arrayReduce`. [#7608](https://github.com/ClickHouse/ClickHouse/pull/7608) ([Amos Bird](https://github.com/amosbird))
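+
+Hedged one-liners for the `arrayReduce` and `geoDistance` items above (coordinates are illustrative; both distance functions take longitude and latitude in degrees and return meters):
+
+```sql
+-- apply an aggregate function, chosen by name, to an array
+SELECT arrayReduce('sum', [1, 2, 3, 4]) AS total;
+
+-- spherical vs. WGS-84-approximation distance, Moscow to San Francisco
+SELECT greatCircleDistance(37.62, 55.75, -122.42, 37.77) AS sphere_m,
+       geoDistance(37.62, 55.75, -122.42, 37.77) AS ellipsoid_m;
+```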
+* `if` chains are now optimized as `multiIf`. [#8355](https://github.com/ClickHouse/ClickHouse/pull/8355) ([kamalov-ruslan](https://github.com/kamalov-ruslan))
+* Fix a performance regression of the `Kafka` table engine introduced in 19.15. This fixes [#7261](https://github.com/ClickHouse/ClickHouse/issues/7261). [#7935](https://github.com/ClickHouse/ClickHouse/pull/7935) ([filimonov](https://github.com/filimonov))
+* Removed "pie" code generation that `gcc` from Debian packages occasionally brings by default. [#8483](https://github.com/ClickHouse/ClickHouse/pull/8483) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Parallel parsing of data formats. [#6553](https://github.com/ClickHouse/ClickHouse/pull/6553) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
+* Enable the optimized parser of `Values` with expressions by default (`input_format_values_deduce_templates_of_expressions=1`); see the sketch below. [#8231](https://github.com/ClickHouse/ClickHouse/pull/8231) ([tavplubix](https://github.com/tavplubix))
+
+#### Build/Testing/Packaging Improvement
+* Build fixes for `ARM` and in minimal mode. [#8304](https://github.com/ClickHouse/ClickHouse/pull/8304) ([proller](https://github.com/proller))
+* Add coverage file flush for `clickhouse-server` when std::atexit is not called. Also slightly improved logging in stateless tests with coverage. [#8267](https://github.com/ClickHouse/ClickHouse/pull/8267) ([alesapin](https://github.com/alesapin))
+* Update the LLVM library in contrib. Avoid using LLVM from OS packages. [#8258](https://github.com/ClickHouse/ClickHouse/pull/8258) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Make the bundled `curl` build fully quiet. [#8232](https://github.com/ClickHouse/ClickHouse/pull/8232) [#8203](https://github.com/ClickHouse/ClickHouse/pull/8203) ([Pavel Kovalenko](https://github.com/Jokser))
+* Fix some `MemorySanitizer` warnings. [#8235](https://github.com/ClickHouse/ClickHouse/pull/8235) ([Alexander Kuzmenkov](https://github.com/akuzm))
+* Use the `add_warning` and `no_warning` macros in `CMakeLists.txt`. [#8604](https://github.com/ClickHouse/ClickHouse/pull/8604) ([Ivan](https://github.com/abyss7))
+* Add support of MinIO S3-compatible object storage (https://min.io/) for better integration tests. [#7863](https://github.com/ClickHouse/ClickHouse/pull/7863) [#7875](https://github.com/ClickHouse/ClickHouse/pull/7875) ([Pavel Kovalenko](https://github.com/Jokser))
+* Imported `libc` headers to contrib. It allows making builds more consistent across various systems (only for `x86_64-linux-gnu`). [#5773](https://github.com/ClickHouse/ClickHouse/pull/5773) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Remove `-fPIC` from some libraries. [#8464](https://github.com/ClickHouse/ClickHouse/pull/8464) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Clean `CMakeLists.txt` for curl. See https://github.com/ClickHouse/ClickHouse/pull/8011#issuecomment-569478910 [#8459](https://github.com/ClickHouse/ClickHouse/pull/8459) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Silence warnings in the `CapNProto` library. [#8220](https://github.com/ClickHouse/ClickHouse/pull/8220) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Add performance tests for short string optimized hash tables. [#7679](https://github.com/ClickHouse/ClickHouse/pull/7679) ([Amos Bird](https://github.com/amosbird))
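+
+Returning to the `Values` parser item in the performance list above, a minimal sketch (the table `t` is hypothetical):
+
+```sql
+SET input_format_values_deduce_templates_of_expressions = 1;
+-- rows with the same expression structure are parsed once as a template,
+-- then only the literals are substituted per row
+INSERT INTO t VALUES (lower('Hello'), now() - 1), (lower('World'), now() - 2);
+```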
+* Now ClickHouse will build on `AArch64` even if `MADV_FREE` is not available. This fixes [#8027](https://github.com/ClickHouse/ClickHouse/issues/8027). [#8243](https://github.com/ClickHouse/ClickHouse/pull/8243) ([Amos Bird](https://github.com/amosbird))
+* Update `zlib-ng` to fix memory sanitizer problems. [#7182](https://github.com/ClickHouse/ClickHouse/pull/7182) [#8206](https://github.com/ClickHouse/ClickHouse/pull/8206) ([Alexander Kuzmenkov](https://github.com/akuzm))
+* Enable the internal MySQL library on non-Linux systems, because usage of OS packages is very fragile and usually doesn't work at all. This fixes [#5765](https://github.com/ClickHouse/ClickHouse/issues/5765). [#8426](https://github.com/ClickHouse/ClickHouse/pull/8426) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Fixed the build on some systems after enabling `libc++`. This supersedes [#8374](https://github.com/ClickHouse/ClickHouse/issues/8374). [#8380](https://github.com/ClickHouse/ClickHouse/pull/8380) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Make `Field` methods more type-safe to find more errors. [#7386](https://github.com/ClickHouse/ClickHouse/pull/7386) [#8209](https://github.com/ClickHouse/ClickHouse/pull/8209) ([Alexander Kuzmenkov](https://github.com/akuzm))
+* Added missing files to the `libc-headers` submodule. [#8507](https://github.com/ClickHouse/ClickHouse/pull/8507) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Fix wrong `JSON` quoting in performance test output. [#8497](https://github.com/ClickHouse/ClickHouse/pull/8497) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+* Now the stack trace is displayed for `std::exception` and `Poco::Exception`. In previous versions it was available only for `DB::Exception`. This improves diagnostics. [#8501](https://github.com/ClickHouse/ClickHouse/pull/8501) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Porting `clock_gettime` and `clock_nanosleep` for fresh glibc versions. [#8054](https://github.com/ClickHouse/ClickHouse/pull/8054) ([Amos Bird](https://github.com/amosbird))
+* Enable `part_log` in the example config for developers (a sample query follows below). [#8609](https://github.com/ClickHouse/ClickHouse/pull/8609) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Fix the async nature of reload in `01036_no_superfluous_dict_reload_on_create_database*`. [#8111](https://github.com/ClickHouse/ClickHouse/pull/8111) ([Azat Khuzhin](https://github.com/azat))
+* Fixed codec performance tests. [#8615](https://github.com/ClickHouse/ClickHouse/pull/8615) ([Vasily Nemkov](https://github.com/Enmk))
+* Add install scripts for the `.tgz` build and documentation for them. [#8612](https://github.com/ClickHouse/ClickHouse/pull/8612) [#8591](https://github.com/ClickHouse/ClickHouse/pull/8591) ([alesapin](https://github.com/alesapin))
+* Removed the old `ZSTD` test (it was created in 2016 to reproduce the bug that pre-1.0 versions of ZSTD had). This fixes [#8618](https://github.com/ClickHouse/ClickHouse/issues/8618). [#8619](https://github.com/ClickHouse/ClickHouse/pull/8619) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Fixed the build on Mac OS Catalina. [#8600](https://github.com/ClickHouse/ClickHouse/pull/8600) ([meo](https://github.com/meob))
+* Increased the number of rows in codec performance tests to make results noticeable. [#8574](https://github.com/ClickHouse/ClickHouse/pull/8574) ([Vasily Nemkov](https://github.com/Enmk))
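+
+A sample query for the `part_log` item above; it assumes `part_log` has been enabled in the server config:
+
+```sql
+-- recent merges, downloads and removals of data parts
+SELECT event_type, event_time, table, part_name
+FROM system.part_log
+ORDER BY event_time DESC
+LIMIT 10;
+```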
+* In debug builds, treat `LOGICAL_ERROR` exceptions as assertion failures, so that they are easier to notice. [#8475](https://github.com/ClickHouse/ClickHouse/pull/8475) ([Alexander Kuzmenkov](https://github.com/akuzm))
+* Make the formats-related performance test more deterministic. [#8477](https://github.com/ClickHouse/ClickHouse/pull/8477) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Update `lz4` to fix a MemorySanitizer failure. [#8181](https://github.com/ClickHouse/ClickHouse/pull/8181) ([Alexander Kuzmenkov](https://github.com/akuzm))
+* Suppress a known MemorySanitizer false positive in exception handling. [#8182](https://github.com/ClickHouse/ClickHouse/pull/8182) ([Alexander Kuzmenkov](https://github.com/akuzm))
+* Update `gcc` and `g++` to version 9 in `build/docker/build.sh`. [#7766](https://github.com/ClickHouse/ClickHouse/pull/7766) ([TLightSky](https://github.com/tlightsky))
+* Add a performance test case to test that `PREWHERE` is worse than `WHERE`. [#7768](https://github.com/ClickHouse/ClickHouse/pull/7768) ([Amos Bird](https://github.com/amosbird))
+* Progress towards fixing one flaky test. [#8621](https://github.com/ClickHouse/ClickHouse/pull/8621) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Avoid a MemorySanitizer report for data from `libunwind`. [#8539](https://github.com/ClickHouse/ClickHouse/pull/8539) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Updated `libc++` to the latest version. [#8324](https://github.com/ClickHouse/ClickHouse/pull/8324) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Build the ICU library from sources. This fixes [#6460](https://github.com/ClickHouse/ClickHouse/issues/6460). [#8219](https://github.com/ClickHouse/ClickHouse/pull/8219) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Switched from `libressl` to `openssl`. ClickHouse should support TLS 1.3 and SNI after this change. This fixes [#8171](https://github.com/ClickHouse/ClickHouse/issues/8171). [#8218](https://github.com/ClickHouse/ClickHouse/pull/8218) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Fixed a UBSan report when using `chacha20_poly1305` from SSL (happens when connecting to https://yandex.ru/). [#8214](https://github.com/ClickHouse/ClickHouse/pull/8214) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Fix the mode of the default password file for `.deb` Linux distros. [#8075](https://github.com/ClickHouse/ClickHouse/pull/8075) ([proller](https://github.com/proller))
+* Improved the expression for getting the `clickhouse-server` PID in `clickhouse-test`. [#8063](https://github.com/ClickHouse/ClickHouse/pull/8063) ([Alexander Kazakov](https://github.com/Akazz))
+* Updated contrib/googletest to v1.10.0. [#8587](https://github.com/ClickHouse/ClickHouse/pull/8587) ([Alexander Burmak](https://github.com/Alex-Burmak))
+* Fixed a ThreadSanitizer report in the `base64` library. Also updated this library to the latest version, but it doesn't matter. This fixes [#8397](https://github.com/ClickHouse/ClickHouse/issues/8397). [#8403](https://github.com/ClickHouse/ClickHouse/pull/8403) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Fix `00600_replace_running_query` for processors. [#8272](https://github.com/ClickHouse/ClickHouse/pull/8272) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+* Remove support for `tcmalloc` to make `CMakeLists.txt` simpler.
[#8310](https://github.com/ClickHouse/ClickHouse/pull/8310) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Release gcc builds now use `libc++` instead of `libstdc++`. Until recently `libc++` was used only with clang. This will improve consistency of build configurations and portability. [#8311](https://github.com/ClickHouse/ClickHouse/pull/8311) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Enable the ICU library for builds with MemorySanitizer. [#8222](https://github.com/ClickHouse/ClickHouse/pull/8222) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Suppress warnings from the `CapNProto` library. [#8224](https://github.com/ClickHouse/ClickHouse/pull/8224) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Removed special cases of code for `tcmalloc`, because it's no longer supported. [#8225](https://github.com/ClickHouse/ClickHouse/pull/8225) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* In the CI coverage task, kill the server gracefully to allow it to save the coverage report. This fixes incomplete coverage reports we've been seeing lately. [#8142](https://github.com/ClickHouse/ClickHouse/pull/8142) ([alesapin](https://github.com/alesapin))
+* Performance tests for all codecs against `Float64` and `UInt64` values. [#8349](https://github.com/ClickHouse/ClickHouse/pull/8349) ([Vasily Nemkov](https://github.com/Enmk))
+* `termcap` is very much deprecated and leads to various problems (e.g. a missing "up" cap and echoing `^J` instead of multi-line). Favor `terminfo` or the bundled `ncurses`. [#7737](https://github.com/ClickHouse/ClickHouse/pull/7737) ([Amos Bird](https://github.com/amosbird))
+* Fix the `test_storage_s3` integration test. [#7734](https://github.com/ClickHouse/ClickHouse/pull/7734) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+* Support `StorageFile(, null)` to insert a block into the given format file without actually writing to disk. This is required for performance tests (a hedged sketch follows below). [#8455](https://github.com/ClickHouse/ClickHouse/pull/8455) ([Amos Bird](https://github.com/amosbird))
+* Added the argument `--print-time` to functional tests, which prints execution time per test. [#8001](https://github.com/ClickHouse/ClickHouse/pull/8001) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+* Added asserts to `KeyCondition` while evaluating RPN. This fixes a warning from gcc-9. [#8279](https://github.com/ClickHouse/ClickHouse/pull/8279) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Dump cmake options in CI builds. [#8273](https://github.com/ClickHouse/ClickHouse/pull/8273) ([Alexander Kuzmenkov](https://github.com/akuzm))
+* Don't generate debug info for some fat libraries. [#8271](https://github.com/ClickHouse/ClickHouse/pull/8271) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Make `log_to_console.xml` always log to stderr, regardless of whether it is interactive or not. [#8395](https://github.com/ClickHouse/ClickHouse/pull/8395) ([Alexander Kuzmenkov](https://github.com/akuzm))
+* Removed some unused features from the `clickhouse-performance-test` tool. [#8555](https://github.com/ClickHouse/ClickHouse/pull/8555) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Now we will also search for `lld-X` with the corresponding `clang-X` version. [#8092](https://github.com/ClickHouse/ClickHouse/pull/8092) ([alesapin](https://github.com/alesapin))
+* Parquet build improvement. [#8421](https://github.com/ClickHouse/ClickHouse/pull/8421) ([maxulan](https://github.com/maxulan))
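+
+A hedged sketch for the `StorageFile(, null)` item above (the `CSV` format and the table definition are illustrative assumptions, not the exact syntax from the PR):
+
+```sql
+-- rows are formatted as CSV but discarded instead of being written to disk,
+-- which isolates format performance from I/O
+CREATE TABLE format_bench (n UInt64, s String) ENGINE = File(CSV, null);
+INSERT INTO format_bench SELECT number, toString(number) FROM numbers(1000000);
+```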
+* More GCC warnings. [#8221](https://github.com/ClickHouse/ClickHouse/pull/8221) ([kreuzerkrieg](https://github.com/kreuzerkrieg))
+* The package for Arch Linux now allows running the ClickHouse server, and not only the client. [#8534](https://github.com/ClickHouse/ClickHouse/pull/8534) ([Vladimir Chebotarev](https://github.com/excitoon))
+* Fix the test with processors. Tiny performance fixes. [#7672](https://github.com/ClickHouse/ClickHouse/pull/7672) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
+* Update contrib/protobuf. [#8256](https://github.com/ClickHouse/ClickHouse/pull/8256) ([Matwey V. Kornilov](https://github.com/matwey))
+* In preparation for switching to C++20 as a new year celebration. "May the C++ force be with ClickHouse." [#8447](https://github.com/ClickHouse/ClickHouse/pull/8447) ([Amos Bird](https://github.com/amosbird))
+
+#### Experimental Feature
+* Added the experimental setting `min_bytes_to_use_mmap_io`. It allows reading big files without copying data from the kernel to userspace. The setting is disabled by default. The recommended threshold is about 64 MB, because mmap/munmap is slow. [#8520](https://github.com/ClickHouse/ClickHouse/pull/8520) ([alexey-milovidov](https://github.com/alexey-milovidov))
+* Reworked quotas as a part of the access control system. Added the new table `system.quotas`, new functions `currentQuota`, `currentQuotaKey`, and new SQL syntax `CREATE QUOTA`, `ALTER QUOTA`, `DROP QUOTA`, `SHOW QUOTA`. [#7257](https://github.com/ClickHouse/ClickHouse/pull/7257) ([Vitaly Baranov](https://github.com/vitlibar))
+* Allow skipping unknown settings with warnings instead of throwing exceptions. [#7653](https://github.com/ClickHouse/ClickHouse/pull/7653) ([Vitaly Baranov](https://github.com/vitlibar))
+* Reworked row policies as a part of the access control system. Added the new table `system.row_policies`, new function `currentRowPolicies()`, and new SQL syntax `CREATE POLICY`, `ALTER POLICY`, `DROP POLICY`, `SHOW CREATE POLICY`, `SHOW POLICIES`. [#7808](https://github.com/ClickHouse/ClickHouse/pull/7808) ([Vitaly Baranov](https://github.com/vitlibar))
+
+#### Security Fix
+* Fixed the possibility of reading the directory structure in tables with the `File` table engine. This fixes [#8536](https://github.com/ClickHouse/ClickHouse/issues/8536).
[#8537](https://github.com/ClickHouse/ClickHouse/pull/8537) ([alexey-milovidov](https://github.com/alexey-milovidov)) + +## [Changelog for 2019](https://github.com/ClickHouse/ClickHouse/blob/master/docs/en/changelog/2019.md) diff --git a/docs/ru/whats_new/index.md b/docs/ru/whats_new/index.md new file mode 100644 index 00000000000..0901166b887 --- /dev/null +++ b/docs/ru/whats_new/index.md @@ -0,0 +1,6 @@ +--- +toc_folder_title: What's New +toc_priority: 72 +--- + + diff --git a/docs/ru/whats_new/roadmap.md b/docs/ru/whats_new/roadmap.md new file mode 100644 index 00000000000..3994ed4ac29 --- /dev/null +++ b/docs/ru/whats_new/roadmap.md @@ -0,0 +1,17 @@ +--- +machine_translated: true +machine_translated_rev: 1cd5f0028d917696daf71ac1c9ee849c99c1d5c8 +--- + +# Дорожная карта {#roadmap} + +## Q1 2020 {#q1-2020} + +- Управление доступом на основе ролей + +## Q2 2020 {#q2-2020} + +- Интеграция с внешними службами аутентификации +- Пулы ресурсов для более точного распределения емкости кластера между пользователями + +{## [Оригинальная статья](https://clickhouse.tech/docs/en/roadmap/) ##} diff --git a/docs/ru/security_changelog.md b/docs/ru/whats_new/security_changelog.md similarity index 100% rename from docs/ru/security_changelog.md rename to docs/ru/whats_new/security_changelog.md diff --git a/docs/toc_ru.yml b/docs/toc_ru.yml deleted file mode 100644 index 0df04f892cf..00000000000 --- a/docs/toc_ru.yml +++ /dev/null @@ -1,253 +0,0 @@ -nav: - -- 'Введение': - - 'Обзор': 'index.md' - - 'Отличительные возможности ClickHouse': 'introduction/distinctive_features.md' - - 'Особенности ClickHouse, которые могут считаться недостатками': 'introduction/features_considered_disadvantages.md' - - 'Производительность': 'introduction/performance.md' - - 'История': 'introduction/history.md' - - 'Информационная поддержка': 'introduction/info.md' - - 'Пользователи': 'introduction/adopters.md' - -- 'Начало работы': - - 'hidden': 'getting_started/index.md' - - 'Установка': 'getting_started/install.md' - - 'Руководство для начинающих': 'getting_started/tutorial.md' - - 'Тестовые наборы данных': - - 'Введение': 'getting_started/example_datasets/index.md' - - 'OnTime': 'getting_started/example_datasets/ontime.md' - - 'Данные о такси в Нью-Йорке': 'getting_started/example_datasets/nyc_taxi.md' - - 'AMPLab Big Data Benchmark': 'getting_started/example_datasets/amplab_benchmark.md' - - 'WikiStat': 'getting_started/example_datasets/wikistat.md' - - 'Терабайт логов кликов от Criteo': 'getting_started/example_datasets/criteo.md' - - 'Схема «Звезда»': 'getting_started/example_datasets/star_schema.md' - - 'Данные Яндекс.Метрики': 'getting_started/example_datasets/metrica.md' - - 'Playground': 'getting_started/playground.md' - -- 'Интерфейсы': - - 'Введение': 'interfaces/index.md' - - 'Клиент командной строки': 'interfaces/cli.md' - - 'Нативный интерфейс (TCP)': 'interfaces/tcp.md' - - 'HTTP-интерфейс': 'interfaces/http.md' - - 'MySQL-интерфейс': 'interfaces/mysql.md' - - 'Форматы входных и выходных данных': 'interfaces/formats.md' - - 'JDBC-драйвер': 'interfaces/jdbc.md' - - 'ODBC-драйвер': 'interfaces/odbc.md' - - 'C++ клиентская библиотека': 'interfaces/cpp.md' - - 'От сторонних разработчиков': - - 'Клиентские библиотеки': 'interfaces/third-party/client_libraries.md' - - 'Интеграции': 'interfaces/third-party/integrations.md' - - 'Визуальные интерфейсы': 'interfaces/third-party/gui.md' - - 'Прокси': 'interfaces/third-party/proxy.md' - -- 'Движки баз данных': - - 'Введение': 'database_engines/index.md' - - 'MySQL': 
'database_engines/mysql.md' - - 'Lazy': 'database_engines/lazy.md' - -- 'Движки таблиц': - - 'Введение': 'operations/table_engines/index.md' - - 'Семейство MergeTree': - - 'MergeTree': 'operations/table_engines/mergetree.md' - - 'Репликация данных': 'operations/table_engines/replication.md' - - 'Произвольный ключ партиционирования': 'operations/table_engines/custom_partitioning_key.md' - - 'ReplacingMergeTree': 'operations/table_engines/replacingmergetree.md' - - 'SummingMergeTree': 'operations/table_engines/summingmergetree.md' - - 'AggregatingMergeTree': 'operations/table_engines/aggregatingmergetree.md' - - 'CollapsingMergeTree': 'operations/table_engines/collapsingmergetree.md' - - 'VersionedCollapsingMergeTree': 'operations/table_engines/versionedcollapsingmergetree.md' - - 'GraphiteMergeTree': 'operations/table_engines/graphitemergetree.md' - - 'Семейство Log': - - 'Введение': 'operations/table_engines/log_family.md' - - 'StripeLog': 'operations/table_engines/stripelog.md' - - 'Log': 'operations/table_engines/log.md' - - 'TinyLog': 'operations/table_engines/tinylog.md' - - 'Интеграции': - - 'Kafka': 'operations/table_engines/kafka.md' - - 'MySQL': 'operations/table_engines/mysql.md' - - 'JDBC': 'operations/table_engines/jdbc.md' - - 'ODBC': 'operations/table_engines/odbc.md' - - 'HDFS': 'operations/table_engines/hdfs.md' - - 'Особые': - - 'Distributed': 'operations/table_engines/distributed.md' - - 'Внешние данные': 'operations/table_engines/external_data.md' - - 'Dictionary': 'operations/table_engines/dictionary.md' - - 'Merge': 'operations/table_engines/merge.md' - - 'File': 'operations/table_engines/file.md' - - 'Null': 'operations/table_engines/null.md' - - 'Set': 'operations/table_engines/set.md' - - 'Join': 'operations/table_engines/join.md' - - 'URL': 'operations/table_engines/url.md' - - 'View': 'operations/table_engines/view.md' - - 'MaterializedView': 'operations/table_engines/materializedview.md' - - 'Memory': 'operations/table_engines/memory.md' - - 'Buffer': 'operations/table_engines/buffer.md' - - 'GenerateRandom': 'operations/table_engines/generate.md' - -- 'Справка по SQL': - - 'hidden': 'query_language/index.md' - - 'Общий синтаксис': 'query_language/syntax.md' - - 'Запросы': - - 'SELECT': 'query_language/select.md' - - 'INSERT INTO': 'query_language/insert_into.md' - - 'CREATE': 'query_language/create.md' - - 'ALTER': 'query_language/alter.md' - - 'SYSTEM': 'query_language/system.md' - - 'SHOW': 'query_language/show.md' - - 'Прочие': 'query_language/misc.md' - - 'Функции': - - 'Введение': 'query_language/functions/index.md' - - 'Арифметические функции': 'query_language/functions/arithmetic_functions.md' - - 'Функции сравнения': 'query_language/functions/comparison_functions.md' - - 'Логические функции': 'query_language/functions/logical_functions.md' - - 'Функции преобразования типов': 'query_language/functions/type_conversion_functions.md' - - 'Функции для работы с датами и временем': 'query_language/functions/date_time_functions.md' - - 'Функции для работы со строками': 'query_language/functions/string_functions.md' - - 'Функции поиска в строках': 'query_language/functions/string_search_functions.md' - - 'Функции поиска и замены в строках': 'query_language/functions/string_replace_functions.md' - - 'Условные функции': 'query_language/functions/conditional_functions.md' - - 'Математические функции': 'query_language/functions/math_functions.md' - - 'Функции округления': 'query_language/functions/rounding_functions.md' - - 'Функции по работе с массивами': 
'query_language/functions/array_functions.md' - - 'Функции разбиения и слияния строк и массивов': 'query_language/functions/splitting_merging_functions.md' - - 'Битовые функции': 'query_language/functions/bit_functions.md' - - 'Функции для битмапов': 'query_language/functions/bitmap_functions.md' - - 'Функции хэширования': 'query_language/functions/hash_functions.md' - - 'Функции генерации псевдослучайных чисел': 'query_language/functions/random_functions.md' - - 'Функции для работы с UUID': 'query_language/functions/uuid_functions.md' - - 'Функции кодирования': 'query_language/functions/encoding_functions.md' - - 'Функции для работы с URL': 'query_language/functions/url_functions.md' - - 'Функции для работы с IP-адресами': 'query_language/functions/ip_address_functions.md' - - 'Функции для работы с JSON.': 'query_language/functions/json_functions.md' - - 'Функции высшего порядка': 'query_language/functions/higher_order_functions.md' - - 'Функции для работы с внешними словарями': 'query_language/functions/ext_dict_functions.md' - - 'Функции для работы со словарями Яндекс.Метрики': 'query_language/functions/ym_dict_functions.md' - - 'Функции для реализации оператора IN.': 'query_language/functions/in_functions.md' - - 'Функция arrayJoin': 'query_language/functions/array_join.md' - - 'Функции для работы с географическими координатами': 'query_language/functions/geo.md' - - 'Функции c Nullable аргументами': 'query_language/functions/functions_for_nulls.md' - - 'Функции машинного обучения': 'query_language/functions/machine_learning_functions.md' - - 'Функции для интроспекции': 'query_language/functions/introspection.md' - - 'Прочие функции': 'query_language/functions/other_functions.md' - - 'Агрегатные функции': - - 'Введение': 'query_language/agg_functions/index.md' - - 'Справочник функций': 'query_language/agg_functions/reference.md' - - 'Комбинаторы агрегатных функций': 'query_language/agg_functions/combinators.md' - - 'Параметрические агрегатные функции': 'query_language/agg_functions/parametric_functions.md' - - 'Табличные функции': - - 'Введение': 'query_language/table_functions/index.md' - - 'file': 'query_language/table_functions/file.md' - - 'merge': 'query_language/table_functions/merge.md' - - 'numbers': 'query_language/table_functions/numbers.md' - - 'remote': 'query_language/table_functions/remote.md' - - 'url': 'query_language/table_functions/url.md' - - 'mysql': 'query_language/table_functions/mysql.md' - - 'jdbc': 'query_language/table_functions/jdbc.md' - - 'odbc': 'query_language/table_functions/odbc.md' - - 'hdfs': 'query_language/table_functions/hdfs.md' - - 'input': 'query_language/table_functions/input.md' - - 'generateRandom': 'query_language/table_functions/generate.md' - - 'Словари': - - 'Введение': 'query_language/dicts/index.md' - - 'Внешние словари': - - 'Общее описание': 'query_language/dicts/external_dicts.md' - - 'Настройка внешнего словаря': 'query_language/dicts/external_dicts_dict.md' - - 'Хранение словарей в памяти': 'query_language/dicts/external_dicts_dict_layout.md' - - 'Обновление словарей': 'query_language/dicts/external_dicts_dict_lifetime.md' - - 'Источники внешних словарей': 'query_language/dicts/external_dicts_dict_sources.md' - - 'Ключ и поля словаря': 'query_language/dicts/external_dicts_dict_structure.md' - - 'Иерархические словари': 'query_language/dicts/external_dicts_dict_hierarchical.md' - - 'Встроенные словари': 'query_language/dicts/internal_dicts.md' - - 'Операторы': 'query_language/operators.md' - - 'Типы данных': - - 'Введение': 
'data_types/index.md' - - 'UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64': 'data_types/int_uint.md' - - 'Float32, Float64': 'data_types/float.md' - - 'Decimal': 'data_types/decimal.md' - - 'Булевы значения': 'data_types/boolean.md' - - 'String': 'data_types/string.md' - - 'FixedString(N)': 'data_types/fixedstring.md' - - 'UUID': 'data_types/uuid.md' - - 'Date': 'data_types/date.md' - - 'DateTime': 'data_types/datetime.md' - - 'DateTime64': 'data_types/datetime64.md' - - 'Enum': 'data_types/enum.md' - - 'Array(T)': 'data_types/array.md' - - 'AggregateFunction(name, types_of_arguments...)': 'data_types/nested_data_structures/aggregatefunction.md' - - 'Tuple(T1, T2, ...)': 'data_types/tuple.md' - - 'Nullable': 'data_types/nullable.md' - - 'Вложенные структуры данных': - - 'hidden': 'data_types/nested_data_structures/index.md' - - 'Nested(Name1 Type1, Name2 Type2, ...)': 'data_types/nested_data_structures/nested.md' - - 'Служебные типы данных': - - 'hidden': 'data_types/special_data_types/index.md' - - 'Expression': 'data_types/special_data_types/expression.md' - - 'Set': 'data_types/special_data_types/set.md' - - 'Nothing': 'data_types/special_data_types/nothing.md' - - 'Interval': 'data_types/special_data_types/interval.md' - - 'Domains': - - 'Overview': 'data_types/domains/overview.md' - - 'IPv4': 'data_types/domains/ipv4.md' - - 'IPv6': 'data_types/domains/ipv6.md' - -- 'Руководства': - - 'Обзор': 'guides/index.md' - - 'Применение CatBoost моделей': 'guides/apply_catboost_model.md' - -- 'Эксплуатация': - - 'Введение': 'operations/index.md' - - 'Требования': 'operations/requirements.md' - - 'Мониторинг': 'operations/monitoring.md' - - 'Устранение неисправностей': 'operations/troubleshooting.md' - - 'Советы по эксплуатации': 'operations/tips.md' - - 'Обновление ClickHouse': 'operations/update.md' - - 'Права доступа': 'operations/access_rights.md' - - 'Резервное копирование': 'operations/backup.md' - - 'Конфигурационные файлы': 'operations/configuration_files.md' - - 'Квоты': 'operations/quotas.md' - - 'Системные таблицы': 'operations/system_tables.md' - - 'Оптимизация производительности': - - 'Профилирование запросов': 'operations/performance/sampling_query_profiler.md' - - 'Тестирование оборудования': 'operations/performance_test.md' - - 'Конфигурационные параметры сервера': - - 'Введение': 'operations/server_settings/index.md' - - 'Серверные настройки': 'operations/server_settings/settings.md' - - 'Настройки': - - 'Введение': 'operations/settings/index.md' - - 'Разрешения на выполнение запросов': 'operations/settings/permissions_for_queries.md' - - 'Ограничения на сложность запроса': 'operations/settings/query_complexity.md' - - 'Настройки': 'operations/settings/settings.md' - - 'Ограничения на изменение настроек': 'operations/settings/constraints_on_settings.md' - - 'Профили настроек': 'operations/settings/settings_profiles.md' - - 'Настройки пользователей': 'operations/settings/settings_users.md' - - 'Утилиты': - - 'Введение': 'operations/utils/index.md' - - 'clickhouse-copier': 'operations/utils/clickhouse-copier.md' - - 'clickhouse-local': 'operations/utils/clickhouse-local.md' - - 'clickhouse-benchmark': 'operations/utils/clickhouse-benchmark.md' - -- 'Разработка': - - 'hidden': 'development/index.md' - - 'Инструкция для начинающего разработчика ClickHouse': 'development/developer_instruction.md' - - 'Обзор архитектуры ClickHouse': 'development/architecture.md' - - 'Навигация по коду ClickHouse': 'development/browse_code.md' - - 'Как собрать ClickHouse на Linux': 
'development/build.md' - - 'Как собрать ClickHouse на Mac OS X': 'development/build_osx.md' - - 'Как собрать ClickHouse на Linux для Mac OS X': 'development/build_cross_osx.md' - - 'Как собрать ClickHouse на Linux для AARCH64 (ARM64)': 'development/build_cross_arm.md' - - 'Как писать код на C++': 'development/style.md' - - 'Как запустить тесты': 'development/tests.md' - - 'Сторонние библиотеки': 'development/contrib.md' - -- 'Что нового': - - 'Changelog': - - '2020': 'changelog/index.md' - - '2019': 'changelog/2019.md' - - '2018': 'changelog/2018.md' - - '2017': 'changelog/2017.md' - - 'Security changelog': 'security_changelog.md' - - 'Roadmap': 'roadmap.md' - - 'Подробный roadmap 2020': 'extended_roadmap.md' - -- 'F.A.Q.': - - 'Общие вопросы': 'faq/general.md' diff --git a/docs/tools/convert_toc.py b/docs/tools/convert_toc.py index 9bfc347d244..5e3fe97de44 100755 --- a/docs/tools/convert_toc.py +++ b/docs/tools/convert_toc.py @@ -8,7 +8,7 @@ import yaml import util -lang = 'zh' +lang = 'ru' base_dir = os.path.join(os.path.dirname(__file__), '..') en_dir = os.path.join(base_dir, 'en') docs_dir = os.path.join(base_dir, lang) From d035173889d7ebfe067ecbf9a310dad5d0776eec Mon Sep 17 00:00:00 2001 From: Ivan Blinkov Date: Thu, 9 Apr 2020 21:08:22 +0300 Subject: [PATCH 249/484] There's no toc_NN.yml files anymore --- docs/README.md | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/docs/README.md b/docs/README.md index f075716a0b0..3c66408089c 100644 --- a/docs/README.md +++ b/docs/README.md @@ -135,16 +135,13 @@ When adding a new file: $ ln -sr en/new/file.md lang/new/file.md ``` -- Reference the file from `toc_{en,ru,zh,ja,fa}.yaml` files with the pages index. - - ### Adding a New Language 1. Create a new docs subfolder named using the [ISO-639-1 language code](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes). 2. Add Markdown files with the translation, mirroring the folder structure of other languages. -3. Commit and open a pull request with the new content. +3. Commit and open a pull-request with the new content. When everything is ready, we will add the new language to the website. From c1558f8c18e2727c5991cf923ee79710de89d1d0 Mon Sep 17 00:00:00 2001 From: Ivan Blinkov Date: Thu, 9 Apr 2020 21:08:57 +0300 Subject: [PATCH 250/484] fix link --- docs/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/README.md b/docs/README.md index 3c66408089c..a7473094ad7 100644 --- a/docs/README.md +++ b/docs/README.md @@ -203,4 +203,4 @@ Templates: ## How to Build Documentation -You can build your documentation manually by following the instructions in [docs/tools/README.md](docs/tools/README.md). Also, our CI runs the documentation build after the `documentation` label is added to PR. You can see the results of a build in the GitHub interface. If you have no permissions to add labels, a reviewer of your PR will add it. +You can build your documentation manually by following the instructions in [docs/tools/README.md](../docs/tools/README.md). Also, our CI runs the documentation build after the `documentation` label is added to PR. You can see the results of a build in the GitHub interface. If you have no permissions to add labels, a reviewer of your PR will add it. 
From 26dd6140b21c99df5f51f81ac6fe1263ca08bcc6 Mon Sep 17 00:00:00 2001 From: Alexander Kazakov Date: Thu, 9 Apr 2020 21:10:27 +0300 Subject: [PATCH 251/484] Added new config settings to control timeouts * "lock_acquire_timeout" controls for how long a query will continue to acquire each lock on its argument tables * "lock_acquire_timeout_for_background_operations" is a per-table setting for storages of *MergeTree family --- src/Core/Defines.h | 4 ++ src/Core/Settings.h | 1 + .../PushingToViewsBlockOutputStream.cpp | 7 +++- src/Databases/DatabaseMySQL.cpp | 4 +- src/Functions/FunctionJoinGet.cpp | 3 +- src/Interpreters/InterpreterAlterQuery.cpp | 7 +++- src/Interpreters/InterpreterCreateQuery.cpp | 3 +- src/Interpreters/InterpreterDescribeQuery.cpp | 3 +- src/Interpreters/InterpreterDropQuery.cpp | 12 +++--- src/Interpreters/InterpreterInsertQuery.cpp | 3 +- src/Interpreters/InterpreterRenameQuery.cpp | 3 +- src/Interpreters/InterpreterSelectQuery.cpp | 3 +- src/Interpreters/InterpreterSystemQuery.cpp | 2 +- src/Storages/IStorage.cpp | 33 +++++++-------- src/Storages/IStorage.h | 11 ++--- src/Storages/LiveView/StorageLiveView.cpp | 2 +- src/Storages/MergeTree/DataPartsExchange.cpp | 3 +- src/Storages/MergeTree/MergeTreeSettings.h | 1 + .../ReplicatedMergeTreeCleanupThread.cpp | 3 +- .../ReplicatedMergeTreePartCheckThread.cpp | 4 +- src/Storages/StorageBuffer.cpp | 5 ++- src/Storages/StorageDistributed.cpp | 2 +- src/Storages/StorageMaterializedView.cpp | 9 +++-- src/Storages/StorageMerge.cpp | 18 +++++---- src/Storages/StorageMerge.h | 5 ++- src/Storages/StorageMergeTree.cpp | 29 ++++++++------ src/Storages/StorageNull.cpp | 2 +- src/Storages/StorageReplicatedMergeTree.cpp | 40 ++++++++++++------- src/Storages/System/StorageSystemColumns.cpp | 15 +++---- .../System/StorageSystemPartsBase.cpp | 4 +- src/Storages/System/StorageSystemPartsBase.h | 2 + src/Storages/System/StorageSystemTables.cpp | 3 +- 32 files changed, 151 insertions(+), 95 deletions(-) diff --git a/src/Core/Defines.h b/src/Core/Defines.h index f2d4a517712..c797f527be9 100644 --- a/src/Core/Defines.h +++ b/src/Core/Defines.h @@ -91,3 +91,7 @@ # define ASAN_UNPOISON_MEMORY_REGION(a, b) # define ASAN_POISON_MEMORY_REGION(a, b) #endif + +/// Actually, there may be multiple acquisitions of different locks for a given table within one query. +/// Check with IStorage class for the list of possible locks +#define DBMS_DEFAULT_LOCK_ACQUIRE_TIMEOUT_SEC 120 diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 753231603b2..ec03dfa1a4e 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -406,6 +406,7 @@ struct Settings : public SettingsCollection M(SettingBool, validate_polygons, true, "Throw exception if polygon is invalid in function pointInPolygon (e.g. self-tangent, self-intersecting). If the setting is false, the function will accept invalid polygons but may silently return wrong result.", 0) \ M(SettingUInt64, max_parser_depth, 1000, "Maximum parser depth.", 0) \ M(SettingSeconds, temporary_live_view_timeout, DEFAULT_TEMPORARY_LIVE_VIEW_TIMEOUT_SEC, "Timeout after which temporary live view is deleted.", 0) \ + M(SettingSeconds, lock_acquire_timeout, DBMS_DEFAULT_LOCK_ACQUIRE_TIMEOUT_SEC, "How long locking request should wait before failing", 0) \ \ /** Obsolete settings that do nothing but left for compatibility reasons. Remove each one after half a year of obsolescence. 
*/ \ \ diff --git a/src/DataStreams/PushingToViewsBlockOutputStream.cpp b/src/DataStreams/PushingToViewsBlockOutputStream.cpp index 991d206777a..8e547767584 100644 --- a/src/DataStreams/PushingToViewsBlockOutputStream.cpp +++ b/src/DataStreams/PushingToViewsBlockOutputStream.cpp @@ -25,7 +25,8 @@ PushingToViewsBlockOutputStream::PushingToViewsBlockOutputStream( * Although now any insertion into the table is done via PushingToViewsBlockOutputStream, * but it's clear that here is not the best place for this functionality. */ - addTableLock(storage->lockStructureForShare(true, context.getInitialQueryId())); + addTableLock( + storage->lockStructureForShare(true, context.getInitialQueryId(), context.getSettingsRef().lock_acquire_timeout)); /// If the "root" table deduplicates blocks, there is no need to make deduplication for children /// Moreover, deduplication for AggregatingMergeTree children could produce false positives due to low size of inserting blocks @@ -54,7 +55,9 @@ PushingToViewsBlockOutputStream::PushingToViewsBlockOutputStream( if (auto * materialized_view = dynamic_cast(dependent_table.get())) { - addTableLock(materialized_view->lockStructureForShare(true, context.getInitialQueryId())); + addTableLock( + materialized_view->lockStructureForShare( + true, context.getInitialQueryId(), context.getSettingsRef().lock_acquire_timeout)); StoragePtr inner_table = materialized_view->getTargetTable(); auto inner_table_id = inner_table->getStorageID(); diff --git a/src/Databases/DatabaseMySQL.cpp b/src/Databases/DatabaseMySQL.cpp index 959121585ea..1cbbd4b06d9 100644 --- a/src/Databases/DatabaseMySQL.cpp +++ b/src/Databases/DatabaseMySQL.cpp @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include #include @@ -40,6 +41,7 @@ namespace ErrorCodes constexpr static const auto suffix = ".remove_flag"; static constexpr const std::chrono::seconds cleaner_sleep_time{30}; +static const SettingSeconds lock_acquire_timeout{10}; static String toQueryStringWithQuote(const std::vector & quote_list) { @@ -358,7 +360,7 @@ void DatabaseMySQL::cleanOutdatedTables() ++iterator; else { - const auto table_lock = (*iterator)->lockAlterIntention(RWLockImpl::NO_QUERY); + const auto table_lock = (*iterator)->lockAlterIntention(RWLockImpl::NO_QUERY, lock_acquire_timeout); (*iterator)->shutdown(); (*iterator)->is_dropped = true; diff --git a/src/Functions/FunctionJoinGet.cpp b/src/Functions/FunctionJoinGet.cpp index 0860deccb14..7940bad2cf4 100644 --- a/src/Functions/FunctionJoinGet.cpp +++ b/src/Functions/FunctionJoinGet.cpp @@ -65,7 +65,8 @@ FunctionBaseImplPtr JoinGetOverloadResolver::build(const ColumnsWithTypeAndName auto join = storage_join->getJoin(); DataTypes data_types(arguments.size()); - auto table_lock = storage_join->lockStructureForShare(false, context.getInitialQueryId()); + auto table_lock = storage_join->lockStructureForShare( + false, context.getInitialQueryId(), context.getSettingsRef().lock_acquire_timeout); for (size_t i = 0; i < arguments.size(); ++i) data_types[i] = arguments[i].type; diff --git a/src/Interpreters/InterpreterAlterQuery.cpp b/src/Interpreters/InterpreterAlterQuery.cpp index 315527765ef..7412b6b683b 100644 --- a/src/Interpreters/InterpreterAlterQuery.cpp +++ b/src/Interpreters/InterpreterAlterQuery.cpp @@ -82,7 +82,9 @@ BlockIO InterpreterAlterQuery::execute() if (!mutation_commands.empty()) { - auto table_lock_holder = table->lockStructureForShare(false /* because mutation is executed asynchronously */, context.getCurrentQueryId()); + auto table_lock_holder =
table->lockStructureForShare( + false /* because mutation is executed asynchronously */, + context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); MutationsInterpreter(table, mutation_commands, context, false).validate(table_lock_holder); table->mutate(mutation_commands, context); } @@ -109,7 +111,8 @@ BlockIO InterpreterAlterQuery::execute() if (!alter_commands.empty()) { - auto table_lock_holder = table->lockAlterIntention(context.getCurrentQueryId()); + auto table_lock_holder = table->lockAlterIntention( + context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); StorageInMemoryMetadata metadata = table->getInMemoryMetadata(); alter_commands.validate(metadata, context); alter_commands.prepare(metadata); diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index f15796688e1..81b238a8973 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -403,7 +403,8 @@ InterpreterCreateQuery::TableProperties InterpreterCreateQuery::setProperties(AS StoragePtr as_storage = DatabaseCatalog::instance().getTable({as_database_name, create.as_table}); /// as_storage->getColumns() and setEngine(...) must be called under structure lock of other_table for CREATE ... AS other_table. - as_storage_lock = as_storage->lockStructureForShare(false, context.getCurrentQueryId()); + as_storage_lock = as_storage->lockStructureForShare( + false, context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); properties.columns = as_storage->getColumns(); /// Secondary indices make sense only for MergeTree family of storage engines. diff --git a/src/Interpreters/InterpreterDescribeQuery.cpp b/src/Interpreters/InterpreterDescribeQuery.cpp index 1353c01ebf6..f9c769a523e 100644 --- a/src/Interpreters/InterpreterDescribeQuery.cpp +++ b/src/Interpreters/InterpreterDescribeQuery.cpp @@ -89,7 +89,8 @@ BlockInputStreamPtr InterpreterDescribeQuery::executeImpl() table = DatabaseCatalog::instance().getTable(table_id); } - auto table_lock = table->lockStructureForShare(false, context.getInitialQueryId()); + auto table_lock = table->lockStructureForShare( + false, context.getInitialQueryId(), context.getSettingsRef().lock_acquire_timeout); columns = table->getColumns(); } diff --git a/src/Interpreters/InterpreterDropQuery.cpp b/src/Interpreters/InterpreterDropQuery.cpp index 42d9528abd5..70707c814ca 100644 --- a/src/Interpreters/InterpreterDropQuery.cpp +++ b/src/Interpreters/InterpreterDropQuery.cpp @@ -93,7 +93,7 @@ BlockIO InterpreterDropQuery::executeToTable( context.checkAccess(table->isView() ?
AccessType::DROP_VIEW : AccessType::DROP_TABLE, table_id); table->shutdown(); /// If table was already dropped by anyone, an exception will be thrown - auto table_lock = table->lockExclusively(context.getCurrentQueryId()); + auto table_lock = table->lockExclusively(context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); /// Drop table from memory, don't touch data and metadata database->detachTable(table_name); } @@ -103,7 +103,7 @@ BlockIO InterpreterDropQuery::executeToTable( table->checkTableCanBeDropped(); /// If table was already dropped by anyone, an exception will be thrown - auto table_lock = table->lockExclusively(context.getCurrentQueryId()); + auto table_lock = table->lockExclusively(context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); /// Drop table data, don't touch metadata table->truncate(query_ptr, context, table_lock); } @@ -115,7 +115,7 @@ BlockIO InterpreterDropQuery::executeToTable( table->shutdown(); /// If table was already dropped by anyone, an exception will be thrown - auto table_lock = table->lockExclusively(context.getCurrentQueryId()); + auto table_lock = table->lockExclusively(context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); const std::string metadata_file_without_extension = database->getMetadataPath() + escapeForFileName(table_id.table_name); const auto prev_metadata_name = metadata_file_without_extension + ".sql"; @@ -216,7 +216,8 @@ BlockIO InterpreterDropQuery::executeToTemporaryTable(const String & table_name, if (kind == ASTDropQuery::Kind::Truncate) { /// If table was already dropped by anyone, an exception will be thrown - auto table_lock = table->lockExclusively(context.getCurrentQueryId()); + auto table_lock = + table->lockExclusively(context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); /// Drop table data, don't touch metadata table->truncate(query_ptr, context, table_lock); } @@ -225,7 +226,8 @@ BlockIO InterpreterDropQuery::executeToTemporaryTable(const String & table_name, context_handle.removeExternalTable(table_name); table->shutdown(); /// If table was already dropped by anyone, an exception will be thrown - auto table_lock = table->lockExclusively(context.getCurrentQueryId()); + auto table_lock = + table->lockExclusively(context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); /// Delete table data table->drop(table_lock); table->is_dropped = true; diff --git a/src/Interpreters/InterpreterInsertQuery.cpp b/src/Interpreters/InterpreterInsertQuery.cpp index 39b99b10c0d..fc5d76ee216 100644 --- a/src/Interpreters/InterpreterInsertQuery.cpp +++ b/src/Interpreters/InterpreterInsertQuery.cpp @@ -109,7 +109,8 @@ BlockIO InterpreterInsertQuery::execute() BlockIO res; StoragePtr table = getTable(query); - auto table_lock = table->lockStructureForShare(true, context.getInitialQueryId()); + auto table_lock = table->lockStructureForShare( + true, context.getInitialQueryId(), context.getSettingsRef().lock_acquire_timeout); auto query_sample_block = getSampleBlock(query, table); if (!query.table_function) diff --git a/src/Interpreters/InterpreterRenameQuery.cpp b/src/Interpreters/InterpreterRenameQuery.cpp index 4f54f759510..9a4f4b1b197 100644 --- a/src/Interpreters/InterpreterRenameQuery.cpp +++ b/src/Interpreters/InterpreterRenameQuery.cpp @@ -79,7 +79,8 @@ BlockIO InterpreterRenameQuery::execute() { database_catalog.assertTableDoesntExist(StorageID(elem.to_database_name, elem.to_table_name)); auto from_table = 
database_catalog.getTable({elem.from_database_name, elem.from_table_name}); - auto from_table_lock = from_table->lockExclusively(context.getCurrentQueryId()); + auto from_table_lock = + from_table->lockExclusively(context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); database_catalog.getDatabase(elem.from_database_name)->renameTable( context, diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index 80a7831475b..b355d5af6b1 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ b/src/Interpreters/InterpreterSelectQuery.cpp @@ -255,7 +255,8 @@ InterpreterSelectQuery::InterpreterSelectQuery( if (storage) { - table_lock = storage->lockStructureForShare(false, context->getInitialQueryId()); + table_lock = storage->lockStructureForShare( + false, context->getInitialQueryId(), context->getSettingsRef().lock_acquire_timeout); table_id = storage->getStorageID(); } diff --git a/src/Interpreters/InterpreterSystemQuery.cpp b/src/Interpreters/InterpreterSystemQuery.cpp index 9a7d6ae7c5a..87d995372ef 100644 --- a/src/Interpreters/InterpreterSystemQuery.cpp +++ b/src/Interpreters/InterpreterSystemQuery.cpp @@ -326,7 +326,7 @@ StoragePtr InterpreterSystemQuery::tryRestartReplica(const StorageID & replica, table->shutdown(); { /// If table was already dropped by anyone, an exception will be thrown - auto table_lock = table->lockExclusively(context.getCurrentQueryId()); + auto table_lock = table->lockExclusively(context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); create_ast = database->getCreateTableQuery(system_context, replica.table_name); database->detachTable(replica.table_name); diff --git a/src/Storages/IStorage.cpp b/src/Storages/IStorage.cpp index ab3a750db16..3bf8054485c 100644 --- a/src/Storages/IStorage.cpp +++ b/src/Storages/IStorage.cpp @@ -315,63 +315,64 @@ bool IStorage::isVirtualColumn(const String & column_name) const return getColumns().get(column_name).is_virtual; } -RWLockImpl::LockHolder IStorage::tryLockTimed(const RWLock & rwlock, RWLockImpl::Type type, const String & query_id) +RWLockImpl::LockHolder IStorage::tryLockTimed( + const RWLock & rwlock, RWLockImpl::Type type, const String & query_id, const SettingSeconds & acquire_timeout) { - auto lock_holder = rwlock->getLock(type, query_id, RWLockImpl::default_locking_timeout_ms); + auto lock_holder = rwlock->getLock(type, query_id, std::chrono::milliseconds(acquire_timeout.totalMilliseconds())); if (!lock_holder) { const String type_str = type == RWLockImpl::Type::Read ? "READ" : "WRITE"; throw Exception( type_str + " locking attempt on \"" + getStorageID().getFullTableName() + - "\" has timed out! (" + toString(RWLockImpl::default_locking_timeout_ms.count()) + "ms) " + "\" has timed out! (" + toString(acquire_timeout.totalMilliseconds()) + "ms ). " "Possible deadlock avoided. 
Client should retry.", ErrorCodes::DEADLOCK_AVOIDED); } return lock_holder; } -TableStructureReadLockHolder IStorage::lockStructureForShare(bool will_add_new_data, const String & query_id) +TableStructureReadLockHolder IStorage::lockStructureForShare(bool will_add_new_data, const String & query_id, const SettingSeconds & acquire_timeout) { TableStructureReadLockHolder result; if (will_add_new_data) - result.new_data_structure_lock = tryLockTimed(new_data_structure_lock, RWLockImpl::Read, query_id); - result.structure_lock = tryLockTimed(structure_lock, RWLockImpl::Read, query_id); + result.new_data_structure_lock = tryLockTimed(new_data_structure_lock, RWLockImpl::Read, query_id, acquire_timeout); + result.structure_lock = tryLockTimed(structure_lock, RWLockImpl::Read, query_id, acquire_timeout); if (is_dropped) throw Exception("Table is dropped", ErrorCodes::TABLE_IS_DROPPED); return result; } -TableStructureWriteLockHolder IStorage::lockAlterIntention(const String & query_id) +TableStructureWriteLockHolder IStorage::lockAlterIntention(const String & query_id, const SettingSeconds & acquire_timeout) { TableStructureWriteLockHolder result; - result.alter_intention_lock = tryLockTimed(alter_intention_lock, RWLockImpl::Write, query_id); + result.alter_intention_lock = tryLockTimed(alter_intention_lock, RWLockImpl::Write, query_id, acquire_timeout); if (is_dropped) throw Exception("Table is dropped", ErrorCodes::TABLE_IS_DROPPED); return result; } -void IStorage::lockStructureExclusively(TableStructureWriteLockHolder & lock_holder, const String & query_id) +void IStorage::lockStructureExclusively(TableStructureWriteLockHolder & lock_holder, const String & query_id, const SettingSeconds & acquire_timeout) { if (!lock_holder.alter_intention_lock) throw Exception("Alter intention lock for table " + getStorageID().getNameForLogs() + " was not taken. 
This is a bug.", ErrorCodes::LOGICAL_ERROR); if (!lock_holder.new_data_structure_lock) - lock_holder.new_data_structure_lock = tryLockTimed(new_data_structure_lock, RWLockImpl::Write, query_id); - lock_holder.structure_lock = tryLockTimed(structure_lock, RWLockImpl::Write, query_id); + lock_holder.new_data_structure_lock = tryLockTimed(new_data_structure_lock, RWLockImpl::Write, query_id, acquire_timeout); + lock_holder.structure_lock = tryLockTimed(structure_lock, RWLockImpl::Write, query_id, acquire_timeout); } -TableStructureWriteLockHolder IStorage::lockExclusively(const String & query_id) +TableStructureWriteLockHolder IStorage::lockExclusively(const String & query_id, const SettingSeconds & acquire_timeout) { TableStructureWriteLockHolder result; - result.alter_intention_lock = tryLockTimed(alter_intention_lock, RWLockImpl::Write, query_id); + result.alter_intention_lock = tryLockTimed(alter_intention_lock, RWLockImpl::Write, query_id, acquire_timeout); if (is_dropped) throw Exception("Table is dropped", ErrorCodes::TABLE_IS_DROPPED); - result.new_data_structure_lock = tryLockTimed(new_data_structure_lock, RWLockImpl::Write, query_id); - result.structure_lock = tryLockTimed(structure_lock, RWLockImpl::Write, query_id); + result.new_data_structure_lock = tryLockTimed(new_data_structure_lock, RWLockImpl::Write, query_id, acquire_timeout); + result.structure_lock = tryLockTimed(structure_lock, RWLockImpl::Write, query_id, acquire_timeout); return result; } @@ -386,7 +387,7 @@ void IStorage::alter( const Context & context, TableStructureWriteLockHolder & table_lock_holder) { - lockStructureExclusively(table_lock_holder, context.getCurrentQueryId()); + lockStructureExclusively(table_lock_holder, context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); auto table_id = getStorageID(); StorageInMemoryMetadata metadata = getInMemoryMetadata(); params.apply(metadata); diff --git a/src/Storages/IStorage.h b/src/Storages/IStorage.h index 581fc8a67e7..dd4e8506f9f 100644 --- a/src/Storages/IStorage.h +++ b/src/Storages/IStorage.h @@ -196,24 +196,25 @@ private: ConstraintsDescription constraints; private: - RWLockImpl::LockHolder tryLockTimed(const RWLock & rwlock, RWLockImpl::Type type, const String & query_id); + RWLockImpl::LockHolder tryLockTimed( + const RWLock & rwlock, RWLockImpl::Type type, const String & query_id, const SettingSeconds & acquire_timeout); public: /// Acquire this lock if you need the table structure to remain constant during the execution of /// the query. If will_add_new_data is true, this means that the query will add new data to the table /// (INSERT or a parts merge). - TableStructureReadLockHolder lockStructureForShare(bool will_add_new_data, const String & query_id); + TableStructureReadLockHolder lockStructureForShare(bool will_add_new_data, const String & query_id, const SettingSeconds & acquire_timeout); /// Acquire this lock at the start of ALTER to lock out other ALTERs and make sure that only you /// can modify the table structure. It can later be upgraded to the exclusive lock. - TableStructureWriteLockHolder lockAlterIntention(const String & query_id); + TableStructureWriteLockHolder lockAlterIntention(const String & query_id, const SettingSeconds & acquire_timeout); /// Upgrade alter intention lock to the full exclusive structure lock. This is done by ALTER queries /// to ensure that no other query uses the table structure and it can be safely changed. 
- void lockStructureExclusively(TableStructureWriteLockHolder & lock_holder, const String & query_id); + void lockStructureExclusively(TableStructureWriteLockHolder & lock_holder, const String & query_id, const SettingSeconds & acquire_timeout); /// Acquire the full exclusive lock immediately. No other queries can run concurrently. - TableStructureWriteLockHolder lockExclusively(const String & query_id); + TableStructureWriteLockHolder lockExclusively(const String & query_id, const SettingSeconds & acquire_timeout); /** Returns stage to which query is going to be processed in read() function. * (Normally, the function only reads the columns from the list, but in other cases, diff --git a/src/Storages/LiveView/StorageLiveView.cpp b/src/Storages/LiveView/StorageLiveView.cpp index 049110a3294..569e5c24e1c 100644 --- a/src/Storages/LiveView/StorageLiveView.cpp +++ b/src/Storages/LiveView/StorageLiveView.cpp @@ -519,7 +519,7 @@ void StorageLiveView::drop(TableStructureWriteLockHolder &) void StorageLiveView::refresh(const Context & context) { - auto alter_lock = lockAlterIntention(context.getCurrentQueryId()); + auto alter_lock = lockAlterIntention(context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); { std::lock_guard lock(mutex); if (getNewBlocks()) diff --git a/src/Storages/MergeTree/DataPartsExchange.cpp b/src/Storages/MergeTree/DataPartsExchange.cpp index 6373c85a15d..c656fbf0c58 100644 --- a/src/Storages/MergeTree/DataPartsExchange.cpp +++ b/src/Storages/MergeTree/DataPartsExchange.cpp @@ -85,7 +85,8 @@ void Service::processQuery(const Poco::Net::HTMLForm & params, ReadBuffer & /*bo try { - auto storage_lock = data.lockStructureForShare(false, RWLockImpl::NO_QUERY); + auto storage_lock = data.lockStructureForShare( + false, RWLockImpl::NO_QUERY, data.getSettings()->lock_acquire_timeout_for_background_operations); MergeTreeData::DataPartPtr part = findPart(part_name); diff --git a/src/Storages/MergeTree/MergeTreeSettings.h b/src/Storages/MergeTree/MergeTreeSettings.h index bbd1fd6cbeb..02c852b4f4b 100644 --- a/src/Storages/MergeTree/MergeTreeSettings.h +++ b/src/Storages/MergeTree/MergeTreeSettings.h @@ -42,6 +42,7 @@ struct MergeTreeSettings : public SettingsCollection M(SettingUInt64, number_of_free_entries_in_pool_to_execute_mutation, 10, "When there is less than specified number of free entries in pool, do not execute part mutations. This is to leave free threads for regular merges and avoid \"Too many parts\"", 0) \ M(SettingSeconds, old_parts_lifetime, 8 * 60, "How many seconds to keep obsolete parts.", 0) \ M(SettingSeconds, temporary_directories_lifetime, 86400, "How many seconds to keep tmp_-directories.", 0) \ + M(SettingSeconds, lock_acquire_timeout_for_background_operations, DBMS_DEFAULT_LOCK_ACQUIRE_TIMEOUT_SEC, "For background operations like merges, mutations etc. How many seconds before failing to acquire table locks.", 0) \ \ /** Inserts settings. */ \ M(SettingUInt64, parts_to_delay_insert, 150, "If table contains at least that many active parts in single partition, artificially slow down insert into table.", 0) \ diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp index 77a5bca7a92..b1164f6621c 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp @@ -57,7 +57,8 @@ void ReplicatedMergeTreeCleanupThread::iterate() { /// TODO: Implement tryLockStructureForShare. 
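The new `lock_acquire_timeout_for_background_operations` MergeTree setting above gives merges, mutations and cleanup threads their own lock budget, while user queries keep passing the session-level `lock_acquire_timeout`. A hypothetical sketch of how the two sources relate; the structs and the 120-second value below are assumptions, not ClickHouse types:

``` cpp
#include <chrono>
#include <iostream>

// Stand-ins for the session settings and the per-table MergeTree settings.
struct SessionSettings { std::chrono::seconds lock_acquire_timeout{120}; };
struct TableSettings { std::chrono::seconds lock_acquire_timeout_for_background_operations{120}; };

// Foreground queries are bounded by the session setting; background jobs
// (merges, mutations, cleanup threads) by the table's own setting.
std::chrono::seconds chooseLockTimeout(bool is_background, const SessionSettings & session, const TableSettings & table)
{
    return is_background ? table.lock_acquire_timeout_for_background_operations
                         : session.lock_acquire_timeout;
}

int main()
{
    SessionSettings session;
    TableSettings table;
    std::cout << "background merge waits up to " << chooseLockTimeout(true, session, table).count() << "s\n";
    std::cout << "user query waits up to " << chooseLockTimeout(false, session, table).count() << "s\n";
}
```

In the patch itself the background default comes from `DBMS_DEFAULT_LOCK_ACQUIRE_TIMEOUT_SEC`, as the MergeTreeSettings line above shows.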
- auto lock = storage.lockStructureForShare(false, ""); + auto lock = storage.lockStructureForShare( + false, RWLockImpl::NO_QUERY, storage.getSettings()->lock_acquire_timeout_for_background_operations); storage.clearOldTemporaryDirectories(); } diff --git a/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp b/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp index 17b716d14c2..b587b5f71c0 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp @@ -203,7 +203,9 @@ CheckResult ReplicatedMergeTreePartCheckThread::checkPart(const String & part_na else if (part->name == part_name) { auto zookeeper = storage.getZooKeeper(); - auto table_lock = storage.lockStructureForShare(false, RWLockImpl::NO_QUERY); + + auto table_lock = storage.lockStructureForShare( + false, RWLockImpl::NO_QUERY, storage.getSettings()->lock_acquire_timeout_for_background_operations); auto local_part_header = ReplicatedMergeTreePartHeader::fromColumnsAndChecksums( part->getColumns(), part->checksums); diff --git a/src/Storages/StorageBuffer.cpp b/src/Storages/StorageBuffer.cpp index 7699f8379d9..2702b344dc3 100644 --- a/src/Storages/StorageBuffer.cpp +++ b/src/Storages/StorageBuffer.cpp @@ -168,7 +168,8 @@ Pipes StorageBuffer::read( if (destination.get() == this) throw Exception("Destination table is myself. Read will cause infinite loop.", ErrorCodes::INFINITE_LOOP); - auto destination_lock = destination->lockStructureForShare(false, context.getCurrentQueryId()); + auto destination_lock = destination->lockStructureForShare( + false, context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); const bool dst_has_same_structure = std::all_of(column_names.begin(), column_names.end(), [this, destination](const String& column_name) { @@ -757,7 +758,7 @@ std::optional StorageBuffer::totalBytes() const void StorageBuffer::alter(const AlterCommands & params, const Context & context, TableStructureWriteLockHolder & table_lock_holder) { - lockStructureExclusively(table_lock_holder, context.getCurrentQueryId()); + lockStructureExclusively(table_lock_holder, context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); auto table_id = getStorageID(); checkAlterIsPossible(params, context.getSettingsRef()); diff --git a/src/Storages/StorageDistributed.cpp b/src/Storages/StorageDistributed.cpp index b4375dd5b0a..7e6b9d14e02 100644 --- a/src/Storages/StorageDistributed.cpp +++ b/src/Storages/StorageDistributed.cpp @@ -460,7 +460,7 @@ void StorageDistributed::checkAlterIsPossible(const AlterCommands & commands, co void StorageDistributed::alter(const AlterCommands & params, const Context & context, TableStructureWriteLockHolder & table_lock_holder) { - lockStructureExclusively(table_lock_holder, context.getCurrentQueryId()); + lockStructureExclusively(table_lock_holder, context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); auto table_id = getStorageID(); checkAlterIsPossible(params, context.getSettingsRef()); diff --git a/src/Storages/StorageMaterializedView.cpp b/src/Storages/StorageMaterializedView.cpp index 3fb25bf8275..056e2cbb42f 100644 --- a/src/Storages/StorageMaterializedView.cpp +++ b/src/Storages/StorageMaterializedView.cpp @@ -185,7 +185,9 @@ Pipes StorageMaterializedView::read( const unsigned num_streams) { auto storage = getTargetTable(); - auto lock = storage->lockStructureForShare(false, context.getCurrentQueryId()); + auto lock = storage->lockStructureForShare( + 
false, context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); + if (query_info.order_by_optimizer) query_info.input_sorting_info = query_info.order_by_optimizer->getInputOrder(storage); @@ -200,7 +202,8 @@ Pipes StorageMaterializedView::read( BlockOutputStreamPtr StorageMaterializedView::write(const ASTPtr & query, const Context & context) { auto storage = getTargetTable(); - auto lock = storage->lockStructureForShare(true, context.getCurrentQueryId()); + auto lock = storage->lockStructureForShare( + true, context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); auto stream = storage->write(query, context); stream->addTableLock(lock); return stream; @@ -258,7 +261,7 @@ void StorageMaterializedView::alter( const Context & context, TableStructureWriteLockHolder & table_lock_holder) { - lockStructureExclusively(table_lock_holder, context.getCurrentQueryId()); + lockStructureExclusively(table_lock_holder, context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); auto table_id = getStorageID(); StorageInMemoryMetadata metadata = getInMemoryMetadata(); params.apply(metadata); diff --git a/src/Storages/StorageMerge.cpp b/src/Storages/StorageMerge.cpp index f3322c7dfff..a108e615bee 100644 --- a/src/Storages/StorageMerge.cpp +++ b/src/Storages/StorageMerge.cpp @@ -118,7 +118,8 @@ bool StorageMerge::isRemote() const bool StorageMerge::mayBenefitFromIndexForIn(const ASTPtr & left_in_operand, const Context & query_context) const { /// It's beneficial if it is true for at least one table. - StorageListWithLocks selected_tables = getSelectedTables(query_context.getCurrentQueryId()); + StorageListWithLocks selected_tables = getSelectedTables( + query_context.getCurrentQueryId(), query_context.getSettingsRef()); size_t i = 0; for (const auto & table : selected_tables) @@ -195,7 +196,7 @@ Pipes StorageMerge::read( * This is necessary to correctly pass the recommended number of threads to each table. */ StorageListWithLocks selected_tables = getSelectedTables( - query_info.query, has_table_virtual_column, context.getCurrentQueryId()); + query_info.query, has_table_virtual_column, context.getCurrentQueryId(), context.getSettingsRef()); if (selected_tables.empty()) /// FIXME: do we support sampling in this case? 
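The `getSelectedTables` calls above now receive the query settings so that every underlying table of a `Merge` table is locked under the same per-query timeout. A sketch of that all-or-nothing locking loop, with `std::shared_timed_mutex` and `std::shared_lock` standing in for `RWLockImpl` and the table lock holders (an illustration under those assumptions, not ClickHouse code):

``` cpp
#include <chrono>
#include <memory>
#include <shared_mutex>
#include <stdexcept>
#include <vector>

using TablePtr = std::shared_ptr<std::shared_timed_mutex>;
using LockHolder = std::shared_lock<std::shared_timed_mutex>;

// Take a read lock on every table with the same bounded wait; on timeout the
// holders acquired so far are released automatically by RAII.
std::vector<LockHolder> lockAllForShare(const std::vector<TablePtr> & tables, std::chrono::milliseconds acquire_timeout)
{
    std::vector<LockHolder> holders;
    holders.reserve(tables.size());
    for (const auto & table : tables)
    {
        LockHolder holder(*table, acquire_timeout);  // uses try_lock_shared_for internally
        if (!holder.owns_lock())
            throw std::runtime_error("READ lock on one of the selected tables timed out");
        holders.push_back(std::move(holder));
    }
    return holders;
}

int main()
{
    std::vector<TablePtr> tables{std::make_shared<std::shared_timed_mutex>(), std::make_shared<std::shared_timed_mutex>()};
    auto holders = lockAllForShare(tables, std::chrono::milliseconds(100));  // all-or-nothing
}
```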
@@ -355,7 +356,7 @@ Pipes StorageMerge::createSources(const SelectQueryInfo & query_info, const Quer } -StorageMerge::StorageListWithLocks StorageMerge::getSelectedTables(const String & query_id) const +StorageMerge::StorageListWithLocks StorageMerge::getSelectedTables(const String & query_id, const Settings & settings) const { StorageListWithLocks selected_tables; auto iterator = getDatabaseIterator(); @@ -364,7 +365,8 @@ StorageMerge::StorageListWithLocks StorageMerge::getSelectedTables(const String { auto & table = iterator->table(); if (table.get() != this) - selected_tables.emplace_back(table, table->lockStructureForShare(false, query_id), iterator->name()); + selected_tables.emplace_back( + table, table->lockStructureForShare(false, query_id, settings.lock_acquire_timeout), iterator->name()); iterator->next(); } @@ -373,7 +375,8 @@ StorageMerge::StorageListWithLocks StorageMerge::getSelectedTables(const String } -StorageMerge::StorageListWithLocks StorageMerge::getSelectedTables(const ASTPtr & query, bool has_virtual_column, const String & query_id) const +StorageMerge::StorageListWithLocks StorageMerge::getSelectedTables( + const ASTPtr & query, bool has_virtual_column, const String & query_id, const Settings & settings) const { StorageListWithLocks selected_tables; DatabaseTablesIteratorPtr iterator = getDatabaseIterator(); @@ -389,7 +392,8 @@ StorageMerge::StorageListWithLocks StorageMerge::getSelectedTables(const ASTPtr if (storage.get() != this) { - selected_tables.emplace_back(storage, storage->lockStructureForShare(false, query_id), iterator->name()); + selected_tables.emplace_back( + storage, storage->lockStructureForShare(false, query_id, settings.lock_acquire_timeout), iterator->name()); virtual_column->insert(iterator->name()); } @@ -434,7 +438,7 @@ void StorageMerge::checkAlterIsPossible(const AlterCommands & commands, const Se void StorageMerge::alter( const AlterCommands & params, const Context & context, TableStructureWriteLockHolder & table_lock_holder) { - lockStructureExclusively(table_lock_holder, context.getCurrentQueryId()); + lockStructureExclusively(table_lock_holder, context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); auto table_id = getStorageID(); StorageInMemoryMetadata storage_metadata = getInMemoryMetadata(); diff --git a/src/Storages/StorageMerge.h b/src/Storages/StorageMerge.h index 1d2df3cb9ce..bb3205184b1 100644 --- a/src/Storages/StorageMerge.h +++ b/src/Storages/StorageMerge.h @@ -57,9 +57,10 @@ private: using StorageWithLockAndName = std::tuple<StoragePtr, TableStructureReadLockHolder, String>; using StorageListWithLocks = std::list<StorageWithLockAndName>; - StorageListWithLocks getSelectedTables(const String & query_id) const; + StorageListWithLocks getSelectedTables(const String & query_id, const Settings & settings) const; - StorageMerge::StorageListWithLocks getSelectedTables(const ASTPtr & query, bool has_virtual_column, const String & query_id) const; + StorageMerge::StorageListWithLocks getSelectedTables( + const ASTPtr & query, bool has_virtual_column, const String & query_id, const Settings & settings) const; template <typename F> StoragePtr getFirstTable(F && predicate) const; diff --git a/src/Storages/StorageMergeTree.cpp b/src/Storages/StorageMergeTree.cpp index 2efeff19657..5bf16f49fbe 100644 --- a/src/Storages/StorageMergeTree.cpp +++ b/src/Storages/StorageMergeTree.cpp @@ -223,7 +223,7 @@ void StorageMergeTree::alter( /// This alter can be performed at metadata level only if (commands.isSettingsAlter()) { - lockStructureExclusively(table_lock_holder, context.getCurrentQueryId()); +
lockStructureExclusively(table_lock_holder, context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); changeSettings(metadata.settings_ast, table_lock_holder); @@ -231,7 +231,7 @@ void StorageMergeTree::alter( } else { - lockStructureExclusively(table_lock_holder, context.getCurrentQueryId()); + lockStructureExclusively(table_lock_holder, context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); changeSettings(metadata.settings_ast, table_lock_holder); /// Reinitialize primary key because primary key column types might have changed. @@ -537,7 +537,8 @@ bool StorageMergeTree::merge( bool deduplicate, String * out_disable_reason) { - auto table_lock_holder = lockStructureForShare(true, RWLockImpl::NO_QUERY); + auto table_lock_holder = lockStructureForShare( + true, RWLockImpl::NO_QUERY, getSettings()->lock_acquire_timeout_for_background_operations); FutureMergedMutatedPart future_part; @@ -655,7 +656,8 @@ BackgroundProcessingPoolTaskResult StorageMergeTree::movePartsTask() bool StorageMergeTree::tryMutatePart() { - auto table_lock_holder = lockStructureForShare(true, RWLockImpl::NO_QUERY); + auto table_lock_holder = lockStructureForShare( + true, RWLockImpl::NO_QUERY, getSettings()->lock_acquire_timeout_for_background_operations); size_t max_ast_elements = global_context.getSettingsRef().max_expanded_ast_elements; FutureMergedMutatedPart future_part; @@ -780,7 +782,8 @@ BackgroundProcessingPoolTaskResult StorageMergeTree::mergeMutateTask() { { /// TODO: Implement tryLockStructureForShare. - auto lock_structure = lockStructureForShare(false, ""); + auto lock_structure = lockStructureForShare( + false, RWLockImpl::NO_QUERY, getSettings()->lock_acquire_timeout_for_background_operations); clearOldPartsFromFilesystem(); clearOldTemporaryDirectories(); } @@ -973,14 +976,16 @@ void StorageMergeTree::alterPartition(const ASTPtr & query, const PartitionComma case PartitionCommand::FREEZE_PARTITION: { - auto lock = lockStructureForShare(false, context.getCurrentQueryId()); + auto lock = lockStructureForShare( + false, context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); freezePartition(command.partition, command.with_name, context, lock); } break; case PartitionCommand::FREEZE_ALL_PARTITIONS: { - auto lock = lockStructureForShare(false, context.getCurrentQueryId()); + auto lock = lockStructureForShare( + false, context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); freezeAll(command.with_name, context, lock); } break; @@ -998,7 +1003,7 @@ void StorageMergeTree::dropPartition(const ASTPtr & partition, bool detach, cons /// This protects against "revival" of data for a removed partition after completion of merge. auto merge_blocker = merger_mutator.merges_blocker.cancel(); /// Waits for completion of merge and does not start new ones. 
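The comment above points at the ordering `dropPartition` relies on: cancel the merge blocker first so no new merge can grab the lock, then take the exclusive lock with a bounded wait. A simplified sketch of that cancel-then-lock handshake (all names and timeouts below are illustrative assumptions):

``` cpp
#include <atomic>
#include <chrono>
#include <iostream>
#include <mutex>
#include <stdexcept>

std::atomic<bool> merges_blocked{false};
std::timed_mutex structure_lock;  // stand-in for the table's exclusive structure lock

// A background merge first checks the blocker, then takes the lock with a short wait.
bool tryBackgroundMerge()
{
    if (merges_blocked.load())
        return false;  // a DROP is in flight: do not start new merges
    std::unique_lock<std::timed_mutex> lock(structure_lock, std::chrono::milliseconds(100));
    return lock.owns_lock();  // merge work would happen here
}

void dropPartition()
{
    merges_blocked.store(true);   // step 1: stop issuing new merges
    // step 2: take the exclusive lock, waiting a bounded time for running merges to finish
    std::unique_lock<std::timed_mutex> lock(structure_lock, std::chrono::seconds(120));
    if (!lock.owns_lock())
        throw std::runtime_error("DROP PARTITION timed out waiting for the table lock");
    std::cout << "partition dropped under the exclusive lock\n";
    merges_blocked.store(false);
}

int main()
{
    dropPartition();
}
```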
- auto lock = lockExclusively(context.getCurrentQueryId()); + auto lock = lockExclusively(context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); String partition_id = getPartitionIDFromQuery(partition, context); @@ -1045,8 +1050,8 @@ void StorageMergeTree::attachPartition(const ASTPtr & partition, bool attach_par void StorageMergeTree::replacePartitionFrom(const StoragePtr & source_table, const ASTPtr & partition, bool replace, const Context & context) { - auto lock1 = lockStructureForShare(false, context.getCurrentQueryId()); - auto lock2 = source_table->lockStructureForShare(false, context.getCurrentQueryId()); + auto lock1 = lockStructureForShare(false, context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); + auto lock2 = source_table->lockStructureForShare(false, context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); Stopwatch watch; MergeTreeData & src_data = checkStructureAndGetMergeTreeData(source_table); @@ -1116,8 +1121,8 @@ void StorageMergeTree::replacePartitionFrom(const StoragePtr & source_table, con void StorageMergeTree::movePartitionToTable(const StoragePtr & dest_table, const ASTPtr & partition, const Context & context) { - auto lock1 = lockStructureForShare(false, context.getCurrentQueryId()); - auto lock2 = dest_table->lockStructureForShare(false, context.getCurrentQueryId()); + auto lock1 = lockStructureForShare(false, context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); + auto lock2 = dest_table->lockStructureForShare(false, context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); auto dest_table_storage = std::dynamic_pointer_cast<StorageMergeTree>(dest_table); if (!dest_table_storage) diff --git a/src/Storages/StorageNull.cpp b/src/Storages/StorageNull.cpp index 878be5bbf2d..bafb3d9a9fb 100644 --- a/src/Storages/StorageNull.cpp +++ b/src/Storages/StorageNull.cpp @@ -48,7 +48,7 @@ void StorageNull::checkAlterIsPossible(const AlterCommands & commands, const Set void StorageNull::alter( const AlterCommands & params, const Context & context, TableStructureWriteLockHolder & table_lock_holder) { - lockStructureExclusively(table_lock_holder, context.getCurrentQueryId()); + lockStructureExclusively(table_lock_holder, context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); auto table_id = getStorageID(); StorageInMemoryMetadata metadata = getInMemoryMetadata(); diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 8896151561b..ab5898458c5 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -1025,7 +1025,8 @@ bool StorageReplicatedMergeTree::tryExecuteMerge(const LogEntry & entry) ReservationPtr reserved_space = reserveSpacePreferringTTLRules(estimated_space_for_merge, ttl_infos, time(nullptr), max_volume_index); - auto table_lock = lockStructureForShare(false, RWLockImpl::NO_QUERY); + auto table_lock = lockStructureForShare( + false, RWLockImpl::NO_QUERY, storage_settings_ptr->lock_acquire_timeout_for_background_operations); FutureMergedMutatedPart future_merged_part(parts, entry.new_part_type); if (future_merged_part.name != entry.new_part_name) @@ -1160,7 +1161,8 @@ bool StorageReplicatedMergeTree::tryExecutePartMutation(const StorageReplicatedM /// Can throw an exception.
ReservationPtr reserved_space = reserveSpace(estimated_space_for_result, source_part->disk); - auto table_lock = lockStructureForShare(false, RWLockImpl::NO_QUERY); + auto table_lock = lockStructureForShare( + false, RWLockImpl::NO_QUERY, storage_settings_ptr->lock_acquire_timeout_for_background_operations); MutableDataPartPtr new_part; Transaction transaction(*this); @@ -1514,7 +1516,8 @@ bool StorageReplicatedMergeTree::executeReplaceRange(const LogEntry & entry) PartDescriptions parts_to_add; DataPartsVector parts_to_remove; - auto table_lock_holder_dst_table = lockStructureForShare(false, RWLockImpl::NO_QUERY); + auto table_lock_holder_dst_table = lockStructureForShare( + false, RWLockImpl::NO_QUERY, getSettings()->lock_acquire_timeout_for_background_operations); for (size_t i = 0; i < entry_replace.new_part_names.size(); ++i) { @@ -1576,7 +1579,8 @@ bool StorageReplicatedMergeTree::executeReplaceRange(const LogEntry & entry) return 0; } - table_lock_holder_src_table = source_table->lockStructureForShare(false, RWLockImpl::NO_QUERY); + table_lock_holder_src_table = source_table->lockStructureForShare( + false, RWLockImpl::NO_QUERY, getSettings()->lock_acquire_timeout_for_background_operations); DataPartStates valid_states{MergeTreeDataPartState::PreCommitted, MergeTreeDataPartState::Committed, MergeTreeDataPartState::Outdated}; @@ -2699,7 +2703,8 @@ bool StorageReplicatedMergeTree::fetchPart(const String & part_name, const Strin TableStructureReadLockHolder table_lock_holder; if (!to_detached) - table_lock_holder = lockStructureForShare(true, RWLockImpl::NO_QUERY); + table_lock_holder = lockStructureForShare( + true, RWLockImpl::NO_QUERY, getSettings()->lock_acquire_timeout_for_background_operations); /// Logging Stopwatch stopwatch; @@ -3166,7 +3171,7 @@ bool StorageReplicatedMergeTree::executeMetadataAlter(const StorageReplicatedMer { /// TODO (relax this lock) - auto table_lock = lockExclusively(RWLockImpl::NO_QUERY); + auto table_lock = lockExclusively(RWLockImpl::NO_QUERY, getSettings()->lock_acquire_timeout_for_background_operations); LOG_INFO(log, "Metadata changed in ZooKeeper. Applying changes locally."); @@ -3193,7 +3198,8 @@ void StorageReplicatedMergeTree::alter( if (params.isSettingsAlter()) { - lockStructureExclusively(table_lock_holder, query_context.getCurrentQueryId()); + lockStructureExclusively( + table_lock_holder, query_context.getCurrentQueryId(), query_context.getSettingsRef().lock_acquire_timeout); /// We don't replicate storage_settings_ptr ALTER. It's local operation. /// Also we don't upgrade alter lock to table structure lock. 
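Settings-only ALTERs above take the exclusive structure lock directly and skip upgrading the alter intention lock, which is what the comment refers to. For regular ALTERs the scheme is two-phase — a narrow intention lock first, a short exclusive window last — sketched below with standard mutexes standing in for the real locks (an illustration, not ClickHouse code):

``` cpp
#include <chrono>
#include <mutex>
#include <shared_mutex>
#include <stdexcept>

struct TableLocks
{
    std::timed_mutex alter_intention_lock;   // held for the whole ALTER
    std::shared_timed_mutex structure_lock;  // taken exclusively only for the final metadata swap
};

void alterTable(TableLocks & t, std::chrono::milliseconds acquire_timeout)
{
    // Phase 1: intention lock. Other ALTERs are blocked; SELECT and INSERT are not.
    std::unique_lock<std::timed_mutex> intention(t.alter_intention_lock, acquire_timeout);
    if (!intention.owns_lock())
        throw std::runtime_error("ALTER intention lock acquisition timed out");

    /* ... prepare the new metadata while readers keep working ... */

    // Phase 2: a short exclusive window on the structure itself.
    std::unique_lock<std::shared_timed_mutex> structure(t.structure_lock, acquire_timeout);
    if (!structure.owns_lock())
        throw std::runtime_error("structure lock timed out; possible deadlock avoided");

    /* ... apply the metadata change ... */
}

int main()
{
    TableLocks t;
    alterTable(t, std::chrono::milliseconds(100));
}
```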
StorageInMemoryMetadata metadata = getInMemoryMetadata(); @@ -3259,7 +3265,8 @@ void StorageReplicatedMergeTree::alter( if (ast_to_str(current_metadata.settings_ast) != ast_to_str(future_metadata.settings_ast)) { - lockStructureExclusively(table_lock_holder, query_context.getCurrentQueryId()); + lockStructureExclusively( + table_lock_holder, query_context.getCurrentQueryId(), query_context.getSettingsRef().lock_acquire_timeout); /// Just change settings current_metadata.settings_ast = future_metadata.settings_ast; changeSettings(current_metadata.settings_ast, table_lock_holder); @@ -3428,14 +3435,16 @@ void StorageReplicatedMergeTree::alterPartition(const ASTPtr & query, const Part case PartitionCommand::FREEZE_PARTITION: { - auto lock = lockStructureForShare(false, query_context.getCurrentQueryId()); + auto lock = lockStructureForShare( + false, query_context.getCurrentQueryId(), query_context.getSettingsRef().lock_acquire_timeout); freezePartition(command.partition, command.with_name, query_context, lock); } break; case PartitionCommand::FREEZE_ALL_PARTITIONS: { - auto lock = lockStructureForShare(false, query_context.getCurrentQueryId()); + auto lock = lockStructureForShare( + false, query_context.getCurrentQueryId(), query_context.getSettingsRef().lock_acquire_timeout); freezeAll(command.with_name, query_context, lock); } break; @@ -4443,7 +4452,8 @@ void StorageReplicatedMergeTree::clearOldPartsAndRemoveFromZK() { /// Critical section is not required (since grabOldParts() returns unique part set on each call) - auto table_lock = lockStructureForShare(false, RWLockImpl::NO_QUERY); + auto table_lock = lockStructureForShare( + false, RWLockImpl::NO_QUERY, getSettings()->lock_acquire_timeout_for_background_operations); auto zookeeper = getZooKeeper(); DataPartsVector parts = grabOldParts(); @@ -4738,8 +4748,8 @@ void StorageReplicatedMergeTree::replacePartitionFrom(const StoragePtr & source_ const Context & context) { /// First argument is true, because we possibly will add new data to current table. 
- auto lock1 = lockStructureForShare(true, context.getCurrentQueryId()); - auto lock2 = source_table->lockStructureForShare(false, context.getCurrentQueryId()); + auto lock1 = lockStructureForShare(true, context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); + auto lock2 = source_table->lockStructureForShare(false, context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); Stopwatch watch; MergeTreeData & src_data = checkStructureAndGetMergeTreeData(source_table); @@ -4917,8 +4927,8 @@ void StorageReplicatedMergeTree::replacePartitionFrom(const StoragePtr & source_ void StorageReplicatedMergeTree::movePartitionToTable(const StoragePtr & dest_table, const ASTPtr & partition, const Context & context) { - auto lock1 = lockStructureForShare(false, context.getCurrentQueryId()); - auto lock2 = dest_table->lockStructureForShare(false, context.getCurrentQueryId()); + auto lock1 = lockStructureForShare(false, context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); + auto lock2 = dest_table->lockStructureForShare(false, context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); auto dest_table_storage = std::dynamic_pointer_cast(dest_table); if (!dest_table_storage) diff --git a/src/Storages/System/StorageSystemColumns.cpp b/src/Storages/System/StorageSystemColumns.cpp index cbf6ada9ed3..26e2376c3f7 100644 --- a/src/Storages/System/StorageSystemColumns.cpp +++ b/src/Storages/System/StorageSystemColumns.cpp @@ -62,12 +62,12 @@ public: ColumnPtr databases_, ColumnPtr tables_, Storages storages_, - const std::shared_ptr & access_, - String query_id_) + const Context & context) : SourceWithProgress(header_) , columns_mask(std::move(columns_mask_)), max_block_size(max_block_size_) , databases(std::move(databases_)), tables(std::move(tables_)), storages(std::move(storages_)) - , query_id(std::move(query_id_)), total_tables(tables->size()), access(access_) + , total_tables(tables->size()), access(context.getAccess()) + , query_id(context.getCurrentQueryId()), lock_acquire_timeout(context.getSettingsRef().lock_acquire_timeout) { } @@ -103,7 +103,7 @@ protected: try { - table_lock = storage->lockStructureForShare(false, query_id); + table_lock = storage->lockStructureForShare(false, query_id, lock_acquire_timeout); } catch (const Exception & e) { @@ -227,10 +227,11 @@ private: ColumnPtr databases; ColumnPtr tables; Storages storages; - String query_id; size_t db_table_num = 0; size_t total_tables; std::shared_ptr access; + String query_id; + SettingSeconds lock_acquire_timeout; }; @@ -331,8 +332,8 @@ Pipes StorageSystemColumns::read( pipes.emplace_back(std::make_shared( std::move(columns_mask), std::move(header), max_block_size, - std::move(filtered_database_column), std::move(filtered_table_column), std::move(storages), - context.getAccess(), context.getCurrentQueryId())); + std::move(filtered_database_column), std::move(filtered_table_column), + std::move(storages), context)); return pipes; } diff --git a/src/Storages/System/StorageSystemPartsBase.cpp b/src/Storages/System/StorageSystemPartsBase.cpp index d8f564b0160..19c6f6b3d03 100644 --- a/src/Storages/System/StorageSystemPartsBase.cpp +++ b/src/Storages/System/StorageSystemPartsBase.cpp @@ -62,7 +62,7 @@ StoragesInfo::getParts(MergeTreeData::DataPartStateVector & state, bool has_stat } StoragesInfoStream::StoragesInfoStream(const SelectQueryInfo & query_info, const Context & context) - : query_id(context.getCurrentQueryId()) + : query_id(context.getCurrentQueryId()), 
settings(context.getSettings()) { /// Will apply WHERE to subset of columns and then add more columns. /// This is kind of complicated, but we use WHERE to do less work. @@ -192,7 +192,7 @@ StoragesInfo StoragesInfoStream::next() try { /// For table not to be dropped and set of columns to remain constant. - info.table_lock = info.storage->lockStructureForShare(false, query_id); + info.table_lock = info.storage->lockStructureForShare(false, query_id, settings.lock_acquire_timeout); } catch (const Exception & e) { diff --git a/src/Storages/System/StorageSystemPartsBase.h b/src/Storages/System/StorageSystemPartsBase.h index b30f7c62914..be8e45146cb 100644 --- a/src/Storages/System/StorageSystemPartsBase.h +++ b/src/Storages/System/StorageSystemPartsBase.h @@ -36,6 +36,8 @@ public: private: String query_id; + Settings settings; + ColumnPtr database_column; ColumnPtr table_column; diff --git a/src/Storages/System/StorageSystemTables.cpp b/src/Storages/System/StorageSystemTables.cpp index a8d5fc2ec57..f4ce4a8b717 100644 --- a/src/Storages/System/StorageSystemTables.cpp +++ b/src/Storages/System/StorageSystemTables.cpp @@ -244,7 +244,8 @@ protected: if (need_lock_structure) { table = tables_it->table(); - lock = table->lockStructureForShare(false, context.getCurrentQueryId()); + lock = table->lockStructureForShare( + false, context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); } } catch (const Exception & e) From 3166eab2dfc89323b9d756aeba6866381fd9c186 Mon Sep 17 00:00:00 2001 From: Ivan Blinkov Date: Thu, 9 Apr 2020 21:56:03 +0300 Subject: [PATCH 252/484] Update release.sh --- docs/tools/release.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/tools/release.sh b/docs/tools/release.sh index e0f580c383b..8eec2d758da 100755 --- a/docs/tools/release.sh +++ b/docs/tools/release.sh @@ -4,9 +4,9 @@ set -ex BASE_DIR=$(dirname $(readlink -f $0)) BUILD_DIR="${BASE_DIR}/../build" PUBLISH_DIR="${BASE_DIR}/../publish" -BASE_DOMAIN="${BASE_DOMAIN:-clickhouse.tech}" +BASE_DOMAIN="${BASE_DOMAIN:-content.clickhouse.tech}" GIT_TEST_URI="${GIT_TEST_URI:-git@github.com:ClickHouse/clickhouse.github.io.git}" -GIT_PROD_URI="git@github.com:ClickHouse/clickhouse.github.io.git" +GIT_PROD_URI="git@github.com:ClickHouse/clickhouse-website-content.git" EXTRA_BUILD_ARGS="${EXTRA_BUILD_ARGS:---enable-stable-releases --minify}" HISTORY_SIZE="${HISTORY_SIZE:-5}" From 0accd2908c347038db03f26911a8d2de5ae72ea5 Mon Sep 17 00:00:00 2001 From: Alexander Kazakov Date: Thu, 9 Apr 2020 23:11:20 +0300 Subject: [PATCH 253/484] Fix up styler's grudge --- src/Storages/IStorage.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/IStorage.cpp b/src/Storages/IStorage.cpp index 3bf8054485c..5a792080370 100644 --- a/src/Storages/IStorage.cpp +++ b/src/Storages/IStorage.cpp @@ -324,7 +324,7 @@ RWLockImpl::LockHolder IStorage::tryLockTimed( const String type_str = type == RWLockImpl::Type::Read ? "READ" : "WRITE"; throw Exception( type_str + " locking attempt on \"" + getStorageID().getFullTableName() + - "\" has timed out! (" + toString(acquire_timeout.totalMilliseconds()) + "ms ). " + "\" has timed out! (" + toString(acquire_timeout.totalMilliseconds()) + "ms) " "Possible deadlock avoided. 
Client should retry.", ErrorCodes::DEADLOCK_AVOIDED); } From 2b51b5ee5fdf414d1fc1e6cace53f578351afd4c Mon Sep 17 00:00:00 2001 From: Ivan Blinkov Date: Thu, 9 Apr 2020 23:32:35 +0300 Subject: [PATCH 254/484] Update release.sh --- docs/tools/release.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/tools/release.sh b/docs/tools/release.sh index 8eec2d758da..faca0e8ec17 100755 --- a/docs/tools/release.sh +++ b/docs/tools/release.sh @@ -5,7 +5,7 @@ BASE_DIR=$(dirname $(readlink -f $0)) BUILD_DIR="${BASE_DIR}/../build" PUBLISH_DIR="${BASE_DIR}/../publish" BASE_DOMAIN="${BASE_DOMAIN:-content.clickhouse.tech}" -GIT_TEST_URI="${GIT_TEST_URI:-git@github.com:ClickHouse/clickhouse.github.io.git}" +GIT_TEST_URI="${GIT_TEST_URI:-git@github.com:ClickHouse/clickhouse-website-content.git}" GIT_PROD_URI="git@github.com:ClickHouse/clickhouse-website-content.git" EXTRA_BUILD_ARGS="${EXTRA_BUILD_ARGS:---enable-stable-releases --minify}" HISTORY_SIZE="${HISTORY_SIZE:-5}" From 9326016e5fd3c50550cfa23a47c063c41a886b70 Mon Sep 17 00:00:00 2001 From: Ivan Blinkov Date: Thu, 9 Apr 2020 23:50:39 +0300 Subject: [PATCH 255/484] Put single-page content into a separate js file (#10160) --- docs/tools/build.py | 17 +++++++++++++++-- website/templates/docs/content.html | 15 ++++++++++----- 2 files changed, 25 insertions(+), 7 deletions(-) diff --git a/docs/tools/build.py b/docs/tools/build.py index 65b9f9f8c04..1719fe051d3 100755 --- a/docs/tools/build.py +++ b/docs/tools/build.py @@ -219,7 +219,20 @@ def build_single_page_version(lang, args, nav, cfg): os.path.join(site_temp, 'single'), single_page_output_path ) - + + single_page_index_html = os.path.join(single_page_output_path, 'index.html') + single_page_content_js = os.path.join(single_page_output_path, 'content.js') + with open(single_page_index_html, 'r') as f: + sp_prefix, sp_js, sp_suffix = f.read().split('') + with open(single_page_index_html, 'w') as f: + f.write(sp_prefix) + f.write(sp_suffix) + with open(single_page_content_js, 'w') as f: + if args.minify: + import jsmin + sp_js = jsmin.jsmin(sp_js) + f.write(sp_js) + logging.info(f'Re-building single page for {lang} pdf/test') with util.temp_dir() as test_dir: extra['single_page'] = False @@ -400,7 +413,7 @@ if __name__ == '__main__': from build import build build(args) - + if args.livereload: new_args = [arg for arg in sys.argv if not arg.startswith('--livereload')] new_args = sys.executable + ' ' + ' '.join(new_args) diff --git a/website/templates/docs/content.html b/website/templates/docs/content.html index 320f1a2b53f..d4ff1fd8554 100644 --- a/website/templates/docs/content.html +++ b/website/templates/docs/content.html @@ -17,11 +17,7 @@ {% endif %} {% if single_page and page.content %} - + {% endif %}
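The build change above splices the generated single-page index.html into a stripped page plus a content.js file that the template loads separately (the exact delimiter passed to `split()` is not shown). A rough sketch of the same splice, in which the `<script>` markers below are assumptions:

``` cpp
#include <fstream>
#include <sstream>
#include <stdexcept>
#include <string>

int main()
{
    const std::string open_tag = "<script type=\"text/javascript\">";  // assumed delimiter
    const std::string close_tag = "</script>";                         // assumed delimiter

    std::ifstream in("index.html");
    std::stringstream buffer;
    buffer << in.rdbuf();
    std::string html = buffer.str();

    // Treat the last inline script as the single-page content.
    auto begin = html.rfind(open_tag);
    auto end = html.find(close_tag, begin == std::string::npos ? 0 : begin);
    if (begin == std::string::npos || end == std::string::npos)
        throw std::runtime_error("inline content script not found");

    // Save the script body separately; the page loads it as content.js.
    std::string js = html.substr(begin + open_tag.size(), end - begin - open_tag.size());
    std::ofstream("content.js") << js;

    // Write the page back without the inline script.
    html.erase(begin, end + close_tag.size() - begin);
    std::ofstream("index.html") << html;
}
```

Splitting the content out also lets the build minify the JS on its own, as the `jsmin` branch in build.py above shows.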
    @@ -32,3 +28,12 @@
    {% endif %}
    +{% if single_page and page.content %} + +(function() { + {% for chunk in page.content|chunks %} + document.write({{ chunk|tojson|safe }}); + {% endfor %} +})(); + +{% endif %} From 345978ae35776781a136504f828336ca2bbe1151 Mon Sep 17 00:00:00 2001 From: Ivan Blinkov Date: Thu, 9 Apr 2020 23:51:01 +0300 Subject: [PATCH 256/484] normalize ru markdown (#10159) --- docs/ru/development/architecture.md | 6 +- docs/ru/development/build.md | 2 +- docs/ru/development/build_cross_arm.md | 2 +- docs/ru/development/developer_instruction.md | 4 +- docs/ru/development/style.md | 6 +- docs/ru/development/tests.md | 2 +- docs/ru/getting_started/install.md | 2 +- docs/ru/getting_started/tutorial.md | 2 +- docs/ru/interfaces/formats.md | 8 +- docs/ru/introduction/adopters.md | 2 +- docs/ru/introduction/history.md | 26 ++--- docs/ru/operations/performance_test.md | 2 +- docs/ru/operations/quotas.md | 4 +- docs/ru/operations/system_tables.md | 102 ++++++++++--------- docs/ru/operations/tips.md | 4 +- docs/ru/operations/troubleshooting.md | 2 +- 16 files changed, 89 insertions(+), 87 deletions(-) diff --git a/docs/ru/development/architecture.md b/docs/ru/development/architecture.md index f5f57179ece..0d1fc2ff947 100644 --- a/docs/ru/development/architecture.md +++ b/docs/ru/development/architecture.md @@ -118,7 +118,7 @@ A `Block` это контейнер, представляющий подмнож Существуют обычные функции и агрегатные функции. Агрегатные функции см. В следующем разделе. -Ordinary functions don't change the number of rows – they work as if they are processing each row independently. In fact, functions are not called for individual rows, but for `Block`'s данных для реализации векторизованного выполнения запросов. +Ordinary functions don’t change the number of rows – they work as if they are processing each row independently. In fact, functions are not called for individual rows, but for `Block`’s данных для реализации векторизованного выполнения запросов. Есть некоторые другие функции, такие как [размер блока](../sql_reference/functions/other_functions.md#function-blocksize), [роунумберинблок](../sql_reference/functions/other_functions.md#function-rownumberinblock), и [runningAccumulate](../sql_reference/functions/other_functions.md#function-runningaccumulate), которые эксплуатируют обработку блоков и нарушают независимость строк. @@ -157,7 +157,7 @@ ClickHouse имеет сильную типизацию, поэтому нет !!! note "Примечание" Для большинства внешних приложений мы рекомендуем использовать интерфейс HTTP, поскольку он прост и удобен в использовании. Протокол TCP более тесно связан с внутренними структурами данных: он использует внутренний формат для передачи блоков данных, а также использует пользовательское обрамление для сжатых данных. Мы не выпустили библиотеку C для этого протокола, потому что она требует связывания большей части кодовой базы ClickHouse, что нецелесообразно. -## Выполнение Распределенных Запросов {#distributed-query-execution} +## Выполнение Распределенных Запросов {#distributed-query-execution} Серверы в кластерной установке в основном независимы. Вы можете создать `Distributed` таблица на одном или всех серверах кластера. То `Distributed` table does not store data itself – it only provides a «view» ко всем локальным таблицам на нескольких узлах кластера. Когда вы выберите из `Distributed` таблица, он переписывает этот запрос, выбирает удаленные узлы в соответствии с настройками балансировки нагрузки и отправляет запрос к ним. 
То `Distributed` таблица запрашивает удаленные серверы для обработки запроса только до стадии, когда промежуточные результаты с разных серверов могут быть объединены. Затем он получает промежуточные результаты и сливает их. Распределенная таблица пытается распределить как можно больше работы на удаленные серверы и не отправляет много промежуточных данных по сети. @@ -175,7 +175,7 @@ ClickHouse имеет сильную типизацию, поэтому нет Когда вы `INSERT` куча данных в `MergeTree`, эта связка сортируется по порядку первичного ключа и образует новую часть. Существуют фоновые потоки, которые периодически выделяют некоторые детали и объединяют их в одну сортированную деталь, чтобы сохранить количество деталей относительно низким. Вот почему он так называется `MergeTree`. Конечно, слияние приводит к тому, что «write amplification». Все части неизменны: они только создаются и удаляются, но не изменяются. Когда SELECT выполняется, он содержит снимок таблицы (набор деталей). После слияния мы также сохраняем старые детали в течение некоторого времени, чтобы облегчить восстановление после сбоя, поэтому, если мы видим, что какая-то объединенная деталь, вероятно, сломана, мы можем заменить ее исходными частями. -`MergeTree` это не дерево LSM, потому что оно не содержит «memtable» и «log»: inserted data is written directly to the filesystem. This makes it suitable only to INSERT data in batches, not by individual row and not very frequently – about once per second is ok, but a thousand times a second is not. We did it this way for simplicity's sake, and because we are already inserting data in batches in our applications. +`MergeTree` это не дерево LSM, потому что оно не содержит «memtable» и «log»: inserted data is written directly to the filesystem. This makes it suitable only to INSERT data in batches, not by individual row and not very frequently – about once per second is ok, but a thousand times a second is not. We did it this way for simplicity’s sake, and because we are already inserting data in batches in our applications. > Таблицы MergeTree могут иметь только один (первичный) индекс: вторичных индексов не существует. Было бы неплохо разрешить несколько физических представлений в одной логической таблице, например, хранить данные в более чем одном физическом порядке или даже разрешить представления с предварительно агрегированными данными наряду с исходными данными. diff --git a/docs/ru/development/build.md b/docs/ru/development/build.md index 3e0c3763be6..f0e0ccfd4aa 100644 --- a/docs/ru/development/build.md +++ b/docs/ru/development/build.md @@ -21,7 +21,7 @@ $ sudo apt-get install git cmake python ninja-build Есть несколько способов сделать это. 
-### Установка из PPA пакет {#install-from-a-ppa-package} +### Установка из PPA пакет {#install-from-a-ppa-package} ``` bash $ sudo apt-get install software-properties-common diff --git a/docs/ru/development/build_cross_arm.md b/docs/ru/development/build_cross_arm.md index 27e2d73c759..184028212e9 100644 --- a/docs/ru/development/build_cross_arm.md +++ b/docs/ru/development/build_cross_arm.md @@ -3,7 +3,7 @@ machine_translated: true machine_translated_rev: 1cd5f0028d917696daf71ac1c9ee849c99c1d5c8 --- -# Как построить ClickHouse на Linux для архитектуры AArch64 (ARM64) {#how-to-build-clickhouse-on-linux-for-aarch64-arm64-architecture} +# Как построить ClickHouse на Linux для архитектуры AArch64 (ARM64) {#how-to-build-clickhouse-on-linux-for-aarch64-arm64-architecture} Это для случая, когда у вас есть Linux-машина и вы хотите использовать ее для сборки `clickhouse` двоичный файл, который будет работать на другой машине Linux с архитектурой процессора AARCH64. Это предназначено для непрерывной проверки интеграции, которая выполняется на серверах Linux. diff --git a/docs/ru/development/developer_instruction.md b/docs/ru/development/developer_instruction.md index 4bc2ada8c1e..11ac3a73f6e 100644 --- a/docs/ru/development/developer_instruction.md +++ b/docs/ru/development/developer_instruction.md @@ -71,7 +71,7 @@ ClickHouse не работает и не собирается на 32-битны После этого, вы сможете добавлять в свой репозиторий обновления из репозитория Яндекса с помощью команды `git pull upstream master`. -## Работа с сабмодулями git {#rabota-s-sabmoduliami-git} +## Работа с сабмодулями Git {#rabota-s-sabmoduliami-git} Работа с сабмодулями git может быть достаточно болезненной. Следующие команды позволят содержать их в порядке: @@ -267,7 +267,7 @@ Mac OS X: clickhouse-client --max_insert_block_size 100000 --query "INSERT INTO test.hits FORMAT TSV" < hits_v1.tsv clickhouse-client --max_insert_block_size 100000 --query "INSERT INTO test.visits FORMAT TSV" < visits_v1.tsv -# Создание pull request {#sozdanie-pull-request} +# Создание Pull Request {#sozdanie-pull-request} Откройте свой форк репозитория в интерфейсе GitHub. Если вы вели разработку в бранче, выберите этот бранч. На странице будет доступна кнопка «Pull request». По сути, это означает «создать заявку на принятие моих изменений в основной репозиторий». diff --git a/docs/ru/development/style.md b/docs/ru/development/style.md index 091419394f7..a282ec6ec5c 100644 --- a/docs/ru/development/style.md +++ b/docs/ru/development/style.md @@ -431,9 +431,9 @@ enum class CompressionMethod Примеры: -- проще всего разместить объект на стеке, или сделать его членом другого класса. -- для большого количества маленьких объектов используйте контейнеры. -- для автоматического освобождения маленького количества объектов, выделенных на куче, используйте `shared_ptr/unique_ptr`. +- проще всего разместить объект на стеке, или сделать его членом другого класса. +- для большого количества маленьких объектов используйте контейнеры. +- для автоматического освобождения маленького количества объектов, выделенных на куче, используйте `shared_ptr/unique_ptr`. **2.** Управление ресурсами. diff --git a/docs/ru/development/tests.md b/docs/ru/development/tests.md index 630ceecf2b2..1dfcdfdfe6f 100644 --- a/docs/ru/development/tests.md +++ b/docs/ru/development/tests.md @@ -215,7 +215,7 @@ $ clickhouse benchmark --concurrency 16 < queries.tsv `FORTIFY_SOURCE` используется по умолчанию. Это почти бесполезно, но все же имеет смысл в редких случаях, и мы не отключаем его. 
-## Стиль Кода {#code-style} +## Стиль Кода {#code-style} Описаны правила стиля кода [здесь](https://clickhouse.tech/docs/en/development/style/). diff --git a/docs/ru/getting_started/install.md b/docs/ru/getting_started/install.md index 7caffb498e9..cd571156d03 100644 --- a/docs/ru/getting_started/install.md +++ b/docs/ru/getting_started/install.md @@ -57,7 +57,7 @@ sudo yum install clickhouse-server clickhouse-client Также есть возможность установить пакеты вручную, скачав отсюда: https://repo.yandex.ru/clickhouse/rpm/stable/x86\_64. -### Из tgz архивов {#from-tgz-archives} +### Из Tgz архивов {#from-tgz-archives} Команда ClickHouse в Яндексе рекомендует использовать предкомпилированные бинарники из `tgz` архивов для всех дистрибутивов, где невозможна установка `deb` и `rpm` пакетов. diff --git a/docs/ru/getting_started/tutorial.md b/docs/ru/getting_started/tutorial.md index 4a31f4b23a2..69cdeac8387 100644 --- a/docs/ru/getting_started/tutorial.md +++ b/docs/ru/getting_started/tutorial.md @@ -85,7 +85,7 @@ clickhouse-client --query='INSERT INTO table FORMAT TabSeparated' < data.tsv ## Импорт Образца Набора Данных {#import-sample-dataset} -Теперь пришло время заполнить наш сервер ClickHouse некоторыми образцами данных. В этом уроке мы будем использовать анонимизированные данные Яндекса.Metrica, первый сервис, который запускает ClickHouse в производственном режиме до того, как он стал открытым исходным кодом (подробнее об этом в [раздел истории](../introduction/history.md)). Есть [несколько способов импорта Яндекса.Набор метрика ](example_datasets/metrica.md), и ради учебника мы пойдем с самым реалистичным из них. +Теперь пришло время заполнить наш сервер ClickHouse некоторыми образцами данных. В этом уроке мы будем использовать анонимизированные данные Яндекса.Metrica, первый сервис, который запускает ClickHouse в производственном режиме до того, как он стал открытым исходным кодом (подробнее об этом в [раздел истории](../introduction/history.md)). Есть [несколько способов импорта Яндекса.Набор метрика](example_datasets/metrica.md), и ради учебника мы пойдем с самым реалистичным из них. ### Загрузка и извлечение данных таблицы {#download-and-extract-table-data} diff --git a/docs/ru/interfaces/formats.md b/docs/ru/interfaces/formats.md index 27cab90bdd4..b1707a55193 100644 --- a/docs/ru/interfaces/formats.md +++ b/docs/ru/interfaces/formats.md @@ -953,8 +953,8 @@ ClickHouse пишет и читает сообщения `Protocol Buffers` в Таблица ниже содержит поддерживаемые типы данных и их соответствие [типам данных](../sql_reference/data_types/index.md) ClickHouse для запросов `INSERT` и `SELECT`. -| Тип данных Parquet (`INSERT`) | Тип данных ClickHouse | Тип данных Parquet (`SELECT`) | -|-------------------------------|---------------------------------------------|-------------------------------| +| Тип данных Parquet (`INSERT`) | Тип данных ClickHouse | Тип данных Parquet (`SELECT`) | +|-------------------------------|-----------------------------------------------------------|-------------------------------| | `UINT8`, `BOOL` | [UInt8](../sql_reference/data_types/int_uint.md) | `UINT8` | | `INT8` | [Int8](../sql_reference/data_types/int_uint.md) | `INT8` | | `UINT16` | [UInt16](../sql_reference/data_types/int_uint.md) | `UINT16` | @@ -1001,8 +1001,8 @@ $ clickhouse-client --query="SELECT * FROM {some_table} FORMAT Parquet" > {some_ Таблица показывает поддержанные типы данных и их соответствие [типам данных](../sql_reference/data_types/index.md) ClickHouse для запросов `INSERT`. 
-| Тип данных ORC (`INSERT`) | Тип данных ClickHouse | -|---------------------------|---------------------------------------| +| Тип данных ORC (`INSERT`) | Тип данных ClickHouse | +|---------------------------|-----------------------------------------------------| | `UINT8`, `BOOL` | [UInt8](../sql_reference/data_types/int_uint.md) | | `INT8` | [Int8](../sql_reference/data_types/int_uint.md) | | `UINT16` | [UInt16](../sql_reference/data_types/int_uint.md) | diff --git a/docs/ru/introduction/adopters.md b/docs/ru/introduction/adopters.md index 1b7d56b19d1..20c465f6418 100644 --- a/docs/ru/introduction/adopters.md +++ b/docs/ru/introduction/adopters.md @@ -70,7 +70,7 @@ machine_translated_rev: 1cd5f0028d917696daf71ac1c9ee849c99c1d5c8 | [Технология Сяосин.](https://www.xiaoheiban.cn/) | Образование | Общая цель | — | — | [Слайды на английском языке, ноябрь 2019 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/sync-clickhouse-with-mysql-mongodb.pptx) | | [Сималайя](https://www.ximalaya.com/) | Общий доступ к аудио | OLAP | — | — | [Слайды на английском языке, ноябрь 2019 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/ximalaya.pdf) | | [Облако Яндекса](https://cloud.yandex.ru/services/managed-clickhouse) | Публичное Облако | Главный продукт | — | — | [Разговор на русском языке, декабрь 2019 года](https://www.youtube.com/watch?v=pgnak9e_E0o) | -| [DataLens Яндекс ](https://cloud.yandex.ru/services/datalens) | Бизнес-разведка | Главный продукт | — | — | [Слайды на русском языке, декабрь 2019 года](https://presentations.clickhouse.tech/meetup38/datalens.pdf) | +| [DataLens Яндекс](https://cloud.yandex.ru/services/datalens) | Бизнес-разведка | Главный продукт | — | — | [Слайды на русском языке, декабрь 2019 года](https://presentations.clickhouse.tech/meetup38/datalens.pdf) | | [Яндекс Маркет](https://market.yandex.ru/) | электронная коммерция | Метрики, Ведение Журнала | — | — | [Разговор на русском языке, январь 2019 года](https://youtu.be/_l1qP0DyBcA?t=478) | | [Яндекс Метрика](https://metrica.yandex.com) | Веб-аналитика | Главный продукт | 360 серверов в одном кластере, 1862 сервера в одном отделе | 66.41 ПИБ / 5.68 ПИБ | [Слайды, Февраль 2020 Года](https://presentations.clickhouse.tech/meetup40/introduction/#13) | | [ЦВТ](https://htc-cs.ru/) | Разработка программного обеспечения | Метрики, Ведение Журнала | — | — | [Сообщение в блоге, март 2019 года, на русском языке](https://vc.ru/dev/62715-kak-my-stroili-monitoring-na-prometheus-clickhouse-i-elk) | diff --git a/docs/ru/introduction/history.md b/docs/ru/introduction/history.md index 856263704e5..65254b0f4f0 100644 --- a/docs/ru/introduction/history.md +++ b/docs/ru/introduction/history.md @@ -13,11 +13,11 @@ ClickHouse изначально разрабатывался для обеспе Также ClickHouse используется: -- для хранения данных Вебвизора; -- для обработки промежуточных данных; -- для построения глобальных отчётов Аналитиками; -- для выполнения запросов в целях отладки движка Метрики; -- для анализа логов работы API и пользовательского интерфейса. +- для хранения данных Вебвизора; +- для обработки промежуточных данных; +- для построения глобальных отчётов Аналитиками; +- для выполнения запросов в целях отладки движка Метрики; +- для анализа логов работы API и пользовательского интерфейса. ClickHouse имеет более десятка инсталляций в других отделах Яндекса: в Вертикальных сервисах, Маркете, Директе, БК, Бизнес аналитике, Мобильной разработке, AdFox, Персональных сервисах и т п. 
@@ -27,14 +27,14 @@ ClickHouse имеет более десятка инсталляций в дру Но агрегированные данные являются очень ограниченным решением, по следующим причинам: -- вы должны заранее знать перечень отчётов, необходимых пользователю; -- то есть, пользователь не может построить произвольный отчёт; -- при агрегации по большому количеству ключей, объём данных не уменьшается и агрегация бесполезна; -- при большом количестве отчётов, получается слишком много вариантов агрегации (комбинаторный взрыв); -- при агрегации по ключам высокой кардинальности (например, URL) объём данных уменьшается не сильно (менее чем в 2 раза); -- из-за этого, объём данных при агрегации может не уменьшиться, а вырасти; -- пользователи будут смотреть не все отчёты, которые мы для них посчитаем - то есть, большая часть вычислений бесполезна; -- возможно нарушение логической целостности данных для разных агрегаций; +- вы должны заранее знать перечень отчётов, необходимых пользователю; +- то есть, пользователь не может построить произвольный отчёт; +- при агрегации по большому количеству ключей, объём данных не уменьшается и агрегация бесполезна; +- при большом количестве отчётов, получается слишком много вариантов агрегации (комбинаторный взрыв); +- при агрегации по ключам высокой кардинальности (например, URL) объём данных уменьшается не сильно (менее чем в 2 раза); +- из-за этого, объём данных при агрегации может не уменьшиться, а вырасти; +- пользователи будут смотреть не все отчёты, которые мы для них посчитаем - то есть, большая часть вычислений бесполезна; +- возможно нарушение логической целостности данных для разных агрегаций; Как видно, если ничего не агрегировать, и работать с неагрегированными данными, то это даже может уменьшить объём вычислений. diff --git a/docs/ru/operations/performance_test.md b/docs/ru/operations/performance_test.md index 391bcddd412..9b5c6f4fed3 100644 --- a/docs/ru/operations/performance_test.md +++ b/docs/ru/operations/performance_test.md @@ -42,7 +42,7 @@ machine_translated_rev: 1cd5f0028d917696daf71ac1c9ee849c99c1d5c8 chmod a+x benchmark-new.sh wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/clickhouse/queries.sql -1. Загрузите тестовые данные в соответствии с [Яндекс.Набор метрика ](../getting_started/example_datasets/metrica.md) инструкция («hits» таблица, содержащая 100 миллионов строк). +1. Загрузите тестовые данные в соответствии с [Яндекс.Набор метрика](../getting_started/example_datasets/metrica.md) инструкция («hits» таблица, содержащая 100 миллионов строк). diff --git a/docs/ru/operations/quotas.md b/docs/ru/operations/quotas.md index f109b889350..399e80d2011 100644 --- a/docs/ru/operations/quotas.md +++ b/docs/ru/operations/quotas.md @@ -7,8 +7,8 @@ В отличие от них, квоты: -- ограничивают не один запрос, а множество запросов, которые могут быть выполнены за интервал времени; -- при распределённой обработке запроса, учитывают ресурсы, потраченные на всех удалённых серверах. +- ограничивают не один запрос, а множество запросов, которые могут быть выполнены за интервал времени; +- при распределённой обработке запроса, учитывают ресурсы, потраченные на всех удалённых серверах. Рассмотрим фрагмент файла users.xml, описывающего квоты. 
diff --git a/docs/ru/operations/system_tables.md b/docs/ru/operations/system_tables.md index dfc15e6281a..b68aa570f52 100644 --- a/docs/ru/operations/system_tables.md +++ b/docs/ru/operations/system_tables.md @@ -141,37 +141,37 @@ SELECT * FROM system.contributors WHERE name='Olga Khvostikova' Столбцы: -- `database` ([String](../sql_reference/data_types/string.md)) — Имя базы данных, в которой находится словарь, созданный с помощью DDL-запроса. Пустая строка для других словарей. -- `name` ([String](../sql_reference/data_types/string.md)) — [Имя словаря](../sql_reference/dictionaries/external_dictionaries/external_dicts_dict.md). -- `status` ([Enum8](../sql_reference/data_types/enum.md)) — Статус словаря. Возможные значения: - - `NOT_LOADED` — Словарь не загружен, потому что не использовался. - - `LOADED` — Словарь загружен успешно. - - `FAILED` — Словарь не загружен в результате ошибки. - - `LOADING` — Словарь в процессе загрузки. - - `LOADED_AND_RELOADING` — Словарь загружен успешно, сейчас перезагружается (частые причины: запрос [SYSTEM RELOAD DICTIONARY](../sql_reference/statements/system.md#query_language-system-reload-dictionary), таймаут, изменение настроек словаря). - - `FAILED_AND_RELOADING` — Словарь не загружен в результате ошибки, сейчас перезагружается. -- `origin` ([String](../sql_reference/data_types/string.md)) — Путь к конфигурационному файлу, описывающему словарь. -- `type` ([String](../sql_reference/data_types/string.md)) — Тип размещения словаря. [Хранение словарей в памяти](../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_layout.md). -- `key` — [Тип ключа](../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md#ext_dict_structure-key): Числовой ключ ([UInt64](../sql_reference/data_types/int_uint.md#uint-ranges)) или Составной ключ ([String](../sql_reference/data_types/string.md)) — строка вида "(тип 1, тип 2, ..., тип n)". -- `attribute.names` ([Array](../sql_reference/data_types/array.md)([String](../sql_reference/data_types/string.md))) — Массив [имен атрибутов](../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md#ext_dict_structure-attributes), предоставляемых справочником. -- `attribute.types` ([Array](../sql_reference/data_types/array.md)([String](../sql_reference/data_types/string.md))) — Соответствующий массив [типов атрибутов](../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md#ext_dict_structure-attributes), предоставляемых справочником. -- `bytes_allocated` ([UInt64](../sql_reference/data_types/int_uint.md#uint-ranges)) — Объем оперативной памяти, используемый словарем. -- `query_count` ([UInt64](../sql_reference/data_types/int_uint.md#uint-ranges)) — Количество запросов с момента загрузки словаря или с момента последней успешной перезагрузки. -- `hit_rate` ([Float64](../sql_reference/data_types/float.md)) — Для cache-словарей — процент закэшированных значений. -- `element_count` ([UInt64](../sql_reference/data_types/int_uint.md#uint-ranges)) — Количество элементов, хранящихся в словаре. -- `load_factor` ([Float64](../sql_reference/data_types/float.md)) — Процент заполнения словаря (для хэшированного словаря — процент заполнения хэш-таблицы). -- `source` ([String](../sql_reference/data_types/string.md)) — Текст, описывающий [источник данных](../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md) для словаря. 
-- `lifetime_min` ([UInt64](../sql_reference/data_types/int_uint.md#uint-ranges)) — Минимальное [время обновления](../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_lifetime.md) словаря в памяти, по истечении которого Clickhouse попытается перезагрузить словарь (если задано `invalidate_query`, то только если он изменился). Задается в секундах. -- `lifetime_max` ([UInt64](../sql_reference/data_types/int_uint.md#uint-ranges)) — Максимальное [время обновления](../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_lifetime.md) словаря в памяти, по истечении которого Clickhouse попытается перезагрузить словарь (если задано `invalidate_query`, то только если он изменился). Задается в секундах. -- `loading_start_time` ([DateTime](../sql_reference/data_types/datetime.md)) — Время начала загрузки словаря. -- `loading_duration` ([Float32](../sql_reference/data_types/float.md)) — Время, затраченное на загрузку словаря. -- `last_exception` ([String](../sql_reference/data_types/string.md)) — Текст ошибки, возникающей при создании или перезагрузке словаря, если словарь не удалось создать. +- `database` ([String](../sql_reference/data_types/string.md)) — Имя базы данных, в которой находится словарь, созданный с помощью DDL-запроса. Пустая строка для других словарей. +- `name` ([String](../sql_reference/data_types/string.md)) — [Имя словаря](../sql_reference/dictionaries/external_dictionaries/external_dicts_dict.md). +- `status` ([Enum8](../sql_reference/data_types/enum.md)) — Статус словаря. Возможные значения: + - `NOT_LOADED` — Словарь не загружен, потому что не использовался. + - `LOADED` — Словарь загружен успешно. + - `FAILED` — Словарь не загружен в результате ошибки. + - `LOADING` — Словарь в процессе загрузки. + - `LOADED_AND_RELOADING` — Словарь загружен успешно, сейчас перезагружается (частые причины: запрос [SYSTEM RELOAD DICTIONARY](../sql_reference/statements/system.md#query_language-system-reload-dictionary), таймаут, изменение настроек словаря). + - `FAILED_AND_RELOADING` — Словарь не загружен в результате ошибки, сейчас перезагружается. +- `origin` ([String](../sql_reference/data_types/string.md)) — Путь к конфигурационному файлу, описывающему словарь. +- `type` ([String](../sql_reference/data_types/string.md)) — Тип размещения словаря. [Хранение словарей в памяти](../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_layout.md). +- `key` — [Тип ключа](../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md#ext_dict_structure-key): Числовой ключ ([UInt64](../sql_reference/data_types/int_uint.md#uint-ranges)) или Составной ключ ([String](../sql_reference/data_types/string.md)) — строка вида “(тип 1, тип 2, …, тип n)”. +- `attribute.names` ([Array](../sql_reference/data_types/array.md)([String](../sql_reference/data_types/string.md))) — Массив [имен атрибутов](../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md#ext_dict_structure-attributes), предоставляемых справочником. +- `attribute.types` ([Array](../sql_reference/data_types/array.md)([String](../sql_reference/data_types/string.md))) — Соответствующий массив [типов атрибутов](../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md#ext_dict_structure-attributes), предоставляемых справочником. +- `bytes_allocated` ([UInt64](../sql_reference/data_types/int_uint.md#uint-ranges)) — Объем оперативной памяти, используемый словарем. 
+- `query_count` ([UInt64](../sql_reference/data_types/int_uint.md#uint-ranges)) — Количество запросов с момента загрузки словаря или с момента последней успешной перезагрузки. +- `hit_rate` ([Float64](../sql_reference/data_types/float.md)) — Для cache-словарей — процент закэшированных значений. +- `element_count` ([UInt64](../sql_reference/data_types/int_uint.md#uint-ranges)) — Количество элементов, хранящихся в словаре. +- `load_factor` ([Float64](../sql_reference/data_types/float.md)) — Процент заполнения словаря (для хэшированного словаря — процент заполнения хэш-таблицы). +- `source` ([String](../sql_reference/data_types/string.md)) — Текст, описывающий [источник данных](../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md) для словаря. +- `lifetime_min` ([UInt64](../sql_reference/data_types/int_uint.md#uint-ranges)) — Минимальное [время обновления](../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_lifetime.md) словаря в памяти, по истечении которого Clickhouse попытается перезагрузить словарь (если задано `invalidate_query`, то только если он изменился). Задается в секундах. +- `lifetime_max` ([UInt64](../sql_reference/data_types/int_uint.md#uint-ranges)) — Максимальное [время обновления](../sql_reference/dictionaries/external_dictionaries/external_dicts_dict_lifetime.md) словаря в памяти, по истечении которого Clickhouse попытается перезагрузить словарь (если задано `invalidate_query`, то только если он изменился). Задается в секундах. +- `loading_start_time` ([DateTime](../sql_reference/data_types/datetime.md)) — Время начала загрузки словаря. +- `loading_duration` ([Float32](../sql_reference/data_types/float.md)) — Время, затраченное на загрузку словаря. +- `last_exception` ([String](../sql_reference/data_types/string.md)) — Текст ошибки, возникающей при создании или перезагрузке словаря, если словарь не удалось создать. **Пример** Настройте словарь. -```sql +``` sql CREATE DICTIONARY dictdb.dict ( `key` Int64 DEFAULT -1, @@ -186,11 +186,11 @@ LAYOUT(FLAT()) Убедитесь, что словарь загружен. -```sql +``` sql SELECT * FROM system.dictionaries ``` -```text +``` text ┌─database─┬─name─┬─status─┬─origin──────┬─type─┬─key────┬─attribute.names──────────────────────┬─attribute.types─────┬─bytes_allocated─┬─query_count─┬─hit_rate─┬─element_count─┬───────────load_factor─┬─source─────────────────────┬─lifetime_min─┬─lifetime_max─┬──loading_start_time─┌──last_successful_update_time─┬──────loading_duration─┬─last_exception─┐ │ dictdb │ dict │ LOADED │ dictdb.dict │ Flat │ UInt64 │ ['value_default','value_expression'] │ ['String','String'] │ 74032 │ 0 │ 1 │ 1 │ 0.0004887585532746823 │ ClickHouse: dictdb.dicttbl │ 0 │ 1 │ 2020-03-04 04:17:34 │ 2020-03-04 04:30:34 │ 0.002 │ │ └──────────┴──────┴────────┴─────────────┴──────┴────────┴──────────────────────────────────────┴─────────────────────┴─────────────────┴─────────────┴──────────┴───────────────┴───────────────────────┴────────────────────────────┴──────────────┴──────────────┴─────────────────────┴──────────────────────────────┘───────────────────────┴────────────────┘ @@ -921,33 +921,33 @@ WHERE Если этот запрос ничего не возвращает - значит всё хорошо. -## system.settings {#system-tables-system-settings} +## system.settings {#system-tables-system-settings} Содержит информацию о сессионных настройках для текущего пользователя. Столбцы: -- `name` ([String](../sql_reference/data_types/string.md)) — имя настройки. 
-- `value` ([String](../sql_reference/data_types/string.md)) — значение настройки. -- `changed` ([UInt8](../sql_reference/data_types/int_uint.md#uint-ranges)) — показывает, изменена ли настройка по отношению к значению по умолчанию. -- `description` ([String](../sql_reference/data_types/string.md)) — краткое описание настройки. -- `min` ([Nullable](../sql_reference/data_types/nullable.md)([String](../sql_reference/data_types/string.md))) — минимальное значение настройки, если задано [ограничение](settings/constraints_on_settings.md#constraints-on-settings). Если нет, то поле содержит [NULL](../sql_reference/syntax.md#null-literal). -- `max` ([Nullable](../sql_reference/data_types/nullable.md)([String](../sql_reference/data_types/string.md))) — максимальное значение настройки, если задано [ограничение](settings/constraints_on_settings.md#constraints-on-settings). Если нет, то поле содержит [NULL](../sql_reference/syntax.md#null-literal). -- `readonly` ([UInt8](../sql_reference/data_types/int_uint.md#uint-ranges)) — Показывает, может ли пользователь изменять настройку: - - `0` — Текущий пользователь может изменять настройку. - - `1` — Текущий пользователь не может изменять настройку. +- `name` ([String](../sql_reference/data_types/string.md)) — имя настройки. +- `value` ([String](../sql_reference/data_types/string.md)) — значение настройки. +- `changed` ([UInt8](../sql_reference/data_types/int_uint.md#uint-ranges)) — показывает, изменена ли настройка по отношению к значению по умолчанию. +- `description` ([String](../sql_reference/data_types/string.md)) — краткое описание настройки. +- `min` ([Nullable](../sql_reference/data_types/nullable.md)([String](../sql_reference/data_types/string.md))) — минимальное значение настройки, если задано [ограничение](settings/constraints_on_settings.md#constraints-on-settings). Если нет, то поле содержит [NULL](../sql_reference/syntax.md#null-literal). +- `max` ([Nullable](../sql_reference/data_types/nullable.md)([String](../sql_reference/data_types/string.md))) — максимальное значение настройки, если задано [ограничение](settings/constraints_on_settings.md#constraints-on-settings). Если нет, то поле содержит [NULL](../sql_reference/syntax.md#null-literal). +- `readonly` ([UInt8](../sql_reference/data_types/int_uint.md#uint-ranges)) — Показывает, может ли пользователь изменять настройку: + - `0` — Текущий пользователь может изменять настройку. + - `1` — Текущий пользователь не может изменять настройку. **Пример** Пример показывает как получить информацию о настройках, имена которых содержат `min_i`. -```sql -SELECT * -FROM system.settings +``` sql +SELECT * +FROM system.settings WHERE name LIKE '%min_i%' ``` -```text +``` text ┌─name────────────────────────────────────────┬─value─────┬─changed─┬─description───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬─min──┬─max──┬─readonly─┐ │ min_insert_block_size_rows │ 1048576 │ 0 │ Squash blocks passed to INSERT query to specified size in rows, if blocks are not big enough. │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 0 │ │ min_insert_block_size_bytes │ 268435456 │ 0 │ Squash blocks passed to INSERT query to specified size in bytes, if blocks are not big enough. │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 0 │ @@ -957,21 +957,23 @@ WHERE name LIKE '%min_i%' Использование `WHERE changed` может быть полезно, например, если необходимо проверить: -- Что настройки корректно загрузились из конфигурационного файла и используются. -- Настройки, изменённые в текущей сессии. 
+- Что настройки корректно загрузились из конфигурационного файла и используются. +- Настройки, изменённые в текущей сессии. -```sql + + +``` sql SELECT * FROM system.settings WHERE changed AND name='load_balancing' ``` - **Cм. также** -- [Настройки](settings/index.md#settings) -- [Разрешения для запросов](settings/permissions_for_queries.md#settings_readonly) -- [Ограничения для значений настроек](settings/constraints_on_settings.md) +- [Настройки](settings/index.md#settings) +- [Разрешения для запросов](settings/permissions_for_queries.md#settings_readonly) +- [Ограничения для значений настроек](settings/constraints_on_settings.md) + +## system.table\_engines {#system.table_engines} -## system.table_engines ``` text ┌─name───────────────────┬─value───────┬─changed─┐ │ max_threads │ 8 │ 1 │ diff --git a/docs/ru/operations/tips.md b/docs/ru/operations/tips.md index 4aa4605defb..271a6a35e25 100644 --- a/docs/ru/operations/tips.md +++ b/docs/ru/operations/tips.md @@ -1,6 +1,6 @@ # Советы по эксплуатации {#sovety-po-ekspluatatsii} -## CPU scaling governor {#cpu-scaling-governor} +## CPU Scaling Governor {#cpu-scaling-governor} Всегда используйте `performance` scaling governor. `ondemand` scaling governor работает намного хуже при постоянно высоком спросе. @@ -25,7 +25,7 @@ $ echo 'performance' | sudo tee /sys/devices/system/cpu/cpu*/cpufreq/scaling_gov $ echo 0 | sudo tee /proc/sys/vm/overcommit_memory ``` -## Huge pages {#huge-pages} +## Huge Pages {#huge-pages} Механизм прозрачных huge pages нужно отключить. Он мешает работе аллокаторов памяти, что приводит к значительной деградации производительности. diff --git a/docs/ru/operations/troubleshooting.md b/docs/ru/operations/troubleshooting.md index 7c4be02456c..a045de41ccc 100644 --- a/docs/ru/operations/troubleshooting.md +++ b/docs/ru/operations/troubleshooting.md @@ -7,7 +7,7 @@ ## Установка дистрибутива {#troubleshooting-installation-errors} -### Не получается скачать deb-пакеты из репозитория ClickHouse с помощью apt-get {#ne-poluchaetsia-skachat-deb-pakety-iz-repozitoriia-clickhouse-s-pomoshchiu-apt-get} +### Не получается скачать deb-пакеты из репозитория ClickHouse с помощью Apt-get {#ne-poluchaetsia-skachat-deb-pakety-iz-repozitoriia-clickhouse-s-pomoshchiu-apt-get} - Проверьте настройки брандмауэра. - Если по какой-либо причине вы не можете получить доступ к репозиторию, скачайте пакеты как описано в разделе [Начало работы](../getting_started/index.md) и установите их вручную командой `sudo dpkg -i `. Также, необходим пакет `tzdata`. From 976ef5af3c6a9b627d0a2ef8882a1f90557228a8 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 10 Apr 2020 01:26:32 +0300 Subject: [PATCH 257/484] Updated results from Jack P. 
Gao --- website/benchmark_hardware.html | 88 ++++++++++++++++----------------- 1 file changed, 44 insertions(+), 44 deletions(-) diff --git a/website/benchmark_hardware.html b/website/benchmark_hardware.html index 64eb576cc66..36f034457f8 100644 --- a/website/benchmark_hardware.html +++ b/website/benchmark_hardware.html @@ -2429,53 +2429,53 @@ var results = }, { - "system": "AMD EPYC 7702, 256 cores, 512 GiB, NVMe SSD, version 19.16", + "system": "AMD EPYC 7702, 256 cores, 512 GiB, NVMe SSD", "time": "2020-04-09 00:00:00", "result": [ -[0.103, 0.038, 0.043], -[0.072, 0.042, 0.044], -[0.118, 0.051, 0.057], -[0.222, 0.054, 0.051], -[0.339, 0.193, 0.215], -[0.376, 0.189, 0.175], -[0.114, 0.040, 0.052], -[0.085, 0.055, 0.049], -[0.354, 0.180, 0.168], -[0.372, 0.172, 0.161], -[0.276, 0.105, 0.100], -[0.259, 0.110, 0.115], -[0.399, 0.222, 0.207], -[0.586, 0.261, 0.262], -[0.394, 0.251, 0.228], -[0.350, 0.194, 0.189], -[0.705, 0.468, 0.462], -[0.653, 0.368, 0.381], -[1.285, 0.826, 0.922], -[0.223, 0.032, 0.036], -[1.690, 0.186, 0.178], -[1.916, 0.231, 0.189], -[3.551, 0.602, 0.595], -[3.198, 0.607, 0.478], -[0.530, 0.143, 0.138], -[0.311, 0.079, 0.090], -[0.554, 0.137, 0.134], -[1.775, 0.305, 0.293], -[1.480, 0.257, 0.276], -[0.864, 0.838, 0.795], -[0.529, 0.183, 0.177], -[1.051, 0.226, 0.230], -[1.719, 1.074, 1.075], -[2.134, 0.856, 0.873], -[2.123, 0.829, 0.846], -[0.380, 0.285, 0.280], -[0.193, 0.187, 0.183], -[0.080, 0.080, 0.080], -[0.077, 0.066, 0.068], -[0.432, 0.405, 0.444], -[0.050, 0.038, 0.037], -[0.032, 0.028, 0.025], -[0.010, 0.010, 0.008] +[0.006, 0.002, 0.002], +[0.252, 0.072, 0.057], +[0.113, 0.066, 0.057], +[0.197, 0.055, 0.065], +[0.311, 0.199, 0.217], +[0.360, 0.200, 0.183], +[0.119, 0.050, 0.045], +[0.066, 0.061, 0.057], +[0.320, 0.150, 0.144], +[0.346, 0.170, 0.162], +[0.226, 0.117, 0.115], +[0.265, 0.112, 0.118], +[0.402, 0.249, 0.250], +[0.561, 0.327, 0.332], +[0.397, 0.267, 0.257], +[0.323, 0.221, 0.233], +[0.710, 0.527, 0.517], +[0.667, 0.437, 0.443], +[1.269, 0.936, 0.957], +[0.189, 0.043, 0.043], +[1.673, 0.206, 0.169], +[1.937, 0.214, 0.184], +[3.527, 0.755, 0.737], +[3.197, 0.551, 0.523], +[0.519, 0.076, 0.086], +[0.268, 0.060, 0.080], +[0.522, 0.075, 0.079], +[1.693, 0.345, 0.351], +[1.466, 0.330, 0.318], +[1.078, 0.974, 1.019], +[0.501, 0.196, 0.200], +[1.032, 0.266, 0.271], +[1.621, 1.156, 1.169], +[2.089, 0.998, 0.972], +[2.106, 0.974, 0.959], +[0.366, 0.305, 0.305], +[0.190, 0.187, 0.183], +[0.071, 0.066, 0.075], +[0.072, 0.068, 0.062], +[0.415, 0.353, 0.457], +[0.034, 0.032, 0.028], +[0.031, 0.027, 0.032], +[0.024, 0.007, 0.007] ] }, ]; From c39e3a51adf41e429560e5e06118d7faffea2431 Mon Sep 17 00:00:00 2001 From: "philip.han" Date: Fri, 10 Apr 2020 12:30:54 +0900 Subject: [PATCH 258/484] Fix Set::insertFromBlockImplCase() --- src/Interpreters/Set.cpp | 7 +++++-- .../queries/0_stateless/01231_operator_null_in.sql | 14 +++++++++++++- 2 files changed, 18 insertions(+), 3 deletions(-) diff --git a/src/Interpreters/Set.cpp b/src/Interpreters/Set.cpp index 0504f9d9e6d..54992eeff2c 100644 --- a/src/Interpreters/Set.cpp +++ b/src/Interpreters/Set.cpp @@ -87,7 +87,10 @@ void NO_INLINE Set::insertFromBlockImplCase( { if ((*null_map)[i]) { - has_null = true; + if (transform_null_in) + { + has_null = true; + } if constexpr (build_filter) { @@ -180,7 +183,7 @@ bool Set::insertFromBlock(const Block & block) /// We will insert to the Set only keys, where all components are not NULL. 
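    /// A sketch of the semantics assumed by this change (the setting name and
    /// behaviour below follow the transform_null_in flag threaded through this
    /// patch): when transform_null_in = 0, rows whose key contains NULL are
    /// skipped, so the comment above holds; when transform_null_in = 1, NULL is
    /// kept as an ordinary key and inserted into the Set, which lets predicates
    /// such as `x IN (1, NULL)` match NULL values.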
ConstNullMapPtr null_map{}; - ColumnPtr null_map_holder = extractNestedColumnsAndNullMap(key_columns, null_map); + ColumnPtr null_map_holder = extractNestedColumnsAndNullMap(key_columns, null_map, transform_null_in); /// Filter to extract distinct values from the block. ColumnUInt8::MutablePtr filter; diff --git a/tests/queries/0_stateless/01231_operator_null_in.sql b/tests/queries/0_stateless/01231_operator_null_in.sql index 12361373001..3c4333c8ea6 100644 --- a/tests/queries/0_stateless/01231_operator_null_in.sql +++ b/tests/queries/0_stateless/01231_operator_null_in.sql @@ -90,4 +90,16 @@ SELECT arraySort(x -> (x.1, x.2), groupArray(t)) == [(2, NULL), (NULL, '3')] FRO SELECT arraySort(x -> (x.1, x.2), groupArray(t)) == [(1, '1'), (NULL, NULL)] FROM null_in_tuple WHERE t global in ((1, '1'), (NULL, NULL)); SELECT arraySort(x -> (x.1, x.2), groupArray(t)) == [(2, NULL), (NULL, '3')] FROM null_in_tuple WHERE t global not in ((1, '1'), (NULL, NULL)); -DROP TABLE IF EXISTS null_in_subquery; +SELECT arraySort(x -> (x.1, x.2), groupArray(t)) == [(1, '1')] FROM null_in_tuple WHERE t in ((1, '1'), (1, NULL)); +SELECT arraySort(x -> (x.1, x.2), groupArray(t)) == [(1, '1')] FROM null_in_tuple WHERE t in ((1, '1'), (NULL, '1')); +SELECT arraySort(x -> (x.1, x.2), groupArray(t)) == [(1, '1'), (2, NULL)] FROM null_in_tuple WHERE t in ((1, '1'), (NULL, '1'), (2, NULL)); +SELECT arraySort(x -> (x.1, x.2), groupArray(t)) == [(1, '1'), (NULL, '3')] FROM null_in_tuple WHERE t in ((1, '1'), (1, NULL), (NULL, '3')); +SELECT arraySort(x -> (x.1, x.2), groupArray(t)) == [(1, '1'), (2, NULL), (NULL, '3'), (NULL, NULL)] FROM null_in_tuple WHERE t in ((1, '1'), (1, NULL), (2, NULL), (NULL, '3'), (NULL, NULL)); + +SELECT arraySort(x -> (x.1, x.2), groupArray(t)) == [(2, NULL), (NULL, '3'), (NULL, NULL)] FROM null_in_tuple WHERE t not in ((1, '1'), (1, NULL)); +SELECT arraySort(x -> (x.1, x.2), groupArray(t)) == [(2, NULL), (NULL, '3'), (NULL, NULL)] FROM null_in_tuple WHERE t not in ((1, '1'), (NULL, '1')); +SELECT arraySort(x -> (x.1, x.2), groupArray(t)) == [(NULL, '3'), (NULL, NULL)] FROM null_in_tuple WHERE t not in ((1, '1'), (NULL, '1'), (2, NULL)); +SELECT arraySort(x -> (x.1, x.2), groupArray(t)) == [(2, NULL), (NULL, NULL)] FROM null_in_tuple WHERE t not in ((1, '1'), (1, NULL), (NULL, '3')); +SELECT arraySort(x -> (x.1, x.2), groupArray(t)) == [] FROM null_in_tuple WHERE t not in ((1, '1'), (1, NULL), (2, NULL), (NULL, '3'), (NULL, NULL)); + +DROP TABLE IF EXISTS null_in_tuple; From c6bf39d7a9a8606fa00caff764cb16133a3620f0 Mon Sep 17 00:00:00 2001 From: "philip.han" Date: Fri, 10 Apr 2020 14:02:55 +0900 Subject: [PATCH 259/484] Fix 01231_operator_null_in.reference --- .../0_stateless/01231_operator_null_in.reference | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/tests/queries/0_stateless/01231_operator_null_in.reference b/tests/queries/0_stateless/01231_operator_null_in.reference index 7432b657191..5cd5e5ee5fb 100644 --- a/tests/queries/0_stateless/01231_operator_null_in.reference +++ b/tests/queries/0_stateless/01231_operator_null_in.reference @@ -52,3 +52,13 @@ 1 1 1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 From 7fa5afecb44484e04f710ee7876ee590fbf2d1cd Mon Sep 17 00:00:00 2001 From: "philip.han" Date: Fri, 10 Apr 2020 14:42:36 +0900 Subject: [PATCH 260/484] Support transform_null_in option for StorageSet --- src/Interpreters/Set.cpp | 7 +--- src/Storages/StorageSet.cpp | 6 +-- .../01231_operator_null_in.reference | 16 ++++++++ .../0_stateless/01231_operator_null_in.sql | 38 +++++++++++++++++++ 4 
files changed, 59 insertions(+), 8 deletions(-) diff --git a/src/Interpreters/Set.cpp b/src/Interpreters/Set.cpp index 54992eeff2c..a4fea5dd705 100644 --- a/src/Interpreters/Set.cpp +++ b/src/Interpreters/Set.cpp @@ -87,10 +87,7 @@ void NO_INLINE Set::insertFromBlockImplCase( { if ((*null_map)[i]) { - if (transform_null_in) - { - has_null = true; - } + has_null = true; if constexpr (build_filter) { @@ -397,7 +394,7 @@ void NO_INLINE Set::executeImplCase( { if (has_null_map && (*null_map)[i]) { - if (has_null) + if (transform_null_in && has_null) vec_res[i] = !negative; else vec_res[i] = negative; diff --git a/src/Storages/StorageSet.cpp b/src/Storages/StorageSet.cpp index 7d2a7ee128f..79f5198b304 100644 --- a/src/Storages/StorageSet.cpp +++ b/src/Storages/StorageSet.cpp @@ -112,7 +112,7 @@ StorageSet::StorageSet( const ConstraintsDescription & constraints_, const Context & context_) : StorageSetOrJoinBase{relative_path_, table_id_, columns_, constraints_, context_}, - set(std::make_shared(SizeLimits(), false, context_.getSettingsRef().transform_null_in)) + set(std::make_shared(SizeLimits(), false, true)) { Block header = getSampleBlock(); header = header.sortColumns(); @@ -127,7 +127,7 @@ void StorageSet::finishInsert() { set->finishInsert(); } size_t StorageSet::getSize() const { return set->getTotalRowCount(); } -void StorageSet::truncate(const ASTPtr &, const Context & context, TableStructureWriteLockHolder &) +void StorageSet::truncate(const ASTPtr &, const Context &, TableStructureWriteLockHolder &) { Poco::File(path).remove(true); Poco::File(path).createDirectories(); @@ -137,7 +137,7 @@ void StorageSet::truncate(const ASTPtr &, const Context & context, TableStructur header = header.sortColumns(); increment = 0; - set = std::make_shared(SizeLimits(), false, context.getSettingsRef().transform_null_in); + set = std::make_shared(SizeLimits(), false, true); set->setHeader(header); } diff --git a/tests/queries/0_stateless/01231_operator_null_in.reference b/tests/queries/0_stateless/01231_operator_null_in.reference index 5cd5e5ee5fb..b76f42e9af4 100644 --- a/tests/queries/0_stateless/01231_operator_null_in.reference +++ b/tests/queries/0_stateless/01231_operator_null_in.reference @@ -62,3 +62,19 @@ 1 1 1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 diff --git a/tests/queries/0_stateless/01231_operator_null_in.sql b/tests/queries/0_stateless/01231_operator_null_in.sql index 3c4333c8ea6..ddebaf23900 100644 --- a/tests/queries/0_stateless/01231_operator_null_in.sql +++ b/tests/queries/0_stateless/01231_operator_null_in.sql @@ -40,8 +40,46 @@ SELECT count() == 3 FROM null_in WHERE i global not in (1, 3); SELECT count() == 3 FROM null_in WHERE i global not in range(4); SELECT count() == 3 FROM null_in WHERE s global not in ('1', '3'); +DROP TABLE IF EXISTS test_set; +CREATE TABLE test_set (i Nullable(int)) ENGINE = Set(); +INSERT INTO test_set VALUES (1), (NULL); + +SET transform_null_in = 0; + +SELECT count() == 1 FROM null_in WHERE i in test_set; +SELECT count() == 2 FROM null_in WHERE i not in test_set; +SELECT count() == 1 FROM null_in WHERE i global in test_set; +SELECT count() == 2 FROM null_in WHERE i global not in test_set; + +SET transform_null_in = 1; + +SELECT count() == 3 FROM null_in WHERE i in test_set; +SELECT count() == 2 FROM null_in WHERE i not in test_set; +SELECT count() == 3 FROM null_in WHERE i global in test_set; +SELECT count() == 2 FROM null_in WHERE i global not in test_set; + +-- Create with transform_null_in +CREATE TABLE test_set2 (i Nullable(int)) ENGINE = 
Set(); +INSERT INTO test_set2 VALUES (1), (NULL); + +SET transform_null_in = 0; + +SELECT count() == 1 FROM null_in WHERE i in test_set2; +SELECT count() == 2 FROM null_in WHERE i not in test_set2; +SELECT count() == 1 FROM null_in WHERE i global in test_set2; +SELECT count() == 2 FROM null_in WHERE i global not in test_set2; + +SET transform_null_in = 1; + +SELECT count() == 3 FROM null_in WHERE i in test_set2; +SELECT count() == 2 FROM null_in WHERE i not in test_set2; +SELECT count() == 3 FROM null_in WHERE i global in test_set2; +SELECT count() == 2 FROM null_in WHERE i global not in test_set2; + +DROP TABLE IF EXISTS test_set; DROP TABLE IF EXISTS null_in; + DROP TABLE IF EXISTS null_in_subquery; CREATE TABLE null_in_subquery (dt DateTime, idx int, i Nullable(UInt64)) ENGINE = MergeTree() PARTITION BY dt ORDER BY idx; INSERT INTO null_in_subquery SELECT number % 3, number, number FROM system.numbers LIMIT 99999; From b8bfbad85777287f523cbda6d8fea34f2681763a Mon Sep 17 00:00:00 2001 From: Ivan Blinkov Date: Fri, 10 Apr 2020 09:35:07 +0300 Subject: [PATCH 261/484] Update sitemap_static.xml --- website/sitemap_static.xml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/website/sitemap_static.xml b/website/sitemap_static.xml index 751ad4e8ce2..33d258674f6 100644 --- a/website/sitemap_static.xml +++ b/website/sitemap_static.xml @@ -12,4 +12,8 @@ https://clickhouse.tech/benchmark_hardware.html weekly + + https://clickhouse.tech/codebrowser/html_report/ClickHouse/index.html + daily + From 888baad56574822348f744d2ff85a845fd3c70b5 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Fri, 10 Apr 2020 10:35:13 +0300 Subject: [PATCH 262/484] tests/queries/0_stateless/01056_create_table_as: drop dictionary at start --- tests/queries/0_stateless/01056_create_table_as.sql | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/queries/0_stateless/01056_create_table_as.sql b/tests/queries/0_stateless/01056_create_table_as.sql index 868e1f082dd..f95df9b7906 100644 --- a/tests/queries/0_stateless/01056_create_table_as.sql +++ b/tests/queries/0_stateless/01056_create_table_as.sql @@ -15,6 +15,7 @@ CREATE TABLE t3 AS v; -- { serverError 80; } DROP TABLE v; -- dictionary +DROP DICTIONARY IF EXISTS dict; DROP DATABASE if exists test_01056_dict_data; CREATE DATABASE test_01056_dict_data; CREATE TABLE test_01056_dict_data.dict_data (key Int, value UInt16) Engine=Memory(); From 25f9a1a2490845153f39a34f2b41b16911669b5a Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Fri, 10 Apr 2020 07:41:44 +0000 Subject: [PATCH 263/484] Bump mkdocs-macros-plugin from 0.4.4 to 0.4.6 in /docs/tools Bumps [mkdocs-macros-plugin](https://github.com/fralau/mkdocs_macros_plugin) from 0.4.4 to 0.4.6. 
- [Release notes](https://github.com/fralau/mkdocs_macros_plugin/releases) - [Commits](https://github.com/fralau/mkdocs_macros_plugin/commits) Signed-off-by: dependabot-preview[bot] --- docs/tools/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/tools/requirements.txt b/docs/tools/requirements.txt index 587bcabb8fb..0e3e3c24b5f 100644 --- a/docs/tools/requirements.txt +++ b/docs/tools/requirements.txt @@ -19,7 +19,7 @@ Markdown==3.2.1 MarkupSafe==1.1.1 mkdocs==1.1 mkdocs-htmlproofer-plugin==0.0.3 -mkdocs-macros-plugin==0.4.4 +mkdocs-macros-plugin==0.4.6 nltk==3.4.5 nose==1.3.7 protobuf==3.11.3 From 569b85eda4b73110fe26f39e26fed94645ba4cb2 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Fri, 10 Apr 2020 10:46:23 +0300 Subject: [PATCH 264/484] Add ability to use unbundled msgpack Actually it works before but by accidentally and with warnings. --- cmake/find/msgpack.cmake | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/cmake/find/msgpack.cmake b/cmake/find/msgpack.cmake index a1f18bb1eb0..0b56bbc1a0d 100644 --- a/cmake/find/msgpack.cmake +++ b/cmake/find/msgpack.cmake @@ -1,2 +1,17 @@ -set(MSGPACK_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/msgpack-c/include) +option (USE_INTERNAL_MSGPACK_LIBRARY "Set to FALSE to use system msgpack library instead of bundled" ${NOT_UNBUNDLED}) + +if (USE_INTERNAL_MSGPACK_LIBRARY) + if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/msgpack-c/include/msgpack.hpp") + message(WARNING "submodule contrib/msgpack-c is missing. to fix try run: \n git submodule update --init --recursive") + set(USE_INTERNAL_MSGPACK_LIBRARY 0) + set(MISSING_INTERNAL_MSGPACK_LIBRARY 1) + endif() +endif() + +if (USE_INTERNAL_MSGPACK_LIBRARY) + set(MSGPACK_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/msgpack-c/include) +else() + find_path(MSGPACK_INCLUDE_DIR NAMES msgpack.hpp PATHS ${MSGPACK_INCLUDE_PATHS}) +endif() + message(STATUS "Using msgpack: ${MSGPACK_INCLUDE_DIR}") From 66d443df14da259b49c95481f4d21a48376f072b Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Fri, 10 Apr 2020 11:30:23 +0300 Subject: [PATCH 265/484] Add libmsgpack-dev into the image (for unbundled build) --- docker/packager/deb/Dockerfile | 1 + 1 file changed, 1 insertion(+) diff --git a/docker/packager/deb/Dockerfile b/docker/packager/deb/Dockerfile index bedde0a2013..6aa550aaf82 100644 --- a/docker/packager/deb/Dockerfile +++ b/docker/packager/deb/Dockerfile @@ -48,6 +48,7 @@ RUN apt-get --allow-unauthenticated update -y \ libltdl-dev \ libre2-dev \ libjemalloc-dev \ + libmsgpack-dev \ unixodbc-dev \ odbcinst \ tzdata \ From a4f5280d80e6459dcaba5b34b542f41512207df6 Mon Sep 17 00:00:00 2001 From: Ivan Blinkov Date: Fri, 10 Apr 2020 12:32:37 +0300 Subject: [PATCH 266/484] Remove excessive } --- website/templates/footer.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/templates/footer.html b/website/templates/footer.html index 42f1e4263be..765ea63d528 100644 --- a/website/templates/footer.html +++ b/website/templates/footer.html @@ -8,7 +8,7 @@ {{ _('ClickHouse source code is published under the Apache 2.0 License.') }} {{ _('Software is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.') }}
- © 2016–2020 {{ _('Yandex LLC') }}}
+ © 2016–2020 {{ _('Yandex LLC') }}
    From 564bec176ead92e937efa689b4c3b139264da4e5 Mon Sep 17 00:00:00 2001 From: Ivan Blinkov Date: Fri, 10 Apr 2020 13:40:59 +0300 Subject: [PATCH 267/484] Update codebrowser links (#10166) * Put single-page content into a separate js file * move break comments * Update codebrowser links --- docs/en/development/browse_code.md | 2 +- docs/en/whats_new/changelog/index.md | 2 +- docs/es/development/browse_code.md | 2 +- docs/es/whats_new/changelog/index.md | 2 +- docs/fa/development/browse_code.md | 2 +- docs/fa/whats_new/changelog/index.md | 2 +- docs/fr/development/browse_code.md | 2 +- docs/fr/whats_new/changelog/index.md | 2 +- docs/ja/development/browse_code.md | 2 +- docs/ja/whats_new/changelog/index.md | 2 +- docs/ru/development/browse_code.md | 2 +- docs/ru/whats_new/changelog/index.md | 2 +- docs/zh/changelog/index.md | 2 +- docs/zh/development/browse_code.md | 2 +- docs/zh/whats_new/changelog/index.md | 2 +- 15 files changed, 15 insertions(+), 15 deletions(-) diff --git a/docs/en/development/browse_code.md b/docs/en/development/browse_code.md index 69b15df3629..8e98e3f5f0f 100644 --- a/docs/en/development/browse_code.md +++ b/docs/en/development/browse_code.md @@ -5,7 +5,7 @@ toc_title: Browse ClickHouse Source Code # Browse ClickHouse Source Code {#browse-clickhouse-source-code} -You can use **Woboq** online code browser available [here](https://clickhouse-test-reports.s3.yandex.net/codebrowser/html_report///ClickHouse/src/index.html). It provides code navigation and semantic highlighting, search and indexing. The code snapshot is updated daily. +You can use **Woboq** online code browser available [here](https://clickhouse.tech/codebrowser/html_report///ClickHouse/src/index.html). It provides code navigation and semantic highlighting, search and indexing. The code snapshot is updated daily. Also, you can browse sources on [GitHub](https://github.com/ClickHouse/ClickHouse) as usual. diff --git a/docs/en/whats_new/changelog/index.md b/docs/en/whats_new/changelog/index.md index bcfe62cbd0b..969e71fbf42 100644 --- a/docs/en/whats_new/changelog/index.md +++ b/docs/en/whats_new/changelog/index.md @@ -240,7 +240,7 @@ toc_title: '2020' * Updated checking for hung queries in clickhouse-test script [#8858](https://github.com/ClickHouse/ClickHouse/pull/8858) ([Alexander Kazakov](https://github.com/Akazz)) * Removed some useless files from repository. [#8843](https://github.com/ClickHouse/ClickHouse/pull/8843) ([alexey-milovidov](https://github.com/alexey-milovidov)) * Changed type of math perftests from `once` to `loop`. [#8783](https://github.com/ClickHouse/ClickHouse/pull/8783) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -* Add docker image which allows to build interactive code browser HTML report for our codebase. [#8781](https://github.com/ClickHouse/ClickHouse/pull/8781) ([alesapin](https://github.com/alesapin)) See [Woboq Code Browser](https://clickhouse-test-reports.s3.yandex.net/codebrowser/html_report///ClickHouse/dbms/src/index.html) +* Add docker image which allows to build interactive code browser HTML report for our codebase. [#8781](https://github.com/ClickHouse/ClickHouse/pull/8781) ([alesapin](https://github.com/alesapin)) See [Woboq Code Browser](https://clickhouse.tech/codebrowser/html_report///ClickHouse/dbms/src/index.html) * Suppress some test failures under MSan. [#8780](https://github.com/ClickHouse/ClickHouse/pull/8780) ([Alexander Kuzmenkov](https://github.com/akuzm)) * Speedup "exception while insert" test. 
This test often time out in debug-with-coverage build. [#8711](https://github.com/ClickHouse/ClickHouse/pull/8711) ([alexey-milovidov](https://github.com/alexey-milovidov)) * Updated `libcxx` and `libcxxabi` to master. In preparation to [#9304](https://github.com/ClickHouse/ClickHouse/issues/9304) [#9308](https://github.com/ClickHouse/ClickHouse/pull/9308) ([alexey-milovidov](https://github.com/alexey-milovidov)) diff --git a/docs/es/development/browse_code.md b/docs/es/development/browse_code.md index 6bbc9d57729..393577e8dca 100644 --- a/docs/es/development/browse_code.md +++ b/docs/es/development/browse_code.md @@ -7,7 +7,7 @@ toc_title: "Examinar el c\xF3digo fuente de ClickHouse" # Examinar el código fuente de ClickHouse {#browse-clickhouse-source-code} -Usted puede utilizar **Woboq** navegador de código en línea disponible [aqui](https://clickhouse-test-reports.s3.yandex.net/codebrowser/html_report///ClickHouse/src/index.html). Proporciona navegación de código y resaltado semántico, búsqueda e indexación. La instantánea de código se actualiza diariamente. +Usted puede utilizar **Woboq** navegador de código en línea disponible [aqui](https://clickhouse.tech/codebrowser/html_report///ClickHouse/src/index.html). Proporciona navegación de código y resaltado semántico, búsqueda e indexación. La instantánea de código se actualiza diariamente. Además, puede navegar por las fuentes en [GitHub](https://github.com/ClickHouse/ClickHouse) como de costumbre. diff --git a/docs/es/whats_new/changelog/index.md b/docs/es/whats_new/changelog/index.md index 03c8ea00cbe..053f924099a 100644 --- a/docs/es/whats_new/changelog/index.md +++ b/docs/es/whats_new/changelog/index.md @@ -249,7 +249,7 @@ toc_title: '2020' - Comprobación actualizada de consultas colgadas en el script de prueba de clickhouse [\#8858](https://github.com/ClickHouse/ClickHouse/pull/8858) ([Alejandro Kazakov](https://github.com/Akazz)) - Se eliminaron algunos archivos inútiles del repositorio. [\#8843](https://github.com/ClickHouse/ClickHouse/pull/8843) ([alexey-milovidov](https://github.com/alexey-milovidov)) - Tipo cambiado de perftests matemáticos de `once` a `loop`. [\#8783](https://github.com/ClickHouse/ClickHouse/pull/8783) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -- Agregue una imagen acoplable que permite construir un informe HTML del navegador de código interactivo para nuestra base de código. [\#8781](https://github.com/ClickHouse/ClickHouse/pull/8781) ([alesapin](https://github.com/alesapin)) Ver [Navegador de código Woboq](https://clickhouse-test-reports.s3.yandex.net/codebrowser/html_report///ClickHouse/dbms/src/index.html) +- Agregue una imagen acoplable que permite construir un informe HTML del navegador de código interactivo para nuestra base de código. [\#8781](https://github.com/ClickHouse/ClickHouse/pull/8781) ([alesapin](https://github.com/alesapin)) Ver [Navegador de código Woboq](https://clickhouse.tech/codebrowser/html_report///ClickHouse/dbms/src/index.html) - Suprima algunas fallas de prueba bajo MSan. [\#8780](https://github.com/ClickHouse/ClickHouse/pull/8780) ([Alejandro Kuzmenkov](https://github.com/akuzm)) - Aceleración “exception while insert” prueba. Esta prueba a menudo se agota en la compilación de depuración con cobertura. [\#8711](https://github.com/ClickHouse/ClickHouse/pull/8711) ([alexey-milovidov](https://github.com/alexey-milovidov)) - Actualizar `libcxx` y `libcxxabi` dominar. 
En preparación para [\#9304](https://github.com/ClickHouse/ClickHouse/issues/9304) [\#9308](https://github.com/ClickHouse/ClickHouse/pull/9308) ([alexey-milovidov](https://github.com/alexey-milovidov)) diff --git a/docs/fa/development/browse_code.md b/docs/fa/development/browse_code.md index 1609e4b1d77..0338511c44c 100644 --- a/docs/fa/development/browse_code.md +++ b/docs/fa/development/browse_code.md @@ -8,7 +8,7 @@ toc_title: "\u0645\u0631\u0648\u0631 \u06A9\u062F \u0645\u0646\u0628\u0639 \u06A # فهرست clickhouse کد منبع {#browse-clickhouse-source-code} -شما می توانید استفاده کنید **ووبوک** آنلاین کد مرورگر موجود [اینجا](https://clickhouse-test-reports.s3.yandex.net/codebrowser/html_report///ClickHouse/dbms/index.html). این فراهم می کند ناوبری کد و برجسته معنایی, جستجو و نمایه سازی. عکس فوری کد روزانه به روز می شود. +شما می توانید استفاده کنید **ووبوک** آنلاین کد مرورگر موجود [اینجا](https://clickhouse.tech/codebrowser/html_report///ClickHouse/dbms/index.html). این فراهم می کند ناوبری کد و برجسته معنایی, جستجو و نمایه سازی. عکس فوری کد روزانه به روز می شود. همچنین شما می توانید فهرست منابع در [گیتهاب](https://github.com/ClickHouse/ClickHouse) به عنوان معمول است. diff --git a/docs/fa/whats_new/changelog/index.md b/docs/fa/whats_new/changelog/index.md index c7eb257e5a9..8dcc20e9409 100644 --- a/docs/fa/whats_new/changelog/index.md +++ b/docs/fa/whats_new/changelog/index.md @@ -249,7 +249,7 @@ toc_title: '2020' - به روز شده در چک کردن برای نمایش داده شد را قطع کرد در اسکریپت کلیک تست [\#8858](https://github.com/ClickHouse/ClickHouse/pull/8858) ([الکساندر کازاکوف](https://github.com/Akazz)) - حذف برخی از فایل های بی فایده از مخزن. [\#8843](https://github.com/ClickHouse/ClickHouse/pull/8843) ([الکسی میلویدو](https://github.com/alexey-milovidov)) - نوع تغییر کامل ریاضی از `once` به `loop`. [\#8783](https://github.com/ClickHouse/ClickHouse/pull/8783) ([نیکولای کوچتو](https://github.com/KochetovNicolai)) -- اضافه کردن تصویر کارگر بارانداز که اجازه می دهد تا برای ساخت کد تعاملی مرورگر گزارش متنی برای کدهای ما. [\#8781](https://github.com/ClickHouse/ClickHouse/pull/8781) ([الساپین](https://github.com/alesapin)) ببینید [مرورگر کد ووبوک](https://clickhouse-test-reports.s3.yandex.net/codebrowser/html_report///ClickHouse/dbms/src/index.html) +- اضافه کردن تصویر کارگر بارانداز که اجازه می دهد تا برای ساخت کد تعاملی مرورگر گزارش متنی برای کدهای ما. [\#8781](https://github.com/ClickHouse/ClickHouse/pull/8781) ([الساپین](https://github.com/alesapin)) ببینید [مرورگر کد ووبوک](https://clickhouse.tech/codebrowser/html_report///ClickHouse/dbms/src/index.html) - سرکوب برخی از شکست تست تحت مسان. [\#8780](https://github.com/ClickHouse/ClickHouse/pull/8780) ([الکساندر کوزمنکوف](https://github.com/akuzm)) - افزایش سرعت “exception while insert” امتحان این تست اغلب زمان در اشکال زدایی با پوشش ساخت. 
[\#8711](https://github.com/ClickHouse/ClickHouse/pull/8711) ([الکسی میلویدو](https://github.com/alexey-milovidov)) - به روز شده `libcxx` و `libcxxabi` به سلامتی استاد در تهیه به [\#9304](https://github.com/ClickHouse/ClickHouse/issues/9304) [\#9308](https://github.com/ClickHouse/ClickHouse/pull/9308) ([الکسی میلویدو](https://github.com/alexey-milovidov)) diff --git a/docs/fr/development/browse_code.md b/docs/fr/development/browse_code.md index 62caa530b5d..0d708da5beb 100644 --- a/docs/fr/development/browse_code.md +++ b/docs/fr/development/browse_code.md @@ -7,7 +7,7 @@ toc_title: Parcourir Le Code Source De ClickHouse # Parcourir Le Code Source De ClickHouse {#browse-clickhouse-source-code} -Vous pouvez utiliser **Woboq** navigateur de code en ligne disponible [ici](https://clickhouse-test-reports.s3.yandex.net/codebrowser/html_report///ClickHouse/dbms/index.html). Il fournit la navigation de code et la mise en évidence sémantique, la recherche et l'indexation. L'instantané de code est mis à jour quotidiennement. +Vous pouvez utiliser **Woboq** navigateur de code en ligne disponible [ici](https://clickhouse.tech/codebrowser/html_report///ClickHouse/dbms/index.html). Il fournit la navigation de code et la mise en évidence sémantique, la recherche et l'indexation. L'instantané de code est mis à jour quotidiennement. Aussi, vous pouvez parcourir les sources sur [GitHub](https://github.com/ClickHouse/ClickHouse) comme à l'habitude. diff --git a/docs/fr/whats_new/changelog/index.md b/docs/fr/whats_new/changelog/index.md index d45e36b1d8f..fe49ce8244c 100644 --- a/docs/fr/whats_new/changelog/index.md +++ b/docs/fr/whats_new/changelog/index.md @@ -249,7 +249,7 @@ toc_title: '2020' - Mise à jour de la vérification des requêtes suspendues dans le script clickhouse-test [\#8858](https://github.com/ClickHouse/ClickHouse/pull/8858) ([Alexander Kazakov](https://github.com/Akazz)) - Suppression de certains fichiers inutiles du référentiel. [\#8843](https://github.com/ClickHouse/ClickHouse/pull/8843) ([alexeï-milovidov](https://github.com/alexey-milovidov)) - Changement de type de math perftests de `once` de `loop`. [\#8783](https://github.com/ClickHouse/ClickHouse/pull/8783) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -- Ajouter une image docker qui permet de créer un rapport HTML interactif du navigateur de code pour notre base de code. [\#8781](https://github.com/ClickHouse/ClickHouse/pull/8781) ([alésapine](https://github.com/alesapin)) Voir [Navigateur De Code Woboq](https://clickhouse-test-reports.s3.yandex.net/codebrowser/html_report///ClickHouse/src/src/index.html) +- Ajouter une image docker qui permet de créer un rapport HTML interactif du navigateur de code pour notre base de code. [\#8781](https://github.com/ClickHouse/ClickHouse/pull/8781) ([alésapine](https://github.com/alesapin)) Voir [Navigateur De Code Woboq](https://clickhouse.tech/codebrowser/html_report///ClickHouse/src/src/index.html) - Supprimer certains échecs de test sous MSan. [\#8780](https://github.com/ClickHouse/ClickHouse/pull/8780) ([Alexander Kuzmenkov](https://github.com/akuzm)) - SpeedUp “exception while insert” test. Ce test expire souvent dans la construction debug-with-coverage. [\#8711](https://github.com/ClickHouse/ClickHouse/pull/8711) ([alexeï-milovidov](https://github.com/alexey-milovidov)) - Mettre `libcxx` et `libcxxabi` maîtriser. 
En préparation à [\#9304](https://github.com/ClickHouse/ClickHouse/issues/9304) [\#9308](https://github.com/ClickHouse/ClickHouse/pull/9308) ([alexeï-milovidov](https://github.com/alexey-milovidov)) diff --git a/docs/ja/development/browse_code.md b/docs/ja/development/browse_code.md index f8357fcca27..d66b14e400f 100644 --- a/docs/ja/development/browse_code.md +++ b/docs/ja/development/browse_code.md @@ -7,7 +7,7 @@ toc_title: "ClickHouse\u306E\u30BD\u30FC\u30B9\u30B3\u30FC\u30C9\u3092\u53C2\u71 # ClickHouseのソースコードを参照 {#browse-clickhouse-source-code} -を使用することができ **Woboq** オンラインのコードブラウザをご利用 [ここに](https://clickhouse-test-reports.s3.yandex.net/codebrowser/html_report///ClickHouse/dbms/index.html). このコードナビゲーションや意味のハイライト表示、検索インデックス. コードのスナップショットは随時更新中です。 +を使用することができ **Woboq** オンラインのコードブラウザをご利用 [ここに](https://clickhouse.tech/codebrowser/html_report///ClickHouse/dbms/index.html). このコードナビゲーションや意味のハイライト表示、検索インデックス. コードのスナップショットは随時更新中です。 また、ソースを閲覧することもできます [GitHub](https://github.com/ClickHouse/ClickHouse) いつものように diff --git a/docs/ja/whats_new/changelog/index.md b/docs/ja/whats_new/changelog/index.md index dd382ebf5ce..0d90862ee11 100644 --- a/docs/ja/whats_new/changelog/index.md +++ b/docs/ja/whats_new/changelog/index.md @@ -249,7 +249,7 @@ toc_title: '2020' - Clickhouseテストスクリプトでハングクエリのチェックを更新 [\#8858](https://github.com/ClickHouse/ClickHouse/pull/8858) ([Alexander Kazakov](https://github.com/Akazz)) - リポジトリか [\#8843](https://github.com/ClickHouse/ClickHouse/pull/8843) ([alexey-milovidov](https://github.com/alexey-milovidov)) - から数学perftestsの変更タイプ `once` に `loop`. [\#8783](https://github.com/ClickHouse/ClickHouse/pull/8783) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -- 追加docker画像を構築ィコードのブラウザのhtmlレポート当社のコードベース. [\#8781](https://github.com/ClickHouse/ClickHouse/pull/8781) ([alesapin](https://github.com/alesapin))見る [Woboqコードブラウザ](https://clickhouse-test-reports.s3.yandex.net/codebrowser/html_report///ClickHouse/dbms/src/index.html) +- 追加docker画像を構築ィコードのブラウザのhtmlレポート当社のコードベース. [\#8781](https://github.com/ClickHouse/ClickHouse/pull/8781) ([alesapin](https://github.com/alesapin))見る [Woboqコードブラウザ](https://clickhouse.tech/codebrowser/html_report///ClickHouse/dbms/src/index.html) - MSanの下でいくつかのテストの失敗を抑制. [\#8780](https://github.com/ClickHouse/ClickHouse/pull/8780) ([Alexander Kuzmenkov](https://github.com/akuzm)) - スピードアップ “exception while insert” テスト。 このテス [\#8711](https://github.com/ClickHouse/ClickHouse/pull/8711) ([alexey-milovidov](https://github.com/alexey-milovidov)) - 更新 `libcxx` と `libcxxabi` マスターに。 準備のために [\#9304](https://github.com/ClickHouse/ClickHouse/issues/9304) [\#9308](https://github.com/ClickHouse/ClickHouse/pull/9308) ([alexey-milovidov](https://github.com/alexey-milovidov)) diff --git a/docs/ru/development/browse_code.md b/docs/ru/development/browse_code.md index f87e3be7f4b..814b213a6a7 100644 --- a/docs/ru/development/browse_code.md +++ b/docs/ru/development/browse_code.md @@ -1,6 +1,6 @@ # Навигация по коду ClickHouse {#navigatsiia-po-kodu-clickhouse} -Для навигации по коду онлайн доступен **Woboq**, он расположен [здесь](https://clickhouse-test-reports.s3.yandex.net/codebrowser/html_report///ClickHouse/src/index.html). В нём реализовано удобное перемещение между исходными файлами, семантическая подсветка, подсказки, индексация и поиск. Слепок кода обновляется ежедневно. +Для навигации по коду онлайн доступен **Woboq**, он расположен [здесь](https://clickhouse.tech/codebrowser/html_report///ClickHouse/src/index.html). 
В нём реализовано удобное перемещение между исходными файлами, семантическая подсветка, подсказки, индексация и поиск. Слепок кода обновляется ежедневно. Также вы можете просматривать исходники на [GitHub](https://github.com/ClickHouse/ClickHouse). diff --git a/docs/ru/whats_new/changelog/index.md b/docs/ru/whats_new/changelog/index.md index bcfe62cbd0b..969e71fbf42 100644 --- a/docs/ru/whats_new/changelog/index.md +++ b/docs/ru/whats_new/changelog/index.md @@ -240,7 +240,7 @@ toc_title: '2020' * Updated checking for hung queries in clickhouse-test script [#8858](https://github.com/ClickHouse/ClickHouse/pull/8858) ([Alexander Kazakov](https://github.com/Akazz)) * Removed some useless files from repository. [#8843](https://github.com/ClickHouse/ClickHouse/pull/8843) ([alexey-milovidov](https://github.com/alexey-milovidov)) * Changed type of math perftests from `once` to `loop`. [#8783](https://github.com/ClickHouse/ClickHouse/pull/8783) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -* Add docker image which allows to build interactive code browser HTML report for our codebase. [#8781](https://github.com/ClickHouse/ClickHouse/pull/8781) ([alesapin](https://github.com/alesapin)) See [Woboq Code Browser](https://clickhouse-test-reports.s3.yandex.net/codebrowser/html_report///ClickHouse/dbms/src/index.html) +* Add docker image which allows to build interactive code browser HTML report for our codebase. [#8781](https://github.com/ClickHouse/ClickHouse/pull/8781) ([alesapin](https://github.com/alesapin)) See [Woboq Code Browser](https://clickhouse.tech/codebrowser/html_report///ClickHouse/dbms/src/index.html) * Suppress some test failures under MSan. [#8780](https://github.com/ClickHouse/ClickHouse/pull/8780) ([Alexander Kuzmenkov](https://github.com/akuzm)) * Speedup "exception while insert" test. This test often time out in debug-with-coverage build. [#8711](https://github.com/ClickHouse/ClickHouse/pull/8711) ([alexey-milovidov](https://github.com/alexey-milovidov)) * Updated `libcxx` and `libcxxabi` to master. In preparation to [#9304](https://github.com/ClickHouse/ClickHouse/issues/9304) [#9308](https://github.com/ClickHouse/ClickHouse/pull/9308) ([alexey-milovidov](https://github.com/alexey-milovidov)) diff --git a/docs/zh/changelog/index.md b/docs/zh/changelog/index.md index 90bb7abe0b0..33bb7bfd5f1 100644 --- a/docs/zh/changelog/index.md +++ b/docs/zh/changelog/index.md @@ -246,7 +246,7 @@ machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 - 更新了clickhouse-test脚本中挂起查询的检查 [\#8858](https://github.com/ClickHouse/ClickHouse/pull/8858) ([亚历山大\*卡扎科夫](https://github.com/Akazz)) - 从存储库中删除了一些无用的文件。 [\#8843](https://github.com/ClickHouse/ClickHouse/pull/8843) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) - 更改类型的数学perftests从 `once` 到 `loop`. 
[\#8783](https://github.com/ClickHouse/ClickHouse/pull/8783) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) -- 添加码头镜像,它允许为我们的代码库构建交互式代码浏览器HTML报告。 [\#8781](https://github.com/ClickHouse/ClickHouse/pull/8781) ([阿利沙平](https://github.com/alesapin))见 [Woboq代码浏览器](https://clickhouse-test-reports.s3.yandex.net/codebrowser/html_report///ClickHouse/dbms/index.html) +- 添加码头镜像,它允许为我们的代码库构建交互式代码浏览器HTML报告。 [\#8781](https://github.com/ClickHouse/ClickHouse/pull/8781) ([阿利沙平](https://github.com/alesapin))见 [Woboq代码浏览器](https://clickhouse.tech/codebrowser/html_report///ClickHouse/dbms/index.html) - 抑制MSan下的一些测试失败。 [\#8780](https://github.com/ClickHouse/ClickHouse/pull/8780) ([Alexander Kuzmenkov](https://github.com/akuzm)) - 加速 “exception while insert” 测试 此测试通常在具有复盖率的调试版本中超时。 [\#8711](https://github.com/ClickHouse/ClickHouse/pull/8711) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) - 更新 `libcxx` 和 `libcxxabi` 为了主人 在准备 [\#9304](https://github.com/ClickHouse/ClickHouse/issues/9304) [\#9308](https://github.com/ClickHouse/ClickHouse/pull/9308) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) diff --git a/docs/zh/development/browse_code.md b/docs/zh/development/browse_code.md index 10d3ffecd15..d098675b6a0 100644 --- a/docs/zh/development/browse_code.md +++ b/docs/zh/development/browse_code.md @@ -7,7 +7,7 @@ toc_title: "\u6D4F\u89C8ClickHouse\u6E90\u4EE3\u7801" # 浏览ClickHouse源代码 {#browse-clickhouse-source-code} -您可以使用 **Woboq** 在线代码浏览器可用 [这里](https://clickhouse-test-reports.s3.yandex.net/codebrowser/html_report///ClickHouse/src/index.html). 它提供了代码导航和语义突出显示,搜索和索引。 代码快照每天更新。 +您可以使用 **Woboq** 在线代码浏览器可用 [这里](https://clickhouse.tech/codebrowser/html_report///ClickHouse/src/index.html). 它提供了代码导航和语义突出显示,搜索和索引。 代码快照每天更新。 此外,您还可以浏览源 [GitHub](https://github.com/ClickHouse/ClickHouse) 像往常一样 diff --git a/docs/zh/whats_new/changelog/index.md b/docs/zh/whats_new/changelog/index.md index 90bb7abe0b0..33bb7bfd5f1 100644 --- a/docs/zh/whats_new/changelog/index.md +++ b/docs/zh/whats_new/changelog/index.md @@ -246,7 +246,7 @@ machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1 - 更新了clickhouse-test脚本中挂起查询的检查 [\#8858](https://github.com/ClickHouse/ClickHouse/pull/8858) ([亚历山大\*卡扎科夫](https://github.com/Akazz)) - 从存储库中删除了一些无用的文件。 [\#8843](https://github.com/ClickHouse/ClickHouse/pull/8843) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) - 更改类型的数学perftests从 `once` 到 `loop`. 
[\#8783](https://github.com/ClickHouse/ClickHouse/pull/8783) ([尼古拉\*科切托夫](https://github.com/KochetovNicolai)) -- 添加码头镜像,它允许为我们的代码库构建交互式代码浏览器HTML报告。 [\#8781](https://github.com/ClickHouse/ClickHouse/pull/8781) ([阿利沙平](https://github.com/alesapin))见 [Woboq代码浏览器](https://clickhouse-test-reports.s3.yandex.net/codebrowser/html_report///ClickHouse/dbms/index.html) +- 添加码头镜像,它允许为我们的代码库构建交互式代码浏览器HTML报告。 [\#8781](https://github.com/ClickHouse/ClickHouse/pull/8781) ([阿利沙平](https://github.com/alesapin))见 [Woboq代码浏览器](https://clickhouse.tech/codebrowser/html_report///ClickHouse/dbms/index.html) - 抑制MSan下的一些测试失败。 [\#8780](https://github.com/ClickHouse/ClickHouse/pull/8780) ([Alexander Kuzmenkov](https://github.com/akuzm)) - 加速 “exception while insert” 测试 此测试通常在具有复盖率的调试版本中超时。 [\#8711](https://github.com/ClickHouse/ClickHouse/pull/8711) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) - 更新 `libcxx` 和 `libcxxabi` 为了主人 在准备 [\#9304](https://github.com/ClickHouse/ClickHouse/issues/9304) [\#9308](https://github.com/ClickHouse/ClickHouse/pull/9308) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) From 91a33e1eec27b31a262ac409ecd3f3dd976e1516 Mon Sep 17 00:00:00 2001 From: "Mikhail f. Shiryaev" Date: Fri, 10 Apr 2020 12:45:25 +0200 Subject: [PATCH 268/484] Fix code wrapping for non-code part (#10129) * Fix code wrapping for non-code part * Fix links, fix formatting --- docs/en/sql_reference/statements/create.md | 24 ++++++++-------------- 1 file changed, 9 insertions(+), 15 deletions(-) diff --git a/docs/en/sql_reference/statements/create.md b/docs/en/sql_reference/statements/create.md index fbfe3eb2cfb..36dd3aced8d 100644 --- a/docs/en/sql_reference/statements/create.md +++ b/docs/en/sql_reference/statements/create.md @@ -15,24 +15,18 @@ CREATE DATABASE [IF NOT EXISTS] db_name [ON CLUSTER cluster] [ENGINE = engine(.. ### Clauses {#clauses} -- `IF NOT EXISTS` +- `IF NOT EXISTS` + If the `db_name` database already exists, then ClickHouse doesn't create a new database and: + - Doesn't throw an exception if clause is specified. + - Throws an exception if clause isn't specified. - If the `db_name` database already exists, then ClickHouse doesn't create a new database and: - - - Doesn't throw an exception if clause is specified. - - Throws an exception if clause isn't specified. - -- `ON CLUSTER` - - ClickHouse creates the `db_name` database on all the servers of a specified cluster. +- `ON CLUSTER` + ClickHouse creates the `db_name` database on all the servers of a specified cluster. - `ENGINE` - - - [MySQL](../engines/database_engines/mysql.md) - - Allows you to retrieve data from the remote MySQL server. - - By default, ClickHouse uses its own [database engine](../engines/database_engines/index.md). + - [MySQL](../../engines/database_engines/mysql.md) + Allows you to retrieve data from the remote MySQL server. + By default, ClickHouse uses its own [database engine](../../engines/database_engines/index.md). 
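For illustration, the clauses above can be combined in a single statement following the `CREATE DATABASE [IF NOT EXISTS] db_name [ON CLUSTER cluster] [ENGINE = engine(...)]` grammar shown earlier; the cluster name and MySQL connection parameters here are hypothetical placeholders, not values from this patch:

``` sql
CREATE DATABASE IF NOT EXISTS mysql_db ON CLUSTER my_cluster
ENGINE = MySQL('mysql-host:3306', 'source_database', 'user', 'password')
```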
## CREATE TABLE {#create-table-query} From 9b0640a3b53aff4de72c0bd85c45a8440d68193c Mon Sep 17 00:00:00 2001 From: alesapin Date: Fri, 10 Apr 2020 14:29:56 +0300 Subject: [PATCH 269/484] Fix odbc round trip test --- .../0_stateless/01086_odbc_roundtrip.sh | 24 +++++++++++++++++++ .../0_stateless/01086_odbc_roundtrip.sql | 14 ----------- 2 files changed, 24 insertions(+), 14 deletions(-) create mode 100755 tests/queries/0_stateless/01086_odbc_roundtrip.sh delete mode 100644 tests/queries/0_stateless/01086_odbc_roundtrip.sql diff --git a/tests/queries/0_stateless/01086_odbc_roundtrip.sh b/tests/queries/0_stateless/01086_odbc_roundtrip.sh new file mode 100755 index 00000000000..827376395d1 --- /dev/null +++ b/tests/queries/0_stateless/01086_odbc_roundtrip.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +. $CUR_DIR/../shell_config.sh + + +for i in $(seq 1 10); do + ${CLICKHOUSE_CLIENT} -q "select count() > 1 as ok from (select * from odbc('DSN={ClickHouse DSN (ANSI)}','system','tables'))" 2>/dev/null && break +done + +${CLICKHOUSE_CLIENT} --query "select count() > 1 as ok from (select * from odbc('DSN={ClickHouse DSN (Unicode)}','system','tables'))" + +${CLICKHOUSE_CLIENT} --query "DROP DATABASE IF EXISTS test_01086" +${CLICKHOUSE_CLIENT} --query "CREATE DATABASE test_01086" + + +${CLICKHOUSE_CLIENT} --query "CREATE TABLE test_01086.t (x UInt8, y Float32, z String) ENGINE = Memory" +${CLICKHOUSE_CLIENT} --query "INSERT INTO test_01086.t VALUES (1,0.1,'a я'),(2,0.2,'b ą'),(3,0.3,'c d')" + +${CLICKHOUSE_CLIENT} --query "SELECT * FROM odbc('DSN={ClickHouse DSN (ANSI)}','test_01086','t') ORDER BY x" + +${CLICKHOUSE_CLIENT} --query "SELECT * FROM odbc('DSN={ClickHouse DSN (Unicode)}','test_01086','t') ORDER BY x" + +${CLICKHOUSE_CLIENT} --query "DROP DATABASE test_01086;" diff --git a/tests/queries/0_stateless/01086_odbc_roundtrip.sql b/tests/queries/0_stateless/01086_odbc_roundtrip.sql deleted file mode 100644 index 2c31711d895..00000000000 --- a/tests/queries/0_stateless/01086_odbc_roundtrip.sql +++ /dev/null @@ -1,14 +0,0 @@ -select count() > 1 as ok from (select * from odbc('DSN={ClickHouse DSN (ANSI)}','system','tables')); -select count() > 1 as ok from (select * from odbc('DSN={ClickHouse DSN (Unicode)}','system','tables')); - -DROP DATABASE IF EXISTS test_01086; -CREATE DATABASE test_01086; -USE test_01086; - -CREATE TABLE t (x UInt8, y Float32, z String) ENGINE = Memory; -INSERT INTO t VALUES (1,0.1,'a я'),(2,0.2,'b ą'),(3,0.3,'c d'); - -select * from odbc('DSN={ClickHouse DSN (ANSI)}','test_01086','t') ORDER BY x; -select * from odbc('DSN={ClickHouse DSN (Unicode)}','test_01086','t') ORDER BY x; - -DROP DATABASE test_01086; From 2108f621ddb5497ba22b8feba2edff85b503c14e Mon Sep 17 00:00:00 2001 From: Ivan Blinkov Date: Fri, 10 Apr 2020 14:31:21 +0300 Subject: [PATCH 270/484] Disable webvisor on single-page docs --- website/js/base.js | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/website/js/base.js b/website/js/base.js index 2c43e435f48..ae8b3c01573 100644 --- a/website/js/base.js +++ b/website/js/base.js @@ -50,13 +50,14 @@ (function (d, w, c) { (w[c] = w[c] || []).push(function() { + var is_single_page = $('html').attr('data-single-page') === 'true'; try { w.yaCounter18343495 = new Ya.Metrika2({ - id:18343495, - clickmap:true, - trackLinks:true, - accurateTrackBounce:true, - webvisor:true + id: 18343495, + clickmap: !is_single_page, + trackLinks: !is_single_page, + accurateTrackBounce: !is_single_page, + 
webvisor: !is_single_page }); } catch(e) { } }); From 88657cfbe1ce48cbcedf469d5a3f16be89f335b9 Mon Sep 17 00:00:00 2001 From: alesapin Date: Fri, 10 Apr 2020 15:14:27 +0300 Subject: [PATCH 271/484] Add retries to test --- tests/queries/0_stateless/00646_url_engine.python | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/tests/queries/0_stateless/00646_url_engine.python b/tests/queries/0_stateless/00646_url_engine.python index 960048dbb8f..1b41216b198 100644 --- a/tests/queries/0_stateless/00646_url_engine.python +++ b/tests/queries/0_stateless/00646_url_engine.python @@ -180,7 +180,14 @@ def main(): if __name__ == "__main__": - try: - main() - except: + exception_text = '' + for i in range(1, 5): + try: + main() + break + except Exception as ex: + exception_text = str(ex) + + if exception_text: + print("Exception: {}".format(exception_text), file=sys.stderr) os._exit(1) From d4bc6662cd441321e9f9170ff83aebdf9cc6ef31 Mon Sep 17 00:00:00 2001 From: Ilya Yatsishin <2159081+qoega@users.noreply.github.com> Date: Fri, 10 Apr 2020 15:29:25 +0300 Subject: [PATCH 272/484] Update extended_roadmap.md --- docs/ru/extended_roadmap.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/extended_roadmap.md b/docs/ru/extended_roadmap.md index 193e2035b56..1637b54311a 100644 --- a/docs/ru/extended_roadmap.md +++ b/docs/ru/extended_roadmap.md @@ -761,7 +761,7 @@ ClickHouse предоставляет возможность обратитьс Вместо этого предлагается описывать необходимые данные в конфигурационном файле сервера или в отдельном сервисе и ссылаться на них по именам. ### 9.3. Поддержка TLS для ZooKeeper. {#podderzhka-tls-dlia-zookeeper} - +[#10174](https://github.com/ClickHouse/ClickHouse/issues/10174) ## 10. Внешние словари. {#vneshnie-slovari} ### 10.1. + Исправление зависания в библиотеке доступа к YT. 
{#ispravlenie-zavisaniia-v-biblioteke-dostupa-k-yt} From d018977f4b0de8c933a6d90995cb0c17ae9a744e Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Fri, 10 Apr 2020 16:36:51 +0300 Subject: [PATCH 273/484] fix 'ALTER CLEAR INDEX/COLUMN' queries with compact parts --- .../MergeTree/IMergeTreeDataPartWriter.h | 5 ++-- src/Storages/MergeTree/MergeTreeData.h | 5 ---- .../MergeTree/MergeTreeDataMergerMutator.cpp | 24 +++++++++++++++++-- .../MergeTree/MergeTreeDataMergerMutator.h | 6 +++++ .../MergeTree/MergeTreeDataWriter.cpp | 2 +- src/Storages/MergeTree/MergeTreeIndices.h | 2 +- .../MergeTree/MergedBlockOutputStream.cpp | 6 +++-- .../MergeTree/MergedBlockOutputStream.h | 2 ++ 8 files changed, 38 insertions(+), 14 deletions(-) diff --git a/src/Storages/MergeTree/IMergeTreeDataPartWriter.h b/src/Storages/MergeTree/IMergeTreeDataPartWriter.h index 4eb842f9279..d18b31edc72 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPartWriter.h +++ b/src/Storages/MergeTree/IMergeTreeDataPartWriter.h @@ -102,8 +102,7 @@ public: written_offset_columns = written_offset_columns_; } - using SkipIndices = std::vector; - const SkipIndices & getSkipIndices() { return skip_indices; } + const MergeTreeIndices & getSkipIndices() { return skip_indices; } void initSkipIndices(); void initPrimaryIndex(); @@ -126,7 +125,7 @@ protected: CompressionCodecPtr default_codec; - std::vector skip_indices; + MergeTreeIndices skip_indices; MergeTreeWriterSettings settings; diff --git a/src/Storages/MergeTree/MergeTreeData.h b/src/Storages/MergeTree/MergeTreeData.h index 125a90d26e0..d299d39726e 100644 --- a/src/Storages/MergeTree/MergeTreeData.h +++ b/src/Storages/MergeTree/MergeTreeData.h @@ -433,11 +433,6 @@ public: DataPartPtr getPartIfExists(const String & part_name, const DataPartStates & valid_states); DataPartPtr getPartIfExists(const MergeTreePartInfo & part_info, const DataPartStates & valid_states); - std::vector getSkipIndices() const - { - return std::vector(std::begin(skip_indices), std::end(skip_indices)); - } - /// Total size of active parts in bytes. 
size_t getTotalActiveSizeInBytes() const; diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp index 2b732d879b0..c10a6c6dd59 100644 --- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp +++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp @@ -773,6 +773,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mergePartsToTempor MergedBlockOutputStream to{ new_data_part, merging_columns, + data.skip_indices, compression_codec, merged_column_to_size, data_settings->min_merge_bytes_to_use_direct_io, @@ -991,7 +992,6 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mutatePartToTempor splitMutationCommands(source_part, commands_for_part, for_interpreter, for_file_renames); - UInt64 watch_prev_elapsed = 0; MergeStageProgress stage_progress(1.0); @@ -1043,8 +1043,10 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mutatePartToTempor /// All columns from part are changed and may be some more that were missing before in part if (isCompactPart(source_part) || source_part->getColumns().isSubsetOf(updated_header.getNamesAndTypesList())) { + auto part_indices = getIndicesForNewDataPart(data.skip_indices, for_file_renames); mutateAllPartColumns( new_data_part, + part_indices, in, time_of_mutation, compression_codec, @@ -1260,6 +1262,7 @@ void MergeTreeDataMergerMutator::splitMutationCommands( else if (is_compact_part && command.type == MutationCommand::Type::DROP_COLUMN) { removed_columns_from_compact_part.emplace(command.column_name); + for_file_renames.push_back(command); } else if (command.type == MutationCommand::Type::RENAME_COLUMN) { @@ -1439,6 +1442,22 @@ NamesAndTypesList MergeTreeDataMergerMutator::getColumnsForNewDataPart( return all_columns; } +MergeTreeIndices MergeTreeDataMergerMutator::getIndicesForNewDataPart( + const MergeTreeIndices & all_indices, + const MutationCommands & commands_for_removes) +{ + NameSet removed_indices; + for (const auto & command : commands_for_removes) + if (command.type == MutationCommand::DROP_INDEX) + removed_indices.insert(command.column_name); + + MergeTreeIndices new_indices; + for (const auto & index : all_indices) + if (!removed_indices.count(index->name)) + new_indices.push_back(index); + + return new_indices; +} std::set MergeTreeDataMergerMutator::getIndicesToRecalculate( BlockInputStreamPtr & input_stream, @@ -1503,6 +1522,7 @@ bool MergeTreeDataMergerMutator::shouldExecuteTTL(const Names & columns, const M void MergeTreeDataMergerMutator::mutateAllPartColumns( MergeTreeData::MutableDataPartPtr new_data_part, + const MergeTreeIndices & skip_indices, BlockInputStreamPtr mutating_stream, time_t time_of_mutation, const CompressionCodecPtr & compression_codec, @@ -1524,6 +1544,7 @@ void MergeTreeDataMergerMutator::mutateAllPartColumns( MergedBlockOutputStream out{ new_data_part, new_data_part->getColumns(), + skip_indices, compression_codec}; mutating_stream->readPrefix(); @@ -1560,7 +1581,6 @@ void MergeTreeDataMergerMutator::mutateSomePartColumns( if (mutating_stream == nullptr) throw Exception("Cannot mutate part columns with uninitialized mutations stream. 
It's a bug", ErrorCodes::LOGICAL_ERROR); - if (need_remove_expired_values) mutating_stream = std::make_shared(mutating_stream, data, new_data_part, time_of_mutation, true); diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.h b/src/Storages/MergeTree/MergeTreeDataMergerMutator.h index b24b56a4780..6f4f8a03e9a 100644 --- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.h +++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.h @@ -160,6 +160,11 @@ private: NamesAndTypesList all_columns, const MutationCommands & commands_for_removes); + /// Get skip indices that should exist in the resulting data part. + static MergeTreeIndices getIndicesForNewDataPart( + const MergeTreeIndices & all_indices, + const MutationCommands & commands_for_removes); + bool shouldExecuteTTL(const Names & columns, const MutationCommands & commands) const; /// Return set of indices which should be recalculated during mutation also @@ -173,6 +178,7 @@ private: /// Override all columns of new part using mutating_stream void mutateAllPartColumns( MergeTreeData::MutableDataPartPtr new_data_part, + const MergeTreeIndices & skip_indices, BlockInputStreamPtr mutating_stream, time_t time_of_mutation, const CompressionCodecPtr & codec, diff --git a/src/Storages/MergeTree/MergeTreeDataWriter.cpp b/src/Storages/MergeTree/MergeTreeDataWriter.cpp index c560583259c..34c615994f0 100644 --- a/src/Storages/MergeTree/MergeTreeDataWriter.cpp +++ b/src/Storages/MergeTree/MergeTreeDataWriter.cpp @@ -294,7 +294,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataWriter::writeTempPart(BlockWithPa /// either default lz4 or compression method with zero thresholds on absolute and relative part size. auto compression_codec = data.global_context.chooseCompressionCodec(0, 0); - MergedBlockOutputStream out(new_data_part, columns, compression_codec); + MergedBlockOutputStream out(new_data_part, columns, data.skip_indices, compression_codec); out.writePrefix(); out.writeWithPermutation(block, perm_ptr); diff --git a/src/Storages/MergeTree/MergeTreeIndices.h b/src/Storages/MergeTree/MergeTreeIndices.h index 007851f2912..d871a522e6c 100644 --- a/src/Storages/MergeTree/MergeTreeIndices.h +++ b/src/Storages/MergeTree/MergeTreeIndices.h @@ -125,7 +125,7 @@ public: size_t granularity; }; -using MergeTreeIndices = std::vector; +using MergeTreeIndices = std::vector; class MergeTreeIndexFactory : private boost::noncopyable diff --git a/src/Storages/MergeTree/MergedBlockOutputStream.cpp b/src/Storages/MergeTree/MergedBlockOutputStream.cpp index 221170b7a32..2b482ac7c29 100644 --- a/src/Storages/MergeTree/MergedBlockOutputStream.cpp +++ b/src/Storages/MergeTree/MergedBlockOutputStream.cpp @@ -15,10 +15,11 @@ namespace ErrorCodes MergedBlockOutputStream::MergedBlockOutputStream( const MergeTreeDataPartPtr & data_part, const NamesAndTypesList & columns_list_, + const MergeTreeIndices & skip_indices, CompressionCodecPtr default_codec, bool blocks_are_granules_size) : MergedBlockOutputStream( - data_part, columns_list_, default_codec, {}, + data_part, columns_list_, skip_indices, default_codec, {}, data_part->storage.global_context.getSettings().min_bytes_to_use_direct_io, blocks_are_granules_size) { @@ -27,6 +28,7 @@ MergedBlockOutputStream::MergedBlockOutputStream( const MergeTreeDataPartPtr & data_part, const NamesAndTypesList & columns_list_, + const MergeTreeIndices & skip_indices, CompressionCodecPtr default_codec, const MergeTreeData::DataPart::ColumnToSize & merged_column_to_size, size_t
aio_threshold, @@ -49,7 +51,7 @@ MergedBlockOutputStream::MergedBlockOutputStream( disk->createDirectories(part_path); - writer = data_part->getWriter(columns_list, data_part->storage.getSkipIndices(), default_codec, writer_settings); + writer = data_part->getWriter(columns_list, skip_indices, default_codec, writer_settings); writer->initPrimaryIndex(); writer->initSkipIndices(); } diff --git a/src/Storages/MergeTree/MergedBlockOutputStream.h b/src/Storages/MergeTree/MergedBlockOutputStream.h index ee453f41a31..5a92977640e 100644 --- a/src/Storages/MergeTree/MergedBlockOutputStream.h +++ b/src/Storages/MergeTree/MergedBlockOutputStream.h @@ -16,12 +16,14 @@ public: MergedBlockOutputStream( const MergeTreeDataPartPtr & data_part, const NamesAndTypesList & columns_list_, + const MergeTreeIndices & skip_indices, CompressionCodecPtr default_codec, bool blocks_are_granules_size = false); MergedBlockOutputStream( const MergeTreeDataPartPtr & data_part, const NamesAndTypesList & columns_list_, + const MergeTreeIndices & skip_indices, CompressionCodecPtr default_codec, const MergeTreeData::DataPart::ColumnToSize & merged_column_to_size, size_t aio_threshold, From 2034f09b2af9697f90502acb0619749499fdfe5b Mon Sep 17 00:00:00 2001 From: Ivan Blinkov Date: Fri, 10 Apr 2020 16:50:38 +0300 Subject: [PATCH 274/484] Grammar in syntax.md --- docs/en/sql_reference/syntax.md | 52 ++++++++++++++++----------------- 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/docs/en/sql_reference/syntax.md b/docs/en/sql_reference/syntax.md index 12a4b9df7ef..10e8c421f6d 100644 --- a/docs/en/sql_reference/syntax.md +++ b/docs/en/sql_reference/syntax.md @@ -16,11 +16,11 @@ INSERT INTO t VALUES (1, 'Hello, world'), (2, 'abc'), (3, 'def') The `INSERT INTO t VALUES` fragment is parsed by the full parser, and the data `(1, 'Hello, world'), (2, 'abc'), (3, 'def')` is parsed by the fast stream parser. You can also turn on the full parser for the data by using the [input\_format\_values\_interpret\_expressions](../operations/settings/settings.md#settings-input_format_values_interpret_expressions) setting. When `input_format_values_interpret_expressions = 1`, ClickHouse first tries to parse values with the fast stream parser. If it fails, ClickHouse tries to use the full parser for the data, treating it like an SQL [expression](#syntax-expressions). Data can have any format. When a query is received, the server calculates no more than [max\_query\_size](../operations/settings/settings.md#settings-max_query_size) bytes of the request in RAM (by default, 1 MB), and the rest is stream parsed. -This means the system doesn’t have problems with large `INSERT` queries, like MySQL does. +It allows for avoiding issues with large `INSERT` queries. When using the `Values` format in an `INSERT` query, it may seem that data is parsed the same as expressions in a `SELECT` query, but this is not true. The `Values` format is much more limited. -Next we will cover the full parser. For more information about format parsers, see the [Formats](../interfaces/formats.md) section. +The rest of this article covers the full parser. For more information about format parsers, see the [Formats](../interfaces/formats.md) section. ## Spaces {#spaces} @@ -28,33 +28,33 @@ There may be any number of space symbols between syntactical constructions (incl ## Comments {#comments} -SQL-style and C-style comments are supported. -SQL-style comments: from `--` to the end of the line. The space after `--` can be omitted. 
-Comments in C-style: from `/*` to `*/`. These comments can be multiline. Spaces are not required here, either. +ClickHouse supports both SQL-style and C-style comments. +SQL-style comments start with `--` and continue to the end of the line, a space after `--` can be omitted. +C-style comments are from `/*` to `*/` and can be multiline, spaces are not required either. ## Keywords {#syntax-keywords} Keywords are case-insensitive when they correspond to: - SQL standard. For example, `SELECT`, `select` and `SeLeCt` are all valid. -- Implementation in some popular DBMS (MySQL or Postgres). For example, `DateTime` is same as `datetime`. +- Implementation in some popular DBMS (MySQL or Postgres). For example, `DateTime` is the same as `datetime`. Whether a data type name is case-sensitive can be checked in the `system.data_type_families` table. -In contrast to standard SQL all other keywords (including functions names) are **case-sensitive**. +In contrast to standard SQL, all other keywords (including function names) are **case-sensitive**. -Keywords are not reserved (they are just parsed as keywords in the corresponding context). If you use [identifiers](#syntax-identifiers) the same as the keywords, enclose them into quotes. For example, the query `SELECT "FROM" FROM table_name` is valid if the table `table_name` has column with the name `"FROM"`. +Keywords are not reserved; they are treated as such only in the corresponding context. If you use [identifiers](#syntax-identifiers) with the same name as the keywords, enclose them in double quotes or backticks. For example, the query `SELECT "FROM" FROM table_name` is valid if the table `table_name` has a column with the name `"FROM"`. ## Identifiers {#syntax-identifiers} Identifiers are: -- Cluster, database, table, partition and column names. +- Cluster, database, table, partition, and column names. - Functions. - Data types. - [Expression aliases](#syntax-expression_aliases). -Identifiers can be quoted or non-quoted. It is recommended to use non-quoted identifiers. +Identifiers can be quoted or non-quoted. The latter is preferred. Non-quoted identifiers must match the regex `^[a-zA-Z_][0-9a-zA-Z_]*$` and cannot be equal to [keywords](#syntax-keywords). Examples: `x, _1, X_y__Z123_.` @@ -62,34 +62,34 @@ If you want to use identifiers the same as keywords or you want to use other sym ## Literals {#literals} -There are: numeric, string, compound and `NULL` literals. +There are numeric, string, compound, and `NULL` literals. ### Numeric {#numeric} -A numeric literal tries to be parsed: +A numeric literal is parsed as follows: -- First as a 64-bit signed number, using the [strtoull](https://en.cppreference.com/w/cpp/string/byte/strtoul) function. +- First, as a 64-bit signed number, using the [strtoll](https://en.cppreference.com/w/cpp/string/byte/strtol) function. - If unsuccessful, as a 64-bit unsigned number, using the [strtoull](https://en.cppreference.com/w/cpp/string/byte/strtoul) function. - If unsuccessful, as a floating-point number using the [strtod](https://en.cppreference.com/w/cpp/string/byte/strtof) function. -- Otherwise, an error is returned. +- Otherwise, it returns an error. -The corresponding value will have the smallest type that the value fits in. +A literal value has the smallest type that the value fits in. For example, 1 is parsed as `UInt8`, but 256 is parsed as `UInt16`. For more information, see [Data types](../sql_reference/data_types/index.md).
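As a quick, hedged illustration of this typing rule (a sketch — `toTypeName` is ClickHouse's type-introspection function, and the exact names shown are what current server versions report):

``` sql
SELECT toTypeName(1), toTypeName(256), toTypeName(-1), toTypeName(0.1)
-- 1 fits into UInt8, 256 needs UInt16, -1 is parsed as Int8,
-- and 0.1 falls through to Float64.
```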
Examples: `1`, `18446744073709551615`, `0xDEADBEEF`, `01`, `0.1`, `1e100`, `-1e-100`, `inf`, `nan`. ### String {#syntax-string-literal} -Only string literals in single quotes are supported. The enclosed characters can be backslash-escaped. The following escape sequences have a corresponding special value: `\b`, `\f`, `\r`, `\n`, `\t`, `\0`, `\a`, `\v`, `\xHH`. In all other cases, escape sequences in the format `\c`, where `c` is any character, are converted to `c`. This means that you can use the sequences `\'`and`\\`. The value will have the [String](../sql_reference/data_types/string.md) type. +Only string literals in single quotes are supported. The enclosed characters can be backslash-escaped. The following escape sequences have a corresponding special value: `\b`, `\f`, `\r`, `\n`, `\t`, `\0`, `\a`, `\v`, `\xHH`. In all other cases, escape sequences in the format `\c`, where `c` is any character, are converted to `c`. It means that you can use the sequences `\'` and `\\`. The value will have the [String](../sql_reference/data_types/string.md) type. -The minimum set of characters that you need to escape in string literals: `'` and `\`. Single quote can be escaped with the single quote, literals `'It\'s'` and `'It''s'` are equal. +In string literals, you need to escape at least `'` and `\`. Single quotes can be escaped with a single quote; the literals `'It\'s'` and `'It''s'` are equal. ### Compound {#compound} -Constructions are supported for arrays: `[1, 2, 3]` and tuples: `(1, 'Hello, world!', 2)`.. -Actually, these are not literals, but expressions with the array creation operator and the tuple creation operator, respectively. +Arrays are constructed with square brackets `[1, 2, 3]`. Tuples are constructed with round brackets `(1, 'Hello, world!', 2)`. +Technically, these are not literals, but expressions with the array creation operator and the tuple creation operator, respectively. An array must consist of at least one item, and a tuple must have at least two items. -Tuples have a special purpose for use in the `IN` clause of a `SELECT` query. Tuples can be obtained as the result of a query, but they can’t be saved to a database (with the exception of [Memory](../engines/table_engines/special/memory.md) tables). +There's a separate case when tuples appear in the `IN` clause of a `SELECT` query. Query results can include tuples, but tuples can’t be saved to a database (except for tables with the [Memory](../engines/table_engines/special/memory.md) engine). ### NULL {#null-literal} @@ -99,13 +99,13 @@ In order to store `NULL` in a table field, it must be of the [Nullable](../sql_r Depending on the data format (input or output), `NULL` may have a different representation. For more information, see the documentation for [data formats](../interfaces/formats.md#formats). -There are many nuances to processing `NULL`. For example, if at least one of the arguments of a comparison operation is `NULL`, the result of this operation will also be `NULL`. The same is true for multiplication, addition, and other operations. For more information, read the documentation for each operation. +There are many nuances to processing `NULL`. For example, if at least one of the arguments of a comparison operation is `NULL`, the result of this operation is also `NULL`. The same is true for multiplication, addition, and other operations. For more information, read the documentation for each operation.
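A minimal sketch of this propagation (the `\N` rendering mentioned in the comments is just the default text representation of `NULL`):

``` sql
SELECT NULL = NULL, NULL != NULL, NULL + 1
-- All three results are NULL (rendered as \N): comparisons and
-- arithmetic propagate NULL instead of returning 0, 1, or a number.
```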
In queries, you can check `NULL` using the [IS NULL](operators.md#operator-is-null) and [IS NOT NULL](operators.md) operators and the related functions `isNull` and `isNotNull`. ## Functions {#functions} -Functions are written like an identifier with a list of arguments (possibly empty) in brackets. In contrast to standard SQL, the brackets are required, even for an empty arguments list. Example: `now()`. +Function calls are written like an identifier with a list of arguments (possibly empty) in round brackets. In contrast to standard SQL, the brackets are required, even for an empty argument list. Example: `now()`. There are regular and aggregate functions (see the section “Aggregate functions”). Some aggregate functions can contain two lists of arguments in brackets. Example: `quantile (0.9) (x)`. These aggregate functions are called “parametric” functions, and the arguments in the first list are called “parameters”. The syntax of aggregate functions without parameters is the same as for regular functions. ## Operators {#operators} @@ -115,11 +115,11 @@ For example, the expression `1 + 2 * 3 + 4` is transformed to `plus(plus(1, mult ## Data Types and Database Table Engines {#data_types-and-database-table-engines} -Data types and table engines in the `CREATE` query are written the same way as identifiers or functions. In other words, they may or may not contain an arguments list in brackets. For more information, see the sections “Data types,” “Table engines,” and “CREATE”. +Data types and table engines in the `CREATE` query are written the same way as identifiers or functions. In other words, they may or may not contain an argument list in brackets. For more information, see the sections “Data types,” “Table engines,” and “CREATE”. ## Expression Aliases {#syntax-expression_aliases} -An alias is a user-defined name for an expression in a query. +An alias is a user-defined name that you assign to an expression in a query. ``` sql expr AS alias ``` @@ -141,7 +141,7 @@ expr AS alias ### Notes on Usage {#notes-on-usage} -Aliases are global for a query or subquery and you can define an alias in any part of a query for any expression. For example, `SELECT (1 AS n) + 2, n`. +Aliases are global for a query or subquery, and you can define an alias in any part of a query for any expression. For example, `SELECT (1 AS n) + 2, n`. Aliases are not visible in subqueries and between subqueries. For example, while executing the query `SELECT (SELECT sum(b.a) + num FROM b) - a.a AS num FROM a` ClickHouse generates the exception `Unknown identifier: num`. @@ -182,4 +182,4 @@ An expression is a function, identifier, literal, application of an operator, ex A list of expressions is one or more expressions separated by commas. Functions and operators, in turn, can have expressions as arguments.
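To make the operator-to-function rewriting described above concrete, here is a hedged sketch that uses only the built-in `plus` and `multiply` functions:

``` sql
SELECT 1 + 2 * 3 + 4 AS with_operators,
       plus(plus(1, multiply(2, 3)), 4) AS with_functions
-- Both columns evaluate to 11; the parser turns the first expression
-- into exactly the second form before execution.
```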
-[Original article](https://clickhouse.tech/docs/en/query_language/syntax/) +[Original article](https://clickhouse.tech/docs/en/sql_reference/syntax/) From 9d81f896488d5f8496f598f1544d2a754617f906 Mon Sep 17 00:00:00 2001 From: alesapin Date: Fri, 10 Apr 2020 16:51:09 +0300 Subject: [PATCH 275/484] Add sleep to test --- tests/queries/0_stateless/01086_odbc_roundtrip.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/queries/0_stateless/01086_odbc_roundtrip.sh b/tests/queries/0_stateless/01086_odbc_roundtrip.sh index 827376395d1..71ea517f4dd 100755 --- a/tests/queries/0_stateless/01086_odbc_roundtrip.sh +++ b/tests/queries/0_stateless/01086_odbc_roundtrip.sh @@ -6,6 +6,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) for i in $(seq 1 10); do ${CLICKHOUSE_CLIENT} -q "select count() > 1 as ok from (select * from odbc('DSN={ClickHouse DSN (ANSI)}','system','tables'))" 2>/dev/null && break + sleep 0.1 done ${CLICKHOUSE_CLIENT} --query "select count() > 1 as ok from (select * from odbc('DSN={ClickHouse DSN (Unicode)}','system','tables'))" From 81a7b4b248ef77e5fb71502fbbeb93dcb135ed59 Mon Sep 17 00:00:00 2001 From: Ivan Blinkov Date: Fri, 10 Apr 2020 16:55:00 +0300 Subject: [PATCH 276/484] Grammar in operators.md --- docs/en/sql_reference/operators.md | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/docs/en/sql_reference/operators.md b/docs/en/sql_reference/operators.md index 418a9e32771..8ae9e460d87 100644 --- a/docs/en/sql_reference/operators.md +++ b/docs/en/sql_reference/operators.md @@ -5,8 +5,7 @@ toc_title: Operators # Operators {#operators} -All operators are transformed to their corresponding functions at the query parsing stage in accordance with their precedence and associativity. -Groups of operators are listed in order of priority (the higher it is in the list, the earlier the operator is connected to its arguments). +ClickHouse transforms operators to their corresponding functions at the query parsing stage according to their priority, precedence, and associativity. ## Access Operators {#access-operators} @@ -78,7 +77,7 @@ EXTRACT(part FROM date); ``` -Extracts a part from a given date. For example, you can retrieve a month from a given date, or a second from a time. +Extracts parts from a given date. For example, you can retrieve a month from a given date, or a second from a time. The `part` parameter specifies which part of the date to retrieve. The following values are available: @@ -151,7 +150,7 @@ Types of intervals: - `YEAR` !!! warning "Warning" - Intervals with different types can’t be combined. You can’t use expressions like `INTERVAL 4 DAY 1 HOUR`. Express intervals in units that are smaller or equal the the smallest unit of the interval, for example `INTERVAL 25 HOUR`. You can use consequtive operations like in the example below. + Intervals with different types can’t be combined. You can’t use expressions like `INTERVAL 4 DAY 1 HOUR`. Specify intervals in units that are smaller or equal to the smallest unit of the interval, for example, `INTERVAL 25 HOUR`. You can use consecutive operations, like in the example below. Example: @@ -214,7 +213,7 @@ The `transform` function does not work with `NULL`.
`x -> expr` – The `lambda(x, expr) function.` -The following operators do not have a priority, since they are brackets: +The following operators do not have a priority since they are brackets: ## Array Creation Operator {#array-creation-operator} @@ -229,7 +228,7 @@ The following operators do not have a priority, since they are brackets: All binary operators have left associativity. For example, `1 + 2 + 3` is transformed to `plus(plus(1, 2), 3)`. Sometimes this doesn’t work the way you expect. For example, `SELECT 4 > 2 > 3` will result in 0. -For efficiency, the `and` and `or` functions accept any number of arguments. The corresponding chains of `AND` and `OR` operators are transformed to a single call of these functions. +For efficiency, the `and` and `or` functions accept any number of arguments. The corresponding chains of `AND` and `OR` operators are transformed into a single call of these functions. ## Checking for `NULL` {#checking-for-null} From 0b4019becb61d63e9d20efccc8b7899e9ff778b3 Mon Sep 17 00:00:00 2001 From: Ivan Blinkov Date: Fri, 10 Apr 2020 16:56:25 +0300 Subject: [PATCH 277/484] Website performance improvements (#10175) * workers moved to separate repo https://github.com/ClickHouse/clickhouse-website-worker * support prefetch tag * Prefetch docs from front page + async load of secondary images --- website/index.html | 4 ++ website/js/base.js | 6 +++ website/templates/common_meta.html | 4 ++ website/templates/index/community.html | 18 +++---- website/templates/index/efficient.html | 4 +- website/templates/index/scalable.html | 2 +- website/templates/index/why.html | 8 +-- website/workers/events.js | 34 ------------ website/workers/meet-form.js | 75 -------------------------- website/workers/play-api.js | 24 --------- website/workers/repo.js | 10 ---- 11 files changed, 30 insertions(+), 159 deletions(-) delete mode 100644 website/workers/events.js delete mode 100644 website/workers/meet-form.js delete mode 100644 website/workers/play-api.js delete mode 100644 website/workers/repo.js diff --git a/website/index.html b/website/index.html index b249fc31285..e2ac6e31441 100644 --- a/website/index.html +++ b/website/index.html @@ -1,3 +1,7 @@ +{% set prefetch_items = [ + ('/docs/en/', 'document') +] %} + {% extends "templates/base.html" %} {% block content %} diff --git a/website/js/base.js b/website/js/base.js index ae8b3c01573..4e43a44d63a 100644 --- a/website/js/base.js +++ b/website/js/base.js @@ -47,6 +47,12 @@ }, 70); } + $('img').each(function() { + var src = $(this).attr('data-src'); + if (src) { + $(this).attr('src', src); + } + }); (function (d, w, c) { (w[c] = w[c] || []).push(function() { diff --git a/website/templates/common_meta.html b/website/templates/common_meta.html index 7ed5a8409ec..2aca17f93a2 100644 --- a/website/templates/common_meta.html +++ b/website/templates/common_meta.html @@ -19,3 +19,7 @@ content="{% if description %}{{ description }}{% else %}{{ _('ClickHouse is an open source distributed column-oriented database management system that allows generating analytical data reports in real time using SQL queries. Сreated by Yandex ClickHouse manages extremely large volumes of data in a stable and sustainable manner.') }}{% endif %}"/> + +{% for prefetch_item in prefetch_items %} + +{% endfor %} diff --git a/website/templates/index/community.html b/website/templates/index/community.html index e48edb311b6..47bcbd67218 100644 --- a/website/templates/index/community.html +++ b/website/templates/index/community.html @@ -9,7 +9,7 @@
    - {{ _('ClickHouse YouTube Channel') }} @@ -21,7 +21,7 @@
    - {{ _('ClickHouse Official Twitter Account') }} @@ -32,7 +32,7 @@
    - {{ _('ClickHouse at Telegram') }} @@ -56,7 +56,7 @@
    - ClickHouse GitHub @@ -72,7 +72,7 @@
    - Blazing fast + Blazing fast

    Blazing fast

    @@ -15,7 +15,7 @@ processing performance for a single query stands at more than 2 terabytes per second (after decompression, only used columns). In distributed setup reads are automatically balanced among healthy replicas to avoid increasing latency.

    - Fault tolerant + Fault tolerant

    Fault-tolerant

    @@ -24,14 +24,14 @@
    - Easy to use + Easy to use

    Easy to use

    ClickHouse is simple and works out-of-the-box. It streamlines all your data processing: ingest all your structured data into the system and it becomes instantly available for building reports. SQL dialect allows expressing the desired result without involving any custom non-standard API that could be found in some DBMS.

    - Highly reliable + Highly reliable

    Highly reliable

    diff --git a/website/workers/events.js b/website/workers/events.js deleted file mode 100644 index 653139af9f9..00000000000 --- a/website/workers/events.js +++ /dev/null @@ -1,34 +0,0 @@ -addEventListener('fetch', event => { - event.respondWith(handleRequest(event.request)) -}) - -async function handleRequest(request) { - let raw = await fetch('https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/README.md'); - let text = await raw.text(); - let lines = text.split('\n'); - let skip = true; - let events = []; - for (let idx in lines) { - let line = lines[idx]; - if (skip) { - if (line.includes('Upcoming Events')) { - skip = false; - } - } else { - if (!line) { continue; }; - line = line.split(']('); - var tail = line[1].split(') '); - events.push({ - 'signup_link': tail[0], - 'event_name': line[0].replace('* [', ''), - 'event_date': tail[1].slice(0, -1).replace('on ', '') - }); - } - } - - let response = new Response(JSON.stringify({ - 'events': events - })); - response.headers.set('Content-Type', 'application/json'); - return response; -} diff --git a/website/workers/meet-form.js b/website/workers/meet-form.js deleted file mode 100644 index 6506d59522e..00000000000 --- a/website/workers/meet-form.js +++ /dev/null @@ -1,75 +0,0 @@ - -addEventListener('fetch', event => { - event.respondWith(handleRequest(event.request)) -}) - -async function handleRequest(request) { - if (request.method != 'POST') { - return new Response('Bad request', { - status: 400, - statusText: 'Bad request' - }); - } - let url = new URL('https://api.sendgrid.com/v3/mail/send'); - let newHdrs = new Headers(); - newHdrs.set('Authorization', 'Bearer ' + SENDGRID_TOKEN); - newHdrs.set('Content-Type', 'application/json'); - let args = await request.json(); - let subject = args['name'] + ' wants to meet'; - let content = ''; - let argsKeys = Object.keys(args); - if (['name', 'email', 'city', 'company'].filter(n=>!argsKeys.includes(n)).length) { - return new Response('Bad request', { - status: 400, - statusText: 'Bad request' - }); - } - for (let key in args) { - content += key.charAt(0).toUpperCase() + key.slice(1); - content += ':\r\n' + args[key] + '\r\n\r\n'; - } - let body = { - "personalizations": [ - { - "to": [ - { - "email": "clickhouse-feedback@yandex-team.ru", - "name": "ClickHouse Core Team" - } - ], - "subject": subject - } - ], "content": [ - { - "type": "text/plain", - "value": content - } - ], "from": { - "email": "no-reply@clickhouse.tech", - "name": "ClickHouse Website" - }, "reply_to": - { - "email": "no-reply@clickhouse.tech", - "name": "ClickHouse Website" - } - }; - const init = { - body: JSON.stringify(body), - headers: newHdrs, - method: 'POST' - } - - let response = await fetch(url, init); - let status = 200; - if (response.status != 202) { - status = 200; - } - - return new Response('{}', { - status: status, - statusText: response.statusText.replace('Accepted', 'OK'), - headers: new Headers({ - 'Content-Type': 'application/json' - }) - }) -} diff --git a/website/workers/play-api.js b/website/workers/play-api.js deleted file mode 100644 index 62792d37a4d..00000000000 --- a/website/workers/play-api.js +++ /dev/null @@ -1,24 +0,0 @@ -addEventListener('fetch', event => { - event.respondWith(handleRequest(event.request)) -}) - -async function handleRequest(request) { - let url = new URL(request.url); - url.hostname = 'play-api.clickhouse.tech'; - url.port = 8443; - url.pathname = url.pathname.replace('/api/', '/'); - let newHdrs = new Headers() - - const init = { - body: request.body, - 
headers: request.headers, - method: request.method - } - - let response = await fetch(url, init); - - return new Response(response.body, { - status: response.status, - statusText: response.statusText - }) -} diff --git a/website/workers/repo.js b/website/workers/repo.js deleted file mode 100644 index 470391cf225..00000000000 --- a/website/workers/repo.js +++ /dev/null @@ -1,10 +0,0 @@ -addEventListener('fetch', event => { - event.respondWith(handleRequest(event.request)) -}) - -async function handleRequest(request) { - let url = new URL(request.url); - url.hostname = 'repo.yandex.ru'; - url.pathname = '/clickhouse' + url.pathname; - return fetch(url) -} From 27eaea184a54d27f3724b533b77e8a9180560add Mon Sep 17 00:00:00 2001 From: alesapin Date: Fri, 10 Apr 2020 17:03:42 +0300 Subject: [PATCH 278/484] Add small timeout --- tests/queries/0_stateless/00646_url_engine.python | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/queries/0_stateless/00646_url_engine.python b/tests/queries/0_stateless/00646_url_engine.python index 1b41216b198..494eb12b0ef 100644 --- a/tests/queries/0_stateless/00646_url_engine.python +++ b/tests/queries/0_stateless/00646_url_engine.python @@ -2,6 +2,7 @@ from __future__ import print_function import csv import sys +import time import tempfile import threading import os, urllib @@ -187,6 +188,7 @@ if __name__ == "__main__": break except Exception as ex: exception_text = str(ex) + time.sleep(0.1) if exception_text: print("Exception: {}".format(exception_text), file=sys.stderr) From 5e860ddb0482deb97147934190bb7cd4e4ecb884 Mon Sep 17 00:00:00 2001 From: tavplubix Date: Fri, 10 Apr 2020 17:09:47 +0300 Subject: [PATCH 279/484] fix 01098_temporary_and_external_tables --- .../0_stateless/01098_temporary_and_external_tables.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/queries/0_stateless/01098_temporary_and_external_tables.sh b/tests/queries/0_stateless/01098_temporary_and_external_tables.sh index f8b9862c1c1..c984f363c31 100755 --- a/tests/queries/0_stateless/01098_temporary_and_external_tables.sh +++ b/tests/queries/0_stateless/01098_temporary_and_external_tables.sh @@ -5,12 +5,12 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) url="https://${CLICKHOUSE_HOST}:${CLICKHOUSE_PORT_HTTPS}/?session_id=test_01098" -${CLICKHOUSE_CURL} -sSk "$url" --data "CREATE TEMPORARY TABLE tmp_table AS SELECT number AS n FROM numbers(42)" > /dev/null; +${CLICKHOUSE_CURL} -m 30 -sSk "$url" --data "CREATE TEMPORARY TABLE tmp_table AS SELECT number AS n FROM numbers(42)" > /dev/null; name_expr="'\`' || database || '\`.\`' || name || '\`'" -full_tmp_name=`echo "SELECT $name_expr FROM system.tables WHERE database='_temporary_and_external_tables' AND create_table_query LIKE '%tmp_table%'" | ${CLICKHOUSE_CURL} -sSgk $url -d @-` +full_tmp_name=`echo "SELECT $name_expr FROM system.tables WHERE database='_temporary_and_external_tables' AND create_table_query LIKE '%tmp_table%'" | ${CLICKHOUSE_CURL} -m 30 -sSgk $url -d @-` -echo "SELECT * FROM $full_tmp_name" | ${CLICKHOUSE_CURL} -sSgk $url -d @- | grep -F "Code: 291" > /dev/null && echo "OK" +echo "SELECT * FROM $full_tmp_name" | ${CLICKHOUSE_CURL} -m 60 -sSgk $url -d @- | grep -F "Code: 291" > /dev/null && echo "OK" -echo -ne '0\n1\n' | ${CLICKHOUSE_CURL} -sSkF 'file=@-' 
"$url&file_format=CSV&file_types=UInt64&query=SELECT+sum((number+GLOBAL+IN+(SELECT+number+AS+n+FROM+remote('127.0.0.2',+numbers(5))+WHERE+n+GLOBAL+IN+(SELECT+*+FROM+tmp_table)+AND+n+GLOBAL+NOT+IN+(SELECT+*+FROM+file)+))+AS+res),+sum(number*res)+FROM+remote('127.0.0.2',+numbers(10))"; +echo -ne '0\n1\n' | ${CLICKHOUSE_CURL} -m 30 -sSkF 'file=@-' "$url&file_format=CSV&file_types=UInt64&query=SELECT+sum((number+GLOBAL+IN+(SELECT+number+AS+n+FROM+remote('127.0.0.2',+numbers(5))+WHERE+n+GLOBAL+IN+(SELECT+*+FROM+tmp_table)+AND+n+GLOBAL+NOT+IN+(SELECT+*+FROM+file)+))+AS+res),+sum(number*res)+FROM+remote('127.0.0.2',+numbers(10))"; From d2237c3ab850c4aeb924cf7ed7c061acac7d5bfa Mon Sep 17 00:00:00 2001 From: alesapin Date: Fri, 10 Apr 2020 17:17:32 +0300 Subject: [PATCH 280/484] Update msgpack.cmake --- cmake/find/msgpack.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/find/msgpack.cmake b/cmake/find/msgpack.cmake index 0b56bbc1a0d..093555bebc0 100644 --- a/cmake/find/msgpack.cmake +++ b/cmake/find/msgpack.cmake @@ -2,7 +2,7 @@ option (USE_INTERNAL_MSGPACK_LIBRARY "Set to FALSE to use system msgpack library if (USE_INTERNAL_MSGPACK_LIBRARY) if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/msgpack-c/include/msgpack.hpp") - message(WARNING "submodule contrib/msgpack-c is missing. to fix try run: \n git submodule update --init --recursive") + message(WARNING "submodule contrib/msgpack-c is missing. To fix try run: \n git submodule update --init --recursive") set(USE_INTERNAL_MSGPACK_LIBRARY 0) set(MISSING_INTERNAL_MSGPACK_LIBRARY 1) endif() From 7177bc4cbf10524f8d9fa630db4ca113e6232ad2 Mon Sep 17 00:00:00 2001 From: Ivan Blinkov Date: Fri, 10 Apr 2020 17:40:55 +0300 Subject: [PATCH 281/484] fix to_url with version_prefix --- docs/tools/build.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/tools/build.py b/docs/tools/build.py index 1719fe051d3..fb3dba8a529 100755 --- a/docs/tools/build.py +++ b/docs/tools/build.py @@ -307,7 +307,7 @@ def write_redirect_html(out_path, to_url): Page Redirection @@ -320,7 +320,7 @@ def write_redirect_html(out_path, to_url): def build_redirect_html(args, from_path, to_path): for lang in ['en', 'es', 'fr', 'ja', 'fa']: # TODO: args.lang.split(','): out_path = os.path.join(args.docs_output_dir, lang, from_path.replace('.md', '/index.html')) - version_prefix = args.version_prefix + '/' if args.version_prefix else '/' + version_prefix = f'/{args.version_prefix}/' if args.version_prefix else '/' target_path = to_path.replace('.md', '/') to_url = f'/docs{version_prefix}{lang}/{target_path}' to_url = to_url.strip() From 770bc149df7f85107020aad11f2ed59c65ae6859 Mon Sep 17 00:00:00 2001 From: Ivan Blinkov Date: Fri, 10 Apr 2020 17:54:20 +0300 Subject: [PATCH 282/484] Extra mark for redirect template page --- docs/tools/build.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/tools/build.py b/docs/tools/build.py index fb3dba8a529..7508a072acb 100755 --- a/docs/tools/build.py +++ b/docs/tools/build.py @@ -301,7 +301,8 @@ def write_redirect_html(out_path, to_url): except OSError: pass with open(out_path, 'w') as f: - f.write(f''' + f.write(f''' + From ca1aba62b3706c78f819ef0583318b6ec7003582 Mon Sep 17 00:00:00 2001 From: Ivan Blinkov Date: Fri, 10 Apr 2020 17:56:09 +0300 Subject: [PATCH 283/484] Do not minify redirects --- docs/tools/website.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/tools/website.py b/docs/tools/website.py index 9704cf7d5a4..83eef270fc5 100644 --- 
a/docs/tools/website.py +++ b/docs/tools/website.py @@ -155,7 +155,8 @@ def minify_website(args): with open(path, 'rb') as f: content = f.read().decode('utf-8') if filename.endswith('.html'): - content = htmlmin.minify(content, remove_empty_space=False) + if not content.startswith(' diff --git a/docs/en/introduction/features_considered_disadvantages.md b/docs/en/introduction/features_considered_disadvantages.md deleted file mode 100644 index e295b5570ab..00000000000 --- a/docs/en/introduction/features_considered_disadvantages.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -toc_priority: 5 -toc_title: ClickHouse Features that Can Be Considered Disadvantages ---- - -# ClickHouse Features that Can Be Considered Disadvantages {#clickhouse-features-that-can-be-considered-disadvantages} - -1. No full-fledged transactions. -2. Lack of ability to modify or delete already inserted data with high rate and low latency. There are batch deletes and updates available to clean up or modify data, for example to comply with [GDPR](https://gdpr-info.eu). -3. The sparse index makes ClickHouse not so suitable for point queries retrieving single rows by their keys. - -[Original article](https://clickhouse.tech/docs/en/introduction/features_considered_disadvantages/) diff --git a/docs/en/sql_reference/statements/index.md b/docs/en/sql_reference/statements/index.md index 1adf93a153e..507d858c14a 100644 --- a/docs/en/sql_reference/statements/index.md +++ b/docs/en/sql_reference/statements/index.md @@ -2,4 +2,3 @@ toc_folder_title: Statements toc_priority: 31 --- - diff --git a/docs/es/introduction/distinctive_features.md b/docs/es/introduction/distinctive_features.md index 5117fcf9324..82b757341be 100644 --- a/docs/es/introduction/distinctive_features.md +++ b/docs/es/introduction/distinctive_features.md @@ -68,4 +68,10 @@ ClickHouse utiliza la replicación multi-maestro asincrónica. Después de escri Para obtener más información, consulte la sección [Replicación de datos](../engines/table_engines/mergetree_family/replication.md). +## Características que pueden considerarse desventajas {#clickhouse-features-that-can-be-considered-disadvantages} + +1. No hay transacciones completas. +2. Falta de capacidad para modificar o eliminar datos ya insertados con alta tasa y baja latencia. Hay eliminaciones y actualizaciones por lotes disponibles para limpiar o modificar datos, por ejemplo, para cumplir con [GDPR](https://gdpr-info.eu). +3. El índice disperso hace que ClickHouse no sea tan adecuado para consultas de puntos que recuperan filas individuales por sus claves. + [Artículo Original](https://clickhouse.tech/docs/en/introduction/distinctive_features/) diff --git a/docs/es/introduction/features_considered_disadvantages.md b/docs/es/introduction/features_considered_disadvantages.md deleted file mode 100644 index 60eabad3102..00000000000 --- a/docs/es/introduction/features_considered_disadvantages.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 3e185d24c9fe772c7cf03d5475247fb829a21dfa -toc_priority: 5 -toc_title: "Caracter\xEDsticas de ClickHouse que pueden considerarse desventajas" ---- - -# Características de ClickHouse que pueden considerarse desventajas {#clickhouse-features-that-can-be-considered-disadvantages} - -1. No hay transacciones completas. -2. Falta de capacidad para modificar o eliminar datos ya insertados con alta tasa y baja latencia. 
Hay eliminaciones y actualizaciones por lotes disponibles para limpiar o modificar datos, por ejemplo, para cumplir con [GDPR](https://gdpr-info.eu). -3. El índice disperso hace que ClickHouse no sea tan adecuado para consultas de puntos que recuperan filas individuales por sus claves. - -[Artículo Original](https://clickhouse.tech/docs/en/introduction/features_considered_disadvantages/) diff --git a/docs/fa/introduction/distinctive_features.md b/docs/fa/introduction/distinctive_features.md index a4313168796..71a8f3eb543 100644 --- a/docs/fa/introduction/distinctive_features.md +++ b/docs/fa/introduction/distinctive_features.md @@ -62,6 +62,12 @@ ClickHouse از روش asynchronous multimaster replication استفاده می برای اطلاعات بیشتر، به بخش [replication داده ها](../engines/table_engines/mergetree_family/replication.md) مراجعه کنید. +## ویژگی های از ClickHouse که می تواند معایبی باشد. {#wyjgy-hy-z-clickhouse-khh-my-twnd-m-yby-bshd} + +1. بدون پشتیبانی کامل از تراکنش +2. عدم توانایی برای تغییر و یا حذف داده های در حال حاضر وارد شده با سرعت بالا و تاخیر کم. برای پاک کردن و یا اصلاح داده ها، به عنوان مثال برای پیروی از [GDPR](https://gdpr-info.eu)، دسته ای پاک و به روزرسانی وجود دارد.حال توسعه می باشد. +3. Sparse index باعث می شود ClickHouse چندان مناسب اجرای پرسمان های point query برای دریافت یک ردیف از داده ها با استفاده از کلید آنها نباشد. +
    [مقاله اصلی](https://clickhouse.tech/docs/fa/introduction/distinctive_features/) diff --git a/docs/fa/introduction/features_considered_disadvantages.md b/docs/fa/introduction/features_considered_disadvantages.md deleted file mode 100644 index 5a8ea156da5..00000000000 --- a/docs/fa/introduction/features_considered_disadvantages.md +++ /dev/null @@ -1,11 +0,0 @@ -
    - -# ویژگی های از ClickHouse که می تواند معایبی باشد. {#wyjgy-hy-z-clickhouse-khh-my-twnd-m-yby-bshd} - -1. بدون پشتیبانی کامل از تراکنش -2. عدم توانایی برای تغییر و یا حذف داده های در حال حاضر وارد شده با سرعت بالا و تاخیر کم. برای پاک کردن و یا اصلاح داده ها، به عنوان مثال برای پیروی از [GDPR](https://gdpr-info.eu)، دسته ای پاک و به روزرسانی وجود دارد.حال توسعه می باشد. -3. Sparse index باعث می شود ClickHouse چندان مناسب اجرای پرسمان های point query برای دریافت یک ردیف از داده ها با استفاده از کلید آنها نباشد. - -
    - -[مقاله اصلی](https://clickhouse.tech/docs/fa/introduction/features_considered_disadvantages/) diff --git a/docs/fr/introduction/distinctive_features.md b/docs/fr/introduction/distinctive_features.md index dcea4046fcd..2c825cac85a 100644 --- a/docs/fr/introduction/distinctive_features.md +++ b/docs/fr/introduction/distinctive_features.md @@ -68,4 +68,10 @@ ClickHouse utilise la réplication multi-maître asynchrone. Après avoir été Pour plus d'informations, consultez la section [Réplication des données](../engines/table_engines/mergetree_family/replication.md). +## Caractéristiques de ClickHouse qui peuvent être considérées comme des inconvénients {#clickhouse-features-that-can-be-considered-disadvantages} + +1. Pas de transactions à part entière. +2. Manque de capacité à modifier ou supprimer des données déjà insérées avec un taux élevé et une faible latence. Des suppressions et des mises à jour par lots sont disponibles pour nettoyer ou modifier les données, par exemple pour [GDPR](https://gdpr-info.eu). +3. L'index clairsemé rend ClickHouse pas si approprié pour les requêtes ponctuelles récupérant des lignes simples par leurs clés. + [Article Original](https://clickhouse.tech/docs/en/introduction/distinctive_features/) diff --git a/docs/fr/introduction/features_considered_disadvantages.md b/docs/fr/introduction/features_considered_disadvantages.md deleted file mode 100644 index dc9fe708fef..00000000000 --- a/docs/fr/introduction/features_considered_disadvantages.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: f865c9653f9df092694258e0ccdd733c339112f5 -toc_priority: 5 -toc_title: "Caract\xE9ristiques de ClickHouse qui peuvent \xEAtre consid\xE9r\xE9\ - es comme des inconv\xE9nients" ---- - -# Caractéristiques de ClickHouse qui peuvent être considérées comme des inconvénients {#clickhouse-features-that-can-be-considered-disadvantages} - -1. Pas de transactions à part entière. -2. Manque de capacité à modifier ou supprimer des données déjà insérées avec un taux élevé et une faible latence. Des suppressions et des mises à jour par lots sont disponibles pour nettoyer ou modifier les données, par exemple pour [GDPR](https://gdpr-info.eu). -3. L'index clairsemé rend ClickHouse pas si approprié pour les requêtes ponctuelles récupérant des lignes simples par leurs clés. - -[Article Original](https://clickhouse.tech/docs/en/introduction/features_considered_disadvantages/) diff --git a/docs/ja/introduction/distinctive_features.md b/docs/ja/introduction/distinctive_features.md index 6cd0834708c..5c4b91759dc 100644 --- a/docs/ja/introduction/distinctive_features.md +++ b/docs/ja/introduction/distinctive_features.md @@ -63,4 +63,10 @@ ClickHouseには、精度を犠牲にしてパフォーマンスを得るため 詳細については、[データ複製](../engines/table_engines/mergetree_family/replication.md) セクションを参照してください。 +## 欠点と考えられるClickHouseの機能 {#qian-dian-tokao-erareruclickhousenoji-neng} + +1. 本格的なトランザクションはありません。 +2. 既に挿入されたデータの変更または削除を、高頻度かつ低遅延に行う機能はありません。 [GDPR](https://gdpr-info.eu)に準拠するなど、データをクリーンアップまたは変更するために、バッチ削除およびバッチ更新が利用可能です。 +3. 
インデックスが疎であるため、ClickHouseは、キーで単一行を取得するようなクエリにはあまり適していません。 + [Original article](https://clickhouse.yandex/docs/en/introduction/distinctive_features/) diff --git a/docs/ja/introduction/features_considered_disadvantages.md b/docs/ja/introduction/features_considered_disadvantages.md deleted file mode 100644 index 8c766e06fe8..00000000000 --- a/docs/ja/introduction/features_considered_disadvantages.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -toc_priority: 5 -toc_title: 欠点と見なすことができるClickHouseの機能 ---- - -# 欠点と考えられるClickHouseの機能 {#qian-dian-tokao-erareruclickhousenoji-neng} - -1. 本格的なトランザクションはありません。 -2. 既に挿入されたデータの変更または削除を、高頻度かつ低遅延に行う機能はありません。 [GDPR](https://gdpr-info.eu)に準拠するなど、データをクリーンアップまたは変更するために、バッチ削除およびバッチ更新が利用可能です。 -3. インデックスが疎であるため、ClickHouseは、キーで単一行を取得するようなクエリにはあまり適していません。 - -[Original article](https://clickhouse.yandex/docs/en/introduction/features_considered_disadvantages/) diff --git a/docs/redirects.txt b/docs/redirects.txt index c0f3c81813f..95df6773359 100644 --- a/docs/redirects.txt +++ b/docs/redirects.txt @@ -98,6 +98,7 @@ functions/ym_dict_functions.md query_language/functions/ym_dict_functions.md interfaces/http_interface.md interfaces/http.md interfaces/third-party_client_libraries.md interfaces/third-party/client_libraries.md interfaces/third-party_gui.md interfaces/third-party/gui.md +introduction/features_considered_disadvantages.md introduction/distinctive_features.md introduction/possible_silly_questions.md faq/general.md introduction/ya_metrika_task.md introduction/history.md operations/performance/sampling_query_profiler.md operations/optimizing_performance/sampling_query_profiler.md diff --git a/docs/ru/introduction/distinctive_features.md b/docs/ru/introduction/distinctive_features.md index 0cc40e4e162..079a0667070 100644 --- a/docs/ru/introduction/distinctive_features.md +++ b/docs/ru/introduction/distinctive_features.md @@ -61,4 +61,11 @@ ClickHouse предоставляет различные способы разм Подробнее смотрите раздел [Репликация данных](../engines/table_engines/mergetree_family/replication.md). +## Особенности, которые могут считаться недостатками {#osobennosti-clickhouse-kotorye-mogut-schitatsia-nedostatkami} + +1. Отсутствие полноценных транзакций. +2. Возможность изменять или удалять ранее записанные данные с низкими задержками и высокой частотой запросов не предоставляется. Есть массовое удаление и изменение данных для очистки более не нужного или соответствия [GDPR](https://gdpr-info.eu). +3. Разреженный индекс делает ClickHouse плохо пригодным для точечных чтений одиночных строк по своим + ключам. + [Оригинальная статья](https://clickhouse.tech/docs/ru/introduction/distinctive_features/) diff --git a/docs/ru/introduction/features_considered_disadvantages.md b/docs/ru/introduction/features_considered_disadvantages.md deleted file mode 100644 index 0cd4838d908..00000000000 --- a/docs/ru/introduction/features_considered_disadvantages.md +++ /dev/null @@ -1,8 +0,0 @@ -# Особенности ClickHouse, которые могут считаться недостатками {#osobennosti-clickhouse-kotorye-mogut-schitatsia-nedostatkami} - -1. Отсутствие полноценных транзакций. -2. Возможность изменять или удалять ранее записанные данные с низкими задержками и высокой частотой запросов не предоставляется. Есть массовое удаление и изменение данных для очистки более не нужного или соответствия [GDPR](https://gdpr-info.eu). -3. Разреженный индекс делает ClickHouse плохо пригодным для точечных чтений одиночных строк по своим - ключам. 
- -[Оригинальная статья](https://clickhouse.tech/docs/ru/introduction/features_considered_disadvantages/) diff --git a/docs/tools/nav.py b/docs/tools/nav.py index 56d47d58d07..fe11b21d8e7 100644 --- a/docs/tools/nav.py +++ b/docs/tools/nav.py @@ -35,6 +35,8 @@ def build_nav_entry(root): title = meta.get('toc_folder_title', 'hidden') prio = meta.get('toc_priority', 9999) logging.debug(f'Nav entry: {prio}, {title}, {path}') + if not content.strip(): + title = 'hidden' result_items.append((prio, title, path)) result_items = sorted(result_items, key=lambda x: (x[0], x[1])) result = collections.OrderedDict([(item[1], item[2]) for item in result_items]) @@ -45,8 +47,16 @@ def build_nav(lang, args): docs_dir = os.path.join(args.docs_dir, lang) _, _, nav = build_nav_entry(docs_dir) result = [] + index_key = None for key, value in nav.items(): if key and value: + if value == 'index.md': + index_key = key + continue result.append({key: value}) + if index_key: + key = list(result[0].keys())[0] + result[0][key][index_key] = 'index.md' + result[0][key].move_to_end(index_key, last=False) print('result', result) return result diff --git a/docs/zh/introduction/distinctive_features.md b/docs/zh/introduction/distinctive_features.md index 3b1e7a8c716..a267a49bf8a 100644 --- a/docs/zh/introduction/distinctive_features.md +++ b/docs/zh/introduction/distinctive_features.md @@ -62,4 +62,10 @@ ClickHouse使用异步的多主复制技术。当数据被写入任何一个可 更多信息,参见 [数据复制](../engines/table_engines/mergetree_family/replication.md)。 +# 的限制 {#clickhouseke-yi-ren-wei-shi-que-dian-de-gong-neng} + +1. 没有完整的事务支持。 +2. 缺少高频率,低延迟的修改或删除已存在数据的能力。仅能用于批量删除或修改数据,但这符合 [GDPR](https://gdpr-info.eu)。 +3. 稀疏索引使得ClickHouse不适合通过其键检索单行的点查询。 + [来源文章](https://clickhouse.tech/docs/en/introduction/distinctive_features/) diff --git a/docs/zh/introduction/features_considered_disadvantages.md b/docs/zh/introduction/features_considered_disadvantages.md deleted file mode 100644 index efc967e90ac..00000000000 --- a/docs/zh/introduction/features_considered_disadvantages.md +++ /dev/null @@ -1,8 +0,0 @@ - -# ClickHouse的限制 {#clickhouseke-yi-ren-wei-shi-que-dian-de-gong-neng} - -1. 没有完整的事务支持。 -2. 缺少高频率,低延迟的修改或删除已存在数据的能力。仅能用于批量删除或修改数据,但这符合 [GDPR](https://gdpr-info.eu)。 -3. 稀疏索引使得ClickHouse不适合通过其键检索单行的点查询。 - -[来源文章](https://clickhouse.tech/docs/zh/introduction/features_considered_disadvantages/) From 2f5b4b0f9b3d3738291f190da11c5d43d5c36b21 Mon Sep 17 00:00:00 2001 From: Mikhail Filimonov Date: Fri, 10 Apr 2020 22:01:10 +0200 Subject: [PATCH 296/484] Added ability to relax the restriction on non-deterministic functions usage in mutations with allow_nondeterministic_mutations setting. --- src/Core/Settings.h | 2 +- src/Interpreters/MutationsInterpreter.cpp | 4 ++- ...eterministic_functions_zookeeper.reference | 3 ++ ...th_nondeterministic_functions_zookeeper.sh | 28 +++++++++++++++++++ 4 files changed, 35 insertions(+), 2 deletions(-) diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 753231603b2..31b8bd6ab02 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -418,7 +418,7 @@ struct Settings : public SettingsCollection M(SettingBool, merge_tree_uniform_read_distribution, true, "Obsolete setting, does nothing. Will be removed after 2020-05-20", 0) \ M(SettingUInt64, mark_cache_min_lifetime, 0, "Obsolete setting, does nothing. Will be removed after 2020-05-31", 0) \ M(SettingBool, partial_merge_join, false, "Obsolete. 
Use join_algorithm='prefer_partial_merge' instead.", 0) \ - + M(SettingBool, allow_nondeterministic_mutations, false, "Allow non-deterministic functions in ALTER UPDATE/ALTER DELETE statements", 0) \ DECLARE_SETTINGS_COLLECTION(LIST_OF_SETTINGS) diff --git a/src/Interpreters/MutationsInterpreter.cpp b/src/Interpreters/MutationsInterpreter.cpp index 669b72c6317..df0267b9450 100644 --- a/src/Interpreters/MutationsInterpreter.cpp +++ b/src/Interpreters/MutationsInterpreter.cpp @@ -661,9 +661,11 @@ BlockInputStreamPtr MutationsInterpreter::addStreamsForLaterStages(const std::ve void MutationsInterpreter::validate(TableStructureReadLockHolder &) { + const Settings & settings = context.getSettingsRef(); + /// For Replicated* storages mutations cannot employ non-deterministic functions /// because that produces inconsistencies between replicas - if (startsWith(storage->getName(), "Replicated")) + if (startsWith(storage->getName(), "Replicated") && !settings.allow_nondeterministic_mutations) { for (const auto & command : commands) { diff --git a/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.reference b/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.reference index b462a5a7baa..f799e8ed8f0 100644 --- a/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.reference +++ b/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.reference @@ -2,3 +2,6 @@ OK OK OK OK +OK +OK +OK diff --git a/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.sh b/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.sh index ac66dbc352a..9b190855adf 100755 --- a/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.sh +++ b/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.sh @@ -11,6 +11,22 @@ T1=table_1017_merge ${CLICKHOUSE_CLIENT} -n -q " DROP TABLE IF EXISTS $R1; DROP TABLE IF EXISTS $R2; + DROP TABLE IF EXISTS $T1; + + DROP TABLE IF EXISTS lookup_table; + DROP TABLE IF EXISTS table_for_dict; + DROP DICTIONARY IF EXISTS dict1; + + CREATE TABLE table_for_dict (y UInt64, y_new UInt32) ENGINE = Log; + INSERT INTO table_for_dict VALUES (3, 3003),(4,4004); + + CREATE DICTIONARY dict1( y UInt64 DEFAULT 0, y_new UInt32 DEFAULT 0 ) PRIMARY KEY y + SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'table_for_dict' PASSWORD '' DB '${CLICKHOUSE_DATABASE}')) + LIFETIME(MIN 1 MAX 10) + LAYOUT(FLAT()); + + CREATE TABLE lookup_table (y UInt32, y_new UInt32) ENGINE = Join(ANY, LEFT, y); + INSERT INTO lookup_table VALUES(1,1001),(2,1002); CREATE TABLE $R1 (x UInt32, y UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/${CLICKHOUSE_DATABASE}.table_1017', 'r1') ORDER BY x; CREATE TABLE $R2 (x UInt32, y UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/${CLICKHOUSE_DATABASE}.table_1017', 'r2') ORDER BY x; @@ -35,9 +51,21 @@ ${CLICKHOUSE_CLIENT} --query "ALTER TABLE $T1 DELETE WHERE rand() = 0" 2>&1 > /d ${CLICKHOUSE_CLIENT} --query "ALTER TABLE $T1 UPDATE y = y + rand() % 1 WHERE not ignore()" 2>&1 > /dev/null \ && echo 'OK' || echo 'FAIL' +# hm... 
it looks like joinGet is considered deterministic
+${CLICKHOUSE_CLIENT} --query "ALTER TABLE $R1 UPDATE y = joinGet('${CLICKHOUSE_DATABASE}.lookup_table', 'y_new', y) WHERE x=1" 2>&1 \
+&& echo 'OK' || echo 'FAIL'
+
+${CLICKHOUSE_CLIENT} --query "ALTER TABLE $R1 DELETE WHERE dictHas('${CLICKHOUSE_DATABASE}.dict1', toUInt64(x))" 2>&1 \
+| fgrep -q "must use only deterministic functions" && echo 'OK' || echo 'FAIL'
+
+${CLICKHOUSE_CLIENT} --query "ALTER TABLE $R1 DELETE WHERE dictHas('${CLICKHOUSE_DATABASE}.dict1', toUInt64(x))" --allow_nondeterministic_mutations=1 2>&1 \
+&& echo 'OK' || echo 'FAIL'
 
 ${CLICKHOUSE_CLIENT} -n -q "
     DROP TABLE IF EXISTS $R2;
     DROP TABLE IF EXISTS $R1;
     DROP TABLE IF EXISTS $T1;
+    DROP TABLE IF EXISTS lookup_table;
+    DROP TABLE IF EXISTS table_for_dict;
+    DROP DICTIONARY IF EXISTS dict1;
 "
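The behavior this patch introduces can also be tried by hand. The following is a minimal sketch, not part of the patch itself: the table and dictionary names are hypothetical, and a Replicated* table is assumed, since the determinism check only applies to Replicated* storages.

``` bash
# Without the setting, a mutation calling a non-deterministic function such as
# dictHas() on a Replicated* table is rejected with an error containing
# "must use only deterministic functions".
clickhouse-client --query "ALTER TABLE db.repl DELETE WHERE dictHas('db.dict1', toUInt64(x))"

# With allow_nondeterministic_mutations=1, the same mutation is accepted.
clickhouse-client --allow_nondeterministic_mutations=1 \
    --query "ALTER TABLE db.repl DELETE WHERE dictHas('db.dict1', toUInt64(x))"
```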
From 2122eab14eda10dc3bb8f68192bad3da47ecf51e Mon Sep 17 00:00:00 2001
From: Ivan Blinkov
Date: Fri, 10 Apr 2020 23:02:36 +0300
Subject: [PATCH 297/484] Update adopters.md

---
 docs/ru/introduction/adopters.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/ru/introduction/adopters.md b/docs/ru/introduction/adopters.md
index 20c465f6418..5f8b825353c 100644
--- a/docs/ru/introduction/adopters.md
+++ b/docs/ru/introduction/adopters.md
@@ -3,7 +3,7 @@ machine_translated: true
 machine_translated_rev: 1cd5f0028d917696daf71ac1c9ee849c99c1d5c8
 ---
 
-# Усыновители ClickHouse {#clickhouse-adopters}
+# Пользователи ClickHouse {#clickhouse-adopters}
 
 !!! warning "Оговорка"
     Следующий список компаний, использующих ClickHouse, и их истории успеха собраны из открытых источников, поэтому они могут отличаться от текущей реальности. Мы были бы очень признательны, если бы вы поделились историей принятия ClickHouse в свою компанию и [добавьте его в список](https://github.com/ClickHouse/ClickHouse/edit/master/docs/en/introduction/adopters.md), но, пожалуйста, убедитесь, что у вас не будет никаких проблем с NDA, сделав это. Предоставление обновлений с публикациями от других компаний также полезно.

From 330d13810666b1044b99f6e3916bc61ecade8c7d Mon Sep 17 00:00:00 2001
From: alexey-milovidov
Date: Fri, 10 Apr 2020 23:58:13 +0300
Subject: [PATCH 298/484] Update Settings.h

---
 src/Core/Settings.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/Core/Settings.h b/src/Core/Settings.h
index 31b8bd6ab02..bb2b0a20b81 100644
--- a/src/Core/Settings.h
+++ b/src/Core/Settings.h
@@ -406,6 +406,7 @@ struct Settings : public SettingsCollection
     M(SettingBool, validate_polygons, true, "Throw exception if polygon is invalid in function pointInPolygon (e.g. self-tangent, self-intersecting). If the setting is false, the function will accept invalid polygons but may silently return wrong result.", 0) \
     M(SettingUInt64, max_parser_depth, 1000, "Maximum parser depth.", 0) \
     M(SettingSeconds, temporary_live_view_timeout, DEFAULT_TEMPORARY_LIVE_VIEW_TIMEOUT_SEC, "Timeout after which temporary live view is deleted.", 0) \
+    M(SettingBool, allow_nondeterministic_mutations, false, "Allow non-deterministic functions in ALTER UPDATE/ALTER DELETE statements", 0) \
 \
 /** Obsolete settings that do nothing but left for compatibility reasons. Remove each one after half a year of obsolescence. */ \
 \
@@ -418,7 +419,6 @@ struct Settings : public SettingsCollection
     M(SettingBool, merge_tree_uniform_read_distribution, true, "Obsolete setting, does nothing. Will be removed after 2020-05-20", 0) \
     M(SettingUInt64, mark_cache_min_lifetime, 0, "Obsolete setting, does nothing. Will be removed after 2020-05-31", 0) \
     M(SettingBool, partial_merge_join, false, "Obsolete. Use join_algorithm='prefer_partial_merge' instead.", 0) \
-    M(SettingBool, allow_nondeterministic_mutations, false, "Allow non-deterministic functions in ALTER UPDATE/ALTER DELETE statements", 0) \
 
 DECLARE_SETTINGS_COLLECTION(LIST_OF_SETTINGS)

From 9208847a85a6db66ead9983a33d864f7164c2668 Mon Sep 17 00:00:00 2001
From: Vitaly Baranov
Date: Sat, 11 Apr 2020 00:07:08 +0300
Subject: [PATCH 299/484] Update submodule libc-headers to a new version.

---
 contrib/libc-headers | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/contrib/libc-headers b/contrib/libc-headers
index 9676d2645a7..92c74f938cf 160000
--- a/contrib/libc-headers
+++ b/contrib/libc-headers
@@ -1 +1 @@
-Subproject commit 9676d2645a713e679dc981ffd84dee99fcd68b8e
+Subproject commit 92c74f938cf2c4dd529cae4f3d2923d153b029a7

From c9542b66018bc2e0ca124572642af364f627ebcb Mon Sep 17 00:00:00 2001
From: Konstantin Lebedev
Date: Sat, 11 Apr 2020 01:08:43 +0300
Subject: [PATCH 300/484] Style fixes for communication between ClickHouse and
 Zookeeper over SSL

---
 src/Common/ZooKeeper/ZooKeeper.cpp | 3 +--
 src/Common/ZooKeeper/ZooKeeperImpl.h | 3 ++-
 src/Common/ZooKeeper/tests/zkutil_test_commands_new_lib.cpp | 6 +++---
 3 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/src/Common/ZooKeeper/ZooKeeper.cpp b/src/Common/ZooKeeper/ZooKeeper.cpp
index f2442f3f5c5..032d1e90ff5 100644
--- a/src/Common/ZooKeeper/ZooKeeper.cpp
+++ b/src/Common/ZooKeeper/ZooKeeper.cpp
@@ -72,9 +72,8 @@ void ZooKeeper::init(const std::string & implementation_, const std::string & ho
     {
         bool secure = bool(startsWith(host_string, "secure://"));
 
-        if (secure) {
+        if (secure)
             host_string.erase(0, strlen("secure://"));
-        }
 
         nodes.emplace_back(Coordination::ZooKeeper::Node{Poco::Net::SocketAddress{host_string}, secure});
     }
diff --git a/src/Common/ZooKeeper/ZooKeeperImpl.h b/src/Common/ZooKeeper/ZooKeeperImpl.h
index 069df723d43..840cbdbde3f 100644
--- a/src/Common/ZooKeeper/ZooKeeperImpl.h
+++ b/src/Common/ZooKeeper/ZooKeeperImpl.h
@@ -93,7 +93,8 @@ struct ZooKeeperRequest;
 class ZooKeeper : public IKeeper
 {
 public:
-    struct Node {
+    struct Node
+    {
         Poco::Net::SocketAddress address;
         bool secure;
     };
diff --git a/src/Common/ZooKeeper/tests/zkutil_test_commands_new_lib.cpp b/src/Common/ZooKeeper/tests/zkutil_test_commands_new_lib.cpp
index 0bca8e0f561..d9d3402fa32 100644
--- a/src/Common/ZooKeeper/tests/zkutil_test_commands_new_lib.cpp
+++ b/src/Common/ZooKeeper/tests/zkutil_test_commands_new_lib.cpp
@@ -29,12 +29,12 @@ try
     splitInto<','>(hosts_strings, hosts_arg);
     ZooKeeper::Nodes nodes;
    nodes.reserve(hosts_strings.size());
-    for (auto & host_string : hosts_strings) {
+    for (auto & host_string : hosts_strings)
+    {
         bool secure = bool(startsWith(host_string, "secure://"));
 
-        if (secure) {
+        if (secure)
             host_string.erase(0, strlen("secure://"));
-        }
 
         nodes.emplace_back(ZooKeeper::Node{Poco::Net::SocketAddress{host_string},secure});
     }

From c3a71616d9a55b6745fbd4872ef0990a2816d5f9 Mon Sep 17 00:00:00 2001
From: Alexander Kuzmenkov
Date: Sat, 11 Apr 2020 01:29:15 +0300
Subject: [PATCH 301/484] simplified backport script

---
 utils/simple-backport/.gitignore | 1 +
 utils/simple-backport/README.md | 73 ++++++++++++++++++++++++
 utils/simple-backport/backport.sh | 95 +++++++++++++++++++++++++++++++
 3 files changed, 169 insertions(+)
 create mode 100644 utils/simple-backport/.gitignore
 create mode 100644 utils/simple-backport/README.md
 create mode 100755 
utils/simple-backport/backport.sh

diff --git a/utils/simple-backport/.gitignore b/utils/simple-backport/.gitignore
new file mode 100644
index 00000000000..72e8ffc0db8
--- /dev/null
+++ b/utils/simple-backport/.gitignore
@@ -0,0 +1 @@
+*
diff --git a/utils/simple-backport/README.md b/utils/simple-backport/README.md
new file mode 100644
index 00000000000..89a0c6d6f22
--- /dev/null
+++ b/utils/simple-backport/README.md
@@ -0,0 +1,73 @@
+# Упрощённый скрипт для бекпортирования
+
+Это упрощённый скрипт для бекпортирования. Он определяет, какие пулреквесты ещё не бекпортировали из мастера в указанную ветку. Запускать скрипт нужно из папки, где он лежит, указав ему название ветки. Он предполагает, что ваш апстримный remote называется origin.
+```
+cd my-clickhouse-repo/simple-backport
+git fetch origin
+time GITHUB_TOKEN=<token> ./backport.sh 20.1
+```
+
+Скрипт выведет примитивный отчёт:
+```
+$ time GITHUB_TOKEN=<token> ~/backport.sh 20.3
+144 PRs differ between 20.3 and master.
+backport https://github.com/ClickHouse/ClickHouse/pull/10135
+backport https://github.com/ClickHouse/ClickHouse/pull/10121
+...
+backport https://github.com/ClickHouse/ClickHouse/pull/9808
+backport https://github.com/ClickHouse/ClickHouse/pull/9410
+
+real	0m1.213s
+user	0m1.065s
+sys	0m0.311s
+```
+
+Также в рабочей папке сгенерируется отчёт `<ваша-ветка>-report.tsv`:
+
+```
+$ cat 20.3-report.tsv
+skip	10153	https://github.com/ClickHouse/ClickHouse/pull/10153	pr10153.json
+skip	10147	https://github.com/ClickHouse/ClickHouse/pull/10147	pr10147.json
+no-backport	10138	https://github.com/ClickHouse/ClickHouse/pull/10138	pr10138.json
+backport	10135	https://github.com/ClickHouse/ClickHouse/pull/10135	pr10135.json
+skip	10134	https://github.com/ClickHouse/ClickHouse/pull/10134	pr10134.json
+...
+```
+
+Можно кликать по ссылкам прям из консоли, а можно ещё проще:
+
+```
+$ cat <ветка>-report.tsv | grep ^backport | cut -f3
+$ cat <ветка>-report.tsv | grep ^backport | cut -f3 | xargs -n1 xdg-open
+```
+
+Такая команда откроет в браузере все пулреквесты, которые надо бекпортировать. Есть и другие статусы, посмотрите какие:
+
+```
+$ cat 20.1-report.tsv | cut -f1 | sort | uniq -c | sort -rn
+    446 skip
+     38 done
+     25 conflict
+     18 backport
+     10 no-backport
+```
+
+
+### Как разметить пулреквест?
+По умолчанию бекпортируются все пулреквесты, у которых в описании указана категория чейнжлога Bug fix. Если этого недостаточно, используйте теги:
+* v20.1-backported -- этот пулреквест уже бекпортирован в ветку 20.1. На случай, если автоматически не определилось.
+* v20.1-no-backport -- в ветку 20.1 бекпортировать не нужно.
+* pr-no-backport -- ни в какие ветки бекпортировать не нужно.
+* v20.1-conflicts -- при бекпорте в 20.1 произошёл конфликт. Такие пулреквесты скрипт пропускает, к ним можно потом вернуться.
+* pr-should-backport -- нужно бекпортировать в поддерживаемые ветки.
+* v20.1-must-backport -- нужно бекпортировать в 20.1.
+
+
+### Я поправил пулреквест, почему скрипт не видит?
+В процессе работы скрипт кеширует данные о пулреквестах в текущей папке, чтобы экономить квоту гитхаба. 
Удалите закешированные файлы, например, для всех реквестов, которые не помечены как пропущенные: +``` +$ cat <ваша-ветка>-report.tsv | grep -v "^skip" | cut -f4 +$ cat <ваша-ветка>-report.tsv | grep -v "^skip" | cut -f4 | xargs rm +``` + + diff --git a/utils/simple-backport/backport.sh b/utils/simple-backport/backport.sh new file mode 100755 index 00000000000..4a39f9d97c3 --- /dev/null +++ b/utils/simple-backport/backport.sh @@ -0,0 +1,95 @@ +#!/bin/bash +set -e + +branch="$1" +merge_base=$(git merge-base origin/master "origin/$branch") + +# Make lists of PRs that were merged into each branch. Use first parent here, or else +# we'll get weird things like seeing older master that was merged into a PR branch +# that was then merged into master. +git log "$merge_base..origin/master" --first-parent --oneline > master-log.txt +sed -n "s/^.*Merge pull request #\([[:digit:]]\+\).*$/\1/p" master-log.txt | sort -rn > master-prs.txt + +git log "$merge_base..origin/$branch" --first-parent --oneline > "$branch-log.txt" +sed -n "s/^.*Merge pull request #\([[:digit:]]\+\).*$/\1/p" "$branch-log.txt" | sort -rn > "$branch-prs.txt" + +# Find all master PRs that are not in branch by calculating differences of two PR lists. +grep -f "$branch-prs.txt" -F -x -v master-prs.txt > "$branch-diff-prs.txt" + +rm "$branch-report.tsv" ||: + +echo "$(wc -l < "$branch-diff-prs".txt) PRs differ between $branch and master." + +for pr in $(cat "$branch-diff-prs.txt") +do + # Download PR info from github. + file="pr$pr.json" + if ! [ -f "$file" ] + then + if ! curl -H "Authorization: token $GITHUB_TOKEN" \ + -sSf "https://api.github.com/repos/ClickHouse/ClickHouse/pulls/$pr" \ + > "$file" + then + >&2 cat "$file" + rm "$file" + break + fi + sleep 0.5 + fi + + if ! [ "$pr" == "$(jq -r .number "$file")" ] + then + >&2 echo "File $file is broken (no PR number)." + continue + fi + + action="skip" + + # First, check the changelog category. We port all bugfixes. + if jq -r .body "$file" | grep -i "^- bug[ -]*fix" > /dev/null + then + action="backport" + fi + + # Next, check the tag. They might override the decision. + matched_labels=() + for label in $(jq -r .labels[].name "$file") + do + label_action="" + case "$label" in + pr-must-backport | "v$branch-must-backport") + label_action="backport" + ;; + pr-no-backport | "v$branch-no-backport") + label_action="no-backport" + ;; + "v$branch-conflicts") + label_action="conflict" + ;; + "v$branch" | "v$branch-backported") + label_action="done" + ;; + esac + if [ "$label_action" != "" ] + then + action="$label_action" + matched_labels+=("$label") + fi + done + + # Show an error if there are conflicting labels. 
+ if [ ${#matched_labels[@]} -gt 1 ] + then + >&2 echo "PR #$pr has conflicting labels: ${matched_labels[*]}" + continue + fi + + url="https://github.com/ClickHouse/ClickHouse/pull/$pr" + printf "%s\t%s\t%s\t%s\n" "$action" "$pr" "$url" "$file" >> "$branch-report.tsv" + if [ "$action" == "backport" ] + then + printf "%s\t%s\n" "$action" "$url" + fi +done + +wait From c097e1f9e5d766989666ffec371ff7dd0b75ca6a Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Sat, 11 Apr 2020 01:38:40 +0300 Subject: [PATCH 302/484] Update README.md --- utils/simple-backport/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/simple-backport/README.md b/utils/simple-backport/README.md index 89a0c6d6f22..89a9ce36934 100644 --- a/utils/simple-backport/README.md +++ b/utils/simple-backport/README.md @@ -2,7 +2,7 @@ Это упрощённый скрипт для бекпортирования. Он определяет, какие пулреквесты ещё не бекпортировали из мастера в указанную ветку. Запускать скрипт нужно из папки, где он лежит, указав ему название ветки. Он предполагает, что ваш апстримный remote называется origin. ``` -cd my-clickhouse-repo/simple-backport +cd my-clickhouse-repo/utils/simple-backport git fetch origin time GITHUB_TOKEN= ./backport.sh 20.1 ``` From 32f0789eaa98dd4c03554c054bdba4bcf19a1340 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Sat, 11 Apr 2020 01:41:47 +0300 Subject: [PATCH 303/484] Update README.md --- utils/simple-backport/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/simple-backport/README.md b/utils/simple-backport/README.md index 89a9ce36934..13378f93989 100644 --- a/utils/simple-backport/README.md +++ b/utils/simple-backport/README.md @@ -59,7 +59,7 @@ $ cat 20.1-report.tsv | cut -f1 | sort | uniq -c | sort -rn * v20.1-no-backport -- в ветку 20.1 бекпортировать не нужно. * pr-no-backport -- ни в какие ветки бекпортировать не нужно. * v20.1-conflicts -- при бекпорте в 20.1 произошёл конфликт. Такие пулреквесты скрипт пропускает, к ним можно потом вернуться. -* pr-should-backport -- нужно бекпортировать в поддерживаемые ветки. +* pr-must-backport -- нужно бекпортировать в поддерживаемые ветки. * v20.1-must-backport -- нужно бекпортировать в 20.1. From 7c57876ea4edbec8e6da7b0c4e207a807de468db Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Sat, 11 Apr 2020 03:00:33 +0300 Subject: [PATCH 304/484] simplified backport script --- utils/simple-backport/backport.sh | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/utils/simple-backport/backport.sh b/utils/simple-backport/backport.sh index 4a39f9d97c3..a0143108383 100755 --- a/utils/simple-backport/backport.sh +++ b/utils/simple-backport/backport.sh @@ -8,10 +8,15 @@ merge_base=$(git merge-base origin/master "origin/$branch") # we'll get weird things like seeing older master that was merged into a PR branch # that was then merged into master. git log "$merge_base..origin/master" --first-parent --oneline > master-log.txt -sed -n "s/^.*Merge pull request #\([[:digit:]]\+\).*$/\1/p" master-log.txt | sort -rn > master-prs.txt - git log "$merge_base..origin/$branch" --first-parent --oneline > "$branch-log.txt" -sed -n "s/^.*Merge pull request #\([[:digit:]]\+\).*$/\1/p" "$branch-log.txt" | sort -rn > "$branch-prs.txt" + +# Search for PR numbers in commit messages. First variant is normal merge, and second +# variant is squashed. 
+find_prs=(sed -n "s/^.*Merge pull request #\([[:digit:]]\+\).*$/\1/p; + s/^.*(#\([[:digit:]]\+\))$/\1/p") + +"${find_prs[@]}" master-log.txt | sort -rn > master-prs.txt +"${find_prs[@]}" "$branch-log.txt" | sort -rn > "$branch-prs.txt" # Find all master PRs that are not in branch by calculating differences of two PR lists. grep -f "$branch-prs.txt" -F -x -v master-prs.txt > "$branch-diff-prs.txt" @@ -39,7 +44,7 @@ do if ! [ "$pr" == "$(jq -r .number "$file")" ] then - >&2 echo "File $file is broken (no PR number)." + >&2 echo "Got wrong data for PR #$pr (please check and remove '$file')." continue fi @@ -92,4 +97,3 @@ do fi done -wait From 68fa04054aec7ef6314b6850f95f4adcb1fe823e Mon Sep 17 00:00:00 2001 From: Eugene Klimov Date: Sat, 11 Apr 2020 08:27:24 +0300 Subject: [PATCH 305/484] add in server settings and monitoring section (#10015) * add description for in server settings and monitoring section Signed-off-by: Slach * Update docs/en/operations/server_settings/settings.md Co-Authored-By: BayoNet * Update docs/en/operations/server_settings/settings.md Co-Authored-By: BayoNet * Update docs/en/operations/server_settings/settings.md Co-Authored-By: BayoNet * Update docs/en/operations/server_settings/settings.md Co-Authored-By: BayoNet * Update docs/en/operations/server_settings/settings.md Co-Authored-By: BayoNet * Update docs/en/operations/server_settings/settings.md Co-Authored-By: BayoNet * sync russian description with english Signed-off-by: Slach * Update docs/ru/operations/server_settings/settings.md * sync russian description with english Signed-off-by: Slach Co-authored-by: BayoNet Co-authored-by: Ilya Yatsishin <2159081+qoega@users.noreply.github.com> --- docs/en/operations/monitoring.md | 2 ++ .../settings.md | 24 +++++++++++++++++++ docs/ru/operations/monitoring.md | 2 ++ .../settings.md | 24 +++++++++++++++++++ 4 files changed, 52 insertions(+) diff --git a/docs/en/operations/monitoring.md b/docs/en/operations/monitoring.md index 363e9cc4bff..dee1255569b 100644 --- a/docs/en/operations/monitoring.md +++ b/docs/en/operations/monitoring.md @@ -37,6 +37,8 @@ You can find metrics in the [system.metrics](../operations/system_tables.md#syst You can configure ClickHouse to export metrics to [Graphite](https://github.com/graphite-project). See the [Graphite section](server_configuration_parameters/settings.md#server_configuration_parameters-graphite) in the ClickHouse server configuration file. Before configuring export of metrics, you should set up Graphite by following their official [guide](https://graphite.readthedocs.io/en/latest/install.html). +You can configure ClickHouse to export metrics to [Prometheus](https://prometheus.io). See the [Prometheus section](server_configuration_parameters/settings.md#server_configuration_parameters-prometheus) in the ClickHouse server configuration file. Before configuring export of metrics, you should set up Prometheus by following their official [guide](https://prometheus.io/docs/prometheus/latest/installation/). + Additionally, you can monitor server availability through the HTTP API. Send the `HTTP GET` request to `/ping`. If the server is available, it responds with `200 OK`. To monitor servers in a cluster configuration, you should set the [max\_replica\_delay\_for\_distributed\_queries](settings/settings.md#settings-max_replica_delay_for_distributed_queries) parameter and use the HTTP resource `/replicas_status`. A request to `/replicas_status` returns `200 OK` if the replica is available and is not delayed behind the other replicas. 
If a replica is delayed, it returns `503 HTTP_SERVICE_UNAVAILABLE` with information about the gap.

diff --git a/docs/en/operations/server_configuration_parameters/settings.md b/docs/en/operations/server_configuration_parameters/settings.md
index 85744a039f4..02c00fababf 100644
--- a/docs/en/operations/server_configuration_parameters/settings.md
+++ b/docs/en/operations/server_configuration_parameters/settings.md
@@ -536,6 +536,30 @@ The path to the directory containing data.
 <path>/var/lib/clickhouse/</path>
 ```
 
+## prometheus {#server_configuration_parameters-prometheus}
+
+Exposes metrics data for scraping by [Prometheus](https://prometheus.io).
+
+Settings:
+
+- `endpoint` – HTTP endpoint for scraping metrics by the Prometheus server. Must start with '/'.
+- `port` – Port for `endpoint`.
+- `metrics` – Flag that enables exposing metrics from the [system.metrics](../system_tables.md#system_tables-metrics) table.
+- `events` – Flag that enables exposing metrics from the [system.events](../system_tables.md#system_tables-events) table.
+- `asynchronous_metrics` – Flag that enables exposing current metric values from the [system.asynchronous\_metrics](../system_tables.md#system_tables-asynchronous_metrics) table.
+
+**Example**
+
+``` xml
+<prometheus>
+    <endpoint>/metrics</endpoint>
+    <port>8001</port>
+    <metrics>true</metrics>
+    <events>true</events>
+    <asynchronous_metrics>true</asynchronous_metrics>
+</prometheus>
+```
+
 ## query\_log {#server_configuration_parameters-query-log}
 
 Setting for logging queries received with the [log\_queries=1](../settings/settings.md) setting.

diff --git a/docs/ru/operations/monitoring.md b/docs/ru/operations/monitoring.md
index 469d712376b..2629a4da931 100644
--- a/docs/ru/operations/monitoring.md
+++ b/docs/ru/operations/monitoring.md
@@ -32,6 +32,8 @@ ClickHouse собирает:
 Можно настроить экспорт метрик из ClickHouse в [Graphite](https://github.com/graphite-project). Смотрите секцию [graphite](server_configuration_parameters/settings.md#server_configuration_parameters-graphite) конфигурационного файла ClickHouse. Перед настройкой экспорта метрик необходимо настроить Graphite, как указано в [официальном руководстве](https://graphite.readthedocs.io/en/latest/install.html).
 
+Можно настроить экспорт метрик из ClickHouse в [Prometheus](https://prometheus.io). Смотрите [prometheus](server_configuration_parameters/settings.md#server_configuration_parameters-prometheus) конфигурационного файла ClickHouse. Перед настройкой экспорта метрик необходимо настроить Prometheus, как указано в [официальном руководстве](https://prometheus.io/docs/prometheus/latest/installation/).
+
 Также, можно отслеживать доступность сервера через HTTP API. Отправьте `HTTP GET` к ресурсу `/ping`. Если сервер доступен, он отвечает `200 OK`.
 
 Для мониторинга серверов в кластерной конфигурации необходимо установить параметр [max\_replica\_delay\_for\_distributed\_queries](settings/settings.md#settings-max_replica_delay_for_distributed_queries) и использовать HTTP ресурс `/replicas_status`. Если реплика доступна и не отстаёт от других реплик, то запрос к `/replicas_status` возвращает `200 OK`. Если реплика отстаёт, то запрос возвращает `503 HTTP_SERVICE_UNAVAILABLE`, включая информацию о размере отставания. 
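As a quick check that such a configuration works, the endpoint can be fetched by hand. A minimal sketch, assuming the example values above (endpoint `/metrics` on port 8001) and a server running on localhost:

``` bash
# Request the metrics once, the same way a Prometheus scraper would.
# The response is plain text in the Prometheus exposition format.
curl -s http://localhost:8001/metrics | head -n 20
```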
diff --git a/docs/ru/operations/server_configuration_parameters/settings.md b/docs/ru/operations/server_configuration_parameters/settings.md
index 16f00a82016..618bb9764ad 100644
--- a/docs/ru/operations/server_configuration_parameters/settings.md
+++ b/docs/ru/operations/server_configuration_parameters/settings.md
@@ -524,6 +524,30 @@ ClickHouse проверит условия `min_part_size` и `min_part_size_rat
 <path>/var/lib/clickhouse/</path>
 ```
 
+## prometheus {#server_configuration_parameters-prometheus}
+
+Публикация данных о метриках для сбора с помощью системы мониторинга [Prometheus](https://prometheus.io).
+
+Настройки:
+
+- `endpoint` – путь, по которому будет осуществляться экспорт данных метрик по HTTP протоколу для сбора с помощью prometheus. Должен начинаться с '/'.
+- `port` – порт, по которому будет доступен endpoint для сбора метрик.
+- `metrics` – флаг для экспорта текущих значений метрик из таблицы [system.metrics](../system_tables.md#system_tables-metrics).
+- `events` – флаг для экспорта текущих значений метрик из таблицы [system.events](../system_tables.md#system_tables-events).
+- `asynchronous_metrics` – флаг для экспорта текущих значений метрик из таблицы [system.asynchronous\_metrics](../system_tables.md#system_tables-asynchronous_metrics).
+
+**Пример**
+
+``` xml
+<prometheus>
+    <endpoint>/metrics</endpoint>
+    <port>8001</port>
+    <metrics>true</metrics>
+    <events>true</events>
+    <asynchronous_metrics>true</asynchronous_metrics>
+</prometheus>
+```
+
 ## query\_log {#server_configuration_parameters-query-log}
 
 Настройка логирования запросов, принятых с настройкой [log\_queries=1](../settings/settings.md).

From e0c972448ea92bf77d41ac0b53e139185115e1ec Mon Sep 17 00:00:00 2001
From: Azat Khuzhin
Date: Sat, 11 Apr 2020 13:25:53 +0300
Subject: [PATCH 306/484] Cover SHOW CREATE TABLE from database with Dictionary
 ENGINE

---
 ...how_create_table_from_dictionary.reference | 6 ++++++
 ...1225_show_create_table_from_dictionary.sql | 21 +++++++++++++++++++
 2 files changed, 27 insertions(+)
 create mode 100644 tests/queries/0_stateless/01225_show_create_table_from_dictionary.reference
 create mode 100644 tests/queries/0_stateless/01225_show_create_table_from_dictionary.sql

diff --git a/tests/queries/0_stateless/01225_show_create_table_from_dictionary.reference b/tests/queries/0_stateless/01225_show_create_table_from_dictionary.reference
new file mode 100644
index 00000000000..14ddc093143
--- /dev/null
+++ b/tests/queries/0_stateless/01225_show_create_table_from_dictionary.reference
@@ -0,0 +1,6 @@
+CREATE TABLE dict_db_01225_dictionary.`dict_db_01225.dict`
+(
+    `key` UInt64,
+    `val` UInt64
+)
+ENGINE = Dictionary(`dict_db_01225.dict`)
diff --git a/tests/queries/0_stateless/01225_show_create_table_from_dictionary.sql b/tests/queries/0_stateless/01225_show_create_table_from_dictionary.sql
new file mode 100644
index 00000000000..7550d5292d0
--- /dev/null
+++ b/tests/queries/0_stateless/01225_show_create_table_from_dictionary.sql
@@ -0,0 +1,21 @@
+DROP DATABASE IF EXISTS dict_db_01225;
+DROP DATABASE IF EXISTS dict_db_01225_dictionary;
+CREATE DATABASE dict_db_01225;
+CREATE DATABASE dict_db_01225_dictionary Engine=Dictionary;
+
+CREATE TABLE dict_db_01225.dict_data (key UInt64, val UInt64) Engine=Memory();
+CREATE DICTIONARY dict_db_01225.dict
+(
+  key UInt64 DEFAULT 0,
+  val UInt64 DEFAULT 10
+)
+PRIMARY KEY key
+SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'dict_data' PASSWORD '' DB 'dict_db_01225'))
+LIFETIME(MIN 0 MAX 0)
+LAYOUT(FLAT());
+
+SHOW CREATE TABLE dict_db_01225_dictionary.`dict_db_01225.dict` FORMAT TSVRaw;
+SHOW CREATE TABLE dict_db_01225_dictionary.`dict_db_01225.no_such_dict`; -- { serverError 
36; } + +DROP DATABASE dict_db_01225; +DROP DATABASE dict_db_01225_dictionary; From 55a143d1a559a4cdbf915da15972500b1f28a7eb Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Fri, 10 Apr 2020 01:32:59 +0300 Subject: [PATCH 307/484] Avoid superfluous dictionaries load (system.tables, SHOW CREATE TABLE) This patch avoids loading dictionaries for: - SELECT * FROM system.tables (used by clickhouse-client for completion) - SHOW CREATE TABLE some_dict But the dictionary will still be loaded on: - SHOW CREATE TABLE some_dict (from the database with Dictionary engine) --- src/Databases/DatabaseDictionary.cpp | 4 +-- src/Databases/DatabaseWithDictionaries.cpp | 36 ++++++++++++++++--- src/Databases/DatabaseWithDictionaries.h | 6 +++- src/Interpreters/ExternalDictionariesLoader.h | 7 ++-- ...01224_no_superfluous_dict_reload.reference | 19 ++++++++++ .../01224_no_superfluous_dict_reload.sql | 32 +++++++++++++++++ 6 files changed, 94 insertions(+), 10 deletions(-) create mode 100644 tests/queries/0_stateless/01224_no_superfluous_dict_reload.reference create mode 100644 tests/queries/0_stateless/01224_no_superfluous_dict_reload.sql diff --git a/src/Databases/DatabaseDictionary.cpp b/src/Databases/DatabaseDictionary.cpp index 006eb1656a2..9e7788bf846 100644 --- a/src/Databases/DatabaseDictionary.cpp +++ b/src/Databases/DatabaseDictionary.cpp @@ -64,7 +64,7 @@ StoragePtr DatabaseDictionary::tryGetTable( const Context & context, const String & table_name) const { - auto dict_ptr = context.getExternalDictionariesLoader().tryGetDictionary(table_name); + auto dict_ptr = context.getExternalDictionariesLoader().tryGetDictionary(table_name, true /*load*/); if (dict_ptr) { const DictionaryStructure & dictionary_structure = dict_ptr->getStructure(); @@ -94,7 +94,7 @@ ASTPtr DatabaseDictionary::getCreateTableQueryImpl(const Context & context, const auto & dictionaries = context.getExternalDictionariesLoader(); auto dictionary = throw_on_error ? 
dictionaries.getDictionary(table_name) - : dictionaries.tryGetDictionary(table_name); + : dictionaries.tryGetDictionary(table_name, true /*load*/); if (!dictionary) return {}; diff --git a/src/Databases/DatabaseWithDictionaries.cpp b/src/Databases/DatabaseWithDictionaries.cpp index e849962aae3..6673fdf8075 100644 --- a/src/Databases/DatabaseWithDictionaries.cpp +++ b/src/Databases/DatabaseWithDictionaries.cpp @@ -26,6 +26,8 @@ namespace ErrorCodes extern const int TABLE_ALREADY_EXISTS; extern const int UNKNOWN_TABLE; extern const int DICTIONARY_ALREADY_EXISTS; + extern const int FILE_DOESNT_EXIST; + extern const int CANNOT_GET_CREATE_TABLE_QUERY; } @@ -165,7 +167,7 @@ void DatabaseWithDictionaries::removeDictionary(const Context & context, const S } } -StoragePtr DatabaseWithDictionaries::tryGetTable(const Context & context, const String & table_name) const +StoragePtr DatabaseWithDictionaries::tryGetTableImpl(const Context & context, const String & table_name, bool load) const { if (auto table_ptr = DatabaseWithOwnTablesBase::tryGetTable(context, table_name)) return table_ptr; @@ -173,10 +175,34 @@ StoragePtr DatabaseWithDictionaries::tryGetTable(const Context & context, const if (isDictionaryExist(context, table_name)) /// We don't need lock database here, because database doesn't store dictionary itself /// just metadata - return getDictionaryStorage(context, table_name); + return getDictionaryStorage(context, table_name, load); return {}; } +StoragePtr DatabaseWithDictionaries::tryGetTable(const Context & context, const String & table_name) const +{ + return tryGetTableImpl(context, table_name, true /*load*/); +} + +ASTPtr DatabaseWithDictionaries::getCreateTableQueryImpl(const Context & context, const String & table_name, bool throw_on_error) const +{ + ASTPtr ast; + bool has_table = tryGetTableImpl(context, table_name, false /*load*/) != nullptr; + auto table_metadata_path = getObjectMetadataPath(table_name); + try + { + ast = getCreateQueryFromMetadata(context, table_metadata_path, throw_on_error); + } + catch (const Exception & e) + { + if (!has_table && e.code() == ErrorCodes::FILE_DOESNT_EXIST && throw_on_error) + throw Exception{"Table " + backQuote(table_name) + " doesn't exist", + ErrorCodes::CANNOT_GET_CREATE_TABLE_QUERY}; + else if (throw_on_error) + throw; + } + return ast; +} DatabaseTablesIteratorPtr DatabaseWithDictionaries::getTablesWithDictionaryTablesIterator( const Context & context, const FilterByNameFunction & filter_by_dictionary_name) @@ -195,7 +221,7 @@ DatabaseTablesIteratorPtr DatabaseWithDictionaries::getTablesWithDictionaryTable while (dictionaries_it && dictionaries_it->isValid()) { auto table_name = dictionaries_it->name(); - auto table_ptr = getDictionaryStorage(context, table_name); + auto table_ptr = getDictionaryStorage(context, table_name, false /*load*/); if (table_ptr) result.emplace(table_name, table_ptr); dictionaries_it->next(); @@ -223,11 +249,11 @@ bool DatabaseWithDictionaries::isDictionaryExist(const Context & /*context*/, co return dictionaries.find(dictionary_name) != dictionaries.end(); } -StoragePtr DatabaseWithDictionaries::getDictionaryStorage(const Context & context, const String & table_name) const +StoragePtr DatabaseWithDictionaries::getDictionaryStorage(const Context & context, const String & table_name, bool load) const { auto dict_name = database_name + "." 
+ table_name; const auto & external_loader = context.getExternalDictionariesLoader(); - auto dict_ptr = external_loader.tryGetDictionary(dict_name); + auto dict_ptr = external_loader.tryGetDictionary(dict_name, load); if (dict_ptr) { const DictionaryStructure & dictionary_structure = dict_ptr->getStructure(); diff --git a/src/Databases/DatabaseWithDictionaries.h b/src/Databases/DatabaseWithDictionaries.h index e47ab6206c5..50e4dca671f 100644 --- a/src/Databases/DatabaseWithDictionaries.h +++ b/src/Databases/DatabaseWithDictionaries.h @@ -20,6 +20,8 @@ public: StoragePtr tryGetTable(const Context & context, const String & table_name) const override; + ASTPtr getCreateTableQueryImpl(const Context & context, const String & table_name, bool throw_on_error) const override; + DatabaseTablesIteratorPtr getTablesWithDictionaryTablesIterator(const Context & context, const FilterByNameFunction & filter_by_dictionary_name) override; DatabaseDictionariesIteratorPtr getDictionariesIterator(const Context & context, const FilterByNameFunction & filter_by_dictionary_name) override; @@ -37,7 +39,7 @@ protected: void attachToExternalDictionariesLoader(Context & context); void detachFromExternalDictionariesLoader(); - StoragePtr getDictionaryStorage(const Context & context, const String & table_name) const; + StoragePtr getDictionaryStorage(const Context & context, const String & table_name, bool load) const; ASTPtr getCreateDictionaryQueryImpl(const Context & context, const String & dictionary_name, @@ -45,6 +47,8 @@ protected: private: ext::scope_guard database_as_config_repo_for_external_loader; + + StoragePtr tryGetTableImpl(const Context & context, const String & table_name, bool load) const; }; } diff --git a/src/Interpreters/ExternalDictionariesLoader.h b/src/Interpreters/ExternalDictionariesLoader.h index 68913ffa166..4a54a9963e7 100644 --- a/src/Interpreters/ExternalDictionariesLoader.h +++ b/src/Interpreters/ExternalDictionariesLoader.h @@ -23,9 +23,12 @@ public: return std::static_pointer_cast(load(name)); } - DictPtr tryGetDictionary(const std::string & name) const + DictPtr tryGetDictionary(const std::string & name, bool load) const { - return std::static_pointer_cast(tryLoad(name)); + if (load) + return std::static_pointer_cast(tryLoad(name)); + else + return std::static_pointer_cast(getCurrentLoadResult(name).object); } static void resetAll(); diff --git a/tests/queries/0_stateless/01224_no_superfluous_dict_reload.reference b/tests/queries/0_stateless/01224_no_superfluous_dict_reload.reference new file mode 100644 index 00000000000..5321624de02 --- /dev/null +++ b/tests/queries/0_stateless/01224_no_superfluous_dict_reload.reference @@ -0,0 +1,19 @@ +NOT_LOADED +NOT_LOADED +CREATE DICTIONARY dict_db_01224.dict +( + `key` UInt64 DEFAULT 0, + `val` UInt64 DEFAULT 10 +) +PRIMARY KEY key +SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'dict_data' PASSWORD '' DB 'dict_db_01224')) +LIFETIME(MIN 0 MAX 0) +LAYOUT(FLAT()) +NOT_LOADED +CREATE TABLE dict_db_01224_dictionary.`dict_db_01224.dict` +( + `key` UInt64, + `val` UInt64 +) +ENGINE = Dictionary(`dict_db_01224.dict`) +LOADED diff --git a/tests/queries/0_stateless/01224_no_superfluous_dict_reload.sql b/tests/queries/0_stateless/01224_no_superfluous_dict_reload.sql new file mode 100644 index 00000000000..a6eed6f072c --- /dev/null +++ b/tests/queries/0_stateless/01224_no_superfluous_dict_reload.sql @@ -0,0 +1,32 @@ +DROP DATABASE IF EXISTS dict_db_01224; +DROP DATABASE IF EXISTS dict_db_01224_dictionary; +CREATE DATABASE 
dict_db_01224; + +CREATE TABLE dict_db_01224.dict_data (key UInt64, val UInt64) Engine=Memory(); +CREATE DICTIONARY dict_db_01224.dict +( + key UInt64 DEFAULT 0, + val UInt64 DEFAULT 10 +) +PRIMARY KEY key +SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'dict_data' PASSWORD '' DB 'dict_db_01224')) +LIFETIME(MIN 0 MAX 0) +LAYOUT(FLAT()); + +SELECT status FROM system.dictionaries WHERE database = 'dict_db_01224' AND name = 'dict'; + +SELECT * FROM system.tables FORMAT Null; +SELECT status FROM system.dictionaries WHERE database = 'dict_db_01224' AND name = 'dict'; + +SHOW CREATE TABLE dict_db_01224.dict FORMAT TSVRaw; +SELECT status FROM system.dictionaries WHERE database = 'dict_db_01224' AND name = 'dict'; + +CREATE DATABASE dict_db_01224_dictionary Engine=Dictionary; +SHOW CREATE TABLE dict_db_01224_dictionary.`dict_db_01224.dict` FORMAT TSVRaw; +SELECT status FROM system.dictionaries WHERE database = 'dict_db_01224' AND name = 'dict'; + +DROP DICTIONARY dict_db_01224.dict; +SELECT status FROM system.dictionaries WHERE database = 'dict_db_01224' AND name = 'dict'; + +DROP DATABASE dict_db_01224; +DROP DATABASE dict_db_01224_dictionary; From 67235834b3cb40ccc457f659ba7c23a6891c8765 Mon Sep 17 00:00:00 2001 From: Ivan <5627721+abyss7@users.noreply.github.com> Date: Sat, 11 Apr 2020 14:14:01 +0300 Subject: [PATCH 308/484] Update libdivide to v3.0 (#10169) --- contrib/libdivide/libdivide.h | 2807 +++++++++++++--------- src/Functions/intDiv.cpp | 7 +- src/Functions/modulo.cpp | 4 +- src/Interpreters/createBlockSelector.cpp | 4 +- 4 files changed, 1732 insertions(+), 1090 deletions(-) diff --git a/contrib/libdivide/libdivide.h b/contrib/libdivide/libdivide.h index eaeaec7db6b..a153e7f9c5e 100644 --- a/contrib/libdivide/libdivide.h +++ b/contrib/libdivide/libdivide.h @@ -1,117 +1,106 @@ -/* libdivide.h - Copyright 2010 ridiculous_fish -*/ -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wold-style-cast" +// libdivide.h - Optimized integer division +// https://libdivide.com +// +// Copyright (C) 2010 - 2019 ridiculous_fish, +// Copyright (C) 2016 - 2019 Kim Walisch, +// +// libdivide is dual-licensed under the Boost or zlib licenses. +// You may use libdivide under the terms of either of these. +// See LICENSE.txt for more details. -#if defined(_WIN32) || defined(WIN32) -#define LIBDIVIDE_WINDOWS 1 -#endif +#ifndef LIBDIVIDE_H +#define LIBDIVIDE_H -#if defined(_MSC_VER) -#define LIBDIVIDE_VC 1 -#endif +#define LIBDIVIDE_VERSION "3.0" +#define LIBDIVIDE_VERSION_MAJOR 3 +#define LIBDIVIDE_VERSION_MINOR 0 -#ifdef __cplusplus -#include -#include -#include -#else -#include -#include -#include -#endif - -#if ! LIBDIVIDE_HAS_STDINT_TYPES && (! LIBDIVIDE_VC || _MSC_VER >= 1600) -/* Only Visual C++ 2010 and later include stdint.h */ #include -#define LIBDIVIDE_HAS_STDINT_TYPES 1 + +#if defined(__cplusplus) + #include + #include + #include +#else + #include + #include #endif -#if ! 
LIBDIVIDE_HAS_STDINT_TYPES -typedef __int32 int32_t; -typedef unsigned __int32 uint32_t; -typedef __int64 int64_t; -typedef unsigned __int64 uint64_t; -typedef __int8 int8_t; -typedef unsigned __int8 uint8_t; -#endif - -#if LIBDIVIDE_USE_SSE2 +#if defined(LIBDIVIDE_AVX512) + #include +#elif defined(LIBDIVIDE_AVX2) + #include +#elif defined(LIBDIVIDE_SSE2) #include #endif -#if LIBDIVIDE_VC +#if defined(_MSC_VER) #include + // disable warning C4146: unary minus operator applied + // to unsigned type, result still unsigned + #pragma warning(disable: 4146) + #define LIBDIVIDE_VC #endif -#ifndef __has_builtin -#define __has_builtin(x) 0 // Compatibility with non-clang compilers. +#if !defined(__has_builtin) + #define __has_builtin(x) 0 #endif -#ifdef __ICC -#define HAS_INT128_T 0 -#else -#define HAS_INT128_T __LP64__ +#if defined(__SIZEOF_INT128__) + #define HAS_INT128_T + // clang-cl on Windows does not yet support 128-bit division + #if !(defined(__clang__) && defined(LIBDIVIDE_VC)) + #define HAS_INT128_DIV + #endif #endif -#if defined(__x86_64__) || defined(_WIN64) || defined(_M_64) -#define LIBDIVIDE_IS_X86_64 1 +#if defined(__x86_64__) || defined(_M_X64) + #define LIBDIVIDE_X86_64 #endif #if defined(__i386__) -#define LIBDIVIDE_IS_i386 1 + #define LIBDIVIDE_i386 #endif -#if __GNUC__ || __clang__ -#define LIBDIVIDE_GCC_STYLE_ASM 1 +#if defined(__GNUC__) || defined(__clang__) + #define LIBDIVIDE_GCC_STYLE_ASM #endif +#if defined(__cplusplus) || defined(LIBDIVIDE_VC) + #define LIBDIVIDE_FUNCTION __FUNCTION__ +#else + #define LIBDIVIDE_FUNCTION __func__ +#endif -/* libdivide may use the pmuldq (vector signed 32x32->64 mult instruction) which is in SSE 4.1. However, signed multiplication can be emulated efficiently with unsigned multiplication, and SSE 4.1 is currently rare, so it is OK to not turn this on */ -#ifdef LIBDIVIDE_USE_SSE4_1 -#include +#define LIBDIVIDE_ERROR(msg) \ + do { \ + fprintf(stderr, "libdivide.h:%d: %s(): Error: %s\n", \ + __LINE__, LIBDIVIDE_FUNCTION, msg); \ + exit(-1); \ + } while (0) + +#if defined(LIBDIVIDE_ASSERTIONS_ON) + #define LIBDIVIDE_ASSERT(x) \ + do { \ + if (!(x)) { \ + fprintf(stderr, "libdivide.h:%d: %s(): Assertion failed: %s\n", \ + __LINE__, LIBDIVIDE_FUNCTION, #x); \ + exit(-1); \ + } \ + } while (0) +#else + #define LIBDIVIDE_ASSERT(x) #endif #ifdef __cplusplus -/* We place libdivide within the libdivide namespace, and that goes in an anonymous namespace so that the functions are only visible to files that #include this header and don't get external linkage. At least that's the theory. */ -namespace { namespace libdivide { #endif -/* Explanation of "more" field: bit 6 is whether to use shift path. If we are using the shift path, bit 7 is whether the divisor is negative in the signed case; in the unsigned case it is 0. Bits 0-4 is shift value (for shift path or mult path). In 32 bit case, bit 5 is always 0. We use bit 7 as the "negative divisor indicator" so that we can use sign extension to efficiently go to a full-width -1. - - -u32: [0-4] shift value - [5] ignored - [6] add indicator - [7] shift path - -s32: [0-4] shift value - [5] shift path - [6] add indicator - [7] indicates negative divisor - -u64: [0-5] shift value - [6] add indicator - [7] shift path - -s64: [0-5] shift value - [6] add indicator - [7] indicates negative divisor - magic number of 0 indicates shift path (we ran out of bits!) 
-*/ - -enum { - LIBDIVIDE_32_SHIFT_MASK = 0x1F, - LIBDIVIDE_64_SHIFT_MASK = 0x3F, - LIBDIVIDE_ADD_MARKER = 0x40, - LIBDIVIDE_U32_SHIFT_PATH = 0x80, - LIBDIVIDE_U64_SHIFT_PATH = 0x80, - LIBDIVIDE_S32_SHIFT_PATH = 0x20, - LIBDIVIDE_NEGATIVE_DIVISOR = 0x80 -}; - +// pack divider structs to prevent compilers from padding. +// This reduces memory usage by up to 43% when using a large +// array of libdivide dividers and improves performance +// by up to 10% because of reduced memory bandwidth. +#pragma pack(push, 1) struct libdivide_u32_t { uint32_t magic; @@ -133,497 +122,446 @@ struct libdivide_s64_t { uint8_t more; }; +struct libdivide_u32_branchfree_t { + uint32_t magic; + uint8_t more; +}; +struct libdivide_s32_branchfree_t { + int32_t magic; + uint8_t more; +}; -#ifndef LIBDIVIDE_API - #ifdef __cplusplus - /* In C++, we don't want our public functions to be static, because they are arguments to templates and static functions can't do that. They get internal linkage through virtue of the anonymous namespace. In C, they should be static. */ - #define LIBDIVIDE_API - #else - #define LIBDIVIDE_API static - #endif -#endif +struct libdivide_u64_branchfree_t { + uint64_t magic; + uint8_t more; +}; -#ifdef __APPLE__ -typedef signed long Int64; -typedef unsigned long UInt64; -#endif +struct libdivide_s64_branchfree_t { + int64_t magic; + uint8_t more; +}; -LIBDIVIDE_API struct libdivide_s32_t libdivide_s32_gen(int32_t y); -LIBDIVIDE_API struct libdivide_u32_t libdivide_u32_gen(uint32_t y); -LIBDIVIDE_API struct libdivide_s64_t libdivide_s64_gen(int64_t y); -LIBDIVIDE_API struct libdivide_u64_t libdivide_u64_gen(uint64_t y); -#if defined(__APPLE__) && defined(__cplusplus) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wunused-function" -LIBDIVIDE_API struct libdivide_s64_t libdivide_s64_gen(Int64 y) { return libdivide_s64_gen(int64_t(y)); }; -LIBDIVIDE_API struct libdivide_u64_t libdivide_u64_gen(UInt64 y) { return libdivide_u64_gen(uint64_t(y)); }; -#pragma GCC diagnostic pop -#endif +#pragma pack(pop) -LIBDIVIDE_API int32_t libdivide_s32_do(int32_t numer, const struct libdivide_s32_t *denom); -LIBDIVIDE_API uint32_t libdivide_u32_do(uint32_t numer, const struct libdivide_u32_t *denom); -LIBDIVIDE_API int64_t libdivide_s64_do(int64_t numer, const struct libdivide_s64_t *denom); -LIBDIVIDE_API uint64_t libdivide_u64_do(uint64_t y, const struct libdivide_u64_t *denom); -#if defined(__APPLE__) && defined(__cplusplus) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wunused-function" -LIBDIVIDE_API Int64 libdivide_s64_do(Int64 numer, const struct libdivide_s64_t *denom) { return Int64(libdivide_s64_do(int64_t(numer), denom)); }; -LIBDIVIDE_API UInt64 libdivide_u64_do(UInt64 y, const struct libdivide_u64_t *denom) { return UInt64(libdivide_u64_do(uint64_t(y), denom)); }; -#pragma GCC diagnostic pop -#endif +// Explanation of the "more" field: +// +// * Bits 0-5 is the shift value (for shift path or mult path). +// * Bit 6 is the add indicator for mult path. +// * Bit 7 is set if the divisor is negative. We use bit 7 as the negative +// divisor indicator so that we can efficiently use sign extension to +// create a bitmask with all bits set to 1 (if the divisor is negative) +// or 0 (if the divisor is positive). 
+// +// u32: [0-4] shift value +// [5] ignored +// [6] add indicator +// magic number of 0 indicates shift path +// +// s32: [0-4] shift value +// [5] ignored +// [6] add indicator +// [7] indicates negative divisor +// magic number of 0 indicates shift path +// +// u64: [0-5] shift value +// [6] add indicator +// magic number of 0 indicates shift path +// +// s64: [0-5] shift value +// [6] add indicator +// [7] indicates negative divisor +// magic number of 0 indicates shift path +// +// In s32 and s64 branchfree modes, the magic number is negated according to +// whether the divisor is negated. In branchfree strategy, it is not negated. -LIBDIVIDE_API int libdivide_u32_get_algorithm(const struct libdivide_u32_t *denom); -LIBDIVIDE_API uint32_t libdivide_u32_do_alg0(uint32_t numer, const struct libdivide_u32_t *denom); -LIBDIVIDE_API uint32_t libdivide_u32_do_alg1(uint32_t numer, const struct libdivide_u32_t *denom); -LIBDIVIDE_API uint32_t libdivide_u32_do_alg2(uint32_t numer, const struct libdivide_u32_t *denom); +enum { + LIBDIVIDE_32_SHIFT_MASK = 0x1F, + LIBDIVIDE_64_SHIFT_MASK = 0x3F, + LIBDIVIDE_ADD_MARKER = 0x40, + LIBDIVIDE_NEGATIVE_DIVISOR = 0x80 +}; -LIBDIVIDE_API int libdivide_u64_get_algorithm(const struct libdivide_u64_t *denom); -LIBDIVIDE_API uint64_t libdivide_u64_do_alg0(uint64_t numer, const struct libdivide_u64_t *denom); -LIBDIVIDE_API uint64_t libdivide_u64_do_alg1(uint64_t numer, const struct libdivide_u64_t *denom); -LIBDIVIDE_API uint64_t libdivide_u64_do_alg2(uint64_t numer, const struct libdivide_u64_t *denom); -#if defined(__APPLE__) && defined(__cplusplus) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wunused-function" -LIBDIVIDE_API UInt64 libdivide_u64_do_alg0(UInt64 numer, const struct libdivide_u64_t *denom) { return UInt64(libdivide_u64_do_alg0(uint64_t(numer), denom)); } -LIBDIVIDE_API UInt64 libdivide_u64_do_alg1(UInt64 numer, const struct libdivide_u64_t *denom) { return UInt64(libdivide_u64_do_alg1(uint64_t(numer), denom)); } -LIBDIVIDE_API UInt64 libdivide_u64_do_alg2(UInt64 numer, const struct libdivide_u64_t *denom) { return UInt64(libdivide_u64_do_alg2(uint64_t(numer), denom)); } -#pragma GCC diagnostic pop -#endif +static inline struct libdivide_s32_t libdivide_s32_gen(int32_t d); +static inline struct libdivide_u32_t libdivide_u32_gen(uint32_t d); +static inline struct libdivide_s64_t libdivide_s64_gen(int64_t d); +static inline struct libdivide_u64_t libdivide_u64_gen(uint64_t d); -LIBDIVIDE_API int libdivide_s32_get_algorithm(const struct libdivide_s32_t *denom); -LIBDIVIDE_API int32_t libdivide_s32_do_alg0(int32_t numer, const struct libdivide_s32_t *denom); -LIBDIVIDE_API int32_t libdivide_s32_do_alg1(int32_t numer, const struct libdivide_s32_t *denom); -LIBDIVIDE_API int32_t libdivide_s32_do_alg2(int32_t numer, const struct libdivide_s32_t *denom); -LIBDIVIDE_API int32_t libdivide_s32_do_alg3(int32_t numer, const struct libdivide_s32_t *denom); -LIBDIVIDE_API int32_t libdivide_s32_do_alg4(int32_t numer, const struct libdivide_s32_t *denom); +static inline struct libdivide_s32_branchfree_t libdivide_s32_branchfree_gen(int32_t d); +static inline struct libdivide_u32_branchfree_t libdivide_u32_branchfree_gen(uint32_t d); +static inline struct libdivide_s64_branchfree_t libdivide_s64_branchfree_gen(int64_t d); +static inline struct libdivide_u64_branchfree_t libdivide_u64_branchfree_gen(uint64_t d); -LIBDIVIDE_API int libdivide_s64_get_algorithm(const struct libdivide_s64_t *denom); -LIBDIVIDE_API int64_t 
libdivide_s64_do_alg0(int64_t numer, const struct libdivide_s64_t *denom); -LIBDIVIDE_API int64_t libdivide_s64_do_alg1(int64_t numer, const struct libdivide_s64_t *denom); -LIBDIVIDE_API int64_t libdivide_s64_do_alg2(int64_t numer, const struct libdivide_s64_t *denom); -LIBDIVIDE_API int64_t libdivide_s64_do_alg3(int64_t numer, const struct libdivide_s64_t *denom); -LIBDIVIDE_API int64_t libdivide_s64_do_alg4(int64_t numer, const struct libdivide_s64_t *denom); -#if defined(__APPLE__) && defined(__cplusplus) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wunused-function" -LIBDIVIDE_API Int64 libdivide_s64_do_alg0(Int64 numer, const struct libdivide_s64_t *denom) { return Int64(libdivide_s64_do_alg0(int64_t(numer), denom)); } -LIBDIVIDE_API Int64 libdivide_s64_do_alg1(Int64 numer, const struct libdivide_s64_t *denom) { return Int64(libdivide_s64_do_alg1(int64_t(numer), denom)); } -LIBDIVIDE_API Int64 libdivide_s64_do_alg2(Int64 numer, const struct libdivide_s64_t *denom) { return Int64(libdivide_s64_do_alg2(int64_t(numer), denom)); } -LIBDIVIDE_API Int64 libdivide_s64_do_alg3(Int64 numer, const struct libdivide_s64_t *denom) { return Int64(libdivide_s64_do_alg3(int64_t(numer), denom)); } -LIBDIVIDE_API Int64 libdivide_s64_do_alg4(Int64 numer, const struct libdivide_s64_t *denom) { return Int64(libdivide_s64_do_alg4(int64_t(numer), denom)); } -#pragma GCC diagnostic pop -#endif +static inline int32_t libdivide_s32_do(int32_t numer, const struct libdivide_s32_t *denom); +static inline uint32_t libdivide_u32_do(uint32_t numer, const struct libdivide_u32_t *denom); +static inline int64_t libdivide_s64_do(int64_t numer, const struct libdivide_s64_t *denom); +static inline uint64_t libdivide_u64_do(uint64_t numer, const struct libdivide_u64_t *denom); +static inline int32_t libdivide_s32_branchfree_do(int32_t numer, const struct libdivide_s32_branchfree_t *denom); +static inline uint32_t libdivide_u32_branchfree_do(uint32_t numer, const struct libdivide_u32_branchfree_t *denom); +static inline int64_t libdivide_s64_branchfree_do(int64_t numer, const struct libdivide_s64_branchfree_t *denom); +static inline uint64_t libdivide_u64_branchfree_do(uint64_t numer, const struct libdivide_u64_branchfree_t *denom); -#if LIBDIVIDE_USE_SSE2 -LIBDIVIDE_API __m128i libdivide_u32_do_vector(__m128i numers, const struct libdivide_u32_t * denom); -LIBDIVIDE_API __m128i libdivide_s32_do_vector(__m128i numers, const struct libdivide_s32_t * denom); -LIBDIVIDE_API __m128i libdivide_u64_do_vector(__m128i numers, const struct libdivide_u64_t * denom); -LIBDIVIDE_API __m128i libdivide_s64_do_vector(__m128i numers, const struct libdivide_s64_t * denom); - -LIBDIVIDE_API __m128i libdivide_u32_do_vector_alg0(__m128i numers, const struct libdivide_u32_t * denom); -LIBDIVIDE_API __m128i libdivide_u32_do_vector_alg1(__m128i numers, const struct libdivide_u32_t * denom); -LIBDIVIDE_API __m128i libdivide_u32_do_vector_alg2(__m128i numers, const struct libdivide_u32_t * denom); - -LIBDIVIDE_API __m128i libdivide_s32_do_vector_alg0(__m128i numers, const struct libdivide_s32_t * denom); -LIBDIVIDE_API __m128i libdivide_s32_do_vector_alg1(__m128i numers, const struct libdivide_s32_t * denom); -LIBDIVIDE_API __m128i libdivide_s32_do_vector_alg2(__m128i numers, const struct libdivide_s32_t * denom); -LIBDIVIDE_API __m128i libdivide_s32_do_vector_alg3(__m128i numers, const struct libdivide_s32_t * denom); -LIBDIVIDE_API __m128i libdivide_s32_do_vector_alg4(__m128i numers, const struct libdivide_s32_t * denom); - 
-LIBDIVIDE_API __m128i libdivide_u64_do_vector_alg0(__m128i numers, const struct libdivide_u64_t * denom); -LIBDIVIDE_API __m128i libdivide_u64_do_vector_alg1(__m128i numers, const struct libdivide_u64_t * denom); -LIBDIVIDE_API __m128i libdivide_u64_do_vector_alg2(__m128i numers, const struct libdivide_u64_t * denom); - -LIBDIVIDE_API __m128i libdivide_s64_do_vector_alg0(__m128i numers, const struct libdivide_s64_t * denom); -LIBDIVIDE_API __m128i libdivide_s64_do_vector_alg1(__m128i numers, const struct libdivide_s64_t * denom); -LIBDIVIDE_API __m128i libdivide_s64_do_vector_alg2(__m128i numers, const struct libdivide_s64_t * denom); -LIBDIVIDE_API __m128i libdivide_s64_do_vector_alg3(__m128i numers, const struct libdivide_s64_t * denom); -LIBDIVIDE_API __m128i libdivide_s64_do_vector_alg4(__m128i numers, const struct libdivide_s64_t * denom); -#endif - +static inline int32_t libdivide_s32_recover(const struct libdivide_s32_t *denom); +static inline uint32_t libdivide_u32_recover(const struct libdivide_u32_t *denom); +static inline int64_t libdivide_s64_recover(const struct libdivide_s64_t *denom); +static inline uint64_t libdivide_u64_recover(const struct libdivide_u64_t *denom); +static inline int32_t libdivide_s32_branchfree_recover(const struct libdivide_s32_branchfree_t *denom); +static inline uint32_t libdivide_u32_branchfree_recover(const struct libdivide_u32_branchfree_t *denom); +static inline int64_t libdivide_s64_branchfree_recover(const struct libdivide_s64_branchfree_t *denom); +static inline uint64_t libdivide_u64_branchfree_recover(const struct libdivide_u64_branchfree_t *denom); //////// Internal Utility Functions -static inline uint32_t libdivide__mullhi_u32(uint32_t x, uint32_t y) { +static inline uint32_t libdivide_mullhi_u32(uint32_t x, uint32_t y) { uint64_t xl = x, yl = y; uint64_t rl = xl * yl; return (uint32_t)(rl >> 32); } -static uint64_t libdivide__mullhi_u64(uint64_t x, uint64_t y) { -#if HAS_INT128_T +static inline int32_t libdivide_mullhi_s32(int32_t x, int32_t y) { + int64_t xl = x, yl = y; + int64_t rl = xl * yl; + // needs to be arithmetic shift + return (int32_t)(rl >> 32); +} + +static inline uint64_t libdivide_mullhi_u64(uint64_t x, uint64_t y) { +#if defined(LIBDIVIDE_VC) && \ + defined(LIBDIVIDE_X86_64) + return __umulh(x, y); +#elif defined(HAS_INT128_T) __uint128_t xl = x, yl = y; __uint128_t rl = xl * yl; return (uint64_t)(rl >> 64); #else - //full 128 bits are x0 * y0 + (x0 * y1 << 32) + (x1 * y0 << 32) + (x1 * y1 << 64) - const uint32_t mask = 0xFFFFFFFF; - const uint32_t x0 = (uint32_t)(x & mask), x1 = (uint32_t)(x >> 32); - const uint32_t y0 = (uint32_t)(y & mask), y1 = (uint32_t)(y >> 32); - const uint32_t x0y0_hi = libdivide__mullhi_u32(x0, y0); - const uint64_t x0y1 = x0 * (uint64_t)y1; - const uint64_t x1y0 = x1 * (uint64_t)y0; - const uint64_t x1y1 = x1 * (uint64_t)y1; - + // full 128 bits are x0 * y0 + (x0 * y1 << 32) + (x1 * y0 << 32) + (x1 * y1 << 64) + uint32_t mask = 0xFFFFFFFF; + uint32_t x0 = (uint32_t)(x & mask); + uint32_t x1 = (uint32_t)(x >> 32); + uint32_t y0 = (uint32_t)(y & mask); + uint32_t y1 = (uint32_t)(y >> 32); + uint32_t x0y0_hi = libdivide_mullhi_u32(x0, y0); + uint64_t x0y1 = x0 * (uint64_t)y1; + uint64_t x1y0 = x1 * (uint64_t)y0; + uint64_t x1y1 = x1 * (uint64_t)y1; uint64_t temp = x1y0 + x0y0_hi; - uint64_t temp_lo = temp & mask, temp_hi = temp >> 32; + uint64_t temp_lo = temp & mask; + uint64_t temp_hi = temp >> 32; + return x1y1 + temp_hi + ((temp_lo + x0y1) >> 32); #endif } -static inline int64_t 
libdivide__mullhi_s64(int64_t x, int64_t y) { -#if HAS_INT128_T +static inline int64_t libdivide_mullhi_s64(int64_t x, int64_t y) { +#if defined(LIBDIVIDE_VC) && \ + defined(LIBDIVIDE_X86_64) + return __mulh(x, y); +#elif defined(HAS_INT128_T) __int128_t xl = x, yl = y; __int128_t rl = xl * yl; return (int64_t)(rl >> 64); #else - //full 128 bits are x0 * y0 + (x0 * y1 << 32) + (x1 * y0 << 32) + (x1 * y1 << 64) - const uint32_t mask = 0xFFFFFFFF; - const uint32_t x0 = (uint32_t)(x & mask), y0 = (uint32_t)(y & mask); - const int32_t x1 = (int32_t)(x >> 32), y1 = (int32_t)(y >> 32); - const uint32_t x0y0_hi = libdivide__mullhi_u32(x0, y0); - const int64_t t = x1*(int64_t)y0 + x0y0_hi; - const int64_t w1 = x0*(int64_t)y1 + (t & mask); - return x1*(int64_t)y1 + (t >> 32) + (w1 >> 32); + // full 128 bits are x0 * y0 + (x0 * y1 << 32) + (x1 * y0 << 32) + (x1 * y1 << 64) + uint32_t mask = 0xFFFFFFFF; + uint32_t x0 = (uint32_t)(x & mask); + uint32_t y0 = (uint32_t)(y & mask); + int32_t x1 = (int32_t)(x >> 32); + int32_t y1 = (int32_t)(y >> 32); + uint32_t x0y0_hi = libdivide_mullhi_u32(x0, y0); + int64_t t = x1 * (int64_t)y0 + x0y0_hi; + int64_t w1 = x0 * (int64_t)y1 + (t & mask); + + return x1 * (int64_t)y1 + (t >> 32) + (w1 >> 32); #endif } -#if LIBDIVIDE_USE_SSE2 - -static inline __m128i libdivide__u64_to_m128(uint64_t x) { -#if LIBDIVIDE_VC && ! _WIN64 - //64 bit windows doesn't seem to have an implementation of any of these load intrinsics, and 32 bit Visual C++ crashes - _declspec(align(16)) uint64_t temp[2] = {x, x}; - return _mm_load_si128((const __m128i*)temp); -#elif defined(__ICC) - uint64_t __attribute__((aligned(16))) temp[2] = {x,x}; - return _mm_load_si128((const __m128i*)temp); -#elif __clang__ -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wc++11-narrowing" // narrowing from uint64_t (aka 'unsigned long') to 'long long' - // clang does not provide this intrinsic either - return (__m128i){x, x}; -#pragma clang diagnostic pop -#else - // everyone else gets it right - return _mm_set1_epi64x(x); -#endif -} - -static inline __m128i libdivide_get_FFFFFFFF00000000(void) { - //returns the same as _mm_set1_epi64(0xFFFFFFFF00000000ULL) without touching memory - __m128i result = _mm_set1_epi8(-1); //optimizes to pcmpeqd on OS X - return _mm_slli_epi64(result, 32); -} - -static inline __m128i libdivide_get_00000000FFFFFFFF(void) { - //returns the same as _mm_set1_epi64(0x00000000FFFFFFFFULL) without touching memory - __m128i result = _mm_set1_epi8(-1); //optimizes to pcmpeqd on OS X - result = _mm_srli_epi64(result, 32); - return result; -} - -#if __clang__ -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wuninitialized" -#endif -static inline __m128i libdivide_get_0000FFFF(void) { - //returns the same as _mm_set1_epi32(0x0000FFFFULL) without touching memory - __m128i result; //we don't care what its contents are - result = _mm_cmpeq_epi8(result, result); //all 1s - result = _mm_srli_epi32(result, 16); - return result; -} -#if __clang__ -#pragma clang diagnostic pop -#endif - -/// This is a bug in gcc-8, _MM_SHUFFLE was forgotten, though in trunk it is ok https://github.com/gcc-mirror/gcc/blob/master/gcc/config/rs6000/xmmintrin.h#L61 -#if defined(__PPC__) -#ifndef _MM_SHUFFLE -#define _MM_SHUFFLE(w,x,y,z) (((w) << 6) | ((x) << 4) | ((y) << 2) | (z)) -#endif -#endif - -static inline __m128i libdivide_s64_signbits(__m128i v) { - //we want to compute v >> 63, that is, _mm_srai_epi64(v, 63). But there is no 64 bit shift right arithmetic instruction in SSE2. 
So we have to fake it by first duplicating the high 32 bit values, and then using a 32 bit shift. Another option would be to use _mm_srli_epi64(v, 63) and then subtract that from 0, but that approach appears to be substantially slower for unknown reasons - __m128i hiBitsDuped = _mm_shuffle_epi32(v, _MM_SHUFFLE(3, 3, 1, 1)); - __m128i signBits = _mm_srai_epi32(hiBitsDuped, 31); - return signBits; -} - -/* Returns an __m128i whose low 32 bits are equal to amt and has zero elsewhere. */ -static inline __m128i libdivide_u32_to_m128i(uint32_t amt) { - return _mm_set_epi32(0, 0, 0, amt); -} - -static inline __m128i libdivide_s64_shift_right_vector(__m128i v, int amt) { - //implementation of _mm_sra_epi64. Here we have two 64 bit values which are shifted right to logically become (64 - amt) values, and are then sign extended from a (64 - amt) bit number. - const int b = 64 - amt; - __m128i m = libdivide__u64_to_m128(1ULL << (b - 1)); - __m128i x = _mm_srl_epi64(v, libdivide_u32_to_m128i(amt)); - __m128i result = _mm_sub_epi64(_mm_xor_si128(x, m), m); //result = x^m - m - return result; -} - -/* Here, b is assumed to contain one 32 bit value repeated four times. If it did not, the function would not work. */ -static inline __m128i libdivide__mullhi_u32_flat_vector(__m128i a, __m128i b) { - __m128i hi_product_0Z2Z = _mm_srli_epi64(_mm_mul_epu32(a, b), 32); - __m128i a1X3X = _mm_srli_epi64(a, 32); - __m128i hi_product_Z1Z3 = _mm_and_si128(_mm_mul_epu32(a1X3X, b), libdivide_get_FFFFFFFF00000000()); - return _mm_or_si128(hi_product_0Z2Z, hi_product_Z1Z3); // = hi_product_0123 -} - - -/* Here, y is assumed to contain one 64 bit value repeated twice. */ -static inline __m128i libdivide_mullhi_u64_flat_vector(__m128i x, __m128i y) { - //full 128 bits are x0 * y0 + (x0 * y1 << 32) + (x1 * y0 << 32) + (x1 * y1 << 64) - const __m128i mask = libdivide_get_00000000FFFFFFFF(); - const __m128i x0 = _mm_and_si128(x, mask), x1 = _mm_srli_epi64(x, 32); //x0 is low half of 2 64 bit values, x1 is high half in low slots - const __m128i y0 = _mm_and_si128(y, mask), y1 = _mm_srli_epi64(y, 32); - const __m128i x0y0_hi = _mm_srli_epi64(_mm_mul_epu32(x0, y0), 32); //x0 happens to have the low half of the two 64 bit values in 32 bit slots 0 and 2, so _mm_mul_epu32 computes their full product, and then we shift right by 32 to get just the high values - const __m128i x0y1 = _mm_mul_epu32(x0, y1); - const __m128i x1y0 = _mm_mul_epu32(x1, y0); - const __m128i x1y1 = _mm_mul_epu32(x1, y1); - - const __m128i temp = _mm_add_epi64(x1y0, x0y0_hi); - __m128i temp_lo = _mm_and_si128(temp, mask), temp_hi = _mm_srli_epi64(temp, 32); - temp_lo = _mm_srli_epi64(_mm_add_epi64(temp_lo, x0y1), 32); - temp_hi = _mm_add_epi64(x1y1, temp_hi); - - return _mm_add_epi64(temp_lo, temp_hi); -} - -/* y is one 64 bit value repeated twice */ -static inline __m128i libdivide_mullhi_s64_flat_vector(__m128i x, __m128i y) { - __m128i p = libdivide_mullhi_u64_flat_vector(x, y); - __m128i t1 = _mm_and_si128(libdivide_s64_signbits(x), y); - p = _mm_sub_epi64(p, t1); - __m128i t2 = _mm_and_si128(libdivide_s64_signbits(y), x); - p = _mm_sub_epi64(p, t2); - return p; -} - -#ifdef LIBDIVIDE_USE_SSE4_1 - -/* b is one 32 bit value repeated four times. 
*/ -static inline __m128i libdivide_mullhi_s32_flat_vector(__m128i a, __m128i b) { - __m128i hi_product_0Z2Z = _mm_srli_epi64(_mm_mul_epi32(a, b), 32); - __m128i a1X3X = _mm_srli_epi64(a, 32); - __m128i hi_product_Z1Z3 = _mm_and_si128(_mm_mul_epi32(a1X3X, b), libdivide_get_FFFFFFFF00000000()); - return _mm_or_si128(hi_product_0Z2Z, hi_product_Z1Z3); // = hi_product_0123 -} - -#else - -/* SSE2 does not have a signed multiplication instruction, but we can convert unsigned to signed pretty efficiently. Again, b is just a 32 bit value repeated four times. */ -static inline __m128i libdivide_mullhi_s32_flat_vector(__m128i a, __m128i b) { - __m128i p = libdivide__mullhi_u32_flat_vector(a, b); - __m128i t1 = _mm_and_si128(_mm_srai_epi32(a, 31), b); //t1 = (a >> 31) & y, arithmetic shift - __m128i t2 = _mm_and_si128(_mm_srai_epi32(b, 31), a); - p = _mm_sub_epi32(p, t1); - p = _mm_sub_epi32(p, t2); - return p; -} -#endif -#endif - -static inline int32_t libdivide__count_trailing_zeros32(uint32_t val) { -#if __GNUC__ || __has_builtin(__builtin_ctz) - /* Fast way to count trailing zeros */ - return __builtin_ctz(val); -#elif LIBDIVIDE_VC - unsigned long result; - if (_BitScanForward(&result, val)) { - return result; - } - return 0; -#else - /* Dorky way to count trailing zeros. Note that this hangs for val = 0! */ - int32_t result = 0; - val = (val ^ (val - 1)) >> 1; // Set v's trailing 0s to 1s and zero rest - while (val) { - val >>= 1; - result++; - } - return result; -#endif -} - -static inline int32_t libdivide__count_trailing_zeros64(uint64_t val) { -#if __LP64__ && (__GNUC__ || __has_builtin(__builtin_ctzll)) - /* Fast way to count trailing zeros. Note that we disable this in 32 bit because gcc does something horrible - it calls through to a dynamically bound function. */ - return __builtin_ctzll(val); -#elif LIBDIVIDE_VC && _WIN64 - unsigned long result; - if (_BitScanForward64(&result, val)) { - return result; - } - return 0; -#else - /* Pretty good way to count trailing zeros. Note that this hangs for val = 0! */ - uint32_t lo = val & 0xFFFFFFFF; - if (lo != 0) return libdivide__count_trailing_zeros32(lo); - return 32 + libdivide__count_trailing_zeros32(val >> 32); -#endif -} - -static inline int32_t libdivide__count_leading_zeros32(uint32_t val) { -#if __GNUC__ || __has_builtin(__builtin_clzll) - /* Fast way to count leading zeros */ +static inline int32_t libdivide_count_leading_zeros32(uint32_t val) { +#if defined(__GNUC__) || \ + __has_builtin(__builtin_clz) + // Fast way to count leading zeros return __builtin_clz(val); -#elif LIBDIVIDE_VC +#elif defined(LIBDIVIDE_VC) unsigned long result; if (_BitScanReverse(&result, val)) { return 31 - result; } return 0; #else - /* Dorky way to count leading zeros. Note that this hangs for val = 0! */ int32_t result = 0; - while (! (val & (1U << 31))) { - val <<= 1; + uint32_t hi = 1U << 31; + for (; ~val & hi; hi >>= 1) { result++; } return result; #endif } -static inline int32_t libdivide__count_leading_zeros64(uint64_t val) { -#if __GNUC__ || __has_builtin(__builtin_clzll) - /* Fast way to count leading zeros */ +static inline int32_t libdivide_count_leading_zeros64(uint64_t val) { +#if defined(__GNUC__) || \ + __has_builtin(__builtin_clzll) + // Fast way to count leading zeros return __builtin_clzll(val); -#elif LIBDIVIDE_VC && _WIN64 +#elif defined(LIBDIVIDE_VC) && defined(_WIN64) unsigned long result; if (_BitScanReverse64(&result, val)) { return 63 - result; } return 0; #else - /* Dorky way to count leading zeros. 
Note that this hangs for val = 0! */ - int32_t result = 0; - while (! (val & (1ULL << 63))) { - val <<= 1; - result++; - } - return result; + uint32_t hi = val >> 32; + uint32_t lo = val & 0xFFFFFFFF; + if (hi != 0) return libdivide_count_leading_zeros32(hi); + return 32 + libdivide_count_leading_zeros32(lo); #endif } -//libdivide_64_div_32_to_32: divides a 64 bit uint {u1, u0} by a 32 bit uint {v}. The result must fit in 32 bits. Returns the quotient directly and the remainder in *r -#if (LIBDIVIDE_IS_i386 || LIBDIVIDE_IS_X86_64) && LIBDIVIDE_GCC_STYLE_ASM -static uint32_t libdivide_64_div_32_to_32(uint32_t u1, uint32_t u0, uint32_t v, uint32_t *r) { +// libdivide_64_div_32_to_32: divides a 64-bit uint {u1, u0} by a 32-bit +// uint {v}. The result must fit in 32 bits. +// Returns the quotient directly and the remainder in *r +static inline uint32_t libdivide_64_div_32_to_32(uint32_t u1, uint32_t u0, uint32_t v, uint32_t *r) { +#if (defined(LIBDIVIDE_i386) || defined(LIBDIVIDE_X86_64)) && \ + defined(LIBDIVIDE_GCC_STYLE_ASM) uint32_t result; __asm__("divl %[v]" : "=a"(result), "=d"(*r) : [v] "r"(v), "a"(u0), "d"(u1) ); return result; -} #else -static uint32_t libdivide_64_div_32_to_32(uint32_t u1, uint32_t u0, uint32_t v, uint32_t *r) { - uint64_t n = (((uint64_t)u1) << 32) | u0; + uint64_t n = ((uint64_t)u1 << 32) | u0; uint32_t result = (uint32_t)(n / v); *r = (uint32_t)(n - result * (uint64_t)v); return result; -} #endif +} -#if LIBDIVIDE_IS_X86_64 && LIBDIVIDE_GCC_STYLE_ASM +// libdivide_128_div_64_to_64: divides a 128-bit uint {u1, u0} by a 64-bit +// uint {v}. The result must fit in 64 bits. +// Returns the quotient directly and the remainder in *r static uint64_t libdivide_128_div_64_to_64(uint64_t u1, uint64_t u0, uint64_t v, uint64_t *r) { - //u0 -> rax - //u1 -> rdx - //divq +#if defined(LIBDIVIDE_X86_64) && \ + defined(LIBDIVIDE_GCC_STYLE_ASM) uint64_t result; __asm__("divq %[v]" : "=a"(result), "=d"(*r) : [v] "r"(v), "a"(u0), "d"(u1) ); return result; - -} +#elif defined(HAS_INT128_T) && \ + defined(HAS_INT128_DIV) + __uint128_t n = ((__uint128_t)u1 << 64) | u0; + uint64_t result = (uint64_t)(n / v); + *r = (uint64_t)(n - result * (__uint128_t)v); + return result; #else + // Code taken from Hacker's Delight: + // http://www.hackersdelight.org/HDcode/divlu.c. + // License permits inclusion here per: + // http://www.hackersdelight.org/permissions.htm -/* Code taken from Hacker's Delight, http://www.hackersdelight.org/HDcode/divlu.c . License permits inclusion here per http://www.hackersdelight.org/permissions.htm - */ -static uint64_t libdivide_128_div_64_to_64(uint64_t u1, uint64_t u0, uint64_t v, uint64_t *r) { - const uint64_t b = (1ULL << 32); // Number base (16 bits). - uint64_t un1, un0, // Norm. dividend LSD's. - vn1, vn0, // Norm. divisor digits. - q1, q0, // Quotient digits. - un64, un21, un10,// Dividend digit pairs. - rhat; // A remainder. - int s; // Shift amount for norm. + const uint64_t b = (1ULL << 32); // Number base (32 bits) + uint64_t un1, un0; // Norm. dividend LSD's + uint64_t vn1, vn0; // Norm. divisor digits + uint64_t q1, q0; // Quotient digits + uint64_t un64, un21, un10; // Dividend digit pairs + uint64_t rhat; // A remainder + int32_t s; // Shift amount for norm - if (u1 >= v) { // If overflow, set rem. - if (r != NULL) // to an impossible value, - *r = (uint64_t)(-1); // and return the largest - return (uint64_t)(-1);} // possible quotient. + // If overflow, set rem. 
to an impossible value, + // and return the largest possible quotient + if (u1 >= v) { + *r = (uint64_t) -1; + return (uint64_t) -1; + } - /* count leading zeros */ - s = libdivide__count_leading_zeros64(v); // 0 <= s <= 63. + // count leading zeros + s = libdivide_count_leading_zeros64(v); if (s > 0) { - v = v << s; // Normalize divisor. - un64 = (u1 << s) | ((u0 >> (64 - s)) & (-s >> 31)); - un10 = u0 << s; // Shift dividend left. + // Normalize divisor + v = v << s; + un64 = (u1 << s) | (u0 >> (64 - s)); + un10 = u0 << s; // Shift dividend left } else { - // Avoid undefined behavior. - un64 = u1 | u0; + // Avoid undefined behavior of (u0 >> 64). + // The behavior is undefined if the right operand is + // negative, or greater than or equal to the length + // in bits of the promoted left operand. + un64 = u1; un10 = u0; } - vn1 = v >> 32; // Break divisor up into - vn0 = v & 0xFFFFFFFF; // two 32-bit digits. + // Break divisor up into two 32-bit digits + vn1 = v >> 32; + vn0 = v & 0xFFFFFFFF; - un1 = un10 >> 32; // Break right half of - un0 = un10 & 0xFFFFFFFF; // dividend into two digits. + // Break right half of dividend into two digits + un1 = un10 >> 32; + un0 = un10 & 0xFFFFFFFF; - q1 = un64/vn1; // Compute the first - rhat = un64 - q1*vn1; // quotient digit, q1. -again1: - if (q1 >= b || q1*vn0 > b*rhat + un1) { + // Compute the first quotient digit, q1 + q1 = un64 / vn1; + rhat = un64 - q1 * vn1; + + while (q1 >= b || q1 * vn0 > b * rhat + un1) { q1 = q1 - 1; rhat = rhat + vn1; - if (rhat < b) goto again1;} + if (rhat >= b) + break; + } - un21 = un64*b + un1 - q1*v; // Multiply and subtract. + // Multiply and subtract + un21 = un64 * b + un1 - q1 * v; - q0 = un21/vn1; // Compute the second - rhat = un21 - q0*vn1; // quotient digit, q0. -again2: - if (q0 >= b || q0*vn0 > b*rhat + un0) { + // Compute the second quotient digit + q0 = un21 / vn1; + rhat = un21 - q0 * vn1; + + while (q0 >= b || q0 * vn0 > b * rhat + un0) { q0 = q0 - 1; rhat = rhat + vn1; - if (rhat < b) goto again2;} + if (rhat >= b) + break; + } - if (r != NULL) // If remainder is wanted, - *r = (un21*b + un0 - q0*v) >> s; // return it. - return q1*b + q0; + *r = (un21 * b + un0 - q0 * v) >> s; + return q1 * b + q0; +#endif } -#endif -#if LIBDIVIDE_ASSERTIONS_ON -#define LIBDIVIDE_ASSERT(x) do { if (! (x)) { fprintf(stderr, "Assertion failure on line %ld: %s\n", (long)__LINE__, #x); exit(-1); } } while (0) +// Bitshift a u128 in place, left (signed_shift > 0) or right (signed_shift < 0) +static inline void libdivide_u128_shift(uint64_t *u1, uint64_t *u0, int32_t signed_shift) { + if (signed_shift > 0) { + uint32_t shift = signed_shift; + *u1 <<= shift; + *u1 |= *u0 >> (64 - shift); + *u0 <<= shift; + } + else if (signed_shift < 0) { + uint32_t shift = -signed_shift; + *u0 >>= shift; + *u0 |= *u1 << (64 - shift); + *u1 >>= shift; + } +} + +// Computes a 128 / 128 -> 64 bit division, with a 128 bit remainder. 
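+// (Illustrative check, using hypothetical operand values: dividing
+// u = 2^127, i.e. {u_hi, u_lo} = {1ULL << 63, 0}, by v = 2^64 + 1,
+// i.e. {v_hi, v_lo} = {1, 1}, yields the quotient 2^63 - 1 with the
+// 128-bit remainder {r_hi, r_lo} = {0, 2^63 + 1}.)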
+static uint64_t libdivide_128_div_128_to_64(uint64_t u_hi, uint64_t u_lo, uint64_t v_hi, uint64_t v_lo, uint64_t *r_hi, uint64_t *r_lo) { +#if defined(HAS_INT128_T) && \ + defined(HAS_INT128_DIV) + __uint128_t ufull = u_hi; + __uint128_t vfull = v_hi; + ufull = (ufull << 64) | u_lo; + vfull = (vfull << 64) | v_lo; + uint64_t res = (uint64_t)(ufull / vfull); + __uint128_t remainder = ufull - (vfull * res); + *r_lo = (uint64_t)remainder; + *r_hi = (uint64_t)(remainder >> 64); + return res; #else -#define LIBDIVIDE_ASSERT(x) -#endif + // Adapted from "Unsigned Doubleword Division" in Hacker's Delight + // We want to compute u / v + typedef struct { uint64_t hi; uint64_t lo; } u128_t; + u128_t u = {u_hi, u_lo}; + u128_t v = {v_hi, v_lo}; -#ifndef LIBDIVIDE_HEADER_ONLY + if (v.hi == 0) { + // divisor v is a 64 bit value, so we just need one 128/64 division + // Note that we are simpler than Hacker's Delight here, because we know + // the quotient fits in 64 bits whereas Hacker's Delight demands a full + // 128 bit quotient + *r_hi = 0; + return libdivide_128_div_64_to_64(u.hi, u.lo, v.lo, r_lo); + } + // Here v >= 2**64 + // We know that v.hi != 0, so count leading zeros is OK + // We have 0 <= n <= 63 + uint32_t n = libdivide_count_leading_zeros64(v.hi); + + // Normalize the divisor so its MSB is 1 + u128_t v1t = v; + libdivide_u128_shift(&v1t.hi, &v1t.lo, n); + uint64_t v1 = v1t.hi; // i.e. v1 = v1t >> 64 + + // To ensure no overflow + u128_t u1 = u; + libdivide_u128_shift(&u1.hi, &u1.lo, -1); + + // Get quotient from divide unsigned insn. + uint64_t rem_ignored; + uint64_t q1 = libdivide_128_div_64_to_64(u1.hi, u1.lo, v1, &rem_ignored); + + // Undo normalization and division of u by 2. + u128_t q0 = {0, q1}; + libdivide_u128_shift(&q0.hi, &q0.lo, n); + libdivide_u128_shift(&q0.hi, &q0.lo, -63); + + // Make q0 correct or too small by 1 + // Equivalent to `if (q0 != 0) q0 = q0 - 1;` + if (q0.hi != 0 || q0.lo != 0) { + q0.hi -= (q0.lo == 0); // borrow + q0.lo -= 1; + } + + // Now q0 is correct. + // Compute q0 * v as q0v + // = (q0.hi << 64 + q0.lo) * (v.hi << 64 + v.lo) + // = (q0.hi * v.hi << 128) + (q0.hi * v.lo << 64) + + // (q0.lo * v.hi << 64) + q0.lo * v.lo) + // Each term is 128 bit + // High half of full product (upper 128 bits!) 
are dropped + u128_t q0v = {0, 0}; + q0v.hi = q0.hi*v.lo + q0.lo*v.hi + libdivide_mullhi_u64(q0.lo, v.lo); + q0v.lo = q0.lo*v.lo; + + // Compute u - q0v as u_q0v + // This is the remainder + u128_t u_q0v = u; + u_q0v.hi -= q0v.hi + (u.lo < q0v.lo); // second term is borrow + u_q0v.lo -= q0v.lo; + + // Check if u_q0v >= v + // This checks if our remainder is larger than the divisor + if ((u_q0v.hi > v.hi) || + (u_q0v.hi == v.hi && u_q0v.lo >= v.lo)) { + // Increment q0 + q0.lo += 1; + q0.hi += (q0.lo == 0); // carry + + // Subtract v from remainder + u_q0v.hi -= v.hi + (u_q0v.lo < v.lo); + u_q0v.lo -= v.lo; + } + + *r_hi = u_q0v.hi; + *r_lo = u_q0v.lo; + + LIBDIVIDE_ASSERT(q0.hi == 0); + return q0.lo; +#endif +} ////////// UINT32 -struct libdivide_u32_t libdivide_u32_gen(uint32_t d) { - struct libdivide_u32_t result; - if ((d & (d - 1)) == 0) { - result.magic = 0; - result.more = libdivide__count_trailing_zeros32(d) | LIBDIVIDE_U32_SHIFT_PATH; +static inline struct libdivide_u32_t libdivide_internal_u32_gen(uint32_t d, int branchfree) { + if (d == 0) { + LIBDIVIDE_ERROR("divider must be != 0"); } - else { - const uint32_t floor_log_2_d = 31 - libdivide__count_leading_zeros32(d); + struct libdivide_u32_t result; + uint32_t floor_log_2_d = 31 - libdivide_count_leading_zeros32(d); + + // Power of 2 + if ((d & (d - 1)) == 0) { + // We need to subtract 1 from the shift value in case of an unsigned + // branchfree divider because there is a hardcoded right shift by 1 + // in its division algorithm. Because of this we also need to add back + // 1 in its recovery algorithm. + result.magic = 0; + result.more = (uint8_t)(floor_log_2_d - (branchfree != 0)); + } else { uint8_t more; uint32_t rem, proposed_m; proposed_m = libdivide_64_div_32_to_32(1U << floor_log_2_d, 0, d, &rem); @@ -631,570 +569,1358 @@ struct libdivide_u32_t libdivide_u32_gen(uint32_t d) { LIBDIVIDE_ASSERT(rem > 0 && rem < d); const uint32_t e = d - rem; - /* This power works if e < 2**floor_log_2_d. */ - if (e < (1U << floor_log_2_d)) { - /* This power works */ + // This power works if e < 2**floor_log_2_d. + if (!branchfree && (e < (1U << floor_log_2_d))) { + // This power works more = floor_log_2_d; - } - else { - /* We have to use the general 33-bit algorithm. We need to compute (2**power) / d. However, we already have (2**(power-1))/d and its remainder. By doubling both, and then correcting the remainder, we can compute the larger division. */ - proposed_m += proposed_m; //don't care about overflow here - in fact, we expect it + } else { + // We have to use the general 33-bit algorithm. We need to compute + // (2**power) / d. However, we already have (2**(power-1))/d and + // its remainder. By doubling both, and then correcting the + // remainder, we can compute the larger division. + // don't care about overflow here - in fact, we expect it + proposed_m += proposed_m; const uint32_t twice_rem = rem + rem; if (twice_rem >= d || twice_rem < rem) proposed_m += 1; more = floor_log_2_d | LIBDIVIDE_ADD_MARKER; } result.magic = 1 + proposed_m; result.more = more; - //result.more's shift should in general be ceil_log_2_d. But if we used the smaller power, we subtract one from the shift because we're using the smaller power. If we're using the larger power, we subtract one from the shift because it's taken care of by the add indicator. So floor_log_2_d happens to be correct in both cases. - + // result.more's shift should in general be ceil_log_2_d. 
But if we + // used the smaller power, we subtract one from the shift because we're + // using the smaller power. If we're using the larger power, we + // subtract one from the shift because it's taken care of by the add + // indicator. So floor_log_2_d happens to be correct in both cases. } return result; } +struct libdivide_u32_t libdivide_u32_gen(uint32_t d) { + return libdivide_internal_u32_gen(d, 0); +} + +struct libdivide_u32_branchfree_t libdivide_u32_branchfree_gen(uint32_t d) { + if (d == 1) { + LIBDIVIDE_ERROR("branchfree divider must be != 1"); + } + struct libdivide_u32_t tmp = libdivide_internal_u32_gen(d, 1); + struct libdivide_u32_branchfree_t ret = {tmp.magic, (uint8_t)(tmp.more & LIBDIVIDE_32_SHIFT_MASK)}; + return ret; +} + uint32_t libdivide_u32_do(uint32_t numer, const struct libdivide_u32_t *denom) { uint8_t more = denom->more; - if (more & LIBDIVIDE_U32_SHIFT_PATH) { - return numer >> (more & LIBDIVIDE_32_SHIFT_MASK); + if (!denom->magic) { + return numer >> more; } else { - uint32_t q = libdivide__mullhi_u32(denom->magic, numer); + uint32_t q = libdivide_mullhi_u32(denom->magic, numer); if (more & LIBDIVIDE_ADD_MARKER) { uint32_t t = ((numer - q) >> 1) + q; return t >> (more & LIBDIVIDE_32_SHIFT_MASK); } else { - return q >> more; //all upper bits are 0 - don't need to mask them off + // All upper bits are 0, + // don't need to mask them off. + return q >> more; } } } - -int libdivide_u32_get_algorithm(const struct libdivide_u32_t *denom) { - uint8_t more = denom->more; - if (more & LIBDIVIDE_U32_SHIFT_PATH) return 0; - else if (! (more & LIBDIVIDE_ADD_MARKER)) return 1; - else return 2; -} - -uint32_t libdivide_u32_do_alg0(uint32_t numer, const struct libdivide_u32_t *denom) { - return numer >> (denom->more & LIBDIVIDE_32_SHIFT_MASK); -} - -uint32_t libdivide_u32_do_alg1(uint32_t numer, const struct libdivide_u32_t *denom) { - uint32_t q = libdivide__mullhi_u32(denom->magic, numer); - return q >> denom->more; -} - -uint32_t libdivide_u32_do_alg2(uint32_t numer, const struct libdivide_u32_t *denom) { - // denom->add != 0 - uint32_t q = libdivide__mullhi_u32(denom->magic, numer); +uint32_t libdivide_u32_branchfree_do(uint32_t numer, const struct libdivide_u32_branchfree_t *denom) { + uint32_t q = libdivide_mullhi_u32(denom->magic, numer); uint32_t t = ((numer - q) >> 1) + q; - return t >> (denom->more & LIBDIVIDE_32_SHIFT_MASK); + return t >> denom->more; } - - - -#if LIBDIVIDE_USE_SSE2 -__m128i libdivide_u32_do_vector(__m128i numers, const struct libdivide_u32_t *denom) { +uint32_t libdivide_u32_recover(const struct libdivide_u32_t *denom) { uint8_t more = denom->more; - if (more & LIBDIVIDE_U32_SHIFT_PATH) { - return _mm_srl_epi32(numers, libdivide_u32_to_m128i(more & LIBDIVIDE_32_SHIFT_MASK)); - } - else { - __m128i q = libdivide__mullhi_u32_flat_vector(numers, _mm_set1_epi32(denom->magic)); - if (more & LIBDIVIDE_ADD_MARKER) { - //uint32_t t = ((numer - q) >> 1) + q; - //return t >> denom->shift; - __m128i t = _mm_add_epi32(_mm_srli_epi32(_mm_sub_epi32(numers, q), 1), q); - return _mm_srl_epi32(t, libdivide_u32_to_m128i(more & LIBDIVIDE_32_SHIFT_MASK)); + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; - } - else { - //q >> denom->shift - return _mm_srl_epi32(q, libdivide_u32_to_m128i(more)); - } + if (!denom->magic) { + return 1U << shift; + } else if (!(more & LIBDIVIDE_ADD_MARKER)) { + // We compute q = n/d = n*m / 2^(32 + shift) + // Therefore we have d = 2^(32 + shift) / m + // We need to ceil it. 
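+        // (For example, with d = 5: magic m = 0xCCCCCCCD and shift = 2, so
+        // 2^34 / m is just below 5, and taking the ceiling recovers d = 5.)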
+        // We know d is not a power of 2, so m is not a power of 2,
+        // so we can just add 1 to the floor
+        uint32_t hi_dividend = 1U << shift;
+        uint32_t rem_ignored;
+        return 1 + libdivide_64_div_32_to_32(hi_dividend, 0, denom->magic, &rem_ignored);
+    } else {
+        // Here we wish to compute d = 2^(32+shift+1)/(m+2^32).
+        // Notice (m + 2^32) is a 33 bit number. Use 64 bit division for now
+        // Also note that shift may be as high as 31, so shift + 1 will
+        // overflow. So we have to compute it as 2^(32+shift)/(m+2^32), and
+        // then double the quotient and remainder.
+        uint64_t half_n = 1ULL << (32 + shift);
+        uint64_t d = (1ULL << 32) | denom->magic;
+        // Note that the quotient is guaranteed <= 32 bits, but the remainder
+        // may need 33!
+        uint32_t half_q = (uint32_t)(half_n / d);
+        uint64_t rem = half_n % d;
+        // We computed 2^(32+shift)/(m+2^32)
+        // Need to double it, and then add 1 to the quotient if doubling the
+        // remainder would increase the quotient.
+        // Note that rem<<1 cannot overflow, since rem < d and d is 33 bits
+        uint32_t full_q = half_q + half_q + ((rem<<1) >= d);
+
+        // We rounded down in gen (hence +1)
+        return full_q + 1;
+    }
+}
+
-__m128i libdivide_u32_do_vector_alg0(__m128i numers, const struct libdivide_u32_t *denom) {
-    return _mm_srl_epi32(numers, libdivide_u32_to_m128i(denom->more & LIBDIVIDE_32_SHIFT_MASK));
-}
+uint32_t libdivide_u32_branchfree_recover(const struct libdivide_u32_branchfree_t *denom) {
+    uint8_t more = denom->more;
+    uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK;

-__m128i libdivide_u32_do_vector_alg1(__m128i numers, const struct libdivide_u32_t *denom) {
-    __m128i q = libdivide__mullhi_u32_flat_vector(numers, _mm_set1_epi32(denom->magic));
-    return _mm_srl_epi32(q, libdivide_u32_to_m128i(denom->more));
-}
+    if (!denom->magic) {
+        return 1U << (shift + 1);
+    } else {
+        // Here we wish to compute d = 2^(32+shift+1)/(m+2^32).
+        // Notice (m + 2^32) is a 33 bit number. Use 64 bit division for now
+        // Also note that shift may be as high as 31, so shift + 1 will
+        // overflow. So we have to compute it as 2^(32+shift)/(m+2^32), and
+        // then double the quotient and remainder.
+        uint64_t half_n = 1ULL << (32 + shift);
+        uint64_t d = (1ULL << 32) | denom->magic;
+        // Note that the quotient is guaranteed <= 32 bits, but the remainder
+        // may need 33!
+        uint32_t half_q = (uint32_t)(half_n / d);
+        uint64_t rem = half_n % d;
+        // We computed 2^(32+shift)/(m+2^32)
+        // Need to double it, and then add 1 to the quotient if doubling the
+        // remainder would increase the quotient.
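+        // (Worked example: for d = 7, magic = 0x24924925 and shift = 2, so
+        // half_n = 2^34 and d = 2^32 + magic = 4908534053; then half_q = 3,
+        // rem = 2454267025, and 2*rem < d, so full_q = 6 and 6 + 1 = 7.)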
+ // Note that rem<<1 cannot overflow, since rem < d and d is 33 bits + uint32_t full_q = half_q + half_q + ((rem<<1) >= d); -__m128i libdivide_u32_do_vector_alg2(__m128i numers, const struct libdivide_u32_t *denom) { - __m128i q = libdivide__mullhi_u32_flat_vector(numers, _mm_set1_epi32(denom->magic)); - __m128i t = _mm_add_epi32(_mm_srli_epi32(_mm_sub_epi32(numers, q), 1), q); - return _mm_srl_epi32(t, libdivide_u32_to_m128i(denom->more & LIBDIVIDE_32_SHIFT_MASK)); + // We rounded down in gen (hence +1) + return full_q + 1; + } } -#endif - /////////// UINT64 -struct libdivide_u64_t libdivide_u64_gen(uint64_t d) { - struct libdivide_u64_t result; - if ((d & (d - 1)) == 0) { - result.more = libdivide__count_trailing_zeros64(d) | LIBDIVIDE_U64_SHIFT_PATH; - result.magic = 0; +static inline struct libdivide_u64_t libdivide_internal_u64_gen(uint64_t d, int branchfree) { + if (d == 0) { + LIBDIVIDE_ERROR("divider must be != 0"); } - else { - const uint32_t floor_log_2_d = 63 - libdivide__count_leading_zeros64(d); + struct libdivide_u64_t result; + uint32_t floor_log_2_d = 63 - libdivide_count_leading_zeros64(d); + + // Power of 2 + if ((d & (d - 1)) == 0) { + // We need to subtract 1 from the shift value in case of an unsigned + // branchfree divider because there is a hardcoded right shift by 1 + // in its division algorithm. Because of this we also need to add back + // 1 in its recovery algorithm. + result.magic = 0; + result.more = (uint8_t)(floor_log_2_d - (branchfree != 0)); + } else { uint64_t proposed_m, rem; uint8_t more; - proposed_m = libdivide_128_div_64_to_64(1ULL << floor_log_2_d, 0, d, &rem); //== (1 << (64 + floor_log_2_d)) / d + // (1 << (64 + floor_log_2_d)) / d + proposed_m = libdivide_128_div_64_to_64(1ULL << floor_log_2_d, 0, d, &rem); LIBDIVIDE_ASSERT(rem > 0 && rem < d); const uint64_t e = d - rem; - /* This power works if e < 2**floor_log_2_d. */ - if (e < (1ULL << floor_log_2_d)) { - /* This power works */ + // This power works if e < 2**floor_log_2_d. + if (!branchfree && e < (1ULL << floor_log_2_d)) { + // This power works more = floor_log_2_d; - } - else { - /* We have to use the general 65-bit algorithm. We need to compute (2**power) / d. However, we already have (2**(power-1))/d and its remainder. By doubling both, and then correcting the remainder, we can compute the larger division. */ - proposed_m += proposed_m; //don't care about overflow here - in fact, we expect it + } else { + // We have to use the general 65-bit algorithm. We need to compute + // (2**power) / d. However, we already have (2**(power-1))/d and + // its remainder. By doubling both, and then correcting the + // remainder, we can compute the larger division. + // don't care about overflow here - in fact, we expect it + proposed_m += proposed_m; const uint64_t twice_rem = rem + rem; if (twice_rem >= d || twice_rem < rem) proposed_m += 1; - more = floor_log_2_d | LIBDIVIDE_ADD_MARKER; + more = floor_log_2_d | LIBDIVIDE_ADD_MARKER; } result.magic = 1 + proposed_m; result.more = more; - //result.more's shift should in general be ceil_log_2_d. But if we used the smaller power, we subtract one from the shift because we're using the smaller power. If we're using the larger power, we subtract one from the shift because it's taken care of by the add indicator. So floor_log_2_d happens to be correct in both cases, which is why we do it outside of the if statement. + // result.more's shift should in general be ceil_log_2_d. 
But if we + // used the smaller power, we subtract one from the shift because we're + // using the smaller power. If we're using the larger power, we + // subtract one from the shift because it's taken care of by the add + // indicator. So floor_log_2_d happens to be correct in both cases, + // which is why we do it outside of the if statement. } return result; } +struct libdivide_u64_t libdivide_u64_gen(uint64_t d) { + return libdivide_internal_u64_gen(d, 0); +} + +struct libdivide_u64_branchfree_t libdivide_u64_branchfree_gen(uint64_t d) { + if (d == 1) { + LIBDIVIDE_ERROR("branchfree divider must be != 1"); + } + struct libdivide_u64_t tmp = libdivide_internal_u64_gen(d, 1); + struct libdivide_u64_branchfree_t ret = {tmp.magic, (uint8_t)(tmp.more & LIBDIVIDE_64_SHIFT_MASK)}; + return ret; +} + uint64_t libdivide_u64_do(uint64_t numer, const struct libdivide_u64_t *denom) { uint8_t more = denom->more; - if (more & LIBDIVIDE_U64_SHIFT_PATH) { - return numer >> (more & LIBDIVIDE_64_SHIFT_MASK); + if (!denom->magic) { + return numer >> more; } else { - uint64_t q = libdivide__mullhi_u64(denom->magic, numer); + uint64_t q = libdivide_mullhi_u64(denom->magic, numer); if (more & LIBDIVIDE_ADD_MARKER) { uint64_t t = ((numer - q) >> 1) + q; return t >> (more & LIBDIVIDE_64_SHIFT_MASK); } else { - return q >> more; //all upper bits are 0 - don't need to mask them off + // All upper bits are 0, + // don't need to mask them off. + return q >> more; } } } - -int libdivide_u64_get_algorithm(const struct libdivide_u64_t *denom) { - uint8_t more = denom->more; - if (more & LIBDIVIDE_U64_SHIFT_PATH) return 0; - else if (! (more & LIBDIVIDE_ADD_MARKER)) return 1; - else return 2; -} - -uint64_t libdivide_u64_do_alg0(uint64_t numer, const struct libdivide_u64_t *denom) { - return numer >> (denom->more & LIBDIVIDE_64_SHIFT_MASK); -} - -uint64_t libdivide_u64_do_alg1(uint64_t numer, const struct libdivide_u64_t *denom) { - uint64_t q = libdivide__mullhi_u64(denom->magic, numer); - return q >> denom->more; -} - -uint64_t libdivide_u64_do_alg2(uint64_t numer, const struct libdivide_u64_t *denom) { - uint64_t q = libdivide__mullhi_u64(denom->magic, numer); +uint64_t libdivide_u64_branchfree_do(uint64_t numer, const struct libdivide_u64_branchfree_t *denom) { + uint64_t q = libdivide_mullhi_u64(denom->magic, numer); uint64_t t = ((numer - q) >> 1) + q; - return t >> (denom->more & LIBDIVIDE_64_SHIFT_MASK); + return t >> denom->more; } -#if LIBDIVIDE_USE_SSE2 -__m128i libdivide_u64_do_vector(__m128i numers, const struct libdivide_u64_t * denom) { +uint64_t libdivide_u64_recover(const struct libdivide_u64_t *denom) { uint8_t more = denom->more; - if (more & LIBDIVIDE_U64_SHIFT_PATH) { - return _mm_srl_epi64(numers, libdivide_u32_to_m128i(more & LIBDIVIDE_64_SHIFT_MASK)); - } - else { - __m128i q = libdivide_mullhi_u64_flat_vector(numers, libdivide__u64_to_m128(denom->magic)); - if (more & LIBDIVIDE_ADD_MARKER) { - //uint32_t t = ((numer - q) >> 1) + q; - //return t >> denom->shift; - __m128i t = _mm_add_epi64(_mm_srli_epi64(_mm_sub_epi64(numers, q), 1), q); - return _mm_srl_epi64(t, libdivide_u32_to_m128i(more & LIBDIVIDE_64_SHIFT_MASK)); - } - else { - //q >> denom->shift - return _mm_srl_epi64(q, libdivide_u32_to_m128i(more)); - } + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + + if (!denom->magic) { + return 1ULL << shift; + } else if (!(more & LIBDIVIDE_ADD_MARKER)) { + // We compute q = n/d = n*m / 2^(64 + shift) + // Therefore we have d = 2^(64 + shift) / m + // We need to ceil it. 
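+        // (For example, with d = 5: magic m = 0xCCCCCCCCCCCCCCCD and
+        // shift = 2, so 2^66 / m is just below 5 and its ceiling is d = 5.)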
+ // We know d is not a power of 2, so m is not a power of 2, + // so we can just add 1 to the floor + uint64_t hi_dividend = 1ULL << shift; + uint64_t rem_ignored; + return 1 + libdivide_128_div_64_to_64(hi_dividend, 0, denom->magic, &rem_ignored); + } else { + // Here we wish to compute d = 2^(64+shift+1)/(m+2^64). + // Notice (m + 2^64) is a 65 bit number. This gets hairy. See + // libdivide_u32_recover for more on what we do here. + // TODO: do something better than 128 bit math + + // Full n is a (potentially) 129 bit value + // half_n is a 128 bit value + // Compute the hi half of half_n. Low half is 0. + uint64_t half_n_hi = 1ULL << shift, half_n_lo = 0; + // d is a 65 bit value. The high bit is always set to 1. + const uint64_t d_hi = 1, d_lo = denom->magic; + // Note that the quotient is guaranteed <= 64 bits, + // but the remainder may need 65! + uint64_t r_hi, r_lo; + uint64_t half_q = libdivide_128_div_128_to_64(half_n_hi, half_n_lo, d_hi, d_lo, &r_hi, &r_lo); + // We computed 2^(64+shift)/(m+2^64) + // Double the remainder ('dr') and check if that is larger than d + // Note that d is a 65 bit value, so r1 is small and so r1 + r1 + // cannot overflow + uint64_t dr_lo = r_lo + r_lo; + uint64_t dr_hi = r_hi + r_hi + (dr_lo < r_lo); // last term is carry + int dr_exceeds_d = (dr_hi > d_hi) || (dr_hi == d_hi && dr_lo >= d_lo); + uint64_t full_q = half_q + half_q + (dr_exceeds_d ? 1 : 0); + return full_q + 1; } } -__m128i libdivide_u64_do_vector_alg0(__m128i numers, const struct libdivide_u64_t *denom) { - return _mm_srl_epi64(numers, libdivide_u32_to_m128i(denom->more & LIBDIVIDE_64_SHIFT_MASK)); +uint64_t libdivide_u64_branchfree_recover(const struct libdivide_u64_branchfree_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + + if (!denom->magic) { + return 1ULL << (shift + 1); + } else { + // Here we wish to compute d = 2^(64+shift+1)/(m+2^64). + // Notice (m + 2^64) is a 65 bit number. This gets hairy. See + // libdivide_u32_recover for more on what we do here. + // TODO: do something better than 128 bit math + + // Full n is a (potentially) 129 bit value + // half_n is a 128 bit value + // Compute the hi half of half_n. Low half is 0. + uint64_t half_n_hi = 1ULL << shift, half_n_lo = 0; + // d is a 65 bit value. The high bit is always set to 1. + const uint64_t d_hi = 1, d_lo = denom->magic; + // Note that the quotient is guaranteed <= 64 bits, + // but the remainder may need 65! + uint64_t r_hi, r_lo; + uint64_t half_q = libdivide_128_div_128_to_64(half_n_hi, half_n_lo, d_hi, d_lo, &r_hi, &r_lo); + // We computed 2^(64+shift)/(m+2^64) + // Double the remainder ('dr') and check if that is larger than d + // Note that d is a 65 bit value, so r1 is small and so r1 + r1 + // cannot overflow + uint64_t dr_lo = r_lo + r_lo; + uint64_t dr_hi = r_hi + r_hi + (dr_lo < r_lo); // last term is carry + int dr_exceeds_d = (dr_hi > d_hi) || (dr_hi == d_hi && dr_lo >= d_lo); + uint64_t full_q = half_q + half_q + (dr_exceeds_d ? 
1 : 0);
+        return full_q + 1;
+    }
+}
+
-__m128i libdivide_u64_do_vector_alg1(__m128i numers, const struct libdivide_u64_t *denom) {
-    __m128i q = libdivide_mullhi_u64_flat_vector(numers, libdivide__u64_to_m128(denom->magic));
-    return _mm_srl_epi64(q, libdivide_u32_to_m128i(denom->more));
-}
-
-__m128i libdivide_u64_do_vector_alg2(__m128i numers, const struct libdivide_u64_t *denom) {
-    __m128i q = libdivide_mullhi_u64_flat_vector(numers, libdivide__u64_to_m128(denom->magic));
-    __m128i t = _mm_add_epi64(_mm_srli_epi64(_mm_sub_epi64(numers, q), 1), q);
-    return _mm_srl_epi64(t, libdivide_u32_to_m128i(denom->more & LIBDIVIDE_64_SHIFT_MASK));
-}
-
-
-#endif
-
 /////////// SINT32

+static inline struct libdivide_s32_t libdivide_internal_s32_gen(int32_t d, int branchfree) {
+    if (d == 0) {
+        LIBDIVIDE_ERROR("divider must be != 0");
+    }

-static inline int32_t libdivide__mullhi_s32(int32_t x, int32_t y) {
-    int64_t xl = x, yl = y;
-    int64_t rl = xl * yl;
-    return (int32_t)(rl >> 32); //needs to be arithmetic shift
-}
-
-struct libdivide_s32_t libdivide_s32_gen(int32_t d) {
     struct libdivide_s32_t result;
-    /* If d is a power of 2, or negative a power of 2, we have to use a shift. This is especially important because the magic algorithm fails for -1. To check if d is a power of 2 or its inverse, it suffices to check whether its absolute value has exactly one bit set. This works even for INT_MIN, because abs(INT_MIN) == INT_MIN, and INT_MIN has one bit set and is a power of 2. */
-    uint32_t absD = (uint32_t)(d < 0 ? -d : d); //gcc optimizes this to the fast abs trick
-    if ((absD & (absD - 1)) == 0) { //check if exactly one bit is set, don't care if absD is 0 since that's divide by zero
+    // If d is a power of 2, or negative a power of 2, we have to use a shift.
+    // This is especially important because the magic algorithm fails for -1.
+    // To check if d is a power of 2 or its inverse, it suffices to check
+    // whether its absolute value has exactly one bit set. This works even for
+    // INT_MIN, because abs(INT_MIN) == INT_MIN, and INT_MIN has one bit set
+    // and is a power of 2.
+    uint32_t ud = (uint32_t)d;
+    uint32_t absD = (d < 0) ? -ud : ud;
+    uint32_t floor_log_2_d = 31 - libdivide_count_leading_zeros32(absD);
+    // check if exactly one bit is set,
+    // don't care if absD is 0 since that's divide by zero
+    if ((absD & (absD - 1)) == 0) {
+        // Branchfree and normal paths are exactly the same
         result.magic = 0;
-        result.more = libdivide__count_trailing_zeros32(absD) | (d < 0 ? LIBDIVIDE_NEGATIVE_DIVISOR : 0) | LIBDIVIDE_S32_SHIFT_PATH;
-    }
-    else {
-        const uint32_t floor_log_2_d = 31 - libdivide__count_leading_zeros32(absD);
+        result.more = floor_log_2_d | (d < 0 ? LIBDIVIDE_NEGATIVE_DIVISOR : 0);
+    } else {
         LIBDIVIDE_ASSERT(floor_log_2_d >= 1);

         uint8_t more;
-        //the dividend here is 2**(floor_log_2_d + 31), so the low 32 bit word is 0 and the high word is floor_log_2_d - 1
+        // the dividend here is 2**(floor_log_2_d + 31), so the low 32 bit word
+        // is 0 and the high word is floor_log_2_d - 1
         uint32_t rem, proposed_m;
         proposed_m = libdivide_64_div_32_to_32(1U << (floor_log_2_d - 1), 0, absD, &rem);
         const uint32_t e = absD - rem;

-        /* We are going to start with a power of floor_log_2_d - 1. This works if works if e < 2**floor_log_2_d. */
-        if (e < (1U << floor_log_2_d)) {
-            /* This power works */
+        // We are going to start with a power of floor_log_2_d - 1.
+        // This works if e < 2**floor_log_2_d.
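+        // (For example, d = 5 gives e = 3 < 4, so the smaller power
+        // suffices, while d = 7 gives e = 6 >= 4 and we go one higher.)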
+ if (!branchfree && e < (1U << floor_log_2_d)) { + // This power works more = floor_log_2_d - 1; - } - else { - /* We need to go one higher. This should not make proposed_m overflow, but it will make it negative when interpreted as an int32_t. */ + } else { + // We need to go one higher. This should not make proposed_m + // overflow, but it will make it negative when interpreted as an + // int32_t. proposed_m += proposed_m; const uint32_t twice_rem = rem + rem; if (twice_rem >= absD || twice_rem < rem) proposed_m += 1; - more = floor_log_2_d | LIBDIVIDE_ADD_MARKER | (d < 0 ? LIBDIVIDE_NEGATIVE_DIVISOR : 0); //use the general algorithm + more = floor_log_2_d | LIBDIVIDE_ADD_MARKER; } - proposed_m += 1; - result.magic = (d < 0 ? -(int32_t)proposed_m : (int32_t)proposed_m); - result.more = more; + proposed_m += 1; + int32_t magic = (int32_t)proposed_m; + + // Mark if we are negative. Note we only negate the magic number in the + // branchfull case. + if (d < 0) { + more |= LIBDIVIDE_NEGATIVE_DIVISOR; + if (!branchfree) { + magic = -magic; + } + } + + result.more = more; + result.magic = magic; } return result; } +struct libdivide_s32_t libdivide_s32_gen(int32_t d) { + return libdivide_internal_s32_gen(d, 0); +} + +struct libdivide_s32_branchfree_t libdivide_s32_branchfree_gen(int32_t d) { + struct libdivide_s32_t tmp = libdivide_internal_s32_gen(d, 1); + struct libdivide_s32_branchfree_t result = {tmp.magic, tmp.more}; + return result; +} + int32_t libdivide_s32_do(int32_t numer, const struct libdivide_s32_t *denom) { uint8_t more = denom->more; - if (more & LIBDIVIDE_S32_SHIFT_PATH) { - uint8_t shifter = more & LIBDIVIDE_32_SHIFT_MASK; - int32_t q = numer + ((numer >> 31) & ((1 << shifter) - 1)); - q = q >> shifter; - int32_t shiftMask = (int8_t)more >> 7; //must be arithmetic shift and then sign-extend - q = (q ^ shiftMask) - shiftMask; + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + + if (!denom->magic) { + uint32_t sign = (int8_t)more >> 7; + uint32_t mask = (1U << shift) - 1; + uint32_t uq = numer + ((numer >> 31) & mask); + int32_t q = (int32_t)uq; + q >>= shift; + q = (q ^ sign) - sign; return q; - } - else { - int32_t q = libdivide__mullhi_s32(denom->magic, numer); + } else { + uint32_t uq = (uint32_t)libdivide_mullhi_s32(denom->magic, numer); if (more & LIBDIVIDE_ADD_MARKER) { - int32_t sign = (int8_t)more >> 7; //must be arithmetic shift and then sign extend - q += ((numer ^ sign) - sign); + // must be arithmetic shift and then sign extend + int32_t sign = (int8_t)more >> 7; + // q += (more < 0 ? -numer : numer) + // cast required to avoid UB + uq += ((uint32_t)numer ^ sign) - sign; } - q >>= more & LIBDIVIDE_32_SHIFT_MASK; + int32_t q = (int32_t)uq; + q >>= shift; q += (q < 0); return q; } } -int libdivide_s32_get_algorithm(const struct libdivide_s32_t *denom) { +int32_t libdivide_s32_branchfree_do(int32_t numer, const struct libdivide_s32_branchfree_t *denom) { uint8_t more = denom->more; - int positiveDivisor = ! (more & LIBDIVIDE_NEGATIVE_DIVISOR); - if (more & LIBDIVIDE_S32_SHIFT_PATH) return (positiveDivisor ? 0 : 1); - else if (more & LIBDIVIDE_ADD_MARKER) return (positiveDivisor ? 
2 : 3); - else return 4; -} - -int32_t libdivide_s32_do_alg0(int32_t numer, const struct libdivide_s32_t *denom) { - uint8_t shifter = denom->more & LIBDIVIDE_32_SHIFT_MASK; - int32_t q = numer + ((numer >> 31) & ((1 << shifter) - 1)); - return q >> shifter; -} - -int32_t libdivide_s32_do_alg1(int32_t numer, const struct libdivide_s32_t *denom) { - uint8_t shifter = denom->more & LIBDIVIDE_32_SHIFT_MASK; - int32_t q = numer + ((numer >> 31) & ((1 << shifter) - 1)); - return - (q >> shifter); -} - -int32_t libdivide_s32_do_alg2(int32_t numer, const struct libdivide_s32_t *denom) { - int32_t q = libdivide__mullhi_s32(denom->magic, numer); + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + // must be arithmetic shift and then sign extend + int32_t sign = (int8_t)more >> 7; + int32_t magic = denom->magic; + int32_t q = libdivide_mullhi_s32(magic, numer); q += numer; - q >>= denom->more & LIBDIVIDE_32_SHIFT_MASK; - q += (q < 0); + + // If q is non-negative, we have nothing to do + // If q is negative, we want to add either (2**shift)-1 if d is a power of + // 2, or (2**shift) if it is not a power of 2 + uint32_t is_power_of_2 = (magic == 0); + uint32_t q_sign = (uint32_t)(q >> 31); + q += q_sign & ((1U << shift) - is_power_of_2); + + // Now arithmetic right shift + q >>= shift; + // Negate if needed + q = (q ^ sign) - sign; + return q; } -int32_t libdivide_s32_do_alg3(int32_t numer, const struct libdivide_s32_t *denom) { - int32_t q = libdivide__mullhi_s32(denom->magic, numer); - q -= numer; - q >>= denom->more & LIBDIVIDE_32_SHIFT_MASK; - q += (q < 0); - return q; -} - -int32_t libdivide_s32_do_alg4(int32_t numer, const struct libdivide_s32_t *denom) { - int32_t q = libdivide__mullhi_s32(denom->magic, numer); - q >>= denom->more & LIBDIVIDE_32_SHIFT_MASK; - q += (q < 0); - return q; -} - -#if LIBDIVIDE_USE_SSE2 -__m128i libdivide_s32_do_vector(__m128i numers, const struct libdivide_s32_t * denom) { +int32_t libdivide_s32_recover(const struct libdivide_s32_t *denom) { uint8_t more = denom->more; - if (more & LIBDIVIDE_S32_SHIFT_PATH) { - uint32_t shifter = more & LIBDIVIDE_32_SHIFT_MASK; - __m128i roundToZeroTweak = _mm_set1_epi32((1 << shifter) - 1); //could use _mm_srli_epi32 with an all -1 register - __m128i q = _mm_add_epi32(numers, _mm_and_si128(_mm_srai_epi32(numers, 31), roundToZeroTweak)); //q = numer + ((numer >> 31) & roundToZeroTweak); - q = _mm_sra_epi32(q, libdivide_u32_to_m128i(shifter)); // q = q >> shifter - __m128i shiftMask = _mm_set1_epi32((int32_t)((int8_t)more >> 7)); //set all bits of shift mask = to the sign bit of more - q = _mm_sub_epi32(_mm_xor_si128(q, shiftMask), shiftMask); //q = (q ^ shiftMask) - shiftMask; - return q; - } - else { - __m128i q = libdivide_mullhi_s32_flat_vector(numers, _mm_set1_epi32(denom->magic)); - if (more & LIBDIVIDE_ADD_MARKER) { - __m128i sign = _mm_set1_epi32((int32_t)(int8_t)more >> 7); //must be arithmetic shift - q = _mm_add_epi32(q, _mm_sub_epi32(_mm_xor_si128(numers, sign), sign)); // q += ((numer ^ sign) - sign); + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + if (!denom->magic) { + uint32_t absD = 1U << shift; + if (more & LIBDIVIDE_NEGATIVE_DIVISOR) { + absD = -absD; } - q = _mm_sra_epi32(q, libdivide_u32_to_m128i(more & LIBDIVIDE_32_SHIFT_MASK)); //q >>= shift - q = _mm_add_epi32(q, _mm_srli_epi32(q, 31)); // q += (q < 0) - return q; + return (int32_t)absD; + } else { + // Unsigned math is much easier + // We negate the magic number only in the branchfull case, and we don't + // know which case we're in. 
However we have enough information to + // determine the correct sign of the magic number. The divisor was + // negative if LIBDIVIDE_NEGATIVE_DIVISOR is set. If ADD_MARKER is set, + // the magic number's sign is opposite that of the divisor. + // We want to compute the positive magic number. + int negative_divisor = (more & LIBDIVIDE_NEGATIVE_DIVISOR); + int magic_was_negated = (more & LIBDIVIDE_ADD_MARKER) + ? denom->magic > 0 : denom->magic < 0; + + // Handle the power of 2 case (including branchfree) + if (denom->magic == 0) { + int32_t result = 1U << shift; + return negative_divisor ? -result : result; + } + + uint32_t d = (uint32_t)(magic_was_negated ? -denom->magic : denom->magic); + uint64_t n = 1ULL << (32 + shift); // this shift cannot exceed 30 + uint32_t q = (uint32_t)(n / d); + int32_t result = (int32_t)q; + result += 1; + return negative_divisor ? -result : result; } } -__m128i libdivide_s32_do_vector_alg0(__m128i numers, const struct libdivide_s32_t *denom) { - uint8_t shifter = denom->more & LIBDIVIDE_32_SHIFT_MASK; - __m128i roundToZeroTweak = _mm_set1_epi32((1 << shifter) - 1); - __m128i q = _mm_add_epi32(numers, _mm_and_si128(_mm_srai_epi32(numers, 31), roundToZeroTweak)); - return _mm_sra_epi32(q, libdivide_u32_to_m128i(shifter)); +int32_t libdivide_s32_branchfree_recover(const struct libdivide_s32_branchfree_t *denom) { + return libdivide_s32_recover((const struct libdivide_s32_t *)denom); } -__m128i libdivide_s32_do_vector_alg1(__m128i numers, const struct libdivide_s32_t *denom) { - uint8_t shifter = denom->more & LIBDIVIDE_32_SHIFT_MASK; - __m128i roundToZeroTweak = _mm_set1_epi32((1 << shifter) - 1); - __m128i q = _mm_add_epi32(numers, _mm_and_si128(_mm_srai_epi32(numers, 31), roundToZeroTweak)); - return _mm_sub_epi32(_mm_setzero_si128(), _mm_sra_epi32(q, libdivide_u32_to_m128i(shifter))); -} - -__m128i libdivide_s32_do_vector_alg2(__m128i numers, const struct libdivide_s32_t *denom) { - __m128i q = libdivide_mullhi_s32_flat_vector(numers, _mm_set1_epi32(denom->magic)); - q = _mm_add_epi32(q, numers); - q = _mm_sra_epi32(q, libdivide_u32_to_m128i(denom->more & LIBDIVIDE_32_SHIFT_MASK)); - q = _mm_add_epi32(q, _mm_srli_epi32(q, 31)); - return q; -} - -__m128i libdivide_s32_do_vector_alg3(__m128i numers, const struct libdivide_s32_t *denom) { - __m128i q = libdivide_mullhi_s32_flat_vector(numers, _mm_set1_epi32(denom->magic)); - q = _mm_sub_epi32(q, numers); - q = _mm_sra_epi32(q, libdivide_u32_to_m128i(denom->more & LIBDIVIDE_32_SHIFT_MASK)); - q = _mm_add_epi32(q, _mm_srli_epi32(q, 31)); - return q; -} - -__m128i libdivide_s32_do_vector_alg4(__m128i numers, const struct libdivide_s32_t *denom) { - __m128i q = libdivide_mullhi_s32_flat_vector(numers, _mm_set1_epi32(denom->magic)); - q = _mm_sra_epi32(q, libdivide_u32_to_m128i(denom->more)); //q >>= shift - q = _mm_add_epi32(q, _mm_srli_epi32(q, 31)); // q += (q < 0) - return q; -} -#endif - ///////////// SINT64 +static inline struct libdivide_s64_t libdivide_internal_s64_gen(int64_t d, int branchfree) { + if (d == 0) { + LIBDIVIDE_ERROR("divider must be != 0"); + } -struct libdivide_s64_t libdivide_s64_gen(int64_t d) { struct libdivide_s64_t result; - /* If d is a power of 2, or negative a power of 2, we have to use a shift. This is especially important because the magic algorithm fails for -1. To check if d is a power of 2 or its inverse, it suffices to check whether its absolute value has exactly one bit set. 
This works even for INT_MIN, because abs(INT_MIN) == INT_MIN, and INT_MIN has one bit set and is a power of 2. */
-    const uint64_t absD = (uint64_t)(d < 0 ? -d : d); //gcc optimizes this to the fast abs trick
-    if ((absD & (absD - 1)) == 0) { //check if exactly one bit is set, don't care if absD is 0 since that's divide by zero
-        result.more = libdivide__count_trailing_zeros64(absD) | (d < 0 ? LIBDIVIDE_NEGATIVE_DIVISOR : 0);
+    // If d is a power of 2, or negative a power of 2, we have to use a shift.
+    // This is especially important because the magic algorithm fails for -1.
+    // To check if d is a power of 2 or its inverse, it suffices to check
+    // whether its absolute value has exactly one bit set. This works even for
+    // INT_MIN, because abs(INT_MIN) == INT_MIN, and INT_MIN has one bit set
+    // and is a power of 2.
+    uint64_t ud = (uint64_t)d;
+    uint64_t absD = (d < 0) ? -ud : ud;
+    uint32_t floor_log_2_d = 63 - libdivide_count_leading_zeros64(absD);
+    // check if exactly one bit is set,
+    // don't care if absD is 0 since that's divide by zero
+    if ((absD & (absD - 1)) == 0) {
+        // Branchfree and non-branchfree cases are the same
         result.magic = 0;
-    }
-    else {
-        const uint32_t floor_log_2_d = 63 - libdivide__count_leading_zeros64(absD);
-
-        //the dividend here is 2**(floor_log_2_d + 63), so the low 64 bit word is 0 and the high word is floor_log_2_d - 1
+        result.more = floor_log_2_d | (d < 0 ? LIBDIVIDE_NEGATIVE_DIVISOR : 0);
+    } else {
+        // the dividend here is 2**(floor_log_2_d + 63), so the low 64 bit word
+        // is 0 and the high word is floor_log_2_d - 1
         uint8_t more;
         uint64_t rem, proposed_m;
         proposed_m = libdivide_128_div_64_to_64(1ULL << (floor_log_2_d - 1), 0, absD, &rem);
         const uint64_t e = absD - rem;

-        /* We are going to start with a power of floor_log_2_d - 1. This works if works if e < 2**floor_log_2_d. */
-        if (e < (1ULL << floor_log_2_d)) {
-            /* This power works */
+        // We are going to start with a power of floor_log_2_d - 1.
+        // This works if e < 2**floor_log_2_d.
+        if (!branchfree && e < (1ULL << floor_log_2_d)) {
+            // This power works
             more = floor_log_2_d - 1;
-        }
-        else {
-            /* We need to go one higher. This should not make proposed_m overflow, but it will make it negative when interpreted as an int32_t. */
+        } else {
+            // We need to go one higher. This should not make proposed_m
+            // overflow, but it will make it negative when interpreted as an
+            // int32_t.
             proposed_m += proposed_m;
             const uint64_t twice_rem = rem + rem;
             if (twice_rem >= absD || twice_rem < rem) proposed_m += 1;
-            more = floor_log_2_d | LIBDIVIDE_ADD_MARKER | (d < 0 ? LIBDIVIDE_NEGATIVE_DIVISOR : 0);
+            // note that we only set the LIBDIVIDE_NEGATIVE_DIVISOR bit if we
+            // also set ADD_MARKER; this is an annoying optimization that
+            // enables algorithm #4 to avoid the mask. However we always set it
+            // in the branchfree case
+            more = floor_log_2_d | LIBDIVIDE_ADD_MARKER;
         }
         proposed_m += 1;
+        int64_t magic = (int64_t)proposed_m;
+
+        // Mark if we are negative
+        if (d < 0) {
+            more |= LIBDIVIDE_NEGATIVE_DIVISOR;
+            if (!branchfree) {
+                magic = -magic;
+            }
+        }
+
         result.more = more;
-        result.magic = (d < 0 ? 
-(int64_t)proposed_m : (int64_t)proposed_m); + result.magic = magic; } return result; } +struct libdivide_s64_t libdivide_s64_gen(int64_t d) { + return libdivide_internal_s64_gen(d, 0); +} + +struct libdivide_s64_branchfree_t libdivide_s64_branchfree_gen(int64_t d) { + struct libdivide_s64_t tmp = libdivide_internal_s64_gen(d, 1); + struct libdivide_s64_branchfree_t ret = {tmp.magic, tmp.more}; + return ret; +} + int64_t libdivide_s64_do(int64_t numer, const struct libdivide_s64_t *denom) { uint8_t more = denom->more; - int64_t magic = denom->magic; - if (magic == 0) { //shift path - uint32_t shifter = more & LIBDIVIDE_64_SHIFT_MASK; - int64_t q = numer + ((numer >> 63) & ((1LL << shifter) - 1)); - q = q >> shifter; - int64_t shiftMask = (int8_t)more >> 7; //must be arithmetic shift and then sign-extend - q = (q ^ shiftMask) - shiftMask; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + + if (!denom->magic) { // shift path + uint64_t mask = (1ULL << shift) - 1; + uint64_t uq = numer + ((numer >> 63) & mask); + int64_t q = (int64_t)uq; + q >>= shift; + // must be arithmetic shift and then sign-extend + int64_t sign = (int8_t)more >> 7; + q = (q ^ sign) - sign; return q; - } - else { - int64_t q = libdivide__mullhi_s64(magic, numer); + } else { + uint64_t uq = (uint64_t)libdivide_mullhi_s64(denom->magic, numer); if (more & LIBDIVIDE_ADD_MARKER) { - int64_t sign = (int8_t)more >> 7; //must be arithmetic shift and then sign extend - q += ((numer ^ sign) - sign); + // must be arithmetic shift and then sign extend + int64_t sign = (int8_t)more >> 7; + // q += (more < 0 ? -numer : numer) + // cast required to avoid UB + uq += ((uint64_t)numer ^ sign) - sign; } - q >>= more & LIBDIVIDE_64_SHIFT_MASK; + int64_t q = (int64_t)uq; + q >>= shift; q += (q < 0); return q; } } - -int libdivide_s64_get_algorithm(const struct libdivide_s64_t *denom) { - uint8_t more = denom->more; - int positiveDivisor = ! (more & LIBDIVIDE_NEGATIVE_DIVISOR); - if (denom->magic == 0) return (positiveDivisor ? 0 : 1); //shift path - else if (more & LIBDIVIDE_ADD_MARKER) return (positiveDivisor ? 
2 : 3); - else return 4; -} - -int64_t libdivide_s64_do_alg0(int64_t numer, const struct libdivide_s64_t *denom) { - uint32_t shifter = denom->more & LIBDIVIDE_64_SHIFT_MASK; - int64_t q = numer + ((numer >> 63) & ((1LL << shifter) - 1)); - return q >> shifter; -} - -int64_t libdivide_s64_do_alg1(int64_t numer, const struct libdivide_s64_t *denom) { - //denom->shifter != -1 && demo->shiftMask != 0 - uint32_t shifter = denom->more & LIBDIVIDE_64_SHIFT_MASK; - int64_t q = numer + ((numer >> 63) & ((1LL << shifter) - 1)); - return - (q >> shifter); -} - -int64_t libdivide_s64_do_alg2(int64_t numer, const struct libdivide_s64_t *denom) { - int64_t q = libdivide__mullhi_s64(denom->magic, numer); - q += numer; - q >>= denom->more & LIBDIVIDE_64_SHIFT_MASK; - q += (q < 0); - return q; -} - -int64_t libdivide_s64_do_alg3(int64_t numer, const struct libdivide_s64_t *denom) { - int64_t q = libdivide__mullhi_s64(denom->magic, numer); - q -= numer; - q >>= denom->more & LIBDIVIDE_64_SHIFT_MASK; - q += (q < 0); - return q; -} - -int64_t libdivide_s64_do_alg4(int64_t numer, const struct libdivide_s64_t *denom) { - int64_t q = libdivide__mullhi_s64(denom->magic, numer); - q >>= denom->more; - q += (q < 0); - return q; -} - - -#if LIBDIVIDE_USE_SSE2 -__m128i libdivide_s64_do_vector(__m128i numers, const struct libdivide_s64_t * denom) { +int64_t libdivide_s64_branchfree_do(int64_t numer, const struct libdivide_s64_branchfree_t *denom) { uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + // must be arithmetic shift and then sign extend + int64_t sign = (int8_t)more >> 7; int64_t magic = denom->magic; - if (magic == 0) { //shift path - uint32_t shifter = more & LIBDIVIDE_64_SHIFT_MASK; - __m128i roundToZeroTweak = libdivide__u64_to_m128((1LL << shifter) - 1); - __m128i q = _mm_add_epi64(numers, _mm_and_si128(libdivide_s64_signbits(numers), roundToZeroTweak)); //q = numer + ((numer >> 63) & roundToZeroTweak); - q = libdivide_s64_shift_right_vector(q, shifter); // q = q >> shifter - __m128i shiftMask = _mm_set1_epi32((int32_t)((int8_t)more >> 7)); - q = _mm_sub_epi64(_mm_xor_si128(q, shiftMask), shiftMask); //q = (q ^ shiftMask) - shiftMask; + int64_t q = libdivide_mullhi_s64(magic, numer); + q += numer; + + // If q is non-negative, we have nothing to do. + // If q is negative, we want to add either (2**shift)-1 if d is a power of + // 2, or (2**shift) if it is not a power of 2. + uint64_t is_power_of_2 = (magic == 0); + uint64_t q_sign = (uint64_t)(q >> 63); + q += q_sign & ((1ULL << shift) - is_power_of_2); + + // Arithmetic right shift + q >>= shift; + // Negate if needed + q = (q ^ sign) - sign; + + return q; +} + +int64_t libdivide_s64_recover(const struct libdivide_s64_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + if (denom->magic == 0) { // shift path + uint64_t absD = 1ULL << shift; + if (more & LIBDIVIDE_NEGATIVE_DIVISOR) { + absD = -absD; + } + return (int64_t)absD; + } else { + // Unsigned math is much easier + int negative_divisor = (more & LIBDIVIDE_NEGATIVE_DIVISOR); + int magic_was_negated = (more & LIBDIVIDE_ADD_MARKER) + ? denom->magic > 0 : denom->magic < 0; + + uint64_t d = (uint64_t)(magic_was_negated ? 
-denom->magic : denom->magic);
+        uint64_t n_hi = 1ULL << shift, n_lo = 0;
+        uint64_t rem_ignored;
+        uint64_t q = libdivide_128_div_64_to_64(n_hi, n_lo, d, &rem_ignored);
+        int64_t result = (int64_t)(q + 1);
+        if (negative_divisor) {
+            result = -result;
+        }
+        return result;
+    }
+}
+
+int64_t libdivide_s64_branchfree_recover(const struct libdivide_s64_branchfree_t *denom) {
+    return libdivide_s64_recover((const struct libdivide_s64_t *)denom);
+}
+
+#if defined(LIBDIVIDE_AVX512)
+
+static inline __m512i libdivide_u32_do_vector(__m512i numers, const struct libdivide_u32_t *denom);
+static inline __m512i libdivide_s32_do_vector(__m512i numers, const struct libdivide_s32_t *denom);
+static inline __m512i libdivide_u64_do_vector(__m512i numers, const struct libdivide_u64_t *denom);
+static inline __m512i libdivide_s64_do_vector(__m512i numers, const struct libdivide_s64_t *denom);
+
+static inline __m512i libdivide_u32_branchfree_do_vector(__m512i numers, const struct libdivide_u32_branchfree_t *denom);
+static inline __m512i libdivide_s32_branchfree_do_vector(__m512i numers, const struct libdivide_s32_branchfree_t *denom);
+static inline __m512i libdivide_u64_branchfree_do_vector(__m512i numers, const struct libdivide_u64_branchfree_t *denom);
+static inline __m512i libdivide_s64_branchfree_do_vector(__m512i numers, const struct libdivide_s64_branchfree_t *denom);
+
+//////// Internal Utility Functions
+
+static inline __m512i libdivide_s64_signbits(__m512i v) {
+    return _mm512_srai_epi64(v, 63);
+}
+
+static inline __m512i libdivide_s64_shift_right_vector(__m512i v, int amt) {
+    return _mm512_srai_epi64(v, amt);
+}
+
+// Here, b is assumed to contain one 32-bit value repeated.
+static inline __m512i libdivide_mullhi_u32_vector(__m512i a, __m512i b) {
+    __m512i hi_product_0Z2Z = _mm512_srli_epi64(_mm512_mul_epu32(a, b), 32);
+    __m512i a1X3X = _mm512_srli_epi64(a, 32);
+    __m512i mask = _mm512_set_epi32(-1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0);
+    __m512i hi_product_Z1Z3 = _mm512_and_si512(_mm512_mul_epu32(a1X3X, b), mask);
+    return _mm512_or_si512(hi_product_0Z2Z, hi_product_Z1Z3);
+}
+
+// b is one 32-bit value repeated.
+static inline __m512i libdivide_mullhi_s32_vector(__m512i a, __m512i b) {
+    __m512i hi_product_0Z2Z = _mm512_srli_epi64(_mm512_mul_epi32(a, b), 32);
+    __m512i a1X3X = _mm512_srli_epi64(a, 32);
+    __m512i mask = _mm512_set_epi32(-1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0);
+    __m512i hi_product_Z1Z3 = _mm512_and_si512(_mm512_mul_epi32(a1X3X, b), mask);
+    return _mm512_or_si512(hi_product_0Z2Z, hi_product_Z1Z3);
+}
+
+// Here, y is assumed to contain one 64-bit value repeated.
+// https://stackoverflow.com/a/28827013
+static inline __m512i libdivide_mullhi_u64_vector(__m512i x, __m512i y) {
+    __m512i lomask = _mm512_set1_epi64(0xffffffff);
+    __m512i xh = _mm512_shuffle_epi32(x, (_MM_PERM_ENUM) 0xB1);
+    __m512i yh = _mm512_shuffle_epi32(y, (_MM_PERM_ENUM) 0xB1);
+    __m512i w0 = _mm512_mul_epu32(x, y);
+    __m512i w1 = _mm512_mul_epu32(x, yh);
+    __m512i w2 = _mm512_mul_epu32(xh, y);
+    __m512i w3 = _mm512_mul_epu32(xh, yh);
+    __m512i w0h = _mm512_srli_epi64(w0, 32);
+    __m512i s1 = _mm512_add_epi64(w1, w0h);
+    __m512i s1l = _mm512_and_si512(s1, lomask);
+    __m512i s1h = _mm512_srli_epi64(s1, 32);
+    __m512i s2 = _mm512_add_epi64(w2, s1l);
+    __m512i s2h = _mm512_srli_epi64(s2, 32);
+    __m512i hi = _mm512_add_epi64(w3, s1h);
+    hi = _mm512_add_epi64(hi, s2h);
+
+    return hi;
+}
+
+// y is one 64-bit value repeated.
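Editor's aside, not part of the patch: the routine below derives the signed high multiply from the unsigned one via the usual correction mullhi_s64(x, y) = mullhi_u64(x, y) - ((x >> 63) & y) - ((y >> 63) & x), which is exactly what the two signbit masks compute. A minimal scalar sketch for cross-checking a single lane (assumes a compiler with __int128; the function name is made up):

static inline int64_t mullhi_s64_scalar(int64_t x, int64_t y) {
    // High 64 bits of the exact 128-bit signed product.
    return (int64_t)(((__int128)x * (__int128)y) >> 64);
}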
+static inline __m512i libdivide_mullhi_s64_vector(__m512i x, __m512i y) { + __m512i p = libdivide_mullhi_u64_vector(x, y); + __m512i t1 = _mm512_and_si512(libdivide_s64_signbits(x), y); + __m512i t2 = _mm512_and_si512(libdivide_s64_signbits(y), x); + p = _mm512_sub_epi64(p, t1); + p = _mm512_sub_epi64(p, t2); + return p; +} + +////////// UINT32 + +__m512i libdivide_u32_do_vector(__m512i numers, const struct libdivide_u32_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + return _mm512_srli_epi32(numers, more); + } + else { + __m512i q = libdivide_mullhi_u32_vector(numers, _mm512_set1_epi32(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // uint32_t t = ((numer - q) >> 1) + q; + // return t >> denom->shift; + uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + __m512i t = _mm512_add_epi32(_mm512_srli_epi32(_mm512_sub_epi32(numers, q), 1), q); + return _mm512_srli_epi32(t, shift); + } + else { + return _mm512_srli_epi32(q, more); + } + } +} + +__m512i libdivide_u32_branchfree_do_vector(__m512i numers, const struct libdivide_u32_branchfree_t *denom) { + __m512i q = libdivide_mullhi_u32_vector(numers, _mm512_set1_epi32(denom->magic)); + __m512i t = _mm512_add_epi32(_mm512_srli_epi32(_mm512_sub_epi32(numers, q), 1), q); + return _mm512_srli_epi32(t, denom->more); +} + +////////// UINT64 + +__m512i libdivide_u64_do_vector(__m512i numers, const struct libdivide_u64_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + return _mm512_srli_epi64(numers, more); + } + else { + __m512i q = libdivide_mullhi_u64_vector(numers, _mm512_set1_epi64(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // uint32_t t = ((numer - q) >> 1) + q; + // return t >> denom->shift; + uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + __m512i t = _mm512_add_epi64(_mm512_srli_epi64(_mm512_sub_epi64(numers, q), 1), q); + return _mm512_srli_epi64(t, shift); + } + else { + return _mm512_srli_epi64(q, more); + } + } +} + +__m512i libdivide_u64_branchfree_do_vector(__m512i numers, const struct libdivide_u64_branchfree_t *denom) { + __m512i q = libdivide_mullhi_u64_vector(numers, _mm512_set1_epi64(denom->magic)); + __m512i t = _mm512_add_epi64(_mm512_srli_epi64(_mm512_sub_epi64(numers, q), 1), q); + return _mm512_srli_epi64(t, denom->more); +} + +////////// SINT32 + +__m512i libdivide_s32_do_vector(__m512i numers, const struct libdivide_s32_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + uint32_t mask = (1U << shift) - 1; + __m512i roundToZeroTweak = _mm512_set1_epi32(mask); + // q = numer + ((numer >> 31) & roundToZeroTweak); + __m512i q = _mm512_add_epi32(numers, _mm512_and_si512(_mm512_srai_epi32(numers, 31), roundToZeroTweak)); + q = _mm512_srai_epi32(q, shift); + __m512i sign = _mm512_set1_epi32((int8_t)more >> 7); + // q = (q ^ sign) - sign; + q = _mm512_sub_epi32(_mm512_xor_si512(q, sign), sign); return q; } else { - __m128i q = libdivide_mullhi_s64_flat_vector(numers, libdivide__u64_to_m128(magic)); + __m512i q = libdivide_mullhi_s32_vector(numers, _mm512_set1_epi32(denom->magic)); if (more & LIBDIVIDE_ADD_MARKER) { - __m128i sign = _mm_set1_epi32((int32_t)((int8_t)more >> 7)); //must be arithmetic shift - q = _mm_add_epi64(q, _mm_sub_epi64(_mm_xor_si128(numers, sign), sign)); // q += ((numer ^ sign) - sign); + // must be arithmetic shift + __m512i sign = _mm512_set1_epi32((int8_t)more >> 7); + // q += ((numer ^ sign) - sign); + q = _mm512_add_epi32(q, _mm512_sub_epi32(_mm512_xor_si512(numers, sign), sign)); } - q 
= libdivide_s64_shift_right_vector(q, more & LIBDIVIDE_64_SHIFT_MASK); //q >>= denom->mult_path.shift + // q >>= shift + q = _mm512_srai_epi32(q, more & LIBDIVIDE_32_SHIFT_MASK); + q = _mm512_add_epi32(q, _mm512_srli_epi32(q, 31)); // q += (q < 0) + return q; + } +} + +__m512i libdivide_s32_branchfree_do_vector(__m512i numers, const struct libdivide_s32_branchfree_t *denom) { + int32_t magic = denom->magic; + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + // must be arithmetic shift + __m512i sign = _mm512_set1_epi32((int8_t)more >> 7); + __m512i q = libdivide_mullhi_s32_vector(numers, _mm512_set1_epi32(magic)); + q = _mm512_add_epi32(q, numers); // q += numers + + // If q is non-negative, we have nothing to do + // If q is negative, we want to add either (2**shift)-1 if d is + // a power of 2, or (2**shift) if it is not a power of 2 + uint32_t is_power_of_2 = (magic == 0); + __m512i q_sign = _mm512_srai_epi32(q, 31); // q_sign = q >> 31 + __m512i mask = _mm512_set1_epi32((1U << shift) - is_power_of_2); + q = _mm512_add_epi32(q, _mm512_and_si512(q_sign, mask)); // q = q + (q_sign & mask) + q = _mm512_srai_epi32(q, shift); // q >>= shift + q = _mm512_sub_epi32(_mm512_xor_si512(q, sign), sign); // q = (q ^ sign) - sign + return q; +} + +////////// SINT64 + +__m512i libdivide_s64_do_vector(__m512i numers, const struct libdivide_s64_t *denom) { + uint8_t more = denom->more; + int64_t magic = denom->magic; + if (magic == 0) { // shift path + uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + uint64_t mask = (1ULL << shift) - 1; + __m512i roundToZeroTweak = _mm512_set1_epi64(mask); + // q = numer + ((numer >> 63) & roundToZeroTweak); + __m512i q = _mm512_add_epi64(numers, _mm512_and_si512(libdivide_s64_signbits(numers), roundToZeroTweak)); + q = libdivide_s64_shift_right_vector(q, shift); + __m512i sign = _mm512_set1_epi32((int8_t)more >> 7); + // q = (q ^ sign) - sign; + q = _mm512_sub_epi64(_mm512_xor_si512(q, sign), sign); + return q; + } + else { + __m512i q = libdivide_mullhi_s64_vector(numers, _mm512_set1_epi64(magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift + __m512i sign = _mm512_set1_epi32((int8_t)more >> 7); + // q += ((numer ^ sign) - sign); + q = _mm512_add_epi64(q, _mm512_sub_epi64(_mm512_xor_si512(numers, sign), sign)); + } + // q >>= denom->mult_path.shift + q = libdivide_s64_shift_right_vector(q, more & LIBDIVIDE_64_SHIFT_MASK); + q = _mm512_add_epi64(q, _mm512_srli_epi64(q, 63)); // q += (q < 0) + return q; + } +} + +__m512i libdivide_s64_branchfree_do_vector(__m512i numers, const struct libdivide_s64_branchfree_t *denom) { + int64_t magic = denom->magic; + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + // must be arithmetic shift + __m512i sign = _mm512_set1_epi32((int8_t)more >> 7); + + // libdivide_mullhi_s64(numers, magic); + __m512i q = libdivide_mullhi_s64_vector(numers, _mm512_set1_epi64(magic)); + q = _mm512_add_epi64(q, numers); // q += numers + + // If q is non-negative, we have nothing to do. + // If q is negative, we want to add either (2**shift)-1 if d is + // a power of 2, or (2**shift) if it is not a power of 2. 
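    // (Editor's note: for a power-of-2 divisor this is the standard trick for
    // turning the arithmetic shift, which rounds toward negative infinity,
    // into C-style truncation toward zero. For example, with d = 4, i.e.
    // shift = 2: (-7 + 3) >> 2 == -1, matching -7 / 4 in C, whereas
    // -7 >> 2 == -2.)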
+ uint32_t is_power_of_2 = (magic == 0); + __m512i q_sign = libdivide_s64_signbits(q); // q_sign = q >> 63 + __m512i mask = _mm512_set1_epi64((1ULL << shift) - is_power_of_2); + q = _mm512_add_epi64(q, _mm512_and_si512(q_sign, mask)); // q = q + (q_sign & mask) + q = libdivide_s64_shift_right_vector(q, shift); // q >>= shift + q = _mm512_sub_epi64(_mm512_xor_si512(q, sign), sign); // q = (q ^ sign) - sign + return q; +} + +#elif defined(LIBDIVIDE_AVX2) + +static inline __m256i libdivide_u32_do_vector(__m256i numers, const struct libdivide_u32_t *denom); +static inline __m256i libdivide_s32_do_vector(__m256i numers, const struct libdivide_s32_t *denom); +static inline __m256i libdivide_u64_do_vector(__m256i numers, const struct libdivide_u64_t *denom); +static inline __m256i libdivide_s64_do_vector(__m256i numers, const struct libdivide_s64_t *denom); + +static inline __m256i libdivide_u32_branchfree_do_vector(__m256i numers, const struct libdivide_u32_branchfree_t *denom); +static inline __m256i libdivide_s32_branchfree_do_vector(__m256i numers, const struct libdivide_s32_branchfree_t *denom); +static inline __m256i libdivide_u64_branchfree_do_vector(__m256i numers, const struct libdivide_u64_branchfree_t *denom); +static inline __m256i libdivide_s64_branchfree_do_vector(__m256i numers, const struct libdivide_s64_branchfree_t *denom); + +//////// Internal Utility Functions + +// Implementation of _mm256_srai_epi64(v, 63) (from AVX512). +static inline __m256i libdivide_s64_signbits(__m256i v) { + __m256i hiBitsDuped = _mm256_shuffle_epi32(v, _MM_SHUFFLE(3, 3, 1, 1)); + __m256i signBits = _mm256_srai_epi32(hiBitsDuped, 31); + return signBits; +} + +// Implementation of _mm256_srai_epi64 (from AVX512). +static inline __m256i libdivide_s64_shift_right_vector(__m256i v, int amt) { + const int b = 64 - amt; + __m256i m = _mm256_set1_epi64x(1ULL << (b - 1)); + __m256i x = _mm256_srli_epi64(v, amt); + __m256i result = _mm256_sub_epi64(_mm256_xor_si256(x, m), m); + return result; +} + +// Here, b is assumed to contain one 32-bit value repeated. +static inline __m256i libdivide_mullhi_u32_vector(__m256i a, __m256i b) { + __m256i hi_product_0Z2Z = _mm256_srli_epi64(_mm256_mul_epu32(a, b), 32); + __m256i a1X3X = _mm256_srli_epi64(a, 32); + __m256i mask = _mm256_set_epi32(-1, 0, -1, 0, -1, 0, -1, 0); + __m256i hi_product_Z1Z3 = _mm256_and_si256(_mm256_mul_epu32(a1X3X, b), mask); + return _mm256_or_si256(hi_product_0Z2Z, hi_product_Z1Z3); +} + +// b is one 32-bit value repeated. +static inline __m256i libdivide_mullhi_s32_vector(__m256i a, __m256i b) { + __m256i hi_product_0Z2Z = _mm256_srli_epi64(_mm256_mul_epi32(a, b), 32); + __m256i a1X3X = _mm256_srli_epi64(a, 32); + __m256i mask = _mm256_set_epi32(-1, 0, -1, 0, -1, 0, -1, 0); + __m256i hi_product_Z1Z3 = _mm256_and_si256(_mm256_mul_epi32(a1X3X, b), mask); + return _mm256_or_si256(hi_product_0Z2Z, hi_product_Z1Z3); +} + +// Here, y is assumed to contain one 64-bit value repeated. 
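// (Editor's note: writing x = xh * 2^32 + xl and y = yh * 2^32 + yl, the high
// 64 bits of x * y are xh * yh plus the carried-out parts of the two cross
// terms; the partial products w0..w3 and the sums s1, s2 below accumulate
// exactly that.)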
+// https://stackoverflow.com/a/28827013 +static inline __m256i libdivide_mullhi_u64_vector(__m256i x, __m256i y) { + __m256i lomask = _mm256_set1_epi64x(0xffffffff); + __m256i xh = _mm256_shuffle_epi32(x, 0xB1); // x0l, x0h, x1l, x1h + __m256i yh = _mm256_shuffle_epi32(y, 0xB1); // y0l, y0h, y1l, y1h + __m256i w0 = _mm256_mul_epu32(x, y); // x0l*y0l, x1l*y1l + __m256i w1 = _mm256_mul_epu32(x, yh); // x0l*y0h, x1l*y1h + __m256i w2 = _mm256_mul_epu32(xh, y); // x0h*y0l, x1h*y0l + __m256i w3 = _mm256_mul_epu32(xh, yh); // x0h*y0h, x1h*y1h + __m256i w0h = _mm256_srli_epi64(w0, 32); + __m256i s1 = _mm256_add_epi64(w1, w0h); + __m256i s1l = _mm256_and_si256(s1, lomask); + __m256i s1h = _mm256_srli_epi64(s1, 32); + __m256i s2 = _mm256_add_epi64(w2, s1l); + __m256i s2h = _mm256_srli_epi64(s2, 32); + __m256i hi = _mm256_add_epi64(w3, s1h); + hi = _mm256_add_epi64(hi, s2h); + + return hi; +} + +// y is one 64-bit value repeated. +static inline __m256i libdivide_mullhi_s64_vector(__m256i x, __m256i y) { + __m256i p = libdivide_mullhi_u64_vector(x, y); + __m256i t1 = _mm256_and_si256(libdivide_s64_signbits(x), y); + __m256i t2 = _mm256_and_si256(libdivide_s64_signbits(y), x); + p = _mm256_sub_epi64(p, t1); + p = _mm256_sub_epi64(p, t2); + return p; +} + +////////// UINT32 + +__m256i libdivide_u32_do_vector(__m256i numers, const struct libdivide_u32_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + return _mm256_srli_epi32(numers, more); + } + else { + __m256i q = libdivide_mullhi_u32_vector(numers, _mm256_set1_epi32(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // uint32_t t = ((numer - q) >> 1) + q; + // return t >> denom->shift; + uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + __m256i t = _mm256_add_epi32(_mm256_srli_epi32(_mm256_sub_epi32(numers, q), 1), q); + return _mm256_srli_epi32(t, shift); + } + else { + return _mm256_srli_epi32(q, more); + } + } +} + +__m256i libdivide_u32_branchfree_do_vector(__m256i numers, const struct libdivide_u32_branchfree_t *denom) { + __m256i q = libdivide_mullhi_u32_vector(numers, _mm256_set1_epi32(denom->magic)); + __m256i t = _mm256_add_epi32(_mm256_srli_epi32(_mm256_sub_epi32(numers, q), 1), q); + return _mm256_srli_epi32(t, denom->more); +} + +////////// UINT64 + +__m256i libdivide_u64_do_vector(__m256i numers, const struct libdivide_u64_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + return _mm256_srli_epi64(numers, more); + } + else { + __m256i q = libdivide_mullhi_u64_vector(numers, _mm256_set1_epi64x(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // uint32_t t = ((numer - q) >> 1) + q; + // return t >> denom->shift; + uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + __m256i t = _mm256_add_epi64(_mm256_srli_epi64(_mm256_sub_epi64(numers, q), 1), q); + return _mm256_srli_epi64(t, shift); + } + else { + return _mm256_srli_epi64(q, more); + } + } +} + +__m256i libdivide_u64_branchfree_do_vector(__m256i numers, const struct libdivide_u64_branchfree_t *denom) { + __m256i q = libdivide_mullhi_u64_vector(numers, _mm256_set1_epi64x(denom->magic)); + __m256i t = _mm256_add_epi64(_mm256_srli_epi64(_mm256_sub_epi64(numers, q), 1), q); + return _mm256_srli_epi64(t, denom->more); +} + +////////// SINT32 + +__m256i libdivide_s32_do_vector(__m256i numers, const struct libdivide_s32_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + uint32_t mask = (1U << shift) - 1; + __m256i roundToZeroTweak = _mm256_set1_epi32(mask); + // q = numer + ((numer >> 
31) & roundToZeroTweak); + __m256i q = _mm256_add_epi32(numers, _mm256_and_si256(_mm256_srai_epi32(numers, 31), roundToZeroTweak)); + q = _mm256_srai_epi32(q, shift); + __m256i sign = _mm256_set1_epi32((int8_t)more >> 7); + // q = (q ^ sign) - sign; + q = _mm256_sub_epi32(_mm256_xor_si256(q, sign), sign); + return q; + } + else { + __m256i q = libdivide_mullhi_s32_vector(numers, _mm256_set1_epi32(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift + __m256i sign = _mm256_set1_epi32((int8_t)more >> 7); + // q += ((numer ^ sign) - sign); + q = _mm256_add_epi32(q, _mm256_sub_epi32(_mm256_xor_si256(numers, sign), sign)); + } + // q >>= shift + q = _mm256_srai_epi32(q, more & LIBDIVIDE_32_SHIFT_MASK); + q = _mm256_add_epi32(q, _mm256_srli_epi32(q, 31)); // q += (q < 0) + return q; + } +} + +__m256i libdivide_s32_branchfree_do_vector(__m256i numers, const struct libdivide_s32_branchfree_t *denom) { + int32_t magic = denom->magic; + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + // must be arithmetic shift + __m256i sign = _mm256_set1_epi32((int8_t)more >> 7); + __m256i q = libdivide_mullhi_s32_vector(numers, _mm256_set1_epi32(magic)); + q = _mm256_add_epi32(q, numers); // q += numers + + // If q is non-negative, we have nothing to do + // If q is negative, we want to add either (2**shift)-1 if d is + // a power of 2, or (2**shift) if it is not a power of 2 + uint32_t is_power_of_2 = (magic == 0); + __m256i q_sign = _mm256_srai_epi32(q, 31); // q_sign = q >> 31 + __m256i mask = _mm256_set1_epi32((1U << shift) - is_power_of_2); + q = _mm256_add_epi32(q, _mm256_and_si256(q_sign, mask)); // q = q + (q_sign & mask) + q = _mm256_srai_epi32(q, shift); // q >>= shift + q = _mm256_sub_epi32(_mm256_xor_si256(q, sign), sign); // q = (q ^ sign) - sign + return q; +} + +////////// SINT64 + +__m256i libdivide_s64_do_vector(__m256i numers, const struct libdivide_s64_t *denom) { + uint8_t more = denom->more; + int64_t magic = denom->magic; + if (magic == 0) { // shift path + uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + uint64_t mask = (1ULL << shift) - 1; + __m256i roundToZeroTweak = _mm256_set1_epi64x(mask); + // q = numer + ((numer >> 63) & roundToZeroTweak); + __m256i q = _mm256_add_epi64(numers, _mm256_and_si256(libdivide_s64_signbits(numers), roundToZeroTweak)); + q = libdivide_s64_shift_right_vector(q, shift); + __m256i sign = _mm256_set1_epi32((int8_t)more >> 7); + // q = (q ^ sign) - sign; + q = _mm256_sub_epi64(_mm256_xor_si256(q, sign), sign); + return q; + } + else { + __m256i q = libdivide_mullhi_s64_vector(numers, _mm256_set1_epi64x(magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift + __m256i sign = _mm256_set1_epi32((int8_t)more >> 7); + // q += ((numer ^ sign) - sign); + q = _mm256_add_epi64(q, _mm256_sub_epi64(_mm256_xor_si256(numers, sign), sign)); + } + // q >>= denom->mult_path.shift + q = libdivide_s64_shift_right_vector(q, more & LIBDIVIDE_64_SHIFT_MASK); + q = _mm256_add_epi64(q, _mm256_srli_epi64(q, 63)); // q += (q < 0) + return q; + } +} + +__m256i libdivide_s64_branchfree_do_vector(__m256i numers, const struct libdivide_s64_branchfree_t *denom) { + int64_t magic = denom->magic; + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + // must be arithmetic shift + __m256i sign = _mm256_set1_epi32((int8_t)more >> 7); + + // libdivide_mullhi_s64(numers, magic); + __m256i q = libdivide_mullhi_s64_vector(numers, _mm256_set1_epi64x(magic)); + q = _mm256_add_epi64(q, 
numers); // q += numers + + // If q is non-negative, we have nothing to do. + // If q is negative, we want to add either (2**shift)-1 if d is + // a power of 2, or (2**shift) if it is not a power of 2. + uint32_t is_power_of_2 = (magic == 0); + __m256i q_sign = libdivide_s64_signbits(q); // q_sign = q >> 63 + __m256i mask = _mm256_set1_epi64x((1ULL << shift) - is_power_of_2); + q = _mm256_add_epi64(q, _mm256_and_si256(q_sign, mask)); // q = q + (q_sign & mask) + q = libdivide_s64_shift_right_vector(q, shift); // q >>= shift + q = _mm256_sub_epi64(_mm256_xor_si256(q, sign), sign); // q = (q ^ sign) - sign + return q; +} + +#elif defined(LIBDIVIDE_SSE2) + +static inline __m128i libdivide_u32_do_vector(__m128i numers, const struct libdivide_u32_t *denom); +static inline __m128i libdivide_s32_do_vector(__m128i numers, const struct libdivide_s32_t *denom); +static inline __m128i libdivide_u64_do_vector(__m128i numers, const struct libdivide_u64_t *denom); +static inline __m128i libdivide_s64_do_vector(__m128i numers, const struct libdivide_s64_t *denom); + +static inline __m128i libdivide_u32_branchfree_do_vector(__m128i numers, const struct libdivide_u32_branchfree_t *denom); +static inline __m128i libdivide_s32_branchfree_do_vector(__m128i numers, const struct libdivide_s32_branchfree_t *denom); +static inline __m128i libdivide_u64_branchfree_do_vector(__m128i numers, const struct libdivide_u64_branchfree_t *denom); +static inline __m128i libdivide_s64_branchfree_do_vector(__m128i numers, const struct libdivide_s64_branchfree_t *denom); + +//////// Internal Utility Functions + +// Implementation of _mm_srai_epi64(v, 63) (from AVX512). +static inline __m128i libdivide_s64_signbits(__m128i v) { + __m128i hiBitsDuped = _mm_shuffle_epi32(v, _MM_SHUFFLE(3, 3, 1, 1)); + __m128i signBits = _mm_srai_epi32(hiBitsDuped, 31); + return signBits; +} + +// Implementation of _mm_srai_epi64 (from AVX512). +static inline __m128i libdivide_s64_shift_right_vector(__m128i v, int amt) { + const int b = 64 - amt; + __m128i m = _mm_set1_epi64x(1ULL << (b - 1)); + __m128i x = _mm_srli_epi64(v, amt); + __m128i result = _mm_sub_epi64(_mm_xor_si128(x, m), m); + return result; +} + +// Here, b is assumed to contain one 32-bit value repeated. +static inline __m128i libdivide_mullhi_u32_vector(__m128i a, __m128i b) { + __m128i hi_product_0Z2Z = _mm_srli_epi64(_mm_mul_epu32(a, b), 32); + __m128i a1X3X = _mm_srli_epi64(a, 32); + __m128i mask = _mm_set_epi32(-1, 0, -1, 0); + __m128i hi_product_Z1Z3 = _mm_and_si128(_mm_mul_epu32(a1X3X, b), mask); + return _mm_or_si128(hi_product_0Z2Z, hi_product_Z1Z3); +} + +// SSE2 does not have a signed multiplication instruction, but we can convert +// unsigned to signed pretty efficiently. Again, b is just a 32 bit value +// repeated four times. +static inline __m128i libdivide_mullhi_s32_vector(__m128i a, __m128i b) { + __m128i p = libdivide_mullhi_u32_vector(a, b); + // t1 = (a >> 31) & y, arithmetic shift + __m128i t1 = _mm_and_si128(_mm_srai_epi32(a, 31), b); + __m128i t2 = _mm_and_si128(_mm_srai_epi32(b, 31), a); + p = _mm_sub_epi32(p, t1); + p = _mm_sub_epi32(p, t2); + return p; +} + +// Here, y is assumed to contain one 64-bit value repeated. 
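// (Editor's note: _mm_mul_epu32 multiplies only the low 32-bit half of each
// 64-bit lane, which is why the 0xB1 shuffles below swap the halves to bring
// the high 32 bits into multiplication position.)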
+// https://stackoverflow.com/a/28827013 +static inline __m128i libdivide_mullhi_u64_vector(__m128i x, __m128i y) { + __m128i lomask = _mm_set1_epi64x(0xffffffff); + __m128i xh = _mm_shuffle_epi32(x, 0xB1); // x0l, x0h, x1l, x1h + __m128i yh = _mm_shuffle_epi32(y, 0xB1); // y0l, y0h, y1l, y1h + __m128i w0 = _mm_mul_epu32(x, y); // x0l*y0l, x1l*y1l + __m128i w1 = _mm_mul_epu32(x, yh); // x0l*y0h, x1l*y1h + __m128i w2 = _mm_mul_epu32(xh, y); // x0h*y0l, x1h*y0l + __m128i w3 = _mm_mul_epu32(xh, yh); // x0h*y0h, x1h*y1h + __m128i w0h = _mm_srli_epi64(w0, 32); + __m128i s1 = _mm_add_epi64(w1, w0h); + __m128i s1l = _mm_and_si128(s1, lomask); + __m128i s1h = _mm_srli_epi64(s1, 32); + __m128i s2 = _mm_add_epi64(w2, s1l); + __m128i s2h = _mm_srli_epi64(s2, 32); + __m128i hi = _mm_add_epi64(w3, s1h); + hi = _mm_add_epi64(hi, s2h); + + return hi; +} + +// y is one 64-bit value repeated. +static inline __m128i libdivide_mullhi_s64_vector(__m128i x, __m128i y) { + __m128i p = libdivide_mullhi_u64_vector(x, y); + __m128i t1 = _mm_and_si128(libdivide_s64_signbits(x), y); + __m128i t2 = _mm_and_si128(libdivide_s64_signbits(y), x); + p = _mm_sub_epi64(p, t1); + p = _mm_sub_epi64(p, t2); + return p; +} + +////////// UINT32 + +__m128i libdivide_u32_do_vector(__m128i numers, const struct libdivide_u32_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + return _mm_srli_epi32(numers, more); + } + else { + __m128i q = libdivide_mullhi_u32_vector(numers, _mm_set1_epi32(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // uint32_t t = ((numer - q) >> 1) + q; + // return t >> denom->shift; + uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + __m128i t = _mm_add_epi32(_mm_srli_epi32(_mm_sub_epi32(numers, q), 1), q); + return _mm_srli_epi32(t, shift); + } + else { + return _mm_srli_epi32(q, more); + } + } +} + +__m128i libdivide_u32_branchfree_do_vector(__m128i numers, const struct libdivide_u32_branchfree_t *denom) { + __m128i q = libdivide_mullhi_u32_vector(numers, _mm_set1_epi32(denom->magic)); + __m128i t = _mm_add_epi32(_mm_srli_epi32(_mm_sub_epi32(numers, q), 1), q); + return _mm_srli_epi32(t, denom->more); +} + +////////// UINT64 + +__m128i libdivide_u64_do_vector(__m128i numers, const struct libdivide_u64_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + return _mm_srli_epi64(numers, more); + } + else { + __m128i q = libdivide_mullhi_u64_vector(numers, _mm_set1_epi64x(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // uint32_t t = ((numer - q) >> 1) + q; + // return t >> denom->shift; + uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + __m128i t = _mm_add_epi64(_mm_srli_epi64(_mm_sub_epi64(numers, q), 1), q); + return _mm_srli_epi64(t, shift); + } + else { + return _mm_srli_epi64(q, more); + } + } +} + +__m128i libdivide_u64_branchfree_do_vector(__m128i numers, const struct libdivide_u64_branchfree_t *denom) { + __m128i q = libdivide_mullhi_u64_vector(numers, _mm_set1_epi64x(denom->magic)); + __m128i t = _mm_add_epi64(_mm_srli_epi64(_mm_sub_epi64(numers, q), 1), q); + return _mm_srli_epi64(t, denom->more); +} + +////////// SINT32 + +__m128i libdivide_s32_do_vector(__m128i numers, const struct libdivide_s32_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + uint32_t mask = (1U << shift) - 1; + __m128i roundToZeroTweak = _mm_set1_epi32(mask); + // q = numer + ((numer >> 31) & roundToZeroTweak); + __m128i q = _mm_add_epi32(numers, _mm_and_si128(_mm_srai_epi32(numers, 31), roundToZeroTweak)); + q = 
_mm_srai_epi32(q, shift); + __m128i sign = _mm_set1_epi32((int8_t)more >> 7); + // q = (q ^ sign) - sign; + q = _mm_sub_epi32(_mm_xor_si128(q, sign), sign); + return q; + } + else { + __m128i q = libdivide_mullhi_s32_vector(numers, _mm_set1_epi32(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift + __m128i sign = _mm_set1_epi32((int8_t)more >> 7); + // q += ((numer ^ sign) - sign); + q = _mm_add_epi32(q, _mm_sub_epi32(_mm_xor_si128(numers, sign), sign)); + } + // q >>= shift + q = _mm_srai_epi32(q, more & LIBDIVIDE_32_SHIFT_MASK); + q = _mm_add_epi32(q, _mm_srli_epi32(q, 31)); // q += (q < 0) + return q; + } +} + +__m128i libdivide_s32_branchfree_do_vector(__m128i numers, const struct libdivide_s32_branchfree_t *denom) { + int32_t magic = denom->magic; + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + // must be arithmetic shift + __m128i sign = _mm_set1_epi32((int8_t)more >> 7); + __m128i q = libdivide_mullhi_s32_vector(numers, _mm_set1_epi32(magic)); + q = _mm_add_epi32(q, numers); // q += numers + + // If q is non-negative, we have nothing to do + // If q is negative, we want to add either (2**shift)-1 if d is + // a power of 2, or (2**shift) if it is not a power of 2 + uint32_t is_power_of_2 = (magic == 0); + __m128i q_sign = _mm_srai_epi32(q, 31); // q_sign = q >> 31 + __m128i mask = _mm_set1_epi32((1U << shift) - is_power_of_2); + q = _mm_add_epi32(q, _mm_and_si128(q_sign, mask)); // q = q + (q_sign & mask) + q = _mm_srai_epi32(q, shift); // q >>= shift + q = _mm_sub_epi32(_mm_xor_si128(q, sign), sign); // q = (q ^ sign) - sign + return q; +} + +////////// SINT64 + +__m128i libdivide_s64_do_vector(__m128i numers, const struct libdivide_s64_t *denom) { + uint8_t more = denom->more; + int64_t magic = denom->magic; + if (magic == 0) { // shift path + uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + uint64_t mask = (1ULL << shift) - 1; + __m128i roundToZeroTweak = _mm_set1_epi64x(mask); + // q = numer + ((numer >> 63) & roundToZeroTweak); + __m128i q = _mm_add_epi64(numers, _mm_and_si128(libdivide_s64_signbits(numers), roundToZeroTweak)); + q = libdivide_s64_shift_right_vector(q, shift); + __m128i sign = _mm_set1_epi32((int8_t)more >> 7); + // q = (q ^ sign) - sign; + q = _mm_sub_epi64(_mm_xor_si128(q, sign), sign); + return q; + } + else { + __m128i q = libdivide_mullhi_s64_vector(numers, _mm_set1_epi64x(magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift + __m128i sign = _mm_set1_epi32((int8_t)more >> 7); + // q += ((numer ^ sign) - sign); + q = _mm_add_epi64(q, _mm_sub_epi64(_mm_xor_si128(numers, sign), sign)); + } + // q >>= denom->mult_path.shift + q = libdivide_s64_shift_right_vector(q, more & LIBDIVIDE_64_SHIFT_MASK); q = _mm_add_epi64(q, _mm_srli_epi64(q, 63)); // q += (q < 0) return q; } } -__m128i libdivide_s64_do_vector_alg0(__m128i numers, const struct libdivide_s64_t *denom) { - uint32_t shifter = denom->more & LIBDIVIDE_64_SHIFT_MASK; - __m128i roundToZeroTweak = libdivide__u64_to_m128((1LL << shifter) - 1); - __m128i q = _mm_add_epi64(numers, _mm_and_si128(libdivide_s64_signbits(numers), roundToZeroTweak)); - q = libdivide_s64_shift_right_vector(q, shifter); - return q; -} +__m128i libdivide_s64_branchfree_do_vector(__m128i numers, const struct libdivide_s64_branchfree_t *denom) { + int64_t magic = denom->magic; + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + // must be arithmetic shift + __m128i sign = _mm_set1_epi32((int8_t)more >> 7); -__m128i 
libdivide_s64_do_vector_alg1(__m128i numers, const struct libdivide_s64_t *denom) { - uint32_t shifter = denom->more & LIBDIVIDE_64_SHIFT_MASK; - __m128i roundToZeroTweak = libdivide__u64_to_m128((1LL << shifter) - 1); - __m128i q = _mm_add_epi64(numers, _mm_and_si128(libdivide_s64_signbits(numers), roundToZeroTweak)); - q = libdivide_s64_shift_right_vector(q, shifter); - return _mm_sub_epi64(_mm_setzero_si128(), q); -} + // libdivide_mullhi_s64(numers, magic); + __m128i q = libdivide_mullhi_s64_vector(numers, _mm_set1_epi64x(magic)); + q = _mm_add_epi64(q, numers); // q += numers -__m128i libdivide_s64_do_vector_alg2(__m128i numers, const struct libdivide_s64_t *denom) { - __m128i q = libdivide_mullhi_s64_flat_vector(numers, libdivide__u64_to_m128(denom->magic)); - q = _mm_add_epi64(q, numers); - q = libdivide_s64_shift_right_vector(q, denom->more & LIBDIVIDE_64_SHIFT_MASK); - q = _mm_add_epi64(q, _mm_srli_epi64(q, 63)); // q += (q < 0) - return q; -} - -__m128i libdivide_s64_do_vector_alg3(__m128i numers, const struct libdivide_s64_t *denom) { - __m128i q = libdivide_mullhi_s64_flat_vector(numers, libdivide__u64_to_m128(denom->magic)); - q = _mm_sub_epi64(q, numers); - q = libdivide_s64_shift_right_vector(q, denom->more & LIBDIVIDE_64_SHIFT_MASK); - q = _mm_add_epi64(q, _mm_srli_epi64(q, 63)); // q += (q < 0) - return q; -} - -__m128i libdivide_s64_do_vector_alg4(__m128i numers, const struct libdivide_s64_t *denom) { - __m128i q = libdivide_mullhi_s64_flat_vector(numers, libdivide__u64_to_m128(denom->magic)); - q = libdivide_s64_shift_right_vector(q, denom->more); - q = _mm_add_epi64(q, _mm_srli_epi64(q, 63)); + // If q is non-negative, we have nothing to do. + // If q is negative, we want to add either (2**shift)-1 if d is + // a power of 2, or (2**shift) if it is not a power of 2. + uint32_t is_power_of_2 = (magic == 0); + __m128i q_sign = libdivide_s64_signbits(q); // q_sign = q >> 63 + __m128i mask = _mm_set1_epi64x((1ULL << shift) - is_power_of_2); + q = _mm_add_epi64(q, _mm_and_si128(q_sign, mask)); // q = q + (q_sign & mask) + q = libdivide_s64_shift_right_vector(q, shift); // q >>= shift + q = _mm_sub_epi64(_mm_xor_si128(q, sign), sign); // q = (q ^ sign) - sign return q; } @@ -1204,228 +1930,143 @@ __m128i libdivide_s64_do_vector_alg4(__m128i numers, const struct libdivide_s64_ #ifdef __cplusplus -/* The C++ template design here is a total mess. This needs to be fixed by someone better at templates than I. The current design is: - -- The base is a template divider_base that takes the integer type, the libdivide struct, a generating function, a get algorithm function, a do function, and either a do vector function or a dummy int. -- The base has storage for the libdivide struct. This is the only storage (so the C++ class should be no larger than the libdivide struct). - -- Above that, there's divider_mid. This is an empty struct by default, but it is specialized against our four int types. divider_mid contains a template struct algo, that contains a typedef for a specialization of divider_base. struct algo is specialized to take an "algorithm number," where -1 means to use the general algorithm. - -- Publicly we have class divider, which inherits from divider_mid::algo. This also take an algorithm number, which defaults to -1 (the general algorithm). -- divider has a operator / which allows you to use a divider as the divisor in a quotient expression. 
- -*/ - -namespace libdivide_internal { - -#if LIBDIVIDE_USE_SSE2 -#define MAYBE_VECTOR(x) x -#define MAYBE_VECTOR_PARAM __m128i vector_func(__m128i, const DenomType *) -#else -#define MAYBE_VECTOR(x) 0 -#define MAYBE_VECTOR_PARAM int vector_func -#endif - - /* Some bogus unswitch functions for unsigned types so the same (presumably templated) code can work for both signed and unsigned. */ - uint32_t crash_u32(uint32_t, const libdivide_u32_t *) { abort(); } - uint64_t crash_u64(uint64_t, const libdivide_u64_t *) { abort(); } -#ifdef __APPLE__ - UInt64 crash_u64(UInt64, const libdivide_u64_t *) { abort(); } -#endif -#if LIBDIVIDE_USE_SSE2 - __m128i crash_u32_vector(__m128i, const libdivide_u32_t *) { abort(); } - __m128i crash_u64_vector(__m128i, const libdivide_u64_t *) { abort(); } -#endif - - template - class divider_base { - public: - DenomType denom; - divider_base(IntType d) : denom(gen_func(d)) { } - divider_base(const DenomType & d) : denom(d) { } - - IntType perform_divide(IntType val) const { return do_func(val, &denom); } -#if LIBDIVIDE_USE_SSE2 - __m128i perform_divide_vector(__m128i val) const { return vector_func(val, &denom); } -#endif - - int get_algorithm() const { return get_algo(&denom); } - }; - - - template struct divider_mid { }; - - template<> struct divider_mid { - typedef uint32_t IntType; - typedef struct libdivide_u32_t DenomType; - template struct denom { - typedef divider_base divider; - }; - - template struct algo { }; - template struct algo<-1, J> { typedef denom::divider divider; }; - template struct algo<0, J> { typedef denom::divider divider; }; - template struct algo<1, J> { typedef denom::divider divider; }; - template struct algo<2, J> { typedef denom::divider divider; }; - - /* Define two more bogus ones so that the same (templated, presumably) code can handle both signed and unsigned */ - template struct algo<3, J> { typedef denom::divider divider; }; - template struct algo<4, J> { typedef denom::divider divider; }; - - }; - - template<> struct divider_mid { - typedef int32_t IntType; - typedef struct libdivide_s32_t DenomType; - template struct denom { - typedef divider_base divider; - }; - - - template struct algo { }; - template struct algo<-1, J> { typedef denom::divider divider; }; - template struct algo<0, J> { typedef denom::divider divider; }; - template struct algo<1, J> { typedef denom::divider divider; }; - template struct algo<2, J> { typedef denom::divider divider; }; - template struct algo<3, J> { typedef denom::divider divider; }; - template struct algo<4, J> { typedef denom::divider divider; }; - - }; - -#ifdef __APPLE__ - template<> struct divider_mid { - typedef Int64 IntType; - typedef struct libdivide_s64_t DenomType; - template struct denom { - typedef divider_base divider; - }; - - template struct algo { }; - template struct algo<-1, J> { typedef denom::divider divider; }; - template struct algo<0, J> { typedef denom::divider divider; }; - template struct algo<1, J> { typedef denom::divider divider; }; - template struct algo<2, J> { typedef denom::divider divider; }; - template struct algo<3, J> { typedef denom::divider divider; }; - template struct algo<4, J> { typedef denom::divider divider; }; - }; - - template<> struct divider_mid { - typedef UInt64 IntType; - typedef struct libdivide_u64_t DenomType; - template struct denom { - typedef divider_base divider; - }; - - template struct algo { }; - template struct algo<-1, J> { typedef denom::divider divider; }; - template struct algo<0, J> { typedef denom::divider divider; }; - 
template struct algo<1, J> { typedef denom::divider divider; }; - template struct algo<2, J> { typedef denom::divider divider; }; - - /* Define two more bogus ones so that the same (templated, presumably) code can handle both signed and unsigned */ - template struct algo<3, J> { typedef denom::divider divider; }; - template struct algo<4, J> { typedef denom::divider divider; }; - - - }; -#endif - - template<> struct divider_mid { - typedef uint64_t IntType; - typedef struct libdivide_u64_t DenomType; - template struct denom { - typedef divider_base divider; - }; - - template struct algo { }; - template struct algo<-1, J> { typedef denom::divider divider; }; - template struct algo<0, J> { typedef denom::divider divider; }; - template struct algo<1, J> { typedef denom::divider divider; }; - template struct algo<2, J> { typedef denom::divider divider; }; - - /* Define two more bogus ones so that the same (templated, presumably) code can handle both signed and unsigned */ - template struct algo<3, J> { typedef denom::divider divider; }; - template struct algo<4, J> { typedef denom::divider divider; }; - - - }; - - template<> struct divider_mid { - typedef int64_t IntType; - typedef struct libdivide_s64_t DenomType; - template struct denom { - typedef divider_base divider; - }; - - template struct algo { }; - template struct algo<-1, J> { typedef denom::divider divider; }; - template struct algo<0, J> { typedef denom::divider divider; }; - template struct algo<1, J> { typedef denom::divider divider; }; - template struct algo<2, J> { typedef denom::divider divider; }; - template struct algo<3, J> { typedef denom::divider divider; }; - template struct algo<4, J> { typedef denom::divider divider; }; - }; - -} - -template -class divider -{ - private: - typename libdivide_internal::divider_mid::template algo::divider sub; - template friend divider unswitch(const divider & d); - divider(const typename libdivide_internal::divider_mid::DenomType & denom) : sub(denom) { } - - public: - - /* Ordinary constructor, that takes the divisor as a parameter. */ - divider(T n) : sub(n) { } - - /* Default constructor, that divides by 1 */ - divider() : sub(1) { } - - /* Divides the parameter by the divisor, returning the quotient */ - T perform_divide(T val) const { return sub.perform_divide(val); } - -#if LIBDIVIDE_USE_SSE2 - /* Treats the vector as either two or four packed values (depending on the size), and divides each of them by the divisor, returning the packed quotients. */ - __m128i perform_divide_vector(__m128i val) const { return sub.perform_divide_vector(val); } -#endif - - /* Returns the index of algorithm, for use in the unswitch function */ - int get_algorithm() const { return sub.get_algorithm(); } // returns the algorithm for unswitching - - /* operator== */ - bool operator==(const divider & him) const { return sub.denom.magic == him.sub.denom.magic && sub.denom.more == him.sub.denom.more; } - - bool operator!=(const divider & him) const { return ! (*this == him); } +// The C++ divider class is templated on both an integer type +// (like uint64_t) and an algorithm type. +// * BRANCHFULL is the default algorithm type. +// * BRANCHFREE is the branchfree algorithm type. +enum { + BRANCHFULL, + BRANCHFREE }; -/* Returns a divider specialized for the given algorithm. */ -template -divider unswitch(const divider & d) { return divider(d.sub.denom); } - -/* Overload of the / operator for scalar division. 
*/
-template<typename int_type, int ALGO>
-int_type operator/(int_type numer, const divider<int_type, ALGO> & denom) {
-    return denom.perform_divide(numer);
-}
-
-#if LIBDIVIDE_USE_SSE2
-/* Overload of the / operator for vector division. */
-template<typename int_type, int ALGO>
-__m128i operator/(__m128i numer, const divider<int_type, ALGO> & denom) {
-    return denom.perform_divide_vector(numer);
-}
 #endif
-
-#endif //__cplusplus
-
-#endif //LIBDIVIDE_HEADER_ONLY
-#ifdef __cplusplus
-} //close namespace libdivide
-} //close anonymous namespace
+#if defined(LIBDIVIDE_AVX512)
+    #define LIBDIVIDE_VECTOR_TYPE __m512i
+#elif defined(LIBDIVIDE_AVX2)
+    #define LIBDIVIDE_VECTOR_TYPE __m256i
+#elif defined(LIBDIVIDE_SSE2)
+    #define LIBDIVIDE_VECTOR_TYPE __m128i
 #endif
-
+#if !defined(LIBDIVIDE_VECTOR_TYPE)
+    #define LIBDIVIDE_DIVIDE_VECTOR(ALGO)
+#else
+    #define LIBDIVIDE_DIVIDE_VECTOR(ALGO) \
+        LIBDIVIDE_VECTOR_TYPE divide(LIBDIVIDE_VECTOR_TYPE n) const { \
+            return libdivide_##ALGO##_do_vector(n, &denom); \
+        }
 #endif
-#pragma GCC diagnostic pop
+// The DISPATCHER_GEN() macro generates C++ methods (for the given integer
+// and algorithm types) that redirect to libdivide's C API.
+#define DISPATCHER_GEN(T, ALGO) \
+    libdivide_##ALGO##_t denom; \
+    dispatcher() { } \
+    dispatcher(T d) \
+        : denom(libdivide_##ALGO##_gen(d)) \
+    { } \
+    T divide(T n) const { \
+        return libdivide_##ALGO##_do(n, &denom); \
+    } \
+    LIBDIVIDE_DIVIDE_VECTOR(ALGO) \
+    T recover() const { \
+        return libdivide_##ALGO##_recover(&denom); \
+    }
+
+// The dispatcher selects a specific division algorithm for a given
+// type and ALGO using partial template specialization.
+template<bool INT, bool SIGNED, int SIZEOF, int ALGO> struct dispatcher { };
+
+template<> struct dispatcher<true, true, sizeof(int32_t), BRANCHFULL> { DISPATCHER_GEN(int32_t, s32) };
+template<> struct dispatcher<true, true, sizeof(int32_t), BRANCHFREE> { DISPATCHER_GEN(int32_t, s32_branchfree) };
+template<> struct dispatcher<true, false, sizeof(uint32_t), BRANCHFULL> { DISPATCHER_GEN(uint32_t, u32) };
+template<> struct dispatcher<true, false, sizeof(uint32_t), BRANCHFREE> { DISPATCHER_GEN(uint32_t, u32_branchfree) };
+template<> struct dispatcher<true, true, sizeof(int64_t), BRANCHFULL> { DISPATCHER_GEN(int64_t, s64) };
+template<> struct dispatcher<true, true, sizeof(int64_t), BRANCHFREE> { DISPATCHER_GEN(int64_t, s64_branchfree) };
+template<> struct dispatcher<true, false, sizeof(uint64_t), BRANCHFULL> { DISPATCHER_GEN(uint64_t, u64) };
+template<> struct dispatcher<true, false, sizeof(uint64_t), BRANCHFREE> { DISPATCHER_GEN(uint64_t, u64_branchfree) };
+
+// This is the main divider class for use by the user (C++ API).
+// The actual division algorithm is selected using the dispatcher struct
+// based on the integer and algorithm template parameters.
+template<typename T, int ALGO = BRANCHFULL>
+class divider {
+public:
+    // We leave the default constructor empty so that creating
+    // an array of dividers and then initializing them
+    // later doesn't slow us down.
+    divider() { }
+
+    // Constructor that takes the divisor as a parameter
+    divider(T d) : div(d) { }
+
+    // Divides n by the divisor
+    T divide(T n) const {
+        return div.divide(n);
+    }
+
+    // Recovers the divisor, returns the value that was
+    // used to initialize this divider object.
+    T recover() const {
+        return div.recover();
+    }
+
+    bool operator==(const divider& other) const {
+        return div.denom.magic == other.div.denom.magic &&
+               div.denom.more == other.div.denom.more;
+    }
+
+    bool operator!=(const divider& other) const {
+        return !(*this == other);
+    }
+
+#if defined(LIBDIVIDE_VECTOR_TYPE)
+    // Treats the vector as packed integer values with the same type as
+    // the divider (e.g. s32, u32, s64, u64) and divides each of
+    // them by the divider, returning the packed quotients.
+    LIBDIVIDE_VECTOR_TYPE divide(LIBDIVIDE_VECTOR_TYPE n) const {
+        return div.divide(n);
+    }
+#endif
+
+private:
+    // Storage for the actual divisor
+    dispatcher<std::is_integral<T>::value,
+               std::is_signed<T>::value, sizeof(T), ALGO> div;
+};
+
+// Overload of operator / for scalar division
+template<typename T, int ALGO>
+T operator/(T n, const divider<T, ALGO>& div) {
+    return div.divide(n);
+}
+
+// Overload of operator /= for scalar division
+template<typename T, int ALGO>
+T& operator/=(T& n, const divider<T, ALGO>& div) {
+    n = div.divide(n);
+    return n;
+}
+
+#if defined(LIBDIVIDE_VECTOR_TYPE)
+    // Overload of operator / for vector division
+    template<typename T, int ALGO>
+    LIBDIVIDE_VECTOR_TYPE operator/(LIBDIVIDE_VECTOR_TYPE n, const divider<T, ALGO>& div) {
+        return div.divide(n);
+    }
+    // Overload of operator /= for vector division
+    template<typename T, int ALGO>
+    LIBDIVIDE_VECTOR_TYPE& operator/=(LIBDIVIDE_VECTOR_TYPE& n, const divider<T, ALGO>& div) {
+        n = div.divide(n);
+        return n;
+    }
+#endif
+
+// libdivide::branchfree_divider
+template <typename T>
+using branchfree_divider = divider<T, BRANCHFREE>;
+
+} // namespace libdivide
+
+#endif // __cplusplus
+
+#endif // LIBDIVIDE_H
diff --git a/src/Functions/intDiv.cpp b/src/Functions/intDiv.cpp
index 0b6734c0136..062a374c00f 100644
--- a/src/Functions/intDiv.cpp
+++ b/src/Functions/intDiv.cpp
@@ -1,8 +1,9 @@
 #include
 #include
-#ifdef __SSE2__
-    #define LIBDIVIDE_USE_SSE2 1
+#if defined(__SSE2__)
+# define LIBDIVIDE_SSE2 1
+# define LIBDIVIDE_VECTOR_TYPE
 #endif
 
 #include <libdivide.h>
@@ -45,7 +46,7 @@ struct DivideIntegralByConstantImpl
 
     const A * a_end = a_pos + size;
 
-#ifdef __SSE2__
+#if defined(__SSE2__)
     static constexpr size_t values_per_sse_register = 16 / sizeof(A);
     const A * a_end_sse = a_pos + size / values_per_sse_register * values_per_sse_register;
 
diff --git a/src/Functions/modulo.cpp b/src/Functions/modulo.cpp
index 9e4409ca91b..631b7d12263 100644
--- a/src/Functions/modulo.cpp
+++ b/src/Functions/modulo.cpp
@@ -1,8 +1,8 @@
 #include
 #include
-#ifdef __SSE2__
-    #define LIBDIVIDE_USE_SSE2 1
+#if defined(__SSE2__)
+# define LIBDIVIDE_SSE2 1
 #endif
 
 #include <libdivide.h>
diff --git a/src/Interpreters/createBlockSelector.cpp b/src/Interpreters/createBlockSelector.cpp
index 2b08ca0845c..0759b9d9601 100644
--- a/src/Interpreters/createBlockSelector.cpp
+++ b/src/Interpreters/createBlockSelector.cpp
@@ -5,8 +5,8 @@
 #include
 
-#ifdef __SSE2__
-    #define LIBDIVIDE_USE_SSE2 1
+#if defined(__SSE2__)
+# define LIBDIVIDE_SSE2 1
 #endif
 
 #include <libdivide.h>

From 19bb2976b9f82eec86965f6dad0d5934f8eee826 Mon Sep 17 00:00:00 2001
From: Ivan <5627721+abyss7@users.noreply.github.com>
Date: Sat, 11 Apr 2020 14:16:14 +0300
Subject: [PATCH 309/484] Update pdqsort to recent version (#10171)

---
 contrib/pdqsort/pdqsort.h | 26 +++++++++++++-------------
 1 file changed, 13 insertions(+), 13 deletions(-)

diff --git a/contrib/pdqsort/pdqsort.h b/contrib/pdqsort/pdqsort.h
index 31eb06fece4..01e82b710ee 100644
--- a/contrib/pdqsort/pdqsort.h
+++ b/contrib/pdqsort/pdqsort.h
@@ -124,11 +124,9 @@ namespace pdqsort_detail {
     inline bool partial_insertion_sort(Iter begin, Iter end, Compare comp) {
         typedef typename std::iterator_traits<Iter>::value_type T;
         if (begin == end) return true;
-
-        int limit = 0;
-        for (Iter cur = begin + 1; cur != end; ++cur) {
-            if (limit > partial_insertion_sort_limit) return false;
+        std::size_t limit = 0;
+        for (Iter cur = begin + 1; cur != end; ++cur) {
             Iter sift = cur;
             Iter sift_1 = cur - 1;
 
@@ -142,6 +140,8 @@ namespace pdqsort_detail {
                 *sift = PDQSORT_PREFER_MOVE(tmp);
                 limit += cur - sift;
             }
+
+            if (limit > partial_insertion_sort_limit) return false;
         }
 
         return true;
@@ -232,7 +232,7 @@
         unsigned char* offsets_r
= align_cacheline(offsets_r_storage); int num_l, num_r, start_l, start_r; num_l = num_r = start_l = start_r = 0; - + while (last - first > 2 * block_size) { // Fill up offset blocks with elements that are on the wrong side. if (num_l == 0) { @@ -275,7 +275,7 @@ namespace pdqsort_detail { } int l_size = 0, r_size = 0; - int unknown_left = (last - first) - ((num_r || num_l) ? block_size : 0); + int unknown_left = (int)(last - first) - ((num_r || num_l) ? block_size : 0); if (num_r) { // Handle leftover block by assigning the unknown elements to the other block. l_size = unknown_left; @@ -311,7 +311,7 @@ namespace pdqsort_detail { start_l += num; start_r += num; if (num_l == 0) first += l_size; if (num_r == 0) last -= r_size; - + // We have now fully identified [first, last)'s proper position. Swap the last elements. if (num_l) { offsets_l += start_l; @@ -340,7 +340,7 @@ namespace pdqsort_detail { template inline std::pair partition_right(Iter begin, Iter end, Compare comp) { typedef typename std::iterator_traits::value_type T; - + // Move pivot into local for speed. T pivot(PDQSORT_PREFER_MOVE(*begin)); @@ -359,7 +359,7 @@ namespace pdqsort_detail { // If the first pair of elements that should be swapped to partition are the same element, // the passed in sequence already was correctly partitioned. bool already_partitioned = first >= last; - + // Keep swapping pairs of elements that are on the wrong side of the pivot. Previously // swapped pairs guard the searches, which is why the first iteration is special-cased // above. @@ -388,7 +388,7 @@ namespace pdqsort_detail { T pivot(PDQSORT_PREFER_MOVE(*begin)); Iter first = begin; Iter last = end; - + while (comp(pivot, *--last)); if (last + 1 == end) while (first < last && !comp(pivot, *++first)); @@ -475,11 +475,11 @@ namespace pdqsort_detail { std::iter_swap(pivot_pos - 3, pivot_pos - (l_size / 4 + 2)); } } - + if (r_size >= insertion_sort_threshold) { std::iter_swap(pivot_pos + 1, pivot_pos + (1 + r_size / 4)); std::iter_swap(end - 1, end - r_size / 4); - + if (r_size > ninther_threshold) { std::iter_swap(pivot_pos + 2, pivot_pos + (2 + r_size / 4)); std::iter_swap(pivot_pos + 3, pivot_pos + (3 + r_size / 4)); @@ -493,7 +493,7 @@ namespace pdqsort_detail { if (already_partitioned && partial_insertion_sort(begin, pivot_pos, comp) && partial_insertion_sort(pivot_pos + 1, end, comp)) return; } - + // Sort the left partition first using recursion and do tail recursion elimination for // the right-hand partition. 
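        // (Editor's note, not part of the patch: looping on the right-hand
        // partition instead of making a second recursive call keeps at most
        // one recursive call live per level, so the stack does not grow for
        // the right-hand ranges.)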
        pdqsort_loop(begin, pivot_pos, comp, bad_allowed, leftmost);

From 1484ab1f1a0b63e2d7816b66a9375682f73cf8a6 Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Sat, 11 Apr 2020 17:52:59 +0300
Subject: [PATCH 310/484] Fix machine translation #10191

---
 docs/ru/development/architecture.md           |  202 +-
 docs/ru/development/build.md                  |  140 +-
 docs/ru/development/build_cross_arm.md        |   42 +-
 docs/ru/development/build_cross_osx.md        |   63 +-
 docs/ru/development/build_osx.md              |   92 +-
 docs/ru/development/index.md                  |    9 +-
 docs/ru/development/tests.md                  |  251 +-
 .../engines/table_engines/special/generate.md |   60 +-
 docs/ru/getting_started/tutorial.md           |  670 +-----
 docs/ru/introduction/adopters.md              |   81 +-
 .../sampling_query_profiler.md                |   63 +-
 docs/ru/operations/performance_test.md        |   81 +-
 .../utilities/clickhouse-benchmark.md         |  155 +-
 docs/ru/whats_new/changelog/2017.md           |  267 +--
 docs/ru/whats_new/changelog/2018.md           | 1062 +--------
 docs/ru/whats_new/changelog/2019.md           | 2073 +----------------
 docs/ru/whats_new/roadmap.md                  |   18 +-
 17 files changed, 17 insertions(+), 5312 deletions(-)
 mode change 100644 => 120000 docs/ru/development/architecture.md
 mode change 100644 => 120000 docs/ru/development/build.md
 mode change 100644 => 120000 docs/ru/development/build_cross_arm.md
 mode change 100644 => 120000 docs/ru/development/build_cross_osx.md
 mode change 100644 => 120000 docs/ru/development/build_osx.md
 mode change 100644 => 120000 docs/ru/development/index.md
 mode change 100644 => 120000 docs/ru/development/tests.md
 mode change 100644 => 120000 docs/ru/engines/table_engines/special/generate.md
 mode change 100644 => 120000 docs/ru/getting_started/tutorial.md
 mode change 100644 => 120000 docs/ru/introduction/adopters.md
 mode change 100644 => 120000 docs/ru/operations/optimizing_performance/sampling_query_profiler.md
 mode change 100644 => 120000 docs/ru/operations/performance_test.md
 mode change 100644 => 120000 docs/ru/operations/utilities/clickhouse-benchmark.md
 mode change 100644 => 120000 docs/ru/whats_new/changelog/2017.md
 mode change 100644 => 120000 docs/ru/whats_new/changelog/2018.md
 mode change 100644 => 120000 docs/ru/whats_new/changelog/2019.md
 mode change 100644 => 120000 docs/ru/whats_new/roadmap.md

diff --git a/docs/ru/development/architecture.md b/docs/ru/development/architecture.md
deleted file mode 100644
index 0d1fc2ff947..00000000000
--- a/docs/ru/development/architecture.md
+++ /dev/null
@@ -1,201 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 1cd5f0028d917696daf71ac1c9ee849c99c1d5c8
----
-
-# Overview of ClickHouse Architecture {#overview-of-clickhouse-architecture}
-
-ClickHouse is a true column-oriented DBMS. Data is stored by columns, and during query execution, by arrays (vectors or chunks of columns). Whenever possible, operations are dispatched on arrays rather than on individual values. This is called "vectorized query execution," and it helps lower the cost of actual data processing.
-
-> This idea is nothing new. It dates back to the `APL` programming language and its descendants: `A +`, `J`, `K`, and `Q`. Array programming is used in scientific data processing. Neither is this idea something new in relational databases: for example, it is used in the `Vectorwise` system.
-
-There are two different approaches for speeding up query processing: vectorized query execution and runtime code generation. The latter removes all indirection and dynamic dispatch. Neither of these approaches is strictly better than the other. Runtime code generation can be better when it fuses many operations, thus fully utilizing CPU execution units and the pipeline. Vectorized query execution can be less practical because it involves temporary vectors that must be written to the cache and read back. If the temporary data does not fit in the L2 cache, this becomes an issue. But vectorized query execution more easily utilizes the SIMD capabilities of the CPU. A [research paper](http://15721.courses.cs.cmu.edu/spring2016/papers/p5-sompolski.pdf) written by our friends shows that it is better to combine both approaches. ClickHouse uses vectorized query execution and has limited initial support for runtime code generation.
-
-## Columns {#columns}
-
-The `IColumn` interface is used to represent columns in memory (actually, chunks of columns). This interface provides helper methods for the implementation of various relational operators. Almost all operations are immutable: they do not modify the original column, but create a new, modified one. For example, the `IColumn :: filter` method accepts a filter byte mask. It is used for the `WHERE` and `HAVING` relational operators. Additional examples: the `IColumn :: permute` method to support `ORDER BY`, the `IColumn :: cut` method to support `LIMIT`.
-
-Various `IColumn` implementations (`ColumnUInt8`, `ColumnString`, and so on) are responsible for the memory layout of columns. The memory layout is usually a contiguous array. For the integer type of columns, it is just one contiguous array, like `std :: vector`. For `String` and `Array` columns, it is two vectors: one for all array elements, placed contiguously, and a second one for offsets to the beginning of each array. There is also `ColumnConst`, which stores just one value in memory but looks like a column.
-
-## Field {#field}
-
-Nevertheless, it is possible to work with individual values as well. To represent an individual value, the `Field` is used. `Field` is just a discriminated union of `UInt64`, `Int64`, `Float64`, `String` and `Array`. `IColumn` has the `operator[]` method to get the n-th value as a `Field`, and the `insert` method to append a `Field` to the end of a column. These methods are not very efficient, because they require dealing with temporary `Field` objects representing an individual value. There are more efficient methods, such as `insertFrom`, `insertRangeFrom`, and so on.
-
-`Field` doesn't have enough information about a specific data type for a table. For example, `UInt8`, `UInt16`, `UInt32`, and `UInt64` are all represented as `UInt64` in a `Field`.
-
-## Leaky Abstractions {#leaky-abstractions}
-
-`IColumn` has methods for common relational transformations of data, but they don't meet all needs. For example, `ColumnUInt64` doesn't have a method to calculate the sum of two columns, and `ColumnString` doesn't have a method to run a substring search. These countless routines are implemented outside of `IColumn`.
-
-Various functions on columns can be implemented in a generic, non-efficient way using `IColumn` methods to extract `Field` values, or in a specialized way using knowledge of the inner memory layout of data in a specific `IColumn` implementation. This is done by casting functions to a specific `IColumn` type and dealing with the internal representation directly. For example, `ColumnUInt64` has the `getData` method that returns a reference to an internal array, and then a separate routine reads or fills that array directly. We have these "leaky abstractions" to allow efficient specializations of various routines.
-
-## Data Types {#data_types}
-
-`IDataType` is responsible for serialization and deserialization: for reading and writing chunks of columns or individual values in binary or text form. `IDataType` directly corresponds to data types in tables. For example, there are `DataTypeUInt32`, `DataTypeDateTime`, `DataTypeString` and so on.
-
-`IDataType` and `IColumn` are only loosely related to each other. Different data types can be represented in memory by the same `IColumn` implementations. For example, `DataTypeUInt32` and `DataTypeDateTime` are both represented by `ColumnUInt32` or `ColumnConstUInt32`. In addition, the same data type can be represented by different `IColumn` implementations. For example, `DataTypeUInt8` can be represented by `ColumnUInt8` or `ColumnConstUInt8`.
-
-`IDataType` only stores metadata. For instance, `DataTypeUInt8` doesn't store anything at all (except vptr), and `DataTypeFixedString` stores just `N` (the size of fixed-size strings).
-
-`IDataType` has helper methods for various data formats. Examples are methods to serialize a value with possible quoting, to serialize a value for JSON, and to serialize a value as part of the XML format. There is no direct correspondence to data formats. For example, the different data formats `Pretty` and `TabSeparated` can use the same `serializeTextEscaped` helper method from the `IDataType` interface.
-
-## Block {#block}
-
-A `Block` is a container that represents a subset (chunk) of a table in memory. It is just a set of triples: `(IColumn, IDataType, column name)`. During query execution, data is processed by `Block`s. If we have a `Block`, we have data (in the `IColumn` object), we have information about its type (in `IDataType`) that tells us how to deal with that column, and we have the column name. It could be either the original column name from the table or some artificial name assigned for getting temporary results of calculations.
-
-When we calculate some function over columns in a block, we add another column with its result to the block, and we don't touch the columns for the arguments of the function, because operations are immutable. Later, unneeded columns can be removed from the block, but not modified. This is convenient for the elimination of common subexpressions.
-
-Blocks are created for every processed chunk of data. Note that for the same type of calculation, the column names and types remain the same for different blocks, and only the column data changes. It is better to split the block data from the block header, because small block sizes have a high overhead of temporary strings for copying shared_ptrs and column names.
-
-## Block Streams {#block-streams}
-
-Block streams are for processing data. We use streams of blocks to read data from somewhere, perform data transformations, or write data somewhere. `IBlockInputStream` has the `read` method to fetch the next block while it is available. `IBlockOutputStream` has the `write` method to push a block somewhere.
-
-Streams are responsible for:
-
-1. Reading or writing to a table.
Таблица просто возвращает поток для чтения или записи блоков. -2. Реализация форматов данных. Например, если вы хотите вывести данные на терминал в `Pretty` форматирование, вы создаете поток вывода блока, где вы толкаете блоки, и он форматирует их. -3. Выполнение преобразований данных. Скажем так у вас есть `IBlockInputStream` и хотите создать отфильтрованный поток. Вы создаете `FilterBlockInputStream` и инициализируйте его с помощью своего потока. Затем, когда вы вытащите блок из `FilterBlockInputStream`, он извлекает блок из вашего потока, фильтрует его и возвращает отфильтрованный блок вам. Конвейеры выполнения запросов представлены таким образом. - -Есть и более сложные трансформации. Например, когда вы тянете из `AggregatingBlockInputStream`, он считывает все данные из своего источника, агрегирует их, а затем возвращает поток агрегированных данных для вас. Еще пример: `UnionBlockInputStream` принимает множество источников ввода в конструкторе, а также ряд потоков. Он запускает несколько потоков и читает из нескольких источников параллельно. - -> Потоки блокируют использовать «pull» подход к управлению потоком: когда вы вытягиваете блок из первого потока, он, следовательно, вытягивает необходимые блоки из вложенных потоков, и весь конвейер выполнения будет работать. Ни «pull» ни «push» это лучшее решение, потому что поток управления является неявным, и это ограничивает реализацию различных функций, таких как одновременное выполнение нескольких запросов (объединение многих конвейеров вместе). Это ограничение может быть преодолено с помощью сопрограмм или просто запуском дополнительных потоков, которые ждут друг друга. У нас может быть больше возможностей, если мы сделаем поток управления явным: если мы найдем логику для передачи данных из одной расчетной единицы в другую вне этих расчетных единиц. Читать это [статья](http://journal.stuffwithstuff.com/2013/01/13/iteration-inside-and-out/) для новых мыслей. - -Следует отметить, что конвейер выполнения запроса создает временные данные на каждом шаге. Мы стараемся держать размер блока достаточно маленьким, чтобы временные данные помещались в кэш процессора. При таком допущении запись и чтение временных данных практически бесплатны по сравнению с другими расчетами. Мы могли бы рассмотреть альтернативу, которая заключается в том, чтобы объединить многие операции в трубопроводе вместе. Это может сделать конвейер как можно короче и удалить большую часть временных данных, что может быть преимуществом, но у него также есть недостатки. Например, разделенный конвейер позволяет легко реализовать кэширование промежуточных данных, кражу промежуточных данных из аналогичных запросов, выполняемых одновременно, и объединение конвейеров для аналогичных запросов. - -## Форматы {#formats} - -Форматы данных реализуются с помощью блочных потоков. Есть «presentational» форматы, пригодные только для вывода данных клиенту, такие как `Pretty` формат, который предоставляет только `IBlockOutputStream`. И есть форматы ввода/вывода, такие как `TabSeparated` или `JSONEachRow`. - -Существуют также потоки подряд : `IRowInputStream` и `IRowOutputStream`. Они позволяют вытягивать / выталкивать данные отдельными строками, а не блоками. И они нужны только для упрощения реализации ориентированных на строки форматов. Обертка `BlockInputStreamFromRowInputStream` и `BlockOutputStreamFromRowOutputStream` позволяет конвертировать потоки, ориентированные на строки, в обычные потоки, ориентированные на блоки. 
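A minimal sketch of the pull model described in the block streams section above, under the simplifying assumption that a block is just an array of integers. `FilterStream` plays the role of `FilterBlockInputStream`: pulling from it pulls from its child, so the whole pipeline runs on demand.

``` cpp
#include <functional>
#include <memory>
#include <optional>
#include <utility>
#include <vector>

using Block = std::vector<int>;  // stand-in for (IColumn, IDataType, name) triples

// Pull-based stream: read() returns the next block, or nothing when exhausted.
struct IBlockInputStream
{
    virtual ~IBlockInputStream() = default;
    virtual std::optional<Block> read() = 0;
};

// A source that yields one fixed block and then ends.
struct OneShotSource : IBlockInputStream
{
    std::optional<Block> block;
    explicit OneShotSource(Block b) : block(std::move(b)) {}
    std::optional<Block> read() override { return std::exchange(block, std::nullopt); }
};

// A transforming stream: pulls a block from its child, filters it, returns it.
struct FilterStream : IBlockInputStream
{
    std::shared_ptr<IBlockInputStream> child;
    std::function<bool(int)> predicate;

    FilterStream(std::shared_ptr<IBlockInputStream> child_, std::function<bool(int)> pred)
        : child(std::move(child_)), predicate(std::move(pred)) {}

    std::optional<Block> read() override
    {
        auto block = child->read();  // pulling here drives the upstream pipeline
        if (!block)
            return std::nullopt;
        Block filtered;
        for (int v : *block)
            if (predicate(v))
                filtered.push_back(v);
        return filtered;
    }
};

int main()
{
    auto source = std::make_shared<OneShotSource>(Block{1, 2, 3, 4});
    FilterStream filtered(source, [](int v) { return v % 2 == 0; });
    while (auto block = filtered.read())
        ;  // each returned block contains only even values: {2, 4}
}
```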
- -## I/O {#io} - -Для байт-ориентированных входов / выходов существуют `ReadBuffer` и `WriteBuffer` абстрактный класс. Они используются вместо C++ `iostream`s. Не волнуйтесь: каждый зрелый проект C++ использует что-то другое, чем `iostream`s по уважительным причинам. - -`ReadBuffer` и `WriteBuffer` это просто непрерывный буфер и курсор, указывающий на позицию в этом буфере. Реализации могут владеть или не владеть памятью для буфера. Существует виртуальный метод заполнения буфера следующими данными (для `ReadBuffer`) или смыть буфер куда-нибудь (например `WriteBuffer`). Виртуальные методы редко вызываются. - -Реализация следующих принципов: `ReadBuffer`/`WriteBuffer` используются для работы с файлами и файловыми дескрипторами, а также сетевыми сокетами, для реализации сжатия (`CompressedWriteBuffer` is initialized with another WriteBuffer and performs compression before writing data to it), and for other purposes – the names `ConcatReadBuffer`, `LimitReadBuffer`, и `HashingWriteBuffer` за себя говорить. - -Буферы чтения/записи имеют дело только с байтами. Есть функции от `ReadHelpers` и `WriteHelpers` заголовочные файлы, чтобы помочь с форматированием ввода / вывода. Например, есть помощники для записи числа в десятичном формате. - -Давайте посмотрим, что происходит, когда вы хотите написать результирующий набор в `JSON` форматирование в stdout. У вас есть результирующий набор, готовый к извлечению из него `IBlockInputStream`. Вы создаете `WriteBufferFromFileDescriptor(STDOUT_FILENO)` чтобы записать байты в stdout. Вы создаете `JSONRowOutputStream`, инициализируется с помощью этого `WriteBuffer`, чтобы записать строки в `JSON` в stdout. Вы создаете `BlockOutputStreamFromRowOutputStream` кроме того, чтобы представить его как `IBlockOutputStream`. А потом ты позвонишь `copyData` для передачи данных из `IBlockInputStream` к `IBlockOutputStream` и все это работает. Внутренне, `JSONRowOutputStream` буду писать в формате JSON различные разделители и вызвать `IDataType::serializeTextJSON` метод со ссылкой на `IColumn` и номер строки в качестве аргументов. Следовательно, `IDataType::serializeTextJSON` вызовет метод из `WriteHelpers.h`: например, `writeText` для числовых типов и `writeJSONString` для `DataTypeString`. - -## Таблицы {#tables} - -То `IStorage` интерфейс представляет собой таблицы. Различные реализации этого интерфейса являются различными движками таблиц. Примеры `StorageMergeTree`, `StorageMemory` и так далее. Экземпляры этих классов являются просто таблицами. - -Ключ `IStorage` методы `read` и `write`. Есть и другие варианты `alter`, `rename`, `drop` и так далее. То `read` метод принимает следующие аргументы: набор столбцов для чтения из таблицы, набор столбцов для чтения из таблицы. `AST` запрос для рассмотрения и желаемое количество потоков для возврата. Он возвращает один или несколько `IBlockInputStream` объекты и информация о стадии обработки данных, которая была завершена внутри табличного движка во время выполнения запроса. - -В большинстве случаев метод read отвечает только за чтение указанных столбцов из таблицы, а не за дальнейшую обработку данных. Вся дальнейшая обработка данных осуществляется интерпретатором запросов и не входит в сферу ответственности компании `IStorage`. - -Но есть и заметные исключения: - -- Запрос AST передается на сервер `read` метод, и механизм таблиц может использовать его для получения использования индекса и считывания меньшего количества данных из таблицы. -- Иногда механизм таблиц может сам обрабатывать данные до определенного этапа. 
Например, `StorageDistributed` можно отправить запрос на удаленные серверы, попросить их обработать данные на этапе, когда данные с разных удаленных серверов могут быть объединены, и вернуть эти предварительно обработанные данные. Затем интерпретатор запросов завершает обработку данных. - -Стол `read` метод может возвращать несколько значений `IBlockInputStream` объекты, позволяющие осуществлять параллельную обработку данных. Эти несколько блочных входных потоков могут считываться из таблицы параллельно. Затем вы можете обернуть эти потоки с помощью различных преобразований (таких как вычисление выражений или фильтрация), которые могут быть вычислены независимо, и создать `UnionBlockInputStream` поверх них, чтобы читать из нескольких потоков параллельно. - -Есть и другие варианты `TableFunction`s. Это функции, которые возвращают временное значение `IStorage` объект для использования в `FROM` предложение запроса. - -Чтобы получить быстрое представление о том, как реализовать свой движок таблиц, посмотрите на что-то простое, например `StorageMemory` или `StorageTinyLog`. - -> В результате этого `read` метод, `IStorage` возвращается `QueryProcessingStage` – information about what parts of the query were already calculated inside storage. - -## Синтаксический анализатор {#parsers} - -Написанный от руки рекурсивный парсер спуска анализирует запрос. Например, `ParserSelectQuery` просто рекурсивно вызывает базовые Парсеры для различных частей запроса. Парсеры создают `AST`. То `AST` представлен узлами, которые являются экземплярами `IAST`. - -> Генераторы парсеров не используются по историческим причинам. - -## Переводчики {#interpreters} - -Интерпретаторы отвечают за создание конвейера выполнения запроса из `AST`. Есть простые переводчики, такие как `InterpreterExistsQuery` и `InterpreterDropQuery` или более изощренные `InterpreterSelectQuery`. Конвейер выполнения запроса представляет собой комбинацию блочных входных и выходных потоков. Например, результат интерпретации `SELECT` запросов `IBlockInputStream` для чтения результирующего набора из; результат запроса INSERT - это `IBlockOutputStream` чтобы записать данные для вставки в, и результат интерпретации `INSERT SELECT` запросов `IBlockInputStream` это возвращает пустой результирующий набор при первом чтении, но копирует данные из него `SELECT` к `INSERT` в то же время. - -`InterpreterSelectQuery` использует `ExpressionAnalyzer` и `ExpressionActions` машины для анализа запросов и преобразований. Именно здесь выполняется большинство оптимизаций запросов на основе правил. `ExpressionAnalyzer` это довольно грязно и должно быть переписано: различные преобразования запросов и оптимизации должны быть извлечены в отдельные классы, чтобы позволить модульные преобразования или запрос. - -## Функции {#functions} - -Существуют обычные функции и агрегатные функции. Агрегатные функции см. В следующем разделе. - -Ordinary functions don’t change the number of rows – they work as if they are processing each row independently. In fact, functions are not called for individual rows, but for `Block`’s данных для реализации векторизованного выполнения запросов. - -Есть некоторые другие функции, такие как [размер блока](../sql_reference/functions/other_functions.md#function-blocksize), [роунумберинблок](../sql_reference/functions/other_functions.md#function-rownumberinblock), и [runningAccumulate](../sql_reference/functions/other_functions.md#function-runningaccumulate), которые эксплуатируют обработку блоков и нарушают независимость строк. 
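A hedged illustration of that point: a toy "plus"-like ordinary function that is invoked once per block, loops over whole columns, and appends its result as a new column, leaving the argument columns untouched, as block immutability requires. Names and types here are invented; real ClickHouse functions operate on `IColumn`s with per-type dispatch.

``` cpp
#include <cstddef>
#include <cstdint>
#include <string>
#include <vector>

struct ColumnWithName
{
    std::string name;
    std::vector<int64_t> data;
};

using Block = std::vector<ColumnWithName>;

// An "ordinary" function: called for a whole block, not row by row. It reads
// two argument columns and appends a result column; the arguments stay as-is.
void executePlus(Block & block, size_t lhs_pos, size_t rhs_pos, const std::string & result_name)
{
    const auto & lhs = block[lhs_pos].data;
    const auto & rhs = block[rhs_pos].data;

    ColumnWithName result{result_name, {}};
    result.data.reserve(lhs.size());
    for (size_t row = 0; row < lhs.size(); ++row)
        result.data.push_back(lhs[row] + rhs[row]);  // one tight loop per block

    block.push_back(std::move(result));
}
```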
- -ClickHouse имеет сильную типизацию, поэтому нет никакого неявного преобразования типов. Если функция не поддерживает определенную комбинацию типов, она создает исключение. Но функции могут работать (перегружаться) для многих различных комбинаций типов. Например, в `plus` функция (для реализации `+` оператор) работает для любой комбинации числовых типов: `UInt8` + `Float32`, `UInt16` + `Int8` и так далее. Кроме того, некоторые вариадические функции могут принимать любое количество аргументов, например `concat` функция. - -Реализация функции может быть немного неудобной, поскольку функция явно отправляет поддерживаемые типы данных и поддерживается `IColumns`. Например, в `plus` функция имеет код, генерируемый экземпляром шаблона C++ для каждой комбинации числовых типов, а также постоянные или непостоянные левые и правые аргументы. - -Это отличное место для реализации генерации кода во время выполнения, чтобы избежать раздувания кода шаблона. Кроме того, он позволяет добавлять слитые функции, такие как fused multiply-add или выполнять несколько сравнений в одной итерации цикла. - -Из-за векторизованного выполнения запроса функции не закорачиваются. Например, если вы пишете `WHERE f(x) AND g(y)`, обе стороны вычисляются, даже для строк, когда `f(x)` равно нулю (за исключением тех случаев, когда `f(x)` является нулевым постоянным выражением). Но если избирательность самого `f(x)` состояние является высоким, и расчет `f(x)` это гораздо дешевле, чем `g(y)`, лучше всего реализовать многоходовой расчет. Это будет первый расчет `f(x)`, затем отфильтруйте столбцы по результату, а затем вычислите `g(y)` только для небольших отфильтрованных фрагментов данных. - -## Статистическая функция {#aggregate-functions} - -Агрегатные функции - это функции, определяющие состояние. Они накапливают переданные значения в некотором состоянии и позволяют получать результаты из этого состояния. Они управляются с помощью `IAggregateFunction` интерфейс. Состояния могут быть довольно простыми (состояние для `AggregateFunctionCount` это всего лишь один человек `UInt64` значение) или довольно сложное (состояние `AggregateFunctionUniqCombined` представляет собой комбинацию линейного массива, хэш-таблицы и `HyperLogLog` вероятностная структура данных). - -Государства распределяются в `Arena` (пул памяти) для работы с несколькими состояниями при выполнении высокой мощности `GROUP BY` запрос. Состояния могут иметь нетривиальный конструктор и деструктор: например, сложные агрегатные состояния могут сами выделять дополнительную память. Это требует некоторого внимания к созданию и уничтожению государств и правильной передаче их права собственности и порядка уничтожения. - -Агрегатные состояния могут быть сериализованы и десериализованы для передачи по сети во время выполнения распределенного запроса или для записи их на диск, где недостаточно оперативной памяти. Они даже могут храниться в таблице с `DataTypeAggregateFunction` чтобы разрешить инкрементное агрегирование данных. - -> Сериализованный формат данных для состояний агрегатных функций в настоящее время не является версионным. Это нормально, если агрегатные состояния хранятся только временно. Но у нас есть такая возможность `AggregatingMergeTree` механизм таблиц для инкрементного агрегирования, и люди уже используют его в производстве. Именно по этой причине обратная совместимость требуется при изменении сериализованного формата для любой агрегатной функции в будущем. 
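A minimal sketch in the spirit of `IAggregateFunction`, with invented names: an explicit state plus `add`, `merge`, and `serialize` operations. `merge` is what makes parallel and distributed GROUP BY possible, and `serialize` produces the state format whose backward compatibility the note above is concerned with.

``` cpp
#include <cstdint>
#include <ostream>

// Explicit aggregation state; real states live in an Arena and may own memory.
struct SumState
{
    int64_t sum = 0;
};

struct AggregateFunctionSum
{
    void add(SumState & state, int64_t value) const { state.sum += value; }

    // Merging partial states enables parallel and distributed aggregation.
    void merge(SumState & lhs, const SumState & rhs) const { lhs.sum += rhs.sum; }

    // Serialized state can cross the network or be stored on disk; once stored
    // persistently, its format must stay readable by future versions.
    void serialize(const SumState & state, std::ostream & out) const
    {
        out.write(reinterpret_cast<const char *>(&state.sum), sizeof(state.sum));
    }

    int64_t result(const SumState & state) const { return state.sum; }
};
```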
- -## Сервер {#server} - -Сервер реализует несколько различных интерфейсов: - -- Интерфейс HTTP для любых иностранных клиентов. -- TCP-интерфейс для собственного клиента ClickHouse и для межсерверной связи во время выполнения распределенного запроса. -- Интерфейс для передачи данных для репликации. - -Внутренне это просто примитивный многопоточный сервер без сопрограмм или волокон. Поскольку сервер предназначен не для обработки высокой скорости простых запросов, а для обработки относительно низкой скорости сложных запросов, каждый из них может обрабатывать огромное количество данных для аналитики. - -Сервер инициализирует программу `Context` класс с необходимой средой для выполнения запроса: список доступных баз данных, пользователей и прав доступа, настройки, кластеры, список процессов, журнал запросов и так далее. Переводчики используют эту среду. - -Мы поддерживаем полную обратную и прямую совместимость для протокола TCP сервера: старые клиенты могут разговаривать с новыми серверами, а новые клиенты-со старыми серверами. Но мы не хотим поддерживать его вечно, и мы удаляем поддержку старых версий примерно через год. - -!!! note "Примечание" - Для большинства внешних приложений мы рекомендуем использовать интерфейс HTTP, поскольку он прост и удобен в использовании. Протокол TCP более тесно связан с внутренними структурами данных: он использует внутренний формат для передачи блоков данных, а также использует пользовательское обрамление для сжатых данных. Мы не выпустили библиотеку C для этого протокола, потому что она требует связывания большей части кодовой базы ClickHouse, что нецелесообразно. - -## Выполнение Распределенных Запросов {#distributed-query-execution} - -Серверы в кластерной установке в основном независимы. Вы можете создать `Distributed` таблица на одном или всех серверах кластера. То `Distributed` table does not store data itself – it only provides a «view» ко всем локальным таблицам на нескольких узлах кластера. Когда вы выберите из `Distributed` таблица, он переписывает этот запрос, выбирает удаленные узлы в соответствии с настройками балансировки нагрузки и отправляет запрос к ним. То `Distributed` таблица запрашивает удаленные серверы для обработки запроса только до стадии, когда промежуточные результаты с разных серверов могут быть объединены. Затем он получает промежуточные результаты и сливает их. Распределенная таблица пытается распределить как можно больше работы на удаленные серверы и не отправляет много промежуточных данных по сети. - -Все становится сложнее, когда у вас есть подзапросы в предложениях IN или JOIN, и каждый из них использует a `Distributed` стол. У нас есть разные стратегии выполнения этих запросов. - -Глобального плана запросов для выполнения распределенных запросов не существует. Каждый узел имеет свой локальный план запроса для своей части задания. У нас есть только простое однопроходное распределенное выполнение запросов: мы отправляем запросы на удаленные узлы, а затем объединяем результаты. Но это неосуществимо для сложных запросов с высокой мощностью группы BYs или с большим количеством временных данных для соединения. В таких случаях нам необходимо: «reshuffle» данные между серверами, что требует дополнительной координации. ClickHouse не поддерживает такого рода выполнение запросов, и мы должны работать над этим. - -## Дерево Слияния {#merge-tree} - -`MergeTree` это семейство механизмов хранения данных, поддерживающих индексацию по первичному ключу. Первичный ключ может быть произвольным кортежем столбцов или выражений. 
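To illustrate what ordering by an arbitrary key tuple means, a small sketch with a hypothetical column set: rows compare lexicographically on the tuple, first by the leading column, then by the next one within equal prefixes.

``` cpp
#include <algorithm>
#include <cstdint>
#include <string>
#include <tuple>
#include <vector>

struct Row
{
    uint32_t counter_id;
    uint16_t date;       // days since epoch, as in the Date type
    std::string url;
};

// Rows inside a part are ordered lexicographically by the primary key tuple,
// here (counter_id, date): by counter_id first, then by date within it.
void sortByPrimaryKey(std::vector<Row> & rows)
{
    std::sort(rows.begin(), rows.end(), [](const Row & a, const Row & b)
    {
        return std::tie(a.counter_id, a.date) < std::tie(b.counter_id, b.date);
    });
}
```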
Данные в a `MergeTree` таблица хранится в «parts». Каждая часть хранит данные в порядке первичного ключа, поэтому данные лексикографически упорядочиваются кортежем первичного ключа. Все столбцы таблицы хранятся отдельно `column.bin` файлы в этих краях. Файлы состоят из сжатых блоков. Каждый блок обычно содержит от 64 КБ до 1 МБ несжатых данных, в зависимости от среднего размера значения. Блоки состоят из значений столбцов, расположенных последовательно друг за другом. Значения столбцов находятся в одном и том же порядке для каждого столбца (первичный ключ определяет порядок), поэтому при итерации по многим столбцам вы получаете значения для соответствующих строк. - -Сам первичный ключ является «sparse». Он адресует не каждую отдельную строку, а только некоторые диапазоны данных. Разделение `primary.idx` файл имеет значение первичного ключа для каждой N-й строки, где N называется `index_granularity` (обычно N = 8192). Кроме того, для каждой колонки у нас есть `column.mrk` файлы с «marks,» которые являются смещениями для каждой N-й строки в файле данных. Каждая метка представляет собой пару: смещение в файле к началу сжатого блока и смещение в распакованном блоке к началу данных. Обычно сжатые блоки выравниваются по меткам, а смещение в распакованном блоке равно нулю. Данные для `primary.idx` всегда находится в памяти, а данные для `column.mrk` файлы кэшируются. - -Когда мы собираемся прочитать что-то из части в `MergeTree`, мы смотрим на `primary.idx` данные и найдите диапазоны, которые могут содержать запрошенные данные, а затем посмотрите на `column.mrk` данные и рассчитать смещения для того, чтобы начать чтение этих диапазонов. Из-за разреженности могут быть прочитаны избыточные данные. ClickHouse не подходит для высокой загрузки простых точечных запросов, так как весь диапазон с `index_granularity` строки должны быть прочитаны для каждого ключа, и весь сжатый блок должен быть распакован для каждого столбца. Мы сделали индекс разреженным, потому что мы должны быть в состоянии поддерживать триллионы строк на одном сервере без заметного потребления памяти для индекса. Кроме того, поскольку первичный ключ разрежен, он не является уникальным: он не может проверить существование ключа в таблице во время вставки. В таблице может быть много строк с одним и тем же ключом. - -Когда вы `INSERT` куча данных в `MergeTree`, эта связка сортируется по порядку первичного ключа и образует новую часть. Существуют фоновые потоки, которые периодически выделяют некоторые детали и объединяют их в одну сортированную деталь, чтобы сохранить количество деталей относительно низким. Вот почему он так называется `MergeTree`. Конечно, слияние приводит к тому, что «write amplification». Все части неизменны: они только создаются и удаляются, но не изменяются. Когда SELECT выполняется, он содержит снимок таблицы (набор деталей). После слияния мы также сохраняем старые детали в течение некоторого времени, чтобы облегчить восстановление после сбоя, поэтому, если мы видим, что какая-то объединенная деталь, вероятно, сломана, мы можем заменить ее исходными частями. - -`MergeTree` это не дерево LSM, потому что оно не содержит «memtable» и «log»: inserted data is written directly to the filesystem. This makes it suitable only to INSERT data in batches, not by individual row and not very frequently – about once per second is ok, but a thousand times a second is not. We did it this way for simplicity’s sake, and because we are already inserting data in batches in our applications. 
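A simplified sketch of how a sparse index narrows a read, assuming a single numeric key column and one key entry per granule (real MergeTree marks also carry file offsets): binary search selects a half-open range of granules that may contain the requested key range, possibly including extra rows that are filtered out after reading.

``` cpp
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

// Sparse index: one primary key value per index_granularity rows
// (the contents of a simplified one-column primary.idx).
struct SparseIndex
{
    std::vector<uint64_t> keys;   // key of the first row of each granule, sorted
    size_t granularity = 8192;    // N rows per granule

    // Half-open range [begin, end) of granules that may contain keys in [from, to].
    std::pair<size_t, size_t> selectMarks(uint64_t from, uint64_t to) const
    {
        // First granule that starts strictly after `from`; step back one,
        // because the previous granule may still contain `from`.
        size_t begin = std::upper_bound(keys.begin(), keys.end(), from) - keys.begin();
        if (begin > 0)
            --begin;
        // Granules starting strictly after `to` cannot contain keys <= `to`.
        size_t end = std::upper_bound(keys.begin(), keys.end(), to) - keys.begin();
        return {begin, end};
    }
};
```

Because the index is sparse, the selected granules are a superset of the matching rows, which is exactly why whole granules must be read and decompressed even for point lookups.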
- -> Таблицы MergeTree могут иметь только один (первичный) индекс: вторичных индексов не существует. Было бы неплохо разрешить несколько физических представлений в одной логической таблице, например, хранить данные в более чем одном физическом порядке или даже разрешить представления с предварительно агрегированными данными наряду с исходными данными. - -Есть движки MergeTree, которые выполняют дополнительную работу во время фоновых слияний. Примеры `CollapsingMergeTree` и `AggregatingMergeTree`. Это можно рассматривать как специальную поддержку обновлений. Имейте в виду, что это не настоящие обновления, поскольку пользователи обычно не имеют никакого контроля над временем выполнения фоновых слияний, а данные в `MergeTree` таблица почти всегда хранится в нескольких частях, а не в полностью объединенном виде. - -## Копирование {#replication} - -Репликация в ClickHouse может быть настроена на основе каждой таблицы. Вы можете иметь некоторые реплицированные и некоторые нереплицированные таблицы на одном сервере. Вы также можете иметь таблицы, реплицируемые различными способами,например, одна таблица с двухфакторной репликацией, а другая-с трехфакторной. - -Репликация осуществляется в виде `ReplicatedMergeTree` подсистема хранилища. Путь в `ZooKeeper` указывается в качестве параметра для механизма хранения данных. Все таблицы с одинаковым путем внутри `ZooKeeper` становятся репликами друг друга: они синхронизируют свои данные и поддерживают согласованность. Реплики можно добавлять и удалять динамически, просто создавая или удаляя таблицу. - -Репликация использует асинхронную многомастерную схему. Вы можете вставить данные в любую реплику, которая имеет сеанс с `ZooKeeper`, и данные реплицируются во все остальные реплики асинхронно. Поскольку ClickHouse не поддерживает обновления, репликация является бесконфликтной. Поскольку нет подтверждения кворума вставок, только что вставленные данные могут быть потеряны, если один узел выйдет из строя. - -Метаданные для репликации хранятся в ZooKeeper. Существует журнал репликации, в котором перечислены необходимые действия. Действия таковы: получить часть; объединить части; удалить раздел и так далее. Каждая реплика копирует журнал репликации в свою очередь, а затем выполняет действия из этой очереди. Например, при вставке «get the part» действие создается в журнале, и каждая реплика загружает эту часть. Слияния координируются между репликами для получения идентичных байтам результатов. Все части объединяются одинаково на всех репликах. Это достигается путем выбора одной реплики в качестве лидера, и эта реплика инициирует слияние и запись «merge parts» действия по ведению журнала. - -Репликация является физической: между узлами передаются только сжатые части, а не запросы. Слияния обрабатываются на каждой реплике независимо в большинстве случаев, чтобы снизить затраты на сеть, избегая усиления сети. Большие объединенные части передаются по сети только в случаях значительного запаздывания репликации. - -Кроме того, каждая реплика хранит свое состояние в ZooKeeper как набор деталей и их контрольные суммы. Когда состояние локальной файловой системы отличается от эталонного состояния в ZooKeeper, реплика восстанавливает свою согласованность, загружая недостающие и сломанные части из других реплик. Когда в локальной файловой системе появляются неожиданные или неработающие данные, ClickHouse не удаляет их, а перемещает в отдельный каталог и забывает. - -!!! 
note "Примечание" - Кластер ClickHouse состоит из независимых сегментов, и каждый сегмент состоит из реплик. Кластер таков **неупругий**, поэтому после добавления нового осколка данные не будут автоматически перебалансированы между осколками. Вместо этого предполагается, что нагрузка на кластер будет регулироваться неравномерно. Эта реализация дает вам больше контроля, и это нормально для относительно небольших кластеров, таких как десятки узлов. Но для кластеров с сотнями узлов, которые мы используем в производстве, этот подход становится существенным недостатком. Мы должны реализовать механизм таблиц, который охватывает весь кластер с динамически реплицируемыми областями, которые могут быть разделены и сбалансированы между кластерами автоматически. - -{## [Оригинальная статья](https://clickhouse.tech/docs/en/development/architecture/) ##} diff --git a/docs/ru/development/architecture.md b/docs/ru/development/architecture.md new file mode 120000 index 00000000000..61968e46da2 --- /dev/null +++ b/docs/ru/development/architecture.md @@ -0,0 +1 @@ +en/development/architecture.md \ No newline at end of file diff --git a/docs/ru/development/build.md b/docs/ru/development/build.md deleted file mode 100644 index f0e0ccfd4aa..00000000000 --- a/docs/ru/development/build.md +++ /dev/null @@ -1,139 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 1cd5f0028d917696daf71ac1c9ee849c99c1d5c8 ---- - -# Как построить ClickHouse для развития {#how-to-build-clickhouse-for-development} - -Следующий учебник основан на системе Ubuntu Linux. -С соответствующими изменениями он также должен работать на любом другом дистрибутиве Linux. -Поддерживаемые платформы: x86\_64 и AArch64. Поддержка Power9 является экспериментальной. - -## Установите Git, CMake, Python и Ninja {#install-git-cmake-python-and-ninja} - -``` bash -$ sudo apt-get install git cmake python ninja-build -``` - -Или cmake3 вместо cmake на старых системах. - -## Установка GCC 9 {#install-gcc-9} - -Есть несколько способов сделать это. - -### Установка из PPA пакет {#install-from-a-ppa-package} - -``` bash -$ sudo apt-get install software-properties-common -$ sudo apt-add-repository ppa:ubuntu-toolchain-r/test -$ sudo apt-get update -$ sudo apt-get install gcc-9 g++-9 -``` - -### Установка из источников {#install-from-sources} - -Смотреть на [utils/ci/build-gcc-from-sources.sh](https://github.com/ClickHouse/ClickHouse/blob/master/utils/ci/build-gcc-from-sources.sh) - -## Использовать GCC для сборки 9 {#use-gcc-9-for-builds} - -``` bash -$ export CC=gcc-9 -$ export CXX=g++-9 -``` - -## Проверка Источников ClickHouse {#checkout-clickhouse-sources} - -``` bash -$ git clone --recursive git@github.com:ClickHouse/ClickHouse.git -``` - -или - -``` bash -$ git clone --recursive https://github.com/ClickHouse/ClickHouse.git -``` - -## Построить ClickHouse {#build-clickhouse} - -``` bash -$ cd ClickHouse -$ mkdir build -$ cd build -$ cmake .. -$ ninja -$ cd .. -``` - -Чтобы создать исполняемый файл, выполните команду `ninja clickhouse`. -Это позволит создать `programs/clickhouse` исполняемый файл, который может быть использован с `client` или `server` аргументы. 
- -# Как построить ClickHouse на любом Linux {#how-to-build-clickhouse-on-any-linux} - -Для сборки требуются следующие компоненты: - -- Git (используется только для проверки исходных текстов, он не нужен для сборки) -- CMake 3.10 или новее -- Ниндзя (рекомендуется) или сделать -- Компилятор C++: gcc 9 или clang 8 или новее -- Компоновщик: lld или gold (классический GNU ld не будет работать) -- Python (используется только внутри сборки LLVM и является необязательным) - -Если все компоненты установлены, Вы можете построить их так же, как и описанные выше шаги. - -Пример для Ubuntu Eoan: - - sudo apt update - sudo apt install git cmake ninja-build g++ python - git clone --recursive https://github.com/ClickHouse/ClickHouse.git - mkdir build && cd build - cmake ../ClickHouse - ninja - -Пример для OpenSUSE перекати-поле: - - sudo zypper install git cmake ninja gcc-c++ python lld - git clone --recursive https://github.com/ClickHouse/ClickHouse.git - mkdir build && cd build - cmake ../ClickHouse - ninja - -Пример для сыромятной кожи Fedora: - - sudo yum update - yum --nogpg install git cmake make gcc-c++ python2 - git clone --recursive https://github.com/ClickHouse/ClickHouse.git - mkdir build && cd build - cmake ../ClickHouse - make -j $(nproc) - -# Вам не нужно строить ClickHouse {#you-dont-have-to-build-clickhouse} - -ClickHouse доступен в готовых двоичных файлах и пакетах. Двоичные файлы являются портативными и могут быть запущены на любом вкусе Linux. - -Они созданы для стабильных, предустановленных и тестовых релизов до тех пор, пока для каждого коммита к мастеру и для каждого запроса на вытягивание. - -Чтобы найти самую свежую сборку из `master`, обратиться [совершает страницы](https://github.com/ClickHouse/ClickHouse/commits/master), нажмите на первую зеленую галочку или красный крестик рядом с фиксацией и нажмите на кнопку «Details» ссылка сразу после этого «ClickHouse Build Check». - -# Как создать пакет ClickHouse Debian {#how-to-build-clickhouse-debian-package} - -## Установите Git и Pbuilder {#install-git-and-pbuilder} - -``` bash -$ sudo apt-get update -$ sudo apt-get install git python pbuilder debhelper lsb-release fakeroot sudo debian-archive-keyring debian-keyring -``` - -## Проверка Источников ClickHouse {#checkout-clickhouse-sources-1} - -``` bash -$ git clone --recursive --branch master https://github.com/ClickHouse/ClickHouse.git -$ cd ClickHouse -``` - -## Запустить Сценарий Выпуска {#run-release-script} - -``` bash -$ ./release -``` - -[Оригинальная статья](https://clickhouse.tech/docs/en/development/build/) diff --git a/docs/ru/development/build.md b/docs/ru/development/build.md new file mode 120000 index 00000000000..156d8382515 --- /dev/null +++ b/docs/ru/development/build.md @@ -0,0 +1 @@ +en/development/build.md \ No newline at end of file diff --git a/docs/ru/development/build_cross_arm.md b/docs/ru/development/build_cross_arm.md deleted file mode 100644 index 184028212e9..00000000000 --- a/docs/ru/development/build_cross_arm.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 1cd5f0028d917696daf71ac1c9ee849c99c1d5c8 ---- - -# Как построить ClickHouse на Linux для архитектуры AArch64 (ARM64) {#how-to-build-clickhouse-on-linux-for-aarch64-arm64-architecture} - -Это для случая, когда у вас есть Linux-машина и вы хотите использовать ее для сборки `clickhouse` двоичный файл, который будет работать на другой машине Linux с архитектурой процессора AARCH64. 
Это предназначено для непрерывной проверки интеграции, которая выполняется на серверах Linux. - -Кросс-сборка для AARCH64 основана на следующих принципах: [Инструкции по сборке](build.md)- сначала следуйте за ними. - -# Установка Clang-8 {#install-clang-8} - -Следуйте инструкциям от https://apt.llvm.org/ для вашей установки Ubuntu или Debian. -Например, в Ubuntu Bionic вы можете использовать следующие команды: - -``` bash -echo "deb [trusted=yes] http://apt.llvm.org/bionic/ llvm-toolchain-bionic-8 main" | sudo tee /etc/apt/sources.list.d/llvm.list -sudo apt-get update -sudo apt-get install clang-8 -``` - -# Установка Набора Инструментов Перекрестной Компиляции {#install-cross-compilation-toolset} - -``` bash -cd ClickHouse -mkdir -p build-aarch64/cmake/toolchain/linux-aarch64 -wget 'https://developer.arm.com/-/media/Files/downloads/gnu-a/8.3-2019.03/binrel/gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz?revision=2e88a73f-d233-4f96-b1f4-d8b36e9bb0b9&la=en' -O gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz -tar xJf gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz -C build-aarch64/cmake/toolchain/linux-aarch64 --strip-components=1 -``` - -# Построить ClickHouse {#build-clickhouse} - -``` bash -cd ClickHouse -mkdir build-arm64 -CC=clang-8 CXX=clang++-8 cmake . -Bbuild-arm64 -DCMAKE_TOOLCHAIN_FILE=cmake/linux/toolchain-aarch64.cmake -ninja -C build-arm64 -``` - -Полученный двоичный файл будет работать только в Linux с архитектурой процессора AARCH64. diff --git a/docs/ru/development/build_cross_arm.md b/docs/ru/development/build_cross_arm.md new file mode 120000 index 00000000000..ea33bb61837 --- /dev/null +++ b/docs/ru/development/build_cross_arm.md @@ -0,0 +1 @@ +en/development/build_cross_arm.md \ No newline at end of file diff --git a/docs/ru/development/build_cross_osx.md b/docs/ru/development/build_cross_osx.md deleted file mode 100644 index 04d505f1a83..00000000000 --- a/docs/ru/development/build_cross_osx.md +++ /dev/null @@ -1,62 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 1cd5f0028d917696daf71ac1c9ee849c99c1d5c8 ---- - -# Как построить ClickHouse на Linux для Mac OS X {#how-to-build-clickhouse-on-linux-for-mac-os-x} - -Это для случая, когда у вас есть Linux-машина и вы хотите использовать ее для сборки `clickhouse` двоичный файл, который будет работать на OS X. Это предназначено для непрерывной проверки интеграции, которая выполняется на серверах Linux. Если вы хотите построить ClickHouse непосредственно на Mac OS X, то продолжайте [еще одна инструкция](build_osx.md). - -Кросс-сборка для Mac OS X основана на следующих принципах: [Инструкции по сборке](build.md)- сначала следуйте за ними. - -# Установка Clang-8 {#install-clang-8} - -Следуйте инструкциям от https://apt.llvm.org/ для вашей установки Ubuntu или Debian. -Например команды для Bionic выглядят так: - -``` bash -sudo echo "deb [trusted=yes] http://apt.llvm.org/bionic/ llvm-toolchain-bionic-8 main" >> /etc/apt/sources.list -sudo apt-get install clang-8 -``` - -# Установка Набора Инструментов Перекрестной Компиляции {#install-cross-compilation-toolset} - -Давайте вспомним путь, по которому мы устанавливаем `cctools` как ${CCTOOLS} - -``` bash -mkdir ${CCTOOLS} - -git clone https://github.com/tpoechtrager/apple-libtapi.git -cd apple-libtapi -INSTALLPREFIX=${CCTOOLS} ./build.sh -./install.sh -cd .. 
- -git clone https://github.com/tpoechtrager/cctools-port.git -cd cctools-port/cctools -./configure --prefix=${CCTOOLS} --with-libtapi=${CCTOOLS} --target=x86_64-apple-darwin -make install -``` - -Кроме того, нам нужно загрузить MacOS X SDK в рабочее дерево. - -``` bash -cd ClickHouse -wget 'https://github.com/phracker/MacOSX-SDKs/releases/download/10.14-beta4/MacOSX10.14.sdk.tar.xz' -mkdir -p build-darwin/cmake/toolchain/darwin-x86_64 -tar xJf MacOSX10.14.sdk.tar.xz -C build-darwin/cmake/toolchain/darwin-x86_64 --strip-components=1 -``` - -# Построить ClickHouse {#build-clickhouse} - -``` bash -cd ClickHouse -mkdir build-osx -CC=clang-8 CXX=clang++-8 cmake . -Bbuild-osx -DCMAKE_TOOLCHAIN_FILE=cmake/darwin/toolchain-x86_64.cmake \ - -DCMAKE_AR:FILEPATH=${CCTOOLS}/bin/x86_64-apple-darwin-ar \ - -DCMAKE_RANLIB:FILEPATH=${CCTOOLS}/bin/x86_64-apple-darwin-ranlib \ - -DLINKER_NAME=${CCTOOLS}/bin/x86_64-apple-darwin-ld -ninja -C build-osx -``` - -Полученный двоичный файл будет иметь исполняемый формат Mach-O и не может быть запущен в Linux. diff --git a/docs/ru/development/build_cross_osx.md b/docs/ru/development/build_cross_osx.md new file mode 120000 index 00000000000..d4dc16f2fbc --- /dev/null +++ b/docs/ru/development/build_cross_osx.md @@ -0,0 +1 @@ +en/development/build_cross_osx.md \ No newline at end of file diff --git a/docs/ru/development/build_osx.md b/docs/ru/development/build_osx.md deleted file mode 100644 index b218304d9d1..00000000000 --- a/docs/ru/development/build_osx.md +++ /dev/null @@ -1,91 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 1cd5f0028d917696daf71ac1c9ee849c99c1d5c8 ---- - -# Как построить ClickHouse на Mac OS X {#how-to-build-clickhouse-on-mac-os-x} - -Сборка должна работать на Mac OS X 10.15 (Catalina) - -## Установите Homebrew {#install-homebrew} - -``` bash -$ /usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)" -``` - -## Установите необходимые компиляторы, инструменты и библиотеки {#install-required-compilers-tools-and-libraries} - -``` bash -$ brew install cmake ninja libtool gettext -``` - -## Проверка Источников ClickHouse {#checkout-clickhouse-sources} - -``` bash -$ git clone --recursive git@github.com:ClickHouse/ClickHouse.git -``` - -или - -``` bash -$ git clone --recursive https://github.com/ClickHouse/ClickHouse.git - -$ cd ClickHouse -``` - -## Построить ClickHouse {#build-clickhouse} - -``` bash -$ mkdir build -$ cd build -$ cmake .. -DCMAKE_CXX_COMPILER=`which clang++` -DCMAKE_C_COMPILER=`which clang` -$ ninja -$ cd .. -``` - -## Предостережения {#caveats} - -Если вы собираетесь запустить clickhouse-сервер, убедитесь в том, чтобы увеличить параметром maxfiles системная переменная. - -!!! info "Примечание" - Вам нужно будет использовать sudo. - -Для этого создайте следующий файл: - -/Библиотека / LaunchDaemons / limit.параметром maxfiles.файл plist: - -``` xml - - - - - Label - limit.maxfiles - ProgramArguments - - launchctl - limit - maxfiles - 524288 - 524288 - - RunAtLoad - - ServiceIPC - - - -``` - -Выполните следующую команду: - -``` bash -$ sudo chown root:wheel /Library/LaunchDaemons/limit.maxfiles.plist -``` - -Перезагрузить. - -Чтобы проверить, работает ли он, вы можете использовать `ulimit -n` команда. 
- -[Оригинальная статья](https://clickhouse.tech/docs/en/development/build_osx/) diff --git a/docs/ru/development/build_osx.md b/docs/ru/development/build_osx.md new file mode 120000 index 00000000000..5c38a2b001a --- /dev/null +++ b/docs/ru/development/build_osx.md @@ -0,0 +1 @@ +en/development/build_osx.md \ No newline at end of file diff --git a/docs/ru/development/index.md b/docs/ru/development/index.md deleted file mode 100644 index 8bf31ed0d3f..00000000000 --- a/docs/ru/development/index.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 1cd5f0028d917696daf71ac1c9ee849c99c1d5c8 ---- - -# Разработка ClickHouse {#clickhouse-development} - -[Оригинальная статья](https://clickhouse.tech/docs/en/development/) diff --git a/docs/ru/development/index.md b/docs/ru/development/index.md new file mode 120000 index 00000000000..754385a9f4b --- /dev/null +++ b/docs/ru/development/index.md @@ -0,0 +1 @@ +en/development/index.md \ No newline at end of file diff --git a/docs/ru/development/tests.md b/docs/ru/development/tests.md deleted file mode 100644 index 1dfcdfdfe6f..00000000000 --- a/docs/ru/development/tests.md +++ /dev/null @@ -1,250 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 1cd5f0028d917696daf71ac1c9ee849c99c1d5c8 ---- - -# Тестирование ClickHouse {#clickhouse-testing} - -## Функциональные тесты {#functional-tests} - -Функциональные тесты являются наиболее простыми и удобными в использовании. Большинство функций ClickHouse можно протестировать с помощью функциональных тестов, и они обязательны для использования при каждом изменении кода ClickHouse, которое может быть протестировано таким образом. - -Каждый функциональный тест отправляет один или несколько запросов на запущенный сервер ClickHouse и сравнивает результат с эталонным результатом. - -Тесты расположены в каталоге `queries`. Существует два подкаталога: `stateless` и `stateful`. Тесты без состояния выполняют запросы без каких-либо предварительно загруженных тестовых данных: они часто создают небольшие синтетические наборы данных на лету, в самом тесте. Тесты с состоянием требуют предварительно загруженных тестовых данных из Яндекс.Метрики и недоступны широкой публике. Мы стараемся использовать только `stateless` тесты и избегаем добавления новых `stateful` тестов. - -Каждый тест может быть одного из двух типов: `.sql` и `.sh`. `.sql` тест - это простой SQL-скрипт, который передается по конвейеру в `clickhouse-client --multiquery --testmode`. `.sh` тест - это скрипт, который запускается сам по себе. - -Чтобы выполнить все тесты, используйте инструмент `clickhouse-test`. Смотрите `--help` для списка доступных опций. Вы можете просто запустить все тесты или запустить подмножество тестов, отфильтрованных по подстроке в имени теста: `./clickhouse-test substring`. - -Самый простой способ вызвать функциональные тесты - это скопировать `clickhouse-client` в `/usr/bin/`, запустить `clickhouse-server`, а затем запустить `./clickhouse-test` из его собственного каталога. - -Чтобы добавить новый тест, создайте файл `.sql` или `.sh` в каталоге `queries/0_stateless`, проверьте его вручную, а затем сгенерируйте файл `.reference` следующим образом: `clickhouse-client -n --testmode < 00000_test.sql > 00000_test.reference` или `./00000_test.sh > ./00000_test.reference`. - -Тесты должны использовать (создавать, удалять и т. д.) только таблицы в базе данных `test`; предполагается, что она создана заранее. Также тесты могут использовать временные таблицы.
- -Если вы хотите использовать распределенные запросы в функциональных тестах, вы можете использовать табличную функцию `remote` с адресами `127.0.0.{1..2}`, чтобы сервер отправлял запросы самому себе; или вы можете использовать предопределенные тестовые кластеры в файле конфигурации сервера, например `test_shard_localhost`. - -Некоторые тесты помечены словами `zookeeper`, `shard` или `long` в названии. -`zookeeper` - для тестов, которые используют ZooKeeper. `shard` - для тестов, которым -требуется сервер, прослушивающий `127.0.0.*`; `distributed` или `global` имеют то же -значение. `long` - для тестов, которые работают немного дольше одной секунды. Вы можете -отключить эти группы тестов с помощью опций `--no-zookeeper`, `--no-shard` и -`--no-long` соответственно. - -## Известные ошибки {#known-bugs} - -Если мы знаем некоторые ошибки, которые могут быть легко воспроизведены функциональными тестами, мы помещаем подготовленные функциональные тесты в каталог `queries/bugs`. Эти тесты будут перенесены в `queries/0_stateless`, когда ошибки будут исправлены. - -## Интеграционные Тесты {#integration-tests} - -Интеграционные тесты позволяют тестировать ClickHouse в кластерной конфигурации, а также взаимодействие ClickHouse с другими серверами, такими как MySQL, Postgres, MongoDB. Они полезны для эмуляции разделений сети, отбрасывания пакетов и т. д. Эти тесты выполняются в Docker и создают несколько контейнеров с различным программным обеспечением. - -Смотрите `tests/integration/README.md` о том, как проводить эти тесты. - -Обратите внимание, что интеграция ClickHouse со сторонними драйверами не тестируется. Кроме того, в настоящее время у нас нет интеграционных тестов с нашими драйверами JDBC и ODBC. - -## Модульное тестирование {#unit-tests} - -Модульные тесты полезны, если вы хотите протестировать не весь ClickHouse в целом, а одну изолированную библиотеку или класс. Вы можете включить или отключить сборку тестов с помощью опции CMake `ENABLE_TESTS`. Модульные тесты (и другие тестовые программы) расположены в подкаталогах `tests` по всему коду. Чтобы запустить модульные тесты, введите `ninja test`. Некоторые тесты используют `gtest`, но некоторые из них - это просто программы, которые возвращают ненулевой код выхода при сбое теста (набросок такого теста приведён ниже, после абзаца о тестах производительности). - -Не обязательно иметь модульные тесты, если код уже охвачен функциональными тестами (а функциональные тесты обычно гораздо более просты в использовании). - -## Тесты производительности {#performance-tests} - -Тесты производительности позволяют измерять и сравнивать производительность некоторой изолированной части ClickHouse на синтетических запросах. Тесты расположены в `tests/performance`. Каждый тест представлен файлом `.xml` с описанием тестового случая. Тесты выполняются с помощью инструмента `clickhouse performance-test` (который встроен в двоичный файл `clickhouse`). Смотрите `--help` для способа вызова. - -Каждый тест запускает один или несколько запросов (возможно, с комбинациями параметров) в цикле с некоторыми условиями остановки (например «maximum execution speed is not changing in three seconds») и измеряет некоторые показатели производительности запросов (например, «maximum execution speed»). Некоторые тесты могут содержать предварительные условия в виде предварительно загруженного тестового набора данных. - -Если вы хотите улучшить производительность ClickHouse в каком-то сценарии, и если улучшения могут наблюдаться в простых запросах, настоятельно рекомендуется написать тест производительности.
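The promised sketch of a gtest-based unit test, with an invented function under test; assuming the usual setup where the test binary is linked against `gtest_main`, no explicit `main` is needed.

``` cpp
#include <gtest/gtest.h>

#include <string>
#include <vector>

// The function under test: a stand-in for an isolated library routine.
std::vector<std::string> splitByChar(const std::string & s, char sep)
{
    std::vector<std::string> parts;
    std::string current;
    for (char c : s)
    {
        if (c == sep)
        {
            parts.push_back(current);
            current.clear();
        }
        else
            current += c;
    }
    parts.push_back(current);
    return parts;
}

TEST(SplitByChar, Basic)
{
    EXPECT_EQ(splitByChar("a,b,c", ','), (std::vector<std::string>{"a", "b", "c"}));
    EXPECT_EQ(splitByChar("", ','), (std::vector<std::string>{""}));
}
```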
Это всегда имеет смысл использовать `perf top` или другие инструменты perf во время ваших тестов. - -## Инструменты И Сценарии Тестирования {#test-tools-and-scripts} - -Некоторые программы в `tests` каталог-это не подготовленные тесты, а инструменты тестирования. Например, для `Lexer` есть такой инструмент `dbms/Parsers/tests/lexer` это просто делает токенизацию stdin и записывает раскрашенный результат в stdout. Вы можете использовать эти инструменты в качестве примеров кода, а также для исследования и ручного тестирования. - -Вы также можете разместить пару файлов `.sh` и `.reference` вместе с инструментом нужно запустить его на каком - то заранее заданном входе- тогда результат скрипта можно сравнить с `.reference` файл. Такого рода тесты не автоматизированы. - -## Различные Тесты {#miscellanous-tests} - -Существуют тесты для внешних словарей, расположенных по адресу `tests/external_dictionaries` и для машинно-обученных моделей в `tests/external_models`. Эти тесты не обновляются и должны быть перенесены в интеграционные тесты. - -Существует отдельный тест для вставки кворума. Этот тест запускает кластер ClickHouse на отдельных серверах и эмулирует различные случаи сбоя: разделение сети, отбрасывание пакетов (между узлами ClickHouse, между ClickHouse и ZooKeeper, между сервером ClickHouse и клиентом и т. д.), `kill -9`, `kill -STOP` и `kill -CONT` , любить [Джепсен](https://aphyr.com/tags/Jepsen). Затем тест проверяет, что все признанные вставки были записаны, а все отклоненные вставки-нет. - -Тест кворума был написан отдельной командой еще до того, как ClickHouse стал открытым исходным кодом. Эта команда больше не работает с ClickHouse. Тест был случайно написан на Java. По этим причинам тест кворума должен быть переписан и перенесен в интеграционные тесты. - -## Ручное тестирование {#manual-testing} - -Когда вы разрабатываете новую функцию, разумно также протестировать ее вручную. Вы можете сделать это с помощью следующих шагов: - -Постройте ClickHouse. Запустите ClickHouse из терминала: измените каталог на `programs/clickhouse-server` и запустить его с помощью `./clickhouse-server`. Он будет использовать конфигурацию (`config.xml`, `users.xml` и файлы внутри `config.d` и `users.d` каталоги) из текущего каталога по умолчанию. Чтобы подключиться к серверу ClickHouse, выполните команду `programs/clickhouse-client/clickhouse-client`. - -Обратите внимание, что все инструменты clickhouse (сервер, клиент и т. д.) являются просто символическими ссылками на один двоичный файл с именем `clickhouse`. Вы можете найти этот двоичный файл по адресу `programs/clickhouse`. Все инструменты также могут быть вызваны как `clickhouse tool` вместо `clickhouse-tool`. - -В качестве альтернативы вы можете установить пакет ClickHouse: либо стабильный релиз из репозитория Яндекса, либо вы можете построить пакет для себя с помощью `./release` в корне источников ClickHouse. Затем запустите сервер с помощью `sudo service clickhouse-server start` (или остановить, чтобы остановить сервер). Ищите журналы по адресу `/etc/clickhouse-server/clickhouse-server.log`. 
- -Когда ClickHouse уже установлен в вашей системе, вы можете построить новый `clickhouse` двоичный код и заменить существующий двоичный код: - -``` bash -$ sudo service clickhouse-server stop -$ sudo cp ./clickhouse /usr/bin/ -$ sudo service clickhouse-server start -``` - -Также вы можете остановить системный clickhouse-сервер и запустить свой собственный с той же конфигурацией, но с регистрацией в терминал: - -``` bash -$ sudo service clickhouse-server stop -$ sudo -u clickhouse /usr/bin/clickhouse server --config-file /etc/clickhouse-server/config.xml -``` - -Пример с gdb: - -``` bash -$ sudo -u clickhouse gdb --args /usr/bin/clickhouse server --config-file /etc/clickhouse-server/config.xml -``` - -Если системный clickhouse-сервер уже запущен, и вы не хотите его останавливать, вы можете изменить номера портов в своей системе. `config.xml` (или переопределить их в файле внутри `config.d` каталог), укажите соответствующий путь к данным и запустите его. - -`clickhouse` binary почти не имеет зависимостей и работает в широком диапазоне дистрибутивов Linux. Чтобы быстро и грязно протестировать свои изменения на сервере, вы можете просто `scp` ваша свежая постройка `clickhouse` двоичный файл на ваш сервер, а затем запустите его, как в приведенных выше примерах. - -## Тестовая среда {#testing-environment} - -Перед публикацией релиза как стабильного мы развертываем его в тестовой среде. Среда тестирования-это кластер, который обрабатывает 1/39 часть [Яндекс.Метрика](https://metrica.yandex.com/) данные. Мы делимся нашей тестовой средой с Яндексом.Команда метрики. ClickHouse обновляется без простоев поверх существующих данных. Мы смотрим сначала на то, что данные обрабатываются успешно, не отставая от реального времени, репликация продолжает работать и нет никаких проблем, видимых Яндексу.Команда метрики. Первую проверку можно провести следующим образом: - -``` sql -SELECT hostName() AS h, any(version()), any(uptime()), max(UTCEventTime), count() FROM remote('example01-01-{1..3}t', merge, hits) WHERE EventDate >= today() - 2 GROUP BY h ORDER BY h; -``` - -В некоторых случаях мы также развернуть на тестирование среды нашего друга команды Яндекса: Маркет, облако и т. д. Кроме того, у нас есть некоторые аппаратные серверы, которые используются для целей разработки. - -## Нагрузочное тестирование {#load-testing} - -После развертывания в среде тестирования мы запускаем нагрузочное тестирование с запросами из производственного кластера. Это делается вручную. - -Убедитесь, что вы включили `query_log` на вашем производственном кластере. - -Сбор журнала запросов в течение одного или нескольких дней: - -``` bash -$ clickhouse-client --query="SELECT DISTINCT query FROM system.query_log WHERE event_date = today() AND query LIKE '%ym:%' AND query NOT LIKE '%system.query_log%' AND type = 2 AND is_initial_query" > queries.tsv -``` - -Это очень сложный пример. `type = 2` будет фильтровать запросы, которые выполняются успешно. `query LIKE '%ym:%'` это выбор релевантных запросов от Яндекса.Метрика. `is_initial_query` это выбор только тех запросов, которые инициируются клиентом, а не самим ClickHouse (как части распределенной обработки запросов). - -`scp` это войдите в свой тестовый кластер и запустите его следующим образом: - -``` bash -$ clickhouse benchmark --concurrency 16 < queries.tsv -``` - -(вероятно, вы также хотите указать a `--user`) - -Затем оставьте его на ночь или выходные и идите отдыхать. 
- -Вы должны это проверить `clickhouse-server` не дает сбоя, объем памяти ограничен, а производительность не ухудшается с течением времени. - -Точные тайминги выполнения запросов не регистрируются и не сравниваются из-за высокой вариативности запросов и окружающей среды. - -## Построение Тестов {#build-tests} - -Тесты сборки позволяют проверить, что сборка не нарушается на различных альтернативных конфигурациях и на некоторых зарубежных системах. Тесты расположены по адресу `ci` каталог. Они запускают сборку из исходного кода внутри Docker, Vagrant, а иногда и с помощью `qemu-user-static` внутри Докер. Эти тесты находятся в стадии разработки, и тестовые запуски не автоматизированы. - -Мотивация: - -Обычно мы выпускаем и запускаем все тесты на одном варианте сборки ClickHouse. Но есть и альтернативные варианты сборки, которые не проходят тщательной проверки. Примеры: - -- сборка на FreeBSD; -- сборка на Debian с библиотеками из системных пакетов; -- сборка с общим связыванием библиотек; -- построить на платформе AArch64 ; -- постройте на платформе PowerPc. - -Например, сборка с системными пакетами-это плохая практика, потому что мы не можем гарантировать, какая именно версия пакетов будет у системы. Но это действительно необходимо сопровождающим Debian. По этой причине мы, по крайней мере, должны поддерживать этот вариант сборки. Другой пример: Общие ссылки-это общий источник проблем, но он необходим для некоторых энтузиастов. - -Хотя мы не можем выполнить все тесты на всех вариантах сборки, мы хотим проверить, по крайней мере, что различные варианты сборки не нарушены. Для этого мы используем тесты сборки. - -## Тестирование Совместимости Протоколов {#testing-for-protocol-compatibility} - -Когда мы расширяем сетевой протокол ClickHouse, мы вручную проверяем, что старый clickhouse-клиент работает с новым clickhouse-сервером, а новый clickhouse-клиент работает со старым clickhouse-сервером (просто запустив двоичные файлы из соответствующих пакетов). - -## Помощь От Компилятора {#help-from-the-compiler} - -Основной код ClickHouse (который находится в `dbms` каталог) строится с помощью `-Wall -Wextra -Werror` и с некоторыми дополнительными включенными предупреждениями. Хотя эти параметры не включены для сторонних библиотек. - -У Clang есть еще более полезные предупреждения - вы можете искать их с помощью `-Weverything` и выберите что-то для сборки по умолчанию. - -Для производственных сборок используется gcc (он все еще генерирует немного более эффективный код, чем clang). Для развития, лязгают, как правило, более удобны в использовании. Вы можете построить на своей собственной машине с режимом отладки (чтобы сэкономить батарею вашего ноутбука), но обратите внимание, что компилятор способен генерировать больше предупреждений с помощью `-O3` благодаря лучшему потоку управления и межпроцедурному анализу. При строительстве с лязгом, `libc++` используется вместо `libstdc++` и при построении с режимом отладки, отладочная версия `libc++` используется, что позволяет ловить больше ошибок во время выполнения. - -## Дезинфицирующее средство {#sanitizers} - -**Адрес дезинфицирующее средство**. -Мы проводим функциональные и интеграционные тесты в асане на фиксации основы. - -**С Valgrind (Помощи Valgrind)**. -Мы проводим функциональные тесты под Valgrind ночь. Это займет несколько часов. В настоящее время существует один известный ложноположительный результат в `re2` библиотека, см. [эта статья](https://research.swtch.com/sparse). 
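For illustration, the kind of defect AddressSanitizer reports in such runs: a reference into a `std::vector` that dangles after reallocation. This is a deliberately planted bug, not code from the ClickHouse tree.

``` cpp
#include <vector>

int main()
{
    std::vector<int> values{1, 2, 3};
    const int & first = values[0];
    values.resize(values.capacity() + 1);  // reallocates; `first` now dangles
    return first;                          // heap-use-after-free under ASan
}
```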
-
-**Undefined behavior sanitizer.**
-We run functional and integration tests under UBSan on a per-commit basis.
-
-**Thread sanitizer**.
-We run functional tests under TSan on a per-commit basis. We still don't run integration tests under TSan on a per-commit basis.
-
-**Memory sanitizer**.
-Currently we still don't use MSan.
-
-**Debug allocator.**
-The debug version of `jemalloc` is used for debug builds.
-
-## Fuzzing {#fuzzing}
-
-We use a simple fuzz test to generate random SQL queries and to check that the server doesn't die. Fuzz testing is performed with the address sanitizer. You can find it in `00746_sql_fuzzy.pl`. This test should be run continuously (overnight and longer).
-
-As of December 2018, we still don't use isolated fuzz testing of library code.
-
-## Security Audit {#security-audit}
-
-People from the Yandex Cloud department do some basic overview of ClickHouse capabilities from the security standpoint.
-
-## Static Analyzers {#static-analyzers}
-
-We run `PVS-Studio` on a per-commit basis. We have evaluated `clang-tidy`, `Coverity`, `cppcheck`, `PVS-Studio`, `tscancode`. You will find instructions for usage in the `tests/instructions/` directory. Also you can read [the article in Russian](https://habr.com/company/yandex/blog/342018/).
-
-If you use `CLion` as an IDE, you can leverage some `clang-tidy` checks out of the box.
-
-## Hardening {#hardening}
-
-`FORTIFY_SOURCE` is used by default. It is almost useless, but still makes sense in rare cases, and we don't disable it.
-
-## Code Style {#code-style}
-
-Code style rules are described [here](https://clickhouse.tech/docs/en/development/style/).
-
-To check for some common style violations, you can use the `utils/check-style` script.
-
-To force the proper style of your code, you can use `clang-format`. The file `.clang-format` is located at the sources root. It mostly corresponds to our actual code style. But it's not recommended to apply `clang-format` to existing files, because it makes the formatting worse. You can use the `clang-format-diff` tool that you can find in the Clang source repository.
-
-Alternatively, you can try the `uncrustify` tool to reformat your code. The configuration is in `uncrustify.cfg` at the sources root. It is less proven than `clang-format`.
-
-`CLion` has its own code formatter that has to be tuned for our code style.
-
-## Metrica B2B Tests {#metrica-b2b-tests}
-
-Each ClickHouse release is tested with the Yandex Metrica and AppMetrica engines. Testing and stable versions of ClickHouse are deployed on VMs and run with a small copy of the Metrica engine that processes a fixed sample of input data. Then the results of the two instances of the Metrica engine are compared together.
-
-These tests are automated by a separate team. Due to the high number of moving parts, the tests fail most of the time for completely unrelated reasons that are very difficult to figure out. Most likely these tests have negative value for us. Nevertheless, these tests proved to be useful in about one or two cases out of hundreds.
-
-## Test Coverage {#test-coverage}
-
-As of July 2018, we don't track test coverage.
-
-## Test Automation {#test-automation}
-
-We run tests with the Yandex internal CI and job automation system named «Sandbox».
-
-Build jobs and tests are run in Sandbox on a per-commit basis. The resulting packages and test results are published on GitHub and can be downloaded by direct links. Artifacts are stored eternally. When you send a pull request on GitHub, we tag it as «can be tested», and our CI system will build ClickHouse packages (release, debug, with address sanitizer, etc.) for you.
-
-We don't use Travis CI due to the limit on time and computational power.
-We don't use Jenkins. It was used before, and now we are happy we are not using Jenkins.
-
-[Original article](https://clickhouse.tech/docs/en/development/tests/)
diff --git a/docs/ru/development/tests.md b/docs/ru/development/tests.md
new file mode 120000
index 00000000000..ce23c881f32
--- /dev/null
+++ b/docs/ru/development/tests.md
@@ -0,0 +1 @@
+en/development/tests.md
\ No newline at end of file
diff --git a/docs/ru/engines/table_engines/special/generate.md b/docs/ru/engines/table_engines/special/generate.md
deleted file mode 100644
index 87004bfe5b1..00000000000
--- a/docs/ru/engines/table_engines/special/generate.md
+++ /dev/null
@@ -1,59 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 1cd5f0028d917696daf71ac1c9ee849c99c1d5c8
----
-
-# GenerateRandom {#table_engines-generate}
-
-The GenerateRandom table engine produces random data for the given table schema.
-
-Usage examples:
-
-- Use in a test to populate a reproducible large table.
-- Generate random input for fuzzing tests.
-
-## Usage in ClickHouse Server {#usage-in-clickhouse-server}
-
-``` sql
-ENGINE = GenerateRandom(random_seed, max_string_length, max_array_length)
-```
-
-The `max_array_length` and `max_string_length` parameters specify the maximum length of all
-array and string columns, respectively, in the generated data.
-
-The GenerateRandom table engine supports only `SELECT` queries.
-
-It supports all [data types](../../../engines/table_engines/special/generate.md) that can be stored in a table, except `LowCardinality` and `AggregateFunction`.
-
-**Example:**
-
-**1.** Set up the `generate_engine_table` table:
-
-``` sql
-CREATE TABLE generate_engine_table (name String, value UInt32) ENGINE = GenerateRandom(1, 5, 3)
-```
-
-**2.** Query the data:
-
-``` sql
-SELECT * FROM generate_engine_table LIMIT 3
-```
-
-``` text
-┌─name─┬──────value─┐
-│ c4xJ │ 1412771199 │
-│ r    │ 1791099446 │
-│ 7#$  │  124312908 │
-└──────┴────────────┘
-```
-
-## Details of Implementation {#details-of-implementation}
-
-- Not supported:
-    - `ALTER`
-    - `SELECT ... SAMPLE`
-    - `INSERT`
-    - Indices
-    - Replication
-
-[Original article](https://clickhouse.tech/docs/en/operations/table_engines/generate/)
diff --git a/docs/ru/engines/table_engines/special/generate.md b/docs/ru/engines/table_engines/special/generate.md
new file mode 120000
index 00000000000..631f9bbba66
--- /dev/null
+++ b/docs/ru/engines/table_engines/special/generate.md
@@ -0,0 +1 @@
+en/engines/table_engines/special/generate.md
\ No newline at end of file
diff --git a/docs/ru/getting_started/tutorial.md b/docs/ru/getting_started/tutorial.md
deleted file mode 100644
index 69cdeac8387..00000000000
--- a/docs/ru/getting_started/tutorial.md
+++ /dev/null
@@ -1,669 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 1cd5f0028d917696daf71ac1c9ee849c99c1d5c8
----
-
-# ClickHouse Tutorial {#clickhouse-tutorial}
-
-## What to Expect from This Tutorial? {#what-to-expect-from-this-tutorial}
-
-By going through this tutorial, you'll learn how to set up a simple ClickHouse cluster. It'll be small, but fault-tolerant and scalable. Then we will use one of the example datasets to fill it with data and execute some demo queries.
-
-## Single Node Setup {#single-node-setup}
-
-To postpone the complexities of a distributed environment, we'll start with deploying ClickHouse on a single server or virtual machine. ClickHouse is usually installed from [deb](install.md#install-from-deb-packages) or [rpm](install.md#from-rpm-packages) packages, but there are [alternatives](install.md#from-docker-image) for the operating systems that do not support them.
-
-For example, you have chosen `deb` packages and executed:
-
-``` bash
-sudo apt-get install dirmngr
-sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv E0C56BD4
-
-echo "deb http://repo.clickhouse.tech/deb/stable/ main/" | sudo tee /etc/apt/sources.list.d/clickhouse.list
-sudo apt-get update
-
-sudo apt-get install -y clickhouse-server clickhouse-client
-```
-
-What do we have in the packages that got installed:
-
-- The `clickhouse-client` package contains the [clickhouse-client](../interfaces/cli.md) application, an interactive ClickHouse console client.
-- The `clickhouse-common` package contains the ClickHouse executable file.
-- The `clickhouse-server` package contains configuration files to run ClickHouse as a server.
-
-Server config files are located in `/etc/clickhouse-server/`. Before going further, please notice the `<path>` element in `config.xml`. The path determines the location for data storage, so it should be located on a volume with large disk capacity; the default value is `/var/lib/clickhouse/`. If you want to adjust the configuration, it's not handy to directly edit the `config.xml` file, considering it might get rewritten on future package updates. The recommended way to override the config elements is to create [files in the config.d directory](../operations/configuration_files.md) which serve as «patches» to config.xml.
-
-As you might have noticed, `clickhouse-server` is not launched automatically after package installation. It won't be automatically restarted after updates either. The way you start the server depends on your init system; usually, it is:
-
-``` bash
-sudo service clickhouse-server start
-```
-
-or
-
-``` bash
-sudo /etc/init.d/clickhouse-server start
-```
-
-The default location for server logs is `/var/log/clickhouse-server/`.
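-
-To watch the startup progress, you can follow the server log; a minimal sketch (the log file name is the packaged default and may differ if reconfigured):
-
-``` bash
-# Block until the readiness message appears in the log.
-tail -f /var/log/clickhouse-server/clickhouse-server.log | grep --line-buffered -m1 'Ready for connections'
-```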
-The server is ready to handle client connections as soon as it logs the `Ready for connections` message.
-
-Once `clickhouse-server` is up and running, we can use `clickhouse-client` to connect to the server and run some test queries like `SELECT "Hello, world!";`.
-
-<details markdown="1">
-
-<summary>clickhouse-client quick tips</summary>
-
-Interactive mode:
-
-``` bash
-clickhouse-client
-clickhouse-client --host=... --port=... --user=... --password=...
-```
-
-Enable multiline queries:
-
-``` bash
-clickhouse-client -m
-clickhouse-client --multiline
-```
-
-Run queries in batch mode:
-
-``` bash
-clickhouse-client --query='SELECT 1'
-echo 'SELECT 1' | clickhouse-client
-clickhouse-client <<< 'SELECT 1'
-```
-
-Insert data from a file in the specified format:
-
-``` bash
-clickhouse-client --query='INSERT INTO table VALUES' < data.txt
-clickhouse-client --query='INSERT INTO table FORMAT TabSeparated' < data.tsv
-```
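-
-The same pattern extends to any other supported input format; for example, for a hypothetical `data.csv` whose columns match the table:
-
-``` bash
-clickhouse-client --query='INSERT INTO table FORMAT CSV' < data.csv
-```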
-</details>
-
-## Import Sample Dataset {#import-sample-dataset}
-
-Now it's time to fill our ClickHouse server with some sample data. In this tutorial, we'll use the anonymized data of Yandex.Metrica, the first service that ran ClickHouse in production before it became open-source (more on that in the [history section](../introduction/history.md)). There are [multiple ways to import the Yandex.Metrica dataset](example_datasets/metrica.md), and for the sake of the tutorial, we'll go with the most realistic one.
-
-### Download and Extract Table Data {#download-and-extract-table-data}
-
-``` bash
-curl https://clickhouse-datasets.s3.yandex.net/hits/tsv/hits_v1.tsv.xz | unxz --threads=`nproc` > hits_v1.tsv
-curl https://clickhouse-datasets.s3.yandex.net/visits/tsv/visits_v1.tsv.xz | unxz --threads=`nproc` > visits_v1.tsv
-```
-
-The extracted files are about 10 GB in size.
-
-### Create Tables {#create-tables}
-
-As in most database management systems, ClickHouse logically groups tables into «databases». There's a `default` database, but we'll create a new one named `tutorial`:
-
-``` bash
-clickhouse-client --query "CREATE DATABASE IF NOT EXISTS tutorial"
-```
-
-The syntax for creating tables is way more complicated compared to databases (see the [reference](../sql_reference/statements/create.md)). In general, a `CREATE TABLE` statement has to specify three key things:
-
-1. The name of the table to create.
-2. The table schema, i.e. the list of columns and their [data types](../sql_reference/data_types/index.md).
-3. The [table engine](../engines/table_engines/index.md) and its settings, which determine all the details of how queries to this table will be physically executed.
-
-Yandex.Metrica is a web analytics service, and the sample dataset doesn't cover its full functionality, so there are only two tables to create:
-
-- `hits` is a table with every action done by all users on all websites covered by the service.
-- `visits` is a table that contains pre-built sessions instead of individual actions.
-
-Let's look at and execute the real CREATE TABLE queries for these tables:
-
-``` sql
-CREATE TABLE tutorial.hits_v1
-(
-    `WatchID` UInt64,
-    `JavaEnable` UInt8,
-    `Title` String,
-    `GoodEvent` Int16,
-    `EventTime` DateTime,
-    `EventDate` Date,
-    `CounterID` UInt32,
-    `ClientIP` UInt32,
-    `ClientIP6` FixedString(16),
-    `RegionID` UInt32,
-    `UserID` UInt64,
-    `CounterClass` Int8,
-    `OS` UInt8,
-    `UserAgent` UInt8,
-    `URL` String,
-    `Referer` String,
-    `URLDomain` String,
-    `RefererDomain` String,
-    `Refresh` UInt8,
-    `IsRobot` UInt8,
-    `RefererCategories` Array(UInt16),
-    `URLCategories` Array(UInt16),
-    `URLRegions` Array(UInt32),
-    `RefererRegions` Array(UInt32),
-    `ResolutionWidth` UInt16,
-    `ResolutionHeight` UInt16,
-    `ResolutionDepth` UInt8,
-    `FlashMajor` UInt8,
-    `FlashMinor` UInt8,
-    `FlashMinor2` String,
-    `NetMajor` UInt8,
-    `NetMinor` UInt8,
-    `UserAgentMajor` UInt16,
-    `UserAgentMinor` FixedString(2),
-    `CookieEnable` UInt8,
-    `JavascriptEnable` UInt8,
-    `IsMobile` UInt8,
-    `MobilePhone` UInt8,
-    `MobilePhoneModel` String,
-    `Params` String,
-    `IPNetworkID` UInt32,
-    `TraficSourceID` Int8,
-    `SearchEngineID` UInt16,
-    `SearchPhrase` String,
-    `AdvEngineID` UInt8,
-    `IsArtifical` UInt8,
-    `WindowClientWidth` UInt16,
-    `WindowClientHeight` UInt16,
-    `ClientTimeZone` Int16,
-    `ClientEventTime` DateTime,
-    `SilverlightVersion1` UInt8,
-    `SilverlightVersion2` UInt8,
-    `SilverlightVersion3` UInt32,
-    `SilverlightVersion4` UInt16,
-    `PageCharset` String,
-    `CodeVersion` UInt32,
-    `IsLink` UInt8,
-    `IsDownload` UInt8,
-    `IsNotBounce` UInt8,
-    `FUniqID` UInt64,
-    `HID` UInt32,
-    `IsOldCounter` UInt8,
-    `IsEvent` UInt8,
-    `IsParameter` UInt8,
-    `DontCountHits` UInt8,
-    `WithHash` UInt8,
-    `HitColor` FixedString(1),
-    `UTCEventTime` DateTime,
-    `Age` UInt8,
-    `Sex` UInt8,
-    `Income` UInt8,
-    `Interests` UInt16,
-    `Robotness` UInt8,
-    `GeneralInterests` Array(UInt16),
-    `RemoteIP` UInt32,
-    `RemoteIP6` FixedString(16),
-    `WindowName` Int32,
-    `OpenerName` Int32,
-    `HistoryLength` Int16,
-    `BrowserLanguage` FixedString(2),
-    `BrowserCountry` FixedString(2),
-    `SocialNetwork` String,
-    `SocialAction` String,
-    `HTTPError` UInt16,
-    `SendTiming` Int32,
-    `DNSTiming` Int32,
-    `ConnectTiming` Int32,
-    `ResponseStartTiming` Int32,
-    `ResponseEndTiming` Int32,
-    `FetchTiming` Int32,
-    `RedirectTiming` Int32,
-    `DOMInteractiveTiming` Int32,
-    `DOMContentLoadedTiming` Int32,
-    `DOMCompleteTiming` Int32,
-    `LoadEventStartTiming` Int32,
-    `LoadEventEndTiming` Int32,
-    `NSToDOMContentLoadedTiming` Int32,
-    `FirstPaintTiming` Int32,
-    `RedirectCount` Int8,
-    `SocialSourceNetworkID` UInt8,
-    `SocialSourcePage` String,
-    `ParamPrice` Int64,
-    `ParamOrderID` String,
-    `ParamCurrency` FixedString(3),
-    `ParamCurrencyID` UInt16,
-    `GoalsReached` Array(UInt32),
-    `OpenstatServiceName` String,
-    `OpenstatCampaignID` String,
-    `OpenstatAdID` String,
-    `OpenstatSourceID` String,
-    `UTMSource` String,
-    `UTMMedium` String,
-    `UTMCampaign` String,
-    `UTMContent` String,
-    `UTMTerm` String,
-    `FromTag` String,
-    `HasGCLID` UInt8,
-    `RefererHash` UInt64,
-    `URLHash` UInt64,
-    `CLID` UInt32,
-    `YCLID` UInt64,
-    `ShareService` String,
-    `ShareURL` String,
-    `ShareTitle` String,
-    `ParsedParams` Nested(
-        Key1 String,
-        Key2 String,
-        Key3 String,
-        Key4 String,
-        Key5 String,
-        ValueDouble Float64),
-    `IslandID` FixedString(16),
-    `RequestNum` UInt32,
-    `RequestTry` UInt8
-)
-ENGINE = MergeTree()
-PARTITION BY toYYYYMM(EventDate)
-ORDER BY (CounterID, EventDate, intHash32(UserID))
-SAMPLE BY intHash32(UserID)
-SETTINGS index_granularity = 8192 -``` - -``` sql -CREATE TABLE tutorial.visits_v1 -( - `CounterID` UInt32, - `StartDate` Date, - `Sign` Int8, - `IsNew` UInt8, - `VisitID` UInt64, - `UserID` UInt64, - `StartTime` DateTime, - `Duration` UInt32, - `UTCStartTime` DateTime, - `PageViews` Int32, - `Hits` Int32, - `IsBounce` UInt8, - `Referer` String, - `StartURL` String, - `RefererDomain` String, - `StartURLDomain` String, - `EndURL` String, - `LinkURL` String, - `IsDownload` UInt8, - `TraficSourceID` Int8, - `SearchEngineID` UInt16, - `SearchPhrase` String, - `AdvEngineID` UInt8, - `PlaceID` Int32, - `RefererCategories` Array(UInt16), - `URLCategories` Array(UInt16), - `URLRegions` Array(UInt32), - `RefererRegions` Array(UInt32), - `IsYandex` UInt8, - `GoalReachesDepth` Int32, - `GoalReachesURL` Int32, - `GoalReachesAny` Int32, - `SocialSourceNetworkID` UInt8, - `SocialSourcePage` String, - `MobilePhoneModel` String, - `ClientEventTime` DateTime, - `RegionID` UInt32, - `ClientIP` UInt32, - `ClientIP6` FixedString(16), - `RemoteIP` UInt32, - `RemoteIP6` FixedString(16), - `IPNetworkID` UInt32, - `SilverlightVersion3` UInt32, - `CodeVersion` UInt32, - `ResolutionWidth` UInt16, - `ResolutionHeight` UInt16, - `UserAgentMajor` UInt16, - `UserAgentMinor` UInt16, - `WindowClientWidth` UInt16, - `WindowClientHeight` UInt16, - `SilverlightVersion2` UInt8, - `SilverlightVersion4` UInt16, - `FlashVersion3` UInt16, - `FlashVersion4` UInt16, - `ClientTimeZone` Int16, - `OS` UInt8, - `UserAgent` UInt8, - `ResolutionDepth` UInt8, - `FlashMajor` UInt8, - `FlashMinor` UInt8, - `NetMajor` UInt8, - `NetMinor` UInt8, - `MobilePhone` UInt8, - `SilverlightVersion1` UInt8, - `Age` UInt8, - `Sex` UInt8, - `Income` UInt8, - `JavaEnable` UInt8, - `CookieEnable` UInt8, - `JavascriptEnable` UInt8, - `IsMobile` UInt8, - `BrowserLanguage` UInt16, - `BrowserCountry` UInt16, - `Interests` UInt16, - `Robotness` UInt8, - `GeneralInterests` Array(UInt16), - `Params` Array(String), - `Goals` Nested( - ID UInt32, - Serial UInt32, - EventTime DateTime, - Price Int64, - OrderID String, - CurrencyID UInt32), - `WatchIDs` Array(UInt64), - `ParamSumPrice` Int64, - `ParamCurrency` FixedString(3), - `ParamCurrencyID` UInt16, - `ClickLogID` UInt64, - `ClickEventID` Int32, - `ClickGoodEvent` Int32, - `ClickEventTime` DateTime, - `ClickPriorityID` Int32, - `ClickPhraseID` Int32, - `ClickPageID` Int32, - `ClickPlaceID` Int32, - `ClickTypeID` Int32, - `ClickResourceID` Int32, - `ClickCost` UInt32, - `ClickClientIP` UInt32, - `ClickDomainID` UInt32, - `ClickURL` String, - `ClickAttempt` UInt8, - `ClickOrderID` UInt32, - `ClickBannerID` UInt32, - `ClickMarketCategoryID` UInt32, - `ClickMarketPP` UInt32, - `ClickMarketCategoryName` String, - `ClickMarketPPName` String, - `ClickAWAPSCampaignName` String, - `ClickPageName` String, - `ClickTargetType` UInt16, - `ClickTargetPhraseID` UInt64, - `ClickContextType` UInt8, - `ClickSelectType` Int8, - `ClickOptions` String, - `ClickGroupBannerID` Int32, - `OpenstatServiceName` String, - `OpenstatCampaignID` String, - `OpenstatAdID` String, - `OpenstatSourceID` String, - `UTMSource` String, - `UTMMedium` String, - `UTMCampaign` String, - `UTMContent` String, - `UTMTerm` String, - `FromTag` String, - `HasGCLID` UInt8, - `FirstVisit` DateTime, - `PredLastVisit` Date, - `LastVisit` Date, - `TotalVisits` UInt32, - `TraficSource` Nested( - ID Int8, - SearchEngineID UInt16, - AdvEngineID UInt8, - PlaceID UInt16, - SocialSourceNetworkID UInt8, - Domain String, - SearchPhrase String, - SocialSourcePage String), 
-
-    `Attendance` FixedString(16),
-    `CLID` UInt32,
-    `YCLID` UInt64,
-    `NormalizedRefererHash` UInt64,
-    `SearchPhraseHash` UInt64,
-    `RefererDomainHash` UInt64,
-    `NormalizedStartURLHash` UInt64,
-    `StartURLDomainHash` UInt64,
-    `NormalizedEndURLHash` UInt64,
-    `TopLevelDomain` UInt64,
-    `URLScheme` UInt64,
-    `OpenstatServiceNameHash` UInt64,
-    `OpenstatCampaignIDHash` UInt64,
-    `OpenstatAdIDHash` UInt64,
-    `OpenstatSourceIDHash` UInt64,
-    `UTMSourceHash` UInt64,
-    `UTMMediumHash` UInt64,
-    `UTMCampaignHash` UInt64,
-    `UTMContentHash` UInt64,
-    `UTMTermHash` UInt64,
-    `FromHash` UInt64,
-    `WebVisorEnabled` UInt8,
-    `WebVisorActivity` UInt32,
-    `ParsedParams` Nested(
-        Key1 String,
-        Key2 String,
-        Key3 String,
-        Key4 String,
-        Key5 String,
-        ValueDouble Float64),
-    `Market` Nested(
-        Type UInt8,
-        GoalID UInt32,
-        OrderID String,
-        OrderPrice Int64,
-        PP UInt32,
-        DirectPlaceID UInt32,
-        DirectOrderID UInt32,
-        DirectBannerID UInt32,
-        GoodID String,
-        GoodName String,
-        GoodQuantity Int32,
-        GoodPrice Int64),
-    `IslandID` FixedString(16)
-)
-ENGINE = CollapsingMergeTree(Sign)
-PARTITION BY toYYYYMM(StartDate)
-ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID)
-SAMPLE BY intHash32(UserID)
-SETTINGS index_granularity = 8192
-```
-
-You can execute those queries using the interactive mode of `clickhouse-client` (just launch it in a terminal without specifying a query in advance) or try some [alternative interface](../interfaces/index.md) if you want.
-
-As we can see, `hits_v1` uses the [basic MergeTree engine](../engines/table_engines/mergetree_family/mergetree.md), while `visits_v1` uses the [Collapsing](../engines/table_engines/mergetree_family/collapsingmergetree.md) variant.
-
-### Import Data {#import-data}
-
-Data import to ClickHouse is done via an [INSERT INTO](../sql_reference/statements/insert_into.md) query, like in many other SQL databases. However, data is usually provided in one of the [supported serialization formats](../interfaces/formats.md) instead of a `VALUES` clause (which is also supported).
-
-The files we downloaded earlier are in tab-separated format, so here's how to import them via the console client:
-
-``` bash
-clickhouse-client --query "INSERT INTO tutorial.hits_v1 FORMAT TSV" --max_insert_block_size=100000 < hits_v1.tsv
-clickhouse-client --query "INSERT INTO tutorial.visits_v1 FORMAT TSV" --max_insert_block_size=100000 < visits_v1.tsv
-```
-
-ClickHouse has a lot of [settings to tune](../operations/settings/index.md), and one way to specify them in the console client is via arguments, as we can see with `--max_insert_block_size`. The easiest way to figure out what settings are available, what they mean, and what the defaults are is to query the `system.settings` table:
-
-``` sql
-SELECT name, value, changed, description
-FROM system.settings
-WHERE name LIKE '%max_insert_b%'
-FORMAT TSV
-
-max_insert_block_size    1048576    0    "The maximum block size for insertion, if we control the creation of blocks for insertion."
-```
-
-Optionally you can [OPTIMIZE](../sql_reference/misc/#misc_operations-optimize) the tables after import. Tables that are configured with an engine from the MergeTree family always do merges of data parts in the background to optimize data storage (or at least check whether it makes sense).
Эти запросы заставляют механизм таблиц выполнять оптимизацию хранилища прямо сейчас, а не некоторое время спустя: - -``` bash -clickhouse-client --query "OPTIMIZE TABLE tutorial.hits_v1 FINAL" -clickhouse-client --query "OPTIMIZE TABLE tutorial.visits_v1 FINAL" -``` - -Эти запросы запускают интенсивную работу ввода-вывода и процессора, поэтому, если таблица постоянно получает новые данные, лучше оставить ее в покое и позволить слияниям работать в фоновом режиме. - -Теперь мы можем проверить, был ли импорт таблицы успешным: - -``` bash -clickhouse-client --query "SELECT COUNT(*) FROM tutorial.hits_v1" -clickhouse-client --query "SELECT COUNT(*) FROM tutorial.visits_v1" -``` - -## Пример запроса {#example-queries} - -``` sql -SELECT - StartURL AS URL, - AVG(Duration) AS AvgDuration -FROM tutorial.visits_v1 -WHERE StartDate BETWEEN '2014-03-23' AND '2014-03-30' -GROUP BY URL -ORDER BY AvgDuration DESC -LIMIT 10 -``` - -``` sql -SELECT - sum(Sign) AS visits, - sumIf(Sign, has(Goals.ID, 1105530)) AS goal_visits, - (100. * goal_visits) / visits AS goal_percent -FROM tutorial.visits_v1 -WHERE (CounterID = 912887) AND (toYYYYMM(StartDate) = 201403) AND (domain(StartURL) = 'yandex.ru') -``` - -## Развертывание Кластера {#cluster-deployment} - -Кластер ClickHouse-это однородный кластер. Шаги для настройки: - -1. Установите сервер ClickHouse на всех компьютерах кластера -2. Настройка конфигураций кластера в файлах конфигурации -3. Создание локальных таблиц на каждом экземпляре -4. Создать [Распространены таблицы](../engines/table_engines/special/distributed.md) - -[Распространены таблицы](../engines/table_engines/special/distributed.md) это на самом деле своего рода «view» к локальным таблицам кластера ClickHouse. Запрос SELECT из распределенной таблицы выполняется с использованием ресурсов всех сегментов кластера. Вы можете указать конфигурации для нескольких кластеров и создать несколько распределенных таблиц, предоставляющих представления для разных кластеров. - -Пример конфигурации для кластера с тремя сегментами, по одной реплике в каждом: - -``` xml - - - - - example-perftest01j.yandex.ru - 9000 - - - - - example-perftest02j.yandex.ru - 9000 - - - - - example-perftest03j.yandex.ru - 9000 - - - - -``` - -Для дальнейшей демонстрации давайте создадим новую локальную таблицу с тем же именем `CREATE TABLE` запрос, который мы использовали для `hits_v1`, но другое имя таблицы: - -``` sql -CREATE TABLE tutorial.hits_local (...) ENGINE = MergeTree() ... -``` - -Создание распределенной таблицы, предоставляющей представление в локальные таблицы кластера: - -``` sql -CREATE TABLE tutorial.hits_all AS tutorial.hits_local -ENGINE = Distributed(perftest_3shards_1replicas, tutorial, hits_local, rand()); -``` - -Распространенной практикой является создание одинаковых распределенных таблиц на всех машинах кластера. Он позволяет выполнять распределенные запросы на любой машине кластера. Кроме того, существует альтернативный вариант создания временной распределенной таблицы для данного запроса SELECT с помощью [удаленный](../sql_reference/table_functions/remote.md) табличная функция. - -Давай убежим [INSERT SELECT](../sql_reference/statements/insert_into.md) в распределенную таблицу, чтобы распространить таблицу на несколько серверов. - -``` sql -INSERT INTO tutorial.hits_all SELECT * FROM tutorial.hits_v1; -``` - -!!! warning "Уведомление" - Такой подход не подходит для сегментации больших столов. 
-    There is a separate tool, [clickhouse-copier](../operations/utilities/clickhouse-copier.md), that can re-shard arbitrary large tables.
-
-As you could expect, computationally heavy queries run N times faster if they utilize 3 servers instead of one.
-
-In this case, we have used a cluster with 3 shards, each containing a single replica.
-
-To provide resilience in a production environment, we recommend that each shard contain 2-3 replicas spread between multiple availability zones or datacenters (or at least racks). Note that ClickHouse supports an unlimited number of replicas.
-
-Example config for a cluster of one shard containing three replicas:
-
-``` xml
-<remote_servers>
-    ...
-    <perftest_1shards_3replicas>
-        <shard>
-            <replica>
-                <host>example-perftest01j.yandex.ru</host>
-                <port>9000</port>
-            </replica>
-            <replica>
-                <host>example-perftest02j.yandex.ru</host>
-                <port>9000</port>
-            </replica>
-            <replica>
-                <host>example-perftest03j.yandex.ru</host>
-                <port>9000</port>
-            </replica>
-        </shard>
-    </perftest_1shards_3replicas>
-</remote_servers>
-```
-
-To enable native replication, [ZooKeeper](http://zookeeper.apache.org/) is required. ClickHouse takes care of data consistency on all replicas and runs a restore procedure after failure automatically. It's recommended to deploy the ZooKeeper cluster on separate servers (where no other processes, including ClickHouse, are running).
-
-!!! note "Note"
-    ZooKeeper is not a strict requirement: in some simple cases, you can duplicate the data by writing it into all the replicas from your application code. This approach is **not** recommended, because in this case ClickHouse won't be able to guarantee data consistency on all replicas. Thus, it becomes the responsibility of your application.
-
-ZooKeeper locations are specified in the configuration file:
-
-``` xml
-<zookeeper>
-    <node>
-        <host>zoo01.yandex.ru</host>
-        <port>2181</port>
-    </node>
-    <node>
-        <host>zoo02.yandex.ru</host>
-        <port>2181</port>
-    </node>
-    <node>
-        <host>zoo03.yandex.ru</host>
-        <port>2181</port>
-    </node>
-</zookeeper>
-```
-
-Also, we need to set macros for identifying each shard and replica that are used on table creation:
-
-``` xml
-<macros>
-    <shard>01</shard>
-    <replica>01</replica>
-</macros>
-```
-
-If there are no replicas at the moment of replicated table creation, a new first replica is instantiated. If there are already live replicas, the new replica clones the data from the existing ones. You have the option to create all replicated tables first and then insert data into them. Another option is to create some replicas and add the others after or during data insertion.
-
-``` sql
-CREATE TABLE tutorial.hits_replica (...)
-ENGINE = ReplicatedMergeTree(
-    '/clickhouse_perftest/tables/{shard}/hits',
-    '{replica}'
-)
-...
-```
-
-Here we use the [ReplicatedMergeTree](../engines/table_engines/mergetree_family/replication.md) table engine. In the parameters, we specify the ZooKeeper path containing the shard and replica identifiers.
-
-``` sql
-INSERT INTO tutorial.hits_replica SELECT * FROM tutorial.hits_local;
-```
-
-Replication operates in multi-master mode. Data can be loaded into any replica, and the system then syncs it with the other instances automatically. Replication is asynchronous, so at a given moment, not all replicas may contain recently inserted data. At least one replica should be up to allow data ingestion. Others will sync up the data and repair consistency once they become active again. Note that this approach allows for a low possibility of loss of recently inserted data.
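-
-To check how the replicas of such a table are doing, you can query the `system.replicas` table; a minimal sketch (the column set is an assumption based on the documentation of this period, so adjust it to what your server actually exposes):
-
-``` bash
-clickhouse-client --query "
-    SELECT database, table, is_leader, is_readonly, total_replicas, active_replicas
-    FROM system.replicas
-    WHERE table = 'hits_replica'"
-```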
- -[Оригинальная статья](https://clickhouse.tech/docs/en/getting_started/tutorial/) diff --git a/docs/ru/getting_started/tutorial.md b/docs/ru/getting_started/tutorial.md new file mode 120000 index 00000000000..18b86bb2e9c --- /dev/null +++ b/docs/ru/getting_started/tutorial.md @@ -0,0 +1 @@ +en/getting_started/tutorial.md \ No newline at end of file diff --git a/docs/ru/introduction/adopters.md b/docs/ru/introduction/adopters.md deleted file mode 100644 index 5f8b825353c..00000000000 --- a/docs/ru/introduction/adopters.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -machine_translated: true -machine_translated_rev: 1cd5f0028d917696daf71ac1c9ee849c99c1d5c8 ---- - -# Пользователи ClickHouse {#clickhouse-adopters} - -!!! warning "Оговорка" - Следующий список компаний, использующих ClickHouse, и их истории успеха собраны из открытых источников, поэтому они могут отличаться от текущей реальности. Мы были бы очень признательны, если бы вы поделились историей принятия ClickHouse в свою компанию и [добавьте его в список](https://github.com/ClickHouse/ClickHouse/edit/master/docs/en/introduction/adopters.md), но, пожалуйста, убедитесь, что у вас не будет никаких проблем с NDA, сделав это. Предоставление обновлений с публикациями от других компаний также полезно. - -| Компания | Промышленность | Usecase | Размер кластера | (Un)Сжатый Размер Данных\* | Ссылка | -|---------------------------------------------------------------------------------|----------------------------------------|-----------------------------|------------------------------------------------------------|------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| [2ГИС](https://2gis.ru) | Карты | Мониторинг | — | — | [Говорить по-русски, июль 2019](https://youtu.be/58sPkXfq6nw) | -| [Браузер Aloha](https://alohabrowser.com/) | Мобильное приложение | Серверная часть браузера | — | — | [Слайды на русском языке, май 2019 года](https://github.com/yandex/clickhouse-presentations/blob/master/meetup22/aloha.pdf) | -| [Компания Amadeus](https://amadeus.com/) | Путешествовать | Аналитика | — | — | [Пресс-Релиз, Апрель 2018 Года](https://www.altinity.com/blog/2018/4/5/amadeus-technologies-launches-investment-and-insights-tool-based-on-machine-learning-and-strategy-algorithms) | -| [Компания](https://www.appsflyer.com) | Мобильная аналитика | Главный продукт | — | — | [Говорить по-русски, июль 2019](https://www.youtube.com/watch?v=M3wbRlcpBbY) | -| [ArenaData](https://arenadata.tech/) | Платформа данных | Главный продукт | — | — | [Слайды на русском языке, декабрь 2019 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup38/indexes.pdf) | -| [На Badoo](https://badoo.com) | Знакомства | Таймсерии | — | — | [Слайды на русском языке, декабрь 2019 года](https://presentations.clickhouse.tech/meetup38/forecast.pdf) | -| [Бенокс](https://www.benocs.com/) | Сетевая телеметрия и аналитика | Главный продукт | — | — | [Слайды на английском языке, октябрь 2017 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup9/lpm.pdf) | -| [Блумберг](https://www.bloomberg.com/) | Финансы, СМИ | Мониторинг | 102 сервера | — | [Слайды, Май 2018 
Года](https://www.slideshare.net/Altinity/http-analytics-for-6m-requests-per-second-using-clickhouse-by-alexander-bocharov) | -| [Блокси](https://bloxy.info) | Блокчейн | Аналитика | — | — | [Слайды на русском языке, август 2018 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/4_bloxy.pptx) | -| `Dataliance/UltraPower` | Телекоммуникационный | Аналитика | — | — | [Слайды на китайском языке, январь 2018 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup12/telecom.pdf) | -| [CARTO](https://carto.com/) | Бизнес-разведка | Гео аналитика | — | — | [Геопространственная обработка с помощью Clickhouse](https://carto.com/blog/geospatial-processing-with-clickhouse/) | -| [CERN](http://public.web.cern.ch/public/) | Исследование | Эксперимент | — | — | [Пресс-релиз, апрель 2012 года](https://www.yandex.com/company/press_center/press_releases/2012/2012-04-10/) | -| [Компании Cisco](http://cisco.com/) | Сетевой | Анализ трафика | — | — | [Молниеносный разговор, октябрь 2019 года](https://youtu.be/-hI1vDR2oPY?t=5057) | -| [Ценные Бумаги Цитадели](https://www.citadelsecurities.com/) | Финансы | — | — | — | [Взнос, Март 2019 Года](https://github.com/ClickHouse/ClickHouse/pull/4774) | -| [Ситимобил](https://city-mobil.ru) | Такси | Аналитика | — | — | [Запись в блоге на русском языке, март 2020 года](https://habr.com/en/company/citymobil/blog/490660/) | -| [ContentSquare](https://contentsquare.com) | Веб-аналитика | Главный продукт | — | — | [Запись в блоге на французском языке, ноябрь 2018 года](http://souslecapot.net/2018/11/21/patrick-chatain-vp-engineering-chez-contentsquare-penser-davantage-amelioration-continue-que-revolution-constante/) | -| [Cloudflare](https://cloudflare.com) | CDN | Анализ трафика | 36 серверов | — | [Сообщение в блоге, май 2017 года](https://blog.cloudflare.com/how-cloudflare-analyzes-1m-dns-queries-per-second/), [Сообщение в блоге, март 2018 года](https://blog.cloudflare.com/http-analytics-for-6m-requests-per-second-using-clickhouse/) | -| [Корунет](https://coru.net/) | Аналитика | Главный продукт | — | — | [Слайды на английском языке, апрель 2019 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup21/predictive_models.pdf) | -| [CraiditX 氪信](https://creditx.com) | Финансовый ИИ | Анализ | — | — | [Слайды на английском языке, ноябрь 2019 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/udf.pptx) | -| [Criteo / Storetail](https://www.criteo.com/) | Розничная торговля | Главный продукт | — | — | [Слайды на английском языке, октябрь 2018 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup18/3_storetail.pptx) | -| [Дойче банк](https://db.com) | Финансы | Би аналитика | — | — | [Слайды на английском языке, октябрь 2019 года](https://bigdatadays.ru/wp-content/uploads/2019/10/D2-H3-3_Yakunin-Goihburg.pdf) | -| [Дива-е](https://www.diva-e.com) | Цифровой Консалтинг | Главный продукт | — | — | [Слайды на английском языке, сентябрь 2019 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup29/ClickHouse-MeetUp-Unusual-Applications-sd-2019-09-17.pdf) | -| [Компания Exness](https://www.exness.com) | Торговый | Метрики, Ведение Журнала | — | — | [Разговор на русском языке, май 2019 года](https://youtu.be/_rpU-TvSfZ8?t=3215) | -| [Джинн](https://geniee.co.jp) | Рекламная сеть | Главный продукт | — | — | [Запись в блоге на японском языке, июль 2017 года](https://tech.geniee.co.jp/entry/2017/07/20/160100) | -| 
[HUYA](https://www.huya.com/) | Потоковое видео | Аналитика | — | — | [Слайды на китайском языке, октябрь 2018 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/7.%20ClickHouse万亿数据分析实践%20李本旺(sundy-li)%20虎牙.pdf) | -| [Идеалиста](https://www.idealista.com) | Недвижимость | Аналитика | — | — | [Сообщение в блоге на английском языке, апрель 2019 года](https://clickhouse.yandex/blog/en/clickhouse-meetup-in-madrid-on-april-2-2019) | -| [Infovista](https://www.infovista.com/) | Сети | Аналитика | — | — | [Слайды на английском языке, октябрь 2019 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup30/infovista.pdf) | -| [Компания innogames](https://www.innogames.com) | Игры | Метрики, Ведение Журнала | — | — | [Слайды на русском языке, сентябрь 2019 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/graphite_and_clickHouse.pdf) | -| [Интегрос](https://integros.com) | Платформа для видеосервисов | Аналитика | — | — | [Слайды на русском языке, май 2019 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup22/strategies.pdf) | -| [Данные По Кадьяку](https://www.kodiakdata.com/) | Облака | Главный продукт | — | — | [Слайды на английском языке, апрель 2018 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup13/kodiak_data.pdf) | -| [Контур](https://kontur.ru) | Разработка программного обеспечения | Метрика | — | — | [Говорить по-русски, ноябрь 2018](https://www.youtube.com/watch?v=U4u4Bd0FtrY) | -| [LifeStreet](https://lifestreet.com/) | Рекламная сеть | Главный продукт | 75 серверов (3 реплики) | 5.27 ПИБ | [Запись в блоге на русском языке, февраль 2017 года](https://habr.com/en/post/322620/) | -| [Mail.ru Облачные Решения](https://mcs.mail.ru/) | Облачные сервисы | Главный продукт | — | — | [Запуск экземпляра ClickHouse на русском языке](https://mcs.mail.ru/help/db-create/clickhouse#) | -| [MessageBird](https://www.messagebird.com) | Электросвязь | Статистика | — | — | [Слайды на английском языке, ноябрь 2018 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup20/messagebird.pdf) | -| [MGID](https://www.mgid.com/) | Рекламная сеть | Веб-аналитика | — | — | [Наш опыт внедрения аналитической СУБД ClickHouse на русском языке](http://gs-studio.com/news-about-it/32777----clickhouse---c) | -| [OneAPM](https://www.oneapm.com/) | Мониторинг и анализ данных | Главный продукт | — | — | [Слайды на китайском языке, октябрь 2018 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/8.%20clickhouse在OneAPM的应用%20杜龙.pdf) | -| [ПРАГМА Инноваций](http://www.pragma-innovation.fr/) | Телеметрия и анализ Больших Данных | Главный продукт | — | — | [Слайды на английском языке, октябрь 2018 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup18/4_pragma_innovation.pdf) | -| [QINGCLOUD](https://www.qingcloud.com/) | Облачные сервисы | Главный продукт | — | — | [Слайды на китайском языке, октябрь 2018 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/4.%20Cloud%20%2B%20TSDB%20for%20ClickHouse%20张健%20QingCloud.pdf) | -| [Qrator](https://qrator.net) | Защита от DDoS-атак | Главный продукт | — | — | [Сообщение В Блоге, Март 2019 Года](https://blog.qrator.net/en/clickhouse-ddos-mitigation_37/) | -| [Beijing PERCENT Information Technology Co., Лимитед.](https://www.percent.cn/) | Аналитика | Главный продукт | — | — | [Слайды на китайском языке, июнь 2019 
года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup24/4.%20ClickHouse万亿数据双中心的设计与实践%20.pdf) | -| [Бродяга](https://rambler.ru) | Интернет услуги | Аналитика | — | — | [Говорить по-русски, апрель 2018](https://medium.com/@ramblertop/разработка-api-clickhouse-для-рамблер-топ-100-f4c7e56f3141) | -| [Tencent](https://www.tencent.com) | Обмен сообщениями | Регистрация | — | — | [Говорить по-китайски, ноябрь 2019](https://youtu.be/T-iVQRuw-QY?t=5050) | -| [Движения Звезд](https://trafficstars.com/) | Рекламная сеть | — | — | — | [Слайды на русском языке, май 2018 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup15/lightning/ninja.pdf) | -| [S7 Airlines](https://www.s7.ru) | Авиакомпании | Метрики, Ведение Журнала | — | — | [Разговор на русском языке, март 2019 года](https://www.youtube.com/watch?v=nwG68klRpPg&t=15s) | -| [Общий](https://www.semrush.com/) | Маркетинг | Главный продукт | — | — | [Слайды на русском языке, август 2018 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/5_semrush.pdf) | -| [scireum ГмбХ](https://www.scireum.de/) | электронная коммерция | Главный продукт | — | — | [Говорить по-немецки, февраль 2020](https://www.youtube.com/watch?v=7QWAn5RbyR4) | -| [Караул](https://sentry.io/) | Разработчик | Бэкэнд для продукта | — | — | [Сообщение в блоге на английском языке, май 2019 года](https://blog.sentry.io/2019/05/16/introducing-snuba-sentrys-new-search-infrastructure) | -| [SGK](http://www.sgk.gov.tr/wps/portal/sgk/tr) | Государственное Социальное Обеспечение | Аналитика | — | — | [Слайды на английском языке, ноябрь 2019 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup35/ClickHouse%20Meetup-Ramazan%20POLAT.pdf) | -| [СЕО.делать](https://seo.do/) | Аналитика | Главный продукт | — | — | [Слайды на английском языке, ноябрь 2019 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup35/CH%20Presentation-%20Metehan%20Çetinkaya.pdf) | -| [Зина](http://english.sina.com/index.html) | Новости | — | — | — | [Слайды на китайском языке, октябрь 2018 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/6.%20ClickHouse最佳实践%20高鹏_新浪.pdf) | -| [SMI2](https://smi2.ru/) | Новости | Аналитика | — | — | [Запись в блоге на русском языке, ноябрь 2017 года](https://habr.com/ru/company/smi2/blog/314558/) | -| [Чмок](https://www.splunk.com/) | Бизнес-аналитика | Главный продукт | — | — | [Слайды на английском языке, январь 2018 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup12/splunk.pdf) | -| [Спотифай](https://www.spotify.com) | Музыка | Экспериментирование | — | — | [Слайды, Июль 2018 Года](https://www.slideshare.net/glebus/using-clickhouse-for-experimentation-104247173) | -| [Tencent](https://www.tencent.com) | Большие данные | Обработка данных | — | — | [Слайды на китайском языке, октябрь 2018 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/5.%20ClickHouse大数据集群应用_李俊飞腾讯网媒事业部.pdf) | -| [Убер](https://www.uber.com) | Такси | Регистрация | — | — | [Слайды, Февраль 2020 Года](https://presentations.clickhouse.tech/meetup40/uber.pdf) | -| [ВКонтакте](https://vk.com) | Социальная сеть | Статистика, Ведение Журнала | — | — | [Слайды на русском языке, август 2018 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/3_vk.pdf) | -| [Мудрецы](https://wisebits.com/) | IT-решение | Аналитика | — | — | [Слайды на русском языке, май 2019 
года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup22/strategies.pdf) |
-| [Технология Сяосин.](https://www.xiaoheiban.cn/) | Образование | Общая цель | — | — | [Слайды на английском языке, ноябрь 2019 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/sync-clickhouse-with-mysql-mongodb.pptx) |
-| [Сималайя](https://www.ximalaya.com/) | Общий доступ к аудио | OLAP | — | — | [Слайды на английском языке, ноябрь 2019 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/ximalaya.pdf) |
-| [Облако Яндекса](https://cloud.yandex.ru/services/managed-clickhouse) | Публичное Облако | Главный продукт | — | — | [Разговор на русском языке, декабрь 2019 года](https://www.youtube.com/watch?v=pgnak9e_E0o) |
-| [DataLens Яндекс](https://cloud.yandex.ru/services/datalens) | Бизнес-разведка | Главный продукт | — | — | [Слайды на русском языке, декабрь 2019 года](https://presentations.clickhouse.tech/meetup38/datalens.pdf) |
-| [Яндекс Маркет](https://market.yandex.ru/) | электронная коммерция | Метрики, Ведение Журнала | — | — | [Разговор на русском языке, январь 2019 года](https://youtu.be/_l1qP0DyBcA?t=478) |
-| [Яндекс Метрика](https://metrica.yandex.com) | Веб-аналитика | Главный продукт | 360 серверов в одном кластере, 1862 сервера в одном отделе | 66.41 ПИБ / 5.68 ПИБ | [Слайды, Февраль 2020 Года](https://presentations.clickhouse.tech/meetup40/introduction/#13) |
-| [ЦВТ](https://htc-cs.ru/) | Разработка программного обеспечения | Метрики, Ведение Журнала | — | — | [Сообщение в блоге, март 2019 года, на русском языке](https://vc.ru/dev/62715-kak-my-stroili-monitoring-na-prometheus-clickhouse-i-elk) |
-| [МКБ](https://mkb.ru/) | Банк | Мониторинг веб-систем | — | — | [Слайды на русском языке, сентябрь 2019 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/mkb.pdf) |
-| [金数据](https://jinshuju.net) | Би аналитика | Главный продукт | — | — | [Слайды на китайском языке, октябрь 2019 года](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup24/3.%20金数据数据架构调整方案Public.pdf) |
-
-[Оригинальная статья](https://clickhouse.tech/docs/en/introduction/adopters/)
diff --git a/docs/ru/introduction/adopters.md b/docs/ru/introduction/adopters.md
new file mode 120000
index 00000000000..b9b77a27eb9
--- /dev/null
+++ b/docs/ru/introduction/adopters.md
@@ -0,0 +1 @@
+en/introduction/adopters.md
\ No newline at end of file
diff --git a/docs/ru/operations/optimizing_performance/sampling_query_profiler.md b/docs/ru/operations/optimizing_performance/sampling_query_profiler.md
deleted file mode 100644
index d2cc9738749..00000000000
--- a/docs/ru/operations/optimizing_performance/sampling_query_profiler.md
+++ /dev/null
@@ -1,62 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 1cd5f0028d917696daf71ac1c9ee849c99c1d5c8
----
-
-# Sampling Query Profiler {#sampling-query-profiler}
-
-ClickHouse runs a sampling profiler that allows analyzing query execution. Using the profiler, you can find source code routines that are used most frequently during query execution. You can trace CPU time and wall-clock time spent, including idle time.
-
-To use the profiler:
-
-- Set up the [trace_log](../server_configuration_parameters/settings.md#server_configuration_parameters-trace_log) section of the server configuration.
-
-    This section configures the [trace_log](../../operations/optimizing_performance/sampling_query_profiler.md#system_tables-trace_log) system table containing the results of the profiler functioning. It is configured by default. Remember that data in this table is valid only for a running server. After a server restart, ClickHouse doesn't clean up the table, and all the stored virtual memory addresses may become invalid.
-
-- Set up the [query_profiler_cpu_time_period_ns](../settings/settings.md#query_profiler_cpu_time_period_ns) or [query_profiler_real_time_period_ns](../settings/settings.md#query_profiler_real_time_period_ns) settings. Both settings can be used simultaneously.
-
-    These settings allow you to configure the profiler timers. As these are session settings, you can get a different sampling frequency for the whole server, individual users or user profiles, for your interactive session, and for each individual query.
-
-The default sampling frequency is one sample per second, and both the CPU and real timers are enabled. This frequency allows collecting enough information about the ClickHouse cluster. At the same time, working at this frequency, the profiler doesn't affect the performance of the ClickHouse server. If you need to profile each individual query, try to use a higher sampling frequency.
-
-To analyze the `trace_log` system table:
-
-- Install the `clickhouse-common-static-dbg` package. See [Install from DEB packages](../../getting_started/install.md#install-from-deb-packages).
-
-- Allow introspection functions by the [allow_introspection_functions](../settings/settings.md#settings-allow_introspection_functions) setting.
-
-    For security reasons, introspection functions are disabled by default.
-
-- Use the `addressToLine`, `addressToSymbol` and `demangle` [introspection functions](../../operations/optimizing_performance/sampling_query_profiler.md) to get function names and their positions in the ClickHouse code. To get a profile for some query, you need to aggregate data from the `trace_log` table. You can aggregate data by individual functions or by whole stack traces.
-
-If you need to visualize `trace_log` info, try [flamegraph](../../interfaces/third-party/gui/#clickhouse-flamegraph) and [speedscope](https://github.com/laplab/clickhouse-speedscope).
-
-## Example {#example}
-
-In this example we:
-
-- Filter `trace_log` data by a query identifier and the current date.
-
-- Aggregate by stack trace.
-
-- Using introspection functions, get a report of:
-
-    - Names of symbols and corresponding source code functions.
-    - Source code locations of these functions.
-
-``` sql
-SELECT
-    count(),
-    arrayStringConcat(arrayMap(x -> concat(demangle(addressToSymbol(x)), '\n    ', addressToLine(x)), trace), '\n') AS sym
-FROM system.trace_log
-WHERE (query_id = 'ebca3574-ad0a-400a-9cbc-dca382f5998c') AND (event_date = today())
-GROUP BY trace
-ORDER BY count() DESC
-LIMIT 10
-```
-
-``` text
-{% include "operations/performance/sampling_query_profiler_example_result.txt" %}
-```
diff --git a/docs/ru/operations/optimizing_performance/sampling_query_profiler.md b/docs/ru/operations/optimizing_performance/sampling_query_profiler.md
new file mode 120000
index 00000000000..565f39130fb
--- /dev/null
+++ b/docs/ru/operations/optimizing_performance/sampling_query_profiler.md
@@ -0,0 +1 @@
+en/operations/optimizing_performance/sampling_query_profiler.md
\ No newline at end of file
diff --git a/docs/ru/operations/performance_test.md b/docs/ru/operations/performance_test.md
deleted file mode 100644
index 9b5c6f4fed3..00000000000
--- a/docs/ru/operations/performance_test.md
+++ /dev/null
@@ -1,80 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 1cd5f0028d917696daf71ac1c9ee849c99c1d5c8
----
-
-# How to Test Your Hardware with ClickHouse {#how-to-test-your-hardware-with-clickhouse}
-
-With this instruction, you can run a basic ClickHouse performance test on any server without installing ClickHouse packages.
-
-1. Go to the «commits» page: https://github.com/ClickHouse/ClickHouse/commits/master
-
-2. Click on the first green check mark or red cross with the green «ClickHouse Build Check» and click on the «Details» link near «ClickHouse Build Check».
-
-3. Copy the link to the «clickhouse» binary for amd64 or aarch64.
-
-4. ssh to the server and download it with wget:
-
-<!-- -->
-
-      # For amd64:
-      wget https://clickhouse-builds.s3.yandex.net/0/00ba767f5d2a929394ea3be193b1f79074a1c4bc/1578163263_binary/clickhouse
-      # For aarch64:
-      wget https://clickhouse-builds.s3.yandex.net/0/00ba767f5d2a929394ea3be193b1f79074a1c4bc/1578161264_binary/clickhouse
-      # Then do:
-      chmod a+x clickhouse
-
-5. Download the configs:
-
-<!-- -->
-
-      wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.xml
-      wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/users.xml
-      mkdir config.d
-      wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.d/path.xml -O config.d/path.xml
-      wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.d/log_to_console.xml -O config.d/log_to_console.xml
-
-6. Download the benchmark files:
-
-<!-- -->
-
-      wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/clickhouse/benchmark-new.sh
-      chmod a+x benchmark-new.sh
-      wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/clickhouse/queries.sql
-
-7. Download the test data according to the [Yandex.Metrica dataset](../getting_started/example_datasets/metrica.md) instruction (the «hits» table containing 100 million rows).
-
-<!-- -->
-
-      wget https://clickhouse-datasets.s3.yandex.net/hits/partitions/hits_100m_obfuscated_v1.tar.xz
-      tar xvf hits_100m_obfuscated_v1.tar.xz -C .
-      mv hits_100m_obfuscated_v1/* .
-
-8. Run the server:
-
-<!-- -->
-
-      ./clickhouse server
-
-9. Check the data: ssh to the server in another terminal
-
-<!-- -->
-
-      ./clickhouse client --query "SELECT count() FROM hits_100m_obfuscated"
-      100000000
-
-10. Edit benchmark-new.sh: change «clickhouse-client» to «./clickhouse client» and add the «--max_memory_usage 100000000000» parameter.
-
-<!-- -->
-
-      mcedit benchmark-new.sh
-
-11. Run the benchmark:
-
-<!-- -->
-
-      ./benchmark-new.sh hits_100m_obfuscated
-
-12. Send the numbers and the info about your hardware configuration to clickhouse-feedback@yandex-team.com
-
-All the results are published here: https://clickhouse.tech/benchmark_hardware.html
diff --git a/docs/ru/operations/performance_test.md b/docs/ru/operations/performance_test.md
new file mode 120000
index 00000000000..3787adb92bd
--- /dev/null
+++ b/docs/ru/operations/performance_test.md
@@ -0,0 +1 @@
+en/operations/performance_test.md
\ No newline at end of file
diff --git a/docs/ru/operations/utilities/clickhouse-benchmark.md b/docs/ru/operations/utilities/clickhouse-benchmark.md
deleted file mode 100644
index 5467a58676e..00000000000
--- a/docs/ru/operations/utilities/clickhouse-benchmark.md
+++ /dev/null
@@ -1,154 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 1cd5f0028d917696daf71ac1c9ee849c99c1d5c8
----
-
-# clickhouse-benchmark {#clickhouse-benchmark}
-
-Connects to a ClickHouse server and repeatedly sends the specified queries.
-
-Syntax:
-
-``` bash
-$ echo "single query" | clickhouse-benchmark [keys]
-```
-
-or
-
-``` bash
-$ clickhouse-benchmark [keys] <<< "single query"
-```
-
-If you want to send a set of queries, create a text file and place each query on an individual line in this file. For example:
-
-``` sql
-SELECT * FROM system.numbers LIMIT 10000000
-SELECT 1
-```
-
-Then pass this file to the standard input of `clickhouse-benchmark`.
-
-``` bash
-clickhouse-benchmark [keys] < queries_file
-```
-
-## Keys {#clickhouse-benchmark-keys}
-
-- `-c N`, `--concurrency=N` — Number of queries that `clickhouse-benchmark` sends simultaneously. Default value: 1.
-- `-d N`, `--delay=N` — Interval in seconds between intermediate reports (set 0 to disable reports). Default value: 1.
-- `-h WORD`, `--host=WORD` — Server host. Default value: `localhost`. For the [comparison mode](#clickhouse-benchmark-comparison-mode) you can use multiple `-h` keys.
-- `-p N`, `--port=N` — Server port. Default value: 9000. For the [comparison mode](#clickhouse-benchmark-comparison-mode) you can use multiple `-p` keys.
-- `-i N`, `--iterations=N` — Total number of queries. Default value: 0.
-- `-r`, `--randomize` — Random order of query execution if there is more than one input query.
-- `-s`, `--secure` — Using a TLS connection.
-- `-t N`, `--timelimit=N` — Time limit in seconds. `clickhouse-benchmark` stops sending queries when the specified time limit is reached. Default value: 0 (time limit disabled).
-- `--confidence=N` — Level of confidence for the T-test. Possible values: 0 (80%), 1 (90%), 2 (95%), 3 (98%), 4 (99%), 5 (99.5%). Default value: 5. In the [comparison mode](#clickhouse-benchmark-comparison-mode), `clickhouse-benchmark` performs the [Independent two-sample Student's t-test](https://en.wikipedia.org/wiki/Student%27s_t-test#Independent_two-sample_t-test) to determine whether the two distributions differ at the selected level of confidence.
-- `--cumulative` — Printing cumulative data instead of data per interval.
-- `--database=DATABASE_NAME` — ClickHouse database name. Default value: `default`.
-- `--json=FILEPATH` — JSON output. When the key is set, `clickhouse-benchmark` outputs a report to the specified JSON file.
-
-## Output {#clickhouse-benchmark-output}
-
-By default, `clickhouse-benchmark` reports for each `--delay` interval.
-
-Example of a report:
-
-``` text
-Queries executed: 10.
-
-localhost:9000, queries 10, QPS: 6.772, RPS: 67904487.440, MiB/s: 518.070, result RPS: 67721584.984, result MiB/s: 516.675.
-
-0.000%      0.145 sec.
-10.000%     0.146 sec.
-20.000%     0.146 sec.
-30.000%     0.146 sec.
-40.000%     0.147 sec.
-50.000%     0.148 sec.
-60.000%     0.148 sec.
-70.000%     0.148 sec.
-80.000%     0.149 sec.
-90.000%     0.150 sec.
-95.000%     0.150 sec.
-99.000%     0.150 sec.
-99.900%     0.150 sec.
-99.990%     0.150 sec.
-```
-
-In the report you can find:
-
-- Number of queries in the `Queries executed:` field.
-
-- Status string containing (in order):
-
-    - Endpoint of the ClickHouse server.
-    - Number of processed queries.
-    - QPS: how many queries the server performed per second during the period specified in the `--delay` argument.
-    - RPS: how many rows the server read per second during the period specified in the `--delay` argument.
-    - MiB/s: how many mebibytes the server read per second during the period specified in the `--delay` argument.
-    - result RPS: how many rows the server placed into the result of a query per second during the period specified in the `--delay` argument.
-    - result MiB/s: how many mebibytes the server placed into the result of a query per second during the period specified in the `--delay` argument.
-
-- Percentiles of queries execution time.
-
-## Comparison mode {#clickhouse-benchmark-comparison-mode}
-
-`clickhouse-benchmark` can compare performance for two running ClickHouse servers.
-
-To use the comparison mode, specify the endpoints of both servers by two pairs of `--host`, `--port` keys. Keys are matched together by position in the argument list: the first `--host` is matched with the first `--port`, and so on. `clickhouse-benchmark` establishes connections to both servers, then sends queries. Each query is addressed to a randomly selected server. The results are shown for each server separately, as in the sketch below.
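For illustration, a hypothetical comparison-mode run against two servers (the host names are invented):

``` bash
# server-a:9000 is paired with the first --port, server-b:9001 with the second;
# each query goes to a randomly chosen server and results are reported per server.
clickhouse-benchmark -h server-a -p 9000 -h server-b -p 9001 -i 100 \
    <<< "SELECT * FROM system.numbers LIMIT 10000000 OFFSET 10000000"
```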
-
-## Example {#clickhouse-benchmark-example}
-
-``` bash
-$ echo "SELECT * FROM system.numbers LIMIT 10000000 OFFSET 10000000" | clickhouse-benchmark -i 10
-```
-
-``` text
-Loaded 1 queries.
-
-Queries executed: 6.
-
-localhost:9000, queries 6, QPS: 6.153, RPS: 123398340.957, MiB/s: 941.455, result RPS: 61532982.200, result MiB/s: 469.459.
-
-0.000%      0.159 sec.
-10.000%     0.159 sec.
-20.000%     0.159 sec.
-30.000%     0.160 sec.
-40.000%     0.160 sec.
-50.000%     0.162 sec.
-60.000%     0.164 sec.
-70.000%     0.165 sec.
-80.000%     0.166 sec.
-90.000%     0.166 sec.
-95.000%     0.167 sec.
-99.000%     0.167 sec.
-99.900%     0.167 sec.
-99.990%     0.167 sec.
-
-
-
-Queries executed: 10.
-
-localhost:9000, queries 10, QPS: 6.082, RPS: 121959604.568, MiB/s: 930.478, result RPS: 60815551.642, result MiB/s: 463.986.
-
-0.000%      0.159 sec.
-10.000%     0.159 sec.
-20.000%     0.160 sec.
-30.000%     0.163 sec.
-40.000%     0.164 sec.
-50.000%     0.165 sec.
-60.000%     0.166 sec.
-70.000%     0.166 sec.
-80.000%     0.167 sec.
-90.000%     0.167 sec.
-95.000%     0.170 sec.
-99.000%     0.172 sec.
-99.900%     0.172 sec.
-99.990%     0.172 sec.
-```
diff --git a/docs/ru/operations/utilities/clickhouse-benchmark.md b/docs/ru/operations/utilities/clickhouse-benchmark.md
new file mode 120000
index 00000000000..fda8b1a50c7
--- /dev/null
+++ b/docs/ru/operations/utilities/clickhouse-benchmark.md
@@ -0,0 +1 @@
+en/operations/utilities/clickhouse-benchmark.md
\ No newline at end of file
diff --git a/docs/ru/whats_new/changelog/2017.md b/docs/ru/whats_new/changelog/2017.md
deleted file mode 100644
index 1c820453901..00000000000
--- a/docs/ru/whats_new/changelog/2017.md
+++ /dev/null
@@ -1,266 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 1cd5f0028d917696daf71ac1c9ee849c99c1d5c8
----
-
-### ClickHouse release 1.1.54327, 2017-12-21 {#clickhouse-release-1-1-54327-2017-12-21}
-
-This release contains bug fixes for the previous release 1.1.54318:
-
-- Fixed a bug with a possible race condition in replication that could lead to data loss. This issue affects versions 1.1.54310 and 1.1.54318. If you use one of these versions with Replicated tables, the update is strongly recommended. This issue shows up in the logs in warning messages like `Part ... from own log doesn't exist.` The issue is relevant even if you don't see these messages in the logs.
-
-### ClickHouse release 1.1.54318, 2017-11-30 {#clickhouse-release-1-1-54318-2017-11-30}
-
-This release contains bug fixes for the previous release 1.1.54310:
-
-- Fixed incorrect row deletions during merges in the SummingMergeTree engine
-- Fixed a memory leak in unreplicated MergeTree engines
-- Fixed performance degradation with frequent inserts in MergeTree engines
-- Fixed an issue that caused the replication queue to stop running
-- Fixed rotation and archiving of server logs
-
-### ClickHouse release 1.1.54310, 2017-11-01 {#clickhouse-release-1-1-54310-2017-11-01}
-
-#### New features: {#new-features}
-
-- Custom partitioning key for the MergeTree family of table engines.
-- [Kafka](https://clickhouse.yandex/docs/en/operations/table_engines/kafka/) table engine.
-- Added support for loading [CatBoost](https://catboost.yandex/) models and applying them to data stored in ClickHouse.
-- Added support for time zones with non-integer offsets from UTC.
-- Added support for arithmetic operations with time intervals.
-- The range of values for the Date and DateTime types is extended to the year 2105.
-- Added the `CREATE MATERIALIZED VIEW x TO y` query (specifies an existing table for storing the data of a materialized view); see the sketch after this list.
-- Added the `ATTACH TABLE` query without arguments.
-- The processing logic for Nested columns with names ending in -Map in a SummingMergeTree table was extracted into the sumMap aggregate function. You can now specify such columns explicitly.
-- The maximum size of the IP trie dictionary is increased to 128M entries.
-- Added the getSizeOfEnumType function.
-- Added the sumWithOverflow aggregate function.
-- Added support for the Cap'n Proto input format.
-- You can now customize the compression level when using the zstd algorithm.
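For illustration (not part of the deleted file), a hedged sketch of two of the features above; the table and column names are invented:

``` bash
clickhouse-client --multiquery <<'SQL'
-- Custom partitioning key: any expression, not only a month.
CREATE TABLE hits_by_day (d Date, user_id UInt64)
ENGINE = MergeTree PARTITION BY toYYYYMMDD(d) ORDER BY (d, user_id);

-- CREATE MATERIALIZED VIEW ... TO stores the view's data in an existing table.
CREATE TABLE daily_totals (d Date, c UInt64) ENGINE = SummingMergeTree ORDER BY d;
CREATE MATERIALIZED VIEW mv_daily TO daily_totals
AS SELECT d, count() AS c FROM hits_by_day GROUP BY d;
SQL
```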
-
-#### Backward incompatible changes: {#backward-incompatible-changes}
-
-- Creation of temporary tables with an engine other than Memory is not allowed.
-- Explicit creation of tables with the View or MaterializedView engine is not allowed.
-- During table creation, a new check verifies that the sampling key expression is included in the primary key.
-
-#### Bug fixes: {#bug-fixes}
-
-- Fixed hangups when synchronously inserting into a Distributed table.
-- Fixed nonatomic adding and removing of parts in Replicated tables.
-- Data inserted into a materialized view is not subjected to unnecessary deduplication.
-- Executing a query to a Distributed table for which the local replica is lagging and remote replicas are unavailable no longer results in an error.
-- Users don't need access permissions to the `default` database to create temporary tables anymore.
-- Fixed crashing when specifying the Array type without arguments.
-- Fixed hangups when the disk volume containing server logs is full.
-- Fixed an overflow in the toRelativeWeekNum function for the first week of the Unix epoch.
-
-#### Build improvements: {#build-improvements}
-
-- Several third-party libraries (notably Poco) were updated and converted to git submodules.
-
-### ClickHouse release 1.1.54304, 2017-10-19 {#clickhouse-release-1-1-54304-2017-10-19}
-
-#### New features: {#new-features-1}
-
-- TLS support in the native protocol (to enable, set `tcp_ssl_port` in `config.xml`).
-
-#### Bug fixes: {#bug-fixes-1}
-
-- `ALTER` for replicated tables now tries to start running as soon as possible.
-- Fixed crashing when reading data with the setting `preferred_block_size_bytes=0.`
-- Fixed crashes of `clickhouse-client` when pressing `Page Down`
-- Correct interpretation of certain complex queries with `GLOBAL IN` and `UNION ALL`
-- `FREEZE PARTITION` always works atomically now.
-- Empty POST requests now return a response with code 411.
-- Fixed interpretation errors for expressions like `CAST(1 AS Nullable(UInt8)).`
-- Fixed an error when reading `Array(Nullable(String))` columns from `MergeTree` tables.
-- Fixed crashing when parsing queries like `SELECT dummy AS dummy, dummy AS b`
-- Users are updated correctly with an invalid `users.xml`
-- Correct handling when an executable dictionary returns a non-zero response code.
-
-### ClickHouse release 1.1.54292, 2017-09-20 {#clickhouse-release-1-1-54292-2017-09-20}
-
-#### New features: {#new-features-2}
-
-- Added the `pointInPolygon` function for working with coordinates on a coordinate plane.
-- Added the `sumMap` aggregate function for calculating the sum of arrays, similar to `SummingMergeTree`.
-- Added the `trunc` function. Improved performance of the rounding functions (`round`, `floor`, `ceil`, `roundToExp2`) and corrected the logic of how they work. Changed the logic of the `roundToExp2` function for fractions and negative numbers.
-- The ClickHouse executable file is now less dependent on the libc version. The same ClickHouse executable file can run on a wide variety of Linux systems. There is still a dependency when using compiled queries (with the setting `compile = 1`, which is not used by default).
-- Reduced the time needed for dynamic compilation of queries.
-
-#### Bug fixes: {#bug-fixes-2}
-
-- Fixed an error that sometimes produced `part ... intersects previous part` messages and weakened replica consistency.
-- Fixed an error that caused the server to lock up if ZooKeeper was unavailable during shutdown.
-- Removed excessive logging when restoring replicas.
-- Fixed an error in the UNION ALL implementation.
-- Fixed an error in the concat function that occurred if the first column in a block has the Array type.
-- Progress is now displayed correctly in the system.merges table.
-
-### ClickHouse release 1.1.54289, 2017-09-13 {#clickhouse-release-1-1-54289-2017-09-13}
-
-#### New features: {#new-features-3}
-
-- `SYSTEM` queries for server administration: `SYSTEM RELOAD DICTIONARY`, `SYSTEM RELOAD DICTIONARIES`, `SYSTEM DROP DNS CACHE`, `SYSTEM SHUTDOWN`, `SYSTEM KILL` (see the sketch after this list).
-- Added functions for working with arrays: `concat`, `arraySlice`, `arrayPushBack`, `arrayPushFront`, `arrayPopBack`, `arrayPopFront`.
-- Added the `root` and `identity` parameters for the ZooKeeper configuration. This allows you to isolate individual users on the same ZooKeeper cluster.
-- Added the aggregate functions `groupBitAnd`, `groupBitOr`, and `groupBitXor` (for compatibility, they are also available under the names `BIT_AND`, `BIT_OR`, and `BIT_XOR`).
-- External dictionaries can be loaded from MySQL by specifying a socket in the filesystem.
-- External dictionaries can be loaded from MySQL over SSL (`ssl_cert`, `ssl_key`, `ssl_ca` parameters).
-- Added the `max_network_bandwidth_for_user` setting to restrict the overall bandwidth use for queries per user.
-- Support for `DROP TABLE` for temporary tables.
-- Support for reading `DateTime` values in Unix timestamp format from the `CSV` and `JSONEachRow` formats.
-- Lagging replicas in distributed queries are now excluded by default (the default threshold is 5 minutes).
-- FIFO locking is used during ALTER: an ALTER query isn't blocked indefinitely for continuously running queries.
-- Option to set `umask` in the config file.
-- Improved performance for queries with `DISTINCT`.
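For illustration (not part of the deleted file), a hedged sketch of two of the additions above; the values are chosen only for demonstration:

``` bash
clickhouse-client --multiquery <<'SQL'
-- Reload all external dictionaries without restarting the server:
SYSTEM RELOAD DICTIONARIES;

-- Bitwise aggregates over 0..7: AND = 0, OR = 7, XOR = 0
-- (the BIT_AND / BIT_OR / BIT_XOR aliases work the same way).
SELECT groupBitAnd(n), groupBitOr(n), groupBitXor(n)
FROM (SELECT number AS n FROM system.numbers LIMIT 8);
SQL
```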
-
-#### Bug fixes: {#bug-fixes-3}
-
-- Improved the process for deleting old nodes in ZooKeeper. Previously, old nodes sometimes didn't get deleted if there were very frequent inserts, which caused the server to be slow to shut down, among other things.
-- Fixed randomization when choosing hosts for the connection to ZooKeeper.
-- Fixed the exclusion of lagging replicas in distributed queries if the replica is localhost.
-- Fixed an error where a data part in a `ReplicatedMergeTree` table could be broken after running `ALTER MODIFY` on an element in a `Nested` structure.
-- Fixed an error that could cause SELECT queries to "hang".
-- Improvements to distributed DDL queries.
-- Fixed the query `CREATE TABLE ... AS `.
-- Resolved the deadlock in the `ALTER ... CLEAR COLUMN IN PARTITION` query for `Buffer` tables.
-- Fixed the invalid default value for `Enum`s (0 instead of the minimum) when using the `JSONEachRow` and `TSKV` formats.
-- Resolved the appearance of zombie processes when using a dictionary with an `executable` source.
-- Fixed a segfault for the HEAD query.
-
-#### Improved workflow for developing and assembling ClickHouse: {#improved-workflow-for-developing-and-assembling-clickhouse}
-
-- You can use `pbuilder` to build ClickHouse.
-- You can use `libc++` instead of `libstdc++` for builds on Linux.
-- Added instructions for using static code analysis tools: `Coverage`, `clang-tidy`, `cppcheck`.
-
-#### Please note when upgrading: {#please-note-when-upgrading}
-
-- There is now a higher default value for the MergeTree setting `max_bytes_to_merge_at_max_space_in_pool` (the maximum total size of data parts to merge, in bytes): it has increased from 100 GiB to 150 GiB. This might result in large merges running after the server upgrade, which could cause an increased load on the disk subsystem. If the free space available on the server is less than twice the total amount of the merges that are running, this will cause all other merges to stop running, including merges of small data parts. As a result, INSERT queries will fail with the message "Merges are processing significantly slower than inserts." Use the `SELECT * FROM system.merges` query to monitor the situation. You can also check the `DiskSpaceReservedForMerge` metric in the `system.metrics` table, or in Graphite. You don't need to do anything to fix this, since the issue will resolve itself once the large merges finish. If you find this unacceptable, you can restore the previous value for the `max_bytes_to_merge_at_max_space_in_pool` setting. To do this, go to the `<merge_tree>` section in config.xml, set `<max_bytes_to_merge_at_max_space_in_pool>107374182400</max_bytes_to_merge_at_max_space_in_pool>` and restart the server.
-
-### ClickHouse release 1.1.54284, 2017-08-29 {#clickhouse-release-1-1-54284-2017-08-29}
-
-- This is a bugfix release for the previous 1.1.54282 release. It fixes leaks in the parts directory in ZooKeeper.
-
-### ClickHouse release 1.1.54282, 2017-08-23 {#clickhouse-release-1-1-54282-2017-08-23}
-
-This release contains bug fixes for the previous release 1.1.54276:
-
-- Fixed `DB::Exception: Assertion violation: !_path.empty()` when inserting into a Distributed table.
-- Fixed parsing when inserting in RowBinary format if the input data starts with ';'.
-- Errors during runtime compilation of certain aggregate functions (e.g. `groupArray()`).
-
-### ClickHouse release 1.1.54276, 2017-08-16 {#clickhouse-release-1-1-54276-2017-08-16}
-
-#### New features: {#new-features-4}
-
-- Added an optional WITH section for a SELECT query. Example query: `WITH 1+1 AS a SELECT a, a*a`
-- INSERT can be performed synchronously in a Distributed table: OK is returned only after all the data is saved on all the shards. This is activated by the setting `insert_distributed_sync=1`.
-- Added the UUID data type for working with 16-byte identifiers.
-- Added aliases of CHAR, FLOAT and other types for compatibility with Tableau.
-- Added the functions toYYYYMM, toYYYYMMDD, and toYYYYMMDDhhmmss for converting time into numbers.
-- You can use IP addresses (together with the hostname) to identify servers for clustered DDL queries.
-- Added support for non-constant arguments and negative offsets in the function `substring(str, pos, len).`
-- Added the `max_size` parameter for the `groupArray(max_size)(column)` aggregate function, and optimized its performance.
-
-#### Main changes: {#main-changes}
-
-- Security improvements: all server files are created with 0640 permissions (can be changed via the umask config parameter).
-- Improved error messages for queries with invalid syntax.
-- Significantly reduced memory consumption and improved performance when merging large sections of MergeTree data.
-- Significantly increased the performance of data merges for the ReplacingMergeTree engine.
-- Improved performance of asynchronous inserts from a Distributed table by combining multiple source inserts. To enable this functionality, use the setting `distributed_directory_monitor_batch_inserts=1`.
-
-#### Backward incompatible changes: {#backward-incompatible-changes-1}
-
-- Changed the binary format of aggregate states of `groupArray(array_column)` functions for arrays.
-
-#### Complete list of changes: {#complete-list-of-changes}
-
-- Added the `output_format_json_quote_denormals` setting, which enables outputting nan and inf values in JSON format.
-- Optimized stream allocation when reading from a Distributed table.
-- Settings can be configured in readonly mode if the value doesn't change.
-- Added the ability to retrieve non-integer granules of the MergeTree engine in order to meet restrictions on the block size specified in the `preferred_block_size_bytes` setting. The purpose is to reduce the consumption of RAM and increase cache locality when processing queries from tables with large columns.
-- Efficient use of indexes that contain expressions like `toStartOfHour(x)` for conditions like `toStartOfHour(x) op constexpr.`
-- Added new settings for MergeTree engines (the `merge_tree` section in config.xml):
-    - `replicated_deduplication_window_seconds` sets the number of seconds allowed for deduplicating inserts in Replicated tables.
-    - `cleanup_delay_period` sets how often to start cleanup to remove outdated data.
-    - `replicated_can_become_leader` can prevent a replica from becoming the leader (and assigning merges).
-- Accelerated cleanup to remove outdated data from ZooKeeper.
-- Multiple improvements and fixes for clustered DDL queries. Of particular interest is the new setting `distributed_ddl_task_timeout`, which limits the time to wait for a response from the servers in the cluster. If a DDL request has not been performed on all hosts, a response will contain a timeout error and the request will be executed in asynchronous mode.
-- Improved display of stack traces in the server logs.
-- Added the "none" value for the compression method.
-- You can use multiple `dictionaries_config` sections in config.xml.
-- It is possible to connect to MySQL through a socket in the file system.
-- The system.parts table has a new column with information about the size of marks, in bytes.
-
-#### Bug fixes: {#bug-fixes-4}
-
-- Distributed tables using a Merge table now work correctly for a SELECT query with a condition on the `_table` field.
-- Fixed a rare race condition in ReplicatedMergeTree when checking data parts.
-- Fixed possible freezing on "leader election" when starting a server.
-- The `max_replica_delay_for_distributed_queries` setting was ignored when using a local replica of the data source. This has been fixed.
-- Fixed incorrect behavior of `ALTER TABLE CLEAR COLUMN IN PARTITION` when attempting to clean a non-existing column.
-- Fixed an exception in the multiIf function when using empty arrays or strings.
-- Fixed excessive memory allocations when deserializing the Native format.
-- Fixed incorrect auto-update of Trie dictionaries.
-- Fixed an exception when running queries with a GROUP BY clause from a Merge table when using SAMPLE.
-- Fixed a crash of GROUP BY when using `distributed_aggregation_memory_efficient=1`.
-- Now you can specify `database.table` in the right side of IN and JOIN.
-- Too many threads were used for parallel aggregation. This has been fixed.
-- Fixed how the "if" function works with FixedString arguments.
-- SELECT worked incorrectly from a Distributed table for shards with a weight of 0. This has been fixed.
-- Running `CREATE VIEW IF EXISTS` no longer causes crashes.
-- Fixed incorrect behavior when `input_format_skip_unknown_fields=1` is set and there are negative numbers.
-- Fixed an infinite loop in the `dictGetHierarchy()` function if there is some invalid data in the dictionary.
-- Fixed `Syntax error: unexpected (...)` errors when running distributed queries with subqueries in an IN or JOIN clause and Merge tables.
-- Fixed an incorrect interpretation of a SELECT query from Dictionary tables.
-- Fixed the "Cannot mremap" error when using arrays in IN and JOIN clauses with more than 2 billion elements.
-- Fixed the failover for dictionaries with MySQL as the source.
-
-#### Improved workflow for developing and assembling ClickHouse: {#improved-workflow-for-developing-and-assembling-clickhouse-1}
-
-- Builds can be assembled in Arcadia.
-- You can use gcc 7 to compile ClickHouse.
-- Parallel builds using ccache+distcc are faster now.
-
-### ClickHouse release 1.1.54245, 2017-07-04 {#clickhouse-release-1-1-54245-2017-07-04}
-
-#### New features: {#new-features-5}
-
-- Distributed DDL (for example, `CREATE TABLE ON CLUSTER`)
-- The replicated query `ALTER TABLE CLEAR COLUMN IN PARTITION.`
-- The engine for Dictionary tables (access to dictionary data in the form of a table).
-- Dictionary database engine (this type of database automatically has Dictionary tables available for all the connected external dictionaries).
-- You can check for updates to the dictionary by sending a request to the source.
-- Qualified column names
-- Quoting identifiers using double quotation marks.
-- Sessions in the HTTP interface.
-- The OPTIMIZE query for a Replicated table can run not only on the leader.
-
-#### Backward incompatible changes: {#backward-incompatible-changes-2}
-
-- Removed SET GLOBAL.
-
-#### Minor changes: {#minor-changes}
-
-- Now after an alert is triggered, the log prints the full stack trace.
-- Relaxed the verification of the number of damaged/extra data parts at startup (there were too many false positives).
-
-#### Bug fixes: {#bug-fixes-5}
-
-- Fixed a bad connection "sticking" when inserting into a Distributed table.
-- GLOBAL IN now works for a query from a Merge table that looks at a Distributed table.
-- The incorrect number of cores was detected on a Google Compute Engine virtual machine. This has been fixed.
-- Changes in how an executable source of cached external dictionaries works.
-- Fixed the comparison of strings containing null characters.
-- Fixed the comparison of Float32 primary key fields with constants.
-- Previously, an incorrect estimate of the size of a field could lead to overly large allocations.
-- Fixed a crash when querying a Nullable column added to a table using ALTER.
-- Fixed a crash when sorting by a Nullable column, if the number of rows is less than LIMIT.
-- Fixed an ORDER BY subquery consisting of only constant values.
-- Previously, a Replicated table could remain in an invalid state after a failed DROP TABLE.
-- Aliases for scalar subqueries with empty results are no longer lost.
-- Now a query that used compilation does not fail with an error if the .so file gets damaged.
diff --git a/docs/ru/whats_new/changelog/2017.md b/docs/ru/whats_new/changelog/2017.md
new file mode 120000
index 00000000000..f278c42f170
--- /dev/null
+++ b/docs/ru/whats_new/changelog/2017.md
@@ -0,0 +1 @@
+en/whats_new/changelog/2017.md
\ No newline at end of file
diff --git a/docs/ru/whats_new/changelog/2018.md b/docs/ru/whats_new/changelog/2018.md
deleted file mode 100644
index 5de3ba68437..00000000000
--- a/docs/ru/whats_new/changelog/2018.md
+++ /dev/null
@@ -1,1061 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 1cd5f0028d917696daf71ac1c9ee849c99c1d5c8
----
-
-## ClickHouse release 18.16 {#clickhouse-release-18-16}
-
-### ClickHouse release 18.16.1, 2018-12-21 {#clickhouse-release-18-16-1-2018-12-21}
-
-#### Bug fixes: {#bug-fixes}
-
-- Fixed an error that led to problems with updating dictionaries with the ODBC source. [\#3825](https://github.com/ClickHouse/ClickHouse/issues/3825), [\#3829](https://github.com/ClickHouse/ClickHouse/issues/3829)
-- JIT compilation of aggregate functions now works with LowCardinality columns. [\#3838](https://github.com/ClickHouse/ClickHouse/issues/3838)
-
-#### Improvements: {#improvements}
-
-- Added the `low_cardinality_allow_in_native_format` setting (enabled by default). If this setting is disabled, LowCardinality columns will be converted to ordinary columns for SELECT queries, and ordinary columns will be expected for INSERT queries. [\#3879](https://github.com/ClickHouse/ClickHouse/pull/3879)
-
-#### Build improvements: {#build-improvements}
-
-- Fixes for builds on macOS and ARM.
-
-### ClickHouse release 18.16.0, 2018-12-14 {#clickhouse-release-18-16-0-2018-12-14}
-
-#### New features: {#new-features}
-
-- `DEFAULT` expressions are evaluated for missing fields when loading data in semi-structured input formats (`JSONEachRow`, `TSKV`). This feature is enabled with the `insert_sample_with_metadata` setting. [\#3555](https://github.com/ClickHouse/ClickHouse/pull/3555)
-- The `ALTER TABLE` query now has the `MODIFY ORDER BY` action for changing the sorting key when adding or removing a table column. This is useful for tables in the `MergeTree` family that perform additional tasks when merging based on this sorting key, such as `SummingMergeTree`, `AggregatingMergeTree`, and so on (see the sketch after this list). [\#3581](https://github.com/ClickHouse/ClickHouse/pull/3581) [\#3755](https://github.com/ClickHouse/ClickHouse/pull/3755)
-- For tables in the `MergeTree` family, you can now specify a different sorting key (`ORDER BY`) and index (`PRIMARY KEY`). The sorting key can be longer than the index. [\#3581](https://github.com/ClickHouse/ClickHouse/pull/3581)
-- Added the `hdfs` table function and the `HDFS` table engine for importing and exporting data to HDFS. [chenxing-xc](https://github.com/ClickHouse/ClickHouse/pull/3617)
-- Added functions for working with base64: `base64Encode`, `base64Decode`, `tryBase64Decode`. [Alexander Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/3350)
-- Now you can use a parameter to configure the precision of the `uniqCombined` aggregate function (select the number of HyperLogLog cells). [\#3406](https://github.com/ClickHouse/ClickHouse/pull/3406)
-- Added the `system.contributors` table that contains the names of everyone who made commits in ClickHouse. [\#3452](https://github.com/ClickHouse/ClickHouse/pull/3452)
-- Added the ability to omit the partition for the `ALTER TABLE ... FREEZE` query in order to back up all partitions at once. [\#3514](https://github.com/ClickHouse/ClickHouse/pull/3514)
-- Added the `dictGet` and `dictGetOrDefault` functions that don't require specifying the type of the return value. The type is determined automatically from the dictionary description. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3564)
-- Now you can specify comments for a column in the table description and change them using `ALTER`. [\#3377](https://github.com/ClickHouse/ClickHouse/pull/3377)
-- Reading is supported for `Join` type tables with simple keys. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3728)
-- Now you can specify the options `join_use_nulls`, `max_rows_in_join`, `max_bytes_in_join`, and `join_overflow_mode` when creating a `Join` type table. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3728)
-- Added the `joinGet` function that allows you to use a `Join` type table like a dictionary. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3728)
-- Added the `partition_key`, `sorting_key`, `primary_key`, and `sampling_key` columns to the `system.tables` table in order to provide information about table keys. [\#3609](https://github.com/ClickHouse/ClickHouse/pull/3609)
-- Added the `is_in_partition_key`, `is_in_sorting_key`, `is_in_primary_key`, and `is_in_sampling_key` columns to the `system.columns` table. [\#3609](https://github.com/ClickHouse/ClickHouse/pull/3609)
-- Added the `min_time` and `max_time` columns to the `system.parts` table. These columns are populated when the partitioning key is an expression consisting of `DateTime` columns. [Emmanuel Donin de Rosière](https://github.com/ClickHouse/ClickHouse/pull/3800)
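For illustration (not part of the deleted file), a hedged sketch of the two sorting-key features above; the table and columns are invented:

``` bash
clickhouse-client --multiquery <<'SQL'
-- The sorting key may be longer than the index; PRIMARY KEY must be its prefix.
CREATE TABLE events (d Date, site UInt32, hits UInt64)
ENGINE = SummingMergeTree
ORDER BY (d, site)
PRIMARY KEY d;

-- MODIFY ORDER BY can extend the sorting key together with adding a column.
ALTER TABLE events ADD COLUMN region UInt32, MODIFY ORDER BY (d, site, region);
SQL
```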
-
-#### Bug fixes: {#bug-fixes-1}
-
-- Fixes and performance improvements for the `LowCardinality` data type. `GROUP BY` using `LowCardinality(Nullable(...))`. Getting the values of `extremes`. Processing high-order functions. `LEFT ARRAY JOIN`. Distributed `GROUP BY`. Functions that return `Array`. Execution of `ORDER BY`. Writing to `Distributed` tables (nicelulu). Backward compatibility for `INSERT` queries from old clients that implement the `Native` protocol. Support for `LowCardinality` for `JOIN`. Improved performance when working in a single stream. [\#3823](https://github.com/ClickHouse/ClickHouse/pull/3823) [\#3803](https://github.com/ClickHouse/ClickHouse/pull/3803) [\#3799](https://github.com/ClickHouse/ClickHouse/pull/3799) [\#3769](https://github.com/ClickHouse/ClickHouse/pull/3769) [\#3744](https://github.com/ClickHouse/ClickHouse/pull/3744) [\#3681](https://github.com/ClickHouse/ClickHouse/pull/3681) [\#3651](https://github.com/ClickHouse/ClickHouse/pull/3651) [\#3649](https://github.com/ClickHouse/ClickHouse/pull/3649) [\#3641](https://github.com/ClickHouse/ClickHouse/pull/3641) [\#3632](https://github.com/ClickHouse/ClickHouse/pull/3632) [\#3568](https://github.com/ClickHouse/ClickHouse/pull/3568) [\#3523](https://github.com/ClickHouse/ClickHouse/pull/3523) [\#3518](https://github.com/ClickHouse/ClickHouse/pull/3518)
-- Fixed how the `select_sequential_consistency` option works. Previously, when this setting was enabled, an incomplete result was sometimes returned after beginning to write to a new partition. [\#2863](https://github.com/ClickHouse/ClickHouse/pull/2863)
-- Databases are correctly specified when executing DDL `ON CLUSTER` queries and `ALTER UPDATE/DELETE`. [\#3772](https://github.com/ClickHouse/ClickHouse/pull/3772) [\#3460](https://github.com/ClickHouse/ClickHouse/pull/3460)
-- Databases are correctly specified for subqueries inside a view. [\#3521](https://github.com/ClickHouse/ClickHouse/pull/3521)
-- Fixed an error in `PREWHERE` with `FINAL` for `VersionedCollapsingMergeTree`. [7167bfd7](https://github.com/ClickHouse/ClickHouse/commit/7167bfd7b365538f7a91c4307ad77e552ab4e8c1)
-- Now you can use `KILL QUERY` to cancel queries that have not started yet because they are waiting for the table to be locked. [\#3517](https://github.com/ClickHouse/ClickHouse/pull/3517)
-- Corrected date and time calculations if the clocks were moved back at midnight (this happens in Iran, and happened in Moscow from 1981 to 1983). Previously, this led to the time being reset a day earlier than necessary, and also caused incorrect formatting of the date and time in text format. [\#3819](https://github.com/ClickHouse/ClickHouse/pull/3819)
-- Fixed bugs in some cases of `VIEW` and subqueries that omit the database. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3521)
-- Fixed a race condition when simultaneously reading from a `MATERIALIZED VIEW` and deleting a `MATERIALIZED VIEW` due to not locking the internal `MATERIALIZED VIEW`. [\#3404](https://github.com/ClickHouse/ClickHouse/pull/3404) [\#3694](https://github.com/ClickHouse/ClickHouse/pull/3694)
-- Fixed the error `Lock handler cannot be nullptr.` [\#3689](https://github.com/ClickHouse/ClickHouse/pull/3689)
-- Fixed query processing when the `compile_expressions` option is enabled (it's enabled by default). Nondeterministic constant expressions like the `now` function are no longer unfolded. [\#3457](https://github.com/ClickHouse/ClickHouse/pull/3457)
-- Fixed a crash when specifying a non-constant scale argument in the `toDecimal32/64/128` functions.
-- Fixed an error when trying to insert an array with `NULL` elements in the `Values` format into a column of type `Array` without `Nullable` (if `input_format_values_interpret_expressions` = 1). [\#3487](https://github.com/ClickHouse/ClickHouse/pull/3487) [\#3503](https://github.com/ClickHouse/ClickHouse/pull/3503)
-- Fixed continuous error logging in `DDLWorker` if ZooKeeper is not available. [8f50c620](https://github.com/ClickHouse/ClickHouse/commit/8f50c620334988b28018213ec0092fe6423847e2)
-- Fixed the return type for `quantile*` functions from `Date` and `DateTime` type arguments. [\#3580](https://github.com/ClickHouse/ClickHouse/pull/3580)
-- Fixed the `WITH` clause if it specifies a simple alias without expressions. [\#3570](https://github.com/ClickHouse/ClickHouse/pull/3570)
-- Fixed processing of queries with named subqueries and qualified column names when `enable_optimize_predicate_expression` is enabled. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3588)
-- Fixed the error `Attempt to attach to nullptr thread group` when working with materialized views. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3623)
-- Fixed a crash when passing certain incorrect arguments to the `arrayReverse` function. [73e3a7b6](https://github.com/ClickHouse/ClickHouse/commit/73e3a7b662161d6005e7727d8a711b930386b871)
-- Fixed the buffer overflow in the `extractURLParameter` function. Improved performance. Added correct processing of strings containing zero bytes. [141e9799](https://github.com/ClickHouse/ClickHouse/commit/141e9799e49201d84ea8e951d1bed4fb6d3dacb5)
-- Fixed buffer overflow in the `lowerUTF8` and `upperUTF8` functions. Removed the ability to execute these functions over `FixedString` type arguments. [\#3662](https://github.com/ClickHouse/ClickHouse/pull/3662)
-- Fixed a rare race condition when deleting `MergeTree` tables. [\#3680](https://github.com/ClickHouse/ClickHouse/pull/3680)
-- Fixed a race condition when reading from `Buffer` tables and simultaneously performing `ALTER` or `DROP` on the target tables. [\#3719](https://github.com/ClickHouse/ClickHouse/pull/3719)
-- Fixed a segfault if the `max_temporary_non_const_columns` limit was exceeded. [\#3788](https://github.com/ClickHouse/ClickHouse/pull/3788)
-
-#### Improvements: {#improvements-1}
-
-- The server does not write the processed configuration files to the `/etc/clickhouse-server/` directory. Instead, it saves them in the `preprocessed_configs` directory inside `path`. This means that the `/etc/clickhouse-server/` directory doesn't have write access for the `clickhouse` user, which improves security. [\#2443](https://github.com/ClickHouse/ClickHouse/pull/2443)
-- The `min_merge_bytes_to_use_direct_io` option is set to 10 GiB by default. A merge that forms large parts of tables from the MergeTree family will be performed in `O_DIRECT` mode, which prevents excessive page cache eviction. [\#3504](https://github.com/ClickHouse/ClickHouse/pull/3504)
-- Accelerated server start when there is a very large number of tables. [\#3398](https://github.com/ClickHouse/ClickHouse/pull/3398)
-- Added a connection pool and HTTP `Keep-Alive` for connections between replicas. [\#3594](https://github.com/ClickHouse/ClickHouse/pull/3594)
-- If the query syntax is invalid, the `400 Bad Request` code is returned in the `HTTP` interface (500 was returned previously). [31bc680a](https://github.com/ClickHouse/ClickHouse/commit/31bc680ac5f4bb1d0360a8ba4696fa84bb47d6ab)
-- The `join_default_strictness` option is set to `ALL` by default for compatibility. [120e2cbe](https://github.com/ClickHouse/ClickHouse/commit/120e2cbe2ff4fbad626c28042d9b28781c805afe)
-- Removed logging to `stderr` from the `re2` library for invalid or complex regular expressions. [\#3723](https://github.com/ClickHouse/ClickHouse/pull/3723)
-- Added for the `Kafka` table engine: checks for subscriptions before beginning to read from Kafka; the kafka_max_block_size setting for the table. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3396)
-- The `cityHash64`, `farmHash64`, `metroHash64`, `sipHash64`, `halfMD5`, `murmurHash2_32`, `murmurHash2_64`, `murmurHash3_32`, and `murmurHash3_64` functions now work for any number of arguments and for arguments in the form of tuples. [\#3451](https://github.com/ClickHouse/ClickHouse/pull/3451) [\#3519](https://github.com/ClickHouse/ClickHouse/pull/3519)
-- The `arrayReverse` function now works with any types of arrays. [73e3a7b6](https://github.com/ClickHouse/ClickHouse/commit/73e3a7b662161d6005e7727d8a711b930386b871)
-- Added an optional parameter: the slot size for the `timeSlots` function. [Kirill Shvakov](https://github.com/ClickHouse/ClickHouse/pull/3724)
-- For `FULL` and `RIGHT JOIN`, the `max_block_size` setting is used for the stream of non-joined data from the right table. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3699)
-- Added the `--secure` command line parameter in `clickhouse-benchmark` and `clickhouse-performance-test` to enable TLS. [\#3688](https://github.com/ClickHouse/ClickHouse/pull/3688) [\#3690](https://github.com/ClickHouse/ClickHouse/pull/3690)
-- Type conversion when the structure of a `Buffer` type table does not match the structure of the destination table. [Vitaly Baranov](https://github.com/ClickHouse/ClickHouse/pull/3603)
-- Added the `tcp_keep_alive_timeout` option to enable keep-alive packets after inactivity for the specified time interval. [\#3441](https://github.com/ClickHouse/ClickHouse/pull/3441)
-- Removed unnecessary quoting of values for the partition key in the `system.parts` table if it consists of a single column. [\#3652](https://github.com/ClickHouse/ClickHouse/pull/3652)
-- The modulo function works for the `Date` and `DateTime` data types. [\#3385](https://github.com/ClickHouse/ClickHouse/pull/3385)
-- Added synonyms for the `POWER`, `LN`, `LCASE`, `UCASE`, `REPLACE`, `LOCATE`, `SUBSTR`, and `MID` functions. [\#3774](https://github.com/ClickHouse/ClickHouse/pull/3774) [\#3763](https://github.com/ClickHouse/ClickHouse/pull/3763) Some function names are case-insensitive for compatibility with the SQL standard. Added syntactic sugar `SUBSTRING(expr FROM start FOR length)` for compatibility with SQL. [\#3804](https://github.com/ClickHouse/ClickHouse/pull/3804)
-- Added the ability to `mlock` memory pages corresponding to `clickhouse-server` executable code to prevent it from being forced out of memory. This feature is disabled by default. [\#3553](https://github.com/ClickHouse/ClickHouse/pull/3553)
-- Improved performance when reading with `O_DIRECT` (with the `min_bytes_to_use_direct_io` option enabled). [\#3405](https://github.com/ClickHouse/ClickHouse/pull/3405)
-- Improved performance of the `dictGet...OrDefault` function for a constant key argument and a non-constant default argument. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3563)
-- The `firstSignificantSubdomain` function now processes the domains `gov`, `mil`, and `edu`. [Igor Hatarist](https://github.com/ClickHouse/ClickHouse/pull/3601) Improved performance. [\#3628](https://github.com/ClickHouse/ClickHouse/pull/3628)
-- Ability to specify custom environment variables for starting `clickhouse-server` using the `SYS-V init.d` script by defining `CLICKHOUSE_PROGRAM_ENV` in `/etc/default/clickhouse`.
-    [Pavlo Bashynskyi](https://github.com/ClickHouse/ClickHouse/pull/3612)
-- Correct return code for the clickhouse-server init script. [\#3516](https://github.com/ClickHouse/ClickHouse/pull/3516)
-- The `system.metrics` table now has the `VersionInteger` metric, and `system.build_options` has the added line `VERSION_INTEGER`, which contains the numeric form of the ClickHouse version, such as `18016000`. [\#3644](https://github.com/ClickHouse/ClickHouse/pull/3644)
-- Removed the ability to compare the `Date` type with a number to avoid potential errors like `date = 2018-12-17`, where the quotes around the date were omitted by mistake. [\#3687](https://github.com/ClickHouse/ClickHouse/pull/3687)
-- Fixed the behavior of stateful functions like `rowNumberInAllBlocks`. Previously they output a result that was one number larger due to starting during query analysis. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3729)
-- If the `force_restore_data` file can't be deleted, an error message is displayed. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3794)
-
-#### Build improvements: {#build-improvements-1}
-
-- Updated the `jemalloc` library, which fixes a potential memory leak. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3557)
-- Profiling with `jemalloc` is enabled by default for debug builds. [2cc82f5c](https://github.com/ClickHouse/ClickHouse/commit/2cc82f5cbe266421cd4c1165286c2c47e5ffcb15)
-- Added the ability to run integration tests when only `Docker` is installed on the system. [\#3650](https://github.com/ClickHouse/ClickHouse/pull/3650)
-- Added a fuzz expression test in SELECT queries. [\#3442](https://github.com/ClickHouse/ClickHouse/pull/3442)
-- Added a stress test for commits, which performs functional tests in parallel and in random order to detect more race conditions. [\#3438](https://github.com/ClickHouse/ClickHouse/pull/3438)
-- Improved the method for starting clickhouse-server in a Docker image. [Elghazal Ahmed](https://github.com/ClickHouse/ClickHouse/pull/3663)
-- For a Docker image, added support for initializing databases using files in the `/docker-entrypoint-initdb.d` directory. [Konstantin Lebedev](https://github.com/ClickHouse/ClickHouse/pull/3695)
-- Fixes for builds on ARM. [\#3709](https://github.com/ClickHouse/ClickHouse/pull/3709)
-
-#### Backward incompatible changes: {#backward-incompatible-changes}
-
-- Removed the ability to compare the `Date` type with a number. Instead of `toDate('2018-12-18') = 17883`, you must use explicit type conversion `= toDate(17883)` [\#3687](https://github.com/ClickHouse/ClickHouse/pull/3687)
-
-## ClickHouse release 18.14 {#clickhouse-release-18-14}
-
-### ClickHouse release 18.14.19, 2018-12-19 {#clickhouse-release-18-14-19-2018-12-19}
-
-#### Bug fixes: {#bug-fixes-2}
-
-- Fixed an error that led to problems with updating dictionaries with the ODBC source. [\#3825](https://github.com/ClickHouse/ClickHouse/issues/3825), [\#3829](https://github.com/ClickHouse/ClickHouse/issues/3829)
-- Databases are correctly specified when executing DDL `ON CLUSTER` queries. [\#3460](https://github.com/ClickHouse/ClickHouse/pull/3460)
-- Fixed a segfault if the `max_temporary_non_const_columns` limit was exceeded. [\#3788](https://github.com/ClickHouse/ClickHouse/pull/3788)
-
-#### Build improvements: {#build-improvements-2}
-
-- Fixes for builds on ARM.
-
-### ClickHouse release 18.14.18, 2018-12-04 {#clickhouse-release-18-14-18-2018-12-04}
-
-#### Bug fixes: {#bug-fixes-3}
-
-- Fixed an error in the `dictGet...` function for dictionaries of type `range`, if one of the arguments is constant and the other is not. [\#3751](https://github.com/ClickHouse/ClickHouse/pull/3751)
-- Fixed an error that caused the messages `netlink: '...': attribute type 1 has an invalid length` to be printed in the Linux kernel log; this happened only on sufficiently recent versions of the Linux kernel. [\#3749](https://github.com/ClickHouse/ClickHouse/pull/3749)
-- Fixed a segfault in the function `empty` for arguments of the `FixedString` type. [Daniel Dao Quang Minh](https://github.com/ClickHouse/ClickHouse/pull/3703)
-- Fixed excessive memory allocation when using a large value of the `max_query_size` setting (a memory chunk of `max_query_size` bytes was preallocated at once). [\#3720](https://github.com/ClickHouse/ClickHouse/pull/3720)
-
-#### Build changes: {#build-changes}
-
-- Fixed the build with LLVM/Clang libraries of version 7 from the OS packages (these libraries are used for runtime query compilation). [\#3582](https://github.com/ClickHouse/ClickHouse/pull/3582)
-
-### ClickHouse release 18.14.17, 2018-11-30 {#clickhouse-release-18-14-17-2018-11-30}
-
-#### Bug fixes: {#bug-fixes-4}
-
-- Fixed cases when the ODBC bridge process did not terminate with the main server process. [\#3642](https://github.com/ClickHouse/ClickHouse/pull/3642)
-- Fixed synchronous insertion into a `Distributed` table with a column list that differs from the column list of the remote table. [\#3673](https://github.com/ClickHouse/ClickHouse/pull/3673)
-- Fixed a rare race condition that could lead to a crash when dropping a MergeTree table. [\#3643](https://github.com/ClickHouse/ClickHouse/pull/3643)
-- Fixed a query deadlock in the case when query thread creation fails with the `Resource temporarily unavailable` error. [\#3643](https://github.com/ClickHouse/ClickHouse/pull/3643)
-- Fixed parsing of the `ENGINE` clause when the `CREATE AS table` syntax was used and the `ENGINE` clause was specified before the `AS table` (the error resulted in ignoring the specified engine). [\#3692](https://github.com/ClickHouse/ClickHouse/pull/3692)
-
-### ClickHouse release 18.14.15, 2018-11-21 {#clickhouse-release-18-14-15-2018-11-21}
-
-#### Bug fixes: {#bug-fixes-5}
-
-- The size of a memory chunk was overestimated while deserializing a column of type `Array(String)`, which led to "Memory limit exceeded" errors. The issue appeared in version 18.12.13. [\#3589](https://github.com/ClickHouse/ClickHouse/issues/3589)
-
-### ClickHouse release 18.14.14, 2018-11-20 {#clickhouse-release-18-14-14-2018-11-20}
-
-#### Bug fixes: {#bug-fixes-6}
-
-- Fixed `ON CLUSTER` queries when the cluster is configured as secure (the `<secure>` flag). [\#3599](https://github.com/ClickHouse/ClickHouse/pull/3599)
-
-#### Build changes: {#build-changes-1}
-
-- Fixed build problems (llvm-7 from the system, macos) [\#3582](https://github.com/ClickHouse/ClickHouse/pull/3582)
-
-### ClickHouse release 18.14.13, 2018-11-08 {#clickhouse-release-18-14-13-2018-11-08}
-
-#### Bug fixes: {#bug-fixes-7}
-
-- Fixed the `Block structure mismatch in MergingSorted stream` error. [\#3162](https://github.com/ClickHouse/ClickHouse/issues/3162)
-- Fixed `ON CLUSTER` queries in the case when secure connections were turned on in the cluster config (the `<secure>` flag). [\#3465](https://github.com/ClickHouse/ClickHouse/pull/3465)
-- Fixed an error in queries that used `SAMPLE`, `PREWHERE` and alias columns. [\#3543](https://github.com/ClickHouse/ClickHouse/pull/3543)
-- Fixed the rare `unknown compression method` error when the `min_bytes_to_use_direct_io` setting was enabled. [3544](https://github.com/ClickHouse/ClickHouse/pull/3544)
-
-#### Performance improvements: {#performance-improvements}
-
-- Fixed a performance regression of queries with `GROUP BY` on columns of UInt16 or Date type when executing on AMD EPYC processors. [Igor Lapko](https://github.com/ClickHouse/ClickHouse/pull/3512)
-- Fixed a performance regression of queries that process long strings. [\#3530](https://github.com/ClickHouse/ClickHouse/pull/3530)
-
-#### Build improvements: {#build-improvements-3}
-
-- Improvements for simplifying the Arcadia build. [\#3475](https://github.com/ClickHouse/ClickHouse/pull/3475), [\#3535](https://github.com/ClickHouse/ClickHouse/pull/3535)
-
-### ClickHouse release 18.14.12, 2018-11-02 {#clickhouse-release-18-14-12-2018-11-02}
-
-#### Bug fixes: {#bug-fixes-8}
-
-- Fixed a crash when joining two unnamed subqueries. [\#3505](https://github.com/ClickHouse/ClickHouse/pull/3505)
-- Fixed generating incorrect queries (with an empty `WHERE` clause) when querying external databases. [hotid](https://github.com/ClickHouse/ClickHouse/pull/3477)
-- Fixed using an incorrect timeout value in ODBC dictionaries. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3511)
-
-### ClickHouse release 18.14.11, 2018-10-29 {#clickhouse-release-18-14-11-2018-10-29}
-
-#### Bug fixes: {#bug-fixes-9}
-
-- Fixed the error `Block structure mismatch in UNION stream: different number of columns` in LIMIT queries. [\#2156](https://github.com/ClickHouse/ClickHouse/issues/2156)
-- Fixed errors when merging data in tables containing arrays inside Nested structures. [\#3397](https://github.com/ClickHouse/ClickHouse/pull/3397)
-- Fixed incorrect query results if the `merge_tree_uniform_read_distribution` setting is disabled (it is enabled by default). [\#3429](https://github.com/ClickHouse/ClickHouse/pull/3429)
-- Fixed an error on inserts to a Distributed table in Native format. [\#3411](https://github.com/ClickHouse/ClickHouse/issues/3411)
-
-### ClickHouse release 18.14.10, 2018-10-23 {#clickhouse-release-18-14-10-2018-10-23}
-
-- The `compile_expressions` setting (JIT compilation of expressions) is disabled by default. [\#3410](https://github.com/ClickHouse/ClickHouse/pull/3410)
-- The `enable_optimize_predicate_expression` setting is disabled by default.
-
-### ClickHouse release 18.14.9, 2018-10-16 {#clickhouse-release-18-14-9-2018-10-16}
-
-#### New features: {#new-features-1}
-
-- The `WITH CUBE` modifier for `GROUP BY` (the alternative syntax `GROUP BY CUBE(...)` is also available; see the sketch after this list). [\#3172](https://github.com/ClickHouse/ClickHouse/pull/3172)
-- Added the `formatDateTime` function. [Alexander Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/2770)
-- Added the `JDBC` table engine and the `jdbc` table function (requires installing clickhouse-jdbc-bridge). [Alexander Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/3210)
-- Added functions for working with the ISO week number: `toISOWeek`, `toISOYear`, `toStartOfISOYear`, and `toDayOfYear`. [\#3146](https://github.com/ClickHouse/ClickHouse/pull/3146)
-- Now you can use `Nullable` columns for `MySQL` and `ODBC` tables. [\#3362](https://github.com/ClickHouse/ClickHouse/pull/3362)
-- Nested data structures can be read as nested objects in the `JSONEachRow` format. Added the `input_format_import_nested_json` setting. [Veloman Yunkan](https://github.com/ClickHouse/ClickHouse/pull/3144)
-- Parallel processing is available for many `MATERIALIZED VIEW`s when inserting data. See the `parallel_view_processing` setting. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3208)
-- Added the `SYSTEM FLUSH LOGS` query (forced log flushes to system tables such as `query_log`) [\#3321](https://github.com/ClickHouse/ClickHouse/pull/3321)
-- Now you can use predefined `database` and `table` macros when declaring `Replicated` tables. [\#3251](https://github.com/ClickHouse/ClickHouse/pull/3251)
-- Added the ability to read `Decimal` type values in engineering notation (indicating powers of ten). [\#3153](https://github.com/ClickHouse/ClickHouse/pull/3153)
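For illustration (not part of the deleted file), a hedged sketch of two of the features above; the data is synthetic:

``` bash
clickhouse-client --multiquery <<'SQL'
-- WITH CUBE adds subtotal rows for every combination of the grouping keys:
SELECT number % 2 AS a, number % 3 AS b, count() AS c
FROM (SELECT number FROM system.numbers LIMIT 12)
GROUP BY a, b WITH CUBE;

-- formatDateTime renders a DateTime using a pattern string:
SELECT formatDateTime(now(), '%Y-%m-%d %H:%M:%S');
SQL
```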
-
-#### Experimental features: {#experimental-features}
-
-- Optimization of the GROUP BY clause for `LowCardinality data types.` [\#3138](https://github.com/ClickHouse/ClickHouse/pull/3138)
-- Optimized calculation of expressions for `LowCardinality data types.` [\#3200](https://github.com/ClickHouse/ClickHouse/pull/3200)
-
-#### Improvements: {#improvements-2}
-
-- Significantly reduced memory consumption for queries with `ORDER BY` and `LIMIT`. See the `max_bytes_before_remerge_sort` setting. [\#3205](https://github.com/ClickHouse/ClickHouse/pull/3205)
-- In the absence of a `JOIN` type (`LEFT`, `INNER`, …), `INNER JOIN` is assumed. [\#3147](https://github.com/ClickHouse/ClickHouse/pull/3147)
-- Qualified asterisks work correctly in queries with `JOIN`. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3202)
-- The `ODBC` table engine correctly chooses the method for quoting identifiers in the SQL dialect of the remote database. [Alexander Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/3210)
-- The `compile_expressions` setting (JIT compilation of expressions) is enabled by default.
-- Fixed behavior for simultaneously dropping a database/table if it exists and creating a database/table if it does not exist. Previously, a `CREATE DATABASE ... IF NOT EXISTS` query could return the error message "File ... already exists", and the `CREATE TABLE ... IF NOT EXISTS` and `DROP TABLE IF EXISTS` queries could return `Table ... is creating or attaching right now`. [\#3101](https://github.com/ClickHouse/ClickHouse/pull/3101)
-- LIKE and IN expressions with a constant right half are passed to the remote server when querying from MySQL or ODBC tables. [\#3182](https://github.com/ClickHouse/ClickHouse/pull/3182)
-- Comparisons with constant expressions in a WHERE clause are passed to the remote server when querying from MySQL and ODBC tables. Previously, only comparisons with constants were passed. [\#3182](https://github.com/ClickHouse/ClickHouse/pull/3182)
-- Correct calculation of row width in the terminal for `Pretty` formats, including strings with hieroglyphs. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/3257).
-- `ON CLUSTER` can be specified for `ALTER UPDATE` queries.
-- Improved performance for reading data in the `JSONEachRow` format. [\#3332](https://github.com/ClickHouse/ClickHouse/pull/3332)
-- Added synonyms for the `LENGTH` and `CHARACTER_LENGTH` functions for compatibility. The `CONCAT` function is no longer case-sensitive. [\#3306](https://github.com/ClickHouse/ClickHouse/pull/3306)
-- Added the `TIMESTAMP` synonym for the `DateTime` type. [\#3390](https://github.com/ClickHouse/ClickHouse/pull/3390)
-- There is always space reserved for query_id in the server logs, even if the log line is not related to a query. This makes it easier to parse server text logs with third-party tools.
-- Memory consumption by a query is logged when it exceeds the next level of an integer number of gigabytes. [\#3205](https://github.com/ClickHouse/ClickHouse/pull/3205)
-- Added a compatibility mode for the case when a client library that uses the Native protocol sends fewer columns by mistake than the server expects for the INSERT query. This scenario was possible when using the clickhouse-cpp library. Previously, this scenario caused the server to crash. [\#3171](https://github.com/ClickHouse/ClickHouse/pull/3171)
-- In a user-defined WHERE expression in `clickhouse-copier`, you can now use a `partition_key` alias (for additional filtering by source table partition). This is useful if the partitioning scheme changes during copying, but only changes slightly. [\#3166](https://github.com/ClickHouse/ClickHouse/pull/3166)
-- The workflow of the `Kafka` engine has been moved to a background thread pool in order to automatically reduce the speed of data reading at high loads. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3215).
-- Support for reading `Tuple` and `Nested` values of structures like `struct` in the `Cap'n'Proto format`. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3216)
-- The list of top-level domains for the `firstSignificantSubdomain` function now includes the domain `biz`. [decaseal](https://github.com/ClickHouse/ClickHouse/pull/3219)
-- In the configuration of external dictionaries, `null_value` is interpreted as the value of the default data type. [\#3330](https://github.com/ClickHouse/ClickHouse/pull/3330)
-- Support for the `intDiv` and `intDivOrZero` functions for `Decimal`. [b48402e8](https://github.com/ClickHouse/ClickHouse/commit/b48402e8712e2b9b151e0eef8193811d433a1264)
-- Support for the `Date`, `DateTime`, `UUID`, and `Decimal` types as a key for the `sumMap` aggregate function. [\#3281](https://github.com/ClickHouse/ClickHouse/pull/3281)
-- Support for the `Decimal` data type in external dictionaries. [\#3324](https://github.com/ClickHouse/ClickHouse/pull/3324)
-- Support for the `Decimal` data type in `SummingMergeTree` tables. [\#3348](https://github.com/ClickHouse/ClickHouse/pull/3348)
-- Added specializations for `UUID` in `if`. [\#3366](https://github.com/ClickHouse/ClickHouse/pull/3366)
-- Reduced the number of `open` and `close` system calls when reading from a `MergeTree table`. [\#3283](https://github.com/ClickHouse/ClickHouse/pull/3283)
-- A `TRUNCATE TABLE` query can be executed on any replica (the query is passed to the leader replica). [Kirill Shvakov](https://github.com/ClickHouse/ClickHouse/pull/3375)
-
-#### Bug fixes: {#bug-fixes-10}
-
-- Fixed an issue with `Dictionary` tables for `range_hashed` dictionaries. This error occurred in version 18.12.17. [\#1702](https://github.com/ClickHouse/ClickHouse/pull/1702)
-- Fixed an error when loading `range_hashed` dictionaries (the message `Unsupported type Nullable (...)`). This error occurred in version 18.12.17. [\#3362](https://github.com/ClickHouse/ClickHouse/pull/3362)
-- Fixed errors in the `pointInPolygon` function due to the accumulation of inaccurate calculations for polygons with a large number of vertices located close to each other. [\#3331](https://github.com/ClickHouse/ClickHouse/pull/3331) [\#3341](https://github.com/ClickHouse/ClickHouse/pull/3341)
#### Bug fixes: {#bug-fixes-10}

- Fixed an issue with `Dictionary` tables for `range_hashed` dictionaries. This error occurred in version 18.12.17. [\#1702](https://github.com/ClickHouse/ClickHouse/pull/1702)
- Fixed an error when loading `range_hashed` dictionaries (the message `Unsupported type Nullable (...)`). This error occurred in version 18.12.17. [\#3362](https://github.com/ClickHouse/ClickHouse/pull/3362)
- Fixed errors in the `pointInPolygon` function due to the accumulation of inaccurate calculations for polygons with a large number of vertices located close to each other. [\#3331](https://github.com/ClickHouse/ClickHouse/pull/3331) [\#3341](https://github.com/ClickHouse/ClickHouse/pull/3341)
- If after merging data parts, the checksum for the resulting part differs from the result of the same merge on another replica, the result of the merge is deleted and the data part is downloaded from the other replica (this is the correct behavior). But after downloading the data part, it couldn't be added to the working set because of an error that the part already exists (because the data part was deleted with some delay after the merge). This led to cyclical attempts to download the same data. [\#3194](https://github.com/ClickHouse/ClickHouse/pull/3194)
- Fixed incorrect calculation of total memory consumption by queries (because of incorrect calculation, the `max_memory_usage_for_all_queries` setting worked incorrectly and the `MemoryTracking` metric had an incorrect value). This error occurred in version 18.12.13. [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3344)
- Fixed the functionality of `CREATE TABLE ... ON CLUSTER ... AS SELECT ...` This error occurred in version 18.12.13. [\#3247](https://github.com/ClickHouse/ClickHouse/pull/3247)
- Fixed unnecessary preparation of data structures for `JOIN`s on the server that initiates the query if the `JOIN` is only performed on remote servers. [\#3340](https://github.com/ClickHouse/ClickHouse/pull/3340)
- Fixed bugs in the `Kafka` engine: deadlocks after exceptions when starting to read data, and locks upon completion [Marek Vavruša](https://github.com/ClickHouse/ClickHouse/pull/3215).
- For `Kafka` tables, the optional `schema` parameter was not passed (the schema of the `Cap'n'Proto` format). [Vojtech Splichal](https://github.com/ClickHouse/ClickHouse/pull/3150)
- If the ensemble of ZooKeeper servers has servers that accept the connection but then immediately close it instead of responding to the handshake, ClickHouse chooses another server to connect. Previously, this produced the error `Cannot read all data. Bytes read: 0. Bytes expected: 4.` and the server couldn't start. [8218cf3a](https://github.com/ClickHouse/ClickHouse/commit/8218cf3a5f39a43401953769d6d12a0bb8d29da9)
- If the ensemble of ZooKeeper servers contains servers for which the DNS query returns an error, these servers are ignored. [17b8e209](https://github.com/ClickHouse/ClickHouse/commit/17b8e209221061325ad7ba0539f03c6e65f87f29)
- Fixed type conversion between `Date` and `DateTime` when inserting data in the `VALUES` format (if `input_format_values_interpret_expressions = 1`). Previously, the conversion was performed between the numeric value of the number of days in Unix Epoch time and the Unix timestamp, which led to unexpected results. [\#3229](https://github.com/ClickHouse/ClickHouse/pull/3229)
- Fixed type conversion between `Decimal` and integer numbers. [\#3211](https://github.com/ClickHouse/ClickHouse/pull/3211)
- Fixed errors in the `enable_optimize_predicate_expression` setting. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3231)
- Fixed a parsing error in CSV format with floating-point numbers if a non-default CSV separator is used, such as `;`. [\#3155](https://github.com/ClickHouse/ClickHouse/pull/3155)
- Fixed the `arrayCumSumNonNegative` function (it does not accumulate negative values if the accumulator is less than zero).
[Aleksey Studnev](https://github.com/ClickHouse/ClickHouse/pull/3163)
- Fixed how `Merge` tables work on top of `Distributed` tables when using `PREWHERE`. [\#3165](https://github.com/ClickHouse/ClickHouse/pull/3165)
- Fixed bugs in the `ALTER UPDATE` query.
- Fixed bugs in the `odbc` table function that appeared in version 18.12. [\#3197](https://github.com/ClickHouse/ClickHouse/pull/3197)
- Fixed the operation of aggregate functions with `StateArray` combinators. [\#3188](https://github.com/ClickHouse/ClickHouse/pull/3188)
- Fixed a crash when dividing a `Decimal` value by zero. [69dd6609](https://github.com/ClickHouse/ClickHouse/commit/69dd6609193beb4e7acd3e6ad216eca0ccfb8179)
- Fixed output of types for operations using `Decimal` and integer arguments. [\#3224](https://github.com/ClickHouse/ClickHouse/pull/3224)
- Fixed a segfault during `GROUP BY` on `Decimal128`. [3359ba06](https://github.com/ClickHouse/ClickHouse/commit/3359ba06c39fcd05bfdb87d6c64154819621e13a)
- The `log_query_threads` setting (logging information about each thread of query execution) now takes effect only if the `log_queries` option (logging information about queries) is set to 1. Since the `log_query_threads` option is enabled by default, information about threads was previously logged even if query logging was disabled. [\#3241](https://github.com/ClickHouse/ClickHouse/pull/3241)
- Fixed an error in the distributed operation of the quantiles aggregate function (the error message `Not found column quantile...`). [292a8855](https://github.com/ClickHouse/ClickHouse/commit/292a885533b8e3b41ce8993867069d14cbd5a664)
- Fixed the compatibility problem when working on a cluster of version 18.12.17 servers and older servers at the same time. For distributed queries with GROUP BY keys of both fixed and non-fixed length, if there was a large amount of data to aggregate, the returned data was not always fully aggregated (two different rows contained the same aggregation keys). [\#3254](https://github.com/ClickHouse/ClickHouse/pull/3254)
- Fixed handling of substitutions in `clickhouse-performance-test` if the query contains only part of the substitutions declared in the test. [\#3263](https://github.com/ClickHouse/ClickHouse/pull/3263)
- Fixed an error when using `FINAL` with `PREWHERE`. [\#3298](https://github.com/ClickHouse/ClickHouse/pull/3298)
- Fixed an error when using `PREWHERE` over columns that were added during `ALTER`. [\#3298](https://github.com/ClickHouse/ClickHouse/pull/3298)
- Added a check for the absence of `arrayJoin` in `DEFAULT` and `MATERIALIZED` expressions. Previously, `arrayJoin` led to an error when inserting data. [\#3337](https://github.com/ClickHouse/ClickHouse/pull/3337)
- Added a check for the absence of `arrayJoin` in a `PREWHERE` clause. Previously, this led to messages like `Size ... doesn't match` or `Unknown compression method` when executing queries. [\#3357](https://github.com/ClickHouse/ClickHouse/pull/3357)
- Fixed a segfault that could occur in rare cases after an optimization that replaced AND chains of equality evaluations with the corresponding IN expression.
[liuyimin-bytedance](https://github.com/ClickHouse/ClickHouse/pull/3339)
- Minor corrections to `clickhouse-benchmark`: previously, client information was not sent to the server; now the number of executed queries is calculated more accurately when shutting down and for limiting the number of iterations. [\#3351](https://github.com/ClickHouse/ClickHouse/pull/3351) [\#3352](https://github.com/ClickHouse/ClickHouse/pull/3352)

#### Backward incompatible changes: {#backward-incompatible-changes-1}

- Removed the `allow_experimental_decimal_type` option. The `Decimal` data type is available for default use. [\#3329](https://github.com/ClickHouse/ClickHouse/pull/3329)

## ClickHouse release 18.12 {#clickhouse-release-18-12}

### ClickHouse release 18.12.17, 2018-09-16 {#clickhouse-release-18-12-17-2018-09-16}

#### New features: {#new-features-2}

- `invalidate_query` (the ability to specify a query to check whether an external dictionary needs to be updated) is implemented for the `clickhouse` source. [\#3126](https://github.com/ClickHouse/ClickHouse/pull/3126)
- Added the ability to use `UInt*`, `Int*`, and `DateTime` data types (along with the `Date` type) as a `range_hashed` external dictionary key that defines the boundaries of ranges. Now `NULL` can be used to designate an open range. [Vasily Nemkov](https://github.com/ClickHouse/ClickHouse/pull/3123)
- The `Decimal` type now supports `var*` and `stddev*` aggregate functions. [\#3129](https://github.com/ClickHouse/ClickHouse/pull/3129)
- The `Decimal` type now supports mathematical functions (`exp`, `sin` and so on). [\#3129](https://github.com/ClickHouse/ClickHouse/pull/3129)
- The `system.part_log` table now has the `partition_id` column. [\#3089](https://github.com/ClickHouse/ClickHouse/pull/3089)

#### Bug fixes: {#bug-fixes-11}

- `Merge` now works correctly on `Distributed` tables. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3159)
- Fixed incompatibility (unnecessary dependency on the `glibc` version) that made it impossible to run ClickHouse on `Ubuntu Precise` and older versions. The incompatibility arose in version 18.12.13. [\#3130](https://github.com/ClickHouse/ClickHouse/pull/3130)
- Fixed errors in the `enable_optimize_predicate_expression` setting. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3107)
- Fixed a minor issue with backwards compatibility that appeared when working with a cluster of replicas on versions earlier than 18.12.13 and simultaneously creating a new replica of a table on a server with a newer version (shown in the message `Can not clone replica, because the ... updated to new ClickHouse version`, which is logical, but shouldn't happen). [\#3122](https://github.com/ClickHouse/ClickHouse/pull/3122)

#### Backward incompatible changes: {#backward-incompatible-changes-2}

- The `enable_optimize_predicate_expression` option is enabled by default (which is rather optimistic). If query analysis errors occur that are related to searching for the column names, set `enable_optimize_predicate_expression` to 0. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3107)

### ClickHouse release 18.12.14, 2018-09-13 {#clickhouse-release-18-12-14-2018-09-13}

#### New features: {#new-features-3}

- Added support for `ALTER UPDATE` queries (a usage sketch follows this list). [\#3035](https://github.com/ClickHouse/ClickHouse/pull/3035)
- Added the `allow_ddl` option, which restricts the user's access to DDL queries. [\#3104](https://github.com/ClickHouse/ClickHouse/pull/3104)
- Added the `min_merge_bytes_to_use_direct_io` option for `MergeTree` engines, which allows you to set a threshold for the total size of the merge (when above the threshold, data part files will be handled using O\_DIRECT). [\#3117](https://github.com/ClickHouse/ClickHouse/pull/3117)
- The `system.merges` system table now contains the `partition_id` column. [\#3099](https://github.com/ClickHouse/ClickHouse/pull/3099)
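A minimal usage sketch for the new `ALTER UPDATE` query; the table `t` and its columns are hypothetical:

```
-- A mutation: rewrites only the data parts affected by the WHERE condition.
ALTER TABLE t UPDATE visible = 0 WHERE created_at < '2018-01-01';
```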
#### Improvements: {#improvements-3}

- If a data part remains unchanged during a mutation, it isn't downloaded by replicas. [\#3103](https://github.com/ClickHouse/ClickHouse/pull/3103)
- Autocomplete is available for names of settings when working with `clickhouse-client`. [\#3106](https://github.com/ClickHouse/ClickHouse/pull/3106)

#### Bug fixes: {#bug-fixes-12}

- Added a check for the sizes of arrays that are elements of `Nested` type fields when inserting. [\#3118](https://github.com/ClickHouse/ClickHouse/pull/3118)
- Fixed an error updating external dictionaries with the `ODBC` source and `hashed` storage. This error occurred in version 18.12.13.
- Fixed a crash when creating a temporary table from a query with an `IN` condition. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3098)
- Fixed an error in aggregate functions for arrays that can have `NULL` elements. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/3097)

### ClickHouse release 18.12.13, 2018-09-10 {#clickhouse-release-18-12-13-2018-09-10}

#### New features: {#new-features-4}

- Added the `DECIMAL(digits, scale)` data type (`Decimal32(scale)`, `Decimal64(scale)`, `Decimal128(scale)`). To enable it, use the `allow_experimental_decimal_type` setting. [\#2846](https://github.com/ClickHouse/ClickHouse/pull/2846) [\#2970](https://github.com/ClickHouse/ClickHouse/pull/2970) [\#3008](https://github.com/ClickHouse/ClickHouse/pull/3008) [\#3047](https://github.com/ClickHouse/ClickHouse/pull/3047)
- New `WITH ROLLUP` modifier for `GROUP BY` (alternative syntax: `GROUP BY ROLLUP(...)`); a sketch follows this list. [\#2948](https://github.com/ClickHouse/ClickHouse/pull/2948)
- In queries with JOIN, the star character expands to a list of columns in all tables, in compliance with the SQL standard. You can restore the old behavior by setting `asterisk_left_columns_only` to 1 on the user configuration level. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2787)
- Added support for JOIN with table functions. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2907)
- Autocomplete by pressing Tab in clickhouse-client. [Sergey Shcherbin](https://github.com/ClickHouse/ClickHouse/pull/2447)
- CTRL+C in clickhouse-client clears a query that was entered. [\#2877](https://github.com/ClickHouse/ClickHouse/pull/2877)
- Added the `join_default_strictness` setting (values: `''`, `'any'`, `'all'`). This allows you to not specify `ANY` or `ALL` for `JOIN`. [\#2982](https://github.com/ClickHouse/ClickHouse/pull/2982)
- Each line of the server log related to query processing shows the query ID. [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482)
- Now you can get query execution logs in clickhouse-client (use the `send_logs_level` setting). With distributed query processing, logs are cascaded from all the servers. [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482)
- The `system.query_log` and `system.processes` (`SHOW PROCESSLIST`) tables now have information about all changed settings when you run a query (the nested structure of the `Settings` data). Added the `log_query_settings` setting. [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482)
- The `system.query_log` and `system.processes` tables now show information about the number of threads that are participating in query execution (see the `thread_numbers` column). [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482)
- Added `ProfileEvents` counters that measure the time spent on reading and writing over the network and reading and writing to disk, the number of network errors, and the time spent waiting when network bandwidth is limited. [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482)
- Added `ProfileEvents` counters that contain the system metrics from rusage (you can use them to get information about CPU usage in userspace and the kernel, page faults, and context switches), as well as taskstats metrics (use these to obtain information about I/O wait time, CPU wait time, and the amount of data read and recorded, both with and without page cache). [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482)
- The `ProfileEvents` counters are applied globally and for each query, as well as for each query execution thread, which allows you to profile resource consumption by query in detail. [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482)
- Added the `system.query_thread_log` table, which contains information about each query execution thread. Added the `log_query_threads` setting. [\#2482](https://github.com/ClickHouse/ClickHouse/pull/2482)
- The `system.metrics` and `system.events` tables now have built-in documentation. [\#3016](https://github.com/ClickHouse/ClickHouse/pull/3016)
- Added the `arrayEnumerateDense` function. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2975)
- Added the `arrayCumSumNonNegative` and `arrayDifference` functions. [Aleksey Studnev](https://github.com/ClickHouse/ClickHouse/pull/2942)
- Added the `retention` aggregate function. [Sundy Li](https://github.com/ClickHouse/ClickHouse/pull/2887)
- Now you can add (merge) states of aggregate functions by using the plus operator, and multiply the states of aggregate functions by a nonnegative constant. [\#3062](https://github.com/ClickHouse/ClickHouse/pull/3062) [\#3034](https://github.com/ClickHouse/ClickHouse/pull/3034)
- Tables in the MergeTree family now have the virtual column `_partition_id`. [\#3089](https://github.com/ClickHouse/ClickHouse/pull/3089)
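A minimal sketch of the two equivalent `WITH ROLLUP` spellings mentioned above; the table `t` and the columns `a`, `b`, and `x` are hypothetical:

```
SELECT a, b, sum(x) FROM t GROUP BY a, b WITH ROLLUP;
-- alternative syntax:
SELECT a, b, sum(x) FROM t GROUP BY ROLLUP(a, b);
```

Both produce subtotal rows for `(a)` and a grand-total row in addition to the regular `(a, b)` groups.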
#### Experimental features: {#experimental-features-1}

- Added the `LowCardinality(T)` data type. This data type automatically creates a local dictionary of values and allows data processing without unpacking the dictionary. [\#2830](https://github.com/ClickHouse/ClickHouse/pull/2830)
- Added a cache of JIT-compiled functions and a counter for the number of uses before compiling. To JIT compile expressions, enable the `compile_expressions` setting. [\#2990](https://github.com/ClickHouse/ClickHouse/pull/2990) [\#3077](https://github.com/ClickHouse/ClickHouse/pull/3077)

#### Improvements: {#improvements-4}

- Fixed the problem with unlimited accumulation of the replication log when there are abandoned replicas. Added an effective recovery mode for replicas with a long lag.
- Improved performance of `GROUP BY` with multiple aggregation fields when one of them is string and the others are fixed length.
- Improved performance when using `PREWHERE` and with implicit transfer of expressions in `PREWHERE`.
- Improved parsing performance for text formats (`CSV`, `TSV`). [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2977) [\#2980](https://github.com/ClickHouse/ClickHouse/pull/2980)
- Improved performance of reading strings and arrays in binary formats. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2955)
- Increased performance and reduced memory consumption for queries to `system.tables` and `system.columns` when there is a very large number of tables on a single server. [\#2953](https://github.com/ClickHouse/ClickHouse/pull/2953)
- Fixed a performance problem in the case of a large stream of queries that result in an error (the `_dl_addr` function is visible in `perf top`, but the server isn't using much CPU). [\#2938](https://github.com/ClickHouse/ClickHouse/pull/2938)
- Conditions are pushed down into views (when `enable_optimize_predicate_expression` is enabled). [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2907)
- Improvements to the functionality for the `UUID` data type. [\#3074](https://github.com/ClickHouse/ClickHouse/pull/3074) [\#2985](https://github.com/ClickHouse/ClickHouse/pull/2985)
- The `UUID` data type is supported in dictionaries (The-Alchemist). [\#2822](https://github.com/ClickHouse/ClickHouse/pull/2822)
- The `visitParamExtractRaw` function works correctly with nested structures. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2974)
- When the `input_format_skip_unknown_fields` setting is enabled, object fields in the `JSONEachRow` format are skipped correctly. [BlahGeek](https://github.com/ClickHouse/ClickHouse/pull/2958)
- For a `CASE` expression with conditions, you can now omit `ELSE`, which is equivalent to `ELSE NULL`. [\#2920](https://github.com/ClickHouse/ClickHouse/pull/2920)
- The operation timeout can now be configured when working with ZooKeeper. [urykhy](https://github.com/ClickHouse/ClickHouse/pull/2971)
- You can specify an offset for `LIMIT n, m` as `LIMIT n OFFSET m` (see the sketch after this list). [\#2840](https://github.com/ClickHouse/ClickHouse/pull/2840)
- You can use the `SELECT TOP n` syntax as an alternative for `LIMIT`. [\#2840](https://github.com/ClickHouse/ClickHouse/pull/2840)
- Increased the size of the queue to write to system tables, so the `SystemLog parameter queue is full` error doesn't happen as often.
- The `windowFunnel` aggregate function now supports events that meet multiple conditions. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2801)
- Duplicate columns can be used in the `USING` clause of a `JOIN`. [\#3006](https://github.com/ClickHouse/ClickHouse/pull/3006)
- `Pretty` formats now have a limit on column alignment by width. Use the `output_format_pretty_max_column_pad_width` setting. If a value is wider, it will still be displayed in its entirety, but the other cells in the table will not be too wide. [\#3003](https://github.com/ClickHouse/ClickHouse/pull/3003)
- The `odbc` table function now allows you to specify the database/schema name.
[Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2885)
- Added the ability to use a username specified in the `clickhouse-client` config file. [Vladimir Kozbin](https://github.com/ClickHouse/ClickHouse/pull/2909)
- The `ZooKeeperExceptions` counter has been split into three counters: `ZooKeeperUserExceptions`, `ZooKeeperHardwareExceptions`, and `ZooKeeperOtherExceptions`.
- `ALTER DELETE` queries work for materialized views.
- Added randomization when running the cleanup thread periodically for `ReplicatedMergeTree` tables in order to avoid periodic load spikes when there are a very large number of `ReplicatedMergeTree` tables.
- Support for `ATTACH TABLE ... ON CLUSTER` queries. [\#3025](https://github.com/ClickHouse/ClickHouse/pull/3025)
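A minimal sketch of the equivalent `LIMIT`/`OFFSET`/`TOP` spellings mentioned in the list above:

```
SELECT number FROM system.numbers LIMIT 5, 10;        -- skip 5 rows, return 10
SELECT number FROM system.numbers LIMIT 10 OFFSET 5;  -- the same
SELECT TOP 10 number FROM system.numbers;             -- the same as LIMIT 10
```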
#### Bug fixes: {#bug-fixes-13}

- Fixed an issue with `Dictionary` tables (throws the `Size of offsets doesn't match size of column` or `Unknown compression method` exception). This bug appeared in version 18.10.3. [\#2913](https://github.com/ClickHouse/ClickHouse/issues/2913)
- Fixed a bug when merging `CollapsingMergeTree` tables if one of the data parts is empty (these parts are formed during a merge or `ALTER DELETE` if all data was deleted), and the `vertical` algorithm was used for the merge. [\#3049](https://github.com/ClickHouse/ClickHouse/pull/3049)
- Fixed a race condition during `DROP` or `TRUNCATE` for `Memory` tables with a simultaneous `SELECT`, which could lead to server crashes. This bug appeared in version 1.1.54388. [\#3038](https://github.com/ClickHouse/ClickHouse/pull/3038)
- Fixed the possibility of data loss when inserting into `Replicated` tables if the `Session is expired` error is returned (data loss can be detected by the `ReplicatedDataLoss` metric). This error occurred in version 1.1.54378. [\#2939](https://github.com/ClickHouse/ClickHouse/pull/2939) [\#2949](https://github.com/ClickHouse/ClickHouse/pull/2949) [\#2964](https://github.com/ClickHouse/ClickHouse/pull/2964)
- Fixed a segfault during `JOIN ... ON`. [\#3000](https://github.com/ClickHouse/ClickHouse/pull/3000)
- Fixed the error searching column names when the `WHERE` expression consists entirely of a qualified column name, such as `WHERE table.column`. [\#2994](https://github.com/ClickHouse/ClickHouse/pull/2994)
- Fixed the `Not found column` error that occurred when executing distributed queries if a single column consisting of an IN expression with a subquery is requested from a remote server. [\#3087](https://github.com/ClickHouse/ClickHouse/pull/3087)
- Fixed the `Block structure mismatch in UNION stream: different number of columns` error that occurred for distributed queries if one of the shards is local and the other is not, and optimization of the move to `PREWHERE` is triggered. [\#2226](https://github.com/ClickHouse/ClickHouse/pull/2226) [\#3037](https://github.com/ClickHouse/ClickHouse/pull/3037) [\#3055](https://github.com/ClickHouse/ClickHouse/pull/3055) [\#3065](https://github.com/ClickHouse/ClickHouse/pull/3065) [\#3073](https://github.com/ClickHouse/ClickHouse/pull/3073) [\#3090](https://github.com/ClickHouse/ClickHouse/pull/3090) [\#3093](https://github.com/ClickHouse/ClickHouse/pull/3093)
- Fixed the `pointInPolygon` function for certain cases of non-convex polygons. [\#2910](https://github.com/ClickHouse/ClickHouse/pull/2910)
- Fixed the incorrect result when comparing `nan` with integers. [\#3024](https://github.com/ClickHouse/ClickHouse/pull/3024)
- Fixed an error in the `zlib-ng` library that could lead to a segfault in rare cases. [\#2854](https://github.com/ClickHouse/ClickHouse/pull/2854)
- Fixed a memory leak when inserting into a table with `AggregateFunction` columns, if the state of the aggregate function is not simple (allocates memory separately), and if a single insertion request results in multiple small blocks. [\#3084](https://github.com/ClickHouse/ClickHouse/pull/3084)
- Fixed a race condition when creating and deleting the same `Buffer` or `MergeTree` table simultaneously.
- Fixed the possibility of a segfault when comparing tuples made up of certain non-trivial types, such as tuples. [\#2989](https://github.com/ClickHouse/ClickHouse/pull/2989)
- Fixed the possibility of a segfault when running certain `ON CLUSTER` queries. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2960)
- Fixed an error in the `arrayDistinct` function for `Nullable` array elements. [\#2845](https://github.com/ClickHouse/ClickHouse/pull/2845) [\#2937](https://github.com/ClickHouse/ClickHouse/pull/2937)
- The `enable_optimize_predicate_expression` option now correctly supports cases with `SELECT *`. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2929)
- Fixed a segfault when re-initializing the ZooKeeper session. [\#2917](https://github.com/ClickHouse/ClickHouse/pull/2917)
- Fixed potential blocking when working with ZooKeeper.
- Fixed incorrect code for adding nested data structures in a `SummingMergeTree`.
- When allocating memory for states of aggregate functions, alignment is correctly taken into account, which makes it possible to use operations that require alignment when implementing states of aggregate functions. [chenxing-xc](https://github.com/ClickHouse/ClickHouse/pull/2808)

#### Security fix: {#security-fix}

- Safe use of ODBC data sources. Interaction with ODBC drivers uses a separate `clickhouse-odbc-bridge` process. Errors in third-party ODBC drivers no longer cause problems with server stability or vulnerabilities. [\#2828](https://github.com/ClickHouse/ClickHouse/pull/2828) [\#2879](https://github.com/ClickHouse/ClickHouse/pull/2879) [\#2886](https://github.com/ClickHouse/ClickHouse/pull/2886) [\#2893](https://github.com/ClickHouse/ClickHouse/pull/2893) [\#2921](https://github.com/ClickHouse/ClickHouse/pull/2921)
- Fixed incorrect validation of the file path in the `catBoostPool` table function. [\#2894](https://github.com/ClickHouse/ClickHouse/pull/2894)
- The contents of system tables (`tables`, `databases`, `parts`, `columns`, `parts_columns`, `merges`, `mutations`, `replicas`, and `replication_queue`) are filtered according to the user's configured access to databases (`allow_databases`). [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2856)

#### Backward incompatible changes: {#backward-incompatible-changes-3}

- In queries with JOIN, the star character expands to a list of columns in all tables, in compliance with the SQL standard. You can restore the old behavior by setting `asterisk_left_columns_only` to 1 on the user configuration level (see the sketch below).
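A minimal sketch of the new asterisk expansion; the tables `t1` and `t2` and the key `k` are hypothetical:

```
SELECT * FROM t1 ANY INNER JOIN t2 USING (k);  -- now returns the columns of both t1 and t2
SET asterisk_left_columns_only = 1;            -- restores the old behavior (columns of t1 only)
```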
#### Build changes: {#build-changes-2}

- Most integration tests can now be run by commit.
- Code style checks can also be run by commit.
- The `memcpy` implementation is chosen correctly when building on CentOS7/Fedora. [Etienne Champetier](https://github.com/ClickHouse/ClickHouse/pull/2912)
- When using clang to build, some warnings from `-Weverything` have been added, in addition to the regular `-Wall -Wextra -Werror`. [\#2957](https://github.com/ClickHouse/ClickHouse/pull/2957)
- Debug builds use the `jemalloc` debug option.
- The interface of the library for interacting with ZooKeeper is declared abstract. [\#2950](https://github.com/ClickHouse/ClickHouse/pull/2950)

## ClickHouse release 18.10 {#clickhouse-release-18-10}

### ClickHouse release 18.10.3, 2018-08-13 {#clickhouse-release-18-10-3-2018-08-13}

#### New features: {#new-features-5}

- HTTPS can be used for replication. [\#2760](https://github.com/ClickHouse/ClickHouse/pull/2760)
- Added the functions `murmurHash2_64`, `murmurHash3_32`, `murmurHash3_64`, and `murmurHash3_128` in addition to the existing `murmurHash2_32`. [\#2791](https://github.com/ClickHouse/ClickHouse/pull/2791)
- Support for Nullable types in the ClickHouse ODBC driver (`ODBCDriver2` output format). [\#2834](https://github.com/ClickHouse/ClickHouse/pull/2834)
- Support for `UUID` in the key columns.

#### Improvements: {#improvements-5}

- Clusters can be removed without restarting the server when they are deleted from the config files. [\#2777](https://github.com/ClickHouse/ClickHouse/pull/2777)
- External dictionaries can be removed without restarting the server when they are removed from config files. [\#2779](https://github.com/ClickHouse/ClickHouse/pull/2779)
- Added `SETTINGS` support for the `Kafka` table engine. [Alexander Marshalov](https://github.com/ClickHouse/ClickHouse/pull/2781)
- Improvements for the `UUID` data type (not yet complete). [\#2618](https://github.com/ClickHouse/ClickHouse/pull/2618)
- Support for empty parts after merges in the `SummingMergeTree`, `CollapsingMergeTree` and `VersionedCollapsingMergeTree` engines. [\#2815](https://github.com/ClickHouse/ClickHouse/pull/2815)
- Old records of completed mutations are deleted (`ALTER DELETE`). [\#2784](https://github.com/ClickHouse/ClickHouse/pull/2784)
- Added the `system.merge_tree_settings` table. [Kirill Shvakov](https://github.com/ClickHouse/ClickHouse/pull/2841)
- The `system.tables` table now has dependency columns: `dependencies_database` and `dependencies_table`. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2851)
- Added the `max_partition_size_to_drop` config option. [\#2782](https://github.com/ClickHouse/ClickHouse/pull/2782)
- Added the `output_format_json_escape_forward_slashes` option. [Alexander Bocharov](https://github.com/ClickHouse/ClickHouse/pull/2812)
- Added the `max_fetch_partition_retries_count` setting. [\#2831](https://github.com/ClickHouse/ClickHouse/pull/2831)
- Added the `prefer_localhost_replica` setting for disabling the preference for a local replica and going to a local replica without inter-process interaction. [\#2832](https://github.com/ClickHouse/ClickHouse/pull/2832)
- The `quantileExact` aggregate function returns `nan` in the case of aggregation on an empty `Float32` or `Float64` set (a sketch follows this list). [Sundy Li](https://github.com/ClickHouse/ClickHouse/pull/2855)
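A minimal sketch of the `quantileExact` behavior described in the last item:

```
-- Aggregation over an empty Float64 set now returns nan instead of throwing:
SELECT quantileExact(x) FROM (SELECT toFloat64(1) AS x) WHERE 0;
```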
#### Bug fixes: {#bug-fixes-14}

- Removed unnecessary escaping of the connection string parameters for ODBC, which made it impossible to establish a connection. This error occurred in version 18.6.0.
- Fixed the logic for processing `REPLACE PARTITION` commands in the replication queue. If there are two `REPLACE` commands for the same partition, the incorrect logic could cause one of them to remain in the replication queue and not be executed. [\#2814](https://github.com/ClickHouse/ClickHouse/pull/2814)
- Fixed a merge bug when all data parts were empty (parts that were formed from a merge or from `ALTER DELETE` if all data was deleted). This bug appeared in version 18.1.0. [\#2930](https://github.com/ClickHouse/ClickHouse/pull/2930)
- Fixed an error when using `Set` or `Join` concurrently. [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2823)
- Fixed the `Block structure mismatch in UNION stream: different number of columns` error that occurred for `UNION ALL` queries inside a subquery if one of the `SELECT` queries contains duplicate column names. [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2094)
- Fixed a memory leak if an exception occurred when connecting to a MySQL server.
- Fixed the incorrect clickhouse-client response code in case of a query error.
- Fixed incorrect behavior of materialized views containing DISTINCT. [\#2795](https://github.com/ClickHouse/ClickHouse/issues/2795)

#### Backward incompatible changes {#backward-incompatible-changes-4}

- Removed support for CHECK TABLE queries for Distributed tables.

#### Build changes: {#build-changes-3}

- The allocator has been replaced: `jemalloc` is now used instead of `tcmalloc`. In some scenarios, this increases speed up to 20%. However, there are queries that have slowed by up to 20%. Memory consumption has been reduced by approximately 10% in some scenarios, with improved stability. With highly competitive loads, CPU usage in userspace and in system shows just a slight increase. [\#2773](https://github.com/ClickHouse/ClickHouse/pull/2773)
- Use of libressl from a submodule. [\#1983](https://github.com/ClickHouse/ClickHouse/pull/1983) [\#2807](https://github.com/ClickHouse/ClickHouse/pull/2807)
- Use of unixodbc from a submodule. [\#2789](https://github.com/ClickHouse/ClickHouse/pull/2789)
- Use of mariadb-connector-c from a submodule. [\#2785](https://github.com/ClickHouse/ClickHouse/pull/2785)
- Added functional test files to the repository that depend on the availability of test data (for the time being, without the test data itself).

## ClickHouse release 18.6 {#clickhouse-release-18-6}

### ClickHouse release 18.6.0, 2018-08-02 {#clickhouse-release-18-6-0-2018-08-02}

#### New features: {#new-features-6}

- Added support for ON expressions for the JOIN ON syntax:
  `JOIN ON Expr([table.]column ...) = Expr([table.]column, ...) [AND Expr([table.]column, ...) = Expr([table.]column, ...) ...]`
  The expression must be a chain of equalities joined by the AND operator. Each side of the equality can be an arbitrary expression over the columns of one of the tables. The use of fully qualified column names is supported (`table.name`, `database.table.name`, `table_alias.name`, `subquery_alias.name`) for the right table (a sketch follows this list). [\#2742](https://github.com/ClickHouse/ClickHouse/pull/2742)
- HTTPS can be enabled for replication. [\#2760](https://github.com/ClickHouse/ClickHouse/pull/2760)
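A minimal sketch of the new `JOIN ON` expression syntax; the tables `t1` and `t2` and their columns are hypothetical:

```
SELECT *
FROM t1
ALL INNER JOIN t2 ON lower(t1.name) = lower(t2.name) AND t1.id + 1 = t2.id
```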
#### Improvements: {#improvements-6}

- The server passes the patch component of its version to the client. Data about the patch version component is in `system.processes` and `query_log`. [\#2646](https://github.com/ClickHouse/ClickHouse/pull/2646)

## ClickHouse release 18.5 {#clickhouse-release-18-5}

### ClickHouse release 18.5.1, 2018-07-31 {#clickhouse-release-18-5-1-2018-07-31}

#### New features: {#new-features-7}

- Added the hash function `murmurHash2_32` [\#2756](https://github.com/ClickHouse/ClickHouse/pull/2756).

#### Improvements: {#improvements-7}

- Now you can use the `from_env` [\#2741](https://github.com/ClickHouse/ClickHouse/pull/2741) attribute to set values in config files from environment variables.
- Added case-insensitive versions of the `coalesce`, `ifNull`, and `nullIf` functions [\#2752](https://github.com/ClickHouse/ClickHouse/pull/2752).

#### Bug fixes: {#bug-fixes-15}

- Fixed a possible bug when starting a replica [\#2759](https://github.com/ClickHouse/ClickHouse/pull/2759).

## ClickHouse release 18.4 {#clickhouse-release-18-4}

### ClickHouse release 18.4.0, 2018-07-28 {#clickhouse-release-18-4-0-2018-07-28}

#### New features: {#new-features-8}

- Added system tables: `formats`, `data_type_families`, `aggregate_function_combinators`, `table_functions`, `table_engines`, `collations` [\#2721](https://github.com/ClickHouse/ClickHouse/pull/2721).
- Added the ability to use a table function instead of a table as an argument of a `remote` or `cluster table function` [\#2708](https://github.com/ClickHouse/ClickHouse/pull/2708).
- Support for `HTTP Basic` authentication in the replication protocol [\#2727](https://github.com/ClickHouse/ClickHouse/pull/2727).
- The `has` function now allows searching for a numeric value in an array of `Enum` values [Maxim Khrisanfov](https://github.com/ClickHouse/ClickHouse/pull/2699).
- Support for adding arbitrary message separators when reading from `Kafka` [Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2701).

#### Improvements: {#improvements-8}

- The `ALTER TABLE t DELETE WHERE` query does not rewrite data parts that were not affected by the WHERE condition [\#2694](https://github.com/ClickHouse/ClickHouse/pull/2694).
- The `use_minimalistic_checksums_in_zookeeper` option for `ReplicatedMergeTree` tables is enabled by default. This setting was added in version 1.1.54378, 2018-04-16. Versions that are older than 1.1.54378 can no longer be installed.
- Support for running `KILL` and `OPTIMIZE` queries that specify `ON CLUSTER` (a sketch follows this list) [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2689).
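A minimal sketch of `KILL` and `OPTIMIZE` with `ON CLUSTER`; the cluster, database, table, and query ID are hypothetical:

```
OPTIMIZE TABLE db.events ON CLUSTER my_cluster FINAL;
KILL QUERY ON CLUSTER my_cluster WHERE query_id = 'abc-123';
```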
#### Bug fixes: {#bug-fixes-16}

- Fixed the `Column ... is not under an aggregate function and not in GROUP BY` error for aggregation with an expression. This bug appeared in version 18.1.0. ([bbdd780b](https://github.com/ClickHouse/ClickHouse/commit/bbdd780be0be06a0f336775941cdd536878dd2c2))
- Fixed a bug in the `windowFunnel aggregate function` [Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2735).
- Fixed a bug in the `anyHeavy` aggregate function ([a2101df2](https://github.com/ClickHouse/ClickHouse/commit/a2101df25a6a0fba99aa71f8793d762af2b801ee))
- Fixed server crash when using the `countArray()` aggregate function.

#### Backward incompatible changes: {#backward-incompatible-changes-5}

- Parameters for the `Kafka` engine were changed from `Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format[, kafka_schema, kafka_num_consumers])` to `Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format[, kafka_row_delimiter, kafka_schema, kafka_num_consumers])`. If your tables use the `kafka_schema` or `kafka_num_consumers` parameters, you have to manually edit the metadata files `path/metadata/database/table.sql` and add the `kafka_row_delimiter` parameter with the `''` value.

## ClickHouse release 18.1 {#clickhouse-release-18-1}

### ClickHouse release 18.1.0, 2018-07-23 {#clickhouse-release-18-1-0-2018-07-23}

#### New features: {#new-features-9}

- Support for the `ALTER TABLE t DELETE WHERE` query for non-replicated MergeTree tables ([\#2634](https://github.com/ClickHouse/ClickHouse/pull/2634)).
- Support for arbitrary types for the `uniq*` family of aggregate functions ([\#2010](https://github.com/ClickHouse/ClickHouse/issues/2010)).
- Support for arbitrary types in comparison operators ([\#2026](https://github.com/ClickHouse/ClickHouse/issues/2026)).
- The `users.xml` file allows setting a subnet mask in the format `10.0.0.1/255.255.255.0`. This is necessary for using masks for IPv6 networks with zeros in the middle ([\#2637](https://github.com/ClickHouse/ClickHouse/pull/2637)).
- Added the `arrayDistinct` function ([\#2670](https://github.com/ClickHouse/ClickHouse/pull/2670)).
- The SummingMergeTree engine can now work with AggregateFunction type columns ([Constantine Pan](https://github.com/ClickHouse/ClickHouse/pull/2566)).

#### Improvements: {#improvements-9}

- Changed the numbering scheme for release versions. Now the first part contains the year of release (A.D., Moscow time zone, minus 2000), the second part contains the number for major changes (increases for most releases), and the third part is the patch version. Releases are still backwards compatible, unless otherwise stated in the changelog.
- Faster conversions of floating-point numbers to a string ([Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2664)).
- If some rows were skipped during an insert due to parsing errors (this is possible with the `input_allow_errors_num` and `input_allow_errors_ratio` settings enabled), the number of skipped rows is now written to the server log ([Leonardo Cecchi](https://github.com/ClickHouse/ClickHouse/pull/2669)).

#### Bug fixes: {#bug-fixes-17}

- Fixed the TRUNCATE command for temporary tables ([Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2624)).
- Fixed a rare deadlock in the ZooKeeper client library that occurred when there was a network error while reading the response ([c315200](https://github.com/ClickHouse/ClickHouse/commit/c315200e64b87e44bdf740707fc857d1fdf7e947)).
- Fixed an error during a CAST to Nullable types ([\#1322](https://github.com/ClickHouse/ClickHouse/issues/1322)).
- Fixed the incorrect result of the `maxIntersection()` function when the boundaries of intervals coincided ([Michael Furmur](https://github.com/ClickHouse/ClickHouse/pull/2657)).
- Fixed incorrect conversion of an OR expression chain in a function argument ([chenxing-xc](https://github.com/ClickHouse/ClickHouse/pull/2663)).
- Fixed performance degradation for queries containing `IN (subquery)` expressions inside another subquery ([\#2571](https://github.com/ClickHouse/ClickHouse/issues/2571)).
- Fixed incompatibility between servers with different versions in distributed queries that use a `CAST` function that isn't in uppercase letters ([fe8c4d6](https://github.com/ClickHouse/ClickHouse/commit/fe8c4d64e434cacd4ceef34faa9005129f2190a5)).
- Added missing quoting of identifiers for queries to an external DBMS ([\#2635](https://github.com/ClickHouse/ClickHouse/issues/2635)).

#### Backward incompatible changes: {#backward-incompatible-changes-6}

- Converting a string containing the number zero to DateTime does not work. Example: `SELECT toDateTime('0')`. This is also the reason that `DateTime DEFAULT '0'` does not work in tables, as well as `<null_value>0</null_value>` in dictionaries. Solution: replace `0` with `0000-00-00 00:00:00`.

## ClickHouse release 1.1 {#clickhouse-release-1-1}

### ClickHouse release 1.1.54394, 2018-07-12 {#clickhouse-release-1-1-54394-2018-07-12}

#### New features: {#new-features-10}

- Added the `histogram` aggregate function ([Mikhail Surin](https://github.com/ClickHouse/ClickHouse/pull/2521)).
- Now `OPTIMIZE TABLE ... FINAL` can be used without specifying partitions for `ReplicatedMergeTree` ([Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2600)).

#### Bug fixes: {#bug-fixes-18}

- Fixed a problem with a very small timeout for sockets (one second) for reading and writing when sending and downloading replicated data, which made it impossible to download larger parts if there is a load on the network or disk (it resulted in cyclical attempts to download parts). This error occurred in version 1.1.54388.
- Fixed issues when using chroot in ZooKeeper if you inserted duplicate data blocks in the table.
- The `has` function now works correctly for an array with Nullable elements ([\#2115](https://github.com/ClickHouse/ClickHouse/issues/2115)).
- The `system.tables` table now works correctly when used in distributed queries. The `metadata_modification_time` and `engine_full` columns are now non-virtual. Fixed an error that occurred if only these columns were queried from the table.
- Fixed how an empty `TinyLog` table works after inserting an empty data block ([\#2563](https://github.com/ClickHouse/ClickHouse/issues/2563)).
- The `system.zookeeper` table works if the value of the node in ZooKeeper is NULL.

### ClickHouse release 1.1.54390, 2018-07-06 {#clickhouse-release-1-1-54390-2018-07-06}

#### New features: {#new-features-11}

- Queries can be sent in `multipart/form-data` format (in the `query` field), which is useful if external data is also sent for query processing ([Olga Khvostikova](https://github.com/ClickHouse/ClickHouse/pull/2490)).
- Added the ability to enable or disable processing of single or double quotes when reading data in CSV format. You can configure this in the `format_csv_allow_single_quotes` and `format_csv_allow_double_quotes` settings (see the sketch after this list) ([Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2574)).
- Now `OPTIMIZE TABLE ... FINAL` can be used without specifying the partition for non-replicated variants of `MergeTree` ([Amos Bird](https://github.com/ClickHouse/ClickHouse/pull/2599)).
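A minimal sketch of the CSV quoting settings mentioned above:

```
SET format_csv_allow_single_quotes = 0;  -- don't treat ' as a quote character in CSV fields
SET format_csv_allow_double_quotes = 1;  -- keep handling of " enabled
```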
#### Improvements: {#improvements-10}

- Improved performance, reduced memory consumption, and correct memory consumption tracking with use of the IN operator when a table index could be used ([\#2584](https://github.com/ClickHouse/ClickHouse/pull/2584)).
- Removed redundant checking of checksums when adding a data part. This is important when there are a large number of replicas, because in these cases the total number of checks was equal to N^2.
- Added support for `Array(Tuple(...))` arguments for the `arrayEnumerateUniq` function ([\#2573](https://github.com/ClickHouse/ClickHouse/pull/2573)).
- Added `Nullable` support for the `runningDifference` function ([\#2594](https://github.com/ClickHouse/ClickHouse/pull/2594)).
- Improved query analysis performance when there is a very large number of expressions ([\#2572](https://github.com/ClickHouse/ClickHouse/pull/2572)).
- Faster selection of data parts to merge in `ReplicatedMergeTree` tables. Faster recovery of the ZooKeeper session ([\#2597](https://github.com/ClickHouse/ClickHouse/pull/2597)).
- The `format_version.txt` file for `MergeTree` tables is re-created if it is missing, which makes sense if ClickHouse is launched after copying the directory structure without files ([Ciprian Hacman](https://github.com/ClickHouse/ClickHouse/pull/2593)).

#### Bug fixes: {#bug-fixes-19}

- Fixed a bug when working with ZooKeeper that could make it impossible to recover the session and read-only states of tables before restarting the server.
- Fixed a bug when working with ZooKeeper that could result in old nodes not being deleted if the session is interrupted.
- Fixed an error in the `quantileTDigest` function for Float arguments (this bug was introduced in version 1.1.54388) ([Mikhail Surin](https://github.com/ClickHouse/ClickHouse/pull/2553)).
- Fixed a bug in the index for MergeTree tables if the primary key column is located inside the function for converting types between signed and unsigned integers of the same size ([\#2603](https://github.com/ClickHouse/ClickHouse/pull/2603)).
- Fixed a segfault if `macros` are used but they aren't in the config file ([\#2570](https://github.com/ClickHouse/ClickHouse/pull/2570)).
- Fixed switching to the default database when reconnecting the client ([\#2583](https://github.com/ClickHouse/ClickHouse/pull/2583)).
- Fixed a bug that occurred when the `use_index_for_in_with_subqueries` setting was disabled.

#### Security fix: {#security-fix-1}

- Sending files is no longer possible when connected to MySQL (`LOAD DATA LOCAL INFILE`).

### ClickHouse release 1.1.54388, 2018-06-28 {#clickhouse-release-1-1-54388-2018-06-28}

#### New features: {#new-features-12}

- Support for the `ALTER TABLE t DELETE WHERE` query for replicated tables. Added the `system.mutations` table to track progress of this type of queries.
- Support for the `ALTER TABLE t [REPLACE|ATTACH] PARTITION` query for \*MergeTree tables.
- Support for the `TRUNCATE TABLE` query ([Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2260))
- Several new `SYSTEM` queries for replicated tables (`RESTART REPLICAS`, `SYNC REPLICA`, `[STOP|START] [MERGES|FETCHES|SENDS REPLICATED|REPLICATION QUEUES]`).
- Added the ability to write to a table with the MySQL engine and the corresponding table function ([sundy-li](https://github.com/ClickHouse/ClickHouse/pull/2294)).
- Added the `url()` table function and the `URL` table engine ([Alexander Sapin](https://github.com/ClickHouse/ClickHouse/pull/2501)).
- Added the `windowFunnel` aggregate function (see the sketch after this list) ([sundy-li](https://github.com/ClickHouse/ClickHouse/pull/2352)).
- New `startsWith` and `endsWith` functions for strings ([Vadim Plakhtinsky](https://github.com/ClickHouse/ClickHouse/pull/2429)).
- The `numbers()` table function now allows you to specify the offset ([Winter Zhang](https://github.com/ClickHouse/ClickHouse/pull/2535)).
- The password to `clickhouse-client` can be entered interactively.
- Server logs can now be sent to syslog ([Alexander Krasheninnikov](https://github.com/ClickHouse/ClickHouse/pull/2459)).
- Support for dictionaries loaded from a shared library source ([Alexander Sapin](https://github.com/ClickHouse/ClickHouse/pull/2472)).
- Support for custom CSV delimiters ([Ivan Zhukov](https://github.com/ClickHouse/ClickHouse/pull/2263))
- Added the `date_time_input_format` setting. If you switch this setting to `'best_effort'`, DateTime values will be read in a wide range of formats.
- Added the `clickhouse-obfuscator` utility for data obfuscation. Usage example: publishing data used in performance tests.
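A minimal usage sketch for `windowFunnel`; the `events` table, its columns, and the event names are hypothetical:

```
-- How far each user got through the view -> cart -> purchase chain within one hour:
SELECT user_id,
       windowFunnel(3600)(event_time, event = 'view', event = 'cart', event = 'purchase') AS level
FROM events
GROUP BY user_id;
```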
#### Experimental features: {#experimental-features-2}

- Added the ability to calculate `and` arguments only where they are needed ([Anastasia Tsarkova](https://github.com/ClickHouse/ClickHouse/pull/2272))
- JIT compilation to native code is now available for some expressions ([pyos](https://github.com/ClickHouse/ClickHouse/pull/2277)).

#### Bug fixes: {#bug-fixes-20}

- Duplicates no longer appear for a query with `DISTINCT` and `ORDER BY`.
- Queries with `ARRAY JOIN` and `arrayFilter` no longer return an incorrect result.
- Fixed an error when reading an array column from a nested structure ([\#2066](https://github.com/ClickHouse/ClickHouse/issues/2066)).
- Fixed an error when analyzing queries with a HAVING clause like `HAVING tuple IN (...)`.
- Fixed an error when analyzing queries with recursive aliases.
- Fixed an error when reading from ReplacingMergeTree with a condition in PREWHERE that filters all rows ([\#2525](https://github.com/ClickHouse/ClickHouse/issues/2525)).
- User profile settings were not applied when using sessions in the HTTP interface.
- Fixed how settings are applied from the command line parameters in clickhouse-local.
- The ZooKeeper client library now uses the session timeout received from the server.
- Fixed a bug in the ZooKeeper client library when the client waited for the server response longer than the timeout.
- Fixed pruning of parts for queries with conditions on partition key columns ([\#2342](https://github.com/ClickHouse/ClickHouse/issues/2342)).
- Merges are now possible after `CLEAR COLUMN IN PARTITION` ([\#2315](https://github.com/ClickHouse/ClickHouse/issues/2315)).
- Fixed type mapping in the ODBC table function ([sundy-li](https://github.com/ClickHouse/ClickHouse/pull/2268)).
- Type comparisons have been fixed for `DateTime` with and without a time zone ([Alexander Bocharov](https://github.com/ClickHouse/ClickHouse/pull/2400)).
- Fixed syntactic parsing and formatting of the `CAST` operator.
- Fixed insertion into a materialized view for the Distributed table engine ([Babacar Diassé](https://github.com/ClickHouse/ClickHouse/pull/2411)).
- Fixed a race condition when writing data from the `Kafka` engine to materialized views ([Yangkuan Liu](https://github.com/ClickHouse/ClickHouse/pull/2448)).
- Fixed SSRF in the remote() table function.
- Fixed exit behavior of `clickhouse-client` in multiline mode ([\#2510](https://github.com/ClickHouse/ClickHouse/issues/2510)).

#### Improvements: {#improvements-11}

- Background tasks in replicated tables are now performed in a thread pool instead of in separate threads ([Silviu Caragea](https://github.com/ClickHouse/ClickHouse/pull/1722)).
- Improved LZ4 compression performance.
- Faster analysis for queries with a large number of JOINs and subqueries.
- The DNS cache is now updated automatically when there are too many network errors.
- Table inserts no longer occur if the insert into one of the materialized views is not possible because it has too many parts.
- Corrected the discrepancy in the event counters `Query`, `SelectQuery`, and `InsertQuery`.
- Expressions like `tuple IN (SELECT tuple)` are allowed if the tuple types match.
- A server with replicated tables can start even if you haven't configured ZooKeeper.
- When calculating the number of available CPU cores, limits on cgroups are now taken into account ([Atri Sharma](https://github.com/ClickHouse/ClickHouse/pull/2325)).
- Added chown for config directories in the systemd config file ([Mikhail Shiryaev](https://github.com/ClickHouse/ClickHouse/pull/2421)).

#### Build changes: {#build-changes-4}

- The gcc8 compiler can be used for builds.
- Added the ability to build llvm from submodule.
- The version of the librdkafka library has been updated to v0.11.4.
- Added the ability to use the system libcpuid library. The library version has been updated to 0.4.0.
- Fixed the build using the vectorclass library ([Babacar Diassé](https://github.com/ClickHouse/ClickHouse/pull/2274)).
- Cmake now generates files for ninja by default (like when using `-G Ninja`).
- Added the ability to use the libtinfo library instead of libtermcap ([Georgy Kondratiev](https://github.com/ClickHouse/ClickHouse/pull/2519)).
- Fixed a header file conflict in Fedora Rawhide ([\#2520](https://github.com/ClickHouse/ClickHouse/issues/2520)).

#### Backward incompatible changes: {#backward-incompatible-changes-7}

- Removed escaping in `Vertical` and `Pretty*` formats and deleted the `VerticalRaw` format.
- If servers with version 1.1.54388 (or newer) and servers with an older version are used simultaneously in a distributed query and the query has the `cast(x, 'Type')` expression without the `AS` keyword and doesn't have the word `cast` in uppercase, an exception will be thrown with a message like `Not found column cast(0, 'UInt8') in block`. Solution: update the server on the entire cluster (see the sketch below).
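A minimal sketch of the two `CAST` spellings involved; in mixed-version clusters, the uppercase standard form is the safe one:

```
SELECT cast(0, 'UInt8');    -- functional form, lowercase: triggers the issue above
SELECT CAST(0 AS UInt8);    -- standard form: works across versions
```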
### ClickHouse release 1.1.54385, 2018-06-01 {#clickhouse-release-1-1-54385-2018-06-01}

#### Bug fixes: {#bug-fixes-21}

- Fixed an error that in some cases caused ZooKeeper operations to block.

### ClickHouse release 1.1.54383, 2018-05-22 {#clickhouse-release-1-1-54383-2018-05-22}

#### Bug fixes: {#bug-fixes-22}

- Fixed a slowdown of the replication queue if a table has many replicas.

### ClickHouse release 1.1.54381, 2018-05-14 {#clickhouse-release-1-1-54381-2018-05-14}

#### Bug fixes: {#bug-fixes-23}

- Fixed a node leak in ZooKeeper when ClickHouse loses the connection to the ZooKeeper server.

### ClickHouse release 1.1.54380, 2018-04-21 {#clickhouse-release-1-1-54380-2018-04-21}

#### New features: {#new-features-13}

- Added the table function `file(path, format, structure)`. An example reading bytes from `/dev/urandom`:

  ```
  ln -s /dev/urandom /var/lib/clickhouse/user_files/random
  clickhouse-client -q "SELECT * FROM file('random', 'RowBinary', 'd UInt8') LIMIT 10"
  ```

#### Improvements: {#improvements-12}

- Subqueries can be wrapped in `()` brackets to enhance query readability. For example: `(SELECT 1) UNION ALL (SELECT 1)`.
- Simple `SELECT` queries from the `system.processes` table are not included in the `max_concurrent_queries` limit.

#### Bug fixes: {#bug-fixes-24}

- Fixed incorrect behavior of the `IN` operator when selecting from a `MATERIALIZED VIEW`.
- Fixed incorrect filtering by partition index in expressions like `partition_key_column IN (...)`.
- Fixed inability to execute an `OPTIMIZE` query on a non-leader replica if `RENAME` was performed on the table.
- Fixed the authorization error when executing `OPTIMIZE` or `ALTER` queries on a non-leader replica.
- Fixed freezing of `KILL QUERY`.
- Fixed an error in the ZooKeeper client library which led to loss of watches, freezing of the distributed DDL queue, and slowdowns in the replication queue if a non-empty `chroot` prefix is used in the ZooKeeper configuration.

#### Backward incompatible changes: {#backward-incompatible-changes-8}

- Removed support for expressions like `(a, b) IN (SELECT (a, b))` (you can use the equivalent expression `(a, b) IN (SELECT a, b)`). In previous releases, these expressions led to undetermined `WHERE` filtering or caused errors.

### ClickHouse release 1.1.54378, 2018-04-16 {#clickhouse-release-1-1-54378-2018-04-16}

#### New features: {#new-features-14}

- The logging level can be changed without restarting the server.
- Added the `SHOW CREATE DATABASE` query.
- The `query_id` can be passed to `clickhouse-client` (elBroom).
- New setting: `max_network_bandwidth_for_all_users`.
- Added support for `ALTER TABLE ... PARTITION ...` for `MATERIALIZED VIEW`.
- Added information about the size of data parts in uncompressed form to the system table.
- Server-to-server encryption support for distributed tables (`<secure>1</secure>` in the replica config in `<remote_servers>`).
- Configuration at the table level for the `ReplicatedMergeTree` family in order to minimize the amount of data stored in ZooKeeper: `use_minimalistic_checksums_in_zookeeper = 1`
- Configuration of the `clickhouse-client` prompt. By default, server names are now output to the prompt. The server's display name can be changed; it's also sent in the `X-ClickHouse-Display-Name` HTTP header (Kirill Shvakov).
- Multiple comma-separated `topics` can be specified for the `Kafka` engine (Tobias Adamson); a sketch follows this list.
- When a query is stopped by `KILL QUERY` or `replace_running_query`, the client receives the `Query was canceled` exception instead of an incomplete result.
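A minimal sketch of a `Kafka` table reading from several comma-separated topics, using the engine's positional parameters from this era; all names are hypothetical:

```
CREATE TABLE queue (message String)
ENGINE = Kafka('localhost:9092', 'events_eu,events_us', 'group1', 'JSONEachRow');
```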
-- Multiple comma-separated `topics` can be specified for the `Kafka` engine (Tobias Adamson).
-- When a query is stopped by `KILL QUERY` or `replace_running_query`, the client receives the `Query was canceled` exception instead of an incomplete result.
-
-#### Improvements: {#improvements-13}
-
-- `ALTER TABLE ... DROP/DETACH PARTITION` queries are run at the front of the replication queue.
-- `SELECT ... FINAL` and `OPTIMIZE ... FINAL` can be used even when the table has a single data part.
-- A `query_log` table is recreated on the fly if it was deleted manually (Kirill Shvakov).
-- The `lengthUTF8` function runs faster (zhang2014).
-- Improved performance of synchronous inserts in `Distributed` tables (`insert_distributed_sync = 1`) when there is a very large number of shards.
-- The server accepts the `send_timeout` and `receive_timeout` settings from the client and applies them when connecting to the client (they are applied in reverse order: the server socket's `send_timeout` is set to the `receive_timeout` value received from the client, and vice versa).
-- More robust crash recovery for asynchronous inserts into `Distributed` tables.
-- The return type of the `countEqual` function changed from `UInt32` to `UInt64` (谢磊).
-
-#### Bug fixes: {#bug-fixes-25}
-
-- Fixed an error with `IN` when the left side of the expression is `Nullable`.
-- Correct results are now returned when tuples are used with `IN` and some of the tuple components are in the table index.
-- The `max_execution_time` limit now works correctly with distributed queries.
-- Fixed errors when calculating the size of composite columns in the `system.columns` table.
-- Fixed an error when creating a temporary table `CREATE TEMPORARY TABLE IF NOT EXISTS.`
-- Fixed errors in `StorageKafka` (\#\#2075)
-- Fixed server crashes from invalid arguments of certain aggregate functions.
-- Fixed the error that prevented a `DETACH DATABASE` query from stopping background tasks for `ReplicatedMergeTree` tables.
-- The `Too many parts` state is less likely to happen when inserting into aggregating materialized views (\#\#2084).
-- Corrected recursive handling of substitutions in the config if a substitution must be followed by another substitution on the same level.
-- Corrected the syntax in the metadata file when creating a `VIEW` that uses a query with `UNION ALL`.
-- `SummingMergeTree` now works correctly for summation of nested data structures with a composite key.
-- Fixed the possibility of a race condition when choosing the leader for `ReplicatedMergeTree` tables.
-
-#### Build changes: {#build-changes-5}
-
-- The build supports `ninja` instead of `make` and uses `ninja` by default for building releases.
-- Renamed packages: `clickhouse-server-base` is now `clickhouse-common-static`; `clickhouse-server-common` is now `clickhouse-server`; `clickhouse-common-dbg` is now `clickhouse-common-static-dbg`. To install, use `clickhouse-server clickhouse-client`. Packages with the old names will still be loaded in the repositories for backward compatibility.
-
-#### Backward incompatible changes: {#backward-incompatible-changes-9}
-
-- Removed the special interpretation of an IN expression if an array is specified on the left side. Previously, the expression `arr IN (set)` was interpreted as "at least one `arr` element belongs to the `set`".
-  To get the same behavior in the new version, write `arrayExists(x -> x IN (set), arr)`.
-- Disabled the incorrect use of the `SO_REUSEPORT` socket option, which was incorrectly enabled by default in the Poco library. Note that on Linux there is no longer any reason to simultaneously specify the addresses `::` and `0.0.0.0` for listen – use just `::`, which allows listening to the connection both over IPv4 and IPv6 (with the default kernel config settings). You can also revert to the behavior from previous versions by specifying `<listen_reuse_port>1</listen_reuse_port>` in the config.
-
-### ClickHouse release 1.1.54370, 2018-03-16 {#clickhouse-release-1-1-54370-2018-03-16}
-
-#### New features: {#new-features-15}
-
-- Added the `system.macros` table and auto updating of macros when the config file is changed.
-- Added the `SYSTEM RELOAD CONFIG` query.
-- Added the `maxIntersections(left_col, right_col)` aggregate function, which returns the maximum number of simultaneously intersecting intervals `[left; right]`. The `maxIntersectionsPosition(left, right)` function returns the beginning of the "maximum" interval. ([Michael Furmur](https://github.com/ClickHouse/ClickHouse/pull/2012)).
-
-#### Improvements: {#improvements-14}
-
-- When inserting data into a `Replicated` table, fewer requests are made to `ZooKeeper` (and most of the user-level errors have disappeared from the `ZooKeeper` log).
-- Added the ability to create aliases for data sets. Example: `WITH (1, 2, 3) AS set SELECT number IN set FROM system.numbers LIMIT 10`.
-
-#### Bug fixes: {#bug-fixes-26}
-
-- Fixed the `Illegal PREWHERE` error when reading from Merge tables over `Distributed` tables.
-- Added fixes that allow you to run clickhouse-server in IPv4-only Docker containers.
-- Fixed a race condition when reading from the `system.parts_columns` tables.
-- Removed double buffering during a synchronous insert to a `Distributed` table, which could have caused the connection to time out.
-- Fixed a bug that caused excessively long waits for an unavailable replica before beginning a `SELECT` query.
-- Fixed incorrect dates in the `system.parts` table.
-- Fixed a bug that made it impossible to insert data into a `Replicated` table if `chroot` was non-empty in the configuration of the `ZooKeeper` cluster.
-- Fixed the vertical merging algorithm for an empty `ORDER BY` table.
-- Restored the ability to use dictionaries in queries to remote tables, even if these dictionaries are not present on the requesting server. This functionality was lost in release 1.1.54362.
-- Restored the behavior for queries like `SELECT * FROM remote('server2', default.table) WHERE col IN (SELECT col2 FROM default.table)` when the right side of the `IN` should use a remote `default.table` instead of a local one. This behavior was broken in version 1.1.54358.
-- Removed extraneous error-level logging of `Not found column ... in block`.
-
-### ClickHouse release 1.1.54362, 2018-03-11 {#clickhouse-release-1-1-54362-2018-03-11}
-
-#### New features: {#new-features-16}
-
-- Aggregation without `GROUP BY` for an empty set (such as `SELECT count(*) FROM table WHERE 0`) now returns a result with one row with null values for aggregate functions, in compliance with the SQL standard. To restore the old behavior (return an empty result), set `empty_result_for_aggregation_by_empty_set` to 1 (see the sketch below).
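-
-  As a minimal sketch of the two behaviors (assuming some table `t` exists):
-
-  ``` sql
-  SELECT count(*) FROM t WHERE 0                       -- new default: one row with count() = 0
-  SET empty_result_for_aggregation_by_empty_set = 1
-  SELECT count(*) FROM t WHERE 0                       -- old behavior: empty result
-  ```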
-- Added type conversion for `UNION ALL`. Different alias names are allowed in `SELECT` positions in `UNION ALL`, in compliance with the SQL standard.
-- Arbitrary expressions are supported in `LIMIT BY` clauses. Previously, it was only possible to use columns resulting from `SELECT`.
-- The index of `MergeTree` tables is used when `IN` is applied to a tuple of expressions from the columns of the primary key. Example: `WHERE (UserID, EventDate) IN ((123, '2000-01-01'), ...)` (Anastasiya Tsarkova).
-- Added the `clickhouse-copier` tool for copying between clusters and resharding data (beta version).
-- Added consistent hashing functions: `yandexConsistentHash`, `jumpConsistentHash`, `sumburConsistentHash`. They can be used as a sharding key in order to reduce the amount of network traffic during subsequent reshardings.
-- Added functions: `arrayAny`, `arrayAll`, `hasAny`, `hasAll`, `arrayIntersect`, `arrayResize`.
-- Added the `arrayCumSum` function (Javi Santana).
-- Added the `parseDateTimeBestEffort`, `parseDateTimeBestEffortOrZero`, and `parseDateTimeBestEffortOrNull` functions to read a DateTime from a string containing text in a wide variety of possible formats.
-- Data can be partially reloaded from external dictionaries during updating (load just the records in which the value of the specified field is greater than in the previous download) (Arsen Hakobyan).
-- Added the `cluster` table function. Example: `cluster(cluster_name, db, table)`. The `remote` table function can accept the cluster name as the first argument, if it is specified as an identifier.
-- The `remote` and `cluster` table functions can be used in `INSERT` queries.
-- Added the `create_table_query` and `engine_full` virtual columns to the `system.tables` table. The `metadata_modification_time` column is virtual.
-- Added the `data_path` and `metadata_path` columns to the `system.tables` and `system.databases` tables, and added the `path` column to the `system.parts` and `system.parts_columns` tables.
-- Added additional information about merges in the `system.part_log` table.
-- An arbitrary partitioning key can be used for the `system.query_log` table (Kirill Shvakov).
-- The `SHOW TABLES` query now also shows temporary tables. Added temporary tables and the `is_temporary` column to `system.tables` (zhang2014).
-- Added the `DROP TEMPORARY TABLE` and `EXISTS TEMPORARY TABLE` queries (zhang2014).
-- Support for `SHOW CREATE TABLE` for temporary tables (zhang2014).
-- Added the `system_profile` configuration parameter for the settings used by internal processes.
-- Support for loading `object_id` as an attribute in `MongoDB` dictionaries (Pavel Litvinenko).
-- Reading `null` as the default value when loading data for an external dictionary with the `MongoDB` source (Pavel Litvinenko).
-- Reading `DateTime` values in the `Values` format from a Unix timestamp without single quotes.
-- Failover is supported in `remote` table functions for cases when some of the replicas are missing the requested table.
-- Configuration settings can be overridden in the command line when you run `clickhouse-server`. Example: `clickhouse-server -- --logger.level=information`.
-- Implemented the `empty` function for a `FixedString` argument: the function returns 1 if the string consists entirely of null bytes (zhang2014).
-- Added the `listen_try` configuration parameter for listening to at least one of the listen addresses without quitting, if some of the addresses can't be listened to (useful for systems with disabled support for IPv4 or IPv6).
-- Added the `VersionedCollapsingMergeTree` table engine.
-- Support for strings and arbitrary numeric types for the `library` dictionary source.
-- `MergeTree` tables can be used without a primary key (you need to specify `ORDER BY tuple()`).
-- A `Nullable` type can be `CAST` to a non-`Nullable` type if the argument is not `NULL`.
-- `RENAME TABLE` can be performed for `VIEW`.
-- Added the `throwIf` function.
-- Added the `odbc_default_field_size` option, which allows you to extend the maximum size of the value loaded from an ODBC source (by default, it is 1024).
-- The `system.processes` table and `SHOW PROCESSLIST` now have the `is_cancelled` and `peak_memory_usage` columns.
-
-#### Improvements: {#improvements-15}
-
-- Limits and quotas on the result are no longer applied to intermediate data for `INSERT SELECT` queries or for `SELECT` subqueries.
-- Fewer false triggers of `force_restore_data` when checking the status of `Replicated` tables when the server starts.
-- Added the `allow_distributed_ddl` option.
-- Nondeterministic functions are not allowed in expressions for `MergeTree` table keys.
-- Files with substitutions from the `config.d` directories are loaded in alphabetical order.
-- Improved performance of the `arrayElement` function in the case of a constant multidimensional array with an empty array as one of the elements. Example: `[[1], []][x]`.
-- The server starts faster now when using configuration files with very large substitutions (for instance, very large lists of IP networks).
-- When running a query, table-valued functions run once. Previously, the `remote` and `mysql` table-valued functions performed the same query twice to retrieve the table structure from a remote server.
-- The `MkDocs` documentation generator is used.
-- When you try to delete a table column that `DEFAULT`/`MATERIALIZED` expressions of other columns depend on, an exception is thrown (zhang2014).
-- Added the ability to parse an empty line in text formats as the number 0 for `Float` data types. This feature was previously available but was lost in release 1.1.54342.
-- `Enum` values can be used in the `min`, `max`, `sum` and some other functions. In these cases, they use the corresponding numeric values. This feature was previously available but was lost in release 1.1.54337.
-- Added `max_expanded_ast_elements` to restrict the size of the AST after recursively expanding aliases.
-
-#### Bug fixes: {#bug-fixes-27}
-
-- Fixed cases when unnecessary columns were removed from subqueries in error, or were not removed from subqueries containing `UNION ALL`.
-- Fixed a bug in merges for `ReplacingMergeTree` tables.
-- Fixed synchronous insertions into `Distributed` tables (`insert_distributed_sync = 1`).
-- Fixed a segfault for certain uses of `FULL` and `RIGHT JOIN` with duplicate columns in subqueries.
-- Fixed a segfault for certain uses of `replace_running_query` and `KILL QUERY`.
-- Fixed the order of the `source` and `last_exception` columns in the `system.dictionaries` table.
-- Fixed a bug when the `DROP DATABASE` query did not delete the file with metadata.
-- Fixed the `DROP DATABASE` query for `Dictionary` databases.
-- Fixed the low precision of the `uniqHLL12` and `uniqCombined` functions for cardinalities greater than 100 million items (Alex Bocharov).
-- Fixed the calculation of implicit default values when necessary to simultaneously compute explicit default expressions in `INSERT` queries (zhang2014).
-- Fixed a rare case when a query to a `MergeTree` table could not finish (chenxing-xc).
-- Fixed a crash that occurred when running a `CHECK` query for `Distributed` tables if all shards are local (chenxing.xc).
-- Fixed a slight performance regression with functions that use regular expressions.
-- Fixed a performance regression when creating multidimensional arrays from complex expressions.
-- Fixed a bug that could cause an extra `FORMAT` section to appear in a `.sql` file with metadata.
-- Fixed a bug that caused the `max_table_size_to_drop` limit to apply when trying to delete a `MATERIALIZED VIEW` looking at an explicitly specified table.
-- Fixed incompatibility with old clients (old clients were sometimes sent data with the `DateTime('timezone')` type, which they do not understand).
-- Fixed a bug when reading `Nested` column elements of structures that were added using `ALTER` but that are empty for the old partitions, when the conditions for these columns moved to `PREWHERE`.
-- Fixed a bug when filtering tables by virtual `_table` columns in queries to `Merge` tables.
-- Fixed a bug when using `ALIAS` columns in `Distributed` tables.
-- Fixed a bug that made dynamic compilation impossible for queries with aggregate functions from the `quantile` family.
-- Fixed a race condition in the query execution pipeline that occurred in very rare cases when using `Merge` tables with a large number of tables, and when using `GLOBAL` subqueries.
-- Fixed a crash when passing arrays of different sizes to the `arrayReduce` function when using aggregate functions with multiple arguments.
-- Prohibited the use of queries with `UNION ALL` in a `MATERIALIZED VIEW`.
-- Fixed an error during initialization of the `part_log` system table when the server starts (by default, `part_log` is disabled).
-
-#### Backward incompatible changes: {#backward-incompatible-changes-10}
-
-- Removed the `distributed_ddl_allow_replicated_alter` option. This behavior is enabled by default.
-- Removed the `strict_insert_defaults` setting. If you were using this functionality, write to `clickhouse-feedback@yandex-team.com`.
-- Removed the `UnsortedMergeTree` engine.
-
-### ClickHouse release 1.1.54343, 2018-02-05 {#clickhouse-release-1-1-54343-2018-02-05}
-
-- Added macros support for defining cluster names in distributed DDL queries and constructors of Distributed tables: `CREATE TABLE distr ON CLUSTER '{cluster}' (...) ENGINE = Distributed('{cluster}', 'db', 'table')`.
-- Now queries like `SELECT ... FROM table WHERE expr IN (subquery)` are processed using the `table` index (see the sketch after this list).
-- Improved processing of duplicates when inserting into Replicated tables, so they no longer slow down execution of the replication queue.
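-
-For illustration, a minimal sketch of a query shape that can now use the table's primary key index (table and column names are hypothetical; modern `MergeTree` syntax is shown):
-
-``` sql
-CREATE TABLE hits (UserID UInt64, EventDate Date) ENGINE = MergeTree ORDER BY (UserID, EventDate);
-
-SELECT count()
-FROM hits
-WHERE UserID IN (SELECT UserID FROM hits WHERE EventDate = '2018-02-05')
-```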
-
-### ClickHouse release 1.1.54342, 2018-01-22 {#clickhouse-release-1-1-54342-2018-01-22}
-
-This release contains bug fixes for the previous release 1.1.54337:
-
-- Fixed a regression in 1.1.54337: if the default user has readonly access, then the server refuses to start up with the message `Cannot create database in readonly mode`.
-- Fixed a regression in 1.1.54337: on systems with systemd, logs are always written to syslog regardless of the configuration; the watchdog script still uses init.d.
-- Fixed a regression in 1.1.54337: wrong default configuration in the Docker image.
-- Fixed nondeterministic behavior of GraphiteMergeTree (you can see it in the log messages `Data after merge is not byte-identical to the data on another replicas`).
-- Fixed a bug that may lead to inconsistent merges after an OPTIMIZE query to Replicated tables (you may see it in the log messages `Part ... intersects the previous part`).
-- Buffer tables now work correctly when MATERIALIZED columns are present in the destination table (by zhang2014).
-- Fixed a bug in the implementation of NULL.
-
-### ClickHouse release 1.1.54337, 2018-01-18 {#clickhouse-release-1-1-54337-2018-01-18}
-
-#### New features: {#new-features-17}
-
-- Added support for storage of multi-dimensional arrays and tuples (the `Tuple` data type) in tables.
-- Support for table functions in `DESCRIBE` and `INSERT` queries. Added support for subqueries in `DESCRIBE`. Examples: `DESC TABLE remote('host', default.hits)`; `DESC TABLE (SELECT 1)`; `INSERT INTO TABLE FUNCTION remote('host', default.hits)`. Support for `INSERT INTO TABLE` in addition to `INSERT INTO`.
-- Improved support for time zones. The `DateTime` data type can be annotated with the time zone that is used for parsing and formatting in text formats. Example: `DateTime('Europe/Moscow')`. When time zones are specified in functions for `DateTime` arguments, the return type will track the time zone, and the value will be displayed as expected.
-- Added the functions `toTimeZone`, `timeDiff`, `toQuarter`, `toRelativeQuarterNum`. The `toRelativeHour`/`Minute`/`Second` functions can take a value of type `Date` as an argument. The `now` function name is case-sensitive.
-- Added the `toStartOfFifteenMinutes` function (Kirill Shvakov).
-- Added the `clickhouse format` tool for formatting queries.
-- Added the `format_schema_path` configuration parameter (Marek Vavruša). It is used for specifying a schema in `Cap'n Proto` format. Schema files can be located only in the specified directory.
-- Added support for config substitutions (`incl` and `conf.d`) for the configuration of external dictionaries and models (Pavel Yakunin).
-- Added a column with documentation for the `system.settings` table (Kirill Shvakov).
-- Added the `system.parts_columns` table with information about column sizes in each data part of `MergeTree` tables.
-- Added the `system.models` table with information about loaded `CatBoost` machine learning models.
-- Added the `mysql` and `odbc` table functions and the corresponding `MySQL` and `ODBC` table engines for accessing remote databases. This functionality is in the beta stage.
-- Added the ability to pass an argument of type `AggregateFunction` to the `groupArray` aggregate function (so you can create an array of states of some aggregate function).
-- Removed restrictions on various combinations of aggregate function combinators. For example, you can use the `avgForEachIf` as well as the `avgIfForEach` aggregate functions, which have different behaviors.
-- The `-ForEach` aggregate function combinator is extended for the case of aggregate functions of multiple arguments.
-- Added support for aggregate functions of `Nullable` arguments even for the cases when the function returns a non-`Nullable` result (added with the contribution of Silviu Caragea). Example: `groupArray`, `groupUniqArray`, `topK`.
-- Added the `max_client_network_bandwidth` option for `clickhouse-client` (Kirill Shvakov).
-- Users with the `readonly = 2` setting are allowed to work with TEMPORARY tables (CREATE, DROP, INSERT…) (Kirill Shvakov).
-- Added support for using multiple consumers with the `Kafka` engine. Extended configuration options for `Kafka` (Marek Vavruša).
-- Added the `intExp3` and `intExp4` functions.
-- Added the `sumKahan` aggregate function.
-- Added the to \* Number\* OrNull functions, where \* Number\* is a numeric type.
-- Added support for `WITH` clauses for an `INSERT SELECT` query (author: zhang2014).
-- Added settings: `http_connection_timeout`, `http_send_timeout`, `http_receive_timeout`. In particular, these settings are used for downloading data parts for replication. Changing these settings allows for faster failover if the network is overloaded.
-- Added support for `ALTER` for tables of type `Null` (Anastasiya Tsarkova).
-- The `reinterpretAsString` function is extended for all data types that are stored contiguously in memory.
-- Added the `--silent` option for the `clickhouse-local` tool. It suppresses printing query execution info in stderr.
-- Added support for reading values of type `Date` from text in a format where the month and/or day of the month is specified using a single digit instead of two digits (Amos Bird).
-
-#### Performance optimizations: {#performance-optimizations}
-
-- Improved performance of the aggregate functions `min`, `max`, `any`, `anyLast`, `anyHeavy`, `argMin`, `argMax` from string arguments.
-- Improved performance of the functions `isInfinite`, `isFinite`, `isNaN`, `roundToExp2`.
-- Improved performance of parsing and formatting `Date` and `DateTime` type values in text format.
-- Improved performance and precision of parsing floating point numbers.
-- Lowered memory usage for `JOIN` in the case when the left and right parts have columns with identical names that are not contained in `USING`.
-- Improved performance of the aggregate functions `varSamp`, `varPop`, `stddevSamp`, `stddevPop`, `covarSamp`, `covarPop`, `corr` by reducing computational stability. The old functions are available under the names `varSampStable`, `varPopStable`, `stddevSampStable`, `stddevPopStable`, `covarSampStable`, `covarPopStable`, `corrStable`.
-
-#### Bug fixes: {#bug-fixes-28}
-
-- Fixed data deduplication after running a `DROP` or `DETACH PARTITION` query. In the previous version, dropping a partition and inserting the same data again did not work because the inserted blocks were considered duplicates.
-- Fixed a bug that could lead to incorrect interpretation of the `WHERE` clause for `CREATE MATERIALIZED VIEW` queries with `POPULATE`.
-- Fixed a bug in using the `root_path` parameter in the `zookeeper_servers` configuration.
-- Fixed unexpected results of passing the `Date` argument to `toStartOfDay`.
-- Fixed the `addMonths` and `subtractMonths` functions and the arithmetic for `INTERVAL n MONTH` in cases when the result has the previous year.
-- Added missing support for the `UUID` data type for `DISTINCT`, `JOIN`, and `uniq` aggregate functions and external dictionaries (Evgeniy Ivanov). Support for `UUID` is still incomplete.
-- Fixed `SummingMergeTree` behavior in cases when the rows summed to zero.
-- Various fixes for the `Kafka` engine (Marek Vavruša).
-- Fixed incorrect behavior of the `Join` table engine (Amos Bird).
-- Fixed incorrect allocator behavior under FreeBSD and OS X.
-- The `extractAll` function now supports empty matches.
-- Fixed an error that blocked the usage of `libressl` instead of `openssl`.
-- Fixed the `CREATE TABLE AS SELECT` query from temporary tables.
-- Fixed non-atomicity of updating the replication queue. This could lead to replicas being out of sync until the server restarts.
-- Fixed possible overflow in `gcd`, `lcm` and `modulo` (the `%` operator) (Maks Skorokhod).
-- `-preprocessed` files are now created after changing `umask` (`umask` can be changed in the config).
-- Fixed a bug in the background check of parts (`MergeTreePartChecker`) when using a custom partition key.
-- Fixed parsing of tuples (values of the `Tuple` data type) in text formats.
-- Improved error messages about incompatible types passed to `multiIf`, `array` and some other functions.
-- Redesigned support for `Nullable` types. Fixed bugs that could lead to a server crash. Fixed almost all other bugs related to `NULL` support: incorrect type conversions in INSERT SELECT, insufficient support for Nullable in HAVING and PREWHERE, the `join_use_nulls` mode, Nullable types as arguments of the `OR` operator, etc.
-- Fixed various bugs related to the internal semantics of data types. Examples: unnecessary summing of `Enum` type fields in `SummingMergeTree`; alignment of `Enum` types in `Pretty` formats, etc.
-- Stricter checks for allowed combinations of composite columns.
-- Fixed the overflow when specifying a very large parameter for the `FixedString` data type.
-- Fixed a bug in the `topK` aggregate function in a generic case.
-- Added the missing check for equality of array sizes in the arguments of n-ary variants of aggregate functions with the `-Array` combinator.
-- Fixed a bug in `--pager` for `clickhouse-client` (author: ks1322).
-- Fixed the precision of the `exp10` function.
-- Fixed the behavior of the `visitParamExtract` function for better compliance with documentation.
-- Fixed the crash when incorrect data types are specified.
-- Fixed the behavior of `DISTINCT` in the case when all columns are constants.
-- Fixed query formatting in the case of using the `tupleElement` function with a complex constant expression as the tuple element index.
-- Fixed a bug in `Dictionary` tables for `range_hashed` dictionaries.
-- Fixed a bug that led to excessive rows in the result of `FULL` and `RIGHT JOIN` (Amos Bird).
-- Fixed a server crash when creating and removing temporary files in the `config.d` directories during config reload.
-- Fixed the `SYSTEM DROP DNS CACHE` query: the cache was flushed, but the addresses of cluster nodes were not updated.
-- Fixed the behavior of `MATERIALIZED VIEW` after executing `DETACH TABLE` for the table under the view (Marek Vavruša).
-
-#### Build improvements: {#build-improvements-4}
-
-- The `pbuilder` tool is used for builds. The build process is almost completely independent of the build host environment.
-- A single build is used for different OS versions. Packages and binaries have been made compatible with a wide range of Linux systems.
-- Added the `clickhouse-test` package. It can be used to run functional tests.
-- The source tarball can now be published to the repository. It can be used to reproduce the build without using GitHub.
-- Added limited integration with Travis CI. Due to limits on build time in Travis, only the debug build is tested and a limited subset of tests are run.
-- Added support for `Cap'n'Proto` in the default build.
-- Changed the format of documentation sources from `Restricted Text` to `Markdown`.
-- Added support for `systemd` (Vladimir Smirnov). It is disabled by default due to incompatibility with some OS images and can be enabled manually.
-- For dynamic code generation, `clang` and `lld` are embedded into the `clickhouse` binary. They can also be invoked as `clickhouse clang` and `clickhouse lld`.
-- Removed usage of GNU extensions from the code. Enabled the `-Wextra` option. When building with `clang`, the default is `libc++` instead of `libstdc++`.
-- Extracted the `clickhouse_parsers` and `clickhouse_common_io` libraries to speed up builds of various tools.
-
-#### Backward incompatible changes: {#backward-incompatible-changes-11}
-
-- The format for marks in `Log` type tables that contain `Nullable` columns was changed in a backward incompatible way. If you have these tables, you should convert them to the `TinyLog` type before starting up the new server version. To do this, replace `ENGINE = Log` with `ENGINE = TinyLog` in the corresponding `.sql` file in the `metadata` directory. If your table does not have `Nullable` columns or if the type of your table is not `Log`, then you do not need to do anything.
-- Removed the `experimental_allow_extended_storage_definition_syntax` setting. Now this feature is enabled by default.
-- The `runningIncome` function was renamed to `runningDifferenceStartingWithFirstvalue` to avoid confusion.
-- Removed the `FROM ARRAY JOIN arr` syntax when ARRAY JOIN is specified directly after FROM with no table (Amos Bird).
-- Removed the `BlockTabSeparated` format that was used solely for demonstration purposes.
-- Changed the state format for the aggregate functions `varSamp`, `varPop`, `stddevSamp`, `stddevPop`, `covarSamp`, `covarPop`, `corr`. If you have stored states of these aggregate functions in tables (using the `AggregateFunction` data type or materialized views with corresponding states), please write to clickhouse-feedback@yandex-team.com.
-- In previous server versions there was an undocumented feature: if an aggregate function depends on parameters, you could still specify it without parameters in the AggregateFunction data type. Example: `AggregateFunction(quantiles, UInt64)` instead of `AggregateFunction(quantiles(0.5, 0.9), UInt64)`. This feature was lost.
-  Although it was undocumented, we plan to support it again in future releases.
-- Enum data types cannot be used in min/max aggregate functions. This ability will be returned in the next release.
-
-#### Please note when upgrading: {#please-note-when-upgrading}
-
-- When doing a rolling update on a cluster, at the point when some of the replicas are running the old version of ClickHouse and some are running the new version, replication is temporarily stopped and the message `unknown parameter 'shard'` appears in the log. Replication will continue after all replicas of the cluster are updated.
-- If different versions of ClickHouse are running on the cluster servers, it is possible that distributed queries using the following functions will have incorrect results: `varSamp`, `varPop`, `stddevSamp`, `stddevPop`, `covarSamp`, `covarPop`, `corr`. You should update all cluster nodes.
-
-## [Changelog for 2017](https://github.com/ClickHouse/ClickHouse/blob/master/docs/en/changelog/2017.md) {#changelog-for-2017}
diff --git a/docs/ru/whats_new/changelog/2018.md b/docs/ru/whats_new/changelog/2018.md
new file mode 120000
index 00000000000..675c07e8bbb
--- /dev/null
+++ b/docs/ru/whats_new/changelog/2018.md
@@ -0,0 +1 @@
+en/whats_new/changelog/2018.md
\ No newline at end of file
diff --git a/docs/ru/whats_new/changelog/2019.md b/docs/ru/whats_new/changelog/2019.md
deleted file mode 100644
index ea5bffd74c9..00000000000
--- a/docs/ru/whats_new/changelog/2019.md
+++ /dev/null
@@ -1,2072 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 1cd5f0028d917696daf71ac1c9ee849c99c1d5c8
----
-
-## ClickHouse release v19.17 {#clickhouse-release-v19-17}
-
-### ClickHouse release v19.17.6.36, 2019-12-27 {#clickhouse-release-v19-17-6-36-2019-12-27}
-
-#### Bug Fix {#bug-fix}
-
-- Fixed potential buffer overflow in decompress. A malicious user could pass fabricated compressed data that could cause a read after the buffer. This issue was found by Eldar Zaitov from the Yandex information security team. [\#8404](https://github.com/ClickHouse/ClickHouse/pull/8404) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Fixed possible server crash (`std::terminate`) when the server cannot send or write data in the JSON or XML format with values of the String data type (that require UTF-8 validation), or when compressing result data with the Brotli algorithm, or in some other rare cases. [\#8384](https://github.com/ClickHouse/ClickHouse/pull/8384) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Fixed dictionaries with a source from a ClickHouse `VIEW`; now reading such dictionaries does not cause the error `There is no query`. [\#8351](https://github.com/ClickHouse/ClickHouse/pull/8351) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
-- Fixed checking whether a client host is allowed by host\_regexp specified in users.xml. [\#8241](https://github.com/ClickHouse/ClickHouse/pull/8241), [\#8342](https://github.com/ClickHouse/ClickHouse/pull/8342) ([Vitaly Baranov](https://github.com/vitlibar))
-- `RENAME TABLE` for a distributed table now renames the folder containing inserted data before sending it to the shards. This fixes an issue with successive renames `tableA->tableB`, `tableC->tableA`.
-  [\#8306](https://github.com/ClickHouse/ClickHouse/pull/8306) ([tavplubix](https://github.com/tavplubix))
-- `range_hashed` external dictionaries created by DDL queries now allow ranges of arbitrary numeric types. [\#8275](https://github.com/ClickHouse/ClickHouse/pull/8275) ([alesapin](https://github.com/alesapin))
-- Fixed the `INSERT INTO table SELECT ... FROM mysql(...)` table function. [\#8234](https://github.com/ClickHouse/ClickHouse/pull/8234) ([tavplubix](https://github.com/tavplubix))
-- Fixed a segfault in `INSERT INTO TABLE FUNCTION file()` while inserting into a file that does not exist. Now in this case the file will be created and then the insert will be processed. [\#8177](https://github.com/ClickHouse/ClickHouse/pull/8177) ([Olga Khvostikova](https://github.com/stavrolia))
-- Fixed a bitmapAnd error when intersecting an aggregated bitmap and a scalar bitmap. [\#8082](https://github.com/ClickHouse/ClickHouse/pull/8082) ([Yue Huang](https://github.com/moon03432))
-- Fixed a segfault when an `EXISTS` query was used without a `TABLE` or `DICTIONARY` qualifier, just like `EXISTS t`. [\#8213](https://github.com/ClickHouse/ClickHouse/pull/8213) ([alexey-milovidov](https://github.com/alexey-milovidov))
-- Fixed the return type for the functions `rand` and `randConstant` in the case of a nullable argument. Now the functions always return `UInt32` and never `Nullable(UInt32)`. [\#8204](https://github.com/ClickHouse/ClickHouse/pull/8204) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
-- Fixed `DROP DICTIONARY IF EXISTS db.dict`; now it does not throw an exception if `db` simply does not exist. [\#8185](https://github.com/ClickHouse/ClickHouse/pull/8185) ([Vitaly Baranov](https://github.com/vitlibar))
-- If a table was not completely dropped because of a server crash, the server will try to restore and load it [\#8176](https://github.com/ClickHouse/ClickHouse/pull/8176) ([tavplubix](https://github.com/tavplubix))
-- Fixed a trivial count query for a distributed table if there are more than two shard local tables. [\#8164](https://github.com/ClickHouse/ClickHouse/pull/8164) ([小路](https://github.com/nicelulu))
-- Fixed a bug that led to a data race in DB::BlockStreamProfileInfo::calculateRowsBeforeLimit() [\#8143](https://github.com/ClickHouse/ClickHouse/pull/8143) ([Alexander Kazakov](https://github.com/Akazz))
-- Fixed `ALTER table MOVE part` executed immediately after merging the specified part, which could cause moving a part that the specified part was merged into. Now it correctly moves the specified part. [\#8104](https://github.com/ClickHouse/ClickHouse/pull/8104) ([Vladimir Chebotarev](https://github.com/excitoon))
-- Expressions for dictionaries can now be specified as strings. This is useful for the calculation of attributes while extracting data from non-ClickHouse sources, because it allows using non-ClickHouse syntax for those expressions. [\#8098](https://github.com/ClickHouse/ClickHouse/pull/8098) ([alesapin](https://github.com/alesapin))
-- Fixed a very rare race in `clickhouse-copier` because of an overflow in ZXid.
-  [\#8088](https://github.com/ClickHouse/ClickHouse/pull/8088) ([Ding Xiang Fei](https://github.com/dingxiangfei2009))
-- Fixed the bug when, after a failed query (due to «Too many simultaneous queries», for example), it would not read external tables info, and the next request would interpret this info as the beginning of the next query, causing an error like `Unknown packet from client`. [\#8084](https://github.com/ClickHouse/ClickHouse/pull/8084) ([Azat Khuzhin](https://github.com/azat))
-- Avoid null dereference after «Unknown packet X from server» [\#8071](https://github.com/ClickHouse/ClickHouse/pull/8071) ([Azat Khuzhin](https://github.com/azat))
-- Restored support of all ICU locales, added the ability to apply collations for constant expressions, and added the language name to the system.collations table. [\#8051](https://github.com/ClickHouse/ClickHouse/pull/8051) ([alesapin](https://github.com/alesapin))
-- The number of streams for reading from `StorageFile` and `StorageHDFS` is now limited, so as not to exceed the memory limit. [\#7981](https://github.com/ClickHouse/ClickHouse/pull/7981) ([alesapin](https://github.com/alesapin))
-- Fixed the `CHECK TABLE` query for `*MergeTree` tables without a key. [\#7979](https://github.com/ClickHouse/ClickHouse/pull/7979) ([alesapin](https://github.com/alesapin))
-- Removed the mutation number from a part name in case there were no mutations. This removal improved compatibility with older versions. [\#8250](https://github.com/ClickHouse/ClickHouse/pull/8250) ([alesapin](https://github.com/alesapin))
-- Fixed the bug that mutations are skipped for some attached parts because their data\_version is larger than the table mutation version. [\#7812](https://github.com/ClickHouse/ClickHouse/pull/7812) ([Zhichang Yu](https://github.com/yuzhichang))
-- Allow starting the server with redundant copies of parts after moving them to another device. [\#7810](https://github.com/ClickHouse/ClickHouse/pull/7810) ([Vladimir Chebotarev](https://github.com/excitoon))
-- Fixed the error «Sizes of columns doesn't match» that might appear when using aggregate function columns. [\#7790](https://github.com/ClickHouse/ClickHouse/pull/7790) ([Boris Granveaud](https://github.com/bgranvea))
-- Now an exception will be thrown in case of using WITH TIES alongside LIMIT BY. And now it is possible to use TOP with LIMIT BY. [\#7637](https://github.com/ClickHouse/ClickHouse/pull/7637) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
-- Fixed dictionary reload if it has `invalidate_query`, which stopped updates and some exceptions on previous update tries. [\#8029](https://github.com/ClickHouse/ClickHouse/pull/8029) ([alesapin](https://github.com/alesapin))
-
-### ClickHouse release v19.17.4.11, 2019-11-22 {#clickhouse-release-v19-17-4-11-2019-11-22}
-
-#### Backward Incompatible Change {#backward-incompatible-change}
-
-- Using a column instead of AST to store scalar subquery results for better performance. The setting `enable_scalar_subquery_optimization` was added in 19.17 and it was enabled by default. It leads to errors like [this](https://github.com/ClickHouse/ClickHouse/issues/7851) during an upgrade to 19.17.2 or 19.17.3 from previous versions. This setting was disabled by default in 19.17.4 to make it possible to upgrade from 19.16 and older versions without errors.
-  [\#7392](https://github.com/ClickHouse/ClickHouse/pull/7392) ([Amos Bird](https://github.com/amosbird))
-
-#### New Feature {#new-feature}
-
-- Added the ability to create dictionaries with DDL queries. [\#7360](https://github.com/ClickHouse/ClickHouse/pull/7360) ([alesapin](https://github.com/alesapin))
-- Made the `bloom_filter` type of index support `LowCardinality` and `Nullable` [\#7363](https://github.com/ClickHouse/ClickHouse/issues/7363) [\#7561](https://github.com/ClickHouse/ClickHouse/pull/7561) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
-- Added the function `isValidJSON` to check that the passed string is valid json. [\#5910](https://github.com/ClickHouse/ClickHouse/issues/5910) [\#7293](https://github.com/ClickHouse/ClickHouse/pull/7293) ([Vdimir](https://github.com/Vdimir))
-- Implemented the `arrayCompact` function [\#7328](https://github.com/ClickHouse/ClickHouse/pull/7328) ([Memo](https://github.com/Joeywzr))
-- Created the function `hex` for Decimal numbers. It works like `hex(reinterpretAsString())`, but does not delete the last zero bytes. [\#7355](https://github.com/ClickHouse/ClickHouse/pull/7355) ([Mikhail Korotov](https://github.com/millb))
-- Added the `arrayFill` and `arrayReverseFill` functions, which replace elements by other elements in front/back of them in the array. [\#7380](https://github.com/ClickHouse/ClickHouse/pull/7380) ([hcz](https://github.com/hczhcz))
-- Added `CRC32IEEE()`/`CRC64()` support [\#7480](https://github.com/ClickHouse/ClickHouse/pull/7480) ([Azat Khuzhin](https://github.com/azat))
-- Implemented the `char` function similar to the one in [mysql](https://dev.mysql.com/doc/refman/8.0/en/string-functions.html#function_char) [\#7486](https://github.com/ClickHouse/ClickHouse/pull/7486) ([sundy-li](https://github.com/sundy-li))
-- Added the `bitmapTransform` function. It transforms an array of values in a bitmap to another array of values, the result being a new bitmap [\#7598](https://github.com/ClickHouse/ClickHouse/pull/7598) ([Zhichang Yu](https://github.com/yuzhichang))
-- Implemented the `javaHashUTF16LE()` function [\#7651](https://github.com/ClickHouse/ClickHouse/pull/7651) ([achimbab](https://github.com/achimbab))
-- Added the `_shard_num` virtual column for the Distributed engine [\#7624](https://github.com/ClickHouse/ClickHouse/pull/7624) ([Azat Khuzhin](https://github.com/azat))
-
-#### Experimental Feature {#experimental-feature}
-
-- Support for processors (the new query execution pipeline) in `MergeTree`. [\#7181](https://github.com/ClickHouse/ClickHouse/pull/7181) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
-
-#### Bug Fix {#bug-fix-1}
-
-- Fixed incorrect float parsing in `Values` [\#7817](https://github.com/ClickHouse/ClickHouse/issues/7817) [\#7870](https://github.com/ClickHouse/ClickHouse/pull/7870) ([tavplubix](https://github.com/tavplubix))
-- Fixed a rare deadlock which can happen when trace\_log is enabled. [\#7838](https://github.com/ClickHouse/ClickHouse/pull/7838) ([filimonov](https://github.com/filimonov))
-- Prevent message duplication when producing a Kafka table that has any MVs selecting from it [\#7265](https://github.com/ClickHouse/ClickHouse/pull/7265) ([Ivan](https://github.com/abyss7))
-- Support for `Array(LowCardinality(Nullable(String)))` in `IN`.
-  Resolves [\#7364](https://github.com/ClickHouse/ClickHouse/issues/7364) [\#7366](https://github.com/ClickHouse/ClickHouse/pull/7366) ([achimbab](https://github.com/achimbab))
-- Added handling of `SQL_TINYINT` and `SQL_BIGINT`, and fixed handling of the `SQL_FLOAT` data source types in the ODBC Bridge. [\#7491](https://github.com/ClickHouse/ClickHouse/pull/7491) ([Denis Glazachev](https://github.com/traceon))
-- Fixed aggregation (`avg` and quantiles) over empty decimal columns [\#7431](https://github.com/ClickHouse/ClickHouse/pull/7431) ([Andrey Konyaev](https://github.com/akonyaev90))
-- Fixed `INSERT` into Distributed with `MATERIALIZED` columns [\#7377](https://github.com/ClickHouse/ClickHouse/pull/7377) ([Azat Khuzhin](https://github.com/azat))
-- Made `MOVE PARTITION` work if some parts of the partition are already on the destination disk or volume [\#7434](https://github.com/ClickHouse/ClickHouse/pull/7434) ([Vladimir Chebotarev](https://github.com/excitoon))
-- Fixed a bug where hard links failed to be created during mutations in `ReplicatedMergeTree` in multi-disk configurations. [\#7558](https://github.com/ClickHouse/ClickHouse/pull/7558) ([Vladimir Chebotarev](https://github.com/excitoon))
-- Fixed a bug with a mutation on a MergeTree when the whole part remains unchanged and the best space is being found on another disk [\#7602](https://github.com/ClickHouse/ClickHouse/pull/7602) ([Vladimir Chebotarev](https://github.com/excitoon))
-- Fixed a bug with `keep_free_space_ratio` not being read from the disks configuration [\#7645](https://github.com/ClickHouse/ClickHouse/pull/7645) ([Vladimir Chebotarev](https://github.com/excitoon))
-- Fixed a bug with tables that contain only `Tuple` columns or columns with complex paths. Fixes [7541](https://github.com/ClickHouse/ClickHouse/issues/7541). [\#7545](https://github.com/ClickHouse/ClickHouse/pull/7545) ([alesapin](https://github.com/alesapin))
-- Do not account memory for the Buffer engine in the max\_memory\_usage limit [\#7552](https://github.com/ClickHouse/ClickHouse/pull/7552) ([Azat Khuzhin](https://github.com/azat))
-- Fixed final mark usage in `MergeTree` tables ordered by `tuple()`. In rare cases it could lead to the `Can't adjust last granule` error while selecting. [\#7639](https://github.com/ClickHouse/ClickHouse/pull/7639) ([Anton Popov](https://github.com/CurtizJ))
-- Fixed a bug in mutations that have a predicate with actions that require context (for example, functions for json), which could lead to crashes or strange exceptions. [\#7664](https://github.com/ClickHouse/ClickHouse/pull/7664) ([alesapin](https://github.com/alesapin))
-- Fixed a mismatch of database and table name escaping in the `data/` and `shadow/` directories [\#7575](https://github.com/ClickHouse/ClickHouse/pull/7575) ([Alexander Burmak](https://github.com/Alex-Burmak))
-- Support duplicated keys in RIGHT\|FULL JOINs, e.g. `ON t.x = u.x AND t.x = u.y`. Fixed a crash in this case. [\#7586](https://github.com/ClickHouse/ClickHouse/pull/7586) ([Artem Zuikov](https://github.com/4ertus2))
-- Fixed `Not found column in block` when joining on an expression with a RIGHT or FULL JOIN.
-  [\#7641](https://github.com/ClickHouse/ClickHouse/pull/7641) ([Artem Zuikov](https://github.com/4ertus2))
-- One more attempt to fix the infinite loop in the `PrettySpace` format [\#7591](https://github.com/ClickHouse/ClickHouse/pull/7591) ([Olga Khvostikova](https://github.com/stavrolia))
-- Fixed a bug in the `concat` function when all arguments were `FixedString` of the same size. [\#7635](https://github.com/ClickHouse/ClickHouse/pull/7635) ([alesapin](https://github.com/alesapin))
-- Fixed an exception in case of using 1 argument while defining S3, URL, and HDFS storages. [\#7618](https://github.com/ClickHouse/ClickHouse/pull/7618) ([Vladimir Chebotarev](https://github.com/excitoon))
-- Fixed the scope of the InterpreterSelectQuery for views with a query [\#7601](https://github.com/ClickHouse/ClickHouse/pull/7601) ([Azat Khuzhin](https://github.com/azat))
-
-#### Improvement {#improvement}
-
-- `Nullable` columns are recognized and NULL values are handled correctly by the ODBC bridge [\#7402](https://github.com/ClickHouse/ClickHouse/pull/7402) ([Vasily Nemkov](https://github.com/Enmk))
-- Write the current batch for distributed sends atomically [\#7600](https://github.com/ClickHouse/ClickHouse/pull/7600) ([Azat Khuzhin](https://github.com/azat))
-- Throw an exception if we cannot detect the table for a column name in the query. [\#7358](https://github.com/ClickHouse/ClickHouse/pull/7358) ([Artem Zuikov](https://github.com/4ertus2))
-- Added the `merge_max_block_size` setting to `MergeTreeSettings` [\#7412](https://github.com/ClickHouse/ClickHouse/pull/7412) ([Artem Zuikov](https://github.com/4ertus2))
-- Queries with `HAVING` and without `GROUP BY` assume group by constant. So, `SELECT 1 HAVING 1` now returns a result. [\#7496](https://github.com/ClickHouse/ClickHouse/pull/7496) ([Amos Bird](https://github.com/amosbird))
-- Support parsing `(X,)` as a tuple, similar to Python. [\#7501](https://github.com/ClickHouse/ClickHouse/pull/7501), [\#7562](https://github.com/ClickHouse/ClickHouse/pull/7562) ([Amos Bird](https://github.com/amosbird))
-- Made the `range` function behave almost like the Pythonic one. [\#7518](https://github.com/ClickHouse/ClickHouse/pull/7518) ([sundy-li](https://github.com/sundy-li))
-- Added `constraints` columns to the table `system.settings` [\#7553](https://github.com/ClickHouse/ClickHouse/pull/7553) ([Vitaly Baranov](https://github.com/vitlibar))
-- Better Null format for the tcp handler, so that it is possible to use `select ignore(<expression>) from table format Null` for performance measurement via clickhouse-client [\#7606](https://github.com/ClickHouse/ClickHouse/pull/7606) ([Amos Bird](https://github.com/amosbird))
-- Queries like `CREATE TABLE ... AS (SELECT (1, 2))` are parsed correctly [\#7542](https://github.com/ClickHouse/ClickHouse/pull/7542) ([hcz](https://github.com/hczhcz))
-
-#### Performance Improvement {#performance-improvement}
-
-- Improved performance of aggregation over short string keys. [\#6243](https://github.com/ClickHouse/ClickHouse/pull/6243) ([Alexander Kuzmenkov](https://github.com/akuzm), [Amos Bird](https://github.com/amosbird))
-- Run another pass of syntax/expression analysis to get potential optimizations after constant predicates are folded.
-  [\#7497](https://github.com/ClickHouse/ClickHouse/pull/7497) ([Amos Bird](https://github.com/amosbird))
-- Use storage meta info to evaluate trivial `SELECT count() FROM table;` [\#7510](https://github.com/ClickHouse/ClickHouse/pull/7510) ([Amos Bird](https://github.com/amosbird), [alexey-milovidov](https://github.com/alexey-milovidov))
-- Vectorized processing of `arrayReduce`, similar to the Aggregator `addBatch`. [\#7608](https://github.com/ClickHouse/ClickHouse/pull/7608) ([Amos Bird](https://github.com/amosbird))
-- Minor improvements in the performance of `Kafka` consumption [\#7475](https://github.com/ClickHouse/ClickHouse/pull/7475) ([Ivan](https://github.com/abyss7))
-
-#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement}
-
-- Added support for cross-compiling to the CPU architecture AARCH64. Refactored the packager script. [\#7370](https://github.com/ClickHouse/ClickHouse/pull/7370) [\#7539](https://github.com/ClickHouse/ClickHouse/pull/7539) ([Ivan](https://github.com/abyss7))
-- Unpack the darwin-x86\_64 and linux-aarch64 toolchains into a mounted Docker volume when building packages [\#7534](https://github.com/ClickHouse/ClickHouse/pull/7534) ([Ivan](https://github.com/abyss7))
-- Updated the Docker image for the binary packager [\#7474](https://github.com/ClickHouse/ClickHouse/pull/7474) ([Ivan](https://github.com/abyss7))
-- Fixed compile errors on MacOS Catalina [\#7585](https://github.com/ClickHouse/ClickHouse/pull/7585) ([Ernest Poletaev](https://github.com/ernestp))
-- Some refactoring in the query analysis logic: split a complex class into several simple ones. [\#7454](https://github.com/ClickHouse/ClickHouse/pull/7454) ([Artem Zuikov](https://github.com/4ertus2))
-- Fixed the build without submodules [\#7295](https://github.com/ClickHouse/ClickHouse/pull/7295) ([proller](https://github.com/proller))
-- Better `add_globs` in CMake files [\#7418](https://github.com/ClickHouse/ClickHouse/pull/7418) ([Amos Bird](https://github.com/amosbird))
-- Removed hardcoded paths in the `unwind` target [\#7460](https://github.com/ClickHouse/ClickHouse/pull/7460) ([Konstantin Podshumok](https://github.com/podshumok))
-- Allow using the mysql format without ssl [\#7524](https://github.com/ClickHouse/ClickHouse/pull/7524) ([proller](https://github.com/proller))
-
-#### Other {#other}
-
-- Added ANTLR4 grammar for the ClickHouse SQL dialect [\#7595](https://github.com/ClickHouse/ClickHouse/issues/7595) [\#7596](https://github.com/ClickHouse/ClickHouse/pull/7596) ([alexey-milovidov](https://github.com/alexey-milovidov))
-
-## ClickHouse release v19.16 {#clickhouse-release-v19-16}
-
-#### ClickHouse release v19.16.14.65, 2020-03-25 {#clickhouse-release-v19-16-14-65-2020-03-25}
-
-- Fixed a bug in batched calculations of ternary logical OPs on multiple arguments (more than 10). [\#8718](https://github.com/ClickHouse/ClickHouse/pull/8718) ([Alexander Kazakov](https://github.com/Akazz)) This bugfix was backported to version 19.16 by a special request from Altinity.
-
-#### ClickHouse release v19.16.14.65, 2020-03-05 {#clickhouse-release-v19-16-14-65-2020-03-05}
-
-- Fixed incompatibility of distributed subqueries with older CH versions. Fixes [\#7851](https://github.com/ClickHouse/ClickHouse/issues/7851)
-  [(tabplubix)](https://github.com/tavplubix)
-- When executing a `CREATE` query, fold constant expressions in storage engine arguments.
-  Replace an empty database name with the current database. Fixes [\#6508](https://github.com/ClickHouse/ClickHouse/issues/6508), [\#3492](https://github.com/ClickHouse/ClickHouse/issues/3492). Also fixed the check for a local address in `ClickHouseDictionarySource`.
-  [\#9262](https://github.com/ClickHouse/ClickHouse/pull/9262) [(tabplubix)](https://github.com/tavplubix)
-- Now background merges in the `*MergeTree` table engines family preserve the storage policy volume order more accurately.
-  [\#8549](https://github.com/ClickHouse/ClickHouse/pull/8549) ([Vladimir Chebotarev](https://github.com/excitoon))
-- Prevent losing data in `Kafka` in rare cases when an exception happens after reading the suffix but before commit. Fixes [\#9378](https://github.com/ClickHouse/ClickHouse/issues/9378). Related: [\#7175](https://github.com/ClickHouse/ClickHouse/issues/7175)
-  [\#9507](https://github.com/ClickHouse/ClickHouse/pull/9507) [(filimonov)](https://github.com/filimonov)
-- Fixed a bug leading to server termination when trying to use / drop a `Kafka` table created with wrong parameters. Fixes [\#9494](https://github.com/ClickHouse/ClickHouse/issues/9494). Incorporates [\#9507](https://github.com/ClickHouse/ClickHouse/issues/9507).
-  [\#9513](https://github.com/ClickHouse/ClickHouse/pull/9513) [(filimonov)](https://github.com/filimonov)
-- Allow using `MaterializedView` with subqueries above `Kafka` tables.
-  [\#8197](https://github.com/ClickHouse/ClickHouse/pull/8197) ([filimonov](https://github.com/filimonov))
-
-#### New Feature {#new-feature-1}
-
-- Added the `deduplicate_blocks_in_dependent_materialized_views` option to control the behaviour of idempotent inserts into tables with materialized views. This new feature was added to the bugfix release by a special request from Altinity.
-  [\#9070](https://github.com/ClickHouse/ClickHouse/pull/9070) [(urykhy)](https://github.com/urykhy)
-
-### ClickHouse release v19.16.2.2, 2019-10-30 {#clickhouse-release-v19-16-2-2-2019-10-30}
-
-#### Backward Incompatible Change {#backward-incompatible-change-1}
-
-- Added missing arity validation for count/countIf.
-  [\#7095](https://github.com/ClickHouse/ClickHouse/issues/7095)
-  [\#7298](https://github.com/ClickHouse/ClickHouse/pull/7298) ([Vdimir](https://github.com/Vdimir))
-- Removed the legacy `asterisk_left_columns_only` setting (it was disabled by default).
-  [\#7335](https://github.com/ClickHouse/ClickHouse/pull/7335) ([Artem Zuikov](https://github.com/4ertus2))
-- Format strings for the Template data format are now specified in files.
-  [\#7118](https://github.com/ClickHouse/ClickHouse/pull/7118)
-  ([tavplubix](https://github.com/tavplubix))
-
-#### New Feature {#new-feature-2}
-
-- Introduced uniqCombined64() to calculate cardinality greater than UINT\_MAX.
-  [\#7213](https://github.com/ClickHouse/ClickHouse/pull/7213),
-  [\#7222](https://github.com/ClickHouse/ClickHouse/pull/7222) ([Azat Khuzhin](https://github.com/azat))
-- Support Bloom filter indexes on Array columns.
-  [\#6984](https://github.com/ClickHouse/ClickHouse/pull/6984)
-  ([achimbab](https://github.com/achimbab))
-- Added the function `getMacro(name)` that returns a String with the value of the corresponding `<macros>` from the server configuration.
- Add a function `getMacro(name)` that returns a String with the value of the corresponding `<macros>` element from the server configuration. [\#7240](https://github.com/ClickHouse/ClickHouse/pull/7240) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Set two configuration options for a dictionary based on an HTTP source: `credentials` and `http-headers`. [\#7092](https://github.com/ClickHouse/ClickHouse/pull/7092) ([Guillaume Tassery](https://github.com/YiuRULE))
- Add a new ProfileEvent `Merge` that counts the number of launched background merges. [\#7093](https://github.com/ClickHouse/ClickHouse/pull/7093) ([Mikhail Korotov](https://github.com/millb))
- Add the fullHostName function that returns a fully qualified domain name. [\#7263](https://github.com/ClickHouse/ClickHouse/issues/7263) [\#7291](https://github.com/ClickHouse/ClickHouse/pull/7291) ([sundy-li](https://github.com/sundy-li))
- Add functions `arraySplit` and `arrayReverseSplit` which split an array by "cut off" conditions. They are useful in time sequence handling. [\#7294](https://github.com/ClickHouse/ClickHouse/pull/7294) ([hcz](https://github.com/hczhcz))
- Add new functions that return the array of all matched indices in the multiMatch family of functions. [\#7299](https://github.com/ClickHouse/ClickHouse/pull/7299) ([Danila Kutenin](https://github.com/danlark1))
- Add a new database engine `Lazy` that is optimized for storing a large number of small Log tables. [\#7171](https://github.com/ClickHouse/ClickHouse/pull/7171) ([Nikita Vasilev](https://github.com/nikvas0))
- Add aggregate functions groupBitmapAnd, -Or, -Xor for bitmap columns. [\#7109](https://github.com/ClickHouse/ClickHouse/pull/7109) ([Zhichang Yu](https://github.com/yuzhichang))
- Add aggregate function combinators -OrNull and -OrDefault, which return null or default values when there is nothing to aggregate (see the example after this list). [\#7331](https://github.com/ClickHouse/ClickHouse/pull/7331) ([hcz](https://github.com/hczhcz))
- Introduce the CustomSeparated data format that supports custom escaping and delimiter rules. [\#7118](https://github.com/ClickHouse/ClickHouse/pull/7118) ([tavplubix](https://github.com/tavplubix))
- Support Redis as a source of external dictionary. [\#4361](https://github.com/ClickHouse/ClickHouse/pull/4361) [\#6962](https://github.com/ClickHouse/ClickHouse/pull/6962) ([comunodi](https://github.com/comunodi), [Anton Popov](https://github.com/CurtizJ))
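A self-contained sketch of the `-OrNull` and `-OrDefault` combinators mentioned above; the `numbers()` table function is used only so the query runs without any schema, and the deliberately empty `WHERE` clause triggers the "nothing to aggregate" case:

```sql
SELECT
    avg(number)          AS plain,        -- nan: plain aggregate over an empty set
    avgOrNull(number)    AS with_null,    -- NULL
    avgOrDefault(number) AS with_default  -- 0: default value of the result type
FROM numbers(10)
WHERE number > 100  -- deliberately matches no rows
```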
#### Bug Fix {#bug-fix-2}

- Fix wrong query result if it has a `WHERE IN (SELECT ...)` section and `optimize_read_in_order` is used. [\#7371](https://github.com/ClickHouse/ClickHouse/pull/7371) ([Anton Popov](https://github.com/CurtizJ))
- Disabled the MariaDB authentication plugin, which depends on files outside of the project. [\#7140](https://github.com/ClickHouse/ClickHouse/pull/7140) ([Yuriy Baranov](https://github.com/yurriy))
- Fix the exception `Cannot convert column ... because it is constant but values of constants are different in source and result`, which could rarely happen when the functions `now()`, `today()`, `yesterday()`, `randConstant()` are used. [\#7156](https://github.com/ClickHouse/ClickHouse/pull/7156) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fixed using the HTTP keep-alive timeout instead of the TCP keep-alive timeout. [\#7351](https://github.com/ClickHouse/ClickHouse/pull/7351) ([Vasily Nemkov](https://github.com/Enmk))
- Fix segmentation fault in groupBitmapOr (issue [\#7109](https://github.com/ClickHouse/ClickHouse/issues/7109)). [\#7289](https://github.com/ClickHouse/ClickHouse/pull/7289) ([Zhichang Yu](https://github.com/yuzhichang))
- For materialized views, the commit to Kafka is called after all data were written. [\#7175](https://github.com/ClickHouse/ClickHouse/pull/7175) ([Ivan](https://github.com/abyss7))
- Fixed wrong `duration_ms` value in the `system.part_log` table. It was off by a factor of ten. [\#7172](https://github.com/ClickHouse/ClickHouse/pull/7172) ([Vladimir Chebotarev](https://github.com/excitoon))
- A quick fix to resolve a crash in the LIVE VIEW table and re-enable all LIVE VIEW tests. [\#7201](https://github.com/ClickHouse/ClickHouse/pull/7201) ([vzakaznikov](https://github.com/vzakaznikov))
- Serialize NULL values correctly in min/max indexes of MergeTree parts. [\#7234](https://github.com/ClickHouse/ClickHouse/pull/7234) ([Alexander Kuzmenkov](https://github.com/akuzm))
- Don't put virtual columns into the .sql metadata when a table is created as `CREATE TABLE AS`. [\#7183](https://github.com/ClickHouse/ClickHouse/pull/7183) ([Ivan](https://github.com/abyss7))
- Fix segmentation fault in the `ATTACH PART` query. [\#7185](https://github.com/ClickHouse/ClickHouse/pull/7185) ([alesapin](https://github.com/alesapin))
- Fix wrong result for some queries given by the optimization of empty IN subqueries and empty INNER/RIGHT JOIN. [\#7284](https://github.com/ClickHouse/ClickHouse/pull/7284) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fix an error in the LIVE VIEW getHeader() method. [\#7271](https://github.com/ClickHouse/ClickHouse/pull/7271) ([vzakaznikov](https://github.com/vzakaznikov))

#### Improvement {#improvement-1}

- Add a message in case a queue\_wait\_max\_ms wait takes place. [\#7390](https://github.com/ClickHouse/ClickHouse/pull/7390) ([Azat Khuzhin](https://github.com/azat))
- Made the `s3_min_upload_part_size` setting table-level. [\#7059](https://github.com/ClickHouse/ClickHouse/pull/7059) ([Vladimir Chebotarev](https://github.com/excitoon))
- Check TTL in StorageFactory. [\#7304](https://github.com/ClickHouse/ClickHouse/pull/7304) ([sundy-li](https://github.com/sundy-li))
- Squash left-hand blocks in partial merge join (optimization). [\#7122](https://github.com/ClickHouse/ClickHouse/pull/7122) ([Artem Zuikov](https://github.com/4ertus2))
- Do not allow non-deterministic functions in mutations of Replicated table engines, because this can introduce inconsistencies between replicas. [\#7247](https://github.com/ClickHouse/ClickHouse/pull/7247) ([Alexander Kazakov](https://github.com/Akazz))
- Disable the memory tracker while converting an exception stack trace to a string. It can prevent the loss of error messages of type `Memory limit exceeded` on the server, which caused the `Attempt to read after eof` exception on the client. [\#7264](https://github.com/ClickHouse/ClickHouse/pull/7264) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Miscellaneous format improvements. Resolves [\#6033](https://github.com/ClickHouse/ClickHouse/issues/6033), [\#2633](https://github.com/ClickHouse/ClickHouse/issues/2633), [\#6611](https://github.com/ClickHouse/ClickHouse/issues/6611), [\#6742](https://github.com/ClickHouse/ClickHouse/issues/6742) [\#7215](https://github.com/ClickHouse/ClickHouse/pull/7215) ([tavplubix](https://github.com/tavplubix))
- ClickHouse ignores values on the right side of the IN operator that are not convertible to the left side type. Make it work properly for compound types, Array and Tuple. [\#7283](https://github.com/ClickHouse/ClickHouse/pull/7283) ([Alexander Kuzmenkov](https://github.com/akuzm))
- Support missing inequalities for ASOF JOIN. It is possible to join the less-or-equal variant and the strict greater and less variants for the ASOF column in `ON` syntax (see the sketch after this list). [\#7282](https://github.com/ClickHouse/ClickHouse/pull/7282) ([Artem Zuikov](https://github.com/4ertus2))
- Optimize partial merge join. [\#7070](https://github.com/ClickHouse/ClickHouse/pull/7070) ([Artem Zuikov](https://github.com/4ertus2))
- Do not use more than 98K of memory in uniqCombined functions. [\#7236](https://github.com/ClickHouse/ClickHouse/pull/7236), [\#7270](https://github.com/ClickHouse/ClickHouse/pull/7270) ([Azat Khuzhin](https://github.com/azat))
- Flush parts of the right-hand joining table to disk in PartialMergeJoin (if there is not enough memory). Load the data back when needed. [\#7186](https://github.com/ClickHouse/ClickHouse/pull/7186) ([Artem Zuikov](https://github.com/4ertus2))
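A minimal sketch of the ASOF `ON` syntax that the inequality support above extends; the data is inlined via subqueries so the query is self-contained, and the classic `>=` form is shown, while the entry adds acceptance of the `<=`, `<` and `>` variants as well:

```sql
SELECT l.k, l.t, r.t AS nearest_ts
FROM (SELECT 1 AS k, 10 AS t) AS l
ASOF JOIN (SELECT 1 AS k, arrayJoin([3, 8, 12]) AS t) AS r
ON l.k = r.k AND l.t >= r.t  -- matches the closest r.t not greater than l.t, i.e. 8
```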
#### Performance Improvement {#performance-improvement-1}

- Speed up joinGet with const arguments by avoiding data duplication. [\#7359](https://github.com/ClickHouse/ClickHouse/pull/7359) ([Amos Bird](https://github.com/amosbird))
- Return early if the subquery is empty. [\#7007](https://github.com/ClickHouse/ClickHouse/pull/7007) ([小路](https://github.com/nicelulu))
- Optimize parsing of SQL expressions in Values. [\#6781](https://github.com/ClickHouse/ClickHouse/pull/6781) ([tavplubix](https://github.com/tavplubix))

#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-1}

- Disable some contribs for cross-compilation to Mac OS. [\#7101](https://github.com/ClickHouse/ClickHouse/pull/7101) ([Ivan](https://github.com/abyss7))
- Add missing linking with PocoXML for clickhouse\_common\_io. [\#7200](https://github.com/ClickHouse/ClickHouse/pull/7200) ([Azat Khuzhin](https://github.com/azat))
- Accept multiple test-filter arguments in clickhouse-test. [\#7226](https://github.com/ClickHouse/ClickHouse/pull/7226) ([Alexander Kuzmenkov](https://github.com/akuzm))
- Enable musl and jemalloc for ARM. [\#7300](https://github.com/ClickHouse/ClickHouse/pull/7300) ([Amos Bird](https://github.com/amosbird))
- Added the `--client-option` parameter to `clickhouse-test` to pass additional parameters to the client. [\#7277](https://github.com/ClickHouse/ClickHouse/pull/7277) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Preserve existing configs on rpm package upgrade. [\#7103](https://github.com/ClickHouse/ClickHouse/pull/7103) ([filimonov](https://github.com/filimonov))
- Fix errors detected by PVS. [\#7153](https://github.com/ClickHouse/ClickHouse/pull/7153) ([Artem Zuikov](https://github.com/4ertus2))
- Fix build for Darwin. [\#7149](https://github.com/ClickHouse/ClickHouse/pull/7149) ([Ivan](https://github.com/abyss7))
- glibc 2.29 compatibility. [\#7142](https://github.com/ClickHouse/ClickHouse/pull/7142) ([Amos Bird](https://github.com/amosbird))
- Make sure dh\_clean does not touch potential source files. [\#7205](https://github.com/ClickHouse/ClickHouse/pull/7205) ([Amos Bird](https://github.com/amosbird))
- Attempt to avoid conflict when updating from altinity rpm: it has a config file packaged separately in clickhouse-server-common. [\#7073](https://github.com/ClickHouse/ClickHouse/pull/7073) ([filimonov](https://github.com/filimonov))
- Optimize some header files for faster rebuilds. [\#7212](https://github.com/ClickHouse/ClickHouse/pull/7212), [\#7231](https://github.com/ClickHouse/ClickHouse/pull/7231) ([Alexander Kuzmenkov](https://github.com/akuzm))
- Add performance tests for Date and DateTime. [\#7332](https://github.com/ClickHouse/ClickHouse/pull/7332) ([Vasily Nemkov](https://github.com/Enmk))
- Fix some tests that contained non-deterministic mutations. [\#7132](https://github.com/ClickHouse/ClickHouse/pull/7132) ([Alexander Kazakov](https://github.com/Akazz))
- Add a build with MemorySanitizer to CI. [\#7066](https://github.com/ClickHouse/ClickHouse/pull/7066) ([Alexander Kuzmenkov](https://github.com/akuzm))
- Avoid use of uninitialized values in MetricsTransmitter. [\#7158](https://github.com/ClickHouse/ClickHouse/pull/7158) ([Azat Khuzhin](https://github.com/azat))
- Fix some issues in Fields found by MemorySanitizer. [\#7135](https://github.com/ClickHouse/ClickHouse/pull/7135), [\#7179](https://github.com/ClickHouse/ClickHouse/pull/7179) ([Alexander Kuzmenkov](https://github.com/akuzm)), [\#7376](https://github.com/ClickHouse/ClickHouse/pull/7376) ([Amos Bird](https://github.com/amosbird))
- Fix undefined behavior in murmurhash32. [\#7388](https://github.com/ClickHouse/ClickHouse/pull/7388) ([Amos Bird](https://github.com/amosbird))
- Fix undefined behavior in StoragesInfoStream. [\#7384](https://github.com/ClickHouse/ClickHouse/pull/7384) ([tavplubix](https://github.com/tavplubix))
- Fixed constant expression folding for external database engines (MySQL, ODBC, JDBC). In previous versions it wasn't working for multiple constant expressions and was not working at all for Date, DateTime and UUID. This fixes [\#7245](https://github.com/ClickHouse/ClickHouse/issues/7245) [\#7252](https://github.com/ClickHouse/ClickHouse/pull/7252) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix a ThreadSanitizer data race error in the LIVE VIEW when accessing the no\_users\_thread variable. [\#7353](https://github.com/ClickHouse/ClickHouse/pull/7353) ([vzakaznikov](https://github.com/vzakaznikov))
- Get rid of malloc symbols in libcommon. [\#7134](https://github.com/ClickHouse/ClickHouse/pull/7134), [\#7065](https://github.com/ClickHouse/ClickHouse/pull/7065) ([Amos Bird](https://github.com/amosbird))
- Add a global flag ENABLE\_LIBRARIES for disabling all libraries. [\#7063](https://github.com/ClickHouse/ClickHouse/pull/7063) ([proller](https://github.com/proller))
#### Code Cleanup {#code-cleanup}

- Generalize the configuration repository to prepare for DDL for Dictionaries. [\#7155](https://github.com/ClickHouse/ClickHouse/pull/7155) ([alesapin](https://github.com/alesapin))
- Parser for dictionaries DDL without any semantics. [\#7209](https://github.com/ClickHouse/ClickHouse/pull/7209) ([alesapin](https://github.com/alesapin))
- Split ParserCreateQuery into different smaller parsers. [\#7253](https://github.com/ClickHouse/ClickHouse/pull/7253) ([alesapin](https://github.com/alesapin))
- Small refactoring and renaming near external dictionaries. [\#7111](https://github.com/ClickHouse/ClickHouse/pull/7111) ([alesapin](https://github.com/alesapin))
- Refactor some code to prepare for role-based access control. [\#7235](https://github.com/ClickHouse/ClickHouse/pull/7235) ([Vitaly Baranov](https://github.com/vitlibar))
- Some improvements in DatabaseOrdinary code. [\#7086](https://github.com/ClickHouse/ClickHouse/pull/7086) ([Nikita Vasilev](https://github.com/nikvas0))
- Do not use iterators in the find() and emplace() methods of hash tables. [\#7026](https://github.com/ClickHouse/ClickHouse/pull/7026) ([Alexander Kuzmenkov](https://github.com/akuzm))
- Fix getMultipleValuesFromConfig in case the parameter root is not empty. [\#7374](https://github.com/ClickHouse/ClickHouse/pull/7374) ([Mikhail Korotov](https://github.com/millb))
- Remove some copy-paste (TemporaryFile and TemporaryFileStream). [\#7166](https://github.com/ClickHouse/ClickHouse/pull/7166) ([Artem Zuikov](https://github.com/4ertus2))
- Improved code readability a little bit (`MergeTreeData::getActiveContainingPart`). [\#7361](https://github.com/ClickHouse/ClickHouse/pull/7361) ([Vladimir Chebotarev](https://github.com/excitoon))
- Wait for all scheduled jobs that are using local objects if `ThreadPool::schedule(...)` throws an exception. Rename `ThreadPool::schedule(...)` to `ThreadPool::scheduleOrThrowOnError(...)` and fix the comments to make it obvious that it may throw. [\#7350](https://github.com/ClickHouse/ClickHouse/pull/7350) ([tavplubix](https://github.com/tavplubix))

## ClickHouse release 19.15 {#clickhouse-release-19-15}

### ClickHouse release 19.15.4.10, 2019-10-31 {#clickhouse-release-19-15-4-10-2019-10-31}

#### Bug Fix {#bug-fix-3}

- Added handling of SQL\_TINYINT and SQL\_BIGINT, and fixed handling of SQL\_FLOAT data source types in the ODBC Bridge. [\#7491](https://github.com/ClickHouse/ClickHouse/pull/7491) ([Denis Glazachev](https://github.com/traceon))
- Allowed to have some parts on the destination disk or volume in MOVE PARTITION. [\#7434](https://github.com/ClickHouse/ClickHouse/pull/7434) ([Vladimir Chebotarev](https://github.com/excitoon))
- Fixed NULL values in Nullable columns through the ODBC bridge. [\#7402](https://github.com/ClickHouse/ClickHouse/pull/7402) ([Vasily Nemkov](https://github.com/Enmk))
- Fixed INSERT into a Distributed non-local node with MATERIALIZED columns. [\#7377](https://github.com/ClickHouse/ClickHouse/pull/7377) ([Azat Khuzhin](https://github.com/azat))
- Fixed the getMultipleValuesFromConfig function. [\#7374](https://github.com/ClickHouse/ClickHouse/pull/7374) ([Mikhail Korotov](https://github.com/millb))
- Fixed using the HTTP keep-alive timeout instead of the TCP keep-alive timeout. [\#7351](https://github.com/ClickHouse/ClickHouse/pull/7351) ([Vasily Nemkov](https://github.com/Enmk))
- Wait for all jobs to finish on exception (fixes rare segfaults). [\#7350](https://github.com/ClickHouse/ClickHouse/pull/7350) ([tavplubix](https://github.com/tavplubix))
- Don't push to materialized views when inserting into a Kafka table. [\#7265](https://github.com/ClickHouse/ClickHouse/pull/7265) ([Ivan](https://github.com/abyss7))
- Disable the memory tracker for the exception stack. [\#7264](https://github.com/ClickHouse/ClickHouse/pull/7264) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fixed bad code in transforming a query for an external database. [\#7252](https://github.com/ClickHouse/ClickHouse/pull/7252) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Avoid use of uninitialized values in MetricsTransmitter. [\#7158](https://github.com/ClickHouse/ClickHouse/pull/7158) ([Azat Khuzhin](https://github.com/azat))
- Added an example config with macros for tests ([alexey-milovidov](https://github.com/alexey-milovidov))

### ClickHouse release 19.15.3.6, 2019-10-09 {#clickhouse-release-19-15-3-6-2019-10-09}

#### Bug Fix {#bug-fix-4}

- Fixed bad\_variant in the hashed dictionary. ([alesapin](https://github.com/alesapin))
- Fixed a bug with segmentation fault in the ATTACH PART query. ([alesapin](https://github.com/alesapin))
- Fixed time calculation in `MergeTreeData`. ([Vladimir Chebotarev](https://github.com/excitoon))
- Commit to Kafka explicitly after the writing is finalized. [\#7175](https://github.com/ClickHouse/ClickHouse/pull/7175) ([Ivan](https://github.com/abyss7))
- Serialize NULL values correctly in min/max indexes of MergeTree parts. [\#7234](https://github.com/ClickHouse/ClickHouse/pull/7234) ([Alexander Kuzmenkov](https://github.com/akuzm))

### ClickHouse release 19.15.2.2, 2019-10-01 {#clickhouse-release-19-15-2-2-2019-10-01}

#### New Feature {#new-feature-3}

- Tiered storage: support using multiple storage volumes for tables with the MergeTree engine. It is possible to store fresh data on SSD and automatically move old data to HDD. ([example](https://clickhouse.github.io/clickhouse-presentations/meetup30/new_features/#12)). [\#4918](https://github.com/ClickHouse/ClickHouse/pull/4918) ([Igr](https://github.com/ObjatieGroba)) [\#6489](https://github.com/ClickHouse/ClickHouse/pull/6489) ([alesapin](https://github.com/alesapin))
- Add the table function `input` for reading incoming data in an `INSERT SELECT` query. [\#5450](https://github.com/ClickHouse/ClickHouse/pull/5450) ([palasonic1](https://github.com/palasonic1)) [\#6832](https://github.com/ClickHouse/ClickHouse/pull/6832) ([Anton Popov](https://github.com/CurtizJ))
- Add a `sparse_hashed` dictionary layout that is functionally equivalent to the `hashed` layout but more memory efficient. It uses about half as much memory at the cost of slower value retrieval. [\#6894](https://github.com/ClickHouse/ClickHouse/pull/6894) ([Azat Khuzhin](https://github.com/azat))
- Implemented the ability to define a list of users for access to dictionaries. Only the currently connected database is used. [\#6907](https://github.com/ClickHouse/ClickHouse/pull/6907) ([Guillaume Tassery](https://github.com/YiuRULE))
- Add a `LIMIT` option to the `SHOW` query. [\#6944](https://github.com/ClickHouse/ClickHouse/pull/6944) ([Filipp Malkovsky](https://github.com/malkfilipp))
- Add the `bitmapSubsetLimit(bitmap, range_start, limit)` function, returning a subset of the smallest `limit` values in the set that are not smaller than `range_start` (see the example after this list). [\#6957](https://github.com/ClickHouse/ClickHouse/pull/6957) ([Zhichang Yu](https://github.com/yuzhichang))
- Add the `bitmapMin` and `bitmapMax` functions. [\#6970](https://github.com/ClickHouse/ClickHouse/pull/6970) ([Zhichang Yu](https://github.com/yuzhichang))
- Add the function `repeat`, related to [issue-6648](https://github.com/ClickHouse/ClickHouse/issues/6648). [\#6999](https://github.com/ClickHouse/ClickHouse/pull/6999) ([flynn](https://github.com/ucasFL))
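A self-contained sketch of the new bitmap helpers from the list above; `bitmapBuild` and `bitmapToArray` are used only to construct and display the bitmaps, and the explicit `toUInt32` casts reflect an assumption about the expected argument types:

```sql
WITH bitmapBuild([1, 5, 7, 9, 20]) AS bm
SELECT
    bitmapToArray(bitmapSubsetLimit(bm, toUInt32(5), toUInt32(3))) AS subset,  -- [5, 7, 9]
    bitmapMin(bm) AS min_value,  -- 1
    bitmapMax(bm) AS max_value   -- 20
```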
#### Experimental Feature {#experimental-feature-1}

- Implement an (in-memory) Merge Join variant that does not change the current pipeline. The result is partially sorted by the merge key. Set `partial_merge_join = 1` to use this feature. The Merge Join is still in development. [\#6940](https://github.com/ClickHouse/ClickHouse/pull/6940) ([Artem Zuikov](https://github.com/4ertus2))
- Add the `S3` engine and table function. It is still in development (no authentication support yet). [\#5596](https://github.com/ClickHouse/ClickHouse/pull/5596) ([Vladimir Chebotarev](https://github.com/excitoon))

#### Improvement {#improvement-2}

- Every message read from Kafka is inserted atomically. This resolves almost all known issues with the Kafka engine. [\#6950](https://github.com/ClickHouse/ClickHouse/pull/6950) ([Ivan](https://github.com/abyss7))
- Improvements for failover of Distributed queries. The recovery time is shortened; it is also now configurable and can be seen in `system.clusters`. [\#6399](https://github.com/ClickHouse/ClickHouse/pull/6399) ([Vasily Nemkov](https://github.com/Enmk))
- Support numeric values for Enums directly in the `IN` section. \#6766 [\#6941](https://github.com/ClickHouse/ClickHouse/pull/6941) ([dimarub2000](https://github.com/dimarub2000))
- Support (optional, disabled by default) redirects on URL storage. [\#6914](https://github.com/ClickHouse/ClickHouse/pull/6914) ([maqroll](https://github.com/maqroll))
- Add an information message when a client with an older version connects to a server. [\#6893](https://github.com/ClickHouse/ClickHouse/pull/6893) ([Filipp Malkovsky](https://github.com/malkfilipp))
- Remove the maximum backoff sleep time limit for sending data in Distributed tables. [\#6895](https://github.com/ClickHouse/ClickHouse/pull/6895) ([Azat Khuzhin](https://github.com/azat))
- Add the ability to send profile events (counters) with cumulative values to graphite. It can be enabled under `<events_cumulative>` in the server `config.xml`. [\#6969](https://github.com/ClickHouse/ClickHouse/pull/6969) ([Azat Khuzhin](https://github.com/azat))
- Automatically cast type `T` to `LowCardinality(T)` while inserting data into a column of type `LowCardinality(T)` in Native format via HTTP. [\#6891](https://github.com/ClickHouse/ClickHouse/pull/6891) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Add the ability to use the function `hex` without `reinterpretAsString` for `Float32`, `Float64` (shown below). [\#7024](https://github.com/ClickHouse/ClickHouse/pull/7024) ([Mikhail Korotov](https://github.com/millb))
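The last entry above can be checked with a one-liner; presumably `hex` returns the bytes of the in-memory (little-endian) IEEE 754 representation, as it does for other binary values:

```sql
SELECT
    hex(toFloat32(1.5)) AS f32_hex,  -- 4 bytes of the single-precision representation
    hex(toFloat64(1.5)) AS f64_hex   -- 8 bytes of the double-precision representation
```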
#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-2}

- Add a gdb-index to the clickhouse binary with debug info. It will speed up the startup time of `gdb`. [\#6947](https://github.com/ClickHouse/ClickHouse/pull/6947) ([alesapin](https://github.com/alesapin))
- Speed up deb packaging with a patched dpkg-deb which uses `pigz`. [\#6960](https://github.com/ClickHouse/ClickHouse/pull/6960) ([alesapin](https://github.com/alesapin))
- Set `enable_fuzzing = 1` to enable libfuzzer instrumentation of all the project code. [\#7042](https://github.com/ClickHouse/ClickHouse/pull/7042) ([kyprizel](https://github.com/kyprizel))
- Add a split build smoke test in CI. [\#7061](https://github.com/ClickHouse/ClickHouse/pull/7061) ([alesapin](https://github.com/alesapin))
- Add a build with MemorySanitizer to CI. [\#7066](https://github.com/ClickHouse/ClickHouse/pull/7066) ([Alexander Kuzmenkov](https://github.com/akuzm))
- Replace `libsparsehash` with `sparsehash-c11`. [\#6965](https://github.com/ClickHouse/ClickHouse/pull/6965) ([Azat Khuzhin](https://github.com/azat))

#### Bug Fix {#bug-fix-5}

- Fixed performance degradation of index analysis on complex keys on large tables. This fixes \#6924. [\#7075](https://github.com/ClickHouse/ClickHouse/pull/7075) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix a logical error causing segfaults when selecting from a Kafka empty topic. [\#6909](https://github.com/ClickHouse/ClickHouse/pull/6909) ([Ivan](https://github.com/abyss7))
- Fix too early MySQL connection close in `MySQLBlockInputStream.cpp`. [\#6882](https://github.com/ClickHouse/ClickHouse/pull/6882) ([Clément Rodriguez](https://github.com/clemrodriguez))
- Returned support for very old Linux kernels (fix [\#6841](https://github.com/ClickHouse/ClickHouse/issues/6841)). [\#6853](https://github.com/ClickHouse/ClickHouse/pull/6853) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix possible data loss in an `insert select` query in case of an empty block in the input stream. \#6834 \#6862 [\#6911](https://github.com/ClickHouse/ClickHouse/pull/6911) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fix for the function `arrayEnumerateUniqRanked` with empty arrays in params. [\#6928](https://github.com/ClickHouse/ClickHouse/pull/6928) ([proller](https://github.com/proller))
- Fix complex queries with array joins and global subqueries. [\#6934](https://github.com/ClickHouse/ClickHouse/pull/6934) ([Ivan](https://github.com/abyss7))
- Fix the `Unknown identifier` error in ORDER BY and GROUP BY with multiple JOINs. [\#7022](https://github.com/ClickHouse/ClickHouse/pull/7022) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed an `MSan` warning while executing a function with a `LowCardinality` argument. [\#7062](https://github.com/ClickHouse/ClickHouse/pull/7062) ([Nikolai Kochetov](https://github.com/KochetovNicolai))

#### Backward Incompatible Change {#backward-incompatible-change-2}

- Changed the serialization format of bitmap\* aggregate function states to improve performance. Serialized states of bitmap\* from previous versions cannot be read. [\#6908](https://github.com/ClickHouse/ClickHouse/pull/6908) ([Zhichang Yu](https://github.com/yuzhichang))

## ClickHouse release 19.14 {#clickhouse-release-19-14}

### ClickHouse release 19.14.7.15, 2019-10-02 {#clickhouse-release-19-14-7-15-2019-10-02}

#### Bug Fix {#bug-fix-6}

- This release also contains all bug fixes from 19.11.12.69.
- Fixed compatibility for distributed queries between 19.14 and earlier versions. This fixes [\#7068](https://github.com/ClickHouse/ClickHouse/issues/7068). [\#7069](https://github.com/ClickHouse/ClickHouse/pull/7069) ([alexey-milovidov](https://github.com/alexey-milovidov))

### ClickHouse release 19.14.6.12, 2019-09-19 {#clickhouse-release-19-14-6-12-2019-09-19}

#### Bug Fix {#bug-fix-7}

- Fix for the function `arrayEnumerateUniqRanked` with empty arrays in params. [\#6928](https://github.com/ClickHouse/ClickHouse/pull/6928) ([proller](https://github.com/proller))
- Fixed the subquery name in queries with `ARRAY JOIN` and `GLOBAL IN subquery` with an alias. Use the subquery alias for the external table name if it is specified. [\#6934](https://github.com/ClickHouse/ClickHouse/pull/6934) ([Ivan](https://github.com/abyss7))

#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-3}

- Fix the [flapping](https://clickhouse-test-reports.s3.yandex.net/6944/aab95fd5175a513413c7395a73a82044bdafb906/functional_stateless_tests_(debug).html) test `00715_fetch_merged_or_mutated_part_zookeeper` by rewriting it as a shell script, because it needs to wait for mutations to apply. [\#6977](https://github.com/ClickHouse/ClickHouse/pull/6977) ([Alexander Kazakov](https://github.com/Akazz))
- Fixed UBSan and MemSan failures in the function `groupUniqArray` with an empty array argument. It was caused by placing an empty `PaddedPODArray` into a hash table zero cell, because the constructor for the zero cell value was not called. [\#6937](https://github.com/ClickHouse/ClickHouse/pull/6937) ([Amos Bird](https://github.com/amosbird))

### ClickHouse release 19.14.3.3, 2019-09-10 {#clickhouse-release-19-14-3-3-2019-09-10}

#### New Feature {#new-feature-4}

- `WITH FILL` modifier for `ORDER BY`. (continuation of [\#5069](https://github.com/ClickHouse/ClickHouse/issues/5069)) [\#6610](https://github.com/ClickHouse/ClickHouse/pull/6610) ([Anton Popov](https://github.com/CurtizJ))
- `WITH TIES` modifier for `LIMIT`. (continuation of [\#5069](https://github.com/ClickHouse/ClickHouse/issues/5069)) [\#6610](https://github.com/ClickHouse/ClickHouse/pull/6610) ([Anton Popov](https://github.com/CurtizJ))
- Parse an unquoted `NULL` literal as NULL (if the setting `format_csv_unquoted_null_literal_as_null=1`). Initialize null fields with default values if the data type of the field is not nullable (if the setting `input_format_null_as_default=1`). [\#5990](https://github.com/ClickHouse/ClickHouse/issues/5990) [\#6055](https://github.com/ClickHouse/ClickHouse/pull/6055) ([tavplubix](https://github.com/tavplubix))
- Support for wildcards in paths of the table functions `file` and `hdfs`. If the path contains wildcards, the table will be read-only. Example of usage: `select * from hdfs('hdfs://hdfs1:9000/some_dir/another_dir/*/file{0..9}{0..9}')` and `select * from file('some_dir/{some_file,another_file,yet_another}.tsv', 'TSV', 'value UInt32')`. [\#6092](https://github.com/ClickHouse/ClickHouse/pull/6092) ([Olga Khvostikova](https://github.com/stavrolia))
- New `system.metric_log` table which stores values of `system.events` and `system.metrics` with a specified time interval. [\#6363](https://github.com/ClickHouse/ClickHouse/issues/6363) [\#6467](https://github.com/ClickHouse/ClickHouse/pull/6467) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) [\#6530](https://github.com/ClickHouse/ClickHouse/pull/6530) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Allow writing ClickHouse text logs to the `system.text_log` table. [\#6037](https://github.com/ClickHouse/ClickHouse/issues/6037) [\#6103](https://github.com/ClickHouse/ClickHouse/pull/6103) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)) [\#6164](https://github.com/ClickHouse/ClickHouse/pull/6164) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Show private symbols in stack traces (this is done via parsing symbol tables of ELF files). Added information about file and line number in stack traces if debug info is present. Sped up symbol name lookup with indexing of symbols present in the program. Added new SQL functions for introspection: `demangle` and `addressToLine`. Renamed the function `symbolizeAddress` to `addressToSymbol` for consistency. The function `addressToSymbol` returns the mangled name for performance reasons, so you have to apply `demangle`. Added the setting `allow_introspection_functions`, which is turned off by default. [\#6201](https://github.com/ClickHouse/ClickHouse/pull/6201) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Table function `values` (the name is case-insensitive). It allows reading from the `VALUES` list proposed in [\#5984](https://github.com/ClickHouse/ClickHouse/issues/5984). Example: `SELECT * FROM VALUES('a UInt64, s String', (1, 'one'), (2, 'two'), (3, 'three'))`. [\#6217](https://github.com/ClickHouse/ClickHouse/issues/6217). [\#6209](https://github.com/ClickHouse/ClickHouse/pull/6209) ([dimarub2000](https://github.com/dimarub2000))
- Added the ability to alter storage settings. Syntax: `ALTER TABLE <table> MODIFY SETTING <setting> = <value>`. [\#6366](https://github.com/ClickHouse/ClickHouse/pull/6366) [\#6669](https://github.com/ClickHouse/ClickHouse/pull/6669) [\#6685](https://github.com/ClickHouse/ClickHouse/pull/6685) ([alesapin](https://github.com/alesapin))
- Support for removing detached parts. Syntax: `ALTER TABLE <table_name> DROP DETACHED PART '<part_id>'`. [\#6158](https://github.com/ClickHouse/ClickHouse/pull/6158) ([tavplubix](https://github.com/tavplubix))
- Table constraints. Allows adding a constraint to a table definition which will be checked at insert. [\#5273](https://github.com/ClickHouse/ClickHouse/pull/5273) ([Gleb Novikov](https://github.com/NanoBjorn)) [\#6652](https://github.com/ClickHouse/ClickHouse/pull/6652) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Support for cascaded materialized views. [\#6324](https://github.com/ClickHouse/ClickHouse/pull/6324) ([Amos Bird](https://github.com/amosbird))
- Turn on the query profiler by default to sample every query execution thread once a second. [\#6283](https://github.com/ClickHouse/ClickHouse/pull/6283) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Input format `ORC`. [\#6454](https://github.com/ClickHouse/ClickHouse/pull/6454) [\#6703](https://github.com/ClickHouse/ClickHouse/pull/6703) ([akonyaev90](https://github.com/akonyaev90))
- Added two new functions: `sigmoid` and `tanh` (useful for machine learning applications). [\#6254](https://github.com/ClickHouse/ClickHouse/pull/6254) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Functions `hasToken(haystack, token)`, `hasTokenCaseInsensitive(haystack, token)` to check if a given token is in the haystack. A token is a maximal-length substring between two non-alphanumeric ASCII characters (or the boundaries of the haystack). The token must be a constant string. Supported by the tokenbf\_v1 index specialization. [\#6596](https://github.com/ClickHouse/ClickHouse/pull/6596), [\#6662](https://github.com/ClickHouse/ClickHouse/pull/6662) ([Vasily Nemkov](https://github.com/Enmk))
- New function `neighbor(value, offset[, default_value])`. Allows reaching the previous/next value within a column in a block of data (see the example after this list). [\#5925](https://github.com/ClickHouse/ClickHouse/pull/5925) ([Alex Krash](https://github.com/alex-krash)) [6685365ab8c5b74f9650492c88a012596eb1b0c6](https://github.com/ClickHouse/ClickHouse/commit/6685365ab8c5b74f9650492c88a012596eb1b0c6) [341e2e4587a18065c2da1ca888c73389f48ce36c](https://github.com/ClickHouse/ClickHouse/commit/341e2e4587a18065c2da1ca888c73389f48ce36c) [Alexey Milovidov](https://github.com/alexey-milovidov)
- Created the function `currentUser()`, returning the login of the authorized user. Added the alias `user()` for compatibility with MySQL. [\#6470](https://github.com/ClickHouse/ClickHouse/pull/6470) ([Alex Krash](https://github.com/alex-krash))
- New aggregate functions `quantilesExactInclusive` and `quantilesExactExclusive`, which were proposed in [\#5885](https://github.com/ClickHouse/ClickHouse/issues/5885). [\#6477](https://github.com/ClickHouse/ClickHouse/pull/6477) ([dimarub2000](https://github.com/dimarub2000))
- Function `bitmapRange(bitmap, range_begin, range_end)`, which returns a new set with the specified range (not including `range_end`). [\#6314](https://github.com/ClickHouse/ClickHouse/pull/6314) ([Zhichang Yu](https://github.com/yuzhichang))
- Function `geohashesInBox(longitude_min, latitude_min, longitude_max, latitude_max, precision)`, which creates an array of precision-long strings of geohash boxes covering the provided area. [\#6127](https://github.com/ClickHouse/ClickHouse/pull/6127) ([Vasily Nemkov](https://github.com/Enmk))
- Implement support for the INSERT query with `Kafka` tables. [\#6012](https://github.com/ClickHouse/ClickHouse/pull/6012) ([Ivan](https://github.com/abyss7))
- Added support for the `_partition` and `_timestamp` virtual columns to the Kafka engine. [\#6400](https://github.com/ClickHouse/ClickHouse/pull/6400) ([Ivan](https://github.com/abyss7))
- Possibility to remove sensitive data from `query_log`, server logs, and the process list with regexp-based rules. [\#5710](https://github.com/ClickHouse/ClickHouse/pull/5710) ([filimonov](https://github.com/filimonov))
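A self-contained sketch of the `neighbor` function from the list above; note that it looks up values within the current block of data, so results on the edges of large, multi-block result sets depend on block boundaries:

```sql
SELECT
    number,
    neighbor(number, 1)       AS next_value,  -- value one row ahead; type default (0) past the edge
    neighbor(number, -1, 999) AS prev_value   -- value one row behind; falls back to 999
FROM numbers(5)
```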
#### Experimental Feature {#experimental-feature-2}

- Input and output data format `Template`. It allows specifying a custom format string for input and output. [\#4354](https://github.com/ClickHouse/ClickHouse/issues/4354) [\#6727](https://github.com/ClickHouse/ClickHouse/pull/6727) ([tavplubix](https://github.com/tavplubix))
- Implementation of `LIVE VIEW` tables that were originally proposed in [\#2898](https://github.com/ClickHouse/ClickHouse/pull/2898), prepared in [\#3925](https://github.com/ClickHouse/ClickHouse/issues/3925), and then updated in [\#5541](https://github.com/ClickHouse/ClickHouse/issues/5541). See [\#5541](https://github.com/ClickHouse/ClickHouse/issues/5541) for a detailed description. [\#5541](https://github.com/ClickHouse/ClickHouse/issues/5541) ([vzakaznikov](https://github.com/vzakaznikov)) [\#6425](https://github.com/ClickHouse/ClickHouse/pull/6425) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) [\#6656](https://github.com/ClickHouse/ClickHouse/pull/6656) ([vzakaznikov](https://github.com/vzakaznikov)) Note that the `LIVE VIEW` feature may be removed in future versions.
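A minimal sketch of the experimental `LIVE VIEW` workflow described above; the table and view names are hypothetical, and the feature has to be enabled explicitly:

```sql
SET allow_experimental_live_view = 1;

CREATE TABLE lv_source (x UInt64) ENGINE = MergeTree ORDER BY x;  -- hypothetical source table
CREATE LIVE VIEW lv AS SELECT count() AS cnt FROM lv_source;

INSERT INTO lv_source VALUES (1), (2), (3);
SELECT * FROM lv;  -- returns the up-to-date count
WATCH lv;          -- streams a new result each time the source data changes
```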
#### Bug Fix {#bug-fix-8}

- This release also contains all bug fixes from 19.13 and 19.11.
- Fix segmentation fault when a table has skip indices and a vertical merge happens. [\#6723](https://github.com/ClickHouse/ClickHouse/pull/6723) ([alesapin](https://github.com/alesapin))
- Fix per-column TTL with non-trivial column defaults. Previously, in case of a forced TTL merge with an `OPTIMIZE ... FINAL` query, expired values were replaced by type defaults instead of the user-specified column defaults. [\#6796](https://github.com/ClickHouse/ClickHouse/pull/6796) ([Anton Popov](https://github.com/CurtizJ))
- Fixed the Kafka message duplication problem on normal server restart. [\#6597](https://github.com/ClickHouse/ClickHouse/pull/6597) ([Ivan](https://github.com/abyss7))
- Fixed an infinite loop when reading Kafka messages. Do not pause/resume the consumer on subscription at all; otherwise it may get paused indefinitely in some scenarios. [\#6354](https://github.com/ClickHouse/ClickHouse/pull/6354) ([Ivan](https://github.com/abyss7))
- Fix the `Key expression contains comparison between inconvertible types` exception in the `bitmapContains` function. [\#6136](https://github.com/ClickHouse/ClickHouse/issues/6136) [\#6146](https://github.com/ClickHouse/ClickHouse/issues/6146) [\#6156](https://github.com/ClickHouse/ClickHouse/pull/6156) ([dimarub2000](https://github.com/dimarub2000))
- Fixed a segfault with `optimize_skip_unused_shards` enabled and a missing sharding key. [\#6384](https://github.com/ClickHouse/ClickHouse/pull/6384) ([Anton Popov](https://github.com/CurtizJ))
- Fixed wrong code in mutations that may lead to memory corruption. Fixed a segfault on read of address `0x14c0` that could happen due to concurrent `DROP TABLE` and `SELECT` from `system.parts` or `system.parts_columns`. Fixed a race condition in the preparation of mutation queries. Fixed a deadlock caused by `OPTIMIZE` of Replicated tables and concurrent modification operations like ALTERs. [\#6514](https://github.com/ClickHouse/ClickHouse/pull/6514) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Removed extra verbose logging in the MySQL interface. [\#6389](https://github.com/ClickHouse/ClickHouse/pull/6389) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Return the ability to parse boolean settings from 'true' and 'false' in the configuration file. [\#6278](https://github.com/ClickHouse/ClickHouse/pull/6278) ([alesapin](https://github.com/alesapin))
- Fix a crash in the `quantile` and `median` functions over `Nullable(Decimal128)`. [\#6378](https://github.com/ClickHouse/ClickHouse/pull/6378) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed a possible incomplete result returned by a `SELECT` query with a `WHERE` condition on the primary key that contained a conversion to a Float type. It was caused by incorrect checking of monotonicity in the `toFloat` function. [\#6248](https://github.com/ClickHouse/ClickHouse/issues/6248) [\#6374](https://github.com/ClickHouse/ClickHouse/pull/6374) ([dimarub2000](https://github.com/dimarub2000))
- Check the `max_expanded_ast_elements` setting for mutations. Clear mutations after `TRUNCATE TABLE`. [\#6205](https://github.com/ClickHouse/ClickHouse/pull/6205) ([Winter Zhang](https://github.com/zhang2014))
- Fix JOIN results for key columns when used with `join_use_nulls`. Attach Nulls instead of column defaults. [\#6249](https://github.com/ClickHouse/ClickHouse/pull/6249) ([Artem Zuikov](https://github.com/4ertus2))
- Fix for skip indices with vertical merge and alter. Fix for the `Bad size of marks file` exception. [\#6594](https://github.com/ClickHouse/ClickHouse/issues/6594) [\#6713](https://github.com/ClickHouse/ClickHouse/pull/6713) ([alesapin](https://github.com/alesapin))
- Fixed a rare crash in `ALTER MODIFY COLUMN` and vertical merge when one of the merged/altered parts is empty (0 rows). [\#6746](https://github.com/ClickHouse/ClickHouse/issues/6746) [\#6780](https://github.com/ClickHouse/ClickHouse/pull/6780) ([alesapin](https://github.com/alesapin))
- Fixed a bug in the conversion of `LowCardinality` types in `AggregateFunctionFactory`. This fixes [\#6257](https://github.com/ClickHouse/ClickHouse/issues/6257). [\#6281](https://github.com/ClickHouse/ClickHouse/pull/6281) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fix wrong behavior and possible segfaults in the `topK` and `topKWeighted` aggregate functions. [\#6404](https://github.com/ClickHouse/ClickHouse/pull/6404) ([Anton Popov](https://github.com/CurtizJ))
- Fixed unsafe code around the `getIdentifier` function. [\#6401](https://github.com/ClickHouse/ClickHouse/issues/6401) [\#6409](https://github.com/ClickHouse/ClickHouse/pull/6409) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a bug in the MySQL wire protocol (used while connecting to ClickHouse from the MySQL client), caused by a heap buffer overflow in `PacketPayloadWriteBuffer`. [\#6212](https://github.com/ClickHouse/ClickHouse/pull/6212) ([Yuriy Baranov](https://github.com/yurriy))
- Fixed a memory leak in the `bitmapSubsetInRange` function. [\#6819](https://github.com/ClickHouse/ClickHouse/pull/6819) ([Zhichang Yu](https://github.com/yuzhichang))
- Fixed a rare bug when a mutation is executed after a granularity change. [\#6816](https://github.com/ClickHouse/ClickHouse/pull/6816) ([alesapin](https://github.com/alesapin))
- Allow a protobuf message with all fields set to default values. [\#6132](https://github.com/ClickHouse/ClickHouse/pull/6132) ([Vitaly Baranov](https://github.com/vitlibar))
- Resolve a bug with the `nullIf` function when a `NULL` argument is passed as the second argument. [\#6446](https://github.com/ClickHouse/ClickHouse/pull/6446) ([Guillaume Tassery](https://github.com/YiuRULE))
- Fixed a rare bug with wrong memory allocation/deallocation in complex key cache dictionaries with string fields, which led to infinite memory consumption (looks like a memory leak). The bug reproduces when the string size is a power of two starting from eight (8, 16, 32, etc.). [\#6447](https://github.com/ClickHouse/ClickHouse/pull/6447) ([alesapin](https://github.com/alesapin))
- Fixed Gorilla encoding on small sequences, which caused the exception `Cannot write after end of buffer`. [\#6398](https://github.com/ClickHouse/ClickHouse/issues/6398) [\#6444](https://github.com/ClickHouse/ClickHouse/pull/6444) ([Vasily Nemkov](https://github.com/Enmk))
- Allow using non-nullable types in JOINs with `join_use_nulls` enabled. [\#6705](https://github.com/ClickHouse/ClickHouse/pull/6705) ([Artem Zuikov](https://github.com/4ertus2))
- Disable `Poco::AbstractConfiguration` substitutions in queries in `clickhouse-client`. [\#6706](https://github.com/ClickHouse/ClickHouse/pull/6706) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Avoid deadlocks in `REPLACE PARTITION`. [\#6677](https://github.com/ClickHouse/ClickHouse/pull/6677) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Using `arrayReduce` for constant arguments could lead to a segfault. [\#6242](https://github.com/ClickHouse/ClickHouse/issues/6242) [\#6326](https://github.com/ClickHouse/ClickHouse/pull/6326) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix inconsistent parts which can appear if a replica was restored after `DROP PARTITION`. [\#6522](https://github.com/ClickHouse/ClickHouse/issues/6522) [\#6523](https://github.com/ClickHouse/ClickHouse/pull/6523) ([tavplubix](https://github.com/tavplubix))
- Fixed a hang in the `JSONExtractRaw` function. [\#6195](https://github.com/ClickHouse/ClickHouse/issues/6195) [\#6198](https://github.com/ClickHouse/ClickHouse/pull/6198) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a bug with incorrect skip indices serialization and aggregation with adaptive granularity. [\#6594](https://github.com/ClickHouse/ClickHouse/issues/6594). [\#6748](https://github.com/ClickHouse/ClickHouse/pull/6748) ([alesapin](https://github.com/alesapin))
- Fix the `WITH ROLLUP` and `WITH CUBE` modifiers of `GROUP BY` with two-level aggregation. [\#6225](https://github.com/ClickHouse/ClickHouse/pull/6225) ([Anton Popov](https://github.com/CurtizJ))
- Fixed a bug with writing secondary index marks with adaptive granularity. [\#6126](https://github.com/ClickHouse/ClickHouse/pull/6126) ([alesapin](https://github.com/alesapin))
- Fix the initialization order during server startup. Since `StorageMergeTree::background_task_handle` is initialized in `startup()`, `MergeTreeBlockOutputStream::write()` may try to use it before initialization. Just check if it is initialized. [\#6080](https://github.com/ClickHouse/ClickHouse/pull/6080) ([Ivan](https://github.com/abyss7))
- Clear the data buffer from the previous read operation that completed with an error. [\#6026](https://github.com/ClickHouse/ClickHouse/pull/6026) ([Nikolay](https://github.com/bopohaa))
- Fixed a bug with enabling adaptive granularity when creating a new replica for a Replicated\*MergeTree table. [\#6394](https://github.com/ClickHouse/ClickHouse/issues/6394) [\#6452](https://github.com/ClickHouse/ClickHouse/pull/6452) ([alesapin](https://github.com/alesapin))
- Fixed a possible crash during server startup in case an exception happened in `libunwind` during an exception on access to an uninitialized `ThreadStatus` structure. [\#6456](https://github.com/ClickHouse/ClickHouse/pull/6456) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
- Fix a crash in the `yandexConsistentHash` function. Found by fuzz test. [\#6304](https://github.com/ClickHouse/ClickHouse/issues/6304) [\#6305](https://github.com/ClickHouse/ClickHouse/pull/6305) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed the possibility of hanging queries when the server is overloaded and the global thread pool becomes nearly full. This has a higher chance of happening on clusters with a large number of shards (hundreds), because distributed queries allocate a thread per connection to each shard. For example, this issue may reproduce if a cluster of 330 shards is processing 30 concurrent distributed queries. This issue affects all versions starting from 19.2. [\#6301](https://github.com/ClickHouse/ClickHouse/pull/6301) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed the logic of the `arrayEnumerateUniqRanked` function. [\#6423](https://github.com/ClickHouse/ClickHouse/pull/6423) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a segfault when decoding the symbol table. [\#6603](https://github.com/ClickHouse/ClickHouse/pull/6603) ([Amos Bird](https://github.com/amosbird))
- Fixed an irrelevant exception in the cast of `LowCardinality(Nullable)` to a not-Nullable column in case it doesn't contain Nulls (e.g. in a query like `SELECT CAST(CAST('Hello' AS LowCardinality(Nullable(String))) AS String)`). [\#6094](https://github.com/ClickHouse/ClickHouse/issues/6094) [\#6119](https://github.com/ClickHouse/ClickHouse/pull/6119) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Removed extra quoting of descriptions in the `system.settings` table. [\#6696](https://github.com/ClickHouse/ClickHouse/issues/6696) [\#6699](https://github.com/ClickHouse/ClickHouse/pull/6699) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Avoid a possible deadlock in `TRUNCATE` of a Replicated table. [\#6695](https://github.com/ClickHouse/ClickHouse/pull/6695) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix reading in order of the sorting key. [\#6189](https://github.com/ClickHouse/ClickHouse/pull/6189) ([Anton Popov](https://github.com/CurtizJ))
- Fix the `ALTER TABLE ... UPDATE` query for tables with `enable_mixed_granularity_parts=1`. [\#6543](https://github.com/ClickHouse/ClickHouse/pull/6543) ([alesapin](https://github.com/alesapin))
- Fix a bug opened by [\#4405](https://github.com/ClickHouse/ClickHouse/pull/4405) (since 19.4.0). Reproduces in queries to Distributed tables over MergeTree tables when we don't query any columns (`SELECT 1`). [\#6236](https://github.com/ClickHouse/ClickHouse/pull/6236) ([alesapin](https://github.com/alesapin))
- Fixed overflow in integer division of a signed type by an unsigned one. The behaviour was exactly as in the C or C++ language (integer promotion rules), which may be surprising. Please note that overflow is still possible when dividing a large signed number by a large unsigned number or vice versa (but that case is less usual). The issue existed in all server versions. [\#6214](https://github.com/ClickHouse/ClickHouse/issues/6214) [\#6233](https://github.com/ClickHouse/ClickHouse/pull/6233) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Limit the maximum sleep time for throttling when `max_execution_speed` or `max_execution_speed_bytes` is set. Fixed false errors like `Estimated query execution time (inf seconds) is too long`. [\#5547](https://github.com/ClickHouse/ClickHouse/issues/5547) [\#6232](https://github.com/ClickHouse/ClickHouse/pull/6232) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed issues with using `MATERIALIZED` columns and aliases in `MaterializedView`. [\#448](https://github.com/ClickHouse/ClickHouse/issues/448) [\#3484](https://github.com/ClickHouse/ClickHouse/issues/3484) [\#3450](https://github.com/ClickHouse/ClickHouse/issues/3450) [\#2878](https://github.com/ClickHouse/ClickHouse/issues/2878) [\#2285](https://github.com/ClickHouse/ClickHouse/issues/2285) [\#3796](https://github.com/ClickHouse/ClickHouse/pull/3796) ([Amos Bird](https://github.com/amosbird)) [\#6316](https://github.com/ClickHouse/ClickHouse/pull/6316) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix `FormatFactory` behaviour for input streams which are not implemented as processors. [\#6495](https://github.com/ClickHouse/ClickHouse/pull/6495) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fixed a typo. [\#6631](https://github.com/ClickHouse/ClickHouse/pull/6631) ([Alexey Ryndin](https://github.com/alexryndin))
- Fixed a typo in an error message (is -> are). [\#6839](https://github.com/ClickHouse/ClickHouse/pull/6839) ([Denis Zhuravlev](https://github.com/den-crane))
- Fixed an error while parsing a list of columns from a string if the type contained a comma (this issue was relevant for the `File`, `URL`, `HDFS` storages) [\#6217](https://github.com/ClickHouse/ClickHouse/issues/6217). [\#6209](https://github.com/ClickHouse/ClickHouse/pull/6209) ([dimarub2000](https://github.com/dimarub2000))

#### Security Fix {#security-fix}

- This release also contains all security bug fixes from 19.13 and 19.11.
- Fixed the possibility of a fabricated query causing a server crash due to stack overflow in the SQL parser. Fixed the possibility of stack overflow in Merge and Distributed tables, materialized views and conditions for row-level security that involve subqueries. [\#6433](https://github.com/ClickHouse/ClickHouse/pull/6433) ([alexey-milovidov](https://github.com/alexey-milovidov))

#### Improvement {#improvement-3}
- Correct implementation of ternary logic for `AND/OR`. [\#6048](https://github.com/ClickHouse/ClickHouse/pull/6048) ([Alexander Kazakov](https://github.com/Akazz))
- Now values and rows with expired TTL will be removed after an `OPTIMIZE ... FINAL` query from old parts without TTL info or with outdated TTL info, e.g. after an `ALTER ... MODIFY TTL` query. Added the queries `SYSTEM STOP/START TTL MERGES` to disallow/allow assigning merges with TTL and filtering of expired values in all merges. [\#6274](https://github.com/ClickHouse/ClickHouse/pull/6274) ([Anton Popov](https://github.com/CurtizJ))
- Possibility to change the location of the ClickHouse history file for the client using the `CLICKHOUSE_HISTORY_FILE` env variable. [\#6840](https://github.com/ClickHouse/ClickHouse/pull/6840) ([filimonov](https://github.com/filimonov))
- Remove the `dry_run` flag from `InterpreterSelectQuery`. [\#6375](https://github.com/ClickHouse/ClickHouse/pull/6375) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Support `ASOF JOIN` with the `ON` section. [\#6211](https://github.com/ClickHouse/ClickHouse/pull/6211) ([Artem Zuikov](https://github.com/4ertus2))
- Better support of skip indexes for mutations and replication. Support for the `MATERIALIZE/CLEAR INDEX ... IN PARTITION` query. `UPDATE x = x` recalculates all indexes that use column `x`. [\#5053](https://github.com/ClickHouse/ClickHouse/pull/5053) ([Nikita Vasilev](https://github.com/nikvas0))
- Allow to `ATTACH` live views (for example, at server startup) regardless of the `allow_experimental_live_view` setting. [\#6754](https://github.com/ClickHouse/ClickHouse/pull/6754) ([alexey-milovidov](https://github.com/alexey-milovidov))
- For stack traces gathered by the query profiler, do not include stack frames generated by the query profiler itself. [\#6250](https://github.com/ClickHouse/ClickHouse/pull/6250) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Now the table functions `values`, `file`, `url`, `hdfs` have support for ALIAS columns. [\#6255](https://github.com/ClickHouse/ClickHouse/pull/6255) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Throw an exception if a `config.d` file doesn't have the corresponding root element as the config file. [\#6123](https://github.com/ClickHouse/ClickHouse/pull/6123) ([dimarub2000](https://github.com/dimarub2000))
- Print extra info in the exception message for `no space left on device`. [\#6182](https://github.com/ClickHouse/ClickHouse/issues/6182), [\#6252](https://github.com/ClickHouse/ClickHouse/issues/6252) [\#6352](https://github.com/ClickHouse/ClickHouse/pull/6352) ([tavplubix](https://github.com/tavplubix))
- When determining the shards of a `Distributed` table to be covered by a read query (for `optimize_skip_unused_shards` = 1), ClickHouse now checks conditions from both the `prewhere` and `where` clauses of the select statement. [\#6521](https://github.com/ClickHouse/ClickHouse/pull/6521) ([Alexander Kazakov](https://github.com/Akazz))
- Enabled `SIMDJSON` for machines without AVX2 but with the SSE 4.2 and PCLMUL instruction sets. [\#6285](https://github.com/ClickHouse/ClickHouse/issues/6285) [\#6320](https://github.com/ClickHouse/ClickHouse/pull/6320) ([alexey-milovidov](https://github.com/alexey-milovidov))
- ClickHouse can work on filesystems without `O_DIRECT` support (such as ZFS and BtrFS) without additional tuning. [\#4449](https://github.com/ClickHouse/ClickHouse/issues/4449) [\#6730](https://github.com/ClickHouse/ClickHouse/pull/6730) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Support predicate push-down for the final subquery. [\#6120](https://github.com/ClickHouse/ClickHouse/pull/6120) ([TCeason](https://github.com/TCeason)) [\#6162](https://github.com/ClickHouse/ClickHouse/pull/6162) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Better `JOIN ON` keys extraction. [\#6131](https://github.com/ClickHouse/ClickHouse/pull/6131) ([Artem Zuikov](https://github.com/4ertus2))
- Updated `SIMDJSON`. [\#6285](https://github.com/ClickHouse/ClickHouse/issues/6285). [\#6306](https://github.com/ClickHouse/ClickHouse/pull/6306) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Optimize the selection of the smallest column for `SELECT count()` queries. [\#6344](https://github.com/ClickHouse/ClickHouse/pull/6344) ([Amos Bird](https://github.com/amosbird))
- Added a `strict` parameter in `windowFunnel()`. When `strict` is set, `windowFunnel()` applies conditions only to unique values. [\#6548](https://github.com/ClickHouse/ClickHouse/pull/6548) ([achimbab](https://github.com/achimbab))
- Safer interface of `mysqlxx::Pool`. [\#6150](https://github.com/ClickHouse/ClickHouse/pull/6150) ([avasiliev](https://github.com/avasiliev))
- The line size of options when executing with the `--help` option now corresponds to the terminal size. [\#6590](https://github.com/ClickHouse/ClickHouse/pull/6590) ([dimarub2000](https://github.com/dimarub2000))
- Disable the "read in order" optimization for aggregation without keys. [\#6599](https://github.com/ClickHouse/ClickHouse/pull/6599) ([Anton Popov](https://github.com/CurtizJ))
- The HTTP status code for the `INCORRECT_DATA` and `TYPE_MISMATCH` error codes was changed from the default `500 Internal Server Error` to `400 Bad Request`. [\#6271](https://github.com/ClickHouse/ClickHouse/pull/6271) ([Alexander Rodin](https://github.com/a-rodin))
- Move the Join object from `ExpressionAction` into `AnalyzedJoin`. `ExpressionAnalyzer` and `ExpressionAction` do not know about the `Join` class anymore. Its logic is hidden behind the `AnalyzedJoin` interface. [\#6801](https://github.com/ClickHouse/ClickHouse/pull/6801) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed a possible deadlock of distributed queries when one of the shards is localhost but the query is sent via a network connection. [\#6759](https://github.com/ClickHouse/ClickHouse/pull/6759) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Changed the semantics of multiple-table `RENAME` to avoid possible deadlocks. [\#6757](https://github.com/ClickHouse/ClickHouse/issues/6757). [\#6756](https://github.com/ClickHouse/ClickHouse/pull/6756) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Rewrote the MySQL compatibility server to prevent loading the full packet payload into memory. Decreased memory consumption for each connection to approximately `2 * DBMS_DEFAULT_BUFFER_SIZE` (read/write buffers). [\#5811](https://github.com/ClickHouse/ClickHouse/pull/5811) ([Yuriy Baranov](https://github.com/yurriy))
- Move the AST alias interpreting logic out of the parser, which doesn't have to know anything about query semantics. [\#6108](https://github.com/ClickHouse/ClickHouse/pull/6108) ([Artem Zuikov](https://github.com/4ertus2))
- Slightly safer parsing of `NamesAndTypesList`. [\#6408](https://github.com/ClickHouse/ClickHouse/issues/6408). [\#6410](https://github.com/ClickHouse/ClickHouse/pull/6410) ([alexey-milovidov](https://github.com/alexey-milovidov))
- `clickhouse-copier`: Allow using `where_condition` from the config with a `partition_key` alias in the query for checking partition existence (earlier it was used only in queries that read data). [\#6577](https://github.com/ClickHouse/ClickHouse/pull/6577) ([proller](https://github.com/proller))
- Added an optional message argument to `throwIf`. ([\#5772](https://github.com/ClickHouse/ClickHouse/issues/5772)) [\#6329](https://github.com/ClickHouse/ClickHouse/pull/6329) ([Vdimir](https://github.com/Vdimir))
- A server exception received while sending insertion data is now being processed in the client as well. [\#5891](https://github.com/ClickHouse/ClickHouse/issues/5891) [\#6711](https://github.com/ClickHouse/ClickHouse/pull/6711) ([dimarub2000](https://github.com/dimarub2000))
- Added a metric `DistributedFilesToInsert` that shows the total number of files in the filesystem that are selected to be sent to remote servers by Distributed tables. The number is summed across all shards. [\#6600](https://github.com/ClickHouse/ClickHouse/pull/6600) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Move most of the JOIN prepare logic from `ExpressionAction/ExpressionAnalyzer` to `AnalyzedJoin`. [\#6785](https://github.com/ClickHouse/ClickHouse/pull/6785) ([Artem Zuikov](https://github.com/4ertus2))
- Fix the TSan [warning](https://clickhouse-test-reports.s3.yandex.net/6399/c1c1d1daa98e199e620766f1bd06a5921050a00d/functional_stateful_tests_(thread).html) ‘lock-order-inversion’. [\#6740](https://github.com/ClickHouse/ClickHouse/pull/6740) ([Vasily Nemkov](https://github.com/Enmk))
- Better information messages about the lack of Linux capabilities. Logging fatal errors with the "fatal" level, which will make them easier to find in `system.text_log`. [\#6441](https://github.com/ClickHouse/ClickHouse/pull/6441) ([alexey-milovidov](https://github.com/alexey-milovidov))
- When dumping temporary data to disk to restrict memory usage during `GROUP BY`, `ORDER BY`, the free disk space was not checked. The fix adds a new setting `min_free_disk_space`: when the free disk space is smaller than this threshold, the query will stop and throw `ErrorCodes::NOT_ENOUGH_SPACE`. [\#6678](https://github.com/ClickHouse/ClickHouse/pull/6678) ([Weiqing Xu](https://github.com/weiqxu)) [\#6691](https://github.com/ClickHouse/ClickHouse/pull/6691) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Removed the recursive rwlock by thread. It made no sense, because threads are reused between queries. A `SELECT` query may acquire a lock in one thread, hold a lock from another thread and exit from the first thread. At the same time, the first thread can be reused by a `DROP` query. This would lead to false "Attempt to acquire exclusive lock recursively" messages. [\#6771](https://github.com/ClickHouse/ClickHouse/pull/6771) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Split `ExpressionAnalyzer.appendJoin()`. Prepare a place in `ExpressionAnalyzer` for `MergeJoin`. [\#6524](https://github.com/ClickHouse/ClickHouse/pull/6524) ([Artem Zuikov](https://github.com/4ertus2))
- Added the `mysql_native_password` authentication plugin to the MySQL compatibility server. [\#6194](https://github.com/ClickHouse/ClickHouse/pull/6194) ([Yuriy Baranov](https://github.com/yurriy))
- Fewer `clock_gettime` calls; fixed ABI compatibility between debug/release in `Allocator` (an insignificant issue). [\#6197](https://github.com/ClickHouse/ClickHouse/pull/6197) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Move `collectUsedColumns` from `ExpressionAnalyzer` to `SyntaxAnalyzer`. `SyntaxAnalyzer` now makes `required_source_columns` itself. [\#6416](https://github.com/ClickHouse/ClickHouse/pull/6416) ([Artem Zuikov](https://github.com/4ertus2))
- Added the setting `joined_subquery_requires_alias` to require aliases for subselects and table functions in `FROM` when more than one table is present (i.e. queries with JOINs). [\#6733](https://github.com/ClickHouse/ClickHouse/pull/6733) ([Artem Zuikov](https://github.com/4ertus2))
- Extract the `GetAggregatesVisitor` class from `ExpressionAnalyzer`. [\#6458](https://github.com/ClickHouse/ClickHouse/pull/6458) ([Artem Zuikov](https://github.com/4ertus2))
- `system.query_log`: changed the data type of the `type` column to `Enum`. [\#6265](https://github.com/ClickHouse/ClickHouse/pull/6265) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
- Static linking of the `sha256_password` authentication plugin. [\#6512](https://github.com/ClickHouse/ClickHouse/pull/6512) ([Yuriy Baranov](https://github.com/yurriy))
- Avoid an extra dependency for the `compile` setting to work. In previous versions, the user could get errors like `cannot open crti.o`, `unable to find library -lc` etc. [\#6309](https://github.com/ClickHouse/ClickHouse/pull/6309) ([alexey-milovidov](https://github.com/alexey-milovidov))
- More validation of input that may come from a malicious replica. [\#6303](https://github.com/ClickHouse/ClickHouse/pull/6303) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Now the `clickhouse-obfuscator` file is available in the `clickhouse-client` package. In previous versions it was available as `clickhouse obfuscator` (with whitespace). [\#5816](https://github.com/ClickHouse/ClickHouse/issues/5816) [\#6609](https://github.com/ClickHouse/ClickHouse/pull/6609) ([dimarub2000](https://github.com/dimarub2000))
- Fixed a deadlock when we have at least two queries that read at least two tables in different order and another query that performs a DDL operation on one of the tables. Fixed another very rare deadlock. [\#6764](https://github.com/ClickHouse/ClickHouse/pull/6764) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added the `os_thread_ids` column to `system.processes` and `system.query_log` for better debugging possibilities. [\#6763](https://github.com/ClickHouse/ClickHouse/pull/6763) ([alexey-milovidov](https://github.com/alexey-milovidov))
- A workaround for PHP mysqlnd extension bugs which occur when `sha256_password` is used as the default authentication plugin (described in [\#6031](https://github.com/ClickHouse/ClickHouse/issues/6031)). [\#6113](https://github.com/ClickHouse/ClickHouse/pull/6113) ([Yuriy Baranov](https://github.com/yurriy))
- Remove an unneeded place with changed nullability columns. [\#6693](https://github.com/ClickHouse/ClickHouse/pull/6693) ([Artem Zuikov](https://github.com/4ertus2))
- Set the default value of `queue_max_wait_ms` to zero, because the current value (five seconds) makes no sense. There are rare circumstances when this setting has any use. Added the settings `replace_running_query_max_wait_ms`, `kafka_max_wait_ms` and `connection_pool_max_wait_ms` for disambiguation. [\#6692](https://github.com/ClickHouse/ClickHouse/pull/6692) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Extract `SelectQueryExpressionAnalyzer` from `ExpressionAnalyzer`. Keep the latter for non-select queries. [\#6499](https://github.com/ClickHouse/ClickHouse/pull/6499) ([Artem Zuikov](https://github.com/4ertus2))
- Removed duplicated input and output formats. [\#6239](https://github.com/ClickHouse/ClickHouse/pull/6239) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Allow the user to override the `poll_interval` and `idle_connection_timeout` settings on connection. [\#6230](https://github.com/ClickHouse/ClickHouse/pull/6230) ([alexey-milovidov](https://github.com/alexey-milovidov))
- `MergeTree` now has an additional option `ttl_only_drop_parts` (disabled by default) to avoid partial pruning of parts, so that they are dropped completely when all rows in a part are expired. [\#6191](https://github.com/ClickHouse/ClickHouse/pull/6191) ([Sergi Vladykin](https://github.com/svladykin))
- Type checks for set index functions. Throw an exception if a function got a wrong type. This fixes the fuzz test with UBSan. [\#6511](https://github.com/ClickHouse/ClickHouse/pull/6511) ([Nikita Vasilev](https://github.com/nikvas0))
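
As a companion to the TTL item above, a minimal sketch of how the new TTL controls fit together; the table, column names and TTL expressions are hypothetical and for illustration only.

```sql
-- Hypothetical table with a column TTL.
CREATE TABLE events
(
    d Date,
    user_id UInt64,
    payload String TTL d + INTERVAL 30 DAY
)
ENGINE = MergeTree
ORDER BY (d, user_id);

-- Temporarily stop assigning merges that filter expired TTL values.
SYSTEM STOP TTL MERGES;

-- Change the TTL; old parts keep outdated TTL info until they are merged.
ALTER TABLE events MODIFY TTL d + INTERVAL 7 DAY;

-- Re-enable TTL merges and force expired values to be removed right away.
SYSTEM START TTL MERGES;
OPTIMIZE TABLE events FINAL;
```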

#### Performance Improvement {#performance-improvement-2}

- Optimize queries with an `ORDER BY expressions` clause, where `expressions` have a prefix coinciding with the sorting key in `MergeTree` tables. This optimization is controlled by the `optimize_read_in_order` setting (see the sketch after this list). [\#6054](https://github.com/ClickHouse/ClickHouse/pull/6054) [\#6629](https://github.com/ClickHouse/ClickHouse/pull/6629) ([Anton Popov](https://github.com/CurtizJ))
- Allow using multiple threads during parts loading and removal. [\#6372](https://github.com/ClickHouse/ClickHouse/issues/6372) [\#6074](https://github.com/ClickHouse/ClickHouse/issues/6074) [\#6438](https://github.com/ClickHouse/ClickHouse/pull/6438) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Implemented a batch variant of updating aggregate function states. It may lead to performance benefits. [\#6435](https://github.com/ClickHouse/ClickHouse/pull/6435) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Using the `FastOps` library for the functions `exp`, `log`, `sigmoid`, `tanh`. FastOps is a fast vector math library from Michael Parakhin (Yandex CTO). Improved performance of the `exp` and `log` functions more than 6 times. The functions `exp` and `log` with a `Float32` argument will return `Float32` (in previous versions they always returned `Float64`). Now `exp(nan)` may return `inf`. The result of the `exp` and `log` functions may be not the nearest machine-representable number to the true answer. [\#6254](https://github.com/ClickHouse/ClickHouse/pull/6254) ([alexey-milovidov](https://github.com/alexey-milovidov)) Using Danila Kutenin's variant to make fastops work [\#6317](https://github.com/ClickHouse/ClickHouse/pull/6317) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Disable consecutive key optimization for `UInt8/16`. [\#6298](https://github.com/ClickHouse/ClickHouse/pull/6298) [\#6701](https://github.com/ClickHouse/ClickHouse/pull/6701) ([akuzm](https://github.com/akuzm))
- Improved performance of the `simdjson` library by getting rid of dynamic allocation in `ParsedJson::Iterator`. [\#6479](https://github.com/ClickHouse/ClickHouse/pull/6479) ([Vitaly Baranov](https://github.com/vitlibar))
- Pre-fault pages when allocating memory with `mmap()`. [\#6667](https://github.com/ClickHouse/ClickHouse/pull/6667) ([akuzm](https://github.com/akuzm))
- Fixed a performance bug in `Decimal` comparison. [\#6380](https://github.com/ClickHouse/ClickHouse/pull/6380) ([Artem Zuikov](https://github.com/4ertus2))
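
A minimal sketch of the read-in-order optimization from the first item of the list above; the table, columns and data are hypothetical.

```sql
-- Hypothetical MergeTree table sorted by (CounterID, EventDate).
CREATE TABLE hits
(
    CounterID UInt32,
    EventDate Date,
    URL String
)
ENGINE = MergeTree
ORDER BY (CounterID, EventDate);

-- ORDER BY matches a prefix of the sorting key, so rows can be read
-- in order from the parts instead of being fully re-sorted.
SELECT CounterID, EventDate
FROM hits
ORDER BY CounterID, EventDate
LIMIT 10
SETTINGS optimize_read_in_order = 1;
```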

#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-4}

- Removed the Compiler (runtime template instantiation) because we've won over its performance. [\#6646](https://github.com/ClickHouse/ClickHouse/pull/6646) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added a performance test to show the degradation of performance in gcc-9 in a more isolated way. [\#6302](https://github.com/ClickHouse/ClickHouse/pull/6302) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added the table function `numbers_mt`, which is a multithreaded version of `numbers`. Updated performance tests with hash functions. [\#6554](https://github.com/ClickHouse/ClickHouse/pull/6554) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Comparison mode in `clickhouse-benchmark`. [\#6220](https://github.com/ClickHouse/ClickHouse/issues/6220) [\#6343](https://github.com/ClickHouse/ClickHouse/pull/6343) ([dimarub2000](https://github.com/dimarub2000))
- Best effort for printing stack traces. Also added `SIGPROF` as a debugging signal to print the stack trace of a running thread. [\#6529](https://github.com/ClickHouse/ClickHouse/pull/6529) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Every function in its own file, part 10. [\#6321](https://github.com/ClickHouse/ClickHouse/pull/6321) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Remove the doubled const `TABLE_IS_READ_ONLY`. [\#6566](https://github.com/ClickHouse/ClickHouse/pull/6566) ([filimonov](https://github.com/filimonov))
- Formatting changes for the `StringHashMap` PR [\#5417](https://github.com/ClickHouse/ClickHouse/issues/5417). [\#6700](https://github.com/ClickHouse/ClickHouse/pull/6700) ([akuzm](https://github.com/akuzm))
- Better subquery for join creation in `ExpressionAnalyzer`. [\#6824](https://github.com/ClickHouse/ClickHouse/pull/6824) ([Artem Zuikov](https://github.com/4ertus2))
- Remove a redundant condition (found by PVS Studio). [\#6775](https://github.com/ClickHouse/ClickHouse/pull/6775) ([akuzm](https://github.com/akuzm))
- Separate the hash table interface for `ReverseIndex`. [\#6672](https://github.com/ClickHouse/ClickHouse/pull/6672) ([akuzm](https://github.com/akuzm))
- Refactoring of settings. [\#6689](https://github.com/ClickHouse/ClickHouse/pull/6689) ([alesapin](https://github.com/alesapin))
- Add comments for `set` index functions. [\#6319](https://github.com/ClickHouse/ClickHouse/pull/6319) ([Nikita Vasilev](https://github.com/nikvas0))
- Increase the OOM score in the debug version on Linux. [\#6152](https://github.com/ClickHouse/ClickHouse/pull/6152) ([akuzm](https://github.com/akuzm))
- HDFS HA now works in debug build. [\#6650](https://github.com/ClickHouse/ClickHouse/pull/6650) ([Weiqing Xu](https://github.com/weiqxu))
- Added a test for `transform_query_for_external_database`. [\#6388](https://github.com/ClickHouse/ClickHouse/pull/6388) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Add a test for multiple materialized views for a Kafka table. [\#6509](https://github.com/ClickHouse/ClickHouse/pull/6509) ([Ivan](https://github.com/abyss7))
- Make a better build scheme. [\#6500](https://github.com/ClickHouse/ClickHouse/pull/6500) ([Ivan](https://github.com/abyss7))
- Fixed the `test_external_dictionaries` integration test when it was executed under a non-root user. [\#6507](https://github.com/ClickHouse/ClickHouse/pull/6507) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- The bug reproduces when the total size of written packets exceeds `DBMS_DEFAULT_BUFFER_SIZE`. [\#6204](https://github.com/ClickHouse/ClickHouse/pull/6204) ([Yuriy Baranov](https://github.com/yurriy))
- Added a test for the `RENAME` table race condition. [\#6752](https://github.com/ClickHouse/ClickHouse/pull/6752) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Avoid a data race on Settings in `KILL QUERY`. [\#6753](https://github.com/ClickHouse/ClickHouse/pull/6753) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Add an integration test for handling errors by a cache dictionary. [\#6755](https://github.com/ClickHouse/ClickHouse/pull/6755) ([Vitaly Baranov](https://github.com/vitlibar))
- Disable parsing of ELF object files on Mac OS, because it makes no sense. [\#6578](https://github.com/ClickHouse/ClickHouse/pull/6578) ([alexey-milovidov](https://github.com/alexey-milovidov))
- An attempt to make the changelog generator better. [\#6327](https://github.com/ClickHouse/ClickHouse/pull/6327) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Adding the `-Wshadow` switch to GCC. [\#6325](https://github.com/ClickHouse/ClickHouse/pull/6325) ([kreuzerkrieg](https://github.com/kreuzerkrieg))
- Removed obsolete code for `mimalloc` support. [\#6715](https://github.com/ClickHouse/ClickHouse/pull/6715) ([alexey-milovidov](https://github.com/alexey-milovidov))
- `zlib-ng` determines x86 capabilities and saves this info in global variables. This is done in the deflateInit call, which may be made by different threads simultaneously. To avoid multithreaded writes, do it at library startup. [\#6141](https://github.com/ClickHouse/ClickHouse/pull/6141) ([akuzm](https://github.com/akuzm))
- A regression test for a bug in join which was fixed in [\#5192](https://github.com/ClickHouse/ClickHouse/issues/5192). [\#6147](https://github.com/ClickHouse/ClickHouse/pull/6147) ([Bakhtiyor Ruziev](https://github.com/theruziev))
- Fixed an MSan report. [\#6144](https://github.com/ClickHouse/ClickHouse/pull/6144) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix the flapping TTL test. [\#6782](https://github.com/ClickHouse/ClickHouse/pull/6782) ([Anton Popov](https://github.com/CurtizJ))
- Fixed a false data race in the `MergeTreeDataPart::is_frozen` field. [\#6583](https://github.com/ClickHouse/ClickHouse/pull/6583) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed timeouts in the fuzz test. In the previous version, it managed to find a false hangup in the query `SELECT * FROM numbers_mt(gccMurmurHash(''))`. [\#6582](https://github.com/ClickHouse/ClickHouse/pull/6582) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added debug checks to `static_cast` of columns. [\#6581](https://github.com/ClickHouse/ClickHouse/pull/6581) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Support for Oracle Linux in the official RPM packages. [\#6356](https://github.com/ClickHouse/ClickHouse/issues/6356) [\#6585](https://github.com/ClickHouse/ClickHouse/pull/6585) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Changed JSON perftests from `once` to `loop` type. [\#6536](https://github.com/ClickHouse/ClickHouse/pull/6536) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- `odbc-bridge.cpp` defines `main()`, so it should not be included in `clickhouse-lib`. [\#6538](https://github.com/ClickHouse/ClickHouse/pull/6538) ([Orivej Desh](https://github.com/orivej))
- Test for a crash in `FULL|RIGHT JOIN` with nulls in the right table's keys. [\#6362](https://github.com/ClickHouse/ClickHouse/pull/6362) ([Artem Zuikov](https://github.com/4ertus2))
- Added a test for the limit on expansion of aliases, just in case. [\#6442](https://github.com/ClickHouse/ClickHouse/pull/6442) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Switched from `boost::filesystem` to `std::filesystem` where appropriate. [\#6253](https://github.com/ClickHouse/ClickHouse/pull/6253) [\#6385](https://github.com/ClickHouse/ClickHouse/pull/6385) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added RPM packages to the website. [\#6251](https://github.com/ClickHouse/ClickHouse/pull/6251) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Add a test for the fixed `Unknown identifier` exception in the `IN` section. [\#6708](https://github.com/ClickHouse/ClickHouse/pull/6708) ([Artem Zuikov](https://github.com/4ertus2))
- Simplify `shared_ptr_helper` because people face difficulties understanding it. [\#6675](https://github.com/ClickHouse/ClickHouse/pull/6675) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added performance tests for the fixed Gorilla and DoubleDelta codecs. [\#6179](https://github.com/ClickHouse/ClickHouse/pull/6179) ([Vasily Nemkov](https://github.com/Enmk))
- Split the integration test `test_dictionaries` into 4 separate tests. [\#6776](https://github.com/ClickHouse/ClickHouse/pull/6776) ([Vitaly Baranov](https://github.com/vitlibar))
- Fix a PVS-Studio warning in `PipelineExecutor`. [\#6777](https://github.com/ClickHouse/ClickHouse/pull/6777) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Allow using the `library` dictionary source with ASan. [\#6482](https://github.com/ClickHouse/ClickHouse/pull/6482) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added the ability to generate a changelog from a list of PRs. [\#6350](https://github.com/ClickHouse/ClickHouse/pull/6350) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Lock the `TinyLog` storage when reading. [\#6226](https://github.com/ClickHouse/ClickHouse/pull/6226) ([akuzm](https://github.com/akuzm))
- Check for broken symlinks in CI. [\#6634](https://github.com/ClickHouse/ClickHouse/pull/6634) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Increase the timeout for the "stack overflow" test because it may take a long time in debug build. [\#6637](https://github.com/ClickHouse/ClickHouse/pull/6637) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added a check for double whitespaces. [\#6643](https://github.com/ClickHouse/ClickHouse/pull/6643) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix `new/delete` memory tracking when building with sanitizers. The tracking is not clear. It only prevents memory limit exceptions in tests. [\#6450](https://github.com/ClickHouse/ClickHouse/pull/6450) ([Artem Zuikov](https://github.com/4ertus2))
- Enable back the check of undefined symbols while linking. [\#6453](https://github.com/ClickHouse/ClickHouse/pull/6453) ([Ivan](https://github.com/abyss7))
- Avoid rebuilding `hyperscan` every day. [\#6307](https://github.com/ClickHouse/ClickHouse/pull/6307) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a UBSan report in `ProtobufWriter`. [\#6163](https://github.com/ClickHouse/ClickHouse/pull/6163) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Don't allow using the query profiler with sanitizers because it is not compatible. [\#6769](https://github.com/ClickHouse/ClickHouse/pull/6769) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Add a test for reloading a dictionary after failure by timer. [\#6114](https://github.com/ClickHouse/ClickHouse/pull/6114) ([Vitaly Baranov](https://github.com/vitlibar))
- Fix an inconsistency in the `PipelineExecutor::prepareProcessor` argument type. [\#6494](https://github.com/ClickHouse/ClickHouse/pull/6494) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Added a test for bad URIs. [\#6493](https://github.com/ClickHouse/ClickHouse/pull/6493) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added more checks to the `CAST` function. This should get more information about the segmentation fault in the fuzzy test. [\#6346](https://github.com/ClickHouse/ClickHouse/pull/6346) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Added `gcc-9` support to the `docker/builder` container that builds the image locally. [\#6333](https://github.com/ClickHouse/ClickHouse/pull/6333) ([Gleb Novikov](https://github.com/NanoBjorn))
- Test for primary key with `LowCardinality(String)`. [\#5044](https://github.com/ClickHouse/ClickHouse/issues/5044) [\#6219](https://github.com/ClickHouse/ClickHouse/pull/6219) ([dimarub2000](https://github.com/dimarub2000))
- Fixed tests affected by slow stack trace printing. [\#6315](https://github.com/ClickHouse/ClickHouse/pull/6315) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Add a test case for the crash in `groupUniqArray` fixed in [\#6029](https://github.com/ClickHouse/ClickHouse/pull/6029). [\#4402](https://github.com/ClickHouse/ClickHouse/issues/4402) [\#6129](https://github.com/ClickHouse/ClickHouse/pull/6129) ([akuzm](https://github.com/akuzm))
- Fixed tests of index mutations. [\#6645](https://github.com/ClickHouse/ClickHouse/pull/6645) ([Nikita Vasilev](https://github.com/nikvas0))
- In a performance test, do not read the query log for queries we didn't run. [\#6427](https://github.com/ClickHouse/ClickHouse/pull/6427) ([akuzm](https://github.com/akuzm))
- A materialized view now can be created with any low cardinality types, regardless of the setting about suspicious low cardinality types. [\#6428](https://github.com/ClickHouse/ClickHouse/pull/6428) ([Olga Khvostikova](https://github.com/stavrolia))
- Updated tests for the `send_logs_level` setting. [\#6207](https://github.com/ClickHouse/ClickHouse/pull/6207) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fixed the build under gcc-8.2. [\#6196](https://github.com/ClickHouse/ClickHouse/pull/6196) ([Max Akhmedov](https://github.com/zlobober))
- Fixed the build with internal libc++. [\#6724](https://github.com/ClickHouse/ClickHouse/pull/6724) ([Ivan](https://github.com/abyss7))
- Fixed the shared build with the `rdkafka` library. [\#6101](https://github.com/ClickHouse/ClickHouse/pull/6101) ([Ivan](https://github.com/abyss7))
- Fixes for the Mac OS build (incomplete). [\#6390](https://github.com/ClickHouse/ClickHouse/pull/6390) ([alexey-milovidov](https://github.com/alexey-milovidov)) [\#6429](https://github.com/ClickHouse/ClickHouse/pull/6429) ([alex-zaitsev](https://github.com/alex-zaitsev))
- Fix the "splitted" build. [\#6618](https://github.com/ClickHouse/ClickHouse/pull/6618) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Other build fixes: [\#6186](https://github.com/ClickHouse/ClickHouse/pull/6186) ([Amos Bird](https://github.com/amosbird)) [\#6486](https://github.com/ClickHouse/ClickHouse/pull/6486) [\#6348](https://github.com/ClickHouse/ClickHouse/pull/6348) ([vxider](https://github.com/Vxider)) [\#6744](https://github.com/ClickHouse/ClickHouse/pull/6744) ([Ivan](https://github.com/abyss7)) [\#6016](https://github.com/ClickHouse/ClickHouse/pull/6016) [\#6421](https://github.com/ClickHouse/ClickHouse/pull/6421) [\#6491](https://github.com/ClickHouse/ClickHouse/pull/6491) ([proller](https://github.com/proller))

#### Backward Incompatible Change {#backward-incompatible-change-3}

- Removed the rarely used table function `catBoostPool` and the storage `CatBoostPool`. If you have used this table function, please write an email to `clickhouse-feedback@yandex-team.com`. Note that CatBoost integration remains and will be supported. [\#6279](https://github.com/ClickHouse/ClickHouse/pull/6279) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Disable `ANY RIGHT JOIN` and `ANY FULL JOIN` by default. Set the `any_join_distinct_right_table_keys` setting to enable them (see the sketch after this list). [\#5126](https://github.com/ClickHouse/ClickHouse/issues/5126) [\#6351](https://github.com/ClickHouse/ClickHouse/pull/6351) ([Artem Zuikov](https://github.com/4ertus2))
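
A minimal sketch for the last item above, restoring the legacy behaviour behind the new setting; the tables `t1` and `t2` are hypothetical.

```sql
-- Re-enable the legacy ANY RIGHT/FULL JOIN semantics for the session.
SET any_join_distinct_right_table_keys = 1;

-- Hypothetical query that requires the setting above.
SELECT *
FROM t1
ANY FULL JOIN t2 USING (id);
```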

## ClickHouse release 19.13 {#clickhouse-release-19-13}

### ClickHouse release 19.13.6.51, 2019-10-02 {#clickhouse-release-19-13-6-51-2019-10-02}

#### Bug Fix {#bug-fix-9}

- This release also contains all bug fixes from 19.11.12.69.

### ClickHouse release 19.13.5.44, 2019-09-20 {#clickhouse-release-19-13-5-44-2019-09-20}

#### Bug Fix {#bug-fix-10}

- This release also contains all bug fixes from 19.14.6.12.
- Fixed a possible inconsistent state of a table while executing a `DROP` query for a replicated table while ZooKeeper is not accessible. [\#6045](https://github.com/ClickHouse/ClickHouse/issues/6045) [\#6413](https://github.com/ClickHouse/ClickHouse/pull/6413) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
- Fix for a data race in StorageMerge. [\#6717](https://github.com/ClickHouse/ClickHouse/pull/6717) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a bug introduced in the query profiler which led to endless recv from a socket. [\#6386](https://github.com/ClickHouse/ClickHouse/pull/6386) ([alesapin](https://github.com/alesapin))
- Fixed excessive CPU usage while executing the `JSONExtractRaw` function over a boolean value. [\#6208](https://github.com/ClickHouse/ClickHouse/pull/6208) ([Vitaly Baranov](https://github.com/vitlibar))
- Fixes a regression while pushing to a materialized view. [\#6415](https://github.com/ClickHouse/ClickHouse/pull/6415) ([Ivan](https://github.com/abyss7))
- The table function `url` had a vulnerability that allowed an attacker to inject arbitrary HTTP headers into the request. This issue was found by [Nikita Tikhomirov](https://github.com/NSTikhomirov). [\#6466](https://github.com/ClickHouse/ClickHouse/pull/6466) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix a useless `AST` check in the Set index. [\#6510](https://github.com/ClickHouse/ClickHouse/issues/6510) [\#6651](https://github.com/ClickHouse/ClickHouse/pull/6651) ([Nikita Vasilev](https://github.com/nikvas0))
- Fixed parsing of `AggregateFunction` values embedded in a query. [\#6575](https://github.com/ClickHouse/ClickHouse/issues/6575) [\#6773](https://github.com/ClickHouse/ClickHouse/pull/6773) ([Zhichang Yu](https://github.com/yuzhichang))
- Fixed wrong behaviour of the `trim` family of functions. [\#6647](https://github.com/ClickHouse/ClickHouse/pull/6647) ([alexey-milovidov](https://github.com/alexey-milovidov))

### ClickHouse release 19.13.4.32, 2019-09-10 {#clickhouse-release-19-13-4-32-2019-09-10}

#### Bug Fix {#bug-fix-11}

- This release also contains all bug security fixes from 19.11.9.52 and 19.11.10.54.
- Fixed a data race in the `system.parts` table and the `ALTER` query. [\#6245](https://github.com/ClickHouse/ClickHouse/issues/6245) [\#6513](https://github.com/ClickHouse/ClickHouse/pull/6513) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a mismatched header in streams that happened when reading from an empty distributed table with sample and prewhere. [\#6167](https://github.com/ClickHouse/ClickHouse/issues/6167) ([Lixiang Qian](https://github.com/fancyqlx)) [\#6823](https://github.com/ClickHouse/ClickHouse/pull/6823) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fixed a crash when using an `IN` clause with a subquery with a tuple. [\#6125](https://github.com/ClickHouse/ClickHouse/issues/6125) [\#6550](https://github.com/ClickHouse/ClickHouse/pull/6550) ([tavplubix](https://github.com/tavplubix))
- Fix the case of identical column names in the `GLOBAL JOIN ON` section. [\#6181](https://github.com/ClickHouse/ClickHouse/pull/6181) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed a crash when casting types to `Decimal` that do not support it. Throw an exception instead. [\#6297](https://github.com/ClickHouse/ClickHouse/pull/6297) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed a crash in the `extractAll()` function. [\#6644](https://github.com/ClickHouse/ClickHouse/pull/6644) ([Artem Zuikov](https://github.com/4ertus2))
- Query transformation for the `MySQL`, `ODBC`, `JDBC` table functions now works properly for `SELECT WHERE` queries with multiple `AND` expressions (see the sketch after this list). [\#6381](https://github.com/ClickHouse/ClickHouse/issues/6381) [\#6676](https://github.com/ClickHouse/ClickHouse/pull/6676) ([dimarub2000](https://github.com/dimarub2000))
- Added previous declaration checks for MySQL 8 integration. [\#6569](https://github.com/ClickHouse/ClickHouse/pull/6569) ([Rafael David Tinoco](https://github.com/rafaeldtinoco))
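
A minimal sketch of a query shape covered by the table-function fix above; the connection parameters, database and table are hypothetical.

```sql
-- Both AND conditions are now transformed correctly into the query
-- that is sent to the external MySQL server.
SELECT *
FROM mysql('mysql-host:3306', 'test_db', 'test_table', 'user', 'password')
WHERE id > 100 AND status = 'active';
```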

#### Security Fix {#security-fix-1}

- Fixed two vulnerabilities in codecs in the decompression phase (a malicious user can fabricate compressed data that will lead to a buffer overflow in decompression). [\#6670](https://github.com/ClickHouse/ClickHouse/pull/6670) ([Artem Zuikov](https://github.com/4ertus2))

### ClickHouse release 19.13.3.26, 2019-08-22 {#clickhouse-release-19-13-3-26-2019-08-22}

#### Bug Fix {#bug-fix-12}

- Fix the `ALTER TABLE ... UPDATE` query for tables with `enable_mixed_granularity_parts=1`. [\#6543](https://github.com/ClickHouse/ClickHouse/pull/6543) ([alesapin](https://github.com/alesapin))
- Fix an NPE when using an IN clause with a subquery with a tuple. [\#6125](https://github.com/ClickHouse/ClickHouse/issues/6125) [\#6550](https://github.com/ClickHouse/ClickHouse/pull/6550) ([tavplubix](https://github.com/tavplubix))
- Fixed an issue where, if a stale replica becomes alive, it may still have data parts that were removed by DROP PARTITION. [\#6522](https://github.com/ClickHouse/ClickHouse/issues/6522) [\#6523](https://github.com/ClickHouse/ClickHouse/pull/6523) ([tavplubix](https://github.com/tavplubix))
- Fixed an issue with parsing CSV. [\#6426](https://github.com/ClickHouse/ClickHouse/issues/6426) [\#6559](https://github.com/ClickHouse/ClickHouse/pull/6559) ([tavplubix](https://github.com/tavplubix))
- Fixed a data race in the system.parts table and the ALTER query. This fixes [\#6245](https://github.com/ClickHouse/ClickHouse/issues/6245). [\#6513](https://github.com/ClickHouse/ClickHouse/pull/6513) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed wrong code in mutations that may lead to memory corruption. Fixed a segfault with read of address `0x14c0` that may happen due to concurrent `DROP TABLE` and `SELECT` from `system.parts` or `system.parts_columns`. Fixed a race condition in preparation of mutation queries. Fixed a deadlock caused by `OPTIMIZE` of replicated tables and concurrent modification operations like ALTERs. [\#6514](https://github.com/ClickHouse/ClickHouse/pull/6514) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed possible data loss after an `ALTER DELETE` query on a table with a skipping index. [\#6224](https://github.com/ClickHouse/ClickHouse/issues/6224) [\#6282](https://github.com/ClickHouse/ClickHouse/pull/6282) ([Nikita Vasilev](https://github.com/nikvas0))

#### Security Fix {#security-fix-2}

- If an attacker has write access to ZooKeeper and is able to run a custom server available from the network where ClickHouse runs, they can create a custom-built malicious server that will act as a ClickHouse replica and register it in ZooKeeper. When another replica fetches a data part from the malicious replica, it can force clickhouse-server to write to an arbitrary path on the filesystem. Found by Eldar Zaitov, information security team at Yandex. [\#6247](https://github.com/ClickHouse/ClickHouse/pull/6247) ([alexey-milovidov](https://github.com/alexey-milovidov))

### ClickHouse release 19.13.2.19, 2019-08-14 {#clickhouse-release-19-13-2-19-2019-08-14}

#### New Feature {#new-feature-5}

- Sampling profiler on query level. [Example](https://gist.github.com/alexey-milovidov/92758583dd41c24c360fdb8d6a4da194). [\#4247](https://github.com/ClickHouse/ClickHouse/issues/4247) ([laplab](https://github.com/laplab)) [\#6124](https://github.com/ClickHouse/ClickHouse/pull/6124) ([alexey-milovidov](https://github.com/alexey-milovidov)) [\#6250](https://github.com/ClickHouse/ClickHouse/pull/6250) [\#6283](https://github.com/ClickHouse/ClickHouse/pull/6283) [\#6386](https://github.com/ClickHouse/ClickHouse/pull/6386)
- Allow specifying a list of columns with the `COLUMNS('regexp')` expression that works like a more sophisticated variant of the `*` asterisk (see the sketch after this list). [\#5951](https://github.com/ClickHouse/ClickHouse/pull/5951) ([mfridental](https://github.com/mfridental)), ([alexey-milovidov](https://github.com/alexey-milovidov))
- `CREATE TABLE AS table_function()` is now possible. [\#6057](https://github.com/ClickHouse/ClickHouse/pull/6057) ([dimarub2000](https://github.com/dimarub2000))
- The Adam optimizer for stochastic gradient descent is used by default in the `stochasticLinearRegression()` and `stochasticLogisticRegression()` aggregate functions, because it shows good quality with almost no tuning. [\#6000](https://github.com/ClickHouse/ClickHouse/pull/6000) ([Quid37](https://github.com/Quid37))
- Added functions for working with the custom week number. [\#5212](https://github.com/ClickHouse/ClickHouse/pull/5212) ([Andy Yang](https://github.com/andyyzh))
- `RENAME` queries now work with all storages. [\#5953](https://github.com/ClickHouse/ClickHouse/pull/5953) ([Ivan](https://github.com/abyss7))
- Now the client receives logs from the server at any desired level by setting `send_logs_level`, regardless of the log level specified in the server settings. [\#5964](https://github.com/ClickHouse/ClickHouse/pull/5964) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
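
A minimal sketch of the two query forms introduced above (`COLUMNS('regexp')` and `CREATE TABLE AS table_function()`); the table and column names are hypothetical.

```sql
-- Select only the columns whose names match a regular expression.
SELECT COLUMNS('^price_') FROM products;

-- Create a table backed by a table function.
CREATE TABLE first_hundred AS numbers(100);
SELECT count() FROM first_hundred;
```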

#### Backward Incompatible Change {#backward-incompatible-change-4}

- The setting `input_format_defaults_for_omitted_fields` is enabled by default. Inserts into Distributed tables need this setting to be the same across the cluster (you need to set it before rolling out the update). It enables calculation of complex default expressions for omitted fields in the `JSONEachRow` and `CSV*` formats. It should be the expected behaviour, but it may lead to a negligible performance difference. [\#6043](https://github.com/ClickHouse/ClickHouse/pull/6043) ([Artem Zuikov](https://github.com/4ertus2)), [\#5625](https://github.com/ClickHouse/ClickHouse/pull/5625) ([akuzm](https://github.com/akuzm))

#### Experimental Features {#experimental-features}

- New query processing pipeline. Use the `experimental_use_processors=1` option to enable it. Use at your own risk. [\#4914](https://github.com/ClickHouse/ClickHouse/pull/4914) ([Nikolai Kochetov](https://github.com/KochetovNicolai))

#### Bug Fix {#bug-fix-13}

- Kafka integration has been fixed in this version.
- Fixed `DoubleDelta` encoding of `Int64` for large `DoubleDelta` values, improved `DoubleDelta` encoding of random data for `Int32`. [\#5998](https://github.com/ClickHouse/ClickHouse/pull/5998) ([Vasily Nemkov](https://github.com/Enmk))
- Fixed overestimation of `max_rows_to_read` if the setting `merge_tree_uniform_read_distribution` is set to 0. [\#6019](https://github.com/ClickHouse/ClickHouse/pull/6019) ([alexey-milovidov](https://github.com/alexey-milovidov))

#### Improvement {#improvement-4}

- Throws an exception if a `config.d` file doesn't have the corresponding root element as the config file. [\#6123](https://github.com/ClickHouse/ClickHouse/pull/6123) ([dimarub2000](https://github.com/dimarub2000))

#### Performance Improvement {#performance-improvement-3}

- Optimize `count()`. Now it uses the smallest column (if possible). [\#6028](https://github.com/ClickHouse/ClickHouse/pull/6028) ([Amos Bird](https://github.com/amosbird))

#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-5}

- Report memory usage in performance tests. [\#5899](https://github.com/ClickHouse/ClickHouse/pull/5899) ([akuzm](https://github.com/akuzm))
- Fix the build with external `libcxx`. [\#6010](https://github.com/ClickHouse/ClickHouse/pull/6010) ([Ivan](https://github.com/abyss7))
- Fix the shared build with the `rdkafka` library. [\#6101](https://github.com/ClickHouse/ClickHouse/pull/6101) ([Ivan](https://github.com/abyss7))

## ClickHouse release 19.11 {#clickhouse-release-19-11}

### ClickHouse release 19.11.13.74, 2019-11-01 {#clickhouse-release-19-11-13-74-2019-11-01}

#### Bug Fix {#bug-fix-14}

- Fixed a rare crash in `ALTER MODIFY COLUMN` and vertical merge when one of the merged/altered parts is empty (0 rows). [\#6780](https://github.com/ClickHouse/ClickHouse/pull/6780) ([alesapin](https://github.com/alesapin))
- Manual update of `SIMDJSON`. This fixes possible flooding of stderr files with bogus JSON diagnostic messages. [\#7548](https://github.com/ClickHouse/ClickHouse/pull/7548) ([Alexander Kazakov](https://github.com/Akazz))
- Fixed a bug with the `mrk` file extension for mutations ([alesapin](https://github.com/alesapin))

### ClickHouse release 19.11.12.69, 2019-10-02 {#clickhouse-release-19-11-12-69-2019-10-02}

#### Bug Fix {#bug-fix-15}

- Fixed performance degradation of index analysis on complex keys on big tables. This fixes [\#6924](https://github.com/ClickHouse/ClickHouse/issues/6924). [\#7075](https://github.com/ClickHouse/ClickHouse/pull/7075) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Avoid a rare SIGSEGV while sending data in tables with the Distributed engine (`Failed to send batch: file with index XXXXX is absent`). [\#7032](https://github.com/ClickHouse/ClickHouse/pull/7032) ([Azat Khuzhin](https://github.com/azat))
- Fix `Unknown identifier` with multiple joins. This fixes [\#5254](https://github.com/ClickHouse/ClickHouse/issues/5254). [\#7022](https://github.com/ClickHouse/ClickHouse/pull/7022) ([Artem Zuikov](https://github.com/4ertus2))

### ClickHouse release 19.11.11.57, 2019-09-13 {#clickhouse-release-19-11-11-57-2019-09-13}

- Fixed a logical error causing segfaults when selecting from an empty Kafka topic. [\#6902](https://github.com/ClickHouse/ClickHouse/issues/6902) [\#6909](https://github.com/ClickHouse/ClickHouse/pull/6909) ([Ivan](https://github.com/abyss7))
- Fix for the function `АrrayEnumerateUniqRanked` with empty arrays in params. [\#6928](https://github.com/ClickHouse/ClickHouse/pull/6928) ([proller](https://github.com/proller))

### ClickHouse release 19.11.10.54, 2019-09-10 {#clickhouse-release-19-11-10-54-2019-09-10}

#### Bug Fix {#bug-fix-16}

- Store offsets for Kafka messages manually to be able to commit them all at once for all partitions. Fixes potential duplication in the "one consumer - many partitions" scenario. [\#6872](https://github.com/ClickHouse/ClickHouse/pull/6872) ([Ivan](https://github.com/abyss7))

### ClickHouse release 19.11.9.52, 2019-09-6 {#clickhouse-release-19-11-9-52-2019-09-6}

- Improved error handling in cache dictionaries. [\#6737](https://github.com/ClickHouse/ClickHouse/pull/6737) ([Vitaly Baranov](https://github.com/vitlibar))
- Fixed a bug in the function `arrayEnumerateUniqRanked`. [\#6779](https://github.com/ClickHouse/ClickHouse/pull/6779) ([proller](https://github.com/proller))
- Fix the `JSONExtract` function while extracting a `Tuple` from JSON (see the sketch after this list). [\#6718](https://github.com/ClickHouse/ClickHouse/pull/6718) ([Vitaly Baranov](https://github.com/vitlibar))
- Fixed possible data loss after an `ALTER DELETE` query on a table with a skipping index. [\#6224](https://github.com/ClickHouse/ClickHouse/issues/6224) [\#6282](https://github.com/ClickHouse/ClickHouse/pull/6282) ([Nikita Vasilev](https://github.com/nikvas0))
- Fixed a performance test. [\#6392](https://github.com/ClickHouse/ClickHouse/pull/6392) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Parquet: fix reading boolean columns. [\#6579](https://github.com/ClickHouse/ClickHouse/pull/6579) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed wrong behaviour of the `nullIf` function for constant arguments. [\#6518](https://github.com/ClickHouse/ClickHouse/pull/6518) ([Guillaume Tassery](https://github.com/YiuRULE)) [\#6580](https://github.com/ClickHouse/ClickHouse/pull/6580) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed the Kafka message duplication problem on normal server restart. [\#6597](https://github.com/ClickHouse/ClickHouse/pull/6597) ([Ivan](https://github.com/abyss7))
- Fixed an issue when a long `ALTER UPDATE` or `ALTER DELETE` may prevent regular merges from running. Prevent mutations from executing if there are not enough free threads. [\#6502](https://github.com/ClickHouse/ClickHouse/issues/6502) [\#6617](https://github.com/ClickHouse/ClickHouse/pull/6617) ([tavplubix](https://github.com/tavplubix))
- Fixed an error with processing "timezone" in the server configuration file. [\#6709](https://github.com/ClickHouse/ClickHouse/pull/6709) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix Kafka tests. [\#6805](https://github.com/ClickHouse/ClickHouse/pull/6805) ([Ivan](https://github.com/abyss7))
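
A minimal sketch of extracting a `Tuple` with `JSONExtract`, as referenced in the list above; the JSON literal is illustrative.

```sql
-- Extract a whole tuple from a JSON document in one call.
SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}',
                   'Tuple(a String, b Array(Float64))');
```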

#### Security Fix {#security-fix-3}

- If an attacker has write access to ZooKeeper and is able to run a custom server available from the network where ClickHouse runs, they can create a custom-built malicious server that will act as a ClickHouse replica and register it in ZooKeeper. When another replica fetches a data part from the malicious replica, it can force clickhouse-server to write to an arbitrary path on the filesystem. Found by Eldar Zaitov, information security team at Yandex. [\#6247](https://github.com/ClickHouse/ClickHouse/pull/6247) ([alexey-milovidov](https://github.com/alexey-milovidov))

### ClickHouse release 19.11.8.46, 2019-08-22 {#clickhouse-release-19-11-8-46-2019-08-22}

#### Bug Fix {#bug-fix-17}

- Fix the `ALTER TABLE ... UPDATE` query for tables with `enable_mixed_granularity_parts=1`. [\#6543](https://github.com/ClickHouse/ClickHouse/pull/6543) ([alesapin](https://github.com/alesapin))
- Fix an NPE when using an IN clause with a subquery with a tuple. [\#6125](https://github.com/ClickHouse/ClickHouse/issues/6125) [\#6550](https://github.com/ClickHouse/ClickHouse/pull/6550) ([tavplubix](https://github.com/tavplubix))
- Fixed an issue where, if a stale replica becomes alive, it may still have data parts that were removed by DROP PARTITION. [\#6522](https://github.com/ClickHouse/ClickHouse/issues/6522) [\#6523](https://github.com/ClickHouse/ClickHouse/pull/6523) ([tavplubix](https://github.com/tavplubix))
- Fixed an issue with parsing CSV. [\#6426](https://github.com/ClickHouse/ClickHouse/issues/6426) [\#6559](https://github.com/ClickHouse/ClickHouse/pull/6559) ([tavplubix](https://github.com/tavplubix))
- Fixed a data race in the system.parts table and the ALTER query. This fixes [\#6245](https://github.com/ClickHouse/ClickHouse/issues/6245). [\#6513](https://github.com/ClickHouse/ClickHouse/pull/6513) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed wrong code in mutations that may lead to memory corruption. Fixed a segfault with read of address `0x14c0` that may happen due to concurrent `DROP TABLE` and `SELECT` from `system.parts` or `system.parts_columns`. Fixed a race condition in preparation of mutation queries. Fixed a deadlock caused by `OPTIMIZE` of replicated tables and concurrent modification operations like ALTERs. [\#6514](https://github.com/ClickHouse/ClickHouse/pull/6514) ([alexey-milovidov](https://github.com/alexey-milovidov))

### ClickHouse release 19.11.7.40, 2019-08-14 {#clickhouse-release-19-11-7-40-2019-08-14}

#### Bug Fix {#bug-fix-18}

- Kafka integration has been fixed in this version.
- Fixed a segfault when using `arrayReduce` for constant arguments. [\#6326](https://github.com/ClickHouse/ClickHouse/pull/6326) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed `toFloat()` monotonicity. [\#6374](https://github.com/ClickHouse/ClickHouse/pull/6374) ([dimarub2000](https://github.com/dimarub2000))
- Fixed a segfault with enabled `optimize_skip_unused_shards` and a missing sharding key. [\#6384](https://github.com/ClickHouse/ClickHouse/pull/6384) ([CurtizJ](https://github.com/CurtizJ))
- Fixed the logic of the `arrayEnumerateUniqRanked` function. [\#6423](https://github.com/ClickHouse/ClickHouse/pull/6423) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Removed extra verbose logging from the MySQL handler. [\#6389](https://github.com/ClickHouse/ClickHouse/pull/6389) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed wrong behavior and possible segfaults in the `topK` and `topKWeighted` aggregate functions. [\#6404](https://github.com/ClickHouse/ClickHouse/pull/6404) ([CurtizJ](https://github.com/CurtizJ))
- Do not expose virtual columns in the `system.columns` table. This is required for backward compatibility. [\#6406](https://github.com/ClickHouse/ClickHouse/pull/6406) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a bug with memory allocation for string fields in a complex key cache dictionary. [\#6447](https://github.com/ClickHouse/ClickHouse/pull/6447) ([alesapin](https://github.com/alesapin))
- Fixed a bug with enabling adaptive granularity when creating a new replica for a `Replicated*MergeTree` table. [\#6452](https://github.com/ClickHouse/ClickHouse/pull/6452) ([alesapin](https://github.com/alesapin))
- Fixed an infinite loop when reading Kafka messages. [\#6354](https://github.com/ClickHouse/ClickHouse/pull/6354) ([abyss7](https://github.com/abyss7))
- Fixed the possibility of a fabricated query causing a server crash due to stack overflow in the SQL parser and the possibility of stack overflow in `Merge` and `Distributed` tables. [\#6433](https://github.com/ClickHouse/ClickHouse/pull/6433) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a Gorilla encoding error on small sequences. [\#6444](https://github.com/ClickHouse/ClickHouse/pull/6444) ([Enmk](https://github.com/Enmk))

#### Improvement {#improvement-5}

- Allow the user to override the `poll_interval` and `idle_connection_timeout` settings on connection. [\#6230](https://github.com/ClickHouse/ClickHouse/pull/6230) ([alexey-milovidov](https://github.com/alexey-milovidov))

### ClickHouse release 19.11.5.28, 2019-08-05 {#clickhouse-release-19-11-5-28-2019-08-05}

#### Bug Fix {#bug-fix-19}

- Fixed the possibility of hanging queries when the server is overloaded. [\#6301](https://github.com/ClickHouse/ClickHouse/pull/6301) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix an FPE in the yandexConsistentHash function. This fixes [\#6304](https://github.com/ClickHouse/ClickHouse/issues/6304). [\#6126](https://github.com/ClickHouse/ClickHouse/pull/6126) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a bug in the conversion of `LowCardinality` types in `AggregateFunctionFactory`. This fixes [\#6257](https://github.com/ClickHouse/ClickHouse/issues/6257). [\#6281](https://github.com/ClickHouse/ClickHouse/pull/6281) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fix parsing of `bool` settings from `true` and `false` strings in configuration files. [\#6278](https://github.com/ClickHouse/ClickHouse/pull/6278) ([alesapin](https://github.com/alesapin))
- Fixed a rare bug with incompatible stream headers in queries to a `Distributed` table over a `MergeTree` table when part of `WHERE` moves to `PREWHERE`. [\#6236](https://github.com/ClickHouse/ClickHouse/pull/6236) ([alesapin](https://github.com/alesapin))
- Fixed overflow in integer division of a signed type by an unsigned one. This fixes [\#6214](https://github.com/ClickHouse/ClickHouse/issues/6214). [\#6233](https://github.com/ClickHouse/ClickHouse/pull/6233) ([alexey-milovidov](https://github.com/alexey-milovidov))

#### Backward Incompatible Change {#backward-incompatible-change-5}

- `Kafka` is still broken.

### ClickHouse release 19.11.4.24, 2019-08-01 {#clickhouse-release-19-11-4-24-2019-08-01}

#### Bug Fix {#bug-fix-20}

- Fixed a bug with writing secondary index marks with adaptive granularity. [\#6126](https://github.com/ClickHouse/ClickHouse/pull/6126) ([alesapin](https://github.com/alesapin))
- Fix the `WITH ROLLUP` and `WITH CUBE` modifiers of `GROUP BY` with two-level aggregation. [\#6225](https://github.com/ClickHouse/ClickHouse/pull/6225) ([Anton Popov](https://github.com/CurtizJ))
- Fixed a hang in the `JSONExtractRaw` function. Fixed [\#6195](https://github.com/ClickHouse/ClickHouse/issues/6195) [\#6198](https://github.com/ClickHouse/ClickHouse/pull/6198) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a segfault in ExternalLoader::reloadOutdated(). [\#6082](https://github.com/ClickHouse/ClickHouse/pull/6082) ([Vitaly Baranov](https://github.com/vitlibar))
- Fixed the case when the server may close listening sockets but not shut down and continue serving remaining queries. You may end up with two running clickhouse-server processes. Sometimes, the server may return a `bad_function_call` error for remaining queries. [\#6231](https://github.com/ClickHouse/ClickHouse/pull/6231) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a useless and incorrect condition on the update field for the initial loading of external dictionaries via ODBC, MySQL, ClickHouse and HTTP. This fixes [\#6069](https://github.com/ClickHouse/ClickHouse/issues/6069) [\#6083](https://github.com/ClickHouse/ClickHouse/pull/6083) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed an irrelevant exception in a cast of `LowCardinality(Nullable)` to a not-Nullable column in case it doesn't contain Nulls (e.g. in a query like `SELECT CAST(CAST('Hello' AS LowCardinality(Nullable(String))) AS String)`). [\#6094](https://github.com/ClickHouse/ClickHouse/issues/6094) [\#6119](https://github.com/ClickHouse/ClickHouse/pull/6119) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fixed a non-deterministic result of the "uniq" aggregate function in extremely rare cases. The bug was present in all ClickHouse versions. [\#6058](https://github.com/ClickHouse/ClickHouse/pull/6058) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a segfault when we set a slightly too high CIDR on the function `IPv6CIDRToRange`. [\#6068](https://github.com/ClickHouse/ClickHouse/pull/6068) ([Guillaume Tassery](https://github.com/YiuRULE))
- Fixed a small memory leak when the server throws many exceptions from many different contexts. [\#6144](https://github.com/ClickHouse/ClickHouse/pull/6144) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed the situation when a consumer got paused before subscription and was not resumed afterwards. [\#6075](https://github.com/ClickHouse/ClickHouse/pull/6075) ([Ivan](https://github.com/abyss7)) Note that Kafka is broken in this version.
- Clear the Kafka data buffer from a previous read operation that completed with an error. [\#6026](https://github.com/ClickHouse/ClickHouse/pull/6026) ([Nikolay](https://github.com/bopohaa)) Note that Kafka is broken in this version.
- Since `StorageMergeTree::background_task_handle` is initialized in `startup()`, `MergeTreeBlockOutputStream::write()` may try to use it before initialization. Just check whether it is initialized. [\#6080](https://github.com/ClickHouse/ClickHouse/pull/6080) ([Ivan](https://github.com/abyss7))

#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-6}

- Added official `rpm` packages. [\#5740](https://github.com/ClickHouse/ClickHouse/pull/5740) ([proller](https://github.com/proller)) ([alesapin](https://github.com/alesapin))
- Added an ability to build `.rpm` and `.tgz` packages with the `packager` script. [\#5769](https://github.com/ClickHouse/ClickHouse/pull/5769) ([alesapin](https://github.com/alesapin))
- Fixes for the "Arcadia" build system. [\#6223](https://github.com/ClickHouse/ClickHouse/pull/6223) ([proller](https://github.com/proller))

#### Backward Incompatible Change {#backward-incompatible-change-6}

- `Kafka` is broken in this version.

### ClickHouse release 19.11.3.11, 2019-07-18 {#clickhouse-release-19-11-3-11-2019-07-18}

#### New Feature {#new-feature-6}

- Added support for prepared statements. [\#5331](https://github.com/ClickHouse/ClickHouse/pull/5331/) ([Alexander](https://github.com/sanych73)) [\#5630](https://github.com/ClickHouse/ClickHouse/pull/5630) ([alexey-milovidov](https://github.com/alexey-milovidov))
- `DoubleDelta` and `Gorilla` column codecs. [\#5600](https://github.com/ClickHouse/ClickHouse/pull/5600) ([Vasily Nemkov](https://github.com/Enmk))
- Added the `os_thread_priority` setting that allows controlling the "nice" value of query processing threads that is used by the OS to adjust dynamic scheduling priority. It requires the `CAP_SYS_NICE` capability to work. This implements [\#5858](https://github.com/ClickHouse/ClickHouse/issues/5858) [\#5909](https://github.com/ClickHouse/ClickHouse/pull/5909) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Implemented `_topic`, `_offset`, `_key` columns for the Kafka engine. [\#5382](https://github.com/ClickHouse/ClickHouse/pull/5382) ([Ivan](https://github.com/abyss7)) Note that Kafka is broken in this version.
- Added the aggregate function combinator `-Resample`. [\#5590](https://github.com/ClickHouse/ClickHouse/pull/5590) ([hcz](https://github.com/hczhcz))
- Aggregate functions `groupArrayMovingSum(win_size)(x)` and `groupArrayMovingAvg(win_size)(x)`, which calculate a moving sum/average with or without window-size limitation (a sketch follows this list). [\#5595](https://github.com/ClickHouse/ClickHouse/pull/5595) ([inv2004](https://github.com/inv2004))
- Added the synonym `arrayFlatten` \<-\> `flatten`. [\#5764](https://github.com/ClickHouse/ClickHouse/pull/5764) ([hcz](https://github.com/hczhcz))
- Integrate the H3 function `geoToH3` from Uber. [\#4724](https://github.com/ClickHouse/ClickHouse/pull/4724) ([Remen Ivan](https://github.com/BHYCHIK)) [\#5805](https://github.com/ClickHouse/ClickHouse/pull/5805) ([alexey-milovidov](https://github.com/alexey-milovidov))
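The following is a minimal sketch of the new moving-window aggregates; the inline numbers are illustrative and not taken from the release notes:

```sql
-- With a window size of 3, each result element aggregates at most
-- the 3 most recent input values.
SELECT
    groupArrayMovingSum(3)(value) AS moving_sum,
    groupArrayMovingAvg(3)(value) AS moving_avg
FROM
(
    SELECT arrayJoin([1, 2, 3, 4, 5]) AS value
);
```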
#### Bug Fix {#bug-fix-21}

- Implemented a DNS cache with asynchronous update. A separate thread resolves all hosts and updates the DNS cache with a period (setting `dns_cache_update_period`). It should help when the IPs of hosts change frequently. [\#5857](https://github.com/ClickHouse/ClickHouse/pull/5857) ([Anton Popov](https://github.com/CurtizJ))
- Fixed a segfault in the `Delta` codec that affects columns with values less than 32 bits in size. The bug led to random memory corruption. [\#5786](https://github.com/ClickHouse/ClickHouse/pull/5786) ([alesapin](https://github.com/alesapin))
- Fixed a segfault in TTL merge with non-physical columns in the block. [\#5819](https://github.com/ClickHouse/ClickHouse/pull/5819) ([Anton Popov](https://github.com/CurtizJ))
- Fixed a rare bug in checking a part with a `LowCardinality` column. Previously `checkDataPart` always failed for a part with a `LowCardinality` column. [\#5832](https://github.com/ClickHouse/ClickHouse/pull/5832) ([alesapin](https://github.com/alesapin))
- Avoid hanging connections when the server thread pool is full. This is important for connections from the `remote` table function, or connections to a shard without replicas when there is a long connection timeout. This fixes [\#5878](https://github.com/ClickHouse/ClickHouse/issues/5878) [\#5881](https://github.com/ClickHouse/ClickHouse/pull/5881) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Support for constant arguments to the `evalMLModel` function. This fixes [\#5817](https://github.com/ClickHouse/ClickHouse/issues/5817) [\#5820](https://github.com/ClickHouse/ClickHouse/pull/5820) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed the issue when ClickHouse determines the default time zone as `UCT` instead of `UTC`. This fixes [\#5804](https://github.com/ClickHouse/ClickHouse/issues/5804). [\#5828](https://github.com/ClickHouse/ClickHouse/pull/5828) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed buffer underflow in `visitParamExtractRaw`. This fixes [\#5901](https://github.com/ClickHouse/ClickHouse/issues/5901) [\#5902](https://github.com/ClickHouse/ClickHouse/pull/5902) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Now distributed `DROP/ALTER/TRUNCATE/OPTIMIZE ON CLUSTER` queries will be executed directly on the leader replica. [\#5757](https://github.com/ClickHouse/ClickHouse/pull/5757) ([alesapin](https://github.com/alesapin))
- Fixed `coalesce` for `ColumnConst` with `ColumnNullable` + related changes. [\#5755](https://github.com/ClickHouse/ClickHouse/pull/5755) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed `ReadBufferFromKafkaConsumer` so that it keeps reading new messages after `commit()` even if it was stalled before. [\#5852](https://github.com/ClickHouse/ClickHouse/pull/5852) ([Ivan](https://github.com/abyss7))
- Fixed `FULL` and `RIGHT` JOIN results when joining on `Nullable` keys in the right table. [\#5859](https://github.com/ClickHouse/ClickHouse/pull/5859) ([Artem Zuikov](https://github.com/4ertus2))
- Possible fix of infinite sleeping of low-priority queries. [\#5842](https://github.com/ClickHouse/ClickHouse/pull/5842) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a race condition which causes some queries to not appear in query\_log after a `SYSTEM FLUSH LOGS` query. [\#5456](https://github.com/ClickHouse/ClickHouse/issues/5456) [\#5685](https://github.com/ClickHouse/ClickHouse/pull/5685) ([Anton Popov](https://github.com/CurtizJ))
- Fixed a `heap-use-after-free` ASan warning in ClusterCopier caused by a watch trying to use an already removed copier object. [\#5871](https://github.com/ClickHouse/ClickHouse/pull/5871) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fixed a wrong `StringRef` pointer returned by some implementations of `IColumn::deserializeAndInsertFromArena`. This bug affected only unit tests. [\#5973](https://github.com/ClickHouse/ClickHouse/pull/5973) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Prevent source and intermediate array join columns from masking columns with the same name. [\#5941](https://github.com/ClickHouse/ClickHouse/pull/5941) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed insert and select queries to the MySQL engine with MySQL-style identifier quoting. [\#5704](https://github.com/ClickHouse/ClickHouse/pull/5704) ([Winter Zhang](https://github.com/zhang2014))
- Now the `CHECK TABLE` query can work with the MergeTree engine family. It returns the check status and message, if any, for each part (or file in case of simpler engines); a sketch follows this list. Also, fixed a bug in the fetch of a broken part. [\#5865](https://github.com/ClickHouse/ClickHouse/pull/5865) ([alesapin](https://github.com/alesapin))
- Fixed the SPLIT\_SHARED\_LIBRARIES runtime. [\#5793](https://github.com/ClickHouse/ClickHouse/pull/5793) ([Danila Kutenin](https://github.com/danlark1))
- Fixed time zone initialization when `/etc/localtime` is a relative symlink like `../usr/share/zoneinfo/Europe/Moscow`. [\#5922](https://github.com/ClickHouse/ClickHouse/pull/5922) ([alexey-milovidov](https://github.com/alexey-milovidov))
- clickhouse-copier: fixed use-after-free on shutdown. [\#5752](https://github.com/ClickHouse/ClickHouse/pull/5752) ([proller](https://github.com/proller))
- Updated `simdjson`. Fixed the issue where some invalid JSONs with zero bytes successfully parse. [\#5938](https://github.com/ClickHouse/ClickHouse/pull/5938) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed shutdown of system logs. [\#5802](https://github.com/ClickHouse/ClickHouse/pull/5802) ([Anton Popov](https://github.com/CurtizJ))
- Fixed hanging when the condition in invalidate\_query depends on a dictionary. [\#6011](https://github.com/ClickHouse/ClickHouse/pull/6011) ([Vitaly Baranov](https://github.com/vitlibar))
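A small sketch of the extended `CHECK TABLE` support mentioned above; `my_mergetree_table` is a hypothetical table name:

```sql
-- For MergeTree-family tables this now reports the check status
-- (and a message, if any) per data part, or per file for simpler engines.
CHECK TABLE my_mergetree_table;
```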
#### Improvement {#improvement-6}

- Allow unresolvable addresses in cluster configuration. They will be considered unavailable and resolution will be retried at every connection attempt. This is especially useful for Kubernetes. This fixes [\#5714](https://github.com/ClickHouse/ClickHouse/issues/5714) [\#5924](https://github.com/ClickHouse/ClickHouse/pull/5924) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Close idle TCP connections (with a one-hour timeout by default). This is especially important for large clusters with multiple distributed tables on every server, because every server can possibly keep a connection pool to every other server, and after peak query concurrency the connections would stall. This fixes [\#5879](https://github.com/ClickHouse/ClickHouse/issues/5879) [\#5880](https://github.com/ClickHouse/ClickHouse/pull/5880) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Better quality of the `topK` function. Changed the SpaceSaving set behavior to remove the last element if a new element has a bigger weight. [\#5833](https://github.com/ClickHouse/ClickHouse/issues/5833) [\#5850](https://github.com/ClickHouse/ClickHouse/pull/5850) ([Guillaume Tassery](https://github.com/YiuRULE))
- URL functions that work with domains can now work with incomplete URLs without a scheme. [\#5725](https://github.com/ClickHouse/ClickHouse/pull/5725) ([alesapin](https://github.com/alesapin))
- Checksums added to the `system.parts_columns` table. [\#5874](https://github.com/ClickHouse/ClickHouse/pull/5874) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
- Added the `Enum` data type as a synonym for `Enum8` or `Enum16`. [\#5886](https://github.com/ClickHouse/ClickHouse/pull/5886) ([dimarub2000](https://github.com/dimarub2000))
- Full bit transpose variant for the `T64` codec. Could lead to better compression with `zstd`. [\#5742](https://github.com/ClickHouse/ClickHouse/pull/5742) ([Artem Zuikov](https://github.com/4ertus2))
- A condition on the `startsWith` function can now use the primary key (a sketch follows this list). This fixes [\#5310](https://github.com/ClickHouse/ClickHouse/issues/5310) and [\#5882](https://github.com/ClickHouse/ClickHouse/issues/5882) [\#5919](https://github.com/ClickHouse/ClickHouse/pull/5919) ([dimarub2000](https://github.com/dimarub2000))
- Allow using `clickhouse-copier` with cross-replication cluster topology by permitting an empty database name. [\#5745](https://github.com/ClickHouse/ClickHouse/pull/5745) ([nvartolomei](https://github.com/nvartolomei))
- Use `UTC` as the default timezone on a system without `tzdata` (e.g. bare Docker container). Before this patch, the error message `Could not determine local time zone` was printed and the server or client refused to start. [\#5827](https://github.com/ClickHouse/ClickHouse/pull/5827) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Returned back support for a floating point argument in the `quantileTiming` function for backward compatibility. [\#5911](https://github.com/ClickHouse/ClickHouse/pull/5911) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Show which table is missing a column in error messages. [\#5768](https://github.com/ClickHouse/ClickHouse/pull/5768) ([Ivan](https://github.com/abyss7))
- Disallow running a query with the same query\_id by different users. [\#5430](https://github.com/ClickHouse/ClickHouse/pull/5430) ([proller](https://github.com/proller))
- More robust code for sending metrics to Graphite. It will work even during long multiple `RENAME TABLE` operations. [\#5875](https://github.com/ClickHouse/ClickHouse/pull/5875) ([alexey-milovidov](https://github.com/alexey-milovidov))
- More informative error messages will be displayed when ThreadPool cannot schedule a task for execution. This fixes [\#5305](https://github.com/ClickHouse/ClickHouse/issues/5305) [\#5801](https://github.com/ClickHouse/ClickHouse/pull/5801) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Inverted ngramSearch to be more intuitive. [\#5807](https://github.com/ClickHouse/ClickHouse/pull/5807) ([Danila Kutenin](https://github.com/danlark1))
- Added user parsing in the HDFS engine builder. [\#5946](https://github.com/ClickHouse/ClickHouse/pull/5946) ([akonyaev90](https://github.com/akonyaev90))
- Updated the default value of the `max_ast_elements` parameter. [\#5933](https://github.com/ClickHouse/ClickHouse/pull/5933) ([Artem Konovalov](https://github.com/izebit))
- Added a notion of obsolete settings. The obsolete setting `allow_experimental_low_cardinality_type` can be used with no effect. [0f15c01c6802f7ce1a1494c12c846be8c98944cd](https://github.com/ClickHouse/ClickHouse/commit/0f15c01c6802f7ce1a1494c12c846be8c98944cd) [Alexey Milovidov](https://github.com/alexey-milovidov)
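A sketch of exercising the `startsWith` improvement from the list above, assuming a hypothetical `urls` table ordered by a String key:

```sql
CREATE TABLE urls
(
    url String,
    hits UInt64
)
ENGINE = MergeTree
ORDER BY url;

-- The prefix condition can now be served by primary key analysis
-- instead of a full column scan.
SELECT sum(hits)
FROM urls
WHERE startsWith(url, 'https://example.com/');
```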
#### Performance Improvement {#performance-improvement-4}

- Increased the number of streams to SELECT from a Merge table for a more uniform distribution of threads. Added the setting `max_streams_multiplier_for_merge_tables`. This fixes [\#5797](https://github.com/ClickHouse/ClickHouse/issues/5797) [\#5915](https://github.com/ClickHouse/ClickHouse/pull/5915) ([alexey-milovidov](https://github.com/alexey-milovidov))

#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-7}

- Added a backward compatibility test for client-server interaction with different versions of clickhouse. [\#5868](https://github.com/ClickHouse/ClickHouse/pull/5868) ([alesapin](https://github.com/alesapin))
- Test coverage information in every commit and pull request. [\#5896](https://github.com/ClickHouse/ClickHouse/pull/5896) ([alesapin](https://github.com/alesapin))
- Cooperate with address sanitizer to support our custom allocators (`Arena` and `ArenaWithFreeLists`) for better debugging of "use-after-free" errors. [\#5728](https://github.com/ClickHouse/ClickHouse/pull/5728) ([akuzm](https://github.com/akuzm))
- Switch to the [LLVM libunwind implementation](https://github.com/llvm-mirror/libunwind) for C++ exception handling and for stack trace printing. [\#4828](https://github.com/ClickHouse/ClickHouse/pull/4828) ([Nikita Lapkov](https://github.com/laplab))
- Added two more warnings from -Weverything. [\#5923](https://github.com/ClickHouse/ClickHouse/pull/5923) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Allow building ClickHouse with Memory Sanitizer. [\#3949](https://github.com/ClickHouse/ClickHouse/pull/3949) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed ubsan report about the `bitTest` function in a fuzz test. [\#5943](https://github.com/ClickHouse/ClickHouse/pull/5943) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Settings: added an ability to initialize a ClickHouse instance that requires authentication. [\#5727](https://github.com/ClickHouse/ClickHouse/pull/5727) ([Korviakov Andrey](https://github.com/shurshun))
- Update librdkafka to version 1.1.0. [\#5872](https://github.com/ClickHouse/ClickHouse/pull/5872) ([Ivan](https://github.com/abyss7))
- Added a global timeout for integration tests and disabled some of them in the tests code. [\#5741](https://github.com/ClickHouse/ClickHouse/pull/5741) ([alesapin](https://github.com/alesapin))
- Fixed some ThreadSanitizer failures. [\#5854](https://github.com/ClickHouse/ClickHouse/pull/5854) ([akuzm](https://github.com/akuzm))
- The `--no-undefined` option forces the linker to check all external names for existence while linking. It's very useful to track real dependencies between libraries in the split build mode. [\#5855](https://github.com/ClickHouse/ClickHouse/pull/5855) ([Ivan](https://github.com/abyss7))
- Added a performance test for [\#5797](https://github.com/ClickHouse/ClickHouse/issues/5797) [\#5914](https://github.com/ClickHouse/ClickHouse/pull/5914) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed compatibility with gcc-7. [\#5840](https://github.com/ClickHouse/ClickHouse/pull/5840) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added support for gcc-9. This fixes [\#5717](https://github.com/ClickHouse/ClickHouse/issues/5717) [\#5774](https://github.com/ClickHouse/ClickHouse/pull/5774) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a bug when libunwind can be linked incorrectly. [\#5948](https://github.com/ClickHouse/ClickHouse/pull/5948) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a few warnings found by PVS-Studio. [\#5921](https://github.com/ClickHouse/ClickHouse/pull/5921) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added initial support for the `clang-tidy` static analyzer. [\#5806](https://github.com/ClickHouse/ClickHouse/pull/5806) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Convert BSD/Linux endian macros ('be64toh' and 'htobe64') to the Mac OS X equivalents. [\#5785](https://github.com/ClickHouse/ClickHouse/pull/5785) ([Fu Chen](https://github.com/fredchenbj))
- Improved integration tests guide. [\#5796](https://github.com/ClickHouse/ClickHouse/pull/5796) ([Vladimir Chebotarev](https://github.com/excitoon))
- Fixed the build at macosx + gcc9. [\#5822](https://github.com/ClickHouse/ClickHouse/pull/5822) ([filimonov](https://github.com/filimonov))
- Fixed a hard-to-spot typo: aggreAGte -\> aggregate. [\#5753](https://github.com/ClickHouse/ClickHouse/pull/5753) ([akuzm](https://github.com/akuzm))
- Fixed the freebsd build. [\#5760](https://github.com/ClickHouse/ClickHouse/pull/5760) ([proller](https://github.com/proller))
- Added a link to the experimental YouTube channel to the website. [\#5845](https://github.com/ClickHouse/ClickHouse/pull/5845) ([Ivan Blinkov](https://github.com/blinkov))
- CMake: added an option for coverage flags: WITH\_COVERAGE. [\#5776](https://github.com/ClickHouse/ClickHouse/pull/5776) ([proller](https://github.com/proller))
- Fixed the initial size of some inline PODArray's. [\#5787](https://github.com/ClickHouse/ClickHouse/pull/5787) ([akuzm](https://github.com/akuzm))
- clickhouse-server.postinst: fixed OS detection for centos 6. [\#5788](https://github.com/ClickHouse/ClickHouse/pull/5788) ([proller](https://github.com/proller))
- Added Arch linux package generation. [\#5719](https://github.com/ClickHouse/ClickHouse/pull/5719) ([Vladimir Chebotarev](https://github.com/excitoon))
- Split Common/config.h by libs (dbms). [\#5715](https://github.com/ClickHouse/ClickHouse/pull/5715) ([proller](https://github.com/proller))
- Fixes for the "Arcadia" build platform. [\#5795](https://github.com/ClickHouse/ClickHouse/pull/5795) ([proller](https://github.com/proller))
- Fixes for an unconventional build (gcc9, no submodules). [\#5792](https://github.com/ClickHouse/ClickHouse/pull/5792) ([proller](https://github.com/proller))
- Require an explicit type in unalignedStore because it was proven to be bug-prone. [\#5791](https://github.com/ClickHouse/ClickHouse/pull/5791) ([akuzm](https://github.com/akuzm))
- Fixed the MacOS build. [\#5830](https://github.com/ClickHouse/ClickHouse/pull/5830) ([filimonov](https://github.com/filimonov))
- Performance test concerning the new JIT feature with a bigger dataset, as requested here [\#5263](https://github.com/ClickHouse/ClickHouse/issues/5263) [\#5887](https://github.com/ClickHouse/ClickHouse/pull/5887) ([Guillaume Tassery](https://github.com/YiuRULE))
- Run stateful tests in the stress test. [12693e568722f11e19859742f56428455501fd2a](https://github.com/ClickHouse/ClickHouse/commit/12693e568722f11e19859742f56428455501fd2a) ([alesapin](https://github.com/alesapin))

#### Backward Incompatible Change {#backward-incompatible-change-7}

- `Kafka` is broken in this version.
- Enable `adaptive_index_granularity` = 10MB by default for new `MergeTree` tables. If you created new MergeTree tables on version 19.11+, downgrade to versions prior to 19.6 will be impossible. [\#5628](https://github.com/ClickHouse/ClickHouse/pull/5628) ([alesapin](https://github.com/alesapin))
- Removed obsolete undocumented embedded dictionaries that were used by Yandex.Metrica. The functions `OSIn`, `SEIn`, `OSToRoot`, `SEToRoot`, `OSHierarchy`, `SEHierarchy` are no longer available. If you are using these functions, write an email to clickhouse-feedback@yandex-team.com. Note: at the last moment we decided to keep these functions for a while. [\#5780](https://github.com/ClickHouse/ClickHouse/pull/5780) ([alexey-milovidov](https://github.com/alexey-milovidov))

## ClickHouse release 19.10 {#clickhouse-release-19-10}

### ClickHouse release 19.10.1.5, 2019-07-12 {#clickhouse-release-19-10-1-5-2019-07-12}

#### New Feature {#new-feature-7}

- Added a new column codec: `T64`. Made for (U)IntX/EnumX/Date(Time)/DecimalX columns. It should be good for columns with constant or small-range values. The codec itself allows enlarging or shrinking the data type without re-compression (a sketch follows this list). [\#5557](https://github.com/ClickHouse/ClickHouse/pull/5557) ([Artem Zuikov](https://github.com/4ertus2))
- Added the database engine `MySQL` that allows viewing all tables on a remote MySQL server. [\#5599](https://github.com/ClickHouse/ClickHouse/pull/5599) ([Winter Zhang](https://github.com/zhang2014))
- `bitmapContains` implementation. It's 2x faster than `bitmapHasAny` if the second bitmap contains one element. [\#5535](https://github.com/ClickHouse/ClickHouse/pull/5535) ([Zhichang Yu](https://github.com/yuzhichang))
- Support for the `crc32` function (with behaviour exactly as in MySQL or PHP). Do not use it if you need a hash function. [\#5661](https://github.com/ClickHouse/ClickHouse/pull/5661) ([Remen Ivan](https://github.com/BHYCHIK))
- Implemented `SYSTEM START/STOP DISTRIBUTED SENDS` queries to control asynchronous inserts into `Distributed` tables. [\#4935](https://github.com/ClickHouse/ClickHouse/pull/4935) ([Winter Zhang](https://github.com/zhang2014))
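A minimal sketch of declaring the new codec; the table and column names are illustrative:

```sql
-- T64 targets (U)IntX/EnumX/Date(Time)/DecimalX columns and is often
-- chained with a general-purpose codec such as ZSTD.
CREATE TABLE t64_example
(
    id UInt64,
    small_range UInt32 CODEC(T64, ZSTD)
)
ENGINE = MergeTree
ORDER BY id;
```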
#### Bug Fix {#bug-fix-22}

- Ignore query execution limits and max parts size for merge limits while executing mutations. [\#5659](https://github.com/ClickHouse/ClickHouse/pull/5659) ([Anton Popov](https://github.com/CurtizJ))
- Fixed a bug which may lead to deduplication of normal blocks (extremely rare) and insertion of duplicate blocks (more often). [\#5549](https://github.com/ClickHouse/ClickHouse/pull/5549) ([alesapin](https://github.com/alesapin))
- Fixed the function `arrayEnumerateUniqRanked` for arguments with empty arrays. [\#5559](https://github.com/ClickHouse/ClickHouse/pull/5559) ([proller](https://github.com/proller))
- Don't subscribe to Kafka topics without the intent to poll any messages. [\#5698](https://github.com/ClickHouse/ClickHouse/pull/5698) ([Ivan](https://github.com/abyss7))
- Made the setting `join_use_nulls` get no effect for types that cannot be inside Nullable. [\#5700](https://github.com/ClickHouse/ClickHouse/pull/5700) ([Olga Khvostikova](https://github.com/stavrolia))
- Fixed `Incorrect size of index granularity` errors. [\#5720](https://github.com/ClickHouse/ClickHouse/pull/5720) ([coraxster](https://github.com/coraxster))
- Fixed Float-to-Decimal conversion overflow. [\#5607](https://github.com/ClickHouse/ClickHouse/pull/5607) ([coraxster](https://github.com/coraxster))
- Flush the buffer when the `WriteBufferFromHDFS` destructor is called. This fixes writing into `HDFS`. [\#5684](https://github.com/ClickHouse/ClickHouse/pull/5684) ([Xindong Peng](https://github.com/eejoin))

#### Improvement {#improvement-7}

- Treat empty cells in `CSV` as default values when the setting `input_format_defaults_for_omitted_fields` is enabled (a sketch follows this list). [\#5625](https://github.com/ClickHouse/ClickHouse/pull/5625) ([akuzm](https://github.com/akuzm))
- Non-blocking loading of external dictionaries. [\#5567](https://github.com/ClickHouse/ClickHouse/pull/5567) ([Vitaly Baranov](https://github.com/vitlibar))
- Network timeouts can be dynamically changed for already established connections according to the settings. [\#4558](https://github.com/ClickHouse/ClickHouse/pull/4558) ([Konstantin Podshumok](https://github.com/podshumok))
- Using "public\_suffix\_list" for the functions `firstSignificantSubdomain`, `cutToFirstSignificantSubdomain`. It uses a perfect hash table generated by `gperf` with a list generated from the file https://publicsuffix.org/list/public\_suffix\_list.dat (for example, now we recognize the domain `ac.uk` as non-significant). [\#5030](https://github.com/ClickHouse/ClickHouse/pull/5030) ([Guillaume Tassery](https://github.com/YiuRULE))
- Adopted the `IPv6` data type in system tables; unified client info columns in `system.processes` and `system.query_log`. [\#5640](https://github.com/ClickHouse/ClickHouse/pull/5640) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Using sessions for connections with the MySQL compatibility protocol. \#5476 [\#5646](https://github.com/ClickHouse/ClickHouse/pull/5646) ([Yuriy Baranov](https://github.com/yurriy))
- Support more `ALTER` queries `ON CLUSTER`. [\#5593](https://github.com/ClickHouse/ClickHouse/pull/5593) [\#5613](https://github.com/ClickHouse/ClickHouse/pull/5613) ([sundyli](https://github.com/sundy-li))
- Support the `` section in the `clickhouse-local` config file. [\#5540](https://github.com/ClickHouse/ClickHouse/pull/5540) ([proller](https://github.com/proller))
- Allow running a query with the `remote` table function in `clickhouse-local`. [\#5627](https://github.com/ClickHouse/ClickHouse/pull/5627) ([proller](https://github.com/proller))
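A sketch of the CSV-defaults behavior described in the first item of this list; `csv_target` is a hypothetical table:

```sql
CREATE TABLE csv_target
(
    id UInt32,
    name String DEFAULT 'unknown'
)
ENGINE = MergeTree
ORDER BY id;

SET input_format_defaults_for_omitted_fields = 1;

-- The empty cell in the first row is filled with the column default
-- ('unknown') instead of an empty string.
INSERT INTO csv_target FORMAT CSV
1,
2,Alice
```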
#### Performance Improvement {#performance-improvement-5}

- Added the possibility to write the final mark at the end of MergeTree columns. It allows avoiding useless reads for keys that are out of the table data range. It is enabled only if adaptive index granularity is in use. [\#5624](https://github.com/ClickHouse/ClickHouse/pull/5624) ([alesapin](https://github.com/alesapin))
- Improved performance of MergeTree tables on very slow filesystems by reducing the number of `stat` syscalls. [\#5648](https://github.com/ClickHouse/ClickHouse/pull/5648) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed performance degradation in reading from MergeTree tables that was introduced in version 19.6. Fixes \#5631. [\#5633](https://github.com/ClickHouse/ClickHouse/pull/5633) ([alexey-milovidov](https://github.com/alexey-milovidov))

#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-8}

- Implemented `TestKeeper` as an implementation of the ZooKeeper interface used for testing. [\#5643](https://github.com/ClickHouse/ClickHouse/pull/5643) ([alexey-milovidov](https://github.com/alexey-milovidov)) ([Alexey Levushkin](https://github.com/alexey-milovidov))
- From now on `.sql` tests can be run isolated by the server, in parallel, with a random database. This allows running them faster, adding new tests with custom server configurations, and being sure that different tests don't affect each other. [\#5554](https://github.com/ClickHouse/ClickHouse/pull/5554) ([Ivan](https://github.com/abyss7))
- Remove `` and `` from performance tests. [\#5672](https://github.com/ClickHouse/ClickHouse/pull/5672) ([Olga Khvostikova](https://github.com/stavrolia))
- Fixed the "select\_format" performance test for `Pretty` formats. [\#5642](https://github.com/ClickHouse/ClickHouse/pull/5642) ([alexey-milovidov](https://github.com/alexey-milovidov))

## ClickHouse release 19.9 {#clickhouse-release-19-9}

### ClickHouse release 19.9.3.31, 2019-07-05 {#clickhouse-release-19-9-3-31-2019-07-05}

#### Bug Fix {#bug-fix-23}

- Fixed a segfault in the Delta codec that affects columns with values less than 32 bits in size. The bug led to random memory corruption. [\#5786](https://github.com/ClickHouse/ClickHouse/pull/5786) ([alesapin](https://github.com/alesapin))
- Fixed a rare bug in checking a part with a LowCardinality column. [\#5832](https://github.com/ClickHouse/ClickHouse/pull/5832) ([alesapin](https://github.com/alesapin))
- Fixed a segfault in TTL merge with non-physical columns in the block. [\#5819](https://github.com/ClickHouse/ClickHouse/pull/5819) ([Anton Popov](https://github.com/CurtizJ))
- Fixed potential infinite sleeping of low-priority queries. [\#5842](https://github.com/ClickHouse/ClickHouse/pull/5842) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed how ClickHouse determines the default time zone as `UCT` instead of `UTC`. [\#5828](https://github.com/ClickHouse/ClickHouse/pull/5828) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a bug with executing distributed DROP/ALTER/TRUNCATE/OPTIMIZE ON CLUSTER queries on a follower replica before the leader replica. Now they will be executed directly on the leader replica. [\#5757](https://github.com/ClickHouse/ClickHouse/pull/5757) ([alesapin](https://github.com/alesapin))
- Fixed a race condition which causes some queries to not appear in query\_log immediately after a SYSTEM FLUSH LOGS query. [\#5685](https://github.com/ClickHouse/ClickHouse/pull/5685) ([Anton Popov](https://github.com/CurtizJ))
- Added missing support for constant arguments to the `evalMLModel` function. [\#5820](https://github.com/ClickHouse/ClickHouse/pull/5820) ([alexey-milovidov](https://github.com/alexey-milovidov))

### ClickHouse release 19.9.2.4, 2019-06-24 {#clickhouse-release-19-9-2-4-2019-06-24}

#### New Feature {#new-feature-8}

- Print information about frozen parts in the `system.parts` table. [\#5471](https://github.com/ClickHouse/ClickHouse/pull/5471) ([proller](https://github.com/proller))
- Ask for the client password on clickhouse-client start on a tty if it is not set in the arguments. [\#5092](https://github.com/ClickHouse/ClickHouse/pull/5092) ([proller](https://github.com/proller))
- Implemented the `dictGet` and `dictGetOrDefault` functions for Decimal types (a sketch follows below). [\#5394](https://github.com/ClickHouse/ClickHouse/pull/5394) ([Artem Zuikov](https://github.com/4ertus2))
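A sketch of the Decimal dictionary functions, assuming a hypothetical dictionary `prices` with a `Decimal64(4)` attribute named `price`:

```sql
SELECT
    dictGet('prices', 'price', toUInt64(42)) AS price,
    -- The default branch must also be a Decimal of the matching scale.
    dictGetOrDefault('prices', 'price', toUInt64(999), toDecimal64(0, 4)) AS price_or_default;
```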
#### Improvement {#improvement-8}

- Debian init: add service wait. [\#5522](https://github.com/ClickHouse/ClickHouse/pull/5522) ([proller](https://github.com/proller))
- Added a setting, forbidden by default, to create a table with suspicious types for LowCardinality. [\#5448](https://github.com/ClickHouse/ClickHouse/pull/5448) ([Olga Khvostikova](https://github.com/stavrolia))
- Regression functions return model weights when not used as state in the `evalMLMethod` function. [\#5411](https://github.com/ClickHouse/ClickHouse/pull/5411) ([Quid37](https://github.com/Quid37))
- Renamed and improved regression methods. [\#5492](https://github.com/ClickHouse/ClickHouse/pull/5492) ([Quid37](https://github.com/Quid37))
- Clearer interfaces of string searchers. [\#5586](https://github.com/ClickHouse/ClickHouse/pull/5586) ([Danila Kutenin](https://github.com/danlark1))

#### Bug Fix {#bug-fix-24}

- Fixed potential data loss in Kafka. [\#5445](https://github.com/ClickHouse/ClickHouse/pull/5445) ([Ivan](https://github.com/abyss7))
- Fixed a potential infinite loop in the `PrettySpace` format when called with zero columns. [\#5560](https://github.com/ClickHouse/ClickHouse/pull/5560) ([Olga Khvostikova](https://github.com/stavrolia))
- Fixed a UInt32 overflow bug in linear models. Allow evaluating an ML model for a non-constant model argument. [\#5516](https://github.com/ClickHouse/ClickHouse/pull/5516) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- `ALTER TABLE ... DROP INDEX IF EXISTS ...` should not raise an exception if the provided index does not exist. [\#5524](https://github.com/ClickHouse/ClickHouse/pull/5524) ([Gleb Novikov](https://github.com/NanoBjorn))
- Fixed a segfault with `bitmapHasAny` in a scalar subquery. [\#5528](https://github.com/ClickHouse/ClickHouse/pull/5528) ([Zhichang Yu](https://github.com/yuzhichang))
- Fixed an error where the replication connection pool doesn't retry resolving the host, even when the DNS cache was dropped. [\#5534](https://github.com/ClickHouse/ClickHouse/pull/5534) ([alesapin](https://github.com/alesapin))
- Fixed `ALTER ... MODIFY TTL` on ReplicatedMergeTree. [\#5539](https://github.com/ClickHouse/ClickHouse/pull/5539) ([Anton Popov](https://github.com/CurtizJ))
- Fixed INSERT into a Distributed table with a MATERIALIZED column. [\#5429](https://github.com/ClickHouse/ClickHouse/pull/5429) ([Azat Khuzhin](https://github.com/azat))
- Fixed bad alloc when truncating Join storage. [\#5437](https://github.com/ClickHouse/ClickHouse/pull/5437) ([TCeason](https://github.com/TCeason))
- In recent versions of the tzdata package some files are symlinks now. The current mechanism for detecting the default timezone gets broken and gives wrong names for some timezones. Now at least we force the timezone name to the contents of TZ if provided. [\#5443](https://github.com/ClickHouse/ClickHouse/pull/5443) ([Ivan](https://github.com/abyss7))
- Fixed some extremely rare cases with the MultiVolnitsky searcher when the constant needles in sum are at least 16KB long. The algorithm missed or overwrote the previous results, which can lead to an incorrect result of `multiSearchAny`. [\#5588](https://github.com/ClickHouse/ClickHouse/pull/5588) ([Danila Kutenin](https://github.com/danlark1))
- Fixed the issue when settings for ExternalData requests couldn't use ClickHouse settings. Also, for now, the settings `date_time_input_format` and `low_cardinality_allow_in_native_format` cannot be used because of the ambiguity of names (in external data they can be interpreted as a table format and in the query as a setting). [\#5455](https://github.com/ClickHouse/ClickHouse/pull/5455) ([Danila Kutenin](https://github.com/danlark1))
- Fixed a bug when parts were removed only from FS without dropping them from Zookeeper. [\#5520](https://github.com/ClickHouse/ClickHouse/pull/5520) ([alesapin](https://github.com/alesapin))
- Removed debug logging from the MySQL protocol. [\#5478](https://github.com/ClickHouse/ClickHouse/pull/5478) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Skip ZNONODE during DDL query processing. [\#5489](https://github.com/ClickHouse/ClickHouse/pull/5489) ([Azat Khuzhin](https://github.com/azat))
- Fixed the mixed `UNION ALL` result column type. There were cases with inconsistent data and column types of resulting columns. [\#5503](https://github.com/ClickHouse/ClickHouse/pull/5503) ([Artem Zuikov](https://github.com/4ertus2))
- Throw an exception on wrong integers in `dictGetT` functions instead of a crash. [\#5446](https://github.com/ClickHouse/ClickHouse/pull/5446) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed wrong element\_count and load\_factor for hashed dictionaries in the `system.dictionaries` table. [\#5440](https://github.com/ClickHouse/ClickHouse/pull/5440) ([Azat Khuzhin](https://github.com/azat))

#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-9}

- Fixed build without `Brotli` HTTP compression support (`ENABLE_BROTLI=OFF` cmake variable). [\#5521](https://github.com/ClickHouse/ClickHouse/pull/5521) ([Anton Yuzhaninov](https://github.com/citrin))
- Include roaring.h as roaring/roaring.h. [\#5523](https://github.com/ClickHouse/ClickHouse/pull/5523) ([Orivej Desh](https://github.com/orivej))
- Fixed gcc9 warnings in hyperscan (the \#line directive is evil!). [\#5546](https://github.com/ClickHouse/ClickHouse/pull/5546) ([Danila Kutenin](https://github.com/danlark1))
- Fixed all warnings when compiling with gcc-9. Fixed some contrib issues. Fixed a gcc9 ICE and submitted it to bugzilla. [\#5498](https://github.com/ClickHouse/ClickHouse/pull/5498) ([Danila Kutenin](https://github.com/danlark1))
- Fixed linking with lld. [\#5477](https://github.com/ClickHouse/ClickHouse/pull/5477) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Removed unused specializations in dictionaries. [\#5452](https://github.com/ClickHouse/ClickHouse/pull/5452) ([Artem Zuikov](https://github.com/4ertus2))
- Improved performance tests for formatting and parsing tables for different types of files. [\#5497](https://github.com/ClickHouse/ClickHouse/pull/5497) ([Olga Khvostikova](https://github.com/stavrolia))
- Fixes for parallel test runs. [\#5506](https://github.com/ClickHouse/ClickHouse/pull/5506) ([proller](https://github.com/proller))
- Docker: use configs from clickhouse-test. [\#5531](https://github.com/ClickHouse/ClickHouse/pull/5531) ([proller](https://github.com/proller))
- Fixed compilation for FreeBSD. [\#5447](https://github.com/ClickHouse/ClickHouse/pull/5447) ([proller](https://github.com/proller))
- Upgraded boost to 1.70. [\#5570](https://github.com/ClickHouse/ClickHouse/pull/5570) ([proller](https://github.com/proller))
- Fixed building clickhouse as a submodule. [\#5574](https://github.com/ClickHouse/ClickHouse/pull/5574) ([proller](https://github.com/proller))
- Improved JSONExtract performance tests. [\#5444](https://github.com/ClickHouse/ClickHouse/pull/5444) ([Vitaly Baranov](https://github.com/vitlibar))

## ClickHouse release 19.8 {#clickhouse-release-19-8}

### ClickHouse release 19.8.3.8, 2019-06-11 {#clickhouse-release-19-8-3-8-2019-06-11}

#### New Features {#new-features}

- Added functions to work with JSON. [\#4686](https://github.com/ClickHouse/ClickHouse/pull/4686) ([hcz](https://github.com/hczhcz)) [\#5124](https://github.com/ClickHouse/ClickHouse/pull/5124). ([Vitaly Baranov](https://github.com/vitlibar))
- Added a function `basename`, with similar behaviour to the `basename` function that exists in a lot of languages (`os.path.basename` in Python, `basename` in PHP, etc…). Works with both a UNIX-like path and a Windows path. [\#5136](https://github.com/ClickHouse/ClickHouse/pull/5136) ([Guillaume Tassery](https://github.com/YiuRULE))
- Added `LIMIT n, m BY` or `LIMIT m OFFSET n BY` syntax to set an offset of n for the LIMIT BY clause. [\#5138](https://github.com/ClickHouse/ClickHouse/pull/5138) ([Anton Popov](https://github.com/CurtizJ))
- Added the new data type `SimpleAggregateFunction`, which allows columns with light aggregation in an `AggregatingMergeTree`. It can only be used with simple functions like `any`, `anyLast`, `sum`, `min`, `max` (a sketch follows this list). [\#4629](https://github.com/ClickHouse/ClickHouse/pull/4629) ([Boris Granveaud](https://github.com/bgranvea))
- Added support for non-constant arguments in the `ngramDistance` function. [\#5198](https://github.com/ClickHouse/ClickHouse/pull/5198) ([Danila Kutenin](https://github.com/danlark1))
- Added the functions `skewPop`, `skewSamp`, `kurtPop` and `kurtSamp` to compute sequence skewness, sample skewness, kurtosis and sample kurtosis respectively. [\#5200](https://github.com/ClickHouse/ClickHouse/pull/5200) ([hcz](https://github.com/hczhcz))
- Support the rename operation for `MaterializeView` storage. [\#5209](https://github.com/ClickHouse/ClickHouse/pull/5209) ([Guillaume Tassery](https://github.com/YiuRULE))
- Added a server which allows connecting to ClickHouse using a MySQL client. [\#4715](https://github.com/ClickHouse/ClickHouse/pull/4715) ([Yuriy Baranov](https://github.com/yurriy))
- Added `toDecimal*OrZero` and `toDecimal*OrNull` functions. [\#5291](https://github.com/ClickHouse/ClickHouse/pull/5291) ([Artem Zuikov](https://github.com/4ertus2))
- Support Decimal types in the functions `quantile`, `quantiles`, `median`, `quantileExactWeighted`, `quantilesExactWeighted`, medianExactWeighted. [\#5304](https://github.com/ClickHouse/ClickHouse/pull/5304) ([Artem Zuikov](https://github.com/4ertus2))
- Added the `toValidUTF8` function, which replaces all invalid UTF-8 characters by replacement character � (U+FFFD). [\#5322](https://github.com/ClickHouse/ClickHouse/pull/5322) ([Danila Kutenin](https://github.com/danlark1))
- Added the `format` function. Formatting a constant pattern (simplified Python format pattern) with the strings listed in the arguments. [\#5330](https://github.com/ClickHouse/ClickHouse/pull/5330) ([Danila Kutenin](https://github.com/danlark1))
- Added the `system.detached_parts` table containing information about detached parts of `MergeTree` tables. [\#5353](https://github.com/ClickHouse/ClickHouse/pull/5353) ([akuzm](https://github.com/akuzm))
- Added the `ngramSearch` function to calculate the non-symmetric difference between needle and haystack. [\#5418](https://github.com/ClickHouse/ClickHouse/pull/5418)[\#5422](https://github.com/ClickHouse/ClickHouse/pull/5422) ([Danila Kutenin](https://github.com/danlark1))
- Implementation of basic machine learning methods (stochastic linear regression and logistic regression) using the aggregate functions interface. Has different strategies for updating model weights (simple gradient descent, momentum method, Nesterov method). Also supports mini-batches of custom size. [\#4943](https://github.com/ClickHouse/ClickHouse/pull/4943) ([Quid37](https://github.com/Quid37))
- Implementation of the `geohashEncode` and `geohashDecode` functions. [\#5003](https://github.com/ClickHouse/ClickHouse/pull/5003) ([Vasily Nemkov](https://github.com/Enmk))
- Added the aggregate function `timeSeriesGroupSum`, which can aggregate different time series whose sample timestamps are not aligned. It will use linear interpolation between two sample timestamps and then sum the time series together. Added the aggregate function `timeSeriesGroupRateSum`, which calculates the rate of time series and then sums the rates together. [\#4542](https://github.com/ClickHouse/ClickHouse/pull/4542) ([Yangkuan Liu](https://github.com/LiuYangkuan))
- Added the functions `IPv4CIDRtoIPv4Range` and `IPv6CIDRtoIPv6Range` to calculate the lower and higher bounds for an IP in a subnet using CIDR. [\#5095](https://github.com/ClickHouse/ClickHouse/pull/5095) ([Guillaume Tassery](https://github.com/YiuRULE))
- Added a X-ClickHouse-Summary header when we send a query using HTTP with the setting `send_progress_in_http_headers` enabled. Returns the usual information of X-ClickHouse-Progress, with additional information such as how many rows and bytes were inserted by the query. [\#5116](https://github.com/ClickHouse/ClickHouse/pull/5116) ([Guillaume Tassery](https://github.com/YiuRULE))
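A minimal sketch of the `SimpleAggregateFunction` data type announced earlier in this list; the table is hypothetical:

```sql
-- Values are combined with the named simple function (sum, anyLast)
-- during background merges, without storing aggregate-state blobs.
CREATE TABLE agg_example
(
    key UInt64,
    total SimpleAggregateFunction(sum, UInt64),
    latest SimpleAggregateFunction(anyLast, String)
)
ENGINE = AggregatingMergeTree
ORDER BY key;
```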
#### Improvements {#improvements}

- Added the `max_parts_in_total` setting for the MergeTree family of tables (default: 100 000) that prevents unsafe specification of the partition key \#5166. [\#5171](https://github.com/ClickHouse/ClickHouse/pull/5171) ([alexey-milovidov](https://github.com/alexey-milovidov))
- `clickhouse-obfuscator`: derive the seed for individual columns by combining the initial seed with the column name, not the column position. This is intended to transform datasets with multiple related tables, so that tables remain JOINable after the transformation. [\#5178](https://github.com/ClickHouse/ClickHouse/pull/5178) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added the functions `JSONExtractRaw`, `JSONExtractKeyAndValues`. Renamed the function `jsonExtract` to `JSONExtract`. When something goes wrong these functions return the corresponding values, not `NULL`. Modified the function `JSONExtract`; now it gets the return type from its last parameter and doesn't inject nullables. Implemented a fallback to RapidJSON in case AVX2 instructions are not available. The simdjson library was updated to a new version. [\#5235](https://github.com/ClickHouse/ClickHouse/pull/5235) ([Vitaly Baranov](https://github.com/vitlibar))
- Now the `if` and `multiIf` functions don't rely on the condition's `Nullable`, but rely on the branches for SQL compatibility. [\#5238](https://github.com/ClickHouse/ClickHouse/pull/5238) ([Jian Wu](https://github.com/janplus))
- The `In` predicate now generates a `Null` result from `Null` input, like the `Equal` function. [\#5152](https://github.com/ClickHouse/ClickHouse/pull/5152) ([Jian Wu](https://github.com/janplus))
- Check the time limit every (flush\_interval / poll\_timeout) number of rows from Kafka. This allows breaking the reading from the Kafka consumer more frequently and checking the time limits for the top-level streams. [\#5249](https://github.com/ClickHouse/ClickHouse/pull/5249) ([Ivan](https://github.com/abyss7))
- Link rdkafka with the bundled SASL. It should allow using SASL SCRAM authentication. [\#5253](https://github.com/ClickHouse/ClickHouse/pull/5253) ([Ivan](https://github.com/abyss7))
- Batched version of RowRefList for ALL JOINS. [\#5267](https://github.com/ClickHouse/ClickHouse/pull/5267) ([Artem Zuikov](https://github.com/4ertus2))
- clickhouse-server: more informative listen error messages. [\#5268](https://github.com/ClickHouse/ClickHouse/pull/5268) ([proller](https://github.com/proller))
- Support dictionaries in clickhouse-copier for functions in `` [\#5270](https://github.com/ClickHouse/ClickHouse/pull/5270) ([proller](https://github.com/proller))
- Added the new setting `kafka_commit_every_batch` to regulate the Kafka committing policy. It allows setting the commit mode: after every batch of messages is handled, or after the whole block is written to the storage. It's a trade-off between losing some messages or reading them twice in some extreme situations. [\#5308](https://github.com/ClickHouse/ClickHouse/pull/5308) ([Ivan](https://github.com/abyss7))
- Made `windowFunnel` support other unsigned integer types (a sketch follows this list). [\#5320](https://github.com/ClickHouse/ClickHouse/pull/5320) ([sundyli](https://github.com/sundy-li))
- Allow shadowing the virtual column `_table` in the Merge engine. [\#5325](https://github.com/ClickHouse/ClickHouse/pull/5325) ([Ivan](https://github.com/abyss7))
- Made `sequenceMatch` aggregate functions support other unsigned integer types. [\#5339](https://github.com/ClickHouse/ClickHouse/pull/5339) ([sundyli](https://github.com/sundy-li))
- Better error messages if a checksum mismatch is most likely caused by hardware failures. [\#5355](https://github.com/ClickHouse/ClickHouse/pull/5355) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Check that underlying tables support sampling for `StorageMerge`. [\#5366](https://github.com/ClickHouse/ClickHouse/pull/5366) ([Ivan](https://github.com/abyss7))
- Сlose MySQL connections after their usage in external dictionaries. It is related to issue \#893. [\#5395](https://github.com/ClickHouse/ClickHouse/pull/5395) ([Clément Rodriguez](https://github.com/clemrodriguez))
- Improvements of the MySQL Wire protocol. Changed the name of the format to MySQLWire. Using RAII for calling RSA\_free. Disabling SSL if the context cannot be created. [\#5419](https://github.com/ClickHouse/ClickHouse/pull/5419) ([Yuriy Baranov](https://github.com/yurriy))
- clickhouse-client: allow running with an unaccessible history file (read-only, no disk space, file is a directory, …). [\#5431](https://github.com/ClickHouse/ClickHouse/pull/5431) ([proller](https://github.com/proller))
- Respect query settings in asynchronous INSERTs into Distributed tables. [\#4936](https://github.com/ClickHouse/ClickHouse/pull/4936) ([TCeason](https://github.com/TCeason))
- Renamed the functions `leastSqr` to `simpleLinearRegression`, `LinearRegression` to `linearRegression`, `LogisticRegression` to `logisticRegression`. [\#5391](https://github.com/ClickHouse/ClickHouse/pull/5391) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
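A sketch of a `windowFunnel` query; the `events` table and its columns are hypothetical:

```sql
-- Searches for the chain view -> cart -> purchase within a sliding
-- 3600-second window per user.
SELECT
    user_id,
    windowFunnel(3600)(event_time, event = 'view', event = 'cart', event = 'purchase') AS reached_level
FROM events
GROUP BY user_id;
```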
#### Performance Improvements {#performance-improvements}

- Parallelize processing of parts of non-replicated MergeTree tables in the `ALTER MODIFY` query. [\#4639](https://github.com/ClickHouse/ClickHouse/pull/4639) ([Ivan Kush](https://github.com/IvanKush))
- Optimizations in regular expression extraction. [\#5193](https://github.com/ClickHouse/ClickHouse/pull/5193) [\#5191](https://github.com/ClickHouse/ClickHouse/pull/5191) ([Danila Kutenin](https://github.com/danlark1))
- Do not add the right join key column to the join result if it's used only in the join on section. [\#5260](https://github.com/ClickHouse/ClickHouse/pull/5260) ([Artem Zuikov](https://github.com/4ertus2))
- Freeze the Kafka buffer after the first empty response. It avoids multiple invocations of `ReadBuffer::next()` for an empty result in some row-parsing streams. [\#5283](https://github.com/ClickHouse/ClickHouse/pull/5283) ([Ivan](https://github.com/abyss7))
- `concat` function optimization for multiple arguments. [\#5357](https://github.com/ClickHouse/ClickHouse/pull/5357) ([Danila Kutenin](https://github.com/danlark1))
- Query optimisation. Allow pushing down the IN statement while rewriting comma/cross join into an inner one. [\#5396](https://github.com/ClickHouse/ClickHouse/pull/5396) ([Artem Zuikov](https://github.com/4ertus2))
- Upgraded our LZ4 implementation with the reference one to have faster decompression. [\#5070](https://github.com/ClickHouse/ClickHouse/pull/5070) ([Danila Kutenin](https://github.com/danlark1))
- Implemented MSD radix sort (based on kxsort), and partial sorting. [\#5129](https://github.com/ClickHouse/ClickHouse/pull/5129) ([Evgenii Pravda](https://github.com/kvinty))

#### Bug Fixes {#bug-fixes}

- Fix push require columns with join. [\#5192](https://github.com/ClickHouse/ClickHouse/pull/5192) ([Winter Zhang](https://github.com/zhang2014))
- Fixed a bug where, when ClickHouse is run by systemd, the command `sudo service clickhouse-server forcerestart` did not work as expected. [\#5204](https://github.com/ClickHouse/ClickHouse/pull/5204) ([proller](https://github.com/proller))
- Fixed http error codes in DataPartsExchange (the interserver http server on port 9009 always returned code 200, even on errors). [\#5216](https://github.com/ClickHouse/ClickHouse/pull/5216) ([proller](https://github.com/proller))
- Fixed SimpleAggregateFunction for Strings longer than MAX\_SMALL\_STRING\_SIZE. [\#5311](https://github.com/ClickHouse/ClickHouse/pull/5311) ([Azat Khuzhin](https://github.com/azat))
- Fixed an error for `Decimal` to `Nullable(Decimal)` conversion in IN. Support other Decimal-to-Decimal conversions (including different scales). [\#5350](https://github.com/ClickHouse/ClickHouse/pull/5350) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed FPU clobbering in the simdjson library leading to wrong calculation of the `uniqHLL` and `uniqCombined` aggregate functions and math functions such as `log`. [\#5354](https://github.com/ClickHouse/ClickHouse/pull/5354) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed handling of mixed const/nonconst cases in JSON functions. [\#5435](https://github.com/ClickHouse/ClickHouse/pull/5435) ([Vitaly Baranov](https://github.com/vitlibar))
- Fixed the `retention` function. Now all conditions that are satisfied in a row of data are added to the data state. [\#5119](https://github.com/ClickHouse/ClickHouse/pull/5119) ([小路](https://github.com/nicelulu))
- Fixed the result type for `quantileExact` with Decimals. [\#5304](https://github.com/ClickHouse/ClickHouse/pull/5304) ([Artem Zuikov](https://github.com/4ertus2))

#### Documentation {#documentation}

- Translated the documentation for `CollapsingMergeTree` to Chinese. [\#5168](https://github.com/ClickHouse/ClickHouse/pull/5168) ([张风啸](https://github.com/AlexZFX))
- Translated some documentation about table engines to Chinese. [\#5134](https://github.com/ClickHouse/ClickHouse/pull/5134) [\#5328](https://github.com/ClickHouse/ClickHouse/pull/5328) ([never lee](https://github.com/neverlee))

#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements}

- Fixed some sanitizer reports that show probable use-after-free. [\#5139](https://github.com/ClickHouse/ClickHouse/pull/5139) [\#5143](https://github.com/ClickHouse/ClickHouse/pull/5143) [\#5393](https://github.com/ClickHouse/ClickHouse/pull/5393) ([Ivan](https://github.com/abyss7))
- Moved performance tests out of separate directories for convenience. [\#5158](https://github.com/ClickHouse/ClickHouse/pull/5158) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed incorrect performance tests. [\#5255](https://github.com/ClickHouse/ClickHouse/pull/5255) ([alesapin](https://github.com/alesapin))
- Added a tool to calculate checksums caused by bit flips, to debug hardware issues. [\#5334](https://github.com/ClickHouse/ClickHouse/pull/5334) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Made the runner script more usable. [\#5340](https://github.com/ClickHouse/ClickHouse/pull/5340)[\#5360](https://github.com/ClickHouse/ClickHouse/pull/5360) ([filimonov](https://github.com/filimonov))
- Added a small instruction on how to write performance tests. [\#5408](https://github.com/ClickHouse/ClickHouse/pull/5408) ([alesapin](https://github.com/alesapin))
- Added the ability to make substitutions in create, fill and drop queries in performance tests. [\#5367](https://github.com/ClickHouse/ClickHouse/pull/5367) ([Olga Khvostikova](https://github.com/stavrolia))

## ClickHouse release 19.7 {#clickhouse-release-19-7}

### ClickHouse release 19.7.5.29, 2019-07-05 {#clickhouse-release-19-7-5-29-2019-07-05}

#### Bug Fix {#bug-fix-25}

- Fixed a performance regression in some queries with JOIN. [\#5192](https://github.com/ClickHouse/ClickHouse/pull/5192) ([Winter Zhang](https://github.com/zhang2014))

### ClickHouse release 19.7.5.27, 2019-06-09 {#clickhouse-release-19-7-5-27-2019-06-09}

#### New features {#new-features-1}

- Added the bitmap-related functions `bitmapHasAny` and `bitmapHasAll`, analogous to the `hasAny` and `hasAll` functions for arrays (a sketch follows below). [\#5279](https://github.com/ClickHouse/ClickHouse/pull/5279) ([Sergey Vladykin](https://github.com/svladykin))
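A small sketch of the new bitmap predicates, using `bitmapBuild` to construct the inputs inline:

```sql
SELECT
    bitmapHasAny(bitmapBuild([1, 2, 3]), bitmapBuild([3, 4])) AS shares_any, -- 1: both contain 3
    bitmapHasAll(bitmapBuild([1, 2, 3]), bitmapBuild([1, 2])) AS has_all;    -- 1: {1, 2} is a subset
```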
#### Bug Fixes {#bug-fixes-1}

- Fixed a segfault on a `minmax` INDEX with a Null value. [\#5246](https://github.com/ClickHouse/ClickHouse/pull/5246) ([Nikita Vasilev](https://github.com/nikvas0))
- Mark all input columns in LIMIT BY as required output. This fixes the `Not found column` error in some distributed queries. [\#5407](https://github.com/ClickHouse/ClickHouse/pull/5407) ([Constantin S. Pan](https://github.com/kvap))
- Fix the `Column '0' already exists` error in `SELECT .. PREWHERE` on a column with DEFAULT [\#5397](https://github.com/ClickHouse/ClickHouse/pull/5397) ([proller](https://github.com/proller))
- Fix the `ALTER MODIFY TTL` query on `ReplicatedMergeTree`. [\#5539](https://github.com/ClickHouse/ClickHouse/pull/5539/commits) ([Anton Popov](https://github.com/CurtizJ))
- Don't crash the server when Kafka consumers have failed to start. [\#5285](https://github.com/ClickHouse/ClickHouse/pull/5285) ([Ivan](https://github.com/abyss7))
- Fixed bitmap functions producing wrong results. [\#5359](https://github.com/ClickHouse/ClickHouse/pull/5359) ([Andy Yang](https://github.com/andyyzh))
- Fix `element_count` for hashed dictionaries (do not include duplicates) [\#5440](https://github.com/ClickHouse/ClickHouse/pull/5440) ([Azat Khuzhin](https://github.com/azat))
- Use the contents of the TZ environment variable as the name of the timezone. It helps to correctly detect the default timezone in some cases. [\#5443](https://github.com/ClickHouse/ClickHouse/pull/5443) ([Ivan](https://github.com/abyss7))
- Do not try to convert integers in the `dictGetT` functions, because it doesn't work correctly. Throw an exception instead. [\#5446](https://github.com/ClickHouse/ClickHouse/pull/5446) ([Artem Zuikov](https://github.com/4ertus2))
- Fix settings in an ExternalData HTTP request. [\#5455](https://github.com/ClickHouse/ClickHouse/pull/5455) ([Danila Kutenin](https://github.com/danlark1))
- Fixed a bug where parts were removed only from FS without dropping them from Zookeeper. [\#5520](https://github.com/ClickHouse/ClickHouse/pull/5520) ([alesapin](https://github.com/alesapin))
- Fixed a segmentation fault in the `bitmapHasAny` function. [\#5528](https://github.com/ClickHouse/ClickHouse/pull/5528) ([Zhichang Yu](https://github.com/yuzhichang))
- Fixed an error where the replication connection pool didn't retry resolving the host, even when the DNS cache was dropped. [\#5534](https://github.com/ClickHouse/ClickHouse/pull/5534) ([alesapin](https://github.com/alesapin))
- Fixed the `DROP INDEX IF EXISTS` query. Now `ALTER TABLE ... DROP INDEX IF EXISTS ...` doesn't raise an exception if the provided index does not exist. [\#5524](https://github.com/ClickHouse/ClickHouse/pull/5524) ([Gleb Novikov](https://github.com/NanoBjorn))
- Fix UNION ALL supertype columns. There were cases with inconsistent data and column types of resulting columns. [\#5503](https://github.com/ClickHouse/ClickHouse/pull/5503) ([Artem Zuikov](https://github.com/4ertus2))
- Skip ZNONODE during DDL query processing. Previously, if another node removed the znode in the task queue, the node that hadn't processed it yet, but had already gotten the list of children, would terminate the DDLWorker thread. [\#5489](https://github.com/ClickHouse/ClickHouse/pull/5489) ([Azat Khuzhin](https://github.com/azat))
- Fixed INSERT into a Distributed() table with a MATERIALIZED column. [\#5429](https://github.com/ClickHouse/ClickHouse/pull/5429) ([Azat Khuzhin](https://github.com/azat))

### ClickHouse release 19.7.3.9, 2019-05-30 {#clickhouse-release-19-7-3-9-2019-05-30}

#### New Features {#new-features-2}

- Allow limiting the range of a setting that can be specified by the user. These constraints can be set up in the user's settings profile. [\#4931](https://github.com/ClickHouse/ClickHouse/pull/4931) ([Vitaly Baranov](https://github.com/vitlibar))
- Add a second version of the function `groupUniqArray` with an optional `max_size` parameter that limits the size of the resulting array. This behavior is similar to the `groupArray(max_size)(x)` function (see the sketch after this list). [\#5026](https://github.com/ClickHouse/ClickHouse/pull/5026) ([Guillaume Tassery](https://github.com/YiuRULE))
- For the TSVWithNames/CSVWithNames input file formats, the column order can now be determined from the file header. This is controlled by the `input_format_with_names_use_header` parameter. [\#5081](https://github.com/ClickHouse/ClickHouse/pull/5081) ([Alexander](https://github.com/Akazz))
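
A minimal sketch of the capped variant; the expression and the cap are made up for the example:

```sql
-- groupUniqArray(3) keeps at most 3 distinct values per group
SELECT groupUniqArray(3)(number % 10) AS sample_values
FROM numbers(100)
```
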
#### Bug Fixes {#bug-fixes-2}

- Crash with uncompressed\_cache + JOIN during merge (\#5197) [\#5133](https://github.com/ClickHouse/ClickHouse/pull/5133) ([Danila Kutenin](https://github.com/danlark1))
- Segmentation fault on a clickhouse-client query to system tables. \#5066 [\#5127](https://github.com/ClickHouse/ClickHouse/pull/5127) ([Ivan](https://github.com/abyss7))
- Data loss on heavy load via KafkaEngine (\#4736) [\#5080](https://github.com/ClickHouse/ClickHouse/pull/5080) ([Ivan](https://github.com/abyss7))
- Fixed a very rare data race condition that could happen when executing a query with UNION ALL involving at least two SELECTs from system.columns, system.tables, system.parts, system.parts\_tables or tables of the Merge family, while concurrently performing ALTER of columns of the related tables. [\#5189](https://github.com/ClickHouse/ClickHouse/pull/5189) ([alexey-milovidov](https://github.com/alexey-milovidov))

#### Performance Improvements {#performance-improvements-1}

- Use radix sort for sorting by a single numeric column in `ORDER BY` without `LIMIT`. [\#5106](https://github.com/ClickHouse/ClickHouse/pull/5106), [\#4439](https://github.com/ClickHouse/ClickHouse/pull/4439) ([Evgenii Pravda](https://github.com/kvinty), [alexey-milovidov](https://github.com/alexey-milovidov))

#### Documentation {#documentation-1}

- Translate documentation for some table engines to Chinese. [\#5107](https://github.com/ClickHouse/ClickHouse/pull/5107), [\#5094](https://github.com/ClickHouse/ClickHouse/pull/5094), [\#5087](https://github.com/ClickHouse/ClickHouse/pull/5087) ([张风啸](https://github.com/AlexZFX)), [\#5068](https://github.com/ClickHouse/ClickHouse/pull/5068) ([never lee](https://github.com/neverlee))

#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-1}

- Print UTF-8 characters properly in `clickhouse-test`. [\#5084](https://github.com/ClickHouse/ClickHouse/pull/5084) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Add a command line parameter for clickhouse-client to always load suggestion data. [\#5102](https://github.com/ClickHouse/ClickHouse/pull/5102) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Resolve some of the PVS-Studio warnings. [\#5082](https://github.com/ClickHouse/ClickHouse/pull/5082) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Update LZ4 [\#5040](https://github.com/ClickHouse/ClickHouse/pull/5040) ([Danila Kutenin](https://github.com/danlark1))
- Add gperf to the build requirements for the upcoming pull request \#5030. [\#5110](https://github.com/ClickHouse/ClickHouse/pull/5110) ([proller](https://github.com/proller))

## ClickHouse release 19.6 {#clickhouse-release-19-6}

### ClickHouse release 19.6.3.18, 2019-06-13 {#clickhouse-release-19-6-3-18-2019-06-13}

#### Bug Fixes {#bug-fixes-3}

- Fixed IN condition pushdown for queries from the table functions `mysql` and `odbc` and the corresponding table engines. This fixes \#3540 and \#2384. [\#5313](https://github.com/ClickHouse/ClickHouse/pull/5313) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix a deadlock in Zookeeper. [\#5297](https://github.com/ClickHouse/ClickHouse/pull/5297) ([github1youlc](https://github.com/github1youlc))
- Allow quoted decimals in CSV. [\#5284](https://github.com/ClickHouse/ClickHouse/pull/5284) ([Artem Zuikov](https://github.com/4ertus2))
- Disallow conversion from float Inf/NaN into Decimals (throw an exception). [\#5282](https://github.com/ClickHouse/ClickHouse/pull/5282) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed a data race in the rename query. [\#5247](https://github.com/ClickHouse/ClickHouse/pull/5247) ([Winter Zhang](https://github.com/zhang2014))
- Temporarily disable LFAlloc. Usage of LFAlloc might lead to a lot of MAP\_FAILED when allocating the UncompressedCache and, as a result, to crashes of queries on highly loaded servers. [cfdba93](https://github.com/ClickHouse/ClickHouse/commit/cfdba938ce22f16efeec504f7f90206a515b1280) ([Danila Kutenin](https://github.com/danlark1))

### ClickHouse release 19.6.2.11, 2019-05-13 {#clickhouse-release-19-6-2-11-2019-05-13}

#### New Features {#new-features-3}

- TTL expressions for columns and tables (see the sketch after this list). [\#4212](https://github.com/ClickHouse/ClickHouse/pull/4212) ([Anton Popov](https://github.com/CurtizJ))
- Added support for `brotli` compression of HTTP responses (Accept-Encoding: br) [\#4388](https://github.com/ClickHouse/ClickHouse/pull/4388) ([Mikhail](https://github.com/fandyushin))
- Added the new function `isValidUTF8` for checking whether a set of bytes is correctly UTF-8 encoded. [\#4934](https://github.com/ClickHouse/ClickHouse/pull/4934) ([Danila Kutenin](https://github.com/danlark1))
- Added the new load balancing policy `first_or_random` which sends queries to the first specified host and, if it's inaccessible, sends queries to random hosts of the shard. Useful for cross-replication topology setups. [\#5012](https://github.com/ClickHouse/ClickHouse/pull/5012) ([nvartolomei](https://github.com/nvartolomei))
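
A minimal sketch of the TTL syntax, with a made-up table and retention periods:

```sql
CREATE TABLE events
(
    d Date,
    payload String TTL d + INTERVAL 7 DAY  -- column TTL: the value is cleared after a week
)
ENGINE = MergeTree
ORDER BY d
TTL d + INTERVAL 30 DAY                    -- table TTL: whole rows expire after a month
```

Column TTL clears individual values, while table TTL removes expired rows during merges.
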
#### Experimental Features {#experimental-features-1}

- Add the setting `index_granularity_bytes` (adaptive index granularity) for the MergeTree\* family of tables (an illustrative CREATE TABLE appears at the end of this release's notes). [\#4826](https://github.com/ClickHouse/ClickHouse/pull/4826) ([alesapin](https://github.com/alesapin))

#### Improvements {#improvements-1}

- Added support for non-constant and negative size and length arguments for the function `substringUTF8`. [\#4989](https://github.com/ClickHouse/ClickHouse/pull/4989) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Disable push-down to the right table in LEFT JOIN, to the left table in RIGHT JOIN, and to both tables in FULL JOIN. This fixes wrong JOIN results in some cases. [\#4846](https://github.com/ClickHouse/ClickHouse/pull/4846) ([Ivan](https://github.com/abyss7))
- `clickhouse-copier`: auto-upload the task configuration from the `--task-file` option [\#4876](https://github.com/ClickHouse/ClickHouse/pull/4876) ([proller](https://github.com/proller))
- Added a typo handler for the storage factory and the table functions factory. [\#4891](https://github.com/ClickHouse/ClickHouse/pull/4891) ([Danila Kutenin](https://github.com/danlark1))
- Support asterisks and qualified asterisks for multiple joins without subqueries [\#4898](https://github.com/ClickHouse/ClickHouse/pull/4898) ([Artem Zuikov](https://github.com/4ertus2))
- Make the missing-column error message more user friendly. [\#4915](https://github.com/ClickHouse/ClickHouse/pull/4915) ([Artem Zuikov](https://github.com/4ertus2))

#### Performance Improvements {#performance-improvements-2}

- Significant speedup of ASOF JOIN [\#4924](https://github.com/ClickHouse/ClickHouse/pull/4924) ([Martijn Bakker](https://github.com/Gladdy))

#### Backward Incompatible Changes {#backward-incompatible-changes}

- The HTTP header `Query-Id` was renamed to `X-ClickHouse-Query-Id` for consistency. [\#4972](https://github.com/ClickHouse/ClickHouse/pull/4972) ([Mikhail](https://github.com/fandyushin))

#### Bug Fixes {#bug-fixes-4}

- Fixed a potential null pointer dereference in `clickhouse-copier`. [\#4900](https://github.com/ClickHouse/ClickHouse/pull/4900) ([proller](https://github.com/proller))
- Fixed an error in a query with JOIN + ARRAY JOIN [\#4938](https://github.com/ClickHouse/ClickHouse/pull/4938) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed hanging on start of the server when a dictionary depends on another dictionary via a database with engine=Dictionary. [\#4962](https://github.com/ClickHouse/ClickHouse/pull/4962) ([Vitaly Baranov](https://github.com/vitlibar))
- Partially fix distributed\_product\_mode = local. It's possible to allow columns of local tables in where/having/order by/… via table aliases. Throw an exception if the table does not have an alias. It's not possible to access the columns without table aliases yet. [\#4986](https://github.com/ClickHouse/ClickHouse/pull/4986) ([Artem Zuikov](https://github.com/4ertus2))
- Fix a potentially wrong result for `SELECT DISTINCT` with `JOIN` [\#5001](https://github.com/ClickHouse/ClickHouse/pull/5001) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed a very rare data race condition that could happen when executing a query with UNION ALL involving at least two SELECTs from system.columns, system.tables, system.parts, system.parts\_tables or tables of the Merge family, while concurrently performing ALTER of columns of the related tables. [\#5189](https://github.com/ClickHouse/ClickHouse/pull/5189) ([alexey-milovidov](https://github.com/alexey-milovidov))

#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-2}

- Fixed test failures when running clickhouse-server on a different host [\#4713](https://github.com/ClickHouse/ClickHouse/pull/4713) ([Vasily Nemkov](https://github.com/Enmk))
- clickhouse-test: disable color control sequences in a non-tty environment. [\#4937](https://github.com/ClickHouse/ClickHouse/pull/4937) ([alesapin](https://github.com/alesapin))
- clickhouse-test: allow using any test database (remove the `test.` qualification where possible) [\#5008](https://github.com/ClickHouse/ClickHouse/pull/5008) ([proller](https://github.com/proller))
- Fix ubsan errors [\#5037](https://github.com/ClickHouse/ClickHouse/pull/5037) ([Vitaly Baranov](https://github.com/vitlibar))
- Yandex LFAlloc was added to ClickHouse to allocate MarkCache and UncompressedCache data in different ways to catch segfaults more reliably [\#4995](https://github.com/ClickHouse/ClickHouse/pull/4995) ([Danila Kutenin](https://github.com/danlark1))
- Python util to help with backports and changelogs. [\#4949](https://github.com/ClickHouse/ClickHouse/pull/4949) ([Ivan](https://github.com/abyss7))
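
The adaptive-granularity setting from the experimental features above can be sketched as follows; the table is made up for the example, and the threshold shown matches what later became the default:

```sql
CREATE TABLE hits_adaptive
(
    dt DateTime,
    url String
)
ENGINE = MergeTree
ORDER BY dt
SETTINGS index_granularity_bytes = 10485760  -- aim for ~10 MiB of data per granule
```
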

## ClickHouse release 19.5 {#clickhouse-release-19-5}

### ClickHouse release 19.5.4.22, 2019-05-13 {#clickhouse-release-19-5-4-22-2019-05-13}

#### Bug Fixes {#bug-fixes-5}

- Fixed a possible crash in bitmap\* functions [\#5220](https://github.com/ClickHouse/ClickHouse/pull/5220) [\#5228](https://github.com/ClickHouse/ClickHouse/pull/5228) ([Andy Yang](https://github.com/andyyzh))
- Fixed a very rare data race condition that could happen when executing a query with UNION ALL involving at least two SELECTs from system.columns, system.tables, system.parts, system.parts\_tables or tables of the Merge family, while concurrently performing ALTER of columns of the related tables. [\#5189](https://github.com/ClickHouse/ClickHouse/pull/5189) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed the error `Set for IN is not created yet in case of using single LowCardinality column in the left part of IN`. This error happened if a LowCardinality column was part of the primary key. \#5031 [\#5154](https://github.com/ClickHouse/ClickHouse/pull/5154) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Modification of the retention function: if a row satisfied both the first and the Nth condition, only the first satisfied condition was added to the data state. Now all conditions that are satisfied in a row of data are added to the data state. [\#5119](https://github.com/ClickHouse/ClickHouse/pull/5119) ([小路](https://github.com/nicelulu))

### ClickHouse release 19.5.3.8, 2019-04-18 {#clickhouse-release-19-5-3-8-2019-04-18}

#### Bug Fixes {#bug-fixes-6}

- Fixed the type of the setting `max_partitions_per_insert_block` from boolean to UInt64. [\#5028](https://github.com/ClickHouse/ClickHouse/pull/5028) ([Mohammad Hossein Sekhavat](https://github.com/mhsekhavat))

### ClickHouse release 19.5.2.6, 2019-04-15 {#clickhouse-release-19-5-2-6-2019-04-15}

#### New Features {#new-features-4}

- [Hyperscan](https://github.com/intel/hyperscan) multiple regular expression matching was added (functions `multiMatchAny`, `multiMatchAnyIndex`, `multiFuzzyMatchAny`, `multiFuzzyMatchAnyIndex`). [\#4780](https://github.com/ClickHouse/ClickHouse/pull/4780), [\#4841](https://github.com/ClickHouse/ClickHouse/pull/4841) ([Danila Kutenin](https://github.com/danlark1))
- The `multiSearchFirstPosition` function was added. [\#4780](https://github.com/ClickHouse/ClickHouse/pull/4780) ([Danila Kutenin](https://github.com/danlark1))
- Implement a predefined per-row expression filter for tables. [\#4792](https://github.com/ClickHouse/ClickHouse/pull/4792) ([Ivan](https://github.com/abyss7))
- A new type of data skipping indices based on Bloom filters (can be used for the `equal`, `in` and `like` functions). [\#4499](https://github.com/ClickHouse/ClickHouse/pull/4499) ([Nikita Vasilev](https://github.com/nikvas0))
- Added `ASOF JOIN` which allows running queries that join to the most recent value known (see the sketch after this list). [\#4774](https://github.com/ClickHouse/ClickHouse/pull/4774) [\#4867](https://github.com/ClickHouse/ClickHouse/pull/4867) [\#4863](https://github.com/ClickHouse/ClickHouse/pull/4863) [\#4875](https://github.com/ClickHouse/ClickHouse/pull/4875) ([Martijn Bakker](https://github.com/Gladdy), [Artem Zuikov](https://github.com/4ertus2))
- Rewrite multiple `COMMA JOIN` to `CROSS JOIN`, then rewrite them to `INNER JOIN` if possible. [\#4661](https://github.com/ClickHouse/ClickHouse/pull/4661) ([Artem Zuikov](https://github.com/4ertus2))
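
A sketch of the idea behind `ASOF JOIN`, using hypothetical `trades` and `quotes` tables; the `ON` form with a trailing inequality is the syntax accepted by current versions:

```sql
-- For each trade, pick the latest quote at or before the trade's timestamp.
SELECT t.symbol, t.ts, t.price, q.bid
FROM trades AS t
ASOF JOIN quotes AS q ON t.symbol = q.symbol AND t.ts >= q.ts
```
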

#### Improvement {#improvement-9}

- `topK` and `topKWeighted` now support a custom `loadFactor` (fixes issue [\#4252](https://github.com/ClickHouse/ClickHouse/issues/4252)). [\#4634](https://github.com/ClickHouse/ClickHouse/pull/4634) ([Kirill Danshin](https://github.com/kirillDanshin))
- Allow using `parallel_replicas_count > 1` even for tables without sampling (the setting is simply ignored for them). In previous versions it led to an exception. [\#4637](https://github.com/ClickHouse/ClickHouse/pull/4637) ([Alexey Elymanov](https://github.com/digitalist))
- Support `CREATE OR REPLACE VIEW`. Allows creating a view or setting a new definition in a single statement (see the sketch after this list). [\#4654](https://github.com/ClickHouse/ClickHouse/pull/4654) ([Boris Granveaud](https://github.com/bgranvea))
- The `Buffer` table engine now supports `PREWHERE`. [\#4671](https://github.com/ClickHouse/ClickHouse/pull/4671) ([Yangkuan Liu](https://github.com/LiuYangkuan))
- Add the ability to start a replicated table without metadata in ZooKeeper, in `readonly` mode. [\#4691](https://github.com/ClickHouse/ClickHouse/pull/4691) ([alesapin](https://github.com/alesapin))
- Fixed flicker of the progress bar in clickhouse-client. The issue was most noticeable when using `FORMAT Null` with streaming queries. [\#4811](https://github.com/ClickHouse/ClickHouse/pull/4811) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Allow disabling functions with the `hyperscan` library on a per-user basis, to limit potentially excessive and uncontrolled resource usage. [\#4816](https://github.com/ClickHouse/ClickHouse/pull/4816) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Log the version number in all errors. [\#4824](https://github.com/ClickHouse/ClickHouse/pull/4824) ([proller](https://github.com/proller))
- Added a restriction to the `multiMatch` functions which requires the string size to fit into `unsigned int`. Also added a limit on the number of arguments to the `multiSearch` functions. [\#4834](https://github.com/ClickHouse/ClickHouse/pull/4834) ([Danila Kutenin](https://github.com/danlark1))
- Improved usage of scratch space and error handling in Hyperscan. [\#4866](https://github.com/ClickHouse/ClickHouse/pull/4866) ([Danila Kutenin](https://github.com/danlark1))
- Fill `system.graphite_retentions` from the table config of `*GraphiteMergeTree` engine tables. [\#4584](https://github.com/ClickHouse/ClickHouse/pull/4584) ([Mikhail f. Shiryaev](https://github.com/Felixoid))
- Rename the `trigramDistance` function to `ngramDistance` and add more functions with `CaseInsensitive` and `UTF`. [\#4602](https://github.com/ClickHouse/ClickHouse/pull/4602) ([Danila Kutenin](https://github.com/danlark1))
- Improved data skipping indices calculation. [\#4640](https://github.com/ClickHouse/ClickHouse/pull/4640) ([Nikita Vasilev](https://github.com/nikvas0))
- Keep ordinary, `DEFAULT`, `MATERIALIZED` and `ALIAS` columns in a single list (fixes issue [\#2867](https://github.com/ClickHouse/ClickHouse/issues/2867)). [\#4707](https://github.com/ClickHouse/ClickHouse/pull/4707) ([Alex Zatelepin](https://github.com/ztlpn))
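
A minimal sketch of the new statement:

```sql
CREATE VIEW v AS SELECT 1 AS x;             -- first definition
CREATE OR REPLACE VIEW v AS SELECT 2 AS x;  -- replaces the definition in one statement
```
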

#### Bug Fix {#bug-fix-26}

- Avoid `std::terminate` in case of memory allocation failure. Now a `std::bad_alloc` exception is thrown as expected. [\#4665](https://github.com/ClickHouse/ClickHouse/pull/4665) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed capnproto reading from a buffer. Sometimes files weren't loaded successfully over HTTP. [\#4674](https://github.com/ClickHouse/ClickHouse/pull/4674) ([Vladislav](https://github.com/smirnov-vs))
- Fix the error `Unknown log entry type: 0` after an `OPTIMIZE TABLE FINAL` query. [\#4683](https://github.com/ClickHouse/ClickHouse/pull/4683) ([Amos Bird](https://github.com/amosbird))
- Wrong arguments to the `hasAny` or `hasAll` functions could lead to a segfault. [\#4698](https://github.com/ClickHouse/ClickHouse/pull/4698) ([alexey-milovidov](https://github.com/alexey-milovidov))
- A deadlock could happen while executing a `DROP DATABASE dictionary` query. [\#4701](https://github.com/ClickHouse/ClickHouse/pull/4701) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix undefined behavior in the `median` and `quantile` functions. [\#4702](https://github.com/ClickHouse/ClickHouse/pull/4702) ([hcz](https://github.com/hczhcz))
- Fix compression level detection when `network_compression_method` is in lowercase. Broken in v19.1. [\#4706](https://github.com/ClickHouse/ClickHouse/pull/4706) ([proller](https://github.com/proller))
- Fixed ignoring of the `UTC` setting (fixes issue [\#4658](https://github.com/ClickHouse/ClickHouse/issues/4658)). [\#4718](https://github.com/ClickHouse/ClickHouse/pull/4718) ([proller](https://github.com/proller))
- Fix `histogram` function behaviour with `Distributed` tables. [\#4741](https://github.com/ClickHouse/ClickHouse/pull/4741) ([olegkv](https://github.com/olegkv))
- Fixed the TSan report `destroy of a locked mutex`. [\#4742](https://github.com/ClickHouse/ClickHouse/pull/4742) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a TSan report about shutdown due to a race condition in system logs usage. Fixed a potential use-after-free on shutdown when part\_log is enabled. [\#4758](https://github.com/ClickHouse/ClickHouse/pull/4758) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix rechecking parts in `ReplicatedMergeTreeAlterThread` in case of error. [\#4772](https://github.com/ClickHouse/ClickHouse/pull/4772) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Arithmetic operations on intermediate aggregate function states were not working for constant arguments (such as subquery results). [\#4776](https://github.com/ClickHouse/ClickHouse/pull/4776) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Always backquote column names in metadata. Otherwise it's impossible to create a table with a column named `index` (the server won't restart due to a malformed `ATTACH` query in the metadata). [\#4782](https://github.com/ClickHouse/ClickHouse/pull/4782) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix a crash in `ALTER ... MODIFY ORDER BY` on a `Distributed` table. [\#4790](https://github.com/ClickHouse/ClickHouse/pull/4790) ([TCeason](https://github.com/TCeason))
- Fix a segfault in `JOIN ON` with enabled `enable_optimize_predicate_expression`. [\#4794](https://github.com/ClickHouse/ClickHouse/pull/4794) ([Winter Zhang](https://github.com/zhang2014))
- Fix a bug with adding an extraneous row after consuming a protobuf message from Kafka. [\#4808](https://github.com/ClickHouse/ClickHouse/pull/4808) ([Vitaly Baranov](https://github.com/vitlibar))
- Fix a crash of `JOIN` on a not-nullable vs nullable column. Fix `NULLs` in right keys in `ANY JOIN` + `join_use_nulls`. [\#4815](https://github.com/ClickHouse/ClickHouse/pull/4815) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed a segmentation fault in `clickhouse-copier`. [\#4835](https://github.com/ClickHouse/ClickHouse/pull/4835) ([proller](https://github.com/proller))
- Fixed a race condition in `SELECT` from `system.tables` if the table is renamed or altered concurrently. [\#4836](https://github.com/ClickHouse/ClickHouse/pull/4836) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a data race when fetching a data part that is already obsolete. [\#4839](https://github.com/ClickHouse/ClickHouse/pull/4839) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a rare data race that can happen during `RENAME` of a MergeTree family table. [\#4844](https://github.com/ClickHouse/ClickHouse/pull/4844) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a segmentation fault in the function `arrayIntersect`. It could happen if the function was called with mixed constant and ordinary arguments. [\#4847](https://github.com/ClickHouse/ClickHouse/pull/4847) ([Lixiang Qian](https://github.com/fancyqlx))
- Fixed reading from an `Array(LowCardinality)` column in the rare case when the column contains a long sequence of empty arrays. [\#4850](https://github.com/ClickHouse/ClickHouse/pull/4850) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fix a crash in `FULL/RIGHT JOIN` when joining on nullable vs not nullable. [\#4855](https://github.com/ClickHouse/ClickHouse/pull/4855) ([Artem Zuikov](https://github.com/4ertus2))
- Fix the `No message received` exception while fetching parts between replicas. [\#4856](https://github.com/ClickHouse/ClickHouse/pull/4856) ([alesapin](https://github.com/alesapin))
- Fixed the `arrayIntersect` function giving a wrong result in case of several repeated values in a single array. [\#4871](https://github.com/ClickHouse/ClickHouse/pull/4871) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fix a race condition during concurrent `ALTER COLUMN` queries that could lead to a server crash (fixes issue [\#3421](https://github.com/ClickHouse/ClickHouse/issues/3421)). [\#4592](https://github.com/ClickHouse/ClickHouse/pull/4592) ([Alex Zatelepin](https://github.com/ztlpn))
- Fix an incorrect result in `FULL/RIGHT JOIN` with a const column. [\#4723](https://github.com/ClickHouse/ClickHouse/pull/4723) ([Artem Zuikov](https://github.com/4ertus2))
- Fix duplicates in `GLOBAL JOIN` with asterisk. [\#4705](https://github.com/ClickHouse/ClickHouse/pull/4705) ([Artem Zuikov](https://github.com/4ertus2))
- Fix parameter deduction in `ALTER MODIFY` of a column `CODEC` when the column type is not specified. [\#4883](https://github.com/ClickHouse/ClickHouse/pull/4883) ([alesapin](https://github.com/alesapin))
- The functions `cutQueryStringAndFragment()` and `queryStringAndFragment()` now work correctly when the `URL` contains a fragment and no query. [\#4894](https://github.com/ClickHouse/ClickHouse/pull/4894) ([Vitaly Baranov](https://github.com/vitlibar))
- Fix a rare bug when the setting `min_bytes_to_use_direct_io` is greater than zero, which occurs when a thread has to seek backward in a column file. [\#4897](https://github.com/ClickHouse/ClickHouse/pull/4897) ([alesapin](https://github.com/alesapin))
- Fix wrong argument types for aggregate functions with `LowCardinality` arguments (fixes issue [\#4919](https://github.com/ClickHouse/ClickHouse/issues/4919)). [\#4922](https://github.com/ClickHouse/ClickHouse/pull/4922) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fix wrong name qualification in `GLOBAL JOIN`. [\#4969](https://github.com/ClickHouse/ClickHouse/pull/4969) ([Artem Zuikov](https://github.com/4ertus2))
- Fix the `toISOWeek` function result for the year 1970. [\#4988](https://github.com/ClickHouse/ClickHouse/pull/4988) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix `DROP`, `TRUNCATE` and `OPTIMIZE` query duplication when executed `ON CLUSTER` for `ReplicatedMergeTree*` family tables. [\#4991](https://github.com/ClickHouse/ClickHouse/pull/4991) ([alesapin](https://github.com/alesapin))

#### Backward Incompatible Change {#backward-incompatible-change-8}

- Rename the setting `insert_sample_with_metadata` to `input_format_defaults_for_omitted_fields`. [\#4771](https://github.com/ClickHouse/ClickHouse/pull/4771) ([Artem Zuikov](https://github.com/4ertus2))
- Added the setting `max_partitions_per_insert_block` (with value 100 by default). If an inserted block contains a larger number of partitions, an exception is thrown. Set it to 0 if you want to remove the limit, which is not recommended (see the sketch after this list). [\#4845](https://github.com/ClickHouse/ClickHouse/pull/4845) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Multi-search functions were renamed (`multiPosition` to `multiSearchAllPositions`, `multiSearch` to `multiSearchAny`, `firstMatch` to `multiSearchFirstIndex`). [\#4780](https://github.com/ClickHouse/ClickHouse/pull/4780) ([Danila Kutenin](https://github.com/danlark1))
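
A sketch of how the new limit behaves:

```sql
-- An INSERT whose block spans more than 100 partitions now throws an exception.
SET max_partitions_per_insert_block = 0;  -- removes the limit (not recommended)
```
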

#### Performance Improvement {#performance-improvement-6}

- Optimize the Volnitsky searcher by inlining, giving about a 5-10% search improvement for queries with many needles or many similar bigrams. [\#4862](https://github.com/ClickHouse/ClickHouse/pull/4862) ([Danila Kutenin](https://github.com/danlark1))
- Fixed a performance issue when the setting `use_uncompressed_cache` is greater than zero, which appeared when all read data was contained in the cache. [\#4913](https://github.com/ClickHouse/ClickHouse/pull/4913) ([alesapin](https://github.com/alesapin))

#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-10}

- Hardening of the debug build: more granular memory mappings and ASLR; add memory protection for the mark cache and the index. This allows finding more memory-stomping bugs in cases where ASan and MSan can't. [\#4632](https://github.com/ClickHouse/ClickHouse/pull/4632) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Add support for the cmake variables `ENABLE_PROTOBUF`, `ENABLE_PARQUET` and `ENABLE_BROTLI` which allow enabling/disabling the above features (same as we can do for librdkafka, mysql, etc). [\#4669](https://github.com/ClickHouse/ClickHouse/pull/4669) ([Silviu Caragea](https://github.com/silviucpp))
- Add the ability to print the process list and stacktraces of all threads if some queries are hung after a test run. [\#4675](https://github.com/ClickHouse/ClickHouse/pull/4675) ([alesapin](https://github.com/alesapin))
- Add retries on the `Connection loss` error in `clickhouse-test`. [\#4682](https://github.com/ClickHouse/ClickHouse/pull/4682) ([alesapin](https://github.com/alesapin))
- Add a FreeBSD build with Vagrant, and a build with the thread sanitizer, to the packager script. [\#4712](https://github.com/ClickHouse/ClickHouse/pull/4712) [\#4748](https://github.com/ClickHouse/ClickHouse/pull/4748) ([alesapin](https://github.com/alesapin))
- Now the user is asked for a password for the `'default'` user during installation. [\#4725](https://github.com/ClickHouse/ClickHouse/pull/4725) ([proller](https://github.com/proller))
- Suppress a warning in the `rdkafka` library. [\#4740](https://github.com/ClickHouse/ClickHouse/pull/4740) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Allow building without ssl. [\#4750](https://github.com/ClickHouse/ClickHouse/pull/4750) ([proller](https://github.com/proller))
- Add a way to launch the clickhouse-server image as a custom user. [\#4753](https://github.com/ClickHouse/ClickHouse/pull/4753) ([Mikhail f. Shiryaev](https://github.com/Felixoid))
- Upgrade contrib boost to 1.69. [\#4793](https://github.com/ClickHouse/ClickHouse/pull/4793) ([proller](https://github.com/proller))
- Disable the usage of `mremap` when compiled with Thread Sanitizer. Surprisingly enough, TSan does not intercept `mremap` (though it does intercept `mmap`, `munmap`), which leads to false positives. Fixed the TSan report in stateful tests. [\#4859](https://github.com/ClickHouse/ClickHouse/pull/4859) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Add a test checking the use of a format schema via the HTTP interface. [\#4864](https://github.com/ClickHouse/ClickHouse/pull/4864) ([Vitaly Baranov](https://github.com/vitlibar))

## ClickHouse release 19.4 {#clickhouse-release-19-4}

### ClickHouse release 19.4.4.33, 2019-04-17 {#clickhouse-release-19-4-4-33-2019-04-17}

#### Bug Fixes {#bug-fixes-7}

- Avoid `std::terminate` in case of memory allocation failure. Now a `std::bad_alloc` exception is thrown as expected. [\#4665](https://github.com/ClickHouse/ClickHouse/pull/4665) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed capnproto reading from a buffer. Sometimes files weren't loaded successfully over HTTP. [\#4674](https://github.com/ClickHouse/ClickHouse/pull/4674) ([Vladislav](https://github.com/smirnov-vs))
- Fix the error `Unknown log entry type: 0` after an `OPTIMIZE TABLE FINAL` query. [\#4683](https://github.com/ClickHouse/ClickHouse/pull/4683) ([Amos Bird](https://github.com/amosbird))
- Wrong arguments to the `hasAny` or `hasAll` functions could lead to a segfault. [\#4698](https://github.com/ClickHouse/ClickHouse/pull/4698) ([alexey-milovidov](https://github.com/alexey-milovidov))
- A deadlock could happen while executing a `DROP DATABASE dictionary` query. [\#4701](https://github.com/ClickHouse/ClickHouse/pull/4701) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix undefined behavior in the `median` and `quantile` functions. [\#4702](https://github.com/ClickHouse/ClickHouse/pull/4702) ([hcz](https://github.com/hczhcz))
- Fix compression level detection when `network_compression_method` is in lowercase. Broken in v19.1. [\#4706](https://github.com/ClickHouse/ClickHouse/pull/4706) ([proller](https://github.com/proller))
- Fixed ignoring of the `UTC` setting (fixes issue [\#4658](https://github.com/ClickHouse/ClickHouse/issues/4658)). [\#4718](https://github.com/ClickHouse/ClickHouse/pull/4718) ([proller](https://github.com/proller))
- Fix `histogram` function behaviour with `Distributed` tables. [\#4741](https://github.com/ClickHouse/ClickHouse/pull/4741) ([olegkv](https://github.com/olegkv))
- Fixed the TSan report `destroy of a locked mutex`. [\#4742](https://github.com/ClickHouse/ClickHouse/pull/4742) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a TSan report about shutdown due to a race condition in system logs usage. Fixed a potential use-after-free on shutdown when part\_log is enabled. [\#4758](https://github.com/ClickHouse/ClickHouse/pull/4758) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix rechecking parts in `ReplicatedMergeTreeAlterThread` in case of error. [\#4772](https://github.com/ClickHouse/ClickHouse/pull/4772) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Arithmetic operations on intermediate aggregate function states were not working for constant arguments (such as subquery results). [\#4776](https://github.com/ClickHouse/ClickHouse/pull/4776) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Always backquote column names in metadata. Otherwise it's impossible to create a table with a column named `index` (the server won't restart due to a malformed `ATTACH` query in the metadata). [\#4782](https://github.com/ClickHouse/ClickHouse/pull/4782) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix a crash in `ALTER ... MODIFY ORDER BY` on a `Distributed` table. [\#4790](https://github.com/ClickHouse/ClickHouse/pull/4790) ([TCeason](https://github.com/TCeason))
- Fix a segfault in `JOIN ON` with enabled `enable_optimize_predicate_expression`. [\#4794](https://github.com/ClickHouse/ClickHouse/pull/4794) ([Winter Zhang](https://github.com/zhang2014))
- Fix a bug with adding an extraneous row after consuming a protobuf message from Kafka. [\#4808](https://github.com/ClickHouse/ClickHouse/pull/4808) ([Vitaly Baranov](https://github.com/vitlibar))
- Fixed a segmentation fault in `clickhouse-copier`. [\#4835](https://github.com/ClickHouse/ClickHouse/pull/4835) ([proller](https://github.com/proller))
- Fixed a race condition in `SELECT` from `system.tables` if the table is renamed or altered concurrently. [\#4836](https://github.com/ClickHouse/ClickHouse/pull/4836) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a data race when fetching a data part that is already obsolete. [\#4839](https://github.com/ClickHouse/ClickHouse/pull/4839) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a rare data race that can happen during `RENAME` of a MergeTree family table. [\#4844](https://github.com/ClickHouse/ClickHouse/pull/4844) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a segmentation fault in the function `arrayIntersect`. It could happen if the function was called with mixed constant and ordinary arguments. [\#4847](https://github.com/ClickHouse/ClickHouse/pull/4847) ([Lixiang Qian](https://github.com/fancyqlx))
- Fixed reading from an `Array(LowCardinality)` column in the rare case when the column contains a long sequence of empty arrays. [\#4850](https://github.com/ClickHouse/ClickHouse/pull/4850) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fix the `No message received` exception while fetching parts between replicas. [\#4856](https://github.com/ClickHouse/ClickHouse/pull/4856) ([alesapin](https://github.com/alesapin))
- Fixed the `arrayIntersect` function giving a wrong result in case of several repeated values in a single array. [\#4871](https://github.com/ClickHouse/ClickHouse/pull/4871) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fix a race condition during concurrent `ALTER COLUMN` queries that could lead to a server crash (fixes issue [\#3421](https://github.com/ClickHouse/ClickHouse/issues/3421)). [\#4592](https://github.com/ClickHouse/ClickHouse/pull/4592) ([Alex Zatelepin](https://github.com/ztlpn))
- Fix parameter deduction in `ALTER MODIFY` of a column `CODEC` when the column type is not specified. [\#4883](https://github.com/ClickHouse/ClickHouse/pull/4883) ([alesapin](https://github.com/alesapin))
- The functions `cutQueryStringAndFragment()` and `queryStringAndFragment()` now work correctly when the `URL` contains a fragment and no query. [\#4894](https://github.com/ClickHouse/ClickHouse/pull/4894) ([Vitaly Baranov](https://github.com/vitlibar))
- Fix a rare bug when the setting `min_bytes_to_use_direct_io` is greater than zero, which occurs when a thread has to seek backward in a column file. [\#4897](https://github.com/ClickHouse/ClickHouse/pull/4897) ([alesapin](https://github.com/alesapin))
- Fix wrong argument types for aggregate functions with `LowCardinality` arguments (fixes issue [\#4919](https://github.com/ClickHouse/ClickHouse/issues/4919)). [\#4922](https://github.com/ClickHouse/ClickHouse/pull/4922) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fix the `toISOWeek` function result for the year 1970. [\#4988](https://github.com/ClickHouse/ClickHouse/pull/4988) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix `DROP`, `TRUNCATE` and `OPTIMIZE` query duplication when executed `ON CLUSTER` for `ReplicatedMergeTree*` family tables. [\#4991](https://github.com/ClickHouse/ClickHouse/pull/4991) ([alesapin](https://github.com/alesapin))

#### Improvements {#improvements-2}

- Keep ordinary, `DEFAULT`, `MATERIALIZED` and `ALIAS` columns in a single list (fixes issue [\#2867](https://github.com/ClickHouse/ClickHouse/issues/2867); see the sketch below). [\#4707](https://github.com/ClickHouse/ClickHouse/pull/4707) ([Alex Zatelepin](https://github.com/ztlpn))
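
A minimal sketch of the three column kinds kept in that single list, with a made-up table:

```sql
CREATE TABLE t
(
    a UInt32,
    b UInt32 DEFAULT a * 2,       -- filled in on INSERT when omitted
    c UInt32 MATERIALIZED a + 1,  -- always computed and stored, not returned by SELECT *
    d UInt32 ALIAS a + 2          -- computed at SELECT time, never stored
)
ENGINE = MergeTree
ORDER BY a
```
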

### ClickHouse release 19.4.3.11, 2019-04-02 {#clickhouse-release-19-4-3-11-2019-04-02}

#### Bug Fixes {#bug-fixes-8}

- Fix a crash in `FULL/RIGHT JOIN` when joining on nullable vs not nullable. [\#4855](https://github.com/ClickHouse/ClickHouse/pull/4855) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed a segmentation fault in `clickhouse-copier`. [\#4835](https://github.com/ClickHouse/ClickHouse/pull/4835) ([proller](https://github.com/proller))

#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-11}

- Add a way to launch the clickhouse-server image as a custom user. [\#4753](https://github.com/ClickHouse/ClickHouse/pull/4753) ([Mikhail f. Shiryaev](https://github.com/Felixoid))

### ClickHouse release 19.4.2.7, 2019-03-30 {#clickhouse-release-19-4-2-7-2019-03-30}

#### Bug Fixes {#bug-fixes-9}

- Fixed reading from an `Array(LowCardinality)` column in the rare case when the column contains a long sequence of empty arrays. [\#4850](https://github.com/ClickHouse/ClickHouse/pull/4850) ([Nikolai Kochetov](https://github.com/KochetovNicolai))

### ClickHouse release 19.4.1.3, 2019-03-19 {#clickhouse-release-19-4-1-3-2019-03-19}

#### Bug Fixes {#bug-fixes-10}

- Fixed remote queries which contain both `LIMIT BY` and `LIMIT`. Previously, if `LIMIT BY` and `LIMIT` were used in a remote query, `LIMIT` could happen before `LIMIT BY`, which led to a too-filtered result (see the sketch below). [\#4708](https://github.com/ClickHouse/ClickHouse/pull/4708) ([Constantin S. Pan](https://github.com/kvap))
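
A sketch of the interaction, assuming a hypothetical `hits` table; `LIMIT BY` must be applied before the final `LIMIT`:

```sql
SELECT domain, page
FROM hits
ORDER BY domain
LIMIT 2 BY domain  -- at most two rows per domain, applied first...
LIMIT 10           -- ...then at most ten rows overall
```
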

### ClickHouse release 19.4.0.49, 2019-03-09 {#clickhouse-release-19-4-0-49-2019-03-09}

#### New Features {#new-features-5}

- Added full support for the `Protobuf` format (input and output, nested data structures). [\#4174](https://github.com/ClickHouse/ClickHouse/pull/4174) [\#4493](https://github.com/ClickHouse/ClickHouse/pull/4493) ([Vitaly Baranov](https://github.com/vitlibar))
- Added bitmap functions with Roaring Bitmaps. [\#4207](https://github.com/ClickHouse/ClickHouse/pull/4207) ([Andy Yang](https://github.com/andyyzh)) [\#4568](https://github.com/ClickHouse/ClickHouse/pull/4568) ([Vitaly Baranov](https://github.com/vitlibar))
- Parquet format support. [\#4448](https://github.com/ClickHouse/ClickHouse/pull/4448) ([proller](https://github.com/proller))
- N-gram distance was added for fuzzy string comparison. It is similar to q-gram metrics in the R language. [\#4466](https://github.com/ClickHouse/ClickHouse/pull/4466) ([Danila Kutenin](https://github.com/danlark1))
- Combine rules for graphite rollup from dedicated aggregation and retention patterns. [\#4426](https://github.com/ClickHouse/ClickHouse/pull/4426) ([Mikhail f. Shiryaev](https://github.com/Felixoid))
- Added `max_execution_speed` and `max_execution_speed_bytes` to limit resource usage. Added the `min_execution_speed_bytes` setting to complement `min_execution_speed`. [\#4430](https://github.com/ClickHouse/ClickHouse/pull/4430) ([Winter Zhang](https://github.com/zhang2014))
- Implemented the function `flatten` (see the sketch after this list). [\#4555](https://github.com/ClickHouse/ClickHouse/pull/4555) [\#4409](https://github.com/ClickHouse/ClickHouse/pull/4409) ([alexey-milovidov](https://github.com/alexey-milovidov), [kzon](https://github.com/kzon))
- Added the functions `arrayEnumerateDenseRanked` and `arrayEnumerateUniqRanked` (it's like `arrayEnumerateUniq` but allows fine-tuning the array depth, to look inside multidimensional arrays). [\#4475](https://github.com/ClickHouse/ClickHouse/pull/4475) ([proller](https://github.com/proller)) [\#4601](https://github.com/ClickHouse/ClickHouse/pull/4601) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Multiple JOINS with some restrictions: no asterisks, no complex aliases in ON/WHERE/GROUP BY/… [\#4462](https://github.com/ClickHouse/ClickHouse/pull/4462) ([Artem Zuikov](https://github.com/4ertus2))
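
A minimal sketch of `flatten`; the literal array is made up for the example:

```sql
-- collapses nested arrays of any depth into a flat array
SELECT flatten([[[1, 2]], [[3], [4, 5]]]) AS flat  -- [1, 2, 3, 4, 5]
```
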

#### Bug Fixes {#bug-fixes-11}

- This release also contains all bug fixes from 19.3 and 19.1.
- Fixed a bug in data skipping indices: the order of granules after INSERT was incorrect. [\#4407](https://github.com/ClickHouse/ClickHouse/pull/4407) ([Nikita Vasilev](https://github.com/nikvas0))
- Fixed the `set` index for `Nullable` and `LowCardinality` columns. Before this, a `set` index with a `Nullable` or `LowCardinality` column led to the error `Data type must be deserialized with multiple streams` while selecting. [\#4594](https://github.com/ClickHouse/ClickHouse/pull/4594) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Correctly set update\_time on a full `executable` dictionary update. [\#4551](https://github.com/ClickHouse/ClickHouse/pull/4551) ([Tema Novikov](https://github.com/temoon))
- Fixed the broken progress bar in 19.3. [\#4627](https://github.com/ClickHouse/ClickHouse/pull/4627) ([filimonov](https://github.com/filimonov))
- Fixed inconsistent values of MemoryTracker when a memory region was shrunk, in certain cases. [\#4619](https://github.com/ClickHouse/ClickHouse/pull/4619) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed undefined behaviour in ThreadPool. [\#4612](https://github.com/ClickHouse/ClickHouse/pull/4612) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a very rare crash with the message `mutex lock failed: Invalid argument` that could happen when a MergeTree table was dropped concurrently with a SELECT. [\#4608](https://github.com/ClickHouse/ClickHouse/pull/4608) ([Alex Zatelepin](https://github.com/ztlpn))
- ODBC driver compatibility with the `LowCardinality` data type. [\#4381](https://github.com/ClickHouse/ClickHouse/pull/4381) ([proller](https://github.com/proller))
- FreeBSD: fixup for the `AIOcontextPool: Found io_event with unknown id 0` error. [\#4438](https://github.com/ClickHouse/ClickHouse/pull/4438) ([urgordeadbeef](https://github.com/urgordeadbeef))
- The `system.part_log` table was created regardless of configuration. [\#4483](https://github.com/ClickHouse/ClickHouse/pull/4483) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix undefined behaviour in the `dictIsIn` function for cache dictionaries. [\#4515](https://github.com/ClickHouse/ClickHouse/pull/4515) ([alesapin](https://github.com/alesapin))
- Fixed a deadlock when a SELECT query locks the same table multiple times (e.g. from different threads or when executing multiple subqueries) and there is a concurrent DDL query. [\#4535](https://github.com/ClickHouse/ClickHouse/pull/4535) ([Alex Zatelepin](https://github.com/ztlpn))
- Disable compile\_expressions by default until we get our own `llvm` contrib and can test it with `clang` and `asan`. [\#4579](https://github.com/ClickHouse/ClickHouse/pull/4579) ([alesapin](https://github.com/alesapin))
- Prevent `std::terminate` when `invalidate_query` for a `clickhouse` external dictionary source has returned a wrong resultset (empty, or more than one row, or more than one column). Fixed the issue where `invalidate_query` was performed every five seconds regardless of the `lifetime`. [\#4583](https://github.com/ClickHouse/ClickHouse/pull/4583) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Avoid a deadlock when the `invalidate_query` for a dictionary with a `clickhouse` source involved the `system.dictionaries` table or the `Dictionaries` database (rare case). [\#4599](https://github.com/ClickHouse/ClickHouse/pull/4599) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed CROSS JOIN with an empty WHERE. [\#4598](https://github.com/ClickHouse/ClickHouse/pull/4598) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed a segfault in the function «replicate» when a constant argument is passed. [\#4603](https://github.com/ClickHouse/ClickHouse/pull/4603) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix lambda functions with the predicate optimizer. [\#4408](https://github.com/ClickHouse/ClickHouse/pull/4408) ([Winter Zhang](https://github.com/zhang2014))
- Multiple JOINs, multiple fixes. [\#4595](https://github.com/ClickHouse/ClickHouse/pull/4595) ([Artem Zuikov](https://github.com/4ertus2))

#### Improvements {#improvements-3}

- Support aliases in the JOIN ON section for right table columns. [\#4412](https://github.com/ClickHouse/ClickHouse/pull/4412) ([Artem Zuikov](https://github.com/4ertus2))
- The result of multiple JOINs needs correct result names to be used in subselects. Replace flat aliases with source names in the result. [\#4474](https://github.com/ClickHouse/ClickHouse/pull/4474) ([Artem Zuikov](https://github.com/4ertus2))
- Improve push-down logic for joined statements. [\#4387](https://github.com/ClickHouse/ClickHouse/pull/4387) ([Ivan](https://github.com/abyss7))

#### Performance Improvements {#performance-improvements-3}

- Improved heuristics of the «move to PREWHERE» optimization. [\#4405](https://github.com/ClickHouse/ClickHouse/pull/4405) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Use proper lookup tables that use HashTable's API for 8-bit and 16-bit keys. [\#4536](https://github.com/ClickHouse/ClickHouse/pull/4536) ([Amos Bird](https://github.com/amosbird))
- Improved performance of string comparison. [\#4564](https://github.com/ClickHouse/ClickHouse/pull/4564) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Clean up the distributed DDL queue in a separate thread so that it doesn't slow down the main loop that processes distributed DDL tasks. [\#4502](https://github.com/ClickHouse/ClickHouse/pull/4502) ([Alex Zatelepin](https://github.com/ztlpn))
- When `min_bytes_to_use_direct_io` is set to 1, not every file was opened in O\_DIRECT mode because the data size to read was sometimes underestimated by the size of one compressed block (see the note below). [\#4526](https://github.com/ClickHouse/ClickHouse/pull/4526) ([alexey-milovidov](https://github.com/alexey-milovidov))
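
For illustration, the setting can also be raised so that only genuinely large reads bypass the page cache; the threshold here is made up for the example:

```sql
SET min_bytes_to_use_direct_io = 10737418240;  -- ~10 GiB; larger reads use O_DIRECT
```
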

#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-12}

- Added support for clang-9 [\#4604](https://github.com/ClickHouse/ClickHouse/pull/4604) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fix wrong `__asm__` instructions (again) [\#4621](https://github.com/ClickHouse/ClickHouse/pull/4621) ([Konstantin Podshumok](https://github.com/podshumok))
- Add the ability to specify settings for `clickhouse-performance-test` from the command line. [\#4437](https://github.com/ClickHouse/ClickHouse/pull/4437) ([alesapin](https://github.com/alesapin))
- Add dictionaries tests to the integration tests. [\#4477](https://github.com/ClickHouse/ClickHouse/pull/4477) ([alesapin](https://github.com/alesapin))
- Added queries from the benchmark on the website to automated performance tests. [\#4496](https://github.com/ClickHouse/ClickHouse/pull/4496) ([alexey-milovidov](https://github.com/alexey-milovidov))
- `xxhash.h` does not exist in external lz4 because it is an implementation detail and its symbols are namespaced with the `XXH_NAMESPACE` macro. When lz4 is external, xxHash has to be external too, and the dependents have to link to it. [\#4495](https://github.com/ClickHouse/ClickHouse/pull/4495) ([Orivej Desh](https://github.com/orivej))
- Fixed a case when the `quantileTiming` aggregate function could be called with a negative or floating point argument (this fixes the fuzz test with the undefined behaviour sanitizer). [\#4506](https://github.com/ClickHouse/ClickHouse/pull/4506) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Spelling error correction. [\#4531](https://github.com/ClickHouse/ClickHouse/pull/4531) ([sdk2](https://github.com/sdk2))
- Fix compilation on Mac. [\#4371](https://github.com/ClickHouse/ClickHouse/pull/4371) ([Vitaly Baranov](https://github.com/vitlibar))
- Build fixes for FreeBSD and various unusual build configurations. [\#4444](https://github.com/ClickHouse/ClickHouse/pull/4444) ([proller](https://github.com/proller))

## ClickHouse release 19.3 {#clickhouse-release-19-3}

### ClickHouse release 19.3.9.1, 2019-04-02 {#clickhouse-release-19-3-9-1-2019-04-02}

#### Bug Fixes {#bug-fixes-12}

- Fix a crash in `FULL/RIGHT JOIN` when joining on nullable vs not nullable. [\#4855](https://github.com/ClickHouse/ClickHouse/pull/4855) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed a segmentation fault in `clickhouse-copier`. [\#4835](https://github.com/ClickHouse/ClickHouse/pull/4835) ([proller](https://github.com/proller))
- Fixed reading from an `Array(LowCardinality)` column in the rare case when the column contains a long sequence of empty arrays. [\#4850](https://github.com/ClickHouse/ClickHouse/pull/4850) ([Nikolai Kochetov](https://github.com/KochetovNicolai))

#### Build/Testing/Packaging Improvement {#buildtestingpackaging-improvement-13}

- Add a way to launch the clickhouse-server image as a custom user [\#4753](https://github.com/ClickHouse/ClickHouse/pull/4753) ([Mikhail f. Shiryaev](https://github.com/Felixoid))

### ClickHouse release 19.3.7, 2019-03-12 {#clickhouse-release-19-3-7-2019-03-12}

#### Bug fixes {#bug-fixes-13}

- Fixed a bug in \#3920. This bug manifests itself as random cache corruption (messages `Unknown codec family code`, `Cannot seek through file`) and segfaults. It first appeared in version 19.1 and is present in versions up to 19.1.10 and 19.3.6. [\#4623](https://github.com/ClickHouse/ClickHouse/pull/4623) ([alexey-milovidov](https://github.com/alexey-milovidov))

### ClickHouse release 19.3.6, 2019-03-02 {#clickhouse-release-19-3-6-2019-03-02}

#### Bug fixes {#bug-fixes-14}

- When there are more than 1000 threads in a thread pool, `std::terminate` may happen on thread exit. [Azat Khuzhin](https://github.com/azat) [\#4485](https://github.com/ClickHouse/ClickHouse/pull/4485) [\#4505](https://github.com/ClickHouse/ClickHouse/pull/4505) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Now it's possible to create `ReplicatedMergeTree*` tables with comments on columns without defaults, and tables with column codecs without comments and defaults. Also fixed the comparison of codecs. [\#4523](https://github.com/ClickHouse/ClickHouse/pull/4523) ([alesapin](https://github.com/alesapin))
- Fixed a crash on JOIN with an array or tuple. [\#4552](https://github.com/ClickHouse/ClickHouse/pull/4552) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed a crash in clickhouse-copier with the message `ThreadStatus not created`. [\#4540](https://github.com/ClickHouse/ClickHouse/pull/4540) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed a hangup on server shutdown if distributed DDLs were used. [\#4472](https://github.com/ClickHouse/ClickHouse/pull/4472) ([Alex Zatelepin](https://github.com/ztlpn))
- Incorrect column numbers were printed in the error message about text format parsing for columns with numbers greater than 10. [\#4484](https://github.com/ClickHouse/ClickHouse/pull/4484) ([alexey-milovidov](https://github.com/alexey-milovidov))

#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-3}

- Fixed the build with AVX enabled. [\#4527](https://github.com/ClickHouse/ClickHouse/pull/4527) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Enable extended accounting and IO accounting based on a well-known version instead of the kernel under which it is compiled. [\#4541](https://github.com/ClickHouse/ClickHouse/pull/4541) ([nvartolomei](https://github.com/nvartolomei))
- Allow skipping the setting of core\_dump.size\_limit, warning instead of throwing if setting the limit fails. [\#4473](https://github.com/ClickHouse/ClickHouse/pull/4473) ([proller](https://github.com/proller))
- Removed the `inline` tags of `void readBinary(...)` in `Field.cpp`. Also merged redundant `namespace DB` blocks. [\#4530](https://github.com/ClickHouse/ClickHouse/pull/4530) ([hcz](https://github.com/hczhcz))

### ClickHouse release 19.3.5, 2019-02-21 {#clickhouse-release-19-3-5-2019-02-21}

#### Bug fixes {#bug-fixes-15}

- Fixed a bug with the processing of large HTTP insert queries. [\#4454](https://github.com/ClickHouse/ClickHouse/pull/4454) ([alesapin](https://github.com/alesapin))
- Fixed backward incompatibility with old versions due to a wrong implementation of the `send_logs_level` setting. [\#4445](https://github.com/ClickHouse/ClickHouse/pull/4445) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed backward incompatibility of the table function `remote`, introduced with column comments (see the sketch below). [\#4446](https://github.com/ClickHouse/ClickHouse/pull/4446) ([alexey-milovidov](https://github.com/alexey-milovidov))
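
A minimal sketch of the `remote` table function mentioned above; the host is a placeholder:

```sql
-- query a table on another server without defining a Distributed table
SELECT * FROM remote('example-host:9000', system.one)
```
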
- Fixed backward incompatibility of the `remote` table function introduced with column comments. [\#4446](https://github.com/ClickHouse/ClickHouse/pull/4446) ([alexey-milovidov](https://github.com/alexey-milovidov))

### ClickHouse release 19.3.4, 2019-02-16 {#clickhouse-release-19-3-4-2019-02-16}

#### Improvements {#improvements-4}

- Table index size is not accounted against memory limits when performing an `ATTACH TABLE` query. This avoids the possibility that a table cannot be attached after having been detached. [\#4396](https://github.com/ClickHouse/ClickHouse/pull/4396) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Slightly raised the limit on the maximum string and array size received from ZooKeeper. This allows continuing to work with an increased `CLIENT_JVMFLAGS=-Djute.maxbuffer=...` on ZooKeeper. [\#4398](https://github.com/ClickHouse/ClickHouse/pull/4398) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Allow repairing an abandoned replica even if it already has a huge number of nodes in its queue. [\#4399](https://github.com/ClickHouse/ClickHouse/pull/4399) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added one required argument to the `SET` index (the maximum number of stored rows). [\#4386](https://github.com/ClickHouse/ClickHouse/pull/4386) ([Nikita Vasilev](https://github.com/nikvas0))

#### Bug fixes {#bug-fixes-16}

- Fixed the `WITH ROLLUP` result for a GROUP BY by a single `LowCardinality` key. [\#4384](https://github.com/ClickHouse/ClickHouse/pull/4384) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Fixed a bug in the set index (dropping a granule if it contains more than `max_rows` rows). [\#4386](https://github.com/ClickHouse/ClickHouse/pull/4386) ([Nikita Vasilev](https://github.com/nikvas0))
- A lot of FreeBSD build fixes. [\#4397](https://github.com/ClickHouse/ClickHouse/pull/4397) ([proller](https://github.com/proller))
- Fixed alias substitution in queries with a subquery containing the same alias (issue [\#4110](https://github.com/ClickHouse/ClickHouse/issues/4110)). [\#4351](https://github.com/ClickHouse/ClickHouse/pull/4351) ([Artem Zuikov](https://github.com/4ertus2))

#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-4}

- Added the ability to run `clickhouse-server` for stateless tests in a docker image. [\#4347](https://github.com/ClickHouse/ClickHouse/pull/4347) ([Vasily Nemkov](https://github.com/Enmk))

### ClickHouse release 19.3.3, 2019-02-13 {#clickhouse-release-19-3-3-2019-02-13}

#### New features {#new-features-6}

- Added the `KILL MUTATION` statement that allows removing mutations that are stuck for some reason (see the sketch after this list). Added `latest_failed_part`, `latest_fail_time`, `latest_fail_reason` fields to the `system.mutations` table for easier troubleshooting. [\#4287](https://github.com/ClickHouse/ClickHouse/pull/4287) ([Alex Zatelepin](https://github.com/ztlpn))
- Added the aggregate function `entropy` which computes Shannon entropy. [\#4238](https://github.com/ClickHouse/ClickHouse/pull/4238) ([Quid37](https://github.com/Quid37))
- Added the ability to send queries `INSERT INTO tbl VALUES (....` to the server without splitting them into `query` and `data` parts. [\#4301](https://github.com/ClickHouse/ClickHouse/pull/4301) ([alesapin](https://github.com/alesapin))
- A generic implementation of the `arrayWithConstant` function was added. [\#4322](https://github.com/ClickHouse/ClickHouse/pull/4322) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Implemented the `NOT BETWEEN` comparison operator. [\#4228](https://github.com/ClickHouse/ClickHouse/pull/4228) ([Dmitry Naumov](https://github.com/nezed))
- Implemented `sumMapFiltered` in order to be able to limit the number of keys for which values will be summed by `sumMap` (illustrated in the sketch after this list). [\#4129](https://github.com/ClickHouse/ClickHouse/pull/4129) ([Léo Ercolanelli](https://github.com/ercolanelli-leo))
- Added support of `Nullable` types in the `mysql` table function. [\#4198](https://github.com/ClickHouse/ClickHouse/pull/4198) ([Emmanuel Donin de Rosière](https://github.com/edonin))
- Support for arbitrary constant expressions in the `LIMIT` clause. [\#4246](https://github.com/ClickHouse/ClickHouse/pull/4246) ([k3box](https://github.com/k3box))
- Added the `topKWeighted` aggregate function that takes an additional argument with an (unsigned integer) weight. [\#4245](https://github.com/ClickHouse/ClickHouse/pull/4245) ([Andrew Golman](https://github.com/andrewgolman))
- `StorageJoin` now supports the `join_any_take_last_row` setting that allows overwriting existing values of the same key. [\#3973](https://github.com/ClickHouse/ClickHouse/pull/3973) ([Amos Bird](https://github.com/amosbird))
- Added the `toStartOfInterval` function (see the sketch after this list). [\#4304](https://github.com/ClickHouse/ClickHouse/pull/4304) ([Vitaly Baranov](https://github.com/vitlibar))
- Added the `RowBinaryWithNamesAndTypes` format. [\#4200](https://github.com/ClickHouse/ClickHouse/pull/4200) ([Oleg V. Kozlyuk](https://github.com/DarkWanderer))
- Added the `IPv4` and `IPv6` data types. More effective implementations of `IPv*` functions. [\#3669](https://github.com/ClickHouse/ClickHouse/pull/3669) ([Vasily Nemkov](https://github.com/Enmk))
- Added the `toStartOfTenMinutes()` function. [\#4298](https://github.com/ClickHouse/ClickHouse/pull/4298) ([Vitaly Baranov](https://github.com/vitlibar))
- Added the `Protobuf` output format. [\#4005](https://github.com/ClickHouse/ClickHouse/pull/4005) [\#4158](https://github.com/ClickHouse/ClickHouse/pull/4158) ([Vitaly Baranov](https://github.com/vitlibar))
- Added brotli support for the HTTP interface for data import (INSERTs). [\#4235](https://github.com/ClickHouse/ClickHouse/pull/4235) ([Mikhail](https://github.com/fandyushin))
- Added hints when the user makes a typo in a function name or data type in the command-line client. [\#4239](https://github.com/ClickHouse/ClickHouse/pull/4239) ([Danila Kutenin](https://github.com/danlark1))
- Added `Query-Id` to the server's HTTP response header. [\#4231](https://github.com/ClickHouse/ClickHouse/pull/4231) ([Mikhail](https://github.com/fandyushin))
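
A minimal sketch of three of the features above. These queries are not taken from the original release notes; the database name `db`, table name `t`, columns `k`/`v`, and the mutation id are hypothetical:

```sql
-- Drop a stuck mutation of a hypothetical table db.t (KILL MUTATION, #4287).
-- The mutation_id value shown here is an invented example.
KILL MUTATION WHERE database = 'db' AND table = 't' AND mutation_id = 'mutation_3.txt';

-- Sum values only for the listed keys (sumMapFiltered, #4129);
-- k and v are assumed to be Array columns of equal length.
SELECT sumMapFiltered([1, 4])(k, v) FROM db.t;

-- Round a timestamp down to a 15-minute boundary (toStartOfInterval, #4304).
SELECT toStartOfInterval(toDateTime('2019-02-13 14:42:17'), INTERVAL 15 minute);
-- Expected result: 2019-02-13 14:30:00.
```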

#### Experimental features {#experimental-features-2}

- Added `minmax` and `set` data skipping indices for the MergeTree family of table engines (see the sketch after this list). [\#4143](https://github.com/ClickHouse/ClickHouse/pull/4143) ([Nikita Vasilev](https://github.com/nikvas0))
- Added conversion of `CROSS JOIN` to `INNER JOIN` if possible. [\#4221](https://github.com/ClickHouse/ClickHouse/pull/4221) [\#4266](https://github.com/ClickHouse/ClickHouse/pull/4266) ([Artem Zuikov](https://github.com/4ertus2))
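
A minimal sketch of a table with data skipping indices. The table and column names are hypothetical, and since the feature was experimental at the time it may additionally require enabling an experimental setting (such as `allow_experimental_data_skipping_indices`):

```sql
CREATE TABLE skip_index_demo
(
    u64 UInt64,
    s String,
    -- skip granules whose min/max range of u64 cannot match the filter
    INDEX idx_minmax u64 TYPE minmax GRANULARITY 3,
    -- skip granules whose set of distinct s values (up to 1000) cannot match
    INDEX idx_set s TYPE set(1000) GRANULARITY 4
)
ENGINE = MergeTree()
ORDER BY u64;
```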

#### Bug fixes {#bug-fixes-17}

- Fixed `Not found column` for duplicate columns in the `JOIN ON` section. [\#4279](https://github.com/ClickHouse/ClickHouse/pull/4279) ([Artem Zuikov](https://github.com/4ertus2))
- Made the `START REPLICATED SENDS` command actually start replicated sends. [\#4229](https://github.com/ClickHouse/ClickHouse/pull/4229) ([nvartolomei](https://github.com/nvartolomei))
- Fixed execution of aggregate functions with `Array(LowCardinality)` arguments. [\#4055](https://github.com/ClickHouse/ClickHouse/pull/4055) ([KochetovNicolai](https://github.com/KochetovNicolai))
- Fixed wrong behaviour when performing an `INSERT ... SELECT ... FROM file(...)` query where the file has `CSVWithNames` or `TSVWithNames` format and the first data row is missing. [\#4297](https://github.com/ClickHouse/ClickHouse/pull/4297) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed an error on dictionary reload if the dictionary is not available. This bug appeared in 19.1.6. [\#4188](https://github.com/ClickHouse/ClickHouse/pull/4188) ([proller](https://github.com/proller))
- Fixed `ALL JOIN` with duplicates in the right table. [\#4184](https://github.com/ClickHouse/ClickHouse/pull/4184) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed a segmentation fault with `use_uncompressed_cache=1` and an exception with a wrong uncompressed size. This bug appeared in 19.1.6. [\#4186](https://github.com/ClickHouse/ClickHouse/pull/4186) ([alesapin](https://github.com/alesapin))
- Fixed a `compile_expressions` bug with the comparison of big (larger than int16) dates. [\#4341](https://github.com/ClickHouse/ClickHouse/pull/4341) ([alesapin](https://github.com/alesapin))
- Fixed an infinite loop when selecting from the table function `numbers(0)`. [\#4280](https://github.com/ClickHouse/ClickHouse/pull/4280) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Temporarily disabled predicate optimization for `ORDER BY`. [\#3890](https://github.com/ClickHouse/ClickHouse/pull/3890) ([Winter Zhang](https://github.com/zhang2014))
- Fixed an `Illegal instruction` error when using base64 functions on old CPUs. This error was reproduced only when ClickHouse was compiled with gcc-8. [\#4275](https://github.com/ClickHouse/ClickHouse/pull/4275) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a `No message received` error when interacting with a PostgreSQL ODBC driver through a TLS connection. Also fixed a segfault when using the MySQL ODBC driver. [\#4170](https://github.com/ClickHouse/ClickHouse/pull/4170) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed an incorrect result when `Date` and `DateTime` arguments are used in branches of a conditional operator (the `if` function). Added a generic case for the `if` function. [\#4243](https://github.com/ClickHouse/ClickHouse/pull/4243) ([alexey-milovidov](https://github.com/alexey-milovidov))
- ClickHouse dictionaries are now loaded within the `clickhouse` process. [\#4166](https://github.com/ClickHouse/ClickHouse/pull/4166) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a deadlock when a `SELECT` from a table with the `File` engine was retried after a `No such file or directory` error. [\#4161](https://github.com/ClickHouse/ClickHouse/pull/4161) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a race condition when selecting from `system.tables` may give a `table doesn't exist` error. [\#4313](https://github.com/ClickHouse/ClickHouse/pull/4313) ([alexey-milovidov](https://github.com/alexey-milovidov))
- `clickhouse-client` could segfault on exit while loading data for command-line suggestions if it was run in interactive mode. [\#4317](https://github.com/ClickHouse/ClickHouse/pull/4317) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a bug when executing mutations containing `IN` operators produced incorrect results. [\#4099](https://github.com/ClickHouse/ClickHouse/pull/4099) ([Alex Zatelepin](https://github.com/ztlpn))
- Fixed an error: if there is a database with the `Dictionary` engine, all dictionaries are force-loaded at server startup, and if there is a dictionary with a ClickHouse source from localhost, that dictionary cannot load. [\#4255](https://github.com/ClickHouse/ClickHouse/pull/4255) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed an error when the creation of system logs was retried at server shutdown. [\#4254](https://github.com/ClickHouse/ClickHouse/pull/4254) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Correctly return the right type and properly handle locks in the `joinGet` function. [\#4153](https://github.com/ClickHouse/ClickHouse/pull/4153) ([Amos Bird](https://github.com/amosbird))
- Added the `sumMapWithOverflow` function. [\#4151](https://github.com/ClickHouse/ClickHouse/pull/4151) ([Léo Ercolanelli](https://github.com/ercolanelli-leo))
- Fixed a segfault with `allow_experimental_multiple_joins_emulation`. [52de2c](https://github.com/ClickHouse/ClickHouse/commit/52de2cd927f7b5257dd67e175f0a5560a48840d0) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed a bug with incorrect `Date` and `DateTime` comparison. [\#4237](https://github.com/ClickHouse/ClickHouse/pull/4237) ([valexey](https://github.com/valexey))
- Fixed a fuzz test under the undefined behaviour sanitizer: added a parameter type check for the `quantile*Weighted` family of functions. [\#4145](https://github.com/ClickHouse/ClickHouse/pull/4145) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a rare race condition when removing old data parts that could fail with a `File not found` error. [\#4378](https://github.com/ClickHouse/ClickHouse/pull/4378) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed package installation with a missing /etc/clickhouse-server/config.xml. [\#4343](https://github.com/ClickHouse/ClickHouse/pull/4343) ([proller](https://github.com/proller))

#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-5}

- Debian package: correct the /etc/clickhouse-server/preprocessed link according to the config. [\#4205](https://github.com/ClickHouse/ClickHouse/pull/4205) ([proller](https://github.com/proller))
- Various build fixes for FreeBSD. [\#4225](https://github.com/ClickHouse/ClickHouse/pull/4225) ([proller](https://github.com/proller))
- Added the ability to create, fill and drop tables in perftest. [\#4220](https://github.com/ClickHouse/ClickHouse/pull/4220) ([alesapin](https://github.com/alesapin))
- Added a script to check for duplicate includes. [\#4326](https://github.com/ClickHouse/ClickHouse/pull/4326) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added the ability to run queries by index in performance tests. [\#4264](https://github.com/ClickHouse/ClickHouse/pull/4264) ([alesapin](https://github.com/alesapin))
- A package with debug symbols is suggested to be installed. [\#4274](https://github.com/ClickHouse/ClickHouse/pull/4274) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Refactoring of performance-test. Better logging and signals handling. [\#4171](https://github.com/ClickHouse/ClickHouse/pull/4171) ([alesapin](https://github.com/alesapin))
- Added docs to the anonymized Yandex.Metrica datasets. [\#4164](https://github.com/ClickHouse/ClickHouse/pull/4164) ([alesapin](https://github.com/alesapin))
- Added a tool for converting an old month-partitioned part to the custom-partitioned format. [\#4195](https://github.com/ClickHouse/ClickHouse/pull/4195) ([Alex Zatelepin](https://github.com/ztlpn))
- Added docs about two datasets in s3. [\#4144](https://github.com/ClickHouse/ClickHouse/pull/4144) ([alesapin](https://github.com/alesapin))
- Added a script that creates a changelog from pull request descriptions. [\#4169](https://github.com/ClickHouse/ClickHouse/pull/4169) [\#4173](https://github.com/ClickHouse/ClickHouse/pull/4173) ([KochetovNicolai](https://github.com/KochetovNicolai))
- Added a puppet module for ClickHouse. [\#4182](https://github.com/ClickHouse/ClickHouse/pull/4182) ([Maxim Fedotov](https://github.com/MaxFedotov))
- Added docs for a group of undocumented functions. [\#4168](https://github.com/ClickHouse/ClickHouse/pull/4168) ([Winter Zhang](https://github.com/zhang2014))
- ARM build fixes. [\#4210](https://github.com/ClickHouse/ClickHouse/pull/4210) [\#4306](https://github.com/ClickHouse/ClickHouse/pull/4306) [\#4291](https://github.com/ClickHouse/ClickHouse/pull/4291) ([proller](https://github.com/proller))
- Dictionary tests can now be run from `ctest`. [\#4189](https://github.com/ClickHouse/ClickHouse/pull/4189) ([proller](https://github.com/proller))
- Now `/etc/ssl` is used as the default directory with SSL certificates. [\#4167](https://github.com/ClickHouse/ClickHouse/pull/4167) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added a check of SSE and AVX instructions at start. [\#4234](https://github.com/ClickHouse/ClickHouse/pull/4234) ([Igr](https://github.com/igron99))
- The init script will wait for the server until start. [\#4281](https://github.com/ClickHouse/ClickHouse/pull/4281) ([proller](https://github.com/proller))

#### Backward incompatible changes {#backward-incompatible-changes-1}

- Removed the `allow_experimental_low_cardinality_type` setting. `LowCardinality` data types are production ready. [\#4323](https://github.com/ClickHouse/ClickHouse/pull/4323) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Reduced the mark cache size and the uncompressed cache size according to the available memory amount. [\#4240](https://github.com/ClickHouse/ClickHouse/pull/4240) ([Konstantin Lopatin](https://github.com/k-lopatin))
- Added the keyword `INDEX` to the `CREATE TABLE` query. A column with the name `index` has to be quoted with backticks or double quotes: `` `index` ``. [\#4143](https://github.com/ClickHouse/ClickHouse/pull/4143) ([Nikita Vasilev](https://github.com/nikvas0))
- `sumMap` now promotes the result type instead of overflowing. The old `sumMap` behaviour can be obtained by using the `sumMapWithOverflow` function; the sketch after this list compares the two. [\#4151](https://github.com/ClickHouse/ClickHouse/pull/4151) ([Léo Ercolanelli](https://github.com/ercolanelli-leo))
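
A sketch of the behavioural difference. The table is hypothetical, and the result values are what one would expect from UInt8 arithmetic rather than output copied from a real server:

```sql
CREATE TABLE sum_map_demo (k Array(UInt8), v Array(UInt8)) ENGINE = Memory;
INSERT INTO sum_map_demo VALUES ([1], [200]), ([1], [200]);

SELECT sumMap(k, v) FROM sum_map_demo;             -- ([1], [400]): result type is promoted, no overflow
SELECT sumMapWithOverflow(k, v) FROM sum_map_demo; -- ([1], [144]): old behaviour, 400 wraps around in UInt8
```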

#### Performance improvements {#performance-improvements-4}

- `std::sort` was replaced by `pdqsort` for queries without `LIMIT`. [\#4236](https://github.com/ClickHouse/ClickHouse/pull/4236) ([Evgenii Pravda](https://github.com/kvinty))
- Now the server reuses threads from the global thread pool. This affects performance in some corner cases. [\#4150](https://github.com/ClickHouse/ClickHouse/pull/4150) ([alexey-milovidov](https://github.com/alexey-milovidov))

#### Improvements {#improvements-5}

- Implemented AIO support for FreeBSD. [\#4305](https://github.com/ClickHouse/ClickHouse/pull/4305) ([urgordeadbeef](https://github.com/urgordeadbeef))
- `SELECT * FROM a JOIN b USING a, b` now returns the `a` and `b` columns only from the left table (a short example follows this list). [\#4141](https://github.com/ClickHouse/ClickHouse/pull/4141) ([Artem Zuikov](https://github.com/4ertus2))
- Allow the `-C` option of the client to work like the `-c` option. [\#4232](https://github.com/ClickHouse/ClickHouse/pull/4232) ([syominsergey](https://github.com/syominsergey))
- Now the `--password` option used without a value requires the password from stdin. [\#4230](https://github.com/ClickHouse/ClickHouse/pull/4230) ([BSD\_Conqueror](https://github.com/bsd-conqueror))
- Added highlighting of unescaped metacharacters in string literals that contain `LIKE` expressions or regexps. [\#4327](https://github.com/ClickHouse/ClickHouse/pull/4327) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added cancellation of read-only HTTP queries if the client socket goes away. [\#4213](https://github.com/ClickHouse/ClickHouse/pull/4213) ([nvartolomei](https://github.com/nvartolomei))
- Now the server reports progress to keep client connections alive. [\#4215](https://github.com/ClickHouse/ClickHouse/pull/4215) ([Ivan](https://github.com/abyss7))
- Slightly better message with a reason for an OPTIMIZE query with the `optimize_throw_if_noop` setting enabled. [\#4294](https://github.com/ClickHouse/ClickHouse/pull/4294) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added support of the `--version` option for the clickhouse server. [\#4251](https://github.com/ClickHouse/ClickHouse/pull/4251) ([Konstantin Lopatin](https://github.com/k-lopatin))
- Added the `--help/-h` option to `clickhouse-server`. [\#4233](https://github.com/ClickHouse/ClickHouse/pull/4233) ([Yuri Baranov](https://github.com/yurriy))
- Added support for scalar subqueries with an aggregate function state result. [\#4348](https://github.com/ClickHouse/ClickHouse/pull/4348) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
- Improved server shutdown time and ALTERs waiting time. [\#4372](https://github.com/ClickHouse/ClickHouse/pull/4372) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added info about the replicated\_can\_become\_leader setting to system.replicas and added logging if the replica won't try to become the leader. [\#4379](https://github.com/ClickHouse/ClickHouse/pull/4379) ([Alex Zatelepin](https://github.com/ztlpn))
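
To make the `USING` change above concrete, a small hypothetical session (table and column names invented for illustration):

```sql
CREATE TABLE a (a UInt32, b UInt32) ENGINE = Memory;
CREATE TABLE b (a UInt32, b UInt32) ENGINE = Memory;

-- Previously the result could contain duplicated `a` and `b` columns;
-- after this change only the left table's `a` and `b` are returned.
SELECT * FROM a JOIN b USING a, b;
```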

## ClickHouse release 19.1 {#clickhouse-release-19-1}

### ClickHouse release 19.1.14, 2019-03-14 {#clickhouse-release-19-1-14-2019-03-14}

- Fixed the error `Column ... queried more than once` that may happen if the setting `asterisk_left_columns_only` is set to 1 in case of using `GLOBAL JOIN` with `SELECT *` (a rare case). The issue does not exist in 19.3 and newer. [6bac7d8d](https://github.com/ClickHouse/ClickHouse/pull/4692/commits/6bac7d8d11a9b0d6de0b32b53c47eb2f6f8e7062) ([Artem Zuikov](https://github.com/4ertus2))

### ClickHouse release 19.1.13, 2019-03-12 {#clickhouse-release-19-1-13-2019-03-12}

This release contains exactly the same set of patches as 19.3.7.

### ClickHouse release 19.1.10, 2019-03-03 {#clickhouse-release-19-1-10-2019-03-03}

This release contains exactly the same set of patches as 19.3.6.

## ClickHouse release 19.1 {#clickhouse-release-19-1-1}

### ClickHouse release 19.1.9, 2019-02-21 {#clickhouse-release-19-1-9-2019-02-21}

#### Bug fixes {#bug-fixes-18}

- Fixed backward incompatibility with old versions due to a wrong implementation of the `send_logs_level` setting. [\#4445](https://github.com/ClickHouse/ClickHouse/pull/4445) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed backward incompatibility of the `remote` table function introduced with column comments. [\#4446](https://github.com/ClickHouse/ClickHouse/pull/4446) ([alexey-milovidov](https://github.com/alexey-milovidov))

### ClickHouse release 19.1.8, 2019-02-16 {#clickhouse-release-19-1-8-2019-02-16}

#### Bug fixes {#bug-fixes-19}

- Fixed package installation with a missing /etc/clickhouse-server/config.xml. [\#4343](https://github.com/ClickHouse/ClickHouse/pull/4343) ([proller](https://github.com/proller))

## ClickHouse release 19.1 {#clickhouse-release-19-1-2}

### ClickHouse release 19.1.7, 2019-02-15 {#clickhouse-release-19-1-7-2019-02-15}

#### Bug fixes {#bug-fixes-20}

- Correctly return the right type and properly handle locks in the `joinGet` function. [\#4153](https://github.com/ClickHouse/ClickHouse/pull/4153) ([Amos Bird](https://github.com/amosbird))
- Fixed an error when the creation of system logs was retried at server shutdown. [\#4254](https://github.com/ClickHouse/ClickHouse/pull/4254) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed an error: if there is a database with the `Dictionary` engine, all dictionaries are force-loaded at server startup, and if there is a dictionary with a ClickHouse source from localhost, that dictionary cannot load. [\#4255](https://github.com/ClickHouse/ClickHouse/pull/4255) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a bug when executing mutations containing `IN` operators produced incorrect results. [\#4099](https://github.com/ClickHouse/ClickHouse/pull/4099) ([Alex Zatelepin](https://github.com/ztlpn))
- `clickhouse-client` could segfault on exit while loading data for command-line suggestions if it was run in interactive mode. [\#4317](https://github.com/ClickHouse/ClickHouse/pull/4317) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a race condition when selecting from `system.tables` may give a `table doesn't exist` error. [\#4313](https://github.com/ClickHouse/ClickHouse/pull/4313) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a deadlock when a `SELECT` from a table with the `File` engine was retried after a `No such file or directory` error. [\#4161](https://github.com/ClickHouse/ClickHouse/pull/4161) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed an issue: local ClickHouse dictionaries were loaded via TCP, but should load within the process. [\#4166](https://github.com/ClickHouse/ClickHouse/pull/4166) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a `No message received` error when interacting with a PostgreSQL ODBC driver through a TLS connection. Also fixed a segfault when using the MySQL ODBC driver. [\#4170](https://github.com/ClickHouse/ClickHouse/pull/4170) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Temporarily disabled predicate optimization for `ORDER BY`. [\#3890](https://github.com/ClickHouse/ClickHouse/pull/3890) ([Winter Zhang](https://github.com/zhang2014))
- Fixed an infinite loop when selecting from the table function `numbers(0)`. [\#4280](https://github.com/ClickHouse/ClickHouse/pull/4280) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a `compile_expressions` bug with the comparison of big (larger than int16) dates. [\#4341](https://github.com/ClickHouse/ClickHouse/pull/4341) ([alesapin](https://github.com/alesapin))
- Fixed a segmentation fault with `uncompressed_cache=1` and an exception with a wrong uncompressed size. [\#4186](https://github.com/ClickHouse/ClickHouse/pull/4186) ([alesapin](https://github.com/alesapin))
- Fixed `ALL JOIN` with duplicates in the right table. [\#4184](https://github.com/ClickHouse/ClickHouse/pull/4184) ([Artem Zuikov](https://github.com/4ertus2))
- Fixed wrong behaviour when performing an `INSERT ... SELECT ... FROM file(...)` query where the file has `CSVWithNames` or `TSVWithNames` format and the first data row is missing. [\#4297](https://github.com/ClickHouse/ClickHouse/pull/4297) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed execution of aggregate functions with `Array(LowCardinality)` arguments. [\#4055](https://github.com/ClickHouse/ClickHouse/pull/4055) ([KochetovNicolai](https://github.com/KochetovNicolai))
- Debian package: correct the /etc/clickhouse-server/preprocessed link according to the config. [\#4205](https://github.com/ClickHouse/ClickHouse/pull/4205) ([proller](https://github.com/proller))
- Fixed a fuzz test under the undefined behaviour sanitizer: added a parameter type check for the `quantile*Weighted` family of functions. [\#4145](https://github.com/ClickHouse/ClickHouse/pull/4145) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Made the `START REPLICATED SENDS` command actually start replicated sends. [\#4229](https://github.com/ClickHouse/ClickHouse/pull/4229) ([nvartolomei](https://github.com/nvartolomei))
- Fixed `Not found column` for duplicate columns in the JOIN ON section. [\#4279](https://github.com/ClickHouse/ClickHouse/pull/4279) ([Artem Zuikov](https://github.com/4ertus2))
- Now `/etc/ssl` is used as the default directory with SSL certificates. [\#4167](https://github.com/ClickHouse/ClickHouse/pull/4167) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed an error on dictionary reload if the dictionary is not available. [\#4188](https://github.com/ClickHouse/ClickHouse/pull/4188) ([proller](https://github.com/proller))
- Fixed a bug with incorrect `Date` and `DateTime` comparison. [\#4237](https://github.com/ClickHouse/ClickHouse/pull/4237) ([valexey](https://github.com/valexey))
- Fixed an incorrect result when `Date` and `DateTime` arguments are used in branches of a conditional operator (the `if` function). Added a generic case for the `if` function. [\#4243](https://github.com/ClickHouse/ClickHouse/pull/4243) ([alexey-milovidov](https://github.com/alexey-milovidov))

### ClickHouse release 19.1.6, 2019-01-24 {#clickhouse-release-19-1-6-2019-01-24}

#### New features {#new-features-7}

- Custom per-column compression codecs for tables (the sketch after this list shows the syntax). [\#3899](https://github.com/ClickHouse/ClickHouse/pull/3899) [\#4111](https://github.com/ClickHouse/ClickHouse/pull/4111) ([alesapin](https://github.com/alesapin), [Winter Zhang](https://github.com/zhang2014), [Anatoly](https://github.com/Sindbag))
- Added the `Delta` compression codec. [\#4052](https://github.com/ClickHouse/ClickHouse/pull/4052) ([alesapin](https://github.com/alesapin))
- Allow `ALTER`ing compression codecs. [\#4054](https://github.com/ClickHouse/ClickHouse/pull/4054) ([alesapin](https://github.com/alesapin))
- Added the functions `left`, `right`, `trim`, `ltrim`, `rtrim`, `timestampadd`, `timestampsub` for SQL standard compatibility. [\#3826](https://github.com/ClickHouse/ClickHouse/pull/3826) ([Ivan Blinkov](https://github.com/blinkov))
- Support for writing to an `HDFS` table or the `hdfs` table function. [\#4084](https://github.com/ClickHouse/ClickHouse/pull/4084) ([alesapin](https://github.com/alesapin))
- Added functions to search for multiple constant strings in a big haystack: `multiPosition`, `multiSearch`, `firstMatch`, also with `-UTF8`, `-CaseInsensitive`, and `-CaseInsensitiveUTF8` variants. [\#4053](https://github.com/ClickHouse/ClickHouse/pull/4053) ([Danila Kutenin](https://github.com/danlark1))
- Pruning of unused shards if the `SELECT` query filters by the sharding key (setting `optimize_skip_unused_shards`). [\#3851](https://github.com/ClickHouse/ClickHouse/pull/3851) ([Gleb Kanterov](https://github.com/kanterov), [Ivan](https://github.com/abyss7))
- Allow the `Kafka` engine to ignore some number of parsing errors per block. [\#4094](https://github.com/ClickHouse/ClickHouse/pull/4094) ([Ivan](https://github.com/abyss7))
- Added support for `CatBoost` multiclass model evaluation. The `modelEvaluate` function returns a tuple with raw per-class predictions for multiclass models. `libcatboostmodel.so` should be built with [\#607](https://github.com/catboost/catboost/pull/607). [\#3959](https://github.com/ClickHouse/ClickHouse/pull/3959) ([KochetovNicolai](https://github.com/KochetovNicolai))
- Added the functions `filesystemAvailable`, `filesystemFree`, `filesystemCapacity`. [\#4097](https://github.com/ClickHouse/ClickHouse/pull/4097) ([Boris Granveaud](https://github.com/bgranvea))
- Added the hashing functions `xxHash64` and `xxHash32`. [\#3905](https://github.com/ClickHouse/ClickHouse/pull/3905) ([filimonov](https://github.com/filimonov))
- Added the `gccMurmurHash` hashing function (GCC-flavoured Murmur hash) which uses the same hash seed as [gcc](https://github.com/gcc-mirror/gcc/blob/41d6b10e96a1de98e90a7c0378437c3255814b16/libstdc%2B%2B-v3/include/bits/functional_hash.h#L191). [\#4000](https://github.com/ClickHouse/ClickHouse/pull/4000) ([sundyli](https://github.com/sundy-li))
- Added the hashing functions `javaHash`, `hiveHash`. [\#3811](https://github.com/ClickHouse/ClickHouse/pull/3811) ([shangshujie365](https://github.com/shangshujie365))
- Added the table function `remoteSecure`. The function works like `remote` but uses a secure connection. [\#4088](https://github.com/ClickHouse/ClickHouse/pull/4088) ([proller](https://github.com/proller))
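
A minimal sketch of the codec-related features above. The table and column names are hypothetical, and the codec choices are only an example of the syntax:

```sql
CREATE TABLE codec_demo
(
    ts DateTime CODEC(Delta, LZ4),  -- per-column codec chain (#3899); Delta codec (#4052)
    value Float64 CODEC(ZSTD)
)
ENGINE = MergeTree()
ORDER BY ts;

-- Changing a column's codec afterwards (#4054):
ALTER TABLE codec_demo MODIFY COLUMN value Float64 CODEC(LZ4HC);
```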

#### Experimental features {#experimental-features-3}

- Added multiple JOINs emulation (the `allow_experimental_multiple_joins_emulation` setting). [\#3946](https://github.com/ClickHouse/ClickHouse/pull/3946) ([Artem Zuikov](https://github.com/4ertus2))

#### Bug fixes {#bug-fixes-21}

- Made the `compiled_expression_cache_size` setting limited by default to lower memory consumption. [\#4041](https://github.com/ClickHouse/ClickHouse/pull/4041) ([alesapin](https://github.com/alesapin))
- Fixed a bug that led to hangups in threads that perform ALTERs of Replicated tables and in the thread that updates configuration from ZooKeeper. [\#2947](https://github.com/ClickHouse/ClickHouse/issues/2947) [\#3891](https://github.com/ClickHouse/ClickHouse/issues/3891) [\#3934](https://github.com/ClickHouse/ClickHouse/pull/3934) ([Alex Zatelepin](https://github.com/ztlpn))
- Fixed a race condition when executing a distributed ALTER task. The race condition led to more than one replica trying to execute the task, with all replicas except one failing with a ZooKeeper error. [\#3904](https://github.com/ClickHouse/ClickHouse/pull/3904) ([Alex Zatelepin](https://github.com/ztlpn))
- Fixed a bug when `from_zk` config elements were not refreshed after a request to ZooKeeper timed out. [\#2947](https://github.com/ClickHouse/ClickHouse/issues/2947) [\#3947](https://github.com/ClickHouse/ClickHouse/pull/3947) ([Alex Zatelepin](https://github.com/ztlpn))
- Fixed a bug with a wrong prefix for IPv4 subnet masks. [\#3945](https://github.com/ClickHouse/ClickHouse/pull/3945) ([alesapin](https://github.com/alesapin))
- Fixed a crash (`std::terminate`) in rare cases when a new thread cannot be created due to exhausted resources. [\#3956](https://github.com/ClickHouse/ClickHouse/pull/3956) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a bug in `remote` table function execution when wrong restrictions were used for `getStructureOfRemoteTable`. [\#4009](https://github.com/ClickHouse/ClickHouse/pull/4009) ([alesapin](https://github.com/alesapin))
- Fixed a leak of netlink sockets. They were placed in a pool where they were never deleted, and new sockets were created at the start of a new thread when all current sockets were in use. [\#4017](https://github.com/ClickHouse/ClickHouse/pull/4017) ([Alex Zatelepin](https://github.com/ztlpn))
- Fixed a bug with closing the `/proc/self/fd` directory earlier than all fds were read from `/proc` after forking the `odbc-bridge` subprocess. [\#4120](https://github.com/ClickHouse/ClickHouse/pull/4120) ([alesapin](https://github.com/alesapin))
- Fixed monotonic String-to-UInt conversion in case of usage of a String in a primary key. [\#3870](https://github.com/ClickHouse/ClickHouse/pull/3870) ([Winter Zhang](https://github.com/zhang2014))
- Fixed a bug in the calculation of integer conversion functions' monotonicity. [\#3921](https://github.com/ClickHouse/ClickHouse/pull/3921) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a segfault in the `arrayEnumerateUniq`, `arrayEnumerateDense` functions in case of some invalid arguments. [\#3909](https://github.com/ClickHouse/ClickHouse/pull/3909) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed UB in StorageMerge. [\#3910](https://github.com/ClickHouse/ClickHouse/pull/3910) ([Amos Bird](https://github.com/amosbird))
- Fixed a segfault in the functions `addDays`, `subtractDays`. [\#3913](https://github.com/ClickHouse/ClickHouse/pull/3913) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a bug: the functions `round`, `floor`, `trunc`, `ceil` could return a bogus result when executed on an integer argument and a large negative scale. [\#3914](https://github.com/ClickHouse/ClickHouse/pull/3914) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed a bug induced by ‘kill query sync’ which led to a core dump. [\#3916](https://github.com/ClickHouse/ClickHouse/pull/3916) ([muVulDeePecker](https://github.com/fancyqlx))
- Fixed a bug with a long delay after an empty replication queue. [\#3928](https://github.com/ClickHouse/ClickHouse/pull/3928) [\#3932](https://github.com/ClickHouse/ClickHouse/pull/3932) ([alesapin](https://github.com/alesapin))
- Fixed excessive memory usage in case of inserting into a table with a `LowCardinality` primary key. [\#3955](https://github.com/ClickHouse/ClickHouse/pull/3955) ([KochetovNicolai](https://github.com/KochetovNicolai))
- Fixed `LowCardinality` serialization for the `Native` format in case of empty arrays. [\#3907](https://github.com/ClickHouse/ClickHouse/issues/3907) [\#4011](https://github.com/ClickHouse/ClickHouse/pull/4011) ([KochetovNicolai](https://github.com/KochetovNicolai))
- Fixed an incorrect result while using distinct by a single LowCardinality numeric column. [\#3895](https://github.com/ClickHouse/ClickHouse/issues/3895) [\#4012](https://github.com/ClickHouse/ClickHouse/pull/4012) ([KochetovNicolai](https://github.com/KochetovNicolai))
- Fixed specialized aggregation with a LowCardinality key (in case the `compile` setting is enabled). [\#3886](https://github.com/ClickHouse/ClickHouse/pull/3886) ([KochetovNicolai](https://github.com/KochetovNicolai))
- Fixed user and password forwarding for replicated table queries. [\#3957](https://github.com/ClickHouse/ClickHouse/pull/3957) ([alesapin](https://github.com/alesapin)) ([小路](https://github.com/nicelulu))
- Fixed a very rare race condition that can happen when listing tables in a Dictionary database while reloading dictionaries. [\#3970](https://github.com/ClickHouse/ClickHouse/pull/3970) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed an incorrect result when ROLLUP or CUBE was used. [\#3756](https://github.com/ClickHouse/ClickHouse/issues/3756) [\#3837](https://github.com/ClickHouse/ClickHouse/pull/3837) ([Sam Chou](https://github.com/reflection))
- Fixed column aliases for queries with `JOIN ON` syntax and distributed tables. [\#3980](https://github.com/ClickHouse/ClickHouse/pull/3980) ([Winter Zhang](https://github.com/zhang2014))
- Fixed a bug in the internal implementation of `quantileTDigest` (found by Artem Vakhrushev). This bug never happens in ClickHouse and was relevant only for those who use the ClickHouse codebase directly as a library. [\#3935](https://github.com/ClickHouse/ClickHouse/pull/3935) ([alexey-milovidov](https://github.com/alexey-milovidov))

#### Improvements {#improvements-6}

- Support for `IF NOT EXISTS` in `ALTER TABLE ADD COLUMN` statements along with `IF EXISTS` in `DROP/MODIFY/CLEAR/COMMENT COLUMN` (a short example follows this list). [\#3900](https://github.com/ClickHouse/ClickHouse/pull/3900) ([Boris Granveaud](https://github.com/bgranvea))
- Function `parseDateTimeBestEffort`: support for the formats `DD.MM.YYYY`, `DD.MM.YY`, `DD-MM-YYYY`, `DD-Mon-YYYY`, `DD/Month/YYYY` and similar. [\#3922](https://github.com/ClickHouse/ClickHouse/pull/3922) ([alexey-milovidov](https://github.com/alexey-milovidov))
- `CapnProtoInputStream` now supports jagged structures. [\#4063](https://github.com/ClickHouse/ClickHouse/pull/4063) ([Odin Hultgren Van Der Horst](https://github.com/Miniwoffer))
- Usability improvement: added a check that the server process is launched by the owner of the data directory. Do not allow launching the server as root if the data belongs to a non-root user. [\#3785](https://github.com/ClickHouse/ClickHouse/pull/3785) ([sergey-v-galtsev](https://github.com/sergey-v-galtsev))
- Improved logic of checking required columns during the analysis of queries with JOINs. [\#3930](https://github.com/ClickHouse/ClickHouse/pull/3930) ([Artem Zuikov](https://github.com/4ertus2))
- Decreased the number of connections in case of a large number of Distributed tables on a single server. [\#3726](https://github.com/ClickHouse/ClickHouse/pull/3726) ([Winter Zhang](https://github.com/zhang2014))
- Supported the totals row for a `WITH TOTALS` query for the ODBC driver. [\#3836](https://github.com/ClickHouse/ClickHouse/pull/3836) ([Maxim Koritckiy](https://github.com/nightweb))
- Allowed using `Enum`s as integers inside the if function. [\#3875](https://github.com/ClickHouse/ClickHouse/pull/3875) ([Ivan](https://github.com/abyss7))
- Added the `low_cardinality_allow_in_native_format` setting. If disabled, the `LowCardinality` type is not used in the `Native` format. [\#3879](https://github.com/ClickHouse/ClickHouse/pull/3879) ([KochetovNicolai](https://github.com/KochetovNicolai))
- Removed some redundant objects from the compiled expressions cache to lower memory usage. [\#4042](https://github.com/ClickHouse/ClickHouse/pull/4042) ([alesapin](https://github.com/alesapin))
- Added a check that a `SET send_logs_level = 'value'` query accepts an appropriate value. [\#3873](https://github.com/ClickHouse/ClickHouse/pull/3873) ([Maxim Sabyanin](https://github.com/s-mx))
- Fixed the data type check in type conversion functions. [\#3896](https://github.com/ClickHouse/ClickHouse/pull/3896) ([Winter Zhang](https://github.com/zhang2014))
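
Two of the improvements above, sketched as queries. The table `t` and column `note` are hypothetical:

```sql
-- Idempotent schema change (#3900): the second statement becomes a no-op instead of an error.
ALTER TABLE t ADD COLUMN IF NOT EXISTS note String;
ALTER TABLE t ADD COLUMN IF NOT EXISTS note String;

-- Newly supported DD.MM.YYYY input format (#3922):
SELECT parseDateTimeBestEffort('23.10.2018 12:12:57');  -- expected: 2018-10-23 12:12:57
```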

#### Performance improvements {#performance-improvements-5}

- Added the MergeTree setting `use_minimalistic_part_header_in_zookeeper`. If enabled, Replicated tables will store compact part metadata in a single part znode. This can dramatically reduce the ZooKeeper snapshot size (especially if the tables have a lot of columns). Note that after enabling this setting you will not be able to downgrade to a version that doesn't support it. [\#3960](https://github.com/ClickHouse/ClickHouse/pull/3960) ([Alex Zatelepin](https://github.com/ztlpn))
- Added a DFA-based implementation for the functions `sequenceMatch` and `sequenceCount` in case the pattern doesn't contain time. [\#4004](https://github.com/ClickHouse/ClickHouse/pull/4004) ([Léo Ercolanelli](https://github.com/ercolanelli-leo))
- Performance improvement for integer number serialization. [\#3968](https://github.com/ClickHouse/ClickHouse/pull/3968) ([Amos Bird](https://github.com/amosbird))
- Zero left padding of PODArray so that the -1 element is always valid and zeroed. It is used for the branchless calculation of offsets. [\#3920](https://github.com/ClickHouse/ClickHouse/pull/3920) ([Amos Bird](https://github.com/amosbird))
- Reverted the `jemalloc` version which led to performance degradation. [\#4018](https://github.com/ClickHouse/ClickHouse/pull/4018) ([alexey-milovidov](https://github.com/alexey-milovidov))

#### Backward incompatible changes {#backward-incompatible-changes-2}

- Removed the undocumented feature `ALTER MODIFY PRIMARY KEY` because it was superseded by the `ALTER MODIFY ORDER BY` command. [\#3887](https://github.com/ClickHouse/ClickHouse/pull/3887) ([Alex Zatelepin](https://github.com/ztlpn))
- Removed the `shardByHash` function. [\#3833](https://github.com/ClickHouse/ClickHouse/pull/3833) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Forbid using scalar subqueries with a result of type `AggregateFunction`. [\#3865](https://github.com/ClickHouse/ClickHouse/pull/3865) ([Ivan](https://github.com/abyss7))

#### Build/Testing/Packaging Improvements {#buildtestingpackaging-improvements-6}

- Added support for the PowerPC (`ppc64le`) build. [\#4132](https://github.com/ClickHouse/ClickHouse/pull/4132) ([Danila Kutenin](https://github.com/danlark1))
- Stateful functional tests are run on a publicly available dataset. [\#3969](https://github.com/ClickHouse/ClickHouse/pull/3969) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed an error when the server cannot start with the `bash: /usr/bin/clickhouse-extract-from-config: Operation not permitted` message within Docker or systemd-nspawn. [\#4136](https://github.com/ClickHouse/ClickHouse/pull/4136) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Updated the `rdkafka` library to v1.0.0-RC5. Used cppkafka instead of the raw C interface. [\#4025](https://github.com/ClickHouse/ClickHouse/pull/4025) ([Ivan](https://github.com/abyss7))
- Updated the `mariadb-client` library. Fixed one of the issues found by UBSan. [\#3924](https://github.com/ClickHouse/ClickHouse/pull/3924) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Some fixes for UBSan builds. [\#3926](https://github.com/ClickHouse/ClickHouse/pull/3926) [\#3021](https://github.com/ClickHouse/ClickHouse/pull/3021) [\#3948](https://github.com/ClickHouse/ClickHouse/pull/3948) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added per-commit runs of tests with the UBSan build.
- Added per-commit runs of the PVS-Studio static analyzer.
- Fixed bugs found by PVS-Studio. [\#4013](https://github.com/ClickHouse/ClickHouse/pull/4013) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed glibc compatibility issues. [\#4100](https://github.com/ClickHouse/ClickHouse/pull/4100) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Moved Docker images to 18.10 and added a compatibility file for glibc \>= 2.28. [\#3965](https://github.com/ClickHouse/ClickHouse/pull/3965) ([alesapin](https://github.com/alesapin))
- Added an env variable for users who do not want to chown directories in the server Docker image. [\#3967](https://github.com/ClickHouse/ClickHouse/pull/3967) ([alesapin](https://github.com/alesapin))
- Enabled most of the warnings from `-Weverything` in clang. Enabled `-Wpedantic`. [\#3986](https://github.com/ClickHouse/ClickHouse/pull/3986) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Added a few more warnings that are available only in clang 8. [\#3993](https://github.com/ClickHouse/ClickHouse/pull/3993) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Link to `libLLVM` rather than to individual LLVM libs when using shared linking. [\#3989](https://github.com/ClickHouse/ClickHouse/pull/3989) ([Orivej Desh](https://github.com/orivej))
- Added sanitizer variables for test images. [\#4072](https://github.com/ClickHouse/ClickHouse/pull/4072) ([alesapin](https://github.com/alesapin))
- The `clickhouse-server` debian package will recommend the `libcap2-bin` package to use the `setcap` tool for setting capabilities. This is optional. [\#4093](https://github.com/ClickHouse/ClickHouse/pull/4093) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Improved compilation time, fixed includes. [\#3898](https://github.com/ClickHouse/ClickHouse/pull/3898) ([proller](https://github.com/proller))
- Added performance tests for hash functions. [\#3918](https://github.com/ClickHouse/ClickHouse/pull/3918) ([filimonov](https://github.com/filimonov))
- Fixed cyclic library dependencies. [\#3958](https://github.com/ClickHouse/ClickHouse/pull/3958) ([proller](https://github.com/proller))
- Improved compilation with low available memory. [\#4030](https://github.com/ClickHouse/ClickHouse/pull/4030) ([proller](https://github.com/proller))
- Added a test script to reproduce the performance degradation in `jemalloc`. [\#4036](https://github.com/ClickHouse/ClickHouse/pull/4036) ([alexey-milovidov](https://github.com/alexey-milovidov))
- Fixed misspellings in comments and string literals under `dbms`. [\#4122](https://github.com/ClickHouse/ClickHouse/pull/4122) ([maiha](https://github.com/maiha))
- Fixed typos in comments. [\#4089](https://github.com/ClickHouse/ClickHouse/pull/4089) ([Evgenii Pravda](https://github.com/kvinty))

## [Changelog for 2018](https://github.com/ClickHouse/ClickHouse/blob/master/docs/en/changelog/2018.md) {#changelog-for-2018}

diff --git a/docs/ru/whats_new/changelog/2019.md b/docs/ru/whats_new/changelog/2019.md
new file mode 120000
index 00000000000..905836eef7a
--- /dev/null
+++ b/docs/ru/whats_new/changelog/2019.md
@@ -0,0 +1 @@
+en/whats_new/changelog/2019.md
\ No newline at end of file
diff --git a/docs/ru/whats_new/roadmap.md b/docs/ru/whats_new/roadmap.md
deleted file mode 100644
index 3994ed4ac29..00000000000
--- a/docs/ru/whats_new/roadmap.md
+++ /dev/null
@@ -1,17 +0,0 @@
----
-machine_translated: true
-machine_translated_rev: 1cd5f0028d917696daf71ac1c9ee849c99c1d5c8
----
-
-# Roadmap {#roadmap}
-
-## Q1 2020 {#q1-2020}
-
-- Role-based access control
-
-## Q2 2020 {#q2-2020}
-
-- Integration with external authentication services
-- Resource pools for more precise distribution of cluster capacity between users
-
-{## [Original article](https://clickhouse.tech/docs/en/roadmap/) ##}
diff --git a/docs/ru/whats_new/roadmap.md b/docs/ru/whats_new/roadmap.md
new file mode 120000
index 00000000000..81184f9c26c
--- /dev/null
+++ b/docs/ru/whats_new/roadmap.md
@@ -0,0 +1 @@
+en/whats_new/roadmap.md
\ No newline at end of file

From b00d9c78550840735a13aba1a47c50cd061c9f91 Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Sat, 11 Apr 2020 17:56:54 +0300
Subject: [PATCH 311/484] Fix bad translation, step 1: remove files #10191

---
 docs/ru/development/architecture.md | 1 -
 docs/ru/development/build.md | 1 -
 docs/ru/development/build_cross_arm.md | 1 -
 docs/ru/development/build_cross_osx.md | 1 -
 docs/ru/development/build_osx.md | 1 -
 docs/ru/development/index.md | 1 -
 docs/ru/development/tests.md | 1 -
 docs/ru/engines/table_engines/special/generate.md | 1 -
 docs/ru/getting_started/tutorial.md | 1 -
 docs/ru/introduction/adopters.md | 1 -
 .../operations/optimizing_performance/sampling_query_profiler.md | 1 -
 docs/ru/operations/performance_test.md | 1 -
 docs/ru/operations/utilities/clickhouse-benchmark.md | 1 -
 docs/ru/whats_new/changelog/2017.md | 1 -
 docs/ru/whats_new/changelog/2018.md | 1 -
 docs/ru/whats_new/changelog/2019.md | 1 -
 docs/ru/whats_new/roadmap.md | 1 -
 17 files changed, 17 deletions(-)
 delete mode 120000 docs/ru/development/architecture.md
 delete mode 120000 docs/ru/development/build.md
 delete mode 120000 docs/ru/development/build_cross_arm.md
 delete mode 120000 docs/ru/development/build_cross_osx.md
 delete mode 120000 docs/ru/development/build_osx.md
 delete mode 120000 docs/ru/development/index.md
 delete mode 120000 docs/ru/development/tests.md
 delete mode 120000 docs/ru/engines/table_engines/special/generate.md
 delete mode 120000 docs/ru/getting_started/tutorial.md
 delete mode 120000 docs/ru/introduction/adopters.md
 delete mode 120000 docs/ru/operations/optimizing_performance/sampling_query_profiler.md
 delete mode 120000 docs/ru/operations/performance_test.md
 delete mode 120000 docs/ru/operations/utilities/clickhouse-benchmark.md
 delete mode 120000 docs/ru/whats_new/changelog/2017.md
 delete mode 120000 docs/ru/whats_new/changelog/2018.md
 delete mode 120000 docs/ru/whats_new/changelog/2019.md
 delete mode 120000 docs/ru/whats_new/roadmap.md

diff --git a/docs/ru/development/architecture.md b/docs/ru/development/architecture.md
deleted file mode 120000
index 61968e46da2..00000000000
--- a/docs/ru/development/architecture.md
+++ /dev/null
@@ -1 +0,0 @@
-en/development/architecture.md
\ No newline at end of file
diff --git a/docs/ru/development/build.md b/docs/ru/development/build.md
deleted file mode 120000
index 156d8382515..00000000000
--- a/docs/ru/development/build.md
+++ /dev/null
@@ -1 +0,0 @@
-en/development/build.md
\ No newline at end of file
diff --git a/docs/ru/development/build_cross_arm.md b/docs/ru/development/build_cross_arm.md
deleted file mode 120000
index ea33bb61837..00000000000
--- a/docs/ru/development/build_cross_arm.md
+++ /dev/null
@@ -1 +0,0 @@
-en/development/build_cross_arm.md
\ No newline at end of file
diff --git a/docs/ru/development/build_cross_osx.md b/docs/ru/development/build_cross_osx.md
deleted file mode 120000
index d4dc16f2fbc..00000000000
--- a/docs/ru/development/build_cross_osx.md
+++ /dev/null
@@ -1 +0,0 @@
-en/development/build_cross_osx.md
\ No newline at end of file
diff --git a/docs/ru/development/build_osx.md b/docs/ru/development/build_osx.md
deleted file mode 120000
index 5c38a2b001a..00000000000
--- a/docs/ru/development/build_osx.md
+++ /dev/null
@@ -1 +0,0 @@
-en/development/build_osx.md
\ No newline at end of file
diff --git a/docs/ru/development/index.md b/docs/ru/development/index.md
deleted file mode 120000
index 754385a9f4b..00000000000
--- a/docs/ru/development/index.md
+++ /dev/null
@@ -1 +0,0 @@
-en/development/index.md
\ No newline at end of file
diff --git a/docs/ru/development/tests.md b/docs/ru/development/tests.md
deleted file mode 120000
index ce23c881f32..00000000000
--- a/docs/ru/development/tests.md
+++ /dev/null
@@ -1 +0,0 @@
-en/development/tests.md
\ No newline at end of file
diff --git a/docs/ru/engines/table_engines/special/generate.md b/docs/ru/engines/table_engines/special/generate.md
deleted file mode 120000
index 631f9bbba66..00000000000
--- a/docs/ru/engines/table_engines/special/generate.md
+++ /dev/null
@@ -1 +0,0 @@
-en/engines/table_engines/special/generate.md
\ No newline at end of file
diff --git a/docs/ru/getting_started/tutorial.md b/docs/ru/getting_started/tutorial.md
deleted file mode 120000
index 18b86bb2e9c..00000000000
--- a/docs/ru/getting_started/tutorial.md
+++ /dev/null
@@ -1 +0,0 @@
-en/getting_started/tutorial.md
\ No newline at end of file
diff --git a/docs/ru/introduction/adopters.md b/docs/ru/introduction/adopters.md
deleted file mode 120000
index b9b77a27eb9..00000000000
--- a/docs/ru/introduction/adopters.md
+++ /dev/null
@@ -1 +0,0 @@
-en/introduction/adopters.md
\ No newline at end of file
diff --git a/docs/ru/operations/optimizing_performance/sampling_query_profiler.md b/docs/ru/operations/optimizing_performance/sampling_query_profiler.md
deleted file mode 120000
index 565f39130fb..00000000000
--- a/docs/ru/operations/optimizing_performance/sampling_query_profiler.md
+++ /dev/null
@@ -1 +0,0 @@
-en/operations/optimizing_performance/sampling_query_profiler.md
\ No newline at end of file
diff --git a/docs/ru/operations/performance_test.md b/docs/ru/operations/performance_test.md
deleted file mode 120000
index 3787adb92bd..00000000000
--- a/docs/ru/operations/performance_test.md
+++ /dev/null
@@ -1 +0,0 @@
-en/operations/performance_test.md
\ No newline at end of file
diff --git a/docs/ru/operations/utilities/clickhouse-benchmark.md b/docs/ru/operations/utilities/clickhouse-benchmark.md
deleted file mode 120000
index fda8b1a50c7..00000000000
--- a/docs/ru/operations/utilities/clickhouse-benchmark.md
+++ /dev/null
@@ -1 +0,0 @@
-en/operations/utilities/clickhouse-benchmark.md
\ No newline at end of file
diff --git a/docs/ru/whats_new/changelog/2017.md b/docs/ru/whats_new/changelog/2017.md
deleted file mode 120000
index f278c42f170..00000000000
--- a/docs/ru/whats_new/changelog/2017.md
+++ /dev/null
@@ -1 +0,0 @@
-en/whats_new/changelog/2017.md
\ No newline at end of file
diff --git a/docs/ru/whats_new/changelog/2018.md b/docs/ru/whats_new/changelog/2018.md
deleted file mode 120000
index 675c07e8bbb..00000000000
--- a/docs/ru/whats_new/changelog/2018.md
+++ /dev/null
@@ -1 +0,0 @@
-en/whats_new/changelog/2018.md
\ No newline at end of file
diff --git a/docs/ru/whats_new/changelog/2019.md b/docs/ru/whats_new/changelog/2019.md
deleted file mode 120000
index 905836eef7a..00000000000
--- a/docs/ru/whats_new/changelog/2019.md
+++ /dev/null
@@ -1 +0,0 @@
-en/whats_new/changelog/2019.md
\ No newline at end of file
diff --git a/docs/ru/whats_new/roadmap.md b/docs/ru/whats_new/roadmap.md
deleted file mode 120000
index 81184f9c26c..00000000000
--- a/docs/ru/whats_new/roadmap.md
+++ /dev/null
@@ -1 +0,0 @@
-en/whats_new/roadmap.md
\ No newline at end of file

From ab8900ecff65150e803c50984cde78e634e095c7 Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Sat, 11 Apr 2020 18:03:35 +0300
Subject: [PATCH 312/484] Fix bad translation, step 2: replace with symlinks #10191

---
 docs/ru/development/architecture.md | 1 +
 docs/ru/development/build.md | 1 +
 docs/ru/development/build_cross_arm.md | 1 +
 docs/ru/development/build_cross_osx.md | 1 +
 docs/ru/development/build_osx.md | 1 +
 docs/ru/development/index.md | 1 +
 docs/ru/development/tests.md | 1 +
 docs/ru/engines/table_engines/special/generate.md | 1 +
 docs/ru/getting_started/tutorial.md | 1 +
 docs/ru/introduction/adopters.md | 1 +
 .../operations/optimizing_performance/sampling_query_profiler.md | 1 +
 docs/ru/operations/performance_test.md | 1 +
 docs/ru/operations/utilities/clickhouse-benchmark.md | 1 +
 docs/ru/whats_new/changelog/2017.md | 1 +
 docs/ru/whats_new/changelog/2018.md | 1 +
 docs/ru/whats_new/changelog/2019.md | 1 +
 docs/ru/whats_new/roadmap.md | 1 +
 17 files changed, 17 insertions(+)
 create mode 120000 docs/ru/development/architecture.md
 create mode 120000 docs/ru/development/build.md
 create mode 120000 docs/ru/development/build_cross_arm.md
 create mode 120000 docs/ru/development/build_cross_osx.md
 create mode 120000 docs/ru/development/build_osx.md
 create mode 120000 docs/ru/development/index.md
 create mode 120000 docs/ru/development/tests.md
 create mode 120000 docs/ru/engines/table_engines/special/generate.md
 create mode 120000 docs/ru/getting_started/tutorial.md
 create mode 120000 docs/ru/introduction/adopters.md
 create mode 120000 docs/ru/operations/optimizing_performance/sampling_query_profiler.md
 create mode 120000 docs/ru/operations/performance_test.md
 create mode 120000 docs/ru/operations/utilities/clickhouse-benchmark.md
 create mode 120000 docs/ru/whats_new/changelog/2017.md
 create mode 120000 docs/ru/whats_new/changelog/2018.md
 create mode 120000 docs/ru/whats_new/changelog/2019.md
 create mode 120000 docs/ru/whats_new/roadmap.md

diff --git a/docs/ru/development/architecture.md b/docs/ru/development/architecture.md
new file mode 120000
index 00000000000..abda4dd48a8
--- /dev/null
+++ b/docs/ru/development/architecture.md
@@ -0,0 +1 @@
+../../en/development/architecture.md
\ No newline at end of file
diff --git a/docs/ru/development/build.md b/docs/ru/development/build.md
new file mode 120000
index 00000000000..480dbc2e9f5
--- /dev/null
+++ b/docs/ru/development/build.md
@@ -0,0 +1 @@
+../../en/development/build.md
\ No newline at end of file
diff --git a/docs/ru/development/build_cross_arm.md b/docs/ru/development/build_cross_arm.md
new file mode 120000
index 00000000000..983a9872dc1
--- /dev/null
+++ b/docs/ru/development/build_cross_arm.md
@@ -0,0 +1 @@
+../../en/development/build_cross_arm.md
\ No newline at end of file
diff --git a/docs/ru/development/build_cross_osx.md b/docs/ru/development/build_cross_osx.md
new file mode 120000
index 00000000000..72e64e8631f
--- /dev/null
+++ b/docs/ru/development/build_cross_osx.md
@@ -0,0 +1 @@
+../../en/development/build_cross_osx.md
\ No newline at end of file
diff --git a/docs/ru/development/build_osx.md b/docs/ru/development/build_osx.md
new file mode 120000
index 00000000000..f9adaf24584
--- /dev/null
+++ b/docs/ru/development/build_osx.md
@@ -0,0 +1 @@
+../../en/development/build_osx.md
\ No newline at end of file
diff --git a/docs/ru/development/index.md b/docs/ru/development/index.md
new file mode 120000
index 00000000000..1e2ad97dcc5
--- /dev/null
+++ b/docs/ru/development/index.md
@@ -0,0 +1 @@
+../../en/development/index.md
\ No newline at end of file
diff --git a/docs/ru/development/tests.md b/docs/ru/development/tests.md
new file mode 120000
index 00000000000..c03d36c3916
--- /dev/null
+++ b/docs/ru/development/tests.md
@@ -0,0 +1 @@
+../../en/development/tests.md
\ No newline at end of file
diff --git a/docs/ru/engines/table_engines/special/generate.md b/docs/ru/engines/table_engines/special/generate.md
new file mode 120000
index 00000000000..566dc4e5382
--- /dev/null
+++ b/docs/ru/engines/table_engines/special/generate.md
@@ -0,0 +1 @@
+../../../../en/engines/table_engines/special/generate.md
\ No newline at end of file
diff --git a/docs/ru/getting_started/tutorial.md b/docs/ru/getting_started/tutorial.md
new file mode 120000
index 00000000000..8bc40816ab2
--- /dev/null
+++ b/docs/ru/getting_started/tutorial.md
@@ -0,0 +1 @@
+../../en/getting_started/tutorial.md
\ No newline at end of file
diff --git a/docs/ru/introduction/adopters.md b/docs/ru/introduction/adopters.md
new file mode 120000
index 00000000000..659153d5f6c
--- /dev/null
+++ b/docs/ru/introduction/adopters.md
@@ -0,0 +1 @@
+../../en/introduction/adopters.md
\ No newline at end of file
diff --git a/docs/ru/operations/optimizing_performance/sampling_query_profiler.md b/docs/ru/operations/optimizing_performance/sampling_query_profiler.md
new file mode 120000
index 00000000000..9f3b57cd086
--- /dev/null
+++ b/docs/ru/operations/optimizing_performance/sampling_query_profiler.md
@@ -0,0 +1 @@
+../../../en/operations/optimizing_performance/sampling_query_profiler.md
\ No newline at end of file
diff --git a/docs/ru/operations/performance_test.md b/docs/ru/operations/performance_test.md
new file mode 120000
index 00000000000..a74c126c63f
--- /dev/null
+++ b/docs/ru/operations/performance_test.md
@@ -0,0 +1 @@
+../../en/operations/performance_test.md
\ No newline at end of file
diff --git a/docs/ru/operations/utilities/clickhouse-benchmark.md b/docs/ru/operations/utilities/clickhouse-benchmark.md
new file mode 120000
index 00000000000..3695c9fbdd3
--- /dev/null
+++ b/docs/ru/operations/utilities/clickhouse-benchmark.md
@@ -0,0 +1 @@
+../../../en/operations/utilities/clickhouse-benchmark.md
\ No newline at end of file
diff --git a/docs/ru/whats_new/changelog/2017.md b/docs/ru/whats_new/changelog/2017.md
new file mode 120000
index 00000000000..a098eddf1d8
--- /dev/null
+++ b/docs/ru/whats_new/changelog/2017.md
@@ -0,0 +1 @@
+../../../en/whats_new/changelog/2017.md
\ No newline at end of file
diff --git a/docs/ru/whats_new/changelog/2018.md b/docs/ru/whats_new/changelog/2018.md
new file mode 120000
index 00000000000..124fb19e175
--- /dev/null
+++ b/docs/ru/whats_new/changelog/2018.md
@@ -0,0 +1 @@
+../../../en/whats_new/changelog/2018.md
\ No newline at end of file
diff --git a/docs/ru/whats_new/changelog/2019.md b/docs/ru/whats_new/changelog/2019.md
new file mode 120000
index 00000000000..740d1edd238
--- /dev/null
+++ b/docs/ru/whats_new/changelog/2019.md
@@ -0,0 +1 @@
+../../../en/whats_new/changelog/2019.md
\ No newline at end of file
diff --git a/docs/ru/whats_new/roadmap.md b/docs/ru/whats_new/roadmap.md
new file mode 120000
index 00000000000..5ef0ebdb1bb
--- /dev/null
+++ b/docs/ru/whats_new/roadmap.md
@@ -0,0 +1 @@
+../../en/whats_new/roadmap.md
\ No newline at end of file

From 218b9b3c6ca0c67526449f12741f0db89117e2ec Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Sat, 11 Apr 2020 18:40:11 +0300
Subject: [PATCH 313/484] Remove garbage

---
 docs/en/operations/performance_test.md | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/docs/en/operations/performance_test.md b/docs/en/operations/performance_test.md
index d955b50fa02..8c93f4e5f19 100644
--- a/docs/en/operations/performance_test.md
+++ b/docs/en/operations/performance_test.md
@@ -24,7 +24,7 @@ With this instruction you can run basic ClickHouse performance test on any serve
 # Then do:
 chmod a+x clickhouse

-1. Download configs:
+5. Download configs:

 wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.d/path.xml -O config.d/path.xml
 wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.d/log_to_console.xml -O config.d/log_to_console.xml

-1. Download benchmark files:
+6. Download benchmark files:

 wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/clickhouse/benchmark-new.sh
 chmod a+x benchmark-new.sh
 wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/clickhouse/queries.sql

-1. Download test data according to the [Yandex.Metrica dataset](../getting_started/example_datasets/metrica.md) instruction (“hits” table containing 100 million rows).
+7. Download test data according to the [Yandex.Metrica dataset](../getting_started/example_datasets/metrica.md) instruction (“hits” table containing 100 million rows).

 wget https://clickhouse-datasets.s3.yandex.net/hits/partitions/hits_100m_obfuscated_v1.tar.xz
 tar xvf hits_100m_obfuscated_v1.tar.xz -C .
 mv hits_100m_obfuscated_v1/* .

-1. Run the server:
+8. Run the server:

 ./clickhouse server

-1. Check the data: ssh to the server in another terminal
+9. Check the data: ssh to the server in another terminal

 ./clickhouse client --query "SELECT count() FROM hits_100m_obfuscated"
 100000000

-1. Edit the benchmark-new.sh, change “clickhouse-client” to “./clickhouse client” and add “–max\_memory\_usage 100000000000” parameter.
+10. Edit the benchmark-new.sh, change “clickhouse-client” to “./clickhouse client” and add “–max\_memory\_usage 100000000000” parameter.

 mcedit benchmark-new.sh

-1. Run the benchmark:
+11. Run the benchmark:

 ./benchmark-new.sh hits_100m_obfuscated

-1. Send the numbers and the info about your hardware configuration to clickhouse-feedback@yandex-team.com
+12.
Send the numbers and the info about your hardware configuration to clickhouse-feedback@yandex-team.com -All the results are published here: https://clickhouse.tech/benchmark\_hardware.html +All the results are published here: https://clickhouse.tech/benchmark_hardware.html From 59b5f88099dae933372b7eb439ecb66cd480bc9a Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Sat, 11 Apr 2020 18:43:13 +0300 Subject: [PATCH 314/484] Eliminate the rot. --- docs/en/operations/performance_test.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/operations/performance_test.md b/docs/en/operations/performance_test.md index 8c93f4e5f19..6b59ec6fedd 100644 --- a/docs/en/operations/performance_test.md +++ b/docs/en/operations/performance_test.md @@ -63,7 +63,7 @@ With this instruction you can run basic ClickHouse performance test on any serve ./clickhouse client --query "SELECT count() FROM hits_100m_obfuscated" 100000000 -10. Edit the benchmark-new.sh, change “clickhouse-client” to “./clickhouse client” and add “–max\_memory\_usage 100000000000” parameter. +10. Edit the benchmark-new.sh, change `clickhouse-client` to `./clickhouse client` and add the `--max_memory_usage 100000000000` parameter. From 1526722333d46e75aa3cc4f327bbb4b2b072744c Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 11 Apr 2020 18:54:16 +0300 Subject: [PATCH 315/484] Enforce that there is no machine translation to Russian #10191 --- utils/check-style/check-style | 3 +++ 1 file changed, 3 insertions(+) diff --git a/utils/check-style/check-style b/utils/check-style/check-style index 08b0e81c123..2a2e9dab42d 100755 --- a/utils/check-style/check-style +++ b/utils/check-style/check-style @@ -56,3 +56,6 @@ find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' | while # Broken XML files (requires libxml2-utils) find $ROOT_PATH/{src,base,programs,utils} -name '*.xml' | xargs xmllint --noout --nonet + +# Machine translation to Russian is strictly prohibited +find $ROOT_PATH/docs/ru -name '*.md' | xargs grep -l -F 'machine_translated: true' From 3d1e5b4bc998d9bf4a62097673368d2d93da9158 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 11 Apr 2020 19:34:24 +0300 Subject: [PATCH 316/484] Changed Slack Link (tnx. lnuynxa) --- website/templates/index/community.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/templates/index/community.html b/website/templates/index/community.html index 47bcbd67218..e230cac8da9 100644 --- a/website/templates/index/community.html +++ b/website/templates/index/community.html @@ -69,7 +69,7 @@
    -
    Date: Sat, 11 Apr 2020 01:23:27 +0300 Subject: [PATCH 317/484] Fix using the current database for access checking when the database isn't specified. --- src/Access/ContextAccess.cpp | 33 ++++++++++++++++++++------------- src/Access/ContextAccess.h | 8 +++++++- 2 files changed, 27 insertions(+), 14 deletions(-) diff --git a/src/Access/ContextAccess.cpp b/src/Access/ContextAccess.cpp index cf788a0a63e..915593f58f0 100644 --- a/src/Access/ContextAccess.cpp +++ b/src/Access/ContextAccess.cpp @@ -196,7 +196,7 @@ bool ContextAccess::isClientHostAllowed() const template -bool ContextAccess::checkAccessImpl(Poco::Logger * log_, const AccessFlags & flags, const Args &... args) const +bool ContextAccess::calculateResultAccessAndCheck(Poco::Logger * log_, const AccessFlags & flags, const Args &... args) const { auto access = calculateResultAccess(grant_option); bool is_granted = access->isGranted(flags, args...); @@ -267,6 +267,22 @@ bool ContextAccess::checkAccessImpl(Poco::Logger * log_, const AccessFlags & fla } +template +bool ContextAccess::checkAccessImpl(Poco::Logger * log_, const AccessFlags & flags) const +{ + return calculateResultAccessAndCheck(log_, flags); +} + +template +bool ContextAccess::checkAccessImpl(Poco::Logger * log_, const AccessFlags & flags, const std::string_view & database, const Args &... args) const +{ + if (database.empty()) + return calculateResultAccessAndCheck(log_, flags, params.current_database, args...); + else + return calculateResultAccessAndCheck(log_, flags, database, args...); +} + + template bool ContextAccess::checkAccessImpl(Poco::Logger * log_, const AccessRightsElement & element) const { @@ -276,24 +292,15 @@ bool ContextAccess::checkAccessImpl(Poco::Logger * log_, const AccessRightsEleme } else if (element.any_table) { - if (element.database.empty()) - return checkAccessImpl(log_, element.access_flags, params.current_database); - else - return checkAccessImpl(log_, element.access_flags, element.database); + return checkAccessImpl(log_, element.access_flags, element.database); } else if (element.any_column) { - if (element.database.empty()) - return checkAccessImpl(log_, element.access_flags, params.current_database, element.table); - else - return checkAccessImpl(log_, element.access_flags, element.database, element.table); + return checkAccessImpl(log_, element.access_flags, element.database, element.table); } else { - if (element.database.empty()) - return checkAccessImpl(log_, element.access_flags, params.current_database, element.table, element.columns); - else - return checkAccessImpl(log_, element.access_flags, element.database, element.table, element.columns); + return checkAccessImpl(log_, element.access_flags, element.database, element.table, element.columns); } } diff --git a/src/Access/ContextAccess.h b/src/Access/ContextAccess.h index bee63103793..e0fbf58dbe8 100644 --- a/src/Access/ContextAccess.h +++ b/src/Access/ContextAccess.h @@ -130,8 +130,11 @@ private: void setRolesInfo(const std::shared_ptr & roles_info_) const; void setSettingsAndConstraints() const; + template + bool checkAccessImpl(Poco::Logger * log_, const AccessFlags & flags) const; + template - bool checkAccessImpl(Poco::Logger * log_, const AccessFlags & flags, const Args &... args) const; + bool checkAccessImpl(Poco::Logger * log_, const AccessFlags & flags, const std::string_view & database, const Args &... 
args) const; template bool checkAccessImpl(Poco::Logger * log_, const AccessRightsElement & element) const; @@ -139,6 +142,9 @@ private: template bool checkAccessImpl(Poco::Logger * log_, const AccessRightsElements & elements) const; + template + bool calculateResultAccessAndCheck(Poco::Logger * log_, const AccessFlags & flags, const Args &... args) const; + boost::shared_ptr calculateResultAccess(bool grant_option) const; boost::shared_ptr calculateResultAccess(bool grant_option, UInt64 readonly_, bool allow_ddl_, bool allow_introspection_) const; From 1e2206bdd9cf4c025853151a25a247a619a29562 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Sat, 11 Apr 2020 20:54:10 +0300 Subject: [PATCH 318/484] Update security_changelog.md --- docs/ru/whats_new/security_changelog.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/ru/whats_new/security_changelog.md b/docs/ru/whats_new/security_changelog.md index c8f66bf0475..9a2dab8ba14 100644 --- a/docs/ru/whats_new/security_changelog.md +++ b/docs/ru/whats_new/security_changelog.md @@ -1,3 +1,5 @@ +# Security Changelog + ## Исправлено в релизе 19.14.3.3, 2019-09-10 {#ispravleno-v-relize-19-14-3-3-2019-09-10} ### CVE-2019-15024 {#cve-2019-15024} From 53199ae546ae20381a0c0c1fea19534364745311 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 11 Apr 2020 22:51:04 +0300 Subject: [PATCH 319/484] Fix various small issues in interactive mode of clickhouse-client #10189 #5908 --- programs/client/Client.cpp | 45 +++++++++++++++++--------------------- 1 file changed, 20 insertions(+), 25 deletions(-) diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index 9cd1332b513..fef89d9df35 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -82,16 +82,8 @@ #endif /// http://en.wikipedia.org/wiki/ANSI_escape_code - -/// Similar codes \e[s, \e[u don't work in VT100 and Mosh. -#define SAVE_CURSOR_POSITION "\033""7" -#define RESTORE_CURSOR_POSITION "\033""8" - #define CLEAR_TO_END_OF_LINE "\033[K" -/// This codes are possibly not supported everywhere. -#define DISABLE_LINE_WRAPPING "\033[?7l" -#define ENABLE_LINE_WRAPPING "\033[?7h" namespace DB { @@ -133,8 +125,6 @@ private: bool stdin_is_a_tty = false; /// stdin is a terminal. bool stdout_is_a_tty = false; /// stdout is a terminal. - uint16_t terminal_width = 0; /// Terminal width is needed to render progress bar. - std::unique_ptr connection; /// Connection to DB. String query_id; /// Current query_id. String query; /// Current query. @@ -1122,11 +1112,16 @@ private: /// to avoid losing sync. if (!cancelled) { - auto cancel_query = [&] { + auto cancel_query = [&] + { connection->sendCancel(); cancelled = true; if (is_interactive) + { + if (written_progress_chars) + clearProgress(); std::cout << "Cancelling query." << std::endl; + } /// Pressing Ctrl+C twice results in shut down. 
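+ /// unblock() stops intercepting the signal, so the next Ctrl+C falls through to the default handler.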
interrupt_listener.unblock(); @@ -1436,7 +1431,7 @@ private: { written_progress_chars = 0; if (!send_logs) - std::cerr << RESTORE_CURSOR_POSITION CLEAR_TO_END_OF_LINE; + std::cerr << "\r" CLEAR_TO_END_OF_LINE; } @@ -1461,20 +1456,14 @@ private: "\033[1m↗\033[0m", }; - if (!send_logs) - { - if (written_progress_chars) - message << RESTORE_CURSOR_POSITION CLEAR_TO_END_OF_LINE; - else - message << SAVE_CURSOR_POSITION; - } + auto indicator = indicators[increment % 8]; - message << DISABLE_LINE_WRAPPING; + if (!send_logs && written_progress_chars) + message << '\r'; size_t prefix_size = message.count(); - message << indicators[increment % 8] - << " Progress: "; + message << indicator << " Progress: "; message << formatReadableQuantity(progress.read_rows) << " rows, " @@ -1488,7 +1477,7 @@ private: else message << ". "; - written_progress_chars = message.count() - prefix_size - (increment % 8 == 7 ? 10 : 13); /// Don't count invisible output (escape sequences). + written_progress_chars = message.count() - prefix_size - (strlen(indicator) - 2); /// Don't count invisible output (escape sequences). /// If the approximate number of rows to process is known, we can display a progress bar and percentage. if (progress.total_rows_to_read > 0) @@ -1506,7 +1495,7 @@ private: if (show_progress_bar) { - ssize_t width_of_progress_bar = static_cast(terminal_width) - written_progress_chars - strlen(" 99%"); + ssize_t width_of_progress_bar = static_cast(getTerminalWidth()) - written_progress_chars - strlen(" 99%"); if (width_of_progress_bar > 0) { std::string bar = UnicodeBar::render(UnicodeBar::getWidth(progress.read_rows, 0, total_rows_corrected, width_of_progress_bar)); @@ -1521,7 +1510,8 @@ private: message << ' ' << (99 * progress.read_rows / total_rows_corrected) << '%'; } - message << ENABLE_LINE_WRAPPING; + message << CLEAR_TO_END_OF_LINE; + if (send_logs) message << '\n'; @@ -1589,7 +1579,11 @@ private: resetOutput(); if (is_interactive && !written_first_block) + { + if (written_progress_chars) + clearProgress(); std::cout << "Ok." << std::endl; + } } static void showClientVersion() @@ -1687,6 +1681,7 @@ public: stdin_is_a_tty = isatty(STDIN_FILENO); stdout_is_a_tty = isatty(STDOUT_FILENO); + uint64_t terminal_width = 0; if (stdin_is_a_tty) terminal_width = getTerminalWidth(); From e63fe6da8404f29a26da91fdc12753feaf31996e Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Sat, 11 Apr 2020 23:07:11 +0300 Subject: [PATCH 320/484] Update msgpack.cmake --- cmake/find/msgpack.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/find/msgpack.cmake b/cmake/find/msgpack.cmake index 093555bebc0..46344fc162f 100644 --- a/cmake/find/msgpack.cmake +++ b/cmake/find/msgpack.cmake @@ -2,7 +2,7 @@ option (USE_INTERNAL_MSGPACK_LIBRARY "Set to FALSE to use system msgpack library if (USE_INTERNAL_MSGPACK_LIBRARY) if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/msgpack-c/include/msgpack.hpp") - message(WARNING "submodule contrib/msgpack-c is missing. To fix try run: \n git submodule update --init --recursive") + message(WARNING "Submodule contrib/msgpack-c is missing. 
To fix try run: \n git submodule update --init --recursive") set(USE_INTERNAL_MSGPACK_LIBRARY 0) set(MISSING_INTERNAL_MSGPACK_LIBRARY 1) endif() From 121bf7b8c2bbfc2f09379afa2232a8b1b8754844 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 12 Apr 2020 00:37:19 +0300 Subject: [PATCH 321/484] Whitespace #9968 --- src/Storages/MergeTree/MergeTreeIndexFullText.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/MergeTree/MergeTreeIndexFullText.cpp b/src/Storages/MergeTree/MergeTreeIndexFullText.cpp index 93553e0619e..e42ac942362 100644 --- a/src/Storages/MergeTree/MergeTreeIndexFullText.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexFullText.cpp @@ -627,7 +627,7 @@ bool SplitTokenExtractor::next(const char * data, size_t len, size_t * pos, size // With the help of https://www.strchr.com/strcmp_and_strlen_using_sse_4.2 const auto alnum_chars_ranges = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, '\xFF', '\x80', 'z', 'a', 'Z', 'A', '9', '0'); - // Every bit represents if `haystack` character is in the ranges (1) or not(0) + // Every bit represents if `haystack` character is in the ranges (1) or not (0) const int result_bitmask = _mm_cvtsi128_si32(_mm_cmpestrm(alnum_chars_ranges, 8, haystack, haystack_length, _SIDD_CMP_RANGES)); #else // NOTE: -1 and +1 required since SSE2 has no `>=` and `<=` instructions on packed 8-bit integers (epi8). From 85448f4b133527e60af4ff56500f9d8fc1181dc7 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sun, 12 Apr 2020 00:41:52 +0300 Subject: [PATCH 322/484] Add test from the #2610 (closes: #2610) --- .../01227_distributed_global_in_issue_2610.reference | 3 +++ .../0_stateless/01227_distributed_global_in_issue_2610.sql | 6 ++++++ 2 files changed, 9 insertions(+) create mode 100644 tests/queries/0_stateless/01227_distributed_global_in_issue_2610.reference create mode 100644 tests/queries/0_stateless/01227_distributed_global_in_issue_2610.sql diff --git a/tests/queries/0_stateless/01227_distributed_global_in_issue_2610.reference b/tests/queries/0_stateless/01227_distributed_global_in_issue_2610.reference new file mode 100644 index 00000000000..083edaac248 --- /dev/null +++ b/tests/queries/0_stateless/01227_distributed_global_in_issue_2610.reference @@ -0,0 +1,3 @@ +2 +2 +2 diff --git a/tests/queries/0_stateless/01227_distributed_global_in_issue_2610.sql b/tests/queries/0_stateless/01227_distributed_global_in_issue_2610.sql new file mode 100644 index 00000000000..a063e417e3a --- /dev/null +++ b/tests/queries/0_stateless/01227_distributed_global_in_issue_2610.sql @@ -0,0 +1,6 @@ +-- Test from the issue https://github.com/ClickHouse/ClickHouse/issues/2610 +drop table if exists data_01227; +create table data_01227 (key Int) Engine=MergeTree() order by key; +insert into data_01227 select * from numbers(10); +select * from remote('127.1', currentDatabase(), data_01227) prewhere key global in (select key from data_01227 prewhere key = 2); +select * from cluster('test_cluster_two_shards', currentDatabase(), data_01227) prewhere key global in (select key from data_01227 prewhere key = 2); From cf9f00644e2a4365eeb8386deb1d5ecc06aad5ff Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 12 Apr 2020 01:13:06 +0300 Subject: [PATCH 323/484] Fix the issue with arrayJoin and PREWHERE optimization #10092 --- src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp b/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp index 
fa29494d1c9..749c0d64525 100644 --- a/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp +++ b/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp @@ -202,10 +202,10 @@ void MergeTreeWhereOptimizer::optimize(ASTSelectQuery & select) const prewhere_conditions.splice(prewhere_conditions.end(), where_conditions, cond_it); total_size_of_moved_conditions += cond_it->columns_size; - /// Move all other conditions that depend on the same set of columns. + /// Move all other viable conditions that depend on the same set of columns. for (auto jt = where_conditions.begin(); jt != where_conditions.end();) { - if (jt->columns_size == cond_it->columns_size && jt->identifiers == cond_it->identifiers) + if (jt->viable && jt->columns_size == cond_it->columns_size && jt->identifiers == cond_it->identifiers) prewhere_conditions.splice(prewhere_conditions.end(), where_conditions, jt++); else ++jt; From cf483b7ecc95d9f0cf1bccd11e53ceb0291c11af Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 12 Apr 2020 01:14:55 +0300 Subject: [PATCH 324/484] Added a test --- .../1_stateful/00093_prewhere_array_join.reference | 0 tests/queries/1_stateful/00093_prewhere_array_join.sql | 9 +++++++++ 2 files changed, 9 insertions(+) create mode 100644 tests/queries/1_stateful/00093_prewhere_array_join.reference create mode 100644 tests/queries/1_stateful/00093_prewhere_array_join.sql diff --git a/tests/queries/1_stateful/00093_prewhere_array_join.reference b/tests/queries/1_stateful/00093_prewhere_array_join.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/1_stateful/00093_prewhere_array_join.sql b/tests/queries/1_stateful/00093_prewhere_array_join.sql new file mode 100644 index 00000000000..a1263144bb1 --- /dev/null +++ b/tests/queries/1_stateful/00093_prewhere_array_join.sql @@ -0,0 +1,9 @@ +SELECT arrayJoin([SearchEngineID]) AS search_engine, URL FROM test.hits WHERE SearchEngineID != 0 AND search_engine != 0 FORMAT Null; + +SELECT + arrayJoin([0]) AS browser, + arrayJoin([SearchEngineID]) AS search_engine, + URL +FROM test.hits +WHERE 1 AND (SearchEngineID != 0) AND (browser != 0) AND (search_engine != 0) +FORMAT Null; From a332d8b01ecdd52b06abdd9f630597870a28b9fb Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sun, 12 Apr 2020 01:22:10 +0300 Subject: [PATCH 325/484] Cover GLOBAL IN for Distributed over Distributed --- tests/queries/0_stateless/01223_dist_on_dist.reference | 2 ++ tests/queries/0_stateless/01223_dist_on_dist.sql | 4 ++++ 2 files changed, 6 insertions(+) diff --git a/tests/queries/0_stateless/01223_dist_on_dist.reference b/tests/queries/0_stateless/01223_dist_on_dist.reference index 4a5dd8f316c..aca2f070db4 100644 --- a/tests/queries/0_stateless/01223_dist_on_dist.reference +++ b/tests/queries/0_stateless/01223_dist_on_dist.reference @@ -99,3 +99,5 @@ merge() distributed_group_by_no_merge 33 33 +GLOBAL IN +1 diff --git a/tests/queries/0_stateless/01223_dist_on_dist.sql b/tests/queries/0_stateless/01223_dist_on_dist.sql index 1b9175f622e..65a240fd48b 100644 --- a/tests/queries/0_stateless/01223_dist_on_dist.sql +++ b/tests/queries/0_stateless/01223_dist_on_dist.sql @@ -82,6 +82,10 @@ select count() from merge_dist_01223; select 'distributed_group_by_no_merge'; select count() from merge_dist_01223 settings distributed_group_by_no_merge=1; +-- global in +select 'GLOBAL IN'; +select distinct * from dist_01223 where key global in (select toInt32(1)); + drop table merge_dist_01223; drop table dist_01223; drop table dist_layer_01223; From 
29189d427604a49b3ce6eb1bd7210efb85f41d55 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 12 Apr 2020 01:33:16 +0300 Subject: [PATCH 326/484] Also add stateless test to illustrate the issue more clear --- .../0_stateless/01115_prewhere_array_join.reference | 0 tests/queries/0_stateless/01115_prewhere_array_join.sql | 7 +++++++ 2 files changed, 7 insertions(+) create mode 100644 tests/queries/0_stateless/01115_prewhere_array_join.reference create mode 100644 tests/queries/0_stateless/01115_prewhere_array_join.sql diff --git a/tests/queries/0_stateless/01115_prewhere_array_join.reference b/tests/queries/0_stateless/01115_prewhere_array_join.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/01115_prewhere_array_join.sql b/tests/queries/0_stateless/01115_prewhere_array_join.sql new file mode 100644 index 00000000000..e614bdf402b --- /dev/null +++ b/tests/queries/0_stateless/01115_prewhere_array_join.sql @@ -0,0 +1,7 @@ +DROP TABLE IF EXISTS prewhere; + +CREATE TABLE prewhere (light UInt8, heavy String) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO prewhere SELECT 0, randomPrintableASCII(10000) FROM numbers(10000); +SELECT arrayJoin([light]) != 0 AS cond, length(heavy) FROM prewhere WHERE light != 0 AND cond != 0; + +DROP TABLE prewhere; From 2adeabd3c77b63b89b17ef00ec1e182c53aeb106 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Sun, 12 Apr 2020 01:58:45 +0300 Subject: [PATCH 327/484] Update Settings.h --- src/Core/Settings.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 342d9bef58e..8138af31d5f 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -406,7 +406,7 @@ struct Settings : public SettingsCollection M(SettingBool, validate_polygons, true, "Throw exception if polygon is invalid in function pointInPolygon (e.g. self-tangent, self-intersecting). 
If the setting is false, the function will accept invalid polygons but may silently return wrong result.", 0) \ M(SettingUInt64, max_parser_depth, 1000, "Maximum parser depth.", 0) \ M(SettingSeconds, temporary_live_view_timeout, DEFAULT_TEMPORARY_LIVE_VIEW_TIMEOUT_SEC, "Timeout after which temporary live view is deleted.", 0) \ - M(SettingBool, transform_null_in, false, "Enable null verification of the 'IN' operator.", 0) \ + M(SettingBool, transform_null_in, false, "If enabled, NULL values will be matched with 'IN' operator as if they are considered equal.", 0) \ M(SettingBool, allow_nondeterministic_mutations, false, "Allow non-deterministic functions in ALTER UPDATE/ALTER DELETE statements", 0) \ M(SettingSeconds, lock_acquire_timeout, DBMS_DEFAULT_LOCK_ACQUIRE_TIMEOUT_SEC, "How long locking request should wait before failing", 0) \ \ From 194dcc01fb96f5e808d7ba3b22523037b7d2e98a Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sun, 12 Apr 2020 00:28:04 +0300 Subject: [PATCH 328/484] Allow literals for GLOBAL IN --- src/Interpreters/GlobalSubqueriesVisitor.h | 15 ++++++++++++++- .../01226_dist_on_dist_global_in.reference | 6 ++++++ .../01226_dist_on_dist_global_in.sql | 10 ++++++++++ .../01224_dist_on_dist_global_in.reference | 4 ---- .../bugs/01224_dist_on_dist_global_in.sql | 18 ------------------ 5 files changed, 30 insertions(+), 23 deletions(-) create mode 100644 tests/queries/0_stateless/01226_dist_on_dist_global_in.reference create mode 100644 tests/queries/0_stateless/01226_dist_on_dist_global_in.sql delete mode 100644 tests/queries/bugs/01224_dist_on_dist_global_in.reference delete mode 100644 tests/queries/bugs/01224_dist_on_dist_global_in.sql diff --git a/src/Interpreters/GlobalSubqueriesVisitor.h b/src/Interpreters/GlobalSubqueriesVisitor.h index e577219629c..78d98805814 100644 --- a/src/Interpreters/GlobalSubqueriesVisitor.h +++ b/src/Interpreters/GlobalSubqueriesVisitor.h @@ -2,6 +2,7 @@ #include #include +#include #include #include #include @@ -166,7 +167,19 @@ private: { if (func.name == "globalIn" || func.name == "globalNotIn") { - data.addExternalStorage(func.arguments->children[1]); + ASTPtr & ast = func.arguments->children[1]; + + /// Literal can use regular IN + if (ast->as()) + { + if (func.name == "globalIn") + func.name = "in"; + else + func.name = "notIn"; + return; + } + + data.addExternalStorage(ast); data.has_global_subqueries = true; } } diff --git a/tests/queries/0_stateless/01226_dist_on_dist_global_in.reference b/tests/queries/0_stateless/01226_dist_on_dist_global_in.reference new file mode 100644 index 00000000000..3d8d7fb770d --- /dev/null +++ b/tests/queries/0_stateless/01226_dist_on_dist_global_in.reference @@ -0,0 +1,6 @@ +GLOBAL IN +0 +0 +0 +0 +GLOBAL NOT IN diff --git a/tests/queries/0_stateless/01226_dist_on_dist_global_in.sql b/tests/queries/0_stateless/01226_dist_on_dist_global_in.sql new file mode 100644 index 00000000000..588ea9c1048 --- /dev/null +++ b/tests/queries/0_stateless/01226_dist_on_dist_global_in.sql @@ -0,0 +1,10 @@ +SELECT 'GLOBAL IN'; +select * from remote('localhost', system.one) where dummy global in (0); +select * from remote('localhost', system.one) where toUInt64(dummy) global in numbers(1); +select * from remote('localhost', system.one) where dummy global in system.one; +select * from remote('localhost', system.one) where dummy global in (select 0); +SELECT 'GLOBAL NOT IN'; +select * from remote('localhost', system.one) where dummy global not in (0); +select * from remote('localhost', system.one) where toUInt64(dummy) 
global not in numbers(1); +select * from remote('localhost', system.one) where dummy global not in system.one; +select * from remote('localhost', system.one) where dummy global not in (select 0); diff --git a/tests/queries/bugs/01224_dist_on_dist_global_in.reference b/tests/queries/bugs/01224_dist_on_dist_global_in.reference deleted file mode 100644 index 7f75aa873cb..00000000000 --- a/tests/queries/bugs/01224_dist_on_dist_global_in.reference +++ /dev/null @@ -1,4 +0,0 @@ -GLOBAL IN distributed_group_by_no_merge -1 -GLOBAL IN -1 diff --git a/tests/queries/bugs/01224_dist_on_dist_global_in.sql b/tests/queries/bugs/01224_dist_on_dist_global_in.sql deleted file mode 100644 index e363fef2d2b..00000000000 --- a/tests/queries/bugs/01224_dist_on_dist_global_in.sql +++ /dev/null @@ -1,18 +0,0 @@ -create table if not exists data_01224 (key Int) Engine=Memory(); -create table if not exists dist_layer_01224 as data_01224 Engine=Distributed(test_cluster_two_shards, currentDatabase(), data_01224); -create table if not exists dist_01224 as data_01224 Engine=Distributed(test_cluster_two_shards, currentDatabase(), dist_layer_01224); - -select * from dist_01224; -insert into data_01224 select * from numbers(3); - --- "Table expression is undefined, Method: ExpressionAnalyzer::interpretSubquery" -select 'GLOBAL IN distributed_group_by_no_merge'; -select distinct * from dist_01224 where key global in (1) settings distributed_group_by_no_merge=1; - --- requires #9923 -select 'GLOBAL IN'; -select distinct * from dist_01224 where key global in (1); - -drop table dist_01224; -drop table dist_layer_01224; -drop table data_01224; From c5c1a8def75e1b47fd8e77553fdbdf980631ffc9 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 12 Apr 2020 03:03:05 +0300 Subject: [PATCH 329/484] Added a test from Andrey #2641 --- .../01116_asof_join_dolbyzerr.reference | 3 +++ .../0_stateless/01116_asof_join_dolbyzerr.sql | 18 ++++++++++++++++++ 2 files changed, 21 insertions(+) create mode 100644 tests/queries/0_stateless/01116_asof_join_dolbyzerr.reference create mode 100644 tests/queries/0_stateless/01116_asof_join_dolbyzerr.sql diff --git a/tests/queries/0_stateless/01116_asof_join_dolbyzerr.reference b/tests/queries/0_stateless/01116_asof_join_dolbyzerr.reference new file mode 100644 index 00000000000..1055a67ea5b --- /dev/null +++ b/tests/queries/0_stateless/01116_asof_join_dolbyzerr.reference @@ -0,0 +1,3 @@ +v1 o1 ['s2','s1'] +v1 o2 ['s4'] +v2 o3 ['s5','s3'] diff --git a/tests/queries/0_stateless/01116_asof_join_dolbyzerr.sql b/tests/queries/0_stateless/01116_asof_join_dolbyzerr.sql new file mode 100644 index 00000000000..8a94b6ddd24 --- /dev/null +++ b/tests/queries/0_stateless/01116_asof_join_dolbyzerr.sql @@ -0,0 +1,18 @@ +CREATE TEMPORARY TABLE sessions (date DateTime, visitorId String, sessionId String); +CREATE TEMPORARY TABLE orders (date DateTime, visitorId String, orderId String); + +INSERT INTO sessions VALUES ('2018-01-01 00:00:00', 'v1', 's1'), ('2018-01-02 00:00:00', 'v1', 's2'), ('2018-01-03 00:00:00', 'v2', 's3'), ('2018-01-04 00:00:00', 'v1', 's4'), ('2018-01-05 00:00:00', 'v2', 's5'), ('2018-01-06 00:00:00', 'v3', 's6'); +INSERT INTO orders VALUES ('2018-01-03 00:00:00', 'v1', 'o1'), ('2018-01-05 00:00:00', 'v1', 'o2'), ('2018-01-06 00:00:00', 'v2', 'o3'); + +SELECT + visitorId, + orderId, + groupUniqArray(sessionId) +FROM sessions +ASOF INNER JOIN orders ON (sessions.visitorId = orders.visitorId) AND (sessions.date <= orders.date) +GROUP BY + visitorId, + orderId +ORDER BY + visitorId ASC, + orderId 
ASC; From dec3e0f9861f7876ee8a9dd3a97b9f88240cd284 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 12 Apr 2020 03:38:25 +0300 Subject: [PATCH 330/484] Make least and greatest functions case insensitive for compatibility with MySQL --- src/Functions/greatest.cpp | 2 +- src/Functions/least.cpp | 2 +- tests/queries/0_stateless/01117_greatest_least_case.reference | 2 ++ tests/queries/0_stateless/01117_greatest_least_case.sql | 2 ++ 4 files changed, 6 insertions(+), 2 deletions(-) create mode 100644 tests/queries/0_stateless/01117_greatest_least_case.reference create mode 100644 tests/queries/0_stateless/01117_greatest_least_case.sql diff --git a/src/Functions/greatest.cpp b/src/Functions/greatest.cpp index 6eb123708a4..9abf85e751b 100644 --- a/src/Functions/greatest.cpp +++ b/src/Functions/greatest.cpp @@ -57,7 +57,7 @@ using FunctionGreatest = FunctionBinaryArithmetic; void registerFunctionGreatest(FunctionFactory & factory) { - factory.registerFunction(); + factory.registerFunction(FunctionFactory::CaseInsensitive); } } diff --git a/src/Functions/least.cpp b/src/Functions/least.cpp index 47af759c956..f2e7c1f15d2 100644 --- a/src/Functions/least.cpp +++ b/src/Functions/least.cpp @@ -57,7 +57,7 @@ using FunctionLeast = FunctionBinaryArithmetic; void registerFunctionLeast(FunctionFactory & factory) { - factory.registerFunction(); + factory.registerFunction(FunctionFactory::CaseInsensitive); } } diff --git a/tests/queries/0_stateless/01117_greatest_least_case.reference b/tests/queries/0_stateless/01117_greatest_least_case.reference new file mode 100644 index 00000000000..4bbcfcf5682 --- /dev/null +++ b/tests/queries/0_stateless/01117_greatest_least_case.reference @@ -0,0 +1,2 @@ +2 +-1 diff --git a/tests/queries/0_stateless/01117_greatest_least_case.sql b/tests/queries/0_stateless/01117_greatest_least_case.sql new file mode 100644 index 00000000000..21bfd240f5a --- /dev/null +++ b/tests/queries/0_stateless/01117_greatest_least_case.sql @@ -0,0 +1,2 @@ +SELECT GREATEST(1, 2); +SELECT LEAST(1, -1); From 754967bde6178ef7fd358b0fd7c37b92ce264b94 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 12 Apr 2020 04:24:00 +0300 Subject: [PATCH 331/484] Add function "isConstant" --- src/Functions/isConstant.cpp | 52 +++++++++++++++++++ .../registerFunctionsMiscellaneous.cpp | 2 + .../0_stateless/01118_is_constant.reference | 9 ++++ .../queries/0_stateless/01118_is_constant.sql | 10 ++++ 4 files changed, 73 insertions(+) create mode 100644 src/Functions/isConstant.cpp create mode 100644 tests/queries/0_stateless/01118_is_constant.reference create mode 100644 tests/queries/0_stateless/01118_is_constant.sql diff --git a/src/Functions/isConstant.cpp b/src/Functions/isConstant.cpp new file mode 100644 index 00000000000..705b4eaac78 --- /dev/null +++ b/src/Functions/isConstant.cpp @@ -0,0 +1,52 @@ +#include +#include +#include +#include + + +namespace DB +{ + +/// Returns 1 if and only if the argument is constant expression. +/// This function is exists for development, debugging and demonstration purposes. 
+class FunctionIsConstant : public IFunction +{ +public: + static constexpr auto name = "isConstant"; + static FunctionPtr create(const Context &) + { + return std::make_shared(); + } + + String getName() const override + { + return name; + } + + bool useDefaultImplementationForNulls() const override { return false; } + + size_t getNumberOfArguments() const override + { + return 1; + } + + DataTypePtr getReturnTypeImpl(const DataTypes & /*arguments*/) const override + { + return std::make_shared(); + } + + void executeImpl(Block & block, const ColumnNumbers & arguments, size_t result, size_t input_rows_count) override + { + const auto & elem = block.getByPosition(arguments[0]); + block.getByPosition(result).column = ColumnUInt8::create(input_rows_count, isColumnConst(*elem.column)); + } +}; + + +void registerFunctionIsConstant(FunctionFactory & factory) +{ + factory.registerFunction(); +} + +} + diff --git a/src/Functions/registerFunctionsMiscellaneous.cpp b/src/Functions/registerFunctionsMiscellaneous.cpp index 44e26542c7d..30cab4cc53a 100644 --- a/src/Functions/registerFunctionsMiscellaneous.cpp +++ b/src/Functions/registerFunctionsMiscellaneous.cpp @@ -56,6 +56,7 @@ void registerFunctionBasename(FunctionFactory &); void registerFunctionTransform(FunctionFactory &); void registerFunctionGetMacro(FunctionFactory &); void registerFunctionGetScalar(FunctionFactory &); +void registerFunctionIsConstant(FunctionFactory &); #if USE_ICU void registerFunctionConvertCharset(FunctionFactory &); @@ -114,6 +115,7 @@ void registerFunctionsMiscellaneous(FunctionFactory & factory) registerFunctionTransform(factory); registerFunctionGetMacro(factory); registerFunctionGetScalar(factory); + registerFunctionIsConstant(factory); #if USE_ICU registerFunctionConvertCharset(factory); diff --git a/tests/queries/0_stateless/01118_is_constant.reference b/tests/queries/0_stateless/01118_is_constant.reference new file mode 100644 index 00000000000..aba2b912a08 --- /dev/null +++ b/tests/queries/0_stateless/01118_is_constant.reference @@ -0,0 +1,9 @@ +1 +1 +0 +1 +1 +--- +0 +0 +--- diff --git a/tests/queries/0_stateless/01118_is_constant.sql b/tests/queries/0_stateless/01118_is_constant.sql new file mode 100644 index 00000000000..5cbff986dd2 --- /dev/null +++ b/tests/queries/0_stateless/01118_is_constant.sql @@ -0,0 +1,10 @@ +select isConstant(1); +select isConstant([1]); +select isConstant(arrayJoin([1])); +SELECT isConstant((SELECT 1)); +SELECT isConstant(x) FROM (SELECT 1 x); +SELECT '---'; +SELECT isConstant(x) FROM (SELECT 1 x UNION ALL SELECT 2); +SELECT '---'; +select isConstant(); -- { serverError 42 } +select isConstant(1, 2); -- { serverError 42 } From 716ddc4580381ca173824f3d5733c898f65b1777 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Sun, 12 Apr 2020 04:26:11 +0300 Subject: [PATCH 332/484] Update isConstant.cpp --- src/Functions/isConstant.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Functions/isConstant.cpp b/src/Functions/isConstant.cpp index 705b4eaac78..5416fbd2d3e 100644 --- a/src/Functions/isConstant.cpp +++ b/src/Functions/isConstant.cpp @@ -8,7 +8,7 @@ namespace DB { /// Returns 1 if and only if the argument is constant expression. -/// This function is exists for development, debugging and demonstration purposes. +/// This function exists for development, debugging and demonstration purposes. 
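+/// For example: SELECT isConstant(1) returns 1, while SELECT isConstant(arrayJoin([1])) returns 0 (see tests/queries/0_stateless/01118_is_constant.sql above).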
class FunctionIsConstant : public IFunction { public: From 01bc88a85113cf7b47a2026a06a090ec86c4e230 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 12 Apr 2020 05:05:30 +0300 Subject: [PATCH 333/484] Fix wrong whitespaces in debug output --- src/Interpreters/ColumnNamesContext.cpp | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/src/Interpreters/ColumnNamesContext.cpp b/src/Interpreters/ColumnNamesContext.cpp index 4d23c6f0e8b..380d5f9ebc3 100644 --- a/src/Interpreters/ColumnNamesContext.cpp +++ b/src/Interpreters/ColumnNamesContext.cpp @@ -87,6 +87,7 @@ std::ostream & operator << (std::ostream & os, const ColumnNamesContext & cols) os << "'" << pr.first << "'"; for (auto & alias : pr.second.aliases) os << "/'" << alias << "'"; + os << ", "; } os << " source_tables: "; for (const auto & x : cols.tables) @@ -94,24 +95,24 @@ std::ostream & operator << (std::ostream & os, const ColumnNamesContext & cols) auto alias = x.alias(); auto name = x.name(); if (alias && name) - os << "'" << *alias << "'/'" << *name << "' "; + os << "'" << *alias << "'/'" << *name << "', "; else if (alias) - os << "'" << *alias << "' "; + os << "'" << *alias << "', "; else if (name) - os << "'" << *name << "' "; + os << "'" << *name << "', "; } os << "table_aliases: "; for (const auto & x : cols.table_aliases) - os << "'" << x << "' "; + os << "'" << x << "', "; os << "complex_aliases: "; for (const auto & x : cols.complex_aliases) - os << "'" << x << "' "; + os << "'" << x << "', "; os << "masked_columns: "; for (const auto & x : cols.masked_columns) - os << "'" << x << "' "; + os << "'" << x << "', "; os << "array_join_columns: "; for (const auto & x : cols.array_join_columns) - os << "'" << x << "' "; + os << "'" << x << "', "; return os; } From f8e1f1b69bd565111a7c8b748ef6d96380485ad7 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 12 Apr 2020 05:06:58 +0300 Subject: [PATCH 334/484] Fix wrong whitespaces in debug output --- src/Interpreters/ColumnNamesContext.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Interpreters/ColumnNamesContext.cpp b/src/Interpreters/ColumnNamesContext.cpp index 380d5f9ebc3..c8fde183d96 100644 --- a/src/Interpreters/ColumnNamesContext.cpp +++ b/src/Interpreters/ColumnNamesContext.cpp @@ -89,7 +89,7 @@ std::ostream & operator << (std::ostream & os, const ColumnNamesContext & cols) os << "/'" << alias << "'"; os << ", "; } - os << " source_tables: "; + os << "source_tables: "; for (const auto & x : cols.tables) { auto alias = x.alias(); From d43903211aad13b46ca2cf45b036b30a30fb5983 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 12 Apr 2020 05:51:56 +0300 Subject: [PATCH 335/484] Better exception message #9810 --- src/Functions/FunctionsConversion.h | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/src/Functions/FunctionsConversion.h b/src/Functions/FunctionsConversion.h index d201b967fb1..b493aef4cac 100644 --- a/src/Functions/FunctionsConversion.h +++ b/src/Functions/FunctionsConversion.h @@ -2394,10 +2394,17 @@ protected: DataTypePtr getReturnType(const ColumnsWithTypeAndName & arguments) const override { - const auto type_col = checkAndGetColumnConst(arguments.back().column.get()); + const auto & column = arguments.back().column; + if (!column) + throw Exception("Second argument to " + getName() + " must be a constant string describing type." 
+ " Instead there is non-constant column of type " + arguments.back().type->getName(), + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + + const auto type_col = checkAndGetColumnConst(column.get()); if (!type_col) - throw Exception("Second argument to " + getName() + " must be a constant string describing type", - ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + throw Exception("Second argument to " + getName() + " must be a constant string describing type." + " Instead there is a column with the following structure: " + column->dumpStructure(), + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); return DataTypeFactory::instance().get(type_col->getValue()); } From d6544159ba78930b268229a810a44581c3e6b035 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Sun, 12 Apr 2020 06:18:21 +0300 Subject: [PATCH 336/484] Update HashJoin.cpp --- src/Interpreters/HashJoin.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/Interpreters/HashJoin.cpp b/src/Interpreters/HashJoin.cpp index d8c0d239c96..16d4932bb14 100644 --- a/src/Interpreters/HashJoin.cpp +++ b/src/Interpreters/HashJoin.cpp @@ -1162,15 +1162,15 @@ DataTypePtr HashJoin::joinGetReturnType(const String & column_name, bool or_null if (!sample_block_with_columns_to_add.has(column_name)) throw Exception("StorageJoin doesn't contain column " + column_name, ErrorCodes::LOGICAL_ERROR); - auto ctn = sample_block_with_columns_to_add.getByName(column_name); + auto elem = sample_block_with_columns_to_add.getByName(column_name); if (or_null) { if (!ctn.type->canBeInsideNullable()) - throw Exception("Type " + ctn.type->getName() + "cannot be inside Nullable", ErrorCodes::LOGICAL_ERROR); + throw Exception("Type " + elem.type->getName() + " cannot be inside Nullable", ErrorCodes::LOGICAL_ERROR); else - ctn.type = makeNullable(ctn.type); + elem.type = makeNullable(elem.type); } - return ctn.type; + return elem.type; } From a2418f94df9c58e8eba9704215d1f04e299b919f Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Sun, 12 Apr 2020 06:19:13 +0300 Subject: [PATCH 337/484] Update HashJoin.cpp --- src/Interpreters/HashJoin.cpp | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/Interpreters/HashJoin.cpp b/src/Interpreters/HashJoin.cpp index 16d4932bb14..5845fd131d2 100644 --- a/src/Interpreters/HashJoin.cpp +++ b/src/Interpreters/HashJoin.cpp @@ -1165,7 +1165,7 @@ DataTypePtr HashJoin::joinGetReturnType(const String & column_name, bool or_null auto elem = sample_block_with_columns_to_add.getByName(column_name); if (or_null) { - if (!ctn.type->canBeInsideNullable()) + if (!elem.type->canBeInsideNullable()) throw Exception("Type " + elem.type->getName() + " cannot be inside Nullable", ErrorCodes::LOGICAL_ERROR); else elem.type = makeNullable(elem.type); @@ -1194,15 +1194,15 @@ void HashJoin::joinGet(Block & block, const String & column_name, bool or_null) checkTypeOfKey(block, right_table_keys); - auto ctn = sample_block_with_columns_to_add.getByName(column_name); + auto elem = sample_block_with_columns_to_add.getByName(column_name); if (or_null) - ctn.type = makeNullable(ctn.type); - ctn.column = ctn.type->createColumn(); + elem.type = makeNullable(elem.type); + elem.column = elem.type->createColumn(); if ((strictness == ASTTableJoin::Strictness::Any || strictness == ASTTableJoin::Strictness::RightAny) && kind == ASTTableJoin::Kind::Left) { - joinGetImpl(block, {ctn}, std::get(data->maps)); + joinGetImpl(block, {elem}, std::get(data->maps)); } else throw Exception("joinGet only supports StorageJoin of type Left Any", 
ErrorCodes::LOGICAL_ERROR); From ca5172cc63fd8ec0c6e28e5c511f2bba86991b3b Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 12 Apr 2020 06:25:47 +0300 Subject: [PATCH 338/484] Merging #10094 --- src/Interpreters/HashJoin.cpp | 7 +------ .../0_stateless/01240_join_get_or_null.reference | 1 + tests/queries/0_stateless/01240_join_get_or_null.sql | 10 ++++++++-- 3 files changed, 10 insertions(+), 8 deletions(-) diff --git a/src/Interpreters/HashJoin.cpp b/src/Interpreters/HashJoin.cpp index 5845fd131d2..b8da03acb8b 100644 --- a/src/Interpreters/HashJoin.cpp +++ b/src/Interpreters/HashJoin.cpp @@ -1164,12 +1164,7 @@ DataTypePtr HashJoin::joinGetReturnType(const String & column_name, bool or_null throw Exception("StorageJoin doesn't contain column " + column_name, ErrorCodes::LOGICAL_ERROR); auto elem = sample_block_with_columns_to_add.getByName(column_name); if (or_null) - { - if (!elem.type->canBeInsideNullable()) - throw Exception("Type " + elem.type->getName() + " cannot be inside Nullable", ErrorCodes::LOGICAL_ERROR); - else - elem.type = makeNullable(elem.type); - } + elem.type = makeNullable(elem.type); return elem.type; } diff --git a/tests/queries/0_stateless/01240_join_get_or_null.reference b/tests/queries/0_stateless/01240_join_get_or_null.reference index dec7d2fabd2..96e34d5a44c 100644 --- a/tests/queries/0_stateless/01240_join_get_or_null.reference +++ b/tests/queries/0_stateless/01240_join_get_or_null.reference @@ -1 +1,2 @@ \N +\N diff --git a/tests/queries/0_stateless/01240_join_get_or_null.sql b/tests/queries/0_stateless/01240_join_get_or_null.sql index d1b9a07540a..48fd8228b55 100644 --- a/tests/queries/0_stateless/01240_join_get_or_null.sql +++ b/tests/queries/0_stateless/01240_join_get_or_null.sql @@ -1,7 +1,13 @@ DROP TABLE IF EXISTS join_test; CREATE TABLE join_test (id UInt16, num UInt16) engine = Join(ANY, LEFT, id); - SELECT joinGetOrNull('join_test', 'num', 500); - +DROP TABLE join_test; + +CREATE TABLE join_test (id UInt16, num Nullable(UInt16)) engine = Join(ANY, LEFT, id); +SELECT joinGetOrNull('join_test', 'num', 500); +DROP TABLE join_test; + +CREATE TABLE join_test (id UInt16, num Array(UInt16)) engine = Join(ANY, LEFT, id); +SELECT joinGetOrNull('join_test', 'num', 500); -- { serverError 43 } DROP TABLE join_test; From e7f399f6527841b4352dd1e480bac3a15587fef6 Mon Sep 17 00:00:00 2001 From: BohuTANG Date: Sun, 12 Apr 2020 13:37:39 +0800 Subject: [PATCH 339/484] MySQLHandler: max_allowed_packet returned by default when server setup with select variables --- programs/server/MySQLHandler.cpp | 6 ++++-- .../test_mysql_protocol/clients/java/0.reference | 4 ++-- .../integration/test_mysql_protocol/clients/java/Test.java | 7 ++++--- 3 files changed, 10 insertions(+), 7 deletions(-) diff --git a/programs/server/MySQLHandler.cpp b/programs/server/MySQLHandler.cpp index b72aa8104d3..bfab19061ce 100644 --- a/programs/server/MySQLHandler.cpp +++ b/programs/server/MySQLHandler.cpp @@ -284,15 +284,17 @@ void MySQLHandler::comQuery(ReadBuffer & payload) } else { - String replacement_query = "select ''"; + String replacement_query = "SELECT ''"; bool should_replace = false; bool with_output = false; // Translate query from MySQL to ClickHouse. - // This is a temporary workaround until ClickHouse supports the syntax "@@var_name". 
+ // Required parameters when setup: + // * max_allowed_packet, default 64MB, https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_max_allowed_packet if (isFederatedServerSetupSelectVarCommand(query)) { should_replace = true; + replacement_query = "SELECT 67108864 AS max_allowed_packet"; } // This is a workaround in order to support adding ClickHouse to MySQL using federated server. diff --git a/tests/integration/test_mysql_protocol/clients/java/0.reference b/tests/integration/test_mysql_protocol/clients/java/0.reference index bcf9e3dde94..3e3e20d1ebb 100644 --- a/tests/integration/test_mysql_protocol/clients/java/0.reference +++ b/tests/integration/test_mysql_protocol/clients/java/0.reference @@ -1,5 +1,5 @@ -33jdbc -44ck +33jdbcnull +44cknull 0 1 2 diff --git a/tests/integration/test_mysql_protocol/clients/java/Test.java b/tests/integration/test_mysql_protocol/clients/java/Test.java index 50ce824f67c..89659529679 100644 --- a/tests/integration/test_mysql_protocol/clients/java/Test.java +++ b/tests/integration/test_mysql_protocol/clients/java/Test.java @@ -5,8 +5,8 @@ import java.sql.SQLException; import java.sql.Statement; class JavaConnectorTest { - private static final String CREATE_TABLE_SQL = "CREATE TABLE IF NOT EXISTS default.test1 (age Int32, name String) Engine = Memory"; - private static final String INSERT_SQL = "INSERT INTO default.test1 VALUES(33, 'jdbc'),(44, 'ck')"; + private static final String CREATE_TABLE_SQL = "CREATE TABLE IF NOT EXISTS default.test1 (`age` Int32, `name` String, `int_nullable` Nullable(Int32)) Engine = Memory"; + private static final String INSERT_SQL = "INSERT INTO default.test1(`age`, `name`) VALUES(33, 'jdbc'),(44, 'ck')"; private static final String SELECT_SQL = "SELECT * FROM default.test1"; private static final String SELECT_NUMBER_SQL = "SELECT * FROM system.numbers LIMIT 13"; private static final String DROP_TABLE_SQL = "DROP TABLE default.test1"; @@ -41,7 +41,7 @@ class JavaConnectorTest { } } - String jdbcUrl = String.format("jdbc:mysql://%s:%s/%s?maxAllowedPacket=67108864&useSSL=false", host, port, database); + String jdbcUrl = String.format("jdbc:mysql://%s:%s/%s", host, port, database); Connection conn = null; Statement stmt = null; @@ -55,6 +55,7 @@ class JavaConnectorTest { while (rs.next()) { System.out.print(rs.getString("age")); System.out.print(rs.getString("name")); + System.out.print(rs.getString("int_nullable")); System.out.println(); } From 365b5207b7d02384f4c3e28402c4b3d748f443df Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sun, 5 Apr 2020 00:07:00 +0300 Subject: [PATCH 340/484] Add log_queries_min_type to filter which entries will be written to query_log Can be used to write into query_log only failed queries (i.e. 
on memory exceeded error), by using: set log_queries_min_type='EXCEPTION_WHILE_PROCESSING' --- docs/en/operations/settings/settings.md | 18 +++++++++++++++++ src/Core/Settings.h | 2 +- src/Core/SettingsCollection.cpp | 7 +++++++ src/Core/SettingsCollection.h | 10 ++++++++++ src/Interpreters/QueryLog.h | 9 ++------- src/Interpreters/executeQuery.cpp | 20 +++++++++---------- .../01231_log_queries_min_type.reference | 5 +++++ .../01231_log_queries_min_type.sql | 15 ++++++++++++++ 8 files changed, 68 insertions(+), 18 deletions(-) create mode 100644 tests/queries/0_stateless/01231_log_queries_min_type.reference create mode 100644 tests/queries/0_stateless/01231_log_queries_min_type.sql diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index 69c444ebaef..37b4c713f91 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -507,6 +507,24 @@ Example: log_queries=1 ``` +## log\_queries\_min\_type {#settings-log-queries-min-type} + +`query_log` minimal type to log. + +Possible values: +- `QUERY_START` (`=1`) +- `QUERY_FINISH` (`=2`) +- `EXCEPTION_BEFORE_START` (`=3`) +- `EXCEPTION_WHILE_PROCESSING` (`=4`) + +Default value: `QUERY_START`. + +Can be used to limit which entiries will goes to `query_log`, say you are interesting only in errors, then you can use `EXCEPTION_WHILE_PROCESSING`: + +``` text +log_queries_min_type='EXCEPTION_WHILE_PROCESSING' +``` + ## log\_query\_threads {#settings-log-query-threads} Setting up query threads logging. diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 8138af31d5f..725171d4a1b 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -149,7 +149,7 @@ struct Settings : public SettingsCollection M(SettingInt64, os_thread_priority, 0, "If non zero - set corresponding 'nice' value for query processing threads. Can be used to adjust query priority for OS scheduler.", 0) \ \ M(SettingBool, log_queries, 0, "Log requests and write the log to the system table.", 0) \ - \ + M(SettingLogQueriesType, log_queries_min_type, QueryLogElementType::QUERY_START, "query_log minimal type to log, possible values (from low to high): QUERY_START, QUERY_FINISH, EXCEPTION_BEFORE_START, EXCEPTION_WHILE_PROCESSING.", 0) \ M(SettingUInt64, log_queries_cut_to_length, 100000, "If query length is greater than specified threshold (in bytes), then cut query when writing to query log. 
Also limit length of printed query in ordinary text log.", 0) \ \ M(SettingDistributedProductMode, distributed_product_mode, DistributedProductMode::DENY, "How are distributed subqueries performed inside IN or JOIN sections?", IMPORTANT) \ diff --git a/src/Core/SettingsCollection.cpp b/src/Core/SettingsCollection.cpp index 6d879b27181..238ac1c3c62 100644 --- a/src/Core/SettingsCollection.cpp +++ b/src/Core/SettingsCollection.cpp @@ -542,6 +542,13 @@ IMPLEMENT_SETTING_ENUM(FormatSettings::DateTimeInputFormat, DATE_TIME_INPUT_FORM M(trace, "trace") IMPLEMENT_SETTING_ENUM(LogsLevel, LOGS_LEVEL_LIST_OF_NAMES, ErrorCodes::BAD_ARGUMENTS) +#define LOG_QUERIES_TYPE_LIST_OF_NAMES(M) \ + M(QUERY_START, "QUERY_START") \ + M(QUERY_FINISH, "QUERY_FINISH") \ + M(EXCEPTION_BEFORE_START, "EXCEPTION_BEFORE_START") \ + M(EXCEPTION_WHILE_PROCESSING, "EXCEPTION_WHILE_PROCESSING") +IMPLEMENT_SETTING_ENUM(QueryLogElementType, LOG_QUERIES_TYPE_LIST_OF_NAMES, ErrorCodes::BAD_ARGUMENTS) + namespace details { diff --git a/src/Core/SettingsCollection.h b/src/Core/SettingsCollection.h index da21412b7c1..d93772e86ed 100644 --- a/src/Core/SettingsCollection.h +++ b/src/Core/SettingsCollection.h @@ -298,6 +298,16 @@ enum class LogsLevel }; using SettingLogsLevel = SettingEnum; +// Make it signed for compatibility with DataTypeEnum8 +enum QueryLogElementType : int8_t +{ + QUERY_START = 1, + QUERY_FINISH = 2, + EXCEPTION_BEFORE_START = 3, + EXCEPTION_WHILE_PROCESSING = 4, +}; +using SettingLogQueriesType = SettingEnum; + enum class SettingsBinaryFormat { diff --git a/src/Interpreters/QueryLog.h b/src/Interpreters/QueryLog.h index 836b37095e9..ec14f5e97fb 100644 --- a/src/Interpreters/QueryLog.h +++ b/src/Interpreters/QueryLog.h @@ -1,6 +1,7 @@ #pragma once #include +#include namespace ProfileEvents @@ -22,13 +23,7 @@ namespace DB /// A struct which will be inserted as row into query_log table struct QueryLogElement { - enum Type : int8_t // Make it signed for compatibility with DataTypeEnum8 - { - QUERY_START = 1, - QUERY_FINISH = 2, - EXCEPTION_BEFORE_START = 3, - EXCEPTION_WHILE_PROCESSING = 4, - }; + using Type = QueryLogElementType; Type type = QUERY_START; diff --git a/src/Interpreters/executeQuery.cpp b/src/Interpreters/executeQuery.cpp index c9c66832f08..68bebb83619 100644 --- a/src/Interpreters/executeQuery.cpp +++ b/src/Interpreters/executeQuery.cpp @@ -157,7 +157,7 @@ static void onExceptionBeforeStart(const String & query_for_logging, Context & c /// Log the start of query execution into the table if necessary. QueryLogElement elem; - elem.type = QueryLogElement::EXCEPTION_BEFORE_START; + elem.type = QueryLogElementType::EXCEPTION_BEFORE_START; elem.event_time = current_time; elem.query_start_time = current_time; @@ -175,7 +175,7 @@ static void onExceptionBeforeStart(const String & query_for_logging, Context & c /// Update performance counters before logging to query_log CurrentThread::finalizePerformanceCounters(); - if (settings.log_queries) + if (settings.log_queries && elem.type >= settings.log_queries_min_type) if (auto query_log = context.getQueryLog()) query_log->add(elem); } @@ -400,7 +400,7 @@ static std::tuple executeQueryImpl( { QueryLogElement elem; - elem.type = QueryLogElement::QUERY_START; + elem.type = QueryLogElementType::QUERY_START; elem.event_time = current_time; elem.query_start_time = current_time; @@ -412,7 +412,7 @@ static std::tuple executeQueryImpl( bool log_queries = settings.log_queries && !internal; /// Log into system table start of query execution, if need. 
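These hunks gate every write to `query_log` on `elem.type >= settings.log_queries_min_type`, both for the early-exception path above and for the start/finish/exception callbacks below. A minimal sketch of the resulting behavior, assuming `query_log` is enabled in the server config (this only mirrors the test added at the end of this patch, it is not an additional feature):

```sql
SET log_queries = 1;
SET log_queries_min_type = 'EXCEPTION_WHILE_PROCESSING'; -- drop QUERY_START/QUERY_FINISH entries
SELECT 1;          -- finishes successfully, so nothing is written to query_log
SYSTEM FLUSH LOGS;
SELECT type, query FROM system.query_log ORDER BY event_time DESC LIMIT 5;
```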
- if (log_queries) + if (log_queries && elem.type >= settings.log_queries_min_type) { if (settings.log_query_settings) elem.query_settings = std::make_shared(context.getSettingsRef()); @@ -422,7 +422,7 @@ static std::tuple executeQueryImpl( } /// Also make possible for caller to log successful query finish and exception during execution. - auto finish_callback = [elem, &context, log_queries] (IBlockInputStream * stream_in, IBlockOutputStream * stream_out) mutable + auto finish_callback = [elem, &context, log_queries, log_queries_min_type = settings.log_queries_min_type] (IBlockInputStream * stream_in, IBlockOutputStream * stream_out) mutable { QueryStatus * process_list_elem = context.getProcessListElement(); @@ -436,7 +436,7 @@ static std::tuple executeQueryImpl( double elapsed_seconds = info.elapsed_seconds; - elem.type = QueryLogElement::QUERY_FINISH; + elem.type = QueryLogElementType::QUERY_FINISH; elem.event_time = time(nullptr); elem.query_duration_ms = elapsed_seconds * 1000; @@ -484,19 +484,19 @@ static std::tuple executeQueryImpl( elem.thread_ids = std::move(info.thread_ids); elem.profile_counters = std::move(info.profile_counters); - if (log_queries) + if (log_queries && elem.type >= log_queries_min_type) { if (auto query_log = context.getQueryLog()) query_log->add(elem); } }; - auto exception_callback = [elem, &context, log_queries, quota(quota)] () mutable + auto exception_callback = [elem, &context, log_queries, log_queries_min_type = settings.log_queries_min_type, quota(quota)] () mutable { if (quota) quota->used(Quota::ERRORS, 1, /* check_exceeded = */ false); - elem.type = QueryLogElement::EXCEPTION_WHILE_PROCESSING; + elem.type = QueryLogElementType::EXCEPTION_WHILE_PROCESSING; elem.event_time = time(nullptr); elem.query_duration_ms = 1000 * (elem.event_time - elem.query_start_time); @@ -529,7 +529,7 @@ static std::tuple executeQueryImpl( logException(context, elem); /// In case of exception we log internal queries also - if (log_queries) + if (log_queries && elem.type >= log_queries_min_type) { if (auto query_log = context.getQueryLog()) query_log->add(elem); diff --git a/tests/queries/0_stateless/01231_log_queries_min_type.reference b/tests/queries/0_stateless/01231_log_queries_min_type.reference new file mode 100644 index 00000000000..a358d022033 --- /dev/null +++ b/tests/queries/0_stateless/01231_log_queries_min_type.reference @@ -0,0 +1,5 @@ +01231_log_queries_min_type/QUERY_START +2 +01231_log_queries_min_type/EXCEPTION_BEFORE_START +2 +3 diff --git a/tests/queries/0_stateless/01231_log_queries_min_type.sql b/tests/queries/0_stateless/01231_log_queries_min_type.sql new file mode 100644 index 00000000000..f2229c94a8a --- /dev/null +++ b/tests/queries/0_stateless/01231_log_queries_min_type.sql @@ -0,0 +1,15 @@ +set log_queries=1; + +select '01231_log_queries_min_type/QUERY_START'; +system flush logs; +select count() from system.query_log where query like '%01231_log_queries_min_type/%' and query not like '%system.query_log%' and event_date = today() and event_time >= now() - interval 1 minute; + +set log_queries_min_type='EXCEPTION_BEFORE_START'; +select '01231_log_queries_min_type/EXCEPTION_BEFORE_START'; +system flush logs; +select count() from system.query_log where query like '%01231_log_queries_min_type/%' and query not like '%system.query_log%' and event_date = today() and event_time >= now() - interval 1 minute; + +set log_queries_min_type='EXCEPTION_WHILE_PROCESSING'; +select '01231_log_queries_min_type/', max(number) from system.numbers limit 1e6 settings 
max_rows_to_read='100K'; -- { serverError 158; } +system flush logs; +select count() from system.query_log where query like '%01231_log_queries_min_type/%' and query not like '%system.query_log%' and event_date = today() and event_time >= now() - interval 1 minute; From 676964de658b21e56a49408d708dcad689c14616 Mon Sep 17 00:00:00 2001 From: Maroun Maroun Date: Sun, 12 Apr 2020 12:26:06 +0300 Subject: [PATCH 341/484] Fix typo in the getting started tutorial: "it's" -> "its" (#10201) --- docs/en/getting_started/tutorial.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/getting_started/tutorial.md b/docs/en/getting_started/tutorial.md index 08cca45d21d..9763f814d59 100644 --- a/docs/en/getting_started/tutorial.md +++ b/docs/en/getting_started/tutorial.md @@ -108,7 +108,7 @@ Syntax for creating tables is way more complicated compared to databases (see [r 1. Name of table to create. 2. Table schema, i.e. list of columns and their [data types](../sql_reference/data_types/index.md). -3. [Table engine](../engines/table_engines/index.md) and it’s settings, which determines all the details on how queries to this table will be physically executed. +3. [Table engine](../engines/table_engines/index.md) and its settings, which determines all the details on how queries to this table will be physically executed. Yandex.Metrica is a web analytics service, and sample dataset doesn’t cover its full functionality, so there are only two tables to create: From f5c463e9adccea74507720c71f0456d44c4a54d6 Mon Sep 17 00:00:00 2001 From: BohuTANG Date: Sun, 12 Apr 2020 21:28:36 +0800 Subject: [PATCH 342/484] Fix path and typo in the tests.md --- docs/en/development/tests.md | 12 ++++++------ docs/es/development/tests.md | 6 +++--- docs/fa/development/tests.md | 6 +++--- docs/fr/development/tests.md | 6 +++--- docs/ja/development/tests.md | 6 +++--- 5 files changed, 18 insertions(+), 18 deletions(-) diff --git a/docs/en/development/tests.md b/docs/en/development/tests.md index 02620b92367..45adb221b5b 100644 --- a/docs/en/development/tests.md +++ b/docs/en/development/tests.md @@ -15,7 +15,7 @@ Tests are located in `queries` directory. There are two subdirectories: `statele Each test can be one of two types: `.sql` and `.sh`. `.sql` test is the simple SQL script that is piped to `clickhouse-client --multiquery --testmode`. `.sh` test is a script that is run by itself. -To run all tests, use `testskhouse-test` tool. Look `--help` for the list of possible options. You can simply run all tests or run subset of tests filtered by substring in test name: `./clickhouse-test substring`. +To run all tests, use `clickhouse-test` tool. Look `--help` for the list of possible options. You can simply run all tests or run subset of tests filtered by substring in test name: `./clickhouse-test substring`. The most simple way to invoke functional tests is to copy `clickhouse-client` to `/usr/bin/`, run `clickhouse-server` and then run `./clickhouse-test` from its own directory. @@ -34,13 +34,13 @@ disable these groups of tests using `--no-zookeeper`, `--no-shard` and ## Known Bugs {#known-bugs} -If we know some bugs that can be easily reproduced by functional tests, we place prepared functional tests in `queries/bugs` directory. These tests will be moved to `teststests_stateless` when bugs are fixed. +If we know some bugs that can be easily reproduced by functional tests, we place prepared functional tests in `tests/queries/bugs` directory. 
These tests will be moved to `tests/queries/0_stateless` when bugs are fixed. ## Integration Tests {#integration-tests} Integration tests allow to test ClickHouse in clustered configuration and ClickHouse interaction with other servers like MySQL, Postgres, MongoDB. They are useful to emulate network splits, packet drops, etc. These tests are run under Docker and create multiple containers with various software. -See `testsgration/README.md` on how to run these tests. +See `tests/integration/README.md` on how to run these tests. Note that integration of ClickHouse with third-party drivers is not tested. Also we currently don’t have integration tests with our JDBC and ODBC drivers. @@ -54,7 +54,7 @@ It’s not necessarily to have unit tests if the code is already covered by func Performance tests allow to measure and compare performance of some isolated part of ClickHouse on synthetic queries. Tests are located at `tests/performance`. Each test is represented by `.xml` file with description of test case. Tests are run with `clickhouse performance-test` tool (that is embedded in `clickhouse` binary). See `--help` for invocation. -Each test run one or miltiple queries (possibly with combinations of parameters) in a loop with some conditions for stop (like “maximum execution speed is not changing in three seconds”) and measure some metrics about query performance (like “maximum execution speed”). Some tests can contain preconditions on preloaded test dataset. +Each test run one or multiple queries (possibly with combinations of parameters) in a loop with some conditions for stop (like “maximum execution speed is not changing in three seconds”) and measure some metrics about query performance (like “maximum execution speed”). Some tests can contain preconditions on preloaded test dataset. If you want to improve performance of ClickHouse in some scenario, and if improvements can be observed on simple queries, it is highly recommended to write a performance test. It always makes sense to use `perf top` or other perf tools during your tests. @@ -64,13 +64,13 @@ Some programs in `tests` directory are not prepared tests, but are test tools. F You can also place pair of files `.sh` and `.reference` along with the tool to run it on some predefined input - then script result can be compared to `.reference` file. These kind of tests are not automated. -## Miscellanous Tests {#miscellanous-tests} +## Miscellaneous Tests {#miscellaneous-tests} There are tests for external dictionaries located at `tests/external_dictionaries` and for machine learned models in `tests/external_models`. These tests are not updated and must be transferred to integration tests. There is separate test for quorum inserts. This test run ClickHouse cluster on separate servers and emulate various failure cases: network split, packet drop (between ClickHouse nodes, between ClickHouse and ZooKeeper, between ClickHouse server and client, etc.), `kill -9`, `kill -STOP` and `kill -CONT` , like [Jepsen](https://aphyr.com/tags/Jepsen). Then the test checks that all acknowledged inserts was written and all rejected inserts was not. -Quorum test was written by separate team before ClickHouse was open-sourced. This team no longer work with ClickHouse. Test was accidentially written in Java. For these reasons, quorum test must be rewritten and moved to integration tests. +Quorum test was written by separate team before ClickHouse was open-sourced. This team no longer work with ClickHouse. Test was accidentally written in Java. 
For these reasons, quorum test must be rewritten and moved to integration tests. ## Manual Testing {#manual-testing} diff --git a/docs/es/development/tests.md b/docs/es/development/tests.md index 388931e9436..12afbb68f2e 100644 --- a/docs/es/development/tests.md +++ b/docs/es/development/tests.md @@ -17,7 +17,7 @@ Las pruebas se encuentran en `queries` directorio. Hay dos subdirectorios: `stat Cada prueba puede ser de dos tipos: `.sql` y `.sh`. `.sql` test es el script SQL simple que se canaliza a `clickhouse-client --multiquery --testmode`. `.sh` test es un script que se ejecuta por sí mismo. -Para ejecutar todas las pruebas, use `testskhouse-test` herramienta. Mira `--help` para la lista de posibles opciones. Simplemente puede ejecutar todas las pruebas o ejecutar un subconjunto de pruebas filtradas por subcadena en el nombre de la prueba: `./clickhouse-test substring`. +Para ejecutar todas las pruebas, use `clickhouse-test` herramienta. Mira `--help` para la lista de posibles opciones. Simplemente puede ejecutar todas las pruebas o ejecutar un subconjunto de pruebas filtradas por subcadena en el nombre de la prueba: `./clickhouse-test substring`. La forma más sencilla de invocar pruebas funcionales es copiar `clickhouse-client` a `/usr/bin/`, ejecutar `clickhouse-server` y luego ejecutar `./clickhouse-test` de su propio directorio. @@ -36,13 +36,13 @@ deshabilitar estos grupos de pruebas utilizando `--no-zookeeper`, `--no-shard` y ## Bugs conocidos {#known-bugs} -Si conocemos algunos errores que se pueden reproducir fácilmente mediante pruebas funcionales, colocamos pruebas funcionales preparadas en `queries/bugs` directorio. Estas pruebas se moverán a `teststests_stateless` cuando se corrigen errores. +Si conocemos algunos errores que se pueden reproducir fácilmente mediante pruebas funcionales, colocamos pruebas funcionales preparadas en `tests/queries/bugs` directorio. Estas pruebas se moverán a `tests/queries/0_stateless` cuando se corrigen errores. ## Pruebas de integración {#integration-tests} Las pruebas de integración permiten probar ClickHouse en la configuración agrupada y la interacción de ClickHouse con otros servidores como MySQL, Postgres, MongoDB. Son útiles para emular divisiones de red, caídas de paquetes, etc. Estas pruebas se ejecutan bajo Docker y crean múltiples contenedores con varios software. -Ver `testsgration/README.md` sobre cómo ejecutar estas pruebas. +Ver `tests/integration/README.md` sobre cómo ejecutar estas pruebas. Tenga en cuenta que la integración de ClickHouse con controladores de terceros no se ha probado. Además, actualmente no tenemos pruebas de integración con nuestros controladores JDBC y ODBC. diff --git a/docs/fa/development/tests.md b/docs/fa/development/tests.md index 874ac3063b9..922bc43fd46 100644 --- a/docs/fa/development/tests.md +++ b/docs/fa/development/tests.md @@ -18,7 +18,7 @@ toc_title: "\u0646\u062D\u0648\u0647 \u0627\u062C\u0631\u0627\u06CC \u062A\u0633 هر تست می تواند یکی از دو نوع باشد: `.sql` و `.sh`. `.sql` تست اسکریپت ساده مربع است که به لوله کشی است `clickhouse-client --multiquery --testmode`. `.sh` تست یک اسکریپت است که به خودی خود اجرا است. -برای اجرای تمام تست ها استفاده کنید `testskhouse-test` ابزار. نگاه کن `--help` برای لیستی از گزینه های ممکن. شما به سادگی می توانید تمام تست ها را اجرا کنید یا زیر مجموعه ای از تست های فیلتر شده توسط زیر رشته را در نام تست اجرا کنید: `./clickhouse-test substring`. +برای اجرای تمام تست ها استفاده کنید `clickhouse-test` ابزار. نگاه کن `--help` برای لیستی از گزینه های ممکن. 
شما به سادگی می توانید تمام تست ها را اجرا کنید یا زیر مجموعه ای از تست های فیلتر شده توسط زیر رشته را در نام تست اجرا کنید: `./clickhouse-test substring`. ساده ترین راه برای فراخوانی تست های کاربردی کپی است `clickhouse-client` به `/usr/bin/` فرار کن `clickhouse-server` و سپس اجرا کنید `./clickhouse-test` از دایرکتوری خود را. @@ -37,13 +37,13 @@ toc_title: "\u0646\u062D\u0648\u0647 \u0627\u062C\u0631\u0627\u06CC \u062A\u0633 ## اشکالات شناخته شده {#known-bugs} -اگر ما می دانیم برخی از اشکالات است که می تواند به راحتی توسط تست های کاربردی تکثیر, ما تست های عملکردی تهیه شده در `queries/bugs` فهرست راهنما. این تست خواهد شد به نقل مکان کرد `teststests_stateless` هنگامی که اشکالات ثابت هستند. +اگر ما می دانیم برخی از اشکالات است که می تواند به راحتی توسط تست های کاربردی تکثیر, ما تست های عملکردی تهیه شده در `tests/queries/bugs` فهرست راهنما. این تست خواهد شد به نقل مکان کرد `tests/queries/0_stateless` هنگامی که اشکالات ثابت هستند. ## تست های ادغام {#integration-tests} ادغام آزمون اجازه می دهد برای تست clickhouse در خوشه پیکربندی و clickhouse تعامل با سرور های دیگر مانند mysql, postgres, mongodb. مفید برای تقلید انشعابات شبکه قطره بسته و غیره هستند. این تست ها تحت کارگر بارانداز اجرا و ایجاد ظروف متعدد با نرم افزار های مختلف. -ببینید `testsgration/README.md` در مورد چگونگی اجرای این تست. +ببینید `tests/integration/README.md` در مورد چگونگی اجرای این تست. توجه داشته باشید که ادغام کلیک با رانندگان شخص ثالث تست نشده است. همچنین ما در حال حاضر تست های یکپارچه سازی با رانندگان جی بی سی و بی سی ما ندارد. diff --git a/docs/fr/development/tests.md b/docs/fr/development/tests.md index e5c8a50fa31..6637e9546fe 100644 --- a/docs/fr/development/tests.md +++ b/docs/fr/development/tests.md @@ -17,7 +17,7 @@ Les Tests sont situés dans `queries` répertoire. Il y a deux sous-répertoires Chaque test peut être de deux types: `.sql` et `.sh`. `.sql` test est le script SQL simple qui est canalisé vers `clickhouse-client --multiquery --testmode`. `.sh` test est un script qui est exécuté par lui-même. -Pour exécuter tous les tests, utilisez `testskhouse-test` outil. Regarder `--help` pour la liste des options possibles. Vous pouvez simplement exécuter tous les tests ou exécuter un sous ensemble de tests filtrés par sous chaîne dans le nom du test: `./clickhouse-test substring`. +Pour exécuter tous les tests, utilisez `clickhouse-test` outil. Regarder `--help` pour la liste des options possibles. Vous pouvez simplement exécuter tous les tests ou exécuter un sous ensemble de tests filtrés par sous chaîne dans le nom du test: `./clickhouse-test substring`. Le moyen le plus simple d'invoquer des tests fonctionnels est de copier `clickhouse-client` de `/usr/bin/`, exécuter `clickhouse-server` et puis exécutez `./clickhouse-test` à partir de son propre répertoire. @@ -36,13 +36,13 @@ désactivez ces groupes de tests en utilisant `--no-zookeeper`, `--no-shard` et ## Bugs connus {#known-bugs} -Si nous connaissons des bugs qui peuvent être facilement reproduits par des tests fonctionnels, nous plaçons des tests fonctionnels préparés dans `queries/bugs` répertoire. Ces tests seront déplacés à `teststests_stateless` quand les bugs sont corrigés. +Si nous connaissons des bugs qui peuvent être facilement reproduits par des tests fonctionnels, nous plaçons des tests fonctionnels préparés dans `tests/queries/bugs` répertoire. Ces tests seront déplacés à `tests/queries/0_stateless` quand les bugs sont corrigés. 
## Les Tests D'Intégration {#integration-tests} Les tests d'intégration permettent de tester ClickHouse en configuration cluster et clickhouse interaction avec D'autres serveurs comme MySQL, Postgres, MongoDB. Ils sont utiles pour émuler les splits réseau, les chutes de paquets, etc. Ces tests sont exécutés sous Docker et créent plusieurs conteneurs avec divers logiciels. -Voir `testsgration/README.md` sur la façon d'exécuter ces tests. +Voir `tests/integration/README.md` sur la façon d'exécuter ces tests. Notez que l'intégration de ClickHouse avec des pilotes tiers n'est pas testée. De plus, nous n'avons actuellement pas de tests d'intégration avec nos pilotes JDBC et ODBC. diff --git a/docs/ja/development/tests.md b/docs/ja/development/tests.md index 27b8870461e..b6c5abea621 100644 --- a/docs/ja/development/tests.md +++ b/docs/ja/development/tests.md @@ -17,7 +17,7 @@ toc_title: "ClickHouse\u30C6\u30B9\u30C8\u3092\u5B9F\u884C\u3059\u308B\u65B9\u6C それぞれの試験できるの種類: `.sql` と `.sh`. `.sql` testは、パイプ処理される単純なSQLスクリプトです `clickhouse-client --multiquery --testmode`. `.sh` テストは、単独で実行されるスクリプトです。 -すべてのテストを実行するには、 `testskhouse-test` ツール。 見て! `--help` 可能なオプションのリストについて。 できるだけ実行すべての試験または実行のサブセットの試験フィルター部分文字列の試験名: `./clickhouse-test substring`. +すべてのテストを実行するには、 `clickhouse-test` ツール。 見て! `--help` 可能なオプションのリストについて。 できるだけ実行すべての試験または実行のサブセットの試験フィルター部分文字列の試験名: `./clickhouse-test substring`. 機能テストを呼び出す最も簡単な方法は、コピーすることです `clickhouse-client` に `/usr/bin/`、実行 `clickhouse-server` そして、実行 `./clickhouse-test` 独自のディレクトリから。 @@ -36,13 +36,13 @@ toc_title: "ClickHouse\u30C6\u30B9\u30C8\u3092\u5B9F\u884C\u3059\u308B\u65B9\u6C ## 既知のバグ {#known-bugs} -機能テストで簡単に再現できるいくつかのバグを知っていれば、準備された機能テストを `queries/bugs` ディレクトリ。 これらのテストはに移動されます `teststests_stateless` バグが修正されたとき。 +機能テストで簡単に再現できるいくつかのバグを知っていれば、準備された機能テストを `tests/queries/bugs` ディレクトリ。 これらのテストはに移動されます `tests/queries/0_stateless` バグが修正されたとき。 ## 統合テスト {#integration-tests} 統合テストでは、クラスター化された設定でclickhouseをテストし、mysql、postgres、mongodbのような他のサーバーとのclickhouseの相互作用を可能にします。 それらはネットワークの割れ目、包みの低下、等を競争して有用である。 これらの試験する方向に作用しdockerを複数の容器を様々なソフトウェアです。 -見る `testsgration/README.md` これらのテストを実行する方法について。 +見る `tests/integration/README.md` これらのテストを実行する方法について。 ClickHouseとサードパーティドライバの統合はテストされていません。 また、現在、JDBCおよびODBCドライバとの統合テストはありません。 From eaba5c6c73f06c96856743e50dd9a47694418e7f Mon Sep 17 00:00:00 2001 From: Konstantin Lebedev Date: Sat, 11 Apr 2020 13:57:13 +0300 Subject: [PATCH 343/484] Remove mutable defaults from helpers/cluster.py --- tests/integration/helpers/cluster.py | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py index 5dc93cb338a..717fab11449 100644 --- a/tests/integration/helpers/cluster.py +++ b/tests/integration/helpers/cluster.py @@ -139,12 +139,12 @@ class ClickHouseCluster: cmd += " client" return cmd - def add_instance(self, name, config_dir=None, main_configs=[], user_configs=[], macros={}, + def add_instance(self, name, config_dir=None, main_configs=None, user_configs=None, macros=None, with_zookeeper=False, with_mysql=False, with_kafka=False, clickhouse_path_dir=None, with_odbc_drivers=False, with_postgres=False, with_hdfs=False, with_mongo=False, with_redis=False, with_minio=False, - hostname=None, env_variables={}, image="yandex/clickhouse-integration-test", - stay_alive=False, ipv4_address=None, ipv6_address=None, with_installed_binary=False, tmpfs=[]): + hostname=None, env_variables=None, image="yandex/clickhouse-integration-test", + stay_alive=False, 
ipv4_address=None, ipv6_address=None, with_installed_binary=False, tmpfs=None): """Add an instance to the cluster. name - the name of the instance directory and the value of the 'instance' macro in ClickHouse. @@ -161,13 +161,14 @@ class ClickHouseCluster: raise Exception("Can\'t add instance `%s': there is already an instance with the same name!" % name) instance = ClickHouseInstance( - self, self.base_dir, name, config_dir, main_configs, user_configs, macros, with_zookeeper, + self, self.base_dir, name, config_dir, main_configs or [], user_configs or [], macros or {}, + with_zookeeper, self.zookeeper_config_path, with_mysql, with_kafka, with_mongo, with_redis, with_minio, self.base_configs_dir, self.server_bin_path, self.odbc_bridge_bin_path, clickhouse_path_dir, with_odbc_drivers, hostname=hostname, - env_variables=env_variables, image=image, stay_alive=stay_alive, ipv4_address=ipv4_address, + env_variables=env_variables or {}, image=image, stay_alive=stay_alive, ipv4_address=ipv4_address, ipv6_address=ipv6_address, - with_installed_binary=with_installed_binary, tmpfs=tmpfs) + with_installed_binary=with_installed_binary, tmpfs=tmpfs or []) self.instances[name] = instance if ipv4_address is not None or ipv6_address is not None: @@ -580,17 +581,17 @@ class ClickHouseInstance: self, cluster, base_path, name, custom_config_dir, custom_main_configs, custom_user_configs, macros, with_zookeeper, zookeeper_config_path, with_mysql, with_kafka, with_mongo, with_redis, with_minio, base_configs_dir, server_bin_path, odbc_bridge_bin_path, - clickhouse_path_dir, with_odbc_drivers, hostname=None, env_variables={}, + clickhouse_path_dir, with_odbc_drivers, hostname=None, env_variables=None, image="yandex/clickhouse-integration-test", - stay_alive=False, ipv4_address=None, ipv6_address=None, with_installed_binary=False, tmpfs=[]): + stay_alive=False, ipv4_address=None, ipv6_address=None, with_installed_binary=False, tmpfs=None): self.name = name - self.base_cmd = cluster.base_cmd[:] + self.base_cmd = cluster.base_cmd self.docker_id = cluster.get_instance_docker_id(self.name) self.cluster = cluster self.hostname = hostname if hostname is not None else self.name - self.tmpfs = tmpfs[:] + self.tmpfs = tmpfs or [] self.custom_config_dir = p.abspath(p.join(base_path, custom_config_dir)) if custom_config_dir else None self.custom_main_config_paths = [p.abspath(p.join(base_path, c)) for c in custom_main_configs] self.custom_user_config_paths = [p.abspath(p.join(base_path, c)) for c in custom_user_configs] @@ -611,7 +612,7 @@ class ClickHouseInstance: self.path = p.join(self.cluster.instances_dir, name) self.docker_compose_path = p.join(self.path, 'docker_compose.yml') - self.env_variables = env_variables + self.env_variables = env_variables or {} if with_odbc_drivers: self.odbc_ini_path = os.path.dirname(self.docker_compose_path) + "/odbc.ini:/etc/odbc.ini" self.with_mysql = True @@ -1041,4 +1042,4 @@ class ClickHouseKiller(object): self.clickhouse_node.kill_clickhouse() def __exit__(self, exc_type, exc_val, exc_tb): - self.clickhouse_node.restore_clickhouse() \ No newline at end of file + self.clickhouse_node.restore_clickhouse() From 4da19d122d7532ff4a68cf39ed2147029ef5ace3 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Sun, 12 Apr 2020 18:02:17 +0300 Subject: [PATCH 344/484] Added IProcessor::onUpdatePorts --- src/Processors/Executors/PipelineExecutor.cpp | 2 ++ src/Processors/IProcessor.h | 4 ++++ src/Processors/Sources/SourceFromInputStream.h | 7 +++++++ 3 files changed, 13 insertions(+) diff --git 
a/src/Processors/Executors/PipelineExecutor.cpp b/src/Processors/Executors/PipelineExecutor.cpp index f2d2477991e..78229e4d379 100644 --- a/src/Processors/Executors/PipelineExecutor.cpp +++ b/src/Processors/Executors/PipelineExecutor.cpp @@ -263,6 +263,8 @@ bool PipelineExecutor::tryAddProcessorToStackIfUpdated(Edge & edge, Queue & queu node.status = ExecStatus::Preparing; return prepareProcessor(edge.to, thread_number, queue, std::move(lock)); } + else + graph[edge.to].processor->onUpdatePorts(); return true; } diff --git a/src/Processors/IProcessor.h b/src/Processors/IProcessor.h index a613e8008d0..8f43a5e149b 100644 --- a/src/Processors/IProcessor.h +++ b/src/Processors/IProcessor.h @@ -233,6 +233,10 @@ public: onCancel(); } + /// Additional method which is called in case if ports were updated while work() method. + /// May be used to stop execution in rare cases. + virtual void onUpdatePorts() {}; + virtual ~IProcessor() = default; auto & getInputs() { return inputs; } diff --git a/src/Processors/Sources/SourceFromInputStream.h b/src/Processors/Sources/SourceFromInputStream.h index b547e6a6d1f..13d42c937f3 100644 --- a/src/Processors/Sources/SourceFromInputStream.h +++ b/src/Processors/Sources/SourceFromInputStream.h @@ -37,6 +37,13 @@ public: void setProgressCallback(const ProgressCallback & callback) final { stream->setProgressCallback(callback); } void addTotalRowsApprox(size_t value) final { stream->addTotalRowsApprox(value); } + /// Stop reading from stream if output port is finished. + void onUpdatePorts() override + { + if (getPort().isFinished()) + onCancel(); + } + protected: void onCancel() override { stream->cancel(false); } From bff1f24cf717e004a2b04abaea28a6d82bd2c721 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Sun, 12 Apr 2020 18:21:21 +0300 Subject: [PATCH 345/484] Added IProcessor::onUpdatePorts --- src/Processors/Sources/SourceFromInputStream.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Processors/Sources/SourceFromInputStream.h b/src/Processors/Sources/SourceFromInputStream.h index 13d42c937f3..88a045e65a2 100644 --- a/src/Processors/Sources/SourceFromInputStream.h +++ b/src/Processors/Sources/SourceFromInputStream.h @@ -41,7 +41,7 @@ public: void onUpdatePorts() override { if (getPort().isFinished()) - onCancel(); + cancel(); } protected: From 2b052a44d98bb91981f97ec8b0664283e9dafbbc Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Sun, 12 Apr 2020 18:25:51 +0300 Subject: [PATCH 346/484] Added test --- .../01245_limit_infinite_sources.reference | 1 + .../0_stateless/01245_limit_infinite_sources.sql | 11 +++++++++++ 2 files changed, 12 insertions(+) create mode 100644 tests/queries/0_stateless/01245_limit_infinite_sources.reference create mode 100644 tests/queries/0_stateless/01245_limit_infinite_sources.sql diff --git a/tests/queries/0_stateless/01245_limit_infinite_sources.reference b/tests/queries/0_stateless/01245_limit_infinite_sources.reference new file mode 100644 index 00000000000..d00491fd7e5 --- /dev/null +++ b/tests/queries/0_stateless/01245_limit_infinite_sources.reference @@ -0,0 +1 @@ +1 diff --git a/tests/queries/0_stateless/01245_limit_infinite_sources.sql b/tests/queries/0_stateless/01245_limit_infinite_sources.sql new file mode 100644 index 00000000000..803a2d14c39 --- /dev/null +++ b/tests/queries/0_stateless/01245_limit_infinite_sources.sql @@ -0,0 +1,11 @@ +SELECT number +FROM +( + SELECT zero AS number + FROM remote('127.0.0.2', system.zeros) + UNION ALL + SELECT number + sleep(0.5) + FROM system.numbers 
+) +WHERE number = 1 +LIMIT 1 From d4a3ef2fdc342cf0951022f9183844505548a5b3 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 12 Apr 2020 19:15:53 +0300 Subject: [PATCH 347/484] Fixed wrong code (no changes in behaviour) --- src/IO/parseDateTimeBestEffort.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/IO/parseDateTimeBestEffort.cpp b/src/IO/parseDateTimeBestEffort.cpp index 6e747b13b3f..84a40144155 100644 --- a/src/IO/parseDateTimeBestEffort.cpp +++ b/src/IO/parseDateTimeBestEffort.cpp @@ -69,7 +69,6 @@ template inline void readDecimalNumber(T & res, size_t num_digits, const char * src) { #define READ_DECIMAL_NUMBER(N) do { res *= common::exp10_i32(N); readDecimalNumber(res, src); src += (N); num_digits -= (N); } while (false) - while (num_digits) { switch (num_digits) @@ -80,7 +79,7 @@ inline void readDecimalNumber(T & res, size_t num_digits, const char * src) default: READ_DECIMAL_NUMBER(4); break; } } -#undef DECIMAL_NUMBER_CASE +#undef READ_DECIMAL_NUMBER } struct DateTimeSubsecondPart From 6de712f0f4a7e854c9c881b2121b88d4ee450ea2 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 12 Apr 2020 06:20:15 +0300 Subject: [PATCH 348/484] Fix joined constants, development --- src/Interpreters/ColumnNamesContext.cpp | 4 ++++ src/Interpreters/ExpressionAnalyzer.cpp | 3 +++ src/Interpreters/RequiredSourceColumnsVisitor.cpp | 2 ++ src/Interpreters/SyntaxAnalyzer.cpp | 7 +++++++ src/Interpreters/TableJoin.cpp | 10 +++++++++- src/Interpreters/TranslateQualifiedNamesVisitor.cpp | 9 +++++++-- 6 files changed, 32 insertions(+), 3 deletions(-) diff --git a/src/Interpreters/ColumnNamesContext.cpp b/src/Interpreters/ColumnNamesContext.cpp index c8fde183d96..d577fea97ae 100644 --- a/src/Interpreters/ColumnNamesContext.cpp +++ b/src/Interpreters/ColumnNamesContext.cpp @@ -24,6 +24,8 @@ bool ColumnNamesContext::addColumnAliasIfAny(const IAST & ast) if (required_names.count(alias)) masked_columns.insert(alias); + std::cerr << "Alias: " << alias << "\n"; + complex_aliases.insert(alias); return true; } @@ -33,6 +35,8 @@ void ColumnNamesContext::addColumnIdentifier(const ASTIdentifier & node) if (!IdentifierSemantic::getColumnName(node)) return; + std::cerr << "Identifier: " << node.name << "\n"; + /// There should be no complex cases after query normalization. Names to aliases: one-to-many. String alias = node.tryGetAlias(); required_names[node.name].addInclusion(alias); diff --git a/src/Interpreters/ExpressionAnalyzer.cpp b/src/Interpreters/ExpressionAnalyzer.cpp index 9ec32737fdc..4ea762c0d6e 100644 --- a/src/Interpreters/ExpressionAnalyzer.cpp +++ b/src/Interpreters/ExpressionAnalyzer.cpp @@ -559,6 +559,9 @@ JoinPtr SelectQueryExpressionAnalyzer::makeTableJoin(const ASTTablesInSelectQuer /// Actions which need to be calculated on joined block. 
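The `std::cerr` tracing added in this commit chases how constants coming from a joined subquery are resolved; the failing scenario is, in essence, the query that becomes test `01120_join_constants` later in this series (a distilled sketch, not new material):

```sql
-- The constant 'world' from the right-hand subquery must remain distinct
-- from any identical top-level constant after the join is analyzed.
SELECT t1.*, t2.*, 'world', isConstant('world')
FROM (SELECT arrayJoin([1, 2]) AS k, 'hello') AS t1
LEFT JOIN (SELECT arrayJoin([1, 3]) AS k, 'world') AS t2 ON t1.k = t2.k;
```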
ExpressionActionsPtr joined_block_actions = createJoinedBlockActions(context, analyzedJoin()); + std::cerr << "Joined block actions: " << joined_block_actions->getSampleBlock().dumpStructure() + << "\n\n" << toString(joined_block_actions->getRequiredColumns()) << "\n"; + if (!subquery_for_join.source) { NamesWithAliases required_columns_with_aliases = diff --git a/src/Interpreters/RequiredSourceColumnsVisitor.cpp b/src/Interpreters/RequiredSourceColumnsVisitor.cpp index 5a740805560..469a5852fa5 100644 --- a/src/Interpreters/RequiredSourceColumnsVisitor.cpp +++ b/src/Interpreters/RequiredSourceColumnsVisitor.cpp @@ -88,12 +88,14 @@ void RequiredSourceColumnsMatcher::visit(const ASTPtr & ast, Data & data) visit(*t, ast, data); return; } + if (auto * t = ast->as()) { data.addTableAliasIfAny(*ast); visit(*t, ast, data); return; } + if (ast->as()) { data.addTableAliasIfAny(*ast); diff --git a/src/Interpreters/SyntaxAnalyzer.cpp b/src/Interpreters/SyntaxAnalyzer.cpp index 5c1b6c7e62b..bd317d61668 100644 --- a/src/Interpreters/SyntaxAnalyzer.cpp +++ b/src/Interpreters/SyntaxAnalyzer.cpp @@ -638,9 +638,13 @@ void SyntaxAnalyzerResult::collectUsedColumns(const ASTPtr & query) /// We calculate required_source_columns with source_columns modifications and swap them on exit required_source_columns = source_columns; + std::cerr << queryToString(query) << "\n"; + RequiredSourceColumnsVisitor::Data columns_context; RequiredSourceColumnsVisitor(columns_context).visit(query); + std::cerr << columns_context << "\n"; + NameSet source_column_names; for (const auto & column : source_columns) source_column_names.insert(column.name); @@ -922,6 +926,9 @@ void SyntaxAnalyzer::normalize(ASTPtr & query, Aliases & aliases, const Settings /// Creates a dictionary `aliases`: alias -> ASTPtr QueryAliasesVisitor(aliases).visit(query); + for (const auto & alias : aliases) + std::cerr << "Alias: " << alias.first << ": " << queryToString(alias.second) << "\n"; + /// Mark table ASTIdentifiers with not a column marker MarkTableIdentifiersVisitor::Data identifiers_data{aliases}; MarkTableIdentifiersVisitor(identifiers_data).visit(query); diff --git a/src/Interpreters/TableJoin.cpp b/src/Interpreters/TableJoin.cpp index 30b5e8e4483..3286bbbefd1 100644 --- a/src/Interpreters/TableJoin.cpp +++ b/src/Interpreters/TableJoin.cpp @@ -1,3 +1,5 @@ +#include + #include #include @@ -5,6 +7,8 @@ #include #include +#include + #include @@ -79,7 +83,9 @@ void TableJoin::deduplicateAndQualifyColumnNames(const NameSet & left_table_colu dedup_columns.push_back(column); auto & inserted = dedup_columns.back(); - if (left_table_columns.count(column.name)) + /// Also qualify unusual column names - that does not look like identifiers. + + if (left_table_columns.count(column.name) || !isValidIdentifierBegin(column.name.at(0))) inserted.name = right_table_prefix + column.name; original_names[inserted.name] = column.name; @@ -157,6 +163,8 @@ NamesWithAliases TableJoin::getRequiredColumns(const Block & sample, const Names void TableJoin::addJoinedColumn(const NameAndTypePair & joined_column) { + std::cerr << "Adding " << joined_column.name << "\n"; + if (join_use_nulls && isLeftOrFull(table_join.kind)) { auto type = joined_column.type->canBeInsideNullable() ? 
makeNullable(joined_column.type) : joined_column.type; diff --git a/src/Interpreters/TranslateQualifiedNamesVisitor.cpp b/src/Interpreters/TranslateQualifiedNamesVisitor.cpp index 17b1bc004f8..b97aa01826c 100644 --- a/src/Interpreters/TranslateQualifiedNamesVisitor.cpp +++ b/src/Interpreters/TranslateQualifiedNamesVisitor.cpp @@ -5,6 +5,7 @@ #include #include +#include #include #include @@ -17,6 +18,7 @@ #include #include #include +#include namespace DB @@ -107,8 +109,9 @@ void TranslateQualifiedNamesMatcher::visit(ASTIdentifier & identifier, ASTPtr &, IdentifierSemantic::setMembership(identifier, table_pos); /// In case if column from the joined table are in source columns, change it's name to qualified. + /// Also always leave unusual identifiers qualified. auto & table = data.tables[table_pos].table; - if (table_pos && data.hasColumn(short_name)) + if (table_pos && (data.hasColumn(short_name) || !isValidIdentifierBegin(short_name.at(0)))) IdentifierSemantic::setColumnLongName(identifier, table); else IdentifierSemantic::setColumnShortName(identifier, table); @@ -128,7 +131,7 @@ void TranslateQualifiedNamesMatcher::visit(ASTFunction & node, const ASTPtr &, D func_arguments->children.clear(); } -void TranslateQualifiedNamesMatcher::visit(const ASTQualifiedAsterisk & , const ASTPtr & ast, Data & data) +void TranslateQualifiedNamesMatcher::visit(const ASTQualifiedAsterisk &, const ASTPtr & ast, Data & data) { if (ast->children.size() != 1) throw Exception("Logical error: qualified asterisk must have exactly one child", ErrorCodes::LOGICAL_ERROR); @@ -174,6 +177,8 @@ static void addIdentifier(ASTs & nodes, const DatabaseAndTableWithAlias & table, String table_name = table.getQualifiedNamePrefix(false); auto identifier = std::make_shared(std::vector{table_name, column_name}); + std::cerr << "Expanded identifier: " << queryToString(identifier) << "\n"; + bool added = false; if (aliases && aliases->count(identifier->name)) { From ec4889e43e4c564de279c0af61e0d61fb98533bf Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 12 Apr 2020 19:20:12 +0300 Subject: [PATCH 349/484] Remove debug output --- src/Interpreters/ColumnNamesContext.cpp | 4 ---- src/Interpreters/ExpressionAnalyzer.cpp | 3 --- src/Interpreters/SyntaxAnalyzer.cpp | 7 ------- src/Interpreters/TableJoin.cpp | 4 ---- src/Interpreters/TranslateQualifiedNamesVisitor.cpp | 3 --- 5 files changed, 21 deletions(-) diff --git a/src/Interpreters/ColumnNamesContext.cpp b/src/Interpreters/ColumnNamesContext.cpp index d577fea97ae..c8fde183d96 100644 --- a/src/Interpreters/ColumnNamesContext.cpp +++ b/src/Interpreters/ColumnNamesContext.cpp @@ -24,8 +24,6 @@ bool ColumnNamesContext::addColumnAliasIfAny(const IAST & ast) if (required_names.count(alias)) masked_columns.insert(alias); - std::cerr << "Alias: " << alias << "\n"; - complex_aliases.insert(alias); return true; } @@ -35,8 +33,6 @@ void ColumnNamesContext::addColumnIdentifier(const ASTIdentifier & node) if (!IdentifierSemantic::getColumnName(node)) return; - std::cerr << "Identifier: " << node.name << "\n"; - /// There should be no complex cases after query normalization. Names to aliases: one-to-many. 
String alias = node.tryGetAlias(); required_names[node.name].addInclusion(alias); diff --git a/src/Interpreters/ExpressionAnalyzer.cpp b/src/Interpreters/ExpressionAnalyzer.cpp index 4ea762c0d6e..9ec32737fdc 100644 --- a/src/Interpreters/ExpressionAnalyzer.cpp +++ b/src/Interpreters/ExpressionAnalyzer.cpp @@ -559,9 +559,6 @@ JoinPtr SelectQueryExpressionAnalyzer::makeTableJoin(const ASTTablesInSelectQuer /// Actions which need to be calculated on joined block. ExpressionActionsPtr joined_block_actions = createJoinedBlockActions(context, analyzedJoin()); - std::cerr << "Joined block actions: " << joined_block_actions->getSampleBlock().dumpStructure() - << "\n\n" << toString(joined_block_actions->getRequiredColumns()) << "\n"; - if (!subquery_for_join.source) { NamesWithAliases required_columns_with_aliases = diff --git a/src/Interpreters/SyntaxAnalyzer.cpp b/src/Interpreters/SyntaxAnalyzer.cpp index bd317d61668..5c1b6c7e62b 100644 --- a/src/Interpreters/SyntaxAnalyzer.cpp +++ b/src/Interpreters/SyntaxAnalyzer.cpp @@ -638,13 +638,9 @@ void SyntaxAnalyzerResult::collectUsedColumns(const ASTPtr & query) /// We calculate required_source_columns with source_columns modifications and swap them on exit required_source_columns = source_columns; - std::cerr << queryToString(query) << "\n"; - RequiredSourceColumnsVisitor::Data columns_context; RequiredSourceColumnsVisitor(columns_context).visit(query); - std::cerr << columns_context << "\n"; - NameSet source_column_names; for (const auto & column : source_columns) source_column_names.insert(column.name); @@ -926,9 +922,6 @@ void SyntaxAnalyzer::normalize(ASTPtr & query, Aliases & aliases, const Settings /// Creates a dictionary `aliases`: alias -> ASTPtr QueryAliasesVisitor(aliases).visit(query); - for (const auto & alias : aliases) - std::cerr << "Alias: " << alias.first << ": " << queryToString(alias.second) << "\n"; - /// Mark table ASTIdentifiers with not a column marker MarkTableIdentifiersVisitor::Data identifiers_data{aliases}; MarkTableIdentifiersVisitor(identifiers_data).visit(query); diff --git a/src/Interpreters/TableJoin.cpp b/src/Interpreters/TableJoin.cpp index 3286bbbefd1..339fe2dceb3 100644 --- a/src/Interpreters/TableJoin.cpp +++ b/src/Interpreters/TableJoin.cpp @@ -1,5 +1,3 @@ -#include - #include #include @@ -163,8 +161,6 @@ NamesWithAliases TableJoin::getRequiredColumns(const Block & sample, const Names void TableJoin::addJoinedColumn(const NameAndTypePair & joined_column) { - std::cerr << "Adding " << joined_column.name << "\n"; - if (join_use_nulls && isLeftOrFull(table_join.kind)) { auto type = joined_column.type->canBeInsideNullable() ? 
makeNullable(joined_column.type) : joined_column.type; diff --git a/src/Interpreters/TranslateQualifiedNamesVisitor.cpp b/src/Interpreters/TranslateQualifiedNamesVisitor.cpp index b97aa01826c..7c31a6db546 100644 --- a/src/Interpreters/TranslateQualifiedNamesVisitor.cpp +++ b/src/Interpreters/TranslateQualifiedNamesVisitor.cpp @@ -18,7 +18,6 @@ #include #include #include -#include namespace DB @@ -177,8 +176,6 @@ static void addIdentifier(ASTs & nodes, const DatabaseAndTableWithAlias & table, String table_name = table.getQualifiedNamePrefix(false); auto identifier = std::make_shared(std::vector{table_name, column_name}); - std::cerr << "Expanded identifier: " << queryToString(identifier) << "\n"; - bool added = false; if (aliases && aliases->count(identifier->name)) { From 66d9ba93894eac80d316da1e1f4ce04e5c2d9d98 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 12 Apr 2020 19:24:16 +0300 Subject: [PATCH 350/484] Added a test --- .../0_stateless/01120_join_constants.reference | 2 ++ .../0_stateless/01120_join_constants.sql | 17 +++++++++++++++++ 2 files changed, 19 insertions(+) create mode 100644 tests/queries/0_stateless/01120_join_constants.reference create mode 100644 tests/queries/0_stateless/01120_join_constants.sql diff --git a/tests/queries/0_stateless/01120_join_constants.reference b/tests/queries/0_stateless/01120_join_constants.reference new file mode 100644 index 00000000000..a16427fbdf7 --- /dev/null +++ b/tests/queries/0_stateless/01120_join_constants.reference @@ -0,0 +1,2 @@ +1 hello 1 world world 1 +2 hello 0 world 1 diff --git a/tests/queries/0_stateless/01120_join_constants.sql b/tests/queries/0_stateless/01120_join_constants.sql new file mode 100644 index 00000000000..443559c3ea1 --- /dev/null +++ b/tests/queries/0_stateless/01120_join_constants.sql @@ -0,0 +1,17 @@ +SELECT + t1.*, + t2.*, + 'world', + isConstant('world') +FROM +( + SELECT + arrayJoin([1, 2]) AS k, + 'hello' +) AS t1 +LEFT JOIN +( + SELECT + arrayJoin([1, 3]) AS k, + 'world' +) AS t2 ON t1.k = t2.k; From b00330b5db5e95ba8be818885a95d2c7eee322b8 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 12 Apr 2020 19:25:20 +0300 Subject: [PATCH 351/484] Added bug --- tests/queries/bugs/join_constants_on.sql | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 tests/queries/bugs/join_constants_on.sql diff --git a/tests/queries/bugs/join_constants_on.sql b/tests/queries/bugs/join_constants_on.sql new file mode 100644 index 00000000000..ae967e07adb --- /dev/null +++ b/tests/queries/bugs/join_constants_on.sql @@ -0,0 +1,2 @@ +select cast(1, 'UInt8') from (select arrayJoin([1, 2]) as a) t1 left join (select 1 as b) t2 on b = ignore('UInt8'); +select isConstant('UInt8'), toFixedString('hello', toUInt8(substring('UInt8', 5, 1))) from (select arrayJoin([1, 2]) as a) t1 left join (select 1 as b) t2 on b = ignore('UInt8'); From ea7eb2f4afae6890bf23a7f74c19391d4cb67a7f Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 12 Apr 2020 19:33:44 +0300 Subject: [PATCH 352/484] Removed old command line option for client --- programs/client/Client.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index fef89d9df35..e01eef98006 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -1710,7 +1710,6 @@ public: ("database,d", po::value(), "database") ("pager", po::value(), "pager") ("disable_suggestion,A", "Disable loading suggestion data. 
Note that suggestion data is loaded asynchronously through a second connection to ClickHouse server. Also it is reasonable to disable suggestion if you want to paste a query with TAB characters. Shorthand option -A is for those who get used to mysql client.") - ("always_load_suggestion_data", "Load suggestion data even if clickhouse-client is run in non-interactive mode. Used for testing.") ("suggestion_limit", po::value()->default_value(10000), "Suggestion limit for how many databases, tables and columns to fetch.") ("multiline,m", "multiline") From d252c59513db55439ee912690427e1a2f6041ff6 Mon Sep 17 00:00:00 2001 From: Alexander Kazakov Date: Sun, 12 Apr 2020 20:04:52 +0300 Subject: [PATCH 353/484] Added a test that produces segfault in StorageSystemTables --- ...ecreate_reattach_and_show_tables.reference | 1 + ...rrent_recreate_reattach_and_show_tables.sh | 109 ++++++++++++++++++ 2 files changed, 110 insertions(+) create mode 100755 tests/queries/0_stateless/01014_lazy_database_concurrent_recreate_reattach_and_show_tables.reference create mode 100755 tests/queries/0_stateless/01014_lazy_database_concurrent_recreate_reattach_and_show_tables.sh diff --git a/tests/queries/0_stateless/01014_lazy_database_concurrent_recreate_reattach_and_show_tables.reference b/tests/queries/0_stateless/01014_lazy_database_concurrent_recreate_reattach_and_show_tables.reference new file mode 100755 index 00000000000..678f9a34e6f --- /dev/null +++ b/tests/queries/0_stateless/01014_lazy_database_concurrent_recreate_reattach_and_show_tables.reference @@ -0,0 +1 @@ +Test OK diff --git a/tests/queries/0_stateless/01014_lazy_database_concurrent_recreate_reattach_and_show_tables.sh b/tests/queries/0_stateless/01014_lazy_database_concurrent_recreate_reattach_and_show_tables.sh new file mode 100755 index 00000000000..8bf21d3cb02 --- /dev/null +++ b/tests/queries/0_stateless/01014_lazy_database_concurrent_recreate_reattach_and_show_tables.sh @@ -0,0 +1,109 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +. 
$CURDIR/../shell_config.sh + +export CURR_DATABASE="test_lazy_01014_concurrent_${CLICKHOUSE_DATABASE}" + + +function recreate_lazy_func1() +{ + $CLICKHOUSE_CLIENT -q " + CREATE TABLE $CURR_DATABASE.log (a UInt64, b UInt64) ENGINE = Log; + "; + + while true; do + $CLICKHOUSE_CLIENT -q " + DETACH TABLE $CURR_DATABASE.log; + "; + + $CLICKHOUSE_CLIENT -q " + ATTACH TABLE $CURR_DATABASE.log; + "; + done +} + +function recreate_lazy_func2() +{ + while true; do + $CLICKHOUSE_CLIENT -q " + CREATE TABLE $CURR_DATABASE.tlog (a UInt64, b UInt64) ENGINE = TinyLog; + "; + + $CLICKHOUSE_CLIENT -q " + DROP TABLE $CURR_DATABASE.tlog; + "; + done +} + +function recreate_lazy_func3() +{ + $CLICKHOUSE_CLIENT -q " + CREATE TABLE $CURR_DATABASE.slog (a UInt64, b UInt64) ENGINE = StripeLog; + "; + + while true; do + $CLICKHOUSE_CLIENT -q " + ATTACH TABLE $CURR_DATABASE.slog; + "; + + $CLICKHOUSE_CLIENT -q " + DETACH TABLE $CURR_DATABASE.slog; + "; + done +} + +function recreate_lazy_func4() +{ + while true; do + $CLICKHOUSE_CLIENT -q " + CREATE TABLE $CURR_DATABASE.tlog2 (a UInt64, b UInt64) ENGINE = TinyLog; + "; + + $CLICKHOUSE_CLIENT -q " + DROP TABLE $CURR_DATABASE.tlog2; + "; + done +} + +function show_tables_func() +{ + while true; do + $CLICKHOUSE_CLIENT -q "SELECT * FROM system.tables WHERE database = '$CURR_DATABASE' FORMAT Null"; + done +} + + +export -f recreate_lazy_func1; +export -f recreate_lazy_func2; +export -f recreate_lazy_func3; +export -f recreate_lazy_func4; +export -f show_tables_func; + + +${CLICKHOUSE_CLIENT} -n -q " + DROP DATABASE IF EXISTS $CURR_DATABASE; + CREATE DATABASE $CURR_DATABASE ENGINE = Lazy(1); +" + + +TIMEOUT=30 + +timeout $TIMEOUT bash -c recreate_lazy_func1 2> /dev/null & +timeout $TIMEOUT bash -c recreate_lazy_func2 2> /dev/null & +timeout $TIMEOUT bash -c recreate_lazy_func3 2> /dev/null & +timeout $TIMEOUT bash -c recreate_lazy_func4 2> /dev/null & +timeout $TIMEOUT bash -c show_tables_func 2> /dev/null & + +wait +sleep 1 + +${CLICKHOUSE_CLIENT} -n -q " + DROP TABLE IF EXISTS $CURR_DATABASE.log; + DROP TABLE IF EXISTS $CURR_DATABASE.slog; + DROP TABLE IF EXISTS $CURR_DATABASE.tlog; + DROP TABLE IF EXISTS $CURR_DATABASE.tlog2; +" +# DROP DATABASE $CURR_DATABASE; -- This fails for some reason + +echo "Test OK" From 2eb2e4cf41909dc82ccf5cd30c02f81941e40e36 Mon Sep 17 00:00:00 2001 From: Alexander Kazakov Date: Sun, 12 Apr 2020 20:17:41 +0300 Subject: [PATCH 354/484] Added proper nullptr check --- src/Storages/System/StorageSystemTables.cpp | 23 +++++++++++++-------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/src/Storages/System/StorageSystemTables.cpp b/src/Storages/System/StorageSystemTables.cpp index f4ce4a8b717..81ff6a03e12 100644 --- a/src/Storages/System/StorageSystemTables.cpp +++ b/src/Storages/System/StorageSystemTables.cpp @@ -239,20 +239,25 @@ protected: StoragePtr table = nullptr; TableStructureReadLockHolder lock; - try + if (need_lock_structure) { - if (need_lock_structure) + table = tables_it->table(); + if (table == nullptr) + { + // Table might have just been removed or detached for Lazy engine (see DatabaseLazy::tryGetTable()) + continue; + } + try { - table = tables_it->table(); lock = table->lockStructureForShare( false, context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); } - } - catch (const Exception & e) - { - if (e.code() == ErrorCodes::TABLE_IS_DROPPED) - continue; - throw; + catch (const Exception & e) + { + if (e.code() == ErrorCodes::TABLE_IS_DROPPED) + continue; + throw; + } } ++rows_count; From 
718e4bcdf89c96375ff655b233241eee9e12fda4 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Sun, 12 Apr 2020 22:01:02 +0300 Subject: [PATCH 355/484] Update IProcessor.h --- src/Processors/IProcessor.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Processors/IProcessor.h b/src/Processors/IProcessor.h index 8f43a5e149b..b7c230cb6de 100644 --- a/src/Processors/IProcessor.h +++ b/src/Processors/IProcessor.h @@ -235,7 +235,7 @@ public: /// Additional method which is called in case if ports were updated while work() method. /// May be used to stop execution in rare cases. - virtual void onUpdatePorts() {}; + virtual void onUpdatePorts() {} virtual ~IProcessor() = default; From b56945fa1b4d5cd74aa9daf0b98f15645c19899f Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 12 Apr 2020 22:07:16 +0300 Subject: [PATCH 356/484] Remove some bugs --- .../0_stateless/01121_remote_scalar_subquery.reference | 2 ++ .../01121_remote_scalar_subquery.sql} | 0 .../01122_totals_rollup_having_block_header.reference | 0 .../01122_totals_rollup_having_block_header.sql} | 6 ++---- tests/queries/bugs/00938_client_suggestions.sh | 6 ------ 5 files changed, 4 insertions(+), 10 deletions(-) create mode 100644 tests/queries/0_stateless/01121_remote_scalar_subquery.reference rename tests/queries/{bugs/remote_scalar_subquery.sql => 0_stateless/01121_remote_scalar_subquery.sql} (100%) create mode 100644 tests/queries/0_stateless/01122_totals_rollup_having_block_header.reference rename tests/queries/{bugs/totals_rollup_having_block_header.sql => 0_stateless/01122_totals_rollup_having_block_header.sql} (75%) delete mode 100755 tests/queries/bugs/00938_client_suggestions.sh diff --git a/tests/queries/0_stateless/01121_remote_scalar_subquery.reference b/tests/queries/0_stateless/01121_remote_scalar_subquery.reference new file mode 100644 index 00000000000..6ed281c757a --- /dev/null +++ b/tests/queries/0_stateless/01121_remote_scalar_subquery.reference @@ -0,0 +1,2 @@ +1 +1 diff --git a/tests/queries/bugs/remote_scalar_subquery.sql b/tests/queries/0_stateless/01121_remote_scalar_subquery.sql similarity index 100% rename from tests/queries/bugs/remote_scalar_subquery.sql rename to tests/queries/0_stateless/01121_remote_scalar_subquery.sql diff --git a/tests/queries/0_stateless/01122_totals_rollup_having_block_header.reference b/tests/queries/0_stateless/01122_totals_rollup_having_block_header.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/bugs/totals_rollup_having_block_header.sql b/tests/queries/0_stateless/01122_totals_rollup_having_block_header.sql similarity index 75% rename from tests/queries/bugs/totals_rollup_having_block_header.sql rename to tests/queries/0_stateless/01122_totals_rollup_having_block_header.sql index 4f7f9692fd0..4f4f3355912 100644 --- a/tests/queries/bugs/totals_rollup_having_block_header.sql +++ b/tests/queries/0_stateless/01122_totals_rollup_having_block_header.sql @@ -1,5 +1,3 @@ --- triggers assertion in debug build - DROP TABLE IF EXISTS test.rollup_having; CREATE TABLE test.rollup_having ( a Nullable(String), @@ -10,7 +8,7 @@ INSERT INTO test.rollup_having VALUES (NULL, NULL); INSERT INTO test.rollup_having VALUES ('a', NULL); INSERT INTO test.rollup_having VALUES ('a', 'b'); -SELECT a, b, count(*) FROM test.rollup_having GROUP BY a, b WITH ROLLUP WITH TOTALS HAVING a IS NOT NULL; -SELECT a, b, count(*) FROM test.rollup_having GROUP BY a, b WITH ROLLUP WITH TOTALS HAVING a IS NOT NULL and b IS NOT NULL; +SELECT a, b, count(*) 
FROM test.rollup_having GROUP BY a, b WITH ROLLUP WITH TOTALS HAVING a IS NOT NULL; -- { serverError 48 } +SELECT a, b, count(*) FROM test.rollup_having GROUP BY a, b WITH ROLLUP WITH TOTALS HAVING a IS NOT NULL and b IS NOT NULL; -- { serverError 48 } DROP TABLE test.rollup_having; diff --git a/tests/queries/bugs/00938_client_suggestions.sh b/tests/queries/bugs/00938_client_suggestions.sh deleted file mode 100755 index b4bd9e4480d..00000000000 --- a/tests/queries/bugs/00938_client_suggestions.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/usr/bin/env bash - -CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -. $CURDIR/../shell_config.sh - -for i in {1..100}; do $CLICKHOUSE_CLIENT --always_load_suggestion_data --query="SELECT 1 FORMAT Null"; done From 8cb4dd275a1554f2f995dd27472f42f7b4359b53 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 12 Apr 2020 22:23:59 +0300 Subject: [PATCH 357/484] Remove default argument (harmful) #10082 --- src/IO/parseDateTimeBestEffort.cpp | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/src/IO/parseDateTimeBestEffort.cpp b/src/IO/parseDateTimeBestEffort.cpp index 84a40144155..2924ad88506 100644 --- a/src/IO/parseDateTimeBestEffort.cpp +++ b/src/IO/parseDateTimeBestEffort.cpp @@ -89,7 +89,12 @@ struct DateTimeSubsecondPart }; template -ReturnType parseDateTimeBestEffortImpl(time_t & res, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone, DateTimeSubsecondPart * fractional = nullptr) +ReturnType parseDateTimeBestEffortImpl( + time_t & res, + ReadBuffer & in, + const DateLUTImpl & local_time_zone, + const DateLUTImpl & utc_time_zone, + DateTimeSubsecondPart * fractional) { auto on_error = [](const std::string & message [[maybe_unused]], int code [[maybe_unused]]) { @@ -581,12 +586,12 @@ ReturnType parseDateTime64BestEffortImpl(DateTime64 & res, UInt32 scale, ReadBuf void parseDateTimeBestEffort(time_t & res, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone) { - parseDateTimeBestEffortImpl(res, in, local_time_zone, utc_time_zone); + parseDateTimeBestEffortImpl(res, in, local_time_zone, utc_time_zone, nullptr); } bool tryParseDateTimeBestEffort(time_t & res, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone) { - return parseDateTimeBestEffortImpl(res, in, local_time_zone, utc_time_zone); + return parseDateTimeBestEffortImpl(res, in, local_time_zone, utc_time_zone, nullptr); } void parseDateTime64BestEffort(DateTime64 & res, UInt32 scale, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone) From 142087c4f7538d42a652294524f7351b71a9d0c3 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 12 Apr 2020 22:34:04 +0300 Subject: [PATCH 358/484] Fix "parseDateTimeBestEffort" for strings in RFC-2822 when day of week is Tuesday or Thursday #10082 --- src/IO/parseDateTimeBestEffort.cpp | 5 ++++- .../01123_parse_date_time_best_effort_even_more.reference | 2 ++ .../01123_parse_date_time_best_effort_even_more.sql | 2 ++ 3 files changed, 8 insertions(+), 1 deletion(-) create mode 100644 tests/queries/0_stateless/01123_parse_date_time_best_effort_even_more.reference create mode 100644 tests/queries/0_stateless/01123_parse_date_time_best_effort_even_more.sql diff --git a/src/IO/parseDateTimeBestEffort.cpp b/src/IO/parseDateTimeBestEffort.cpp index 2924ad88506..68565782edf 100644 --- a/src/IO/parseDateTimeBestEffort.cpp +++ b/src/IO/parseDateTimeBestEffort.cpp @@ -371,7 +371,10 @@ ReturnType
parseDateTimeBestEffortImpl( { char c = *in.position(); - if (c == ' ' || c == 'T') + /// 'T' is a separator between date and time according to ISO 8601. + /// But don't skip it if we didn't read the date part yet, because 'T' is also a prefix or 'Tue' and 'Thu'. + + if (c == ' ' || (c == 'T' && year && !has_time)) { ++in.position(); } diff --git a/tests/queries/0_stateless/01123_parse_date_time_best_effort_even_more.reference b/tests/queries/0_stateless/01123_parse_date_time_best_effort_even_more.reference new file mode 100644 index 00000000000..558ba34abcd --- /dev/null +++ b/tests/queries/0_stateless/01123_parse_date_time_best_effort_even_more.reference @@ -0,0 +1,2 @@ +2018-08-18 07:22:16 +2018-08-16 07:22:16 diff --git a/tests/queries/0_stateless/01123_parse_date_time_best_effort_even_more.sql b/tests/queries/0_stateless/01123_parse_date_time_best_effort_even_more.sql new file mode 100644 index 00000000000..a4f6f173402 --- /dev/null +++ b/tests/queries/0_stateless/01123_parse_date_time_best_effort_even_more.sql @@ -0,0 +1,2 @@ +SELECT toTimeZone(parseDateTimeBestEffort('Thu, 18 Aug 2018 07:22:16 GMT'), 'UTC'); +SELECT toTimeZone(parseDateTimeBestEffort('Tue, 16 Aug 2018 07:22:16 GMT'), 'UTC'); From 3f1658c0e931e8c7467ac959ec2ab175a90d3663 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Sun, 12 Apr 2020 22:35:23 +0300 Subject: [PATCH 359/484] Update parseDateTimeBestEffort.cpp --- src/IO/parseDateTimeBestEffort.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/IO/parseDateTimeBestEffort.cpp b/src/IO/parseDateTimeBestEffort.cpp index 68565782edf..7e40909226c 100644 --- a/src/IO/parseDateTimeBestEffort.cpp +++ b/src/IO/parseDateTimeBestEffort.cpp @@ -372,7 +372,7 @@ ReturnType parseDateTimeBestEffortImpl( char c = *in.position(); /// 'T' is a separator between date and time according to ISO 8601. - /// But don't skip it if we didn't read the date part yet, because 'T' is also a prefix or 'Tue' and 'Thu'. + /// But don't skip it if we didn't read the date part yet, because 'T' is also a prefix for 'Tue' and 'Thu'. 
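+    /// A worked example, taken from the 01123 test added above: in the RFC 2822 string
+    /// 'Tue, 16 Aug 2018 07:22:16 GMT' the leading 'T' opens the day-of-week token, so it
+    /// must not be consumed as the ISO 8601 date/time separator; the `year && !has_time`
+    /// guard below only treats 'T' as a separator once a date has already been read.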
if (c == ' ' || (c == 'T' && year && !has_time)) { From 20dcc4decd4baf058cc9e754516c659f98dcf2cc Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 12 Apr 2020 23:05:58 +0300 Subject: [PATCH 360/484] Fixed build on FreeBSD according to the advice from Vitaly @hellvesper --- src/Processors/RowsBeforeLimitCounter.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Processors/RowsBeforeLimitCounter.h b/src/Processors/RowsBeforeLimitCounter.h index abee5a09405..36ea4a557a8 100644 --- a/src/Processors/RowsBeforeLimitCounter.h +++ b/src/Processors/RowsBeforeLimitCounter.h @@ -17,7 +17,7 @@ public: uint64_t get() const { return rows_before_limit.load(std::memory_order_acquire); } - void setAppliedLimit() { has_applied_limit.store(true, std::memory_order::release); } + void setAppliedLimit() { has_applied_limit.store(true, std::memory_order_release); } bool hasAppliedLimit() const { return has_applied_limit.load(std::memory_order_acquire); } private: From ceb5c1964af484bfdfdab15a6e385cd73049b9c7 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 12 Apr 2020 23:48:51 +0300 Subject: [PATCH 361/484] Update cctz just in case #10211 --- contrib/cctz | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/cctz b/contrib/cctz index 4f9776a310f..44541cf2b85 160000 --- a/contrib/cctz +++ b/contrib/cctz @@ -1 +1 @@ -Subproject commit 4f9776a310f4952454636363def82c2bf6641d5f +Subproject commit 44541cf2b85ced2a6e5ad4276183a9812d1a54ab From b9931863eff3b528109b89d94a555ed81575cc07 Mon Sep 17 00:00:00 2001 From: Avogar Date: Mon, 13 Apr 2020 00:01:17 +0300 Subject: [PATCH 362/484] Fix FixedString packing --- src/Processors/Formats/Impl/MsgPackRowOutputFormat.cpp | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/Processors/Formats/Impl/MsgPackRowOutputFormat.cpp b/src/Processors/Formats/Impl/MsgPackRowOutputFormat.cpp index 7c5e2c5b522..cef7b001505 100644 --- a/src/Processors/Formats/Impl/MsgPackRowOutputFormat.cpp +++ b/src/Processors/Formats/Impl/MsgPackRowOutputFormat.cpp @@ -85,7 +85,6 @@ void MsgPackRowOutputFormat::serializeField(const IColumn & column, DataTypePtr packer.pack_uint64(assert_cast(column).getElement(row_num)); return; } - case TypeIndex::FixedString: [[fallthrough]]; case TypeIndex::String: { const StringRef & string = assert_cast(column).getDataAt(row_num); @@ -93,6 +92,13 @@ void MsgPackRowOutputFormat::serializeField(const IColumn & column, DataTypePtr packer.pack_str_body(string.data, string.size); return; } + case TypeIndex::FixedString: + { + const StringRef & string = assert_cast(column).getDataAt(row_num); + packer.pack_str(string.size); + packer.pack_str_body(string.data, string.size); + return; + } case TypeIndex::Array: { auto nested_type = assert_cast(*data_type).getNestedType(); From 983950d4ec79cc07c945424d687e72a97b0c979c Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 13 Apr 2020 00:07:11 +0300 Subject: [PATCH 363/484] Convert types in Views --- src/Storages/StorageView.cpp | 10 ++++++++-- .../queries/0_stateless/01124_view_bad_types.reference | 10 ++++++++++ .../01124_view_bad_types.sql} | 2 +- 3 files changed, 19 insertions(+), 3 deletions(-) create mode 100644 tests/queries/0_stateless/01124_view_bad_types.reference rename tests/queries/{bugs/view_bad_types.sql => 0_stateless/01124_view_bad_types.sql} (84%) diff --git a/src/Storages/StorageView.cpp b/src/Storages/StorageView.cpp index 05feeb7d786..78e3c50a879 100644 --- a/src/Storages/StorageView.cpp +++ 
b/src/Storages/StorageView.cpp @@ -12,13 +12,12 @@ #include #include -#include - #include #include #include #include +#include namespace DB @@ -78,8 +77,15 @@ Pipes StorageView::read( /// It's expected that the columns read from storage are not constant. /// Because method 'getSampleBlockForColumns' is used to obtain a structure of result in InterpreterSelectQuery. for (auto & pipe : pipes) + { pipe.addSimpleTransform(std::make_shared(pipe.getHeader())); + /// And also convert to expected structure. + pipe.addSimpleTransform(std::make_shared( + pipe.getHeader(), getSampleBlockForColumns(column_names), + ConvertingTransform::MatchColumnsMode::Name, context)); + } + return pipes; } diff --git a/tests/queries/0_stateless/01124_view_bad_types.reference b/tests/queries/0_stateless/01124_view_bad_types.reference new file mode 100644 index 00000000000..af98bcd6397 --- /dev/null +++ b/tests/queries/0_stateless/01124_view_bad_types.reference @@ -0,0 +1,10 @@ +0 0 +1 1 +2 2 +3 3 +4 4 +5 5 +6 6 +7 7 +8 8 +9 9 diff --git a/tests/queries/bugs/view_bad_types.sql b/tests/queries/0_stateless/01124_view_bad_types.sql similarity index 84% rename from tests/queries/bugs/view_bad_types.sql rename to tests/queries/0_stateless/01124_view_bad_types.sql index 38daabfd6b8..81fc53930c1 100644 --- a/tests/queries/bugs/view_bad_types.sql +++ b/tests/queries/0_stateless/01124_view_bad_types.sql @@ -5,7 +5,7 @@ INSERT INTO test.table SELECT * FROM system.numbers LIMIT 10; DROP TABLE IF EXISTS test.view; CREATE VIEW test.view (x UInt64) AS SELECT * FROM test.table; -SELECT x, any(x) FROM test.view GROUP BY x; +SELECT x, any(x) FROM test.view GROUP BY x ORDER BY x; DROP TABLE test.view; DROP TABLE test.table; From d49dc5c008cbd3802dd35066f4607e5f3e21fde9 Mon Sep 17 00:00:00 2001 From: Avogar Date: Mon, 13 Apr 2020 00:16:27 +0300 Subject: [PATCH 364/484] Add test --- tests/queries/0_stateless/01098_msgpack_format.reference | 3 +++ tests/queries/0_stateless/01098_msgpack_format.sh | 8 ++++++++ 2 files changed, 11 insertions(+) diff --git a/tests/queries/0_stateless/01098_msgpack_format.reference b/tests/queries/0_stateless/01098_msgpack_format.reference index 8059526a38f..ad116a5ba91 100644 --- a/tests/queries/0_stateless/01098_msgpack_format.reference +++ b/tests/queries/0_stateless/01098_msgpack_format.reference @@ -8,3 +8,6 @@ [[1,2,3],[1001,2002],[3167]] [[['one'],['two']],[['three']],[['four'],['five']]] [0,1,2,3,42,253,254,255] [255,254,253,42,3,2,1,0] +2020-01-01 +2020-01-02 +2020-01-02 diff --git a/tests/queries/0_stateless/01098_msgpack_format.sh b/tests/queries/0_stateless/01098_msgpack_format.sh index afebd6de3dc..233399570bb 100755 --- a/tests/queries/0_stateless/01098_msgpack_format.sh +++ b/tests/queries/0_stateless/01098_msgpack_format.sh @@ -52,3 +52,11 @@ $CLICKHOUSE_CLIENT --query="SELECT * FROM msgpack"; $CLICKHOUSE_CLIENT --query="DROP TABLE msgpack"; +$CLICKHOUSE_CLIENT --query="CREATE TABLE msgpack (date FixedString(10)) ENGINE = Memory"; + +$CLICKHOUSE_CLIENT --query="INSERT INTO msgpack VALUES ('2020-01-01'), ('2020-01-02'), ('2020-01-02')"; + +$CLICKHOUSE_CLIENT --query="SELECT * FROM msgpack"; + +$CLICKHOUSE_CLIENT --query="DROP TABLE msgpack"; + From 3215257a90e914cf00a8399336493252e66056d1 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 13 Apr 2020 00:27:11 +0300 Subject: [PATCH 365/484] Fixed "generateRandom" function for Date type #9973 --- src/Storages/StorageGenerateRandom.cpp | 5 ++++- .../0_stateless/01125_generate_random_qoega.reference | 1 + 
tests/queries/0_stateless/01125_generate_random_qoega.sql | 5 +++++ 3 files changed, 10 insertions(+), 1 deletion(-) create mode 100644 tests/queries/0_stateless/01125_generate_random_qoega.reference create mode 100644 tests/queries/0_stateless/01125_generate_random_qoega.sql diff --git a/src/Storages/StorageGenerateRandom.cpp b/src/Storages/StorageGenerateRandom.cpp index 1fd2d4ec2d8..d0772254045 100644 --- a/src/Storages/StorageGenerateRandom.cpp +++ b/src/Storages/StorageGenerateRandom.cpp @@ -205,7 +205,10 @@ ColumnPtr fillColumnWithRandomData( { auto column = ColumnUInt16::create(); column->getData().resize(limit); - fillBufferWithRandomData(reinterpret_cast(column->getData().data()), limit * sizeof(UInt16), rng); + + for (size_t i = 0; i < limit; ++i) + column->getData()[i] = rng() % (DATE_LUT_MAX_DAY_NUM + 1); /// Slow + return column; } case TypeIndex::UInt32: [[fallthrough]]; diff --git a/tests/queries/0_stateless/01125_generate_random_qoega.reference b/tests/queries/0_stateless/01125_generate_random_qoega.reference new file mode 100644 index 00000000000..1cb416a722b --- /dev/null +++ b/tests/queries/0_stateless/01125_generate_random_qoega.reference @@ -0,0 +1 @@ +100 4456446406473339606 diff --git a/tests/queries/0_stateless/01125_generate_random_qoega.sql b/tests/queries/0_stateless/01125_generate_random_qoega.sql new file mode 100644 index 00000000000..7fb586ad2b5 --- /dev/null +++ b/tests/queries/0_stateless/01125_generate_random_qoega.sql @@ -0,0 +1,5 @@ +DROP TABLE IF EXISTS mass_table_117; +CREATE TABLE mass_table_117 (`dt` Date, `site_id` Int32, `site_key` String) ENGINE = MergeTree(dt, (site_id, site_key, dt), 8192); +INSERT INTO mass_table_117 SELECT * FROM generateRandom('`dt` Date,`site_id` Int32,`site_key` String', 1, 10, 2) LIMIT 100; +SELECT count(), sum(cityHash64(*)) FROM mass_table_117; +DROP TABLE mass_table_117; From 9860ffee5189189b7285dc5641c92c35bae49591 Mon Sep 17 00:00:00 2001 From: Avogar Date: Mon, 13 Apr 2020 00:59:28 +0300 Subject: [PATCH 366/484] Add MsgPack performance test --- tests/performance/parse_engine_file.xml | 1 + tests/performance/select_format.xml | 1 + 2 files changed, 2 insertions(+) diff --git a/tests/performance/parse_engine_file.xml b/tests/performance/parse_engine_file.xml index fb10fa97915..c96f4e537ff 100644 --- a/tests/performance/parse_engine_file.xml +++ b/tests/performance/parse_engine_file.xml @@ -34,6 +34,7 @@ RowBinary Native Avro + MsgPack diff --git a/tests/performance/select_format.xml b/tests/performance/select_format.xml index b8df874304f..e47d981c4d7 100644 --- a/tests/performance/select_format.xml +++ b/tests/performance/select_format.xml @@ -44,6 +44,7 @@ ODBCDriver2 MySQLWire Avro + MsgPack From 4788eb3423a575dc23f207963bf22c79acac1088 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 13 Apr 2020 01:00:03 +0300 Subject: [PATCH 367/484] Make the assertion in code consistent with the real partition expression --- src/Storages/MergeTree/MergeTreeDataWriter.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeDataWriter.cpp b/src/Storages/MergeTree/MergeTreeDataWriter.cpp index 34c615994f0..23a60ddab78 100644 --- a/src/Storages/MergeTree/MergeTreeDataWriter.cpp +++ b/src/Storages/MergeTree/MergeTreeDataWriter.cpp @@ -210,8 +210,8 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataWriter::writeTempPart(BlockWithPa const auto & date_lut = DateLUT::instance(); - DayNum min_month = date_lut.toFirstDayNumOfMonth(DayNum(min_date)); - DayNum max_month = 
date_lut.toFirstDayNumOfMonth(DayNum(max_date)); + auto min_month = date_lut.toNumYYYYMM(min_date); + auto max_month = date_lut.toNumYYYYMM(max_date); if (min_month != max_month) throw Exception("Logical error: part spans more than one month.", ErrorCodes::LOGICAL_ERROR); From d1eaa34cd9f24341fb3e9e96a09d38a5b894eb4b Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 13 Apr 2020 01:00:10 +0300 Subject: [PATCH 368/484] Added a test --- .../01126_month_partitioning_consistent_code.reference | 0 .../0_stateless/01126_month_partitioning_consistent_code.sql | 4 ++++ 2 files changed, 4 insertions(+) create mode 100644 tests/queries/0_stateless/01126_month_partitioning_consistent_code.reference create mode 100644 tests/queries/0_stateless/01126_month_partitioning_consistent_code.sql diff --git a/tests/queries/0_stateless/01126_month_partitioning_consistent_code.reference b/tests/queries/0_stateless/01126_month_partitioning_consistent_code.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/01126_month_partitioning_consistent_code.sql b/tests/queries/0_stateless/01126_month_partitioning_consistent_code.sql new file mode 100644 index 00000000000..c9bfbbe5111 --- /dev/null +++ b/tests/queries/0_stateless/01126_month_partitioning_consistent_code.sql @@ -0,0 +1,4 @@ +DROP TABLE IF EXISTS mt; +CREATE TABLE mt (d Date, x UInt8) ENGINE = MergeTree(d, x, 8192); +INSERT INTO mt VALUES (52392, 1), (62677, 2); +DROP TABLE mt; From 860e9092f19b379c4dbb53174c513159507aced5 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 13 Apr 2020 01:25:41 +0300 Subject: [PATCH 369/484] Fixed another inconsistency in partition names --- base/common/DateLUTImpl.cpp | 16 ++++++++++++++-- base/common/DateLUTImpl.h | 2 +- src/Storages/MergeTree/MergeTreePartInfo.cpp | 4 ++-- 3 files changed, 17 insertions(+), 5 deletions(-) diff --git a/base/common/DateLUTImpl.cpp b/base/common/DateLUTImpl.cpp index d7ab0046992..a7ca21c984e 100644 --- a/base/common/DateLUTImpl.cpp +++ b/base/common/DateLUTImpl.cpp @@ -133,7 +133,10 @@ DateLUTImpl::DateLUTImpl(const std::string & time_zone_) } /// Fill lookup table for years and months. - for (size_t day = 0; day < DATE_LUT_SIZE && lut[day].year <= DATE_LUT_MAX_YEAR; ++day) + size_t year_months_lut_index = 0; + size_t first_day_of_last_month = 0; + + for (size_t day = 0; day < DATE_LUT_SIZE; ++day) { const Values & values = lut[day]; @@ -141,7 +144,16 @@ DateLUTImpl::DateLUTImpl(const std::string & time_zone_) { if (values.month == 1) years_lut[values.year - DATE_LUT_MIN_YEAR] = day; - years_months_lut[(values.year - DATE_LUT_MIN_YEAR) * 12 + values.month - 1] = day; + + year_months_lut_index = (values.year - DATE_LUT_MIN_YEAR) * 12 + values.month - 1; + years_months_lut[year_months_lut_index] = day; + first_day_of_last_month = day; } } + + /// Fill the rest of lookup table with the same last month (2106-02-01). + for (; year_months_lut_index < DATE_LUT_YEARS * 12; ++year_months_lut_index) + { + years_months_lut[year_months_lut_index] = first_day_of_last_month; + } } diff --git a/base/common/DateLUTImpl.h b/base/common/DateLUTImpl.h index d9d27c56ee3..ec32d62bcad 100644 --- a/base/common/DateLUTImpl.h +++ b/base/common/DateLUTImpl.h @@ -12,7 +12,7 @@ /// Table size is bigger than DATE_LUT_MAX_DAY_NUM to fill all indices within UInt16 range: this allows to remove extra check. 
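 /// Context for the change below: a UInt32 DateTime runs out at 2106-02-07 06:28:15 UTC,
 /// so the final year 2106 contributes only about 38 days of real data; the constructor
 /// change above pads years_months_lut with the last real month (2106-02) so that month
 /// lookups stay well-defined up to that boundary.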
#define DATE_LUT_SIZE 0x10000 #define DATE_LUT_MIN_YEAR 1970 -#define DATE_LUT_MAX_YEAR 2105 /// Last supported year +#define DATE_LUT_MAX_YEAR 2106 /// Last supported year (incomplete) #define DATE_LUT_YEARS (1 + DATE_LUT_MAX_YEAR - DATE_LUT_MIN_YEAR) /// Number of years in lookup table #if defined(__PPC__) diff --git a/src/Storages/MergeTree/MergeTreePartInfo.cpp b/src/Storages/MergeTree/MergeTreePartInfo.cpp index 43bd9538e3e..d30f6470bb1 100644 --- a/src/Storages/MergeTree/MergeTreePartInfo.cpp +++ b/src/Storages/MergeTree/MergeTreePartInfo.cpp @@ -120,8 +120,8 @@ void MergeTreePartInfo::parseMinMaxDatesFromPartName(const String & part_name, D min_date = date_lut.YYYYMMDDToDayNum(min_yyyymmdd); max_date = date_lut.YYYYMMDDToDayNum(max_yyyymmdd); - DayNum min_month = date_lut.toFirstDayNumOfMonth(min_date); - DayNum max_month = date_lut.toFirstDayNumOfMonth(max_date); + auto min_month = date_lut.toNumYYYYMM(min_date); + auto max_month = date_lut.toNumYYYYMM(max_date); if (min_month != max_month) throw Exception("Part name " + part_name + " contains different months", ErrorCodes::BAD_DATA_PART_NAME); From a517111259af9a7a03d9e94045335d72d01286f3 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 13 Apr 2020 01:30:10 +0300 Subject: [PATCH 370/484] Added one more test --- ...month_partitioning_consistency_select.reference | 4 ++++ ...01127_month_partitioning_consistency_select.sql | 14 ++++++++++++++ 2 files changed, 18 insertions(+) create mode 100644 tests/queries/0_stateless/01127_month_partitioning_consistency_select.reference create mode 100644 tests/queries/0_stateless/01127_month_partitioning_consistency_select.sql diff --git a/tests/queries/0_stateless/01127_month_partitioning_consistency_select.reference b/tests/queries/0_stateless/01127_month_partitioning_consistency_select.reference new file mode 100644 index 00000000000..1b08e7f2d6f --- /dev/null +++ b/tests/queries/0_stateless/01127_month_partitioning_consistency_select.reference @@ -0,0 +1,4 @@ +Q1 2106-02-07 Hello +Q2 0000-00-00 World +Q1 2106-02-07 Hello +Q2 0000-00-00 World diff --git a/tests/queries/0_stateless/01127_month_partitioning_consistency_select.sql b/tests/queries/0_stateless/01127_month_partitioning_consistency_select.sql new file mode 100644 index 00000000000..59edd0c37b8 --- /dev/null +++ b/tests/queries/0_stateless/01127_month_partitioning_consistency_select.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS mt; +CREATE TABLE mt (d Date, x String) ENGINE = MergeTree(d, x, 8192); +INSERT INTO mt VALUES ('2106-02-07', 'Hello'), ('1970-01-01', 'World'); + +SELECT 'Q1', * FROM mt WHERE d = '2106-02-07'; +SELECT 'Q2', * FROM mt WHERE d = '1970-01-01'; + +DETACH TABLE mt; +ATTACH TABLE mt; + +SELECT 'Q1', * FROM mt WHERE d = '2106-02-07'; +SELECT 'Q2', * FROM mt WHERE d = '1970-01-01'; + +DROP TABLE mt; From 8ad89a82d4522ab84d32068ef3d8d9a063165d3b Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 13 Apr 2020 02:08:32 +0300 Subject: [PATCH 371/484] Support Nested types in "generateRandom" --- src/Storages/StorageGenerateRandom.cpp | 28 +++++++++++++++---- .../01128_generate_random_nested.reference | 2 ++ .../01128_generate_random_nested.sql | 8 ++++++ 3 files changed, 32 insertions(+), 6 deletions(-) create mode 100644 tests/queries/0_stateless/01128_generate_random_nested.reference create mode 100644 tests/queries/0_stateless/01128_generate_random_nested.sql diff --git a/src/Storages/StorageGenerateRandom.cpp b/src/Storages/StorageGenerateRandom.cpp index d0772254045..70b84c076b7 100644 --- 
a/src/Storages/StorageGenerateRandom.cpp +++ b/src/Storages/StorageGenerateRandom.cpp @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -57,7 +58,12 @@ void fillBufferWithRandomData(char * __restrict data, size_t size, pcg64 & rng) ColumnPtr fillColumnWithRandomData( - const DataTypePtr type, UInt64 limit, UInt64 max_array_length, UInt64 max_string_length, pcg64 & rng, const Context & context) + const DataTypePtr type, + UInt64 limit, + UInt64 max_array_length, + UInt64 max_string_length, + pcg64 & rng, + const Context & context) { TypeIndex idx = type->getTypeId(); @@ -340,14 +346,24 @@ public: protected: Chunk generate() override { + /// To support Nested types, we will collect them to single Array of Tuple. + auto names_and_types = Nested::collect(block_header.getNamesAndTypesList()); + Columns columns; - columns.reserve(block_header.columns()); - DataTypes types = block_header.getDataTypes(); + columns.reserve(names_and_types.size()); - for (const auto & type : types) - columns.emplace_back(fillColumnWithRandomData(type, block_size, max_array_length, max_string_length, rng, context)); + Block compact_block; + for (const auto & elem : names_and_types) + { + compact_block.insert( + { + fillColumnWithRandomData(elem.type, block_size, max_array_length, max_string_length, rng, context), + elem.type, + elem.name + }); + } - return {std::move(columns), block_size}; + return {Nested::flatten(compact_block).getColumns(), block_size}; } private: diff --git a/tests/queries/0_stateless/01128_generate_random_nested.reference b/tests/queries/0_stateless/01128_generate_random_nested.reference new file mode 100644 index 00000000000..d9d2b251702 --- /dev/null +++ b/tests/queries/0_stateless/01128_generate_random_nested.reference @@ -0,0 +1,2 @@ +100 12366141706519416319 +109 2990700419202507835 diff --git a/tests/queries/0_stateless/01128_generate_random_nested.sql b/tests/queries/0_stateless/01128_generate_random_nested.sql new file mode 100644 index 00000000000..2af52e69893 --- /dev/null +++ b/tests/queries/0_stateless/01128_generate_random_nested.sql @@ -0,0 +1,8 @@ +DROP TABLE IF EXISTS mass_table_312; +CREATE TABLE mass_table_312 (d Date DEFAULT '2000-01-01', x UInt64, n Nested(a String, b String)) ENGINE = MergeTree(d, x, 1); +INSERT INTO mass_table_312 SELECT * FROM generateRandom('`d` Date,`x` UInt64,`n.a` Array(String),`n.b` Array(String)', 1, 10, 2) LIMIT 100; + +SELECT count(), sum(cityHash64(*)) FROM mass_table_312; +SELECT count(), sum(cityHash64(*)) FROM mass_table_312 ARRAY JOIN n; + +DROP TABLE mass_table_312; From 0cf882f4dfb09790e09635601b210c8e5814d0d0 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 13 Apr 2020 02:20:49 +0300 Subject: [PATCH 372/484] Update test --- .../01087_table_function_generate.reference | 60 +++++++++---------- 1 file changed, 30 insertions(+), 30 deletions(-) diff --git a/tests/queries/0_stateless/01087_table_function_generate.reference b/tests/queries/0_stateless/01087_table_function_generate.reference index 68238faee48..d7cc6b0a933 100644 --- a/tests/queries/0_stateless/01087_table_function_generate.reference +++ b/tests/queries/0_stateless/01087_table_function_generate.reference @@ -1,14 +1,14 @@ UInt64 Int64 UInt32 Int32 UInt16 Int16 UInt8 Int8 -2804162938822577320 -2776833771540858 3467776823 1163715250 23903 13655 137 -41 -7885388429666205427 -1363628932535403038 484159052 -308788249 56810 -22227 51 -41 -4357435422797280898 1355609803008819271 4126129912 -852056475 64304 -11401 139 86 -5935810273536892891 
-804738887697332962 3109335413 -80126721 258 12889 18 88 -368066018677693974 -4927165984347126295 1015254922 2026080544 44305 21973 16 0 -8124171311239967992 -1179703908046100129 1720727300 -138469036 61343 10573 252 -32 -15657812979985370729 -5733276247123822513 3254757884 -500590428 45913 19153 105 -102 -18371568619324220532 -6793779541583578394 1686821450 -455892108 49050 -28603 248 80 -821735343441964030 3148260644406230976 256251035 -885069056 58858 -29361 58 61 -9558594037060121162 -2907172753635797124 4276198376 1947296644 26801 -13531 204 -66 +2804162938822577320 -2776833771540858 3467776823 1163715250 31161 -2916 220 -117 +7885388429666205427 -1363628932535403038 484159052 -308788249 43346 13638 143 -105 +4357435422797280898 1355609803008819271 4126129912 -852056475 34184 9166 49 33 +5935810273536892891 -804738887697332962 3109335413 -80126721 47877 -31421 186 -77 +368066018677693974 -4927165984347126295 1015254922 2026080544 46037 -29626 240 108 +8124171311239967992 -1179703908046100129 1720727300 -138469036 33028 -12819 138 16 +15657812979985370729 -5733276247123822513 3254757884 -500590428 3829 30527 3 -81 +18371568619324220532 -6793779541583578394 1686821450 -455892108 43475 2284 252 -90 +821735343441964030 3148260644406230976 256251035 -885069056 11643 11455 176 90 +9558594037060121162 -2907172753635797124 4276198376 1947296644 45922 26632 97 43 - Enum8(\'hello\' = 1, \'world\' = 5) hello @@ -47,16 +47,16 @@ h o - Date DateTime DateTime(\'Europe/Moscow\') -2106-02-07 2050-12-17 02:46:35 2096-02-16 22:18:22 -2106-02-07 2013-10-17 23:35:26 1976-01-24 12:52:48 -2039-08-16 1974-11-17 23:22:46 1980-03-04 21:02:50 -1997-04-11 1972-09-18 23:44:08 2040-07-10 14:46:42 -2103-11-03 2044-11-23 20:57:12 1970-10-09 02:30:14 -2066-11-19 2029-12-10 03:13:55 2106-01-30 21:52:44 -2064-08-14 2016-07-14 11:33:45 2096-12-12 00:40:50 -2046-09-13 2085-07-10 18:51:14 2096-01-15 16:31:33 -2008-03-16 2047-05-16 23:28:36 2103-02-11 16:44:39 -2000-07-07 2105-07-19 19:29:06 1980-01-02 05:18:22 +2077-09-17 1970-10-09 02:30:14 2074-08-12 11:31:27 +2005-11-19 2106-01-30 21:52:44 2097-05-25 07:54:35 +2007-02-24 2096-12-12 00:40:50 1988-08-10 11:16:31 +2019-06-30 2096-01-15 16:31:33 2063-10-20 08:48:17 +2039-01-16 2103-02-11 16:44:39 2036-10-09 04:29:10 +1994-11-03 1980-01-02 05:18:22 2055-12-23 12:33:52 +2083-08-20 2079-06-11 16:29:02 2000-12-05 17:46:24 +2030-06-25 2100-03-01 18:50:22 1993-03-25 01:19:12 +2087-03-16 2034-08-25 19:46:33 2045-12-10 16:47:40 +2006-04-30 2069-09-30 16:07:48 2084-08-26 03:33:12 - DateTime64(3) DateTime64(6) DateTime64(6, \'Europe/Moscow\') 1978-06-07 23:50:57.320 2013-08-28 10:21:54.010758 1991-08-25 16:23:26.140215 @@ -225,14 +225,14 @@ RL,{Xs\\tw [114] -84125.1554 ('2023-06-06 06:55:06.492','bf9ab359-ef9f-ad11-7e6c-160368b1e5ea') [124] -114719.5228 ('2010-11-11 22:57:23.722','c1046ffb-3415-cc3a-509a-e0005856d7d7') - -[] 1900051923 { -189530.5846 h -5.6279699579452485e47 ('1984-12-06','2028-08-17 06:05:01','2036-04-02 23:52:28.468','4b3d498c-dd44-95c1-5b75-921504ec5d8d') F743 -[-102,-118] 392272782 Eb -14818.0200 o -2.664492247169164e59 ('2082-12-26','2052-09-09 06:50:50','2088-04-21 05:07:08.245','aeb9c26e-0ee7-2b8e-802b-2a96319b8e60') CBF4 -[-71] 775049089 \N -158115.1178 w 4.1323844687113747e-305 ('2106-02-07','2090-07-31 16:45:26','2076-07-10 09:11:06.385','57c69bc6-dddd-0975-e932-a7b5173a1304') EB1D -[-28,100] 3675466147 { -146685.1749 h 3.6676044396877755e142 ('2017-10-25','2100-02-28 18:07:18','2055-10-14 06:36:20.056','14949dae-dfa8-a124-af83-887348b2f609') 6D88 -[-23] 
2514120753 (`u, -119659.6174 w 1.3231258347475906e34 ('2106-02-07','2074-08-10 06:25:12','1976-12-04 18:31:55.745','86a9b3c1-4593-4d56-7762-3aa1dd22cbbf') AD43
-[11,-36] 3308237300 \N 171205.1896 \N 5.634708707075817e195 ('1974-10-31','1993-12-24 09:38:45','2038-07-15 05:22:51.805','63d999b8-8cca-e237-c4a4-4dd7d0096f65') 609E
-[39] 1614362420 `4A8P 157144.0630 o -1.1843143253872814e-255 ('2106-02-07','2072-09-28 18:27:27','2073-07-10 12:19:58.146','6483f5c0-8733-364c-4fa0-9948d32e8903') A886
-[48,-120] 3848918261 1 Date: Sun, 5 Apr 2020 00:07:00 +0300
Subject: [PATCH 373/484] Add log_queries_min_type to filter which entries will
 be written to query_log

Can be used to write into query_log only failed queries (e.g. on a memory
exceeded error), by using:

set log_queries_min_type='EXCEPTION_WHILE_PROCESSING'
---
 docs/en/operations/settings/settings.md            | 18 +++++++++++++++++
 src/Core/Settings.h                                |  2 +-
 src/Core/SettingsCollection.cpp                    |  7 +++++++
 src/Core/SettingsCollection.h                      | 10 ++++++++++
 src/Interpreters/QueryLog.h                        |  9 ++-------
 src/Interpreters/executeQuery.cpp                  | 20 +++++++++----------
 .../01231_log_queries_min_type.reference           |  5 +++++
 .../01231_log_queries_min_type.sql                 | 15 ++++++++++++++
 8 files changed, 68 insertions(+), 18 deletions(-)
 create mode 100644 tests/queries/0_stateless/01231_log_queries_min_type.reference
 create mode 100644 tests/queries/0_stateless/01231_log_queries_min_type.sql

diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md
index 69c444ebaef..37b4c713f91 100644
--- a/docs/en/operations/settings/settings.md
+++ b/docs/en/operations/settings/settings.md
@@ -507,6 +507,24 @@ Example:
 log_queries=1
 ```
 
+## log\_queries\_min\_type {#settings-log-queries-min-type}
+
+Minimal type of `query_log` entries to log.
+
+Possible values:
+- `QUERY_START` (`=1`)
+- `QUERY_FINISH` (`=2`)
+- `EXCEPTION_BEFORE_START` (`=3`)
+- `EXCEPTION_WHILE_PROCESSING` (`=4`)
+
+Default value: `QUERY_START`.
+
+Can be used to limit which entries go to `query_log`. If you are interested only in errors, you can use `EXCEPTION_WHILE_PROCESSING`:
+
+``` text
+log_queries_min_type='EXCEPTION_WHILE_PROCESSING'
+```
+
 ## log\_query\_threads {#settings-log-query-threads}
 
 Setting up query threads logging.
diff --git a/src/Core/Settings.h b/src/Core/Settings.h
index 8138af31d5f..725171d4a1b 100644
--- a/src/Core/Settings.h
+++ b/src/Core/Settings.h
@@ -149,7 +149,7 @@ struct Settings : public SettingsCollection<Settings>
     M(SettingInt64, os_thread_priority, 0, "If non zero - set corresponding 'nice' value for query processing threads. Can be used to adjust query priority for OS scheduler.", 0) \
     \
     M(SettingBool, log_queries, 0, "Log requests and write the log to the system table.", 0) \
-    \
+    M(SettingLogQueriesType, log_queries_min_type, QueryLogElementType::QUERY_START, "query_log minimal type to log, possible values (from low to high): QUERY_START, QUERY_FINISH, EXCEPTION_BEFORE_START, EXCEPTION_WHILE_PROCESSING.", 0) \
     M(SettingUInt64, log_queries_cut_to_length, 100000, "If query length is greater than specified threshold (in bytes), then cut query when writing to query log.
Also limit length of printed query in ordinary text log.", 0) \ \ M(SettingDistributedProductMode, distributed_product_mode, DistributedProductMode::DENY, "How are distributed subqueries performed inside IN or JOIN sections?", IMPORTANT) \ diff --git a/src/Core/SettingsCollection.cpp b/src/Core/SettingsCollection.cpp index 6d879b27181..238ac1c3c62 100644 --- a/src/Core/SettingsCollection.cpp +++ b/src/Core/SettingsCollection.cpp @@ -542,6 +542,13 @@ IMPLEMENT_SETTING_ENUM(FormatSettings::DateTimeInputFormat, DATE_TIME_INPUT_FORM M(trace, "trace") IMPLEMENT_SETTING_ENUM(LogsLevel, LOGS_LEVEL_LIST_OF_NAMES, ErrorCodes::BAD_ARGUMENTS) +#define LOG_QUERIES_TYPE_LIST_OF_NAMES(M) \ + M(QUERY_START, "QUERY_START") \ + M(QUERY_FINISH, "QUERY_FINISH") \ + M(EXCEPTION_BEFORE_START, "EXCEPTION_BEFORE_START") \ + M(EXCEPTION_WHILE_PROCESSING, "EXCEPTION_WHILE_PROCESSING") +IMPLEMENT_SETTING_ENUM(QueryLogElementType, LOG_QUERIES_TYPE_LIST_OF_NAMES, ErrorCodes::BAD_ARGUMENTS) + namespace details { diff --git a/src/Core/SettingsCollection.h b/src/Core/SettingsCollection.h index da21412b7c1..d93772e86ed 100644 --- a/src/Core/SettingsCollection.h +++ b/src/Core/SettingsCollection.h @@ -298,6 +298,16 @@ enum class LogsLevel }; using SettingLogsLevel = SettingEnum; +// Make it signed for compatibility with DataTypeEnum8 +enum QueryLogElementType : int8_t +{ + QUERY_START = 1, + QUERY_FINISH = 2, + EXCEPTION_BEFORE_START = 3, + EXCEPTION_WHILE_PROCESSING = 4, +}; +using SettingLogQueriesType = SettingEnum; + enum class SettingsBinaryFormat { diff --git a/src/Interpreters/QueryLog.h b/src/Interpreters/QueryLog.h index 836b37095e9..ec14f5e97fb 100644 --- a/src/Interpreters/QueryLog.h +++ b/src/Interpreters/QueryLog.h @@ -1,6 +1,7 @@ #pragma once #include +#include namespace ProfileEvents @@ -22,13 +23,7 @@ namespace DB /// A struct which will be inserted as row into query_log table struct QueryLogElement { - enum Type : int8_t // Make it signed for compatibility with DataTypeEnum8 - { - QUERY_START = 1, - QUERY_FINISH = 2, - EXCEPTION_BEFORE_START = 3, - EXCEPTION_WHILE_PROCESSING = 4, - }; + using Type = QueryLogElementType; Type type = QUERY_START; diff --git a/src/Interpreters/executeQuery.cpp b/src/Interpreters/executeQuery.cpp index c9c66832f08..68bebb83619 100644 --- a/src/Interpreters/executeQuery.cpp +++ b/src/Interpreters/executeQuery.cpp @@ -157,7 +157,7 @@ static void onExceptionBeforeStart(const String & query_for_logging, Context & c /// Log the start of query execution into the table if necessary. QueryLogElement elem; - elem.type = QueryLogElement::EXCEPTION_BEFORE_START; + elem.type = QueryLogElementType::EXCEPTION_BEFORE_START; elem.event_time = current_time; elem.query_start_time = current_time; @@ -175,7 +175,7 @@ static void onExceptionBeforeStart(const String & query_for_logging, Context & c /// Update performance counters before logging to query_log CurrentThread::finalizePerformanceCounters(); - if (settings.log_queries) + if (settings.log_queries && elem.type >= settings.log_queries_min_type) if (auto query_log = context.getQueryLog()) query_log->add(elem); } @@ -400,7 +400,7 @@ static std::tuple executeQueryImpl( { QueryLogElement elem; - elem.type = QueryLogElement::QUERY_START; + elem.type = QueryLogElementType::QUERY_START; elem.event_time = current_time; elem.query_start_time = current_time; @@ -412,7 +412,7 @@ static std::tuple executeQueryImpl( bool log_queries = settings.log_queries && !internal; /// Log into system table start of query execution, if need. 
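        /// A usage sketch mirroring the docs added by this patch: after
        ///     SET log_queries = 1, log_queries_min_type = 'EXCEPTION_WHILE_PROCESSING'
        /// only elements whose type reaches the configured threshold pass the checks below,
        /// so system.query_log keeps failed queries only.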
- if (log_queries) + if (log_queries && elem.type >= settings.log_queries_min_type) { if (settings.log_query_settings) elem.query_settings = std::make_shared(context.getSettingsRef()); @@ -422,7 +422,7 @@ static std::tuple executeQueryImpl( } /// Also make possible for caller to log successful query finish and exception during execution. - auto finish_callback = [elem, &context, log_queries] (IBlockInputStream * stream_in, IBlockOutputStream * stream_out) mutable + auto finish_callback = [elem, &context, log_queries, log_queries_min_type = settings.log_queries_min_type] (IBlockInputStream * stream_in, IBlockOutputStream * stream_out) mutable { QueryStatus * process_list_elem = context.getProcessListElement(); @@ -436,7 +436,7 @@ static std::tuple executeQueryImpl( double elapsed_seconds = info.elapsed_seconds; - elem.type = QueryLogElement::QUERY_FINISH; + elem.type = QueryLogElementType::QUERY_FINISH; elem.event_time = time(nullptr); elem.query_duration_ms = elapsed_seconds * 1000; @@ -484,19 +484,19 @@ static std::tuple executeQueryImpl( elem.thread_ids = std::move(info.thread_ids); elem.profile_counters = std::move(info.profile_counters); - if (log_queries) + if (log_queries && elem.type >= log_queries_min_type) { if (auto query_log = context.getQueryLog()) query_log->add(elem); } }; - auto exception_callback = [elem, &context, log_queries, quota(quota)] () mutable + auto exception_callback = [elem, &context, log_queries, log_queries_min_type = settings.log_queries_min_type, quota(quota)] () mutable { if (quota) quota->used(Quota::ERRORS, 1, /* check_exceeded = */ false); - elem.type = QueryLogElement::EXCEPTION_WHILE_PROCESSING; + elem.type = QueryLogElementType::EXCEPTION_WHILE_PROCESSING; elem.event_time = time(nullptr); elem.query_duration_ms = 1000 * (elem.event_time - elem.query_start_time); @@ -529,7 +529,7 @@ static std::tuple executeQueryImpl( logException(context, elem); /// In case of exception we log internal queries also - if (log_queries) + if (log_queries && elem.type >= log_queries_min_type) { if (auto query_log = context.getQueryLog()) query_log->add(elem); diff --git a/tests/queries/0_stateless/01231_log_queries_min_type.reference b/tests/queries/0_stateless/01231_log_queries_min_type.reference new file mode 100644 index 00000000000..a358d022033 --- /dev/null +++ b/tests/queries/0_stateless/01231_log_queries_min_type.reference @@ -0,0 +1,5 @@ +01231_log_queries_min_type/QUERY_START +2 +01231_log_queries_min_type/EXCEPTION_BEFORE_START +2 +3 diff --git a/tests/queries/0_stateless/01231_log_queries_min_type.sql b/tests/queries/0_stateless/01231_log_queries_min_type.sql new file mode 100644 index 00000000000..f2229c94a8a --- /dev/null +++ b/tests/queries/0_stateless/01231_log_queries_min_type.sql @@ -0,0 +1,15 @@ +set log_queries=1; + +select '01231_log_queries_min_type/QUERY_START'; +system flush logs; +select count() from system.query_log where query like '%01231_log_queries_min_type/%' and query not like '%system.query_log%' and event_date = today() and event_time >= now() - interval 1 minute; + +set log_queries_min_type='EXCEPTION_BEFORE_START'; +select '01231_log_queries_min_type/EXCEPTION_BEFORE_START'; +system flush logs; +select count() from system.query_log where query like '%01231_log_queries_min_type/%' and query not like '%system.query_log%' and event_date = today() and event_time >= now() - interval 1 minute; + +set log_queries_min_type='EXCEPTION_WHILE_PROCESSING'; +select '01231_log_queries_min_type/', max(number) from system.numbers limit 1e6 settings 
max_rows_to_read='100K'; -- { serverError 158; } +system flush logs; +select count() from system.query_log where query like '%01231_log_queries_min_type/%' and query not like '%system.query_log%' and event_date = today() and event_time >= now() - interval 1 minute; From c7eaaaf7fe1a340eeb20f5edfdd0f6d24aa40157 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 13 Apr 2020 04:33:05 +0300 Subject: [PATCH 374/484] Small refactoring of SystemLogs --- src/Interpreters/MetricLog.cpp | 7 +++ src/Interpreters/MetricLog.h | 2 + src/Interpreters/SystemLog.cpp | 30 +++++------ src/Interpreters/SystemLog.h | 96 +++++++++++++++++++++------------- 4 files changed, 84 insertions(+), 51 deletions(-) diff --git a/src/Interpreters/MetricLog.cpp b/src/Interpreters/MetricLog.cpp index 5622e0c65b0..bd898170705 100644 --- a/src/Interpreters/MetricLog.cpp +++ b/src/Interpreters/MetricLog.cpp @@ -70,6 +70,13 @@ void MetricLog::stopCollectMetric() } +void MetricLog::shutdown() +{ + stopCollectMetric(); + stopFlushThread(); +} + + inline UInt64 time_in_milliseconds(std::chrono::time_point timepoint) { return std::chrono::duration_cast(timepoint.time_since_epoch()).count(); diff --git a/src/Interpreters/MetricLog.h b/src/Interpreters/MetricLog.h index c55bad2c12f..a90ce923494 100644 --- a/src/Interpreters/MetricLog.h +++ b/src/Interpreters/MetricLog.h @@ -34,6 +34,8 @@ class MetricLog : public SystemLog using SystemLog::SystemLog; public: + void shutdown() override; + /// Launches a background thread to collect metrics with interval void startCollectMetric(size_t collect_interval_milliseconds_); diff --git a/src/Interpreters/SystemLog.cpp b/src/Interpreters/SystemLog.cpp index a78342f8b17..fc0f2f98125 100644 --- a/src/Interpreters/SystemLog.cpp +++ b/src/Interpreters/SystemLog.cpp @@ -83,6 +83,19 @@ SystemLogs::SystemLogs(Context & global_context, const Poco::Util::AbstractConfi size_t collect_interval_milliseconds = config.getUInt64("metric_log.collect_interval_milliseconds"); metric_log->startCollectMetric(collect_interval_milliseconds); } + + if (query_log) + logs.emplace_back(query_log.get()); + if (query_thread_log) + logs.emplace_back(query_thread_log.get()); + if (part_log) + logs.emplace_back(part_log.get()); + if (trace_log) + logs.emplace_back(trace_log.get()); + if (text_log) + logs.emplace_back(text_log.get()); + if (metric_log) + logs.emplace_back(metric_log.get()); } @@ -93,21 +106,8 @@ SystemLogs::~SystemLogs() void SystemLogs::shutdown() { - if (query_log) - query_log->shutdown(); - if (query_thread_log) - query_thread_log->shutdown(); - if (part_log) - part_log->shutdown(); - if (trace_log) - trace_log->shutdown(); - if (text_log) - text_log->shutdown(); - if (metric_log) - { - metric_log->stopCollectMetric(); - metric_log->shutdown(); - } + for (auto & log : logs) + log->shutdown(); } } diff --git a/src/Interpreters/SystemLog.h b/src/Interpreters/SystemLog.h index 87da342ae1f..7c8dc1606f7 100644 --- a/src/Interpreters/SystemLog.h +++ b/src/Interpreters/SystemLog.h @@ -2,6 +2,9 @@ #include #include +#include +#include + #include #include #include @@ -59,13 +62,20 @@ namespace ErrorCodes #define DBMS_SYSTEM_LOG_QUEUE_SIZE 1048576 + class Context; -class QueryLog; -class QueryThreadLog; -class PartLog; -class TextLog; -class TraceLog; -class MetricLog; + + +class ISystemLog +{ +public: + virtual String getName() = 0; + virtual ASTPtr getCreateTableQuery() = 0; + virtual void flush() = 0; + virtual void shutdown() = 0; + virtual ~ISystemLog() = default; +}; + /// System logs should be destroyed 
in destructor of the last Context and before tables, /// because SystemLog destruction makes insert query while flushing data into underlying tables @@ -82,11 +92,13 @@ struct SystemLogs std::shared_ptr trace_log; /// Used to log traces from query profiler std::shared_ptr text_log; /// Used to log all text messages. std::shared_ptr metric_log; /// Used to log all metrics. + + std::vector logs; }; template -class SystemLog : private boost::noncopyable +class SystemLog : public ISystemLog, private boost::noncopyable { public: using Self = SystemLog; @@ -106,18 +118,28 @@ public: const String & storage_def_, size_t flush_interval_milliseconds_); - ~SystemLog(); - /** Append a record into log. * Writing to table will be done asynchronously and in case of failure, record could be lost. */ void add(const LogElement & element); + void stopFlushThread(); + /// Flush data in the buffer to disk - void flush(); + void flush() override; /// Stop the background flush thread before destructor. No more data will be written. - void shutdown(); + void shutdown() override + { + stopFlushThread(); + } + + String getName() override + { + return LogElement::name(); + } + + ASTPtr getCreateTableQuery() override; protected: Logger * log; @@ -250,7 +272,7 @@ void SystemLog::flush() template -void SystemLog::shutdown() +void SystemLog::stopFlushThread() { { std::unique_lock lock(mutex); @@ -270,13 +292,6 @@ void SystemLog::shutdown() } -template -SystemLog::~SystemLog() -{ - shutdown(); -} - - template void SystemLog::savingThreadFunction() { @@ -399,7 +414,7 @@ void SystemLog::prepareTable() rename->elements.emplace_back(elem); LOG_DEBUG(log, "Existing table " << description << " for system log has obsolete or different structure." - " Renaming it to " << backQuoteIfNeed(to.table)); + " Renaming it to " << backQuoteIfNeed(to.table)); InterpreterRenameQuery(rename, context).execute(); @@ -415,22 +430,7 @@ void SystemLog::prepareTable() /// Create the table. 
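     /// The CREATE query is now assembled by getCreateTableQuery(), defined at the end of
     /// this file and exposed through the new ISystemLog interface, instead of being built
     /// inline at this point.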
LOG_DEBUG(log, "Creating new table " << description << " for " + LogElement::name()); - auto create = std::make_shared(); - - create->database = table_id.database_name; - create->table = table_id.table_name; - - Block sample = LogElement::createBlock(); - - auto new_columns_list = std::make_shared(); - new_columns_list->set(new_columns_list->columns, InterpreterCreateQuery::formatColumns(sample.getNamesAndTypesList())); - create->set(create->columns_list, new_columns_list); - - ParserStorage storage_parser; - ASTPtr storage_ast = parseQuery( - storage_parser, storage_def.data(), storage_def.data() + storage_def.size(), - "Storage to create table for " + LogElement::name(), 0); - create->set(create->storage, storage_ast); + auto create = getCreateTableQuery(); InterpreterCreateQuery interpreter(create, context); interpreter.setInternal(true); @@ -442,4 +442,28 @@ void SystemLog::prepareTable() is_prepared = true; } + +template +ASTPtr SystemLog::getCreateTableQuery() +{ + auto create = std::make_shared(); + + create->database = table_id.database_name; + create->table = table_id.table_name; + + Block sample = LogElement::createBlock(); + + auto new_columns_list = std::make_shared(); + new_columns_list->set(new_columns_list->columns, InterpreterCreateQuery::formatColumns(sample.getNamesAndTypesList())); + create->set(create->columns_list, new_columns_list); + + ParserStorage storage_parser; + ASTPtr storage_ast = parseQuery( + storage_parser, storage_def.data(), storage_def.data() + storage_def.size(), + "Storage to create table for " + LogElement::name(), 0); + create->set(create->storage, storage_ast); + + return create; +} + } From df28eca407d069c6730d13a6d5c3a620f68c5304 Mon Sep 17 00:00:00 2001 From: Konstantin Lebedev Date: Mon, 13 Apr 2020 01:03:44 +0300 Subject: [PATCH 375/484] Add test for communication between ClickHouse and Zookeeper over SSL --- tests/integration/helpers/cluster.py | 10 +- .../helpers/zookeeper-ssl-entrypoint.sh | 95 +++++++++++++++++++ .../configs/zookeeper_config_with_ssl.xml | 20 ++++ .../configs_secure/client.crt | 19 ++++ .../configs_secure/client.key | 28 ++++++ .../configs_secure/conf.d/remote_servers.xml | 17 ++++ .../configs_secure/conf.d/ssl_conf.xml | 16 ++++ .../integration/test_zookeeper_config/test.py | 54 ++++++++++- 8 files changed, 255 insertions(+), 4 deletions(-) create mode 100755 tests/integration/helpers/zookeeper-ssl-entrypoint.sh create mode 100644 tests/integration/test_zookeeper_config/configs/zookeeper_config_with_ssl.xml create mode 100644 tests/integration/test_zookeeper_config/configs_secure/client.crt create mode 100644 tests/integration/test_zookeeper_config/configs_secure/client.key create mode 100644 tests/integration/test_zookeeper_config/configs_secure/conf.d/remote_servers.xml create mode 100644 tests/integration/test_zookeeper_config/configs_secure/conf.d/ssl_conf.xml diff --git a/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py index 717fab11449..b5cae86dc2d 100644 --- a/tests/integration/helpers/cluster.py +++ b/tests/integration/helpers/cluster.py @@ -144,7 +144,8 @@ class ClickHouseCluster: with_odbc_drivers=False, with_postgres=False, with_hdfs=False, with_mongo=False, with_redis=False, with_minio=False, hostname=None, env_variables=None, image="yandex/clickhouse-integration-test", - stay_alive=False, ipv4_address=None, ipv6_address=None, with_installed_binary=False, tmpfs=None): + stay_alive=False, ipv4_address=None, ipv6_address=None, with_installed_binary=False, tmpfs=None, + 
zookeeper_docker_compose_path=None): """Add an instance to the cluster. name - the name of the instance directory and the value of the 'instance' macro in ClickHouse. @@ -179,10 +180,13 @@ class ClickHouseCluster: cmds = [] if with_zookeeper and not self.with_zookeeper: + if not zookeeper_docker_compose_path: + zookeeper_docker_compose_path = p.join(HELPERS_DIR, 'docker_compose_zookeeper.yml') + self.with_zookeeper = True - self.base_cmd.extend(['--file', p.join(HELPERS_DIR, 'docker_compose_zookeeper.yml')]) + self.base_cmd.extend(['--file', zookeeper_docker_compose_path]) self.base_zookeeper_cmd = ['docker-compose', '--project-directory', self.base_dir, '--project-name', - self.project_name, '--file', p.join(HELPERS_DIR, 'docker_compose_zookeeper.yml')] + self.project_name, '--file', zookeeper_docker_compose_path] cmds.append(self.base_zookeeper_cmd) if with_mysql and not self.with_mysql: diff --git a/tests/integration/helpers/zookeeper-ssl-entrypoint.sh b/tests/integration/helpers/zookeeper-ssl-entrypoint.sh new file mode 100755 index 00000000000..3ddb21881d6 --- /dev/null +++ b/tests/integration/helpers/zookeeper-ssl-entrypoint.sh @@ -0,0 +1,95 @@ +#!/bin/bash + +set -e + +export ZOO_SERVER_CNXN_FACTORY=org.apache.zookeeper.server.NettyServerCnxnFactory +export ZOO_SSL_KEYSTORE_LOCATION=/conf/certs/zookeeper.p12 +export ZOO_SSL_KEYSTORE_PASSWORD=password +export ZOO_SSL_TRUSTSTORE_LOCATION=/conf/certs/truststore.p12 +export ZOO_SSL_TRUSTSTORE_PASSWORD=password + + +# Allow the container to be started with `--user` +if [[ "$1" = 'zkServer.sh' && "$(id -u)" = '0' ]]; then + chown -R zookeeper "$ZOO_DATA_DIR" "$ZOO_DATA_LOG_DIR" "$ZOO_LOG_DIR" "$ZOO_CONF_DIR" + exec gosu zookeeper "$0" "$@" +fi + +# Generate the config only if it doesn't exist +if [[ ! 
-f "$ZOO_CONF_DIR/zoo.cfg" ]]; then + CONFIG="$ZOO_CONF_DIR/zoo.cfg" + { + echo "dataDir=$ZOO_DATA_DIR" + echo "dataLogDir=$ZOO_DATA_LOG_DIR" + + echo "tickTime=$ZOO_TICK_TIME" + echo "initLimit=$ZOO_INIT_LIMIT" + echo "syncLimit=$ZOO_SYNC_LIMIT" + + echo "autopurge.snapRetainCount=$ZOO_AUTOPURGE_SNAPRETAINCOUNT" + echo "autopurge.purgeInterval=$ZOO_AUTOPURGE_PURGEINTERVAL" + echo "maxClientCnxns=$ZOO_MAX_CLIENT_CNXNS" + echo "standaloneEnabled=$ZOO_STANDALONE_ENABLED" + echo "admin.enableServer=$ZOO_ADMINSERVER_ENABLED" + } >> "$CONFIG" + if [[ -z $ZOO_SERVERS ]]; then + ZOO_SERVERS="server.1=localhost:2888:3888;2181" + fi + + for server in $ZOO_SERVERS; do + echo "$server" >> "$CONFIG" + done + + if [[ -n $ZOO_4LW_COMMANDS_WHITELIST ]]; then + echo "4lw.commands.whitelist=$ZOO_4LW_COMMANDS_WHITELIST" >> "$CONFIG" + fi + + + if [[ -n $ZOO_SSL_QUORUM ]]; then + { + echo "sslQuorum=$ZOO_SSL_QUORUM" + echo "serverCnxnFactory=$ZOO_SERVER_CNXN_FACTORY" + echo "ssl.quorum.keyStore.location=$ZOO_SSL_QUORUM_KEYSTORE_LOCATION" + echo "ssl.quorum.keyStore.password=$ZOO_SSL_QUORUM_KEYSTORE_PASSWORD" + echo "ssl.quorum.trustStore.location=$ZOO_SSL_QUORUM_TRUSTSTORE_LOCATION" + echo "ssl.quorum.trustStore.password=$ZOO_SSL_QUORUM_TRUSTSTORE_PASSWORD" + } >> "$CONFIG" + fi + + if [[ -n $ZOO_PORT_UNIFICATION ]]; then + echo "portUnification=$ZOO_PORT_UNIFICATION" >> "$CONFIG" + fi + + if [[ -n $ZOO_SECURE_CLIENT_PORT ]]; then + { + echo "secureClientPort=$ZOO_SECURE_CLIENT_PORT" + echo "serverCnxnFactory=$ZOO_SERVER_CNXN_FACTORY" + echo "ssl.keyStore.location=$ZOO_SSL_KEYSTORE_LOCATION" + echo "ssl.keyStore.password=$ZOO_SSL_KEYSTORE_PASSWORD" + echo "ssl.trustStore.location=$ZOO_SSL_TRUSTSTORE_LOCATION" + echo "ssl.trustStore.password=$ZOO_SSL_TRUSTSTORE_PASSWORD" + } >> "$CONFIG" + fi + + if [[ -n $ZOO_CLIENT_PORT_UNIFICATION ]]; then + echo "client.portUnification=$ZOO_CLIENT_PORT_UNIFICATION" >> "$CONFIG" + fi +fi + +# Write myid only if it doesn't exist +if [[ ! -f "$ZOO_DATA_DIR/myid" ]]; then + echo "${ZOO_MY_ID:-1}" > "$ZOO_DATA_DIR/myid" +fi + +mkdir -p $(dirname $ZOO_SSL_KEYSTORE_LOCATION) +mkdir -p $(dirname $ZOO_SSL_TRUSTSTORE_LOCATION) + +if [[ ! -f "$ZOO_SSL_KEYSTORE_LOCATION" ]]; then + keytool -genkeypair -alias zookeeper -keyalg RSA -validity 365 -keysize 2048 -dname "cn=zookeeper" -keypass password -keystore $ZOO_SSL_KEYSTORE_LOCATION -storepass password -deststoretype pkcs12 +fi + +if [[ ! 
-f "$ZOO_SSL_TRUSTSTORE_LOCATION" ]]; then + keytool -importcert -alias zookeeper -file /clickhouse-config/client.crt -keystore $ZOO_SSL_TRUSTSTORE_LOCATION -storepass password -noprompt -deststoretype pkcs12 +fi + +exec "$@" diff --git a/tests/integration/test_zookeeper_config/configs/zookeeper_config_with_ssl.xml b/tests/integration/test_zookeeper_config/configs/zookeeper_config_with_ssl.xml new file mode 100644 index 00000000000..fc03b609146 --- /dev/null +++ b/tests/integration/test_zookeeper_config/configs/zookeeper_config_with_ssl.xml @@ -0,0 +1,20 @@ + + + + zoo1 + 2281 + 1 + + + zoo2 + 2281 + 1 + + + zoo3 + 2281 + 1 + + 3000 + + diff --git a/tests/integration/test_zookeeper_config/configs_secure/client.crt b/tests/integration/test_zookeeper_config/configs_secure/client.crt new file mode 100644 index 00000000000..7ade2d96273 --- /dev/null +++ b/tests/integration/test_zookeeper_config/configs_secure/client.crt @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIC/TCCAeWgAwIBAgIJANjx1QSR77HBMA0GCSqGSIb3DQEBCwUAMBQxEjAQBgNV +BAMMCWxvY2FsaG9zdDAgFw0xODA3MzAxODE2MDhaGA8yMjkyMDUxNDE4MTYwOFow +FDESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAs9uSo6lJG8o8pw0fbVGVu0tPOljSWcVSXH9uiJBwlZLQnhN4SFSFohfI +4K8U1tBDTnxPLUo/V1K9yzoLiRDGMkwVj6+4+hE2udS2ePTQv5oaMeJ9wrs+5c9T +4pOtlq3pLAdm04ZMB1nbrEysceVudHRkQbGHzHp6VG29Fw7Ga6YpqyHQihRmEkTU +7UCYNA+Vk7aDPdMS/khweyTpXYZimaK9f0ECU3/VOeG3fH6Sp2X6FN4tUj/aFXEj +sRmU5G2TlYiSIUMF2JPdhSihfk1hJVALrHPTU38SOL+GyyBRWdNcrIwVwbpvsvPg +pryMSNxnpr0AK0dFhjwnupIv5hJIOQIDAQABo1AwTjAdBgNVHQ4EFgQUjPLb3uYC +kcamyZHK4/EV8jAP0wQwHwYDVR0jBBgwFoAUjPLb3uYCkcamyZHK4/EV8jAP0wQw +DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAM/ocuDvfPus/KpMVD51j +4IdlU8R0vmnYLQ+ygzOAo7+hUWP5j0yvq4ILWNmQX6HNvUggCgFv9bjwDFhb/5Vr +85ieWfTd9+LTjrOzTw4avdGwpX9G+6jJJSSq15tw5ElOIFb/qNA9O4dBiu8vn03C +L/zRSXrARhSqTW5w/tZkUcSTT+M5h28+Lgn9ysx4Ff5vi44LJ1NnrbJbEAIYsAAD ++UA+4MBFKx1r6hHINULev8+lCfkpwIaeS8RL+op4fr6kQPxnULw8wT8gkuc8I4+L +P9gg/xDHB44T3ADGZ5Ib6O0DJaNiToO6rnoaaxs0KkotbvDWvRoxEytSbXKoYjYp +0g== +-----END CERTIFICATE----- diff --git a/tests/integration/test_zookeeper_config/configs_secure/client.key b/tests/integration/test_zookeeper_config/configs_secure/client.key new file mode 100644 index 00000000000..f0fb61ac443 --- /dev/null +++ b/tests/integration/test_zookeeper_config/configs_secure/client.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCz25KjqUkbyjyn +DR9tUZW7S086WNJZxVJcf26IkHCVktCeE3hIVIWiF8jgrxTW0ENOfE8tSj9XUr3L +OguJEMYyTBWPr7j6ETa51LZ49NC/mhox4n3Cuz7lz1Pik62WreksB2bThkwHWdus +TKxx5W50dGRBsYfMenpUbb0XDsZrpimrIdCKFGYSRNTtQJg0D5WTtoM90xL+SHB7 +JOldhmKZor1/QQJTf9U54bd8fpKnZfoU3i1SP9oVcSOxGZTkbZOViJIhQwXYk92F +KKF+TWElUAusc9NTfxI4v4bLIFFZ01ysjBXBum+y8+CmvIxI3GemvQArR0WGPCe6 +ki/mEkg5AgMBAAECggEATrbIBIxwDJOD2/BoUqWkDCY3dGevF8697vFuZKIiQ7PP +TX9j4vPq0DfsmDjHvAPFkTHiTQXzlroFik3LAp+uvhCCVzImmHq0IrwvZ9xtB43f +7Pkc5P6h1l3Ybo8HJ6zRIY3TuLtLxuPSuiOMTQSGRL0zq3SQ5DKuGwkz+kVjHXUN +MR2TECFwMHKQ5VLrC+7PMpsJYyOMlDAWhRfUalxC55xOXTpaN8TxNnwQ8K2ISVY5 +212Jz/a4hn4LdwxSz3Tiu95PN072K87HLWx3EdT6vW4Ge5P/A3y+smIuNAlanMnu +plHBRtpATLiTxZt/n6npyrfQVbYjSH7KWhB8hBHtaQKBgQDh9Cq1c/KtqDtE0Ccr +/r9tZNTUwBE6VP+3OJeKdEdtsfuxjOCkS1oAjgBJiSDOiWPh1DdoDeVZjPKq6pIu +Mq12OE3Doa8znfCXGbkSzEKOb2unKZMJxzrz99kXt40W5DtrqKPNb24CNqTiY8Aa +CjtcX+3weat82VRXvph6U8ltMwKBgQDLxjiQQzNoY7qvg7CwJCjf9qq8jmLK766g +1FHXopqS+dTxDLM8eJSRrpmxGWJvNeNc1uPhsKsKgotqAMdBUQTf7rSTbt4MyoH5 +bUcRLtr+0QTK9hDWMOOvleqNXha68vATkohWYfCueNsC60qD44o8RZAS6UNy3ENq 
+cM1cxqe84wKBgQDKkHutWnooJtajlTxY27O/nZKT/HA1bDgniMuKaz4R4Gr1PIez +on3YW3V0d0P7BP6PWRIm7bY79vkiMtLEKdiKUGWeyZdo3eHvhDb/3DCawtau8L2K +GZsHVp2//mS1Lfz7Qh8/L/NedqCQ+L4iWiPnZ3THjjwn3CoZ05ucpvrAMwKBgB54 +nay039MUVq44Owub3KDg+dcIU62U+cAC/9oG7qZbxYPmKkc4oL7IJSNecGHA5SbU +2268RFdl/gLz6tfRjbEOuOHzCjFPdvAdbysanpTMHLNc6FefJ+zxtgk9sJh0C4Jh +vxFrw9nTKKzfEl12gQ1SOaEaUIO0fEBGbe8ZpauRAoGAMAlGV+2/K4ebvAJKOVTa +dKAzQ+TD2SJmeR1HZmKDYddNqwtZlzg3v4ZhCk4eaUmGeC1Bdh8MDuB3QQvXz4Dr +vOIP4UVaOr+uM+7TgAgVnP4/K6IeJGzUDhX93pmpWhODfdu/oojEKVcpCojmEmS1 +KCBtmIrQLqzMpnBpLNuSY+Q= +-----END PRIVATE KEY----- diff --git a/tests/integration/test_zookeeper_config/configs_secure/conf.d/remote_servers.xml b/tests/integration/test_zookeeper_config/configs_secure/conf.d/remote_servers.xml new file mode 100644 index 00000000000..01865e33a85 --- /dev/null +++ b/tests/integration/test_zookeeper_config/configs_secure/conf.d/remote_servers.xml @@ -0,0 +1,17 @@ + + + + + + node1 + 9000 + + + + node2 + 9000 + + + + + diff --git a/tests/integration/test_zookeeper_config/configs_secure/conf.d/ssl_conf.xml b/tests/integration/test_zookeeper_config/configs_secure/conf.d/ssl_conf.xml new file mode 100644 index 00000000000..5e6f5f37624 --- /dev/null +++ b/tests/integration/test_zookeeper_config/configs_secure/conf.d/ssl_conf.xml @@ -0,0 +1,16 @@ + + + + /etc/clickhouse-server/client.crt + /etc/clickhouse-server/client.key + true + true + sslv2,sslv3 + true + none + + RejectCertificateHandler + + + + diff --git a/tests/integration/test_zookeeper_config/test.py b/tests/integration/test_zookeeper_config/test.py index d9323ae16f3..4be99c8711d 100644 --- a/tests/integration/test_zookeeper_config/test.py +++ b/tests/integration/test_zookeeper_config/test.py @@ -1,7 +1,11 @@ from __future__ import print_function from helpers.cluster import ClickHouseCluster +import helpers import pytest import time +from tempfile import NamedTemporaryFile +from os import path as p, unlink + def test_chroot_with_same_root(): @@ -100,10 +104,58 @@ def test_identity(): with pytest.raises(Exception): cluster_2.start(destroy_dirs=False) node2.query(''' - CREATE TABLE simple (date Date, id UInt32) + CREATE TABLE simple (date Date, id UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/simple', '1', date, id, 8192); ''') finally: cluster_1.shutdown() cluster_2.shutdown() + + +def test_secure_connection(): + # We need absolute path in zookeeper volumes. Generate it dynamically. 
+ TEMPLATE = ''' + zoo{zoo_id}: + image: zookeeper:3.5.6 + restart: always + environment: + ZOO_TICK_TIME: 500 + ZOO_MY_ID: {zoo_id} + ZOO_SERVERS: server.1=zoo1:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=zoo3:2888:3888;2181 + ZOO_SECURE_CLIENT_PORT: 2281 + volumes: + - {helpers_dir}/zookeeper-ssl-entrypoint.sh:/zookeeper-ssl-entrypoint.sh + - {configs_dir}:/clickhouse-config + command: ["zkServer.sh", "start-foreground"] + entrypoint: /zookeeper-ssl-entrypoint.sh + ''' + configs_dir = p.abspath(p.join(p.dirname(__file__), 'configs_secure')) + helpers_dir = p.abspath(p.dirname(helpers.__file__)) + + cluster = ClickHouseCluster(__file__, zookeeper_config_path='configs/zookeeper_config_with_ssl.xml') + + docker_compose = NamedTemporaryFile(delete=False) + + docker_compose.write( + "version: '2.2'\nservices:\n" + + TEMPLATE.format(zoo_id=1, configs_dir=configs_dir, helpers_dir=helpers_dir) + + TEMPLATE.format(zoo_id=2, configs_dir=configs_dir, helpers_dir=helpers_dir) + + TEMPLATE.format(zoo_id=3, configs_dir=configs_dir, helpers_dir=helpers_dir) + ) + docker_compose.close() + + node1 = cluster.add_instance('node1', config_dir='configs_secure', with_zookeeper=True, + zookeeper_docker_compose_path=docker_compose.name) + node2 = cluster.add_instance('node2', config_dir='configs_secure', with_zookeeper=True, + zookeeper_docker_compose_path=docker_compose.name) + + try: + cluster.start() + + assert node1.query("SELECT count() FROM system.zookeeper WHERE path = '/'") == '2\n' + assert node2.query("SELECT count() FROM system.zookeeper WHERE path = '/'") == '2\n' + + finally: + cluster.shutdown() + unlink(docker_compose.name) From f7f020263344e3c5055a630441e0673bcbff3d42 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 13 Apr 2020 08:04:57 +0000 Subject: [PATCH 376/484] Bump nltk from 3.4.5 to 3.5 in /docs/tools Bumps [nltk](https://github.com/nltk/nltk) from 3.4.5 to 3.5. 
- [Release notes](https://github.com/nltk/nltk/releases) - [Changelog](https://github.com/nltk/nltk/blob/develop/ChangeLog) - [Commits](https://github.com/nltk/nltk/compare/3.4.5...3.5) Signed-off-by: dependabot-preview[bot] --- docs/tools/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/tools/requirements.txt b/docs/tools/requirements.txt index 0e3e3c24b5f..228229ac30d 100644 --- a/docs/tools/requirements.txt +++ b/docs/tools/requirements.txt @@ -20,7 +20,7 @@ MarkupSafe==1.1.1 mkdocs==1.1 mkdocs-htmlproofer-plugin==0.0.3 mkdocs-macros-plugin==0.4.6 -nltk==3.4.5 +nltk==3.5 nose==1.3.7 protobuf==3.11.3 numpy==1.18.2 From f1fbd60442d6b101338f4c94e9ae7578ab9e1e08 Mon Sep 17 00:00:00 2001 From: Ilya Yatsishin <2159081+qoega@users.noreply.github.com> Date: Mon, 13 Apr 2020 11:16:20 +0300 Subject: [PATCH 377/484] Add instana --- docs/en/introduction/adopters.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/en/introduction/adopters.md b/docs/en/introduction/adopters.md index f7006ae15c8..a1494c23066 100644 --- a/docs/en/introduction/adopters.md +++ b/docs/en/introduction/adopters.md @@ -76,5 +76,6 @@ toc_title: Adopters | [ЦВТ](https://htc-cs.ru/) | Software Development | Metrics, Logging | — | — | [Blog Post, March 2019, in Russian](https://vc.ru/dev/62715-kak-my-stroili-monitoring-na-prometheus-clickhouse-i-elk) | | [МКБ](https://mkb.ru/) | Bank | Web-system monitoring | — | — | [Slides in Russian, September 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/mkb.pdf) | | [金数据](https://jinshuju.net) | BI Analytics | Main product | — | — | [Slides in Chinese, October 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup24/3.%20金数据数据架构调整方案Public.pdf) | +| [Instana](https://www.instana.com) | APM Platform | Main product | — | — | [Twitter post](https://twitter.com/mieldonkers/status/1248884119158882304) | [Original article](https://clickhouse.tech/docs/en/introduction/adopters/) From 29fc8f145b898e2f854935e039a70e0a3c0907a6 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Mon, 13 Apr 2020 11:50:14 +0300 Subject: [PATCH 378/484] Fix tests in debug. --- src/Processors/Sources/SourceFromInputStream.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/Processors/Sources/SourceFromInputStream.cpp b/src/Processors/Sources/SourceFromInputStream.cpp index e7ca28f72b9..e34fbd359ae 100644 --- a/src/Processors/Sources/SourceFromInputStream.cpp +++ b/src/Processors/Sources/SourceFromInputStream.cpp @@ -176,6 +176,9 @@ Chunk SourceFromInputStream::generate() return {}; } + if (isCancelled()) + return {}; + #ifndef NDEBUG assertBlocksHaveEqualStructure(getPort().getHeader(), block, "SourceFromInputStream"); #endif From 9a9bedc8ccd7f1ded6c1a237b7452481887651f8 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Mon, 13 Apr 2020 12:02:50 +0300 Subject: [PATCH 379/484] Fix test for streams. 
--- .../Executors/TreeExecutorBlockInputStream.cpp | 14 ++++++++++++-- .../Executors/TreeExecutorBlockInputStream.h | 2 ++ 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/src/Processors/Executors/TreeExecutorBlockInputStream.cpp b/src/Processors/Executors/TreeExecutorBlockInputStream.cpp index 84fd97f4781..f797fee3ab5 100644 --- a/src/Processors/Executors/TreeExecutorBlockInputStream.cpp +++ b/src/Processors/Executors/TreeExecutorBlockInputStream.cpp @@ -164,7 +164,7 @@ void TreeExecutorBlockInputStream::execute(bool on_totals, bool on_extremes) } }; - while (!stack.empty()) + while (!stack.empty() && !is_cancelled) { IProcessor * node = stack.top(); @@ -295,7 +295,7 @@ void TreeExecutorBlockInputStream::initRowsBeforeLimit() Block TreeExecutorBlockInputStream::readImpl() { - while (true) + while (!is_cancelled) { if (input_port->isFinished()) { @@ -338,6 +338,8 @@ Block TreeExecutorBlockInputStream::readImpl() execute(false, false); } + + return {}; } void TreeExecutorBlockInputStream::setProgressCallback(const ProgressCallback & callback) @@ -373,4 +375,12 @@ void TreeExecutorBlockInputStream::addTotalRowsApprox(size_t value) sources_with_progress.front()->addTotalRowsApprox(value); } +void TreeExecutorBlockInputStream::cancel(bool kill) +{ + IBlockInputStream::cancel(kill); + + for (auto & processor : processors) + processor->cancel(); +} + } diff --git a/src/Processors/Executors/TreeExecutorBlockInputStream.h b/src/Processors/Executors/TreeExecutorBlockInputStream.h index dfe8e66ed09..d96492b3fb8 100644 --- a/src/Processors/Executors/TreeExecutorBlockInputStream.h +++ b/src/Processors/Executors/TreeExecutorBlockInputStream.h @@ -39,6 +39,8 @@ public: String getName() const override { return "TreeExecutor"; } Block getHeader() const override { return root->getOutputs().front().getHeader(); } + void cancel(bool kill) override; + /// This methods does not affect TreeExecutor as IBlockInputStream itself. /// They just passed to all SourceWithProgress processors. void setProgressCallback(const ProgressCallback & callback) final; From 91e9a543d49a3e82dc433fdd46d284689ffc23ad Mon Sep 17 00:00:00 2001 From: Ivan <5627721+abyss7@users.noreply.github.com> Date: Mon, 13 Apr 2020 14:20:13 +0300 Subject: [PATCH 380/484] Support new vXX-backported labels (#10231) --- utils/github/__main__.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/utils/github/__main__.py b/utils/github/__main__.py index e05d27c03d6..401908298eb 100644 --- a/utils/github/__main__.py +++ b/utils/github/__main__.py @@ -129,6 +129,7 @@ if bad_commits and not args.login: # TODO: check backports. if need_backporting: re_vlabel = re.compile(r'^v\d+\.\d+$') + re_vlabel_backported = re.compile(r'^v\d+\.\d+-backported$') re_vlabel_conflicts = re.compile(r'^v\d+\.\d+-conflicts$') print('\nPull-requests need to be backported:') @@ -146,8 +147,8 @@ if need_backporting: # FIXME: compatibility logic - check for a manually set label, that indicates status 'backported'. 
# FIXME: O(n²) - no need to iterate all labels for every `stable` for label in github.get_labels(pull_request): - if re_vlabel.match(label['name']): - if f'v{stable[0]}' == label['name']: + if re_vlabel.match(label['name']) or re_vlabel_backported.match(label['name']): + if f'v{stable[0]}' == label['name'] or f'v{stable[0]}-backported' == label['name']: backport_labeled.add(stable[0]) if re_vlabel_conflicts.match(label['name']): if f'v{stable[0]}-conflicts' == label['name']: From 6bd80a357b221bf9888ae69a75e4b420a150cbda Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Mon, 13 Apr 2020 15:00:36 +0300 Subject: [PATCH 381/484] simplified backport script --- utils/simple-backport/backport.sh | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/utils/simple-backport/backport.sh b/utils/simple-backport/backport.sh index a0143108383..5cd23b9b541 100755 --- a/utils/simple-backport/backport.sh +++ b/utils/simple-backport/backport.sh @@ -7,8 +7,8 @@ merge_base=$(git merge-base origin/master "origin/$branch") # Make lists of PRs that were merged into each branch. Use first parent here, or else # we'll get weird things like seeing older master that was merged into a PR branch # that was then merged into master. -git log "$merge_base..origin/master" --first-parent --oneline > master-log.txt -git log "$merge_base..origin/$branch" --first-parent --oneline > "$branch-log.txt" +git log "$merge_base..origin/master" --first-parent > master-log.txt +git log "$merge_base..origin/$branch" --first-parent > "$branch-log.txt" # Search for PR numbers in commit messages. First variant is normal merge, and second # variant is squashed. @@ -89,11 +89,14 @@ do continue fi + # Find merge commit SHA for convenience + merge_sha="$(jq -r .merge_commit_sha "$file")" + url="https://github.com/ClickHouse/ClickHouse/pull/$pr" - printf "%s\t%s\t%s\t%s\n" "$action" "$pr" "$url" "$file" >> "$branch-report.tsv" + printf "%s\t%s\t%s\t%s\t%s\n" "$action" "$pr" "$url" "$file" "$merge_sha" >> "$branch-report.tsv" if [ "$action" == "backport" ] then - printf "%s\t%s\n" "$action" "$url" + printf "%s\t%s\t%s\n" "$action" "$url" "$merge_sha" fi done From 204a6b2b8c8203d17cf5203e0fa79c068b4a8d30 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Mon, 13 Apr 2020 15:42:15 +0300 Subject: [PATCH 382/484] simple backport script --- utils/simple-backport/backport.sh | 38 +++++-------------------------- 1 file changed, 6 insertions(+), 32 deletions(-) diff --git a/utils/simple-backport/backport.sh b/utils/simple-backport/backport.sh index 5cd23b9b541..06ec63d25ec 100755 --- a/utils/simple-backport/backport.sh +++ b/utils/simple-backport/backport.sh @@ -56,38 +56,12 @@ do action="backport" fi - # Next, check the tag. They might override the decision. - matched_labels=() - for label in $(jq -r .labels[].name "$file") - do - label_action="" - case "$label" in - pr-must-backport | "v$branch-must-backport") - label_action="backport" - ;; - pr-no-backport | "v$branch-no-backport") - label_action="no-backport" - ;; - "v$branch-conflicts") - label_action="conflict" - ;; - "v$branch" | "v$branch-backported") - label_action="done" - ;; - esac - if [ "$label_action" != "" ] - then - action="$label_action" - matched_labels+=("$label") - fi - done - - # Show an error if there are conflicting labels. - if [ ${#matched_labels[@]} -gt 1 ] - then - >&2 echo "PR #$pr has conflicting labels: ${matched_labels[*]}" - continue - fi + # Next, check the tag. They might override the decision. Checks are ordered by priority. 
+ labels="$(jq -r .labels[].name "$file")" + if echo "$labels" | grep "pr-must-backport\|v$branch-must-backport" > /dev/null; then action="backport"; fi + if echo "$labels" | grep "v$branch-conflicts" > /dev/null; then action="conflict"; fi + if echo "$labels" | grep "pr-no-backport\|v$branch-no-backport" > /dev/null; then action="no-backport"; fi + if echo "$labels" | grep "v$branch\|v$branch-backported" > /dev/null; then action="done"; fi # Find merge commit SHA for convenience merge_sha="$(jq -r .merge_commit_sha "$file")" From 8be5a1f0a5ac176ff7c1c83d9979627b6bc335b9 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Mon, 13 Apr 2020 15:54:09 +0300 Subject: [PATCH 383/484] simple backport script --- utils/simple-backport/backport.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/utils/simple-backport/backport.sh b/utils/simple-backport/backport.sh index 06ec63d25ec..7fbd34f0a08 100755 --- a/utils/simple-backport/backport.sh +++ b/utils/simple-backport/backport.sh @@ -58,10 +58,10 @@ do # Next, check the tag. They might override the decision. Checks are ordered by priority. labels="$(jq -r .labels[].name "$file")" - if echo "$labels" | grep "pr-must-backport\|v$branch-must-backport" > /dev/null; then action="backport"; fi - if echo "$labels" | grep "v$branch-conflicts" > /dev/null; then action="conflict"; fi - if echo "$labels" | grep "pr-no-backport\|v$branch-no-backport" > /dev/null; then action="no-backport"; fi - if echo "$labels" | grep "v$branch\|v$branch-backported" > /dev/null; then action="done"; fi + if echo "$labels" | grep -x "pr-must-backport\|v$branch-must-backport" > /dev/null; then action="backport"; fi + if echo "$labels" | grep -x "v$branch-conflicts" > /dev/null; then action="conflict"; fi + if echo "$labels" | grep -x "pr-no-backport\|v$branch-no-backport" > /dev/null; then action="no-backport"; fi + if echo "$labels" | grep -x "v$branch\|v$branch-backported" > /dev/null; then action="done"; fi # Find merge commit SHA for convenience merge_sha="$(jq -r .merge_commit_sha "$file")" From 9cc7d0f06aa917fc87219adfb3d6f9311e72f095 Mon Sep 17 00:00:00 2001 From: alesapin Date: Mon, 13 Apr 2020 17:34:01 +0300 Subject: [PATCH 384/484] Fix 'Cannot add column' error while creating range_hashed dictionary using DDL queries --- src/Storages/StorageDictionary.cpp | 16 ++++++++- ...01125_dict_ddl_cannot_add_column.reference | 3 ++ .../01125_dict_ddl_cannot_add_column.sql | 34 +++++++++++++++++++ 3 files changed, 52 insertions(+), 1 deletion(-) create mode 100644 tests/queries/0_stateless/01125_dict_ddl_cannot_add_column.reference create mode 100644 tests/queries/0_stateless/01125_dict_ddl_cannot_add_column.sql diff --git a/src/Storages/StorageDictionary.cpp b/src/Storages/StorageDictionary.cpp index 396e83cc293..86831593d54 100644 --- a/src/Storages/StorageDictionary.cpp +++ b/src/Storages/StorageDictionary.cpp @@ -75,16 +75,30 @@ NamesAndTypesList StorageDictionary::getNamesAndTypes(const DictionaryStructure if (dictionary_structure.id) dictionary_names_and_types.emplace_back(dictionary_structure.id->name, std::make_shared()); + + /// In old-style (XML) configuration we don't have this attributes in the + /// main attribute list, so we have to add them to columns list explicitly. + /// In the new configuration (DDL) we have them both in range_* nodes and + /// main attribute list, but for compatibility we add them before main + /// attributes list. 
if (dictionary_structure.range_min) dictionary_names_and_types.emplace_back(dictionary_structure.range_min->name, dictionary_structure.range_min->type); + if (dictionary_structure.range_max) dictionary_names_and_types.emplace_back(dictionary_structure.range_max->name, dictionary_structure.range_max->type); + if (dictionary_structure.key) + { for (const auto & attribute : *dictionary_structure.key) dictionary_names_and_types.emplace_back(attribute.name, attribute.type); + } for (const auto & attribute : dictionary_structure.attributes) - dictionary_names_and_types.emplace_back(attribute.name, attribute.type); + { + /// Some attributes can be already added (range_min and range_max) + if (!dictionary_names_and_types.contains(attribute.name)) + dictionary_names_and_types.emplace_back(attribute.name, attribute.type); + } return dictionary_names_and_types; } diff --git a/tests/queries/0_stateless/01125_dict_ddl_cannot_add_column.reference b/tests/queries/0_stateless/01125_dict_ddl_cannot_add_column.reference new file mode 100644 index 00000000000..1a9e5685a6a --- /dev/null +++ b/tests/queries/0_stateless/01125_dict_ddl_cannot_add_column.reference @@ -0,0 +1,3 @@ +1 2019-01-05 2020-01-10 1 +date_table +somedict diff --git a/tests/queries/0_stateless/01125_dict_ddl_cannot_add_column.sql b/tests/queries/0_stateless/01125_dict_ddl_cannot_add_column.sql new file mode 100644 index 00000000000..3f87235bdf4 --- /dev/null +++ b/tests/queries/0_stateless/01125_dict_ddl_cannot_add_column.sql @@ -0,0 +1,34 @@ +DROP DATABASE IF EXISTS database_for_dict; + +CREATE DATABASE database_for_dict; + +use database_for_dict; + +CREATE TABLE date_table +( + id UInt32, + val String, + start Date, + end Date +) Engine = Memory(); + +INSERT INTO date_table VALUES(1, '1', toDate('2019-01-05'), toDate('2020-01-10')); + +CREATE DICTIONARY somedict +( + id UInt32, + val String, + start Date, + end Date +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'date_table' DB 'database_for_dict')) +LAYOUT(RANGE_HASHED()) +RANGE (MIN start MAX end) +LIFETIME(MIN 300 MAX 360); + +SELECT * from somedict; + +SHOW TABLES; + +DROP DATABASE IF EXISTS database_for_dict; From e05e2c76283bbc0d34abfcb0e7bf8764ab79a065 Mon Sep 17 00:00:00 2001 From: Alexander Kazakov Date: Mon, 13 Apr 2020 17:34:11 +0300 Subject: [PATCH 385/484] Fixed check for nondeterministic functions to handle lambdas correctly --- src/Interpreters/MutationsInterpreter.cpp | 42 ++++++++++++------- ...eterministic_functions_zookeeper.reference | 2 + ...th_nondeterministic_functions_zookeeper.sh | 6 +++ 3 files changed, 34 insertions(+), 16 deletions(-) diff --git a/src/Interpreters/MutationsInterpreter.cpp b/src/Interpreters/MutationsInterpreter.cpp index df0267b9450..985fda3aac7 100644 --- a/src/Interpreters/MutationsInterpreter.cpp +++ b/src/Interpreters/MutationsInterpreter.cpp @@ -36,34 +36,44 @@ namespace ErrorCodes namespace { -struct FirstNonDeterministicFuncData +/// Helps to detect situations, where non-deterministic functions may be used in mutations of Replicated*MergeTree. 
+class FirstNonDeterministicFuncMatcher { - using TypeToVisit = ASTFunction; +public: + struct Data { + const Context & context; + std::optional nondeterministic_function_name; + }; - explicit FirstNonDeterministicFuncData(const Context & context_) - : context{context_} - {} - - const Context & context; - std::optional nondeterministic_function_name; - - void visit(ASTFunction & function, ASTPtr &) +public: + static bool needChildVisit(const ASTPtr & /*node*/, const ASTPtr & child) { - if (nondeterministic_function_name) + return child != nullptr; + } + + static void visit(const ASTPtr & node, Data & data) + { + if (data.nondeterministic_function_name) return; - const auto func = FunctionFactory::instance().get(function.name, context); - if (!func->isDeterministic()) - nondeterministic_function_name = func->getName(); + if (const auto * function = typeid_cast(node.get())) + { + if (function->name != "lambda") + { + const auto func = FunctionFactory::instance().get(function->name, data.context); + if (!func->isDeterministic()) + data.nondeterministic_function_name = func->getName(); + } + } } }; using FirstNonDeterministicFuncFinder = - InDepthNodeVisitor, true>; + InDepthNodeVisitor; std::optional findFirstNonDeterministicFuncName(const MutationCommand & command, const Context & context) { - FirstNonDeterministicFuncData finder_data(context); + FirstNonDeterministicFuncMatcher::Data finder_data{context}; switch (command.type) { diff --git a/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.reference b/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.reference index f799e8ed8f0..6bf25043399 100644 --- a/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.reference +++ b/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.reference @@ -5,3 +5,5 @@ OK OK OK OK +OK +OK diff --git a/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.sh b/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.sh index 9b190855adf..68cb5e0e760 100755 --- a/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.sh +++ b/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.sh @@ -43,6 +43,12 @@ ${CLICKHOUSE_CLIENT} --query "ALTER TABLE $R1 DELETE WHERE ignore(rand())" 2>&1 ${CLICKHOUSE_CLIENT} --query "ALTER TABLE $R1 UPDATE y = y + rand() % 1 WHERE not ignore()" 2>&1 \ | fgrep -q "must use only deterministic functions" && echo 'OK' || echo 'FAIL' +${CLICKHOUSE_CLIENT} --query "ALTER TABLE $R1 UPDATE y = x + arrayCount(x -> (x + y) % 2, range(y)) WHERE not ignore()" 2>&1 > /dev/null \ +&& echo 'OK' || echo 'FAIL' + +${CLICKHOUSE_CLIENT} --query "ALTER TABLE $R1 UPDATE y = x + arrayCount(x -> (rand() + x) % 2, range(y)) WHERE not ignore()" 2>&1 \ +| fgrep -q "must use only deterministic functions" && echo 'OK' || echo 'FAIL' + # For regular tables we do not enforce deterministic functions ${CLICKHOUSE_CLIENT} --query "ALTER TABLE $T1 DELETE WHERE rand() = 0" 2>&1 > /dev/null \ From eacc04fc5cfbdf5cdb8bce2c9d5e9d1040e5bb07 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Mon, 13 Apr 2020 19:33:15 +0300 Subject: [PATCH 386/484] Fix race after cancel of RemoteBlockInputStream. 
--- src/DataStreams/RemoteBlockInputStream.cpp | 13 +++++++++---- src/DataStreams/RemoteBlockInputStream.h | 3 ++- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/src/DataStreams/RemoteBlockInputStream.cpp b/src/DataStreams/RemoteBlockInputStream.cpp index 9d9f629d463..6be189503e9 100644 --- a/src/DataStreams/RemoteBlockInputStream.cpp +++ b/src/DataStreams/RemoteBlockInputStream.cpp @@ -359,12 +359,17 @@ void RemoteBlockInputStream::sendQuery() void RemoteBlockInputStream::tryCancel(const char * reason) { - bool old_val = false; - if (!was_cancelled.compare_exchange_strong(old_val, true, std::memory_order_seq_cst, std::memory_order_relaxed)) - return; + { + std::lock_guard guard(was_cancelled_mutex); + + if (was_cancelled) + return; + + was_cancelled = true; + multiplexed_connections->sendCancel(); + } LOG_TRACE(log, "(" << multiplexed_connections->dumpAddresses() << ") " << reason); - multiplexed_connections->sendCancel(); } bool RemoteBlockInputStream::isQueryPending() const diff --git a/src/DataStreams/RemoteBlockInputStream.h b/src/DataStreams/RemoteBlockInputStream.h index 783811f2521..66b1ebbb6c3 100644 --- a/src/DataStreams/RemoteBlockInputStream.h +++ b/src/DataStreams/RemoteBlockInputStream.h @@ -135,7 +135,8 @@ private: * - data size is already satisfactory (when using LIMIT, for example) * - an exception was thrown from client side */ - std::atomic was_cancelled { false }; + bool was_cancelled { false }; + std::mutex was_cancelled_mutex; /** An exception from replica was received. No need in receiving more packets or * requesting to cancel query execution From 0aa4c85602df716a3fa4cbda23b9866e26a22dcd Mon Sep 17 00:00:00 2001 From: Alexander Kazakov Date: Mon, 13 Apr 2020 20:04:17 +0300 Subject: [PATCH 387/484] Fixed style checker complaint --- src/Interpreters/MutationsInterpreter.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Interpreters/MutationsInterpreter.cpp b/src/Interpreters/MutationsInterpreter.cpp index 985fda3aac7..2d3c01292b8 100644 --- a/src/Interpreters/MutationsInterpreter.cpp +++ b/src/Interpreters/MutationsInterpreter.cpp @@ -40,7 +40,8 @@ namespace class FirstNonDeterministicFuncMatcher { public: - struct Data { + struct Data + { const Context & context; std::optional nondeterministic_function_name; }; @@ -68,8 +69,7 @@ public: } }; -using FirstNonDeterministicFuncFinder = - InDepthNodeVisitor; +using FirstNonDeterministicFuncFinder = InDepthNodeVisitor; std::optional findFirstNonDeterministicFuncName(const MutationCommand & command, const Context & context) { From 45e85724a61b6ea80f065245b84f0ecdfbc83f43 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Mon, 13 Apr 2020 21:33:25 +0300 Subject: [PATCH 388/484] Update MutationsInterpreter.cpp --- src/Interpreters/MutationsInterpreter.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/Interpreters/MutationsInterpreter.cpp b/src/Interpreters/MutationsInterpreter.cpp index 2d3c01292b8..1682bc11f80 100644 --- a/src/Interpreters/MutationsInterpreter.cpp +++ b/src/Interpreters/MutationsInterpreter.cpp @@ -59,6 +59,9 @@ public: if (const auto * function = typeid_cast(node.get())) { + /// Lambda functions also may be non-deterministic. But we skip them for simplicity. + /// Replication will work correctly even if non-deterministic function is used, + /// it will select any of the results and discard other. 
if (function->name != "lambda") { const auto func = FunctionFactory::instance().get(function->name, data.context); From def6817ede8854d7c422429d2bbe9204c134ae25 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Mon, 13 Apr 2020 22:06:07 +0300 Subject: [PATCH 389/484] Update compare.sh --- docker/test/performance-comparison/compare.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docker/test/performance-comparison/compare.sh b/docker/test/performance-comparison/compare.sh index f89631522f4..bf48fe467ca 100755 --- a/docker/test/performance-comparison/compare.sh +++ b/docker/test/performance-comparison/compare.sh @@ -42,9 +42,11 @@ function configure rm db0/metadata/system/* -rf ||: # Make copies of the original db for both servers. Use hardlinks instead - # of copying. + # of copying. Be careful to remove preprocessed configs or it can lead to + # weird effects. rm -r left/db ||: rm -r right/db ||: + rm -r db0/preprocessed_configs ||: cp -al db0/ left/db/ cp -al db0/ right/db/ } From e46322fcefb1d3e1d88fcd24c4776f91ae0a037a Mon Sep 17 00:00:00 2001 From: Avogar Date: Mon, 13 Apr 2020 22:33:02 +0300 Subject: [PATCH 390/484] Update MsgPack input format. --- src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp b/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp index ee32aeb6bfe..53c5a623a35 100644 --- a/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp @@ -119,8 +119,8 @@ void MsgPackRowInputFormat::insertObject(IColumn & column, DataTypePtr data_type case TypeIndex::FixedString: [[fallthrough]]; case TypeIndex::String: { - String str = object.as(); - column.insertData(str.data(), str.size()); + msgpack::object_str obj_str = object.via.str; + column.insertData(obj_str.ptr, obj_str.size); return; } case TypeIndex::Array: From d480707c12133d9d3ad0708447f08bab4bc3f995 Mon Sep 17 00:00:00 2001 From: Alexander Kazakov Date: Mon, 13 Apr 2020 23:02:44 +0300 Subject: [PATCH 391/484] Fixed clang build + tweaked comment --- src/Interpreters/MutationsInterpreter.cpp | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/src/Interpreters/MutationsInterpreter.cpp b/src/Interpreters/MutationsInterpreter.cpp index 1682bc11f80..b1b226b157f 100644 --- a/src/Interpreters/MutationsInterpreter.cpp +++ b/src/Interpreters/MutationsInterpreter.cpp @@ -59,9 +59,8 @@ public: if (const auto * function = typeid_cast(node.get())) { - /// Lambda functions also may be non-deterministic. But we skip them for simplicity. - /// Replication will work correctly even if non-deterministic function is used, - /// it will select any of the results and discard other. + /// Property of being deterministic for lambda expression is completely determined + /// by the contents of its definition, so we just proceed to it. 
if (function->name != "lambda") { const auto func = FunctionFactory::instance().get(function->name, data.context); @@ -76,7 +75,7 @@ using FirstNonDeterministicFuncFinder = InDepthNodeVisitor findFirstNonDeterministicFuncName(const MutationCommand & command, const Context & context) { - FirstNonDeterministicFuncMatcher::Data finder_data{context}; + FirstNonDeterministicFuncMatcher::Data finder_data{context, std::nullopt}; switch (command.type) { From 1d843df1f3bf2619f58679c9e42e1cb7ec1c4945 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Mon, 13 Apr 2020 23:43:23 +0300 Subject: [PATCH 392/484] Changelog for v20.3.5.21 --- CHANGELOG.md | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d5301de8a23..2ab006bcdd3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,23 @@ ## ClickHouse release v20.3 +### ClickHouse release v20.3.5.21, 2020-03-27 + +#### Bug Fix + +* Fix 'Different expressions with the same alias' error when query has PREWHERE and WHERE on distributed table and `SET distributed_product_mode = 'local'`. [#9871](https://github.com/ClickHouse/ClickHouse/pull/9871) ([Artem Zuikov](https://github.com/4ertus2)). +* Fix mutations excessive memory consumption for tables with a composite primary key. This fixes [#9850](https://github.com/ClickHouse/ClickHouse/issues/9850). [#9860](https://github.com/ClickHouse/ClickHouse/pull/9860) ([alesapin](https://github.com/alesapin)). +* Fix 'COMMA to CROSS JOIN rewriter is not enabled or cannot rewrite query' error in case of subqueries with COMMA JOIN out of tables lists (i.e. in WHERE). Fixes [#9782](https://github.com/ClickHouse/ClickHouse/issues/9782). [#9830](https://github.com/ClickHouse/ClickHouse/pull/9830) ([Artem Zuikov](https://github.com/4ertus2)). +* Fix possible exception `Got 0 in totals chunk, expected 1` on client. It happened for queries with `JOIN` in case if right joined table had zero rows. Example: `select * from system.one t1 join system.one t2 on t1.dummy = t2.dummy limit 0 FORMAT TabSeparated;`. Fixes [#9777](https://github.com/ClickHouse/ClickHouse/issues/9777). [#9823](https://github.com/ClickHouse/ClickHouse/pull/9823) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fix SIGSEGV with optimize_skip_unused_shards when type cannot be converted. [#9804](https://github.com/ClickHouse/ClickHouse/pull/9804) ([Azat Khuzhin](https://github.com/azat)). +* Fix broken `ALTER TABLE DELETE COLUMN` query for compact parts. [#9779](https://github.com/ClickHouse/ClickHouse/pull/9779) ([alesapin](https://github.com/alesapin)). +* Fix max_distributed_connections (w/ and w/o Processors). [#9673](https://github.com/ClickHouse/ClickHouse/pull/9673) ([Azat Khuzhin](https://github.com/azat)). +* Fixed a few cases when timezone of the function argument wasn't used properly. [#9574](https://github.com/ClickHouse/ClickHouse/pull/9574) ([Vasily Nemkov](https://github.com/Enmk)). + +#### Improvement + +* Remove order by stage from mutations because we read from a single ordered part in a single thread. Also add check that the order of rows in mutation is ordered in sorting key order and this order is not violated. [#9886](https://github.com/ClickHouse/ClickHouse/pull/9886) ([alesapin](https://github.com/alesapin)). 
+ + ### ClickHouse release v20.3.4.10, 2020-03-20 #### Bug Fix From 29bb9f666565129587846f1507c9a4a5dad8a24e Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Tue, 14 Apr 2020 00:15:58 +0300 Subject: [PATCH 393/484] simple backport script --- utils/simple-backport/README.md | 44 ++++++++- utils/simple-backport/backport.sh | 16 +++- utils/simple-backport/changelog.sh | 69 ++++++++++++++ utils/simple-backport/format-changelog.py | 109 ++++++++++++++++++++++ 4 files changed, 229 insertions(+), 9 deletions(-) create mode 100755 utils/simple-backport/changelog.sh create mode 100755 utils/simple-backport/format-changelog.py diff --git a/utils/simple-backport/README.md b/utils/simple-backport/README.md index 13378f93989..c5a625ca0d1 100644 --- a/utils/simple-backport/README.md +++ b/utils/simple-backport/README.md @@ -52,22 +52,56 @@ $ cat 20.1-report.tsv | cut -f1 | sort | uniq -c | sort -rn 10 no-backport ``` - ### Как разметить пулреквест? -По умолчанию бекпортируются все пулреквесты, у которых в описании указана категория чейнжлога Bug fix. Если этого недостаточно, используйте теги: -* v20.1-backported -- этот пулреквест уже бекпортирован в ветку 20.1. На случай, если автоматически не определилось. +По умолчанию бекпортируются все пулреквесты, у которых в описании указана +категория чейнжлога Bug fix. Если этого недостаточно, используйте теги: * v20.1-no-backport -- в ветку 20.1 бекпортировать не нужно. * pr-no-backport -- ни в какие ветки бекпортировать не нужно. -* v20.1-conflicts -- при бекпорте в 20.1 произошёл конфликт. Такие пулреквесты скрипт пропускает, к ним можно потом вернуться. +* v20.1-conflicts -- при бекпорте в 20.1 произошёл конфликт. Такие пулреквесты + скрипт пропускает, к ним можно потом вернуться. * pr-must-backport -- нужно бекпортировать в поддерживаемые ветки. * v20.1-must-backport -- нужно бекпортировать в 20.1. +### Я бекпортировал, почему скрипт не видит? +* Сообщение коммита должно содержать текст backport/cherry-pick #12345, или + иметь вид стандартного гитхабовского мерж-коммита для ПР #12345. +* Коммит должен быть достижим по `git log --first-parent my-branch`. Возможно, + в ветке сделали pull с merge, от чего некоторые коммиты из ветки становятся +недоступны по `--first-parent`. + +В качестве обхода, добавьте в ветку пустой коммит с текстом вроде "backport +#12345 -- real backport commit is ". ### Я поправил пулреквест, почему скрипт не видит? -В процессе работы скрипт кеширует данные о пулреквестах в текущей папке, чтобы экономить квоту гитхаба. Удалите закешированные файлы, например, для всех реквестов, которые не помечены как пропущенные: +В процессе работы скрипт кеширует данные о пулреквестах в текущей папке, чтобы +экономить квоту гитхаба. Удалите закешированные файлы, например, для всех +реквестов, которые не помечены как пропущенные: ``` $ cat <ваша-ветка>-report.tsv | grep -v "^skip" | cut -f4 $ cat <ваша-ветка>-report.tsv | grep -v "^skip" | cut -f4 | xargs rm ``` +## Как сформировать change log +В этой же папке запустите: +``` +$ time GITHUB_TOKEN=... ./changelog.sh v20.3.4.10-stable v20.3.5.21-stable +9 PRs added between v20.3.4.10-stable and v20.3.5.21-stable. +### ClickHouse release v20.3.5.21-stable FIXME as compared to v20.3.4.10-stable +#### Bug Fix + +* Fix 'Different expressions with the same alias' error when query has PREWHERE + and WHERE on distributed table and `SET distributed_product_mode = 'local'`. +[#9871](https://github.com/ClickHouse/ClickHouse/pull/9871) ([Artem +Zuikov](https://github.com/4ertus2)). +... 
+``` + +Скрипт выведет changelog на экран, а также сохранит его в `./changelog.md`. +Скопируйте этот текст в большой changelog, проверьте и поправьте версию и дату +релиза, вычитайте сообщения. Если сообщения неправильные, обязательно исправьте +их на гитхабе -- это поможет при последующей генерации changelog для других +версий, содержащих этот пулреквест. Чтобы скрипт подтянул изменения с гитхаба, +удалите соответствующие файлы `./pr12345.json`. Если вы часто видите +неправильно оформленные пулреквесты, это повод подумать об улучшении проверки +Description check в CI. diff --git a/utils/simple-backport/backport.sh b/utils/simple-backport/backport.sh index 7fbd34f0a08..b19df885c9e 100755 --- a/utils/simple-backport/backport.sh +++ b/utils/simple-backport/backport.sh @@ -10,10 +10,13 @@ merge_base=$(git merge-base origin/master "origin/$branch") git log "$merge_base..origin/master" --first-parent > master-log.txt git log "$merge_base..origin/$branch" --first-parent > "$branch-log.txt" +# NOTE keep in sync with ./changelog.sh. # Search for PR numbers in commit messages. First variant is normal merge, and second -# variant is squashed. +# variant is squashed. Next are some backport message variants. find_prs=(sed -n "s/^.*Merge pull request #\([[:digit:]]\+\).*$/\1/p; - s/^.*(#\([[:digit:]]\+\))$/\1/p") + s/^.*(#\([[:digit:]]\+\))$/\1/p; + s/^.*back[- ]*port[ ]*#\([[:digit:]]\+\).*$/\1/Ip; + s/^.*cherry[- ]*pick[ ]*#\([[:digit:]]\+\).*$/\1/Ip") "${find_prs[@]}" master-log.txt | sort -rn > master-prs.txt "${find_prs[@]}" "$branch-log.txt" | sort -rn > "$branch-prs.txt" @@ -39,7 +42,7 @@ do rm "$file" break fi - sleep 0.5 + sleep 0.1 fi if ! [ "$pr" == "$(jq -r .number "$file")" ] @@ -61,7 +64,12 @@ do if echo "$labels" | grep -x "pr-must-backport\|v$branch-must-backport" > /dev/null; then action="backport"; fi if echo "$labels" | grep -x "v$branch-conflicts" > /dev/null; then action="conflict"; fi if echo "$labels" | grep -x "pr-no-backport\|v$branch-no-backport" > /dev/null; then action="no-backport"; fi - if echo "$labels" | grep -x "v$branch\|v$branch-backported" > /dev/null; then action="done"; fi + # FIXME Ignore "backported" labels for now. If we can't find the backport commit, + # this means that the changelog script also won't be able to. An alternative + # way to mark PR as backported is to add an empty commit with text like + # "backported #12345", so that it can be found between tags and put in proper + # place in changelog. + #if echo "$labels" | grep -x "v$branch\|v$branch-backported" > /dev/null; then action="done"; fi # Find merge commit SHA for convenience merge_sha="$(jq -r .merge_commit_sha "$file")" diff --git a/utils/simple-backport/changelog.sh b/utils/simple-backport/changelog.sh new file mode 100755 index 00000000000..43a0b2d46da --- /dev/null +++ b/utils/simple-backport/changelog.sh @@ -0,0 +1,69 @@ +#!/bin/bash +set -e + +from="$1" +to="$2" + +git log "$from..$to" --first-parent > "changelog-log.txt" + +# NOTE keep in sync with ./backport.sh. +# Search for PR numbers in commit messages. First variant is normal merge, and second +# variant is squashed. Next are some backport message variants. +find_prs=(sed -n "s/^.*Merge pull request #\([[:digit:]]\+\).*$/\1/p; + s/^.*(#\([[:digit:]]\+\))$/\1/p; + s/^.*back[- ]*port[ ]*#\([[:digit:]]\+\).*$/\1/Ip; + s/^.*cherry[- ]*pick[ ]*#\([[:digit:]]\+\).*$/\1/Ip") + +"${find_prs[@]}" "changelog-log.txt" | sort -rn > "changelog-prs.txt" + + +echo "$(wc -l < "changelog-prs.txt") PRs added between $from and $to." 
+ +function github_download() +{ + local url=${1} + local file=${2} + if ! [ -f "$file" ] + then + if ! curl -H "Authorization: token $GITHUB_TOKEN" \ + -sSf "$url" \ + > "$file" + then + >&2 echo "Failed to download '$url' to '$file'. Contents: '" + >&2 cat "$file" + >&2 echo "'." + rm "$file" + return 1 + fi + sleep 0.1 + fi +} + +for pr in $(cat "changelog-prs.txt") +do + # Download PR info from github. + file="pr$pr.json" + github_download "https://api.github.com/repos/ClickHouse/ClickHouse/pulls/$pr" "$file" || continue + + if ! [ "$pr" == "$(jq -r .number "$file")" ] + then + >&2 echo "Got wrong data for PR #$pr (please check and remove '$file')." + continue + fi + + # Download author info from github. + user_id=$(jq -r .user.id "$file") + user_file="user$user_id.json" + github_download "$(jq -r .user.url "$file")" "$user_file" || continue + + if ! [ "$user_id" == "$(jq -r .id "$user_file")" ] + then + >&2 echo "Got wrong data for user #$user_id (please check and remove '$user_file')." + continue + fi +done + +echo "### ClickHouse release $to FIXME as compared to $from +" > changelog.md +./format-changelog.py changelog-prs.txt >> changelog.md +cat changelog.md diff --git a/utils/simple-backport/format-changelog.py b/utils/simple-backport/format-changelog.py new file mode 100755 index 00000000000..0f379d5529f --- /dev/null +++ b/utils/simple-backport/format-changelog.py @@ -0,0 +1,109 @@ +#!/usr/bin/python3 + +import os +import sys +import itertools +import argparse +import json +import collections +import re + +parser = argparse.ArgumentParser(description='Format changelog for given PRs.') +parser.add_argument('file', metavar='FILE', type=argparse.FileType('r', encoding='utf-8'), nargs=1, default=sys.stdin, help='File with PR numbers, one per line.') +args = parser.parse_args() + +# This function mirrors the PR description checks in ClickhousePullRequestTrigger. +# Returns False if the PR should not be mentioned changelog. +def parse_one_pull_request(item): + description = item['body'] + # Don't skip empty lines because they delimit parts of description + lines = [line for line in map(lambda x: x.strip(), description.split('\n') if description else [])] + lines = [re.sub(r'\s+', ' ', l) for l in lines] + + category = '' + entry = '' + + if lines: + i = 0 + while i < len(lines): + if re.match(r'(?i).*category.*:$', lines[i]): + i += 1 + if i >= len(lines): + break + category = re.sub(r'^[-*\s]*', '', lines[i]) + i += 1 + elif re.match(r'(?i)^\**\s*(Short description|Change\s*log entry)', lines[i]): + i += 1 + # Can have one empty line between header and the entry itself. Filter it out. + if i < len(lines) and not lines[i]: + i += 1 + # All following lines until empty one are the changelog entry. + entry_lines = [] + while i < len(lines) and lines[i]: + entry_lines.append(lines[i]) + i += 1 + entry = ' '.join(entry_lines) + else: + i += 1 + + if not category: + # Shouldn't happen, because description check in CI should catch such PRs. + # Fall through, so that it shows up in output and the user can fix it. + category = "NO CL CATEGORY" + + # Filter out the PR categories that are not for changelog. + if re.match(r'(?i)doc|((non|in|not|un)[-\s]*significant)', category): + return False + + if not entry: + # Shouldn't happen, because description check in CI should catch such PRs. + category = "NO CL ENTRY" + entry = "NO CL ENTRY: '" + item['title'] + "'" + + entry = entry.strip() + if entry[-1] != '.': + entry += '.' 
+ + item['entry'] = entry + item['category'] = category + + return True + + +category_to_pr = collections.defaultdict(lambda: []) +users = {} +for line in args.file[0]: + pr = json.loads(open(f'pr{line.strip()}.json').read()) + assert(pr['number']) + if not parse_one_pull_request(pr): + continue + + assert(pr['category']) + category_to_pr[pr['category']].append(pr) + user_id = pr['user']['id'] + users[user_id] = json.loads(open(f'user{user_id}.json').read()) + +def print_category(category): + print("#### " + category) + print() + for pr in category_to_pr[category]: + user = users[pr["user"]["id"]] + user_name = user["name"] if user["name"] else user["login"] + + # Substitute issue links + pr["entry"] = re.sub(r'#([0-9]{4,})', r'[#\1](https://github.com/ClickHouse/ClickHouse/issues/\1)', pr["entry"]) + + print(f'* {pr["entry"]} [#{pr["number"]}]({pr["html_url"]}) ([{user_name}]({user["html_url"]})).') + + print() + +# Print categories in preferred order +categories_preferred_order = ['Backward Incompatible Change', 'New Feature', 'Bug Fix', 'Improvement', 'Performance Improvement', 'Build/Testing/Packaging Improvement', 'Other'] +for category in categories_preferred_order: + if category in category_to_pr: + print_category(category) + category_to_pr.pop(category) + +# Print the rest of the categories +for category in category_to_pr: + print_category(category) From a7c5f622ea4f5fdb6776994492533590b4789ff0 Mon Sep 17 00:00:00 2001 From: Konstantin Lebedev Date: Mon, 13 Apr 2020 10:50:00 +0300 Subject: [PATCH 394/484] Add string_utils for tests/zookeeper_impl.cpp --- src/Common/ZooKeeper/tests/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Common/ZooKeeper/tests/CMakeLists.txt b/src/Common/ZooKeeper/tests/CMakeLists.txt index 06716e49918..45a48ddc7a9 100644 --- a/src/Common/ZooKeeper/tests/CMakeLists.txt +++ b/src/Common/ZooKeeper/tests/CMakeLists.txt @@ -2,7 +2,7 @@ add_executable(zkutil_test_commands zkutil_test_commands.cpp) target_link_libraries(zkutil_test_commands PRIVATE clickhouse_common_zookeeper) add_executable(zkutil_test_commands_new_lib zkutil_test_commands_new_lib.cpp) -target_link_libraries(zkutil_test_commands_new_lib PRIVATE clickhouse_common_zookeeper) +target_link_libraries(zkutil_test_commands_new_lib PRIVATE clickhouse_common_zookeeper string_utils) add_executable(zkutil_test_lock zkutil_test_lock.cpp) target_link_libraries(zkutil_test_lock PRIVATE clickhouse_common_zookeeper) From 7c6a0c27e775720c5e078a4c91e3ba3c0c9a4a7a Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Tue, 14 Apr 2020 01:05:05 +0300 Subject: [PATCH 395/484] simple backport script --- utils/simple-backport/backport.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/simple-backport/backport.sh b/utils/simple-backport/backport.sh index b19df885c9e..ade0b54f24d 100755 --- a/utils/simple-backport/backport.sh +++ b/utils/simple-backport/backport.sh @@ -15,8 +15,8 @@ git log "$merge_base..origin/$branch" --first-parent > "$branch-log.txt" # variant is squashed. Next are some backport message variants. 
find_prs=(sed -n "s/^.*Merge pull request #\([[:digit:]]\+\).*$/\1/p; s/^.*(#\([[:digit:]]\+\))$/\1/p; - s/^.*back[- ]*port[ ]*#\([[:digit:]]\+\).*$/\1/Ip; - s/^.*cherry[- ]*pick[ ]*#\([[:digit:]]\+\).*$/\1/Ip") + s/^.*back[- ]*port[ of]*#\([[:digit:]]\+\).*$/\1/Ip; + s/^.*cherry[- ]*pick[ of]*#\([[:digit:]]\+\).*$/\1/Ip") "${find_prs[@]}" master-log.txt | sort -rn > master-prs.txt "${find_prs[@]}" "$branch-log.txt" | sort -rn > "$branch-prs.txt" From 2c88e914d704ace3fb3ca503609c55bc0398ea39 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 14 Apr 2020 02:24:33 +0300 Subject: [PATCH 396/484] Update roadmap. Also revert digital degradation due to accidential changes with automatic scripts. --- docs/ru/extended_roadmap.md | 179 ++++++++++++++++++++++++------------ 1 file changed, 122 insertions(+), 57 deletions(-) diff --git a/docs/ru/extended_roadmap.md b/docs/ru/extended_roadmap.md index 1637b54311a..135a49ca8fb 100644 --- a/docs/ru/extended_roadmap.md +++ b/docs/ru/extended_roadmap.md @@ -39,18 +39,20 @@ Upd. Большая часть задачи реализована и добав Требует 1.3. Будет делать [Александр Сапин](https://github.com/alesapin). Ура, сделано. -### 1.5. ALTER RENAME COLUMN. {#alter-rename-column} +### 1.5. + ALTER RENAME COLUMN. {#alter-rename-column} [\#6861](https://github.com/ClickHouse/ClickHouse/issues/6861) Требует 1.3. Будет делать [Александр Сапин](https://github.com/alesapin). -### 1.6. Полиморфные куски данных. {#polimorfnye-kuski-dannykh} +### 1.6. + Полиморфные куски данных. {#polimorfnye-kuski-dannykh} -Компактные куски - Q1, куски в оперативке Q1/Q2. +Компактные куски - Q1, куски в оперативке Q1/Q2 - пункт 1.7. Компактные куски реализованы, ещё не включены по-умолчанию. Первым шагом включаем по-умолчанию для системных таблиц. +Upd. Включено для системных таблиц. + Делает [Антон Попов](https://github.com/CurtizJ), первый рабочий вариант в декабре. Пререквизит чтобы снизить сложность мелких INSERT, что в свою очередь нужно для 1.12, иначе задача 1.12 не сможет нормально работать. Особенно нужно для Яндекс.Облака. Данные в таблицах типа MergeTree в ClickHouse хранятся в виде набора независимых «кусков». Внутри куска, каждый столбец, а также индекс, хранится в отдельных файлах. Это сделано для возможности быстрых манипуляций со столбцами (пример - запрос ALTER DROP COLUMN). При вставке данных (INSERT), создаётся новый кусок. Для таблиц с большим количеством столбцов, запросы INSERT с маленьким количеством строк являются неэффективными, так как требуют создания большого количества файлов в файловой системе. Это является врождённой особенностью ClickHouse - одной из первой проблем, с которыми сталкиваются пользователи. Пользователям приходится буферизовывать данные и собирать их в более крупные пачки перед вставкой в ClickHouse. @@ -61,7 +63,7 @@ Upd. Большая часть задачи реализована и добав ### 1.7. Буферизация и WAL в MergeTree. {#buferizatsiia-i-wal-v-mergetree} -Требует 1.6. +Требует 1.6. Антон Попов. Задача взята в работу. Q2. ### 1.8. + Перенос между разделами по TTL. {#perenos-mezhdu-razdelami-po-ttl} @@ -74,7 +76,7 @@ Q1. Закоммичено, но есть технический долг, ко Будет делать Сорокин Николай, ВШЭ и Яндекс. -Сейчас пользователь может задать в таблице выражение, которое определяет, сколько времени хранятся данные. Обычно это выражение задаётся относительно значения столбца с датой - например: удалять данные через три месяца. 
https://clickhouse.tech/docs/ru/operations/table\_engines/mergetree/\#table\_engine-mergetree-ttl +Сейчас пользователь может задать в таблице выражение, которое определяет, сколько времени хранятся данные. Обычно это выражение задаётся относительно значения столбца с датой - например: удалять данные через три месяца. https://clickhouse.tech/docs/ru/operations/table_engines/mergetree/\#table_engine-mergetree-ttl Это может быть задано для всей таблицы (тогда строки целиком удаляются после указанного времени) или для отдельных столбцов (тогда данные столбца физически удаляются с диска, а строки в таблице остаются; при чтении значений столбца, они читаются как значения по-умолчанию). @@ -88,7 +90,7 @@ Q1. Закоммичено, но есть технический долг, ко А вот пункт 2 требуется продумать. Не очевидно даже, какой лучше использовать синтаксис для этого при создании таблицы. Но мы придумаем - сразу видно несколько вариантов. -Частный случай такой задачи уже есть в https://clickhouse.tech/docs/ru/operations/table\_engines/graphitemergetree/ Но это было сделано для конкретной задачи. А надо обобщить. +Частный случай такой задачи уже есть в https://clickhouse.tech/docs/ru/operations/table_engines/graphitemergetree/ Но это было сделано для конкретной задачи. А надо обобщить. ### 1.10. Пережатие старых данных в фоне. {#perezhatie-starykh-dannykh-v-fone} @@ -100,17 +102,15 @@ Q1. Закоммичено, но есть технический долг, ко Предлагается добавить в ClickHouse настройки по пережатию данных и фоновые потоки, выполняющие эту задачу. -### 1.11. Виртуальная файловая система. {#virtualnaia-failovaia-sistema} +### 1.11. + Виртуальная файловая система. {#virtualnaia-failovaia-sistema} -В процессе реализации, сейчас на VFS переведены Log, TinyLog, StripeLog, готовится MergeTree. +На VFS переведены Log, TinyLog, StripeLog, а также MergeTree, что доказывает состоятельность реализации. -Q2. - -Нужно для Яндекс.Облака. Делает Александр, Яндекс.Облако, а также Олег Ершов, ВШЭ и Яндекс. +Нужно для Яндекс.Облака. Делает Александр, Яндекс.Облако. ClickHouse использует для хранения данных локальную файловую систему. Существует сценарий работы, в котором размещение старых (архивных) данных было бы выгодно на удалённой файловой системе. Если файловая система POSIX совместимая, то это не составляет проблем: ClickHouse успешно работает с Ceph, GlusterFS, MooseFS. Также востребованным является сценарий использования S3 (из-за доступности в облаке) или HDFS (для интеграции с Hadoop). Но эти файловые системы не являются POSIX совместимыми. Хотя для них существуют FUSE драйверы, но скорость работы сильно страдает и поддержка неполная. -ClickHouse использует небольшое подмножество функций ФС, но в то же время, и некоторые специфические части: симлинки и хардлинки, O\_DIRECT. Предлагается выделить всё взаимодействие с файловой системой в отдельный интерфейс. +ClickHouse использует небольшое подмножество функций ФС, но в то же время, и некоторые специфические части: симлинки и хардлинки, O_DIRECT. Предлагается выделить всё взаимодействие с файловой системой в отдельный интерфейс. ### 1.12. Экспериментальная реализация VFS поверх S3 и HDFS. {#eksperimentalnaia-realizatsiia-vfs-poverkh-s3-i-hdfs} @@ -121,13 +121,15 @@ Q2. Upd. Олег будет делать только часть про HDFS. +Upd. Реализация поверх S3 является рабочей на уровне PoC. + ### 1.13. Ускорение запросов с FINAL. {#uskorenie-zaprosov-s-final} -Требует 2.1. Делает [Николай Кочетов](https://github.com/KochetovNicolai). Нужно для Яндекс.Метрики. +Требует 2.1. 
Делает [Николай Кочетов](https://github.com/KochetovNicolai). Нужно для Яндекс.Метрики. Q2. ### 1.14. Не писать столбцы, полностью состоящие из нулей. {#ne-pisat-stolbtsy-polnostiu-sostoiashchie-iz-nulei} -Антон Попов. Q1/Q2. +Антон Попов. Q2. В очереди. Простая задача, является небольшим пререквизитом для потенциальной поддержки полуструктурированных данных. ### 1.15. Возможность иметь разный первичный ключ в разных кусках. {#vozmozhnost-imet-raznyi-pervichnyi-kliuch-v-raznykh-kuskakh} @@ -146,6 +148,7 @@ Upd. Олег будет делать только часть про HDFS. Требует 1.3 и 1.6. Полная замена hard links на sym links, что будет лучше для 1.12. + ## 2. Крупные рефакторинги. {#krupnye-refaktoringi} Для обоснования необходимости смотрите ссылки в описании других задач. @@ -161,6 +164,8 @@ Upd. Включили по-умолчанию. Удаление старого Upd. Уже есть первый релиз, в котором это включено по-умолчанию. +Upd. Всё ещё ждём удаление старого кода, которое должно случиться после релиза 20.4. + ### 2.2. Инфраструктура событий/метрик/ограничений/квот/трассировки. {#infrastruktura-sobytiimetrikogranicheniikvottrassirovki} В очереди. https://gist.github.com/alexey-milovidov/d62d73222d83b9319dc519cbb13aeff6 @@ -193,6 +198,8 @@ Upd. Каталог БД вынесен из Context. Средний приоритет. Нужно для YQL. +Upd. В очереди. Иван Лежанкин. + ### 2.9. Логгировние в format-стиле. {#loggirovnie-v-format-stile} Делает [Иван Лежанкин](https://github.com/abyss7). Низкий приоритет. @@ -212,10 +219,14 @@ Upd. Каталог БД вынесен из Context. Задачу делает Алексей Миловидов. Прогресс 50% и разработка временно приостановлена. +Upd. Разработка всё ещё приостановлена. + ### 2.13. Каждая функция в отдельном файле. {#kazhdaia-funktsiia-v-otdelnom-faile} Задачу делает Алексей Миловидов. Прогресс 80%. Потребуется помощь других разработчиков. +Upd. Поползновения наблюдаются. + ### 2.14. Все функции с состоянием переделать на FunctionBuilder. {#vse-funktsii-s-sostoianiem-peredelat-na-functionbuilder} Долг [Николай Кочетов](https://github.com/KochetovNicolai). Сейчас код находится в переходном состоянии, что неприемлемо. @@ -224,13 +235,14 @@ Upd. Каталог БД вынесен из Context. Для нормализации работы materialized views поверх Merge, Distributed, Kafka. + ## 3. Документация. {#dokumentatsiia} Здесь задачи только по инфраструктуре документации. ### 3.1. Перенос документации по функциям в код. {#perenos-dokumentatsii-po-funktsiiam-v-kod} -Требует 2.12 и 2.13. Хотим в Q1/Q2, средний приоритет. +Требует 2.12 и 2.13. Хотим в Q2, средний приоритет. ### 3.2. Перенос однородных частей документации в код. {#perenos-odnorodnykh-chastei-dokumentatsii-v-kod} @@ -246,11 +258,12 @@ Upd. Иван Блинков сделал эту задачу путём зам Эту задачу сделает [Иван Блинков](https://github.com/blinkov/), до конца декабря 2019. Сделано. + ## 4. Сетевое взаимодействие. {#setevoe-vzaimodeistvie} ### 4.1. Уменьшение числа потоков при распределённых запросах. {#umenshenie-chisla-potokov-pri-raspredelionnykh-zaprosakh} -[Никита Лапков](https://github.com/laplab), весна 2020. Upd. Есть прототип. Upd. Он не работает. +Весна 2020. Upd. Есть прототип. Upd. Он не работает. Upd. Человек отказался от задачи, теперь сроки не определены. ### 4.2. Спекулятивное выполнение запросов на нескольких репликах. {#spekuliativnoe-vypolnenie-zaprosov-na-neskolkikh-replikakh} @@ -262,6 +275,8 @@ Upd. Иван Блинков сделал эту задачу путём зам Сейчас для распределённых запросов используется по потоку на соединение. 
Это позволяет хорошо распараллелить вычисления над полученными данными и утилизировать сеть, но становится сильно избыточным для больших кластеров. Для примера, создание 1000 потоков для чтения данных из 1000 серверов кластера - лишь расходует ресурсы и увеличивает время выполнения запроса. Вместо этого необходимо использовать количество потоков не большее количества процессорных ядер, и мультиплексировать в одном потоке общение с серверами. Реализация нетривиальна, так как мультиплексировать необходимо каждую стадию общения по сети, включая установку соединения и обмен handshake. +Upd. Сейчас обсуждается, как сделать другую задачу вместо этой. + ### 4.3. Ограничение числа одновременных скачиваний с реплик. {#ogranichenie-chisla-odnovremennykh-skachivanii-s-replik} Дмитрий Григорьев, ВШЭ. @@ -284,14 +299,16 @@ Upd. Иван Блинков сделал эту задачу путём зам Дмитрий Григорьев, ВШЭ. В очереди. Исправить проблему, что восстанавливающаяся реплика перестаёт мержить. Частично компенсируется 4.3. + ## 5. Операции. {#operatsii} -### 5.1. Разделение задач на более мелкие куски в clickhouse-copier. {#razdelenie-zadach-na-bolee-melkie-kuski-v-clickhouse-copier} +### 5.1. + Разделение задач на более мелкие куски в clickhouse-copier. {#razdelenie-zadach-na-bolee-melkie-kuski-v-clickhouse-copier} [\#9075](https://github.com/ClickHouse/ClickHouse/pull/9075) Q1. Нужно для Метрики, в очереди. Никита Михайлов. Upd. Задача на финальной стадии разработки. +Upd. Сделано. Эффективность работы под вопросом. Есть варианты, как сделать лучше. ### 5.2. Автонастройка лимита на оперативку и размера кэшей. {#avtonastroika-limita-na-operativku-i-razmera-keshei} @@ -305,6 +322,8 @@ Upd. Задача на финальной стадии разработки. Требует 7.5. Задачу хочет Метрика, Облако, БК, Маркет и Altinity. Первой LTS версией уже стала версия 19.14. Метрика, БК, Маркет, Altinity уже используют более свежие версии чем LTS. +Upd. Появилась вторая версия LTS - 20.3. + ## 6. Инструментирование. {#instrumentirovanie} @@ -321,7 +340,7 @@ Upd. Задача на финальной стадии разработки. ### 6.3. Учёт оперативки total расширить не только на запросы. {#uchiot-operativki-total-rasshirit-ne-tolko-na-zaprosy} -Исправление долгоживущей проблемы с дрифтом учёта оперативки. Нужна для Метрики и БК. Иван Лежанкин. Q1. +Исправление долгоживущей проблемы с дрифтом учёта оперативки. Нужна для Метрики и БК. Иван Лежанкин. Q1. Странно, как будто не сделано. ### 6.4. Поддержка perf events как метрик запроса. {#podderzhka-perf-events-kak-metrik-zaprosa} @@ -339,7 +358,7 @@ Upd. Задача на финальной стадии разработки. Сейчас есть стек трейс для почти всех, но не всех исключений. Требует 7.4. -### 6.7. + Таблица system.stack\_trace. {#tablitsa-system-stack-trace} +### 6.7. + Таблица system.stack_trace. {#tablitsa-system-stack-trace} Сравнительно простая задача, но только для опытных разработчиков. @@ -351,6 +370,7 @@ Upd. Задача на финальной стадии разработки. ### 6.10. Сбор общих системных метрик. {#sbor-obshchikh-sistemnykh-metrik} + ## 7. Сопровождение разработки. {#soprovozhdenie-razrabotki} ### 7.1. + ICU в submodules. {#icu-v-submodules} @@ -361,7 +381,7 @@ Upd. Задача на финальной стадии разработки. Сделал Алексей Миловидов. -### 7.3. Обновление Poco. {#obnovlenie-poco} +### 7.3. + Обновление Poco. {#obnovlenie-poco} Алексанр Кузьменков. @@ -383,13 +403,18 @@ Upd. Задача на финальной стадии разработки. Уже есть ASan, TSan, UBSan. Не хватает тестов под MSan. Они уже добавлены в CI, но не проходят. 
[Александр Кузьменков](https://github.com/akuzm) и [Александр Токмаков](https://github.com/tavplubix). -### 7.8. Добавить clang-tidy. {#dobavit-clang-tidy} +Upd. Задача всё ещё медленно тащится. + +### 7.8. + Добавить clang-tidy. {#dobavit-clang-tidy} Уже есть PVS-Studio. Мы очень довольны, но этого недостаточно. Upd. Алексей Миловидов. Добавлено некоторое множество проверок, но нужно рассмотреть все проверки подряд и добавить всё, что можно. +Upd. Рассмотрели все проверки подряд. -### 7.9. Проверки на стиль имён с помощью clang-tidy. {#proverki-na-stil-imion-s-pomoshchiu-clang-tidy} +### 7.9. + Проверки на стиль имён с помощью clang-tidy. {#proverki-na-stil-imion-s-pomoshchiu-clang-tidy} + +Сделано. Только в .cpp файлах и только для имён локальных переменных. Остальное слишком сложно. ### 7.10. Включение UBSan и MSan в интеграционных тестах. {#vkliuchenie-ubsan-i-msan-v-integratsionnykh-testakh} @@ -399,6 +424,8 @@ UBSan включен в функциональных тестах, но не в У нас мало unit тестов по сравнению с функциональными тестами и их использование не обязательно. Но они всё-равно важны и нет причин не запускать их под всеми видами sanitizers. +Илья Яцишин. + ### 7.12. Показывать тестовое покрытие нового кода в PR. {#pokazyvat-testovoe-pokrytie-novogo-koda-v-pr} Пока есть просто показ тестового покрытия всего кода. @@ -413,6 +440,8 @@ UBSan включен в функциональных тестах, но не в Подключение replxx вместо readline сделал Иван Лежанкин. +Есть технический долг с лицензиями файлов консорциума Unicode. + ### 7.14.1. Улучшение возможностей интерактивного режима clickhouse-client. {#uluchshenie-vozmozhnostei-interaktivnogo-rezhima-clickhouse-client} Тагир Кускаров, ВШЭ. @@ -476,7 +505,7 @@ https://github.com/ClickHouse/ClickHouse/issues/8027\#issuecomment-566670282 Проверили на настоящем сервере Huawei, а также в специальном Docker контейнере, который содержит внутри qemu-user-static. Также можно проверить на Cavium, на Raspberry Pi а также на твоём Android телефоне. -### 7.20. Автосборка для FreeBSD x86\_64. {#avtosborka-dlia-freebsd-x86-64} +### 7.20. Автосборка для FreeBSD x86_64. {#avtosborka-dlia-freebsd-x86-64} [Иван Лежанкин](https://github.com/abyss7). @@ -535,6 +564,8 @@ Fuzzing тестирование - это тестирование случай Также можно сделать функции с детерминированным генератором случайных чисел (аргументом передаётся seed) для воспроизводимости тестовых кейсов. Upd. Сергей Штыков сделал функцию `randomPrintableASCII`. +Upd. Илья Яцишин сделал табличную функцию `generateRandom`. +Upd. Эльдар Заитов добавляет OSS Fuzz. ### 7.24. Fuzzing лексера и парсера запросов; кодеков и форматов. {#fuzzing-leksera-i-parsera-zaprosov-kodekov-i-formatov} @@ -557,10 +588,12 @@ Upd. Сергей Штыков сделал функцию `randomPrintableASCII Нужно для CHYT и YQL. -UPD: Все патчи Максима отправлены в master. Задача взята в работу. +Upd: Все патчи Максима отправлены в master. Задача взята в работу. Upd: Задача в процессе реализации. Синхронизироваться будет master. Делает [Иван Лежанкин](https://github.com/abyss7) +Upd: Есть собирающийся прототип, но сборка как будто ещё не в trunk Аркадии. + ### 7.26. Побайтовая идентичность репозитория с Аркадией. {#pobaitovaia-identichnost-repozitoriia-s-arkadiei} Команда DevTools. Прогресс по задаче под вопросом. @@ -617,6 +650,7 @@ Upd: Задача в процессе реализации. Синхронизи Upd. Иван Блинков настроил CDN repo.clickhouse.tech, что решает проблему с доступностью зарубежом. Вопрос с operations, visibility пока актуален. + ## 8. Интеграция с внешними системами. 
{#integratsiia-s-vneshnimi-sistemami} ### 8.1. Поддержка ALTER MODIFY SETTING для Kafka. {#podderzhka-alter-modify-setting-dlia-kafka} @@ -629,11 +663,11 @@ Altinity. Никто не делает эту задачу. [Александр Кузьменков](https://github.com/akuzm). -### 8.3. Доработки globs (правильная поддержка диапазонов, уменьшение числа одновременных stream-ов). {#dorabotki-globs-pravilnaia-podderzhka-diapazonov-umenshenie-chisla-odnovremennykh-stream-ov} +### 8.3. + Доработки globs (правильная поддержка диапазонов, уменьшение числа одновременных stream-ов). {#dorabotki-globs-pravilnaia-podderzhka-diapazonov-umenshenie-chisla-odnovremennykh-stream-ov} [Ольга Хвостикова](https://github.com/stavrolia). -Уменьшение числа stream-ов сделано, а вот правильная поддержка диапазонов - нет. Будем надеяться на Q1/Q2. +Уменьшение числа stream-ов сделано, а вот правильная поддержка диапазонов - нет. Будем надеяться на Q1/Q2. Сделано. ### 8.4. Унификация File, HDFS, S3 под URL. {#unifikatsiia-file-hdfs-s3-pod-url} @@ -690,19 +724,21 @@ Andrew Onyshchuk. Есть pull request. Q1. Сделано. Павел Круглов, ВШЭ и Яндекс. Есть pull request. -### 8.16.2. Поддержка формата Thrift. {#podderzhka-formata-thrift} +### 8.16.2. - Поддержка формата Thrift. {#podderzhka-formata-thrift} -Павел Круглов, ВШЭ и Яндекс. +Павел Круглов, ВШЭ и Яндекс. Задача отменена. ### 8.16.3. Поддержка формата MsgPack. {#podderzhka-formata-msgpack} Павел Круглов, ВШЭ и Яндекс. Задача взята в работу. -### 8.16.4. Формат Regexp. {#format-regexp} +Upd. Почти готово - есть лишь небольшой технический долг. + +### 8.16.4. + Формат Regexp. {#format-regexp} Павел Круглов, ВШЭ и Яндекс. -Есть pull request. +Есть pull request. Готово. ### 8.17. ClickHouse как MySQL реплика. {#clickhouse-kak-mysql-replika} @@ -735,6 +771,7 @@ Maxim Fedotov, Wargaming + Yuri Baranov, Яндекс. Нужно для БК. Декабрь 2019. В декабре для БК сделан минимальный вариант этой задачи. Максимальный вариант, вроде, никому не нужен. +Upd. Всё ещё кажется, что задача не нужна. ### 8.22. Поддержка синтаксиса для переменных в стиле MySQL. {#podderzhka-sintaksisa-dlia-peremennykh-v-stile-mysql} @@ -746,6 +783,7 @@ Upd. Юрий Баранов работает в Google, там запрещен Желательно 2.15. + ## 9. Безопасность. {#bezopasnost} ### 9.1. + Ограничение на хосты в запросах ко внешним системам. {#ogranichenie-na-khosty-v-zaprosakh-ko-vneshnim-sistemam} @@ -761,7 +799,12 @@ ClickHouse предоставляет возможность обратитьс Вместо этого предлагается описывать необходимые данные в конфигурационном файле сервера или в отдельном сервисе и ссылаться на них по именам. ### 9.3. Поддержка TLS для ZooKeeper. {#podderzhka-tls-dlia-zookeeper} + [#10174](https://github.com/ClickHouse/ClickHouse/issues/10174) + +Есть pull request. + + ## 10. Внешние словари. {#vneshnie-slovari} ### 10.1. + Исправление зависания в библиотеке доступа к YT. {#ispravlenie-zavisaniia-v-biblioteke-dostupa-k-yt} @@ -777,6 +820,7 @@ ClickHouse предоставляет возможность обратитьс Нужно для БК и Метрики. Поиск причин - [Александр Сапин](https://github.com/alesapin). Дальшейшее исправление возможно на стороне YT. Upd. Одну причину устранили, но ещё что-то неизвестное осталось. +Upd. Нас заставляют переписать эту библиотеку с одного API на другое, так как старое внезапно устарело. Кажется, что переписывание случайно исправит все проблемы. ### 10.3. Возможность чтения данных из статических таблиц в YT словарях. {#vozmozhnost-chteniia-dannykh-iz-staticheskikh-tablits-v-yt-slovariakh} @@ -802,7 +846,7 @@ Upd. 
Одну причину устранили, но ещё что-то неи Артём Стрельцов, Николай Дегтеринский, Наталия Михненко, ВШЭ. -### 10.9. Уменьшение блокировок для cache словарей за счёт одновременных запросов одного и того же. {#umenshenie-blokirovok-dlia-cache-slovarei-za-schiot-odnovremennykh-zaprosov-odnogo-i-togo-zhe} +### 10.9. - Уменьшение блокировок для cache словарей за счёт одновременных запросов одного и того же. {#umenshenie-blokirovok-dlia-cache-slovarei-za-schiot-odnovremennykh-zaprosov-odnogo-i-togo-zhe} Заменено в пользу 10.10, 10.11. @@ -825,7 +869,7 @@ Upd. Одну причину устранили, но ещё что-то неи ### 10.14. Поддержка всех типов в функции transform. {#podderzhka-vsekh-tipov-v-funktsii-transform} -Задачу взяла Ольга Хвостикова. +Задачу взяла Ольга Хвостикова. Upd. Статус неизвестен. ### 10.15. Использование словарей как специализированного layout для Join. {#ispolzovanie-slovarei-kak-spetsializirovannogo-layout-dlia-join} @@ -843,6 +887,7 @@ Upd. Одну причину устранили, но ещё что-то неи ### 10.19. Возможность зарегистрировать некоторые функции, использующие словари, под пользовательскими именами. {#vozmozhnost-zaregistrirovat-nekotorye-funktsii-ispolzuiushchie-slovari-pod-polzovatelskimi-imenami} + ## 11. Интерфейсы. {#interfeisy} ### 11.1. Вставка состояний агрегатных функций в виде кортежа аргументов или массива кортежей аргументов. {#vstavka-sostoianii-agregatnykh-funktsii-v-vide-kortezha-argumentov-ili-massiva-kortezhei-argumentov} @@ -851,6 +896,8 @@ Upd. Одну причину устранили, но ещё что-то неи Нужно разобраться, как упаковывать Java в статический бинарник, возможно AppImage. Или предоставить максимально простую инструкцию по установке jdbc-bridge. Может быть будет заинтересован Александр Крашенинников, Badoo, так как он разработал jdbc-bridge. +Upd. Александр Крашенинников перешёл в другую компанию и больше не занимается этим. + ### 11.3. + Интеграционные тесты ODBC драйвера путём подключения ClickHouse к самому себе через ODBC. {#integratsionnye-testy-odbc-draivera-putiom-podkliucheniia-clickhouse-k-samomu-sebe-cherez-odbc} Михаил Филимонов, Altinity. Готово. @@ -881,12 +928,13 @@ zhang2014, есть pull request. Возможность описать в конфигурационном файле handler (путь в URL) для HTTP запросов к серверу, которому соответствует некоторый параметризованный запрос. Пользователь может вызвать этот обработчик и не должен передавать SQL запрос. + ## 12. Управление пользователями и доступом. {#upravlenie-polzovateliami-i-dostupom} -### 12.1. Role Based Access Control. {#role-based-access-control} +### 12.1. + Role Based Access Control. {#role-based-access-control} -[Виталий Баранов](https://github.com/vitlibar). Финальная стадия разработки, рабочая версия в начале февраля 2019. -Q1. Сейчас сделаны все интерфейсы в коде и запросы, но не сделаны варианты хранения прав кроме прототипа. +[Виталий Баранов](https://github.com/vitlibar). Финальная стадия разработки, рабочая версия в начале апреля 2019. +Q2. Сейчас сделаны все интерфейсы в коде и запросы, но не сделаны варианты хранения прав кроме прототипа. Upd. Сделано хранение прав. До готового к использованию состояния осталось несколько доработок. ### 12.2. + Управление пользователями и правами доступа с помощью SQL запросов. {#upravlenie-polzovateliami-i-pravami-dostupa-s-pomoshchiu-sql-zaprosov} @@ -897,7 +945,7 @@ Q1. Сделано управление правами полностью, но ### 12.3. Подключение справочника пользователей и прав доступа из LDAP. {#podkliuchenie-spravochnika-polzovatelei-i-prav-dostupa-iz-ldap} [Виталий Баранов](https://github.com/vitlibar). 
Требует 12.1. -Q1/Q2. +Q2. ### 12.4. Подключение IDM системы Яндекса как справочника пользователей и прав доступа. {#podkliuchenie-idm-sistemy-iandeksa-kak-spravochnika-polzovatelei-i-prav-dostupa} @@ -911,6 +959,7 @@ Q1/Q2. [Виталий Баранов](https://github.com/vitlibar). Требует 12.1. + ## 13. Разделение ресурсов, multi-tenancy. {#razdelenie-resursov-multi-tenancy} ### 13.1. Overcommit запросов по памяти и вытеснение. {#overcommit-zaprosov-po-pamiati-i-vytesnenie} @@ -926,6 +975,8 @@ Q1/Q2. Требует 13.2 или сможем сделать более неудобную реализацию раньше. Обсуждается вариант неудобной реализации. Пока средний приоритет, целимся на Q1/Q2. Вариант реализации выбрал Александр Казаков. +Upd. Не уследили, и задачу стали обсуждать менеджеры. + ## 14. Диалект SQL. {#dialekt-sql} @@ -936,8 +987,6 @@ Q1/Q2. ### 14.2. Поддержка WITH для подзапросов. {#podderzhka-with-dlia-podzaprosov} -Михаил Коротов. - ### 14.3. Поддержка подстановок для множеств в правой части IN. {#podderzhka-podstanovok-dlia-mnozhestv-v-pravoi-chasti-in} ### 14.4. Поддержка подстановок для идентификаторов (имён) в SQL запросе. {#podderzhka-podstanovok-dlia-identifikatorov-imion-v-sql-zaprose} @@ -993,7 +1042,7 @@ zhang2014 ### 14.16. Синонимы для функций из MySQL. {#sinonimy-dlia-funktsii-iz-mysql} -### 14.17. Ввести понятие stateful функций. {#vvesti-poniatie-stateful-funktsii} +### 14.17. + Ввести понятие stateful функций. {#vvesti-poniatie-stateful-funktsii} zhang2014. Для runningDifference, neighbour - их учёт в оптимизаторе запросов. @@ -1018,13 +1067,15 @@ zhang2014. Павел Потёмкин, ВШЭ. + ## 15. Улучшение поддержки JOIN. {#uluchshenie-podderzhki-join} -### 15.1. Доведение merge JOIN до продакшена. {#dovedenie-merge-join-do-prodakshena} +### 15.1. + Доведение merge JOIN до продакшена. {#dovedenie-merge-join-do-prodakshena} Артём Зуйков. Сейчас merge JOIN включается вручную опцией и всегда замедляет запросы. Хотим, чтобы он замедлял запросы только когда это неизбежно. Кстати, смысл merge JOIN появляется только совместно с 15.2 и 15.3. Q1. Сделали адаптивный вариант, но вроде он что-то всё-ещё замедляет. +Задача сделана, но всё работает слишком медленно. ### 15.1.1. Алгоритм two-level merge JOIN. {#algoritm-two-level-merge-join} @@ -1052,6 +1103,7 @@ Q1. Сделали адаптивный вариант, но вроде он ч Артём Зуйков. + ## 16. Типы данных и функции. {#tipy-dannykh-i-funktsii} ### 16.1. + DateTime64. {#datetime64} @@ -1073,6 +1125,7 @@ Upd. Секретного изменения в работе не будет, з ### 16.6. Функции нормализации и хэширования SQL запросов. {#funktsii-normalizatsii-i-kheshirovaniia-sql-zaprosov} + ## 17. Работа с географическими данными. {#rabota-s-geograficheskimi-dannymi} ### 17.1. Гео-словари для определения региона по координатам. {#geo-slovari-dlia-opredeleniia-regiona-po-koordinatam} @@ -1105,6 +1158,7 @@ Upd. Андрей сделал прототип более оптимально Сейчас функция тихо не работает в случае полигонов с самопересечениями, надо кидать исключение. + ## 18. Машинное обучение и статистика. {#mashinnoe-obuchenie-i-statistika} ### 18.1. Инкрементальная кластеризация данных. {#inkrementalnaia-klasterizatsiia-dannykh} @@ -1123,6 +1177,7 @@ Upd. Андрей сделал прототип более оптимально В очереди. Возможно, Александр Кожихов. У него сначала идёт задача 24.26. + ## 19. Улучшение работы кластера. {#uluchshenie-raboty-klastera} ### 19.1. Параллельные кворумные вставки без линеаризуемости. {#parallelnye-kvorumnye-vstavki-bez-linearizuemosti} @@ -1153,7 +1208,7 @@ Upd. Алексей сделал какой-то вариант, но борет Hold. 
Полезно для заказчиков внутри Яндекса, но есть риски. Эту задачу никто не будет делать. -### 19.4. internal\_replication = ‘auto’. {#internal-replication-auto} +### 19.4. internal_replication = ‘auto’. {#internal-replication-auto} ### 19.5. Реплицируемые базы данных. {#replitsiruemye-bazy-dannykh} @@ -1177,18 +1232,20 @@ Hold. Полезно для заказчиков внутри Яндекса, н Требует 1.6, 19.1, 19.6, 19.7, 19.8, 19.9. + ## 20. Мутации данных. {#mutatsii-dannykh} Пока все задачи по точечным UPDATE/DELETE имеют низкий приоритет, но ожидаем взять в работу в середине 2020. ### 20.1. Поддержка DELETE путём запоминания множества затронутых кусков и ключей. {#podderzhka-delete-putiom-zapominaniia-mnozhestva-zatronutykh-kuskov-i-kliuchei} -### 20.2. Поддержка DELETE путём преобразования множества ключей в множество row\_numbers на реплике, столбца флагов и индекса по диапазонам. {#podderzhka-delete-putiom-preobrazovaniia-mnozhestva-kliuchei-v-mnozhestvo-row-numbers-na-replike-stolbtsa-flagov-i-indeksa-po-diapazonam} +### 20.2. Поддержка DELETE путём преобразования множества ключей в множество row_numbers на реплике, столбца флагов и индекса по диапазонам. {#podderzhka-delete-putiom-preobrazovaniia-mnozhestva-kliuchei-v-mnozhestvo-row-numbers-na-replike-stolbtsa-flagov-i-indeksa-po-diapazonam} ### 20.3. Поддержка ленивых DELETE путём запоминания выражений и преобразования к множеству ключей в фоне. {#podderzhka-lenivykh-delete-putiom-zapominaniia-vyrazhenii-i-preobrazovaniia-k-mnozhestvu-kliuchei-v-fone} ### 20.4. Поддержка UPDATE с помощью преобразования в DELETE и вставок. {#podderzhka-update-s-pomoshchiu-preobrazovaniia-v-delete-i-vstavok} + ## 21. Оптимизации производительности. {#optimizatsii-proizvoditelnosti} ### 21.1. + Параллельный парсинг форматов. {#parallelnyi-parsing-formatov} @@ -1201,7 +1258,7 @@ Hold. Полезно для заказчиков внутри Яндекса, н После 21.1, предположительно Никита Михайлов. Задача сильно проще чем 21.1. -### 21.3. Исправление низкой производительности анализа индекса в случае большого множества в секции IN. {#ispravlenie-nizkoi-proizvoditelnosti-analiza-indeksa-v-sluchae-bolshogo-mnozhestva-v-sektsii-in} +### 21.3. + Исправление низкой производительности анализа индекса в случае большого множества в секции IN. {#ispravlenie-nizkoi-proizvoditelnosti-analiza-indeksa-v-sluchae-bolshogo-mnozhestva-v-sektsii-in} Нужно всем (Zen, БК, DataLens, TestEnv…). Антон Попов, Q1/Q2. @@ -1309,23 +1366,23 @@ Constraints позволяют задать выражение, истиннос В ClickHouse используется неоптимальный вариант top sort. Суть его в том, что из каждого блока достаётся top N записей, а затем, все блоки мержатся. Но доставание top N записей у каждого следующего блока бессмысленно, если мы знаем, что из них в глобальный top N войдёт меньше. Конечно нужно реализовать вариацию на тему priority queue (heap) с быстрым пропуском целых блоков, если ни одна строка не попадёт в накопленный top. -1. Рекурсивный вариант сортировки по кортежам. +2. Рекурсивный вариант сортировки по кортежам. Для сортировки по кортежам используется обычная сортировка с компаратором, который в цикле по элементам кортежа делает виртуальные вызовы `IColumn::compareAt`. Это неоптимально - как из-за короткого цикла по неизвестному в compile-time количеству элементов, так и из-за виртуальных вызовов. Чтобы обойтись без виртуальных вызовов, есть метод `IColumn::getPermutation`. Он используется в случае сортировки по одному столбцу. 
Есть вариант, что в случае сортировки по кортежу, что-то похожее тоже можно применить… например, сделать метод `updatePermutation`, принимающий аргументы offset и limit, и допереставляющий перестановку в диапазоне значений, в которых предыдущий столбец имел равные значения. -1. RadixSort для сортировки. +3. RadixSort для сортировки. Один наш знакомый начал делать задачу по попытке использования RadixSort для сортировки столбцов. Был сделан вариант indirect сортировки (для `getPermutation`), но не оптимизирован до конца - есть лишние ненужные перекладывания элементов. Для того, чтобы его оптимизировать, придётся добавить немного шаблонной магии (на последнем шаге что-то не копировать, вместо перекладывания индексов - складывать их в готовое место). Также этот человек добавил метод MSD Radix Sort для реализации radix partial sort. Но даже не проверил производительность. Наиболее содержательная часть задачи может состоять в применении Radix Sort для сортировки кортежей, расположенных в оперативке в виде Structure Of Arrays неизвестного в compile-time размера. Это может работать хуже, чем то, что описано в пункте 2… Но попробовать не помешает. -1. Three-way comparison sort. +4. Three-way comparison sort. Виртуальный метод `compareAt` возвращает -1, 0, 1. Но алгоритмы сортировки сравнениями обычно рассчитаны на `operator<` и не могут получить преимущества от three-way comparison. А можно ли написать так, чтобы преимущество было? -1. pdq partial sort +5. pdq partial sort -Хороший алгоритм сортировки сравнениями `pdqsort` не имеет варианта partial sort. Заметим, что на практике, почти все сортировки в запросах ClickHouse являются partial\_sort, так как `ORDER BY` почти всегда идёт с `LIMIT`. Кстати, Данила Кутенин уже попробовал это и показал, что в тривиальном случае преимущества нет. Но не очевидно, что нельзя сделать лучше. +Хороший алгоритм сортировки сравнениями `pdqsort` не имеет варианта partial sort. Заметим, что на практике, почти все сортировки в запросах ClickHouse являются partial_sort, так как `ORDER BY` почти всегда идёт с `LIMIT`. Кстати, Данила Кутенин уже попробовал это и показал, что в тривиальном случае преимущества нет. Но не очевидно, что нельзя сделать лучше. ### 21.20. Использование материализованных представлений для оптимизации запросов. {#ispolzovanie-materializovannykh-predstavlenii-dlia-optimizatsii-zaprosov} @@ -1344,6 +1401,7 @@ Constraints позволяют задать выражение, истиннос zhang2014. Есть pull request. + ## 22. Долги и недоделанные возможности. {#dolgi-i-nedodelannye-vozmozhnosti} ### 22.1. + Исправление неработающих таймаутов, если используется TLS. {#ispravlenie-nerabotaiushchikh-taimautov-esli-ispolzuetsia-tls} @@ -1362,6 +1420,7 @@ N.Vartolomei. Александр Казаков. Нужно для Яндекс.Метрики и Datalens. Задача постепенно тащится и исправлениями в соседних местах стала менее актуальна. В Q1 будет сделана или отменена с учётом 1.2. и 1.3. +Upd. Добавили таймауты. ### 22.5. + Исправление редких срабатываний TSan в stress тестах в CI. {#ispravlenie-redkikh-srabatyvanii-tsan-v-stress-testakh-v-ci} @@ -1470,18 +1529,19 @@ Altinity. [Александр Сапин](https://github.com/alesapin) + ## 23. Default Festival. {#default-festival} -### 23.1. + Включение minimalistic\_part\_header в ZooKeeper. {#vkliuchenie-minimalistic-part-header-v-zookeeper} +### 23.1. + Включение minimalistic_part_header в ZooKeeper. {#vkliuchenie-minimalistic-part-header-v-zookeeper} Сильно уменьшает объём данных в ZooKeeper. Уже год в продакшене в Яндекс.Метрике. Алексей Миловидов, ноябрь 2019. 
-### 23.2. Включение distributed\_aggregation\_memory\_efficient. {#vkliuchenie-distributed-aggregation-memory-efficient} +### 23.2. Включение distributed_aggregation_memory_efficient. {#vkliuchenie-distributed-aggregation-memory-efficient} Есть риски меньшей производительности лёгких запросов, хотя производительность тяжёлых запросов всегда увеличивается. -### 23.3. Включение min\_bytes\_to\_external\_sort и min\_bytes\_to\_external\_group\_by. {#vkliuchenie-min-bytes-to-external-sort-i-min-bytes-to-external-group-by} +### 23.3. Включение min_bytes_to_external_sort и min_bytes_to_external_group_by. {#vkliuchenie-min-bytes-to-external-sort-i-min-bytes-to-external-group-by} Желательно 5.2. и 13.1. @@ -1489,7 +1549,7 @@ Altinity. Есть гипотеза, что плохо работает на очень больших кластерах. -### 23.5. Включение compile\_expressions. {#vkliuchenie-compile-expressions} +### 23.5. Включение compile_expressions. {#vkliuchenie-compile-expressions} Требует 7.2. Задачу изначально на 99% сделал Денис Скоробогатов, ВШЭ и Яндекс. Остальной процент доделывал Алексей Миловидов, а затем [Александр Сапин](https://github.com/alesapin). @@ -1514,6 +1574,7 @@ Q1. [Николай Кочетов](https://github.com/KochetovNicolai). Возможность mlock бинарника сделал Олег Алексеенков [\#3553](https://github.com/ClickHouse/ClickHouse/pull/3553) . Поможет, когда на серверах кроме ClickHouse работает много посторонних программ (мы иногда называем их в шутку «треш-программами»). + ## 24. Экспериментальные задачи. {#eksperimentalnye-zadachi} ### 24.1. Веб-интерфейс для просмотра состояния кластера и профилирования запросов. {#veb-interfeis-dlia-prosmotra-sostoianiia-klastera-i-profilirovaniia-zaprosov} @@ -1553,7 +1614,7 @@ ClickHouse поддерживает LZ4 и ZSTD для сжатия данных Смотрите также 24.5. -1. Шифрование отдельных значений. +2. Шифрование отдельных значений. Для этого требуется реализовать функции шифрования и расшифрования, доступные из SQL. Для шифрования реализовать возможность добавления нужного количества случайных бит для исключения одинаковых зашифрованных значений на одинаковых данных. Это позволит реализовать возможность «забывания» данных без удаления строк таблицы: можно шифровать данные разных клиентов разными ключами, и для того, чтобы забыть данные одного клиента, потребуется всего лишь удалить ключ. ### 24.6. Userspace RAID. {#userspace-raid} @@ -1586,7 +1647,7 @@ RAID позволяет одновременно увеличить надёжн Дмитрий Ковальков, ВШЭ и Яндекс. -Подавляющее большинство кода ClickHouse написана для x86\_64 с набором инструкций до SSE 4.2 включительно. Лишь отдельные редкие функции поддерживают AVX/AVX2/AVX512 с динамической диспетчеризацией. +Подавляющее большинство кода ClickHouse написана для x86_64 с набором инструкций до SSE 4.2 включительно. Лишь отдельные редкие функции поддерживают AVX/AVX2/AVX512 с динамической диспетчеризацией. В первой части задачи, следует добавить в ClickHouse реализации некоторых примитивов, оптимизированные под более новый набор инструкций. Например, AVX2 реализацию генератора случайных чисел pcg: https://github.com/lemire/simdpcg @@ -1598,6 +1659,8 @@ RAID позволяет одновременно увеличить надёжн Продолжение 24.8. +Upd. Есть pull request. + ### 24.10. Поддержка типов half/bfloat16/unum. 
{#podderzhka-tipov-halfbfloat16unum} [\#7657](https://github.com/ClickHouse/ClickHouse/issues/7657) @@ -1633,6 +1696,7 @@ ClickHouse предоставляет достаточно богатый наб В компании nVidia сделали прототип offloading вычисления GROUP BY с некоторыми из агрегатных функций в ClickHouse и обещат предоставить исходники в публичный доступ для дальнейшего развития. Предлагается изучить этот прототип и расширить его применимость для более широкого сценария использования. В качестве альтернативы, предлагается изучить исходные коды системы `OmniSci` или `Alenka` или библиотеку `CUB` https://nvlabs.github.io/cub/ и применить некоторые из алгоритмов в ClickHouse. Upd. В компании nVidia выложили прототип, теперь нужна интеграция в систему сборки. +Upd. Интеграция в систему сборки - Иван Лежанкин. ### 24.13. Stream запросы. {#stream-zaprosy} @@ -1791,7 +1855,7 @@ Amos Bird, но его решение слишком громоздкое и п ### 25.10. Митапы в России и Беларуси: Москва x2 + митап для разработчиков или хакатон, Санкт-Петербург, Минск, Нижний Новгород, Екатеринбург, Новосибирск и/или Академгородок, Иннополис или Казань. {#mitapy-v-rossii-i-belarusi-moskva-x2-mitap-dlia-razrabotchikov-ili-khakaton-sankt-peterburg-minsk-nizhnii-novgorod-ekaterinburg-novosibirsk-iili-akademgorodok-innopolis-ili-kazan} -Екатерина - организация +Екатерина - организация. Upd. Проведено два онлайн митапа на русском. ### 25.11. Митапы зарубежные: восток США (Нью Йорк, возможно Raleigh), возможно северо-запад (Сиэтл), Китай (Пекин снова, возможно митап для разработчиков или хакатон), Лондон. {#mitapy-zarubezhnye-vostok-ssha-niu-iork-vozmozhno-raleigh-vozmozhno-severo-zapad-sietl-kitai-pekin-snova-vozmozhno-mitap-dlia-razrabotchikov-ili-khakaton-london} @@ -1807,7 +1871,8 @@ Amos Bird, но его решение слишком громоздкое и п ### 25.14. Конференции в России: все HighLoad, возможно CodeFest, DUMP или UWDC, возможно C++ Russia. {#konferentsii-v-rossii-vse-highload-vozmozhno-codefest-dump-ili-uwdc-vozmozhno-c-russia} -Алексей Миловидов и все подготовленные докладчики +Алексей Миловидов и все подготовленные докладчики. +Upd. Есть Saint HighLoad online. ### 25.15. Конференции зарубежные: Percona, DataOps, попытка попасть на более крупные. {#konferentsii-zarubezhnye-percona-dataops-popytka-popast-na-bolee-krupnye} @@ -1848,7 +1913,7 @@ Amos Bird, но его решение слишком громоздкое и п ### 25.22. On-site помощь с ClickHouse компаниям в дни рядом с мероприятиями. {#on-site-pomoshch-s-clickhouse-kompaniiam-v-dni-riadom-s-meropriiatiiami} -[Иван Блинков](https://github.com/blinkov/) - организация +[Иван Блинков](https://github.com/blinkov/) - организация. Проверил мероприятие для турецкой компании. ### 25.23. Новый мерч для ClickHouse. {#novyi-merch-dlia-clickhouse} From 9210a50c5c95f26203377f5b3327b4f735fe2709 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Tue, 14 Apr 2020 03:00:48 +0300 Subject: [PATCH 397/484] simple backport script --- utils/simple-backport/backport.sh | 4 ++-- utils/simple-backport/changelog.sh | 4 ++-- utils/simple-backport/format-changelog.py | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/utils/simple-backport/backport.sh b/utils/simple-backport/backport.sh index ade0b54f24d..7d05f6902d0 100755 --- a/utils/simple-backport/backport.sh +++ b/utils/simple-backport/backport.sh @@ -15,8 +15,8 @@ git log "$merge_base..origin/$branch" --first-parent > "$branch-log.txt" # variant is squashed. Next are some backport message variants. 
find_prs=(sed -n "s/^.*Merge pull request #\([[:digit:]]\+\).*$/\1/p; s/^.*(#\([[:digit:]]\+\))$/\1/p; - s/^.*back[- ]*port[ of]*#\([[:digit:]]\+\).*$/\1/Ip; - s/^.*cherry[- ]*pick[ of]*#\([[:digit:]]\+\).*$/\1/Ip") + s/^.*back[- ]*port[ed of]*#\([[:digit:]]\+\).*$/\1/Ip; + s/^.*cherry[- ]*pick[ed of]*#\([[:digit:]]\+\).*$/\1/Ip") "${find_prs[@]}" master-log.txt | sort -rn > master-prs.txt "${find_prs[@]}" "$branch-log.txt" | sort -rn > "$branch-prs.txt" diff --git a/utils/simple-backport/changelog.sh b/utils/simple-backport/changelog.sh index 43a0b2d46da..b2f95f22533 100755 --- a/utils/simple-backport/changelog.sh +++ b/utils/simple-backport/changelog.sh @@ -11,8 +11,8 @@ git log "$from..$to" --first-parent > "changelog-log.txt" # variant is squashed. Next are some backport message variants. find_prs=(sed -n "s/^.*Merge pull request #\([[:digit:]]\+\).*$/\1/p; s/^.*(#\([[:digit:]]\+\))$/\1/p; - s/^.*back[- ]*port[ ]*#\([[:digit:]]\+\).*$/\1/Ip; - s/^.*cherry[- ]*pick[ ]*#\([[:digit:]]\+\).*$/\1/Ip") + s/^.*back[- ]*port[ed of]*#\([[:digit:]]\+\).*$/\1/Ip; + s/^.*cherry[- ]*pick[ed of]*#\([[:digit:]]\+\).*$/\1/Ip") "${find_prs[@]}" "changelog-log.txt" | sort -rn > "changelog-prs.txt" diff --git a/utils/simple-backport/format-changelog.py b/utils/simple-backport/format-changelog.py index 0f379d5529f..d6f97b358ed 100755 --- a/utils/simple-backport/format-changelog.py +++ b/utils/simple-backport/format-changelog.py @@ -9,7 +9,7 @@ import collections import re parser = argparse.ArgumentParser(description='Format changelog for given PRs.') -parser.add_argument('file', metavar='FILE', type=argparse.FileType('r', encoding='utf-8'), nargs=1, default=sys.stdin, help='File with PR numbers, one per line.') +parser.add_argument('file', metavar='FILE', type=argparse.FileType('r', encoding='utf-8'), nargs='?', default=[sys.stdin], help='File with PR numbers, one per line.') args = parser.parse_args() # This function mirrors the PR description checks in ClickhousePullRequestTrigger. From b4499ca607c9d195ddd6e35846ca79438a2b614a Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 14 Apr 2020 05:08:14 +0300 Subject: [PATCH 398/484] Fix ugly typo --- website/templates/index/scalable.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/templates/index/scalable.html b/website/templates/index/scalable.html index fb86091ff06..672a02f202b 100644 --- a/website/templates/index/scalable.html +++ b/website/templates/index/scalable.html @@ -2,7 +2,7 @@
-    Linerarly scalable
+    Linearly scalable
     ClickHouse scales well both vertically and horizontally. ClickHouse is easily adaptable to perform either on a cluster with hundreds or thousands of nodes, or on a single server, or even on a tiny virtual machine. Currently, there are installations with multiple trillion rows or hundreds of terabytes of data per single node.

    There are many ClickHouse clusters consisting of multiple hundred nodes, including few clusters of Yandex Metrica, while the largest known ClickHouse cluster is well over a thousand nodes. From 8e10a5504a5a98df1f8b4870619183e6e2905a56 Mon Sep 17 00:00:00 2001 From: zvrr Date: Tue, 14 Apr 2020 12:21:46 +0800 Subject: [PATCH 399/484] Update settings.md MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit update "动物管理员" to zookeeper --- .../operations/server_configuration_parameters/settings.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/zh/operations/server_configuration_parameters/settings.md b/docs/zh/operations/server_configuration_parameters/settings.md index b78f8173741..fefeac9fc57 100644 --- a/docs/zh/operations/server_configuration_parameters/settings.md +++ b/docs/zh/operations/server_configuration_parameters/settings.md @@ -774,11 +774,11 @@ TCP端口,用于与客户端进行安全通信。 使用它与 [OpenSSL](#serv users.xml ``` -## 动物园管理员 {#server-settings_zookeeper} +## zookeeper {#server-settings_zookeeper} -包含允许ClickHouse与 [动物园管理员](http://zookeeper.apache.org/) 集群。 +包含允许ClickHouse与 [zookpeer](http://zookeeper.apache.org/) 集群。 -ClickHouse使用ZooKeeper在使用复制表时存储副本的元数据。 如果未使用复制的表,则可以省略此部分参数。 +ClickHouse使用ZooKeeper存储复制表副本的元数据。 如果未使用复制的表,则可以省略此部分参数。 本节包含以下参数: From b50d622e73e89fd16b4bb8da5f9d2587499ed147 Mon Sep 17 00:00:00 2001 From: zvrr Date: Tue, 14 Apr 2020 12:30:04 +0800 Subject: [PATCH 400/484] Update settings.md MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit update some setting to 配置 --- .../server_configuration_parameters/settings.md | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/docs/zh/operations/server_configuration_parameters/settings.md b/docs/zh/operations/server_configuration_parameters/settings.md index b78f8173741..2cffc7e5d2c 100644 --- a/docs/zh/operations/server_configuration_parameters/settings.md +++ b/docs/zh/operations/server_configuration_parameters/settings.md @@ -5,13 +5,13 @@ toc_priority: 57 toc_title: "\u670D\u52A1\u5668\u8BBE\u7F6E" --- -# 服务器设置 {#server-settings} +# 服务器配置 {#server-settings} ## builtin\_dictionaries\_reload\_interval {#builtin-dictionaries-reload-interval} -重新加载内置字典之前的时间间隔(以秒为单位)。 +重新加载内置字典的间隔时间(以秒为单位)。 -ClickHouse每x秒重新加载内置字典。 这使得编辑字典成为可能 “on the fly” 无需重新启动服务器。 +ClickHouse每x秒重新加载内置字典。 这使得编辑字典 “on the fly”,而无需重新启动服务器。 默认值:3600. @@ -23,7 +23,7 @@ ClickHouse每x秒重新加载内置字典。 这使得编辑字典成为可能 ## 压缩 {#server-settings-compression} -数据压缩设置 [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md)-发动机表。 +数据压缩配置 [MergeTree](../../engines/table_engines/mergetree_family/mergetree.md)-引擎表。 !!! warning "警告" 如果您刚开始使用ClickHouse,请不要使用它。 @@ -41,7 +41,7 @@ ClickHouse每x秒重新加载内置字典。 这使得编辑字典成为可能 ``` -`` 字段: +`` 参数: - `min_part_size` – The minimum size of a data part. - `min_part_size_ratio` – The ratio of the data part size to the table size. @@ -82,9 +82,9 @@ ClickHouse每x秒重新加载内置字典。 这使得编辑字典成为可能 ## default\_profile {#default-profile} -默认设置配置文件。 +默认配置文件。 -设置配置文件位于参数中指定的文件中 `user_config`. +配置文件位于`user_config`参数指定的文件中 . 
**示例**

From c5c424b4470ee3f0d03f6780729876ed31f97f22 Mon Sep 17 00:00:00 2001
From: Yatsishin Ilya <2159081+qoega@users.noreply.github.com>
Date: Fri, 10 Apr 2020 17:26:31 +0300
Subject: [PATCH 401/484] add junit_to_html_util

---
 utils/junit_to_html/junit-noframes.xsl | 398 +++++++++++++++++++++++++
 utils/junit_to_html/junit_to_html      |  25 ++
 2 files changed, 423 insertions(+)
 create mode 100644 utils/junit_to_html/junit-noframes.xsl
 create mode 100755 utils/junit_to_html/junit_to_html

diff --git a/utils/junit_to_html/junit-noframes.xsl b/utils/junit_to_html/junit-noframes.xsl
new file mode 100644
index 00000000000..a8df085f719
--- /dev/null
+++ b/utils/junit_to_html/junit-noframes.xsl
@@ -0,0 +1,398 @@
[The 398 added XSLT lines are not recoverable here: extraction stripped the XML markup, leaving only "+" markers and bare text nodes. The surviving fragments (a page titled "Test Results" with a "Back to top" link; a "Summary" table with Tests, Failures, Errors, Success rate and Time columns; the note "failures are anticipated and checked for with assertions while errors are unanticipated"; per-suite tables with Name, Tests, Errors, Failures, Time(s), Time Stamp and Host columns; per-testcase tables with Name, Status, Type and Time(s) columns; distinct row colors for Error, Failure and Success; and "at line / column" positions for failures) match a stylesheet in the style of Apache Ant's junit-noframes.xsl, which renders a JUnit XML report as a single self-contained HTML page.]
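To make the elided stylesheet concrete, the following sketch produces a minimal JUnit-style report that such a stylesheet could render. The element and attribute names follow the common JUnit XML convention and are assumptions here, since the stylesheet's own markup was lost above.

```
#!/usr/bin/env python
# Illustrative only: writes a tiny JUnit-style report (junit.xml) for manual testing.
# The schema below is the conventional JUnit XML layout, not taken from the patch.
SAMPLE = """<testsuites>
  <testsuite name="example_suite" tests="2" failures="1" errors="0"
             time="0.10" timestamp="2020-04-10T17:26:31" hostname="localhost">
    <testcase classname="example_suite" name="test_ok" time="0.05"/>
    <testcase classname="example_suite" name="test_bad" time="0.05">
      <failure type="AssertionError" message="expected 1, got 2">traceback here</failure>
    </testcase>
  </testsuite>
</testsuites>
"""

with open("junit.xml", "w") as report_file:
    report_file.write(SAMPLE)
```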
diff --git a/utils/junit_to_html/junit_to_html b/utils/junit_to_html/junit_to_html
new file mode 100755
index 00000000000..440541c3db6
--- /dev/null
+++ b/utils/junit_to_html/junit_to_html
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+import os
+import sys
+import lxml.etree as etree
+
+def _convert_junit_to_html(junit_path, html_path):
+    with open(os.path.join(os.path.dirname(__file__), "junit-noframes.xsl")) as xslt_file:
+        junit_to_html_xslt = etree.parse(xslt_file)
+    with open(junit_path) as junit_file:
+        junit_xml = etree.parse(junit_file)
+    transform = etree.XSLT(junit_to_html_xslt)
+    print transform(junit_xml)
+    html = etree.tostring(transform(junit_xml), encoding="utf-8")
+    html_dir = os.path.dirname(html_path)
+    if html_dir and not os.path.exists(html_dir):
+        os.makedirs(html_dir)
+    with open(html_path, "w") as html_file:
+        html_file.write(html)
+
+if __name__ == "__main__":
+    if len(sys.argv) < 3:
+        raise Exception("Insufficient arguments: junit.xml result.html")
+    junit_path, html_path = sys.argv[1], sys.argv[2]
+    _convert_junit_to_html(junit_path, html_path)

From 26a1b3163a3c14f4fc4aead4215716a3d12a0420 Mon Sep 17 00:00:00 2001
From: Yatsishin Ilya <2159081+qoega@users.noreply.github.com>
Date: Tue, 14 Apr 2020 10:11:05 +0300
Subject: [PATCH 402/484] better colors

---
 utils/junit_to_html/junit-noframes.xsl | 11 ++++++-----
 utils/junit_to_html/junit_to_html      |  1 -
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/utils/junit_to_html/junit-noframes.xsl b/utils/junit_to_html/junit-noframes.xsl
index a8df085f719..4532ca87c60 100644
--- a/utils/junit_to_html/junit-noframes.xsl
+++ b/utils/junit_to_html/junit-noframes.xsl
@@ -34,6 +34,7 @@ under the License.
 Test Results
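As a usage note (not part of the patch): with the sample report from the sketch above in the working directory and lxml installed, the new utility can be smoke-tested as follows. The paths are examples only; the output file's parent directory is created on demand.

```
#!/usr/bin/env python
# Hypothetical smoke test for the utility added in PATCH 401; paths are examples.
import subprocess

subprocess.check_call([
    "./utils/junit_to_html/junit_to_html",  # the new script; requires lxml
    "junit.xml",                            # input: JUnit-style XML report
    "out/result.html",                      # output: rendered HTML report
])
```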